[
  {
    "path": ".azure-pipelines/bazel.yml",
    "content": "parameters:\n  - name: ciTarget\n    displayName: \"CI target\"\n    type: string\n    default: bazel.release\n  - name: artifactSuffix\n    displayName: \"Suffix of artifact\"\n    type: string\n    default: \"\"\n  - name: rbe\n    displayName: \"Enable RBE\"\n    type: boolean\n    default: true\n  - name: managedAgent\n    type: boolean\n    default: true\n  - name: bazelBuildExtraOptions\n    type: string\n    default: \"--flaky_test_attempts=2\"\n\nsteps:\n  - task: Cache@2\n    inputs:\n      key: '\"${{ parameters.ciTarget }}\" | ./WORKSPACE | **/*.bzl'\n      path: $(Build.StagingDirectory)/repository_cache\n    continueOnError: true\n\n  - bash: .azure-pipelines/cleanup.sh\n    displayName: \"Removing tools from agent\"\n    condition: ${{ parameters.managedAgent }}\n\n  - bash: |\n      echo \"disk space at beginning of build:\"\n      df -h\n    displayName: \"Check disk space at beginning\"\n\n  - bash: |\n      sudo mkdir -p /etc/docker\n      echo '{\n        \"ipv6\": true,\n        \"fixed-cidr-v6\": \"2001:db8:1::/64\"\n      }' | sudo tee /etc/docker/daemon.json\n      sudo service docker restart\n    displayName: \"Enable IPv6\"\n    condition: ${{ parameters.managedAgent }}\n\n  - script: ci/run_envoy_docker.sh 'ci/do_ci.sh ${{ parameters.ciTarget }}'\n    workingDirectory: $(Build.SourcesDirectory)\n    env:\n      ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)\n      SLACK_TOKEN: $(SLACK_TOKEN)\n      REPO_URI: $(Build.Repository.Uri)\n      BUILD_URI: $(Build.BuildUri)\n      ${{ if parameters.rbe }}:\n        ENVOY_RBE: \"1\"\n        BAZEL_BUILD_EXTRA_OPTIONS: \"--config=remote-ci --jobs=$(RbeJobs) ${{ parameters.bazelBuildExtraOptions }}\"\n        BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com\n        BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance\n        GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)\n      ${{ if eq(parameters.rbe, false) }}:\n        
BAZEL_BUILD_EXTRA_OPTIONS: \"${{ parameters.bazelBuildExtraOptions }}\"\n        BAZEL_REMOTE_CACHE: $(LocalBuildCache)\n\n    displayName: \"Run CI script\"\n\n  - bash: |\n      echo \"disk space at end of build:\"\n      df -h\n    displayName: \"Check disk space at end\"\n    condition: always()\n\n  - task: PublishTestResults@2\n    inputs:\n      testResultsFiles: \"**/bazel-out/**/testlogs/**/test.xml\"\n      testRunTitle: \"${{ parameters.ciTarget }}\"\n      searchFolder: $(Build.StagingDirectory)/tmp\n    condition: always()\n\n  - task: PublishBuildArtifacts@1\n    inputs:\n      pathtoPublish: \"$(Build.StagingDirectory)/envoy\"\n      artifactName: ${{ parameters.ciTarget }}${{ parameters.artifactSuffix }}\n    condition: always()\n"
  },
  {
    "path": ".azure-pipelines/cleanup.sh",
    "content": "#!/bin/bash\n\nset -e\n\n# Temporary script to remove tools from Azure pipelines agent to create more disk space room.\nsudo apt-get update -y\nsudo apt-get purge -y --no-upgrade 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' 'libgl1' \\\n  'adoptopenjdk-*' 'azure-cli' 'google-chrome-stable' 'firefox' 'hhvm'\n\ndpkg-query -Wf '${Installed-Size}\\t${Package}\\n' | sort -rn\n"
  },
  {
    "path": ".azure-pipelines/pipelines.yml",
    "content": "trigger:\n  branches:\n    include:\n      - \"master\"\n      - \"release/v*\"\n  tags:\n    include:\n      - \"v*\"\n\n# PR build config is manually overridden in Azure pipelines UI with different secrets\npr: none\n\njobs:\n  - job: format\n    dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel.\n    pool:\n      vmImage: \"ubuntu-18.04\"\n    steps:\n      - task: Cache@2\n        inputs:\n          key: \"format | ./WORKSPACE | **/*.bzl\"\n          path: $(Build.StagingDirectory)/repository_cache\n        continueOnError: true\n\n      - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh'\n        workingDirectory: $(Build.SourcesDirectory)\n        env:\n          ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)\n          BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com\n          BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance\n          GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)\n        displayName: \"Run check format scripts\"\n\n      - task: PublishBuildArtifacts@1\n        inputs:\n          pathtoPublish: \"$(Build.StagingDirectory)/fix_format.diff\"\n          artifactName: format\n        condition: failed()\n\n  - job: release\n    displayName: \"Linux-x64 release\"\n    dependsOn: [\"format\"]\n    # For master builds, continue even if format fails\n    condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest')))\n    timeoutInMinutes: 360\n    pool:\n      vmImage: \"ubuntu-18.04\"\n    steps:\n      - template: bazel.yml\n        parameters:\n          ciTarget: bazel.release\n\n  - job: release_arm64\n    displayName: \"Linux-arm64 release\"\n    dependsOn: [\"format\"]\n    # For master builds, continue even if format fails\n    condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest')))\n    timeoutInMinutes: 360\n    pool: \"arm-large\"\n    steps:\n   
   - template: bazel.yml\n        parameters:\n          managedAgent: false\n          ciTarget: bazel.release\n          rbe: false\n          artifactSuffix: \".arm64\"\n          bazelBuildExtraOptions: \"--sandbox_base=/tmp/sandbox_base\"\n\n  - job: bazel\n    displayName: \"Linux-x64\"\n    dependsOn: [\"release\"]\n    # For master builds, continue even if format fails\n    condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest')))\n    strategy:\n      maxParallel: 3\n      matrix:\n        gcc:\n          CI_TARGET: \"bazel.gcc\"\n        clang_tidy:\n          CI_TARGET: \"bazel.clang_tidy\"\n        asan:\n          CI_TARGET: \"bazel.asan\"\n        tsan:\n          CI_TARGET: \"bazel.tsan\"\n        compile_time_options:\n          CI_TARGET: \"bazel.compile_time_options\"\n    timeoutInMinutes: 360\n    pool:\n      vmImage: \"ubuntu-18.04\"\n    steps:\n      - template: bazel.yml\n        parameters:\n          ciTarget: $(CI_TARGET)\n\n  - job: coverage\n    displayName: \"Linux-x64\"\n    dependsOn: [\"release\"]\n    timeoutInMinutes: 360\n    pool: \"x64-large\"\n    strategy:\n      maxParallel: 2\n      matrix:\n        coverage:\n          CI_TARGET: \"coverage\"\n        fuzz_coverage:\n          CI_TARGET: \"fuzz_coverage\"\n    steps:\n      - template: bazel.yml\n        parameters:\n          managedAgent: false\n          ciTarget: bazel.$(CI_TARGET)\n          rbe: false\n          # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces\n          bazelBuildExtraOptions: \"--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base\"\n\n      - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)'\n        displayName: \"Upload $(CI_TARGET) Report to GCS\"\n        env:\n          ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)\n          
GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)\n          GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket)\n        condition: always()\n\n  - job: docker\n    displayName: \"Linux multi-arch docker\"\n    dependsOn: [\"release\", \"release_arm64\"]\n    pool:\n      vmImage: \"ubuntu-18.04\"\n    steps:\n      - bash: .azure-pipelines/cleanup.sh\n        displayName: \"Removing tools from agent\"\n      - task: DownloadBuildArtifacts@0\n        inputs:\n          buildType: current\n          artifactName: \"bazel.release\"\n          itemPattern: \"bazel.release/envoy_binary.tar.gz\"\n          downloadType: single\n          targetPath: $(Build.StagingDirectory)\n      - task: DownloadBuildArtifacts@0\n        inputs:\n          buildType: current\n          artifactName: \"bazel.release.arm64\"\n          itemPattern: \"bazel.release.arm64/envoy_binary.tar.gz\"\n          downloadType: single\n          targetPath: $(Build.StagingDirectory)\n      - bash: |\n          set -e\n          mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64\n          mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64\n          ci/docker_ci.sh\n        workingDirectory: $(Build.SourcesDirectory)\n        env:\n          AZP_BRANCH: $(Build.SourceBranch)\n          AZP_SHA1: $(Build.SourceVersion)\n          DOCKERHUB_USERNAME: $(DockerUsername)\n          DOCKERHUB_PASSWORD: $(DockerPassword)\n      - task: PublishBuildArtifacts@1\n        inputs:\n          pathtoPublish: \"$(Build.StagingDirectory)/build_images\"\n          artifactName: docker\n        condition: always()\n\n  - job: examples\n    dependsOn: [\"docker\"]\n    displayName: \"Verify examples run as documented\"\n    pool:\n      vmImage: \"ubuntu-18.04\"\n    steps:\n      - task: DownloadBuildArtifacts@0\n        inputs:\n          buildType: current\n          artifactName: \"docker\"\n          
itemPattern: \"docker/envoy-docker-images.tar.xz\"\n          downloadType: single\n          targetPath: $(Build.StagingDirectory)\n      - bash: ./ci/do_ci.sh verify_examples\n        env:\n          ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)\n          NO_BUILD_SETUP: 1\n\n  - job: macOS\n    dependsOn: [\"format\"]\n    timeoutInMinutes: 360\n    pool:\n      vmImage: \"macos-latest\"\n    steps:\n      - script: ./ci/mac_ci_setup.sh\n        displayName: \"Install dependencies\"\n\n      - script: ./ci/mac_ci_steps.sh\n        displayName: \"Run Mac CI\"\n        env:\n          BAZEL_BUILD_EXTRA_OPTIONS: \"--remote_download_toplevel --flaky_test_attempts=2\"\n          BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com\n          BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance\n          GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)\n\n      - task: PublishTestResults@2\n        inputs:\n          testResultsFiles: \"**/bazel-testlogs/**/test.xml\"\n          testRunTitle: \"macOS\"\n        condition: always()\n\n      - script: ./ci/flaky_test/run_process_xml_mac.sh\n        displayName: \"Process Test Results\"\n        env:\n          TEST_TMPDIR: $(Build.SourcesDirectory)\n          SLACK_TOKEN: $(SLACK_TOKEN)\n          CI_TARGET: \"MacOS\"\n          REPO_URI: $(Build.Repository.Uri)\n          BUILD_URI: $(Build.BuildUri)\n\n  - job: Windows\n    dependsOn: [\"format\"]\n    timeoutInMinutes: 360\n    pool:\n      vmImage: \"windows-latest\"\n    steps:\n      - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh\n        displayName: \"Run Windows CI\"\n        env:\n          ENVOY_DOCKER_BUILD_DIR: \"$(Build.StagingDirectory)\"\n          ENVOY_RBE: \"true\"\n          BAZEL_BUILD_EXTRA_OPTIONS: \"--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)\"\n          BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com\n          BAZEL_REMOTE_INSTANCE: 
projects/envoy-ci/instances/default_instance\n          GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)\n      - task: PublishBuildArtifacts@1\n        inputs:\n          pathtoPublish: \"$(Build.StagingDirectory)/envoy\"\n          artifactName: windows.release\n        condition: always()\n"
  },
  {
    "path": ".bazelci/presubmit.yml",
    "content": "---\ntasks:\n  rbe:\n    name: \"RBE\"\n    platform: ubuntu1804\n    test_targets:\n      - \"//test/common/common/...\"\n      - \"//test/integration/...\"\n      - \"//test/exe/...\"\n    test_flags:\n      - \"--config=remote-clang-libc++\"\n      - \"--config=remote-ci\"\n      - \"--jobs=75\"\n  coverage:\n    name: \"Coverage\"\n    platform: ubuntu1804\n    shell_commands:\n      - \"bazel/setup_clang.sh /usr/lib/llvm-10\"\n    test_targets:\n      - \"//test/common/common/...\"\n      - \"//test/integration/...\"\n      - \"//test/exe/...\"\n    test_flags:\n      - \"--config=coverage\"\n      - \"--config=clang\"\n"
  },
  {
    "path": ".bazelignore",
    "content": "api\nexamples/grpc-bridge/script\ntools/clang_tools\n"
  },
  {
    "path": ".bazelrc",
    "content": "# Envoy specific Bazel build/test options.\n\n# Bazel doesn't need more than 200MB of memory for local build based on memory profiling:\n# https://docs.bazel.build/versions/master/skylark/performance.html#memory-profiling\n# The default JVM max heapsize is 1/4 of physical memory up to 32GB which could be large\n# enough to consume all memory constrained by cgroup in large host, which is the case in CircleCI.\n# Limiting JVM heapsize here to let it do GC more when approaching the limit to\n# leave room for compiler/linker.\n# The number 2G is choosed heuristically to both support in CircleCI and large enough for RBE.\n# Startup options cannot be selected via config.\nstartup --host_jvm_args=-Xmx2g\n\nbuild --workspace_status_command=\"bash bazel/get_workspace_status\"\nbuild --experimental_strict_action_env=true\nbuild --host_force_python=PY3\nbuild --action_env=BAZEL_LINKLIBS=-l%:libstdc++.a\nbuild --action_env=BAZEL_LINKOPTS=-lm\nbuild --host_javabase=@bazel_tools//tools/jdk:remote_jdk11\nbuild --javabase=@bazel_tools//tools/jdk:remote_jdk11\nbuild --enable_platform_specific_config\n\n# Enable position independent code, this option is not supported on Windows and default on on macOS.\nbuild:linux --copt=-fPIC\nbuild:linux --cxxopt=-std=c++17\nbuild:linux --conlyopt=-fexceptions\nbuild:linux --fission=dbg,opt\nbuild:linux --features=per_object_debug_info\n\n# We already have absl in the build, define absl=1 to tell googletest to use absl for backtrace.\nbuild --define absl=1\n\n# Pass PATH, CC, CXX and LLVM_CONFIG variables from the environment.\nbuild --action_env=CC\nbuild --action_env=CXX\nbuild --action_env=LLVM_CONFIG\nbuild --action_env=PATH\n\n# Common flags for sanitizers\nbuild:sanitizer --define tcmalloc=disabled\nbuild:sanitizer --linkopt -ldl\nbuild:sanitizer --build_tag_filters=-no_san\nbuild:sanitizer --test_tag_filters=-no_san\n\n# Common flags for Clang\nbuild:clang --action_env=BAZEL_COMPILER=clang\nbuild:clang 
--linkopt=-fuse-ld=lld\n\n# Basic ASAN/UBSAN that works for gcc\nbuild:asan --action_env=ENVOY_ASAN=1\nbuild:asan --config=sanitizer\n# ASAN installs its signal handler; disable ours so the stacktrace will be printed by ASAN\nbuild:asan --define signal_trace=disabled\nbuild:asan --define ENVOY_CONFIG_ASAN=1\nbuild:asan --copt -fsanitize=address,undefined\nbuild:asan --linkopt -fsanitize=address,undefined\n# vptr and function sanitizer are enabled in clang-asan if it is set up via bazel/setup_clang.sh.\nbuild:asan --copt -fno-sanitize=vptr,function\nbuild:asan --linkopt -fno-sanitize=vptr,function\nbuild:asan --copt -DADDRESS_SANITIZER=1\nbuild:asan --copt -D__SANITIZE_ADDRESS__\nbuild:asan --test_env=ASAN_OPTIONS=handle_abort=1:allow_addr2line=true:check_initialization_order=true:strict_init_order=true:detect_odr_violation=1\nbuild:asan --test_env=UBSAN_OPTIONS=halt_on_error=true:print_stacktrace=1\nbuild:asan --test_env=ASAN_SYMBOLIZER_PATH\n\n# Clang ASAN/UBSAN\nbuild:clang-asan --config=asan\nbuild:clang-asan --linkopt -fuse-ld=lld\n\n# macOS ASAN/UBSAN\nbuild:macos --cxxopt=-std=c++17\nbuild:macos-asan --config=asan\n# Workaround, see https://github.com/bazelbuild/bazel/issues/6932\nbuild:macos-asan --copt -Wno-macro-redefined\nbuild:macos-asan --copt -D_FORTIFY_SOURCE=0\n# Workaround, see https://github.com/bazelbuild/bazel/issues/4341\nbuild:macos-asan --copt -DGRPC_BAZEL_BUILD\n# Dynamic linking causes issues like: `dyld: malformed mach-o: load commands size (59272) > 32768`\nbuild:macos-asan --dynamic_mode=off\n\n# Clang TSAN\nbuild:clang-tsan --action_env=ENVOY_TSAN=1\nbuild:clang-tsan --config=sanitizer\nbuild:clang-tsan --define ENVOY_CONFIG_TSAN=1\nbuild:clang-tsan --copt -fsanitize=thread\nbuild:clang-tsan --linkopt -fsanitize=thread\nbuild:clang-tsan --linkopt -fuse-ld=lld\nbuild:clang-tsan --build_tag_filters=-no_san,-no_tsan\nbuild:clang-tsan --test_tag_filters=-no_san,-no_tsan\n# Needed due to 
https://github.com/libevent/libevent/issues/777\nbuild:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE\n# https://github.com/abseil/abseil-cpp/issues/760\n# https://github.com/google/sanitizers/issues/953\nbuild:clang-tsan --test_env=\"TSAN_OPTIONS=report_atomic_races=0\"\n\n# Clang MSAN - this is the base config for remote-msan and docker-msan. To run this config without\n# our build image, follow https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo\n# with libc++ instruction and provide corresponding `--copt` and `--linkopt` as well.\nbuild:clang-msan --action_env=ENVOY_MSAN=1\nbuild:clang-msan --config=sanitizer\nbuild:clang-msan --define ENVOY_CONFIG_MSAN=1\nbuild:clang-msan --copt -fsanitize=memory\nbuild:clang-msan --linkopt -fsanitize=memory\nbuild:clang-msan --copt -fsanitize-memory-track-origins=2\n# MSAN needs -O1 to get reasonable performance.\nbuild:clang-msan --copt -O1\n\n# Clang with libc++\nbuild:libc++ --config=clang\nbuild:libc++ --action_env=CXXFLAGS=-stdlib=libc++\nbuild:libc++ --action_env=LDFLAGS=-stdlib=libc++\nbuild:libc++ --action_env=BAZEL_CXXOPTS=-stdlib=libc++\nbuild:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a\nbuild:libc++ --action_env=BAZEL_LINKOPTS=-lm:-pthread\nbuild:libc++ --define force_libcpp=enabled\n\n# Optimize build for binary size reduction.\nbuild:sizeopt -c opt --copt -Os\n\n# Test options\nbuild --test_env=HEAPCHECK=normal --test_env=PPROF_PATH\n\n# Coverage options\ncoverage --config=coverage\ncoverage --build_tests_only\nbuild:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1\nbuild:coverage --action_env=GCOV=llvm-profdata\nbuild:coverage --copt=-DNDEBUG\n# 1.5x original timeout + 300s for trace merger in all categories\nbuild:coverage --test_timeout=390,750,1500,5700\nbuild:coverage --define=dynamic_link_tests=true\nbuild:coverage --define=ENVOY_CONFIG_COVERAGE=1\nbuild:coverage --cxxopt=\"-DENVOY_CONFIG_COVERAGE=1\"\nbuild:coverage 
--coverage_support=@envoy//bazel/coverage:coverage_support\nbuild:coverage --test_env=CC_CODE_COVERAGE_SCRIPT=external/envoy/bazel/coverage/collect_cc_coverage.sh\nbuild:coverage --test_env=HEAPCHECK=\nbuild:coverage --combined_report=lcov\nbuild:coverage --strategy=TestRunner=sandboxed,local\nbuild:coverage --strategy=CoverageReport=sandboxed,local\nbuild:coverage --experimental_use_llvm_covmap\nbuild:coverage --collect_code_coverage\nbuild:coverage --test_tag_filters=-nocoverage\nbuild:coverage --instrumentation_filter=\"//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]\"\ncoverage:test-coverage --test_arg=\"-l trace\"\ncoverage:fuzz-coverage --config=plain-fuzzer\ncoverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh\n\n# Remote execution: https://docs.bazel.build/versions/master/remote-execution.html\nbuild:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1\n\nbuild:rbe-toolchain-clang --config=rbe-toolchain\nbuild:rbe-toolchain-clang --platforms=@rbe_ubuntu_clang//config:platform\nbuild:rbe-toolchain-clang --host_platform=@rbe_ubuntu_clang//config:platform\nbuild:rbe-toolchain-clang --crosstool_top=@rbe_ubuntu_clang//cc:toolchain\nbuild:rbe-toolchain-clang --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain\nbuild:rbe-toolchain-clang --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin\n\nbuild:rbe-toolchain-clang-libc++ --config=rbe-toolchain\nbuild:rbe-toolchain-clang-libc++ --platforms=@rbe_ubuntu_clang_libcxx//config:platform\nbuild:rbe-toolchain-clang-libc++ --host_platform=@rbe_ubuntu_clang_libcxx//config:platform\nbuild:rbe-toolchain-clang-libc++ --crosstool_top=@rbe_ubuntu_clang_libcxx//cc:toolchain\nbuild:rbe-toolchain-clang-libc++ --extra_toolchains=@rbe_ubuntu_clang_libcxx//config:cc-toolchain\nbuild:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ 
--action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin\nbuild:rbe-toolchain-clang-libc++ --action_env=CXXFLAGS=-stdlib=libc++\nbuild:rbe-toolchain-clang-libc++ --action_env=LDFLAGS=-stdlib=libc++\nbuild:rbe-toolchain-clang-libc++ --define force_libcpp=enabled\n\nbuild:rbe-toolchain-msan --linkopt=-L/opt/libcxx_msan/lib\nbuild:rbe-toolchain-msan --linkopt=-Wl,-rpath,/opt/libcxx_msan/lib\nbuild:rbe-toolchain-msan --config=clang-msan\n\nbuild:rbe-toolchain-tsan --linkopt=-L/opt/libcxx_tsan/lib\nbuild:rbe-toolchain-tsan --linkopt=-Wl,-rpath,/opt/libcxx_tsan/lib\nbuild:rbe-toolchain-tsan --config=clang-tsan\n\nbuild:rbe-toolchain-gcc --config=rbe-toolchain\nbuild:rbe-toolchain-gcc --platforms=@rbe_ubuntu_gcc//config:platform\nbuild:rbe-toolchain-gcc --host_platform=@rbe_ubuntu_gcc//config:platform\nbuild:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain\nbuild:rbe-toolchain-gcc --extra_toolchains=@rbe_ubuntu_gcc//config:cc-toolchain\n\nbuild:rbe-toolchain-msvc-cl --host_platform=@rbe_windows_msvc_cl//config:platform\nbuild:rbe-toolchain-msvc-cl --platforms=@rbe_windows_msvc_cl//config:platform\nbuild:rbe-toolchain-msvc-cl --crosstool_top=@rbe_windows_msvc_cl//cc:toolchain\nbuild:rbe-toolchain-msvc-cl --extra_toolchains=@rbe_windows_msvc_cl//config:cc-toolchain\n\nbuild:rbe-toolchain-clang-cl --host_platform=@rbe_windows_clang_cl//config:platform\nbuild:rbe-toolchain-clang-cl --platforms=@rbe_windows_clang_cl//config:platform\nbuild:rbe-toolchain-clang-cl --crosstool_top=@rbe_windows_clang_cl//cc:toolchain\nbuild:rbe-toolchain-clang-cl --extra_toolchains=@rbe_windows_clang_cl//config:cc-toolchain\n\nbuild:remote --spawn_strategy=remote,sandboxed,local\nbuild:remote --strategy=Javac=remote,sandboxed,local\nbuild:remote --strategy=Closure=remote,sandboxed,local\nbuild:remote --strategy=Genrule=remote,sandboxed,local\n# rules_rust is not remote runnable (yet)\nbuild:remote --strategy=Rustc=sandboxed,local\nbuild:remote 
--remote_timeout=7200\nbuild:remote --auth_enabled=true\nbuild:remote --remote_download_toplevel\n\n# Windows bazel does not allow sandboxed as a spawn strategy\nbuild:remote-windows --spawn_strategy=remote,local\nbuild:remote-windows --strategy=Javac=remote,local\nbuild:remote-windows --strategy=Closure=remote,local\nbuild:remote-windows --strategy=Genrule=remote,local\nbuild:remote-windows --remote_timeout=7200\nbuild:remote-windows --auth_enabled=true\nbuild:remote-windows --remote_download_toplevel\n\nbuild:remote-clang --config=remote\nbuild:remote-clang --config=rbe-toolchain-clang\n\nbuild:remote-clang-libc++ --config=remote\nbuild:remote-clang-libc++ --config=rbe-toolchain-clang-libc++\n\nbuild:remote-gcc --config=remote\nbuild:remote-gcc --config=rbe-toolchain-gcc\n\nbuild:remote-msan --config=remote\nbuild:remote-msan --config=rbe-toolchain-clang-libc++\nbuild:remote-msan --config=rbe-toolchain-msan\n\nbuild:remote-msvc-cl --config=remote-windows\nbuild:remote-msvc-cl --config=msvc-cl\nbuild:remote-msvc-cl --config=rbe-toolchain-msvc-cl\n\nbuild:remote-clang-cl --config=remote-windows\nbuild:remote-clang-cl --config=clang-cl\nbuild:remote-clang-cl --config=rbe-toolchain-clang-cl\n\n# Docker sandbox\n# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8\nbuild:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a\nbuild:docker-sandbox --spawn_strategy=docker\nbuild:docker-sandbox --strategy=Javac=docker\nbuild:docker-sandbox --strategy=Closure=docker\nbuild:docker-sandbox --strategy=Genrule=docker\nbuild:docker-sandbox --define=EXECUTOR=remote\nbuild:docker-sandbox --experimental_docker_verbose\nbuild:docker-sandbox --experimental_enable_docker_sandbox\n\nbuild:docker-clang --config=docker-sandbox\nbuild:docker-clang --config=rbe-toolchain-clang\n\nbuild:docker-clang-libc++ --config=docker-sandbox\nbuild:docker-clang-libc++ 
--config=rbe-toolchain-clang-libc++\n\nbuild:docker-gcc --config=docker-sandbox\nbuild:docker-gcc --config=rbe-toolchain-gcc\n\nbuild:docker-msan --config=docker-sandbox\nbuild:docker-msan --config=rbe-toolchain-clang-libc++\nbuild:docker-msan --config=rbe-toolchain-msan\n\nbuild:docker-tsan --config=docker-sandbox\nbuild:docker-tsan --config=rbe-toolchain-clang-libc++\nbuild:docker-tsan --config=rbe-toolchain-tsan\n\n# CI configurations\nbuild:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com\nbuild:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com\n\n# Fuzz builds\n# -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION is passed in the bazel build target\n# rules for fuzz tests. Passing it in the CLI will cause dependencies to be built\n# with the macro, causing issues in RouteMatcherTest.TestRoutes that expect prod\n# behavior from RE2 library.\nbuild:asan-fuzzer --config=asan\nbuild:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer\nbuild:asan-fuzzer --copt=-fsanitize=fuzzer-no-link\nbuild:asan-fuzzer --copt=-fno-omit-frame-pointer\n# Remove UBSAN halt_on_error to avoid crashing on protobuf errors.\nbuild:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1\n\n# Fuzzing without ASAN. 
This is useful for profiling fuzzers without any ASAN artifacts.\nbuild:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer\nbuild:plain-fuzzer --define ENVOY_CONFIG_ASAN=1\nbuild:plain-fuzzer --copt=-fsanitize=fuzzer-no-link\nbuild:plain-fuzzer --linkopt=-fsanitize=fuzzer-no-link\n\n# Compile database generation config\nbuild:compdb --build_tag_filters=-nocompdb\n\n# Windows build quirks\nbuild:windows --action_env=TMPDIR\nbuild:windows --define signal_trace=disabled\nbuild:windows --define hot_restart=disabled\nbuild:windows --define tcmalloc=disabled\nbuild:windows --define manual_stamp=manual_stamp\n\n# Should not be required after upstream fix to bazel,\n# and already a no-op to linux/macos builds\n# see issue https://github.com/bazelbuild/rules_foreign_cc/issues/301\nbuild:windows --copt=\"-DCARES_STATICLIB\"\nbuild:windows --copt=\"-DNGHTTP2_STATICLIB\"\nbuild:windows --copt=\"-DCURL_STATICLIB\"\nbuild:windows --cxxopt=\"/std:c++17\"\n\n# Required to work around build defects on Windows MSVC cl\n# Unguarded gcc pragmas in quiche are not recognized by MSVC\nbuild:msvc-cl --copt=\"/wd4068\"\n# Allows 'nodiscard' function return values to be discarded\nbuild:msvc-cl --copt=\"/wd4834\"\n# Allows inline functions to be undefined\nbuild:msvc-cl --copt=\"/wd4506\"\nbuild:msvc-cl --copt=\"-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING\"\n\n# Required to work around Windows clang-cl build defects\n# Ignore conflicting definitions of _WIN32_WINNT\n# Overriding __TIME__ etc is problematic (and is actually an invalid no-op)\nbuild:clang-cl --copt=\"-Wno-macro-redefined\"\nbuild:clang-cl --copt=\"-Wno-builtin-macro-redefined\"\nbuild:clang-cl --action_env=USE_CLANG_CL=1\n\n# Defaults to 'auto' - Off for windows, so override to linux behavior\nbuild:windows --enable_runfiles=yes\n\n# This should become adopted by bazel as the default\nbuild:windows --features=compiler_param_file\n\n# These options attempt to force a monolithic binary including the CRT\nbuild:windows 
--features=fully_static_link\nbuild:windows --features=static_link_msvcrt\nbuild:windows --dynamic_mode=off\n\ntry-import %workspace%/clang.bazelrc\ntry-import %workspace%/user.bazelrc\ntry-import %workspace%/local_tsan.bazelrc\n"
  },
  {
    "path": ".bazelversion",
    "content": "3.4.1\n"
  },
  {
    "path": ".circleci/config.yml",
    "content": "version: 2.1\n\nexecutors:\n  ubuntu-build:\n    description: \"A regular build executor based on ubuntu image\"\n    docker:\n      # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8\n      - image: envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a\n    resource_class: xlarge\n    working_directory: /source\n\njobs:\n   api:\n     executor: ubuntu-build\n     steps:\n       - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken\n       - checkout\n       - run: ci/do_circle_ci.sh bazel.api\n       - add_ssh_keys:\n           fingerprints:\n             - \"fb:f3:fe:be:1c:b2:ec:b6:25:f9:7b:a6:87:54:02:8c\"\n       - run: ci/api_mirror.sh\n       - store_artifacts:\n           path: /build/envoy/generated\n           destination: /\n\n   go_control_plane_mirror:\n     executor: ubuntu-build\n     steps:\n       - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken\n       - checkout\n       - run: ci/do_circle_ci.sh bazel.api\n       - add_ssh_keys:\n           fingerprints:\n             - \"9d:3b:fe:7c:09:3b:ce:a9:6a:de:de:41:fb:6b:52:62\"\n       - run: ci/go_mirror.sh\n\n   filter_example_mirror:\n     executor: ubuntu-build\n     steps:\n       - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken\n       - checkout\n       - add_ssh_keys:\n           fingerprints:\n             - \"f6:f9:df:90:9c:4b:5f:9c:f4:69:fd:42:94:ff:88:24\"\n       - run: ci/filter_example_mirror.sh\n\n   docs:\n     executor: ubuntu-build\n     steps:\n       - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken\n       - checkout\n       - run: ci/do_circle_ci.sh docs\n       - add_ssh_keys:\n           fingerprints:\n             - \"44:c7:a1:9e:f4:9e:a5:33:11:f1:0e:79:e1:55:c9:04\"\n       - run: docs/publish.sh\n       - store_artifacts:\n           path: 
generated/docs\n\nworkflows:\n  version: 2\n  all:\n    jobs:\n      - api\n      - go_control_plane_mirror\n      - filter_example_mirror\n      - docs:\n          filters:\n            tags:\n              only: /^v.*/\n"
  },
  {
    "path": ".clang-format",
    "content": "---\nLanguage:        Cpp\nAccessModifierOffset: -2\nColumnLimit: 100\nDerivePointerAlignment: false\nPointerAlignment: Left\nSortIncludes: false\n...\n\n---\nLanguage: Proto\nColumnLimit: 100\nSpacesInContainerLiterals: false\nAllowShortFunctionsOnASingleLine: false\nReflowComments: false\n...\n"
  },
  {
    "path": ".clang-tidy",
    "content": "Checks:           '-clang-analyzer-core.NonNullParamChecker,\n                   -clang-analyzer-optin.cplusplus.UninitializedObject,\n                   abseil-duration-*,\n                   abseil-faster-strsplit-delimiter,\n                   abseil-no-namespace,\n                   abseil-redundant-strcat-calls,\n                   abseil-str-cat-append,\n                   abseil-string-find-startswith,\n                   abseil-upgrade-duration-conversions,\n                   bugprone-assert-side-effect,\n                   bugprone-unused-raii,\n                   bugprone-use-after-move,\n                   clang-analyzer-core.DivideZero,\n                   misc-unused-using-decls,\n                   modernize-deprecated-headers,\n                   modernize-loop-convert,\n                   modernize-make-shared,\n                   modernize-make-unique,\n                   modernize-return-braced-init-list,\n                   modernize-use-default-member-init,\n                   modernize-use-equals-default,\n                   modernize-use-nullptr,\n                   modernize-use-override,\n                   modernize-use-using,\n                   performance-faster-string-find,\n                   performance-for-range-copy,\n                   performance-inefficient-algorithm,\n                   performance-inefficient-vector-operation,\n                   performance-noexcept-move-constructor,\n                   performance-move-constructor-init,\n                   performance-type-promotion-in-math-fn,\n                   performance-unnecessary-copy-initialization,\n                   readability-braces-around-statements,\n                   readability-container-size-empty,\n                   readability-identifier-naming,\n                   readability-redundant-control-flow,\n                   readability-redundant-member-init,\n                   readability-redundant-smartptr-get,\n                   
readability-redundant-string-cstr'\n\nWarningsAsErrors:  '*'\n\nCheckOptions:\n  - key:             bugprone-assert-side-effect.AssertMacros\n    value:           'ASSERT'\n\n  - key:             bugprone-dangling-handle.HandleClasses\n    value:           'std::basic_string_view;std::experimental::basic_string_view;absl::string_view'\n\n  - key:             modernize-use-auto.MinTypeNameLength\n    value:           '10'\n\n  - key:             readability-identifier-naming.ClassCase\n    value:           'CamelCase'\n\n  - key:             readability-identifier-naming.EnumCase\n    value:           'CamelCase'\n\n  - key:             readability-identifier-naming.EnumConstantCase\n    value:           'CamelCase'\n\n  - key:             readability-identifier-naming.ParameterCase\n    value:           'lower_case'\n\n  - key:             readability-identifier-naming.PrivateMemberCase\n    value:           'lower_case'\n\n  - key:             readability-identifier-naming.PrivateMemberSuffix\n    value:           '_'\n\n  - key:             readability-identifier-naming.StructCase\n    value:           'CamelCase'\n\n  - key:             readability-identifier-naming.TypeAliasCase\n    value:           'CamelCase'\n\n  - key:             readability-identifier-naming.UnionCase\n    value:           'CamelCase'\n\n  - key:             readability-identifier-naming.FunctionCase\n    value:           'camelBack'\n"
  },
  {
    "path": ".devcontainer/.gitignore",
    "content": "devcontainer.env\n"
  },
  {
    "path": ".devcontainer/Dockerfile",
    "content": "FROM gcr.io/envoy-ci/envoy-build:b480535e8423b5fd7c102fd30c92f4785519e33a\n\nARG USERNAME=vscode\nARG USER_UID=501\nARG USER_GID=$USER_UID\n\nENV BUILD_DIR=/build\nENV ENVOY_STDLIB=libstdc++\n\nENV DEBIAN_FRONTEND=noninteractive\nRUN apt-get -y update \\\n  && apt-get -y install --no-install-recommends libpython2.7 net-tools psmisc vim 2>&1 \\\n  # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user.\n  && groupadd --gid $USER_GID $USERNAME \\\n  && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME -G pcap -d /build \\\n  # [Optional] Add sudo support for non-root user\n  && echo $USERNAME ALL=\\(root\\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \\\n  && chmod 0440 /etc/sudoers.d/$USERNAME\n\nENV DEBIAN_FRONTEND=\nENV PATH=/opt/llvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n\nENV CLANG_FORMAT=/opt/llvm/bin/clang-format\n"
  },
  {
    "path": ".devcontainer/README.md",
    "content": "# Envoy Dev Container (experimental)\n\nThis directory contains some experimental tools for Envoy Development in [VSCode Remote - Containers](https://code.visualstudio.com/docs/remote/containers).\n\n## How to use\n\nOpen with VSCode with the Container extension installed. Follow the [official guide](https://code.visualstudio.com/docs/remote/containers) to open this\nrepository directly from GitHub or from checked-out source tree.\n\nAfter opening, run the `Refresh Compilation Database` task to generate compilation database to navigate in source code. \nThis will run partial build of Envoy and may take a while depends on the machine performance.\nThis task is needed to run everytime after:\n- Changing a BUILD file that add/remove files from a target, changes dependencies\n- Changing API proto files\n\nThere are additional tools for VS Code located in [`tools/vscode`](../tools/vscode) directory.\n\n## Advanced Usages\n\n### Using Remote Build Execution\n\nWrite the following content to `devcontainer.env` and rebuild the container. 
The key will be persisted in the container's `~/.bazelrc`.\n\n```\nGCP_SERVICE_ACCOUNT_KEY=<base64 encoded service account key>\nBAZEL_REMOTE_INSTANCE=<RBE Instance>\nBAZEL_REMOTE_CACHE=grpcs://remotebuildexecution.googleapis.com\nBAZEL_BUILD_EXTRA_OPTIONS=--config=remote-ci --config=remote --jobs=<Number of jobs>\n```\n\nBy default, `--config=remote` implies [`--remote_download_toplevel`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--remote_download_toplevel);\nchange this to `minimal` or `all` depending on where you're running the container by adding them to `BAZEL_BUILD_EXTRA_OPTIONS`.\n\n### Disk performance\n\nDocker for Mac/Windows is known to have disk performance issues, which makes formatting all files in the container very slow.\n[Updating the mount consistency to 'delegated'](https://code.visualstudio.com/docs/remote/containers-advanced#_update-the-mount-consistency-to-delegated-for-macos) is recommended.\n"
  },
  {
    "path": ".devcontainer/devcontainer.json",
    "content": "{\n  \"name\": \"Envoy Dev\",\n  \"dockerFile\": \"Dockerfile\",\n  \"runArgs\": [\n    \"--user=vscode\",\n    \"--cap-add=SYS_PTRACE\",\n    \"--cap-add=NET_RAW\",\n    \"--cap-add=NET_ADMIN\",\n    \"--security-opt=seccomp=unconfined\",\n    \"--volume=${env:HOME}:${env:HOME}\",\n    \"--volume=envoy-build:/build\",\n    // Uncomment next line if you have devcontainer.env\n    // \"--env-file=.devcontainer/devcontainer.env\"\n  ],\n  \"settings\": {\n    \"terminal.integrated.shell.linux\": \"/bin/bash\",\n    \"bazel.buildifierFixOnFormat\": true,\n    \"clangd.path\": \"/opt/llvm/bin/clangd\",\n    \"python.pythonPath\": \"/usr/bin/python3\",\n    \"python.formatting.provider\": \"yapf\",\n    \"python.formatting.yapfArgs\": [\n      \"--style=${workspaceFolder}/tools/code_format/.style.yapf\"\n    ],\n    \"files.exclude\": {\n      \"**/.clangd/**\": true,\n      \"**/bazel-*/**\": true\n    },\n    \"files.watcherExclude\": {\n      \"**/.clangd/**\": true,\n      \"**/bazel-*/**\": true\n    }\n  },\n  \"remoteUser\": \"vscode\",\n  \"containerUser\": \"vscode\",\n  \"postCreateCommand\": \".devcontainer/setup.sh\",\n  \"extensions\": [\n    \"github.vscode-pull-request-github\",\n    \"zxh404.vscode-proto3\",\n    \"bazelbuild.vscode-bazel\",\n    \"llvm-vs-code-extensions.vscode-clangd\",\n    \"vadimcn.vscode-lldb\",\n    \"webfreak.debug\",\n    \"ms-python.python\"\n  ]\n}\n"
  },
  {
    "path": ".devcontainer/setup.sh",
    "content": "#!/usr/bin/env bash\n\n. ci/setup_cache.sh\ntrap - EXIT # Don't remove the key file written into a temporary file\n\nBAZELRC_FILE=~/.bazelrc bazel/setup_clang.sh /opt/llvm\n\n# Use generated toolchain config because we know the base container is the one we're using in RBE.\n# Not using libc++ here because clangd will raise some tidy issue in libc++ header as of version 9.\necho \"build --config=rbe-toolchain-clang\" >> ~/.bazelrc\necho \"build ${BAZEL_BUILD_EXTRA_OPTIONS}\" | tee -a ~/.bazelrc\n\n# Ideally we want this line so bazel doesn't pollute things outside of the devcontainer, but some of\n# API tooling (proto_sync) depends on symlink like bazel-bin.\n# TODO(lizan): Fix API tooling and enable this again\n#echo \"build --symlink_prefix=/\" >> ~/.bazelrc\n\n[[ -n \"${BUILD_DIR}\" ]] && sudo chown -R \"$(id -u):$(id -g)\" \"${BUILD_DIR}\"\n"
  },
  {
    "path": ".gitattributes",
    "content": "/docs/root/version_history/current.rst merge=union\n/api/envoy/**/v4alpha/* linguist-generated=true\n/generated_api_shadow/envoy/** linguist-generated=true\n/generated_api_shadow/bazel/** linguist-generated=true\n*.svg binary\n/test/**/*_corpus/* linguist-generated=true\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "content": "blank_issues_enabled: false\ncontact_links:\n  - name: \"Crash bug\"\n    url: https://github.com/envoyproxy/envoy/security/policy\n    about: \"Please file any crash bug with envoy-security@googlegroups.com.\"\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: Feature request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: enhancement,triage\nassignees: ''\n\n---\n\n*Title*: *One line description*\n\n*Description*:\n>Describe the the desired behavior, what scenario it enables and how it\nwould be used.\n\n[optional *Relevant Links*:]\n>Any extra documentation required to understand the issue.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/non--crash-security--bug.md",
    "content": "---\nname: Non-{crash,security} bug\nabout: Bugs which are not crashes, DoS or other security issue\ntitle: ''\nlabels: bug,triage\nassignees: ''\n\n---\n\n**If you are reporting *any* crash or *any* potential security issue, *do not*\nopen an issue in this repo. Please report the issue via emailing\nenvoy-security@googlegroups.com where the issue will be triaged appropriately.**\n\n*Title*: *One line description*\n\n*Description*:\n>What issue is being seen? Describe what should be happening instead of\nthe bug, for example: Envoy should not crash, the expected value isn't\nreturned, etc.\n\n*Repro steps*:\n> Include sample requests, environment, etc. All data and inputs\nrequired to reproduce the bug.\n\n>**Note**: The [Envoy_collect tool](https://github.com/envoyproxy/envoy/blob/master/tools/envoy_collect/README.md)\ngathers a tarball with debug logs, config and the following admin\nendpoints: /stats, /clusters and /server_info. Please note if there are\nprivacy concerns, sanitize the data prior to sharing the tarball/pasting.\n\n*Admin and Stats Output*:\n>Include the admin output for the following endpoints: /stats,\n/clusters, /routes, /server_info. For more information, refer to the\n[admin endpoint documentation.](https://www.envoyproxy.io/docs/envoy/latest/operations/admin)\n\n>**Note**: If there are privacy concerns, sanitize the data prior to\nsharing.\n\n*Config*:\n>Include the config used to configure Envoy.\n\n*Logs*:\n>Include the access logs and the Envoy logs.\n\n>**Note**: If there are privacy concerns, sanitize the data prior to\nsharing.\n\n*Call Stack*:\n> If the Envoy binary is crashing, a call stack is **required**.\nPlease refer to the [Bazel Stack trace documentation](https://github.com/envoyproxy/envoy/tree/master/bazel#stack-trace-symbol-resolution).\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/other.md",
    "content": "---\nname: Other\nabout: Questions, design proposals, tech debt, etc.\ntitle: ''\nlabels: triage\nassignees: ''\n\n---\n\n**If you are reporting *any* crash or *any* potential security issue, *do not*\nopen an issue in this repo. Please report the issue via emailing\nenvoy-security@googlegroups.com where the issue will be triaged appropriately.**\n\n*Title*: *One line description*\n\n*Description*:\n>Describe the issue.\n\n[optional *Relevant Links*:]\n>Any extra documentation required to understand the issue.\n"
  },
  {
    "path": ".github/stale.yml",
    "content": "# Configuration for probot-stale - https://github.com/probot/stale\n\n# General configuration\n# Label to use when marking as stale\nstaleLabel: stale\n\n# Pull request specific configuration\npulls:\n  # Number of days of inactivity before an Issue or Pull Request becomes stale\n  daysUntilStale: 7\n  # Number of days of inactivity before a stale Issue or Pull Request is closed.\n  # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.\n  daysUntilClose: 7\n  # Comment to post when marking as stale. Set to `false` to disable\n  markComment: >\n    This pull request has been automatically marked as stale because it has not had\n    activity in the last 7 days. It will be closed in 7 days if no further activity occurs. Please\n    feel free to give a status update now, ping for review, or re-open when it's ready.\n    Thank you for your contributions!\n  # Comment to post when closing a stale Issue or Pull Request.\n  closeComment: >\n     This pull request has been automatically closed because it has not had\n     activity in the last 14 days. Please feel free to give a status update now, ping for review, or re-open when it's ready.\n     Thank you for your contributions!\n  # Limit the number of actions per hour, from 1-30. Default is 30\n  limitPerRun: 1\n  exemptLabels:\n    - no stalebot\n\n# Issue specific configuration\nissues:\n  # TODO: Consider increasing the limitPerRun once we are satisfied with the bot's performance\n  limitPerRun: 1\n  daysUntilStale: 30\n  daysUntilClose: 7\n  markComment: >\n    This issue has been automatically marked as stale because it has not had activity in the\n    last 30 days. It will be closed in the next 7 days unless it is tagged \"help wanted\" or other activity\n    occurs. Thank you for your contributions.\n  closeComment: >\n    This issue has been automatically closed because it has not had activity in the\n    last 37 days. 
If this issue is still valid, please ping a maintainer and ask them to label it as \"help wanted\".\n    Thank you for your contributions.\n  exemptLabels:\n    - help wanted\n    - no stalebot\n"
  },
  {
    "path": ".github/workflows/codeql-daily.yml",
    "content": "on:\n  schedule:\n    - cron: '0 12 * * 4'\n\njobs:\n  CodeQL-Build:\n\n    strategy:\n      fail-fast: false\n\n    # CodeQL runs on ubuntu-latest and windows-latest\n    runs-on: ubuntu-latest\n\n    steps:\n    - name: Checkout repository\n      uses: actions/checkout@v2\n      with:\n        # We must fetch at least the immediate parents so that if this is\n        # a pull request then we can checkout the head.\n        fetch-depth: 2\n\n    # If this run was triggered by a pull request event, then checkout\n    # the head of the pull request instead of the merge commit.\n    - run: git checkout HEAD^2\n      if: ${{ github.event_name == 'pull_request' }}\n\n    # Initializes the CodeQL tools for scanning.\n    - name: Initialize CodeQL\n      uses: github/codeql-action/init@v1\n      # Override language selection by uncommenting this and choosing your languages\n      with:\n         languages: cpp\n\n    - name: Install deps\n      shell: bash\n      run: |\n       sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1\n       mkdir -p bin/clang10\n       cd bin/clang10\n       wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz                                                                        \n       tar -xf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz --strip-components 1  \n       export PATH=bin/clang10/bin:$PATH\n\n    - name: Build\n      run: |\n       bazel/setup_clang.sh bin/clang10\n       bazelisk shutdown\n       bazelisk build -c fastbuild --spawn_strategy=local --discard_analysis_cache --nouse_action_cache --config clang --config libc++ //source/common/http/... \n\n    - name: Clean Artifacts\n      run: |\n        git clean -xdf\n       \n    - name: Perform CodeQL Analysis\n      uses: github/codeql-action/analyze@v1\n"
  },
  {
    "path": ".github/workflows/codeql-push.yml",
    "content": "on:\n  push:\n    paths:\n    - 'source/common/**'\n  pull_request:\n\njobs:\n  CodeQL-Build:\n\n    strategy:\n      fail-fast: false\n\n    # CodeQL runs on ubuntu-latest and windows-latest\n    runs-on: ubuntu-latest\n\n    steps:\n    - name: Checkout repository\n      uses: actions/checkout@v2\n      with:\n        # We must fetch at least the immediate parents so that if this is\n        # a pull request then we can checkout the head.\n        fetch-depth: 2\n\n    - name: Get build targets\n      run: |\n       . .github/workflows/get_build_targets.sh\n       echo ::set-env name=BUILD_TARGETS::$(echo $BUILD_TARGETS_LOCAL)\n    # If this run was triggered by a pull request event, then checkout\n    # the head of the pull request instead of the merge commit.\n    - run: git checkout HEAD^2\n      if: ${{ github.event_name == 'pull_request' }}\n\n    # Initializes the CodeQL tools for scanning.\n    - name: Initialize CodeQL\n      uses: github/codeql-action/init@v1\n      # Override language selection by uncommenting this and choosing your languages\n      with:\n         languages: cpp\n\n    - name: Install deps\n      shell: bash\n      run: |\n       sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1\n       mkdir -p bin/clang10\n       cd bin/clang10\n       wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz\n       tar -xf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz --strip-components 1\n       export PATH=bin/clang10/bin:$PATH\n\n    - name: Build\n      run: |\n       bazel/setup_clang.sh bin/clang10\n       bazelisk shutdown\n       bazelisk build -c fastbuild --spawn_strategy=local --discard_analysis_cache --nouse_action_cache --config clang --config libc++ $BUILD_TARGETS\n       echo -e \"Built targets...\\n$BUILD_TARGETS\"\n\n    - name: Clean 
Artifacts\n      run: |\n        git clean -xdf\n       \n    - name: Perform CodeQL Analysis\n      if: env.BUILD_TARGETS != ''\n      uses: github/codeql-action/analyze@v1\n"
  },
  {
    "path": ".github/workflows/get_build_targets.sh",
    "content": "#!/bin/bash\n\n# This limits the directory that bazel query is going to search under.\nreadonly SEARCH_FOLDER=\"//source/common/...\"\n\nset -e -o pipefail\n\nfunction get_targets() {\n  # Comparing the PR HEAD with the upstream master HEAD.\n  git diff --name-only HEAD FETCH_HEAD | while IFS= read -r line\n  do\n    # Only targets under those folders.\n    case \"$line\" in\n      source/*|include/*)\n        bazel query \"rdeps($SEARCH_FOLDER, $line, 1)\" 2>/dev/null\n        ;;\n    esac\n    # This chain of commands from left to right are:\n    # 1. Excluding the redundant .cc/.h targets that bazel query emits.\n    # 2. Storing only the unique output.\n    # 3. Limiting to the first 10 targets.\n  done | grep -v '\\.cc\\|\\.h' | sort -u | head -n 10\n}\n\n# Fetching the upstream HEAD to compare with and stored in FETCH_HEAD.\ngit fetch https://github.com/envoyproxy/envoy.git master 2>/dev/null\n\nexport BUILD_TARGETS_LOCAL=$(echo $(get_targets))\n"
  },
  {
    "path": ".gitignore",
    "content": "/bazel-*\nBROWSE\n/build\n/build_*\n*.bzlc\n.cache\n.clangd\n.classpath\n.clwb/\n/ci/bazel-*\ncompile_commands.json\ncscope.*\n.deps\n.devcontainer.json\n/docs/landing_source/.bundle\n/generated\n.idea/\n.project\n*.pyc\n**/pyformat\nSOURCE_VERSION\n.settings/\n*.sw*\ntags\nTAGS\n/test/coverage/BUILD\n/tools/spelling/.aspell.en.pws\n.vimrc\n.vs\n.vscode\nclang-tidy-fixes.yaml\n.gdb_history\nclang.bazelrc\nuser.bazelrc\nCMakeLists.txt\ncmake-build-debug\n/linux\nbazel.output.txt\n*~\n"
  },
  {
    "path": ".zuul/playbooks/envoy-build/run.yaml",
    "content": "- hosts: all\n  become: yes\n  roles:\n    - role: config-gcc\n      gcc_version: 7\n    - role: config-bazel\n      bazel_version: 0.28.1\n  tasks:\n    - name: Build envoy\n      shell:\n        cmd: |\n          apt update\n          apt-get update\n          apt-get install -y \\\n             libtool \\\n             cmake \\\n             automake \\\n             autoconf \\\n             make \\\n             ninja-build \\\n             curl \\\n             unzip \\\n             virtualenv\n\n          bazel build //source/exe:envoy-static | tee $LOGS_PATH//bazel.txt\n\n          cp -r ./bazel-bin $RESULTS_PATH\n        chdir: '{{ zuul.project.src_dir }}'\n        executable: /bin/bash\n      environment: '{{ global_env }}'\n"
  },
  {
    "path": ".zuul.yaml",
    "content": "- project:\n    name: envoyproxy/envoy\n    check:\n      jobs:\n        - envoy-build-arm64\n\n- job:\n    name: envoy-build-arm64\n    parent: init-test\n    description: |\n      Envoy build in openlab cluster.\n    run: .zuul/playbooks/envoy-build/run.yaml\n    nodeset: ubuntu-xenial-arm64\n    voting: false\n"
  },
  {
    "path": "BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n\nexports_files([\n    \"VERSION\",\n    \".clang-format\",\n])\n\n# These two definitions exist to help reduce Envoy upstream core code depending on extensions.\n# To avoid visibility problems, see notes in source/extensions/extensions_build_config.bzl\n#\n# TODO(#9953) //test/config_test:__pkg__ should probably be split up and removed.\n# TODO(#9953) the config fuzz tests should be moved somewhere local and //test/config_test and //test/server removed.\npackage_group(\n    name = \"extension_config\",\n    packages = [\n        \"//source/exe\",\n        \"//source/extensions/...\",\n        \"//test/config_test\",\n        \"//test/extensions/...\",\n        \"//test/server\",\n        \"//test/server/config_validation\",\n    ],\n)\n\npackage_group(\n    name = \"extension_library\",\n    packages = [\n        \"//source/extensions/...\",\n        \"//test/extensions/...\",\n    ],\n)\n"
  },
  {
    "path": "CODEOWNERS",
    "content": "# TODO(zuercher): determine how we want to deal with auto-assignment\n# By default, @envoyproxy/maintainers own everything.\n#*       @envoyproxy/maintainers\n\n# api\n/api/ @envoyproxy/api-shepherds\n# access loggers\n/*/extensions/access_loggers/common @auni53 @zuercher\n# compression extensions\n/*/extensions/compression/common/compressor @rojkov @junr03\n/*/extensions/compression/gzip/compressor @rojkov @junr03\n# csrf extension\n/*/extensions/filters/http/csrf @dschaller @mattklein123\n# original_src http filter extension\n/*/extensions/filters/http/original_src @snowp @klarose\n# original_src listener filter extension\n/*/extensions/filters/listener/original_src @snowp @klarose\n# original_src common extension\nextensions/filters/common/original_src @snowp @klarose\n# dubbo_proxy extension\n/*/extensions/filters/network/dubbo_proxy @zyfjeff @lizan\n# rocketmq_proxy extension\n/*/extensions/filters/network/rocketmq_proxy @aaron-ai @lizhanhui @lizan\n# thrift_proxy extension\n/*/extensions/filters/network/thrift_proxy @zuercher @rgs1\n# cdn_loop extension\n/*/extensions/filters/http/cdn_loop @justin-mp @penguingao @alyssawilk\n# compressor used by http compression filters\n/*/extensions/filters/http/common/compressor @gsagula @rojkov @dio\n/*/extensions/filters/http/compressor @rojkov @dio\n# jwt_authn http filter extension\n/*/extensions/filters/http/jwt_authn @qiwzhang @lizan\n# grpc_http1_reverse_bridge http filter extension\n/*/extensions/filters/http/grpc_http1_reverse_bridge @snowp @zuercher\n# header_to_metadata extension\n/*/extensions/filters/http/header_to_metadata @rgs1 @zuercher\n# alts transport socket extension\n/*/extensions/transport_sockets/alts @htuch @yangminzhu\n# tls transport socket extension\n/*/extensions/transport_sockets/tls @PiotrSikora @lizan\n# proxy protocol socket extension\n/*/extensions/transport_sockets/proxy_protocol @alyssawilk @wez470\n# common transport socket\n/*/extensions/transport_sockets/common 
@alyssawilk @wez470\n# sni_cluster extension\n/*/extensions/filters/network/sni_cluster @rshriram @lizan\n# sni_dynamic_forward_proxy extension\n/*/extensions/filters/network/sni_dynamic_forward_proxy @rshriram @lizan\n# tracers.datadog extension\n/*/extensions/tracers/datadog @cgilmour @palazzem @mattklein123\n# tracers.xray extension\n/*/extensions/tracers/xray @marcomagdy @lavignes @mattklein123\n# mysql_proxy extension\n/*/extensions/filters/network/mysql_proxy @rshriram @venilnoronha @mattklein123\n# postgres_proxy extension\n/*/extensions/filters/network/postgres_proxy @fabriziomello @cpakulski @dio\n# quic extension\n/*/extensions/quic_listeners/ @alyssawilk @danzh2010 @mattklein123 @mpwarres @wu-bin\n# zookeeper_proxy extension\n/*/extensions/filters/network/zookeeper_proxy @rgs1 @snowp\n# redis cluster extension\n/*/extensions/clusters/redis @msukalski @henryyyang @mattklein123\n/*/extensions/common/redis @msukalski @henryyyang @mattklein123\n# dynamic forward proxy\n/*/extensions/clusters/dynamic_forward_proxy @mattklein123 @alyssawilk\n/*/extensions/common/dynamic_forward_proxy @mattklein123 @alyssawilk\n/*/extensions/filters/http/dynamic_forward_proxy @mattklein123 @alyssawilk\n# omit_canary_hosts retry predicate\n/*/extensions/retry/host/omit_canary_hosts @sriduth @snowp\n# HTTP caching extension\n/*/extensions/filters/http/cache @toddmgreer @jmarantz\n# aws_iam grpc credentials\n/*/extensions/grpc_credentials/aws_iam @lavignes @mattklein123\n/*/extensions/common/aws @lavignes @mattklein123\n# adaptive concurrency limit extension.\n/*/extensions/filters/http/adaptive_concurrency @tonya11en @mattklein123\n# admission control extension.\n/*/extensions/filters/http/admission_control @tonya11en @mattklein123\n# http inspector\n/*/extensions/filters/listener/http_inspector @yxue @PiotrSikora @lizan\n# attribute context\n/*/extensions/filters/common/expr @kyessenov @yangminzhu @lizan\n# webassembly access logger extensions\n/*/extensions/access_loggers/wasm 
@jplevyak @PiotrSikora @lizan\n# webassembly bootstrap extensions\n/*/extensions/bootstrap/wasm @jplevyak @PiotrSikora @lizan\n# webassembly http extensions\n/*/extensions/filters/http/wasm @jplevyak @PiotrSikora @lizan\n# webassembly network extensions\n/*/extensions/filters/network/wasm @jplevyak @PiotrSikora @lizan\n# webassembly common extension\n/*/extensions/common/wasm @jplevyak @PiotrSikora @lizan\n# common matcher\n/*/extensions/common/matcher @mattklein123 @yangminzhu\n# common crypto extension\n/*/extensions/common/crypto @lizan @PiotrSikora @bdecoste\n/*/extensions/common/proxy_protocol @alyssawilk @wez470\n/*/extensions/common/sqlutils @cpakulski @dio\n/*/extensions/filters/http/grpc_http1_bridge @snowp @jose\n/*/extensions/filters/http/gzip @gsagula @dio\n/*/extensions/filters/http/fault @rshriram @alyssawilk\n/*/extensions/filters/common/fault @rshriram @alyssawilk\n/*/extensions/filters/http/grpc_json_transcoder @qiwzhang @lizan\n/*/extensions/filters/http/router @alyssawilk @mattklein123 @snowp\n/*/extensions/filters/http/ext_authz @gsagula @dio\n/*/extensions/filters/http/grpc_web @fengli79 @lizan\n/*/extensions/filters/http/grpc_stats @kyessenov @lizan\n/*/extensions/filters/http/squash @yuval-k @alyssawilk\n/*/extensions/filters/common/ext_authz @gsagula @dio\n/*/extensions/filters/common/original_src @klarose @snowp\n/*/extensions/filters/listener/tls_inspector @piotrsikora @htuch\n/*/extensions/grpc_credentials/example @wozz @htuch\n/*/extensions/grpc_credentials/file_based_metadata @wozz @htuch\n/*/extensions/internal_redirect @alyssawilk @penguingao\n/*/extensions/stat_sinks/dog_statsd @taiki45 @jmarantz\n/*/extensions/stat_sinks/hystrix @trabetti @jmarantz\n/*/extensions/stat_sinks/metrics_service @ramaraochavali @jmarantz\n# webassembly stat-sink extensions\n/*/extensions/stat_sinks/wasm @Aakash2017 @jplevyak @lizan\n/*/extensions/resource_monitors/injected_resource @eziskind @htuch\n/*/extensions/resource_monitors/common @eziskind 
@htuch\n/*/extensions/resource_monitors/fixed_heap @eziskind @htuch\n/*/extensions/retry/priority @snowp @alyssawilk\n/*/extensions/retry/priority/previous_priorities @snowp @alyssawilk\n/*/extensions/retry/host @snowp @alyssawilk\n/*/extensions/filters/network/http_connection_manager @alyssawilk @mattklein123\n/*/extensions/filters/network/ext_authz @gsagula @dio\n/*/extensions/filters/network/tcp_proxy @alyssawilk @zuercher\n/*/extensions/filters/network/echo @htuch @alyssawilk\n/*/extensions/filters/udp/dns_filter @abaptiste @mattklein123\n/*/extensions/filters/network/direct_response @kyessenov @zuercher\n/*/extensions/filters/udp/udp_proxy @mattklein123 @danzh2010\n/*/extensions/clusters/aggregate @yxue @snowp\n# support for on-demand VHDS requests\n/*/extensions/filters/http/on_demand @dmitri-d @htuch @lambdai\n/*/extensions/filters/network/local_ratelimit @mattklein123 @junr03\n/*/extensions/filters/http/aws_request_signing @rgs1 @derekargueta @mattklein123 @marcomagdy\n/*/extensions/filters/http/aws_lambda @mattklein123 @marcomagdy @lavignes\n# Compression\n/*/extensions/compression/common @junr03 @rojkov\n/*/extensions/compression/gzip @junr03 @rojkov\n/*/extensions/filters/http/decompressor @rojkov @dio\n# Watchdog Extensions\n/*/extensions/watchdog/profile_action @kbaichoo @antoniovicente\n/*/extensions/watchdog/abort_action @kbaichoo @antoniovicente\n# Core upstream code\nextensions/upstreams/http @alyssawilk @snowp @mattklein123\nextensions/upstreams/http/http @alyssawilk @snowp @mattklein123\nextensions/upstreams/http/tcp @alyssawilk @mattklein123\nextensions/upstreams/http/default @alyssawilk @snowp @mattklein123\n# OAuth2\nextensions/filters/http/oauth2 @rgs1 @derekargueta @snowp\n# HTTP Local Rate Limit\n/*/extensions/filters/http/local_ratelimit @rgs1 @mattklein123\n/*/extensions/filters/common/local_ratelimit @mattklein123 @rgs1\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "## Community Code of Conduct\n\nEnvoy follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "We welcome contributions from the community. Please read the following guidelines carefully to\nmaximize the chances of your PR being merged.\n\n# Communication\n\n* Before starting work on a major feature, please reach out to us via GitHub, Slack,\n  email, etc. We will make sure no one else is already working on it and ask you to open a\n  GitHub issue.\n* A \"major feature\" is defined as any change that is > 100 LOC altered (not including tests), or\n  changes any user-facing behavior. We will use the GitHub issue to discuss the feature and come to\n  agreement. This is to prevent your time being wasted, as well as ours. The GitHub review process\n  for major features is also important so that [organizations with commit access](OWNERS.md) can\n  come to agreement on design. If it is appropriate to write a design document, the document must\n  be hosted either in the GitHub tracking issue, or linked to from the issue and hosted in a\n  world-readable location.\n* Specifically, if the goal is to add a new [extension](REPO_LAYOUT.md#sourceextensions-layout),\n  please read the [extension policy](GOVERNANCE.md#extension-addition-policy).\n* Small patches and bug fixes don't need prior communication.\n\n# Coding style\n\n* See [STYLE.md](STYLE.md)\n\n# Inclusive language policy\n\nThe Envoy community has an explicit goal to be inclusive to all. As such, all PRs must adhere to the\nfollowing guidelines for all code, APIs, and documentation:\n\n* The following words and phrases are not allowed:\n  * *Whitelist*: use allowlist instead.\n  * *Blacklist*: use denylist or blocklist instead.\n  * *Master*: use primary instead.\n  * *Slave*: use secondary or replica instead.\n* Documentation should be written in an inclusive style. 
The [Google developer\n  documentation](https://developers.google.com/style/inclusive-documentation) contains an excellent\n  reference on this topic.\n* The above policy is not considered definitive and may be amended in the future as industry best\n  practices evolve. Additional comments on this topic may be provided by maintainers during code\n  review.\n\n# Breaking change policy\n\nBoth API and implementation stability are important to Envoy. Since the API is consumed by clients\nbeyond Envoy, it has a distinct set of [versioning guidelines](api/API_VERSIONING.md). Below, we\narticulate the Envoy implementation stability rules, which operate within the context of the API\nversioning guidelines:\n\n* Features may be marked as deprecated in a given versioned API at any point in time, but this may\n  only be done when a replacement implementation and configuration path is available in Envoy on\n  master. Deprecators must implement a conversion from the deprecated configuration to the latest\n  `vNalpha` (with the deprecated field) that Envoy uses internally. A field may be deprecated if\n  this tool would be able to perform the conversion. For example, removing a field to describe\n  HTTP/2 window settings is valid if a more comprehensive HTTP/2 protocol options field is being\n  introduced to replace it. The PR author deprecating the old configuration is responsible for\n  updating all tests and canonical configuration, or guarding them with the\n  `DEPRECATED_FEATURE_TEST()` macro. This will be validated by the `bazel.compile_time_options`\n  target, which will hard-fail when deprecated configuration is used. The majority of tests and\n  configuration for a feature should be expressed in terms of the latest Envoy internal\n  configuration (i.e. 
`vNalpha`), only a minimal number of tests necessary to validate configuration\n  translation should be guarded via the `DEPRECATED_FEATURE_TEST()` macro.\n* We will delete deprecated configuration across major API versions. E.g. a field marked deprecated\n  in v2 will be removed in v3.\n* Unless the community and Envoy maintainer team agrees on an exception, during the\n  first release cycle after a feature has been deprecated, use of that feature\n  will cause a logged warning, and incrementing the\n  [runtime](https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime#statistics)\n  `runtime.deprecated_feature_use` stat.\n  During the second release cycle, use of the deprecated configuration will\n  cause a configuration load failure, unless the feature in question is\n  explicitly overridden in\n  [runtime](https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime#using-runtime-overrides-for-deprecated-features)\n  config ([example](configs/using_deprecated_config.v2.yaml)). Finally, following the deprecation\n  of the API major version where the field was first\n  marked deprecated, the entire implementation code will be removed from the Envoy implementation.\n* This policy means that organizations deploying master should have some time to get ready for\n  breaking changes at the next major API version. This is typically a window of at least 12 months\n  or until the organization moves to the next major API version.\n* The breaking change policy also applies to source level extensions (e.g., filters). Code that\n  conforms to the public interface documentation should continue to compile and work within the\n  deprecation window. Within this window, a warning of deprecation should be carefully logged (some\n  features might need rate limiting for logging this). 
We make no guarantees about code or deployments\n  that rely on undocumented behavior.\n* All deprecations/breaking changes will be clearly listed in the [version history](docs/root/version_history/).\n* High risk deprecations/breaking changes may be announced to the\n  [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list but by default\n  it is expected the multi-phase warn-by-default/fail-by-default is sufficient to warn users to move\n  away from deprecated features.\n\n# Submitting a PR\n\n* Fork the repo.\n* In your local repo, install the git hooks that implement various important pre-commit and\n  pre-push checks:\n\n  ```\n  ./support/bootstrap\n  ```\n\n  Please see [support/README.md](support/README.md) for more information on these hooks.\n\n* Create your PR.\n* Tests will automatically run for you.\n* We will **not** merge any PR that is not passing tests.\n* PRs are expected to have 100% test coverage for added code. This can be verified with a coverage\n  build. If your PR cannot have 100% coverage for some reason please clearly explain why when you\n  open it.\n* Any PR that changes user-facing behavior **must** have associated documentation in [docs](docs) as\n  well as [release notes](docs/root/version_history/current.rst). API changes should be documented\n  inline with protos as per the [API contribution guidelines](api/CONTRIBUTING.md). If a change applies\n  to multiple sections of the release notes, it should be noted in the first (most important) section\n  that applies. 
For instance, a bug fix that introduces incompatible behavior should be noted in\n  `Incompatible Behavior Changes` but not in `Bug Fixes`.\n* All code comments and documentation are expected to have proper English grammar and punctuation.\n  If you are not a fluent English speaker (or a bad writer ;-)) please let us know and we will try\n  to find some help but there are no guarantees.\n* Your PR title should be descriptive, and generally start with a subsystem name followed by a\n  colon. Examples:\n  * \"docs: fix grammar error\"\n  * \"http conn man: add new feature\"\n* Your PR commit message will be used as the commit message when your PR is merged. You should\n  update this field if your PR diverges during review.\n* Your PR description should have details on what the PR does. If it fixes an existing issue it\n  should end with \"Fixes #XXX\".\n* If your PR is co-authored or based on an earlier PR from another contributor,\n  please attribute them with `Co-authored-by: name <name@example.com>`. See\n  GitHub's [multiple author\n  guidance](https://help.github.com/en/github/committing-changes-to-your-project/creating-a-commit-with-multiple-authors)\n  for further details.\n* When all of the tests are passing and all other conditions described herein are satisfied, a\n  maintainer will be assigned to review and merge the PR.\n* Once you submit a PR, *please do not rebase it*. It's much easier to review if subsequent commits\n  are new commits and/or merges. We squash rebase the final merged commit so the number of commits\n  you have in the PR doesn't matter.\n* We expect that once a PR is opened, it will be actively worked on until it is merged or closed.\n  We reserve the right to close PRs that are not making progress. This is generally defined as no\n  changes for 7 days. 
Obviously PRs that are closed due to lack of activity can be reopened later.\n  Closing stale PRs helps us to keep on top of all of the work currently in flight.\n* If a commit deprecates a feature, the commit message must mention what has been deprecated.\n  Additionally, the [version history](docs/root/version_history/current.rst) must be updated with\n  relevant RST links for fields and messages as part of the commit.\n* Please consider joining the [envoy-dev](https://groups.google.com/forum/#!forum/envoy-dev)\n  mailing list.\n* If your PR involves any changes to\n  [envoy-filter-example](https://github.com/envoyproxy/envoy-filter-example) (for example making a new\n  branch so that CI can pass) it is your responsibility to follow through with merging those\n  changes back to master once the CI dance is done.\n* If your PR is a high risk change, the reviewer may ask that you runtime guard\n  it. See the section on runtime guarding below.\n\n\n# Runtime guarding\n\nSome changes in Envoy are deemed worthy of runtime guarding. Instead of just replacing\nold code with new code, both code paths are supported for between one Envoy release (if it is\nguarded due to performance concerns) and a full deprecation cycle (if it is a high risk behavioral\nchange). Generally as a community we try to guard both high risk changes (major\nrefactors such as replacing Envoy's buffer implementation) and most user-visible\nnon-config-guarded changes to protocol processing (for example additions or changes to HTTP headers or\nhow HTTP is serialized out) for non-alpha features. 
Feel free to tag @envoyproxy/maintainers\nif you aren't sure if a given change merits runtime guarding.\n\nThe canonical way to runtime guard a feature is\n```\nif (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.my_feature_name\")) {\n  [new code path]\n} else {\n  [old_code_path]\n}\n```\nRuntime guarded features named with the \"envoy.reloadable_features.\" prefix must be safe to flip\ntrue or false on running Envoy instances. In some situations it may make more sense to\nlatch the value in a member variable on class creation, for example:\n\n```\nbool use_new_code_path_ =\n    Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.my_feature_name\")\n```\n\nThis should only be done if the lifetime of the object in question is relatively short compared to\nthe lifetime of most Envoy instances, i.e. latching state on creation of the\nHttp::ConnectionManagerImpl or all Network::ConnectionImpl classes, to ensure that the new behavior\nwill be exercised as the runtime value is flipped, and that the old behavior will trail off over\ntime.\n\nRuntime guarded features may either set true (running the new code by default) in the initial PR,\nafter a testing interval, or during the next release cycle, at the PR author's and reviewing\nmaintainer's discretion. Generally all runtime guarded features will be set true when a\nrelease is cut. Old code paths for refactors can be cleaned up after a release and there has been\nsome production run time. Old code for behavioral changes will be deprecated after six months.\nRuntime features are set true by default by inclusion in\n[source/common/runtime/runtime_features.cc](https://github.com/envoyproxy/envoy/blob/master/source/common/runtime/runtime_features.cc)\n\nThere are four suggested options for testing new runtime features:\n\n1. 
Create a per-test Runtime::LoaderSingleton as done in [DeprecatedFieldsTest.IndividualFieldDisallowedWithRuntimeOverride](https://github.com/envoyproxy/envoy/blob/master/test/common/protobuf/utility_test.cc)\n2. Create a [parameterized test](https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#how-to-write-value-parameterized-tests)\n   where the set up of the test sets the new runtime value explicitly to\n   GetParam() as outlined in (1).\n3. Set up integration tests with custom runtime defaults as documented in the\n   [integration test README](https://github.com/envoyproxy/envoy/blob/master/test/integration/README.md)\n4. Run a given unit test with the new runtime value explicitly set true or false as done\n   for [runtime_flag_override_test](https://github.com/envoyproxy/envoy/blob/master/test/common/runtime/BUILD)\n\nRuntime code is held to the same standard as regular Envoy code, so both the old\npath and the new should have 100% coverage both with the feature defaulting true\nand false.\n\n# PR review policy for maintainers\n\n* Typically we try to turn around reviews within one business day.\n* See [OWNERS.md](OWNERS.md) for the current list of maintainers.\n* It is generally expected that a senior maintainer should review every PR.\n* It is also generally expected that a \"domain expert\" for the code the PR touches should review the\n  PR. This person does not necessarily need to have commit access.\n* The previous two points generally mean that every PR should have two approvals. 
(Exceptions can\n  be made by the senior maintainers).\n* The above rules may be waived for PRs which only update docs or comments, or trivial changes to\n  tests and tools (where trivial is decided by the maintainer in question).\n* In general, we should also attempt to make sure that at least one of the approvals is *from an\n  organization different from the PR author.* E.g., if Lyft authors a PR, at least one approver\n  should be from an organization other than Lyft. This helps us make sure that we aren't putting\n  organization specific shortcuts into the code.\n* If there is a question on who should review a PR please discuss in Slack.\n* Anyone is welcome to review any PR that they want, whether they are a maintainer or not.\n* Please make sure that the PR title, commit message, and description are updated if the PR changes\n  significantly during review.\n* Please **clean up the title and body** before merging. By default, GitHub fills the squash merge\n  title with the original title, and the commit body with every individual commit from the PR.\n  The maintainer doing the merge should make sure the title follows the guidelines above and should\n  overwrite the body with the original commit message from the PR (cleaning it up if necessary)\n  while preserving the PR author's final DCO sign-off.\n* If a PR includes a deprecation/breaking change, notification should be sent to the\n  [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list.\n\n# Adding new extensions\n\nFor developers adding a new extension, one can take an existing extension as the starting point.\n\nExtension configuration should be located in a directory structure like\n`api/envoy/extensions/area/plugin/`, for example `api/envoy/extensions/access_loggers/file/`\n\nThe code for the extension should be located under the equivalent\n`source/extensions/area/plugin`, and include an *envoy_cc_extension* with the\nconfiguration and tagged with the appropriate security 
posture, and an\n*envoy_cc_library* with the code. More details on how to add a new extension\nAPI can be found [here](api/STYLE.md#adding-an-extension-configuration-to-the-api):\n\nOther changes will likely include\n\n  * Editing [source/extensions/extensions_build_config.bzl](source/extensions/extensions_build_config.bzl) to include the new extensions\n  * Editing [docs/root/api-v3/config/config.rst](docs/root/api-v3/config/config.rst) to add area/area\n  * Adding `docs/root/api-v3/config/area/area.rst` to add a table of contents for the API docs\n  * Adding `source/extensions/area/well_known_names.h` for registered plugins\n\n# DCO: Sign your work\n\nEnvoy ships commit hooks that allow you to auto-generate the DCO signoff line if\nit doesn't exist when you run `git commit`. Simply navigate to the Envoy project\nroot and run:\n\n```bash\n./support/bootstrap\n```\n\nFrom here, simply commit as normal, and you will see the signoff at the bottom\nof each commit.\n\nThe sign-off is a simple line at the end of the explanation for the\npatch, which certifies that you wrote it or otherwise have the right to\npass it on as an open-source patch. 
The rules are pretty simple: if you\ncan certify the below (from\n[developercertificate.org](https://developercertificate.org/)):\n\n```\nDeveloper Certificate of Origin\nVersion 1.1\n\nCopyright (C) 2004, 2006 The Linux Foundation and its contributors.\n660 York Street, Suite 102,\nSan Francisco, CA 94110 USA\n\nEveryone is permitted to copy and distribute verbatim copies of this\nlicense document, but changing it is not allowed.\n\n\nDeveloper's Certificate of Origin 1.1\n\nBy making a contribution to this project, I certify that:\n\n(a) The contribution was created in whole or in part by me and I\n    have the right to submit it under the open source license\n    indicated in the file; or\n\n(b) The contribution is based upon previous work that, to the best\n    of my knowledge, is covered under an appropriate open source\n    license and I have the right under that license to submit that\n    work with modifications, whether created in whole or in part\n    by me, under the same open source license (unless I am\n    permitted to submit under a different license), as indicated\n    in the file; or\n\n(c) The contribution was provided directly to me by some other\n    person who certified (a), (b) or (c) and I have not modified\n    it.\n\n(d) I understand and agree that this project and the contribution\n    are public and that a record of the contribution (including all\n    personal information I submit with it, including my sign-off) is\n    maintained indefinitely and may be redistributed consistent with\n    this project or the open source license(s) involved.\n```\n\nthen you just add a line to every git commit message:\n\n    Signed-off-by: Joe Smith <joe@gmail.com>\n\nusing your real name (sorry, no pseudonyms or anonymous contributions.)\n\nYou can add the sign off when creating the git commit via `git commit -s`.\n\nIf you want this to be automatic you can set up some aliases:\n\n```bash\ngit config --add alias.amend \"commit -s --amend\"\ngit config 
--add alias.c \"commit -s\"\n```\n\n## Fixing DCO\n\nIf your PR fails the DCO check, it's necessary to fix the entire commit history in the PR. Best\npractice is to [squash](https://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html)\nthe commit history to a single commit, append the DCO sign-off as described above, and [force\npush](https://git-scm.com/docs/git-push#git-push---force). For example, if you have 2 commits in\nyour history:\n\n```bash\ngit rebase -i HEAD^^\n(interactive squash + DCO append)\ngit push origin -f\n```\n\nNote that, in general, rewriting history in this way is a hindrance to the review process and this\nshould only be done to correct a DCO mistake.\n\n## Triggering CI re-run without making changes\n\nTo rerun failed tasks in Circle-CI, add a comment with the line\n\n```\n/retest-circle\n```\n\nin it. This should rebuild only the failed tasks.\n\nTo rerun failed tasks in Azure pipelines, add a comment with the line\n\n```\n/retest\n```\n\nin it. This should rebuild only the failed tasks.\n\nSometimes tasks will be stuck in CI and won't be marked as failed, which means\nthe above command won't work. Should this happen, pushing an empty commit should\nre-run all the CI tasks. Consider adding an alias into your `.gitconfig` file:\n\n```\n[alias]\n    kick-ci = !\"git commit -s --allow-empty -m 'Kick CI' && git push\"\n```\n\nOnce you add this alias you can issue the command `git kick-ci` and the PR\nwill be sent back for a retest.\n"
  },
  {
    "path": "DCO",
    "content": "Developer Certificate of Origin\nVersion 1.1\n\nCopyright (C) 2004, 2006 The Linux Foundation and its contributors.\n1 Letterman Drive\nSuite D4700\nSan Francisco, CA, 94129\n\nEveryone is permitted to copy and distribute verbatim copies of this\nlicense document, but changing it is not allowed.\n\n\nDeveloper's Certificate of Origin 1.1\n\nBy making a contribution to this project, I certify that:\n\n(a) The contribution was created in whole or in part by me and I\n    have the right to submit it under the open source license\n    indicated in the file; or\n\n(b) The contribution is based upon previous work that, to the best\n    of my knowledge, is covered under an appropriate open source\n    license and I have the right under that license to submit that\n    work with modifications, whether created in whole or in part\n    by me, under the same open source license (unless I am\n    permitted to submit under a different license), as indicated\n    in the file; or\n\n(c) The contribution was provided directly to me by some other\n    person who certified (a), (b) or (c) and I have not modified\n    it.\n\n(d) I understand and agree that this project and the contribution\n    are public and that a record of the contribution (including all\n    personal information I submit with it, including my sign-off) is\n    maintained indefinitely and may be redistributed consistent with\n    this project or the open source license(s) involved.\n"
  },
  {
    "path": "DEPENDENCY_POLICY.md",
    "content": "# Envoy External Dependency Policy\n\nEnvoy has an evolving policy on external dependencies, tracked at\nhttps://github.com/envoyproxy/envoy/issues/10471. This will become stricter over time, below we\ndetail the policy as it currently applies.\n\n## External dependencies dashboard\n\nThe list of external dependencies in Envoy with their current version is available at\nhttps://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/external_deps\n\n## Declaring external dependencies\n\nIn general, all external dependencies for the Envoy proxy binary build and test should be declared\nin either [bazel/repository_locations.bzl](bazel/repository_locations.bzl) or\n[api/bazel/repository_locations.bzl](api/bazel/repository_locations.bzl), unless listed under\n[policy exceptions](#policy-exceptions).\n\nAn example entry for the `nghttp2` dependency is:\n\n```python\ncom_github_nghttp2_nghttp2 = dict(\n    project_name = \"Nghttp2\",\n    project_desc = \"Implementation of HTTP/2 and its header compression ...\",\n    project_url = \"https://nghttp2.org\",\n    version = \"1.41.0\",\n    sha256 = \"eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8\",\n    strip_prefix = \"nghttp2-{version}\",\n    urls = [\"https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz\"],\n    use_category = [\"dataplane\"],\n    last_updated = \"2020-06-02\",\n    cpe = \"cpe:2.3:a:nghttp2:nghttp2:*\",\n),\n```\n\nDependency declarations must:\n\n* Provide a meaningful project name and URL.\n* State the version in the `version` field. String interpolation should be used in `strip_prefix`\n  and `urls` to reference the version. If you need to reference version `X.Y.Z` as `X_Y_Z`, this\n  may appear in a string as `{underscore_version}`, similarly for `X-Y-Z` you can use\n  `{dash_version}`.\n* Versions should prefer release versions over master branch GitHub SHA tarballs. A comment is\n  necessary if the latter is used. 
This comment should contain the reason that a non-release\n  version is being used.\n* Provide accurate entries for `use_category`. Please think carefully about whether there are data\n  or control plane implications of the dependency.\n* Reflect the date (YYYY-MM-DD) at which they were last updated in the `last_updated` field. This\n  date is preferably the date at which the PR is created.\n* CPEs are compulsory for all dependencies that are not purely build/test.\n  [CPEs](https://en.wikipedia.org/wiki/Common_Platform_Enumeration) provide metadata that allow us\n  to correlate with related CVEs in dashboards and other tooling, and also provide a machine\n  consumable join key. You can consult the latest [CPE\n  dictionary](https://nvd.nist.gov/products/cpe) to find a CPE for a dependency. `\"N/A\"` should only\n  be used if no CPE for the project is available in the CPE database. CPEs should be _versionless_\n  with a `:*` suffix, since the version can be computed from `version`.\n\nWhen build or test code references Python modules, they should be imported via `pip3_import` in\n[bazel/repositories_extra.bzl](bazel/repositories_extra.bzl). Python modules should not be listed in\n`repository_locations.bzl` entries. `requirements.txt` files for Python dependencies must pin to\nexact versions, e.g. `PyYAML==5.3.1` and ideally also include a [SHA256\nchecksum](https://davidwalsh.name/hashin).\n\nPure developer tooling and documentation builds may reference Python via standalone\n`requirements.txt`, following the above policy.\n\n## New external dependencies\n\n* Any new dependency on the Envoy data or control plane that impacts Envoy core (i.e. is not\n  specific to a single non-core extension) must be cleared with the Envoy security team, please file\n  an issue and tag\n  [@envoyproxy/security-team](https://github.com/orgs/envoyproxy/teams/security-team). 
While policy\n  is still [evolving](robust_to_untrusted_downstream_and_upstream), criteria that will be used in\n  evaluation include:\n  * Does the project have release versions? How often do releases happen?\n  * Does the project have a security vulnerability disclosure process and contact details?\n  * Does the project have effective governance, e.g. multiple maintainers, a governance policy?\n  * Does the project have a code review culture? Are patches reviewed by independent maintainers\n    prior to merge?\n  * Does the project enable mandatory GitHub 2FA for contributors?\n  * Does the project have evidence of high test coverage, fuzzing, static analysis (e.g. CodeQL),\n    etc.?\n\n* Dependencies for extensions that are tagged as `robust_to_untrusted_downstream` or\n  `robust_to_untrusted_downstream_and_upstream` should be sensitive to the same set of concerns\n  as the core data plane.\n\n## Maintaining existing dependencies\n\nWe rely on community volunteers to help track the latest versions of dependencies. On a best effort\nbasis:\n\n* Core Envoy dependencies will be updated by the Envoy maintainers/security team.\n\n* Extension [CODEOWNERS](CODEOWNERS) should update extension specific dependencies.\n\nWhere possible, we prefer the latest release version for external dependencies, rather than master\nbranch GitHub SHA tarballs.\n\n## Dependency patches\n\nOccasionally it is necessary to introduce an Envoy-side patch to a dependency in a `.patch` file.\nThese are typically applied in [bazel/repositories.bzl](bazel/repositories.bzl). Our policy on this\nis as follows:\n\n* Patch files impede dependency updates. They are expedient at creation time but are a maintenance\n  penalty. 
They reduce the velocity and increase the effort of upgrades in response to security\n  vulnerabilities in external dependencies.\n\n* No patch will be accepted without a sincere and sustained effort to upstream the patch to the\n  dependency's canonical repository.\n\n* There should exist a plan-of-record, filed as an issue in Envoy or the upstream GitHub tracking\n  elimination of the patch.\n\n* Every patch must have comments at its point-of-use in [bazel/repositories.bzl](bazel/repositories.bzl)\n  providing a rationale and detailing the tracking issue.\n\n## Policy exceptions\n\nThe following dependencies are exempt from the policy:\n\n* Any developer-only facing tooling or the documentation build.\n\n* Transitive build time dependencies, e.g. Go projects vendored into\n  [protoc-gen-validate](https://github.com/envoyproxy/protoc-gen-validate).\n"
  },
  {
    "path": "DEPRECATED.md",
    "content": "# DEPRECATED\n\nThe [deprecated log](https://www.envoyproxy.io/docs/envoy/latest/version_history/version_history)\nfor each version can be found in the official Envoy developer documentation.\n"
  },
  {
    "path": "DEVELOPER.md",
    "content": "# Developer documentation\n\nEnvoy is built using the Bazel build system. CircleCI builds, tests, and runs coverage against all pull requests and the master branch.\n\nTo get started building Envoy locally, see the [Bazel quick start](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#quick-start-bazel-build-for-developers).\nTo run tests, there are Bazel [targets](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#testing-envoy-with-bazel) for Google Test.\nTo generate a coverage report, there is a [coverage build script](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#coverage-builds).\n\nIf you plan to contribute to Envoy, you may find it useful to install the Envoy [development support toolchain](https://github.com/envoyproxy/envoy/blob/master/support/README.md), which helps automate parts of the development process, particularly those involving code review.\n\nBelow is a list of additional documentation to aid the development process:\n\n- [General build and installation documentation](https://www.envoyproxy.io/docs/envoy/latest/install/install)\n\n- [Building and testing Envoy with Bazel](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md)\n\n- [Managing external dependencies with Bazel](https://github.com/envoyproxy/envoy/blob/master/bazel/EXTERNAL_DEPS.md)\n\n- [Guide to Envoy Bazel rules (managing `BUILD` files)](https://github.com/envoyproxy/envoy/blob/master/bazel/DEVELOPER.md)\n\n- [Using Docker for building and testing](https://github.com/envoyproxy/envoy/tree/master/ci)\n\n- [Guide to contributing to Envoy](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md)\n\n- [Overview of Envoy's testing frameworks](https://github.com/envoyproxy/envoy/blob/master/test/README.md)\n\n- [Overview of how to write integration tests for new code](https://github.com/envoyproxy/envoy/blob/master/test/integration/README.md)\n\n- [Envoy filter example project (how to consume and extend Envoy 
as a submodule)](https://github.com/envoyproxy/envoy-filter-example)\n\n- [Performance testing Envoy with `tcmalloc`/`pprof`](https://github.com/envoyproxy/envoy/blob/master/bazel/PPROF.md)\n\nAnd some documents on components of Envoy architecture:\n\n- [Envoy flow control](https://github.com/envoyproxy/envoy/blob/master/source/docs/flow_control.md)\n\n- [Envoy's subset load balancer](https://github.com/envoyproxy/envoy/blob/master/source/docs/subset_load_balancer.md)\n\n"
  },
  {
    "path": "EXTENSION_POLICY.md",
    "content": "# Envoy Extension Policy\n\n## Quality requirements\n\nAll extensions contained in the main Envoy repository will be held to the same quality bar as the\ncore Envoy code. This includes coding style, code reviews, test coverage, etc. In the future we\nmay consider creating a sandbox repository for extensions that are not compiled/tested by default\nand held to a lower quality standard, but that is out of scope currently.\n\n## Adding new extensions\n\nThe following procedure will be used when proposing new extensions for inclusion in the repository:\n  1. A GitHub issue should be opened describing the proposed extension as with any major feature\n  proposal.\n  2. All extensions must be sponsored by an existing maintainer. Sponsorship means that the\n  maintainer will shepherd the extension through design/code reviews. Maintainers can self-sponsor\n  extensions if they are going to write them, shepherd them, and maintain them.\n  \n     Sponsorship serves two purposes:\n     * It ensures that the extension will ultimately meet the Envoy quality bar.\n     * It makes sure that incentives are aligned and that extensions are not added to the repo without\n     sufficient thought put into future maintenance.\n\n     *If sponsorship cannot be found from an existing maintainer, an organization can consider\n     [doing the work to become a maintainer](./GOVERNANCE.md#process-for-becoming-a-maintainer) in\n     order to be able to self-sponsor extensions.*\n  \n  3. Each extension must have two reviewers proposed for reviewing PRs to the extension. Neither of\n  the reviewers must be a senior maintainer. Existing maintainers (including the sponsor) and other\n  contributors can count towards this number. The initial reviewers will be codified in the\n  [CODEOWNERS](./CODEOWNERS) file for long term maintenance. These reviewers can be swapped out as\n  needed.\n  4. Any extension added via this process becomes a full part of the repository. 
This means that any\n  API breaking changes in the core code will be automatically fixed as part of the normal PR process\n  by other contributors.\n  5. Any new dependencies added for this extension must comply with\n  [DEPENDENCY_POLICY.md](DEPENDENCY_POLICY.md), please follow the steps detailed there.\n\n## Removing existing extensions\n\nAs stated in the previous section, once an extension becomes part of the repository it will be\nmaintained by the collective set of Envoy contributors as needed.\n\nHowever, if an extension has known issues that are not being rectified by the original sponsor and\nreviewers or new contributors that are willing to step into the role of extension owner, a\n[vote of the maintainers](./GOVERNANCE.md#conflict-resolution-and-voting) can be called to remove the\nextension from the repository.\n\n## Extension pull request reviews\n\nExtension PRs must not modify core Envoy code. In the event that an extension requires changes to core\nEnvoy code, those changes should be submitted as a separate PR and will undergo the normal code review\nprocess, as documented in the [contributor's guide](./CONTRIBUTING.md).\n\nExtension PRs must be approved by at least one sponsoring maintainer and an extension reviewer. These\nmay be a single individual, but it is always preferred to have multiple reviewers when feasible.\n\nIn the event that the Extension PR author is a sponsoring maintainer and no other sponsoring maintainer\nis available, another maintainer may be enlisted to perform a minimal review for style and common C++\nanti-patterns. The Extension PR must still be approved by a non-maintainer reviewer.\n\n## Extension stability and security posture\n\nEvery extension is expected to be tagged with a `status` and `security_posture` in its\n`envoy_cc_extension` rule.\n\nThe `status` is one of:\n* `stable`: The extension is stable and is expected to be production usable. 
This is the default if\n  no `status` is specified.\n* `alpha`: The extension is functional but has not had substantial production burn time, use only\n  with this caveat.\n* `wip`: The extension is work-in-progress. Functionality is incomplete and it is not intended for\n  production use.\n\nThe extension status may be adjusted by the extension [CODEOWNERS](./CODEOWNERS) and/or Envoy\nmaintainers based on an assessment of the above criteria. Note that the status of the extension\nreflects the implementation status. It is orthogonal to the API stability, for example, an extension\nwith configuration `envoy.foo.v3alpha.Bar` might have a `stable` implementation and\n`envoy.foo.v3.Baz` can have a `wip` implementation.\n\nThe `security_posture` is one of:\n* `robust_to_untrusted_downstream`: The extension is hardened against untrusted downstream traffic. It\n   assumes that the upstream is trusted.\n* `robust_to_untrusted_downstream_and_upstream`: The extension is hardened against both untrusted\n   downstream and upstream traffic.\n* `requires_trusted_downstream_and_upstream`: The extension is not hardened and should only be used in deployments\n   where both the downstream and upstream are trusted.\n* `unknown`: This is functionally equivalent to `requires_trusted_downstream_and_upstream`, but acts\n  as a placeholder to allow us to identify extensions that need classifying.\n* `data_plane_agnostic`: Not relevant to data plane threats, e.g. stats sinks.\n \nAn assessment of a robust security posture for an extension is subject to the following guidelines:\n\n* Does the extension have fuzz coverage? If it's only receiving fuzzing\n  courtesy of the generic listener/network/HTTP filter fuzzers, does it have a\n  dedicated fuzzer for any parts of the code that would benefit?\n* Does the extension have unbounded internal buffering? 
Does it participate in\n  flow control via watermarking as needed?\n* Does the extension have at least one deployment with live untrusted traffic\n  for a period of time, N months?\n* Does the extension rely on dependencies that meet our [extension maturity\n  model](https://github.com/envoyproxy/envoy/issues/10471)?\n* Is the extension reasonable to audit by Envoy security team?\n* Is the extension free of obvious scary things, e.g. `memcpy`, does it have gnarly parsing code, etc?\n* Does the extension have active [CODEOWNERS](CODEOWNERS) who are willing to\n  vouch for the robustness of the extension?\n* Is the extension absent a [low coverage\n  exception](https://github.com/envoyproxy/envoy/blob/master/test/per_file_coverage.sh#L5)?\n\nThe current stability and security posture of all extensions can be seen\n[here](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/threat_model#core-and-extensions).\n"
  },
  {
    "path": "GOVERNANCE.md",
    "content": "# Process for becoming a maintainer\n\n## Your organization is not yet a maintainer\n\n* Express interest to the senior maintainers that your organization is interested in becoming a\n  maintainer. Becoming a maintainer generally means that you are going to be spending substantial\n  time (>25%) on Envoy for the foreseeable future. You should have domain expertise and be extremely\n  proficient in C++. Ultimately your goal is to become a senior maintainer that will represent your\n  organization.\n* We will expect you to start contributing increasingly complicated PRs, under the guidance\n  of the existing senior maintainers.\n* We may ask you to do some PRs from our backlog.\n* As you gain experience with the code base and our standards, we will ask you to do code reviews\n  for incoming PRs (i.e., all maintainers are expected to shoulder a proportional share of\n  community reviews).\n* After a period of approximately 2-3 months of working together and making sure we see eye to eye,\n  the existing senior maintainers will confer and decide whether to grant maintainer status or not.\n  We make no guarantees on the length of time this will take, but 2-3 months is the approximate\n  goal.\n\n## Your organization is currently a maintainer\n\n* First decide whether your organization really needs more people with maintainer access. 
Valid\n  reasons are \"blast radius\", a large organization that is working on multiple unrelated projects,\n  etc.\n* Contact a senior maintainer for your organization and express interest.\n* Start doing PRs and code reviews under the guidance of your senior maintainer.\n* After a period of 1-2 months the existing senior maintainers will discuss granting \"standard\"\n  maintainer access.\n* \"Standard\" maintainer access can be upgraded to \"senior\" maintainer access after another 1-2\n  months of work and another conference of the existing senior committers.\n\n## Maintainer responsibilities\n\n* Monitor email aliases.\n* Monitor Slack (delayed response is perfectly acceptable).\n* Triage GitHub issues and perform pull request reviews for other maintainers and the community.\n  The areas of specialization listed in [OWNERS.md](OWNERS.md) can be used to help with routing\n  an issue/question to the right person.\n* Triage build issues - file issues for known flaky builds or bugs, and either fix or find someone\n  to fix any master build breakages.\n* During GitHub issue triage, apply all applicable [labels](https://github.com/envoyproxy/envoy/labels)\n  to each new issue. Labels are extremely useful for future issue follow up. Which labels to apply\n  is somewhat subjective so just use your best judgment. A few of the most important labels that are\n  not self explanatory are:\n  * **beginner**: Mark any issue that can reasonably be accomplished by a new contributor with\n    this label.\n  * **help wanted**: Unless it is immediately obvious that someone is going to work on an issue (and\n    if so assign it), mark it help wanted.\n  * **question**: If it's unclear if an issue is immediately actionable, mark it with the\n    question label. Questions are easy to search for and close out at a later time. 
Questions\n    can be promoted to other issue types once it's clear they are actionable (at which point the\n    question label should be removed).\n* Make sure that ongoing PRs are moving forward at the right pace or close them.\n* Participate when called upon in the [security release process](SECURITY.md). Note that although\n  this should be a rare occurrence, if a serious vulnerability is found, the process may take up to\n  several full days of work to implement. This reality should be taken into account when discussing\n  time commitment obligations with employers.\n* In general continue to be willing to spend at least 25% of one's time working on Envoy (~1.25\n  business days per week).\n* We currently maintain an \"on-call\" rotation within the maintainers. Each on-call is 1 week.\n  Although all maintainers are welcome to perform all of the above tasks, it is the on-call\n  maintainer's responsibility to triage incoming issues/questions and marshal ongoing work\n  forward. To reiterate, it is *not* the responsibility of the on-call maintainer to answer all\n  questions and do all reviews, but it is their responsibility to make sure that everything is\n  being actively covered by someone.\n* The on-call rotation is tracked at Opsgenie. 
The calendar is visible\n[here](https://calendar.google.com/calendar/embed?src=d6glc0l5rc3v235q9l2j29dgovh3dn48%40import.calendar.google.com&ctz=America%2FNew_York)\nor you can subscribe to the iCal feed [here](webcal://kubernetes.app.opsgenie.com/webapi/webcal/getRecentSchedule?webcalToken=39dd1a892faa8d0d689f889b9d09ae787355ddff894396546726a5a02bac5b26&scheduleId=a3505963-c064-4c97-8865-947dfcb06060)\n\n## Cutting a release\n\n* We do releases every 3 months, at the end of each quarter, as described in the\n  [release schedule](RELEASES.md#release-schedule).\n* Take a look at open issues tagged with the current release, by\n  [searching](https://github.com/envoyproxy/envoy/issues) for\n  \"is:open is:issue milestone:[current milestone]\" and either hold off until\n  they are fixed or bump them to the next milestone.\n* Begin marshalling the ongoing PR flow in this repo. Ask maintainers to hold off merging any\n  particularly risky PRs until after the release is tagged. This is because we aim for master to be\n  at release candidate quality at all times.\n* Do a final check of the [release notes](docs/root/version_history/current.rst):\n  * Make any needed corrections (grammar, punctuation, formatting, etc.).\n  * Check to see if any security/stable version release notes are duplicated in\n    the major version release notes. These should not be duplicated.\n  * If the \"Deprecated\" section is empty, delete it.\n  * Remove the \"Pending\" tags and add dates to the top of the [release notes for this version](docs/root/version_history/current.rst).\n  * Switch the [VERSION](VERSION) from a \"dev\" variant to a final variant. E.g., \"1.6.0-dev\" to\n    \"1.6.0\".\n  * Update the [RELEASES](RELEASES.md) doc with the relevant dates.\n  * Get a review and merge.\n* Wait for tests to pass on [master](https://dev.azure.com/cncf/envoy/_build).\n* Create a [tagged release](https://github.com/envoyproxy/envoy/releases). 
The release should\n  start with \"v\" and be followed by the version number. E.g., \"v1.6.0\". **This must match the\n  [VERSION](VERSION).**\n* From the envoy [landing page](https://github.com/envoyproxy/envoy) use the branch drop-down to create a branch\n  from the tagged release, e.g. \"release/v1.6\". It will be used for the\n  [stable releases](RELEASES.md#stable-releases).\n* Monitor the AZP tag build to make sure that the final docker images get pushed along with\n  the final docs. The final documentation will end up in the\n  [envoyproxy.github.io repository](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy).\n* Update the website ([example PR](https://github.com/envoyproxy/envoyproxy.github.io/pull/148)) for the new release.\n* Craft a witty/uplifting email and send it to all the email aliases including envoy-announce@.\n* Make sure we tweet the new release: either have Matt do it or email social@cncf.io and ask them to do an Envoy account\n  post.\n* Do a new PR to setup the next version\n  * Update [VERSION](VERSION) to the next development release. E.g., \"1.7.0-dev\". \n  * `git mv docs/root/version_history/current.rst docs/root/version_history/v1.6.0.rst`, filling in the previous\n    release version number in the filename, and add an entry for the new file in the `toctree` in \n    [version_history.rst](docs/root/version_history/version_history.rst).\n  * Create a new \"current\" version history file at the [release\n  notes](docs/root/version_history/current.rst) for the following version. E.g., \"1.7.0 (pending)\". 
Use\n  this text as the template for the new file:\n```\n1.7.0 (Pending)\n===============\n\nIncompatible Behavior Changes\n-----------------------------\n*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required*\n\nMinor Behavior Changes\n----------------------\n*Changes that may cause incompatibilities for some users, but should not for most*\n\nBug Fixes\n---------\n*Changes expected to improve the state of the world and are unlikely to have negative effects*\n\nRemoved Config or Runtime\n-------------------------\n*Normally occurs at the end of the* :ref:`deprecation period <deprecated>`\n\nNew Features\n------------\n\nDeprecated\n----------\n```\n* Run the deprecate_versions.py script (e.g. `sh tools/deprecate_version/deprecate_version.sh`)\n  to file tracking issues for runtime guarded code which can be removed.\n* Check source/common/runtime/runtime_features.cc and see if any runtime guards in\n  disabled_runtime_features should be reassessed, and ping on the relevant issues.\n\n## When does a maintainer lose maintainer status\n\nIf a maintainer is no longer interested or cannot perform the maintainer duties listed above, they\nshould volunteer to be moved to emeritus status. In extreme cases this can also occur by a vote of\nthe maintainers per the voting process below.\n\n# xDS API shepherds\n\nThe [xDS API shepherds](https://github.com/orgs/envoyproxy/teams/api-shepherds) are responsible for\napproving any PR that modifies the [api/](api/) tree. They ensure that API [style](api/STYLE.md) and\n[versioning](api/API_VERSIONING.md) policies are enforced and that a consistent approach is taken\ntowards API evolution.\n\nThe xDS API shepherds are also the xDS API maintainers; they work collaboratively with the community\nto drive the xDS API roadmap and review major proposed design changes. 
The API shepherds are\nintended to be representative of xDS client and control plane developers who are actively working on\nxDS development and evolution.\n\nAs with maintainers, an API shepherd should be spending at least 25% of their time working on xDS\ndevelopments and expect to be active in this space in the near future. API shepherds are expected to\ntake on API shepherd review load and participate in meetings. They should be active on Slack `#xds`\nand responsive to GitHub issues and PRs on which they are tagged.\n\nThe API shepherds are distinct from the [UDPA working\ngroup](https://github.com/cncf/udpa/blob/master/README.md), which aims to evolve xDS directionally\ntowards a universal dataplane API. API shepherds are responsible for the execution of the xDS\nday-to-day and guiding xDS implementation changes. Proposals from UDPA-WG will be aligned with the\nxDS API shepherds to ensure that xDS is heading towards the UDPA goal. xDS API shepherds operate\nunder the [envoyproxy](https://github.com/envoyproxy) organization but are expected to keep in mind\nthe needs of all xDS clients (currently Envoy and gRPC, but we are aware of other in-house\nimplementations) and the goals of UDPA-WG.\n\nIf you wish to become an API shepherd and satisfy the above criteria, please contact an existing\nAPI shepherd. We will factor in PR and review history to determine if the above API shepherd\nrequirements are met. We may ask you to shadow an existing API shepherd for a period of time to\nbuild confidence in consistent application of the API guidelines to PRs.\n\n# Extension addition policy\n\nAdding new [extensions](REPO_LAYOUT.md#sourceextensions-layout) has a dedicated policy. Please\nsee [this](./EXTENSION_POLICY.md) document for more information.\n\n# External dependency policy\n\nAdding new external dependencies has a dedicated policy. 
Please see [this](DEPENDENCY_POLICY.md)\ndocument for more information.\n\n# Conflict resolution and voting\n\nIn general, we prefer that technical issues and maintainer membership are amicably worked out\nbetween the persons involved. If a dispute cannot be decided independently, the maintainers can be\ncalled in to decide an issue. If the maintainers themselves cannot decide an issue, the issue will\nbe resolved by voting. The voting process is a simple majority in which each senior maintainer\nreceives two votes and each normal maintainer receives one vote.\n\n# Adding new projects to the envoyproxy GitHub organization\n\nNew projects will be added to the envoyproxy organization via GitHub issue discussion in one of the\nexisting projects in the organization. Once sufficient discussion has taken place (~3-5 business\ndays but depending on the volume of conversation), the maintainers of *the project where the issue\nwas opened* (since different projects in the organization may have different maintainers) will\ndecide whether the new project should be added. See the section above on voting if the maintainers\ncannot easily decide.\n"
  },
  {
    "path": "LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner].\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License."
  },
  {
    "path": "NOTICE",
    "content": "Envoy\nCopyright 2016-2019 Envoy Project Authors\n\nLicensed under Apache License 2.0.  See LICENSE for terms.\n"
  },
  {
    "path": "OWNERS.md",
    "content": "* See [CONTRIBUTING.md](CONTRIBUTING.md) for general contribution guidelines.\n* See [GOVERNANCE.md](GOVERNANCE.md) for governance guidelines and maintainer responsibilities.\n\nThis page lists all active maintainers and their areas of expertise. This can be used for\nrouting PRs, questions, etc. to the right place.\n\n# Senior maintainers\n\n* Matt Klein ([mattklein123](https://github.com/mattklein123)) (mklein@lyft.com)\n  * Catch-all, \"all the things\", and generally trying to make himself obsolete as fast as\n    possible.\n* Harvey Tuch ([htuch](https://github.com/htuch)) (htuch@google.com)\n  * APIs, xDS, UDPA, gRPC, configuration, security, Python, and Bash.\n* Alyssa Wilk ([alyssawilk](https://github.com/alyssawilk)) (alyssar@google.com)\n  * HTTP, flow control, cluster manager, load balancing, and core networking (listeners,\n    connections, etc.).\n* Stephan Zuercher ([zuercher](https://github.com/zuercher)) (zuercher@gmail.com)\n  * Load balancing, upstream clusters and cluster manager, logging, complex HTTP routing\n    (metadata, etc.), and macOS build.\n* Lizan Zhou ([lizan](https://github.com/lizan)) (lizan@tetrate.io)\n  * gRPC, gRPC/JSON transcoding, and core networking (transport socket abstractions), Bazel, build\n    issues, and CI in general.\n* Snow Pettersen ([snowp](https://github.com/snowp)) (aickck@gmail.com)\n  * Upstream, host/priority sets, load balancing, and retry plugins.\n* Greg Greenway ([ggreenway](https://github.com/ggreenway)) (ggreenway@apple.com)\n  * TLS, TCP proxy, listeners, and HTTP proxy/connection pooling.\n\n# Maintainers\n\n* Asra Ali ([asraa](https://github.com/asraa)) (asraa@google.com)\n  * Fuzzing, security, headers, HTTP/gRPC, router, access log, tests.\n* Yan Avlasov ([yanavlasov](https://github.com/yanavlasov)) (yavlasov@google.com)\n  * Data plane, codecs, security, configuration.\n* Jose Nino ([junr03](https://github.com/junr03)) (jnino@lyft.com)\n  * Outlier detection, HTTP routing, xDS, 
configuration/operational questions.\n* Dhi Aurrahman ([dio](https://github.com/dio)) (dio@tetrate.io)\n  * Lua, access logging, and general miscellany.\n* Joshua Marantz ([jmarantz](https://github.com/jmarantz)) (jmarantz@google.com)\n  * Stats, abseil, scalability, and performance.\n* Antonio Vicente ([antoniovicente](https://github.com/antoniovicente)) (avd@google.com)\n  * Event management, security, performance, data plane.\n\n# Envoy security team\n\n* All maintainers\n* Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com)\n* Tony Allen ([tonya11en](https://github.com/tonya11en)) (tallen@lyft.com)\n\n# Emeritus maintainers\n\n* Constance Caramanolis ([ccaraman](https://github.com/ccaraman)) (ccaramanolis@lyft.com)\n* Roman Dzhabarov ([RomanDzhabarov](https://github.com/RomanDzhabarov)) (rdzhabarov@lyft.com)\n* Bill Gallagher ([wgallagher](https://github.com/wgallagher)) (bgallagher@lyft.com)\n* Dan Noé ([dnoe](https://github.com/dnoe)) (dpn@google.com)\n\n# Friends of Envoy\n\nThis section lists a few people that are not maintainers but typically help out with subject\nmatter expert reviews. Feel free to loop them in as needed.\n\n* Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com)\n  * TLS, BoringSSL, and core networking (listeners, connections, etc.).\n* Shriram Rajagopalan ([rshriram](https://github.com/rshriram)) (shriram@us.ibm.com)\n  * Istio, APIs, HTTP routing, and WebSocket.\n* John Millikin ([jmillikin-stripe](https://github.com/jmillikin-stripe)) (jmillikin@stripe.com)\n  * Bazel/build.\n* Daniel Hochman ([danielhochman](https://github.com/danielhochman)) (dhochman@lyft.com)\n  * Redis, Python, configuration/operational questions.\n* Yuchen Dai ([lambdai](https://github.com/lambdai)) (lambdai@google.com)\n  * v2 xDS, listeners, filter chain discovery service.\n"
  },
  {
    "path": "PULL_REQUESTS.md",
    "content": "When creating an Envoy pull request (PR) the text box will automatically be filled\nin with the basic fields from the [pull request template](PULL_REQUEST_TEMPLATE.md). The following\nis a more detailed explanation of what should go in each field.\n\n### <a name=\"title\"></a>Title\n\nThe title of the PR should be brief (one line) noting the subsystem or the aspect this PR applies to and\nexplaining the overall change. Both the component and the explanation must be lower case. For example:\n\n* ci: update build image to 44d539cb\n* docs: fix indent\n* buffer: add copyOut() method\n* router: add x-envoy-overloaded header\n* tls: add support for specifying TLS session ticket keys\n\n### <a name=\"desc\"></a>Commit Message\n\nThe commit message field should include an explanation of what this PR\ndoes. This will be used as the final commit message that maintainers will use to\npopulate the commit message when merging. If this PR causes a change in behavior\nit should document the behavior before and after. If fixing a bug, please\ndescribe what the original issue is and how the change resolves it. If it is\nconfiguration controlled, it should note how the feature is enabled etc...\n\n\n### <a name=\"desc\"></a>Additional Description\n\nThe additional description field should include information of what this PR does\nthat may be out of scope for a commit message. This could include additional\ninformation or context useful to reviewers.\n\n### <a name=\"risk\"></a>Risk\n\nRisk Level is one of: Low | Medium | High\n\nLow: Small bug fix or small optional feature.\n\nMedium: New features that are not enabled (for example: new filter). 
Small-medium\nfeatures added to existing components (for example: modification to an existing\nfilter).\n\nHigh: Complicated changes such as flow control, rewrites of critical\ncomponents, etc.\n\nNote: The above is only a rough guide for choosing a level,\nplease ask if you have any concerns about the risk of the PR.\n\n### <a name=\"testing\"></a>Testing\n\nThe testing section should include an explanation of what testing was done, for example: unit test,\nintegration, manual testing, etc.\n\nNote: It isn’t expected to do all forms of testing, please use your best judgement or ask for\nguidance if you are unsure. A good rule of thumb is the riskier the change, the\nmore comprehensive the testing should be.\n\n### <a name=\"docs\"></a>Documentation\n\nIf there are documentation changes, please include a brief description of what they are. Docs\nchanges may be in [docs/root](docs/root) and/or inline with the API protos. Please write in\nN/A if there were no documentation changes.\n\nAny PRs with structural changes to the dataplane should also update the [Life of a\nRequest](docs/root/intro/life_of_a_request.md) documentation as appropriate.\n\n### <a name=\"relnotes\"></a>Release notes\n\nIf this change is user impacting OR extension developer impacting (filter API, etc.) you **must**\nadd a release note to the [version history](docs/root/version_history/current.rst) for the\ncurrent version. Please include any relevant links. Each release note should be prefixed with the\nrelevant subsystem in **alphabetical order** (see existing examples as a guide) and include links\nto relevant parts of the documentation. Thank you! 
Please write in N/A if there are no release notes.\n\n### <a name=\"runtime_guard\"></a>Runtime guard\n\nIf this PR has a user-visible behavioral change, or otherwise falls under the\nguidelines for runtime guarding in the [contributing doc](CONTRIBUTING.md)\nit should have a runtime guard, which should be documented both in the release\nnotes and here in the PR description.\n\nFor new feature additions guarded by configs, no-op refactors, docs changes etc.\nthis field can be disregarded and/or removed.\n\n### <a name=\"issues\"></a>Issues\n\nIf this PR fixes an outstanding issue, please add a line of the form:\n\nFixes #Issue\n\nThis will result in the linked issue being automatically closed when the PR is\nmerged. If you want to associate an issue with a PR without closing the issue,\nyou may instead just tag the PR with the issue:\n\n\\#Issue\n\n### <a name=\"deprecated\"></a>Deprecated\n\nIf this PR deprecates existing Envoy APIs or code, it should include an update to the deprecated\nsection of the [version history](docs/root/version_history/current.rst) and a one line note in the\nPR description.\n\nIf you mark existing APIs or code as deprecated, when the next release is cut, the\ndeprecation script will create and assign an issue to you for\ncleaning up the deprecated code path.\n"
  },
  {
    "path": "PULL_REQUEST_TEMPLATE.md",
    "content": "<!--\n!!!ATTENTION!!!\n\nIf you are fixing *any* crash or *any* potential security issue, *do not*\nopen a pull request in this repo. Please report the issue via emailing\nenvoy-security@googlegroups.com where the issue will be triaged appropriately.\nThank you in advance for helping to keep Envoy secure.\n\n!!!ATTENTION!!!\n\n-->\nFor an explanation of how to fill out the fields, please see the relevant section\nin [PULL_REQUESTS.md](https://github.com/envoyproxy/envoy/blob/master/PULL_REQUESTS.md)\n\nCommit Message:\nAdditional Description:\nRisk Level:\nTesting:\nDocs Changes:\nRelease Notes:\n[Optional Runtime guard:]\n[Optional Fixes #Issue]\n[Optional Deprecated:]\n"
  },
  {
    "path": "README.md",
    "content": "![Envoy Logo](https://github.com/envoyproxy/artwork/blob/master/PNG/Envoy_Logo_Final_PANTONE.png)\n\n[Cloud-native high-performance edge/middle/service proxy](https://www.envoyproxy.io/)\n\nEnvoy is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CNCF). If you are a\ncompany that wants to help shape the evolution of technologies that are container-packaged,\ndynamically-scheduled and microservices-oriented, consider joining the CNCF. For details about who's\ninvolved and how Envoy plays a role, read the CNCF\n[announcement](https://www.cncf.io/blog/2017/09/13/cncf-hosts-envoy/).\n\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1266/badge)](https://bestpractices.coreinfrastructure.org/projects/1266)\n[![Azure Pipelines](https://dev.azure.com/cncf/envoy/_apis/build/status/11?branchName=master)](https://dev.azure.com/cncf/envoy/_build/latest?definitionId=11&branchName=master)\n[![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master)\n[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/envoy.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:envoy)\n[![Jenkins](https://img.shields.io/jenkins/s/https/powerci.osuosl.org/job/build-envoy-master/badge/icon/.svg?label=ppc64le%20build)](http://powerci.osuosl.org/job/build-envoy-master/)\n\n## Documentation\n\n* [Official documentation](https://www.envoyproxy.io/)\n* [FAQ](https://www.envoyproxy.io/docs/envoy/latest/faq/overview)\n* [Unofficial Chinese documentation](https://www.servicemesher.com/envoy/)\n* Watch [a video overview of Envoy](https://www.youtube.com/watch?v=RVZX4CwKhGE)\n([transcript](https://www.microservices.com/talks/lyfts-envoy-monolith-service-mesh-matt-klein/))\nto find out more about the origin story and design philosophy of Envoy\n* 
[Blog](https://medium.com/@mattklein123/envoy-threading-model-a8d44b922310) about the threading model\n* [Blog](https://medium.com/@mattklein123/envoy-hot-restart-1d16b14555b5) about hot restart\n* [Blog](https://medium.com/@mattklein123/envoy-stats-b65c7f363342) about stats architecture\n* [Blog](https://medium.com/@mattklein123/the-universal-data-plane-api-d15cec7a) about universal data plane API\n* [Blog](https://medium.com/@mattklein123/lyfts-envoy-dashboards-5c91738816b1) on Lyft's Envoy dashboards\n\n## Related\n\n* [data-plane-api](https://github.com/envoyproxy/data-plane-api): v2 API definitions as a standalone\n  repository. This is a read-only mirror of [api](api/).\n* [envoy-perf](https://github.com/envoyproxy/envoy-perf): Performance testing framework.\n* [envoy-filter-example](https://github.com/envoyproxy/envoy-filter-example): Example of how to add new filters\n  and link to the main repository.\n\n## Contact\n\n* [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing\n  list where we will email announcements only.\n* [envoy-security-announce](https://groups.google.com/forum/#!forum/envoy-security-announce): Low frequency mailing\n  list where we will email security related announcements only.\n* [envoy-users](https://groups.google.com/forum/#!forum/envoy-users): General user discussion.\n* [envoy-dev](https://groups.google.com/forum/#!forum/envoy-dev): Envoy developer discussion (APIs,\n  feature design, etc.).\n* [envoy-maintainers](https://groups.google.com/forum/#!forum/envoy-maintainers): Use this list\n  to reach all core Envoy maintainers.\n* [Twitter](https://twitter.com/EnvoyProxy/): Follow along on Twitter!\n* [Slack](https://envoyproxy.slack.com/): Slack, to get invited go [here](https://envoyslack.cncf.io).\n  We have the IRC/XMPP gateways enabled if you prefer either of those. 
Once an account is created,\n  connection instructions for IRC/XMPP can be found [here](https://envoyproxy.slack.com/account/gateways).\n  * NOTE: Response to user questions is best effort on Slack. For a \"guaranteed\" response please email\n    envoy-users@ per the guidance in the following linked thread.\n\nPlease see [this](https://groups.google.com/forum/#!topic/envoy-announce/l9zjYsnS3TY) email thread\nfor information on email list usage.\n\n## Contributing\n\nContributing to Envoy is fun and modern C++ is a lot less scary than you might think if you don't\nhave prior experience. To get started:\n\n* [Contributing guide](CONTRIBUTING.md)\n* [Beginner issues](https://github.com/envoyproxy/envoy/issues?q=is%3Aopen+is%3Aissue+label%3Abeginner)\n* [Build/test quick start using docker](ci#building-and-running-tests-as-a-developer)\n* [Developer guide](DEVELOPER.md)\n* Consider installing the Envoy [development support toolchain](https://github.com/envoyproxy/envoy/blob/master/support/README.md), which helps automate parts of the development process, particularly those involving code review.\n* Please make sure that you let us know if you are working on an issue so we don't duplicate work!\n\n## Community Meeting\n\nThe Envoy team meets twice per month on Tuesday, alternating between 9am PT and 5PM PT. The public\nGoogle calendar is here: https://goo.gl/PkDijT\n\n* Meeting minutes are [here](https://goo.gl/5Cergb)\n* Recorded videos are posted [here](https://www.youtube.com/channel/UCvqbFHwN-nwalWPjPUKpvTA/videos?view=0&sort=dd&shelf_id=1)\n\n## Security\n\n### Security Audit\n\nA third party security audit was performed by Cure53, you can see the full report [here](docs/SECURITY_AUDIT.pdf).\n\n### Reporting security vulnerabilities\n\nIf you've found a vulnerability or a potential vulnerability in Envoy please let us know at\n[envoy-security](mailto:envoy-security@googlegroups.com). 
We'll send a confirmation\nemail to acknowledge your report, and we'll send an additional email when we've identified the issue\npositively or negatively.\n\nFor further details please see our complete [security release process](SECURITY.md).\n"
  },
  {
    "path": "RELEASES.md",
    "content": "# Release Process\n\n## Active development\n\nActive development is happening on the `master` branch, and a new version is released from it\nat the end of each quarter.\n\n## Stable releases\n\nStable releases of Envoy include:\n\n* Extended maintenance window (any version released in the last 12 months).\n* Security fixes backported from the `master` branch (including those deemed not worthy\n  of creating a CVE).\n* Stability fixes backported from the `master` branch (anything that can result in a crash,\n  including crashes triggered by a trusted control plane).\n* Bugfixes, deemed worthwhile by the maintainers of stable releases.\n\n### Hand-off\n\nHand-off to the maintainers of stable releases happens after Envoy maintainers release a new\nversion from the `master` branch by creating a `vX.Y.0` tag and a corresponding `release/vX.Y`\nbranch, with merge permissions given to the release manager of stable releases, and CI configured\nto execute tests on it.\n\n### Security releases\n\nCritical security fixes are owned by the Envoy security team, which provides fixes for the\n`master` branch, and the latest release branch. Once those fixes are ready, the maintainers\nof stable releases backport them to the remaining supported stable releases.\n\n### Backports\n\nAll other security and reliability fixes can be nominated for backporting to stable releases\nby Envoy maintainers, Envoy security team, the change author, or members of the Envoy community\nby adding the `backport/review` or `backport/approved` label (this can be done using [repokitteh]'s\n`/backport` command). Changes nominated by the change author and/or members of the Envoy community\nare evaluated for backporting on a case-by-case basis, and require approval from either the release\nmanager of stable release, Envoy maintainers, or Envoy security team. 
Once approved, those fixes\nare backported from the `master` branch to all supported stable branches by the maintainers of\nstable releases. New stable versions from non-critical security fixes are released on a regular\nschedule, initially aiming for the bi-weekly releases.\n\n### Release management\n\nRelease managers of stable releases are responsible for approving and merging backports, tagging\nstable releases and sending announcements about them. This role is rotating on a quarterly basis.\n\n| Quarter |       Release manager        |\n|:-------:|:----------------------------:|\n| 2020 Q1 | Piotr Sikora ([PiotrSikora]) |\n| 2020 Q2 | Piotr Sikora ([PiotrSikora]) |\n| 2020 Q3 | Yuchen Dai ([lambdai])       |\n\n## Release schedule\n\nIn order to accommodate downstream projects, new Envoy releases are produced on a fixed release\nschedule (at the end of each quarter), with an acceptable delay of up to 2 weeks, with a hard\ndeadline of 3 weeks.\n\n| Version |  Expected  |   Actual   | Difference | End of Life |\n|:-------:|:----------:|:----------:|:----------:|:-----------:|\n| 1.12.0  | 2019/09/30 | 2019/10/31 |  +31 days  | 2020/10/31  |\n| 1.13.0  | 2019/12/31 | 2020/01/20 |  +20 days  | 2021/01/20  |\n| 1.14.0  | 2020/03/31 | 2020/04/08 |   +8 days  | 2021/04/08  |\n| 1.15.0  | 2020/06/30 | 2020/07/07 |   +7 days  | 2021/07/07  |\n| 1.16.0  | 2020/09/30 | 2020/10/08 |   +8 days  | 2021/10/08  |\n| 1.17.0  | 2020/12/31 |            |            |             |\n\n\n[repokitteh]: https://github.com/repokitteh\n[PiotrSikora]: https://github.com/PiotrSikora\n"
  },
  {
    "path": "REPO_LAYOUT.md",
    "content": "# Repository layout overview\n\nThis is a high level overview of how the repository is laid out to both aid in code investigation,\nas well as to clearly specify how extensions are added to the repository. The top level directories\nare:\n\n* [.circleci/](.circleci/): Configuration for [CircleCI](https://circleci.com/gh/envoyproxy).\n* [api/](api/): Envoy data plane API.\n* [bazel/](bazel/): Configuration for Envoy's use of [Bazel](https://bazel.build/).\n* [ci/](ci/): Scripts used both during CI as well as to build Docker containers.\n* [configs/](configs/): Example Envoy configurations.\n* [docs/](docs/): End user facing Envoy proxy and data plane API documentation as well as scripts\n  for publishing final docs during releases.\n* [examples/](examples/): Larger Envoy examples using Docker and Docker Compose.\n* [include/](include/): \"Public\" interface headers for \"core\" Envoy. In general,\n  these are almost entirely 100% abstract classes. There are a few cases of not-abstract classes in\n  the \"public\" headers, typically for performance reasons. Note that \"core\" includes some\n  \"extensions\" such as the HTTP connection manager filter and associated functionality which are\n  so fundamental to Envoy that they will likely never be optional from a compilation perspective.\n* [restarter/](restarter/): Envoy's hot restart wrapper Python script.\n* [source/](source/): Source code for core Envoy as well as extensions. The layout of this directory\n  is discussed in further detail below.\n* [support/](support/): Development support scripts (pre-commit Git hooks, etc.)\n* [test/](test/): Test code for core Envoy as well as extensions. 
The layout of this directory is\n  discussed in further detail below.\n* [tools/](tools/): Miscellaneous tools that have not found a home somewhere else.\n\n## [source/](source/)\n\n* [common/](source/common/): Core Envoy code (not specific to extensions) that is also not\n  specific to a standalone server implementation. I.e., this is the code that could be used if Envoy\n  were eventually embedded as a library.\n* [docs/](source/docs/): Miscellaneous developer/design documentation that is not relevant for\n  the public user documentation.\n* [exe/](source/exe/): Code specific to building the final production Envoy server binary. This is\n  the only code that is not shared by integration and unit tests.\n* [extensions/](source/extensions/): Extensions to the core Envoy code. The layout of this\n  directory is discussed in further detail below.\n* [server/](source/server/): Code specific to running Envoy as a standalone server. E.g,\n  configuration, server startup, workers, etc. Over time, the line between `common/` and `server/`\n  has become somewhat blurred. Use best judgment as to where to place something.\n\n## [test/](test/)\n\nNot every directory within test is described below, but a few highlights:\n\n* Unit tests are found in directories matching their [source/](source/) equivalents. E.g.,\n  [common/](test/common/), [exe/](test/exe/), and [server/](test/server/).\n* Extension unit tests also match their source equivalents in [extensions/](test/extensions/).\n* [integration/](test/integration/) holds end-to-end integration tests using roughly the real\n  Envoy server code, fake downstream clients, and fake upstream servers. Integration tests also\n  test some of the extensions found in the repository. Note that in the future, we would like to\n  allow integration tests that are specific to extensions and are not required for covering\n  \"core\" Envoy functionality. 
Those integration tests will likely end up in the\n  [extensions/](test/extensions/) directory but further work and thinking is required before\n  we get to that point.\n* [mocks/](test/mocks/) contains mock implementations of all of the core Envoy interfaces found in\n  [include/](include/).\n* Other directories include tooling used for configuration testing, coverage testing, fuzz testing,\n  common test code, etc.\n\n## [source/extensions](source/extensions/) layout\n\nWe maintain a very specific code and namespace layout for extensions. This aids in discovering\ncode/extensions, and also will allow us in the future to more easily scale out our extension\nmaintainers by having OWNERS files specific to certain extensions. (As of this writing, this is not\ncurrently implemented but that is the plan moving forward.)\n\n* All extensions are either registered in [all_extensions.bzl](source/extensions/all_extensions.bzl)\n  or [extensions_build_config.bzl](source/extensions/extensions_build_config.bzl). The former is\n  for extensions that cannot be removed from the primary Envoy build. The latter is for extensions\n  that can be removed on a site specific basis. See [bazel/README.md](bazel/README.md) for how to\n  compile out extensions on a site specific basis. 
Note that by default extensions should be\n  removable from the build unless there is a very good reason.\n* These are the top level extension directories and associated namespaces:\n  * [access_loggers/](/source/extensions/access_loggers): Access log implementations which use\n    the `Envoy::Extensions::AccessLoggers` namespace.\n  * [filters/http/](/source/extensions/filters/http): HTTP L7 filters which use the\n    `Envoy::Extensions::HttpFilters` namespace.\n  * [filters/listener/](/source/extensions/filters/listener): Listener filters which use the\n    `Envoy::Extensions::ListenerFilters` namespace.\n  * [filters/network/](/source/extensions/filters/network): L4 network filters which use the\n    `Envoy::Extensions::NetworkFilters` namespace.\n  * [grpc_credentials/](/source/extensions/grpc_credentials): Custom gRPC credentials which use the\n    `Envoy::Extensions::GrpcCredentials` namespace.\n  * [health_checker/](/source/extensions/health_checker): Custom health checkers which use the\n    `Envoy::Extensions::HealthCheckers` namespace.\n  * [resolvers/](/source/extensions/resolvers): Network address resolvers which use the\n    `Envoy::Extensions::Resolvers` namespace.\n  * [stat_sinks/](/source/extensions/stat_sinks): Stat sink implementations which use the\n    `Envoy::Extensions::StatSinks` namespace.\n  * [tracers/](/source/extensions/tracers): Tracers which use the\n    `Envoy::Extensions::Tracers` namespace.\n  * [transport_sockets/](/source/extensions/transport_sockets): Transport socket implementations\n    which use the `Envoy::Extensions::TransportSockets` namespace.\n* Each extension is contained wholly in its own namespace. E.g.,\n  `Envoy::Extensions::NetworkFilters::Echo`.\n* Common code that is used by multiple extensions should be in a `common/` directory as close to\n  the extensions as possible. E.g., [filters/common/](/source/extensions/filters/common) for common\n  code that is used by both HTTP and network filters. 
Common code used only by two HTTP filters\n  would be found in `filters/http/common/`. Common code should be placed in a common namespace.\n  E.g., `Envoy::Extensions::Filters::Common`.\n"
  },
  {
    "path": "SECURITY.md",
    "content": "# Security Reporting Process\n\nPlease report any security issue or Envoy crash report to\nenvoy-security@googlegroups.com where the issue will be triaged appropriately.\nThank you in advance for helping to keep Envoy secure.\n\n# Security Release Process\n\nEnvoy is a large growing community of volunteers, users, and vendors. The Envoy community has\nadopted this security disclosure and response policy to ensure we responsibly handle critical\nissues.\n\n## Product Security Team (PST)\n\nSecurity vulnerabilities should be handled quickly and sometimes privately. The primary goal of this\nprocess is to reduce the total time users are vulnerable to publicly known exploits.\n\nThe Product Security Team (PST) is responsible for organizing the entire response including internal\ncommunication and external disclosure but will need help from relevant developers to successfully\nrun this process.\n\nThe initial Product Security Team will consist of all [maintainers](OWNERS.md) in the private\n[envoy-security](https://groups.google.com/forum/#!forum/envoy-security) list. In the future we may\ndecide to have a subset of maintainers work on security response given that this process is time\nconsuming.\n\n## Disclosures\n\n### Private Disclosure Processes\n\nThe Envoy community asks that all suspected vulnerabilities be privately and responsibly disclosed\nvia the [reporting policy](README.md#reporting-security-vulnerabilities).\n\n### Public Disclosure Processes\n\nIf you know of a publicly disclosed security vulnerability please IMMEDIATELY email\n[envoy-security](https://groups.google.com/forum/#!forum/envoy-security) to inform the Product\nSecurity Team (PST) about the vulnerability so they may start the patch, release, and communication\nprocess.\n\nIf possible the PST will ask the person making the public report if the issue can be handled via a\nprivate disclosure process (for example if the full exploit details have not yet been published). 
If\nthe reporter denies the request for private disclosure, the PST will move swiftly with the fix and\nrelease process. In extreme cases GitHub can be asked to delete the issue but this generally isn't\nnecessary and is unlikely to make a public disclosure less damaging.\n\n## Patch, Release, and Public Communication\n\nFor each vulnerability a member of the PST will volunteer to lead coordination with the \"Fix Team\"\nand is responsible for sending disclosure emails to the rest of the community. This lead will be\nreferred to as the \"Fix Lead.\"\n\nThe role of Fix Lead should rotate round-robin across the PST.\n\nNote that given the current size of the Envoy community it is likely that the PST is the same as\nthe \"Fix team.\" (I.e., all maintainers). The PST may decide to bring in additional contributors\nfor added expertise depending on the area of the code that contains the vulnerability.\n\nAll of the timelines below are suggestions and assume a private disclosure. The Fix Lead drives the\nschedule using their best judgment based on severity and development time. If the Fix Lead is\ndealing with a public disclosure all timelines become ASAP (assuming the vulnerability has a CVSS\nscore >= 4; see below). If the fix relies on another upstream project's disclosure timeline, that\nwill adjust the process as well. We will work with the upstream project to fit their timeline and\nbest protect our users.\n\n### Released versions and master branch\n\nIf the vulnerability affects the last point release version, e.g. 1.10, then the full security\nrelease process described in this document will be activated. A security point release will be\ncreated for 1.10, e.g. 1.10.1, together with a fix to master if necessary. Older point releases,\ne.g. 
1.9, are not supported by the Envoy project and will not have any security release created.\n\nIf a security vulnerability affects only these older versions but not master or the last supported\npoint release, the Envoy security team will share this information with the private distributor\nlist, following the standard embargo process, but not create a security release. After the embargo\nexpires, the vulnerability will be described as a GitHub issue. A CVE will be filed if warranted by\nseverity.\n\nIf a vulnerability does not affect any point release but only master, additional caveats apply:\n\n* If the issue is detected and a fix is available within 7 days of the introduction of the\n  vulnerability, or the issue is deemed a low severity vulnerability by the Envoy maintainer and\n  security teams, the fix will be publicly reviewed and landed on master. If the severity is at least\n  medium or at maintainer discretion a courtesy e-mail will be sent to envoy-users@googlegroups.com,\n  envoy-dev@googlegroups.com, envoy-security-announce@googlegroups.com and\n  cncf-envoy-distributors-announce@lists.cncf.io.\n* If the vulnerability has been in existence for more than 7 days and is medium or higher, we will\n  activate the security release process.\n\nWe advise distributors and operators working from the master branch to allow at least 5 days soak\ntime after cutting a binary release before distribution or rollout, to allow time for our fuzzers to\ndetect issues during their execution on ClusterFuzz. 
A soak period of 7 days provides an even stronger\nguarantee, since we will invoke the security release process for medium or higher severity issues\nfor these older bugs.\n\n### Threat model\n\nSee https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/threat_model.\nVulnerabilities are evaluated against this threat model when deciding whether to activate the Envoy\nsecurity release process.\n\n### Fix Team Organization\n\nThese steps should be completed within the first 24 hours of disclosure.\n\n- The Fix Lead will work quickly to identify relevant engineers from the affected projects and\n  packages and CC those engineers into the disclosure thread. These selected developers are the Fix\n  Team.\n- The Fix Lead will get the Fix Team access to private security repos to develop the fix.\n\n### Fix Development Process\n\nThese steps should be completed within the 1-7 days of Disclosure.\n\n- The Fix Lead and the Fix Team will create a\n  [CVSS](https://www.first.org/cvss/specification-document) using the [CVSS\n  Calculator](https://www.first.org/cvss/calculator/3.0). The Fix Lead makes the final call on the\n  calculated CVSS; it is better to move quickly than making the CVSS perfect.\n- The Fix Team will notify the Fix Lead that work on the fix branch is complete once there are LGTMs\n  on all commits in the private repo from one or more maintainers.\n\nIf the CVSS score is under 4.0 ([a low severity\nscore](https://www.first.org/cvss/specification-document#i5)) the Fix Team can decide to slow the\nrelease process down in the face of holidays, developer bandwidth, etc. These decisions must be\ndiscussed on the envoy-security mailing list.\n\nA three week window will be provided to members of the private distributor list from candidate patch\navailability until the security release date. It is expected that distributors will normally be able\nto perform a release within this time window. 
If there are exceptional circumstances, the Envoy\nsecurity team will raise this window to four weeks. The release window will be reduced if the\nsecurity issue is public or embargo is broken.\n\nWe will endeavor not to overlap this three week window with or place it adjacent to major corporate\nholiday periods or end-of-quarter (e.g. impacting downstream Istio releases), where possible.\n\n### Fix and disclosure SLOs\n\n* All reports to envoy-security@googlegroups.com will be triaged and have an\n  initial response within 1 business day.\n\n* Privately disclosed issues will be fixed or publicly disclosed within 90 days\n  by the Envoy security team. In exceptional circumstances we reserve the right\n  to work with the discloser to coordinate on an extension, but this will be\n  rarely used.\n\n* Any issue discovered by the Envoy security team and raised in our private bug\n  tracker will be converted to a public issue within 90 days. We will regularly\n  audit these issues to ensure that no major vulnerability (from the perspective\n  of the threat model) is accidentally leaked.\n\n* Fuzz bugs are subject to a 90 day disclosure deadline.\n\n* Three weeks notice will be provided to private distributors from patch\n  availability until the embargo deadline.\n\n* Public zero days will be fixed ASAP, but there is no SLO for this, since this\n  will depend on the severity and impact to the organizations backing the Envoy\n  security team.\n\n### Fix Disclosure Process\n\nWith the fix development underway, the Fix Lead needs to come up with an overall communication plan\nfor the wider community. 
This Disclosure process should begin after the Fix Team has developed a Fix\nor mitigation so that a realistic timeline can be communicated to users.\n\n**Disclosure of Forthcoming Fix to Users** (Completed within 1-7 days of Disclosure)\n\n- The Fix Lead will email [envoy-security-announce@googlegroups.com](https://groups.google.com/forum/#!forum/envoy-security-announce)\n  (CC [envoy-announce@googlegroups.com](https://groups.google.com/forum/#!forum/envoy-announce))\n  informing users that a security vulnerability has been disclosed and that a fix will be made\n  available at YYYY-MM-DD HH:MM UTC in the future via this list. This time is the Release Date.\n- The Fix Lead will include any mitigating steps users can take until a fix is available.\n\nThe communication to users should be actionable. They should know when to block time to apply\npatches, understand exact mitigation steps, etc.\n\n**Optional Fix Disclosure to Private Distributors List** (Completed within 1-14 days of Disclosure):\n\n- The Fix Lead will make a determination with the help of the Fix Team if an issue is critical enough\n  to require early disclosure to distributors. Generally this Private Distributor Disclosure process\n  should be reserved for remotely exploitable or privilege escalation issues. Otherwise, this\n  process can be skipped.\n- The Fix Lead will email the patches to cncf-envoy-distributors-announce@lists.cncf.io so\n  distributors can prepare builds to be available to users on the day of the issue's announcement. Any \n  patches against main will be updated and resent weekly.\n  Distributors should read about the [Private Distributors List](#private-distributors-list) to find\n  out the requirements for being added to this list.\n- **What if a vendor breaks embargo?** The PST will assess the damage. The Fix Lead will make the\n  call to release earlier or continue with the plan. 
When in doubt push forward and go public ASAP.\n\n**Fix Release Day** (Completed within 1-21 days of Disclosure)\n\n- The maintainers will create a new patch release branch from the latest patch release tag + the fix\n  from the security branch. As a practical example if v1.5.3 is the latest patch release in Envoy.git\n  a new branch will be created called v1.5.4 which includes only patches required to fix the issue.\n- The Fix Lead will cherry-pick the patches onto the master branch and all relevant release branches.\n  The Fix Team will LGTM and merge. Maintainers will merge these PRs as quickly as possible. Changes\n  shouldn't be made to the commits even for a typo in the CHANGELOG as this will change the git sha\n  of the commits leading to confusion and potentially conflicts as the fix is cherry-picked around\n  branches.\n- The Fix Lead will request a CVE from [DWF](https://github.com/distributedweaknessfiling/DWF-Documentation)\n  and include the CVSS and release details.\n- The Fix Lead will email envoy-{dev,users,announce}@googlegroups.com now that everything is public\n  announcing the new releases, the CVE number, and the relevant merged PRs to get wide distribution\n  and user action. As much as possible this email should be actionable and include links on how to apply\n  the fix to user's environments; this can include links to external distributor documentation.\n- The Fix Lead will remove the Fix Team from the private security repo.\n\n### Retrospective\n\nThese steps should be completed 1-3 days after the Release Date. 
The retrospective process\n[should be blameless](https://landing.google.com/sre/book/chapters/postmortem-culture.html).\n\n- The Fix Lead will send a retrospective of the process to envoy-dev@googlegroups.com including\n  details on everyone involved, the timeline of the process, links to relevant PRs that introduced\n  the issue, if relevant, and any critiques of the response and release process.\n- Maintainers and Fix Team are also encouraged to send their own feedback on the process to\n  envoy-dev@googlegroups.com. Honest critique is the only way we are going to get good at this as a\n  community.\n\n## Private Distributors List\n\nThis list is intended to be used primarily to provide actionable information to\nmultiple distribution vendors as well as a *limited* set of high impact end users at once. *This\nlist is not intended in the general case for end users to find out about security issues*.\n\n### Embargo Policy\n\nThe information members receive on cncf-envoy-distributors-announce must not be made public, shared, nor\neven hinted at anywhere beyond the need-to-know within your specific team except with the list's\nexplicit approval. This holds true until the public disclosure date/time that was agreed upon by the\nlist. Members of the list and others may not use the information for anything other than getting the\nissue fixed for your respective users.\n\nBefore any information from the list is shared with respective members of your team required to fix\nsaid issue, they must agree to the same terms and only find out information on a need-to-know basis.\n\nWe typically expect a single point-of-contact (PoC) at any given legal entity. Within the\norganization, it is the responsibility of the PoC to share CVE and related patches internally. This\nshould be performed on a strictly need-to-know basis with affected groups to the extent that this is\ntechnically plausible. 
All teams should be aware of the embargo conditions and accept them.\nUltimately, if an organization breaks embargo transitively through such sharing, they will lose\nthe early disclosure privilege, so it's in their best interest to carefully share information internally,\nfollowing best practices and use their judgement in balancing the tradeoff between protecting users\nand maintaining confidentiality.\n\nThe embargo applies to information shared, source code and binary images. **It is a violation of the\nembargo policy to share binary distributions of the security fixes before the public release date.**\nThis includes, but is not limited to, Envoy binaries and Docker images. It is expected that\ndistributors have a method to stage and validate new binaries without exposing them publicly.\n\nIf the information shared is under embargo from a third party, where Envoy is one of many projects\nthat a disclosure is shared with, it is critical to consider that the ramifications of any leak will\nextend beyond the Envoy community and will leave us in a position in which we will be less likely to\nreceive embargoed reports in the future.\n\nIn the unfortunate event you share the information beyond what is allowed by this policy, you _must_\nurgently inform the envoy-security@googlegroups.com mailing list of exactly what information leaked\nand to whom. A retrospective will take place after the leak so we can assess how to prevent making the\nsame mistake in the future.\n\nIf you continue to leak information and break the policy outlined here, you will be removed from the\nlist.\n\n### Contributing Back\n\nThis is a team effort. As a member of the list you must carry some water. 
This\ncould be in the form of the following:\n\n**Technical**\n\n- Review and/or test the proposed patches and point out potential issues with\n  them (such as incomplete fixes for the originally reported issues, additional\n  issues you might notice, and newly introduced bugs), and inform the list of the\n  work done even if no issues were encountered.\n\n**Administrative**\n\n- Help draft emails to the public disclosure mailing list.\n- Help with release notes.\n\n### Membership Criteria\n\nTo be eligible for the cncf-envoy-distributors-announce mailing list, your\nuse of Envoy should:\n\n1. Be either:\n   1. An actively maintained distribution of Envoy components. An example is\n      \"SuperAwesomeLinuxDistro\" which offers Envoy pre-built packages. Another\n      example is \"SuperAwesomeServiceMesh\" which offers a service mesh product\n      that includes Envoy as a component.\n\n   OR\n\n   2. Offer Envoy as a publicly available infrastructure or platform service, in\n      which the product clearly states (e.g. public documentation, blog posts,\n      marketing copy, etc.) that it is built on top of Envoy. E.g.,\n      \"SuperAwesomeCloudProvider's Envoy as a Service (EaaS)\". An infrastructure\n      service that uses Envoy for a product but does not publicly say they are\n      using Envoy does not *generally* qualify (see option 3 that follows). This is essentially IaaS\n      or PaaS. If you use Envoy to support a SaaS, e.g. \"SuperAwesomeCatVideoService\", this does not\n      *generally* qualify.\n\n   OR\n\n   3. An end user of Envoy that satisfies the following requirements:\n       1. Is \"well known\" to the Envoy community. Being \"well known\" is fully subjective and\n          determined by the Envoy maintainers and security team. 
Becoming \"well known\" would\n          generally be achieved by activities such as: PR contributions, either code or\n          documentation; helping other end users on Slack, GitHub, and the mailing lists; speaking\n          about use of Envoy at conferences; writing about use of Envoy in blog posts; sponsoring\n          Envoy conferences, meetups, and other activities; etc. This is a more strict variant of\n          item 5 below.\n       2. Is of sufficient size, scale, and impact to make your inclusion on the list\n          worthwhile. The definition of size, scale, and impact is fully subjective and\n          determined by the Envoy maintainers and security team. The definition will not be\n          discussed further in this document.\n       3. You *must* smoke test and then widely deploy security patches promptly and report back\n          success or failure ASAP. Furthermore, the Envoy maintainers may occasionally ask you to\n          smoke test especially risky public PRs before they are merged. Not performing these tasks\n          in a reasonably prompt timeframe will result in removal from the list. This is a more\n          strict variant of item 7 below.\n       4. In order to balance inclusion in the list versus a greater chance of accidental\n          disclosure, end users added to the list via this option will be limited to a total of\n          **10** slots. Periodic review (see below) may allow new slots to open, so please continue\n          to apply if it seems your organization would otherwise qualify. The security team also\n          reserves the right to change this limit in the future.\n2. Have a user or customer base not limited to your own organization (except for option 3 above).\n   We will use the size of the user or customer base as part of the criteria to determine\n   eligibility.\n3. Have a publicly verifiable track record up to present day of fixing security\n   issues.\n4. 
Not be a downstream or rebuild of another distribution.\n5. Be a participant and active contributor in the community.\n6. Accept the [Embargo Policy](#embargo-policy) that is outlined above. You must\n   have a way to privately stage and validate your updates that does not violate\n   the embargo.\n7. Be willing to [contribute back](#contributing-back) as outlined above.\n8. Be able to perform a security release of your product within a three week window from candidate fix\n   patch availability.\n9. Have someone already on the list vouch for the person requesting membership\n   on behalf of your distribution.\n10. Nominate an e-mail alias or list for your organization to receive updates. This should not be\n    an individual user address, but instead a list that can be maintained by your organization as\n    individuals come and go. A good example is envoy-security@seven.com, a bad example is\n    acidburn@seven.com. You must accept the invite sent to this address or you will not receive any\n    e-mail updates. This e-mail address will be [shared with the Envoy community](#Members).\n\nNote that Envoy maintainers are members of the Envoy security team. [Members of the Envoy security\nteam](OWNERS.md#envoy-security-team) and the organizations that they represent are implicitly\nincluded in the private distributor list. These organizations do not need to meet the above list of\ncriteria with the exception of the acceptance of the embargo policy.\n\n### Requesting to Join\n\nNew membership requests are sent to envoy-security@googlegroups.com.\n\nIn the body of your request please specify how you qualify and fulfill each\ncriterion listed in [Membership Criteria](#membership-criteria).\n\nHere is a pseudo example:\n\n```\nTo: envoy-security@googlegroups.com\nSubject: Seven-Corp Membership to cncf-envoy-distributors-announce\n\nBelow are each criterion and why I think we, Seven-Corp, qualify.\n\n> 1. 
Be an actively maintained distribution of Envoy components OR offer Envoy as a publicly\n     available service in which the product clearly states that it is built on top of Envoy OR\n     be a well known end user of sufficient size, scale, and impact to make your\n     inclusion worthwhile.\n\nWe distribute the \"Seven\" distribution of Envoy [link]. We have been doing\nthis since 1999 before proxies were even cool.\n\nOR\n\nWe use Envoy for our #1 rated cat video service and have 40 billion MAU, proxying 40 trillion^2 RPS\nthrough Envoy at the edge. Secure cat videos are our top priority. We also contribute a lot to the Envoy\ncommunity by implementing features, not making Matt ask for documentation or tests, and writing blog\nposts about efficient Envoy cat video serving.\n\n> 2. Have a user or customer base not limited to your own organization. Please specify an\n>    approximate size of your user or customer base, including the number of\n>    production deployments.\n\nOur user base spans of the extensive \"Seven\" community. We have a slack and\nGitHub repos and mailing lists where the community hangs out. We have ~2000\ncustomers, of which approximately 400 are using Seven in production. [links]\n\n> 3. Have a publicly verifiable track record up to present day of fixing security\n     issues.\n\nWe announce on our blog all upstream patches we apply to \"Seven.\" [link to blog\nposts]\n\n> 4. Not be a downstream or rebuild of another distribution. If you offer Envoy as a publicly\n>    available infrastructure or platform service, this condition does not need to apply.\n\nThis does not apply, \"Seven\" is a unique snowflake distribution.\n\n> 5. Be a participant and active contributor in the community.\n\nOur members, Acidburn, Cereal, and ZeroCool are outstanding members and are well\nknown throughout the Envoy community. Especially for their contributions\nin hacking the Gibson.\n\n> 6. Accept the Embargo Policy that is outlined above. 
You must\n     have a way to privately stage and validate your updates that does not violate\n     the embargo.\n\nWe accept.\n\n> 7. Be willing to contribute back as outlined above.\n\nWe are definitely willing to help!\n\n> 8. Be able to perform a security release of your product within a three week window from candidate fix\n     patch availability.\n\nWe affirm we can spin out new security releases within a 2 week window.\n\n> 9. Have someone already on the list vouch for the person requesting membership\n>    on behalf of your distribution.\n\nCrashOverride will vouch for the \"Seven\" distribution joining the distribution list.\n\n> 10. Nominate an e-mail alias or list for your organization to receive updates. This should not be\n      an individual user address, but instead a list that can be maintained by your organization as\n      individuals come and go. A good example is envoy-security@seven.com, a bad example is\n      acidburn@seven.com. You must accept the invite sent to this address or you will not receive any\n      e-mail updates. 
This e-mail address will be shared with the Envoy community.\n\nenvoy-security@seven.com\n```\n\n### Review of membership criteria\n\nIn all cases, members of the distribution list will be reviewed on a yearly basis by the maintainers\nand security team to ensure they still qualify for inclusion on the list.\n\n### Members\n\n| E-mail                                                | Organization  | End User | Last Review |\n|-------------------------------------------------------|:-------------:|:--------:|:-----------:|\n| envoy-security-team@aspenmesh.io                      | Aspen Mesh    | No       | 12/19       |\n| aws-app-mesh-security@amazon.com                      | AWS           | No       | 12/19       |\n| security@cilium.io                                    | Cilium        | No       | 12/19       |\n| vulnerabilityreports@cloudfoundry.org                 | Cloud Foundry | No       | 12/19       |\n| secalert@datawire.io                                  | Datawire      | No       | 12/19       |\n| google-internal-envoy-security@google.com             | Google        | No       | 12/19       |\n| argoprod@us.ibm.com                                   | IBM           | No       | 12/19       |\n| istio-security-vulnerability-reports@googlegroups.com | Istio         | No       | 12/19       |\n| secalert@redhat.com                                   | Red Hat       | No       | 12/19       |\n| envoy-security@solo.io                                | solo.io       | No       | 12/19       |\n| envoy-security@tetrate.io                             | Tetrate       | No       | 12/19       |\n| security@vmware.com                                   | VMware        | No       | 12/19       |\n| envoy-security@pinterest.com                          | Pinterest     | Yes      | 12/19       |\n| envoy-security@dropbox.com                            | Dropbox       | Yes      | 01/20       |\n| envoy-security-predisclosure@stripe.com               | Stripe        
| Yes      | 01/20       |\n"
  },
  {
    "path": "STYLE.md",
    "content": "# C++ coding style\n\n* The Envoy source code is formatted using clang-format. Thus all white spaces, etc.\n  issues are taken care of automatically. The CircleCI tests will automatically check\n  the code format and fail. There are make targets that can both check the format\n  (check_format) as well as fix the code format for you (fix_format). Errors in\n  .clang-tidy are enforced while other warnings are suggestions. Note that code and\n  comment blocks designated `clang-format off` must be closed with `clang-format on`.\n  To run these checks locally, see [Support Tools](support/README.md).\n* Beyond code formatting, for the most part Envoy uses the\n  [Google C++ style guidelines](https://google.github.io/styleguide/cppguide.html).\n  The following section covers the major areas where we deviate from the Google\n  guidelines.\n\n# Repository file layout\n\n* Please see [REPO_LAYOUT.md](REPO_LAYOUT.md).\n\n# Documentation\n\n* If you are modifying the data plane structurally, please keep the [Life of a\n  Request](docs/root/intro/life_of_a_request.md) documentation up-to-date.\n\n# Deviations from Google C++ style guidelines\n\n* Exceptions are allowed and encouraged where appropriate. When using exceptions, do not add\n  additional error handling that cannot possibly happen in the case an exception is thrown.\n* Do use exceptions for:\n  - Configuration ingestion error handling. Invalid configurations (dynamic and\n    static) should throw meaningful `EnvoyException`s, the configuration\n    ingestion code will catch these.\n  - Constructor failure.\n  - Error handling in deep call stacks, where exceptions provide material\n    improvements to code complexity and readability.\n* Apply caution when using exceptions on the data path for general purpose error\n  handling. Exceptions are not caught on the data path and they should not be\n  used for simple error handling, e.g. 
with shallow call stacks, where explicit\n  error handling provides a more readable and easier to reason about\n  implementation.\n* References are always preferred over pointers when the reference cannot be null. This\n  includes both const and non-const references.\n* Function names should all use camel case starting with a lower case letter (e.g., `doFoo()`).\n* Struct/Class member variables have a `_` postfix (e.g., `int foo_;`).\n* Enum values using PascalCase (e.g., `RoundRobin`).\n* 100 columns is the line limit.\n* Use your GitHub name in TODO comments, e.g. `TODO(foobar): blah`.\n* Smart pointers are type aliased:\n  * `using FooPtr = std::unique_ptr<Foo>;`\n  * `using BarSharedPtr = std::shared_ptr<Bar>;`\n  * `using BlahConstSharedPtr = std::shared_ptr<const Blah>;`\n  * Regular pointers (e.g. `int* foo`) should not be type aliased.\n* `absl::optional<std::reference_wrapper<T>>` is type aliased:\n  * `using FooOptRef = absl::optional<std::reference_wrapper<T>>;`\n  * `using FooOptConstRef = absl::optional<std::reference_wrapper<const T>>;`\n* If move semantics are intended, prefer specifying function arguments with `&&`.\n  E.g., `void onHeaders(Http::HeaderMapPtr&& headers, ...)`. The rationale for this is that it\n  forces the caller to specify `std::move(...)` or pass a temporary and makes the intention at\n  the callsite clear. Otherwise, it's difficult to tell if a const reference is actually being\n  passed to the called function. This is true even for `std::unique_ptr`.\n* Prefer `unique_ptr` over `shared_ptr` wherever possible. `unique_ptr` makes ownership in\n  production code easier to reason about. Note that this creates some test oddities where\n  production code requires a `unique_ptr` but the test must still have access to the memory\n  the production code is using (mock or otherwise). 
In these cases it is acceptable to allocate\n  raw memory in a test and return it to the production code with the expectation that the\n  production code will hold it in a `unique_ptr` and free it. Envoy uses the factory pattern\n  quite a bit for these cases. (Search the code for \"factory\").\n* The Google C++ style guide points out that [non-PoD static and global variables are forbidden](https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables).\n  This _includes_ types such as `std::string`. We encourage the use of the\n  advice in the [C++ FAQ on the static initialization\n  fiasco](https://isocpp.org/wiki/faq/ctors#static-init-order-on-first-use) for\n  how to best handle this.\n* The Google C++ style guide points out that [constant vars should be named `kConstantVar`](https://google.github.io/styleguide/cppguide.html#Constant_Names).\n  In the Envoy codebase we use `ConstantVar` or `CONSTANT_VAR`. If you pick `CONSTANT_VAR`,\n  please be certain the name is globally significant to avoid potential conflicts with #defines,\n  which are not namespace-scoped, and may appear in externally controlled header files.\n* API-level comments should follow normal Doxygen conventions. Use `@param` to describe\n  parameters and `@return <return-type>` for return values. Internal comments for\n  methods and member variables may be regular C++ `//` comments or Doxygen at\n  developer discretion. Where possible, methods should have meaningful\n  documentation on expected input and state preconditions.\n* Header guards should use `#pragma once`.\n* All code should be inside a top-level Envoy namespace. There are some\n  exceptions such as `main()` functions. 
When code cannot be placed inside the\n  Envoy namespace there should be a comment of the form `// NOLINT(namespace-envoy)` at\n  the top of the file.\n* If a method that must be defined outside the `test` directory is intended to be called only\n  from test code then it should have a name that ends in `ForTest()` such as `aMethodForTest()`.\n  In most cases tests can and should be structured so this is not necessary.\n* Tests default to StrictMock so will fail if hitting unexpected warnings. Feel free to use\n  NiceMock for mocks whose behavior is not the focus of a test.\n* [Thread\n  annotations](https://github.com/abseil/abseil-cpp/blob/master/absl/base/thread_annotations.h),\n  such as `GUARDED_BY`, should be used for shared state guarded by\n  locks/mutexes.\n* Functions intended to be local to a cc file should be declared in an anonymous namespace,\n  rather than using the 'static' keyword. Note that the\n  [Google C++ style guide](https://google.github.io/styleguide/cppguide.html#Unnamed_Namespaces_and_Static_Variables)\n   allows either, but in Envoy we prefer anonymous namespaces.\n* Braces are required for all control statements including single line if, while, etc. statements.\n* Don't use [mangled Protobuf enum\n  names](https://developers.google.com/protocol-buffers/docs/reference/cpp-generated#enum).\n\n# Error handling\n\nA few general notes on our error handling philosophy:\n\n* All error code returns should be checked.\n* At a very high level, our philosophy is that errors should be handled gracefully when caused by:\n  - Untrusted network traffic OR\n  - Raised by the Envoy process environment and are *likely* to happen\n* Examples of likely environmental errors include any type of network error, disk IO error, bad\n  data returned by an API call, bad data read from runtime files, etc. 
Errors in the Envoy\n  environment that are *unlikely* to happen after process initialization, should lead to process\n  death, under the assumption that the additional burden of defensive coding and testing is not an\n  effective use of time for an error that should not happen given proper system setup. Examples of\n  these types of errors include not being able to open the shared memory region, system calls that\n  should not fail assuming correct parameters (which should be validated via tests), etc. Examples\n  of system calls that should not fail when passed valid parameters include the kernel returning a\n  valid `sockaddr` after a successful call to `accept()`, `pthread_create()`, `pthread_join()`, etc.\n* OOM events (both memory and FDs) are considered fatal crashing errors. An OOM error should never\n  silently be ignored and should crash the process either via the C++ allocation error exception, an\n  explicit `RELEASE_ASSERT` following a third party library call, or an obvious crash on a subsequent\n  line via null pointer dereference. This rule is again based on the philosophy that the engineering\n  costs of properly handling these cases are not worth it. Time is better spent designing proper system\n  controls that shed load if resource usage becomes too high, etc.\n* The \"less is more\" error handling philosophy described in the previous two points is primarily\n  based on the fact that restarts are designed to be fast, reliable and cheap.\n* Although we strongly recommend that any type of startup error leads to a fatal error, since this\n  is almost always a result of faulty configuration which should be caught during a canary process,\n  there may be cases in which we want some classes of startup errors to be non-fatal. For example,\n  if a misconfigured option is not necessary for server operation. 
Although this is discouraged, we\n  will discuss these on a case by case basis during code review (an example of this\n  is the `--admin-address-path` option). **If degraded mode error handling is implemented, we require\n  that there is complete test coverage for the degraded case.** Additionally, the user should be\n  aware of the degraded state minimally via an error log of level warn or greater and via the\n  increment of a stat.\n* If you do need to log a non-fatal warning or error, you can unit-test it with EXPECT_LOG_CONTAINS\n  or EXPECT_NO_LOGS from [logging.h](test/test_common/logging.h). It's generally bad practice to\n  test by depending on log messages unless the actual behavior being validated is logging.\n  It's preferable to export statistics to enable consumption by external monitoring for any\n  behavior that should be externally consumed or to introduce appropriate internal interfaces\n  such as mocks for internal behavior.\n* The error handling philosophy described herein is based on the assumption that Envoy is deployed\n  using industry best practices (primarily canary). Major and obvious errors should always be\n  caught in canary. If a low rate error leads to periodic crash cycling when deployed to\n  production, the error rate should allow for rollback without large customer impact.\n* Tip: If the thought of adding the extra test coverage, logging, and stats to handle an error and\n  continue seems ridiculous because *\"this should never happen\"*, it's a very good indication that\n  the appropriate behavior is to terminate the process and not handle the error. When in doubt,\n  please discuss.\n* Per above it's acceptable to turn failures into crash semantics\n  via `RELEASE_ASSERT(condition)` or `PANIC(message)` if there is no other sensible behavior, e.g.\n  in OOM (memory/FD) scenarios. Only `RELEASE_ASSERT(condition)` should be used to validate\n  conditions that might be imposed by the external environment. 
`ASSERT(condition)` should be used\n  to document (and check in debug-only builds) program invariants. Use `ASSERT` liberally, but do\n  not use it for things that will crash in an obvious way in a subsequent line. E.g., do not do\n  `ASSERT(foo != nullptr); foo->doSomething();`. Note that there is a gray line between external\n  environment failures and program invariant violations. For example, memory corruption due to a\n  security issue (a bug, deliberate buffer overflow etc.) might manifest as a violation of program\n  invariants or as a detectable condition in the external environment (e.g. some library returning a\n  highly unexpected error code or buffer contents). Unfortunately no rule can cleanly cover when to\n  use `RELEASE_ASSERT` vs. `ASSERT`. In general we view `ASSERT` as the common case and\n  `RELEASE_ASSERT` as the uncommon case, but experience and judgment may dictate a particular approach\n  depending on the situation.\n\n# Hermetic and deterministic tests\n\nTests should be hermetic, i.e. have all dependencies explicitly captured and not depend on the local\nenvironment. In general, there should be no non-local network access. In addition:\n\n* Port numbers should not be hardcoded. Tests should bind to port zero and then discover the bound\n  port when needed. This avoids flakes due to conflicting ports and allows tests to be executed\n  concurrently by Bazel. 
See\n  [`test/integration/integration_test.h`](test/integration/integration_test.h) and\n  [`test/common/network/listener_impl_test.cc`](test/common/network/listener_impl_test.cc)\n  for examples of tests that do this.\n\n* Paths should be constructed using:\n  * The methods in [`TestEnvironment`](test/test_common/environment.h) for C++ tests.\n  * With `${TEST_TMPDIR}` (for writable temporary space) or `${TEST_SRCDIR}` for read-only access to\n    test inputs in shell tests.\n  * With `{{ test_tmpdir }}`, `{{ test_rundir }}` and `{{ test_udsdir }}` respectively for JSON templates.\n    `{{ test_udsdir }}` is provided for pathname based Unix Domain Sockets, which must fit within a\n    108 character limit on Linux, a property that might not hold for `{{ test_tmpdir }}`.\n\nTests should be deterministic. They should not rely on randomness or details\nsuch as the current time. Instead, mocks such as\n[`MockRandomGenerator`](test/mocks/runtime/mocks.h) and\n[`Mock*TimeSource`](test/mocks/common.h) should be used.\n\n# Google style guides for other languages\n\n* [Python](https://google.github.io/styleguide/pyguide.html)\n* [Bash](https://google.github.io/styleguide/shell.xml)\n* [Bazel](https://bazel.build/versions/master/docs/skylark/build-style.html)\n"
  },
  {
    "path": "VERSION",
    "content": "1.16.0\n"
  },
  {
    "path": "WORKSPACE",
    "content": "workspace(name = \"envoy\")\n\nload(\"//bazel:api_binding.bzl\", \"envoy_api_binding\")\n\nenvoy_api_binding()\n\nload(\"//bazel:api_repositories.bzl\", \"envoy_api_dependencies\")\n\nenvoy_api_dependencies()\n\nload(\"//bazel:repositories.bzl\", \"envoy_dependencies\")\n\nenvoy_dependencies()\n\nload(\"//bazel:repositories_extra.bzl\", \"envoy_dependencies_extra\")\n\nenvoy_dependencies_extra()\n\nload(\"//bazel:dependency_imports.bzl\", \"envoy_dependency_imports\")\n\nenvoy_dependency_imports()\n"
  },
  {
    "path": "api/API_OVERVIEW.md",
    "content": "# Envoy v2 APIs for developers\n\n## Goals\n\nThis repository contains both the implemented and draft v2 JSON REST and gRPC\n[Envoy](https://github.com/envoyproxy/envoy/) APIs.\n\nVersion 2 of the Envoy API evolves existing APIs and introduces new APIs to:\n\n* Allow for more advanced load balancing through load and resource utilization reporting to management servers.\n* Improve N^2 health check scalability issues by optionally offloading health checking to other Envoy instances.\n* Support Envoy deployment in edge, sidecar and middle proxy deployment models via changes to the listener model,\n  CDS API, and EDS (formerly called SDS in v1) API.\n* Allow streaming updates from the management server on change, instead of polling APIs from Envoy. gRPC APIs will be supported\n  alongside JSON REST APIs to provide for this.\n* Ensure all Envoy runtime configuration is dynamically discoverable via API\n  calls, including listener configuration, certificates and runtime settings, which are today sourced from the filesystem. There\n  will still remain a static bootstrap configuration file that will specify items\n  unlikely to change during runtime, including the Envoy node identity, xDS\n  management server addresses, administration interface and tracing\n  configuration.\n* Revisit and where appropriate cleanup any v1 technical debt.\n\n## Status\n\nSee\n[here](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview.html#status)\nfor the current status of the v2 APIs.\n\nSee [here](CONTRIBUTING.md#api-changes) for the v2 API change process.\n\n## Principles\n\n* [Proto3](https://developers.google.com/protocol-buffers/docs/proto3) will be\n  used to specify the canonical API. This will provide directly the gRPC API and\n  via gRPC-JSON transcoding the JSON REST API. A textual YAML input will be\n  supported for filesystem configuration files (e.g. the bootstrap file), in\n  addition to JSON, as a syntactic convenience. 
YAML file contents will be\n  internally converted to JSON and then follow the standard JSON-proto3\n  conversion during Envoy config ingestion.\n\n* xDS APIs should support eventual consistency. For example, if RDS references a\n  cluster that has not yet been supplied by CDS, it should be silently ignored\n  and traffic not forwarded until the CDS update occurs. Stronger consistency\n  guarantees are possible if the management server is able to sequence the xDS\n  APIs carefully (for example by using the ADS API below). By following the\n  `[CDS, EDS, LDS, RDS]` sequence for all pertinent resources, it will be\n  possible to avoid traffic outages during configuration update.\n\n* The API is primarily intended for machine generation and consumption. It is\n  expected that the management server is responsible for mapping higher level\n  configuration concepts to API responses. Similarly, static configuration\n  fragments may be generated by templating tools, etc. The APIs and tools\n  used to generate xDS configuration are beyond the scope of the definitions in\n  this repository.\n\n* REST-JSON API equivalents will be provided for the basic singleton xDS\n  subscription services CDS/EDS/LDS/RDS/SDS. Advanced APIs such as HDS, ADS and\n  EDS multi-dimensional LB will be gRPC only. This avoids having to map\n  complicated bidirectional stream semantics onto REST.\n\n* Listeners will be immutable. Any updates to a listener via LDS will require\n  the draining of existing connections for the specific bound IP/port. As a\n  result, new requests will only be guaranteed to observe the new configuration\n  after existing connections have drained or the drain timeout.\n\n* Versioning will be expressed via [proto3 package\n  namespaces](https://developers.google.com/protocol-buffers/docs/proto3#packages),\n  i.e. `package envoy.api.v2;`.\n\n* Custom components (e.g. filters, resolvers, loggers) will use a reverse DNS naming scheme,\n  e.g. 
`com.google.widget`, `com.lyft.widget`.\n\n## APIs\n\nUnless otherwise stated, the APIs with the same names as v1 APIs have a similar role.\n\n* [Cluster Discovery Service (CDS)](envoy/api/v2/cds.proto).\n* [Endpoint Discovery Service (EDS)](envoy/api/v2/eds.proto). This has the same role as SDS in the [v1 API](https://www.envoyproxy.io/docs/envoy/latest/api-v1/cluster_manager/sds),\n  the new name better describes what the API does in practice. Advanced global load balancing capable of utilizing N-dimensional upstream metrics is now supported.\n* [Health Discovery Service (HDS)](envoy/service/discovery/v2/hds.proto). This new API supports efficient endpoint health discovery by the management server via the Envoy instances it manages. Individual Envoy instances\n  will typically receive HDS instructions to health check a subset of all\n  endpoints. The health check subset may not be a subset of the Envoy instance's\n  EDS endpoints.\n* [Listener Discovery Service (LDS)](envoy/api/v2/lds.proto). This new API supports dynamic discovery of the listener configuration (which ports to bind to, TLS details, filter chains, etc.).\n* [Metric Service (MS)](envoy/service/metrics/v2/metrics_service.proto). This new API allows Envoy to push (stream) metrics forever for servers to consume.\n* [Rate Limit Service (RLS)](envoy/service/ratelimit/v2/rls.proto)\n* [Route Discovery Service (RDS)](envoy/api/v2/rds.proto).\n* [Secret Discovery Service (SDS)](envoy/service/discovery/v2/sds.proto).\n\nIn addition to the above APIs, an aggregation API will be provided to allow for\nfine grained control over the sequencing of API updates across discovery\nservices:\n\n* [Aggregated Discovery Service (ADS)](envoy/api/v2/discovery.proto). 
In v2, RDS routes point to clusters, CDS provides cluster configuration and
  },
  {
    "path": "api/API_VERSIONING.md",
In everyday discussion and GitHub labels, we refer to the `v2`, `v3`, `vN`, `...` APIs. This has a\nspecific technical meaning. Any given message in the Envoy API, e.g. the `Bootstrap` at\n`envoy.config.bootstrap.v3.Bootstrap`, will transitively reference a number of packages in the Envoy\nAPI. These may be at `vN`, `v(N-1)`, etc. The Envoy API is technically a DAG of versioned package\nnamespaces. When we talk about the `vN xDS API`, we really refer to the `N` of the root\nconfiguration resources (e.g. bootstrap, xDS resources such as `Cluster`). The\nv3 API bootstrap configuration is `envoy.config.bootstrap.v3.Bootstrap`, even\nthough it might transitively reference `envoy.service.trace.v2`.
The guiding\nprinciple is that neither the wire format nor protobuf compiler generated language bindings should\nexperience a backward compatible break on a change. Specifically:\n\n* Fields should not be renumbered or have their types changed. This is standard proto development\n  procedure.\n\n* Renaming of fields or package namespaces for a proto must not occur. This is inherently dangerous,\n  since:\n  * Field renames break wire compatibility. This is stricter than standard proto development\n    procedure in the sense that it does not break binary wire format. However, it **does** break\n    loading of YAML/JSON into protos as well as text protos. Since we consider YAML/JSON to be first\n    class inputs, we must not change field names.\n\n  * For service definitions, the gRPC endpoint URL is inferred from package namespace, so this will\n    break client/server communication.\n\n  * For a message embedded in an `Any` object, the type URL, which the package namespace is a part\n    of, may be used by Envoy or other API consuming code. Currently, this applies to the top-level\n    resources embedded in `DiscoveryResponse` objects, e.g. `Cluster`, `Listener`, etc.\n\n  * Consuming code will break and require source code changes to match the API changes.\n\n* Some other changes are considered breaking for Envoy APIs that are usually considered safe in\n  terms of protobuf wire compatibility:\n  * Upgrading a singleton field to a repeated, e.g. `uint32 foo = 1;` to `repeated uint32 foo = 1`.\n    This changes the JSON wire representation and hence is considered a breaking change.\n\n  * Wrapping an existing field with `oneof`. This has no protobuf or JSON/YAML wire implications,\n    but is disruptive to various consuming stubs in languages such as Go, creating unnecessary\n    churn.\n\n  * Increasing the strictness of\n    [protoc-gen-validate](https://github.com/envoyproxy/protoc-gen-validate) annotations. 
Exceptions\n    may be granted for scenarios in which these stricter conditions model behavior already implied\n    structurally or by documentation.\n\nThe exception to the above policy is for API versions tagged `vNalpha`. Within an alpha major\nversion, arbitrary breaking changes are allowed.\n\nNote that changes to default values for wrapped types, e.g. `google.protobuf.UInt32Value` are not\ngoverned by the above policy. Any management server requiring stability across Envoy API or\nimplementations within a major version should set explicit values for these fields.\n\n# API lifecycle\n\nA new major version is a significant event in the xDS API ecosystem, inevitably requiring support\nfrom clients (Envoy, gRPC) and a large number of control planes, ranging from simple in-house custom\nmanagement servers to xDS-as-a-service offerings run by vendors. The [xDS API\nshepherds](https://github.com/orgs/envoyproxy/teams/api-shepherds) will make the decision to add a\nnew major version subject to the following constraints:\n* There exists sufficient technical debt in the xDS APIs in the existing supported major version\n  to justify the cost burden for xDS client/server implementations.\n* At least one year has elapsed since the last major version was cut.\n* Consultation with the Envoy community (via Envoy community call, `#xds` channel on Slack), as\n  well as gRPC OSS community (via reaching out to language maintainers) is made. This is not a veto\n  process; the API shepherds retain the right to move forward with a new major API version after\n  weighing this input with the first two considerations above.\n\nFollowing the release of a new major version, the API lifecycle follows a deprecation clock.\nEnvoy will support at most three major versions of any API package at all times:\n* The current stable major version, e.g. v3.\n* The previous stable major version, e.g. v2. 
This is needed to ensure that we provide at least 1\n  year for a supported major version to sunset. By supporting two stable major versions\n  simultaneously, this makes it easier to coordinate control plane and Envoy\n  rollouts as well. This previous stable major version will be supported for exactly 1\n  year after the introduction of the new current stable major version, after which it will be\n  removed from the Envoy implementation.\n* Optionally, the next experimental alpha major version, e.g. v4alpha. This is a release candidate\n  for the next stable major version. This is only generated when the current stable major version\n  requires a breaking change at the next cycle, e.g. a deprecation or field rename. This release\n  candidate is mechanically generated via the\n  [protoxform](https://github.com/envoyproxy/envoy/tree/master/tools/protoxform) tool from the\n  current stable major version, making use of annotations such as `deprecated = true`. This is not a\n  human editable artifact.\n\nAn example of how this might play out is that at the end of December in 2020, if a v4 major version\nis justified, we might freeze\n`envoy.config.bootstrap.v4alpha` and this package would then become the current stable major version\n`envoy.config.bootstrap.v4`. The `envoy.config.bootstrap.v3` package will become the previous stable\nmajor version and support for `envoy.config.bootstrap.v2` will be dropped from the Envoy\nimplementation. Note that some transitively referenced package, e.g.\n`envoy.config.filter.network.foo.v2` may remain at version 2 during this release, if no changes were\nmade to the referenced package. 
If no major version is justified at this point, the decision to cut\nv4 might occur at some point in 2021 or beyond, however v2 support will still be removed at the end\nof 2020.\n\nThe implication of this API lifecycle and clock is that any deprecated feature in the Envoy API will\nretain implementation support for at least 1-2 years.\n\nWe are currently working on a strategy to introduce minor versions\n(https://github.com/envoyproxy/envoy/issues/8416). This will bump the xDS API minor version on every\ndeprecation and field introduction/modification. This will provide an opportunity for the control\nplane to condition on client and major/minor API version support. Currently under discussion, but\nnot finalized will be the sunsetting of Envoy client support for deprecated features after a year\nof support within a major version. Please post to https://github.com/envoyproxy/envoy/issues/8416\nany thoughts around this.\n\n# New API features\n\nThe Envoy APIs can be [safely extended](https://cloud.google.com/apis/design/compatibility) with new\npackages, messages, enums, fields and enum values, while maintaining [backwards\ncompatibility](#backwards-compatibility). Additions to the API for a given package should normally\nonly be made to the *current stable major version*. 
The rationale for this policy is that:\n* The feature is immediately available to Envoy users who consume the current stable major version.\n  This would not be the case if the feature was placed in `vNalpha`.\n* `vNalpha` can be mechanically generated from `vN` without requiring developers to maintain the new\n  feature in both locations.\n* We encourage Envoy users to move to the current stable major version from the previous one to\n  consume new functionality.\n\n# When can an API change be made to a package's previous stable major version?\n\nAs a pragmatic concession, we allow API feature additions to the previous stable major version for a\nsingle quarter following a major API version increment. Any changes to the previous stable major\nversion must be manually reflected in a consistent manner in the current stable major version as\nwell.\n\n# How to make a breaking change across major versions\n\nWe maintain [backwards compatibility](#backwards-compatibility) within a major version but allow\nbreaking changes across major versions. This enables API deprecations, cleanups, refactoring and\nreorganization. The Envoy APIs have a stylized workflow for achieving this. There are two prescribed\nmethods, depending on whether the change is mechanical or manual.\n\n## Mechanical breaking changes\n\nField deprecations, renames, etc. are mechanical changes that are supported by the\n[protoxform](https://github.com/envoyproxy/envoy/tree/master/tools/protoxform) tool. These are\nguided by [annotations](STYLE.md#api-annotations).\n\n## Manual breaking changes\n\nA manual breaking change is distinct from the mechanical changes such as field deprecation, since in\ngeneral it requires new code and tests to be implemented in Envoy by hand. For example, if a developer\nwants to unify `HeaderMatcher` with `StringMatcher` in the route configuration, this is a likely\ncandidate for this class of change. The following steps are required:\n1. The new version of the feature, e.g. 
the `NewHeaderMatcher` message should be added, together\n   with referencing fields, in the current stable major version for the route configuration proto.\n2. The Envoy implementation should be changed to consume configuration from the fields added in (1).\n   Translation code (and tests) should be written to map from the existing field and messages to\n   (1).\n3. The old message/enum/field/enum value should be annotated as deprecated.\n4. At the next major version, `protoxform` will remove the deprecated version automatically.\n\nThis make-before-break approach ensures that API major version releases are predictable and\nmechanical, and has the bulk of the Envoy code and test changes owned by feature developers, rather\nthan the API owners. There will be no major `vN` initiative to address technical debt beyond that\nenabled by the above process.\n\n# Client features\n\nNot all clients will support all fields and features in a given major API version. In general, it is\npreferable to use Protobuf semantics to support this, for example:\n* Ignoring a field's contents is sufficient to indicate that the support is missing in a client.\n* Setting both deprecated and the new method for expressing a field if support for a range of\n  clients is desired (where this does not involve huge overhead or gymnastics).\n\nThis approach does not always work, for example:\n* A route matcher conjunct condition should not be ignored just because the client is missing the\n  ability to implement the match; this might result in route policy bypass.\n* A client may expect the server to provide a response in a certain format or encoding, for example\n  a JSON encoded `Struct`-in-`Any` representation of opaque extension configuration.\n\nFor this purpose, we have [client\nfeatures](https://www.envoyproxy.io/docs/envoy/latest/api/client_features).\n\n# One Definition Rule (ODR)\n\nTo avoid maintaining more than two stable major versions of a package, and to cope with 
Package organization and `BUILD` visibility constraints should be used to\nmaintain a shallow depth in the dependency tree for any given package.
  },
  {
    "path": "api/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"v2_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//envoy/admin/v2alpha:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/api/v2/cluster:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"//envoy/api/v2/listener:pkg\",\n        \"//envoy/api/v2/ratelimit:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/config/accesslog/v2:pkg\",\n        \"//envoy/config/bootstrap/v2:pkg\",\n        \"//envoy/config/cluster/aggregate/v2alpha:pkg\",\n        \"//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/config/cluster/redis:pkg\",\n        \"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/config/common/tap/v2alpha:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/config/filter/dubbo/router/v2alpha1:pkg\",\n        \"//envoy/config/filter/fault/v2:pkg\",\n        \"//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg\",\n        \"//envoy/config/filter/http/aws_lambda/v2alpha:pkg\",\n        \"//envoy/config/filter/http/aws_request_signing/v2alpha:pkg\",\n        \"//envoy/config/filter/http/buffer/v2:pkg\",\n        \"//envoy/config/filter/http/cache/v2alpha:pkg\",\n        \"//envoy/config/filter/http/compressor/v2:pkg\",\n        \"//envoy/config/filter/http/cors/v2:pkg\",\n        \"//envoy/config/filter/http/csrf/v2:pkg\",\n        \"//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/config/filter/http/dynamo/v2:pkg\",\n        \"//envoy/config/filter/http/ext_authz/v2:pkg\",\n        \"//envoy/config/filter/http/fault/v2:pkg\",\n        
\"//envoy/config/filter/http/grpc_http1_bridge/v2:pkg\",\n        \"//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg\",\n        \"//envoy/config/filter/http/grpc_stats/v2alpha:pkg\",\n        \"//envoy/config/filter/http/grpc_web/v2:pkg\",\n        \"//envoy/config/filter/http/gzip/v2:pkg\",\n        \"//envoy/config/filter/http/header_to_metadata/v2:pkg\",\n        \"//envoy/config/filter/http/health_check/v2:pkg\",\n        \"//envoy/config/filter/http/ip_tagging/v2:pkg\",\n        \"//envoy/config/filter/http/jwt_authn/v2alpha:pkg\",\n        \"//envoy/config/filter/http/lua/v2:pkg\",\n        \"//envoy/config/filter/http/on_demand/v2:pkg\",\n        \"//envoy/config/filter/http/original_src/v2alpha1:pkg\",\n        \"//envoy/config/filter/http/rate_limit/v2:pkg\",\n        \"//envoy/config/filter/http/rbac/v2:pkg\",\n        \"//envoy/config/filter/http/router/v2:pkg\",\n        \"//envoy/config/filter/http/squash/v2:pkg\",\n        \"//envoy/config/filter/http/tap/v2alpha:pkg\",\n        \"//envoy/config/filter/http/transcoder/v2:pkg\",\n        \"//envoy/config/filter/listener/http_inspector/v2:pkg\",\n        \"//envoy/config/filter/listener/original_dst/v2:pkg\",\n        \"//envoy/config/filter/listener/original_src/v2alpha1:pkg\",\n        \"//envoy/config/filter/listener/proxy_protocol/v2:pkg\",\n        \"//envoy/config/filter/listener/tls_inspector/v2:pkg\",\n        \"//envoy/config/filter/network/client_ssl_auth/v2:pkg\",\n        \"//envoy/config/filter/network/direct_response/v2:pkg\",\n        \"//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg\",\n        \"//envoy/config/filter/network/echo/v2:pkg\",\n        \"//envoy/config/filter/network/ext_authz/v2:pkg\",\n        \"//envoy/config/filter/network/http_connection_manager/v2:pkg\",\n        \"//envoy/config/filter/network/kafka_broker/v2alpha1:pkg\",\n        \"//envoy/config/filter/network/local_rate_limit/v2alpha:pkg\",\n        
\"//envoy/config/filter/network/mongo_proxy/v2:pkg\",\n        \"//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg\",\n        \"//envoy/config/filter/network/rate_limit/v2:pkg\",\n        \"//envoy/config/filter/network/rbac/v2:pkg\",\n        \"//envoy/config/filter/network/redis_proxy/v2:pkg\",\n        \"//envoy/config/filter/network/sni_cluster/v2:pkg\",\n        \"//envoy/config/filter/network/tcp_proxy/v2:pkg\",\n        \"//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg\",\n        \"//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg\",\n        \"//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg\",\n        \"//envoy/config/filter/thrift/router/v2alpha1:pkg\",\n        \"//envoy/config/filter/udp/udp_proxy/v2alpha:pkg\",\n        \"//envoy/config/grpc_credential/v2alpha:pkg\",\n        \"//envoy/config/health_checker/redis/v2:pkg\",\n        \"//envoy/config/listener/v2:pkg\",\n        \"//envoy/config/metrics/v2:pkg\",\n        \"//envoy/config/overload/v2alpha:pkg\",\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"//envoy/config/rbac/v2:pkg\",\n        \"//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg\",\n        \"//envoy/config/resource_monitor/injected_resource/v2alpha:pkg\",\n        \"//envoy/config/retry/omit_canary_hosts/v2:pkg\",\n        \"//envoy/config/retry/omit_host_metadata/v2:pkg\",\n        \"//envoy/config/retry/previous_hosts/v2:pkg\",\n        \"//envoy/config/retry/previous_priorities:pkg\",\n        \"//envoy/config/trace/v2:pkg\",\n        \"//envoy/config/trace/v2alpha:pkg\",\n        \"//envoy/config/transport_socket/alts/v2alpha:pkg\",\n        \"//envoy/config/transport_socket/raw_buffer/v2:pkg\",\n        \"//envoy/config/transport_socket/tap/v2alpha:pkg\",\n        \"//envoy/data/accesslog/v2:pkg\",\n        \"//envoy/data/cluster/v2alpha:pkg\",\n        \"//envoy/data/core/v2alpha:pkg\",\n        \"//envoy/data/dns/v2alpha:pkg\",\n        \"//envoy/data/tap/v2alpha:pkg\",\n        
\"//envoy/service/accesslog/v2:pkg\",\n        \"//envoy/service/auth/v2:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"//envoy/service/event_reporting/v2alpha:pkg\",\n        \"//envoy/service/load_stats/v2:pkg\",\n        \"//envoy/service/metrics/v2:pkg\",\n        \"//envoy/service/ratelimit/v2:pkg\",\n        \"//envoy/service/status/v2:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"//envoy/service/trace/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"//envoy/type/metadata/v2:pkg\",\n        \"//envoy/type/tracing/v2:pkg\",\n    ],\n)\n\nproto_library(\n    name = \"v3_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//envoy/admin/v3:pkg\",\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/bootstrap/v3:pkg\",\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/common/matcher/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/config/filter/thrift/router/v2alpha1:pkg\",\n        \"//envoy/config/grpc_credential/v3:pkg\",\n        \"//envoy/config/health_checker/redis/v2:pkg\",\n        \"//envoy/config/listener/v3:pkg\",\n        \"//envoy/config/metrics/v3:pkg\",\n        \"//envoy/config/overload/v3:pkg\",\n        \"//envoy/config/ratelimit/v3:pkg\",\n        \"//envoy/config/rbac/v3:pkg\",\n        \"//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg\",\n        \"//envoy/config/resource_monitor/injected_resource/v2alpha:pkg\",\n        \"//envoy/config/retry/omit_canary_hosts/v2:pkg\",\n        \"//envoy/config/retry/previous_hosts/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/config/tap/v3:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"//envoy/data/accesslog/v3:pkg\",\n        \"//envoy/data/cluster/v3:pkg\",\n        \"//envoy/data/core/v3:pkg\",\n        \"//envoy/data/dns/v3:pkg\",\n        
\"//envoy/data/tap/v3:pkg\",\n        \"//envoy/extensions/access_loggers/file/v3:pkg\",\n        \"//envoy/extensions/access_loggers/grpc/v3:pkg\",\n        \"//envoy/extensions/access_loggers/wasm/v3:pkg\",\n        \"//envoy/extensions/clusters/aggregate/v3:pkg\",\n        \"//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg\",\n        \"//envoy/extensions/clusters/redis/v3:pkg\",\n        \"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg\",\n        \"//envoy/extensions/common/ratelimit/v3:pkg\",\n        \"//envoy/extensions/common/tap/v3:pkg\",\n        \"//envoy/extensions/compression/gzip/compressor/v3:pkg\",\n        \"//envoy/extensions/compression/gzip/decompressor/v3:pkg\",\n        \"//envoy/extensions/filters/common/fault/v3:pkg\",\n        \"//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg\",\n        \"//envoy/extensions/filters/http/admission_control/v3alpha:pkg\",\n        \"//envoy/extensions/filters/http/aws_lambda/v3:pkg\",\n        \"//envoy/extensions/filters/http/aws_request_signing/v3:pkg\",\n        \"//envoy/extensions/filters/http/buffer/v3:pkg\",\n        \"//envoy/extensions/filters/http/cache/v3alpha:pkg\",\n        \"//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg\",\n        \"//envoy/extensions/filters/http/compressor/v3:pkg\",\n        \"//envoy/extensions/filters/http/cors/v3:pkg\",\n        \"//envoy/extensions/filters/http/csrf/v3:pkg\",\n        \"//envoy/extensions/filters/http/decompressor/v3:pkg\",\n        \"//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/http/dynamo/v3:pkg\",\n        \"//envoy/extensions/filters/http/ext_authz/v3:pkg\",\n        \"//envoy/extensions/filters/http/fault/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg\",\n        
\"//envoy/extensions/filters/http/grpc_stats/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_web/v3:pkg\",\n        \"//envoy/extensions/filters/http/gzip/v3:pkg\",\n        \"//envoy/extensions/filters/http/header_to_metadata/v3:pkg\",\n        \"//envoy/extensions/filters/http/health_check/v3:pkg\",\n        \"//envoy/extensions/filters/http/ip_tagging/v3:pkg\",\n        \"//envoy/extensions/filters/http/jwt_authn/v3:pkg\",\n        \"//envoy/extensions/filters/http/local_ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/http/lua/v3:pkg\",\n        \"//envoy/extensions/filters/http/oauth2/v3alpha:pkg\",\n        \"//envoy/extensions/filters/http/on_demand/v3:pkg\",\n        \"//envoy/extensions/filters/http/original_src/v3:pkg\",\n        \"//envoy/extensions/filters/http/ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/http/rbac/v3:pkg\",\n        \"//envoy/extensions/filters/http/router/v3:pkg\",\n        \"//envoy/extensions/filters/http/squash/v3:pkg\",\n        \"//envoy/extensions/filters/http/tap/v3:pkg\",\n        \"//envoy/extensions/filters/http/wasm/v3:pkg\",\n        \"//envoy/extensions/filters/listener/http_inspector/v3:pkg\",\n        \"//envoy/extensions/filters/listener/original_dst/v3:pkg\",\n        \"//envoy/extensions/filters/listener/original_src/v3:pkg\",\n        \"//envoy/extensions/filters/listener/proxy_protocol/v3:pkg\",\n        \"//envoy/extensions/filters/listener/tls_inspector/v3:pkg\",\n        \"//envoy/extensions/filters/network/client_ssl_auth/v3:pkg\",\n        \"//envoy/extensions/filters/network/direct_response/v3:pkg\",\n        \"//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg\",\n        \"//envoy/extensions/filters/network/dubbo_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/echo/v3:pkg\",\n        \"//envoy/extensions/filters/network/ext_authz/v3:pkg\",\n        \"//envoy/extensions/filters/network/http_connection_manager/v3:pkg\",\n        
\"//envoy/extensions/filters/network/kafka_broker/v3:pkg\",\n        \"//envoy/extensions/filters/network/local_ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/network/mongo_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/mysql_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg\",\n        \"//envoy/extensions/filters/network/ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/network/rbac/v3:pkg\",\n        \"//envoy/extensions/filters/network/redis_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/sni_cluster/v3:pkg\",\n        \"//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg\",\n        \"//envoy/extensions/filters/network/tcp_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/network/thrift_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/wasm/v3:pkg\",\n        \"//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg\",\n        \"//envoy/extensions/filters/udp/udp_proxy/v3:pkg\",\n        \"//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg\",\n        \"//envoy/extensions/internal_redirect/previous_routes/v3:pkg\",\n        \"//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg\",\n        \"//envoy/extensions/network/socket_interface/v3:pkg\",\n        \"//envoy/extensions/retry/host/omit_host_metadata/v3:pkg\",\n        \"//envoy/extensions/retry/priority/previous_priorities/v3:pkg\",\n        \"//envoy/extensions/stat_sinks/wasm/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/alts/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/quic/v3:pkg\",\n        
\"//envoy/extensions/transport_sockets/raw_buffer/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tap/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/extensions/upstreams/http/generic/v3:pkg\",\n        \"//envoy/extensions/upstreams/http/http/v3:pkg\",\n        \"//envoy/extensions/upstreams/http/tcp/v3:pkg\",\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"//envoy/extensions/watchdog/abort_action/v3alpha:pkg\",\n        \"//envoy/extensions/watchdog/profile_action/v3alpha:pkg\",\n        \"//envoy/service/accesslog/v3:pkg\",\n        \"//envoy/service/auth/v3:pkg\",\n        \"//envoy/service/cluster/v3:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"//envoy/service/endpoint/v3:pkg\",\n        \"//envoy/service/event_reporting/v3:pkg\",\n        \"//envoy/service/extension/v3:pkg\",\n        \"//envoy/service/health/v3:pkg\",\n        \"//envoy/service/listener/v3:pkg\",\n        \"//envoy/service/load_stats/v3:pkg\",\n        \"//envoy/service/metrics/v3:pkg\",\n        \"//envoy/service/ratelimit/v3:pkg\",\n        \"//envoy/service/route/v3:pkg\",\n        \"//envoy/service/runtime/v3:pkg\",\n        \"//envoy/service/secret/v3:pkg\",\n        \"//envoy/service/status/v3:pkg\",\n        \"//envoy/service/tap/v3:pkg\",\n        \"//envoy/service/trace/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/metadata/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n    ],\n)\n\nproto_library(\n    name = \"all_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":v2_protos\",\n        \":v3_protos\",\n    ],\n)\n"
  },
  {
    "path": "api/CONTRIBUTING.md",
    "content": "# Contributing guide\n\n## API changes\n\nAll API changes should follow the [style guide](STYLE.md).\n\nAPI changes are regular PRs in https://github.com/envoyproxy/envoy for the API/configuration\nchanges. They may be as part of a larger implementation PR. Please follow the standard Bazel and CI\nprocess for validating build/test sanity of `api/` before submitting a PR.\n\n*Note: New .proto files should be added to\n[BUILD](https://github.com/envoyproxy/envoy/blob/master/api/versioning/BUILD) in order to get the RSTs generated.*\n\n## Documentation changes\n\nThe Envoy project takes documentation seriously. We view it as one of the reasons the project has\nseen rapid adoption. As such, it is required that all features have complete documentation. This is\ngenerally going to be a combination of API documentation as well as architecture/overview\ndocumentation.\n\n### Building documentation locally\n\nThe documentation can be built locally in the root of https://github.com/envoyproxy/envoy via:\n\n```\ndocs/build.sh\n```\n\nTo skip configuration examples validation:\n\n```\nSPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh\n```\n\nOr to use a hermetic Docker container:\n\n```\n./ci/run_envoy_docker.sh './ci/do_ci.sh docs'\n```\n\nThis process builds RST documentation directly from the proto files, merges it with the static RST\nfiles, and then runs [Sphinx](https://www.sphinx-doc.org/en/stable/rest.html) over the entire tree to\nproduce the final documentation. The generated RST files are not committed as they are regenerated\nevery time the documentation is built.\n\n### Viewing documentation\n\nOnce the documentation is built, it is available rooted at `generated/docs/index.html`. The\ngenerated RST files are also viewable in `generated/rst`.\n\nNote also that the generated documentation can be viewed in CI:\n\n1. Open docs job in CircleCI.\n2. Navigate to \"artifacts\" tab.\n3. 
Expand files and click on `index.html`.\n\nIf you do not see an artifacts tab this is a bug in CircleCI. Try logging out and logging back in.\n\n### Documentation guidelines\n\nThe following are some general guidelines around documentation.\n\n* Cross link as much as possible. Sphinx is fantastic at this. Use it! See ample examples with the\n  existing documentation as a guide.\n* Please use a **single space** after a period in documentation so that all generated text is\n  consistent.\n* Comments can be left inside comments if needed (that's pretty deep, right?) via the `[#comment:]`\n  special tag. E.g.,\n\n  ```\n  // This is a really cool field!\n  // [#comment:TODO(mattklein123): Do something cooler]\n  string foo_field = 3;\n  ```\n\n* Prefer *italics* for emphasis as `backtick` emphasis is somewhat jarring in our Sphinx theme.\n* All documentation is expected to use proper English grammar with proper punctuation. If you are\n  not a fluent English speaker please let us know and we will help out.\n"
  },
  {
    "path": "api/README.md",
    "content": "# Data plane API\n\nThis tree hosts the configuration and APIs that drive [Envoy](https://www.envoyproxy.io/). The\nAPIs are also in some cases used by other proxy solutions that aim to interoperate with management\nsystems and configuration generators that are built against this standard. Thus, we consider these a\nset of *universal data plane* APIs. See [this](https://medium.com/@mattklein123/the-universal-data-plane-api-d15cec7a)\nblog post for more information on the universal data plane concept.\n\n# Repository structure\n\nThe API tree can be found at two locations:\n* https://github.com/envoyproxy/envoy/tree/master/api - canonical read/write home for the APIs.\n* https://github.com/envoyproxy/data-plane-api - read-only mirror of\n  https://github.com/envoyproxy/envoy/tree/master/api, providing the ability to consume the data\n  plane APIs without the Envoy implementation.\n\n# Further API reading\n\n* [API overview for developers](API_OVERVIEW.md)\n* [API overview for users](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview#)\n* [xDS protocol overview](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol)\n* [Contributing guide](CONTRIBUTING.md)\n"
  },
  {
    "path": "api/STYLE.md",
    "content": "# API style guidelines\n\nGenerally follow guidance at https://cloud.google.com/apis/design/, in\nparticular for proto3 as described at:\n\n* https://cloud.google.com/apis/design/proto3\n* https://cloud.google.com/apis/design/naming_convention\n* https://developers.google.com/protocol-buffers/docs/style\n\nA key aspect of our API style is maintaining stability by following the [API versioning\nguidelines](API_VERSIONING.md). All developers must familiarize themselves with these guidelines,\nany PR which makes breaking changes to the API will not be merged.\n\nIn addition, the following conventions should be followed:\n\n* Every proto directory should have a `README.md` describing its content. See\n  for example [envoy.service](envoy/service/README.md).\n\n* The data plane APIs are primarily intended for machine generation and consumption.\n  It is expected that the management server is responsible for mapping higher\n  level configuration concepts to concrete API concepts. Similarly, static configuration\n  fragments may be generated by tools and UIs, etc. The APIs and tools used\n  to generate xDS configuration are beyond the scope of the definitions in this\n  repository.\n\n* Use [wrapped scalar\n  types](https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto)\n  where there is a real need for the field to have a default value that does not\n  match the proto3 defaults (0/false/\"\"). This should not be done for fields\n  where the proto3 defaults make sense. All things being equal, pick appropriate\n  logic, e.g. enable vs. disable for a `bool` field, such that the proto3\n  defaults work, but only where this doesn't result in API gymnastics.\n\n* Use a `[#not-implemented-hide:]` `protodoc` annotation in comments for fields that lack Envoy\n  implementation. 
These indicate that the entity is not implemented in Envoy and the entity\n  should be hidden from the Envoy documentation.\n\n* Always use plural field names for `repeated` fields, such as `filters`.\n\n* Due to the fact that we consider JSON/YAML to be first class inputs, we cannot easily change a\n  a singular field to a repeated field (both due to JSON/YAML array structural differences as well\n  as singular vs. plural field naming). If there is a reasonable expectation that a field may need\n  to be repeated in the future, but we don't need it to be repeated right away, consider making it\n  repeated now but using constraints to enforce a maximum repeated size of 1. E.g.:\n\n  ```proto\n  repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1, max_items: 1}];\n  ```\n\n* Always use upper camel case names for message types and enum types without embedded\n  acronyms, such as `HttpRequest`.\n\n* Prefer `oneof` selections to boolean overloads of fields, for example, prefer:\n\n  ```proto\n  oneof path_specifier {\n    string simple_path = 1;\n    string regex_path = 2;\n  }\n  ```\n\n  to\n\n  ```proto\n  string path = 1;\n  bool path_is_regex = 2;\n  ```\n\n  This is more efficient, extendable and self-describing.\n\n* The API includes two types for representing [percents](envoy/type/percent.proto). `Percent` is\n  effectively a double value in the range 0.0-100.0. `FractionalPercent` is an integral fraction\n  that can be used to create a truncated percentage also in the range 0.0-100.0. In high performance\n  paths, `FractionalPercent` is preferred as randomness calculations can be performed using integral\n  modulo and comparison operations only without any floating point conversions. Typically, most\n  users do not need infinite precision in these paths.\n\n* For enum types, if one of the enum values is used for most cases, make it the\n  first enum value with `0` numeric value. 
Otherwise, define the first enum\n  value like `TYPE_NAME_UNSPECIFIED = 0`, and treat it as an error. This design\n  pattern forces developers to explicitly choose the correct enum value for\n  their use case, and avoid misunderstanding of the default behavior.\n\n* Proto fields should be sorted logically, not by field number.\n\n## Package organization\n\nAPI definitions are layered hierarchically in packages from top-to-bottom as following:\n- `envoy.extensions` contains all definitions for the extensions, the package should match the structure of the `source` directory.\n- `envoy.service` contains gRPC definitions of supporting services and top-level messages for the services.\ne.g. `envoy.service.route.v3` contains RDS, `envoy.service.listener.v3` contains LDS.\n- `envoy.config` contains other definitions for service configuration, bootstrap and some legacy core types.\n- `envoy.data` contains data format declaration for data types that Envoy produces.\n- `envoy.type` contains common protobuf types such as percent, range and matchers.\n\nExtensions should use the regular hierarchy. For example, configuration for network filters belongs\nin a package under `envoy.extensions.filter.network`.\n\n## Adding an extension configuration to the API\n\nExtensions must currently be added as v3 APIs following the [package\norganization](#package-organization) above.\nTo add an extension config to the API, the steps below should be followed:\n\n1. If this is still WiP and subject to breaking changes, use `vNalpha` instead of `vN` in steps\n   below. Refer to the [Cache filter config](envoy/extensions/filter/http/cache/v3alpha/cache.proto)\n   as an example of `v3alpha`, and the\n   [Buffer filter config](envoy/extensions/filter/http/buffer/v3/buffer.proto) as an example of `v3`.\n1. 
Place the v3 extension configuration `.proto` in `api/envoy/extensions`, e.g.\n   `api/envoy/extensions/filter/http/foobar/v3/foobar.proto` together with an initial BUILD file:\n   ```bazel\n   load(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\n   licenses([\"notice\"])  # Apache 2\n\n   api_proto_package(\n       deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n   )\n   ```\n1. Add to the v3 extension config proto `import \"udpa/annotations/migrate.proto\";`\n   and `import \"udpa/annotations/status.proto\";`\n1. If this is still WiP and subject to breaking changes, set\n   `option (udpa.annotations.file_status).work_in_progress = true;`.\n1. Add to the v3 extension config proto a file level\n   `option (udpa.annotations.file_status).package_version_status = ACTIVE;`.\n   This is required to automatically include the config proto in [api/versioning/BUILD](versioning/BUILD).\n1. Add a reference to the v3 extension config in (1) in [api/versioning/BUILD](versioning/BUILD) under `active_protos`.\n1. Run `./tools/proto_format/proto_format.sh fix`. This should regenerate the `BUILD` file,\n   reformat `foobar.proto` as needed and also generate the v4alpha extension config (if needed),\n   together with shadow API protos.\n1. `git add api/ generated_api_shadow/` to add any new files to your Git index.\n\n## API annotations\n\nA number of annotations are used in the Envoy APIs to provide additional API\nmetadata. 
We describe these annotations below by category.\n\n### Field level\n* `[deprecated = true]` to denote fields that are deprecated in a major version.\n  These fields are slated for removal at the next major cycle and follow the\n  [breaking change policy](../CONTRIBUTING.md#breaking-change-policy).\n* `[envoy.annotations.disallowed_by_default = true]` to denote fields that have\n  been disallowed by default as per the [breaking change policy](../CONTRIBUTING.md#breaking-change-policy).\n* `[(udpa.annotations.field_migrate).rename = \"<new field name>\"]` to denote that\n  the field will be renamed to a given name in the next API major version.\n* `[(udpa.annotations.field_migrate).oneof_promotion = \"<oneof name>\"]` to denote that\n  the field will be promoted to a given `oneof` in the next API major version.\n* `[(udpa.annotations.sensitive) = true]` to denote sensitive fields that\n  should be redacted in output such as logging or configuration dumps.\n* [PGV annotations](https://github.com/envoyproxy/protoc-gen-validate) to denote field\n  value constraints.\n\n### Enum value level\n* `[(udpa.annotations.enum_value_migrate).rename = \"new enum value name\"]` to denote that\n  the enum value will be renamed to a given name in the next API major version.\n\n### Message level\n* `option (udpa.annotations.versioning).previous_message_type = \"<message type\n  name>\";` to denote the previous type name for an upgraded message. You should\n  never have to write these manually, they are generated by `protoxform`.\n\n### Service level\n* `option (envoy.annotations.resource).type = \"<resource type name>\";` to denote\n  the resource type for an xDS service definition.\n\n### File level\n* `option (udpa.annotations.file_migrate).move_to_package = \"<package name>\";`\n  to denote that in the next major version of the API, the file will be moved to\n  the given package. 
This is consumed by `protoxform`.\n* `option (udpa.annotations.file_status).work_in_progress = true;` to denote a\n  file that is still work-in-progress and subject to breaking changes.\n"
  },
  {
    "path": "api/bazel/BUILD",
    "content": "load(\"@io_bazel_rules_go//proto:compiler.bzl\", \"go_proto_compiler\")\n\nlicenses([\"notice\"])  # Apache 2\n\ngo_proto_compiler(\n    name = \"pgv_plugin_go\",\n    options = [\"lang=go\"],\n    plugin = \"@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate\",\n    suffix = \".pb.validate.go\",\n    valid_archive = False,\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "api/bazel/api_build_system.bzl",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_test\")\nload(\"@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl\", \"pgv_cc_proto_library\")\nload(\"@com_github_grpc_grpc//bazel:cc_grpc_library.bzl\", \"cc_grpc_library\")\nload(\"@com_google_protobuf//:protobuf.bzl\", _py_proto_library = \"py_proto_library\")\nload(\"@io_bazel_rules_go//proto:def.bzl\", \"go_proto_library\")\nload(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\nload(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\nload(\n    \"//bazel:external_proto_deps.bzl\",\n    \"EXTERNAL_PROTO_CC_BAZEL_DEP_MAP\",\n    \"EXTERNAL_PROTO_GO_BAZEL_DEP_MAP\",\n    \"EXTERNAL_PROTO_PY_BAZEL_DEP_MAP\",\n)\n\n_PY_PROTO_SUFFIX = \"_py_proto\"\n_CC_PROTO_SUFFIX = \"_cc_proto\"\n_CC_GRPC_SUFFIX = \"_cc_grpc\"\n_GO_PROTO_SUFFIX = \"_go_proto\"\n_GO_IMPORTPATH_PREFIX = \"github.com/envoyproxy/go-control-plane/\"\n\n_COMMON_PROTO_DEPS = [\n    \"@com_google_protobuf//:any_proto\",\n    \"@com_google_protobuf//:descriptor_proto\",\n    \"@com_google_protobuf//:duration_proto\",\n    \"@com_google_protobuf//:empty_proto\",\n    \"@com_google_protobuf//:struct_proto\",\n    \"@com_google_protobuf//:timestamp_proto\",\n    \"@com_google_protobuf//:wrappers_proto\",\n    \"@com_google_googleapis//google/api:http_proto\",\n    \"@com_google_googleapis//google/api:httpbody_proto\",\n    \"@com_google_googleapis//google/api:annotations_proto\",\n    \"@com_google_googleapis//google/rpc:status_proto\",\n    \"@com_envoyproxy_protoc_gen_validate//validate:validate_proto\",\n]\n\ndef _proto_mapping(dep, proto_dep_map, proto_suffix):\n    mapped = proto_dep_map.get(dep)\n    if mapped == None:\n        prefix = \"@\" + Label(dep).workspace_name if not dep.startswith(\"//\") else \"\"\n        return prefix + \"//\" + Label(dep).package + \":\" + Label(dep).name + proto_suffix\n    return mapped\n\ndef _go_proto_mapping(dep):\n    return _proto_mapping(dep, EXTERNAL_PROTO_GO_BAZEL_DEP_MAP, 
_GO_PROTO_SUFFIX)\n\ndef _cc_proto_mapping(dep):\n    return _proto_mapping(dep, EXTERNAL_PROTO_CC_BAZEL_DEP_MAP, _CC_PROTO_SUFFIX)\n\ndef _py_proto_mapping(dep):\n    return _proto_mapping(dep, EXTERNAL_PROTO_PY_BAZEL_DEP_MAP, _PY_PROTO_SUFFIX)\n\n# TODO(htuch): Convert this to native py_proto_library once\n# https://github.com/bazelbuild/bazel/issues/3935 and/or\n# https://github.com/bazelbuild/bazel/issues/2626 are resolved.\ndef _api_py_proto_library(name, srcs = [], deps = []):\n    _py_proto_library(\n        name = name + _PY_PROTO_SUFFIX,\n        srcs = srcs,\n        default_runtime = \"@com_google_protobuf//:protobuf_python\",\n        protoc = \"@com_google_protobuf//:protoc\",\n        deps = [_py_proto_mapping(dep) for dep in deps] + [\n            \"@com_envoyproxy_protoc_gen_validate//validate:validate_py\",\n            \"@com_google_googleapis//google/rpc:status_py_proto\",\n            \"@com_google_googleapis//google/api:annotations_py_proto\",\n            \"@com_google_googleapis//google/api:http_py_proto\",\n            \"@com_google_googleapis//google/api:httpbody_py_proto\",\n        ],\n        visibility = [\"//visibility:public\"],\n    )\n\n# This defines googleapis py_proto_library. 
The repository does not provide its definition and requires\n# overriding it in the consuming project (see https://github.com/grpc/grpc/issues/19255 for more details).\ndef py_proto_library(name, deps = [], plugin = None):\n    srcs = [dep[:-6] + \".proto\" if dep.endswith(\"_proto\") else dep for dep in deps]\n    proto_deps = []\n\n    # py_proto_library in googleapis specifies *_proto rules in dependencies.\n    # By rewriting *_proto to *.proto above, the dependencies in *_proto rules are not preserved.\n    # As a workaround, manually specify the proto dependencies for the imported python rules.\n    if name == \"annotations_py_proto\":\n        proto_deps = proto_deps + [\":http_py_proto\"]\n\n    # checked.proto depends on syntax.proto, we have to add this dependency manually as well.\n    if name == \"checked_py_proto\":\n        proto_deps = proto_deps + [\":syntax_py_proto\"]\n\n    # py_proto_library does not support plugin as an argument yet at gRPC v1.25.0:\n    # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72.\n    # plugin should also be passed in here when gRPC version is greater than v1.25.x.\n    _py_proto_library(\n        name = name,\n        srcs = srcs,\n        default_runtime = \"@com_google_protobuf//:protobuf_python\",\n        protoc = \"@com_google_protobuf//:protoc\",\n        deps = proto_deps + [\"@com_google_protobuf//:protobuf_python\"],\n        visibility = [\"//visibility:public\"],\n    )\n\ndef _api_cc_grpc_library(name, proto, deps = []):\n    cc_grpc_library(\n        name = name,\n        srcs = [proto],\n        deps = deps,\n        proto_only = False,\n        grpc_only = True,\n        visibility = [\"//visibility:public\"],\n    )\n\ndef api_cc_py_proto_library(\n        name,\n        visibility = [\"//visibility:private\"],\n        srcs = [],\n        deps = [],\n        linkstatic = 0,\n        has_services = 0):\n    relative_name = \":\" + name\n    proto_library(\n        name = name,\n     
   srcs = srcs,\n        deps = deps + _COMMON_PROTO_DEPS,\n        visibility = visibility,\n    )\n    cc_proto_library_name = name + _CC_PROTO_SUFFIX\n    pgv_cc_proto_library(\n        name = cc_proto_library_name,\n        linkstatic = linkstatic,\n        cc_deps = [_cc_proto_mapping(dep) for dep in deps] + [\n            \"@com_google_googleapis//google/api:http_cc_proto\",\n            \"@com_google_googleapis//google/api:httpbody_cc_proto\",\n            \"@com_google_googleapis//google/api:annotations_cc_proto\",\n            \"@com_google_googleapis//google/rpc:status_cc_proto\",\n        ],\n        deps = [relative_name],\n        visibility = [\"//visibility:public\"],\n    )\n    _api_py_proto_library(name, srcs, deps)\n\n    # Optionally define gRPC services\n    if has_services:\n        # TODO: when Python services are required, add to the below stub generations.\n        cc_grpc_name = name + _CC_GRPC_SUFFIX\n        cc_proto_deps = [cc_proto_library_name] + [_cc_proto_mapping(dep) for dep in deps]\n        _api_cc_grpc_library(name = cc_grpc_name, proto = relative_name, deps = cc_proto_deps)\n\ndef api_cc_test(name, **kwargs):\n    cc_test(\n        name = name,\n        **kwargs\n    )\n\ndef api_go_test(name, **kwargs):\n    go_test(\n        name = name,\n        **kwargs\n    )\n\ndef api_proto_package(\n        name = \"pkg\",\n        srcs = [],\n        deps = [],\n        has_services = False,\n        visibility = [\"//visibility:public\"]):\n    if srcs == []:\n        srcs = native.glob([\"*.proto\"])\n\n    name = \"pkg\"\n    api_cc_py_proto_library(\n        name = name,\n        visibility = visibility,\n        srcs = srcs,\n        deps = deps,\n        has_services = has_services,\n    )\n\n    compilers = [\"@io_bazel_rules_go//proto:go_proto\", \"@envoy_api//bazel:pgv_plugin_go\"]\n    if has_services:\n        compilers = [\"@io_bazel_rules_go//proto:go_grpc\", \"@envoy_api//bazel:pgv_plugin_go\"]\n\n    # Because RBAC proro 
depends on googleapis syntax.proto and checked.proto,\n    # which share the same go proto library, it causes duplicative dependencies.\n    # Thus, we use depset().to_list() to remove duplicated depenencies.\n    go_proto_library(\n        name = name + _GO_PROTO_SUFFIX,\n        compilers = compilers,\n        importpath = _GO_IMPORTPATH_PREFIX + native.package_name(),\n        proto = name,\n        visibility = [\"//visibility:public\"],\n        deps = depset([_go_proto_mapping(dep) for dep in deps] + [\n            \"@com_github_golang_protobuf//ptypes:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/any:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/duration:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/struct:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/timestamp:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/wrappers:go_default_library\",\n            \"@com_envoyproxy_protoc_gen_validate//validate:go_default_library\",\n            \"@com_google_googleapis//google/api:annotations_go_proto\",\n            \"@com_google_googleapis//google/rpc:status_go_proto\",\n        ]).to_list(),\n    )\n"
  },
  {
    "path": "api/bazel/envoy_http_archive.bzl",
    "content": "load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")\n\ndef envoy_http_archive(name, locations, **kwargs):\n    # `existing_rule_keys` contains the names of repositories that have already\n    # been defined in the Bazel workspace. By skipping repos with existing keys,\n    # users can override dependency versions by using standard Bazel repository\n    # rules in their WORKSPACE files.\n    existing_rule_keys = native.existing_rules().keys()\n    if name in existing_rule_keys:\n        # This repository has already been defined, probably because the user\n        # wants to override the version. Do nothing.\n        return\n    loc_key = kwargs.pop(\"repository_key\", name)\n    location = locations[loc_key]\n\n    # HTTP tarball at a given URL. Add a BUILD file if requested.\n    http_archive(\n        name = name,\n        urls = location[\"urls\"],\n        sha256 = location[\"sha256\"],\n        strip_prefix = location.get(\"strip_prefix\", \"\"),\n        **kwargs\n    )\n"
  },
  {
    "path": "api/bazel/external_proto_deps.bzl",
    "content": "# Any external dependency imported in the api/ .protos requires entries in\n# the maps below, to allow the Bazel proto and language specific bindings to be\n# inferred from the import directives.\n#\n# This file needs to be interpreted as both Python 3 and Starlark, so only the\n# common subset of Python should be used.\n\n# This maps from .proto import directive path to the Bazel dependency path for\n# external dependencies. Since BUILD files are generated, this is the canonical\n# place to define this mapping.\nEXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = {\n    \"google/api/expr/v1alpha1/checked.proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\",\n    \"google/api/expr/v1alpha1/syntax.proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\",\n    \"metrics.proto\": \"@prometheus_metrics_model//:client_model\",\n    \"opencensus/proto/trace/v1/trace.proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\",\n    \"opencensus/proto/trace/v1/trace_config.proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\",\n}\n\n# This maps from the Bazel proto_library target to the Go language binding target for external dependencies.\nEXTERNAL_PROTO_GO_BAZEL_DEP_MAP = {\n    \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto\",\n    \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go\",\n}\n\n# This maps from the Bazel proto_library target to the C++ language binding target for external dependencies.\nEXTERNAL_PROTO_CC_BAZEL_DEP_MAP = {\n    
\"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:checked_cc_proto\",\n    \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto_cc\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_cc\",\n}\n\n# This maps from the Bazel proto_library target to the Python language binding target for external dependencies.\nEXTERNAL_PROTO_PY_BAZEL_DEP_MAP = {\n    \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:checked_py_proto\",\n    \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto_py\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_py\",\n}\n"
  },
  {
    "path": "api/bazel/repositories.bzl",
    "content": "load(\":envoy_http_archive.bzl\", \"envoy_http_archive\")\nload(\":repository_locations.bzl\", \"REPOSITORY_LOCATIONS\")\n\ndef api_dependencies():\n    envoy_http_archive(\n        \"bazel_skylib\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        \"com_envoyproxy_protoc_gen_validate\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        name = \"com_google_googleapis\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        name = \"com_github_cncf_udpa\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n\n    envoy_http_archive(\n        name = \"prometheus_metrics_model\",\n        locations = REPOSITORY_LOCATIONS,\n        build_file_content = PROMETHEUSMETRICS_BUILD_CONTENT,\n    )\n    envoy_http_archive(\n        name = \"opencensus_proto\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        name = \"rules_proto\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        name = \"com_github_openzipkin_zipkinapi\",\n        locations = REPOSITORY_LOCATIONS,\n        build_file_content = ZIPKINAPI_BUILD_CONTENT,\n    )\n\nPROMETHEUSMETRICS_BUILD_CONTENT = \"\"\"\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_cc_py_proto_library\")\nload(\"@io_bazel_rules_go//proto:def.bzl\", \"go_proto_library\")\n\napi_cc_py_proto_library(\n    name = \"client_model\",\n    srcs = [\n        \"metrics.proto\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\ngo_proto_library(\n    name = \"client_model_go_proto\",\n    importpath = \"github.com/prometheus/client_model/go\",\n    proto = \":client_model\",\n    visibility = [\"//visibility:public\"],\n)\n\"\"\"\n\nOPENCENSUSTRACE_BUILD_CONTENT = \"\"\"\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_cc_py_proto_library\")\nload(\"@io_bazel_rules_go//proto:def.bzl\", \"go_proto_library\")\n\napi_cc_py_proto_library(\n    name = 
\"trace_model\",\n    srcs = [\n        \"trace.proto\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\ngo_proto_library(\n    name = \"trace_model_go_proto\",\n    importpath = \"trace_model\",\n    proto = \":trace_model\",\n    visibility = [\"//visibility:public\"],\n)\n\"\"\"\n\nZIPKINAPI_BUILD_CONTENT = \"\"\"\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_cc_py_proto_library\")\nload(\"@io_bazel_rules_go//proto:def.bzl\", \"go_proto_library\")\n\napi_cc_py_proto_library(\n    name = \"zipkin\",\n    srcs = [\n        \"zipkin-jsonv2.proto\",\n        \"zipkin.proto\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\ngo_proto_library(\n    name = \"zipkin_go_proto\",\n    proto = \":zipkin\",\n    visibility = [\"//visibility:public\"],\n)\n\"\"\"\n"
  },
  {
    "path": "api/bazel/repository_locations.bzl",
    "content": "DEPENDENCY_REPOSITORIES_SPEC = dict(\n    bazel_skylib = dict(\n        project_name = \"bazel-skylib\",\n        project_desc = \"Common useful functions and rules for Bazel\",\n        project_url = \"https://github.com/bazelbuild/bazel-skylib\",\n        version = \"1.0.3\",\n        sha256 = \"1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c\",\n        urls = [\"https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz\"],\n        last_updated = \"2020-08-27\",\n        use_category = [\"api\"],\n    ),\n    com_envoyproxy_protoc_gen_validate = dict(\n        project_name = \"protoc-gen-validate (PGV)\",\n        project_desc = \"protoc plugin to generate polyglot message validators\",\n        project_url = \"https://github.com/envoyproxy/protoc-gen-validate\",\n        version = \"278964a8052f96a2f514add0298098f63fb7f47f\",\n        sha256 = \"e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8\",\n        strip_prefix = \"protoc-gen-validate-{version}\",\n        urls = [\"https://github.com/envoyproxy/protoc-gen-validate/archive/{version}.tar.gz\"],\n        last_updated = \"2020-06-09\",\n        use_category = [\"api\"],\n    ),\n    com_github_cncf_udpa = dict(\n        project_name = \"Universal Data Plane API\",\n        project_desc = \"Universal Data Plane API Working Group (UDPA-WG)\",\n        project_url = \"https://github.com/cncf/udpa\",\n        version = \"0.0.1\",\n        sha256 = \"83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8\",\n        strip_prefix = \"udpa-{version}\",\n        urls = [\"https://github.com/cncf/udpa/archive/v{version}.tar.gz\"],\n        last_updated = \"2020-09-23\",\n        use_category = [\"api\"],\n    ),\n    com_github_openzipkin_zipkinapi = dict(\n        project_name = \"Zipkin API\",\n        project_desc = \"Zipkin's language independent model and HTTP Api Definitions\",\n        project_url = 
\"https://github.com/openzipkin/zipkin-api\",\n        version = \"0.2.2\",\n        sha256 = \"688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b\",\n        strip_prefix = \"zipkin-api-{version}\",\n        urls = [\"https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz\"],\n        last_updated = \"2020-09-23\",\n        use_category = [\"api\"],\n    ),\n    com_google_googleapis = dict(\n        # TODO(dio): Consider writing a Starlark macro for importing Google API proto.\n        project_name = \"Google APIs\",\n        project_desc = \"Public interface definitions of Google APIs\",\n        project_url = \"https://github.com/googleapis/googleapis\",\n        version = \"82944da21578a53b74e547774cf62ed31a05b841\",\n        sha256 = \"a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405\",\n        strip_prefix = \"googleapis-{version}\",\n        urls = [\"https://github.com/googleapis/googleapis/archive/{version}.tar.gz\"],\n        last_updated = \"2019-12-02\",\n        use_category = [\"api\"],\n    ),\n    opencensus_proto = dict(\n        project_name = \"OpenCensus Proto\",\n        project_desc = \"Language Independent Interface Types For OpenCensus\",\n        project_url = \"https://github.com/census-instrumentation/opencensus-proto\",\n        version = \"0.3.0\",\n        sha256 = \"b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0\",\n        strip_prefix = \"opencensus-proto-{version}/src\",\n        urls = [\"https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz\"],\n        last_updated = \"2020-06-20\",\n        use_category = [\"api\"],\n    ),\n    prometheus_metrics_model = dict(\n        project_name = \"Prometheus client model\",\n        project_desc = \"Data model artifacts for Prometheus\",\n        project_url = \"https://github.com/prometheus/client_model\",\n        version = \"60555c9708c786597e6b07bf846d0dc5c2a46f54\",\n        sha256 = 
\"6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e\",\n        strip_prefix = \"client_model-{version}\",\n        urls = [\"https://github.com/prometheus/client_model/archive/{version}.tar.gz\"],\n        last_updated = \"2020-06-23\",\n        use_category = [\"api\"],\n    ),\n    rules_proto = dict(\n        project_name = \"Protobuf Rules for Bazel\",\n        project_desc = \"Protocol buffer rules for Bazel\",\n        project_url = \"https://github.com/bazelbuild/rules_proto\",\n        version = \"40298556293ae502c66579620a7ce867d5f57311\",\n        sha256 = \"aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5\",\n        strip_prefix = \"rules_proto-{version}\",\n        urls = [\"https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz\"],\n        last_updated = \"2020-08-17\",\n        use_category = [\"api\"],\n    ),\n)\n\ndef _format_version(s, version):\n    return s.format(version = version, dash_version = version.replace(\".\", \"-\"), underscore_version = version.replace(\".\", \"_\"))\n\n# Interpolate {version} in the above dependency specs. This code should be capable of running in both Python\n# and Starlark.\ndef _dependency_repositories():\n    locations = {}\n    for key, location in DEPENDENCY_REPOSITORIES_SPEC.items():\n        mutable_location = dict(location)\n        locations[key] = mutable_location\n\n        # Fixup with version information.\n        if \"version\" in location:\n            if \"strip_prefix\" in location:\n                mutable_location[\"strip_prefix\"] = _format_version(location[\"strip_prefix\"], location[\"version\"])\n            mutable_location[\"urls\"] = [_format_version(url, location[\"version\"]) for url in location[\"urls\"]]\n    return locations\n\nREPOSITORY_LOCATIONS = _dependency_repositories()\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/bootstrap/v2:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/certs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"CertsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Certificates]\n\n// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to\n// display certificate information. See :ref:`/certs <operations_admin_interface_certs>` for more\n// information.\nmessage Certificates {\n  // List of certificates known to an Envoy.\n  repeated Certificate certificates = 1;\n}\n\nmessage Certificate {\n  // Details of CA certificate.\n  repeated CertificateDetails ca_cert = 1;\n\n  // Details of Certificate Chain\n  repeated CertificateDetails cert_chain = 2;\n}\n\n// [#next-free-field: 7]\nmessage CertificateDetails {\n  // Path of the certificate.\n  string path = 1;\n\n  // Certificate Serial Number.\n  string serial_number = 2;\n\n  // List of Subject Alternate names.\n  repeated SubjectAlternateName subject_alt_names = 3;\n\n  // Minimum of days until expiration of certificate and it's chain.\n  uint64 days_until_expiration = 4;\n\n  // Indicates the time from which the certificate is valid.\n  google.protobuf.Timestamp valid_from = 5;\n\n  // Indicates the time at which the certificate expires.\n  google.protobuf.Timestamp expiration_time = 6;\n}\n\nmessage SubjectAlternateName {\n  // Subject Alternate Name.\n  oneof name {\n    string dns = 1;\n\n    string uri = 2;\n\n    string ip_address = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/clusters.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"envoy/admin/v2alpha/metrics.proto\";\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/health_check.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"ClustersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Clusters]\n\n// Admin endpoint uses this wrapper for `/clusters` to display cluster status information.\n// See :ref:`/clusters <operations_admin_interface_clusters>` for more information.\nmessage Clusters {\n  // Mapping from cluster name to each cluster's status.\n  repeated ClusterStatus cluster_statuses = 1;\n}\n\n// Details an individual cluster's current status.\n// [#next-free-field: 6]\nmessage ClusterStatus {\n  // Name of the cluster.\n  string name = 1;\n\n  // Denotes whether this cluster was added via API or configured statically.\n  bool added_via_api = 2;\n\n  // The success rate threshold used in the last interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used to calculate the threshold.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used to calculate the threshold.\n  // The threshold is used to eject hosts based on their success rate. See\n  // :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. 
There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. Outlier detection is not enabled for this cluster.\n  type.Percent success_rate_ejection_threshold = 3;\n\n  // Mapping from host address to the host's current status.\n  repeated HostStatus host_statuses = 4;\n\n  // The success rate threshold used in the last interval when only locally originated failures were\n  // taken into account and externally originated errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*. The threshold is used to eject hosts based on their success rate.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. 
Outlier detection is not enabled for this cluster.\n  type.Percent local_origin_success_rate_ejection_threshold = 5;\n}\n\n// Current state of a particular host.\n// [#next-free-field: 10]\nmessage HostStatus {\n  // Address of this host.\n  api.v2.core.Address address = 1;\n\n  // List of stats specific to this host.\n  repeated SimpleMetric stats = 2;\n\n  // The host's current health status.\n  HostHealthStatus health_status = 3;\n\n  // Request success rate for this host over the last calculated interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used in success rate\n  // calculation. If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used in success rate calculation.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.Percent success_rate = 4;\n\n  // The host's weight. If not configured, the value defaults to 1.\n  uint32 weight = 5;\n\n  // The hostname of the host, if applicable.\n  string hostname = 6;\n\n  // The host's priority. 
If not configured, the value defaults to 0 (highest priority).\n  uint32 priority = 7;\n\n  // Request success rate for this host over the last calculated\n  // interval when only locally originated errors are taken into account and externally originated\n  // errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.Percent local_origin_success_rate = 8;\n\n  // locality of the host.\n  api.v2.core.Locality locality = 9;\n}\n\n// Health status for a host.\n// [#next-free-field: 7]\nmessage HostHealthStatus {\n  // The host is currently failing active health checks.\n  bool failed_active_health_check = 1;\n\n  // The host is currently considered an outlier and has been ejected.\n  bool failed_outlier_check = 2;\n\n  // The host is currently being marked as degraded through active health checking.\n  bool failed_active_degraded_check = 4;\n\n  // The host has been removed from service discovery, but is being stabilized due to active\n  // health checking.\n  bool pending_dynamic_removal = 5;\n\n  // The host has not yet been health checked.\n  bool pending_active_hc = 6;\n\n  // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported\n  // here.\n  // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.]\n  api.v2.core.HealthStatus eds_health_status = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/config_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"envoy/config/bootstrap/v2/bootstrap.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"ConfigDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: ConfigDump]\n\n// The :ref:`/config_dump <operations_admin_interface_config_dump>` admin endpoint uses this wrapper\n// message to maintain and serve arbitrary configuration information from any component in Envoy.\nmessage ConfigDump {\n  // This list is serialized and dumped in its entirety at the\n  // :ref:`/config_dump <operations_admin_interface_config_dump>` endpoint.\n  //\n  // The following configurations are currently supported and will be dumped in the order given\n  // below:\n  //\n  // * *bootstrap*: :ref:`BootstrapConfigDump <envoy_api_msg_admin.v2alpha.BootstrapConfigDump>`\n  // * *clusters*: :ref:`ClustersConfigDump <envoy_api_msg_admin.v2alpha.ClustersConfigDump>`\n  // * *listeners*: :ref:`ListenersConfigDump <envoy_api_msg_admin.v2alpha.ListenersConfigDump>`\n  // * *routes*:  :ref:`RoutesConfigDump <envoy_api_msg_admin.v2alpha.RoutesConfigDump>`\n  //\n  // You can filter output with the resource and mask query parameters.\n  // See :ref:`/config_dump?resource={} <operations_admin_interface_config_dump_by_resource>`,\n  // :ref:`/config_dump?mask={} <operations_admin_interface_config_dump_by_mask>`,\n  // or :ref:`/config_dump?resource={},mask={}\n  // <operations_admin_interface_config_dump_by_resource_and_mask>` for more information.\n  repeated google.protobuf.Any configs = 1;\n}\n\nmessage UpdateFailureState {\n  // What the component configuration would have been if the update had succeeded.\n  google.protobuf.Any failed_configuration = 
1;\n\n  // Time of the latest failed update attempt.\n  google.protobuf.Timestamp last_update_attempt = 2;\n\n  // Details about the last failed update attempt.\n  string details = 3;\n}\n\n// This message describes the bootstrap configuration that Envoy was started with. This includes\n// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate\n// the static portions of an Envoy configuration by reusing the output as the bootstrap\n// configuration for another Envoy.\nmessage BootstrapConfigDump {\n  config.bootstrap.v2.Bootstrap bootstrap = 1;\n\n  // The timestamp when the BootstrapConfig was last updated.\n  google.protobuf.Timestamp last_updated = 2;\n}\n\n// Envoy's listener manager fills this message with all currently known listeners. Listener\n// configuration information can be used to recreate an Envoy configuration by populating all\n// listeners as static listeners or by returning them in a LDS response.\nmessage ListenersConfigDump {\n  // Describes a statically loaded listener.\n  message StaticListener {\n    // The listener config.\n    google.protobuf.Any listener = 1;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicListenerState {\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` field at the time\n    // that the listener was loaded. 
In the future, discrete per-listener versions may be supported\n    // by the API.\n    string version_info = 1;\n\n    // The listener config.\n    google.protobuf.Any listener = 2;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // Describes a dynamically loaded listener via the LDS API.\n  // [#next-free-field: 6]\n  message DynamicListener {\n    // The name or unique id of this listener, pulled from the DynamicListenerState config.\n    string name = 1;\n\n    // The listener state for any active listener by this name.\n    // These are listeners that are available to service data plane traffic.\n    DynamicListenerState active_state = 2;\n\n    // The listener state for any warming listener by this name.\n    // These are listeners that are currently undergoing warming in preparation to service data\n    // plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the warming listeners should generally be discarded.\n    DynamicListenerState warming_state = 3;\n\n    // The listener state for any draining listener by this name.\n    // These are listeners that are currently undergoing draining in preparation to stop servicing\n    // data plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the draining listeners should generally be discarded.\n    DynamicListenerState draining_state = 4;\n\n    // Set if the last update failed, cleared after the next successful update.\n    UpdateFailureState error_state = 5;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` in the\n  // last processed LDS discovery response. 
If there are only static bootstrap listeners, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded listener configs.\n  repeated StaticListener static_listeners = 2;\n\n  // State for any warming, active, or draining listeners.\n  repeated DynamicListener dynamic_listeners = 3;\n}\n\n// Envoy's cluster manager fills this message with all currently known clusters. Cluster\n// configuration information can be used to recreate an Envoy configuration by populating all\n// clusters as static clusters or by returning them in a CDS response.\nmessage ClustersConfigDump {\n  // Describes a statically loaded cluster.\n  message StaticCluster {\n    // The cluster config.\n    google.protobuf.Any cluster = 1;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  // Describes a dynamically loaded cluster via the CDS API.\n  message DynamicCluster {\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` field at the time\n    // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by\n    // the API.\n    string version_info = 1;\n\n    // The cluster config.\n    google.protobuf.Any cluster = 2;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` in the\n  // last processed CDS discovery response. If there are only static bootstrap clusters, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded cluster configs.\n  repeated StaticCluster static_clusters = 2;\n\n  // The dynamically loaded active clusters. 
These are clusters that are available to service\n  // data plane traffic.\n  repeated DynamicCluster dynamic_active_clusters = 3;\n\n  // The dynamically loaded warming clusters. These are clusters that are currently undergoing\n  // warming in preparation to service data plane traffic. Note that if attempting to recreate an\n  // Envoy configuration from a configuration dump, the warming clusters should generally be\n  // discarded.\n  repeated DynamicCluster dynamic_warming_clusters = 4;\n}\n\n// Envoy's RDS implementation fills this message with all currently loaded routes, as described by\n// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration\n// or defined inline while configuring listeners are separated from those configured dynamically via RDS.\n// Route configuration information can be used to recreate an Envoy configuration by populating all routes\n// as static routes or by returning them in RDS responses.\nmessage RoutesConfigDump {\n  message StaticRouteConfig {\n    // The route config.\n    google.protobuf.Any route_config = 1;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicRouteConfig {\n    // This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` field at the time that\n    // the route configuration was loaded.\n    string version_info = 1;\n\n    // The route config.\n    google.protobuf.Any route_config = 2;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded route configs.\n  repeated StaticRouteConfig static_route_configs = 2;\n\n  // The dynamically loaded route configs.\n  repeated DynamicRouteConfig dynamic_route_configs = 3;\n}\n\n// Envoy's scoped RDS implementation fills this message with all currently loaded route\n// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both\n// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the\n// dynamically obtained scopes via the SRDS API.\nmessage ScopedRoutesConfigDump {\n  message InlineScopedRouteConfigs {\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 2;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  message DynamicScopedRouteConfigs {\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` field at the time that\n    // the scoped routes configuration was loaded.\n    string version_info = 2;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 3;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 4;\n  }\n\n  // The statically loaded scoped route configs.\n  repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1;\n\n  // The dynamically loaded scoped route configs.\n  repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2;\n}\n\n// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS.\nmessage SecretsConfigDump {\n  // DynamicSecret contains secret information fetched via SDS.\n  message DynamicSecret {\n    // The name assigned to the secret.\n    string name = 1;\n\n    // This is the per-resource version information.\n    string version_info = 2;\n\n    // The timestamp when the secret was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 4;\n  }\n\n  // StaticSecret specifies statically loaded secret in bootstrap.\n  message StaticSecret {\n    // The name assigned to the secret.\n    string name = 1;\n\n    // The timestamp when the secret was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 3;\n  }\n\n  // The statically loaded secrets.\n  repeated StaticSecret static_secrets = 1;\n\n  // The 
dynamically loaded active secrets. These are secrets that are available to service\n  // clusters or listeners.\n  repeated DynamicSecret dynamic_active_secrets = 2;\n\n  // The dynamically loaded warming secrets. These are secrets that are currently undergoing\n  // warming in preparation to service clusters or listeners.\n  repeated DynamicSecret dynamic_warming_secrets = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/listeners.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"envoy/api/v2/core/address.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"ListenersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Listeners]\n\n// Admin endpoint uses this wrapper for `/listeners` to display listener status information.\n// See :ref:`/listeners <operations_admin_interface_listeners>` for more information.\nmessage Listeners {\n  // List of listener statuses.\n  repeated ListenerStatus listener_statuses = 1;\n}\n\n// Details an individual listener's current status.\nmessage ListenerStatus {\n  // Name of the listener\n  string name = 1;\n\n  // The actual local address that the listener is listening on. If a listener was configured\n  // to listen on port 0, then this address has the port that was allocated by the OS.\n  api.v2.core.Address local_address = 2;\n}\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/memory.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"MemoryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Memory]\n\n// Proto representation of the internal memory consumption of an Envoy instance. These represent\n// values extracted from an internal TCMalloc instance. For more information, see the section of the\n// docs entitled [\"Generic Tcmalloc Status\"](https://gperftools.github.io/gperftools/tcmalloc.html).\n// [#next-free-field: 7]\nmessage Memory {\n  // The number of bytes allocated by the heap for Envoy. This is an alias for\n  // `generic.current_allocated_bytes`.\n  uint64 allocated = 1;\n\n  // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for\n  // `generic.heap_size`.\n  uint64 heap_size = 2;\n\n  // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and depending on the OS, typically do not count towards physical memory\n  // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`.\n  uint64 pageheap_unmapped = 3;\n\n  // The number of bytes in free, mapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also\n  // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`.\n  uint64 pageheap_free = 4;\n\n  // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias\n  // for `tcmalloc.current_total_thread_cache_bytes`.\n  uint64 total_thread_cache = 5;\n\n  // The number of bytes of the physical memory usage by the allocator. 
This is an alias for\n  // `generic.total_physical_bytes`.\n  uint64 total_physical_bytes = 6;\n}\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/metrics.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"MetricsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metrics]\n\n// Proto representation of an Envoy Counter or Gauge value.\nmessage SimpleMetric {\n  enum Type {\n    COUNTER = 0;\n    GAUGE = 1;\n  }\n\n  // Type of the metric represented.\n  Type type = 1;\n\n  // Current metric value.\n  uint64 value = 2;\n\n  // Name of the metric.\n  string name = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/mutex_stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"MutexStatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: MutexStats]\n\n// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run\n// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex`\n// [docs](https://abseil.io/about/design/mutex#extra-features).\n//\n// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not\n// correspond to core clock frequency. For more information, see the `CycleClock`\n// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).\nmessage MutexStats {\n  // The number of individual mutex contentions which have occurred since startup.\n  uint64 num_contentions = 1;\n\n  // The length of the current contention wait cycle.\n  uint64 current_wait_cycles = 2;\n\n  // The lifetime total of all contention wait cycles.\n  uint64 lifetime_wait_cycles = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/server_info.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"ServerInfoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Server State]\n\n// Proto representation of the value returned by /server_info, containing\n// server version/server status information.\n// [#next-free-field: 7]\nmessage ServerInfo {\n  enum State {\n    // Server is live and serving traffic.\n    LIVE = 0;\n\n    // Server is draining listeners in response to external health checks failing.\n    DRAINING = 1;\n\n    // Server has not yet completed cluster manager initialization.\n    PRE_INITIALIZING = 2;\n\n    // Server is running the cluster manager initialization callbacks (e.g., RDS).\n    INITIALIZING = 3;\n  }\n\n  // Server version.\n  string version = 1;\n\n  // State of the server.\n  State state = 2;\n\n  // Uptime since current epoch was started.\n  google.protobuf.Duration uptime_current_epoch = 3;\n\n  // Uptime since the start of the first epoch.\n  google.protobuf.Duration uptime_all_epochs = 4;\n\n  // Hot restart version.\n  string hot_restart_version = 5;\n\n  // Command line options the server is currently running with.\n  CommandLineOptions command_line_options = 6;\n}\n\n// [#next-free-field: 29]\nmessage CommandLineOptions {\n  enum IpVersion {\n    v4 = 0;\n    v6 = 1;\n  }\n\n  enum Mode {\n    // Validate configs and then serve traffic normally.\n    Serve = 0;\n\n    // Validate configs and exit.\n    Validate = 1;\n\n    // Completely load and initialize the config, and then exit without running the listener loop.\n    InitOnly = 2;\n  }\n\n  reserved 12;\n\n  // See :option:`--base-id` for details.\n  uint64 base_id = 1;\n\n  // See 
:option:`--concurrency` for details.\n  uint32 concurrency = 2;\n\n  // See :option:`--config-path` for details.\n  string config_path = 3;\n\n  // See :option:`--config-yaml` for details.\n  string config_yaml = 4;\n\n  // See :option:`--allow-unknown-static-fields` for details.\n  bool allow_unknown_static_fields = 5;\n\n  // See :option:`--reject-unknown-dynamic-fields` for details.\n  bool reject_unknown_dynamic_fields = 26;\n\n  // See :option:`--admin-address-path` for details.\n  string admin_address_path = 6;\n\n  // See :option:`--local-address-ip-version` for details.\n  IpVersion local_address_ip_version = 7;\n\n  // See :option:`--log-level` for details.\n  string log_level = 8;\n\n  // See :option:`--component-log-level` for details.\n  string component_log_level = 9;\n\n  // See :option:`--log-format` for details.\n  string log_format = 10;\n\n  // See :option:`--log-format-escaped` for details.\n  bool log_format_escaped = 27;\n\n  // See :option:`--log-path` for details.\n  string log_path = 11;\n\n  // See :option:`--service-cluster` for details.\n  string service_cluster = 13;\n\n  // See :option:`--service-node` for details.\n  string service_node = 14;\n\n  // See :option:`--service-zone` for details.\n  string service_zone = 15;\n\n  // See :option:`--file-flush-interval-msec` for details.\n  google.protobuf.Duration file_flush_interval = 16;\n\n  // See :option:`--drain-time-s` for details.\n  google.protobuf.Duration drain_time = 17;\n\n  // See :option:`--parent-shutdown-time-s` for details.\n  google.protobuf.Duration parent_shutdown_time = 18;\n\n  // See :option:`--mode` for details.\n  Mode mode = 19;\n\n  // max_stats and max_obj_name_len are now unused and have no effect.\n  uint64 max_stats = 20 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  uint64 max_obj_name_len = 21\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // See :option:`--disable-hot-restart` for details.\n  
bool disable_hot_restart = 22;\n\n  // See :option:`--enable-mutex-tracing` for details.\n  bool enable_mutex_tracing = 23;\n\n  // See :option:`--restart-epoch` for details.\n  uint32 restart_epoch = 24;\n\n  // See :option:`--cpuset-threads` for details.\n  bool cpuset_threads = 25;\n\n  // See :option:`--disable-extensions` for details.\n  repeated string disabled_extensions = 28;\n}\n"
  },
  {
    "path": "api/envoy/admin/v2alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"envoy/service/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap]\n\n// The /tap admin request body that is used to configure an active tap session.\nmessage TapRequest {\n  // The opaque configuration ID used to match the configuration to a loaded extension.\n  // A tap extension configures a similar opaque ID that is used to match.\n  string config_id = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The tap configuration to load.\n  service.tap.v2alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/admin/v2alpha:pkg\",\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/bootstrap/v3:pkg\",\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/tap/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/admin/v3/certs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"CertsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Certificates]\n\n// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to\n// display certificate information. See :ref:`/certs <operations_admin_interface_certs>` for more\n// information.\nmessage Certificates {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Certificates\";\n\n  // List of certificates known to an Envoy.\n  repeated Certificate certificates = 1;\n}\n\nmessage Certificate {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Certificate\";\n\n  // Details of CA certificate.\n  repeated CertificateDetails ca_cert = 1;\n\n  // Details of Certificate Chain\n  repeated CertificateDetails cert_chain = 2;\n}\n\n// [#next-free-field: 8]\nmessage CertificateDetails {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.CertificateDetails\";\n\n  message OcspDetails {\n    // Indicates the time from which the OCSP response is valid.\n    google.protobuf.Timestamp valid_from = 1;\n\n    // Indicates the time at which the OCSP response expires.\n    google.protobuf.Timestamp expiration = 2;\n  }\n\n  // Path of the certificate.\n  string path = 1;\n\n  // Certificate Serial Number.\n  string serial_number = 2;\n\n  // List of Subject Alternate names.\n  repeated SubjectAlternateName subject_alt_names = 3;\n\n  // Minimum of days until expiration of certificate and its chain.\n  uint64 days_until_expiration = 4;\n\n  // Indicates the time from which the certificate is 
valid.\n  google.protobuf.Timestamp valid_from = 5;\n\n  // Indicates the time at which the certificate expires.\n  google.protobuf.Timestamp expiration_time = 6;\n\n  // Details related to the OCSP response associated with this certificate, if any.\n  OcspDetails ocsp_details = 7;\n}\n\nmessage SubjectAlternateName {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.SubjectAlternateName\";\n\n  // Subject Alternate Name.\n  oneof name {\n    string dns = 1;\n\n    string uri = 2;\n\n    string ip_address = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/clusters.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/admin/v3/metrics.proto\";\nimport \"envoy/config/cluster/v3/circuit_breaker.proto\";\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/health_check.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"ClustersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Clusters]\n\n// Admin endpoint uses this wrapper for `/clusters` to display cluster status information.\n// See :ref:`/clusters <operations_admin_interface_clusters>` for more information.\nmessage Clusters {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Clusters\";\n\n  // Mapping from cluster name to each cluster's status.\n  repeated ClusterStatus cluster_statuses = 1;\n}\n\n// Details an individual cluster's current status.\n// [#next-free-field: 7]\nmessage ClusterStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.ClusterStatus\";\n\n  // Name of the cluster.\n  string name = 1;\n\n  // Denotes whether this cluster was added via API or configured statically.\n  bool added_via_api = 2;\n\n  // The success rate threshold used in the last interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used to calculate the threshold.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated 
errors were used to calculate the threshold.\n  // The threshold is used to eject hosts based on their success rate. See\n  // :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. Outlier detection is not enabled for this cluster.\n  type.v3.Percent success_rate_ejection_threshold = 3;\n\n  // Mapping from host address to the host's current status.\n  repeated HostStatus host_statuses = 4;\n\n  // The success rate threshold used in the last interval when only locally originated failures were\n  // taken into account and externally originated errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*. The threshold is used to eject hosts based on their success rate.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. 
Outlier detection is not enabled for this cluster.\n  type.v3.Percent local_origin_success_rate_ejection_threshold = 5;\n\n  // :ref:`Circuit breaking <arch_overview_circuit_break>` settings of the cluster.\n  config.cluster.v3.CircuitBreakers circuit_breakers = 6;\n}\n\n// Current state of a particular host.\n// [#next-free-field: 10]\nmessage HostStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.HostStatus\";\n\n  // Address of this host.\n  config.core.v3.Address address = 1;\n\n  // List of stats specific to this host.\n  repeated SimpleMetric stats = 2;\n\n  // The host's current health status.\n  HostHealthStatus health_status = 3;\n\n  // Request success rate for this host over the last calculated interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used in success rate\n  // calculation. If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used in success rate calculation.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.v3.Percent success_rate = 4;\n\n  // The host's weight. If not configured, the value defaults to 1.\n  uint32 weight = 5;\n\n  // The hostname of the host, if applicable.\n  string hostname = 6;\n\n  // The host's priority. 
If not configured, the value defaults to 0 (highest priority).\n  uint32 priority = 7;\n\n  // Request success rate for this host over the last calculated\n  // interval when only locally originated errors are taken into account and externally originated\n  // errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.v3.Percent local_origin_success_rate = 8;\n\n  // locality of the host.\n  config.core.v3.Locality locality = 9;\n}\n\n// Health status for a host.\n// [#next-free-field: 7]\nmessage HostHealthStatus {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.HostHealthStatus\";\n\n  // The host is currently failing active health checks.\n  bool failed_active_health_check = 1;\n\n  // The host is currently considered an outlier and has been ejected.\n  bool failed_outlier_check = 2;\n\n  // The host is currently being marked as degraded through active health checking.\n  bool failed_active_degraded_check = 4;\n\n  // The host has been removed from service discovery, but is being stabilized due to active\n  // health checking.\n  bool pending_dynamic_removal = 5;\n\n  // The host has not yet been health checked.\n  bool pending_active_hc = 6;\n\n  // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported\n  // here.\n  // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.]\n  config.core.v3.HealthStatus eds_health_status = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/config_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/config/bootstrap/v3/bootstrap.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"ConfigDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: ConfigDump]\n\n// The :ref:`/config_dump <operations_admin_interface_config_dump>` admin endpoint uses this wrapper\n// message to maintain and serve arbitrary configuration information from any component in Envoy.\nmessage ConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.ConfigDump\";\n\n  // This list is serialized and dumped in its entirety at the\n  // :ref:`/config_dump <operations_admin_interface_config_dump>` endpoint.\n  //\n  // The following configurations are currently supported and will be dumped in the order given\n  // below:\n  //\n  // * *bootstrap*: :ref:`BootstrapConfigDump <envoy_api_msg_admin.v3.BootstrapConfigDump>`\n  // * *clusters*: :ref:`ClustersConfigDump <envoy_api_msg_admin.v3.ClustersConfigDump>`\n  // * *endpoints*:  :ref:`EndpointsConfigDump <envoy_api_msg_admin.v3.EndpointsConfigDump>`\n  // * *listeners*: :ref:`ListenersConfigDump <envoy_api_msg_admin.v3.ListenersConfigDump>`\n  // * *routes*:  :ref:`RoutesConfigDump <envoy_api_msg_admin.v3.RoutesConfigDump>`\n  //\n  // EDS Configuration will only be dumped by using parameter `?include_eds`\n  //\n  // You can filter output with the resource and mask query parameters.\n  // See :ref:`/config_dump?resource={} <operations_admin_interface_config_dump_by_resource>`,\n  // :ref:`/config_dump?mask={} <operations_admin_interface_config_dump_by_mask>`,\n  // or :ref:`/config_dump?resource={},mask={}\n  // 
<operations_admin_interface_config_dump_by_resource_and_mask>` for more information.\n  repeated google.protobuf.Any configs = 1;\n}\n\nmessage UpdateFailureState {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.UpdateFailureState\";\n\n  // What the component configuration would have been if the update had succeeded.\n  google.protobuf.Any failed_configuration = 1;\n\n  // Time of the latest failed update attempt.\n  google.protobuf.Timestamp last_update_attempt = 2;\n\n  // Details about the last failed update attempt.\n  string details = 3;\n}\n\n// This message describes the bootstrap configuration that Envoy was started with. This includes\n// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate\n// the static portions of an Envoy configuration by reusing the output as the bootstrap\n// configuration for another Envoy.\nmessage BootstrapConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.BootstrapConfigDump\";\n\n  config.bootstrap.v3.Bootstrap bootstrap = 1;\n\n  // The timestamp when the BootstrapConfig was last updated.\n  google.protobuf.Timestamp last_updated = 2;\n}\n\n// Envoy's listener manager fills this message with all currently known listeners. 
Listener\n// configuration information can be used to recreate an Envoy configuration by populating all\n// listeners as static listeners or by returning them in a LDS response.\nmessage ListenersConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.ListenersConfigDump\";\n\n  // Describes a statically loaded listener.\n  message StaticListener {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ListenersConfigDump.StaticListener\";\n\n    // The listener config.\n    google.protobuf.Any listener = 1;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicListenerState {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ListenersConfigDump.DynamicListenerState\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time\n    // that the listener was loaded. 
In the future, discrete per-listener versions may be supported\n    // by the API.\n    string version_info = 1;\n\n    // The listener config.\n    google.protobuf.Any listener = 2;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // Describes a dynamically loaded listener via the LDS API.\n  // [#next-free-field: 6]\n  message DynamicListener {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ListenersConfigDump.DynamicListener\";\n\n    // The name or unique id of this listener, pulled from the DynamicListenerState config.\n    string name = 1;\n\n    // The listener state for any active listener by this name.\n    // These are listeners that are available to service data plane traffic.\n    DynamicListenerState active_state = 2;\n\n    // The listener state for any warming listener by this name.\n    // These are listeners that are currently undergoing warming in preparation to service data\n    // plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the warming listeners should generally be discarded.\n    DynamicListenerState warming_state = 3;\n\n    // The listener state for any draining listener by this name.\n    // These are listeners that are currently undergoing draining in preparation to stop servicing\n    // data plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the draining listeners should generally be discarded.\n    DynamicListenerState draining_state = 4;\n\n    // Set if the last update failed, cleared after the next successful update.\n    UpdateFailureState error_state = 5;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` in the\n  // last processed LDS discovery response. 
If there are only static bootstrap listeners, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded listener configs.\n  repeated StaticListener static_listeners = 2;\n\n  // State for any warming, active, or draining listeners.\n  repeated DynamicListener dynamic_listeners = 3;\n}\n\n// Envoy's cluster manager fills this message with all currently known clusters. Cluster\n// configuration information can be used to recreate an Envoy configuration by populating all\n// clusters as static clusters or by returning them in a CDS response.\nmessage ClustersConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.ClustersConfigDump\";\n\n  // Describes a statically loaded cluster.\n  message StaticCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ClustersConfigDump.StaticCluster\";\n\n    // The cluster config.\n    google.protobuf.Any cluster = 1;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  // Describes a dynamically loaded cluster via the CDS API.\n  message DynamicCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ClustersConfigDump.DynamicCluster\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time\n    // that the cluster was loaded. 
In the future, discrete per-cluster versions may be supported by\n    // the API.\n    string version_info = 1;\n\n    // The cluster config.\n    google.protobuf.Any cluster = 2;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` in the\n  // last processed CDS discovery response. If there are only static bootstrap clusters, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded cluster configs.\n  repeated StaticCluster static_clusters = 2;\n\n  // The dynamically loaded active clusters. These are clusters that are available to service\n  // data plane traffic.\n  repeated DynamicCluster dynamic_active_clusters = 3;\n\n  // The dynamically loaded warming clusters. These are clusters that are currently undergoing\n  // warming in preparation to service data plane traffic. Note that if attempting to recreate an\n  // Envoy configuration from a configuration dump, the warming clusters should generally be\n  // discarded.\n  repeated DynamicCluster dynamic_warming_clusters = 4;\n}\n\n// Envoy's RDS implementation fills this message with all currently loaded routes, as described by\n// their RouteConfiguration objects. 
Static routes that are either defined in the bootstrap configuration\n// or defined inline while configuring listeners are separated from those configured dynamically via RDS.\n// Route configuration information can be used to recreate an Envoy configuration by populating all routes\n// as static routes or by returning them in RDS responses.\nmessage RoutesConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.RoutesConfigDump\";\n\n  message StaticRouteConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.RoutesConfigDump.StaticRouteConfig\";\n\n    // The route config.\n    google.protobuf.Any route_config = 1;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicRouteConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.RoutesConfigDump.DynamicRouteConfig\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time that\n    // the route configuration was loaded.\n    string version_info = 1;\n\n    // The route config.\n    google.protobuf.Any route_config = 2;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded route configs.\n  repeated StaticRouteConfig static_route_configs = 2;\n\n  // The dynamically loaded route configs.\n  repeated DynamicRouteConfig dynamic_route_configs = 3;\n}\n\n// Envoy's scoped RDS implementation fills this message with all currently loaded route\n// configuration scopes (defined via ScopedRouteConfigurationsSet protos). 
This message lists both\n// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the\n// dynamically obtained scopes via the SRDS API.\nmessage ScopedRoutesConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.ScopedRoutesConfigDump\";\n\n  message InlineScopedRouteConfigs {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ScopedRoutesConfigDump.InlineScopedRouteConfigs\";\n\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 2;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  message DynamicScopedRouteConfigs {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ScopedRoutesConfigDump.DynamicScopedRouteConfigs\";\n\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time that\n    // the scoped routes configuration was loaded.\n    string version_info = 2;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 3;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 4;\n  }\n\n  // The statically loaded scoped route configs.\n  repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1;\n\n  // The dynamically loaded scoped route configs.\n  repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2;\n}\n\n// Envoy's SDS implementation fills this message with all secrets fetched dynamically via SDS.\nmessage SecretsConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.SecretsConfigDump\";\n\n  // DynamicSecret contains secret information fetched via SDS.\n  message DynamicSecret {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.SecretsConfigDump.DynamicSecret\";\n\n    // The name assigned to the secret.\n    string name = 1;\n\n    // This is the per-resource version information.\n    string version_info = 2;\n\n    // The timestamp when the secret was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 4;\n  }\n\n  // StaticSecret specifies statically loaded secret in bootstrap.\n  message StaticSecret {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.SecretsConfigDump.StaticSecret\";\n\n    // The name assigned to the secret.\n    string name = 1;\n\n    // The timestamp when the 
secret was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 3;\n  }\n\n  // The statically loaded secrets.\n  repeated StaticSecret static_secrets = 1;\n\n  // The dynamically loaded active secrets. These are secrets that are available to service\n  // clusters or listeners.\n  repeated DynamicSecret dynamic_active_secrets = 2;\n\n  // The dynamically loaded warming secrets. These are secrets that are currently undergoing\n  // warming in preparation to service clusters or listeners.\n  repeated DynamicSecret dynamic_warming_secrets = 3;\n}\n\n// Envoy's admin fills this message with all currently known endpoints. Endpoint\n// configuration information can be used to recreate an Envoy configuration by populating all\n// endpoints as static endpoints or by returning them in an EDS response.\nmessage EndpointsConfigDump {\n  message StaticEndpointConfig {\n    // The endpoint config.\n    google.protobuf.Any endpoint_config = 1;\n\n    // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicEndpointConfig {\n    // [#not-implemented-hide:] This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time that\n    // the endpoint configuration was loaded.\n    string version_info = 1;\n\n    // The endpoint config.\n    google.protobuf.Any endpoint_config = 2;\n\n    // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded endpoint configs.\n  repeated StaticEndpointConfig static_endpoint_configs = 2;\n\n  // The dynamically loaded endpoint configs.\n  repeated DynamicEndpointConfig dynamic_endpoint_configs = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/init_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"InitDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: InitDump]\n\n// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers,\n// which provides the information of their unready targets.\n// The :ref:`/init_dump <operations_admin_interface_init_dump>` will dump all unready targets information.\nmessage UnreadyTargetsDumps {\n  // Message of unready targets information of an init manager.\n  message UnreadyTargetsDump {\n    // Name of the init manager. Example: \"init_manager_xxx\".\n    string name = 1;\n\n    // Names of unready targets of the init manager. Example: \"target_xxx\".\n    repeated string target_names = 2;\n  }\n\n  // You can choose specific component to dump unready targets with mask query parameter.\n  // See :ref:`/init_dump?mask={} <operations_admin_interface_init_dump_by_mask>` for more information.\n  // The dumps of unready targets of all init managers.\n  repeated UnreadyTargetsDump unready_targets_dumps = 1;\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/listeners.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"ListenersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Listeners]\n\n// Admin endpoint uses this wrapper for `/listeners` to display listener status information.\n// See :ref:`/listeners <operations_admin_interface_listeners>` for more information.\nmessage Listeners {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Listeners\";\n\n  // List of listener statuses.\n  repeated ListenerStatus listener_statuses = 1;\n}\n\n// Details an individual listener's current status.\nmessage ListenerStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.ListenerStatus\";\n\n  // Name of the listener\n  string name = 1;\n\n  // The actual local address that the listener is listening on. If a listener was configured\n  // to listen on port 0, then this address has the port that was allocated by the OS.\n  config.core.v3.Address local_address = 2;\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/memory.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"MemoryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Memory]\n\n// Proto representation of the internal memory consumption of an Envoy instance. These represent\n// values extracted from an internal TCMalloc instance. For more information, see the section of the\n// docs entitled [\"Generic Tcmalloc Status\"](https://gperftools.github.io/gperftools/tcmalloc.html).\n// [#next-free-field: 7]\nmessage Memory {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Memory\";\n\n  // The number of bytes allocated by the heap for Envoy. This is an alias for\n  // `generic.current_allocated_bytes`.\n  uint64 allocated = 1;\n\n  // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for\n  // `generic.heap_size`.\n  uint64 heap_size = 2;\n\n  // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and depending on the OS, typically do not count towards physical memory\n  // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`.\n  uint64 pageheap_unmapped = 3;\n\n  // The number of bytes in free, mapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also\n  // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`.\n  uint64 pageheap_free = 4;\n\n  // The amount of memory used by the TCMalloc thread caches (for small objects). 
This is an alias\n  // for `tcmalloc.current_total_thread_cache_bytes`.\n  uint64 total_thread_cache = 5;\n\n  // The number of bytes of the physical memory usage by the allocator. This is an alias for\n  // `generic.total_physical_bytes`.\n  uint64 total_physical_bytes = 6;\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/metrics.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"MetricsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metrics]\n\n// Proto representation of an Envoy Counter or Gauge value.\nmessage SimpleMetric {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.SimpleMetric\";\n\n  enum Type {\n    COUNTER = 0;\n    GAUGE = 1;\n  }\n\n  // Type of the metric represented.\n  Type type = 1;\n\n  // Current metric value.\n  uint64 value = 2;\n\n  // Name of the metric.\n  string name = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/mutex_stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"MutexStatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: MutexStats]\n\n// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run\n// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex`\n// [docs](https://abseil.io/about/design/mutex#extra-features).\n//\n// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not\n// correspond to core clock frequency. For more information, see the `CycleClock`\n// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).\nmessage MutexStats {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.MutexStats\";\n\n  // The number of individual mutex contentions which have occurred since startup.\n  uint64 num_contentions = 1;\n\n  // The length of the current contention wait cycle.\n  uint64 current_wait_cycles = 2;\n\n  // The lifetime total of all contention wait cycles.\n  uint64 lifetime_wait_cycles = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/server_info.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"ServerInfoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Server State]\n\n// Proto representation of the value returned by /server_info, containing\n// server version/server status information.\n// [#next-free-field: 8]\nmessage ServerInfo {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.ServerInfo\";\n\n  enum State {\n    // Server is live and serving traffic.\n    LIVE = 0;\n\n    // Server is draining listeners in response to external health checks failing.\n    DRAINING = 1;\n\n    // Server has not yet completed cluster manager initialization.\n    PRE_INITIALIZING = 2;\n\n    // Server is running the cluster manager initialization callbacks (e.g., RDS).\n    INITIALIZING = 3;\n  }\n\n  // Server version.\n  string version = 1;\n\n  // State of the server.\n  State state = 2;\n\n  // Uptime since current epoch was started.\n  google.protobuf.Duration uptime_current_epoch = 3;\n\n  // Uptime since the start of the first epoch.\n  google.protobuf.Duration uptime_all_epochs = 4;\n\n  // Hot restart version.\n  string hot_restart_version = 5;\n\n  // Command line options the server is currently running with.\n  CommandLineOptions command_line_options = 6;\n\n  // Populated node identity of this server.\n  config.core.v3.Node node = 7;\n}\n\n// [#next-free-field: 37]\nmessage CommandLineOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.CommandLineOptions\";\n\n  enum IpVersion {\n    v4 = 0;\n    v6 
= 1;\n  }\n\n  enum Mode {\n    // Validate configs and then serve traffic normally.\n    Serve = 0;\n\n    // Validate configs and exit.\n    Validate = 1;\n\n    // Completely load and initialize the config, and then exit without running the listener loop.\n    InitOnly = 2;\n  }\n\n  enum DrainStrategy {\n    // Gradually discourage connections over the course of the drain period.\n    Gradual = 0;\n\n    // Discourage all connections for the duration of the drain sequence.\n    Immediate = 1;\n  }\n\n  reserved 12, 20, 21;\n\n  reserved \"max_stats\", \"max_obj_name_len\";\n\n  // See :option:`--base-id` for details.\n  uint64 base_id = 1;\n\n  // See :option:`--use-dynamic-base-id` for details.\n  bool use_dynamic_base_id = 31;\n\n  // See :option:`--base-id-path` for details.\n  string base_id_path = 32;\n\n  // See :option:`--concurrency` for details.\n  uint32 concurrency = 2;\n\n  // See :option:`--config-path` for details.\n  string config_path = 3;\n\n  // See :option:`--config-yaml` for details.\n  string config_yaml = 4;\n\n  // See :option:`--allow-unknown-static-fields` for details.\n  bool allow_unknown_static_fields = 5;\n\n  // See :option:`--reject-unknown-dynamic-fields` for details.\n  bool reject_unknown_dynamic_fields = 26;\n\n  // See :option:`--ignore-unknown-dynamic-fields` for details.\n  bool ignore_unknown_dynamic_fields = 30;\n\n  // See :option:`--admin-address-path` for details.\n  string admin_address_path = 6;\n\n  // See :option:`--local-address-ip-version` for details.\n  IpVersion local_address_ip_version = 7;\n\n  // See :option:`--log-level` for details.\n  string log_level = 8;\n\n  // See :option:`--component-log-level` for details.\n  string component_log_level = 9;\n\n  // See :option:`--log-format` for details.\n  string log_format = 10;\n\n  // See :option:`--log-format-escaped` for details.\n  bool log_format_escaped = 27;\n\n  // See :option:`--log-path` for details.\n  string log_path = 11;\n\n  // See 
:option:`--service-cluster` for details.\n  string service_cluster = 13;\n\n  // See :option:`--service-node` for details.\n  string service_node = 14;\n\n  // See :option:`--service-zone` for details.\n  string service_zone = 15;\n\n  // See :option:`--file-flush-interval-msec` for details.\n  google.protobuf.Duration file_flush_interval = 16;\n\n  // See :option:`--drain-time-s` for details.\n  google.protobuf.Duration drain_time = 17;\n\n  // See :option:`--drain-strategy` for details.\n  DrainStrategy drain_strategy = 33;\n\n  // See :option:`--parent-shutdown-time-s` for details.\n  google.protobuf.Duration parent_shutdown_time = 18;\n\n  // See :option:`--mode` for details.\n  Mode mode = 19;\n\n  // See :option:`--disable-hot-restart` for details.\n  bool disable_hot_restart = 22;\n\n  // See :option:`--enable-mutex-tracing` for details.\n  bool enable_mutex_tracing = 23;\n\n  // See :option:`--restart-epoch` for details.\n  uint32 restart_epoch = 24;\n\n  // See :option:`--cpuset-threads` for details.\n  bool cpuset_threads = 25;\n\n  // See :option:`--disable-extensions` for details.\n  repeated string disabled_extensions = 28;\n\n  // See :option:`--bootstrap-version` for details.\n  uint32 bootstrap_version = 29;\n\n  // See :option:`--enable-fine-grain-logging` for details.\n  bool enable_fine_grain_logging = 34;\n\n  // See :option:`--socket-path` for details.\n  string socket_path = 35;\n\n  // See :option:`--socket-mode` for details.\n  uint32 socket_mode = 36;\n}\n"
  },
  {
    "path": "api/envoy/admin/v3/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/config/tap/v3/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap]\n\n// The /tap admin request body that is used to configure an active tap session.\nmessage TapRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.TapRequest\";\n\n  // The opaque configuration ID used to match the configuration to a loaded extension.\n  // A tap extension configures a similar opaque ID that is used to match.\n  string config_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The tap configuration to load.\n  config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/admin/v3:pkg\",\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/bootstrap/v4alpha:pkg\",\n        \"//envoy/config/cluster/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/tap/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/certs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"CertsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Certificates]\n\n// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to\n// display certificate information. See :ref:`/certs <operations_admin_interface_certs>` for more\n// information.\nmessage Certificates {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Certificates\";\n\n  // List of certificates known to an Envoy.\n  repeated Certificate certificates = 1;\n}\n\nmessage Certificate {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Certificate\";\n\n  // Details of CA certificate.\n  repeated CertificateDetails ca_cert = 1;\n\n  // Details of Certificate Chain\n  repeated CertificateDetails cert_chain = 2;\n}\n\n// [#next-free-field: 8]\nmessage CertificateDetails {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.CertificateDetails\";\n\n  message OcspDetails {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.CertificateDetails.OcspDetails\";\n\n    // Indicates the time from which the OCSP response is valid.\n    google.protobuf.Timestamp valid_from = 1;\n\n    // Indicates the time at which the OCSP response expires.\n    google.protobuf.Timestamp expiration = 2;\n  }\n\n  // Path of the certificate.\n  string path = 1;\n\n  // Certificate Serial Number.\n  string serial_number = 2;\n\n  // List of Subject Alternate names.\n  repeated SubjectAlternateName subject_alt_names = 3;\n\n  // Minimum of days until 
expiration of certificate and its chain.\n  uint64 days_until_expiration = 4;\n\n  // Indicates the time from which the certificate is valid.\n  google.protobuf.Timestamp valid_from = 5;\n\n  // Indicates the time at which the certificate expires.\n  google.protobuf.Timestamp expiration_time = 6;\n\n  // Details related to the OCSP response associated with this certificate, if any.\n  OcspDetails ocsp_details = 7;\n}\n\nmessage SubjectAlternateName {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v3.SubjectAlternateName\";\n\n  // Subject Alternate Name.\n  oneof name {\n    string dns = 1;\n\n    string uri = 2;\n\n    string ip_address = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/clusters.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/admin/v4alpha/metrics.proto\";\nimport \"envoy/config/cluster/v4alpha/circuit_breaker.proto\";\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/health_check.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"ClustersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Clusters]\n\n// Admin endpoint uses this wrapper for `/clusters` to display cluster status information.\n// See :ref:`/clusters <operations_admin_interface_clusters>` for more information.\nmessage Clusters {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Clusters\";\n\n  // Mapping from cluster name to each cluster's status.\n  repeated ClusterStatus cluster_statuses = 1;\n}\n\n// Details an individual cluster's current status.\n// [#next-free-field: 7]\nmessage ClusterStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ClusterStatus\";\n\n  // Name of the cluster.\n  string name = 1;\n\n  // Denotes whether this cluster was added via API or configured statically.\n  bool added_via_api = 2;\n\n  // The success rate threshold used in the last interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used to calculate the threshold.\n  // If\n  // 
:ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used to calculate the threshold.\n  // The threshold is used to eject hosts based on their success rate. See\n  // :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. Outlier detection is not enabled for this cluster.\n  type.v3.Percent success_rate_ejection_threshold = 3;\n\n  // Mapping from host address to the host's current status.\n  repeated HostStatus host_statuses = 4;\n\n  // The success rate threshold used in the last interval when only locally originated failures were\n  // taken into account and externally originated errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*. The threshold is used to eject hosts based on their success rate.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. 
Outlier detection is not enabled for this cluster.\n  type.v3.Percent local_origin_success_rate_ejection_threshold = 5;\n\n  // :ref:`Circuit breaking <arch_overview_circuit_break>` settings of the cluster.\n  config.cluster.v4alpha.CircuitBreakers circuit_breakers = 6;\n}\n\n// Current state of a particular host.\n// [#next-free-field: 10]\nmessage HostStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.HostStatus\";\n\n  // Address of this host.\n  config.core.v4alpha.Address address = 1;\n\n  // List of stats specific to this host.\n  repeated SimpleMetric stats = 2;\n\n  // The host's current health status.\n  HostHealthStatus health_status = 3;\n\n  // Request success rate for this host over the last calculated interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used in success rate\n  // calculation. If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used in success rate calculation.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.v3.Percent success_rate = 4;\n\n  // The host's weight. If not configured, the value defaults to 1.\n  uint32 weight = 5;\n\n  // The hostname of the host, if applicable.\n  string hostname = 6;\n\n  // The host's priority. 
If not configured, the value defaults to 0 (highest priority).\n  uint32 priority = 7;\n\n  // Request success rate for this host over the last calculated\n  // interval when only locally originated errors are taken into account and externally originated\n  // errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.v3.Percent local_origin_success_rate = 8;\n\n  // locality of the host.\n  config.core.v4alpha.Locality locality = 9;\n}\n\n// Health status for a host.\n// [#next-free-field: 7]\nmessage HostHealthStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.HostHealthStatus\";\n\n  // The host is currently failing active health checks.\n  bool failed_active_health_check = 1;\n\n  // The host is currently considered an outlier and has been ejected.\n  bool failed_outlier_check = 2;\n\n  // The host is currently being marked as degraded through active health checking.\n  bool failed_active_degraded_check = 4;\n\n  // The host has been removed from service discovery, but is being stabilized due to active\n  // health checking.\n  bool pending_dynamic_removal = 5;\n\n  // The host has not yet been health checked.\n  bool pending_active_hc = 6;\n\n  // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported\n  // here.\n  // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.]\n  config.core.v4alpha.HealthStatus eds_health_status = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/config_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/config/bootstrap/v4alpha/bootstrap.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"ConfigDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: ConfigDump]\n\n// The :ref:`/config_dump <operations_admin_interface_config_dump>` admin endpoint uses this wrapper\n// message to maintain and serve arbitrary configuration information from any component in Envoy.\nmessage ConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ConfigDump\";\n\n  // This list is serialized and dumped in its entirety at the\n  // :ref:`/config_dump <operations_admin_interface_config_dump>` endpoint.\n  //\n  // The following configurations are currently supported and will be dumped in the order given\n  // below:\n  //\n  // * *bootstrap*: :ref:`BootstrapConfigDump <envoy_api_msg_admin.v4alpha.BootstrapConfigDump>`\n  // * *clusters*: :ref:`ClustersConfigDump <envoy_api_msg_admin.v4alpha.ClustersConfigDump>`\n  // * *endpoints*:  :ref:`EndpointsConfigDump <envoy_api_msg_admin.v4alpha.EndpointsConfigDump>`\n  // * *listeners*: :ref:`ListenersConfigDump <envoy_api_msg_admin.v4alpha.ListenersConfigDump>`\n  // * *routes*:  :ref:`RoutesConfigDump <envoy_api_msg_admin.v4alpha.RoutesConfigDump>`\n  //\n  // EDS Configuration will only be dumped by using parameter `?include_eds`\n  //\n  // You can filter output with the resource and mask query parameters.\n  // See :ref:`/config_dump?resource={} <operations_admin_interface_config_dump_by_resource>`,\n  // :ref:`/config_dump?mask={} 
<operations_admin_interface_config_dump_by_mask>`,\n  // or :ref:`/config_dump?resource={},mask={}\n  // <operations_admin_interface_config_dump_by_resource_and_mask>` for more information.\n  repeated google.protobuf.Any configs = 1;\n}\n\nmessage UpdateFailureState {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.UpdateFailureState\";\n\n  // What the component configuration would have been if the update had succeeded.\n  google.protobuf.Any failed_configuration = 1;\n\n  // Time of the latest failed update attempt.\n  google.protobuf.Timestamp last_update_attempt = 2;\n\n  // Details about the last failed update attempt.\n  string details = 3;\n}\n\n// This message describes the bootstrap configuration that Envoy was started with. This includes\n// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate\n// the static portions of an Envoy configuration by reusing the output as the bootstrap\n// configuration for another Envoy.\nmessage BootstrapConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.BootstrapConfigDump\";\n\n  config.bootstrap.v4alpha.Bootstrap bootstrap = 1;\n\n  // The timestamp when the BootstrapConfig was last updated.\n  google.protobuf.Timestamp last_updated = 2;\n}\n\n// Envoy's listener manager fills this message with all currently known listeners. 
Listener\n// configuration information can be used to recreate an Envoy configuration by populating all\n// listeners as static listeners or by returning them in a LDS response.\nmessage ListenersConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ListenersConfigDump\";\n\n  // Describes a statically loaded listener.\n  message StaticListener {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ListenersConfigDump.StaticListener\";\n\n    // The listener config.\n    google.protobuf.Any listener = 1;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicListenerState {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ListenersConfigDump.DynamicListenerState\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time\n    // that the listener was loaded. 
In the future, discrete per-listener versions may be supported\n    // by the API.\n    string version_info = 1;\n\n    // The listener config.\n    google.protobuf.Any listener = 2;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // Describes a dynamically loaded listener via the LDS API.\n  // [#next-free-field: 6]\n  message DynamicListener {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ListenersConfigDump.DynamicListener\";\n\n    // The name or unique id of this listener, pulled from the DynamicListenerState config.\n    string name = 1;\n\n    // The listener state for any active listener by this name.\n    // These are listeners that are available to service data plane traffic.\n    DynamicListenerState active_state = 2;\n\n    // The listener state for any warming listener by this name.\n    // These are listeners that are currently undergoing warming in preparation to service data\n    // plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the warming listeners should generally be discarded.\n    DynamicListenerState warming_state = 3;\n\n    // The listener state for any draining listener by this name.\n    // These are listeners that are currently undergoing draining in preparation to stop servicing\n    // data plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the draining listeners should generally be discarded.\n    DynamicListenerState draining_state = 4;\n\n    // Set if the last update failed, cleared after the next successful update.\n    UpdateFailureState error_state = 5;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` in the\n  // last processed LDS discovery response. 
If there are only static bootstrap listeners, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded listener configs.\n  repeated StaticListener static_listeners = 2;\n\n  // State for any warming, active, or draining listeners.\n  repeated DynamicListener dynamic_listeners = 3;\n}\n\n// Envoy's cluster manager fills this message with all currently known clusters. Cluster\n// configuration information can be used to recreate an Envoy configuration by populating all\n// clusters as static clusters or by returning them in a CDS response.\nmessage ClustersConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ClustersConfigDump\";\n\n  // Describes a statically loaded cluster.\n  message StaticCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ClustersConfigDump.StaticCluster\";\n\n    // The cluster config.\n    google.protobuf.Any cluster = 1;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  // Describes a dynamically loaded cluster via the CDS API.\n  message DynamicCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ClustersConfigDump.DynamicCluster\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time\n    // that the cluster was loaded. 
In the future, discrete per-cluster versions may be supported by\n    // the API.\n    string version_info = 1;\n\n    // The cluster config.\n    google.protobuf.Any cluster = 2;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` in the\n  // last processed CDS discovery response. If there are only static bootstrap clusters, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded cluster configs.\n  repeated StaticCluster static_clusters = 2;\n\n  // The dynamically loaded active clusters. These are clusters that are available to service\n  // data plane traffic.\n  repeated DynamicCluster dynamic_active_clusters = 3;\n\n  // The dynamically loaded warming clusters. These are clusters that are currently undergoing\n  // warming in preparation to service data plane traffic. Note that if attempting to recreate an\n  // Envoy configuration from a configuration dump, the warming clusters should generally be\n  // discarded.\n  repeated DynamicCluster dynamic_warming_clusters = 4;\n}\n\n// Envoy's RDS implementation fills this message with all currently loaded routes, as described by\n// their RouteConfiguration objects. 
Static routes that are either defined in the bootstrap configuration\n// or defined inline while configuring listeners are separated from those configured dynamically via RDS.\n// Route configuration information can be used to recreate an Envoy configuration by populating all routes\n// as static routes or by returning them in RDS responses.\nmessage RoutesConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.RoutesConfigDump\";\n\n  message StaticRouteConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.RoutesConfigDump.StaticRouteConfig\";\n\n    // The route config.\n    google.protobuf.Any route_config = 1;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicRouteConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time that\n    // the route configuration was loaded.\n    string version_info = 1;\n\n    // The route config.\n    google.protobuf.Any route_config = 2;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded route configs.\n  repeated StaticRouteConfig static_route_configs = 2;\n\n  // The dynamically loaded route configs.\n  repeated DynamicRouteConfig dynamic_route_configs = 3;\n}\n\n// Envoy's scoped RDS implementation fills this message with all currently loaded route\n// configuration scopes (defined via ScopedRouteConfigurationsSet protos). 
This message lists both\n// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the\n// dynamically obtained scopes via the SRDS API.\nmessage ScopedRoutesConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v3.ScopedRoutesConfigDump\";\n\n  message InlineScopedRouteConfigs {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs\";\n\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 2;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  message DynamicScopedRouteConfigs {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs\";\n\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time that\n    // the scoped routes configuration was loaded.\n    string version_info = 2;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 3;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 4;\n  }\n\n  // The statically loaded scoped route configs.\n  repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1;\n\n  // The dynamically loaded scoped route configs.\n  repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2;\n}\n\n// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS.\nmessage SecretsConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.SecretsConfigDump\";\n\n  // DynamicSecret contains secret information fetched via SDS.\n  message DynamicSecret {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.SecretsConfigDump.DynamicSecret\";\n\n    // The name assigned to the secret.\n    string name = 1;\n\n    // This is the per-resource version information.\n    string version_info = 2;\n\n    // The timestamp when the secret was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 4;\n  }\n\n  // StaticSecret specifies statically loaded secret in bootstrap.\n  message StaticSecret {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.SecretsConfigDump.StaticSecret\";\n\n    // The name assigned to the secret.\n    string name = 1;\n\n    // The timestamp when the secret was last 
updated.\n    google.protobuf.Timestamp last_updated = 2;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 3;\n  }\n\n  // The statically loaded secrets.\n  repeated StaticSecret static_secrets = 1;\n\n  // The dynamically loaded active secrets. These are secrets that are available to service\n  // clusters or listeners.\n  repeated DynamicSecret dynamic_active_secrets = 2;\n\n  // The dynamically loaded warming secrets. These are secrets that are currently undergoing\n  // warming in preparation to service clusters or listeners.\n  repeated DynamicSecret dynamic_warming_secrets = 3;\n}\n\n// Envoy's admin fill this message with all currently known endpoints. Endpoint\n// configuration information can be used to recreate an Envoy configuration by populating all\n// endpoints as static endpoints or by returning them in an EDS response.\nmessage EndpointsConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.EndpointsConfigDump\";\n\n  message StaticEndpointConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig\";\n\n    // The endpoint config.\n    google.protobuf.Any endpoint_config = 1;\n\n    // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicEndpointConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig\";\n\n    // [#not-implemented-hide:] This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time that\n    // the endpoint configuration was loaded.\n    string version_info = 1;\n\n    // The endpoint config.\n    google.protobuf.Any endpoint_config = 2;\n\n    // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded endpoint configs.\n  repeated StaticEndpointConfig static_endpoint_configs = 2;\n\n  // The dynamically loaded endpoint configs.\n  repeated DynamicEndpointConfig dynamic_endpoint_configs = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/init_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"InitDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: InitDump]\n\n// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers,\n// which provides the information of their unready targets.\n// The :ref:`/init_dump <operations_admin_interface_init_dump>` will dump all unready targets information.\nmessage UnreadyTargetsDumps {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.UnreadyTargetsDumps\";\n\n  // Message of unready targets information of an init manager.\n  message UnreadyTargetsDump {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump\";\n\n    // Name of the init manager. Example: \"init_manager_xxx\".\n    string name = 1;\n\n    // Names of unready targets of the init manager. Example: \"target_xxx\".\n    repeated string target_names = 2;\n  }\n\n  // You can choose specific component to dump unready targets with mask query parameter.\n  // See :ref:`/init_dump?mask={} <operations_admin_interface_init_dump_by_mask>` for more information.\n  // The dumps of unready targets of all init managers.\n  repeated UnreadyTargetsDump unready_targets_dumps = 1;\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/listeners.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"ListenersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Listeners]\n\n// Admin endpoint uses this wrapper for `/listeners` to display listener status information.\n// See :ref:`/listeners <operations_admin_interface_listeners>` for more information.\nmessage Listeners {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Listeners\";\n\n  // List of listener statuses.\n  repeated ListenerStatus listener_statuses = 1;\n}\n\n// Details an individual listener's current status.\nmessage ListenerStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ListenerStatus\";\n\n  // Name of the listener\n  string name = 1;\n\n  // The actual local address that the listener is listening on. If a listener was configured\n  // to listen on port 0, then this address has the port that was allocated by the OS.\n  config.core.v4alpha.Address local_address = 2;\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/memory.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"MemoryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Memory]\n\n// Proto representation of the internal memory consumption of an Envoy instance. These represent\n// values extracted from an internal TCMalloc instance. For more information, see the section of the\n// docs entitled [\"Generic Tcmalloc Status\"](https://gperftools.github.io/gperftools/tcmalloc.html).\n// [#next-free-field: 7]\nmessage Memory {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Memory\";\n\n  // The number of bytes allocated by the heap for Envoy. This is an alias for\n  // `generic.current_allocated_bytes`.\n  uint64 allocated = 1;\n\n  // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for\n  // `generic.heap_size`.\n  uint64 heap_size = 2;\n\n  // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and depending on the OS, typically do not count towards physical memory\n  // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`.\n  uint64 pageheap_unmapped = 3;\n\n  // The number of bytes in free, mapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also\n  // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`.\n  uint64 pageheap_free = 4;\n\n  // The amount of memory used by the TCMalloc thread caches (for small objects). 
This is an alias\n  // for `tcmalloc.current_total_thread_cache_bytes`.\n  uint64 total_thread_cache = 5;\n\n  // The number of bytes of the physical memory usage by the allocator. This is an alias for\n  // `generic.total_physical_bytes`.\n  uint64 total_physical_bytes = 6;\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/metrics.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"MetricsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Metrics]\n\n// Proto representation of an Envoy Counter or Gauge value.\nmessage SimpleMetric {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.SimpleMetric\";\n\n  enum Type {\n    COUNTER = 0;\n    GAUGE = 1;\n  }\n\n  // Type of the metric represented.\n  Type type = 1;\n\n  // Current metric value.\n  uint64 value = 2;\n\n  // Name of the metric.\n  string name = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/mutex_stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"MutexStatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: MutexStats]\n\n// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run\n// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex`\n// [docs](https://abseil.io/about/design/mutex#extra-features).\n//\n// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not\n// correspond to core clock frequency. For more information, see the `CycleClock`\n// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).\nmessage MutexStats {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.MutexStats\";\n\n  // The number of individual mutex contentions which have occurred since startup.\n  uint64 num_contentions = 1;\n\n  // The length of the current contention wait cycle.\n  uint64 current_wait_cycles = 2;\n\n  // The lifetime total of all contention wait cycles.\n  uint64 lifetime_wait_cycles = 3;\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/server_info.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"ServerInfoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Server State]\n\n// Proto representation of the value returned by /server_info, containing\n// server version/server status information.\n// [#next-free-field: 8]\nmessage ServerInfo {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ServerInfo\";\n\n  enum State {\n    // Server is live and serving traffic.\n    LIVE = 0;\n\n    // Server is draining listeners in response to external health checks failing.\n    DRAINING = 1;\n\n    // Server has not yet completed cluster manager initialization.\n    PRE_INITIALIZING = 2;\n\n    // Server is running the cluster manager initialization callbacks (e.g., RDS).\n    INITIALIZING = 3;\n  }\n\n  // Server version.\n  string version = 1;\n\n  // State of the server.\n  State state = 2;\n\n  // Uptime since current epoch was started.\n  google.protobuf.Duration uptime_current_epoch = 3;\n\n  // Uptime since the start of the first epoch.\n  google.protobuf.Duration uptime_all_epochs = 4;\n\n  // Hot restart version.\n  string hot_restart_version = 5;\n\n  // Command line options the server is currently running with.\n  CommandLineOptions command_line_options = 6;\n\n  // Populated node identity of this server.\n  config.core.v4alpha.Node node = 7;\n}\n\n// [#next-free-field: 37]\nmessage CommandLineOptions {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.CommandLineOptions\";\n\n  enum 
IpVersion {\n    v4 = 0;\n    v6 = 1;\n  }\n\n  enum Mode {\n    // Validate configs and then serve traffic normally.\n    Serve = 0;\n\n    // Validate configs and exit.\n    Validate = 1;\n\n    // Completely load and initialize the config, and then exit without running the listener loop.\n    InitOnly = 2;\n  }\n\n  enum DrainStrategy {\n    // Gradually discourage connections over the course of the drain period.\n    Gradual = 0;\n\n    // Discourage all connections for the duration of the drain sequence.\n    Immediate = 1;\n  }\n\n  reserved 12, 20, 21;\n\n  reserved \"max_stats\", \"max_obj_name_len\";\n\n  // See :option:`--base-id` for details.\n  uint64 base_id = 1;\n\n  // See :option:`--use-dynamic-base-id` for details.\n  bool use_dynamic_base_id = 31;\n\n  // See :option:`--base-id-path` for details.\n  string base_id_path = 32;\n\n  // See :option:`--concurrency` for details.\n  uint32 concurrency = 2;\n\n  // See :option:`--config-path` for details.\n  string config_path = 3;\n\n  // See :option:`--config-yaml` for details.\n  string config_yaml = 4;\n\n  // See :option:`--allow-unknown-static-fields` for details.\n  bool allow_unknown_static_fields = 5;\n\n  // See :option:`--reject-unknown-dynamic-fields` for details.\n  bool reject_unknown_dynamic_fields = 26;\n\n  // See :option:`--ignore-unknown-dynamic-fields` for details.\n  bool ignore_unknown_dynamic_fields = 30;\n\n  // See :option:`--admin-address-path` for details.\n  string admin_address_path = 6;\n\n  // See :option:`--local-address-ip-version` for details.\n  IpVersion local_address_ip_version = 7;\n\n  // See :option:`--log-level` for details.\n  string log_level = 8;\n\n  // See :option:`--component-log-level` for details.\n  string component_log_level = 9;\n\n  // See :option:`--log-format` for details.\n  string log_format = 10;\n\n  // See :option:`--log-format-escaped` for details.\n  bool log_format_escaped = 27;\n\n  // See :option:`--log-path` for details.\n  string log_path 
= 11;\n\n  // See :option:`--service-cluster` for details.\n  string service_cluster = 13;\n\n  // See :option:`--service-node` for details.\n  string service_node = 14;\n\n  // See :option:`--service-zone` for details.\n  string service_zone = 15;\n\n  // See :option:`--file-flush-interval-msec` for details.\n  google.protobuf.Duration file_flush_interval = 16;\n\n  // See :option:`--drain-time-s` for details.\n  google.protobuf.Duration drain_time = 17;\n\n  // See :option:`--drain-strategy` for details.\n  DrainStrategy drain_strategy = 33;\n\n  // See :option:`--parent-shutdown-time-s` for details.\n  google.protobuf.Duration parent_shutdown_time = 18;\n\n  // See :option:`--mode` for details.\n  Mode mode = 19;\n\n  // See :option:`--disable-hot-restart` for details.\n  bool disable_hot_restart = 22;\n\n  // See :option:`--enable-mutex-tracing` for details.\n  bool enable_mutex_tracing = 23;\n\n  // See :option:`--restart-epoch` for details.\n  uint32 restart_epoch = 24;\n\n  // See :option:`--cpuset-threads` for details.\n  bool cpuset_threads = 25;\n\n  // See :option:`--disable-extensions` for details.\n  repeated string disabled_extensions = 28;\n\n  // See :option:`--bootstrap-version` for details.\n  uint32 bootstrap_version = 29;\n\n  // See :option:`--enable-fine-grain-logging` for details.\n  bool enable_fine_grain_logging = 34;\n\n  // See :option:`--socket-path` for details.\n  string socket_path = 35;\n\n  // See :option:`--socket-mode` for details.\n  uint32 socket_mode = 36;\n}\n"
  },
  {
    "path": "api/envoy/admin/v4alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/config/tap/v4alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tap]\n\n// The /tap admin request body that is used to configure an active tap session.\nmessage TapRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.TapRequest\";\n\n  // The opaque configuration ID used to match the configuration to a loaded extension.\n  // A tap extension configures a similar opaque ID that is used to match.\n  string config_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The tap configuration to load.\n  config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/annotations/BUILD",
    "content": "load(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package()\n"
  },
  {
    "path": "api/envoy/annotations/deprecation.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.annotations;\n\nimport \"google/protobuf/descriptor.proto\";\n\n// [#protodoc-title: Deprecation]\n// Allows tagging proto fields as fatal by default. One Envoy release after\n// deprecation, deprecated fields will be disallowed by default, a state which\n// is reversible with :ref:`runtime overrides <config_runtime_deprecation>`.\n\n// Magic number in this file derived from top 28bit of SHA256 digest of\n// \"envoy.annotation.disallowed_by_default\"\nextend google.protobuf.FieldOptions {\n  bool disallowed_by_default = 189503207;\n}\n\n// Magic number in this file derived from top 28bit of SHA256 digest of\n// \"envoy.annotation.disallowed_by_default_enum\"\nextend google.protobuf.EnumValueOptions {\n  bool disallowed_by_default_enum = 70100853;\n}\n"
  },
  {
    "path": "api/envoy/annotations/resource.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.annotations;\n\nimport \"google/protobuf/descriptor.proto\";\n\n// [#protodoc-title: Resource]\n\n// Magic number in this file derived from top 28bit of SHA256 digest of \"envoy.annotation.resource\".\nextend google.protobuf.ServiceOptions {\n  ResourceAnnotation resource = 265073217;\n}\n\nmessage ResourceAnnotation {\n  // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource\n  // type.\n  string type = 1;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/api/v2/cluster:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"//envoy/api/v2/listener:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/config/listener/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/api/v2/README.md",
    "content": "Protocol buffer definitions for xDS and top-level resource API messages.\n\nPackage group `//envoy/api/v2:friends` enumerates all consumers of the shared\nAPI messages. That includes package envoy.api.v2 itself, which contains several\nxDS definitions. Default visibility for all shared definitions should be set to\n`//envoy/api/v2:friends`.\n\nAdditionally, packages envoy.api.v2.core and envoy.api.v2.auth are also\nconsumed throughout the subpackages of `//envoy/api/v2`.\n"
  },
  {
    "path": "api/envoy/api/v2/auth/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/api/v2/auth/cert.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.auth;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/auth/common.proto\";\nimport public \"envoy/api/v2/auth/secret.proto\";\nimport public \"envoy/api/v2/auth/tls.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.auth\";\noption java_outer_classname = \"CertProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tls.v3\";\n"
  },
  {
    "path": "api/envoy/api/v2/auth/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.auth;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.auth\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tls.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common TLS configuration]\n\nmessage TlsParameters {\n  enum TlsProtocol {\n    // Envoy will choose the optimal TLS version.\n    TLS_AUTO = 0;\n\n    // TLS 1.0\n    TLSv1_0 = 1;\n\n    // TLS 1.1\n    TLSv1_1 = 2;\n\n    // TLS 1.2\n    TLSv1_2 = 3;\n\n    // TLS 1.3\n    TLSv1_3 = 4;\n  }\n\n  // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for\n  // servers.\n  TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for\n  // servers.\n  TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // If specified, the TLS listener will only support the specified `cipher list\n  // <https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#Cipher-suite-configuration>`_\n  // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not\n  // specified, the default list will be used.\n  //\n  // In non-FIPS builds, the default cipher list is:\n  //\n  // .. 
code-block:: none\n  //\n  //   [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]\n  //   [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default cipher list is:\n  //\n  // .. code-block:: none\n  //\n  //   ECDHE-ECDSA-AES128-GCM-SHA256\n  //   ECDHE-RSA-AES128-GCM-SHA256\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  repeated string cipher_suites = 3;\n\n  // If specified, the TLS connection will only support the specified ECDH\n  // curves. If not specified, the default curves will be used.\n  //\n  // In non-FIPS builds, the default curves are:\n  //\n  // .. code-block:: none\n  //\n  //   X25519\n  //   P-256\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default curve is:\n  //\n  // .. code-block:: none\n  //\n  //   P-256\n  repeated string ecdh_curves = 4;\n}\n\n// BoringSSL private key method configuration. The private key methods are used for external\n// (potentially asynchronous) signing and decryption operations. Some use cases for private key\n// methods would be TPM support and TLS acceleration.\nmessage PrivateKeyProvider {\n  // Private key method provider name. 
The name must match a\n  // supported private key method provider type.\n  string provider_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Private key method provider specific configuration.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true];\n\n    google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true];\n  }\n}\n\n// [#next-free-field: 7]\nmessage TlsCertificate {\n  // The TLS certificate chain.\n  core.DataSource certificate_chain = 1;\n\n  // The TLS private key.\n  core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n  // BoringSSL private key method provider. This is an alternative to :ref:`private_key\n  // <envoy_api_field_auth.TlsCertificate.private_key>` field. This can't be\n  // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key\n  // <envoy_api_field_auth.TlsCertificate.private_key>` and\n  // :ref:`private_key_provider\n  // <envoy_api_field_auth.TlsCertificate.private_key_provider>` fields will result in an\n  // error.\n  PrivateKeyProvider private_key_provider = 6;\n\n  // The password to decrypt the TLS private key. If this field is not set, it is assumed that the\n  // TLS private key is not password encrypted.\n  core.DataSource password = 3 [(udpa.annotations.sensitive) = true];\n\n  // [#not-implemented-hide:]\n  core.DataSource ocsp_staple = 4;\n\n  // [#not-implemented-hide:]\n  repeated core.DataSource signed_certificate_timestamp = 5;\n}\n\nmessage TlsSessionTicketKeys {\n  // Keys for encrypting and decrypting TLS session tickets. The\n  // first key in the array contains the key to encrypt all new sessions created by this context.\n  // All keys are candidates for decrypting received tickets. 
This allows for easy rotation of keys\n  // by, for example, putting the new key first, and the previous key second.\n  //\n  // If :ref:`session_ticket_keys <envoy_api_field_auth.DownstreamTlsContext.session_ticket_keys>`\n  // is not specified, the TLS library will still support resuming sessions via tickets, but it will\n  // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts\n  // or on different hosts.\n  //\n  // Each key must contain exactly 80 bytes of cryptographically-secure random data. For\n  // example, the output of ``openssl rand 80``.\n  //\n  // .. attention::\n  //\n  //   Using this feature has serious security considerations and risks. Improper handling of keys\n  //   may result in loss of secrecy in connections, even if ciphers supporting perfect forward\n  //   secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some\n  //   discussion. To minimize the risk, you must:\n  //\n  //   * Keep the session ticket keys at least as secure as your TLS certificate private keys\n  //   * Rotate session ticket keys at least daily, and preferably hourly\n  //   * Always generate keys using a cryptographically-secure random data source\n  repeated core.DataSource keys = 1\n      [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true];\n}\n\n// [#next-free-field: 11]\nmessage CertificateValidationContext {\n  // Peer certificate verification mode.\n  enum TrustChainVerification {\n    // Perform default certificate verification (e.g., against CA / verification lists)\n    VERIFY_TRUST_CHAIN = 0;\n\n    // Connections where the certificate fails verification will be permitted.\n    // For HTTP connections, the result of certificate verification can be used in route matching. 
(\n    // see :ref:`validated <envoy_api_field_route.RouteMatch.TlsContextMatchOptions.validated>` ).\n    ACCEPT_UNTRUSTED = 1;\n  }\n\n  // TLS certificate data containing certificate authority certificates to use in verifying\n  // a presented peer certificate (e.g. server certificate for clusters or client certificate\n  // for listeners). If not specified and a peer certificate is presented it will not be\n  // verified. By default, a client certificate is optional, unless one of the additional\n  // options (:ref:`require_client_certificate\n  // <envoy_api_field_auth.DownstreamTlsContext.require_client_certificate>`,\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>`,\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>`, or\n  // :ref:`match_subject_alt_names\n  // <envoy_api_field_auth.CertificateValidationContext.match_subject_alt_names>`) is also\n  // specified.\n  //\n  // It can optionally contain certificate revocation lists, in which case Envoy will verify\n  // that the presented peer certificate has not been revoked by one of the included CRLs.\n  //\n  // See :ref:`the TLS overview <arch_overview_ssl_enabling_verification>` for a list of common\n  // system CA locations.\n  core.DataSource trusted_ca = 1;\n\n  // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the\n  // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate\n  // matches one of the specified values.\n  //\n  // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -pubkey\n  //     | openssl pkey -pubin -outform DER\n  //     | openssl dgst -sha256 -binary\n  //     | openssl enc -base64\n  //   NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A=\n  //\n  // This is the format used in HTTP Public Key Pinning.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  //\n  // .. attention::\n  //\n  //   This option is preferred over :ref:`verify_certificate_hash\n  //   <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>`,\n  //   because SPKI is tied to a private key, so it doesn't change when the certificate\n  //   is renewed using the same private key.\n  repeated string verify_certificate_spki = 3\n      [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}];\n\n  // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that\n  // the SHA-256 of the DER-encoded presented certificate matches one of the specified values.\n  //\n  // A hex-encoded SHA-256 of the certificate can be generated with the following command:\n  //\n  // .. code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d\" \" -f2\n  //   df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a\n  //\n  // A long hex-encoded and colon-separated SHA-256 (a.k.a. \"fingerprint\") of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d\"=\" -f2\n  //   DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A\n  //\n  // Both of those formats are acceptable.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  repeated string verify_certificate_hash = 2\n      [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}];\n\n  // An optional list of Subject Alternative Names. If specified, Envoy will verify that the\n  // Subject Alternative Name of the presented certificate matches one of the specified values.\n  //\n  // .. attention::\n  //\n  //   Subject Alternative Names are easily spoofable and verifying only them is insecure,\n  //   therefore this option must be used together with :ref:`trusted_ca\n  //   <envoy_api_field_auth.CertificateValidationContext.trusted_ca>`.\n  repeated string verify_subject_alt_name = 4 [deprecated = true];\n\n  // An optional list of Subject Alternative name matchers. Envoy will verify that the\n  // Subject Alternative Name of the presented certificate matches one of the specified matches.\n  //\n  // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be\n  // configured with exact match type in the :ref:`string matcher <envoy_api_msg_type.matcher.StringMatcher>`.\n  // For example if the certificate has \"\\*.example.com\" as DNS SAN entry, to allow only \"api.example.com\",\n  // it should be configured as shown below.\n  //\n  // .. code-block:: yaml\n  //\n  //  match_subject_alt_names:\n  //    exact: \"api.example.com\"\n  //\n  // .. 
attention::\n  //\n  //   Subject Alternative Names are easily spoofable and verifying only them is insecure,\n  //   therefore this option must be used together with :ref:`trusted_ca\n  //   <envoy_api_field_auth.CertificateValidationContext.trusted_ca>`.\n  repeated type.matcher.StringMatcher match_subject_alt_names = 9;\n\n  // [#not-implemented-hide:] Must present a signed time-stamped OCSP response.\n  google.protobuf.BoolValue require_ocsp_staple = 5;\n\n  // [#not-implemented-hide:] Must present signed certificate time-stamp.\n  google.protobuf.BoolValue require_signed_certificate_timestamp = 6;\n\n  // An optional `certificate revocation list\n  // <https://en.wikipedia.org/wiki/Certificate_revocation_list>`_\n  // (in PEM format). If specified, Envoy will verify that the presented peer\n  // certificate has not been revoked by this CRL. If this DataSource contains\n  // multiple CRLs, all of them will be used.\n  core.DataSource crl = 7;\n\n  // If specified, Envoy will not reject expired certificates.\n  bool allow_expired_certificate = 8;\n\n  // Certificate trust chain verification mode.\n  TrustChainVerification trust_chain_verification = 10\n      [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/api/v2/auth/secret.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.auth;\n\nimport \"envoy/api/v2/auth/common.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/config_source.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.auth\";\noption java_outer_classname = \"SecretProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tls.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Secrets configuration]\n\nmessage GenericSecret {\n  // Secret of generic type and is available to filters.\n  core.DataSource secret = 1 [(udpa.annotations.sensitive) = true];\n}\n\nmessage SdsSecretConfig {\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  // When both name and config are specified, then secret can be fetched and/or reloaded via\n  // SDS. When only name is specified, then secret will be loaded from static resources.\n  string name = 1;\n\n  core.ConfigSource sds_config = 2;\n}\n\n// [#next-free-field: 6]\nmessage Secret {\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  string name = 1;\n\n  oneof type {\n    TlsCertificate tls_certificate = 2;\n\n    TlsSessionTicketKeys session_ticket_keys = 3;\n\n    CertificateValidationContext validation_context = 4;\n\n    GenericSecret generic_secret = 5;\n  }\n}\n"
  },
  {
    "path": "api/envoy/api/v2/auth/tls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.auth;\n\nimport \"envoy/api/v2/auth/common.proto\";\nimport \"envoy/api/v2/auth/secret.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.auth\";\noption java_outer_classname = \"TlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tls.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: TLS transport socket]\n// [#extension: envoy.transport_sockets.tls]\n// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS.\n\nmessage UpstreamTlsContext {\n  // Common TLS context settings.\n  //\n  // .. attention::\n  //\n  //   Server certificate verification is not enabled by default. Configure\n  //   :ref:`trusted_ca<envoy_api_field_auth.CertificateValidationContext.trusted_ca>` to enable\n  //   verification.\n  CommonTlsContext common_tls_context = 1;\n\n  // SNI string to use when creating TLS backend connections.\n  string sni = 2 [(validate.rules).string = {max_bytes: 255}];\n\n  // If true, server-initiated TLS renegotiation will be allowed.\n  //\n  // .. 
attention::\n  //\n  //   TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary.\n  bool allow_renegotiation = 3;\n\n  // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets\n  // for TLSv1.2 and older) to store for the purpose of session resumption.\n  //\n  // Defaults to 1, setting this to 0 disables session resumption.\n  google.protobuf.UInt32Value max_session_keys = 4;\n}\n\n// [#next-free-field: 8]\nmessage DownstreamTlsContext {\n  // Common TLS context settings.\n  CommonTlsContext common_tls_context = 1;\n\n  // If specified, Envoy will reject connections without a valid client\n  // certificate.\n  google.protobuf.BoolValue require_client_certificate = 2;\n\n  // If specified, Envoy will reject connections without a valid and matching SNI.\n  // [#not-implemented-hide:]\n  google.protobuf.BoolValue require_sni = 3;\n\n  oneof session_ticket_keys_type {\n    // TLS session ticket key settings.\n    TlsSessionTicketKeys session_ticket_keys = 4;\n\n    // Config for fetching TLS session ticket keys via SDS API.\n    SdsSecretConfig session_ticket_keys_sds_secret_config = 5;\n\n    // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS\n    // server to not issue TLS session tickets for the purposes of stateless TLS session resumption.\n    // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using\n    // the keys specified through either :ref:`session_ticket_keys <envoy_api_field_auth.DownstreamTlsContext.session_ticket_keys>`\n    // or :ref:`session_ticket_keys_sds_secret_config <envoy_api_field_auth.DownstreamTlsContext.session_ticket_keys_sds_secret_config>`.\n    // If this config is set to false and no keys are explicitly configured, the TLS server will issue\n    // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the\n    // implication that 
sessions cannot be resumed across hot restarts or on different hosts.\n    bool disable_stateless_session_resumption = 7;\n  }\n\n  // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session\n  // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2)\n  // <https://tools.ietf.org/html/rfc5077#section-5.6>`\n  // only seconds could be specified (fractional seconds are going to be ignored).\n  google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = {\n    lt {seconds: 4294967296}\n    gte {}\n  }];\n}\n\n// TLS context shared by both client and server TLS contexts.\n// [#next-free-field: 9]\nmessage CommonTlsContext {\n  message CombinedCertificateValidationContext {\n    // How to validate peer certificates.\n    CertificateValidationContext default_validation_context = 1\n        [(validate.rules).message = {required: true}];\n\n    // Config for fetching validation context via SDS API.\n    SdsSecretConfig validation_context_sds_secret_config = 2\n        [(validate.rules).message = {required: true}];\n  }\n\n  reserved 5;\n\n  // TLS protocol versions, cipher suites etc.\n  TlsParameters tls_params = 1;\n\n  // :ref:`Multiple TLS certificates <arch_overview_ssl_cert_select>` can be associated with the\n  // same context to allow both RSA and ECDSA certificates.\n  //\n  // Only a single TLS certificate is supported in client contexts. 
In server contexts, the first\n  // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is\n  // used for clients that support ECDSA.\n  repeated TlsCertificate tls_certificates = 2;\n\n  // Configs for fetching TLS certificates via SDS API.\n  repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6\n      [(validate.rules).repeated = {max_items: 1}];\n\n  oneof validation_context_type {\n    // How to validate peer certificates.\n    CertificateValidationContext validation_context = 3;\n\n    // Config for fetching validation context via SDS API.\n    SdsSecretConfig validation_context_sds_secret_config = 7;\n\n    // Combined certificate validation context holds a default CertificateValidationContext\n    // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic\n    // and default CertificateValidationContext are merged into a new CertificateValidationContext\n    // for validation. This merge is done by Message::MergeFrom(), so dynamic\n    // CertificateValidationContext overwrites singular fields in default\n    // CertificateValidationContext, and concatenates repeated fields to default\n    // CertificateValidationContext, and logical OR is applied to boolean fields.\n    CombinedCertificateValidationContext combined_validation_context = 8;\n  }\n\n  // Supplies the list of ALPN protocols that the listener should expose. In\n  // practice this is likely to be set to one of two values (see the\n  // :ref:`codec_type\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.codec_type>`\n  // parameter in the HTTP connection manager for more information):\n  //\n  // * \"h2,http/1.1\" If the listener is going to support both HTTP/2 and HTTP/1.1.\n  // * \"http/1.1\" If the listener is only going to support HTTP/1.1.\n  //\n  // There is no default for this parameter. 
If empty, Envoy will not expose ALPN.\n  repeated string alpn_protocols = 4;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/cds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/cluster.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"CdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: CDS]\n\n// Return list of all clusters this proxy will load balance to.\nservice ClusterDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.Cluster\";\n\n  rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:clusters\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage CdsDummy {\n}\n"
  },
  {
    "path": "api/envoy/api/v2/cluster/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/api/v2/cluster/circuit_breaker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.cluster;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.cluster\";\noption java_outer_classname = \"CircuitBreakerProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ClusterNS\";\noption ruby_package = \"Envoy.Api.V2.ClusterNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Circuit breakers]\n\n// :ref:`Circuit breaking<arch_overview_circuit_break>` settings can be\n// specified individually for each defined priority.\nmessage CircuitBreakers {\n  // A Thresholds defines CircuitBreaker settings for a\n  // :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`.\n  // [#next-free-field: 9]\n  message Thresholds {\n    message RetryBudget {\n      // Specifies the limit on concurrent retries as a percentage of the sum of active requests and\n      // active pending requests. For example, if there are 100 active requests and the\n      // budget_percent is set to 25, there may be 25 active retries.\n      //\n      // This parameter is optional. Defaults to 20%.\n      type.Percent budget_percent = 1;\n\n      // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the\n      // number of active retries may never go below this number.\n      //\n      // This parameter is optional. 
Defaults to 3.\n      google.protobuf.UInt32Value min_retry_concurrency = 2;\n    }\n\n    // The :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`\n    // the specified CircuitBreaker settings apply to.\n    core.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // The maximum number of connections that Envoy will make to the upstream\n    // cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_connections = 2;\n\n    // The maximum number of pending requests that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_pending_requests = 3;\n\n    // The maximum number of parallel requests that Envoy will make to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_requests = 4;\n\n    // The maximum number of parallel retries that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 3.\n    google.protobuf.UInt32Value max_retries = 5;\n\n    // Specifies a limit on concurrent retries in relation to the number of active requests. This\n    // parameter is optional.\n    //\n    // .. note::\n    //\n    //    If this field is set, the retry budget will override any configured retry circuit\n    //    breaker.\n    RetryBudget retry_budget = 8;\n\n    // If track_remaining is true, then stats will be published that expose\n    // the number of resources remaining until the circuit breakers open. If\n    // not specified, the default is false.\n    //\n    // .. note::\n    //\n    //    If a retry budget is used in lieu of the max_retries circuit breaker,\n    //    the remaining retry resources remaining will not be tracked.\n    bool track_remaining = 6;\n\n    // The maximum number of connection pools per cluster that Envoy will concurrently support at\n    // once. If not specified, the default is unlimited. 
Set this for clusters which create a\n    // large number of connection pools. See\n    // :ref:`Circuit Breaking <arch_overview_circuit_break_cluster_maximum_connection_pools>` for\n    // more details.\n    google.protobuf.UInt32Value max_connection_pools = 7;\n  }\n\n  // If multiple :ref:`Thresholds<envoy_api_msg_cluster.CircuitBreakers.Thresholds>`\n  // are defined with the same :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`,\n  // the first one in the list is used. If no Thresholds is defined for a given\n  // :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`, the default values\n  // are used.\n  repeated Thresholds thresholds = 1;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/cluster/filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.cluster;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.cluster\";\noption java_outer_classname = \"FilterProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ClusterNS\";\noption ruby_package = \"Envoy.Api.V2.ClusterNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Upstream filters]\n// Upstream filters apply to the connections to the upstream cluster hosts.\n\nmessage Filter {\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any typed_config = 2;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/cluster/outlier_detection.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.cluster;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.cluster\";\noption java_outer_classname = \"OutlierDetectionProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ClusterNS\";\noption ruby_package = \"Envoy.Api.V2.ClusterNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Outlier detection]\n\n// See the :ref:`architecture overview <arch_overview_outlier_detection>` for\n// more information on outlier detection.\n// [#next-free-field: 21]\nmessage OutlierDetection {\n  // The number of consecutive 5xx responses or local origin errors that are mapped\n  // to 5xx error codes before a consecutive 5xx ejection\n  // occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_5xx = 1;\n\n  // The time interval between ejection analysis sweeps. This can result in\n  // both new ejections as well as hosts being returned to service. Defaults\n  // to 10000ms or 10s.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}];\n\n  // The base time that a host is ejected for. The real time is equal to the\n  // base time multiplied by the number of times the host has been ejected.\n  // Defaults to 30000ms or 30s.\n  google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];\n\n  // The maximum % of an upstream cluster that can be ejected due to outlier\n  // detection. 
Defaults to 10% but will eject at least one host regardless of the value.\n  google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive 5xx. This setting can be used to disable\n  // ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics. This setting can be used to\n  // disable ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}];\n\n  // The number of hosts in a cluster that must have enough request volume to\n  // detect success rate outliers. If the number of hosts is less than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for any host in the cluster. Defaults to 5.\n  google.protobuf.UInt32Value success_rate_minimum_hosts = 7;\n\n  // The minimum number of total requests that must be collected in one\n  // interval (as defined by the interval duration above) to include this host\n  // in success rate based outlier detection. If the volume is lower than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for that host. Defaults to 100.\n  google.protobuf.UInt32Value success_rate_request_volume = 8;\n\n  // This factor is used to determine the ejection threshold for success rate\n  // outlier ejection. The ejection threshold is the difference between the\n  // mean success rate, and the product of this factor and the standard\n  // deviation of the mean success rate: mean - (stdev *\n  // success_rate_stdev_factor). This factor is divided by a thousand to get a\n  // double. 
That is, if the desired factor is 1.9, the runtime value should\n  // be 1900. Defaults to 1900.\n  google.protobuf.UInt32Value success_rate_stdev_factor = 9;\n\n  // The number of consecutive gateway failures (502, 503, 504 status codes)\n  // before a consecutive gateway failure ejection occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_gateway_failure = 10;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive gateway failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. Defaults to 0.\n  google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // Determines whether to distinguish local origin failures from external errors. If set to true\n  // the following configuration parameters are taken into account:\n  // :ref:`consecutive_local_origin_failure<envoy_api_field_cluster.OutlierDetection.consecutive_local_origin_failure>`,\n  // :ref:`enforcing_consecutive_local_origin_failure<envoy_api_field_cluster.OutlierDetection.enforcing_consecutive_local_origin_failure>`\n  // and\n  // :ref:`enforcing_local_origin_success_rate<envoy_api_field_cluster.OutlierDetection.enforcing_local_origin_success_rate>`.\n  // Defaults to false.\n  bool split_external_local_origin_errors = 12;\n\n  // The number of consecutive locally originated failures before ejection\n  // occurs. Defaults to 5. Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value consecutive_local_origin_failure = 13;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive locally originated failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. 
Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics for locally originated errors.\n  // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The failure percentage to use when determining failure percentage-based outlier detection. If\n  // the failure percentage of a given host is greater than or equal to this value, it will be\n  // ejected. Defaults to 85.\n  google.protobuf.UInt32Value failure_percentage_threshold = 16\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // failure percentage statistics. This setting can be used to disable ejection or to ramp it up\n  // slowly. Defaults to 0.\n  //\n  // [#next-major-version: setting this without setting failure_percentage_threshold should be\n  // invalid in v4.]\n  google.protobuf.UInt32Value enforcing_failure_percentage = 17\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // local-origin failure percentage statistics. This setting can be used to disable ejection or to\n  // ramp it up slowly. 
Defaults to 0.\n  google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection.\n  // If the total number of hosts in the cluster is less than this value, failure percentage-based\n  // ejection will not be performed. Defaults to 5.\n  google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19;\n\n  // The minimum number of total requests that must be collected in one interval (as defined by the\n  // interval duration above) to perform failure percentage-based ejection for this host. If the\n  // volume is lower than this setting, failure percentage-based ejection will not be performed for\n  // this host. Defaults to 50.\n  google.protobuf.UInt32Value failure_percentage_request_volume = 20;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/auth/tls.proto\";\nimport \"envoy/api/v2/cluster/circuit_breaker.proto\";\nimport \"envoy/api/v2/cluster/filter.proto\";\nimport \"envoy/api/v2/cluster/outlier_detection.proto\";\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/api/v2/core/health_check.proto\";\nimport \"envoy/api/v2/core/protocol.proto\";\nimport \"envoy/api/v2/endpoint.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Cluster configuration]\n\n// Configuration for a single upstream cluster.\n// [#next-free-field: 48]\nmessage Cluster {\n  // Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`\n  // for an explanation on each type.\n  enum DiscoveryType {\n    // Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`\n    // for an explanation.\n    STATIC = 0;\n\n    // Refer to the :ref:`strict DNS discovery\n    // type<arch_overview_service_discovery_types_strict_dns>`\n    // for an explanation.\n    STRICT_DNS = 1;\n\n    // Refer to the :ref:`logical DNS discovery\n    // type<arch_overview_service_discovery_types_logical_dns>`\n    // for an explanation.\n    LOGICAL_DNS = 2;\n\n    // 
Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`\n    // for an explanation.\n    EDS = 3;\n\n    // Refer to the :ref:`original destination discovery\n    // type<arch_overview_service_discovery_types_original_destination>`\n    // for an explanation.\n    ORIGINAL_DST = 4;\n  }\n\n  // Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture\n  // overview section for information on each type.\n  enum LbPolicy {\n    // Refer to the :ref:`round robin load balancing\n    // policy<arch_overview_load_balancing_types_round_robin>`\n    // for an explanation.\n    ROUND_ROBIN = 0;\n\n    // Refer to the :ref:`least request load balancing\n    // policy<arch_overview_load_balancing_types_least_request>`\n    // for an explanation.\n    LEAST_REQUEST = 1;\n\n    // Refer to the :ref:`ring hash load balancing\n    // policy<arch_overview_load_balancing_types_ring_hash>`\n    // for an explanation.\n    RING_HASH = 2;\n\n    // Refer to the :ref:`random load balancing\n    // policy<arch_overview_load_balancing_types_random>`\n    // for an explanation.\n    RANDOM = 3;\n\n    // Refer to the :ref:`original destination load balancing\n    // policy<arch_overview_load_balancing_types_original_destination>`\n    // for an explanation.\n    //\n    // .. attention::\n    //\n    //   **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead.\n    //\n    ORIGINAL_DST_LB = 4 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`\n    // for an explanation.\n    MAGLEV = 5;\n\n    // This load balancer type must be specified if the configured cluster provides a cluster\n    // specific load balancer. 
Consult the configured cluster's documentation for whether to set\n    // this option or not.\n    CLUSTER_PROVIDED = 6;\n\n    // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy\n    // <envoy_api_field_Cluster.load_balancing_policy>` field to determine the LB policy.\n    // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field\n    // and instead using the new load_balancing_policy field as the one and only mechanism for\n    // configuring this.]\n    LOAD_BALANCING_POLICY_CONFIG = 7;\n  }\n\n  // When V4_ONLY is selected, the DNS resolver will only perform a lookup for\n  // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will\n  // only perform a lookup for addresses in the IPv6 family. If AUTO is\n  // specified, the DNS resolver will first perform a lookup for addresses in\n  // the IPv6 family and fallback to a lookup for addresses in the IPv4 family.\n  // For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this setting is\n  // ignored.\n  enum DnsLookupFamily {\n    AUTO = 0;\n    V4_ONLY = 1;\n    V6_ONLY = 2;\n  }\n\n  enum ClusterProtocolSelection {\n    // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).\n    // If :ref:`http2_protocol_options <envoy_api_field_Cluster.http2_protocol_options>` are\n    // present, HTTP2 will be used, otherwise HTTP1.1 will be used.\n    USE_CONFIGURED_PROTOCOL = 0;\n\n    // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.\n    USE_DOWNSTREAM_PROTOCOL = 1;\n  }\n\n  // TransportSocketMatch specifies what transport socket config will be used\n  // when the match conditions are satisfied.\n  message TransportSocketMatch {\n    // The name of the match, used in stats generation.\n    string name = 1 [(validate.rules).string = 
{min_len: 1}];\n\n    // Optional endpoint metadata match criteria.\n    // The connection to the endpoint with metadata matching what is set in this field\n    // will use the transport socket configuration specified here.\n    // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match\n    // against the values specified in this field.\n    google.protobuf.Struct match = 2;\n\n    // The configuration of the transport socket.\n    core.TransportSocket transport_socket = 3;\n  }\n\n  // Extended cluster type.\n  message CustomClusterType {\n    // The type of the cluster to instantiate. The name must match a supported cluster type.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Cluster specific configuration which depends on the cluster being instantiated.\n    // See the supported cluster for further documentation.\n    google.protobuf.Any typed_config = 2;\n  }\n\n  // Only valid when discovery type is EDS.\n  message EdsClusterConfig {\n    // Configuration for the source of EDS updates for this Cluster.\n    core.ConfigSource eds_config = 1;\n\n    // Optional alternative to cluster name to present to EDS. This does not\n    // have the same restrictions as cluster name, i.e. it may be arbitrary\n    // length.\n    string service_name = 2;\n  }\n\n  // Optionally divide the endpoints in this cluster into subsets defined by\n  // endpoint metadata and selected by route and weighted cluster metadata.\n  // [#next-free-field: 8]\n  message LbSubsetConfig {\n    // If NO_FALLBACK is selected, a result\n    // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,\n    // any cluster endpoint may be returned (subject to policy, health checks,\n    // etc). 
If DEFAULT_SUBSET is selected, load balancing is performed over the\n    // endpoints matching the values from the default_subset field.\n    enum LbSubsetFallbackPolicy {\n      NO_FALLBACK = 0;\n      ANY_ENDPOINT = 1;\n      DEFAULT_SUBSET = 2;\n    }\n\n    // Specifications for subsets.\n    message LbSubsetSelector {\n      // Allows to override top level fallback policy per selector.\n      enum LbSubsetSelectorFallbackPolicy {\n        // If NOT_DEFINED top level config fallback policy is used instead.\n        NOT_DEFINED = 0;\n\n        // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.\n        NO_FALLBACK = 1;\n\n        // If ANY_ENDPOINT is selected, any cluster endpoint may be returned\n        // (subject to policy, health checks, etc).\n        ANY_ENDPOINT = 2;\n\n        // If DEFAULT_SUBSET is selected, load balancing is performed over the\n        // endpoints matching the values from the default_subset field.\n        DEFAULT_SUBSET = 3;\n\n        // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata\n        // keys reduced to\n        // :ref:`fallback_keys_subset<envoy_api_field_Cluster.LbSubsetConfig.LbSubsetSelector.fallback_keys_subset>`.\n        // It allows for a fallback to a different, less specific selector if some of the keys of\n        // the selector are considered optional.\n        KEYS_SUBSET = 4;\n      }\n\n      // List of keys to match with the weighted cluster metadata.\n      repeated string keys = 1;\n\n      // The behavior used when no endpoint subset matches the selected route's\n      // metadata.\n      LbSubsetSelectorFallbackPolicy fallback_policy = 2\n          [(validate.rules).enum = {defined_only: true}];\n\n      // Subset of\n      // :ref:`keys<envoy_api_field_Cluster.LbSubsetConfig.LbSubsetSelector.keys>` used by\n      // 
:ref:`KEYS_SUBSET<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`\n      // fallback policy.\n      // It has to be a non empty list if KEYS_SUBSET fallback policy is selected.\n      // For any other fallback policy the parameter is not used and should not be set.\n      // Only values also present in\n      // :ref:`keys<envoy_api_field_Cluster.LbSubsetConfig.LbSubsetSelector.keys>` are allowed, but\n      // `fallback_keys_subset` cannot be equal to `keys`.\n      repeated string fallback_keys_subset = 3;\n    }\n\n    // The behavior used when no endpoint subset matches the selected route's\n    // metadata. The value defaults to\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // Specifies the default subset of endpoints used during fallback if\n    // fallback_policy is\n    // :ref:`DEFAULT_SUBSET<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.\n    // Each field in default_subset is\n    // compared to the matching LbEndpoint.Metadata under the *envoy.lb*\n    // namespace. It is valid for no hosts to match, in which case the behavior\n    // is the same as a fallback_policy of\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    google.protobuf.Struct default_subset = 2;\n\n    // For each entry, LbEndpoint.Metadata's\n    // *envoy.lb* namespace is traversed and a subset is created for each unique\n    // combination of key and value. For example:\n    //\n    // .. 
code-block:: json\n    //\n    //   { \"subset_selectors\": [\n    //       { \"keys\": [ \"version\" ] },\n    //       { \"keys\": [ \"stage\", \"hardware_type\" ] }\n    //   ]}\n    //\n    // A subset is matched when the metadata from the selected route and\n    // weighted cluster contains the same keys and values as the subset's\n    // metadata. The same host may appear in multiple subsets.\n    repeated LbSubsetSelector subset_selectors = 3;\n\n    // If true, routing to subsets will take into account the localities and locality weights of the\n    // endpoints when making the routing decision.\n    //\n    // There are some potential pitfalls associated with enabling this feature, as the resulting\n    // traffic split after applying both a subset match and locality weights might be undesirable.\n    //\n    // Consider for example a situation in which you have 50/50 split across two localities X/Y\n    // which have 100 hosts each without subsetting. If the subset LB results in X having only 1\n    // host selected but Y having 100, then a lot more load is being dumped on the single host in X\n    // than originally anticipated in the load balancing assignment delivered via EDS.\n    bool locality_weight_aware = 4;\n\n    // When used with locality_weight_aware, scales the weight of each locality by the ratio\n    // of hosts in the subset vs hosts in the original subset. This aims to even out the load\n    // going to an individual locality if said locality is disproportionately affected by the\n    // subset predicate.\n    bool scale_locality_weight = 5;\n\n    // If true, when a fallback policy is configured and its corresponding subset fails to find\n    // a host this will cause any host to be selected instead.\n    //\n    // This is useful when using the default subset as the fallback policy, given the default\n    // subset might become empty. 
With this option enabled, if that happens the LB will attempt\n    // to select a host from the entire cluster.\n    bool panic_mode_any = 6;\n\n    // If true, metadata specified for a metadata key will be matched against the corresponding\n    // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value\n    // and any of the elements in the list matches the criteria.\n    bool list_as_any = 7;\n  }\n\n  // Specific configuration for the LeastRequest load balancing policy.\n  message LeastRequestLbConfig {\n    // The number of random healthy hosts from which the host with the fewest active requests will\n    // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.\n    google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];\n  }\n\n  // Specific configuration for the :ref:`RingHash<arch_overview_load_balancing_types_ring_hash>`\n  // load balancing policy.\n  message RingHashLbConfig {\n    // The hash function used to hash hosts onto the ketama ring.\n    enum HashFunction {\n      // Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.\n      XX_HASH = 0;\n\n      // Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with\n      // std:hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled\n      // on Linux and not macOS.\n      MURMUR_HASH_2 = 1;\n    }\n\n    reserved 2;\n\n    // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each\n    // provided host) the better the request distribution will reflect the desired weights. Defaults\n    // to 1024 entries, and limited to 8M entries. 
See also\n    // :ref:`maximum_ring_size<envoy_api_field_Cluster.RingHashLbConfig.maximum_ring_size>`.\n    google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];\n\n    // The hash function used to hash hosts onto the ketama ring. The value defaults to\n    // :ref:`XX_HASH<envoy_api_enum_value_Cluster.RingHashLbConfig.HashFunction.XX_HASH>`.\n    HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];\n\n    // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered\n    // to further constrain resource use. See also\n    // :ref:`minimum_ring_size<envoy_api_field_Cluster.RingHashLbConfig.minimum_ring_size>`.\n    google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];\n  }\n\n  // Specific configuration for the\n  // :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`\n  // load balancing policy.\n  message OriginalDstLbConfig {\n    // When true, :ref:`x-envoy-original-dst-host\n    // <config_http_conn_man_headers_x-envoy-original-dst-host>` can be used to override destination\n    // address.\n    //\n    // .. attention::\n    //\n    //   This header isn't sanitized by default, so enabling this feature allows HTTP clients to\n    //   route traffic to arbitrary hosts and/or ports, which may have serious security\n    //   consequences.\n    bool use_http_header = 1;\n  }\n\n  // Common configuration for all load balancer implementations.\n  // [#next-free-field: 8]\n  message CommonLbConfig {\n    // Configuration for :ref:`zone aware routing\n    // <arch_overview_load_balancing_zone_aware_routing>`.\n    message ZoneAwareLbConfig {\n      // Configures percentage of requests that will be considered for zone aware routing\n      // if zone aware routing is configured. 
If not specified, the default is 100%.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      type.Percent routing_enabled = 1;\n\n      // Configures minimum upstream cluster size required for zone aware routing\n      // If upstream cluster size is less than specified, zone aware routing is not performed\n      // even if zone aware routing is configured. If not specified, the default is 6.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      google.protobuf.UInt64Value min_cluster_size = 2;\n\n      // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic\n      // mode<arch_overview_load_balancing_panic_threshold>`. Instead, the cluster will fail all\n      // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a\n      // failing service.\n      bool fail_traffic_on_panic = 3;\n    }\n\n    // Configuration for :ref:`locality weighted load balancing\n    // <arch_overview_load_balancing_locality_weighted_lb>`\n    message LocalityWeightedLbConfig {\n    }\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    message ConsistentHashingLbConfig {\n      // If set to `true`, the cluster will use hostname instead of the resolved\n      // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.\n      bool use_hostname_for_hashing = 1;\n    }\n\n    // Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.\n    // If not specified, the default is 50%.\n    // To disable panic mode, set to 0%.\n    //\n    // .. 
note::\n    //   The specified percent will be truncated to the nearest 1%.\n    type.Percent healthy_panic_threshold = 1;\n\n    oneof locality_config_specifier {\n      ZoneAwareLbConfig zone_aware_lb_config = 2;\n\n      LocalityWeightedLbConfig locality_weighted_lb_config = 3;\n    }\n\n    // If set, all health check/weight/metadata updates that happen within this duration will be\n    // merged and delivered in one shot when the duration expires. The start of the duration is when\n    // the first update happens. This is useful for big clusters, with potentially noisy deploys\n    // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes\n    // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new\n    // cluster). Please always keep in mind that the use of sandbox technologies may change this\n    // behavior.\n    //\n    // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge\n    // window to 0.\n    //\n    // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is\n    // because merging those updates isn't currently safe. See\n    // https://github.com/envoyproxy/envoy/pull/3941.\n    google.protobuf.Duration update_merge_window = 4;\n\n    // If set to true, Envoy will not consider new hosts when computing load balancing weights until\n    // they have been health checked for the first time. 
This will have no effect unless\n    // active health checking is also configured.\n    //\n    // Ignoring a host means that for any load balancing calculations that adjust weights based\n    // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and\n    // panic mode) Envoy will exclude these hosts in the denominator.\n    //\n    // For example, with hosts in two priorities P0 and P1, where P0 looks like\n    // {healthy, unhealthy (new), unhealthy (new)}\n    // and where P1 looks like\n    // {healthy, healthy}\n    // all traffic will still hit P0, as 1 / (3 - 2) = 1.\n    //\n    // Enabling this will allow scaling up the number of hosts for a given cluster without entering\n    // panic mode or triggering priority spillover, assuming the hosts pass the first health check.\n    //\n    // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not\n    // contribute to the calculation when deciding whether panic mode is enabled or not.\n    bool ignore_new_hosts_until_first_hc = 5;\n\n    // If set to `true`, the cluster manager will drain all existing\n    // connections to upstream hosts whenever hosts are added or removed from the cluster.\n    bool close_connections_on_host_set_change = 6;\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    ConsistentHashingLbConfig consistent_hashing_lb_config = 7;\n  }\n\n  message RefreshRate {\n    // Specifies the base interval between refreshes. This parameter is required and must be greater\n    // than zero and less than\n    // :ref:`max_interval <envoy_api_field_Cluster.RefreshRate.max_interval>`.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {nanos: 1000000}\n    }];\n\n    // Specifies the maximum interval between refreshes. 
This parameter is optional, but must be\n    // greater than or equal to the\n    // :ref:`base_interval <envoy_api_field_Cluster.RefreshRate.base_interval>`  if set. The default\n    // is 10 times the :ref:`base_interval <envoy_api_field_Cluster.RefreshRate.base_interval>`.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}];\n  }\n\n  reserved 12, 15;\n\n  // Configuration to use different transport sockets for different endpoints.\n  // The entry of *envoy.transport_socket_match* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_endpoint.LbEndpoint.metadata>`\n  // is used to match against the transport sockets as they appear in the list. The first\n  // :ref:`match <envoy_api_msg_Cluster.TransportSocketMatch>` is used.\n  // For example, with the following match\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"enableMTLS\"\n  //    match:\n  //      acceptMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... } # tls socket configuration\n  //  - name: \"defaultToPlaintext\"\n  //    match: {}\n  //    transport_socket:\n  //      name: envoy.transport_sockets.raw_buffer\n  //\n  // Connections to the endpoints whose metadata value under *envoy.transport_socket_match*\n  // having \"acceptMTLS\"/\"true\" key/value pair use the \"enableMTLS\" socket configuration.\n  //\n  // If a :ref:`socket match <envoy_api_msg_Cluster.TransportSocketMatch>` with empty match\n  // criteria is provided, that always match any endpoint. 
For example, the \"defaultToPlaintext\"\n  // socket match in case above.\n  //\n  // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any\n  // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or\n  // *transport_socket* specified in this cluster.\n  //\n  // This field allows gradual and flexible transport socket configuration changes.\n  //\n  // The metadata of endpoints in EDS can indicate transport socket capabilities. For example,\n  // an endpoint's metadata can have two key value pairs as \"acceptMTLS\": \"true\",\n  // \"acceptPlaintext\": \"true\". While some other endpoints, only accepting plaintext traffic\n  // has \"acceptPlaintext\": \"true\" metadata information.\n  //\n  // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS\n  // traffic for endpoints with \"acceptMTLS\": \"true\", by adding a corresponding\n  // *TransportSocketMatch* in this field. Other client Envoys receive CDS without\n  // *transport_socket_match* set, and still send plain text traffic to the same cluster.\n  //\n  // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]\n  repeated TransportSocketMatch transport_socket_matches = 43;\n\n  // Supplies the name of the cluster which must be unique across all clusters.\n  // The cluster name is used when emitting\n  // :ref:`statistics <config_cluster_manager_cluster_stats>` if :ref:`alt_stat_name\n  // <envoy_api_field_Cluster.alt_stat_name>` is not provided.\n  // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // An optional alternative to the cluster name to be used while emitting stats.\n  // Any ``:`` in the name will be converted to ``_`` when emitting statistics. 
This should not be\n  // confused with :ref:`Router Filter Header\n  // <config_http_filters_router_x-envoy-upstream-alt-stat-name>`.\n  string alt_stat_name = 28;\n\n  oneof cluster_discovery_type {\n    // The :ref:`service discovery type <arch_overview_service_discovery_types>`\n    // to use for resolving the cluster.\n    DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];\n\n    // The custom cluster type.\n    CustomClusterType cluster_type = 38;\n  }\n\n  // Configuration to use for EDS updates for the Cluster.\n  EdsClusterConfig eds_cluster_config = 3;\n\n  // The timeout for new network connections to hosts in the cluster.\n  google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];\n\n  // Soft limit on size of the cluster’s connections read and write buffers. If\n  // unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5;\n\n  // The :ref:`load balancer type <arch_overview_load_balancing_types>` to use\n  // when picking a host in the cluster.\n  LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}];\n\n  // If the service discovery type is\n  // :ref:`STATIC<envoy_api_enum_value_Cluster.DiscoveryType.STATIC>`,\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // then hosts is required.\n  //\n  // .. attention::\n  //\n  //   **This field is deprecated**. 
Set the\n  //   :ref:`load_assignment<envoy_api_field_Cluster.load_assignment>` field instead.\n  //\n  repeated core.Address hosts = 7 [deprecated = true];\n\n  // Setting this is required for specifying members of\n  // :ref:`STATIC<envoy_api_enum_value_Cluster.DiscoveryType.STATIC>`,\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>` clusters.\n  // This field supersedes the *hosts* field in the v2 API.\n  //\n  // .. attention::\n  //\n  //   Setting this allows non-EDS cluster types to contain embedded EDS equivalent\n  //   :ref:`endpoint assignments<envoy_api_msg_ClusterLoadAssignment>`.\n  //\n  ClusterLoadAssignment load_assignment = 33;\n\n  // Optional :ref:`active health checking <arch_overview_health_checking>`\n  // configuration for the cluster. If no\n  // configuration is specified no health checking will be done and all cluster\n  // members will be considered healthy at all times.\n  repeated core.HealthCheck health_checks = 8;\n\n  // Optional maximum requests for a single upstream connection. This parameter\n  // is respected by both the HTTP/1.1 and HTTP/2 connection pool\n  // implementations. If not specified, there is no limit. Setting this\n  // parameter to 1 will effectively disable keep alive.\n  google.protobuf.UInt32Value max_requests_per_connection = 9;\n\n  // Optional :ref:`circuit breaking <arch_overview_circuit_break>` for the cluster.\n  cluster.CircuitBreakers circuit_breakers = 10;\n\n  // The TLS configuration for connections to the upstream cluster.\n  //\n  // .. attention::\n  //\n  //   **This field is deprecated**. Use `transport_socket` with name `tls` instead. 
If both are\n  //   set, `transport_socket` takes priority.\n  auth.UpstreamTlsContext tls_context = 11\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // HTTP protocol options that are applied only to upstream HTTP connections.\n  // These options apply to all HTTP versions.\n  core.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46;\n\n  // Additional options when handling HTTP requests upstream. These options will be applicable to\n  // both HTTP1 and HTTP2 requests.\n  core.HttpProtocolOptions common_http_protocol_options = 29;\n\n  // Additional options when handling HTTP1 requests.\n  core.Http1ProtocolOptions http_protocol_options = 13;\n\n  // Even if default HTTP2 protocol options are desired, this field must be\n  // set so that Envoy will assume that the upstream supports HTTP/2 when\n  // making new HTTP connection pool connections. Currently, Envoy only\n  // supports prior knowledge for upstream connections. Even if TLS is used\n  // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2\n  // connections to happen over plain text.\n  core.Http2ProtocolOptions http2_protocol_options = 14;\n\n  // The extension_protocol_options field is used to provide extension-specific protocol options\n  // for upstream connections. The key should match the extension filter name, such as\n  // \"envoy.filters.network.thrift_proxy\". See the extension's documentation for details on\n  // specific options.\n  map<string, google.protobuf.Struct> extension_protocol_options = 35\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // The extension_protocol_options field is used to provide extension-specific protocol options\n  // for upstream connections. The key should match the extension filter name, such as\n  // \"envoy.filters.network.thrift_proxy\". 
See the extension's documentation for details on\n  // specific options.\n  map<string, google.protobuf.Any> typed_extension_protocol_options = 36;\n\n  // If the DNS refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used as the cluster’s DNS refresh\n  // rate. The value configured must be at least 1ms. If this setting is not specified, the\n  // value defaults to 5000ms. For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  google.protobuf.Duration dns_refresh_rate = 16\n      [(validate.rules).duration = {gt {nanos: 1000000}}];\n\n  // If the DNS failure refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is\n  // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types\n  // other than :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>` this setting is\n  // ignored.\n  RefreshRate dns_failure_refresh_rate = 44;\n\n  // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true,\n  // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS\n  // resolution.\n  bool respect_dns_ttl = 39;\n\n  // The DNS IP address resolution policy. 
If this setting is not specified, the\n  // value defaults to\n  // :ref:`AUTO<envoy_api_enum_value_Cluster.DnsLookupFamily.AUTO>`.\n  DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}];\n\n  // If DNS resolvers are specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used to specify the cluster’s dns resolvers.\n  // If this setting is not specified, the value defaults to the default\n  // resolver, which uses /etc/resolv.conf for configuration. For cluster types\n  // other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple's API only allows overriding DNS resolvers via system settings.\n  repeated core.Address dns_resolvers = 18;\n\n  // [#next-major-version: Reconcile DNS options in a single message.]\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. 
Apple's API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 45;\n\n  // If specified, outlier detection will be enabled for this upstream cluster.\n  // Each of the configuration values can be overridden via\n  // :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.\n  cluster.OutlierDetection outlier_detection = 19;\n\n  // The interval for removing stale hosts from a cluster type\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_Cluster.DiscoveryType.ORIGINAL_DST>`.\n  // Hosts are considered stale if they have not been used\n  // as upstream destinations during this interval. New hosts are added\n  // to original destination clusters on demand as new connections are\n  // redirected to Envoy, causing the number of hosts in the cluster to\n  // grow over time. Hosts that are not stale (they are actively used as\n  // destinations) are kept in the cluster, which allows connections to\n  // them remain open, saving the latency that would otherwise be spent\n  // on opening new connections. If this setting is not specified, the\n  // value defaults to 5000ms. For cluster types other than\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_Cluster.DiscoveryType.ORIGINAL_DST>`\n  // this setting is ignored.\n  google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This overrides any bind_config specified in the bootstrap proto.\n  // If the address and port are empty, no bind will be performed.\n  core.BindConfig upstream_bind_config = 21;\n\n  // Configuration for load balancing subsetting.\n  LbSubsetConfig lb_subset_config = 22;\n\n  // Optional configuration for the load balancing algorithm selected by\n  // LbPolicy. 
Currently only\n  // :ref:`RING_HASH<envoy_api_enum_value_Cluster.LbPolicy.RING_HASH>` and\n  // :ref:`LEAST_REQUEST<envoy_api_enum_value_Cluster.LbPolicy.LEAST_REQUEST>`\n  // has additional configuration options.\n  // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding\n  // LbPolicy will generate an error at runtime.\n  oneof lb_config {\n    // Optional configuration for the Ring Hash load balancing policy.\n    RingHashLbConfig ring_hash_lb_config = 23;\n\n    // Optional configuration for the Original Destination load balancing policy.\n    OriginalDstLbConfig original_dst_lb_config = 34;\n\n    // Optional configuration for the LeastRequest load balancing policy.\n    LeastRequestLbConfig least_request_lb_config = 37;\n  }\n\n  // Common configuration for all load balancer implementations.\n  CommonLbConfig common_lb_config = 27;\n\n  // Optional custom transport socket implementation to use for upstream connections.\n  // To setup TLS, set a transport socket with name `tls` and\n  // :ref:`UpstreamTlsContexts <envoy_api_msg_auth.UpstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.TransportSocket transport_socket = 24;\n\n  // The Metadata field can be used to provide additional information about the\n  // cluster. It can be used for stats, logging, and varying filter behavior.\n  // Fields should use reverse DNS notation to denote which entity within Envoy\n  // will need the information. 
For instance, if the metadata is intended for\n  // the Router filter, the filter name should be specified as *envoy.filters.http.router*.\n  core.Metadata metadata = 25;\n\n  // Determines how Envoy selects the protocol used to speak to upstream hosts.\n  ClusterProtocolSelection protocol_selection = 26;\n\n  // Optional options for upstream connections.\n  UpstreamConnectionOptions upstream_connection_options = 30;\n\n  // If an upstream host becomes unhealthy (as determined by the configured health checks\n  // or outlier detection), immediately close all connections to the failed host.\n  //\n  // .. note::\n  //\n  //   This is currently only supported for connections created by tcp_proxy.\n  //\n  // .. note::\n  //\n  //   The current implementation of this feature closes all connections immediately when\n  //   the unhealthy status is detected. If there are a large number of connections open\n  //   to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of\n  //   time exclusively closing these connections, and not processing any other traffic.\n  bool close_connections_on_host_health_failure = 31;\n\n  // If set to true, Envoy will ignore the health value of a host when processing its removal\n  // from service discovery. This means that if active health checking is used, Envoy will *not*\n  // wait for the endpoint to go unhealthy before removing it.\n  bool drain_connections_on_host_removal = 32\n      [(udpa.annotations.field_migrate).rename = \"ignore_health_on_host_removal\"];\n\n  // An (optional) network filter chain, listed in the order the filters should be applied.\n  // The chain will be applied to all outgoing connections that Envoy makes to the upstream\n  // servers of this cluster.\n  repeated cluster.Filter filters = 40;\n\n  // [#not-implemented-hide:] New mechanism for LB policy configuration. 
Used only if the\n  // :ref:`lb_policy<envoy_api_field_Cluster.lb_policy>` field has the value\n  // :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>`.\n  LoadBalancingPolicy load_balancing_policy = 41;\n\n  // [#not-implemented-hide:]\n  // If present, tells the client where to send load reports via LRS. If not present, the\n  // client will fall back to a client-side default, which may be either (a) don't send any\n  // load reports or (b) send load reports for all clusters to a single default server\n  // (which may be configured in the bootstrap file).\n  //\n  // Note that if multiple clusters point to the same LRS server, the client may choose to\n  // create a separate stream for each cluster or it may choose to coalesce the data for\n  // multiple clusters onto a single stream. Either way, the client must make sure to send\n  // the data for any given cluster on no more than one stream.\n  //\n  // [#next-major-version: In the v3 API, we should consider restructuring this somehow,\n  // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation\n  // from the LRS stream here.]\n  core.ConfigSource lrs_server = 42;\n\n  // If track_timeout_budgets is true, the :ref:`timeout budget histograms\n  // <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each\n  // request. These show what percentage of a request's per try and global timeout was used. A value\n  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value\n  // of 100 would indicate that the request took the entirety of the timeout given to it.\n  bool track_timeout_budgets = 47;\n}\n\n// [#not-implemented-hide:] Extensible load balancing policy configuration.\n//\n// Every LB policy defined via this mechanism will be identified via a unique name using reverse\n// DNS notation. 
If the policy needs configuration parameters, it must define a message for its\n// own configuration, which will be stored in the config field. The name of the policy will tell\n// clients which type of message they should expect to see in the config field.\n//\n// Note that there are cases where it is useful to be able to independently select LB policies\n// for choosing a locality and for choosing an endpoint within that locality. For example, a\n// given deployment may always use the same policy to choose the locality, but for choosing the\n// endpoint within the locality, some clusters may use weighted-round-robin, while others may\n// use some sort of session-based balancing.\n//\n// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a\n// child LB policy for each locality. For each request, the parent chooses the locality and then\n// delegates to the child policy for that locality to choose the endpoint within the locality.\n//\n// To facilitate this, the config message for the top-level LB policy may include a field of\n// type LoadBalancingPolicy that specifies the child policy.\nmessage LoadBalancingPolicy {\n  message Policy {\n    // Required. The name of the LB policy.\n    string name = 1;\n\n    // Optional config for the LB policy.\n    // No more than one of these two fields may be populated.\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Each client will iterate over the list in order and stop at the first policy that it\n  // supports. 
This provides a mechanism for starting to use new LB policies that are not yet\n  // supported by all clients.\n  repeated Policy policies = 1;\n}\n\n// An extensible structure containing the address Envoy should bind to when\n// establishing upstream connections.\nmessage UpstreamBindConfig {\n  // The address Envoy should bind to when establishing upstream connections.\n  core.Address source_address = 1;\n}\n\nmessage UpstreamConnectionOptions {\n  // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.\n  core.TcpKeepalive tcp_keepalive = 1;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/api/v2/core/address.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/socket_option.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"AddressProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Network addresses]\n\nmessage Pipe {\n  // Unix Domain Socket path. On Linux, paths starting with '@' will use the\n  // abstract namespace. The starting '@' is replaced by a null byte by Envoy.\n  // Paths starting with '@' will result in an error in environments other than\n  // Linux.\n  string path = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The mode for the Pipe. Not applicable for abstract sockets.\n  uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}];\n}\n\n// [#next-free-field: 7]\nmessage SocketAddress {\n  enum Protocol {\n    TCP = 0;\n    UDP = 1;\n  }\n\n  Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The address for this socket. :ref:`Listeners <config_listeners>` will bind\n  // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::``\n  // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented:\n  // It is possible to distinguish a Listener address via the prefix/suffix matching\n  // in :ref:`FilterChainMatch <envoy_api_msg_listener.FilterChainMatch>`.] When used\n  // within an upstream :ref:`BindConfig <envoy_api_msg_core.BindConfig>`, the address\n  // controls the source address of outbound connections. 
For :ref:`clusters\n  // <envoy_api_msg_Cluster>`, the cluster type determines whether the\n  // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS\n  // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized\n  // via :ref:`resolver_name <envoy_api_field_core.SocketAddress.resolver_name>`.\n  string address = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof port_specifier {\n    option (validate.required) = true;\n\n    uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}];\n\n    // This is only valid if :ref:`resolver_name\n    // <envoy_api_field_core.SocketAddress.resolver_name>` is specified below and the\n    // named resolver is capable of named port resolution.\n    string named_port = 4;\n  }\n\n  // The name of the custom resolver. This must have been registered with Envoy. If\n  // this is empty, a context dependent default applies. If the address is a concrete\n  // IP address, no resolution will occur. If address is a hostname this\n  // should be set for resolution other than DNS. Specifying a custom resolver with\n  // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime.\n  string resolver_name = 5;\n\n  // When binding to an IPv6 address above, this enables `IPv4 compatibility\n  // <https://tools.ietf.org/html/rfc3493#page-11>`_. Binding to ``::`` will\n  // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into\n  // IPv6 space as ``::FFFF:<IPv4-address>``.\n  bool ipv4_compat = 6;\n}\n\nmessage TcpKeepalive {\n  // Maximum number of keepalive probes to send without response before deciding\n  // the connection is dead. Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 9.)\n  google.protobuf.UInt32Value keepalive_probes = 1;\n\n  // The number of seconds a connection needs to be idle before keep-alive probes\n  // start being sent. 
Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 7200s (i.e., 2 hours.)\n  google.protobuf.UInt32Value keepalive_time = 2;\n\n  // The number of seconds between keep-alive probes. Default is to use the OS\n  // level configuration (unless overridden, Linux defaults to 75s.)\n  google.protobuf.UInt32Value keepalive_interval = 3;\n}\n\nmessage BindConfig {\n  // The address to bind to when creating a socket.\n  SocketAddress source_address = 1 [(validate.rules).message = {required: true}];\n\n  // Whether to set the *IP_FREEBIND* option when creating the socket. When this\n  // flag is set to true, allows the :ref:`source_address\n  // <envoy_api_field_UpstreamBindConfig.source_address>` to be an IP address\n  // that is not configured on the system running Envoy. When this flag is set\n  // to false, the option *IP_FREEBIND* is disabled on the socket. When this\n  // flag is not set (default), the socket is not modified, i.e. the option is\n  // neither enabled nor disabled.\n  google.protobuf.BoolValue freebind = 2;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated SocketOption socket_options = 3;\n}\n\n// Addresses specify either a logical or physical address and port, which are\n// used to tell Envoy where to bind/listen, connect to upstream and find\n// management servers.\nmessage Address {\n  oneof address {\n    option (validate.required) = true;\n\n    SocketAddress socket_address = 1;\n\n    Pipe pipe = 2;\n  }\n}\n\n// CidrRange specifies an IP Address and a prefix length to construct\n// the subnet mask for a `CIDR <https://tools.ietf.org/html/rfc4632>`_ range.\nmessage CidrRange {\n  // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.\n  string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Length of prefix, e.g. 0, 32.\n  google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/backoff.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"BackoffProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Backoff Strategy]\n\n// Configuration defining a jittered exponential back off strategy.\nmessage BackoffStrategy {\n  // The base interval to be used for the next back off computation. It should\n  // be greater than zero and less than or equal to :ref:`max_interval\n  // <envoy_api_field_core.BackoffStrategy.max_interval>`.\n  google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // Specifies the maximum interval between retries. This parameter is optional,\n  // but must be greater than or equal to the :ref:`base_interval\n  // <envoy_api_field_core.BackoffStrategy.base_interval>` if set. The default\n  // is 10 times the :ref:`base_interval\n  // <envoy_api_field_core.BackoffStrategy.base_interval>`.\n  google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/base.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/backoff.proto\";\nimport \"envoy/api/v2/core/http_uri.proto\";\nimport \"envoy/type/percent.proto\";\nimport \"envoy/type/semantic_version.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\nimport public \"envoy/api/v2/core/socket_option.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"BaseProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common types]\n\n// Envoy supports :ref:`upstream priority routing\n// <arch_overview_http_routing_priority>` both at the route and the virtual\n// cluster level. The current priority implementation uses different connection\n// pool and circuit breaking settings for each priority level. This means that\n// even for HTTP/2 requests, two physical connections will be used to an\n// upstream host. 
In the future Envoy will likely support true HTTP/2 priority\n// over a single upstream connection.\nenum RoutingPriority {\n  DEFAULT = 0;\n  HIGH = 1;\n}\n\n// HTTP request method.\nenum RequestMethod {\n  METHOD_UNSPECIFIED = 0;\n  GET = 1;\n  HEAD = 2;\n  POST = 3;\n  PUT = 4;\n  DELETE = 5;\n  CONNECT = 6;\n  OPTIONS = 7;\n  TRACE = 8;\n  PATCH = 9;\n}\n\n// Identifies the direction of the traffic relative to the local Envoy.\nenum TrafficDirection {\n  // Default option is unspecified.\n  UNSPECIFIED = 0;\n\n  // The transport is used for incoming traffic.\n  INBOUND = 1;\n\n  // The transport is used for outgoing traffic.\n  OUTBOUND = 2;\n}\n\n// Identifies location of where either Envoy runs or where upstream hosts run.\nmessage Locality {\n  // Region this :ref:`zone <envoy_api_field_core.Locality.zone>` belongs to.\n  string region = 1;\n\n  // Defines the local service zone where Envoy is running. Though optional, it\n  // should be set if discovery service routing is used and the discovery\n  // service exposes :ref:`zone data <envoy_api_field_endpoint.LocalityLbEndpoints.locality>`,\n  // either in this message or via :option:`--service-zone`. The meaning of zone\n  // is context dependent, e.g. `Availability Zone (AZ)\n  // <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html>`_\n  // on AWS, `Zone <https://cloud.google.com/compute/docs/regions-zones/>`_ on\n  // GCP, etc.\n  string zone = 2;\n\n  // When used for locality of upstream hosts, this field further splits zone\n  // into smaller chunks of sub-zones so they can be load balanced\n  // independently.\n  string sub_zone = 3;\n}\n\n// BuildVersion combines SemVer version of extension with free-form build information\n// (i.e. 
'alpha', 'private-build') as a set of strings.\nmessage BuildVersion {\n  // SemVer version of extension.\n  type.SemanticVersion version = 1;\n\n  // Free-form build information.\n  // Envoy defines several well known keys in the source/common/version/version.h file\n  google.protobuf.Struct metadata = 2;\n}\n\n// Version and identification for an Envoy extension.\n// [#next-free-field: 6]\nmessage Extension {\n  // This is the name of the Envoy filter as specified in the Envoy\n  // configuration, e.g. envoy.filters.http.router, com.acme.widget.\n  string name = 1;\n\n  // Category of the extension.\n  // Extension category names use reverse DNS notation. For instance \"envoy.filters.listener\"\n  // for Envoy's built-in listener filters or \"com.acme.filters.http\" for HTTP filters from\n  // acme.com vendor.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.]\n  string category = 2;\n\n  // [#not-implemented-hide:] Type descriptor of extension configuration proto.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.]\n  // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.]\n  string type_descriptor = 3;\n\n  // The version is a property of the extension and maintained independently\n  // of other extensions and the Envoy API.\n  // This field is not set when extension did not provide version information.\n  BuildVersion version = 4;\n\n  // Indicates that the extension is present but was disabled via dynamic configuration.\n  bool disabled = 5;\n}\n\n// Identifies a specific Envoy instance. The node identifier is presented to the\n// management server, which may use this identifier to distinguish per Envoy\n// configuration for serving.\n// [#next-free-field: 12]\nmessage Node {\n  // An opaque node identifier for the Envoy node. This also provides the local\n  // service node name. 
It should be set if any of the following features are\n  // used: :ref:`statsd <arch_overview_statistics>`, :ref:`CDS\n  // <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-node`.\n  string id = 1;\n\n  // Defines the local service cluster name where Envoy is running. Though\n  // optional, it should be set if any of the following features are used:\n  // :ref:`statsd <arch_overview_statistics>`, :ref:`health check cluster\n  // verification\n  // <envoy_api_field_core.HealthCheck.HttpHealthCheck.service_name_matcher>`,\n  // :ref:`runtime override directory <envoy_api_msg_config.bootstrap.v2.Runtime>`,\n  // :ref:`user agent addition\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.add_user_agent>`,\n  // :ref:`HTTP global rate limiting <config_http_filters_rate_limit>`,\n  // :ref:`CDS <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-cluster`.\n  string cluster = 2;\n\n  // Opaque metadata extending the node identifier. Envoy will pass this\n  // directly to the management server.\n  google.protobuf.Struct metadata = 3;\n\n  // Locality specifying where the Envoy instance is running.\n  Locality locality = 4;\n\n  // This is motivated by informing a management server during canary which\n  // version of Envoy is being tested in a heterogeneous fleet. This will be set\n  // by Envoy in management server RPCs.\n  // This field is deprecated in favor of the user_agent_name and user_agent_version values.\n  string build_version = 5 [deprecated = true];\n\n  // Free-form string that identifies the entity requesting config.\n  // E.g. \"envoy\" or \"grpc\"\n  string user_agent_name = 6;\n\n  oneof user_agent_version_type {\n    // Free-form string that identifies the version of the entity requesting config.\n    // E.g. 
\"1.12.2\" or \"abcd1234\", or \"SpecialEnvoyBuild\"\n    string user_agent_version = 7;\n\n    // Structured version of the entity requesting config.\n    BuildVersion user_agent_build_version = 8;\n  }\n\n  // List of extensions and their versions supported by the node.\n  repeated Extension extensions = 9;\n\n  // Client feature support list. These are well known features described\n  // in the Envoy API repository for a given major version of an API. Client features\n  // use reverse DNS naming scheme, for example `com.acme.feature`.\n  // See :ref:`the list of features <client_features>` that xDS client may\n  // support.\n  repeated string client_features = 10;\n\n  // Known listening ports on the node as a generic hint to the management server\n  // for filtering :ref:`listeners <config_listeners>` to be returned. For example,\n  // if there is a listener bound to port 80, the list can optionally contain the\n  // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint.\n  repeated Address listening_addresses = 11;\n}\n\n// Metadata provides additional inputs to filters based on matched listeners,\n// filter chains, routes and endpoints. It is structured as a map, usually from\n// filter name (in reverse DNS format) to metadata specific to the filter. Metadata\n// key-values for a filter are merged as connection and request handling occurs,\n// with later values for the same key overriding earlier values.\n//\n// An example use of metadata is providing additional values to\n// http_connection_manager in the envoy.http_connection_manager.access_log\n// namespace.\n//\n// Another example use of metadata is to per service config info in cluster metadata, which may get\n// consumed by multiple filters.\n//\n// For load balancing, Metadata provides a means to subset cluster endpoints.\n// Endpoints have a Metadata object associated and routes contain a Metadata\n// object to match against. 
There are some well defined metadata used today for\n// this purpose:\n//\n// * ``{\"envoy.lb\": {\"canary\": <bool> }}`` This indicates the canary status of an\n//   endpoint and is also used during header processing\n//   (x-envoy-upstream-canary) and for stats purposes.\n// [#next-major-version: move to type/metadata/v2]\nmessage Metadata {\n  // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*\n  // namespace is reserved for Envoy's built-in filters.\n  map<string, google.protobuf.Struct> filter_metadata = 1;\n}\n\n// Runtime derived uint32 with a default when not specified.\nmessage RuntimeUInt32 {\n  // Default value if runtime value is not available.\n  uint32 default_value = 2;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// Runtime derived double with a default when not specified.\nmessage RuntimeDouble {\n  // Default value if runtime value is not available.\n  double default_value = 1;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// Runtime derived bool with a default when not specified.\nmessage RuntimeFeatureFlag {\n  // Default value if runtime value is not available.\n  google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key to get value for comparison. This value is used if defined. 
The boolean value must\n  // be represented via its\n  // `canonical JSON encoding <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n  string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// Header name/value pair.\nmessage HeaderValue {\n  // Header name.\n  string key = 1\n      [(validate.rules).string =\n           {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Header value.\n  //\n  // The same :ref:`format specifier <config_access_log_format>` as used for\n  // :ref:`HTTP access logging <config_access_log>` applies here, however\n  // unknown header values are replaced with the empty string instead of `-`.\n  string value = 2 [\n    (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}\n  ];\n}\n\n// Header name/value pair plus option to control append behavior.\nmessage HeaderValueOption {\n  // Header name/value pair that this option applies to.\n  HeaderValue header = 1 [(validate.rules).message = {required: true}];\n\n  // Should the value be appended? 
If true (default), the value is appended to\n  // existing values.\n  google.protobuf.BoolValue append = 2;\n}\n\n// Wrapper for a set of headers.\nmessage HeaderMap {\n  repeated HeaderValue headers = 1;\n}\n\n// Data source consisting of either a file or an inline value.\nmessage DataSource {\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local filesystem data source.\n    string filename = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Bytes inlined in the configuration.\n    bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];\n\n    // String inlined in the configuration.\n    string inline_string = 3 [(validate.rules).string = {min_bytes: 1}];\n  }\n}\n\n// The message specifies the retry policy of remote data source when fetching fails.\nmessage RetryPolicy {\n  // Specifies parameters that control :ref:`retry backoff strategy <envoy_api_msg_core.BackoffStrategy>`.\n  // This parameter is optional, in which case the default base interval is 1000 milliseconds. The\n  // default maximum interval is 10 times the base interval.\n  BackoffStrategy retry_back_off = 1;\n\n  // Specifies the allowed number of retries. 
This parameter is optional and\n  // defaults to 1.\n  google.protobuf.UInt32Value num_retries = 2;\n}\n\n// The message specifies how to fetch data from remote and how to verify it.\nmessage RemoteDataSource {\n  // The HTTP URI to fetch the remote data.\n  HttpUri http_uri = 1 [(validate.rules).message = {required: true}];\n\n  // SHA256 string for verifying data.\n  string sha256 = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Retry policy for fetching remote data.\n  RetryPolicy retry_policy = 3;\n}\n\n// Async data source which support async data fetch.\nmessage AsyncDataSource {\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local async data source.\n    DataSource local = 1;\n\n    // Remote async data source.\n    RemoteDataSource remote = 2;\n  }\n}\n\n// Configuration for transport socket in :ref:`listeners <config_listeners>` and\n// :ref:`clusters <envoy_api_msg_Cluster>`. If the configuration is\n// empty, a default transport socket implementation and configuration will be\n// chosen based on the platform and existence of tls_context.\nmessage TransportSocket {\n  // The name of the transport socket to instantiate. The name must match a supported transport\n  // socket implementation.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Implementation specific configuration which depends on the implementation being instantiated.\n  // See the supported transport socket implementations for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not\n// specified via a runtime key.\n//\n// .. 
note::\n//\n//   Parsing of the runtime key's data is implemented such that it may be represented as a\n//   :ref:`FractionalPercent <envoy_api_msg_type.FractionalPercent>` proto represented as JSON/YAML\n//   and may also be represented as an integer with the assumption that the value is an integral\n//   percentage out of 100. For instance, a runtime key lookup returning the value \"42\" would parse\n//   as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.\nmessage RuntimeFractionalPercent {\n  // Default value if the runtime value's for the numerator/denominator keys are not available.\n  type.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key for a YAML representation of a FractionalPercent.\n  string runtime_key = 2;\n}\n\n// Identifies a specific ControlPlane instance that Envoy is connected to.\nmessage ControlPlane {\n  // An opaque control plane identifier that uniquely identifies an instance\n  // of control plane. This can be used to identify which control plane instance,\n  // the Envoy is connected to.\n  string identifier = 1;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/config_source.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"ConfigSourceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Configuration sources]\n\n// xDS API version. This is used to describe both resource and transport\n// protocol versions (in distinct configuration fields).\nenum ApiVersion {\n  // When not specified, we assume v2, to ease migration to Envoy's stable API\n  // versioning. If a client does not support v2 (e.g. due to deprecation), this\n  // is an invalid value.\n  AUTO = 0;\n\n  // Use xDS v2 API.\n  V2 = 1;\n\n  // Use xDS v3 API.\n  V3 = 2;\n}\n\n// API configuration source. This identifies the API type and cluster that Envoy\n// will use to fetch an xDS API.\n// [#next-free-field: 9]\nmessage ApiConfigSource {\n  // APIs may be fetched via either REST or gRPC.\n  enum ApiType {\n    // Ideally this would be 'reserved 0' but one can't reserve the default\n    // value. Instead we throw an exception if this is ever used.\n    UNSUPPORTED_REST_LEGACY = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // REST-JSON v2 API. The `canonical JSON encoding\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_ for\n    // the v2 protos is used.\n    REST = 1;\n\n    // gRPC v2 API.\n    GRPC = 2;\n\n    // Using the delta xDS gRPC service, i.e. 
DeltaDiscovery{Request,Response}\n    // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state\n    // with every update, the xDS server only sends what has changed since the last update.\n    DELTA_GRPC = 3;\n  }\n\n  // API type (gRPC, REST, delta gRPC)\n  ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}];\n\n  // Cluster names should be used only with REST. If > 1\n  // cluster is defined, clusters will be cycled through if any kind of failure\n  // occurs.\n  //\n  // .. note::\n  //\n  //  The cluster with name ``cluster_name`` must be statically defined and its\n  //  type must not be ``EDS``.\n  repeated string cluster_names = 2;\n\n  // Multiple gRPC services can be provided for GRPC. If > 1 cluster is defined,\n  // services will be cycled through if any kind of failure occurs.\n  repeated GrpcService grpc_services = 4;\n\n  // For REST APIs, the delay between successive polls.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // For REST APIs, the request timeout. If not set, a default value of 1s will be used.\n  google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}];\n\n  // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be\n  // rate limited.\n  RateLimitSettings rate_limit_settings = 6;\n\n  // Skip the node identifier in subsequent discovery requests for streaming gRPC config types.\n  bool set_node_on_first_message_only = 7;\n}\n\n// Aggregated Discovery Service (ADS) options. 
This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` can be used to\n// specify that ADS is to be used.\nmessage AggregatedConfigSource {\n}\n\n// [#not-implemented-hide:]\n// Self-referencing config source options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` can be used to\n// specify that other data can be obtained from the same server.\nmessage SelfConfigSource {\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Rate Limit settings to be applied for discovery requests made by Envoy.\nmessage RateLimitSettings {\n  // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a\n  // default value of 100 will be used.\n  google.protobuf.UInt32Value max_tokens = 1;\n\n  // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens\n  // per second will be used.\n  google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}];\n}\n\n// Configuration for :ref:`listeners <config_listeners>`, :ref:`clusters\n// <config_cluster_manager>`, :ref:`routes\n// <envoy_api_msg_RouteConfiguration>`, :ref:`endpoints\n// <arch_overview_service_discovery>` etc. may either be sourced from the\n// filesystem or from an xDS API source. Filesystem configs are watched with\n// inotify for updates.\n// [#next-free-field: 7]\nmessage ConfigSource {\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Path on the filesystem to source and watch for configuration updates.\n    // When sourcing configuration for :ref:`secret <envoy_api_msg_auth.Secret>`,\n    // the certificate and key files are also watched for updates.\n    //\n    // .. 
note::\n    //\n    //  The path to the source must exist at config load time.\n    //\n    // .. note::\n    //\n    //   Envoy will only watch the file path for *moves.* This is because in general only moves\n    //   are atomic. The same method of swapping files as is demonstrated in the\n    //   :ref:`runtime documentation <config_runtime_symbolic_link_swap>` can be used here also.\n    string path = 1;\n\n    // API configuration source.\n    ApiConfigSource api_config_source = 2;\n\n    // When set, ADS will be used to fetch resources. The ADS API configuration\n    // source in the bootstrap configuration is used.\n    AggregatedConfigSource ads = 3;\n\n    // [#not-implemented-hide:]\n    // When set, the client will access the resources from the same server it got the\n    // ConfigSource from, although not necessarily from the same stream. This is similar to the\n    // :ref:`ads<envoy_api_field_core.ConfigSource.ads>` field, except that the client may use a\n    // different stream to the same server. As a result, this field can be used for things\n    // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.)\n    // LDS to RDS on the same server without requiring the management server to know its name\n    // or required credentials.\n    // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since\n    // this field can implicitly mean to use the same stream in the case where the ConfigSource\n    // is provided via ADS and the specified data can also be obtained via ADS.]\n    SelfConfigSource self = 5;\n  }\n\n  // When this timeout is specified, Envoy will wait no longer than the specified time for first\n  // config response on this xDS subscription during the :ref:`initialization process\n  // <arch_overview_initialization>`. After reaching the timeout, Envoy will move to the next\n  // initialization phase, even if the first config is not delivered yet. 
The timer is activated\n  // when the xDS API subscription starts, and is disarmed on first config update or on error. 0\n  // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another\n  // timeout applies). The default is 15s.\n  google.protobuf.Duration initial_fetch_timeout = 4;\n\n  // API version for xDS resources. This implies the type URLs that the client\n  // will request for resources and the resource type that the client will in\n  // turn expect to be delivered.\n  ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/event_service_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"EventServiceConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#not-implemented-hide:]\n// Configuration of the event reporting service endpoint.\nmessage EventServiceConfig {\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Specifies the gRPC service that hosts the event reporting service.\n    GrpcService grpc_service = 1;\n  }\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/grpc_method_list.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"GrpcMethodListProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC method list]\n\n// A list of gRPC methods which can be used as an allowlist, for example.\nmessage GrpcMethodList {\n  message Service {\n    // The name of the gRPC service.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // The names of the gRPC methods in this service.\n    repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  repeated Service services = 1;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/grpc_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"GrpcServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC services]\n\n// gRPC service configuration. This is used by :ref:`ApiConfigSource\n// <envoy_api_msg_core.ApiConfigSource>` and filter configurations.\n// [#next-free-field: 6]\nmessage GrpcService {\n  message EnvoyGrpc {\n    // The name of the upstream gRPC cluster. SSL credentials will be supplied\n    // in the :ref:`Cluster <envoy_api_msg_Cluster>` :ref:`transport_socket\n    // <envoy_api_field_Cluster.transport_socket>`.\n    string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // [#next-free-field: 7]\n  message GoogleGrpc {\n    // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.\n    message SslCredentials {\n      // PEM encoded server root certificates.\n      DataSource root_certs = 1;\n\n      // PEM encoded client private key.\n      DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n      // PEM encoded client certificate chain.\n      DataSource cert_chain = 3;\n    }\n\n    // Local channel credentials. 
Only UDS is supported for now.\n    // See https://github.com/grpc/grpc/pull/15909.\n    message GoogleLocalCredentials {\n    }\n\n    // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call\n    // credential types.\n    message ChannelCredentials {\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        SslCredentials ssl_credentials = 1;\n\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_default = 2;\n\n        GoogleLocalCredentials local_credentials = 3;\n      }\n    }\n\n    // [#next-free-field: 8]\n    message CallCredentials {\n      message ServiceAccountJWTAccessCredentials {\n        string json_key = 1;\n\n        uint64 token_lifetime_seconds = 2;\n      }\n\n      message GoogleIAMCredentials {\n        string authorization_token = 1;\n\n        string authority_selector = 2;\n      }\n\n      message MetadataCredentialsFromPlugin {\n        string name = 1;\n\n        oneof config_type {\n          google.protobuf.Struct config = 2 [deprecated = true];\n\n          google.protobuf.Any typed_config = 3;\n        }\n      }\n\n      // Security token service configuration that allows Google gRPC to\n      // fetch security token from an OAuth 2.0 authorization server.\n      // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and\n      // https://github.com/grpc/grpc/pull/19587.\n      // [#next-free-field: 10]\n      message StsService {\n        // URI of the token exchange service that handles token exchange requests.\n        // [#comment:TODO(asraa): Add URI validation when implemented. 
Tracked by\n        // https://github.com/envoyproxy/protoc-gen-validate/issues/303]\n        string token_exchange_service_uri = 1;\n\n        // Location of the target service or resource where the client\n        // intends to use the requested security token.\n        string resource = 2;\n\n        // Logical name of the target service where the client intends to\n        // use the requested security token.\n        string audience = 3;\n\n        // The desired scope of the requested security token in the\n        // context of the service or resource where the token will be used.\n        string scope = 4;\n\n        // Type of the requested security token.\n        string requested_token_type = 5;\n\n        // The path of subject token, a security token that represents the\n        // identity of the party on behalf of whom the request is being made.\n        string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}];\n\n        // Type of the subject token.\n        string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}];\n\n        // The path of actor token, a security token that represents the identity\n        // of the acting party. 
The acting party is authorized to use the\n        // requested security token and act on behalf of the subject.\n        string actor_token_path = 8;\n\n        // Type of the actor token.\n        string actor_token_type = 9;\n      }\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        // Access token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.\n        string access_token = 1;\n\n        // Google Compute Engine credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_compute_engine = 2;\n\n        // Google refresh token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.\n        string google_refresh_token = 3;\n\n        // Service Account JWT Access credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.\n        ServiceAccountJWTAccessCredentials service_account_jwt_access = 4;\n\n        // Google IAM credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.\n        GoogleIAMCredentials google_iam = 5;\n\n        // Custom authenticator credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.\n        // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.\n        MetadataCredentialsFromPlugin from_plugin = 6;\n\n        // Custom security token service which implements OAuth 2.0 token exchange.\n        // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16\n        // See https://github.com/grpc/grpc/pull/19587.\n        StsService sts_service = 7;\n      }\n    }\n\n    // The target URI when using the `Google C++ gRPC client\n    // <https://github.com/grpc/grpc>`_. 
SSL credentials will be supplied in\n    // :ref:`channel_credentials <envoy_api_field_core.GrpcService.GoogleGrpc.channel_credentials>`.\n    string target_uri = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    ChannelCredentials channel_credentials = 2;\n\n    // A set of call credentials that can be composed with `channel credentials\n    // <https://grpc.io/docs/guides/auth.html#credential-types>`_.\n    repeated CallCredentials call_credentials = 3;\n\n    // The human readable prefix to use when emitting statistics for the gRPC\n    // service.\n    //\n    // .. csv-table::\n    //    :header: Name, Type, Description\n    //    :widths: 1, 1, 2\n    //\n    //    streams_total, Counter, Total number of streams opened\n    //    streams_closed_<gRPC status code>, Counter, Total streams closed with <gRPC status code>\n    string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}];\n\n    // The name of the Google gRPC credentials factory to use. This must have been registered with\n    // Envoy. If this is empty, a default credentials factory will be used that sets up channel\n    // credentials based on other configuration parameters.\n    string credentials_factory_name = 5;\n\n    // Additional configuration for site-specific customizations of the Google\n    // gRPC library.\n    google.protobuf.Struct config = 6;\n  }\n\n  reserved 4;\n\n  oneof target_specifier {\n    option (validate.required) = true;\n\n    // Envoy's in-built gRPC client.\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    EnvoyGrpc envoy_grpc = 1;\n\n    // `Google C++ gRPC client <https://github.com/grpc/grpc>`_\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    GoogleGrpc google_grpc = 2;\n  }\n\n  // The timeout for the gRPC request. 
This is the timeout for a specific\n  // request.\n  google.protobuf.Duration timeout = 3;\n\n  // Additional metadata to include in streams initiated to the GrpcService.\n  // This can be used for scenarios in which additional ad hoc authorization\n  // headers (e.g. ``x-foo-bar: baz-key``) are to be injected.\n  repeated HeaderValue initial_metadata = 5;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/event_service_config.proto\";\nimport \"envoy/type/http.proto\";\nimport \"envoy/type/matcher/string.proto\";\nimport \"envoy/type/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Health check]\n// * Health checking :ref:`architecture overview <arch_overview_health_checking>`.\n// * If health checking is configured for a cluster, additional statistics are emitted. They are\n//   documented :ref:`here <config_cluster_manager_cluster_stats>`.\n\n// Endpoint health status.\nenum HealthStatus {\n  // The health status is not known. This is interpreted by Envoy as *HEALTHY*.\n  UNKNOWN = 0;\n\n  // Healthy.\n  HEALTHY = 1;\n\n  // Unhealthy.\n  UNHEALTHY = 2;\n\n  // Connection draining in progress. E.g.,\n  // `<https://aws.amazon.com/blogs/aws/elb-connection-draining-remove-instances-from-service-with-care/>`_\n  // or\n  // `<https://cloud.google.com/compute/docs/load-balancing/enabling-connection-draining>`_.\n  // This is interpreted by Envoy as *UNHEALTHY*.\n  DRAINING = 3;\n\n  // Health check timed out. 
This is part of HDS and is interpreted by Envoy as\n  // *UNHEALTHY*.\n  TIMEOUT = 4;\n\n  // Degraded.\n  DEGRADED = 5;\n}\n\n// [#next-free-field: 23]\nmessage HealthCheck {\n  // Describes the encoding of the payload bytes in the payload.\n  message Payload {\n    oneof payload {\n      option (validate.required) = true;\n\n      // Hex encoded payload. E.g., \"000000FF\".\n      string text = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // [#not-implemented-hide:] Binary payload.\n      bytes binary = 2;\n    }\n  }\n\n  // [#next-free-field: 12]\n  message HttpHealthCheck {\n    // The value of the host header in the HTTP health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The host header can be customized for a specific endpoint by setting the\n    // :ref:`hostname <envoy_api_field_endpoint.Endpoint.HealthCheckConfig.hostname>` field.\n    string host = 1;\n\n    // Specifies the HTTP path that will be requested during health checking. For example\n    // */healthcheck*.\n    string path = 2 [(validate.rules).string = {min_bytes: 1}];\n\n    // [#not-implemented-hide:] HTTP specific payload.\n    Payload send = 3;\n\n    // [#not-implemented-hide:] HTTP specific response.\n    Payload receive = 4;\n\n    // An optional service name parameter which is used to validate the identity of\n    // the health checked cluster. See the :ref:`architecture overview\n    // <arch_overview_health_checking_identity>` for more information.\n    //\n    // .. attention::\n    //\n    //   This field has been deprecated in favor of `service_name_matcher` for better flexibility\n    //   over matching with service-cluster name.\n    string service_name = 5 [deprecated = true];\n\n    // Specifies a list of HTTP headers that should be added to each request that is sent to the\n    // health checked cluster. 
For more information, including details on header value syntax, see\n    // the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated HeaderValueOption request_headers_to_add = 6\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request that is sent to the\n    // health checked cluster.\n    repeated string request_headers_to_remove = 8;\n\n    // If set, health checks will be made using http/2.\n    // Deprecated, use :ref:`codec_client_type\n    // <envoy_api_field_core.HealthCheck.HttpHealthCheck.codec_client_type>` instead.\n    bool use_http2 = 7 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n    // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default\n    // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open\n    // semantics of :ref:`Int64Range <envoy_api_msg_type.Int64Range>`. The start and end of each\n    // range are required. Only statuses in the range [100, 600) are allowed.\n    repeated type.Int64Range expected_statuses = 9;\n\n    // Use specified application protocol for health checks.\n    type.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}];\n\n    // An optional service name parameter which is used to validate the identity of\n    // the health checked cluster using a :ref:`StringMatcher\n    // <envoy_api_msg_type.matcher.StringMatcher>`. 
See the :ref:`architecture overview\n    // <arch_overview_health_checking_identity>` for more information.\n    type.matcher.StringMatcher service_name_matcher = 11;\n  }\n\n  message TcpHealthCheck {\n    // Empty payloads imply a connect-only health check.\n    Payload send = 1;\n\n    // When checking the response, “fuzzy” matching is performed such that each\n    // binary block must be found, and in the order specified, but not\n    // necessarily contiguous.\n    repeated Payload receive = 2;\n  }\n\n  message RedisHealthCheck {\n    // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value\n    // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other\n    // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance\n    // by setting the specified key to any value and waiting for traffic to drain.\n    string key = 1;\n  }\n\n  // `grpc.health.v1.Health\n  // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto>`_-based\n  // healthcheck. See `gRPC doc <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_\n  // for details.\n  message GrpcHealthCheck {\n    // An optional service name parameter which will be sent to gRPC service in\n    // `grpc.health.v1.HealthCheckRequest\n    // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto#L20>`_.\n    // message. See `gRPC health-checking overview\n    // <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_ for more information.\n    string service_name = 1;\n\n    // The value of the :authority header in the gRPC health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. 
The authority header can be customized for a specific endpoint by setting\n    // the :ref:`hostname <envoy_api_field_endpoint.Endpoint.HealthCheckConfig.hostname>` field.\n    string authority = 2;\n  }\n\n  // Custom health check.\n  message CustomHealthCheck {\n    // The registered name of the custom health checker.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // A custom health checker specific configuration which depends on the custom health checker\n    // being instantiated. See :api:`envoy/config/health_checker` for reference.\n    oneof config_type {\n      google.protobuf.Struct config = 2 [deprecated = true];\n\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Health checks occur over the transport socket specified for the cluster. This implies that if a\n  // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS.\n  //\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  message TlsOptions {\n    // Specifies the ALPN protocols for health check connections. This is useful if the\n    // corresponding upstream is using ALPN-based :ref:`FilterChainMatch\n    // <envoy_api_msg_listener.FilterChainMatch>` along with different protocols for health checks\n    // versus data connections. If empty, no ALPN protocols will be set on health check connections.\n    repeated string alpn_protocols = 1;\n  }\n\n  reserved 10;\n\n  // The time to wait for a health check response. If the timeout is reached the\n  // health check attempt will be considered a failure.\n  google.protobuf.Duration timeout = 1 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // The interval between health checks.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // An optional jitter amount in milliseconds. 
If specified, Envoy will start health\n  // checking after a random time in ms between 0 and initial_jitter. This only\n  // applies to the first health check.\n  google.protobuf.Duration initial_jitter = 20;\n\n  // An optional jitter amount in milliseconds. If specified, during every\n  // interval Envoy will add interval_jitter to the wait time.\n  google.protobuf.Duration interval_jitter = 3;\n\n  // An optional jitter amount as a percentage of interval_ms. If specified,\n  // during every interval Envoy will add interval_ms *\n  // interval_jitter_percent / 100 to the wait time.\n  //\n  // If interval_jitter_ms and interval_jitter_percent are both set, both of\n  // them will be used to increase the wait time.\n  uint32 interval_jitter_percent = 18;\n\n  // The number of unhealthy health checks required before a host is marked\n  // unhealthy. Note that for *http* health checking if a host responds with 503\n  // this threshold is ignored and the host is considered unhealthy immediately.\n  google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}];\n\n  // The number of healthy health checks required before a host is marked\n  // healthy. Note that during startup, only a single successful health check is\n  // required to mark a host healthy.\n  google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Non-serving port for health checking.\n  google.protobuf.UInt32Value alt_port = 6;\n\n  // Reuse health check connection between health checks. 
Default is true.\n  google.protobuf.BoolValue reuse_connection = 7;\n\n  oneof health_checker {\n    option (validate.required) = true;\n\n    // HTTP health check.\n    HttpHealthCheck http_health_check = 8;\n\n    // TCP health check.\n    TcpHealthCheck tcp_health_check = 9;\n\n    // gRPC health check.\n    GrpcHealthCheck grpc_health_check = 11;\n\n    // Custom health check.\n    CustomHealthCheck custom_health_check = 13;\n  }\n\n  // The \"no traffic interval\" is a special health check interval that is used when a cluster has\n  // never had traffic routed to it. This lower interval allows cluster information to be kept up to\n  // date, without sending a potentially large amount of active health checking traffic for no\n  // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the\n  // standard health check interval that is defined. Note that this interval takes precedence over\n  // any other.\n  //\n  // The default value for \"no traffic interval\" is 60 seconds.\n  google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy interval\" is a health check interval that is used for hosts that are marked as\n  // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the\n  // standard health check interval that is defined.\n  //\n  // The default value for \"unhealthy interval\" is the same as \"interval\".\n  google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as unhealthy. 
For subsequent health checks\n  // Envoy will shift back to using either \"unhealthy interval\" if present or the standard health\n  // check interval that is defined.\n  //\n  // The default value for \"unhealthy edge interval\" is the same as \"unhealthy interval\".\n  google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}];\n\n  // The \"healthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as healthy. For subsequent health checks\n  // Envoy will shift back to using the standard health check interval that is defined.\n  //\n  // The default value for \"healthy edge interval\" is the same as the default interval.\n  google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}];\n\n  // Specifies the path to the :ref:`health check event log <arch_overview_health_check_logging>`.\n  // If empty, no event log will be written.\n  string event_log_path = 17;\n\n  // [#not-implemented-hide:]\n  // The gRPC service for the health check event service.\n  // If empty, health check events won't be sent to a remote endpoint.\n  EventServiceConfig event_service = 22;\n\n  // If set to true, health check failure events will always be logged. If set to false, only the\n  // initial health check failure event will be logged.\n  // The default value is false.\n  bool always_log_health_check_failures = 19;\n\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  TlsOptions tls_options = 21;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/http_uri.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"HttpUriProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP Service URI ]\n\n// Envoy external URI descriptor\nmessage HttpUri {\n  // The HTTP server URI. It should be a full FQDN with protocol, host and path.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    uri: https://www.googleapis.com/oauth2/v1/certs\n  //\n  string uri = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Specify how `uri` is to be fetched. Today, this requires an explicit\n  // cluster, but in the future we may support dynamic cluster creation or\n  // inline DNS resolution. See `issue\n  // <https://github.com/envoyproxy/envoy/issues/1606>`_.\n  oneof http_upstream_type {\n    option (validate.required) = true;\n\n    // A cluster is created in the Envoy \"cluster_manager\" config\n    // section. This field specifies the cluster name.\n    //\n    // Example:\n    //\n    // .. code-block:: yaml\n    //\n    //    cluster: jwks_cluster\n    //\n    string cluster = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Sets the maximum duration in milliseconds that a response can take to arrive upon request.\n  google.protobuf.Duration timeout = 3 [(validate.rules).duration = {\n    required: true\n    gte {}\n  }];\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"ProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Protocol options]\n\n// [#not-implemented-hide:]\nmessage TcpProtocolOptions {\n}\n\nmessage UpstreamHttpProtocolOptions {\n  // Set transport socket `SNI <https://en.wikipedia.org/wiki/Server_Name_Indication>`_ for new\n  // upstream connections based on the downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  bool auto_sni = 1;\n\n  // Automatic validate upstream presented certificate for new upstream connections based on the\n  // downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  // This field is intended to set with `auto_sni` field.\n  bool auto_san_validation = 2;\n}\n\n// [#next-free-field: 6]\nmessage HttpProtocolOptions {\n  // Action to take when Envoy receives client request with header names containing underscore\n  // characters.\n  // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented\n  // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore\n  // characters.\n  enum HeadersWithUnderscoresAction {\n    // Allow headers with underscores. This is the default behavior.\n    ALLOW = 0;\n\n    // Reject client request. HTTP/1 requests are rejected with the 400 status. 
HTTP/2 requests\n    // end with the stream reset. The \"httpN.requests_rejected_with_underscores_in_headers\" counter\n    // is incremented for each rejected request.\n    REJECT_REQUEST = 1;\n\n    // Drop the header with name containing underscores. The header is dropped before the filter chain is\n    // invoked and as such filters will not see dropped headers. The\n    // \"httpN.dropped_headers_with_underscores\" is incremented for each dropped header.\n    DROP_HEADER = 2;\n  }\n\n  // The idle timeout for connections. The idle timeout is defined as the\n  // period in which there are no active requests. When the\n  // idle timeout is reached the connection will be closed. If the connection is an HTTP/2\n  // downstream connection a drain sequence will occur prior to closing the connection, see\n  // :ref:`drain_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.drain_timeout>`.\n  // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive.\n  // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0.\n  //\n  // .. warning::\n  //   Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 1;\n\n  // The maximum duration of a connection. The duration is defined as a period since a connection\n  // was established. If not set, there is no max duration. When max_connection_duration is reached\n  // the connection will be closed. Drain sequence will occur prior to closing the connection if\n  // it's applicable. See :ref:`drain_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.drain_timeout>`.\n  // Note: not implemented for upstream connections.\n  google.protobuf.Duration max_connection_duration = 3;\n\n  // The maximum number of headers. 
If unconfigured, the default\n  // maximum number of request headers allowed is 100. Requests that exceed this limit will receive\n  // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.\n  google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}];\n\n  // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be\n  // reset independent of any other timeouts. If not specified, this value is not set.\n  google.protobuf.Duration max_stream_duration = 4;\n\n  // Action to take when a client request with a header name containing underscore characters is received.\n  // If this setting is not specified, the value defaults to ALLOW.\n  // Note: upstream responses are not affected by this setting.\n  HeadersWithUnderscoresAction headers_with_underscores_action = 5;\n}\n\n// [#next-free-field: 6]\nmessage Http1ProtocolOptions {\n  message HeaderKeyFormat {\n    message ProperCaseWords {\n    }\n\n    oneof header_format {\n      option (validate.required) = true;\n\n      // Formats the header by proper casing words: the first character and any character following\n      // a special character will be capitalized if it's an alpha character. For example,\n      // \"content-type\" becomes \"Content-Type\", and \"foo$b#$are\" becomes \"Foo$B#$Are\".\n      // Note that while this results in most headers following conventional casing, certain headers\n      // are not covered. For example, the \"TE\" header will be formatted as \"Te\".\n      ProperCaseWords proper_case_words = 1;\n    }\n  }\n\n  // Handle HTTP requests with absolute URLs in the requests. These requests\n  // are generally sent by clients to forward/explicit proxies. This allows clients to configure\n  // envoy as their HTTP proxy. 
In Unix, for example, this is typically done by setting the\n  // *http_proxy* environment variable.\n  google.protobuf.BoolValue allow_absolute_url = 1;\n\n  // Handle incoming HTTP/1.0 and HTTP 0.9 requests.\n  // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1\n  // style connect logic, dechunking, and handling lack of client host iff\n  // *default_host_for_http_10* is configured.\n  bool accept_http_10 = 2;\n\n  // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as\n  // Envoy does not otherwise support HTTP/1.0 without a Host header.\n  // This is a no-op if *accept_http_10* is not true.\n  string default_host_for_http_10 = 3;\n\n  // Describes how the keys for response headers should be formatted. By default, all header keys\n  // are lower cased.\n  HeaderKeyFormat header_key_format = 4;\n\n  // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.\n  //\n  // .. attention::\n  //\n  //   Note that this only happens when Envoy is chunk encoding which occurs when:\n  //   - The request is HTTP/1.1.\n  //   - Is neither a HEAD only request nor a HTTP Upgrade.\n  //   - Not a response to a HEAD request.\n  //   - The content length header is not present.\n  bool enable_trailers = 5;\n}\n\n// [#next-free-field: 14]\nmessage Http2ProtocolOptions {\n  // Defines a parameter to be sent in the SETTINGS frame.\n  // See `RFC7540, sec. 
6.5.1 <https://tools.ietf.org/html/rfc7540#section-6.5.1>`_ for details.\n  message SettingsParameter {\n    // The 16 bit parameter identifier.\n    google.protobuf.UInt32Value identifier = 1 [\n      (validate.rules).uint32 = {lte: 65536 gte: 1},\n      (validate.rules).message = {required: true}\n    ];\n\n    // The 32 bit parameter value.\n    google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}];\n  }\n\n  // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_\n  // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values\n  // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header\n  // compression.\n  google.protobuf.UInt32Value hpack_table_size = 1;\n\n  // `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_\n  // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1)\n  // and defaults to 2147483647.\n  //\n  // For upstream connections, this also limits how many streams Envoy will initiate concurrently\n  // on a single connection. If the limit is reached, Envoy may queue requests or establish\n  // additional connections (as allowed per circuit breaker limits).\n  google.protobuf.UInt32Value max_concurrent_streams = 2\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 1}];\n\n  // `Initial stream-level flow-control window\n  // <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size. Valid values range from 65535\n  // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456\n  // (256 * 1024 * 1024).\n  //\n  // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default\n  // window size now, so it's also the minimum.\n  //\n  // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the\n  // HTTP/2 codec buffers. 
Once the buffer reaches this pointer, watermark callbacks will fire to\n  // stop the flow of data to the codec buffers.\n  google.protobuf.UInt32Value initial_stream_window_size = 3\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Similar to *initial_stream_window_size*, but for connection-level flow-control\n  // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*.\n  google.protobuf.UInt32Value initial_connection_window_size = 4\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Allows proxying Websocket and other upgrades over H2 connect.\n  bool allow_connect = 5;\n\n  // [#not-implemented-hide:] Hiding until envoy has full metadata support.\n  // Still under implementation. DO NOT USE.\n  //\n  // Allows metadata. See [metadata\n  // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more\n  // information.\n  bool allow_metadata = 6;\n\n  // Limit the number of pending outbound downstream frames of all types (frames that are waiting to\n  // be written into the socket). Exceeding this limit triggers flood mitigation and connection is\n  // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due\n  // to flood mitigation. The default limit is 10000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,\n  // preventing high memory utilization when receiving continuous stream of these frames. Exceeding\n  // this limit triggers flood mitigation and connection is terminated. The\n  // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood\n  // mitigation. 
The default limit is 1000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an\n  // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but\n  // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood``\n  // stat tracks the number of connections terminated due to flood mitigation.\n  // Setting this to 0 will terminate connection upon receiving first frame with an empty payload\n  // and no end stream flag. The default limit is 1.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9;\n\n  // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number\n  // of PRIORITY frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     max_inbound_priority_frames_per_stream * (1 + inbound_streams)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 100.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10;\n\n  // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number\n  // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     1 + 2 * (inbound_streams +\n  //              max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)\n  //\n  // the connection is terminated. 
The ``http2.inbound_window_update_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 10.\n  // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,\n  // but more complex implementations that try to estimate available bandwidth require at least 2.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11\n      [(validate.rules).uint32 = {gte: 1}];\n\n  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then\n  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,\n  // when this option is enabled, only the offending stream is terminated.\n  //\n  // See `RFC7540, sec. 8.1 <https://tools.ietf.org/html/rfc7540#section-8.1>`_ for details.\n  bool stream_error_on_invalid_http_messaging = 12;\n\n  // [#not-implemented-hide:]\n  // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions:\n  //\n  // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by\n  // Envoy.\n  //\n  // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field\n  // 'allow_connect'.\n  //\n  // Note that custom parameters specified through this field can not also be set in the\n  // corresponding named parameters:\n  //\n  // .. code-block:: text\n  //\n  //   ID    Field Name\n  //   ----------------\n  //   0x1   hpack_table_size\n  //   0x3   max_concurrent_streams\n  //   0x4   initial_stream_window_size\n  //\n  // Collisions will trigger config validation failure on load/update. 
Likewise, inconsistencies\n  // between custom parameters with the same identifier will trigger a failure.\n  //\n  // See `IANA HTTP/2 Settings\n  // <https://www.iana.org/assignments/http2-parameters/http2-parameters.xhtml#settings>`_ for\n  // standardized identifiers.\n  repeated SettingsParameter custom_settings_parameters = 13;\n}\n\n// [#not-implemented-hide:]\nmessage GrpcProtocolOptions {\n  Http2ProtocolOptions http2_protocol_options = 1;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/core/socket_option.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"SocketOptionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Socket Option ]\n\n// Generic socket option message. This would be used to set socket options that\n// might not exist in upstream kernels or precompiled Envoy binaries.\n// [#next-free-field: 7]\nmessage SocketOption {\n  enum SocketState {\n    // Socket options are applied after socket creation but before binding the socket to a port\n    STATE_PREBIND = 0;\n\n    // Socket options are applied after binding the socket to a port but before calling listen()\n    STATE_BOUND = 1;\n\n    // Socket options are applied after calling listen()\n    STATE_LISTENING = 2;\n  }\n\n  // An optional name to give this socket option for debugging, etc.\n  // Uniqueness is not required and no special meaning is assumed.\n  string description = 1;\n\n  // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP\n  int64 level = 2;\n\n  // The numeric name as passed to setsockopt\n  int64 name = 3;\n\n  oneof value {\n    option (validate.required) = true;\n\n    // Because many sockopts take an int value.\n    int64 int_value = 4;\n\n    // Otherwise it's a byte buffer.\n    bytes buf_value = 5;\n  }\n\n  // The state in which the option will be applied. When used in BindConfig\n  // STATE_PREBIND is currently the only valid value.\n  SocketState state = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/api/v2/discovery.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"DiscoveryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.discovery.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common discovery API components]\n\n// A DiscoveryRequest requests a set of versioned resources of the same type for\n// a given Envoy node on some API.\n// [#next-free-field: 7]\nmessage DiscoveryRequest {\n  // The version_info provided in the request messages will be the version_info\n  // received with the most recent successfully processed response or empty on\n  // the first request. It is expected that no new request is sent after a\n  // response is received until the Envoy instance is ready to ACK/NACK the new\n  // configuration. ACK/NACK takes place by returning the new API config version\n  // as applied or the previous API config version respectively. Each type_url\n  // (see below) has an independent version associated with it.\n  string version_info = 1;\n\n  // The node making the request.\n  core.Node node = 2;\n\n  // List of resources to subscribe to, e.g. list of cluster names or a route\n  // configuration name. If this is empty, all resources for the API are\n  // returned. LDS/CDS may have empty resource_names, which will cause all\n  // resources for the Envoy instance to be returned. 
The LDS and CDS responses\n  // will then imply a number of resources that need to be fetched via EDS/RDS,\n  // which will be explicitly enumerated in resource_names.\n  repeated string resource_names = 3;\n\n  // Type of the resource that is being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This is implicit\n  // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is\n  // required for ADS.\n  string type_url = 4;\n\n  // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above\n  // discussion on version_info and the DiscoveryResponse nonce comment. This\n  // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP,\n  // or 2) the client has not yet accepted an update in this xDS stream (unlike\n  // delta, where it is populated only for new explicit ACKs).\n  string response_nonce = 5;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\n  // failed to update configuration. The *message* field in *error_details* provides the Envoy\n  // internal exception related to the failure. It is only intended for consumption during manual\n  // debugging, the string provided is not guaranteed to be stable across Envoy versions.\n  google.rpc.Status error_detail = 6;\n}\n\n// [#next-free-field: 7]\nmessage DiscoveryResponse {\n  // The version of the response data.\n  string version_info = 1;\n\n  // The response resources. These resources are typed and depend on the API being called.\n  repeated google.protobuf.Any resources = 2;\n\n  // [#not-implemented-hide:]\n  // Canary is used to support two Envoy command line flags:\n  //\n  // * --terminate-on-canary-transition-failure. When set, Envoy is able to\n  //   terminate if it detects that configuration is stuck at canary. 
Consider\n  //   this example sequence of updates:\n  //   - Management server applies a canary config successfully.\n  //   - Management server rolls back to a production config.\n  //   - Envoy rejects the new production config.\n  //   Since there is no sensible way to continue receiving configuration\n  //   updates, Envoy will then terminate and apply production config from a\n  //   clean slate.\n  // * --dry-run-canary. When set, a canary response will never be applied, only\n  //   validated via a dry run.\n  bool canary = 3;\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty).\n  string type_url = 4;\n\n  // For gRPC based subscriptions, the nonce provides a way to explicitly ack a\n  // specific DiscoveryResponse in a following DiscoveryRequest. Additional\n  // messages may have been sent by Envoy to the management server for the\n  // previous version on the stream prior to this DiscoveryResponse, that were\n  // unprocessed at response send time. The nonce allows the management server\n  // to ignore any further DiscoveryRequests for the previous version until a\n  // DiscoveryRequest bearing the nonce. The nonce is optional and is not\n  // required for non-stream based xDS implementations.\n  string nonce = 5;\n\n  // [#not-implemented-hide:]\n  // The control plane instance that sent the response.\n  core.ControlPlane control_plane = 6;\n}\n\n// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC\n// endpoint for Delta xDS.\n//\n// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full\n// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a\n// diff to the state of a xDS client.\n// In Delta XDS there are per-resource versions, which allow tracking state at\n// the resource granularity.\n// An xDS Delta session is always in the context of a gRPC bidirectional\n// stream. 
This allows the xDS server to keep track of the state of xDS clients\n// connected to it.\n//\n// In Delta xDS the nonce field is required and used to pair\n// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK.\n// Optionally, a response message level system_version_info is present for\n// debugging purposes only.\n//\n// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest\n// can be either or both of: [1] informing the server of what resources the\n// client has gained/lost interest in (using resource_names_subscribe and\n// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from\n// the server (using response_nonce, with presence of error_detail making it a NACK).\n// Additionally, the first message (for a given type_url) of a reconnected gRPC stream\n// has a third role: informing the server of the resources (and their versions)\n// that the client already possesses, using the initial_resource_versions field.\n//\n// As with state-of-the-world, when multiple resource types are multiplexed (ADS),\n// all requests/acknowledgments/updates are logically walled off by type_url:\n// a Cluster ACK exists in a completely separate world from a prior Route NACK.\n// In particular, initial_resource_versions being sent at the \"start\" of every\n// gRPC stream actually entails a message for each type_url, each with its own\n// initial_resource_versions.\n// [#next-free-field: 8]\nmessage DeltaDiscoveryRequest {\n  // The node making the request.\n  core.Node node = 1;\n\n  // Type of the resource that is being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\".\n  string type_url = 2;\n\n  // DeltaDiscoveryRequests allow the client to add or remove individual\n  // resources to the set of tracked resources in the context of a stream.\n  // All resource names in the resource_names_subscribe list are added to the\n  // set of tracked resources and all resource names in the 
resource_names_unsubscribe\n  // list are removed from the set of tracked resources.\n  //\n  // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or\n  // resource_names_unsubscribe list simply means that no resources are to be\n  // added or removed to the resource list.\n  // *Like* state-of-the-world xDS, the server must send updates for all tracked\n  // resources, but can also send updates for resources the client has not subscribed to.\n  //\n  // NOTE: the server must respond with all resources listed in resource_names_subscribe,\n  // even if it believes the client has the most recent version of them. The reason:\n  // the client may have dropped them, but then regained interest before it had a chance\n  // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd.\n  //\n  // These two fields can be set in any DeltaDiscoveryRequest, including ACKs\n  // and initial_resource_versions.\n  //\n  // A list of Resource names to add to the list of tracked resources.\n  repeated string resource_names_subscribe = 3;\n\n  // A list of Resource names to remove from the list of tracked resources.\n  repeated string resource_names_unsubscribe = 4;\n\n  // Informs the server of the versions of the resources the xDS client knows of, to enable the\n  // client to continue the same logical xDS session even in the face of gRPC stream reconnection.\n  // It will not be populated: [1] in the very first stream of a session, since the client will\n  // not yet have any resources,  [2] in any message after the first in a stream (for a given\n  // type_url), since the server will already be correctly tracking the client's state.\n  // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.)\n  // The map's keys are names of xDS resources known to the xDS client.\n  // The map's values are opaque resource versions.\n  map<string, string> initial_resource_versions = 5;\n\n  // When the DeltaDiscoveryRequest is 
an ACK or NACK message in response\n  // to a previous DeltaDiscoveryResponse, the response_nonce must be the\n  // nonce in the DeltaDiscoveryResponse.\n  // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted.\n  string response_nonce = 6;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\n  // failed to update configuration. The *message* field in *error_details*\n  // provides the Envoy internal exception related to the failure.\n  google.rpc.Status error_detail = 7;\n}\n\n// [#next-free-field: 7]\nmessage DeltaDiscoveryResponse {\n  // The version of the response data (used for debugging).\n  string system_version_info = 1;\n\n  // The response resources. These are typed resources, whose types must match\n  // the type_url field.\n  repeated Resource resources = 2;\n\n  // field id 3 IS available!\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty.\n  string type_url = 4;\n\n  // Resource names of resources that have been deleted and are to be removed from the xDS Client.\n  // Removed resources for missing resources can be ignored.\n  repeated string removed_resources = 6;\n\n  // The nonce provides a way for DeltaDiscoveryRequests to uniquely\n  // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required.\n  string nonce = 5;\n}\n\nmessage Resource {\n  // The resource's name, to distinguish it from others of the same type of resource.\n  string name = 3;\n\n  // The aliases are a list of other names that this resource can go by.\n  repeated string aliases = 4;\n\n  // The resource level version. It allows xDS to track the state of individual\n  // resources.\n  string version = 1;\n\n  // The resource being tracked.\n  google.protobuf.Any resource = 2;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/eds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\nimport public \"envoy/api/v2/endpoint.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"EdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.endpoint.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: EDS]\n// Endpoint discovery :ref:`architecture overview <arch_overview_service_discovery_types_eds>`\n\nservice EndpointDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.ClusterLoadAssignment\";\n\n  // The resource_names field in DiscoveryRequest specifies a list of clusters\n  // to subscribe to updates for.\n  rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaEndpoints(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:endpoints\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage EdsDummy {\n}\n"
  },
  {
    "path": "api/envoy/api/v2/endpoint/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/api/v2/endpoint/endpoint.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.endpoint;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/endpoint/endpoint_components.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.endpoint\";\noption java_outer_classname = \"EndpointProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "api/envoy/api/v2/endpoint/endpoint_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.endpoint;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/health_check.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.endpoint\";\noption java_outer_classname = \"EndpointComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.endpoint.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Endpoints]\n\n// Upstream host identifier.\nmessage Endpoint {\n  // The optional health check configuration.\n  message HealthCheckConfig {\n    // Optional alternative health check port value.\n    //\n    // By default the health check address port of an upstream host is the same\n    // as the host's serving address port. This provides an alternative health\n    // check port. Setting this with a non-zero value allows an upstream host\n    // to have different health check address port.\n    uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}];\n\n    // By default, the host header for L7 health checks is controlled by cluster level configuration\n    // (see: :ref:`host <envoy_api_field_core.HealthCheck.HttpHealthCheck.host>` and\n    // :ref:`authority <envoy_api_field_core.HealthCheck.GrpcHealthCheck.authority>`). Setting this\n    // to a non-empty value allows overriding the cluster level configuration for a specific\n    // endpoint.\n    string hostname = 2;\n  }\n\n  // The upstream host address.\n  //\n  // .. attention::\n  //\n  //   The form of host address depends on the given cluster type. 
For STATIC or EDS,\n  //   it is expected to be a direct IP address (or something resolvable by the\n  //   specified :ref:`resolver <envoy_api_field_core.SocketAddress.resolver_name>`\n  //   in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname,\n  //   and will be resolved via DNS.\n  core.Address address = 1;\n\n  // The optional health check configuration is used as configuration for the\n  // health checker to contact the health checked host.\n  //\n  // .. attention::\n  //\n  //   This takes into effect only for upstream clusters with\n  //   :ref:`active health checking <arch_overview_health_checking>` enabled.\n  HealthCheckConfig health_check_config = 2;\n\n  // The hostname associated with this endpoint. This hostname is not used for routing or address\n  // resolution. If provided, it will be associated with the endpoint, and can be used for features\n  // that require a hostname, like\n  // :ref:`auto_host_rewrite <envoy_api_field_route.RouteAction.auto_host_rewrite>`.\n  string hostname = 3;\n}\n\n// An Endpoint that Envoy can route traffic to.\n// [#next-free-field: 6]\nmessage LbEndpoint {\n  // Upstream host identifier or a named reference.\n  oneof host_identifier {\n    Endpoint endpoint = 1;\n\n    // [#not-implemented-hide:]\n    string endpoint_name = 5;\n  }\n\n  // Optional health status when known and supplied by EDS server.\n  core.HealthStatus health_status = 2;\n\n  // The endpoint metadata specifies values that may be used by the load\n  // balancer to select endpoints in a cluster for a given request. The filter\n  // name should be specified as *envoy.lb*. 
An example boolean key-value pair\n  // is *canary*, providing the optional canary status of the upstream host.\n  // This may be matched against in a route's\n  // :ref:`RouteAction <envoy_api_msg_route.RouteAction>` metadata_match field\n  // to subset the endpoints considered in cluster load balancing.\n  core.Metadata metadata = 3;\n\n  // The optional load balancing weight of the upstream host; at least 1.\n  // Envoy uses the load balancing weight in some of the built in load\n  // balancers. The load balancing weight for an endpoint is divided by the sum\n  // of the weights of all endpoints in the endpoint's locality to produce a\n  // percentage of traffic for the endpoint. This percentage is then further\n  // weighted by the endpoint's locality's load balancing weight from\n  // LocalityLbEndpoints. If unspecified, each host is presumed to have equal\n  // weight in a locality. The sum of the weights of all endpoints in the\n  // endpoint's locality must not exceed uint32_t maximal value (4294967295).\n  google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}];\n}\n\n// A group of endpoints belonging to a Locality.\n// One can have multiple LocalityLbEndpoints for a locality, but this is\n// generally only done if the different groups need to have different load\n// balancing weights or different priorities.\n// [#next-free-field: 7]\nmessage LocalityLbEndpoints {\n  // Identifies location of where the upstream hosts run.\n  core.Locality locality = 1;\n\n  // The group of endpoints belonging to the locality specified.\n  repeated LbEndpoint lb_endpoints = 2;\n\n  // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load\n  // balancing weight for a locality is divided by the sum of the weights of all\n  // localities  at the same priority level to produce the effective percentage\n  // of traffic for the locality. 
The sum of the weights of all localities at\n  // the same priority level must not exceed uint32_t maximal value (4294967295).\n  //\n  // Locality weights are only considered when :ref:`locality weighted load\n  // balancing <arch_overview_load_balancing_locality_weighted_lb>` is\n  // configured. These weights are ignored otherwise. If no weights are\n  // specified when locality weighted load balancing is enabled, the locality is\n  // assigned no load.\n  google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional: the priority for this LocalityLbEndpoints. If unspecified this will\n  // default to the highest priority (0).\n  //\n  // Under usual circumstances, Envoy will only select endpoints for the highest\n  // priority (0). In the event all endpoints for a particular priority are\n  // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the\n  // next highest priority group.\n  //\n  // Priorities should range from 0 (highest) to N (lowest) without skipping.\n  uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}];\n\n  // Optional: Per locality proximity value which indicates how close this\n  // locality is from the source locality. This value only provides ordering\n  // information (lower the value, closer it is to the source locality).\n  // This will be consumed by load balancing schemes that need proximity order\n  // to determine where to route the requests.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value proximity = 6;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/endpoint/load_report.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.endpoint;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.endpoint\";\noption java_outer_classname = \"LoadReportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.endpoint.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// These are stats Envoy reports to GLB every so often. Report frequency is\n// defined by\n// :ref:`LoadStatsResponse.load_reporting_interval<envoy_api_field_service.load_stats.v2.LoadStatsResponse.load_reporting_interval>`.\n// Stats per upstream region/zone and optionally per subzone.\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\n// [#next-free-field: 9]\nmessage UpstreamLocalityStats {\n  // Name of zone, region and optionally endpoint group these metrics were\n  // collected from. Zone and region names could be empty if unknown.\n  core.Locality locality = 1;\n\n  // The total number of requests successfully completed by the endpoints in the\n  // locality.\n  uint64 total_successful_requests = 2;\n\n  // The total number of unfinished requests\n  uint64 total_requests_in_progress = 3;\n\n  // The total number of requests that failed due to errors at the endpoint,\n  // aggregated over all endpoints in the locality.\n  uint64 total_error_requests = 4;\n\n  // The total number of requests that were issued by this Envoy since\n  // the last report. 
This information is aggregated over all the\n  // upstream endpoints in the locality.\n  uint64 total_issued_requests = 8;\n\n  // Stats for multi-dimensional load balancing.\n  repeated EndpointLoadMetricStats load_metric_stats = 5;\n\n  // Endpoint granularity stats information for this locality. This information\n  // is populated if the Server requests it by setting\n  // :ref:`LoadStatsResponse.report_endpoint_granularity<envoy_api_field_service.load_stats.v2.LoadStatsResponse.report_endpoint_granularity>`.\n  repeated UpstreamEndpointStats upstream_endpoint_stats = 7;\n\n  // [#not-implemented-hide:] The priority of the endpoint group these metrics\n  // were collected from.\n  uint32 priority = 6;\n}\n\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\n// [#next-free-field: 8]\nmessage UpstreamEndpointStats {\n  // Upstream host address.\n  core.Address address = 1;\n\n  // Opaque and implementation dependent metadata of the\n  // endpoint. Envoy will pass this directly to the management server.\n  google.protobuf.Struct metadata = 6;\n\n  // The total number of requests successfully completed by the endpoints in the\n  // locality. These include non-5xx responses for HTTP, where errors\n  // originate at the client and the endpoint responded successfully. 
For gRPC,\n  // the grpc-status values are those not covered by total_error_requests below.\n  uint64 total_successful_requests = 2;\n\n  // The total number of unfinished requests for this endpoint.\n  uint64 total_requests_in_progress = 3;\n\n  // The total number of requests that failed due to errors at the endpoint.\n  // For HTTP these are responses with 5xx status codes and for gRPC the\n  // grpc-status values:\n  //\n  //   - DeadlineExceeded\n  //   - Unimplemented\n  //   - Internal\n  //   - Unavailable\n  //   - Unknown\n  //   - DataLoss\n  uint64 total_error_requests = 4;\n\n  // The total number of requests that were issued to this endpoint\n  // since the last report. A single TCP connection, HTTP or gRPC\n  // request or stream is counted as one request.\n  uint64 total_issued_requests = 7;\n\n  // Stats for multi-dimensional load balancing.\n  repeated EndpointLoadMetricStats load_metric_stats = 5;\n}\n\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\nmessage EndpointLoadMetricStats {\n  // Name of the metric; may be empty.\n  string metric_name = 1;\n\n  // Number of calls that finished and included this metric.\n  uint64 num_requests_finished_with_metric = 2;\n\n  // Sum of metric values across all calls that finished with this metric for\n  // load_reporting_interval.\n  double total_metric_value = 3;\n}\n\n// Per cluster load stats. Envoy reports these stats to a management server in a\n// :ref:`LoadStatsRequest<envoy_api_msg_service.load_stats.v2.LoadStatsRequest>`\n// [#not-implemented-hide:] Not configuration. 
TBD how to doc proto APIs.\n// Next ID: 7\n// [#next-free-field: 7]\nmessage ClusterStats {\n  message DroppedRequests {\n    // Identifier for the policy specifying the drop.\n    string category = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Total number of deliberately dropped requests for the category.\n    uint64 dropped_count = 2;\n  }\n\n  // The name of the cluster.\n  string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The eds_cluster_config service_name of the cluster.\n  // It's possible that two clusters send the same service_name to EDS,\n  // in that case, the management server is supposed to do aggregation on the load reports.\n  string cluster_service_name = 6;\n\n  // Need at least one.\n  repeated UpstreamLocalityStats upstream_locality_stats = 2\n      [(validate.rules).repeated = {min_items: 1}];\n\n  // Cluster-level stats such as total_successful_requests may be computed by\n  // summing upstream_locality_stats. In addition, below there are additional\n  // cluster-wide stats.\n  //\n  // The total number of dropped requests. This covers requests\n  // deliberately dropped by the drop_overload policy and circuit breaking.\n  uint64 total_dropped_requests = 3;\n\n  // Information about deliberately dropped requests for each category specified\n  // in the DropOverload policy.\n  repeated DroppedRequests dropped_requests = 5;\n\n  // Period over which the actual load report occurred. This will be guaranteed to include every\n  // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy\n  // and the *LoadStatsResponse* message sent from the management server, this may be longer than\n  // the requested load reporting interval in the *LoadStatsResponse*.\n  google.protobuf.Duration load_report_interval = 4;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/endpoint.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/endpoint/endpoint_components.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"EndpointProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.endpoint.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Endpoint configuration]\n// Endpoint discovery :ref:`architecture overview <arch_overview_service_discovery_types_eds>`\n\n// Each route from RDS will map to a single cluster or traffic split across\n// clusters using weights expressed in the RDS WeightedCluster.\n//\n// With EDS, each cluster is treated independently from a LB perspective, with\n// LB taking place between the Localities within a cluster and at a finer\n// granularity between the hosts within a locality. The percentage of traffic\n// for each endpoint is determined by both its load_balancing_weight, and the\n// load_balancing_weight of its locality. 
First, a locality will be selected,\n// then an endpoint within that locality will be chosen based on its weight.\n// [#next-free-field: 6]\nmessage ClusterLoadAssignment {\n  // Load balancing policy settings.\n  // [#next-free-field: 6]\n  message Policy {\n    // [#not-implemented-hide:]\n    message DropOverload {\n      // Identifier for the policy specifying the drop.\n      string category = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // Percentage of traffic that should be dropped for the category.\n      type.FractionalPercent drop_percentage = 2;\n    }\n\n    reserved 1;\n\n    // Action to trim the overall incoming traffic to protect the upstream\n    // hosts. This action allows protection in case the hosts are unable to\n    // recover from an outage, or unable to autoscale or unable to handle\n    // incoming traffic volume for any reason.\n    //\n    // At the client each category is applied one after the other to generate\n    // the 'actual' drop percentage on all outgoing traffic. For example:\n    //\n    // .. code-block:: json\n    //\n    //  { \"drop_overloads\": [\n    //      { \"category\": \"throttle\", \"drop_percentage\": 60 }\n    //      { \"category\": \"lb\", \"drop_percentage\": 50 }\n    //  ]}\n    //\n    // The actual drop percentages applied to the traffic at the clients will be\n    //    \"throttle\"_drop = 60%\n    //    \"lb\"_drop = 20%  // 50% of the remaining 'actual' load, which is 40%.\n    //    actual_outgoing_load = 20% // remaining after applying all categories.\n    // [#not-implemented-hide:]\n    repeated DropOverload drop_overloads = 2;\n\n    // Priority levels and localities are considered overprovisioned with this\n    // factor (in percentage). 
This means that we don't consider a priority\n    // level or locality unhealthy until the percentage of healthy hosts\n    // multiplied by the overprovisioning factor drops below 100.\n    // With the default value 140(1.4), Envoy doesn't consider a priority level\n    // or a locality unhealthy until their percentage of healthy hosts drops\n    // below 72%. For example:\n    //\n    // .. code-block:: json\n    //\n    //  { \"overprovisioning_factor\": 100 }\n    //\n    // Read more at :ref:`priority levels <arch_overview_load_balancing_priority_levels>` and\n    // :ref:`localities <arch_overview_load_balancing_locality_weighted_lb>`.\n    google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}];\n\n    // The max time until which the endpoints from this assignment can be used.\n    // If no new assignments are received before this time expires the endpoints\n    // are considered stale and should be marked unhealthy.\n    // Defaults to 0 which means endpoints never go stale.\n    google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}];\n\n    // The flag to disable overprovisioning. If it is set to true,\n    // :ref:`overprovisioning factor\n    // <arch_overview_load_balancing_overprovisioning_factor>` will be ignored\n    // and Envoy will not perform graceful failover between priority levels or\n    // localities as endpoints become unhealthy. Otherwise Envoy will perform\n    // graceful failover as :ref:`overprovisioning factor\n    // <arch_overview_load_balancing_overprovisioning_factor>` suggests.\n    // [#not-implemented-hide:]\n    bool disable_overprovisioning = 5 [deprecated = true];\n  }\n\n  // Name of the cluster. 
This will be the :ref:`service_name\n  // <envoy_api_field_Cluster.EdsClusterConfig.service_name>` value if specified\n  // in the cluster :ref:`EdsClusterConfig\n  // <envoy_api_msg_Cluster.EdsClusterConfig>`.\n  string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // List of endpoints to load balance to.\n  repeated endpoint.LocalityLbEndpoints endpoints = 2;\n\n  // Map of named endpoints that can be referenced in LocalityLbEndpoints.\n  // [#not-implemented-hide:]\n  map<string, endpoint.Endpoint> named_endpoints = 5;\n\n  // Load balancing policy settings.\n  Policy policy = 4;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/lds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\nimport public \"envoy/api/v2/listener.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"LdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Listener]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// The Envoy instance initiates an RPC at startup to discover a list of\n// listeners. Updates are delivered via streaming from the LDS server and\n// consist of a complete update of all listeners. Existing connections will be\n// allowed to drain from listeners that are no longer present.\nservice ListenerDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.Listener\";\n\n  rpc DeltaListeners(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc StreamListeners(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:listeners\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage LdsDummy {\n}\n"
  },
  {
    "path": "api/envoy/api/v2/listener/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/api/v2/listener/listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.listener;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/listener/listener_components.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.listener\";\noption java_outer_classname = \"ListenerProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ListenerNS\";\noption ruby_package = \"Envoy.Api.V2.ListenerNS\";\n"
  },
  {
    "path": "api/envoy/api/v2/listener/listener_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.listener;\n\nimport \"envoy/api/v2/auth/tls.proto\";\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.listener\";\noption java_outer_classname = \"ListenerComponentsProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ListenerNS\";\noption ruby_package = \"Envoy.Api.V2.ListenerNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Listener components]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage Filter {\n  reserved 3;\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// Specifies the match criteria for selecting a specific filter chain for a\n// listener.\n//\n// In order for a filter chain to be selected, *ALL* of its criteria must be\n// fulfilled by the incoming connection, properties of which are set by the\n// networking stack and/or listener filters.\n//\n// The following order applies:\n//\n// 1. Destination port.\n// 2. Destination IP address.\n// 3. Server name (e.g. SNI for TLS protocol),\n// 4. 
Transport protocol.\n// 5. Application protocols (e.g. ALPN for TLS protocol).\n// 6. Source type (e.g. any, local or external network).\n// 7. Source IP address.\n// 8. Source port.\n//\n// For criteria that allow ranges or wildcards, the most specific value in any\n// of the configured filter chains that matches the incoming connection is going\n// to be used (e.g. for SNI ``www.example.com`` the most specific match would be\n// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter\n// chain without ``server_names`` requirements).\n//\n// [#comment: Implemented rules are kept in the preference order, with deprecated fields\n// listed at the end, because that's how we want to list them in the docs.\n//\n// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules]\n// [#next-free-field: 13]\nmessage FilterChainMatch {\n  enum ConnectionSourceType {\n    // Any connection source matches.\n    ANY = 0;\n\n    // Match a connection originating from the same host.\n    LOCAL = 1 [(udpa.annotations.enum_value_migrate).rename = \"SAME_IP_OR_LOOPBACK\"];\n\n    // Match a connection originating from a different host.\n    EXTERNAL = 2;\n  }\n\n  reserved 1;\n\n  // Optional destination port to consider when use_original_dst is set on the\n  // listener in determining a filter chain match.\n  google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}];\n\n  // If non-empty, an IP address and prefix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  repeated core.CidrRange prefix_ranges = 3;\n\n  // If non-empty, an IP address and suffix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  // [#not-implemented-hide:]\n  string address_suffix = 4;\n\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value suffix_len = 5;\n\n  // Specifies the connection source IP 
match type. Can be any, local or external network.\n  ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}];\n\n  // The criteria is satisfied if the source IP address of the downstream\n  // connection is contained in at least one of the specified subnets. If the\n  // parameter is not specified or the list is empty, the source IP address is\n  // ignored.\n  repeated core.CidrRange source_prefix_ranges = 6;\n\n  // The criteria is satisfied if the source port of the downstream connection\n  // is contained in at least one of the specified ports. If the parameter is\n  // not specified, the source port is ignored.\n  repeated uint32 source_ports = 7\n      [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}];\n\n  // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining\n  // a filter chain match. Those values will be compared against the server names of a new\n  // connection, when detected by one of the listener filters.\n  //\n  // The server name will be matched against all wildcard domains, i.e. ``www.example.com``\n  // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``.\n  //\n  // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.\n  //\n  // .. 
attention::\n  //\n  //   See the :ref:`FAQ entry <faq_how_to_setup_sni>` on how to configure SNI for more\n  //   information.\n  repeated string server_names = 11;\n\n  // If non-empty, a transport protocol to consider when determining a filter chain match.\n  // This value will be compared against the transport protocol of a new connection, when\n  // it's detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``raw_buffer`` - default, used when no transport protocol is detected,\n  // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //   when TLS protocol is detected.\n  string transport_protocol = 9;\n\n  // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when\n  // determining a filter chain match. Those values will be compared against the application\n  // protocols of a new connection, when detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector\n  //   <config_listener_filters_tls_inspector>`,\n  // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //\n  // .. 
attention::\n  //\n  //   Currently, only :ref:`TLS Inspector <config_listener_filters_tls_inspector>` provides\n  //   application protocol detection based on the requested\n  //   `ALPN <https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_ values.\n  //\n  //   However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet,\n  //   and matching on values other than ``h2`` is going to lead to a lot of false negatives,\n  //   unless all connecting clients are known to use ALPN.\n  repeated string application_protocols = 10;\n}\n\n// A filter chain wraps a set of match criteria, an optional TLS context, a set of filters, and\n// various other parameters.\n// [#next-free-field: 8]\nmessage FilterChain {\n  // The criteria to use when matching a connection to this filter chain.\n  FilterChainMatch filter_chain_match = 1;\n\n  // The TLS context for this filter chain.\n  //\n  // .. attention::\n  //\n  //   **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are\n  //   set, `transport_socket` takes priority.\n  auth.DownstreamTlsContext tls_context = 2 [deprecated = true];\n\n  // A list of individual network filters that make up the filter chain for\n  // connections established with the listener. Order matters as the filters are\n  // processed sequentially as connection events happen. Note: If the filter\n  // list is empty, the connection will close by default.\n  repeated Filter filters = 3;\n\n  // Whether the listener should expect a PROXY protocol V1 header on new\n  // connections. If this option is enabled, the listener will assume that the\n  // remote address of the connection is the one specified in the header. Some\n  // load balancers including the AWS ELB support this option. 
If the option is\n  // absent or set to false, Envoy will use the physical peer address of the\n  // connection as the remote address.\n  google.protobuf.BoolValue use_proxy_proto = 4;\n\n  // [#not-implemented-hide:] filter chain metadata.\n  core.Metadata metadata = 5;\n\n  // Optional custom transport socket implementation to use for downstream connections.\n  // To setup TLS, set a transport socket with name `tls` and\n  // :ref:`DownstreamTlsContext <envoy_api_msg_auth.DownstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.TransportSocket transport_socket = 6;\n\n  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no\n  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter\n  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.\n  string name = 7;\n}\n\n// Listener filter chain match configuration. This is a recursive structure which allows complex\n// nested match configurations to be built using various logical operators.\n//\n// Examples:\n//\n// * Matches if the destination port is 3306.\n//\n// .. code-block:: yaml\n//\n//  destination_port_range:\n//   start: 3306\n//   end: 3307\n//\n// * Matches if the destination port is 3306 or 15000.\n//\n// .. 
code-block:: yaml\n//\n//  or_match:\n//    rules:\n//      - destination_port_range:\n//          start: 3306\n//          end: 3306\n//      - destination_port_range:\n//          start: 15000\n//          end: 15001\n//\n// [#next-free-field: 6]\nmessage ListenerFilterChainMatchPredicate {\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    // The list of rules that make up the set.\n    repeated ListenerFilterChainMatchPredicate rules = 1\n        [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. The match configuration will match if the negated match condition matches.\n    ListenerFilterChainMatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // Match destination port. Particularly, the match evaluation must use the recovered local port if\n    // the owning listener filter is after :ref:`an original_dst listener filter <config_listener_filters_original_dst>`.\n    type.Int32Range destination_port_range = 5;\n  }\n}\n\nmessage ListenerFilter {\n  // The name of the filter to instantiate. 
The name must match a\n  // :ref:`supported filter <config_listener_filters>`.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated.\n  // See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Optional match predicate used to disable the filter. The filter is enabled when this field is empty.\n  // See :ref:`ListenerFilterChainMatchPredicate <envoy_api_msg_listener.ListenerFilterChainMatchPredicate>`\n  // for further examples.\n  ListenerFilterChainMatchPredicate filter_disabled = 4;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/listener/quic_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.listener;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.listener\";\noption java_outer_classname = \"QuicConfigProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ListenerNS\";\noption ruby_package = \"Envoy.Api.V2.ListenerNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: QUIC listener Config]\n\n// Configuration specific to the QUIC protocol.\n// Next id: 4\nmessage QuicProtocolOptions {\n  // Maximum number of streams that the client can negotiate per connection. 100\n  // if not specified.\n  google.protobuf.UInt32Value max_concurrent_streams = 1;\n\n  // Maximum number of milliseconds that connection will be alive when there is\n  // no network activity. 300000ms if not specified.\n  google.protobuf.Duration idle_timeout = 2;\n\n  // Connection timeout in milliseconds before the crypto handshake is finished.\n  // 20000ms if not specified.\n  google.protobuf.Duration crypto_handshake_timeout = 3;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/listener/udp_listener_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.listener;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.listener\";\noption java_outer_classname = \"UdpListenerConfigProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ListenerNS\";\noption ruby_package = \"Envoy.Api.V2.ListenerNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: UDP Listener Config]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage UdpListenerConfig {\n  // Used to look up UDP listener factory, matches \"raw_udp_listener\" or\n  // \"quic_listener\" to create a specific udp listener.\n  // If not specified, treat as \"raw_udp_listener\".\n  string udp_listener_name = 1;\n\n  // Used to create a specific listener factory. To some factory, e.g.\n  // \"raw_udp_listener\", config is not needed.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\nmessage ActiveRawUdpListenerConfig {\n}\n"
  },
  {
    "path": "api/envoy/api/v2/listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/socket_option.proto\";\nimport \"envoy/api/v2/listener/listener_components.proto\";\nimport \"envoy/api/v2/listener/udp_listener_config.proto\";\nimport \"envoy/config/filter/accesslog/v2/accesslog.proto\";\nimport \"envoy/config/listener/v2/api_listener.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"ListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Listener configuration]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// [#next-free-field: 23]\nmessage Listener {\n  enum DrainType {\n    // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check\n    // filter), listener removal/modification, and hot restart.\n    DEFAULT = 0;\n\n    // Drain in response to listener removal/modification and hot restart. This setting does not\n    // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress\n    // and egress listeners.\n    MODIFY_ONLY = 1;\n  }\n\n  // [#not-implemented-hide:]\n  message DeprecatedV1 {\n    // Whether the listener should bind to the port. A listener that doesn't\n    // bind can only receive connections redirected from other listeners that\n    // set use_original_dst parameter to true. Default is true.\n    //\n    // This is deprecated in v2, all Listeners will bind to their port. 
An\n    // additional filter chain must be created for every original destination\n    // port this listener may redirect to in v2, with the original port\n    // specified in the FilterChainMatch destination_port field.\n    //\n    // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.]\n    google.protobuf.BoolValue bind_to_port = 1;\n  }\n\n  // Configuration for listener connection balancing.\n  message ConnectionBalanceConfig {\n    // A connection balancer implementation that does exact balancing. This means that a lock is\n    // held during balancing so that connection counts are nearly exactly balanced between worker\n    // threads. This is \"nearly\" exact in the sense that a connection might close in parallel thus\n    // making the counts incorrect, but this should be rectified on the next accept. This balancer\n    // sacrifices accept throughput for accuracy and should be used when there are a small number of\n    // connections that rarely cycle (e.g., service mesh gRPC egress).\n    message ExactBalance {\n    }\n\n    oneof balance_type {\n      option (validate.required) = true;\n\n      // If specified, the listener will use the exact connection balancer.\n      ExactBalance exact_balance = 1;\n    }\n  }\n\n  reserved 14;\n\n  // The unique name by which this listener is known. If no name is provided,\n  // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically\n  // updated or removed via :ref:`LDS <config_listeners_lds>` a unique name must be provided.\n  string name = 1;\n\n  // The address that the listener should listen on. In general, the address must be unique, though\n  // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on\n  // Linux as the actual port will be allocated by the OS.\n  core.Address address = 2 [(validate.rules).message = {required: true}];\n\n  // A list of filter chains to consider for this listener. 
The\n  // :ref:`FilterChain <envoy_api_msg_listener.FilterChain>` with the most specific\n  // :ref:`FilterChainMatch <envoy_api_msg_listener.FilterChainMatch>` criteria is used on a\n  // connection.\n  //\n  // Example using SNI for filter chain selection can be found in the\n  // :ref:`FAQ entry <faq_how_to_setup_sni>`.\n  repeated listener.FilterChain filter_chains = 3;\n\n  // If a connection is redirected using *iptables*, the port on which the proxy\n  // receives it might be different from the original destination address. When this flag is set to\n  // true, the listener hands off redirected connections to the listener associated with the\n  // original destination address. If there is no listener associated with the original destination\n  // address, the connection is handled by the listener that receives it. Defaults to false.\n  //\n  // .. attention::\n  //\n  //   This field is deprecated. Use :ref:`an original_dst <config_listener_filters_original_dst>`\n  //   :ref:`listener filter <envoy_api_field_Listener.listener_filters>` instead.\n  //\n  //   Note that hand off to another listener is *NOT* performed without this flag. 
Once\n  //   :ref:`FilterChainMatch <envoy_api_msg_listener.FilterChainMatch>` is implemented this flag\n  //   will be removed, as filter chain matching can be used to select a filter chain based on the\n  //   restored destination address.\n  google.protobuf.BoolValue use_original_dst = 4 [deprecated = true];\n\n  // Soft limit on size of the listener’s new connection read and write buffers.\n  // If unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5;\n\n  // Listener metadata.\n  core.Metadata metadata = 6;\n\n  // [#not-implemented-hide:]\n  DeprecatedV1 deprecated_v1 = 7;\n\n  // The type of draining to perform at a listener-wide level.\n  DrainType drain_type = 8;\n\n  // Listener filters have the opportunity to manipulate and augment the connection metadata that\n  // is used in connection filter chain matching, for example. These filters are run before any in\n  // :ref:`filter_chains <envoy_api_field_Listener.filter_chains>`. Order matters as the\n  // filters are processed sequentially right after a socket has been accepted by the listener, and\n  // before a connection is created.\n  // UDP Listener filters can be specified when the protocol in the listener socket address in\n  // :ref:`protocol <envoy_api_field_core.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_core.SocketAddress.Protocol.UDP>`.\n  // UDP listeners currently support a single filter.\n  repeated listener.ListenerFilter listener_filters = 9;\n\n  // The timeout to wait for all listener filters to complete operation. If the timeout is reached,\n  // the accepted socket is closed without a connection being created unless\n  // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the\n  // timeout. 
If not specified, a default timeout of 15s is used.\n  google.protobuf.Duration listener_filters_timeout = 15;\n\n  // Whether a connection should be created when listener filters timeout. Default is false.\n  //\n  // .. attention::\n  //\n  //   Some listener filters, such as :ref:`Proxy Protocol filter\n  //   <config_listener_filters_proxy_protocol>`, should not be used with this option. It will cause\n  //   unexpected behavior when a connection is created.\n  bool continue_on_listener_filters_timeout = 17;\n\n  // Whether the listener should be set as a transparent socket.\n  // When this flag is set to true, connections can be redirected to the listener using an\n  // *iptables* *TPROXY* target, in which case the original source and destination addresses and\n  // ports are preserved on accepted connections. This flag should be used in combination with\n  // :ref:`an original_dst <config_listener_filters_original_dst>` :ref:`listener filter\n  // <envoy_api_field_Listener.listener_filters>` to mark the connections' local addresses as\n  // \"restored.\" This can be used to hand off each redirected connection to another listener\n  // associated with the connection's destination address. Direct connections to the socket without\n  // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are\n  // therefore treated as if they were redirected.\n  // When this flag is set to false, the listener's socket is explicitly reset as non-transparent.\n  // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability.\n  // When this flag is not set (default), the socket is not modified, i.e. the transparent option\n  // is neither set nor reset.\n  google.protobuf.BoolValue transparent = 10;\n\n  // Whether the listener should set the *IP_FREEBIND* socket option. When this\n  // flag is set to true, listeners can be bound to an IP address that is not\n  // configured on the system running Envoy. 
When this flag is set to false, the\n  // option *IP_FREEBIND* is disabled on the socket. When this flag is not set\n  // (default), the socket is not modified, i.e. the option is neither enabled\n  // nor disabled.\n  google.protobuf.BoolValue freebind = 11;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.SocketOption socket_options = 13;\n\n  // Whether the listener should accept TCP Fast Open (TFO) connections.\n  // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on\n  // the socket, with a queue length of the specified size\n  // (see `details in RFC7413 <https://tools.ietf.org/html/rfc7413#section-5.1>`_).\n  // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket.\n  // When this flag is not set (default), the socket is not modified,\n  // i.e. the option is neither enabled nor disabled.\n  //\n  // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable\n  // TCP_FASTOPEN.\n  // See `ip-sysctl.txt <https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt>`_.\n  //\n  // On macOS, only values of 0, 1, and unset are valid; other values may result in an error.\n  // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter.\n  google.protobuf.UInt32Value tcp_fast_open_queue_length = 12;\n\n  // Specifies the intended direction of the traffic relative to the local Envoy.\n  core.TrafficDirection traffic_direction = 16;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_core.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_core.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // listener to create, i.e. :ref:`udp_listener_name\n  // <envoy_api_field_listener.UdpListenerConfig.udp_listener_name>` = \"raw_udp_listener\" for\n  // creating a packet-oriented UDP listener. 
If not present, treat it as \"raw_udp_listener\".\n  listener.UdpListenerConfig udp_listener_config = 18;\n\n  // Used to represent an API listener, which is used in non-proxy clients. The type of API\n  // exposed to the non-proxy application depends on the type of API listener.\n  // When this field is set, no other field except for :ref:`name<envoy_api_field_Listener.name>`\n  // should be set.\n  //\n  // .. note::\n  //\n  //  Currently only one ApiListener can be installed; and it can only be done via bootstrap config,\n  //  not LDS.\n  //\n  // [#next-major-version: In the v3 API, instead of this messy approach where the socket\n  // listener fields are directly in the top-level Listener message and the API listener types\n  // are in the ApiListener message, the socket listener messages should be in their own message,\n  // and the top-level Listener should essentially be a oneof that selects between the\n  // socket listener and the various types of API listener. That way, a given Listener message\n  // can structurally only contain the fields of the relevant type.]\n  config.listener.v2.ApiListener api_listener = 19;\n\n  // The listener's connection balancer configuration, currently only applicable to TCP listeners.\n  // If no configuration is specified, Envoy will not attempt to balance active connections between\n  // worker threads.\n  ConnectionBalanceConfig connection_balance_config = 20;\n\n  // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and\n  // create one socket for each worker thread. This makes inbound connections\n  // distribute among worker threads roughly evenly in cases where there are a high number\n  // of connections. 
When this flag is set to false, all worker threads share one socket.\n  //\n  // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart\n  // (see `3rd paragraph in 'soreuseport' commit message\n  // <https://github.com/torvalds/linux/commit/c617f398edd4db2b8567a28e89f2a64ec0c0b16a>`_).\n  // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket\n  // <https://github.com/torvalds/linux/commit/40a1227ea845a37ab197dd1caffb60b047fa36b1>`_.\n  bool reuse_port = 21;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by this listener.\n  repeated config.filter.accesslog.v2.AccessLog access_log = 22;\n}\n"
  },
  {
    "path": "api/envoy/api/v2/ratelimit/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/api/v2/ratelimit/ratelimit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.ratelimit;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.ratelimit\";\noption java_outer_classname = \"RatelimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.common.ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common rate limit components]\n\n// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to\n// determine the final rate limit key and overall allowed limit. Here are some examples of how\n// they might be used for the domain \"envoy\".\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"remote_address\": \"10.0.0.1\"]\n//\n// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The\n// configuration supplies a default limit for the *remote_address* key. If there is a desire to\n// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the\n// configuration.\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"path\": \"/foo/bar\"]\n//\n// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if\n// configured that way in the service).\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"path\": \"/foo/bar\"], [\"remote_address\": \"10.0.0.1\"]\n//\n// What it does: Limits unauthenticated traffic to a specific path for a specific IP address.\n// Like (1) we can raise/block specific IP addresses if we want with an override configuration.\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"true\"], [\"client_id\": \"foo\"]\n//\n// What it does: Limits all traffic for an authenticated client \"foo\"\n//\n// .. 
code-block:: cpp\n//\n//   [\"authenticated\": \"true\"], [\"client_id\": \"foo\"], [\"path\": \"/foo/bar\"]\n//\n// What it does: Limits traffic to a specific path for an authenticated client \"foo\"\n//\n// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired.\n// This enables building complex application scenarios with a generic backend.\nmessage RateLimitDescriptor {\n  message Entry {\n    // Descriptor key.\n    string key = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Descriptor value.\n    string value = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Descriptor entries.\n  repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/api/v2/rds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\nimport public \"envoy/api/v2/route.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"RdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: RDS]\n\n// The resource_names field in DiscoveryRequest specifies a route configuration.\n// This allows an Envoy configuration with multiple HTTP listeners (and\n// associated HTTP connection manager filters) to use different route\n// configurations. Each listener will bind its HTTP connection manager filter to\n// a route table via this identifier.\nservice RouteDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.RouteConfiguration\";\n\n  rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:routes\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for\n// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered\n// during the processing of an HTTP request if a route for the request cannot be resolved. 
The\n// :ref:`resource_names_subscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>`\n// field contains a list of virtual host names or aliases to track. The contents of an alias would\n// be the contents of a *host* or *authority* header used to make an http request. An xDS server\n// will match an alias to a virtual host based on the content of :ref:`domains'\n// <envoy_api_field_route.VirtualHost.domains>` field. The *resource_names_unsubscribe* field\n// contains a list of virtual host names that have been :ref:`unsubscribed\n// <xds_protocol_unsubscribe>` from the routing table associated with the RouteConfiguration.\nservice VirtualHostDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.route.VirtualHost\";\n\n  rpc DeltaVirtualHosts(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage RdsDummy {\n}\n"
  },
  {
    "path": "api/envoy/api/v2/route/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"//envoy/type/tracing/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/api/v2/route/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.route;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/route/route_components.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.route\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "api/envoy/api/v2/route/route_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.route;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/matcher/regex.proto\";\nimport \"envoy/type/matcher/string.proto\";\nimport \"envoy/type/percent.proto\";\nimport \"envoy/type/range.proto\";\nimport \"envoy/type/tracing/v2/custom_tag.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.route\";\noption java_outer_classname = \"RouteComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP route components]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// The top level element in the routing configuration is a virtual host. Each virtual host has\n// a logical name as well as a set of domains that get routed to it based on the incoming request's\n// host header. This allows a single listener to service multiple top level domain path trees. Once\n// a virtual host is selected based on the domain, the routes are processed in order to see which\n// upstream cluster to route to or whether to perform a redirect.\n// [#next-free-field: 21]\nmessage VirtualHost {\n  enum TlsRequirementType {\n    // No TLS requirement for the virtual host.\n    NONE = 0;\n\n    // External requests must use TLS. If a request is external and it is not\n    // using TLS, a 301 redirect will be sent telling the client to use HTTPS.\n    EXTERNAL_ONLY = 1;\n\n    // All requests must use TLS. 
If a request is not using TLS, a 301 redirect\n    // will be sent telling the client to use HTTPS.\n    ALL = 2;\n  }\n\n  reserved 9;\n\n  // The logical name of the virtual host. This is used when emitting certain\n  // statistics but is not relevant for routing.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // A list of domains (host/authority header) that will be matched to this\n  // virtual host. Wildcard hosts are supported in the suffix or prefix form.\n  //\n  // Domain search order:\n  //  1. Exact domain names: ``www.foo.com``.\n  //  2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``.\n  //  3. Prefix domain wildcards: ``foo.*`` or ``foo-*``.\n  //  4. Special wildcard ``*`` matching any domain.\n  //\n  // .. note::\n  //\n  //   The wildcard will not match the empty string.\n  //   e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``.\n  //   The longest wildcards match first.\n  //   Only a single virtual host in the entire route configuration can match on ``*``. A domain\n  //   must be unique across all virtual hosts or the config will fail to load.\n  //\n  // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE.\n  repeated string domains = 2 [(validate.rules).repeated = {\n    min_items: 1\n    items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}}\n  }];\n\n  // The list of routes that will be matched, in order, for incoming requests.\n  // The first route that matches will be used.\n  repeated Route routes = 3;\n\n  // Specifies the type of TLS enforcement the virtual host expects. If this option is not\n  // specified, there is no TLS requirement for the virtual host.\n  TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // A list of virtual clusters defined for this virtual host. 
Virtual clusters\n  // are used for additional statistics gathering.\n  repeated VirtualCluster virtual_clusters = 5;\n\n  // Specifies a set of rate limit configurations that will be applied to the\n  // virtual host.\n  repeated RateLimit rate_limits = 6;\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption request_headers_to_add = 7\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // handled by this virtual host.\n  repeated string request_headers_to_remove = 13;\n\n  // Specifies a list of HTTP headers that should be added to each response\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_RouteConfiguration`. 
For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // handled by this virtual host.\n  repeated string response_headers_to_remove = 11;\n\n  // Indicates that the virtual host has a CORS policy.\n  CorsPolicy cors = 8;\n\n  // The per_filter_config field can be used to provide virtual host-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n  // for if and how it is utilized.\n  map<string, google.protobuf.Struct> per_filter_config = 12 [deprecated = true];\n\n  // The per_filter_config field can be used to provide virtual host-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n  // for if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 15;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the upstream request. Setting this option will cause it to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the upstream\n  // will see the attempt count as perceived by the second Envoy. 
Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_config.filter.http.router.v2.Router.suppress_envoy_headers>` flag.\n  //\n  // [#next-major-version: rename to include_attempt_count_in_request.]\n  bool include_request_attempt_count = 14;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the downstream response. Setting this option will cause the router to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the downstream\n  // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_config.filter.http.router.v2.Router.suppress_envoy_headers>` flag.\n  bool include_attempt_count_in_response = 19;\n\n  // Indicates the retry policy for all routes in this virtual host. Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  RetryPolicy retry_policy = 16;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. Note that setting a route level entry\n  // will take precedence over this config and it'll be treated independently (e.g.: values are not\n  // inherited). :ref:`Retry policy <envoy_api_field_route.VirtualHost.retry_policy>` should not be\n  // set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 20;\n\n  // Indicates the hedge policy for all routes in this virtual host. 
Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  HedgePolicy hedge_policy = 17;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum\n  // value of this and the listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18;\n}\n\n// A filter-defined action type.\nmessage FilterAction {\n  google.protobuf.Any action = 1;\n}\n\n// A route is both a specification of how to match a request as well as an indication of what to do\n// next (e.g., redirect, forward, rewrite, etc.).\n//\n// .. attention::\n//\n//   Envoy supports routing on HTTP method via :ref:`header matching\n//   <envoy_api_msg_route.HeaderMatcher>`.\n// [#next-free-field: 18]\nmessage Route {\n  reserved 6;\n\n  // Name for the route.\n  string name = 14;\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  oneof action {\n    option (validate.required) = true;\n\n    // Route request to some upstream cluster.\n    RouteAction route = 2;\n\n    // Return a redirect.\n    RedirectAction redirect = 3;\n\n    // Return an arbitrary HTTP response directly, without proxying.\n    DirectResponseAction direct_response = 7;\n\n    // [#not-implemented-hide:]\n    // If true, a filter will define the action (e.g., it could dynamically generate the\n    // RouteAction).\n    FilterAction filter_action = 17;\n  }\n\n  // The Metadata field can be used to provide additional information\n  // about the route. 
It can be used for configuration, stats, and logging.\n  // The metadata should go under the filter namespace that will need it.\n  // For instance, if the metadata is intended for the Router filter,\n  // the filter name should be specified as *envoy.filters.http.router*.\n  core.Metadata metadata = 4;\n\n  // Decorator for the matched route.\n  Decorator decorator = 5;\n\n  // The per_filter_config field can be used to provide route-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>` for\n  // if and how it is utilized.\n  map<string, google.protobuf.Struct> per_filter_config = 8 [deprecated = true];\n\n  // The typed_per_filter_config field can be used to provide route-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>` for\n  // if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 13;\n\n  // Specifies a set of headers that will be added to requests matching this\n  // route. Headers specified at this level are applied before headers from the\n  // enclosing :ref:`envoy_api_msg_route.VirtualHost` and\n  // :ref:`envoy_api_msg_RouteConfiguration`. 
For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption request_headers_to_add = 9\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // matching this route.\n  repeated string request_headers_to_remove = 12;\n\n  // Specifies a set of headers that will be added to responses to requests\n  // matching this route. Headers specified at this level are applied before\n  // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and\n  // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on\n  // :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // to requests matching this route.\n  repeated string response_headers_to_remove = 11;\n\n  // Presence of the object defines whether the connection manager's tracing configuration\n  // is overridden by this route specific instance.\n  Tracing tracing = 15;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set, the bytes actually buffered will be the minimum value of this and the\n  // listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16;\n}\n\n// Compared to the :ref:`cluster <envoy_api_field_route.RouteAction.cluster>` field that specifies a\n// single upstream cluster as the target of a request, the :ref:`weighted_clusters\n// <envoy_api_field_route.RouteAction.weighted_clusters>` option allows for specification of\n// multiple upstream clusters along 
with weights that indicate the percentage of\n// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the\n// weights.\nmessage WeightedCluster {\n  // [#next-free-field: 11]\n  message ClusterWeight {\n    reserved 7;\n\n    // Name of the upstream cluster. The cluster must exist in the\n    // :ref:`cluster manager configuration <config_cluster_manager>`.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // An integer between 0 and :ref:`total_weight\n    // <envoy_api_field_route.WeightedCluster.total_weight>`. When a request matches the route,\n    // the choice of an upstream cluster is determined by its weight. The sum of weights across all\n    // entries in the clusters array must add up to the total_weight, which defaults to 100.\n    google.protobuf.UInt32Value weight = 2;\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field will be considered for\n    // load balancing. Note that this will be merged with what's provided in\n    // :ref:`RouteAction.metadata_match <envoy_api_field_route.RouteAction.metadata_match>`, with\n    // values here taking precedence. The filter name should be specified as *envoy.lb*.\n    core.Metadata metadata_match = 3;\n\n    // Specifies a list of headers to be added to requests when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_route.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and\n    // :ref:`envoy_api_msg_RouteConfiguration`. 
For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.HeaderValueOption request_headers_to_add = 4\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request when\n    // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`.\n    repeated string request_headers_to_remove = 9;\n\n    // Specifies a list of headers to be added to responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_route.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and\n    // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.HeaderValueOption response_headers_to_add = 5\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of headers to be removed from responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_route.RouteAction`.\n    repeated string response_headers_to_remove = 6;\n\n    // The per_filter_config field can be used to provide weighted cluster-specific\n    // configurations for filters. The key should match the filter name, such as\n    // *envoy.filters.http.buffer* for the HTTP buffer filter. 
Use of this field is filter\n    // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n    // for if and how it is utilized.\n    map<string, google.protobuf.Struct> per_filter_config = 8 [deprecated = true];\n\n    // The typed_per_filter_config field can be used to provide weighted cluster-specific\n    // configurations for filters. The key should match the filter name, such as\n    // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n    // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n    // for if and how it is utilized.\n    map<string, google.protobuf.Any> typed_per_filter_config = 10;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Specifies the total weight across all clusters. The sum of all cluster weights must equal this\n  // value, which must be greater than 0. Defaults to 100.\n  google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies the runtime key prefix that should be used to construct the\n  // runtime keys associated with each cluster. When the *runtime_key_prefix* is\n  // specified, the router will look for weights associated with each upstream\n  // cluster under the key *runtime_key_prefix* + \".\" + *cluster[i].name* where\n  // *cluster[i]* denotes an entry in the clusters array field. If the runtime\n  // key for the cluster does not exist, the value specified in the\n  // configuration file will be used as the default weight. 
See the :ref:`runtime documentation\n  // <operations_runtime>` for how key names map to the underlying implementation.\n  string runtime_key_prefix = 2;\n}\n\n// [#next-free-field: 12]\nmessage RouteMatch {\n  message GrpcRouteMatchOptions {\n  }\n\n  message TlsContextMatchOptions {\n    // If specified, the route will match against whether or not a certificate is presented.\n    // If not specified, certificate presentation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue presented = 1;\n\n    // If specified, the route will match against whether or not a certificate is validated.\n    // If not specified, certificate validation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue validated = 2;\n  }\n\n  reserved 5;\n\n  oneof path_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route is a prefix rule meaning that the prefix must\n    // match the beginning of the *:path* header.\n    string prefix = 1;\n\n    // If specified, the route is an exact path rule meaning that the path must\n    // exactly match the *:path* header once the query string is removed.\n    string path = 2;\n\n    // If specified, the route is a regular expression rule meaning that the\n    // regex must match the *:path* header once the query string is removed. The entire path\n    // (without the query string) must match the regex. The rule will not match if only a\n    // subsequence of the *:path* header matches the regex. The regex grammar is defined `here\n    // <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.\n    //\n    // Examples:\n    //\n    // * The regex ``/b[io]t`` matches the path */bit*\n    // * The regex ``/b[io]t`` matches the path */bot*\n    // * The regex ``/b[io]t`` does not match the path */bite*\n    // * The regex ``/b[io]t`` does not match the path */bit/bot*\n    //\n    // .. 
attention::\n    //   This field has been deprecated in favor of `safe_regex` as it is not safe for use with\n    //   untrusted input in all cases.\n    string regex = 3 [\n      deprecated = true,\n      (validate.rules).string = {max_bytes: 1024},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n\n    // If specified, the route is a regular expression rule meaning that the\n    // regex must match the *:path* header once the query string is removed. The entire path\n    // (without the query string) must match the regex. The rule will not match if only a\n    // subsequence of the *:path* header matches the regex.\n    //\n    // [#next-major-version: In the v3 API we should redo how path specification works such\n    // that we utilize StringMatcher, and additionally have consistent options around whether we\n    // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive\n    // to deprecate the existing options. We should even consider whether we want to do away with\n    // path_specifier entirely and just rely on a set of header matchers which can already match\n    // on :path, etc. The issue with that is it is unclear how to generically deal with query string\n    // stripping. This needs more thought.]\n    type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];\n  }\n\n  // Indicates that prefix/path matching should be case sensitive. The default\n  // is true.\n  google.protobuf.BoolValue case_sensitive = 4;\n\n  // Indicates that the route should additionally match on a runtime key. Every time the route\n  // is considered for a match, it must also fall under the percentage of matches indicated by\n  // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the\n  // number is <= the value of the numerator N, or if the key is not present, the default\n  // value, the router continues to evaluate the remaining match criteria. 
A runtime_fraction\n  // route configuration can be used to roll out route changes in a gradual manner without full\n  // code/config deploys. Refer to the :ref:`traffic shifting\n  // <config_http_conn_man_route_table_traffic_splitting_shift>` docs for additional documentation.\n  //\n  // .. note::\n  //\n  //    Parsing this field is implemented such that the runtime key's data may be represented\n  //    as a FractionalPercent proto represented as JSON/YAML and may also be represented as an\n  //    integer with the assumption that the value is an integral percentage out of 100. For\n  //    instance, a runtime key lookup returning the value \"42\" would parse as a FractionalPercent\n  //    whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics.\n  core.RuntimeFractionalPercent runtime_fraction = 9;\n\n  // Specifies a set of headers that the route should match on. The router will\n  // check the request’s headers against all the specified headers in the route\n  // config. A match will happen if all the headers in the route are present in\n  // the request with the same values (or based on presence if the value field\n  // is not in the config).\n  repeated HeaderMatcher headers = 6;\n\n  // Specifies a set of URL query parameters on which the route should\n  // match. The router will check the query string from the *path* header\n  // against all the specified query parameters. If the number of specified\n  // query parameters is nonzero, they all must match the *path* header's\n  // query string for a match to occur.\n  repeated QueryParameterMatcher query_parameters = 7;\n\n  // If specified, only gRPC requests will be matched. 
The router will check\n  // that the content-type header has an application/grpc or one of the various\n  // application/grpc+ values.\n  GrpcRouteMatchOptions grpc = 8;\n\n  // If specified, the client tls context will be matched against the defined\n  // match options.\n  //\n  // [#next-major-version: unify with RBAC]\n  TlsContextMatchOptions tls_context = 11;\n}\n\n// [#next-free-field: 12]\nmessage CorsPolicy {\n  // Specifies the origins that will be allowed to do CORS requests.\n  //\n  // An origin is allowed if either allow_origin or allow_origin_regex match.\n  //\n  // .. attention::\n  //  This field has been deprecated in favor of `allow_origin_string_match`.\n  repeated string allow_origin = 1\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Specifies regex patterns that match allowed origins.\n  //\n  // An origin is allowed if either allow_origin or allow_origin_regex match.\n  //\n  // .. attention::\n  //   This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for\n  //   use with untrusted input in all cases.\n  repeated string allow_origin_regex = 8\n      [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}];\n\n  // Specifies string patterns that match allowed origins. 
An origin is allowed if any of the\n  // string matchers match.\n  repeated type.matcher.StringMatcher allow_origin_string_match = 11;\n\n  // Specifies the content for the *access-control-allow-methods* header.\n  string allow_methods = 2;\n\n  // Specifies the content for the *access-control-allow-headers* header.\n  string allow_headers = 3;\n\n  // Specifies the content for the *access-control-expose-headers* header.\n  string expose_headers = 4;\n\n  // Specifies the content for the *access-control-max-age* header.\n  string max_age = 5;\n\n  // Specifies whether the resource allows credentials.\n  google.protobuf.BoolValue allow_credentials = 6;\n\n  oneof enabled_specifier {\n    // Specifies if the CORS filter is enabled. Defaults to true. Only effective on route.\n    //\n    // .. attention::\n    //\n    //   **This field is deprecated**. Set the\n    //   :ref:`filter_enabled<envoy_api_field_route.CorsPolicy.filter_enabled>` field instead.\n    google.protobuf.BoolValue enabled = 7\n        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n    // Specifies the % of requests for which the CORS filter is enabled.\n    //\n    // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS\n    // filter will be enabled for 100% of the requests.\n    //\n    // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is\n    // specified, Envoy will lookup the runtime key to get the percentage of requests to filter.\n    core.RuntimeFractionalPercent filter_enabled = 9;\n  }\n\n  // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not\n  // enforced.\n  //\n  // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. 
One of those\n  // fields has to explicitly disable the filter in order for this setting to take effect.\n  //\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* to determine if it's valid but will not enforce any policies.\n  core.RuntimeFractionalPercent shadow_enabled = 10;\n}\n\n// [#next-free-field: 34]\nmessage RouteAction {\n  enum ClusterNotFoundResponseCode {\n    // HTTP status code - 503 Service Unavailable.\n    SERVICE_UNAVAILABLE = 0;\n\n    // HTTP status code - 404 Not Found.\n    NOT_FOUND = 1;\n  }\n\n  // Configures :ref:`internal redirect <arch_overview_internal_redirects>` behavior.\n  enum InternalRedirectAction {\n    PASS_THROUGH_INTERNAL_REDIRECT = 0;\n    HANDLE_INTERNAL_REDIRECT = 1;\n  }\n\n  // The router is capable of shadowing traffic from one cluster to another. The current\n  // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n  // respond before returning the response from the primary cluster. All normal statistics are\n  // collected for the shadow cluster making this feature useful for testing.\n  //\n  // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is\n  // useful for logging. For example, *cluster1* becomes *cluster1-shadow*.\n  //\n  // .. note::\n  //\n  //   Shadowing will not be triggered if the primary cluster does not exist.\n  message RequestMirrorPolicy {\n    // Specifies the cluster that requests will be mirrored to. The cluster must\n    // exist in the cluster manager configuration.\n    string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // If not specified, all requests to the target cluster will be mirrored. If\n    // specified, Envoy will lookup the runtime key to get the % of requests to\n    // mirror. 
Valid values are from 0 to 10000, allowing for increments of\n    // 0.01% of requests to be mirrored. If the runtime key is specified in the\n    // configuration but not present in runtime, 0 is the default and thus 0% of\n    // requests will be mirrored.\n    //\n    // .. attention::\n    //\n    //   **This field is deprecated**. Set the\n    //   :ref:`runtime_fraction\n    //   <envoy_api_field_route.RouteAction.RequestMirrorPolicy.runtime_fraction>`\n    //   field instead. Mirroring occurs if both this and\n    //   :ref:`runtime_fraction <envoy_api_field_route.RouteAction.RequestMirrorPolicy.runtime_fraction>`\n    //   are not set.\n    string runtime_key = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n    // If not specified, all requests to the target cluster will be mirrored.\n    //\n    // If specified, this field takes precedence over the `runtime_key` field and requests must also\n    // fall under the percentage of matches indicated by this field.\n    //\n    // For some fraction N/D, a random number in the range [0,D) is selected. If the\n    // number is <= the value of the numerator N, or if the key is not present, the default\n    // value, the request will be mirrored.\n    core.RuntimeFractionalPercent runtime_fraction = 3;\n\n    // Determines if the trace span should be sampled. Defaults to true.\n    google.protobuf.BoolValue trace_sampled = 4;\n  }\n\n  // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer\n  // <arch_overview_load_balancing_types>`.\n  // [#next-free-field: 7]\n  message HashPolicy {\n    message Header {\n      // The name of the request header that will be used to obtain the hash\n      // key. 
If the request header is not present, no hash will be produced.\n      string header_name = 1 [\n        (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}\n      ];\n    }\n\n    // Envoy supports two types of cookie affinity:\n    //\n    // 1. Passive. Envoy takes a cookie that's present in the cookies header and\n    //    hashes on its value.\n    //\n    // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL)\n    //    on the first request from the client in its response to the client,\n    //    based on the endpoint the request gets sent to. The client then\n    //    presents this on the next and all subsequent requests. The hash of\n    //    this is sufficient to ensure these requests get sent to the same\n    //    endpoint. The cookie is generated by hashing the source and\n    //    destination ports and addresses so that multiple independent HTTP2\n    //    streams on the same connection will independently receive the same\n    //    cookie, even if they arrive at the Envoy simultaneously.\n    message Cookie {\n      // The name of the cookie that will be used to obtain the hash key. If the\n      // cookie is not present and ttl below is not set, no hash will be\n      // produced.\n      string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // If specified, a cookie with the TTL will be generated if the cookie is\n      // not present. If the TTL is present and zero, the generated cookie will\n      // be a session cookie.\n      google.protobuf.Duration ttl = 2;\n\n      // The name of the path for the cookie. If no path is specified here, no path\n      // will be set for the cookie.\n      string path = 3;\n    }\n\n    message ConnectionProperties {\n      // Hash on source IP address.\n      bool source_ip = 1;\n    }\n\n    message QueryParameter {\n      // The name of the URL query parameter that will be used to obtain the hash\n      // key. 
If the parameter is not present, no hash will be produced. Query\n      // parameter names are case-sensitive.\n      string name = 1 [(validate.rules).string = {min_bytes: 1}];\n    }\n\n    message FilterState {\n      // The name of the Object in the per-request filterState, which is an\n      // Envoy::Http::Hashable object. If there is no data associated with the key,\n      // or the stored object is not Envoy::Http::Hashable, no hash will be produced.\n      string key = 1 [(validate.rules).string = {min_bytes: 1}];\n    }\n\n    oneof policy_specifier {\n      option (validate.required) = true;\n\n      // Header hash policy.\n      Header header = 1;\n\n      // Cookie hash policy.\n      Cookie cookie = 2;\n\n      // Connection properties hash policy.\n      ConnectionProperties connection_properties = 3;\n\n      // Query parameter hash policy.\n      QueryParameter query_parameter = 5;\n\n      // Filter state hash policy.\n      FilterState filter_state = 6;\n    }\n\n    // The flag that short-circuits the hash computing. 
This field provides a\n    // 'fallback' style of configuration: \"if a terminal policy doesn't work,\n    // fallback to rest of the policy list\", it saves time when the terminal\n    // policy works.\n    //\n    // If true, and there is already a hash computed, ignore rest of the\n    // list of hash policies.\n    // For example, if the following hash methods are configured:\n    //\n    //  ========= ========\n    //  specifier terminal\n    //  ========= ========\n    //  Header A  true\n    //  Header B  false\n    //  Header C  false\n    //  ========= ========\n    //\n    // The generateHash process ends if policy \"header A\" generates a hash, as\n    // it's a terminal policy.\n    bool terminal = 4;\n  }\n\n  // Allows enabling and disabling upgrades on a per-route basis.\n  // This overrides any enabled/disabled upgrade filter chain specified in the\n  // HttpConnectionManager\n  // :ref:`upgrade_configs\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.upgrade_configs>`\n  // but does not affect any custom filter chain specified there.\n  message UpgradeConfig {\n    // The case-insensitive name of this upgrade, e.g. \"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type] will be proxied upstream.\n    string upgrade_type = 1\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Determines if upgrades are available on this route. 
Defaults to true.\n    google.protobuf.BoolValue enabled = 2;\n  }\n\n  reserved 12, 18, 19, 16, 22, 21;\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // HTTP header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist, Envoy will\n    // return a 404 response.\n    //\n    // .. attention::\n    //\n    //   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1\n    //   *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n    string cluster_header = 2\n        [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster. See\n    // :ref:`traffic splitting <config_http_conn_man_route_table_traffic_splitting_split>`\n    // for additional documentation.\n    WeightedCluster weighted_clusters = 3;\n  }\n\n  // The HTTP status code to use when configured cluster is not found.\n  // The default response code is 503 Service Unavailable.\n  ClusterNotFoundResponseCode cluster_not_found_response_code = 20\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n  // in the upstream cluster with metadata matching what's set in this field will be considered\n  // for load balancing. If using :ref:`weighted_clusters\n  // <envoy_api_field_route.RouteAction.weighted_clusters>`, metadata will be merged, with values\n  // provided there taking precedence. 
The filter name should be specified as *envoy.lb*.\n  core.Metadata metadata_match = 4;\n\n  // Indicates that during forwarding, the matched prefix (or path) should be\n  // swapped with this value. This option allows application URLs to be rooted\n  // at a different path from those exposed at the reverse proxy layer. The router filter will\n  // place the original path before rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of *prefix_rewrite* or\n  // :ref:`regex_rewrite <envoy_api_field_route.RouteAction.regex_rewrite>`\n  // may be specified.\n  //\n  // .. attention::\n  //\n  //   Pay careful attention to the use of trailing slashes in the\n  //   :ref:`route's match <envoy_api_field_route.Route.match>` prefix value.\n  //   Stripping a prefix from a path requires multiple Routes to handle all cases. For example,\n  //   rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single\n  //   :ref:`Route <envoy_api_msg_route.Route>`, as shown by the below config entries:\n  //\n  //   .. code-block:: yaml\n  //\n  //     - match:\n  //         prefix: \"/prefix/\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //     - match:\n  //         prefix: \"/prefix\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //\n  //   Having above entries in the config, requests to */prefix* will be stripped to */*, while\n  //   requests to */prefix/etc* will be stripped to */etc*.\n  string prefix_rewrite = 5\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Indicates that during forwarding, portions of the path that match the\n  // pattern should be rewritten, even allowing the substitution of capture\n  // groups from the pattern into the new path as specified by the rewrite\n  // substitution string. 
This is useful to allow application paths to be\n  // rewritten in a way that is aware of segments with variable content like\n  // identifiers. The router filter will place the original path as it was\n  // before the rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of :ref:`prefix_rewrite <envoy_api_field_route.RouteAction.prefix_rewrite>`\n  // or *regex_rewrite* may be specified.\n  //\n  // Examples using Google's `RE2 <https://github.com/google/re2>`_ engine:\n  //\n  // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution\n  //   string of ``\\2/instance/\\1`` would transform ``/service/foo/v1/api``\n  //   into ``/v1/api/instance/foo``.\n  //\n  // * The pattern ``one`` paired with a substitution string of ``two`` would\n  //   transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``.\n  //\n  // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of\n  //   ``\\1two\\2`` would replace only the first occurrence of ``one``,\n  //   transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``.\n  //\n  // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/``\n  //   would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to\n  //   ``/aaa/yyy/bbb``.\n  type.matcher.RegexMatchAndSubstitute regex_rewrite = 32;\n\n  oneof host_rewrite_specifier {\n    // Indicates that during forwarding, the host header will be swapped with\n    // this value.\n    string host_rewrite = 6 [\n      (validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false},\n      (udpa.annotations.field_migrate).rename = \"host_rewrite_literal\"\n    ];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the hostname of the upstream host chosen by the cluster manager. 
This\n    // option is applicable only when the destination cluster for a route is of\n    // type *strict_dns* or *logical_dns*. Setting this to true with other cluster\n    // types has no effect.\n    google.protobuf.BoolValue auto_host_rewrite = 7;\n\n    // Indicates that during forwarding, the host header will be swapped with the content of given\n    // downstream or :ref:`custom <config_http_conn_man_headers_custom_request_headers>` header.\n    // If header value is empty, host header is left intact.\n    //\n    // .. attention::\n    //\n    //   Pay attention to the potential security implications of using this option. Provided header\n    //   must come from trusted source.\n    string auto_host_rewrite_header = 29 [\n      (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false},\n      (udpa.annotations.field_migrate).rename = \"host_rewrite_header\"\n    ];\n  }\n\n  // Specifies the upstream timeout for the route. If not specified, the default is 15s. This\n  // spans between the point at which the entire downstream request (i.e. end-of-stream) has been\n  // processed and when the upstream response has been completely processed. A value of 0 will\n  // disable the route's timeout.\n  //\n  // .. note::\n  //\n  //   This timeout includes all retries. See also\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //   :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration timeout = 8;\n\n  // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout,\n  // although the connection manager wide :ref:`stream_idle_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  // will still apply. 
A value of 0 will completely disable the route's idle timeout, even if a\n  // connection manager stream idle timeout is configured.\n  //\n  // The idle timeout is distinct to :ref:`timeout\n  // <envoy_api_field_route.RouteAction.timeout>`, which provides an upper bound\n  // on the upstream response time; :ref:`idle_timeout\n  // <envoy_api_field_route.RouteAction.idle_timeout>` instead bounds the amount\n  // of time the request's stream may be idle.\n  //\n  // After header decoding, the idle timeout will apply on downstream and\n  // upstream request events. Each time an encode/decode event for headers or\n  // data is processed for the stream, the timer will be reset. If the timeout\n  // fires, the stream is terminated with a 408 Request Timeout error code if no\n  // upstream response header has been received, otherwise a stream reset\n  // occurs.\n  google.protobuf.Duration idle_timeout = 24;\n\n  // Indicates that the route has a retry policy. Note that if this is set,\n  // it'll take precedence over the virtual host level retry policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  RetryPolicy retry_policy = 9;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. Note that if this is set, it'll take\n  // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged,\n  // most internal one becomes the enforced policy). :ref:`Retry policy <envoy_api_field_route.VirtualHost.retry_policy>`\n  // should not be set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 33;\n\n  // Indicates that the route has a request mirroring policy.\n  //\n  // .. 
attention::\n  //   This field has been deprecated in favor of `request_mirror_policies` which supports one or\n  //   more mirroring policies.\n  RequestMirrorPolicy request_mirror_policy = 10 [deprecated = true];\n\n  // Indicates that the route has request mirroring policies.\n  repeated RequestMirrorPolicy request_mirror_policies = 30;\n\n  // Optionally specifies the :ref:`routing priority <arch_overview_http_routing_priority>`.\n  core.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies a set of rate limit configurations that could be applied to the\n  // route.\n  repeated RateLimit rate_limits = 13;\n\n  // Specifies if the rate limit filter should include the virtual host rate\n  // limits. By default, if the route configured rate limits, the virtual host\n  // :ref:`rate_limits <envoy_api_field_route.VirtualHost.rate_limits>` are not applied to the\n  // request.\n  google.protobuf.BoolValue include_vh_rate_limits = 14;\n\n  // Specifies a list of hash policies to use for ring hash load balancing. Each\n  // hash policy is evaluated individually and the combined result is used to\n  // route the request. The method of combination is deterministic such that\n  // identical lists of hash policies will produce the same hash. Since a hash\n  // policy examines specific parts of a request, it can fail to produce a hash\n  // (i.e. if the hashed header is not present). If (and only if) all configured\n  // hash policies fail to generate a hash, no hash will be produced for\n  // the route. In this case, the behavior is the same as if no hash policies\n  // were specified (i.e. the ring hash load balancer will choose a random\n  // backend). 
If a hash policy has the \"terminal\" attribute set to true, and\n  // there is already a hash generated, the hash is returned immediately,\n  // ignoring the rest of the hash policy list.\n  repeated HashPolicy hash_policy = 15;\n\n  // Indicates that the route has a CORS policy.\n  CorsPolicy cors = 17;\n\n  // If present, and the request is a gRPC request, use the\n  // `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_,\n  // or its default value (infinity) instead of\n  // :ref:`timeout <envoy_api_field_route.RouteAction.timeout>`, but limit the applied timeout\n  // to the maximum value specified here. If configured as 0, the maximum allowed timeout for\n  // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used\n  // and gRPC requests time out like any other requests using\n  // :ref:`timeout <envoy_api_field_route.RouteAction.timeout>` or its default.\n  // This can be used to prevent unexpected upstream request timeouts due to potentially long\n  // time gaps between gRPC request and response in gRPC streaming mode.\n  //\n  // .. note::\n  //\n  //    If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes\n  //    precedence over `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, when\n  //    both are present. See also\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //    :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration max_grpc_timeout = 23;\n\n  // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting\n  // the provided duration from the header. 
This is useful in allowing Envoy to set its global\n  // timeout to be less than that of the deadline imposed by the calling client, which makes it more\n  // likely that Envoy will handle the timeout instead of having the call canceled by the client.\n  // The offset will only be applied if the provided grpc_timeout is greater than the offset. This\n  // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning\n  // infinity).\n  google.protobuf.Duration grpc_timeout_offset = 28;\n\n  repeated UpgradeConfig upgrade_configs = 25;\n\n  InternalRedirectAction internal_redirect_action = 26;\n\n  // An internal redirect is handled, iff the number of previous internal redirects that a\n  // downstream request has encountered is lower than this value, and\n  // :ref:`internal_redirect_action <envoy_api_field_route.RouteAction.internal_redirect_action>`\n  // is set to :ref:`HANDLE_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_route.RouteAction.InternalRedirectAction.HANDLE_INTERNAL_REDIRECT>`\n  // In the case where a downstream request is bounced among multiple routes by internal redirect,\n  // the first route that hits this threshold, or has\n  // :ref:`internal_redirect_action <envoy_api_field_route.RouteAction.internal_redirect_action>`\n  // set to\n  // :ref:`PASS_THROUGH_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_route.RouteAction.InternalRedirectAction.PASS_THROUGH_INTERNAL_REDIRECT>`\n  // will pass the redirect back to downstream.\n  //\n  // If not specified, at most one redirect will be followed.\n  google.protobuf.UInt32Value max_internal_redirects = 31;\n\n  // Indicates that the route has a hedge policy. 
Note that if this is set,\n  // it'll take precedence over the virtual host level hedge policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  HedgePolicy hedge_policy = 27;\n}\n\n// HTTP retry :ref:`architecture overview <arch_overview_http_routing_retry>`.\n// [#next-free-field: 11]\nmessage RetryPolicy {\n  message RetryPriority {\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    oneof config_type {\n      google.protobuf.Struct config = 2 [deprecated = true];\n\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryHostPredicate {\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    oneof config_type {\n      google.protobuf.Struct config = 2 [deprecated = true];\n\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryBackOff {\n    // Specifies the base interval between retries. This parameter is required and must be greater\n    // than zero. Values less than 1 ms are rounded up to 1 ms.\n    // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's\n    // back-off algorithm.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n\n    // Specifies the maximum interval between retries. This parameter is optional, but must be\n    // greater than or equal to the `base_interval` if set. The default is 10 times the\n    // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion\n    // of Envoy's back-off algorithm.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  // Specifies the conditions under which retry takes place. 
These are the same\n  // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and\n  // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`.\n  string retry_on = 1;\n\n  // Specifies the allowed number of retries. This parameter is optional and\n  // defaults to 1. These are the same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-max-retries`.\n  google.protobuf.UInt32Value num_retries = 2;\n\n  // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The\n  // same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.\n  //\n  // .. note::\n  //\n  //   If left unspecified, Envoy will use the global\n  //   :ref:`route timeout <envoy_api_field_route.RouteAction.timeout>` for the request.\n  //   Consequently, when using a :ref:`5xx <config_http_filters_router_x-envoy-retry-on>` based\n  //   retry policy, a request that times out will not be retried as the total timeout budget\n  //   would have been exhausted.\n  google.protobuf.Duration per_try_timeout = 3;\n\n  // Specifies an implementation of a RetryPriority which is used to determine the\n  // distribution of load across priorities used for retries. Refer to\n  // :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more details.\n  RetryPriority retry_priority = 4;\n\n  // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host\n  // for retries. If any of the predicates reject the host, host selection will be reattempted.\n  // Refer to :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more\n  // details.\n  repeated RetryHostPredicate retry_host_predicate = 5;\n\n  // The maximum number of times host selection will be reattempted before giving up, at which\n  // point the host that was last selected will be routed to. 
If unspecified, this will default to\n  // retrying once.\n  int64 host_selection_retry_max_attempts = 6;\n\n  // HTTP status codes that should trigger a retry in addition to those specified by retry_on.\n  repeated uint32 retriable_status_codes = 7;\n\n  // Specifies parameters that control retry back off. This parameter is optional, in which case the\n  // default base interval is 25 milliseconds or, if set, the current value of the\n  // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times\n  // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries`\n  // describes Envoy's back-off algorithm.\n  RetryBackOff retry_back_off = 8;\n\n  // HTTP response headers that trigger a retry if present in the response. A retry will be\n  // triggered if any of the header matches match the upstream response headers.\n  // The field is only consulted if 'retriable-headers' retry policy is active.\n  repeated HeaderMatcher retriable_headers = 9;\n\n  // HTTP headers which must be present in the request for retries to be attempted.\n  repeated HeaderMatcher retriable_request_headers = 10;\n}\n\n// HTTP request hedging :ref:`architecture overview <arch_overview_http_routing_hedging>`.\nmessage HedgePolicy {\n  // Specifies the number of initial requests that should be sent upstream.\n  // Must be at least 1.\n  // Defaults to 1.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies a probability that an additional upstream request should be sent\n  // on top of what is specified by initial_requests.\n  // Defaults to 0.\n  // [#not-implemented-hide:]\n  type.FractionalPercent additional_request_chance = 2;\n\n  // Indicates that a hedged request should be sent when the per-try timeout\n  // is hit. 
This will only occur if the retry policy also indicates that a\n  // timed out request should be retried.\n  // Once a timed out request is retried due to per try timeout, the router\n  // filter will ensure that it is not retried again even if the returned\n  // response headers would otherwise be retried according to the specified\n  // :ref:`RetryPolicy <envoy_api_msg_route.RetryPolicy>`.\n  // Defaults to false.\n  bool hedge_on_per_try_timeout = 3;\n}\n\n// [#next-free-field: 9]\nmessage RedirectAction {\n  enum RedirectResponseCode {\n    // Moved Permanently HTTP Status Code - 301.\n    MOVED_PERMANENTLY = 0;\n\n    // Found HTTP Status Code - 302.\n    FOUND = 1;\n\n    // See Other HTTP Status Code - 303.\n    SEE_OTHER = 2;\n\n    // Temporary Redirect HTTP Status Code - 307.\n    TEMPORARY_REDIRECT = 3;\n\n    // Permanent Redirect HTTP Status Code - 308.\n    PERMANENT_REDIRECT = 4;\n  }\n\n  // When the scheme redirection takes place, the following rules apply:\n  //  1. If the source URI scheme is `http` and the port is explicitly\n  //     set to `:80`, the port will be removed after the redirection\n  //  2. 
If the source URI scheme is `https` and the port is explicitly\n  //     set to `:443`, the port will be removed after the redirection\n  oneof scheme_rewrite_specifier {\n    // The scheme portion of the URL will be swapped with \"https\".\n    bool https_redirect = 4;\n\n    // The scheme portion of the URL will be swapped with this value.\n    string scheme_redirect = 7;\n  }\n\n  // The host portion of the URL will be swapped with this value.\n  string host_redirect = 1\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // The port value of the URL will be swapped with this value.\n  uint32 port_redirect = 8;\n\n  oneof path_rewrite_specifier {\n    // The path portion of the URL will be swapped with this value.\n    // Please note that query string in path_redirect will override the\n    // request's query string and will not be stripped.\n    //\n    // For example, let's say we have the following routes:\n    //\n    // - match: { path: \"/old-path-1\" }\n    //   redirect: { path_redirect: \"/new-path-1\" }\n    // - match: { path: \"/old-path-2\" }\n    //   redirect: { path_redirect: \"/new-path-2\", strip_query: \"true\" }\n    // - match: { path: \"/old-path-3\" }\n    //   redirect: { path_redirect: \"/new-path-3?foo=1\", strip_query: \"true\" }\n    //\n    // 1. if request uri is \"/old-path-1?bar=1\", users will be redirected to \"/new-path-1?bar=1\"\n    // 2. if request uri is \"/old-path-2?bar=1\", users will be redirected to \"/new-path-2\"\n    // 3. if request uri is \"/old-path-3?bar=1\", users will be redirected to \"/new-path-3?foo=1\"\n    string path_redirect = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during redirection, the matched prefix (or path)\n    // should be swapped with this value. This option allows redirect URLs to be dynamically created\n    // based on the request.\n    //\n    // .. 
attention::\n    //\n    //   Pay attention to the use of trailing slashes as mentioned in\n    //   :ref:`RouteAction's prefix_rewrite <envoy_api_field_route.RouteAction.prefix_rewrite>`.\n    string prefix_rewrite = 5\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // The HTTP status code to use in the redirect response. The default response\n  // code is MOVED_PERMANENTLY (301).\n  RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // Indicates that during redirection, the query portion of the URL will\n  // be removed. Default value is false.\n  bool strip_query = 6;\n}\n\nmessage DirectResponseAction {\n  // Specifies the HTTP response status to be returned.\n  uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}];\n\n  // Specifies the content of the response body. If this setting is omitted,\n  // no body is included in the generated response.\n  //\n  // .. note::\n  //\n  //   Headers can be specified using *response_headers_to_add* in the enclosing\n  //   :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or\n  //   :ref:`envoy_api_msg_route.VirtualHost`.\n  core.DataSource body = 2;\n}\n\nmessage Decorator {\n  // The operation name associated with the request matched to this route. If tracing is\n  // enabled, this information will be used as the span name reported for this request.\n  //\n  // .. note::\n  //\n  //   For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden\n  //   by the :ref:`x-envoy-decorator-operation\n  //   <config_http_filters_router_x-envoy-decorator-operation>` header.\n  string operation = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Whether the decorated details should be propagated to the other party. 
The default is true.\n  google.protobuf.BoolValue propagate = 2;\n}\n\nmessage Tracing {\n  // Target percentage of requests managed by this HTTP connection manager that will be force\n  // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n  // header is set. This field is a direct analog for the runtime variable\n  // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n  // <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.FractionalPercent client_sampling = 1;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be randomly\n  // selected for trace generation, if not requested by the client or not forced. This field is\n  // a direct analog for the runtime variable 'tracing.random_sampling' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.FractionalPercent random_sampling = 2;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be traced\n  // after all other sampling checks have been applied (client-directed, force tracing, random\n  // sampling). This field functions as an upper limit on the total configured sampling rate. For\n  // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n  // of client requests with the appropriate headers to be force traced. This field is a direct\n  // analog for the runtime variable 'tracing.global_enabled' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.FractionalPercent overall_sampling = 3;\n\n  // A list of custom tags with unique tag name to create tags for the active span.\n  // It will take effect after merging with the :ref:`corresponding configuration\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing.custom_tags>`\n  // configured in the HTTP connection manager. 
If two tags with the same name are configured\n  // each in the HTTP connection manager and the route level, the one configured here takes\n  // priority.\n  repeated type.tracing.v2.CustomTag custom_tags = 4;\n}\n\n// A virtual cluster is a way of specifying a regex matching rule against\n// certain important endpoints such that statistics are generated explicitly for\n// the matched requests. The reason this is useful is that when doing\n// prefix/path matching Envoy does not always know what the application\n// considers to be an endpoint. Thus, it’s impossible for Envoy to generically\n// emit per endpoint statistics. However, often systems have highly critical\n// endpoints that they wish to get “perfect” statistics on. Virtual cluster\n// statistics are perfect in the sense that they are emitted on the downstream\n// side such that they include network level failures.\n//\n// Documentation for :ref:`virtual cluster statistics <config_http_filters_router_vcluster_stats>`.\n//\n// .. note::\n//\n//    Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for\n//    every application endpoint. This is both not easily maintainable and as well the matching and\n//    statistics output are not free.\nmessage VirtualCluster {\n  // Specifies a regex pattern to use for matching requests. The entire path of the request\n  // must match the regex. The regex grammar used is defined `here\n  // <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.\n  //\n  // Examples:\n  //\n  // * The regex ``/rides/\\d+`` matches the path */rides/0*\n  // * The regex ``/rides/\\d+`` matches the path */rides/123*\n  // * The regex ``/rides/\\d+`` does not match the path */rides/123/456*\n  //\n  // .. 
attention::\n  //   This field has been deprecated in favor of `headers` as it is not safe for use with\n  //   untrusted input in all cases.\n  string pattern = 1 [\n    deprecated = true,\n    (validate.rules).string = {max_bytes: 1024},\n    (envoy.annotations.disallowed_by_default) = true\n  ];\n\n  // Specifies a list of header matchers to use for matching requests. Each specified header must\n  // match. The pseudo-headers `:path` and `:method` can be used to match the request path and\n  // method, respectively.\n  repeated HeaderMatcher headers = 4;\n\n  // Specifies the name of the virtual cluster. The virtual cluster name as well\n  // as the virtual host name are used when emitting statistics. The statistics are emitted by the\n  // router filter and are documented :ref:`here <config_http_filters_router_stats>`.\n  string name = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Optionally specifies the HTTP method to match on. For example GET, PUT,\n  // etc.\n  //\n  // .. attention::\n  //   This field has been deprecated in favor of `headers`.\n  core.RequestMethod method = 3\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// Global rate limiting :ref:`architecture overview <arch_overview_global_rate_limit>`.\nmessage RateLimit {\n  // [#next-free-field: 7]\n  message Action {\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"source_cluster\", \"<local service cluster>\")\n    //\n    // <local service cluster> is derived from the :option:`--service-cluster` option.\n    message SourceCluster {\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"destination_cluster\", \"<routed target cluster>\")\n    //\n    // Once a request matches against a route table rule, a routed cluster is determined by one of\n    // the following :ref:`route table configuration <envoy_api_msg_RouteConfiguration>`\n    // settings:\n    //\n    // * :ref:`cluster <envoy_api_field_route.RouteAction.cluster>` indicates the upstream cluster\n    //   to route to.\n    // * :ref:`weighted_clusters <envoy_api_field_route.RouteAction.weighted_clusters>`\n    //   chooses a cluster randomly from a set of clusters with attributed weight.\n    // * :ref:`cluster_header <envoy_api_field_route.RouteAction.cluster_header>` indicates which\n    //   header in the request contains the target cluster.\n    message DestinationCluster {\n    }\n\n    // The following descriptor entry is appended when a header contains a key that matches the\n    // *header_name*:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<header_value_queried_from_header>\")\n    message RequestHeaders {\n      // The header name to be queried from the request headers. The header’s\n      // value is used to populate the value of the descriptor entry for the\n      // descriptor_key.\n      string header_name = 1 [\n        (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}\n      ];\n\n      // The key to use in the descriptor entry.\n      string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}];\n    }\n\n    // The following descriptor entry is appended to the descriptor and is populated using the\n    // trusted address from :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"remote_address\", \"<trusted address from x-forwarded-for>\")\n    message RemoteAddress {\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"generic_key\", \"<descriptor_value>\")\n    message GenericKey {\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"header_match\", \"<descriptor_value>\")\n    message HeaderValueMatch {\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // If set to true, the action will append a descriptor entry when the\n      // request matches the headers. If set to false, the action will append a\n      // descriptor entry when the request does not match the headers. The\n      // default value is true.\n      google.protobuf.BoolValue expect_match = 2;\n\n      // Specifies a set of headers that the rate limit action should match\n      // on. The action will check the request’s headers against all the\n      // specified headers in the config. 
A match will happen if all the\n      // headers in the config are present in the request with the same values\n      // (or based on presence if the value field is not in the config).\n      repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    oneof action_specifier {\n      option (validate.required) = true;\n\n      // Rate limit on source cluster.\n      SourceCluster source_cluster = 1;\n\n      // Rate limit on destination cluster.\n      DestinationCluster destination_cluster = 2;\n\n      // Rate limit on request headers.\n      RequestHeaders request_headers = 3;\n\n      // Rate limit on remote address.\n      RemoteAddress remote_address = 4;\n\n      // Rate limit on a generic key.\n      GenericKey generic_key = 5;\n\n      // Rate limit on the existence of request headers.\n      HeaderValueMatch header_value_match = 6;\n    }\n  }\n\n  // Refers to the stage set in the filter. The rate limit configuration only\n  // applies to filters with the same stage number. The default stage number is\n  // 0.\n  //\n  // .. note::\n  //\n  //   The filter supports a range of 0 - 10 inclusively for stage numbers.\n  google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}];\n\n  // The key to be set in runtime to disable this rate limit configuration.\n  string disable_key = 2;\n\n  // A list of actions that are to be applied for this rate limit configuration.\n  // Order matters as the actions are processed sequentially and the descriptor\n  // is composed by appending descriptor entries in that sequence. If an action\n  // cannot append a descriptor entry, no descriptor is generated for the\n  // configuration. See :ref:`composing actions\n  // <config_http_filters_rate_limit_composing_actions>` for additional documentation.\n  repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// .. 
attention::\n//\n//   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host*\n//   header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n//\n// .. attention::\n//\n//   To route on HTTP method, use the special HTTP/2 *:method* header. This works for both\n//   HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g.,\n//\n//   .. code-block:: json\n//\n//     {\n//       \"name\": \":method\",\n//       \"exact_match\": \"POST\"\n//     }\n//\n// .. attention::\n//   In the absence of any header match specifier, match will default to :ref:`present_match\n//   <envoy_api_field_route.HeaderMatcher.present_match>`. i.e, a request that has the :ref:`name\n//   <envoy_api_field_route.HeaderMatcher.name>` header will match, regardless of the header's\n//   value.\n//\n//  [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.]\n// [#next-free-field: 12]\nmessage HeaderMatcher {\n  reserved 2, 3;\n\n  // Specifies the name of the header in the request.\n  string name = 1\n      [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Specifies how the header match will be performed to route the request.\n  oneof header_match_specifier {\n    // If specified, header match will be performed based on the value of the header.\n    string exact_match = 4;\n\n    // If specified, this regex string is a regular expression rule which implies the entire request\n    // header value must match the regex. The rule will not match if only a subsequence of the\n    // request header value matches the regex. The regex grammar used in the value field is defined\n    // `here <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.\n    //\n    // Examples:\n    //\n    // * The regex ``\\d{3}`` matches the value *123*\n    // * The regex ``\\d{3}`` does not match the value *1234*\n    // * The regex ``\\d{3}`` does not match the value *123.456*\n    //\n    // .. 
attention::\n    //   This field has been deprecated in favor of `safe_regex_match` as it is not safe for use\n    //   with untrusted input in all cases.\n    string regex_match = 5 [\n      deprecated = true,\n      (validate.rules).string = {max_bytes: 1024},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n\n    // If specified, this regex string is a regular expression rule which implies the entire request\n    // header value must match the regex. The rule will not match if only a subsequence of the\n    // request header value matches the regex.\n    type.matcher.RegexMatcher safe_regex_match = 11;\n\n    // If specified, header match will be performed based on range.\n    // The rule will match if the request header value is within this range.\n    // The entire request header value must represent an integer in base 10 notation: consisting of\n    // an optional plus or minus sign followed by a sequence of digits. The rule will not match if\n    // the header value does not represent an integer. 
Match will fail for empty values, floating\n    // point numbers or if only a subsequence of the header value is an integer.\n    //\n    // Examples:\n    //\n    // * For range [-10,0), route will match for header value -1, but not for 0, \"somestring\", 10.9,\n    //   \"-1somestring\"\n    type.Int64Range range_match = 6;\n\n    // If specified, header match will be performed based on whether the header is in the\n    // request.\n    bool present_match = 7;\n\n    // If specified, header match will be performed based on the prefix of the header value.\n    // Note: empty prefix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.\n    string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}];\n\n    // If specified, header match will be performed based on the suffix of the header value.\n    // Note: empty suffix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.\n    string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // If specified, the match result will be inverted before checking. Defaults to false.\n  //\n  // Examples:\n  //\n  // * The regex ``\\d{3}`` does not match the value *1234*, so it will match when inverted.\n  // * The range [-10,0) will match the value -1, so it will not match when inverted.\n  bool invert_match = 8;\n}\n\n// Query parameter matching treats the query string of a request's :path header\n// as an ampersand-separated list of keys and/or key=value elements.\n// [#next-free-field: 7]\nmessage QueryParameterMatcher {\n  // Specifies the name of a key that must be present in the requested\n  // *path*'s query string.\n  string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}];\n\n  // Specifies the value of the key. 
If the value is absent, a request\n  // that contains the key in its query string will match, whether the\n  // key appears with a value (e.g., \"?debug=true\") or not (e.g., \"?debug\")\n  //\n  // ..attention::\n  //   This field is deprecated. Use an `exact` match inside the `string_match` field.\n  string value = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Specifies whether the query parameter value is a regular expression.\n  // Defaults to false. The entire query parameter value (i.e., the part to\n  // the right of the equals sign in \"key=value\") must match the regex.\n  // E.g., the regex ``\\d+$`` will match *123* but not *a123* or *123a*.\n  //\n  // ..attention::\n  //   This field is deprecated. Use a `safe_regex` match inside the `string_match` field.\n  google.protobuf.BoolValue regex = 4\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  oneof query_parameter_match_specifier {\n    // Specifies whether a query parameter value should match against a string.\n    type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}];\n\n    // Specifies whether a query parameter should be present.\n    bool present_match = 6;\n  }\n}\n"
  },
  {
    "path": "api/envoy/api/v2/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP route configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// [#next-free-field: 11]\nmessage RouteConfiguration {\n  // The name of the route configuration. For example, it might match\n  // :ref:`route_config_name\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.Rds.route_config_name>` in\n  // :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`.\n  string name = 1;\n\n  // An array of virtual hosts that make up the route table.\n  repeated route.VirtualHost virtual_hosts = 2;\n\n  // An array of virtual hosts will be dynamically loaded via the VHDS API.\n  // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used\n  // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for\n  // on-demand discovery of virtual hosts. The contents of these two fields will be merged to\n  // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration\n  // taking precedence.\n  Vhds vhds = 9;\n\n  // Optionally specifies a list of HTTP headers that the connection manager\n  // will consider to be internal only. 
If they are found on external requests they will be cleaned\n  // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information.\n  repeated string internal_only_headers = 3 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each response that\n  // the connection manager encodes. Headers specified at this level are applied\n  // after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or\n  // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption response_headers_to_add = 4\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // that the connection manager encodes.\n  repeated string response_headers_to_remove = 5 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // routed by the HTTP connection manager. Headers specified at this level are\n  // applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or\n  // :ref:`envoy_api_msg_route.RouteAction`. 
For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption request_headers_to_add = 6\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // routed by the HTTP connection manager.\n  repeated string request_headers_to_remove = 8 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // By default, headers that should be added/removed are evaluated from most to least specific:\n  //\n  // * route level\n  // * virtual host level\n  // * connection manager level\n  //\n  // To allow setting overrides at the route or virtual host level, this order can be reversed\n  // by setting this option to true. Defaults to false.\n  //\n  // [#next-major-version: In the v3 API, this will default to true.]\n  bool most_specific_header_mutations_wins = 10;\n\n  // An optional boolean that specifies whether the clusters that the route\n  // table refers to will be validated by the cluster manager. If set to true\n  // and a route refers to a non-existent cluster, the route table will not\n  // load. If set to false and a route refers to a non-existent cluster, the\n  // route table will load and the router filter will return a 404 if the route\n  // is selected at runtime. This setting defaults to true if the route table\n  // is statically defined via the :ref:`route_config\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.route_config>`\n  // option. This setting default to false if the route table is loaded dynamically via the\n  // :ref:`rds\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.rds>`\n  // option. 
Users may wish to override the default behavior in certain cases (for example when\n  // using CDS with a static route table).\n  google.protobuf.BoolValue validate_clusters = 7;\n}\n\nmessage Vhds {\n  // Configuration source specifier for VHDS.\n  core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/api/v2/scoped_route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"ScopedRouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP scoped routing configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// Specifies a routing scope, which associates a\n// :ref:`Key<envoy_api_msg_ScopedRouteConfiguration.Key>` to a\n// :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name).\n//\n// The HTTP connection manager builds up a table consisting of these Key to\n// RouteConfiguration mappings, and looks up the RouteConfiguration to use per\n// request according to the algorithm specified in the\n// :ref:`scope_key_builder<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scope_key_builder>`\n// assigned to the HttpConnectionManager.\n//\n// For example, with the following configurations (in YAML):\n//\n// HttpConnectionManager config:\n//\n// .. code::\n//\n//   ...\n//   scoped_routes:\n//     name: foo-scoped-routes\n//     scope_key_builder:\n//       fragments:\n//         - header_value_extractor:\n//             name: X-Route-Selector\n//             element_separator: ,\n//             element:\n//               separator: =\n//               key: vip\n//\n// ScopedRouteConfiguration resources (specified statically via\n// :ref:`scoped_route_configurations_list<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scoped_route_configurations_list>`\n// or obtained dynamically via SRDS):\n//\n// .. 
code::\n//\n//  (1)\n//   name: route-scope1\n//   route_configuration_name: route-config1\n//   key:\n//      fragments:\n//        - string_key: 172.10.10.20\n//\n//  (2)\n//   name: route-scope2\n//   route_configuration_name: route-config2\n//   key:\n//     fragments:\n//       - string_key: 172.20.20.30\n//\n// A request from a client such as:\n//\n// .. code::\n//\n//     GET / HTTP/1.1\n//     Host: foo.com\n//     X-Route-Selector: vip=172.10.10.20\n//\n// would result in the routing table defined by the `route-config1`\n// RouteConfiguration being assigned to the HTTP request/stream.\n//\nmessage ScopedRouteConfiguration {\n  // Specifies a key which is matched against the output of the\n  // :ref:`scope_key_builder<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scope_key_builder>`\n  // specified in the HttpConnectionManager. The matching is done per HTTP\n  // request and is dependent on the order of the fragments contained in the\n  // Key.\n  message Key {\n    message Fragment {\n      oneof type {\n        option (validate.required) = true;\n\n        // A string to match against.\n        string string_key = 1;\n      }\n    }\n\n    // The ordered set of fragments to match against. 
The order must match the\n    // fragments in the corresponding\n    // :ref:`scope_key_builder<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scope_key_builder>`.\n    repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // The name assigned to the routing scope.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an\n  // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated\n  // with this scope.\n  string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // The key to match against.\n  Key key = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/api/v2/srds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/scoped_route.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"SrdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: SRDS]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// The Scoped Routes Discovery Service (SRDS) API distributes\n// :ref:`ScopedRouteConfiguration<envoy_api_msg.ScopedRouteConfiguration>`\n// resources. Each ScopedRouteConfiguration resource represents a \"routing\n// scope\" containing a mapping that allows the HTTP connection manager to\n// dynamically assign a routing table (specified via a\n// :ref:`RouteConfiguration<envoy_api_msg_RouteConfiguration>` message) to each\n// HTTP request.\nservice ScopedRoutesDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.ScopedRouteConfiguration\";\n\n  rpc StreamScopedRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaScopedRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:scoped-routes\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage SrdsDummy {\n}\n"
  },
  {
    "path": "api/envoy/config/README.md",
    "content": "Protocol buffer definitions for Envoy's bootstrap, filter, and service configuration.\n\nVisibility should be constrained to none or `//envoy/config/bootstrap/v2` by default.\n"
  },
  {
    "path": "api/envoy/config/accesslog/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/accesslog/v2/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.accesslog.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.accesslog.v2\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.access_loggers.grpc.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Configuration for the built-in *envoy.access_loggers.http_grpc*\n// :ref:`AccessLog <envoy_api_msg_config.filter.accesslog.v2.AccessLog>`. This configuration will\n// populate :ref:`StreamAccessLogsMessage.http_logs\n// <envoy_api_field_service.accesslog.v2.StreamAccessLogsMessage.http_logs>`.\n// [#extension: envoy.access_loggers.http_grpc]\nmessage HttpGrpcAccessLogConfig {\n  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];\n\n  // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers\n  // <envoy_api_field_data.accesslog.v2.HTTPRequestProperties.request_headers>`.\n  repeated string additional_request_headers_to_log = 2;\n\n  // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers\n  // <envoy_api_field_data.accesslog.v2.HTTPResponseProperties.response_headers>`.\n  repeated string additional_response_headers_to_log = 3;\n\n  // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers\n  // <envoy_api_field_data.accesslog.v2.HTTPResponseProperties.response_trailers>`.\n  repeated string additional_response_trailers_to_log = 4;\n}\n\n// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. 
This configuration will\n// populate *StreamAccessLogsMessage.tcp_logs*.\n// [#extension: envoy.access_loggers.tcp_grpc]\nmessage TcpGrpcAccessLogConfig {\n  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];\n}\n\n// Common configuration for gRPC access logs.\n// [#next-free-field: 6]\nmessage CommonGrpcAccessLogConfig {\n  // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier\n  // <envoy_api_msg_service.accesslog.v2.StreamAccessLogsMessage.Identifier>`. This allows the\n  // access log server to differentiate between different access logs coming from the same Envoy.\n  string log_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The gRPC service for the access log service.\n  api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n\n  // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time\n  // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to\n  // 1 second.\n  google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}];\n\n  // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until\n  // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it\n  // to zero effectively disables the batching. Defaults to 16384.\n  google.protobuf.UInt32Value buffer_size_bytes = 4;\n\n  // Additional filter state objects to log in :ref:`filter_state_objects\n  // <envoy_api_field_data.accesslog.v2.AccessLogCommon.filter_state_objects>`.\n  // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object.\n  repeated string filter_state_objects_to_log = 5;\n}\n"
  },
  {
    "path": "api/envoy/config/accesslog/v2/file.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.accesslog.v2;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.accesslog.v2\";\noption java_outer_classname = \"FileProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.access_loggers.file.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: File access log]\n// [#extension: envoy.access_loggers.file]\n\n// Custom configuration for an :ref:`AccessLog <envoy_api_msg_config.filter.accesslog.v2.AccessLog>`\n// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file*\n// AccessLog.\nmessage FileAccessLog {\n  // A path to a local file to which to write the access log entries.\n  string path = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof access_log_format {\n    // Access log :ref:`format string<config_access_log_format_strings>`.\n    // Envoy supports :ref:`custom access log formats <config_access_log_format>` as well as a\n    // :ref:`default format <config_access_log_default_format>`.\n    string format = 2;\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. All values\n    // are rendered as strings.\n    google.protobuf.Struct json_format = 3;\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. Values are\n    // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may\n    // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the\n    // documentation for a specific command operator for details.\n    google.protobuf.Struct typed_json_format = 4;\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/accesslog/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/accesslog/v3/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.accesslog.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/metadata.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.accesslog.v3\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common access log types]\n\nmessage AccessLog {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.AccessLog\";\n\n  reserved 3;\n\n  reserved \"config\";\n\n  // The name of the access log implementation to instantiate. The name must\n  // match a statically registered access log. Current built-in loggers include:\n  //\n  // #. \"envoy.access_loggers.file\"\n  // #. \"envoy.access_loggers.http_grpc\"\n  // #. \"envoy.access_loggers.tcp_grpc\"\n  string name = 1;\n\n  // Filter which is used to determine if the access log needs to be written.\n  AccessLogFilter filter = 2;\n\n  // Custom configuration that depends on the access log being instantiated.\n  // Built-in configurations include:\n  //\n  // #. \"envoy.access_loggers.file\": :ref:`FileAccessLog\n  //    <envoy_api_msg_extensions.access_loggers.file.v3.FileAccessLog>`\n  // #. \"envoy.access_loggers.http_grpc\": :ref:`HttpGrpcAccessLogConfig\n  //    <envoy_api_msg_extensions.access_loggers.grpc.v3.HttpGrpcAccessLogConfig>`\n  // #. 
\"envoy.access_loggers.tcp_grpc\": :ref:`TcpGrpcAccessLogConfig\n  //    <envoy_api_msg_extensions.access_loggers.grpc.v3.TcpGrpcAccessLogConfig>`\n  oneof config_type {\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// [#next-free-field: 13]\nmessage AccessLogFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.AccessLogFilter\";\n\n  oneof filter_specifier {\n    option (validate.required) = true;\n\n    // Status code filter.\n    StatusCodeFilter status_code_filter = 1;\n\n    // Duration filter.\n    DurationFilter duration_filter = 2;\n\n    // Not health check filter.\n    NotHealthCheckFilter not_health_check_filter = 3;\n\n    // Traceable filter.\n    TraceableFilter traceable_filter = 4;\n\n    // Runtime filter.\n    RuntimeFilter runtime_filter = 5;\n\n    // And filter.\n    AndFilter and_filter = 6;\n\n    // Or filter.\n    OrFilter or_filter = 7;\n\n    // Header filter.\n    HeaderFilter header_filter = 8;\n\n    // Response flag filter.\n    ResponseFlagFilter response_flag_filter = 9;\n\n    // gRPC status filter.\n    GrpcStatusFilter grpc_status_filter = 10;\n\n    // Extension filter.\n    ExtensionFilter extension_filter = 11;\n\n    // Metadata Filter\n    MetadataFilter metadata_filter = 12;\n  }\n}\n\n// Filter on an integer comparison.\nmessage ComparisonFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.ComparisonFilter\";\n\n  enum Op {\n    // =\n    EQ = 0;\n\n    // >=\n    GE = 1;\n\n    // <=\n    LE = 2;\n  }\n\n  // Comparison operator.\n  Op op = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Value to compare against.\n  core.v3.RuntimeUInt32 value = 2;\n}\n\n// Filters on HTTP response/status code.\nmessage StatusCodeFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.StatusCodeFilter\";\n\n  // Comparison.\n  
ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters on total request duration in milliseconds.\nmessage DurationFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.DurationFilter\";\n\n  // Comparison.\n  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters for requests that are not health check requests. A health check\n// request is marked by the health check filter.\nmessage NotHealthCheckFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.NotHealthCheckFilter\";\n}\n\n// Filters for requests that are traceable. See the tracing overview for more\n// information on how a request becomes traceable.\nmessage TraceableFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.TraceableFilter\";\n}\n\n// Filters for random sampling of requests.\nmessage RuntimeFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.RuntimeFilter\";\n\n  // Runtime key to get an optional overridden numerator for use in the\n  // *percent_sampled* field. If found in runtime, this value will replace the\n  // default numerator.\n  string runtime_key = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The default sampling percentage. If not specified, defaults to 0% with\n  // denominator of 100.\n  type.v3.FractionalPercent percent_sampled = 2;\n\n  // By default, sampling pivots on the header\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` being\n  // present. If :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`\n  // is present, the filter will consistently sample across multiple hosts based\n  // on the runtime key value and the value extracted from\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`. 
If it is\n  // missing, or *use_independent_randomness* is set to true, the filter will\n  // randomly sample based on the runtime key value alone.\n  // *use_independent_randomness* can be used for logging kill switches within\n  // complex nested :ref:`AndFilter\n  // <envoy_api_msg_config.accesslog.v3.AndFilter>` and :ref:`OrFilter\n  // <envoy_api_msg_config.accesslog.v3.OrFilter>` blocks that are easier to\n  // reason about from a probability perspective (i.e., setting to true will\n  // cause the filter to behave like an independent random variable when\n  // composed within logical operator filters).\n  bool use_independent_randomness = 3;\n}\n\n// Performs a logical “and” operation on the result of each filter in filters.\n// Filters are evaluated sequentially and if one of them returns false, the\n// filter returns false immediately.\nmessage AndFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.AndFilter\";\n\n  repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Performs a logical “or” operation on the result of each individual filter.\n// Filters are evaluated sequentially and if one of them returns true, the\n// filter returns true immediately.\nmessage OrFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.OrFilter\";\n\n  repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Filters requests based on the presence or value of a request header.\nmessage HeaderFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.HeaderFilter\";\n\n  // Only requests with a header which matches the specified HeaderMatcher will\n  // pass the filter check.\n  route.v3.HeaderMatcher header = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters requests that received responses with an Envoy response 
flag set.\n// A list of the response flags can be found\n// in the access log formatter\n// :ref:`documentation<config_access_log_format_response_flags>`.\nmessage ResponseFlagFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.ResponseFlagFilter\";\n\n  // Only responses with the any of the flags listed in this field will be\n  // logged. This field is optional. If it is not specified, then any response\n  // flag will pass the filter check.\n  repeated string flags = 1 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"LH\"\n        in: \"UH\"\n        in: \"UT\"\n        in: \"LR\"\n        in: \"UR\"\n        in: \"UF\"\n        in: \"UC\"\n        in: \"UO\"\n        in: \"NR\"\n        in: \"DI\"\n        in: \"FI\"\n        in: \"RL\"\n        in: \"UAEX\"\n        in: \"RLSE\"\n        in: \"DC\"\n        in: \"URX\"\n        in: \"SI\"\n        in: \"IH\"\n        in: \"DPE\"\n        in: \"UMSDR\"\n        in: \"RFCF\"\n        in: \"NFCF\"\n        in: \"DT\"\n      }\n    }\n  }];\n}\n\n// Filters gRPC requests based on their response status. 
If a gRPC status is not\n// provided, the filter will infer the status from the HTTP status code.\nmessage GrpcStatusFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.GrpcStatusFilter\";\n\n  enum Status {\n    OK = 0;\n    CANCELED = 1;\n    UNKNOWN = 2;\n    INVALID_ARGUMENT = 3;\n    DEADLINE_EXCEEDED = 4;\n    NOT_FOUND = 5;\n    ALREADY_EXISTS = 6;\n    PERMISSION_DENIED = 7;\n    RESOURCE_EXHAUSTED = 8;\n    FAILED_PRECONDITION = 9;\n    ABORTED = 10;\n    OUT_OF_RANGE = 11;\n    UNIMPLEMENTED = 12;\n    INTERNAL = 13;\n    UNAVAILABLE = 14;\n    DATA_LOSS = 15;\n    UNAUTHENTICATED = 16;\n  }\n\n  // Logs only responses that have any one of the gRPC statuses in this field.\n  repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n\n  // If included and set to true, the filter will instead block all responses\n  // with a gRPC status or inferred gRPC status enumerated in statuses, and\n  // allow all other responses.\n  bool exclude = 2;\n}\n\n// Filters based on matching dynamic metadata.\n// If the matcher path and key correspond to an existing key in dynamic\n// metadata, the request is logged only if the matcher value is equal to the\n// metadata value. If the matcher path and key *do not* correspond to an\n// existing key in dynamic metadata, the request is logged only if\n// match_if_key_not_found is \"true\" or unset.\nmessage MetadataFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.MetadataFilter\";\n\n  // Matcher to check metadata for specified value. 
For example, to match on the\n  // access_log_hint metadata, set the filter to \"envoy.common\" and the path to\n  // \"access_log_hint\", and the value to \"true\".\n  type.matcher.v3.MetadataMatcher matcher = 1;\n\n  // Default result if the key does not exist in dynamic metadata: if unset or\n  // true, then log; if false, then don't log.\n  google.protobuf.BoolValue match_if_key_not_found = 2;\n}\n\n// Extension filter is statically registered at runtime.\nmessage ExtensionFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.ExtensionFilter\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the filter implementation to instantiate. The name must\n  // match a statically registered filter.\n  string name = 1;\n\n  // Custom configuration that depends on the filter being instantiated.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/accesslog/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/accesslog/v4alpha/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.accesslog.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/metadata.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.accesslog.v4alpha\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common access log types]\n\nmessage AccessLog {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.AccessLog\";\n\n  reserved 3;\n\n  reserved \"config\";\n\n  // The name of the access log implementation to instantiate. The name must\n  // match a statically registered access log. Current built-in loggers include:\n  //\n  // #. \"envoy.access_loggers.file\"\n  // #. \"envoy.access_loggers.http_grpc\"\n  // #. \"envoy.access_loggers.tcp_grpc\"\n  string name = 1;\n\n  // Filter which is used to determine if the access log needs to be written.\n  AccessLogFilter filter = 2;\n\n  // Custom configuration that depends on the access log being instantiated.\n  // Built-in configurations include:\n  //\n  // #. \"envoy.access_loggers.file\": :ref:`FileAccessLog\n  //    <envoy_api_msg_extensions.access_loggers.file.v4alpha.FileAccessLog>`\n  // #. \"envoy.access_loggers.http_grpc\": :ref:`HttpGrpcAccessLogConfig\n  //    <envoy_api_msg_extensions.access_loggers.grpc.v3.HttpGrpcAccessLogConfig>`\n  // #. 
\"envoy.access_loggers.tcp_grpc\": :ref:`TcpGrpcAccessLogConfig\n  //    <envoy_api_msg_extensions.access_loggers.grpc.v3.TcpGrpcAccessLogConfig>`\n  oneof config_type {\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// [#next-free-field: 13]\nmessage AccessLogFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.AccessLogFilter\";\n\n  oneof filter_specifier {\n    option (validate.required) = true;\n\n    // Status code filter.\n    StatusCodeFilter status_code_filter = 1;\n\n    // Duration filter.\n    DurationFilter duration_filter = 2;\n\n    // Not health check filter.\n    NotHealthCheckFilter not_health_check_filter = 3;\n\n    // Traceable filter.\n    TraceableFilter traceable_filter = 4;\n\n    // Runtime filter.\n    RuntimeFilter runtime_filter = 5;\n\n    // And filter.\n    AndFilter and_filter = 6;\n\n    // Or filter.\n    OrFilter or_filter = 7;\n\n    // Header filter.\n    HeaderFilter header_filter = 8;\n\n    // Response flag filter.\n    ResponseFlagFilter response_flag_filter = 9;\n\n    // gRPC status filter.\n    GrpcStatusFilter grpc_status_filter = 10;\n\n    // Extension filter.\n    ExtensionFilter extension_filter = 11;\n\n    // Metadata Filter\n    MetadataFilter metadata_filter = 12;\n  }\n}\n\n// Filter on an integer comparison.\nmessage ComparisonFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.ComparisonFilter\";\n\n  enum Op {\n    // =\n    EQ = 0;\n\n    // >=\n    GE = 1;\n\n    // <=\n    LE = 2;\n  }\n\n  // Comparison operator.\n  Op op = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Value to compare against.\n  core.v4alpha.RuntimeUInt32 value = 2;\n}\n\n// Filters on HTTP response/status code.\nmessage StatusCodeFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.StatusCodeFilter\";\n\n  // Comparison.\n  ComparisonFilter comparison = 
1 [(validate.rules).message = {required: true}];\n}\n\n// Filters on total request duration in milliseconds.\nmessage DurationFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.DurationFilter\";\n\n  // Comparison.\n  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters for requests that are not health check requests. A health check\n// request is marked by the health check filter.\nmessage NotHealthCheckFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.NotHealthCheckFilter\";\n}\n\n// Filters for requests that are traceable. See the tracing overview for more\n// information on how a request becomes traceable.\nmessage TraceableFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.TraceableFilter\";\n}\n\n// Filters for random sampling of requests.\nmessage RuntimeFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.RuntimeFilter\";\n\n  // Runtime key to get an optional overridden numerator for use in the\n  // *percent_sampled* field. If found in runtime, this value will replace the\n  // default numerator.\n  string runtime_key = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The default sampling percentage. If not specified, defaults to 0% with\n  // denominator of 100.\n  type.v3.FractionalPercent percent_sampled = 2;\n\n  // By default, sampling pivots on the header\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` being\n  // present. If :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`\n  // is present, the filter will consistently sample across multiple hosts based\n  // on the runtime key value and the value extracted from\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`. 
If it is\n  // missing, or *use_independent_randomness* is set to true, the filter will\n  // randomly sample based on the runtime key value alone.\n  // *use_independent_randomness* can be used for logging kill switches within\n  // complex nested :ref:`AndFilter\n  // <envoy_api_msg_config.accesslog.v4alpha.AndFilter>` and :ref:`OrFilter\n  // <envoy_api_msg_config.accesslog.v4alpha.OrFilter>` blocks that are easier to\n  // reason about from a probability perspective (i.e., setting to true will\n  // cause the filter to behave like an independent random variable when\n  // composed within logical operator filters).\n  bool use_independent_randomness = 3;\n}\n\n// Performs a logical “and” operation on the result of each filter in filters.\n// Filters are evaluated sequentially and if one of them returns false, the\n// filter returns false immediately.\nmessage AndFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.AndFilter\";\n\n  repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Performs a logical “or” operation on the result of each individual filter.\n// Filters are evaluated sequentially and if one of them returns true, the\n// filter returns true immediately.\nmessage OrFilter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.accesslog.v3.OrFilter\";\n\n  repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Filters requests based on the presence or value of a request header.\nmessage HeaderFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.HeaderFilter\";\n\n  // Only requests with a header which matches the specified HeaderMatcher will\n  // pass the filter check.\n  route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters requests that received responses with an Envoy response flag 
set.\n// A list of the response flags can be found\n// in the access log formatter\n// :ref:`documentation<config_access_log_format_response_flags>`.\nmessage ResponseFlagFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.ResponseFlagFilter\";\n\n  // Only responses with the any of the flags listed in this field will be\n  // logged. This field is optional. If it is not specified, then any response\n  // flag will pass the filter check.\n  repeated string flags = 1 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"LH\"\n        in: \"UH\"\n        in: \"UT\"\n        in: \"LR\"\n        in: \"UR\"\n        in: \"UF\"\n        in: \"UC\"\n        in: \"UO\"\n        in: \"NR\"\n        in: \"DI\"\n        in: \"FI\"\n        in: \"RL\"\n        in: \"UAEX\"\n        in: \"RLSE\"\n        in: \"DC\"\n        in: \"URX\"\n        in: \"SI\"\n        in: \"IH\"\n        in: \"DPE\"\n        in: \"UMSDR\"\n        in: \"RFCF\"\n        in: \"NFCF\"\n        in: \"DT\"\n      }\n    }\n  }];\n}\n\n// Filters gRPC requests based on their response status. 
If a gRPC status is not\n// provided, the filter will infer the status from the HTTP status code.\nmessage GrpcStatusFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.GrpcStatusFilter\";\n\n  enum Status {\n    OK = 0;\n    CANCELED = 1;\n    UNKNOWN = 2;\n    INVALID_ARGUMENT = 3;\n    DEADLINE_EXCEEDED = 4;\n    NOT_FOUND = 5;\n    ALREADY_EXISTS = 6;\n    PERMISSION_DENIED = 7;\n    RESOURCE_EXHAUSTED = 8;\n    FAILED_PRECONDITION = 9;\n    ABORTED = 10;\n    OUT_OF_RANGE = 11;\n    UNIMPLEMENTED = 12;\n    INTERNAL = 13;\n    UNAVAILABLE = 14;\n    DATA_LOSS = 15;\n    UNAUTHENTICATED = 16;\n  }\n\n  // Logs only responses that have any one of the gRPC statuses in this field.\n  repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n\n  // If included and set to true, the filter will instead block all responses\n  // with a gRPC status or inferred gRPC status enumerated in statuses, and\n  // allow all other responses.\n  bool exclude = 2;\n}\n\n// Filters based on matching dynamic metadata.\n// If the matcher path and key correspond to an existing key in dynamic\n// metadata, the request is logged only if the matcher value is equal to the\n// metadata value. If the matcher path and key *do not* correspond to an\n// existing key in dynamic metadata, the request is logged only if\n// match_if_key_not_found is \"true\" or unset.\nmessage MetadataFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.MetadataFilter\";\n\n  // Matcher to check metadata for specified value. 
For example, to match on the\n  // access_log_hint metadata, set the filter to \"envoy.common\" and the path to\n  // \"access_log_hint\", and the value to \"true\".\n  type.matcher.v4alpha.MetadataMatcher matcher = 1;\n\n  // Default result if the key does not exist in dynamic metadata: if unset or\n  // true, then log; if false, then don't log.\n  google.protobuf.BoolValue match_if_key_not_found = 2;\n}\n\n// Extension filter is statically registered at runtime.\nmessage ExtensionFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.ExtensionFilter\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the filter implementation to instantiate. The name must\n  // match a statically registered filter.\n  string name = 1;\n\n  // Custom configuration that depends on the filter being instantiated.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/bootstrap/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/metrics/v2:pkg\",\n        \"//envoy/config/overload/v2alpha:pkg\",\n        \"//envoy/config/trace/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/bootstrap/v2/bootstrap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.bootstrap.v2;\n\nimport \"envoy/api/v2/auth/secret.proto\";\nimport \"envoy/api/v2/cluster.proto\";\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/api/v2/core/event_service_config.proto\";\nimport \"envoy/api/v2/core/socket_option.proto\";\nimport \"envoy/api/v2/listener.proto\";\nimport \"envoy/config/metrics/v2/stats.proto\";\nimport \"envoy/config/overload/v2alpha/overload.proto\";\nimport \"envoy/config/trace/v2/http_tracer.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.bootstrap.v2\";\noption java_outer_classname = \"BootstrapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Bootstrap]\n// This proto is supplied via the :option:`-c` CLI flag and acts as the root\n// of the Envoy v2 configuration. See the :ref:`v2 configuration overview\n// <config_overview_bootstrap>` for more detail.\n\n// Bootstrap :ref:`configuration overview <config_overview_bootstrap>`.\n// [#next-free-field: 21]\nmessage Bootstrap {\n  message StaticResources {\n    // Static :ref:`Listeners <envoy_api_msg_Listener>`. These listeners are\n    // available regardless of LDS configuration.\n    repeated api.v2.Listener listeners = 1;\n\n    // If a network based configuration source is specified for :ref:`cds_config\n    // <envoy_api_field_config.bootstrap.v2.Bootstrap.DynamicResources.cds_config>`, it's necessary\n    // to have some initial cluster definitions available to allow Envoy to know\n    // how to speak to the management server. 
These cluster definitions may not\n    // use :ref:`EDS <arch_overview_dynamic_config_eds>` (i.e. they should be static\n    // IP or DNS-based).\n    repeated api.v2.Cluster clusters = 2;\n\n    // These static secrets can be used by :ref:`SdsSecretConfig\n    // <envoy_api_msg_auth.SdsSecretConfig>`\n    repeated api.v2.auth.Secret secrets = 3;\n  }\n\n  message DynamicResources {\n    reserved 4;\n\n    // All :ref:`Listeners <envoy_api_msg_Listener>` are provided by a single\n    // :ref:`LDS <arch_overview_dynamic_config_lds>` configuration source.\n    api.v2.core.ConfigSource lds_config = 1;\n\n    // All post-bootstrap :ref:`Cluster <envoy_api_msg_Cluster>` definitions are\n    // provided by a single :ref:`CDS <arch_overview_dynamic_config_cds>`\n    // configuration source.\n    api.v2.core.ConfigSource cds_config = 2;\n\n    // A single :ref:`ADS <config_overview_ads>` source may be optionally\n    // specified. This must have :ref:`api_type\n    // <envoy_api_field_core.ApiConfigSource.api_type>` :ref:`GRPC\n    // <envoy_api_enum_value_core.ApiConfigSource.ApiType.GRPC>`. Only\n    // :ref:`ConfigSources <envoy_api_msg_core.ConfigSource>` that have\n    // the :ref:`ads <envoy_api_field_core.ConfigSource.ads>` field set will be\n    // streamed on the ADS channel.\n    api.v2.core.ApiConfigSource ads_config = 3;\n  }\n\n  reserved 10;\n\n  // Node identity to present to the management server and for instance\n  // identification purposes (e.g. 
in generated headers).\n  api.v2.core.Node node = 1;\n\n  // Statically specified resources.\n  StaticResources static_resources = 2;\n\n  // xDS configuration sources.\n  DynamicResources dynamic_resources = 3;\n\n  // Configuration for the cluster manager which owns all upstream clusters\n  // within the server.\n  ClusterManager cluster_manager = 4;\n\n  // Health discovery service config option.\n  // (:ref:`core.ApiConfigSource <envoy_api_msg_core.ApiConfigSource>`)\n  api.v2.core.ApiConfigSource hds_config = 14;\n\n  // Optional file system path to search for startup flag files.\n  string flags_path = 5;\n\n  // Optional set of stats sinks.\n  repeated metrics.v2.StatsSink stats_sinks = 6;\n\n  // Configuration for internal processing of stats.\n  metrics.v2.StatsConfig stats_config = 13;\n\n  // Optional duration between flushes to configured stats sinks. For\n  // performance reasons Envoy latches counters and only flushes counters and\n  // gauges at a periodic interval. If not specified the default is 5000ms (5\n  // seconds).\n  // Duration must be at least 1ms and at most 5 min.\n  google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = {\n    lt {seconds: 300}\n    gte {nanos: 1000000}\n  }];\n\n  // Optional watchdog configuration.\n  Watchdog watchdog = 8;\n\n  // Configuration for an external tracing provider.\n  //\n  // .. attention::\n  //  This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider\n  //  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing.provider>`.\n  trace.v2.Tracing tracing = 9;\n\n  // Configuration for the runtime configuration provider (deprecated). If not\n  // specified, a “null” provider will be used which will result in all defaults\n  // being used.\n  Runtime runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Configuration for the runtime configuration provider. 
If not\n  // specified, a “null” provider will be used which will result in all defaults\n  // being used.\n  LayeredRuntime layered_runtime = 17;\n\n  // Configuration for the local administration HTTP server.\n  Admin admin = 12;\n\n  // Optional overload manager configuration.\n  overload.v2alpha.OverloadManager overload_manager = 15;\n\n  // Enable :ref:`stats for event dispatcher <operations_performance>`, defaults to false.\n  // Note that this records a value for each iteration of the event loop on every thread. This\n  // should normally be minimal overhead, but when using\n  // :ref:`statsd <envoy_api_msg_config.metrics.v2.StatsdSink>`, it will send each observed value\n  // over the wire individually because the statsd protocol doesn't have any way to represent a\n  // histogram summary. Be aware that this can be a very large volume of data.\n  bool enable_dispatcher_stats = 16;\n\n  // Optional string which will be used in lieu of x-envoy in prefixing headers.\n  //\n  // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be\n  // transformed into x-foo-retry-on etc.\n  //\n  // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the\n  // headers Envoy will trust for core code and core extensions only. Be VERY careful making\n  // changes to this string, especially in multi-layer Envoy deployments or deployments using\n  // extensions which are not upstream.\n  string header_prefix = 18;\n\n  // Optional proxy version which will be used to set the value of :ref:`server.version statistic\n  // <server_statistics>` if specified. 
Envoy will not process this value, it will be sent as is to\n  // :ref:`stats sinks <envoy_api_msg_config.metrics.v2.StatsSink>`.\n  google.protobuf.UInt64Value stats_server_version_override = 19;\n\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // This may be overridden on a per-cluster basis in cds_config,\n  // when :ref:`dns_resolvers <envoy_api_field_Cluster.dns_resolvers>` and\n  // :ref:`use_tcp_for_dns_lookups <envoy_api_field_Cluster.use_tcp_for_dns_lookups>` are\n  // specified.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple' API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 20;\n}\n\n// Administration interface :ref:`operations documentation\n// <operations_admin_interface>`.\nmessage Admin {\n  // The path to write the access log for the administration server. If no\n  // access log is desired specify ‘/dev/null’. This is only required if\n  // :ref:`address <envoy_api_field_config.bootstrap.v2.Admin.address>` is set.\n  string access_log_path = 1;\n\n  // The cpu profiler output path for the administration server. 
If no profile\n  // path is specified, the default is ‘/var/log/envoy/envoy.prof’.\n  string profile_path = 2;\n\n  // The TCP address that the administration server will listen on.\n  // If not specified, Envoy will not start an administration server.\n  api.v2.core.Address address = 3;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated api.v2.core.SocketOption socket_options = 4;\n}\n\n// Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`.\nmessage ClusterManager {\n  message OutlierDetection {\n    // Specifies the path to the outlier event log.\n    string event_log_path = 1;\n\n    // [#not-implemented-hide:]\n    // The gRPC service for the outlier detection event service.\n    // If empty, outlier detection events won't be sent to a remote endpoint.\n    api.v2.core.EventServiceConfig event_service = 2;\n  }\n\n  // Name of the local cluster (i.e., the cluster that owns the Envoy running\n  // this configuration). In order to enable :ref:`zone aware routing\n  // <arch_overview_load_balancing_zone_aware_routing>` this option must be set.\n  // If *local_cluster_name* is defined then :ref:`clusters\n  // <envoy_api_msg_Cluster>` must be defined in the :ref:`Bootstrap\n  // static cluster resources\n  // <envoy_api_field_config.bootstrap.v2.Bootstrap.StaticResources.clusters>`. 
This is unrelated to\n  // the :option:`--service-cluster` option which does not `affect zone aware\n  // routing <https://github.com/envoyproxy/envoy/issues/774>`_.\n  string local_cluster_name = 1;\n\n  // Optional global configuration for outlier detection.\n  OutlierDetection outlier_detection = 2;\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config.\n  api.v2.core.BindConfig upstream_bind_config = 3;\n\n  // A management server endpoint to stream load stats to via\n  // *StreamLoadStats*. This must have :ref:`api_type\n  // <envoy_api_field_core.ApiConfigSource.api_type>` :ref:`GRPC\n  // <envoy_api_enum_value_core.ApiConfigSource.ApiType.GRPC>`.\n  api.v2.core.ApiConfigSource load_stats_config = 4;\n}\n\n// Envoy process watchdog configuration. When configured, this monitors for\n// nonresponsive threads and kills the process after the configured thresholds.\n// See the :ref:`watchdog documentation <operations_performance_watchdog>` for more information.\nmessage Watchdog {\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_miss* statistic. If not specified the default is 200ms.\n  google.protobuf.Duration miss_timeout = 1;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_mega_miss* statistic. If not specified the default is\n  // 1000ms.\n  google.protobuf.Duration megamiss_timeout = 2;\n\n  // If a watched thread has been nonresponsive for this duration, assume a\n  // programming error and kill the entire Envoy process. Set to 0 to disable\n  // kill behavior. If not specified the default is 0 (disabled).\n  google.protobuf.Duration kill_timeout = 3;\n\n  // If at least two watched threads have been nonresponsive for at least this\n  // duration assume a true deadlock and kill the entire Envoy process. Set to 0\n  // to disable this behavior. 
If not specified the default is 0 (disabled).\n  google.protobuf.Duration multikill_timeout = 4;\n}\n\n// Runtime :ref:`configuration overview <config_runtime>` (deprecated).\nmessage Runtime {\n  // The implementation assumes that the file system tree is accessed via a\n  // symbolic link. An atomic link swap is used when a new tree should be\n  // switched to. This parameter specifies the path to the symbolic link. Envoy\n  // will watch the location for changes and reload the file system tree when\n  // they happen. If this parameter is not set, there will be no disk based\n  // runtime.\n  string symlink_root = 1;\n\n  // Specifies the subdirectory to load within the root directory. This is\n  // useful if multiple systems share the same delivery mechanism. Envoy\n  // configuration elements can be contained in a dedicated subdirectory.\n  string subdirectory = 2;\n\n  // Specifies an optional subdirectory to load within the root directory. If\n  // specified and the directory exists, configuration values within this\n  // directory will override those found in the primary subdirectory. This is\n  // useful when Envoy is deployed across many different types of servers.\n  // Sometimes it is useful to have a per service cluster directory for runtime\n  // configuration. See below for exactly how the override directory is used.\n  string override_subdirectory = 3;\n\n  // Static base runtime. This will be :ref:`overridden\n  // <config_runtime_layering>` by other runtime layers, e.g.\n  // disk or admin. This follows the :ref:`runtime protobuf JSON representation\n  // encoding <config_runtime_proto_json>`.\n  google.protobuf.Struct base = 4;\n}\n\n// [#next-free-field: 6]\nmessage RuntimeLayer {\n  // :ref:`Disk runtime <config_runtime_local_disk>` layer.\n  message DiskLayer {\n    // The implementation assumes that the file system tree is accessed via a\n    // symbolic link. An atomic link swap is used when a new tree should be\n    // switched to. 
This parameter specifies the path to the symbolic link.\n    // Envoy will watch the location for changes and reload the file system tree\n    // when they happen. See documentation on runtime :ref:`atomicity\n    // <config_runtime_atomicity>` for further details on how reloads are\n    // treated.\n    string symlink_root = 1;\n\n    // Specifies the subdirectory to load within the root directory. This is\n    // useful if multiple systems share the same delivery mechanism. Envoy\n    // configuration elements can be contained in a dedicated subdirectory.\n    string subdirectory = 3;\n\n    // :ref:`Append <config_runtime_local_disk_service_cluster_subdirs>` the\n    // service cluster to the path under symlink root.\n    bool append_service_cluster = 2;\n  }\n\n  // :ref:`Admin console runtime <config_runtime_admin>` layer.\n  message AdminLayer {\n  }\n\n  // :ref:`Runtime Discovery Service (RTDS) <config_runtime_rtds>` layer.\n  message RtdsLayer {\n    // Resource to subscribe to at *rtds_config* for the RTDS layer.\n    string name = 1;\n\n    // RTDS configuration source.\n    api.v2.core.ConfigSource rtds_config = 2;\n  }\n\n  // Descriptive name for the runtime layer. This is only used for the runtime\n  // :http:get:`/runtime` output.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof layer_specifier {\n    option (validate.required) = true;\n\n    // :ref:`Static runtime <config_runtime_bootstrap>` layer.\n    // This follows the :ref:`runtime protobuf JSON representation encoding\n    // <config_runtime_proto_json>`. 
Unlike static xDS resources, this static\n    // layer is overridable by later layers in the runtime virtual filesystem.\n    google.protobuf.Struct static_layer = 2;\n\n    DiskLayer disk_layer = 3;\n\n    AdminLayer admin_layer = 4;\n\n    RtdsLayer rtds_layer = 5;\n  }\n}\n\n// Runtime :ref:`configuration overview <config_runtime>`.\nmessage LayeredRuntime {\n  // The :ref:`layers <config_runtime_layering>` of the runtime. This is ordered\n  // such that later layers in the list overlay earlier entries.\n  repeated RuntimeLayer layers = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/bootstrap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/bootstrap/v2:pkg\",\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/listener/v3:pkg\",\n        \"//envoy/config/metrics/v3:pkg\",\n        \"//envoy/config/overload/v3:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/bootstrap/v3/bootstrap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.bootstrap.v3;\n\nimport \"envoy/config/cluster/v3/cluster.proto\";\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/event_service_config.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/socket_option.proto\";\nimport \"envoy/config/listener/v3/listener.proto\";\nimport \"envoy/config/metrics/v3/stats.proto\";\nimport \"envoy/config/overload/v3/overload.proto\";\nimport \"envoy/config/trace/v3/http_tracer.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/secret.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.bootstrap.v3\";\noption java_outer_classname = \"BootstrapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Bootstrap]\n// This proto is supplied via the :option:`-c` CLI flag and acts as the root\n// of the Envoy v2 configuration. 
See the :ref:`v2 configuration overview\n// <config_overview_bootstrap>` for more detail.\n\n// Bootstrap :ref:`configuration overview <config_overview_bootstrap>`.\n// [#next-free-field: 28]\nmessage Bootstrap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v2.Bootstrap\";\n\n  message StaticResources {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.Bootstrap.StaticResources\";\n\n    // Static :ref:`Listeners <envoy_api_msg_config.listener.v3.Listener>`. These listeners are\n    // available regardless of LDS configuration.\n    repeated listener.v3.Listener listeners = 1;\n\n    // If a network based configuration source is specified for :ref:`cds_config\n    // <envoy_api_field_config.bootstrap.v3.Bootstrap.DynamicResources.cds_config>`, it's necessary\n    // to have some initial cluster definitions available to allow Envoy to know\n    // how to speak to the management server. These cluster definitions may not\n    // use :ref:`EDS <arch_overview_dynamic_config_eds>` (i.e. 
they should be static\n    // IP or DNS-based).\n    repeated cluster.v3.Cluster clusters = 2;\n\n    // These static secrets can be used by :ref:`SdsSecretConfig\n    // <envoy_api_msg_extensions.transport_sockets.tls.v3.SdsSecretConfig>`\n    repeated envoy.extensions.transport_sockets.tls.v3.Secret secrets = 3;\n  }\n\n  // [#next-free-field: 7]\n  message DynamicResources {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.Bootstrap.DynamicResources\";\n\n    reserved 4;\n\n    // All :ref:`Listeners <envoy_api_msg_config.listener.v3.Listener>` are provided by a single\n    // :ref:`LDS <arch_overview_dynamic_config_lds>` configuration source.\n    core.v3.ConfigSource lds_config = 1;\n\n    // Resource locator for listener collection.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator lds_resources_locator = 5;\n\n    // All post-bootstrap :ref:`Cluster <envoy_api_msg_config.cluster.v3.Cluster>` definitions are\n    // provided by a single :ref:`CDS <arch_overview_dynamic_config_cds>`\n    // configuration source.\n    core.v3.ConfigSource cds_config = 2;\n\n    // Resource locator for cluster collection.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator cds_resources_locator = 6;\n\n    // A single :ref:`ADS <config_overview_ads>` source may be optionally\n    // specified. This must have :ref:`api_type\n    // <envoy_api_field_config.core.v3.ApiConfigSource.api_type>` :ref:`GRPC\n    // <envoy_api_enum_value_config.core.v3.ApiConfigSource.ApiType.GRPC>`. Only\n    // :ref:`ConfigSources <envoy_api_msg_config.core.v3.ConfigSource>` that have\n    // the :ref:`ads <envoy_api_field_config.core.v3.ConfigSource.ads>` field set will be\n    // streamed on the ADS channel.\n    core.v3.ApiConfigSource ads_config = 3;\n  }\n\n  reserved 10, 11;\n\n  reserved \"runtime\";\n\n  // Node identity to present to the management server and for instance\n  // identification purposes (e.g. 
in generated headers).\n  core.v3.Node node = 1;\n\n  // A list of :ref:`Node <envoy_v3_api_msg_config.core.v3.Node>` field names\n  // that will be included in the context parameters of the effective\n  // *UdpaResourceLocator* that is sent in a discovery request when resource\n  // locators are used for LDS/CDS. Any non-string field will have its JSON\n  // encoding set as the context parameter value, with the exception of\n  // metadata, which will be flattened (see example below). The supported field\n  // names are:\n  // - \"cluster\"\n  // - \"id\"\n  // - \"locality.region\"\n  // - \"locality.sub_zone\"\n  // - \"locality.zone\"\n  // - \"metadata\"\n  // - \"user_agent_build_version.metadata\"\n  // - \"user_agent_build_version.version\"\n  // - \"user_agent_name\"\n  // - \"user_agent_version\"\n  //\n  // The node context parameters act as a base layer dictionary for the context\n  // parameters (i.e. more specific resource specific context parameters will\n  // override). Field names will be prefixed with “udpa.node.” when included in\n  // context parameters.\n  //\n  // For example, if node_context_params is ``[\"user_agent_name\", \"metadata\"]``,\n  // the implied context parameters might be::\n  //\n  //   node.user_agent_name: \"envoy\"\n  //   node.metadata.foo: \"{\\\"bar\\\": \\\"baz\\\"}\"\n  //   node.metadata.some: \"42\"\n  //   node.metadata.thing: \"\\\"thing\\\"\"\n  //\n  // [#not-implemented-hide:]\n  repeated string node_context_params = 26;\n\n  // Statically specified resources.\n  StaticResources static_resources = 2;\n\n  // xDS configuration sources.\n  DynamicResources dynamic_resources = 3;\n\n  // Configuration for the cluster manager which owns all upstream clusters\n  // within the server.\n  ClusterManager cluster_manager = 4;\n\n  // Health discovery service config option.\n  // (:ref:`core.ApiConfigSource <envoy_api_msg_config.core.v3.ApiConfigSource>`)\n  core.v3.ApiConfigSource hds_config = 14;\n\n  // Optional file 
system path to search for startup flag files.\n  string flags_path = 5;\n\n  // Optional set of stats sinks.\n  repeated metrics.v3.StatsSink stats_sinks = 6;\n\n  // Configuration for internal processing of stats.\n  metrics.v3.StatsConfig stats_config = 13;\n\n  // Optional duration between flushes to configured stats sinks. For\n  // performance reasons Envoy latches counters and only flushes counters and\n  // gauges at a periodic interval. If not specified the default is 5000ms (5\n  // seconds).\n  // Duration must be at least 1ms and at most 5 min.\n  google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = {\n    lt {seconds: 300}\n    gte {nanos: 1000000}\n  }];\n\n  // Optional watchdog configuration.\n  // This is for a single watchdog configuration for the entire system.\n  // Deprecated in favor of *watchdogs* which has finer granularity.\n  Watchdog watchdog = 8 [deprecated = true];\n\n  // Optional watchdogs configuration.\n  // This is used for specifying different watchdogs for the different subsystems.\n  Watchdogs watchdogs = 27;\n\n  // Configuration for an external tracing provider.\n  //\n  // .. attention::\n  //  This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider\n  //  <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider>`.\n  trace.v3.Tracing tracing = 9 [deprecated = true];\n\n  // Configuration for the runtime configuration provider. 
If not\n  // specified, a “null” provider will be used which will result in all defaults\n  // being used.\n  LayeredRuntime layered_runtime = 17;\n\n  // Configuration for the local administration HTTP server.\n  Admin admin = 12;\n\n  // Optional overload manager configuration.\n  overload.v3.OverloadManager overload_manager = 15 [\n    (udpa.annotations.security).configure_for_untrusted_downstream = true,\n    (udpa.annotations.security).configure_for_untrusted_upstream = true\n  ];\n\n  // Enable :ref:`stats for event dispatcher <operations_performance>`, defaults to false.\n  // Note that this records a value for each iteration of the event loop on every thread. This\n  // should normally be minimal overhead, but when using\n  // :ref:`statsd <envoy_api_msg_config.metrics.v3.StatsdSink>`, it will send each observed value\n  // over the wire individually because the statsd protocol doesn't have any way to represent a\n  // histogram summary. Be aware that this can be a very large volume of data.\n  bool enable_dispatcher_stats = 16;\n\n  // Optional string which will be used in lieu of x-envoy in prefixing headers.\n  //\n  // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be\n  // transformed into x-foo-retry-on etc.\n  //\n  // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the\n  // headers Envoy will trust for core code and core extensions only. Be VERY careful making\n  // changes to this string, especially in multi-layer Envoy deployments or deployments using\n  // extensions which are not upstream.\n  string header_prefix = 18;\n\n  // Optional proxy version which will be used to set the value of :ref:`server.version statistic\n  // <server_statistics>` if specified. 
Envoy will not process this value, it will be sent as is to\n  // :ref:`stats sinks <envoy_api_msg_config.metrics.v3.StatsSink>`.\n  google.protobuf.UInt64Value stats_server_version_override = 19;\n\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // This may be overridden on a per-cluster basis in cds_config,\n  // when :ref:`dns_resolvers <envoy_api_field_config.cluster.v3.Cluster.dns_resolvers>` and\n  // :ref:`use_tcp_for_dns_lookups <envoy_api_field_config.cluster.v3.Cluster.use_tcp_for_dns_lookups>` are\n  // specified.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple' API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 20;\n\n  // Specifies optional bootstrap extensions to be instantiated at startup time.\n  // Each item contains extension specific configuration.\n  repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21;\n\n  // Configuration sources that will participate in\n  // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as\n  // follows:\n  // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call\n  //    this *resource_authority*.\n  // 2. *resource_authority* is compared against the authorities in any peer\n  //    *ConfigSource*. The peer *ConfigSource* is the configuration source\n  //    message which would have been used unconditionally for resolution\n  //    with opaque resource names. If there is a match with an authority, the\n  //    peer *ConfigSource* message is used.\n  // 3. *resource_authority* is compared sequentially with the authorities in\n  //    each configuration source in *config_sources*. The first *ConfigSource*\n  //    to match wins.\n  // 4. As a fallback, if no configuration source matches, then\n  //    *default_config_source* is used.\n  // 5. 
If *default_config_source* is not specified, resolution fails.\n  // [#not-implemented-hide:]\n  repeated core.v3.ConfigSource config_sources = 22;\n\n  // Default configuration source for *udpa.core.v1.ResourceLocator* if all\n  // other resolution fails.\n  // [#not-implemented-hide:]\n  core.v3.ConfigSource default_config_source = 23;\n\n  // Optional overriding of default socket interface. The value must be the name of one of the\n  // socket interface factories initialized through a bootstrap extension\n  string default_socket_interface = 24;\n\n  // Global map of CertificateProvider instances. These instances are referred to by name in the\n  // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance.instance_name>`\n  // field.\n  // [#not-implemented-hide:]\n  map<string, core.v3.TypedExtensionConfig> certificate_provider_instances = 25;\n}\n\n// Administration interface :ref:`operations documentation\n// <operations_admin_interface>`.\nmessage Admin {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v2.Admin\";\n\n  // The path to write the access log for the administration server. If no\n  // access log is desired specify ‘/dev/null’. This is only required if\n  // :ref:`address <envoy_api_field_config.bootstrap.v3.Admin.address>` is set.\n  string access_log_path = 1;\n\n  // The cpu profiler output path for the administration server. 
If no profile\n  // path is specified, the default is ‘/var/log/envoy/envoy.prof’.\n  string profile_path = 2;\n\n  // The TCP address that the administration server will listen on.\n  // If not specified, Envoy will not start an administration server.\n  core.v3.Address address = 3;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.v3.SocketOption socket_options = 4;\n}\n\n// Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`.\nmessage ClusterManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v2.ClusterManager\";\n\n  message OutlierDetection {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.ClusterManager.OutlierDetection\";\n\n    // Specifies the path to the outlier event log.\n    string event_log_path = 1;\n\n    // [#not-implemented-hide:]\n    // The gRPC service for the outlier detection event service.\n    // If empty, outlier detection events won't be sent to a remote endpoint.\n    core.v3.EventServiceConfig event_service = 2;\n  }\n\n  // Name of the local cluster (i.e., the cluster that owns the Envoy running\n  // this configuration). In order to enable :ref:`zone aware routing\n  // <arch_overview_load_balancing_zone_aware_routing>` this option must be set.\n  // If *local_cluster_name* is defined then :ref:`clusters\n  // <envoy_api_msg_config.cluster.v3.Cluster>` must be defined in the :ref:`Bootstrap\n  // static cluster resources\n  // <envoy_api_field_config.bootstrap.v3.Bootstrap.StaticResources.clusters>`. 
This is unrelated to\n  // the :option:`--service-cluster` option which does not `affect zone aware\n  // routing <https://github.com/envoyproxy/envoy/issues/774>`_.\n  string local_cluster_name = 1;\n\n  // Optional global configuration for outlier detection.\n  OutlierDetection outlier_detection = 2;\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config.\n  core.v3.BindConfig upstream_bind_config = 3;\n\n  // A management server endpoint to stream load stats to via\n  // *StreamLoadStats*. This must have :ref:`api_type\n  // <envoy_api_field_config.core.v3.ApiConfigSource.api_type>` :ref:`GRPC\n  // <envoy_api_enum_value_config.core.v3.ApiConfigSource.ApiType.GRPC>`.\n  core.v3.ApiConfigSource load_stats_config = 4;\n}\n\n// Allows you to specify different watchdog configs for different subsystems.\n// This allows finer tuned policies for the watchdog. If a subsystem is omitted\n// the default values for that system will be used.\nmessage Watchdogs {\n  // Watchdog for the main thread.\n  Watchdog main_thread_watchdog = 1;\n\n  // Watchdog for the worker threads.\n  Watchdog worker_watchdog = 2;\n}\n\n// Envoy process watchdog configuration. 
When configured, this monitors for\n// nonresponsive threads and kills the process after the configured thresholds.\n// See the :ref:`watchdog documentation <operations_performance_watchdog>` for more information.\n// [#next-free-field: 8]\nmessage Watchdog {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v2.Watchdog\";\n\n  message WatchdogAction {\n    // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS.\n    // Within an event type, actions execute in the order they are configured.\n    // For KILL/MULTIKILL there is a default PANIC that will run after the\n    // registered actions and kills the process if it wasn't already killed.\n    // It might be useful to specify several debug actions, and possibly an\n    // alternate FATAL action.\n    enum WatchdogEvent {\n      UNKNOWN = 0;\n      KILL = 1;\n      MULTIKILL = 2;\n      MEGAMISS = 3;\n      MISS = 4;\n    }\n\n    // Extension specific configuration for the action.\n    core.v3.TypedExtensionConfig config = 1;\n\n    WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // Register actions that will fire on given WatchDog events.\n  // See *WatchDogAction* for priority of events.\n  repeated WatchdogAction actions = 7;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_miss* statistic. If not specified the default is 200ms.\n  google.protobuf.Duration miss_timeout = 1;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_mega_miss* statistic. If not specified the default is\n  // 1000ms.\n  google.protobuf.Duration megamiss_timeout = 2;\n\n  // If a watched thread has been nonresponsive for this duration, assume a\n  // programming error and kill the entire Envoy process. Set to 0 to disable\n  // kill behavior. 
If not specified the default is 0 (disabled).\n  google.protobuf.Duration kill_timeout = 3;\n\n  // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is\n  // enabled. Enabling this feature would help to reduce risk of synchronized\n  // watchdog kill events across proxies due to external triggers. Set to 0 to\n  // disable. If not specified the default is 0 (disabled).\n  google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}];\n\n  // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))\n  // threads have been nonresponsive for at least this duration kill the entire\n  // Envoy process. Set to 0 to disable this behavior. If not specified the\n  // default is 0 (disabled).\n  google.protobuf.Duration multikill_timeout = 4;\n\n  // Sets the threshold for *multikill_timeout* in terms of the percentage of\n  // nonresponsive threads required for the *multikill_timeout*.\n  // If not specified the default is 0.\n  type.v3.Percent multikill_threshold = 5;\n}\n\n// Runtime :ref:`configuration overview <config_runtime>` (deprecated).\nmessage Runtime {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v2.Runtime\";\n\n  // The implementation assumes that the file system tree is accessed via a\n  // symbolic link. An atomic link swap is used when a new tree should be\n  // switched to. This parameter specifies the path to the symbolic link. Envoy\n  // will watch the location for changes and reload the file system tree when\n  // they happen. If this parameter is not set, there will be no disk based\n  // runtime.\n  string symlink_root = 1;\n\n  // Specifies the subdirectory to load within the root directory. This is\n  // useful if multiple systems share the same delivery mechanism. 
Envoy\n  // configuration elements can be contained in a dedicated subdirectory.\n  string subdirectory = 2;\n\n  // Specifies an optional subdirectory to load within the root directory. If\n  // specified and the directory exists, configuration values within this\n  // directory will override those found in the primary subdirectory. This is\n  // useful when Envoy is deployed across many different types of servers.\n  // Sometimes it is useful to have a per service cluster directory for runtime\n  // configuration. See below for exactly how the override directory is used.\n  string override_subdirectory = 3;\n\n  // Static base runtime. This will be :ref:`overridden\n  // <config_runtime_layering>` by other runtime layers, e.g.\n  // disk or admin. This follows the :ref:`runtime protobuf JSON representation\n  // encoding <config_runtime_proto_json>`.\n  google.protobuf.Struct base = 4;\n}\n\n// [#next-free-field: 6]\nmessage RuntimeLayer {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v2.RuntimeLayer\";\n\n  // :ref:`Disk runtime <config_runtime_local_disk>` layer.\n  message DiskLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.RuntimeLayer.DiskLayer\";\n\n    // The implementation assumes that the file system tree is accessed via a\n    // symbolic link. An atomic link swap is used when a new tree should be\n    // switched to. This parameter specifies the path to the symbolic link.\n    // Envoy will watch the location for changes and reload the file system tree\n    // when they happen. See documentation on runtime :ref:`atomicity\n    // <config_runtime_atomicity>` for further details on how reloads are\n    // treated.\n    string symlink_root = 1;\n\n    // Specifies the subdirectory to load within the root directory. This is\n    // useful if multiple systems share the same delivery mechanism. 
Envoy\n    // configuration elements can be contained in a dedicated subdirectory.\n    string subdirectory = 3;\n\n    // :ref:`Append <config_runtime_local_disk_service_cluster_subdirs>` the\n    // service cluster to the path under symlink root.\n    bool append_service_cluster = 2;\n  }\n\n  // :ref:`Admin console runtime <config_runtime_admin>` layer.\n  message AdminLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.RuntimeLayer.AdminLayer\";\n  }\n\n  // :ref:`Runtime Discovery Service (RTDS) <config_runtime_rtds>` layer.\n  message RtdsLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer\";\n\n    // Resource to subscribe to at *rtds_config* for the RTDS layer.\n    string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n    // Resource locator for RTDS layer. This is mutually exclusive to *name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator rtds_resource_locator = 3\n        [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n    // RTDS configuration source.\n    core.v3.ConfigSource rtds_config = 2;\n  }\n\n  // Descriptive name for the runtime layer. This is only used for the runtime\n  // :http:get:`/runtime` output.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof layer_specifier {\n    option (validate.required) = true;\n\n    // :ref:`Static runtime <config_runtime_bootstrap>` layer.\n    // This follows the :ref:`runtime protobuf JSON representation encoding\n    // <config_runtime_proto_json>`. 
Unlike static xDS resources, this static\n    // layer is overridable by later layers in the runtime virtual filesystem.\n    google.protobuf.Struct static_layer = 2;\n\n    DiskLayer disk_layer = 3;\n\n    AdminLayer admin_layer = 4;\n\n    RtdsLayer rtds_layer = 5;\n  }\n}\n\n// Runtime :ref:`configuration overview <config_runtime>`.\nmessage LayeredRuntime {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v2.LayeredRuntime\";\n\n  // The :ref:`layers <config_runtime_layering>` of the runtime. This is ordered\n  // such that later layers in the list overlay earlier entries.\n  repeated RuntimeLayer layers = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/bootstrap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/bootstrap/v3:pkg\",\n        \"//envoy/config/cluster/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/listener/v4alpha:pkg\",\n        \"//envoy/config/metrics/v4alpha:pkg\",\n        \"//envoy/config/overload/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/bootstrap/v4alpha/bootstrap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.bootstrap.v4alpha;\n\nimport \"envoy/config/cluster/v4alpha/cluster.proto\";\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/event_service_config.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/socket_option.proto\";\nimport \"envoy/config/listener/v4alpha/listener.proto\";\nimport \"envoy/config/metrics/v4alpha/stats.proto\";\nimport \"envoy/config/overload/v3/overload.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/secret.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.bootstrap.v4alpha\";\noption java_outer_classname = \"BootstrapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Bootstrap]\n// This proto is supplied via the :option:`-c` CLI flag and acts as the root\n// of the Envoy v2 configuration. 
See the :ref:`v2 configuration overview\n// <config_overview_bootstrap>` for more detail.\n\n// Bootstrap :ref:`configuration overview <config_overview_bootstrap>`.\n// [#next-free-field: 28]\nmessage Bootstrap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.Bootstrap\";\n\n  message StaticResources {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.Bootstrap.StaticResources\";\n\n    // Static :ref:`Listeners <envoy_api_msg_config.listener.v4alpha.Listener>`. These listeners are\n    // available regardless of LDS configuration.\n    repeated listener.v4alpha.Listener listeners = 1;\n\n    // If a network based configuration source is specified for :ref:`cds_config\n    // <envoy_api_field_config.bootstrap.v4alpha.Bootstrap.DynamicResources.cds_config>`, it's necessary\n    // to have some initial cluster definitions available to allow Envoy to know\n    // how to speak to the management server. These cluster definitions may not\n    // use :ref:`EDS <arch_overview_dynamic_config_eds>` (i.e. 
they should be static\n    // IP or DNS-based).\n    repeated cluster.v4alpha.Cluster clusters = 2;\n\n    // These static secrets can be used by :ref:`SdsSecretConfig\n    // <envoy_api_msg_extensions.transport_sockets.tls.v4alpha.SdsSecretConfig>`\n    repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3;\n  }\n\n  // [#next-free-field: 7]\n  message DynamicResources {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.Bootstrap.DynamicResources\";\n\n    reserved 4;\n\n    // All :ref:`Listeners <envoy_api_msg_config.listener.v4alpha.Listener>` are provided by a single\n    // :ref:`LDS <arch_overview_dynamic_config_lds>` configuration source.\n    core.v4alpha.ConfigSource lds_config = 1;\n\n    // Resource locator for listener collection.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator lds_resources_locator = 5;\n\n    // All post-bootstrap :ref:`Cluster <envoy_api_msg_config.cluster.v4alpha.Cluster>` definitions are\n    // provided by a single :ref:`CDS <arch_overview_dynamic_config_cds>`\n    // configuration source.\n    core.v4alpha.ConfigSource cds_config = 2;\n\n    // Resource locator for cluster collection.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator cds_resources_locator = 6;\n\n    // A single :ref:`ADS <config_overview_ads>` source may be optionally\n    // specified. This must have :ref:`api_type\n    // <envoy_api_field_config.core.v4alpha.ApiConfigSource.api_type>` :ref:`GRPC\n    // <envoy_api_enum_value_config.core.v4alpha.ApiConfigSource.ApiType.GRPC>`. 
Only\n    // :ref:`ConfigSources <envoy_api_msg_config.core.v4alpha.ConfigSource>` that have\n    // the :ref:`ads <envoy_api_field_config.core.v4alpha.ConfigSource.ads>` field set will be\n    // streamed on the ADS channel.\n    core.v4alpha.ApiConfigSource ads_config = 3;\n  }\n\n  reserved 10, 11, 8, 9;\n\n  reserved \"runtime\", \"watchdog\", \"tracing\";\n\n  // Node identity to present to the management server and for instance\n  // identification purposes (e.g. in generated headers).\n  core.v4alpha.Node node = 1;\n\n  // A list of :ref:`Node <envoy_v3_api_msg_config.core.v3.Node>` field names\n  // that will be included in the context parameters of the effective\n  // *UdpaResourceLocator* that is sent in a discovery request when resource\n  // locators are used for LDS/CDS. Any non-string field will have its JSON\n  // encoding set as the context parameter value, with the exception of\n  // metadata, which will be flattened (see example below). The supported field\n  // names are:\n  // - \"cluster\"\n  // - \"id\"\n  // - \"locality.region\"\n  // - \"locality.sub_zone\"\n  // - \"locality.zone\"\n  // - \"metadata\"\n  // - \"user_agent_build_version.metadata\"\n  // - \"user_agent_build_version.version\"\n  // - \"user_agent_name\"\n  // - \"user_agent_version\"\n  //\n  // The node context parameters act as a base layer dictionary for the context\n  // parameters (i.e. more specific resource specific context parameters will\n  // override). 
Field names will be prefixed with “udpa.node.” when included in\n  // context parameters.\n  //\n  // For example, if node_context_params is ``[\"user_agent_name\", \"metadata\"]``,\n  // the implied context parameters might be::\n  //\n  //   node.user_agent_name: \"envoy\"\n  //   node.metadata.foo: \"{\\\"bar\\\": \\\"baz\\\"}\"\n  //   node.metadata.some: \"42\"\n  //   node.metadata.thing: \"\\\"thing\\\"\"\n  //\n  // [#not-implemented-hide:]\n  repeated string node_context_params = 26;\n\n  // Statically specified resources.\n  StaticResources static_resources = 2;\n\n  // xDS configuration sources.\n  DynamicResources dynamic_resources = 3;\n\n  // Configuration for the cluster manager which owns all upstream clusters\n  // within the server.\n  ClusterManager cluster_manager = 4;\n\n  // Health discovery service config option.\n  // (:ref:`core.ApiConfigSource <envoy_api_msg_config.core.v4alpha.ApiConfigSource>`)\n  core.v4alpha.ApiConfigSource hds_config = 14;\n\n  // Optional file system path to search for startup flag files.\n  string flags_path = 5;\n\n  // Optional set of stats sinks.\n  repeated metrics.v4alpha.StatsSink stats_sinks = 6;\n\n  // Configuration for internal processing of stats.\n  metrics.v4alpha.StatsConfig stats_config = 13;\n\n  // Optional duration between flushes to configured stats sinks. For\n  // performance reasons Envoy latches counters and only flushes counters and\n  // gauges at a periodic interval. If not specified the default is 5000ms (5\n  // seconds).\n  // Duration must be at least 1ms and at most 5 min.\n  google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = {\n    lt {seconds: 300}\n    gte {nanos: 1000000}\n  }];\n\n  // Optional watchdogs configuration.\n  // This is used for specifying different watchdogs for the different subsystems.\n  Watchdogs watchdogs = 27;\n\n  // Configuration for the runtime configuration provider. 
If not\n  // specified, a “null” provider will be used which will result in all defaults\n  // being used.\n  LayeredRuntime layered_runtime = 17;\n\n  // Configuration for the local administration HTTP server.\n  Admin admin = 12;\n\n  // Optional overload manager configuration.\n  overload.v3.OverloadManager overload_manager = 15 [\n    (udpa.annotations.security).configure_for_untrusted_downstream = true,\n    (udpa.annotations.security).configure_for_untrusted_upstream = true\n  ];\n\n  // Enable :ref:`stats for event dispatcher <operations_performance>`, defaults to false.\n  // Note that this records a value for each iteration of the event loop on every thread. This\n  // should normally be minimal overhead, but when using\n  // :ref:`statsd <envoy_api_msg_config.metrics.v4alpha.StatsdSink>`, it will send each observed value\n  // over the wire individually because the statsd protocol doesn't have any way to represent a\n  // histogram summary. Be aware that this can be a very large volume of data.\n  bool enable_dispatcher_stats = 16;\n\n  // Optional string which will be used in lieu of x-envoy in prefixing headers.\n  //\n  // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be\n  // transformed into x-foo-retry-on etc.\n  //\n  // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the\n  // headers Envoy will trust for core code and core extensions only. Be VERY careful making\n  // changes to this string, especially in multi-layer Envoy deployments or deployments using\n  // extensions which are not upstream.\n  string header_prefix = 18;\n\n  // Optional proxy version which will be used to set the value of :ref:`server.version statistic\n  // <server_statistics>` if specified. 
Envoy will not process this value, it will be sent as is to\n  // :ref:`stats sinks <envoy_api_msg_config.metrics.v4alpha.StatsSink>`.\n  google.protobuf.UInt64Value stats_server_version_override = 19;\n\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // This may be overridden on a per-cluster basis in cds_config,\n  // when :ref:`dns_resolvers <envoy_api_field_config.cluster.v4alpha.Cluster.dns_resolvers>` and\n  // :ref:`use_tcp_for_dns_lookups <envoy_api_field_config.cluster.v4alpha.Cluster.use_tcp_for_dns_lookups>` are\n  // specified.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple' API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 20;\n\n  // Specifies optional bootstrap extensions to be instantiated at startup time.\n  // Each item contains extension specific configuration.\n  repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21;\n\n  // Configuration sources that will participate in\n  // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as\n  // follows:\n  // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call\n  //    this *resource_authority*.\n  // 2. *resource_authority* is compared against the authorities in any peer\n  //    *ConfigSource*. The peer *ConfigSource* is the configuration source\n  //    message which would have been used unconditionally for resolution\n  //    with opaque resource names. If there is a match with an authority, the\n  //    peer *ConfigSource* message is used.\n  // 3. *resource_authority* is compared sequentially with the authorities in\n  //    each configuration source in *config_sources*. The first *ConfigSource*\n  //    to match wins.\n  // 4. As a fallback, if no configuration source matches, then\n  //    *default_config_source* is used.\n  // 5. 
If *default_config_source* is not specified, resolution fails.\n  // [#not-implemented-hide:]\n  repeated core.v4alpha.ConfigSource config_sources = 22;\n\n  // Default configuration source for *udpa.core.v1.ResourceLocator* if all\n  // other resolution fails.\n  // [#not-implemented-hide:]\n  core.v4alpha.ConfigSource default_config_source = 23;\n\n  // Optional overriding of default socket interface. The value must be the name of one of the\n  // socket interface factories initialized through a bootstrap extension\n  string default_socket_interface = 24;\n\n  // Global map of CertificateProvider instances. These instances are referred to by name in the\n  // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CommonTlsContext.CertificateProviderInstance.instance_name>`\n  // field.\n  // [#not-implemented-hide:]\n  map<string, core.v4alpha.TypedExtensionConfig> certificate_provider_instances = 25;\n}\n\n// Administration interface :ref:`operations documentation\n// <operations_admin_interface>`.\nmessage Admin {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v3.Admin\";\n\n  // The path to write the access log for the administration server. If no\n  // access log is desired specify ‘/dev/null’. This is only required if\n  // :ref:`address <envoy_api_field_config.bootstrap.v4alpha.Admin.address>` is set.\n  string access_log_path = 1;\n\n  // The cpu profiler output path for the administration server. 
If no profile\n  // path is specified, the default is ‘/var/log/envoy/envoy.prof’.\n  string profile_path = 2;\n\n  // The TCP address that the administration server will listen on.\n  // If not specified, Envoy will not start an administration server.\n  core.v4alpha.Address address = 3;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.v4alpha.SocketOption socket_options = 4;\n}\n\n// Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`.\nmessage ClusterManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.ClusterManager\";\n\n  message OutlierDetection {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.ClusterManager.OutlierDetection\";\n\n    // Specifies the path to the outlier event log.\n    string event_log_path = 1;\n\n    // [#not-implemented-hide:]\n    // The gRPC service for the outlier detection event service.\n    // If empty, outlier detection events won't be sent to a remote endpoint.\n    core.v4alpha.EventServiceConfig event_service = 2;\n  }\n\n  // Name of the local cluster (i.e., the cluster that owns the Envoy running\n  // this configuration). In order to enable :ref:`zone aware routing\n  // <arch_overview_load_balancing_zone_aware_routing>` this option must be set.\n  // If *local_cluster_name* is defined then :ref:`clusters\n  // <envoy_api_msg_config.cluster.v4alpha.Cluster>` must be defined in the :ref:`Bootstrap\n  // static cluster resources\n  // <envoy_api_field_config.bootstrap.v4alpha.Bootstrap.StaticResources.clusters>`. 
This is unrelated to\n  // the :option:`--service-cluster` option which does not `affect zone aware\n  // routing <https://github.com/envoyproxy/envoy/issues/774>`_.\n  string local_cluster_name = 1;\n\n  // Optional global configuration for outlier detection.\n  OutlierDetection outlier_detection = 2;\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config.\n  core.v4alpha.BindConfig upstream_bind_config = 3;\n\n  // A management server endpoint to stream load stats to via\n  // *StreamLoadStats*. This must have :ref:`api_type\n  // <envoy_api_field_config.core.v4alpha.ApiConfigSource.api_type>` :ref:`GRPC\n  // <envoy_api_enum_value_config.core.v4alpha.ApiConfigSource.ApiType.GRPC>`.\n  core.v4alpha.ApiConfigSource load_stats_config = 4;\n}\n\n// Allows you to specify different watchdog configs for different subsystems.\n// This allows finer tuned policies for the watchdog. If a subsystem is omitted\n// the default values for that system will be used.\nmessage Watchdogs {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.Watchdogs\";\n\n  // Watchdog for the main thread.\n  Watchdog main_thread_watchdog = 1;\n\n  // Watchdog for the worker threads.\n  Watchdog worker_watchdog = 2;\n}\n\n// Envoy process watchdog configuration. 
When configured, this monitors for\n// nonresponsive threads and kills the process after the configured thresholds.\n// See the :ref:`watchdog documentation <operations_performance_watchdog>` for more information.\n// [#next-free-field: 8]\nmessage Watchdog {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v3.Watchdog\";\n\n  message WatchdogAction {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.Watchdog.WatchdogAction\";\n\n    // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS.\n    // Within an event type, actions execute in the order they are configured.\n    // For KILL/MULTIKILL there is a default PANIC that will run after the\n    // registered actions and kills the process if it wasn't already killed.\n    // It might be useful to specify several debug actions, and possibly an\n    // alternate FATAL action.\n    enum WatchdogEvent {\n      UNKNOWN = 0;\n      KILL = 1;\n      MULTIKILL = 2;\n      MEGAMISS = 3;\n      MISS = 4;\n    }\n\n    // Extension specific configuration for the action.\n    core.v4alpha.TypedExtensionConfig config = 1;\n\n    WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // Register actions that will fire on given WatchDog events.\n  // See *WatchDogAction* for priority of events.\n  repeated WatchdogAction actions = 7;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_miss* statistic. If not specified the default is 200ms.\n  google.protobuf.Duration miss_timeout = 1;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_mega_miss* statistic. If not specified the default is\n  // 1000ms.\n  google.protobuf.Duration megamiss_timeout = 2;\n\n  // If a watched thread has been nonresponsive for this duration, assume a\n  // programming error and kill the entire Envoy process. 
Set to 0 to disable\n  // kill behavior. If not specified the default is 0 (disabled).\n  google.protobuf.Duration kill_timeout = 3;\n\n  // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is\n  // enabled. Enabling this feature would help to reduce risk of synchronized\n  // watchdog kill events across proxies due to external triggers. Set to 0 to\n  // disable. If not specified the default is 0 (disabled).\n  google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}];\n\n  // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))\n  // threads have been nonresponsive for at least this duration kill the entire\n  // Envoy process. Set to 0 to disable this behavior. If not specified the\n  // default is 0 (disabled).\n  google.protobuf.Duration multikill_timeout = 4;\n\n  // Sets the threshold for *multikill_timeout* in terms of the percentage of\n  // nonresponsive threads required for the *multikill_timeout*.\n  // If not specified the default is 0.\n  type.v3.Percent multikill_threshold = 5;\n}\n\n// Runtime :ref:`configuration overview <config_runtime>` (deprecated).\nmessage Runtime {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v3.Runtime\";\n\n  // The implementation assumes that the file system tree is accessed via a\n  // symbolic link. An atomic link swap is used when a new tree should be\n  // switched to. This parameter specifies the path to the symbolic link. Envoy\n  // will watch the location for changes and reload the file system tree when\n  // they happen. If this parameter is not set, there will be no disk based\n  // runtime.\n  string symlink_root = 1;\n\n  // Specifies the subdirectory to load within the root directory. This is\n  // useful if multiple systems share the same delivery mechanism. 
Envoy\n  // configuration elements can be contained in a dedicated subdirectory.\n  string subdirectory = 2;\n\n  // Specifies an optional subdirectory to load within the root directory. If\n  // specified and the directory exists, configuration values within this\n  // directory will override those found in the primary subdirectory. This is\n  // useful when Envoy is deployed across many different types of servers.\n  // Sometimes it is useful to have a per service cluster directory for runtime\n  // configuration. See below for exactly how the override directory is used.\n  string override_subdirectory = 3;\n\n  // Static base runtime. This will be :ref:`overridden\n  // <config_runtime_layering>` by other runtime layers, e.g.\n  // disk or admin. This follows the :ref:`runtime protobuf JSON representation\n  // encoding <config_runtime_proto_json>`.\n  google.protobuf.Struct base = 4;\n}\n\n// [#next-free-field: 6]\nmessage RuntimeLayer {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.RuntimeLayer\";\n\n  // :ref:`Disk runtime <config_runtime_local_disk>` layer.\n  message DiskLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer\";\n\n    // The implementation assumes that the file system tree is accessed via a\n    // symbolic link. An atomic link swap is used when a new tree should be\n    // switched to. This parameter specifies the path to the symbolic link.\n    // Envoy will watch the location for changes and reload the file system tree\n    // when they happen. See documentation on runtime :ref:`atomicity\n    // <config_runtime_atomicity>` for further details on how reloads are\n    // treated.\n    string symlink_root = 1;\n\n    // Specifies the subdirectory to load within the root directory. This is\n    // useful if multiple systems share the same delivery mechanism. 
Envoy\n    // configuration elements can be contained in a dedicated subdirectory.\n    string subdirectory = 3;\n\n    // :ref:`Append <config_runtime_local_disk_service_cluster_subdirs>` the\n    // service cluster to the path under symlink root.\n    bool append_service_cluster = 2;\n  }\n\n  // :ref:`Admin console runtime <config_runtime_admin>` layer.\n  message AdminLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer\";\n  }\n\n  // :ref:`Runtime Discovery Service (RTDS) <config_runtime_rtds>` layer.\n  message RtdsLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer\";\n\n    oneof name_specifier {\n      // Resource to subscribe to at *rtds_config* for the RTDS layer.\n      string name = 1;\n\n      // Resource locator for RTDS layer. This is mutually exclusive to *name*.\n      // [#not-implemented-hide:]\n      udpa.core.v1.ResourceLocator rtds_resource_locator = 3;\n    }\n\n    // RTDS configuration source.\n    core.v4alpha.ConfigSource rtds_config = 2;\n  }\n\n  // Descriptive name for the runtime layer. This is only used for the runtime\n  // :http:get:`/runtime` output.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof layer_specifier {\n    option (validate.required) = true;\n\n    // :ref:`Static runtime <config_runtime_bootstrap>` layer.\n    // This follows the :ref:`runtime protobuf JSON representation encoding\n    // <config_runtime_proto_json>`. 
Unlike static xDS resources, this static\n    // layer is overridable by later layers in the runtime virtual filesystem.\n    google.protobuf.Struct static_layer = 2;\n\n    DiskLayer disk_layer = 3;\n\n    AdminLayer admin_layer = 4;\n\n    RtdsLayer rtds_layer = 5;\n  }\n}\n\n// Runtime :ref:`configuration overview <config_runtime>`.\nmessage LayeredRuntime {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.LayeredRuntime\";\n\n  // The :ref:`layers <config_runtime_layering>` of the runtime. This is ordered\n  // such that later layers in the list overlay earlier entries.\n  repeated RuntimeLayer layers = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/aggregate/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/cluster/aggregate/v2alpha/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.aggregate.v2alpha;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.aggregate.v2alpha\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.clusters.aggregate.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Aggregate cluster configuration]\n\n// Configuration for the aggregate cluster. See the :ref:`architecture overview\n// <arch_overview_aggregate_cluster>` for more information.\n// [#extension: envoy.clusters.aggregate]\nmessage ClusterConfig {\n  // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they\n  // appear in this list.\n  repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.dynamic_forward_proxy.v2alpha;\n\nimport \"envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.clusters.dynamic_forward_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamic forward proxy cluster configuration]\n\n// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#extension: envoy.clusters.dynamic_forward_proxy]\nmessage ClusterConfig {\n  // The DNS cache configuration that the cluster will attach to. Note this configuration must\n  // match that of associated :ref:`dynamic forward proxy HTTP filter configuration\n  // <envoy_api_field_config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/redis/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/cluster/redis/redis_cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.redis;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.redis\";\noption java_outer_classname = \"RedisClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Redis Cluster Configuration]\n// This cluster adds support for `Redis Cluster <https://redis.io/topics/cluster-spec>`_, as part\n// of :ref:`Envoy's support for Redis Cluster <arch_overview_redis>`.\n//\n// Redis Cluster is an extension of Redis which supports sharding and high availability (where a\n// shard that loses its primary fails over to a replica, and designates it as the new primary).\n// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client\n// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the\n// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS\n// command <https://redis.io/commands/cluster-slots>`_. This result is then stored locally, and\n// updated at user-configured intervals.\n//\n// Additionally, if\n// :ref:`enable_redirection<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings.enable_redirection>`\n// is true, then moved and ask redirection errors from upstream servers will trigger a topology\n// refresh when they exceed a user-configured error threshold.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     name: name\n//     connect_timeout: 0.25s\n//     dns_lookup_family: V4_ONLY\n//     hosts:\n//     - socket_address:\n//       address: foo.bar.com\n//       port_value: 22120\n//     cluster_type:\n//     name: envoy.clusters.redis\n//     typed_config:\n//       \"@type\": type.googleapis.com/google.protobuf.Struct\n//       value:\n//         cluster_refresh_rate: 30s\n//         cluster_refresh_timeout: 0.5s\n//         redirect_refresh_interval: 10s\n//         redirect_refresh_threshold: 10\n// [#extension: envoy.clusters.redis]\n\n// [#next-free-field: 7]\nmessage RedisClusterConfig {\n  // Interval between successive topology refresh requests. If not set, this defaults to 5s.\n  google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}];\n\n  // Timeout for topology refresh request. If not set, this defaults to 3s.\n  google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}];\n\n  // The minimum interval that must pass after triggering a topology refresh request before a new\n  // request can possibly be triggered again. Any errors received during one of these\n  // time intervals are ignored. If not set, this defaults to 5s.\n  google.protobuf.Duration redirect_refresh_interval = 3;\n\n  // The number of redirection errors that must be received before\n  // triggering a topology refresh request. 
If not set, this defaults to 5.\n  // If this is set to 0, topology refresh after redirect is disabled.\n  google.protobuf.UInt32Value redirect_refresh_threshold = 4;\n\n  // The number of failures that must be received before triggering a topology refresh request.\n  // If not set, this defaults to 0, which disables the topology refresh due to failure.\n  uint32 failure_refresh_threshold = 5;\n\n  // The number of hosts became degraded or unhealthy before triggering a topology refresh request.\n  // If not set, this defaults to 0, which disables the topology refresh due to degraded or\n  // unhealthy host.\n  uint32 host_degraded_refresh_threshold = 6;\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/cluster:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/cluster/v3/circuit_breaker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v3\";\noption java_outer_classname = \"CircuitBreakerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Circuit breakers]\n\n// :ref:`Circuit breaking<arch_overview_circuit_break>` settings can be\n// specified individually for each defined priority.\nmessage CircuitBreakers {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.cluster.CircuitBreakers\";\n\n  // A Thresholds defines CircuitBreaker settings for a\n  // :ref:`RoutingPriority<envoy_api_enum_config.core.v3.RoutingPriority>`.\n  // [#next-free-field: 9]\n  message Thresholds {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.cluster.CircuitBreakers.Thresholds\";\n\n    message RetryBudget {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.cluster.CircuitBreakers.Thresholds.RetryBudget\";\n\n      // Specifies the limit on concurrent retries as a percentage of the sum of active requests and\n      // active pending requests. For example, if there are 100 active requests and the\n      // budget_percent is set to 25, there may be 25 active retries.\n      //\n      // This parameter is optional. Defaults to 20%.\n      type.v3.Percent budget_percent = 1;\n\n      // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the\n      // number of active retries may never go below this number.\n      //\n      // This parameter is optional. 
Defaults to 3.\n      google.protobuf.UInt32Value min_retry_concurrency = 2;\n    }\n\n    // The :ref:`RoutingPriority<envoy_api_enum_config.core.v3.RoutingPriority>`\n    // the specified CircuitBreaker settings apply to.\n    core.v3.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // The maximum number of connections that Envoy will make to the upstream\n    // cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_connections = 2;\n\n    // The maximum number of pending requests that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_pending_requests = 3;\n\n    // The maximum number of parallel requests that Envoy will make to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_requests = 4;\n\n    // The maximum number of parallel retries that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 3.\n    google.protobuf.UInt32Value max_retries = 5;\n\n    // Specifies a limit on concurrent retries in relation to the number of active requests. This\n    // parameter is optional.\n    //\n    // .. note::\n    //\n    //    If this field is set, the retry budget will override any configured retry circuit\n    //    breaker.\n    RetryBudget retry_budget = 8;\n\n    // If track_remaining is true, then stats will be published that expose\n    // the number of resources remaining until the circuit breakers open. If\n    // not specified, the default is false.\n    //\n    // .. note::\n    //\n    //    If a retry budget is used in lieu of the max_retries circuit breaker,\n    //    the remaining retry resources remaining will not be tracked.\n    bool track_remaining = 6;\n\n    // The maximum number of connection pools per cluster that Envoy will concurrently support at\n    // once. If not specified, the default is unlimited. 
Set this for clusters which create a\n    // large number of connection pools. See\n    // :ref:`Circuit Breaking <arch_overview_circuit_break_cluster_maximum_connection_pools>` for\n    // more details.\n    google.protobuf.UInt32Value max_connection_pools = 7;\n  }\n\n  // If multiple :ref:`Thresholds<envoy_api_msg_config.cluster.v3.CircuitBreakers.Thresholds>`\n  // are defined with the same :ref:`RoutingPriority<envoy_api_enum_config.core.v3.RoutingPriority>`,\n  // the first one in the list is used. If no Thresholds is defined for a given\n  // :ref:`RoutingPriority<envoy_api_enum_config.core.v3.RoutingPriority>`, the default values\n  // are used.\n  repeated Thresholds thresholds = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/v3/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v3;\n\nimport \"envoy/config/cluster/v3/circuit_breaker.proto\";\nimport \"envoy/config/cluster/v3/filter.proto\";\nimport \"envoy/config/cluster/v3/outlier_detection.proto\";\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/health_check.proto\";\nimport \"envoy/config/core/v3/protocol.proto\";\nimport \"envoy/config/endpoint/v3/endpoint.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/collection_entry.proto\";\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v3\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Cluster configuration]\n\n// Cluster list collections. 
Entries are *Cluster* resources or references.\n// [#not-implemented-hide:]\nmessage ClusterCollection {\n  udpa.core.v1.CollectionEntry entries = 1;\n}\n\n// Configuration for a single upstream cluster.\n// [#next-free-field: 53]\nmessage Cluster {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Cluster\";\n\n  // Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`\n  // for an explanation on each type.\n  enum DiscoveryType {\n    // Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`\n    // for an explanation.\n    STATIC = 0;\n\n    // Refer to the :ref:`strict DNS discovery\n    // type<arch_overview_service_discovery_types_strict_dns>`\n    // for an explanation.\n    STRICT_DNS = 1;\n\n    // Refer to the :ref:`logical DNS discovery\n    // type<arch_overview_service_discovery_types_logical_dns>`\n    // for an explanation.\n    LOGICAL_DNS = 2;\n\n    // Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`\n    // for an explanation.\n    EDS = 3;\n\n    // Refer to the :ref:`original destination discovery\n    // type<arch_overview_service_discovery_types_original_destination>`\n    // for an explanation.\n    ORIGINAL_DST = 4;\n  }\n\n  // Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture\n  // overview section for information on each type.\n  enum LbPolicy {\n    reserved 4;\n\n    reserved \"ORIGINAL_DST_LB\";\n\n    // Refer to the :ref:`round robin load balancing\n    // policy<arch_overview_load_balancing_types_round_robin>`\n    // for an explanation.\n    ROUND_ROBIN = 0;\n\n    // Refer to the :ref:`least request load balancing\n    // policy<arch_overview_load_balancing_types_least_request>`\n    // for an explanation.\n    LEAST_REQUEST = 1;\n\n    // Refer to the :ref:`ring hash load balancing\n    // policy<arch_overview_load_balancing_types_ring_hash>`\n    // for an explanation.\n  
  RING_HASH = 2;\n\n    // Refer to the :ref:`random load balancing\n    // policy<arch_overview_load_balancing_types_random>`\n    // for an explanation.\n    RANDOM = 3;\n\n    // Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`\n    // for an explanation.\n    MAGLEV = 5;\n\n    // This load balancer type must be specified if the configured cluster provides a cluster\n    // specific load balancer. Consult the configured cluster's documentation for whether to set\n    // this option or not.\n    CLUSTER_PROVIDED = 6;\n\n    // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy\n    // <envoy_api_field_config.cluster.v3.Cluster.load_balancing_policy>` field to determine the LB policy.\n    // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field\n    // and instead using the new load_balancing_policy field as the one and only mechanism for\n    // configuring this.]\n    LOAD_BALANCING_POLICY_CONFIG = 7;\n  }\n\n  // When V4_ONLY is selected, the DNS resolver will only perform a lookup for\n  // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will\n  // only perform a lookup for addresses in the IPv6 family. 
If AUTO is\n  // specified, the DNS resolver will first perform a lookup for addresses in\n  // the IPv6 family and fallback to a lookup for addresses in the IPv4 family.\n  // For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this setting is\n  // ignored.\n  enum DnsLookupFamily {\n    AUTO = 0;\n    V4_ONLY = 1;\n    V6_ONLY = 2;\n  }\n\n  enum ClusterProtocolSelection {\n    // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).\n    // If :ref:`http2_protocol_options <envoy_api_field_config.cluster.v3.Cluster.http2_protocol_options>` are\n    // present, HTTP2 will be used, otherwise HTTP1.1 will be used.\n    USE_CONFIGURED_PROTOCOL = 0;\n\n    // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.\n    USE_DOWNSTREAM_PROTOCOL = 1;\n  }\n\n  // TransportSocketMatch specifies what transport socket config will be used\n  // when the match conditions are satisfied.\n  message TransportSocketMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.TransportSocketMatch\";\n\n    // The name of the match, used in stats generation.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Optional endpoint metadata match criteria.\n    // The connection to the endpoint with metadata matching what is set in this field\n    // will use the transport socket configuration specified here.\n    // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match\n    // against the values specified in this field.\n    google.protobuf.Struct match = 2;\n\n    // The configuration of the transport socket.\n    core.v3.TransportSocket transport_socket = 3;\n  }\n\n  // Extended cluster type.\n  message CustomClusterType {\n    option 
(udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.CustomClusterType\";\n\n    // The type of the cluster to instantiate. The name must match a supported cluster type.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Cluster specific configuration which depends on the cluster being instantiated.\n    // See the supported cluster for further documentation.\n    google.protobuf.Any typed_config = 2;\n  }\n\n  // Only valid when discovery type is EDS.\n  message EdsClusterConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.EdsClusterConfig\";\n\n    // Configuration for the source of EDS updates for this Cluster.\n    core.v3.ConfigSource eds_config = 1;\n\n    // Optional alternative to cluster name to present to EDS. This does not\n    // have the same restrictions as cluster name, i.e. it may be arbitrary\n    // length.\n    string service_name = 2 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n    // Resource locator for EDS. This is mutually exclusive to *service_name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator eds_resource_locator = 3\n        [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n  }\n\n  // Optionally divide the endpoints in this cluster into subsets defined by\n  // endpoint metadata and selected by route and weighted cluster metadata.\n  // [#next-free-field: 8]\n  message LbSubsetConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.LbSubsetConfig\";\n\n    // If NO_FALLBACK is selected, a result\n    // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,\n    // any cluster endpoint may be returned (subject to policy, health checks,\n    // etc). 
If DEFAULT_SUBSET is selected, load balancing is performed over the\n    // endpoints matching the values from the default_subset field.\n    enum LbSubsetFallbackPolicy {\n      NO_FALLBACK = 0;\n      ANY_ENDPOINT = 1;\n      DEFAULT_SUBSET = 2;\n    }\n\n    // Specifications for subsets.\n    message LbSubsetSelector {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Cluster.LbSubsetConfig.LbSubsetSelector\";\n\n      // Allows to override top level fallback policy per selector.\n      enum LbSubsetSelectorFallbackPolicy {\n        // If NOT_DEFINED top level config fallback policy is used instead.\n        NOT_DEFINED = 0;\n\n        // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.\n        NO_FALLBACK = 1;\n\n        // If ANY_ENDPOINT is selected, any cluster endpoint may be returned\n        // (subject to policy, health checks, etc).\n        ANY_ENDPOINT = 2;\n\n        // If DEFAULT_SUBSET is selected, load balancing is performed over the\n        // endpoints matching the values from the default_subset field.\n        DEFAULT_SUBSET = 3;\n\n        // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata\n        // keys reduced to\n        // :ref:`fallback_keys_subset<envoy_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_keys_subset>`.\n        // It allows for a fallback to a different, less specific selector if some of the keys of\n        // the selector are considered optional.\n        KEYS_SUBSET = 4;\n      }\n\n      // List of keys to match with the weighted cluster metadata.\n      repeated string keys = 1;\n\n      // Selects a mode of operation in which each subset has only one host. 
This mode uses the same rules for\n      // choosing a host, but updating hosts is faster, especially for large numbers of hosts.\n      //\n      // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy.\n      //\n      // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains\n      // only one entry.\n      //\n      // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys`\n      // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge\n      // :ref:`lb_subsets_single_host_per_subset_duplicate<config_cluster_manager_cluster_stats_subset_lb>` indicates how many duplicates are\n      // present in the current configuration.\n      bool single_host_per_subset = 4;\n\n      // The behavior used when no endpoint subset matches the selected route's\n      // metadata.\n      LbSubsetSelectorFallbackPolicy fallback_policy = 2\n          [(validate.rules).enum = {defined_only: true}];\n\n      // Subset of\n      // :ref:`keys<envoy_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` used by\n      // :ref:`KEYS_SUBSET<envoy_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`\n      // fallback policy.\n      // It has to be a non empty list if KEYS_SUBSET fallback policy is selected.\n      // For any other fallback policy the parameter is not used and should not be set.\n      // Only values also present in\n      // :ref:`keys<envoy_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` are allowed, but\n      // `fallback_keys_subset` cannot be equal to `keys`.\n      repeated string fallback_keys_subset = 3;\n    }\n\n    // The behavior used when no endpoint subset matches the selected route's\n    // metadata. 
The value defaults to\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // Specifies the default subset of endpoints used during fallback if\n    // fallback_policy is\n    // :ref:`DEFAULT_SUBSET<envoy_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.\n    // Each field in default_subset is\n    // compared to the matching LbEndpoint.Metadata under the *envoy.lb*\n    // namespace. It is valid for no hosts to match, in which case the behavior\n    // is the same as a fallback_policy of\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    google.protobuf.Struct default_subset = 2;\n\n    // For each entry, LbEndpoint.Metadata's\n    // *envoy.lb* namespace is traversed and a subset is created for each unique\n    // combination of key and value. For example:\n    //\n    // .. code-block:: json\n    //\n    //   { \"subset_selectors\": [\n    //       { \"keys\": [ \"version\" ] },\n    //       { \"keys\": [ \"stage\", \"hardware_type\" ] }\n    //   ]}\n    //\n    // A subset is matched when the metadata from the selected route and\n    // weighted cluster contains the same keys and values as the subset's\n    // metadata. 
The same host may appear in multiple subsets.\n    repeated LbSubsetSelector subset_selectors = 3;\n\n    // If true, routing to subsets will take into account the localities and locality weights of the\n    // endpoints when making the routing decision.\n    //\n    // There are some potential pitfalls associated with enabling this feature, as the resulting\n    // traffic split after applying both a subset match and locality weights might be undesirable.\n    //\n    // Consider for example a situation in which you have 50/50 split across two localities X/Y\n    // which have 100 hosts each without subsetting. If the subset LB results in X having only 1\n    // host selected but Y having 100, then a lot more load is being dumped on the single host in X\n    // than originally anticipated in the load balancing assignment delivered via EDS.\n    bool locality_weight_aware = 4;\n\n    // When used with locality_weight_aware, scales the weight of each locality by the ratio\n    // of hosts in the subset vs hosts in the original subset. This aims to even out the load\n    // going to an individual locality if said locality is disproportionately affected by the\n    // subset predicate.\n    bool scale_locality_weight = 5;\n\n    // If true, when a fallback policy is configured and its corresponding subset fails to find\n    // a host this will cause any host to be selected instead.\n    //\n    // This is useful when using the default subset as the fallback policy, given the default\n    // subset might become empty. 
With this option enabled, if that happens the LB will attempt\n    // to select a host from the entire cluster.\n    bool panic_mode_any = 6;\n\n    // If true, metadata specified for a metadata key will be matched against the corresponding\n    // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value\n    // and any of the elements in the list matches the criteria.\n    bool list_as_any = 7;\n  }\n\n  // Specific configuration for the LeastRequest load balancing policy.\n  message LeastRequestLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.LeastRequestLbConfig\";\n\n    // The number of random healthy hosts from which the host with the fewest active requests will\n    // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.\n    google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];\n\n    // The following formula is used to calculate the dynamic weights when hosts have different load\n    // balancing weights:\n    //\n    // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`\n    //\n    // The larger the active request bias is, the more aggressively active requests will lower the\n    // effective weight when all host weights are not equal.\n    //\n    // `active_request_bias` must be greater than or equal to 0.0.\n    //\n    // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number\n    // of active requests at the time it picks a host and behaves like the Round Robin Load\n    // Balancer.\n    //\n    // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing\n    // weight by the number of active requests at the time it does a pick.\n    //\n    // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's\n    // host sets changes, e.g., whenever there is a host 
membership update or a host load balancing\n    // weight change.\n    //\n    // .. note::\n    //   This setting only takes effect if all host weights are not equal.\n    core.v3.RuntimeDouble active_request_bias = 2;\n  }\n\n  // Specific configuration for the :ref:`RingHash<arch_overview_load_balancing_types_ring_hash>`\n  // load balancing policy.\n  message RingHashLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.RingHashLbConfig\";\n\n    // The hash function used to hash hosts onto the ketama ring.\n    enum HashFunction {\n      // Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.\n      XX_HASH = 0;\n\n      // Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with\n      // std:hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled\n      // on Linux and not macOS.\n      MURMUR_HASH_2 = 1;\n    }\n\n    reserved 2;\n\n    // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each\n    // provided host) the better the request distribution will reflect the desired weights. Defaults\n    // to 1024 entries, and limited to 8M entries. See also\n    // :ref:`maximum_ring_size<envoy_api_field_config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size>`.\n    google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];\n\n    // The hash function used to hash hosts onto the ketama ring. The value defaults to\n    // :ref:`XX_HASH<envoy_api_enum_value_config.cluster.v3.Cluster.RingHashLbConfig.HashFunction.XX_HASH>`.\n    HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];\n\n    // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered\n    // to further constrain resource use. 
See also\n    // :ref:`minimum_ring_size<envoy_api_field_config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size>`.\n    google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];\n  }\n\n  // Specific configuration for the :ref:`Maglev<arch_overview_load_balancing_types_maglev>`\n  // load balancing policy.\n  message MaglevLbConfig {\n    // The table size for Maglev hashing. Maglev aims for ‘minimal disruption’ rather than an absolute guarantee.\n    // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same\n    // upstream as it was before. Increasing the table size reduces the amount of disruption.\n    // The table size must be a prime number. If it is not specified, the default is 65537.\n    google.protobuf.UInt64Value table_size = 1;\n  }\n\n  // Specific configuration for the\n  // :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`\n  // load balancing policy.\n  message OriginalDstLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.OriginalDstLbConfig\";\n\n    // When true, :ref:`x-envoy-original-dst-host\n    // <config_http_conn_man_headers_x-envoy-original-dst-host>` can be used to override destination\n    // address.\n    //\n    // .. 
attention::\n    //\n    //   This header isn't sanitized by default, so enabling this feature allows HTTP clients to\n    //   route traffic to arbitrary hosts and/or ports, which may have serious security\n    //   consequences.\n    bool use_http_header = 1;\n  }\n\n  // Common configuration for all load balancer implementations.\n  // [#next-free-field: 8]\n  message CommonLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.CommonLbConfig\";\n\n    // Configuration for :ref:`zone aware routing\n    // <arch_overview_load_balancing_zone_aware_routing>`.\n    message ZoneAwareLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Cluster.CommonLbConfig.ZoneAwareLbConfig\";\n\n      // Configures percentage of requests that will be considered for zone aware routing\n      // if zone aware routing is configured. If not specified, the default is 100%.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      type.v3.Percent routing_enabled = 1;\n\n      // Configures minimum upstream cluster size required for zone aware routing\n      // If upstream cluster size is less than specified, zone aware routing is not performed\n      // even if zone aware routing is configured. If not specified, the default is 6.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      google.protobuf.UInt64Value min_cluster_size = 2;\n\n      // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic\n      // mode<arch_overview_load_balancing_panic_threshold>`. Instead, the cluster will fail all\n      // requests as if all hosts are unhealthy. 
This can help avoid potentially overwhelming a\n      // failing service.\n      bool fail_traffic_on_panic = 3;\n    }\n\n    // Configuration for :ref:`locality weighted load balancing\n    // <arch_overview_load_balancing_locality_weighted_lb>`\n    message LocalityWeightedLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig\";\n    }\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    message ConsistentHashingLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Cluster.CommonLbConfig.ConsistentHashingLbConfig\";\n\n      // If set to `true`, the cluster will use hostname instead of the resolved\n      // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.\n      bool use_hostname_for_hashing = 1;\n\n      // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150\n      // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster.\n      // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200.\n      // Minimum is 100.\n      //\n      // Applies to both Ring Hash and Maglev load balancers.\n      //\n      // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified\n      // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests\n      // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing\n      // is used to identify an eligible host. 
Further, the linear probe is implemented using a random jump in hosts ring/table to identify\n      // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the\n      // cascading overflow effect when choosing the next host in the ring/table).\n      //\n      // If weights are specified on the hosts, they are respected.\n      //\n      // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts\n      // being probed, so use a higher value if you require better performance.\n      google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}];\n    }\n\n    // Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.\n    // If not specified, the default is 50%.\n    // To disable panic mode, set to 0%.\n    //\n    // .. note::\n    //   The specified percent will be truncated to the nearest 1%.\n    type.v3.Percent healthy_panic_threshold = 1;\n\n    oneof locality_config_specifier {\n      ZoneAwareLbConfig zone_aware_lb_config = 2;\n\n      LocalityWeightedLbConfig locality_weighted_lb_config = 3;\n    }\n\n    // If set, all health check/weight/metadata updates that happen within this duration will be\n    // merged and delivered in one shot when the duration expires. The start of the duration is when\n    // the first update happens. This is useful for big clusters, with potentially noisy deploys\n    // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes\n    // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new\n    // cluster). Please always keep in mind that the use of sandbox technologies may change this\n    // behavior.\n    //\n    // If this is not set, we default to a merge window of 1000ms. 
To disable it, set the merge\n    // window to 0.\n    //\n    // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is\n    // because merging those updates isn't currently safe. See\n    // https://github.com/envoyproxy/envoy/pull/3941.\n    google.protobuf.Duration update_merge_window = 4;\n\n    // If set to true, Envoy will not consider new hosts when computing load balancing weights until\n    // they have been health checked for the first time. This will have no effect unless\n    // active health checking is also configured.\n    //\n    // Ignoring a host means that for any load balancing calculations that adjust weights based\n    // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and\n    // panic mode) Envoy will exclude these hosts in the denominator.\n    //\n    // For example, with hosts in two priorities P0 and P1, where P0 looks like\n    // {healthy, unhealthy (new), unhealthy (new)}\n    // and where P1 looks like\n    // {healthy, healthy}\n    // all traffic will still hit P0, as 1 / (3 - 2) = 1.\n    //\n    // Enabling this will allow scaling up the number of hosts for a given cluster without entering\n    // panic mode or triggering priority spillover, assuming the hosts pass the first health check.\n    //\n    // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not\n    // contribute to the calculation when deciding whether panic mode is enabled or not.\n    bool ignore_new_hosts_until_first_hc = 5;\n\n    // If set to `true`, the cluster manager will drain all existing\n    // connections to upstream hosts whenever hosts are added or removed from the cluster.\n    bool close_connections_on_host_set_change = 6;\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    ConsistentHashingLbConfig consistent_hashing_lb_config = 7;\n  }\n\n  message RefreshRate {\n    option 
(udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Cluster.RefreshRate\";\n\n    // Specifies the base interval between refreshes. This parameter is required and must be greater\n    // than zero and less than\n    // :ref:`max_interval <envoy_api_field_config.cluster.v3.Cluster.RefreshRate.max_interval>`.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {nanos: 1000000}\n    }];\n\n    // Specifies the maximum interval between refreshes. This parameter is optional, but must be\n    // greater than or equal to the\n    // :ref:`base_interval <envoy_api_field_config.cluster.v3.Cluster.RefreshRate.base_interval>`  if set. The default\n    // is 10 times the :ref:`base_interval <envoy_api_field_config.cluster.v3.Cluster.RefreshRate.base_interval>`.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}];\n  }\n\n  // [#not-implemented-hide:]\n  message PrefetchPolicy {\n    // Indicates how many streams (rounded up) can be anticipated per-upstream for each\n    // incoming stream. This is useful for high-QPS or latency-sensitive services. Prefetching\n    // will only be done if the upstream is healthy.\n    //\n    // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be\n    // established, one for the new incoming stream, and one for a presumed follow-up stream. 
For\n    // HTTP/2, only one connection would be established by default as one connection can\n    // serve both the original and presumed follow-up stream.\n    //\n    // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100\n    // active streams, there would be 100 connections in use, and 50 connections prefetched.\n    // This might be a useful value for something like short lived single-use connections,\n    // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection\n    // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP\n    // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more\n    // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue\n    // in case of unexpected disconnects where the connection could not be reused.\n    //\n    // If this value is not set, or set explicitly to one, Envoy will fetch as many connections\n    // as needed to serve streams in flight. This means in steady state if a connection is torn down,\n    // subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be\n    // prefetched.\n    //\n    // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can\n    // harm latency more than the prefetching helps.\n    google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1\n        [(validate.rules).double = {lte: 3.0 gte: 1.0}];\n\n    // Indicates how many streams (rounded up) can be anticipated across a cluster for each\n    // stream, useful for low QPS services. 
This is currently supported for a subset of\n    // deterministic non-hash-based load-balancing algorithms (weighted round robin, random).\n    // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a\n    // cluster, doing best effort predictions of what upstream would be picked next and\n    // pre-establishing a connection.\n    //\n    // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first\n    // incoming stream, 2 connections will be prefetched - one to the first upstream for this\n    // cluster, one to the second on the assumption there will be a follow-up stream.\n    //\n    // Prefetching will be limited to one prefetch per configured upstream in the cluster.\n    //\n    // If this value is not set, or set explicitly to one, Envoy will fetch as many connections\n    // as needed to serve streams in flight, so during warm up and in steady state if a connection\n    // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for\n    // connection establishment.\n    //\n    // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met,\n    // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream.\n    // TODO(alyssawilk) per LB docs and LB overview docs when unhiding.\n    google.protobuf.DoubleValue predictive_prefetch_ratio = 2\n        [(validate.rules).double = {lte: 3.0 gte: 1.0}];\n  }\n\n  reserved 12, 15, 7, 11, 35;\n\n  reserved \"hosts\", \"tls_context\", \"extension_protocol_options\";\n\n  // Configuration to use different transport sockets for different endpoints.\n  // The entry of *envoy.transport_socket_match* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`\n  // is used to match against the transport sockets as they appear in the list. 
The first\n  // :ref:`match <envoy_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` is used.\n  // For example, with the following match\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"enableMTLS\"\n  //    match:\n  //      acceptMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... } # tls socket configuration\n  //  - name: \"defaultToPlaintext\"\n  //    match: {}\n  //    transport_socket:\n  //      name: envoy.transport_sockets.raw_buffer\n  //\n  // Connections to the endpoints whose metadata value under *envoy.transport_socket_match*\n  // having \"acceptMTLS\"/\"true\" key/value pair use the \"enableMTLS\" socket configuration.\n  //\n  // If a :ref:`socket match <envoy_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` with empty match\n  // criteria is provided, it always matches any endpoint. For example, the \"defaultToPlaintext\"\n  // socket match in the case above.\n  //\n  // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any\n  // *TransportSocketMatch*, the socket configuration falls back to using the *tls_context* or\n  // *transport_socket* specified in this cluster.\n  //\n  // This field allows gradual and flexible transport socket configuration changes.\n  //\n  // The metadata of endpoints in EDS can indicate transport socket capabilities. For example,\n  // an endpoint's metadata can have two key value pairs as \"acceptMTLS\": \"true\",\n  // \"acceptPlaintext\": \"true\". While some other endpoints, only accepting plaintext traffic\n  // has \"acceptPlaintext\": \"true\" metadata information.\n  //\n  // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS\n  // traffic for endpoints with \"acceptMTLS\": \"true\", by adding a corresponding\n  // *TransportSocketMatch* in this field. 
Other client Envoys receive CDS without\n  // *transport_socket_match* set, and still send plain text traffic to the same cluster.\n  //\n  // This field can be used to specify custom transport socket configurations for health\n  // checks by adding matching key/value pairs in a health check's\n  // :ref:`transport socket match criteria <envoy_api_field_config.core.v3.HealthCheck.transport_socket_match_criteria>` field.\n  //\n  // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]\n  repeated TransportSocketMatch transport_socket_matches = 43;\n\n  // Supplies the name of the cluster which must be unique across all clusters.\n  // The cluster name is used when emitting\n  // :ref:`statistics <config_cluster_manager_cluster_stats>` if :ref:`alt_stat_name\n  // <envoy_api_field_config.cluster.v3.Cluster.alt_stat_name>` is not provided.\n  // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // An optional alternative to the cluster name to be used while emitting stats.\n  // Any ``:`` in the name will be converted to ``_`` when emitting statistics. 
This should not be\n  // confused with :ref:`Router Filter Header\n  // <config_http_filters_router_x-envoy-upstream-alt-stat-name>`.\n  string alt_stat_name = 28;\n\n  oneof cluster_discovery_type {\n    // The :ref:`service discovery type <arch_overview_service_discovery_types>`\n    // to use for resolving the cluster.\n    DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];\n\n    // The custom cluster type.\n    CustomClusterType cluster_type = 38;\n  }\n\n  // Configuration to use for EDS updates for the Cluster.\n  EdsClusterConfig eds_cluster_config = 3;\n\n  // The timeout for new network connections to hosts in the cluster.\n  google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];\n\n  // Soft limit on size of the cluster’s connections read and write buffers. If\n  // unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5\n      [(udpa.annotations.security).configure_for_untrusted_upstream = true];\n\n  // The :ref:`load balancer type <arch_overview_load_balancing_types>` to use\n  // when picking a host in the cluster.\n  // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>` when implemented.]\n  LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}];\n\n  // Setting this is required for specifying members of\n  // :ref:`STATIC<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STATIC>`,\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>` clusters.\n  // This field supersedes the *hosts* field in the v2 API.\n  //\n  // .. 
attention::\n  //\n  //   Setting this allows non-EDS cluster types to contain embedded EDS equivalent\n  //   :ref:`endpoint assignments<envoy_api_msg_config.endpoint.v3.ClusterLoadAssignment>`.\n  //\n  endpoint.v3.ClusterLoadAssignment load_assignment = 33;\n\n  // Optional :ref:`active health checking <arch_overview_health_checking>`\n  // configuration for the cluster. If no\n  // configuration is specified no health checking will be done and all cluster\n  // members will be considered healthy at all times.\n  repeated core.v3.HealthCheck health_checks = 8;\n\n  // Optional maximum requests for a single upstream connection. This parameter\n  // is respected by both the HTTP/1.1 and HTTP/2 connection pool\n  // implementations. If not specified, there is no limit. Setting this\n  // parameter to 1 will effectively disable keep alive.\n  google.protobuf.UInt32Value max_requests_per_connection = 9;\n\n  // Optional :ref:`circuit breaking <arch_overview_circuit_break>` for the cluster.\n  CircuitBreakers circuit_breakers = 10;\n\n  // HTTP protocol options that are applied only to upstream HTTP connections.\n  // These options apply to all HTTP versions.\n  core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46;\n\n  // Additional options when handling HTTP requests upstream. These options will be applicable to\n  // both HTTP1 and HTTP2 requests.\n  core.v3.HttpProtocolOptions common_http_protocol_options = 29;\n\n  // Additional options when handling HTTP1 requests.\n  core.v3.Http1ProtocolOptions http_protocol_options = 13;\n\n  // Even if default HTTP2 protocol options are desired, this field must be\n  // set so that Envoy will assume that the upstream supports HTTP/2 when\n  // making new HTTP connection pool connections. Currently, Envoy only\n  // supports prior knowledge for upstream connections. Even if TLS is used\n  // with ALPN, `http2_protocol_options` must be specified. 
As an aside this allows HTTP/2\n  // connections to happen over plain text.\n  core.v3.Http2ProtocolOptions http2_protocol_options = 14\n      [(udpa.annotations.security).configure_for_untrusted_upstream = true];\n\n  // The extension_protocol_options field is used to provide extension-specific protocol options\n  // for upstream connections. The key should match the extension filter name, such as\n  // \"envoy.filters.network.thrift_proxy\". See the extension's documentation for details on\n  // specific options.\n  map<string, google.protobuf.Any> typed_extension_protocol_options = 36;\n\n  // If the DNS refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used as the cluster’s DNS refresh\n  // rate. The value configured must be at least 1ms. If this setting is not specified, the\n  // value defaults to 5000ms. For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  google.protobuf.Duration dns_refresh_rate = 16\n      [(validate.rules).duration = {gt {nanos: 1000000}}];\n\n  // If the DNS failure refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is\n  // not specified, the failure refresh rate defaults to the DNS refresh rate. 
For cluster types\n  // other than :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>` this setting is\n  // ignored.\n  RefreshRate dns_failure_refresh_rate = 44;\n\n  // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true,\n  // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS\n  // resolution.\n  bool respect_dns_ttl = 39;\n\n  // The DNS IP address resolution policy. If this setting is not specified, the\n  // value defaults to\n  // :ref:`AUTO<envoy_api_enum_value_config.cluster.v3.Cluster.DnsLookupFamily.AUTO>`.\n  DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}];\n\n  // If DNS resolvers are specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used to specify the cluster’s dns resolvers.\n  // If this setting is not specified, the value defaults to the default\n  // resolver, which uses /etc/resolv.conf for configuration. For cluster types\n  // other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. 
Apple's API only allows overriding DNS resolvers via system settings.\n  repeated core.v3.Address dns_resolvers = 18;\n\n  // [#next-major-version: Reconcile DNS options in a single message.]\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple' API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 45;\n\n  // If specified, outlier detection will be enabled for this upstream cluster.\n  // Each of the configuration values can be overridden via\n  // :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.\n  OutlierDetection outlier_detection = 19;\n\n  // The interval for removing stale hosts from a cluster type\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.ORIGINAL_DST>`.\n  // Hosts are considered stale if they have not been used\n  // as upstream destinations during this interval. New hosts are added\n  // to original destination clusters on demand as new connections are\n  // redirected to Envoy, causing the number of hosts in the cluster to\n  // grow over time. Hosts that are not stale (they are actively used as\n  // destinations) are kept in the cluster, which allows connections to\n  // them remain open, saving the latency that would otherwise be spent\n  // on opening new connections. If this setting is not specified, the\n  // value defaults to 5000ms. 
For cluster types other than\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.ORIGINAL_DST>`\n  // this setting is ignored.\n  google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This overrides any bind_config specified in the bootstrap proto.\n  // If the address and port are empty, no bind will be performed.\n  core.v3.BindConfig upstream_bind_config = 21;\n\n  // Configuration for load balancing subsetting.\n  LbSubsetConfig lb_subset_config = 22;\n\n  // Optional configuration for the load balancing algorithm selected by\n  // LbPolicy. Currently only\n  // :ref:`RING_HASH<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.RING_HASH>`,\n  // :ref:`MAGLEV<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.MAGLEV>` and\n  // :ref:`LEAST_REQUEST<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.LEAST_REQUEST>`\n  // has additional configuration options.\n  // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding\n  // LbPolicy will generate an error at runtime.\n  oneof lb_config {\n    // Optional configuration for the Ring Hash load balancing policy.\n    RingHashLbConfig ring_hash_lb_config = 23;\n\n    // Optional configuration for the Maglev load balancing policy.\n    MaglevLbConfig maglev_lb_config = 52;\n\n    // Optional configuration for the Original Destination load balancing policy.\n    OriginalDstLbConfig original_dst_lb_config = 34;\n\n    // Optional configuration for the LeastRequest load balancing policy.\n    LeastRequestLbConfig least_request_lb_config = 37;\n  }\n\n  // Common configuration for all load balancer implementations.\n  CommonLbConfig common_lb_config = 27;\n\n  // Optional custom transport socket implementation to use for upstream connections.\n  // To setup TLS, set a transport socket with name 
`tls` and\n  // :ref:`UpstreamTlsContexts <envoy_api_msg_extensions.transport_sockets.tls.v3.UpstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.v3.TransportSocket transport_socket = 24;\n\n  // The Metadata field can be used to provide additional information about the\n  // cluster. It can be used for stats, logging, and varying filter behavior.\n  // Fields should use reverse DNS notation to denote which entity within Envoy\n  // will need the information. For instance, if the metadata is intended for\n  // the Router filter, the filter name should be specified as *envoy.filters.http.router*.\n  core.v3.Metadata metadata = 25;\n\n  // Determines how Envoy selects the protocol used to speak to upstream hosts.\n  ClusterProtocolSelection protocol_selection = 26;\n\n  // Optional options for upstream connections.\n  UpstreamConnectionOptions upstream_connection_options = 30;\n\n  // If an upstream host becomes unhealthy (as determined by the configured health checks\n  // or outlier detection), immediately close all connections to the failed host.\n  //\n  // .. note::\n  //\n  //   This is currently only supported for connections created by tcp_proxy.\n  //\n  // .. note::\n  //\n  //   The current implementation of this feature closes all connections immediately when\n  //   the unhealthy status is detected. If there are a large number of connections open\n  //   to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of\n  //   time exclusively closing these connections, and not processing any other traffic.\n  bool close_connections_on_host_health_failure = 31;\n\n  // If set to true, Envoy will ignore the health value of a host when processing its removal\n  // from service discovery. 
This means that if active health checking is used, Envoy will *not*\n  // wait for the endpoint to go unhealthy before removing it.\n  bool ignore_health_on_host_removal = 32;\n\n  // An (optional) network filter chain, listed in the order the filters should be applied.\n  // The chain will be applied to all outgoing connections that Envoy makes to the upstream\n  // servers of this cluster.\n  repeated Filter filters = 40;\n\n  // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the\n  // :ref:`lb_policy<envoy_api_field_config.cluster.v3.Cluster.lb_policy>` field has the value\n  // :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>`.\n  LoadBalancingPolicy load_balancing_policy = 41;\n\n  // [#not-implemented-hide:]\n  // If present, tells the client where to send load reports via LRS. If not present, the\n  // client will fall back to a client-side default, which may be either (a) don't send any\n  // load reports or (b) send load reports for all clusters to a single default server\n  // (which may be configured in the bootstrap file).\n  //\n  // Note that if multiple clusters point to the same LRS server, the client may choose to\n  // create a separate stream for each cluster or it may choose to coalesce the data for\n  // multiple clusters onto a single stream. Either way, the client must make sure to send\n  // the data for any given cluster on no more than one stream.\n  //\n  // [#next-major-version: In the v3 API, we should consider restructuring this somehow,\n  // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation\n  // from the LRS stream here.]\n  core.v3.ConfigSource lrs_server = 42;\n\n  // If track_timeout_budgets is true, the :ref:`timeout budget histograms\n  // <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each\n  // request. 
These show what percentage of a request's per try and global timeout was used. A value\n  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value\n  // of 100 would indicate that the request took the entirety of the timeout given to it.\n  //\n  // .. attention::\n  //\n  //   This field has been deprecated in favor of `timeout_budgets`, part of\n  //   :ref:`track_cluster_stats <envoy_api_field_config.cluster.v3.Cluster.track_cluster_stats>`.\n  bool track_timeout_budgets = 47 [deprecated = true];\n\n  // Optional customization and configuration of upstream connection pool, and upstream type.\n  //\n  // Currently this field only applies for HTTP traffic but is designed for eventual use for custom\n  // TCP upstreams.\n  //\n  // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream\n  // HTTP, using the http connection pool and the codec from `http2_protocol_options`\n  //\n  // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT\n  // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool.\n  //\n  // The default pool used is the generic connection pool which creates the HTTP upstream for most\n  // HTTP requests, and the TCP upstream if CONNECT termination is configured.\n  //\n  // If users desire custom connection pool or upstream behavior, for example terminating\n  // CONNECT only if a custom filter indicates it is appropriate, the custom factories\n  // can be registered and configured here.\n  core.v3.TypedExtensionConfig upstream_config = 48;\n\n  // Configuration to track optional cluster stats.\n  TrackClusterStats track_cluster_stats = 49;\n\n  // [#not-implemented-hide:]\n  // Prefetch configuration for this cluster.\n  PrefetchPolicy prefetch_policy = 50;\n\n  // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate\n  // connection pool for every downstream 
connection\n  bool connection_pool_per_downstream_connection = 51;\n}\n\n// [#not-implemented-hide:] Extensible load balancing policy configuration.\n//\n// Every LB policy defined via this mechanism will be identified via a unique name using reverse\n// DNS notation. If the policy needs configuration parameters, it must define a message for its\n// own configuration, which will be stored in the config field. The name of the policy will tell\n// clients which type of message they should expect to see in the config field.\n//\n// Note that there are cases where it is useful to be able to independently select LB policies\n// for choosing a locality and for choosing an endpoint within that locality. For example, a\n// given deployment may always use the same policy to choose the locality, but for choosing the\n// endpoint within the locality, some clusters may use weighted-round-robin, while others may\n// use some sort of session-based balancing.\n//\n// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a\n// child LB policy for each locality. For each request, the parent chooses the locality and then\n// delegates to the child policy for that locality to choose the endpoint within the locality.\n//\n// To facilitate this, the config message for the top-level LB policy may include a field of\n// type LoadBalancingPolicy that specifies the child policy.\nmessage LoadBalancingPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.LoadBalancingPolicy\";\n\n  message Policy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.LoadBalancingPolicy.Policy\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    // Required. The name of the LB policy.\n    string name = 1;\n\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Each client will iterate over the list in order and stop at the first policy that it\n  // supports. 
This provides a mechanism for starting to use new LB policies that are not yet\n  // supported by all clients.\n  repeated Policy policies = 1;\n}\n\n// An extensible structure containing the address Envoy should bind to when\n// establishing upstream connections.\nmessage UpstreamBindConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.UpstreamBindConfig\";\n\n  // The address Envoy should bind to when establishing upstream connections.\n  core.v3.Address source_address = 1;\n}\n\nmessage UpstreamConnectionOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.UpstreamConnectionOptions\";\n\n  // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.\n  core.v3.TcpKeepalive tcp_keepalive = 1;\n}\n\nmessage TrackClusterStats {\n  // If timeout_budgets is true, the :ref:`timeout budget histograms\n  // <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each\n  // request. These show what percentage of a request's per try and global timeout was used. A value\n  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value\n  // of 100 would indicate that the request took the entirety of the timeout given to it.\n  bool timeout_budgets = 1;\n\n  // If request_response_sizes is true, then the :ref:`histograms\n  // <config_cluster_manager_cluster_stats_request_response_sizes>`  tracking header and body sizes\n  // of requests and responses will be published.\n  bool request_response_sizes = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/v3/filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v3;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v3\";\noption java_outer_classname = \"FilterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Upstream filters]\n// Upstream filters apply to the connections to the upstream cluster hosts.\n\nmessage Filter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.cluster.Filter\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any typed_config = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/v3/outlier_detection.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v3\";\noption java_outer_classname = \"OutlierDetectionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Outlier detection]\n\n// See the :ref:`architecture overview <arch_overview_outlier_detection>` for\n// more information on outlier detection.\n// [#next-free-field: 21]\nmessage OutlierDetection {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.cluster.OutlierDetection\";\n\n  // The number of consecutive 5xx responses or local origin errors that are mapped\n  // to 5xx error codes before a consecutive 5xx ejection\n  // occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_5xx = 1;\n\n  // The time interval between ejection analysis sweeps. This can result in\n  // both new ejections as well as hosts being returned to service. Defaults\n  // to 10000ms or 10s.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}];\n\n  // The base time that a host is ejected for. The real time is equal to the\n  // base time multiplied by the number of times the host has been ejected.\n  // Defaults to 30000ms or 30s.\n  google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];\n\n  // The maximum % of an upstream cluster that can be ejected due to outlier\n  // detection. 
Defaults to 10% but will eject at least one host regardless of the value.\n  google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive 5xx. This setting can be used to disable\n  // ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics. This setting can be used to\n  // disable ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}];\n\n  // The number of hosts in a cluster that must have enough request volume to\n  // detect success rate outliers. If the number of hosts is less than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for any host in the cluster. Defaults to 5.\n  google.protobuf.UInt32Value success_rate_minimum_hosts = 7;\n\n  // The minimum number of total requests that must be collected in one\n  // interval (as defined by the interval duration above) to include this host\n  // in success rate based outlier detection. If the volume is lower than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for that host. Defaults to 100.\n  google.protobuf.UInt32Value success_rate_request_volume = 8;\n\n  // This factor is used to determine the ejection threshold for success rate\n  // outlier ejection. The ejection threshold is the difference between the\n  // mean success rate, and the product of this factor and the standard\n  // deviation of the mean success rate: mean - (stdev *\n  // success_rate_stdev_factor). This factor is divided by a thousand to get a\n  // double. 
That is, if the desired factor is 1.9, the runtime value should\n  // be 1900. Defaults to 1900.\n  google.protobuf.UInt32Value success_rate_stdev_factor = 9;\n\n  // The number of consecutive gateway failures (502, 503, 504 status codes)\n  // before a consecutive gateway failure ejection occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_gateway_failure = 10;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive gateway failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. Defaults to 0.\n  google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // Determines whether to distinguish local origin failures from external errors. If set to true\n  // the following configuration parameters are taken into account:\n  // :ref:`consecutive_local_origin_failure<envoy_api_field_config.cluster.v3.OutlierDetection.consecutive_local_origin_failure>`,\n  // :ref:`enforcing_consecutive_local_origin_failure<envoy_api_field_config.cluster.v3.OutlierDetection.enforcing_consecutive_local_origin_failure>`\n  // and\n  // :ref:`enforcing_local_origin_success_rate<envoy_api_field_config.cluster.v3.OutlierDetection.enforcing_local_origin_success_rate>`.\n  // Defaults to false.\n  bool split_external_local_origin_errors = 12;\n\n  // The number of consecutive locally originated failures before ejection\n  // occurs. Defaults to 5. Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value consecutive_local_origin_failure = 13;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive locally originated failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. 
Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics for locally originated errors.\n  // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The failure percentage to use when determining failure percentage-based outlier detection. If\n  // the failure percentage of a given host is greater than or equal to this value, it will be\n  // ejected. Defaults to 85.\n  google.protobuf.UInt32Value failure_percentage_threshold = 16\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // failure percentage statistics. This setting can be used to disable ejection or to ramp it up\n  // slowly. Defaults to 0.\n  //\n  // [#next-major-version: setting this without setting failure_percentage_threshold should be\n  // invalid in v4.]\n  google.protobuf.UInt32Value enforcing_failure_percentage = 17\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // local-origin failure percentage statistics. This setting can be used to disable ejection or to\n  // ramp it up slowly. 
Defaults to 0.\n  google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection.\n  // If the total number of hosts in the cluster is less than this value, failure percentage-based\n  // ejection will not be performed. Defaults to 5.\n  google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19;\n\n  // The minimum number of total requests that must be collected in one interval (as defined by the\n  // interval duration above) to perform failure percentage-based ejection for this host. If the\n  // volume is lower than this setting, failure percentage-based ejection will not be performed for\n  // this host. Defaults to 50.\n  google.protobuf.UInt32Value failure_percentage_request_volume = 20;\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/cluster/v4alpha/circuit_breaker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v4alpha\";\noption java_outer_classname = \"CircuitBreakerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Circuit breakers]\n\n// :ref:`Circuit breaking<arch_overview_circuit_break>` settings can be\n// specified individually for each defined priority.\nmessage CircuitBreakers {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.CircuitBreakers\";\n\n  // A Thresholds defines CircuitBreaker settings for a\n  // :ref:`RoutingPriority<envoy_api_enum_config.core.v4alpha.RoutingPriority>`.\n  // [#next-free-field: 9]\n  message Thresholds {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.CircuitBreakers.Thresholds\";\n\n    message RetryBudget {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget\";\n\n      // Specifies the limit on concurrent retries as a percentage of the sum of active requests and\n      // active pending requests. For example, if there are 100 active requests and the\n      // budget_percent is set to 25, there may be 25 active retries.\n      //\n      // This parameter is optional. Defaults to 20%.\n      type.v3.Percent budget_percent = 1;\n\n      // Specifies the minimum retry concurrency allowed for the retry budget. 
The limit on the\n      // number of active retries may never go below this number.\n      //\n      // This parameter is optional. Defaults to 3.\n      google.protobuf.UInt32Value min_retry_concurrency = 2;\n    }\n\n    // The :ref:`RoutingPriority<envoy_api_enum_config.core.v4alpha.RoutingPriority>`\n    // the specified CircuitBreaker settings apply to.\n    core.v4alpha.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // The maximum number of connections that Envoy will make to the upstream\n    // cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_connections = 2;\n\n    // The maximum number of pending requests that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_pending_requests = 3;\n\n    // The maximum number of parallel requests that Envoy will make to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_requests = 4;\n\n    // The maximum number of parallel retries that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 3.\n    google.protobuf.UInt32Value max_retries = 5;\n\n    // Specifies a limit on concurrent retries in relation to the number of active requests. This\n    // parameter is optional.\n    //\n    // .. note::\n    //\n    //    If this field is set, the retry budget will override any configured retry circuit\n    //    breaker.\n    RetryBudget retry_budget = 8;\n\n    // If track_remaining is true, then stats will be published that expose\n    // the number of resources remaining until the circuit breakers open. If\n    // not specified, the default is false.\n    //\n    // .. 
note::\n    //\n    //    If a retry budget is used in lieu of the max_retries circuit breaker,\n    //    the remaining retry resources will not be tracked.\n    bool track_remaining = 6;\n\n    // The maximum number of connection pools per cluster that Envoy will concurrently support at\n    // once. If not specified, the default is unlimited. Set this for clusters which create a\n    // large number of connection pools. See\n    // :ref:`Circuit Breaking <arch_overview_circuit_break_cluster_maximum_connection_pools>` for\n    // more details.\n    google.protobuf.UInt32Value max_connection_pools = 7;\n  }\n\n  // If multiple :ref:`Thresholds<envoy_api_msg_config.cluster.v4alpha.CircuitBreakers.Thresholds>`\n  // are defined with the same :ref:`RoutingPriority<envoy_api_enum_config.core.v4alpha.RoutingPriority>`,\n  // the first one in the list is used. If no Thresholds is defined for a given\n  // :ref:`RoutingPriority<envoy_api_enum_config.core.v4alpha.RoutingPriority>`, the default values\n  // are used.\n  repeated Thresholds thresholds = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/v4alpha/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v4alpha;\n\nimport \"envoy/config/cluster/v4alpha/circuit_breaker.proto\";\nimport \"envoy/config/cluster/v4alpha/filter.proto\";\nimport \"envoy/config/cluster/v4alpha/outlier_detection.proto\";\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/health_check.proto\";\nimport \"envoy/config/core/v4alpha/protocol.proto\";\nimport \"envoy/config/endpoint/v3/endpoint.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/collection_entry.proto\";\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v4alpha\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Cluster configuration]\n\n// Cluster list collections. 
Entries are *Cluster* resources or references.\n// [#not-implemented-hide:]\nmessage ClusterCollection {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.ClusterCollection\";\n\n  udpa.core.v1.CollectionEntry entries = 1;\n}\n\n// Configuration for a single upstream cluster.\n// [#next-free-field: 53]\nmessage Cluster {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.cluster.v3.Cluster\";\n\n  // Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`\n  // for an explanation on each type.\n  enum DiscoveryType {\n    // Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`\n    // for an explanation.\n    STATIC = 0;\n\n    // Refer to the :ref:`strict DNS discovery\n    // type<arch_overview_service_discovery_types_strict_dns>`\n    // for an explanation.\n    STRICT_DNS = 1;\n\n    // Refer to the :ref:`logical DNS discovery\n    // type<arch_overview_service_discovery_types_logical_dns>`\n    // for an explanation.\n    LOGICAL_DNS = 2;\n\n    // Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`\n    // for an explanation.\n    EDS = 3;\n\n    // Refer to the :ref:`original destination discovery\n    // type<arch_overview_service_discovery_types_original_destination>`\n    // for an explanation.\n    ORIGINAL_DST = 4;\n  }\n\n  // Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture\n  // overview section for information on each type.\n  enum LbPolicy {\n    reserved 4;\n\n    reserved \"ORIGINAL_DST_LB\";\n\n    // Refer to the :ref:`round robin load balancing\n    // policy<arch_overview_load_balancing_types_round_robin>`\n    // for an explanation.\n    ROUND_ROBIN = 0;\n\n    // Refer to the :ref:`least request load balancing\n    // policy<arch_overview_load_balancing_types_least_request>`\n    // for an explanation.\n    LEAST_REQUEST = 1;\n\n    // Refer 
to the :ref:`ring hash load balancing\n    // policy<arch_overview_load_balancing_types_ring_hash>`\n    // for an explanation.\n    RING_HASH = 2;\n\n    // Refer to the :ref:`random load balancing\n    // policy<arch_overview_load_balancing_types_random>`\n    // for an explanation.\n    RANDOM = 3;\n\n    // Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`\n    // for an explanation.\n    MAGLEV = 5;\n\n    // This load balancer type must be specified if the configured cluster provides a cluster\n    // specific load balancer. Consult the configured cluster's documentation for whether to set\n    // this option or not.\n    CLUSTER_PROVIDED = 6;\n\n    // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy\n    // <envoy_api_field_config.cluster.v4alpha.Cluster.load_balancing_policy>` field to determine the LB policy.\n    // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field\n    // and instead using the new load_balancing_policy field as the one and only mechanism for\n    // configuring this.]\n    LOAD_BALANCING_POLICY_CONFIG = 7;\n  }\n\n  // When V4_ONLY is selected, the DNS resolver will only perform a lookup for\n  // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will\n  // only perform a lookup for addresses in the IPv6 family. 
If AUTO is\n  // specified, the DNS resolver will first perform a lookup for addresses in\n  // the IPv6 family and fallback to a lookup for addresses in the IPv4 family.\n  // For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this setting is\n  // ignored.\n  enum DnsLookupFamily {\n    AUTO = 0;\n    V4_ONLY = 1;\n    V6_ONLY = 2;\n  }\n\n  enum ClusterProtocolSelection {\n    // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).\n    // If :ref:`http2_protocol_options <envoy_api_field_config.cluster.v4alpha.Cluster.http2_protocol_options>` are\n    // present, HTTP2 will be used, otherwise HTTP1.1 will be used.\n    USE_CONFIGURED_PROTOCOL = 0;\n\n    // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.\n    USE_DOWNSTREAM_PROTOCOL = 1;\n  }\n\n  // TransportSocketMatch specifies what transport socket config will be used\n  // when the match conditions are satisfied.\n  message TransportSocketMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.TransportSocketMatch\";\n\n    // The name of the match, used in stats generation.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Optional endpoint metadata match criteria.\n    // The connection to the endpoint with metadata matching what is set in this field\n    // will use the transport socket configuration specified here.\n    // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match\n    // against the values specified in this field.\n    google.protobuf.Struct match = 2;\n\n    // The configuration of the transport socket.\n    core.v4alpha.TransportSocket transport_socket = 3;\n  }\n\n  // Extended cluster type.\n  message CustomClusterType 
{\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.CustomClusterType\";\n\n    // The type of the cluster to instantiate. The name must match a supported cluster type.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Cluster specific configuration which depends on the cluster being instantiated.\n    // See the supported cluster for further documentation.\n    google.protobuf.Any typed_config = 2;\n  }\n\n  // Only valid when discovery type is EDS.\n  message EdsClusterConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.EdsClusterConfig\";\n\n    // Configuration for the source of EDS updates for this Cluster.\n    core.v4alpha.ConfigSource eds_config = 1;\n\n    oneof name_specifier {\n      // Optional alternative to cluster name to present to EDS. This does not\n      // have the same restrictions as cluster name, i.e. it may be arbitrary\n      // length.\n      string service_name = 2;\n\n      // Resource locator for EDS. This is mutually exclusive to *service_name*.\n      // [#not-implemented-hide:]\n      udpa.core.v1.ResourceLocator eds_resource_locator = 3;\n    }\n  }\n\n  // Optionally divide the endpoints in this cluster into subsets defined by\n  // endpoint metadata and selected by route and weighted cluster metadata.\n  // [#next-free-field: 8]\n  message LbSubsetConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.LbSubsetConfig\";\n\n    // If NO_FALLBACK is selected, a result\n    // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,\n    // any cluster endpoint may be returned (subject to policy, health checks,\n    // etc). 
If DEFAULT_SUBSET is selected, load balancing is performed over the\n    // endpoints matching the values from the default_subset field.\n    enum LbSubsetFallbackPolicy {\n      NO_FALLBACK = 0;\n      ANY_ENDPOINT = 1;\n      DEFAULT_SUBSET = 2;\n    }\n\n    // Specifications for subsets.\n    message LbSubsetSelector {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector\";\n\n      // Allows to override top level fallback policy per selector.\n      enum LbSubsetSelectorFallbackPolicy {\n        // If NOT_DEFINED top level config fallback policy is used instead.\n        NOT_DEFINED = 0;\n\n        // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.\n        NO_FALLBACK = 1;\n\n        // If ANY_ENDPOINT is selected, any cluster endpoint may be returned\n        // (subject to policy, health checks, etc).\n        ANY_ENDPOINT = 2;\n\n        // If DEFAULT_SUBSET is selected, load balancing is performed over the\n        // endpoints matching the values from the default_subset field.\n        DEFAULT_SUBSET = 3;\n\n        // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata\n        // keys reduced to\n        // :ref:`fallback_keys_subset<envoy_api_field_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_keys_subset>`.\n        // It allows for a fallback to a different, less specific selector if some of the keys of\n        // the selector are considered optional.\n        KEYS_SUBSET = 4;\n      }\n\n      // List of keys to match with the weighted cluster metadata.\n      repeated string keys = 1;\n\n      // Selects a mode of operation in which each subset has only one host. 
This mode uses the same rules for\n      // choosing a host, but updating hosts is faster, especially for large numbers of hosts.\n      //\n      // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy.\n      //\n      // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains\n      // only one entry.\n      //\n      // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys`\n      // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge\n      // :ref:`lb_subsets_single_host_per_subset_duplicate<config_cluster_manager_cluster_stats_subset_lb>` indicates how many duplicates are\n      // present in the current configuration.\n      bool single_host_per_subset = 4;\n\n      // The behavior used when no endpoint subset matches the selected route's\n      // metadata.\n      LbSubsetSelectorFallbackPolicy fallback_policy = 2\n          [(validate.rules).enum = {defined_only: true}];\n\n      // Subset of\n      // :ref:`keys<envoy_api_field_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` used by\n      // :ref:`KEYS_SUBSET<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`\n      // fallback policy.\n      // It has to be a non empty list if KEYS_SUBSET fallback policy is selected.\n      // For any other fallback policy the parameter is not used and should not be set.\n      // Only values also present in\n      // :ref:`keys<envoy_api_field_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` are allowed, but\n      // `fallback_keys_subset` cannot be equal to `keys`.\n      repeated string fallback_keys_subset = 3;\n    }\n\n    // The behavior used when no endpoint subset matches the selected route's\n    // 
metadata. The value defaults to\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // Specifies the default subset of endpoints used during fallback if\n    // fallback_policy is\n    // :ref:`DEFAULT_SUBSET<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.\n    // Each field in default_subset is\n    // compared to the matching LbEndpoint.Metadata under the *envoy.lb*\n    // namespace. It is valid for no hosts to match, in which case the behavior\n    // is the same as a fallback_policy of\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    google.protobuf.Struct default_subset = 2;\n\n    // For each entry, LbEndpoint.Metadata's\n    // *envoy.lb* namespace is traversed and a subset is created for each unique\n    // combination of key and value. For example:\n    //\n    // .. code-block:: json\n    //\n    //   { \"subset_selectors\": [\n    //       { \"keys\": [ \"version\" ] },\n    //       { \"keys\": [ \"stage\", \"hardware_type\" ] }\n    //   ]}\n    //\n    // A subset is matched when the metadata from the selected route and\n    // weighted cluster contains the same keys and values as the subset's\n    // metadata. 
The same host may appear in multiple subsets.\n    repeated LbSubsetSelector subset_selectors = 3;\n\n    // If true, routing to subsets will take into account the localities and locality weights of the\n    // endpoints when making the routing decision.\n    //\n    // There are some potential pitfalls associated with enabling this feature, as the resulting\n    // traffic split after applying both a subset match and locality weights might be undesirable.\n    //\n    // Consider for example a situation in which you have 50/50 split across two localities X/Y\n    // which have 100 hosts each without subsetting. If the subset LB results in X having only 1\n    // host selected but Y having 100, then a lot more load is being dumped on the single host in X\n    // than originally anticipated in the load balancing assignment delivered via EDS.\n    bool locality_weight_aware = 4;\n\n    // When used with locality_weight_aware, scales the weight of each locality by the ratio\n    // of hosts in the subset vs hosts in the original subset. This aims to even out the load\n    // going to an individual locality if said locality is disproportionately affected by the\n    // subset predicate.\n    bool scale_locality_weight = 5;\n\n    // If true, when a fallback policy is configured and its corresponding subset fails to find\n    // a host this will cause any host to be selected instead.\n    //\n    // This is useful when using the default subset as the fallback policy, given the default\n    // subset might become empty. 
With this option enabled, if that happens the LB will attempt\n    // to select a host from the entire cluster.\n    bool panic_mode_any = 6;\n\n    // If true, metadata specified for a metadata key will be matched against the corresponding\n    // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value\n    // and any of the elements in the list matches the criteria.\n    bool list_as_any = 7;\n  }\n\n  // Specific configuration for the LeastRequest load balancing policy.\n  message LeastRequestLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.LeastRequestLbConfig\";\n\n    // The number of random healthy hosts from which the host with the fewest active requests will\n    // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.\n    google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];\n\n    // The following formula is used to calculate the dynamic weights when hosts have different load\n    // balancing weights:\n    //\n    // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`\n    //\n    // The larger the active request bias is, the more aggressively active requests will lower the\n    // effective weight when all host weights are not equal.\n    //\n    // `active_request_bias` must be greater than or equal to 0.0.\n    //\n    // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number\n    // of active requests at the time it picks a host and behaves like the Round Robin Load\n    // Balancer.\n    //\n    // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing\n    // weight by the number of active requests at the time it does a pick.\n    //\n    // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's\n    // host sets changes, e.g., whenever there 
is a host membership update or a host load balancing\n    // weight change.\n    //\n    // .. note::\n    //   This setting only takes effect if all host weights are not equal.\n    core.v4alpha.RuntimeDouble active_request_bias = 2;\n  }\n\n  // Specific configuration for the :ref:`RingHash<arch_overview_load_balancing_types_ring_hash>`\n  // load balancing policy.\n  message RingHashLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.RingHashLbConfig\";\n\n    // The hash function used to hash hosts onto the ketama ring.\n    enum HashFunction {\n      // Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.\n      XX_HASH = 0;\n\n      // Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with\n      // std:hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled\n      // on Linux and not macOS.\n      MURMUR_HASH_2 = 1;\n    }\n\n    reserved 2;\n\n    // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each\n    // provided host) the better the request distribution will reflect the desired weights. Defaults\n    // to 1024 entries, and limited to 8M entries. See also\n    // :ref:`maximum_ring_size<envoy_api_field_config.cluster.v4alpha.Cluster.RingHashLbConfig.maximum_ring_size>`.\n    google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];\n\n    // The hash function used to hash hosts onto the ketama ring. The value defaults to\n    // :ref:`XX_HASH<envoy_api_enum_value_config.cluster.v4alpha.Cluster.RingHashLbConfig.HashFunction.XX_HASH>`.\n    HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];\n\n    // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered\n    // to further constrain resource use. 
See also\n    // :ref:`minimum_ring_size<envoy_api_field_config.cluster.v4alpha.Cluster.RingHashLbConfig.minimum_ring_size>`.\n    google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];\n  }\n\n  // Specific configuration for the :ref:`Maglev<arch_overview_load_balancing_types_maglev>`\n  // load balancing policy.\n  message MaglevLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.MaglevLbConfig\";\n\n    // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee.\n    // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same\n    // upstream as it was before. Increasing the table size reduces the amount of disruption.\n    // The table size must be prime number. If it is not specified, the default is 65537.\n    google.protobuf.UInt64Value table_size = 1;\n  }\n\n  // Specific configuration for the\n  // :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`\n  // load balancing policy.\n  message OriginalDstLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.OriginalDstLbConfig\";\n\n    // When true, :ref:`x-envoy-original-dst-host\n    // <config_http_conn_man_headers_x-envoy-original-dst-host>` can be used to override destination\n    // address.\n    //\n    // .. 
attention::\n    //\n    //   This header isn't sanitized by default, so enabling this feature allows HTTP clients to\n    //   route traffic to arbitrary hosts and/or ports, which may have serious security\n    //   consequences.\n    bool use_http_header = 1;\n  }\n\n  // Common configuration for all load balancer implementations.\n  // [#next-free-field: 8]\n  message CommonLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.CommonLbConfig\";\n\n    // Configuration for :ref:`zone aware routing\n    // <arch_overview_load_balancing_zone_aware_routing>`.\n    message ZoneAwareLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig\";\n\n      // Configures percentage of requests that will be considered for zone aware routing\n      // if zone aware routing is configured. If not specified, the default is 100%.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      type.v3.Percent routing_enabled = 1;\n\n      // Configures minimum upstream cluster size required for zone aware routing\n      // If upstream cluster size is less than specified, zone aware routing is not performed\n      // even if zone aware routing is configured. If not specified, the default is 6.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      google.protobuf.UInt64Value min_cluster_size = 2;\n\n      // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic\n      // mode<arch_overview_load_balancing_panic_threshold>`. Instead, the cluster will fail all\n      // requests as if all hosts are unhealthy. 
This can help avoid potentially overwhelming a\n      // failing service.\n      bool fail_traffic_on_panic = 3;\n    }\n\n    // Configuration for :ref:`locality weighted load balancing\n    // <arch_overview_load_balancing_locality_weighted_lb>`\n    message LocalityWeightedLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig\";\n    }\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    message ConsistentHashingLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig\";\n\n      // If set to `true`, the cluster will use hostname instead of the resolved\n      // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.\n      bool use_hostname_for_hashing = 1;\n\n      // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150\n      // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster.\n      // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200.\n      // Minimum is 100.\n      //\n      // Applies to both Ring Hash and Maglev load balancers.\n      //\n      // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified\n      // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests\n      // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing\n      // is used to identify an eligible host. 
Further, the linear probe is implemented using a random jump in hosts ring/table to identify\n      // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the\n      // cascading overflow effect when choosing the next host in the ring/table).\n      //\n      // If weights are specified on the hosts, they are respected.\n      //\n      // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts\n      // being probed, so use a higher value if you require better performance.\n      google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}];\n    }\n\n    // Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.\n    // If not specified, the default is 50%.\n    // To disable panic mode, set to 0%.\n    //\n    // .. note::\n    //   The specified percent will be truncated to the nearest 1%.\n    type.v3.Percent healthy_panic_threshold = 1;\n\n    oneof locality_config_specifier {\n      ZoneAwareLbConfig zone_aware_lb_config = 2;\n\n      LocalityWeightedLbConfig locality_weighted_lb_config = 3;\n    }\n\n    // If set, all health check/weight/metadata updates that happen within this duration will be\n    // merged and delivered in one shot when the duration expires. The start of the duration is when\n    // the first update happens. This is useful for big clusters, with potentially noisy deploys\n    // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes\n    // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new\n    // cluster). Please always keep in mind that the use of sandbox technologies may change this\n    // behavior.\n    //\n    // If this is not set, we default to a merge window of 1000ms. 
To disable it, set the merge\n    // window to 0.\n    //\n    // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is\n    // because merging those updates isn't currently safe. See\n    // https://github.com/envoyproxy/envoy/pull/3941.\n    google.protobuf.Duration update_merge_window = 4;\n\n    // If set to true, Envoy will not consider new hosts when computing load balancing weights until\n    // they have been health checked for the first time. This will have no effect unless\n    // active health checking is also configured.\n    //\n    // Ignoring a host means that for any load balancing calculations that adjust weights based\n    // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and\n    // panic mode) Envoy will exclude these hosts in the denominator.\n    //\n    // For example, with hosts in two priorities P0 and P1, where P0 looks like\n    // {healthy, unhealthy (new), unhealthy (new)}\n    // and where P1 looks like\n    // {healthy, healthy}\n    // all traffic will still hit P0, as 1 / (3 - 2) = 1.\n    //\n    // Enabling this will allow scaling up the number of hosts for a given cluster without entering\n    // panic mode or triggering priority spillover, assuming the hosts pass the first health check.\n    //\n    // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not\n    // contribute to the calculation when deciding whether panic mode is enabled or not.\n    bool ignore_new_hosts_until_first_hc = 5;\n\n    // If set to `true`, the cluster manager will drain all existing\n    // connections to upstream hosts whenever hosts are added or removed from the cluster.\n    bool close_connections_on_host_set_change = 6;\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    ConsistentHashingLbConfig consistent_hashing_lb_config = 7;\n  }\n\n  message RefreshRate {\n    option 
(udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.RefreshRate\";\n\n    // Specifies the base interval between refreshes. This parameter is required and must be greater\n    // than zero and less than\n    // :ref:`max_interval <envoy_api_field_config.cluster.v4alpha.Cluster.RefreshRate.max_interval>`.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {nanos: 1000000}\n    }];\n\n    // Specifies the maximum interval between refreshes. This parameter is optional, but must be\n    // greater than or equal to the\n    // :ref:`base_interval <envoy_api_field_config.cluster.v4alpha.Cluster.RefreshRate.base_interval>`  if set. The default\n    // is 10 times the :ref:`base_interval <envoy_api_field_config.cluster.v4alpha.Cluster.RefreshRate.base_interval>`.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}];\n  }\n\n  // [#not-implemented-hide:]\n  message PrefetchPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.PrefetchPolicy\";\n\n    // Indicates how many streams (rounded up) can be anticipated per-upstream for each\n    // incoming stream. This is useful for high-QPS or latency-sensitive services. Prefetching\n    // will only be done if the upstream is healthy.\n    //\n    // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be\n    // established, one for the new incoming stream, and one for a presumed follow-up stream. 
For\n    // HTTP/2, only one connection would be established by default as one connection can\n    // serve both the original and presumed follow-up stream.\n    //\n    // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100\n    // active streams, there would be 100 connections in use, and 50 connections prefetched.\n    // This might be a useful value for something like short lived single-use connections,\n    // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection\n    // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP\n    // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more\n    // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue\n    // in case of unexpected disconnects where the connection could not be reused.\n    //\n    // If this value is not set, or set explicitly to one, Envoy will fetch as many connections\n    // as needed to serve streams in flight. This means in steady state if a connection is torn down,\n    // a subsequent stream will pay an upstream-rtt latency penalty waiting for streams to be\n    // prefetched.\n    //\n    // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can\n    // harm latency more than the prefetching helps.\n    google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1\n        [(validate.rules).double = {lte: 3.0 gte: 1.0}];\n\n    // Indicates how many streams (rounded up) can be anticipated across a cluster for each\n    // stream, useful for low QPS services. 
This is currently supported for a subset of\n    // deterministic non-hash-based load-balancing algorithms (weighted round robin, random).\n    // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a\n    // cluster, doing best effort predictions of what upstream would be picked next and\n    // pre-establishing a connection.\n    //\n    // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first\n    // incoming stream, 2 connections will be prefetched - one to the first upstream for this\n    // cluster, one to the second on the assumption there will be a follow-up stream.\n    //\n    // Prefetching will be limited to one prefetch per configured upstream in the cluster.\n    //\n    // If this value is not set, or set explicitly to one, Envoy will fetch as many connections\n    // as needed to serve streams in flight, so during warm up and in steady state if a connection\n    // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for\n    // connection establishment.\n    //\n    // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met,\n    // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream.\n    // TODO(alyssawilk) per LB docs and LB overview docs when unhiding.\n    google.protobuf.DoubleValue predictive_prefetch_ratio = 2\n        [(validate.rules).double = {lte: 3.0 gte: 1.0}];\n  }\n\n  reserved 12, 15, 7, 11, 35, 47;\n\n  reserved \"hosts\", \"tls_context\", \"extension_protocol_options\", \"track_timeout_budgets\";\n\n  // Configuration to use different transport sockets for different endpoints.\n  // The entry of *envoy.transport_socket_match* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`\n  // is used to match against the transport sockets as they appear in the list. 
The first\n  // :ref:`match <envoy_api_msg_config.cluster.v4alpha.Cluster.TransportSocketMatch>` is used.\n  // For example, with the following match\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"enableMTLS\"\n  //    match:\n  //      acceptMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... } # tls socket configuration\n  //  - name: \"defaultToPlaintext\"\n  //    match: {}\n  //    transport_socket:\n  //      name: envoy.transport_sockets.raw_buffer\n  //\n  // Connections to the endpoints whose metadata value under *envoy.transport_socket_match*\n  // having \"acceptMTLS\"/\"true\" key/value pair use the \"enableMTLS\" socket configuration.\n  //\n  // If a :ref:`socket match <envoy_api_msg_config.cluster.v4alpha.Cluster.TransportSocketMatch>` with empty match\n  // criteria is provided, that always match any endpoint. For example, the \"defaultToPlaintext\"\n  // socket match in case above.\n  //\n  // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any\n  // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or\n  // *transport_socket* specified in this cluster.\n  //\n  // This field allows gradual and flexible transport socket configuration changes.\n  //\n  // The metadata of endpoints in EDS can indicate transport socket capabilities. For example,\n  // an endpoint's metadata can have two key value pairs as \"acceptMTLS\": \"true\",\n  // \"acceptPlaintext\": \"true\". While some other endpoints, only accepting plaintext traffic\n  // has \"acceptPlaintext\": \"true\" metadata information.\n  //\n  // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS\n  // traffic for endpoints with \"acceptMTLS\": \"true\", by adding a corresponding\n  // *TransportSocketMatch* in this field. 
Other client Envoys receive CDS without\n  // *transport_socket_match* set, and still send plain text traffic to the same cluster.\n  //\n  // This field can be used to specify custom transport socket configurations for health\n  // checks by adding matching key/value pairs in a health check's\n  // :ref:`transport socket match criteria <envoy_api_field_config.core.v4alpha.HealthCheck.transport_socket_match_criteria>` field.\n  //\n  // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]\n  repeated TransportSocketMatch transport_socket_matches = 43;\n\n  // Supplies the name of the cluster which must be unique across all clusters.\n  // The cluster name is used when emitting\n  // :ref:`statistics <config_cluster_manager_cluster_stats>` if :ref:`alt_stat_name\n  // <envoy_api_field_config.cluster.v4alpha.Cluster.alt_stat_name>` is not provided.\n  // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // An optional alternative to the cluster name to be used while emitting stats.\n  // Any ``:`` in the name will be converted to ``_`` when emitting statistics. 
This should not be\n  // confused with :ref:`Router Filter Header\n  // <config_http_filters_router_x-envoy-upstream-alt-stat-name>`.\n  string alt_stat_name = 28;\n\n  oneof cluster_discovery_type {\n    // The :ref:`service discovery type <arch_overview_service_discovery_types>`\n    // to use for resolving the cluster.\n    DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];\n\n    // The custom cluster type.\n    CustomClusterType cluster_type = 38;\n  }\n\n  // Configuration to use for EDS updates for the Cluster.\n  EdsClusterConfig eds_cluster_config = 3;\n\n  // The timeout for new network connections to hosts in the cluster.\n  google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];\n\n  // Soft limit on size of the cluster’s connections read and write buffers. If\n  // unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5\n      [(udpa.annotations.security).configure_for_untrusted_upstream = true];\n\n  // The :ref:`load balancer type <arch_overview_load_balancing_types>` to use\n  // when picking a host in the cluster.\n  // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>` when implemented.]\n  LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}];\n\n  // Setting this is required for specifying members of\n  // :ref:`STATIC<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STATIC>`,\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>` clusters.\n  // This field supersedes the *hosts* field in the v2 API.\n  //\n  // .. 
attention::\n  //\n  //   Setting this allows non-EDS cluster types to contain embedded EDS equivalent\n  //   :ref:`endpoint assignments<envoy_api_msg_config.endpoint.v3.ClusterLoadAssignment>`.\n  //\n  endpoint.v3.ClusterLoadAssignment load_assignment = 33;\n\n  // Optional :ref:`active health checking <arch_overview_health_checking>`\n  // configuration for the cluster. If no\n  // configuration is specified no health checking will be done and all cluster\n  // members will be considered healthy at all times.\n  repeated core.v4alpha.HealthCheck health_checks = 8;\n\n  // Optional maximum requests for a single upstream connection. This parameter\n  // is respected by both the HTTP/1.1 and HTTP/2 connection pool\n  // implementations. If not specified, there is no limit. Setting this\n  // parameter to 1 will effectively disable keep alive.\n  google.protobuf.UInt32Value max_requests_per_connection = 9;\n\n  // Optional :ref:`circuit breaking <arch_overview_circuit_break>` for the cluster.\n  CircuitBreakers circuit_breakers = 10;\n\n  // HTTP protocol options that are applied only to upstream HTTP connections.\n  // These options apply to all HTTP versions.\n  core.v4alpha.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46;\n\n  // Additional options when handling HTTP requests upstream. These options will be applicable to\n  // both HTTP1 and HTTP2 requests.\n  core.v4alpha.HttpProtocolOptions common_http_protocol_options = 29;\n\n  // Additional options when handling HTTP1 requests.\n  core.v4alpha.Http1ProtocolOptions http_protocol_options = 13;\n\n  // Even if default HTTP2 protocol options are desired, this field must be\n  // set so that Envoy will assume that the upstream supports HTTP/2 when\n  // making new HTTP connection pool connections. Currently, Envoy only\n  // supports prior knowledge for upstream connections. Even if TLS is used\n  // with ALPN, `http2_protocol_options` must be specified. 
As an aside this allows HTTP/2\n  // connections to happen over plain text.\n  core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14\n      [(udpa.annotations.security).configure_for_untrusted_upstream = true];\n\n  // The extension_protocol_options field is used to provide extension-specific protocol options\n  // for upstream connections. The key should match the extension filter name, such as\n  // \"envoy.filters.network.thrift_proxy\". See the extension's documentation for details on\n  // specific options.\n  map<string, google.protobuf.Any> typed_extension_protocol_options = 36;\n\n  // If the DNS refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used as the cluster’s DNS refresh\n  // rate. The value configured must be at least 1ms. If this setting is not specified, the\n  // value defaults to 5000ms. For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  google.protobuf.Duration dns_refresh_rate = 16\n      [(validate.rules).duration = {gt {nanos: 1000000}}];\n\n  // If the DNS failure refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is\n  // not specified, the failure refresh rate defaults to the DNS refresh rate. 
For cluster types\n  // other than :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>` this setting is\n  // ignored.\n  RefreshRate dns_failure_refresh_rate = 44;\n\n  // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true,\n  // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS\n  // resolution.\n  bool respect_dns_ttl = 39;\n\n  // The DNS IP address resolution policy. If this setting is not specified, the\n  // value defaults to\n  // :ref:`AUTO<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DnsLookupFamily.AUTO>`.\n  DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}];\n\n  // If DNS resolvers are specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used to specify the cluster’s dns resolvers.\n  // If this setting is not specified, the value defaults to the default\n  // resolver, which uses /etc/resolv.conf for configuration. For cluster types\n  // other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. 
Apple's API only allows overriding DNS resolvers via system settings.\n  repeated core.v4alpha.Address dns_resolvers = 18;\n\n  // [#next-major-version: Reconcile DNS options in a single message.]\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple' API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 45;\n\n  // If specified, outlier detection will be enabled for this upstream cluster.\n  // Each of the configuration values can be overridden via\n  // :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.\n  OutlierDetection outlier_detection = 19;\n\n  // The interval for removing stale hosts from a cluster type\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.ORIGINAL_DST>`.\n  // Hosts are considered stale if they have not been used\n  // as upstream destinations during this interval. New hosts are added\n  // to original destination clusters on demand as new connections are\n  // redirected to Envoy, causing the number of hosts in the cluster to\n  // grow over time. Hosts that are not stale (they are actively used as\n  // destinations) are kept in the cluster, which allows connections to\n  // them remain open, saving the latency that would otherwise be spent\n  // on opening new connections. If this setting is not specified, the\n  // value defaults to 5000ms. 
For cluster types other than\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.ORIGINAL_DST>`\n  // this setting is ignored.\n  google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This overrides any bind_config specified in the bootstrap proto.\n  // If the address and port are empty, no bind will be performed.\n  core.v4alpha.BindConfig upstream_bind_config = 21;\n\n  // Configuration for load balancing subsetting.\n  LbSubsetConfig lb_subset_config = 22;\n\n  // Optional configuration for the load balancing algorithm selected by\n  // LbPolicy. Currently only\n  // :ref:`RING_HASH<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.RING_HASH>`,\n  // :ref:`MAGLEV<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.MAGLEV>` and\n  // :ref:`LEAST_REQUEST<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.LEAST_REQUEST>`\n  // has additional configuration options.\n  // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding\n  // LbPolicy will generate an error at runtime.\n  oneof lb_config {\n    // Optional configuration for the Ring Hash load balancing policy.\n    RingHashLbConfig ring_hash_lb_config = 23;\n\n    // Optional configuration for the Maglev load balancing policy.\n    MaglevLbConfig maglev_lb_config = 52;\n\n    // Optional configuration for the Original Destination load balancing policy.\n    OriginalDstLbConfig original_dst_lb_config = 34;\n\n    // Optional configuration for the LeastRequest load balancing policy.\n    LeastRequestLbConfig least_request_lb_config = 37;\n  }\n\n  // Common configuration for all load balancer implementations.\n  CommonLbConfig common_lb_config = 27;\n\n  // Optional custom transport socket implementation to use for upstream connections.\n  // To setup TLS, set a 
transport socket with name `tls` and\n  // :ref:`UpstreamTlsContexts <envoy_api_msg_extensions.transport_sockets.tls.v4alpha.UpstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.v4alpha.TransportSocket transport_socket = 24;\n\n  // The Metadata field can be used to provide additional information about the\n  // cluster. It can be used for stats, logging, and varying filter behavior.\n  // Fields should use reverse DNS notation to denote which entity within Envoy\n  // will need the information. For instance, if the metadata is intended for\n  // the Router filter, the filter name should be specified as *envoy.filters.http.router*.\n  core.v4alpha.Metadata metadata = 25;\n\n  // Determines how Envoy selects the protocol used to speak to upstream hosts.\n  ClusterProtocolSelection protocol_selection = 26;\n\n  // Optional options for upstream connections.\n  UpstreamConnectionOptions upstream_connection_options = 30;\n\n  // If an upstream host becomes unhealthy (as determined by the configured health checks\n  // or outlier detection), immediately close all connections to the failed host.\n  //\n  // .. note::\n  //\n  //   This is currently only supported for connections created by tcp_proxy.\n  //\n  // .. note::\n  //\n  //   The current implementation of this feature closes all connections immediately when\n  //   the unhealthy status is detected. If there are a large number of connections open\n  //   to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of\n  //   time exclusively closing these connections, and not processing any other traffic.\n  bool close_connections_on_host_health_failure = 31;\n\n  // If set to true, Envoy will ignore the health value of a host when processing its removal\n  // from service discovery. 
This means that if active health checking is used, Envoy will *not*\n  // wait for the endpoint to go unhealthy before removing it.\n  bool ignore_health_on_host_removal = 32;\n\n  // An (optional) network filter chain, listed in the order the filters should be applied.\n  // The chain will be applied to all outgoing connections that Envoy makes to the upstream\n  // servers of this cluster.\n  repeated Filter filters = 40;\n\n  // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the\n  // :ref:`lb_policy<envoy_api_field_config.cluster.v4alpha.Cluster.lb_policy>` field has the value\n  // :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>`.\n  LoadBalancingPolicy load_balancing_policy = 41;\n\n  // [#not-implemented-hide:]\n  // If present, tells the client where to send load reports via LRS. If not present, the\n  // client will fall back to a client-side default, which may be either (a) don't send any\n  // load reports or (b) send load reports for all clusters to a single default server\n  // (which may be configured in the bootstrap file).\n  //\n  // Note that if multiple clusters point to the same LRS server, the client may choose to\n  // create a separate stream for each cluster or it may choose to coalesce the data for\n  // multiple clusters onto a single stream. 
Either way, the client must make sure to send\n  // the data for any given cluster on no more than one stream.\n  //\n  // [#next-major-version: In the v3 API, we should consider restructuring this somehow,\n  // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation\n  // from the LRS stream here.]\n  core.v4alpha.ConfigSource lrs_server = 42;\n\n  // Optional customization and configuration of upstream connection pool, and upstream type.\n  //\n  // Currently this field only applies for HTTP traffic but is designed for eventual use for custom\n  // TCP upstreams.\n  //\n  // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream\n  // HTTP, using the http connection pool and the codec from `http2_protocol_options`\n  //\n  // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT\n  // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool.\n  //\n  // The default pool used is the generic connection pool which creates the HTTP upstream for most\n  // HTTP requests, and the TCP upstream if CONNECT termination is configured.\n  //\n  // If users desire custom connection pool or upstream behavior, for example terminating\n  // CONNECT only if a custom filter indicates it is appropriate, the custom factories\n  // can be registered and configured here.\n  core.v4alpha.TypedExtensionConfig upstream_config = 48;\n\n  // Configuration to track optional cluster stats.\n  TrackClusterStats track_cluster_stats = 49;\n\n  // [#not-implemented-hide:]\n  // Prefetch configuration for this cluster.\n  PrefetchPolicy prefetch_policy = 50;\n\n  // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate\n  // connection pool for every downstream connection\n  bool connection_pool_per_downstream_connection = 51;\n}\n\n// [#not-implemented-hide:] Extensible load balancing policy configuration.\n//\n// Every LB 
policy defined via this mechanism will be identified via a unique name using reverse\n// DNS notation. If the policy needs configuration parameters, it must define a message for its\n// own configuration, which will be stored in the config field. The name of the policy will tell\n// clients which type of message they should expect to see in the config field.\n//\n// Note that there are cases where it is useful to be able to independently select LB policies\n// for choosing a locality and for choosing an endpoint within that locality. For example, a\n// given deployment may always use the same policy to choose the locality, but for choosing the\n// endpoint within the locality, some clusters may use weighted-round-robin, while others may\n// use some sort of session-based balancing.\n//\n// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a\n// child LB policy for each locality. For each request, the parent chooses the locality and then\n// delegates to the child policy for that locality to choose the endpoint within the locality.\n//\n// To facilitate this, the config message for the top-level LB policy may include a field of\n// type LoadBalancingPolicy that specifies the child policy.\nmessage LoadBalancingPolicy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.LoadBalancingPolicy\";\n\n  message Policy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.LoadBalancingPolicy.Policy\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    // Required. The name of the LB policy.\n    string name = 1;\n\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Each client will iterate over the list in order and stop at the first policy that it\n  // supports. 
This provides a mechanism for starting to use new LB policies that are not yet\n  // supported by all clients.\n  repeated Policy policies = 1;\n}\n\n// An extensible structure containing the address Envoy should bind to when\n// establishing upstream connections.\nmessage UpstreamBindConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.UpstreamBindConfig\";\n\n  // The address Envoy should bind to when establishing upstream connections.\n  core.v4alpha.Address source_address = 1;\n}\n\nmessage UpstreamConnectionOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.UpstreamConnectionOptions\";\n\n  // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.\n  core.v4alpha.TcpKeepalive tcp_keepalive = 1;\n}\n\nmessage TrackClusterStats {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.TrackClusterStats\";\n\n  // If timeout_budgets is true, the :ref:`timeout budget histograms\n  // <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each\n  // request. These show what percentage of a request's per try and global timeout was used. A value\n  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value\n  // of 100 would indicate that the request took the entirety of the timeout given to it.\n  bool timeout_budgets = 1;\n\n  // If request_response_sizes is true, then the :ref:`histograms\n  // <config_cluster_manager_cluster_stats_request_response_sizes>`  tracking header and body sizes\n  // of requests and responses will be published.\n  bool request_response_sizes = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/v4alpha/filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v4alpha;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v4alpha\";\noption java_outer_classname = \"FilterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Upstream filters]\n// Upstream filters apply to the connections to the upstream cluster hosts.\n\nmessage Filter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.cluster.v3.Filter\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any typed_config = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/cluster/v4alpha/outlier_detection.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v4alpha\";\noption java_outer_classname = \"OutlierDetectionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Outlier detection]\n\n// See the :ref:`architecture overview <arch_overview_outlier_detection>` for\n// more information on outlier detection.\n// [#next-free-field: 21]\nmessage OutlierDetection {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.OutlierDetection\";\n\n  // The number of consecutive 5xx responses or local origin errors that are mapped\n  // to 5xx error codes before a consecutive 5xx ejection\n  // occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_5xx = 1;\n\n  // The time interval between ejection analysis sweeps. This can result in\n  // both new ejections as well as hosts being returned to service. Defaults\n  // to 10000ms or 10s.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}];\n\n  // The base time that a host is ejected for. The real time is equal to the\n  // base time multiplied by the number of times the host has been ejected.\n  // Defaults to 30000ms or 30s.\n  google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];\n\n  // The maximum % of an upstream cluster that can be ejected due to outlier\n  // detection. 
Defaults to 10% but will eject at least one host regardless of the value.\n  google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive 5xx. This setting can be used to disable\n  // ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics. This setting can be used to\n  // disable ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}];\n\n  // The number of hosts in a cluster that must have enough request volume to\n  // detect success rate outliers. If the number of hosts is less than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for any host in the cluster. Defaults to 5.\n  google.protobuf.UInt32Value success_rate_minimum_hosts = 7;\n\n  // The minimum number of total requests that must be collected in one\n  // interval (as defined by the interval duration above) to include this host\n  // in success rate based outlier detection. If the volume is lower than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for that host. Defaults to 100.\n  google.protobuf.UInt32Value success_rate_request_volume = 8;\n\n  // This factor is used to determine the ejection threshold for success rate\n  // outlier ejection. The ejection threshold is the difference between the\n  // mean success rate, and the product of this factor and the standard\n  // deviation of the mean success rate: mean - (stdev *\n  // success_rate_stdev_factor). This factor is divided by a thousand to get a\n  // double. 
That is, if the desired factor is 1.9, the runtime value should\n  // be 1900. Defaults to 1900.\n  google.protobuf.UInt32Value success_rate_stdev_factor = 9;\n\n  // The number of consecutive gateway failures (502, 503, 504 status codes)\n  // before a consecutive gateway failure ejection occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_gateway_failure = 10;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive gateway failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. Defaults to 0.\n  google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // Determines whether to distinguish local origin failures from external errors. If set to true\n  // the following configuration parameters are taken into account:\n  // :ref:`consecutive_local_origin_failure<envoy_api_field_config.cluster.v4alpha.OutlierDetection.consecutive_local_origin_failure>`,\n  // :ref:`enforcing_consecutive_local_origin_failure<envoy_api_field_config.cluster.v4alpha.OutlierDetection.enforcing_consecutive_local_origin_failure>`\n  // and\n  // :ref:`enforcing_local_origin_success_rate<envoy_api_field_config.cluster.v4alpha.OutlierDetection.enforcing_local_origin_success_rate>`.\n  // Defaults to false.\n  bool split_external_local_origin_errors = 12;\n\n  // The number of consecutive locally originated failures before ejection\n  // occurs. Defaults to 5. Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value consecutive_local_origin_failure = 13;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive locally originated failures. 
This setting can be\n  // used to disable ejection or to ramp it up slowly. Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics for locally originated errors.\n  // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The failure percentage to use when determining failure percentage-based outlier detection. If\n  // the failure percentage of a given host is greater than or equal to this value, it will be\n  // ejected. Defaults to 85.\n  google.protobuf.UInt32Value failure_percentage_threshold = 16\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // failure percentage statistics. This setting can be used to disable ejection or to ramp it up\n  // slowly. Defaults to 0.\n  //\n  // [#next-major-version: setting this without setting failure_percentage_threshold should be\n  // invalid in v4.]\n  google.protobuf.UInt32Value enforcing_failure_percentage = 17\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // local-origin failure percentage statistics. 
This setting can be used to disable ejection or to\n  // ramp it up slowly. Defaults to 0.\n  google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection.\n  // If the total number of hosts in the cluster is less than this value, failure percentage-based\n  // ejection will not be performed. Defaults to 5.\n  google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19;\n\n  // The minimum number of total requests that must be collected in one interval (as defined by the\n  // interval duration above) to perform failure percentage-based ejection for this host. If the\n  // volume is lower than this setting, failure percentage-based ejection will not be performed for\n  // this host. Defaults to 50.\n  google.protobuf.UInt32Value failure_percentage_request_volume = 20;\n}\n"
  },
  {
    "path": "api/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.common.dynamic_forward_proxy.v2alpha;\n\nimport \"envoy/api/v2/cluster.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v2alpha\";\noption java_outer_classname = \"DnsCacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.common.dynamic_forward_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamic forward proxy common configuration]\n\n// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#next-free-field: 7]\nmessage DnsCacheConfig {\n  // The name of the cache. Multiple named caches allow independent dynamic forward proxy\n  // configurations to operate within a single Envoy process using different configurations. All\n  // configurations with the same name *must* otherwise have the same settings when referenced\n  // from different configuration components. Configuration will fail to load if this is not\n  // the case.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The DNS lookup family to use during resolution.\n  //\n  // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 \"happy eyeballs\" mode. The\n  // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and\n  // then configures a host to have a primary and fall back address. With this, we could very\n  // likely build a \"happy eyeballs\" connection pool which would race the primary / fall back\n  // address and return the one that wins. 
This same method could potentially also be used for\n  // QUIC to TCP fall back.]\n  api.v2.Cluster.DnsLookupFamily dns_lookup_family = 2\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s.\n  //\n  // .. note:\n  //\n  //  The returned DNS TTL is not currently used to alter the refresh rate. This feature will be\n  //  added in a future change.\n  //\n  // .. note:\n  //\n  // The refresh rate is rounded to the closest millisecond, and must be at least 1ms.\n  google.protobuf.Duration dns_refresh_rate = 3\n      [(validate.rules).duration = {gte {nanos: 1000000}}];\n\n  // The TTL for hosts that are unused. Hosts that have not been used in the configured time\n  // interval will be purged. If not specified defaults to 5m.\n  //\n  // .. note:\n  //\n  //   The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This\n  //   means that if the configured TTL is shorter than the refresh rate the host may not be removed\n  //   immediately.\n  //\n  //  .. note:\n  //\n  //   The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage.\n  google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}];\n\n  // The maximum number of hosts that the cache will hold. If not specified defaults to 1024.\n  //\n  // .. note:\n  //\n  //   The implementation is approximate and enforced independently on each worker thread, thus\n  //   it is possible for the maximum hosts in the cache to go slightly above the configured\n  //   value depending on timing. This is similar to how other circuit breakers work.\n  google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}];\n\n  // If the DNS failure refresh rate is specified,\n  // this is used as the cache's DNS refresh rate when DNS requests are failing. 
If this setting is\n  // not specified, the failure refresh rate defaults to the dns_refresh_rate.\n  api.v2.Cluster.RefreshRate dns_failure_refresh_rate = 6;\n}\n"
  },
  {
    "path": "api/envoy/config/common/matcher/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/common/matcher/v3/matcher.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.common.matcher.v3;\n\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.common.matcher.v3\";\noption java_outer_classname = \"MatcherProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Unified Matcher API]\n\n// Match configuration. This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 11]\nmessage MatchPredicate {\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. 
The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n\n    // HTTP request generic body match configuration.\n    HttpGenericBodyMatch http_request_generic_body_match = 9;\n\n    // HTTP response generic body match configuration.\n    HttpGenericBodyMatch http_response_generic_body_match = 10;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  // HTTP headers to match.\n  repeated route.v3.HeaderMatcher headers = 1;\n}\n\n// HTTP generic body match configuration.\n// List of text strings and hex strings to be located in HTTP body.\n// All specified strings must be found in the HTTP body for positive match.\n// The search may be limited to specified number of bytes from the body start.\n//\n// .. attention::\n//\n//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.\n//   If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified\n//   to scan only part of the http body.\nmessage HttpGenericBodyMatch {\n  message GenericTextMatch {\n    oneof rule {\n      option (validate.required) = true;\n\n      // Text string to be located in HTTP body.\n      string string_match = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Sequence of bytes to be located in HTTP body.\n      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];\n    }\n  }\n\n  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).\n  uint32 bytes_limit = 1;\n\n  // List of patterns to match.\n  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/common/matcher/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/matcher/v3:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/common/matcher/v4alpha/matcher.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.common.matcher.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.common.matcher.v4alpha\";\noption java_outer_classname = \"MatcherProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Unified Matcher API]\n\n// Match configuration. This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 11]\nmessage MatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.matcher.v3.MatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.common.matcher.v3.MatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. 
The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n\n    // HTTP request generic body match configuration.\n    HttpGenericBodyMatch http_request_generic_body_match = 9;\n\n    // HTTP response generic body match configuration.\n    HttpGenericBodyMatch http_response_generic_body_match = 10;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.matcher.v3.HttpHeadersMatch\";\n\n  // HTTP headers to match.\n  repeated route.v4alpha.HeaderMatcher headers = 1;\n}\n\n// HTTP generic body match configuration.\n// List of text strings and hex strings to be located in HTTP body.\n// All specified strings must be found in the HTTP body for positive match.\n// The search may be limited to specified number of bytes from the body start.\n//\n// .. attention::\n//\n//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.\n//   If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified\n//   to scan only part of the http body.\nmessage HttpGenericBodyMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.matcher.v3.HttpGenericBodyMatch\";\n\n  message GenericTextMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch\";\n\n    oneof rule {\n      option (validate.required) = true;\n\n      // Text string to be located in HTTP body.\n      string string_match = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Sequence of bytes to be located in HTTP body.\n      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];\n    }\n  }\n\n  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).\n  uint32 bytes_limit = 1;\n\n  // List of patterns to match.\n  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/common/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/common/tap/v2alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.common.tap.v2alpha;\n\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/service/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.common.tap.v2alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.common.tap.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common tap extension configuration]\n\n// Common configuration for all tap extensions.\nmessage CommonExtensionConfig {\n  // [#not-implemented-hide:]\n  message TapDSConfig {\n    // Configuration for the source of TapDS updates for this Cluster.\n    api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n    // Tap config to request from XDS server.\n    string name = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  oneof config_type {\n    option (validate.required) = true;\n\n    // If specified, the tap filter will be configured via an admin handler.\n    AdminConfig admin_config = 1;\n\n    // If specified, the tap filter will be configured via a static configuration that cannot be\n    // changed.\n    service.tap.v2alpha.TapConfig static_config = 2;\n\n    // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter.\n    TapDSConfig tapds_config = 3;\n  }\n}\n\n// Configuration for the admin handler. See :ref:`here <config_http_filters_tap_admin_handler>` for\n// more information.\nmessage AdminConfig {\n  // Opaque configuration ID. 
When requests are made to the admin handler, the passed opaque ID is\n  // matched to the configured filter opaque ID to determine which filter to configure.\n  string config_id = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/core/v3/address.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/socket_option.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"AddressProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Network addresses]\n\nmessage Pipe {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Pipe\";\n\n  // Unix Domain Socket path. On Linux, paths starting with '@' will use the\n  // abstract namespace. The starting '@' is replaced by a null byte by Envoy.\n  // Paths starting with '@' will result in an error in environments other than\n  // Linux.\n  string path = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The mode for the Pipe. Not applicable for abstract sockets.\n  uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}];\n}\n\n// [#not-implemented-hide:] The address represents an envoy internal listener.\n// TODO(lambdai): Make this address available for listener and endpoint.\n// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.\nmessage EnvoyInternalAddress {\n  oneof address_name_specifier {\n    option (validate.required) = true;\n\n    // [#not-implemented-hide:] The :ref:`listener name <envoy_api_field_config.listener.v3.Listener.name>` of the destination internal listener.\n    string server_listener_name = 1;\n  }\n}\n\n// [#next-free-field: 7]\nmessage SocketAddress {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.SocketAddress\";\n\n  enum Protocol {\n    TCP = 0;\n    UDP = 1;\n  }\n\n  Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The address for this socket. 
:ref:`Listeners <config_listeners>` will bind\n  // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::``\n  // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented:\n  // It is possible to distinguish a Listener address via the prefix/suffix matching\n  // in :ref:`FilterChainMatch <envoy_api_msg_config.listener.v3.FilterChainMatch>`.] When used\n  // within an upstream :ref:`BindConfig <envoy_api_msg_config.core.v3.BindConfig>`, the address\n  // controls the source address of outbound connections. For :ref:`clusters\n  // <envoy_api_msg_config.cluster.v3.Cluster>`, the cluster type determines whether the\n  // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS\n  // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized\n  // via :ref:`resolver_name <envoy_api_field_config.core.v3.SocketAddress.resolver_name>`.\n  string address = 2 [(validate.rules).string = {min_len: 1}];\n\n  oneof port_specifier {\n    option (validate.required) = true;\n\n    uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}];\n\n    // This is only valid if :ref:`resolver_name\n    // <envoy_api_field_config.core.v3.SocketAddress.resolver_name>` is specified below and the\n    // named resolver is capable of named port resolution.\n    string named_port = 4;\n  }\n\n  // The name of the custom resolver. This must have been registered with Envoy. If\n  // this is empty, a context dependent default applies. If the address is a concrete\n  // IP address, no resolution will occur. If address is a hostname this\n  // should be set for resolution other than DNS. Specifying a custom resolver with\n  // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime.\n  string resolver_name = 5;\n\n  // When binding to an IPv6 address above, this enables `IPv4 compatibility\n  // <https://tools.ietf.org/html/rfc3493#page-11>`_. 
Binding to ``::`` will\n  // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into\n  // IPv6 space as ``::FFFF:<IPv4-address>``.\n  bool ipv4_compat = 6;\n}\n\nmessage TcpKeepalive {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.TcpKeepalive\";\n\n  // Maximum number of keepalive probes to send without response before deciding\n  // the connection is dead. Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 9.)\n  google.protobuf.UInt32Value keepalive_probes = 1;\n\n  // The number of seconds a connection needs to be idle before keep-alive probes\n  // start being sent. Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 7200s (i.e., 2 hours.)\n  google.protobuf.UInt32Value keepalive_time = 2;\n\n  // The number of seconds between keep-alive probes. Default is to use the OS\n  // level configuration (unless overridden, Linux defaults to 75s.)\n  google.protobuf.UInt32Value keepalive_interval = 3;\n}\n\nmessage BindConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.BindConfig\";\n\n  // The address to bind to when creating a socket.\n  SocketAddress source_address = 1 [(validate.rules).message = {required: true}];\n\n  // Whether to set the *IP_FREEBIND* option when creating the socket. When this\n  // flag is set to true, allows the :ref:`source_address\n  // <envoy_api_field_config.cluster.v3.UpstreamBindConfig.source_address>` to be an IP address\n  // that is not configured on the system running Envoy. When this flag is set\n  // to false, the option *IP_FREEBIND* is disabled on the socket. When this\n  // flag is not set (default), the socket is not modified, i.e. 
the option is\n  // neither enabled nor disabled.\n  google.protobuf.BoolValue freebind = 2;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated SocketOption socket_options = 3;\n}\n\n// Addresses specify either a logical or physical address and port, which are\n// used to tell Envoy where to bind/listen, connect to upstream and find\n// management servers.\nmessage Address {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Address\";\n\n  oneof address {\n    option (validate.required) = true;\n\n    SocketAddress socket_address = 1;\n\n    Pipe pipe = 2;\n\n    // [#not-implemented-hide:]\n    EnvoyInternalAddress envoy_internal_address = 3;\n  }\n}\n\n// CidrRange specifies an IP Address and a prefix length to construct\n// the subnet mask for a `CIDR <https://tools.ietf.org/html/rfc4632>`_ range.\nmessage CidrRange {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.CidrRange\";\n\n  // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.\n  string address_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Length of prefix, e.g. 0, 32.\n  google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/backoff.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"BackoffProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Backoff Strategy]\n\n// Configuration defining a jittered exponential back off strategy.\nmessage BackoffStrategy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.BackoffStrategy\";\n\n  // The base interval to be used for the next back off computation. It should\n  // be greater than zero and less than or equal to :ref:`max_interval\n  // <envoy_api_field_config.core.v3.BackoffStrategy.max_interval>`.\n  google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // Specifies the maximum interval between retries. This parameter is optional,\n  // but must be greater than or equal to the :ref:`base_interval\n  // <envoy_api_field_config.core.v3.BackoffStrategy.base_interval>` if set. The default\n  // is 10 times the :ref:`base_interval\n  // <envoy_api_field_config.core.v3.BackoffStrategy.base_interval>`.\n  google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/base.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/backoff.proto\";\nimport \"envoy/config/core/v3/http_uri.proto\";\nimport \"envoy/type/v3/percent.proto\";\nimport \"envoy/type/v3/semantic_version.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"BaseProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common types]\n\n// Envoy supports :ref:`upstream priority routing\n// <arch_overview_http_routing_priority>` both at the route and the virtual\n// cluster level. The current priority implementation uses different connection\n// pool and circuit breaking settings for each priority level. This means that\n// even for HTTP/2 requests, two physical connections will be used to an\n// upstream host. 
In the future Envoy will likely support true HTTP/2 priority\n// over a single upstream connection.\nenum RoutingPriority {\n  DEFAULT = 0;\n  HIGH = 1;\n}\n\n// HTTP request method.\nenum RequestMethod {\n  METHOD_UNSPECIFIED = 0;\n  GET = 1;\n  HEAD = 2;\n  POST = 3;\n  PUT = 4;\n  DELETE = 5;\n  CONNECT = 6;\n  OPTIONS = 7;\n  TRACE = 8;\n  PATCH = 9;\n}\n\n// Identifies the direction of the traffic relative to the local Envoy.\nenum TrafficDirection {\n  // Default option is unspecified.\n  UNSPECIFIED = 0;\n\n  // The transport is used for incoming traffic.\n  INBOUND = 1;\n\n  // The transport is used for outgoing traffic.\n  OUTBOUND = 2;\n}\n\n// Identifies location of where either Envoy runs or where upstream hosts run.\nmessage Locality {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Locality\";\n\n  // Region this :ref:`zone <envoy_api_field_config.core.v3.Locality.zone>` belongs to.\n  string region = 1;\n\n  // Defines the local service zone where Envoy is running. Though optional, it\n  // should be set if discovery service routing is used and the discovery\n  // service exposes :ref:`zone data <envoy_api_field_config.endpoint.v3.LocalityLbEndpoints.locality>`,\n  // either in this message or via :option:`--service-zone`. The meaning of zone\n  // is context dependent, e.g. `Availability Zone (AZ)\n  // <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html>`_\n  // on AWS, `Zone <https://cloud.google.com/compute/docs/regions-zones/>`_ on\n  // GCP, etc.\n  string zone = 2;\n\n  // When used for locality of upstream hosts, this field further splits zone\n  // into smaller chunks of sub-zones so they can be load balanced\n  // independently.\n  string sub_zone = 3;\n}\n\n// BuildVersion combines SemVer version of extension with free-form build information\n// (i.e. 
'alpha', 'private-build') as a set of strings.\nmessage BuildVersion {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.BuildVersion\";\n\n  // SemVer version of extension.\n  type.v3.SemanticVersion version = 1;\n\n  // Free-form build information.\n  // Envoy defines several well known keys in the source/common/version/version.h file\n  google.protobuf.Struct metadata = 2;\n}\n\n// Version and identification for an Envoy extension.\n// [#next-free-field: 6]\nmessage Extension {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Extension\";\n\n  // This is the name of the Envoy filter as specified in the Envoy\n  // configuration, e.g. envoy.filters.http.router, com.acme.widget.\n  string name = 1;\n\n  // Category of the extension.\n  // Extension category names use reverse DNS notation. For instance \"envoy.filters.listener\"\n  // for Envoy's built-in listener filters or \"com.acme.filters.http\" for HTTP filters from\n  // acme.com vendor.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.]\n  string category = 2;\n\n  // [#not-implemented-hide:] Type descriptor of extension configuration proto.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.]\n  // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.]\n  string type_descriptor = 3;\n\n  // The version is a property of the extension and maintained independently\n  // of other extensions and the Envoy API.\n  // This field is not set when extension did not provide version information.\n  BuildVersion version = 4;\n\n  // Indicates that the extension is present but was disabled via dynamic configuration.\n  bool disabled = 5;\n}\n\n// Identifies a specific Envoy instance. 
The node identifier is presented to the\n// management server, which may use this identifier to distinguish per Envoy\n// configuration for serving.\n// [#next-free-field: 12]\nmessage Node {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Node\";\n\n  reserved 5;\n\n  reserved \"build_version\";\n\n  // An opaque node identifier for the Envoy node. This also provides the local\n  // service node name. It should be set if any of the following features are\n  // used: :ref:`statsd <arch_overview_statistics>`, :ref:`CDS\n  // <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-node`.\n  string id = 1;\n\n  // Defines the local service cluster name where Envoy is running. Though\n  // optional, it should be set if any of the following features are used:\n  // :ref:`statsd <arch_overview_statistics>`, :ref:`health check cluster\n  // verification\n  // <envoy_api_field_config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher>`,\n  // :ref:`runtime override directory <envoy_api_msg_config.bootstrap.v3.Runtime>`,\n  // :ref:`user agent addition\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_user_agent>`,\n  // :ref:`HTTP global rate limiting <config_http_filters_rate_limit>`,\n  // :ref:`CDS <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-cluster`.\n  string cluster = 2;\n\n  // Opaque metadata extending the node identifier. Envoy will pass this\n  // directly to the management server.\n  google.protobuf.Struct metadata = 3;\n\n  // Locality specifying where the Envoy instance is running.\n  Locality locality = 4;\n\n  // Free-form string that identifies the entity requesting config.\n  // E.g. 
\"envoy\" or \"grpc\"\n  string user_agent_name = 6;\n\n  oneof user_agent_version_type {\n    // Free-form string that identifies the version of the entity requesting config.\n    // E.g. \"1.12.2\" or \"abcd1234\", or \"SpecialEnvoyBuild\"\n    string user_agent_version = 7;\n\n    // Structured version of the entity requesting config.\n    BuildVersion user_agent_build_version = 8;\n  }\n\n  // List of extensions and their versions supported by the node.\n  repeated Extension extensions = 9;\n\n  // Client feature support list. These are well known features described\n  // in the Envoy API repository for a given major version of an API. Client features\n  // use reverse DNS naming scheme, for example `com.acme.feature`.\n  // See :ref:`the list of features <client_features>` that xDS client may\n  // support.\n  repeated string client_features = 10;\n\n  // Known listening ports on the node as a generic hint to the management server\n  // for filtering :ref:`listeners <config_listeners>` to be returned. For example,\n  // if there is a listener bound to port 80, the list can optionally contain the\n  // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint.\n  repeated Address listening_addresses = 11 [deprecated = true];\n}\n\n// Metadata provides additional inputs to filters based on matched listeners,\n// filter chains, routes and endpoints. It is structured as a map, usually from\n// filter name (in reverse DNS format) to metadata specific to the filter. 
Metadata\n// key-values for a filter are merged as connection and request handling occurs,\n// with later values for the same key overriding earlier values.\n//\n// An example use of metadata is providing additional values to\n// http_connection_manager in the envoy.http_connection_manager.access_log\n// namespace.\n//\n// Another example use of metadata is to per service config info in cluster metadata, which may get\n// consumed by multiple filters.\n//\n// For load balancing, Metadata provides a means to subset cluster endpoints.\n// Endpoints have a Metadata object associated and routes contain a Metadata\n// object to match against. There are some well defined metadata used today for\n// this purpose:\n//\n// * ``{\"envoy.lb\": {\"canary\": <bool> }}`` This indicates the canary status of an\n//   endpoint and is also used during header processing\n//   (x-envoy-upstream-canary) and for stats purposes.\n// [#next-major-version: move to type/metadata/v2]\nmessage Metadata {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Metadata\";\n\n  // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*\n  // namespace is reserved for Envoy's built-in filters.\n  map<string, google.protobuf.Struct> filter_metadata = 1;\n}\n\n// Runtime derived uint32 with a default when not specified.\nmessage RuntimeUInt32 {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.RuntimeUInt32\";\n\n  // Default value if runtime value is not available.\n  uint32 default_value = 2;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 3 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived percentage with a default when not specified.\nmessage RuntimePercent {\n  // Default value if runtime value is not available.\n  type.v3.Percent default_value = 1;\n\n  // Runtime key to get value for comparison. 
This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived double with a default when not specified.\nmessage RuntimeDouble {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.RuntimeDouble\";\n\n  // Default value if runtime value is not available.\n  double default_value = 1;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived bool with a default when not specified.\nmessage RuntimeFeatureFlag {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.RuntimeFeatureFlag\";\n\n  // Default value if runtime value is not available.\n  google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key to get value for comparison. This value is used if defined. The boolean value must\n  // be represented via its\n  // `canonical JSON encoding <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Header name/value pair.\nmessage HeaderValue {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.HeaderValue\";\n\n  // Header name.\n  string key = 1\n      [(validate.rules).string =\n           {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Header value.\n  //\n  // The same :ref:`format specifier <config_access_log_format>` as used for\n  // :ref:`HTTP access logging <config_access_log>` applies here, however\n  // unknown header values are replaced with the empty string instead of `-`.\n  string value = 2 [\n    (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}\n  ];\n}\n\n// Header name/value pair plus option to control append behavior.\nmessage HeaderValueOption {\n  
option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.HeaderValueOption\";\n\n  // Header name/value pair that this option applies to.\n  HeaderValue header = 1 [(validate.rules).message = {required: true}];\n\n  // Should the value be appended? If true (default), the value is appended to\n  // existing values. Otherwise it replaces any existing values.\n  google.protobuf.BoolValue append = 2;\n}\n\n// Wrapper for a set of headers.\nmessage HeaderMap {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.HeaderMap\";\n\n  repeated HeaderValue headers = 1;\n}\n\n// Data source consisting of either a file or an inline value.\nmessage DataSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.DataSource\";\n\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local filesystem data source.\n    string filename = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Bytes inlined in the configuration.\n    bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];\n\n    // String inlined in the configuration.\n    string inline_string = 3 [(validate.rules).string = {min_len: 1}];\n  }\n}\n\n// The message specifies the retry policy of remote data source when fetching fails.\nmessage RetryPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.RetryPolicy\";\n\n  // Specifies parameters that control :ref:`retry backoff strategy <envoy_api_msg_config.core.v3.BackoffStrategy>`.\n  // This parameter is optional, in which case the default base interval is 1000 milliseconds. The\n  // default maximum interval is 10 times the base interval.\n  BackoffStrategy retry_back_off = 1;\n\n  // Specifies the allowed number of retries. 
This parameter is optional and\n  // defaults to 1.\n  google.protobuf.UInt32Value num_retries = 2\n      [(udpa.annotations.field_migrate).rename = \"max_retries\"];\n}\n\n// The message specifies how to fetch data from remote and how to verify it.\nmessage RemoteDataSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.RemoteDataSource\";\n\n  // The HTTP URI to fetch the remote data.\n  HttpUri http_uri = 1 [(validate.rules).message = {required: true}];\n\n  // SHA256 string for verifying data.\n  string sha256 = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Retry policy for fetching remote data.\n  RetryPolicy retry_policy = 3;\n}\n\n// Async data source which support async data fetch.\nmessage AsyncDataSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.AsyncDataSource\";\n\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local async data source.\n    DataSource local = 1;\n\n    // Remote async data source.\n    RemoteDataSource remote = 2;\n  }\n}\n\n// Configuration for transport socket in :ref:`listeners <config_listeners>` and\n// :ref:`clusters <envoy_api_msg_config.cluster.v3.Cluster>`. If the configuration is\n// empty, a default transport socket implementation and configuration will be\n// chosen based on the platform and existence of tls_context.\nmessage TransportSocket {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.TransportSocket\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the transport socket to instantiate. 
The name must match a supported transport\n  // socket implementation.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Implementation specific configuration which depends on the implementation being instantiated.\n  // See the supported transport socket implementations for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not\n// specified via a runtime key.\n//\n// .. note::\n//\n//   Parsing of the runtime key's data is implemented such that it may be represented as a\n//   :ref:`FractionalPercent <envoy_api_msg_type.v3.FractionalPercent>` proto represented as JSON/YAML\n//   and may also be represented as an integer with the assumption that the value is an integral\n//   percentage out of 100. For instance, a runtime key lookup returning the value \"42\" would parse\n//   as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.\nmessage RuntimeFractionalPercent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.RuntimeFractionalPercent\";\n\n  // Default value if the runtime value's for the numerator/denominator keys are not available.\n  type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key for a YAML representation of a FractionalPercent.\n  string runtime_key = 2;\n}\n\n// Identifies a specific ControlPlane instance that Envoy is connected to.\nmessage ControlPlane {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.ControlPlane\";\n\n  // An opaque control plane identifier that uniquely identifies an instance\n  // of control plane. This can be used to identify which control plane instance,\n  // the Envoy is connected to.\n  string identifier = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/config_source.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/authority.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"ConfigSourceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Configuration sources]\n\n// xDS API and non-xDS services version. This is used to describe both resource and transport\n// protocol versions (in distinct configuration fields).\nenum ApiVersion {\n  // When not specified, we assume v2, to ease migration to Envoy's stable API\n  // versioning. If a client does not support v2 (e.g. due to deprecation), this\n  // is an invalid value.\n  AUTO = 0;\n\n  // Use xDS v2 API.\n  V2 = 1;\n\n  // Use xDS v3 API.\n  V3 = 2;\n}\n\n// API configuration source. This identifies the API type and cluster that Envoy\n// will use to fetch an xDS API.\n// [#next-free-field: 9]\nmessage ApiConfigSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.ApiConfigSource\";\n\n  // APIs may be fetched via either REST or gRPC.\n  enum ApiType {\n    // Ideally this would be 'reserved 0' but one can't reserve the default\n    // value. Instead we throw an exception if this is ever used.\n    DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // REST-JSON v2 API. 
The `canonical JSON encoding\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_ for\n    // the v2 protos is used.\n    REST = 1;\n\n    // SotW gRPC service.\n    GRPC = 2;\n\n    // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}\n    // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state\n    // with every update, the xDS server only sends what has changed since the last update.\n    DELTA_GRPC = 3;\n\n    // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be\n    // multiplexed on a single connection to an ADS endpoint.\n    // [#not-implemented-hide:]\n    AGGREGATED_GRPC = 5;\n\n    // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be\n    // multiplexed on a single connection to an ADS endpoint.\n    // [#not-implemented-hide:]\n    AGGREGATED_DELTA_GRPC = 6;\n  }\n\n  // API type (gRPC, REST, delta gRPC)\n  ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}];\n\n  // Cluster names should be used only with REST. If > 1\n  // cluster is defined, clusters will be cycled through if any kind of failure\n  // occurs.\n  //\n  // .. note::\n  //\n  //  The cluster with name ``cluster_name`` must be statically defined and its\n  //  type must not be ``EDS``.\n  repeated string cluster_names = 2;\n\n  // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined,\n  // services will be cycled through if any kind of failure occurs.\n  repeated GrpcService grpc_services = 4;\n\n  // For REST APIs, the delay between successive polls.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // For REST APIs, the request timeout. 
If not set, a default value of 1s will be used.\n  google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}];\n\n  // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be\n  // rate limited.\n  RateLimitSettings rate_limit_settings = 6;\n\n  // Skip the node identifier in subsequent discovery requests for streaming gRPC config types.\n  bool set_node_on_first_message_only = 7;\n}\n\n// Aggregated Discovery Service (ADS) options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_config.core.v3.ConfigSource>` can be used to\n// specify that ADS is to be used.\nmessage AggregatedConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.AggregatedConfigSource\";\n}\n\n// [#not-implemented-hide:]\n// Self-referencing config source options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_config.core.v3.ConfigSource>` can be used to\n// specify that other data can be obtained from the same server.\nmessage SelfConfigSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.SelfConfigSource\";\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Rate Limit settings to be applied for discovery requests made by Envoy.\nmessage RateLimitSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.RateLimitSettings\";\n\n  // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a\n  // default value of 100 will be used.\n  google.protobuf.UInt32Value max_tokens = 1;\n\n  // Rate at which tokens will be filled per second. 
If not set, a default fill rate of 10 tokens\n  // per second will be used.\n  google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}];\n}\n\n// Configuration for :ref:`listeners <config_listeners>`, :ref:`clusters\n// <config_cluster_manager>`, :ref:`routes\n// <envoy_api_msg_config.route.v3.RouteConfiguration>`, :ref:`endpoints\n// <arch_overview_service_discovery>` etc. may either be sourced from the\n// filesystem or from an xDS API source. Filesystem configs are watched with\n// inotify for updates.\n// [#next-free-field: 8]\nmessage ConfigSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.ConfigSource\";\n\n  // Authorities that this config source may be used for. An authority specified\n  // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior\n  // to configuration fetch. This field provides the association between\n  // authority name and configuration source.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.Authority authorities = 7;\n\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Path on the filesystem to source and watch for configuration updates.\n    // When sourcing configuration for :ref:`secret <envoy_api_msg_extensions.transport_sockets.tls.v3.Secret>`,\n    // the certificate and key files are also watched for updates.\n    //\n    // .. note::\n    //\n    //  The path to the source must exist at config load time.\n    //\n    // .. note::\n    //\n    //   Envoy will only watch the file path for *moves.* This is because in general only moves\n    //   are atomic. The same method of swapping files as is demonstrated in the\n    //   :ref:`runtime documentation <config_runtime_symbolic_link_swap>` can be used here also.\n    string path = 1;\n\n    // API configuration source.\n    ApiConfigSource api_config_source = 2;\n\n    // When set, ADS will be used to fetch resources. 
The ADS API configuration\n    // source in the bootstrap configuration is used.\n    AggregatedConfigSource ads = 3;\n\n    // [#not-implemented-hide:]\n    // When set, the client will access the resources from the same server it got the\n    // ConfigSource from, although not necessarily from the same stream. This is similar to the\n    // :ref:`ads<envoy_api_field.ConfigSource.ads>` field, except that the client may use a\n    // different stream to the same server. As a result, this field can be used for things\n    // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.)\n    // LDS to RDS on the same server without requiring the management server to know its name\n    // or required credentials.\n    // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since\n    // this field can implicitly mean to use the same stream in the case where the ConfigSource\n    // is provided via ADS and the specified data can also be obtained via ADS.]\n    SelfConfigSource self = 5;\n  }\n\n  // When this timeout is specified, Envoy will wait no longer than the specified time for first\n  // config response on this xDS subscription during the :ref:`initialization process\n  // <arch_overview_initialization>`. After reaching the timeout, Envoy will move to the next\n  // initialization phase, even if the first config is not delivered yet. The timer is activated\n  // when the xDS API subscription starts, and is disarmed on first config update or on error. 0\n  // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another\n  // timeout applies). The default is 15s.\n  google.protobuf.Duration initial_fetch_timeout = 4;\n\n  // API version for xDS resources. 
This implies the type URLs that the client\n  // will request for resources and the resource type that the client will in\n  // turn expect to be delivered.\n  ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/event_service_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"EventServiceConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#not-implemented-hide:]\n// Configuration of the event reporting service endpoint.\nmessage EventServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.EventServiceConfig\";\n\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Specifies the gRPC service that hosts the event reporting service.\n    GrpcService grpc_service = 1;\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/extension.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"ExtensionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Extension configuration]\n\n// Message type for extension configuration.\n// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.].\nmessage TypedExtensionConfig {\n  // The name of an extension. This is not used to select the extension, instead\n  // it serves the role of an opaque identifier.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The typed config for the extension. The type URL will be used to identify\n  // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*,\n  // the inner type URL of *TypedStruct* will be utilized. See the\n  // :ref:`extension configuration overview\n  // <config_overview_extension_configuration>` for further details.\n  google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}];\n}\n\n// Configuration source specifier for a late-bound extension configuration. The\n// parent resource is warmed until all the initial extension configurations are\n// received, unless the flag to apply the default configuration is set.\n// Subsequent extension updates are atomic on a per-worker basis. Once an\n// extension configuration is applied to a request or a connection, it remains\n// constant for the duration of processing. If the initial delivery of the\n// extension configuration fails, due to a timeout for example, the optional\n// default configuration is applied. 
Without a default configuration, the\n// extension is disabled, until an extension configuration is received. The\n// behavior of a disabled extension depends on the context. For example, a\n// filter chain with a disabled extension filter rejects all incoming streams.\nmessage ExtensionConfigSource {\n  ConfigSource config_source = 1 [(validate.rules).any = {required: true}];\n\n  // Optional default configuration to use as the initial configuration if\n  // there is a failure to receive the initial extension configuration or if\n  // `apply_default_config_without_warming` flag is set.\n  google.protobuf.Any default_config = 2;\n\n  // Use the default config as the initial configuration without warming and\n  // waiting for the first discovery response. Requires the default configuration\n  // to be supplied.\n  bool apply_default_config_without_warming = 3;\n\n  // A set of permitted extension type URLs. Extension configuration updates are rejected\n  // if they do not match any type URL in the set.\n  repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/grpc_method_list.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"GrpcMethodListProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC method list]\n\n// A list of gRPC methods which can be used as an allowlist, for example.\nmessage GrpcMethodList {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.GrpcMethodList\";\n\n  message Service {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.GrpcMethodList.Service\";\n\n    // The name of the gRPC service.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // The names of the gRPC methods in this service.\n    repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  repeated Service services = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/grpc_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"GrpcServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC services]\n\n// gRPC service configuration. This is used by :ref:`ApiConfigSource\n// <envoy_api_msg_config.core.v3.ApiConfigSource>` and filter configurations.\n// [#next-free-field: 6]\nmessage GrpcService {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.GrpcService\";\n\n  message EnvoyGrpc {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.GrpcService.EnvoyGrpc\";\n\n    // The name of the upstream gRPC cluster. SSL credentials will be supplied\n    // in the :ref:`Cluster <envoy_api_msg_config.cluster.v3.Cluster>` :ref:`transport_socket\n    // <envoy_api_field_config.cluster.v3.Cluster.transport_socket>`.\n    string cluster_name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`.\n    // Note that this authority does not override the SNI. 
The SNI is provided by the transport socket of the cluster.\n    string authority = 2\n        [(validate.rules).string =\n             {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // [#next-free-field: 9]\n  message GoogleGrpc {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.GrpcService.GoogleGrpc\";\n\n    // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.\n    message SslCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials\";\n\n      // PEM encoded server root certificates.\n      DataSource root_certs = 1;\n\n      // PEM encoded client private key.\n      DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n      // PEM encoded client certificate chain.\n      DataSource cert_chain = 3;\n    }\n\n    // Local channel credentials. Only UDS is supported for now.\n    // See https://github.com/grpc/grpc/pull/15909.\n    message GoogleLocalCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials\";\n    }\n\n    // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call\n    // credential types.\n    message ChannelCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials\";\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        SslCredentials ssl_credentials = 1;\n\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_default = 2;\n\n        GoogleLocalCredentials local_credentials = 3;\n      }\n    }\n\n    // [#next-free-field: 8]\n    message CallCredentials {\n      option 
(udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials\";\n\n      message ServiceAccountJWTAccessCredentials {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.\"\n            \"ServiceAccountJWTAccessCredentials\";\n\n        string json_key = 1;\n\n        uint64 token_lifetime_seconds = 2;\n      }\n\n      message GoogleIAMCredentials {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials\";\n\n        string authorization_token = 1;\n\n        string authority_selector = 2;\n      }\n\n      message MetadataCredentialsFromPlugin {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.\"\n            \"MetadataCredentialsFromPlugin\";\n\n        reserved 2;\n\n        reserved \"config\";\n\n        string name = 1;\n\n        oneof config_type {\n          google.protobuf.Any typed_config = 3;\n        }\n      }\n\n      // Security token service configuration that allows Google gRPC to\n      // fetch security token from an OAuth 2.0 authorization server.\n      // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and\n      // https://github.com/grpc/grpc/pull/19587.\n      // [#next-free-field: 10]\n      message StsService {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService\";\n\n        // URI of the token exchange service that handles token exchange requests.\n        // [#comment:TODO(asraa): Add URI validation when implemented. 
Tracked by\n        // https://github.com/envoyproxy/protoc-gen-validate/issues/303]\n        string token_exchange_service_uri = 1;\n\n        // Location of the target service or resource where the client\n        // intends to use the requested security token.\n        string resource = 2;\n\n        // Logical name of the target service where the client intends to\n        // use the requested security token.\n        string audience = 3;\n\n        // The desired scope of the requested security token in the\n        // context of the service or resource where the token will be used.\n        string scope = 4;\n\n        // Type of the requested security token.\n        string requested_token_type = 5;\n\n        // The path of subject token, a security token that represents the\n        // identity of the party on behalf of whom the request is being made.\n        string subject_token_path = 6 [(validate.rules).string = {min_len: 1}];\n\n        // Type of the subject token.\n        string subject_token_type = 7 [(validate.rules).string = {min_len: 1}];\n\n        // The path of actor token, a security token that represents the identity\n        // of the acting party. 
The acting party is authorized to use the\n        // requested security token and act on behalf of the subject.\n        string actor_token_path = 8;\n\n        // Type of the actor token.\n        string actor_token_type = 9;\n      }\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        // Access token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.\n        string access_token = 1;\n\n        // Google Compute Engine credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_compute_engine = 2;\n\n        // Google refresh token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.\n        string google_refresh_token = 3;\n\n        // Service Account JWT Access credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.\n        ServiceAccountJWTAccessCredentials service_account_jwt_access = 4;\n\n        // Google IAM credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.\n        GoogleIAMCredentials google_iam = 5;\n\n        // Custom authenticator credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.\n        // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.\n        MetadataCredentialsFromPlugin from_plugin = 6;\n\n        // Custom security token service which implements OAuth 2.0 token exchange.\n        // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16\n        // See https://github.com/grpc/grpc/pull/19587.\n        StsService sts_service = 7;\n      }\n    }\n\n    // Channel arguments.\n    message ChannelArgs {\n      message Value {\n        // Pointer values are not supported, since they don't make any sense when\n   
     // delivered via the API.\n        oneof value_specifier {\n          option (validate.required) = true;\n\n          string string_value = 1;\n\n          int64 int_value = 2;\n        }\n      }\n\n      // See grpc_types.h GRPC_ARG #defines for keys that work here.\n      map<string, Value> args = 1;\n    }\n\n    // The target URI when using the `Google C++ gRPC client\n    // <https://github.com/grpc/grpc>`_. SSL credentials will be supplied in\n    // :ref:`channel_credentials <envoy_api_field_config.core.v3.GrpcService.GoogleGrpc.channel_credentials>`.\n    string target_uri = 1 [(validate.rules).string = {min_len: 1}];\n\n    ChannelCredentials channel_credentials = 2;\n\n    // A set of call credentials that can be composed with `channel credentials\n    // <https://grpc.io/docs/guides/auth.html#credential-types>`_.\n    repeated CallCredentials call_credentials = 3;\n\n    // The human readable prefix to use when emitting statistics for the gRPC\n    // service.\n    //\n    // .. csv-table::\n    //    :header: Name, Type, Description\n    //    :widths: 1, 1, 2\n    //\n    //    streams_total, Counter, Total number of streams opened\n    //    streams_closed_<gRPC status code>, Counter, Total streams closed with <gRPC status code>\n    string stat_prefix = 4 [(validate.rules).string = {min_len: 1}];\n\n    // The name of the Google gRPC credentials factory to use. This must have been registered with\n    // Envoy. 
If this is empty, a default credentials factory will be used that sets up channel\n    // credentials based on other configuration parameters.\n    string credentials_factory_name = 5;\n\n    // Additional configuration for site-specific customizations of the Google\n    // gRPC library.\n    google.protobuf.Struct config = 6;\n\n    // How many bytes each stream can buffer internally.\n    // If not set an implementation defined default is applied (1MiB).\n    google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7;\n\n    // Custom channels args.\n    ChannelArgs channel_args = 8;\n  }\n\n  reserved 4;\n\n  oneof target_specifier {\n    option (validate.required) = true;\n\n    // Envoy's in-built gRPC client.\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    EnvoyGrpc envoy_grpc = 1;\n\n    // `Google C++ gRPC client <https://github.com/grpc/grpc>`_\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    GoogleGrpc google_grpc = 2;\n  }\n\n  // The timeout for the gRPC request. This is the timeout for a specific\n  // request.\n  google.protobuf.Duration timeout = 3;\n\n  // Additional metadata to include in streams initiated to the GrpcService.\n  // This can be used for scenarios in which additional ad hoc authorization\n  // headers (e.g. ``x-foo-bar: baz-key``) are to be injected.\n  repeated HeaderValue initial_metadata = 5;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/event_service_config.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/v3/http.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Health check]\n// * Health checking :ref:`architecture overview <arch_overview_health_checking>`.\n// * If health checking is configured for a cluster, additional statistics are emitted. They are\n//   documented :ref:`here <config_cluster_manager_cluster_stats>`.\n\n// Endpoint health status.\nenum HealthStatus {\n  // The health status is not known. This is interpreted by Envoy as *HEALTHY*.\n  UNKNOWN = 0;\n\n  // Healthy.\n  HEALTHY = 1;\n\n  // Unhealthy.\n  UNHEALTHY = 2;\n\n  // Connection draining in progress. E.g.,\n  // `<https://aws.amazon.com/blogs/aws/elb-connection-draining-remove-instances-from-service-with-care/>`_\n  // or\n  // `<https://cloud.google.com/compute/docs/load-balancing/enabling-connection-draining>`_.\n  // This is interpreted by Envoy as *UNHEALTHY*.\n  DRAINING = 3;\n\n  // Health check timed out. 
This is part of HDS and is interpreted by Envoy as\n  // *UNHEALTHY*.\n  TIMEOUT = 4;\n\n  // Degraded.\n  DEGRADED = 5;\n}\n\n// [#next-free-field: 24]\nmessage HealthCheck {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.HealthCheck\";\n\n  // Describes the encoding of the payload bytes in the payload.\n  message Payload {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.Payload\";\n\n    oneof payload {\n      option (validate.required) = true;\n\n      // Hex encoded payload. E.g., \"000000FF\".\n      string text = 1 [(validate.rules).string = {min_len: 1}];\n\n      // [#not-implemented-hide:] Binary payload.\n      bytes binary = 2;\n    }\n  }\n\n  // [#next-free-field: 12]\n  message HttpHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.HttpHealthCheck\";\n\n    reserved 5, 7;\n\n    reserved \"service_name\", \"use_http2\";\n\n    // The value of the host header in the HTTP health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The host header can be customized for a specific endpoint by setting the\n    // :ref:`hostname <envoy_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.\n    string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Specifies the HTTP path that will be requested during health checking. 
For example\n    // */healthcheck*.\n    string path = 2\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // [#not-implemented-hide:] HTTP specific payload.\n    Payload send = 3;\n\n    // [#not-implemented-hide:] HTTP specific response.\n    Payload receive = 4;\n\n    // Specifies a list of HTTP headers that should be added to each request that is sent to the\n    // health checked cluster. For more information, including details on header value syntax, see\n    // the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated HeaderValueOption request_headers_to_add = 6\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request that is sent to the\n    // health checked cluster.\n    repeated string request_headers_to_remove = 8 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default\n    // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open\n    // semantics of :ref:`Int64Range <envoy_api_msg_type.v3.Int64Range>`. The start and end of each\n    // range are required. Only statuses in the range [100, 600) are allowed.\n    repeated type.v3.Int64Range expected_statuses = 9;\n\n    // Use specified application protocol for health checks.\n    type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}];\n\n    // An optional service name parameter which is used to validate the identity of\n    // the health checked cluster using a :ref:`StringMatcher\n    // <envoy_api_msg_type.matcher.v3.StringMatcher>`. 
See the :ref:`architecture overview\n    // <arch_overview_health_checking_identity>` for more information.\n    type.matcher.v3.StringMatcher service_name_matcher = 11;\n  }\n\n  message TcpHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.TcpHealthCheck\";\n\n    // Empty payloads imply a connect-only health check.\n    Payload send = 1;\n\n    // When checking the response, “fuzzy” matching is performed such that each\n    // binary block must be found, and in the order specified, but not\n    // necessarily contiguous.\n    repeated Payload receive = 2;\n  }\n\n  message RedisHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.RedisHealthCheck\";\n\n    // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value\n    // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other\n    // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance\n    // by setting the specified key to any value and waiting for traffic to drain.\n    string key = 1;\n  }\n\n  // `grpc.health.v1.Health\n  // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto>`_-based\n  // healthcheck. See `gRPC doc <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_\n  // for details.\n  message GrpcHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.GrpcHealthCheck\";\n\n    // An optional service name parameter which will be sent to gRPC service in\n    // `grpc.health.v1.HealthCheckRequest\n    // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto#L20>`_.\n    // message. 
See `gRPC health-checking overview\n    // <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_ for more information.\n    string service_name = 1;\n\n    // The value of the :authority header in the gRPC health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The authority header can be customized for a specific endpoint by setting\n    // the :ref:`hostname <envoy_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.\n    string authority = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // Custom health check.\n  message CustomHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.CustomHealthCheck\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    // The registered name of the custom health checker.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // A custom health checker specific configuration which depends on the custom health checker\n    // being instantiated. See :api:`envoy/config/health_checker` for reference.\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Health checks occur over the transport socket specified for the cluster. This implies that if a\n  // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS.\n  //\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  message TlsOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.TlsOptions\";\n\n    // Specifies the ALPN protocols for health check connections. 
This is useful if the\n    // corresponding upstream is using ALPN-based :ref:`FilterChainMatch\n    // <envoy_api_msg_config.listener.v3.FilterChainMatch>` along with different protocols for health checks\n    // versus data connections. If empty, no ALPN protocols will be set on health check connections.\n    repeated string alpn_protocols = 1;\n  }\n\n  reserved 10;\n\n  // The time to wait for a health check response. If the timeout is reached the\n  // health check attempt will be considered a failure.\n  google.protobuf.Duration timeout = 1 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // The interval between health checks.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // An optional jitter amount in milliseconds. If specified, Envoy will start health\n  // checking after for a random time in ms between 0 and initial_jitter. This only\n  // applies to the first health check.\n  google.protobuf.Duration initial_jitter = 20;\n\n  // An optional jitter amount in milliseconds. If specified, during every\n  // interval Envoy will add interval_jitter to the wait time.\n  google.protobuf.Duration interval_jitter = 3;\n\n  // An optional jitter amount as a percentage of interval_ms. If specified,\n  // during every interval Envoy will add interval_ms *\n  // interval_jitter_percent / 100 to the wait time.\n  //\n  // If interval_jitter_ms and interval_jitter_percent are both set, both of\n  // them will be used to increase the wait time.\n  uint32 interval_jitter_percent = 18;\n\n  // The number of unhealthy health checks required before a host is marked\n  // unhealthy. 
Note that for *http* health checking if a host responds with 503\n  // this threshold is ignored and the host is considered unhealthy immediately.\n  google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}];\n\n  // The number of healthy health checks required before a host is marked\n  // healthy. Note that during startup, only a single successful health check is\n  // required to mark a host healthy.\n  google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Non-serving port for health checking.\n  google.protobuf.UInt32Value alt_port = 6;\n\n  // Reuse health check connection between health checks. Default is true.\n  google.protobuf.BoolValue reuse_connection = 7;\n\n  oneof health_checker {\n    option (validate.required) = true;\n\n    // HTTP health check.\n    HttpHealthCheck http_health_check = 8;\n\n    // TCP health check.\n    TcpHealthCheck tcp_health_check = 9;\n\n    // gRPC health check.\n    GrpcHealthCheck grpc_health_check = 11;\n\n    // Custom health check.\n    CustomHealthCheck custom_health_check = 13;\n  }\n\n  // The \"no traffic interval\" is a special health check interval that is used when a cluster has\n  // never had traffic routed to it. This lower interval allows cluster information to be kept up to\n  // date, without sending a potentially large amount of active health checking traffic for no\n  // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the\n  // standard health check interval that is defined. Note that this interval takes precedence over\n  // any other.\n  //\n  // The default value for \"no traffic interval\" is 60 seconds.\n  google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy interval\" is a health check interval that is used for hosts that are marked as\n  // unhealthy. 
As soon as the host is marked as healthy, Envoy will shift back to using the\n  // standard health check interval that is defined.\n  //\n  // The default value for \"unhealthy interval\" is the same as \"interval\".\n  google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as unhealthy. For subsequent health checks\n  // Envoy will shift back to using either \"unhealthy interval\" if present or the standard health\n  // check interval that is defined.\n  //\n  // The default value for \"unhealthy edge interval\" is the same as \"unhealthy interval\".\n  google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}];\n\n  // The \"healthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as healthy. For subsequent health checks\n  // Envoy will shift back to using the standard health check interval that is defined.\n  //\n  // The default value for \"healthy edge interval\" is the same as the default interval.\n  google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}];\n\n  // Specifies the path to the :ref:`health check event log <arch_overview_health_check_logging>`.\n  // If empty, no event log will be written.\n  string event_log_path = 17;\n\n  // [#not-implemented-hide:]\n  // The gRPC service for the health check event service.\n  // If empty, health check events won't be sent to a remote endpoint.\n  EventServiceConfig event_service = 22;\n\n  // If set to true, health check failure events will always be logged. 
If set to false, only the\n  // initial health check failure event will be logged.\n  // The default value is false.\n  bool always_log_health_check_failures = 19;\n\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  TlsOptions tls_options = 21;\n\n  // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's\n  // :ref:`transport socket matches <envoy_api_field_config.cluster.v3.Cluster.transport_socket_matches>`.\n  // For example, the following match criteria\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_match_criteria:\n  //    useMTLS: true\n  //\n  // Will match the following :ref:`cluster socket match <envoy_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>`\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"useMTLS\"\n  //    match:\n  //      useMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... } # tls socket configuration\n  //\n  // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`.\n  // This allows using different transport socket capabilities for health checking versus proxying to the\n  // endpoint.\n  //\n  // If the key/values pairs specified do not match any\n  // :ref:`transport socket matches <envoy_api_field_config.cluster.v3.Cluster.transport_socket_matches>`,\n  // the cluster's :ref:`transport socket <envoy_api_field_config.cluster.v3.Cluster.transport_socket>`\n  // will be used for health check socket configuration.\n  google.protobuf.Struct transport_socket_match_criteria = 23;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/http_uri.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"HttpUriProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP Service URI ]\n\n// Envoy external URI descriptor\nmessage HttpUri {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.HttpUri\";\n\n  // The HTTP server URI. It should be a full FQDN with protocol, host and path.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    uri: https://www.googleapis.com/oauth2/v1/certs\n  //\n  string uri = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Specify how `uri` is to be fetched. Today, this requires an explicit\n  // cluster, but in the future we may support dynamic cluster creation or\n  // inline DNS resolution. See `issue\n  // <https://github.com/envoyproxy/envoy/issues/1606>`_.\n  oneof http_upstream_type {\n    option (validate.required) = true;\n\n    // A cluster is created in the Envoy \"cluster_manager\" config\n    // section. This field specifies the cluster name.\n    //\n    // Example:\n    //\n    // .. code-block:: yaml\n    //\n    //    cluster: jwks_cluster\n    //\n    string cluster = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Sets the maximum duration in milliseconds that a response can take to arrive upon request.\n  google.protobuf.Duration timeout = 3 [(validate.rules).duration = {\n    required: true\n    gte {}\n  }];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"ProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Protocol options]\n\n// [#not-implemented-hide:]\nmessage TcpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.TcpProtocolOptions\";\n}\n\nmessage UpstreamHttpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.UpstreamHttpProtocolOptions\";\n\n  // Set transport socket `SNI <https://en.wikipedia.org/wiki/Server_Name_Indication>`_ for new\n  // upstream connections based on the downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  bool auto_sni = 1;\n\n  // Automatic validate upstream presented certificate for new upstream connections based on the\n  // downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  // This field is intended to set with `auto_sni` field.\n  bool auto_san_validation = 2;\n}\n\n// [#next-free-field: 6]\nmessage HttpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.HttpProtocolOptions\";\n\n  // Action to take when Envoy receives client request with header names containing underscore\n  // characters.\n  // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented\n  // as a security measure due to systems that treat '_' and '-' as interchangeable. 
Envoy by default allows client request headers with underscore\n  // characters.\n  enum HeadersWithUnderscoresAction {\n    // Allow headers with underscores. This is the default behavior.\n    ALLOW = 0;\n\n    // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests\n    // end with the stream reset. The \"httpN.requests_rejected_with_underscores_in_headers\" counter\n    // is incremented for each rejected request.\n    REJECT_REQUEST = 1;\n\n    // Drop the header with name containing underscores. The header is dropped before the filter chain is\n    // invoked and as such filters will not see dropped headers. The\n    // \"httpN.dropped_headers_with_underscores\" is incremented for each dropped header.\n    DROP_HEADER = 2;\n  }\n\n  // The idle timeout for connections. The idle timeout is defined as the\n  // period in which there are no active requests. When the\n  // idle timeout is reached the connection will be closed. If the connection is an HTTP/2\n  // downstream connection a drain sequence will occur prior to closing the connection, see\n  // :ref:`drain_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout>`.\n  // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive.\n  // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0.\n  //\n  // .. warning::\n  //   Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 1;\n\n  // The maximum duration of a connection. The duration is defined as a period since a connection\n  // was established. If not set, there is no max duration. When max_connection_duration is reached\n  // the connection will be closed. Drain sequence will occur prior to closing the connection if\n  // it's applicable. 
See :ref:`drain_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout>`.\n  // Note: not implemented for upstream connections.\n  google.protobuf.Duration max_connection_duration = 3;\n\n  // The maximum number of headers. If unconfigured, the default\n  // maximum number of request headers allowed is 100. Requests that exceed this limit will receive\n  // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.\n  google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}];\n\n  // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be\n  // reset independent of any other timeouts. If not specified, this value is not set.\n  google.protobuf.Duration max_stream_duration = 4;\n\n  // Action to take when a client request with a header name containing underscore characters is received.\n  // If this setting is not specified, the value defaults to ALLOW.\n  // Note: upstream responses are not affected by this setting.\n  HeadersWithUnderscoresAction headers_with_underscores_action = 5;\n}\n\n// [#next-free-field: 8]\nmessage Http1ProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.Http1ProtocolOptions\";\n\n  message HeaderKeyFormat {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat\";\n\n    message ProperCaseWords {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords\";\n    }\n\n    oneof header_format {\n      option (validate.required) = true;\n\n      // Formats the header by proper casing words: the first character and any character following\n      // a special character will be capitalized if it's an alpha character. 
For example,\n      // \"content-type\" becomes \"Content-Type\", and \"foo$b#$are\" becomes \"Foo$B#$Are\".\n      // Note that while this results in most headers following conventional casing, certain headers\n      // are not covered. For example, the \"TE\" header will be formatted as \"Te\".\n      ProperCaseWords proper_case_words = 1;\n    }\n  }\n\n  // Handle HTTP requests with absolute URLs in the requests. These requests\n  // are generally sent by clients to forward/explicit proxies. This allows clients to configure\n  // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the\n  // *http_proxy* environment variable.\n  google.protobuf.BoolValue allow_absolute_url = 1;\n\n  // Handle incoming HTTP/1.0 and HTTP 0.9 requests.\n  // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1\n  // style connect logic, dechunking, and handling lack of client host iff\n  // *default_host_for_http_10* is configured.\n  bool accept_http_10 = 2;\n\n  // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as\n  // Envoy does not otherwise support HTTP/1.0 without a Host header.\n  // This is a no-op if *accept_http_10* is not true.\n  string default_host_for_http_10 = 3;\n\n  // Describes how the keys for response headers should be formatted. By default, all header keys\n  // are lower cased.\n  HeaderKeyFormat header_key_format = 4;\n\n  // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.\n  //\n  // .. 
attention::\n  //\n  //   Note that this only happens when Envoy is chunk encoding which occurs when:\n  //   - The request is HTTP/1.1.\n  //   - Is neither a HEAD only request nor a HTTP Upgrade.\n  //   - Not a response to a HEAD request.\n  //   - The content length header is not present.\n  bool enable_trailers = 5;\n\n  // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding`\n  // headers set. By default such messages are rejected, but if option is enabled - Envoy will\n  // remove Content-Length header and process message.\n  // See `RFC7230, sec. 3.3.3 <https://tools.ietf.org/html/rfc7230#section-3.3.3>` for details.\n  //\n  // .. attention::\n  //   Enabling this option might lead to request smuggling vulnerability, especially if traffic\n  //   is proxied via multiple layers of proxies.\n  bool allow_chunked_length = 6;\n\n  // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate\n  // HTTP/1.1 connections upon receiving an invalid HTTP message. However,\n  // when this option is true, then Envoy will leave the HTTP/1.1 connection\n  // open where possible.\n  // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`.\n  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7;\n}\n\nmessage KeepaliveSettings {\n  // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive.\n  google.protobuf.Duration interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // How long to wait for a response to a keepalive PING. 
If a response is not received within this\n  // time period, the connection will be aborted.\n  google.protobuf.Duration timeout = 2 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // A random jitter amount as a percentage of interval that will be added to each interval.\n  // A value of zero means there will be no jitter.\n  // The default value is 15%.\n  type.v3.Percent interval_jitter = 3;\n}\n\n// [#next-free-field: 16]\nmessage Http2ProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.Http2ProtocolOptions\";\n\n  // Defines a parameter to be sent in the SETTINGS frame.\n  // See `RFC7540, sec. 6.5.1 <https://tools.ietf.org/html/rfc7540#section-6.5.1>`_ for details.\n  message SettingsParameter {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter\";\n\n    // The 16 bit parameter identifier.\n    google.protobuf.UInt32Value identifier = 1 [\n      (validate.rules).uint32 = {lte: 65535 gte: 0},\n      (validate.rules).message = {required: true}\n    ];\n\n    // The 32 bit parameter value.\n    google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}];\n  }\n\n  // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_\n  // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values\n  // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header\n  // compression.\n  google.protobuf.UInt32Value hpack_table_size = 1;\n\n  // `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_\n  // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1)\n  // and defaults to 2147483647.\n  //\n  // For upstream connections, this also limits how many streams Envoy will initiate concurrently\n  // on a single connection. 
If the limit is reached, Envoy may queue requests or establish\n  // additional connections (as allowed per circuit breaker limits).\n  google.protobuf.UInt32Value max_concurrent_streams = 2\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 1}];\n\n  // `Initial stream-level flow-control window\n  // <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size. Valid values range from 65535\n  // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456\n  // (256 * 1024 * 1024).\n  //\n  // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default\n  // window size now, so it's also the minimum.\n  //\n  // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the\n  // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to\n  // stop the flow of data to the codec buffers.\n  google.protobuf.UInt32Value initial_stream_window_size = 3\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Similar to *initial_stream_window_size*, but for connection-level flow-control\n  // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*.\n  google.protobuf.UInt32Value initial_connection_window_size = 4\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Allows proxying Websocket and other upgrades over H2 connect.\n  bool allow_connect = 5;\n\n  // [#not-implemented-hide:] Hiding until envoy has full metadata support.\n  // Still under implementation. DO NOT USE.\n  //\n  // Allows metadata. See [metadata\n  // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more\n  // information.\n  bool allow_metadata = 6;\n\n  // Limit the number of pending outbound downstream frames of all types (frames that are waiting to\n  // be written into the socket). 
Exceeding this limit triggers flood mitigation and connection is\n  // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due\n  // to flood mitigation. The default limit is 10000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,\n  // preventing high memory utilization when receiving continuous stream of these frames. Exceeding\n  // this limit triggers flood mitigation and connection is terminated. The\n  // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood\n  // mitigation. The default limit is 1000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an\n  // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but\n  // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood``\n  // stat tracks the number of connections terminated due to flood mitigation.\n  // Setting this to 0 will terminate connection upon receiving first frame with an empty payload\n  // and no end stream flag. The default limit is 1.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9;\n\n  // Limit the number of inbound PRIORITY frames allowed per each opened stream. 
If the number\n  // of PRIORITY frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     max_inbound_priority_frames_per_stream * (1 + inbound_streams)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 100.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10;\n\n  // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number\n  // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     1 + 2 * (inbound_streams +\n  //              max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 10.\n  // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,\n  // but more complex implementations that try to estimate available bandwidth require at least 2.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11\n      [(validate.rules).uint32 = {gte: 1}];\n\n  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then\n  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. 
However,\n  // when this option is enabled, only the offending stream is terminated.\n  //\n  // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`\n  // iff present.\n  //\n  // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>`\n  //\n  // See `RFC7540, sec. 8.1 <https://tools.ietf.org/html/rfc7540#section-8.1>`_ for details.\n  bool stream_error_on_invalid_http_messaging = 12 [deprecated = true];\n\n  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then\n  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,\n  // when this option is enabled, only the offending stream is terminated.\n  //\n  // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`\n  //\n  // See `RFC7540, sec. 8.1 <https://tools.ietf.org/html/rfc7540#section-8.1>`_ for details.\n  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14;\n\n  // [#not-implemented-hide:]\n  // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions:\n  //\n  // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by\n  // Envoy.\n  //\n  // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field\n  // 'allow_connect'.\n  //\n  // Note that custom parameters specified through this field can not also be set in the\n  // corresponding named parameters:\n  //\n  // .. 
code-block:: text\n  //\n  //   ID    Field Name\n  //   ----------------\n  //   0x1   hpack_table_size\n  //   0x3   max_concurrent_streams\n  //   0x4   initial_stream_window_size\n  //\n  // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies\n  // between custom parameters with the same identifier will trigger a failure.\n  //\n  // See `IANA HTTP/2 Settings\n  // <https://www.iana.org/assignments/http2-parameters/http2-parameters.xhtml#settings>`_ for\n  // standardized identifiers.\n  repeated SettingsParameter custom_settings_parameters = 13;\n\n  // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer\n  // does not respond within the configured timeout, the connection will be aborted.\n  KeepaliveSettings connection_keepalive = 15;\n}\n\n// [#not-implemented-hide:]\nmessage GrpcProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.GrpcProtocolOptions\";\n\n  Http2ProtocolOptions http2_protocol_options = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"ProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Proxy Protocol]\n\nmessage ProxyProtocolConfig {\n  enum Version {\n    // PROXY protocol version 1. Human readable format.\n    V1 = 0;\n\n    // PROXY protocol version 2. Binary format.\n    V2 = 1;\n  }\n\n  // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details\n  Version version = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/socket_option.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"SocketOptionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Socket Option ]\n\n// Generic socket option message. This would be used to set socket options that\n// might not exist in upstream kernels or precompiled Envoy binaries.\n// [#next-free-field: 7]\nmessage SocketOption {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.SocketOption\";\n\n  enum SocketState {\n    // Socket options are applied after socket creation but before binding the socket to a port\n    STATE_PREBIND = 0;\n\n    // Socket options are applied after binding the socket to a port but before calling listen()\n    STATE_BOUND = 1;\n\n    // Socket options are applied after calling listen()\n    STATE_LISTENING = 2;\n  }\n\n  // An optional name to give this socket option for debugging, etc.\n  // Uniqueness is not required and no special meaning is assumed.\n  string description = 1;\n\n  // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP\n  int64 level = 2;\n\n  // The numeric name as passed to setsockopt\n  int64 name = 3;\n\n  oneof value {\n    option (validate.required) = true;\n\n    // Because many sockopts take an int value.\n    int64 int_value = 4;\n\n    // Otherwise it's a byte buffer.\n    bytes buf_value = 5;\n  }\n\n  // The state in which the option will be applied. When used in BindConfig\n  // STATE_PREBIND is currently the only valid value.\n  SocketState state = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v3/substitution_format_string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"SubstitutionFormatStringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Substitution format string]\n\n// Configuration to use multiple :ref:`command operators <config_access_log_command_operators>`\n// to generate a new string in either plain text or JSON format.\nmessage SubstitutionFormatString {\n  oneof format {\n    option (validate.required) = true;\n\n    // Specify a format with command operators to form a text string.\n    // Its details is described in :ref:`format string<config_access_log_format_strings>`.\n    //\n    // For example, setting ``text_format`` like below,\n    //\n    // .. validated-code-block:: yaml\n    //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n    //\n    //   text_format: \"%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\\n\"\n    //\n    // generates plain text similar to:\n    //\n    // .. code-block:: text\n    //\n    //   upstream connect error:503:path=/foo\n    //\n    string text_format = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Specify a format with command operators to form a JSON string.\n    // Its details is described in :ref:`format dictionary<config_access_log_format_dictionaries>`.\n    // Values are rendered as strings, numbers, or boolean values as appropriate.\n    // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA).\n    // See the documentation for a specific command operator for details.\n    //\n    // .. 
validated-code-block:: yaml\n    //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n    //\n    //   json_format:\n    //     status: \"%RESPONSE_CODE%\"\n    //     message: \"%LOCAL_REPLY_BODY%\"\n    //\n    // The following JSON object would be created:\n    //\n    // .. code-block:: json\n    //\n    //  {\n    //    \"status\": 500,\n    //    \"message\": \"My error message\"\n    //  }\n    //\n    google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}];\n  }\n\n  // If set to true, when command operators are evaluated to null,\n  //\n  // * for ``text_format``, the output of the empty operator is changed from ``-`` to an\n  //   empty string, so that empty values are omitted entirely.\n  // * for ``json_format`` the keys with null values are omitted in the output structure.\n  bool omit_empty_values = 3;\n\n  // Specify a *content_type* field.\n  // If this field is not set then ``text/plain`` is used for *text_format* and\n  // ``application/json`` is used for *json_format*.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   content_type: \"text/html; charset=UTF-8\"\n  //\n  string content_type = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/address.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/socket_option.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"AddressProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Network addresses]\n\nmessage Pipe {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Pipe\";\n\n  // Unix Domain Socket path. On Linux, paths starting with '@' will use the\n  // abstract namespace. The starting '@' is replaced by a null byte by Envoy.\n  // Paths starting with '@' will result in an error in environments other than\n  // Linux.\n  string path = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The mode for the Pipe. 
Not applicable for abstract sockets.\n  uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}];\n}\n\n// [#not-implemented-hide:] The address represents an envoy internal listener.\n// TODO(lambdai): Make this address available for listener and endpoint.\n// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.\nmessage EnvoyInternalAddress {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.EnvoyInternalAddress\";\n\n  oneof address_name_specifier {\n    option (validate.required) = true;\n\n    // [#not-implemented-hide:] The :ref:`listener name <envoy_api_field_config.listener.v4alpha.Listener.name>` of the destination internal listener.\n    string server_listener_name = 1;\n  }\n}\n\n// [#next-free-field: 7]\nmessage SocketAddress {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.SocketAddress\";\n\n  enum Protocol {\n    TCP = 0;\n    UDP = 1;\n  }\n\n  Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The address for this socket. :ref:`Listeners <config_listeners>` will bind\n  // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::``\n  // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented:\n  // It is possible to distinguish a Listener address via the prefix/suffix matching\n  // in :ref:`FilterChainMatch <envoy_api_msg_config.listener.v4alpha.FilterChainMatch>`.] When used\n  // within an upstream :ref:`BindConfig <envoy_api_msg_config.core.v4alpha.BindConfig>`, the address\n  // controls the source address of outbound connections. For :ref:`clusters\n  // <envoy_api_msg_config.cluster.v4alpha.Cluster>`, the cluster type determines whether the\n  // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS\n  // (*STRICT_DNS* or *LOGICAL_DNS* clusters). 
Address resolution can be customized\n  // via :ref:`resolver_name <envoy_api_field_config.core.v4alpha.SocketAddress.resolver_name>`.\n  string address = 2 [(validate.rules).string = {min_len: 1}];\n\n  oneof port_specifier {\n    option (validate.required) = true;\n\n    uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}];\n\n    // This is only valid if :ref:`resolver_name\n    // <envoy_api_field_config.core.v4alpha.SocketAddress.resolver_name>` is specified below and the\n    // named resolver is capable of named port resolution.\n    string named_port = 4;\n  }\n\n  // The name of the custom resolver. This must have been registered with Envoy. If\n  // this is empty, a context dependent default applies. If the address is a concrete\n  // IP address, no resolution will occur. If address is a hostname this\n  // should be set for resolution other than DNS. Specifying a custom resolver with\n  // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime.\n  string resolver_name = 5;\n\n  // When binding to an IPv6 address above, this enables `IPv4 compatibility\n  // <https://tools.ietf.org/html/rfc3493#page-11>`_. Binding to ``::`` will\n  // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into\n  // IPv6 space as ``::FFFF:<IPv4-address>``.\n  bool ipv4_compat = 6;\n}\n\nmessage TcpKeepalive {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.TcpKeepalive\";\n\n  // Maximum number of keepalive probes to send without response before deciding\n  // the connection is dead. Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 9.)\n  google.protobuf.UInt32Value keepalive_probes = 1;\n\n  // The number of seconds a connection needs to be idle before keep-alive probes\n  // start being sent. 
Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 7200s (i.e., 2 hours.)\n  google.protobuf.UInt32Value keepalive_time = 2;\n\n  // The number of seconds between keep-alive probes. Default is to use the OS\n  // level configuration (unless overridden, Linux defaults to 75s.)\n  google.protobuf.UInt32Value keepalive_interval = 3;\n}\n\nmessage BindConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.BindConfig\";\n\n  // The address to bind to when creating a socket.\n  SocketAddress source_address = 1 [(validate.rules).message = {required: true}];\n\n  // Whether to set the *IP_FREEBIND* option when creating the socket. When this\n  // flag is set to true, allows the :ref:`source_address\n  // <envoy_api_field_config.cluster.v4alpha.UpstreamBindConfig.source_address>` to be an IP address\n  // that is not configured on the system running Envoy. When this flag is set\n  // to false, the option *IP_FREEBIND* is disabled on the socket. When this\n  // flag is not set (default), the socket is not modified, i.e. 
the option is\n  // neither enabled nor disabled.\n  google.protobuf.BoolValue freebind = 2;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated SocketOption socket_options = 3;\n}\n\n// Addresses specify either a logical or physical address and port, which are\n// used to tell Envoy where to bind/listen, connect to upstream and find\n// management servers.\nmessage Address {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Address\";\n\n  oneof address {\n    option (validate.required) = true;\n\n    SocketAddress socket_address = 1;\n\n    Pipe pipe = 2;\n\n    // [#not-implemented-hide:]\n    EnvoyInternalAddress envoy_internal_address = 3;\n  }\n}\n\n// CidrRange specifies an IP Address and a prefix length to construct\n// the subnet mask for a `CIDR <https://tools.ietf.org/html/rfc4632>`_ range.\nmessage CidrRange {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.CidrRange\";\n\n  // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.\n  string address_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Length of prefix, e.g. 0, 32.\n  google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/backoff.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"BackoffProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Backoff Strategy]\n\n// Configuration defining a jittered exponential back off strategy.\nmessage BackoffStrategy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.BackoffStrategy\";\n\n  // The base interval to be used for the next back off computation. It should\n  // be greater than zero and less than or equal to :ref:`max_interval\n  // <envoy_api_field_config.core.v4alpha.BackoffStrategy.max_interval>`.\n  google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // Specifies the maximum interval between retries. This parameter is optional,\n  // but must be greater than or equal to the :ref:`base_interval\n  // <envoy_api_field_config.core.v4alpha.BackoffStrategy.base_interval>` if set. The default\n  // is 10 times the :ref:`base_interval\n  // <envoy_api_field_config.core.v4alpha.BackoffStrategy.base_interval>`.\n  google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/base.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/backoff.proto\";\nimport \"envoy/config/core/v4alpha/http_uri.proto\";\nimport \"envoy/type/v3/percent.proto\";\nimport \"envoy/type/v3/semantic_version.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"BaseProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common types]\n\n// Envoy supports :ref:`upstream priority routing\n// <arch_overview_http_routing_priority>` both at the route and the virtual\n// cluster level. The current priority implementation uses different connection\n// pool and circuit breaking settings for each priority level. This means that\n// even for HTTP/2 requests, two physical connections will be used to an\n// upstream host. 
In the future Envoy will likely support true HTTP/2 priority\n// over a single upstream connection.\nenum RoutingPriority {\n  DEFAULT = 0;\n  HIGH = 1;\n}\n\n// HTTP request method.\nenum RequestMethod {\n  METHOD_UNSPECIFIED = 0;\n  GET = 1;\n  HEAD = 2;\n  POST = 3;\n  PUT = 4;\n  DELETE = 5;\n  CONNECT = 6;\n  OPTIONS = 7;\n  TRACE = 8;\n  PATCH = 9;\n}\n\n// Identifies the direction of the traffic relative to the local Envoy.\nenum TrafficDirection {\n  // Default option is unspecified.\n  UNSPECIFIED = 0;\n\n  // The transport is used for incoming traffic.\n  INBOUND = 1;\n\n  // The transport is used for outgoing traffic.\n  OUTBOUND = 2;\n}\n\n// Identifies location of where either Envoy runs or where upstream hosts run.\nmessage Locality {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Locality\";\n\n  // Region this :ref:`zone <envoy_api_field_config.core.v4alpha.Locality.zone>` belongs to.\n  string region = 1;\n\n  // Defines the local service zone where Envoy is running. Though optional, it\n  // should be set if discovery service routing is used and the discovery\n  // service exposes :ref:`zone data <envoy_api_field_config.endpoint.v3.LocalityLbEndpoints.locality>`,\n  // either in this message or via :option:`--service-zone`. The meaning of zone\n  // is context dependent, e.g. `Availability Zone (AZ)\n  // <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html>`_\n  // on AWS, `Zone <https://cloud.google.com/compute/docs/regions-zones/>`_ on\n  // GCP, etc.\n  string zone = 2;\n\n  // When used for locality of upstream hosts, this field further splits zone\n  // into smaller chunks of sub-zones so they can be load balanced\n  // independently.\n  string sub_zone = 3;\n}\n\n// BuildVersion combines SemVer version of extension with free-form build information\n// (i.e. 
'alpha', 'private-build') as a set of strings.\nmessage BuildVersion {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.BuildVersion\";\n\n  // SemVer version of extension.\n  type.v3.SemanticVersion version = 1;\n\n  // Free-form build information.\n  // Envoy defines several well known keys in the source/common/version/version.h file\n  google.protobuf.Struct metadata = 2;\n}\n\n// Version and identification for an Envoy extension.\n// [#next-free-field: 6]\nmessage Extension {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Extension\";\n\n  // This is the name of the Envoy filter as specified in the Envoy\n  // configuration, e.g. envoy.filters.http.router, com.acme.widget.\n  string name = 1;\n\n  // Category of the extension.\n  // Extension category names use reverse DNS notation. For instance \"envoy.filters.listener\"\n  // for Envoy's built-in listener filters or \"com.acme.filters.http\" for HTTP filters from\n  // acme.com vendor.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.]\n  string category = 2;\n\n  // [#not-implemented-hide:] Type descriptor of extension configuration proto.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.]\n  // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.]\n  string type_descriptor = 3;\n\n  // The version is a property of the extension and maintained independently\n  // of other extensions and the Envoy API.\n  // This field is not set when extension did not provide version information.\n  BuildVersion version = 4;\n\n  // Indicates that the extension is present but was disabled via dynamic configuration.\n  bool disabled = 5;\n}\n\n// Identifies a specific Envoy instance. 
The node identifier is presented to the\n// management server, which may use this identifier to distinguish per Envoy\n// configuration for serving.\n// [#next-free-field: 12]\nmessage Node {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Node\";\n\n  reserved 5, 11;\n\n  reserved \"build_version\", \"listening_addresses\";\n\n  // An opaque node identifier for the Envoy node. This also provides the local\n  // service node name. It should be set if any of the following features are\n  // used: :ref:`statsd <arch_overview_statistics>`, :ref:`CDS\n  // <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-node`.\n  string id = 1;\n\n  // Defines the local service cluster name where Envoy is running. Though\n  // optional, it should be set if any of the following features are used:\n  // :ref:`statsd <arch_overview_statistics>`, :ref:`health check cluster\n  // verification\n  // <envoy_api_field_config.core.v4alpha.HealthCheck.HttpHealthCheck.service_name_matcher>`,\n  // :ref:`runtime override directory <envoy_api_msg_config.bootstrap.v4alpha.Runtime>`,\n  // :ref:`user agent addition\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.add_user_agent>`,\n  // :ref:`HTTP global rate limiting <config_http_filters_rate_limit>`,\n  // :ref:`CDS <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-cluster`.\n  string cluster = 2;\n\n  // Opaque metadata extending the node identifier. Envoy will pass this\n  // directly to the management server.\n  google.protobuf.Struct metadata = 3;\n\n  // Locality specifying where the Envoy instance is running.\n  Locality locality = 4;\n\n  // Free-form string that identifies the entity requesting config.\n  // E.g. 
\"envoy\" or \"grpc\"\n  string user_agent_name = 6;\n\n  oneof user_agent_version_type {\n    // Free-form string that identifies the version of the entity requesting config.\n    // E.g. \"1.12.2\" or \"abcd1234\", or \"SpecialEnvoyBuild\"\n    string user_agent_version = 7;\n\n    // Structured version of the entity requesting config.\n    BuildVersion user_agent_build_version = 8;\n  }\n\n  // List of extensions and their versions supported by the node.\n  repeated Extension extensions = 9;\n\n  // Client feature support list. These are well known features described\n  // in the Envoy API repository for a given major version of an API. Client features\n  // use reverse DNS naming scheme, for example `com.acme.feature`.\n  // See :ref:`the list of features <client_features>` that xDS client may\n  // support.\n  repeated string client_features = 10;\n}\n\n// Metadata provides additional inputs to filters based on matched listeners,\n// filter chains, routes and endpoints. It is structured as a map, usually from\n// filter name (in reverse DNS format) to metadata specific to the filter. Metadata\n// key-values for a filter are merged as connection and request handling occurs,\n// with later values for the same key overriding earlier values.\n//\n// An example use of metadata is providing additional values to\n// http_connection_manager in the envoy.http_connection_manager.access_log\n// namespace.\n//\n// Another example use of metadata is to per service config info in cluster metadata, which may get\n// consumed by multiple filters.\n//\n// For load balancing, Metadata provides a means to subset cluster endpoints.\n// Endpoints have a Metadata object associated and routes contain a Metadata\n// object to match against. 
There are some well defined metadata used today for\n// this purpose:\n//\n// * ``{\"envoy.lb\": {\"canary\": <bool> }}`` This indicates the canary status of an\n//   endpoint and is also used during header processing\n//   (x-envoy-upstream-canary) and for stats purposes.\n// [#next-major-version: move to type/metadata/v2]\nmessage Metadata {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Metadata\";\n\n  // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*\n  // namespace is reserved for Envoy's built-in filters.\n  map<string, google.protobuf.Struct> filter_metadata = 1;\n}\n\n// Runtime derived uint32 with a default when not specified.\nmessage RuntimeUInt32 {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.RuntimeUInt32\";\n\n  // Default value if runtime value is not available.\n  uint32 default_value = 2;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 3 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived percentage with a default when not specified.\nmessage RuntimePercent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RuntimePercent\";\n\n  // Default value if runtime value is not available.\n  type.v3.Percent default_value = 1;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived double with a default when not specified.\nmessage RuntimeDouble {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.RuntimeDouble\";\n\n  // Default value if runtime value is not available.\n  double default_value = 1;\n\n  // Runtime key to get value for comparison. 
This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived bool with a default when not specified.\nmessage RuntimeFeatureFlag {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RuntimeFeatureFlag\";\n\n  // Default value if runtime value is not available.\n  google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key to get value for comparison. This value is used if defined. The boolean value must\n  // be represented via its\n  // `canonical JSON encoding <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Header name/value pair.\nmessage HeaderValue {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.HeaderValue\";\n\n  // Header name.\n  string key = 1\n      [(validate.rules).string =\n           {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Header value.\n  //\n  // The same :ref:`format specifier <config_access_log_format>` as used for\n  // :ref:`HTTP access logging <config_access_log>` applies here, however\n  // unknown header values are replaced with the empty string instead of `-`.\n  string value = 2 [\n    (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}\n  ];\n}\n\n// Header name/value pair plus option to control append behavior.\nmessage HeaderValueOption {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.HeaderValueOption\";\n\n  // Header name/value pair that this option applies to.\n  HeaderValue header = 1 [(validate.rules).message = {required: true}];\n\n  // Should the value be appended? If true (default), the value is appended to\n  // existing values. 
Otherwise it replaces any existing values.\n  google.protobuf.BoolValue append = 2;\n}\n\n// Wrapper for a set of headers.\nmessage HeaderMap {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.HeaderMap\";\n\n  repeated HeaderValue headers = 1;\n}\n\n// Data source consisting of either a file or an inline value.\nmessage DataSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.DataSource\";\n\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local filesystem data source.\n    string filename = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Bytes inlined in the configuration.\n    bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];\n\n    // String inlined in the configuration.\n    string inline_string = 3 [(validate.rules).string = {min_len: 1}];\n  }\n}\n\n// The message specifies the retry policy of remote data source when fetching fails.\nmessage RetryPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.RetryPolicy\";\n\n  // Specifies parameters that control :ref:`retry backoff strategy <envoy_api_msg_config.core.v4alpha.BackoffStrategy>`.\n  // This parameter is optional, in which case the default base interval is 1000 milliseconds. The\n  // default maximum interval is 10 times the base interval.\n  BackoffStrategy retry_back_off = 1;\n\n  // Specifies the allowed number of retries. 
This parameter is optional and\n  // defaults to 1.\n  google.protobuf.UInt32Value max_retries = 2;\n}\n\n// The message specifies how to fetch data from remote and how to verify it.\nmessage RemoteDataSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RemoteDataSource\";\n\n  // The HTTP URI to fetch the remote data.\n  HttpUri http_uri = 1 [(validate.rules).message = {required: true}];\n\n  // SHA256 string for verifying data.\n  string sha256 = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Retry policy for fetching remote data.\n  RetryPolicy retry_policy = 3;\n}\n\n// Async data source which support async data fetch.\nmessage AsyncDataSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.AsyncDataSource\";\n\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local async data source.\n    DataSource local = 1;\n\n    // Remote async data source.\n    RemoteDataSource remote = 2;\n  }\n}\n\n// Configuration for transport socket in :ref:`listeners <config_listeners>` and\n// :ref:`clusters <envoy_api_msg_config.cluster.v4alpha.Cluster>`. If the configuration is\n// empty, a default transport socket implementation and configuration will be\n// chosen based on the platform and existence of tls_context.\nmessage TransportSocket {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.TransportSocket\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the transport socket to instantiate. 
The name must match a supported transport\n  // socket implementation.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Implementation specific configuration which depends on the implementation being instantiated.\n  // See the supported transport socket implementations for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not\n// specified via a runtime key.\n//\n// .. note::\n//\n//   Parsing of the runtime key's data is implemented such that it may be represented as a\n//   :ref:`FractionalPercent <envoy_api_msg_type.v3.FractionalPercent>` proto represented as JSON/YAML\n//   and may also be represented as an integer with the assumption that the value is an integral\n//   percentage out of 100. For instance, a runtime key lookup returning the value \"42\" would parse\n//   as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.\nmessage RuntimeFractionalPercent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RuntimeFractionalPercent\";\n\n  // Default value if the runtime value's for the numerator/denominator keys are not available.\n  type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key for a YAML representation of a FractionalPercent.\n  string runtime_key = 2;\n}\n\n// Identifies a specific ControlPlane instance that Envoy is connected to.\nmessage ControlPlane {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.ControlPlane\";\n\n  // An opaque control plane identifier that uniquely identifies an instance\n  // of control plane. This can be used to identify which control plane instance,\n  // the Envoy is connected to.\n  string identifier = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/config_source.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/authority.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"ConfigSourceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Configuration sources]\n\n// xDS API and non-xDS services version. This is used to describe both resource and transport\n// protocol versions (in distinct configuration fields).\nenum ApiVersion {\n  // When not specified, we assume v2, to ease migration to Envoy's stable API\n  // versioning. If a client does not support v2 (e.g. due to deprecation), this\n  // is an invalid value.\n  AUTO = 0;\n\n  // Use xDS v2 API.\n  V2 = 1;\n\n  // Use xDS v3 API.\n  V3 = 2;\n}\n\n// API configuration source. This identifies the API type and cluster that Envoy\n// will use to fetch an xDS API.\n// [#next-free-field: 9]\nmessage ApiConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.ApiConfigSource\";\n\n  // APIs may be fetched via either REST or gRPC.\n  enum ApiType {\n    // Ideally this would be 'reserved 0' but one can't reserve the default\n    // value. Instead we throw an exception if this is ever used.\n    DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // REST-JSON v2 API. 
The `canonical JSON encoding\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_ for\n    // the v2 protos is used.\n    REST = 1;\n\n    // SotW gRPC service.\n    GRPC = 2;\n\n    // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}\n    // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state\n    // with every update, the xDS server only sends what has changed since the last update.\n    DELTA_GRPC = 3;\n\n    // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be\n    // multiplexed on a single connection to an ADS endpoint.\n    // [#not-implemented-hide:]\n    AGGREGATED_GRPC = 5;\n\n    // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be\n    // multiplexed on a single connection to an ADS endpoint.\n    // [#not-implemented-hide:]\n    AGGREGATED_DELTA_GRPC = 6;\n  }\n\n  // API type (gRPC, REST, delta gRPC)\n  ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}];\n\n  // Cluster names should be used only with REST. If > 1\n  // cluster is defined, clusters will be cycled through if any kind of failure\n  // occurs.\n  //\n  // .. note::\n  //\n  //  The cluster with name ``cluster_name`` must be statically defined and its\n  //  type must not be ``EDS``.\n  repeated string cluster_names = 2;\n\n  // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined,\n  // services will be cycled through if any kind of failure occurs.\n  repeated GrpcService grpc_services = 4;\n\n  // For REST APIs, the delay between successive polls.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // For REST APIs, the request timeout. 
If not set, a default value of 1s will be used.\n  google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}];\n\n  // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be\n  // rate limited.\n  RateLimitSettings rate_limit_settings = 6;\n\n  // Skip the node identifier in subsequent discovery requests for streaming gRPC config types.\n  bool set_node_on_first_message_only = 7;\n}\n\n// Aggregated Discovery Service (ADS) options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_config.core.v4alpha.ConfigSource>` can be used to\n// specify that ADS is to be used.\nmessage AggregatedConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.AggregatedConfigSource\";\n}\n\n// [#not-implemented-hide:]\n// Self-referencing config source options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_config.core.v4alpha.ConfigSource>` can be used to\n// specify that other data can be obtained from the same server.\nmessage SelfConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.SelfConfigSource\";\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Rate Limit settings to be applied for discovery requests made by Envoy.\nmessage RateLimitSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RateLimitSettings\";\n\n  // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a\n  // default value of 100 will be used.\n  google.protobuf.UInt32Value max_tokens = 1;\n\n  // Rate at which tokens will be filled per second. 
If not set, a default fill rate of 10 tokens\n  // per second will be used.\n  google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}];\n}\n\n// Configuration for :ref:`listeners <config_listeners>`, :ref:`clusters\n// <config_cluster_manager>`, :ref:`routes\n// <envoy_api_msg_config.route.v4alpha.RouteConfiguration>`, :ref:`endpoints\n// <arch_overview_service_discovery>` etc. may either be sourced from the\n// filesystem or from an xDS API source. Filesystem configs are watched with\n// inotify for updates.\n// [#next-free-field: 8]\nmessage ConfigSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.ConfigSource\";\n\n  // Authorities that this config source may be used for. An authority specified\n  // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior\n  // to configuration fetch. This field provides the association between\n  // authority name and configuration source.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.Authority authorities = 7;\n\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Path on the filesystem to source and watch for configuration updates.\n    // When sourcing configuration for :ref:`secret <envoy_api_msg_extensions.transport_sockets.tls.v4alpha.Secret>`,\n    // the certificate and key files are also watched for updates.\n    //\n    // .. note::\n    //\n    //  The path to the source must exist at config load time.\n    //\n    // .. note::\n    //\n    //   Envoy will only watch the file path for *moves.* This is because in general only moves\n    //   are atomic. The same method of swapping files as is demonstrated in the\n    //   :ref:`runtime documentation <config_runtime_symbolic_link_swap>` can be used here also.\n    string path = 1;\n\n    // API configuration source.\n    ApiConfigSource api_config_source = 2;\n\n    // When set, ADS will be used to fetch resources. 
The ADS API configuration\n    // source in the bootstrap configuration is used.\n    AggregatedConfigSource ads = 3;\n\n    // [#not-implemented-hide:]\n    // When set, the client will access the resources from the same server it got the\n    // ConfigSource from, although not necessarily from the same stream. This is similar to the\n    // :ref:`ads<envoy_api_field.ConfigSource.ads>` field, except that the client may use a\n    // different stream to the same server. As a result, this field can be used for things\n    // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.)\n    // LDS to RDS on the same server without requiring the management server to know its name\n    // or required credentials.\n    // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since\n    // this field can implicitly mean to use the same stream in the case where the ConfigSource\n    // is provided via ADS and the specified data can also be obtained via ADS.]\n    SelfConfigSource self = 5;\n  }\n\n  // When this timeout is specified, Envoy will wait no longer than the specified time for first\n  // config response on this xDS subscription during the :ref:`initialization process\n  // <arch_overview_initialization>`. After reaching the timeout, Envoy will move to the next\n  // initialization phase, even if the first config is not delivered yet. The timer is activated\n  // when the xDS API subscription starts, and is disarmed on first config update or on error. 0\n  // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another\n  // timeout applies). The default is 15s.\n  google.protobuf.Duration initial_fetch_timeout = 4;\n\n  // API version for xDS resources. 
This implies the type URLs that the client\n  // will request for resources and the resource type that the client will in\n  // turn expect to be delivered.\n  ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/event_service_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"EventServiceConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#not-implemented-hide:]\n// Configuration of the event reporting service endpoint.\nmessage EventServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.EventServiceConfig\";\n\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Specifies the gRPC service that hosts the event reporting service.\n    GrpcService grpc_service = 1;\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/extension.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/config_source.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"ExtensionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Extension configuration]\n\n// Message type for extension configuration.\n// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.].\nmessage TypedExtensionConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.TypedExtensionConfig\";\n\n  // The name of an extension. This is not used to select the extension, instead\n  // it serves the role of an opaque identifier.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The typed config for the extension. The type URL will be used to identify\n  // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*,\n  // the inner type URL of *TypedStruct* will be utilized. See the\n  // :ref:`extension configuration overview\n  // <config_overview_extension_configuration>` for further details.\n  google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}];\n}\n\n// Configuration source specifier for a late-bound extension configuration. The\n// parent resource is warmed until all the initial extension configurations are\n// received, unless the flag to apply the default configuration is set.\n// Subsequent extension updates are atomic on a per-worker basis. Once an\n// extension configuration is applied to a request or a connection, it remains\n// constant for the duration of processing. 
If the initial delivery of the\n// extension configuration fails, due to a timeout for example, the optional\n// default configuration is applied. Without a default configuration, the\n// extension is disabled, until an extension configuration is received. The\n// behavior of a disabled extension depends on the context. For example, a\n// filter chain with a disabled extension filter rejects all incoming streams.\nmessage ExtensionConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.ExtensionConfigSource\";\n\n  ConfigSource config_source = 1 [(validate.rules).any = {required: true}];\n\n  // Optional default configuration to use as the initial configuration if\n  // there is a failure to receive the initial extension configuration or if\n  // `apply_default_config_without_warming` flag is set.\n  google.protobuf.Any default_config = 2;\n\n  // Use the default config as the initial configuration without warming and\n  // waiting for the first discovery response. Requires the default configuration\n  // to be supplied.\n  bool apply_default_config_without_warming = 3;\n\n  // A set of permitted extension type URLs. Extension configuration updates are rejected\n  // if they do not match any type URL in the set.\n  repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/grpc_method_list.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"GrpcMethodListProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: gRPC method list]\n\n// A list of gRPC methods which can be used as an allowlist, for example.\nmessage GrpcMethodList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.GrpcMethodList\";\n\n  message Service {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.GrpcMethodList.Service\";\n\n    // The name of the gRPC service.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // The names of the gRPC methods in this service.\n    repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  repeated Service services = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/grpc_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"GrpcServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: gRPC services]\n\n// gRPC service configuration. This is used by :ref:`ApiConfigSource\n// <envoy_api_msg_config.core.v4alpha.ApiConfigSource>` and filter configurations.\n// [#next-free-field: 6]\nmessage GrpcService {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.GrpcService\";\n\n  message EnvoyGrpc {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.GrpcService.EnvoyGrpc\";\n\n    // The name of the upstream gRPC cluster. SSL credentials will be supplied\n    // in the :ref:`Cluster <envoy_api_msg_config.cluster.v4alpha.Cluster>` :ref:`transport_socket\n    // <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket>`.\n    string cluster_name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`.\n    // Note that this authority does not override the SNI. 
The SNI is provided by the transport socket of the cluster.\n    string authority = 2\n        [(validate.rules).string =\n             {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // [#next-free-field: 9]\n  message GoogleGrpc {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.GrpcService.GoogleGrpc\";\n\n    // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.\n    message SslCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials\";\n\n      // PEM encoded server root certificates.\n      DataSource root_certs = 1;\n\n      // PEM encoded client private key.\n      DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n      // PEM encoded client certificate chain.\n      DataSource cert_chain = 3;\n    }\n\n    // Local channel credentials. Only UDS is supported for now.\n    // See https://github.com/grpc/grpc/pull/15909.\n    message GoogleLocalCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials\";\n    }\n\n    // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call\n    // credential types.\n    message ChannelCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials\";\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        SslCredentials ssl_credentials = 1;\n\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_default = 2;\n\n        GoogleLocalCredentials local_credentials = 3;\n      }\n    }\n\n    // [#next-free-field: 8]\n    message CallCredentials {\n      option 
(udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials\";\n\n      message ServiceAccountJWTAccessCredentials {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.\"\n            \"ServiceAccountJWTAccessCredentials\";\n\n        string json_key = 1;\n\n        uint64 token_lifetime_seconds = 2;\n      }\n\n      message GoogleIAMCredentials {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials\";\n\n        string authorization_token = 1;\n\n        string authority_selector = 2;\n      }\n\n      message MetadataCredentialsFromPlugin {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.\"\n            \"MetadataCredentialsFromPlugin\";\n\n        reserved 2;\n\n        reserved \"config\";\n\n        string name = 1;\n\n        oneof config_type {\n          google.protobuf.Any typed_config = 3;\n        }\n      }\n\n      // Security token service configuration that allows Google gRPC to\n      // fetch security token from an OAuth 2.0 authorization server.\n      // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and\n      // https://github.com/grpc/grpc/pull/19587.\n      // [#next-free-field: 10]\n      message StsService {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService\";\n\n        // URI of the token exchange service that handles token exchange requests.\n        // [#comment:TODO(asraa): Add URI validation when implemented. 
Tracked by\n        // https://github.com/envoyproxy/protoc-gen-validate/issues/303]\n        string token_exchange_service_uri = 1;\n\n        // Location of the target service or resource where the client\n        // intends to use the requested security token.\n        string resource = 2;\n\n        // Logical name of the target service where the client intends to\n        // use the requested security token.\n        string audience = 3;\n\n        // The desired scope of the requested security token in the\n        // context of the service or resource where the token will be used.\n        string scope = 4;\n\n        // Type of the requested security token.\n        string requested_token_type = 5;\n\n        // The path of subject token, a security token that represents the\n        // identity of the party on behalf of whom the request is being made.\n        string subject_token_path = 6 [(validate.rules).string = {min_len: 1}];\n\n        // Type of the subject token.\n        string subject_token_type = 7 [(validate.rules).string = {min_len: 1}];\n\n        // The path of actor token, a security token that represents the identity\n        // of the acting party. 
The acting party is authorized to use the\n        // requested security token and act on behalf of the subject.\n        string actor_token_path = 8;\n\n        // Type of the actor token.\n        string actor_token_type = 9;\n      }\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        // Access token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.\n        string access_token = 1;\n\n        // Google Compute Engine credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_compute_engine = 2;\n\n        // Google refresh token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.\n        string google_refresh_token = 3;\n\n        // Service Account JWT Access credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.\n        ServiceAccountJWTAccessCredentials service_account_jwt_access = 4;\n\n        // Google IAM credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.\n        GoogleIAMCredentials google_iam = 5;\n\n        // Custom authenticator credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.\n        // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.\n        MetadataCredentialsFromPlugin from_plugin = 6;\n\n        // Custom security token service which implements OAuth 2.0 token exchange.\n        // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16\n        // See https://github.com/grpc/grpc/pull/19587.\n        StsService sts_service = 7;\n      }\n    }\n\n    // Channel arguments.\n    message ChannelArgs {\n      option (udpa.annotations.versioning).previous_message_type =\n          
\"envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs\";\n\n      message Value {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value\";\n\n        // Pointer values are not supported, since they don't make any sense when\n        // delivered via the API.\n        oneof value_specifier {\n          option (validate.required) = true;\n\n          string string_value = 1;\n\n          int64 int_value = 2;\n        }\n      }\n\n      // See grpc_types.h GRPC_ARG #defines for keys that work here.\n      map<string, Value> args = 1;\n    }\n\n    // The target URI when using the `Google C++ gRPC client\n    // <https://github.com/grpc/grpc>`_. SSL credentials will be supplied in\n    // :ref:`channel_credentials <envoy_api_field_config.core.v4alpha.GrpcService.GoogleGrpc.channel_credentials>`.\n    string target_uri = 1 [(validate.rules).string = {min_len: 1}];\n\n    ChannelCredentials channel_credentials = 2;\n\n    // A set of call credentials that can be composed with `channel credentials\n    // <https://grpc.io/docs/guides/auth.html#credential-types>`_.\n    repeated CallCredentials call_credentials = 3;\n\n    // The human readable prefix to use when emitting statistics for the gRPC\n    // service.\n    //\n    // .. csv-table::\n    //    :header: Name, Type, Description\n    //    :widths: 1, 1, 2\n    //\n    //    streams_total, Counter, Total number of streams opened\n    //    streams_closed_<gRPC status code>, Counter, Total streams closed with <gRPC status code>\n    string stat_prefix = 4 [(validate.rules).string = {min_len: 1}];\n\n    // The name of the Google gRPC credentials factory to use. This must have been registered with\n    // Envoy. 
If this is empty, a default credentials factory will be used that sets up channel\n    // credentials based on other configuration parameters.\n    string credentials_factory_name = 5;\n\n    // Additional configuration for site-specific customizations of the Google\n    // gRPC library.\n    google.protobuf.Struct config = 6;\n\n    // How many bytes each stream can buffer internally.\n    // If not set an implementation defined default is applied (1MiB).\n    google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7;\n\n    // Custom channels args.\n    ChannelArgs channel_args = 8;\n  }\n\n  reserved 4;\n\n  oneof target_specifier {\n    option (validate.required) = true;\n\n    // Envoy's in-built gRPC client.\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    EnvoyGrpc envoy_grpc = 1;\n\n    // `Google C++ gRPC client <https://github.com/grpc/grpc>`_\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    GoogleGrpc google_grpc = 2;\n  }\n\n  // The timeout for the gRPC request. This is the timeout for a specific\n  // request.\n  google.protobuf.Duration timeout = 3;\n\n  // Additional metadata to include in streams initiated to the GrpcService.\n  // This can be used for scenarios in which additional ad hoc authorization\n  // headers (e.g. ``x-foo-bar: baz-key``) are to be injected.\n  repeated HeaderValue initial_metadata = 5;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/event_service_config.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/v3/http.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Health check]\n// * Health checking :ref:`architecture overview <arch_overview_health_checking>`.\n// * If health checking is configured for a cluster, additional statistics are emitted. They are\n//   documented :ref:`here <config_cluster_manager_cluster_stats>`.\n\n// Endpoint health status.\nenum HealthStatus {\n  // The health status is not known. This is interpreted by Envoy as *HEALTHY*.\n  UNKNOWN = 0;\n\n  // Healthy.\n  HEALTHY = 1;\n\n  // Unhealthy.\n  UNHEALTHY = 2;\n\n  // Connection draining in progress. E.g.,\n  // `<https://aws.amazon.com/blogs/aws/elb-connection-draining-remove-instances-from-service-with-care/>`_\n  // or\n  // `<https://cloud.google.com/compute/docs/load-balancing/enabling-connection-draining>`_.\n  // This is interpreted by Envoy as *UNHEALTHY*.\n  DRAINING = 3;\n\n  // Health check timed out. 
This is part of HDS and is interpreted by Envoy as\n  // *UNHEALTHY*.\n  TIMEOUT = 4;\n\n  // Degraded.\n  DEGRADED = 5;\n}\n\n// [#next-free-field: 24]\nmessage HealthCheck {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.HealthCheck\";\n\n  // Describes the encoding of the payload bytes in the payload.\n  message Payload {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.Payload\";\n\n    oneof payload {\n      option (validate.required) = true;\n\n      // Hex encoded payload. E.g., \"000000FF\".\n      string text = 1 [(validate.rules).string = {min_len: 1}];\n\n      // [#not-implemented-hide:] Binary payload.\n      bytes binary = 2;\n    }\n  }\n\n  // [#next-free-field: 12]\n  message HttpHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.HttpHealthCheck\";\n\n    reserved 5, 7;\n\n    reserved \"service_name\", \"use_http2\";\n\n    // The value of the host header in the HTTP health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The host header can be customized for a specific endpoint by setting the\n    // :ref:`hostname <envoy_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.\n    string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Specifies the HTTP path that will be requested during health checking. 
For example\n    // */healthcheck*.\n    string path = 2\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // [#not-implemented-hide:] HTTP specific payload.\n    Payload send = 3;\n\n    // [#not-implemented-hide:] HTTP specific response.\n    Payload receive = 4;\n\n    // Specifies a list of HTTP headers that should be added to each request that is sent to the\n    // health checked cluster. For more information, including details on header value syntax, see\n    // the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated HeaderValueOption request_headers_to_add = 6\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request that is sent to the\n    // health checked cluster.\n    repeated string request_headers_to_remove = 8 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default\n    // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open\n    // semantics of :ref:`Int64Range <envoy_api_msg_type.v3.Int64Range>`. The start and end of each\n    // range are required. Only statuses in the range [100, 600) are allowed.\n    repeated type.v3.Int64Range expected_statuses = 9;\n\n    // Use specified application protocol for health checks.\n    type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}];\n\n    // An optional service name parameter which is used to validate the identity of\n    // the health checked cluster using a :ref:`StringMatcher\n    // <envoy_api_msg_type.matcher.v4alpha.StringMatcher>`. 
See the :ref:`architecture overview\n    // <arch_overview_health_checking_identity>` for more information.\n    type.matcher.v4alpha.StringMatcher service_name_matcher = 11;\n  }\n\n  message TcpHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.TcpHealthCheck\";\n\n    // Empty payloads imply a connect-only health check.\n    Payload send = 1;\n\n    // When checking the response, “fuzzy” matching is performed such that each\n    // binary block must be found, and in the order specified, but not\n    // necessarily contiguous.\n    repeated Payload receive = 2;\n  }\n\n  message RedisHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.RedisHealthCheck\";\n\n    // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value\n    // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other\n    // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance\n    // by setting the specified key to any value and waiting for traffic to drain.\n    string key = 1;\n  }\n\n  // `grpc.health.v1.Health\n  // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto>`_-based\n  // healthcheck. See `gRPC doc <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_\n  // for details.\n  message GrpcHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.GrpcHealthCheck\";\n\n    // An optional service name parameter which will be sent to gRPC service in\n    // `grpc.health.v1.HealthCheckRequest\n    // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto#L20>`_.\n    // message. 
See `gRPC health-checking overview\n    // <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_ for more information.\n    string service_name = 1;\n\n    // The value of the :authority header in the gRPC health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The authority header can be customized for a specific endpoint by setting\n    // the :ref:`hostname <envoy_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.\n    string authority = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // Custom health check.\n  message CustomHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.CustomHealthCheck\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    // The registered name of the custom health checker.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // A custom health checker specific configuration which depends on the custom health checker\n    // being instantiated. See :api:`envoy/config/health_checker` for reference.\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Health checks occur over the transport socket specified for the cluster. This implies that if a\n  // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS.\n  //\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  message TlsOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.TlsOptions\";\n\n    // Specifies the ALPN protocols for health check connections. 
This is useful if the\n    // corresponding upstream is using ALPN-based :ref:`FilterChainMatch\n    // <envoy_api_msg_config.listener.v4alpha.FilterChainMatch>` along with different protocols for health checks\n    // versus data connections. If empty, no ALPN protocols will be set on health check connections.\n    repeated string alpn_protocols = 1;\n  }\n\n  reserved 10;\n\n  // The time to wait for a health check response. If the timeout is reached the\n  // health check attempt will be considered a failure.\n  google.protobuf.Duration timeout = 1 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // The interval between health checks.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // An optional jitter amount in milliseconds. If specified, Envoy will start health\n  // checking after for a random time in ms between 0 and initial_jitter. This only\n  // applies to the first health check.\n  google.protobuf.Duration initial_jitter = 20;\n\n  // An optional jitter amount in milliseconds. If specified, during every\n  // interval Envoy will add interval_jitter to the wait time.\n  google.protobuf.Duration interval_jitter = 3;\n\n  // An optional jitter amount as a percentage of interval_ms. If specified,\n  // during every interval Envoy will add interval_ms *\n  // interval_jitter_percent / 100 to the wait time.\n  //\n  // If interval_jitter_ms and interval_jitter_percent are both set, both of\n  // them will be used to increase the wait time.\n  uint32 interval_jitter_percent = 18;\n\n  // The number of unhealthy health checks required before a host is marked\n  // unhealthy. 
Note that for *http* health checking if a host responds with 503\n  // this threshold is ignored and the host is considered unhealthy immediately.\n  google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}];\n\n  // The number of healthy health checks required before a host is marked\n  // healthy. Note that during startup, only a single successful health check is\n  // required to mark a host healthy.\n  google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Non-serving port for health checking.\n  google.protobuf.UInt32Value alt_port = 6;\n\n  // Reuse health check connection between health checks. Default is true.\n  google.protobuf.BoolValue reuse_connection = 7;\n\n  oneof health_checker {\n    option (validate.required) = true;\n\n    // HTTP health check.\n    HttpHealthCheck http_health_check = 8;\n\n    // TCP health check.\n    TcpHealthCheck tcp_health_check = 9;\n\n    // gRPC health check.\n    GrpcHealthCheck grpc_health_check = 11;\n\n    // Custom health check.\n    CustomHealthCheck custom_health_check = 13;\n  }\n\n  // The \"no traffic interval\" is a special health check interval that is used when a cluster has\n  // never had traffic routed to it. This lower interval allows cluster information to be kept up to\n  // date, without sending a potentially large amount of active health checking traffic for no\n  // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the\n  // standard health check interval that is defined. Note that this interval takes precedence over\n  // any other.\n  //\n  // The default value for \"no traffic interval\" is 60 seconds.\n  google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy interval\" is a health check interval that is used for hosts that are marked as\n  // unhealthy. 
As soon as the host is marked as healthy, Envoy will shift back to using the\n  // standard health check interval that is defined.\n  //\n  // The default value for \"unhealthy interval\" is the same as \"interval\".\n  google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as unhealthy. For subsequent health checks\n  // Envoy will shift back to using either \"unhealthy interval\" if present or the standard health\n  // check interval that is defined.\n  //\n  // The default value for \"unhealthy edge interval\" is the same as \"unhealthy interval\".\n  google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}];\n\n  // The \"healthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as healthy. For subsequent health checks\n  // Envoy will shift back to using the standard health check interval that is defined.\n  //\n  // The default value for \"healthy edge interval\" is the same as the default interval.\n  google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}];\n\n  // Specifies the path to the :ref:`health check event log <arch_overview_health_check_logging>`.\n  // If empty, no event log will be written.\n  string event_log_path = 17;\n\n  // [#not-implemented-hide:]\n  // The gRPC service for the health check event service.\n  // If empty, health check events won't be sent to a remote endpoint.\n  EventServiceConfig event_service = 22;\n\n  // If set to true, health check failure events will always be logged. 
If set to false, only the\n  // initial health check failure event will be logged.\n  // The default value is false.\n  bool always_log_health_check_failures = 19;\n\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  TlsOptions tls_options = 21;\n\n  // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's\n  // :ref:`transport socket matches <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket_matches>`.\n  // For example, the following match criteria\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_match_criteria:\n  //    useMTLS: true\n  //\n  // Will match the following :ref:`cluster socket match <envoy_api_msg_config.cluster.v4alpha.Cluster.TransportSocketMatch>`\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"useMTLS\"\n  //    match:\n  //      useMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... } # tls socket configuration\n  //\n  // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`.\n  // This allows using different transport socket capabilities for health checking versus proxying to the\n  // endpoint.\n  //\n  // If the key/value pairs specified do not match any\n  // :ref:`transport socket matches <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket_matches>`,\n  // the cluster's :ref:`transport socket <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket>`\n  // will be used for health check socket configuration.\n  google.protobuf.Struct transport_socket_match_criteria = 23;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/http_uri.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"HttpUriProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP Service URI ]\n\n// Envoy external URI descriptor\nmessage HttpUri {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.HttpUri\";\n\n  // The HTTP server URI. It should be a full FQDN with protocol, host and path.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    uri: https://www.googleapis.com/oauth2/v1/certs\n  //\n  string uri = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Specify how `uri` is to be fetched. Today, this requires an explicit\n  // cluster, but in the future we may support dynamic cluster creation or\n  // inline DNS resolution. See `issue\n  // <https://github.com/envoyproxy/envoy/issues/1606>`_.\n  oneof http_upstream_type {\n    option (validate.required) = true;\n\n    // A cluster is created in the Envoy \"cluster_manager\" config\n    // section. This field specifies the cluster name.\n    //\n    // Example:\n    //\n    // .. code-block:: yaml\n    //\n    //    cluster: jwks_cluster\n    //\n    string cluster = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Sets the maximum duration in milliseconds that a response can take to arrive upon request.\n  google.protobuf.Duration timeout = 3 [(validate.rules).duration = {\n    required: true\n    gte {}\n  }];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"ProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Protocol options]\n\n// [#not-implemented-hide:]\nmessage TcpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.TcpProtocolOptions\";\n}\n\nmessage UpstreamHttpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.UpstreamHttpProtocolOptions\";\n\n  // Set transport socket `SNI <https://en.wikipedia.org/wiki/Server_Name_Indication>`_ for new\n  // upstream connections based on the downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  bool auto_sni = 1;\n\n  // Automatic validate upstream presented certificate for new upstream connections based on the\n  // downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  // This field is intended to set with `auto_sni` field.\n  bool auto_san_validation = 2;\n}\n\n// [#next-free-field: 6]\nmessage HttpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.HttpProtocolOptions\";\n\n  // Action to take when Envoy receives client request with header names containing underscore\n  // characters.\n  // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented\n  // as a security measure due to systems 
that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore\n  // characters.\n  enum HeadersWithUnderscoresAction {\n    // Allow headers with underscores. This is the default behavior.\n    ALLOW = 0;\n\n    // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests\n    // end with the stream reset. The \"httpN.requests_rejected_with_underscores_in_headers\" counter\n    // is incremented for each rejected request.\n    REJECT_REQUEST = 1;\n\n    // Drop the header with name containing underscores. The header is dropped before the filter chain is\n    // invoked and as such filters will not see dropped headers. The\n    // \"httpN.dropped_headers_with_underscores\" is incremented for each dropped header.\n    DROP_HEADER = 2;\n  }\n\n  // The idle timeout for connections. The idle timeout is defined as the\n  // period in which there are no active requests. When the\n  // idle timeout is reached the connection will be closed. If the connection is an HTTP/2\n  // downstream connection a drain sequence will occur prior to closing the connection, see\n  // :ref:`drain_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.drain_timeout>`.\n  // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive.\n  // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0.\n  //\n  // .. warning::\n  //   Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 1;\n\n  // The maximum duration of a connection. The duration is defined as a period since a connection\n  // was established. If not set, there is no max duration. When max_connection_duration is reached\n  // the connection will be closed. 
Drain sequence will occur prior to closing the connection if\n  // it's applicable. See :ref:`drain_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.drain_timeout>`.\n  // Note: not implemented for upstream connections.\n  google.protobuf.Duration max_connection_duration = 3;\n\n  // The maximum number of headers. If unconfigured, the default\n  // maximum number of request headers allowed is 100. Requests that exceed this limit will receive\n  // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.\n  google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}];\n\n  // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be\n  // reset independent of any other timeouts. If not specified, this value is not set.\n  google.protobuf.Duration max_stream_duration = 4;\n\n  // Action to take when a client request with a header name containing underscore characters is received.\n  // If this setting is not specified, the value defaults to ALLOW.\n  // Note: upstream responses are not affected by this setting.\n  HeadersWithUnderscoresAction headers_with_underscores_action = 5;\n}\n\n// [#next-free-field: 8]\nmessage Http1ProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.Http1ProtocolOptions\";\n\n  message HeaderKeyFormat {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat\";\n\n    message ProperCaseWords {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords\";\n    }\n\n    oneof header_format {\n      option (validate.required) = true;\n\n      // Formats the header by proper casing words: the first character and any character following\n      // a special character will 
be capitalized if it's an alpha character. For example,\n      // \"content-type\" becomes \"Content-Type\", and \"foo$b#$are\" becomes \"Foo$B#$Are\".\n      // Note that while this results in most headers following conventional casing, certain headers\n      // are not covered. For example, the \"TE\" header will be formatted as \"Te\".\n      ProperCaseWords proper_case_words = 1;\n    }\n  }\n\n  // Handle HTTP requests with absolute URLs in the requests. These requests\n  // are generally sent by clients to forward/explicit proxies. This allows clients to configure\n  // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the\n  // *http_proxy* environment variable.\n  google.protobuf.BoolValue allow_absolute_url = 1;\n\n  // Handle incoming HTTP/1.0 and HTTP 0.9 requests.\n  // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1\n  // style connect logic, dechunking, and handling lack of client host iff\n  // *default_host_for_http_10* is configured.\n  bool accept_http_10 = 2;\n\n  // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as\n  // Envoy does not otherwise support HTTP/1.0 without a Host header.\n  // This is a no-op if *accept_http_10* is not true.\n  string default_host_for_http_10 = 3;\n\n  // Describes how the keys for response headers should be formatted. By default, all header keys\n  // are lower cased.\n  HeaderKeyFormat header_key_format = 4;\n\n  // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.\n  //\n  // .. 
attention::\n  //\n  //   Note that this only happens when Envoy is chunk encoding which occurs when:\n  //   - The request is HTTP/1.1.\n  //   - Is neither a HEAD only request nor a HTTP Upgrade.\n  //   - Not a response to a HEAD request.\n  //   - The content length header is not present.\n  bool enable_trailers = 5;\n\n  // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding`\n  // headers set. By default such messages are rejected, but if option is enabled - Envoy will\n  // remove Content-Length header and process message.\n  // See `RFC7230, sec. 3.3.3 <https://tools.ietf.org/html/rfc7230#section-3.3.3>` for details.\n  //\n  // .. attention::\n  //   Enabling this option might lead to request smuggling vulnerability, especially if traffic\n  //   is proxied via multiple layers of proxies.\n  bool allow_chunked_length = 6;\n\n  // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate\n  // HTTP/1.1 connections upon receiving an invalid HTTP message. However,\n  // when this option is true, then Envoy will leave the HTTP/1.1 connection\n  // open where possible.\n  // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`.\n  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7;\n}\n\nmessage KeepaliveSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.KeepaliveSettings\";\n\n  // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive.\n  google.protobuf.Duration interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // How long to wait for a response to a keepalive PING. 
If a response is not received within this\n  // time period, the connection will be aborted.\n  google.protobuf.Duration timeout = 2 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // A random jitter amount as a percentage of interval that will be added to each interval.\n  // A value of zero means there will be no jitter.\n  // The default value is 15%.\n  type.v3.Percent interval_jitter = 3;\n}\n\n// [#next-free-field: 16]\nmessage Http2ProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.Http2ProtocolOptions\";\n\n  // Defines a parameter to be sent in the SETTINGS frame.\n  // See `RFC7540, sec. 6.5.1 <https://tools.ietf.org/html/rfc7540#section-6.5.1>`_ for details.\n  message SettingsParameter {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter\";\n\n    // The 16 bit parameter identifier.\n    google.protobuf.UInt32Value identifier = 1 [\n      (validate.rules).uint32 = {lte: 65535 gte: 0},\n      (validate.rules).message = {required: true}\n    ];\n\n    // The 32 bit parameter value.\n    google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}];\n  }\n\n  reserved 12;\n\n  reserved \"stream_error_on_invalid_http_messaging\";\n\n  // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_\n  // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values\n  // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header\n  // compression.\n  google.protobuf.UInt32Value hpack_table_size = 1;\n\n  // `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_\n  // allowed for peer on one HTTP/2 connection. 
Valid values range from 1 to 2147483647 (2^31 - 1)\n  // and defaults to 2147483647.\n  //\n  // For upstream connections, this also limits how many streams Envoy will initiate concurrently\n  // on a single connection. If the limit is reached, Envoy may queue requests or establish\n  // additional connections (as allowed per circuit breaker limits).\n  google.protobuf.UInt32Value max_concurrent_streams = 2\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 1}];\n\n  // `Initial stream-level flow-control window\n  // <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size. Valid values range from 65535\n  // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456\n  // (256 * 1024 * 1024).\n  //\n  // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default\n  // window size now, so it's also the minimum.\n  //\n  // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the\n  // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to\n  // stop the flow of data to the codec buffers.\n  google.protobuf.UInt32Value initial_stream_window_size = 3\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Similar to *initial_stream_window_size*, but for connection-level flow-control\n  // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*.\n  google.protobuf.UInt32Value initial_connection_window_size = 4\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Allows proxying Websocket and other upgrades over H2 connect.\n  bool allow_connect = 5;\n\n  // [#not-implemented-hide:] Hiding until envoy has full metadata support.\n  // Still under implementation. DO NOT USE.\n  //\n  // Allows metadata. 
See [metadata\n  // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more\n  // information.\n  bool allow_metadata = 6;\n\n  // Limit the number of pending outbound downstream frames of all types (frames that are waiting to\n  // be written into the socket). Exceeding this limit triggers flood mitigation and connection is\n  // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due\n  // to flood mitigation. The default limit is 10000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,\n  // preventing high memory utilization when receiving continuous stream of these frames. Exceeding\n  // this limit triggers flood mitigation and connection is terminated. The\n  // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood\n  // mitigation. The default limit is 1000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an\n  // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but\n  // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood``\n  // stat tracks the number of connections terminated due to flood mitigation.\n  // Setting this to 0 will terminate connection upon receiving first frame with an empty payload\n  // and no end stream flag. 
The default limit is 1.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9;\n\n  // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number\n  // of PRIORITY frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     max_inbound_priority_frames_per_stream * (1 + inbound_streams)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 100.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10;\n\n  // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number\n  // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     1 + 2 * (inbound_streams +\n  //              max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 10.\n  // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,\n  // but more complex implementations that try to estimate available bandwidth require at least 2.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11\n      [(validate.rules).uint32 = {gte: 1}];\n\n  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then\n  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. 
However,\n  // when this option is enabled, only the offending stream is terminated.\n  //\n  // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`\n  //\n  // See `RFC7540, sec. 8.1 <https://tools.ietf.org/html/rfc7540#section-8.1>`_ for details.\n  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14;\n\n  // [#not-implemented-hide:]\n  // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions:\n  //\n  // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by\n  // Envoy.\n  //\n  // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field\n  // 'allow_connect'.\n  //\n  // Note that custom parameters specified through this field can not also be set in the\n  // corresponding named parameters:\n  //\n  // .. code-block:: text\n  //\n  //   ID    Field Name\n  //   ----------------\n  //   0x1   hpack_table_size\n  //   0x3   max_concurrent_streams\n  //   0x4   initial_stream_window_size\n  //\n  // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies\n  // between custom parameters with the same identifier will trigger a failure.\n  //\n  // See `IANA HTTP/2 Settings\n  // <https://www.iana.org/assignments/http2-parameters/http2-parameters.xhtml#settings>`_ for\n  // standardized identifiers.\n  repeated SettingsParameter custom_settings_parameters = 13;\n\n  // Send HTTP/2 PING frames to verify that the connection is still healthy. 
If the remote peer\n  // does not respond within the configured timeout, the connection will be aborted.\n  KeepaliveSettings connection_keepalive = 15;\n}\n\n// [#not-implemented-hide:]\nmessage GrpcProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.GrpcProtocolOptions\";\n\n  Http2ProtocolOptions http2_protocol_options = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"ProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Proxy Protocol]\n\nmessage ProxyProtocolConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.ProxyProtocolConfig\";\n\n  enum Version {\n    // PROXY protocol version 1. Human readable format.\n    V1 = 0;\n\n    // PROXY protocol version 2. Binary format.\n    V2 = 1;\n  }\n\n  // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details\n  Version version = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/socket_option.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"SocketOptionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Socket Option ]\n\n// Generic socket option message. This would be used to set socket options that\n// might not exist in upstream kernels or precompiled Envoy binaries.\n// [#next-free-field: 7]\nmessage SocketOption {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.SocketOption\";\n\n  enum SocketState {\n    // Socket options are applied after socket creation but before binding the socket to a port\n    STATE_PREBIND = 0;\n\n    // Socket options are applied after binding the socket to a port but before calling listen()\n    STATE_BOUND = 1;\n\n    // Socket options are applied after calling listen()\n    STATE_LISTENING = 2;\n  }\n\n  // An optional name to give this socket option for debugging, etc.\n  // Uniqueness is not required and no special meaning is assumed.\n  string description = 1;\n\n  // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP\n  int64 level = 2;\n\n  // The numeric name as passed to setsockopt\n  int64 name = 3;\n\n  oneof value {\n    option (validate.required) = true;\n\n    // Because many sockopts take an int value.\n    int64 int_value = 4;\n\n    // Otherwise it's a byte buffer.\n    bytes buf_value = 5;\n  }\n\n  // The state in which the option will be applied. When used in BindConfig\n  // STATE_PREBIND is currently the only valid value.\n  SocketState state = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/core/v4alpha/substitution_format_string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"SubstitutionFormatStringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Substitution format string]\n\n// Configuration to use multiple :ref:`command operators <config_access_log_command_operators>`\n// to generate a new string in either plain text or JSON format.\nmessage SubstitutionFormatString {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.SubstitutionFormatString\";\n\n  oneof format {\n    option (validate.required) = true;\n\n    // Specify a format with command operators to form a text string.\n    // Its details is described in :ref:`format string<config_access_log_format_strings>`.\n    //\n    // For example, setting ``text_format`` like below,\n    //\n    // .. validated-code-block:: yaml\n    //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n    //\n    //   text_format: \"%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\\n\"\n    //\n    // generates plain text similar to:\n    //\n    // .. code-block:: text\n    //\n    //   upstream connect error:503:path=/foo\n    //\n    string text_format = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Specify a format with command operators to form a JSON string.\n    // Its details is described in :ref:`format dictionary<config_access_log_format_dictionaries>`.\n    // Values are rendered as strings, numbers, or boolean values as appropriate.\n    // Nested JSON objects may be produced by some command operators (e.g. 
FILTER_STATE or DYNAMIC_METADATA).\n    // See the documentation for a specific command operator for details.\n    //\n    // .. validated-code-block:: yaml\n    //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n    //\n    //   json_format:\n    //     status: \"%RESPONSE_CODE%\"\n    //     message: \"%LOCAL_REPLY_BODY%\"\n    //\n    // The following JSON object would be created:\n    //\n    // .. code-block:: json\n    //\n    //  {\n    //    \"status\": 500,\n    //    \"message\": \"My error message\"\n    //  }\n    //\n    google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}];\n  }\n\n  // If set to true, when command operators are evaluated to null,\n  //\n  // * for ``text_format``, the output of the empty operator is changed from ``-`` to an\n  //   empty string, so that empty values are omitted entirely.\n  // * for ``json_format`` the keys with null values are omitted in the output structure.\n  bool omit_empty_values = 3;\n\n  // Specify a *content_type* field.\n  // If this field is not set then ``text/plain`` is used for *text_format* and\n  // ``application/json`` is used for *json_format*.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   content_type: \"text/html; charset=UTF-8\"\n  //\n  string content_type = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/endpoint/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/endpoint/v3/endpoint.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.endpoint.v3;\n\nimport \"envoy/config/endpoint/v3/endpoint_components.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.endpoint.v3\";\noption java_outer_classname = \"EndpointProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Endpoint configuration]\n// Endpoint discovery :ref:`architecture overview <arch_overview_service_discovery_types_eds>`\n\n// Each route from RDS will map to a single cluster or traffic split across\n// clusters using weights expressed in the RDS WeightedCluster.\n//\n// With EDS, each cluster is treated independently from a LB perspective, with\n// LB taking place between the Localities within a cluster and at a finer\n// granularity between the hosts within a locality. The percentage of traffic\n// for each endpoint is determined by both its load_balancing_weight, and the\n// load_balancing_weight of its locality. 
First, a locality will be selected,\n// then an endpoint within that locality will be chose based on its weight.\n// [#next-free-field: 6]\nmessage ClusterLoadAssignment {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.ClusterLoadAssignment\";\n\n  // Load balancing policy settings.\n  // [#next-free-field: 6]\n  message Policy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.ClusterLoadAssignment.Policy\";\n\n    // [#not-implemented-hide:]\n    message DropOverload {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload\";\n\n      // Identifier for the policy specifying the drop.\n      string category = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Percentage of traffic that should be dropped for the category.\n      type.v3.FractionalPercent drop_percentage = 2;\n    }\n\n    reserved 1, 5;\n\n    reserved \"disable_overprovisioning\";\n\n    // Action to trim the overall incoming traffic to protect the upstream\n    // hosts. This action allows protection in case the hosts are unable to\n    // recover from an outage, or unable to autoscale or unable to handle\n    // incoming traffic volume for any reason.\n    //\n    // At the client each category is applied one after the other to generate\n    // the 'actual' drop percentage on all outgoing traffic. For example:\n    //\n    // .. 
code-block:: json\n    //\n    //  { \"drop_overloads\": [\n    //      { \"category\": \"throttle\", \"drop_percentage\": 60 }\n    //      { \"category\": \"lb\", \"drop_percentage\": 50 }\n    //  ]}\n    //\n    // The actual drop percentages applied to the traffic at the clients will be\n    //    \"throttle\"_drop = 60%\n    //    \"lb\"_drop = 20%  // 50% of the remaining 'actual' load, which is 40%.\n    //    actual_outgoing_load = 20% // remaining after applying all categories.\n    // [#not-implemented-hide:]\n    repeated DropOverload drop_overloads = 2;\n\n    // Priority levels and localities are considered overprovisioned with this\n    // factor (in percentage). This means that we don't consider a priority\n    // level or locality unhealthy until the fraction of healthy hosts\n    // multiplied by the overprovisioning factor drops below 100.\n    // With the default value 140(1.4), Envoy doesn't consider a priority level\n    // or a locality unhealthy until their percentage of healthy hosts drops\n    // below 72%. For example:\n    //\n    // .. code-block:: json\n    //\n    //  { \"overprovisioning_factor\": 100 }\n    //\n    // Read more at :ref:`priority levels <arch_overview_load_balancing_priority_levels>` and\n    // :ref:`localities <arch_overview_load_balancing_locality_weighted_lb>`.\n    google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}];\n\n    // The max time until which the endpoints from this assignment can be used.\n    // If no new assignments are received before this time expires the endpoints\n    // are considered stale and should be marked unhealthy.\n    // Defaults to 0 which means endpoints never go stale.\n    google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}];\n  }\n\n  // Name of the cluster. 
This will be the :ref:`service_name\n  // <envoy_api_field_config.cluster.v3.Cluster.EdsClusterConfig.service_name>` value if specified\n  // in the cluster :ref:`EdsClusterConfig\n  // <envoy_api_msg_config.cluster.v3.Cluster.EdsClusterConfig>`.\n  string cluster_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // List of endpoints to load balance to.\n  repeated LocalityLbEndpoints endpoints = 2;\n\n  // Map of named endpoints that can be referenced in LocalityLbEndpoints.\n  // [#not-implemented-hide:]\n  map<string, Endpoint> named_endpoints = 5;\n\n  // Load balancing policy settings.\n  Policy policy = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/endpoint/v3/endpoint_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.endpoint.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/health_check.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.endpoint.v3\";\noption java_outer_classname = \"EndpointComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Endpoints]\n\n// Upstream host identifier.\nmessage Endpoint {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.endpoint.Endpoint\";\n\n  // The optional health check configuration.\n  message HealthCheckConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.endpoint.Endpoint.HealthCheckConfig\";\n\n    // Optional alternative health check port value.\n    //\n    // By default the health check address port of an upstream host is the same\n    // as the host's serving address port. This provides an alternative health\n    // check port. Setting this with a non-zero value allows an upstream host\n    // to have different health check address port.\n    uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}];\n\n    // By default, the host header for L7 health checks is controlled by cluster level configuration\n    // (see: :ref:`host <envoy_api_field_config.core.v3.HealthCheck.HttpHealthCheck.host>` and\n    // :ref:`authority <envoy_api_field_config.core.v3.HealthCheck.GrpcHealthCheck.authority>`). Setting this\n    // to a non-empty value allows overriding the cluster level configuration for a specific\n    // endpoint.\n    string hostname = 2;\n  }\n\n  // The upstream host address.\n  //\n  // .. 
attention::\n  //\n  //   The form of host address depends on the given cluster type. For STATIC or EDS,\n  //   it is expected to be a direct IP address (or something resolvable by the\n  //   specified :ref:`resolver <envoy_api_field_config.core.v3.SocketAddress.resolver_name>`\n  //   in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname,\n  //   and will be resolved via DNS.\n  core.v3.Address address = 1;\n\n  // The optional health check configuration is used as configuration for the\n  // health checker to contact the health checked host.\n  //\n  // .. attention::\n  //\n  //   This takes into effect only for upstream clusters with\n  //   :ref:`active health checking <arch_overview_health_checking>` enabled.\n  HealthCheckConfig health_check_config = 2;\n\n  // The hostname associated with this endpoint. This hostname is not used for routing or address\n  // resolution. If provided, it will be associated with the endpoint, and can be used for features\n  // that require a hostname, like\n  // :ref:`auto_host_rewrite <envoy_api_field_config.route.v3.RouteAction.auto_host_rewrite>`.\n  string hostname = 3;\n}\n\n// An Endpoint that Envoy can route traffic to.\n// [#next-free-field: 6]\nmessage LbEndpoint {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.endpoint.LbEndpoint\";\n\n  // Upstream host identifier or a named reference.\n  oneof host_identifier {\n    Endpoint endpoint = 1;\n\n    // [#not-implemented-hide:]\n    string endpoint_name = 5;\n  }\n\n  // Optional health status when known and supplied by EDS server.\n  core.v3.HealthStatus health_status = 2;\n\n  // The endpoint metadata specifies values that may be used by the load\n  // balancer to select endpoints in a cluster for a given request. The filter\n  // name should be specified as *envoy.lb*. 
An example boolean key-value pair\n  // is *canary*, providing the optional canary status of the upstream host.\n  // This may be matched against in a route's\n  // :ref:`RouteAction <envoy_api_msg_config.route.v3.RouteAction>` metadata_match field\n  // to subset the endpoints considered in cluster load balancing.\n  core.v3.Metadata metadata = 3;\n\n  // The optional load balancing weight of the upstream host; at least 1.\n  // Envoy uses the load balancing weight in some of the built in load\n  // balancers. The load balancing weight for an endpoint is divided by the sum\n  // of the weights of all endpoints in the endpoint's locality to produce a\n  // percentage of traffic for the endpoint. This percentage is then further\n  // weighted by the endpoint's locality's load balancing weight from\n  // LocalityLbEndpoints. If unspecified, each host is presumed to have equal\n  // weight in a locality. The sum of the weights of all endpoints in the\n  // endpoint's locality must not exceed uint32_t maximal value (4294967295).\n  google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}];\n}\n\n// A group of endpoints belonging to a Locality.\n// One can have multiple LocalityLbEndpoints for a locality, but this is\n// generally only done if the different groups need to have different load\n// balancing weights or different priorities.\n// [#next-free-field: 7]\nmessage LocalityLbEndpoints {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.endpoint.LocalityLbEndpoints\";\n\n  // Identifies location of where the upstream hosts run.\n  core.v3.Locality locality = 1;\n\n  // The group of endpoints belonging to the locality specified.\n  repeated LbEndpoint lb_endpoints = 2;\n\n  // Optional: Per priority/region/zone/sub_zone weight; at least 1. 
The load\n  // balancing weight for a locality is divided by the sum of the weights of all\n  // localities  at the same priority level to produce the effective percentage\n  // of traffic for the locality. The sum of the weights of all localities at\n  // the same priority level must not exceed uint32_t maximal value (4294967295).\n  //\n  // Locality weights are only considered when :ref:`locality weighted load\n  // balancing <arch_overview_load_balancing_locality_weighted_lb>` is\n  // configured. These weights are ignored otherwise. If no weights are\n  // specified when locality weighted load balancing is enabled, the locality is\n  // assigned no load.\n  google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional: the priority for this LocalityLbEndpoints. If unspecified this will\n  // default to the highest priority (0).\n  //\n  // Under usual circumstances, Envoy will only select endpoints for the highest\n  // priority (0). In the event all endpoints for a particular priority are\n  // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the\n  // next highest priority group.\n  //\n  // Priorities should range from 0 (highest) to N (lowest) without skipping.\n  uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}];\n\n  // Optional: Per locality proximity value which indicates how close this\n  // locality is from the source locality. This value only provides ordering\n  // information (lower the value, closer it is to the source locality).\n  // This will be consumed by load balancing schemes that need proximity order\n  // to determine where to route the requests.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value proximity = 6;\n}\n"
  },
  {
    "path": "api/envoy/config/endpoint/v3/load_report.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.endpoint.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.endpoint.v3\";\noption java_outer_classname = \"LoadReportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Load Report]\n\n// These are stats Envoy reports to the management server at a frequency defined by\n// :ref:`LoadStatsResponse.load_reporting_interval<envoy_api_field_service.load_stats.v3.LoadStatsResponse.load_reporting_interval>`.\n// Stats per upstream region/zone and optionally per subzone.\n// [#next-free-field: 9]\nmessage UpstreamLocalityStats {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.endpoint.UpstreamLocalityStats\";\n\n  // Name of zone, region and optionally endpoint group these metrics were\n  // collected from. Zone and region names could be empty if unknown.\n  core.v3.Locality locality = 1;\n\n  // The total number of requests successfully completed by the endpoints in the\n  // locality.\n  uint64 total_successful_requests = 2;\n\n  // The total number of unfinished requests\n  uint64 total_requests_in_progress = 3;\n\n  // The total number of requests that failed due to errors at the endpoint,\n  // aggregated over all endpoints in the locality.\n  uint64 total_error_requests = 4;\n\n  // The total number of requests that were issued by this Envoy since\n  // the last report. 
This information is aggregated over all the\n  // upstream endpoints in the locality.\n  uint64 total_issued_requests = 8;\n\n  // Stats for multi-dimensional load balancing.\n  repeated EndpointLoadMetricStats load_metric_stats = 5;\n\n  // Endpoint granularity stats information for this locality. This information\n  // is populated if the Server requests it by setting\n  // :ref:`LoadStatsResponse.report_endpoint_granularity<envoy_api_field_service.load_stats.v3.LoadStatsResponse.report_endpoint_granularity>`.\n  repeated UpstreamEndpointStats upstream_endpoint_stats = 7;\n\n  // [#not-implemented-hide:] The priority of the endpoint group these metrics\n  // were collected from.\n  uint32 priority = 6;\n}\n\n// [#next-free-field: 8]\nmessage UpstreamEndpointStats {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.endpoint.UpstreamEndpointStats\";\n\n  // Upstream host address.\n  core.v3.Address address = 1;\n\n  // Opaque and implementation dependent metadata of the\n  // endpoint. Envoy will pass this directly to the management server.\n  google.protobuf.Struct metadata = 6;\n\n  // The total number of requests successfully completed by the endpoints in the\n  // locality. These include non-5xx responses for HTTP, where errors\n  // originate at the client and the endpoint responded successfully. 
For gRPC,\n  // the grpc-status values are those not covered by total_error_requests below.\n  uint64 total_successful_requests = 2;\n\n  // The total number of unfinished requests for this endpoint.\n  uint64 total_requests_in_progress = 3;\n\n  // The total number of requests that failed due to errors at the endpoint.\n  // For HTTP these are responses with 5xx status codes and for gRPC the\n  // grpc-status values:\n  //\n  //   - DeadlineExceeded\n  //   - Unimplemented\n  //   - Internal\n  //   - Unavailable\n  //   - Unknown\n  //   - DataLoss\n  uint64 total_error_requests = 4;\n\n  // The total number of requests that were issued to this endpoint\n  // since the last report. A single TCP connection, HTTP or gRPC\n  // request or stream is counted as one request.\n  uint64 total_issued_requests = 7;\n\n  // Stats for multi-dimensional load balancing.\n  repeated EndpointLoadMetricStats load_metric_stats = 5;\n}\n\nmessage EndpointLoadMetricStats {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.endpoint.EndpointLoadMetricStats\";\n\n  // Name of the metric; may be empty.\n  string metric_name = 1;\n\n  // Number of calls that finished and included this metric.\n  uint64 num_requests_finished_with_metric = 2;\n\n  // Sum of metric values across all calls that finished with this metric for\n  // load_reporting_interval.\n  double total_metric_value = 3;\n}\n\n// Per cluster load stats. 
Envoy reports these stats a management server in a\n// :ref:`LoadStatsRequest<envoy_api_msg_service.load_stats.v3.LoadStatsRequest>`\n// Next ID: 7\n// [#next-free-field: 7]\nmessage ClusterStats {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.endpoint.ClusterStats\";\n\n  message DroppedRequests {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.endpoint.ClusterStats.DroppedRequests\";\n\n    // Identifier for the policy specifying the drop.\n    string category = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Total number of deliberately dropped requests for the category.\n    uint64 dropped_count = 2;\n  }\n\n  // The name of the cluster.\n  string cluster_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The eds_cluster_config service_name of the cluster.\n  // It's possible that two clusters send the same service_name to EDS,\n  // in that case, the management server is supposed to do aggregation on the load reports.\n  string cluster_service_name = 6;\n\n  // Need at least one.\n  repeated UpstreamLocalityStats upstream_locality_stats = 2\n      [(validate.rules).repeated = {min_items: 1}];\n\n  // Cluster-level stats such as total_successful_requests may be computed by\n  // summing upstream_locality_stats. In addition, below there are additional\n  // cluster-wide stats.\n  //\n  // The total number of dropped requests. This covers requests\n  // deliberately dropped by the drop_overload policy and circuit breaking.\n  uint64 total_dropped_requests = 3;\n\n  // Information about deliberately dropped requests for each category specified\n  // in the DropOverload policy.\n  repeated DroppedRequests dropped_requests = 5;\n\n  // Period over which the actual load report occurred. This will be guaranteed to include every\n  // request reported. 
Due to system load and delays between the *LoadStatsRequest* sent from Envoy\n  // and the *LoadStatsResponse* message sent from the management server, this may be longer than\n  // the requested load reporting interval in the *LoadStatsResponse*.\n  google.protobuf.Duration load_report_interval = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/README.md",
    "content": "Protocol buffer definitions for filters.\n\nVisibility of the definitions should be constrained to none except for\nshared definitions between explicitly enumerated filters (e.g. accesslog and fault definitions).\n"
  },
  {
    "path": "api/envoy/config/filter/accesslog/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/accesslog/v2/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.accesslog.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.accesslog.v2\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.accesslog.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common access log types]\n\nmessage AccessLog {\n  // The name of the access log implementation to instantiate. The name must\n  // match a statically registered access log. Current built-in loggers include:\n  //\n  // #. \"envoy.access_loggers.file\"\n  // #. \"envoy.access_loggers.http_grpc\"\n  // #. \"envoy.access_loggers.tcp_grpc\"\n  string name = 1;\n\n  // Filter which is used to determine if the access log needs to be written.\n  AccessLogFilter filter = 2;\n\n  // Custom configuration that depends on the access log being instantiated. Built-in\n  // configurations include:\n  //\n  // #. \"envoy.access_loggers.file\": :ref:`FileAccessLog\n  //    <envoy_api_msg_config.accesslog.v2.FileAccessLog>`\n  // #. \"envoy.access_loggers.http_grpc\": :ref:`HttpGrpcAccessLogConfig\n  //    <envoy_api_msg_config.accesslog.v2.HttpGrpcAccessLogConfig>`\n  // #. 
\"envoy.access_loggers.tcp_grpc\": :ref:`TcpGrpcAccessLogConfig\n  //    <envoy_api_msg_config.accesslog.v2.TcpGrpcAccessLogConfig>`\n  oneof config_type {\n    google.protobuf.Struct config = 3 [deprecated = true];\n\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// [#next-free-field: 12]\nmessage AccessLogFilter {\n  oneof filter_specifier {\n    option (validate.required) = true;\n\n    // Status code filter.\n    StatusCodeFilter status_code_filter = 1;\n\n    // Duration filter.\n    DurationFilter duration_filter = 2;\n\n    // Not health check filter.\n    NotHealthCheckFilter not_health_check_filter = 3;\n\n    // Traceable filter.\n    TraceableFilter traceable_filter = 4;\n\n    // Runtime filter.\n    RuntimeFilter runtime_filter = 5;\n\n    // And filter.\n    AndFilter and_filter = 6;\n\n    // Or filter.\n    OrFilter or_filter = 7;\n\n    // Header filter.\n    HeaderFilter header_filter = 8;\n\n    // Response flag filter.\n    ResponseFlagFilter response_flag_filter = 9;\n\n    // gRPC status filter.\n    GrpcStatusFilter grpc_status_filter = 10;\n\n    // Extension filter.\n    ExtensionFilter extension_filter = 11;\n  }\n}\n\n// Filter on an integer comparison.\nmessage ComparisonFilter {\n  enum Op {\n    // =\n    EQ = 0;\n\n    // >=\n    GE = 1;\n\n    // <=\n    LE = 2;\n  }\n\n  // Comparison operator.\n  Op op = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Value to compare against.\n  api.v2.core.RuntimeUInt32 value = 2;\n}\n\n// Filters on HTTP response/status code.\nmessage StatusCodeFilter {\n  // Comparison.\n  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters on total request duration in milliseconds.\nmessage DurationFilter {\n  // Comparison.\n  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters for requests that are not health check requests. 
A health check\n// request is marked by the health check filter.\nmessage NotHealthCheckFilter {\n}\n\n// Filters for requests that are traceable. See the tracing overview for more\n// information on how a request becomes traceable.\nmessage TraceableFilter {\n}\n\n// Filters for random sampling of requests.\nmessage RuntimeFilter {\n  // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field.\n  // If found in runtime, this value will replace the default numerator.\n  string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The default sampling percentage. If not specified, defaults to 0% with denominator of 100.\n  type.FractionalPercent percent_sampled = 2;\n\n  // By default, sampling pivots on the header\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` being present. If\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` is present, the filter will\n  // consistently sample across multiple hosts based on the runtime key value and the value\n  // extracted from :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`. If it is\n  // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based\n  // on the runtime key value alone. 
*use_independent_randomness* can be used for logging kill\n  // switches within complex nested :ref:`AndFilter\n  // <envoy_api_msg_config.filter.accesslog.v2.AndFilter>` and :ref:`OrFilter\n  // <envoy_api_msg_config.filter.accesslog.v2.OrFilter>` blocks that are easier to reason about\n  // from a probability perspective (i.e., setting to true will cause the filter to behave like\n  // an independent random variable when composed within logical operator filters).\n  bool use_independent_randomness = 3;\n}\n\n// Performs a logical “and” operation on the result of each filter in filters.\n// Filters are evaluated sequentially and if one of them returns false, the\n// filter returns false immediately.\nmessage AndFilter {\n  repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Performs a logical “or” operation on the result of each individual filter.\n// Filters are evaluated sequentially and if one of them returns true, the\n// filter returns true immediately.\nmessage OrFilter {\n  repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Filters requests based on the presence or value of a request header.\nmessage HeaderFilter {\n  // Only requests with a header which matches the specified HeaderMatcher will pass the filter\n  // check.\n  api.v2.route.HeaderMatcher header = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters requests that received responses with an Envoy response flag set.\n// A list of the response flags can be found\n// in the access log formatter :ref:`documentation<config_access_log_format_response_flags>`.\nmessage ResponseFlagFilter {\n  // Only responses with the any of the flags listed in this field will be logged.\n  // This field is optional. 
If it is not specified, then any response flag will pass\n  // the filter check.\n  repeated string flags = 1 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"LH\"\n        in: \"UH\"\n        in: \"UT\"\n        in: \"LR\"\n        in: \"UR\"\n        in: \"UF\"\n        in: \"UC\"\n        in: \"UO\"\n        in: \"NR\"\n        in: \"DI\"\n        in: \"FI\"\n        in: \"RL\"\n        in: \"UAEX\"\n        in: \"RLSE\"\n        in: \"DC\"\n        in: \"URX\"\n        in: \"SI\"\n        in: \"IH\"\n        in: \"DPE\"\n      }\n    }\n  }];\n}\n\n// Filters gRPC requests based on their response status. If a gRPC status is not provided, the\n// filter will infer the status from the HTTP status code.\nmessage GrpcStatusFilter {\n  enum Status {\n    OK = 0;\n    CANCELED = 1;\n    UNKNOWN = 2;\n    INVALID_ARGUMENT = 3;\n    DEADLINE_EXCEEDED = 4;\n    NOT_FOUND = 5;\n    ALREADY_EXISTS = 6;\n    PERMISSION_DENIED = 7;\n    RESOURCE_EXHAUSTED = 8;\n    FAILED_PRECONDITION = 9;\n    ABORTED = 10;\n    OUT_OF_RANGE = 11;\n    UNIMPLEMENTED = 12;\n    INTERNAL = 13;\n    UNAVAILABLE = 14;\n    DATA_LOSS = 15;\n    UNAUTHENTICATED = 16;\n  }\n\n  // Logs only responses that have any one of the gRPC statuses in this field.\n  repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n\n  // If included and set to true, the filter will instead block all responses with a gRPC status or\n  // inferred gRPC status enumerated in statuses, and allow all other responses.\n  bool exclude = 2;\n}\n\n// Extension filter is statically registered at runtime.\nmessage ExtensionFilter {\n  // The name of the filter implementation to instantiate. 
The name must\n  // match a statically registered filter.\n  string name = 1;\n\n  // Custom configuration that depends on the filter being instantiated.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/filter/dubbo/router/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/dubbo/router/v2alpha1/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.dubbo.router.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.dubbo_proxy.router.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Router]\n// Dubbo router :ref:`configuration overview <config_dubbo_filters_router>`.\n\nmessage Router {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/fault/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/fault/v2/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.fault.v2;\n\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.fault.v2\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.common.fault.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common fault injection types]\n\n// Delay specification is used to inject latency into the\n// HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections.\n// [#next-free-field: 6]\nmessage FaultDelay {\n  enum FaultDelayType {\n    // Unused and deprecated.\n    FIXED = 0;\n  }\n\n  // Fault delays are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderDelay {\n  }\n\n  reserved 2;\n\n  // Unused and deprecated. Will be removed in the next release.\n  FaultDelayType type = 1 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  oneof fault_delay_secifier {\n    option (validate.required) = true;\n\n    // Add a fixed delay before forwarding the operation upstream. See\n    // https://developers.google.com/protocol-buffers/docs/proto3#json for\n    // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified\n    // delay will be injected before a new request/operation. For TCP\n    // connections, the proxying of the connection upstream will be delayed\n    // for the specified period. 
This is required if type is FIXED.\n    google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}];\n\n    // Fault delays are controlled via an HTTP header (if applicable).\n    HeaderDelay header_delay = 5;\n  }\n\n  // The percentage of operations/connections/requests on which the delay will be injected.\n  type.FractionalPercent percentage = 4;\n}\n\n// Describes a rate limit to be applied.\nmessage FaultRateLimit {\n  // Describes a fixed/constant rate limit.\n  message FixedLimit {\n    // The limit supplied in KiB/s.\n    uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}];\n  }\n\n  // Rate limits are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderLimit {\n  }\n\n  oneof limit_type {\n    option (validate.required) = true;\n\n    // A fixed rate limit.\n    FixedLimit fixed_limit = 1;\n\n    // Rate limits are controlled via an HTTP header (if applicable).\n    HeaderLimit header_limit = 3;\n  }\n\n  // The percentage of operations/connections/requests on which the rate limit will be injected.\n  type.FractionalPercent percentage = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.adaptive_concurrency.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha\";\noption java_outer_classname = \"AdaptiveConcurrencyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.adaptive_concurrency.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Adaptive Concurrency]\n// Adaptive Concurrency Control :ref:`configuration overview\n// <config_http_filters_adaptive_concurrency>`.\n// [#extension: envoy.filters.http.adaptive_concurrency]\n\n// Configuration parameters for the gradient controller.\nmessage GradientControllerConfig {\n  // Parameters controlling the periodic recalculation of the concurrency limit from sampled request\n  // latencies.\n  message ConcurrencyLimitCalculationParams {\n    // The allowed upper-bound on the calculated concurrency limit. 
Defaults to 1000.\n    google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}];\n\n    // The period of time samples are taken to recalculate the concurrency limit.\n    google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n  }\n\n  // Parameters controlling the periodic minRTT recalculation.\n  // [#next-free-field: 6]\n  message MinimumRTTCalculationParams {\n    // The time interval between recalculating the minimum request round-trip time.\n    google.protobuf.Duration interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n\n    // The number of requests to aggregate/sample during the minRTT recalculation window before\n    // updating. Defaults to 50.\n    google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}];\n\n    // Randomized time delta that will be introduced to the start of the minRTT calculation window.\n    // This is represented as a percentage of the interval duration. Defaults to 15%.\n    //\n    // Example: If the interval is 10s and the jitter is 15%, the next window will begin\n    // somewhere in the range (10s - 11.5s).\n    type.Percent jitter = 3;\n\n    // The concurrency limit set while measuring the minRTT. Defaults to 3.\n    google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}];\n\n    // Amount added to the measured minRTT to add stability to the concurrency limit during natural\n    // variability in latency. This is expressed as a percentage of the measured value and can be\n    // adjusted to allow more or less tolerance to the sampled latency values.\n    //\n    // Defaults to 25%.\n    type.Percent buffer = 5;\n  }\n\n  // The percentile to use when summarizing aggregated samples. 
Defaults to p50.\n  type.Percent sample_aggregate_percentile = 1;\n\n  ConcurrencyLimitCalculationParams concurrency_limit_params = 2\n      [(validate.rules).message = {required: true}];\n\n  MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}];\n}\n\nmessage AdaptiveConcurrency {\n  oneof concurrency_controller_config {\n    option (validate.required) = true;\n\n    // Gradient concurrency control will be used.\n    GradientControllerConfig gradient_controller_config = 1\n        [(validate.rules).message = {required: true}];\n  }\n\n  // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the\n  // message is unspecified, the filter will be enabled.\n  api.v2.core.RuntimeFeatureFlag enabled = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/aws_lambda/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.aws_lambda.v2alpha;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.aws_lambda.v2alpha\";\noption java_outer_classname = \"AwsLambdaProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.aws_lambda.v3\";\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: AWS Lambda]\n// AWS Lambda :ref:`configuration overview <config_http_filters_aws_lambda>`.\n// [#extension: envoy.filters.http.aws_lambda]\n\n// AWS Lambda filter config\nmessage Config {\n  enum InvocationMode {\n    // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In\n    // this mode the output of the Lambda function becomes the response of the HTTP request.\n    SYNCHRONOUS = 0;\n\n    // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be\n    // used to signal events for example. 
In this mode, Lambda responds with an acknowledgment that it received the\n    // call which is translated to an HTTP 200 OK by the filter.\n    ASYNCHRONOUS = 1;\n  }\n\n  // The ARN of the AWS Lambda to invoke when the filter is engaged\n  // Must be in the following format:\n  // arn:<partition>:lambda:<region>:<account-number>:function:<function-name>\n  string arn = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Whether to transform the request (headers and body) to a JSON payload or pass it as is.\n  bool payload_passthrough = 2;\n\n  // Determines the way to invoke the Lambda function.\n  InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different\n// version of the same Lambda depending on the route.\nmessage PerRouteConfig {\n  Config invoke_config = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.aws_request_signing.v2alpha;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.aws_request_signing.v2alpha\";\noption java_outer_classname = \"AwsRequestSigningProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.aws_request_signing.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: AwsRequestSigning]\n// AwsRequestSigning :ref:`configuration overview <config_http_filters_aws_request_signing>`.\n// [#extension: envoy.filters.http.aws_request_signing]\n\n// Top level configuration for the AWS request signing filter.\nmessage AwsRequestSigning {\n  // The `service namespace\n  // <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_\n  // of the HTTP endpoint.\n  //\n  // Example: s3\n  string service_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the HTTP\n  // endpoint.\n  //\n  // Example: us-west-2\n  string region = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Indicates that before signing headers, the host header will be swapped with\n  // this value. If not set or empty, the original host header value\n  // will be used and no rewrite will happen.\n  //\n  // Note: this rewrite affects both signing and host header forwarding. However, this\n  // option shouldn't be used with\n  // :ref:`HCM host rewrite <envoy_api_field_route.RouteAction.host_rewrite>` given that the\n  // value set here would be used for signing whereas the value set in the HCM would be used\n  // for host header forwarding which is not the desired outcome.\n  string host_rewrite = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/buffer/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/buffer/v2/buffer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.buffer.v2;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.buffer.v2\";\noption java_outer_classname = \"BufferProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.buffer.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Buffer]\n// Buffer :ref:`configuration overview <config_http_filters_buffer>`.\n// [#extension: envoy.filters.http.buffer]\n\nmessage Buffer {\n  reserved 2;\n\n  // The maximum request size that the filter will buffer before the connection\n  // manager will stop buffering and return a 413 response.\n  google.protobuf.UInt32Value max_request_bytes = 1\n      [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}];\n}\n\nmessage BufferPerRoute {\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the buffer filter for this particular vhost or route.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Override the global configuration of the filter with this new config.\n    Buffer buffer = 2 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/cache/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/cache/v2alpha/cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.cache.v2alpha;\n\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.cache.v2alpha\";\noption java_outer_classname = \"CacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.cache.v3alpha\";\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP Cache Filter]\n// [#extension: envoy.filters.http.cache]\n\nmessage CacheConfig {\n  // [#not-implemented-hide:]\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  message KeyCreatorParams {\n    // If true, exclude the URL scheme from the cache key. Set to true if your origins always\n    // produce the same response for http and https requests.\n    bool exclude_scheme = 1;\n\n    // If true, exclude the host from the cache key. Set to true if your origins' responses don't\n    // ever depend on host.\n    bool exclude_host = 2;\n\n    // If *query_parameters_included* is nonempty, only query parameters matched\n    // by one or more of its matchers are included in the cache key. 
Any other\n    // query params will not affect cache lookup.\n    repeated api.v2.route.QueryParameterMatcher query_parameters_included = 3;\n\n    // If *query_parameters_excluded* is nonempty, query parameters matched by one\n    // or more of its matchers are excluded from the cache key (even if also\n    // matched by *query_parameters_included*), and will not affect cache lookup.\n    repeated api.v2.route.QueryParameterMatcher query_parameters_excluded = 4;\n  }\n\n  // Config specific to the cache storage implementation.\n  google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];\n\n  // List of matching rules that defines allowed *Vary* headers.\n  //\n  // The *vary* response header holds a list of header names that affect the\n  // contents of a response, as described by\n  // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.\n  //\n  // During insertion, *allowed_vary_headers* acts as a allowlist: if a\n  // response's *vary* header mentions any header names that aren't matched by any rules in\n  // *allowed_vary_headers*, that response will not be cached.\n  //\n  // During lookup, *allowed_vary_headers* controls what request headers will be\n  // sent to the cache storage implementation.\n  repeated type.matcher.StringMatcher allowed_vary_headers = 2;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement key customization>\n  //\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  KeyCreatorParams key_creator_params = 3;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement size limit>\n  //\n  // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache\n  // storage implementation may have its own limit beyond which it will reject insertions).\n  uint32 max_body_bytes = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/compressor/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/compressor/v2/compressor.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.compressor.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.compressor.v2\";\noption java_outer_classname = \"CompressorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.compressor.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Compressor]\n\n// [#next-free-field: 6]\nmessage Compressor {\n  // Minimum response length, in bytes, which will trigger compression. The default value is 30.\n  google.protobuf.UInt32Value content_length = 1;\n\n  // Set of strings that allows specifying which mime-types yield compression; e.g.,\n  // application/json, text/html, etc. When this field is not defined, compression will be applied\n  // to the following mime-types: \"application/javascript\", \"application/json\",\n  // \"application/xhtml+xml\", \"image/svg+xml\", \"text/css\", \"text/html\", \"text/plain\", \"text/xml\"\n  // and their synonyms.\n  repeated string content_type = 2;\n\n  // If true, disables compression when the response contains an etag header. When it is false, the\n  // filter will preserve weak etags and remove the ones that require strong validation.\n  bool disable_on_etag_header = 3;\n\n  // If true, removes accept-encoding from the request headers before dispatching it to the upstream\n  // so that responses do not get compressed before reaching the filter.\n  // .. attention:\n  //\n  //    To avoid interfering with other compression filters in the same chain use this option in\n  //    the filter closest to the upstream.\n  bool remove_accept_encoding_header = 4;\n\n  // Runtime flag that controls whether the filter is enabled or not. 
If set to false, the\n  // filter will operate as a pass-through filter. If not specified, defaults to enabled.\n  api.v2.core.RuntimeFeatureFlag runtime_enabled = 5;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/cors/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/cors/v2/cors.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.cors.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.cors.v2\";\noption java_outer_classname = \"CorsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.cors.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Cors]\n// CORS Filter :ref:`configuration overview <config_http_filters_cors>`.\n// [#extension: envoy.filters.http.cors]\n\n// Cors filter config.\nmessage Cors {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/csrf/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/csrf/v2/csrf.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.csrf.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.csrf.v2\";\noption java_outer_classname = \"CsrfProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.csrf.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: CSRF]\n// Cross-Site Request Forgery :ref:`configuration overview <config_http_filters_csrf>`.\n// [#extension: envoy.filters.http.csrf]\n\n// CSRF filter config.\nmessage CsrfPolicy {\n  // Specifies the % of requests for which the CSRF filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // .. 
note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.FractionalPercent.DenominatorType>`.\n  api.v2.core.RuntimeFractionalPercent filter_enabled = 1\n      [(validate.rules).message = {required: true}];\n\n  // Specifies that CSRF policies will be evaluated and tracked, but not enforced.\n  //\n  // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise.\n  //\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* and *Destination* to determine if it's valid, but will not\n  // enforce any policies.\n  api.v2.core.RuntimeFractionalPercent shadow_enabled = 2;\n\n  // Specifies additional source origins that will be allowed in addition to\n  // the destination origin.\n  //\n  // More information on how this can be configured via runtime can be found\n  // :ref:`here <csrf-configuration>`.\n  repeated type.matcher.StringMatcher additional_origins = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.dynamic_forward_proxy.v2alpha;\n\nimport \"envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha\";\noption java_outer_classname = \"DynamicForwardProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.dynamic_forward_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamic forward proxy]\n\n// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#extension: envoy.filters.http.dynamic_forward_proxy]\nmessage FilterConfig {\n  // The DNS cache configuration that the filter will attach to. Note this configuration must\n  // match that of associated :ref:`dynamic forward proxy cluster configuration\n  // <envoy_api_field_config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// Per route Configuration for the dynamic forward proxy HTTP filter.\nmessage PerRouteConfig {\n  oneof host_rewrite_specifier {\n    // Indicates that before DNS lookup, the host header will be swapped with\n    // this value. If not set or empty, the original host header value\n    // will be used and no rewrite will happen.\n    //\n    // Note: this rewrite affects both DNS lookup and host header forwarding. 
However, this\n    // option shouldn't be used with\n    // :ref:`HCM host rewrite <envoy_api_field_route.RouteAction.host_rewrite>` given that the\n    // value set here would be used for DNS lookups whereas the value set in the HCM would be used\n    // for host header forwarding which is not the desired outcome.\n    string host_rewrite = 1 [(udpa.annotations.field_migrate).rename = \"host_rewrite_literal\"];\n\n    // Indicates that before DNS lookup, the host header will be swapped with\n    // the value of this header. If not set or empty, the original host header\n    // value will be used and no rewrite will happen.\n    //\n    // Note: this rewrite affects both DNS lookup and host header forwarding. However, this\n    // option shouldn't be used with\n    // :ref:`HCM host rewrite header <envoy_api_field_route.RouteAction.auto_host_rewrite_header>`\n    // given that the value set here would be used for DNS lookups whereas the value set in the HCM\n    // would be used for host header forwarding which is not the desired outcome.\n    string auto_host_rewrite_header = 2\n        [(udpa.annotations.field_migrate).rename = \"host_rewrite_header\"];\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/dynamo/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/dynamo/v2/dynamo.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.dynamo.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.dynamo.v2\";\noption java_outer_classname = \"DynamoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.dynamo.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamo]\n// Dynamo :ref:`configuration overview <config_http_filters_dynamo>`.\n// [#extension: envoy.filters.http.dynamo]\n\n// Dynamo filter config.\nmessage Dynamo {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/ext_authz/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.ext_authz.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/grpc_service.proto\";\nimport \"envoy/api/v2/core/http_uri.proto\";\nimport \"envoy/type/http_status.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.ext_authz.v2\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.ext_authz.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: External Authorization]\n// External Authorization :ref:`configuration overview <config_http_filters_ext_authz>`.\n// [#extension: envoy.filters.http.ext_authz]\n\n// [#next-free-field: 12]\nmessage ExtAuthz {\n  // External authorization service configuration.\n  oneof services {\n    // gRPC service configuration (default timeout: 200ms).\n    api.v2.core.GrpcService grpc_service = 1;\n\n    // HTTP service configuration (default timeout: 200ms).\n    HttpService http_service = 3;\n  }\n\n  //  Changes filter's behaviour on errors:\n  //\n  //  1. When set to true, the filter will *accept* client request even if the communication with\n  //  the authorization service has failed, or if the authorization service has returned a HTTP 5xx\n  //  error.\n  //\n  //  2. 
When set to false, ext-authz will *reject* client requests and return a *Forbidden*\n  //  response if the communication with the authorization service has failed, or if the\n  //  authorization service has returned a HTTP 5xx error.\n  //\n  // Note that errors can be *always* tracked in the :ref:`stats\n  // <config_http_filters_ext_authz_stats>`.\n  bool failure_mode_allow = 2;\n\n  // Sets the package version the gRPC service should use. This is particularly\n  // useful when transitioning from alpha to release versions assuming that both definitions are\n  // semantically compatible. Deprecation note: This field is deprecated and should only be used for\n  // version upgrade. See release notes for more details.\n  bool use_alpha = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Enables filter to buffer the client request body and send it within the authorization request.\n  // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization\n  // request message indicating if the body data is partial.\n  BufferSettings with_request_body = 5;\n\n  // Clears route cache in order to allow the external authorization service to correctly affect\n  // routing decisions. Filter clears all cached routes when:\n  //\n  // 1. The field is set to *true*.\n  //\n  // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0.\n  //\n  // 3. At least one *authorization response header* is added to the client request, or is used for\n  // altering another client request header.\n  //\n  bool clear_route_cache = 6;\n\n  // Sets the HTTP status that is returned to the client when there is a network error between the\n  // filter and the authorization server. 
The default status is HTTP 403 Forbidden.\n  type.HttpStatus status_on_error = 7;\n\n  // Specifies a list of metadata namespaces whose values, if present, will be passed to the\n  // ext_authz service as an opaque *protobuf::Struct*.\n  //\n  // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata\n  // <envoy_api_field_config.filter.http.jwt_authn.v2alpha.JwtProvider.payload_in_metadata>` is set,\n  // then the following will pass the jwt payload to the authorization server.\n  //\n  // .. code-block:: yaml\n  //\n  //    metadata_context_namespaces:\n  //    - envoy.filters.http.jwt_authn\n  //\n  repeated string metadata_context_namespaces = 8;\n\n  // Specifies if the filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // If this field is not specified, the filter will be enabled for all requests.\n  api.v2.core.RuntimeFractionalPercent filter_enabled = 9;\n\n  // Specifies whether to deny the requests, when the filter is disabled.\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFeatureFlag.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to determine whether to deny request for\n  // filter protected path at filter disabling. 
If filter is disabled in\n  // typed_per_filter_config for the path, requests will not be denied.\n  //\n  // If this field is not specified, all requests will be allowed when disabled.\n  api.v2.core.RuntimeFeatureFlag deny_at_disable = 11;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v2.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 10;\n}\n\n// Configuration for buffering the request data.\nmessage BufferSettings {\n  // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return\n  // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number\n  // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow\n  // <envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.failure_mode_allow>`.\n  uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached.\n  // The authorization request will be dispatched and no 413 HTTP error will be returned by the\n  // filter.\n  bool allow_partial_message = 2;\n}\n\n// HttpService is used for raw HTTP communication between the filter and the authorization service.\n// When configured, the filter will parse the client request and use these attributes to call the\n// authorization server. Depending on the response, the filter may reject or accept the client\n// request. Note that in any of these events, metadata can be added, removed or overridden by the\n// filter:\n//\n// *On authorization request*, a list of allowed request headers may be supplied. See\n// :ref:`allowed_headers\n// <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationRequest.allowed_headers>`\n// for details. 
Additional headers metadata may be added to the authorization request. See\n// :ref:`headers_to_add\n// <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationRequest.headers_to_add>` for\n// details.\n//\n// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and\n// additional headers metadata may be added to the original client request. See\n// :ref:`allowed_upstream_headers\n// <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationResponse.allowed_upstream_headers>`\n// for details.\n//\n// On other authorization response statuses, the filter will not allow traffic. Additional headers\n// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers\n// <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationResponse.allowed_client_headers>`\n// for details.\n// [#next-free-field: 9]\nmessage HttpService {\n  reserved 3, 4, 5, 6;\n\n  // Sets the HTTP server URI which the authorization requests must be sent to.\n  api.v2.core.HttpUri server_uri = 1;\n\n  // Sets a prefix to the value of authorization request header *Path*.\n  string path_prefix = 2;\n\n  // Settings used for controlling authorization request metadata.\n  AuthorizationRequest authorization_request = 7;\n\n  // Settings used for controlling authorization response metadata.\n  AuthorizationResponse authorization_response = 8;\n}\n\nmessage AuthorizationRequest {\n  // Authorization request will include the client request headers that have a correspondent match\n  // in the :ref:`list <envoy_api_msg_type.matcher.ListStringMatcher>`. Note that in addition to the\n  // user's supplied matchers:\n  //\n  // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list.\n  //\n  // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have\n  // a message body. 
However, the authorization request can include the buffered client request body\n  // (controlled by :ref:`with_request_body\n  // <envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.with_request_body>` setting),\n  // consequently the value of *Content-Length* of the authorization request reflects the size of\n  // its payload size.\n  //\n  type.matcher.ListStringMatcher allowed_headers = 1;\n\n  // Sets a list of headers that will be included to the request to authorization service. Note that\n  // client request of the same key will be overridden.\n  repeated api.v2.core.HeaderValue headers_to_add = 2;\n}\n\nmessage AuthorizationResponse {\n  // When this :ref:`list <envoy_api_msg_type.matcher.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the original client request.\n  // Note that coexistent headers will be overridden.\n  type.matcher.ListStringMatcher allowed_upstream_headers = 1;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.ListStringMatcher>`. is set, authorization\n  // response headers that have a correspondent match will be added to the client's response. Note\n  // that when this list is *not* set, all the authorization response headers, except *Authority\n  // (Host)* will be in the response to the client. 
When a header is included in this list, *Path*,\n  // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added.\n  type.matcher.ListStringMatcher allowed_client_headers = 2;\n}\n\n// Extra settings on a per virtualhost/route/weighted-cluster level.\nmessage ExtAuthzPerRoute {\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the ext auth filter for this particular vhost or route.\n    // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Check request settings for this route.\n    CheckSettings check_settings = 2 [(validate.rules).message = {required: true}];\n  }\n}\n\n// Extra settings for the check request. You can use this to provide extra context for the\n// external authorization server on specific virtual hosts \\ routes. For example, adding a context\n// extension on the virtual host level can give the ext-authz server information on what virtual\n// host is used without needing to parse the host header. If CheckSettings is specified in multiple\n// per-filter-configs, they will be merged in order, and the result will be used.\nmessage CheckSettings {\n  // Context extensions to set on the CheckRequest's\n  // :ref:`AttributeContext.context_extensions<envoy_api_field_service.auth.v2.AttributeContext.context_extensions>`\n  //\n  // Merge semantics for this field are such that keys from more specific configs override.\n  //\n  // .. note::\n  //\n  //   These settings are only applied to a filter configured with a\n  //   :ref:`grpc_service<envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.grpc_service>`.\n  map<string, string> context_extensions = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/fault/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/config/filter/fault/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/fault/v2/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.fault.v2;\n\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/config/filter/fault/v2/fault.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.fault.v2\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.fault.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Fault Injection]\n// Fault Injection :ref:`configuration overview <config_http_filters_fault_injection>`.\n// [#extension: envoy.filters.http.fault]\n\nmessage FaultAbort {\n  // Fault aborts are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderAbort {\n  }\n\n  reserved 1;\n\n  oneof error_type {\n    option (validate.required) = true;\n\n    // HTTP status code to use to abort the HTTP request.\n    uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n    // Fault aborts are controlled via an HTTP header (if applicable).\n    HeaderAbort header_abort = 4;\n  }\n\n  // The percentage of requests/operations/connections that will be aborted with the error code\n  // provided.\n  type.FractionalPercent percentage = 3;\n}\n\n// [#next-free-field: 14]\nmessage HTTPFault {\n  // If specified, the filter will inject delays based on the values in the\n  // object.\n  filter.fault.v2.FaultDelay delay = 1;\n\n  // If specified, the filter will abort requests based on the values in\n  // the object. 
At least *abort* or *delay* must be specified.\n  FaultAbort abort = 2;\n\n  // Specifies the name of the (destination) upstream cluster that the\n  // filter should match on. Fault injection will be restricted to requests\n  // bound to the specific upstream cluster.\n  string upstream_cluster = 3;\n\n  // Specifies a set of headers that the filter should match on. The fault\n  // injection filter can be applied selectively to requests that match a set of\n  // headers specified in the fault filter config. The chances of actual fault\n  // injection further depend on the value of the :ref:`percentage\n  // <envoy_api_field_config.filter.http.fault.v2.FaultAbort.percentage>` field.\n  // The filter will check the request's headers against all the specified\n  // headers in the filter config. A match will happen if all the headers in the\n  // config are present in the request with the same values (or based on\n  // presence if the *value* field is not in the config).\n  repeated api.v2.route.HeaderMatcher headers = 4;\n\n  // Faults are injected for the specified list of downstream hosts. If this\n  // setting is not set, faults are injected for all downstream nodes.\n  // Downstream node name is taken from :ref:`the HTTP\n  // x-envoy-downstream-service-node\n  // <config_http_conn_man_headers_downstream-service-node>` header and compared\n  // against downstream_nodes list.\n  repeated string downstream_nodes = 5;\n\n  // The maximum number of faults that can be active at a single time via the configured fault\n  // filter. Note that because this setting can be overridden at the route level, it's possible\n  // for the number of active faults to be greater than this value (if injected via a different\n  // route). If not specified, defaults to unlimited. 
This setting can be overridden via\n  // `runtime <config_http_filters_fault_injection_runtime>` and any faults that are not injected\n  // due to overflow will be indicated via the `faults_overflow\n  // <config_http_filters_fault_injection_stats>` stat.\n  //\n  // .. attention::\n  //   Like other :ref:`circuit breakers <arch_overview_circuit_break>` in Envoy, this is a fuzzy\n  //   limit. It's possible for the number of active faults to rise slightly above the configured\n  //   amount due to the implementation details.\n  google.protobuf.UInt32Value max_active_faults = 6;\n\n  // The response rate limit to be applied to the response body of the stream. When configured,\n  // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent\n  // <config_http_filters_fault_injection_runtime>` runtime key.\n  //\n  // .. attention::\n  //  This is a per-stream limit versus a connection level limit. This means that concurrent streams\n  //  will each get an independent limit.\n  filter.fault.v2.FaultRateLimit response_rate_limit = 7;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_delay_percent\n  string delay_percent_runtime = 8;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.abort_percent\n  string abort_percent_runtime = 9;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_duration_ms\n  string delay_duration_runtime = 10;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. 
The default is: fault.http.abort.http_status\n  string abort_http_status_runtime = 11;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.max_active_faults\n  string max_active_faults_runtime = 12;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.rate_limit.response_percent\n  string response_rate_limit_percent_runtime = 13;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.grpc_http1_bridge.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.grpc_http1_bridge.v2\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_http1_bridge.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC HTTP/1.1 Bridge]\n// gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview <config_http_filters_grpc_bridge>`.\n// [#extension: envoy.filters.http.grpc_http1_bridge]\n\n// gRPC HTTP/1.1 Bridge filter config.\nmessage Config {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge]\n// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview\n// <config_http_filters_grpc_http1_reverse_bridge>`.\n// [#extension: envoy.filters.http.grpc_http1_reverse_bridge]\n\n// gRPC reverse bridge filter configuration\nmessage FilterConfig {\n  // The content-type to pass to the upstream when the gRPC bridge filter is applied.\n  // The filter will also validate that the upstream responds with the same content type.\n  string content_type = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // If true, Envoy will assume that the upstream doesn't understand gRPC frames and\n  // strip the gRPC frame from the request, and add it back in to the response. This will\n  // hide the gRPC semantics from the upstream, allowing it to receive and respond with a\n  // simple binary encoded protobuf.\n  bool withhold_grpc_frames = 2;\n}\n\n// gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level.\nmessage FilterConfigPerRoute {\n  // If true, disables gRPC reverse bridge filter for this particular vhost or route.\n  // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n  bool disabled = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/grpc_stats/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.grpc_stats.v2alpha;\n\nimport \"envoy/api/v2/core/grpc_method_list.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.grpc_stats.v2alpha\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_stats.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC statistics] gRPC statistics filter\n// :ref:`configuration overview <config_http_filters_grpc_stats>`.\n// [#extension: envoy.filters.http.grpc_stats]\n\n// gRPC statistics filter configuration\nmessage FilterConfig {\n  // If true, the filter maintains a filter state object with the request and response message\n  // counts.\n  bool emit_filter_state = 1;\n\n  oneof per_method_stat_specifier {\n    // If set, specifies an allowlist of service/methods that will have individual stats\n    // emitted for them. Any call that does not match the allowlist will be counted\n    // in a stat with no method specifier: `cluster.<name>.grpc.*`.\n    api.v2.core.GrpcMethodList individual_method_stats_allowlist = 2;\n\n    // If set to true, emit stats for all service/method names.\n    //\n    // If set to false, emit stats for all service/message types to the same stats without including\n    // the service/method in the name, with prefix `cluster.<name>.grpc`. This can be useful if\n    // service/method granularity is not needed, or if each cluster only receives a single method.\n    //\n    // .. attention::\n    //   This option is only safe if all clients are trusted. 
If this option is enabled\n    //   with untrusted clients, the clients could cause unbounded growth in the number of stats in\n    //   Envoy, using unbounded memory and potentially slowing down stats pipelines.\n    //\n    // .. attention::\n    //   If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the\n    //   behavior will default to `stats_for_all_methods=true`. This default value is deprecated,\n    //   and in a future release, if neither field is set, it will default to\n    //   `stats_for_all_methods=false` in order to be safe by default. This behavior can be\n    //   controlled with runtime override\n    //   `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`.\n    google.protobuf.BoolValue stats_for_all_methods = 3;\n  }\n}\n\n// gRPC statistics filter state object in protobuf form.\nmessage FilterObject {\n  // Count of request messages in the request stream.\n  uint64 request_message_count = 1;\n\n  // Count of response messages in the response stream.\n  uint64 response_message_count = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/grpc_web/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/grpc_web/v2/grpc_web.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.grpc_web.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.grpc_web.v2\";\noption java_outer_classname = \"GrpcWebProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_web.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC Web]\n// gRPC Web :ref:`configuration overview <config_http_filters_grpc_web>`.\n// [#extension: envoy.filters.http.grpc_web]\n\n// gRPC Web filter config.\nmessage GrpcWeb {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/gzip/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/compressor/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/gzip/v2/gzip.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.gzip.v2;\n\nimport \"envoy/config/filter/http/compressor/v2/compressor.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.gzip.v2\";\noption java_outer_classname = \"GzipProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.gzip.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Gzip]\n// Gzip :ref:`configuration overview <config_http_filters_gzip>`.\n// [#extension: envoy.filters.http.gzip]\n\n// [#next-free-field: 11]\nmessage Gzip {\n  enum CompressionStrategy {\n    DEFAULT = 0;\n    FILTERED = 1;\n    HUFFMAN = 2;\n    RLE = 3;\n  }\n\n  message CompressionLevel {\n    enum Enum {\n      DEFAULT = 0;\n      BEST = 1;\n      SPEED = 2;\n    }\n  }\n\n  // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values\n  // use more memory, but are faster and produce better compression results. The default value is 5.\n  google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}];\n\n  // Minimum response length, in bytes, which will trigger compression. The default value is 30.\n  // .. attention:\n  //\n  //    **This field is deprecated**. Set the `compressor` field instead.\n  google.protobuf.UInt32Value content_length = 2 [deprecated = true];\n\n  // A value used for selecting the zlib compression level. This setting will affect speed and\n  // amount of compression applied to the content. 
\"BEST\" provides higher compression at the cost of\n  // higher latency, \"SPEED\" provides lower compression with minimum impact on response time.\n  // \"DEFAULT\" provides an optimal result between speed and compression. This field will be set to\n  // \"DEFAULT\" if not specified.\n  CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // A value used for selecting the zlib compression strategy which is directly related to the\n  // characteristics of the content. Most of the time \"DEFAULT\" will be the best choice, though\n  // there are situations which changing this parameter might produce better results. For example,\n  // run-length encoding (RLE) is typically used when the content is known for having sequences\n  // which same data occurs many consecutive times. For more information about each strategy, please\n  // refer to zlib manual.\n  CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // Set of strings that allows specifying which mime-types yield compression; e.g.,\n  // application/json, text/html, etc. When this field is not defined, compression will be applied\n  // to the following mime-types: \"application/javascript\", \"application/json\",\n  // \"application/xhtml+xml\", \"image/svg+xml\", \"text/css\", \"text/html\", \"text/plain\", \"text/xml\".\n  // .. attention:\n  //\n  //    **This field is deprecated**. Set the `compressor` field instead.\n  repeated string content_type = 6 [deprecated = true];\n\n  // If true, disables compression when the response contains an etag header. When it is false, the\n  // filter will preserve weak etags and remove the ones that require strong validation.\n  // .. attention:\n  //\n  //    **This field is deprecated**. 
Set the `compressor` field instead.\n  bool disable_on_etag_header = 7 [deprecated = true];\n\n  // If true, removes accept-encoding from the request headers before dispatching it to the upstream\n  // so that responses do not get compressed before reaching the filter.\n  // .. attention:\n  //\n  //    **This field is deprecated**. Set the `compressor` field instead.\n  bool remove_accept_encoding_header = 8 [deprecated = true];\n\n  // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size.\n  // Larger window results in better compression at the expense of memory usage. The default is 12\n  // which will produce a 4096 bytes window. For more details about this parameter, please refer to\n  // zlib manual > deflateInit2.\n  google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}];\n\n  // Set of configuration parameters common for all compression filters. If this field is set then\n  // the fields `content_length`, `content_type`, `disable_on_etag_header` and\n  // `remove_accept_encoding_header` are ignored.\n  compressor.v2.Compressor compressor = 10;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/header_to_metadata/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.header_to_metadata.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2\";\noption java_outer_classname = \"HeaderToMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.header_to_metadata.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Header-To-Metadata Filter]\n//\n// The configuration for transforming headers into metadata. This is useful\n// for matching load balancer subsets, logging, etc.\n//\n// Header to Metadata :ref:`configuration overview <config_http_filters_header_to_metadata>`.\n// [#extension: envoy.filters.http.header_to_metadata]\n\nmessage Config {\n  enum ValueType {\n    STRING = 0;\n\n    NUMBER = 1;\n\n    // The value is a serialized `protobuf.Value\n    // <https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/struct.proto#L62>`_.\n    PROTOBUF_VALUE = 2;\n  }\n\n  // ValueEncode defines the encoding algorithm.\n  enum ValueEncode {\n    // The value is not encoded.\n    NONE = 0;\n\n    // The value is encoded in `Base64 <https://tools.ietf.org/html/rfc4648#section-4>`_.\n    // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the\n    // non-ASCII characters in the header.\n    BASE64 = 1;\n  }\n\n  // [#next-free-field: 6]\n  message KeyValuePair {\n    // The namespace — if this is empty, the filter's namespace will be used.\n    string metadata_namespace = 1;\n\n    // The key to use within the namespace.\n    string key = 2 [(validate.rules).string = {min_bytes: 1}];\n\n    // The value to pair with the given key.\n    //\n    // When used for a `on_header_present` case, if value is non-empty it'll be used\n    // 
instead of the header value. If both are empty, no metadata is added.\n    //\n    // When used for a `on_header_missing` case, a non-empty value must be provided\n    // otherwise no metadata is added.\n    string value = 3;\n\n    // The value's type — defaults to string.\n    ValueType type = 4;\n\n    // How is the value encoded, default is NONE (not encoded).\n    // The value will be decoded accordingly before storing to metadata.\n    ValueEncode encode = 5;\n  }\n\n  // A Rule defines what metadata to apply when a header is present or missing.\n  message Rule {\n    // The header that triggers this rule — required.\n    string header = 1\n        [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // If the header is present, apply this metadata KeyValuePair.\n    //\n    // If the value in the KeyValuePair is non-empty, it'll be used instead\n    // of the header value.\n    KeyValuePair on_header_present = 2;\n\n    // If the header is not present, apply this metadata KeyValuePair.\n    //\n    // The value in the KeyValuePair must be set, since it'll be used in lieu\n    // of the missing header value.\n    KeyValuePair on_header_missing = 3;\n\n    // Whether or not to remove the header after a rule is applied.\n    //\n    // This prevents headers from leaking.\n    bool remove = 4;\n  }\n\n  // The list of rules to apply to requests.\n  repeated Rule request_rules = 1;\n\n  // The list of rules to apply to responses.\n  repeated Rule response_rules = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/health_check/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/health_check/v2/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.health_check.v2;\n\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.health_check.v2\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.health_check.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Health check]\n// Health check :ref:`configuration overview <config_http_filters_health_check>`.\n// [#extension: envoy.filters.http.health_check]\n\n// [#next-free-field: 6]\nmessage HealthCheck {\n  reserved 2;\n\n  // Specifies whether the filter operates in pass through mode or not.\n  google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}];\n\n  // If operating in pass through mode, the amount of time in milliseconds\n  // that the filter should cache the upstream response.\n  google.protobuf.Duration cache_time = 3;\n\n  // If operating in non-pass-through mode, specifies a set of upstream cluster\n  // names and the minimum percentage of servers in each of those clusters that\n  // must be healthy or degraded in order for the filter to return a 200.\n  //\n  // .. note::\n  //\n  //    This value is interpreted as an integer by truncating, so 12.50% will be calculated\n  //    as if it were 12%.\n  map<string, type.Percent> cluster_min_healthy_percentages = 4;\n\n  // Specifies a set of health check request headers to match on. The health check filter will\n  // check a request’s headers against all the specified headers. 
To specify the health check\n  // endpoint, set the ``:path`` header to match on.\n  repeated api.v2.route.HeaderMatcher headers = 5;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/ip_tagging/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.ip_tagging.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.ip_tagging.v2\";\noption java_outer_classname = \"IpTaggingProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.ip_tagging.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: IP tagging]\n// IP tagging :ref:`configuration overview <config_http_filters_ip_tagging>`.\n// [#extension: envoy.filters.http.ip_tagging]\n\nmessage IPTagging {\n  // The type of requests the filter should apply to. The supported types\n  // are internal, external or both. The\n  // :ref:`x-forwarded-for<config_http_conn_man_headers_x-forwarded-for_internal_origin>` header is\n  // used to determine if a request is internal and will result in\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>`\n  // being set. The filter defaults to both, and it will apply to all request types.\n  enum RequestType {\n    // Both external and internal requests will be tagged. This is the default value.\n    BOTH = 0;\n\n    // Only internal requests will be tagged.\n    INTERNAL = 1;\n\n    // Only external requests will be tagged.\n    EXTERNAL = 2;\n  }\n\n  // Supplies the IP tag name and the IP address subnets.\n  message IPTag {\n    // Specifies the IP tag name to apply.\n    string ip_tag_name = 1;\n\n    // A list of IP address subnets that will be tagged with\n    // ip_tag_name. 
Both IPv4 and IPv6 are supported.\n    repeated api.v2.core.CidrRange ip_list = 2;\n  }\n\n  // The type of request the filter should apply to.\n  RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system.\n  // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695]\n  // The set of IP tags for the filter.\n  repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/jwt_authn/v2alpha/README.md",
    "content": "# JWT Authentication HTTP filter config\n\n## Overview\n\n1. The proto file in this folder defines an HTTP filter config for \"jwt_authn\" filter.\n\n2. This filter will verify the JWT in the HTTP request as:\n    - The signature should be valid\n    - JWT should not be expired\n    - Issuer and audiences are valid and specified in the filter config.\n\n3. [JWK](https://tools.ietf.org/html/rfc7517#appendix-A) is needed to verify JWT signature. It can be fetched from a remote server or read from a local file. If the JWKS is fetched remotely, it will be cached by the filter.\n\n4. If a JWT is valid, the user is authenticated and the request will be forwarded to the backend server. If a JWT is not valid, the request will be rejected with an error message.\n\n## The locations to extract JWT\n\nJWT will be extracted from the HTTP headers or query parameters. The default location is the HTTP header:\n```\nAuthorization: Bearer <token>\n```\nThe next default location is in the query parameter as:\n```\n?access_token=<TOKEN>\n```\n\nIf a custom location is desired, `from_headers` or `from_params` can be used to specify custom locations to extract JWT.\n\n## HTTP header to pass successfully verified JWT\n\nIf a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. 
Its value is base64url-encoded JWT payload in JSON.\n\n\n## Further header options\n\nIn addition to the `name` field, which specifies the HTTP header name,\nthe `from_headers` section can specify an optional `value_prefix` value, as in:\n\n```yaml\n    from_headers:\n      - name: bespoke\n        value_prefix: jwt_value\n```\n\nThe above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following the tag `jwt_value`.\n\nAny non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`, and `.`) will be skipped,\nand all following, contiguous, JWT-legal chars will be taken as the JWT.\n\nThis means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`:\n\n```text\nbespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk\n\nbespoke: {\"jwt_value\": \"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk\"}\n\nbespoke: beta:true,jwt_value:\"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk\",trace=1234\n```\n\nThe header `name` may be `Authorization`.\n\nThe `value_prefix` must match exactly, i.e., case-sensitively.\nIf the `value_prefix` is not found, the header is skipped: not considered as a source for a JWT token.\n\nIf there are no JWT-legal characters after the `value_prefix`, the entire string after it\nis taken to be the JWT token. This is unlikely to succeed; the error will be reported by the JWT parser."
  },
  {
    "path": "api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.jwt_authn.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/http_uri.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.jwt_authn.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: JWT Authentication]\n// JWT Authentication :ref:`configuration overview <config_http_filters_jwt_authn>`.\n// [#extension: envoy.filters.http.jwt_authn]\n\n// Please see following for JWT authentication flow:\n//\n// * `JSON Web Token (JWT) <https://tools.ietf.org/html/rfc7519>`_\n// * `The OAuth 2.0 Authorization Framework <https://tools.ietf.org/html/rfc6749>`_\n// * `OpenID Connect <http://openid.net/connect>`_\n//\n// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies:\n//\n// * issuer: the principal that issues the JWT. It has to match the one from the token.\n// * allowed audiences: the ones in the token have to be listed here.\n// * how to fetch public key JWKS to verify the token signature.\n// * how to extract JWT token in the request.\n// * how to pass successfully verified token payload.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     issuer: https://example.com\n//     audiences:\n//     - bookstore_android.apps.googleusercontent.com\n//     - bookstore_web.apps.googleusercontent.com\n//     remote_jwks:\n//       http_uri:\n//         uri: https://example.com/.well-known/jwks.json\n//         cluster: example_jwks_cluster\n//       cache_duration:\n//         seconds: 300\n//\n// [#next-free-field: 10]\nmessage JwtProvider {\n  // Specify the `principal <https://tools.ietf.org/html/rfc7519#section-4.1.1>`_ that issued\n  // the JWT, usually a URL or an email address.\n  //\n  // Example: https://securetoken.google.com\n  // Example: 1234567-compute@developer.gserviceaccount.com\n  //\n  string issuer = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The list of JWT `audiences <https://tools.ietf.org/html/rfc7519#section-4.1.3>`_ are\n  // allowed to access. A JWT containing any of these audiences will be accepted. If not specified,\n  // will not check audiences in the token.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //     audiences:\n  //     - bookstore_android.apps.googleusercontent.com\n  //     - bookstore_web.apps.googleusercontent.com\n  //\n  repeated string audiences = 2;\n\n  // `JSON Web Key Set (JWKS) <https://tools.ietf.org/html/rfc7517#appendix-A>`_ is needed to\n  // validate signature of a JWT. This field specifies where to fetch JWKS.\n  oneof jwks_source_specifier {\n    option (validate.required) = true;\n\n    // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP\n    // URI and how the fetched JWKS should be cached.\n    //\n    // Example:\n    //\n    // .. 
code-block:: yaml\n    //\n    //    remote_jwks:\n    //      http_uri:\n    //        uri: https://www.googleapis.com/oauth2/v1/certs\n    //        cluster: jwt.www.googleapis.com|443\n    //      cache_duration:\n    //        seconds: 300\n    //\n    RemoteJwks remote_jwks = 3;\n\n    // JWKS is in local data source. It could be either in a local file or embedded in the\n    // inline_string.\n    //\n    // Example: local file\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      filename: /etc/envoy/jwks/jwks1.txt\n    //\n    // Example: inline_string\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      inline_string: ACADADADADA\n    //\n    api.v2.core.DataSource local_jwks = 4;\n  }\n\n  // If false, the JWT is removed in the request after a success verification. If true, the JWT is\n  // not removed in the request. Default value is false.\n  bool forward = 5;\n\n  // Two fields below define where to extract the JWT from an HTTP request.\n  //\n  // If no explicit location is specified, the following default locations are tried in order:\n  //\n  // 1. The Authorization header using the `Bearer schema\n  // <https://tools.ietf.org/html/rfc6750#section-2.1>`_. Example::\n  //\n  //    Authorization: Bearer <token>.\n  //\n  // 2. `access_token <https://tools.ietf.org/html/rfc6750#section-2.3>`_ query parameter.\n  //\n  // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations\n  // its provider specified or from the default locations.\n  //\n  // Specify the HTTP headers to extract JWT token. For examples, following config:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_headers:\n  //   - name: x-goog-iap-jwt-assertion\n  //\n  // can be used to extract token from header::\n  //\n  //   ``x-goog-iap-jwt-assertion: <JWT>``.\n  //\n  repeated JwtHeader from_headers = 6;\n\n  // JWT is sent in a query parameter. 
`jwt_params` represents the query parameter names.\n  //\n  // For example, if config is:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_params:\n  //   - jwt_token\n  //\n  // The JWT format in query parameter is::\n  //\n  //    /path?jwt_token=<JWT>\n  //\n  repeated string from_params = 7;\n\n  // This field specifies the header name to forward a successfully verified JWT payload to the\n  // backend. The forwarded data is::\n  //\n  //    base64url_encoded(jwt_payload_in_JSON)\n  //\n  // If it is not specified, the payload will not be forwarded.\n  string forward_payload_header = 8;\n\n  // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata\n  // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn**\n  // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields*\n  // and the value is the *protobuf::Struct* converted from JWT JSON payload.\n  //\n  // For example, if payload_in_metadata is *my_payload*:\n  //\n  // .. code-block:: yaml\n  //\n  //   envoy.filters.http.jwt_authn:\n  //     my_payload:\n  //       iss: https://example.com\n  //       sub: test@example.com\n  //       aud: https://example.com\n  //       exp: 1501281058\n  //\n  string payload_in_metadata = 9;\n}\n\n// This message specifies how to fetch JWKS from remote and how to cache it.\nmessage RemoteJwks {\n  // The HTTP URI to fetch the JWKS. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //    http_uri:\n  //      uri: https://www.googleapis.com/oauth2/v1/certs\n  //      cluster: jwt.www.googleapis.com|443\n  //\n  api.v2.core.HttpUri http_uri = 1;\n\n  // Duration after which the cached JWKS should be expired. 
If not specified, default cache\n  // duration is 5 minutes.\n  google.protobuf.Duration cache_duration = 2;\n}\n\n// This message specifies a header location to extract JWT token.\nmessage JwtHeader {\n  // The HTTP header name.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The value prefix. The value format is \"value_prefix<token>\"\n  // For example, for \"Authorization: Bearer <token>\", value_prefix=\"Bearer \" with a space at the\n  // end.\n  string value_prefix = 2;\n}\n\n// Specify a required provider with audiences.\nmessage ProviderWithAudiences {\n  // Specify a required provider name.\n  string provider_name = 1;\n\n  // This field overrides the one specified in the JwtProvider.\n  repeated string audiences = 2;\n}\n\n// This message specifies a Jwt requirement. An empty message means JWT verification is not\n// required. Here are some config examples:\n//\n// .. code-block:: yaml\n//\n//  # Example 1: not required with an empty message\n//\n//  # Example 2: require A\n//  provider_name: provider-A\n//\n//  # Example 3: require A or B\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 4: require A and B\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 5: require A and (B or C)\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_any:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 6: require A or (B and C)\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_all:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 7: A is optional (if token from A is provided, it must be valid, but also allows\n//  missing token.)\n//  
requires_any:\n//    requirements:\n//    - provider_name: provider-A\n//    - allow_missing: {}\n//\n//  # Example 8: A is optional and B is required.\n//  requires_all:\n//    requirements:\n//    - requires_any:\n//        requirements:\n//        - provider_name: provider-A\n//        - allow_missing: {}\n//    - provider_name: provider-B\n//\n// [#next-free-field: 7]\nmessage JwtRequirement {\n  oneof requires_type {\n    // Specify a required provider name.\n    string provider_name = 1;\n\n    // Specify a required provider with audiences.\n    ProviderWithAudiences provider_and_audiences = 2;\n\n    // Specify list of JwtRequirement. Their results are OR-ed.\n    // If any one of them passes, the result is passed.\n    JwtRequirementOrList requires_any = 3;\n\n    // Specify list of JwtRequirement. Their results are AND-ed.\n    // All of them must pass, if one of them fails or missing, it fails.\n    JwtRequirementAndList requires_all = 4;\n\n    // The requirement is always satisfied even if JWT is missing or the JWT\n    // verification fails. A typical usage is: this filter is used to only verify\n    // JWTs and pass the verified JWT payloads to another filter, the other filter\n    // will make decision. In this mode, all JWT tokens will be verified.\n    google.protobuf.Empty allow_missing_or_failed = 5;\n\n    // The requirement is satisfied if JWT is missing, but failed if JWT is\n    // presented but invalid. Similar to allow_missing_or_failed, this is used\n    // to only verify JWTs and pass the verified payload to another filter. 
The\n    // difference is this mode will reject requests with invalid tokens.\n    google.protobuf.Empty allow_missing = 6;\n  }\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are OR-ed; if any one of them passes, the result is passed\nmessage JwtRequirementOrList {\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails.\nmessage JwtRequirementAndList {\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a Jwt requirement for a specific Route condition.\n// Example 1:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /healthz\n//\n// In above example, \"requires\" field is empty for /healthz prefix match,\n// it means that requests matching the path prefix don't require JWT authentication.\n//\n// Example 2:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /\n//      requires: { provider_name: provider-A }\n//\n// In above example, all requests matched the path prefix require jwt authentication\n// from \"provider-A\".\nmessage RequirementRule {\n  // The route matching parameter. Only when the match is satisfied, the \"requires\" field will\n  // apply.\n  //\n  // For example: following match will match all requests.\n  //\n  // .. code-block:: yaml\n  //\n  //    match:\n  //      prefix: /\n  //\n  api.v2.route.RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Specify a Jwt Requirement. 
Please see the detailed comment in message JwtRequirement.\n  JwtRequirement requires = 2;\n}\n\n// This message specifies Jwt requirements based on stream_info.filterState.\n// This FilterState should use `Router::StringAccessor` object to set a string value.\n// Other HTTP filters can use it to specify Jwt requirements dynamically.\n//\n// Example:\n//\n// .. code-block:: yaml\n//\n//    name: jwt_selector\n//    requires:\n//      issuer_1:\n//        provider_name: issuer1\n//      issuer_2:\n//        provider_name: issuer2\n//\n// If a filter set \"jwt_selector\" with \"issuer_1\" to FilterState for a request,\n// jwt_authn filter will use JwtRequirement{\"provider_name\": \"issuer1\"} to verify.\nmessage FilterStateRule {\n  // The filter state name to retrieve the `Router::StringAccessor` object.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // A map of string keys to requirements. The string key is the string value\n  // in the FilterState with the name specified in the *name* field above.\n  map<string, JwtRequirement> requires = 3;\n}\n\n// This is the Envoy HTTP filter config for JWT authentication.\n//\n// For example:\n//\n// .. 
code-block:: yaml\n//\n//   providers:\n//      provider1:\n//        issuer: issuer1\n//        audiences:\n//        - audience1\n//        - audience2\n//        remote_jwks:\n//          http_uri:\n//            uri: https://example.com/.well-known/jwks.json\n//            cluster: example_jwks_cluster\n//      provider2:\n//        issuer: issuer2\n//        local_jwks:\n//          inline_string: jwks_string\n//\n//   rules:\n//      # Not jwt verification is required for /health path\n//      - match:\n//          prefix: /health\n//\n//      # Jwt verification for provider1 is required for path prefixed with \"prefix\"\n//      - match:\n//          prefix: /prefix\n//        requires:\n//          provider_name: provider1\n//\n//      # Jwt verification for either provider1 or provider2 is required for all other requests.\n//      - match:\n//          prefix: /\n//        requires:\n//          requires_any:\n//            requirements:\n//              - provider_name: provider1\n//              - provider_name: provider2\n//\nmessage JwtAuthentication {\n  // Map of provider names to JwtProviders.\n  //\n  // .. code-block:: yaml\n  //\n  //   providers:\n  //     provider1:\n  //        issuer: issuer1\n  //        audiences:\n  //        - audience1\n  //        - audience2\n  //        remote_jwks:\n  //          http_uri:\n  //            uri: https://example.com/.well-known/jwks.json\n  //            cluster: example_jwks_cluster\n  //      provider2:\n  //        issuer: provider2\n  //        local_jwks:\n  //          inline_string: jwks_string\n  //\n  map<string, JwtProvider> providers = 1;\n\n  // Specifies requirements based on the route matches. The first matched requirement will be\n  // applied. If there are overlapped match conditions, please put the most specific match first.\n  //\n  // Examples\n  //\n  // .. 
code-block:: yaml\n  //\n  //   rules:\n  //     - match:\n  //         prefix: /healthz\n  //     - match:\n  //         prefix: /baz\n  //       requires:\n  //         provider_name: provider1\n  //     - match:\n  //         prefix: /foo\n  //       requires:\n  //         requires_any:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //     - match:\n  //         prefix: /bar\n  //       requires:\n  //         requires_all:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //\n  repeated RequirementRule rules = 2;\n\n  // This message specifies Jwt requirements based on stream_info.filterState.\n  // Other HTTP filters can use it to specify Jwt requirements dynamically.\n  // The *rules* field above is checked first, if it could not find any matches,\n  // check this one.\n  FilterStateRule filter_state_rules = 3;\n\n  // When set to true, bypass the `CORS preflight request\n  // <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight>`_ regardless of JWT\n  // requirements specified in the rules.\n  bool bypass_cors_preflight = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/lua/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/lua/v2/lua.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.lua.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.lua.v2\";\noption java_outer_classname = \"LuaProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.lua.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Lua]\n// Lua :ref:`configuration overview <config_http_filters_lua>`.\n// [#extension: envoy.filters.http.lua]\n\nmessage Lua {\n  // The Lua code that Envoy will execute. This can be a very small script that\n  // further loads code from disk if desired. Note that if JSON configuration is used, the code must\n  // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line\n  // strings so complex scripts can be easily expressed inline in the configuration.\n  string inline_code = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/on_demand/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/on_demand/v2/on_demand.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.on_demand.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.on_demand.v2\";\noption java_outer_classname = \"OnDemandProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.on_demand.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: OnDemand]\n// IP tagging :ref:`configuration overview <config_http_filters_on_demand>`.\n// [#extension: envoy.filters.http.on_demand]\n\nmessage OnDemand {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/original_src/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.original_src.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1\";\noption java_outer_classname = \"OriginalSrcProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.original_src.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Original Src Filter]\n// Use the Original source address on upstream connections.\n\n// The Original Src filter binds upstream connections to the original source address determined\n// for the request. This address could come from something like the Proxy Protocol filter, or it\n// could come from trusted http headers.\n// [#extension: envoy.filters.http.original_src]\nmessage OriginalSrc {\n  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to\n  // ensure that non-local addresses may be routed back through envoy when binding to the original\n  // source address. The option will not be applied if the mark is 0.\n  uint32 mark = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/rate_limit/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.rate_limit.v2;\n\nimport \"envoy/config/ratelimit/v2/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.rate_limit.v2\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_http_filters_rate_limit>`.\n// [#extension: envoy.filters.http.ratelimit]\n\n// [#next-free-field: 8]\nmessage RateLimit {\n  // The rate limit domain to use when calling the rate limit service.\n  string domain = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Specifies the rate limit configurations to be applied with the same\n  // stage number. If not set, the default stage number is 0.\n  //\n  // .. note::\n  //\n  //  The filter supports a range of 0 - 10 inclusively for stage numbers.\n  uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}];\n\n  // The type of requests the filter should apply to. The supported\n  // types are *internal*, *external* or *both*. A request is considered internal if\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>` is set to true. If\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>` is not set or false, a\n  // request is considered external. The filter defaults to *both*, and it will apply to all request\n  // types.\n  string request_type = 3\n      [(validate.rules).string = {in: \"internal\" in: \"external\" in: \"both\" in: \"\"}];\n\n  // The timeout in milliseconds for the rate limit service RPC. 
If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 4;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 5;\n\n  // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead\n  // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The\n  // HTTP code will be 200 for a gRPC response.\n  bool rate_limited_as_resource_exhausted = 6;\n\n  // Configuration for an external rate limit service provider. If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/rbac/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/rbac/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/rbac/v2/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.rbac.v2;\n\nimport \"envoy/config/rbac/v2/rbac.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.rbac.v2\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.rbac.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_http_filters_rbac>`.\n// [#extension: envoy.filters.http.rbac]\n\n// RBAC filter config.\nmessage RBAC {\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v2.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter (i.e., returning a 403)\n  // but will emit stats and logs and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v2.RBAC shadow_rules = 2;\n}\n\nmessage RBACPerRoute {\n  reserved 1;\n\n  // Override the global configuration of the filter with this new config.\n  // If absent, the global RBAC policy will be disabled for this route.\n  RBAC rbac = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/router/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/router/v2/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.router.v2;\n\nimport \"envoy/config/filter/accesslog/v2/accesslog.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.router.v2\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.router.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Router]\n// Router :ref:`configuration overview <config_http_filters_router>`.\n// [#extension: envoy.filters.http.router]\n\n// [#next-free-field: 7]\nmessage Router {\n  // Whether the router generates dynamic cluster statistics. Defaults to\n  // true. Can be disabled in high performance scenarios.\n  google.protobuf.BoolValue dynamic_stats = 1;\n\n  // Whether to start a child span for egress routed calls. This can be\n  // useful in scenarios where other filters (auth, ratelimit, etc.) make\n  // outbound calls and have child spans rooted at the same ingress\n  // parent. Defaults to false.\n  bool start_child_span = 2;\n\n  // Configuration for HTTP upstream logs emitted by the router. Upstream logs\n  // are configured in the same way as access logs, but each log entry represents\n  // an upstream request. Presuming retries are configured, multiple upstream\n  // requests may be made for each downstream (inbound) request.\n  repeated accesslog.v2.AccessLog upstream_log = 3;\n\n  // Do not add any additional *x-envoy-* headers to requests or responses. 
This\n  // only affects the :ref:`router filter generated *x-envoy-* headers\n  // <config_http_filters_router_headers_set>`, other Envoy filters and the HTTP\n  // connection manager may continue to set *x-envoy-* headers.\n  bool suppress_envoy_headers = 4;\n\n  // Specifies a list of HTTP headers to strictly validate. Envoy will reject a\n  // request and respond with HTTP status 400 if the request contains an invalid\n  // value for any of the headers listed in this field. Strict header checking\n  // is only supported for the following headers:\n  //\n  // Value must be a ','-delimited list (i.e. no spaces) of supported retry\n  // policy values:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`\n  // * :ref:`config_http_filters_router_x-envoy-retry-on`\n  //\n  // Value must be an integer:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-max-retries`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`\n  repeated string strict_check_headers = 5 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"x-envoy-upstream-rq-timeout-ms\"\n        in: \"x-envoy-upstream-rq-per-try-timeout-ms\"\n        in: \"x-envoy-max-retries\"\n        in: \"x-envoy-retry-grpc-on\"\n        in: \"x-envoy-retry-on\"\n      }\n    }\n  }];\n\n  // If not set, ingress Envoy will ignore\n  // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress\n  // Envoy, when deriving timeout for upstream cluster.\n  bool respect_expected_rq_timeout = 6;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/squash/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/squash/v2/squash.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.squash.v2;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.squash.v2\";\noption java_outer_classname = \"SquashProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.squash.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Squash]\n// Squash :ref:`configuration overview <config_http_filters_squash>`.\n// [#extension: envoy.filters.http.squash]\n\n// [#next-free-field: 6]\nmessage Squash {\n  // The name of the cluster that hosts the Squash server.\n  string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // When the filter requests the Squash server to create a DebugAttachment, it will use this\n  // structure as template for the body of the request. It can contain reference to environment\n  // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server\n  // with more information to find the process to attach the debugger to. For example, in a\n  // Istio/k8s environment, this will contain information on the pod:\n  //\n  // .. code-block:: json\n  //\n  //  {\n  //    \"spec\": {\n  //      \"attachment\": {\n  //        \"pod\": \"{{ POD_NAME }}\",\n  //        \"namespace\": \"{{ POD_NAMESPACE }}\"\n  //      },\n  //      \"match_request\": true\n  //    }\n  //  }\n  //\n  // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API)\n  google.protobuf.Struct attachment_template = 2;\n\n  // The timeout for individual requests sent to the Squash cluster. 
Defaults to 1 second.\n  google.protobuf.Duration request_timeout = 3;\n\n  // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60\n  // seconds.\n  google.protobuf.Duration attachment_timeout = 4;\n\n  // Amount of time to poll for the status of the attachment object in the Squash server\n  // (to check if has been attached). Defaults to 1 second.\n  google.protobuf.Duration attachment_poll_period = 5;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/tap/v2alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.tap.v2alpha;\n\nimport \"envoy/config/common/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.tap.v2alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.tap.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap]\n// Tap :ref:`configuration overview <config_http_filters_tap>`.\n// [#extension: envoy.filters.http.tap]\n\n// Top level configuration for the tap filter.\nmessage Tap {\n  // Common configuration for the HTTP tap filter.\n  common.tap.v2alpha.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/http/transcoder/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/http/transcoder/v2/transcoder.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.transcoder.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.transcoder.v2\";\noption java_outer_classname = \"TranscoderProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_json_transcoder.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC-JSON transcoder]\n// gRPC-JSON transcoder :ref:`configuration overview <config_http_filters_grpc_json_transcoder>`.\n// [#extension: envoy.filters.http.grpc_json_transcoder]\n\n// [#next-free-field: 10]\nmessage GrpcJsonTranscoder {\n  message PrintOptions {\n    // Whether to add spaces, line breaks and indentation to make the JSON\n    // output easy to read. Defaults to false.\n    bool add_whitespace = 1;\n\n    // Whether to always print primitive fields. By default primitive\n    // fields with default values will be omitted in JSON output. For\n    // example, an int32 field set to 0 will be omitted. Setting this flag to\n    // true will override the default behavior and print primitive fields\n    // regardless of their values. Defaults to false.\n    bool always_print_primitive_fields = 2;\n\n    // Whether to always print enums as ints. By default they are rendered\n    // as strings. Defaults to false.\n    bool always_print_enums_as_ints = 3;\n\n    // Whether to preserve proto field names. By default protobuf will\n    // generate JSON field names using the ``json_name`` option, or lower camel case,\n    // in that order. Setting this flag will preserve the original field names. 
Defaults to false.\n    bool preserve_proto_field_names = 4;\n  }\n\n  oneof descriptor_set {\n    option (validate.required) = true;\n\n    // Supplies the filename of\n    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC\n    // services.\n    string proto_descriptor = 1;\n\n    // Supplies the binary content of\n    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC\n    // services.\n    bytes proto_descriptor_bin = 4;\n  }\n\n  // A list of strings that\n  // supplies the fully qualified service names (i.e. \"package_name.service_name\") that\n  // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``,\n  // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than\n  // the service names specified here, but they won't be translated.\n  repeated string services = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // Control options for response JSON. These options are passed directly to\n  // `JsonPrintOptions <https://developers.google.com/protocol-buffers/docs/reference/cpp/\n  // google.protobuf.util.json_util#JsonPrintOptions>`_.\n  PrintOptions print_options = 3;\n\n  // Whether to keep the incoming request route after the outgoing headers have been transformed to\n  // the match the upstream gRPC service. Note: This means that routes for gRPC services that are\n  // not transcoded cannot be used in combination with *match_incoming_request_route*.\n  bool match_incoming_request_route = 5;\n\n  // A list of query parameters to be ignored for transcoding method mapping.\n  // By default, the transcoder filter will not transcode a request if there are any\n  // unknown/invalid query parameters.\n  //\n  // Example :\n  //\n  // .. 
code-block:: proto\n  //\n  //     service Bookstore {\n  //       rpc GetShelf(GetShelfRequest) returns (Shelf) {\n  //         option (google.api.http) = {\n  //           get: \"/shelves/{shelf}\"\n  //         };\n  //       }\n  //     }\n  //\n  //     message GetShelfRequest {\n  //       int64 shelf = 1;\n  //     }\n  //\n  //     message Shelf {}\n  //\n  // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf`` because variable\n  // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow\n  // the same request to be mapped to ``GetShelf``.\n  repeated string ignored_query_parameters = 6;\n\n  // Whether to route methods without the ``google.api.http`` option.\n  //\n  // Example :\n  //\n  // .. code-block:: proto\n  //\n  //     package bookstore;\n  //\n  //     service Bookstore {\n  //       rpc GetShelf(GetShelfRequest) returns (Shelf) {}\n  //     }\n  //\n  //     message GetShelfRequest {\n  //       int64 shelf = 1;\n  //     }\n  //\n  //     message Shelf {}\n  //\n  // The client could ``post`` a json body ``{\"shelf\": 1234}`` with the path of\n  // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``.\n  bool auto_mapping = 7;\n\n  // Whether to ignore query parameters that cannot be mapped to a corresponding\n  // protobuf field. Use this if you cannot control the query parameters and do\n  // not know them beforehand. 
Otherwise use ``ignored_query_parameters``.\n  // Defaults to false.\n  bool ignore_unknown_query_parameters = 8;\n\n  // Whether to convert gRPC status headers to JSON.\n  // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status``\n  // from the ``grpc-status-details-bin`` header and use it as JSON body.\n  // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and\n  // ``grpc-message`` headers.\n  // The error details types must be present in the ``proto_descriptor``.\n  //\n  // For example, if an upstream server replies with headers:\n  //\n  // .. code-block:: none\n  //\n  //     grpc-status: 5\n  //     grpc-status-details-bin:\n  //         CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ\n  //\n  // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message\n  // ``google.rpc.Status``. It will be transcoded into:\n  //\n  // .. code-block:: none\n  //\n  //     HTTP/1.1 404 Not Found\n  //     content-type: application/json\n  //\n  //     {\"code\":5,\"details\":[{\"@type\":\"type.googleapis.com/google.rpc.RequestInfo\",\"requestId\":\"r-1\"}]}\n  //\n  //  In order to transcode the message, the ``google.rpc.RequestInfo`` type from\n  //  the ``google/rpc/error_details.proto`` should be included in the configured\n  //  :ref:`proto descriptor set <config_grpc_json_generate_proto_descriptor_set>`.\n  bool convert_grpc_status = 9;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/listener/http_inspector/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.http_inspector.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.http_inspector.v2\";\noption java_outer_classname = \"HttpInspectorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.http_inspector.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP Inspector Filter]\n// Detect whether the application protocol is HTTP.\n// [#extension: envoy.filters.listener.http_inspector]\n\nmessage HttpInspector {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/listener/original_dst/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/listener/original_dst/v2/original_dst.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.original_dst.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.original_dst.v2\";\noption java_outer_classname = \"OriginalDstProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.original_dst.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Original Dst Filter]\n// Use the Original destination address on downstream connections.\n// [#extension: envoy.filters.listener.original_dst]\n\nmessage OriginalDst {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/listener/original_src/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.original_src.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1\";\noption java_outer_classname = \"OriginalSrcProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.original_src.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Original Src Filter]\n// Use the Original source address on upstream connections.\n// [#extension: envoy.filters.listener.original_src]\n\n// The Original Src filter binds upstream connections to the original source address determined\n// for the connection. This address could come from something like the Proxy Protocol filter, or it\n// could come from trusted http headers.\nmessage OriginalSrc {\n  // Whether to bind the port to the one used in the original downstream connection.\n  // [#not-implemented-hide:]\n  bool bind_port = 1;\n\n  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to\n  // ensure that non-local addresses may be routed back through envoy when binding to the original\n  // source address. The option will not be applied if the mark is 0.\n  uint32 mark = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/listener/proxy_protocol/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.proxy_protocol.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.proxy_protocol.v2\";\noption java_outer_classname = \"ProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.proxy_protocol.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Proxy Protocol Filter]\n// PROXY protocol listener filter.\n// [#extension: envoy.filters.listener.proxy_protocol]\n\nmessage ProxyProtocol {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/listener/tls_inspector/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.tls_inspector.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.tls_inspector.v2\";\noption java_outer_classname = \"TlsInspectorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.tls_inspector.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: TLS Inspector Filter]\n// Allows detecting whether the transport appears to be TLS or plaintext.\n// [#extension: envoy.filters.listener.tls_inspector]\n\nmessage TlsInspector {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/client_ssl_auth/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.client_ssl_auth.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2\";\noption java_outer_classname = \"ClientSslAuthProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.client_ssl_auth.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Client TLS authentication]\n// Client TLS authentication\n// :ref:`configuration overview <config_network_filters_client_ssl_auth>`.\n// [#extension: envoy.filters.network.client_ssl_auth]\n\nmessage ClientSSLAuth {\n  // The :ref:`cluster manager <arch_overview_cluster_manager>` cluster that runs\n  // the authentication service. The filter will connect to the service every 60s to fetch the list\n  // of principals. The service must support the expected :ref:`REST API\n  // <config_network_filters_client_ssl_auth_rest_api>`.\n  string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_client_ssl_auth_stats>`.\n  string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Time in milliseconds between principal refreshes from the\n  // authentication service. Default is 60000 (60s). The actual fetch time\n  // will be this value plus a random jittered value between\n  // 0-refresh_delay_ms milliseconds.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // An optional list of IP address and subnet masks that should be white\n  // listed for access by the filter. 
If no list is provided, there is no\n  // IP allowlist.\n  repeated api.v2.core.CidrRange ip_white_list = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/direct_response/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/direct_response/v2/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.direct_response.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.direct_response.v2\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.direct_response.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Direct response]\n// Direct response :ref:`configuration overview <config_network_filters_direct_response>`.\n// [#extension: envoy.filters.network.direct_response]\n\nmessage Config {\n  // Response data as a data source.\n  api.v2.core.DataSource response = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md",
    "content": "Protocol buffer definitions for the Dubbo proxy.\n"
  },
  {
    "path": "api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.dubbo_proxy.v2alpha1;\n\nimport \"envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1\";\noption java_outer_classname = \"DubboProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.dubbo_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dubbo Proxy]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n// [#extension: envoy.filters.network.dubbo_proxy]\n\n// Dubbo Protocol types supported by Envoy.\nenum ProtocolType {\n  // the default protocol.\n  Dubbo = 0;\n}\n\n// Dubbo Serialization types supported by Envoy.\nenum SerializationType {\n  // the default serialization protocol.\n  Hessian2 = 0;\n}\n\n// [#next-free-field: 6]\nmessage DubboProxy {\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Configure the protocol used.\n  ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Configure the serialization protocol used.\n  SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  repeated RouteConfiguration route_config = 4;\n\n  // A list of individual Dubbo filters that make up the filter chain for requests made to the\n  // Dubbo proxy. Order matters as the filters are processed sequentially. 
For backwards\n  // compatibility, if no dubbo_filters are specified, a default Dubbo router filter\n  // (`envoy.filters.dubbo.router`) is used.\n  repeated DubboFilter dubbo_filters = 5;\n}\n\n// DubboFilter configures a Dubbo filter.\nmessage DubboFilter {\n  // The name of the filter to instantiate. The name must match a supported\n  // filter.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any config = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.dubbo_proxy.v2alpha1;\n\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/matcher/string.proto\";\nimport \"envoy/type/range.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.dubbo_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dubbo Proxy Route Configuration]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n\n// [#next-free-field: 6]\nmessage RouteConfiguration {\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The interface name of the service.\n  string interface = 2;\n\n  // Which group does the interface belong to.\n  string group = 3;\n\n  // The version number of the interface.\n  string version = 4;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 5;\n}\n\nmessage Route {\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  // Method level routing matching.\n  MethodMatch method = 1;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. 
A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated api.v2.route.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed.\n    string cluster = 1;\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    // Currently ClusterWeight only supports the name and weight fields.\n    api.v2.route.WeightedCluster weighted_clusters = 2;\n  }\n}\n\nmessage MethodMatch {\n  // The parameter matching type.\n  message ParameterMatchSpecifier {\n    oneof parameter_match_specifier {\n      // If specified, header match will be performed based on the value of the header.\n      string exact_match = 3;\n\n      // If specified, header match will be performed based on range.\n      // The rule will match if the request header value is within this range.\n      // The entire request header value must represent an integer in base 10 notation: consisting\n      // of an optional plus or minus sign followed by a sequence of digits. The rule will not match\n      // if the header value does not represent an integer. 
Match will fail for empty values,\n      // floating point numbers or if only a subsequence of the header value is an integer.\n      //\n      // Examples:\n      //\n      // * For range [-10,0), route will match for header value -1, but not for 0,\n      //   \"somestring\", 10.9, \"-1somestring\"\n      type.Int64Range range_match = 4;\n    }\n  }\n\n  // The name of the method.\n  type.matcher.StringMatcher name = 1;\n\n  // Method parameter definition.\n  // The key is the parameter index, starting from 0.\n  // The value is the parameter matching type.\n  map<uint32, ParameterMatchSpecifier> params_match = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/echo/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/echo/v2/echo.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.echo.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.echo.v2\";\noption java_outer_classname = \"EchoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.network.echo.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Echo]\n// Echo :ref:`configuration overview <config_network_filters_echo>`.\n// [#extension: envoy.filters.network.echo]\n\nmessage Echo {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/ext_authz/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.ext_authz.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.ext_authz.v2\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.ext_authz.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Network External Authorization ]\n// The network layer external authorization service configuration\n// :ref:`configuration overview <config_network_filters_ext_authz>`.\n// [#extension: envoy.filters.network.ext_authz]\n\n// External Authorization filter calls out to an external service over the\n// gRPC Authorization API defined by\n// :ref:`CheckRequest <envoy_api_msg_service.auth.v2.CheckRequest>`.\n// A failed check will cause this filter to close the TCP connection.\nmessage ExtAuthz {\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The external authorization gRPC service configuration.\n  // The default timeout is set to 200ms by this filter.\n  api.v2.core.GrpcService grpc_service = 2;\n\n  // The filter's behaviour in case the external authorization service does\n  // not respond back. 
When it is set to true, Envoy will also allow traffic in case of\n  // communication failure between authorization service and the proxy.\n  // Defaults to false.\n  bool failure_mode_allow = 3;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v2.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/http_connection_manager/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/config/trace/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/tracing/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.http_connection_manager.v2;\n\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/api/v2/core/protocol.proto\";\nimport \"envoy/api/v2/route.proto\";\nimport \"envoy/api/v2/scoped_route.proto\";\nimport \"envoy/config/filter/accesslog/v2/accesslog.proto\";\nimport \"envoy/config/trace/v2/http_tracer.proto\";\nimport \"envoy/type/percent.proto\";\nimport \"envoy/type/tracing/v2/custom_tag.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2\";\noption java_outer_classname = \"HttpConnectionManagerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.http_connection_manager.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP connection manager]\n// HTTP connection manager :ref:`configuration overview <config_http_conn_man>`.\n// [#extension: envoy.filters.network.http_connection_manager]\n\n// [#next-free-field: 37]\nmessage HttpConnectionManager {\n  enum CodecType {\n    // For every new connection, the connection manager will determine which\n    // codec to use. This mode supports both ALPN for TLS listeners as well as\n    // protocol inference for plaintext listeners. If ALPN data is available, it\n    // is preferred, otherwise protocol inference is used. 
In almost all cases,\n    // this is the right option to choose for this setting.\n    AUTO = 0;\n\n    // The connection manager will assume that the client is speaking HTTP/1.1.\n    HTTP1 = 1;\n\n    // The connection manager will assume that the client is speaking HTTP/2\n    // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN.\n    // Prior knowledge is allowed).\n    HTTP2 = 2;\n\n    // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n    // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n    // to distinguish HTTP1 and HTTP2 traffic.\n    HTTP3 = 3;\n  }\n\n  enum ServerHeaderTransformation {\n    // Overwrite any Server header with the contents of server_name.\n    OVERWRITE = 0;\n\n    // If no Server header is present, append Server server_name\n    // If a Server header is present, pass it through.\n    APPEND_IF_ABSENT = 1;\n\n    // Pass through the value of the server header, and do not append a header\n    // if none is present.\n    PASS_THROUGH = 2;\n  }\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  enum ForwardClientCertDetails {\n    // Do not send the XFCC header to the next hop. 
This is the default value.\n    SANITIZE = 0;\n\n    // When the client connection is mTLS (Mutual TLS), forward the XFCC header\n    // in the request.\n    FORWARD_ONLY = 1;\n\n    // When the client connection is mTLS, append the client certificate\n    // information to the request’s XFCC header and forward it.\n    APPEND_FORWARD = 2;\n\n    // When the client connection is mTLS, reset the XFCC header with the client\n    // certificate information and send it to the next hop.\n    SANITIZE_SET = 3;\n\n    // Always forward the XFCC header in the request, regardless of whether the\n    // client connection is mTLS.\n    ALWAYS_FORWARD_ONLY = 4;\n  }\n\n  // [#next-free-field: 10]\n  message Tracing {\n    enum OperationName {\n      // The HTTP listener is used for ingress/incoming requests.\n      INGRESS = 0;\n\n      // The HTTP listener is used for egress/outgoing requests.\n      EGRESS = 1;\n    }\n\n    // The span name will be derived from this field. If\n    // :ref:`traffic_direction <envoy_api_field_Listener.traffic_direction>` is\n    // specified on the parent listener, then it is used instead of this field.\n    //\n    // .. attention::\n    //  This field has been deprecated in favor of `traffic_direction`.\n    OperationName operation_name = 1 [\n      deprecated = true,\n      (validate.rules).enum = {defined_only: true},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n\n    // A list of header names used to create tags for the active span. The header name is used to\n    // populate the tag name, and the header value is used to populate the tag value. The tag is\n    // created if the specified header name is present in the request's headers.\n    //\n    // .. 
attention::\n    //  This field has been deprecated in favor of :ref:`custom_tags\n    //  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing.custom_tags>`.\n    repeated string request_headers_for_tags = 2 [deprecated = true];\n\n    // Target percentage of requests managed by this HTTP connection manager that will be force\n    // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n    // header is set. This field is a direct analog for the runtime variable\n    // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n    // <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.Percent client_sampling = 3;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be randomly\n    // selected for trace generation, if not requested by the client or not forced. This field is\n    // a direct analog for the runtime variable 'tracing.random_sampling' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.Percent random_sampling = 4;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be traced\n    // after all other sampling checks have been applied (client-directed, force tracing, random\n    // sampling). This field functions as an upper limit on the total configured sampling rate. For\n    // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n    // of client requests with the appropriate headers to be force traced. This field is a direct\n    // analog for the runtime variable 'tracing.global_enabled' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.Percent overall_sampling = 5;\n\n    // Whether to annotate spans with additional data. 
If true, spans will include logs for stream\n    // events.\n    bool verbose = 6;\n\n    // Maximum length of the request path to extract and include in the HttpUrl tag. Used to\n    // truncate lengthy request paths to meet the needs of a tracing backend.\n    // Default: 256\n    google.protobuf.UInt32Value max_path_tag_length = 7;\n\n    // A list of custom tags with unique tag name to create tags for the active span.\n    repeated type.tracing.v2.CustomTag custom_tags = 8;\n\n    // Configuration for an external tracing provider.\n    // If not specified, no tracing will be performed.\n    //\n    // .. attention::\n    //   Please be aware that *envoy.tracers.opencensus* provider can only be configured once\n    //   in Envoy lifetime.\n    //   Any attempts to reconfigure it or to use different configurations for different HCM filters\n    //   will be rejected.\n    //   Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes\n    //   on OpenCensus side.\n    trace.v2.Tracing.Http provider = 9;\n  }\n\n  message InternalAddressConfig {\n    // Whether unix socket addresses should be considered internal.\n    bool unix_sockets = 1;\n  }\n\n  // [#next-free-field: 7]\n  message SetCurrentClientCertDetails {\n    reserved 2;\n\n    // Whether to forward the subject of the client cert. Defaults to false.\n    google.protobuf.BoolValue subject = 1;\n\n    // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the\n    // XFCC header comma separated from other values with the value Cert=\"PEM\".\n    // Defaults to false.\n    bool cert = 3;\n\n    // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM\n    // format. 
This will appear in the XFCC header comma separated from other values with the value\n    // Chain=\"PEM\".\n    // Defaults to false.\n    bool chain = 6;\n\n    // Whether to forward the DNS type Subject Alternative Names of the client cert.\n    // Defaults to false.\n    bool dns = 4;\n\n    // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to\n    // false.\n    bool uri = 5;\n  }\n\n  // The configuration for HTTP upgrades.\n  // For each upgrade type desired, an UpgradeConfig must be added.\n  //\n  // .. warning::\n  //\n  //    The current implementation of upgrade headers does not handle\n  //    multi-valued upgrade headers. Support for multi-valued headers may be\n  //    added in the future if needed.\n  //\n  // .. warning::\n  //    The current implementation of upgrade headers does not work with HTTP/2\n  //    upstreams.\n  message UpgradeConfig {\n    // The case-insensitive name of this upgrade, e.g. \"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type]\n    // will be proxied upstream.\n    string upgrade_type = 1;\n\n    // If present, this represents the filter chain which will be created for\n    // this type of upgrade. If no filters are present, the filter chain for\n    // HTTP connections will be used for this upgrade type.\n    repeated HttpFilter filters = 2;\n\n    // Determines if upgrades are enabled or disabled by default. 
Defaults to true.\n    // This can be overridden on a per-route basis with :ref:`cluster\n    // <envoy_api_field_route.RouteAction.upgrade_configs>` as documented in the\n    // :ref:`upgrade documentation <arch_overview_upgrades>`.\n    google.protobuf.BoolValue enabled = 3;\n  }\n\n  reserved 27;\n\n  // Supplies the type of codec that the connection manager should use.\n  CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics for the\n  // connection manager. See the :ref:`statistics documentation <config_http_conn_man_stats>` for\n  // more information.\n  string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The connection manager’s route table will be dynamically loaded via the RDS API.\n    Rds rds = 3;\n\n    // The route table for the connection manager is static and is specified in this property.\n    api.v2.RouteConfiguration route_config = 4;\n\n    // A route table will be dynamically assigned to each request based on request attributes\n    // (e.g., the value of a header). The \"routing scopes\" (i.e., route tables) and \"scope keys\" are\n    // specified in this message.\n    ScopedRoutes scoped_routes = 31;\n  }\n\n  // A list of individual HTTP filters that make up the filter chain for\n  // requests made to the connection manager. :ref:`Order matters <arch_overview_http_filters_ordering>`\n  // as the filters are processed sequentially as request events happen.\n  repeated HttpFilter http_filters = 5;\n\n  // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent`\n  // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked\n  // documentation for more information. 
Defaults to false.\n  google.protobuf.BoolValue add_user_agent = 6;\n\n  // Presence of the object defines whether the connection manager\n  // emits :ref:`tracing <arch_overview_tracing>` data to the :ref:`configured tracing provider\n  // <envoy_api_msg_config.trace.v2.Tracing>`.\n  Tracing tracing = 7;\n\n  // Additional settings for HTTP requests handled by the connection manager. These will be\n  // applicable to both HTTP1 and HTTP2 requests.\n  api.v2.core.HttpProtocolOptions common_http_protocol_options = 35;\n\n  // Additional HTTP/1 settings that are passed to the HTTP/1 codec.\n  api.v2.core.Http1ProtocolOptions http_protocol_options = 8;\n\n  // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec.\n  api.v2.core.Http2ProtocolOptions http2_protocol_options = 9;\n\n  // An optional override that the connection manager will write to the server\n  // header in responses. If not set, the default is *envoy*.\n  string server_name = 10;\n\n  // Defines the action to be applied to the Server header on the response path.\n  // By default, Envoy will overwrite the header with the value specified in\n  // server_name.\n  ServerHeaderTransformation server_header_transformation = 34\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The maximum request headers size for incoming connections.\n  // If unconfigured, the default max request headers allowed is 60 KiB.\n  // Requests that exceed this limit will receive a 431 response.\n  // The max configurable limit is 96 KiB, based on current implementation\n  // constraints.\n  google.protobuf.UInt32Value max_request_headers_kb = 29\n      [(validate.rules).uint32 = {lte: 96 gt: 0}];\n\n  // The idle timeout for connections managed by the connection manager. The\n  // idle timeout is defined as the period in which there are no active\n  // requests. If not set, there is no idle timeout. When the idle timeout is\n  // reached the connection will be closed. 
If the connection is an HTTP/2\n  // connection a drain sequence will occur prior to closing the connection.\n  // This field is deprecated. Use :ref:`idle_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.common_http_protocol_options>`\n  // instead.\n  google.protobuf.Duration idle_timeout = 11\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // The stream idle timeout for connections managed by the connection manager.\n  // If not specified, this defaults to 5 minutes. The default value was selected\n  // so as not to interfere with any smaller configured timeouts that may have\n  // existed in configurations prior to the introduction of this feature, while\n  // introducing robustness to TCP connections that terminate without a FIN.\n  //\n  // This idle timeout applies to new streams and is overridable by the\n  // :ref:`route-level idle_timeout\n  // <envoy_api_field_route.RouteAction.idle_timeout>`. Even on a stream in\n  // which the override applies, prior to receipt of the initial request\n  // headers, the :ref:`stream_idle_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  // applies. Each time an encode/decode event for headers or data is processed\n  // for the stream, the timer will be reset. If the timeout fires, the stream\n  // is terminated with a 408 Request Timeout error code if no upstream response\n  // header has been received, otherwise a stream reset occurs.\n  //\n  // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough\n  // window to write any remaining stream data once the entirety of stream data (local end stream is\n  // true) has been buffered pending available window. 
In other words, this timeout defends against\n  // a peer that does not release enough window to completely write the stream, even though all\n  // data has been proxied within available flow control windows. If the timeout is hit in this\n  // case, the :ref:`tx_flush_timeout <config_http_conn_man_stats_per_codec>` counter will be\n  // incremented. Note that :ref:`max_stream_duration\n  // <envoy_api_field_core.HttpProtocolOptions.max_stream_duration>` does not apply to this corner\n  // case.\n  //\n  // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due\n  // to the granularity of events presented to the connection manager. For example, while receiving\n  // very large request headers, it may be the case that there is traffic regularly arriving on the\n  // wire while the connection manager is only able to observe the end-of-headers event, hence the\n  // stream may still idle timeout.\n  //\n  // A value of 0 will completely disable the connection manager stream idle\n  // timeout, although per-route idle timeout overrides will continue to apply.\n  google.protobuf.Duration stream_idle_timeout = 24;\n\n  // The amount of time that Envoy will wait for the entire request to be received.\n  // The timer is activated when the request is initiated, and is disarmed when the last byte of the\n  // request is sent upstream (i.e. all decoding filters have processed the request), OR when the\n  // response is initiated. If not specified or set to 0, this timeout is disabled.\n  google.protobuf.Duration request_timeout = 28;\n\n  // The time that Envoy will wait between sending an HTTP/2 “shutdown\n  // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame.\n  // This is used so that Envoy provides a grace period for new streams that\n  // race with the final GOAWAY frame. During this grace period, Envoy will\n  // continue to accept new streams. 
After the grace period, a final GOAWAY\n  // frame is sent and Envoy will start refusing new streams. Draining occurs\n  // both when a connection hits the idle timeout or during general server\n  // draining. The default grace period is 5000 milliseconds (5 seconds) if this\n  // option is not specified.\n  google.protobuf.Duration drain_timeout = 12;\n\n  // The delayed close timeout is for downstream connections managed by the HTTP connection manager.\n  // It is defined as a grace period after connection close processing has been locally initiated\n  // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy\n  // from the downstream connection) prior to Envoy closing the socket associated with that\n  // connection.\n  // NOTE: This timeout is enforced even when the socket associated with the downstream connection\n  // is pending a flush of the write buffer. However, any progress made writing data to the socket\n  // will restart the timer associated with this timeout. This means that the total grace period for\n  // a socket in this state will be\n  // <total_time_waiting_for_write_buffer_flushes>+<delayed_close_timeout>.\n  //\n  // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close\n  // sequence mitigates a race condition that exists when downstream clients do not drain/process\n  // data in a connection's receive buffer after a remote close has been detected via a socket\n  // write(). This race leads to such clients failing to process the response code sent by Envoy,\n  // which could result in erroneous downstream processing.\n  //\n  // If the timeout triggers, Envoy will close the connection's socket.\n  //\n  // The default timeout is 1000 ms if this option is not specified.\n  //\n  // .. 
NOTE::\n  //    To be useful in avoiding the race condition described above, this timeout must be set\n  //    to *at least* <max round trip time expected between clients and Envoy>+<100ms to account for\n  //    a reasonable \"worst\" case processing time for a full iteration of Envoy's event loop>.\n  //\n  // .. WARNING::\n  //    A value of 0 will completely disable delayed close processing. When disabled, the downstream\n  //    connection's socket will be closed immediately after the write flush is completed or will\n  //    never close if the write flush does not complete.\n  google.protobuf.Duration delayed_close_timeout = 26;\n\n  // Configuration for :ref:`HTTP access logs <arch_overview_access_logs>`\n  // emitted by the connection manager.\n  repeated accesslog.v2.AccessLog access_log = 13;\n\n  // If set to true, the connection manager will use the real remote address\n  // of the client connection when determining internal versus external origin and manipulating\n  // various headers. If set to false or absent, the connection manager will use the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for`,\n  // :ref:`config_http_conn_man_headers_x-envoy-internal`, and\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information.\n  google.protobuf.BoolValue use_remote_address = 14;\n\n  // The number of additional ingress proxy hops from the right side of the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when\n  // determining the origin client's IP address. The default is zero if this option\n  // is not specified. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information.\n  uint32 xff_num_trusted_hops = 19;\n\n  // Configures what network addresses are considered internal for stats and header sanitation\n  // purposes. 
If unspecified, only RFC1918 IP addresses will be considered internal.\n  // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information about internal/external addresses.\n  InternalAddressConfig internal_address_config = 25;\n\n  // If set, Envoy will not append the remote address to the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in\n  // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager\n  // has mutated the request headers. While :ref:`use_remote_address\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.use_remote_address>`\n  // will also suppress XFF addition, it has consequences for logging and other\n  // Envoy uses of the remote address, so *skip_xff_append* should be used\n  // when only an elision of XFF addition is intended.\n  bool skip_xff_append = 21;\n\n  // Via header value to append to request and response headers. If this is\n  // empty, no via header will be appended.\n  string via = 22;\n\n  // Whether the connection manager will generate the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to\n  // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature\n  // is not desired it can be disabled.\n  google.protobuf.BoolValue generate_request_id = 15;\n\n  // Whether the connection manager will keep the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if passed for a request that is edge\n  // (Edge request is the request from external clients to front Envoy) and not reset it, which\n  // is the current Envoy behaviour. 
This defaults to false.\n  bool preserve_external_request_id = 32;\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  ForwardClientCertDetails forward_client_cert_details = 16\n      [(validate.rules).enum = {defined_only: true}];\n\n  // This field is valid only when :ref:`forward_client_cert_details\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.forward_client_cert_details>`\n  // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in\n  // the client certificate to be forwarded. Note that in the\n  // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and\n  // *By* is always set when the client certificate presents the URI type Subject Alternative Name\n  // value.\n  SetCurrentClientCertDetails set_current_client_cert_details = 17;\n\n  // If proxy_100_continue is true, Envoy will proxy incoming \"Expect:\n  // 100-continue\" headers upstream, and forward \"100 Continue\" responses\n  // downstream. If this is false or not set, Envoy will instead strip the\n  // \"Expect: 100-continue\" header, and send a \"100 Continue\" response itself.\n  bool proxy_100_continue = 18;\n\n  // If\n  // :ref:`use_remote_address\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.use_remote_address>`\n  // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is\n  // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*.\n  // This is useful for testing compatibility of upstream services that parse the header value. For\n  // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses\n  // <https://tools.ietf.org/html/rfc4291#section-2.5.5.2>`_ for details. 
This will also affect the\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See\n  // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6\n  // <config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6>` for runtime\n  // control.\n  // [#not-implemented-hide:]\n  bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20;\n\n  repeated UpgradeConfig upgrade_configs = 23;\n\n  // Should paths be normalized according to RFC 3986 before any processing of\n  // requests by HTTP filters or routing? This affects the upstream *:path* header\n  // as well. For paths that fail this check, Envoy will respond with 400 to\n  // paths that are malformed. This defaults to false currently but will default to\n  // true in the future. When not specified, this value may be overridden by the\n  // runtime variable\n  // :ref:`http_connection_manager.normalize_path<config_http_conn_man_runtime_normalize_path>`.\n  // See `Normalization and Comparison <https://tools.ietf.org/html/rfc3986#section-6>`_\n  // for details of normalization.\n  // Note that Envoy does not perform\n  // `case normalization <https://tools.ietf.org/html/rfc3986#section-6.2.2.1>`_\n  google.protobuf.BoolValue normalize_path = 30;\n\n  // Determines if adjacent slashes in the path are merged into one before any processing of\n  // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without\n  // setting this option, incoming requests with path `//dir///file` will not match against a route\n  // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of\n  // `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool merge_slashes = 33;\n\n  // The configuration of the request ID extension. 
This includes operations such as\n  // generation, validation, and associated tracing operations.\n  //\n  // If not set, Envoy uses the default UUID-based behavior:\n  //\n  // 1. Request ID is propagated using *x-request-id* header.\n  //\n  // 2. Request ID is a universally unique identifier (UUID).\n  //\n  // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID.\n  RequestIDExtension request_id_extension = 36;\n}\n\nmessage Rds {\n  // Configuration source specifier for RDS.\n  api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n  // The name of the route configuration. This name will be passed to the RDS\n  // API. This allows an Envoy configuration with multiple HTTP listeners (and\n  // associated HTTP connection manager filters) to use different route\n  // configurations.\n  string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// This message is used to work around the limitations with 'oneof' and repeated fields.\nmessage ScopedRouteConfigurationsList {\n  repeated api.v2.ScopedRouteConfiguration scoped_route_configurations = 1\n      [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#next-free-field: 6]\nmessage ScopedRoutes {\n  // Specifies the mechanism for constructing \"scope keys\" based on HTTP request attributes. These\n  // keys are matched against a set of :ref:`Key<envoy_api_msg_ScopedRouteConfiguration.Key>`\n  // objects assembled from :ref:`ScopedRouteConfiguration<envoy_api_msg_ScopedRouteConfiguration>`\n  // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via\n  // :ref:`scoped_route_configurations_list<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scoped_route_configurations_list>`.\n  //\n  // Upon receiving a request's headers, the Router will build a key using the algorithm specified\n  // by this message. 
This key will be used to look up the routing table (i.e., the\n  // :ref:`RouteConfiguration<envoy_api_msg_RouteConfiguration>`) to use for the request.\n  message ScopeKeyBuilder {\n    // Specifies the mechanism for constructing key fragments which are composed into scope keys.\n    message FragmentBuilder {\n      // Specifies how the value of a header should be extracted.\n      // The following example maps the structure of a header to the fields in this message.\n      //\n      // .. code::\n      //\n      //              <0> <1>   <-- index\n      //    X-Header: a=b;c=d\n      //    |         || |\n      //    |         || \\----> <element_separator>\n      //    |         ||\n      //    |         |\\----> <element.separator>\n      //    |         |\n      //    |         \\----> <element.key>\n      //    |\n      //    \\----> <name>\n      //\n      //    Each 'a=b' key-value pair constitutes an 'element' of the header field.\n      message HeaderValueExtractor {\n        // Specifies a header field's key value pair to match on.\n        message KvElement {\n          // The separator between key and value (e.g., '=' separates 'k=v;...').\n          // If an element is an empty string, the element is ignored.\n          // If an element contains no separator, the whole element is parsed as key and the\n          // fragment value is an empty string.\n          // If there are multiple values for a matched key, the first value is returned.\n          string separator = 1 [(validate.rules).string = {min_bytes: 1}];\n\n          // The key to match on.\n          string key = 2 [(validate.rules).string = {min_bytes: 1}];\n        }\n\n        // The name of the header field to extract the value from.\n        string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n        // The element separator (e.g., ';' separates 'a;b;c;d').\n        // Default: empty string. 
This causes the entirety of the header field to be extracted.\n        // If this field is set to an empty string and 'index' is used in the oneof below, 'index'\n        // must be set to 0.\n        string element_separator = 2;\n\n        oneof extract_type {\n          // Specifies the zero based index of the element to extract.\n          // Note Envoy concatenates multiple values of the same header key into a comma separated\n          // string, the splitting always happens after the concatenation.\n          uint32 index = 3;\n\n          // Specifies the key value pair to extract the value from.\n          KvElement element = 4;\n        }\n      }\n\n      oneof type {\n        option (validate.required) = true;\n\n        // Specifies how a header field's value should be extracted.\n        HeaderValueExtractor header_value_extractor = 1;\n      }\n    }\n\n    // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the\n    // fragments of a :ref:`ScopedRouteConfiguration<envoy_api_msg_ScopedRouteConfiguration>`.\n    // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key.\n    repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // The name assigned to the scoped routing configuration.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The algorithm to use for constructing a scope key for each request.\n  ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];\n\n  // Configuration source specifier for RDS.\n  // This config source is used to subscribe to RouteConfiguration resources specified in\n  // ScopedRouteConfiguration messages.\n  api.v2.core.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}];\n\n  oneof config_specifier {\n    option (validate.required) = true;\n\n    // The set of routing scopes corresponding 
to the HCM. A scope is assigned to a request by\n    // matching a key constructed from the request's attributes according to the algorithm specified\n    // by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRouteConfigurationsList scoped_route_configurations_list = 4;\n\n    // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS\n    // API. A scope is assigned to a request by matching a key constructed from the request's\n    // attributes according to the algorithm specified by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRds scoped_rds = 5;\n  }\n}\n\nmessage ScopedRds {\n  // Configuration source specifier for scoped RDS.\n  api.v2.core.ConfigSource scoped_rds_config_source = 1\n      [(validate.rules).message = {required: true}];\n}\n\nmessage HttpFilter {\n  reserved 3;\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_http_filters>`.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated. See the supported\n  // filters for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\nmessage RequestIDExtension {\n  // Request ID extension specific configuration.\n  google.protobuf.Any typed_config = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.kafka_broker.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.kafka_broker.v2alpha1\";\noption java_outer_classname = \"KafkaBrokerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.kafka_broker.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Kafka Broker]\n// Kafka Broker :ref:`configuration overview <config_network_filters_kafka_broker>`.\n// [#extension: envoy.filters.network.kafka_broker]\n\nmessage KafkaBroker {\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_kafka_broker_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.local_rate_limit.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/token_bucket.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.local_rate_limit.v2alpha\";\noption java_outer_classname = \"LocalRateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.local_ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Local rate limit]\n// Local rate limit :ref:`configuration overview <config_network_filters_local_rate_limit>`.\n// [#extension: envoy.filters.network.local_ratelimit]\n\nmessage LocalRateLimit {\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_local_rate_limit_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The token bucket configuration to use for rate limiting connections that are processed by the\n  // filter's filter chain. Each incoming connection processed by the filter consumes a single\n  // token. If the token is available, the connection will be allowed. If no tokens are available,\n  // the connection will be immediately closed.\n  //\n  // .. note::\n  //   In the current implementation each filter and filter chain has an independent rate limit.\n  //\n  // .. note::\n  //   In the current implementation the token bucket's :ref:`fill_interval\n  //   <envoy_api_field_type.TokenBucket.fill_interval>` must be >= 50ms to avoid too aggressive\n  //   refills.\n  type.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}];\n\n  // Runtime flag that controls whether the filter is enabled or not. 
If not specified, defaults\n  // to enabled.\n  api.v2.core.RuntimeFeatureFlag runtime_enabled = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/mongo_proxy/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/fault/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.mongo_proxy.v2;\n\nimport \"envoy/config/filter/fault/v2/fault.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2\";\noption java_outer_classname = \"MongoProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.mongo_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Mongo proxy]\n// MongoDB :ref:`configuration overview <config_network_filters_mongo_proxy>`.\n// [#extension: envoy.filters.network.mongo_proxy]\n\nmessage MongoProxy {\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_mongo_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The optional path to use for writing Mongo access logs. If no access log\n  // path is specified no access logs will be written. Note that access log is\n  // also gated by :ref:`runtime <config_network_filters_mongo_proxy_runtime>`.\n  string access_log = 2;\n\n  // Inject a fixed delay before proxying a Mongo operation. Delays are\n  // applied to the following MongoDB operations: Query, Insert, GetMore,\n  // and KillCursors. Once an active delay is in progress, all incoming\n  // data up until the timer event fires will be a part of the delay.\n  fault.v2.FaultDelay delay = 3;\n\n  // Flag to specify whether :ref:`dynamic metadata\n  // <config_network_filters_mongo_proxy_dynamic_metadata>` should be emitted. Defaults to false.\n  bool emit_dynamic_metadata = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.mysql_proxy.v1alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1\";\noption java_outer_classname = \"MysqlProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.mysql_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: MySQL proxy]\n// MySQL Proxy :ref:`configuration overview <config_network_filters_mysql_proxy>`.\n// [#extension: envoy.filters.network.mysql_proxy]\n\nmessage MySQLProxy {\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_mysql_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // [#not-implemented-hide:] The optional path to use for writing MySQL access logs.\n  // If the access log field is empty, access logs will not be written.\n  string access_log = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/rate_limit/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/ratelimit:pkg\",\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.rate_limit.v2;\n\nimport \"envoy/api/v2/ratelimit/ratelimit.proto\";\nimport \"envoy/config/ratelimit/v2/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.rate_limit.v2\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_network_filters_rate_limit>`.\n// [#extension: envoy.filters.network.ratelimit]\n\n// [#next-free-field: 7]\nmessage RateLimit {\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_rate_limit_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The rate limit domain to use in the rate limit service request.\n  string domain = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // The rate limit descriptor list to use in the rate limit service request.\n  repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 3\n      [(validate.rules).repeated = {min_items: 1}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 4;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 5;\n\n  // Configuration for an external rate limit service provider. 
If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/rbac/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/rbac/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/rbac/v2/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.rbac.v2;\n\nimport \"envoy/config/rbac/v2/rbac.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.rbac.v2\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.network.rbac.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_network_filters_rbac>`.\n// [#extension: envoy.filters.network.rbac]\n\n// RBAC network filter config.\n//\n// Header should not be used in rules/shadow_rules in RBAC network filter as\n// this information is only available in :ref:`RBAC http filter <config_http_filters_rbac>`.\nmessage RBAC {\n  enum EnforcementType {\n    // Apply RBAC policies when the first byte of data arrives on the connection.\n    ONE_TIME_ON_FIRST_BYTE = 0;\n\n    // Continuously apply RBAC policies as data arrives. Use this mode when\n    // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka,\n    // etc. when the protocol decoders emit dynamic metadata such as the\n    // resources being accessed and the operations on the resources.\n    CONTINUOUS = 1;\n  }\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v2.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter but will emit stats and logs\n  // and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v2.RBAC shadow_rules = 2;\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}];\n\n  // RBAC enforcement strategy. 
By default RBAC will be enforced only once\n  // when the first byte of data arrives from the downstream. When used in\n  // conjunction with filters that emit dynamic metadata after decoding\n  // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to\n  // CONTINUOUS to enforce RBAC policies on every message boundary.\n  EnforcementType enforcement_type = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/redis_proxy/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.redis_proxy.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.redis_proxy.v2\";\noption java_outer_classname = \"RedisProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.redis_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Redis Proxy]\n// Redis Proxy :ref:`configuration overview <config_network_filters_redis_proxy>`.\n// [#extension: envoy.filters.network.redis_proxy]\n\n// [#next-free-field: 7]\nmessage RedisProxy {\n  // Redis connection pool settings.\n  // [#next-free-field: 9]\n  message ConnPoolSettings {\n    // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently\n    // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data\n    // because replication is asynchronous and requires some delay. You need to ensure that your\n    // application can tolerate stale data.\n    enum ReadPolicy {\n      // Default mode. Read from the current primary node.\n      MASTER = 0;\n\n      // Read from the primary, but if it is unavailable, read from replica nodes.\n      PREFER_MASTER = 1;\n\n      // Read from replica nodes. If multiple replica nodes are present within a shard, a random\n      // node is selected. 
Healthy nodes have precedent over unhealthy nodes.\n      REPLICA = 2;\n\n      // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not\n      // present or unhealthy), read from the primary.\n      PREFER_REPLICA = 3;\n\n      // Read from any node of the cluster. A random node is selected among the primary and\n      // replicas, healthy nodes have precedent over unhealthy nodes.\n      ANY = 4;\n    }\n\n    // Per-operation timeout in milliseconds. The timer starts when the first\n    // command of a pipeline is written to the backend connection. Each response received from Redis\n    // resets the timer since it signifies that the next command is being processed by the backend.\n    // The only exception to this behavior is when a connection to a backend is not yet established.\n    // In that case, the connect timeout on the cluster will govern the timeout until the connection\n    // is ready.\n    google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}];\n\n    // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be\n    // forwarded to the same upstream. The hash key used for determining the upstream in a\n    // consistent hash ring configuration will be computed from the hash tagged key instead of the\n    // whole key. The algorithm used to compute the hash tag is identical to the `redis-cluster\n    // implementation <https://redis.io/topics/cluster-spec#keys-hash-tags>`_.\n    //\n    // Examples:\n    //\n    // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream\n    // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream\n    bool enable_hashtagging = 2;\n\n    // Accept `moved and ask redirection\n    // <https://redis.io/topics/cluster-spec#redirection-and-resharding>`_ errors from upstream\n    // redis servers, and retry commands to the specified target server. 
The target server does not\n    // need to be known to the cluster manager. If the command cannot be redirected, then the\n    // original error is passed downstream unchanged. By default, this support is not enabled.\n    bool enable_redirection = 3;\n\n    // Maximum size of encoded request buffer before flush is triggered and encoded requests\n    // are sent upstream. If this is unset, the buffer flushes whenever it receives data\n    // and performs no batching.\n    // This feature makes it possible for multiple clients to send requests to Envoy and have\n    // them batched- for example if one is running several worker processes, each with its own\n    // Redis connection. There is no benefit to using this with a single downstream process.\n    // Recommended size (if enabled) is 1024 bytes.\n    uint32 max_buffer_size_before_flush = 4;\n\n    // The encoded request buffer is flushed N milliseconds after the first request has been\n    // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`.\n    // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise,\n    // the timer should be set according to the number of clients, overall request rate and\n    // desired maximum latency for a single command. For example, if there are many requests\n    // being batched together at a high rate, the buffer will likely be filled before the timer\n    // fires. Alternatively, if the request rate is lower the buffer will not be filled as often\n    // before the timer fires.\n    // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter\n    // defaults to 3ms.\n    google.protobuf.Duration buffer_flush_timeout = 5;\n\n    // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts\n    // can be created at any given time by any given worker thread (see `enable_redirection` for\n    // more details). 
If the host is unknown and a connection cannot be created due to enforcing\n    // this limit, then redirection will fail and the original redirection error will be passed\n    // downstream unchanged. This limit defaults to 100.\n    google.protobuf.UInt32Value max_upstream_unknown_connections = 6;\n\n    // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate\n    // count.\n    bool enable_command_stats = 8;\n\n    // Read policy. The default is to read from the primary.\n    ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  message PrefixRoutes {\n    message Route {\n      // The router is capable of shadowing traffic from one cluster to another. The current\n      // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n      // respond before returning the response from the primary cluster. All normal statistics are\n      // collected for the shadow cluster making this feature useful for testing.\n      message RequestMirrorPolicy {\n        // Specifies the cluster that requests will be mirrored to. The cluster must\n        // exist in the cluster manager configuration.\n        string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n        // If not specified or the runtime key is not present, all requests to the target cluster\n        // will be mirrored.\n        //\n        // If specified, Envoy will lookup the runtime key to get the percentage of requests to the\n        // mirror.\n        api.v2.core.RuntimeFractionalPercent runtime_fraction = 2;\n\n        // Set this to TRUE to only mirror write commands, this is effectively replicating the\n        // writes in a \"fire and forget\" manner.\n        bool exclude_read_commands = 3;\n      }\n\n      // String prefix that must match the beginning of the keys. 
Envoy will always favor the\n      // longest match.\n      string prefix = 1;\n\n      // Indicates if the prefix needs to be removed from the key when forwarded.\n      bool remove_prefix = 2;\n\n      // Upstream cluster to forward the command to.\n      string cluster = 3 [(validate.rules).string = {min_bytes: 1}];\n\n      // Indicates that the route has a request mirroring policy.\n      repeated RequestMirrorPolicy request_mirror_policy = 4;\n    }\n\n    // List of prefix routes.\n    repeated Route routes = 1;\n\n    // Indicates that prefix matching should be case insensitive.\n    bool case_insensitive = 2;\n\n    // Optional catch-all route to forward commands that doesn't match any of the routes. The\n    // catch-all route becomes required when no routes are specified.\n    // .. attention::\n    //\n    //   This field is deprecated. Use a :ref:`catch_all\n    //   route<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_route>`\n    //   instead.\n    string catch_all_cluster = 3\n        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n    // Optional catch-all route to forward commands that doesn't match any of the routes. The\n    // catch-all route becomes required when no routes are specified.\n    Route catch_all_route = 4;\n  }\n\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_redis_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Name of cluster from cluster manager. See the :ref:`configuration section\n  // <arch_overview_redis_configuration>` of the architecture overview for recommendations on\n  // configuring the backing cluster.\n  //\n  // .. attention::\n  //\n  //   This field is deprecated. 
Use a :ref:`catch_all\n  //   route<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_route>`\n  //   instead.\n  string cluster = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Network settings for the connection pool to the upstream clusters.\n  ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}];\n\n  // Indicates that latency stat should be computed in microseconds. By default it is computed in\n  // milliseconds.\n  bool latency_in_micros = 4;\n\n  // List of **unique** prefixes used to separate keys from different workloads to different\n  // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all\n  // cluster can be used to forward commands when there is no match. Time complexity of the\n  // lookups are in O(min(longest key prefix, key length)).\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    prefix_routes:\n  //      routes:\n  //        - prefix: \"ab\"\n  //          cluster: \"cluster_a\"\n  //        - prefix: \"abc\"\n  //          cluster: \"cluster_b\"\n  //\n  // When using the above routes, the following prefixes would be sent to:\n  //\n  // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b.\n  // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a.\n  // * ``get z:users`` would return a NoUpstreamHost error. 
A :ref:`catch-all\n  //   route<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_route>`\n  //   would have retrieved the key from that cluster instead.\n  //\n  // See the :ref:`configuration section\n  // <arch_overview_redis_configuration>` of the architecture overview for recommendations on\n  // configuring the backing clusters.\n  PrefixRoutes prefix_routes = 5;\n\n  // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis\n  // AUTH command <https://redis.io/commands/auth>`_ with this password before enabling any other\n  // command. If an AUTH command's password matches this password, an \"OK\" response will be returned\n  // to the client. If the AUTH command password does not match this password, then an \"ERR invalid\n  // password\" error will be returned. If any other command is received before AUTH when this\n  // password is set, then a \"NOAUTH Authentication required.\" error response will be sent to the\n  // client. If an AUTH command is received when the password is not set, then an \"ERR Client sent\n  // AUTH, but no password is set\" error will be returned.\n  api.v2.core.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true];\n}\n\n// RedisProtocolOptions specifies Redis upstream protocol options. This object is used in\n// :ref:`typed_extension_protocol_options<envoy_api_field_Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.redis_proxy`.\nmessage RedisProtocolOptions {\n  // Upstream server password as defined by the `requirepass` directive\n  // <https://redis.io/topics/config>`_ in the server's configuration file.\n  api.v2.core.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/sni_cluster/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.sni_cluster.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.sni_cluster.v2\";\noption java_outer_classname = \"SniClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.sni_cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: SNI Cluster Filter]\n// Set the upstream cluster name from the SNI field in the TLS connection.\n// [#extension: envoy.filters.network.sni_cluster]\n\nmessage SniCluster {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/tcp_proxy/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.tcp_proxy.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/config/filter/accesslog/v2/accesslog.proto\";\nimport \"envoy/type/hash_policy.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2\";\noption java_outer_classname = \"TcpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.tcp_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: TCP Proxy]\n// TCP Proxy :ref:`configuration overview <config_network_filters_tcp_proxy>`.\n// [#extension: envoy.filters.network.tcp_proxy]\n\n// [#next-free-field: 13]\nmessage TcpProxy {\n  // [#not-implemented-hide:] Deprecated.\n  // TCP Proxy filter configuration using V1 format.\n  message DeprecatedV1 {\n    option deprecated = true;\n\n    // A TCP proxy route consists of a set of optional L4 criteria and the\n    // name of a cluster. If a downstream connection matches all the\n    // specified criteria, the cluster in the route is used for the\n    // corresponding upstream connection. Routes are tried in the order\n    // specified until a match is found. If no match is found, the connection\n    // is closed. 
A route with no criteria is valid and always produces a\n    // match.\n    // [#next-free-field: 6]\n    message TCPRoute {\n      // The cluster to connect to when a the downstream network connection\n      // matches the specified criteria.\n      string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // An optional list of IP address subnets in the form\n      // “ip_address/xx”. The criteria is satisfied if the destination IP\n      // address of the downstream connection is contained in at least one of\n      // the specified subnets. If the parameter is not specified or the list\n      // is empty, the destination IP address is ignored. The destination IP\n      // address of the downstream connection might be different from the\n      // addresses on which the proxy is listening if the connection has been\n      // redirected.\n      repeated api.v2.core.CidrRange destination_ip_list = 2;\n\n      // An optional string containing a comma-separated list of port numbers\n      // or ranges. The criteria is satisfied if the destination port of the\n      // downstream connection is contained in at least one of the specified\n      // ranges. If the parameter is not specified, the destination port is\n      // ignored. The destination port address of the downstream connection\n      // might be different from the port on which the proxy is listening if\n      // the connection has been redirected.\n      string destination_ports = 3;\n\n      // An optional list of IP address subnets in the form\n      // “ip_address/xx”. The criteria is satisfied if the source IP address\n      // of the downstream connection is contained in at least one of the\n      // specified subnets. If the parameter is not specified or the list is\n      // empty, the source IP address is ignored.\n      repeated api.v2.core.CidrRange source_ip_list = 4;\n\n      // An optional string containing a comma-separated list of port numbers\n      // or ranges. 
The criteria is satisfied if the source port of the\n      // downstream connection is contained in at least one of the specified\n      // ranges. If the parameter is not specified, the source port is\n      // ignored.\n      string source_ports = 5;\n    }\n\n    // The route table for the filter. All filter instances must have a route\n    // table, even if it is empty.\n    repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Allows for specification of multiple upstream clusters along with weights\n  // that indicate the percentage of traffic to be forwarded to each cluster.\n  // The router selects an upstream cluster based on these weights.\n  message WeightedCluster {\n    message ClusterWeight {\n      // Name of the upstream cluster.\n      string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // When a request matches the route, the choice of an upstream cluster is\n      // determined by its weight. The sum of weights across all entries in the\n      // clusters array determines the total weight.\n      uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n      // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n      // in the upstream cluster with metadata matching what is set in this field will be considered\n      // for load balancing. Note that this will be merged with what's provided in\n      // :ref:`TcpProxy.metadata_match\n      // <envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.metadata_match>`, with values\n      // here taking precedence. 
The filter name should be specified as *envoy.lb*.\n      api.v2.core.Metadata metadata_match = 3;\n    }\n\n    // Specifies one or more upstream clusters associated with the route.\n    repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Configuration for tunneling TCP over other transports or application layers.\n  // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will\n  // remain the default.\n  message TunnelingConfig {\n    // The hostname to send in the synthesized CONNECT headers to the upstream proxy.\n    string hostname = 1 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_tcp_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2;\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 10;\n  }\n\n  // Optional endpoint metadata match criteria. Only endpoints in the upstream\n  // cluster with metadata matching that set in metadata_match will be\n  // considered. The filter name should be specified as *envoy.lb*.\n  api.v2.core.Metadata metadata_match = 9;\n\n  // The idle timeout for connections managed by the TCP proxy filter. The idle timeout\n  // is defined as the period in which there are no bytes sent or received on either\n  // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set\n  // to 0s, the timeout will be disabled.\n  //\n  // .. 
warning::\n  //   Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 8;\n\n  // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy\n  // filter. The idle timeout is defined as the period in which there is no\n  // active traffic. If not set, there is no idle timeout. When the idle timeout\n  // is reached the connection will be closed. The distinction between\n  // downstream_idle_timeout/upstream_idle_timeout provides a means to set\n  // timeout based on the last byte sent on the downstream/upstream connection.\n  google.protobuf.Duration downstream_idle_timeout = 3;\n\n  // [#not-implemented-hide:]\n  google.protobuf.Duration upstream_idle_timeout = 4;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by the this tcp_proxy.\n  repeated accesslog.v2.AccessLog access_log = 5;\n\n  // [#not-implemented-hide:] Deprecated.\n  DeprecatedV1 deprecated_v1 = 6 [deprecated = true];\n\n  // The maximum number of unsuccessful connection attempts that will be made before\n  // giving up. If the parameter is not specified, 1 connection attempt will be made.\n  google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based\n  // load balancing algorithms will select a host randomly. Currently the number of hash policies is\n  // limited to 1.\n  repeated type.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}];\n\n  // [#not-implemented-hide:] feature in progress\n  // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP\n  // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload\n  // will be proxied upstream as per usual.\n  TunnelingConfig tunneling_config = 12;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md",
    "content": "Protocol buffer definitions for the Thrift proxy.\n"
  },
  {
    "path": "api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.thrift_proxy.v2alpha1;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.thrift_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Thrift Proxy Route Configuration]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n\nmessage RouteConfiguration {\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  oneof match_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route must exactly match the request method name. As a special case, an\n    // empty string matches any request method name.\n    string method_name = 1;\n\n    // If specified, the route must have the service name as the request method name prefix. As a\n    // special case, an empty string matches any service name. 
Only relevant when service\n    // multiplexing.\n    string service_name = 2;\n  }\n\n  // Inverts whatever matching is done in the :ref:`method_name\n  // <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.RouteMatch.method_name>` or\n  // :ref:`service_name\n  // <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.RouteMatch.service_name>` fields.\n  // Cannot be combined with wildcard matching as that would result in routes never being matched.\n  //\n  // .. note::\n  //\n  //   This does not invert matching done as part of the :ref:`headers field\n  //   <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.RouteMatch.headers>` field. To\n  //   invert header matching, see :ref:`invert_match\n  //   <envoy_api_field_route.HeaderMatcher.invert_match>`.\n  bool invert = 3;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config). Note that this only applies for Thrift transports and/or\n  // protocols that support headers.\n  repeated api.v2.route.HeaderMatcher headers = 4;\n}\n\n// [#next-free-field: 7]\nmessage RouteAction {\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates a single upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 2;\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // Thrift header named by cluster_header from the request headers. 
If the\n    // header is not found or the referenced cluster does not exist Envoy will\n    // respond with an unknown method exception or an internal error exception,\n    // respectively.\n    string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n  // the upstream cluster with metadata matching what is set in this field will be considered.\n  // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match\n  // <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight.metadata_match>`,\n  // with values there taking precedence. Keys and values should be provided under the \"envoy.lb\"\n  // metadata key.\n  api.v2.core.Metadata metadata_match = 3;\n\n  // Specifies a set of rate limit configurations that could be applied to the route.\n  // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders\n  // action with the header name \":method-name\".\n  repeated api.v2.route.RateLimit rate_limits = 4;\n\n  // Strip the service prefix from the method name, if there's a prefix. For\n  // example, the method call Service:method would end up being just method.\n  bool strip_service_name = 5;\n}\n\n// Allows for specification of multiple upstream clusters along with weights that indicate the\n// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster\n// based on these weights.\nmessage WeightedCluster {\n  message ClusterWeight {\n    // Name of the upstream cluster.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // When a request matches the route, the choice of an upstream cluster is determined by its\n    // weight. 
The sum of weights across all entries in the clusters array determines the total\n    // weight.\n    google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field, combined with what's\n    // provided in :ref:`RouteAction's metadata_match\n    // <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.RouteAction.metadata_match>`,\n    // will be considered. Values here will take precedence. Keys and values should be provided\n    // under the \"envoy.lb\" metadata key.\n    api.v2.core.Metadata metadata_match = 3;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.thrift_proxy.v2alpha1;\n\nimport \"envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1\";\noption java_outer_classname = \"ThriftProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.thrift_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Thrift Proxy]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n// [#extension: envoy.filters.network.thrift_proxy]\n\n// Thrift transport types supported by Envoy.\nenum TransportType {\n  // For downstream connections, the Thrift proxy will attempt to determine which transport to use.\n  // For upstream connections, the Thrift proxy will use same transport as the downstream\n  // connection.\n  AUTO_TRANSPORT = 0;\n\n  // The Thrift proxy will use the Thrift framed transport.\n  FRAMED = 1;\n\n  // The Thrift proxy will use the Thrift unframed transport.\n  UNFRAMED = 2;\n\n  // The Thrift proxy will assume the client is using the Thrift header transport.\n  HEADER = 3;\n}\n\n// Thrift Protocol types supported by Envoy.\nenum ProtocolType {\n  // For downstream connections, the Thrift proxy will attempt to determine which protocol to use.\n  // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol\n  // detection. 
For upstream connections, the Thrift proxy will use the same protocol as the\n  // downstream connection.\n  AUTO_PROTOCOL = 0;\n\n  // The Thrift proxy will use the Thrift binary protocol.\n  BINARY = 1;\n\n  // The Thrift proxy will use Thrift non-strict binary protocol.\n  LAX_BINARY = 2;\n\n  // The Thrift proxy will use the Thrift compact protocol.\n  COMPACT = 3;\n\n  // The Thrift proxy will use the Thrift \"Twitter\" protocol implemented by the finagle library.\n  TWITTER = 4;\n}\n\n// [#next-free-field: 6]\nmessage ThriftProxy {\n  // Supplies the type of transport that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_config.filter.network.thrift_proxy.v2alpha1.TransportType.AUTO_TRANSPORT>`.\n  TransportType transport = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_config.filter.network.thrift_proxy.v2alpha1.ProtocolType.AUTO_PROTOCOL>`.\n  ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  RouteConfiguration route_config = 4;\n\n  // A list of individual Thrift filters that make up the filter chain for requests made to the\n  // Thrift proxy. Order matters as the filters are processed sequentially. For backwards\n  // compatibility, if no thrift_filters are specified, a default Thrift router filter\n  // (`envoy.filters.thrift.router`) is used.\n  repeated ThriftFilter thrift_filters = 5;\n}\n\n// ThriftFilter configures a Thrift filter.\nmessage ThriftFilter {\n  // The name of the filter to instantiate. The name must match a supported\n  // filter. 
The built-in filters are:\n  //\n  // [#comment:TODO(zuercher): Auto generate the following list]\n  // * :ref:`envoy.filters.thrift.router <config_thrift_filters_router>`\n  // * :ref:`envoy.filters.thrift.rate_limit <config_thrift_filters_rate_limit>`\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated. See the supported\n  // filters for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in\n// :ref:`typed_extension_protocol_options<envoy_api_field_Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.thrift_proxy`.\nmessage ThriftProtocolOptions {\n  // Supplies the type of transport that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_config.filter.network.thrift_proxy.v2alpha1.TransportType.AUTO_TRANSPORT>`,\n  // which is the default, causes the proxy to use the same transport as the downstream connection.\n  TransportType transport = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_config.filter.network.thrift_proxy.v2alpha1.ProtocolType.AUTO_PROTOCOL>`,\n  // which is the default, causes the proxy to use the same protocol as the downstream connection.\n  ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.zookeeper_proxy.v1alpha1;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1\";\noption java_outer_classname = \"ZookeeperProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.zookeeper_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: ZooKeeper proxy]\n// ZooKeeper Proxy :ref:`configuration overview <config_network_filters_zookeeper_proxy>`.\n// [#extension: envoy.filters.network.zookeeper_proxy]\n\nmessage ZooKeeperProxy {\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_zookeeper_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs.\n  // If the access log field is empty, access logs will not be written.\n  string access_log = 2;\n\n  // Messages — requests, responses and events — that are bigger than this value will\n  // be ignored. If it is not set, the default value is 1Mb.\n  //\n  // The value here should match the jute.maxbuffer property in your cluster configuration:\n  //\n  // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options\n  //\n  // if that is set. If it isn't, ZooKeeper's default is also 1Mb.\n  google.protobuf.UInt32Value max_packet_bytes = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.thrift.rate_limit.v2alpha1;\n\nimport \"envoy/config/ratelimit/v2/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_thrift_filters_rate_limit>`.\n// [#extension: envoy.filters.thrift.ratelimit]\n\n// [#next-free-field: 6]\nmessage RateLimit {\n  // The rate limit domain to use in the rate limit service request.\n  string domain = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Specifies the rate limit configuration stage. Each configured rate limit filter performs a\n  // rate limit check using descriptors configured in the\n  // :ref:`envoy_api_msg_config.filter.network.thrift_proxy.v2alpha1.RouteAction` for the request.\n  // Only those entries with a matching stage number are used for a given filter. If not set, the\n  // default stage number is 0.\n  //\n  // .. note::\n  //\n  //  The filter supports a range of 0 - 10 inclusively for stage numbers.\n  uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 3;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. 
When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 4;\n\n  // Configuration for an external rate limit service provider. If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/filter/thrift/router/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/thrift/router/v2alpha1/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.thrift.router.v2alpha1;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Router]\n// Thrift router :ref:`configuration overview <config_thrift_filters_router>`.\n// [#extension: envoy.filters.thrift.router]\n\nmessage Router {\n}\n"
  },
  {
    "path": "api/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.udp.udp_proxy.v2alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha\";\noption java_outer_classname = \"UdpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.udp.udp_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: UDP proxy]\n// UDP proxy :ref:`configuration overview <config_udp_listener_filters_udp_proxy>`.\n// [#extension: envoy.filters.udp_listener.udp_proxy]\n\n// Configuration for the UDP proxy filter.\nmessage UdpProxyConfig {\n  // The stat prefix used when emitting UDP proxy filter stats.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // The idle timeout for sessions. Idle is defined as no datagrams being received or sent by\n  // the session. The default if not specified is 1 minute.\n  google.protobuf.Duration idle_timeout = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/grpc_credential/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/grpc_credential/v2alpha/aws_iam.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.grpc_credential.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.grpc_credential.v2alpha\";\noption java_outer_classname = \"AwsIamProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Grpc Credentials AWS IAM]\n// Configuration for AWS IAM Grpc Credentials Plugin\n// [#extension: envoy.grpc_credentials.aws_iam]\n\nmessage AwsIamConfig {\n  // The `service namespace\n  // <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_\n  // of the Grpc endpoint.\n  //\n  // Example: appmesh\n  string service_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the Grpc\n  // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment\n  // variable.\n  //\n  // Example: us-west-2\n  string region = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.grpc_credential.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.grpc_credential.v2alpha\";\noption java_outer_classname = \"FileBasedMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Grpc Credentials File Based Metadata]\n// Configuration for File Based Metadata Grpc Credentials Plugin\n// [#extension: envoy.grpc_credentials.file_based_metadata]\n\nmessage FileBasedMetadataConfig {\n  // Location or inline data of secret to use for authentication of the Google gRPC connection\n  // this secret will be attached to a header of the gRPC connection\n  api.v2.core.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true];\n\n  // Metadata header key to use for sending the secret data\n  // if no header key is set, \"authorization\" header will be used\n  string header_key = 2;\n\n  // Prefix to prepend to the secret in the metadata header\n  // if no prefix is set, the default is to use no prefix\n  string header_prefix = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/grpc_credential/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/grpc_credential/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/grpc_credential/v3/aws_iam.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.grpc_credential.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.grpc_credential.v3\";\noption java_outer_classname = \"AwsIamProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Grpc Credentials AWS IAM]\n// Configuration for AWS IAM Grpc Credentials Plugin\n// [#extension: envoy.grpc_credentials.aws_iam]\n\nmessage AwsIamConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.grpc_credential.v2alpha.AwsIamConfig\";\n\n  // The `service namespace\n  // <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_\n  // of the Grpc endpoint.\n  //\n  // Example: appmesh\n  string service_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the Grpc\n  // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment\n  // variable.\n  //\n  // Example: us-west-2\n  string region = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/grpc_credential/v3/file_based_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.grpc_credential.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.grpc_credential.v3\";\noption java_outer_classname = \"FileBasedMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Grpc Credentials File Based Metadata]\n// Configuration for File Based Metadata Grpc Credentials Plugin\n// [#extension: envoy.grpc_credentials.file_based_metadata]\n\nmessage FileBasedMetadataConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig\";\n\n  // Location or inline data of secret to use for authentication of the Google gRPC connection\n  // this secret will be attached to a header of the gRPC connection\n  core.v3.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true];\n\n  // Metadata header key to use for sending the secret data\n  // if no header key is set, \"authorization\" header will be used\n  string header_key = 2;\n\n  // Prefix to prepend to the secret in the metadata header\n  // if no prefix is set, the default is to use no prefix\n  string header_prefix = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/health_checker/redis/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/health_checker/redis/v2/redis.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.health_checker.redis.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.health_checker.redis.v2\";\noption java_outer_classname = \"RedisProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Redis]\n// Redis health checker :ref:`configuration overview <config_health_checkers_redis>`.\n// [#extension: envoy.health_checkers.redis]\n\nmessage Redis {\n  // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value\n  // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other\n  // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance\n  // by setting the specified key to any value and waiting for traffic to drain.\n  string key = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/listener/v2/api_listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v2;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v2\";\noption java_outer_classname = \"ApiListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: API listener]\n\n// Describes a type of API listener, which is used in non-proxy clients. The type of API\n// exposed to the non-proxy application depends on the type of API listener.\nmessage ApiListener {\n  // The type in this field determines the type of API listener. At present, the following\n  // types are supported:\n  // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP)\n  // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the\n  // specific config message for each type of API listener. We could not do this in v2 because\n  // it would have caused circular dependencies for go protos: lds.proto depends on this file,\n  // and http_connection_manager.proto depends on rds.proto, which is in the same directory as\n  // lds.proto, so lds.proto cannot depend on this file.]\n  google.protobuf.Any api_listener = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/listener:pkg\",\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/listener/v2:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/listener/v3/api_listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"ApiListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: API listener]\n\n// Describes a type of API listener, which is used in non-proxy clients. The type of API\n// exposed to the non-proxy application depends on the type of API listener.\nmessage ApiListener {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v2.ApiListener\";\n\n  // The type in this field determines the type of API listener. At present, the following\n  // types are supported:\n  // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP)\n  // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the\n  // specific config message for each type of API listener. We could not do this in v2 because\n  // it would have caused circular dependencies for go protos: lds.proto depends on this file,\n  // and http_connection_manager.proto depends on rds.proto, which is in the same directory as\n  // lds.proto, so lds.proto cannot depend on this file.]\n  google.protobuf.Any api_listener = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v3/listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"envoy/config/accesslog/v3/accesslog.proto\";\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/socket_option.proto\";\nimport \"envoy/config/listener/v3/api_listener.proto\";\nimport \"envoy/config/listener/v3/listener_components.proto\";\nimport \"envoy/config/listener/v3/udp_listener_config.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/collection_entry.proto\";\n\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"ListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Listener configuration]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// Listener list collections. Entries are *Listener* resources or references.\n// [#not-implemented-hide:]\nmessage ListenerCollection {\n  repeated udpa.core.v1.CollectionEntry entries = 1;\n}\n\n// [#next-free-field: 25]\nmessage Listener {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Listener\";\n\n  enum DrainType {\n    // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check\n    // filter), listener removal/modification, and hot restart.\n    DEFAULT = 0;\n\n    // Drain in response to listener removal/modification and hot restart. This setting does not\n    // include /healthcheck/fail. 
This setting may be desirable if Envoy is hosting both ingress\n    // and egress listeners.\n    MODIFY_ONLY = 1;\n  }\n\n  // [#not-implemented-hide:]\n  message DeprecatedV1 {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Listener.DeprecatedV1\";\n\n    // Whether the listener should bind to the port. A listener that doesn't\n    // bind can only receive connections redirected from other listeners that\n    // set use_original_dst parameter to true. Default is true.\n    //\n    // This is deprecated in v2, all Listeners will bind to their port. An\n    // additional filter chain must be created for every original destination\n    // port this listener may redirect to in v2, with the original port\n    // specified in the FilterChainMatch destination_port field.\n    //\n    // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.]\n    google.protobuf.BoolValue bind_to_port = 1;\n  }\n\n  // Configuration for listener connection balancing.\n  message ConnectionBalanceConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Listener.ConnectionBalanceConfig\";\n\n    // A connection balancer implementation that does exact balancing. This means that a lock is\n    // held during balancing so that connection counts are nearly exactly balanced between worker\n    // threads. This is \"nearly\" exact in the sense that a connection might close in parallel thus\n    // making the counts incorrect, but this should be rectified on the next accept. 
This balancer\n    // sacrifices accept throughput for accuracy and should be used when there are a small number of\n    // connections that rarely cycle (e.g., service mesh gRPC egress).\n    message ExactBalance {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Listener.ConnectionBalanceConfig.ExactBalance\";\n    }\n\n    oneof balance_type {\n      option (validate.required) = true;\n\n      // If specified, the listener will use the exact connection balancer.\n      ExactBalance exact_balance = 1;\n    }\n  }\n\n  reserved 14, 4;\n\n  reserved \"use_original_dst\";\n\n  // The unique name by which this listener is known. If no name is provided,\n  // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically\n  // updated or removed via :ref:`LDS <config_listeners_lds>` a unique name must be provided.\n  string name = 1;\n\n  // The address that the listener should listen on. In general, the address must be unique, though\n  // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on\n  // Linux as the actual port will be allocated by the OS.\n  core.v3.Address address = 2 [(validate.rules).message = {required: true}];\n\n  // A list of filter chains to consider for this listener. 
The\n  // :ref:`FilterChain <envoy_api_msg_config.listener.v3.FilterChain>` with the most specific\n  // :ref:`FilterChainMatch <envoy_api_msg_config.listener.v3.FilterChainMatch>` criteria is used on a\n  // connection.\n  //\n  // Example using SNI for filter chain selection can be found in the\n  // :ref:`FAQ entry <faq_how_to_setup_sni>`.\n  repeated FilterChain filter_chains = 3;\n\n  // Soft limit on size of the listener’s new connection read and write buffers.\n  // If unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // Listener metadata.\n  core.v3.Metadata metadata = 6;\n\n  // [#not-implemented-hide:]\n  DeprecatedV1 deprecated_v1 = 7;\n\n  // The type of draining to perform at a listener-wide level.\n  DrainType drain_type = 8;\n\n  // Listener filters have the opportunity to manipulate and augment the connection metadata that\n  // is used in connection filter chain matching, for example. These filters are run before any in\n  // :ref:`filter_chains <envoy_api_field_config.listener.v3.Listener.filter_chains>`. Order matters as the\n  // filters are processed sequentially right after a socket has been accepted by the listener, and\n  // before a connection is created.\n  // UDP Listener filters can be specified when the protocol in the listener socket address in\n  // :ref:`protocol <envoy_api_field_config.core.v3.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>`.\n  // UDP listeners currently support a single filter.\n  repeated ListenerFilter listener_filters = 9;\n\n  // The timeout to wait for all listener filters to complete operation. If the timeout is reached,\n  // the accepted socket is closed without a connection being created unless\n  // `continue_on_listener_filters_timeout` is set to true. 
Specify 0 to disable the\n  // timeout. If not specified, a default timeout of 15s is used.\n  google.protobuf.Duration listener_filters_timeout = 15;\n\n  // Whether a connection should be created when listener filters timeout. Default is false.\n  //\n  // .. attention::\n  //\n  //   Some listener filters, such as :ref:`Proxy Protocol filter\n  //   <config_listener_filters_proxy_protocol>`, should not be used with this option. It will cause\n  //   unexpected behavior when a connection is created.\n  bool continue_on_listener_filters_timeout = 17;\n\n  // Whether the listener should be set as a transparent socket.\n  // When this flag is set to true, connections can be redirected to the listener using an\n  // *iptables* *TPROXY* target, in which case the original source and destination addresses and\n  // ports are preserved on accepted connections. This flag should be used in combination with\n  // :ref:`an original_dst <config_listener_filters_original_dst>` :ref:`listener filter\n  // <envoy_api_field_config.listener.v3.Listener.listener_filters>` to mark the connections' local addresses as\n  // \"restored.\" This can be used to hand off each redirected connection to another listener\n  // associated with the connection's destination address. Direct connections to the socket without\n  // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are\n  // therefore treated as if they were redirected.\n  // When this flag is set to false, the listener's socket is explicitly reset as non-transparent.\n  // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability.\n  // When this flag is not set (default), the socket is not modified, i.e. the transparent option\n  // is neither set nor reset.\n  google.protobuf.BoolValue transparent = 10;\n\n  // Whether the listener should set the *IP_FREEBIND* socket option. 
When this\n  // flag is set to true, listeners can be bound to an IP address that is not\n  // configured on the system running Envoy. When this flag is set to false, the\n  // option *IP_FREEBIND* is disabled on the socket. When this flag is not set\n  // (default), the socket is not modified, i.e. the option is neither enabled\n  // nor disabled.\n  google.protobuf.BoolValue freebind = 11;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.v3.SocketOption socket_options = 13;\n\n  // Whether the listener should accept TCP Fast Open (TFO) connections.\n  // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on\n  // the socket, with a queue length of the specified size\n  // (see `details in RFC7413 <https://tools.ietf.org/html/rfc7413#section-5.1>`_).\n  // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket.\n  // When this flag is not set (default), the socket is not modified,\n  // i.e. the option is neither enabled nor disabled.\n  //\n  // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable\n  // TCP_FASTOPEN.\n  // See `ip-sysctl.txt <https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt>`_.\n  //\n  // On macOS, only values of 0, 1, and unset are valid; other values may result in an error.\n  // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter.\n  google.protobuf.UInt32Value tcp_fast_open_queue_length = 12;\n\n  // Specifies the intended direction of the traffic relative to the local Envoy.\n  core.v3.TrafficDirection traffic_direction = 16;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_config.core.v3.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // listener to create, i.e. 
:ref:`udp_listener_name\n  // <envoy_api_field_config.listener.v3.UdpListenerConfig.udp_listener_name>` = \"raw_udp_listener\" for\n  // creating a packet-oriented UDP listener. If not present, treat it as \"raw_udp_listener\".\n  UdpListenerConfig udp_listener_config = 18;\n\n  // Used to represent an API listener, which is used in non-proxy clients. The type of API\n  // exposed to the non-proxy application depends on the type of API listener.\n  // When this field is set, no other field except for :ref:`name<envoy_api_field_config.listener.v3.Listener.name>`\n  // should be set.\n  //\n  // .. note::\n  //\n  //  Currently only one ApiListener can be installed; and it can only be done via bootstrap config,\n  //  not LDS.\n  //\n  // [#next-major-version: In the v3 API, instead of this messy approach where the socket\n  // listener fields are directly in the top-level Listener message and the API listener types\n  // are in the ApiListener message, the socket listener messages should be in their own message,\n  // and the top-level Listener should essentially be a oneof that selects between the\n  // socket listener and the various types of API listener. That way, a given Listener message\n  // can structurally only contain the fields of the relevant type.]\n  ApiListener api_listener = 19;\n\n  // The listener's connection balancer configuration, currently only applicable to TCP listeners.\n  // If no configuration is specified, Envoy will not attempt to balance active connections between\n  // worker threads.\n  ConnectionBalanceConfig connection_balance_config = 20;\n\n  // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and\n  // create one socket for each worker thread. This makes inbound connections\n  // distribute among worker threads roughly evenly in cases where there are a high number\n  // of connections. 
When this flag is set to false, all worker threads share one socket.\n  //\n  // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart\n  // (see `3rd paragraph in 'soreuseport' commit message\n  // <https://github.com/torvalds/linux/commit/c617f398edd4db2b8567a28e89>`_).\n  // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket\n  // <https://github.com/torvalds/linux/commit/40a1227ea845a37ab197dd1caffb60b047fa36b1>`_.\n  bool reuse_port = 21;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by this listener.\n  repeated accesslog.v3.AccessLog access_log = 22;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_config.core.v3.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // writer to create, i.e. :ref:`name <envoy_api_field_config.core.v3.TypedExtensionConfig.name>`\n  //    = \"udp_default_writer\" for creating a udp writer with writing in passthrough mode,\n  //    = \"udp_gso_batch_writer\" for creating a udp writer with writing in batch mode.\n  // If not present, treat it as \"udp_default_writer\".\n  // [#not-implemented-hide:]\n  core.v3.TypedExtensionConfig udp_writer_config = 23;\n\n  // The maximum length a tcp listener's pending connections queue can grow to. If no value is\n  // provided net.core.somaxconn will be used on Linux and 128 otherwise.\n  google.protobuf.UInt32Value tcp_backlog_size = 24;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v3/listener_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"ListenerComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Listener components]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage Filter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.listener.Filter\";\n\n  reserved 3, 2;\n\n  reserved \"config\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// Specifies the match criteria for selecting a specific filter chain for a\n// listener.\n//\n// In order for a filter chain to be selected, *ALL* of its criteria must be\n// fulfilled by the incoming connection, properties of which are set by the\n// networking stack and/or listener filters.\n//\n// The following order applies:\n//\n// 1. Destination port.\n// 2. Destination IP address.\n// 3. Server name (e.g. SNI for TLS protocol),\n// 4. Transport protocol.\n// 5. Application protocols (e.g. ALPN for TLS protocol).\n// 6. Source type (e.g. 
any, local or external network).\n// 7. Source IP address.\n// 8. Source port.\n//\n// For criteria that allow ranges or wildcards, the most specific value in any\n// of the configured filter chains that matches the incoming connection is going\n// to be used (e.g. for SNI ``www.example.com`` the most specific match would be\n// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter\n// chain without ``server_names`` requirements).\n//\n// [#comment: Implemented rules are kept in the preference order, with deprecated fields\n// listed at the end, because that's how we want to list them in the docs.\n//\n// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules]\n// [#next-free-field: 13]\nmessage FilterChainMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.FilterChainMatch\";\n\n  enum ConnectionSourceType {\n    // Any connection source matches.\n    ANY = 0;\n\n    // Match a connection originating from the same host.\n    SAME_IP_OR_LOOPBACK = 1;\n\n    // Match a connection originating from a different host.\n    EXTERNAL = 2;\n  }\n\n  reserved 1;\n\n  // Optional destination port to consider when use_original_dst is set on the\n  // listener in determining a filter chain match.\n  google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}];\n\n  // If non-empty, an IP address and prefix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  repeated core.v3.CidrRange prefix_ranges = 3;\n\n  // If non-empty, an IP address and suffix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  // [#not-implemented-hide:]\n  string address_suffix = 4;\n\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value suffix_len = 5;\n\n  // Specifies the connection source IP match type. 
Can be any, local or external network.\n  ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}];\n\n  // The criteria is satisfied if the source IP address of the downstream\n  // connection is contained in at least one of the specified subnets. If the\n  // parameter is not specified or the list is empty, the source IP address is\n  // ignored.\n  repeated core.v3.CidrRange source_prefix_ranges = 6;\n\n  // The criteria is satisfied if the source port of the downstream connection\n  // is contained in at least one of the specified ports. If the parameter is\n  // not specified, the source port is ignored.\n  repeated uint32 source_ports = 7\n      [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}];\n\n  // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining\n  // a filter chain match. Those values will be compared against the server names of a new\n  // connection, when detected by one of the listener filters.\n  //\n  // The server name will be matched against all wildcard domains, i.e. ``www.example.com``\n  // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``.\n  //\n  // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.\n  //\n  // .. 
attention::\n  //\n  //   See the :ref:`FAQ entry <faq_how_to_setup_sni>` on how to configure SNI for more\n  //   information.\n  repeated string server_names = 11;\n\n  // If non-empty, a transport protocol to consider when determining a filter chain match.\n  // This value will be compared against the transport protocol of a new connection, when\n  // it's detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``raw_buffer`` - default, used when no transport protocol is detected,\n  // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //   when TLS protocol is detected.\n  string transport_protocol = 9;\n\n  // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when\n  // determining a filter chain match. Those values will be compared against the application\n  // protocols of a new connection, when detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector\n  //   <config_listener_filters_tls_inspector>`,\n  // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //\n  // .. 
attention::\n  //\n  //   Currently, only :ref:`TLS Inspector <config_listener_filters_tls_inspector>` provides\n  //   application protocol detection based on the requested\n  //   `ALPN <https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_ values.\n  //\n  //   However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet,\n  //   and matching on values other than ``h2`` is going to lead to a lot of false negatives,\n  //   unless all connecting clients are known to use ALPN.\n  repeated string application_protocols = 10;\n}\n\n// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and\n// various other parameters.\n// [#next-free-field: 9]\nmessage FilterChain {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.listener.FilterChain\";\n\n  // The configuration for on-demand filter chain. If this field is not empty in FilterChain message,\n  // a filter chain will be built on-demand.\n  // On-demand filter chains help speedup the warming up of listeners since the building and initialization of\n  // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain.\n  // Filter chains that are not often used can be set as on-demand.\n  message OnDemandConfiguration {\n    // The timeout to wait for filter chain placeholders to complete rebuilding.\n    // 1. If this field is set to 0, timeout is disabled.\n    // 2. 
If not specified, a default timeout of 15s is used.\n    // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached.\n    // Upon failure or timeout, all connections related to this filter chain will be closed.\n    // Rebuilding will start again on the next new connection.\n    google.protobuf.Duration rebuild_timeout = 1;\n  }\n\n  reserved 2;\n\n  reserved \"tls_context\";\n\n  // The criteria to use when matching a connection to this filter chain.\n  FilterChainMatch filter_chain_match = 1;\n\n  // A list of individual network filters that make up the filter chain for\n  // connections established with the listener. Order matters as the filters are\n  // processed sequentially as connection events happen. Note: If the filter\n  // list is empty, the connection will close by default.\n  repeated Filter filters = 3;\n\n  // Whether the listener should expect a PROXY protocol V1 header on new\n  // connections. If this option is enabled, the listener will assume that that\n  // remote address of the connection is the one specified in the header. Some\n  // load balancers including the AWS ELB support this option. If the option is\n  // absent or set to false, Envoy will use the physical peer address of the\n  // connection as the remote address.\n  google.protobuf.BoolValue use_proxy_proto = 4;\n\n  // [#not-implemented-hide:] filter chain metadata.\n  core.v3.Metadata metadata = 5;\n\n  // Optional custom transport socket implementation to use for downstream connections.\n  // To setup TLS, set a transport socket with name `tls` and\n  // :ref:`DownstreamTlsContext <envoy_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.v3.TransportSocket transport_socket = 6;\n\n  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. 
If no\n  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter\n  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.\n  string name = 7;\n\n  // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand.\n  // If this field is not empty, the filter chain will be built on-demand.\n  // Otherwise, the filter chain will be built normally and block listener warming.\n  OnDemandConfiguration on_demand_configuration = 8;\n}\n\n// Listener filter chain match configuration. This is a recursive structure which allows complex\n// nested match configurations to be built using various logical operators.\n//\n// Examples:\n//\n// * Matches if the destination port is 3306.\n//\n// .. code-block:: yaml\n//\n//  destination_port_range:\n//   start: 3306\n//   end: 3307\n//\n// * Matches if the destination port is 3306 or 15000.\n//\n// .. code-block:: yaml\n//\n//  or_match:\n//    rules:\n//      - destination_port_range:\n//          start: 3306\n//          end: 3306\n//      - destination_port_range:\n//          start: 15000\n//          end: 15001\n//\n// [#next-free-field: 6]\nmessage ListenerFilterChainMatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.ListenerFilterChainMatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.listener.ListenerFilterChainMatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated ListenerFilterChainMatchPredicate rules = 1\n        [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. 
If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. The match configuration will match if the negated match condition matches.\n    ListenerFilterChainMatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // Match destination port. Particularly, the match evaluation must use the recovered local port if\n    // the owning listener filter is after :ref:`an original_dst listener filter <config_listener_filters_original_dst>`.\n    type.v3.Int32Range destination_port_range = 5;\n  }\n}\n\nmessage ListenerFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.ListenerFilter\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_listener_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated.\n  // See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Optional match predicate used to disable the filter. The filter is enabled when this field is empty.\n  // See :ref:`ListenerFilterChainMatchPredicate <envoy_api_msg_config.listener.v3.ListenerFilterChainMatchPredicate>`\n  // for further examples.\n  ListenerFilterChainMatchPredicate filter_disabled = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v3/quic_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"QuicConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: QUIC listener Config]\n\n// Configuration specific to the QUIC protocol.\n// Next id: 5\nmessage QuicProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.QuicProtocolOptions\";\n\n  // Maximum number of streams that the client can negotiate per connection. 100\n  // if not specified.\n  google.protobuf.UInt32Value max_concurrent_streams = 1;\n\n  // Maximum number of milliseconds that connection will be alive when there is\n  // no network activity. 300000ms if not specified.\n  google.protobuf.Duration idle_timeout = 2;\n\n  // Connection timeout in milliseconds before the crypto handshake is finished.\n  // 20000ms if not specified.\n  google.protobuf.Duration crypto_handshake_timeout = 3;\n\n  // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults\n  // to enabled.\n  core.v3.RuntimeFeatureFlag enabled = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v3/udp_default_writer_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"UdpDefaultWriterConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Udp Default Writer Config]\n\n// [#not-implemented-hide:]\n// Configuration specific to the Udp Default Writer.\nmessage UdpDefaultWriterOptions {\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v3/udp_gso_batch_writer_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"UdpGsoBatchWriterConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Udp Gso Batch Writer Config]\n\n// [#not-implemented-hide:]\n// Configuration specific to the Udp Gso Batch Writer.\nmessage UdpGsoBatchWriterOptions {\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v3/udp_listener_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"UdpListenerConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: UDP Listener Config]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage UdpListenerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.UdpListenerConfig\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // Used to look up UDP listener factory, matches \"raw_udp_listener\" or\n  // \"quic_listener\" to create a specific udp listener.\n  // If not specified, treat as \"raw_udp_listener\".\n  string udp_listener_name = 1;\n\n  // Used to create a specific listener factory. To some factory, e.g.\n  // \"raw_udp_listener\", config is not needed.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\nmessage ActiveRawUdpListenerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.ActiveRawUdpListenerConfig\";\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/listener/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/listener/v4alpha/api_listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"ApiListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: API listener]\n\n// Describes a type of API listener, which is used in non-proxy clients. The type of API\n// exposed to the non-proxy application depends on the type of API listener.\nmessage ApiListener {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ApiListener\";\n\n  // The type in this field determines the type of API listener. At present, the following\n  // types are supported:\n  // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP)\n  // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the\n  // specific config message for each type of API listener. We could not do this in v2 because\n  // it would have caused circular dependencies for go protos: lds.proto depends on this file,\n  // and http_connection_manager.proto depends on rds.proto, which is in the same directory as\n  // lds.proto, so lds.proto cannot depend on this file.]\n  google.protobuf.Any api_listener = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v4alpha/listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"envoy/config/accesslog/v4alpha/accesslog.proto\";\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/socket_option.proto\";\nimport \"envoy/config/listener/v4alpha/api_listener.proto\";\nimport \"envoy/config/listener/v4alpha/listener_components.proto\";\nimport \"envoy/config/listener/v4alpha/udp_listener_config.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/collection_entry.proto\";\n\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"ListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Listener configuration]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// Listener list collections. 
Entries are *Listener* resources or references.\n// [#not-implemented-hide:]\nmessage ListenerCollection {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ListenerCollection\";\n\n  repeated udpa.core.v1.CollectionEntry entries = 1;\n}\n\n// [#next-free-field: 25]\nmessage Listener {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.listener.v3.Listener\";\n\n  enum DrainType {\n    // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check\n    // filter), listener removal/modification, and hot restart.\n    DEFAULT = 0;\n\n    // Drain in response to listener removal/modification and hot restart. This setting does not\n    // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress\n    // and egress listeners.\n    MODIFY_ONLY = 1;\n  }\n\n  // [#not-implemented-hide:]\n  message DeprecatedV1 {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.listener.v3.Listener.DeprecatedV1\";\n\n    // Whether the listener should bind to the port. A listener that doesn't\n    // bind can only receive connections redirected from other listeners that\n    // set use_original_dst parameter to true. Default is true.\n    //\n    // This is deprecated in v2, all Listeners will bind to their port. 
An\n    // additional filter chain must be created for every original destination\n    // port this listener may redirect to in v2, with the original port\n    // specified in the FilterChainMatch destination_port field.\n    //\n    // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.]\n    google.protobuf.BoolValue bind_to_port = 1;\n  }\n\n  // Configuration for listener connection balancing.\n  message ConnectionBalanceConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.listener.v3.Listener.ConnectionBalanceConfig\";\n\n    // A connection balancer implementation that does exact balancing. This means that a lock is\n    // held during balancing so that connection counts are nearly exactly balanced between worker\n    // threads. This is \"nearly\" exact in the sense that a connection might close in parallel thus\n    // making the counts incorrect, but this should be rectified on the next accept. This balancer\n    // sacrifices accept throughput for accuracy and should be used when there are a small number of\n    // connections that rarely cycle (e.g., service mesh gRPC egress).\n    message ExactBalance {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance\";\n    }\n\n    oneof balance_type {\n      option (validate.required) = true;\n\n      // If specified, the listener will use the exact connection balancer.\n      ExactBalance exact_balance = 1;\n    }\n  }\n\n  reserved 14, 4;\n\n  reserved \"use_original_dst\";\n\n  // The unique name by which this listener is known. If no name is provided,\n  // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically\n  // updated or removed via :ref:`LDS <config_listeners_lds>` a unique name must be provided.\n  string name = 1;\n\n  // The address that the listener should listen on. 
In general, the address must be unique, though\n  // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on\n  // Linux as the actual port will be allocated by the OS.\n  core.v4alpha.Address address = 2 [(validate.rules).message = {required: true}];\n\n  // A list of filter chains to consider for this listener. The\n  // :ref:`FilterChain <envoy_api_msg_config.listener.v4alpha.FilterChain>` with the most specific\n  // :ref:`FilterChainMatch <envoy_api_msg_config.listener.v4alpha.FilterChainMatch>` criteria is used on a\n  // connection.\n  //\n  // Example using SNI for filter chain selection can be found in the\n  // :ref:`FAQ entry <faq_how_to_setup_sni>`.\n  repeated FilterChain filter_chains = 3;\n\n  // Soft limit on size of the listener’s new connection read and write buffers.\n  // If unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // Listener metadata.\n  core.v4alpha.Metadata metadata = 6;\n\n  // [#not-implemented-hide:]\n  DeprecatedV1 deprecated_v1 = 7;\n\n  // The type of draining to perform at a listener-wide level.\n  DrainType drain_type = 8;\n\n  // Listener filters have the opportunity to manipulate and augment the connection metadata that\n  // is used in connection filter chain matching, for example. These filters are run before any in\n  // :ref:`filter_chains <envoy_api_field_config.listener.v4alpha.Listener.filter_chains>`. 
Order matters as the\n  // filters are processed sequentially right after a socket has been accepted by the listener, and\n  // before a connection is created.\n  // UDP Listener filters can be specified when the protocol in the listener socket address in\n  // :ref:`protocol <envoy_api_field_config.core.v4alpha.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v4alpha.SocketAddress.Protocol.UDP>`.\n  // UDP listeners currently support a single filter.\n  repeated ListenerFilter listener_filters = 9;\n\n  // The timeout to wait for all listener filters to complete operation. If the timeout is reached,\n  // the accepted socket is closed without a connection being created unless\n  // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the\n  // timeout. If not specified, a default timeout of 15s is used.\n  google.protobuf.Duration listener_filters_timeout = 15;\n\n  // Whether a connection should be created when listener filters timeout. Default is false.\n  //\n  // .. attention::\n  //\n  //   Some listener filters, such as :ref:`Proxy Protocol filter\n  //   <config_listener_filters_proxy_protocol>`, should not be used with this option. It will cause\n  //   unexpected behavior when a connection is created.\n  bool continue_on_listener_filters_timeout = 17;\n\n  // Whether the listener should be set as a transparent socket.\n  // When this flag is set to true, connections can be redirected to the listener using an\n  // *iptables* *TPROXY* target, in which case the original source and destination addresses and\n  // ports are preserved on accepted connections. 
This flag should be used in combination with\n  // :ref:`an original_dst <config_listener_filters_original_dst>` :ref:`listener filter\n  // <envoy_api_field_config.listener.v4alpha.Listener.listener_filters>` to mark the connections' local addresses as\n  // \"restored.\" This can be used to hand off each redirected connection to another listener\n  // associated with the connection's destination address. Direct connections to the socket without\n  // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are\n  // therefore treated as if they were redirected.\n  // When this flag is set to false, the listener's socket is explicitly reset as non-transparent.\n  // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability.\n  // When this flag is not set (default), the socket is not modified, i.e. the transparent option\n  // is neither set nor reset.\n  google.protobuf.BoolValue transparent = 10;\n\n  // Whether the listener should set the *IP_FREEBIND* socket option. When this\n  // flag is set to true, listeners can be bound to an IP address that is not\n  // configured on the system running Envoy. When this flag is set to false, the\n  // option *IP_FREEBIND* is disabled on the socket. When this flag is not set\n  // (default), the socket is not modified, i.e. 
the option is neither enabled\n  // nor disabled.\n  google.protobuf.BoolValue freebind = 11;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.v4alpha.SocketOption socket_options = 13;\n\n  // Whether the listener should accept TCP Fast Open (TFO) connections.\n  // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on\n  // the socket, with a queue length of the specified size\n  // (see `details in RFC7413 <https://tools.ietf.org/html/rfc7413#section-5.1>`_).\n  // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket.\n  // When this flag is not set (default), the socket is not modified,\n  // i.e. the option is neither enabled nor disabled.\n  //\n  // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable\n  // TCP_FASTOPEN.\n  // See `ip-sysctl.txt <https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt>`_.\n  //\n  // On macOS, only values of 0, 1, and unset are valid; other values may result in an error.\n  // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter.\n  google.protobuf.UInt32Value tcp_fast_open_queue_length = 12;\n\n  // Specifies the intended direction of the traffic relative to the local Envoy.\n  core.v4alpha.TrafficDirection traffic_direction = 16;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_config.core.v4alpha.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v4alpha.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // listener to create, i.e. :ref:`udp_listener_name\n  // <envoy_api_field_config.listener.v4alpha.UdpListenerConfig.udp_listener_name>` = \"raw_udp_listener\" for\n  // creating a packet-oriented UDP listener. 
If not present, treat it as \"raw_udp_listener\".\n  UdpListenerConfig udp_listener_config = 18;\n\n  // Used to represent an API listener, which is used in non-proxy clients. The type of API\n  // exposed to the non-proxy application depends on the type of API listener.\n  // When this field is set, no other field except for :ref:`name<envoy_api_field_config.listener.v4alpha.Listener.name>`\n  // should be set.\n  //\n  // .. note::\n  //\n  //  Currently only one ApiListener can be installed; and it can only be done via bootstrap config,\n  //  not LDS.\n  //\n  // [#next-major-version: In the v3 API, instead of this messy approach where the socket\n  // listener fields are directly in the top-level Listener message and the API listener types\n  // are in the ApiListener message, the socket listener messages should be in their own message,\n  // and the top-level Listener should essentially be a oneof that selects between the\n  // socket listener and the various types of API listener. That way, a given Listener message\n  // can structurally only contain the fields of the relevant type.]\n  ApiListener api_listener = 19;\n\n  // The listener's connection balancer configuration, currently only applicable to TCP listeners.\n  // If no configuration is specified, Envoy will not attempt to balance active connections between\n  // worker threads.\n  ConnectionBalanceConfig connection_balance_config = 20;\n\n  // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and\n  // create one socket for each worker thread. This makes inbound connections\n  // distribute among worker threads roughly evenly in cases where there are a high number\n  // of connections. 
When this flag is set to false, all worker threads share one socket.\n  //\n  // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart\n  // (see `3rd paragraph in 'soreuseport' commit message\n  // <https://github.com/torvalds/linux/commit/c617f398edd4db2b8567a28e89>`_).\n  // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket\n  // <https://github.com/torvalds/linux/commit/40a1227ea845a37ab197dd1caffb60b047fa36b1>`_.\n  bool reuse_port = 21;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by this listener.\n  repeated accesslog.v4alpha.AccessLog access_log = 22;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_config.core.v4alpha.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v4alpha.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // writer to create, i.e. :ref:`name <envoy_api_field_config.core.v4alpha.TypedExtensionConfig.name>`\n  //    = \"udp_default_writer\" for creating a udp writer with writing in passthrough mode,\n  //    = \"udp_gso_batch_writer\" for creating a udp writer with writing in batch mode.\n  // If not present, treat it as \"udp_default_writer\".\n  // [#not-implemented-hide:]\n  core.v4alpha.TypedExtensionConfig udp_writer_config = 23;\n\n  // The maximum length a tcp listener's pending connections queue can grow to. If no value is\n  // provided net.core.somaxconn will be used on Linux and 128 otherwise.\n  google.protobuf.UInt32Value tcp_backlog_size = 24;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v4alpha/listener_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"ListenerComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Listener components]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage Filter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.listener.v3.Filter\";\n\n  reserved 3, 2;\n\n  reserved \"config\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// Specifies the match criteria for selecting a specific filter chain for a\n// listener.\n//\n// In order for a filter chain to be selected, *ALL* of its criteria must be\n// fulfilled by the incoming connection, properties of which are set by the\n// networking stack and/or listener filters.\n//\n// The following order applies:\n//\n// 1. Destination port.\n// 2. Destination IP address.\n// 3. Server name (e.g. SNI for TLS protocol),\n// 4. Transport protocol.\n// 5. Application protocols (e.g. 
ALPN for TLS protocol).\n// 6. Source type (e.g. any, local or external network).\n// 7. Source IP address.\n// 8. Source port.\n//\n// For criteria that allow ranges or wildcards, the most specific value in any\n// of the configured filter chains that matches the incoming connection is going\n// to be used (e.g. for SNI ``www.example.com`` the most specific match would be\n// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter\n// chain without ``server_names`` requirements).\n//\n// [#comment: Implemented rules are kept in the preference order, with deprecated fields\n// listed at the end, because that's how we want to list them in the docs.\n//\n// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules]\n// [#next-free-field: 13]\nmessage FilterChainMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.FilterChainMatch\";\n\n  enum ConnectionSourceType {\n    // Any connection source matches.\n    ANY = 0;\n\n    // Match a connection originating from the same host.\n    SAME_IP_OR_LOOPBACK = 1;\n\n    // Match a connection originating from a different host.\n    EXTERNAL = 2;\n  }\n\n  reserved 1;\n\n  // Optional destination port to consider when use_original_dst is set on the\n  // listener in determining a filter chain match.\n  google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}];\n\n  // If non-empty, an IP address and prefix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  repeated core.v4alpha.CidrRange prefix_ranges = 3;\n\n  // If non-empty, an IP address and suffix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  // [#not-implemented-hide:]\n  string address_suffix = 4;\n\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value suffix_len = 5;\n\n  // Specifies the 
connection source IP match type. Can be any, local or external network.\n  ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}];\n\n  // The criteria is satisfied if the source IP address of the downstream\n  // connection is contained in at least one of the specified subnets. If the\n  // parameter is not specified or the list is empty, the source IP address is\n  // ignored.\n  repeated core.v4alpha.CidrRange source_prefix_ranges = 6;\n\n  // The criteria is satisfied if the source port of the downstream connection\n  // is contained in at least one of the specified ports. If the parameter is\n  // not specified, the source port is ignored.\n  repeated uint32 source_ports = 7\n      [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}];\n\n  // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining\n  // a filter chain match. Those values will be compared against the server names of a new\n  // connection, when detected by one of the listener filters.\n  //\n  // The server name will be matched against all wildcard domains, i.e. ``www.example.com``\n  // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``.\n  //\n  // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.\n  //\n  // .. 
attention::\n  //\n  //   See the :ref:`FAQ entry <faq_how_to_setup_sni>` on how to configure SNI for more\n  //   information.\n  repeated string server_names = 11;\n\n  // If non-empty, a transport protocol to consider when determining a filter chain match.\n  // This value will be compared against the transport protocol of a new connection, when\n  // it's detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``raw_buffer`` - default, used when no transport protocol is detected,\n  // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //   when TLS protocol is detected.\n  string transport_protocol = 9;\n\n  // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when\n  // determining a filter chain match. Those values will be compared against the application\n  // protocols of a new connection, when detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector\n  //   <config_listener_filters_tls_inspector>`,\n  // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //\n  // .. 
attention::\n  //\n  //   Currently, only :ref:`TLS Inspector <config_listener_filters_tls_inspector>` provides\n  //   application protocol detection based on the requested\n  //   `ALPN <https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_ values.\n  //\n  //   However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet,\n  //   and matching on values other than ``h2`` is going to lead to a lot of false negatives,\n  //   unless all connecting clients are known to use ALPN.\n  repeated string application_protocols = 10;\n}\n\n// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and\n// various other parameters.\n// [#next-free-field: 9]\nmessage FilterChain {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.FilterChain\";\n\n  // The configuration for on-demand filter chain. If this field is not empty in FilterChain message,\n  // a filter chain will be built on-demand.\n  // On-demand filter chains help speedup the warming up of listeners since the building and initialization of\n  // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain.\n  // Filter chains that are not often used can be set as on-demand.\n  message OnDemandConfiguration {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.listener.v3.FilterChain.OnDemandConfiguration\";\n\n    // The timeout to wait for filter chain placeholders to complete rebuilding.\n    // 1. If this field is set to 0, timeout is disabled.\n    // 2. 
If not specified, a default timeout of 15s is used.\n    // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached.\n    // Upon failure or timeout, all connections related to this filter chain will be closed.\n    // Rebuilding will start again on the next new connection.\n    google.protobuf.Duration rebuild_timeout = 1;\n  }\n\n  reserved 2;\n\n  reserved \"tls_context\";\n\n  // The criteria to use when matching a connection to this filter chain.\n  FilterChainMatch filter_chain_match = 1;\n\n  // A list of individual network filters that make up the filter chain for\n  // connections established with the listener. Order matters as the filters are\n  // processed sequentially as connection events happen. Note: If the filter\n  // list is empty, the connection will close by default.\n  repeated Filter filters = 3;\n\n  // Whether the listener should expect a PROXY protocol V1 header on new\n  // connections. If this option is enabled, the listener will assume that that\n  // remote address of the connection is the one specified in the header. Some\n  // load balancers including the AWS ELB support this option. If the option is\n  // absent or set to false, Envoy will use the physical peer address of the\n  // connection as the remote address.\n  google.protobuf.BoolValue use_proxy_proto = 4;\n\n  // [#not-implemented-hide:] filter chain metadata.\n  core.v4alpha.Metadata metadata = 5;\n\n  // Optional custom transport socket implementation to use for downstream connections.\n  // To setup TLS, set a transport socket with name `tls` and\n  // :ref:`DownstreamTlsContext <envoy_api_msg_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.v4alpha.TransportSocket transport_socket = 6;\n\n  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. 
If no\n  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter\n  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.\n  string name = 7;\n\n  // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand.\n  // If this field is not empty, the filter chain will be built on-demand.\n  // Otherwise, the filter chain will be built normally and block listener warming.\n  OnDemandConfiguration on_demand_configuration = 8;\n}\n\n// Listener filter chain match configuration. This is a recursive structure which allows complex\n// nested match configurations to be built using various logical operators.\n//\n// Examples:\n//\n// * Matches if the destination port is 3306.\n//\n// .. code-block:: yaml\n//\n//  destination_port_range:\n//   start: 3306\n//   end: 3307\n//\n// * Matches if the destination port is 3306 or 15000.\n//\n// .. code-block:: yaml\n//\n//  or_match:\n//    rules:\n//      - destination_port_range:\n//          start: 3306\n//          end: 3306\n//      - destination_port_range:\n//          start: 15000\n//          end: 15001\n//\n// [#next-free-field: 6]\nmessage ListenerFilterChainMatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ListenerFilterChainMatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated ListenerFilterChainMatchPredicate rules = 1\n        [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. 
If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. The match configuration will match if the negated match condition matches.\n    ListenerFilterChainMatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // Match destination port. Particularly, the match evaluation must use the recovered local port if\n    // the owning listener filter is after :ref:`an original_dst listener filter <config_listener_filters_original_dst>`.\n    type.v3.Int32Range destination_port_range = 5;\n  }\n}\n\nmessage ListenerFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ListenerFilter\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_listener_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated.\n  // See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Optional match predicate used to disable the filter. The filter is enabled when this field is empty.\n  // See :ref:`ListenerFilterChainMatchPredicate <envoy_api_msg_config.listener.v4alpha.ListenerFilterChainMatchPredicate>`\n  // for further examples.\n  ListenerFilterChainMatchPredicate filter_disabled = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v4alpha/quic_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"QuicConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: QUIC listener Config]\n\n// Configuration specific to the QUIC protocol.\n// Next id: 5\nmessage QuicProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.QuicProtocolOptions\";\n\n  // Maximum number of streams that the client can negotiate per connection. 100\n  // if not specified.\n  google.protobuf.UInt32Value max_concurrent_streams = 1;\n\n  // Maximum number of milliseconds that connection will be alive when there is\n  // no network activity. 300000ms if not specified.\n  google.protobuf.Duration idle_timeout = 2;\n\n  // Connection timeout in milliseconds before the crypto handshake is finished.\n  // 20000ms if not specified.\n  google.protobuf.Duration crypto_handshake_timeout = 3;\n\n  // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults\n  // to enabled.\n  core.v4alpha.RuntimeFeatureFlag enabled = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v4alpha/udp_default_writer_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"UdpDefaultWriterConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Udp Default Writer Config]\n\n// [#not-implemented-hide:]\n// Configuration specific to the Udp Default Writer.\nmessage UdpDefaultWriterOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.UdpDefaultWriterOptions\";\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"UdpGsoBatchWriterConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Udp Gso Batch Writer Config]\n\n// [#not-implemented-hide:]\n// Configuration specific to the Udp Gso Batch Writer.\nmessage UdpGsoBatchWriterOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.UdpGsoBatchWriterOptions\";\n}\n"
  },
  {
    "path": "api/envoy/config/listener/v4alpha/udp_listener_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"UdpListenerConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: UDP Listener Config]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage UdpListenerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.UdpListenerConfig\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // Used to look up UDP listener factory, matches \"raw_udp_listener\" or\n  // \"quic_listener\" to create a specific udp listener.\n  // If not specified, treat as \"raw_udp_listener\".\n  string udp_listener_name = 1;\n\n  // Used to create a specific listener factory. To some factory, e.g.\n  // \"raw_udp_listener\", config is not needed.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\nmessage ActiveRawUdpListenerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ActiveRawUdpListenerConfig\";\n}\n"
  },
  {
    "path": "api/envoy/config/metrics/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/metrics/v2/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v2\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metrics service]\n\n// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink\n// <envoy_api_msg_config.metrics.v2.StatsSink>`. This opaque configuration will be used to create\n// Metrics Service.\n// [#extension: envoy.stat_sinks.metrics_service]\nmessage MetricsServiceConfig {\n  // The upstream gRPC cluster that hosts the metrics service.\n  api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/metrics/v2/stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v2\";\noption java_outer_classname = \"StatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Stats]\n// Statistics :ref:`architecture overview <arch_overview_statistics>`.\n\n// Configuration for pluggable stats sinks.\nmessage StatsSink {\n  // The name of the stats sink to instantiate. The name must match a supported\n  // stats sink. The built-in stats sinks are:\n  //\n  // * :ref:`envoy.stat_sinks.statsd <envoy_api_msg_config.metrics.v2.StatsdSink>`\n  // * :ref:`envoy.stat_sinks.dog_statsd <envoy_api_msg_config.metrics.v2.DogStatsdSink>`\n  // * :ref:`envoy.stat_sinks.metrics_service <envoy_api_msg_config.metrics.v2.MetricsServiceConfig>`\n  // * :ref:`envoy.stat_sinks.hystrix <envoy_api_msg_config.metrics.v2.HystrixSink>`\n  //\n  // Sinks optionally support tagged/multiple dimensional metrics.\n  string name = 1;\n\n  // Stats sink specific configuration which depends on the sink being instantiated. 
See\n  // :ref:`StatsdSink <envoy_api_msg_config.metrics.v2.StatsdSink>` for an example.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Statistics configuration such as tagging.\nmessage StatsConfig {\n  // Each stat name is iteratively processed through these tag specifiers.\n  // When a tag is matched, the first capture group is removed from the name so\n  // later :ref:`TagSpecifiers <envoy_api_msg_config.metrics.v2.TagSpecifier>` cannot match that\n  // same portion of the match.\n  repeated TagSpecifier stats_tags = 1;\n\n  // Use all default tag regexes specified in Envoy. These can be combined with\n  // custom tags specified in :ref:`stats_tags\n  // <envoy_api_field_config.metrics.v2.StatsConfig.stats_tags>`. They will be processed before\n  // the custom tags.\n  //\n  // .. note::\n  //\n  //   If any default tags are specified twice, the config will be considered\n  //   invalid.\n  //\n  // See :repo:`well_known_names.h <source/common/config/well_known_names.h>` for a list of the\n  // default tags in Envoy.\n  //\n  // If not provided, the value is assumed to be true.\n  google.protobuf.BoolValue use_all_default_tags = 2;\n\n  // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated\n  // as normal. Preventing the instantiation of certain families of stats can improve memory\n  // performance for Envoys running especially large configs.\n  //\n  // .. warning::\n  //   Excluding stats may affect Envoy's behavior in undocumented ways. See\n  //   `issue #8771 <https://github.com/envoyproxy/envoy/issues/8771>`_ for more information.\n  //   If any unexpected behavior changes are observed, please open a new issue immediately.\n  StatsMatcher stats_matcher = 3;\n}\n\n// Configuration for disabling stat instantiation.\nmessage StatsMatcher {\n  // The instantiation of stats is unrestricted by default. 
If the goal is to configure Envoy to\n  // instantiate all stats, there is no need to construct a StatsMatcher.\n  //\n  // However, StatsMatcher can be used to limit the creation of families of stats in order to\n  // conserve memory. Stats can either be disabled entirely, or they can be\n  // limited by either an exclusion or an inclusion list of :ref:`StringMatcher\n  // <envoy_api_msg_type.matcher.StringMatcher>` protos:\n  //\n  // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to\n  //   `false`, all stats will be instantiated.\n  //\n  // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the\n  //   list will not instantiate.\n  //\n  // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of\n  //   the StringMatchers in the list.\n  //\n  //\n  // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex.\n  // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based\n  // matcher rather than a regex-based matcher.\n  //\n  // Example 1. Excluding all stats.\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"rejectAll\": \"true\"\n  //     }\n  //   }\n  //\n  // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"exclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n  // Example 3. Including only manager-related stats:\n  //\n  // .. 
code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"inclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster_manager.\"\n  //           },\n  //           {\n  //             \"prefix\": \"listener_manager.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n\n  oneof stats_matcher {\n    option (validate.required) = true;\n\n    // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all\n    // stats are enabled.\n    bool reject_all = 1;\n\n    // Exclusive match. All stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.ListStringMatcher exclusion_list = 2;\n\n    // Inclusive match. No stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.ListStringMatcher inclusion_list = 3;\n  }\n}\n\n// Designates a tag name and value pair. The value may be either a fixed value\n// or a regex providing the value via capture groups. The specified tag will be\n// unconditionally set if a fixed value, otherwise it will only be set if one\n// or more capture groups in the regex match.\nmessage TagSpecifier {\n  // Attaches an identifier to the tag values to identify the tag being in the\n  // sink. Envoy has a set of default names and regexes to extract dynamic\n  // portions of existing stats, which can be found in :repo:`well_known_names.h\n  // <source/common/config/well_known_names.h>` in the Envoy repository. If a :ref:`tag_name\n  // <envoy_api_field_config.metrics.v2.TagSpecifier.tag_name>` is provided in the config and\n  // neither :ref:`regex <envoy_api_field_config.metrics.v2.TagSpecifier.regex>` or\n  // :ref:`fixed_value <envoy_api_field_config.metrics.v2.TagSpecifier.fixed_value>` were specified,\n  // Envoy will attempt to find that name in its set of defaults and use the accompanying regex.\n  //\n  // .. 
note::\n  //\n  //   It is invalid to specify the same tag name twice in a config.\n  string tag_name = 1;\n\n  oneof tag_value {\n    // Designates a tag to strip from the tag extracted name and provide as a named\n    // tag value for all statistics. This will only occur if any part of the name\n    // matches the regex provided with one or more capture groups.\n    //\n    // The first capture group identifies the portion of the name to remove. The\n    // second capture group (which will normally be nested inside the first) will\n    // designate the value of the tag for the statistic. If no second capture\n    // group is provided, the first will also be used to set the value of the tag.\n    // All other capture groups will be ignored.\n    //\n    // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and\n    // one tag specifier:\n    //\n    // .. code-block:: json\n    //\n    //   {\n    //     \"tag_name\": \"envoy.cluster_name\",\n    //     \"regex\": \"^cluster\\\\.((.+?)\\\\.)\"\n    //   }\n    //\n    // Note that the regex will remove ``foo_cluster.`` making the tag extracted\n    // name ``cluster.upstream_rq_timeout`` and the tag value for\n    // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no\n    // ``.`` character because of the second capture group).\n    //\n    // Example 2. a stat name\n    // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two\n    // tag specifiers:\n    //\n    // .. 
code-block:: json\n    //\n    //   [\n    //     {\n    //       \"tag_name\": \"envoy.http_user_agent\",\n    //       \"regex\": \"^http(?=\\\\.).*?\\\\.user_agent\\\\.((.+?)\\\\.)\\\\w+?$\"\n    //     },\n    //     {\n    //       \"tag_name\": \"envoy.http_conn_manager_prefix\",\n    //       \"regex\": \"^http\\\\.((.*?)\\\\.)\"\n    //     }\n    //   ]\n    //\n    // The two regexes of the specifiers will be processed in the definition order.\n    //\n    // The first regex will remove ``ios.``, leaving the tag extracted name\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag\n    // ``envoy.http_user_agent`` will be added with tag value ``ios``.\n    //\n    // The second regex will remove ``connection_manager_1.`` from the tag\n    // extracted name produced by the first regex\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving\n    // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag\n    // ``envoy.http_conn_manager_prefix`` will be added with the tag value\n    // ``connection_manager_1``.\n    string regex = 2 [(validate.rules).string = {max_bytes: 1024}];\n\n    // Specifies a fixed tag value for the ``tag_name``.\n    string fixed_value = 3;\n  }\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support\n// tagged metrics.\n// [#extension: envoy.stat_sinks.statsd]\nmessage StatsdSink {\n  oneof statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running `statsd <https://github.com/etsy/statsd>`_\n    // compliant listener. If specified, statistics will be flushed to this\n    // address.\n    api.v2.core.Address address = 1;\n\n    // The name of a cluster that is running a TCP `statsd\n    // <https://github.com/etsy/statsd>`_ compliant listener. 
If specified,\n    // Envoy will connect to this cluster to flush statistics.\n    string tcp_cluster_name = 2;\n  }\n\n  // Optional custom prefix for StatsdSink. If\n  // specified, this will override the default prefix.\n  // For example:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"prefix\" : \"envoy-prod\"\n  //   }\n  //\n  // will change emitted stats to\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy-prod.test_counter:1|c\n  //   envoy-prod.test_timer:5|ms\n  //\n  // Note that the default prefix, \"envoy\", will be used if a prefix is not\n  // specified.\n  //\n  // Stats with default prefix:\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy.test_counter:1|c\n  //   envoy.test_timer:5|ms\n  string prefix = 3;\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink.\n// The sink emits stats with `DogStatsD <https://docs.datadoghq.com/guides/dogstatsd/>`_\n// compatible tags. Tags are configurable via :ref:`StatsConfig\n// <envoy_api_msg_config.metrics.v2.StatsConfig>`.\n// [#extension: envoy.stat_sinks.dog_statsd]\nmessage DogStatsdSink {\n  reserved 2;\n\n  oneof dog_statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running DogStatsD compliant listener. If specified,\n    // statistics will be flushed to this address.\n    api.v2.core.Address address = 1;\n  }\n\n  // Optional custom metric name prefix. 
See :ref:`StatsdSink's prefix field\n  // <envoy_api_field_config.metrics.v2.StatsdSink.prefix>` for more details.\n  string prefix = 3;\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink.\n// The sink emits stats in `text/event-stream\n// <https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events>`_\n// formatted stream for use by `Hystrix dashboard\n// <https://github.com/Netflix-Skunkworks/hystrix-dashboard/wiki>`_.\n//\n// Note that only a single HystrixSink should be configured.\n//\n// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`.\n// [#extension: envoy.stat_sinks.hystrix]\nmessage HystrixSink {\n  // The number of buckets the rolling statistical window is divided into.\n  //\n  // Each time the sink is flushed, all relevant Envoy statistics are sampled and\n  // added to the rolling window (removing the oldest samples in the window\n  // in the process). The sink then outputs the aggregate statistics across the\n  // current rolling window to the event stream(s).\n  //\n  // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets\n  //\n  // More detailed explanation can be found in `Hystrix wiki\n  // <https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#hystrixrollingnumber>`_.\n  int64 num_buckets = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/metrics/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/metrics/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/metrics/v3/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v3\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metrics service]\n\n// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink\n// <envoy_api_msg_config.metrics.v3.StatsSink>`. This opaque configuration will be used to create\n// Metrics Service.\n// [#extension: envoy.stat_sinks.metrics_service]\nmessage MetricsServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.MetricsServiceConfig\";\n\n  // The upstream gRPC cluster that hosts the metrics service.\n  core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n\n  // API version for metric service transport protocol. This describes the metric service gRPC\n  // endpoint and version of messages used on the wire.\n  core.v3.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // If true, counters are reported as the delta between flushing intervals. Otherwise, the current\n  // counter value is reported. Defaults to false.\n  // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the\n  // sink will take updates from the :ref:`MetricsResponse <envoy_api_msg_service.metrics.v3.StreamMetricsResponse>`.\n  google.protobuf.BoolValue report_counters_as_deltas = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/metrics/v3/stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v3\";\noption java_outer_classname = \"StatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Stats]\n// Statistics :ref:`architecture overview <arch_overview_statistics>`.\n\n// Configuration for pluggable stats sinks.\nmessage StatsSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.metrics.v2.StatsSink\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the stats sink to instantiate. The name must match a supported\n  // stats sink. The built-in stats sinks are:\n  //\n  // * :ref:`envoy.stat_sinks.statsd <envoy_api_msg_config.metrics.v3.StatsdSink>`\n  // * :ref:`envoy.stat_sinks.dog_statsd <envoy_api_msg_config.metrics.v3.DogStatsdSink>`\n  // * :ref:`envoy.stat_sinks.metrics_service <envoy_api_msg_config.metrics.v3.MetricsServiceConfig>`\n  // * :ref:`envoy.stat_sinks.hystrix <envoy_api_msg_config.metrics.v3.HystrixSink>`\n  //\n  // Sinks optionally support tagged/multiple dimensional metrics.\n  string name = 1;\n\n  // Stats sink specific configuration which depends on the sink being instantiated. 
See\n  // :ref:`StatsdSink <envoy_api_msg_config.metrics.v3.StatsdSink>` for an example.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Statistics configuration such as tagging.\nmessage StatsConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.StatsConfig\";\n\n  // Each stat name is iteratively processed through these tag specifiers.\n  // When a tag is matched, the first capture group is removed from the name so\n  // later :ref:`TagSpecifiers <envoy_api_msg_config.metrics.v3.TagSpecifier>` cannot match that\n  // same portion of the match.\n  repeated TagSpecifier stats_tags = 1;\n\n  // Use all default tag regexes specified in Envoy. These can be combined with\n  // custom tags specified in :ref:`stats_tags\n  // <envoy_api_field_config.metrics.v3.StatsConfig.stats_tags>`. They will be processed before\n  // the custom tags.\n  //\n  // .. note::\n  //\n  //   If any default tags are specified twice, the config will be considered\n  //   invalid.\n  //\n  // See :repo:`well_known_names.h <source/common/config/well_known_names.h>` for a list of the\n  // default tags in Envoy.\n  //\n  // If not provided, the value is assumed to be true.\n  google.protobuf.BoolValue use_all_default_tags = 2;\n\n  // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated\n  // as normal. Preventing the instantiation of certain families of stats can improve memory\n  // performance for Envoys running especially large configs.\n  //\n  // .. warning::\n  //   Excluding stats may affect Envoy's behavior in undocumented ways. See\n  //   `issue #8771 <https://github.com/envoyproxy/envoy/issues/8771>`_ for more information.\n  //   If any unexpected behavior changes are observed, please open a new issue immediately.\n  StatsMatcher stats_matcher = 3;\n\n  // Defines rules for setting the histogram buckets. 
Rules are evaluated in order, and the first\n  // match is applied. If no match is found (or if no rules are set), the following default buckets\n  // are used:\n  //\n  //   .. code-block:: json\n  //\n  //     [\n  //       0.5,\n  //       1,\n  //       5,\n  //       10,\n  //       25,\n  //       50,\n  //       100,\n  //       250,\n  //       500,\n  //       1000,\n  //       2500,\n  //       5000,\n  //       10000,\n  //       30000,\n  //       60000,\n  //       300000,\n  //       600000,\n  //       1800000,\n  //       3600000\n  //     ]\n  repeated HistogramBucketSettings histogram_bucket_settings = 4;\n}\n\n// Configuration for disabling stat instantiation.\nmessage StatsMatcher {\n  // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to\n  // instantiate all stats, there is no need to construct a StatsMatcher.\n  //\n  // However, StatsMatcher can be used to limit the creation of families of stats in order to\n  // conserve memory. Stats can either be disabled entirely, or they can be\n  // limited by either an exclusion or an inclusion list of :ref:`StringMatcher\n  // <envoy_api_msg_type.matcher.v3.StringMatcher>` protos:\n  //\n  // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to\n  //   `false`, all stats will be instantiated.\n  //\n  // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the\n  //   list will not instantiate.\n  //\n  // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of\n  //   the StringMatchers in the list.\n  //\n  //\n  // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex.\n  // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based\n  // matcher rather than a regex-based matcher.\n  //\n  // Example 1. Excluding all stats.\n  //\n  // .. 
code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"rejectAll\": \"true\"\n  //     }\n  //   }\n  //\n  // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"exclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n  // Example 3. Including only manager-related stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"inclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster_manager.\"\n  //           },\n  //           {\n  //             \"prefix\": \"listener_manager.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.StatsMatcher\";\n\n  oneof stats_matcher {\n    option (validate.required) = true;\n\n    // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all\n    // stats are enabled.\n    bool reject_all = 1;\n\n    // Exclusive match. All stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.v3.ListStringMatcher exclusion_list = 2;\n\n    // Inclusive match. No stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.v3.ListStringMatcher inclusion_list = 3;\n  }\n}\n\n// Designates a tag name and value pair. The value may be either a fixed value\n// or a regex providing the value via capture groups. 
The specified tag will be\n// unconditionally set if a fixed value, otherwise it will only be set if one\n// or more capture groups in the regex match.\nmessage TagSpecifier {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.TagSpecifier\";\n\n  // Attaches an identifier to the tag values to identify the tag being in the\n  // sink. Envoy has a set of default names and regexes to extract dynamic\n  // portions of existing stats, which can be found in :repo:`well_known_names.h\n  // <source/common/config/well_known_names.h>` in the Envoy repository. If a :ref:`tag_name\n  // <envoy_api_field_config.metrics.v3.TagSpecifier.tag_name>` is provided in the config and\n  // neither :ref:`regex <envoy_api_field_config.metrics.v3.TagSpecifier.regex>` or\n  // :ref:`fixed_value <envoy_api_field_config.metrics.v3.TagSpecifier.fixed_value>` were specified,\n  // Envoy will attempt to find that name in its set of defaults and use the accompanying regex.\n  //\n  // .. note::\n  //\n  //   It is invalid to specify the same tag name twice in a config.\n  string tag_name = 1;\n\n  oneof tag_value {\n    // Designates a tag to strip from the tag extracted name and provide as a named\n    // tag value for all statistics. This will only occur if any part of the name\n    // matches the regex provided with one or more capture groups.\n    //\n    // The first capture group identifies the portion of the name to remove. The\n    // second capture group (which will normally be nested inside the first) will\n    // designate the value of the tag for the statistic. If no second capture\n    // group is provided, the first will also be used to set the value of the tag.\n    // All other capture groups will be ignored.\n    //\n    // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and\n    // one tag specifier:\n    //\n    // .. 
code-block:: json\n    //\n    //   {\n    //     \"tag_name\": \"envoy.cluster_name\",\n    //     \"regex\": \"^cluster\\\\.((.+?)\\\\.)\"\n    //   }\n    //\n    // Note that the regex will remove ``foo_cluster.`` making the tag extracted\n    // name ``cluster.upstream_rq_timeout`` and the tag value for\n    // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no\n    // ``.`` character because of the second capture group).\n    //\n    // Example 2. a stat name\n    // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two\n    // tag specifiers:\n    //\n    // .. code-block:: json\n    //\n    //   [\n    //     {\n    //       \"tag_name\": \"envoy.http_user_agent\",\n    //       \"regex\": \"^http(?=\\\\.).*?\\\\.user_agent\\\\.((.+?)\\\\.)\\\\w+?$\"\n    //     },\n    //     {\n    //       \"tag_name\": \"envoy.http_conn_manager_prefix\",\n    //       \"regex\": \"^http\\\\.((.*?)\\\\.)\"\n    //     }\n    //   ]\n    //\n    // The two regexes of the specifiers will be processed in the definition order.\n    //\n    // The first regex will remove ``ios.``, leaving the tag extracted name\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag\n    // ``envoy.http_user_agent`` will be added with tag value ``ios``.\n    //\n    // The second regex will remove ``connection_manager_1.`` from the tag\n    // extracted name produced by the first regex\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving\n    // ``http.user_agent.downstream_cx_total`` as the tag extracted name. 
The tag\n    // ``envoy.http_conn_manager_prefix`` will be added with the tag value\n    // ``connection_manager_1``.\n    string regex = 2 [(validate.rules).string = {max_bytes: 1024}];\n\n    // Specifies a fixed tag value for the ``tag_name``.\n    string fixed_value = 3;\n  }\n}\n\n// Specifies a matcher for stats and the buckets that matching stats should use.\nmessage HistogramBucketSettings {\n  // The stats that this rule applies to. The match is applied to the original stat name\n  // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`.\n  type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}];\n\n  // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique.\n  // The order of the buckets does not matter.\n  repeated double buckets = 2 [(validate.rules).repeated = {\n    min_items: 1\n    unique: true\n    items {double {gt: 0.0}}\n  }];\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support\n// tagged metrics.\n// [#extension: envoy.stat_sinks.statsd]\nmessage StatsdSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.metrics.v2.StatsdSink\";\n\n  oneof statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running `statsd <https://github.com/etsy/statsd>`_\n    // compliant listener. If specified, statistics will be flushed to this\n    // address.\n    core.v3.Address address = 1;\n\n    // The name of a cluster that is running a TCP `statsd\n    // <https://github.com/etsy/statsd>`_ compliant listener. If specified,\n    // Envoy will connect to this cluster to flush statistics.\n    string tcp_cluster_name = 2;\n  }\n\n  // Optional custom prefix for StatsdSink. If\n  // specified, this will override the default prefix.\n  // For example:\n  //\n  // .. 
code-block:: json\n  //\n  //   {\n  //     \"prefix\" : \"envoy-prod\"\n  //   }\n  //\n  // will change emitted stats to\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy-prod.test_counter:1|c\n  //   envoy-prod.test_timer:5|ms\n  //\n  // Note that the default prefix, \"envoy\", will be used if a prefix is not\n  // specified.\n  //\n  // Stats with default prefix:\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy.test_counter:1|c\n  //   envoy.test_timer:5|ms\n  string prefix = 3;\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink.\n// The sink emits stats with `DogStatsD <https://docs.datadoghq.com/guides/dogstatsd/>`_\n// compatible tags. Tags are configurable via :ref:`StatsConfig\n// <envoy_api_msg_config.metrics.v3.StatsConfig>`.\n// [#extension: envoy.stat_sinks.dog_statsd]\nmessage DogStatsdSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.DogStatsdSink\";\n\n  reserved 2;\n\n  oneof dog_statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running DogStatsD compliant listener. If specified,\n    // statistics will be flushed to this address.\n    core.v3.Address address = 1;\n  }\n\n  // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field\n  // <envoy_api_field_config.metrics.v3.StatsdSink.prefix>` for more details.\n  string prefix = 3;\n\n  // Optional max datagram size to use when sending UDP messages. By default Envoy\n  // will emit one metric per datagram. By specifying a max-size larger than a single\n  // metric, Envoy will emit multiple, new-line separated metrics. 
The max datagram\n  // size should not exceed your network's MTU.\n  //\n  // Note that this value may not be respected if smaller than a single metric.\n  google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}];\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink.\n// The sink emits stats in `text/event-stream\n// <https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events>`_\n// formatted stream for use by `Hystrix dashboard\n// <https://github.com/Netflix-Skunkworks/hystrix-dashboard/wiki>`_.\n//\n// Note that only a single HystrixSink should be configured.\n//\n// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`.\n// [#extension: envoy.stat_sinks.hystrix]\nmessage HystrixSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.HystrixSink\";\n\n  // The number of buckets the rolling statistical window is divided into.\n  //\n  // Each time the sink is flushed, all relevant Envoy statistics are sampled and\n  // added to the rolling window (removing the oldest samples in the window\n  // in the process). The sink then outputs the aggregate statistics across the\n  // current rolling window to the event stream(s).\n  //\n  // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets\n  //\n  // More detailed explanation can be found in `Hystrix wiki\n  // <https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#hystrixrollingnumber>`_.\n  int64 num_buckets = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/metrics/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/metrics/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/metrics/v4alpha/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v4alpha;\n\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v4alpha\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Metrics service]\n\n// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink\n// <envoy_api_msg_config.metrics.v4alpha.StatsSink>`. This opaque configuration will be used to create\n// Metrics Service.\n// [#extension: envoy.stat_sinks.metrics_service]\nmessage MetricsServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.MetricsServiceConfig\";\n\n  // The upstream gRPC cluster that hosts the metrics service.\n  core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n\n  // API version for metric service transport protocol. This describes the metric service gRPC\n  // endpoint and version of messages used on the wire.\n  core.v4alpha.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // If true, counters are reported as the delta between flushing intervals. Otherwise, the current\n  // counter value is reported. Defaults to false.\n  // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the\n  // sink will take updates from the :ref:`MetricsResponse <envoy_api_msg_service.metrics.v4alpha.StreamMetricsResponse>`.\n  google.protobuf.BoolValue report_counters_as_deltas = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/metrics/v4alpha/stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v4alpha\";\noption java_outer_classname = \"StatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Stats]\n// Statistics :ref:`architecture overview <arch_overview_statistics>`.\n\n// Configuration for pluggable stats sinks.\nmessage StatsSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.metrics.v3.StatsSink\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the stats sink to instantiate. The name must match a supported\n  // stats sink. The built-in stats sinks are:\n  //\n  // * :ref:`envoy.stat_sinks.statsd <envoy_api_msg_config.metrics.v4alpha.StatsdSink>`\n  // * :ref:`envoy.stat_sinks.dog_statsd <envoy_api_msg_config.metrics.v4alpha.DogStatsdSink>`\n  // * :ref:`envoy.stat_sinks.metrics_service <envoy_api_msg_config.metrics.v4alpha.MetricsServiceConfig>`\n  // * :ref:`envoy.stat_sinks.hystrix <envoy_api_msg_config.metrics.v4alpha.HystrixSink>`\n  //\n  // Sinks optionally support tagged/multiple dimensional metrics.\n  string name = 1;\n\n  // Stats sink specific configuration which depends on the sink being instantiated. 
See\n  // :ref:`StatsdSink <envoy_api_msg_config.metrics.v4alpha.StatsdSink>` for an example.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Statistics configuration such as tagging.\nmessage StatsConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.StatsConfig\";\n\n  // Each stat name is iteratively processed through these tag specifiers.\n  // When a tag is matched, the first capture group is removed from the name so\n  // later :ref:`TagSpecifiers <envoy_api_msg_config.metrics.v4alpha.TagSpecifier>` cannot match that\n  // same portion of the match.\n  repeated TagSpecifier stats_tags = 1;\n\n  // Use all default tag regexes specified in Envoy. These can be combined with\n  // custom tags specified in :ref:`stats_tags\n  // <envoy_api_field_config.metrics.v4alpha.StatsConfig.stats_tags>`. They will be processed before\n  // the custom tags.\n  //\n  // .. note::\n  //\n  //   If any default tags are specified twice, the config will be considered\n  //   invalid.\n  //\n  // See :repo:`well_known_names.h <source/common/config/well_known_names.h>` for a list of the\n  // default tags in Envoy.\n  //\n  // If not provided, the value is assumed to be true.\n  google.protobuf.BoolValue use_all_default_tags = 2;\n\n  // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated\n  // as normal. Preventing the instantiation of certain families of stats can improve memory\n  // performance for Envoys running especially large configs.\n  //\n  // .. warning::\n  //   Excluding stats may affect Envoy's behavior in undocumented ways. See\n  //   `issue #8771 <https://github.com/envoyproxy/envoy/issues/8771>`_ for more information.\n  //   If any unexpected behavior changes are observed, please open a new issue immediately.\n  StatsMatcher stats_matcher = 3;\n\n  // Defines rules for setting the histogram buckets. 
Rules are evaluated in order, and the first\n  // match is applied. If no match is found (or if no rules are set), the following default buckets\n  // are used:\n  //\n  //   .. code-block:: json\n  //\n  //     [\n  //       0.5,\n  //       1,\n  //       5,\n  //       10,\n  //       25,\n  //       50,\n  //       100,\n  //       250,\n  //       500,\n  //       1000,\n  //       2500,\n  //       5000,\n  //       10000,\n  //       30000,\n  //       60000,\n  //       300000,\n  //       600000,\n  //       1800000,\n  //       3600000\n  //     ]\n  repeated HistogramBucketSettings histogram_bucket_settings = 4;\n}\n\n// Configuration for disabling stat instantiation.\nmessage StatsMatcher {\n  // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to\n  // instantiate all stats, there is no need to construct a StatsMatcher.\n  //\n  // However, StatsMatcher can be used to limit the creation of families of stats in order to\n  // conserve memory. Stats can either be disabled entirely, or they can be\n  // limited by either an exclusion or an inclusion list of :ref:`StringMatcher\n  // <envoy_api_msg_type.matcher.v4alpha.StringMatcher>` protos:\n  //\n  // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to\n  //   `false`, all stats will be instantiated.\n  //\n  // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the\n  //   list will not instantiate.\n  //\n  // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of\n  //   the StringMatchers in the list.\n  //\n  //\n  // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex.\n  // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based\n  // matcher rather than a regex-based matcher.\n  //\n  // Example 1. Excluding all stats.\n  //\n  // .. 
code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"rejectAll\": \"true\"\n  //     }\n  //   }\n  //\n  // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"exclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n  // Example 3. Including only manager-related stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"inclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster_manager.\"\n  //           },\n  //           {\n  //             \"prefix\": \"listener_manager.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.StatsMatcher\";\n\n  oneof stats_matcher {\n    option (validate.required) = true;\n\n    // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all\n    // stats are enabled.\n    bool reject_all = 1;\n\n    // Exclusive match. All stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.v4alpha.ListStringMatcher exclusion_list = 2;\n\n    // Inclusive match. No stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.v4alpha.ListStringMatcher inclusion_list = 3;\n  }\n}\n\n// Designates a tag name and value pair. The value may be either a fixed value\n// or a regex providing the value via capture groups. 
The specified tag will be\n// unconditionally set if a fixed value, otherwise it will only be set if one\n// or more capture groups in the regex match.\nmessage TagSpecifier {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.TagSpecifier\";\n\n  // Attaches an identifier to the tag values to identify the tag being in the\n  // sink. Envoy has a set of default names and regexes to extract dynamic\n  // portions of existing stats, which can be found in :repo:`well_known_names.h\n  // <source/common/config/well_known_names.h>` in the Envoy repository. If a :ref:`tag_name\n  // <envoy_api_field_config.metrics.v4alpha.TagSpecifier.tag_name>` is provided in the config and\n  // neither :ref:`regex <envoy_api_field_config.metrics.v4alpha.TagSpecifier.regex>` or\n  // :ref:`fixed_value <envoy_api_field_config.metrics.v4alpha.TagSpecifier.fixed_value>` were specified,\n  // Envoy will attempt to find that name in its set of defaults and use the accompanying regex.\n  //\n  // .. note::\n  //\n  //   It is invalid to specify the same tag name twice in a config.\n  string tag_name = 1;\n\n  oneof tag_value {\n    // Designates a tag to strip from the tag extracted name and provide as a named\n    // tag value for all statistics. This will only occur if any part of the name\n    // matches the regex provided with one or more capture groups.\n    //\n    // The first capture group identifies the portion of the name to remove. The\n    // second capture group (which will normally be nested inside the first) will\n    // designate the value of the tag for the statistic. If no second capture\n    // group is provided, the first will also be used to set the value of the tag.\n    // All other capture groups will be ignored.\n    //\n    // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and\n    // one tag specifier:\n    //\n    // .. 
code-block:: json\n    //\n    //   {\n    //     \"tag_name\": \"envoy.cluster_name\",\n    //     \"regex\": \"^cluster\\\\.((.+?)\\\\.)\"\n    //   }\n    //\n    // Note that the regex will remove ``foo_cluster.`` making the tag extracted\n    // name ``cluster.upstream_rq_timeout`` and the tag value for\n    // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no\n    // ``.`` character because of the second capture group).\n    //\n    // Example 2. a stat name\n    // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two\n    // tag specifiers:\n    //\n    // .. code-block:: json\n    //\n    //   [\n    //     {\n    //       \"tag_name\": \"envoy.http_user_agent\",\n    //       \"regex\": \"^http(?=\\\\.).*?\\\\.user_agent\\\\.((.+?)\\\\.)\\\\w+?$\"\n    //     },\n    //     {\n    //       \"tag_name\": \"envoy.http_conn_manager_prefix\",\n    //       \"regex\": \"^http\\\\.((.*?)\\\\.)\"\n    //     }\n    //   ]\n    //\n    // The two regexes of the specifiers will be processed in the definition order.\n    //\n    // The first regex will remove ``ios.``, leaving the tag extracted name\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag\n    // ``envoy.http_user_agent`` will be added with tag value ``ios``.\n    //\n    // The second regex will remove ``connection_manager_1.`` from the tag\n    // extracted name produced by the first regex\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving\n    // ``http.user_agent.downstream_cx_total`` as the tag extracted name. 
The tag\n    // ``envoy.http_conn_manager_prefix`` will be added with the tag value\n    // ``connection_manager_1``.\n    string regex = 2 [(validate.rules).string = {max_bytes: 1024}];\n\n    // Specifies a fixed tag value for the ``tag_name``.\n    string fixed_value = 3;\n  }\n}\n\n// Specifies a matcher for stats and the buckets that matching stats should use.\nmessage HistogramBucketSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.HistogramBucketSettings\";\n\n  // The stats that this rule applies to. The match is applied to the original stat name\n  // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`.\n  type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}];\n\n  // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique.\n  // The order of the buckets does not matter.\n  repeated double buckets = 2 [(validate.rules).repeated = {\n    min_items: 1\n    unique: true\n    items {double {gt: 0.0}}\n  }];\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support\n// tagged metrics.\n// [#extension: envoy.stat_sinks.statsd]\nmessage StatsdSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.metrics.v3.StatsdSink\";\n\n  oneof statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running `statsd <https://github.com/etsy/statsd>`_\n    // compliant listener. If specified, statistics will be flushed to this\n    // address.\n    core.v4alpha.Address address = 1;\n\n    // The name of a cluster that is running a TCP `statsd\n    // <https://github.com/etsy/statsd>`_ compliant listener. If specified,\n    // Envoy will connect to this cluster to flush statistics.\n    string tcp_cluster_name = 2;\n  }\n\n  // Optional custom prefix for StatsdSink. 
If\n  // specified, this will override the default prefix.\n  // For example:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"prefix\" : \"envoy-prod\"\n  //   }\n  //\n  // will change emitted stats to\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy-prod.test_counter:1|c\n  //   envoy-prod.test_timer:5|ms\n  //\n  // Note that the default prefix, \"envoy\", will be used if a prefix is not\n  // specified.\n  //\n  // Stats with default prefix:\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy.test_counter:1|c\n  //   envoy.test_timer:5|ms\n  string prefix = 3;\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink.\n// The sink emits stats with `DogStatsD <https://docs.datadoghq.com/guides/dogstatsd/>`_\n// compatible tags. Tags are configurable via :ref:`StatsConfig\n// <envoy_api_msg_config.metrics.v4alpha.StatsConfig>`.\n// [#extension: envoy.stat_sinks.dog_statsd]\nmessage DogStatsdSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.DogStatsdSink\";\n\n  reserved 2;\n\n  oneof dog_statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running DogStatsD compliant listener. If specified,\n    // statistics will be flushed to this address.\n    core.v4alpha.Address address = 1;\n  }\n\n  // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field\n  // <envoy_api_field_config.metrics.v4alpha.StatsdSink.prefix>` for more details.\n  string prefix = 3;\n\n  // Optional max datagram size to use when sending UDP messages. By default Envoy\n  // will emit one metric per datagram. By specifying a max-size larger than a single\n  // metric, Envoy will emit multiple, new-line separated metrics. 
The max datagram\n  // size should not exceed your network's MTU.\n  //\n  // Note that this value may not be respected if smaller than a single metric.\n  google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}];\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink.\n// The sink emits stats in `text/event-stream\n// <https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events>`_\n// formatted stream for use by `Hystrix dashboard\n// <https://github.com/Netflix-Skunkworks/hystrix-dashboard/wiki>`_.\n//\n// Note that only a single HystrixSink should be configured.\n//\n// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`.\n// [#extension: envoy.stat_sinks.hystrix]\nmessage HystrixSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.HystrixSink\";\n\n  // The number of buckets the rolling statistical window is divided into.\n  //\n  // Each time the sink is flushed, all relevant Envoy statistics are sampled and\n  // added to the rolling window (removing the oldest samples in the window\n  // in the process). The sink then outputs the aggregate statistics across the\n  // current rolling window to the event stream(s).\n  //\n  // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets\n  //\n  // More detailed explanation can be found in `Hystrix wiki\n  // <https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#hystrixrollingnumber>`_.\n  int64 num_buckets = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/overload/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/overload/v2alpha/overload.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.overload.v2alpha;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.overload.v2alpha\";\noption java_outer_classname = \"OverloadProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Overload Manager]\n\n// The Overload Manager provides an extensible framework to protect Envoy instances\n// from overload of various resources (memory, cpu, file descriptors, etc).\n// It monitors a configurable set of resources and notifies registered listeners\n// when triggers related to those resources fire.\n\nmessage ResourceMonitor {\n  // The name of the resource monitor to instantiate. Must match a registered\n  // resource monitor type. The built-in resource monitors are:\n  //\n  // * :ref:`envoy.resource_monitors.fixed_heap\n  //   <envoy_api_msg_config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig>`\n  // * :ref:`envoy.resource_monitors.injected_resource\n  //   <envoy_api_msg_config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig>`\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Configuration for the resource monitor being instantiated.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\nmessage ThresholdTrigger {\n  // If the resource pressure is greater than or equal to this value, the trigger\n  // will fire.\n  double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}];\n}\n\nmessage Trigger {\n  // The name of the resource this is a trigger for.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof trigger_oneof {\n    option (validate.required) = 
true;\n\n    ThresholdTrigger threshold = 2;\n  }\n}\n\nmessage OverloadAction {\n  // The name of the overload action. This is just a well-known string that listeners can\n  // use for registering callbacks. Custom overload actions should be named using reverse\n  // DNS to ensure uniqueness.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // A set of triggers for this action. If any of these triggers fire the overload action\n  // is activated. Listeners are notified when the overload action transitions from\n  // inactivated to activated, or vice versa.\n  repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\nmessage OverloadManager {\n  // The interval for refreshing resource usage.\n  google.protobuf.Duration refresh_interval = 1;\n\n  // The set of resources to monitor.\n  repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The set of overload actions.\n  repeated OverloadAction actions = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/overload/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/overload/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/overload/v3/overload.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.overload.v3;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.overload.v3\";\noption java_outer_classname = \"OverloadProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Overload Manager]\n\n// The Overload Manager provides an extensible framework to protect Envoy instances\n// from overload of various resources (memory, cpu, file descriptors, etc).\n// It monitors a configurable set of resources and notifies registered listeners\n// when triggers related to those resources fire.\n\nmessage ResourceMonitor {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.overload.v2alpha.ResourceMonitor\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the resource monitor to instantiate. Must match a registered\n  // resource monitor type. 
The built-in resource monitors are:\n  //\n  // * :ref:`envoy.resource_monitors.fixed_heap\n  //   <envoy_api_msg_config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig>`\n  // * :ref:`envoy.resource_monitors.injected_resource\n  //   <envoy_api_msg_config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig>`\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Configuration for the resource monitor being instantiated.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\nmessage ThresholdTrigger {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.overload.v2alpha.ThresholdTrigger\";\n\n  // If the resource pressure is greater than or equal to this value, the trigger\n  // will enter saturation.\n  double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}];\n}\n\nmessage ScaledTrigger {\n  // If the resource pressure is greater than this value, the trigger will be in the\n  // :ref:`scaling <arch_overview_overload_manager-triggers-state>` state with value\n  // `(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)`.\n  double scaling_threshold = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}];\n\n  // If the resource pressure is greater than this value, the trigger will enter saturation.\n  double saturation_threshold = 2 [(validate.rules).double = {lte: 1.0 gte: 0.0}];\n}\n\nmessage Trigger {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.overload.v2alpha.Trigger\";\n\n  // The name of the resource this is a trigger for.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof trigger_oneof {\n    option (validate.required) = true;\n\n    ThresholdTrigger threshold = 2;\n\n    ScaledTrigger scaled = 3;\n  }\n}\n\nmessage OverloadAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.overload.v2alpha.OverloadAction\";\n\n  // The name of the overload 
action. This is just a well-known string that listeners can\n  // use for registering callbacks. Custom overload actions should be named using reverse\n  // DNS to ensure uniqueness.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A set of triggers for this action. The state of the action is the maximum\n  // state of all triggers, which can be scaling between 0 and 1 or saturated. Listeners\n  // are notified when the overload action changes state.\n  repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\nmessage OverloadManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.overload.v2alpha.OverloadManager\";\n\n  // The interval for refreshing resource usage.\n  google.protobuf.Duration refresh_interval = 1;\n\n  // The set of resources to monitor.\n  repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The set of overload actions.\n  repeated OverloadAction actions = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/ratelimit/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/ratelimit/v2/rls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.ratelimit.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.ratelimit.v2\";\noption java_outer_classname = \"RlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate limit service]\n\n// Rate limit :ref:`configuration overview <config_rate_limit_service>`.\nmessage RateLimitServiceConfig {\n  reserved 1, 3;\n\n  // Specifies the gRPC service that hosts the rate limit service. The client\n  // will connect to this cluster when it needs to make rate limit service\n  // requests.\n  api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/ratelimit/v3/rls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.ratelimit.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.ratelimit.v3\";\noption java_outer_classname = \"RlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate limit service]\n\n// Rate limit :ref:`configuration overview <config_rate_limit_service>`.\nmessage RateLimitServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.ratelimit.v2.RateLimitServiceConfig\";\n\n  reserved 1, 3;\n\n  // Specifies the gRPC service that hosts the rate limit service. The client\n  // will connect to this cluster when it needs to make rate limit service\n  // requests.\n  core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n\n  // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and\n  // version of messages used on the wire.\n  core.v3.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/rbac/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/rbac/v2/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.rbac.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/matcher/metadata.proto\";\nimport \"envoy/type/matcher/path.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/api/expr/v1alpha1/syntax.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.rbac.v2\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Role Based Access Control (RBAC)]\n\n// Role Based Access Control (RBAC) provides service-level and method-level access control for a\n// service. RBAC policies are additive. The policies are examined in order. A request is allowed\n// once a matching policy is found (suppose the `action` is ALLOW).\n//\n// Here is an example of RBAC configuration. It has two policies:\n//\n// * Service account \"cluster.local/ns/default/sa/admin\" has full access to the service, and so\n//   does \"cluster.local/ns/default/sa/superuser\".\n//\n// * Any user can read (\"GET\") the service at paths with prefix \"/products\", so long as the\n//   destination port is either 80 or 443.\n//\n//  .. 
code-block:: yaml\n//\n//   action: ALLOW\n//   policies:\n//     \"service-admin\":\n//       permissions:\n//         - any: true\n//       principals:\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/admin\"\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/superuser\"\n//     \"product-viewer\":\n//       permissions:\n//           - and_rules:\n//               rules:\n//                 - header: { name: \":method\", exact_match: \"GET\" }\n//                 - url_path:\n//                     path: { prefix: \"/products\" }\n//                 - or_rules:\n//                     rules:\n//                       - destination_port: 80\n//                       - destination_port: 443\n//       principals:\n//         - any: true\n//\nmessage RBAC {\n  // Should we do safe-list or block-list style access control?\n  enum Action {\n    // The policies grant access to principals. The rest is denied. This is safe-list style\n    // access control. This is the default type.\n    ALLOW = 0;\n\n    // The policies deny access to principals. The rest is allowed. This is block-list style\n    // access control.\n    DENY = 1;\n  }\n\n  // The action to take if a policy matches. The request is allowed if and only if:\n  //\n  //   * `action` is \"ALLOWED\" and at least one policy matches\n  //   * `action` is \"DENY\" and none of the policies match\n  Action action = 1;\n\n  // Maps from policy name to policy. A match occurs when at least one policy matches the request.\n  map<string, Policy> policies = 2;\n}\n\n// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if\n// and only if at least one of its permissions match the action taking place AND at least one of its\n// principals match the downstream AND the condition is true if specified.\nmessage Policy {\n  // Required. 
The set of permissions that define a role. Each permission is matched with OR\n  // semantics. To match all actions for this policy, a single Permission with the `any` field set\n  // to true should be used.\n  repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Required. The set of principals that are assigned/denied the role based on “action”. Each\n  // principal is matched with OR semantics. To match all downstreams for this policy, a single\n  // Principal with the `any` field set to true should be used.\n  repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // An optional symbolic expression specifying an access control\n  // :ref:`condition <arch_overview_condition>`. The condition is combined\n  // with the permissions and the principals as a clause with AND semantics.\n  google.api.expr.v1alpha1.Expr condition = 3;\n}\n\n// Permission defines an action (or actions) that a principal can take.\n// [#next-free-field: 11]\nmessage Permission {\n  // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context,\n  // each are applied with the associated behavior.\n  message Set {\n    repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set of rules that all must match in order to define the action.\n    Set and_rules = 1;\n\n    // A set of rules where at least one must match in order to define the action.\n    Set or_rules = 2;\n\n    // When any is set, it matches any action.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only\n    // available for HTTP request.\n    // Note: the pseudo-header :path includes the query and fragment string. 
Use the `url_path`\n    // field if you want to match the URL path without the query and fragment string.\n    api.v2.route.HeaderMatcher header = 4;\n\n    // A URL path on the incoming HTTP request. Only available for HTTP.\n    type.matcher.PathMatcher url_path = 10;\n\n    // A CIDR block that describes the destination IP.\n    api.v2.core.CidrRange destination_ip = 5;\n\n    // A port number that describes the destination port connecting to.\n    uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}];\n\n    // Metadata that describes additional information about the action.\n    type.matcher.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided permission. For instance, if the value of `not_rule` would\n    // match, this permission would not match. Conversely, if the value of `not_rule` would not\n    // match, this permission would match.\n    Permission not_rule = 8;\n\n    // The request server from the client's connection request. This is\n    // typically TLS SNI.\n    //\n    // .. 
attention::\n    //\n    //   The behavior of this field may be affected by how Envoy is configured\n    //   as explained below.\n    //\n    //   * If the :ref:`TLS Inspector <config_listener_filters_tls_inspector>`\n    //     filter is not added, and if a `FilterChainMatch` is not defined for\n    //     the :ref:`server name <envoy_api_field_listener.FilterChainMatch.server_names>`,\n    //     a TLS connection's requested SNI server name will be treated as if it\n    //     wasn't present.\n    //\n    //   * A :ref:`listener filter <arch_overview_listener_filters>` may\n    //     overwrite a connection's requested server name within Envoy.\n    //\n    // Please refer to :ref:`this FAQ entry <faq_how_to_setup_sni>` to learn to\n    // setup SNI.\n    type.matcher.StringMatcher requested_server_name = 9;\n  }\n}\n\n// Principal defines an identity or a group of identities for a downstream subject.\n// [#next-free-field: 12]\nmessage Principal {\n  // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context,\n  // each are applied with the associated behavior.\n  message Set {\n    repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Authentication attributes for a downstream.\n  message Authenticated {\n    reserved 1;\n\n    // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the\n    // certificate, otherwise the subject field is used. 
If unset, it applies to any user that is\n    // authenticated.\n    type.matcher.StringMatcher principal_name = 2;\n  }\n\n  oneof identifier {\n    option (validate.required) = true;\n\n    // A set of identifiers that all must match in order to define the downstream.\n    Set and_ids = 1;\n\n    // A set of identifiers at least one must match in order to define the downstream.\n    Set or_ids = 2;\n\n    // When any is set, it matches any downstream.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // Authenticated attributes that identify the downstream.\n    Authenticated authenticated = 4;\n\n    // A CIDR block that describes the downstream IP.\n    // This address will honor proxy protocol, but will not honor XFF.\n    api.v2.core.CidrRange source_ip = 5 [deprecated = true];\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This is always the physical peer even if the\n    // :ref:`remote_ip <envoy_api_field_config.rbac.v2.Principal.remote_ip>` is inferred\n    // from for example the x-forwarder-for header, proxy protocol, etc.\n    api.v2.core.CidrRange direct_remote_ip = 10;\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This may not be the physical peer and could be different from the\n    // :ref:`direct_remote_ip <envoy_api_field_config.rbac.v2.Principal.direct_remote_ip>`.\n    // E.g, if the remote ip is inferred from for example the x-forwarder-for header,\n    // proxy protocol, etc.\n    api.v2.core.CidrRange remote_ip = 11;\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only\n    // available for HTTP request.\n    // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path`\n    // field if you want to match the URL path without the query and fragment string.\n    api.v2.route.HeaderMatcher header = 6;\n\n    // A URL path on the incoming HTTP request. 
Only available for HTTP.\n    type.matcher.PathMatcher url_path = 9;\n\n    // Metadata that describes additional information about the principal.\n    type.matcher.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided principal. For instance, if the value of `not_id` would match,\n    // this principal would not match. Conversely, if the value of `not_id` would not match, this\n    // principal would match.\n    Principal not_id = 8;\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/rbac/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/rbac/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/rbac/v3/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.rbac.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/metadata.proto\";\nimport \"envoy/type/matcher/v3/path.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/api/expr/v1alpha1/checked.proto\";\nimport \"google/api/expr/v1alpha1/syntax.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.rbac.v3\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Role Based Access Control (RBAC)]\n\n// Role Based Access Control (RBAC) provides service-level and method-level access control for a\n// service. RBAC policies are additive. The policies are examined in order. Requests are allowed\n// or denied based on the `action` and whether a matching policy is found. For instance, if the\n// action is ALLOW and a matching policy is found the request should be allowed.\n//\n// RBAC can also be used to make access logging decisions by communicating with access loggers\n// through dynamic metadata. When the action is LOG and at least one policy matches, the\n// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating\n// the request should be logged.\n//\n// Here is an example of RBAC configuration. It has two policies:\n//\n// * Service account \"cluster.local/ns/default/sa/admin\" has full access to the service, and so\n//   does \"cluster.local/ns/default/sa/superuser\".\n//\n// * Any user can read (\"GET\") the service at paths with prefix \"/products\", so long as the\n//   destination port is either 80 or 443.\n//\n//  .. 
code-block:: yaml\n//\n//   action: ALLOW\n//   policies:\n//     \"service-admin\":\n//       permissions:\n//         - any: true\n//       principals:\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/admin\"\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/superuser\"\n//     \"product-viewer\":\n//       permissions:\n//           - and_rules:\n//               rules:\n//                 - header: { name: \":method\", exact_match: \"GET\" }\n//                 - url_path:\n//                     path: { prefix: \"/products\" }\n//                 - or_rules:\n//                     rules:\n//                       - destination_port: 80\n//                       - destination_port: 443\n//       principals:\n//         - any: true\n//\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v2.RBAC\";\n\n  // Should we do safe-list or block-list style access control?\n  enum Action {\n    // The policies grant access to principals. The rest are denied. This is safe-list style\n    // access control. This is the default type.\n    ALLOW = 0;\n\n    // The policies deny access to principals. The rest are allowed. This is block-list style\n    // access control.\n    DENY = 1;\n\n    // The policies set the `access_log_hint` dynamic metadata key based on if requests match.\n    // All requests are allowed.\n    LOG = 2;\n  }\n\n  // The action to take if a policy matches. Every action either allows or denies a request,\n  // and can also carry out action-specific operations.\n  //\n  // Actions:\n  //\n  //  * ALLOW: Allows the request if and only if there is a policy that matches\n  //    the request.\n  //  * DENY: Allows the request if and only if there are no policies that\n  //    match the request.\n  //  * LOG: Allows all requests. 
If at least one policy matches, the dynamic\n  //    metadata key `access_log_hint` is set to the value `true` under the shared\n  //    key namespace 'envoy.common'. If no policies match, it is set to `false`.\n  //    Other actions do not modify this key.\n  //\n  Action action = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maps from policy name to policy. A match occurs when at least one policy matches the request.\n  map<string, Policy> policies = 2;\n}\n\n// Policy specifies a role and the principals that are assigned/denied the role.\n// A policy matches if and only if at least one of its permissions match the\n// action taking place AND at least one of its principals match the downstream\n// AND the condition is true if specified.\nmessage Policy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v2.Policy\";\n\n  // Required. The set of permissions that define a role. Each permission is\n  // matched with OR semantics. To match all actions for this policy, a single\n  // Permission with the `any` field set to true should be used.\n  repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Required. The set of principals that are assigned/denied the role based on\n  // “action”. Each principal is matched with OR semantics. To match all\n  // downstreams for this policy, a single Principal with the `any` field set to\n  // true should be used.\n  repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // An optional symbolic expression specifying an access control\n  // :ref:`condition <arch_overview_condition>`. 
The condition is combined\n  // with the permissions and the principals as a clause with AND semantics.\n  // Only be used when checked_condition is not used.\n  google.api.expr.v1alpha1.Expr condition = 3\n      [(udpa.annotations.field_migrate).oneof_promotion = \"expression_specifier\"];\n\n  // [#not-implemented-hide:]\n  // An optional symbolic expression that has been successfully type checked.\n  // Only be used when condition is not used.\n  google.api.expr.v1alpha1.CheckedExpr checked_condition = 4\n      [(udpa.annotations.field_migrate).oneof_promotion = \"expression_specifier\"];\n}\n\n// Permission defines an action (or actions) that a principal can take.\n// [#next-free-field: 11]\nmessage Permission {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v2.Permission\";\n\n  // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context,\n  // each are applied with the associated behavior.\n  message Set {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v2.Permission.Set\";\n\n    repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set of rules that all must match in order to define the action.\n    Set and_rules = 1;\n\n    // A set of rules where at least one must match in order to define the action.\n    Set or_rules = 2;\n\n    // When any is set, it matches any action.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only\n    // available for HTTP request.\n    // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path`\n    // field if you want to match the URL path without the query and fragment string.\n    route.v3.HeaderMatcher header = 4;\n\n    // A URL path on the incoming HTTP request. 
Only available for HTTP.\n    type.matcher.v3.PathMatcher url_path = 10;\n\n    // A CIDR block that describes the destination IP.\n    core.v3.CidrRange destination_ip = 5;\n\n    // A port number that describes the destination port connecting to.\n    uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}];\n\n    // Metadata that describes additional information about the action.\n    type.matcher.v3.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided permission. For instance, if the value of\n    // `not_rule` would match, this permission would not match. Conversely, if\n    // the value of `not_rule` would not match, this permission would match.\n    Permission not_rule = 8;\n\n    // The request server from the client's connection request. This is\n    // typically TLS SNI.\n    //\n    // .. attention::\n    //\n    //   The behavior of this field may be affected by how Envoy is configured\n    //   as explained below.\n    //\n    //   * If the :ref:`TLS Inspector <config_listener_filters_tls_inspector>`\n    //     filter is not added, and if a `FilterChainMatch` is not defined for\n    //     the :ref:`server name\n    //     <envoy_api_field_config.listener.v3.FilterChainMatch.server_names>`,\n    //     a TLS connection's requested SNI server name will be treated as if it\n    //     wasn't present.\n    //\n    //   * A :ref:`listener filter <arch_overview_listener_filters>` may\n    //     overwrite a connection's requested server name within Envoy.\n    //\n    // Please refer to :ref:`this FAQ entry <faq_how_to_setup_sni>` to learn to\n    // setup SNI.\n    type.matcher.v3.StringMatcher requested_server_name = 9;\n  }\n}\n\n// Principal defines an identity or a group of identities for a downstream\n// subject.\n// [#next-free-field: 12]\nmessage Principal {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v2.Principal\";\n\n  // Used in the `and_ids` and `or_ids` fields in the 
`identifier` oneof.\n  // Depending on the context, each are applied with the associated behavior.\n  message Set {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v2.Principal.Set\";\n\n    repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Authentication attributes for a downstream.\n  message Authenticated {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v2.Principal.Authenticated\";\n\n    reserved 1;\n\n    // The name of the principal. If set, The URI SAN or DNS SAN in that order\n    // is used from the certificate, otherwise the subject field is used. If\n    // unset, it applies to any user that is authenticated.\n    type.matcher.v3.StringMatcher principal_name = 2;\n  }\n\n  oneof identifier {\n    option (validate.required) = true;\n\n    // A set of identifiers that all must match in order to define the\n    // downstream.\n    Set and_ids = 1;\n\n    // A set of identifiers at least one must match in order to define the\n    // downstream.\n    Set or_ids = 2;\n\n    // When any is set, it matches any downstream.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // Authenticated attributes that identify the downstream.\n    Authenticated authenticated = 4;\n\n    // A CIDR block that describes the downstream IP.\n    // This address will honor proxy protocol, but will not honor XFF.\n    core.v3.CidrRange source_ip = 5 [deprecated = true];\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This is always the physical peer even if the\n    // :ref:`remote_ip <envoy_api_field_config.rbac.v3.Principal.remote_ip>` is\n    // inferred from for example the x-forwarder-for header, proxy protocol,\n    // etc.\n    core.v3.CidrRange direct_remote_ip = 10;\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This may not be the physical peer 
and could be different from the\n    // :ref:`direct_remote_ip\n    // <envoy_api_field_config.rbac.v3.Principal.direct_remote_ip>`. E.g, if the\n    // remote ip is inferred from for example the x-forwarder-for header, proxy\n    // protocol, etc.\n    core.v3.CidrRange remote_ip = 11;\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP\n    // request. Only available for HTTP request. Note: the pseudo-header :path\n    // includes the query and fragment string. Use the `url_path` field if you\n    // want to match the URL path without the query and fragment string.\n    route.v3.HeaderMatcher header = 6;\n\n    // A URL path on the incoming HTTP request. Only available for HTTP.\n    type.matcher.v3.PathMatcher url_path = 9;\n\n    // Metadata that describes additional information about the principal.\n    type.matcher.v3.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided principal. For instance, if the value of\n    // `not_id` would match, this principal would not match. Conversely, if the\n    // value of `not_id` would not match, this principal would match.\n    Principal not_id = 8;\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/rbac/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/rbac/v3:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/rbac/v4alpha/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.rbac.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/metadata.proto\";\nimport \"envoy/type/matcher/v4alpha/path.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/api/expr/v1alpha1/checked.proto\";\nimport \"google/api/expr/v1alpha1/syntax.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.rbac.v4alpha\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Role Based Access Control (RBAC)]\n\n// Role Based Access Control (RBAC) provides service-level and method-level access control for a\n// service. RBAC policies are additive. The policies are examined in order. Requests are allowed\n// or denied based on the `action` and whether a matching policy is found. For instance, if the\n// action is ALLOW and a matching policy is found the request should be allowed.\n//\n// RBAC can also be used to make access logging decisions by communicating with access loggers\n// through dynamic metadata. When the action is LOG and at least one policy matches, the\n// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating\n// the request should be logged.\n//\n// Here is an example of RBAC configuration. It has two policies:\n//\n// * Service account \"cluster.local/ns/default/sa/admin\" has full access to the service, and so\n//   does \"cluster.local/ns/default/sa/superuser\".\n//\n// * Any user can read (\"GET\") the service at paths with prefix \"/products\", so long as the\n//   destination port is either 80 or 443.\n//\n//  .. 
code-block:: yaml\n//\n//   action: ALLOW\n//   policies:\n//     \"service-admin\":\n//       permissions:\n//         - any: true\n//       principals:\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/admin\"\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/superuser\"\n//     \"product-viewer\":\n//       permissions:\n//           - and_rules:\n//               rules:\n//                 - header: { name: \":method\", exact_match: \"GET\" }\n//                 - url_path:\n//                     path: { prefix: \"/products\" }\n//                 - or_rules:\n//                     rules:\n//                       - destination_port: 80\n//                       - destination_port: 443\n//       principals:\n//         - any: true\n//\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v3.RBAC\";\n\n  // Should we do safe-list or block-list style access control?\n  enum Action {\n    // The policies grant access to principals. The rest are denied. This is safe-list style\n    // access control. This is the default type.\n    ALLOW = 0;\n\n    // The policies deny access to principals. The rest are allowed. This is block-list style\n    // access control.\n    DENY = 1;\n\n    // The policies set the `access_log_hint` dynamic metadata key based on if requests match.\n    // All requests are allowed.\n    LOG = 2;\n  }\n\n  // The action to take if a policy matches. Every action either allows or denies a request,\n  // and can also carry out action-specific operations.\n  //\n  // Actions:\n  //\n  //  * ALLOW: Allows the request if and only if there is a policy that matches\n  //    the request.\n  //  * DENY: Allows the request if and only if there are no policies that\n  //    match the request.\n  //  * LOG: Allows all requests. 
If at least one policy matches, the dynamic\n  //    metadata key `access_log_hint` is set to the value `true` under the shared\n  //    key namespace 'envoy.common'. If no policies match, it is set to `false`.\n  //    Other actions do not modify this key.\n  //\n  Action action = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maps from policy name to policy. A match occurs when at least one policy matches the request.\n  map<string, Policy> policies = 2;\n}\n\n// Policy specifies a role and the principals that are assigned/denied the role.\n// A policy matches if and only if at least one of its permissions match the\n// action taking place AND at least one of its principals match the downstream\n// AND the condition is true if specified.\nmessage Policy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v3.Policy\";\n\n  // Required. The set of permissions that define a role. Each permission is\n  // matched with OR semantics. To match all actions for this policy, a single\n  // Permission with the `any` field set to true should be used.\n  repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Required. The set of principals that are assigned/denied the role based on\n  // “action”. Each principal is matched with OR semantics. To match all\n  // downstreams for this policy, a single Principal with the `any` field set to\n  // true should be used.\n  repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  oneof expression_specifier {\n    // An optional symbolic expression specifying an access control\n    // :ref:`condition <arch_overview_condition>`. 
The condition is combined\n    // with the permissions and the principals as a clause with AND semantics.\n    // Only be used when checked_condition is not used.\n    google.api.expr.v1alpha1.Expr condition = 3;\n\n    // [#not-implemented-hide:]\n    // An optional symbolic expression that has been successfully type checked.\n    // Only be used when condition is not used.\n    google.api.expr.v1alpha1.CheckedExpr checked_condition = 4;\n  }\n}\n\n// Permission defines an action (or actions) that a principal can take.\n// [#next-free-field: 11]\nmessage Permission {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v3.Permission\";\n\n  // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context,\n  // each are applied with the associated behavior.\n  message Set {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v3.Permission.Set\";\n\n    repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set of rules that all must match in order to define the action.\n    Set and_rules = 1;\n\n    // A set of rules where at least one must match in order to define the action.\n    Set or_rules = 2;\n\n    // When any is set, it matches any action.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only\n    // available for HTTP request.\n    // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path`\n    // field if you want to match the URL path without the query and fragment string.\n    route.v4alpha.HeaderMatcher header = 4;\n\n    // A URL path on the incoming HTTP request. 
Only available for HTTP.\n    type.matcher.v4alpha.PathMatcher url_path = 10;\n\n    // A CIDR block that describes the destination IP.\n    core.v4alpha.CidrRange destination_ip = 5;\n\n    // A port number that describes the destination port connecting to.\n    uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}];\n\n    // Metadata that describes additional information about the action.\n    type.matcher.v4alpha.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided permission. For instance, if the value of\n    // `not_rule` would match, this permission would not match. Conversely, if\n    // the value of `not_rule` would not match, this permission would match.\n    Permission not_rule = 8;\n\n    // The request server from the client's connection request. This is\n    // typically TLS SNI.\n    //\n    // .. attention::\n    //\n    //   The behavior of this field may be affected by how Envoy is configured\n    //   as explained below.\n    //\n    //   * If the :ref:`TLS Inspector <config_listener_filters_tls_inspector>`\n    //     filter is not added, and if a `FilterChainMatch` is not defined for\n    //     the :ref:`server name\n    //     <envoy_api_field_config.listener.v4alpha.FilterChainMatch.server_names>`,\n    //     a TLS connection's requested SNI server name will be treated as if it\n    //     wasn't present.\n    //\n    //   * A :ref:`listener filter <arch_overview_listener_filters>` may\n    //     overwrite a connection's requested server name within Envoy.\n    //\n    // Please refer to :ref:`this FAQ entry <faq_how_to_setup_sni>` to learn to\n    // setup SNI.\n    type.matcher.v4alpha.StringMatcher requested_server_name = 9;\n  }\n}\n\n// Principal defines an identity or a group of identities for a downstream\n// subject.\n// [#next-free-field: 12]\nmessage Principal {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v3.Principal\";\n\n  // Used in the `and_ids` and 
`or_ids` fields in the `identifier` oneof.\n  // Depending on the context, each are applied with the associated behavior.\n  message Set {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v3.Principal.Set\";\n\n    repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Authentication attributes for a downstream.\n  message Authenticated {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v3.Principal.Authenticated\";\n\n    reserved 1;\n\n    // The name of the principal. If set, the URI SAN or DNS SAN in that order\n    // is used from the certificate, otherwise the subject field is used. If\n    // unset, it applies to any user that is authenticated.\n    type.matcher.v4alpha.StringMatcher principal_name = 2;\n  }\n\n  reserved 5;\n\n  reserved \"source_ip\";\n\n  oneof identifier {\n    option (validate.required) = true;\n\n    // A set of identifiers that all must match in order to define the\n    // downstream.\n    Set and_ids = 1;\n\n    // A set of identifiers where at least one must match in order to define the\n    // downstream.\n    Set or_ids = 2;\n\n    // When any is set, it matches any downstream.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // Authenticated attributes that identify the downstream.\n    Authenticated authenticated = 4;\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This is always the physical peer even if the\n    // :ref:`remote_ip <envoy_api_field_config.rbac.v4alpha.Principal.remote_ip>` is\n    // inferred from for example the x-forwarded-for header, proxy protocol,\n    // etc.\n    core.v4alpha.CidrRange direct_remote_ip = 10;\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This may not be the physical peer and could be different from the\n    // :ref:`direct_remote_ip\n    // <envoy_api_field_config.rbac.v4alpha.Principal.direct_remote_ip>`. E.g., if the\n    // remote ip is inferred from for example the x-forwarded-for header, proxy\n    // protocol, etc.\n    core.v4alpha.CidrRange remote_ip = 11;\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP\n    // request. Only available for HTTP request. Note: the pseudo-header :path\n    // includes the query and fragment string. Use the `url_path` field if you\n    // want to match the URL path without the query and fragment string.\n    route.v4alpha.HeaderMatcher header = 6;\n\n    // A URL path on the incoming HTTP request. Only available for HTTP.\n    type.matcher.v4alpha.PathMatcher url_path = 9;\n\n    // Metadata that describes additional information about the principal.\n    type.matcher.v4alpha.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided principal. For instance, if the value of\n    // `not_id` would match, this principal would not match. Conversely, if the\n    // value of `not_id` would not match, this principal would match.\n    Principal not_id = 8;\n  }\n}\n"
  },
  {
    "path": "api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.resource_monitor.fixed_heap.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha\";\noption java_outer_classname = \"FixedHeapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Fixed heap]\n// [#extension: envoy.resource_monitors.fixed_heap]\n\n// The fixed heap resource monitor reports the Envoy process memory pressure, computed as a\n// fraction of currently reserved heap memory divided by a statically configured maximum\n// specified in the FixedHeapConfig.\nmessage FixedHeapConfig {\n  uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}];\n}\n"
  },
  {
    "path": "api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.resource_monitor.injected_resource.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha\";\noption java_outer_classname = \"InjectedResourceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Injected resource]\n// [#extension: envoy.resource_monitors.injected_resource]\n\n// The injected resource monitor allows injecting a synthetic resource pressure into Envoy\n// via a text file, which must contain a floating-point number in the range [0..1] representing\n// the resource pressure and be updated atomically by a symbolic link swap.\n// This is intended primarily for integration tests to force Envoy into an overloaded state.\nmessage InjectedResourceConfig {\n  string filename = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/retry/omit_canary_hosts/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.retry.omit_canary_hosts.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.retry.omit_canary_hosts.v2\";\noption java_outer_classname = \"OmitCanaryHostsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Omit Canary Hosts Predicate]\n// [#extension: envoy.retry_host_predicates.omit_canary_hosts]\n\nmessage OmitCanaryHostsPredicate {\n}\n"
  },
  {
    "path": "api/envoy/config/retry/omit_host_metadata/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.retry.omit_host_metadata.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.retry.omit_host_metadata.v2\";\noption java_outer_classname = \"OmitHostMetadataConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.retry.host.omit_host_metadata.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Omit host metadata retry predicate]\n\n// A retry host predicate that can be used to reject a host based on\n// predefined metadata match criteria.\n// [#extension: envoy.retry_host_predicates.omit_host_metadata]\nmessage OmitHostMetadataConfig {\n  // Retry host predicate metadata match criteria. The hosts in\n  // the upstream cluster with matching metadata will be omitted while\n  // attempting a retry of a failed request. The metadata should be specified\n  // under the *envoy.lb* key.\n  api.v2.core.Metadata metadata_match = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/retry/previous_hosts/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/retry/previous_hosts/v2/previous_hosts.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.retry.previous_hosts.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.retry.previous_hosts.v2\";\noption java_outer_classname = \"PreviousHostsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Previous Hosts Predicate]\n// [#extension: envoy.retry_host_predicates.previous_hosts]\n\nmessage PreviousHostsPredicate {\n}\n"
  },
  {
    "path": "api/envoy/config/retry/previous_priorities/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/retry/previous_priorities/previous_priorities_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.retry.previous_priorities;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.retry.previous_priorities\";\noption java_outer_classname = \"PreviousPrioritiesConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Previous priorities retry selector]\n\n// A retry host selector that attempts to spread retries between priorities, even if certain\n// priorities would not normally be attempted due to higher priorities being available.\n//\n// As priorities get excluded, load will be distributed amongst the remaining healthy priorities\n// based on the relative health of the priorities, matching how load is distributed during regular\n// host selection. For example, given priority healths of {100, 50, 50}, the original load will be\n// {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). 
If P0 is excluded, the load\n// changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the\n// remaining to spill over to P2.\n//\n// Each priority attempted will be excluded until there are no healthy priorities left, at which\n// point the list of attempted priorities will be reset, essentially starting from the beginning.\n// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the\n// following sequence of priorities would be selected (assuming update_frequency = 1):\n// Attempt 1: P0 (P0 is 100% healthy)\n// Attempt 2: P2 (P0 already attempted, P2 only healthy priority)\n// Attempt 3: P0 (no healthy priorities, reset)\n// Attempt 4: P2\n//\n// In the case of all upstream hosts being unhealthy, no adjustments will be made to the original\n// priority load, so behavior should be identical to not using this plugin.\n//\n// Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of\n// priorities), which might incur significant overhead for clusters with many priorities.\n// [#extension: envoy.retry_priorities.previous_priorities]\nmessage PreviousPrioritiesConfig {\n  // How often the priority load should be updated based on previously attempted priorities. Useful\n  // to allow each priority to receive more than one request before being excluded or to reduce\n  // the number of times that the priority load has to be recomputed.\n  //\n  // For example, by setting this to 2, then the first two attempts (initial attempt and first\n  // retry) will use the unmodified priority load. The third and fourth attempt will use priority\n  // load which excludes the priorities routed to with the first two attempts, and the fifth and\n  // sixth attempt will use the priority load excluding the priorities used for the first four\n  // attempts.\n  //\n  // Must be greater than 0.\n  int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}];\n}\n"
  },
  {
    "path": "api/envoy/config/route/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/metadata/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/route/v3/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v3\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP route configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// [#next-free-field: 11]\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.RouteConfiguration\";\n\n  // The name of the route configuration. For example, it might match\n  // :ref:`route_config_name\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.Rds.route_config_name>` in\n  // :ref:`envoy_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`.\n  string name = 1;\n\n  // An array of virtual hosts that make up the route table.\n  repeated VirtualHost virtual_hosts = 2;\n\n  // An array of virtual hosts will be dynamically loaded via the VHDS API.\n  // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used\n  // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for\n  // on-demand discovery of virtual hosts. 
The contents of these two fields will be merged to\n  // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration\n  // taking precedence.\n  Vhds vhds = 9;\n\n  // Optionally specifies a list of HTTP headers that the connection manager\n  // will consider to be internal only. If they are found on external requests they will be cleaned\n  // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information.\n  repeated string internal_only_headers = 3 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each response that\n  // the connection manager encodes. Headers specified at this level are applied\n  // after headers from any enclosed :ref:`envoy_api_msg_config.route.v3.VirtualHost` or\n  // :ref:`envoy_api_msg_config.route.v3.RouteAction`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption response_headers_to_add = 4\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // that the connection manager encodes.\n  repeated string response_headers_to_remove = 5 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // routed by the HTTP connection manager. Headers specified at this level are\n  // applied after headers from any enclosed :ref:`envoy_api_msg_config.route.v3.VirtualHost` or\n  // :ref:`envoy_api_msg_config.route.v3.RouteAction`. 
For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption request_headers_to_add = 6\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // routed by the HTTP connection manager.\n  repeated string request_headers_to_remove = 8 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // By default, headers that should be added/removed are evaluated from most to least specific:\n  //\n  // * route level\n  // * virtual host level\n  // * connection manager level\n  //\n  // To allow setting overrides at the route or virtual host level, this order can be reversed\n  // by setting this option to true. Defaults to false.\n  //\n  // [#next-major-version: In the v3 API, this will default to true.]\n  bool most_specific_header_mutations_wins = 10;\n\n  // An optional boolean that specifies whether the clusters that the route\n  // table refers to will be validated by the cluster manager. If set to true\n  // and a route refers to a non-existent cluster, the route table will not\n  // load. If set to false and a route refers to a non-existent cluster, the\n  // route table will load and the router filter will return a 404 if the route\n  // is selected at runtime. This setting defaults to true if the route table\n  // is statically defined via the :ref:`route_config\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.route_config>`\n  // option. This setting defaults to false if the route table is loaded dynamically via the\n  // :ref:`rds\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.rds>`\n  // option. Users may wish to override the default behavior in certain cases (for example when\n  // using CDS with a static route table).\n  google.protobuf.BoolValue validate_clusters = 7;\n}\n\nmessage Vhds {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Vhds\";\n\n  // Configuration source specifier for VHDS.\n  core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/route/v3/route_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/proxy_protocol.proto\";\nimport \"envoy/type/matcher/v3/regex.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/metadata/v3/metadata.proto\";\nimport \"envoy/type/tracing/v3/custom_tag.proto\";\nimport \"envoy/type/v3/percent.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v3\";\noption java_outer_classname = \"RouteComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP route components]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// The top level element in the routing configuration is a virtual host. Each virtual host has\n// a logical name as well as a set of domains that get routed to it based on the incoming request's\n// host header. This allows a single listener to service multiple top level domain path trees. 
Once\n// a virtual host is selected based on the domain, the routes are processed in order to see which\n// upstream cluster to route to or whether to perform a redirect.\n// [#next-free-field: 21]\nmessage VirtualHost {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.VirtualHost\";\n\n  enum TlsRequirementType {\n    // No TLS requirement for the virtual host.\n    NONE = 0;\n\n    // External requests must use TLS. If a request is external and it is not\n    // using TLS, a 301 redirect will be sent telling the client to use HTTPS.\n    EXTERNAL_ONLY = 1;\n\n    // All requests must use TLS. If a request is not using TLS, a 301 redirect\n    // will be sent telling the client to use HTTPS.\n    ALL = 2;\n  }\n\n  reserved 9, 12;\n\n  reserved \"per_filter_config\";\n\n  // The logical name of the virtual host. This is used when emitting certain\n  // statistics but is not relevant for routing.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A list of domains (host/authority header) that will be matched to this\n  // virtual host. Wildcard hosts are supported in the suffix or prefix form.\n  //\n  // Domain search order:\n  //  1. Exact domain names: ``www.foo.com``.\n  //  2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``.\n  //  3. Prefix domain wildcards: ``foo.*`` or ``foo-*``.\n  //  4. Special wildcard ``*`` matching any domain.\n  //\n  // .. note::\n  //\n  //   The wildcard will not match the empty string.\n  //   e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``.\n  //   The longest wildcards match first.\n  //   Only a single virtual host in the entire route configuration can match on ``*``. A domain\n  //   must be unique across all virtual hosts or the config will fail to load.\n  //\n  // Domains cannot contain control characters. 
This is validated by the well_known_regex HTTP_HEADER_VALUE.\n  repeated string domains = 2 [(validate.rules).repeated = {\n    min_items: 1\n    items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}}\n  }];\n\n  // The list of routes that will be matched, in order, for incoming requests.\n  // The first route that matches will be used.\n  repeated Route routes = 3;\n\n  // Specifies the type of TLS enforcement the virtual host expects. If this option is not\n  // specified, there is no TLS requirement for the virtual host.\n  TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // A list of virtual clusters defined for this virtual host. Virtual clusters\n  // are used for additional statistics gathering.\n  repeated VirtualCluster virtual_clusters = 5;\n\n  // Specifies a set of rate limit configurations that will be applied to the\n  // virtual host.\n  repeated RateLimit rate_limits = 6;\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_config.route.v3.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption request_headers_to_add = 7\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // handled by this virtual host.\n  repeated string request_headers_to_remove = 13 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Specifies a list of HTTP headers that should be added to each response\n  // handled by this virtual host. 
Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_config.route.v3.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // handled by this virtual host.\n  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Indicates that the virtual host has a CORS policy.\n  CorsPolicy cors = 8;\n\n  // The per_filter_config field can be used to provide virtual host-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n  // for if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 15;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the upstream request. Setting this option will cause it to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the upstream\n  // will see the attempt count as perceived by the second Envoy. 
Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_extensions.filters.http.router.v3.Router.suppress_envoy_headers>` flag.\n  //\n  // [#next-major-version: rename to include_attempt_count_in_request.]\n  bool include_request_attempt_count = 14;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the downstream response. Setting this option will cause the router to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the downstream\n  // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_extensions.filters.http.router.v3.Router.suppress_envoy_headers>` flag.\n  bool include_attempt_count_in_response = 19;\n\n  // Indicates the retry policy for all routes in this virtual host. Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  RetryPolicy retry_policy = 16;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. Note that setting a route level entry\n  // will take precedence over this config and it'll be treated independently (e.g.: values are not\n  // inherited). :ref:`Retry policy <envoy_api_field_config.route.v3.VirtualHost.retry_policy>` should not be\n  // set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 20;\n\n  // Indicates the hedge policy for all routes in this virtual host. 
Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  HedgePolicy hedge_policy = 17;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum\n  // value of this and the listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18;\n}\n\n// A filter-defined action type.\nmessage FilterAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.FilterAction\";\n\n  google.protobuf.Any action = 1;\n}\n\n// A route is both a specification of how to match a request as well as an indication of what to do\n// next (e.g., redirect, forward, rewrite, etc.).\n//\n// .. attention::\n//\n//   Envoy supports routing on HTTP method via :ref:`header matching\n//   <envoy_api_msg_config.route.v3.HeaderMatcher>`.\n// [#next-free-field: 18]\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.Route\";\n\n  reserved 6, 8;\n\n  reserved \"per_filter_config\";\n\n  // Name for the route.\n  string name = 14;\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  oneof action {\n    option (validate.required) = true;\n\n    // Route request to some upstream cluster.\n    RouteAction route = 2;\n\n    // Return a redirect.\n    RedirectAction redirect = 3;\n\n    // Return an arbitrary HTTP response directly, without proxying.\n    DirectResponseAction direct_response = 7;\n\n    // [#not-implemented-hide:]\n    // If true, a filter will define the action (e.g., it could dynamically generate the\n    // RouteAction).\n    // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when\n    // implemented]\n    FilterAction filter_action = 17;\n  }\n\n  // The Metadata 
field can be used to provide additional information\n  // about the route. It can be used for configuration, stats, and logging.\n  // The metadata should go under the filter namespace that will need it.\n  // For instance, if the metadata is intended for the Router filter,\n  // the filter name should be specified as *envoy.filters.http.router*.\n  core.v3.Metadata metadata = 4;\n\n  // Decorator for the matched route.\n  Decorator decorator = 5;\n\n  // The typed_per_filter_config field can be used to provide route-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>` for\n  // if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 13;\n\n  // Specifies a set of headers that will be added to requests matching this\n  // route. Headers specified at this level are applied before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v3.VirtualHost` and\n  // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption request_headers_to_add = 9\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // matching this route.\n  repeated string request_headers_to_remove = 12 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Specifies a set of headers that will be added to responses to requests\n  // matching this route. 
Headers specified at this level are applied before\n  // headers from the enclosing :ref:`envoy_api_msg_config.route.v3.VirtualHost` and\n  // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on\n  // :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // to requests matching this route.\n  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Presence of the object defines whether the connection manager's tracing configuration\n  // is overridden by this route specific instance.\n  Tracing tracing = 15;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set, the bytes actually buffered will be the minimum value of this and the\n  // listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16;\n}\n\n// Compared to the :ref:`cluster <envoy_api_field_config.route.v3.RouteAction.cluster>` field that specifies a\n// single upstream cluster as the target of a request, the :ref:`weighted_clusters\n// <envoy_api_field_config.route.v3.RouteAction.weighted_clusters>` option allows for specification of\n// multiple upstream clusters along with weights that indicate the percentage of\n// traffic to be forwarded to each cluster. 
The router selects an upstream cluster based on the\n// weights.\nmessage WeightedCluster {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.WeightedCluster\";\n\n  // [#next-free-field: 11]\n  message ClusterWeight {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.WeightedCluster.ClusterWeight\";\n\n    reserved 7, 8;\n\n    reserved \"per_filter_config\";\n\n    // Name of the upstream cluster. The cluster must exist in the\n    // :ref:`cluster manager configuration <config_cluster_manager>`.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // An integer between 0 and :ref:`total_weight\n    // <envoy_api_field_config.route.v3.WeightedCluster.total_weight>`. When a request matches the route,\n    // the choice of an upstream cluster is determined by its weight. The sum of weights across all\n    // entries in the clusters array must add up to the total_weight, which defaults to 100.\n    google.protobuf.UInt32Value weight = 2;\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field will be considered for\n    // load balancing. Note that this will be merged with what's provided in\n    // :ref:`RouteAction.metadata_match <envoy_api_field_config.route.v3.RouteAction.metadata_match>`, with\n    // values here taking precedence. 
The filter name should be specified as *envoy.lb*.\n    core.v3.Metadata metadata_match = 3;\n\n    // Specifies a list of headers to be added to requests when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_config.route.v3.Route`, :ref:`envoy_api_msg_config.route.v3.VirtualHost`, and\n    // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.v3.HeaderValueOption request_headers_to_add = 4\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request when\n    // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.\n    repeated string request_headers_to_remove = 9 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // Specifies a list of headers to be added to responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_config.route.v3.Route`, :ref:`envoy_api_msg_config.route.v3.VirtualHost`, and\n    // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. 
For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.v3.HeaderValueOption response_headers_to_add = 5\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of headers to be removed from responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.\n    repeated string response_headers_to_remove = 6 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // The per_filter_config field can be used to provide weighted cluster-specific\n    // configurations for filters. The key should match the filter name, such as\n    // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n    // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n    // for if and how it is utilized.\n    map<string, google.protobuf.Any> typed_per_filter_config = 10;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Specifies the total weight across all clusters. The sum of all cluster weights must equal this\n  // value, which must be greater than 0. Defaults to 100.\n  google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies the runtime key prefix that should be used to construct the\n  // runtime keys associated with each cluster. When the *runtime_key_prefix* is\n  // specified, the router will look for weights associated with each upstream\n  // cluster under the key *runtime_key_prefix* + \".\" + *cluster[i].name* where\n  // *cluster[i]* denotes an entry in the clusters array field. 
If the runtime\n  // key for the cluster does not exist, the value specified in the\n  // configuration file will be used as the default weight. See the :ref:`runtime documentation\n  // <operations_runtime>` for how key names map to the underlying implementation.\n  string runtime_key_prefix = 2;\n}\n\n// [#next-free-field: 13]\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RouteMatch\";\n\n  message GrpcRouteMatchOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteMatch.GrpcRouteMatchOptions\";\n  }\n\n  message TlsContextMatchOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteMatch.TlsContextMatchOptions\";\n\n    // If specified, the route will match against whether or not a certificate is presented.\n    // If not specified, certificate presentation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue presented = 1;\n\n    // If specified, the route will match against whether or not a certificate is validated.\n    // If not specified, certificate validation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue validated = 2;\n  }\n\n  // An extensible message for matching CONNECT requests.\n  message ConnectMatcher {\n  }\n\n  reserved 5, 3;\n\n  reserved \"regex\";\n\n  oneof path_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route is a prefix rule meaning that the prefix must\n    // match the beginning of the *:path* header.\n    string prefix = 1;\n\n    // If specified, the route is an exact path rule meaning that the path must\n    // exactly match the *:path* header once the query string is removed.\n    string path = 2;\n\n    // If specified, the route is a regular expression rule meaning that the\n    // regex must match the *:path* header once the 
query string is removed. The entire path\n    // (without the query string) must match the regex. The rule will not match if only a\n    // subsequence of the *:path* header matches the regex.\n    //\n    // [#next-major-version: In the v3 API we should redo how path specification works such\n    // that we utilize StringMatcher, and additionally have consistent options around whether we\n    // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive\n    // to deprecate the existing options. We should even consider whether we want to do away with\n    // path_specifier entirely and just rely on a set of header matchers which can already match\n    // on :path, etc. The issue with that is it is unclear how to generically deal with query string\n    // stripping. This needs more thought.]\n    type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];\n\n    // If this is used as the matcher, the matcher will only match CONNECT requests.\n    // Note that this will not match HTTP/2 upgrade-style CONNECT requests\n    // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style\n    // upgrades.\n    // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2,\n    // where Extended CONNECT requests may have a path, the path matchers will work if\n    // there is a path present.\n    // Note that CONNECT support is currently considered alpha in Envoy.\n    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.\n    ConnectMatcher connect_matcher = 12;\n  }\n\n  // Indicates that prefix/path matching should be case sensitive. The default\n  // is true.\n  google.protobuf.BoolValue case_sensitive = 4;\n\n  // Indicates that the route should additionally match on a runtime key. Every time the route\n  // is considered for a match, it must also fall under the percentage of matches indicated by\n  // this field. 
For some fraction N/D, a random number in the range [0,D) is selected. If the\n  // number is <= the value of the numerator N, or if the key is not present, the default\n  // value, the router continues to evaluate the remaining match criteria. A runtime_fraction\n  // route configuration can be used to roll out route changes in a gradual manner without full\n  // code/config deploys. Refer to the :ref:`traffic shifting\n  // <config_http_conn_man_route_table_traffic_splitting_shift>` docs for additional documentation.\n  //\n  // .. note::\n  //\n  //    Parsing this field is implemented such that the runtime key's data may be represented\n  //    as a FractionalPercent proto represented as JSON/YAML and may also be represented as an\n  //    integer with the assumption that the value is an integral percentage out of 100. For\n  //    instance, a runtime key lookup returning the value \"42\" would parse as a FractionalPercent\n  //    whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics.\n  core.v3.RuntimeFractionalPercent runtime_fraction = 9;\n\n  // Specifies a set of headers that the route should match on. The router will\n  // check the request’s headers against all the specified headers in the route\n  // config. A match will happen if all the headers in the route are present in\n  // the request with the same values (or based on presence if the value field\n  // is not in the config).\n  repeated HeaderMatcher headers = 6;\n\n  // Specifies a set of URL query parameters on which the route should\n  // match. The router will check the query string from the *path* header\n  // against all the specified query parameters. If the number of specified\n  // query parameters is nonzero, they all must match the *path* header's\n  // query string for a match to occur.\n  repeated QueryParameterMatcher query_parameters = 7;\n\n  // If specified, only gRPC requests will be matched. 
The router will check\n  // that the content-type header has a application/grpc or one of the various\n  // application/grpc+ values.\n  GrpcRouteMatchOptions grpc = 8;\n\n  // If specified, the client tls context will be matched against the defined\n  // match options.\n  //\n  // [#next-major-version: unify with RBAC]\n  TlsContextMatchOptions tls_context = 11;\n}\n\n// [#next-free-field: 12]\nmessage CorsPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.CorsPolicy\";\n\n  reserved 1, 8, 7;\n\n  reserved \"allow_origin\", \"allow_origin_regex\", \"enabled\";\n\n  // Specifies string patterns that match allowed origins. An origin is allowed if any of the\n  // string matchers match.\n  repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11;\n\n  // Specifies the content for the *access-control-allow-methods* header.\n  string allow_methods = 2;\n\n  // Specifies the content for the *access-control-allow-headers* header.\n  string allow_headers = 3;\n\n  // Specifies the content for the *access-control-expose-headers* header.\n  string expose_headers = 4;\n\n  // Specifies the content for the *access-control-max-age* header.\n  string max_age = 5;\n\n  // Specifies whether the resource allows credentials.\n  google.protobuf.BoolValue allow_credentials = 6;\n\n  oneof enabled_specifier {\n    // Specifies the % of requests for which the CORS filter is enabled.\n    //\n    // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS\n    // filter will be enabled for 100% of the requests.\n    //\n    // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is\n    // specified, Envoy will lookup the runtime key to get the percentage of requests to filter.\n    core.v3.RuntimeFractionalPercent filter_enabled = 9;\n  }\n\n  // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not\n  // enforced.\n  
//\n  // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those\n  // fields have to explicitly disable the filter in order for this setting to take effect.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* to determine if it's valid but will not enforce any policies.\n  core.v3.RuntimeFractionalPercent shadow_enabled = 10;\n}\n\n// [#next-free-field: 37]\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RouteAction\";\n\n  enum ClusterNotFoundResponseCode {\n    // HTTP status code - 503 Service Unavailable.\n    SERVICE_UNAVAILABLE = 0;\n\n    // HTTP status code - 404 Not Found.\n    NOT_FOUND = 1;\n  }\n\n  // Configures :ref:`internal redirect <arch_overview_internal_redirects>` behavior.\n  // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.]\n  enum InternalRedirectAction {\n    option deprecated = true;\n\n    PASS_THROUGH_INTERNAL_REDIRECT = 0;\n    HANDLE_INTERNAL_REDIRECT = 1;\n  }\n\n  // The router is capable of shadowing traffic from one cluster to another. The current\n  // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n  // respond before returning the response from the primary cluster. All normal statistics are\n  // collected for the shadow cluster making this feature useful for testing.\n  //\n  // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is\n  // useful for logging. For example, *cluster1* becomes *cluster1-shadow*.\n  //\n  // .. 
note::\n  //\n  //   Shadowing will not be triggered if the primary cluster does not exist.\n  message RequestMirrorPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteAction.RequestMirrorPolicy\";\n\n    reserved 2;\n\n    reserved \"runtime_key\";\n\n    // Specifies the cluster that requests will be mirrored to. The cluster must\n    // exist in the cluster manager configuration.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // If not specified, all requests to the target cluster will be mirrored.\n    //\n    // If specified, this field takes precedence over the `runtime_key` field and requests must also\n    // fall under the percentage of matches indicated by this field.\n    //\n    // For some fraction N/D, a random number in the range [0,D) is selected. If the\n    // number is <= the value of the numerator N, or if the key is not present, the default\n    // value, the request will be mirrored.\n    core.v3.RuntimeFractionalPercent runtime_fraction = 3;\n\n    // Determines if the trace span should be sampled. Defaults to true.\n    google.protobuf.BoolValue trace_sampled = 4;\n  }\n\n  // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer\n  // <arch_overview_load_balancing_types>`.\n  // [#next-free-field: 7]\n  message HashPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteAction.HashPolicy\";\n\n    message Header {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.Header\";\n\n      // The name of the request header that will be used to obtain the hash\n      // key. 
If the request header is not present, no hash will be produced.\n      string header_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // If specified, the request header value will be rewritten and used\n      // to produce the hash key.\n      type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2;\n    }\n\n    // Envoy supports two types of cookie affinity:\n    //\n    // 1. Passive. Envoy takes a cookie that's present in the cookies header and\n    //    hashes on its value.\n    //\n    // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL)\n    //    on the first request from the client in its response to the client,\n    //    based on the endpoint the request gets sent to. The client then\n    //    presents this on the next and all subsequent requests. The hash of\n    //    this is sufficient to ensure these requests get sent to the same\n    //    endpoint. The cookie is generated by hashing the source and\n    //    destination ports and addresses so that multiple independent HTTP2\n    //    streams on the same connection will independently receive the same\n    //    cookie, even if they arrive at the Envoy simultaneously.\n    message Cookie {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.Cookie\";\n\n      // The name of the cookie that will be used to obtain the hash key. If the\n      // cookie is not present and ttl below is not set, no hash will be\n      // produced.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n\n      // If specified, a cookie with the TTL will be generated if the cookie is\n      // not present. If the TTL is present and zero, the generated cookie will\n      // be a session cookie.\n      google.protobuf.Duration ttl = 2;\n\n      // The name of the path for the cookie. 
If no path is specified here, no path\n      // will be set for the cookie.\n      string path = 3;\n    }\n\n    message ConnectionProperties {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.ConnectionProperties\";\n\n      // Hash on source IP address.\n      bool source_ip = 1;\n    }\n\n    message QueryParameter {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.QueryParameter\";\n\n      // The name of the URL query parameter that will be used to obtain the hash\n      // key. If the parameter is not present, no hash will be produced. Query\n      // parameter names are case-sensitive.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n    }\n\n    message FilterState {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.FilterState\";\n\n      // The name of the Object in the per-request filterState, which is an\n      // Envoy::Http::Hashable object. If there is no data associated with the key,\n      // or the stored object is not Envoy::Http::Hashable, no hash will be produced.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n\n    oneof policy_specifier {\n      option (validate.required) = true;\n\n      // Header hash policy.\n      Header header = 1;\n\n      // Cookie hash policy.\n      Cookie cookie = 2;\n\n      // Connection properties hash policy.\n      ConnectionProperties connection_properties = 3;\n\n      // Query parameter hash policy.\n      QueryParameter query_parameter = 5;\n\n      // Filter state hash policy.\n      FilterState filter_state = 6;\n    }\n\n    // The flag that short-circuits the hash computing. 
This field provides a\n    // 'fallback' style of configuration: \"if a terminal policy doesn't work,\n    // fallback to rest of the policy list\", it saves time when the terminal\n    // policy works.\n    //\n    // If true, and there is already a hash computed, ignore rest of the\n    // list of hash polices.\n    // For example, if the following hash methods are configured:\n    //\n    //  ========= ========\n    //  specifier terminal\n    //  ========= ========\n    //  Header A  true\n    //  Header B  false\n    //  Header C  false\n    //  ========= ========\n    //\n    // The generateHash process ends if policy \"header A\" generates a hash, as\n    // it's a terminal policy.\n    bool terminal = 4;\n  }\n\n  // Allows enabling and disabling upgrades on a per-route basis.\n  // This overrides any enabled/disabled upgrade filter chain specified in the\n  // HttpConnectionManager\n  // :ref:`upgrade_configs\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.upgrade_configs>`\n  // but does not affect any custom filter chain specified there.\n  message UpgradeConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteAction.UpgradeConfig\";\n\n    // Configuration for sending data upstream as a raw data payload. This is used for\n    // CONNECT requests, when forwarding CONNECT payload as raw TCP.\n    message ConnectConfig {\n      // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream.\n      core.v3.ProxyProtocolConfig proxy_protocol_config = 1;\n    }\n\n    // The case-insensitive name of this upgrade, e.g. 
\"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type] will be proxied upstream.\n    string upgrade_type = 1\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Determines if upgrades are available on this route. Defaults to true.\n    google.protobuf.BoolValue enabled = 2;\n\n    // Configuration for sending data upstream as a raw data payload. This is used for\n    // CONNECT requests, when forwarding CONNECT payload as raw TCP.\n    // Note that CONNECT support is currently considered alpha in Envoy.\n    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.\n    ConnectConfig connect_config = 3;\n  }\n\n  message MaxStreamDuration {\n    // Specifies the maximum duration allowed for streams on the route. If not specified, the value\n    // from the :ref:`max_stream_duration\n    // <envoy_api_field_config.core.v3.HttpProtocolOptions.max_stream_duration>` field in\n    // :ref:`HttpConnectionManager.common_http_protocol_options\n    // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options>`\n    // is used. If this field is set explicitly to zero, any\n    // HttpConnectionManager max_stream_duration timeout will be disabled for\n    // this route.\n    google.protobuf.Duration max_stream_duration = 1;\n\n    // If present, and the request contains a `grpc-timeout header\n    // <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, use that value as the\n    // *max_stream_duration*, but limit the applied timeout to the maximum value specified here.\n    // If set to 0, the `grpc-timeout` header is used without modification.\n    google.protobuf.Duration grpc_timeout_header_max = 2;\n\n    // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by\n    // subtracting the provided duration from the header. 
This is useful for allowing Envoy to set\n    // its global timeout to be less than that of the deadline imposed by the calling client, which\n    // makes it more likely that Envoy will handle the timeout instead of having the call canceled\n    // by the client. If, after applying the offset, the resulting timeout is zero or negative,\n    // the stream will timeout immediately.\n    google.protobuf.Duration grpc_timeout_header_offset = 3;\n  }\n\n  reserved 12, 18, 19, 16, 22, 21, 10;\n\n  reserved \"request_mirror_policy\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // HTTP header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist, Envoy will\n    // return a 404 response.\n    //\n    // .. attention::\n    //\n    //   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1\n    //   *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n    string cluster_header = 2\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster. 
See\n    // :ref:`traffic splitting <config_http_conn_man_route_table_traffic_splitting_split>`\n    // for additional documentation.\n    WeightedCluster weighted_clusters = 3;\n  }\n\n  // The HTTP status code to use when configured cluster is not found.\n  // The default response code is 503 Service Unavailable.\n  ClusterNotFoundResponseCode cluster_not_found_response_code = 20\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n  // in the upstream cluster with metadata matching what's set in this field will be considered\n  // for load balancing. If using :ref:`weighted_clusters\n  // <envoy_api_field_config.route.v3.RouteAction.weighted_clusters>`, metadata will be merged, with values\n  // provided there taking precedence. The filter name should be specified as *envoy.lb*.\n  core.v3.Metadata metadata_match = 4;\n\n  // Indicates that during forwarding, the matched prefix (or path) should be\n  // swapped with this value. This option allows application URLs to be rooted\n  // at a different path from those exposed at the reverse proxy layer. The router filter will\n  // place the original path before rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of *prefix_rewrite* or\n  // :ref:`regex_rewrite <envoy_api_field_config.route.v3.RouteAction.regex_rewrite>`\n  // may be specified.\n  //\n  // .. attention::\n  //\n  //   Pay careful attention to the use of trailing slashes in the\n  //   :ref:`route's match <envoy_api_field_config.route.v3.Route.match>` prefix value.\n  //   Stripping a prefix from a path requires multiple Routes to handle all cases. For example,\n  //   rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single\n  //   :ref:`Route <envoy_api_msg_config.route.v3.Route>`, as shown by the below config entries:\n  //\n  //   .. 
code-block:: yaml\n  //\n  //     - match:\n  //         prefix: \"/prefix/\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //     - match:\n  //         prefix: \"/prefix\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //\n  //   Having above entries in the config, requests to */prefix* will be stripped to */*, while\n  //   requests to */prefix/etc* will be stripped to */etc*.\n  string prefix_rewrite = 5\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Indicates that during forwarding, portions of the path that match the\n  // pattern should be rewritten, even allowing the substitution of capture\n  // groups from the pattern into the new path as specified by the rewrite\n  // substitution string. This is useful to allow application paths to be\n  // rewritten in a way that is aware of segments with variable content like\n  // identifiers. The router filter will place the original path as it was\n  // before the rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of :ref:`prefix_rewrite <envoy_api_field_config.route.v3.RouteAction.prefix_rewrite>`\n  // or *regex_rewrite* may be specified.\n  //\n  // Examples using Google's `RE2 <https://github.com/google/re2>`_ engine:\n  //\n  // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution\n  //   string of ``\\2/instance/\\1`` would transform ``/service/foo/v1/api``\n  //   into ``/v1/api/instance/foo``.\n  //\n  // * The pattern ``one`` paired with a substitution string of ``two`` would\n  //   transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``.\n  //\n  // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of\n  //   ``\\1two\\2`` would replace only the first occurrence of ``one``,\n  //   transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``.\n  //\n  // * The pattern ``(?i)/xxx/`` paired with a 
substitution string of ``/yyy/``\n  //   would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to\n  //   ``/aaa/yyy/bbb``.\n  type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32;\n\n  oneof host_rewrite_specifier {\n    // Indicates that during forwarding, the host header will be swapped with\n    // this value.\n    string host_rewrite_literal = 6\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the hostname of the upstream host chosen by the cluster manager. This\n    // option is applicable only when the destination cluster for a route is of\n    // type *strict_dns* or *logical_dns*. Setting this to true with other cluster\n    // types has no effect.\n    google.protobuf.BoolValue auto_host_rewrite = 7;\n\n    // Indicates that during forwarding, the host header will be swapped with the content of given\n    // downstream or :ref:`custom <config_http_conn_man_headers_custom_request_headers>` header.\n    // If header value is empty, host header is left intact.\n    //\n    // .. attention::\n    //\n    //   Pay attention to the potential security implications of using this option. Provided header\n    //   must come from trusted source.\n    string host_rewrite_header = 29\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the result of the regex substitution executed on path value with query and fragment removed.\n    // This is useful for transitioning variable content between path segment and subdomain.\n    //\n    // For example with the following config:\n    //\n    //   .. 
code-block:: yaml\n    //\n    //     host_rewrite_path_regex:\n    //       pattern:\n    //         google_re2: {}\n    //         regex: \"^/(.+)/.+$\"\n    //       substitution: \\1\n    //\n    // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`.\n    type.matcher.v3.RegexMatchAndSubstitute host_rewrite_path_regex = 35;\n  }\n\n  // Specifies the upstream timeout for the route. If not specified, the default is 15s. This\n  // spans between the point at which the entire downstream request (i.e. end-of-stream) has been\n  // processed and when the upstream response has been completely processed. A value of 0 will\n  // disable the route's timeout.\n  //\n  // .. note::\n  //\n  //   This timeout includes all retries. See also\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //   :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration timeout = 8;\n\n  // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout,\n  // although the connection manager wide :ref:`stream_idle_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout>`\n  // will still apply. A value of 0 will completely disable the route's idle timeout, even if a\n  // connection manager stream idle timeout is configured.\n  //\n  // The idle timeout is distinct to :ref:`timeout\n  // <envoy_api_field_config.route.v3.RouteAction.timeout>`, which provides an upper bound\n  // on the upstream response time; :ref:`idle_timeout\n  // <envoy_api_field_config.route.v3.RouteAction.idle_timeout>` instead bounds the amount\n  // of time the request's stream may be idle.\n  //\n  // After header decoding, the idle timeout will apply on downstream and\n  // upstream request events. 
Each time an encode/decode event for headers or\n  // data is processed for the stream, the timer will be reset. If the timeout\n  // fires, the stream is terminated with a 408 Request Timeout error code if no\n  // upstream response header has been received, otherwise a stream reset\n  // occurs.\n  google.protobuf.Duration idle_timeout = 24;\n\n  // Indicates that the route has a retry policy. Note that if this is set,\n  // it'll take precedence over the virtual host level retry policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  RetryPolicy retry_policy = 9;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. Note that if this is set, it'll take\n  // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged,\n  // most internal one becomes the enforced policy). :ref:`Retry policy <envoy_api_field_config.route.v3.VirtualHost.retry_policy>`\n  // should not be set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 33;\n\n  // Indicates that the route has request mirroring policies.\n  repeated RequestMirrorPolicy request_mirror_policies = 30;\n\n  // Optionally specifies the :ref:`routing priority <arch_overview_http_routing_priority>`.\n  core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies a set of rate limit configurations that could be applied to the\n  // route.\n  repeated RateLimit rate_limits = 13;\n\n  // Specifies if the rate limit filter should include the virtual host rate\n  // limits. By default, if the route configured rate limits, the virtual host\n  // :ref:`rate_limits <envoy_api_field_config.route.v3.VirtualHost.rate_limits>` are not applied to the\n  // request.\n  //\n  // This field is deprecated. 
Please use :ref:`vh_rate_limits <envoy_v3_api_field_extensions.filters.http.ratelimit.v3.RateLimitPerRoute.vh_rate_limits>`\n  google.protobuf.BoolValue include_vh_rate_limits = 14 [deprecated = true];\n\n  // Specifies a list of hash policies to use for ring hash load balancing. Each\n  // hash policy is evaluated individually and the combined result is used to\n  // route the request. The method of combination is deterministic such that\n  // identical lists of hash policies will produce the same hash. Since a hash\n  // policy examines specific parts of a request, it can fail to produce a hash\n  // (i.e. if the hashed header is not present). If (and only if) all configured\n  // hash policies fail to generate a hash, no hash will be produced for\n  // the route. In this case, the behavior is the same as if no hash policies\n  // were specified (i.e. the ring hash load balancer will choose a random\n  // backend). If a hash policy has the \"terminal\" attribute set to true, and\n  // there is already a hash generated, the hash is returned immediately,\n  // ignoring the rest of the hash policy list.\n  repeated HashPolicy hash_policy = 15;\n\n  // Indicates that the route has a CORS policy.\n  CorsPolicy cors = 17;\n\n  // Deprecated by :ref:`grpc_timeout_header_max <envoy_api_field_config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max>`\n  // If present, and the request is a gRPC request, use the\n  // `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_,\n  // or its default value (infinity) instead of\n  // :ref:`timeout <envoy_api_field_config.route.v3.RouteAction.timeout>`, but limit the applied timeout\n  // to the maximum value specified here. If configured as 0, the maximum allowed timeout for\n  // gRPC requests is infinity. 
If not configured at all, the `grpc-timeout` header is not used\n  // and gRPC requests time out like any other requests using\n  // :ref:`timeout <envoy_api_field_config.route.v3.RouteAction.timeout>` or its default.\n  // This can be used to prevent unexpected upstream request timeouts due to potentially long\n  // time gaps between gRPC request and response in gRPC streaming mode.\n  //\n  // .. note::\n  //\n  //    If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes\n  //    precedence over `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, when\n  //    both are present. See also\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //    :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration max_grpc_timeout = 23 [deprecated = true];\n\n  // Deprecated by :ref:`grpc_timeout_header_offset <envoy_api_field_config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_offset>`.\n  // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting\n  // the provided duration from the header. This is useful in allowing Envoy to set its global\n  // timeout to be less than that of the deadline imposed by the calling client, which makes it more\n  // likely that Envoy will handle the timeout instead of having the call canceled by the client.\n  // The offset will only be applied if the provided grpc_timeout is greater than the offset. 
This\n  // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning\n  // infinity).\n  google.protobuf.Duration grpc_timeout_offset = 28 [deprecated = true];\n\n  repeated UpgradeConfig upgrade_configs = 25;\n\n  // If present, Envoy will try to follow an upstream redirect response instead of proxying the\n  // response back to the downstream. An upstream redirect response is defined\n  // by :ref:`redirect_response_codes\n  // <envoy_api_field_config.route.v3.InternalRedirectPolicy.redirect_response_codes>`.\n  InternalRedirectPolicy internal_redirect_policy = 34;\n\n  InternalRedirectAction internal_redirect_action = 26 [deprecated = true];\n\n  // An internal redirect is handled, iff the number of previous internal redirects that a\n  // downstream request has encountered is lower than this value, and\n  // :ref:`internal_redirect_action <envoy_api_field_config.route.v3.RouteAction.internal_redirect_action>`\n  // is set to :ref:`HANDLE_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_config.route.v3.RouteAction.InternalRedirectAction.HANDLE_INTERNAL_REDIRECT>`\n  // In the case where a downstream request is bounced among multiple routes by internal redirect,\n  // the first route that hits this threshold, or has\n  // :ref:`internal_redirect_action <envoy_api_field_config.route.v3.RouteAction.internal_redirect_action>`\n  // set to\n  // :ref:`PASS_THROUGH_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_config.route.v3.RouteAction.InternalRedirectAction.PASS_THROUGH_INTERNAL_REDIRECT>`\n  // will pass the redirect back to downstream.\n  //\n  // If not specified, at most one redirect will be followed.\n  google.protobuf.UInt32Value max_internal_redirects = 31 [deprecated = true];\n\n  // Indicates that the route has a hedge policy. 
Note that if this is set,\n  // it'll take precedence over the virtual host level hedge policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  HedgePolicy hedge_policy = 27;\n\n  // Specifies the maximum stream duration for this route.\n  MaxStreamDuration max_stream_duration = 36;\n}\n\n// HTTP retry :ref:`architecture overview <arch_overview_http_routing_retry>`.\n// [#next-free-field: 12]\nmessage RetryPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RetryPolicy\";\n\n  enum ResetHeaderFormat {\n    SECONDS = 0;\n    UNIX_TIMESTAMP = 1;\n  }\n\n  message RetryPriority {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RetryPolicy.RetryPriority\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryHostPredicate {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RetryPolicy.RetryHostPredicate\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryBackOff {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RetryPolicy.RetryBackOff\";\n\n    // Specifies the base interval between retries. This parameter is required and must be greater\n    // than zero. Values less than 1 ms are rounded up to 1 ms.\n    // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's\n    // back-off algorithm.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n\n    // Specifies the maximum interval between retries. 
This parameter is optional, but must be\n    // greater than or equal to the `base_interval` if set. The default is 10 times the\n    // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion\n    // of Envoy's back-off algorithm.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  message ResetHeader {\n    string name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // A retry back-off strategy that applies when the upstream server rate limits\n  // the request.\n  //\n  // Given this configuration:\n  //\n  // .. code-block:: yaml\n  //\n  //   rate_limited_retry_back_off:\n  //     reset_headers:\n  //     - name: Retry-After\n  //       format: SECONDS\n  //     - name: X-RateLimit-Reset\n  //       format: UNIX_TIMESTAMP\n  //     max_interval: \"300s\"\n  //\n  // The following algorithm will apply:\n  //\n  //  1. If the response contains the header ``Retry-After`` its value must be on\n  //     the form ``120`` (an integer that represents the number of seconds to\n  //     wait before retrying). If so, this value is used as the back-off interval.\n  //  2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its\n  //     value must be on the form ``1595320702`` (an integer that represents the\n  //     point in time at which to retry, as a Unix timestamp in seconds). If so,\n  //     the current time is subtracted from this value and the result is used as\n  //     the back-off interval.\n  //  3. 
Otherwise, Envoy will use the default\n  //     :ref:`exponential back-off <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_back_off>`\n  //     strategy.\n  //\n  // No matter which format is used, if the resulting back-off interval exceeds\n  // ``max_interval`` it is discarded and the next header in ``reset_headers``\n  // is tried. If a request timeout is configured for the route it will further\n  // limit how long the request will be allowed to run.\n  //\n  // To prevent many clients retrying at the same point in time jitter is added\n  // to the back-off interval, so the resulting interval is decided by taking:\n  // ``random(interval, interval * 1.5)``.\n  //\n  // .. attention::\n  //\n  //   Configuring ``rate_limited_retry_back_off`` will not by itself cause a request\n  //   to be retried. You will still need to configure the right retry policy to match\n  //   the responses from the upstream server.\n  message RateLimitedRetryBackOff {\n    // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``)\n    // to match against the response. Headers are tried in order, and matched case\n    // insensitive. The first header to be parsed successfully is used. If no headers\n    // match the default exponential back-off is used instead.\n    repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}];\n\n    // Specifies the maximum back off interval that Envoy will allow. If a reset\n    // header contains an interval longer than this then it will be discarded and\n    // the next header will be tried. Defaults to 300 seconds.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  // Specifies the conditions under which retry takes place. 
These are the same\n  // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and\n  // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`.\n  string retry_on = 1;\n\n  // Specifies the allowed number of retries. This parameter is optional and\n  // defaults to 1. These are the same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-max-retries`.\n  google.protobuf.UInt32Value num_retries = 2\n      [(udpa.annotations.field_migrate).rename = \"max_retries\"];\n\n  // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The\n  // same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.\n  //\n  // .. note::\n  //\n  //   If left unspecified, Envoy will use the global\n  //   :ref:`route timeout <envoy_api_field_config.route.v3.RouteAction.timeout>` for the request.\n  //   Consequently, when using a :ref:`5xx <config_http_filters_router_x-envoy-retry-on>` based\n  //   retry policy, a request that times out will not be retried as the total timeout budget\n  //   would have been exhausted.\n  google.protobuf.Duration per_try_timeout = 3;\n\n  // Specifies an implementation of a RetryPriority which is used to determine the\n  // distribution of load across priorities used for retries. Refer to\n  // :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more details.\n  RetryPriority retry_priority = 4;\n\n  // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host\n  // for retries. 
If any of the predicates reject the host, host selection will be reattempted.\n  // Refer to :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more\n  // details.\n  repeated RetryHostPredicate retry_host_predicate = 5;\n\n  // The maximum number of times host selection will be reattempted before giving up, at which\n  // point the host that was last selected will be routed to. If unspecified, this will default to\n  // retrying once.\n  int64 host_selection_retry_max_attempts = 6;\n\n  // HTTP status codes that should trigger a retry in addition to those specified by retry_on.\n  repeated uint32 retriable_status_codes = 7;\n\n  // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the\n  // default base interval is 25 milliseconds or, if set, the current value of the\n  // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times\n  // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries`\n  // describes Envoy's back-off algorithm.\n  RetryBackOff retry_back_off = 8;\n\n  // Specifies parameters that control a retry back-off strategy that is used\n  // when the request is rate limited by the upstream server. The server may\n  // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to\n  // provide feedback to the client on how long to wait before retrying. If\n  // configured, this back-off strategy will be used instead of the\n  // default exponential back off strategy (configured using `retry_back_off`)\n  // whenever a response includes the matching headers.\n  RateLimitedRetryBackOff rate_limited_retry_back_off = 11;\n\n  // HTTP response headers that trigger a retry if present in the response. 
A retry will be\n  // triggered if any of the header matches match the upstream response headers.\n  // The field is only consulted if 'retriable-headers' retry policy is active.\n  repeated HeaderMatcher retriable_headers = 9;\n\n  // HTTP headers which must be present in the request for retries to be attempted.\n  repeated HeaderMatcher retriable_request_headers = 10;\n}\n\n// HTTP request hedging :ref:`architecture overview <arch_overview_http_routing_hedging>`.\nmessage HedgePolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.HedgePolicy\";\n\n  // Specifies the number of initial requests that should be sent upstream.\n  // Must be at least 1.\n  // Defaults to 1.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies a probability that an additional upstream request should be sent\n  // on top of what is specified by initial_requests.\n  // Defaults to 0.\n  // [#not-implemented-hide:]\n  type.v3.FractionalPercent additional_request_chance = 2;\n\n  // Indicates that a hedged request should be sent when the per-try timeout\n  // is hit. 
This will only occur if the retry policy also indicates that a\n  // timed out request should be retried.\n  // Once a timed out request is retried due to per try timeout, the router\n  // filter will ensure that it is not retried again even if the returned\n  // response headers would otherwise be retried according the specified\n  // :ref:`RetryPolicy <envoy_api_msg_config.route.v3.RetryPolicy>`.\n  // Defaults to false.\n  bool hedge_on_per_try_timeout = 3;\n}\n\n// [#next-free-field: 9]\nmessage RedirectAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RedirectAction\";\n\n  enum RedirectResponseCode {\n    // Moved Permanently HTTP Status Code - 301.\n    MOVED_PERMANENTLY = 0;\n\n    // Found HTTP Status Code - 302.\n    FOUND = 1;\n\n    // See Other HTTP Status Code - 303.\n    SEE_OTHER = 2;\n\n    // Temporary Redirect HTTP Status Code - 307.\n    TEMPORARY_REDIRECT = 3;\n\n    // Permanent Redirect HTTP Status Code - 308.\n    PERMANENT_REDIRECT = 4;\n  }\n\n  // When the scheme redirection take place, the following rules apply:\n  //  1. If the source URI scheme is `http` and the port is explicitly\n  //     set to `:80`, the port will be removed after the redirection\n  //  2. 
If the source URI scheme is `https` and the port is explicitly\n  //     set to `:443`, the port will be removed after the redirection\n  oneof scheme_rewrite_specifier {\n    // The scheme portion of the URL will be swapped with \"https\".\n    bool https_redirect = 4;\n\n    // The scheme portion of the URL will be swapped with this value.\n    string scheme_redirect = 7;\n  }\n\n  // The host portion of the URL will be swapped with this value.\n  string host_redirect = 1\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // The port value of the URL will be swapped with this value.\n  uint32 port_redirect = 8;\n\n  oneof path_rewrite_specifier {\n    // The path portion of the URL will be swapped with this value.\n    // Please note that query string in path_redirect will override the\n    // request's query string and will not be stripped.\n    //\n    // For example, let's say we have the following routes:\n    //\n    // - match: { path: \"/old-path-1\" }\n    //   redirect: { path_redirect: \"/new-path-1\" }\n    // - match: { path: \"/old-path-2\" }\n    //   redirect: { path_redirect: \"/new-path-2\", strip-query: \"true\" }\n    // - match: { path: \"/old-path-3\" }\n    //   redirect: { path_redirect: \"/new-path-3?foo=1\", strip_query: \"true\" }\n    //\n    // 1. if request uri is \"/old-path-1?bar=1\", users will be redirected to \"/new-path-1?bar=1\"\n    // 2. if request uri is \"/old-path-2?bar=1\", users will be redirected to \"/new-path-2\"\n    // 3. if request uri is \"/old-path-3?bar=1\", users will be redirected to \"/new-path-3?foo=1\"\n    string path_redirect = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during redirection, the matched prefix (or path)\n    // should be swapped with this value. This option allows redirect URLs be dynamically created\n    // based on the request.\n    //\n    // .. 
attention::\n    //\n    //   Pay attention to the use of trailing slashes as mentioned in\n    //   :ref:`RouteAction's prefix_rewrite <envoy_api_field_config.route.v3.RouteAction.prefix_rewrite>`.\n    string prefix_rewrite = 5\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // The HTTP status code to use in the redirect response. The default response\n  // code is MOVED_PERMANENTLY (301).\n  RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // Indicates that during redirection, the query portion of the URL will\n  // be removed. Default value is false.\n  bool strip_query = 6;\n}\n\nmessage DirectResponseAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.route.DirectResponseAction\";\n\n  // Specifies the HTTP response status to be returned.\n  uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}];\n\n  // Specifies the content of the response body. If this setting is omitted,\n  // no body is included in the generated response.\n  //\n  // .. note::\n  //\n  //   Headers can be specified using *response_headers_to_add* in the enclosing\n  //   :ref:`envoy_api_msg_config.route.v3.Route`, :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` or\n  //   :ref:`envoy_api_msg_config.route.v3.VirtualHost`.\n  core.v3.DataSource body = 2;\n}\n\nmessage Decorator {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.Decorator\";\n\n  // The operation name associated with the request matched to this route. If tracing is\n  // enabled, this information will be used as the span name reported for this request.\n  //\n  // .. 
note::\n  //\n  //   For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden\n  //   by the :ref:`x-envoy-decorator-operation\n  //   <config_http_filters_router_x-envoy-decorator-operation>` header.\n  string operation = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Whether the decorated details should be propagated to the other party. The default is true.\n  google.protobuf.BoolValue propagate = 2;\n}\n\nmessage Tracing {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.Tracing\";\n\n  // Target percentage of requests managed by this HTTP connection manager that will be force\n  // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n  // header is set. This field is a direct analog for the runtime variable\n  // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n  // <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent client_sampling = 1;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be randomly\n  // selected for trace generation, if not requested by the client or not forced. This field is\n  // a direct analog for the runtime variable 'tracing.random_sampling' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent random_sampling = 2;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be traced\n  // after all other sampling checks have been applied (client-directed, force tracing, random\n  // sampling). This field functions as an upper limit on the total configured sampling rate. For\n  // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n  // of client requests with the appropriate headers to be force traced. 
This field is a direct\n  // analog for the runtime variable 'tracing.global_enabled' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent overall_sampling = 3;\n\n  // A list of custom tags with unique tag name to create tags for the active span.\n  // It will take effect after merging with the :ref:`corresponding configuration\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.custom_tags>`\n  // configured in the HTTP connection manager. If two tags with the same name are configured\n  // each in the HTTP connection manager and the route level, the one configured here takes\n  // priority.\n  repeated type.tracing.v3.CustomTag custom_tags = 4;\n}\n\n// A virtual cluster is a way of specifying a regex matching rule against\n// certain important endpoints such that statistics are generated explicitly for\n// the matched requests. The reason this is useful is that when doing\n// prefix/path matching Envoy does not always know what the application\n// considers to be an endpoint. Thus, it’s impossible for Envoy to generically\n// emit per endpoint statistics. However, often systems have highly critical\n// endpoints that they wish to get “perfect” statistics on. Virtual cluster\n// statistics are perfect in the sense that they are emitted on the downstream\n// side such that they include network level failures.\n//\n// Documentation for :ref:`virtual cluster statistics <config_http_filters_router_vcluster_stats>`.\n//\n// .. note::\n//\n//    Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for\n//    every application endpoint. 
This is both not easily maintainable and as well the matching and\n//    statistics output are not free.\nmessage VirtualCluster {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.VirtualCluster\";\n\n  reserved 1, 3;\n\n  reserved \"pattern\", \"method\";\n\n  // Specifies a list of header matchers to use for matching requests. Each specified header must\n  // match. The pseudo-headers `:path` and `:method` can be used to match the request path and\n  // method, respectively.\n  repeated HeaderMatcher headers = 4;\n\n  // Specifies the name of the virtual cluster. The virtual cluster name as well\n  // as the virtual host name are used when emitting statistics. The statistics are emitted by the\n  // router filter and are documented :ref:`here <config_http_filters_router_stats>`.\n  string name = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Global rate limiting :ref:`architecture overview <arch_overview_global_rate_limit>`.\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RateLimit\";\n\n  // [#next-free-field: 8]\n  message Action {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RateLimit.Action\";\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"source_cluster\", \"<local service cluster>\")\n    //\n    // <local service cluster> is derived from the :option:`--service-cluster` option.\n    message SourceCluster {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.SourceCluster\";\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"destination_cluster\", \"<routed target cluster>\")\n    //\n    // Once a request matches against a route table rule, a routed cluster is determined by one of\n    // the following :ref:`route table configuration <envoy_api_msg_config.route.v3.RouteConfiguration>`\n    // settings:\n    //\n    // * :ref:`cluster <envoy_api_field_config.route.v3.RouteAction.cluster>` indicates the upstream cluster\n    //   to route to.\n    // * :ref:`weighted_clusters <envoy_api_field_config.route.v3.RouteAction.weighted_clusters>`\n    //   chooses a cluster randomly from a set of clusters with attributed weight.\n    // * :ref:`cluster_header <envoy_api_field_config.route.v3.RouteAction.cluster_header>` indicates which\n    //   header in the request contains the target cluster.\n    message DestinationCluster {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.DestinationCluster\";\n    }\n\n    // The following descriptor entry is appended when a header contains a key that matches the\n    // *header_name*:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<header_value_queried_from_header>\")\n    message RequestHeaders {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.RequestHeaders\";\n\n      // The header name to be queried from the request headers. The header’s\n      // value is used to populate the value of the descriptor entry for the\n      // descriptor_key.\n      string header_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // The key to use in the descriptor entry.\n      string descriptor_key = 2 [(validate.rules).string = {min_len: 1}];\n\n      // If set to true, Envoy skips the descriptor while calling rate limiting service\n      // when header is not present in the request. 
By default it skips calling the\n      // rate limiting service if this header is not present in the request.\n      bool skip_if_absent = 3;\n    }\n\n    // The following descriptor entry is appended to the descriptor and is populated using the\n    // trusted address from :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"remote_address\", \"<trusted address from x-forwarded-for>\")\n    message RemoteAddress {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.RemoteAddress\";\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"generic_key\", \"<descriptor_value>\")\n    message GenericKey {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.GenericKey\";\n\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];\n\n      // An optional key to use in the descriptor entry. If not set it defaults\n      // to 'generic_key' as the descriptor key.\n      string descriptor_key = 2;\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"header_match\", \"<descriptor_value>\")\n    message HeaderValueMatch {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.HeaderValueMatch\";\n\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];\n\n      // If set to true, the action will append a descriptor entry when the\n      // request matches the headers. If set to false, the action will append a\n      // descriptor entry when the request does not match the headers. 
The\n      // default value is true.\n      google.protobuf.BoolValue expect_match = 2;\n\n      // Specifies a set of headers that the rate limit action should match\n      // on. The action will check the request’s headers against all the\n      // specified headers in the config. A match will happen if all the\n      // headers in the config are present in the request with the same values\n      // (or based on presence if the value field is not in the config).\n      repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    // The following descriptor entry is appended when the dynamic metadata contains a key value:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<value_queried_from_metadata>\")\n    message DynamicMetaData {\n      // The key to use in the descriptor entry.\n      string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Metadata struct that defines the key and path to retrieve the string value. A match will\n      // only happen if the value in the dynamic metadata is of type string.\n      type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];\n\n      // An optional value to use if *metadata_key* is empty. 
If not set and\n      // no value is present under the metadata_key then no descriptor is generated.\n      string default_value = 3;\n    }\n\n    oneof action_specifier {\n      option (validate.required) = true;\n\n      // Rate limit on source cluster.\n      SourceCluster source_cluster = 1;\n\n      // Rate limit on destination cluster.\n      DestinationCluster destination_cluster = 2;\n\n      // Rate limit on request headers.\n      RequestHeaders request_headers = 3;\n\n      // Rate limit on remote address.\n      RemoteAddress remote_address = 4;\n\n      // Rate limit on a generic key.\n      GenericKey generic_key = 5;\n\n      // Rate limit on the existence of request headers.\n      HeaderValueMatch header_value_match = 6;\n\n      // Rate limit on dynamic metadata.\n      DynamicMetaData dynamic_metadata = 7;\n    }\n  }\n\n  message Override {\n    // Fetches the override from the dynamic metadata.\n    message DynamicMetadata {\n      // Metadata struct that defines the key and path to retrieve the struct value.\n      // The value must be a struct containing an integer \"requests_per_unit\" property\n      // and a \"unit\" property with a value parseable to :ref:`RateLimitUnit\n      // enum <envoy_api_enum_type.v3.RateLimitUnit>`\n      type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}];\n    }\n\n    oneof override_specifier {\n      option (validate.required) = true;\n\n      // Limit override from dynamic metadata.\n      DynamicMetadata dynamic_metadata = 1;\n    }\n  }\n\n  // Refers to the stage set in the filter. The rate limit configuration only\n  // applies to filters with the same stage number. The default stage number is\n  // 0.\n  //\n  // .. 
note::\n  //\n  //   The filter supports a range of 0 - 10 inclusively for stage numbers.\n  google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}];\n\n  // The key to be set in runtime to disable this rate limit configuration.\n  string disable_key = 2;\n\n  // A list of actions that are to be applied for this rate limit configuration.\n  // Order matters as the actions are processed sequentially and the descriptor\n  // is composed by appending descriptor entries in that sequence. If an action\n  // cannot append a descriptor entry, no descriptor is generated for the\n  // configuration. See :ref:`composing actions\n  // <config_http_filters_rate_limit_composing_actions>` for additional documentation.\n  repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}];\n\n  // An optional limit override to be appended to the descriptor produced by this\n  // rate limit configuration. If the override value is invalid or cannot be resolved\n  // from metadata, no override is provided. See :ref:`rate limit override\n  // <config_http_filters_rate_limit_rate_limit_override>` for more information.\n  Override limit = 4;\n}\n\n// .. attention::\n//\n//   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host*\n//   header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n//\n// .. attention::\n//\n//   To route on HTTP method, use the special HTTP/2 *:method* header. This works for both\n//   HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g.,\n//\n//   .. code-block:: json\n//\n//     {\n//       \"name\": \":method\",\n//       \"exact_match\": \"POST\"\n//     }\n//\n// .. attention::\n//   In the absence of any header match specifier, match will default to :ref:`present_match\n//   <envoy_api_field_config.route.v3.HeaderMatcher.present_match>`. 
i.e, a request that has the :ref:`name\n//   <envoy_api_field_config.route.v3.HeaderMatcher.name>` header will match, regardless of the header's\n//   value.\n//\n//  [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.]\n// [#next-free-field: 13]\nmessage HeaderMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.HeaderMatcher\";\n\n  reserved 2, 3, 5;\n\n  reserved \"regex_match\";\n\n  // Specifies the name of the header in the request.\n  string name = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Specifies how the header match will be performed to route the request.\n  oneof header_match_specifier {\n    // If specified, header match will be performed based on the value of the header.\n    string exact_match = 4;\n\n    // If specified, this regex string is a regular expression rule which implies the entire request\n    // header value must match the regex. The rule will not match if only a subsequence of the\n    // request header value matches the regex.\n    type.matcher.v3.RegexMatcher safe_regex_match = 11;\n\n    // If specified, header match will be performed based on range.\n    // The rule will match if the request header value is within this range.\n    // The entire request header value must represent an integer in base 10 notation: consisting of\n    // an optional plus or minus sign followed by a sequence of digits. The rule will not match if\n    // the header value does not represent an integer. 
Match will fail for empty values, floating\n    // point numbers or if only a subsequence of the header value is an integer.\n    //\n    // Examples:\n    //\n    // * For range [-10,0), route will match for header value -1, but not for 0, \"somestring\", 10.9,\n    //   \"-1somestring\"\n    type.v3.Int64Range range_match = 6;\n\n    // If specified, header match will be performed based on whether the header is in the\n    // request.\n    bool present_match = 7;\n\n    // If specified, header match will be performed based on the prefix of the header value.\n    // Note: empty prefix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.\n    string prefix_match = 9 [(validate.rules).string = {min_len: 1}];\n\n    // If specified, header match will be performed based on the suffix of the header value.\n    // Note: empty suffix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.\n    string suffix_match = 10 [(validate.rules).string = {min_len: 1}];\n\n    // If specified, header match will be performed based on whether the header value contains\n    // the given value or not.\n    // Note: empty contains match is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*.\n    string contains_match = 12 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // If specified, the match result will be inverted before checking. 
Defaults to false.\n  //\n  // Examples:\n  //\n  // * The regex ``\\d{3}`` does not match the value *1234*, so it will match when inverted.\n  // * The range [-10,0) will match the value -1, so it will not match when inverted.\n  bool invert_match = 8;\n}\n\n// Query parameter matching treats the query string of a request's :path header\n// as an ampersand-separated list of keys and/or key=value elements.\n// [#next-free-field: 7]\nmessage QueryParameterMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.route.QueryParameterMatcher\";\n\n  reserved 3, 4;\n\n  reserved \"value\", \"regex\";\n\n  // Specifies the name of a key that must be present in the requested\n  // *path*'s query string.\n  string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}];\n\n  oneof query_parameter_match_specifier {\n    // Specifies whether a query parameter value should match against a string.\n    type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}];\n\n    // Specifies whether a query parameter should be present.\n    bool present_match = 6;\n  }\n}\n\n// HTTP Internal Redirect :ref:`architecture overview <arch_overview_internal_redirects>`.\nmessage InternalRedirectPolicy {\n  // An internal redirect is not handled, unless the number of previous internal redirects that a\n  // downstream request has encountered is lower than this value.\n  // In the case where a downstream request is bounced among multiple routes by internal redirect,\n  // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy\n  // <envoy_api_field_config.route.v3.RouteAction.internal_redirect_policy>`\n  // will pass the redirect back to downstream.\n  //\n  // If not specified, at most one redirect will be followed.\n  google.protobuf.UInt32Value max_internal_redirects = 1;\n\n  // Defines what upstream response codes are allowed to trigger internal redirect. 
If unspecified,\n  // only 302 will be treated as internal redirect.\n  // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored.\n  repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}];\n\n  // Specifies a list of predicates that are queried when an upstream response is deemed\n  // to trigger an internal redirect by all other criteria. Any predicate in the list can reject\n  // the redirect, causing the response to be proxied to downstream.\n  repeated core.v3.TypedExtensionConfig predicates = 3;\n\n  // Allow internal redirect to follow a target URI with a different scheme than the value of\n  // x-forwarded-proto. The default is false.\n  bool allow_cross_scheme_redirect = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/route/v3/scoped_route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v3\";\noption java_outer_classname = \"ScopedRouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP scoped routing configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// Specifies a routing scope, which associates a\n// :ref:`Key<envoy_api_msg_config.route.v3.ScopedRouteConfiguration.Key>` to a\n// :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` (identified by its resource name).\n//\n// The HTTP connection manager builds up a table consisting of these Key to\n// RouteConfiguration mappings, and looks up the RouteConfiguration to use per\n// request according to the algorithm specified in the\n// :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder>`\n// assigned to the HttpConnectionManager.\n//\n// For example, with the following configurations (in YAML):\n//\n// HttpConnectionManager config:\n//\n// .. code::\n//\n//   ...\n//   scoped_routes:\n//     name: foo-scoped-routes\n//     scope_key_builder:\n//       fragments:\n//         - header_value_extractor:\n//             name: X-Route-Selector\n//             element_separator: ,\n//             element:\n//               separator: =\n//               key: vip\n//\n// ScopedRouteConfiguration resources (specified statically via\n// :ref:`scoped_route_configurations_list<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_route_configurations_list>`\n// or obtained dynamically via SRDS):\n//\n// .. 
code::\n//\n//  (1)\n//   name: route-scope1\n//   route_configuration_name: route-config1\n//   key:\n//      fragments:\n//        - string_key: 172.10.10.20\n//\n//  (2)\n//   name: route-scope2\n//   route_configuration_name: route-config2\n//   key:\n//     fragments:\n//       - string_key: 172.20.20.30\n//\n// A request from a client such as:\n//\n// .. code::\n//\n//     GET / HTTP/1.1\n//     Host: foo.com\n//     X-Route-Selector: vip=172.10.10.20\n//\n// would result in the routing table defined by the `route-config1`\n// RouteConfiguration being assigned to the HTTP request/stream.\n//\nmessage ScopedRouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.ScopedRouteConfiguration\";\n\n  // Specifies a key which is matched against the output of the\n  // :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder>`\n  // specified in the HttpConnectionManager. The matching is done per HTTP\n  // request and is dependent on the order of the fragments contained in the\n  // Key.\n  message Key {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.ScopedRouteConfiguration.Key\";\n\n    message Fragment {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.ScopedRouteConfiguration.Key.Fragment\";\n\n      oneof type {\n        option (validate.required) = true;\n\n        // A string to match against.\n        string string_key = 1;\n      }\n    }\n\n    // The ordered set of fragments to match against. 
The order must match the\n    // fragments in the corresponding\n    // :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder>`.\n    repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Whether the RouteConfiguration should be loaded on demand.\n  bool on_demand = 4;\n\n  // The name assigned to the routing scope.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v3.DiscoveryRequest` to an\n  // RDS server to fetch the :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` associated\n  // with this scope.\n  string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The key to match against.\n  Key key = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/route/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/metadata/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/route/v4alpha/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v4alpha\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP route configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// [#next-free-field: 11]\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.RouteConfiguration\";\n\n  // The name of the route configuration. For example, it might match\n  // :ref:`route_config_name\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.Rds.route_config_name>` in\n  // :ref:`envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.Rds`.\n  string name = 1;\n\n  // An array of virtual hosts that make up the route table.\n  repeated VirtualHost virtual_hosts = 2;\n\n  // An array of virtual hosts will be dynamically loaded via the VHDS API.\n  // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used\n  // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for\n  // on-demand discovery of virtual hosts. 
The contents of these two fields will be merged to\n  // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration\n  // taking precedence.\n  Vhds vhds = 9;\n\n  // Optionally specifies a list of HTTP headers that the connection manager\n  // will consider to be internal only. If they are found on external requests they will be cleaned\n  // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information.\n  repeated string internal_only_headers = 3 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each response that\n  // the connection manager encodes. Headers specified at this level are applied\n  // after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or\n  // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption response_headers_to_add = 4\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // that the connection manager encodes.\n  repeated string response_headers_to_remove = 5 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // routed by the HTTP connection manager. Headers specified at this level are\n  // applied after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or\n  // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. 
For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption request_headers_to_add = 6\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // routed by the HTTP connection manager.\n  repeated string request_headers_to_remove = 8 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // By default, headers that should be added/removed are evaluated from most to least specific:\n  //\n  // * route level\n  // * virtual host level\n  // * connection manager level\n  //\n  // To allow setting overrides at the route or virtual host level, this order can be reversed\n  // by setting this option to true. Defaults to false.\n  //\n  // [#next-major-version: In the v3 API, this will default to true.]\n  bool most_specific_header_mutations_wins = 10;\n\n  // An optional boolean that specifies whether the clusters that the route\n  // table refers to will be validated by the cluster manager. If set to true\n  // and a route refers to a non-existent cluster, the route table will not\n  // load. If set to false and a route refers to a non-existent cluster, the\n  // route table will load and the router filter will return a 404 if the route\n  // is selected at runtime. This setting defaults to true if the route table\n  // is statically defined via the :ref:`route_config\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.route_config>`\n  // option. This setting default to false if the route table is loaded dynamically via the\n  // :ref:`rds\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.rds>`\n  // option. 
Users may wish to override the default behavior in certain cases (for example when\n  // using CDS with a static route table).\n  google.protobuf.BoolValue validate_clusters = 7;\n}\n\nmessage Vhds {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.Vhds\";\n\n  // Configuration source specifier for VHDS.\n  core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/route/v4alpha/route_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/proxy_protocol.proto\";\nimport \"envoy/type/matcher/v4alpha/regex.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/metadata/v3/metadata.proto\";\nimport \"envoy/type/tracing/v3/custom_tag.proto\";\nimport \"envoy/type/v3/percent.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v4alpha\";\noption java_outer_classname = \"RouteComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP route components]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// The top level element in the routing configuration is a virtual host. Each virtual host has\n// a logical name as well as a set of domains that get routed to it based on the incoming request's\n// host header. This allows a single listener to service multiple top level domain path trees. 
Once\n// a virtual host is selected based on the domain, the routes are processed in order to see which\n// upstream cluster to route to or whether to perform a redirect.\n// [#next-free-field: 21]\nmessage VirtualHost {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.VirtualHost\";\n\n  enum TlsRequirementType {\n    // No TLS requirement for the virtual host.\n    NONE = 0;\n\n    // External requests must use TLS. If a request is external and it is not\n    // using TLS, a 301 redirect will be sent telling the client to use HTTPS.\n    EXTERNAL_ONLY = 1;\n\n    // All requests must use TLS. If a request is not using TLS, a 301 redirect\n    // will be sent telling the client to use HTTPS.\n    ALL = 2;\n  }\n\n  reserved 9, 12;\n\n  reserved \"per_filter_config\";\n\n  // The logical name of the virtual host. This is used when emitting certain\n  // statistics but is not relevant for routing.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A list of domains (host/authority header) that will be matched to this\n  // virtual host. Wildcard hosts are supported in the suffix or prefix form.\n  //\n  // Domain search order:\n  //  1. Exact domain names: ``www.foo.com``.\n  //  2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``.\n  //  3. Prefix domain wildcards: ``foo.*`` or ``foo-*``.\n  //  4. Special wildcard ``*`` matching any domain.\n  //\n  // .. note::\n  //\n  //   The wildcard will not match the empty string.\n  //   e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``.\n  //   The longest wildcards match first.\n  //   Only a single virtual host in the entire route configuration can match on ``*``. A domain\n  //   must be unique across all virtual hosts or the config will fail to load.\n  //\n  // Domains cannot contain control characters. 
This is validated by the well_known_regex HTTP_HEADER_VALUE.\n  repeated string domains = 2 [(validate.rules).repeated = {\n    min_items: 1\n    items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}}\n  }];\n\n  // The list of routes that will be matched, in order, for incoming requests.\n  // The first route that matches will be used.\n  repeated Route routes = 3;\n\n  // Specifies the type of TLS enforcement the virtual host expects. If this option is not\n  // specified, there is no TLS requirement for the virtual host.\n  TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // A list of virtual clusters defined for this virtual host. Virtual clusters\n  // are used for additional statistics gathering.\n  repeated VirtualCluster virtual_clusters = 5;\n\n  // Specifies a set of rate limit configurations that will be applied to the\n  // virtual host.\n  repeated RateLimit rate_limits = 6;\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. 
For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption request_headers_to_add = 7\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // handled by this virtual host.\n  repeated string request_headers_to_remove = 13 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Specifies a list of HTTP headers that should be added to each response\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // handled by this virtual host.\n  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Indicates that the virtual host has a CORS policy.\n  CorsPolicy cors = 8;\n\n  // The per_filter_config field can be used to provide virtual host-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. 
Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n  // for if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 15;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the upstream request. Setting this option will cause it to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the upstream\n  // will see the attempt count as perceived by the second Envoy. Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_extensions.filters.http.router.v4alpha.Router.suppress_envoy_headers>` flag.\n  //\n  // [#next-major-version: rename to include_attempt_count_in_request.]\n  bool include_request_attempt_count = 14;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the downstream response. Setting this option will cause the router to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the downstream\n  // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_extensions.filters.http.router.v4alpha.Router.suppress_envoy_headers>` flag.\n  bool include_attempt_count_in_response = 19;\n\n  // Indicates the retry policy for all routes in this virtual host. Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  RetryPolicy retry_policy = 16;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. 
Note that setting a route level entry\n  // will take precedence over this config and it'll be treated independently (e.g.: values are not\n  // inherited). :ref:`Retry policy <envoy_api_field_config.route.v4alpha.VirtualHost.retry_policy>` should not be\n  // set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 20;\n\n  // Indicates the hedge policy for all routes in this virtual host. Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  HedgePolicy hedge_policy = 17;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum\n  // value of this and the listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18;\n}\n\n// A filter-defined action type.\nmessage FilterAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.FilterAction\";\n\n  google.protobuf.Any action = 1;\n}\n\n// A route is both a specification of how to match a request as well as an indication of what to do\n// next (e.g., redirect, forward, rewrite, etc.).\n//\n// .. 
attention::\n//\n//   Envoy supports routing on HTTP method via :ref:`header matching\n//   <envoy_api_msg_config.route.v4alpha.HeaderMatcher>`.\n// [#next-free-field: 18]\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.Route\";\n\n  reserved 6, 8;\n\n  reserved \"per_filter_config\";\n\n  // Name for the route.\n  string name = 14;\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  oneof action {\n    option (validate.required) = true;\n\n    // Route request to some upstream cluster.\n    RouteAction route = 2;\n\n    // Return a redirect.\n    RedirectAction redirect = 3;\n\n    // Return an arbitrary HTTP response directly, without proxying.\n    DirectResponseAction direct_response = 7;\n\n    // [#not-implemented-hide:]\n    // If true, a filter will define the action (e.g., it could dynamically generate the\n    // RouteAction).\n    // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when\n    // implemented]\n    FilterAction filter_action = 17;\n  }\n\n  // The Metadata field can be used to provide additional information\n  // about the route. It can be used for configuration, stats, and logging.\n  // The metadata should go under the filter namespace that will need it.\n  // For instance, if the metadata is intended for the Router filter,\n  // the filter name should be specified as *envoy.filters.http.router*.\n  core.v4alpha.Metadata metadata = 4;\n\n  // Decorator for the matched route.\n  Decorator decorator = 5;\n\n  // The typed_per_filter_config field can be used to provide route-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. 
Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>` for\n  // if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 13;\n\n  // Specifies a set of headers that will be added to requests matching this\n  // route. Headers specified at this level are applied before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and\n  // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption request_headers_to_add = 9\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // matching this route.\n  repeated string request_headers_to_remove = 12 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Specifies a set of headers that will be added to responses to requests\n  // matching this route. Headers specified at this level are applied before\n  // headers from the enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and\n  // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. 
For more information, including\n  // details on header value syntax, see the documentation on\n  // :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // to requests matching this route.\n  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Presence of the object defines whether the connection manager's tracing configuration\n  // is overridden by this route specific instance.\n  Tracing tracing = 15;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set, the bytes actually buffered will be the minimum value of this and the\n  // listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16;\n}\n\n// Compared to the :ref:`cluster <envoy_api_field_config.route.v4alpha.RouteAction.cluster>` field that specifies a\n// single upstream cluster as the target of a request, the :ref:`weighted_clusters\n// <envoy_api_field_config.route.v4alpha.RouteAction.weighted_clusters>` option allows for specification of\n// multiple upstream clusters along with weights that indicate the percentage of\n// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the\n// weights.\nmessage WeightedCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.WeightedCluster\";\n\n  // [#next-free-field: 11]\n  message ClusterWeight {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.WeightedCluster.ClusterWeight\";\n\n    reserved 7, 8;\n\n    reserved \"per_filter_config\";\n\n    // Name of the upstream cluster. 
The cluster must exist in the\n    // :ref:`cluster manager configuration <config_cluster_manager>`.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // An integer between 0 and :ref:`total_weight\n    // <envoy_api_field_config.route.v4alpha.WeightedCluster.total_weight>`. When a request matches the route,\n    // the choice of an upstream cluster is determined by its weight. The sum of weights across all\n    // entries in the clusters array must add up to the total_weight, which defaults to 100.\n    google.protobuf.UInt32Value weight = 2;\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field will be considered for\n    // load balancing. Note that this will be merged with what's provided in\n    // :ref:`RouteAction.metadata_match <envoy_api_field_config.route.v4alpha.RouteAction.metadata_match>`, with\n    // values here taking precedence. The filter name should be specified as *envoy.lb*.\n    core.v4alpha.Metadata metadata_match = 3;\n\n    // Specifies a list of headers to be added to requests when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and\n    // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. 
For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.v4alpha.HeaderValueOption request_headers_to_add = 4\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request when\n    // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.\n    repeated string request_headers_to_remove = 9 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // Specifies a list of headers to be added to responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and\n    // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.v4alpha.HeaderValueOption response_headers_to_add = 5\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of headers to be removed from responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.\n    repeated string response_headers_to_remove = 6 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // The per_filter_config field can be used to provide weighted cluster-specific\n    // configurations for filters. 
The key should match the filter name, such as\n    // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n    // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n    // for if and how it is utilized.\n    map<string, google.protobuf.Any> typed_per_filter_config = 10;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Specifies the total weight across all clusters. The sum of all cluster weights must equal this\n  // value, which must be greater than 0. Defaults to 100.\n  google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies the runtime key prefix that should be used to construct the\n  // runtime keys associated with each cluster. When the *runtime_key_prefix* is\n  // specified, the router will look for weights associated with each upstream\n  // cluster under the key *runtime_key_prefix* + \".\" + *cluster[i].name* where\n  // *cluster[i]* denotes an entry in the clusters array field. If the runtime\n  // key for the cluster does not exist, the value specified in the\n  // configuration file will be used as the default weight. 
See the :ref:`runtime documentation\n  // <operations_runtime>` for how key names map to the underlying implementation.\n  string runtime_key_prefix = 2;\n}\n\n// [#next-free-field: 13]\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.RouteMatch\";\n\n  message GrpcRouteMatchOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions\";\n  }\n\n  message TlsContextMatchOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteMatch.TlsContextMatchOptions\";\n\n    // If specified, the route will match against whether or not a certificate is presented.\n    // If not specified, certificate presentation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue presented = 1;\n\n    // If specified, the route will match against whether or not a certificate is validated.\n    // If not specified, certificate validation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue validated = 2;\n  }\n\n  // An extensible message for matching CONNECT requests.\n  message ConnectMatcher {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteMatch.ConnectMatcher\";\n  }\n\n  reserved 5, 3;\n\n  reserved \"regex\";\n\n  oneof path_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route is a prefix rule meaning that the prefix must\n    // match the beginning of the *:path* header.\n    string prefix = 1;\n\n    // If specified, the route is an exact path rule meaning that the path must\n    // exactly match the *:path* header once the query string is removed.\n    string path = 2;\n\n    // If specified, the route is a regular expression rule meaning that the\n    // regex must match the *:path* header once the query string 
is removed. The entire path\n    // (without the query string) must match the regex. The rule will not match if only a\n    // subsequence of the *:path* header matches the regex.\n    //\n    // [#next-major-version: In the v3 API we should redo how path specification works such\n    // that we utilize StringMatcher, and additionally have consistent options around whether we\n    // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive\n    // to deprecate the existing options. We should even consider whether we want to do away with\n    // path_specifier entirely and just rely on a set of header matchers which can already match\n    // on :path, etc. The issue with that is it is unclear how to generically deal with query string\n    // stripping. This needs more thought.]\n    type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];\n\n    // If this is used as the matcher, the matcher will only match CONNECT requests.\n    // Note that this will not match HTTP/2 upgrade-style CONNECT requests\n    // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style\n    // upgrades.\n    // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2,\n    // where Extended CONNECT requests may have a path, the path matchers will work if\n    // there is a path present.\n    // Note that CONNECT support is currently considered alpha in Envoy.\n    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.\n    ConnectMatcher connect_matcher = 12;\n  }\n\n  // Indicates that prefix/path matching should be case sensitive. The default\n  // is true.\n  google.protobuf.BoolValue case_sensitive = 4;\n\n  // Indicates that the route should additionally match on a runtime key. Every time the route\n  // is considered for a match, it must also fall under the percentage of matches indicated by\n  // this field. 
For some fraction N/D, a random number in the range [0,D) is selected. If the\n  // number is <= the value of the numerator N, or if the key is not present, the default\n  // value, the router continues to evaluate the remaining match criteria. A runtime_fraction\n  // route configuration can be used to roll out route changes in a gradual manner without full\n  // code/config deploys. Refer to the :ref:`traffic shifting\n  // <config_http_conn_man_route_table_traffic_splitting_shift>` docs for additional documentation.\n  //\n  // .. note::\n  //\n  //    Parsing this field is implemented such that the runtime key's data may be represented\n  //    as a FractionalPercent proto represented as JSON/YAML and may also be represented as an\n  //    integer with the assumption that the value is an integral percentage out of 100. For\n  //    instance, a runtime key lookup returning the value \"42\" would parse as a FractionalPercent\n  //    whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics.\n  core.v4alpha.RuntimeFractionalPercent runtime_fraction = 9;\n\n  // Specifies a set of headers that the route should match on. The router will\n  // check the request’s headers against all the specified headers in the route\n  // config. A match will happen if all the headers in the route are present in\n  // the request with the same values (or based on presence if the value field\n  // is not in the config).\n  repeated HeaderMatcher headers = 6;\n\n  // Specifies a set of URL query parameters on which the route should\n  // match. The router will check the query string from the *path* header\n  // against all the specified query parameters. If the number of specified\n  // query parameters is nonzero, they all must match the *path* header's\n  // query string for a match to occur.\n  repeated QueryParameterMatcher query_parameters = 7;\n\n  // If specified, only gRPC requests will be matched. 
The router will check\n  // that the content-type header has a application/grpc or one of the various\n  // application/grpc+ values.\n  GrpcRouteMatchOptions grpc = 8;\n\n  // If specified, the client tls context will be matched against the defined\n  // match options.\n  //\n  // [#next-major-version: unify with RBAC]\n  TlsContextMatchOptions tls_context = 11;\n}\n\n// [#next-free-field: 12]\nmessage CorsPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.CorsPolicy\";\n\n  reserved 1, 8, 7;\n\n  reserved \"allow_origin\", \"allow_origin_regex\", \"enabled\";\n\n  // Specifies string patterns that match allowed origins. An origin is allowed if any of the\n  // string matchers match.\n  repeated type.matcher.v4alpha.StringMatcher allow_origin_string_match = 11;\n\n  // Specifies the content for the *access-control-allow-methods* header.\n  string allow_methods = 2;\n\n  // Specifies the content for the *access-control-allow-headers* header.\n  string allow_headers = 3;\n\n  // Specifies the content for the *access-control-expose-headers* header.\n  string expose_headers = 4;\n\n  // Specifies the content for the *access-control-max-age* header.\n  string max_age = 5;\n\n  // Specifies whether the resource allows credentials.\n  google.protobuf.BoolValue allow_credentials = 6;\n\n  oneof enabled_specifier {\n    // Specifies the % of requests for which the CORS filter is enabled.\n    //\n    // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS\n    // filter will be enabled for 100% of the requests.\n    //\n    // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is\n    // specified, Envoy will lookup the runtime key to get the percentage of requests to filter.\n    core.v4alpha.RuntimeFractionalPercent filter_enabled = 9;\n  }\n\n  // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not\n 
 // enforced.\n  //\n  // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those\n  // fields have to explicitly disable the filter in order for this setting to take effect.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* to determine if it's valid but will not enforce any policies.\n  core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10;\n}\n\n// [#next-free-field: 37]\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.RouteAction\";\n\n  enum ClusterNotFoundResponseCode {\n    // HTTP status code - 503 Service Unavailable.\n    SERVICE_UNAVAILABLE = 0;\n\n    // HTTP status code - 404 Not Found.\n    NOT_FOUND = 1;\n  }\n\n  // The router is capable of shadowing traffic from one cluster to another. The current\n  // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n  // respond before returning the response from the primary cluster. All normal statistics are\n  // collected for the shadow cluster making this feature useful for testing.\n  //\n  // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is\n  // useful for logging. For example, *cluster1* becomes *cluster1-shadow*.\n  //\n  // .. note::\n  //\n  //   Shadowing will not be triggered if the primary cluster does not exist.\n  message RequestMirrorPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteAction.RequestMirrorPolicy\";\n\n    reserved 2;\n\n    reserved \"runtime_key\";\n\n    // Specifies the cluster that requests will be mirrored to. 
The cluster must\n    // exist in the cluster manager configuration.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // If not specified, all requests to the target cluster will be mirrored.\n    //\n    // If specified, this field takes precedence over the `runtime_key` field and requests must also\n    // fall under the percentage of matches indicated by this field.\n    //\n    // For some fraction N/D, a random number in the range [0,D) is selected. If the\n    // number is <= the value of the numerator N, or if the key is not present, the default\n    // value, the request will be mirrored.\n    core.v4alpha.RuntimeFractionalPercent runtime_fraction = 3;\n\n    // Determines if the trace span should be sampled. Defaults to true.\n    google.protobuf.BoolValue trace_sampled = 4;\n  }\n\n  // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer\n  // <arch_overview_load_balancing_types>`.\n  // [#next-free-field: 7]\n  message HashPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteAction.HashPolicy\";\n\n    message Header {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.Header\";\n\n      // The name of the request header that will be used to obtain the hash\n      // key. If the request header is not present, no hash will be produced.\n      string header_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // If specified, the request header value will be rewritten and used\n      // to produce the hash key.\n      type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 2;\n    }\n\n    // Envoy supports two types of cookie affinity:\n    //\n    // 1. Passive. Envoy takes a cookie that's present in the cookies header and\n    //    hashes on its value.\n    //\n    // 2. Generated. 
Envoy generates and sets a cookie with an expiration (TTL)\n    //    on the first request from the client in its response to the client,\n    //    based on the endpoint the request gets sent to. The client then\n    //    presents this on the next and all subsequent requests. The hash of\n    //    this is sufficient to ensure these requests get sent to the same\n    //    endpoint. The cookie is generated by hashing the source and\n    //    destination ports and addresses so that multiple independent HTTP2\n    //    streams on the same connection will independently receive the same\n    //    cookie, even if they arrive at the Envoy simultaneously.\n    message Cookie {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.Cookie\";\n\n      // The name of the cookie that will be used to obtain the hash key. If the\n      // cookie is not present and ttl below is not set, no hash will be\n      // produced.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n\n      // If specified, a cookie with the TTL will be generated if the cookie is\n      // not present. If the TTL is present and zero, the generated cookie will\n      // be a session cookie.\n      google.protobuf.Duration ttl = 2;\n\n      // The name of the path for the cookie. If no path is specified here, no path\n      // will be set for the cookie.\n      string path = 3;\n    }\n\n    message ConnectionProperties {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties\";\n\n      // Hash on source IP address.\n      bool source_ip = 1;\n    }\n\n    message QueryParameter {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter\";\n\n      // The name of the URL query parameter that will be used to obtain the hash\n      // key. 
If the parameter is not present, no hash will be produced. Query\n      // parameter names are case-sensitive.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n    }\n\n    message FilterState {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.FilterState\";\n\n      // The name of the Object in the per-request filterState, which is an\n      // Envoy::Http::Hashable object. If there is no data associated with the key,\n      // or the stored object is not Envoy::Http::Hashable, no hash will be produced.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n\n    oneof policy_specifier {\n      option (validate.required) = true;\n\n      // Header hash policy.\n      Header header = 1;\n\n      // Cookie hash policy.\n      Cookie cookie = 2;\n\n      // Connection properties hash policy.\n      ConnectionProperties connection_properties = 3;\n\n      // Query parameter hash policy.\n      QueryParameter query_parameter = 5;\n\n      // Filter state hash policy.\n      FilterState filter_state = 6;\n    }\n\n    // The flag that short-circuits the hash computing. 
This field provides a\n    // 'fallback' style of configuration: \"if a terminal policy doesn't work,\n    // fallback to rest of the policy list\", it saves time when the terminal\n    // policy works.\n    //\n    // If true, and there is already a hash computed, ignore rest of the\n    // list of hash polices.\n    // For example, if the following hash methods are configured:\n    //\n    //  ========= ========\n    //  specifier terminal\n    //  ========= ========\n    //  Header A  true\n    //  Header B  false\n    //  Header C  false\n    //  ========= ========\n    //\n    // The generateHash process ends if policy \"header A\" generates a hash, as\n    // it's a terminal policy.\n    bool terminal = 4;\n  }\n\n  // Allows enabling and disabling upgrades on a per-route basis.\n  // This overrides any enabled/disabled upgrade filter chain specified in the\n  // HttpConnectionManager\n  // :ref:`upgrade_configs\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.upgrade_configs>`\n  // but does not affect any custom filter chain specified there.\n  message UpgradeConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteAction.UpgradeConfig\";\n\n    // Configuration for sending data upstream as a raw data payload. This is used for\n    // CONNECT requests, when forwarding CONNECT payload as raw TCP.\n    message ConnectConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig\";\n\n      // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream.\n      core.v4alpha.ProxyProtocolConfig proxy_protocol_config = 1;\n    }\n\n    // The case-insensitive name of this upgrade, e.g. 
\"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type] will be proxied upstream.\n    string upgrade_type = 1\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Determines if upgrades are available on this route. Defaults to true.\n    google.protobuf.BoolValue enabled = 2;\n\n    // Configuration for sending data upstream as a raw data payload. This is used for\n    // CONNECT requests, when forwarding CONNECT payload as raw TCP.\n    // Note that CONNECT support is currently considered alpha in Envoy.\n    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.\n    ConnectConfig connect_config = 3;\n  }\n\n  message MaxStreamDuration {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteAction.MaxStreamDuration\";\n\n    // Specifies the maximum duration allowed for streams on the route. If not specified, the value\n    // from the :ref:`max_stream_duration\n    // <envoy_api_field_config.core.v4alpha.HttpProtocolOptions.max_stream_duration>` field in\n    // :ref:`HttpConnectionManager.common_http_protocol_options\n    // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.common_http_protocol_options>`\n    // is used. 
If this field is set explicitly to zero, any\n    // HttpConnectionManager max_stream_duration timeout will be disabled for\n    // this route.\n    google.protobuf.Duration max_stream_duration = 1;\n\n    // If present, and the request contains a `grpc-timeout header\n    // <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, use that value as the\n    // *max_stream_duration*, but limit the applied timeout to the maximum value specified here.\n    // If set to 0, the `grpc-timeout` header is used without modification.\n    google.protobuf.Duration grpc_timeout_header_max = 2;\n\n    // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by\n    // subtracting the provided duration from the header. This is useful for allowing Envoy to set\n    // its global timeout to be less than that of the deadline imposed by the calling client, which\n    // makes it more likely that Envoy will handle the timeout instead of having the call canceled\n    // by the client. If, after applying the offset, the resulting timeout is zero or negative,\n    // the stream will timeout immediately.\n    google.protobuf.Duration grpc_timeout_header_offset = 3;\n  }\n\n  reserved 12, 18, 19, 16, 22, 21, 10, 14, 23, 28, 26, 31;\n\n  reserved \"request_mirror_policy\", \"include_vh_rate_limits\", \"max_grpc_timeout\",\n      \"grpc_timeout_offset\", \"internal_redirect_action\", \"max_internal_redirects\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // HTTP header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist, Envoy will\n    // return a 404 response.\n    //\n    // .. 
attention::\n    //\n    //   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1\n    //   *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n    string cluster_header = 2\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster. See\n    // :ref:`traffic splitting <config_http_conn_man_route_table_traffic_splitting_split>`\n    // for additional documentation.\n    WeightedCluster weighted_clusters = 3;\n  }\n\n  // The HTTP status code to use when configured cluster is not found.\n  // The default response code is 503 Service Unavailable.\n  ClusterNotFoundResponseCode cluster_not_found_response_code = 20\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n  // in the upstream cluster with metadata matching what's set in this field will be considered\n  // for load balancing. If using :ref:`weighted_clusters\n  // <envoy_api_field_config.route.v4alpha.RouteAction.weighted_clusters>`, metadata will be merged, with values\n  // provided there taking precedence. The filter name should be specified as *envoy.lb*.\n  core.v4alpha.Metadata metadata_match = 4;\n\n  // Indicates that during forwarding, the matched prefix (or path) should be\n  // swapped with this value. This option allows application URLs to be rooted\n  // at a different path from those exposed at the reverse proxy layer. 
The router filter will\n  // place the original path before rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of *prefix_rewrite* or\n  // :ref:`regex_rewrite <envoy_api_field_config.route.v4alpha.RouteAction.regex_rewrite>`\n  // may be specified.\n  //\n  // .. attention::\n  //\n  //   Pay careful attention to the use of trailing slashes in the\n  //   :ref:`route's match <envoy_api_field_config.route.v4alpha.Route.match>` prefix value.\n  //   Stripping a prefix from a path requires multiple Routes to handle all cases. For example,\n  //   rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single\n  //   :ref:`Route <envoy_api_msg_config.route.v4alpha.Route>`, as shown by the below config entries:\n  //\n  //   .. code-block:: yaml\n  //\n  //     - match:\n  //         prefix: \"/prefix/\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //     - match:\n  //         prefix: \"/prefix\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //\n  //   Having above entries in the config, requests to */prefix* will be stripped to */*, while\n  //   requests to */prefix/etc* will be stripped to */etc*.\n  string prefix_rewrite = 5\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Indicates that during forwarding, portions of the path that match the\n  // pattern should be rewritten, even allowing the substitution of capture\n  // groups from the pattern into the new path as specified by the rewrite\n  // substitution string. This is useful to allow application paths to be\n  // rewritten in a way that is aware of segments with variable content like\n  // identifiers. 
The router filter will place the original path as it was\n  // before the rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of :ref:`prefix_rewrite <envoy_api_field_config.route.v4alpha.RouteAction.prefix_rewrite>`\n  // or *regex_rewrite* may be specified.\n  //\n  // Examples using Google's `RE2 <https://github.com/google/re2>`_ engine:\n  //\n  // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution\n  //   string of ``\\2/instance/\\1`` would transform ``/service/foo/v1/api``\n  //   into ``/v1/api/instance/foo``.\n  //\n  // * The pattern ``one`` paired with a substitution string of ``two`` would\n  //   transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``.\n  //\n  // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of\n  //   ``\\1two\\2`` would replace only the first occurrence of ``one``,\n  //   transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``.\n  //\n  // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/``\n  //   would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to\n  //   ``/aaa/yyy/bbb``.\n  type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 32;\n\n  oneof host_rewrite_specifier {\n    // Indicates that during forwarding, the host header will be swapped with\n    // this value.\n    string host_rewrite_literal = 6\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the hostname of the upstream host chosen by the cluster manager. This\n    // option is applicable only when the destination cluster for a route is of\n    // type *strict_dns* or *logical_dns*. 
Setting this to true with other cluster\n    // types has no effect.\n    google.protobuf.BoolValue auto_host_rewrite = 7;\n\n    // Indicates that during forwarding, the host header will be swapped with the content of given\n    // downstream or :ref:`custom <config_http_conn_man_headers_custom_request_headers>` header.\n    // If header value is empty, host header is left intact.\n    //\n    // .. attention::\n    //\n    //   Pay attention to the potential security implications of using this option. Provided header\n    //   must come from trusted source.\n    string host_rewrite_header = 29\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the result of the regex substitution executed on path value with query and fragment removed.\n    // This is useful for transitioning variable content between path segment and subdomain.\n    //\n    // For example with the following config:\n    //\n    //   .. code-block:: yaml\n    //\n    //     host_rewrite_path_regex:\n    //       pattern:\n    //         google_re2: {}\n    //         regex: \"^/(.+)/.+$\"\n    //       substitution: \\1\n    //\n    // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`.\n    type.matcher.v4alpha.RegexMatchAndSubstitute host_rewrite_path_regex = 35;\n  }\n\n  // Specifies the upstream timeout for the route. If not specified, the default is 15s. This\n  // spans between the point at which the entire downstream request (i.e. end-of-stream) has been\n  // processed and when the upstream response has been completely processed. A value of 0 will\n  // disable the route's timeout.\n  //\n  // .. note::\n  //\n  //   This timeout includes all retries. 
See also\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //   :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration timeout = 8;\n\n  // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout,\n  // although the connection manager wide :ref:`stream_idle_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.stream_idle_timeout>`\n  // will still apply. A value of 0 will completely disable the route's idle timeout, even if a\n  // connection manager stream idle timeout is configured.\n  //\n  // The idle timeout is distinct to :ref:`timeout\n  // <envoy_api_field_config.route.v4alpha.RouteAction.timeout>`, which provides an upper bound\n  // on the upstream response time; :ref:`idle_timeout\n  // <envoy_api_field_config.route.v4alpha.RouteAction.idle_timeout>` instead bounds the amount\n  // of time the request's stream may be idle.\n  //\n  // After header decoding, the idle timeout will apply on downstream and\n  // upstream request events. Each time an encode/decode event for headers or\n  // data is processed for the stream, the timer will be reset. If the timeout\n  // fires, the stream is terminated with a 408 Request Timeout error code if no\n  // upstream response header has been received, otherwise a stream reset\n  // occurs.\n  google.protobuf.Duration idle_timeout = 24;\n\n  // Indicates that the route has a retry policy. Note that if this is set,\n  // it'll take precedence over the virtual host level retry policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  RetryPolicy retry_policy = 9;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. 
Note that if this is set, it'll take\n  // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged,\n  // most internal one becomes the enforced policy). :ref:`Retry policy <envoy_api_field_config.route.v4alpha.VirtualHost.retry_policy>`\n  // should not be set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 33;\n\n  // Indicates that the route has request mirroring policies.\n  repeated RequestMirrorPolicy request_mirror_policies = 30;\n\n  // Optionally specifies the :ref:`routing priority <arch_overview_http_routing_priority>`.\n  core.v4alpha.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies a set of rate limit configurations that could be applied to the\n  // route.\n  repeated RateLimit rate_limits = 13;\n\n  // Specifies a list of hash policies to use for ring hash load balancing. Each\n  // hash policy is evaluated individually and the combined result is used to\n  // route the request. The method of combination is deterministic such that\n  // identical lists of hash policies will produce the same hash. Since a hash\n  // policy examines specific parts of a request, it can fail to produce a hash\n  // (i.e. if the hashed header is not present). If (and only if) all configured\n  // hash policies fail to generate a hash, no hash will be produced for\n  // the route. In this case, the behavior is the same as if no hash policies\n  // were specified (i.e. the ring hash load balancer will choose a random\n  // backend). 
If a hash policy has the \"terminal\" attribute set to true, and\n  // there is already a hash generated, the hash is returned immediately,\n  // ignoring the rest of the hash policy list.\n  repeated HashPolicy hash_policy = 15;\n\n  // Indicates that the route has a CORS policy.\n  CorsPolicy cors = 17;\n\n  repeated UpgradeConfig upgrade_configs = 25;\n\n  // If present, Envoy will try to follow an upstream redirect response instead of proxying the\n  // response back to the downstream. An upstream redirect response is defined\n  // by :ref:`redirect_response_codes\n  // <envoy_api_field_config.route.v4alpha.InternalRedirectPolicy.redirect_response_codes>`.\n  InternalRedirectPolicy internal_redirect_policy = 34;\n\n  // Indicates that the route has a hedge policy. Note that if this is set,\n  // it'll take precedence over the virtual host level hedge policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  HedgePolicy hedge_policy = 27;\n\n  // Specifies the maximum stream duration for this route.\n  MaxStreamDuration max_stream_duration = 36;\n}\n\n// HTTP retry :ref:`architecture overview <arch_overview_http_routing_retry>`.\n// [#next-free-field: 12]\nmessage RetryPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.RetryPolicy\";\n\n  enum ResetHeaderFormat {\n    SECONDS = 0;\n    UNIX_TIMESTAMP = 1;\n  }\n\n  message RetryPriority {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.RetryPriority\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryHostPredicate {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.RetryHostPredicate\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n  
  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryBackOff {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.RetryBackOff\";\n\n    // Specifies the base interval between retries. This parameter is required and must be greater\n    // than zero. Values less than 1 ms are rounded up to 1 ms.\n    // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's\n    // back-off algorithm.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n\n    // Specifies the maximum interval between retries. This parameter is optional, but must be\n    // greater than or equal to the `base_interval` if set. The default is 10 times the\n    // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion\n    // of Envoy's back-off algorithm.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  message ResetHeader {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.ResetHeader\";\n\n    string name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // A retry back-off strategy that applies when the upstream server rate limits\n  // the request.\n  //\n  // Given this configuration:\n  //\n  // .. code-block:: yaml\n  //\n  //   rate_limited_retry_back_off:\n  //     reset_headers:\n  //     - name: Retry-After\n  //       format: SECONDS\n  //     - name: X-RateLimit-Reset\n  //       format: UNIX_TIMESTAMP\n  //     max_interval: \"300s\"\n  //\n  // The following algorithm will apply:\n  //\n  //  1. 
If the response contains the header ``Retry-After`` its value must be on\n  //     the form ``120`` (an integer that represents the number of seconds to\n  //     wait before retrying). If so, this value is used as the back-off interval.\n  //  2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its\n  //     value must be on the form ``1595320702`` (an integer that represents the\n  //     point in time at which to retry, as a Unix timestamp in seconds). If so,\n  //     the current time is subtracted from this value and the result is used as\n  //     the back-off interval.\n  //  3. Otherwise, Envoy will use the default\n  //     :ref:`exponential back-off <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_back_off>`\n  //     strategy.\n  //\n  // No matter which format is used, if the resulting back-off interval exceeds\n  // ``max_interval`` it is discarded and the next header in ``reset_headers``\n  // is tried. If a request timeout is configured for the route it will further\n  // limit how long the request will be allowed to run.\n  //\n  // To prevent many clients retrying at the same point in time jitter is added\n  // to the back-off interval, so the resulting interval is decided by taking:\n  // ``random(interval, interval * 1.5)``.\n  //\n  // .. attention::\n  //\n  //   Configuring ``rate_limited_retry_back_off`` will not by itself cause a request\n  //   to be retried. You will still need to configure the right retry policy to match\n  //   the responses from the upstream server.\n  message RateLimitedRetryBackOff {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff\";\n\n    // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``)\n    // to match against the response. Headers are tried in order, and matched case\n    // insensitive. The first header to be parsed successfully is used. 
If no headers\n    // match the default exponential back-off is used instead.\n    repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}];\n\n    // Specifies the maximum back off interval that Envoy will allow. If a reset\n    // header contains an interval longer than this then it will be discarded and\n    // the next header will be tried. Defaults to 300 seconds.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  // Specifies the conditions under which retry takes place. These are the same\n  // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and\n  // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`.\n  string retry_on = 1;\n\n  // Specifies the allowed number of retries. This parameter is optional and\n  // defaults to 1. These are the same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-max-retries`.\n  google.protobuf.UInt32Value max_retries = 2;\n\n  // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The\n  // same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.\n  //\n  // .. note::\n  //\n  //   If left unspecified, Envoy will use the global\n  //   :ref:`route timeout <envoy_api_field_config.route.v4alpha.RouteAction.timeout>` for the request.\n  //   Consequently, when using a :ref:`5xx <config_http_filters_router_x-envoy-retry-on>` based\n  //   retry policy, a request that times out will not be retried as the total timeout budget\n  //   would have been exhausted.\n  google.protobuf.Duration per_try_timeout = 3;\n\n  // Specifies an implementation of a RetryPriority which is used to determine the\n  // distribution of load across priorities used for retries. 
Refer to\n  // :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more details.\n  RetryPriority retry_priority = 4;\n\n  // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host\n  // for retries. If any of the predicates reject the host, host selection will be reattempted.\n  // Refer to :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more\n  // details.\n  repeated RetryHostPredicate retry_host_predicate = 5;\n\n  // The maximum number of times host selection will be reattempted before giving up, at which\n  // point the host that was last selected will be routed to. If unspecified, this will default to\n  // retrying once.\n  int64 host_selection_retry_max_attempts = 6;\n\n  // HTTP status codes that should trigger a retry in addition to those specified by retry_on.\n  repeated uint32 retriable_status_codes = 7;\n\n  // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the\n  // default base interval is 25 milliseconds or, if set, the current value of the\n  // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times\n  // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries`\n  // describes Envoy's back-off algorithm.\n  RetryBackOff retry_back_off = 8;\n\n  // Specifies parameters that control a retry back-off strategy that is used\n  // when the request is rate limited by the upstream server. The server may\n  // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to\n  // provide feedback to the client on how long to wait before retrying. 
If\n  // configured, this back-off strategy will be used instead of the\n  // default exponential back off strategy (configured using `retry_back_off`)\n  // whenever a response includes the matching headers.\n  RateLimitedRetryBackOff rate_limited_retry_back_off = 11;\n\n  // HTTP response headers that trigger a retry if present in the response. A retry will be\n  // triggered if any of the header matches match the upstream response headers.\n  // The field is only consulted if 'retriable-headers' retry policy is active.\n  repeated HeaderMatcher retriable_headers = 9;\n\n  // HTTP headers which must be present in the request for retries to be attempted.\n  repeated HeaderMatcher retriable_request_headers = 10;\n}\n\n// HTTP request hedging :ref:`architecture overview <arch_overview_http_routing_hedging>`.\nmessage HedgePolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.HedgePolicy\";\n\n  // Specifies the number of initial requests that should be sent upstream.\n  // Must be at least 1.\n  // Defaults to 1.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies a probability that an additional upstream request should be sent\n  // on top of what is specified by initial_requests.\n  // Defaults to 0.\n  // [#not-implemented-hide:]\n  type.v3.FractionalPercent additional_request_chance = 2;\n\n  // Indicates that a hedged request should be sent when the per-try timeout\n  // is hit. 
This will only occur if the retry policy also indicates that a\n  // timed out request should be retried.\n  // Once a timed out request is retried due to per try timeout, the router\n  // filter will ensure that it is not retried again even if the returned\n  // response headers would otherwise be retried according the specified\n  // :ref:`RetryPolicy <envoy_api_msg_config.route.v4alpha.RetryPolicy>`.\n  // Defaults to false.\n  bool hedge_on_per_try_timeout = 3;\n}\n\n// [#next-free-field: 9]\nmessage RedirectAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.RedirectAction\";\n\n  enum RedirectResponseCode {\n    // Moved Permanently HTTP Status Code - 301.\n    MOVED_PERMANENTLY = 0;\n\n    // Found HTTP Status Code - 302.\n    FOUND = 1;\n\n    // See Other HTTP Status Code - 303.\n    SEE_OTHER = 2;\n\n    // Temporary Redirect HTTP Status Code - 307.\n    TEMPORARY_REDIRECT = 3;\n\n    // Permanent Redirect HTTP Status Code - 308.\n    PERMANENT_REDIRECT = 4;\n  }\n\n  // When the scheme redirection take place, the following rules apply:\n  //  1. If the source URI scheme is `http` and the port is explicitly\n  //     set to `:80`, the port will be removed after the redirection\n  //  2. 
If the source URI scheme is `https` and the port is explicitly\n  //     set to `:443`, the port will be removed after the redirection\n  oneof scheme_rewrite_specifier {\n    // The scheme portion of the URL will be swapped with \"https\".\n    bool https_redirect = 4;\n\n    // The scheme portion of the URL will be swapped with this value.\n    string scheme_redirect = 7;\n  }\n\n  // The host portion of the URL will be swapped with this value.\n  string host_redirect = 1\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // The port value of the URL will be swapped with this value.\n  uint32 port_redirect = 8;\n\n  oneof path_rewrite_specifier {\n    // The path portion of the URL will be swapped with this value.\n    // Please note that query string in path_redirect will override the\n    // request's query string and will not be stripped.\n    //\n    // For example, let's say we have the following routes:\n    //\n    // - match: { path: \"/old-path-1\" }\n    //   redirect: { path_redirect: \"/new-path-1\" }\n    // - match: { path: \"/old-path-2\" }\n    //   redirect: { path_redirect: \"/new-path-2\", strip-query: \"true\" }\n    // - match: { path: \"/old-path-3\" }\n    //   redirect: { path_redirect: \"/new-path-3?foo=1\", strip_query: \"true\" }\n    //\n    // 1. if request uri is \"/old-path-1?bar=1\", users will be redirected to \"/new-path-1?bar=1\"\n    // 2. if request uri is \"/old-path-2?bar=1\", users will be redirected to \"/new-path-2\"\n    // 3. if request uri is \"/old-path-3?bar=1\", users will be redirected to \"/new-path-3?foo=1\"\n    string path_redirect = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during redirection, the matched prefix (or path)\n    // should be swapped with this value. This option allows redirect URLs be dynamically created\n    // based on the request.\n    //\n    // .. 
attention::\n    //\n    //   Pay attention to the use of trailing slashes as mentioned in\n    //   :ref:`RouteAction's prefix_rewrite <envoy_api_field_config.route.v4alpha.RouteAction.prefix_rewrite>`.\n    string prefix_rewrite = 5\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // The HTTP status code to use in the redirect response. The default response\n  // code is MOVED_PERMANENTLY (301).\n  RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // Indicates that during redirection, the query portion of the URL will\n  // be removed. Default value is false.\n  bool strip_query = 6;\n}\n\nmessage DirectResponseAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.DirectResponseAction\";\n\n  // Specifies the HTTP response status to be returned.\n  uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}];\n\n  // Specifies the content of the response body. If this setting is omitted,\n  // no body is included in the generated response.\n  //\n  // .. note::\n  //\n  //   Headers can be specified using *response_headers_to_add* in the enclosing\n  //   :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` or\n  //   :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`.\n  core.v4alpha.DataSource body = 2;\n}\n\nmessage Decorator {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.Decorator\";\n\n  // The operation name associated with the request matched to this route. If tracing is\n  // enabled, this information will be used as the span name reported for this request.\n  //\n  // .. 
note::\n  //\n  //   For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden\n  //   by the :ref:`x-envoy-decorator-operation\n  //   <config_http_filters_router_x-envoy-decorator-operation>` header.\n  string operation = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Whether the decorated details should be propagated to the other party. The default is true.\n  google.protobuf.BoolValue propagate = 2;\n}\n\nmessage Tracing {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.Tracing\";\n\n  // Target percentage of requests managed by this HTTP connection manager that will be force\n  // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n  // header is set. This field is a direct analog for the runtime variable\n  // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n  // <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent client_sampling = 1;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be randomly\n  // selected for trace generation, if not requested by the client or not forced. This field is\n  // a direct analog for the runtime variable 'tracing.random_sampling' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent random_sampling = 2;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be traced\n  // after all other sampling checks have been applied (client-directed, force tracing, random\n  // sampling). This field functions as an upper limit on the total configured sampling rate. For\n  // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n  // of client requests with the appropriate headers to be force traced. 
This field is a direct\n  // analog for the runtime variable 'tracing.global_enabled' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent overall_sampling = 3;\n\n  // A list of custom tags with unique tag name to create tags for the active span.\n  // It will take effect after merging with the :ref:`corresponding configuration\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.Tracing.custom_tags>`\n  // configured in the HTTP connection manager. If two tags with the same name are configured\n  // each in the HTTP connection manager and the route level, the one configured here takes\n  // priority.\n  repeated type.tracing.v3.CustomTag custom_tags = 4;\n}\n\n// A virtual cluster is a way of specifying a regex matching rule against\n// certain important endpoints such that statistics are generated explicitly for\n// the matched requests. The reason this is useful is that when doing\n// prefix/path matching Envoy does not always know what the application\n// considers to be an endpoint. Thus, it’s impossible for Envoy to generically\n// emit per endpoint statistics. However, often systems have highly critical\n// endpoints that they wish to get “perfect” statistics on. Virtual cluster\n// statistics are perfect in the sense that they are emitted on the downstream\n// side such that they include network level failures.\n//\n// Documentation for :ref:`virtual cluster statistics <config_http_filters_router_vcluster_stats>`.\n//\n// .. note::\n//\n//    Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for\n//    every application endpoint. 
This is both not easily maintainable and as well the matching and\n//    statistics output are not free.\nmessage VirtualCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.VirtualCluster\";\n\n  reserved 1, 3;\n\n  reserved \"pattern\", \"method\";\n\n  // Specifies a list of header matchers to use for matching requests. Each specified header must\n  // match. The pseudo-headers `:path` and `:method` can be used to match the request path and\n  // method, respectively.\n  repeated HeaderMatcher headers = 4;\n\n  // Specifies the name of the virtual cluster. The virtual cluster name as well\n  // as the virtual host name are used when emitting statistics. The statistics are emitted by the\n  // router filter and are documented :ref:`here <config_http_filters_router_stats>`.\n  string name = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Global rate limiting :ref:`architecture overview <arch_overview_global_rate_limit>`.\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.RateLimit\";\n\n  // [#next-free-field: 8]\n  message Action {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RateLimit.Action\";\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"source_cluster\", \"<local service cluster>\")\n    //\n    // <local service cluster> is derived from the :option:`--service-cluster` option.\n    message SourceCluster {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.SourceCluster\";\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"destination_cluster\", \"<routed target cluster>\")\n    //\n    // Once a request matches against a route table rule, a routed cluster is determined by one of\n    // the following :ref:`route table configuration <envoy_api_msg_config.route.v4alpha.RouteConfiguration>`\n    // settings:\n    //\n    // * :ref:`cluster <envoy_api_field_config.route.v4alpha.RouteAction.cluster>` indicates the upstream cluster\n    //   to route to.\n    // * :ref:`weighted_clusters <envoy_api_field_config.route.v4alpha.RouteAction.weighted_clusters>`\n    //   chooses a cluster randomly from a set of clusters with attributed weight.\n    // * :ref:`cluster_header <envoy_api_field_config.route.v4alpha.RouteAction.cluster_header>` indicates which\n    //   header in the request contains the target cluster.\n    message DestinationCluster {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.DestinationCluster\";\n    }\n\n    // The following descriptor entry is appended when a header contains a key that matches the\n    // *header_name*:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<header_value_queried_from_header>\")\n    message RequestHeaders {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.RequestHeaders\";\n\n      // The header name to be queried from the request headers. 
The header’s\n      // value is used to populate the value of the descriptor entry for the\n      // descriptor_key.\n      string header_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // The key to use in the descriptor entry.\n      string descriptor_key = 2 [(validate.rules).string = {min_len: 1}];\n\n      // If set to true, Envoy skips the descriptor while calling rate limiting service\n      // when header is not present in the request. By default it skips calling the\n      // rate limiting service if this header is not present in the request.\n      bool skip_if_absent = 3;\n    }\n\n    // The following descriptor entry is appended to the descriptor and is populated using the\n    // trusted address from :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"remote_address\", \"<trusted address from x-forwarded-for>\")\n    message RemoteAddress {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.RemoteAddress\";\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"generic_key\", \"<descriptor_value>\")\n    message GenericKey {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.GenericKey\";\n\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];\n\n      // An optional key to use in the descriptor entry. If not set it defaults\n      // to 'generic_key' as the descriptor key.\n      string descriptor_key = 2;\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"header_match\", \"<descriptor_value>\")\n    message HeaderValueMatch {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.HeaderValueMatch\";\n\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];\n\n      // If set to true, the action will append a descriptor entry when the\n      // request matches the headers. If set to false, the action will append a\n      // descriptor entry when the request does not match the headers. The\n      // default value is true.\n      google.protobuf.BoolValue expect_match = 2;\n\n      // Specifies a set of headers that the rate limit action should match\n      // on. The action will check the request’s headers against all the\n      // specified headers in the config. A match will happen if all the\n      // headers in the config are present in the request with the same values\n      // (or based on presence if the value field is not in the config).\n      repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    // The following descriptor entry is appended when the dynamic metadata contains a key value:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<value_queried_from_metadata>\")\n    message DynamicMetaData {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.DynamicMetaData\";\n\n      // The key to use in the descriptor entry.\n      string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Metadata struct that defines the key and path to retrieve the string value. 
A match will\n      // only happen if the value in the dynamic metadata is of type string.\n      type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];\n\n      // An optional value to use if *metadata_key* is empty. If not set and\n      // no value is present under the metadata_key then no descriptor is generated.\n      string default_value = 3;\n    }\n\n    oneof action_specifier {\n      option (validate.required) = true;\n\n      // Rate limit on source cluster.\n      SourceCluster source_cluster = 1;\n\n      // Rate limit on destination cluster.\n      DestinationCluster destination_cluster = 2;\n\n      // Rate limit on request headers.\n      RequestHeaders request_headers = 3;\n\n      // Rate limit on remote address.\n      RemoteAddress remote_address = 4;\n\n      // Rate limit on a generic key.\n      GenericKey generic_key = 5;\n\n      // Rate limit on the existence of request headers.\n      HeaderValueMatch header_value_match = 6;\n\n      // Rate limit on dynamic metadata.\n      DynamicMetaData dynamic_metadata = 7;\n    }\n  }\n\n  message Override {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RateLimit.Override\";\n\n    // Fetches the override from the dynamic metadata.\n    message DynamicMetadata {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Override.DynamicMetadata\";\n\n      // Metadata struct that defines the key and path to retrieve the struct value.\n      // The value must be a struct containing an integer \"requests_per_unit\" property\n      // and a \"unit\" property with a value parseable to :ref:`RateLimitUnit\n      // enum <envoy_api_enum_type.v3.RateLimitUnit>`\n      type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}];\n    }\n\n    oneof override_specifier {\n      option (validate.required) = true;\n\n      // Limit 
override from dynamic metadata.\n      DynamicMetadata dynamic_metadata = 1;\n    }\n  }\n\n  // Refers to the stage set in the filter. The rate limit configuration only\n  // applies to filters with the same stage number. The default stage number is\n  // 0.\n  //\n  // .. note::\n  //\n  //   The filter supports a range of 0 - 10 inclusively for stage numbers.\n  google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}];\n\n  // The key to be set in runtime to disable this rate limit configuration.\n  string disable_key = 2;\n\n  // A list of actions that are to be applied for this rate limit configuration.\n  // Order matters as the actions are processed sequentially and the descriptor\n  // is composed by appending descriptor entries in that sequence. If an action\n  // cannot append a descriptor entry, no descriptor is generated for the\n  // configuration. See :ref:`composing actions\n  // <config_http_filters_rate_limit_composing_actions>` for additional documentation.\n  repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}];\n\n  // An optional limit override to be appended to the descriptor produced by this\n  // rate limit configuration. If the override value is invalid or cannot be resolved\n  // from metadata, no override is provided. See :ref:`rate limit override\n  // <config_http_filters_rate_limit_rate_limit_override>` for more information.\n  Override limit = 4;\n}\n\n// .. attention::\n//\n//   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host*\n//   header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n//\n// .. attention::\n//\n//   To route on HTTP method, use the special HTTP/2 *:method* header. This works for both\n//   HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g.,\n//\n//   .. code-block:: json\n//\n//     {\n//       \"name\": \":method\",\n//       \"exact_match\": \"POST\"\n//     }\n//\n// .. 
attention::\n//   In the absence of any header match specifier, match will default to :ref:`present_match\n//   <envoy_api_field_config.route.v4alpha.HeaderMatcher.present_match>`. i.e, a request that has the :ref:`name\n//   <envoy_api_field_config.route.v4alpha.HeaderMatcher.name>` header will match, regardless of the header's\n//   value.\n//\n//  [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.]\n// [#next-free-field: 13]\nmessage HeaderMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.HeaderMatcher\";\n\n  reserved 2, 3, 5;\n\n  reserved \"regex_match\";\n\n  // Specifies the name of the header in the request.\n  string name = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Specifies how the header match will be performed to route the request.\n  oneof header_match_specifier {\n    // If specified, header match will be performed based on the value of the header.\n    string exact_match = 4;\n\n    // If specified, this regex string is a regular expression rule which implies the entire request\n    // header value must match the regex. The rule will not match if only a subsequence of the\n    // request header value matches the regex.\n    type.matcher.v4alpha.RegexMatcher safe_regex_match = 11;\n\n    // If specified, header match will be performed based on range.\n    // The rule will match if the request header value is within this range.\n    // The entire request header value must represent an integer in base 10 notation: consisting of\n    // an optional plus or minus sign followed by a sequence of digits. The rule will not match if\n    // the header value does not represent an integer. 
Match will fail for empty values, floating\n    // point numbers or if only a subsequence of the header value is an integer.\n    //\n    // Examples:\n    //\n    // * For range [-10,0), route will match for header value -1, but not for 0, \"somestring\", 10.9,\n    //   \"-1somestring\"\n    type.v3.Int64Range range_match = 6;\n\n    // If specified, header match will be performed based on whether the header is in the\n    // request.\n    bool present_match = 7;\n\n    // If specified, header match will be performed based on the prefix of the header value.\n    // Note: empty prefix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.\n    string prefix_match = 9 [(validate.rules).string = {min_len: 1}];\n\n    // If specified, header match will be performed based on the suffix of the header value.\n    // Note: empty suffix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.\n    string suffix_match = 10 [(validate.rules).string = {min_len: 1}];\n\n    // If specified, header match will be performed based on whether the header value contains\n    // the given value or not.\n    // Note: empty contains match is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*.\n    string contains_match = 12 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // If specified, the match result will be inverted before checking. 
Defaults to false.\n  //\n  // Examples:\n  //\n  // * The regex ``\\d{3}`` does not match the value *1234*, so it will match when inverted.\n  // * The range [-10,0) will match the value -1, so it will not match when inverted.\n  bool invert_match = 8;\n}\n\n// Query parameter matching treats the query string of a request's :path header\n// as an ampersand-separated list of keys and/or key=value elements.\n// [#next-free-field: 7]\nmessage QueryParameterMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.QueryParameterMatcher\";\n\n  reserved 3, 4;\n\n  reserved \"value\", \"regex\";\n\n  // Specifies the name of a key that must be present in the requested\n  // *path*'s query string.\n  string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}];\n\n  oneof query_parameter_match_specifier {\n    // Specifies whether a query parameter value should match against a string.\n    type.matcher.v4alpha.StringMatcher string_match = 5\n        [(validate.rules).message = {required: true}];\n\n    // Specifies whether a query parameter should be present.\n    bool present_match = 6;\n  }\n}\n\n// HTTP Internal Redirect :ref:`architecture overview <arch_overview_internal_redirects>`.\nmessage InternalRedirectPolicy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.InternalRedirectPolicy\";\n\n  // An internal redirect is not handled, unless the number of previous internal redirects that a\n  // downstream request has encountered is lower than this value.\n  // In the case where a downstream request is bounced among multiple routes by internal redirect,\n  // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy\n  // <envoy_api_field_config.route.v4alpha.RouteAction.internal_redirect_policy>`\n  // will pass the redirect back to downstream.\n  //\n  // If not specified, at most one redirect will be followed.\n  
google.protobuf.UInt32Value max_internal_redirects = 1;\n\n  // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified,\n  // only 302 will be treated as internal redirect.\n  // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored.\n  repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}];\n\n  // Specifies a list of predicates that are queried when an upstream response is deemed\n  // to trigger an internal redirect by all other criteria. Any predicate in the list can reject\n  // the redirect, causing the response to be proxied to downstream.\n  repeated core.v4alpha.TypedExtensionConfig predicates = 3;\n\n  // Allow internal redirect to follow a target URI with a different scheme than the value of\n  // x-forwarded-proto. The default is false.\n  bool allow_cross_scheme_redirect = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/route/v4alpha/scoped_route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v4alpha\";\noption java_outer_classname = \"ScopedRouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP scoped routing configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// Specifies a routing scope, which associates a\n// :ref:`Key<envoy_api_msg_config.route.v4alpha.ScopedRouteConfiguration.Key>` to a\n// :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` (identified by its resource name).\n//\n// The HTTP connection manager builds up a table consisting of these Key to\n// RouteConfiguration mappings, and looks up the RouteConfiguration to use per\n// request according to the algorithm specified in the\n// :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scope_key_builder>`\n// assigned to the HttpConnectionManager.\n//\n// For example, with the following configurations (in YAML):\n//\n// HttpConnectionManager config:\n//\n// .. code::\n//\n//   ...\n//   scoped_routes:\n//     name: foo-scoped-routes\n//     scope_key_builder:\n//       fragments:\n//         - header_value_extractor:\n//             name: X-Route-Selector\n//             element_separator: ,\n//             element:\n//               separator: =\n//               key: vip\n//\n// ScopedRouteConfiguration resources (specified statically via\n// :ref:`scoped_route_configurations_list<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scoped_route_configurations_list>`\n// or obtained dynamically via SRDS):\n//\n// .. 
code::\n//\n//  (1)\n//   name: route-scope1\n//   route_configuration_name: route-config1\n//   key:\n//      fragments:\n//        - string_key: 172.10.10.20\n//\n//  (2)\n//   name: route-scope2\n//   route_configuration_name: route-config2\n//   key:\n//     fragments:\n//       - string_key: 172.20.20.30\n//\n// A request from a client such as:\n//\n// .. code::\n//\n//     GET / HTTP/1.1\n//     Host: foo.com\n//     X-Route-Selector: vip=172.10.10.20\n//\n// would result in the routing table defined by the `route-config1`\n// RouteConfiguration being assigned to the HTTP request/stream.\n//\nmessage ScopedRouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.ScopedRouteConfiguration\";\n\n  // Specifies a key which is matched against the output of the\n  // :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scope_key_builder>`\n  // specified in the HttpConnectionManager. The matching is done per HTTP\n  // request and is dependent on the order of the fragments contained in the\n  // Key.\n  message Key {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.ScopedRouteConfiguration.Key\";\n\n    message Fragment {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment\";\n\n      oneof type {\n        option (validate.required) = true;\n\n        // A string to match against.\n        string string_key = 1;\n      }\n    }\n\n    // The ordered set of fragments to match against. 
The order must match the\n    // fragments in the corresponding\n    // :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scope_key_builder>`.\n    repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Whether the RouteConfiguration should be loaded on demand.\n  bool on_demand = 4;\n\n  // The name assigned to the routing scope.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v4alpha.DiscoveryRequest` to an\n  // RDS server to fetch the :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` associated\n  // with this scope.\n  string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The key to match against.\n  Key key = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/matcher/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/tap/v3/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.tap.v3;\n\nimport \"envoy/config/common/matcher/v3/matcher.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.tap.v3\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common tap configuration]\n\n// Tap configuration.\nmessage TapConfig {\n  // [#comment:TODO(mattklein123): Rate limiting]\n\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.TapConfig\";\n\n  // The match configuration. If the configuration matches the data source being tapped, a tap will\n  // occur, with the result written to the configured output.\n  // Exactly one of :ref:`match <envoy_api_field_config.tap.v3.TapConfig.match>` and\n  // :ref:`match_config <envoy_api_field_config.tap.v3.TapConfig.match_config>` must be set. If both\n  // are set, the :ref:`match <envoy_api_field_config.tap.v3.TapConfig.match>` will be used.\n  MatchPredicate match_config = 1 [deprecated = true];\n\n  // The match configuration. If the configuration matches the data source being tapped, a tap will\n  // occur, with the result written to the configured output.\n  // Exactly one of :ref:`match <envoy_api_field_config.tap.v3.TapConfig.match>` and\n  // :ref:`match_config <envoy_api_field_config.tap.v3.TapConfig.match_config>` must be set. If both\n  // are set, the :ref:`match <envoy_api_field_config.tap.v3.TapConfig.match>` will be used.\n  common.matcher.v3.MatchPredicate match = 4;\n\n  // The tap output configuration. 
If a match configuration matches a data source being tapped,\n  // a tap will occur and the data will be written to the configured output.\n  OutputConfig output_config = 2 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\\connections for\n  // which the tap matching is enabled. When not enabled, the request\\connection will not be\n  // recorded.\n  //\n  // .. note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.v3.FractionalPercent.DenominatorType>`.\n  core.v3.RuntimeFractionalPercent tap_enabled = 3;\n}\n\n// Tap match configuration. This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 11]\nmessage MatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.MatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.tap.v2alpha.MatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. 
The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n\n    // HTTP request generic body match configuration.\n    HttpGenericBodyMatch http_request_generic_body_match = 9;\n\n    // HTTP response generic body match configuration.\n    HttpGenericBodyMatch http_response_generic_body_match = 10;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.HttpHeadersMatch\";\n\n  // HTTP headers to match.\n  repeated route.v3.HeaderMatcher headers = 1;\n}\n\n// HTTP generic body match configuration.\n// List of text strings and hex strings to be located in HTTP body.\n// All specified strings must be found in the HTTP body for positive match.\n// The search may be limited to specified number of bytes from the body start.\n//\n// .. attention::\n//\n//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.\n//   If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified\n//   to scan only part of the http body.\nmessage HttpGenericBodyMatch {\n  message GenericTextMatch {\n    oneof rule {\n      option (validate.required) = true;\n\n      // Text string to be located in HTTP body.\n      string string_match = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Sequence of bytes to be located in HTTP body.\n      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];\n    }\n  }\n\n  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).\n  uint32 bytes_limit = 1;\n\n  // List of patterns to match.\n  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// Tap output configuration.\nmessage OutputConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.OutputConfig\";\n\n  // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple\n  // sink types are supported this constraint will be relaxed.\n  repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}];\n\n  // For buffered tapping, the maximum amount of received body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_rx_bytes = 2;\n\n  // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. 
If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_tx_bytes = 3;\n\n  // Indicates whether taps produce a single buffered message per tap, or multiple streamed\n  // messages per tap in the emitted :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v3.TraceWrapper>` messages. Note that streamed tapping does not\n  // mean that no buffering takes place. Buffering may be required if data is processed before a\n  // match can be determined. See the HTTP tap filter :ref:`streaming\n  // <config_http_filters_tap_streaming>` documentation for more information.\n  bool streaming = 4;\n}\n\n// Tap output sink configuration.\nmessage OutputSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.OutputSink\";\n\n  // Output format. All output is in the form of one or more :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v3.TraceWrapper>` messages. This enumeration indicates\n  // how those messages are written. Note that not all sinks support all output formats. See\n  // individual sink documentation for more information.\n  enum Format {\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v3.Body>`\n    // data will be present in the :ref:`as_bytes\n    // <envoy_api_field_data.tap.v3.Body.as_bytes>` field. This means that body data will be\n    // base64 encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n    JSON_BODY_AS_BYTES = 0;\n\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v3.Body>`\n    // data will be present in the :ref:`as_string\n    // <envoy_api_field_data.tap.v3.Body.as_string>` field. This means that body data will be\n    // string encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_. 
This format type is\n    // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the\n    // user wishes to view it directly without being forced to base64 decode the body.\n    JSON_BODY_AS_STRING = 1;\n\n    // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes\n    // multiple binary messages without any length information the data stream will not be\n    // useful. However, for certain sinks that are self-delimiting (e.g., one message per file)\n    // this output format makes consumption simpler.\n    PROTO_BINARY = 2;\n\n    // Messages are written as a sequence tuples, where each tuple is the message length encoded\n    // as a `protobuf 32-bit varint\n    // <https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.coded_stream>`_\n    // followed by the binary message. The messages can be read back using the language specific\n    // protobuf coded stream implementation to obtain the message length and the message.\n    PROTO_BINARY_LENGTH_DELIMITED = 3;\n\n    // Text proto format.\n    PROTO_TEXT = 4;\n  }\n\n  // Sink output format.\n  Format format = 1 [(validate.rules).enum = {defined_only: true}];\n\n  oneof output_sink_type {\n    option (validate.required) = true;\n\n    // Tap output will be streamed out the :http:post:`/tap` admin endpoint.\n    //\n    // .. attention::\n    //\n    //   It is only allowed to specify the streaming admin output sink if the tap is being\n    //   configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has\n    //   been configured to receive tap configuration from some other source (e.g., static\n    //   file, XDS, etc.) configuring the streaming admin output type will fail.\n    StreamingAdminSink streaming_admin = 2;\n\n    // Tap output will be written to a file per tap sink.\n    FilePerTapSink file_per_tap = 3;\n\n    // [#not-implemented-hide:]\n    // GrpcService to stream data to. 
The format argument must be PROTO_BINARY.\n    // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented]\n    StreamingGrpcSink streaming_grpc = 4;\n  }\n}\n\n// Streaming admin sink configuration.\nmessage StreamingAdminSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.StreamingAdminSink\";\n}\n\n// The file per tap sink outputs a discrete file for every tapped stream.\nmessage FilePerTapSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.FilePerTapSink\";\n\n  // Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an\n  // identifier distinguishing the recorded trace for stream instances (the Envoy\n  // connection ID, HTTP stream ID, etc.).\n  string path_prefix = 1 [(validate.rules).string = {min_len: 1}];\n}\n\n// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC\n// server.\nmessage StreamingGrpcSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.StreamingGrpcSink\";\n\n  // Opaque identifier, that will be sent back to the streaming grpc server.\n  string tap_id = 1;\n\n  // The gRPC server that hosts the Tap Sink Service.\n  core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/matcher/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/config/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/tap/v4alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.tap.v4alpha;\n\nimport \"envoy/config/common/matcher/v4alpha/matcher.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.tap.v4alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common tap configuration]\n\n// Tap configuration.\nmessage TapConfig {\n  // [#comment:TODO(mattklein123): Rate limiting]\n\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.TapConfig\";\n\n  reserved 1;\n\n  reserved \"match_config\";\n\n  // The match configuration. If the configuration matches the data source being tapped, a tap will\n  // occur, with the result written to the configured output.\n  // Exactly one of :ref:`match <envoy_api_field_config.tap.v4alpha.TapConfig.match>` and\n  // :ref:`match_config <envoy_api_field_config.tap.v4alpha.TapConfig.match_config>` must be set. If both\n  // are set, the :ref:`match <envoy_api_field_config.tap.v4alpha.TapConfig.match>` will be used.\n  common.matcher.v4alpha.MatchPredicate match = 4;\n\n  // The tap output configuration. If a match configuration matches a data source being tapped,\n  // a tap will occur and the data will be written to the configured output.\n  OutputConfig output_config = 2 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\\connections for\n  // which the tap matching is enabled. 
When not enabled, the request\\connection will not be\n  // recorded.\n  //\n  // .. note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.v3.FractionalPercent.DenominatorType>`.\n  core.v4alpha.RuntimeFractionalPercent tap_enabled = 3;\n}\n\n// Tap match configuration. This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 11]\nmessage MatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.MatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.tap.v3.MatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. 
The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n\n    // HTTP request generic body match configuration.\n    HttpGenericBodyMatch http_request_generic_body_match = 9;\n\n    // HTTP response generic body match configuration.\n    HttpGenericBodyMatch http_response_generic_body_match = 10;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.tap.v3.HttpHeadersMatch\";\n\n  // HTTP headers to match.\n  repeated route.v4alpha.HeaderMatcher headers = 1;\n}\n\n// HTTP generic body match configuration.\n// List of text strings and hex strings to be located in HTTP body.\n// All specified strings must be found in the HTTP body for positive match.\n// The search may be limited to specified number of bytes from the body start.\n//\n// .. attention::\n//\n//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.\n//   If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified\n//   to scan only part of the http body.\nmessage HttpGenericBodyMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.tap.v3.HttpGenericBodyMatch\";\n\n  message GenericTextMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch\";\n\n    oneof rule {\n      option (validate.required) = true;\n\n      // Text string to be located in HTTP body.\n      string string_match = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Sequence of bytes to be located in HTTP body.\n      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];\n    }\n  }\n\n  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).\n  uint32 bytes_limit = 1;\n\n  // List of patterns to match.\n  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// Tap output configuration.\nmessage OutputConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.OutputConfig\";\n\n  // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple\n  // sink types are supported this constraint will be relaxed.\n  repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}];\n\n  // For buffered tapping, the maximum amount of received body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_rx_bytes = 2;\n\n  // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. 
If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_tx_bytes = 3;\n\n  // Indicates whether taps produce a single buffered message per tap, or multiple streamed\n  // messages per tap in the emitted :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v3.TraceWrapper>` messages. Note that streamed tapping does not\n  // mean that no buffering takes place. Buffering may be required if data is processed before a\n  // match can be determined. See the HTTP tap filter :ref:`streaming\n  // <config_http_filters_tap_streaming>` documentation for more information.\n  bool streaming = 4;\n}\n\n// Tap output sink configuration.\nmessage OutputSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.OutputSink\";\n\n  // Output format. All output is in the form of one or more :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v3.TraceWrapper>` messages. This enumeration indicates\n  // how those messages are written. Note that not all sinks support all output formats. See\n  // individual sink documentation for more information.\n  enum Format {\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v3.Body>`\n    // data will be present in the :ref:`as_bytes\n    // <envoy_api_field_data.tap.v3.Body.as_bytes>` field. This means that body data will be\n    // base64 encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n    JSON_BODY_AS_BYTES = 0;\n\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v3.Body>`\n    // data will be present in the :ref:`as_string\n    // <envoy_api_field_data.tap.v3.Body.as_string>` field. This means that body data will be\n    // string encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_. 
This format type is\n    // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the\n    // user wishes to view it directly without being forced to base64 decode the body.\n    JSON_BODY_AS_STRING = 1;\n\n    // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes\n    // multiple binary messages without any length information the data stream will not be\n    // useful. However, for certain sinks that are self-delimiting (e.g., one message per file)\n    // this output format makes consumption simpler.\n    PROTO_BINARY = 2;\n\n    // Messages are written as a sequence tuples, where each tuple is the message length encoded\n    // as a `protobuf 32-bit varint\n    // <https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.coded_stream>`_\n    // followed by the binary message. The messages can be read back using the language specific\n    // protobuf coded stream implementation to obtain the message length and the message.\n    PROTO_BINARY_LENGTH_DELIMITED = 3;\n\n    // Text proto format.\n    PROTO_TEXT = 4;\n  }\n\n  // Sink output format.\n  Format format = 1 [(validate.rules).enum = {defined_only: true}];\n\n  oneof output_sink_type {\n    option (validate.required) = true;\n\n    // Tap output will be streamed out the :http:post:`/tap` admin endpoint.\n    //\n    // .. attention::\n    //\n    //   It is only allowed to specify the streaming admin output sink if the tap is being\n    //   configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has\n    //   been configured to receive tap configuration from some other source (e.g., static\n    //   file, XDS, etc.) configuring the streaming admin output type will fail.\n    StreamingAdminSink streaming_admin = 2;\n\n    // Tap output will be written to a file per tap sink.\n    FilePerTapSink file_per_tap = 3;\n\n    // [#not-implemented-hide:]\n    // GrpcService to stream data to. 
The format argument must be PROTO_BINARY.\n    // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented]\n    StreamingGrpcSink streaming_grpc = 4;\n  }\n}\n\n// Streaming admin sink configuration.\nmessage StreamingAdminSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.tap.v3.StreamingAdminSink\";\n}\n\n// The file per tap sink outputs a discrete file for every tapped stream.\nmessage FilePerTapSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.FilePerTapSink\";\n\n  // Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an\n  // identifier distinguishing the recorded trace for stream instances (the Envoy\n  // connection ID, HTTP stream ID, etc.).\n  string path_prefix = 1 [(validate.rules).string = {min_len: 1}];\n}\n\n// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC\n// server.\nmessage StreamingGrpcSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.tap.v3.StreamingGrpcSink\";\n\n  // Opaque identifier, that will be sent back to the streaming grpc server.\n  string tap_id = 1;\n\n  // The gRPC server that hosts the Tap Sink Service.\n  core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/trace/v2/datadog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"DatadogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Datadog tracer]\n\n// Configuration for the Datadog tracer.\n// [#extension: envoy.tracers.datadog]\nmessage DatadogConfig {\n  // The cluster to use for submitting traces to the Datadog agent.\n  string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The name used for the service when traces are generated by envoy.\n  string service_name = 2 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v2/dynamic_ot.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"DynamicOtProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamically loadable OpenTracing tracer]\n\n// DynamicOtConfig is used to dynamically load a tracer from a shared library\n// that implements the `OpenTracing dynamic loading API\n// <https://github.com/opentracing/opentracing-cpp>`_.\n// [#extension: envoy.tracers.dynamic_ot]\nmessage DynamicOtConfig {\n  // Dynamic library implementing the `OpenTracing API\n  // <https://github.com/opentracing/opentracing-cpp>`_.\n  string library = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The configuration to use when creating a tracer from the given dynamic\n  // library.\n  google.protobuf.Struct config = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v2/http_tracer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"HttpTracerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tracing]\n// Tracing :ref:`architecture overview <arch_overview_tracing>`.\n\n// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy.\n//\n// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one\n// supported.\n//\n// .. attention::\n//\n//   Use of this message type has been deprecated in favor of direct use of\n//   :ref:`Tracing.Http <envoy_api_msg_config.trace.v2.Tracing.Http>`.\nmessage Tracing {\n  // Configuration for an HTTP tracer provider used by Envoy.\n  //\n  // The configuration is defined by the\n  // :ref:`HttpConnectionManager.Tracing <envoy_api_msg_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing>`\n  // :ref:`provider <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing.provider>`\n  // field.\n  message Http {\n    // The name of the HTTP trace driver to instantiate. The name must match a\n    // supported HTTP trace driver. 
Built-in trace drivers:\n    //\n    // - *envoy.tracers.lightstep*\n    // - *envoy.tracers.zipkin*\n    // - *envoy.tracers.dynamic_ot*\n    // - *envoy.tracers.datadog*\n    // - *envoy.tracers.opencensus*\n    // - *envoy.tracers.xray*\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Trace driver specific configuration which depends on the driver being instantiated.\n    // See the trace drivers for examples:\n    //\n    // - :ref:`LightstepConfig <envoy_api_msg_config.trace.v2.LightstepConfig>`\n    // - :ref:`ZipkinConfig <envoy_api_msg_config.trace.v2.ZipkinConfig>`\n    // - :ref:`DynamicOtConfig <envoy_api_msg_config.trace.v2.DynamicOtConfig>`\n    // - :ref:`DatadogConfig <envoy_api_msg_config.trace.v2.DatadogConfig>`\n    // - :ref:`OpenCensusConfig <envoy_api_msg_config.trace.v2.OpenCensusConfig>`\n    // - :ref:`AWS X-Ray <envoy_api_msg_config.trace.v2alpha.XRayConfig>`\n    oneof config_type {\n      google.protobuf.Struct config = 2 [deprecated = true];\n\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Provides configuration for the HTTP tracer.\n  Http http = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v2/lightstep.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"LightstepProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: LightStep tracer]\n\n// Configuration for the LightStep tracer.\n// [#extension: envoy.tracers.lightstep]\nmessage LightstepConfig {\n  // Available propagation modes\n  enum PropagationMode {\n    // Propagate trace context in the single header x-ot-span-context.\n    ENVOY = 0;\n\n    // Propagate trace context using LightStep's native format.\n    LIGHTSTEP = 1;\n\n    // Propagate trace context using the b3 format.\n    B3 = 2;\n\n    // Propagation trace context using the w3 trace-context standard.\n    TRACE_CONTEXT = 3;\n  }\n\n  // The cluster manager cluster that hosts the LightStep collectors.\n  string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // File containing the access token to the `LightStep\n  // <https://lightstep.com/>`_ API.\n  string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Propagation modes to use by LightStep's tracer.\n  repeated PropagationMode propagation_modes = 3\n      [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v2/opencensus.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"opencensus/proto/trace/v1/trace_config.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"OpencensusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: OpenCensus tracer]\n\n// Configuration for the OpenCensus tracer.\n// [#next-free-field: 15]\n// [#extension: envoy.tracers.opencensus]\nmessage OpenCensusConfig {\n  enum TraceContext {\n    // No-op default, no trace context is utilized.\n    NONE = 0;\n\n    // W3C Trace-Context format \"traceparent:\" header.\n    TRACE_CONTEXT = 1;\n\n    // Binary \"grpc-trace-bin:\" header.\n    GRPC_TRACE_BIN = 2;\n\n    // \"X-Cloud-Trace-Context:\" header.\n    CLOUD_TRACE_CONTEXT = 3;\n\n    // X-B3-* headers.\n    B3 = 4;\n  }\n\n  reserved 7;\n\n  // Configures tracing, e.g. the sampler, max number of annotations, etc.\n  opencensus.proto.trace.v1.TraceConfig trace_config = 1;\n\n  // Enables the stdout exporter if set to true. This is intended for debugging\n  // purposes.\n  bool stdout_exporter_enabled = 2;\n\n  // Enables the Stackdriver exporter if set to true. The project_id must also\n  // be set.\n  bool stackdriver_exporter_enabled = 3;\n\n  // The Cloud project_id to use for Stackdriver tracing.\n  string stackdriver_project_id = 4;\n\n  // (optional) By default, the Stackdriver exporter will connect to production\n  // Stackdriver. If stackdriver_address is non-empty, it will instead connect\n  // to this address, which is in the gRPC format:\n  // https://github.com/grpc/grpc/blob/master/doc/naming.md\n  string stackdriver_address = 10;\n\n  // (optional) The gRPC server that hosts Stackdriver tracing service. Only\n  // Google gRPC is supported. 
If :ref:`target_uri <envoy_v3_api_field_config.core.v3.GrpcService.GoogleGrpc.target_uri>`\n  // is not provided, the default production Stackdriver address will be used.\n  api.v2.core.GrpcService stackdriver_grpc_service = 13;\n\n  // Enables the Zipkin exporter if set to true. The url and service name must\n  // also be set.\n  bool zipkin_exporter_enabled = 5;\n\n  // The URL to Zipkin, e.g. \"http://127.0.0.1:9411/api/v2/spans\"\n  string zipkin_url = 6;\n\n  // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or\n  // ocagent_grpc_service must also be set.\n  bool ocagent_exporter_enabled = 11;\n\n  // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC\n  // format: https://github.com/grpc/grpc/blob/master/doc/naming.md\n  // [#comment:TODO: deprecate this field]\n  string ocagent_address = 12;\n\n  // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported.\n  // This is only used if the ocagent_address is left empty.\n  api.v2.core.GrpcService ocagent_grpc_service = 14;\n\n  // List of incoming trace context headers we will accept. First one found\n  // wins.\n  repeated TraceContext incoming_trace_context = 8;\n\n  // List of outgoing trace context headers we will produce.\n  repeated TraceContext outgoing_trace_context = 9;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v2/service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"ServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Trace Service]\n\n// Configuration structure.\nmessage TraceServiceConfig {\n  // The upstream gRPC cluster that hosts the metrics service.\n  api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v2/trace.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/config/trace/v2/datadog.proto\";\nimport public \"envoy/config/trace/v2/dynamic_ot.proto\";\nimport public \"envoy/config/trace/v2/http_tracer.proto\";\nimport public \"envoy/config/trace/v2/lightstep.proto\";\nimport public \"envoy/config/trace/v2/opencensus.proto\";\nimport public \"envoy/config/trace/v2/service.proto\";\nimport public \"envoy/config/trace/v2/zipkin.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"TraceProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "api/envoy/config/trace/v2/zipkin.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"ZipkinProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Zipkin tracer]\n\n// Configuration for the Zipkin tracer.\n// [#extension: envoy.tracers.zipkin]\n// [#next-free-field: 6]\nmessage ZipkinConfig {\n  // Available Zipkin collector endpoint versions.\n  enum CollectorEndpointVersion {\n    // Zipkin API v1, JSON over HTTP.\n    // [#comment: The default implementation of Zipkin client before this field is added was only v1\n    // and the way user configure this was by not explicitly specifying the version. Consequently,\n    // before this is added, the corresponding Zipkin collector expected to receive v1 payload.\n    // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when\n    // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field,\n    // since in Zipkin realm this v1 version is considered to be not preferable anymore.]\n    HTTP_JSON_V1 = 0 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // Zipkin API v2, JSON over HTTP.\n    HTTP_JSON = 1;\n\n    // Zipkin API v2, protobuf over HTTP.\n    HTTP_PROTO = 2;\n\n    // [#not-implemented-hide:]\n    GRPC = 3;\n  }\n\n  // The cluster manager cluster that hosts the Zipkin collectors. 
Note that the\n  // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster\n  // resources <envoy_api_field_config.bootstrap.v2.Bootstrap.StaticResources.clusters>`.\n  string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The API endpoint of the Zipkin service where the spans will be sent. When\n  // using a standard Zipkin installation, the API endpoint is typically\n  // /api/v1/spans, which is the default value.\n  string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Determines whether a 128bit trace id will be used when creating a new\n  // trace instance. The default value is false, which will result in a 64 bit trace id being used.\n  bool trace_id_128bit = 3;\n\n  // Determines whether client and server spans will share the same span context.\n  // The default value is true.\n  google.protobuf.BoolValue shared_span_context = 4;\n\n  // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be\n  // used.\n  CollectorEndpointVersion collector_endpoint_version = 5;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/trace/v2alpha/xray.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2alpha;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2alpha\";\noption java_outer_classname = \"XrayProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: AWS X-Ray Tracer Configuration]\n// Configuration for AWS X-Ray tracer\n\nmessage XRayConfig {\n  // The UDP endpoint of the X-Ray Daemon where the spans will be sent.\n  // If this value is not set, the default value of 127.0.0.1:2000 will be used.\n  api.v2.core.SocketAddress daemon_endpoint = 1;\n\n  // The name of the X-Ray segment.\n  string segment_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The location of a local custom sampling rules JSON file.\n  // For an example of the sampling rules see:\n  // `X-Ray SDK documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-go-configuration.html#xray-sdk-go-configuration-sampling>`_\n  api.v2.core.DataSource sampling_rule_manifest = 3;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/trace/v2:pkg\",\n        \"//envoy/config/trace/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/trace/v3/datadog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"DatadogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.tracers.datadog.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Datadog tracer]\n\n// Configuration for the Datadog tracer.\n// [#extension: envoy.tracers.datadog]\nmessage DatadogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.DatadogConfig\";\n\n  // The cluster to use for submitting traces to the Datadog agent.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The name used for the service when traces are generated by envoy.\n  string service_name = 2 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v3/dynamic_ot.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"DynamicOtProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.tracers.dynamic_ot.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamically loadable OpenTracing tracer]\n\n// DynamicOtConfig is used to dynamically load a tracer from a shared library\n// that implements the `OpenTracing dynamic loading API\n// <https://github.com/opentracing/opentracing-cpp>`_.\n// [#extension: envoy.tracers.dynamic_ot]\nmessage DynamicOtConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.DynamicOtConfig\";\n\n  // Dynamic library implementing the `OpenTracing API\n  // <https://github.com/opentracing/opentracing-cpp>`_.\n  string library = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The configuration to use when creating a tracer from the given dynamic\n  // library.\n  google.protobuf.Struct config = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v3/http_tracer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"HttpTracerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tracing]\n// Tracing :ref:`architecture overview <arch_overview_tracing>`.\n\n// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy.\n//\n// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one\n// supported.\n//\n// .. attention::\n//\n//   Use of this message type has been deprecated in favor of direct use of\n//   :ref:`Tracing.Http <envoy_api_msg_config.trace.v3.Tracing.Http>`.\nmessage Tracing {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v2.Tracing\";\n\n  // Configuration for an HTTP tracer provider used by Envoy.\n  //\n  // The configuration is defined by the\n  // :ref:`HttpConnectionManager.Tracing <envoy_api_msg_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing>`\n  // :ref:`provider <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider>`\n  // field.\n  message Http {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.trace.v2.Tracing.Http\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    // The name of the HTTP trace driver to instantiate. The name must match a\n    // supported HTTP trace driver. 
Built-in trace drivers:\n    //\n    // - *envoy.tracers.lightstep*\n    // - *envoy.tracers.zipkin*\n    // - *envoy.tracers.dynamic_ot*\n    // - *envoy.tracers.datadog*\n    // - *envoy.tracers.opencensus*\n    // - *envoy.tracers.xray*\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Trace driver specific configuration which depends on the driver being instantiated.\n    // See the trace drivers for examples:\n    //\n    // - :ref:`LightstepConfig <envoy_api_msg_config.trace.v3.LightstepConfig>`\n    // - :ref:`ZipkinConfig <envoy_api_msg_config.trace.v3.ZipkinConfig>`\n    // - :ref:`DynamicOtConfig <envoy_api_msg_config.trace.v3.DynamicOtConfig>`\n    // - :ref:`DatadogConfig <envoy_api_msg_config.trace.v3.DatadogConfig>`\n    // - :ref:`OpenCensusConfig <envoy_api_msg_config.trace.v3.OpenCensusConfig>`\n    // - :ref:`AWS X-Ray <envoy_api_msg_config.trace.v3.XRayConfig>`\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Provides configuration for the HTTP tracer.\n  Http http = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v3/lightstep.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"LightstepProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.tracers.lightstep.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: LightStep tracer]\n\n// Configuration for the LightStep tracer.\n// [#extension: envoy.tracers.lightstep]\nmessage LightstepConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.LightstepConfig\";\n\n  // Available propagation modes\n  enum PropagationMode {\n    // Propagate trace context in the single header x-ot-span-context.\n    ENVOY = 0;\n\n    // Propagate trace context using LightStep's native format.\n    LIGHTSTEP = 1;\n\n    // Propagate trace context using the b3 format.\n    B3 = 2;\n\n    // Propagation trace context using the w3 trace-context standard.\n    TRACE_CONTEXT = 3;\n  }\n\n  // The cluster manager cluster that hosts the LightStep collectors.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // File containing the access token to the `LightStep\n  // <https://lightstep.com/>`_ API.\n  string access_token_file = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Propagation modes to use by LightStep's tracer.\n  repeated PropagationMode propagation_modes = 3\n      [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v3/opencensus.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"opencensus/proto/trace/v1/trace_config.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"OpencensusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.tracers.opencensus.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: OpenCensus tracer]\n\n// Configuration for the OpenCensus tracer.\n// [#next-free-field: 15]\n// [#extension: envoy.tracers.opencensus]\nmessage OpenCensusConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.OpenCensusConfig\";\n\n  enum TraceContext {\n    // No-op default, no trace context is utilized.\n    NONE = 0;\n\n    // W3C Trace-Context format \"traceparent:\" header.\n    TRACE_CONTEXT = 1;\n\n    // Binary \"grpc-trace-bin:\" header.\n    GRPC_TRACE_BIN = 2;\n\n    // \"X-Cloud-Trace-Context:\" header.\n    CLOUD_TRACE_CONTEXT = 3;\n\n    // X-B3-* headers.\n    B3 = 4;\n  }\n\n  reserved 7;\n\n  // Configures tracing, e.g. the sampler, max number of annotations, etc.\n  opencensus.proto.trace.v1.TraceConfig trace_config = 1;\n\n  // Enables the stdout exporter if set to true. This is intended for debugging\n  // purposes.\n  bool stdout_exporter_enabled = 2;\n\n  // Enables the Stackdriver exporter if set to true. The project_id must also\n  // be set.\n  bool stackdriver_exporter_enabled = 3;\n\n  // The Cloud project_id to use for Stackdriver tracing.\n  string stackdriver_project_id = 4;\n\n  // (optional) By default, the Stackdriver exporter will connect to production\n  // Stackdriver. 
If stackdriver_address is non-empty, it will instead connect\n  // to this address, which is in the gRPC format:\n  // https://github.com/grpc/grpc/blob/master/doc/naming.md\n  string stackdriver_address = 10;\n\n  // (optional) The gRPC server that hosts Stackdriver tracing service. Only\n  // Google gRPC is supported. If :ref:`target_uri <envoy_v3_api_field_config.core.v3.GrpcService.GoogleGrpc.target_uri>`\n  // is not provided, the default production Stackdriver address will be used.\n  core.v3.GrpcService stackdriver_grpc_service = 13;\n\n  // Enables the Zipkin exporter if set to true. The url and service name must\n  // also be set.\n  bool zipkin_exporter_enabled = 5;\n\n  // The URL to Zipkin, e.g. \"http://127.0.0.1:9411/api/v2/spans\"\n  string zipkin_url = 6;\n\n  // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or\n  // ocagent_grpc_service must also be set.\n  bool ocagent_exporter_enabled = 11;\n\n  // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC\n  // format: https://github.com/grpc/grpc/blob/master/doc/naming.md\n  // [#comment:TODO: deprecate this field]\n  string ocagent_address = 12;\n\n  // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported.\n  // This is only used if the ocagent_address is left empty.\n  core.v3.GrpcService ocagent_grpc_service = 14;\n\n  // List of incoming trace context headers we will accept. First one found\n  // wins.\n  repeated TraceContext incoming_trace_context = 8;\n\n  // List of outgoing trace context headers we will produce.\n  repeated TraceContext outgoing_trace_context = 9;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v3/service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"ServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Trace Service]\n\n// Configuration structure.\nmessage TraceServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.TraceServiceConfig\";\n\n  // The upstream gRPC cluster that hosts the metrics service.\n  core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v3/trace.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/config/trace/v3/datadog.proto\";\nimport public \"envoy/config/trace/v3/dynamic_ot.proto\";\nimport public \"envoy/config/trace/v3/http_tracer.proto\";\nimport public \"envoy/config/trace/v3/lightstep.proto\";\nimport public \"envoy/config/trace/v3/opencensus.proto\";\nimport public \"envoy/config/trace/v3/service.proto\";\nimport public \"envoy/config/trace/v3/zipkin.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"TraceProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "api/envoy/config/trace/v3/xray.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"XrayProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.tracers.xray.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: AWS X-Ray Tracer Configuration]\n// Configuration for AWS X-Ray tracer\n\nmessage XRayConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2alpha.XRayConfig\";\n\n  message SegmentFields {\n    // The type of AWS resource, e.g. 
\"AWS::AppMesh::Proxy\".\n    string origin = 1;\n\n    // AWS resource metadata dictionary.\n    // See: `X-Ray Segment Document documentation <https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-aws>`__\n    google.protobuf.Struct aws = 2;\n  }\n\n  // The UDP endpoint of the X-Ray Daemon where the spans will be sent.\n  // If this value is not set, the default value of 127.0.0.1:2000 will be used.\n  core.v3.SocketAddress daemon_endpoint = 1;\n\n  // The name of the X-Ray segment.\n  string segment_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The location of a local custom sampling rules JSON file.\n  // For an example of the sampling rules see:\n  // `X-Ray SDK documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-go-configuration.html#xray-sdk-go-configuration-sampling>`_\n  core.v3.DataSource sampling_rule_manifest = 3;\n\n  // Optional custom fields to be added to each trace segment.\n  // see: `X-Ray Segment Document documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html>`__\n  SegmentFields segment_fields = 4;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v3/zipkin.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"ZipkinProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.tracers.zipkin.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Zipkin tracer]\n\n// Configuration for the Zipkin tracer.\n// [#extension: envoy.tracers.zipkin]\n// [#next-free-field: 6]\nmessage ZipkinConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v2.ZipkinConfig\";\n\n  // Available Zipkin collector endpoint versions.\n  enum CollectorEndpointVersion {\n    // Zipkin API v1, JSON over HTTP.\n    // [#comment: The default implementation of Zipkin client before this field is added was only v1\n    // and the way user configure this was by not explicitly specifying the version. Consequently,\n    // before this is added, the corresponding Zipkin collector expected to receive v1 payload.\n    // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when\n    // user upgrading Envoy with this change. 
Furthermore, we also immediately deprecate this field,\n    // since in Zipkin realm this v1 version is considered to be not preferable anymore.]\n    DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // Zipkin API v2, JSON over HTTP.\n    HTTP_JSON = 1;\n\n    // Zipkin API v2, protobuf over HTTP.\n    HTTP_PROTO = 2;\n\n    // [#not-implemented-hide:]\n    GRPC = 3;\n  }\n\n  // The cluster manager cluster that hosts the Zipkin collectors. Note that the\n  // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster\n  // resources <envoy_api_field_config.bootstrap.v3.Bootstrap.StaticResources.clusters>`.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The API endpoint of the Zipkin service where the spans will be sent. When\n  // using a standard Zipkin installation, the API endpoint is typically\n  // /api/v1/spans, which is the default value.\n  string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Determines whether a 128bit trace id will be used when creating a new\n  // trace instance. The default value is false, which will result in a 64 bit trace id being used.\n  bool trace_id_128bit = 3;\n\n  // Determines whether client and server spans will share the same span context.\n  // The default value is true.\n  google.protobuf.BoolValue shared_span_context = 4;\n\n  // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be\n  // used.\n  CollectorEndpointVersion collector_endpoint_version = 5;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/trace/v4alpha/http_tracer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v4alpha;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v4alpha\";\noption java_outer_classname = \"HttpTracerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tracing]\n// Tracing :ref:`architecture overview <arch_overview_tracing>`.\n\n// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy.\n//\n// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one\n// supported.\n//\n// .. attention::\n//\n//   Use of this message type has been deprecated in favor of direct use of\n//   :ref:`Tracing.Http <envoy_api_msg_config.trace.v4alpha.Tracing.Http>`.\nmessage Tracing {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v3.Tracing\";\n\n  // Configuration for an HTTP tracer provider used by Envoy.\n  //\n  // The configuration is defined by the\n  // :ref:`HttpConnectionManager.Tracing <envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.Tracing>`\n  // :ref:`provider <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.Tracing.provider>`\n  // field.\n  message Http {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.trace.v3.Tracing.Http\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    // The name of the HTTP trace driver to instantiate. The name must match a\n    // supported HTTP trace driver. 
Built-in trace drivers:\n    //\n    // - *envoy.tracers.lightstep*\n    // - *envoy.tracers.zipkin*\n    // - *envoy.tracers.dynamic_ot*\n    // - *envoy.tracers.datadog*\n    // - *envoy.tracers.opencensus*\n    // - *envoy.tracers.xray*\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Trace driver specific configuration which depends on the driver being instantiated.\n    // See the trace drivers for examples:\n    //\n    // - :ref:`LightstepConfig <envoy_api_msg_extensions.tracers.lightstep.v4alpha.LightstepConfig>`\n    // - :ref:`ZipkinConfig <envoy_api_msg_extensions.tracers.zipkin.v4alpha.ZipkinConfig>`\n    // - :ref:`DynamicOtConfig <envoy_api_msg_extensions.tracers.dynamic_ot.v4alpha.DynamicOtConfig>`\n    // - :ref:`DatadogConfig <envoy_api_msg_extensions.tracers.datadog.v4alpha.DatadogConfig>`\n    // - :ref:`OpenCensusConfig <envoy_api_msg_extensions.tracers.opencensus.v4alpha.OpenCensusConfig>`\n    // - :ref:`AWS X-Ray <envoy_api_msg_extensions.tracers.xray.v4alpha.XRayConfig>`\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Provides configuration for the HTTP tracer.\n  Http http = 1;\n}\n"
  },
  {
    "path": "api/envoy/config/trace/v4alpha/service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v4alpha;\n\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v4alpha\";\noption java_outer_classname = \"ServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Trace Service]\n\n// Configuration structure.\nmessage TraceServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.TraceServiceConfig\";\n\n  // The upstream gRPC cluster that hosts the metrics service.\n  core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/config/transport_socket/alts/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/transport_socket/alts/v2alpha/alts.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.transport_socket.alts.v2alpha;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.transport_socket.alts.v2alpha\";\noption java_outer_classname = \"AltsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.alts.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: ALTS]\n// [#extension: envoy.transport_sockets.alts]\n\n// Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy.\n// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/\nmessage Alts {\n  // The location of a handshaker service, this is usually 169.254.169.254:8080\n  // on GCE.\n  string handshaker_service = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The acceptable service accounts from peer, peers not in the list will be rejected in the\n  // handshake validation step. If empty, no validation will be performed.\n  repeated string peer_service_accounts = 2;\n}\n"
  },
  {
    "path": "api/envoy/config/transport_socket/raw_buffer/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.transport_socket.raw_buffer.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.transport_socket.raw_buffer.v2\";\noption java_outer_classname = \"RawBufferProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.raw_buffer.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Raw Buffer]\n// [#extension: envoy.transport_sockets.raw_buffer]\n\n// Configuration for raw buffer transport socket.\nmessage RawBuffer {\n}\n"
  },
  {
    "path": "api/envoy/config/transport_socket/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/common/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/config/transport_socket/tap/v2alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.transport_socket.tap.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/config/common/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.transport_socket.tap.v2alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tap.v3\";\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap]\n// [#extension: envoy.transport_sockets.tap]\n\n// Configuration for tap transport socket. This wraps another transport socket, providing the\n// ability to interpose and record in plain text any traffic that is surfaced to Envoy.\nmessage Tap {\n  // Common configuration for the tap transport socket.\n  common.tap.v2alpha.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n\n  // The underlying transport socket being wrapped.\n  api.v2.core.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/data/accesslog/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/accesslog/v2/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.accesslog.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.accesslog.v2\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC access logs]\n// Envoy access logs describe incoming interaction with Envoy over a fixed\n// period of time, and typically cover a single request/response exchange,\n// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP).\n// Access logs contain fields defined in protocol-specific protobuf messages.\n//\n// Except where explicitly declared otherwise, all fields describe\n// *downstream* interaction between Envoy and a connected client.\n// Fields describing *upstream* interaction will explicitly include ``upstream``\n// in their name.\n\nmessage TCPAccessLogEntry {\n  // Common properties shared by all Envoy access logs.\n  AccessLogCommon common_properties = 1;\n\n  // Properties of the TCP connection.\n  ConnectionProperties connection_properties = 2;\n}\n\nmessage HTTPAccessLogEntry {\n  // HTTP version\n  enum HTTPVersion {\n    PROTOCOL_UNSPECIFIED = 0;\n    HTTP10 = 1;\n    HTTP11 = 2;\n    HTTP2 = 3;\n    HTTP3 = 4;\n  }\n\n  // Common properties shared by all Envoy access logs.\n  AccessLogCommon common_properties = 1;\n\n  HTTPVersion protocol_version = 2;\n\n  // Description of the incoming HTTP request.\n  HTTPRequestProperties request = 3;\n\n  // Description of the outgoing HTTP response.\n  HTTPResponseProperties response = 4;\n}\n\n// Defines fields for a 
connection\nmessage ConnectionProperties {\n  // Number of bytes received from downstream.\n  uint64 received_bytes = 1;\n\n  // Number of bytes sent to downstream.\n  uint64 sent_bytes = 2;\n}\n\n// Defines fields that are shared by all Envoy access logs.\n// [#next-free-field: 22]\nmessage AccessLogCommon {\n  // [#not-implemented-hide:]\n  // This field indicates the rate at which this log entry was sampled.\n  // Valid range is (0.0, 1.0].\n  double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}];\n\n  // This field is the remote/origin address on which the request from the user was received.\n  // Note: This may not be the physical peer. E.g, if the remote address is inferred from for\n  // example the x-forwarder-for header, proxy protocol, etc.\n  api.v2.core.Address downstream_remote_address = 2;\n\n  // This field is the local/destination address on which the request from the user was received.\n  api.v2.core.Address downstream_local_address = 3;\n\n  // If the connection is secure, this field will contain TLS properties.\n  TLSProperties tls_properties = 4;\n\n  // The time that Envoy started servicing this request. This is effectively the time that the first\n  // downstream byte is received.\n  google.protobuf.Timestamp start_time = 5;\n\n  // Interval between the first downstream byte received and the last\n  // downstream byte received (i.e. time it takes to receive a request).\n  google.protobuf.Duration time_to_last_rx_byte = 6;\n\n  // Interval between the first downstream byte received and the first upstream byte sent. There may\n  // by considerable delta between *time_to_last_rx_byte* and this value due to filters.\n  // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about\n  // not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_first_upstream_tx_byte = 7;\n\n  // Interval between the first downstream byte received and the last upstream byte sent. 
There may\n  // by considerable delta between *time_to_last_rx_byte* and this value due to filters.\n  // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about\n  // not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_last_upstream_tx_byte = 8;\n\n  // Interval between the first downstream byte received and the first upstream\n  // byte received (i.e. time it takes to start receiving a response).\n  google.protobuf.Duration time_to_first_upstream_rx_byte = 9;\n\n  // Interval between the first downstream byte received and the last upstream\n  // byte received (i.e. time it takes to receive a complete response).\n  google.protobuf.Duration time_to_last_upstream_rx_byte = 10;\n\n  // Interval between the first downstream byte received and the first downstream byte sent.\n  // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field\n  // due to filters. Additionally, the same caveats apply as documented in\n  // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_first_downstream_tx_byte = 11;\n\n  // Interval between the first downstream byte received and the last downstream byte sent.\n  // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta\n  // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate\n  // time. In the current implementation it does not include kernel socket buffer time. In the\n  // current implementation it also does not include send window buffering inside the HTTP/2 codec.\n  // In the future it is likely that work will be done to make this duration more accurate.\n  google.protobuf.Duration time_to_last_downstream_tx_byte = 12;\n\n  // The upstream remote/destination address that handles this exchange. 
This does not include\n  // retries.\n  api.v2.core.Address upstream_remote_address = 13;\n\n  // The upstream local/origin address that handles this exchange. This does not include retries.\n  api.v2.core.Address upstream_local_address = 14;\n\n  // The upstream cluster that *upstream_remote_address* belongs to.\n  string upstream_cluster = 15;\n\n  // Flags indicating occurrences during request/response processing.\n  ResponseFlags response_flags = 16;\n\n  // All metadata encountered during request processing, including endpoint\n  // selection.\n  //\n  // This can be used to associate IDs attached to the various configurations\n  // used to process this request with the access log entry. For example, a\n  // route created from a higher level forwarding rule with some ID can place\n  // that ID in this field and cross reference later. It can also be used to\n  // determine if a canary endpoint was used or not.\n  api.v2.core.Metadata metadata = 17;\n\n  // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the\n  // failure reason from the transport socket. The format of this field depends on the configured\n  // upstream transport socket. Common TLS failures are in\n  // :ref:`TLS trouble shooting <arch_overview_ssl_trouble_shooting>`.\n  string upstream_transport_failure_reason = 18;\n\n  // The name of the route\n  string route_name = 19;\n\n  // This field is the downstream direct remote address on which the request from the user was\n  // received. Note: This is always the physical peer, even if the remote address is inferred from\n  // for example the x-forwarder-for header, proxy protocol, etc.\n  api.v2.core.Address downstream_direct_remote_address = 20;\n\n  // Map of filter state in stream info that have been configured to be logged. 
If the filter\n  // state serialized to any message other than `google.protobuf.Any` it will be packed into\n  // `google.protobuf.Any`.\n  map<string, google.protobuf.Any> filter_state_objects = 21;\n}\n\n// Flags indicating occurrences during request/response processing.\n// [#next-free-field: 20]\nmessage ResponseFlags {\n  message Unauthorized {\n    // Reasons why the request was unauthorized\n    enum Reason {\n      REASON_UNSPECIFIED = 0;\n\n      // The request was denied by the external authorization service.\n      EXTERNAL_SERVICE = 1;\n    }\n\n    Reason reason = 1;\n  }\n\n  // Indicates local server healthcheck failed.\n  bool failed_local_healthcheck = 1;\n\n  // Indicates there was no healthy upstream.\n  bool no_healthy_upstream = 2;\n\n  // Indicates an there was an upstream request timeout.\n  bool upstream_request_timeout = 3;\n\n  // Indicates local codec level reset was sent on the stream.\n  bool local_reset = 4;\n\n  // Indicates remote codec level reset was received on the stream.\n  bool upstream_remote_reset = 5;\n\n  // Indicates there was a local reset by a connection pool due to an initial connection failure.\n  bool upstream_connection_failure = 6;\n\n  // Indicates the stream was reset due to an upstream connection termination.\n  bool upstream_connection_termination = 7;\n\n  // Indicates the stream was reset because of a resource overflow.\n  bool upstream_overflow = 8;\n\n  // Indicates no route was found for the request.\n  bool no_route_found = 9;\n\n  // Indicates that the request was delayed before proxying.\n  bool delay_injected = 10;\n\n  // Indicates that the request was aborted with an injected error code.\n  bool fault_injected = 11;\n\n  // Indicates that the request was rate-limited locally.\n  bool rate_limited = 12;\n\n  // Indicates if the request was deemed unauthorized and the reason for it.\n  Unauthorized unauthorized_details = 13;\n\n  // Indicates that the request was rejected because there was an error in 
rate limit service.\n  bool rate_limit_service_error = 14;\n\n  // Indicates the stream was reset due to a downstream connection termination.\n  bool downstream_connection_termination = 15;\n\n  // Indicates that the upstream retry limit was exceeded, resulting in a downstream error.\n  bool upstream_retry_limit_exceeded = 16;\n\n  // Indicates that the stream idle timeout was hit, resulting in a downstream 408.\n  bool stream_idle_timeout = 17;\n\n  // Indicates that the request was rejected because an envoy request header failed strict\n  // validation.\n  bool invalid_envoy_request_headers = 18;\n\n  // Indicates there was an HTTP protocol error on the downstream request.\n  bool downstream_protocol_error = 19;\n}\n\n// Properties of a negotiated TLS connection.\n// [#next-free-field: 7]\nmessage TLSProperties {\n  enum TLSVersion {\n    VERSION_UNSPECIFIED = 0;\n    TLSv1 = 1;\n    TLSv1_1 = 2;\n    TLSv1_2 = 3;\n    TLSv1_3 = 4;\n  }\n\n  message CertificateProperties {\n    message SubjectAltName {\n      oneof san {\n        string uri = 1;\n\n        // [#not-implemented-hide:]\n        string dns = 2;\n      }\n    }\n\n    // SANs present in the certificate.\n    repeated SubjectAltName subject_alt_name = 1;\n\n    // The subject field of the certificate.\n    string subject = 2;\n  }\n\n  // Version of TLS that was negotiated.\n  TLSVersion tls_version = 1;\n\n  // TLS cipher suite negotiated during handshake. The value is a\n  // four-digit hex code defined by the IANA TLS Cipher Suite Registry\n  // (e.g. 
``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``).\n  //\n  // Here it is expressed as an integer.\n  google.protobuf.UInt32Value tls_cipher_suite = 2;\n\n  // SNI hostname from handshake.\n  string tls_sni_hostname = 3;\n\n  // Properties of the local certificate used to negotiate TLS.\n  CertificateProperties local_certificate_properties = 4;\n\n  // Properties of the peer certificate used to negotiate TLS.\n  CertificateProperties peer_certificate_properties = 5;\n\n  // The TLS session ID.\n  string tls_session_id = 6;\n}\n\n// [#next-free-field: 14]\nmessage HTTPRequestProperties {\n  // The request method (RFC 7231/2616).\n  api.v2.core.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The scheme portion of the incoming request URI.\n  string scheme = 2;\n\n  // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value.\n  string authority = 3;\n\n  // The port of the incoming request URI\n  // (unused currently, as port is composed onto authority).\n  google.protobuf.UInt32Value port = 4;\n\n  // The path portion from the incoming request URI.\n  string path = 5;\n\n  // Value of the ``User-Agent`` request header.\n  string user_agent = 6;\n\n  // Value of the ``Referer`` request header.\n  string referer = 7;\n\n  // Value of the ``X-Forwarded-For`` request header.\n  string forwarded_for = 8;\n\n  // Value of the ``X-Request-Id`` request header\n  //\n  // This header is used by Envoy to uniquely identify a request.\n  // It will be generated for all external requests and internal requests that\n  // do not already have a request ID.\n  string request_id = 9;\n\n  // Value of the ``X-Envoy-Original-Path`` request header.\n  string original_path = 10;\n\n  // Size of the HTTP request headers in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. 
it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 request_headers_bytes = 11;\n\n  // Size of the HTTP request body in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 request_body_bytes = 12;\n\n  // Map of additional headers that have been configured to be logged.\n  map<string, string> request_headers = 13;\n}\n\n// [#next-free-field: 7]\nmessage HTTPResponseProperties {\n  // The HTTP response code returned by Envoy.\n  google.protobuf.UInt32Value response_code = 1;\n\n  // Size of the HTTP response headers in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 response_headers_bytes = 2;\n\n  // Size of the HTTP response body in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 response_body_bytes = 3;\n\n  // Map of additional headers configured to be logged.\n  map<string, string> response_headers = 4;\n\n  // Map of trailers configured to be logged.\n  map<string, string> response_trailers = 5;\n\n  // The HTTP response code details.\n  string response_code_details = 6;\n}\n"
  },
  {
    "path": "api/envoy/data/accesslog/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/accesslog/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/accesslog/v3/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.accesslog.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.accesslog.v3\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC access logs]\n// Envoy access logs describe incoming interaction with Envoy over a fixed\n// period of time, and typically cover a single request/response exchange,\n// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP).\n// Access logs contain fields defined in protocol-specific protobuf messages.\n//\n// Except where explicitly declared otherwise, all fields describe\n// *downstream* interaction between Envoy and a connected client.\n// Fields describing *upstream* interaction will explicitly include ``upstream``\n// in their name.\n\nmessage TCPAccessLogEntry {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.TCPAccessLogEntry\";\n\n  // Common properties shared by all Envoy access logs.\n  AccessLogCommon common_properties = 1;\n\n  // Properties of the TCP connection.\n  ConnectionProperties connection_properties = 2;\n}\n\nmessage HTTPAccessLogEntry {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.HTTPAccessLogEntry\";\n\n  // HTTP version\n  enum HTTPVersion {\n    PROTOCOL_UNSPECIFIED = 0;\n    HTTP10 = 1;\n    HTTP11 = 2;\n    HTTP2 = 3;\n    HTTP3 = 4;\n  }\n\n  // Common properties shared by all Envoy access logs.\n  
AccessLogCommon common_properties = 1;\n\n  HTTPVersion protocol_version = 2;\n\n  // Description of the incoming HTTP request.\n  HTTPRequestProperties request = 3;\n\n  // Description of the outgoing HTTP response.\n  HTTPResponseProperties response = 4;\n}\n\n// Defines fields for a connection\nmessage ConnectionProperties {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.ConnectionProperties\";\n\n  // Number of bytes received from downstream.\n  uint64 received_bytes = 1;\n\n  // Number of bytes sent to downstream.\n  uint64 sent_bytes = 2;\n}\n\n// Defines fields that are shared by all Envoy access logs.\n// [#next-free-field: 22]\nmessage AccessLogCommon {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.AccessLogCommon\";\n\n  // [#not-implemented-hide:]\n  // This field indicates the rate at which this log entry was sampled.\n  // Valid range is (0.0, 1.0].\n  double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}];\n\n  // This field is the remote/origin address on which the request from the user was received.\n  // Note: This may not be the physical peer. E.g, if the remote address is inferred from for\n  // example the x-forwarder-for header, proxy protocol, etc.\n  config.core.v3.Address downstream_remote_address = 2;\n\n  // This field is the local/destination address on which the request from the user was received.\n  config.core.v3.Address downstream_local_address = 3;\n\n  // If the connection is secure, this field will contain TLS properties.\n  TLSProperties tls_properties = 4;\n\n  // The time that Envoy started servicing this request. This is effectively the time that the first\n  // downstream byte is received.\n  google.protobuf.Timestamp start_time = 5;\n\n  // Interval between the first downstream byte received and the last\n  // downstream byte received (i.e. 
time it takes to receive a request).\n  google.protobuf.Duration time_to_last_rx_byte = 6;\n\n  // Interval between the first downstream byte received and the first upstream byte sent. There may\n  // by considerable delta between *time_to_last_rx_byte* and this value due to filters.\n  // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about\n  // not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_first_upstream_tx_byte = 7;\n\n  // Interval between the first downstream byte received and the last upstream byte sent. There may\n  // by considerable delta between *time_to_last_rx_byte* and this value due to filters.\n  // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about\n  // not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_last_upstream_tx_byte = 8;\n\n  // Interval between the first downstream byte received and the first upstream\n  // byte received (i.e. time it takes to start receiving a response).\n  google.protobuf.Duration time_to_first_upstream_rx_byte = 9;\n\n  // Interval between the first downstream byte received and the last upstream\n  // byte received (i.e. time it takes to receive a complete response).\n  google.protobuf.Duration time_to_last_upstream_rx_byte = 10;\n\n  // Interval between the first downstream byte received and the first downstream byte sent.\n  // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field\n  // due to filters. Additionally, the same caveats apply as documented in\n  // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_first_downstream_tx_byte = 11;\n\n  // Interval between the first downstream byte received and the last downstream byte sent.\n  // Depending on protocol, buffering, windowing, filters, etc. 
there may be a considerable delta\n  // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate\n  // time. In the current implementation it does not include kernel socket buffer time. In the\n  // current implementation it also does not include send window buffering inside the HTTP/2 codec.\n  // In the future it is likely that work will be done to make this duration more accurate.\n  google.protobuf.Duration time_to_last_downstream_tx_byte = 12;\n\n  // The upstream remote/destination address that handles this exchange. This does not include\n  // retries.\n  config.core.v3.Address upstream_remote_address = 13;\n\n  // The upstream local/origin address that handles this exchange. This does not include retries.\n  config.core.v3.Address upstream_local_address = 14;\n\n  // The upstream cluster that *upstream_remote_address* belongs to.\n  string upstream_cluster = 15;\n\n  // Flags indicating occurrences during request/response processing.\n  ResponseFlags response_flags = 16;\n\n  // All metadata encountered during request processing, including endpoint\n  // selection.\n  //\n  // This can be used to associate IDs attached to the various configurations\n  // used to process this request with the access log entry. For example, a\n  // route created from a higher level forwarding rule with some ID can place\n  // that ID in this field and cross reference later. It can also be used to\n  // determine if a canary endpoint was used or not.\n  config.core.v3.Metadata metadata = 17;\n\n  // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the\n  // failure reason from the transport socket. The format of this field depends on the configured\n  // upstream transport socket. 
Common TLS failures are in\n  // :ref:`TLS trouble shooting <arch_overview_ssl_trouble_shooting>`.\n  string upstream_transport_failure_reason = 18;\n\n  // The name of the route\n  string route_name = 19;\n\n  // This field is the downstream direct remote address on which the request from the user was\n  // received. Note: This is always the physical peer, even if the remote address is inferred from\n  // for example the x-forwarder-for header, proxy protocol, etc.\n  config.core.v3.Address downstream_direct_remote_address = 20;\n\n  // Map of filter state in stream info that have been configured to be logged. If the filter\n  // state serialized to any message other than `google.protobuf.Any` it will be packed into\n  // `google.protobuf.Any`.\n  map<string, google.protobuf.Any> filter_state_objects = 21;\n}\n\n// Flags indicating occurrences during request/response processing.\n// [#next-free-field: 24]\nmessage ResponseFlags {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.ResponseFlags\";\n\n  message Unauthorized {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.accesslog.v2.ResponseFlags.Unauthorized\";\n\n    // Reasons why the request was unauthorized\n    enum Reason {\n      REASON_UNSPECIFIED = 0;\n\n      // The request was denied by the external authorization service.\n      EXTERNAL_SERVICE = 1;\n    }\n\n    Reason reason = 1;\n  }\n\n  // Indicates local server healthcheck failed.\n  bool failed_local_healthcheck = 1;\n\n  // Indicates there was no healthy upstream.\n  bool no_healthy_upstream = 2;\n\n  // Indicates an there was an upstream request timeout.\n  bool upstream_request_timeout = 3;\n\n  // Indicates local codec level reset was sent on the stream.\n  bool local_reset = 4;\n\n  // Indicates remote codec level reset was received on the stream.\n  bool upstream_remote_reset = 5;\n\n  // Indicates there was a local reset by a connection pool due to an 
initial connection failure.\n  bool upstream_connection_failure = 6;\n\n  // Indicates the stream was reset due to an upstream connection termination.\n  bool upstream_connection_termination = 7;\n\n  // Indicates the stream was reset because of a resource overflow.\n  bool upstream_overflow = 8;\n\n  // Indicates no route was found for the request.\n  bool no_route_found = 9;\n\n  // Indicates that the request was delayed before proxying.\n  bool delay_injected = 10;\n\n  // Indicates that the request was aborted with an injected error code.\n  bool fault_injected = 11;\n\n  // Indicates that the request was rate-limited locally.\n  bool rate_limited = 12;\n\n  // Indicates if the request was deemed unauthorized and the reason for it.\n  Unauthorized unauthorized_details = 13;\n\n  // Indicates that the request was rejected because there was an error in rate limit service.\n  bool rate_limit_service_error = 14;\n\n  // Indicates the stream was reset due to a downstream connection termination.\n  bool downstream_connection_termination = 15;\n\n  // Indicates that the upstream retry limit was exceeded, resulting in a downstream error.\n  bool upstream_retry_limit_exceeded = 16;\n\n  // Indicates that the stream idle timeout was hit, resulting in a downstream 408.\n  bool stream_idle_timeout = 17;\n\n  // Indicates that the request was rejected because an envoy request header failed strict\n  // validation.\n  bool invalid_envoy_request_headers = 18;\n\n  // Indicates there was an HTTP protocol error on the downstream request.\n  bool downstream_protocol_error = 19;\n\n  // Indicates there was a max stream duration reached on the upstream request.\n  bool upstream_max_stream_duration_reached = 20;\n\n  // Indicates the response was served from a cache filter.\n  bool response_from_cache_filter = 21;\n\n  // Indicates that a filter configuration is not available.\n  bool no_filter_config_found = 22;\n\n  // Indicates that request or connection exceeded the downstream 
connection duration.\n  bool duration_timeout = 23;\n}\n\n// Properties of a negotiated TLS connection.\n// [#next-free-field: 7]\nmessage TLSProperties {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.TLSProperties\";\n\n  enum TLSVersion {\n    VERSION_UNSPECIFIED = 0;\n    TLSv1 = 1;\n    TLSv1_1 = 2;\n    TLSv1_2 = 3;\n    TLSv1_3 = 4;\n  }\n\n  message CertificateProperties {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.accesslog.v2.TLSProperties.CertificateProperties\";\n\n    message SubjectAltName {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.data.accesslog.v2.TLSProperties.CertificateProperties.SubjectAltName\";\n\n      oneof san {\n        string uri = 1;\n\n        // [#not-implemented-hide:]\n        string dns = 2;\n      }\n    }\n\n    // SANs present in the certificate.\n    repeated SubjectAltName subject_alt_name = 1;\n\n    // The subject field of the certificate.\n    string subject = 2;\n  }\n\n  // Version of TLS that was negotiated.\n  TLSVersion tls_version = 1;\n\n  // TLS cipher suite negotiated during handshake. The value is a\n  // four-digit hex code defined by the IANA TLS Cipher Suite Registry\n  // (e.g. 
``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``).\n  //\n  // Here it is expressed as an integer.\n  google.protobuf.UInt32Value tls_cipher_suite = 2;\n\n  // SNI hostname from handshake.\n  string tls_sni_hostname = 3;\n\n  // Properties of the local certificate used to negotiate TLS.\n  CertificateProperties local_certificate_properties = 4;\n\n  // Properties of the peer certificate used to negotiate TLS.\n  CertificateProperties peer_certificate_properties = 5;\n\n  // The TLS session ID.\n  string tls_session_id = 6;\n}\n\n// [#next-free-field: 14]\nmessage HTTPRequestProperties {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.HTTPRequestProperties\";\n\n  // The request method (RFC 7231/2616).\n  config.core.v3.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The scheme portion of the incoming request URI.\n  string scheme = 2;\n\n  // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value.\n  string authority = 3;\n\n  // The port of the incoming request URI\n  // (unused currently, as port is composed onto authority).\n  google.protobuf.UInt32Value port = 4;\n\n  // The path portion from the incoming request URI.\n  string path = 5;\n\n  // Value of the ``User-Agent`` request header.\n  string user_agent = 6;\n\n  // Value of the ``Referer`` request header.\n  string referer = 7;\n\n  // Value of the ``X-Forwarded-For`` request header.\n  string forwarded_for = 8;\n\n  // Value of the ``X-Request-Id`` request header\n  //\n  // This header is used by Envoy to uniquely identify a request.\n  // It will be generated for all external requests and internal requests that\n  // do not already have a request ID.\n  string request_id = 9;\n\n  // Value of the ``X-Envoy-Original-Path`` request header.\n  string original_path = 10;\n\n  // Size of the HTTP request headers in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. 
it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 request_headers_bytes = 11;\n\n  // Size of the HTTP request body in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 request_body_bytes = 12;\n\n  // Map of additional headers that have been configured to be logged.\n  map<string, string> request_headers = 13;\n}\n\n// [#next-free-field: 7]\nmessage HTTPResponseProperties {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.HTTPResponseProperties\";\n\n  // The HTTP response code returned by Envoy.\n  google.protobuf.UInt32Value response_code = 1;\n\n  // Size of the HTTP response headers in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 response_headers_bytes = 2;\n\n  // Size of the HTTP response body in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 response_body_bytes = 3;\n\n  // Map of additional headers configured to be logged.\n  map<string, string> response_headers = 4;\n\n  // Map of trailers configured to be logged.\n  map<string, string> response_trailers = 5;\n\n  // The HTTP response code details.\n  string response_code_details = 6;\n}\n"
  },
  {
    "path": "api/envoy/data/cluster/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/data/cluster/v2alpha/outlier_detection_event.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.cluster.v2alpha;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.cluster.v2alpha\";\noption java_outer_classname = \"OutlierDetectionEventProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.data.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Outlier detection logging events]\n// :ref:`Outlier detection logging <arch_overview_outlier_detection_logging>`.\n\n// Type of ejection that took place\nenum OutlierEjectionType {\n  // In case upstream host returns certain number of consecutive 5xx.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all type of errors are treated as HTTP 5xx errors.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  CONSECUTIVE_5XX = 0;\n\n  // In case upstream host returns certain number of consecutive gateway errors\n  CONSECUTIVE_GATEWAY_FAILURE = 1;\n\n  // Runs over aggregated success rate statistics from every host in cluster\n  // and selects hosts for which ratio of successful replies deviates from other hosts\n  // in the cluster.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors (externally and locally generated) are used to calculate success rate\n  // statistics. 
See :ref:`Cluster outlier detection <arch_overview_outlier_detection>`\n  // documentation for details.\n  SUCCESS_RATE = 2;\n\n  // Consecutive local origin failures: Connection failures, resets, timeouts, etc\n  // This type of ejection happens only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3;\n\n  // Runs over aggregated success rate statistics for local origin failures\n  // for all hosts in the cluster and selects hosts for which success rate deviates from other\n  // hosts in the cluster. This type of ejection happens only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  SUCCESS_RATE_LOCAL_ORIGIN = 4;\n\n  // Runs over aggregated success rate statistics from every host in cluster and selects hosts for\n  // which ratio of failed replies is above configured value.\n  FAILURE_PERCENTAGE = 5;\n\n  // Runs over aggregated success rate statistics for local origin failures from every host in\n  // cluster and selects hosts for which ratio of failed replies is above configured value.\n  FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6;\n}\n\n// Represents possible action applied to upstream host\nenum Action {\n  // In case host was excluded from service\n  EJECT = 0;\n\n  // In case host was brought back into service\n  UNEJECT = 1;\n}\n\n// [#next-free-field: 12]\nmessage OutlierDetectionEvent {\n  // In case of eject represents type of ejection that took place.\n  OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Timestamp for event.\n  google.protobuf.Timestamp 
timestamp = 2;\n\n  // The time in seconds since the last action (either an ejection or unejection) took place.\n  google.protobuf.UInt64Value secs_since_last_action = 3;\n\n  // The :ref:`cluster <envoy_api_msg_Cluster>` that owns the ejected host.\n  string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}];\n\n  // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``.\n  string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}];\n\n  // The action that took place.\n  Action action = 6 [(validate.rules).enum = {defined_only: true}];\n\n  // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to\n  // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and\n  // then re-added).\n  uint32 num_ejections = 7;\n\n  // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was\n  // ejected. ``false`` means the event was logged but the host was not actually ejected.\n  bool enforced = 8;\n\n  oneof event {\n    option (validate.required) = true;\n\n    OutlierEjectSuccessRate eject_success_rate_event = 9;\n\n    OutlierEjectConsecutive eject_consecutive_event = 10;\n\n    OutlierEjectFailurePercentage eject_failure_percentage_event = 11;\n  }\n}\n\nmessage OutlierEjectSuccessRate {\n  // Host’s success rate at the time of the ejection event on a 0-100 range.\n  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];\n\n  // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100\n  // range.\n  uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}];\n\n  // Success rate ejection threshold at the time of the ejection event.\n  uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}];\n}\n\nmessage OutlierEjectConsecutive {\n}\n\nmessage OutlierEjectFailurePercentage {\n  // Host's success rate at the time of the ejection event on 
a 0-100 range.\n  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];\n}\n"
  },
  {
    "path": "api/envoy/data/cluster/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/data/cluster/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/cluster/v3/outlier_detection_event.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.cluster.v3;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.cluster.v3\";\noption java_outer_classname = \"OutlierDetectionEventProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Outlier detection logging events]\n// :ref:`Outlier detection logging <arch_overview_outlier_detection_logging>`.\n\n// Type of ejection that took place\nenum OutlierEjectionType {\n  // In case upstream host returns certain number of consecutive 5xx.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all type of errors are treated as HTTP 5xx errors.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  CONSECUTIVE_5XX = 0;\n\n  // In case upstream host returns certain number of consecutive gateway errors\n  CONSECUTIVE_GATEWAY_FAILURE = 1;\n\n  // Runs over aggregated success rate statistics from every host in cluster\n  // and selects hosts for which ratio of successful replies deviates from other hosts\n  // in the cluster.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors (externally and locally generated) are used to calculate success rate\n  // statistics. 
See :ref:`Cluster outlier detection <arch_overview_outlier_detection>`\n  // documentation for details.\n  SUCCESS_RATE = 2;\n\n  // Consecutive local origin failures: Connection failures, resets, timeouts, etc\n  // This type of ejection happens only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3;\n\n  // Runs over aggregated success rate statistics for local origin failures\n  // for all hosts in the cluster and selects hosts for which success rate deviates from other\n  // hosts in the cluster. This type of ejection happens only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  SUCCESS_RATE_LOCAL_ORIGIN = 4;\n\n  // Runs over aggregated success rate statistics from every host in cluster and selects hosts for\n  // which ratio of failed replies is above configured value.\n  FAILURE_PERCENTAGE = 5;\n\n  // Runs over aggregated success rate statistics for local origin failures from every host in\n  // cluster and selects hosts for which ratio of failed replies is above configured value.\n  FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6;\n}\n\n// Represents possible action applied to upstream host\nenum Action {\n  // In case host was excluded from service\n  EJECT = 0;\n\n  // In case host was brought back into service\n  UNEJECT = 1;\n}\n\n// [#next-free-field: 12]\nmessage OutlierDetectionEvent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.cluster.v2alpha.OutlierDetectionEvent\";\n\n  // In case of eject represents type of ejection that took 
place.\n  OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Timestamp for event.\n  google.protobuf.Timestamp timestamp = 2;\n\n  // The time in seconds since the last action (either an ejection or unejection) took place.\n  google.protobuf.UInt64Value secs_since_last_action = 3;\n\n  // The :ref:`cluster <envoy_api_msg_config.cluster.v3.Cluster>` that owns the ejected host.\n  string cluster_name = 4 [(validate.rules).string = {min_len: 1}];\n\n  // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``.\n  string upstream_url = 5 [(validate.rules).string = {min_len: 1}];\n\n  // The action that took place.\n  Action action = 6 [(validate.rules).enum = {defined_only: true}];\n\n  // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to\n  // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and\n  // then re-added).\n  uint32 num_ejections = 7;\n\n  // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was\n  // ejected. 
``false`` means the event was logged but the host was not actually ejected.\n  bool enforced = 8;\n\n  oneof event {\n    option (validate.required) = true;\n\n    OutlierEjectSuccessRate eject_success_rate_event = 9;\n\n    OutlierEjectConsecutive eject_consecutive_event = 10;\n\n    OutlierEjectFailurePercentage eject_failure_percentage_event = 11;\n  }\n}\n\nmessage OutlierEjectSuccessRate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.cluster.v2alpha.OutlierEjectSuccessRate\";\n\n  // Host’s success rate at the time of the ejection event on a 0-100 range.\n  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];\n\n  // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100\n  // range.\n  uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}];\n\n  // Success rate ejection threshold at the time of the ejection event.\n  uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}];\n}\n\nmessage OutlierEjectConsecutive {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.cluster.v2alpha.OutlierEjectConsecutive\";\n}\n\nmessage OutlierEjectFailurePercentage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.cluster.v2alpha.OutlierEjectFailurePercentage\";\n\n  // Host's success rate at the time of the ejection event on a 0-100 range.\n  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];\n}\n"
  },
  {
    "path": "api/envoy/data/core/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/core/v2alpha/health_check_event.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.core.v2alpha;\n\nimport \"envoy/api/v2/core/address.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.core.v2alpha\";\noption java_outer_classname = \"HealthCheckEventProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Health check logging events]\n// :ref:`Health check logging <arch_overview_health_check_logging>`.\n\nenum HealthCheckFailureType {\n  ACTIVE = 0;\n  PASSIVE = 1;\n  NETWORK = 2;\n}\n\nenum HealthCheckerType {\n  HTTP = 0;\n  TCP = 1;\n  GRPC = 2;\n  REDIS = 3;\n}\n\n// [#next-free-field: 10]\nmessage HealthCheckEvent {\n  HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  api.v2.core.Address host = 2;\n\n  string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof event {\n    option (validate.required) = true;\n\n    // Host ejection.\n    HealthCheckEjectUnhealthy eject_unhealthy_event = 4;\n\n    // Host addition.\n    HealthCheckAddHealthy add_healthy_event = 5;\n\n    // Host failure.\n    HealthCheckFailure health_check_failure_event = 7;\n\n    // Healthy host became degraded.\n    DegradedHealthyHost degraded_healthy_host = 8;\n\n    // A degraded host returned to being healthy.\n    NoLongerDegradedHost no_longer_degraded_host = 9;\n  }\n\n  // Timestamp for event.\n  google.protobuf.Timestamp timestamp = 6;\n}\n\nmessage HealthCheckEjectUnhealthy {\n  // The type of failure that caused this ejection.\n  HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\nmessage HealthCheckAddHealthy {\n  // Whether this addition is the result of the first ever health check on a host, in which case\n  // the configured :ref:`healthy threshold 
<envoy_api_field_core.HealthCheck.healthy_threshold>`\n  // is bypassed and the host is immediately added.\n  bool first_check = 1;\n}\n\nmessage HealthCheckFailure {\n  // The type of failure that caused this event.\n  HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Whether this event is the result of the first ever health check on a host.\n  bool first_check = 2;\n}\n\nmessage DegradedHealthyHost {\n}\n\nmessage NoLongerDegradedHost {\n}\n"
  },
  {
    "path": "api/envoy/data/core/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/core/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/core/v3/health_check_event.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.core.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.core.v3\";\noption java_outer_classname = \"HealthCheckEventProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Health check logging events]\n// :ref:`Health check logging <arch_overview_health_check_logging>`.\n\nenum HealthCheckFailureType {\n  ACTIVE = 0;\n  PASSIVE = 1;\n  NETWORK = 2;\n}\n\nenum HealthCheckerType {\n  HTTP = 0;\n  TCP = 1;\n  GRPC = 2;\n  REDIS = 3;\n}\n\n// [#next-free-field: 10]\nmessage HealthCheckEvent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.HealthCheckEvent\";\n\n  HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  config.core.v3.Address host = 2;\n\n  string cluster_name = 3 [(validate.rules).string = {min_len: 1}];\n\n  oneof event {\n    option (validate.required) = true;\n\n    // Host ejection.\n    HealthCheckEjectUnhealthy eject_unhealthy_event = 4;\n\n    // Host addition.\n    HealthCheckAddHealthy add_healthy_event = 5;\n\n    // Host failure.\n    HealthCheckFailure health_check_failure_event = 7;\n\n    // Healthy host became degraded.\n    DegradedHealthyHost degraded_healthy_host = 8;\n\n    // A degraded host returned to being healthy.\n    NoLongerDegradedHost no_longer_degraded_host = 9;\n  }\n\n  // Timestamp for event.\n  google.protobuf.Timestamp timestamp = 6;\n}\n\nmessage HealthCheckEjectUnhealthy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.HealthCheckEjectUnhealthy\";\n\n  // The type of failure that caused this ejection.\n  
HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\nmessage HealthCheckAddHealthy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.HealthCheckAddHealthy\";\n\n  // Whether this addition is the result of the first ever health check on a host, in which case\n  // the configured :ref:`healthy threshold <envoy_api_field_config.core.v3.HealthCheck.healthy_threshold>`\n  // is bypassed and the host is immediately added.\n  bool first_check = 1;\n}\n\nmessage HealthCheckFailure {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.HealthCheckFailure\";\n\n  // The type of failure that caused this event.\n  HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Whether this event is the result of the first ever health check on a host.\n  bool first_check = 2;\n}\n\nmessage DegradedHealthyHost {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.DegradedHealthyHost\";\n}\n\nmessage NoLongerDegradedHost {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.NoLongerDegradedHost\";\n}\n"
  },
  {
    "path": "api/envoy/data/dns/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/dns/v2alpha/dns_table.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.dns.v2alpha;\n\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.dns.v2alpha\";\noption java_outer_classname = \"DnsTableProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: DNS Filter Table Data]\n// :ref:`DNS Filter config overview <config_udp_listener_filters_dns_filter>`.\n\n// This message contains the configuration for the DNS Filter if populated\n// from the control plane\nmessage DnsTable {\n  // This message contains a list of IP addresses returned for a query for a known name\n  message AddressList {\n    // This field contains a well formed IP address that is returned\n    // in the answer for a name query. The address field can be an\n    // IPv4 or IPv6 address. Address family detection is done automatically\n    // when Envoy parses the string. Since this field is repeated,\n    // Envoy will return one randomly chosen entry from this list in the\n    // DNS response. 
The random index will vary per query so that we prevent\n    // clients pinning on a single address for a configured domain\n    repeated string address = 1 [(validate.rules).repeated = {\n      min_items: 1\n      items {string {min_len: 3}}\n    }];\n  }\n\n  // This message type is extensible and can contain a list of addresses\n  // or dictate some other method for resolving the addresses for an\n  // endpoint\n  message DnsEndpoint {\n    oneof endpoint_config {\n      option (validate.required) = true;\n\n      AddressList address_list = 1;\n    }\n  }\n\n  message DnsVirtualDomain {\n    // The domain name for which Envoy will respond to query requests\n    string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The configuration containing the method to determine the address\n    // of this endpoint\n    DnsEndpoint endpoint = 2;\n\n    // Sets the TTL in dns answers from Envoy returned to the client\n    google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}];\n  }\n\n  // Control how many times envoy makes an attempt to forward a query to\n  // an external server\n  uint32 external_retry_count = 1;\n\n  // Fully qualified domain names for which Envoy will respond to queries\n  repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // This field serves to help Envoy determine whether it can authoritatively\n  // answer a query for a name matching a suffix in this list. If the query\n  // name does not match a suffix in this list, Envoy will forward\n  // the query to an upstream DNS server\n  repeated type.matcher.StringMatcher known_suffixes = 3;\n}\n"
  },
  {
    "path": "api/envoy/data/dns/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/data/dns/v2alpha:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/dns/v3/dns_table.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.dns.v3;\n\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.dns.v3\";\noption java_outer_classname = \"DnsTableProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: DNS Filter Table Data]\n// :ref:`DNS Filter config overview <config_udp_listener_filters_dns_filter>`.\n\n// This message contains the configuration for the DNS Filter if populated\n// from the control plane\nmessage DnsTable {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.dns.v2alpha.DnsTable\";\n\n  // This message contains a list of IP addresses returned for a query for a known name\n  message AddressList {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v2alpha.DnsTable.AddressList\";\n\n    // This field contains a well formed IP address that is returned in the answer for a\n    // name query. The address field can be an IPv4 or IPv6 address. Address family\n    // detection is done automatically when Envoy parses the string. Since this field is\n    // repeated, Envoy will return as many entries from this list in the DNS response while\n    // keeping the response under 512 bytes\n    repeated string address = 1 [(validate.rules).repeated = {\n      min_items: 1\n      items {string {min_len: 3}}\n    }];\n  }\n\n  // Specify the service protocol using a numeric or string value\n  message DnsServiceProtocol {\n    oneof protocol_config {\n      option (validate.required) = true;\n\n      // Specify the protocol number for the service. Envoy will try to resolve the number to\n      // the protocol name. For example, 6 will resolve to \"tcp\". 
Refer to:\n      // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml\n      // for protocol names and numbers\n      uint32 number = 1 [(validate.rules).uint32 = {lt: 255}];\n\n      // Specify the protocol name for the service.\n      string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n    }\n  }\n\n  // Specify the target for a given DNS service\n  // [#next-free-field: 6]\n  message DnsServiceTarget {\n    // Specify the name of the endpoint for the Service. The name is a hostname or a cluster\n    oneof endpoint_type {\n      option (validate.required) = true;\n\n      // Use a resolvable hostname as the endpoint for a service.\n      string host_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n      // Use a cluster name as the endpoint for a service.\n      string cluster_name = 2\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n    }\n\n    // The priority of the service record target\n    uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}];\n\n    // The weight of the service record target\n    uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}];\n\n    // The port to which the service is bound. This value is optional if the target is a\n    // cluster. Setting port to zero in this case makes the filter use the port value\n    // from the cluster host\n    uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}];\n  }\n\n  // This message defines a service selection record returned for a service query in a domain\n  message DnsService {\n    // The name of the service without the protocol or domain name\n    string service_name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The service protocol. 
This can be specified as a string or the numeric value of the protocol\n    DnsServiceProtocol protocol = 2;\n\n    // The service entry time to live. This is independent from the DNS Answer record TTL\n    google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}];\n\n    // The list of targets hosting the service\n    repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Define a list of service records for a given service\n  message DnsServiceList {\n    repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  message DnsEndpoint {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v2alpha.DnsTable.DnsEndpoint\";\n\n    oneof endpoint_config {\n      option (validate.required) = true;\n\n      // Define a list of addresses to return for the specified endpoint\n      AddressList address_list = 1;\n\n      // Define a cluster whose addresses are returned for the specified endpoint\n      string cluster_name = 2;\n\n      // Define a DNS Service List for the specified endpoint\n      DnsServiceList service_list = 3;\n    }\n  }\n\n  message DnsVirtualDomain {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain\";\n\n    // A domain name for which Envoy will respond to query requests\n    string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The configuration containing the method to determine the address of this endpoint\n    DnsEndpoint endpoint = 2;\n\n    // Sets the TTL in DNS answers from Envoy returned to the client. 
The default TTL is 300s\n    google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}];\n  }\n\n  // Control how many times Envoy makes an attempt to forward a query to an external DNS server\n  uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}];\n\n  // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this\n  // list empty, Envoy will forward all queries to external resolvers\n  repeated DnsVirtualDomain virtual_domains = 2;\n\n  // This field serves to help Envoy determine whether it can authoritatively answer a query\n  // for a name matching a suffix in this list. If the query name does not match a suffix in\n  // this list, Envoy will forward the query to an upstream DNS server\n  repeated type.matcher.v3.StringMatcher known_suffixes = 3;\n}\n"
  },
  {
    "path": "api/envoy/data/dns/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/data/dns/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/dns/v4alpha/dns_table.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.dns.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.dns.v4alpha\";\noption java_outer_classname = \"DnsTableProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: DNS Filter Table Data]\n// :ref:`DNS Filter config overview <config_udp_listener_filters_dns_filter>`.\n\n// This message contains the configuration for the DNS Filter if populated\n// from the control plane\nmessage DnsTable {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.dns.v3.DnsTable\";\n\n  // This message contains a list of IP addresses returned for a query for a known name\n  message AddressList {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.AddressList\";\n\n    // This field contains a well formed IP address that is returned in the answer for a\n    // name query. The address field can be an IPv4 or IPv6 address. Address family\n    // detection is done automatically when Envoy parses the string. 
Since this field is\n    // repeated, Envoy will return as many entries from this list in the DNS response while\n    // keeping the response under 512 bytes\n    repeated string address = 1 [(validate.rules).repeated = {\n      min_items: 1\n      items {string {min_len: 3}}\n    }];\n  }\n\n  // Specify the service protocol using a numeric or string value\n  message DnsServiceProtocol {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsServiceProtocol\";\n\n    oneof protocol_config {\n      option (validate.required) = true;\n\n      // Specify the protocol number for the service. Envoy will try to resolve the number to\n      // the protocol name. For example, 6 will resolve to \"tcp\". Refer to:\n      // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml\n      // for protocol names and numbers\n      uint32 number = 1 [(validate.rules).uint32 = {lt: 255}];\n\n      // Specify the protocol name for the service.\n      string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n    }\n  }\n\n  // Specify the target for a given DNS service\n  // [#next-free-field: 6]\n  message DnsServiceTarget {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsServiceTarget\";\n\n    // Specify the name of the endpoint for the Service. 
The name is a hostname or a cluster\n    oneof endpoint_type {\n      option (validate.required) = true;\n\n      // Use a resolvable hostname as the endpoint for a service.\n      string host_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n      // Use a cluster name as the endpoint for a service.\n      string cluster_name = 2\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n    }\n\n    // The priority of the service record target\n    uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}];\n\n    // The weight of the service record target\n    uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}];\n\n    // The port to which the service is bound. This value is optional if the target is a\n    // cluster. Setting port to zero in this case makes the filter use the port value\n    // from the cluster host\n    uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}];\n  }\n\n  // This message defines a service selection record returned for a service query in a domain\n  message DnsService {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsService\";\n\n    // The name of the service without the protocol or domain name\n    string service_name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The service protocol. This can be specified as a string or the numeric value of the protocol\n    DnsServiceProtocol protocol = 2;\n\n    // The service entry time to live. 
This is independent from the DNS Answer record TTL\n    google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}];\n\n    // The list of targets hosting the service\n    repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Define a list of service records for a given service\n  message DnsServiceList {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsServiceList\";\n\n    repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  message DnsEndpoint {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsEndpoint\";\n\n    oneof endpoint_config {\n      option (validate.required) = true;\n\n      // Define a list of addresses to return for the specified endpoint\n      AddressList address_list = 1;\n\n      // Define a cluster whose addresses are returned for the specified endpoint\n      string cluster_name = 2;\n\n      // Define a DNS Service List for the specified endpoint\n      DnsServiceList service_list = 3;\n    }\n  }\n\n  message DnsVirtualDomain {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsVirtualDomain\";\n\n    // A domain name for which Envoy will respond to query requests\n    string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The configuration containing the method to determine the address of this endpoint\n    DnsEndpoint endpoint = 2;\n\n    // Sets the TTL in DNS answers from Envoy returned to the client. 
The default TTL is 300s\n    google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}];\n  }\n\n  // Control how many times Envoy makes an attempt to forward a query to an external DNS server\n  uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}];\n\n  // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this\n  // list empty, Envoy will forward all queries to external resolvers\n  repeated DnsVirtualDomain virtual_domains = 2;\n\n  // This field serves to help Envoy determine whether it can authoritatively answer a query\n  // for a name matching a suffix in this list. If the query name does not match a suffix in\n  // this list, Envoy will forward the query to an upstream DNS server\n  repeated type.matcher.v4alpha.StringMatcher known_suffixes = 3;\n}\n"
  },
  {
    "path": "api/envoy/data/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/tap/v2alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v2alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap common data]\n\n// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received\n// and transmitted data, etc.\nmessage Body {\n  oneof body_type {\n    // Body data as bytes. By default, tap body data will be present in this field, as the proto\n    // `bytes` type can contain any valid byte.\n    bytes as_bytes = 1;\n\n    // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING\n    // <envoy_api_enum_value_service.tap.v2alpha.OutputSink.Format.JSON_BODY_AS_STRING>` sink\n    // format type is selected. See the documentation for that option for why this is useful.\n    string as_string = 2;\n  }\n\n  // Specifies whether body data has been truncated to fit within the specified\n  // :ref:`max_buffered_rx_bytes\n  // <envoy_api_field_service.tap.v2alpha.OutputConfig.max_buffered_rx_bytes>` and\n  // :ref:`max_buffered_tx_bytes\n  // <envoy_api_field_service.tap.v2alpha.OutputConfig.max_buffered_tx_bytes>` settings.\n  bool truncated = 3;\n}\n"
  },
  {
    "path": "api/envoy/data/tap/v2alpha/http.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/data/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v2alpha\";\noption java_outer_classname = \"HttpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP tap data]\n\n// A fully buffered HTTP trace message.\nmessage HttpBufferedTrace {\n  // HTTP message wrapper.\n  message Message {\n    // Message headers.\n    repeated api.v2.core.HeaderValue headers = 1;\n\n    // Message body.\n    Body body = 2;\n\n    // Message trailers.\n    repeated api.v2.core.HeaderValue trailers = 3;\n  }\n\n  // Request message.\n  Message request = 1;\n\n  // Response message.\n  Message response = 2;\n}\n\n// A streamed HTTP trace segment. Multiple segments make up a full trace.\n// [#next-free-field: 8]\nmessage HttpStreamedTraceSegment {\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness.\n  uint64 trace_id = 1;\n\n  oneof message_piece {\n    // Request headers.\n    api.v2.core.HeaderMap request_headers = 2;\n\n    // Request body chunk.\n    Body request_body_chunk = 3;\n\n    // Request trailers.\n    api.v2.core.HeaderMap request_trailers = 4;\n\n    // Response headers.\n    api.v2.core.HeaderMap response_headers = 5;\n\n    // Response body chunk.\n    Body response_body_chunk = 6;\n\n    // Response trailers.\n    api.v2.core.HeaderMap response_trailers = 7;\n  }\n}\n"
  },
  {
    "path": "api/envoy/data/tap/v2alpha/transport.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v2alpha;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/data/tap/v2alpha/common.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v2alpha\";\noption java_outer_classname = \"TransportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Transport tap data]\n// Trace format for the tap transport socket extension. This dumps plain text read/write\n// sequences on a socket.\n\n// Connection properties.\nmessage Connection {\n  // Local address.\n  api.v2.core.Address local_address = 2;\n\n  // Remote address.\n  api.v2.core.Address remote_address = 3;\n}\n\n// Event in a socket trace.\nmessage SocketEvent {\n  // Data read by Envoy from the transport socket.\n  message Read {\n    // TODO(htuch): Half-close for reads.\n\n    // Binary data read.\n    Body data = 1;\n  }\n\n  // Data written by Envoy to the transport socket.\n  message Write {\n    // Binary data written.\n    Body data = 1;\n\n    // Stream was half closed after this write.\n    bool end_stream = 2;\n  }\n\n  // The connection was closed.\n  message Closed {\n    // TODO(mattklein123): Close event type.\n  }\n\n  // Timestamp for event.\n  google.protobuf.Timestamp timestamp = 1;\n\n  // Read or write with content as bytes string.\n  oneof event_selector {\n    Read read = 2;\n\n    Write write = 3;\n\n    Closed closed = 4;\n  }\n}\n\n// Sequence of read/write events that constitute a buffered trace on a socket.\n// [#next-free-field: 6]\nmessage SocketBufferedTrace {\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness. 
Matches connection IDs used in Envoy logs.\n  uint64 trace_id = 1;\n\n  // Connection properties.\n  Connection connection = 2;\n\n  // Sequence of observed events.\n  repeated SocketEvent events = 3;\n\n  // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes\n  // <envoy_api_field_service.tap.v2alpha.OutputConfig.max_buffered_rx_bytes>` setting.\n  bool read_truncated = 4;\n\n  // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes\n  // <envoy_api_field_service.tap.v2alpha.OutputConfig.max_buffered_tx_bytes>` setting.\n  bool write_truncated = 5;\n}\n\n// A streamed socket trace segment. Multiple segments make up a full trace.\nmessage SocketStreamedTraceSegment {\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.\n  uint64 trace_id = 1;\n\n  oneof message_piece {\n    // Connection properties.\n    Connection connection = 2;\n\n    // Socket event.\n    SocketEvent event = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/data/tap/v2alpha/wrapper.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v2alpha;\n\nimport \"envoy/data/tap/v2alpha/http.proto\";\nimport \"envoy/data/tap/v2alpha/transport.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v2alpha\";\noption java_outer_classname = \"WrapperProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap data wrappers]\n\n// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for\n// sending traces over gRPC APIs or more easily persisting binary messages to files.\nmessage TraceWrapper {\n  oneof trace {\n    option (validate.required) = true;\n\n    // An HTTP buffered tap trace.\n    HttpBufferedTrace http_buffered_trace = 1;\n\n    // An HTTP streamed tap trace segment.\n    HttpStreamedTraceSegment http_streamed_trace_segment = 2;\n\n    // A socket buffered tap trace.\n    SocketBufferedTrace socket_buffered_trace = 3;\n\n    // A socket streamed tap trace segment.\n    SocketStreamedTraceSegment socket_streamed_trace_segment = 4;\n  }\n}\n"
  },
  {
    "path": "api/envoy/data/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/data/tap/v3/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v3\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap common data]\n\n// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received\n// and transmitted data, etc.\nmessage Body {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.tap.v2alpha.Body\";\n\n  oneof body_type {\n    // Body data as bytes. By default, tap body data will be present in this field, as the proto\n    // `bytes` type can contain any valid byte.\n    bytes as_bytes = 1;\n\n    // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING\n    // <envoy_api_enum_value_config.tap.v3.OutputSink.Format.JSON_BODY_AS_STRING>` sink\n    // format type is selected. See the documentation for that option for why this is useful.\n    string as_string = 2;\n  }\n\n  // Specifies whether body data has been truncated to fit within the specified\n  // :ref:`max_buffered_rx_bytes\n  // <envoy_api_field_config.tap.v3.OutputConfig.max_buffered_rx_bytes>` and\n  // :ref:`max_buffered_tx_bytes\n  // <envoy_api_field_config.tap.v3.OutputConfig.max_buffered_tx_bytes>` settings.\n  bool truncated = 3;\n}\n"
  },
  {
    "path": "api/envoy/data/tap/v3/http.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/data/tap/v3/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v3\";\noption java_outer_classname = \"HttpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP tap data]\n\n// A fully buffered HTTP trace message.\nmessage HttpBufferedTrace {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.HttpBufferedTrace\";\n\n  // HTTP message wrapper.\n  message Message {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.tap.v2alpha.HttpBufferedTrace.Message\";\n\n    // Message headers.\n    repeated config.core.v3.HeaderValue headers = 1;\n\n    // Message body.\n    Body body = 2;\n\n    // Message trailers.\n    repeated config.core.v3.HeaderValue trailers = 3;\n  }\n\n  // Request message.\n  Message request = 1;\n\n  // Response message.\n  Message response = 2;\n}\n\n// A streamed HTTP trace segment. Multiple segments make up a full trace.\n// [#next-free-field: 8]\nmessage HttpStreamedTraceSegment {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.HttpStreamedTraceSegment\";\n\n  // Trace ID unique to the originating Envoy only. 
Trace IDs can repeat and should not be used\n  // for long term stable uniqueness.\n  uint64 trace_id = 1;\n\n  oneof message_piece {\n    // Request headers.\n    config.core.v3.HeaderMap request_headers = 2;\n\n    // Request body chunk.\n    Body request_body_chunk = 3;\n\n    // Request trailers.\n    config.core.v3.HeaderMap request_trailers = 4;\n\n    // Response headers.\n    config.core.v3.HeaderMap response_headers = 5;\n\n    // Response body chunk.\n    Body response_body_chunk = 6;\n\n    // Response trailers.\n    config.core.v3.HeaderMap response_trailers = 7;\n  }\n}\n"
  },
  {
    "path": "api/envoy/data/tap/v3/transport.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/data/tap/v3/common.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v3\";\noption java_outer_classname = \"TransportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Transport tap data]\n// Trace format for the tap transport socket extension. This dumps plain text read/write\n// sequences on a socket.\n\n// Connection properties.\nmessage Connection {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.tap.v2alpha.Connection\";\n\n  // Local address.\n  config.core.v3.Address local_address = 2;\n\n  // Remote address.\n  config.core.v3.Address remote_address = 3;\n}\n\n// Event in a socket trace.\nmessage SocketEvent {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.tap.v2alpha.SocketEvent\";\n\n  // Data read by Envoy from the transport socket.\n  message Read {\n    // TODO(htuch): Half-close for reads.\n\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.tap.v2alpha.SocketEvent.Read\";\n\n    // Binary data read.\n    Body data = 1;\n  }\n\n  // Data written by Envoy to the transport socket.\n  message Write {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.tap.v2alpha.SocketEvent.Write\";\n\n    // Binary data written.\n    Body data = 1;\n\n    // Stream was half closed after this write.\n    bool end_stream = 2;\n  }\n\n  // The connection was closed.\n  message Closed {\n    // TODO(mattklein123): Close event type.\n\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.tap.v2alpha.SocketEvent.Closed\";\n  }\n\n  // 
Timestamp for event.\n  google.protobuf.Timestamp timestamp = 1;\n\n  // Read or write with content as bytes string.\n  oneof event_selector {\n    Read read = 2;\n\n    Write write = 3;\n\n    Closed closed = 4;\n  }\n}\n\n// Sequence of read/write events that constitute a buffered trace on a socket.\n// [#next-free-field: 6]\nmessage SocketBufferedTrace {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.SocketBufferedTrace\";\n\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.\n  uint64 trace_id = 1;\n\n  // Connection properties.\n  Connection connection = 2;\n\n  // Sequence of observed events.\n  repeated SocketEvent events = 3;\n\n  // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes\n  // <envoy_api_field_config.tap.v3.OutputConfig.max_buffered_rx_bytes>` setting.\n  bool read_truncated = 4;\n\n  // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes\n  // <envoy_api_field_config.tap.v3.OutputConfig.max_buffered_tx_bytes>` setting.\n  bool write_truncated = 5;\n}\n\n// A streamed socket trace segment. Multiple segments make up a full trace.\nmessage SocketStreamedTraceSegment {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.SocketStreamedTraceSegment\";\n\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.\n  uint64 trace_id = 1;\n\n  oneof message_piece {\n    // Connection properties.\n    Connection connection = 2;\n\n    // Socket event.\n    SocketEvent event = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/data/tap/v3/wrapper.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v3;\n\nimport \"envoy/data/tap/v3/http.proto\";\nimport \"envoy/data/tap/v3/transport.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v3\";\noption java_outer_classname = \"WrapperProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap data wrappers]\n\n// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for\n// sending traces over gRPC APIs or more easily persisting binary messages to files.\nmessage TraceWrapper {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.TraceWrapper\";\n\n  oneof trace {\n    option (validate.required) = true;\n\n    // An HTTP buffered tap trace.\n    HttpBufferedTrace http_buffered_trace = 1;\n\n    // An HTTP streamed tap trace segment.\n    HttpStreamedTraceSegment http_streamed_trace_segment = 2;\n\n    // A socket buffered tap trace.\n    SocketBufferedTrace socket_buffered_trace = 3;\n\n    // A socket streamed tap trace segment.\n    SocketStreamedTraceSegment socket_streamed_trace_segment = 4;\n  }\n}\n"
  },
  {
    "path": "api/envoy/extensions/access_loggers/file/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v2:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/access_loggers/file/v3/file.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.access_loggers.file.v3;\n\nimport \"envoy/config/core/v3/substitution_format_string.proto\";\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.access_loggers.file.v3\";\noption java_outer_classname = \"FileProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: File access log]\n// [#extension: envoy.access_loggers.file]\n\n// Custom configuration for an :ref:`AccessLog <envoy_api_msg_config.accesslog.v3.AccessLog>`\n// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file*\n// AccessLog.\n// [#next-free-field: 6]\nmessage FileAccessLog {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v2.FileAccessLog\";\n\n  // A path to a local file to which to write the access log entries.\n  string path = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof access_log_format {\n    // Access log :ref:`format string<config_access_log_format_strings>`.\n    // Envoy supports :ref:`custom access log formats <config_access_log_format>` as well as a\n    // :ref:`default format <config_access_log_default_format>`.\n    // This field is deprecated.\n    // Please use :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n    string format = 2 [deprecated = true];\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. 
All values\n    // are rendered as strings.\n    // This field is deprecated.\n    // Please use :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n    google.protobuf.Struct json_format = 3 [deprecated = true];\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. Values are\n    // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may\n    // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the\n    // documentation for a specific command operator for details.\n    // This field is deprecated.\n    // Please use :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n    google.protobuf.Struct typed_json_format = 4 [deprecated = true];\n\n    // Configuration to form access log data and format.\n    // If not specified, use :ref:`default format <config_access_log_default_format>`.\n    config.core.v3.SubstitutionFormatString log_format = 5\n        [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "api/envoy/extensions/access_loggers/file/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/access_loggers/file/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/access_loggers/file/v4alpha/file.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.access_loggers.file.v4alpha;\n\nimport \"envoy/config/core/v4alpha/substitution_format_string.proto\";\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.access_loggers.file.v4alpha\";\noption java_outer_classname = \"FileProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: File access log]\n// [#extension: envoy.access_loggers.file]\n\n// Custom configuration for an :ref:`AccessLog <envoy_api_msg_config.accesslog.v4alpha.AccessLog>`\n// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file*\n// AccessLog.\n// [#next-free-field: 6]\nmessage FileAccessLog {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.access_loggers.file.v3.FileAccessLog\";\n\n  reserved 2, 3, 4;\n\n  reserved \"format\", \"json_format\", \"typed_json_format\";\n\n  // A path to a local file to which to write the access log entries.\n  string path = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof access_log_format {\n    // Configuration to form access log data and format.\n    // If not specified, use :ref:`default format <config_access_log_default_format>`.\n    config.core.v4alpha.SubstitutionFormatString log_format = 5\n        [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "api/envoy/extensions/access_loggers/grpc/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v2:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/access_loggers/grpc/v3/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.access_loggers.grpc.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.access_loggers.grpc.v3\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Configuration for the built-in *envoy.access_loggers.http_grpc*\n// :ref:`AccessLog <envoy_api_msg_config.accesslog.v3.AccessLog>`. This configuration will\n// populate :ref:`StreamAccessLogsMessage.http_logs\n// <envoy_api_field_service.accesslog.v3.StreamAccessLogsMessage.http_logs>`.\n// [#extension: envoy.access_loggers.http_grpc]\nmessage HttpGrpcAccessLogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v2.HttpGrpcAccessLogConfig\";\n\n  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];\n\n  // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers\n  // <envoy_api_field_data.accesslog.v3.HTTPRequestProperties.request_headers>`.\n  repeated string additional_request_headers_to_log = 2;\n\n  // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers\n  // <envoy_api_field_data.accesslog.v3.HTTPResponseProperties.response_headers>`.\n  repeated string additional_response_headers_to_log = 3;\n\n  // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers\n  // <envoy_api_field_data.accesslog.v3.HTTPResponseProperties.response_trailers>`.\n  repeated string 
additional_response_trailers_to_log = 4;\n}\n\n// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will\n// populate *StreamAccessLogsMessage.tcp_logs*.\n// [#extension: envoy.access_loggers.tcp_grpc]\nmessage TcpGrpcAccessLogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v2.TcpGrpcAccessLogConfig\";\n\n  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];\n}\n\n// Common configuration for gRPC access logs.\n// [#next-free-field: 7]\nmessage CommonGrpcAccessLogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v2.CommonGrpcAccessLogConfig\";\n\n  // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier\n  // <envoy_api_msg_service.accesslog.v3.StreamAccessLogsMessage.Identifier>`. This allows the\n  // access log server to differentiate between different access logs coming from the same Envoy.\n  string log_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The gRPC service for the access log service.\n  config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n\n  // API version for access logs service transport protocol. This describes the access logs service\n  // gRPC endpoint and version of messages used on the wire.\n  config.core.v3.ApiVersion transport_api_version = 6\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time\n  // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to\n  // 1 second.\n  google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}];\n\n  // Soft size limit in bytes for access log entries buffer. 
Logger will buffer requests until\n  // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it\n  // to zero effectively disables the batching. Defaults to 16384.\n  google.protobuf.UInt32Value buffer_size_bytes = 4;\n\n  // Additional filter state objects to log in :ref:`filter_state_objects\n  // <envoy_api_field_data.accesslog.v3.AccessLogCommon.filter_state_objects>`.\n  // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object.\n  repeated string filter_state_objects_to_log = 5;\n}\n"
  },
  {
    "path": "api/envoy/extensions/access_loggers/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/access_loggers/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.access_loggers.wasm.v3;\n\nimport \"envoy/extensions/wasm/v3/wasm.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.access_loggers.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm access log]\n// [#extension: envoy.access_loggers.wasm]\n\n// Custom configuration for an :ref:`AccessLog <envoy_api_msg_config.accesslog.v3.AccessLog>`\n// that calls into a WASM VM. Configures the built-in *envoy.access_loggers.wasm*\n// AccessLog.\nmessage WasmAccessLog {\n  envoy.extensions.wasm.v3.PluginConfig config = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/clusters/aggregate/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/cluster/aggregate/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/clusters/aggregate/v3/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.clusters.aggregate.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.clusters.aggregate.v3\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Aggregate cluster configuration]\n\n// Configuration for the aggregate cluster. See the :ref:`architecture overview\n// <arch_overview_aggregate_cluster>` for more information.\n// [#extension: envoy.clusters.aggregate]\nmessage ClusterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.aggregate.v2alpha.ClusterConfig\";\n\n  // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they\n  // appear in this list.\n  repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.clusters.dynamic_forward_proxy.v3;\n\nimport \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.clusters.dynamic_forward_proxy.v3\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamic forward proxy cluster configuration]\n\n// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#extension: envoy.clusters.dynamic_forward_proxy]\nmessage ClusterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig\";\n\n  // The DNS cache configuration that the cluster will attach to. Note this configuration must\n  // match that of associated :ref:`dynamic forward proxy HTTP filter configuration\n  // <envoy_api_field_extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n\n  // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options\n  // in the :ref:`cluster's upstream_http_protocol_options\n  // <envoy_api_field_config.cluster.v3.Cluster.upstream_http_protocol_options>`\n  bool allow_insecure_cluster_options = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/clusters/redis/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/cluster/redis:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/clusters/redis/v3/redis_cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.clusters.redis.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.clusters.redis.v3\";\noption java_outer_classname = \"RedisClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Redis Cluster Configuration]\n// This cluster adds support for `Redis Cluster <https://redis.io/topics/cluster-spec>`_, as part\n// of :ref:`Envoy's support for Redis Cluster <arch_overview_redis>`.\n//\n// Redis Cluster is an extension of Redis which supports sharding and high availability (where a\n// shard that loses its primary fails over to a replica, and designates it as the new primary).\n// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client\n// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the\n// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS\n// command <https://redis.io/commands/cluster-slots>`_. This result is then stored locally, and\n// updated at user-configured intervals.\n//\n// Additionally, if\n// :ref:`enable_redirection<envoy_api_field_extensions.filters.network.redis_proxy.v3.RedisProxy.ConnPoolSettings.enable_redirection>`\n// is true, then moved and ask redirection errors from upstream servers will trigger a topology\n// refresh when they exceed a user-configured error threshold.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     name: name\n//     connect_timeout: 0.25s\n//     dns_lookup_family: V4_ONLY\n//     hosts:\n//     - socket_address:\n//       address: foo.bar.com\n//       port_value: 22120\n//     cluster_type:\n//     name: envoy.clusters.redis\n//     typed_config:\n//       \"@type\": type.googleapis.com/google.protobuf.Struct\n//       value:\n//         cluster_refresh_rate: 30s\n//         cluster_refresh_timeout: 0.5s\n//         redirect_refresh_interval: 10s\n//         redirect_refresh_threshold: 10\n// [#extension: envoy.clusters.redis]\n\n// [#next-free-field: 7]\nmessage RedisClusterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.redis.RedisClusterConfig\";\n\n  // Interval between successive topology refresh requests. If not set, this defaults to 5s.\n  google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}];\n\n  // Timeout for topology refresh request. If not set, this defaults to 3s.\n  google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}];\n\n  // The minimum interval that must pass after triggering a topology refresh request before a new\n  // request can possibly be triggered again. Any errors received during one of these\n  // time intervals are ignored. If not set, this defaults to 5s.\n  google.protobuf.Duration redirect_refresh_interval = 3;\n\n  // The number of redirection errors that must be received before\n  // triggering a topology refresh request. 
If not set, this defaults to 5.\n  // If this is set to 0, topology refresh after redirect is disabled.\n  google.protobuf.UInt32Value redirect_refresh_threshold = 4;\n\n  // The number of failures that must be received before triggering a topology refresh request.\n  // If not set, this defaults to 0, which disables the topology refresh due to failure.\n  uint32 failure_refresh_threshold = 5;\n\n  // The number of hosts became degraded or unhealthy before triggering a topology refresh request.\n  // If not set, this defaults to 0, which disables the topology refresh due to degraded or\n  // unhealthy host.\n  uint32 host_degraded_refresh_threshold = 6;\n}\n"
  },
  {
    "path": "api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.common.dynamic_forward_proxy.v3;\n\nimport \"envoy/config/cluster/v3/cluster.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.common.dynamic_forward_proxy.v3\";\noption java_outer_classname = \"DnsCacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamic forward proxy common configuration]\n\n// Configuration of circuit breakers for resolver.\nmessage DnsCacheCircuitBreakers {\n  // The maximum number of pending requests that Envoy will allow to the\n  // resolver. If not specified, the default is 1024.\n  google.protobuf.UInt32Value max_pending_requests = 1;\n}\n\n// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#next-free-field: 9]\nmessage DnsCacheConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig\";\n\n  // The name of the cache. Multiple named caches allow independent dynamic forward proxy\n  // configurations to operate within a single Envoy process using different configurations. All\n  // configurations with the same name *must* otherwise have the same settings when referenced\n  // from different configuration components. Configuration will fail to load if this is not\n  // the case.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The DNS lookup family to use during resolution.\n  //\n  // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 \"happy eyeballs\" mode. 
The\n  // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and\n  // then configures a host to have a primary and fall back address. With this, we could very\n  // likely build a \"happy eyeballs\" connection pool which would race the primary / fall back\n  // address and return the one that wins. This same method could potentially also be used for\n  // QUIC to TCP fall back.]\n  config.cluster.v3.Cluster.DnsLookupFamily dns_lookup_family = 2\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s.\n  //\n  // .. note:\n  //\n  //  The returned DNS TTL is not currently used to alter the refresh rate. This feature will be\n  //  added in a future change.\n  //\n  // .. note:\n  //\n  // The refresh rate is rounded to the closest millisecond, and must be at least 1ms.\n  google.protobuf.Duration dns_refresh_rate = 3\n      [(validate.rules).duration = {gte {nanos: 1000000}}];\n\n  // The TTL for hosts that are unused. Hosts that have not been used in the configured time\n  // interval will be purged. If not specified defaults to 5m.\n  //\n  // .. note:\n  //\n  //   The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This\n  //   means that if the configured TTL is shorter than the refresh rate the host may not be removed\n  //   immediately.\n  //\n  //  .. note:\n  //\n  //   The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage.\n  google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}];\n\n  // The maximum number of hosts that the cache will hold. If not specified defaults to 1024.\n  //\n  // .. note:\n  //\n  //   The implementation is approximate and enforced independently on each worker thread, thus\n  //   it is possible for the maximum hosts in the cache to go slightly above the configured\n  //   value depending on timing. 
This is similar to how other circuit breakers work.\n  google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}];\n\n  // If the DNS failure refresh rate is specified,\n  // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is\n  // not specified, the failure refresh rate defaults to the dns_refresh_rate.\n  config.cluster.v3.Cluster.RefreshRate dns_failure_refresh_rate = 6;\n\n  // The config of circuit breakers for resolver. It provides a configurable threshold.\n  // If `envoy.reloadable_features.enable_dns_cache_circuit_breakers` is enabled,\n  // envoy will use dns cache circuit breakers with default settings even if this value is not set.\n  DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7;\n\n  // [#next-major-version: Reconcile DNS options in a single message.]\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple's API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 8;\n}\n"
  },
  {
    "path": "api/envoy/extensions/common/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/ratelimit:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/common/ratelimit/v3/ratelimit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.common.ratelimit.v3;\n\nimport \"envoy/type/v3/ratelimit_unit.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.common.ratelimit.v3\";\noption java_outer_classname = \"RatelimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common rate limit components]\n\n// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to\n// determine the final rate limit key and overall allowed limit. Here are some examples of how\n// they might be used for the domain \"envoy\".\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"remote_address\": \"10.0.0.1\"]\n//\n// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The\n// configuration supplies a default limit for the *remote_address* key. If there is a desire to\n// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the\n// configuration.\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"path\": \"/foo/bar\"]\n//\n// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if\n// configured that way in the service).\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"path\": \"/foo/bar\"], [\"remote_address\": \"10.0.0.1\"]\n//\n// What it does: Limits unauthenticated traffic to a specific path for a specific IP address.\n// Like (1) we can raise/block specific IP addresses if we want with an override configuration.\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"true\"], [\"client_id\": \"foo\"]\n//\n// What it does: Limits all traffic for an authenticated client \"foo\"\n//\n// .. 
code-block:: cpp\n//\n//   [\"authenticated\": \"true\"], [\"client_id\": \"foo\"], [\"path\": \"/foo/bar\"]\n//\n// What it does: Limits traffic to a specific path for an authenticated client \"foo\"\n//\n// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired.\n// This enables building complex application scenarios with a generic backend.\n//\n// Optionally the descriptor can contain a limit override under a \"limit\" key, that specifies\n// the number of requests per unit to use instead of the number configured in the\n// rate limiting service.\nmessage RateLimitDescriptor {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.ratelimit.RateLimitDescriptor\";\n\n  message Entry {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.ratelimit.RateLimitDescriptor.Entry\";\n\n    // Descriptor key.\n    string key = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Descriptor value.\n    string value = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Override rate limit to apply to this descriptor instead of the limit\n  // configured in the rate limit service. See :ref:`rate limit override\n  // <config_http_filters_rate_limit_rate_limit_override>` for more information.\n  message RateLimitOverride {\n    // The number of requests per unit of time.\n    uint32 requests_per_unit = 1;\n\n    // The unit of time.\n    type.v3.RateLimitUnit unit = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // Descriptor entries.\n  repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Optional rate limit override to supply to the ratelimit service.\n  RateLimitOverride limit = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/common/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/tap/v2alpha:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/common/tap/v3/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.common.tap.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/tap/v3/common.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.common.tap.v3\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common tap extension configuration]\n\n// Common configuration for all tap extensions.\nmessage CommonExtensionConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.tap.v2alpha.CommonExtensionConfig\";\n\n  // [#not-implemented-hide:]\n  message TapDSConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.common.tap.v2alpha.CommonExtensionConfig.TapDSConfig\";\n\n    // Configuration for the source of TapDS updates for this Cluster.\n    config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n    // Tap config to request from XDS server.\n    string name = 2 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n    // Resource locator for TAP. 
This is mutually exclusive to *name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator tap_resource_locator = 3\n        [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n  }\n\n  oneof config_type {\n    option (validate.required) = true;\n\n    // If specified, the tap filter will be configured via an admin handler.\n    AdminConfig admin_config = 1;\n\n    // If specified, the tap filter will be configured via a static configuration that cannot be\n    // changed.\n    config.tap.v3.TapConfig static_config = 2;\n\n    // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter.\n    TapDSConfig tapds_config = 3;\n  }\n}\n\n// Configuration for the admin handler. See :ref:`here <config_http_filters_tap_admin_handler>` for\n// more information.\nmessage AdminConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.tap.v2alpha.AdminConfig\";\n\n  // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is\n  // matched to the configured filter opaque ID to determine which filter to configure.\n  string config_id = 1 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/common/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/tap/v4alpha:pkg\",\n        \"//envoy/extensions/common/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/common/tap/v4alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.common.tap.v4alpha;\n\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/tap/v4alpha/common.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.common.tap.v4alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common tap extension configuration]\n\n// Common configuration for all tap extensions.\nmessage CommonExtensionConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.common.tap.v3.CommonExtensionConfig\";\n\n  // [#not-implemented-hide:]\n  message TapDSConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.common.tap.v3.CommonExtensionConfig.TapDSConfig\";\n\n    // Configuration for the source of TapDS updates for this Cluster.\n    config.core.v4alpha.ConfigSource config_source = 1\n        [(validate.rules).message = {required: true}];\n\n    oneof name_specifier {\n      // Tap config to request from XDS server.\n      string name = 2;\n\n      // Resource locator for TAP. 
This is mutually exclusive to *name*.\n      // [#not-implemented-hide:]\n      udpa.core.v1.ResourceLocator tap_resource_locator = 3;\n    }\n  }\n\n  oneof config_type {\n    option (validate.required) = true;\n\n    // If specified, the tap filter will be configured via an admin handler.\n    AdminConfig admin_config = 1;\n\n    // If specified, the tap filter will be configured via a static configuration that cannot be\n    // changed.\n    config.tap.v4alpha.TapConfig static_config = 2;\n\n    // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter.\n    TapDSConfig tapds_config = 3;\n  }\n}\n\n// Configuration for the admin handler. See :ref:`here <config_http_filters_tap_admin_handler>` for\n// more information.\nmessage AdminConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.common.tap.v3.AdminConfig\";\n\n  // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is\n  // matched to the configured filter opaque ID to determine which filter to configure.\n  string config_id = 1 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/compression/gzip/compressor/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.compression.gzip.compressor.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.compression.gzip.compressor.v3\";\noption java_outer_classname = \"GzipProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Gzip Compressor]\n// [#extension: envoy.compression.gzip.compressor]\n\n// [#next-free-field: 6]\nmessage Gzip {\n  // All the values of this enumeration translate directly to zlib's compression strategies.\n  // For more information about each strategy, please refer to zlib manual.\n  enum CompressionStrategy {\n    DEFAULT_STRATEGY = 0;\n    FILTERED = 1;\n    HUFFMAN_ONLY = 2;\n    RLE = 3;\n    FIXED = 4;\n  }\n\n  enum CompressionLevel {\n    option allow_alias = true;\n\n    DEFAULT_COMPRESSION = 0;\n    BEST_SPEED = 1;\n    COMPRESSION_LEVEL_1 = 1;\n    COMPRESSION_LEVEL_2 = 2;\n    COMPRESSION_LEVEL_3 = 3;\n    COMPRESSION_LEVEL_4 = 4;\n    COMPRESSION_LEVEL_5 = 5;\n    COMPRESSION_LEVEL_6 = 6;\n    COMPRESSION_LEVEL_7 = 7;\n    COMPRESSION_LEVEL_8 = 8;\n    COMPRESSION_LEVEL_9 = 9;\n    BEST_COMPRESSION = 9;\n  }\n\n  // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values\n  // use more memory, but are faster and produce better compression results. The default value is 5.\n  google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}];\n\n  // A value used for selecting the zlib compression level. This setting will affect speed and\n  // amount of compression applied to the content. \"BEST_COMPRESSION\" provides higher compression\n  // at the cost of higher latency and is equal to \"COMPRESSION_LEVEL_9\". 
\"BEST_SPEED\" provides\n  // lower compression with minimum impact on response time, the same as \"COMPRESSION_LEVEL_1\".\n  // \"DEFAULT_COMPRESSION\" provides an optimal result between speed and compression. According\n  // to zlib's manual this level gives the same result as \"COMPRESSION_LEVEL_6\".\n  // This field will be set to \"DEFAULT_COMPRESSION\" if not specified.\n  CompressionLevel compression_level = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // A value used for selecting the zlib compression strategy which is directly related to the\n  // characteristics of the content. Most of the time \"DEFAULT_STRATEGY\" will be the best choice,\n  // which is also the default value for the parameter, though there are situations when\n  // changing this parameter might produce better results. For example, run-length encoding (RLE)\n  // is typically used when the content is known for having sequences which same data occurs many\n  // consecutive times. For more information about each strategy, please refer to zlib manual.\n  CompressionStrategy compression_strategy = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size.\n  // Larger window results in better compression at the expense of memory usage. The default is 12\n  // which will produce a 4096 bytes window. For more details about this parameter, please refer to\n  // zlib manual > deflateInit2.\n  google.protobuf.UInt32Value window_bits = 4 [(validate.rules).uint32 = {lte: 15 gte: 9}];\n\n  // Value for Zlib's next output buffer. If not set, defaults to 4096.\n  // See https://www.zlib.net/manual.html for more details. Also see\n  // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance.\n  google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/compression/gzip/decompressor/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.compression.gzip.decompressor.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.compression.gzip.decompressor.v3\";\noption java_outer_classname = \"GzipProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Gzip Decompressor]\n// [#extension: envoy.compression.gzip.decompressor]\n\nmessage Gzip {\n  // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size.\n  // The decompression window size needs to be equal or larger than the compression window size.\n  // The default is 12 to match the default in the\n  // :ref:`gzip compressor <envoy_api_field_extensions.compression.gzip.compressor.v3.Gzip.window_bits>`.\n  // For more details about this parameter, please refer to `zlib manual <https://www.zlib.net/manual.html>`_ > inflateInit2.\n  google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}];\n\n  // Value for zlib's decompressor output buffer. If not set, defaults to 4096.\n  // See https://www.zlib.net/manual.html for more details.\n  google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/common/fault/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/filter/fault/v2:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/common/fault/v3/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.common.fault.v3;\n\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.common.fault.v3\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common fault injection types]\n\n// Delay specification is used to inject latency into the\n// HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections.\n// [#next-free-field: 6]\nmessage FaultDelay {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.fault.v2.FaultDelay\";\n\n  enum FaultDelayType {\n    // Unused and deprecated.\n    FIXED = 0;\n  }\n\n  // Fault delays are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderDelay {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.fault.v2.FaultDelay.HeaderDelay\";\n  }\n\n  reserved 2, 1;\n\n  reserved \"type\";\n\n  oneof fault_delay_secifier {\n    option (validate.required) = true;\n\n    // Add a fixed delay before forwarding the operation upstream. See\n    // https://developers.google.com/protocol-buffers/docs/proto3#json for\n    // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified\n    // delay will be injected before a new request/operation. For TCP\n    // connections, the proxying of the connection upstream will be delayed\n    // for the specified period. 
This is required if type is FIXED.\n    google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}];\n\n    // Fault delays are controlled via an HTTP header (if applicable).\n    HeaderDelay header_delay = 5;\n  }\n\n  // The percentage of operations/connections/requests on which the delay will be injected.\n  type.v3.FractionalPercent percentage = 4;\n}\n\n// Describes a rate limit to be applied.\nmessage FaultRateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.fault.v2.FaultRateLimit\";\n\n  // Describes a fixed/constant rate limit.\n  message FixedLimit {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.fault.v2.FaultRateLimit.FixedLimit\";\n\n    // The limit supplied in KiB/s.\n    uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}];\n  }\n\n  // Rate limits are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderLimit {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.fault.v2.FaultRateLimit.HeaderLimit\";\n  }\n\n  oneof limit_type {\n    option (validate.required) = true;\n\n    // A fixed rate limit.\n    FixedLimit fixed_limit = 1;\n\n    // Rate limits are controlled via an HTTP header (if applicable).\n    HeaderLimit header_limit = 3;\n  }\n\n  // The percentage of operations/connections/requests on which the rate limit will be injected.\n  type.v3.FractionalPercent percentage = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.adaptive_concurrency.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.adaptive_concurrency.v3\";\noption java_outer_classname = \"AdaptiveConcurrencyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Adaptive Concurrency]\n// Adaptive Concurrency Control :ref:`configuration overview\n// <config_http_filters_adaptive_concurrency>`.\n// [#extension: envoy.filters.http.adaptive_concurrency]\n\n// Configuration parameters for the gradient controller.\nmessage GradientControllerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig\";\n\n  // Parameters controlling the periodic recalculation of the concurrency limit from sampled request\n  // latencies.\n  message ConcurrencyLimitCalculationParams {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig.\"\n        \"ConcurrencyLimitCalculationParams\";\n\n    // The allowed upper-bound on the calculated concurrency limit. 
Defaults to 1000.\n    google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}];\n\n    // The period of time samples are taken to recalculate the concurrency limit.\n    google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n  }\n\n  // Parameters controlling the periodic minRTT recalculation.\n  // [#next-free-field: 6]\n  message MinimumRTTCalculationParams {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig.\"\n        \"MinimumRTTCalculationParams\";\n\n    // The time interval between recalculating the minimum request round-trip time. Has to be\n    // positive.\n    google.protobuf.Duration interval = 1 [(validate.rules).duration = {\n      required: true\n      gte {nanos: 1000000}\n    }];\n\n    // The number of requests to aggregate/sample during the minRTT recalculation window before\n    // updating. Defaults to 50.\n    google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}];\n\n    // Randomized time delta that will be introduced to the start of the minRTT calculation window.\n    // This is represented as a percentage of the interval duration. Defaults to 15%.\n    //\n    // Example: If the interval is 10s and the jitter is 15%, the next window will begin\n    // somewhere in the range (10s - 11.5s).\n    type.v3.Percent jitter = 3;\n\n    // The concurrency limit set while measuring the minRTT. Defaults to 3.\n    google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}];\n\n    // Amount added to the measured minRTT to add stability to the concurrency limit during natural\n    // variability in latency. 
This is expressed as a percentage of the measured value and can be\n    // adjusted to allow more or less tolerance to the sampled latency values.\n    //\n    // Defaults to 25%.\n    type.v3.Percent buffer = 5;\n  }\n\n  // The percentile to use when summarizing aggregated samples. Defaults to p50.\n  type.v3.Percent sample_aggregate_percentile = 1;\n\n  ConcurrencyLimitCalculationParams concurrency_limit_params = 2\n      [(validate.rules).message = {required: true}];\n\n  MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}];\n}\n\nmessage AdaptiveConcurrency {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency\";\n\n  oneof concurrency_controller_config {\n    option (validate.required) = true;\n\n    // Gradient concurrency control will be used.\n    GradientControllerConfig gradient_controller_config = 1\n        [(validate.rules).message = {required: true}];\n  }\n\n  // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the\n  // message is unspecified, the filter will be enabled.\n  config.core.v3.RuntimeFeatureFlag enabled = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.admission_control.v3alpha;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha\";\noption java_outer_classname = \"AdmissionControlProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Admission Control]\n// [#extension: envoy.filters.http.admission_control]\n\n// [#next-free-field: 6]\nmessage AdmissionControl {\n  // Default method of specifying what constitutes a successful request. All status codes that\n  // indicate a successful request must be explicitly specified if not relying on the default\n  // values.\n  message SuccessCriteria {\n    message HttpCriteria {\n      // Status code ranges that constitute a successful request. Configurable codes are in the\n      // range [100, 600).\n      repeated type.v3.Int32Range http_success_status = 1\n          [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    message GrpcCriteria {\n      // Status codes that constitute a successful request.\n      // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.\n      repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful\n    // responses.\n    //\n    // .. 
note::\n    //\n    //    The default HTTP codes considered successful by the admission controller are done so due\n    //    to the unlikelihood that sending fewer requests would change their behavior (for example:\n    //    redirects, unauthorized access, or bad requests won't be alleviated by sending less\n    //    traffic).\n    HttpCriteria http_criteria = 1;\n\n    // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok,\n    // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated,\n    // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented.\n    //\n    // .. note::\n    //\n    //    The default gRPC codes that are considered successful by the admission controller are\n    //    chosen because of the unlikelihood that sending fewer requests will change the behavior.\n    GrpcCriteria grpc_criteria = 2;\n  }\n\n  // If set to false, the admission control filter will operate as a pass-through filter. If the\n  // message is unspecified, the filter will be enabled.\n  config.core.v3.RuntimeFeatureFlag enabled = 1;\n\n  // Defines how a request is considered a success/failure.\n  oneof evaluation_criteria {\n    option (validate.required) = true;\n\n    SuccessCriteria success_criteria = 2;\n  }\n\n  // The sliding time window over which the success rate is calculated. The window is rounded to the\n  // nearest second. Defaults to 30s.\n  google.protobuf.Duration sampling_window = 3;\n\n  // Rejection probability is defined by the formula::\n  //\n  //     max(0, (rq_count -  rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression)\n  //\n  // The aggression dictates how heavily the admission controller will throttle requests upon SR\n  // dropping at or below the threshold. A value of 1 will result in a linear increase in\n  // rejection probability as SR drops. Any values less than 1.0, will be set to 1.0. If the\n  // message is unspecified, the aggression is 1.0. 
See `the admission control documentation\n  // <https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/admission_control_filter.html>`_\n  // for a diagram illustrating this.\n  config.core.v3.RuntimeDouble aggression = 4;\n\n  // Dictates the success rate at which the rejection probability is non-zero. As success rate drops\n  // below this threshold, rejection probability will increase. Any success rate above the threshold\n  // results in a rejection probability of 0. Defaults to 95%.\n  config.core.v3.RuntimePercent sr_threshold = 5;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/aws_lambda/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/aws_lambda/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.aws_lambda.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.aws_lambda.v3\";\noption java_outer_classname = \"AwsLambdaProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: AWS Lambda]\n// AWS Lambda :ref:`configuration overview <config_http_filters_aws_lambda>`.\n// [#extension: envoy.filters.http.aws_lambda]\n\n// AWS Lambda filter config\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.aws_lambda.v2alpha.Config\";\n\n  enum InvocationMode {\n    // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In\n    // this mode the output of the Lambda function becomes the response of the HTTP request.\n    SYNCHRONOUS = 0;\n\n    // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be\n    // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the\n    // call which is translated to an HTTP 200 OK by the filter.\n    ASYNCHRONOUS = 1;\n  }\n\n  // The ARN of the AWS Lambda to invoke when the filter is engaged\n  // Must be in the following format:\n  // arn:<partition>:lambda:<region>:<account-number>:function:<function-name>\n  string arn = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Whether to transform the request (headers and body) to a JSON payload or pass it as is.\n  bool payload_passthrough = 2;\n\n  // Determines the way to invoke the Lambda function.\n  InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Per-route configuration for AWS Lambda. 
This can be useful when invoking a different Lambda function or a different\n// version of the same Lambda depending on the route.\nmessage PerRouteConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.aws_lambda.v2alpha.PerRouteConfig\";\n\n  Config invoke_config = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/aws_request_signing/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/aws_request_signing/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.aws_request_signing.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.aws_request_signing.v3\";\noption java_outer_classname = \"AwsRequestSigningProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: AwsRequestSigning]\n// AwsRequestSigning :ref:`configuration overview <config_http_filters_aws_request_signing>`.\n// [#extension: envoy.filters.http.aws_request_signing]\n\n// Top level configuration for the AWS request signing filter.\nmessage AwsRequestSigning {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.aws_request_signing.v2alpha.AwsRequestSigning\";\n\n  // The `service namespace\n  // <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_\n  // of the HTTP endpoint.\n  //\n  // Example: s3\n  string service_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the HTTP\n  // endpoint.\n  //\n  // Example: us-west-2\n  string region = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Indicates that before signing headers, the host header will be swapped with\n  // this value. If not set or empty, the original host header value\n  // will be used and no rewrite will happen.\n  //\n  // Note: this rewrite affects both signing and host header forwarding. 
However, this\n  // option shouldn't be used with\n  // :ref:`HCM host rewrite <envoy_api_field_config.route.v3.RouteAction.host_rewrite_literal>` given that the\n  // value set here would be used for signing whereas the value set in the HCM would be used\n  // for host header forwarding which is not the desired outcome.\n  string host_rewrite = 3;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/buffer/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/buffer/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/buffer/v3/buffer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.buffer.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.buffer.v3\";\noption java_outer_classname = \"BufferProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Buffer]\n// Buffer :ref:`configuration overview <config_http_filters_buffer>`.\n// [#extension: envoy.filters.http.buffer]\n\nmessage Buffer {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.buffer.v2.Buffer\";\n\n  reserved 2;\n\n  // The maximum request size that the filter will buffer before the connection\n  // manager will stop buffering and return a 413 response.\n  google.protobuf.UInt32Value max_request_bytes = 1\n      [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}];\n}\n\nmessage BufferPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.buffer.v2.BufferPerRoute\";\n\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the buffer filter for this particular vhost or route.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Override the global configuration of the filter with this new config.\n    Buffer buffer = 2 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/cache/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/cache/v2alpha:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/cache/v3alpha/cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.cache.v3alpha;\n\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha\";\noption java_outer_classname = \"CacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP Cache Filter]\n// [#extension: envoy.filters.http.cache]\n\nmessage CacheConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.cache.v2alpha.CacheConfig\";\n\n  // [#not-implemented-hide:]\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  message KeyCreatorParams {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.cache.v2alpha.CacheConfig.KeyCreatorParams\";\n\n    // If true, exclude the URL scheme from the cache key. Set to true if your origins always\n    // produce the same response for http and https requests.\n    bool exclude_scheme = 1;\n\n    // If true, exclude the host from the cache key. Set to true if your origins' responses don't\n    // ever depend on host.\n    bool exclude_host = 2;\n\n    // If *query_parameters_included* is nonempty, only query parameters matched\n    // by one or more of its matchers are included in the cache key. 
Any other\n    // query params will not affect cache lookup.\n    repeated config.route.v3.QueryParameterMatcher query_parameters_included = 3;\n\n    // If *query_parameters_excluded* is nonempty, query parameters matched by one\n    // or more of its matchers are excluded from the cache key (even if also\n    // matched by *query_parameters_included*), and will not affect cache lookup.\n    repeated config.route.v3.QueryParameterMatcher query_parameters_excluded = 4;\n  }\n\n  // Config specific to the cache storage implementation.\n  google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];\n\n  // List of matching rules that define allowed *Vary* headers.\n  //\n  // The *vary* response header holds a list of header names that affect the\n  // contents of a response, as described by\n  // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.\n  //\n  // During insertion, *allowed_vary_headers* acts as an allowlist: if a\n  // response's *vary* header mentions any header names that aren't matched by any rules in\n  // *allowed_vary_headers*, that response will not be cached.\n  //\n  // During lookup, *allowed_vary_headers* controls what request headers will be\n  // sent to the cache storage implementation.\n  repeated type.matcher.v3.StringMatcher allowed_vary_headers = 2;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement key customization>\n  //\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  KeyCreatorParams key_creator_params = 3;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement size limit>\n  //\n  // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache\n  // storage implementation may have its own limit beyond which it will reject insertions).\n  uint32 max_body_bytes = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/cache/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/cache/v3alpha:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/cache/v4alpha/cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.cache.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.cache.v4alpha\";\noption java_outer_classname = \"CacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP Cache Filter]\n// [#extension: envoy.filters.http.cache]\n\nmessage CacheConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.cache.v3alpha.CacheConfig\";\n\n  // [#not-implemented-hide:]\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  message KeyCreatorParams {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.http.cache.v3alpha.CacheConfig.KeyCreatorParams\";\n\n    // If true, exclude the URL scheme from the cache key. Set to true if your origins always\n    // produce the same response for http and https requests.\n    bool exclude_scheme = 1;\n\n    // If true, exclude the host from the cache key. Set to true if your origins' responses don't\n    // ever depend on host.\n    bool exclude_host = 2;\n\n    // If *query_parameters_included* is nonempty, only query parameters matched\n    // by one or more of its matchers are included in the cache key. 
Any other\n    // query params will not affect cache lookup.\n    repeated config.route.v4alpha.QueryParameterMatcher query_parameters_included = 3;\n\n    // If *query_parameters_excluded* is nonempty, query parameters matched by one\n    // or more of its matchers are excluded from the cache key (even if also\n    // matched by *query_parameters_included*), and will not affect cache lookup.\n    repeated config.route.v4alpha.QueryParameterMatcher query_parameters_excluded = 4;\n  }\n\n  // Config specific to the cache storage implementation.\n  google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];\n\n  // List of matching rules that define allowed *Vary* headers.\n  //\n  // The *vary* response header holds a list of header names that affect the\n  // contents of a response, as described by\n  // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.\n  //\n  // During insertion, *allowed_vary_headers* acts as an allowlist: if a\n  // response's *vary* header mentions any header names that aren't matched by any rules in\n  // *allowed_vary_headers*, that response will not be cached.\n  //\n  // During lookup, *allowed_vary_headers* controls what request headers will be\n  // sent to the cache storage implementation.\n  repeated type.matcher.v4alpha.StringMatcher allowed_vary_headers = 2;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement key customization>\n  //\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  KeyCreatorParams key_creator_params = 3;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement size limit>\n  //\n  // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache\n  // storage implementation may have its own limit beyond which it will reject insertions).\n  uint32 max_body_bytes = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.cdn_loop.v3alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha\";\noption java_outer_classname = \"CdnLoopProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP CDN-Loop Filter]\n// [#extension: envoy.filters.http.cdn_loop]\n\n// CDN-Loop Header filter config. See the :ref:`configuration overview\n// <config_http_filters_cdn_loop>` for more information.\nmessage CdnLoopConfig {\n  // The CDN identifier to use for loop checks and to append to the\n  // CDN-Loop header.\n  //\n  // RFC 8586 calls this the cdn-id. The cdn-id can either be a\n  // pseudonym or hostname the CDN is in control of.\n  //\n  // cdn_id must not be empty.\n  string cdn_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The maximum allowed count of cdn_id in the downstream CDN-Loop\n  // request header.\n  //\n  // The default of 0 means a request can transit the CdnLoopFilter\n  // once. A value of 1 means that a request can transit the\n  // CdnLoopFilter twice and so on.\n  uint32 max_allowed_occurrences = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/compressor/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/compressor/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/compressor/v3/compressor.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.compressor.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.compressor.v3\";\noption java_outer_classname = \"CompressorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Compressor]\n// Compressor :ref:`configuration overview <config_http_filters_compressor>`.\n// [#extension: envoy.filters.http.compressor]\n\n// [#next-free-field: 7]\nmessage Compressor {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.compressor.v2.Compressor\";\n\n  // Minimum response length, in bytes, which will trigger compression. The default value is 30.\n  google.protobuf.UInt32Value content_length = 1;\n\n  // Set of strings that allows specifying which mime-types yield compression; e.g.,\n  // application/json, text/html, etc. When this field is not defined, compression will be applied\n  // to the following mime-types: \"application/javascript\", \"application/json\",\n  // \"application/xhtml+xml\", \"image/svg+xml\", \"text/css\", \"text/html\", \"text/plain\", \"text/xml\"\n  // and their synonyms.\n  repeated string content_type = 2;\n\n  // If true, disables compression when the response contains an etag header. 
When it is false, the\n  // filter will preserve weak etags and remove the ones that require strong validation.\n  bool disable_on_etag_header = 3;\n\n  // If true, removes accept-encoding from the request headers before dispatching it to the upstream\n  // so that responses do not get compressed before reaching the filter.\n  // .. attention:\n  //\n  //    To avoid interfering with other compression filters in the same chain use this option in\n  //    the filter closest to the upstream.\n  bool remove_accept_encoding_header = 4;\n\n  // Runtime flag that controls whether the filter is enabled or not. If set to false, the\n  // filter will operate as a pass-through filter. If not specified, defaults to enabled.\n  config.core.v3.RuntimeFeatureFlag runtime_enabled = 5;\n\n  // A compressor library to use for compression. Currently only\n  // :ref:`envoy.compression.gzip.compressor<envoy_api_msg_extensions.compression.gzip.compressor.v3.Gzip>`\n  // is included in Envoy.\n  // This field is ignored if used in the context of the gzip http-filter, but is mandatory otherwise.\n  config.core.v3.TypedExtensionConfig compressor_library = 6;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/cors/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/cors/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/cors/v3/cors.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.cors.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.cors.v3\";\noption java_outer_classname = \"CorsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Cors]\n// CORS Filter :ref:`configuration overview <config_http_filters_cors>`.\n// [#extension: envoy.filters.http.cors]\n\n// Cors filter config.\nmessage Cors {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.cors.v2.Cors\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/csrf/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/csrf/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/csrf/v3/csrf.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.csrf.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.csrf.v3\";\noption java_outer_classname = \"CsrfProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: CSRF]\n// Cross-Site Request Forgery :ref:`configuration overview <config_http_filters_csrf>`.\n// [#extension: envoy.filters.http.csrf]\n\n// CSRF filter config.\nmessage CsrfPolicy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.csrf.v2.CsrfPolicy\";\n\n  // Specifies the % of requests for which the CSRF filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // .. 
note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.v3.FractionalPercent.DenominatorType>`.\n  config.core.v3.RuntimeFractionalPercent filter_enabled = 1\n      [(validate.rules).message = {required: true}];\n\n  // Specifies that CSRF policies will be evaluated and tracked, but not enforced.\n  //\n  // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* and *Destination* to determine if it's valid, but will not\n  // enforce any policies.\n  config.core.v3.RuntimeFractionalPercent shadow_enabled = 2;\n\n  // Specifies additional source origins that will be allowed in addition to\n  // the destination origin.\n  //\n  // More information on how this can be configured via runtime can be found\n  // :ref:`here <csrf-configuration>`.\n  repeated type.matcher.v3.StringMatcher additional_origins = 3;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/csrf/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/csrf/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.csrf.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.csrf.v4alpha\";\noption java_outer_classname = \"CsrfProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: CSRF]\n// Cross-Site Request Forgery :ref:`configuration overview <config_http_filters_csrf>`.\n// [#extension: envoy.filters.http.csrf]\n\n// CSRF filter config.\nmessage CsrfPolicy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.csrf.v3.CsrfPolicy\";\n\n  // Specifies the % of requests for which the CSRF filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // .. 
note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.v3.FractionalPercent.DenominatorType>`.\n  config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 1\n      [(validate.rules).message = {required: true}];\n\n  // Specifies that CSRF policies will be evaluated and tracked, but not enforced.\n  //\n  // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* and *Destination* to determine if it's valid, but will not\n  // enforce any policies.\n  config.core.v4alpha.RuntimeFractionalPercent shadow_enabled = 2;\n\n  // Specifies additional source origins that will be allowed in addition to\n  // the destination origin.\n  //\n  // More information on how this can be configured via runtime can be found\n  // :ref:`here <csrf-configuration>`.\n  repeated type.matcher.v4alpha.StringMatcher additional_origins = 3;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/decompressor/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/decompressor/v3/decompressor.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.decompressor.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.decompressor.v3\";\noption java_outer_classname = \"DecompressorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Decompressor]\n// [#extension: envoy.filters.http.decompressor]\n\nmessage Decompressor {\n  // Common configuration for filter behavior on both the request and response direction.\n  message CommonDirectionConfig {\n    // Runtime flag that controls whether the filter is enabled for decompression or not. If set to false, the\n    // filter will operate as a pass-through filter. If the message is unspecified, the filter will be enabled.\n    config.core.v3.RuntimeFeatureFlag enabled = 1;\n  }\n\n  // Configuration for filter behavior on the request direction.\n  message RequestDirectionConfig {\n    CommonDirectionConfig common_config = 1;\n\n    // If set to true, and response decompression is enabled, the filter modifies the Accept-Encoding\n    // request header by appending the decompressor_library's encoding. Defaults to true.\n    google.protobuf.BoolValue advertise_accept_encoding = 2;\n  }\n\n  // Configuration for filter behavior on the response direction.\n  message ResponseDirectionConfig {\n    CommonDirectionConfig common_config = 1;\n  }\n\n  // A decompressor library to use for both request and response decompression. 
Currently only\n  // :ref:`envoy.compression.gzip.decompressor<envoy_api_msg_extensions.compression.gzip.decompressor.v3.Gzip>`\n  // is included in Envoy.\n  config.core.v3.TypedExtensionConfig decompressor_library = 1\n      [(validate.rules).message = {required: true}];\n\n  // Configuration for request decompression. Decompression is enabled by default if left empty.\n  RequestDirectionConfig request_direction_config = 2;\n\n  // Configuration for response decompression. Decompression is enabled by default if left empty.\n  ResponseDirectionConfig response_direction_config = 3;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.dynamic_forward_proxy.v3;\n\nimport \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.dynamic_forward_proxy.v3\";\noption java_outer_classname = \"DynamicForwardProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamic forward proxy]\n\n// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#extension: envoy.filters.http.dynamic_forward_proxy]\nmessage FilterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig\";\n\n  // The DNS cache configuration that the filter will attach to. Note this configuration must\n  // match that of associated :ref:`dynamic forward proxy cluster configuration\n  // <envoy_api_field_extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// Per route Configuration for the dynamic forward proxy HTTP filter.\nmessage PerRouteConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.dynamic_forward_proxy.v2alpha.PerRouteConfig\";\n\n  oneof host_rewrite_specifier {\n    // Indicates that before DNS lookup, the host header will be swapped with\n    // this value. If not set or empty, the original host header value\n    // will be used and no rewrite will happen.\n    //\n    // Note: this rewrite affects both DNS lookup and host header forwarding. 
However, this\n    // option shouldn't be used with\n    // :ref:`HCM host rewrite <envoy_api_field_config.route.v3.RouteAction.host_rewrite_literal>` given that the\n    // value set here would be used for DNS lookups whereas the value set in the HCM would be used\n    // for host header forwarding which is not the desired outcome.\n    string host_rewrite_literal = 1;\n\n    // Indicates that before DNS lookup, the host header will be swapped with\n    // the value of this header. If not set or empty, the original host header\n    // value will be used and no rewrite will happen.\n    //\n    // Note: this rewrite affects both DNS lookup and host header forwarding. However, this\n    // option shouldn't be used with\n    // :ref:`HCM host rewrite header <envoy_api_field_config.route.v3.RouteAction.auto_host_rewrite>`\n    // given that the value set here would be used for DNS lookups whereas the value set in the HCM\n    // would be used for host header forwarding which is not the desired outcome.\n    string host_rewrite_header = 2;\n  }\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/dynamo/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/dynamo/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/dynamo/v3/dynamo.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.dynamo.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.dynamo.v3\";\noption java_outer_classname = \"DynamoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamo]\n// Dynamo :ref:`configuration overview <config_http_filters_dynamo>`.\n// [#extension: envoy.filters.http.dynamo]\n\n// Dynamo filter config.\nmessage Dynamo {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.dynamo.v2.Dynamo\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/ext_authz/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/ext_authz/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.ext_authz.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\nimport \"envoy/config/core/v3/http_uri.proto\";\nimport \"envoy/type/matcher/v3/metadata.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/v3/http_status.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.ext_authz.v3\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: External Authorization]\n// External Authorization :ref:`configuration overview <config_http_filters_ext_authz>`.\n// [#extension: envoy.filters.http.ext_authz]\n\n// [#next-free-field: 15]\nmessage ExtAuthz {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.ExtAuthz\";\n\n  reserved 4;\n\n  reserved \"use_alpha\";\n\n  // External authorization service configuration.\n  oneof services {\n    // gRPC service configuration (default timeout: 200ms).\n    config.core.v3.GrpcService grpc_service = 1;\n\n    // HTTP service configuration (default timeout: 200ms).\n    HttpService http_service = 3;\n  }\n\n  // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and\n  // version of messages used on the wire.\n  config.core.v3.ApiVersion transport_api_version = 12\n      [(validate.rules).enum = {defined_only: true}];\n\n  //  Changes filter's behaviour on errors:\n  //\n  //  1. 
When set to true, the filter will *accept* client request even if the communication with\n  //  the authorization service has failed, or if the authorization service has returned a HTTP 5xx\n  //  error.\n  //\n  //  2. When set to false, ext-authz will *reject* client requests and return a *Forbidden*\n  //  response if the communication with the authorization service has failed, or if the\n  //  authorization service has returned a HTTP 5xx error.\n  //\n  // Note that errors can be *always* tracked in the :ref:`stats\n  // <config_http_filters_ext_authz_stats>`.\n  bool failure_mode_allow = 2;\n\n  // Enables filter to buffer the client request body and send it within the authorization request.\n  // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization\n  // request message indicating if the body data is partial.\n  BufferSettings with_request_body = 5;\n\n  // Clears route cache in order to allow the external authorization service to correctly affect\n  // routing decisions. Filter clears all cached routes when:\n  //\n  // 1. The field is set to *true*.\n  //\n  // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0.\n  //\n  // 3. At least one *authorization response header* is added to the client request, or is used for\n  // altering another client request header.\n  //\n  bool clear_route_cache = 6;\n\n  // Sets the HTTP status that is returned to the client when there is a network error between the\n  // filter and the authorization server. 
The default status is HTTP 403 Forbidden.\n  type.v3.HttpStatus status_on_error = 7;\n\n  // Specifies a list of metadata namespaces whose values, if present, will be passed to the\n  // ext_authz service as an opaque *protobuf::Struct*.\n  //\n  // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata\n  // <envoy_api_field_extensions.filters.http.jwt_authn.v3.JwtProvider.payload_in_metadata>` is set,\n  // then the following will pass the jwt payload to the authorization server.\n  //\n  // .. code-block:: yaml\n  //\n  //    metadata_context_namespaces:\n  //    - envoy.filters.http.jwt_authn\n  //\n  repeated string metadata_context_namespaces = 8;\n\n  // Specifies if the filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // If this field is not specified, the filter will be enabled for all requests.\n  config.core.v3.RuntimeFractionalPercent filter_enabled = 9;\n\n  // Specifies if the filter is enabled with metadata matcher.\n  // If this field is not specified, the filter will be enabled for all requests.\n  type.matcher.v3.MetadataMatcher filter_enabled_metadata = 14;\n\n  // Specifies whether to deny the requests, when the filter is disabled.\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFeatureFlag.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to determine whether to deny request for\n  // filter protected path at filter disabling. 
If filter is disabled in\n  // typed_per_filter_config for the path, requests will not be denied.\n  //\n  // If this field is not specified, all requests will be allowed when disabled.\n  config.core.v3.RuntimeFeatureFlag deny_at_disable = 11;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v3.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 10;\n\n  // Optional additional prefix to use when emitting statistics. This allows to distinguish\n  // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //   http_filters:\n  //     - name: envoy.filters.http.ext_authz\n  //       typed_config:\n  //         \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n  //         stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc.\n  //     - name: envoy.filters.http.ext_authz\n  //       typed_config:\n  //         \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n  //         stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc.\n  //\n  string stat_prefix = 13;\n}\n\n// Configuration for buffering the request data.\nmessage BufferSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.BufferSettings\";\n\n  // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return\n  // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number\n  // set in this field. 
Note that this setting will have precedence over :ref:`failure_mode_allow\n  // <envoy_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.failure_mode_allow>`.\n  uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached.\n  // The authorization request will be dispatched and no 413 HTTP error will be returned by the\n  // filter.\n  bool allow_partial_message = 2;\n\n  // If true, the body sent to the external authorization service is set with raw bytes, it sets\n  // the :ref:`raw_body<envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.raw_body>`\n  // field of HTTP request attribute context. Otherwise, :ref:`\n  // body<envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` will be filled\n  // with UTF-8 string request body.\n  bool pack_as_bytes = 3;\n}\n\n// HttpService is used for raw HTTP communication between the filter and the authorization service.\n// When configured, the filter will parse the client request and use these attributes to call the\n// authorization server. Depending on the response, the filter may reject or accept the client\n// request. Note that in any of these events, metadata can be added, removed or overridden by the\n// filter:\n//\n// *On authorization request*, a list of allowed request headers may be supplied. See\n// :ref:`allowed_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v3.AuthorizationRequest.allowed_headers>`\n// for details. Additional headers metadata may be added to the authorization request. See\n// :ref:`headers_to_add\n// <envoy_api_field_extensions.filters.http.ext_authz.v3.AuthorizationRequest.headers_to_add>` for\n// details.\n//\n// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and\n// additional headers metadata may be added to the original client request. 
See\n// :ref:`allowed_upstream_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v3.AuthorizationResponse.allowed_upstream_headers>`\n// for details.\n//\n// On other authorization response statuses, the filter will not allow traffic. Additional headers\n// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v3.AuthorizationResponse.allowed_client_headers>`\n// for details.\n// [#next-free-field: 9]\nmessage HttpService {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.HttpService\";\n\n  reserved 3, 4, 5, 6;\n\n  // Sets the HTTP server URI which the authorization requests must be sent to.\n  config.core.v3.HttpUri server_uri = 1;\n\n  // Sets a prefix to the value of authorization request header *Path*.\n  string path_prefix = 2;\n\n  // Settings used for controlling authorization request metadata.\n  AuthorizationRequest authorization_request = 7;\n\n  // Settings used for controlling authorization response metadata.\n  AuthorizationResponse authorization_response = 8;\n}\n\nmessage AuthorizationRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.AuthorizationRequest\";\n\n  // Authorization request will include the client request headers that have a correspondent match\n  // in the :ref:`list <envoy_api_msg_type.matcher.v3.ListStringMatcher>`. Note that in addition to the\n  // user's supplied matchers:\n  //\n  // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list.\n  //\n  // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have\n  // a message body. 
However, the authorization request can include the buffered client request body\n  // (controlled by :ref:`with_request_body\n  // <envoy_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.with_request_body>` setting),\n  // consequently the value of *Content-Length* of the authorization request reflects the size of\n  // its payload size.\n  //\n  type.matcher.v3.ListStringMatcher allowed_headers = 1;\n\n  // Sets a list of headers that will be included to the request to authorization service. Note that\n  // client request of the same key will be overridden.\n  repeated config.core.v3.HeaderValue headers_to_add = 2;\n}\n\nmessage AuthorizationResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.AuthorizationResponse\";\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v3.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the original client request.\n  // Note that coexistent headers will be overridden.\n  type.matcher.v3.ListStringMatcher allowed_upstream_headers = 1;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v3.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the client's response. Note\n  // that coexistent headers will be appended.\n  type.matcher.v3.ListStringMatcher allowed_upstream_headers_to_append = 3;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v3.ListStringMatcher>`. is set, authorization\n  // response headers that have a correspondent match will be added to the client's response. Note\n  // that when this list is *not* set, all the authorization response headers, except *Authority\n  // (Host)* will be in the response to the client. 
When a header is included in this list, *Path*,\n  // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added.\n  type.matcher.v3.ListStringMatcher allowed_client_headers = 2;\n}\n\n// Extra settings on a per virtualhost/route/weighted-cluster level.\nmessage ExtAuthzPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.ExtAuthzPerRoute\";\n\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the ext auth filter for this particular vhost or route.\n    // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Check request settings for this route.\n    CheckSettings check_settings = 2 [(validate.rules).message = {required: true}];\n  }\n}\n\n// Extra settings for the check request.\nmessage CheckSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.CheckSettings\";\n\n  // Context extensions to set on the CheckRequest's\n  // :ref:`AttributeContext.context_extensions<envoy_api_field_service.auth.v3.AttributeContext.context_extensions>`\n  //\n  // You can use this to provide extra context for the external authorization server on specific\n  // virtual hosts/routes. For example, adding a context extension on the virtual host level can\n  // give the ext-authz server information on what virtual host is used without needing to parse the\n  // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged\n  // in order, and the result will be used.\n  //\n  // Merge semantics for this field are such that keys from more specific configs override.\n  //\n  // .. 
note::\n  //\n  //   These settings are only applied to a filter configured with a\n  //   :ref:`grpc_service<envoy_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.grpc_service>`.\n  map<string, string> context_extensions = 1;\n\n  // When set to true, disable the configured :ref:`with_request_body\n  // <envoy_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.with_request_body>` for a route.\n  bool disable_request_body_buffering = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/ext_authz/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.ext_authz.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\nimport \"envoy/config/core/v4alpha/http_uri.proto\";\nimport \"envoy/type/matcher/v4alpha/metadata.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/v3/http_status.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.ext_authz.v4alpha\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: External Authorization]\n// External Authorization :ref:`configuration overview <config_http_filters_ext_authz>`.\n// [#extension: envoy.filters.http.ext_authz]\n\n// [#next-free-field: 15]\nmessage ExtAuthz {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\";\n\n  reserved 4;\n\n  reserved \"use_alpha\";\n\n  // External authorization service configuration.\n  oneof services {\n    // gRPC service configuration (default timeout: 200ms).\n    config.core.v4alpha.GrpcService grpc_service = 1;\n\n    // HTTP service configuration (default timeout: 200ms).\n    HttpService http_service = 3;\n  }\n\n  // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and\n  // version of messages used on the wire.\n  config.core.v4alpha.ApiVersion transport_api_version = 12\n      [(validate.rules).enum = {defined_only: true}];\n\n  //  Changes filter's behaviour on errors:\n  //\n  //  1. 
When set to true, the filter will *accept* client request even if the communication with\n  //  the authorization service has failed, or if the authorization service has returned a HTTP 5xx\n  //  error.\n  //\n  //  2. When set to false, ext-authz will *reject* client requests and return a *Forbidden*\n  //  response if the communication with the authorization service has failed, or if the\n  //  authorization service has returned a HTTP 5xx error.\n  //\n  // Note that errors can be *always* tracked in the :ref:`stats\n  // <config_http_filters_ext_authz_stats>`.\n  bool failure_mode_allow = 2;\n\n  // Enables filter to buffer the client request body and send it within the authorization request.\n  // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization\n  // request message indicating if the body data is partial.\n  BufferSettings with_request_body = 5;\n\n  // Clears route cache in order to allow the external authorization service to correctly affect\n  // routing decisions. Filter clears all cached routes when:\n  //\n  // 1. The field is set to *true*.\n  //\n  // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0.\n  //\n  // 3. At least one *authorization response header* is added to the client request, or is used for\n  // altering another client request header.\n  //\n  bool clear_route_cache = 6;\n\n  // Sets the HTTP status that is returned to the client when there is a network error between the\n  // filter and the authorization server. 
The default status is HTTP 403 Forbidden.\n  type.v3.HttpStatus status_on_error = 7;\n\n  // Specifies a list of metadata namespaces whose values, if present, will be passed to the\n  // ext_authz service as an opaque *protobuf::Struct*.\n  //\n  // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata\n  // <envoy_api_field_extensions.filters.http.jwt_authn.v4alpha.JwtProvider.payload_in_metadata>` is set,\n  // then the following will pass the jwt payload to the authorization server.\n  //\n  // .. code-block:: yaml\n  //\n  //    metadata_context_namespaces:\n  //    - envoy.filters.http.jwt_authn\n  //\n  repeated string metadata_context_namespaces = 8;\n\n  // Specifies if the filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // If this field is not specified, the filter will be enabled for all requests.\n  config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9;\n\n  // Specifies if the filter is enabled with metadata matcher.\n  // If this field is not specified, the filter will be enabled for all requests.\n  type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 14;\n\n  // Specifies whether to deny the requests, when the filter is disabled.\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFeatureFlag.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to determine whether to deny request for\n  // filter protected path at filter disabling. 
If filter is disabled in\n  // typed_per_filter_config for the path, requests will not be denied.\n  //\n  // If this field is not specified, all requests will be allowed when disabled.\n  config.core.v4alpha.RuntimeFeatureFlag deny_at_disable = 11;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v4alpha.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 10;\n\n  // Optional additional prefix to use when emitting statistics. This allows to distinguish\n  // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //   http_filters:\n  //     - name: envoy.filters.http.ext_authz\n  //       typed_config:\n  //         \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n  //         stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc.\n  //     - name: envoy.filters.http.ext_authz\n  //       typed_config:\n  //         \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n  //         stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc.\n  //\n  string stat_prefix = 13;\n}\n\n// Configuration for buffering the request data.\nmessage BufferSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.BufferSettings\";\n\n  // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return\n  // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number\n  // set in this field. 
Note that this setting will have precedence over :ref:`failure_mode_allow\n  // <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.ExtAuthz.failure_mode_allow>`.\n  uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached.\n  // The authorization request will be dispatched and no 413 HTTP error will be returned by the\n  // filter.\n  bool allow_partial_message = 2;\n\n  // If true, the body sent to the external authorization service is set with raw bytes, it sets\n  // the :ref:`raw_body<envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.raw_body>`\n  // field of HTTP request attribute context. Otherwise, :ref:`\n  // body<envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` will be filled\n  // with UTF-8 string request body.\n  bool pack_as_bytes = 3;\n}\n\n// HttpService is used for raw HTTP communication between the filter and the authorization service.\n// When configured, the filter will parse the client request and use these attributes to call the\n// authorization server. Depending on the response, the filter may reject or accept the client\n// request. Note that in any of these events, metadata can be added, removed or overridden by the\n// filter:\n//\n// *On authorization request*, a list of allowed request headers may be supplied. See\n// :ref:`allowed_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.AuthorizationRequest.allowed_headers>`\n// for details. Additional headers metadata may be added to the authorization request. See\n// :ref:`headers_to_add\n// <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.AuthorizationRequest.headers_to_add>` for\n// details.\n//\n// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and\n// additional headers metadata may be added to the original client request. 
See\n// :ref:`allowed_upstream_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.AuthorizationResponse.allowed_upstream_headers>`\n// for details.\n//\n// On other authorization response statuses, the filter will not allow traffic. Additional headers\n// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.AuthorizationResponse.allowed_client_headers>`\n// for details.\n// [#next-free-field: 9]\nmessage HttpService {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.HttpService\";\n\n  reserved 3, 4, 5, 6;\n\n  // Sets the HTTP server URI which the authorization requests must be sent to.\n  config.core.v4alpha.HttpUri server_uri = 1;\n\n  // Sets a prefix to the value of authorization request header *Path*.\n  string path_prefix = 2;\n\n  // Settings used for controlling authorization request metadata.\n  AuthorizationRequest authorization_request = 7;\n\n  // Settings used for controlling authorization response metadata.\n  AuthorizationResponse authorization_response = 8;\n}\n\nmessage AuthorizationRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest\";\n\n  // Authorization request will include the client request headers that have a correspondent match\n  // in the :ref:`list <envoy_api_msg_type.matcher.v4alpha.ListStringMatcher>`. Note that in addition to the\n  // user's supplied matchers:\n  //\n  // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list.\n  //\n  // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have\n  // a message body. 
However, the authorization request can include the buffered client request body\n  // (controlled by :ref:`with_request_body\n  // <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.ExtAuthz.with_request_body>` setting),\n  // consequently the value of *Content-Length* of the authorization request reflects the size of\n  // its payload size.\n  //\n  type.matcher.v4alpha.ListStringMatcher allowed_headers = 1;\n\n  // Sets a list of headers that will be included to the request to authorization service. Note that\n  // client request of the same key will be overridden.\n  repeated config.core.v4alpha.HeaderValue headers_to_add = 2;\n}\n\nmessage AuthorizationResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.AuthorizationResponse\";\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v4alpha.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the original client request.\n  // Note that coexistent headers will be overridden.\n  type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v4alpha.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the client's response. Note\n  // that coexistent headers will be appended.\n  type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers_to_append = 3;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v4alpha.ListStringMatcher>`. is set, authorization\n  // response headers that have a correspondent match will be added to the client's response. Note\n  // that when this list is *not* set, all the authorization response headers, except *Authority\n  // (Host)* will be in the response to the client. 
When a header is included in this list, *Path*,\n  // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added.\n  type.matcher.v4alpha.ListStringMatcher allowed_client_headers = 2;\n}\n\n// Extra settings on a per virtualhost/route/weighted-cluster level.\nmessage ExtAuthzPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\";\n\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the ext auth filter for this particular vhost or route.\n    // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Check request settings for this route.\n    CheckSettings check_settings = 2 [(validate.rules).message = {required: true}];\n  }\n}\n\n// Extra settings for the check request.\nmessage CheckSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.CheckSettings\";\n\n  // Context extensions to set on the CheckRequest's\n  // :ref:`AttributeContext.context_extensions<envoy_api_field_service.auth.v4alpha.AttributeContext.context_extensions>`\n  //\n  // You can use this to provide extra context for the external authorization server on specific\n  // virtual hosts/routes. For example, adding a context extension on the virtual host level can\n  // give the ext-authz server information on what virtual host is used without needing to parse the\n  // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged\n  // in order, and the result will be used.\n  //\n  // Merge semantics for this field are such that keys from more specific configs override.\n  //\n  // .. 
note::\n  //\n  //   These settings are only applied to a filter configured with a\n  //   :ref:`grpc_service<envoy_api_field_extensions.filters.http.ext_authz.v4alpha.ExtAuthz.grpc_service>`.\n  map<string, string> context_extensions = 1;\n\n  // When set to true, disable the configured :ref:`with_request_body\n  // <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.ExtAuthz.with_request_body>` for a route.\n  bool disable_request_body_buffering = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/fault/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/fault/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/extensions/filters/common/fault/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/fault/v3/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.fault.v3;\n\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/extensions/filters/common/fault/v3/fault.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.fault.v3\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Fault Injection]\n// Fault Injection :ref:`configuration overview <config_http_filters_fault_injection>`.\n// [#extension: envoy.filters.http.fault]\n\n// [#next-free-field: 6]\nmessage FaultAbort {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.fault.v2.FaultAbort\";\n\n  // Fault aborts are controlled via an HTTP header (if applicable). 
See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderAbort {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.fault.v2.FaultAbort.HeaderAbort\";\n  }\n\n  reserved 1;\n\n  oneof error_type {\n    option (validate.required) = true;\n\n    // HTTP status code to use to abort the HTTP request.\n    uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n    // gRPC status code to use to abort the gRPC request.\n    uint32 grpc_status = 5;\n\n    // Fault aborts are controlled via an HTTP header (if applicable).\n    HeaderAbort header_abort = 4;\n  }\n\n  // The percentage of requests/operations/connections that will be aborted with the error code\n  // provided.\n  type.v3.FractionalPercent percentage = 3;\n}\n\n// [#next-free-field: 15]\nmessage HTTPFault {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.fault.v2.HTTPFault\";\n\n  // If specified, the filter will inject delays based on the values in the\n  // object.\n  common.fault.v3.FaultDelay delay = 1;\n\n  // If specified, the filter will abort requests based on the values in\n  // the object. At least *abort* or *delay* must be specified.\n  FaultAbort abort = 2;\n\n  // Specifies the name of the (destination) upstream cluster that the\n  // filter should match on. Fault injection will be restricted to requests\n  // bound to the specific upstream cluster.\n  string upstream_cluster = 3;\n\n  // Specifies a set of headers that the filter should match on. The fault\n  // injection filter can be applied selectively to requests that match a set of\n  // headers specified in the fault filter config. 
The chances of actual fault\n  // injection further depend on the value of the :ref:`percentage\n  // <envoy_api_field_extensions.filters.http.fault.v3.FaultAbort.percentage>` field.\n  // The filter will check the request's headers against all the specified\n  // headers in the filter config. A match will happen if all the headers in the\n  // config are present in the request with the same values (or based on\n  // presence if the *value* field is not in the config).\n  repeated config.route.v3.HeaderMatcher headers = 4;\n\n  // Faults are injected for the specified list of downstream hosts. If this\n  // setting is not set, faults are injected for all downstream nodes.\n  // Downstream node name is taken from :ref:`the HTTP\n  // x-envoy-downstream-service-node\n  // <config_http_conn_man_headers_downstream-service-node>` header and compared\n  // against downstream_nodes list.\n  repeated string downstream_nodes = 5;\n\n  // The maximum number of faults that can be active at a single time via the configured fault\n  // filter. Note that because this setting can be overridden at the route level, it's possible\n  // for the number of active faults to be greater than this value (if injected via a different\n  // route). If not specified, defaults to unlimited. This setting can be overridden via\n  // `runtime <config_http_filters_fault_injection_runtime>` and any faults that are not injected\n  // due to overflow will be indicated via the `faults_overflow\n  // <config_http_filters_fault_injection_stats>` stat.\n  //\n  // .. attention::\n  //   Like other :ref:`circuit breakers <arch_overview_circuit_break>` in Envoy, this is a fuzzy\n  //   limit. It's possible for the number of active faults to rise slightly above the configured\n  //   amount due to the implementation details.\n  google.protobuf.UInt32Value max_active_faults = 6;\n\n  // The response rate limit to be applied to the response body of the stream. 
When configured,\n  // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent\n  // <config_http_filters_fault_injection_runtime>` runtime key.\n  //\n  // .. attention::\n  //  This is a per-stream limit versus a connection level limit. This means that concurrent streams\n  //  will each get an independent limit.\n  common.fault.v3.FaultRateLimit response_rate_limit = 7;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_delay_percent\n  string delay_percent_runtime = 8;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.abort_percent\n  string abort_percent_runtime = 9;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_duration_ms\n  string delay_duration_runtime = 10;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.http_status\n  string abort_http_status_runtime = 11;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.max_active_faults\n  string max_active_faults_runtime = 12;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.rate_limit.response_percent\n  string response_rate_limit_percent_runtime = 13;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.grpc_status\n  string abort_grpc_status_runtime = 14;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/fault/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/common/fault/v3:pkg\",\n        \"//envoy/extensions/filters/http/fault/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/fault/v4alpha/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.fault.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/extensions/filters/common/fault/v3/fault.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.fault.v4alpha\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Fault Injection]\n// Fault Injection :ref:`configuration overview <config_http_filters_fault_injection>`.\n// [#extension: envoy.filters.http.fault]\n\n// [#next-free-field: 6]\nmessage FaultAbort {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.fault.v3.FaultAbort\";\n\n  // Fault aborts are controlled via an HTTP header (if applicable). 
See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderAbort {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort\";\n  }\n\n  reserved 1;\n\n  oneof error_type {\n    option (validate.required) = true;\n\n    // HTTP status code to use to abort the HTTP request.\n    uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n    // gRPC status code to use to abort the gRPC request.\n    uint32 grpc_status = 5;\n\n    // Fault aborts are controlled via an HTTP header (if applicable).\n    HeaderAbort header_abort = 4;\n  }\n\n  // The percentage of requests/operations/connections that will be aborted with the error code\n  // provided.\n  type.v3.FractionalPercent percentage = 3;\n}\n\n// [#next-free-field: 15]\nmessage HTTPFault {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.fault.v3.HTTPFault\";\n\n  // If specified, the filter will inject delays based on the values in the\n  // object.\n  common.fault.v3.FaultDelay delay = 1;\n\n  // If specified, the filter will abort requests based on the values in\n  // the object. At least *abort* or *delay* must be specified.\n  FaultAbort abort = 2;\n\n  // Specifies the name of the (destination) upstream cluster that the\n  // filter should match on. Fault injection will be restricted to requests\n  // bound to the specific upstream cluster.\n  string upstream_cluster = 3;\n\n  // Specifies a set of headers that the filter should match on. The fault\n  // injection filter can be applied selectively to requests that match a set of\n  // headers specified in the fault filter config. 
The chances of actual fault\n  // injection further depend on the value of the :ref:`percentage\n  // <envoy_api_field_extensions.filters.http.fault.v4alpha.FaultAbort.percentage>` field.\n  // The filter will check the request's headers against all the specified\n  // headers in the filter config. A match will happen if all the headers in the\n  // config are present in the request with the same values (or based on\n  // presence if the *value* field is not in the config).\n  repeated config.route.v4alpha.HeaderMatcher headers = 4;\n\n  // Faults are injected for the specified list of downstream hosts. If this\n  // setting is not set, faults are injected for all downstream nodes.\n  // Downstream node name is taken from :ref:`the HTTP\n  // x-envoy-downstream-service-node\n  // <config_http_conn_man_headers_downstream-service-node>` header and compared\n  // against downstream_nodes list.\n  repeated string downstream_nodes = 5;\n\n  // The maximum number of faults that can be active at a single time via the configured fault\n  // filter. Note that because this setting can be overridden at the route level, it's possible\n  // for the number of active faults to be greater than this value (if injected via a different\n  // route). If not specified, defaults to unlimited. This setting can be overridden via\n  // `runtime <config_http_filters_fault_injection_runtime>` and any faults that are not injected\n  // due to overflow will be indicated via the `faults_overflow\n  // <config_http_filters_fault_injection_stats>` stat.\n  //\n  // .. attention::\n  //   Like other :ref:`circuit breakers <arch_overview_circuit_break>` in Envoy, this is a fuzzy\n  //   limit. It's possible for the number of active faults to rise slightly above the configured\n  //   amount due to the implementation details.\n  google.protobuf.UInt32Value max_active_faults = 6;\n\n  // The response rate limit to be applied to the response body of the stream. 
When configured,\n  // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent\n  // <config_http_filters_fault_injection_runtime>` runtime key.\n  //\n  // .. attention::\n  //  This is a per-stream limit versus a connection level limit. This means that concurrent streams\n  //  will each get an independent limit.\n  common.fault.v3.FaultRateLimit response_rate_limit = 7;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_delay_percent\n  string delay_percent_runtime = 8;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.abort_percent\n  string abort_percent_runtime = 9;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_duration_ms\n  string delay_duration_runtime = 10;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.http_status\n  string abort_http_status_runtime = 11;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.max_active_faults\n  string max_active_faults_runtime = 12;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.rate_limit.response_percent\n  string response_rate_limit_percent_runtime = 13;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.grpc_status\n  string abort_grpc_status_runtime = 14;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/grpc_http1_bridge/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_http1_bridge.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_http1_bridge.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC HTTP/1.1 Bridge]\n// gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview <config_http_filters_grpc_bridge>`.\n// [#extension: envoy.filters.http.grpc_http1_bridge]\n\n// gRPC HTTP/1.1 Bridge filter config.\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_http1_bridge.v2.Config\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge]\n// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview\n// <config_http_filters_grpc_http1_reverse_bridge>`.\n// [#extension: envoy.filters.http.grpc_http1_reverse_bridge]\n\n// gRPC reverse bridge filter configuration\nmessage FilterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfig\";\n\n  // The content-type to pass to the upstream when the gRPC bridge filter is applied.\n  // The filter will also validate that the upstream responds with the same content type.\n  string content_type = 1 [(validate.rules).string = {min_len: 1}];\n\n  // If true, Envoy will assume that the upstream doesn't understand gRPC frames and\n  // strip the gRPC frame from the request, and add it back in to the response. 
This will\n  // hide the gRPC semantics from the upstream, allowing it to receive and respond with a\n  // simple binary encoded protobuf.\n  bool withhold_grpc_frames = 2;\n}\n\n// gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level.\nmessage FilterConfigPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfigPerRoute\";\n\n  // If true, disables gRPC reverse bridge filter for this particular vhost or route.\n  // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n  bool disabled = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/transcoder/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_json_transcoder.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_json_transcoder.v3\";\noption java_outer_classname = \"TranscoderProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC-JSON transcoder]\n// gRPC-JSON transcoder :ref:`configuration overview <config_http_filters_grpc_json_transcoder>`.\n// [#extension: envoy.filters.http.grpc_json_transcoder]\n\n// [#next-free-field: 10]\nmessage GrpcJsonTranscoder {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder\";\n\n  message PrintOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder.PrintOptions\";\n\n    // Whether to add spaces, line breaks and indentation to make the JSON\n    // output easy to read. Defaults to false.\n    bool add_whitespace = 1;\n\n    // Whether to always print primitive fields. By default primitive\n    // fields with default values will be omitted in JSON output. For\n    // example, an int32 field set to 0 will be omitted. Setting this flag to\n    // true will override the default behavior and print primitive fields\n    // regardless of their values. Defaults to false.\n    bool always_print_primitive_fields = 2;\n\n    // Whether to always print enums as ints. By default they are rendered\n    // as strings. Defaults to false.\n    bool always_print_enums_as_ints = 3;\n\n    // Whether to preserve proto field names. By default protobuf will\n    // generate JSON field names using the ``json_name`` option, or lower camel case,\n    // in that order. 
Setting this flag will preserve the original field names. Defaults to false.\n    bool preserve_proto_field_names = 4;\n  }\n\n  oneof descriptor_set {\n    option (validate.required) = true;\n\n    // Supplies the filename of\n    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC\n    // services.\n    string proto_descriptor = 1;\n\n    // Supplies the binary content of\n    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC\n    // services.\n    bytes proto_descriptor_bin = 4;\n  }\n\n  // A list of strings that\n  // supplies the fully qualified service names (i.e. \"package_name.service_name\") that\n  // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``,\n  // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than\n  // the service names specified here, but they won't be translated.\n  repeated string services = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // Control options for response JSON. These options are passed directly to\n  // `JsonPrintOptions <https://developers.google.com/protocol-buffers/docs/reference/cpp/\n  // google.protobuf.util.json_util#JsonPrintOptions>`_.\n  PrintOptions print_options = 3;\n\n  // Whether to keep the incoming request route after the outgoing headers have been transformed to\n  // the match the upstream gRPC service. Note: This means that routes for gRPC services that are\n  // not transcoded cannot be used in combination with *match_incoming_request_route*.\n  bool match_incoming_request_route = 5;\n\n  // A list of query parameters to be ignored for transcoding method mapping.\n  // By default, the transcoder filter will not transcode a request if there are any\n  // unknown/invalid query parameters.\n  //\n  // Example :\n  //\n  // .. 
code-block:: proto\n  //\n  //     service Bookstore {\n  //       rpc GetShelf(GetShelfRequest) returns (Shelf) {\n  //         option (google.api.http) = {\n  //           get: \"/shelves/{shelf}\"\n  //         };\n  //       }\n  //     }\n  //\n  //     message GetShelfRequest {\n  //       int64 shelf = 1;\n  //     }\n  //\n  //     message Shelf {}\n  //\n  // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable\n  // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow\n  // the same request to be mapped to ``GetShelf``.\n  repeated string ignored_query_parameters = 6;\n\n  // Whether to route methods without the ``google.api.http`` option.\n  //\n  // Example :\n  //\n  // .. code-block:: proto\n  //\n  //     package bookstore;\n  //\n  //     service Bookstore {\n  //       rpc GetShelf(GetShelfRequest) returns (Shelf) {}\n  //     }\n  //\n  //     message GetShelfRequest {\n  //       int64 shelf = 1;\n  //     }\n  //\n  //     message Shelf {}\n  //\n  // The client could ``post`` a json body ``{\"shelf\": 1234}`` with the path of\n  // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``.\n  bool auto_mapping = 7;\n\n  // Whether to ignore query parameters that cannot be mapped to a corresponding\n  // protobuf field. Use this if you cannot control the query parameters and do\n  // not know them beforehand. 
Otherwise use ``ignored_query_parameters``.\n  // Defaults to false.\n  bool ignore_unknown_query_parameters = 8;\n\n  // Whether to convert gRPC status headers to JSON.\n  // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status``\n  // from the ``grpc-status-details-bin`` header and use it as JSON body.\n  // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and\n  // ``grpc-message`` headers.\n  // The error details types must be present in the ``proto_descriptor``.\n  //\n  // For example, if an upstream server replies with headers:\n  //\n  // .. code-block:: none\n  //\n  //     grpc-status: 5\n  //     grpc-status-details-bin:\n  //         CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ\n  //\n  // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message\n  // ``google.rpc.Status``. It will be transcoded into:\n  //\n  // .. code-block:: none\n  //\n  //     HTTP/1.1 404 Not Found\n  //     content-type: application/json\n  //\n  //     {\"code\":5,\"details\":[{\"@type\":\"type.googleapis.com/google.rpc.RequestInfo\",\"requestId\":\"r-1\"}]}\n  //\n  //  In order to transcode the message, the ``google.rpc.RequestInfo`` type from\n  //  the ``google/rpc/error_details.proto`` should be included in the configured\n  //  :ref:`proto descriptor set <config_grpc_json_generate_proto_descriptor_set>`.\n  bool convert_grpc_status = 9;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_stats/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/grpc_stats/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_stats/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_stats.v3;\n\nimport \"envoy/config/core/v3/grpc_method_list.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_stats.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC statistics] gRPC statistics filter\n// :ref:`configuration overview <config_http_filters_grpc_stats>`.\n// [#extension: envoy.filters.http.grpc_stats]\n\n// gRPC statistics filter configuration\nmessage FilterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_stats.v2alpha.FilterConfig\";\n\n  // If true, the filter maintains a filter state object with the request and response message\n  // counts.\n  bool emit_filter_state = 1;\n\n  oneof per_method_stat_specifier {\n    // If set, specifies an allowlist of service/methods that will have individual stats\n    // emitted for them. Any call that does not match the allowlist will be counted\n    // in a stat with no method specifier: `cluster.<name>.grpc.*`.\n    config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2;\n\n    // If set to true, emit stats for all service/method names.\n    //\n    // If set to false, emit stats for all service/message types to the same stats without including\n    // the service/method in the name, with prefix `cluster.<name>.grpc`. This can be useful if\n    // service/method granularity is not needed, or if each cluster only receives a single method.\n    //\n    // .. attention::\n    //   This option is only safe if all clients are trusted. 
If this option is enabled\n    //   with untrusted clients, the clients could cause unbounded growth in the number of stats in\n    //   Envoy, using unbounded memory and potentially slowing down stats pipelines.\n    //\n    // .. attention::\n    //   If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the\n    //   behavior will default to `stats_for_all_methods=true`. This default value is deprecated,\n    //   and in a future release, if neither field is set, it will default to\n    //   `stats_for_all_methods=false` in order to be safe by default. This behavior can be\n    //   controlled with runtime override\n    //   `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`.\n    google.protobuf.BoolValue stats_for_all_methods = 3;\n  }\n\n  // If true, the filter will gather a histogram for the request time of the upstream.\n  // It works with :ref:`stats_for_all_methods\n  // <envoy_api_field_extensions.filters.http.grpc_stats.v3.FilterConfig.stats_for_all_methods>`\n  // and :ref:`individual_method_stats_allowlist\n  // <envoy_api_field_extensions.filters.http.grpc_stats.v3.FilterConfig.individual_method_stats_allowlist>` the same way\n  // request_message_count and response_message_count works.\n  bool enable_upstream_stats = 4;\n}\n\n// gRPC statistics filter state object in protobuf form.\nmessage FilterObject {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_stats.v2alpha.FilterObject\";\n\n  // Count of request messages in the request stream.\n  uint64 request_message_count = 1;\n\n  // Count of response messages in the response stream.\n  uint64 response_message_count = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_web/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/grpc_web/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_web.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_web.v3\";\noption java_outer_classname = \"GrpcWebProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC Web]\n// gRPC Web :ref:`configuration overview <config_http_filters_grpc_web>`.\n// [#extension: envoy.filters.http.grpc_web]\n\n// gRPC Web filter config.\nmessage GrpcWeb {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_web.v2.GrpcWeb\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/gzip/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/gzip/v2:pkg\",\n        \"//envoy/extensions/filters/http/compressor/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/gzip/v3/gzip.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.gzip.v3;\n\nimport \"envoy/extensions/filters/http/compressor/v3/compressor.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.gzip.v3\";\noption java_outer_classname = \"GzipProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Gzip]\n// Gzip :ref:`configuration overview <config_http_filters_gzip>`.\n// [#extension: envoy.filters.http.gzip]\n\n// [#next-free-field: 12]\nmessage Gzip {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.gzip.v2.Gzip\";\n\n  enum CompressionStrategy {\n    DEFAULT = 0;\n    FILTERED = 1;\n    HUFFMAN = 2;\n    RLE = 3;\n  }\n\n  message CompressionLevel {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.gzip.v2.Gzip.CompressionLevel\";\n\n    enum Enum {\n      DEFAULT = 0;\n      BEST = 1;\n      SPEED = 2;\n    }\n  }\n\n  reserved 2, 6, 7, 8;\n\n  reserved \"content_length\", \"content_type\", \"disable_on_etag_header\",\n      \"remove_accept_encoding_header\";\n\n  // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values\n  // use more memory, but are faster and produce better compression results. The default value is 5.\n  google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}];\n\n  // A value used for selecting the zlib compression level. This setting will affect speed and\n  // amount of compression applied to the content. \"BEST\" provides higher compression at the cost of\n  // higher latency, \"SPEED\" provides lower compression with minimum impact on response time.\n  // \"DEFAULT\" provides an optimal result between speed and compression. This field will be set to\n  // \"DEFAULT\" if not specified.\n  CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // A value used for selecting the zlib compression strategy which is directly related to the\n  // characteristics of the content. Most of the time \"DEFAULT\" will be the best choice, though\n  // there are situations which changing this parameter might produce better results. For example,\n  // run-length encoding (RLE) is typically used when the content is known for having sequences\n  // which same data occurs many consecutive times. For more information about each strategy, please\n  // refer to zlib manual.\n  CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size.\n  // Larger window results in better compression at the expense of memory usage. The default is 12\n  // which will produce a 4096 bytes window. For more details about this parameter, please refer to\n  // zlib manual > deflateInit2.\n  google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}];\n\n  // Set of configuration parameters common for all compression filters. If this field is set then\n  // the fields `content_length`, `content_type`, `disable_on_etag_header` and\n  // `remove_accept_encoding_header` are ignored.\n  compressor.v3.Compressor compressor = 10;\n\n  // Value for Zlib's next output buffer. If not set, defaults to 4096.\n  // See https://www.zlib.net/manual.html for more details. Also see\n  // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance.\n  google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/header_to_metadata/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.header_to_metadata.v3;\n\nimport \"envoy/type/matcher/v3/regex.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v3\";\noption java_outer_classname = \"HeaderToMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Header-To-Metadata Filter]\n//\n// The configuration for transforming headers into metadata. This is useful\n// for matching load balancer subsets, logging, etc.\n//\n// Header to Metadata :ref:`configuration overview <config_http_filters_header_to_metadata>`.\n// [#extension: envoy.filters.http.header_to_metadata]\n\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.header_to_metadata.v2.Config\";\n\n  enum ValueType {\n    STRING = 0;\n\n    NUMBER = 1;\n\n    // The value is a serialized `protobuf.Value\n    // <https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/struct.proto#L62>`_.\n    PROTOBUF_VALUE = 2;\n  }\n\n  // ValueEncode defines the encoding algorithm.\n  enum ValueEncode {\n    // The value is not encoded.\n    NONE = 0;\n\n    // The value is encoded in `Base64 <https://tools.ietf.org/html/rfc4648#section-4>`_.\n    // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the\n    // non-ASCII characters in the header.\n    BASE64 = 1;\n  }\n\n  // [#next-free-field: 7]\n  message KeyValuePair {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.header_to_metadata.v2.Config.KeyValuePair\";\n\n    // The namespace — if this is empty, the filter's namespace will be used.\n    string metadata_namespace = 1;\n\n    // The key to use within the namespace.\n    string key = 2 [(validate.rules).string = {min_len: 1}];\n\n    // The value to pair with the given key.\n    //\n    // When used for a\n    // :ref:`on_header_present <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_present>`\n    // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added.\n    //\n    // When used for a :ref:`on_header_missing <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_missing>`\n    // case, a non-empty value must be provided otherwise no metadata is added.\n    string value = 3 [(udpa.annotations.field_migrate).oneof_promotion = \"value_type\"];\n\n    // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value\n    // is used as-is.\n    //\n    // This is only used for :ref:`on_header_present <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_present>`.\n    //\n    // Note: if the `value` field is non-empty this field should be empty.\n    type.matcher.v3.RegexMatchAndSubstitute regex_value_rewrite = 6\n        [(udpa.annotations.field_migrate).oneof_promotion = \"value_type\"];\n\n    // The value's type — defaults to string.\n    ValueType type = 4 [(validate.rules).enum = {defined_only: true}];\n\n    // How is the value encoded, default is NONE (not encoded).\n    // The value will be decoded accordingly before storing to metadata.\n    ValueEncode encode = 5;\n  }\n\n  // A Rule defines what metadata to apply when a header is present or missing.\n  // [#next-free-field: 6]\n  message Rule {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.header_to_metadata.v2.Config.Rule\";\n\n    // Specifies that a match will be performed on the value of a header or a cookie.\n    //\n    // The header to be extracted.\n    string header = 1 [\n      (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false},\n      (udpa.annotations.field_migrate).oneof_promotion = \"header_cookie_specifier\"\n    ];\n\n    // The cookie to be extracted.\n    string cookie = 5 [\n      (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false},\n      (udpa.annotations.field_migrate).oneof_promotion = \"header_cookie_specifier\"\n    ];\n\n    // If the header or cookie is present, apply this metadata KeyValuePair.\n    //\n    // If the value in the KeyValuePair is non-empty, it'll be used instead\n    // of the header or cookie value.\n    KeyValuePair on_header_present = 2 [(udpa.annotations.field_migrate).rename = \"on_present\"];\n\n    // If the header or cookie is not present, apply this metadata KeyValuePair.\n    //\n    // The value in the KeyValuePair must be set, since it'll be used in lieu\n    // of the missing header or cookie value.\n    KeyValuePair on_header_missing = 3 [(udpa.annotations.field_migrate).rename = \"on_missing\"];\n\n    // Whether or not to remove the header after a rule is applied.\n    //\n    // This prevents headers from leaking.\n    // This field is not supported in case of a cookie.\n    bool remove = 4;\n  }\n\n  // The list of rules to apply to requests.\n  repeated Rule request_rules = 1;\n\n  // The list of rules to apply to responses.\n  repeated Rule response_rules = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/filters/http/header_to_metadata/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.header_to_metadata.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/regex.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v4alpha\";\noption java_outer_classname = \"HeaderToMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Header-To-Metadata Filter]\n//\n// The configuration for transforming headers into metadata. This is useful\n// for matching load balancer subsets, logging, etc.\n//\n// Header to Metadata :ref:`configuration overview <config_http_filters_header_to_metadata>`.\n// [#extension: envoy.filters.http.header_to_metadata]\n\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.header_to_metadata.v3.Config\";\n\n  enum ValueType {\n    STRING = 0;\n\n    NUMBER = 1;\n\n    // The value is a serialized `protobuf.Value\n    // <https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/struct.proto#L62>`_.\n    PROTOBUF_VALUE = 2;\n  }\n\n  // ValueEncode defines the encoding algorithm.\n  enum ValueEncode {\n    // The value is not encoded.\n    NONE = 0;\n\n    // The value is encoded in `Base64 <https://tools.ietf.org/html/rfc4648#section-4>`_.\n    // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the\n    // non-ASCII characters in the header.\n    BASE64 = 1;\n  }\n\n  // [#next-free-field: 7]\n  message KeyValuePair {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.http.header_to_metadata.v3.Config.KeyValuePair\";\n\n    // The namespace — if this is empty, the filter's namespace will be used.\n    string metadata_namespace = 1;\n\n    // The key to use within the namespace.\n    string key = 2 [(validate.rules).string = {min_len: 1}];\n\n    oneof value_type {\n      // The value to pair with the given key.\n      //\n      // When used for a\n      // :ref:`on_header_present <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_present>`\n      // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added.\n      //\n      // When used for a :ref:`on_header_missing <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_missing>`\n      // case, a non-empty value must be provided otherwise no metadata is added.\n      string value = 3;\n\n      // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value\n      // is used as-is.\n      //\n      // This is only used for :ref:`on_header_present <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_present>`.\n      //\n      // Note: if the `value` field is non-empty this field should be empty.\n      type.matcher.v4alpha.RegexMatchAndSubstitute regex_value_rewrite = 6;\n    }\n\n    // The value's type — defaults to string.\n    ValueType type = 4 [(validate.rules).enum = {defined_only: true}];\n\n    // How is the value encoded, default is NONE (not encoded).\n    // The value will be decoded accordingly before storing to metadata.\n    ValueEncode encode = 5;\n  }\n\n  // A Rule defines what metadata to apply when a header is present or missing.\n  // [#next-free-field: 6]\n  message Rule {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule\";\n\n    oneof header_cookie_specifier {\n      // Specifies that a match will be performed on the value of a header or a cookie.\n      //\n      // The header to be extracted.\n      string header = 1\n          [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // The cookie to be extracted.\n      string cookie = 5\n          [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n    }\n\n    // If the header or cookie is present, apply this metadata KeyValuePair.\n    //\n    // If the value in the KeyValuePair is non-empty, it'll be used instead\n    // of the header or cookie value.\n    KeyValuePair on_present = 2;\n\n    // If the header or cookie is not present, apply this metadata KeyValuePair.\n    //\n    // The value in the KeyValuePair must be set, since it'll be used in lieu\n    // of the missing header or cookie value.\n    KeyValuePair on_missing = 3;\n\n    // Whether or not to remove the header after a rule is applied.\n    //\n    // This prevents headers from leaking.\n    // This field is not supported in case of a cookie.\n    bool remove = 4;\n  }\n\n  // The list of rules to apply to requests.\n  repeated Rule request_rules = 1;\n\n  // The list of rules to apply to responses.\n  repeated Rule response_rules = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/health_check/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/health_check/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/health_check/v3/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.health_check.v3;\n\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.health_check.v3\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Health check]\n// Health check :ref:`configuration overview <config_http_filters_health_check>`.\n// [#extension: envoy.filters.http.health_check]\n\n// [#next-free-field: 6]\nmessage HealthCheck {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.health_check.v2.HealthCheck\";\n\n  reserved 2;\n\n  // Specifies whether the filter operates in pass through mode or not.\n  google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}];\n\n  // If operating in pass through mode, the amount of time in milliseconds\n  // that the filter should cache the upstream response.\n  google.protobuf.Duration cache_time = 3;\n\n  // If operating in non-pass-through mode, specifies a set of upstream cluster\n  // names and the minimum percentage of servers in each of those clusters that\n  // must be healthy or degraded in order for the filter to return a 200.\n  //\n  // .. note::\n  //\n  //    This value is interpreted as an integer by truncating, so 12.50% will be calculated\n  //    as if it were 12%.\n  map<string, type.v3.Percent> cluster_min_healthy_percentages = 4;\n\n  // Specifies a set of health check request headers to match on. The health check filter will\n  // check a request’s headers against all the specified headers. To specify the health check\n  // endpoint, set the ``:path`` header to match on.\n  repeated config.route.v3.HeaderMatcher headers = 5;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/health_check/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/health_check/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.health_check.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.health_check.v4alpha\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Health check]\n// Health check :ref:`configuration overview <config_http_filters_health_check>`.\n// [#extension: envoy.filters.http.health_check]\n\n// [#next-free-field: 6]\nmessage HealthCheck {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.health_check.v3.HealthCheck\";\n\n  reserved 2;\n\n  // Specifies whether the filter operates in pass through mode or not.\n  google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}];\n\n  // If operating in pass through mode, the amount of time in milliseconds\n  // that the filter should cache the upstream response.\n  google.protobuf.Duration cache_time = 3;\n\n  // If operating in non-pass-through mode, specifies a set of upstream cluster\n  // names and the minimum percentage of servers in each of those clusters that\n  // must be healthy or degraded in order for the filter to return a 200.\n  //\n  // .. note::\n  //\n  //    This value is interpreted as an integer by truncating, so 12.50% will be calculated\n  //    as if it were 12%.\n  map<string, type.v3.Percent> cluster_min_healthy_percentages = 4;\n\n  // Specifies a set of health check request headers to match on. The health check filter will\n  // check a request’s headers against all the specified headers. To specify the health check\n  // endpoint, set the ``:path`` header to match on.\n  repeated config.route.v4alpha.HeaderMatcher headers = 5;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/ip_tagging/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/ip_tagging/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.ip_tagging.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.ip_tagging.v3\";\noption java_outer_classname = \"IpTaggingProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: IP tagging]\n// IP tagging :ref:`configuration overview <config_http_filters_ip_tagging>`.\n// [#extension: envoy.filters.http.ip_tagging]\n\nmessage IPTagging {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ip_tagging.v2.IPTagging\";\n\n  // The type of requests the filter should apply to. The supported types\n  // are internal, external or both. The\n  // :ref:`x-forwarded-for<config_http_conn_man_headers_x-forwarded-for_internal_origin>` header is\n  // used to determine if a request is internal and will result in\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>`\n  // being set. The filter defaults to both, and it will apply to all request types.\n  enum RequestType {\n    // Both external and internal requests will be tagged. This is the default value.\n    BOTH = 0;\n\n    // Only internal requests will be tagged.\n    INTERNAL = 1;\n\n    // Only external requests will be tagged.\n    EXTERNAL = 2;\n  }\n\n  // Supplies the IP tag name and the IP address subnets.\n  message IPTag {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.ip_tagging.v2.IPTagging.IPTag\";\n\n    // Specifies the IP tag name to apply.\n    string ip_tag_name = 1;\n\n    // A list of IP address subnets that will be tagged with\n    // ip_tag_name. Both IPv4 and IPv6 are supported.\n    repeated config.core.v3.CidrRange ip_list = 2;\n  }\n\n  // The type of request the filter should apply to.\n  RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system.\n  // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695]\n  // The set of IP tags for the filter.\n  repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/jwt_authn/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/jwt_authn/v2alpha:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/jwt_authn/v3/README.md",
    "content": "# JWT Authentication HTTP filter config\n\n## Overview\n\n1. The proto file in this folder defines an HTTP filter config for \"jwt_authn\" filter.\n\n2. This filter will verify the JWT in the HTTP request as:\n    - The signature should be valid\n    - JWT should not be expired\n    - Issuer and audiences are valid and specified in the filter config.\n\n3. [JWK](https://tools.ietf.org/html/rfc7517#appendix-A) is needed to verify JWT signature. It can be fetched from a remote server or read from a local file. If the JWKS is fetched remotely, it will be cached by the filter.\n\n3. If a JWT is valid, the user is authenticated and the request will be forwarded to the backend server. If a JWT is not valid, the request will be rejected with an error message.\n\n## The locations to extract JWT\n\nJWT will be extracted from the HTTP headers or query parameters. The default location is the HTTP header:\n```\nAuthorization: Bearer <token>\n```\nThe next default location is in the query parameter as:\n```\n?access_token=<TOKEN>\n```\n\nIf a custom location is desired, `from_headers` or `from_params` can be used to specify custom locations to extract JWT.\n\n## HTTP header to pass successfully verified JWT\n\nIf a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. Its value is base64url-encoded JWT payload in JSON.\n\n\n## Further header options\n\nIn addition to the `name` field, which specifies the HTTP header name,\nthe `from_headers` section can specify an optional `value_prefix` value, as in:\n\n```yaml\n    from_headers:\n      - name: bespoke\n        value_prefix: jwt_value\n```\n\nThe above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following the tag `jwt_value`.\n\nAny non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`, and `.`) will be skipped,\nand all following, contiguous, JWT-legal chars will be taken as the JWT.\n\nThis means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`:\n\n```text\nbespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk\n\nbespoke: {\"jwt_value\": \"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk\"}\n\nbespoke: beta:true,jwt_value:\"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk\",trace=1234\n```\n\nThe header `name` may be `Authorization`.\n\nThe `value_prefix` must match exactly, i.e., case-sensitively.\nIf the `value_prefix` is not found, the header is skipped: not considered as a source for a JWT token.\n\nIf there are no JWT-legal characters after the `value_prefix`, the entire string after it\nis taken to be the JWT token. This is unlikely to succeed; the error will reported by the JWT parser."
  },
  {
    "path": "api/envoy/extensions/filters/http/jwt_authn/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.jwt_authn.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/http_uri.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: JWT Authentication]\n// JWT Authentication :ref:`configuration overview <config_http_filters_jwt_authn>`.\n// [#extension: envoy.filters.http.jwt_authn]\n\n// Please see following for JWT authentication flow:\n//\n// * `JSON Web Token (JWT) <https://tools.ietf.org/html/rfc7519>`_\n// * `The OAuth 2.0 Authorization Framework <https://tools.ietf.org/html/rfc6749>`_\n// * `OpenID Connect <http://openid.net/connect>`_\n//\n// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies:\n//\n// * issuer: the principal that issues the JWT. It has to match the one from the token.\n// * allowed audiences: the ones in the token have to be listed here.\n// * how to fetch public key JWKS to verify the token signature.\n// * how to extract JWT token in the request.\n// * how to pass successfully verified token payload.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     issuer: https://example.com\n//     audiences:\n//     - bookstore_android.apps.googleusercontent.com\n//     - bookstore_web.apps.googleusercontent.com\n//     remote_jwks:\n//       http_uri:\n//         uri: https://example.com/.well-known/jwks.json\n//         cluster: example_jwks_cluster\n//       cache_duration:\n//         seconds: 300\n//\n// [#next-free-field: 10]\nmessage JwtProvider {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider\";\n\n  // Specify the `principal <https://tools.ietf.org/html/rfc7519#section-4.1.1>`_ that issued\n  // the JWT, usually a URL or an email address.\n  //\n  // Example: https://securetoken.google.com\n  // Example: 1234567-compute@developer.gserviceaccount.com\n  //\n  string issuer = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The list of JWT `audiences <https://tools.ietf.org/html/rfc7519#section-4.1.3>`_ are\n  // allowed to access. A JWT containing any of these audiences will be accepted. If not specified,\n  // will not check audiences in the token.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //     audiences:\n  //     - bookstore_android.apps.googleusercontent.com\n  //     - bookstore_web.apps.googleusercontent.com\n  //\n  repeated string audiences = 2;\n\n  // `JSON Web Key Set (JWKS) <https://tools.ietf.org/html/rfc7517#appendix-A>`_ is needed to\n  // validate signature of a JWT. This field specifies where to fetch JWKS.\n  oneof jwks_source_specifier {\n    option (validate.required) = true;\n\n    // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP\n    // URI and how the fetched JWKS should be cached.\n    //\n    // Example:\n    //\n    // .. 
code-block:: yaml\n    //\n    //    remote_jwks:\n    //      http_uri:\n    //        uri: https://www.googleapis.com/oauth2/v1/certs\n    //        cluster: jwt.www.googleapis.com|443\n    //      cache_duration:\n    //        seconds: 300\n    //\n    RemoteJwks remote_jwks = 3;\n\n    // JWKS is in local data source. It could be either in a local file or embedded in the\n    // inline_string.\n    //\n    // Example: local file\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      filename: /etc/envoy/jwks/jwks1.txt\n    //\n    // Example: inline_string\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      inline_string: ACADADADADA\n    //\n    config.core.v3.DataSource local_jwks = 4;\n  }\n\n  // If false, the JWT is removed in the request after a success verification. If true, the JWT is\n  // not removed in the request. Default value is false.\n  bool forward = 5;\n\n  // Two fields below define where to extract the JWT from an HTTP request.\n  //\n  // If no explicit location is specified, the following default locations are tried in order:\n  //\n  // 1. The Authorization header using the `Bearer schema\n  // <https://tools.ietf.org/html/rfc6750#section-2.1>`_. Example::\n  //\n  //    Authorization: Bearer <token>.\n  //\n  // 2. `access_token <https://tools.ietf.org/html/rfc6750#section-2.3>`_ query parameter.\n  //\n  // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations\n  // its provider specified or from the default locations.\n  //\n  // Specify the HTTP headers to extract JWT token. For examples, following config:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_headers:\n  //   - name: x-goog-iap-jwt-assertion\n  //\n  // can be used to extract token from header::\n  //\n  //   ``x-goog-iap-jwt-assertion: <JWT>``.\n  //\n  repeated JwtHeader from_headers = 6;\n\n  // JWT is sent in a query parameter. 
`jwt_params` represents the query parameter names.\n  //\n  // For example, if config is:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_params:\n  //   - jwt_token\n  //\n  // The JWT format in query parameter is::\n  //\n  //    /path?jwt_token=<JWT>\n  //\n  repeated string from_params = 7;\n\n  // This field specifies the header name to forward a successfully verified JWT payload to the\n  // backend. The forwarded data is::\n  //\n  //    base64url_encoded(jwt_payload_in_JSON)\n  //\n  // If it is not specified, the payload will not be forwarded.\n  string forward_payload_header = 8\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata\n  // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn**\n  // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields*\n  // and the value is the *protobuf::Struct* converted from JWT JSON payload.\n  //\n  // For example, if payload_in_metadata is *my_payload*:\n  //\n  // .. code-block:: yaml\n  //\n  //   envoy.filters.http.jwt_authn:\n  //     my_payload:\n  //       iss: https://example.com\n  //       sub: test@example.com\n  //       aud: https://example.com\n  //       exp: 1501281058\n  //\n  string payload_in_metadata = 9;\n}\n\n// This message specifies how to fetch JWKS from remote and how to cache it.\nmessage RemoteJwks {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.RemoteJwks\";\n\n  // The HTTP URI to fetch the JWKS. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //    http_uri:\n  //      uri: https://www.googleapis.com/oauth2/v1/certs\n  //      cluster: jwt.www.googleapis.com|443\n  //\n  config.core.v3.HttpUri http_uri = 1;\n\n  // Duration after which the cached JWKS should be expired. 
If not specified, default cache\n  // duration is 5 minutes.\n  google.protobuf.Duration cache_duration = 2;\n}\n\n// This message specifies a header location to extract JWT token.\nmessage JwtHeader {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtHeader\";\n\n  // The HTTP header name.\n  string name = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // The value prefix. The value format is \"value_prefix<token>\"\n  // For example, for \"Authorization: Bearer <token>\", value_prefix=\"Bearer \" with a space at the\n  // end.\n  string value_prefix = 2\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n}\n\n// Specify a required provider with audiences.\nmessage ProviderWithAudiences {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.ProviderWithAudiences\";\n\n  // Specify a required provider name.\n  string provider_name = 1;\n\n  // This field overrides the one specified in the JwtProvider.\n  repeated string audiences = 2;\n}\n\n// This message specifies a Jwt requirement. An empty message means JWT verification is not\n// required. Here are some config examples:\n//\n// .. 
code-block:: yaml\n//\n//  # Example 1: not required with an empty message\n//\n//  # Example 2: require A\n//  provider_name: provider-A\n//\n//  # Example 3: require A or B\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 4: require A and B\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 5: require A and (B or C)\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_any:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 6: require A or (B and C)\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_all:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 7: A is optional (if token from A is provided, it must be valid, but also allows\n//  missing token.)\n//  requires_any:\n//    requirements:\n//    - provider_name: provider-A\n//    - allow_missing: {}\n//\n//  # Example 8: A is optional and B is required.\n//  requires_all:\n//    requirements:\n//    - requires_any:\n//        requirements:\n//        - provider_name: provider-A\n//        - allow_missing: {}\n//    - provider_name: provider-B\n//\n// [#next-free-field: 7]\nmessage JwtRequirement {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirement\";\n\n  oneof requires_type {\n    // Specify a required provider name.\n    string provider_name = 1;\n\n    // Specify a required provider with audiences.\n    ProviderWithAudiences provider_and_audiences = 2;\n\n    // Specify list of JwtRequirement. 
Their results are OR-ed.\n    // If any one of them passes, the result is passed.\n    JwtRequirementOrList requires_any = 3;\n\n    // Specify list of JwtRequirement. Their results are AND-ed.\n    // All of them must pass, if one of them fails or is missing, it fails.\n    JwtRequirementAndList requires_all = 4;\n\n    // The requirement is always satisfied even if JWT is missing or the JWT\n    // verification fails. A typical usage is: this filter is used to only verify\n    // JWTs and pass the verified JWT payloads to another filter, the other filter\n    // will make decision. In this mode, all JWT tokens will be verified.\n    google.protobuf.Empty allow_missing_or_failed = 5;\n\n    // The requirement is satisfied if JWT is missing, but failed if JWT is\n    // presented but invalid. Similar to allow_missing_or_failed, this is used\n    // to only verify JWTs and pass the verified payload to another filter. The\n    // difference is that this mode will reject requests with invalid tokens.\n    google.protobuf.Empty allow_missing = 6;\n  }\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are OR-ed; if any one of them passes, the result is passed\nmessage JwtRequirementOrList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementOrList\";\n\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are AND-ed; all of them must pass, if one of them fails or is missing, it fails.\nmessage JwtRequirementAndList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementAndList\";\n\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a Jwt 
requirement for a specific Route condition.\n// Example 1:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /healthz\n//\n// In above example, \"requires\" field is empty for /healthz prefix match,\n// it means that requests matching the path prefix don't require JWT authentication.\n//\n// Example 2:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /\n//      requires: { provider_name: provider-A }\n//\n// In above example, all requests matching the path prefix require jwt authentication\n// from \"provider-A\".\nmessage RequirementRule {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.RequirementRule\";\n\n  // The route matching parameter. Only when the match is satisfied, the \"requires\" field will\n  // apply.\n  //\n  // For example: following match will match all requests.\n  //\n  // .. code-block:: yaml\n  //\n  //    match:\n  //      prefix: /\n  //\n  config.route.v3.RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Specify a Jwt Requirement. Please see the detailed comment in message JwtRequirement.\n  JwtRequirement requires = 2;\n}\n\n// This message specifies Jwt requirements based on stream_info.filterState.\n// This FilterState should use `Router::StringAccessor` object to set a string value.\n// Other HTTP filters can use it to specify Jwt requirements dynamically.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//    name: jwt_selector\n//    requires:\n//      issuer_1:\n//        provider_name: issuer1\n//      issuer_2:\n//        provider_name: issuer2\n//\n// If a filter set \"jwt_selector\" with \"issuer_1\" to FilterState for a request,\n// jwt_authn filter will use JwtRequirement{\"provider_name\": \"issuer1\"} to verify.\nmessage FilterStateRule {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.FilterStateRule\";\n\n  // The filter state name to retrieve the `Router::StringAccessor` object.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A map of string keys to requirements. The string key is the string value\n  // in the FilterState with the name specified in the *name* field above.\n  map<string, JwtRequirement> requires = 3;\n}\n\n// This is the Envoy HTTP filter config for JWT authentication.\n//\n// For example:\n//\n// .. code-block:: yaml\n//\n//   providers:\n//      provider1:\n//        issuer: issuer1\n//        audiences:\n//        - audience1\n//        - audience2\n//        remote_jwks:\n//          http_uri:\n//            uri: https://example.com/.well-known/jwks.json\n//            cluster: example_jwks_cluster\n//      provider2:\n//        issuer: issuer2\n//        local_jwks:\n//          inline_string: jwks_string\n//\n//   rules:\n//      # Not jwt verification is required for /health path\n//      - match:\n//          prefix: /health\n//\n//      # Jwt verification for provider1 is required for path prefixed with \"prefix\"\n//      - match:\n//          prefix: /prefix\n//        requires:\n//          provider_name: provider1\n//\n//      # Jwt verification for either provider1 or provider2 is required for all other requests.\n//      - match:\n//          prefix: /\n//        requires:\n//          requires_any:\n//            requirements:\n//              - provider_name: provider1\n//              - provider_name: 
provider2\n//\nmessage JwtAuthentication {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtAuthentication\";\n\n  // Map of provider names to JwtProviders.\n  //\n  // .. code-block:: yaml\n  //\n  //   providers:\n  //     provider1:\n  //        issuer: issuer1\n  //        audiences:\n  //        - audience1\n  //        - audience2\n  //        remote_jwks:\n  //          http_uri:\n  //            uri: https://example.com/.well-known/jwks.json\n  //            cluster: example_jwks_cluster\n  //      provider2:\n  //        issuer: provider2\n  //        local_jwks:\n  //          inline_string: jwks_string\n  //\n  map<string, JwtProvider> providers = 1;\n\n  // Specifies requirements based on the route matches. The first matched requirement will be\n  // applied. If there are overlapped match conditions, please put the most specific match first.\n  //\n  // Examples\n  //\n  // .. code-block:: yaml\n  //\n  //   rules:\n  //     - match:\n  //         prefix: /healthz\n  //     - match:\n  //         prefix: /baz\n  //       requires:\n  //         provider_name: provider1\n  //     - match:\n  //         prefix: /foo\n  //       requires:\n  //         requires_any:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //     - match:\n  //         prefix: /bar\n  //       requires:\n  //         requires_all:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //\n  repeated RequirementRule rules = 2;\n\n  // This message specifies Jwt requirements based on stream_info.filterState.\n  // Other HTTP filters can use it to specify Jwt requirements dynamically.\n  // The *rules* field above is checked first, if it could not find any matches,\n  // check this one.\n  FilterStateRule filter_state_rules = 3;\n\n  // When set to true, bypass the `CORS 
preflight request\n  // <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight>`_ regardless of JWT\n  // requirements specified in the rules.\n  bool bypass_cors_preflight = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/jwt_authn/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.jwt_authn.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/http_uri.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v4alpha\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: JWT Authentication]\n// JWT Authentication :ref:`configuration overview <config_http_filters_jwt_authn>`.\n// [#extension: envoy.filters.http.jwt_authn]\n\n// Please see following for JWT authentication flow:\n//\n// * `JSON Web Token (JWT) <https://tools.ietf.org/html/rfc7519>`_\n// * `The OAuth 2.0 Authorization Framework <https://tools.ietf.org/html/rfc6749>`_\n// * `OpenID Connect <http://openid.net/connect>`_\n//\n// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies:\n//\n// * issuer: the principal that issues the JWT. It has to match the one from the token.\n// * allowed audiences: the ones in the token have to be listed here.\n// * how to fetch public key JWKS to verify the token signature.\n// * how to extract JWT token in the request.\n// * how to pass successfully verified token payload.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     issuer: https://example.com\n//     audiences:\n//     - bookstore_android.apps.googleusercontent.com\n//     - bookstore_web.apps.googleusercontent.com\n//     remote_jwks:\n//       http_uri:\n//         uri: https://example.com/.well-known/jwks.json\n//         cluster: example_jwks_cluster\n//       cache_duration:\n//         seconds: 300\n//\n// [#next-free-field: 10]\nmessage JwtProvider {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtProvider\";\n\n  // Specify the `principal <https://tools.ietf.org/html/rfc7519#section-4.1.1>`_ that issued\n  // the JWT, usually a URL or an email address.\n  //\n  // Example: https://securetoken.google.com\n  // Example: 1234567-compute@developer.gserviceaccount.com\n  //\n  string issuer = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The list of JWT `audiences <https://tools.ietf.org/html/rfc7519#section-4.1.3>`_ are\n  // allowed to access. A JWT containing any of these audiences will be accepted. If not specified,\n  // will not check audiences in the token.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //     audiences:\n  //     - bookstore_android.apps.googleusercontent.com\n  //     - bookstore_web.apps.googleusercontent.com\n  //\n  repeated string audiences = 2;\n\n  // `JSON Web Key Set (JWKS) <https://tools.ietf.org/html/rfc7517#appendix-A>`_ is needed to\n  // validate signature of a JWT. This field specifies where to fetch JWKS.\n  oneof jwks_source_specifier {\n    option (validate.required) = true;\n\n    // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP\n    // URI and how the fetched JWKS should be cached.\n    //\n    // Example:\n    //\n    // .. 
code-block:: yaml\n    //\n    //    remote_jwks:\n    //      http_uri:\n    //        uri: https://www.googleapis.com/oauth2/v1/certs\n    //        cluster: jwt.www.googleapis.com|443\n    //      cache_duration:\n    //        seconds: 300\n    //\n    RemoteJwks remote_jwks = 3;\n\n    // JWKS is in local data source. It could be either in a local file or embedded in the\n    // inline_string.\n    //\n    // Example: local file\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      filename: /etc/envoy/jwks/jwks1.txt\n    //\n    // Example: inline_string\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      inline_string: ACADADADADA\n    //\n    config.core.v4alpha.DataSource local_jwks = 4;\n  }\n\n  // If false, the JWT is removed in the request after a success verification. If true, the JWT is\n  // not removed in the request. Default value is false.\n  bool forward = 5;\n\n  // Two fields below define where to extract the JWT from an HTTP request.\n  //\n  // If no explicit location is specified, the following default locations are tried in order:\n  //\n  // 1. The Authorization header using the `Bearer schema\n  // <https://tools.ietf.org/html/rfc6750#section-2.1>`_. Example::\n  //\n  //    Authorization: Bearer <token>.\n  //\n  // 2. `access_token <https://tools.ietf.org/html/rfc6750#section-2.3>`_ query parameter.\n  //\n  // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations\n  // its provider specified or from the default locations.\n  //\n  // Specify the HTTP headers to extract JWT token. For examples, following config:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_headers:\n  //   - name: x-goog-iap-jwt-assertion\n  //\n  // can be used to extract token from header::\n  //\n  //   ``x-goog-iap-jwt-assertion: <JWT>``.\n  //\n  repeated JwtHeader from_headers = 6;\n\n  // JWT is sent in a query parameter. 
`jwt_params` represents the query parameter names.\n  //\n  // For example, if config is:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_params:\n  //   - jwt_token\n  //\n  // The JWT format in query parameter is::\n  //\n  //    /path?jwt_token=<JWT>\n  //\n  repeated string from_params = 7;\n\n  // This field specifies the header name to forward a successfully verified JWT payload to the\n  // backend. The forwarded data is::\n  //\n  //    base64url_encoded(jwt_payload_in_JSON)\n  //\n  // If it is not specified, the payload will not be forwarded.\n  string forward_payload_header = 8\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata\n  // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn**\n  // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields*\n  // and the value is the *protobuf::Struct* converted from JWT JSON payload.\n  //\n  // For example, if payload_in_metadata is *my_payload*:\n  //\n  // .. code-block:: yaml\n  //\n  //   envoy.filters.http.jwt_authn:\n  //     my_payload:\n  //       iss: https://example.com\n  //       sub: test@example.com\n  //       aud: https://example.com\n  //       exp: 1501281058\n  //\n  string payload_in_metadata = 9;\n}\n\n// This message specifies how to fetch JWKS from remote and how to cache it.\nmessage RemoteJwks {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.RemoteJwks\";\n\n  // The HTTP URI to fetch the JWKS. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //    http_uri:\n  //      uri: https://www.googleapis.com/oauth2/v1/certs\n  //      cluster: jwt.www.googleapis.com|443\n  //\n  config.core.v4alpha.HttpUri http_uri = 1;\n\n  // Duration after which the cached JWKS should be expired. 
If not specified, default cache\n  // duration is 5 minutes.\n  google.protobuf.Duration cache_duration = 2;\n}\n\n// This message specifies a header location to extract JWT token.\nmessage JwtHeader {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtHeader\";\n\n  // The HTTP header name.\n  string name = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // The value prefix. The value format is \"value_prefix<token>\"\n  // For example, for \"Authorization: Bearer <token>\", value_prefix=\"Bearer \" with a space at the\n  // end.\n  string value_prefix = 2\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n}\n\n// Specify a required provider with audiences.\nmessage ProviderWithAudiences {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.ProviderWithAudiences\";\n\n  // Specify a required provider name.\n  string provider_name = 1;\n\n  // This field overrides the one specified in the JwtProvider.\n  repeated string audiences = 2;\n}\n\n// This message specifies a Jwt requirement. An empty message means JWT verification is not\n// required. Here are some config examples:\n//\n// .. 
code-block:: yaml\n//\n//  # Example 1: not required with an empty message\n//\n//  # Example 2: require A\n//  provider_name: provider-A\n//\n//  # Example 3: require A or B\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 4: require A and B\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 5: require A and (B or C)\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_any:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 6: require A or (B and C)\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_all:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 7: A is optional (if token from A is provided, it must be valid, but also allows\n//  missing token.)\n//  requires_any:\n//    requirements:\n//    - provider_name: provider-A\n//    - allow_missing: {}\n//\n//  # Example 8: A is optional and B is required.\n//  requires_all:\n//    requirements:\n//    - requires_any:\n//        requirements:\n//        - provider_name: provider-A\n//        - allow_missing: {}\n//    - provider_name: provider-B\n//\n// [#next-free-field: 7]\nmessage JwtRequirement {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtRequirement\";\n\n  oneof requires_type {\n    // Specify a required provider name.\n    string provider_name = 1;\n\n    // Specify a required provider with audiences.\n    ProviderWithAudiences provider_and_audiences = 2;\n\n    // Specify list of JwtRequirement. 
Their results are OR-ed.\n    // If any one of them passes, the result is passed.\n    JwtRequirementOrList requires_any = 3;\n\n    // Specify list of JwtRequirement. Their results are AND-ed.\n    // All of them must pass, if one of them fails or is missing, it fails.\n    JwtRequirementAndList requires_all = 4;\n\n    // The requirement is always satisfied even if JWT is missing or the JWT\n    // verification fails. A typical usage is: this filter is used to only verify\n    // JWTs and pass the verified JWT payloads to another filter, the other filter\n    // will make decision. In this mode, all JWT tokens will be verified.\n    google.protobuf.Empty allow_missing_or_failed = 5;\n\n    // The requirement is satisfied if JWT is missing, but failed if JWT is\n    // presented but invalid. Similar to allow_missing_or_failed, this is used\n    // to only verify JWTs and pass the verified payload to another filter. The\n    // difference is that this mode will reject requests with invalid tokens.\n    google.protobuf.Empty allow_missing = 6;\n  }\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are OR-ed; if any one of them passes, the result is passed\nmessage JwtRequirementOrList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementOrList\";\n\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are AND-ed; all of them must pass, if one of them fails or is missing, it fails.\nmessage JwtRequirementAndList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementAndList\";\n\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a Jwt 
requirement for a specific Route condition.\n// Example 1:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /healthz\n//\n// In above example, \"requires\" field is empty for /healthz prefix match,\n// it means that requests matching the path prefix don't require JWT authentication.\n//\n// Example 2:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /\n//      requires: { provider_name: provider-A }\n//\n// In above example, all requests matching the path prefix require jwt authentication\n// from \"provider-A\".\nmessage RequirementRule {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.RequirementRule\";\n\n  // The route matching parameter. Only when the match is satisfied, the \"requires\" field will\n  // apply.\n  //\n  // For example: following match will match all requests.\n  //\n  // .. code-block:: yaml\n  //\n  //    match:\n  //      prefix: /\n  //\n  config.route.v4alpha.RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Specify a Jwt Requirement. Please see the detailed comment in message JwtRequirement.\n  JwtRequirement requires = 2;\n}\n\n// This message specifies Jwt requirements based on stream_info.filterState.\n// This FilterState should use `Router::StringAccessor` object to set a string value.\n// Other HTTP filters can use it to specify Jwt requirements dynamically.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//    name: jwt_selector\n//    requires:\n//      issuer_1:\n//        provider_name: issuer1\n//      issuer_2:\n//        provider_name: issuer2\n//\n// If a filter set \"jwt_selector\" with \"issuer_1\" to FilterState for a request,\n// jwt_authn filter will use JwtRequirement{\"provider_name\": \"issuer1\"} to verify.\nmessage FilterStateRule {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule\";\n\n  // The filter state name to retrieve the `Router::StringAccessor` object.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A map of string keys to requirements. The string key is the string value\n  // in the FilterState with the name specified in the *name* field above.\n  map<string, JwtRequirement> requires = 3;\n}\n\n// This is the Envoy HTTP filter config for JWT authentication.\n//\n// For example:\n//\n// .. code-block:: yaml\n//\n//   providers:\n//      provider1:\n//        issuer: issuer1\n//        audiences:\n//        - audience1\n//        - audience2\n//        remote_jwks:\n//          http_uri:\n//            uri: https://example.com/.well-known/jwks.json\n//            cluster: example_jwks_cluster\n//      provider2:\n//        issuer: issuer2\n//        local_jwks:\n//          inline_string: jwks_string\n//\n//   rules:\n//      # Not jwt verification is required for /health path\n//      - match:\n//          prefix: /health\n//\n//      # Jwt verification for provider1 is required for path prefixed with \"prefix\"\n//      - match:\n//          prefix: /prefix\n//        requires:\n//          provider_name: provider1\n//\n//      # Jwt verification for either provider1 or provider2 is required for all other requests.\n//      - match:\n//          prefix: /\n//        requires:\n//          requires_any:\n//            requirements:\n//              - provider_name: provider1\n//              - provider_name: 
provider2\n//\nmessage JwtAuthentication {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication\";\n\n  // Map of provider names to JwtProviders.\n  //\n  // .. code-block:: yaml\n  //\n  //   providers:\n  //     provider1:\n  //        issuer: issuer1\n  //        audiences:\n  //        - audience1\n  //        - audience2\n  //        remote_jwks:\n  //          http_uri:\n  //            uri: https://example.com/.well-known/jwks.json\n  //            cluster: example_jwks_cluster\n  //      provider2:\n  //        issuer: provider2\n  //        local_jwks:\n  //          inline_string: jwks_string\n  //\n  map<string, JwtProvider> providers = 1;\n\n  // Specifies requirements based on the route matches. The first matched requirement will be\n  // applied. If there are overlapped match conditions, please put the most specific match first.\n  //\n  // Examples\n  //\n  // .. code-block:: yaml\n  //\n  //   rules:\n  //     - match:\n  //         prefix: /healthz\n  //     - match:\n  //         prefix: /baz\n  //       requires:\n  //         provider_name: provider1\n  //     - match:\n  //         prefix: /foo\n  //       requires:\n  //         requires_any:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //     - match:\n  //         prefix: /bar\n  //       requires:\n  //         requires_all:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //\n  repeated RequirementRule rules = 2;\n\n  // This message specifies Jwt requirements based on stream_info.filterState.\n  // Other HTTP filters can use it to specify Jwt requirements dynamically.\n  // The *rules* field above is checked first, if it could not find any matches,\n  // check this one.\n  FilterStateRule filter_state_rules = 3;\n\n  // When set to true, bypass the `CORS 
preflight request\n  // <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight>`_ regardless of JWT\n  // requirements specified in the rules.\n  bool bypass_cors_preflight = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/local_ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.local_ratelimit.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/http_status.proto\";\nimport \"envoy/type/v3/token_bucket.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.local_ratelimit.v3\";\noption java_outer_classname = \"LocalRateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Local Rate limit]\n// Local Rate limit :ref:`configuration overview <config_http_filters_local_rate_limit>`.\n// [#extension: envoy.filters.http.local_ratelimit]\n\n// [#next-free-field: 7]\nmessage LocalRateLimit {\n  // The human readable prefix to use when emitting stats.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // This field allows for a custom HTTP response status code to the downstream client when\n  // the request has been rate limited.\n  // Defaults to 429 (TooManyRequests).\n  //\n  // .. note::\n  //   If this is set to < 400, 429 will be used instead.\n  type.v3.HttpStatus status = 2;\n\n  // The token bucket configuration to use for rate limiting requests that are processed by this\n  // filter. Each request processed by the filter consumes a single token. If the token is available,\n  // the request will be allowed. If no tokens are available, the request will receive the configured\n  // rate limit status.\n  //\n  // .. note::\n  //   It's fine for the token bucket to be unset for the global configuration since the rate limit\n  //   can be applied at a the virtual host or route level. Thus, the token bucket must be set\n  //   for the per route configuration otherwise the config will be rejected.\n  //\n  // .. 
note::\n  //   When using per route configuration, the bucket becomes unique to that route.\n  //\n  // .. note::\n  //   In the current implementation the token bucket's :ref:`fill_interval\n  //   <envoy_api_field_type.v3.TokenBucket.fill_interval>` must be >= 50ms to avoid too aggressive\n  //   refills.\n  type.v3.TokenBucket token_bucket = 3;\n\n  // If set, this will enable -- but not necessarily enforce -- the rate limit for the given\n  // fraction of requests.\n  // Defaults to 0% of requests for safety.\n  config.core.v3.RuntimeFractionalPercent filter_enabled = 4;\n\n  // If set, this will enforce the rate limit decisions for the given fraction of requests.\n  //\n  // Note: this only applies to the fraction of enabled requests.\n  //\n  // Defaults to 0% of requests for safety.\n  config.core.v3.RuntimeFractionalPercent filter_enforced = 5;\n\n  // Specifies a list of HTTP headers that should be added to each response for requests that\n  // have been rate limited.\n  repeated config.core.v3.HeaderValueOption response_headers_to_add = 6\n      [(validate.rules).repeated = {max_items: 10}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/lua/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/lua/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/lua/v3/lua.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.lua.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.lua.v3\";\noption java_outer_classname = \"LuaProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Lua]\n// Lua :ref:`configuration overview <config_http_filters_lua>`.\n// [#extension: envoy.filters.http.lua]\n\nmessage Lua {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.lua.v2.Lua\";\n\n  // The Lua code that Envoy will execute. This can be a very small script that\n  // further loads code from disk if desired. Note that if JSON configuration is used, the code must\n  // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line\n  // strings so complex scripts can be easily expressed inline in the configuration.\n  string inline_code = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute\n  // <envoy_v3_api_msg_extensions.filters.http.lua.v3.LuaPerRoute>`. The Lua source codes can be\n  // loaded from inline string or local files.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //   source_codes:\n  //     hello.lua:\n  //       inline_string: |\n  //         function envoy_on_response(response_handle)\n  //           -- Do something.\n  //         end\n  //     world.lua:\n  //       filename: /etc/lua/world.lua\n  //\n  map<string, config.core.v3.DataSource> source_codes = 2;\n}\n\nmessage LuaPerRoute {\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the Lua filter for this particular vhost or route. 
If disabled is specified in\n    // multiple per-filter-configs, the most specific one will be used.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // A name of a Lua source code stored in\n    // :ref:`Lua.source_codes <envoy_v3_api_field_extensions.filters.http.lua.v3.Lua.source_codes>`.\n    string name = 2 [(validate.rules).string = {min_len: 1}];\n\n    // A configured per-route Lua source code that can be served by RDS or provided inline.\n    config.core.v3.DataSource source_code = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/oauth2/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.oauth2.v3alpha;\n\nimport \"envoy/config/core/v3/http_uri.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/secret.proto\";\nimport \"envoy/type/matcher/v3/path.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.oauth2.v3alpha\";\noption java_outer_classname = \"OauthProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: OAuth]\n// OAuth :ref:`configuration overview <config_http_filters_oauth>`.\n// [#extension: envoy.filters.http.oauth2]\n//\n\nmessage OAuth2Credentials {\n  // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server.\n  string client_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The secret used to retrieve the access token. 
This value will be URL encoded when sent to the OAuth server.\n  transport_sockets.tls.v3.SdsSecretConfig token_secret = 2\n      [(validate.rules).message = {required: true}];\n\n  // Configures how the secret token should be created.\n  oneof token_formation {\n    option (validate.required) = true;\n\n    // If present, the secret token will be a HMAC using the provided secret.\n    transport_sockets.tls.v3.SdsSecretConfig hmac_secret = 3\n        [(validate.rules).message = {required: true}];\n  }\n}\n\n// OAuth config\n//\n// [#next-free-field: 9]\nmessage OAuth2Config {\n  // Endpoint on the authorization server to retrieve the access token from.\n  config.core.v3.HttpUri token_endpoint = 1;\n\n  // The endpoint redirect to for authorization in response to unauthorized requests.\n  string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Credentials used for OAuth.\n  OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}];\n\n  // The redirect URI passed to the authorization endpoint. Supports header formatting\n  // tokens. 
For more information, including details on header value syntax, see the\n  // documentation on :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  //\n  // This URI should not contain any query parameters.\n  string redirect_uri = 4 [(validate.rules).string = {min_len: 1}];\n\n  // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server.\n  type.matcher.v3.PathMatcher redirect_path_matcher = 5\n      [(validate.rules).message = {required: true}];\n\n  // The path to sign a user out, clearing their credential cookies.\n  type.matcher.v3.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}];\n\n  // Forward the OAuth token as a Bearer to upstream web service.\n  bool forward_bearer_token = 7;\n\n  // Any request that matches any of the provided matchers will be passed through without OAuth validation.\n  repeated config.route.v3.HeaderMatcher pass_through_matcher = 8;\n}\n\n// Filter config.\nmessage OAuth2 {\n  // Leave this empty to disable OAuth2 for a specific route, using per filter config.\n  OAuth2Config config = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/oauth2/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/oauth2/v3alpha:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v4alpha:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.oauth2.v4alpha;\n\nimport \"envoy/config/core/v4alpha/http_uri.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/secret.proto\";\nimport \"envoy/type/matcher/v4alpha/path.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.oauth2.v4alpha\";\noption java_outer_classname = \"OauthProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: OAuth]\n// OAuth :ref:`configuration overview <config_http_filters_oauth>`.\n// [#extension: envoy.filters.http.oauth2]\n//\n\nmessage OAuth2Credentials {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Credentials\";\n\n  // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server.\n  string client_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The secret used to retrieve the access token. 
This value will be URL encoded when sent to the OAuth server.\n  transport_sockets.tls.v4alpha.SdsSecretConfig token_secret = 2\n      [(validate.rules).message = {required: true}];\n\n  // Configures how the secret token should be created.\n  oneof token_formation {\n    option (validate.required) = true;\n\n    // If present, the secret token will be a HMAC using the provided secret.\n    transport_sockets.tls.v4alpha.SdsSecretConfig hmac_secret = 3\n        [(validate.rules).message = {required: true}];\n  }\n}\n\n// OAuth config\n//\n// [#next-free-field: 9]\nmessage OAuth2Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Config\";\n\n  // Endpoint on the authorization server to retrieve the access token from.\n  config.core.v4alpha.HttpUri token_endpoint = 1;\n\n  // The endpoint redirect to for authorization in response to unauthorized requests.\n  string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Credentials used for OAuth.\n  OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}];\n\n  // The redirect URI passed to the authorization endpoint. Supports header formatting\n  // tokens. 
For more information, including details on header value syntax, see the\n  // documentation on :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  //\n  // This URI should not contain any query parameters.\n  string redirect_uri = 4 [(validate.rules).string = {min_len: 1}];\n\n  // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server.\n  type.matcher.v4alpha.PathMatcher redirect_path_matcher = 5\n      [(validate.rules).message = {required: true}];\n\n  // The path to sign a user out, clearing their credential cookies.\n  type.matcher.v4alpha.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}];\n\n  // Forward the OAuth token as a Bearer to upstream web service.\n  bool forward_bearer_token = 7;\n\n  // Any request that matches any of the provided matchers will be passed through without OAuth validation.\n  repeated config.route.v4alpha.HeaderMatcher pass_through_matcher = 8;\n}\n\n// Filter config.\nmessage OAuth2 {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.oauth2.v3alpha.OAuth2\";\n\n  // Leave this empty to disable OAuth2 for a specific route, using per filter config.\n  OAuth2Config config = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/on_demand/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/on_demand/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/on_demand/v3/on_demand.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.on_demand.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.on_demand.v3\";\noption java_outer_classname = \"OnDemandProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: OnDemand]\n// IP tagging :ref:`configuration overview <config_http_filters_on_demand>`.\n// [#extension: envoy.filters.http.on_demand]\n\nmessage OnDemand {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.on_demand.v2.OnDemand\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/original_src/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/original_src/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/original_src/v3/original_src.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.original_src.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.original_src.v3\";\noption java_outer_classname = \"OriginalSrcProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Original Src Filter]\n// Use the Original source address on upstream connections.\n\n// The Original Src filter binds upstream connections to the original source address determined\n// for the request. This address could come from something like the Proxy Protocol filter, or it\n// could come from trusted http headers.\n// [#extension: envoy.filters.http.original_src]\nmessage OriginalSrc {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.original_src.v2alpha1.OriginalSrc\";\n\n  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to\n  // ensure that non-local addresses may be routed back through envoy when binding to the original\n  // source address. The option will not be applied if the mark is 0.\n  uint32 mark = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/rate_limit/v2:pkg\",\n        \"//envoy/config/ratelimit/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.ratelimit.v3;\n\nimport \"envoy/config/ratelimit/v3/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.ratelimit.v3\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_http_filters_rate_limit>`.\n// [#extension: envoy.filters.http.ratelimit]\n\n// [#next-free-field: 9]\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.rate_limit.v2.RateLimit\";\n\n  // Defines the version of the standard to use for X-RateLimit headers.\n  enum XRateLimitHeadersRFCVersion {\n    // X-RateLimit headers disabled.\n    OFF = 0;\n\n    // Use `draft RFC Version 03 <https://tools.ietf.org/id/draft-polli-ratelimit-headers-03.html>`_.\n    DRAFT_VERSION_03 = 1;\n  }\n\n  // The rate limit domain to use when calling the rate limit service.\n  string domain = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Specifies the rate limit configurations to be applied with the same\n  // stage number. If not set, the default stage number is 0.\n  //\n  // .. note::\n  //\n  //  The filter supports a range of 0 - 10 inclusively for stage numbers.\n  uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}];\n\n  // The type of requests the filter should apply to. The supported\n  // types are *internal*, *external* or *both*. A request is considered internal if\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>` is set to true. 
If\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>` is not set or false, a\n  // request is considered external. The filter defaults to *both*, and it will apply to all request\n  // types.\n  string request_type = 3\n      [(validate.rules).string = {in: \"internal\" in: \"external\" in: \"both\" in: \"\"}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 4;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 5;\n\n  // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead\n  // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The\n  // HTTP code will be 200 for a gRPC response.\n  bool rate_limited_as_resource_exhausted = 6;\n\n  // Configuration for an external rate limit service provider. If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 7\n      [(validate.rules).message = {required: true}];\n\n  // Defines the standard version to use for X-RateLimit headers emitted by the filter:\n  //\n  // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the\n  //   client in the current time-window followed by the description of the\n  //   quota policy. The values are returned by the rate limiting service in\n  //   :ref:`current_limit<envoy_v3_api_field_service.ratelimit.v3.RateLimitResponse.DescriptorStatus.current_limit>`\n  //   field. Example: `10, 10;w=1;name=\"per-ip\", 1000;w=3600`.\n  // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the\n  //   current time-window. 
The values are returned by the rate limiting service\n  //   in :ref:`limit_remaining<envoy_v3_api_field_service.ratelimit.v3.RateLimitResponse.DescriptorStatus.limit_remaining>`\n  //   field.\n  // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of\n  //   the current time-window. The values are returned by the rate limiting service\n  //   in :ref:`duration_until_reset<envoy_v3_api_field_service.ratelimit.v3.RateLimitResponse.DescriptorStatus.duration_until_reset>`\n  //   field.\n  //\n  // In case rate limiting policy specifies more then one time window, the values\n  // above represent the window that is closest to reaching its limit.\n  //\n  // For more information about the headers specification see selected version of\n  // the `draft RFC <https://tools.ietf.org/id/draft-polli-ratelimit-headers-03.html>`_.\n  //\n  // Disabled by default.\n  XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8\n      [(validate.rules).enum = {defined_only: true}];\n}\n\nmessage RateLimitPerRoute {\n  enum VhRateLimitsOptions {\n    // Use the virtual host rate limits unless the route has a rate limit policy.\n    OVERRIDE = 0;\n\n    // Use the virtual host rate limits even if the route has a rate limit policy.\n    INCLUDE = 1;\n\n    // Ignore the virtual host rate limits even if the route does not have a rate limit policy.\n    IGNORE = 2;\n  }\n\n  // Specifies if the rate limit filter should include the virtual host rate limits.\n  VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/rbac/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/rbac/v2:pkg\",\n        \"//envoy/config/rbac/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/rbac/v3/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.rbac.v3;\n\nimport \"envoy/config/rbac/v3/rbac.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.rbac.v3\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_http_filters_rbac>`.\n// [#extension: envoy.filters.http.rbac]\n\n// RBAC filter config.\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.rbac.v2.RBAC\";\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v3.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter (i.e., returning a 403)\n  // but will emit stats and logs and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v3.RBAC shadow_rules = 2;\n}\n\nmessage RBACPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.rbac.v2.RBACPerRoute\";\n\n  reserved 1;\n\n  // Override the global configuration of the filter with this new config.\n  // If absent, the global RBAC policy will be disabled for this route.\n  RBAC rbac = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/rbac/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/rbac/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/rbac/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.rbac.v4alpha;\n\nimport \"envoy/config/rbac/v4alpha/rbac.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.rbac.v4alpha\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_http_filters_rbac>`.\n// [#extension: envoy.filters.http.rbac]\n\n// RBAC filter config.\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.rbac.v3.RBAC\";\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v4alpha.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter (i.e., returning a 403)\n  // but will emit stats and logs and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v4alpha.RBAC shadow_rules = 2;\n}\n\nmessage RBACPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.rbac.v3.RBACPerRoute\";\n\n  reserved 1;\n\n  // Override the global configuration of the filter with this new config.\n  // If absent, the global RBAC policy will be disabled for this route.\n  RBAC rbac = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/router/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/filter/http/router/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/router/v3/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.router.v3;\n\nimport \"envoy/config/accesslog/v3/accesslog.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.router.v3\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Router]\n// Router :ref:`configuration overview <config_http_filters_router>`.\n// [#extension: envoy.filters.http.router]\n\n// [#next-free-field: 7]\nmessage Router {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.router.v2.Router\";\n\n  // Whether the router generates dynamic cluster statistics. Defaults to\n  // true. Can be disabled in high performance scenarios.\n  google.protobuf.BoolValue dynamic_stats = 1;\n\n  // Whether to start a child span for egress routed calls. This can be\n  // useful in scenarios where other filters (auth, ratelimit, etc.) make\n  // outbound calls and have child spans rooted at the same ingress\n  // parent. Defaults to false.\n  bool start_child_span = 2;\n\n  // Configuration for HTTP upstream logs emitted by the router. Upstream logs\n  // are configured in the same way as access logs, but each log entry represents\n  // an upstream request. Presuming retries are configured, multiple upstream\n  // requests may be made for each downstream (inbound) request.\n  repeated config.accesslog.v3.AccessLog upstream_log = 3;\n\n  // Do not add any additional *x-envoy-* headers to requests or responses. 
This\n  // only affects the :ref:`router filter generated *x-envoy-* headers\n  // <config_http_filters_router_headers_set>`, other Envoy filters and the HTTP\n  // connection manager may continue to set *x-envoy-* headers.\n  bool suppress_envoy_headers = 4;\n\n  // Specifies a list of HTTP headers to strictly validate. Envoy will reject a\n  // request and respond with HTTP status 400 if the request contains an invalid\n  // value for any of the headers listed in this field. Strict header checking\n  // is only supported for the following headers:\n  //\n  // Value must be a ','-delimited list (i.e. no spaces) of supported retry\n  // policy values:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`\n  // * :ref:`config_http_filters_router_x-envoy-retry-on`\n  //\n  // Value must be an integer:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-max-retries`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`\n  repeated string strict_check_headers = 5 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"x-envoy-upstream-rq-timeout-ms\"\n        in: \"x-envoy-upstream-rq-per-try-timeout-ms\"\n        in: \"x-envoy-max-retries\"\n        in: \"x-envoy-retry-grpc-on\"\n        in: \"x-envoy-retry-on\"\n      }\n    }\n  }];\n\n  // If not set, ingress Envoy will ignore\n  // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress\n  // Envoy, when deriving timeout for upstream cluster.\n  bool respect_expected_rq_timeout = 6;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/router/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/router/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/router/v4alpha/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.router.v4alpha;\n\nimport \"envoy/config/accesslog/v4alpha/accesslog.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.router.v4alpha\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Router]\n// Router :ref:`configuration overview <config_http_filters_router>`.\n// [#extension: envoy.filters.http.router]\n\n// [#next-free-field: 7]\nmessage Router {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.router.v3.Router\";\n\n  // Whether the router generates dynamic cluster statistics. Defaults to\n  // true. Can be disabled in high performance scenarios.\n  google.protobuf.BoolValue dynamic_stats = 1;\n\n  // Whether to start a child span for egress routed calls. This can be\n  // useful in scenarios where other filters (auth, ratelimit, etc.) make\n  // outbound calls and have child spans rooted at the same ingress\n  // parent. Defaults to false.\n  bool start_child_span = 2;\n\n  // Configuration for HTTP upstream logs emitted by the router. Upstream logs\n  // are configured in the same way as access logs, but each log entry represents\n  // an upstream request. Presuming retries are configured, multiple upstream\n  // requests may be made for each downstream (inbound) request.\n  repeated config.accesslog.v4alpha.AccessLog upstream_log = 3;\n\n  // Do not add any additional *x-envoy-* headers to requests or responses. 
This\n  // only affects the :ref:`router filter generated *x-envoy-* headers\n  // <config_http_filters_router_headers_set>`, other Envoy filters and the HTTP\n  // connection manager may continue to set *x-envoy-* headers.\n  bool suppress_envoy_headers = 4;\n\n  // Specifies a list of HTTP headers to strictly validate. Envoy will reject a\n  // request and respond with HTTP status 400 if the request contains an invalid\n  // value for any of the headers listed in this field. Strict header checking\n  // is only supported for the following headers:\n  //\n  // Value must be a ','-delimited list (i.e. no spaces) of supported retry\n  // policy values:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`\n  // * :ref:`config_http_filters_router_x-envoy-retry-on`\n  //\n  // Value must be an integer:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-max-retries`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`\n  repeated string strict_check_headers = 5 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"x-envoy-upstream-rq-timeout-ms\"\n        in: \"x-envoy-upstream-rq-per-try-timeout-ms\"\n        in: \"x-envoy-max-retries\"\n        in: \"x-envoy-retry-grpc-on\"\n        in: \"x-envoy-retry-on\"\n      }\n    }\n  }];\n\n  // If not set, ingress Envoy will ignore\n  // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress\n  // Envoy, when deriving timeout for upstream cluster.\n  bool respect_expected_rq_timeout = 6;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/squash/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/squash/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/squash/v3/squash.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.squash.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.squash.v3\";\noption java_outer_classname = \"SquashProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Squash]\n// Squash :ref:`configuration overview <config_http_filters_squash>`.\n// [#extension: envoy.filters.http.squash]\n\n// [#next-free-field: 6]\nmessage Squash {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.squash.v2.Squash\";\n\n  // The name of the cluster that hosts the Squash server.\n  string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // When the filter requests the Squash server to create a DebugAttachment, it will use this\n  // structure as template for the body of the request. It can contain reference to environment\n  // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server\n  // with more information to find the process to attach the debugger to. For example, in a\n  // Istio/k8s environment, this will contain information on the pod:\n  //\n  // .. code-block:: json\n  //\n  //  {\n  //    \"spec\": {\n  //      \"attachment\": {\n  //        \"pod\": \"{{ POD_NAME }}\",\n  //        \"namespace\": \"{{ POD_NAMESPACE }}\"\n  //      },\n  //      \"match_request\": true\n  //    }\n  //  }\n  //\n  // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API)\n  google.protobuf.Struct attachment_template = 2;\n\n  // The timeout for individual requests sent to the Squash cluster. 
Defaults to 1 second.\n  google.protobuf.Duration request_timeout = 3;\n\n  // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60\n  // seconds.\n  google.protobuf.Duration attachment_timeout = 4;\n\n  // Amount of time to poll for the status of the attachment object in the Squash server\n  // (to check if has been attached). Defaults to 1 second.\n  google.protobuf.Duration attachment_poll_period = 5;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/tap/v2alpha:pkg\",\n        \"//envoy/extensions/common/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/tap/v3/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.tap.v3;\n\nimport \"envoy/extensions/common/tap/v3/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.tap.v3\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap]\n// Tap :ref:`configuration overview <config_http_filters_tap>`.\n// [#extension: envoy.filters.http.tap]\n\n// Top level configuration for the tap filter.\nmessage Tap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.tap.v2alpha.Tap\";\n\n  // Common configuration for the HTTP tap filter.\n  common.tap.v3.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/common/tap/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/tap/v4alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.tap.v4alpha;\n\nimport \"envoy/extensions/common/tap/v4alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.tap.v4alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tap]\n// Tap :ref:`configuration overview <config_http_filters_tap>`.\n// [#extension: envoy.filters.http.tap]\n\n// Top level configuration for the tap filter.\nmessage Tap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.tap.v3.Tap\";\n\n  // Common configuration for the HTTP tap filter.\n  common.tap.v4alpha.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/http/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.wasm.v3;\n\nimport \"envoy/extensions/wasm/v3/wasm.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm]\n// [#extension: envoy.filters.http.wasm]\n// Wasm :ref:`configuration overview <config_http_filters_wasm>`.\n\nmessage Wasm {\n  // General Plugin configuration.\n  envoy.extensions.wasm.v3.PluginConfig config = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/http_inspector/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/http_inspector/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.http_inspector.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.http_inspector.v3\";\noption java_outer_classname = \"HttpInspectorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP Inspector Filter]\n// Detect whether the application protocol is HTTP.\n// [#extension: envoy.filters.listener.http_inspector]\n\nmessage HttpInspector {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.http_inspector.v2.HttpInspector\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/original_dst/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/original_dst/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.original_dst.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.original_dst.v3\";\noption java_outer_classname = \"OriginalDstProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Original Dst Filter]\n// Use the Original destination address on downstream connections.\n// [#extension: envoy.filters.listener.original_dst]\n\nmessage OriginalDst {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.original_dst.v2.OriginalDst\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/original_src/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/original_src/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/original_src/v3/original_src.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.original_src.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.original_src.v3\";\noption java_outer_classname = \"OriginalSrcProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Original Src Filter]\n// Use the Original source address on upstream connections.\n// [#extension: envoy.filters.listener.original_src]\n\n// The Original Src filter binds upstream connections to the original source address determined\n// for the connection. This address could come from something like the Proxy Protocol filter, or it\n// could come from trusted http headers.\nmessage OriginalSrc {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.original_src.v2alpha1.OriginalSrc\";\n\n  // Whether to bind the port to the one used in the original downstream connection.\n  // [#not-implemented-hide:]\n  bool bind_port = 1;\n\n  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to\n  // ensure that non-local addresses may be routed back through envoy when binding to the original\n  // source address. The option will not be applied if the mark is 0.\n  uint32 mark = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/proxy_protocol/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.proxy_protocol.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3\";\noption java_outer_classname = \"ProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Proxy Protocol Filter]\n// PROXY protocol listener filter.\n// [#extension: envoy.filters.listener.proxy_protocol]\n\nmessage ProxyProtocol {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.proxy_protocol.v2.ProxyProtocol\";\n\n  message KeyValuePair {\n    // The namespace — if this is empty, the filter's namespace will be used.\n    string metadata_namespace = 1;\n\n    // The key to use within the namespace.\n    string key = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // A Rule defines what metadata to apply when a header is present or missing.\n  message Rule {\n    // The type that triggers the rule - required\n    // TLV type is defined as uint8_t in proxy protocol. See `the spec\n    // <https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt>`_ for details.\n    uint32 tlv_type = 1 [(validate.rules).uint32 = {lt: 256}];\n\n    // If the TLV type is present, apply this metadata KeyValuePair.\n    KeyValuePair on_tlv_present = 2;\n  }\n\n  // The list of rules to apply to requests.\n  repeated Rule rules = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/tls_inspector/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/tls_inspector/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.tls_inspector.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.tls_inspector.v3\";\noption java_outer_classname = \"TlsInspectorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: TLS Inspector Filter]\n// Allows detecting whether the transport appears to be TLS or plaintext.\n// [#extension: envoy.filters.listener.tls_inspector]\n\nmessage TlsInspector {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.tls_inspector.v2.TlsInspector\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/client_ssl_auth/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.client_ssl_auth.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.client_ssl_auth.v3\";\noption java_outer_classname = \"ClientSslAuthProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Client TLS authentication]\n// Client TLS authentication\n// :ref:`configuration overview <config_network_filters_client_ssl_auth>`.\n// [#extension: envoy.filters.network.client_ssl_auth]\n\nmessage ClientSSLAuth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.client_ssl_auth.v2.ClientSSLAuth\";\n\n  // The :ref:`cluster manager <arch_overview_cluster_manager>` cluster that runs\n  // the authentication service. The filter will connect to the service every 60s to fetch the list\n  // of principals. The service must support the expected :ref:`REST API\n  // <config_network_filters_client_ssl_auth_rest_api>`.\n  string auth_api_cluster = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_client_ssl_auth_stats>`.\n  string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Time in milliseconds between principal refreshes from the\n  // authentication service. Default is 60000 (60s). 
The actual fetch time\n  // will be this value plus a random jittered value between\n  // 0-refresh_delay_ms milliseconds.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // An optional list of IP address and subnet masks that should be white\n  // listed for access by the filter. If no list is provided, there is no\n  // IP allowlist.\n  repeated config.core.v3.CidrRange ip_white_list = 4\n      [(udpa.annotations.field_migrate).rename = \"ip_allowlist\"];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/direct_response/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/direct_response/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/direct_response/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.direct_response.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.direct_response.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Direct response]\n// Direct response :ref:`configuration overview <config_network_filters_direct_response>`.\n// [#extension: envoy.filters.network.direct_response]\n\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.direct_response.v2.Config\";\n\n  // Response data as a data source.\n  config.core.v3.DataSource response = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/dubbo/router/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.router.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.router.v3\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Router]\n// Dubbo router :ref:`configuration overview <config_dubbo_filters_router>`.\n\nmessage Router {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.dubbo.router.v2alpha1.Router\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/dubbo_proxy/v3/README.md",
    "content": "Protocol buffer definitions for the Dubbo proxy.\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.v3;\n\nimport \"envoy/extensions/filters/network/dubbo_proxy/v3/route.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3\";\noption java_outer_classname = \"DubboProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dubbo Proxy]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n// [#extension: envoy.filters.network.dubbo_proxy]\n\n// Dubbo Protocol types supported by Envoy.\nenum ProtocolType {\n  // the default protocol.\n  Dubbo = 0;\n}\n\n// Dubbo Serialization types supported by Envoy.\nenum SerializationType {\n  // the default serialization protocol.\n  Hessian2 = 0;\n}\n\n// [#next-free-field: 6]\nmessage DubboProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy\";\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Configure the protocol used.\n  ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Configure the serialization protocol used.\n  SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  repeated RouteConfiguration route_config = 4;\n\n  // A list of individual Dubbo filters that make up the filter chain for requests made to the\n  // Dubbo proxy. Order matters as the filters are processed sequentially. 
For backwards\n  // compatibility, if no dubbo_filters are specified, a default Dubbo router filter\n  // (`envoy.filters.dubbo.router`) is used.\n  repeated DubboFilter dubbo_filters = 5;\n}\n\n// DubboFilter configures a Dubbo filter.\nmessage DubboFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter\";\n\n  // The name of the filter to instantiate. The name must match a supported\n  // filter.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any config = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.v3;\n\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dubbo Proxy Route Configuration]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n\n// [#next-free-field: 6]\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteConfiguration\";\n\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The interface name of the service.\n  string interface = 2;\n\n  // Which group does the interface belong to.\n  string group = 3;\n\n  // The version number of the interface.\n  string version = 4;\n\n  // The list of routes that will be matched, in order, against incoming requests. 
The first route\n  // that matches will be used.\n  repeated Route routes = 5;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteMatch\";\n\n  // Method level routing matching.\n  MethodMatch method = 1;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated config.route.v3.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteAction\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed.\n    string cluster = 1;\n\n    // Multiple upstream clusters can be specified for a given route. 
The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    // Currently ClusterWeight only supports the name and weight fields.\n    config.route.v3.WeightedCluster weighted_clusters = 2;\n  }\n}\n\nmessage MethodMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch\";\n\n  // The parameter matching type.\n  message ParameterMatchSpecifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch.ParameterMatchSpecifier\";\n\n    oneof parameter_match_specifier {\n      // If specified, header match will be performed based on the value of the header.\n      string exact_match = 3;\n\n      // If specified, header match will be performed based on range.\n      // The rule will match if the request header value is within this range.\n      // The entire request header value must represent an integer in base 10 notation: consisting\n      // of an optional plus or minus sign followed by a sequence of digits. The rule will not match\n      // if the header value does not represent an integer. Match will fail for empty values,\n      // floating point numbers or if only a subsequence of the header value is an integer.\n      //\n      // Examples:\n      //\n      // * For range [-10,0), route will match for header value -1, but not for 0,\n      //   \"somestring\", 10.9, \"-1somestring\"\n      type.v3.Int64Range range_match = 4;\n    }\n  }\n\n  // The name of the method.\n  type.matcher.v3.StringMatcher name = 1;\n\n  // Method parameter definition.\n  // The key is the parameter index, starting from 0.\n  // The value is the parameter matching type.\n  map<uint32, ParameterMatchSpecifier> params_match = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/dubbo_proxy/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.v4alpha;\n\nimport \"envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha\";\noption java_outer_classname = \"DubboProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Dubbo Proxy]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n// [#extension: envoy.filters.network.dubbo_proxy]\n\n// Dubbo Protocol types supported by Envoy.\nenum ProtocolType {\n  // the default protocol.\n  Dubbo = 0;\n}\n\n// Dubbo Serialization types supported by Envoy.\nenum SerializationType {\n  // the default serialization protocol.\n  Hessian2 = 0;\n}\n\n// [#next-free-field: 6]\nmessage DubboProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\";\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Configure the protocol used.\n  ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Configure the serialization protocol used.\n  SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  repeated RouteConfiguration route_config = 4;\n\n  // A list of individual Dubbo filters that make up the filter chain for requests made to the\n  // Dubbo proxy. Order matters as the filters are processed sequentially. 
For backwards\n  // compatibility, if no dubbo_filters are specified, a default Dubbo router filter\n  // (`envoy.filters.dubbo.router`) is used.\n  repeated DubboFilter dubbo_filters = 5;\n}\n\n// DubboFilter configures a Dubbo filter.\nmessage DubboFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.DubboFilter\";\n\n  // The name of the filter to instantiate. The name must match a supported\n  // filter.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any config = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Dubbo Proxy Route Configuration]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n\n// [#next-free-field: 6]\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.RouteConfiguration\";\n\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The interface name of the service.\n  string interface = 2;\n\n  // Which group does the interface belong to.\n  string group = 3;\n\n  // The version number of the interface.\n  string version = 4;\n\n  // The list of routes that will be matched, in order, against incoming requests. 
The first route\n  // that matches will be used.\n  repeated Route routes = 5;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.RouteMatch\";\n\n  // Method level routing matching.\n  MethodMatch method = 1;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated config.route.v4alpha.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.RouteAction\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed.\n    string cluster = 1;\n\n    // Multiple upstream clusters can be specified for a given route. 
The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    // Currently ClusterWeight only supports the name and weight fields.\n    config.route.v4alpha.WeightedCluster weighted_clusters = 2;\n  }\n}\n\nmessage MethodMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch\";\n\n  // The parameter matching type.\n  message ParameterMatchSpecifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch.ParameterMatchSpecifier\";\n\n    oneof parameter_match_specifier {\n      // If specified, header match will be performed based on the value of the header.\n      string exact_match = 3;\n\n      // If specified, header match will be performed based on range.\n      // The rule will match if the request header value is within this range.\n      // The entire request header value must represent an integer in base 10 notation: consisting\n      // of an optional plus or minus sign followed by a sequence of digits. The rule will not match\n      // if the header value does not represent an integer. Match will fail for empty values,\n      // floating point numbers or if only a subsequence of the header value is an integer.\n      //\n      // Examples:\n      //\n      // * For range [-10,0), route will match for header value -1, but not for 0,\n      //   \"somestring\", 10.9, \"-1somestring\"\n      type.v3.Int64Range range_match = 4;\n    }\n  }\n\n  // The name of the method.\n  type.matcher.v4alpha.StringMatcher name = 1;\n\n  // Method parameter definition.\n  // The key is the parameter index, starting from 0.\n  // The value is the parameter matching type.\n  map<uint32, ParameterMatchSpecifier> params_match = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/echo/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/echo/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/echo/v3/echo.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.echo.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.echo.v3\";\noption java_outer_classname = \"EchoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Echo]\n// Echo :ref:`configuration overview <config_network_filters_echo>`.\n// [#extension: envoy.filters.network.echo]\n\nmessage Echo {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.echo.v2.Echo\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/ext_authz/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/ext_authz/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.ext_authz.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\nimport \"envoy/type/matcher/v3/metadata.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.ext_authz.v3\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Network External Authorization ]\n// The network layer external authorization service configuration\n// :ref:`configuration overview <config_network_filters_ext_authz>`.\n// [#extension: envoy.filters.network.ext_authz]\n\n// External Authorization filter calls out to an external service over the\n// gRPC Authorization API defined by\n// :ref:`CheckRequest <envoy_api_msg_service.auth.v3.CheckRequest>`.\n// A failed check will cause this filter to close the TCP connection.\n// [#next-free-field: 7]\nmessage ExtAuthz {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.ext_authz.v2.ExtAuthz\";\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The external authorization gRPC service configuration.\n  // The default timeout is set to 200ms by this filter.\n  config.core.v3.GrpcService grpc_service = 2;\n\n  // The filter's behaviour in case the external authorization service does\n  // not respond back. 
When it is set to true, Envoy will also allow traffic in case of\n  // communication failure between authorization service and the proxy.\n  // Defaults to false.\n  bool failure_mode_allow = 3;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v3.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 4;\n\n  // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and\n  // version of Check{Request,Response} used on the wire.\n  config.core.v3.ApiVersion transport_api_version = 5\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies if the filter is enabled with metadata matcher.\n  // If this field is not specified, the filter will be enabled for all requests.\n  type.matcher.v3.MetadataMatcher filter_enabled_metadata = 6;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/ext_authz/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.ext_authz.v4alpha;\n\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\nimport \"envoy/type/matcher/v4alpha/metadata.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.ext_authz.v4alpha\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Network External Authorization ]\n// The network layer external authorization service configuration\n// :ref:`configuration overview <config_network_filters_ext_authz>`.\n// [#extension: envoy.filters.network.ext_authz]\n\n// External Authorization filter calls out to an external service over the\n// gRPC Authorization API defined by\n// :ref:`CheckRequest <envoy_api_msg_service.auth.v4alpha.CheckRequest>`.\n// A failed check will cause this filter to close the TCP connection.\n// [#next-free-field: 7]\nmessage ExtAuthz {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.ext_authz.v3.ExtAuthz\";\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The external authorization gRPC service configuration.\n  // The default timeout is set to 200ms by this filter.\n  config.core.v4alpha.GrpcService grpc_service = 2;\n\n  // The filter's behaviour in case the external authorization service does\n  // not respond back. 
When it is set to true, Envoy will also allow traffic in case of\n  // communication failure between authorization service and the proxy.\n  // Defaults to false.\n  bool failure_mode_allow = 3;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v4alpha.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 4;\n\n  // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and\n  // version of Check{Request,Response} used on the wire.\n  config.core.v4alpha.ApiVersion transport_api_version = 5\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies if the filter is enabled with metadata matcher.\n  // If this field is not specified, the filter will be enabled for all requests.\n  type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 6;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/http_connection_manager/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.http_connection_manager.v3;\n\nimport \"envoy/config/accesslog/v3/accesslog.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/protocol.proto\";\nimport \"envoy/config/core/v3/substitution_format_string.proto\";\nimport \"envoy/config/route/v3/route.proto\";\nimport \"envoy/config/route/v3/scoped_route.proto\";\nimport \"envoy/config/trace/v3/http_tracer.proto\";\nimport \"envoy/type/tracing/v3/custom_tag.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3\";\noption java_outer_classname = \"HttpConnectionManagerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP connection manager]\n// HTTP connection manager :ref:`configuration overview <config_http_conn_man>`.\n// [#extension: envoy.filters.network.http_connection_manager]\n\n// [#next-free-field: 41]\nmessage HttpConnectionManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\";\n\n  enum CodecType {\n    // For every new connection, the connection manager will determine which\n    // codec to use. 
This mode supports both ALPN for TLS listeners as well as\n    // protocol inference for plaintext listeners. If ALPN data is available, it\n    // is preferred, otherwise protocol inference is used. In almost all cases,\n    // this is the right option to choose for this setting.\n    AUTO = 0;\n\n    // The connection manager will assume that the client is speaking HTTP/1.1.\n    HTTP1 = 1;\n\n    // The connection manager will assume that the client is speaking HTTP/2\n    // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN.\n    // Prior knowledge is allowed).\n    HTTP2 = 2;\n\n    // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n    // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n    // to distinguish HTTP1 and HTTP2 traffic.\n    HTTP3 = 3;\n  }\n\n  enum ServerHeaderTransformation {\n    // Overwrite any Server header with the contents of server_name.\n    OVERWRITE = 0;\n\n    // If no Server header is present, append Server server_name\n    // If a Server header is present, pass it through.\n    APPEND_IF_ABSENT = 1;\n\n    // Pass through the value of the server header, and do not append a header\n    // if none is present.\n    PASS_THROUGH = 2;\n  }\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  enum ForwardClientCertDetails {\n    // Do not send the XFCC header to the next hop. 
This is the default value.\n    SANITIZE = 0;\n\n    // When the client connection is mTLS (Mutual TLS), forward the XFCC header\n    // in the request.\n    FORWARD_ONLY = 1;\n\n    // When the client connection is mTLS, append the client certificate\n    // information to the request’s XFCC header and forward it.\n    APPEND_FORWARD = 2;\n\n    // When the client connection is mTLS, reset the XFCC header with the client\n    // certificate information and send it to the next hop.\n    SANITIZE_SET = 3;\n\n    // Always forward the XFCC header in the request, regardless of whether the\n    // client connection is mTLS.\n    ALWAYS_FORWARD_ONLY = 4;\n  }\n\n  // [#next-free-field: 10]\n  message Tracing {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing\";\n\n    enum OperationName {\n      // The HTTP listener is used for ingress/incoming requests.\n      INGRESS = 0;\n\n      // The HTTP listener is used for egress/outgoing requests.\n      EGRESS = 1;\n    }\n\n    reserved 1, 2;\n\n    reserved \"operation_name\", \"request_headers_for_tags\";\n\n    // Target percentage of requests managed by this HTTP connection manager that will be force\n    // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n    // header is set. This field is a direct analog for the runtime variable\n    // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n    // <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent client_sampling = 3;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be randomly\n    // selected for trace generation, if not requested by the client or not forced. 
This field is\n    // a direct analog for the runtime variable 'tracing.random_sampling' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent random_sampling = 4;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be traced\n    // after all other sampling checks have been applied (client-directed, force tracing, random\n    // sampling). This field functions as an upper limit on the total configured sampling rate. For\n    // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n    // of client requests with the appropriate headers to be force traced. This field is a direct\n    // analog for the runtime variable 'tracing.global_enabled' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent overall_sampling = 5;\n\n    // Whether to annotate spans with additional data. If true, spans will include logs for stream\n    // events.\n    bool verbose = 6;\n\n    // Maximum length of the request path to extract and include in the HttpUrl tag. Used to\n    // truncate lengthy request paths to meet the needs of a tracing backend.\n    // Default: 256\n    google.protobuf.UInt32Value max_path_tag_length = 7;\n\n    // A list of custom tags with unique tag name to create tags for the active span.\n    repeated type.tracing.v3.CustomTag custom_tags = 8;\n\n    // Configuration for an external tracing provider.\n    // If not specified, no tracing will be performed.\n    //\n    // .. attention::\n    //   Please be aware that *envoy.tracers.opencensus* provider can only be configured once\n    //   in Envoy lifetime.\n    //   Any attempts to reconfigure it or to use different configurations for different HCM filters\n    //   will be rejected.\n    //   Such a constraint is inherent to OpenCensus itself. 
It cannot be overcome without changes\n    //   on OpenCensus side.\n    config.trace.v3.Tracing.Http provider = 9;\n  }\n\n  message InternalAddressConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.\"\n        \"InternalAddressConfig\";\n\n    // Whether unix socket addresses should be considered internal.\n    bool unix_sockets = 1;\n  }\n\n  // [#next-free-field: 7]\n  message SetCurrentClientCertDetails {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.\"\n        \"SetCurrentClientCertDetails\";\n\n    reserved 2;\n\n    // Whether to forward the subject of the client cert. Defaults to false.\n    google.protobuf.BoolValue subject = 1;\n\n    // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the\n    // XFCC header comma separated from other values with the value Cert=\"PEM\".\n    // Defaults to false.\n    bool cert = 3;\n\n    // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM\n    // format. This will appear in the XFCC header comma separated from other values with the value\n    // Chain=\"PEM\".\n    // Defaults to false.\n    bool chain = 6;\n\n    // Whether to forward the DNS type Subject Alternative Names of the client cert.\n    // Defaults to false.\n    bool dns = 4;\n\n    // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to\n    // false.\n    bool uri = 5;\n  }\n\n  // The configuration for HTTP upgrades.\n  // For each upgrade type desired, an UpgradeConfig must be added.\n  //\n  // .. warning::\n  //\n  //    The current implementation of upgrade headers does not handle\n  //    multi-valued upgrade headers. Support for multi-valued headers may be\n  //    added in the future if needed.\n  //\n  // .. 
warning::\n  //    The current implementation of upgrade headers does not work with HTTP/2\n  //    upstreams.\n  message UpgradeConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.\"\n        \"UpgradeConfig\";\n\n    // The case-insensitive name of this upgrade, e.g. \"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type]\n    // will be proxied upstream.\n    string upgrade_type = 1;\n\n    // If present, this represents the filter chain which will be created for\n    // this type of upgrade. If no filters are present, the filter chain for\n    // HTTP connections will be used for this upgrade type.\n    repeated HttpFilter filters = 2;\n\n    // Determines if upgrades are enabled or disabled by default. Defaults to true.\n    // This can be overridden on a per-route basis with :ref:`cluster\n    // <envoy_api_field_config.route.v3.RouteAction.upgrade_configs>` as documented in the\n    // :ref:`upgrade documentation <arch_overview_upgrades>`.\n    google.protobuf.BoolValue enabled = 3;\n  }\n\n  reserved 27, 11;\n\n  reserved \"idle_timeout\";\n\n  // Supplies the type of codec that the connection manager should use.\n  CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics for the\n  // connection manager. 
See the :ref:`statistics documentation <config_http_conn_man_stats>` for\n  // more information.\n  string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The connection manager’s route table will be dynamically loaded via the RDS API.\n    Rds rds = 3;\n\n    // The route table for the connection manager is static and is specified in this property.\n    config.route.v3.RouteConfiguration route_config = 4;\n\n    // A route table will be dynamically assigned to each request based on request attributes\n    // (e.g., the value of a header). The \"routing scopes\" (i.e., route tables) and \"scope keys\" are\n    // specified in this message.\n    ScopedRoutes scoped_routes = 31;\n  }\n\n  // A list of individual HTTP filters that make up the filter chain for\n  // requests made to the connection manager. :ref:`Order matters <arch_overview_http_filters_ordering>`\n  // as the filters are processed sequentially as request events happen.\n  repeated HttpFilter http_filters = 5;\n\n  // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent`\n  // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked\n  // documentation for more information. Defaults to false.\n  google.protobuf.BoolValue add_user_agent = 6;\n\n  // Presence of the object defines whether the connection manager\n  // emits :ref:`tracing <arch_overview_tracing>` data to the :ref:`configured tracing provider\n  // <envoy_api_msg_config.trace.v3.Tracing>`.\n  Tracing tracing = 7;\n\n  // Additional settings for HTTP requests handled by the connection manager. 
These will be\n  // applicable to both HTTP1 and HTTP2 requests.\n  config.core.v3.HttpProtocolOptions common_http_protocol_options = 35\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // Additional HTTP/1 settings that are passed to the HTTP/1 codec.\n  config.core.v3.Http1ProtocolOptions http_protocol_options = 8;\n\n  // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec.\n  config.core.v3.Http2ProtocolOptions http2_protocol_options = 9\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // An optional override that the connection manager will write to the server\n  // header in responses. If not set, the default is *envoy*.\n  string server_name = 10\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Defines the action to be applied to the Server header on the response path.\n  // By default, Envoy will overwrite the header with the value specified in\n  // server_name.\n  ServerHeaderTransformation server_header_transformation = 34\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The maximum request headers size for incoming connections.\n  // If unconfigured, the default max request headers allowed is 60 KiB.\n  // Requests that exceed this limit will receive a 431 response.\n  // The max configurable limit is 96 KiB, based on current implementation\n  // constraints.\n  google.protobuf.UInt32Value max_request_headers_kb = 29\n      [(validate.rules).uint32 = {lte: 96 gt: 0}];\n\n  // The stream idle timeout for connections managed by the connection manager.\n  // If not specified, this defaults to 5 minutes. 
The default value was selected\n  // so as not to interfere with any smaller configured timeouts that may have\n  // existed in configurations prior to the introduction of this feature, while\n  // introducing robustness to TCP connections that terminate without a FIN.\n  //\n  // This idle timeout applies to new streams and is overridable by the\n  // :ref:`route-level idle_timeout\n  // <envoy_api_field_config.route.v3.RouteAction.idle_timeout>`. Even on a stream in\n  // which the override applies, prior to receipt of the initial request\n  // headers, the :ref:`stream_idle_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout>`\n  // applies. Each time an encode/decode event for headers or data is processed\n  // for the stream, the timer will be reset. If the timeout fires, the stream\n  // is terminated with a 408 Request Timeout error code if no upstream response\n  // header has been received, otherwise a stream reset occurs.\n  //\n  // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough\n  // window to write any remaining stream data once the entirety of stream data (local end stream is\n  // true) has been buffered pending available window. In other words, this timeout defends against\n  // a peer that does not release enough window to completely write the stream, even though all\n  // data has been proxied within available flow control windows. If the timeout is hit in this\n  // case, the :ref:`tx_flush_timeout <config_http_conn_man_stats_per_codec>` counter will be\n  // incremented. Note that :ref:`max_stream_duration\n  // <envoy_api_field_config.core.v3.HttpProtocolOptions.max_stream_duration>` does not apply to\n  // this corner case.\n  //\n  // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due\n  // to the granularity of events presented to the connection manager. 
For example, while receiving\n  // very large request headers, it may be the case that there is traffic regularly arriving on the\n  // wire while the connection manage is only able to observe the end-of-headers event, hence the\n  // stream may still idle timeout.\n  //\n  // A value of 0 will completely disable the connection manager stream idle\n  // timeout, although per-route idle timeout overrides will continue to apply.\n  google.protobuf.Duration stream_idle_timeout = 24\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The amount of time that Envoy will wait for the entire request to be received.\n  // The timer is activated when the request is initiated, and is disarmed when the last byte of the\n  // request is sent upstream (i.e. all decoding filters have processed the request), OR when the\n  // response is initiated. If not specified or set to 0, this timeout is disabled.\n  google.protobuf.Duration request_timeout = 28\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The time that Envoy will wait between sending an HTTP/2 “shutdown\n  // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame.\n  // This is used so that Envoy provides a grace period for new streams that\n  // race with the final GOAWAY frame. During this grace period, Envoy will\n  // continue to accept new streams. After the grace period, a final GOAWAY\n  // frame is sent and Envoy will start refusing new streams. Draining occurs\n  // both when a connection hits the idle timeout or during general server\n  // draining. 
The default grace period is 5000 milliseconds (5 seconds) if this\n  // option is not specified.\n  google.protobuf.Duration drain_timeout = 12;\n\n  // The delayed close timeout is for downstream connections managed by the HTTP connection manager.\n  // It is defined as a grace period after connection close processing has been locally initiated\n  // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy\n  // from the downstream connection) prior to Envoy closing the socket associated with that\n  // connection.\n  // NOTE: This timeout is enforced even when the socket associated with the downstream connection\n  // is pending a flush of the write buffer. However, any progress made writing data to the socket\n  // will restart the timer associated with this timeout. This means that the total grace period for\n  // a socket in this state will be\n  // <total_time_waiting_for_write_buffer_flushes>+<delayed_close_timeout>.\n  //\n  // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close\n  // sequence mitigates a race condition that exists when downstream clients do not drain/process\n  // data in a connection's receive buffer after a remote close has been detected via a socket\n  // write(). This race leads to such clients failing to process the response code sent by Envoy,\n  // which could result in erroneous downstream processing.\n  //\n  // If the timeout triggers, Envoy will close the connection's socket.\n  //\n  // The default timeout is 1000 ms if this option is not specified.\n  //\n  // .. NOTE::\n  //    To be useful in avoiding the race condition described above, this timeout must be set\n  //    to *at least* <max round trip time expected between clients and Envoy>+<100ms to account for\n  //    a reasonable \"worst\" case processing time for a full iteration of Envoy's event loop>.\n  //\n  // .. 
WARNING::\n  //    A value of 0 will completely disable delayed close processing. When disabled, the downstream\n  //    connection's socket will be closed immediately after the write flush is completed or will\n  //    never close if the write flush does not complete.\n  google.protobuf.Duration delayed_close_timeout = 26;\n\n  // Configuration for :ref:`HTTP access logs <arch_overview_access_logs>`\n  // emitted by the connection manager.\n  repeated config.accesslog.v3.AccessLog access_log = 13;\n\n  // If set to true, the connection manager will use the real remote address\n  // of the client connection when determining internal versus external origin and manipulating\n  // various headers. If set to false or absent, the connection manager will use the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for`,\n  // :ref:`config_http_conn_man_headers_x-envoy-internal`, and\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information.\n  google.protobuf.BoolValue use_remote_address = 14\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The number of additional ingress proxy hops from the right side of the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when\n  // determining the origin client's IP address. The default is zero if this option\n  // is not specified. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information.\n  uint32 xff_num_trusted_hops = 19;\n\n  // Configures what network addresses are considered internal for stats and header sanitation\n  // purposes. 
If unspecified, only RFC1918 IP addresses will be considered internal.\n  // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information about internal/external addresses.\n  InternalAddressConfig internal_address_config = 25;\n\n  // If set, Envoy will not append the remote address to the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in\n  // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager\n  // has mutated the request headers. While :ref:`use_remote_address\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address>`\n  // will also suppress XFF addition, it has consequences for logging and other\n  // Envoy uses of the remote address, so *skip_xff_append* should be used\n  // when only an elision of XFF addition is intended.\n  bool skip_xff_append = 21;\n\n  // Via header value to append to request and response headers. If this is\n  // empty, no via header will be appended.\n  string via = 22;\n\n  // Whether the connection manager will generate the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to\n  // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature\n  // is not desired it can be disabled.\n  google.protobuf.BoolValue generate_request_id = 15;\n\n  // Whether the connection manager will keep the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if passed for a request that is edge\n  // (Edge request is the request from external clients to front Envoy) and not reset it, which\n  // is the current Envoy behaviour. 
This defaults to false.\n  bool preserve_external_request_id = 32;\n\n  // If set, Envoy will always set :ref:`x-request-id <config_http_conn_man_headers_x-request-id>` header in response.\n  // If this is false or not set, the request ID is returned in responses only if tracing is forced using\n  // :ref:`x-envoy-force-trace <config_http_conn_man_headers_x-envoy-force-trace>` header.\n  bool always_set_request_id_in_response = 37;\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  ForwardClientCertDetails forward_client_cert_details = 16\n      [(validate.rules).enum = {defined_only: true}];\n\n  // This field is valid only when :ref:`forward_client_cert_details\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.forward_client_cert_details>`\n  // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in\n  // the client certificate to be forwarded. Note that in the\n  // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and\n  // *By* is always set when the client certificate presents the URI type Subject Alternative Name\n  // value.\n  SetCurrentClientCertDetails set_current_client_cert_details = 17;\n\n  // If proxy_100_continue is true, Envoy will proxy incoming \"Expect:\n  // 100-continue\" headers upstream, and forward \"100 Continue\" responses\n  // downstream. 
If this is false or not set, Envoy will instead strip the\n  // \"Expect: 100-continue\" header, and send a \"100 Continue\" response itself.\n  bool proxy_100_continue = 18;\n\n  // If\n  // :ref:`use_remote_address\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address>`\n  // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is\n  // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*.\n  // This is useful for testing compatibility of upstream services that parse the header value. For\n  // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses\n  // <https://tools.ietf.org/html/rfc4291#section-2.5.5.2>`_ for details. This will also affect the\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See\n  // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6\n  // <config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6>` for runtime\n  // control.\n  // [#not-implemented-hide:]\n  bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20;\n\n  repeated UpgradeConfig upgrade_configs = 23;\n\n  // Should paths be normalized according to RFC 3986 before any processing of\n  // requests by HTTP filters or routing? This affects the upstream *:path* header\n  // as well. For paths that fail this check, Envoy will respond with 400 to\n  // paths that are malformed. This defaults to false currently but will default\n  // true in the future. 
When not specified, this value may be overridden by the\n  // runtime variable\n  // :ref:`http_connection_manager.normalize_path<config_http_conn_man_runtime_normalize_path>`.\n  // See `Normalization and Comparison <https://tools.ietf.org/html/rfc3986#section-6>`_\n  // for details of normalization.\n  // Note that Envoy does not perform\n  // `case normalization <https://tools.ietf.org/html/rfc3986#section-6.2.2.1>`_\n  google.protobuf.BoolValue normalize_path = 30;\n\n  // Determines if adjacent slashes in the path are merged into one before any processing of\n  // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without\n  // setting this option, incoming requests with path `//dir///file` will not match against route\n  // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of\n  // `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool merge_slashes = 33;\n\n  // The configuration of the request ID extension. This includes operations such as\n  // generation, validation, and associated tracing operations.\n  //\n  // If not set, Envoy uses the default UUID-based behavior:\n  //\n  // 1. Request ID is propagated using *x-request-id* header.\n  //\n  // 2. Request ID is a universally unique identifier (UUID).\n  //\n  // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID.\n  RequestIDExtension request_id_extension = 36;\n\n  // The configuration to customize local reply returned by Envoy. It can customize status code,\n  // body text and response content type. If not specified, status code and text body are hard\n  // coded in Envoy, the response content type is plain text.\n  LocalReplyConfig local_reply_config = 38;\n\n  // Determines if the port part should be removed from host/authority header before any processing\n  // of request by HTTP filters or routing. 
The port would be removed only if it is equal to the :ref:`listener's<envoy_api_field_config.listener.v3.Listener.address>`\n  // local port and request method is not CONNECT. This affects the upstream host header as well.\n  // Without setting this option, incoming requests with host `example:443` will not match against\n  // route with :ref:`domains<envoy_api_field_config.route.v3.VirtualHost.domains>` match set to `example`. Defaults to `false`. Note that port removal is not part\n  // of `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool strip_matching_host_port = 39;\n\n  // Governs Envoy's behavior when receiving invalid HTTP from downstream.\n  // If this option is false (default), Envoy will err on the conservative side handling HTTP\n  // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request.\n  // If this option is set to true, Envoy will be more permissive, only resetting the invalid\n  // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire\n  // request is read for HTTP/1.1)\n  // In general this should be true for deployments receiving trusted traffic (L2 Envoys,\n  // company-internal mesh) and false when receiving untrusted traffic (edge deployments).\n  //\n  // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are\n  // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message>` or the new HTTP/2 option\n  // :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>`\n  // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.stream_error_on_invalid_http_messaging>`\n  
google.protobuf.BoolValue stream_error_on_invalid_http_message = 40;\n}\n\n// The configuration to customize local reply returned by Envoy.\nmessage LocalReplyConfig {\n  // Configuration of list of mappers which allows to filter and change local response.\n  // The mappers will be checked by the specified order until one is matched.\n  repeated ResponseMapper mappers = 1;\n\n  // The configuration to form response body from the :ref:`command operators <config_access_log_command_operators>`\n  // and to specify response content type as one of: plain/text or application/json.\n  //\n  // Example one: \"plain/text\" ``body_format``.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   text_format: \"%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\\n\"\n  //\n  // The following response body in \"plain/text\" format will be generated for a request with\n  // local reply body of \"upstream connection error\", response_code=503 and path=/foo.\n  //\n  // .. code-block:: text\n  //\n  //   upstream connect error:503:path=/foo\n  //\n  // Example two: \"application/json\" ``body_format``.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   json_format:\n  //     status: \"%RESPONSE_CODE%\"\n  //     message: \"%LOCAL_REPLY_BODY%\"\n  //     path: \"%REQ(:path)%\"\n  //\n  // The following response body in \"application/json\" format would be generated for a request with\n  // local reply body of \"upstream connection error\", response_code=503 and path=/foo.\n  //\n  // .. 
code-block:: json\n  //\n  //  {\n  //    \"status\": 503,\n  //    \"message\": \"upstream connection error\",\n  //    \"path\": \"/foo\"\n  //  }\n  //\n  config.core.v3.SubstitutionFormatString body_format = 2;\n}\n\n// The configuration to filter and change local response.\n// [#next-free-field: 6]\nmessage ResponseMapper {\n  // Filter to determine if this mapper should apply.\n  config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}];\n\n  // The new response status code if specified.\n  google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n  // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%`\n  // command operator in the `body_format`.\n  config.core.v3.DataSource body = 3;\n\n  // A per mapper `body_format` to override the :ref:`body_format <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.body_format>`.\n  // It will be used when this mapper is matched.\n  config.core.v3.SubstitutionFormatString body_format_override = 4;\n\n  // HTTP headers to add to a local reply. This allows the response mapper to append, to add\n  // or to override headers of any local reply before it is sent to a downstream client.\n  repeated config.core.v3.HeaderValueOption headers_to_add = 5\n      [(validate.rules).repeated = {max_items: 1000}];\n}\n\nmessage Rds {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.Rds\";\n\n  // Configuration source specifier for RDS.\n  config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n  // The name of the route configuration. This name will be passed to the RDS\n  // API. 
This allows an Envoy configuration with multiple HTTP listeners (and\n  // associated HTTP connection manager filters) to use different route\n  // configurations.\n  string route_config_name = 2\n      [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  // Resource locator for RDS. This is mutually exclusive to *route_config_name*.\n  // [#not-implemented-hide:]\n  udpa.core.v1.ResourceLocator rds_resource_locator = 3\n      [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n}\n\n// This message is used to work around the limitations with 'oneof' and repeated fields.\nmessage ScopedRouteConfigurationsList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.ScopedRouteConfigurationsList\";\n\n  repeated config.route.v3.ScopedRouteConfiguration scoped_route_configurations = 1\n      [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#next-free-field: 6]\nmessage ScopedRoutes {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes\";\n\n  // Specifies the mechanism for constructing \"scope keys\" based on HTTP request attributes. These\n  // keys are matched against a set of :ref:`Key<envoy_api_msg_config.route.v3.ScopedRouteConfiguration.Key>`\n  // objects assembled from :ref:`ScopedRouteConfiguration<envoy_api_msg_config.route.v3.ScopedRouteConfiguration>`\n  // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via\n  // :ref:`scoped_route_configurations_list<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_route_configurations_list>`.\n  //\n  // Upon receiving a request's headers, the Router will build a key using the algorithm specified\n  // by this message. 
This key will be used to look up the routing table (i.e., the\n  // :ref:`RouteConfiguration<envoy_api_msg_config.route.v3.RouteConfiguration>`) to use for the request.\n  message ScopeKeyBuilder {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder\";\n\n    // Specifies the mechanism for constructing key fragments which are composed into scope keys.\n    message FragmentBuilder {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder.\"\n          \"FragmentBuilder\";\n\n      // Specifies how the value of a header should be extracted.\n      // The following example maps the structure of a header to the fields in this message.\n      //\n      // .. code::\n      //\n      //              <0> <1>   <-- index\n      //    X-Header: a=b;c=d\n      //    |         || |\n      //    |         || \\----> <element_separator>\n      //    |         ||\n      //    |         |\\----> <element.separator>\n      //    |         |\n      //    |         \\----> <element.key>\n      //    |\n      //    \\----> <name>\n      //\n      //    Each 'a=b' key-value pair constitutes an 'element' of the header field.\n      message HeaderValueExtractor {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder.\"\n            \"FragmentBuilder.HeaderValueExtractor\";\n\n        // Specifies a header field's key value pair to match on.\n        message KvElement {\n          option (udpa.annotations.versioning).previous_message_type =\n              \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder.\"\n              \"FragmentBuilder.HeaderValueExtractor.KvElement\";\n\n          // The separator between key and value 
(e.g., '=' separates 'k=v;...').\n          // If an element is an empty string, the element is ignored.\n          // If an element contains no separator, the whole element is parsed as key and the\n          // fragment value is an empty string.\n          // If there are multiple values for a matched key, the first value is returned.\n          string separator = 1 [(validate.rules).string = {min_len: 1}];\n\n          // The key to match on.\n          string key = 2 [(validate.rules).string = {min_len: 1}];\n        }\n\n        // The name of the header field to extract the value from.\n        string name = 1 [(validate.rules).string = {min_len: 1}];\n\n        // The element separator (e.g., ';' separates 'a;b;c;d').\n        // Default: empty string. This causes the entirety of the header field to be extracted.\n        // If this field is set to an empty string and 'index' is used in the oneof below, 'index'\n        // must be set to 0.\n        string element_separator = 2;\n\n        oneof extract_type {\n          // Specifies the zero based index of the element to extract.\n          // Note Envoy concatenates multiple values of the same header key into a comma separated\n          // string, the splitting always happens after the concatenation.\n          uint32 index = 3;\n\n          // Specifies the key value pair to extract the value from.\n          KvElement element = 4;\n        }\n      }\n\n      oneof type {\n        option (validate.required) = true;\n\n        // Specifies how a header field's value should be extracted.\n        HeaderValueExtractor header_value_extractor = 1;\n      }\n    }\n\n    // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the\n    // fragments of a :ref:`ScopedRouteConfiguration<envoy_api_msg_config.route.v3.ScopedRouteConfiguration>`.\n    // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any 
key.\n    repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // The name assigned to the scoped routing configuration.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The algorithm to use for constructing a scope key for each request.\n  ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];\n\n  // Configuration source specifier for RDS.\n  // This config source is used to subscribe to RouteConfiguration resources specified in\n  // ScopedRouteConfiguration messages.\n  config.core.v3.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}];\n\n  oneof config_specifier {\n    option (validate.required) = true;\n\n    // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by\n    // matching a key constructed from the request's attributes according to the algorithm specified\n    // by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRouteConfigurationsList scoped_route_configurations_list = 4;\n\n    // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS\n    // API. 
A scope is assigned to a request by matching a key constructed from the request's\n    // attributes according to the algorithm specified by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRds scoped_rds = 5;\n  }\n}\n\nmessage ScopedRds {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.ScopedRds\";\n\n  // Configuration source specifier for scoped RDS.\n  config.core.v3.ConfigSource scoped_rds_config_source = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// [#next-free-field: 6]\nmessage HttpFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.HttpFilter\";\n\n  reserved 3, 2;\n\n  reserved \"config\";\n\n  // The name of the filter configuration. The name is used as a fallback to\n  // select an extension if the type of the configuration proto is not\n  // sufficient. It also serves as a resource name in ExtensionConfigDS.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof config_type {\n    // Filter specific configuration which depends on the filter being instantiated. 
See the supported\n    // filters for further documentation.\n    google.protobuf.Any typed_config = 4;\n\n    // Configuration source specifier for an extension configuration discovery service.\n    // In case of a failure and without the default configuration, the HTTP listener responds with code 500.\n    // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061).\n    config.core.v3.ExtensionConfigSource config_discovery = 5;\n  }\n}\n\nmessage RequestIDExtension {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.RequestIDExtension\";\n\n  // Request ID extension specific configuration.\n  google.protobuf.Any typed_config = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/accesslog/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/config/trace/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/http_connection_manager/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.http_connection_manager.v4alpha;\n\nimport \"envoy/config/accesslog/v4alpha/accesslog.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/protocol.proto\";\nimport \"envoy/config/core/v4alpha/substitution_format_string.proto\";\nimport \"envoy/config/route/v4alpha/route.proto\";\nimport \"envoy/config/route/v4alpha/scoped_route.proto\";\nimport \"envoy/config/trace/v4alpha/http_tracer.proto\";\nimport \"envoy/type/tracing/v3/custom_tag.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v4alpha\";\noption java_outer_classname = \"HttpConnectionManagerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP connection manager]\n// HTTP connection manager :ref:`configuration overview <config_http_conn_man>`.\n// [#extension: envoy.filters.network.http_connection_manager]\n\n// [#next-free-field: 41]\nmessage HttpConnectionManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\";\n\n  enum CodecType {\n    // For every new connection, the connection manager will determine which\n    // 
codec to use. This mode supports both ALPN for TLS listeners as well as\n    // protocol inference for plaintext listeners. If ALPN data is available, it\n    // is preferred, otherwise protocol inference is used. In almost all cases,\n    // this is the right option to choose for this setting.\n    AUTO = 0;\n\n    // The connection manager will assume that the client is speaking HTTP/1.1.\n    HTTP1 = 1;\n\n    // The connection manager will assume that the client is speaking HTTP/2\n    // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN.\n    // Prior knowledge is allowed).\n    HTTP2 = 2;\n\n    // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n    // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n    // to distinguish HTTP1 and HTTP2 traffic.\n    HTTP3 = 3;\n  }\n\n  enum ServerHeaderTransformation {\n    // Overwrite any Server header with the contents of server_name.\n    OVERWRITE = 0;\n\n    // If no Server header is present, append Server server_name\n    // If a Server header is present, pass it through.\n    APPEND_IF_ABSENT = 1;\n\n    // Pass through the value of the server header, and do not append a header\n    // if none is present.\n    PASS_THROUGH = 2;\n  }\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  enum ForwardClientCertDetails {\n    // Do not send the XFCC header to the next hop. 
This is the default value.\n    SANITIZE = 0;\n\n    // When the client connection is mTLS (Mutual TLS), forward the XFCC header\n    // in the request.\n    FORWARD_ONLY = 1;\n\n    // When the client connection is mTLS, append the client certificate\n    // information to the request’s XFCC header and forward it.\n    APPEND_FORWARD = 2;\n\n    // When the client connection is mTLS, reset the XFCC header with the client\n    // certificate information and send it to the next hop.\n    SANITIZE_SET = 3;\n\n    // Always forward the XFCC header in the request, regardless of whether the\n    // client connection is mTLS.\n    ALWAYS_FORWARD_ONLY = 4;\n  }\n\n  // [#next-free-field: 10]\n  message Tracing {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing\";\n\n    enum OperationName {\n      // The HTTP listener is used for ingress/incoming requests.\n      INGRESS = 0;\n\n      // The HTTP listener is used for egress/outgoing requests.\n      EGRESS = 1;\n    }\n\n    reserved 1, 2;\n\n    reserved \"operation_name\", \"request_headers_for_tags\";\n\n    // Target percentage of requests managed by this HTTP connection manager that will be force\n    // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n    // header is set. This field is a direct analog for the runtime variable\n    // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n    // <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent client_sampling = 3;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be randomly\n    // selected for trace generation, if not requested by the client or not forced. 
This field is\n    // a direct analog for the runtime variable 'tracing.random_sampling' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent random_sampling = 4;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be traced\n    // after all other sampling checks have been applied (client-directed, force tracing, random\n    // sampling). This field functions as an upper limit on the total configured sampling rate. For\n    // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n    // of client requests with the appropriate headers to be force traced. This field is a direct\n    // analog for the runtime variable 'tracing.global_enabled' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent overall_sampling = 5;\n\n    // Whether to annotate spans with additional data. If true, spans will include logs for stream\n    // events.\n    bool verbose = 6;\n\n    // Maximum length of the request path to extract and include in the HttpUrl tag. Used to\n    // truncate lengthy request paths to meet the needs of a tracing backend.\n    // Default: 256\n    google.protobuf.UInt32Value max_path_tag_length = 7;\n\n    // A list of custom tags with unique tag name to create tags for the active span.\n    repeated type.tracing.v3.CustomTag custom_tags = 8;\n\n    // Configuration for an external tracing provider.\n    // If not specified, no tracing will be performed.\n    //\n    // .. attention::\n    //   Please be aware that *envoy.tracers.opencensus* provider can only be configured once\n    //   in Envoy lifetime.\n    //   Any attempts to reconfigure it or to use different configurations for different HCM filters\n    //   will be rejected.\n    //   Such a constraint is inherent to OpenCensus itself. 
It cannot be overcome without changes\n    //   on OpenCensus side.\n    config.trace.v4alpha.Tracing.Http provider = 9;\n  }\n\n  message InternalAddressConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.\"\n        \"InternalAddressConfig\";\n\n    // Whether unix socket addresses should be considered internal.\n    bool unix_sockets = 1;\n  }\n\n  // [#next-free-field: 7]\n  message SetCurrentClientCertDetails {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.\"\n        \"SetCurrentClientCertDetails\";\n\n    reserved 2;\n\n    // Whether to forward the subject of the client cert. Defaults to false.\n    google.protobuf.BoolValue subject = 1;\n\n    // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the\n    // XFCC header comma separated from other values with the value Cert=\"PEM\".\n    // Defaults to false.\n    bool cert = 3;\n\n    // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM\n    // format. This will appear in the XFCC header comma separated from other values with the value\n    // Chain=\"PEM\".\n    // Defaults to false.\n    bool chain = 6;\n\n    // Whether to forward the DNS type Subject Alternative Names of the client cert.\n    // Defaults to false.\n    bool dns = 4;\n\n    // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to\n    // false.\n    bool uri = 5;\n  }\n\n  // The configuration for HTTP upgrades.\n  // For each upgrade type desired, an UpgradeConfig must be added.\n  //\n  // .. warning::\n  //\n  //    The current implementation of upgrade headers does not handle\n  //    multi-valued upgrade headers. 
Support for multi-valued headers may be\n  //    added in the future if needed.\n  //\n  // .. warning::\n  //    The current implementation of upgrade headers does not work with HTTP/2\n  //    upstreams.\n  message UpgradeConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.\"\n        \"UpgradeConfig\";\n\n    // The case-insensitive name of this upgrade, e.g. \"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type]\n    // will be proxied upstream.\n    string upgrade_type = 1;\n\n    // If present, this represents the filter chain which will be created for\n    // this type of upgrade. If no filters are present, the filter chain for\n    // HTTP connections will be used for this upgrade type.\n    repeated HttpFilter filters = 2;\n\n    // Determines if upgrades are enabled or disabled by default. Defaults to true.\n    // This can be overridden on a per-route basis with :ref:`cluster\n    // <envoy_api_field_config.route.v4alpha.RouteAction.upgrade_configs>` as documented in the\n    // :ref:`upgrade documentation <arch_overview_upgrades>`.\n    google.protobuf.BoolValue enabled = 3;\n  }\n\n  reserved 27, 11;\n\n  reserved \"idle_timeout\";\n\n  // Supplies the type of codec that the connection manager should use.\n  CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics for the\n  // connection manager. 
See the :ref:`statistics documentation <config_http_conn_man_stats>` for\n  // more information.\n  string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The connection manager’s route table will be dynamically loaded via the RDS API.\n    Rds rds = 3;\n\n    // The route table for the connection manager is static and is specified in this property.\n    config.route.v4alpha.RouteConfiguration route_config = 4;\n\n    // A route table will be dynamically assigned to each request based on request attributes\n    // (e.g., the value of a header). The \"routing scopes\" (i.e., route tables) and \"scope keys\" are\n    // specified in this message.\n    ScopedRoutes scoped_routes = 31;\n  }\n\n  // A list of individual HTTP filters that make up the filter chain for\n  // requests made to the connection manager. :ref:`Order matters <arch_overview_http_filters_ordering>`\n  // as the filters are processed sequentially as request events happen.\n  repeated HttpFilter http_filters = 5;\n\n  // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent`\n  // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked\n  // documentation for more information. Defaults to false.\n  google.protobuf.BoolValue add_user_agent = 6;\n\n  // Presence of the object defines whether the connection manager\n  // emits :ref:`tracing <arch_overview_tracing>` data to the :ref:`configured tracing provider\n  // <envoy_api_msg_config.trace.v4alpha.Tracing>`.\n  Tracing tracing = 7;\n\n  // Additional settings for HTTP requests handled by the connection manager. 
These will be\n  // applicable to both HTTP1 and HTTP2 requests.\n  config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // Additional HTTP/1 settings that are passed to the HTTP/1 codec.\n  config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8;\n\n  // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec.\n  config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // An optional override that the connection manager will write to the server\n  // header in responses. If not set, the default is *envoy*.\n  string server_name = 10\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Defines the action to be applied to the Server header on the response path.\n  // By default, Envoy will overwrite the header with the value specified in\n  // server_name.\n  ServerHeaderTransformation server_header_transformation = 34\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The maximum request headers size for incoming connections.\n  // If unconfigured, the default max request headers allowed is 60 KiB.\n  // Requests that exceed this limit will receive a 431 response.\n  // The max configurable limit is 96 KiB, based on current implementation\n  // constraints.\n  google.protobuf.UInt32Value max_request_headers_kb = 29\n      [(validate.rules).uint32 = {lte: 96 gt: 0}];\n\n  // The stream idle timeout for connections managed by the connection manager.\n  // If not specified, this defaults to 5 minutes. 
The default value was selected\n  // so as not to interfere with any smaller configured timeouts that may have\n  // existed in configurations prior to the introduction of this feature, while\n  // introducing robustness to TCP connections that terminate without a FIN.\n  //\n  // This idle timeout applies to new streams and is overridable by the\n  // :ref:`route-level idle_timeout\n  // <envoy_api_field_config.route.v4alpha.RouteAction.idle_timeout>`. Even on a stream in\n  // which the override applies, prior to receipt of the initial request\n  // headers, the :ref:`stream_idle_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.stream_idle_timeout>`\n  // applies. Each time an encode/decode event for headers or data is processed\n  // for the stream, the timer will be reset. If the timeout fires, the stream\n  // is terminated with a 408 Request Timeout error code if no upstream response\n  // header has been received, otherwise a stream reset occurs.\n  //\n  // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough\n  // window to write any remaining stream data once the entirety of stream data (local end stream is\n  // true) has been buffered pending available window. In other words, this timeout defends against\n  // a peer that does not release enough window to completely write the stream, even though all\n  // data has been proxied within available flow control windows. If the timeout is hit in this\n  // case, the :ref:`tx_flush_timeout <config_http_conn_man_stats_per_codec>` counter will be\n  // incremented. Note that :ref:`max_stream_duration\n  // <envoy_api_field_config.core.v4alpha.HttpProtocolOptions.max_stream_duration>` does not apply to\n  // this corner case.\n  //\n  // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due\n  // to the granularity of events presented to the connection manager. 
For example, while receiving\n  // very large request headers, it may be the case that there is traffic regularly arriving on the\n  // wire while the connection manager is only able to observe the end-of-headers event, hence the\n  // stream may still idle timeout.\n  //\n  // A value of 0 will completely disable the connection manager stream idle\n  // timeout, although per-route idle timeout overrides will continue to apply.\n  google.protobuf.Duration stream_idle_timeout = 24\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The amount of time that Envoy will wait for the entire request to be received.\n  // The timer is activated when the request is initiated, and is disarmed when the last byte of the\n  // request is sent upstream (i.e. all decoding filters have processed the request), OR when the\n  // response is initiated. If not specified or set to 0, this timeout is disabled.\n  google.protobuf.Duration request_timeout = 28\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The time that Envoy will wait between sending an HTTP/2 “shutdown\n  // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame.\n  // This is used so that Envoy provides a grace period for new streams that\n  // race with the final GOAWAY frame. During this grace period, Envoy will\n  // continue to accept new streams. After the grace period, a final GOAWAY\n  // frame is sent and Envoy will start refusing new streams. Draining occurs\n  // both when a connection hits the idle timeout or during general server\n  // draining. 
The default grace period is 5000 milliseconds (5 seconds) if this\n  // option is not specified.\n  google.protobuf.Duration drain_timeout = 12;\n\n  // The delayed close timeout is for downstream connections managed by the HTTP connection manager.\n  // It is defined as a grace period after connection close processing has been locally initiated\n  // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy\n  // from the downstream connection) prior to Envoy closing the socket associated with that\n  // connection.\n  // NOTE: This timeout is enforced even when the socket associated with the downstream connection\n  // is pending a flush of the write buffer. However, any progress made writing data to the socket\n  // will restart the timer associated with this timeout. This means that the total grace period for\n  // a socket in this state will be\n  // <total_time_waiting_for_write_buffer_flushes>+<delayed_close_timeout>.\n  //\n  // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close\n  // sequence mitigates a race condition that exists when downstream clients do not drain/process\n  // data in a connection's receive buffer after a remote close has been detected via a socket\n  // write(). This race leads to such clients failing to process the response code sent by Envoy,\n  // which could result in erroneous downstream processing.\n  //\n  // If the timeout triggers, Envoy will close the connection's socket.\n  //\n  // The default timeout is 1000 ms if this option is not specified.\n  //\n  // .. NOTE::\n  //    To be useful in avoiding the race condition described above, this timeout must be set\n  //    to *at least* <max round trip time expected between clients and Envoy>+<100ms to account for\n  //    a reasonable \"worst\" case processing time for a full iteration of Envoy's event loop>.\n  //\n  // .. 
WARNING::\n  //    A value of 0 will completely disable delayed close processing. When disabled, the downstream\n  //    connection's socket will be closed immediately after the write flush is completed or will\n  //    never close if the write flush does not complete.\n  google.protobuf.Duration delayed_close_timeout = 26;\n\n  // Configuration for :ref:`HTTP access logs <arch_overview_access_logs>`\n  // emitted by the connection manager.\n  repeated config.accesslog.v4alpha.AccessLog access_log = 13;\n\n  // If set to true, the connection manager will use the real remote address\n  // of the client connection when determining internal versus external origin and manipulating\n  // various headers. If set to false or absent, the connection manager will use the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for`,\n  // :ref:`config_http_conn_man_headers_x-envoy-internal`, and\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information.\n  google.protobuf.BoolValue use_remote_address = 14\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The number of additional ingress proxy hops from the right side of the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when\n  // determining the origin client's IP address. The default is zero if this option\n  // is not specified. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information.\n  uint32 xff_num_trusted_hops = 19;\n\n  // Configures what network addresses are considered internal for stats and header sanitation\n  // purposes. 
If unspecified, only RFC1918 IP addresses will be considered internal.\n  // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information about internal/external addresses.\n  InternalAddressConfig internal_address_config = 25;\n\n  // If set, Envoy will not append the remote address to the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in\n  // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager\n  // has mutated the request headers. While :ref:`use_remote_address\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.use_remote_address>`\n  // will also suppress XFF addition, it has consequences for logging and other\n  // Envoy uses of the remote address, so *skip_xff_append* should be used\n  // when only an elision of XFF addition is intended.\n  bool skip_xff_append = 21;\n\n  // Via header value to append to request and response headers. If this is\n  // empty, no via header will be appended.\n  string via = 22;\n\n  // Whether the connection manager will generate the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to\n  // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature\n  // is not desired it can be disabled.\n  google.protobuf.BoolValue generate_request_id = 15;\n\n  // Whether the connection manager will keep the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if passed for a request that is edge\n  // (Edge request is the request from external clients to front Envoy) and not reset it, which\n  // is the current Envoy behaviour. 
This defaults to false.\n  bool preserve_external_request_id = 32;\n\n  // If set, Envoy will always set :ref:`x-request-id <config_http_conn_man_headers_x-request-id>` header in response.\n  // If this is false or not set, the request ID is returned in responses only if tracing is forced using\n  // :ref:`x-envoy-force-trace <config_http_conn_man_headers_x-envoy-force-trace>` header.\n  bool always_set_request_id_in_response = 37;\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  ForwardClientCertDetails forward_client_cert_details = 16\n      [(validate.rules).enum = {defined_only: true}];\n\n  // This field is valid only when :ref:`forward_client_cert_details\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.forward_client_cert_details>`\n  // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in\n  // the client certificate to be forwarded. Note that in the\n  // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and\n  // *By* is always set when the client certificate presents the URI type Subject Alternative Name\n  // value.\n  SetCurrentClientCertDetails set_current_client_cert_details = 17;\n\n  // If proxy_100_continue is true, Envoy will proxy incoming \"Expect:\n  // 100-continue\" headers upstream, and forward \"100 Continue\" responses\n  // downstream. 
If this is false or not set, Envoy will instead strip the\n  // \"Expect: 100-continue\" header, and send a \"100 Continue\" response itself.\n  bool proxy_100_continue = 18;\n\n  // If\n  // :ref:`use_remote_address\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.use_remote_address>`\n  // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is\n  // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*.\n  // This is useful for testing compatibility of upstream services that parse the header value. For\n  // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses\n  // <https://tools.ietf.org/html/rfc4291#section-2.5.5.2>`_ for details. This will also affect the\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See\n  // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6\n  // <config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6>` for runtime\n  // control.\n  // [#not-implemented-hide:]\n  bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20;\n\n  repeated UpgradeConfig upgrade_configs = 23;\n\n  // Should paths be normalized according to RFC 3986 before any processing of\n  // requests by HTTP filters or routing? This affects the upstream *:path* header\n  // as well. For paths that fail this check, Envoy will respond with 400 to\n  // paths that are malformed. This defaults to false currently but will default\n  // true in the future. 
When not specified, this value may be overridden by the\n  // runtime variable\n  // :ref:`http_connection_manager.normalize_path<config_http_conn_man_runtime_normalize_path>`.\n  // See `Normalization and Comparison <https://tools.ietf.org/html/rfc3986#section-6>`_\n  // for details of normalization.\n  // Note that Envoy does not perform\n  // `case normalization <https://tools.ietf.org/html/rfc3986#section-6.2.2.1>`_\n  google.protobuf.BoolValue normalize_path = 30;\n\n  // Determines if adjacent slashes in the path are merged into one before any processing of\n  // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without\n  // setting this option, incoming requests with path `//dir///file` will not match against route\n  // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of\n  // `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool merge_slashes = 33;\n\n  // The configuration of the request ID extension. This includes operations such as\n  // generation, validation, and associated tracing operations.\n  //\n  // If not set, Envoy uses the default UUID-based behavior:\n  //\n  // 1. Request ID is propagated using *x-request-id* header.\n  //\n  // 2. Request ID is a universally unique identifier (UUID).\n  //\n  // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID.\n  RequestIDExtension request_id_extension = 36;\n\n  // The configuration to customize local reply returned by Envoy. It can customize status code,\n  // body text and response content type. If not specified, status code and text body are hard\n  // coded in Envoy, the response content type is plain text.\n  LocalReplyConfig local_reply_config = 38;\n\n  // Determines if the port part should be removed from host/authority header before any processing\n  // of request by HTTP filters or routing. 
The port would be removed only if it is equal to the :ref:`listener's<envoy_api_field_config.listener.v4alpha.Listener.address>`\n  // local port and request method is not CONNECT. This affects the upstream host header as well.\n  // Without setting this option, incoming requests with host `example:443` will not match against\n  // route with :ref:`domains<envoy_api_field_config.route.v4alpha.VirtualHost.domains>` match set to `example`. Defaults to `false`. Note that port removal is not part\n  // of `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool strip_matching_host_port = 39;\n\n  // Governs Envoy's behavior when receiving invalid HTTP from downstream.\n  // If this option is false (default), Envoy will err on the conservative side handling HTTP\n  // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request.\n  // If this option is set to true, Envoy will be more permissive, only resetting the invalid\n  // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire\n  // request is read for HTTP/1.1)\n  // In general this should be true for deployments receiving trusted traffic (L2 Envoys,\n  // company-internal mesh) and false when receiving untrusted traffic (edge deployments).\n  //\n  // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are\n  // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message>` or the new HTTP/2 option\n  // :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>`\n  // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging\n  // 
<envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.stream_error_on_invalid_http_messaging>`\n  google.protobuf.BoolValue stream_error_on_invalid_http_message = 40;\n}\n\n// The configuration to customize local reply returned by Envoy.\nmessage LocalReplyConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig\";\n\n  // Configuration of list of mappers which allows to filter and change local response.\n  // The mappers will be checked by the specified order until one is matched.\n  repeated ResponseMapper mappers = 1;\n\n  // The configuration to form response body from the :ref:`command operators <config_access_log_command_operators>`\n  // and to specify response content type as one of: plain/text or application/json.\n  //\n  // Example one: \"plain/text\" ``body_format``.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   text_format: \"%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\\n\"\n  //\n  // The following response body in \"plain/text\" format will be generated for a request with\n  // local reply body of \"upstream connection error\", response_code=503 and path=/foo.\n  //\n  // .. code-block:: text\n  //\n  //   upstream connect error:503:path=/foo\n  //\n  // Example two: \"application/json\" ``body_format``.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   json_format:\n  //     status: \"%RESPONSE_CODE%\"\n  //     message: \"%LOCAL_REPLY_BODY%\"\n  //     path: \"%REQ(:path)%\"\n  //\n  // The following response body in \"application/json\" format would be generated for a request with\n  // local reply body of \"upstream connection error\", response_code=503 and path=/foo.\n  //\n  // .. 
code-block:: json\n  //\n  //  {\n  //    \"status\": 503,\n  //    \"message\": \"upstream connection error\",\n  //    \"path\": \"/foo\"\n  //  }\n  //\n  config.core.v4alpha.SubstitutionFormatString body_format = 2;\n}\n\n// The configuration to filter and change local response.\n// [#next-free-field: 6]\nmessage ResponseMapper {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper\";\n\n  // Filter to determine if this mapper should apply.\n  config.accesslog.v4alpha.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}];\n\n  // The new response status code if specified.\n  google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n  // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%`\n  // command operator in the `body_format`.\n  config.core.v4alpha.DataSource body = 3;\n\n  // A per mapper `body_format` to override the :ref:`body_format <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.body_format>`.\n  // It will be used when this mapper is matched.\n  config.core.v4alpha.SubstitutionFormatString body_format_override = 4;\n\n  // HTTP headers to add to a local reply. This allows the response mapper to append, to add\n  // or to override headers of any local reply before it is sent to a downstream client.\n  repeated config.core.v4alpha.HeaderValueOption headers_to_add = 5\n      [(validate.rules).repeated = {max_items: 1000}];\n}\n\nmessage Rds {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.Rds\";\n\n  // Configuration source specifier for RDS.\n  config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n  oneof name_specifier {\n    // The name of the route configuration. 
This name will be passed to the RDS\n    // API. This allows an Envoy configuration with multiple HTTP listeners (and\n    // associated HTTP connection manager filters) to use different route\n    // configurations.\n    string route_config_name = 2;\n\n    // Resource locator for RDS. This is mutually exclusive to *route_config_name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator rds_resource_locator = 3;\n  }\n}\n\n// This message is used to work around the limitations with 'oneof' and repeated fields.\nmessage ScopedRouteConfigurationsList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList\";\n\n  repeated config.route.v4alpha.ScopedRouteConfiguration scoped_route_configurations = 1\n      [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#next-free-field: 6]\nmessage ScopedRoutes {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes\";\n\n  // Specifies the mechanism for constructing \"scope keys\" based on HTTP request attributes. These\n  // keys are matched against a set of :ref:`Key<envoy_api_msg_config.route.v4alpha.ScopedRouteConfiguration.Key>`\n  // objects assembled from :ref:`ScopedRouteConfiguration<envoy_api_msg_config.route.v4alpha.ScopedRouteConfiguration>`\n  // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via\n  // :ref:`scoped_route_configurations_list<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scoped_route_configurations_list>`.\n  //\n  // Upon receiving a request's headers, the Router will build a key using the algorithm specified\n  // by this message. 
This key will be used to look up the routing table (i.e., the\n  // :ref:`RouteConfiguration<envoy_api_msg_config.route.v4alpha.RouteConfiguration>`) to use for the request.\n  message ScopeKeyBuilder {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder\";\n\n    // Specifies the mechanism for constructing key fragments which are composed into scope keys.\n    message FragmentBuilder {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.\"\n          \"ScopeKeyBuilder.FragmentBuilder\";\n\n      // Specifies how the value of a header should be extracted.\n      // The following example maps the structure of a header to the fields in this message.\n      //\n      // .. code::\n      //\n      //              <0> <1>   <-- index\n      //    X-Header: a=b;c=d\n      //    |         || |\n      //    |         || \\----> <element_separator>\n      //    |         ||\n      //    |         |\\----> <element.separator>\n      //    |         |\n      //    |         \\----> <element.key>\n      //    |\n      //    \\----> <name>\n      //\n      //    Each 'a=b' key-value pair constitutes an 'element' of the header field.\n      message HeaderValueExtractor {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.\"\n            \"ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor\";\n\n        // Specifies a header field's key value pair to match on.\n        message KvElement {\n          option (udpa.annotations.versioning).previous_message_type =\n              \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.\"\n              \"ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement\";\n\n          // The separator 
between key and value (e.g., '=' separates 'k=v;...').\n          // If an element is an empty string, the element is ignored.\n          // If an element contains no separator, the whole element is parsed as key and the\n          // fragment value is an empty string.\n          // If there are multiple values for a matched key, the first value is returned.\n          string separator = 1 [(validate.rules).string = {min_len: 1}];\n\n          // The key to match on.\n          string key = 2 [(validate.rules).string = {min_len: 1}];\n        }\n\n        // The name of the header field to extract the value from.\n        string name = 1 [(validate.rules).string = {min_len: 1}];\n\n        // The element separator (e.g., ';' separates 'a;b;c;d').\n        // Default: empty string. This causes the entirety of the header field to be extracted.\n        // If this field is set to an empty string and 'index' is used in the oneof below, 'index'\n        // must be set to 0.\n        string element_separator = 2;\n\n        oneof extract_type {\n          // Specifies the zero based index of the element to extract.\n          // Note Envoy concatenates multiple values of the same header key into a comma separated\n          // string, the splitting always happens after the concatenation.\n          uint32 index = 3;\n\n          // Specifies the key value pair to extract the value from.\n          KvElement element = 4;\n        }\n      }\n\n      oneof type {\n        option (validate.required) = true;\n\n        // Specifies how a header field's value should be extracted.\n        HeaderValueExtractor header_value_extractor = 1;\n      }\n    }\n\n    // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the\n    // fragments of a :ref:`ScopedRouteConfiguration<envoy_api_msg_config.route.v4alpha.ScopedRouteConfiguration>`.\n    // A missing fragment during comparison will make the key invalid, i.e., the 
computed key doesn't match any key.\n    repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // The name assigned to the scoped routing configuration.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The algorithm to use for constructing a scope key for each request.\n  ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];\n\n  // Configuration source specifier for RDS.\n  // This config source is used to subscribe to RouteConfiguration resources specified in\n  // ScopedRouteConfiguration messages.\n  config.core.v4alpha.ConfigSource rds_config_source = 3\n      [(validate.rules).message = {required: true}];\n\n  oneof config_specifier {\n    option (validate.required) = true;\n\n    // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by\n    // matching a key constructed from the request's attributes according to the algorithm specified\n    // by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRouteConfigurationsList scoped_route_configurations_list = 4;\n\n    // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS\n    // API. 
A scope is assigned to a request by matching a key constructed from the request's\n    // attributes according to the algorithm specified by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRds scoped_rds = 5;\n  }\n}\n\nmessage ScopedRds {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds\";\n\n  // Configuration source specifier for scoped RDS.\n  config.core.v4alpha.ConfigSource scoped_rds_config_source = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// [#next-free-field: 6]\nmessage HttpFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter\";\n\n  reserved 3, 2;\n\n  reserved \"config\";\n\n  // The name of the filter configuration. The name is used as a fallback to\n  // select an extension if the type of the configuration proto is not\n  // sufficient. It also serves as a resource name in ExtensionConfigDS.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof config_type {\n    // Filter specific configuration which depends on the filter being instantiated. 
See the supported\n    // filters for further documentation.\n    google.protobuf.Any typed_config = 4;\n\n    // Configuration source specifier for an extension configuration discovery service.\n    // In case of a failure and without the default configuration, the HTTP listener responds with code 500.\n    // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061).\n    config.core.v4alpha.ExtensionConfigSource config_discovery = 5;\n  }\n}\n\nmessage RequestIDExtension {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension\";\n\n  // Request ID extension specific configuration.\n  google.protobuf.Any typed_config = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/kafka_broker/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/kafka_broker/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.kafka_broker.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.kafka_broker.v3\";\noption java_outer_classname = \"KafkaBrokerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Kafka Broker]\n// Kafka Broker :ref:`configuration overview <config_network_filters_kafka_broker>`.\n// [#extension: envoy.filters.network.kafka_broker]\n\nmessage KafkaBroker {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker\";\n\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_kafka_broker_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/local_ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/local_rate_limit/v2alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.local_ratelimit.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/token_bucket.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.local_ratelimit.v3\";\noption java_outer_classname = \"LocalRateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Local rate limit]\n// Local rate limit :ref:`configuration overview <config_network_filters_local_rate_limit>`.\n// [#extension: envoy.filters.network.local_ratelimit]\n\nmessage LocalRateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.local_rate_limit.v2alpha.LocalRateLimit\";\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_local_rate_limit_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The token bucket configuration to use for rate limiting connections that are processed by the\n  // filter's filter chain. Each incoming connection processed by the filter consumes a single\n  // token. If the token is available, the connection will be allowed. If no tokens are available,\n  // the connection will be immediately closed.\n  //\n  // .. note::\n  //   In the current implementation each filter and filter chain has an independent rate limit.\n  //\n  // .. note::\n  //   In the current implementation the token bucket's :ref:`fill_interval\n  //   <envoy_api_field_type.v3.TokenBucket.fill_interval>` must be >= 50ms to avoid too aggressive\n  //   refills.\n  type.v3.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}];\n\n  // Runtime flag that controls whether the filter is enabled or not. 
If not specified, defaults\n  // to enabled.\n  config.core.v3.RuntimeFeatureFlag runtime_enabled = 3;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/mongo_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/mongo_proxy/v2:pkg\",\n        \"//envoy/extensions/filters/common/fault/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.mongo_proxy.v3;\n\nimport \"envoy/extensions/filters/common/fault/v3/fault.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.mongo_proxy.v3\";\noption java_outer_classname = \"MongoProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Mongo proxy]\n// MongoDB :ref:`configuration overview <config_network_filters_mongo_proxy>`.\n// [#extension: envoy.filters.network.mongo_proxy]\n\nmessage MongoProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.mongo_proxy.v2.MongoProxy\";\n\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_mongo_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The optional path to use for writing Mongo access logs. If no access log\n  // path is specified no access logs will be written. Note that access log is\n  // also gated :ref:`runtime <config_network_filters_mongo_proxy_runtime>`.\n  string access_log = 2;\n\n  // Inject a fixed delay before proxying a Mongo operation. Delays are\n  // applied to the following MongoDB operations: Query, Insert, GetMore,\n  // and KillCursors. Once an active delay is in progress, all incoming\n  // data up until the timer event fires will be a part of the delay.\n  common.fault.v3.FaultDelay delay = 3;\n\n  // Flag to specify whether :ref:`dynamic metadata\n  // <config_network_filters_mongo_proxy_dynamic_metadata>` should be emitted. Defaults to false.\n  bool emit_dynamic_metadata = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/mysql_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.mysql_proxy.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.mysql_proxy.v3\";\noption java_outer_classname = \"MysqlProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: MySQL proxy]\n// MySQL Proxy :ref:`configuration overview <config_network_filters_mysql_proxy>`.\n// [#extension: envoy.filters.network.mysql_proxy]\n\nmessage MySQLProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy\";\n\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_mysql_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // [#not-implemented-hide:] The optional path to use for writing MySQL access logs.\n  // If the access log field is empty, access logs will not be written.\n  string access_log = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.postgres_proxy.v3alpha;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.postgres_proxy.v3alpha\";\noption java_outer_classname = \"PostgresProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Postgres proxy]\n// Postgres Proxy :ref:`configuration overview\n// <config_network_filters_postgres_proxy>`.\n// [#extension: envoy.filters.network.postgres_proxy]\n\nmessage PostgresProxy {\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_postgres_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Controls whether SQL statements received in Frontend Query messages\n  // are parsed. Parsing is required to produce Postgres proxy filter\n  // metadata. Defaults to true.\n  google.protobuf.BoolValue enable_sql_parsing = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/rate_limit/v2:pkg\",\n        \"//envoy/config/ratelimit/v3:pkg\",\n        \"//envoy/extensions/common/ratelimit/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.ratelimit.v3;\n\nimport \"envoy/config/ratelimit/v3/rls.proto\";\nimport \"envoy/extensions/common/ratelimit/v3/ratelimit.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.ratelimit.v3\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_network_filters_rate_limit>`.\n// [#extension: envoy.filters.network.ratelimit]\n\n// [#next-free-field: 7]\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.rate_limit.v2.RateLimit\";\n\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_rate_limit_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The rate limit domain to use in the rate limit service request.\n  string domain = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The rate limit descriptor list to use in the rate limit service request.\n  repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3\n      [(validate.rules).repeated = {min_items: 1}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 4;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 5;\n\n  // Configuration for an external rate limit service provider. 
If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 6\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rbac/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/rbac/v2:pkg\",\n        \"//envoy/config/rbac/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rbac/v3/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rbac.v3;\n\nimport \"envoy/config/rbac/v3/rbac.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rbac.v3\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_network_filters_rbac>`.\n// [#extension: envoy.filters.network.rbac]\n\n// RBAC network filter config.\n//\n// Header should not be used in rules/shadow_rules in RBAC network filter as\n// this information is only available in :ref:`RBAC http filter <config_http_filters_rbac>`.\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.rbac.v2.RBAC\";\n\n  enum EnforcementType {\n    // Apply RBAC policies when the first byte of data arrives on the connection.\n    ONE_TIME_ON_FIRST_BYTE = 0;\n\n    // Continuously apply RBAC policies as data arrives. Use this mode when\n    // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka,\n    // etc. 
when the protocol decoders emit dynamic metadata such as the\n    // resources being accessed and the operations on the resources.\n    CONTINUOUS = 1;\n  }\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v3.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter but will emit stats and logs\n  // and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v3.RBAC shadow_rules = 2;\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 3 [(validate.rules).string = {min_len: 1}];\n\n  // RBAC enforcement strategy. By default RBAC will be enforced only once\n  // when the first byte of data arrives from the downstream. When used in\n  // conjunction with filters that emit dynamic metadata after decoding\n  // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to\n  // CONTINUOUS to enforce RBAC policies on every message boundary.\n  EnforcementType enforcement_type = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rbac/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/rbac/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/rbac/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rbac.v4alpha;\n\nimport \"envoy/config/rbac/v4alpha/rbac.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rbac.v4alpha\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_network_filters_rbac>`.\n// [#extension: envoy.filters.network.rbac]\n\n// RBAC network filter config.\n//\n// Header should not be used in rules/shadow_rules in RBAC network filter as\n// this information is only available in :ref:`RBAC http filter <config_http_filters_rbac>`.\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rbac.v3.RBAC\";\n\n  enum EnforcementType {\n    // Apply RBAC policies when the first byte of data arrives on the connection.\n    ONE_TIME_ON_FIRST_BYTE = 0;\n\n    // Continuously apply RBAC policies as data arrives. Use this mode when\n    // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka,\n    // etc. 
when the protocol decoders emit dynamic metadata such as the\n    // resources being accessed and the operations on the resources.\n    CONTINUOUS = 1;\n  }\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v4alpha.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter but will emit stats and logs\n  // and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v4alpha.RBAC shadow_rules = 2;\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 3 [(validate.rules).string = {min_len: 1}];\n\n  // RBAC enforcement strategy. By default RBAC will be enforced only once\n  // when the first byte of data arrives from the downstream. When used in\n  // conjunction with filters that emit dynamic metadata after decoding\n  // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to\n  // CONTINUOUS to enforce RBAC policies on every message boundary.\n  EnforcementType enforcement_type = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/redis_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/redis_proxy/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.redis_proxy.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.redis_proxy.v3\";\noption java_outer_classname = \"RedisProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Redis Proxy]\n// Redis Proxy :ref:`configuration overview <config_network_filters_redis_proxy>`.\n// [#extension: envoy.filters.network.redis_proxy]\n\n// [#next-free-field: 9]\nmessage RedisProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.redis_proxy.v2.RedisProxy\";\n\n  // Redis connection pool settings.\n  // [#next-free-field: 9]\n  message ConnPoolSettings {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings\";\n\n    // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently\n    // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data\n    // because replication is asynchronous and requires some delay. You need to ensure that your\n    // application can tolerate stale data.\n    enum ReadPolicy {\n      // Default mode. 
Read from the current primary node.\n      MASTER = 0 [(udpa.annotations.enum_value_migrate).rename = \"PRIMARY\"];\n\n      // Read from the primary, but if it is unavailable, read from replica nodes.\n      PREFER_MASTER = 1 [(udpa.annotations.enum_value_migrate).rename = \"PREFER_PRIMARY\"];\n\n      // Read from replica nodes. If multiple replica nodes are present within a shard, a random\n      // node is selected. Healthy nodes have precedence over unhealthy nodes.\n      REPLICA = 2;\n\n      // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not\n      // present or unhealthy), read from the primary.\n      PREFER_REPLICA = 3;\n\n      // Read from any node of the cluster. A random node is selected among the primary and\n      // replicas, healthy nodes have precedence over unhealthy nodes.\n      ANY = 4;\n    }\n\n    // Per-operation timeout in milliseconds. The timer starts when the first\n    // command of a pipeline is written to the backend connection. Each response received from Redis\n    // resets the timer since it signifies that the next command is being processed by the backend.\n    // The only exception to this behavior is when a connection to a backend is not yet established.\n    // In that case, the connect timeout on the cluster will govern the timeout until the connection\n    // is ready.\n    google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}];\n\n    // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be\n    // forwarded to the same upstream. The hash key used for determining the upstream in a\n    // consistent hash ring configuration will be computed from the hash tagged key instead of the\n    // whole key. 
The algorithm used to compute the hash tag is identical to the `redis-cluster\n    // implementation <https://redis.io/topics/cluster-spec#keys-hash-tags>`_.\n    //\n    // Examples:\n    //\n    // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream\n    // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream\n    bool enable_hashtagging = 2;\n\n    // Accept `moved and ask redirection\n    // <https://redis.io/topics/cluster-spec#redirection-and-resharding>`_ errors from upstream\n    // redis servers, and retry commands to the specified target server. The target server does not\n    // need to be known to the cluster manager. If the command cannot be redirected, then the\n    // original error is passed downstream unchanged. By default, this support is not enabled.\n    bool enable_redirection = 3;\n\n    // Maximum size of encoded request buffer before flush is triggered and encoded requests\n    // are sent upstream. If this is unset, the buffer flushes whenever it receives data\n    // and performs no batching.\n    // This feature makes it possible for multiple clients to send requests to Envoy and have\n    // them batched- for example if one is running several worker processes, each with its own\n    // Redis connection. There is no benefit to using this with a single downstream process.\n    // Recommended size (if enabled) is 1024 bytes.\n    uint32 max_buffer_size_before_flush = 4;\n\n    // The encoded request buffer is flushed N milliseconds after the first request has been\n    // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`.\n    // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise,\n    // the timer should be set according to the number of clients, overall request rate and\n    // desired maximum latency for a single command. 
For example, if there are many requests\n    // being batched together at a high rate, the buffer will likely be filled before the timer\n    // fires. Alternatively, if the request rate is lower the buffer will not be filled as often\n    // before the timer fires.\n    // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter\n    // defaults to 3ms.\n    google.protobuf.Duration buffer_flush_timeout = 5;\n\n    // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts\n    // can be created at any given time by any given worker thread (see `enable_redirection` for\n    // more details). If the host is unknown and a connection cannot be created due to enforcing\n    // this limit, then redirection will fail and the original redirection error will be passed\n    // downstream unchanged. This limit defaults to 100.\n    google.protobuf.UInt32Value max_upstream_unknown_connections = 6;\n\n    // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate\n    // count. These commands are measured in microseconds.\n    bool enable_command_stats = 8;\n\n    // Read policy. The default is to read from the primary.\n    ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  message PrefixRoutes {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes\";\n\n    message Route {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route\";\n\n      // The router is capable of shadowing traffic from one cluster to another. The current\n      // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n      // respond before returning the response from the primary cluster. 
All normal statistics are\n      // collected for the shadow cluster making this feature useful for testing.\n      message RequestMirrorPolicy {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route.\"\n            \"RequestMirrorPolicy\";\n\n        // Specifies the cluster that requests will be mirrored to. The cluster must\n        // exist in the cluster manager configuration.\n        string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n        // If not specified or the runtime key is not present, all requests to the target cluster\n        // will be mirrored.\n        //\n        // If specified, Envoy will lookup the runtime key to get the percentage of requests to the\n        // mirror.\n        config.core.v3.RuntimeFractionalPercent runtime_fraction = 2;\n\n        // Set this to TRUE to only mirror write commands, this is effectively replicating the\n        // writes in a \"fire and forget\" manner.\n        bool exclude_read_commands = 3;\n      }\n\n      // String prefix that must match the beginning of the keys. Envoy will always favor the\n      // longest match.\n      string prefix = 1 [(validate.rules).string = {max_bytes: 1000}];\n\n      // Indicates if the prefix needs to be removed from the key when forwarded.\n      bool remove_prefix = 2;\n\n      // Upstream cluster to forward the command to.\n      string cluster = 3 [(validate.rules).string = {min_len: 1}];\n\n      // Indicates that the route has a request mirroring policy.\n      repeated RequestMirrorPolicy request_mirror_policy = 4;\n    }\n\n    reserved 3;\n\n    reserved \"catch_all_cluster\";\n\n    // List of prefix routes.\n    repeated Route routes = 1;\n\n    // Indicates that prefix matching should be case insensitive.\n    bool case_insensitive = 2;\n\n    // Optional catch-all route to forward commands that doesn't match any of the routes. 
The\n    // catch-all route becomes required when no routes are specified.\n    Route catch_all_route = 4;\n  }\n\n  // RedisFault defines faults used for fault injection.\n  message RedisFault {\n    enum RedisFaultType {\n      // Delays requests. This is the base fault; other faults can have delays added.\n      DELAY = 0;\n\n      // Returns errors on requests.\n      ERROR = 1;\n    }\n\n    // Fault type.\n    RedisFaultType fault_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // Percentage of requests fault applies to.\n    config.core.v3.RuntimeFractionalPercent fault_enabled = 2\n        [(validate.rules).message = {required: true}];\n\n    // Delay for all faults. If not set, defaults to zero\n    google.protobuf.Duration delay = 3;\n\n    // Commands fault is restricted to, if any. If not set, fault applies to all commands\n    // other than auth and ping (due to special handling of those commands in Envoy).\n    repeated string commands = 4;\n  }\n\n  reserved 2;\n\n  reserved \"cluster\";\n\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_redis_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Network settings for the connection pool to the upstream clusters.\n  ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}];\n\n  // Indicates that latency stat should be computed in microseconds. By default it is computed in\n  // milliseconds. This does not apply to upstream command stats currently.\n  bool latency_in_micros = 4;\n\n  // List of **unique** prefixes used to separate keys from different workloads to different\n  // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all\n  // cluster can be used to forward commands when there is no match. Time complexity of the\n  // lookups are in O(min(longest key prefix, key length)).\n  //\n  // Example:\n  //\n  // .. 
code-block:: yaml\n  //\n  //    prefix_routes:\n  //      routes:\n  //        - prefix: \"ab\"\n  //          cluster: \"cluster_a\"\n  //        - prefix: \"abc\"\n  //          cluster: \"cluster_b\"\n  //\n  // When using the above routes, the following prefixes would be sent to:\n  //\n  // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b.\n  // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a.\n  // * ``get z:users`` would return a NoUpstreamHost error. A :ref:`catch-all\n  //   route<envoy_api_field_extensions.filters.network.redis_proxy.v3.RedisProxy.PrefixRoutes.catch_all_route>`\n  //   would have retrieved the key from that cluster instead.\n  //\n  // See the :ref:`configuration section\n  // <arch_overview_redis_configuration>` of the architecture overview for recommendations on\n  // configuring the backing clusters.\n  PrefixRoutes prefix_routes = 5;\n\n  // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis\n  // AUTH command <https://redis.io/commands/auth>`_ with this password before enabling any other\n  // command. If an AUTH command's password matches this password, an \"OK\" response will be returned\n  // to the client. If the AUTH command password does not match this password, then an \"ERR invalid\n  // password\" error will be returned. If any other command is received before AUTH when this\n  // password is set, then a \"NOAUTH Authentication required.\" error response will be sent to the\n  // client. If an AUTH command is received when the password is not set, then an \"ERR Client sent\n  // AUTH, but no password is set\" error will be returned.\n  config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true];\n\n  // List of faults to inject. Faults currently come in two flavors:\n  // - Delay, which delays a request.\n  // - Error, which responds to a request with an error. 
Errors can also have delays attached.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    faults:\n  //    - fault_type: ERROR\n  //      fault_enabled:\n  //        default_value:\n  //          numerator: 10\n  //          denominator: HUNDRED\n  //        runtime_key: \"bogus_key\"\n  //      commands:\n  //      - GET\n  //    - fault_type: DELAY\n  //      fault_enabled:\n  //        default_value:\n  //          numerator: 10\n  //          denominator: HUNDRED\n  //        runtime_key: \"bogus_key\"\n  //      delay: 2s\n  //\n  // See the :ref:`fault injection section\n  // <config_network_filters_redis_proxy_fault_injection>` for more information on how to configure this.\n  repeated RedisFault faults = 8;\n\n  // If a username is provided an ACL style AUTH command will be required with a username and password.\n  // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis\n  // AUTH command <https://redis.io/commands/auth>`_ with this username and the *downstream_auth_password*\n  // before enabling any other command. If an AUTH command's username and password matches this username\n  // and the *downstream_auth_password*, an \"OK\" response will be returned to the client. If the AUTH\n  // command username or password does not match this username or the *downstream_auth_password*, then a\n  // \"WRONGPASS invalid username-password pair\" error will be returned. If any other command is received before AUTH when this\n  // password is set, then a \"NOAUTH Authentication required.\" error response will be sent to the\n  // client. If an AUTH command is received when the password is not set, then an \"ERR Client sent\n  // AUTH, but no ACL is set\" error will be returned.\n  config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true];\n}\n\n// RedisProtocolOptions specifies Redis upstream protocol options. 
This object is used in\n// :ref:`typed_extension_protocol_options<envoy_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.redis_proxy`.\nmessage RedisProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions\";\n\n  // Upstream server password as defined by the `requirepass directive\n  // <https://redis.io/topics/config>`_ in the server's configuration file.\n  config.core.v3.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true];\n\n  // Upstream server username as defined by the `user directive\n  // <https://redis.io/topics/acl>`_ in the server's configuration file.\n  config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md",
    "content": "Protocol buffer definitions for the RocketMQ proxy."
  },
  {
    "path": "api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rocketmq_proxy.v3;\n\nimport \"envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3\";\noption java_outer_classname = \"RocketmqProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: RocketMQ Proxy]\n// RocketMQ Proxy :ref:`configuration overview <config_network_filters_rocketmq_proxy>`.\n// [#extension: envoy.filters.network.rocketmq_proxy]\n\nmessage RocketmqProxy {\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The route table for the connection manager is specified in this property.\n  RouteConfiguration route_config = 2;\n\n  // The largest duration transient object expected to live, more than 10s is recommended.\n  google.protobuf.Duration transient_object_life_span = 3;\n\n  // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting\n  // facility without considering backward compatibility of existing RocketMQ client SDK.\n  bool develop_mode = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rocketmq_proxy.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rocketmq Proxy Route Configuration]\n// Rocketmq Proxy :ref:`configuration overview <config_network_filters_rocketmq_proxy>`.\n\nmessage RouteConfiguration {\n  // The name of the route configuration.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  // The name of the topic.\n  type.matcher.v3.StringMatcher topic = 1 [(validate.rules).message = {required: true}];\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. 
A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated config.route.v3.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  // Indicates the upstream cluster to which the request should be routed.\n  string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer.\n  config.core.v3.Metadata metadata_match = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rocketmq_proxy.v4alpha;\n\nimport \"envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha\";\noption java_outer_classname = \"RocketmqProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: RocketMQ Proxy]\n// RocketMQ Proxy :ref:`configuration overview <config_network_filters_rocketmq_proxy>`.\n// [#extension: envoy.filters.network.rocketmq_proxy]\n\nmessage RocketmqProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\";\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The route table for the connection manager is specified in this property.\n  RouteConfiguration route_config = 2;\n\n  // The largest duration transient object expected to live, more than 10s is recommended.\n  google.protobuf.Duration transient_object_life_span = 3;\n\n  // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting\n  // facility without considering backward compatibility of existing RocketMQ client SDK.\n  bool develop_mode = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rocketmq_proxy.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Rocketmq Proxy Route Configuration]\n// Rocketmq Proxy :ref:`configuration overview <config_network_filters_rocketmq_proxy>`.\n\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.RouteConfiguration\";\n\n  // The name of the route configuration.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.RouteMatch\";\n\n  // The name of the topic.\n  type.matcher.v4alpha.StringMatcher topic = 1 [(validate.rules).message = {required: true}];\n\n  // Specifies a set of headers that the route should match on. 
The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated config.route.v4alpha.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction\";\n\n  // Indicates the upstream cluster to which the request should be routed.\n  string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer.\n  config.core.v4alpha.Metadata metadata_match = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/sni_cluster/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/sni_cluster/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.sni_cluster.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.sni_cluster.v3\";\noption java_outer_classname = \"SniClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: SNI Cluster Filter]\n// Set the upstream cluster name from the SNI field in the TLS connection.\n// [#extension: envoy.filters.network.sni_cluster]\n\nmessage SniCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.sni_cluster.v2.SniCluster\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha;\n\nimport \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha\";\noption java_outer_classname = \"SniDynamicForwardProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: SNI dynamic forward proxy]\n\n// Configuration for the SNI-based dynamic forward proxy filter. See the\n// :ref:`architecture overview <arch_overview_http_dynamic_forward_proxy>` for\n// more information. Note this filter must be configured along with\n// :ref:`TLS inspector listener filter <config_listener_filters_tls_inspector>`\n// to work.\n// [#extension: envoy.filters.network.sni_dynamic_forward_proxy]\nmessage FilterConfig {\n  // The DNS cache configuration that the filter will attach to. Note this\n  // configuration must match that of associated :ref:`dynamic forward proxy\n  // cluster configuration\n  // <envoy_api_field_extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n\n  oneof port_specifier {\n    // The port number to connect to the upstream.\n    uint32 port_value = 2 [(validate.rules).uint32 = {lte: 65535 gt: 0}];\n  }\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/tcp_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/tcp_proxy/v2:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.tcp_proxy.v3;\n\nimport \"envoy/config/accesslog/v3/accesslog.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/hash_policy.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v3\";\noption java_outer_classname = \"TcpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: TCP Proxy]\n// TCP Proxy :ref:`configuration overview <config_network_filters_tcp_proxy>`.\n// [#extension: envoy.filters.network.tcp_proxy]\n\n// [#next-free-field: 14]\nmessage TcpProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy\";\n\n  // Allows for specification of multiple upstream clusters along with weights\n  // that indicate the percentage of traffic to be forwarded to each cluster.\n  // The router selects an upstream cluster based on these weights.\n  message WeightedCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster\";\n\n    message ClusterWeight {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight\";\n\n      // Name of the upstream cluster.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n\n      // When a request matches the route, the choice of an upstream cluster is\n      // determined by its weight. 
The sum of weights across all entries in the\n      // clusters array determines the total weight.\n      uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n      // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n      // in the upstream cluster with metadata matching what is set in this field will be considered\n      // for load balancing. Note that this will be merged with what's provided in\n      // :ref:`TcpProxy.metadata_match\n      // <envoy_api_field_extensions.filters.network.tcp_proxy.v3.TcpProxy.metadata_match>`, with values\n      // here taking precedence. The filter name should be specified as *envoy.lb*.\n      config.core.v3.Metadata metadata_match = 3;\n    }\n\n    // Specifies one or more upstream clusters associated with the route.\n    repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Configuration for tunneling TCP over other transports or application layers.\n  // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will\n  // remain the default.\n  message TunnelingConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.TunnelingConfig\";\n\n    // The hostname to send in the synthesized CONNECT headers to the upstream proxy.\n    string hostname = 1 [(validate.rules).string = {min_len: 1}];\n  }\n\n  reserved 6;\n\n  reserved \"deprecated_v1\";\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_tcp_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2;\n\n    // Multiple upstream clusters can be specified for a given route. 
The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 10;\n  }\n\n  // Optional endpoint metadata match criteria. Only endpoints in the upstream\n  // cluster with metadata matching that set in metadata_match will be\n  // considered. The filter name should be specified as *envoy.lb*.\n  config.core.v3.Metadata metadata_match = 9;\n\n  // The idle timeout for connections managed by the TCP proxy filter. The idle timeout\n  // is defined as the period in which there are no bytes sent or received on either\n  // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set\n  // to 0s, the timeout will be disabled.\n  //\n  // .. warning::\n  //   Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 8;\n\n  // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy\n  // filter. The idle timeout is defined as the period in which there is no\n  // active traffic. If not set, there is no idle timeout. When the idle timeout\n  // is reached the connection will be closed. The distinction between\n  // downstream_idle_timeout/upstream_idle_timeout provides a means to set\n  // timeout based on the last byte sent on the downstream/upstream connection.\n  google.protobuf.Duration downstream_idle_timeout = 3;\n\n  // [#not-implemented-hide:]\n  google.protobuf.Duration upstream_idle_timeout = 4;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by the this tcp_proxy.\n  repeated config.accesslog.v3.AccessLog access_log = 5;\n\n  // The maximum number of unsuccessful connection attempts that will be made before\n  // giving up. 
If the parameter is not specified, 1 connection attempt will be made.\n  google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based\n  // load balancing algorithms will select a host randomly. Currently the number of hash policies is\n  // limited to 1.\n  repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}];\n\n  // [#not-implemented-hide:] feature in progress\n  // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP\n  // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload\n  // will be proxied upstream as per usual.\n  TunnelingConfig tunneling_config = 12;\n\n  // The maximum duration of a connection. The duration is defined as the period since a connection\n  // was established. If not set, there is no max duration. When max_downstream_connection_duration\n  // is reached the connection will be closed. Duration must be at least 1ms.\n  google.protobuf.Duration max_downstream_connection_duration = 13\n      [(validate.rules).duration = {gte {nanos: 1000000}}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/tcp_proxy/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.tcp_proxy.v4alpha;\n\nimport \"envoy/config/accesslog/v4alpha/accesslog.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/v3/hash_policy.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v4alpha\";\noption java_outer_classname = \"TcpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: TCP Proxy]\n// TCP Proxy :ref:`configuration overview <config_network_filters_tcp_proxy>`.\n// [#extension: envoy.filters.network.tcp_proxy]\n\n// [#next-free-field: 14]\nmessage TcpProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\";\n\n  // Allows for specification of multiple upstream clusters along with weights\n  // that indicate the percentage of traffic to be forwarded to each cluster.\n  // The router selects an upstream cluster based on these weights.\n  message WeightedCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster\";\n\n    message ClusterWeight {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight\";\n\n      // Name of the upstream cluster.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n\n      // When a request matches the route, the choice of an upstream cluster is\n      // determined by its weight. 
The sum of weights across all entries in the\n      // clusters array determines the total weight.\n      uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n      // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n      // in the upstream cluster with metadata matching what is set in this field will be considered\n      // for load balancing. Note that this will be merged with what's provided in\n      // :ref:`TcpProxy.metadata_match\n      // <envoy_api_field_extensions.filters.network.tcp_proxy.v4alpha.TcpProxy.metadata_match>`, with values\n      // here taking precedence. The filter name should be specified as *envoy.lb*.\n      config.core.v4alpha.Metadata metadata_match = 3;\n    }\n\n    // Specifies one or more upstream clusters associated with the route.\n    repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Configuration for tunneling TCP over other transports or application layers.\n  // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will\n  // remain the default.\n  message TunnelingConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig\";\n\n    // The hostname to send in the synthesized CONNECT headers to the upstream proxy.\n    string hostname = 1 [(validate.rules).string = {min_len: 1}];\n  }\n\n  reserved 6;\n\n  reserved \"deprecated_v1\";\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_tcp_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2;\n\n    // Multiple upstream clusters can be specified for a given route. 
The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 10;\n  }\n\n  // Optional endpoint metadata match criteria. Only endpoints in the upstream\n  // cluster with metadata matching that set in metadata_match will be\n  // considered. The filter name should be specified as *envoy.lb*.\n  config.core.v4alpha.Metadata metadata_match = 9;\n\n  // The idle timeout for connections managed by the TCP proxy filter. The idle timeout\n  // is defined as the period in which there are no bytes sent or received on either\n  // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set\n  // to 0s, the timeout will be disabled.\n  //\n  // .. warning::\n  //   Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 8;\n\n  // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy\n  // filter. The idle timeout is defined as the period in which there is no\n  // active traffic. If not set, there is no idle timeout. When the idle timeout\n  // is reached the connection will be closed. The distinction between\n  // downstream_idle_timeout/upstream_idle_timeout provides a means to set\n  // timeout based on the last byte sent on the downstream/upstream connection.\n  google.protobuf.Duration downstream_idle_timeout = 3;\n\n  // [#not-implemented-hide:]\n  google.protobuf.Duration upstream_idle_timeout = 4;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by this tcp_proxy.\n  repeated config.accesslog.v4alpha.AccessLog access_log = 5;\n\n  // The maximum number of unsuccessful connection attempts that will be made before\n  // giving up. 
If the parameter is not specified, 1 connection attempt will be made.\n  google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based\n  // load balancing algorithms will select a host randomly. Currently the number of hash policies is\n  // limited to 1.\n  repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}];\n\n  // [#not-implemented-hide:] feature in progress\n  // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP\n  // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload\n  // will be proxied upstream as per usual.\n  TunnelingConfig tunneling_config = 12;\n\n  // The maximum duration of a connection. The duration is defined as the period since a connection\n  // was established. If not set, there is no max duration. When max_downstream_connection_duration\n  // is reached the connection will be closed. Duration must be at least 1ms.\n  google.protobuf.Duration max_downstream_connection_duration = 13\n      [(validate.rules).duration = {gte {nanos: 1000000}}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg\",\n        \"//envoy/config/ratelimit/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3;\n\nimport \"envoy/config/ratelimit/v3/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_thrift_filters_rate_limit>`.\n// [#extension: envoy.filters.thrift.ratelimit]\n\n// [#next-free-field: 6]\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.thrift.rate_limit.v2alpha1.RateLimit\";\n\n  // The rate limit domain to use in the rate limit service request.\n  string domain = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Specifies the rate limit configuration stage. Each configured rate limit filter performs a\n  // rate limit check using descriptors configured in the\n  // :ref:`envoy_api_msg_extensions.filters.network.thrift_proxy.v3.RouteAction` for the request.\n  // Only those entries with a matching stage number are used for a given filter. If not set, the\n  // default stage number is 0.\n  //\n  // .. note::\n  //\n  //  The filter supports a range of 0 - 10 inclusively for stage numbers.\n  uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 3;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. 
When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 4;\n\n  // Configuration for an external rate limit service provider. If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 5\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/thrift_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/thrift_proxy/v3/README.md",
    "content": "Protocol buffer definitions for the Thrift proxy.\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Thrift Proxy Route Configuration]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.RouteConfiguration\";\n\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.RouteMatch\";\n\n  oneof match_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route must exactly match the request method name. 
As a special case, an\n    // empty string matches any request method name.\n    string method_name = 1;\n\n    // If specified, the route must have the service name as the request method name prefix. As a\n    // special case, an empty string matches any service name. Only relevant when service\n    // multiplexing.\n    string service_name = 2;\n  }\n\n  // Inverts whatever matching is done in the :ref:`method_name\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v3.RouteMatch.method_name>` or\n  // :ref:`service_name\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v3.RouteMatch.service_name>` fields.\n  // Cannot be combined with wildcard matching as that would result in routes never being matched.\n  //\n  // .. note::\n  //\n  //   This does not invert matching done as part of the :ref:`headers field\n  //   <envoy_api_field_extensions.filters.network.thrift_proxy.v3.RouteMatch.headers>` field. To\n  //   invert header matching, see :ref:`invert_match\n  //   <envoy_api_field_config.route.v3.HeaderMatcher.invert_match>`.\n  bool invert = 3;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config). 
Note that this only applies for Thrift transports and/or\n  // protocols that support headers.\n  repeated config.route.v3.HeaderMatcher headers = 4;\n}\n\n// [#next-free-field: 7]\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates a single upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 2;\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // Thrift header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist Envoy will\n    // respond with an unknown method exception or an internal error exception,\n    // respectively.\n    string cluster_header = 6\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n  // the upstream cluster with metadata matching what is set in this field will be considered.\n  // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight.metadata_match>`,\n  // with values there taking precedence. Keys and values should be provided under the \"envoy.lb\"\n  // metadata key.\n  config.core.v3.Metadata metadata_match = 3;\n\n  // Specifies a set of rate limit configurations that could be applied to the route.\n  // N.B. 
Thrift service or method name matching can be achieved by specifying a RequestHeaders\n  // action with the header name \":method-name\".\n  repeated config.route.v3.RateLimit rate_limits = 4;\n\n  // Strip the service prefix from the method name, if there's a prefix. For\n  // example, the method call Service:method would end up being just method.\n  bool strip_service_name = 5;\n}\n\n// Allows for specification of multiple upstream clusters along with weights that indicate the\n// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster\n// based on these weights.\nmessage WeightedCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster\";\n\n  message ClusterWeight {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight\";\n\n    // Name of the upstream cluster.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // When a request matches the route, the choice of an upstream cluster is determined by its\n    // weight. The sum of weights across all entries in the clusters array determines the total\n    // weight.\n    google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field, combined with what's\n    // provided in :ref:`RouteAction's metadata_match\n    // <envoy_api_field_extensions.filters.network.thrift_proxy.v3.RouteAction.metadata_match>`,\n    // will be considered. Values here will take precedence. 
Keys and values should be provided\n    // under the \"envoy.lb\" metadata key.\n    config.core.v3.Metadata metadata_match = 3;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.v3;\n\nimport \"envoy/extensions/filters/network/thrift_proxy/v3/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3\";\noption java_outer_classname = \"ThriftProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Thrift Proxy]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n// [#extension: envoy.filters.network.thrift_proxy]\n\n// Thrift transport types supported by Envoy.\nenum TransportType {\n  // For downstream connections, the Thrift proxy will attempt to determine which transport to use.\n  // For upstream connections, the Thrift proxy will use the same transport as the downstream\n  // connection.\n  AUTO_TRANSPORT = 0;\n\n  // The Thrift proxy will use the Thrift framed transport.\n  FRAMED = 1;\n\n  // The Thrift proxy will use the Thrift unframed transport.\n  UNFRAMED = 2;\n\n  // The Thrift proxy will assume the client is using the Thrift header transport.\n  HEADER = 3;\n}\n\n// Thrift Protocol types supported by Envoy.\nenum ProtocolType {\n  // For downstream connections, the Thrift proxy will attempt to determine which protocol to use.\n  // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol\n  // detection. 
For upstream connections, the Thrift proxy will use the same protocol as the\n  // downstream connection.\n  AUTO_PROTOCOL = 0;\n\n  // The Thrift proxy will use the Thrift binary protocol.\n  BINARY = 1;\n\n  // The Thrift proxy will use Thrift non-strict binary protocol.\n  LAX_BINARY = 2;\n\n  // The Thrift proxy will use the Thrift compact protocol.\n  COMPACT = 3;\n\n  // The Thrift proxy will use the Thrift \"Twitter\" protocol implemented by the finagle library.\n  TWITTER = 4;\n}\n\n// [#next-free-field: 6]\nmessage ThriftProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProxy\";\n\n  // Supplies the type of transport that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v3.TransportType.AUTO_TRANSPORT>`.\n  TransportType transport = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v3.ProtocolType.AUTO_PROTOCOL>`.\n  ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  RouteConfiguration route_config = 4;\n\n  // A list of individual Thrift filters that make up the filter chain for requests made to the\n  // Thrift proxy. Order matters as the filters are processed sequentially. 
For backwards\n  // compatibility, if no thrift_filters are specified, a default Thrift router filter\n  // (`envoy.filters.thrift.router`) is used.\n  repeated ThriftFilter thrift_filters = 5;\n}\n\n// ThriftFilter configures a Thrift filter.\nmessage ThriftFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftFilter\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the filter to instantiate. The name must match a supported\n  // filter. The built-in filters are:\n  //\n  // [#comment:TODO(zuercher): Auto generate the following list]\n  // * :ref:`envoy.filters.thrift.router <config_thrift_filters_router>`\n  // * :ref:`envoy.filters.thrift.rate_limit <config_thrift_filters_rate_limit>`\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated. See the supported\n  // filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// ThriftProtocolOptions specifies Thrift upstream protocol options. 
This object is used in\n// :ref:`typed_extension_protocol_options<envoy_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.thrift_proxy`.\nmessage ThriftProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProtocolOptions\";\n\n  // Supplies the type of transport that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v3.TransportType.AUTO_TRANSPORT>`,\n  // which is the default, causes the proxy to use the same transport as the downstream connection.\n  TransportType transport = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v3.ProtocolType.AUTO_PROTOCOL>`,\n  // which is the default, causes the proxy to use the same protocol as the downstream connection.\n  ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/thrift_proxy/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Thrift Proxy Route Configuration]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.RouteConfiguration\";\n\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.RouteMatch\";\n\n  oneof match_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route must exactly match the request method name. 
As a special case, an\n    // empty string matches any request method name.\n    string method_name = 1;\n\n    // If specified, the route must have the service name as the request method name prefix. As a\n    // special case, an empty string matches any service name. Only relevant when service\n    // multiplexing.\n    string service_name = 2;\n  }\n\n  // Inverts whatever matching is done in the :ref:`method_name\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.RouteMatch.method_name>` or\n  // :ref:`service_name\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.RouteMatch.service_name>` fields.\n  // Cannot be combined with wildcard matching as that would result in routes never being matched.\n  //\n  // .. note::\n  //\n  //   This does not invert matching done as part of the :ref:`headers field\n  //   <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.RouteMatch.headers>` field. To\n  //   invert header matching, see :ref:`invert_match\n  //   <envoy_api_field_config.route.v4alpha.HeaderMatcher.invert_match>`.\n  bool invert = 3;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config). 
Note that this only applies for Thrift transports and/or\n  // protocols that support headers.\n  repeated config.route.v4alpha.HeaderMatcher headers = 4;\n}\n\n// [#next-free-field: 7]\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.RouteAction\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates a single upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 2;\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // Thrift header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist Envoy will\n    // respond with an unknown method exception or an internal error exception,\n    // respectively.\n    string cluster_header = 6\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n  // the upstream cluster with metadata matching what is set in this field will be considered.\n  // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.WeightedCluster.ClusterWeight.metadata_match>`,\n  // with values there taking precedence. Keys and values should be provided under the \"envoy.lb\"\n  // metadata key.\n  config.core.v4alpha.Metadata metadata_match = 3;\n\n  // Specifies a set of rate limit configurations that could be applied to the route.\n  // N.B. 
Thrift service or method name matching can be achieved by specifying a RequestHeaders\n  // action with the header name \":method-name\".\n  repeated config.route.v4alpha.RateLimit rate_limits = 4;\n\n  // Strip the service prefix from the method name, if there's a prefix. For\n  // example, the method call Service:method would end up being just method.\n  bool strip_service_name = 5;\n}\n\n// Allows for specification of multiple upstream clusters along with weights that indicate the\n// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster\n// based on these weights.\nmessage WeightedCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster\";\n\n  message ClusterWeight {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight\";\n\n    // Name of the upstream cluster.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // When a request matches the route, the choice of an upstream cluster is determined by its\n    // weight. The sum of weights across all entries in the clusters array determines the total\n    // weight.\n    google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field, combined with what's\n    // provided in :ref:`RouteAction's metadata_match\n    // <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.RouteAction.metadata_match>`,\n    // will be considered. Values here will take precedence. 
Keys and values should be provided\n    // under the \"envoy.lb\" metadata key.\n    config.core.v4alpha.Metadata metadata_match = 3;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.v4alpha;\n\nimport \"envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha\";\noption java_outer_classname = \"ThriftProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Thrift Proxy]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n// [#extension: envoy.filters.network.thrift_proxy]\n\n// Thrift transport types supported by Envoy.\nenum TransportType {\n  // For downstream connections, the Thrift proxy will attempt to determine which transport to use.\n  // For upstream connections, the Thrift proxy will use the same transport as the downstream\n  // connection.\n  AUTO_TRANSPORT = 0;\n\n  // The Thrift proxy will use the Thrift framed transport.\n  FRAMED = 1;\n\n  // The Thrift proxy will use the Thrift unframed transport.\n  UNFRAMED = 2;\n\n  // The Thrift proxy will assume the client is using the Thrift header transport.\n  HEADER = 3;\n}\n\n// Thrift Protocol types supported by Envoy.\nenum ProtocolType {\n  // For downstream connections, the Thrift proxy will attempt to determine which protocol to use.\n  // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol\n  // detection. 
For upstream connections, the Thrift proxy will use the same protocol as the\n  // downstream connection.\n  AUTO_PROTOCOL = 0;\n\n  // The Thrift proxy will use the Thrift binary protocol.\n  BINARY = 1;\n\n  // The Thrift proxy will use Thrift non-strict binary protocol.\n  LAX_BINARY = 2;\n\n  // The Thrift proxy will use the Thrift compact protocol.\n  COMPACT = 3;\n\n  // The Thrift proxy will use the Thrift \"Twitter\" protocol implemented by the finagle library.\n  TWITTER = 4;\n}\n\n// [#next-free-field: 6]\nmessage ThriftProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\";\n\n  // Supplies the type of transport that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v4alpha.TransportType.AUTO_TRANSPORT>`.\n  TransportType transport = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v4alpha.ProtocolType.AUTO_PROTOCOL>`.\n  ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  RouteConfiguration route_config = 4;\n\n  // A list of individual Thrift filters that make up the filter chain for requests made to the\n  // Thrift proxy. Order matters as the filters are processed sequentially. 
For backwards\n  // compatibility, if no thrift_filters are specified, a default Thrift router filter\n  // (`envoy.filters.thrift.router`) is used.\n  repeated ThriftFilter thrift_filters = 5;\n}\n\n// ThriftFilter configures a Thrift filter.\nmessage ThriftFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.ThriftFilter\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the filter to instantiate. The name must match a supported\n  // filter. The built-in filters are:\n  //\n  // [#comment:TODO(zuercher): Auto generate the following list]\n  // * :ref:`envoy.filters.thrift.router <config_thrift_filters_router>`\n  // * :ref:`envoy.filters.thrift.rate_limit <config_thrift_filters_rate_limit>`\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated. See the supported\n  // filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// ThriftProtocolOptions specifies Thrift upstream protocol options. 
This object is used in\n// :ref:`typed_extension_protocol_options<envoy_api_field_config.cluster.v4alpha.Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.thrift_proxy`.\nmessage ThriftProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions\";\n\n  // Supplies the type of transport that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v4alpha.TransportType.AUTO_TRANSPORT>`,\n  // which is the default, causes the proxy to use the same transport as the downstream connection.\n  TransportType transport = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v4alpha.ProtocolType.AUTO_PROTOCOL>`,\n  // which is the default, causes the proxy to use the same protocol as the downstream connection.\n  ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.wasm.v3;\n\nimport \"envoy/extensions/wasm/v3/wasm.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm]\n// [#extension: envoy.filters.network.wasm]\n// Wasm :ref:`configuration overview <config_network_filters_wasm>`.\n\nmessage Wasm {\n  // General Plugin configuration.\n  envoy.extensions.wasm.v3.PluginConfig config = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.zookeeper_proxy.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.zookeeper_proxy.v3\";\noption java_outer_classname = \"ZookeeperProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: ZooKeeper proxy]\n// ZooKeeper Proxy :ref:`configuration overview <config_network_filters_zookeeper_proxy>`.\n// [#extension: envoy.filters.network.zookeeper_proxy]\n\nmessage ZooKeeperProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.zookeeper_proxy.v1alpha1.ZooKeeperProxy\";\n\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_zookeeper_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs.\n  // If the access log field is empty, access logs will not be written.\n  string access_log = 2;\n\n  // Messages — requests, responses and events — that are bigger than this value will\n  // be ignored. If it is not set, the default value is 1Mb.\n  //\n  // The value here should match the jute.maxbuffer property in your cluster configuration:\n  //\n  // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options\n  //\n  // if that is set. If it isn't, ZooKeeper's default is also 1Mb.\n  google.protobuf.UInt32Value max_packet_bytes = 3;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/dns/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.udp.dns_filter.v3alpha;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/data/dns/v3/dns_table.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha\";\noption java_outer_classname = \"DnsFilterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: DNS Filter]\n// DNS Filter :ref:`configuration overview <config_udp_listener_filters_dns_filter>`.\n// [#extension: envoy.filters.udp_listener.dns_filter]\n\n// Configuration for the DNS filter.\nmessage DnsFilterConfig {\n  // This message contains the configuration for the DNS Filter operating\n  // in a server context. This message will contain the virtual hosts and\n  // associated addresses with which Envoy will respond to queries\n  message ServerContextConfig {\n    oneof config_source {\n      option (validate.required) = true;\n\n      // Load the configuration specified from the control plane\n      data.dns.v3.DnsTable inline_dns_table = 1;\n\n      // Seed the filter configuration from an external path. This source\n      // is a yaml formatted file that contains the DnsTable driving Envoy's\n      // responses to DNS queries\n      config.core.v3.DataSource external_dns_table = 2;\n    }\n  }\n\n  // This message contains the configuration for the DNS Filter operating\n  // in a client context. 
This message will contain the timeouts, retry,\n  // and forwarding configuration for Envoy to make DNS requests to other\n  // resolvers\n  message ClientContextConfig {\n    // Sets the maximum time we will wait for the upstream query to complete\n    // We allow 5s for the upstream resolution to complete, so the minimum\n    // value here is 1. Note that the total latency for a failed query is the\n    // number of retries multiplied by the resolver_timeout.\n    google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}];\n\n    // A list of DNS servers to which we can forward queries. If not\n    // specified, Envoy will use the ambient DNS resolvers in the\n    // system.\n    repeated config.core.v3.Address upstream_resolvers = 2;\n\n    // Controls how many outstanding external lookup contexts the filter tracks.\n    // The context structure allows the filter to respond to every query even if the external\n    // resolution times out or is otherwise unsuccessful\n    uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}];\n  }\n\n  // The stat prefix used when emitting DNS filter statistics\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Server context configuration contains the data that the filter uses to respond\n  // to DNS requests.\n  ServerContextConfig server_config = 2;\n\n  // Client context configuration controls Envoy's behavior when it must use external\n  // resolvers to answer a query. This object is optional and if omitted instructs\n  // the filter to resolve queries from the data in the server_config\n  ClientContextConfig client_config = 3;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/data/dns/v4alpha:pkg\",\n        \"//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.udp.dns_filter.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/data/dns/v4alpha/dns_table.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v4alpha\";\noption java_outer_classname = \"DnsFilterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: DNS Filter]\n// DNS Filter :ref:`configuration overview <config_udp_listener_filters_dns_filter>`.\n// [#extension: envoy.filters.udp_listener.dns_filter]\n\n// Configuration for the DNS filter.\nmessage DnsFilterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig\";\n\n  // This message contains the configuration for the DNS Filter operating\n  // in a server context. This message will contain the virtual hosts and\n  // associated addresses with which Envoy will respond to queries\n  message ServerContextConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ServerContextConfig\";\n\n    oneof config_source {\n      option (validate.required) = true;\n\n      // Load the configuration specified from the control plane\n      data.dns.v4alpha.DnsTable inline_dns_table = 1;\n\n      // Seed the filter configuration from an external path. 
This source\n      // is a yaml formatted file that contains the DnsTable driving Envoy's\n      // responses to DNS queries\n      config.core.v4alpha.DataSource external_dns_table = 2;\n    }\n  }\n\n  // This message contains the configuration for the DNS Filter operating\n  // in a client context. This message will contain the timeouts, retry,\n  // and forwarding configuration for Envoy to make DNS requests to other\n  // resolvers\n  message ClientContextConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ClientContextConfig\";\n\n    // Sets the maximum time we will wait for the upstream query to complete\n    // We allow 5s for the upstream resolution to complete, so the minimum\n    // value here is 1. Note that the total latency for a failed query is the\n    // number of retries multiplied by the resolver_timeout.\n    google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}];\n\n    // A list of DNS servers to which we can forward queries. If not\n    // specified, Envoy will use the ambient DNS resolvers in the\n    // system.\n    repeated config.core.v4alpha.Address upstream_resolvers = 2;\n\n    // Controls how many outstanding external lookup contexts the filter tracks.\n    // The context structure allows the filter to respond to every query even if the external\n    // resolution times out or is otherwise unsuccessful\n    uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}];\n  }\n\n  // The stat prefix used when emitting DNS filter statistics\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Server context configuration contains the data that the filter uses to respond\n  // to DNS requests.\n  ServerContextConfig server_config = 2;\n\n  // Client context configuration controls Envoy's behavior when it must use external\n  // resolvers to answer a query. 
This object is optional and if omitted instructs\n  // the filter to resolve queries from the data in the server_config\n  ClientContextConfig client_config = 3;\n}\n"
  },
  {
    "path": "api/envoy/extensions/filters/udp/udp_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/udp/udp_proxy/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.udp.udp_proxy.v3;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.udp.udp_proxy.v3\";\noption java_outer_classname = \"UdpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: UDP proxy]\n// UDP proxy :ref:`configuration overview <config_udp_listener_filters_udp_proxy>`.\n// [#extension: envoy.filters.udp_listener.udp_proxy]\n\n// Configuration for the UDP proxy filter.\n// [#next-free-field: 6]\nmessage UdpProxyConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig\";\n\n  // Specifies the UDP hash policy.\n  // The packets can be routed by hash policy.\n  message HashPolicy {\n    oneof policy_specifier {\n      option (validate.required) = true;\n\n      // The source IP will be used to compute the hash used by hash-based load balancing algorithms.\n      bool source_ip = 1 [(validate.rules).bool = {const: true}];\n    }\n  }\n\n  // The stat prefix used when emitting UDP proxy filter stats.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by\n  // the session. 
The default if not specified is 1 minute.\n  google.protobuf.Duration idle_timeout = 3;\n\n  // Use the remote downstream IP address as the sender IP address when sending packets to upstream hosts.\n  // This option requires Envoy to be run with the *CAP_NET_ADMIN* capability on Linux.\n  // And the IPv6 stack must be enabled on Linux kernel.\n  // This option does not preserve the remote downstream port.\n  // If this option is enabled, the IP address of sent datagrams will be changed to the remote downstream IP address.\n  // This means that Envoy will not receive packets that are sent by upstream hosts because the upstream hosts\n  // will send the packets with the remote downstream IP address as the destination. All packets will be routed\n  // to the remote downstream directly if there are route rules on the upstream host side.\n  // There are two options to return the packets back to the remote downstream.\n  // The first one is to use DSR (Direct Server Return).\n  // The other one is to configure routing rules on the upstream hosts to forward\n  // all packets back to Envoy and configure iptables rules on the host running Envoy to\n  // forward all packets from upstream hosts to the Envoy process so that Envoy can forward the packets to the downstream.\n  // If the platform does not support this option, Envoy will raise a configuration error.\n  bool use_original_src_ip = 4;\n\n  // Optional configuration for UDP proxy hash policies. If hash_policies is not set, the hash-based\n  // load balancing algorithms will select a host randomly. Currently the number of hash policies is\n  // limited to 1.\n  repeated HashPolicy hash_policies = 5 [(validate.rules).repeated = {max_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.internal_redirect.allow_listed_routes.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.internal_redirect.allow_listed_routes.v3\";\noption java_outer_classname = \"AllowListedRoutesConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Allow listed routes internal redirect predicate]\n\n// An internal redirect predicate that accepts only explicitly allowed target routes.\n// [#extension: envoy.internal_redirect_predicates.allow_listed_routes]\nmessage AllowListedRoutesConfig {\n  // The list of routes that's allowed as redirect target by this predicate,\n  // identified by the route's :ref:`name <envoy_api_field_config.route.v3.Route.route>`.\n  // Empty route names are not allowed.\n  repeated string allowed_route_names = 1\n      [(validate.rules).repeated = {items {string {min_len: 1}}}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/internal_redirect/previous_routes/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.internal_redirect.previous_routes.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.internal_redirect.previous_routes.v3\";\noption java_outer_classname = \"PreviousRoutesConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Previous routes internal redirect predicate]\n\n// An internal redirect predicate that rejects redirect targets that are pointing\n// to a route that has been followed by a previous redirect from the current route.\n// [#extension: envoy.internal_redirect_predicates.previous_routes]\nmessage PreviousRoutesConfig {\n}\n"
  },
  {
    "path": "api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.internal_redirect.safe_cross_scheme.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.internal_redirect.safe_cross_scheme.v3\";\noption java_outer_classname = \"SafeCrossSchemeConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: SafeCrossScheme internal redirect predicate]\n\n// An internal redirect predicate that checks the scheme between the\n// downstream url and the redirect target url and allows a) same scheme\n// redirect and b) safe cross scheme redirect, which means if the downstream\n// scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the\n// downstream scheme is HTTP, only HTTP redirect targets are allowed.\n// [#extension:\n// envoy.internal_redirect_predicates.safe_cross_scheme]\nmessage SafeCrossSchemeConfig {\n}\n"
  },
  {
    "path": "api/envoy/extensions/network/socket_interface/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.network.socket_interface.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.network.socket_interface.v3\";\noption java_outer_classname = \"DefaultSocketInterfaceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Default Socket Interface configuration]\n\n// Configuration for default socket interface that relies on OS dependent syscall to create\n// sockets.\nmessage DefaultSocketInterface {\n}\n"
  },
  {
    "path": "api/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/retry/omit_host_metadata/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.retry.host.omit_host_metadata.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.retry.host.omit_host_metadata.v3\";\noption java_outer_classname = \"OmitHostMetadataConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Omit host metadata retry predicate]\n\n// A retry host predicate that can be used to reject a host based on\n// predefined metadata match criteria.\n// [#extension: envoy.retry_host_predicates.omit_host_metadata]\nmessage OmitHostMetadataConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.retry.omit_host_metadata.v2.OmitHostMetadataConfig\";\n\n  // Retry host predicate metadata match criteria. The hosts in\n  // the upstream cluster with matching metadata will be omitted while\n  // attempting a retry of a failed request. The metadata should be specified\n  // under the *envoy.lb* key.\n  config.core.v3.Metadata metadata_match = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/retry/priority/previous_priorities/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/retry/previous_priorities:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.retry.priority.previous_priorities.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.retry.priority.previous_priorities.v3\";\noption java_outer_classname = \"PreviousPrioritiesConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Previous priorities retry selector]\n\n// A retry host selector that attempts to spread retries between priorities, even if certain\n// priorities would not normally be attempted due to higher priorities being available.\n//\n// As priorities get excluded, load will be distributed amongst the remaining healthy priorities\n// based on the relative health of the priorities, matching how load is distributed during regular\n// host selection. For example, given priority healths of {100, 50, 50}, the original load will be\n// {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). 
If P0 is excluded, the load\n// changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the\n// remaining to spill over to P2.\n//\n// Each priority attempted will be excluded until there are no healthy priorities left, at which\n// point the list of attempted priorities will be reset, essentially starting from the beginning.\n// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the\n// following sequence of priorities would be selected (assuming update_frequency = 1):\n// Attempt 1: P0 (P0 is 100% healthy)\n// Attempt 2: P2 (P0 already attempted, P2 only healthy priority)\n// Attempt 3: P0 (no healthy priorities, reset)\n// Attempt 4: P2\n//\n// In the case of all upstream hosts being unhealthy, no adjustments will be made to the original\n// priority load, so behavior should be identical to not using this plugin.\n//\n// Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of\n// priorities), which might incur significant overhead for clusters with many priorities.\n// [#extension: envoy.retry_priorities.previous_priorities]\nmessage PreviousPrioritiesConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.retry.previous_priorities.PreviousPrioritiesConfig\";\n\n  // How often the priority load should be updated based on previously attempted priorities. Useful\n  // to allow each priorities to receive more than one request before being excluded or to reduce\n  // the number of times that the priority load has to be recomputed.\n  //\n  // For example, by setting this to 2, then the first two attempts (initial attempt and first\n  // retry) will use the unmodified priority load. 
The third and fourth attempt will use priority\n  // load which excludes the priorities routed to with the first two attempts, and the fifth and\n  // sixth attempt will use the priority load excluding the priorities used for the first four\n  // attempts.\n  //\n  // Must be greater than 0.\n  int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/stat_sinks/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/stat_sinks/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.stat_sinks.wasm.v3;\n\nimport \"envoy/extensions/wasm/v3/wasm.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.stat_sinks.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm]\n// Wasm :ref:`configuration overview <config_stat_sinks_wasm>`.\n// [#extension: envoy.stat_sinks.wasm]\n\nmessage Wasm {\n  // General Plugin configuration.\n  envoy.extensions.wasm.v3.PluginConfig config = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/tracers/datadog/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.datadog.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.datadog.v4alpha\";\noption java_outer_classname = \"DatadogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Datadog tracer]\n\n// Configuration for the Datadog tracer.\n// [#extension: envoy.tracers.datadog]\nmessage DatadogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.DatadogConfig\";\n\n  // The cluster to use for submitting traces to the Datadog agent.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The name used for the service when traces are generated by envoy.\n  string service_name = 2 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/tracers/dynamic_ot/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.dynamic_ot.v4alpha;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.dynamic_ot.v4alpha\";\noption java_outer_classname = \"DynamicOtProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Dynamically loadable OpenTracing tracer]\n\n// DynamicOtConfig is used to dynamically load a tracer from a shared library\n// that implements the `OpenTracing dynamic loading API\n// <https://github.com/opentracing/opentracing-cpp>`_.\n// [#extension: envoy.tracers.dynamic_ot]\nmessage DynamicOtConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.DynamicOtConfig\";\n\n  // Dynamic library implementing the `OpenTracing API\n  // <https://github.com/opentracing/opentracing-cpp>`_.\n  string library = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The configuration to use when creating a tracer from the given dynamic\n  // library.\n  google.protobuf.Struct config = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/tracers/lightstep/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.lightstep.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.lightstep.v4alpha\";\noption java_outer_classname = \"LightstepProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: LightStep tracer]\n\n// Configuration for the LightStep tracer.\n// [#extension: envoy.tracers.lightstep]\nmessage LightstepConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.LightstepConfig\";\n\n  // Available propagation modes\n  enum PropagationMode {\n    // Propagate trace context in the single header x-ot-span-context.\n    ENVOY = 0;\n\n    // Propagate trace context using LightStep's native format.\n    LIGHTSTEP = 1;\n\n    // Propagate trace context using the b3 format.\n    B3 = 2;\n\n    // Propagation trace context using the w3 trace-context standard.\n    TRACE_CONTEXT = 3;\n  }\n\n  // The cluster manager cluster that hosts the LightStep collectors.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // File containing the access token to the `LightStep\n  // <https://lightstep.com/>`_ API.\n  string access_token_file = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Propagation modes to use by LightStep's tracer.\n  repeated PropagationMode propagation_modes = 3\n      [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/tracers/opencensus/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/tracers/opencensus/v4alpha/opencensus.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.opencensus.v4alpha;\n\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"opencensus/proto/trace/v1/trace_config.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.opencensus.v4alpha\";\noption java_outer_classname = \"OpencensusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: OpenCensus tracer]\n\n// Configuration for the OpenCensus tracer.\n// [#next-free-field: 15]\n// [#extension: envoy.tracers.opencensus]\nmessage OpenCensusConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.OpenCensusConfig\";\n\n  enum TraceContext {\n    // No-op default, no trace context is utilized.\n    NONE = 0;\n\n    // W3C Trace-Context format \"traceparent:\" header.\n    TRACE_CONTEXT = 1;\n\n    // Binary \"grpc-trace-bin:\" header.\n    GRPC_TRACE_BIN = 2;\n\n    // \"X-Cloud-Trace-Context:\" header.\n    CLOUD_TRACE_CONTEXT = 3;\n\n    // X-B3-* headers.\n    B3 = 4;\n  }\n\n  reserved 7;\n\n  // Configures tracing, e.g. the sampler, max number of annotations, etc.\n  .opencensus.proto.trace.v1.TraceConfig trace_config = 1;\n\n  // Enables the stdout exporter if set to true. This is intended for debugging\n  // purposes.\n  bool stdout_exporter_enabled = 2;\n\n  // Enables the Stackdriver exporter if set to true. The project_id must also\n  // be set.\n  bool stackdriver_exporter_enabled = 3;\n\n  // The Cloud project_id to use for Stackdriver tracing.\n  string stackdriver_project_id = 4;\n\n  // (optional) By default, the Stackdriver exporter will connect to production\n  // Stackdriver. 
If stackdriver_address is non-empty, it will instead connect\n  // to this address, which is in the gRPC format:\n  // https://github.com/grpc/grpc/blob/master/doc/naming.md\n  string stackdriver_address = 10;\n\n  // (optional) The gRPC server that hosts Stackdriver tracing service. Only\n  // Google gRPC is supported. If :ref:`target_uri <envoy_v3_api_field_config.core.v3.GrpcService.GoogleGrpc.target_uri>`\n  // is not provided, the default production Stackdriver address will be used.\n  config.core.v4alpha.GrpcService stackdriver_grpc_service = 13;\n\n  // Enables the Zipkin exporter if set to true. The url and service name must\n  // also be set.\n  bool zipkin_exporter_enabled = 5;\n\n  // The URL to Zipkin, e.g. \"http://127.0.0.1:9411/api/v2/spans\"\n  string zipkin_url = 6;\n\n  // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or\n  // ocagent_grpc_service must also be set.\n  bool ocagent_exporter_enabled = 11;\n\n  // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC\n  // format: https://github.com/grpc/grpc/blob/master/doc/naming.md\n  // [#comment:TODO: deprecate this field]\n  string ocagent_address = 12;\n\n  // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported.\n  // This is only used if the ocagent_address is left empty.\n  config.core.v4alpha.GrpcService ocagent_grpc_service = 14;\n\n  // List of incoming trace context headers we will accept. First one found\n  // wins.\n  repeated TraceContext incoming_trace_context = 8;\n\n  // List of outgoing trace context headers we will produce.\n  repeated TraceContext outgoing_trace_context = 9;\n}\n"
  },
  {
    "path": "api/envoy/extensions/tracers/xray/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/tracers/xray/v4alpha/xray.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.xray.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.xray.v4alpha\";\noption java_outer_classname = \"XrayProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: AWS X-Ray Tracer Configuration]\n// Configuration for AWS X-Ray tracer\n\nmessage XRayConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v3.XRayConfig\";\n\n  message SegmentFields {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.trace.v3.XRayConfig.SegmentFields\";\n\n    // The type of AWS resource, e.g. 
\"AWS::AppMesh::Proxy\".\n    string origin = 1;\n\n    // AWS resource metadata dictionary.\n    // See: `X-Ray Segment Document documentation <https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-aws>`__\n    google.protobuf.Struct aws = 2;\n  }\n\n  // The UDP endpoint of the X-Ray Daemon where the spans will be sent.\n  // If this value is not set, the default value of 127.0.0.1:2000 will be used.\n  config.core.v4alpha.SocketAddress daemon_endpoint = 1;\n\n  // The name of the X-Ray segment.\n  string segment_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The location of a local custom sampling rules JSON file.\n  // For an example of the sampling rules see:\n  // `X-Ray SDK documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-go-configuration.html#xray-sdk-go-configuration-sampling>`_\n  config.core.v4alpha.DataSource sampling_rule_manifest = 3;\n\n  // Optional custom fields to be added to each trace segment.\n  // see: `X-Ray Segment Document documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html>`__\n  SegmentFields segment_fields = 4;\n}\n"
  },
  {
    "path": "api/envoy/extensions/tracers/zipkin/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.zipkin.v4alpha;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.zipkin.v4alpha\";\noption java_outer_classname = \"ZipkinProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Zipkin tracer]\n\n// Configuration for the Zipkin tracer.\n// [#extension: envoy.tracers.zipkin]\n// [#next-free-field: 6]\nmessage ZipkinConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v3.ZipkinConfig\";\n\n  // Available Zipkin collector endpoint versions.\n  enum CollectorEndpointVersion {\n    // Zipkin API v1, JSON over HTTP.\n    // [#comment: The default implementation of Zipkin client before this field is added was only v1\n    // and the way user configure this was by not explicitly specifying the version. Consequently,\n    // before this is added, the corresponding Zipkin collector expected to receive v1 payload.\n    // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when\n    // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field,\n    // since in Zipkin realm this v1 version is considered to be not preferable anymore.]\n    DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // Zipkin API v2, JSON over HTTP.\n    HTTP_JSON = 1;\n\n    // Zipkin API v2, protobuf over HTTP.\n    HTTP_PROTO = 2;\n\n    // [#not-implemented-hide:]\n    GRPC = 3;\n  }\n\n  // The cluster manager cluster that hosts the Zipkin collectors. 
Note that the\n  // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster\n  // resources <envoy_api_field_config.bootstrap.v4alpha.Bootstrap.StaticResources.clusters>`.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The API endpoint of the Zipkin service where the spans will be sent. When\n  // using a standard Zipkin installation, the API endpoint is typically\n  // /api/v1/spans, which is the default value.\n  string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Determines whether a 128bit trace id will be used when creating a new\n  // trace instance. The default value is false, which will result in a 64 bit trace id being used.\n  bool trace_id_128bit = 3;\n\n  // Determines whether client and server spans will share the same span context.\n  // The default value is true.\n  google.protobuf.BoolValue shared_span_context = 4;\n\n  // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be\n  // used.\n  CollectorEndpointVersion collector_endpoint_version = 5;\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/alts/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/transport_socket/alts/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/alts/v3/alts.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.alts.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.alts.v3\";\noption java_outer_classname = \"AltsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: ALTS]\n// [#extension: envoy.transport_sockets.alts]\n\n// Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy.\n// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/\nmessage Alts {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.transport_socket.alts.v2alpha.Alts\";\n\n  // The location of a handshaker service, this is usually 169.254.169.254:8080\n  // on GCE.\n  string handshaker_service = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The acceptable service accounts from peer, peers not in the list will be rejected in the\n  // handshake validation step. If empty, no validation will be performed.\n  repeated string peer_service_accounts = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.proxy_protocol.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/proxy_protocol.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3\";\noption java_outer_classname = \"UpstreamProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Upstream Proxy Protocol]\n// [#extension: envoy.transport_sockets.upstream_proxy_protocol]\n\n// Configuration for PROXY protocol socket\nmessage ProxyProtocolUpstreamTransport {\n  // The PROXY protocol settings\n  config.core.v3.ProxyProtocolConfig config = 1;\n\n  // The underlying transport socket being wrapped.\n  config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/quic/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.quic.v3;\n\nimport \"envoy/extensions/transport_sockets/tls/v3/tls.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.quic.v3\";\noption java_outer_classname = \"QuicTransportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: quic transport]\n// [#extension: envoy.transport_sockets.quic]\n\n// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy.\nmessage QuicDownstreamTransport {\n  tls.v3.DownstreamTlsContext downstream_tls_context = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy.\nmessage QuicUpstreamTransport {\n  tls.v3.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/transport_sockets/quic/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.quic.v4alpha;\n\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/tls.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.quic.v4alpha\";\noption java_outer_classname = \"QuicTransportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: quic transport]\n// [#extension: envoy.transport_sockets.quic]\n\n// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy.\nmessage QuicDownstreamTransport {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport\";\n\n  tls.v4alpha.DownstreamTlsContext downstream_tls_context = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy.\nmessage QuicUpstreamTransport {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport\";\n\n  tls.v4alpha.UpstreamTlsContext upstream_tls_context = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/transport_socket/raw_buffer/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.raw_buffer.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.raw_buffer.v3\";\noption java_outer_classname = \"RawBufferProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Raw Buffer]\n// [#extension: envoy.transport_sockets.raw_buffer]\n\n// Configuration for raw buffer transport socket.\nmessage RawBuffer {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.transport_socket.raw_buffer.v2.RawBuffer\";\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/transport_socket/tap/v2alpha:pkg\",\n        \"//envoy/extensions/common/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tap/v3/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tap.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/extensions/common/tap/v3/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tap.v3\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap]\n// [#extension: envoy.transport_sockets.tap]\n\n// Configuration for tap transport socket. This wraps another transport socket, providing the\n// ability to interpose and record in plain text any traffic that is surfaced to Envoy.\nmessage Tap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.transport_socket.tap.v2alpha.Tap\";\n\n  // Common configuration for the tap transport socket.\n  common.tap.v3.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n\n  // The underlying transport socket being wrapped.\n  config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/common/tap/v4alpha:pkg\",\n        \"//envoy/extensions/transport_sockets/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tap.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/extensions/common/tap/v4alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tap.v4alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tap]\n// [#extension: envoy.transport_sockets.tap]\n\n// Configuration for tap transport socket. This wraps another transport socket, providing the\n// ability to interpose and record in plain text any traffic that is surfaced to Envoy.\nmessage Tap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tap.v3.Tap\";\n\n  // Common configuration for the tap transport socket.\n  common.tap.v4alpha.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n\n  // The underlying transport socket being wrapped.\n  config.core.v4alpha.TransportSocket transport_socket = 2\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tls/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tls/v3/cert.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v3;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/extensions/transport_sockets/tls/v3/common.proto\";\nimport public \"envoy/extensions/transport_sockets/tls/v3/secret.proto\";\nimport public \"envoy/extensions/transport_sockets/tls/v3/tls.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3\";\noption java_outer_classname = \"CertProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tls/v3/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common TLS configuration]\n\nmessage TlsParameters {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.TlsParameters\";\n\n  enum TlsProtocol {\n    // Envoy will choose the optimal TLS version.\n    TLS_AUTO = 0;\n\n    // TLS 1.0\n    TLSv1_0 = 1;\n\n    // TLS 1.1\n    TLSv1_1 = 2;\n\n    // TLS 1.2\n    TLSv1_2 = 3;\n\n    // TLS 1.3\n    TLSv1_3 = 4;\n  }\n\n  // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for\n  // servers.\n  TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for\n  // servers.\n  TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // If specified, the TLS listener will only support the specified `cipher list\n  // <https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#Cipher-suite-configuration>`_\n  // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not\n  // specified, the default list will be used.\n  //\n  // In non-FIPS builds, the default cipher list is:\n  //\n  // .. 
code-block:: none\n  //\n  //   [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]\n  //   [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default cipher list is:\n  //\n  // .. code-block:: none\n  //\n  //   ECDHE-ECDSA-AES128-GCM-SHA256\n  //   ECDHE-RSA-AES128-GCM-SHA256\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  repeated string cipher_suites = 3;\n\n  // If specified, the TLS connection will only support the specified ECDH\n  // curves. If not specified, the default curves will be used.\n  //\n  // In non-FIPS builds, the default curves are:\n  //\n  // .. code-block:: none\n  //\n  //   X25519\n  //   P-256\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default curve is:\n  //\n  // .. code-block:: none\n  //\n  //   P-256\n  repeated string ecdh_curves = 4;\n}\n\n// BoringSSL private key method configuration. The private key methods are used for external\n// (potentially asynchronous) signing and decryption operations. Some use cases for private key\n// methods would be TPM support and TLS acceleration.\nmessage PrivateKeyProvider {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.PrivateKeyProvider\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // Private key method provider name. 
The name must match a\n  // supported private key method provider type.\n  string provider_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Private key method provider specific configuration.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true];\n  }\n}\n\n// [#next-free-field: 7]\nmessage TlsCertificate {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.TlsCertificate\";\n\n  // The TLS certificate chain.\n  config.core.v3.DataSource certificate_chain = 1;\n\n  // The TLS private key.\n  config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n  // BoringSSL private key method provider. This is an alternative to :ref:`private_key\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.TlsCertificate.private_key>` field. This can't be\n  // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.TlsCertificate.private_key>` and\n  // :ref:`private_key_provider\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.TlsCertificate.private_key_provider>` fields will result in an\n  // error.\n  PrivateKeyProvider private_key_provider = 6;\n\n  // The password to decrypt the TLS private key. If this field is not set, it is assumed that the\n  // TLS private key is not password encrypted.\n  config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true];\n\n  // The OCSP response to be stapled with this certificate during the handshake.\n  // The response must be DER-encoded and may only be  provided via ``filename`` or\n  // ``inline_bytes``. 
The response may pertain to only one certificate.\n  config.core.v3.DataSource ocsp_staple = 4;\n\n  // [#not-implemented-hide:]\n  repeated config.core.v3.DataSource signed_certificate_timestamp = 5;\n}\n\nmessage TlsSessionTicketKeys {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.TlsSessionTicketKeys\";\n\n  // Keys for encrypting and decrypting TLS session tickets. The\n  // first key in the array contains the key to encrypt all new sessions created by this context.\n  // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys\n  // by, for example, putting the new key first, and the previous key second.\n  //\n  // If :ref:`session_ticket_keys <envoy_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys>`\n  // is not specified, the TLS library will still support resuming sessions via tickets, but it will\n  // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts\n  // or on different hosts.\n  //\n  // Each key must contain exactly 80 bytes of cryptographically-secure random data. For\n  // example, the output of ``openssl rand 80``.\n  //\n  // .. attention::\n  //\n  //   Using this feature has serious security considerations and risks. Improper handling of keys\n  //   may result in loss of secrecy in connections, even if ciphers supporting perfect forward\n  //   secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some\n  //   discussion. 
To minimize the risk, you must:\n  //\n  //   * Keep the session ticket keys at least as secure as your TLS certificate private keys\n  //   * Rotate session ticket keys at least daily, and preferably hourly\n  //   * Always generate keys using a cryptographically-secure random data source\n  repeated config.core.v3.DataSource keys = 1\n      [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true];\n}\n\n// [#next-free-field: 11]\nmessage CertificateValidationContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.CertificateValidationContext\";\n\n  // Peer certificate verification mode.\n  enum TrustChainVerification {\n    // Perform default certificate verification (e.g., against CA / verification lists)\n    VERIFY_TRUST_CHAIN = 0;\n\n    // Connections where the certificate fails verification will be permitted.\n    // For HTTP connections, the result of certificate verification can be used in route matching. (\n    // see :ref:`validated <envoy_api_field_config.route.v3.RouteMatch.TlsContextMatchOptions.validated>` ).\n    ACCEPT_UNTRUSTED = 1;\n  }\n\n  reserved 4, 5;\n\n  reserved \"verify_subject_alt_name\";\n\n  // TLS certificate data containing certificate authority certificates to use in verifying\n  // a presented peer certificate (e.g. server certificate for clusters or client certificate\n  // for listeners). If not specified and a peer certificate is presented it will not be\n  // verified. 
By default, a client certificate is optional, unless one of the additional\n  // options (:ref:`require_client_certificate\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.require_client_certificate>`,\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_spki>`,\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_hash>`, or\n  // :ref:`match_subject_alt_names\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.match_subject_alt_names>`) is also\n  // specified.\n  //\n  // It can optionally contain certificate revocation lists, in which case Envoy will verify\n  // that the presented peer certificate has not been revoked by one of the included CRLs. Note\n  // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be\n  // provided for all certificate authorities in that chain. Failure to do so will result in\n  // verification failure for both revoked and unrevoked certificates from that chain.\n  //\n  // See :ref:`the TLS overview <arch_overview_ssl_enabling_verification>` for a list of common\n  // system CA locations.\n  config.core.v3.DataSource trusted_ca = 1;\n\n  // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the\n  // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate\n  // matches one of the specified values.\n  //\n  // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -pubkey\n  //     | openssl pkey -pubin -outform DER\n  //     | openssl dgst -sha256 -binary\n  //     | openssl enc -base64\n  //   NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A=\n  //\n  // This is the format used in HTTP Public Key Pinning.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  //\n  // .. attention::\n  //\n  //   This option is preferred over :ref:`verify_certificate_hash\n  //   <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_hash>`,\n  //   because SPKI is tied to a private key, so it doesn't change when the certificate\n  //   is renewed using the same private key.\n  repeated string verify_certificate_spki = 3\n      [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}];\n\n  // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that\n  // the SHA-256 of the DER-encoded presented certificate matches one of the specified values.\n  //\n  // A hex-encoded SHA-256 of the certificate can be generated with the following command:\n  //\n  // .. code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d\" \" -f2\n  //   df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a\n  //\n  // A long hex-encoded and colon-separated SHA-256 (a.k.a. \"fingerprint\") of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d\"=\" -f2\n  //   DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A\n  //\n  // Both of those formats are acceptable.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  repeated string verify_certificate_hash = 2\n      [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}];\n\n  // An optional list of Subject Alternative name matchers. Envoy will verify that the\n  // Subject Alternative Name of the presented certificate matches one of the specified matches.\n  //\n  // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be\n  // configured with exact match type in the :ref:`string matcher <envoy_api_msg_type.matcher.v3.StringMatcher>`.\n  // For example if the certificate has \"\\*.example.com\" as DNS SAN entry, to allow only \"api.example.com\",\n  // it should be configured as shown below.\n  //\n  // .. code-block:: yaml\n  //\n  //  match_subject_alt_names:\n  //    exact: \"api.example.com\"\n  //\n  // .. 
attention::\n  //\n  //   Subject Alternative Names are easily spoofable and verifying only them is insecure,\n  //   therefore this option must be used together with :ref:`trusted_ca\n  //   <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca>`.\n  repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9;\n\n  // [#not-implemented-hide:] Must present signed certificate time-stamp.\n  google.protobuf.BoolValue require_signed_certificate_timestamp = 6;\n\n  // An optional `certificate revocation list\n  // <https://en.wikipedia.org/wiki/Certificate_revocation_list>`_\n  // (in PEM format). If specified, Envoy will verify that the presented peer\n  // certificate has not been revoked by this CRL. If this DataSource contains\n  // multiple CRLs, all of them will be used. Note that if a CRL is provided\n  // for any certificate authority in a trust chain, a CRL must be provided\n  // for all certificate authorities in that chain. Failure to do so will\n  // result in verification failure for both revoked and unrevoked certificates\n  // from that chain.\n  config.core.v3.DataSource crl = 7;\n\n  // If specified, Envoy will not reject expired certificates.\n  bool allow_expired_certificate = 8;\n\n  // Certificate trust chain verification mode.\n  TrustChainVerification trust_chain_verification = 10\n      [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tls/v3/secret.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/common.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3\";\noption java_outer_classname = \"SecretProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Secrets configuration]\n\nmessage GenericSecret {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.GenericSecret\";\n\n  // Secret of generic type and is available to filters.\n  config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true];\n}\n\nmessage SdsSecretConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.SdsSecretConfig\";\n\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  // When both name and config are specified, then secret can be fetched and/or reloaded via\n  // SDS. When only name is specified, then secret will be loaded from static resources.\n  string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  // Resource locator for SDS. 
This is mutually exclusive to *name*.\n  // [#not-implemented-hide:]\n  udpa.core.v1.ResourceLocator sds_resource_locator = 3\n      [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  config.core.v3.ConfigSource sds_config = 2;\n}\n\n// [#next-free-field: 6]\nmessage Secret {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.Secret\";\n\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  string name = 1;\n\n  oneof type {\n    TlsCertificate tls_certificate = 2;\n\n    TlsSessionTicketKeys session_ticket_keys = 3;\n\n    CertificateValidationContext validation_context = 4;\n\n    GenericSecret generic_secret = 5;\n  }\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tls/v3/tls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v3;\n\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/common.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/secret.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3\";\noption java_outer_classname = \"TlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: TLS transport socket]\n// [#extension: envoy.transport_sockets.tls]\n// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS.\n\nmessage UpstreamTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.UpstreamTlsContext\";\n\n  // Common TLS context settings.\n  //\n  // .. attention::\n  //\n  //   Server certificate verification is not enabled by default. Configure\n  //   :ref:`trusted_ca<envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca>` to enable\n  //   verification.\n  CommonTlsContext common_tls_context = 1;\n\n  // SNI string to use when creating TLS backend connections.\n  string sni = 2 [(validate.rules).string = {max_bytes: 255}];\n\n  // If true, server-initiated TLS renegotiation will be allowed.\n  //\n  // .. 
attention::\n  //\n  //   TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary.\n  bool allow_renegotiation = 3;\n\n  // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets\n  // for TLSv1.2 and older) to store for the purpose of session resumption.\n  //\n  // Defaults to 1, setting this to 0 disables session resumption.\n  google.protobuf.UInt32Value max_session_keys = 4;\n}\n\n// [#next-free-field: 9]\nmessage DownstreamTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.DownstreamTlsContext\";\n\n  enum OcspStaplePolicy {\n    // OCSP responses are optional. If an OCSP response is absent\n    // or expired, the associated certificate will be used for\n    // connections without an OCSP staple.\n    LENIENT_STAPLING = 0;\n\n    // OCSP responses are optional. If an OCSP response is absent,\n    // the associated certificate will be used without an\n    // OCSP staple. If a response is provided but is expired,\n    // the associated certificate will not be used for\n    // subsequent connections. If no suitable certificate is found,\n    // the connection is rejected.\n    STRICT_STAPLING = 1;\n\n    // OCSP responses are required. Configuration will fail if\n    // a certificate is provided without an OCSP response. If a\n    // response expires, the associated certificate will not be\n    // used connections. 
If no suitable certificate is found, the\n    // connection is rejected.\n    MUST_STAPLE = 2;\n  }\n\n  // Common TLS context settings.\n  CommonTlsContext common_tls_context = 1;\n\n  // If specified, Envoy will reject connections without a valid client\n  // certificate.\n  google.protobuf.BoolValue require_client_certificate = 2;\n\n  // If specified, Envoy will reject connections without a valid and matching SNI.\n  // [#not-implemented-hide:]\n  google.protobuf.BoolValue require_sni = 3;\n\n  oneof session_ticket_keys_type {\n    // TLS session ticket key settings.\n    TlsSessionTicketKeys session_ticket_keys = 4;\n\n    // Config for fetching TLS session ticket keys via SDS API.\n    SdsSecretConfig session_ticket_keys_sds_secret_config = 5;\n\n    // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS\n    // server to not issue TLS session tickets for the purposes of stateless TLS session resumption.\n    // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using\n    // the keys specified through either :ref:`session_ticket_keys <envoy_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys>`\n    // or :ref:`session_ticket_keys_sds_secret_config <envoy_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys_sds_secret_config>`.\n    // If this config is set to false and no keys are explicitly configured, the TLS server will issue\n    // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the\n    // implication that sessions cannot be resumed across hot restarts or on different hosts.\n    bool disable_stateless_session_resumption = 7;\n  }\n\n  // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session\n  // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2)\n  // 
<https://tools.ietf.org/html/rfc5077#section-5.6>`\n  // only seconds could be specified (fractional seconds are going to be ignored).\n  google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = {\n    lt {seconds: 4294967296}\n    gte {}\n  }];\n\n  // Config for whether to use certificates if they do not have\n  // an accompanying OCSP response or if the response expires at runtime.\n  // Defaults to LENIENT_STAPLING\n  OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}];\n}\n\n// TLS context shared by both client and server TLS contexts.\n// [#next-free-field: 14]\nmessage CommonTlsContext {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.CommonTlsContext\";\n\n  // Config for Certificate provider to get certificates. This provider should allow certificates to be\n  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n  message CertificateProvider {\n    // opaque name used to specify certificate instances or types. For example, \"ROOTCA\" to specify\n    // a root-certificate (validation context) or \"TLS\" to specify a new tls-certificate.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Provider specific config.\n    // Note: an implementation is expected to dedup multiple instances of the same config\n    // to maintain a single certificate-provider instance. 
The sharing can happen, for\n    // example, among multiple clusters or between the tls_certificate and validation_context\n    // certificate providers of a cluster.\n    // This config could be supplied inline or (in future) a named xDS resource.\n    oneof config {\n      option (validate.required) = true;\n\n      config.core.v3.TypedExtensionConfig typed_config = 2;\n    }\n  }\n\n  // Similar to CertificateProvider above, but allows the provider instances to be configured on\n  // the client side instead of being sent from the control plane.\n  message CertificateProviderInstance {\n    // Provider instance name. This name must be defined in the client's configuration (e.g., a\n    // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config\n    // field that would be sent in the CertificateProvider message if the config was sent by the\n    // control plane). If not present, defaults to \"default\".\n    //\n    // Instance names should generally be defined not in terms of the underlying provider\n    // implementation (e.g., \"file_watcher\") but rather in terms of the function of the\n    // certificates (e.g., \"foo_deployment_identity\").\n    string instance_name = 1;\n\n    // Opaque name used to specify certificate instances or types. For example, \"ROOTCA\" to specify\n    // a root-certificate (validation context) or \"example.com\" to specify a certificate for a\n    // particular domain. 
Not all provider instances will actually use this field, so the value\n    // defaults to the empty string.\n    string certificate_name = 2;\n  }\n\n  message CombinedCertificateValidationContext {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext\";\n\n    // How to validate peer certificates.\n    CertificateValidationContext default_validation_context = 1\n        [(validate.rules).message = {required: true}];\n\n    // Config for fetching validation context via SDS API. Note SDS API allows certificates to be\n    // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n    // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n    // or validation_context_certificate_provider_instance may be used.\n    SdsSecretConfig validation_context_sds_secret_config = 2 [\n      (validate.rules).message = {required: true},\n      (udpa.annotations.field_migrate).oneof_promotion = \"dynamic_validation_context\"\n    ];\n\n    // Certificate provider for fetching validation context.\n    // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n    // or validation_context_certificate_provider_instance may be used.\n    // [#not-implemented-hide:]\n    CertificateProvider validation_context_certificate_provider = 3\n        [(udpa.annotations.field_migrate).oneof_promotion = \"dynamic_validation_context\"];\n\n    // Certificate provider instance for fetching validation context.\n    // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n    // or validation_context_certificate_provider_instance may be used.\n    // [#not-implemented-hide:]\n    CertificateProviderInstance validation_context_certificate_provider_instance = 4\n        [(udpa.annotations.field_migrate).oneof_promotion = \"dynamic_validation_context\"];\n  }\n\n  
reserved 5;\n\n  // TLS protocol versions, cipher suites etc.\n  TlsParameters tls_params = 1;\n\n  // :ref:`Multiple TLS certificates <arch_overview_ssl_cert_select>` can be associated with the\n  // same context to allow both RSA and ECDSA certificates.\n  //\n  // Only a single TLS certificate is supported in client contexts. In server contexts, the first\n  // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is\n  // used for clients that support ECDSA.\n  repeated TlsCertificate tls_certificates = 2;\n\n  // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be\n  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n  repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6\n      [(validate.rules).repeated = {max_items: 1}];\n\n  // Certificate provider for fetching TLS certificates.\n  // [#not-implemented-hide:]\n  CertificateProvider tls_certificate_certificate_provider = 9;\n\n  // Certificate provider instance for fetching TLS certificates.\n  // [#not-implemented-hide:]\n  CertificateProviderInstance tls_certificate_certificate_provider_instance = 11;\n\n  oneof validation_context_type {\n    // How to validate peer certificates.\n    CertificateValidationContext validation_context = 3;\n\n    // Config for fetching validation context via SDS API. Note SDS API allows certificates to be\n    // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n    SdsSecretConfig validation_context_sds_secret_config = 7;\n\n    // Combined certificate validation context holds a default CertificateValidationContext\n    // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic\n    // and default CertificateValidationContext are merged into a new CertificateValidationContext\n    // for validation. 
This merge is done by Message::MergeFrom(), so dynamic\n    // CertificateValidationContext overwrites singular fields in default\n    // CertificateValidationContext, and concatenates repeated fields to default\n    // CertificateValidationContext, and logical OR is applied to boolean fields.\n    CombinedCertificateValidationContext combined_validation_context = 8;\n\n    // Certificate provider for fetching validation context.\n    // [#not-implemented-hide:]\n    CertificateProvider validation_context_certificate_provider = 10;\n\n    // Certificate provider instance for fetching validation context.\n    // [#not-implemented-hide:]\n    CertificateProviderInstance validation_context_certificate_provider_instance = 12;\n  }\n\n  // Supplies the list of ALPN protocols that the listener should expose. In\n  // practice this is likely to be set to one of two values (see the\n  // :ref:`codec_type\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.codec_type>`\n  // parameter in the HTTP connection manager for more information):\n  //\n  // * \"h2,http/1.1\" If the listener is going to support both HTTP/2 and HTTP/1.1.\n  // * \"http/1.1\" If the listener is only going to support HTTP/1.1.\n  //\n  // There is no default for this parameter. If empty, Envoy will not expose ALPN.\n  repeated string alpn_protocols = 4;\n\n  // Custom TLS handshaker. If empty, defaults to native TLS handshaking\n  // behavior.\n  config.core.v3.TypedExtensionConfig custom_handshaker = 13;\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common TLS configuration]\n\nmessage TlsParameters {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.TlsParameters\";\n\n  enum TlsProtocol {\n    // Envoy will choose the optimal TLS version.\n    TLS_AUTO = 0;\n\n    // TLS 1.0\n    TLSv1_0 = 1;\n\n    // TLS 1.1\n    TLSv1_1 = 2;\n\n    // TLS 1.2\n    TLSv1_2 = 3;\n\n    // TLS 1.3\n    TLSv1_3 = 4;\n  }\n\n  // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for\n  // servers.\n  TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for\n  // servers.\n  TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // If specified, the TLS listener will only support the specified `cipher list\n  // <https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#Cipher-suite-configuration>`_\n  // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). 
If not\n  // specified, the default list will be used.\n  //\n  // In non-FIPS builds, the default cipher list is:\n  //\n  // .. code-block:: none\n  //\n  //   [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]\n  //   [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default cipher list is:\n  //\n  // .. code-block:: none\n  //\n  //   ECDHE-ECDSA-AES128-GCM-SHA256\n  //   ECDHE-RSA-AES128-GCM-SHA256\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  repeated string cipher_suites = 3;\n\n  // If specified, the TLS connection will only support the specified ECDH\n  // curves. If not specified, the default curves will be used.\n  //\n  // In non-FIPS builds, the default curves are:\n  //\n  // .. code-block:: none\n  //\n  //   X25519\n  //   P-256\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default curve is:\n  //\n  // .. code-block:: none\n  //\n  //   P-256\n  repeated string ecdh_curves = 4;\n}\n\n// BoringSSL private key method configuration. The private key methods are used for external\n// (potentially asynchronous) signing and decryption operations. 
Some use cases for private key\n// methods would be TPM support and TLS acceleration.\nmessage PrivateKeyProvider {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // Private key method provider name. The name must match a\n  // supported private key method provider type.\n  string provider_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Private key method provider specific configuration.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true];\n  }\n}\n\n// [#next-free-field: 7]\nmessage TlsCertificate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.TlsCertificate\";\n\n  // The TLS certificate chain.\n  config.core.v4alpha.DataSource certificate_chain = 1;\n\n  // The TLS private key.\n  config.core.v4alpha.DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n  // BoringSSL private key method provider. This is an alternative to :ref:`private_key\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.TlsCertificate.private_key>` field. This can't be\n  // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.TlsCertificate.private_key>` and\n  // :ref:`private_key_provider\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.TlsCertificate.private_key_provider>` fields will result in an\n  // error.\n  PrivateKeyProvider private_key_provider = 6;\n\n  // The password to decrypt the TLS private key. 
If this field is not set, it is assumed that the\n  // TLS private key is not password encrypted.\n  config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true];\n\n  // The OCSP response to be stapled with this certificate during the handshake.\n  // The response must be DER-encoded and may only be  provided via ``filename`` or\n  // ``inline_bytes``. The response may pertain to only one certificate.\n  config.core.v4alpha.DataSource ocsp_staple = 4;\n\n  // [#not-implemented-hide:]\n  repeated config.core.v4alpha.DataSource signed_certificate_timestamp = 5;\n}\n\nmessage TlsSessionTicketKeys {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys\";\n\n  // Keys for encrypting and decrypting TLS session tickets. The\n  // first key in the array contains the key to encrypt all new sessions created by this context.\n  // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys\n  // by, for example, putting the new key first, and the previous key second.\n  //\n  // If :ref:`session_ticket_keys <envoy_api_field_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext.session_ticket_keys>`\n  // is not specified, the TLS library will still support resuming sessions via tickets, but it will\n  // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts\n  // or on different hosts.\n  //\n  // Each key must contain exactly 80 bytes of cryptographically-secure random data. For\n  // example, the output of ``openssl rand 80``.\n  //\n  // .. attention::\n  //\n  //   Using this feature has serious security considerations and risks. Improper handling of keys\n  //   may result in loss of secrecy in connections, even if ciphers supporting perfect forward\n  //   secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some\n  //   discussion. 
To minimize the risk, you must:\n  //\n  //   * Keep the session ticket keys at least as secure as your TLS certificate private keys\n  //   * Rotate session ticket keys at least daily, and preferably hourly\n  //   * Always generate keys using a cryptographically-secure random data source\n  repeated config.core.v4alpha.DataSource keys = 1\n      [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true];\n}\n\n// [#next-free-field: 11]\nmessage CertificateValidationContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext\";\n\n  // Peer certificate verification mode.\n  enum TrustChainVerification {\n    // Perform default certificate verification (e.g., against CA / verification lists)\n    VERIFY_TRUST_CHAIN = 0;\n\n    // Connections where the certificate fails verification will be permitted.\n    // For HTTP connections, the result of certificate verification can be used in route matching. (\n    // see :ref:`validated <envoy_api_field_config.route.v4alpha.RouteMatch.TlsContextMatchOptions.validated>` ).\n    ACCEPT_UNTRUSTED = 1;\n  }\n\n  reserved 4, 5;\n\n  reserved \"verify_subject_alt_name\";\n\n  // TLS certificate data containing certificate authority certificates to use in verifying\n  // a presented peer certificate (e.g. server certificate for clusters or client certificate\n  // for listeners). If not specified and a peer certificate is presented it will not be\n  // verified. 
By default, a client certificate is optional, unless one of the additional\n  // options (:ref:`require_client_certificate\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext.require_client_certificate>`,\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_spki>`,\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_hash>`, or\n  // :ref:`match_subject_alt_names\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.match_subject_alt_names>`) is also\n  // specified.\n  //\n  // It can optionally contain certificate revocation lists, in which case Envoy will verify\n  // that the presented peer certificate has not been revoked by one of the included CRLs. Note\n  // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be\n  // provided for all certificate authorities in that chain. Failure to do so will result in\n  // verification failure for both revoked and unrevoked certificates from that chain.\n  //\n  // See :ref:`the TLS overview <arch_overview_ssl_enabling_verification>` for a list of common\n  // system CA locations.\n  config.core.v4alpha.DataSource trusted_ca = 1;\n\n  // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the\n  // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate\n  // matches one of the specified values.\n  //\n  // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -pubkey\n  //     | openssl pkey -pubin -outform DER\n  //     | openssl dgst -sha256 -binary\n  //     | openssl enc -base64\n  //   NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A=\n  //\n  // This is the format used in HTTP Public Key Pinning.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  //\n  // .. attention::\n  //\n  //   This option is preferred over :ref:`verify_certificate_hash\n  //   <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_hash>`,\n  //   because SPKI is tied to a private key, so it doesn't change when the certificate\n  //   is renewed using the same private key.\n  repeated string verify_certificate_spki = 3\n      [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}];\n\n  // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that\n  // the SHA-256 of the DER-encoded presented certificate matches one of the specified values.\n  //\n  // A hex-encoded SHA-256 of the certificate can be generated with the following command:\n  //\n  // .. code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d\" \" -f2\n  //   df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a\n  //\n  // A long hex-encoded and colon-separated SHA-256 (a.k.a. \"fingerprint\") of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d\"=\" -f2\n  //   DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A\n  //\n  // Both of those formats are acceptable.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  repeated string verify_certificate_hash = 2\n      [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}];\n\n  // An optional list of Subject Alternative name matchers. Envoy will verify that the\n  // Subject Alternative Name of the presented certificate matches one of the specified matches.\n  //\n  // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be\n  // configured with exact match type in the :ref:`string matcher <envoy_api_msg_type.matcher.v4alpha.StringMatcher>`.\n  // For example if the certificate has \"\\*.example.com\" as DNS SAN entry, to allow only \"api.example.com\",\n  // it should be configured as shown below.\n  //\n  // .. code-block:: yaml\n  //\n  //  match_subject_alt_names:\n  //    exact: \"api.example.com\"\n  //\n  // .. 
attention::\n  //\n  //   Subject Alternative Names are easily spoofable and verifying only them is insecure,\n  //   therefore this option must be used together with :ref:`trusted_ca\n  //   <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.trusted_ca>`.\n  repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9;\n\n  // [#not-implemented-hide:] Must present signed certificate time-stamp.\n  google.protobuf.BoolValue require_signed_certificate_timestamp = 6;\n\n  // An optional `certificate revocation list\n  // <https://en.wikipedia.org/wiki/Certificate_revocation_list>`_\n  // (in PEM format). If specified, Envoy will verify that the presented peer\n  // certificate has not been revoked by this CRL. If this DataSource contains\n  // multiple CRLs, all of them will be used. Note that if a CRL is provided\n  // for any certificate authority in a trust chain, a CRL must be provided\n  // for all certificate authorities in that chain. Failure to do so will\n  // result in verification failure for both revoked and unrevoked certificates\n  // from that chain.\n  config.core.v4alpha.DataSource crl = 7;\n\n  // If specified, Envoy will not reject expired certificates.\n  bool allow_expired_certificate = 8;\n\n  // Certificate trust chain verification mode.\n  TrustChainVerification trust_chain_verification = 10\n      [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/common.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha\";\noption java_outer_classname = \"SecretProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Secrets configuration]\n\nmessage GenericSecret {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.GenericSecret\";\n\n  // Secret of generic type and is available to filters.\n  config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true];\n}\n\nmessage SdsSecretConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig\";\n\n  oneof name_specifier {\n    // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n    // When both name and config are specified, then secret can be fetched and/or reloaded via\n    // SDS. When only name is specified, then secret will be loaded from static resources.\n    string name = 1;\n\n    // Resource locator for SDS. 
This is mutually exclusive to *name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator sds_resource_locator = 3;\n  }\n\n  config.core.v4alpha.ConfigSource sds_config = 2;\n}\n\n// [#next-free-field: 6]\nmessage Secret {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.Secret\";\n\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  string name = 1;\n\n  oneof type {\n    TlsCertificate tls_certificate = 2;\n\n    TlsSessionTicketKeys session_ticket_keys = 3;\n\n    CertificateValidationContext validation_context = 4;\n\n    GenericSecret generic_secret = 5;\n  }\n}\n"
  },
  {
    "path": "api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v4alpha;\n\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/common.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/secret.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha\";\noption java_outer_classname = \"TlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: TLS transport socket]\n// [#extension: envoy.transport_sockets.tls]\n// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS.\n\nmessage UpstreamTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\";\n\n  // Common TLS context settings.\n  //\n  // .. attention::\n  //\n  //   Server certificate verification is not enabled by default. Configure\n  //   :ref:`trusted_ca<envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.trusted_ca>` to enable\n  //   verification.\n  CommonTlsContext common_tls_context = 1;\n\n  // SNI string to use when creating TLS backend connections.\n  string sni = 2 [(validate.rules).string = {max_bytes: 255}];\n\n  // If true, server-initiated TLS renegotiation will be allowed.\n  //\n  // .. 
attention::\n  //\n  //   TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary.\n  bool allow_renegotiation = 3;\n\n  // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets\n  // for TLSv1.2 and older) to store for the purpose of session resumption.\n  //\n  // Defaults to 1, setting this to 0 disables session resumption.\n  google.protobuf.UInt32Value max_session_keys = 4;\n}\n\n// [#next-free-field: 9]\nmessage DownstreamTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\";\n\n  enum OcspStaplePolicy {\n    // OCSP responses are optional. If an OCSP response is absent\n    // or expired, the associated certificate will be used for\n    // connections without an OCSP staple.\n    LENIENT_STAPLING = 0;\n\n    // OCSP responses are optional. If an OCSP response is absent,\n    // the associated certificate will be used without an\n    // OCSP staple. If a response is provided but is expired,\n    // the associated certificate will not be used for\n    // subsequent connections. If no suitable certificate is found,\n    // the connection is rejected.\n    STRICT_STAPLING = 1;\n\n    // OCSP responses are required. Configuration will fail if\n    // a certificate is provided without an OCSP response. If a\n    // response expires, the associated certificate will not be\n    // used for connections. 
If no suitable certificate is found, the\n    // connection is rejected.\n    MUST_STAPLE = 2;\n  }\n\n  // Common TLS context settings.\n  CommonTlsContext common_tls_context = 1;\n\n  // If specified, Envoy will reject connections without a valid client\n  // certificate.\n  google.protobuf.BoolValue require_client_certificate = 2;\n\n  // If specified, Envoy will reject connections without a valid and matching SNI.\n  // [#not-implemented-hide:]\n  google.protobuf.BoolValue require_sni = 3;\n\n  oneof session_ticket_keys_type {\n    // TLS session ticket key settings.\n    TlsSessionTicketKeys session_ticket_keys = 4;\n\n    // Config for fetching TLS session ticket keys via SDS API.\n    SdsSecretConfig session_ticket_keys_sds_secret_config = 5;\n\n    // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS\n    // server to not issue TLS session tickets for the purposes of stateless TLS session resumption.\n    // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using\n    // the keys specified through either :ref:`session_ticket_keys <envoy_api_field_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext.session_ticket_keys>`\n    // or :ref:`session_ticket_keys_sds_secret_config <envoy_api_field_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext.session_ticket_keys_sds_secret_config>`.\n    // If this config is set to false and no keys are explicitly configured, the TLS server will issue\n    // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the\n    // implication that sessions cannot be resumed across hot restarts or on different hosts.\n    bool disable_stateless_session_resumption = 7;\n  }\n\n  // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session\n  // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2)\n  // 
<https://tools.ietf.org/html/rfc5077#section-5.6>`\n  // only seconds could be specified (fractional seconds are going to be ignored).\n  google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = {\n    lt {seconds: 4294967296}\n    gte {}\n  }];\n\n  // Config for whether to use certificates if they do not have\n  // an accompanying OCSP response or if the response expires at runtime.\n  // Defaults to LENIENT_STAPLING\n  OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}];\n}\n\n// TLS context shared by both client and server TLS contexts.\n// [#next-free-field: 14]\nmessage CommonTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.CommonTlsContext\";\n\n  // Config for Certificate provider to get certificates. This provider should allow certificates to be\n  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n  message CertificateProvider {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider\";\n\n    // opaque name used to specify certificate instances or types. For example, \"ROOTCA\" to specify\n    // a root-certificate (validation context) or \"TLS\" to specify a new tls-certificate.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Provider specific config.\n    // Note: an implementation is expected to dedup multiple instances of the same config\n    // to maintain a single certificate-provider instance. 
The sharing can happen, for\n    // example, among multiple clusters or between the tls_certificate and validation_context\n    // certificate providers of a cluster.\n    // This config could be supplied inline or (in future) a named xDS resource.\n    oneof config {\n      option (validate.required) = true;\n\n      config.core.v4alpha.TypedExtensionConfig typed_config = 2;\n    }\n  }\n\n  // Similar to CertificateProvider above, but allows the provider instances to be configured on\n  // the client side instead of being sent from the control plane.\n  message CertificateProviderInstance {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance\";\n\n    // Provider instance name. This name must be defined in the client's configuration (e.g., a\n    // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config\n    // field that would be sent in the CertificateProvider message if the config was sent by the\n    // control plane). If not present, defaults to \"default\".\n    //\n    // Instance names should generally be defined not in terms of the underlying provider\n    // implementation (e.g., \"file_watcher\") but rather in terms of the function of the\n    // certificates (e.g., \"foo_deployment_identity\").\n    string instance_name = 1;\n\n    // Opaque name used to specify certificate instances or types. For example, \"ROOTCA\" to specify\n    // a root-certificate (validation context) or \"example.com\" to specify a certificate for a\n    // particular domain. 
Not all provider instances will actually use this field, so the value\n    // defaults to the empty string.\n    string certificate_name = 2;\n  }\n\n  message CombinedCertificateValidationContext {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.\"\n        \"CombinedCertificateValidationContext\";\n\n    // How to validate peer certificates.\n    CertificateValidationContext default_validation_context = 1\n        [(validate.rules).message = {required: true}];\n\n    oneof dynamic_validation_context {\n      // Config for fetching validation context via SDS API. Note SDS API allows certificates to be\n      // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n      // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n      // or validation_context_certificate_provider_instance may be used.\n      SdsSecretConfig validation_context_sds_secret_config = 2\n          [(validate.rules).message = {required: true}];\n\n      // Certificate provider for fetching validation context.\n      // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n      // or validation_context_certificate_provider_instance may be used.\n      // [#not-implemented-hide:]\n      CertificateProvider validation_context_certificate_provider = 3;\n\n      // Certificate provider instance for fetching validation context.\n      // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n      // or validation_context_certificate_provider_instance may be used.\n      // [#not-implemented-hide:]\n      CertificateProviderInstance validation_context_certificate_provider_instance = 4;\n    }\n  }\n\n  reserved 5;\n\n  // TLS protocol versions, cipher suites etc.\n  TlsParameters tls_params = 1;\n\n  // :ref:`Multiple TLS certificates <arch_overview_ssl_cert_select>` 
can be associated with the\n  // same context to allow both RSA and ECDSA certificates.\n  //\n  // Only a single TLS certificate is supported in client contexts. In server contexts, the first\n  // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is\n  // used for clients that support ECDSA.\n  repeated TlsCertificate tls_certificates = 2;\n\n  // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be\n  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n  repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6\n      [(validate.rules).repeated = {max_items: 1}];\n\n  // Certificate provider for fetching TLS certificates.\n  // [#not-implemented-hide:]\n  CertificateProvider tls_certificate_certificate_provider = 9;\n\n  // Certificate provider instance for fetching TLS certificates.\n  // [#not-implemented-hide:]\n  CertificateProviderInstance tls_certificate_certificate_provider_instance = 11;\n\n  oneof validation_context_type {\n    // How to validate peer certificates.\n    CertificateValidationContext validation_context = 3;\n\n    // Config for fetching validation context via SDS API. Note SDS API allows certificates to be\n    // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n    SdsSecretConfig validation_context_sds_secret_config = 7;\n\n    // Combined certificate validation context holds a default CertificateValidationContext\n    // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic\n    // and default CertificateValidationContext are merged into a new CertificateValidationContext\n    // for validation. 
This merge is done by Message::MergeFrom(), so dynamic\n    // CertificateValidationContext overwrites singular fields in default\n    // CertificateValidationContext, and concatenates repeated fields to default\n    // CertificateValidationContext, and logical OR is applied to boolean fields.\n    CombinedCertificateValidationContext combined_validation_context = 8;\n\n    // Certificate provider for fetching validation context.\n    // [#not-implemented-hide:]\n    CertificateProvider validation_context_certificate_provider = 10;\n\n    // Certificate provider instance for fetching validation context.\n    // [#not-implemented-hide:]\n    CertificateProviderInstance validation_context_certificate_provider_instance = 12;\n  }\n\n  // Supplies the list of ALPN protocols that the listener should expose. In\n  // practice this is likely to be set to one of two values (see the\n  // :ref:`codec_type\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.codec_type>`\n  // parameter in the HTTP connection manager for more information):\n  //\n  // * \"h2,http/1.1\" If the listener is going to support both HTTP/2 and HTTP/1.1.\n  // * \"http/1.1\" If the listener is only going to support HTTP/1.1.\n  //\n  // There is no default for this parameter. If empty, Envoy will not expose ALPN.\n  repeated string alpn_protocols = 4;\n\n  // Custom TLS handshaker. If empty, defaults to native TLS handshaking\n  // behavior.\n  config.core.v4alpha.TypedExtensionConfig custom_handshaker = 13;\n}\n"
  },
  {
    "path": "api/envoy/extensions/upstreams/http/generic/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.upstreams.http.generic.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.upstreams.http.generic.v3\";\noption java_outer_classname = \"GenericConnectionPoolProtoOuterClass\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Generic Connection Pool]\n\n// A connection pool which forwards downstream HTTP as TCP or HTTP to upstream,\n// based on CONNECT configuration.\n// [#extension: envoy.upstreams.http.generic]\nmessage GenericConnectionPoolProto {\n}\n"
  },
  {
    "path": "api/envoy/extensions/upstreams/http/http/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.upstreams.http.http.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.upstreams.http.http.v3\";\noption java_outer_classname = \"HttpConnectionPoolProtoOuterClass\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Http Connection Pool]\n\n// A connection pool which forwards downstream HTTP as HTTP to upstream.\n// [#extension: envoy.upstreams.http.http]\nmessage HttpConnectionPoolProto {\n}\n"
  },
  {
    "path": "api/envoy/extensions/upstreams/http/tcp/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.upstreams.http.tcp.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3\";\noption java_outer_classname = \"TcpConnectionPoolProtoOuterClass\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tcp Connection Pool]\n\n// A connection pool which forwards downstream HTTP as TCP to upstream,\n// [#extension: envoy.upstreams.http.tcp]\nmessage TcpConnectionPoolProto {\n}\n"
  },
  {
    "path": "api/envoy/extensions/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/extensions/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.wasm.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm]\n// [#extension: envoy.bootstrap.wasm]\n\n// Configuration for a Wasm VM.\n// [#next-free-field: 7]\nmessage VmConfig {\n  // An ID which will be used along with a hash of the wasm code (or the name of the registered Null\n  // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same\n  // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can\n  // reduce memory utilization and make sharing of data easier which may have security implications.\n  // See ref: \"TODO: add ref\" for details.\n  string vm_id = 1;\n\n  // The Wasm runtime type (either \"v8\" or \"null\" for code compiled into Envoy).\n  string runtime = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The Wasm code that Envoy will execute.\n  config.core.v3.AsyncDataSource code = 3;\n\n  // The Wasm configuration used in initialization of a new VM\n  // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before\n  // passing it to the plugin. 
`google.protobuf.BytesValue` and\n  // `google.protobuf.StringValue` are passed directly without the wrapper.\n  google.protobuf.Any configuration = 4;\n\n  // Allow the wasm file to include pre-compiled code on VMs which support it.\n  // Warning: this should only be enabled for trusted sources as the precompiled code is not\n  // verified.\n  bool allow_precompiled = 5;\n\n  // If true and the code needs to be remotely fetched and it is not in the cache then NACK the configuration\n  // update and do a background fetch to fill the cache, otherwise fetch the code asynchronously and enter\n  // warming state.\n  bool nack_on_code_cache_miss = 6;\n}\n\n// Base Configuration for Wasm Plugins e.g. filters and services.\n// [#next-free-field: 6]\nmessage PluginConfig {\n  // A unique name for filters/services in a VM for use in identifying the filter/service if\n  // multiple filters/services are handled by the same *vm_id* and *root_id* and for\n  // logging/debugging.\n  string name = 1;\n\n  // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts\n  // if applicable (e.g. a Wasm HttpFilter and a Wasm AccessLog). If left blank, all\n  // filters/services with a blank root_id with the same *vm_id* will share Context(s).\n  string root_id = 2;\n\n  // Configuration for finding or starting VM.\n  oneof vm {\n    VmConfig vm_config = 3;\n    // TODO: add referential VM configurations.\n  }\n\n  // Filter/service configuration used to configure or reconfigure a plugin\n  // (proxy_on_configuration).\n  // `google.protobuf.Struct` is serialized as JSON before\n  // passing it to the plugin. `google.protobuf.BytesValue` and\n  // `google.protobuf.StringValue` are passed directly without the wrapper.\n  google.protobuf.Any configuration = 4;\n\n  // If there is a fatal error on the VM (e.g. 
exception, abort(), on_start or on_configure return false),\n  // then all plugins associated with the VM will either fail closed (by default), e.g. by returning an HTTP 503 error,\n  // or fail open (if 'fail_open' is set to true) by bypassing the filter. Note: when on_start or on_configure return false\n  // during xDS updates the xDS configuration will be rejected and when on_start or on_configure return false on initial\n  // startup the proxy will not start.\n  bool fail_open = 5;\n}\n\n// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService\n// <config_wasm_service>` This opaque configuration will be used to create a Wasm Service.\nmessage WasmService {\n  // General plugin configuration.\n  PluginConfig config = 1;\n\n  // If true, create a single VM rather than creating one VM per worker. Such a singleton can\n  // not be used with filters.\n  bool singleton = 2;\n}\n"
  },
  {
    "path": "api/envoy/extensions/watchdog/abort_action/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.watchdog.abort_action.v3alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.watchdog.abort_action.v3alpha\";\noption java_outer_classname = \"AbortActionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Watchdog Action that sends a SIGABRT to kill the process.]\n// [#extension: envoy.watchdog.abort_action]\n\n// A GuardDogAction that will terminate the process by sending SIGABRT to the\n// stuck thread. This would allow easier access to the call stack of the stuck\n// thread since we would run signal handlers on that thread. This would be\n// more useful than the default watchdog kill behaviors since those PANIC\n// from the watchdog's thread.\n\n// This is currently only implemented for systems that support kill to send\n// signals.\nmessage AbortActionConfig {\n  // How long to wait for the thread to respond to the SIGABRT before killing the\n  // process from this action. This is a blocking action.\n  google.protobuf.Duration wait_duration = 1;\n}\n"
  },
  {
    "path": "api/envoy/extensions/watchdog/profile_action/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.watchdog.profile_action.v3alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.watchdog.profile_action.v3alpha\";\noption java_outer_classname = \"ProfileActionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Watchdog Action that does CPU profiling.]\n// [#extension: envoy.watchdog.profile_action]\n\n// Configuration for the profile watchdog action.\nmessage ProfileActionConfig {\n  // How long the profile should last. If not set defaults to 5 seconds.\n  google.protobuf.Duration profile_duration = 1;\n\n  // File path to the directory to output profiles.\n  string profile_path = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Limits the max number of profiles that can be generated by this action\n  // over its lifetime to avoid filling the disk.\n  // If not set (i.e. it's 0), a default of 10 will be used.\n  uint64 max_profiles = 3;\n}\n"
  },
  {
    "path": "api/envoy/service/README.md",
    "content": "Protocol buffer definitions for gRPC and REST services.\n\nVisibility should be constrained to none (default).\n"
  },
  {
    "path": "api/envoy/service/accesslog/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/data/accesslog/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/accesslog/v2/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.accesslog.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/data/accesslog/v2/accesslog.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.accesslog.v2\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Service for streaming access logs from Envoy to an access log server.\nservice AccessLogService {\n  // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different\n  // API for \"critical\" access logs in which Envoy will buffer access logs for some period of time\n  // until it gets an ACK so it could then retry. This API is designed for high throughput with the\n  // expectation that it might be lossy.\n  rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) {\n  }\n}\n\n// Empty response for the StreamAccessLogs API. Will never be sent. See below.\nmessage StreamAccessLogsResponse {\n}\n\n// Stream message for the StreamAccessLogs API. 
Envoy will open a stream to the server and stream\n// access logs without ever expecting a response.\nmessage StreamAccessLogsMessage {\n  message Identifier {\n    // The node sending the access log messages over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig\n    // <envoy_api_msg_config.accesslog.v2.CommonGrpcAccessLogConfig>`.\n    string log_name = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Wrapper for batches of HTTP access log entries.\n  message HTTPAccessLogEntries {\n    repeated data.accesslog.v2.HTTPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Wrapper for batches of TCP access log entries.\n  message TCPAccessLogEntries {\n    repeated data.accesslog.v2.TCPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batches of log entries of a single type. Generally speaking, a given stream should only\n  // ever include one type of log entry.\n  oneof log_entries {\n    option (validate.required) = true;\n\n    HTTPAccessLogEntries http_logs = 2;\n\n    TCPAccessLogEntries tcp_logs = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/service/accesslog/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/accesslog/v3:pkg\",\n        \"//envoy/service/accesslog/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/accesslog/v3/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.accesslog.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/data/accesslog/v3/accesslog.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.accesslog.v3\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Service for streaming access logs from Envoy to an access log server.\nservice AccessLogService {\n  // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different\n  // API for \"critical\" access logs in which Envoy will buffer access logs for some period of time\n  // until it gets an ACK so it could then retry. This API is designed for high throughput with the\n  // expectation that it might be lossy.\n  rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) {\n  }\n}\n\n// Empty response for the StreamAccessLogs API. Will never be sent. See below.\nmessage StreamAccessLogsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.accesslog.v2.StreamAccessLogsResponse\";\n}\n\n// Stream message for the StreamAccessLogs API. 
Envoy will open a stream to the server and stream\n// access logs without ever expecting a response.\nmessage StreamAccessLogsMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.accesslog.v2.StreamAccessLogsMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v2.StreamAccessLogsMessage.Identifier\";\n\n    // The node sending the access log messages over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig\n    // <envoy_api_msg_extensions.access_loggers.grpc.v3.CommonGrpcAccessLogConfig>`.\n    string log_name = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Wrapper for batches of HTTP access log entries.\n  message HTTPAccessLogEntries {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v2.StreamAccessLogsMessage.HTTPAccessLogEntries\";\n\n    repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Wrapper for batches of TCP access log entries.\n  message TCPAccessLogEntries {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v2.StreamAccessLogsMessage.TCPAccessLogEntries\";\n\n    repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batches of log entries of a single type. 
Generally speaking, a given stream should only\n  // ever include one type of log entry.\n  oneof log_entries {\n    option (validate.required) = true;\n\n    HTTPAccessLogEntries http_logs = 2;\n\n    TCPAccessLogEntries tcp_logs = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/service/accesslog/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/data/accesslog/v3:pkg\",\n        \"//envoy/service/accesslog/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/accesslog/v4alpha/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.accesslog.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/data/accesslog/v3/accesslog.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.accesslog.v4alpha\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Service for streaming access logs from Envoy to an access log server.\nservice AccessLogService {\n  // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different\n  // API for \"critical\" access logs in which Envoy will buffer access logs for some period of time\n  // until it gets an ACK so it could then retry. This API is designed for high throughput with the\n  // expectation that it might be lossy.\n  rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) {\n  }\n}\n\n// Empty response for the StreamAccessLogs API. Will never be sent. See below.\nmessage StreamAccessLogsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.accesslog.v3.StreamAccessLogsResponse\";\n}\n\n// Stream message for the StreamAccessLogs API. 
Envoy will open a stream to the server and stream\n// access logs without ever expecting a response.\nmessage StreamAccessLogsMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.accesslog.v3.StreamAccessLogsMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v3.StreamAccessLogsMessage.Identifier\";\n\n    // The node sending the access log messages over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig\n    // <envoy_api_msg_extensions.access_loggers.grpc.v3.CommonGrpcAccessLogConfig>`.\n    string log_name = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Wrapper for batches of HTTP access log entries.\n  message HTTPAccessLogEntries {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v3.StreamAccessLogsMessage.HTTPAccessLogEntries\";\n\n    repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Wrapper for batches of TCP access log entries.\n  message TCPAccessLogEntries {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v3.StreamAccessLogsMessage.TCPAccessLogEntries\";\n\n    repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batches of log entries of a single type. 
Generally speaking, a given stream should only\n  // ever include one type of log entry.\n  oneof log_entries {\n    option (validate.required) = true;\n\n    HTTPAccessLogEntries http_logs = 2;\n\n    TCPAccessLogEntries tcp_logs = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/service/auth/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/auth/v2/attribute_context.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v2\";\noption java_outer_classname = \"AttributeContextProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Attribute Context ]\n\n// See :ref:`network filter configuration overview <config_network_filters_ext_authz>`\n// and :ref:`HTTP filter configuration overview <config_http_filters_ext_authz>`.\n\n// An attribute is a piece of metadata that describes an activity on a network.\n// For example, the size of an HTTP request, or the status code of an HTTP response.\n//\n// Each attribute has a type and a name, which is logically defined as a proto message field\n// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes\n// supported by Envoy authorization system.\n// [#comment: The following items are left out of this proto\n// Request.Auth field for jwt tokens\n// Request.Api for api management\n// Origin peer that originated the request\n// Caching Protocol\n// request_context return values to inject back into the filter chain\n// peer.claims -- from X.509 extensions\n// Configuration\n// - field mask to send\n// - which return values from request_context are copied back\n// - which return values are copied into request_headers]\n// [#next-free-field: 12]\nmessage AttributeContext {\n  // This message defines attributes for a node that handles a network request.\n  // The node can be either a service or an application that sends, forwards,\n  // or receives the request. 
Service peers should fill in the `service`,\n  // `principal`, and `labels` as appropriate.\n  // [#next-free-field: 6]\n  message Peer {\n    // The address of the peer, this is typically the IP address.\n    // It can also be UDS path, or others.\n    api.v2.core.Address address = 1;\n\n    // The canonical service name of the peer.\n    // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster\n    // <config_http_conn_man_headers_downstream-service-cluster>`\n    // If a more trusted source of the service name is available through mTLS/secure naming, it\n    // should be used.\n    string service = 2;\n\n    // The labels associated with the peer.\n    // These could be pod labels for Kubernetes or tags for VMs.\n    // The source of the labels could be an X.509 certificate or other configuration.\n    map<string, string> labels = 3;\n\n    // The authenticated identity of this peer.\n    // For example, the identity associated with the workload such as a service account.\n    // If an X.509 certificate is used to assert the identity this field should be sourced from\n    // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.\n    // The primary identity should be the principal. 
The principal format is issuer specific.\n    //\n    // Example:\n    // *    SPIFFE format is `spiffe://trust-domain/path`\n    // *    Google account format is `https://accounts.google.com/{userid}`\n    string principal = 4;\n\n    // The X.509 certificate used to authenticate the identity of this peer.\n    // When present, the certificate contents are encoded in URL and PEM format.\n    string certificate = 5;\n  }\n\n  // Represents a network request, such as an HTTP request.\n  message Request {\n    // The timestamp when the proxy receives the first byte of the request.\n    google.protobuf.Timestamp time = 1;\n\n    // Represents an HTTP request or an HTTP-like request.\n    HttpRequest http = 2;\n  }\n\n  // This message defines attributes for an HTTP request.\n  // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.\n  // [#next-free-field: 12]\n  message HttpRequest {\n    // The unique ID for a request, which can be propagated to downstream\n    // systems. The ID should have low probability of collision\n    // within a single day for a specific service.\n    // For HTTP requests, it should be X-Request-ID or equivalent.\n    string id = 1;\n\n    // The HTTP request method, such as `GET`, `POST`.\n    string method = 2;\n\n    // The HTTP request headers. If multiple headers share the same key, they\n    // must be merged according to the HTTP spec. All header keys must be\n    // lower-cased, because HTTP header keys are case-insensitive.\n    map<string, string> headers = 3;\n\n    // The request target, as it appears in the first line of the HTTP request. This includes\n    // the URL path and query-string. No decoding is performed.\n    string path = 4;\n\n    // The HTTP request `Host` or `Authority` header value.\n    string host = 5;\n\n    // The HTTP URL scheme, such as `http` and `https`.\n    string scheme = 6;\n\n    // This field is always empty, and exists for compatibility reasons. 
The HTTP URL query is\n    // included in `path` field.\n    string query = 7;\n\n    // This field is always empty, and exists for compatibility reasons. The URL fragment is\n    // not submitted as part of HTTP requests; it is unknowable.\n    string fragment = 8;\n\n    // The HTTP request size in bytes. If unknown, it must be -1.\n    int64 size = 9;\n\n    // The network protocol used with the request, such as \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2\".\n    //\n    // See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all\n    // possible values.\n    string protocol = 10;\n\n    // The HTTP request body.\n    string body = 11;\n  }\n\n  // The source of a network activity, such as starting a TCP connection.\n  // In a multi hop network activity, the source represents the sender of the\n  // last hop.\n  Peer source = 1;\n\n  // The destination of a network activity, such as accepting a TCP connection.\n  // In a multi hop network activity, the destination represents the receiver of\n  // the last hop.\n  Peer destination = 2;\n\n  // Represents a network request, such as an HTTP request.\n  Request request = 4;\n\n  // This is analogous to http_request.headers, however these contents will not be sent to the\n  // upstream server. Context_extensions provide an extension mechanism for sending additional\n  // information to the auth server without modifying the proto definition. It maps to the\n  // internal opaque context in the filter chain.\n  map<string, string> context_extensions = 10;\n\n  // Dynamic metadata associated with the request.\n  api.v2.core.Metadata metadata_context = 11;\n}\n"
  },
  {
    "path": "api/envoy/service/auth/v2/external_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/service/auth/v2/attribute_context.proto\";\nimport \"envoy/type/http_status.proto\";\n\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v2\";\noption java_outer_classname = \"ExternalAuthProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Authorization Service ]\n\n// The authorization service request messages used by external authorization :ref:`network filter\n// <config_network_filters_ext_authz>` and :ref:`HTTP filter <config_http_filters_ext_authz>`.\n\n// A generic interface for performing authorization check on incoming\n// requests to a networked service.\nservice Authorization {\n  // Performs authorization check based on the attributes associated with the\n  // incoming request, and returns status `OK` or not `OK`.\n  rpc Check(CheckRequest) returns (CheckResponse) {\n  }\n}\n\nmessage CheckRequest {\n  // The request attributes.\n  AttributeContext attributes = 1;\n}\n\n// HTTP attributes for a denied response.\nmessage DeniedHttpResponse {\n  // This field allows the authorization service to send a HTTP response status\n  // code to the downstream client other than 403 (Forbidden).\n  type.HttpStatus status = 1 [(validate.rules).message = {required: true}];\n\n  // This field allows the authorization service to send HTTP response headers\n  // to the downstream client. 
Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message.\n  repeated api.v2.core.HeaderValueOption headers = 2;\n\n  // This field allows the authorization service to send response body data\n  // to the downstream client.\n  string body = 3;\n}\n\n// HTTP attributes for an ok response.\nmessage OkHttpResponse {\n  // HTTP entity headers in addition to the original request headers. This allows the authorization\n  // service to append, to add or to override headers from the original request before\n  // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message. By setting the `append` field to `true`,\n  // the filter will append the correspondent header value to the matched request header.\n  // By leaving `append` as false, the filter will either add a new header, or override an existing\n  // one if there is a match.\n  repeated api.v2.core.HeaderValueOption headers = 2;\n}\n\n// Intended for gRPC and Network Authorization servers `only`.\nmessage CheckResponse {\n  // Status `OK` allows the request. Any other status indicates the request should be denied.\n  google.rpc.Status status = 1;\n\n  // A message that contains HTTP response attributes. This message is\n  // used when the authorization service needs to send custom responses to the\n  // downstream client or, to modify/add request headers being dispatched to the upstream.\n  oneof http_response {\n    // Supplies http attributes for a denied response.\n    DeniedHttpResponse denied_response = 2;\n\n    // Supplies http attributes for an ok response.\n    OkHttpResponse ok_response = 3;\n  }\n}\n"
  },
  {
    "path": "api/envoy/service/auth/v2alpha/BUILD",
    "content": "load(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\n# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\napi_proto_package(\n    has_services = True,\n    deps = [\"//envoy/service/auth/v2:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/service/auth/v2alpha/external_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v2alpha;\n\noption java_multiple_files = true;\noption java_generic_services = true;\noption java_outer_classname = \"CertsProto\";\noption java_package = \"io.envoyproxy.envoy.service.auth.v2alpha\";\n\nimport \"envoy/service/auth/v2/external_auth.proto\";\n\n// [#protodoc-title: Authorization Service ]\n\n// The authorization service request messages used by external authorization :ref:`network filter\n// <config_network_filters_ext_authz>` and :ref:`HTTP filter <config_http_filters_ext_authz>`.\n\n// A generic interface for performing authorization check on incoming\n// requests to a networked service.\nservice Authorization {\n  // Performs authorization check based on the attributes associated with the\n  // incoming request, and returns status `OK` or not `OK`.\n  rpc Check(v2.CheckRequest) returns (v2.CheckResponse);\n}\n"
  },
  {
    "path": "api/envoy/service/auth/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/auth/v2:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/auth/v3/attribute_context.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v3\";\noption java_outer_classname = \"AttributeContextProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Attribute Context ]\n\n// See :ref:`network filter configuration overview <config_network_filters_ext_authz>`\n// and :ref:`HTTP filter configuration overview <config_http_filters_ext_authz>`.\n\n// An attribute is a piece of metadata that describes an activity on a network.\n// For example, the size of an HTTP request, or the status code of an HTTP response.\n//\n// Each attribute has a type and a name, which is logically defined as a proto message field\n// of the `AttributeContext`. 
The `AttributeContext` is a collection of individual attributes\n// supported by Envoy authorization system.\n// [#comment: The following items are left out of this proto\n// Request.Auth field for jwt tokens\n// Request.Api for api management\n// Origin peer that originated the request\n// Caching Protocol\n// request_context return values to inject back into the filter chain\n// peer.claims -- from X.509 extensions\n// Configuration\n// - field mask to send\n// - which return values from request_context are copied back\n// - which return values are copied into request_headers]\n// [#next-free-field: 12]\nmessage AttributeContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v2.AttributeContext\";\n\n  // This message defines attributes for a node that handles a network request.\n  // The node can be either a service or an application that sends, forwards,\n  // or receives the request. Service peers should fill in the `service`,\n  // `principal`, and `labels` as appropriate.\n  // [#next-free-field: 6]\n  message Peer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v2.AttributeContext.Peer\";\n\n    // The address of the peer, this is typically the IP address.\n    // It can also be UDS path, or others.\n    config.core.v3.Address address = 1;\n\n    // The canonical service name of the peer.\n    // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster\n    // <config_http_conn_man_headers_downstream-service-cluster>`\n    // If a more trusted source of the service name is available through mTLS/secure naming, it\n    // should be used.\n    string service = 2;\n\n    // The labels associated with the peer.\n    // These could be pod labels for Kubernetes or tags for VMs.\n    // The source of the labels could be an X.509 certificate or other configuration.\n    map<string, string> labels = 3;\n\n    // The authenticated identity of this peer.\n    
// For example, the identity associated with the workload such as a service account.\n    // If an X.509 certificate is used to assert the identity this field should be sourced from\n    // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.\n    // The primary identity should be the principal. The principal format is issuer specific.\n    //\n    // Example:\n    // *    SPIFFE format is `spiffe://trust-domain/path`\n    // *    Google account format is `https://accounts.google.com/{userid}`\n    string principal = 4;\n\n    // The X.509 certificate used to authenticate the identity of this peer.\n    // When present, the certificate contents are encoded in URL and PEM format.\n    string certificate = 5;\n  }\n\n  // Represents a network request, such as an HTTP request.\n  message Request {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v2.AttributeContext.Request\";\n\n    // The timestamp when the proxy receives the first byte of the request.\n    google.protobuf.Timestamp time = 1;\n\n    // Represents an HTTP request or an HTTP-like request.\n    HttpRequest http = 2;\n  }\n\n  // This message defines attributes for an HTTP request.\n  // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.\n  // [#next-free-field: 13]\n  message HttpRequest {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v2.AttributeContext.HttpRequest\";\n\n    // The unique ID for a request, which can be propagated to downstream\n    // systems. The ID should have low probability of collision\n    // within a single day for a specific service.\n    // For HTTP requests, it should be X-Request-ID or equivalent.\n    string id = 1;\n\n    // The HTTP request method, such as `GET`, `POST`.\n    string method = 2;\n\n    // The HTTP request headers. If multiple headers share the same key, they\n    // must be merged according to the HTTP spec. 
All header keys must be\n    // lower-cased, because HTTP header keys are case-insensitive.\n    map<string, string> headers = 3;\n\n    // The request target, as it appears in the first line of the HTTP request. This includes\n    // the URL path and query-string. No decoding is performed.\n    string path = 4;\n\n    // The HTTP request `Host` or `Authority` header value.\n    string host = 5;\n\n    // The HTTP URL scheme, such as `http` and `https`.\n    string scheme = 6;\n\n    // This field is always empty, and exists for compatibility reasons. The HTTP URL query is\n    // included in `path` field.\n    string query = 7;\n\n    // This field is always empty, and exists for compatibility reasons. The URL fragment is\n    // not submitted as part of HTTP requests; it is unknowable.\n    string fragment = 8;\n\n    // The HTTP request size in bytes. If unknown, it must be -1.\n    int64 size = 9;\n\n    // The network protocol used with the request, such as \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2\".\n    //\n    // See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all\n    // possible values.\n    string protocol = 10;\n\n    // The HTTP request body.\n    string body = 11;\n\n    // The HTTP request body in bytes. 
This is used instead of\n    // :ref:`body <envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` when\n    // :ref:`pack_as_bytes <envoy_api_field_extensions.filters.http.ext_authz.v3.BufferSettings.pack_as_bytes>`\n    // is set to true.\n    bytes raw_body = 12;\n  }\n\n  // The source of a network activity, such as starting a TCP connection.\n  // In a multi hop network activity, the source represents the sender of the\n  // last hop.\n  Peer source = 1;\n\n  // The destination of a network activity, such as accepting a TCP connection.\n  // In a multi hop network activity, the destination represents the receiver of\n  // the last hop.\n  Peer destination = 2;\n\n  // Represents a network request, such as an HTTP request.\n  Request request = 4;\n\n  // This is analogous to http_request.headers, however these contents will not be sent to the\n  // upstream server. Context_extensions provide an extension mechanism for sending additional\n  // information to the auth server without modifying the proto definition. It maps to the\n  // internal opaque context in the filter chain.\n  map<string, string> context_extensions = 10;\n\n  // Dynamic metadata associated with the request.\n  config.core.v3.Metadata metadata_context = 11;\n}\n"
  },
  {
    "path": "api/envoy/service/auth/v3/external_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/service/auth/v3/attribute_context.proto\";\nimport \"envoy/type/v3/http_status.proto\";\n\nimport \"google/protobuf/struct.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v3\";\noption java_outer_classname = \"ExternalAuthProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Authorization Service ]\n\n// The authorization service request messages used by external authorization :ref:`network filter\n// <config_network_filters_ext_authz>` and :ref:`HTTP filter <config_http_filters_ext_authz>`.\n\n// A generic interface for performing authorization check on incoming\n// requests to a networked service.\nservice Authorization {\n  // Performs authorization check based on the attributes associated with the\n  // incoming request, and returns status `OK` or not `OK`.\n  rpc Check(CheckRequest) returns (CheckResponse) {\n  }\n}\n\nmessage CheckRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.service.auth.v2.CheckRequest\";\n\n  // The request attributes.\n  AttributeContext attributes = 1;\n}\n\n// HTTP attributes for a denied response.\nmessage DeniedHttpResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v2.DeniedHttpResponse\";\n\n  // This field allows the authorization service to send a HTTP response status\n  // code to the downstream client other than 403 (Forbidden).\n  type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}];\n\n  // This field allows the authorization service to send HTTP response headers\n  // to the 
downstream client. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message.\n  repeated config.core.v3.HeaderValueOption headers = 2;\n\n  // This field allows the authorization service to send a response body data\n  // to the downstream client.\n  string body = 3;\n}\n\n// HTTP attributes for an OK response.\n// [#next-free-field: 6]\nmessage OkHttpResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v2.OkHttpResponse\";\n\n  // HTTP entity headers in addition to the original request headers. This allows the authorization\n  // service to append, to add or to override headers from the original request before\n  // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message. By setting the `append` field to `true`,\n  // the filter will append the correspondent header value to the matched request header.\n  // By leaving `append` as false, the filter will either add a new header, or override an existing\n  // one if there is a match.\n  repeated config.core.v3.HeaderValueOption headers = 2;\n\n  // HTTP entity headers to remove from the original request before dispatching\n  // it to the upstream. This allows the authorization service to act on auth\n  // related headers (like `Authorization`), process them, and consume them.\n  // Under this model, the upstream will either receive the request (if it's\n  // authorized) or not receive it (if it's not), but will not see headers\n  // containing authorization credentials.\n  //\n  // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as\n  // the header `Host`, may not be removed as that would make the request\n  // malformed. 
If mentioned in `headers_to_remove` these special headers will\n  // be ignored.\n  //\n  // When using the HTTP service this must instead be set by the HTTP\n  // authorization service as a comma separated list like so:\n  // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``.\n  repeated string headers_to_remove = 5;\n\n  // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata\n  // <envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>`. Until it is removed,\n  // setting this field overrides :ref:`CheckResponse.dynamic_metadata\n  // <envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>`.\n  google.protobuf.Struct dynamic_metadata = 3 [deprecated = true];\n}\n\n// Intended for gRPC and Network Authorization servers `only`.\nmessage CheckResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v2.CheckResponse\";\n\n  // Status `OK` allows the request. Any other status indicates the request should be denied.\n  google.rpc.Status status = 1;\n\n  // A message that contains HTTP response attributes. This message is\n  // used when the authorization service needs to send custom responses to the\n  // downstream client or, to modify/add request headers being dispatched to the upstream.\n  oneof http_response {\n    // Supplies http attributes for a denied response.\n    DeniedHttpResponse denied_response = 2;\n\n    // Supplies http attributes for an ok response.\n    OkHttpResponse ok_response = 3;\n  }\n\n  // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next\n  // filter. 
This metadata lives in a namespace specified by the canonical name of extension filter\n  // that requires it:\n  //\n  // - :ref:`envoy.filters.http.ext_authz <config_http_filters_ext_authz_dynamic_metadata>` for HTTP filter.\n  // - :ref:`envoy.filters.network.ext_authz <config_network_filters_ext_authz_dynamic_metadata>` for network filter.\n  google.protobuf.Struct dynamic_metadata = 4;\n}\n"
  },
  {
    "path": "api/envoy/service/auth/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/auth/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/auth/v4alpha/attribute_context.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v4alpha\";\noption java_outer_classname = \"AttributeContextProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Attribute Context ]\n\n// See :ref:`network filter configuration overview <config_network_filters_ext_authz>`\n// and :ref:`HTTP filter configuration overview <config_http_filters_ext_authz>`.\n\n// An attribute is a piece of metadata that describes an activity on a network.\n// For example, the size of an HTTP request, or the status code of an HTTP response.\n//\n// Each attribute has a type and a name, which is logically defined as a proto message field\n// of the `AttributeContext`. 
The `AttributeContext` is a collection of individual attributes\n// supported by Envoy authorization system.\n// [#comment: The following items are left out of this proto\n// Request.Auth field for jwt tokens\n// Request.Api for api management\n// Origin peer that originated the request\n// Caching Protocol\n// request_context return values to inject back into the filter chain\n// peer.claims -- from X.509 extensions\n// Configuration\n// - field mask to send\n// - which return values from request_context are copied back\n// - which return values are copied into request_headers]\n// [#next-free-field: 12]\nmessage AttributeContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v3.AttributeContext\";\n\n  // This message defines attributes for a node that handles a network request.\n  // The node can be either a service or an application that sends, forwards,\n  // or receives the request. Service peers should fill in the `service`,\n  // `principal`, and `labels` as appropriate.\n  // [#next-free-field: 6]\n  message Peer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v3.AttributeContext.Peer\";\n\n    // The address of the peer, this is typically the IP address.\n    // It can also be UDS path, or others.\n    config.core.v4alpha.Address address = 1;\n\n    // The canonical service name of the peer.\n    // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster\n    // <config_http_conn_man_headers_downstream-service-cluster>`\n    // If a more trusted source of the service name is available through mTLS/secure naming, it\n    // should be used.\n    string service = 2;\n\n    // The labels associated with the peer.\n    // These could be pod labels for Kubernetes or tags for VMs.\n    // The source of the labels could be an X.509 certificate or other configuration.\n    map<string, string> labels = 3;\n\n    // The authenticated identity of this 
peer.\n    // For example, the identity associated with the workload such as a service account.\n    // If an X.509 certificate is used to assert the identity this field should be sourced from\n    // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.\n    // The primary identity should be the principal. The principal format is issuer specific.\n    //\n    // Example:\n    // *    SPIFFE format is `spiffe://trust-domain/path`\n    // *    Google account format is `https://accounts.google.com/{userid}`\n    string principal = 4;\n\n    // The X.509 certificate used to authenticate the identity of this peer.\n    // When present, the certificate contents are encoded in URL and PEM format.\n    string certificate = 5;\n  }\n\n  // Represents a network request, such as an HTTP request.\n  message Request {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v3.AttributeContext.Request\";\n\n    // The timestamp when the proxy receives the first byte of the request.\n    google.protobuf.Timestamp time = 1;\n\n    // Represents an HTTP request or an HTTP-like request.\n    HttpRequest http = 2;\n  }\n\n  // This message defines attributes for an HTTP request.\n  // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.\n  // [#next-free-field: 13]\n  message HttpRequest {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v3.AttributeContext.HttpRequest\";\n\n    // The unique ID for a request, which can be propagated to downstream\n    // systems. The ID should have low probability of collision\n    // within a single day for a specific service.\n    // For HTTP requests, it should be X-Request-ID or equivalent.\n    string id = 1;\n\n    // The HTTP request method, such as `GET`, `POST`.\n    string method = 2;\n\n    // The HTTP request headers. 
If multiple headers share the same key, they\n    // must be merged according to the HTTP spec. All header keys must be\n    // lower-cased, because HTTP header keys are case-insensitive.\n    map<string, string> headers = 3;\n\n    // The request target, as it appears in the first line of the HTTP request. This includes\n    // the URL path and query-string. No decoding is performed.\n    string path = 4;\n\n    // The HTTP request `Host` or `Authority` header value.\n    string host = 5;\n\n    // The HTTP URL scheme, such as `http` and `https`.\n    string scheme = 6;\n\n    // This field is always empty, and exists for compatibility reasons. The HTTP URL query is\n    // included in `path` field.\n    string query = 7;\n\n    // This field is always empty, and exists for compatibility reasons. The URL fragment is\n    // not submitted as part of HTTP requests; it is unknowable.\n    string fragment = 8;\n\n    // The HTTP request size in bytes. If unknown, it must be -1.\n    int64 size = 9;\n\n    // The network protocol used with the request, such as \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2\".\n    //\n    // See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all\n    // possible values.\n    string protocol = 10;\n\n    // The HTTP request body.\n    string body = 11;\n\n    // The HTTP request body in bytes. 
This is used instead of\n    // :ref:`body <envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` when\n    // :ref:`pack_as_bytes <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.BufferSettings.pack_as_bytes>`\n    // is set to true.\n    bytes raw_body = 12;\n  }\n\n  // The source of a network activity, such as starting a TCP connection.\n  // In a multi hop network activity, the source represents the sender of the\n  // last hop.\n  Peer source = 1;\n\n  // The destination of a network activity, such as accepting a TCP connection.\n  // In a multi hop network activity, the destination represents the receiver of\n  // the last hop.\n  Peer destination = 2;\n\n  // Represents a network request, such as an HTTP request.\n  Request request = 4;\n\n  // This is analogous to http_request.headers, however these contents will not be sent to the\n  // upstream server. Context_extensions provide an extension mechanism for sending additional\n  // information to the auth server without modifying the proto definition. It maps to the\n  // internal opaque context in the filter chain.\n  map<string, string> context_extensions = 10;\n\n  // Dynamic metadata associated with the request.\n  config.core.v4alpha.Metadata metadata_context = 11;\n}\n"
  },
  {
    "path": "api/envoy/service/auth/v4alpha/external_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/service/auth/v4alpha/attribute_context.proto\";\nimport \"envoy/type/v3/http_status.proto\";\n\nimport \"google/protobuf/struct.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v4alpha\";\noption java_outer_classname = \"ExternalAuthProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Authorization Service ]\n\n// The authorization service request messages used by external authorization :ref:`network filter\n// <config_network_filters_ext_authz>` and :ref:`HTTP filter <config_http_filters_ext_authz>`.\n\n// A generic interface for performing authorization check on incoming\n// requests to a networked service.\nservice Authorization {\n  // Performs authorization check based on the attributes associated with the\n  // incoming request, and returns status `OK` or not `OK`.\n  rpc Check(CheckRequest) returns (CheckResponse) {\n  }\n}\n\nmessage CheckRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.service.auth.v3.CheckRequest\";\n\n  // The request attributes.\n  AttributeContext attributes = 1;\n}\n\n// HTTP attributes for a denied response.\nmessage DeniedHttpResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v3.DeniedHttpResponse\";\n\n  // This field allows the authorization service to send a HTTP response status\n  // code to the downstream client other than 403 (Forbidden).\n  type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}];\n\n  // This field allows the authorization service to 
send HTTP response headers\n  // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message.\n  repeated config.core.v4alpha.HeaderValueOption headers = 2;\n\n  // This field allows the authorization service to send a response body data\n  // to the downstream client.\n  string body = 3;\n}\n\n// HTTP attributes for an OK response.\n// [#next-free-field: 6]\nmessage OkHttpResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v3.OkHttpResponse\";\n\n  reserved 3;\n\n  reserved \"dynamic_metadata\";\n\n  // HTTP entity headers in addition to the original request headers. This allows the authorization\n  // service to append, to add or to override headers from the original request before\n  // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message. By setting the `append` field to `true`,\n  // the filter will append the correspondent header value to the matched request header.\n  // By leaving `append` as false, the filter will either add a new header, or override an existing\n  // one if there is a match.\n  repeated config.core.v4alpha.HeaderValueOption headers = 2;\n\n  // HTTP entity headers to remove from the original request before dispatching\n  // it to the upstream. This allows the authorization service to act on auth\n  // related headers (like `Authorization`), process them, and consume them.\n  // Under this model, the upstream will either receive the request (if it's\n  // authorized) or not receive it (if it's not), but will not see headers\n  // containing authorization credentials.\n  //\n  // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as\n  // the header `Host`, may not be removed as that would make the request\n  // malformed. 
If mentioned in `headers_to_remove` these special headers will\n  // be ignored.\n  //\n  // When using the HTTP service this must instead be set by the HTTP\n  // authorization service as a comma separated list like so:\n  // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``.\n  repeated string headers_to_remove = 5;\n}\n\n// Intended for gRPC and Network Authorization servers `only`.\nmessage CheckResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v3.CheckResponse\";\n\n  // Status `OK` allows the request. Any other status indicates the request should be denied.\n  google.rpc.Status status = 1;\n\n  // A message that contains HTTP response attributes. This message is\n  // used when the authorization service needs to send custom responses to the\n  // downstream client or, to modify/add request headers being dispatched to the upstream.\n  oneof http_response {\n    // Supplies http attributes for a denied response.\n    DeniedHttpResponse denied_response = 2;\n\n    // Supplies http attributes for an ok response.\n    OkHttpResponse ok_response = 3;\n  }\n\n  // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next\n  // filter. This metadata lives in a namespace specified by the canonical name of extension filter\n  // that requires it:\n  //\n  // - :ref:`envoy.filters.http.ext_authz <config_http_filters_ext_authz_dynamic_metadata>` for HTTP filter.\n  // - :ref:`envoy.filters.network.ext_authz <config_network_filters_ext_authz_dynamic_metadata>` for network filter.\n  google.protobuf.Struct dynamic_metadata = 4;\n}\n"
  },
  {
    "path": "api/envoy/service/cluster/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/cluster/v3/cds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.cluster.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.cluster.v3\";\noption java_outer_classname = \"CdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: CDS]\n\n// Return list of all clusters this proxy will load balance to.\nservice ClusterDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.cluster.v3.Cluster\";\n\n  rpc StreamClusters(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaClusters(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchClusters(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:clusters\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage CdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.CdsDummy\";\n}\n"
  },
  {
    "path": "api/envoy/service/discovery/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/discovery/v2/ads.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v2\";\noption java_outer_classname = \"AdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Aggregated Discovery Service (ADS)]\n\n// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes,\n// and listeners are retained in the package `envoy.api.v2` for backwards\n// compatibility with existing management servers. New development in discovery\n// services should proceed in the package `envoy.service.discovery.v2`.\n\n// See https://github.com/lyft/envoy-api#apis for a description of the role of\n// ADS and how it is intended to be used by a management server. ADS requests\n// have the same structure as their singleton xDS counterparts, but can\n// multiplex many resource types on a single stream. The type_url in the\n// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover\n// the multiplexed singleton APIs at the Envoy instance and management server.\nservice AggregatedDiscoveryService {\n  // This is a gRPC-only API.\n  rpc StreamAggregatedResources(stream api.v2.DiscoveryRequest)\n      returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc DeltaAggregatedResources(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage AdsDummy {\n}\n"
  },
  {
    "path": "api/envoy/service/discovery/v2/hds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/health_check.proto\";\nimport \"envoy/api/v2/endpoint/endpoint_components.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v2\";\noption java_outer_classname = \"HdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.health.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Health Discovery Service (HDS)]\n\n// HDS is Health Discovery Service. It compliments Envoy’s health checking\n// service by designating this Envoy to be a healthchecker for a subset of hosts\n// in the cluster. The status of these health checks will be reported to the\n// management server, where it can be aggregated etc and redistributed back to\n// Envoy through EDS.\nservice HealthDiscoveryService {\n  // 1. Envoy starts up and if its can_healthcheck option in the static\n  //    bootstrap config is enabled, sends HealthCheckRequest to the management\n  //    server. It supplies its capabilities (which protocol it can health check\n  //    with, what zone it resides in, etc.).\n  // 2. In response to (1), the management server designates this Envoy as a\n  //    healthchecker to health check a subset of all upstream hosts for a given\n  //    cluster (for example upstream Host 1 and Host 2). It streams\n  //    HealthCheckSpecifier messages with cluster related configuration for all\n  //    clusters this Envoy is designated to health check. Subsequent\n  //    HealthCheckSpecifier message will be sent on changes to:\n  //    a. Endpoints to health checks\n  //    b. 
Per cluster configuration change\n  // 3. Envoy creates a health probe based on the HealthCheck config and sends\n  //    it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck\n  //    configuration Envoy waits upon the arrival of the probe response and\n  //    looks at the content of the response to decide whether the endpoint is\n  //    healthy or not. If a response hasn't been received within the timeout\n  //    interval, the endpoint health status is considered TIMEOUT.\n  // 4. Envoy reports results back in an EndpointHealthResponse message.\n  //    Envoy streams responses as often as the interval configured by the\n  //    management server in HealthCheckSpecifier.\n  // 5. The management Server collects health statuses for all endpoints in the\n  //    cluster (for all clusters) and uses this information to construct\n  //    EndpointDiscoveryResponse messages.\n  // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load\n  //    balances traffic to them without additional health checking. It may\n  //    use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection\n  //    failed to a particular endpoint to account for health status propagation\n  //    delay between HDS and EDS).\n  // By default, can_healthcheck is true. If can_healthcheck is false, Cluster\n  // configuration may not contain HealthCheck message.\n  // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above\n  // invariant?\n  // TODO(htuch): Add @amb67's diagram.\n  rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse)\n      returns (stream HealthCheckSpecifier) {\n  }\n\n  // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of\n  // request/response. 
Should we add an identifier to the HealthCheckSpecifier\n  // to bind with the response?\n  rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {\n    option (google.api.http).post = \"/v2/discovery:health_check\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Defines supported protocols etc, so the management server can assign proper\n// endpoints to healthcheck.\nmessage Capability {\n  // Different Envoy instances may have different capabilities (e.g. Redis)\n  // and/or have ports enabled for different protocols.\n  enum Protocol {\n    HTTP = 0;\n    TCP = 1;\n    REDIS = 2;\n  }\n\n  repeated Protocol health_check_protocols = 1;\n}\n\nmessage HealthCheckRequest {\n  api.v2.core.Node node = 1;\n\n  Capability capability = 2;\n}\n\nmessage EndpointHealth {\n  api.v2.endpoint.Endpoint endpoint = 1;\n\n  api.v2.core.HealthStatus health_status = 2;\n}\n\nmessage EndpointHealthResponse {\n  repeated EndpointHealth endpoints_health = 1;\n}\n\nmessage HealthCheckRequestOrEndpointHealthResponse {\n  oneof request_type {\n    HealthCheckRequest health_check_request = 1;\n\n    EndpointHealthResponse endpoint_health_response = 2;\n  }\n}\n\nmessage LocalityEndpoints {\n  api.v2.core.Locality locality = 1;\n\n  repeated api.v2.endpoint.Endpoint endpoints = 2;\n}\n\n// The cluster name and locality is provided to Envoy for the endpoints that it\n// health checks to support statistics reporting, logging and debugging by the\n// Envoy instance (outside of HDS). For maximum usefulness, it should match the\n// same cluster structure as that provided by EDS.\nmessage ClusterHealthCheck {\n  string cluster_name = 1;\n\n  repeated api.v2.core.HealthCheck health_checks = 2;\n\n  repeated LocalityEndpoints locality_endpoints = 3;\n}\n\nmessage HealthCheckSpecifier {\n  repeated ClusterHealthCheck cluster_health_checks = 1;\n\n  // The default is 1 second.\n  google.protobuf.Duration interval = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/discovery/v2/rtds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v2\";\noption java_outer_classname = \"RtdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.runtime.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Runtime Discovery Service (RTDS)]\n// RTDS :ref:`configuration overview <config_runtime_rtds>`\n\n// Discovery service for Runtime resources.\nservice RuntimeDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.service.discovery.v2.Runtime\";\n\n  rpc StreamRuntime(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc DeltaRuntime(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchRuntime(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:runtime\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage RtdsDummy {\n}\n\n// RTDS resource type. This describes a layer in the runtime virtual filesystem.\nmessage Runtime {\n  // Runtime resource name. This makes the Runtime a self-describing xDS\n  // resource.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  google.protobuf.Struct layer = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/discovery/v2/sds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v2\";\noption java_outer_classname = \"SdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.secret.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Secret Discovery Service (SDS)]\n\nservice SecretDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.auth.Secret\";\n\n  rpc DeltaSecrets(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n\n  rpc StreamSecrets(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc FetchSecrets(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:secrets\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage SdsDummy {\n}\n"
  },
  {
    "path": "api/envoy/service/discovery/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/discovery/v3/ads.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v3\";\noption java_outer_classname = \"AdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Aggregated Discovery Service (ADS)]\n\n// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes,\n// and listeners are retained in the package `envoy.api.v2` for backwards\n// compatibility with existing management servers. New development in discovery\n// services should proceed in the package `envoy.service.discovery.v2`.\n\n// See https://github.com/lyft/envoy-api#apis for a description of the role of\n// ADS and how it is intended to be used by a management server. ADS requests\n// have the same structure as their singleton xDS counterparts, but can\n// multiplex many resource types on a single stream. The type_url in the\n// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover\n// the multiplexed singleton APIs at the Envoy instance and management server.\nservice AggregatedDiscoveryService {\n  // This is a gRPC-only API.\n  rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest)\n      returns (stream DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage AdsDummy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.AdsDummy\";\n}\n"
  },
  {
    "path": "api/envoy/service/discovery/v3/discovery.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\nimport \"udpa/core/v1/resource_name.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v3\";\noption java_outer_classname = \"DiscoveryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common discovery API components]\n\n// A DiscoveryRequest requests a set of versioned resources of the same type for\n// a given Envoy node on some API.\n// [#next-free-field: 7]\nmessage DiscoveryRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.DiscoveryRequest\";\n\n  // The version_info provided in the request messages will be the version_info\n  // received with the most recent successfully processed response or empty on\n  // the first request. It is expected that no new request is sent after a\n  // response is received until the Envoy instance is ready to ACK/NACK the new\n  // configuration. ACK/NACK takes place by returning the new API config version\n  // as applied or the previous API config version respectively. Each type_url\n  // (see below) has an independent version associated with it.\n  string version_info = 1;\n\n  // The node making the request.\n  config.core.v3.Node node = 2;\n\n  // List of resources to subscribe to, e.g. list of cluster names or a route\n  // configuration name. If this is empty, all resources for the API are\n  // returned. LDS/CDS may have empty resource_names, which will cause all\n  // resources for the Envoy instance to be returned. 
The LDS and CDS responses\n  // will then imply a number of resources that need to be fetched via EDS/RDS,\n  // which will be explicitly enumerated in resource_names.\n  repeated string resource_names = 3;\n\n  // Type of the resource that is being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This is implicit\n  // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is\n  // required for ADS.\n  string type_url = 4;\n\n  // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above\n  // discussion on version_info and the DiscoveryResponse nonce comment. This\n  // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP,\n  // or 2) the client has not yet accepted an update in this xDS stream (unlike\n  // delta, where it is populated only for new explicit ACKs).\n  string response_nonce = 5;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_service.discovery.v3.DiscoveryResponse>`\n  // failed to update configuration. The *message* field in *error_details* provides the Envoy\n  // internal exception related to the failure. It is only intended for consumption during manual\n  // debugging, the string provided is not guaranteed to be stable across Envoy versions.\n  google.rpc.Status error_detail = 6;\n}\n\n// [#next-free-field: 7]\nmessage DiscoveryResponse {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.DiscoveryResponse\";\n\n  // The version of the response data.\n  string version_info = 1;\n\n  // The response resources. These resources are typed and depend on the API being called.\n  repeated google.protobuf.Any resources = 2;\n\n  // [#not-implemented-hide:]\n  // Canary is used to support two Envoy command line flags:\n  //\n  // * --terminate-on-canary-transition-failure. When set, Envoy is able to\n  //   terminate if it detects that configuration is stuck at canary. 
Consider\n  //   this example sequence of updates:\n  //   - Management server applies a canary config successfully.\n  //   - Management server rolls back to a production config.\n  //   - Envoy rejects the new production config.\n  //   Since there is no sensible way to continue receiving configuration\n  //   updates, Envoy will then terminate and apply production config from a\n  //   clean slate.\n  // * --dry-run-canary. When set, a canary response will never be applied, only\n  //   validated via a dry run.\n  bool canary = 3;\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty).\n  string type_url = 4;\n\n  // For gRPC based subscriptions, the nonce provides a way to explicitly ack a\n  // specific DiscoveryResponse in a following DiscoveryRequest. Additional\n  // messages may have been sent by Envoy to the management server for the\n  // previous version on the stream prior to this DiscoveryResponse, that were\n  // unprocessed at response send time. The nonce allows the management server\n  // to ignore any further DiscoveryRequests for the previous version until a\n  // DiscoveryRequest bearing the nonce. The nonce is optional and is not\n  // required for non-stream based xDS implementations.\n  string nonce = 5;\n\n  // [#not-implemented-hide:]\n  // The control plane instance that sent the response.\n  config.core.v3.ControlPlane control_plane = 6;\n}\n\n// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC\n// endpoint for Delta xDS.\n//\n// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full\n// snapshot of the tracked resources. 
Instead, DeltaDiscoveryResponses are a\n// diff to the state of a xDS client.\n// In Delta XDS there are per-resource versions, which allow tracking state at\n// the resource granularity.\n// An xDS Delta session is always in the context of a gRPC bidirectional\n// stream. This allows the xDS server to keep track of the state of xDS clients\n// connected to it.\n//\n// In Delta xDS the nonce field is required and used to pair\n// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK.\n// Optionally, a response message level system_version_info is present for\n// debugging purposes only.\n//\n// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest\n// can be either or both of: [1] informing the server of what resources the\n// client has gained/lost interest in (using resource_names_subscribe and\n// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from\n// the server (using response_nonce, with presence of error_detail making it a NACK).\n// Additionally, the first message (for a given type_url) of a reconnected gRPC stream\n// has a third role: informing the server of the resources (and their versions)\n// that the client already possesses, using the initial_resource_versions field.\n//\n// As with state-of-the-world, when multiple resource types are multiplexed (ADS),\n// all requests/acknowledgments/updates are logically walled off by type_url:\n// a Cluster ACK exists in a completely separate world from a prior Route NACK.\n// In particular, initial_resource_versions being sent at the \"start\" of every\n// gRPC stream actually entails a message for each type_url, each with its own\n// initial_resource_versions.\n// [#next-free-field: 10]\nmessage DeltaDiscoveryRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.DeltaDiscoveryRequest\";\n\n  // The node making the request.\n  config.core.v3.Node node = 1;\n\n  // Type of the resource that is being requested, e.g.\n  // 
\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This does not need to be set if\n  // resources are only referenced via *udpa_resource_subscribe* and\n  // *udpa_resources_unsubscribe*.\n  string type_url = 2;\n\n  // DeltaDiscoveryRequests allow the client to add or remove individual\n  // resources to the set of tracked resources in the context of a stream.\n  // All resource names in the resource_names_subscribe list are added to the\n  // set of tracked resources and all resource names in the resource_names_unsubscribe\n  // list are removed from the set of tracked resources.\n  //\n  // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or\n  // resource_names_unsubscribe list simply means that no resources are to be\n  // added or removed to the resource list.\n  // *Like* state-of-the-world xDS, the server must send updates for all tracked\n  // resources, but can also send updates for resources the client has not subscribed to.\n  //\n  // NOTE: the server must respond with all resources listed in resource_names_subscribe,\n  // even if it believes the client has the most recent version of them. The reason:\n  // the client may have dropped them, but then regained interest before it had a chance\n  // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd.\n  //\n  // These two fields can be set in any DeltaDiscoveryRequest, including ACKs\n  // and initial_resource_versions.\n  //\n  // A list of Resource names to add to the list of tracked resources.\n  repeated string resource_names_subscribe = 3;\n\n  // As with *resource_names_subscribe* but used when subscribing to resources indicated\n  // by a *udpa.core.v1.ResourceLocator*. 
The directives in the resource locator\n  // are ignored and the context parameters are matched with\n  // *context_param_specifier* specific semantics.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8;\n\n  // A list of Resource names to remove from the list of tracked resources.\n  repeated string resource_names_unsubscribe = 4;\n\n  // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a\n  // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed\n  // resource locator provided in *udpa_resources_subscribe*.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9;\n\n  // Informs the server of the versions of the resources the xDS client knows of, to enable the\n  // client to continue the same logical xDS session even in the face of gRPC stream reconnection.\n  // It will not be populated: [1] in the very first stream of a session, since the client will\n  // not yet have any resources,  [2] in any message after the first in a stream (for a given\n  // type_url), since the server will already be correctly tracking the client's state.\n  // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.)\n  // The map's keys are names of xDS resources known to the xDS client.\n  // The map's values are opaque resource versions.\n  map<string, string> initial_resource_versions = 5;\n\n  // When the DeltaDiscoveryRequest is a ACK or NACK message in response\n  // to a previous DeltaDiscoveryResponse, the response_nonce must be the\n  // nonce in the DeltaDiscoveryResponse.\n  // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted.\n  string response_nonce = 6;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_service.discovery.v3.DiscoveryResponse>`\n  // failed to update configuration. 
The *message* field in *error_details*\n  // provides the Envoy internal exception related to the failure.\n  google.rpc.Status error_detail = 7;\n}\n\n// [#next-free-field: 8]\nmessage DeltaDiscoveryResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.DeltaDiscoveryResponse\";\n\n  // The version of the response data (used for debugging).\n  string system_version_info = 1;\n\n  // The response resources. These are typed resources, whose types must match\n  // the type_url field.\n  repeated Resource resources = 2;\n\n  // field id 3 IS available!\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty.\n  // This does not need to be set if *udpa_removed_resources* is used instead of\n  // *removed_resources*.\n  string type_url = 4;\n\n  // Resources names of resources that have be deleted and to be removed from the xDS Client.\n  // Removed resources for missing resources can be ignored.\n  repeated string removed_resources = 6;\n\n  // As with *removed_resources* but used when a removed resource was named in\n  // its *Resource*s with a *udpa.core.v1.ResourceName*.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceName udpa_removed_resources = 7;\n\n  // The nonce provides a way for DeltaDiscoveryRequests to uniquely\n  // reference a DeltaDiscoveryResponse when (N)ACKing. 
The nonce is required.\n  string nonce = 5;\n}\n\n// [#next-free-field: 6]\nmessage Resource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Resource\";\n\n  // The resource's name, to distinguish it from others of the same type of resource.\n  string name = 3 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered.\n  udpa.core.v1.ResourceName udpa_resource_name = 5\n      [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  // The aliases are a list of other names that this resource can go by.\n  repeated string aliases = 4;\n\n  // The resource level version. It allows xDS to track the state of individual\n  // resources.\n  string version = 1;\n\n  // The resource being tracked.\n  google.protobuf.Any resource = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/discovery/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/discovery/v4alpha/ads.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v4alpha;\n\nimport \"envoy/service/discovery/v4alpha/discovery.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v4alpha\";\noption java_outer_classname = \"AdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Aggregated Discovery Service (ADS)]\n\n// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes,\n// and listeners are retained in the package `envoy.api.v2` for backwards\n// compatibility with existing management servers. New development in discovery\n// services should proceed in the package `envoy.service.discovery.v2`.\n\n// See https://github.com/lyft/envoy-api#apis for a description of the role of\n// ADS and how it is intended to be used by a management server. ADS requests\n// have the same structure as their singleton xDS counterparts, but can\n// multiplex many resource types on a single stream. The type_url in the\n// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover\n// the multiplexed singleton APIs at the Envoy instance and management server.\nservice AggregatedDiscoveryService {\n  // This is a gRPC-only API.\n  rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest)\n      returns (stream DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage AdsDummy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.AdsDummy\";\n}\n"
  },
  {
    "path": "api/envoy/service/discovery/v4alpha/discovery.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\nimport \"udpa/core/v1/resource_name.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v4alpha\";\noption java_outer_classname = \"DiscoveryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common discovery API components]\n\n// A DiscoveryRequest requests a set of versioned resources of the same type for\n// a given Envoy node on some API.\n// [#next-free-field: 7]\nmessage DiscoveryRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.DiscoveryRequest\";\n\n  // The version_info provided in the request messages will be the version_info\n  // received with the most recent successfully processed response or empty on\n  // the first request. It is expected that no new request is sent after a\n  // response is received until the Envoy instance is ready to ACK/NACK the new\n  // configuration. ACK/NACK takes place by returning the new API config version\n  // as applied or the previous API config version respectively. Each type_url\n  // (see below) has an independent version associated with it.\n  string version_info = 1;\n\n  // The node making the request.\n  config.core.v4alpha.Node node = 2;\n\n  // List of resources to subscribe to, e.g. list of cluster names or a route\n  // configuration name. If this is empty, all resources for the API are\n  // returned. LDS/CDS may have empty resource_names, which will cause all\n  // resources for the Envoy instance to be returned. 
The LDS and CDS responses\n  // will then imply a number of resources that need to be fetched via EDS/RDS,\n  // which will be explicitly enumerated in resource_names.\n  repeated string resource_names = 3;\n\n  // Type of the resource that is being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This is implicit\n  // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is\n  // required for ADS.\n  string type_url = 4;\n\n  // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above\n  // discussion on version_info and the DiscoveryResponse nonce comment. This\n  // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP,\n  // or 2) the client has not yet accepted an update in this xDS stream (unlike\n  // delta, where it is populated only for new explicit ACKs).\n  string response_nonce = 5;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_service.discovery.v4alpha.DiscoveryResponse>`\n  // failed to update configuration. The *message* field in *error_details* provides the Envoy\n  // internal exception related to the failure. It is only intended for consumption during manual\n  // debugging, the string provided is not guaranteed to be stable across Envoy versions.\n  google.rpc.Status error_detail = 6;\n}\n\n// [#next-free-field: 7]\nmessage DiscoveryResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.DiscoveryResponse\";\n\n  // The version of the response data.\n  string version_info = 1;\n\n  // The response resources. These resources are typed and depend on the API being called.\n  repeated google.protobuf.Any resources = 2;\n\n  // [#not-implemented-hide:]\n  // Canary is used to support two Envoy command line flags:\n  //\n  // * --terminate-on-canary-transition-failure. When set, Envoy is able to\n  //   terminate if it detects that configuration is stuck at canary. 
Consider\n  //   this example sequence of updates:\n  //   - Management server applies a canary config successfully.\n  //   - Management server rolls back to a production config.\n  //   - Envoy rejects the new production config.\n  //   Since there is no sensible way to continue receiving configuration\n  //   updates, Envoy will then terminate and apply production config from a\n  //   clean slate.\n  // * --dry-run-canary. When set, a canary response will never be applied, only\n  //   validated via a dry run.\n  bool canary = 3;\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty).\n  string type_url = 4;\n\n  // For gRPC based subscriptions, the nonce provides a way to explicitly ack a\n  // specific DiscoveryResponse in a following DiscoveryRequest. Additional\n  // messages may have been sent by Envoy to the management server for the\n  // previous version on the stream prior to this DiscoveryResponse, that were\n  // unprocessed at response send time. The nonce allows the management server\n  // to ignore any further DiscoveryRequests for the previous version until a\n  // DiscoveryRequest bearing the nonce. The nonce is optional and is not\n  // required for non-stream based xDS implementations.\n  string nonce = 5;\n\n  // [#not-implemented-hide:]\n  // The control plane instance that sent the response.\n  config.core.v4alpha.ControlPlane control_plane = 6;\n}\n\n// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC\n// endpoint for Delta xDS.\n//\n// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full\n// snapshot of the tracked resources. 
Instead, DeltaDiscoveryResponses are a\n// diff to the state of a xDS client.\n// In Delta XDS there are per-resource versions, which allow tracking state at\n// the resource granularity.\n// An xDS Delta session is always in the context of a gRPC bidirectional\n// stream. This allows the xDS server to keep track of the state of xDS clients\n// connected to it.\n//\n// In Delta xDS the nonce field is required and used to pair\n// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK.\n// Optionally, a response message level system_version_info is present for\n// debugging purposes only.\n//\n// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest\n// can be either or both of: [1] informing the server of what resources the\n// client has gained/lost interest in (using resource_names_subscribe and\n// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from\n// the server (using response_nonce, with presence of error_detail making it a NACK).\n// Additionally, the first message (for a given type_url) of a reconnected gRPC stream\n// has a third role: informing the server of the resources (and their versions)\n// that the client already possesses, using the initial_resource_versions field.\n//\n// As with state-of-the-world, when multiple resource types are multiplexed (ADS),\n// all requests/acknowledgments/updates are logically walled off by type_url:\n// a Cluster ACK exists in a completely separate world from a prior Route NACK.\n// In particular, initial_resource_versions being sent at the \"start\" of every\n// gRPC stream actually entails a message for each type_url, each with its own\n// initial_resource_versions.\n// [#next-free-field: 10]\nmessage DeltaDiscoveryRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.DeltaDiscoveryRequest\";\n\n  // The node making the request.\n  config.core.v4alpha.Node node = 1;\n\n  // Type of the resource that is 
being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This does not need to be set if\n  // resources are only referenced via *udpa_resource_subscribe* and\n  // *udpa_resources_unsubscribe*.\n  string type_url = 2;\n\n  // DeltaDiscoveryRequests allow the client to add or remove individual\n  // resources to the set of tracked resources in the context of a stream.\n  // All resource names in the resource_names_subscribe list are added to the\n  // set of tracked resources and all resource names in the resource_names_unsubscribe\n  // list are removed from the set of tracked resources.\n  //\n  // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or\n  // resource_names_unsubscribe list simply means that no resources are to be\n  // added or removed to the resource list.\n  // *Like* state-of-the-world xDS, the server must send updates for all tracked\n  // resources, but can also send updates for resources the client has not subscribed to.\n  //\n  // NOTE: the server must respond with all resources listed in resource_names_subscribe,\n  // even if it believes the client has the most recent version of them. The reason:\n  // the client may have dropped them, but then regained interest before it had a chance\n  // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd.\n  //\n  // These two fields can be set in any DeltaDiscoveryRequest, including ACKs\n  // and initial_resource_versions.\n  //\n  // A list of Resource names to add to the list of tracked resources.\n  repeated string resource_names_subscribe = 3;\n\n  // As with *resource_names_subscribe* but used when subscribing to resources indicated\n  // by a *udpa.core.v1.ResourceLocator*. 
The directives in the resource locator\n  // are ignored and the context parameters are matched with\n  // *context_param_specifier* specific semantics.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8;\n\n  // A list of Resource names to remove from the list of tracked resources.\n  repeated string resource_names_unsubscribe = 4;\n\n  // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a\n  // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed\n  // resource locator provided in *udpa_resources_subscribe*.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9;\n\n  // Informs the server of the versions of the resources the xDS client knows of, to enable the\n  // client to continue the same logical xDS session even in the face of gRPC stream reconnection.\n  // It will not be populated: [1] in the very first stream of a session, since the client will\n  // not yet have any resources,  [2] in any message after the first in a stream (for a given\n  // type_url), since the server will already be correctly tracking the client's state.\n  // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.)\n  // The map's keys are names of xDS resources known to the xDS client.\n  // The map's values are opaque resource versions.\n  map<string, string> initial_resource_versions = 5;\n\n  // When the DeltaDiscoveryRequest is a ACK or NACK message in response\n  // to a previous DeltaDiscoveryResponse, the response_nonce must be the\n  // nonce in the DeltaDiscoveryResponse.\n  // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted.\n  string response_nonce = 6;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_service.discovery.v4alpha.DiscoveryResponse>`\n  // failed to update configuration. 
The *message* field in *error_details*\n  // provides the Envoy internal exception related to the failure.\n  google.rpc.Status error_detail = 7;\n}\n\n// [#next-free-field: 8]\nmessage DeltaDiscoveryResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.DeltaDiscoveryResponse\";\n\n  // The version of the response data (used for debugging).\n  string system_version_info = 1;\n\n  // The response resources. These are typed resources, whose types must match\n  // the type_url field.\n  repeated Resource resources = 2;\n\n  // field id 3 IS available!\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty.\n  // This does not need to be set if *udpa_removed_resources* is used instead of\n  // *removed_resources*.\n  string type_url = 4;\n\n  // Resources names of resources that have be deleted and to be removed from the xDS Client.\n  // Removed resources for missing resources can be ignored.\n  repeated string removed_resources = 6;\n\n  // As with *removed_resources* but used when a removed resource was named in\n  // its *Resource*s with a *udpa.core.v1.ResourceName*.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceName udpa_removed_resources = 7;\n\n  // The nonce provides a way for DeltaDiscoveryRequests to uniquely\n  // reference a DeltaDiscoveryResponse when (N)ACKing. 
The nonce is required.\n  string nonce = 5;\n}\n\n// [#next-free-field: 6]\nmessage Resource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.Resource\";\n\n  oneof name_specifier {\n    // The resource's name, to distinguish it from others of the same type of resource.\n    string name = 3;\n\n    // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered.\n    udpa.core.v1.ResourceName udpa_resource_name = 5;\n  }\n\n  // The aliases are a list of other names that this resource can go by.\n  repeated string aliases = 4;\n\n  // The resource level version. It allows xDS to track the state of individual\n  // resources.\n  string version = 1;\n\n  // The resource being tracked.\n  google.protobuf.Any resource = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/endpoint/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/endpoint/v3/eds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.endpoint.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.endpoint.v3\";\noption java_outer_classname = \"EdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: EDS]\n// Endpoint discovery :ref:`architecture overview <arch_overview_service_discovery_types_eds>`\n\nservice EndpointDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.endpoint.v3.ClusterLoadAssignment\";\n\n  // The resource_names field in DiscoveryRequest specifies a list of clusters\n  // to subscribe to updates for.\n  rpc StreamEndpoints(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaEndpoints(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchEndpoints(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:endpoints\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage EdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.EdsDummy\";\n}\n"
  },
  {
    "path": "api/envoy/service/event_reporting/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/event_reporting/v2alpha/event_reporting_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.event_reporting.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.event_reporting.v2alpha\";\noption java_outer_classname = \"EventReportingServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.event_reporting.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC Event Reporting Service]\n\n// [#not-implemented-hide:]\n// Service for streaming different types of events from Envoy to a server. The examples of\n// such events may be health check or outlier detection events.\nservice EventReportingService {\n  // Envoy will connect and send StreamEventsRequest messages forever.\n  // The management server may send StreamEventsResponse to configure event stream. See below.\n  // This API is designed for high throughput with the expectation that it might be lossy.\n  rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) {\n  }\n}\n\n// [#not-implemented-hide:]\n// An events envoy sends to the management server.\nmessage StreamEventsRequest {\n  message Identifier {\n    // The node sending the event messages over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batch of events. When the stream is already active, it will be the events occurred\n  // since the last message had been sent. 
If the server receives unknown event type, it should\n  // silently ignore it.\n  //\n  // The following events are supported:\n  //\n  // * :ref:`HealthCheckEvent <envoy_api_msg_data.core.v2alpha.HealthCheckEvent>`\n  // * :ref:`OutlierDetectionEvent <envoy_api_msg_data.cluster.v2alpha.OutlierDetectionEvent>`\n  repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#not-implemented-hide:]\n// The management server may send envoy a StreamEventsResponse to tell which events the server\n// is interested in. In future, with aggregated event reporting service, this message will\n// contain, for example, clusters the envoy should send events for, or event types the server\n// wants to process.\nmessage StreamEventsResponse {\n}\n"
  },
  {
    "path": "api/envoy/service/event_reporting/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/event_reporting/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/event_reporting/v3/event_reporting_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.event_reporting.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.event_reporting.v3\";\noption java_outer_classname = \"EventReportingServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC Event Reporting Service]\n\n// [#not-implemented-hide:]\n// Service for streaming different types of events from Envoy to a server. The examples of\n// such events may be health check or outlier detection events.\nservice EventReportingService {\n  // Envoy will connect and send StreamEventsRequest messages forever.\n  // The management server may send StreamEventsResponse to configure event stream. See below.\n  // This API is designed for high throughput with the expectation that it might be lossy.\n  rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) {\n  }\n}\n\n// [#not-implemented-hide:]\n// An events envoy sends to the management server.\nmessage StreamEventsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.event_reporting.v2alpha.StreamEventsRequest\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.event_reporting.v2alpha.StreamEventsRequest.Identifier\";\n\n    // The node sending the event messages over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. 
This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batch of events. When the stream is already active, it will be the events occurred\n  // since the last message had been sent. If the server receives unknown event type, it should\n  // silently ignore it.\n  //\n  // The following events are supported:\n  //\n  // * :ref:`HealthCheckEvent <envoy_api_msg_data.core.v3.HealthCheckEvent>`\n  // * :ref:`OutlierDetectionEvent <envoy_api_msg_data.cluster.v3.OutlierDetectionEvent>`\n  repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#not-implemented-hide:]\n// The management server may send envoy a StreamEventsResponse to tell which events the server\n// is interested in. In future, with aggregated event reporting service, this message will\n// contain, for example, clusters the envoy should send events for, or event types the server\n// wants to process.\nmessage StreamEventsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.event_reporting.v2alpha.StreamEventsResponse\";\n}\n"
  },
  {
    "path": "api/envoy/service/event_reporting/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/event_reporting/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/event_reporting/v4alpha/event_reporting_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.event_reporting.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.event_reporting.v4alpha\";\noption java_outer_classname = \"EventReportingServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: gRPC Event Reporting Service]\n\n// [#not-implemented-hide:]\n// Service for streaming different types of events from Envoy to a server. The examples of\n// such events may be health check or outlier detection events.\nservice EventReportingService {\n  // Envoy will connect and send StreamEventsRequest messages forever.\n  // The management server may send StreamEventsResponse to configure event stream. See below.\n  // This API is designed for high throughput with the expectation that it might be lossy.\n  rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) {\n  }\n}\n\n// [#not-implemented-hide:]\n// An events envoy sends to the management server.\nmessage StreamEventsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.event_reporting.v3.StreamEventsRequest\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.event_reporting.v3.StreamEventsRequest.Identifier\";\n\n    // The node sending the event messages over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. 
This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batch of events. When the stream is already active, it will be the events occurred\n  // since the last message had been sent. If the server receives unknown event type, it should\n  // silently ignore it.\n  //\n  // The following events are supported:\n  //\n  // * :ref:`HealthCheckEvent <envoy_api_msg_data.core.v3.HealthCheckEvent>`\n  // * :ref:`OutlierDetectionEvent <envoy_api_msg_data.cluster.v3.OutlierDetectionEvent>`\n  repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#not-implemented-hide:]\n// The management server may send envoy a StreamEventsResponse to tell which events the server\n// is interested in. In future, with aggregated event reporting service, this message will\n// contain, for example, clusters the envoy should send events for, or event types the server\n// wants to process.\nmessage StreamEventsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.event_reporting.v3.StreamEventsResponse\";\n}\n"
  },
  {
    "path": "api/envoy/service/extension/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/extension/v3/config_discovery.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.extension.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.extension.v3\";\noption java_outer_classname = \"ConfigDiscoveryProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Extension Config Discovery Service (ECDS)]\n\n// Return extension configurations.\nservice ExtensionConfigDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.core.v3.TypedExtensionConfig\";\n\n  rpc StreamExtensionConfigs(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaExtensionConfigs(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchExtensionConfigs(discovery.v3.DiscoveryRequest)\n      returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:extension_configs\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue\n// with importing services: https://github.com/google/protobuf/issues/4221 and\n// protoxform to upgrade the file.\nmessage EcdsDummy {\n}\n"
  },
  {
    "path": "api/envoy/service/health/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/health/v3/hds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.health.v3;\n\nimport \"envoy/config/cluster/v3/cluster.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/health_check.proto\";\nimport \"envoy/config/endpoint/v3/endpoint_components.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.health.v3\";\noption java_outer_classname = \"HdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Health Discovery Service (HDS)]\n\n// HDS is Health Discovery Service. It compliments Envoy’s health checking\n// service by designating this Envoy to be a healthchecker for a subset of hosts\n// in the cluster. The status of these health checks will be reported to the\n// management server, where it can be aggregated etc and redistributed back to\n// Envoy through EDS.\nservice HealthDiscoveryService {\n  // 1. Envoy starts up and if its can_healthcheck option in the static\n  //    bootstrap config is enabled, sends HealthCheckRequest to the management\n  //    server. It supplies its capabilities (which protocol it can health check\n  //    with, what zone it resides in, etc.).\n  // 2. In response to (1), the management server designates this Envoy as a\n  //    healthchecker to health check a subset of all upstream hosts for a given\n  //    cluster (for example upstream Host 1 and Host 2). It streams\n  //    HealthCheckSpecifier messages with cluster related configuration for all\n  //    clusters this Envoy is designated to health check. Subsequent\n  //    HealthCheckSpecifier message will be sent on changes to:\n  //    a. Endpoints to health checks\n  //    b. 
Per cluster configuration change\n  // 3. Envoy creates a health probe based on the HealthCheck config and sends\n  //    it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck\n  //    configuration Envoy waits upon the arrival of the probe response and\n  //    looks at the content of the response to decide whether the endpoint is\n  //    healthy or not. If a response hasn't been received within the timeout\n  //    interval, the endpoint health status is considered TIMEOUT.\n  // 4. Envoy reports results back in an EndpointHealthResponse message.\n  //    Envoy streams responses as often as the interval configured by the\n  //    management server in HealthCheckSpecifier.\n  // 5. The management Server collects health statuses for all endpoints in the\n  //    cluster (for all clusters) and uses this information to construct\n  //    EndpointDiscoveryResponse messages.\n  // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load\n  //    balances traffic to them without additional health checking. It may\n  //    use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection\n  //    failed to a particular endpoint to account for health status propagation\n  //    delay between HDS and EDS).\n  // By default, can_healthcheck is true. If can_healthcheck is false, Cluster\n  // configuration may not contain HealthCheck message.\n  // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above\n  // invariant?\n  // TODO(htuch): Add @amb67's diagram.\n  rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse)\n      returns (stream HealthCheckSpecifier) {\n  }\n\n  // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of\n  // request/response. 
Should we add an identifier to the HealthCheckSpecifier\n  // to bind with the response?\n  rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {\n    option (google.api.http).post = \"/v3/discovery:health_check\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Defines supported protocols etc, so the management server can assign proper\n// endpoints to healthcheck.\nmessage Capability {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.Capability\";\n\n  // Different Envoy instances may have different capabilities (e.g. Redis)\n  // and/or have ports enabled for different protocols.\n  enum Protocol {\n    HTTP = 0;\n    TCP = 1;\n    REDIS = 2;\n  }\n\n  repeated Protocol health_check_protocols = 1;\n}\n\nmessage HealthCheckRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.HealthCheckRequest\";\n\n  config.core.v3.Node node = 1;\n\n  Capability capability = 2;\n}\n\nmessage EndpointHealth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.EndpointHealth\";\n\n  config.endpoint.v3.Endpoint endpoint = 1;\n\n  config.core.v3.HealthStatus health_status = 2;\n}\n\n// Group endpoint health by locality under each cluster.\nmessage LocalityEndpointsHealth {\n  config.core.v3.Locality locality = 1;\n\n  repeated EndpointHealth endpoints_health = 2;\n}\n\n// The health status of endpoints in a cluster. 
The cluster name and locality\n// should match the corresponding fields in ClusterHealthCheck message.\nmessage ClusterEndpointsHealth {\n  string cluster_name = 1;\n\n  repeated LocalityEndpointsHealth locality_endpoints_health = 2;\n}\n\nmessage EndpointHealthResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.EndpointHealthResponse\";\n\n  // Deprecated - Flat list of endpoint health information.\n  repeated EndpointHealth endpoints_health = 1 [deprecated = true];\n\n  // Organize Endpoint health information by cluster.\n  repeated ClusterEndpointsHealth cluster_endpoints_health = 2;\n}\n\nmessage HealthCheckRequestOrEndpointHealthResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.HealthCheckRequestOrEndpointHealthResponse\";\n\n  oneof request_type {\n    HealthCheckRequest health_check_request = 1;\n\n    EndpointHealthResponse endpoint_health_response = 2;\n  }\n}\n\nmessage LocalityEndpoints {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.LocalityEndpoints\";\n\n  config.core.v3.Locality locality = 1;\n\n  repeated config.endpoint.v3.Endpoint endpoints = 2;\n}\n\n// The cluster name and locality is provided to Envoy for the endpoints that it\n// health checks to support statistics reporting, logging and debugging by the\n// Envoy instance (outside of HDS). 
For maximum usefulness, it should match the\n// same cluster structure as that provided by EDS.\nmessage ClusterHealthCheck {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.ClusterHealthCheck\";\n\n  string cluster_name = 1;\n\n  repeated config.core.v3.HealthCheck health_checks = 2;\n\n  repeated LocalityEndpoints locality_endpoints = 3;\n\n  // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria <envoy_api_field_config.core.v3.HealthCheck.transport_socket_match_criteria>`\n  // on connection when health checking. For more details, see\n  // :ref:`config.cluster.v3.Cluster.transport_socket_matches <envoy_api_field_config.cluster.v3.Cluster.transport_socket_matches>`.\n  repeated config.cluster.v3.Cluster.TransportSocketMatch transport_socket_matches = 4;\n}\n\nmessage HealthCheckSpecifier {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.HealthCheckSpecifier\";\n\n  repeated ClusterHealthCheck cluster_health_checks = 1;\n\n  // The default is 1 second.\n  google.protobuf.Duration interval = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/health/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/cluster/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/service/health/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/health/v4alpha/hds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.health.v4alpha;\n\nimport \"envoy/config/cluster/v4alpha/cluster.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/health_check.proto\";\nimport \"envoy/config/endpoint/v3/endpoint_components.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.health.v4alpha\";\noption java_outer_classname = \"HdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Health Discovery Service (HDS)]\n\n// HDS is Health Discovery Service. It compliments Envoy’s health checking\n// service by designating this Envoy to be a healthchecker for a subset of hosts\n// in the cluster. The status of these health checks will be reported to the\n// management server, where it can be aggregated etc and redistributed back to\n// Envoy through EDS.\nservice HealthDiscoveryService {\n  // 1. Envoy starts up and if its can_healthcheck option in the static\n  //    bootstrap config is enabled, sends HealthCheckRequest to the management\n  //    server. It supplies its capabilities (which protocol it can health check\n  //    with, what zone it resides in, etc.).\n  // 2. In response to (1), the management server designates this Envoy as a\n  //    healthchecker to health check a subset of all upstream hosts for a given\n  //    cluster (for example upstream Host 1 and Host 2). It streams\n  //    HealthCheckSpecifier messages with cluster related configuration for all\n  //    clusters this Envoy is designated to health check. Subsequent\n  //    HealthCheckSpecifier message will be sent on changes to:\n  //    a. Endpoints to health checks\n  //    b. 
Per cluster configuration change\n  // 3. Envoy creates a health probe based on the HealthCheck config and sends\n  //    it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck\n  //    configuration Envoy waits upon the arrival of the probe response and\n  //    looks at the content of the response to decide whether the endpoint is\n  //    healthy or not. If a response hasn't been received within the timeout\n  //    interval, the endpoint health status is considered TIMEOUT.\n  // 4. Envoy reports results back in an EndpointHealthResponse message.\n  //    Envoy streams responses as often as the interval configured by the\n  //    management server in HealthCheckSpecifier.\n  // 5. The management Server collects health statuses for all endpoints in the\n  //    cluster (for all clusters) and uses this information to construct\n  //    EndpointDiscoveryResponse messages.\n  // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load\n  //    balances traffic to them without additional health checking. It may\n  //    use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection\n  //    failed to a particular endpoint to account for health status propagation\n  //    delay between HDS and EDS).\n  // By default, can_healthcheck is true. If can_healthcheck is false, Cluster\n  // configuration may not contain HealthCheck message.\n  // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above\n  // invariant?\n  // TODO(htuch): Add @amb67's diagram.\n  rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse)\n      returns (stream HealthCheckSpecifier) {\n  }\n\n  // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of\n  // request/response. 
Should we add an identifier to the HealthCheckSpecifier\n  // to bind with the response?\n  rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {\n    option (google.api.http).post = \"/v3/discovery:health_check\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Defines supported protocols etc, so the management server can assign proper\n// endpoints to healthcheck.\nmessage Capability {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.service.health.v3.Capability\";\n\n  // Different Envoy instances may have different capabilities (e.g. Redis)\n  // and/or have ports enabled for different protocols.\n  enum Protocol {\n    HTTP = 0;\n    TCP = 1;\n    REDIS = 2;\n  }\n\n  repeated Protocol health_check_protocols = 1;\n}\n\nmessage HealthCheckRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.HealthCheckRequest\";\n\n  config.core.v4alpha.Node node = 1;\n\n  Capability capability = 2;\n}\n\nmessage EndpointHealth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.EndpointHealth\";\n\n  config.endpoint.v3.Endpoint endpoint = 1;\n\n  config.core.v4alpha.HealthStatus health_status = 2;\n}\n\n// Group endpoint health by locality under each cluster.\nmessage LocalityEndpointsHealth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.LocalityEndpointsHealth\";\n\n  config.core.v4alpha.Locality locality = 1;\n\n  repeated EndpointHealth endpoints_health = 2;\n}\n\n// The health status of endpoints in a cluster. 
The cluster name and locality\n// should match the corresponding fields in ClusterHealthCheck message.\nmessage ClusterEndpointsHealth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.ClusterEndpointsHealth\";\n\n  string cluster_name = 1;\n\n  repeated LocalityEndpointsHealth locality_endpoints_health = 2;\n}\n\nmessage EndpointHealthResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.EndpointHealthResponse\";\n\n  reserved 1;\n\n  reserved \"endpoints_health\";\n\n  // Organize Endpoint health information by cluster.\n  repeated ClusterEndpointsHealth cluster_endpoints_health = 2;\n}\n\nmessage HealthCheckRequestOrEndpointHealthResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.HealthCheckRequestOrEndpointHealthResponse\";\n\n  oneof request_type {\n    HealthCheckRequest health_check_request = 1;\n\n    EndpointHealthResponse endpoint_health_response = 2;\n  }\n}\n\nmessage LocalityEndpoints {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.LocalityEndpoints\";\n\n  config.core.v4alpha.Locality locality = 1;\n\n  repeated config.endpoint.v3.Endpoint endpoints = 2;\n}\n\n// The cluster name and locality is provided to Envoy for the endpoints that it\n// health checks to support statistics reporting, logging and debugging by the\n// Envoy instance (outside of HDS). 
For maximum usefulness, it should match the\n// same cluster structure as that provided by EDS.\nmessage ClusterHealthCheck {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.ClusterHealthCheck\";\n\n  string cluster_name = 1;\n\n  repeated config.core.v4alpha.HealthCheck health_checks = 2;\n\n  repeated LocalityEndpoints locality_endpoints = 3;\n\n  // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria <envoy_api_field_config.core.v4alpha.HealthCheck.transport_socket_match_criteria>`\n  // on connection when health checking. For more details, see\n  // :ref:`config.cluster.v3.Cluster.transport_socket_matches <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket_matches>`.\n  repeated config.cluster.v4alpha.Cluster.TransportSocketMatch transport_socket_matches = 4;\n}\n\nmessage HealthCheckSpecifier {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.HealthCheckSpecifier\";\n\n  repeated ClusterHealthCheck cluster_health_checks = 1;\n\n  // The default is 1 second.\n  google.protobuf.Duration interval = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/listener/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/listener/v3/lds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.listener.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.listener.v3\";\noption java_outer_classname = \"LdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Listener]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// The Envoy instance initiates an RPC at startup to discover a list of\n// listeners. Updates are delivered via streaming from the LDS server and\n// consist of a complete update of all listeners. Existing connections will be\n// allowed to drain from listeners that are no longer present.\nservice ListenerDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.listener.v3.Listener\";\n\n  rpc DeltaListeners(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc StreamListeners(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc FetchListeners(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:listeners\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage LdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.LdsDummy\";\n}\n"
  },
  {
    "path": "api/envoy/service/load_stats/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/load_stats/v2/lrs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.load_stats.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/endpoint/load_report.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.load_stats.v2\";\noption java_outer_classname = \"LrsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Load reporting service]\n\nservice LoadReportingService {\n  // Advanced API to allow for multi-dimensional load balancing by remote\n  // server. For receiving LB assignments, the steps are:\n  // 1, The management server is configured with per cluster/zone/load metric\n  //    capacity configuration. The capacity configuration definition is\n  //    outside of the scope of this document.\n  // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters\n  //    to balance.\n  //\n  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a\n  // management server:\n  // 1. Once a connection establishes, the management server publishes a\n  //    LoadStatsResponse for all clusters it is interested in learning load\n  //    stats about.\n  // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts\n  //    based on per-zone weights and/or per-instance weights (if specified)\n  //    based on intra-zone LbPolicy. This information comes from the above\n  //    {Stream,Fetch}Endpoints.\n  // 3. When upstream hosts reply, they optionally add header <define header\n  //    name> with ASCII representation of EndpointLoadMetricStats.\n  // 4. Envoy aggregates load reports over the period of time given to it in\n  //    LoadStatsResponse.load_reporting_interval. 
This includes aggregation\n  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as\n  //    well as load metrics from upstream hosts.\n  // 5. When the timer of load_reporting_interval expires, Envoy sends new\n  //    LoadStatsRequest filled with load reports for each cluster.\n  // 6. The management server uses the load reports from all reported Envoys\n  //    from around the world, computes global assignment and prepares traffic\n  //    assignment destined for each zone Envoys are located in. Goto 2.\n  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {\n  }\n}\n\n// A load report Envoy sends to the management server.\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\nmessage LoadStatsRequest {\n  // Node identifier for Envoy instance.\n  api.v2.core.Node node = 1;\n\n  // A list of load stats to report.\n  repeated api.v2.endpoint.ClusterStats cluster_stats = 2;\n}\n\n// The management server sends envoy a LoadStatsResponse with all clusters it\n// is interested in learning load stats about.\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\nmessage LoadStatsResponse {\n  // Clusters to report stats for.\n  // Not populated if *send_all_clusters* is true.\n  repeated string clusters = 1;\n\n  // If true, the client should send all clusters it knows about.\n  // Only clients that advertise the \"envoy.lrs.supports_send_all_clusters\" capability in their\n  // :ref:`client_features<envoy_api_field_core.Node.client_features>` field will honor this field.\n  bool send_all_clusters = 4;\n\n  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:\n  // 1. There may be some delay from when the timer fires until stats sampling occurs.\n  // 2. 
For clusters that were already featured in the previous *LoadStatsResponse*, any traffic\n  //    that is observed in between the corresponding previous *LoadStatsRequest* and this\n  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period\n  //    of inobservability that might otherwise exist between the messages. New clusters are not\n  //    subject to this consideration.\n  google.protobuf.Duration load_reporting_interval = 2;\n\n  // Set to *true* if the management server supports endpoint granularity\n  // report.\n  bool report_endpoint_granularity = 3;\n}\n"
  },
  {
    "path": "api/envoy/service/load_stats/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/service/load_stats/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/load_stats/v3/lrs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.load_stats.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/endpoint/v3/load_report.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.load_stats.v3\";\noption java_outer_classname = \"LrsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Load Reporting service (LRS)]\n\n// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional\n// stream with a management server. Upon connecting, the management server can send a\n// :ref:`LoadStatsResponse <envoy_api_msg_service.load_stats.v3.LoadStatsResponse>` to a node it is\n// interested in getting the load reports for. Envoy in this node will start sending\n// :ref:`LoadStatsRequest <envoy_api_msg_service.load_stats.v3.LoadStatsRequest>`. This is done periodically\n// based on the :ref:`load reporting interval <envoy_api_field_service.load_stats.v3.LoadStatsResponse.load_reporting_interval>`\n// For details, take a look at the :ref:`Load Reporting Service sandbox example <install_sandboxes_load_reporting_service>`.\n\nservice LoadReportingService {\n  // Advanced API to allow for multi-dimensional load balancing by remote\n  // server. For receiving LB assignments, the steps are:\n  // 1, The management server is configured with per cluster/zone/load metric\n  //    capacity configuration. The capacity configuration definition is\n  //    outside of the scope of this document.\n  // 2. 
Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters\n  //    to balance.\n  //\n  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a\n  // management server:\n  // 1. Once a connection establishes, the management server publishes a\n  //    LoadStatsResponse for all clusters it is interested in learning load\n  //    stats about.\n  // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts\n  //    based on per-zone weights and/or per-instance weights (if specified)\n  //    based on intra-zone LbPolicy. This information comes from the above\n  //    {Stream,Fetch}Endpoints.\n  // 3. When upstream hosts reply, they optionally add header <define header\n  //    name> with ASCII representation of EndpointLoadMetricStats.\n  // 4. Envoy aggregates load reports over the period of time given to it in\n  //    LoadStatsResponse.load_reporting_interval. This includes aggregation\n  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as\n  //    well as load metrics from upstream hosts.\n  // 5. When the timer of load_reporting_interval expires, Envoy sends new\n  //    LoadStatsRequest filled with load reports for each cluster.\n  // 6. The management server uses the load reports from all reported Envoys\n  //    from around the world, computes global assignment and prepares traffic\n  //    assignment destined for each zone Envoys are located in. 
Goto 2.\n  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {\n  }\n}\n\n// A load report Envoy sends to the management server.\nmessage LoadStatsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.load_stats.v2.LoadStatsRequest\";\n\n  // Node identifier for Envoy instance.\n  config.core.v3.Node node = 1;\n\n  // A list of load stats to report.\n  repeated config.endpoint.v3.ClusterStats cluster_stats = 2;\n}\n\n// The management server sends envoy a LoadStatsResponse with all clusters it\n// is interested in learning load stats about.\nmessage LoadStatsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.load_stats.v2.LoadStatsResponse\";\n\n  // Clusters to report stats for.\n  // Not populated if *send_all_clusters* is true.\n  repeated string clusters = 1;\n\n  // If true, the client should send all clusters it knows about.\n  // Only clients that advertise the \"envoy.lrs.supports_send_all_clusters\" capability in their\n  // :ref:`client_features<envoy_api_field_config.core.v3.Node.client_features>` field will honor this field.\n  bool send_all_clusters = 4;\n\n  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:\n  //\n  // 1. There may be some delay from when the timer fires until stats sampling occurs.\n  // 2. For clusters that were already featured in the previous *LoadStatsResponse*, any traffic\n  //    that is observed in between the corresponding previous *LoadStatsRequest* and this\n  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period\n  //    of inobservability that might otherwise exist between the messages. 
New clusters are not\n  //    subject to this consideration.\n  google.protobuf.Duration load_reporting_interval = 2;\n\n  // Set to *true* if the management server supports endpoint granularity\n  // report.\n  bool report_endpoint_granularity = 3;\n}\n"
  },
  {
    "path": "api/envoy/service/load_stats/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/service/load_stats/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/load_stats/v4alpha/lrs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.load_stats.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/endpoint/v3/load_report.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.load_stats.v4alpha\";\noption java_outer_classname = \"LrsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Load Reporting service (LRS)]\n\n// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional\n// stream with a management server. Upon connecting, the management server can send a\n// :ref:`LoadStatsResponse <envoy_api_msg_service.load_stats.v4alpha.LoadStatsResponse>` to a node it is\n// interested in getting the load reports for. Envoy in this node will start sending\n// :ref:`LoadStatsRequest <envoy_api_msg_service.load_stats.v4alpha.LoadStatsRequest>`. This is done periodically\n// based on the :ref:`load reporting interval <envoy_api_field_service.load_stats.v4alpha.LoadStatsResponse.load_reporting_interval>`\n// For details, take a look at the :ref:`Load Reporting Service sandbox example <install_sandboxes_load_reporting_service>`.\n\nservice LoadReportingService {\n  // Advanced API to allow for multi-dimensional load balancing by remote\n  // server. For receiving LB assignments, the steps are:\n  // 1, The management server is configured with per cluster/zone/load metric\n  //    capacity configuration. The capacity configuration definition is\n  //    outside of the scope of this document.\n  // 2. 
Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters\n  //    to balance.\n  //\n  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a\n  // management server:\n  // 1. Once a connection establishes, the management server publishes a\n  //    LoadStatsResponse for all clusters it is interested in learning load\n  //    stats about.\n  // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts\n  //    based on per-zone weights and/or per-instance weights (if specified)\n  //    based on intra-zone LbPolicy. This information comes from the above\n  //    {Stream,Fetch}Endpoints.\n  // 3. When upstream hosts reply, they optionally add header <define header\n  //    name> with ASCII representation of EndpointLoadMetricStats.\n  // 4. Envoy aggregates load reports over the period of time given to it in\n  //    LoadStatsResponse.load_reporting_interval. This includes aggregation\n  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as\n  //    well as load metrics from upstream hosts.\n  // 5. When the timer of load_reporting_interval expires, Envoy sends new\n  //    LoadStatsRequest filled with load reports for each cluster.\n  // 6. The management server uses the load reports from all reported Envoys\n  //    from around the world, computes global assignment and prepares traffic\n  //    assignment destined for each zone Envoys are located in. 
Goto 2.\n  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {\n  }\n}\n\n// A load report Envoy sends to the management server.\nmessage LoadStatsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.load_stats.v3.LoadStatsRequest\";\n\n  // Node identifier for Envoy instance.\n  config.core.v4alpha.Node node = 1;\n\n  // A list of load stats to report.\n  repeated config.endpoint.v3.ClusterStats cluster_stats = 2;\n}\n\n// The management server sends envoy a LoadStatsResponse with all clusters it\n// is interested in learning load stats about.\nmessage LoadStatsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.load_stats.v3.LoadStatsResponse\";\n\n  // Clusters to report stats for.\n  // Not populated if *send_all_clusters* is true.\n  repeated string clusters = 1;\n\n  // If true, the client should send all clusters it knows about.\n  // Only clients that advertise the \"envoy.lrs.supports_send_all_clusters\" capability in their\n  // :ref:`client_features<envoy_api_field_config.core.v4alpha.Node.client_features>` field will honor this field.\n  bool send_all_clusters = 4;\n\n  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:\n  //\n  // 1. There may be some delay from when the timer fires until stats sampling occurs.\n  // 2. For clusters that were already featured in the previous *LoadStatsResponse*, any traffic\n  //    that is observed in between the corresponding previous *LoadStatsRequest* and this\n  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period\n  //    of inobservability that might otherwise exist between the messages. 
New clusters are not\n  //    subject to this consideration.\n  google.protobuf.Duration load_reporting_interval = 2;\n\n  // Set to *true* if the management server supports endpoint granularity\n  // report.\n  bool report_endpoint_granularity = 3;\n}\n"
  },
  {
    "path": "api/envoy/service/metrics/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@prometheus_metrics_model//:client_model\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/metrics/v2/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.metrics.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"metrics.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.metrics.v2\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metrics service]\n\n// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric\n// data model as a standard to represent metrics information.\nservice MetricsService {\n  // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure.\n  rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) {\n  }\n}\n\nmessage StreamMetricsResponse {\n}\n\nmessage StreamMetricsMessage {\n  message Identifier {\n    // The node sending metrics over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // A list of metric entries\n  repeated io.prometheus.client.MetricFamily envoy_metrics = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/metrics/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/metrics/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@prometheus_metrics_model//:client_model\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/metrics/v3/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.metrics.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"metrics.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.metrics.v3\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metrics service]\n\n// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric\n// data model as a standard to represent metrics information.\nservice MetricsService {\n  // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure.\n  rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) {\n  }\n}\n\nmessage StreamMetricsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.metrics.v2.StreamMetricsResponse\";\n}\n\nmessage StreamMetricsMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.metrics.v2.StreamMetricsMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.metrics.v2.StreamMetricsMessage.Identifier\";\n\n    // The node sending metrics over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // A list of metric entries\n  repeated io.prometheus.client.MetricFamily envoy_metrics = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/metrics/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/metrics/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@prometheus_metrics_model//:client_model\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/metrics/v4alpha/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.metrics.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"metrics.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.metrics.v4alpha\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Metrics service]\n\n// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric\n// data model as a standard to represent metrics information.\nservice MetricsService {\n  // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure.\n  rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) {\n  }\n}\n\nmessage StreamMetricsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.metrics.v3.StreamMetricsResponse\";\n}\n\nmessage StreamMetricsMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.metrics.v3.StreamMetricsMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.metrics.v3.StreamMetricsMessage.Identifier\";\n\n    // The node sending metrics over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // A list of metric entries\n  repeated io.prometheus.client.MetricFamily envoy_metrics = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/ratelimit/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/ratelimit:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/ratelimit/v2/rls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.ratelimit.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/ratelimit/ratelimit.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.ratelimit.v2\";\noption java_outer_classname = \"RlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate Limit Service (RLS)]\n\nservice RateLimitService {\n  // Determine whether rate limiting should take place.\n  rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) {\n  }\n}\n\n// Main message for a rate limit request. The rate limit service is designed to be fully generic\n// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded\n// configuration will parse the request and find the most specific limit to apply. In addition,\n// a RateLimitRequest can contain multiple \"descriptors\" to limit on. When multiple descriptors\n// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any\n// of them are over limit. This enables more complex application level rate limiting scenarios\n// if desired.\nmessage RateLimitRequest {\n  // All rate limit requests must specify a domain. This enables the configuration to be per\n  // application without fear of overlap. E.g., \"envoy\".\n  string domain = 1;\n\n  // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is\n  // processed by the service (see below). If any of the descriptors are over limit, the entire\n  // request is considered to be over limit.\n  repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 2;\n\n  // Rate limit requests can optionally specify the number of hits a request adds to the matched\n  // limit. 
If the value is not set in the message, a request increases the matched limit by 1.\n  uint32 hits_addend = 3;\n}\n\n// A response from a ShouldRateLimit call.\nmessage RateLimitResponse {\n  enum Code {\n    // The response code is not known.\n    UNKNOWN = 0;\n\n    // The response code to notify that the number of requests are under limit.\n    OK = 1;\n\n    // The response code to notify that the number of requests are over limit.\n    OVER_LIMIT = 2;\n  }\n\n  // Defines an actual rate limit in terms of requests per unit of time and the unit itself.\n  message RateLimit {\n    enum Unit {\n      // The time unit is not known.\n      UNKNOWN = 0;\n\n      // The time unit representing a second.\n      SECOND = 1;\n\n      // The time unit representing a minute.\n      MINUTE = 2;\n\n      // The time unit representing an hour.\n      HOUR = 3;\n\n      // The time unit representing a day.\n      DAY = 4;\n    }\n\n    // A name or description of this limit.\n    string name = 3;\n\n    // The number of requests per unit of time.\n    uint32 requests_per_unit = 1;\n\n    // The unit of time.\n    Unit unit = 2;\n  }\n\n  message DescriptorStatus {\n    // The response code for an individual descriptor.\n    Code code = 1;\n\n    // The current limit as configured by the server. Useful for debugging, etc.\n    RateLimit current_limit = 2;\n\n    // The limit remaining in the current time unit.\n    uint32 limit_remaining = 3;\n  }\n\n  // The overall response code which takes into account all of the descriptors that were passed\n  // in the RateLimitRequest message.\n  Code overall_code = 1;\n\n  // A list of DescriptorStatus messages which matches the length of the descriptor list passed\n  // in the RateLimitRequest. 
This can be used by the caller to determine which individual\n  // descriptors failed and/or what the currently configured limits are for all of them.\n  repeated DescriptorStatus statuses = 2;\n\n  // A list of headers to add to the response\n  repeated api.v2.core.HeaderValue headers = 3\n      [(udpa.annotations.field_migrate).rename = \"response_headers_to_add\"];\n\n  // A list of headers to add to the request when forwarded\n  repeated api.v2.core.HeaderValue request_headers_to_add = 4;\n}\n"
  },
  {
    "path": "api/envoy/service/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/extensions/common/ratelimit/v3:pkg\",\n        \"//envoy/service/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/ratelimit/v3/rls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.ratelimit.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/extensions/common/ratelimit/v3/ratelimit.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.ratelimit.v3\";\noption java_outer_classname = \"RlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate Limit Service (RLS)]\n\nservice RateLimitService {\n  // Determine whether rate limiting should take place.\n  rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) {\n  }\n}\n\n// Main message for a rate limit request. The rate limit service is designed to be fully generic\n// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded\n// configuration will parse the request and find the most specific limit to apply. In addition,\n// a RateLimitRequest can contain multiple \"descriptors\" to limit on. When multiple descriptors\n// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any\n// of them are over limit. This enables more complex application level rate limiting scenarios\n// if desired.\nmessage RateLimitRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.ratelimit.v2.RateLimitRequest\";\n\n  // All rate limit requests must specify a domain. This enables the configuration to be per\n  // application without fear of overlap. E.g., \"envoy\".\n  string domain = 1;\n\n  // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is\n  // processed by the service (see below). 
If any of the descriptors are over limit, the entire\n  // request is considered to be over limit.\n  repeated envoy.extensions.common.ratelimit.v3.RateLimitDescriptor descriptors = 2;\n\n  // Rate limit requests can optionally specify the number of hits a request adds to the matched\n  // limit. If the value is not set in the message, a request increases the matched limit by 1.\n  uint32 hits_addend = 3;\n}\n\n// A response from a ShouldRateLimit call.\nmessage RateLimitResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.ratelimit.v2.RateLimitResponse\";\n\n  enum Code {\n    // The response code is not known.\n    UNKNOWN = 0;\n\n    // The response code to notify that the number of requests are under limit.\n    OK = 1;\n\n    // The response code to notify that the number of requests are over limit.\n    OVER_LIMIT = 2;\n  }\n\n  // Defines an actual rate limit in terms of requests per unit of time and the unit itself.\n  message RateLimit {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.ratelimit.v2.RateLimitResponse.RateLimit\";\n\n    // Identifies the unit of of time for rate limit.\n    // [#comment: replace by envoy/type/v3/ratelimit_unit.proto in v4]\n    enum Unit {\n      // The time unit is not known.\n      UNKNOWN = 0;\n\n      // The time unit representing a second.\n      SECOND = 1;\n\n      // The time unit representing a minute.\n      MINUTE = 2;\n\n      // The time unit representing an hour.\n      HOUR = 3;\n\n      // The time unit representing a day.\n      DAY = 4;\n    }\n\n    // A name or description of this limit.\n    string name = 3;\n\n    // The number of requests per unit of time.\n    uint32 requests_per_unit = 1;\n\n    // The unit of time.\n    Unit unit = 2;\n  }\n\n  message DescriptorStatus {\n    option (udpa.annotations.versioning).previous_message_type =\n        
\"envoy.service.ratelimit.v2.RateLimitResponse.DescriptorStatus\";\n\n    // The response code for an individual descriptor.\n    Code code = 1;\n\n    // The current limit as configured by the server. Useful for debugging, etc.\n    RateLimit current_limit = 2;\n\n    // The limit remaining in the current time unit.\n    uint32 limit_remaining = 3;\n\n    // Duration until reset of the current limit window.\n    google.protobuf.Duration duration_until_reset = 4;\n  }\n\n  // The overall response code which takes into account all of the descriptors that were passed\n  // in the RateLimitRequest message.\n  Code overall_code = 1;\n\n  // A list of DescriptorStatus messages which matches the length of the descriptor list passed\n  // in the RateLimitRequest. This can be used by the caller to determine which individual\n  // descriptors failed and/or what the currently configured limits are for all of them.\n  repeated DescriptorStatus statuses = 2;\n\n  // A list of headers to add to the response\n  repeated config.core.v3.HeaderValue response_headers_to_add = 3;\n\n  // A list of headers to add to the request when forwarded\n  repeated config.core.v3.HeaderValue request_headers_to_add = 4;\n}\n"
  },
  {
    "path": "api/envoy/service/route/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/route/v3/rds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.route.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.route.v3\";\noption java_outer_classname = \"RdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: RDS]\n\n// The resource_names field in DiscoveryRequest specifies a route configuration.\n// This allows an Envoy configuration with multiple HTTP listeners (and\n// associated HTTP connection manager filters) to use different route\n// configurations. Each listener will bind its HTTP connection manager filter to\n// a route table via this identifier.\nservice RouteDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.route.v3.RouteConfiguration\";\n\n  rpc StreamRoutes(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaRoutes(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:routes\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for\n// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered\n// during the processing of an HTTP request if a route for the request cannot be resolved. 
The\n// :ref:`resource_names_subscribe <envoy_api_field_service.discovery.v3.DeltaDiscoveryRequest.resource_names_subscribe>`\n// field contains a list of virtual host names or aliases to track. The contents of an alias would\n// be the contents of a *host* or *authority* header used to make an http request. An xDS server\n// will match an alias to a virtual host based on the content of :ref:`domains'\n// <envoy_api_field_config.route.v3.VirtualHost.domains>` field. The *resource_names_unsubscribe* field\n// contains a list of virtual host names that have been :ref:`unsubscribed\n// <xds_protocol_unsubscribe>` from the routing table associated with the RouteConfiguration.\nservice VirtualHostDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.route.v3.VirtualHost\";\n\n  rpc DeltaVirtualHosts(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage RdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.RdsDummy\";\n}\n"
  },
  {
    "path": "api/envoy/service/route/v3/srds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.route.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.route.v3\";\noption java_outer_classname = \"SrdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: SRDS]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// The Scoped Routes Discovery Service (SRDS) API distributes\n// :ref:`ScopedRouteConfiguration<envoy_api_msg.ScopedRouteConfiguration>`\n// resources. Each ScopedRouteConfiguration resource represents a \"routing\n// scope\" containing a mapping that allows the HTTP connection manager to\n// dynamically assign a routing table (specified via a\n// :ref:`RouteConfiguration<envoy_api_msg_config.route.v3.RouteConfiguration>` message) to each\n// HTTP request.\nservice ScopedRoutesDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.route.v3.ScopedRouteConfiguration\";\n\n  rpc StreamScopedRoutes(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaScopedRoutes(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchScopedRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:scoped-routes\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage SrdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.SrdsDummy\";\n}\n"
  },
  {
    "path": "api/envoy/service/runtime/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/runtime/v3/rtds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.runtime.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.runtime.v3\";\noption java_outer_classname = \"RtdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Runtime Discovery Service (RTDS)]\n// RTDS :ref:`configuration overview <config_runtime_rtds>`\n\n// Discovery service for Runtime resources.\nservice RuntimeDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.service.runtime.v3.Runtime\";\n\n  rpc StreamRuntime(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaRuntime(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchRuntime(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:runtime\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage RtdsDummy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.RtdsDummy\";\n}\n\n// RTDS resource type. This describes a layer in the runtime virtual filesystem.\nmessage Runtime {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.service.discovery.v2.Runtime\";\n\n  // Runtime resource name. 
This makes the Runtime a self-describing xDS\n  // resource.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  google.protobuf.Struct layer = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/secret/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/secret/v3/sds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.secret.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.secret.v3\";\noption java_outer_classname = \"SdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Secret Discovery Service (SDS)]\n\nservice SecretDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.extensions.transport_sockets.tls.v3.Secret\";\n\n  rpc DeltaSecrets(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc StreamSecrets(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc FetchSecrets(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:secrets\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage SdsDummy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.SdsDummy\";\n}\n"
  },
  {
    "path": "api/envoy/service/status/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/admin/v2alpha:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/status/v2/csds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.status.v2;\n\nimport \"envoy/admin/v2alpha/config_dump.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/matcher/node.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.status.v2\";\noption java_outer_classname = \"CsdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Client Status Discovery Service (CSDS)]\n\n// CSDS is Client Status Discovery Service. It can be used to get the status of\n// an xDS-compliant client from the management server's point of view. In the\n// future, it can potentially be used as an interface to get the current\n// state directly from the client.\nservice ClientStatusDiscoveryService {\n  rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) {\n  }\n\n  rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) {\n    option (google.api.http).post = \"/v2/discovery:client_status\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Status of a config.\nenum ConfigStatus {\n  // Status info is not available/unknown.\n  UNKNOWN = 0;\n\n  // Management server has sent the config to client and received ACK.\n  SYNCED = 1;\n\n  // Config is not sent.\n  NOT_SENT = 2;\n\n  // Management server has sent the config to client but hasn’t received\n  // ACK/NACK.\n  STALE = 3;\n\n  // Management server has sent the config to client but received NACK.\n  ERROR = 4;\n}\n\n// Request for client status of clients identified by a list of NodeMatchers.\nmessage ClientStatusRequest {\n  // Management server can use these match criteria to identify clients.\n  // The match follows OR semantics.\n  repeated type.matcher.NodeMatcher node_matchers = 
1;\n}\n\n// Detailed config (per xDS) with status.\n// [#next-free-field: 6]\nmessage PerXdsConfig {\n  ConfigStatus status = 1;\n\n  oneof per_xds_config {\n    admin.v2alpha.ListenersConfigDump listener_config = 2;\n\n    admin.v2alpha.ClustersConfigDump cluster_config = 3;\n\n    admin.v2alpha.RoutesConfigDump route_config = 4;\n\n    admin.v2alpha.ScopedRoutesConfigDump scoped_route_config = 5;\n  }\n}\n\n// All xds configs for a particular client.\nmessage ClientConfig {\n  // Node for a particular client.\n  api.v2.core.Node node = 1;\n\n  repeated PerXdsConfig xds_config = 2;\n}\n\nmessage ClientStatusResponse {\n  // Client configs for the clients specified in the ClientStatusRequest.\n  repeated ClientConfig config = 1;\n}\n"
  },
  {
    "path": "api/envoy/service/status/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/admin/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/status/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/status/v3/csds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.status.v3;\n\nimport \"envoy/admin/v3/config_dump.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/matcher/v3/node.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.status.v3\";\noption java_outer_classname = \"CsdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Client Status Discovery Service (CSDS)]\n\n// CSDS is Client Status Discovery Service. It can be used to get the status of\n// an xDS-compliant client from the management server's point of view. It can\n// also be used to get the current xDS states directly from the client.\nservice ClientStatusDiscoveryService {\n  rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) {\n  }\n\n  rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) {\n    option (google.api.http).post = \"/v3/discovery:client_status\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Status of a config from a management server view.\nenum ConfigStatus {\n  // Status info is not available/unknown.\n  UNKNOWN = 0;\n\n  // Management server has sent the config to client and received ACK.\n  SYNCED = 1;\n\n  // Config is not sent.\n  NOT_SENT = 2;\n\n  // Management server has sent the config to client but hasn’t received\n  // ACK/NACK.\n  STALE = 3;\n\n  // Management server has sent the config to client but received NACK. 
The\n  // attached config dump will be the latest config (the rejected one), since\n  // it is the persisted version in the management server.\n  ERROR = 4;\n}\n\n// Config status from a client-side view.\nenum ClientConfigStatus {\n  // Config status is not available/unknown.\n  CLIENT_UNKNOWN = 0;\n\n  // Client requested the config but hasn't received any config from management\n  // server yet.\n  CLIENT_REQUESTED = 1;\n\n  // Client received the config and replied with ACK.\n  CLIENT_ACKED = 2;\n\n  // Client received the config and replied with NACK. Notably, the attached\n  // config dump is not the NACKed version, but the most recent accepted one. If\n  // no config is accepted yet, the attached config dump will be empty.\n  CLIENT_NACKED = 3;\n}\n\n// Request for client status of clients identified by a list of NodeMatchers.\nmessage ClientStatusRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v2.ClientStatusRequest\";\n\n  // Management server can use these match criteria to identify clients.\n  // The match follows OR semantics.\n  repeated type.matcher.v3.NodeMatcher node_matchers = 1;\n\n  // The node making the csds request.\n  config.core.v3.Node node = 2;\n}\n\n// Detailed config (per xDS) with status.\n// [#next-free-field: 8]\nmessage PerXdsConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v2.PerXdsConfig\";\n\n  // Config status generated by management servers. Will not be present if the\n  // CSDS server is an xDS client.\n  ConfigStatus status = 1 [(udpa.annotations.field_migrate).oneof_promotion = \"status_config\"];\n\n  // Client config status is populated by xDS clients. Will not be present if\n  // the CSDS server is an xDS server. 
No matter what the client config status\n  // is, xDS clients should always dump the most recent accepted xDS config.\n  ClientConfigStatus client_status = 7\n      [(udpa.annotations.field_migrate).oneof_promotion = \"status_config\"];\n\n  oneof per_xds_config {\n    admin.v3.ListenersConfigDump listener_config = 2;\n\n    admin.v3.ClustersConfigDump cluster_config = 3;\n\n    admin.v3.RoutesConfigDump route_config = 4;\n\n    admin.v3.ScopedRoutesConfigDump scoped_route_config = 5;\n\n    admin.v3.EndpointsConfigDump endpoint_config = 6;\n  }\n}\n\n// All xds configs for a particular client.\nmessage ClientConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v2.ClientConfig\";\n\n  // Node for a particular client.\n  config.core.v3.Node node = 1;\n\n  repeated PerXdsConfig xds_config = 2;\n}\n\nmessage ClientStatusResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v2.ClientStatusResponse\";\n\n  // Client configs for the clients specified in the ClientStatusRequest.\n  repeated ClientConfig config = 1;\n}\n"
  },
  {
    "path": "api/envoy/service/status/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/admin/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/status/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/status/v4alpha/csds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.status.v4alpha;\n\nimport \"envoy/admin/v4alpha/config_dump.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/matcher/v4alpha/node.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.status.v4alpha\";\noption java_outer_classname = \"CsdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Client Status Discovery Service (CSDS)]\n\n// CSDS is Client Status Discovery Service. It can be used to get the status of\n// an xDS-compliant client from the management server's point of view. It can\n// also be used to get the current xDS states directly from the client.\nservice ClientStatusDiscoveryService {\n  rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) {\n  }\n\n  rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) {\n    option (google.api.http).post = \"/v3/discovery:client_status\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Status of a config from a management server view.\nenum ConfigStatus {\n  // Status info is not available/unknown.\n  UNKNOWN = 0;\n\n  // Management server has sent the config to client and received ACK.\n  SYNCED = 1;\n\n  // Config is not sent.\n  NOT_SENT = 2;\n\n  // Management server has sent the config to client but hasn’t received\n  // ACK/NACK.\n  STALE = 3;\n\n  // Management server has sent the config to client but received NACK. The\n  // attached config dump will be the latest config (the rejected one), since\n  // it is the persisted version in the management server.\n  ERROR = 4;\n}\n\n// Config status from a client-side view.\nenum ClientConfigStatus {\n  // Config status is not available/unknown.\n  CLIENT_UNKNOWN = 0;\n\n  // Client requested the config but hasn't received any config from management\n  // server yet.\n  CLIENT_REQUESTED = 1;\n\n  // Client received the config and replied with ACK.\n  CLIENT_ACKED = 2;\n\n  // Client received the config and replied with NACK. Notably, the attached\n  // config dump is not the NACKed version, but the most recent accepted one. If\n  // no config is accepted yet, the attached config dump will be empty.\n  CLIENT_NACKED = 3;\n}\n\n// Request for client status of clients identified by a list of NodeMatchers.\nmessage ClientStatusRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v3.ClientStatusRequest\";\n\n  // Management server can use these match criteria to identify clients.\n  // The match follows OR semantics.\n  repeated type.matcher.v4alpha.NodeMatcher node_matchers = 1;\n\n  // The node making the csds request.\n  config.core.v4alpha.Node node = 2;\n}\n\n// Detailed config (per xDS) with status.\n// [#next-free-field: 8]\nmessage PerXdsConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v3.PerXdsConfig\";\n\n  oneof status_config {\n    // Config status generated by management servers. Will not be present if the\n    // CSDS server is an xDS client.\n    ConfigStatus status = 1;\n\n    // Client config status is populated by xDS clients. Will not be present if\n    // the CSDS server is an xDS server. No matter what the client config status\n    // is, xDS clients should always dump the most recent accepted xDS config.\n    ClientConfigStatus client_status = 7;\n  }\n\n  oneof per_xds_config {\n    admin.v4alpha.ListenersConfigDump listener_config = 2;\n\n    admin.v4alpha.ClustersConfigDump cluster_config = 3;\n\n    admin.v4alpha.RoutesConfigDump route_config = 4;\n\n    admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5;\n\n    admin.v4alpha.EndpointsConfigDump endpoint_config = 6;\n  }\n}\n\n// All xds configs for a particular client.\nmessage ClientConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v3.ClientConfig\";\n\n  // Node for a particular client.\n  config.core.v4alpha.Node node = 1;\n\n  repeated PerXdsConfig xds_config = 2;\n}\n\nmessage ClientStatusResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v3.ClientStatusResponse\";\n\n  // Client configs for the clients specified in the ClientStatusRequest.\n  repeated ClientConfig config = 1;\n}\n"
  },
  {
    "path": "api/envoy/service/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/data/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/tap/v2alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.tap.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/grpc_service.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.tap.v2alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.tap.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common tap configuration]\n\n// Tap configuration.\nmessage TapConfig {\n  // [#comment:TODO(mattklein123): Rate limiting]\n\n  // The match configuration. If the configuration matches the data source being tapped, a tap will\n  // occur, with the result written to the configured output.\n  MatchPredicate match_config = 1 [(validate.rules).message = {required: true}];\n\n  // The tap output configuration. If a match configuration matches a data source being tapped,\n  // a tap will occur and the data will be written to the configured output.\n  OutputConfig output_config = 2 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\\connections for\n  // which the tap matching is enabled. When not enabled, the request\\connection will not be\n  // recorded.\n  //\n  // .. note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.FractionalPercent.DenominatorType>`.\n  api.v2.core.RuntimeFractionalPercent tap_enabled = 3;\n}\n\n// Tap match configuration. This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 9]\nmessage MatchPredicate {\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  // HTTP headers to match.\n  repeated api.v2.route.HeaderMatcher headers = 1;\n}\n\n// Tap output configuration.\nmessage OutputConfig {\n  // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple\n  // sink types are supported this constraint will be relaxed.\n  repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}];\n\n  // For buffered tapping, the maximum amount of received body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v2alpha.Body.truncated>` field will be set. If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_rx_bytes = 2;\n\n  // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v2alpha.Body.truncated>` field will be set. If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_tx_bytes = 3;\n\n  // Indicates whether taps produce a single buffered message per tap, or multiple streamed\n  // messages per tap in the emitted :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v2alpha.TraceWrapper>` messages. Note that streamed tapping does not\n  // mean that no buffering takes place. Buffering may be required if data is processed before a\n  // match can be determined. See the HTTP tap filter :ref:`streaming\n  // <config_http_filters_tap_streaming>` documentation for more information.\n  bool streaming = 4;\n}\n\n// Tap output sink configuration.\nmessage OutputSink {\n  // Output format. All output is in the form of one or more :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v2alpha.TraceWrapper>` messages. This enumeration indicates\n  // how those messages are written. Note that not all sinks support all output formats. See\n  // individual sink documentation for more information.\n  enum Format {\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v2alpha.Body>`\n    // data will be present in the :ref:`as_bytes\n    // <envoy_api_field_data.tap.v2alpha.Body.as_bytes>` field. This means that body data will be\n    // base64 encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n    JSON_BODY_AS_BYTES = 0;\n\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v2alpha.Body>`\n    // data will be present in the :ref:`as_string\n    // <envoy_api_field_data.tap.v2alpha.Body.as_string>` field. This means that body data will be\n    // string encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_. This format type is\n    // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the\n    // user wishes to view it directly without being forced to base64 decode the body.\n    JSON_BODY_AS_STRING = 1;\n\n    // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes\n    // multiple binary messages without any length information the data stream will not be\n    // useful. However, for certain sinks that are self-delimiting (e.g., one message per file)\n    // this output format makes consumption simpler.\n    PROTO_BINARY = 2;\n\n    // Messages are written as a sequence tuples, where each tuple is the message length encoded\n    // as a `protobuf 32-bit varint\n    // <https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.coded_stream>`_\n    // followed by the binary message. The messages can be read back using the language specific\n    // protobuf coded stream implementation to obtain the message length and the message.\n    PROTO_BINARY_LENGTH_DELIMITED = 3;\n\n    // Text proto format.\n    PROTO_TEXT = 4;\n  }\n\n  // Sink output format.\n  Format format = 1 [(validate.rules).enum = {defined_only: true}];\n\n  oneof output_sink_type {\n    option (validate.required) = true;\n\n    // Tap output will be streamed out the :http:post:`/tap` admin endpoint.\n    //\n    // .. attention::\n    //\n    //   It is only allowed to specify the streaming admin output sink if the tap is being\n    //   configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has\n    //   been configured to receive tap configuration from some other source (e.g., static\n    //   file, XDS, etc.) configuring the streaming admin output type will fail.\n    StreamingAdminSink streaming_admin = 2;\n\n    // Tap output will be written to a file per tap sink.\n    FilePerTapSink file_per_tap = 3;\n\n    // [#not-implemented-hide:]\n    // GrpcService to stream data to. The format argument must be PROTO_BINARY.\n    StreamingGrpcSink streaming_grpc = 4;\n  }\n}\n\n// Streaming admin sink configuration.\nmessage StreamingAdminSink {\n}\n\n// The file per tap sink outputs a discrete file for every tapped stream.\nmessage FilePerTapSink {\n  // Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an\n  // identifier distinguishing the recorded trace for stream instances (the Envoy\n  // connection ID, HTTP stream ID, etc.).\n  string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC\n// server.\nmessage StreamingGrpcSink {\n  // Opaque identifier, that will be sent back to the streaming grpc server.\n  string tap_id = 1;\n\n  // The gRPC server that hosts the Tap Sink Service.\n  api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/service/tap/v2alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.tap.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/data/tap/v2alpha/wrapper.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.tap.v2alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap Sink Service]\n\n// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call\n// StreamTaps to deliver captured taps to the server\nservice TapSinkService {\n  // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect.\n  rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server\n// and stream taps without ever expecting a response.\nmessage StreamTapsRequest {\n  message Identifier {\n    // The node sending taps over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The opaque identifier that was set in the :ref:`output config\n    // <envoy_api_field_service.tap.v2alpha.StreamingGrpcSink.tap_id>`.\n    string tap_id = 2;\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // The trace id. this can be used to merge together a streaming trace. Note that the trace_id\n  // is not guaranteed to be spatially or temporally unique.\n  uint64 trace_id = 2;\n\n  // The trace data.\n  data.tap.v2alpha.TraceWrapper trace = 3;\n}\n\n// [#not-implemented-hide:]\nmessage StreamTapsResponse {\n}\n"
  },
  {
    "path": "api/envoy/service/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/tap/v3:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/tap/v3/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.tap.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/data/tap/v3/wrapper.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.tap.v3\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap Sink Service]\n\n// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call\n// StreamTaps to deliver captured taps to the server\nservice TapSinkService {\n  // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect.\n  rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server\n// and stream taps without ever expecting a response.\nmessage StreamTapsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.StreamTapsRequest\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.tap.v2alpha.StreamTapsRequest.Identifier\";\n\n    // The node sending taps over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The opaque identifier that was set in the :ref:`output config\n    // <envoy_api_field_config.tap.v3.StreamingGrpcSink.tap_id>`.\n    string tap_id = 2;\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // The trace id. this can be used to merge together a streaming trace. Note that the trace_id\n  // is not guaranteed to be spatially or temporally unique.\n  uint64 trace_id = 2;\n\n  // The trace data.\n  data.tap.v3.TraceWrapper trace = 3;\n}\n\n// [#not-implemented-hide:]\nmessage StreamTapsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.StreamTapsResponse\";\n}\n"
  },
  {
    "path": "api/envoy/service/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/data/tap/v3:pkg\",\n        \"//envoy/service/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/tap/v4alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.tap.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/data/tap/v3/wrapper.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.tap.v4alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tap Sink Service]\n\n// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call\n// StreamTaps to deliver captured taps to the server\nservice TapSinkService {\n  // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect.\n  rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server\n// and stream taps without ever expecting a response.\nmessage StreamTapsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v3.StreamTapsRequest\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.tap.v3.StreamTapsRequest.Identifier\";\n\n    // The node sending taps over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The opaque identifier that was set in the :ref:`output config\n    // <envoy_api_field_config.tap.v4alpha.StreamingGrpcSink.tap_id>`.\n    string tap_id = 2;\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // The trace id. this can be used to merge together a streaming trace. Note that the trace_id\n  // is not guaranteed to be spatially or temporally unique.\n  uint64 trace_id = 2;\n\n  // The trace data.\n  data.tap.v3.TraceWrapper trace = 3;\n}\n\n// [#not-implemented-hide:]\nmessage StreamTapsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v3.StreamTapsResponse\";\n}\n"
  },
  {
    "path": "api/envoy/service/trace/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/trace/v2/trace_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.trace.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"opencensus/proto/trace/v1/trace.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.trace.v2\";\noption java_outer_classname = \"TraceServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Trace service]\n\n// Service for streaming traces to server that consumes the trace data. It\n// uses OpenCensus data model as a standard to represent trace information.\nservice TraceService {\n  // Envoy will connect and send StreamTracesMessage messages forever. It does\n  // not expect any response to be sent as nothing would be done in the case\n  // of failure.\n  rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) {\n  }\n}\n\nmessage StreamTracesResponse {\n}\n\nmessage StreamTracesMessage {\n  message Identifier {\n    // The node sending the access log messages over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata.\n  // As a performance optimization this will only be sent in the first message\n  // on the stream.\n  Identifier identifier = 1;\n\n  // A list of Span entries\n  repeated opencensus.proto.trace.v1.Span spans = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/trace/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/trace/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/trace/v3/trace_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.trace.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"opencensus/proto/trace/v1/trace.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.trace.v3\";\noption java_outer_classname = \"TraceServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Trace service]\n\n// Service for streaming traces to server that consumes the trace data. It\n// uses OpenCensus data model as a standard to represent trace information.\nservice TraceService {\n  // Envoy will connect and send StreamTracesMessage messages forever. It does\n  // not expect any response to be sent as nothing would be done in the case\n  // of failure.\n  rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) {\n  }\n}\n\nmessage StreamTracesResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.trace.v2.StreamTracesResponse\";\n}\n\nmessage StreamTracesMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.trace.v2.StreamTracesMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.trace.v2.StreamTracesMessage.Identifier\";\n\n    // The node sending the access log messages over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata.\n  // As a performance optimization this will only be sent in the first message\n  // on the stream.\n  Identifier identifier = 1;\n\n  // A list of Span entries\n  repeated opencensus.proto.trace.v1.Span spans = 2;\n}\n"
  },
  {
    "path": "api/envoy/service/trace/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/service/trace/v4alpha/trace_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.trace.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"opencensus/proto/trace/v1/trace.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.trace.v4alpha\";\noption java_outer_classname = \"TraceServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Trace service]\n\n// Service for streaming traces to server that consumes the trace data. It\n// uses OpenCensus data model as a standard to represent trace information.\nservice TraceService {\n  // Envoy will connect and send StreamTracesMessage messages forever. It does\n  // not expect any response to be sent as nothing would be done in the case\n  // of failure.\n  rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) {\n  }\n}\n\nmessage StreamTracesResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.trace.v3.StreamTracesResponse\";\n}\n\nmessage StreamTracesMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.trace.v3.StreamTracesMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.trace.v3.StreamTracesMessage.Identifier\";\n\n    // The node sending the access log messages over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata.\n  // As a performance optimization this will only be sent in the first message\n  // on the stream.\n  Identifier identifier = 1;\n\n  // A list of Span entries\n  repeated opencensus.proto.trace.v1.Span spans = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/type/hash_policy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"HashPolicyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Hash Policy]\n\n// Specifies the hash policy\nmessage HashPolicy {\n  // The source IP will be used to compute the hash used by hash-based load balancing\n  // algorithms.\n  message SourceIp {\n  }\n\n  oneof policy_specifier {\n    option (validate.required) = true;\n\n    SourceIp source_ip = 1;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/http.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"HttpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP]\n\nenum CodecClientType {\n  HTTP1 = 0;\n\n  HTTP2 = 1;\n\n  // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n  // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n  // to distinguish HTTP1 and HTTP2 traffic.\n  HTTP3 = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/http_status.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"HttpStatusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP status codes]\n\n// HTTP response codes supported in Envoy.\n// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml\nenum StatusCode {\n  // Empty - This code not part of the HTTP status code specification, but it is needed for proto\n  // `enum` type.\n  Empty = 0;\n\n  Continue = 100;\n\n  OK = 200;\n\n  Created = 201;\n\n  Accepted = 202;\n\n  NonAuthoritativeInformation = 203;\n\n  NoContent = 204;\n\n  ResetContent = 205;\n\n  PartialContent = 206;\n\n  MultiStatus = 207;\n\n  AlreadyReported = 208;\n\n  IMUsed = 226;\n\n  MultipleChoices = 300;\n\n  MovedPermanently = 301;\n\n  Found = 302;\n\n  SeeOther = 303;\n\n  NotModified = 304;\n\n  UseProxy = 305;\n\n  TemporaryRedirect = 307;\n\n  PermanentRedirect = 308;\n\n  BadRequest = 400;\n\n  Unauthorized = 401;\n\n  PaymentRequired = 402;\n\n  Forbidden = 403;\n\n  NotFound = 404;\n\n  MethodNotAllowed = 405;\n\n  NotAcceptable = 406;\n\n  ProxyAuthenticationRequired = 407;\n\n  RequestTimeout = 408;\n\n  Conflict = 409;\n\n  Gone = 410;\n\n  LengthRequired = 411;\n\n  PreconditionFailed = 412;\n\n  PayloadTooLarge = 413;\n\n  URITooLong = 414;\n\n  UnsupportedMediaType = 415;\n\n  RangeNotSatisfiable = 416;\n\n  ExpectationFailed = 417;\n\n  MisdirectedRequest = 421;\n\n  UnprocessableEntity = 422;\n\n  Locked = 423;\n\n  FailedDependency = 424;\n\n  UpgradeRequired = 426;\n\n  PreconditionRequired = 428;\n\n  TooManyRequests = 429;\n\n  RequestHeaderFieldsTooLarge = 431;\n\n  InternalServerError = 500;\n\n  NotImplemented = 501;\n\n  BadGateway = 502;\n\n  ServiceUnavailable = 503;\n\n  GatewayTimeout = 504;\n\n  HTTPVersionNotSupported = 505;\n\n  VariantAlsoNegotiates = 506;\n\n  InsufficientStorage = 507;\n\n  LoopDetected = 508;\n\n  NotExtended = 510;\n\n  NetworkAuthenticationRequired = 511;\n}\n\n// HTTP status.\nmessage HttpStatus {\n  // Supplies HTTP response code.\n  StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/type/matcher/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metadata matcher]\n\n// MetadataMatcher provides a general interface to check if a given value is matched in\n// :ref:`Metadata <envoy_api_msg_core.Metadata>`. It uses `filter` and `path` to retrieve the value\n// from the Metadata and then check if it's matched to the specified value.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.filters.http.rbac:\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following MetadataMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to\n// enforce access control based on dynamic metadata in a request. See :ref:`Permission\n// <envoy_api_msg_config.rbac.v2.Permission>` and :ref:`Principal\n// <envoy_api_msg_config.rbac.v2.Principal>`.\n\n// [#next-major-version: MetadataMatcher should use StructMatcher]\nmessage MetadataMatcher {\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that\n  // if the segment key refers to a list, it has to be the last segment in a path.\n  message PathSegment {\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The filter name to retrieve the Struct from the Metadata.\n  string filter = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The MetadataMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/node.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/string.proto\";\nimport \"envoy/type/matcher/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"NodeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Node matcher]\n\n// Specifies the way to match a Node.\n// The match follows AND semantics.\nmessage NodeMatcher {\n  // Specifies match criteria on the node id.\n  StringMatcher node_id = 1;\n\n  // Specifies match criteria on the node metadata.\n  repeated StructMatcher node_metadatas = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/number.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"NumberProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Number matcher]\n\n// Specifies the way to match a double value.\nmessage DoubleMatcher {\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, the input double value must be in the range specified here.\n    // Note: The range is using half-open interval semantics [start, end).\n    DoubleRange range = 1;\n\n    // If specified, the input double value must be equal to the value specified here.\n    double exact = 2;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/path.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"PathProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Path matcher]\n\n// Specifies the way to match a path on HTTP request.\nmessage PathMatcher {\n  oneof rule {\n    option (validate.required) = true;\n\n    // The `path` must match the URL path portion of the :path header. The query and fragment\n    // string (if present) are removed in the URL path portion.\n    // For example, the path */data* will match the *:path* header */data#fragment?param=value*.\n    StringMatcher path = 1 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/regex.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"RegexProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Regex matcher]\n\n// A regex matcher designed for safety when used with untrusted input.\nmessage RegexMatcher {\n  // Google's `RE2 <https://github.com/google/re2>`_ regex engine. The regex string must adhere to\n  // the documented `syntax <https://github.com/google/re2/wiki/Syntax>`_. The engine is designed\n  // to complete execution in linear time as well as limit the amount of memory used.\n  //\n  // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level`\n  // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or\n  // complexity that a compiled regex can have before an exception is thrown or a warning is\n  // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and\n  // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning).\n  //\n  // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`,\n  // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented\n  // each time the program size exceeds the warn level threshold.\n  message GoogleRE2 {\n    // This field controls the RE2 \"program size\" which is a rough estimate of how complex a\n    // compiled regex is to evaluate. A regex that has a program size greater than the configured\n    // value will fail to compile. In this case, the configured max program size can be increased\n    // or the regex can be simplified. 
If not specified, the default is 100.\n    //\n    // This field is deprecated; regexp validation should be performed on the management server\n    // instead of being done by each individual client.\n    google.protobuf.UInt32Value max_program_size = 1 [deprecated = true];\n  }\n\n  oneof engine_type {\n    option (validate.required) = true;\n\n    // Google's RE2 regex engine.\n    GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // The regex match string. The string must be supported by the configured engine.\n  string regex = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Describes how to match a string and then produce a new string using a regular\n// expression and a substitution string.\nmessage RegexMatchAndSubstitute {\n  // The regular expression used to find portions of a string (hereafter called\n  // the \"subject string\") that should be replaced. When a new string is\n  // produced during the substitution operation, the new string is initially\n  // the same as the subject string, but then all matches in the subject string\n  // are replaced by the substitution string. If replacing all matches isn't\n  // desired, regular expression anchors can be used to ensure a single match,\n  // so as to replace just one occurrence of a pattern. Capture groups can be\n  // used in the pattern to extract portions of the subject string, and then\n  // referenced in the substitution string.\n  RegexMatcher pattern = 1;\n\n  // The string that should be substituted into matching portions of the\n  // subject string during a substitution operation to produce a new string.\n  // Capture groups in the pattern can be referenced in the substitution\n  // string. Note, however, that the syntax for referring to capture groups is\n  // defined by the chosen regular expression engine. 
Google's `RE2\n  // <https://github.com/google/re2>`_ regular expression engine uses a\n  // backslash followed by the capture group number to denote a numbered\n  // capture group. E.g., ``\\1`` refers to capture group 1, and ``\\2`` refers\n  // to capture group 2.\n  string substitution = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/regex.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"StringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: String matcher]\n\n// Specifies the way to match a string.\n// [#next-free-field: 7]\nmessage StringMatcher {\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // The input string must match exactly the string specified here.\n    //\n    // Examples:\n    //\n    // * *abc* only matches the value *abc*.\n    string exact = 1;\n\n    // The input string must have the prefix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *abc.xyz*\n    string prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must have the suffix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *xyz.abc*\n    string suffix = 3 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must match the regular expression specified here.\n    // The regex grammar is defined `here\n    // <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.\n    //\n    // Examples:\n    //\n    // * The regex ``\\d{3}`` matches the value *123*\n    // * The regex ``\\d{3}`` does not match the value *1234*\n    // * The regex ``\\d{3}`` does not match the value *123.456*\n    //\n    // .. 
attention::\n    //   This field has been deprecated in favor of `safe_regex` as it is not safe for use with\n    //   untrusted input in all cases.\n    string regex = 4 [\n      deprecated = true,\n      (validate.rules).string = {max_bytes: 1024},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n\n    // The input string must match the regular expression specified here.\n    RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];\n  }\n\n  // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no\n  // effect for the safe_regex match.\n  // For example, the matcher *data* will match both input string *Data* and *data* if set to true.\n  bool ignore_case = 6;\n}\n\n// Specifies a list of ways to match a string.\nmessage ListStringMatcher {\n  repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/struct.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"StructProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Struct matcher]\n\n// StructMatcher provides a general interface to check if a given value is matched in\n// google.protobuf.Struct. It uses `path` to retrieve the value\n// from the struct and then check if it's matched to the specified value.\n//\n// For example, for the following Struct:\n//\n// .. code-block:: yaml\n//\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following StructMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of StructMatcher is to match metadata in envoy.v*.core.Node.\nmessage StructMatcher {\n  // Specifies the segment in a path to retrieve value from Struct.\n  message PathSegment {\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The StructMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/type/matcher/v3/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metadata matcher]\n\n// MetadataMatcher provides a general interface to check if a given value is matched in\n// :ref:`Metadata <envoy_api_msg_config.core.v3.Metadata>`. It uses `filter` and `path` to retrieve the value\n// from the Metadata and then check if it's matched to the specified value.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.filters.http.rbac:\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following MetadataMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to\n// enforce access control based on dynamic metadata in a request. See :ref:`Permission\n// <envoy_api_msg_config.rbac.v3.Permission>` and :ref:`Principal\n// <envoy_api_msg_config.rbac.v3.Principal>`.\n\n// [#next-major-version: MetadataMatcher should use StructMatcher]\nmessage MetadataMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.MetadataMatcher\";\n\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that\n  // if the segment key refers to a list, it has to be the last segment in a path.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.MetadataMatcher.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The filter name to retrieve the Struct from the Metadata.\n  string filter = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The MetadataMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v3/node.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/matcher/v3/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"NodeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Node matcher]\n\n// Specifies the way to match a Node.\n// The match follows AND semantics.\nmessage NodeMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.NodeMatcher\";\n\n  // Specifies match criteria on the node id.\n  StringMatcher node_id = 1;\n\n  // Specifies match criteria on the node metadata.\n  repeated StructMatcher node_metadatas = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v3/number.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/v3/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"NumberProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Number matcher]\n\n// Specifies the way to match a double value.\nmessage DoubleMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.DoubleMatcher\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, the input double value must be in the range specified here.\n    // Note: The range is using half-open interval semantics [start, end).\n    type.v3.DoubleRange range = 1;\n\n    // If specified, the input double value must be equal to the value specified here.\n    double exact = 2;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v3/path.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"PathProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Path matcher]\n\n// Specifies the way to match a path on HTTP request.\nmessage PathMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.PathMatcher\";\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // The `path` must match the URL path portion of the :path header. The query and fragment\n    // string (if present) are removed in the URL path portion.\n    // For example, the path */data* will match the *:path* header */data#fragment?param=value*.\n    StringMatcher path = 1 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v3/regex.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"RegexProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Regex matcher]\n\n// A regex matcher designed for safety when used with untrusted input.\nmessage RegexMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.RegexMatcher\";\n\n  // Google's `RE2 <https://github.com/google/re2>`_ regex engine. The regex string must adhere to\n  // the documented `syntax <https://github.com/google/re2/wiki/Syntax>`_. The engine is designed\n  // to complete execution in linear time as well as limit the amount of memory used.\n  //\n  // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level`\n  // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or\n  // complexity that a compiled regex can have before an exception is thrown or a warning is\n  // logged, respectively. 
`re2.max_program_size.error_level` defaults to 100, and\n  // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning).\n  //\n  // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`,\n  // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented\n  // each time the program size exceeds the warn level threshold.\n  message GoogleRE2 {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.RegexMatcher.GoogleRE2\";\n\n    // This field controls the RE2 \"program size\" which is a rough estimate of how complex a\n    // compiled regex is to evaluate. A regex that has a program size greater than the configured\n    // value will fail to compile. In this case, the configured max program size can be increased\n    // or the regex can be simplified. If not specified, the default is 100.\n    //\n    // This field is deprecated; regexp validation should be performed on the management server\n    // instead of being done by each individual client.\n    google.protobuf.UInt32Value max_program_size = 1 [deprecated = true];\n  }\n\n  oneof engine_type {\n    option (validate.required) = true;\n\n    // Google's RE2 regex engine.\n    GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // The regex match string. The string must be supported by the configured engine.\n  string regex = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Describes how to match a string and then produce a new string using a regular\n// expression and a substitution string.\nmessage RegexMatchAndSubstitute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.RegexMatchAndSubstitute\";\n\n  // The regular expression used to find portions of a string (hereafter called\n  // the \"subject string\") that should be replaced. 
When a new string is\n  // produced during the substitution operation, the new string is initially\n  // the same as the subject string, but then all matches in the subject string\n  // are replaced by the substitution string. If replacing all matches isn't\n  // desired, regular expression anchors can be used to ensure a single match,\n  // so as to replace just one occurrence of a pattern. Capture groups can be\n  // used in the pattern to extract portions of the subject string, and then\n  // referenced in the substitution string.\n  RegexMatcher pattern = 1 [(validate.rules).message = {required: true}];\n\n  // The string that should be substituted into matching portions of the\n  // subject string during a substitution operation to produce a new string.\n  // Capture groups in the pattern can be referenced in the substitution\n  // string. Note, however, that the syntax for referring to capture groups is\n  // defined by the chosen regular expression engine. Google's `RE2\n  // <https://github.com/google/re2>`_ regular expression engine uses a\n  // backslash followed by the capture group number to denote a numbered\n  // capture group. E.g., ``\\1`` refers to capture group 1, and ``\\2`` refers\n  // to capture group 2.\n  string substitution = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v3/string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/regex.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"StringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: String matcher]\n\n// Specifies the way to match a string.\n// [#next-free-field: 8]\nmessage StringMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.StringMatcher\";\n\n  reserved 4;\n\n  reserved \"regex\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // The input string must match exactly the string specified here.\n    //\n    // Examples:\n    //\n    // * *abc* only matches the value *abc*.\n    string exact = 1;\n\n    // The input string must have the prefix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *abc.xyz*\n    string prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must have the suffix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *xyz.abc*\n    string suffix = 3 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must match the regular expression specified here.\n    RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];\n\n    // The input string must have the substring specified here.\n    // Note: empty contains match is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *xyz.abc.def*\n    string contains = 
7 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no\n  // effect for the safe_regex match.\n  // For example, the matcher *data* will match both input string *Data* and *data* if set to true.\n  bool ignore_case = 6;\n}\n\n// Specifies a list of ways to match a string.\nmessage ListStringMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.ListStringMatcher\";\n\n  repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v3/struct.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"StructProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Struct matcher]\n\n// StructMatcher provides a general interface to check if a given value is matched in\n// google.protobuf.Struct. It uses `path` to retrieve the value\n// from the struct and then check if it's matched to the specified value.\n//\n// For example, for the following Struct:\n//\n// .. code-block:: yaml\n//\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following StructMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of StructMatcher is to match metadata in envoy.v*.core.Node.\nmessage StructMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.StructMatcher\";\n\n  // Specifies the segment in a path to retrieve value from Struct.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.StructMatcher.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The StructMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v3/value.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/number.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"ValueProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Value matcher]\n\n// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported.\n// StructValue is not supported and is always not matched.\n// [#next-free-field: 7]\nmessage ValueMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.ValueMatcher\";\n\n  // NullMatch is an empty message to specify a null value.\n  message NullMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.ValueMatcher.NullMatch\";\n  }\n\n  // Specifies how to match a value.\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, a match occurs if and only if the target value is a NullValue.\n    NullMatch null_match = 1;\n\n    // If specified, a match occurs if and only if the target value is a double value and is\n    // matched to this field.\n    DoubleMatcher double_match = 2;\n\n    // If specified, a match occurs if and only if the target value is a string value and is\n    // matched to this field.\n    StringMatcher string_match = 3;\n\n    // If specified, a match occurs if and only if the target value is a bool value and is equal\n    // to this field.\n    bool bool_match = 4;\n\n    // If specified, value match will be performed based on whether the path is referring to a\n    // valid primitive value in the metadata. 
If the path is referring to a non-primitive value,\n    // the result is always not matched.\n    bool present_match = 5;\n\n    // If specified, a match occurs if and only if the target value is a list value and\n    // is matched to this field.\n    ListMatcher list_match = 6;\n  }\n}\n\n// Specifies the way to match a list value.\nmessage ListMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.ListMatcher\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, at least one of the values in the list must match the value specified.\n    ValueMatcher one_of = 1;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/type/matcher/v4alpha/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Metadata matcher]\n\n// MetadataMatcher provides a general interface to check if a given value is matched in\n// :ref:`Metadata <envoy_api_msg_config.core.v4alpha.Metadata>`. It uses `filter` and `path` to retrieve the value\n// from the Metadata and then check if it's matched to the specified value.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.filters.http.rbac:\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following MetadataMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to\n// enforce access control based on dynamic metadata in a request. See :ref:`Permission\n// <envoy_api_msg_config.rbac.v4alpha.Permission>` and :ref:`Principal\n// <envoy_api_msg_config.rbac.v4alpha.Principal>`.\n\n// [#next-major-version: MetadataMatcher should use StructMatcher]\nmessage MetadataMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.MetadataMatcher\";\n\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that\n  // if the segment key refers to a list, it has to be the last segment in a path.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.v3.MetadataMatcher.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The filter name to retrieve the Struct from the Metadata.\n  string filter = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The MetadataMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v4alpha/node.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/matcher/v4alpha/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"NodeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Node matcher]\n\n// Specifies the way to match a Node.\n// The match follows AND semantics.\nmessage NodeMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.NodeMatcher\";\n\n  // Specifies match criteria on the node id.\n  StringMatcher node_id = 1;\n\n  // Specifies match criteria on the node metadata.\n  repeated StructMatcher node_metadatas = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v4alpha/number.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/v3/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"NumberProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Number matcher]\n\n// Specifies the way to match a double value.\nmessage DoubleMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.DoubleMatcher\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, the input double value must be in the range specified here.\n    // Note: The range is using half-open interval semantics [start, end).\n    v3.DoubleRange range = 1;\n\n    // If specified, the input double value must be equal to the value specified here.\n    double exact = 2;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v4alpha/path.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"PathProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Path matcher]\n\n// Specifies the way to match a path on HTTP request.\nmessage PathMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.PathMatcher\";\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // The `path` must match the URL path portion of the :path header. The query and fragment\n    // string (if present) are removed in the URL path portion.\n    // For example, the path */data* will match the *:path* header */data#fragment?param=value*.\n    StringMatcher path = 1 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v4alpha/regex.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"RegexProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Regex matcher]\n\n// A regex matcher designed for safety when used with untrusted input.\nmessage RegexMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.RegexMatcher\";\n\n  // Google's `RE2 <https://github.com/google/re2>`_ regex engine. The regex string must adhere to\n  // the documented `syntax <https://github.com/google/re2/wiki/Syntax>`_. The engine is designed\n  // to complete execution in linear time as well as limit the amount of memory used.\n  //\n  // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level`\n  // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or\n  // complexity that a compiled regex can have before an exception is thrown or a warning is\n  // logged, respectively. 
`re2.max_program_size.error_level` defaults to 100, and\n  // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning).\n  //\n  // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`,\n  // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented\n  // each time the program size exceeds the warn level threshold.\n  message GoogleRE2 {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.v3.RegexMatcher.GoogleRE2\";\n\n    reserved 1;\n\n    reserved \"max_program_size\";\n  }\n\n  oneof engine_type {\n    option (validate.required) = true;\n\n    // Google's RE2 regex engine.\n    GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // The regex match string. The string must be supported by the configured engine.\n  string regex = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Describes how to match a string and then produce a new string using a regular\n// expression and a substitution string.\nmessage RegexMatchAndSubstitute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.RegexMatchAndSubstitute\";\n\n  // The regular expression used to find portions of a string (hereafter called\n  // the \"subject string\") that should be replaced. When a new string is\n  // produced during the substitution operation, the new string is initially\n  // the same as the subject string, but then all matches in the subject string\n  // are replaced by the substitution string. If replacing all matches isn't\n  // desired, regular expression anchors can be used to ensure a single match,\n  // so as to replace just one occurrence of a pattern. 
Capture groups can be\n  // used in the pattern to extract portions of the subject string, and then\n  // referenced in the substitution string.\n  RegexMatcher pattern = 1 [(validate.rules).message = {required: true}];\n\n  // The string that should be substituted into matching portions of the\n  // subject string during a substitution operation to produce a new string.\n  // Capture groups in the pattern can be referenced in the substitution\n  // string. Note, however, that the syntax for referring to capture groups is\n  // defined by the chosen regular expression engine. Google's `RE2\n  // <https://github.com/google/re2>`_ regular expression engine uses a\n  // backslash followed by the capture group number to denote a numbered\n  // capture group. E.g., ``\\1`` refers to capture group 1, and ``\\2`` refers\n  // to capture group 2.\n  string substitution = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v4alpha/string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/regex.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"StringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: String matcher]\n\n// Specifies the way to match a string.\n// [#next-free-field: 8]\nmessage StringMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.StringMatcher\";\n\n  reserved 4;\n\n  reserved \"regex\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // The input string must match exactly the string specified here.\n    //\n    // Examples:\n    //\n    // * *abc* only matches the value *abc*.\n    string exact = 1;\n\n    // The input string must have the prefix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *abc.xyz*\n    string prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must have the suffix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *xyz.abc*\n    string suffix = 3 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must match the regular expression specified here.\n    RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];\n\n    // The input string must have the substring specified here.\n    // Note: empty contains match is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches 
the value *xyz.abc.def*\n    string contains = 7 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no\n  // effect for the safe_regex match.\n  // For example, the matcher *data* will match both input string *Data* and *data* if set to true.\n  bool ignore_case = 6;\n}\n\n// Specifies a list of ways to match a string.\nmessage ListStringMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.ListStringMatcher\";\n\n  repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v4alpha/struct.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"StructProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Struct matcher]\n\n// StructMatcher provides a general interface to check if a given value is matched in\n// google.protobuf.Struct. It uses `path` to retrieve the value\n// from the struct and then check if it's matched to the specified value.\n//\n// For example, for the following Struct:\n//\n// .. code-block:: yaml\n//\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following StructMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of StructMatcher is to match metadata in envoy.v*.core.Node.\nmessage StructMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.StructMatcher\";\n\n  // Specifies the segment in a path to retrieve value from Struct.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.v3.StructMatcher.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The StructMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/v4alpha/value.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/number.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"ValueProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Value matcher]\n\n// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported.\n// StructValue is not supported and is always not matched.\n// [#next-free-field: 7]\nmessage ValueMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.ValueMatcher\";\n\n  // NullMatch is an empty message to specify a null value.\n  message NullMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.v3.ValueMatcher.NullMatch\";\n  }\n\n  // Specifies how to match a value.\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, a match occurs if and only if the target value is a NullValue.\n    NullMatch null_match = 1;\n\n    // If specified, a match occurs if and only if the target value is a double value and is\n    // matched to this field.\n    DoubleMatcher double_match = 2;\n\n    // If specified, a match occurs if and only if the target value is a string value and is\n    // matched to this field.\n    StringMatcher string_match = 3;\n\n    // If specified, a match occurs if and only if the target value is a bool value and is equal\n    // to this field.\n    bool bool_match = 4;\n\n    // If specified, value match will be performed based on whether the path is referring to a\n    // valid primitive value in the metadata. 
If the path is referring to a non-primitive value,\n    // the result is always not matched.\n    bool present_match = 5;\n\n    // If specified, a match occurs if and only if the target value is a list value and\n    // is matched to this field.\n    ListMatcher list_match = 6;\n  }\n}\n\n// Specifies the way to match a list value.\nmessage ListMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.ListMatcher\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, at least one of the values in the list must match the value specified.\n    ValueMatcher one_of = 1;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/matcher/value.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/number.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"ValueProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Value matcher]\n\n// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported.\n// StructValue is not supported and is always not matched.\n// [#next-free-field: 7]\nmessage ValueMatcher {\n  // NullMatch is an empty message to specify a null value.\n  message NullMatch {\n  }\n\n  // Specifies how to match a value.\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, a match occurs if and only if the target value is a NullValue.\n    NullMatch null_match = 1;\n\n    // If specified, a match occurs if and only if the target value is a double value and is\n    // matched to this field.\n    DoubleMatcher double_match = 2;\n\n    // If specified, a match occurs if and only if the target value is a string value and is\n    // matched to this field.\n    StringMatcher string_match = 3;\n\n    // If specified, a match occurs if and only if the target value is a bool value and is equal\n    // to this field.\n    bool bool_match = 4;\n\n    // If specified, value match will be performed based on whether the path is referring to a\n    // valid primitive value in the metadata. 
If the path is referring to a non-primitive value,\n    // the result is always not matched.\n    bool present_match = 5;\n\n    // If specified, a match occurs if and only if the target value is a list value and\n    // is matched to this field.\n    ListMatcher list_match = 6;\n  }\n}\n\n// Specifies the way to match a list value.\nmessage ListMatcher {\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, at least one of the values in the list must match the value specified.\n    ValueMatcher one_of = 1;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/metadata/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "api/envoy/type/metadata/v2/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.metadata.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.metadata.v2\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.type.metadata.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metadata]\n\n// MetadataKey provides a general interface using `key` and `path` to retrieve value from\n// :ref:`Metadata <envoy_api_msg_core.Metadata>`.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.xxx:\n//        prop:\n//          foo: bar\n//          xyz:\n//            hello: envoy\n//\n// The following MetadataKey will retrieve a string value \"bar\" from the Metadata.\n//\n// .. code-block:: yaml\n//\n//    key: envoy.xxx\n//    path:\n//    - key: prop\n//    - key: foo\n//\nmessage MetadataKey {\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Currently it is only supported to specify the key, i.e. field name, as one segment of a path.\n  message PathSegment {\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_bytes: 1}];\n    }\n  }\n\n  // The key name of Metadata to retrieve the Struct from the metadata.\n  // Typically, it represents a builtin subsystem or custom extension.\n  string key = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The path to retrieve the Value from the Struct. It can be a prefix or a full path,\n  // e.g. 
``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example,\n  // which depends on the particular scenario.\n  //\n  // Note: Due to that only the key type segment is supported, the path can not specify a list\n  // unless the list is the last segment.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// Describes what kind of metadata.\nmessage MetadataKind {\n  // Represents dynamic metadata associated with the request.\n  message Request {\n  }\n\n  // Represents metadata from :ref:`the route<envoy_api_field_route.Route.metadata>`.\n  message Route {\n  }\n\n  // Represents metadata from :ref:`the upstream cluster<envoy_api_field_Cluster.metadata>`.\n  message Cluster {\n  }\n\n  // Represents metadata from :ref:`the upstream\n  // host<envoy_api_field_endpoint.LbEndpoint.metadata>`.\n  message Host {\n  }\n\n  oneof kind {\n    option (validate.required) = true;\n\n    // Request kind of metadata.\n    Request request = 1;\n\n    // Route kind of metadata.\n    Route route = 2;\n\n    // Cluster kind of metadata.\n    Cluster cluster = 3;\n\n    // Host kind of metadata.\n    Host host = 4;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/metadata/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type/metadata/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/type/metadata/v3/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.metadata.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.metadata.v3\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metadata]\n\n// MetadataKey provides a general interface using `key` and `path` to retrieve value from\n// :ref:`Metadata <envoy_api_msg_config.core.v3.Metadata>`.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.xxx:\n//        prop:\n//          foo: bar\n//          xyz:\n//            hello: envoy\n//\n// The following MetadataKey will retrieve a string value \"bar\" from the Metadata.\n//\n// .. code-block:: yaml\n//\n//    key: envoy.xxx\n//    path:\n//    - key: prop\n//    - key: foo\n//\nmessage MetadataKey {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.metadata.v2.MetadataKey\";\n\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Currently it is only supported to specify the key, i.e. field name, as one segment of a path.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKey.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The key name of Metadata to retrieve the Struct from the metadata.\n  // Typically, it represents a builtin subsystem or custom extension.\n  string key = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The path to retrieve the Value from the Struct. It can be a prefix or a full path,\n  // e.g. 
``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example,\n  // which depends on the particular scenario.\n  //\n  // Note: Due to that only the key type segment is supported, the path can not specify a list\n  // unless the list is the last segment.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// Describes what kind of metadata.\nmessage MetadataKind {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.metadata.v2.MetadataKind\";\n\n  // Represents dynamic metadata associated with the request.\n  message Request {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKind.Request\";\n  }\n\n  // Represents metadata from :ref:`the route<envoy_api_field_config.route.v3.Route.metadata>`.\n  message Route {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKind.Route\";\n  }\n\n  // Represents metadata from :ref:`the upstream cluster<envoy_api_field_config.cluster.v3.Cluster.metadata>`.\n  message Cluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKind.Cluster\";\n  }\n\n  // Represents metadata from :ref:`the upstream\n  // host<envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`.\n  message Host {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKind.Host\";\n  }\n\n  oneof kind {\n    option (validate.required) = true;\n\n    // Request kind of metadata.\n    Request request = 1;\n\n    // Route kind of metadata.\n    Route route = 2;\n\n    // Cluster kind of metadata.\n    Cluster cluster = 3;\n\n    // Host kind of metadata.\n    Host host = 4;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/percent.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"PercentProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Percent]\n\n// Identifies a percentage, in the range [0.0, 100.0].\nmessage Percent {\n  double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}];\n}\n\n// A fractional percentage is used in cases in which for performance reasons performing floating\n// point to integer conversions during randomness calculations is undesirable. The message includes\n// both a numerator and denominator that together determine the final fractional value.\n//\n// * **Example**: 1/100 = 1%.\n// * **Example**: 3/10000 = 0.03%.\nmessage FractionalPercent {\n  // Fraction percentages support several fixed denominator values.\n  enum DenominatorType {\n    // 100.\n    //\n    // **Example**: 1/100 = 1%.\n    HUNDRED = 0;\n\n    // 10,000.\n    //\n    // **Example**: 1/10000 = 0.01%.\n    TEN_THOUSAND = 1;\n\n    // 1,000,000.\n    //\n    // **Example**: 1/1000000 = 0.0001%.\n    MILLION = 2;\n  }\n\n  // Specifies the numerator. Defaults to 0.\n  uint32 numerator = 1;\n\n  // Specifies the denominator. If the denominator specified is less than the numerator, the final\n  // fractional percentage is capped at 1 (100%).\n  DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/type/range.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"RangeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Range]\n\n// Specifies the int64 start and end of the range using half-open interval semantics [start,\n// end).\nmessage Int64Range {\n  // start of the range (inclusive)\n  int64 start = 1;\n\n  // end of the range (exclusive)\n  int64 end = 2;\n}\n\n// Specifies the int32 start and end of the range using half-open interval semantics [start,\n// end).\nmessage Int32Range {\n  // start of the range (inclusive)\n  int32 start = 1;\n\n  // end of the range (exclusive)\n  int32 end = 2;\n}\n\n// Specifies the double start and end of the range using half-open interval semantics [start,\n// end).\nmessage DoubleRange {\n  // start of the range (inclusive)\n  double start = 1;\n\n  // end of the range (exclusive)\n  double end = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/semantic_version.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"SemanticVersionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Semantic Version]\n\n// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate\n// expected behaviors and APIs, the patch version field is used only\n// for security fixes and can be generally ignored.\nmessage SemanticVersion {\n  uint32 major_number = 1;\n\n  uint32 minor_number = 2;\n\n  uint32 patch = 3;\n}\n"
  },
  {
    "path": "api/envoy/type/token_bucket.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"TokenBucketProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Token bucket]\n\n// Configures a token bucket, typically used for rate limiting.\nmessage TokenBucket {\n  // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket\n  // initially contains.\n  uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // The number of tokens added to the bucket during each fill interval. If not specified, defaults\n  // to a single token.\n  google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}];\n\n  // The fill interval that tokens are added to the bucket. During each fill interval\n  // `tokens_per_fill` are added to the bucket. The bucket will never contain more than\n  // `max_tokens` tokens.\n  google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n}\n"
  },
  {
    "path": "api/envoy/type/tracing/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type/metadata/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/type/tracing/v2/custom_tag.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.tracing.v2;\n\nimport \"envoy/type/metadata/v2/metadata.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.tracing.v2\";\noption java_outer_classname = \"CustomTagProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Custom Tag]\n\n// Describes custom tags for the active span.\n// [#next-free-field: 6]\nmessage CustomTag {\n  // Literal type custom tag with static value for the tag value.\n  message Literal {\n    // Static literal value to populate the tag value.\n    string value = 1 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Environment type custom tag with environment name and default value.\n  message Environment {\n    // Environment variable name to obtain the value to populate the tag value.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // When the environment variable is not found,\n    // the tag value will be populated with this default value if specified,\n    // otherwise no tag will be populated.\n    string default_value = 2;\n  }\n\n  // Header type custom tag with header name and default value.\n  message Header {\n    // Header name to obtain the value to populate the tag value.\n    string name = 1\n        [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // When the header does not exist,\n    // the tag value will be populated with this default value if specified,\n    // otherwise no tag will be populated.\n    string default_value = 2;\n  }\n\n  // Metadata type custom tag using\n  // :ref:`MetadataKey <envoy_api_msg_type.metadata.v2.MetadataKey>` to retrieve the protobuf value\n  // from :ref:`Metadata <envoy_api_msg_core.Metadata>`, and populate the tag value with\n  // `the canonical JSON 
<https://developers.google.com/protocol-buffers/docs/proto3#json>`_\n  // representation of it.\n  message Metadata {\n    // Specify what kind of metadata to obtain tag value from.\n    metadata.v2.MetadataKind kind = 1;\n\n    // Metadata key to define the path to retrieve the tag value.\n    metadata.v2.MetadataKey metadata_key = 2;\n\n    // When no valid metadata is found,\n    // the tag value would be populated with this default value if specified,\n    // otherwise no tag would be populated.\n    string default_value = 3;\n  }\n\n  // Used to populate the tag name.\n  string tag = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Used to specify what kind of custom tag.\n  oneof type {\n    option (validate.required) = true;\n\n    // A literal custom tag.\n    Literal literal = 2;\n\n    // An environment custom tag.\n    Environment environment = 3;\n\n    // A request header custom tag.\n    Header request_header = 4;\n\n    // A custom tag to obtain tag value from the metadata.\n    Metadata metadata = 5;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/tracing/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type/metadata/v3:pkg\",\n        \"//envoy/type/tracing/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/type/tracing/v3/custom_tag.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.tracing.v3;\n\nimport \"envoy/type/metadata/v3/metadata.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.tracing.v3\";\noption java_outer_classname = \"CustomTagProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Custom Tag]\n\n// Describes custom tags for the active span.\n// [#next-free-field: 6]\nmessage CustomTag {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.tracing.v2.CustomTag\";\n\n  // Literal type custom tag with static value for the tag value.\n  message Literal {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.tracing.v2.CustomTag.Literal\";\n\n    // Static literal value to populate the tag value.\n    string value = 1 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Environment type custom tag with environment name and default value.\n  message Environment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.tracing.v2.CustomTag.Environment\";\n\n    // Environment variable name to obtain the value to populate the tag value.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // When the environment variable is not found,\n    // the tag value will be populated with this default value if specified,\n    // otherwise no tag will be populated.\n    string default_value = 2;\n  }\n\n  // Header type custom tag with header name and default value.\n  message Header {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.tracing.v2.CustomTag.Header\";\n\n    // Header name to obtain the value to populate the tag value.\n    string name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: 
HTTP_HEADER_NAME strict: false}];\n\n    // When the header does not exist,\n    // the tag value will be populated with this default value if specified,\n    // otherwise no tag will be populated.\n    string default_value = 2;\n  }\n\n  // Metadata type custom tag using\n  // :ref:`MetadataKey <envoy_api_msg_type.metadata.v3.MetadataKey>` to retrieve the protobuf value\n  // from :ref:`Metadata <envoy_api_msg_config.core.v3.Metadata>`, and populate the tag value with\n  // `the canonical JSON <https://developers.google.com/protocol-buffers/docs/proto3#json>`_\n  // representation of it.\n  message Metadata {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.tracing.v2.CustomTag.Metadata\";\n\n    // Specify what kind of metadata to obtain tag value from.\n    metadata.v3.MetadataKind kind = 1;\n\n    // Metadata key to define the path to retrieve the tag value.\n    metadata.v3.MetadataKey metadata_key = 2;\n\n    // When no valid metadata is found,\n    // the tag value would be populated with this default value if specified,\n    // otherwise no tag would be populated.\n    string default_value = 3;\n  }\n\n  // Used to populate the tag name.\n  string tag = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Used to specify what kind of custom tag.\n  oneof type {\n    option (validate.required) = true;\n\n    // A literal custom tag.\n    Literal literal = 2;\n\n    // An environment custom tag.\n    Environment environment = 3;\n\n    // A request header custom tag.\n    Header request_header = 4;\n\n    // A custom tag to obtain tag value from the metadata.\n    Metadata metadata = 5;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/envoy/type/v3/hash_policy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"HashPolicyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Hash Policy]\n\n// Specifies the hash policy\nmessage HashPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.HashPolicy\";\n\n  // The source IP will be used to compute the hash used by hash-based load balancing\n  // algorithms.\n  message SourceIp {\n    option (udpa.annotations.versioning).previous_message_type = \"envoy.type.HashPolicy.SourceIp\";\n  }\n\n  oneof policy_specifier {\n    option (validate.required) = true;\n\n    SourceIp source_ip = 1;\n  }\n}\n"
  },
  {
    "path": "api/envoy/type/v3/http.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"HttpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP]\n\nenum CodecClientType {\n  HTTP1 = 0;\n\n  HTTP2 = 1;\n\n  // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n  // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n  // to distinguish HTTP1 and HTTP2 traffic.\n  HTTP3 = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/v3/http_status.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"HttpStatusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP status codes]\n\n// HTTP response codes supported in Envoy.\n// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml\nenum StatusCode {\n  // Empty - This code not part of the HTTP status code specification, but it is needed for proto\n  // `enum` type.\n  Empty = 0;\n\n  Continue = 100;\n\n  OK = 200;\n\n  Created = 201;\n\n  Accepted = 202;\n\n  NonAuthoritativeInformation = 203;\n\n  NoContent = 204;\n\n  ResetContent = 205;\n\n  PartialContent = 206;\n\n  MultiStatus = 207;\n\n  AlreadyReported = 208;\n\n  IMUsed = 226;\n\n  MultipleChoices = 300;\n\n  MovedPermanently = 301;\n\n  Found = 302;\n\n  SeeOther = 303;\n\n  NotModified = 304;\n\n  UseProxy = 305;\n\n  TemporaryRedirect = 307;\n\n  PermanentRedirect = 308;\n\n  BadRequest = 400;\n\n  Unauthorized = 401;\n\n  PaymentRequired = 402;\n\n  Forbidden = 403;\n\n  NotFound = 404;\n\n  MethodNotAllowed = 405;\n\n  NotAcceptable = 406;\n\n  ProxyAuthenticationRequired = 407;\n\n  RequestTimeout = 408;\n\n  Conflict = 409;\n\n  Gone = 410;\n\n  LengthRequired = 411;\n\n  PreconditionFailed = 412;\n\n  PayloadTooLarge = 413;\n\n  URITooLong = 414;\n\n  UnsupportedMediaType = 415;\n\n  RangeNotSatisfiable = 416;\n\n  ExpectationFailed = 417;\n\n  MisdirectedRequest = 421;\n\n  UnprocessableEntity = 422;\n\n  Locked = 423;\n\n  FailedDependency = 424;\n\n  UpgradeRequired = 426;\n\n  PreconditionRequired = 428;\n\n  TooManyRequests = 429;\n\n  RequestHeaderFieldsTooLarge = 431;\n\n  InternalServerError = 500;\n\n  NotImplemented = 501;\n\n  
BadGateway = 502;\n\n  ServiceUnavailable = 503;\n\n  GatewayTimeout = 504;\n\n  HTTPVersionNotSupported = 505;\n\n  VariantAlsoNegotiates = 506;\n\n  InsufficientStorage = 507;\n\n  LoopDetected = 508;\n\n  NotExtended = 510;\n\n  NetworkAuthenticationRequired = 511;\n}\n\n// HTTP status.\nmessage HttpStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.HttpStatus\";\n\n  // Supplies HTTP response code.\n  StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}];\n}\n"
  },
  {
    "path": "api/envoy/type/v3/percent.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"PercentProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Percent]\n\n// Identifies a percentage, in the range [0.0, 100.0].\nmessage Percent {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.Percent\";\n\n  double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}];\n}\n\n// A fractional percentage is used in cases in which for performance reasons performing floating\n// point to integer conversions during randomness calculations is undesirable. The message includes\n// both a numerator and denominator that together determine the final fractional value.\n//\n// * **Example**: 1/100 = 1%.\n// * **Example**: 3/10000 = 0.03%.\nmessage FractionalPercent {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.FractionalPercent\";\n\n  // Fraction percentages support several fixed denominator values.\n  enum DenominatorType {\n    // 100.\n    //\n    // **Example**: 1/100 = 1%.\n    HUNDRED = 0;\n\n    // 10,000.\n    //\n    // **Example**: 1/10000 = 0.01%.\n    TEN_THOUSAND = 1;\n\n    // 1,000,000.\n    //\n    // **Example**: 1/1000000 = 0.0001%.\n    MILLION = 2;\n  }\n\n  // Specifies the numerator. Defaults to 0.\n  uint32 numerator = 1;\n\n  // Specifies the denominator. If the denominator specified is less than the numerator, the final\n  // fractional percentage is capped at 1 (100%).\n  DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "api/envoy/type/v3/range.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"RangeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Range]\n\n// Specifies the int64 start and end of the range using half-open interval semantics [start,\n// end).\nmessage Int64Range {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.Int64Range\";\n\n  // start of the range (inclusive)\n  int64 start = 1;\n\n  // end of the range (exclusive)\n  int64 end = 2;\n}\n\n// Specifies the int32 start and end of the range using half-open interval semantics [start,\n// end).\nmessage Int32Range {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.Int32Range\";\n\n  // start of the range (inclusive)\n  int32 start = 1;\n\n  // end of the range (exclusive)\n  int32 end = 2;\n}\n\n// Specifies the double start and end of the range using half-open interval semantics [start,\n// end).\nmessage DoubleRange {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.DoubleRange\";\n\n  // start of the range (inclusive)\n  double start = 1;\n\n  // end of the range (exclusive)\n  double end = 2;\n}\n"
  },
  {
    "path": "api/envoy/type/v3/ratelimit_unit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"RatelimitUnitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Ratelimit Time Unit]\n\n// Identifies the unit of time for rate limit.\nenum RateLimitUnit {\n  // The time unit is not known.\n  UNKNOWN = 0;\n\n  // The time unit representing a second.\n  SECOND = 1;\n\n  // The time unit representing a minute.\n  MINUTE = 2;\n\n  // The time unit representing an hour.\n  HOUR = 3;\n\n  // The time unit representing a day.\n  DAY = 4;\n}\n"
  },
  {
    "path": "api/envoy/type/v3/semantic_version.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"SemanticVersionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Semantic Version]\n\n// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate\n// expected behaviors and APIs, the patch version field is used only\n// for security fixes and can be generally ignored.\nmessage SemanticVersion {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.SemanticVersion\";\n\n  uint32 major_number = 1;\n\n  uint32 minor_number = 2;\n\n  uint32 patch = 3;\n}\n"
  },
  {
    "path": "api/envoy/type/v3/token_bucket.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"TokenBucketProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Token bucket]\n\n// Configures a token bucket, typically used for rate limiting.\nmessage TokenBucket {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.TokenBucket\";\n\n  // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket\n  // initially contains.\n  uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // The number of tokens added to the bucket during each fill interval. If not specified, defaults\n  // to a single token.\n  google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}];\n\n  // The fill interval that tokens are added to the bucket. During each fill interval\n  // `tokens_per_fill` are added to the bucket. The bucket will never contain more than\n  // `max_tokens` tokens.\n  google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n}\n"
  },
  {
    "path": "api/examples/service_envoy/BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n\nexports_files([\n    \"http_connection_manager.pb\",\n    \"listeners.pb\",\n])\n"
  },
  {
    "path": "api/examples/service_envoy/http_connection_manager.pb",
    "content": "codec_type: AUTO\n\nstat_prefix: \"ingress_http\"\n\nroute_config {\n  virtual_hosts {\n    name: \"service\"\n    domains: \"*\"\n    routes {\n      match {\n        prefix: \"/service\"\n      }\n      route {\n        cluster: \"local_service\"\n        timeout {\n          seconds: 0\n        }\n      }\n    }\n  }\n}\n\nhttp_filters {\n  name: \"router\"\n}\n"
  },
  {
    "path": "api/examples/service_envoy/listeners.pb",
    "content": "address {\n  socket_address {\n    protocol: TCP\n    port_value: 80\n  }\n}\nfilter_chains {\n  filters {\n    name: \"http_connection_manager\"\n  }\n}\n"
  },
  {
    "path": "api/test/build/BUILD",
    "content": "load(\"@envoy_api//bazel:api_build_system.bzl\", \"api_cc_test\", \"api_go_test\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_cc_test(\n    name = \"build_test\",\n    srcs = [\"build_test.cc\"],\n    deps = [\n        \"//envoy/api/v2:pkg_cc_proto\",\n        \"//envoy/service/accesslog/v2:pkg_cc_proto\",\n        \"//envoy/service/discovery/v2:pkg_cc_proto\",\n        \"//envoy/service/metrics/v2:pkg_cc_proto\",\n        \"//envoy/service/ratelimit/v2:pkg_cc_proto\",\n        \"@com_github_cncf_udpa//udpa/service/orca/v1:pkg_cc_proto\",\n    ],\n)\n\napi_go_test(\n    name = \"go_build_test\",\n    size = \"small\",\n    srcs = [\"go_build_test.go\"],\n    importpath = \"go_build_test\",\n    deps = [\n        \"//envoy/api/v2:pkg_go_proto\",\n        \"//envoy/api/v2/auth:pkg_go_proto\",\n        \"//envoy/config/bootstrap/v2:pkg_go_proto\",\n        \"//envoy/service/accesslog/v2:pkg_go_proto\",\n        \"//envoy/service/discovery/v2:pkg_go_proto\",\n        \"//envoy/service/metrics/v2:pkg_go_proto\",\n        \"//envoy/service/ratelimit/v2:pkg_go_proto\",\n        \"//envoy/service/trace/v2:pkg_go_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/test/build/build_test.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <cstdlib>\n#include <iostream>\n\n#include \"google/protobuf/descriptor.h\"\n\n// Basic C++ build/link validation for the v2 xDS APIs.\nint main(int argc, char* argv[]) {\n  const auto methods = {\n      \"envoy.api.v2.ClusterDiscoveryService.FetchClusters\",\n      \"envoy.api.v2.ClusterDiscoveryService.StreamClusters\",\n      \"envoy.api.v2.EndpointDiscoveryService.FetchEndpoints\",\n      \"envoy.api.v2.EndpointDiscoveryService.StreamEndpoints\",\n      \"envoy.api.v2.ListenerDiscoveryService.FetchListeners\",\n      \"envoy.api.v2.ListenerDiscoveryService.StreamListeners\",\n      \"envoy.api.v2.RouteDiscoveryService.FetchRoutes\",\n      \"envoy.api.v2.RouteDiscoveryService.StreamRoutes\",\n      \"envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources\",\n      \"envoy.service.discovery.v2.HealthDiscoveryService.FetchHealthCheck\",\n      \"envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck\",\n      \"envoy.service.discovery.v2.RuntimeDiscoveryService.FetchRuntime\",\n      \"envoy.service.discovery.v2.RuntimeDiscoveryService.StreamRuntime\",\n      \"envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs\",\n      \"envoy.service.metrics.v2.MetricsService.StreamMetrics\",\n      \"envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit\",\n      \"udpa.service.orca.v1.OpenRcaService.StreamCoreMetrics\",\n  };\n\n  for (const auto& method : methods) {\n    if (google::protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) == nullptr) {\n      std::cout << \"Unable to find method descriptor for \" << method << std::endl;\n      exit(EXIT_FAILURE);\n    }\n  }\n\n  exit(EXIT_SUCCESS);\n}\n"
  },
  {
    "path": "api/test/build/go_build_test.go",
    "content": "package go_build_test\n\nimport (\n\t\"testing\"\n\n\t_ \"github.com/envoyproxy/go-control-plane/envoy/api/v2\"\n\t_ \"github.com/envoyproxy/go-control-plane/envoy/api/v2/auth\"\n\t_ \"github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v2\"\n\t_ \"github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v2\"\n\t_ \"github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2\"\n\t_ \"github.com/envoyproxy/go-control-plane/envoy/service/metrics/v2\"\n\t_ \"github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2\"\n\t_ \"github.com/envoyproxy/go-control-plane/envoy/service/trace/v2\"\n)\n\nfunc TestNoop(t *testing.T) {\n\t// Noop test that verifies the successful importation of Envoy V2 API protos\n}\n"
  },
  {
    "path": "api/test/validate/BUILD",
    "content": "load(\"@envoy_api//bazel:api_build_system.bzl\", \"api_cc_test\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_cc_test(\n    name = \"pgv_test\",\n    srcs = [\"pgv_test.cc\"],\n    deps = [\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2/core:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2/listener:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2/route:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/accesslog/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/buffer/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/fault/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/gzip/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/header_to_metadata/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/health_check/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/ip_tagging/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/lua/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/router/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/squash/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/transcoder/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/network/http_connection_manager/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/network/mongo_proxy/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/network/redis_proxy/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/network/tcp_proxy/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/health_checker/redis/v2:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "api/test/validate/pgv_test.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <cstdlib>\n#include <iostream>\n\n// We don't use all the headers in the test below, but including them anyway as\n// a cheap way to get some C++ compiler sanity checking.\n#include \"envoy/api/v2/cluster.pb.validate.h\"\n#include \"envoy/api/v2/endpoint.pb.validate.h\"\n#include \"envoy/api/v2/listener.pb.validate.h\"\n#include \"envoy/api/v2/route.pb.validate.h\"\n#include \"envoy/api/v2/core/protocol.pb.validate.h\"\n#include \"envoy/config/health_checker/redis/v2/redis.pb.validate.h\"\n#include \"envoy/config/filter/accesslog/v2/accesslog.pb.validate.h\"\n#include \"envoy/config/filter/http/buffer/v2/buffer.pb.validate.h\"\n#include \"envoy/config/filter/http/fault/v2/fault.pb.validate.h\"\n#include \"envoy/config/filter/http/gzip/v2/gzip.pb.validate.h\"\n#include \"envoy/config/filter/http/health_check/v2/health_check.pb.validate.h\"\n#include \"envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.pb.validate.h\"\n#include \"envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.validate.h\"\n#include \"envoy/config/filter/http/lua/v2/lua.pb.validate.h\"\n#include \"envoy/config/filter/http/router/v2/router.pb.validate.h\"\n#include \"envoy/config/filter/http/squash/v2/squash.pb.validate.h\"\n#include \"envoy/config/filter/http/transcoder/v2/transcoder.pb.validate.h\"\n#include \"envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.validate.h\"\n#include \"envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.pb.validate.h\"\n#include \"envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.validate.h\"\n#include \"envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.pb.validate.h\"\n#include \"envoy/api/v2/listener/listener.pb.validate.h\"\n#include \"envoy/api/v2/route/route.pb.validate.h\"\n#include \"envoy/config/bootstrap/v2/bootstrap.pb.validate.h\"\n\n#include \"google/protobuf/text_format.h\"\n\ntemplate <class Proto> struct TestCase {\n  void run() {\n    
std::string err;\n    if (Validate(invalid_message, &err)) {\n      std::cerr << \"Unexpected successful validation of invalid message: \"\n                << invalid_message.DebugString() << std::endl;\n      exit(EXIT_FAILURE);\n    }\n    if (!Validate(valid_message, &err)) {\n      std::cerr << \"Unexpected failed validation of valid message: \" << valid_message.DebugString()\n                << \", \" << err << std::endl;\n      exit(EXIT_FAILURE);\n    }\n  }\n\n  Proto& invalid_message;\n  Proto& valid_message;\n};\n\n// Basic protoc-gen-validate C++ validation header inclusion and Validate calls\n// from data plane API.\nint main(int argc, char* argv[]) {\n  envoy::config::bootstrap::v2::Bootstrap invalid_bootstrap;\n  invalid_bootstrap.mutable_static_resources()->add_clusters();\n  // This is a baseline test of the validation features we care about. It's\n  // probably not worth adding in every filter and field that we want to valid\n  // in the API upfront, but as regressions occur, this is the place to add the\n  // specific case.\n  const std::string valid_bootstrap_text = R\"EOF(\n  node {}\n  cluster_manager {}\n  admin {\n    access_log_path: \"/dev/null\"\n    address { pipe { path: \"/\" } }\n  }\n  )EOF\";\n  envoy::config::bootstrap::v2::Bootstrap valid_bootstrap;\n  if (!google::protobuf::TextFormat::ParseFromString(valid_bootstrap_text, &valid_bootstrap)) {\n    std::cerr << \"Unable to parse text proto: \" << valid_bootstrap_text << std::endl;\n    exit(EXIT_FAILURE);\n  }\n  TestCase<envoy::config::bootstrap::v2::Bootstrap>{invalid_bootstrap, valid_bootstrap}.run();\n\n  exit(EXIT_SUCCESS);\n}\n"
  },
  {
    "path": "api/tools/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_binary\", \"py_test\")\n\nlicenses([\"notice\"])  # Apache 2\n\npy_binary(\n    name = \"tap2pcap\",\n    srcs = [\"tap2pcap.py\"],\n    licenses = [\"notice\"],  # Apache 2\n    visibility = [\"//visibility:public\"],\n    deps = [\"//envoy/data/tap/v2alpha:pkg_py_proto\"],\n)\n\npy_test(\n    name = \"tap2pcap_test\",\n    srcs = [\"tap2pcap_test.py\"],\n    data = [\n        \"data/tap2pcap_h2_ipv4.pb_text\",\n        \"data/tap2pcap_h2_ipv4.txt\",\n    ],\n    # Don't run this by default, since we don't want to force local dependency on Wireshark/tshark,\n    # will explicitly invoke in CI.\n    tags = [\"manual\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\":tap2pcap\"],\n)\n\npy_binary(\n    name = \"generate_listeners\",\n    srcs = [\"generate_listeners.py\"],\n    licenses = [\"notice\"],  # Apache 2\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//envoy/api/v2:pkg_py_proto\",\n        \"//envoy/config/filter/network/http_connection_manager/v2:pkg_py_proto\",\n    ],\n)\n\npy_test(\n    name = \"generate_listeners_test\",\n    srcs = [\"generate_listeners_test.py\"],\n    data = [\n        \"//examples/service_envoy:http_connection_manager.pb\",\n        \"//examples/service_envoy:listeners.pb\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\":generate_listeners\"],\n)\n"
  },
  {
    "path": "api/tools/data/tap2pcap_h2_ipv4.pb_text",
    "content": "socket_buffered_trace {\n  connection {\n    local_address {\n      socket_address {\n        address: \"127.0.0.1\"\n        port_value: 10000\n      }\n    }\n    remote_address {\n      socket_address {\n        address: \"127.0.0.1\"\n        port_value: 53288\n      }\n    }\n  }\n  events {\n    timestamp {\n      seconds: 1525207293\n      nanos: 216737962\n    }\n    read {\n      data: {\n        as_bytes: \"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n\\000\\000\\022\\004\\000\\000\\000\\000\\000\\000\\003\\000\\000\\000d\\000\\004@\\000\\000\\000\\000\\002\\000\\000\\000\\000\\000\\000\\004\\010\\000\\000\\000\\000\\000?\\377\\000\\001\\000\\000\\036\\001\\005\\000\\000\\000\\001\\202\\204\\206A\\212\\240\\344\\035\\023\\235\\t\\270\\020\\000\\000z\\210%\\266P\\303\\253\\266\\362\\340S\\003*/*\"\n      }\n    }\n  }\n  events {\n    timestamp {\n      seconds: 1525207293\n      nanos: 230450657\n    }\n    write {\n      data: {\n        as_bytes: \"\\000\\000\\006\\004\\000\\000\\000\\000\\000\\000\\004\\020\\000\\000\\000\\000\\000\\000\\004\\001\\000\\000\\000\\000\\000\\000\\004\\010\\000\\000\\000\\000\\000\\017\\377\\000\\001\"\n      }\n    }\n  }\n  events {\n    timestamp {\n      seconds: 1525207293\n      nanos: 230558250\n    }\n    read {\n      data: {\n        as_bytes: \"\\000\\000\\000\\004\\001\\000\\000\\000\\000\"\n      }\n    }\n  }\n  events {\n    timestamp {\n      seconds: 1525207293\n      nanos: 345386933\n    }\n    write {\n      data: {\n        as_bytes: \"\\000\\025\\223\\001\\004\\000\\000\\000\\001\\210@\\217\\362\\264\\307<\\324\\025d\\025\\0101\\352X\\325J\\177\\211\\3056\\316p\\232l\\371!\\301\\000\\216\\362\\264\\307<\\324\\025b\\371\\254\\266\\032\\222\\324\\237\\377\\277 
\\023n6\\357\\320\\200\\027]o\\350@\\013\\300s\\350@\\013\\302\\177\\351\\326\\302\\333\\241\\372u\\2612\\363\\237Ae\\260\\205\\327>\\202\\313b\\003\\301\\372\\013-\\211\\226\\333\\372\\013-\\211\\226\\335\\372\\013-\\211\\267\\033\\372\\013-\\211\\366[\\372\\013-\\211\\367\\001\\364\\026[d\\017\\271\\364\\026[d/=\\364\\026[dL\\271\\364\\026[e\\221;\\364\\026[e\\267\\031\\372\\013-\\262\\353N\\375\\005\\226\\331x\\014\\375\\005\\226\\331x\\r}\\005\\226\\331x \\372\\013-\\262\\360^\\372\\013-\\262\\373\\200\\372\\013-\\262\\373\\354\\375\\005\\226\\332\\003\\356}\\005\\226\\332\\013\\216\\375\\005\\226\\332\\013\\316\\375\\005\\226\\332\\020\\232\\372\\013-\\264&\\203\\364\\026[hN\\273\\364\\026[i\\220;\\364\\026[i\\226\\231\\372\\013-\\264\\350\\003\\364\\026[i\\326\\335\\372\\013-\\264\\360[\\372\\013-\\264\\363m}\\005\\226\\332|-\\375\\005\\226\\332}\\347~\\202\\313m\\201\\227~\\202\\313m\\210\\\"\\372\\013-\\266\\'\\032\\372\\013-\\266\\'\\303\\364\\026[m\\246\\335\\372\\013-\\266\\330]\\372\\013-\\266\\333\\356\\375\\005\\226\\333m\\367\\376\\202\\313m\\270\\340\\276\\202\\313m\\272\\340~\\202\\313m\\272\\350~\\202\\313m\\272\\363?Ae\\266\\335y\\377\\240\\262\\333o8\\017\\240\\262\\333o\\211\\237\\240\\262\\333\\200\\020>\\202\\313n\\000L\\375\\005\\226\\334\\003N}\\005\\226\\334\\003\\257\\375\\005\\226\\334\\010^\\372\\013-\\270\\027\\201\\364\\026[p/\\013\\350,\\266\\340\\234o\\350,\\266\\343 
o\\350,\\266\\343L\\271\\364\\026[q\\246\\201\\364\\026[q\\300\\207\\350,\\266\\343\\201o\\350,\\266\\343\\201w\\350,\\266\\343\\201\\177\\350,\\266\\343\\216\\003\\350,\\266\\343\\217\\273\\364\\026[q\\3215\\364\\026[q\\326\\235\\372\\013-\\270\\353\\257\\375\\005\\226\\334u\\367>\\202\\313n<\\333?Ae\\267\\036u\\377\\240\\262\\333\\217\\200?Ae\\267\\037i\\377\\240\\262\\333\\240e\\317\\240\\262\\333\\240x_Ae\\267B\\313\\337Ae\\267B\\320\\276\\202\\313n\\205\\307\\276\\202\\313n\\205\\347>\\202\\313n\\210\\017\\375\\005\\226\\335\\020_\\372\\013-\\272\\313m\\375\\005\\226\\335h\\017\\375\\005\\226\\335h!\\372\\013-\\272\\320\\203\\364\\026[u\\247\\237\\372\\013-\\272\\343\\301\\372\\013-\\272\\363-}\\005\\226\\335|.\\375\\005\\226\\335}\\227~\\202\\313n\\276\\343?Ae\\267_}\\237\\240\\262\\333\\300q\\257\\240\\262\\333\\300t?Ae\\267\\200\\353?Ae\\267\\200\\370~\\202\\313o\\004\\017}\\005\\226\\336\\010C\\364\\026[x!s\\350,\\266\\360\\204\\357\\320Ym\\3416\\037\\240\\262\\333\\302}\\237\\240\\262\\333\\314\\205\\377\\240\\262\\333\\314\\272\\377\\320Ym\\346\\200\\017\\240\\262\\333\\315\\005\\257\\240\\262\\333\\315\\010?Ae\\267\\232\\023\\177Ae\\267\\234\\020~\\202\\313o8\\313\\337Ae\\267\\234h_Ae\\267\\234}\\377\\240\\262\\333\\317>\\017\\240\\262\\333\\317\\205\\377\\240\\262\\333\\340\\013\\377Ae\\267\\300\\313\\337Ae\\267\\304\\360\\276\\204\\330\\002&\\335\\372h\\002e\\240\\277\\364\\320\\004\\373\\301o\\351\\2402\\020\\201\\377\\246\\200\\310B\\343_M\\001\\226\\231d?M\\001\\226\\236h\\037M\\001\\227\\237}\\237\\246\\200\\320\\002\\313\\177M\\001\\246B\\370>\\232\\003L\\264\\370\\276\\232\\003M\\274\\320~\\232\\003N\\270\\360\\276\\232\\003O2\\323\\277M\\001\\261:\\340~\\232\\003m\\262\\343\\177M\\001\\267\\032p\\037M\\001\\267\\202\\323\\337M\\001\\267\\336u\\317\\246\\200\\340\\237y\\277\\246\\200\\343\\254\\270\\327\\323@q\\346\\337\\027\\323@q\\360>\\317\\323@q\\360>\\337\\323@q\\367\\031k\\351\\2408\\373\\214\\271\\364\\320\\034}\\347\\034\\372h\\016\\204\\
016\\203\\351\\240:\\026\\\\\\007\\323@t.\\274/\\246\\200\\353o\\005\\317\\246\\200\\353\\217\\274\\347\\323@u\\360\\t\\357\\246\\200\\353\\342\\003\\237M\\001\\327\\334m\\317\\246\\200\\353\\3568\\037\\246\\200\\353\\3568\\377\\323@x\\014\\211\\257\\246\\200\\360\\033\\013\\337M\\001\\3406&\\276\\232\\003\\301\\003/}4\\007\\204 \\277\\364\\320\\036d-;\\364\\320\\036e\\366\\301\\364\\320\\036i\\240\\263\\364\\320\\036i\\2417\\364\\320\\036i\\320\\273\\364\\320\\036m\\327\\201\\364\\320\\036q\\367]\\372h\\017>\\026\\305\\364\\320\\037\\000\\202/\\246\\200\\370\\031\\010>\\232\\003\\355\\276\\020}4\\007\\334\\023\\315\\3754\\007\\334\\023\\316}4\\007\\334y\\306~\\232\\003\\356>\\323\\337M\\001\\367B\\333?M\\001\\367Zq\\377\\246\\200\\373\\257\\210_M\\001\\367_\\023\\377M\\001\\367\\202\\320>\\232\\003\\3574\\340>\\232\\003\\3578\\373\\377M\\001\\367\\235\\020~\\232\\003\\357:\\'\\276\\232\\010\\000!9\\364\\320@\\021\\004?M\\004\\014\\205\\366~\\232\\010\\031i\\340\\3754\\0204\\363b\\372h m\\226\\\\\\372h m\\247\\303\\364\\320@\\343-;\\364\\320@\\343\\2179\\364\\320@\\343\\317\\203\\351\\240\\201\\326\\302\\337\\323A\\003\\255\\262\\017\\246\\202\\007_\\013_M\\004\\017<\\006\\376\\232\\010\\036y\\227~\\232\\010\\037d.}4\\020>\\323\\340\\372h!\\013\\340k\\351\\240\\204/\\266\\337\\323A\\010_p?M\\004-4\\343\\277M\\004-6\\353_M\\004-6\\373\\277M\\004-\\211\\340\\3754\\020\\266\\320A\\364\\320B\\333\\217\\273\\364\\320B\\343 o\\351\\240\\205\\306@\\357\\323A\\013\\241p?M\\004.\\205\\320\\3754\\020\\272\\330Y\\372h!u\\360\\265\\364\\320B\\360\\037\\027\\323A\\013\\315\\264\\367\\323A\\013\\340d_M\\004/\\201\\226\\276\\232\\010_\\003.}4\\020\\276\\006\\201\\364\\320B\\370\\035k\\351\\240\\205\\366\\331g\\351\\240\\205\\366\\336k\\351\\240\\210\\014\\262/\\246\\202 6\\373?M\\004@q\\300}4\\021\\004.\\263\\364\\320D\\020\\272\\337\\323A\\020Zi\\277\\246\\202 
\\264\\373?M\\004Al/}4\\021\\005\\307\\334\\372h\\\"\\020\\236\\027\\323A\\023.4\\037\\246\\202&\\204\\323\\177M\\004M2\\370~\\232\\010\\232}\\340\\3754\\0216\\323\\257}4\\0216\\363.\\3754\\0216\\373\\242\\372h\\\"p@\\007\\323A\\023\\202\\023\\337M\\004N:\\333_M\\004N\\201\\347>\\232\\010\\235\\023L\\3754\\021:\\310\\032\\372h\\\"u\\2207\\364\\320D\\353A{\\351\\240\\211\\326\\302\\337\\323A\\023\\301\\013\\277M\\004O\\010-}4\\021< \\271\\364\\320D\\363-\\265\\364\\320D\\363@\\027\\323A\\023\\315\\272\\017\\246\\202\\'\\233y\\317\\246\\202\\'\\333\\020>\\232\\010\\237q\\226~\\232\\010\\237u\\367>\\232\\013 \\020\\232\\372h,\\201\\227\\234\\372h,\\201\\267\\001\\364\\320Y\\003n?\\364\\320Y\\010B\\327\\323Ad.2/\\246\\202\\310]e\\337\\246\\202\\310^e\\257\\246\\202\\310\\200\\320~\\232\\013\\\"\\013\\355\\3754\\026D\\320^\\372h,\\211\\246\\205\\364\\320Y\\023o9\\364\\320Y\\023\\217\\275\\364\\320Y\\023\\317\\275\\364\\320Yd.\\013\\351\\240\\262\\313-\\277\\364\\320Ye\\240\\275\\364\\320Ye\\247\\003\\364\\320Ye\\260?\\364\\320Ye\\306\\\\\\372h,\\262\\343/}4\\026Yy\\266\\276\\232\\013-2\\343?M\\005\\226\\232\\023\\337M\\005\\226\\233q\\237\\246\\202\\313`\\023\\177M\\005\\226\\300\\363\\337M\\005\\226\\304\\323\\377M\\005\\226\\332m\\337\\246\\202\\313m8\\037\\246\\202\\313m\\272\\347\\323Ae\\300>/\\246\\202\\313\\201e\\277\\246\\202\\313\\201m\\277\\246\\202\\313\\201|_M\\005\\227Z\\013\\177M\\005\\227Zq\\257\\246\\202\\313\\255\\205\\237\\246\\202\\313\\255\\276\\357\\323Ae\\327D\\357\\323Ae\\346\\233o\\351\\240\\262\\363\\341k\\351\\240\\262\\363\\357\\007\\351\\240\\262\\373L\\271\\364\\320Z\\000\\002/\\246\\202\\320\\000&~\\232\\013@\\003@\\372h-\\000M3\\364\\320Z\\003\\216\\267\\364\\320Z\\003\\240k\\351\\240\\264\\007Yg\\351\\240\\264\\007\\333\\177\\351\\240\\264\\0204\\377\\323Ah,\\262/\\246\\202\\320Yi\\317\\246\\202\\320Ze\\337\\246\\202\\320[\\020~\\232\\013Am\\326\\376\\232\\013Am\\347\\276\\232\\013Aq\\346~\\232\\013At\\016\\3754\\026\\202\\370[\\372h-\\005\
\366\\331\\372h-\\t\\221=\\364\\320Z\\023\\314\\265\\364\\320Z\\023\\340w\\351\\240\\264\\310@\\317\\323Ai\\220\\205\\237\\246\\202\\323\\\"u\\357\\246\\202\\323,\\274\\337\\323Ai\\226\\200\\377\\323Ai\\227\\033g\\351\\240\\264\\313\\256\\275\\364\\320Ze\\327\\205\\364\\320Ze\\327\\231\\372h-2\\370\\005\\364\\320Ze\\367\\035\\372h-4&Y\\372h-4\\310\\233\\372h-4\\330\\235\\372h-4\\333L\\3754\\026\\232p\\002\\372h-4\\340_\\372h-4\\353B\\372h-6\\020?\\364\\320Zm\\247\\003\\364\\320Zm\\247\\233\\372h-6\\330\\005\\364\\320Zm\\327E\\364\\320Zm\\346\\\\\\372h-8\\007^\\372h-8\\007\\201\\364\\320Zp-9\\364\\320Zp/3\\364\\320Zq\\246\\201\\364\\320Zq\\246\\205\\364\\320Zq\\247]\\372h-8\\343!\\372h-8\\343\\316\\3754\\026\\234}\\366\\376\\232\\013N\\201\\267~\\232\\013N\\264\\373\\337M\\005\\247_\\000>\\232\\013N\\276!}4\\026\\236\\000]\\372h-<\\007\\003\\364\\320Zx\\016;\\364\\320ZxL\\207\\351\\240\\264\\360\\234{\\351\\240\\264\\363 k\\351\\240\\264\\363\\\"s\\351\\240\\264\\363\\\"{\\351\\240\\264\\363\\\"\\177\\351\\240\\264\\363,\\213\\351\\240\\264\\363O\\275\\364\\320Zy\\261\\003\\351\\240\\264\\363b\\027\\323Ai\\346\\335g\\351\\240\\264\\363\\255=\\364\\320Zy\\347Y\\372h-<\\373b\\372h->\\000\\273\\364\\320Z|\\r\\275\\364\\320Z|L\\277\\364\\320Z}\\226\\303\\364\\320Z}\\266\\331\\372h->\\333\\340\\372h->\\333\\341\\372h->\\333\\355\\3754\\026\\237m\\367\\276\\232\\013O\\270\\'\\376\\232\\013O\\270\\363?M\\005\\247\\336\\003\\377M\\005\\247\\336\\013?M\\005\\247\\336d\\037M\\005\\247\\337\\013?M\\005\\260\\000.\\3754\\026\\300\\006C\\364\\320[\\000\\037\\007\\323Al\\r\\005\\257\\246\\202\\330\\032l?M\\005\\260<\\007>\\232\\013`x.}4\\026\\302\\'\\034\\372h-\\204O\\267\\364\\320[\\010\\237\\177\\351\\240\\266\\026_\\017\\323Al.<\\357\\323Al.\\266/\\246\\202\\330]m\\357\\246\\202\\330^m\\317\\246\\202\\330^q\\277\\246\\202\\330^u\\277\\246\\202\\330_\\013\\377M\\005\\261\\001\\247\\376\\232\\013b\\013N\\3754\\026\\304\\333`\\372h-\\211\\327\\201\\364\\320[\\023\\341o\\351\\240\\
266\\'\\302\\357\\323AlO\\266\\017\\246\\202\\333 i\\377\\246\\202\\333 q\\357\\246\\202\\333\\\"\\000~\\232\\013l\\210B\\372h-\\262\\'^\\372h-\\262\\313B\\372h-\\262\\320\\205\\364\\320[e\\2605\\364\\320[e\\301=\\364\\320[e\\306\\332\\372h-\\262\\350\\\\\\372h-\\262\\373b\\372h-\\262\\373n\\3754\\026\\332\\000\\201\\364\\320[h\\016\\213\\351\\240\\266\\320\\036\\017\\323Am\\246@\\367\\323Am\\246[k\\351\\240\\266\\323\\355\\277\\364\\320[l,\\277\\364\\320[l/\\265\\364\\320[m\\260\\267\\364\\320[m\\267\\035\\372h-\\266\\333\\217\\3754\\026\\333q\\366\\376\\232\\013m\\272!}4\\026\\333u\\220}4\\026\\333u\\320}4\\026\\334\\003O}4\\026\\334\\003\\356\\3754\\026\\334\\010\\032\\372h-\\270\\0207\\364\\320[p w\\351\\240\\266\\340Yw\\351\\240\\266\\340^\\017\\323Am\\3014\\327\\323Am\\306\\202\\337\\323Am\\307\\033s\\351\\240\\266\\343\\240k\\351\\240\\266\\343\\240s\\351\\240\\266\\343\\257\\003\\351\\240\\266\\343\\3173\\364\\320[t\\016\\207\\351\\240\\266\\350Y\\177\\351\\240\\266\\353-\\013\\351\\240\\266\\353-\\203\\351\\240\\266\\353/\\007\\351\\240\\266\\353/3\\364\\320[u\\2405\\364\\320[u\\306\\336\\372h-\\272\\350Z\\372h-\\272\\353\\201\\372h-\\272\\353\\356}4\\026\\335u\\367\\376\\232\\013n\\274\\000\\3754\\026\\335y\\227~\\232\\013n\\274\\363\\277M\\005\\267_\\023\\377M\\005\\267\\200\\313\\177M\\005\\267\\200\\343_M\\005\\267\\202\\373?M\\005\\267\\231\\003\\177M\\005\\267\\231\\003\\377M\\005\\267\\234m\\337\\246\\202\\333\\316\\211\\257\\246\\202\\333\\3176\\327\\323Am\\347\\304\\017\\246\\202\\333\\355\\272\\017\\246\\202\\333\\356\\t\\277\\246\\202\\333\\3564/\\246\\202\\333\\356>\\317\\323Am\\367]\\007\\323Am\\367]g\\351\\240\\266\\373\\256\\273\\364\\320[}\\360\\013\\351\\240\\266\\373\\355\\007\\351\\240\\266\\373\\356\\273\\364\\320\\\\\\000[\\177\\351\\240\\270\\001\\010\\037M\\005\\3002\\027>\\232\\013\\200h\\017}4\\027\\000\\320_\\372h.\\001\\2413\\364\\320\\\\\\003N3\\364\\320\\\\\\003O\\213\\351\\240\\270\\007\\304\\317\\323Ap\\017\\211\\277\\246\\202
\\340\\037i\\317\\246\\202\\340\\037y\\357\\246\\202\\340@\\007~\\232\\013\\201\\013\\316\\3754\\027\\002\\313o}4\\027\\002\\320Y\\372h.\\005\\247\\203\\364\\320\\\\\\013O7\\364\\320\\\\\\013O?\\364\\320\\\\\\013a\\027\\323Ap-\\211\\317\\246\\202\\340[i\\357\\246\\202\\340[|_M\\005\\300\\266\\373\\177M\\005\\300\\272\\373_M\\005\\300\\274\\353\\237M\\005\\300\\276\\027\\276\\232\\013\\201}\\240\\3754\\027\\004\\006\\331\\372h.\\010\\017\\003\\351\\240\\270 \\270/\\246\\202\\340\\204\\'>\\232\\013\\202\\020\\236\\372h.\\t\\226C\\364\\320\\\\\\023A\\007\\323ApM6\\317\\323ApM\\204_M\\005\\3018 \\3754\\027\\004\\343\\201\\372h.\\t\\307\\234\\372h.2\\007\\034\\372h.2\\007\\035\\372h.2\\026\\237\\372h.2 \\013\\351\\240\\270\\313-9\\364\\320\\\\e\\226\\236\\372h.2\\313`\\372h.2\\313\\357\\3754\\027\\031l\\017}4\\027\\031}\\247\\276\\232\\013\\2152\\313\\337M\\005\\306\\231q\\377\\246\\202\\343M:\\017\\246\\202\\343M<\\037\\246\\202\\343N\\211\\357\\246\\202\\343N\\270\\327\\323Aq\\247\\336\\017\\323Aq\\247\\337k\\351\\240\\270\\330\\002\\347\\323Aq\\2604\\037\\246\\202\\343`l?M\\005\\306\\302\\350~\\232\\013\\215\\210\\r}4\\027\\033\\023/}4\\027\\033\\023`\\372h.6\\310\\001\\364\\320\\\\m\\220\\263\\364\\320\\\\m\\221?\\364\\320\\\\m\\227\\305\\364\\320\\\\m\\247_\\372h.6\\330\\037\\372h.6\\340\\003\\364\\320\\\\m\\3003\\364\\320\\\\m\\300\\277\\364\\320\\\\m\\326\\305\\364\\320\\\\m\\327\\205\\364\\320\\\\m\\340?\\364\\320\\\\m\\347Y\\372h.6\\363\\255\\3754\\027\\033y\\327\\276\\232\\013\\215\\276\\006\\376\\232\\013\\215\\276\\007\\276\\232\\013\\216\\000N\\3754\\027\\034\\020\\034\\372h.8\\'\\337\\372h.8\\333\\315}4\\027\\034q\\346\\276\\232\\013\\216:\\373\\337M\\005\\307\\036d_M\\005\\307B\\027\\276\\232\\013\\216\\205\\226~\\232\\013\\216\\205\\240\\3754\\027\\035\\013L\\3754\\027\\035\\013M\\3754\\027\\035\\013\\355}4\\027\\035\\023-}4\\027\\035\\023\\357\\3754\\027\\035e\\326~\\232\\013\\216\\262\\373?M\\005\\307Y}\\277\\246\\202\\343\\255\\005\\257\\246\\202\\343\
\255\\205\\257\\246\\202\\343\\256\\266\\367\\323Aq\\327\\204\\327\\323Aq\\327\\231g\\351\\240\\270\\353\\315\\013\\351\\240\\270\\360\\032w\\351\\240\\270\\360\\032{\\351\\240\\270\\360\\033\\007\\323Aq\\3408\\017\\246\\202\\343\\301y\\277\\246\\202\\343\\301|?M\\005\\307\\231}\\237\\246\\202\\343\\315\\205\\277\\246\\202\\343\\315\\266\\037\\246\\202\\343\\315\\266/\\246\\202\\343\\315\\274/\\246\\202\\343\\316\\201\\257\\246\\202\\343\\342h\\037M\\005\\307\\304\\353\\277M\\005\\307\\304\\363_M\\005\\307\\333}\\357\\246\\202\\343\\356\\001\\337\\246\\202\\343\\356\\270\\367\\323Aq\\367\\234o\\351\\240\\270\\373\\354\\273\\364\\320\\\\}\\366^\\372h.\\200\\r?\\364\\320]\\000\\034\\177\\351\\240\\272\\000\\264\\017\\246\\202\\350\\002\\333\\277M\\005\\3204\\323?M\\005\\320<\\340~\\232\\013\\240y\\327\\376\\232\\013\\240|M\\3754\\027@\\373\\254\\3758\\330D\\343\\355}8\\330D\\360\\001\\364\\343ad\\r\\267\\364\\343ae\\267\\301\\364\\343ae\\301\\007\\351\\346@\\006\\202\\317\\323\\314\\200\\r<\\327\\323\\314\\200\\r>\\327\\323\\314\\200\\r>\\357\\323\\314\\200\\r\\270\\347\\320@ \\000\\0173\\364\\020\\010\\001}\\267>\\202\\001\\000@|\\037A\\000\\200 \\266\\317\\320@ \\010M\\013\\350 \\020\\004\\310\\034\\372\\010\\004\\0012\\310>\\202\\001\\000M4\\337\\320@ \\t\\246\\335\\372\\010\\004\\0016\\313\\237A\\000\\200&\\332g\\350 \\020\\004\\333\\217\\375\\004\\002\\000\\233u\\317\\240\\200@\\023n\\275\\364\\020\\010\\002p@\\372\\010\\004\\0018\\'\\276\\202\\001\\000N2\\357\\320@ \\t\\306_\\372\\010\\004\\0018\\333\\177A\\000\\200\\'\\034w\\350 
\\020\\004\\343\\256\\375\\004\\002\\000\\234u\\357\\240\\262\\330\\000\\000\\273\\364\\026\\336\\003\\256\\270\\327\\320\\\\\\020\\000\\026\\\\\\372\\013\\202\\000\\002\\353\\337Ap@\\000\\204\\357\\320_\\000\\000\\'\\236\\372\\013\\340\\000\\032\\023?A|\\000\\003Bw\\350/\\200\\001}\\367\\376\\202\\370\\000&\\332{\\350/\\200\\002y\\340}\\005\\360\\001\\226D\\037\\240\\276\\0002\\310\\231\\372\\013\\340\\003,\\211\\277\\240\\276\\0002\\310\\234\\372\\013\\340\\003,\\211\\357\\240\\276\\0002\\310\\237\\372\\013\\340\\003,\\262\\017\\240\\276\\0002\\320\\035\\372\\013\\340\\003-\\001\\357\\240\\276\\0002\\320\\037\\372\\013\\340\\003L\\201\\377\\240\\276\\0004\\330\\\\\\372\\013\\340\\003M\\205\\337\\240\\276\\0004\\330^\\372\\013\\340\\003M\\205\\377\\240\\276\\0004\\330\\201\\364\\027\\300\\006\\233\\020~\\202\\370\\000\\323\\317\\213\\350/\\200\\r>\\000\\375\\005\\360\\001\\261\\001\\277\\240\\276\\0006 9\\364\\027\\300\\006\\304\\026\\276\\202\\370\\000\\330\\202\\337\\320_\\000\\033\\020]\\372\\013\\340\\003b\\013\\337A|\\000lA\\177\\350/\\200\\r\\270\\343\\177A|\\000m\\307E\\364\\027\\300\\006\\335e\\317\\240\\276\\0006\\353M}4\\026B\\350[oa\\226\\337i~\\224\\000T\\320?J\\010\\001yA\\002\\343A\\270\\313*b\\321\\277d\\002-1X\\215\\256\\303w\\032K\\364\\245#\\362\\260\\346,\\000_\\226I|\\245\\211\\323M\\037j\\022q\\330\\202\\246\\014\\233\\265,\\363\\315\\276\\260\\177@\\230\\362\\264\\307<\\324\\025i\\245*\\321\\214\\235KT\\213X^\\326\\225\\tX\\325J\\177\\224)\\244\\202)/\\237\\225\\203\\361\\203\\261\\223\\026\\301\\372\\232\\274M_\\361@\\224\\362\\264\\307<\\324\\025i\\274!h\\315P\\354\\364\\267r\\330\\203\\036\\257\\207\\013\\355\\005\\246\\\\m\\357@\\003p3p\\257\\275\\256\\017\\347|\\346B\\206B\\225\\035*\\rMl\\353R\\263\\320bz\\376\\024\\334R\\2512\\344;\\025\\263\\\\\\345\\242\\265%=\\212R{\\n\\241\\252\\224\\353\\377?@\\236\\362\\264\\307<\\324\\025i\\245*\\304\\266\\313\\013RV\\260\\275\\255*\\022\\261\\016\\204\\255-\\207\\245i\\274#\\204\\013K\\264\\017@
\\217\\362\\264\\307<\\324\\025i\\006\\221\\255\\334\\266 \\307\\253\\207\\013\\355>\\333\\302m\\277v\\204-]\\317\\353@\\217\\362\\264\\307<\\324\\025j\\212\\232OR\\324\\0162\\321\\240b:\\220\\307k\\030\\214\\366L\\307k\\030\\216\\24417\\204-]\\207\\221\\211\\274 +\\240\\266w+\\016\\274\\017@\\214\\362\\267\\224!j\\354:JD\\230\\365\\177\\212\\017\\332\\224\\236B\\301\\035\\007\\'_@\\213\\362\\264\\266\\016\\222\\254z\\322c\\324\\217\\211\\335\\016\\214\\032\\266\\344\\305\\223O@\\223\\362\\264\\307<\\324\\025i\\245*\\326\\027\\265\\245BVM\\203!\\177\\303\\031)\\350\\027\\2564\\323?]\\254\\242\\240\\267q\\367\\231k\\351\\210\\352C\\035\\254b3\\3313\\035\\254b:\\220\\304\\336\\020\\265v\\036F&\\360\\200\\256\\202\\331\\334\\254:\\360>\\273YEAn\\343\\3572\\327\\320\\311O@\\275q\\246\\231@\\217\\362\\264\\307<\\324\\025i\\221Dk \\266w1\\013\\003web\\017(\\300\\016\\270\\262\\303\\266\\001\\000/,\\006\\326\\000V\\020>\\324/\\232\\315aQ\\006\\371\\355\\372Q\\220\\255\\240~\\226\\020\\002\\362\\202\\005\\306\\203q\\226T\\305\\243\\177\\332\\225\\2153\\300\\307\\332\\222\\036\\221\\232\\250\\027\\230\\347\\232\\202\\256C\\323\\017(\\377\\'\\323\\222\\374\\001\\023\\360\\037\\036L\\272\\274\\305D\\276\\254\\237/\\237yd\\333\\370\\224\\003\\036\\275\\035w\\352\\247N\\276\\306j\\251\\346\\240\\277\\226\\001\\313\\315\\343[\\266I\\315E\\355\\274\\031\\330W,\\246\\350*\\375\\351\\230\\217\\342\\277\\336\\034ZC\\2435\\323BO\\323\\311\\023fK\\306\\316\\314m;\\0179\\331\\313\\223\\006\\216\\267\\253\\353\\217e\\373\\037\\305\\2012\\343\\272/\\034)\\333]\\267r\\375\\250_5\\232\\302\\242\\016E\\223\\351FB\\266\\242%a\\000/( \\\\h7\\031eLZ7\\375\\251X\\323<\\014}\\251!\\351\\031\\252\\201y\\216y\\250*\\344=?jcJk\\325U\\036\\277@\\215\\362\\264\\307<\\324\\025h\\306N\\245\\252D\\177\\320b:\\220\\307k\\030\\214\\366L\\307k\\030\\216\\24417\\204-]\\207\\221\\211\\274 
+\\240\\266w+\\016\\274\\017\\256\\326QP[\\270\\373\\314\\265\\364\\304u!\\216\\3261\\031\\354\\231\\216\\3261\\035Hbo\\010Z\\273\\017#\\023x@W\\232R\\260\\363\\237C%=\\002\\365\\306\\232g@\\222\\362\\264\\307<\\324\\025i\\016\\205\\220[;\\230\\205Y6\\014\\205\\207\\244f\\252\\344\\347\\244\\277@\\214\\362\\264\\307<\\324\\025d\\026\\316\\346!\\177\\207\\361\\343\\307\\324\\347\\244\\277@\\234\\362\\264\\307<\\324\\025i\\245*\\326\\025\\025\\236\\244\\025b\\036B\\255!R3P\\205\\223`\\310_\\250\\260\\250\\254\\365 \\261\\020\\362\\026$\\0251G\\352(\\306N\\245\\252O\\253\\n\\212\\317R\\013\\021\\017!bAS\\024~\\242\\214d\\352Z\\244@\\236\\362\\264\\307<\\324\\025i\\245*\\326\\025\\025\\236\\244\\025h\\317\\'\\245\\223`\\352D\\247\\262\\221\\244\\307\\251\\037\\215\\232\\332\\275\\232\\272\\313\\'\\321\\'\\266\\256\\245\\223@\\222\\362\\264\\307<\\324\\025dNZ(\\224\\310\\235d$i\\265\\037\\207\\275\\010&\\273\\202\\037_@\\205\\035\\tY\\035\\311\\354\\237\\264\\037\\315\\306\\232g\\371\\373R\\221\\300&\\337\\020\\000\\017\\265;Zb@\\330Y\\003-2\\317\\332\\235\\2551 l,\\201\\226\\231\\027\\332\\235\\2551 l,\\201\\226\\231\\017\\332\\235\\2551 l,\\201\\226Y\\177\\355N\\326\\230\\2206\\026@\\313,\\267\\365\\332\\323\\022\\017\\346\\343M3\\374\\375\\251H\\340\\023o\\210\\000\\007\\332\\235\\340\\376Zg\\351\\241}4\\037\\246_\\372e\\277\\347@\\223\\362\\264\\307<\\324\\025i\\245*\\310-\\235\\314B\\254\\233\\006B\\377\\207\\361\\343\\307\\324\\347\\244\\277R\\203\\250\\365\\027{\\213\\204\\204-i[\\005D<\\206\\252o@\\225\\362\\261j\\356\\177K[Z\\023aGJ\\310-\\235\\314B\\254\\223R_\\202\\010Z\"\n      }\n    }\n  }\n  events {\n    timestamp {\n      seconds: 1525207293\n      nanos: 346744029\n    }\n    write {\n      data: {\n        as_bytes: \"\\000\\035V\\000\\000\\000\\000\\000\\001<!doctype html><html itemscope=\\\"\\\" itemtype=\\\"http://schema.org/WebPage\\\" lang=\\\"en\\\"><head><meta content=\\\"Search the world\\'s information, including webpages, images, videos and 
more. Google has many special features to help you find exactly what you\\'re looking for.\\\" name=\\\"description\\\"><meta content=\\\"noodp\\\" name=\\\"robots\\\"><meta content=\\\"text/html; charset=UTF-8\\\" http-equiv=\\\"Content-Type\\\"><meta content=\\\"/images/branding/googleg/1x/googleg_standard_color_128dp.png\\\" itemprop=\\\"image\\\"><title>Google</title><script nonce=\\\"rLBCtD6iWuWYX1UPQlCXOA==\\\">(function(){window.google={kEI:\\'_dDoWrawEqit_QaygpG4Dw\\',kEXPI:\\'0,1352960,787,57,473,638,542,205,99,1018,74,366,892,289,221,398,19,2341024,210,165,52,23,329303,1294,12383,2349,2506,23435,9256,16115,316,453,7,792,7,9186,1402,5281,1100,3335,2,2,1624,515,2466,2196,363,554,332,332,2102,113,2201,1525,150,1516,224,843,1372,133,130,3322,658,208,919,444,131,1116,2,582,352,24,287,64,310,296,1056,402,368,58,2,1,3,190,495,612,284,104,789,1113,1149,154,730,1615,2,153,8,318,49,7,1,2,137,412,50,636,8,537,770,67,196,722,51,77,450,188,517,21,543,452,221,62,2,840,1,283,158,440,390,143,283,230,22,25,10,341,87,124,149,30,27,64,2,3,30,119,272,935,139,41,344,75,9,206,2,343,189,658,44,484,170,111,91,721,112,6029110,1873,672,9,41,8797610,135,4,1572,549,332,441,2,2,1,2,1,1,77,1,1,900,207,1,1,1,1,1,371,9,304,1,8,1,2,1,1,446,7,64,8,22311411\\',authuser:0,kscs:\\'c9c918f0__dDoWrawEqit_QaygpG4Dw\\',u:\\'c9c918f0\\',kGL:\\'US\\'};google.kHL=\\'en\\';})();google.time=function(){return(new Date).getTime()};(function(){google.lc=[];google.li=0;google.getEI=function(a){for(var b;a&&(!a.getAttribute||!(b=a.getAttribute(\\\"eid\\\")));)a=a.parentNode;return b||google.kEI};google.getLEI=function(a){for(var b=null;a&&(!a.getAttribute||!(b=a.getAttribute(\\\"leid\\\")));)a=a.parentNode;return b};google.https=function(){return\\\"https:\\\"==window.location.protocol};google.ml=function(){return null};google.wl=function(a,b){try{google.ml(Error(a),!1,b)}catch(d){}};google.log=function(a,b,d,c,g){if(a=google.logUrl(a,b,d,c,g)){b=new Image;var 
e=google.lc,f=google.li;e[f]=b;b.onerror=b.onload=b.onabort=function(){delete e[f]};google.vel&&google.vel.lu&&google.vel.lu(a);b.src=a;google.li=f+1}};google.logUrl=function(a,b,d,c,g){var e=\\\"\\\",f=google.ls||\\\"\\\";d||-1!=b.search(\\\"&ei=\\\")||(e=\\\"&ei=\\\"+google.getEI(c),-1==b.search(\\\"&lei=\\\")&&(c=google.getLEI(c))&&(e+=\\\"&lei=\\\"+c));c=\\\"\\\";!d&&google.cshid&&-1==b.search(\\\"&cshid=\\\")&&(c=\\\"&cshid=\\\"+google.cshid);a=d||\\\"/\\\"+(g||\\\"gen_204\\\")+\\\"?atyp=i&ct=\\\"+a+\\\"&cad=\\\"+b+e+f+\\\"&zx=\\\"+google.time()+c;/^http:/i.test(a)&&google.https()&&(google.ml(Error(\\\"a\\\"),!1,{src:a,glmm:1}),a=\\\"\\\");return a};}).call(this);(function(){google.y={};google.x=function(a,b){if(a)var c=a.id;else{do c=Math.random();while(google.y[c])}google.y[c]=[a,b];return!1};google.lm=[];google.plm=function(a){google.lm.push.apply(google.lm,a)};google.lq=[];google.load=function(a,b,c){google.lq.push([[a],b,c])};google.loadAll=function(a,b){google.lq.push([a,b])};}).call(this);google.f={};var a=window.location,b=a.href.indexOf(\\\"#\\\");if(0<=b){var c=a.href.substring(b+1);/(^|&)q=/.test(c)&&-1==c.indexOf(\\\"#\\\")&&a.replace(\\\"/search?\\\"+c.replace(/(^|&)fp=[^&]*/g,\\\"\\\")+\\\"&cad=h\\\")};</script><style>#gbar,#guser{font-size:13px;padding-top:1px !important;}#gbar{height:22px}#guser{padding-bottom:7px !important;text-align:right}.gbh,.gbd{border-top:1px solid #c9d7f1;font-size:1px}.gbh{height:0;position:absolute;top:24px;width:100%}@media all{.gb1{height:22px;margin-right:.5em;vertical-align:top}#gbar{float:left}}a.gb1,a.gb4{text-decoration:underline !important}a.gb1,a.gb4{color:#00c !important}.gbi .gb4{color:#dd8e27 !important}.gbf .gb4{color:#900 !important}\\n</style><style>body,td,a,p,.h{font-family:arial,sans-serif}body{margin:0;overflow-y:scroll}#gog{padding:3px 8px 0}td{line-height:.8em}.gac_m td{line-height:17px}form{margin-bottom:20px}.h{color:#36c}.q{color:#00c}.ts 
td{padding:0}.ts{border-collapse:collapse}em{font-weight:bold;font-style:normal}.lst{height:25px;width:496px}.gsfi,.lst{font:18px arial,sans-serif}.gsfs{font:17px arial,sans-serif}.ds{display:inline-box;display:inline-block;margin:3px 0 4px;margin-left:4px}input{font-family:inherit}a.gb1,a.gb2,a.gb3,a.gb4{color:#11c !important}body{background:#fff;color:black}a{color:#11c;text-decoration:none}a:hover,a:active{text-decoration:underline}.fl a{color:#36c}a:visited{color:#551a8b}a.gb1,a.gb4{text-decoration:underline}a.gb3:hover{text-decoration:none}#ghead a.gb2:hover{color:#fff !important}.sblc{padding-top:5px}.sblc a{display:block;margin:2px 0;margin-left:13px;font-size:11px}.lsbb{background:#eee;border:solid 1px;border-color:#ccc #999 #999 #ccc;height:30px}.lsbb{display:block}.ftl,#fll a{display:inline-block;margin:0 12px}.lsb{background:url(/images/nav_logo229.png) 0 -261px repeat-x;border:none;color:#000;cursor:pointer;height:30px;margin:0;outline:0;font:15px arial,sans-serif;vertical-align:top}.lsb:active{background:#ccc}.lst:focus{outline:none}</style><script nonce=\\\"rLBCtD6iWuWYX1UPQlCXOA==\\\"></script><link href=\\\"/images/branding/product/ico/googleg_lodp.ico\\\" rel=\\\"shortcut icon\\\"></head><body bgcolor=\\\"#fff\\\"><script nonce=\\\"rLBCtD6iWuWYX1UPQlCXOA==\\\">(function(){var src=\\'/images/nav_logo229.png\\';var iesg=false;document.body.onload = function(){window.n && window.n();if (document.images){new Image().src=src;}\\nif (!iesg){document.f&&document.f.q.focus();document.gbqf&&document.gbqf.q.focus();}\\n}\\n})();</script><div id=\\\"mngb\\\"> <div id=gbar><nobr><b class=gb1>Search</b> <a class=gb1 href=\\\"https://www.google.com/imghp?hl=en&tab=wi\\\">Images</a> <a class=gb1 href=\\\"https://maps.google.com/maps?hl=en&tab=wl\\\">Maps</a> <a class=gb1 href=\\\"https://play.google.com/?hl=en&tab=w8\\\">Play</a> <a class=gb1 href=\\\"https://www.youtube.com/?gl=US&tab=w1\\\">YouTube</a> <a class=gb1 
href=\\\"https://news.google.com/nwshp?hl=en&tab=wn\\\">News</a> <a class=gb1 href=\\\"https://mail.google.com/mail/?tab=wm\\\">Gmail</a> <a class=gb1 href=\\\"https://drive.google.com/?tab=wo\\\">Drive</a> <a class=gb1 style=\\\"text-decoration:none\\\" href=\\\"https://www.google.com/intl/en/options/\\\"><u>More</u> &raquo;</a></nobr></div><div id=guser width=100%><nobr><span id=gbn class=gbi></span><span id=gbf class=gbf></span><span id=gbe></span><a href=\\\"http://www.google.com/history/optout?hl=en\\\" class=gb4>Web History</a> | <a  href=\\\"/preferences?hl=en\\\" class=gb4>Settings</a> | <a target=_top id=gb_70 href=\\\"https://accounts.google.com/ServiceLogin?hl=en&passive=true&continue=https://www.google.com/\\\" class=gb4>Sign in</a></nobr></div><div class=gbh style=left:0></div><div class=gbh style=right:0></div> </div><center><br clear=\\\"all\\\" id=\\\"lgpd\\\"><div id=\\\"lga\\\"><img alt=\\\"Google\\\" height=\\\"92\\\" src=\\\"/images/branding/googlelogo/1x/googlelogo_white_background_color_272x92dp.png\\\" style=\\\"padding:28px 0 14px\\\" width=\\\"272\\\" id=\\\"hplogo\\\" onload=\\\"window.lol&&lol()\\\"><br><br></div><form action=\\\"/search\\\" name=\\\"f\\\"><table cellpadding=\\\"0\\\" cellspacing=\\\"0\\\"><tr valign=\\\"top\\\"><td width=\\\"25%\\\">&nbsp;</td><td align=\\\"center\\\" nowrap=\\\"\\\"><input name=\\\"ie\\\" value=\\\"ISO-8859-1\\\" type=\\\"hidden\\\"><input value=\\\"en\\\" name=\\\"hl\\\" type=\\\"hidden\\\"><input name=\\\"source\\\" type=\\\"hidden\\\" value=\\\"hp\\\"><input name=\\\"biw\\\" type=\\\"hidden\\\"><input name=\\\"bih\\\" type=\\\"hidden\\\"><div class=\\\"ds\\\" style=\\\"height:32px;margin:4px 0\\\"><input style=\\\"color:#000;margin:0;padding:5px 8px 0 6px;vertical-align:top\\\" autocomplete=\\\"off\\\" class=\\\"lst\\\" value=\\\"\\\" title=\\\"Google Search\\\" maxlength=\\\"2048\\\" name=\\\"q\\\" size=\\\"57\\\"></div><br style=\\\"line-height:0\\\"><span class=\\\"ds\\\"><span 
class=\\\"lsbb\\\"><input class=\\\"lsb\\\" value=\\\"Google Search\\\" name=\\\"btnG\\\" type=\\\"submit\\\"></span></span><span class=\"\n      }\n    }\n  }\n  events {\n    timestamp {\n      seconds: 1525207293\n      nanos: 347782371\n    }\n    write {\n      data: {\n        as_bytes: \"\\000\\014\\010\\000\\000\\000\\000\\000\\001\\\"ds\\\"><span class=\\\"lsbb\\\"><input class=\\\"lsb\\\" value=\\\"I\\'m Feeling Lucky\\\" name=\\\"btnI\\\" onclick=\\\"if(this.form.q.value)this.checked=1; else top.location=\\'/doodles/\\'\\\" type=\\\"submit\\\"></span></span></td><td class=\\\"fl sblc\\\" align=\\\"left\\\" nowrap=\\\"\\\" width=\\\"25%\\\"><a href=\\\"/advanced_search?hl=en&amp;authuser=0\\\">Advanced search</a><a href=\\\"/language_tools?hl=en&amp;authuser=0\\\">Language tools</a></td></tr></table><input id=\\\"gbv\\\" name=\\\"gbv\\\" type=\\\"hidden\\\" value=\\\"1\\\"></form><div id=\\\"gac_scont\\\"></div><div style=\\\"font-size:83%;min-height:3.5em\\\"><br></div><span id=\\\"footer\\\"><div style=\\\"font-size:10pt\\\"><div style=\\\"margin:19px auto;text-align:center\\\" id=\\\"fll\\\"><a href=\\\"/intl/en/ads/\\\">Advertising\\240Programs</a><a href=\\\"/services/\\\">Business Solutions</a><a href=\\\"https://plus.google.com/116899029375914044550\\\" rel=\\\"publisher\\\">+Google</a><a href=\\\"/intl/en/about.html\\\">About Google</a></div></div><p style=\\\"color:#767676;font-size:8pt\\\">&copy; 2018 - <a href=\\\"/intl/en/policies/privacy/\\\">Privacy</a> - <a href=\\\"/intl/en/policies/terms/\\\">Terms</a></p></span></center><script nonce=\\\"rLBCtD6iWuWYX1UPQlCXOA==\\\">(function(){window.google.cdo={height:0,width:0};(function(){var a=window.innerWidth,b=window.innerHeight;if(!a||!b){var 
c=window.document,d=\\\"CSS1Compat\\\"==c.compatMode?c.documentElement:c.body;a=d.clientWidth;b=d.clientHeight}a&&b&&(a!=google.cdo.width||b!=google.cdo.height)&&google.log(\\\"\\\",\\\"\\\",\\\"/client_204?&atyp=i&biw=\\\"+a+\\\"&bih=\\\"+b+\\\"&ei=\\\"+google.kEI);}).call(this);})();</script><div id=\\\"xjsd\\\"></div><div id=\\\"xjsi\\\"><script nonce=\\\"rLBCtD6iWuWYX1UPQlCXOA==\\\">(function(){function c(b){window.setTimeout(function(){var a=document.createElement(\\\"script\\\");a.src=b;google.timers&&google.timers.load.t&&google.tick(\\\"load\\\",{gen204:\\\"xjsls\\\",clearcut:31});document.getElementById(\\\"xjsd\\\").appendChild(a)},0)}google.dljp=function(b,a){google.xjsu=b;c(a)};google.dlj=c;}).call(this);if(!google.xjs){window._=window._||{};window._DumpException=window._._DumpException=function(e){throw e};window._F_installCss=window._._F_installCss=function(c){};google.dljp(\\'/xjs/_/js/k\\\\x3dxjs.hp.en_US.hJpRN-HQS74.O/m\\\\x3dsb_he,d/am\\\\x3dVDA2/rt\\\\x3dj/d\\\\x3d1/rs\\\\x3dACT90oGoOpEv-zlKjP7tm3pX-9wBDPuVdQ\\',\\'/xjs/_/js/k\\\\x3dxjs.hp.en_US.hJpRN-HQS74.O/m\\\\x3dsb_he,d/am\\\\x3dVDA2/rt\\\\x3dj/d\\\\x3d1/rs\\\\x3dACT90oGoOpEv-zlKjP7tm3pX-9wBDPuVdQ\\');google.xjs=1;}google.pmc={\\\"sb_he\\\":{\\\"agen\\\":true,\\\"cgen\\\":true,\\\"client\\\":\\\"heirloom-hp\\\",\\\"dh\\\":true,\\\"dhqt\\\":true,\\\"ds\\\":\\\"\\\",\\\"ffql\\\":\\\"en\\\",\\\"fl\\\":true,\\\"host\\\":\\\"google.com\\\",\\\"isbh\\\":28,\\\"jsonp\\\":true,\\\"msgs\\\":{\\\"cibl\\\":\\\"Clear Search\\\",\\\"dym\\\":\\\"Did you mean:\\\",\\\"lcky\\\":\\\"I\\\\u0026#39;m Feeling Lucky\\\",\\\"lml\\\":\\\"Learn more\\\",\\\"oskt\\\":\\\"Input tools\\\",\\\"psrc\\\":\\\"This search was removed from your \\\\u003Ca href=\\\\\\\"/history\\\\\\\"\\\\u003EWeb History\\\\u003C/a\\\\u003E\\\",\\\"psrl\\\":\\\"Remove\\\",\\\"sbit\\\":\\\"Search by image\\\",\\\"srch\\\":\\\"Google 
Search\\\"},\\\"nds\\\":true,\\\"ovr\\\":{},\\\"pq\\\":\\\"\\\",\\\"refpd\\\":true,\\\"rfs\\\":[],\\\"sbpl\\\":24,\\\"sbpr\\\":24,\\\"scd\\\":10,\\\"sce\\\":5,\\\"stok\\\":\\\"3mJuW88WexlwJgN3WXmg_mhSAvs\\\"},\\\"d\\\":{},\\\"ZI/YVQ\\\":{},\\\"U5B21g\\\":{},\\\"DPBNMg\\\":{},\\\"YFCs/g\\\":{}};google.x(null,function(){});(function(){var r=[];google.plm(r);})();(function(){var ctx=[]\\n;google.jsc && google.jsc.x(ctx);})();</script></div></body></html>\\000\\000\\000\\000\\001\\000\\000\\000\\001\"\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "api/tools/data/tap2pcap_h2_ipv4.txt",
    "content": "    1   0.000000    127.0.0.1 → 127.0.0.1    HTTP2 157 Magic, SETTINGS[0], WINDOW_UPDATE[0], HEADERS[1]: GET /\n    2   0.013713    127.0.0.1 → 127.0.0.1    HTTP2 91 SETTINGS[0], SETTINGS[0], WINDOW_UPDATE[0]\n    3   0.013821    127.0.0.1 → 127.0.0.1    HTTP2 63 SETTINGS[0]\n    4   0.128649    127.0.0.1 → 127.0.0.1    HTTP2 5586 HEADERS[1]: 200 OK\n    5   0.130007    127.0.0.1 → 127.0.0.1    HTTP2 7573 DATA[1]\n    6   0.131045    127.0.0.1 → 127.0.0.1    HTTP2 3152 DATA[1], DATA[1] (text/html)\n"
  },
  {
    "path": "api/tools/generate_listeners.py",
    "content": "# Map from listeners proto, with holes where filter config fragments should go, and\n# a list of filter config fragment protos, to a final listeners.pb with the\n# config fragments converted to the opaque Struct representation.\n\nimport sys\n\n# Some evil hack to deal with the fact that Bazel puts both google/api and\n# google/protobuf roots in the sys.path, and Python gets confused, e.g. it\n# thinks that there is no api package if it encounters the google/protobuf root\n# in sys.path first.\nfrom pkgutil import extend_path\nimport google\ngoogle.__path__ = extend_path(google.__path__, google.__name__)\n\nfrom google.protobuf import json_format\nfrom google.protobuf import struct_pb2\nfrom google.protobuf import text_format\n\nfrom envoy.api.v2 import lds_pb2\nfrom envoy.config.filter.network.http_connection_manager.v2 import http_connection_manager_pb2\n\n\n# Convert an arbitrary proto object to its Struct proto representation.\ndef ProtoToStruct(proto):\n  json_rep = json_format.MessageToJson(proto)\n  parsed_msg = struct_pb2.Struct()\n  json_format.Parse(json_rep, parsed_msg)\n  return parsed_msg\n\n\n# Parse a proto from the filesystem.\ndef ParseProto(path, filter_name):\n  # We only know about some filter config protos ahead of time.\n  KNOWN_FILTERS = {\n      'http_connection_manager': lambda: http_connection_manager_pb2.HttpConnectionManager()\n  }\n  filter_config = KNOWN_FILTERS[filter_name]()\n  with open(path, 'r') as f:\n    text_format.Merge(f.read(), filter_config)\n  return filter_config\n\n\ndef GenerateListeners(listeners_pb_path, output_pb_path, output_json_path, fragments):\n  listener = lds_pb2.Listener()\n  with open(listeners_pb_path, 'r') as f:\n    text_format.Merge(f.read(), listener)\n\n  for filter_chain in listener.filter_chains:\n    for f in filter_chain.filters:\n      f.config.CopyFrom(ProtoToStruct(ParseProto(next(fragments), f.name)))\n\n  with open(output_pb_path, 'w') as f:\n    f.write(str(listener))\n\n  
with open(output_json_path, 'w') as f:\n    f.write(json_format.MessageToJson(listener))\n\n\nif __name__ == '__main__':\n  if len(sys.argv) < 4:\n    print('Usage: %s <path to listeners.pb> <output listeners.pb> <output '\n          'listeners.json> <filter config fragment paths>') % sys.argv[0]\n    sys.exit(1)\n\n  GenerateListeners(sys.argv[1], sys.argv[2], sys.argv[3], iter(sys.argv[4:]))\n"
  },
  {
    "path": "api/tools/generate_listeners_test.py",
    "content": "\"\"\"Tests for generate_listeners.\"\"\"\n\nimport os\n\nimport generate_listeners\n\nif __name__ == \"__main__\":\n  srcdir = os.path.join(os.getenv(\"TEST_SRCDIR\"), 'envoy_api_canonical')\n  generate_listeners.GenerateListeners(\n      os.path.join(srcdir, \"examples/service_envoy/listeners.pb\"), \"/dev/stdout\", \"/dev/stdout\",\n      iter([os.path.join(srcdir, \"examples/service_envoy/http_connection_manager.pb\")]))\n"
  },
  {
    "path": "api/tools/tap2pcap.py",
    "content": "\"\"\"Tool to convert Envoy tap trace format to PCAP.\n\nUses od and text2pcap (part of Wireshark) utilities to translate the Envoy\ntap trace proto format to a PCAP file suitable for consuming in Wireshark\nand other tools in the PCAP ecosystem. The TCP stream in the output PCAP is\nsynthesized based on the known IP/port/timestamps that Envoy produces in its\ntap files; it is not a literal wire tap.\n\nUsage:\n\nbazel run @envoy_api_canonical//tools:tap2pcap <tap .pb/.pb_text> <pcap path>\n\nKnown issues:\n- IPv6 PCAP generation has malformed TCP packets. This appears to be a text2pcap\nissue.\n\nTODO(htuch):\n- Figure out IPv6 PCAP issue above, or file a bug once the root cause is clear.\n\"\"\"\nfrom __future__ import print_function\n\nimport datetime\nimport io\nimport socket\nimport subprocess as sp\nimport sys\nimport time\n\nfrom google.protobuf import text_format\n\nfrom envoy.data.tap.v2alpha import wrapper_pb2\n\n\ndef DumpEvent(direction, timestamp, data):\n  dump = io.StringIO()\n  dump.write('%s\\n' % direction)\n  # Adjust to local timezone\n  adjusted_dt = timestamp.ToDatetime() - datetime.timedelta(seconds=time.altzone)\n  dump.write('%s\\n' % adjusted_dt)\n  od = sp.Popen(['od', '-Ax', '-tx1', '-v'], stdout=sp.PIPE, stdin=sp.PIPE, stderr=sp.PIPE)\n  packet_dump = od.communicate(data)[0]\n  dump.write(packet_dump.decode())\n  return dump.getvalue()\n\n\ndef Tap2Pcap(tap_path, pcap_path):\n  wrapper = wrapper_pb2.TraceWrapper()\n  if tap_path.endswith('.pb_text'):\n    with open(tap_path, 'r') as f:\n      text_format.Merge(f.read(), wrapper)\n  else:\n    with open(tap_path, 'r') as f:\n      wrapper.ParseFromString(f.read())\n\n  trace = wrapper.socket_buffered_trace\n  local_address = trace.connection.local_address.socket_address.address\n  local_port = trace.connection.local_address.socket_address.port_value\n  remote_address = trace.connection.remote_address.socket_address.address\n  remote_port = 
trace.connection.remote_address.socket_address.port_value\n\n  dumps = []\n  for event in trace.events:\n    if event.HasField('read'):\n      dumps.append(DumpEvent('I', event.timestamp, event.read.data.as_bytes))\n    elif event.HasField('write'):\n      dumps.append(DumpEvent('O', event.timestamp, event.write.data.as_bytes))\n\n  ipv6 = False\n  try:\n    socket.inet_pton(socket.AF_INET6, local_address)\n    ipv6 = True\n  except socket.error:\n    pass\n\n  text2pcap_args = [\n      'text2pcap', '-D', '-t', '%Y-%m-%d %H:%M:%S.', '-6' if ipv6 else '-4',\n      '%s,%s' % (remote_address, local_address), '-T',\n      '%d,%d' % (remote_port, local_port), '-', pcap_path\n  ]\n  text2pcap = sp.Popen(text2pcap_args, stdout=sp.PIPE, stdin=sp.PIPE)\n  text2pcap.communicate('\\n'.join(dumps).encode())\n\n\nif __name__ == '__main__':\n  if len(sys.argv) != 3:\n    print('Usage: %s <tap .pb/.pb_text> <pcap path>' % sys.argv[0])\n    sys.exit(1)\n  Tap2Pcap(sys.argv[1], sys.argv[2])\n"
  },
  {
    "path": "api/tools/tap2pcap_test.py",
    "content": "\"\"\"Tests for tap2pcap.\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport subprocess as sp\nimport sys\n\nimport tap2pcap\n\n# Validate that the tapped trace when run through tap2cap | tshark matches\n# a golden output file for the tshark dump. Since we run tap2pcap in a\n# subshell with a limited environment, the inferred time zone should be UTC.\nif __name__ == '__main__':\n  srcdir = os.path.join(os.getenv('TEST_SRCDIR'), 'envoy_api_canonical')\n  tap_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.pb_text')\n  expected_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.txt')\n  pcap_path = os.path.join(os.getenv('TEST_TMPDIR'), 'generated.pcap')\n\n  tap2pcap.Tap2Pcap(tap_path, pcap_path)\n  actual_output = sp.check_output(['tshark', '-r', pcap_path, '-d', 'tcp.port==10000,http2', '-P'])\n  with open(expected_path, 'rb') as f:\n    expected_output = f.read()\n  if actual_output != expected_output:\n    print('Mismatch')\n    print('Expected: %s' % expected_output)\n    print('Actual: %s' % actual_output)\n    sys.exit(1)\n"
  },
  {
    "path": "api/versioning/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/active_protos_gen.py.\n\nload(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\n# This tracks active development versions of protos.\nproto_library(\n    name = \"active_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//envoy/admin/v3:pkg\",\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/bootstrap/v3:pkg\",\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/common/matcher/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/config/filter/thrift/router/v2alpha1:pkg\",\n        \"//envoy/config/grpc_credential/v3:pkg\",\n        \"//envoy/config/health_checker/redis/v2:pkg\",\n        \"//envoy/config/listener/v3:pkg\",\n        \"//envoy/config/metrics/v3:pkg\",\n        \"//envoy/config/overload/v3:pkg\",\n        \"//envoy/config/ratelimit/v3:pkg\",\n        \"//envoy/config/rbac/v3:pkg\",\n        \"//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg\",\n        \"//envoy/config/resource_monitor/injected_resource/v2alpha:pkg\",\n        \"//envoy/config/retry/omit_canary_hosts/v2:pkg\",\n        \"//envoy/config/retry/previous_hosts/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/config/tap/v3:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"//envoy/data/accesslog/v3:pkg\",\n        \"//envoy/data/cluster/v3:pkg\",\n        \"//envoy/data/core/v3:pkg\",\n        \"//envoy/data/dns/v3:pkg\",\n        \"//envoy/data/tap/v3:pkg\",\n        \"//envoy/extensions/access_loggers/file/v3:pkg\",\n        \"//envoy/extensions/access_loggers/grpc/v3:pkg\",\n        \"//envoy/extensions/access_loggers/wasm/v3:pkg\",\n        \"//envoy/extensions/clusters/aggregate/v3:pkg\",\n        \"//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg\",\n        
\"//envoy/extensions/clusters/redis/v3:pkg\",\n        \"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg\",\n        \"//envoy/extensions/common/ratelimit/v3:pkg\",\n        \"//envoy/extensions/common/tap/v3:pkg\",\n        \"//envoy/extensions/compression/gzip/compressor/v3:pkg\",\n        \"//envoy/extensions/compression/gzip/decompressor/v3:pkg\",\n        \"//envoy/extensions/filters/common/fault/v3:pkg\",\n        \"//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg\",\n        \"//envoy/extensions/filters/http/admission_control/v3alpha:pkg\",\n        \"//envoy/extensions/filters/http/aws_lambda/v3:pkg\",\n        \"//envoy/extensions/filters/http/aws_request_signing/v3:pkg\",\n        \"//envoy/extensions/filters/http/buffer/v3:pkg\",\n        \"//envoy/extensions/filters/http/cache/v3alpha:pkg\",\n        \"//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg\",\n        \"//envoy/extensions/filters/http/compressor/v3:pkg\",\n        \"//envoy/extensions/filters/http/cors/v3:pkg\",\n        \"//envoy/extensions/filters/http/csrf/v3:pkg\",\n        \"//envoy/extensions/filters/http/decompressor/v3:pkg\",\n        \"//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/http/dynamo/v3:pkg\",\n        \"//envoy/extensions/filters/http/ext_authz/v3:pkg\",\n        \"//envoy/extensions/filters/http/fault/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_stats/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_web/v3:pkg\",\n        \"//envoy/extensions/filters/http/gzip/v3:pkg\",\n        \"//envoy/extensions/filters/http/header_to_metadata/v3:pkg\",\n        \"//envoy/extensions/filters/http/health_check/v3:pkg\",\n        
\"//envoy/extensions/filters/http/ip_tagging/v3:pkg\",\n        \"//envoy/extensions/filters/http/jwt_authn/v3:pkg\",\n        \"//envoy/extensions/filters/http/local_ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/http/lua/v3:pkg\",\n        \"//envoy/extensions/filters/http/oauth2/v3alpha:pkg\",\n        \"//envoy/extensions/filters/http/on_demand/v3:pkg\",\n        \"//envoy/extensions/filters/http/original_src/v3:pkg\",\n        \"//envoy/extensions/filters/http/ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/http/rbac/v3:pkg\",\n        \"//envoy/extensions/filters/http/router/v3:pkg\",\n        \"//envoy/extensions/filters/http/squash/v3:pkg\",\n        \"//envoy/extensions/filters/http/tap/v3:pkg\",\n        \"//envoy/extensions/filters/http/wasm/v3:pkg\",\n        \"//envoy/extensions/filters/listener/http_inspector/v3:pkg\",\n        \"//envoy/extensions/filters/listener/original_dst/v3:pkg\",\n        \"//envoy/extensions/filters/listener/original_src/v3:pkg\",\n        \"//envoy/extensions/filters/listener/proxy_protocol/v3:pkg\",\n        \"//envoy/extensions/filters/listener/tls_inspector/v3:pkg\",\n        \"//envoy/extensions/filters/network/client_ssl_auth/v3:pkg\",\n        \"//envoy/extensions/filters/network/direct_response/v3:pkg\",\n        \"//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg\",\n        \"//envoy/extensions/filters/network/dubbo_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/echo/v3:pkg\",\n        \"//envoy/extensions/filters/network/ext_authz/v3:pkg\",\n        \"//envoy/extensions/filters/network/http_connection_manager/v3:pkg\",\n        \"//envoy/extensions/filters/network/kafka_broker/v3:pkg\",\n        \"//envoy/extensions/filters/network/local_ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/network/mongo_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/mysql_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg\",\n   
     \"//envoy/extensions/filters/network/ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/network/rbac/v3:pkg\",\n        \"//envoy/extensions/filters/network/redis_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/sni_cluster/v3:pkg\",\n        \"//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg\",\n        \"//envoy/extensions/filters/network/tcp_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/network/thrift_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/wasm/v3:pkg\",\n        \"//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg\",\n        \"//envoy/extensions/filters/udp/udp_proxy/v3:pkg\",\n        \"//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg\",\n        \"//envoy/extensions/internal_redirect/previous_routes/v3:pkg\",\n        \"//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg\",\n        \"//envoy/extensions/network/socket_interface/v3:pkg\",\n        \"//envoy/extensions/retry/host/omit_host_metadata/v3:pkg\",\n        \"//envoy/extensions/retry/priority/previous_priorities/v3:pkg\",\n        \"//envoy/extensions/stat_sinks/wasm/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/alts/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/quic/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/raw_buffer/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tap/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/extensions/upstreams/http/generic/v3:pkg\",\n        \"//envoy/extensions/upstreams/http/http/v3:pkg\",\n        \"//envoy/extensions/upstreams/http/tcp/v3:pkg\",\n        
\"//envoy/extensions/wasm/v3:pkg\",\n        \"//envoy/extensions/watchdog/abort_action/v3alpha:pkg\",\n        \"//envoy/extensions/watchdog/profile_action/v3alpha:pkg\",\n        \"//envoy/service/accesslog/v3:pkg\",\n        \"//envoy/service/auth/v3:pkg\",\n        \"//envoy/service/cluster/v3:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"//envoy/service/endpoint/v3:pkg\",\n        \"//envoy/service/event_reporting/v3:pkg\",\n        \"//envoy/service/extension/v3:pkg\",\n        \"//envoy/service/health/v3:pkg\",\n        \"//envoy/service/listener/v3:pkg\",\n        \"//envoy/service/load_stats/v3:pkg\",\n        \"//envoy/service/metrics/v3:pkg\",\n        \"//envoy/service/ratelimit/v3:pkg\",\n        \"//envoy/service/route/v3:pkg\",\n        \"//envoy/service/runtime/v3:pkg\",\n        \"//envoy/service/secret/v3:pkg\",\n        \"//envoy/service/status/v3:pkg\",\n        \"//envoy/service/tap/v3:pkg\",\n        \"//envoy/service/trace/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/metadata/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n    ],\n)\n\n# This tracks frozen versions of protos.\nproto_library(\n    name = \"frozen_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//envoy/admin/v2alpha:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/api/v2/cluster:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"//envoy/api/v2/listener:pkg\",\n        \"//envoy/api/v2/ratelimit:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/config/accesslog/v2:pkg\",\n        \"//envoy/config/bootstrap/v2:pkg\",\n        \"//envoy/config/cluster/aggregate/v2alpha:pkg\",\n        \"//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/config/cluster/redis:pkg\",\n        
\"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/config/common/tap/v2alpha:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/config/filter/dubbo/router/v2alpha1:pkg\",\n        \"//envoy/config/filter/fault/v2:pkg\",\n        \"//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg\",\n        \"//envoy/config/filter/http/aws_lambda/v2alpha:pkg\",\n        \"//envoy/config/filter/http/aws_request_signing/v2alpha:pkg\",\n        \"//envoy/config/filter/http/buffer/v2:pkg\",\n        \"//envoy/config/filter/http/cache/v2alpha:pkg\",\n        \"//envoy/config/filter/http/compressor/v2:pkg\",\n        \"//envoy/config/filter/http/cors/v2:pkg\",\n        \"//envoy/config/filter/http/csrf/v2:pkg\",\n        \"//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/config/filter/http/dynamo/v2:pkg\",\n        \"//envoy/config/filter/http/ext_authz/v2:pkg\",\n        \"//envoy/config/filter/http/fault/v2:pkg\",\n        \"//envoy/config/filter/http/grpc_http1_bridge/v2:pkg\",\n        \"//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg\",\n        \"//envoy/config/filter/http/grpc_stats/v2alpha:pkg\",\n        \"//envoy/config/filter/http/grpc_web/v2:pkg\",\n        \"//envoy/config/filter/http/gzip/v2:pkg\",\n        \"//envoy/config/filter/http/header_to_metadata/v2:pkg\",\n        \"//envoy/config/filter/http/health_check/v2:pkg\",\n        \"//envoy/config/filter/http/ip_tagging/v2:pkg\",\n        \"//envoy/config/filter/http/jwt_authn/v2alpha:pkg\",\n        \"//envoy/config/filter/http/lua/v2:pkg\",\n        \"//envoy/config/filter/http/on_demand/v2:pkg\",\n        \"//envoy/config/filter/http/original_src/v2alpha1:pkg\",\n        \"//envoy/config/filter/http/rate_limit/v2:pkg\",\n        \"//envoy/config/filter/http/rbac/v2:pkg\",\n        \"//envoy/config/filter/http/router/v2:pkg\",\n        \"//envoy/config/filter/http/squash/v2:pkg\",\n        
\"//envoy/config/filter/http/tap/v2alpha:pkg\",\n        \"//envoy/config/filter/http/transcoder/v2:pkg\",\n        \"//envoy/config/filter/listener/http_inspector/v2:pkg\",\n        \"//envoy/config/filter/listener/original_dst/v2:pkg\",\n        \"//envoy/config/filter/listener/original_src/v2alpha1:pkg\",\n        \"//envoy/config/filter/listener/proxy_protocol/v2:pkg\",\n        \"//envoy/config/filter/listener/tls_inspector/v2:pkg\",\n        \"//envoy/config/filter/network/client_ssl_auth/v2:pkg\",\n        \"//envoy/config/filter/network/direct_response/v2:pkg\",\n        \"//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg\",\n        \"//envoy/config/filter/network/echo/v2:pkg\",\n        \"//envoy/config/filter/network/ext_authz/v2:pkg\",\n        \"//envoy/config/filter/network/http_connection_manager/v2:pkg\",\n        \"//envoy/config/filter/network/kafka_broker/v2alpha1:pkg\",\n        \"//envoy/config/filter/network/local_rate_limit/v2alpha:pkg\",\n        \"//envoy/config/filter/network/mongo_proxy/v2:pkg\",\n        \"//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg\",\n        \"//envoy/config/filter/network/rate_limit/v2:pkg\",\n        \"//envoy/config/filter/network/rbac/v2:pkg\",\n        \"//envoy/config/filter/network/redis_proxy/v2:pkg\",\n        \"//envoy/config/filter/network/sni_cluster/v2:pkg\",\n        \"//envoy/config/filter/network/tcp_proxy/v2:pkg\",\n        \"//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg\",\n        \"//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg\",\n        \"//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg\",\n        \"//envoy/config/filter/udp/udp_proxy/v2alpha:pkg\",\n        \"//envoy/config/grpc_credential/v2alpha:pkg\",\n        \"//envoy/config/listener/v2:pkg\",\n        \"//envoy/config/metrics/v2:pkg\",\n        \"//envoy/config/overload/v2alpha:pkg\",\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"//envoy/config/rbac/v2:pkg\",\n        
\"//envoy/config/retry/omit_host_metadata/v2:pkg\",\n        \"//envoy/config/retry/previous_priorities:pkg\",\n        \"//envoy/config/trace/v2:pkg\",\n        \"//envoy/config/trace/v2alpha:pkg\",\n        \"//envoy/config/transport_socket/alts/v2alpha:pkg\",\n        \"//envoy/config/transport_socket/raw_buffer/v2:pkg\",\n        \"//envoy/config/transport_socket/tap/v2alpha:pkg\",\n        \"//envoy/data/accesslog/v2:pkg\",\n        \"//envoy/data/cluster/v2alpha:pkg\",\n        \"//envoy/data/core/v2alpha:pkg\",\n        \"//envoy/data/dns/v2alpha:pkg\",\n        \"//envoy/data/tap/v2alpha:pkg\",\n        \"//envoy/service/accesslog/v2:pkg\",\n        \"//envoy/service/auth/v2:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"//envoy/service/event_reporting/v2alpha:pkg\",\n        \"//envoy/service/load_stats/v2:pkg\",\n        \"//envoy/service/metrics/v2:pkg\",\n        \"//envoy/service/ratelimit/v2:pkg\",\n        \"//envoy/service/status/v2:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"//envoy/service/trace/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"//envoy/type/metadata/v2:pkg\",\n        \"//envoy/type/tracing/v2:pkg\",\n    ],\n)\n"
  },
  {
    "path": "api/xds_protocol.rst",
    "content": ".. _xds_protocol:\n\nxDS REST and gRPC protocol\n==========================\n\nEnvoy discovers its various dynamic resources via the filesystem or by\nquerying one or more management servers. Collectively, these discovery\nservices and their corresponding APIs are referred to as *xDS*.\nResources are requested via *subscriptions*, by specifying a filesystem\npath to watch, initiating gRPC streams, or polling a REST-JSON URL. The\nlatter two methods involve sending requests with a :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>`\nproto payload. Resources are delivered in a\n:ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\nproto payload in all methods. We discuss each type of subscription\nbelow.\n\nResource Types\n--------------\n\nEvery configuration resource in the xDS API has a type associated with it. Resource types follow a\n:repo:`versioning scheme <api/API_VERSIONING.md>`. Resource types are versioned independent of the\ntransports described below.\n\nThe following v2 xDS resource types are supported:\n\n-  :ref:`envoy.api.v2.Listener <envoy_api_msg_Listener>`\n-  :ref:`envoy.api.v2.RouteConfiguration <envoy_api_msg_RouteConfiguration>`\n-  :ref:`envoy.api.v2.ScopedRouteConfiguration <envoy_api_msg_ScopedRouteConfiguration>`\n-  :ref:`envoy.api.v2.route.VirtualHost <envoy_api_msg_route.VirtualHost>`\n-  :ref:`envoy.api.v2.Cluster <envoy_api_msg_Cluster>`\n-  :ref:`envoy.api.v2.ClusterLoadAssignment <envoy_api_msg_ClusterLoadAssignment>`\n-  :ref:`envoy.api.v2.Auth.Secret <envoy_api_msg_Auth.Secret>`\n-  :ref:`envoy.service.discovery.v2.Runtime <envoy_api_msg_service.discovery.v2.Runtime>`\n\nThe following v3 xdS resource types are supported:\n\n-  :ref:`envoy.config.listener.v3.Listener <envoy_v3_api_msg_config.listener.v3.Listener>`\n-  :ref:`envoy.config.route.v3.RouteConfiguration <envoy_v3_api_msg_config.route.v3.RouteConfiguration>`\n-  :ref:`envoy.config.route.v3.ScopedRouteConfiguration 
<envoy_v3_api_msg_config.route.v3.ScopedRouteConfiguration>`\n-  :ref:`envoy.config.route.v3.VirtualHost <envoy_v3_api_msg_config.route.v3.VirtualHost>`\n-  :ref:`envoy.config.cluster.v3.Cluster <envoy_v3_api_msg_config.cluster.v3.Cluster>`\n-  :ref:`envoy.config.endpoint.v3.ClusterLoadAssignment <envoy_v3_api_msg_config.endpoint.v3.ClusterLoadAssignment>`\n-  :ref:`envoy.extensions.transport_sockets.tls.v3.Secret <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.Secret>`\n-  :ref:`envoy.service.runtime.v3.Runtime <envoy_v3_api_msg_service.runtime.v3.Runtime>`\n\nThe concept of `type URLs <https://developers.google.com/protocol-buffers/docs/proto3#any>`_\nappears below, and takes the form `type.googleapis.com/<resource type>` -- e.g.,\n`type.googleapis.com/envoy.api.v2.Cluster` for a `Cluster` resource. In various requests from\nEnvoy and responses by the management server, the resource type URL is stated.\n\n\nFilesystem subscriptions\n------------------------\n\nThe simplest approach to delivering dynamic configuration is to place it\nat a well known path specified in the :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>`.\nEnvoy will use `inotify` (`kqueue` on macOS) to monitor the file for\nchanges and parse the\n:ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` proto in the file on update.\nBinary protobufs, JSON, YAML and proto text are supported formats for\nthe\n:ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`.\n\nThere is no mechanism available for filesystem subscriptions to ACK/NACK\nupdates beyond stats counters and logs. The last valid configuration for\nan xDS API will continue to apply if an configuration update rejection\noccurs.\n\n.. 
_xds_protocol_streaming_grpc_subscriptions:\n\nStreaming gRPC subscriptions\n----------------------------\n\nAPI flow\n~~~~~~~~\n\nFor typical HTTP routing scenarios, the core resource types for the client's configuration are\n`Listener`, `RouteConfiguration`, `Cluster`, and `ClusterLoadAssignment`. Each `Listener` resource\nmay point to a `RouteConfiguration` resource, which may point to one or more `Cluster` resources,\nand each `Cluster` resource may point to a `ClusterLoadAssignment` resource.\n\nEnvoy fetches all `Listener` and `Cluster` resources at startup. It then fetches whatever\n`RouteConfiguration` and `ClusterLoadAssignment` resources that are required by the `Listener` and\n`Cluster` resources. In effect, every `Listener` or `Cluster` resource is a root to part of Envoy's\nconfiguration tree.\n\nA non-proxy client such as gRPC might start by fetching only the specific `Listener` resources\nthat it is interested in. It then fetches the `RouteConfiguration` resources required by those\n`Listener` resources, followed by whichever `Cluster` resources are required by those\n`RouteConfiguration` resources, followed by the `ClusterLoadAssignment` resources required\nby the `Cluster` resources. In effect, the original `Listener` resources are the roots to\nthe client's configuration tree.\n\nVariants of the xDS Transport Protocol\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nFour Variants\n^^^^^^^^^^^^^\n\nThere are four variants of the xDS transport protocol used via streaming gRPC, which cover all\ncombinations of two dimensions.\n\nThe first dimension is State of the World (SotW) vs. incremental. The SotW approach was the\noriginal mechanism used by xDS, in which the client must specify all resource names it is\ninterested in with each request (except when making a wildcard request in LDS/CDS), and the server\nmust return all resources the client has subscribed to in each request (in LDS/CDS). 
This means\nthat if the client is already subscribing to 99 resources and wants to add an additional one, it\nmust send a request with all 100 resource names, rather than just the one new one. And the server\nmust then respond by sending all 100 resources, even if the 99 that were already subscribed to have\nnot changed (in LDS/CDS). This mechanism can be a scalability limitation, which is why the\nincremental protocol variant was introduced. The incremental approach allows both the client and\nserver to indicate only deltas relative to their previous state -- i.e., the client can say that\nit wants to add or remove its subscription to a particular resource name without resending those\nthat have not changed, and the server can send updates only for those resources that have changed.\nThe incremental protocol also provides a mechanism for lazy loading of resources. For details on\nthe incremental protocol, see :ref:`Incremental xDS <xds_protocol_delta>` below.\n\nThe second dimension is using a separate gRPC stream for each resource type vs. aggregating all\nresource types onto a single gRPC stream. The former approach was the original mechanism used by\nxDS, and it offers an eventual consistency model. The latter approach was added for environments\nin which explicit control of sequencing is required. For details, see :ref:`Eventual consistency\nconsiderations <xds_protocol_eventual_consistency_considerations>` below.\n\nSo, the four variants of the xDS transport protocol are:\n\n1. State of the World (Basic xDS): SotW, separate gRPC stream for each resource type\n2. Incremental xDS: incremental, separate gRPC stream for each resource type\n3. Aggregated Discovery Service (ADS): SotW, aggregate stream for all resource types\n4. 
Incremental ADS: incremental, aggregate stream for all resource types\n\nRPC Services and Methods for Each Variant\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nFor the non-aggregated protocol variants, there is a separate RPC service for each resource type.\nEach of these RPC services can provide a method for each of the SotW and Incremental protocol\nvariants. Here are the RPC services and methods for each resource type:\n\n-  Listener: Listener Discovery Service (LDS)\n   -  SotW: ListenerDiscoveryService.StreamListeners\n   -  Incremental: ListenerDiscoveryService.DeltaListeners\n-  RouteConfiguration: Route Discovery Service (RDS)\n   -  SotW: RouteDiscoveryService.StreamRoutes\n   -  Incremental: RouteDiscoveryService.DeltaRoutes\n-  ScopedRouteConfiguration: Scoped Route Discovery Service (SRDS)\n   -  SotW: ScopedRouteDiscoveryService.StreamScopedRoutes\n   -  Incremental: ScopedRouteDiscoveryService.DeltaScopedRoutes\n-  VirtualHost: Virtual Host Discovery Service (VHDS)\n   -  SotW: N/A\n   -  Incremental: VirtualHostDiscoveryService.DeltaVirtualHosts\n-  Cluster: Cluster Discovery Service (CDS)\n   -  SotW: ClusterDiscoveryService.StreamClusters\n   -  Incremental: ClusterDiscoveryService.DeltaClusters\n-  ClusterLoadAssignment: Endpoint Discovery Service (EDS)\n   -  SotW: EndpointDiscoveryService.StreamEndpoints\n   -  Incremental: EndpointDiscoveryService.DeltaEndpoints\n-  Secret: Secret Discovery Service (SDS)\n   -  SotW: SecretDiscoveryService.StreamSecrets\n   -  Incremental: SecretDiscoveryService.DeltaSecrets\n-  Runtime: Runtime Discovery Service (RTDS)\n   -  SotW: RuntimeDiscoveryService.StreamRuntime\n   -  Incremental: RuntimeDiscoveryService.DeltaRuntime\n\nIn the aggregated protocol variants, all resource types are multiplexed on a single gRPC stream,\nwhere each resource type is treated as a separate logical stream within the aggregated stream.\nIn effect, it simply combines all of the above separate APIs into a single stream by 
treating\nrequests and responses for each resource type as a separate sub-stream on the single aggregated\nstream. The RPC service and methods for the aggregated protocol variants are:\n\n-  SotW: AggregatedDiscoveryService.StreamAggregatedResources\n-  Incremental: AggregatedDiscoveryService.DeltaAggregatedResources\n\nFor all of the SotW methods, the request type is :ref:`DiscoveryRequest\n<envoy_api_msg_DiscoveryRequest>` and the response type is :ref:`DiscoveryResponse\n<envoy_api_msg_DiscoveryResponse>`.\n\nFor all of the incremental methods, the request type is :ref:`DeltaDiscoveryRequest\n<envoy_api_msg_DeltaDiscoveryRequest>` and the response type is :ref:`DeltaDiscoveryResponse\n<envoy_api_msg_DeltaDiscoveryResponse>`.\n\nConfiguring Which Variant to Use\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIn the xDS API, the :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` message indicates how to\nobtain resources of a particular type. If the :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>`\ncontains a gRPC :ref:`ApiConfigSource <envoy_api_msg_core.ApiConfigSource>`, it points to an\nupstream cluster for the management server; this will initiate an independent bidirectional gRPC\nstream for each xDS resource type, potentially to distinct management servers. 
If the\n:ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` contains a :ref:`AggregatedConfigSource\n<envoy_api_msg_core.AggregatedConfigSource>`, it tells the client to use :ref:`ADS\n<xds_protocol_ads>`.\n\nCurrently, the client is expected to be given some local configuration that tells it how to obtain\nthe :ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resources.\n:ref:`Listener <envoy_api_msg_Listener>` resources may include a\n:ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` that indicates how the\n:ref:`RouteConfiguration <envoy_api_msg_RouteConfiguration>` resources are obtained, and\n:ref:`Cluster <envoy_api_msg_Cluster>` resources may include a\n:ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` that indicates how the\n:ref:`ClusterLoadAssignment <envoy_api_msg_ClusterLoadAssignment>` resources are obtained.\n\nClient Configuration\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nIn Envoy, the bootstrap file contains two :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>`\nmessages, one indicating how :ref:`Listener <envoy_api_msg_Listener>` resources are obtained and\nanother indicating how :ref:`Cluster <envoy_api_msg_Cluster>` resources are obtained. It also\ncontains a separate :ref:`ApiConfigSource <envoy_api_msg_core.ApiConfigSource>` message indicating\nhow to contact the ADS server, which will be used whenever a :ref:`ConfigSource\n<envoy_api_msg_core.ConfigSource>` message (either in the bootstrap file or in a :ref:`Listener\n<envoy_api_msg_Listener>` or :ref:`Cluster <envoy_api_msg_Cluster>` resource obtained from a\nmanagement server) contains an :ref:`AggregatedConfigSource\n<envoy_api_msg_core.AggregatedConfigSource>` message.\n\nIn a gRPC client that uses xDS, only ADS is supported, and the bootstrap file contains the name of\nthe ADS server, which will be used for all resources. 
The :ref:`ConfigSource\n<envoy_api_msg_core.ConfigSource>` messages in the :ref:`Listener <envoy_api_msg_Listener>` and\n:ref:`Cluster <envoy_api_msg_Cluster>` resources must contain :ref:`AggregatedConfigSource\n<envoy_api_msg_core.AggregatedConfigSource>` messages.\n\nThe xDS transport Protocol\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nTransport API version\n^^^^^^^^^^^^^^^^^^^^^\n\nIn addition to the resource type version described above, the xDS wire protocol has a\ntransport version associated with it. This provides type versioning for messages such as\n:ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` and :ref:`DiscoveryResponse\n<envoy_api_msg_DiscoveryResponse>`. It is also encoded in the gRPC method name, so a server\ncan determine which version a client is speaking based on which method it calls.\n\nBasic Protocol Overview\n^^^^^^^^^^^^^^^^^^^^^^^\n\nEach xDS stream begins with a :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` from the\nclient, which specifies the list of resources to subscribe to, the type URL corresponding to the\nsubscribed resources, the node identifier, and an optional resource type instance version\nindicating the most recent version of the resource type that the client has already seen (see\n:ref:`ACK/NACK and resource type instance version <xds_ack_nack>` for details).\n\nThe server will then send a :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` containing\nany resources that the client has subscribed to that have changed since the last resource type\ninstance version that the client indicated it has seen. 
The server may send additional responses\nat any time when the subscribed resources change.\n\nWhenever the client receives a new response, it will send another request indicating whether or\nnot the resources in the response were valid (see\n:ref:`ACK/NACK and resource type instance version <xds_ack_nack>` for details).\n\nOnly the first request on a stream is guaranteed to carry the node identifier.\nThe subsequent discovery requests on the same stream may carry an empty node\nidentifier. This holds true regardless of the acceptance of the discovery\nresponses on the same stream. The node identifier should always be identical if\npresent more than once on the stream. It is sufficient to only check the first\nmessage for the node identifier as a result.\n\n.. _xds_ack_nack:\n\nACK/NACK and resource type instance version\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nEvery xDS resource type has a version string that indicates the version for that resource type.\nWhenever one resource of that type changes, the version is changed.\n\nIn a response sent by the xDS server, the\n:ref:`version_info<envoy_api_field_DiscoveryResponse.version_info>` field indicates the current\nversion for that resource type. The client then sends another request to the server with the\n:ref:`version_info<envoy_api_field_DiscoveryRequest.version_info>` field indicating the most\nrecent valid version seen by the client. 
This provides a way for the server to determine when\nit sends a version that the client considers invalid.\n\n(In the :ref:`incremental protocol variants <xds_protocol_delta>`, the resource type instance\nversion is sent by the server in the\n:ref:`system_version_info<envoy_api_field_DeltaDiscoveryResponse.system_version_info>` field.\nHowever, this information is not actually used by the client to communicate which resources are\nvalid, because the incremental API variants have a separate mechanism for that.)\n\nThe resource type instance version is separate for each resource type. When using the aggregated\nprotocol variants, each resource type has its own version even though all resource types are being\nsent on the same stream.\n\nThe resource type is also separate for each xDS server (where an xDS server is identified by a\nunique :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>`). When obtaining resources of a\ngiven type from multiple xDS servers, each xDS server will have a different notion of version.\n\nNote that the version for a resource type is not a property of an individual xDS stream but rather\na property of the resources themselves. If the stream becomes broken and the client creates a new\nstream, the client's initial request on the new stream should indicate the most recent version\nseen by the client on the previous stream.\n\nAn example EDS request might be:\n\n.. code:: yaml\n\n    version_info:\n    node: { id: envoy }\n    resource_names:\n    - foo\n    - bar\n    type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\n    response_nonce:\n\nThe management server may reply either immediately or when the requested\nresources are available with a :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`, e.g.:\n\n.. 
code:: yaml\n\n    version_info: X\n    resources:\n    - foo ClusterLoadAssignment proto encoding\n    - bar ClusterLoadAssignment proto encoding\n    type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\n    nonce: A\n\nAfter processing the :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`, Envoy will send a new\nrequest on the stream, specifying the last version successfully applied\nand the nonce provided by the management server. The version provides Envoy and the\nmanagement server a shared notion of the currently applied configuration,\nas well as a mechanism to ACK/NACK configuration updates.\n\nACK\n^^^\n\nIf the update was successfully applied, the\n:ref:`version_info <envoy_api_field_DiscoveryRequest.version_info>` will be **X**, as indicated\nin the sequence diagram:\n\n.. figure:: diagrams/simple-ack.svg\n   :alt: Version update after ACK\n\nNACK\n^^^^\n\nIf Envoy had instead rejected configuration\nupdate **X**, it would reply with :ref:`error_detail <envoy_api_field_DiscoveryRequest.error_detail>`\npopulated and its previous version, which in this case was the empty\ninitial version. The :ref:`error_detail <envoy_api_field_DiscoveryRequest.error_detail>` has\nmore details around the exact error message populated in the message field:\n\n.. figure:: diagrams/simple-nack.svg\n   :alt: No version update after NACK\n\nIn the sequence diagrams, the following format is used to abbreviate messages:\n\n- *DiscoveryRequest*: (V=version_info,R=resource_names,N=response_nonce,T=type_url)\n- *DiscoveryResponse*: (V=version_info,R=resources,N=nonce,T=type_url)\n\nAfter a NACK, an API update may succeed at a new version **Y**:\n\n\n.. 
figure:: diagrams/later-ack.svg\n   :alt: ACK after NACK\n\nACK and NACK semantics summary\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n- The xDS client should ACK or NACK every :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\n  received from the management server.\n- Like all other requests, the nonce from the :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\n  is sent as :ref:`response_nonce <envoy_api_field_DiscoveryRequest.response_nonce>`.\n  As described in :ref:`resource update <xds_protocol_resource_update>` the nonce is\n  used in certain race conditions to disambiguate between ACK and NACK.\n- ACK signifies successful configuration update and contains the\n  :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` from the\n  :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`.\n- NACK signifies unsuccessful configuration update and contains the previous (existing)\n  :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>`.\n- Only the NACK should populate the :ref:`error_detail <envoy_api_field_DiscoveryRequest.error_detail>`.\n\n.. _xds_protocol_resource_update:\n\nWhen to send an update\n^^^^^^^^^^^^^^^^^^^^^^\n\nThe management server should only send updates to the Envoy client when\nthe resources in the :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` have changed. Envoy replies\nto any :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` with a :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` containing the\nACK/NACK immediately after it has been either accepted or rejected. 
If\nthe management server provides the same set of resources rather than\nwaiting for a change to occur, it will cause needless work on both the client and the management\nserver, which could have a severe performance impact.\n\nWithin a stream, new :ref:`DiscoveryRequests <envoy_api_msg_DiscoveryRequest>` supersede any prior\n:ref:`DiscoveryRequests <envoy_api_msg_DiscoveryRequest>` having the same resource type. This means that\nthe management server only needs to respond to the latest\n:ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` on each stream for any given resource type.\n\n.. _xds_protocol_resource_hints:\n\nHow the client specifies what resources to return\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nxDS requests allow the client to specify a set of resource names as a hint to the server about\nwhich resources the client is interested in. In the SotW protocol variants, this is done via the\n:ref:`resource_names <envoy_api_field_DiscoveryRequest.resource_names>` specified in the\n:ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>`; in the incremental protocol variants,\nthis is done via the :ref:`resource_names_subscribe\n<envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>` and\n:ref:`resource_names_unsubscribe\n<envoy_api_field_DeltaDiscoveryRequest.resource_names_unsubscribe>` fields in the\n:ref:`DeltaDiscoveryRequest <envoy_api_msg_DeltaDiscoveryRequest>`.\n\nNormally (see below for exceptions), requests must specify the set of resource names that the\nclient is interested in. The management server must supply the requested resources if they exist.\nThe client will silently ignore any supplied resources that were not explicitly requested. When\nthe client sends a new request that changes the set of resources being requested, the server must\nresend any newly requested resources, even if it previously sent those resources without having\nbeen asked for them and the resources have not changed since that time. 
If the list of resource\nnames becomes empty, that means that the client is no longer interested in any resources of the\nspecified type.\n\nFor :ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resource\ntypes, there is also a \"wildcard\" mode, which is triggered when the initial request on the stream\nfor that resource type contains no resource names. In this case, the server should use\nsite-specific business logic to determine the full set of resources that the client is interested\nin, typically based on the client's :ref:`node <envoy_api_msg_Core.Node>` identification. Note\nthat once a stream has entered wildcard mode for a given resource type, there is no way to change\nthe stream out of wildcard mode; resource names specified in any subsequent request on the stream\nwill be ignored.\n\nClient Behavior\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nEnvoy will always use wildcard mode for :ref:`Listener <envoy_api_msg_Listener>` and\n:ref:`Cluster <envoy_api_msg_Cluster>` resources. However, other xDS clients (such as gRPC clients\nthat use xDS) may specify explicit resource names for these resource types, for example if they\nonly have a singleton listener and already know its name from some out-of-band configuration.\n\nGrouping Resources into Responses\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIn the incremental protocol variants, the server sends each resource in its own response. This\nmeans that if the server has previously sent 100 resources and only one of them has changed, it\nmay send a response containing only the changed resource; it does not need to resend the 99\nresources that have not changed, and the client must not delete the unchanged resources.\n\nIn the SotW protocol variants, all resource types except for :ref:`Listener\n<envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` are grouped into responses\nin the same way as in the incremental protocol variants. 
However,\n:ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resource types\nare handled differently: the server must include the complete state of the world, meaning that all\nresources of the relevant type that are needed by the client must be included, even if they did\nnot change since the last response. This means that if the server has previously sent 100\nresources and only one of them has changed, it must resend all 100 of them, even the 99 that were\nnot modified.\n\nNote that all of the protocol variants operate on units of whole named resources. There is\nno mechanism for providing incremental updates of repeated fields within a named resource.\nMost notably, there is currently no mechanism for incrementally updating individual\nendpoints within an EDS response.\n\nDuplicate Resource Names\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nIt is an error for a server to send a single response that contains the same resource name\ntwice. Clients should NACK responses that contain multiple instances of the same resource name.\n\nDeleting Resources\n^^^^^^^^^^^^^^^^^^\n\nIn the incremental protocol variants, the server signals the client that a resource should be\ndeleted via the :ref:`removed_resources <envoy_api_field_DeltaDiscoveryResponse.removed_resources>`\nfield of the response. This tells the client to remove the resource from its local cache.\n\nIn the SotW protocol variants, the criteria for deleting resources is more complex. For\n:ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resource types,\nif a previously seen resource is not present in a new response, that indicates that the resource\nhas been removed, and the client must delete it; a response containing no resources means to delete\nall resources of that type. 
However, for other resource types, the API provides no mechanism for\nthe server to tell the client that resources have been deleted; instead, deletions are indicated\nimplicitly by parent resources being changed to no longer refer to a child resource. For example,\nwhen the client receives an LDS update removing a :ref:`Listener <envoy_api_msg_Listener>`\nthat was previously pointing to :ref:`RouteConfiguration <envoy_api_msg_RouteConfiguration>` A,\nif no other :ref:`Listener <envoy_api_msg_Listener>` is pointing to :ref:`RouteConfiguration\n<envoy_api_msg_RouteConfiguration>` A, then the client may delete A. For those resource types,\nan empty :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` is effectively a no-op\nfrom the client's perspective.\n\nKnowing When a Requested Resource Does Not Exist\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe SotW protocol variants do not provide any explicit mechanism to determine when a requested\nresource does not exist.\n\nResponses for :ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>`\nresource types must include all resources requested by the client. 
However, it may not be possible\nfor the client to know that a resource does not exist based solely on its absence in a response,\nbecause the delivery of the updates is eventually consistent: if the client initially sends a\nrequest for resource A, then sends a request for resources A and B, and then sees a response\ncontaining only resource A, the client cannot conclude that resource B does not exist, because\nthe response may have been sent on the basis of the first request, before the server saw the\nsecond request.\n\nFor other resource types, because each resource can be sent in its own response, there is no way\nto know from the next response whether the newly requested resource exists, because the next\nresponse could be an unrelated update for another resource that had already been subscribed to\npreviously.\n\nAs a result, clients are expected to use a timeout (recommended duration is 15 seconds) after\nsending a request for a new resource, after which they will consider the requested resource to\nnot exist if they have not received the resource. In Envoy, this is done for\n:ref:`RouteConfiguration <envoy_api_msg_RouteConfiguration>` and :ref:`ClusterLoadAssignment\n<envoy_api_msg_ClusterLoadAssignment>` resources during :ref:`resource warming\n<xds_protocol_resource_warming>`.\n\nNote that this timeout is not strictly necessary when using wildcard mode for :ref:`Listener\n<envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resource types, because\nin that case every response will contain all existing resources that are relevant to the\nclient, so the client can know that a resource does not exist by its absence in the next\nresponse it sees. 
However, using a timeout is still recommended in this case, since it protects\nagainst the case where the management server fails to send a response in a timely manner.\n\nNote that even if a requested resource does not exist at the moment when the client requests it,\nthat resource could be created at any time. Management servers must remember the set of resources\nbeing requested by the client, and if one of those resources springs into existence later, the\nserver must send an update to the client informing it of the new resource. Clients that initially\nsee a resource that does not exist must be prepared for the resource to be created at any time.\n\nUnsubscribing From Resources\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIn the incremental protocol variants, resources can be unsubscribed to via the\n:ref:`resource_names_unsubscribe\n<envoy_api_field_DeltaDiscoveryRequest.resource_names_unsubscribe>` field.\n\nIn the SotW protocol variants, each request must contain the full list of resource names being\nsubscribed to in the :ref:`resource_names <envoy_api_field_DiscoveryRequest.resource_names>` field,\nso unsubscribing to a set of resources is done by sending a new request containing all resource\nnames that are still being subscribed to but not containing the resource names being unsubscribed\nto. 
For example, if the client had previously been subscribed to resources A and B but wishes to\nunsubscribe from B, it must send a new request containing only resource A.\n\nNote that for :ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>`\nresource types where the stream is in \"wildcard\" mode (see :ref:`How the client specifies what\nresources to return <xds_protocol_resource_hints>` for details), the set of resources being\nsubscribed to is determined by the server instead of the client, so there is no mechanism\nfor the client to unsubscribe from resources.\n\nRequesting Multiple Resources on a Single Stream\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nFor EDS/RDS, Envoy may either generate a distinct stream for each\nresource of a given type (e.g. if each :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` has its own\ndistinct upstream cluster for a management server), or may combine\ntogether multiple resource requests for a given resource type when they\nare destined for the same management server. While this is left to\nimplementation specifics, management servers should be capable of\nhandling one or more :ref:`resource_names <envoy_api_field_DiscoveryRequest.resource_names>` for a given resource type in\neach request. Both sequence diagrams below are valid for fetching two\nEDS resources `{foo, bar}`:\n\n|Multiple EDS requests on the same stream| |Multiple EDS requests on\ndistinct streams|\n\nResource updates\n^^^^^^^^^^^^^^^^\n\nAs discussed above, Envoy may update the list of :ref:`resource_names <envoy_api_field_DiscoveryRequest.resource_names>` it\npresents to the management server in each :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` that\nACK/NACKs a specific :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`. 
In addition, Envoy may later\nissue additional :ref:`DiscoveryRequests <envoy_api_msg_DiscoveryRequest>` at a given :ref:`version_info <envoy_api_field_DiscoveryRequest.version_info>` to\nupdate the management server with new resource hints. For example, if\nEnvoy is at EDS version **X** and knows only about cluster ``foo``, but\nthen receives a CDS update and learns about ``bar`` in addition, it may\nissue an additional :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` for **X** with `{foo,bar}` as\n`resource_names`.\n\n.. figure:: diagrams/cds-eds-resources.svg\n   :alt: CDS response leads to EDS resource hint update\n\nThere is a race condition that may arise here; if after a resource hint\nupdate is issued by Envoy at **X**, but before the management server\nprocesses the update it replies with a new version **Y**, the resource\nhint update may be interpreted as a rejection of **Y** by presenting an\n**X** :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>`. To avoid this, the management server provides a\n``nonce`` that Envoy uses to indicate the specific :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\neach :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` corresponds to:\n\n.. figure:: diagrams/update-race.svg\n   :alt: EDS update race motivates nonces\n\nThe management server should not send a :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` for any\n:ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` that has a stale nonce. A nonce becomes stale\nfollowing a newer nonce being presented to Envoy in a\n:ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`. A management server does not need to send an\nupdate until it determines a new version is available. Earlier requests\nat a version then also become stale. It may process multiple\n:ref:`DiscoveryRequests <envoy_api_msg_DiscoveryRequest>` at a version until a new version is ready.\n\n.. 
figure:: diagrams/stale-requests.svg\n   :alt: Requests become stale\n\nAn implication of the above resource update sequencing is that Envoy\ndoes not expect a :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` for every :ref:`DiscoveryRequests <envoy_api_msg_DiscoveryRequest>`\nit issues.\n\n.. _xds_protocol_resource_warming:\n\nResource warming\n~~~~~~~~~~~~~~~~\n\n:ref:`Clusters <arch_overview_cluster_warming>` and\n:ref:`Listeners <config_listeners_lds>`\ngo through warming before they can serve requests. This process\nhappens both during :ref:`Envoy initialization <arch_overview_initialization>`\nand when the `Cluster` or `Listener` is updated. Warming of\n`Cluster` is completed only when a `ClusterLoadAssignment` response\nis supplied by management server. Similarly, warming of `Listener` is\ncompleted only when a `RouteConfiguration` is supplied by management\nserver if the listener refers to an RDS configuration. Management server\nis expected to provide the EDS/RDS updates during warming. If management\nserver does not provide EDS/RDS responses, Envoy will not initialize\nitself during the initialization phase and the updates sent via CDS/LDS\nwill not take effect until EDS/RDS responses are supplied.\n\n.. _xds_protocol_eventual_consistency_considerations:\n\nEventual consistency considerations\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSince Envoy's xDS APIs are eventually consistent, traffic may drop\nbriefly during updates. 
For example, if only cluster **X** is known via\nCDS/EDS, a `RouteConfiguration` references cluster **X** and is then\nadjusted to cluster **Y** just before the CDS/EDS update providing\n**Y**, traffic will be blackholed until **Y** is known about by the\nEnvoy instance.\n\nFor some applications, a temporary drop of traffic is acceptable,\nretries at the client or by other Envoy sidecars will hide this drop.\nFor other scenarios where drop can't be tolerated, traffic drop could\nhave been avoided by providing a CDS/EDS update with both **X** and\n**Y**, then the RDS update repointing from **X** to **Y** and then a\nCDS/EDS update dropping **X**.\n\nIn general, to avoid traffic drop, sequencing of updates should follow a\nmake before break model, wherein:\n\n- CDS updates (if any) must always be pushed first.\n- EDS updates (if any) must arrive after CDS updates for the respective clusters.\n- LDS updates must arrive after corresponding CDS/EDS updates.\n- RDS updates related to the newly added listeners must arrive after CDS/EDS/LDS updates.\n- VHDS updates (if any) related to the newly added RouteConfigurations must arrive after RDS updates.\n- Stale CDS clusters and related EDS endpoints (ones no longer being referenced) can then be removed.\n\nxDS updates can be pushed independently if no new\nclusters/routes/listeners are added or if it's acceptable to temporarily\ndrop traffic during updates. Note that in case of LDS updates, the\nlisteners will be warmed before they receive traffic, i.e. the dependent\nroutes are fetched through RDS if configured. Clusters are warmed when\nadding/removing/updating clusters. On the other hand, routes are not\nwarmed, i.e., the management plane must ensure that clusters referenced\nby a route are in place, before pushing the updates for a route.\n\n.. 
_xds_protocol_ads:\n\nAggregated Discovery Service\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nIt's challenging to provide the above guarantees on sequencing to avoid\ntraffic drop when management servers are distributed. ADS allows a single\nmanagement server, via a single gRPC stream, to deliver all API updates.\nThis provides the ability to carefully sequence updates to avoid traffic\ndrop. With ADS, a single stream is used with multiple independent\n:ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>`/:ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` sequences multiplexed via the\ntype URL. For any given type URL, the above sequencing of\n:ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` and :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` messages applies. An\nexample update sequence might look like:\n\n.. figure:: diagrams/ads.svg\n   :alt: EDS/CDS multiplexed on an ADS stream\n\nA single ADS stream is available per Envoy instance.\n\nAn example minimal ``bootstrap.yaml`` fragment for ADS configuration is:\n\n.. code:: yaml\n\n    node:\n      id: <node identifier>\n    dynamic_resources:\n      cds_config: {ads: {}}\n      lds_config: {ads: {}}\n      ads_config:\n        api_type: GRPC\n        grpc_services:\n          envoy_grpc:\n            cluster_name: ads_cluster\n    static_resources:\n      clusters:\n      - name: ads_cluster\n        connect_timeout: { seconds: 5 }\n        type: STATIC\n        hosts:\n        - socket_address:\n            address: <ADS management server IP address>\n            port_value: <ADS management server port>\n        lb_policy: ROUND_ROBIN\n        # It is recommended to configure either HTTP/2 or TCP keepalives in order to detect\n        # connection issues, and allow Envoy to reconnect. 
TCP keepalive is less expensive, but\n        # may be inadequate if there is a TCP proxy between Envoy and the management server.\n        # HTTP/2 keepalive is slightly more expensive, but may detect issues through more types\n        # of intermediate proxies.\n        http2_protocol_options:\n          connection_keepalive:\n            interval: 30s\n            timeout: 5s\n        upstream_connection_options:\n          tcp_keepalive:\n            ...\n    admin:\n      ...\n\n.. _xds_protocol_delta:\n\nIncremental xDS\n~~~~~~~~~~~~~~~\n\nIncremental xDS is a separate xDS endpoint that:\n\n-  Allows the protocol to communicate on the wire in terms of\n   resource/resource name deltas (\"Delta xDS\"). This supports the goal\n   of scalability of xDS resources. Rather than deliver all 100k\n   clusters when a single cluster is modified, the management server\n   only needs to deliver the single cluster that changed.\n-  Allows the Envoy to on-demand / lazily request additional resources.\n   For example, requesting a cluster only when a request for that\n   cluster arrives.\n\nAn Incremental xDS session is always in the context of a gRPC\nbidirectional stream. This allows the xDS server to keep track of the\nstate of xDS clients connected to it. There is no REST version of\nIncremental xDS yet.\n\nIn the delta xDS wire protocol, the nonce field is required and used to\npair a :ref:`DeltaDiscoveryResponse <envoy_api_msg_DeltaDiscoveryResponse>`\nto a :ref:`DeltaDiscoveryRequest <envoy_api_msg_DeltaDiscoveryRequest>`\nACK or NACK. 
Optionally, a response message level :ref:`system_version_info <envoy_api_field_DeltaDiscoveryResponse.system_version_info>`\nis present for debugging purposes only.\n\n:ref:`DeltaDiscoveryRequest <envoy_api_msg_DeltaDiscoveryRequest>` can be sent in the following situations:\n\n- Initial message in a xDS bidirectional gRPC stream.\n- As an ACK or NACK response to a previous :ref:`DeltaDiscoveryResponse <envoy_api_msg_DeltaDiscoveryResponse>`. In this case the :ref:`response_nonce <envoy_api_field_DiscoveryRequest.response_nonce>` is set to the nonce value in the Response. ACK or NACK is determined by the absence or presence of :ref:`error_detail <envoy_api_field_DiscoveryRequest.error_detail>`.\n- Spontaneous :ref:`DeltaDiscoveryRequests <envoy_api_msg_DeltaDiscoveryRequest>` from the client. This can be done to dynamically add or remove elements from the tracked :ref:`resource_names <envoy_api_field_DiscoveryRequest.resource_names>` set. In this case :ref:`response_nonce <envoy_api_field_DiscoveryRequest.response_nonce>` must be omitted.\n\nIn this first example the client connects and receives a first update\nthat it ACKs. The second update fails and the client NACKs the update.\nLater the xDS client spontaneously requests the \"wc\" resource.\n\n.. figure:: diagrams/incremental.svg\n   :alt: Incremental session example\n\nOn reconnect the Incremental xDS client may tell the server of its known\nresources to avoid resending them over the network. Because no state is\nassumed to be preserved from the previous stream, the reconnecting\nclient must provide the server with all resource names it is interested\nin.\n\n.. figure:: diagrams/incremental-reconnect.svg\n   :alt: Incremental reconnect example\n\nResource names\n^^^^^^^^^^^^^^\n\nResources are identified by a resource name or an alias. Aliases of a\nresource, if present, can be identified by the alias field in the\nresource of a :ref:`DeltaDiscoveryResponse <envoy_api_msg_DeltaDiscoveryResponse>`. 
The resource name will be\nreturned in the name field in the resource of a\n:ref:`DeltaDiscoveryResponse <envoy_api_msg_DeltaDiscoveryResponse>`.\n\n.. _xds_protocol_delta_subscribe:\n\nSubscribing to Resources\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe client can send either an alias or the name of a resource in the\n:ref:`resource_names_subscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>` field of a :ref:`DeltaDiscoveryRequest <envoy_api_msg_DeltaDiscoveryRequest>` in\norder to subscribe to a resource. Both the names and aliases of\nresources should be checked in order to determine whether the entity in\nquestion has been subscribed to.\n\nA :ref:`resource_names_subscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>` field may contain resource names that the\nserver believes the client is already subscribed to, and furthermore has\nthe most recent versions of. However, the server *must* still provide\nthose resources in the response; due to implementation details hidden\nfrom the server, the client may have \"forgotten\" those resources despite\napparently remaining subscribed.\n\n.. _xds_protocol_unsubscribe:\n\nUnsubscribing from Resources\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nWhen a client loses interest in some resources, it will indicate that\nwith the :ref:`resource_names_unsubscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_unsubscribe>` field of a\n:ref:`DeltaDiscoveryRequest <envoy_api_msg_DeltaDiscoveryRequest>`. As with :ref:`resource_names_subscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>`, these\nmay be resource names or aliases.\n\nA :ref:`resource_names_unsubscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_unsubscribe>` field may contain superfluous resource\nnames, which the server thought the client was already not subscribed\nto. 
The server must cleanly process such a request; it can simply ignore\nthese phantom unsubscriptions.\n\nKnowing When a Requested Resource Does Not Exist\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nWhen a resource subscribed to by a client does not exist, the server will send a :ref:`Resource\n<envoy_api_msg_Resource>` whose :ref:`name <envoy_api_field_Resource.name>` field matches the\nname that the client subscribed to and whose :ref:`resource <envoy_api_field_Resource.resource>`\nfield is unset. This allows the client to quickly determine when a resource does not exist without\nwaiting for a timeout, as would be done in the SotW protocol variants. However, clients are still\nencouraged to use a timeout to protect against the case where the management server fails to send\na response in a timely manner.\n\nREST-JSON polling subscriptions\n-------------------------------\n\nSynchronous (long) polling via REST endpoints is also available for the\nxDS singleton APIs. The above sequencing of messages is similar, except\nno persistent stream is maintained to the management server. It is\nexpected that there is only a single outstanding request at any point in\ntime, and as a result the response nonce is optional in REST-JSON. The\n`JSON canonical transform of\nproto3 <https://developers.google.com/protocol-buffers/docs/proto3#json>`__\nis used to encode :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` and :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\nmessages. ADS is not available for REST-JSON polling.\n\nWhen the poll period is set to a small value, with the intention of long\npolling, then there is also a requirement to avoid sending a\n:ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` unless a change to the underlying resources has\noccurred via a :ref:`resource update <xds_protocol_resource_update>`.\n\n.. |Multiple EDS requests on the same stream| image:: diagrams/eds-same-stream.svg\n.. 
|Multiple EDS requests on distinct streams| image:: diagrams/eds-distinct-stream.svg\n"
  },
  {
    "path": "bazel/BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\", \"cc_proto_library\")\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\nload(\"//bazel:envoy_internal.bzl\", \"envoy_select_force_libcpp\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nexports_files([\n    \"gen_sh_test_runner.sh\",\n    \"sh_test_wrapper.sh\",\n    \"test_for_benchmark_wrapper.sh\",\n])\n\ngenrule(\n    name = \"gnu_build_id\",\n    outs = [\"gnu_build_id.ldscript\"],\n    cmd = \"\"\"\n      echo --build-id=0x$$(\n          grep BUILD_SCM_REVISION bazel-out/volatile-status.txt \\\\\n        | sed 's/^BUILD_SCM_REVISION //') \\\\\n        > $@\n    \"\"\",\n    # Undocumented attr to depend on workspace status files.\n    # https://github.com/bazelbuild/bazel/issues/4942\n    stamp = 1,\n)\n\n# For macOS, which doesn't have GNU ld's `--build-id` flag.\ngenrule(\n    name = \"raw_build_id\",\n    outs = [\"raw_build_id.ldscript\"],\n    cmd = \"\"\"\n      grep BUILD_SCM_REVISION bazel-out/volatile-status.txt \\\\\n    | sed 's/^BUILD_SCM_REVISION //' \\\\\n    | tr -d '\\\\n' \\\\\n    > $@\n    \"\"\",\n    # Undocumented attr to depend on workspace status files.\n    # https://github.com/bazelbuild/bazel/issues/4942\n    stamp = 1,\n)\n\n# A target to optionally link C++ standard library dynamically in sanitizer runs.\n# TSAN doesn't support libc/libstdc++ static linking per doc:\n#   http://releases.llvm.org/8.0.1/tools/clang/docs/ThreadSanitizer.html\ncc_library(\n    name = \"dynamic_stdlib\",\n    linkopts = envoy_select_force_libcpp(\n        [\"-lc++\"],\n        [\"-lstdc++\"],\n    ),\n)\n\ncc_library(\n    name = \"static_stdlib\",\n    linkopts = select({\n        \"//bazel:linux\": [\"-static-libgcc\"],\n        \"//conditions:default\": [],\n    }),\n)\n\nconfig_setting(\n    name = \"windows_opt_build\",\n    values = {\n        \"cpu\": \"x64_windows\",\n        \"compilation_mode\": \"opt\",\n    },\n)\n\nconfig_setting(\n    name = 
\"windows_dbg_build\",\n    values = {\n        \"cpu\": \"x64_windows\",\n        \"compilation_mode\": \"dbg\",\n    },\n)\n\nconfig_setting(\n    name = \"windows_fastbuild_build\",\n    values = {\n        \"cpu\": \"x64_windows\",\n        \"compilation_mode\": \"fastbuild\",\n    },\n)\n\nconfig_setting(\n    name = \"opt_build\",\n    values = {\"compilation_mode\": \"opt\"},\n)\n\nconfig_setting(\n    name = \"fastbuild_build\",\n    values = {\"compilation_mode\": \"fastbuild\"},\n)\n\nconfig_setting(\n    name = \"dbg_build\",\n    values = {\"compilation_mode\": \"dbg\"},\n)\n\nconfig_setting(\n    name = \"no_debug_info\",\n    values = {\"define\": \"no_debug_info=1\"},\n)\n\nconfig_setting(\n    name = \"asan_build\",\n    values = {\"define\": \"ENVOY_CONFIG_ASAN=1\"},\n)\n\nconfig_setting(\n    name = \"tsan_build\",\n    values = {\"define\": \"ENVOY_CONFIG_TSAN=1\"},\n)\n\nconfig_setting(\n    name = \"msan_build\",\n    values = {\"define\": \"ENVOY_CONFIG_MSAN=1\"},\n)\n\nconfig_setting(\n    name = \"coverage_build\",\n    values = {\"define\": \"ENVOY_CONFIG_COVERAGE=1\"},\n)\n\nconfig_setting(\n    name = \"clang_build\",\n    flag_values = {\n        \"@bazel_tools//tools/cpp:compiler\": \"clang\",\n    },\n)\n\nconfig_setting(\n    name = \"gcc_build\",\n    flag_values = {\n        \"@bazel_tools//tools/cpp:compiler\": \"gcc\",\n    },\n)\n\nconfig_setting(\n    name = \"dynamic_link_tests\",\n    values = {\n        \"define\": \"dynamic_link_tests=true\",\n    },\n)\n\nconfig_setting(\n    name = \"disable_tcmalloc\",\n    values = {\"define\": \"tcmalloc=disabled\"},\n)\n\nconfig_setting(\n    name = \"debug_tcmalloc\",\n    values = {\"define\": \"tcmalloc=debug\"},\n)\n\nconfig_setting(\n    name = \"gperftools_tcmalloc\",\n    values = {\"define\": \"tcmalloc=gperftools\"},\n)\n\n# As select() can't be nested we need these specialized settings to avoid ambiguity when choosing\n# tcmalloc's flavor for x86_64 builds.\nconfig_setting(\n 
   name = \"disable_tcmalloc_on_linux_x86_64\",\n    values = {\n        \"define\": \"tcmalloc=disabled\",\n        \"cpu\": \"k8\",\n    },\n)\n\nconfig_setting(\n    name = \"gperftools_tcmalloc_on_linux_x86_64\",\n    values = {\n        \"define\": \"tcmalloc=gperftools\",\n        \"cpu\": \"k8\",\n    },\n)\n\nconfig_setting(\n    name = \"debug_tcmalloc_on_linux_x86_64\",\n    values = {\n        \"define\": \"tcmalloc=debug\",\n        \"cpu\": \"k8\",\n    },\n)\n\nconfig_setting(\n    name = \"disable_signal_trace\",\n    values = {\"define\": \"signal_trace=disabled\"},\n)\n\nconfig_setting(\n    name = \"disable_object_dump_on_signal_trace\",\n    values = {\"define\": \"object_dump_on_signal_trace=disabled\"},\n)\n\nconfig_setting(\n    name = \"disable_deprecated_features\",\n    values = {\"define\": \"deprecated_features=disabled\"},\n)\n\nconfig_setting(\n    name = \"disable_hot_restart\",\n    values = {\"define\": \"hot_restart=disabled\"},\n)\n\n# Used to avoid conflicting selects https://github.com/bazelbuild/bazel/issues/8323\nalias(\n    name = \"disable_hot_restart_or_apple\",\n    actual = select({\n        \":apple\": \":apple\",\n        \"//conditions:default\": \":disable_hot_restart\",\n    }),\n)\n\nconfig_setting(\n    name = \"disable_google_grpc\",\n    values = {\"define\": \"google_grpc=disabled\"},\n)\n\nconfig_setting(\n    name = \"enable_path_normalization_by_default\",\n    values = {\"define\": \"path_normalization_by_default=true\"},\n)\n\nconfig_setting(\n    name = \"enable_new_codecs_in_integration_tests\",\n    values = {\"define\": \"use_new_codecs_in_integration_tests=true\"},\n)\n\ncc_proto_library(\n    name = \"grpc_health_proto\",\n    deps = [\"@com_github_grpc_grpc//src/proto/grpc/health/v1:_health_proto_only\"],\n)\n\nconfig_setting(\n    name = \"enable_exported_symbols\",\n    values = {\"define\": \"exported_symbols=enabled\"},\n)\n\nconfig_setting(\n    name = \"enable_log_debug_assert_in_release\",\n    
values = {\"define\": \"log_debug_assert_in_release=enabled\"},\n)\n\nconfig_setting(\n    name = \"disable_known_issue_asserts\",\n    values = {\"define\": \"disable_known_issue_asserts=true\"},\n)\n\nconfig_setting(\n    name = \"enable_perf_annotation\",\n    values = {\"define\": \"perf_annotation=enabled\"},\n)\n\nconfig_setting(\n    name = \"force_libcpp\",\n    values = {\"define\": \"force_libcpp=enabled\"},\n)\n\nconfig_setting(\n    name = \"boringssl_fips\",\n    constraint_values = [\n        \"@bazel_tools//platforms:linux\",\n        \"@bazel_tools//platforms:x86_64\",\n    ],\n    values = {\"define\": \"boringssl=fips\"},\n)\n\nconfig_setting(\n    name = \"boringssl_disabled\",\n    values = {\"define\": \"boringssl=disabled\"},\n)\n\nconfig_setting(\n    name = \"zlib_ng\",\n    constraint_values = [\n        \"@bazel_tools//platforms:linux\",\n    ],\n    values = {\"define\": \"zlib=ng\"},\n)\n\nconfig_setting(\n    name = \"enable_quiche\",\n    values = {\"define\": \"quiche=enabled\"},\n)\n\n# TODO: consider converting WAVM VM support to an extension (https://github.com/envoyproxy/envoy/issues/12574)\nconfig_setting(\n    name = \"wasm_all\",\n    values = {\"define\": \"wasm=enabled\"},\n)\n\nconfig_setting(\n    name = \"wasm_wavm\",\n    values = {\"define\": \"wasm=wavm\"},\n)\n\nconfig_setting(\n    name = \"wasm_v8\",\n    values = {\"define\": \"wasm=v8\"},\n)\n\nconfig_setting(\n    name = \"wasm_none\",\n    values = {\"define\": \"wasm=disabled\"},\n)\n\n# Alias pointing to the selected version of BoringSSL:\n# - BoringSSL FIPS from @boringssl_fips//:ssl,\n# - non-FIPS BoringSSL from @boringssl//:ssl.\nalias(\n    name = \"boringssl\",\n    actual = select({\n        \"//bazel:boringssl_fips\": \"@boringssl_fips//:ssl\",\n        \"//conditions:default\": \"@boringssl//:ssl\",\n    }),\n)\n\nconfig_setting(\n    name = \"linux_x86_64\",\n    values = {\"cpu\": \"k8\"},\n)\n\nconfig_setting(\n    name = \"linux_aarch64\",\n    
values = {\"cpu\": \"aarch64\"},\n)\n\nconfig_setting(\n    name = \"linux_ppc\",\n    values = {\"cpu\": \"ppc\"},\n)\n\nconfig_setting(\n    name = \"linux_s390x\",\n    values = {\"cpu\": \"s390x\"},\n)\n\nconfig_setting(\n    name = \"linux_mips64\",\n    values = {\"cpu\": \"mips64\"},\n)\n\nconfig_setting(\n    name = \"windows_x86_64\",\n    values = {\"cpu\": \"x64_windows\"},\n)\n\n# Configuration settings to make doing selects for Apple vs non-Apple platforms\n# easier. More details: https://docs.bazel.build/versions/master/configurable-attributes.html#config_settingaliasing\nconfig_setting(\n    name = \"darwin\",\n    values = {\"cpu\": \"darwin\"},\n)\n\nconfig_setting(\n    name = \"darwin_x86_64\",\n    values = {\"cpu\": \"darwin_x86_64\"},\n)\n\nconfig_setting(\n    name = \"ios_i386\",\n    values = {\"cpu\": \"ios_i386\"},\n)\n\nconfig_setting(\n    name = \"ios_x86_64\",\n    values = {\"cpu\": \"ios_x86_64\"},\n)\n\nconfig_setting(\n    name = \"ios_armv7\",\n    values = {\"cpu\": \"ios_armv7\"},\n)\n\nconfig_setting(\n    name = \"ios_armv7s\",\n    values = {\"cpu\": \"ios_armv7s\"},\n)\n\nconfig_setting(\n    name = \"ios_arm64\",\n    values = {\"cpu\": \"ios_arm64\"},\n)\n\nconfig_setting(\n    name = \"ios_arm64e\",\n    values = {\"cpu\": \"ios_arm64e\"},\n)\n\nconfig_setting(\n    name = \"manual_stamp\",\n    values = {\"define\": \"manual_stamp=manual_stamp\"},\n)\n\nconfig_setting(\n    name = \"android_logger\",\n    values = {\"define\": \"logger=android\"},\n)\n\nconfig_setting(\n    name = \"libfuzzer_coverage\",\n    define_values = {\n        \"FUZZING_ENGINE\": \"libfuzzer\",\n        \"ENVOY_CONFIG_COVERAGE\": \"1\",\n    },\n)\n\nconfig_setting(\n    name = \"libfuzzer\",\n    values = {\"define\": \"FUZZING_ENGINE=libfuzzer\"},\n)\n\nalias(\n    name = \"apple\",\n    actual = select(\n        {\n            \":darwin\": \":darwin\",\n            \":darwin_x86_64\": \":darwin_x86_64\",\n            \":ios_arm64\": 
\":ios_arm64\",\n            \":ios_arm64e\": \":ios_arm64e\",\n            \":ios_armv7\": \":ios_armv7\",\n            \":ios_armv7s\": \":ios_armv7s\",\n            \":ios_i386\": \":ios_i386\",\n            \":ios_x86_64\": \":ios_x86_64\",\n            # If we're not on an apple platform return a value that will never match in the select() statement calling this\n            # since it would have already been matched above.\n            \"//conditions:default\": \":darwin\",\n        },\n    ),\n)\n\nalias(\n    name = \"linux\",\n    actual = select(\n        {\n            \":linux_x86_64\": \":linux_x86_64\",\n            \":linux_aarch64\": \":linux_aarch64\",\n            \":linux_ppc\": \":linux_ppc\",\n            \":linux_s390x\": \"linux_s390x\",\n            \":linux_mips64\": \":linux_mips64\",\n            # If we're not on an linux platform return a value that will never match in the select() statement calling this\n            # since it would have already been matched above.\n            \"//conditions:default\": \":linux_x86_64\",\n        },\n    ),\n)\n\nalias(\n    name = \"x86\",\n    actual = select(\n        {\n            \":darwin_x86_64\": \":darwin_x86_64\",\n            \":ios_x86_64\": \"ios_x86_64\",\n            \"linux_x86_64\": \"linux_x86_64\",\n            \"windows_x86_64\": \"windows_x86_64\",\n            # If we're not on an x86 platform return a value that will never match in the select() statement calling this since it would have already been matched above.\n            \"//conditions:default\": \":darwin_x86_64\",\n        },\n    ),\n)\n\nalias(\n    name = \"remote_jdk11\",\n    actual = \"@bazel_tools//tools/jdk:remote_jdk11\",\n)\n"
  },
  {
    "path": "bazel/DEVELOPER.md",
    "content": "# Developer guide for writing Envoy Bazel rules\n\nWhen adding or maintaining Envoy binary, library and test targets, it's\nnecessary to write or modify Bazel `BUILD` files. In general, each directory has\na `BUILD` file covering the source files contained immediately in the directory.\n\nSome guidelines for defining new targets using the [custom Envoy build\nrules](../bazel/envoy_build_system.bzl) are provided below. The [Bazel BUILD\nEncyclopedia](https://bazel.build/versions/master/docs/be/overview.html)\nprovides further details regarding the underlying rules.\n\n## Style guide\n\nThe [BUILD file style\nguide](https://bazel.build/versions/master/docs/skylark/build-style.html) is the\ncanonical style reference. The\n[buildifier](https://github.com/bazelbuild/buildifier) tool automatically\nenforces these guidelines. In addition, within the `BUILD` file, targets should\nbe sorted alphabetically by their `name` attribute.\n\n## Adding files to the Envoy build\n\nAll modules that make up the Envoy binary are statically linked at compile time.\nMany of the modules within Envoy have a pure virtual interface living in\n[`include/envoy`](../include/envoy), implementation sources in\n[`source`](../source), mocks in [`test/mocks`](../test/mocks) and\nunit/integration tests in [`test`](../test). 
The relevant `BUILD` files will\nrequire updating or to be added in these locations as you extend Envoy.\n\nAs an example, consider adding the following interface in `include/envoy/foo/bar.h`:\n\n```c++\n#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/foo/baz.h\"\n\nclass Bar {\npublic:\n  virtual ~Bar() = default;\n\n  virtual void someThing() PURE;\n  ...\n```\n\nThis would require the addition to `include/envoy/foo/BUILD` of the following target:\n\n```python\nenvoy_cc_library(\n    name = \"bar_interface\",\n    hdrs = [\"bar.h\"],\n    deps = [\n        \":baz_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n    ],\n)\n```\n\nThis declares a new target `bar_interface`, where the convention is that pure\nvirtual interfaces have their targets suffixed with `_interface`. The header\n`bar.h` is exported to other targets that depend on\n`//include/envoy/foo:bar_interface`. The interface target itself depends on\n`baz_interface` (in the same directory, hence the relative Bazel label) and\n`buffer_interface`.\n\nIn general, any header included via `#include` in a file belonging to the union\nof the `hdrs` and `srcs` lists for a Bazel target X should appear directly in\nthe exported `hdrs` list for some target Y listed in the `deps` of X.\n\nContinuing the above example, the implementation of `Bar` might take place in\n`source/common/foo/bar_impl.h`, e.g.\n\n```c++\n#pragma once\n\n#include \"envoy/foo/bar.h\"\n\nclass BarImpl : public Bar {\n...\n```\n\nand `source/common/foo/bar_impl.cc`:\n\n```c++\n#include \"common/foo/bar_impl.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/foo/bar_internal.h\"\n#include \"common/foo/baz_impl.h\"\n...\n```\n\nThe corresponding target to be added to `source/common/foo/BUILD` would be:\n\n```python\nenvoy_cc_library(\n    name = \"bar_lib\",\n    srcs = [\n        \"bar_impl.cc\",\n        \"bar_internal.h\",\n    ],\n    hdrs = [\"bar_impl.h\"],\n    deps = [\n        
\":baz_lib\",\n        \"//include/envoy/foo:bar_interface\",\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n```\n\nBy convention, Bazel targets for internal implementation libraries are suffixed\nwith `_lib`.\n\nSimilar to the above, a test mock target might be declared for `test/mocks/foo/mocks.h` in\n`test/mocks/foo/BUILD` with:\n\n```python\nenvoy_cc_mock(\n    name = \"foo_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/foo:bar_interface\",\n        ...\n    ],\n)\n```\n\nTypically, mocks are provided for all interfaces in a directory in a single\n`mocks.{cc,h}` and corresponding `_mocks` Bazel target. There are some\nexceptions, such as [test/mocks/upstream/BUILD](../test/mocks/upstream/BUILD),\nwhere more granular mock targets are defined.\n\nUnit tests for `BarImpl` would be written in `test/common/foo/bar_impl_test.cc`\nand a target added to `test/common/foo/BUILD`:\n\n```python\nenvoy_cc_test(\n    name = \"bar_impl_test\",\n    srcs = [\"bar_impl_test.cc\"],\n    deps = [\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//source/common/foo:bar_lib\",\n        ...\n    ],\n)\n```\n\n## Binary targets\n\nNew binary targets, for example tools that make use of some Envoy libraries, can be added\nwith the `envoy_cc_binary` rule, e.g. for a new `tools/hello/world.cc` that depends on\n`bar_lib`, we might have in `tools/hello/BUILD`:\n\n```python\nenvoy_cc_binary(\n    name = \"world\",\n    srcs = [\"world.cc\"],\n    deps = [\n        \"//source/common/foo:bar_lib\",\n    ],\n)\n```\n\n## Filter linking\n\nFilters are registered via static initializers at early runtime by modules in\n[`source/server/config`](../source/server/config). These require the `alwayslink\n= 1` attribute to be set in the corresponding `envoy_cc_library` target to\nensure they are correctly linked. 
See\n[`source/server/config/http/BUILD`](../source/server/config/http/BUILD) for\nexamples.\n\n## Tests with environment dependencies\n\nSome tests depend on read-only data files. In general, these can be specified by adding a\n`data = [\"some_file.csv\", ...],` attribute to the `envoy_cc_test` target, e.g.\n\n```python\nenvoy_cc_test(\n    name = \"bar_impl_test\",\n    srcs = [\"bar_impl_test.cc\"],\n    data = [\"some_file.csv\"],\n    deps = [\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//source/common/foo:bar_lib\",\n        ...\n    ],\n)\n```\n\nA [glob\nfunction](https://bazel.build/versions/master/docs/be/functions.html#glob) is\navailable for simple pattern matching. Within a test, the read-only data dependencies\ncan be accessed via the\n[`TestEnvironment::runfilesPath()`](../test/test_common/environment.h) method.\n\nA writable path is provided for test temporary files by\n[`TestEnvironment::temporaryDirectory()`](../test/test_common/environment.h).\n\nIntegration tests might rely on JSON files that require paths for writable\ntemporary files and paths for file-based Unix Domain Sockets to be specified in\nthe JSON. 
Jinja-style `{{ test_tmpdir }}` and `{{ test_udsdir }}` macros can be used as\nplaceholders, with the substituted JSON files made available in\n[`TestEnvironment::temporaryDirectory()`](../test/test_common/environment.h) by\nthe `envoy_cc_test_with_json` rule, e.g.\n\n```python\nenvoy_cc_test_with_json(\n    name = \"bar_integration_test\",\n    srcs = [\"bar_integration_test.cc\"],\n    jsons = [\"//test/config/integration:server.json\"],\n    deps = [\n        \"//source/server:server_lib\",\n        ...\n    ],\n)\n```\n\nIn general, the `setup_cmds` attribute can be used to declare a setup shell\nscript that executes in the [test\nenvironment](https://bazel.build/versions/master/docs/test-encyclopedia.html#initial-conditions)\nprior to the test, see [`bazel/envoy_build_system.bzl`](envoy_build_system.bzl)\nfor further details.\n"
  },
  {
    "path": "bazel/EXTERNAL_DEPS.md",
    "content": "# Choosing tarballs\n\nWhere the dependency maintainer provides a tarball, prefer that over the\nautomatically generated Github tarball. Github generated tarball SHA256\nvalues can change when Github change their tar/gzip libraries breaking\nbuilds. Maintainer provided tarballs are more stable and the maintainer\ncan provide the SHA256.\n\n# Adding external dependencies to Envoy (C++)\n\n## Native Bazel\n\nThis is the preferred style of adding dependencies that use Bazel for their\nbuild process.\n\n1. Define a new Bazel repository in [`bazel/repositories.bzl`](repositories.bzl),\n   in the `envoy_dependencies()` function.\n2. Reference your new external dependency in some `envoy_cc_library` via the\n   `external_deps` attribute.\n3. `bazel test //test/...`\n\n## External CMake (preferred)\n\nThis is the preferred style of adding dependencies that use CMake for their build system.\n\n1. Define a the source Bazel repository in [`bazel/repositories.bzl`](repositories.bzl), in the\n   `envoy_dependencies()` function.\n2. Add a `cmake_external` rule to [`bazel/foreign_cc/BUILD`](foreign_cc/BUILD). This will reference\n   the source repository in step 1.\n3. Reference your new external dependency in some `envoy_cc_library` via the name bound in step 1\n   `external_deps` attribute.\n4. `bazel test //test/...`\n\n\n## genrule repository\n\nThis is the newer style of adding dependencies with no upstream Bazel configs.\nIt wraps the dependency's native build tooling in a Bazel-aware shell script,\ninstalling to a Bazel-managed prefix.\n\nThe shell script is executed by Bash, with a few Bazel-specific extensions.\nSee the [Bazel docs for \"genrule\"](https://docs.bazel.build/versions/master/be/general.html#genrule)\nfor details on Bazel's shell extensions.\n\n1. Add a BUILD file in [`bazel/external/`](external/), using a `genrule` target\n   to build the dependency. Please do not add BUILD logic that replaces the\n   dependency's upstream build tooling.\n2. 
Define a new Bazel repository in [`bazel/repositories.bzl`](repositories.bzl),\n   in the `envoy_dependencies()` function. The repository may use `genrule_repository`\n   from [`bazel/genrule_repository.bzl`](genrule_repository.bzl) to place large\n   genrule shell commands into a separate file.\n3. Reference your new external dependency in some `envoy_cc_library` via the name bound in step 2 in the\n   `external_deps` attribute.\n4. `bazel test //test/...`\n\nDependencies between external libraries can use the standard Bazel dependency\nresolution logic, using the `$(location)` shell extension to resolve paths\nto binaries, libraries, headers, etc.\n\n# Adding external dependencies to Envoy (Python)\n\nPython dependencies should be added via `pip3` and `rules_python`. The process\nis:\n\n1. Define a `pip3_import()` pointing at your target `requirements.txt` in\n   [`bazel/repositories_extra.bzl`](repositories_extra.bzl)\n\n2. Add a `pip_install()` invocation in\n   [`bazel/dependency_imports.bzl`](dependency_imports.bzl).\n\n3. Add a `requirements(\"<package name>\")` in the `BUILD` file that depends on\n   this package.\n\nYou can use [`tools/config_validation/BUILD`](../tools/config_validation/BUILD) as an example\nfor this flow. See also the [`rules_python`](https://github.com/bazelbuild/rules_python)\ndocumentation for further references.\n\n# Updating an external dependency version\n\n1. Update the corresponding entry in\n[the repository locations file.](https://github.com/envoyproxy/envoy/blob/master/bazel/repository_locations.bzl)\n2. `bazel test //test/...`\n\n# Overriding an external dependency temporarily\n\nAn external dependency built by genrule repository or native Bazel could be overridden by\nspecifying Bazel option\n[`--override_repository`](https://docs.bazel.build/versions/master/command-line-reference.html)\nto point to a local copy. 
The option can be used multiple times to override multiple dependencies.\nThe name of the dependency can be found in\n[the repository locations file.](https://github.com/envoyproxy/envoy/blob/master/bazel/repository_locations.bzl)\nThe path of the local copy has to be an absolute path.\n\nFor repositories built by `envoy_cmake_external()` in `bazel/foreign_cc/BUILD`,\nit is necessary to populate the local copy with some additional Bazel machinery\nto support `--override_repository`:\n1. Place an empty `WORKSPACE` in the root.\n2. Place a `BUILD` file with `filegroup(name = \"all\", srcs = glob([\"**\"]), visibility = [\"//visibility:public\"])`\n   in the root.\n\n# Debugging external dependencies\n\nFor all external dependencies, overriding with a local copy as described in the\nprevious section is a useful tool.\n\nBelow we describe specific tips for obtaining additional debug for specific\ndependencies:\n\n* `libevent`: add `\"EVENT__ENABLE_VERBOSE_DEBUG\": \"on\",` to `cache_entries`\n  in the `event` target in `bazel/foreign_cc/BUILD` for verbose tracing of\n  libevent processing.\n\n* `nghttp2`: set `ENVOY_NGHTTP2_TRACE` in the environment and run at `-l trace`.\n\n# Distdir - prefetching dependencies\n\nUsually Bazel downloads all dependencies during build time. 
But there is a\npossibility to prefetch dependencies and point Bazel to them by using `--distdir`\noption and providing a path to directory which contains tarballs with exactly\nthe same name and the same SHA256 sum that are defined in repositories\ndefinitions.\n\nFor example, let's assume that your distdir location is `$HOME/envoy_distdir`.\nTo prefetch `boringssl` which is defined in `bazel/repository_locations.bzl` as:\n\n```\nboringssl = dict(\n    # Use commits from branch \"chromium-stable-with-bazel\"\n    sha256 = \"d1700e0455f5f918f8a85ff3ce6cd684d05c766200ba6bdb18c77d5dcadc05a1\",\n    strip_prefix = \"boringssl-060e9a583976e73d1ea8b2bfe8b9cab33c62fa17\",\n    # chromium-70.0.3538.67\n    urls = [\"https://github.com/google/boringssl/archive/060e9a583976e73d1ea8b2bfe8b9cab33c62fa17.tar.gz\"],\n),\n```\n\n`$HOME/envoy_distdir` needs to contain `060e9a583976e73d1ea8b2bfe8b9cab33c62fa17.tar.gz`\nfile.\n\nThen Envoy needs to be built with the following command:\n\n```\nbazel build --distdir=$HOME/envoy_distdir //source/exe:envoy\n```\n"
  },
  {
    "path": "bazel/PPROF.md",
"content": "# CPU or memory consumption testing with `pprof`\n\nTo use `pprof` to analyze performance and memory consumption in Envoy, you can\nuse the built-in statically linked profiler, or dynamically link it in to a\nspecific place yourself.\n\n## Collecting CPU or heap profile for a full execution of envoy\n\nStatic linking is already available (because of a `HeapProfilerDump()` call\ninside\n[`Envoy::Profiler::Heap::stopProfiler()`](https://github.com/envoyproxy/envoy/blob/master/source/common/profiler/profiler.cc#L32-L39)).\n\n### Compiling a statically-linked Envoy\n\nBuild the static binary using bazel:\n\n    $ bazel build //source/exe:envoy-static\n\n### Collecting the profile\n\nTo collect a heap profile, run a statically-linked Envoy with `pprof`\nand run the binary with a `CPUPROFILE` or `HEAPPROFILE` environment variable, like so:\n\n    $ CPUPROFILE=/tmp/mybin.cpuprof bazel-bin/source/exe/envoy-static <args>\n    $ HEAPPROFILE=/tmp/mybin.heapprof bazel-bin/source/exe/envoy-static <args>\n\n`CPUPROFILE` or `HEAPPROFILE` sets a location for the profiler output. (See *Methodology*.)\n\nThere are several other environment variables that can be set to tweak the behavior of gperftools. See https://gperftools.github.io/gperftools/ for more details.\n\n### Analyzing the profile\n\n[pprof](https://github.com/google/pprof) can be used to symbolize CPU and heap profiles. For example:\n\n    $ pprof -text bazel-bin/source/exe/envoy-static /tmp/mybin.cpuprof\n\n## Collecting CPU or heap profile for the full execution of a test target\n\nThe profiler library is automatically linked into envoy_cc_test targets.\n\nRun a test with heap profiling enabled, like so:\n\n    $ bazel test --test_env=HEAPPROFILE=/tmp/heapprof <test target>\n\nRun a test with CPU profiling enabled, like so:\n\n    $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof <test target>\n\nNote that heap checks and heap profile collection in tests have noticeable performance implications. 
Use the following command to collect a CPU profile from a test target with heap check and heap profile collection disabled:\n\n    $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof --test_env=HEAPPROFILE= --test_env=HEAPCHECK= <test target>\n\n## Starting and stopping profile programmatically\n\n### Add `tcmalloc_dep` dependency to envoy_cc_library rules\n\nIt is possible to start/stop the CPU or heap profiler programmatically.\nThe [Gperftools CPU Profiler](https://gperftools.github.io/gperftools/cpuprofile.html)\nis controlled by `ProfilerStart()`/`ProfilerStop()`, and the\n[Gperftools Heap Profiler](https://gperftools.github.io/gperftools/heapprofile.html)\nis controlled by `HeapProfilerStart()`, `HeapProfilerStop()` and `HeapProfilerDump()`.\n\nThese functions are wrapped by Envoy objects defined in [`source/common/profiler/profiler.h`](https://github.com/envoyproxy/envoy/blob/master/source/common/profiler/profiler.h).\n\nTo enable profiling programmatically:\n\n1. Add a library dependency on \"//source/common/profiler:profiler_lib\" to your envoy_cc_library build rule.\n2. Use the `startProfiler`/`stopProfiler` methods of `Envoy::Profiler::Cpu` or `Envoy::Profiler::Heap` to collect a profile.\n\nNote that `startProfiler` should only be called if no other profile of that type is currently active (i.e. `profilerEnabled()` returns false).\n\nExample:\n\n```c++\n    // includes\n    #include \"common/profiler/profiler.h\"\n    ...\n    Function(...) 
{\n        if (!Profiler::Cpu::startProfiler(profile_path)) {\n           // Error handling\n        }\n        ...\n        Do expensive stuff in one or more threads.\n        ...\n\n        // Stop the profiler and dump output to the `profile_path` specified when profile was started.\n        Profiler::Cpu::stopProfiler();\n    }\n```\n\n## Memory Profiling in Tests\nTo support memory leaks detection, tests are built with gperftools dependencies enabled by default.\n\n### Enabling Memory Profiling in Tests\nUse `HeapProfilerStart()`, `HeapProfilerStop()`, and `HeapProfilerDump()` to start, stop, and persist\nmemory dumps, respectively. Please see [above](#adding-tcmalloc_dep-to-envoy) for more details.\n\n### Bazel Configuration\nBy default, bazel executes tests in a sandbox, which will be deleted together with memory dumps\nafter the test run. To preserve memory dumps, bazel can be forced to run tests without\nsandboxing, by setting the ```TestRunner``` parameter to ```local```:\n```\nbazel test --strategy=TestRunner=local ...\n```\n\nAn alternative is to set ```HEAPPROFILE``` environment variable for the test runner:\n```\nbazel test --test_env=HEAPPROFILE=/tmp/testprofile ...\n```\n\n# Methodology\n\nFor consistent testing, it makes sense to run Envoy for a constant amount of\ntime across trials:\n\n    $ timeout <num_seconds> bazel-bin/source/exe/envoy <options>\n\nEnvoy will print to stdout something like:\n\n    Starting tracking the heap\n\nAnd then a series of stdouts like:\n\n    Dumping heap profile to <heap file 0001> (100 MB currently in use)\n    Dumping heap profile to <heap file 0002> (200 MB currently in use)\n    ...\n\nThis will generate a series of files; if you statically-linked, these are\nwherever `HEAPPROFILE` points to. Otherwise, they are in the current directory\nby default. 
They'll be named something like `main_common_base.0001.heap`,\n`main_common_base.0002.heap`, etc.\n\n*NB:* There is no reason this needs to be titled `main_common_base`. Whatever\nflag you supply `HeapProfilerStart` / `HeapProfilerDump` will become the\nfilename. Multiple sections of code could be profiled simultaneously by setting\nmultiple `HeapProfilerStart()` / `HeapProfilerStop()` breakpoints with unique\nidentifiers.\n\n# Analyzing with `pprof`\n\n[pprof](https://github.com/google/pprof) can read these heap files in a\nnumber of ways. Most convenient for first-order inspection might be `pprof -top`\nor `pprof -text`:\n\n    $ pprof -text bazel-bin/source/exe/envoy main_common_base* | head -n5\n    File: envoy\n    Build ID: ...\n    Type: inuse_space\n    Showing nodes accounting for 6402800.62kB, 98.59% of 6494044.58kB total\n    Dropped ... nodes (cum <= ...kB)\n\nMore complex flame/graph charts can be generated and viewed in a browser, which\nis often more helpful than text-based output:\n\n    $ pprof -http=localhost:9999 bazel-bin/source/exe/envoy main_common_base*\n"
  },
  {
    "path": "bazel/README.md",
"content": "# Building Envoy with Bazel\n\n## Installing Bazelisk as Bazel\n\nIt is recommended to use [Bazelisk](https://github.com/bazelbuild/bazelisk) installed as `bazel`, to avoid Bazel compatibility issues.\n\nOn Linux, run the following commands:\n\n```\nsudo wget -O /usr/local/bin/bazel https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-linux-amd64\nsudo chmod +x /usr/local/bin/bazel\n```\n\nOn macOS, run the following command:\n```\nbrew install bazelisk\n```\n\nOn Windows, run the following commands:\n```\nmkdir %USERPROFILE%\\bazel\npowershell Invoke-WebRequest https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-windows-amd64.exe -OutFile %USERPROFILE%\\bazel\\bazel.exe\nset PATH=%PATH%;%USERPROFILE%\\bazel\n```\n\nIf you're building from a revision of Envoy prior to August 2019, which doesn't contain a `.bazelversion` file, run `ci/run_envoy_docker.sh \"bazel version\"`\nto find the right version of Bazel and set the `USE_BAZEL_VERSION` environment variable to that version to build.\n\n## Production environments\n\nTo build Envoy with Bazel in a production environment, where the [Envoy\ndependencies](https://www.envoyproxy.io/docs/envoy/latest/install/building.html#requirements) are typically\nindependently sourced, the following steps should be followed:\n\n1. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/install/building.html#requirements).\n1. 
`bazel build -c opt //source/exe:envoy-static` from the repository root.\n\n## Quick start Bazel build for developers\n\nThis section describes how to and what dependencies to install to get started building Envoy with Bazel.\nIf you would rather use a pre-build Docker image with required tools installed, skip to [this section](#building-envoy-with-the-ci-docker-image).\n\nAs a developer convenience, a [WORKSPACE](https://github.com/envoyproxy/envoy/blob/master/WORKSPACE) and\n[rules for building a recent\nversion](https://github.com/envoyproxy/envoy/blob/master/bazel/repositories.bzl) of the various Envoy\ndependencies are provided. These are provided as is, they are only suitable for development and\ntesting purposes. The specific versions of the Envoy dependencies used in this build may not be\nup-to-date with the latest security patches. See\n[this doc](https://github.com/envoyproxy/envoy/blob/master/bazel/EXTERNAL_DEPS.md#updating-an-external-dependency-version)\nfor how to update or override dependencies.\n\n1. Install external dependencies.\n    ### Ubuntu\n    On Ubuntu, run the following:\n    ```\n    sudo apt-get install \\\n       libtool \\\n       cmake \\\n       automake \\\n       autoconf \\\n       make \\\n       ninja-build \\\n       curl \\\n       unzip \\\n       virtualenv\n    ```\n\n    ### Fedora\n    On Fedora (maybe also other red hat distros), run the following:\n    ```\n    dnf install cmake libtool libstdc++ libstdc++-static libatomic ninja-build lld patch aspell-en\n    ```\n\n    ### Linux\n    On Linux, we recommend using the prebuilt Clang+LLVM package from [LLVM official site](http://releases.llvm.org/download.html).\n    Extract the tar.xz and run the following:\n    ```\n    bazel/setup_clang.sh <PATH_TO_EXTRACTED_CLANG_LLVM>\n    ```\n\n    This will setup a `clang.bazelrc` file in Envoy source root. 
If you want to make clang as default, run the following:\n    ```\n    echo \"build --config=clang\" >> user.bazelrc\n    ```\n\n    Note: Either `libc++` or `libstdc++-7-dev` (or higher) must be installed. These are typically\n    available via a package manager, but may not be available in default repositories depending on\n    OS version. To build against `libc++` build with the `--config=libc++` instead of the\n    `--config=clang` flag.\n\n    ### macOS\n    On macOS, you'll need to install several dependencies. This can be accomplished via [Homebrew](https://brew.sh/):\n    ```\n    brew install coreutils wget cmake libtool go bazel automake ninja clang-format autoconf aspell\n    ```\n    _notes_: `coreutils` is used for `realpath`, `gmd5sum` and `gsha256sum`\n\n    The full version of Xcode (not just Command Line Tools) is also required to build Envoy on macOS.\n    Envoy compiles and passes tests with the version of clang installed by Xcode 11.1:\n    Apple clang version 11.0.0 (clang-1100.0.33.8).\n\n    In order for bazel to be aware of the tools installed by brew, the PATH\n    variable must be set for bazel builds. This can be accomplished by setting\n    this in your `user.bazelrc` file:\n\n    ```\n    build --action_env=PATH=\"/usr/local/bin:/opt/local/bin:/usr/bin:/bin\"\n    ```\n\n    Alternatively, you can pass `--action_env` on the command line when running\n    `bazel build`/`bazel test`.\n\n    Having the binutils keg installed in Brew is known to cause issues due to putting an incompatible\n    version of `ar` on the PATH, so if you run into issues building third party code like luajit\n    consider uninstalling binutils.\n\n    ### Windows\n\n    Install bazelisk in the PATH using the `bazel.exe` executable name as described above in the first section.\n\n    When building Envoy, Bazel creates very long path names. 
One way to work around these excessive path\n    lengths is to change the output base directory for bazel to a very short root path. The CI pipeline\n    for Windows uses `C:\\_eb` as the bazel base path. This and other preferences should be set up by placing\n    the following bazelrc configuration line in a system `%ProgramData%\\bazel.bazelrc` file or the individual\n    user's `%USERPROFILE%\\.bazelrc` file (rather than including it on every bazel command line):\n    ```\n    startup --output_base=C:/_eb\n    ```\n\n    Bazel also creates file symlinks when building Envoy. It's strongly recommended to enable file symlink support \n    using [Bazel's instructions](https://docs.bazel.build/versions/master/windows.html#enable-symlink-support).\n    For other common issues, see the \n    [Using Bazel on Windows](https://docs.bazel.build/versions/master/windows.html) page.\n\n    [python3](https://www.python.org/downloads/): Specifically, the Windows-native flavor distributed\n    by python.org. The POSIX flavor available via MSYS2, the Windows Store flavor and other distributions\n    will not work. Add a symlink for `python3.exe` pointing to the installed `python.exe` for Envoy scripts\n    and Bazel rules which follow POSIX python conventions. Add `pip.exe` to the PATH and install the `wheel`\n    package.\n    ```\n    mklink %USERPROFILE%\\Python38\\python3.exe %USERPROFILE%\\Python38\\python.exe\n    set PATH=%PATH%;%USERPROFILE%\\Python38\n    set PATH=%PATH%;%USERPROFILE%\\Python38\\Scripts\n    pip install wheel\n    ```\n\n    [Build Tools for Visual Studio 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019):\n    For building with MSVC (the `msvc-cl` config option), you must install at least the VC++ workload.\n    You may alternately install the entire Visual Studio 2019 and use the Build Tools installed in that\n    package. 
Earlier versions of VC++ Build Tools/Visual Studio are not recommended or supported.\n    If installed in a non-standard filesystem location, be sure to set the `BAZEL_VC` environment variable\n    to the path of the VC++ package to allow Bazel to find your installation of VC++. NOTE: ensure that\n    the `link.exe` that resolves on your PATH is from VC++ Build Tools and not `/usr/bin/link.exe` from MSYS2,\n    which is determined by their relative ordering in your PATH.\n    ```\n    set BAZEL_VC=%USERPROFILE%\\VSBT2019\\VC\n    set PATH=%PATH%;%USERPROFILE%\\VSBT2019\\VC\\Tools\\MSVC\\14.26.28801\\bin\\Hostx64\\x64\n    ```\n\n    Ensure `CMake` and `ninja` binaries are on the PATH. The versions packaged with VC++ Build\n    Tools are sufficient in most cases, but are 32 bit binaries. These flavors will not run in\n    the project's GCP CI remote build environment, so 64 bit builds from the CMake and ninja\n    projects are used instead.\n    ```\n    set PATH=%PATH%;%USERPROFILE%\\VSBT2019\\Common7\\IDE\\CommonExtensions\\Microsoft\\CMake\\CMake\\bin\n    set PATH=%PATH%;%USERPROFILE%\\VSBT2019\\Common7\\IDE\\CommonExtensions\\Microsoft\\CMake\\Ninja\n    ```\n\n    [MSYS2 shell](https://msys2.github.io/): Install to a path with no spaces, e.g. C:\\msys32. \n    \n    Set the `BAZEL_SH` environment variable to the path of the installed MSYS2 `bash.exe` \n    executable. 
Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment variable to a value \n    of `*` is often advisable to ensure argument parsing in the MSYS2 shell behaves as expected.\n    ```\n    set PATH=%PATH%;%USERPROFILE%\\msys64\\usr\\bin\n    set BAZEL_SH=%USERPROFILE%\\msys64\\usr\\bin\\bash.exe\n    set MSYS2_ARG_CONV_EXCL=*\n    ```\n\n    Set the `TMPDIR` environment variable to a path usable as a temporary directory (e.g.\n    `C:\\Windows\\TEMP`), and create a directory symlink `C:\\c` to `C:\\`, so that the MSYS2\n    path `/c/Windows/TEMP` is equivalent to the Windows path `C:\\Windows\\TEMP`:\n    ```\n    set TMPDIR=C:\\Windows\\TEMP\n    mklink /d C:\\c C:\\\n    ```\n\n    The TMPDIR path and MSYS2 `mktemp` command are used frequently by the `rules_foreign_cc`\n    component of Bazel as well as Envoy's test scripts, causing problems if not set to a path\n    accessible to both Windows and msys commands. [Note the `ci/windows_ci_steps.sh` script\n    which builds envoy and run tests in CI creates this symlink automatically.]\n\n    In the MSYS2 shell, install additional packages via pacman:\n    ```\n    pacman -S diffutils patch unzip zip\n    ```\n\n    [Git](https://git-scm.com/downloads): This version from the Git project, or the version\n    distributed using pacman under MSYS2 will both work, ensure one is on the PATH:.\n    ```\n    set PATH=%PATH%;%USERPROFILE%\\Git\\bin\n    ```\n\n    Lastly, persist environment variable changes. NOTE: The paths in this document are given as\n    examples, make sure to verify you are using the correct paths for your environment. Also note\n    that these examples assume using a `cmd.exe` shell to set environment variables etc., be sure\n    to do the equivalent if using a different shell.\n    ```\n    setx PATH \"%PATH%\"\n    setx BAZEL_SH \"%BAZEL_SH%\"\n    setx MSYS2_ARG_CONV_EXCL \"%MSYS2_ARG_CONV_EXCL%\"\n    setx BAZEL_VC \"%BAZEL_VC%\"\n    setx TMPDIR \"%TMPDIR%\"\n    ```\n\n1. 
Install Golang on your machine. This is required as part of building [BoringSSL](https://boringssl.googlesource.com/boringssl/+/HEAD/BUILDING.md)\n   and also for [Buildifer](https://github.com/bazelbuild/buildtools) which is used for formatting bazel BUILD files.\n1. `go get -u github.com/bazelbuild/buildtools/buildifier` to install buildifier. You may need to set `BUILDIFIER_BIN` to `$GOPATH/bin/buildifier`\n   in your shell for buildifier to work.\n1. `go get -u github.com/bazelbuild/buildtools/buildozer` to install buildozer. You may need to set `BUILDOZER_BIN` to `$GOPATH/bin/buildozer`\n   in your shell for buildozer to work.\n1. `bazel build //source/exe:envoy-static` from the Envoy source directory. Add `-c opt` for an optimized release build or\n   `-c dbg` for an unoptimized, fully instrumented debugging build.\n\n## Building Envoy with the CI Docker image\n\nEnvoy can also be built with the Docker image used for CI, by installing Docker and executing the following.\n\nOn Linux, run:\n\n```\n./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'\n```\n\nFrom a Windows host with Docker installed, the Windows containers feature enabled, and bash (installed via\nMSYS2 or Git bash), run:\n\n```\n./ci/run_envoy_docker.sh './ci/windows_ci_steps.sh'\n```\n\nSee also the [documentation](https://github.com/envoyproxy/envoy/tree/master/ci) for developer use of the\nCI Docker image.\n\n## Building Envoy with Remote Execution\n\nEnvoy can also be built with Bazel [Remote Execution](https://docs.bazel.build/versions/master/remote-execution.html),\npart of the CI is running with the hosted [GCP RBE](https://blog.bazel.build/2018/10/05/remote-build-execution.html) service.\n\nTo build Envoy with a remote build services, run Bazel with your remote build service flags and with `--config=remote-clang`.\nFor example the following command runs build with the GCP RBE service used in CI:\n\n```\nbazel build //source/exe:envoy-static --config=remote-clang \\\n    
--remote_cache=grpcs://remotebuildexecution.googleapis.com \\\n    --remote_executor=grpcs://remotebuildexecution.googleapis.com \\\n    --remote_instance_name=projects/envoy-ci/instances/default_instance\n```\n\nChange the value of `--remote_cache`, `--remote_executor` and `--remote_instance_name` for your remote build services. Tests can\nbe run in remote execution too.\n\nNote: Currently the test run configuration in `.bazelrc` doesn't download test binaries and test logs,\nto override the behavior set [`--experimental_remote_download_outputs`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_remote_download_outputs)\naccordingly.\n\n## Building Envoy with Docker sandbox\n\nBuilding Envoy with Docker sandbox uses the same Docker image used in CI with fixed C++ toolchain configuration. It produces more consistent\noutput which is not depending on your local C++ toolchain. It can also help debugging issues with RBE. To build Envoy with Docker sandbox:\n\n```\nbazel build //source/exe:envoy-static --config=docker-clang\n```\n\nTests can be run in docker sandbox too. Note that the network environment, such as IPv6, may be different in the docker sandbox so you may want\nset different options. 
See below to configure test IP versions.\n\n## Linking against libc++ on Linux\n\nTo link Envoy against libc++, follow the [quick start](#quick-start-bazel-build-for-developers) to setup Clang+LLVM and run:\n```\nbazel build --config=libc++ //source/exe:envoy-static\n```\n\nOr use our configuration with Remote Execution or Docker sandbox, pass `--config=remote-clang-libc++` or\n`--config=docker-clang-libc++` respectively.\n\nIf you want to make libc++ as default, add a line `build --config=libc++` to the `user.bazelrc` file in Envoy source root.\n\n## Using a compiler toolchain in a non-standard location\n\nBy setting the `CC` and `LD_LIBRARY_PATH` in the environment that Bazel executes from as\nappropriate, an arbitrary compiler toolchain and standard library location can be specified. One\nslight caveat is that (at the time of writing), Bazel expects the binutils in `$(dirname $CC)` to be\nunprefixed, e.g. `as` instead of `x86_64-linux-gnu-as`.\n\nNote: this configuration currently doesn't work with Remote Execution or Docker sandbox, you have to generate a\ncustom toolchains configuration for them. See [bazelbuild/bazel-toolchains](https://github.com/bazelbuild/bazel-toolchains)\nfor more details.\n\n## Supported compiler versions\n\nWe now require Clang >= 5.0 due to known issues with std::string thread safety and C++14 support. GCC >= 7 is also\nknown to work. Currently the CI is running with Clang 10.\n\n## Clang STL debug symbols\n\nBy default Clang drops some debug symbols that are required for pretty printing to work correctly.\nMore information can be found [here](https://bugs.llvm.org/show_bug.cgi?id=24202). The easy solution\nis to set ```--copt=-fno-limit-debug-info``` on the CLI or in your .bazelrc file.\n\n## Removing debug info\n\nIf you don't want your debug or release binaries to contain debug info\nto reduce binary size, pass `--define=no_debug_info=1` when building.\nThis is primarily useful when building envoy as a static library. 
When\nbuilding a linked envoy binary you can build the implicit `.stripped`\ntarget from [`cc_binary`](https://docs.bazel.build/versions/master/be/c-cpp.html#cc_binary)\nor pass [`--strip=always`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--strip)\ninstead.\n\n# Testing Envoy with Bazel\n\nAll the Envoy tests can be built and run with:\n\n```\nbazel test //test/...\n```\n\nAn individual test target can be run with a more specific Bazel\n[label](https://bazel.build/versions/master/docs/build-ref.html#Labels), e.g. to build and run only\nthe units tests in\n[test/common/http/async_client_impl_test.cc](https://github.com/envoyproxy/envoy/blob/master/test/common/http/async_client_impl_test.cc):\n\n```\nbazel test //test/common/http:async_client_impl_test\n```\n\nTo observe more verbose test output:\n\n```\nbazel test --test_output=streamed //test/common/http:async_client_impl_test\n```\n\nIt's also possible to pass into an Envoy test additional command-line args via `--test_arg`. For\nexample, for extremely verbose test debugging:\n\n```\nbazel test --test_output=streamed //test/common/http:async_client_impl_test --test_arg=\"-l trace\"\n```\n\nBy default, testing exercises both IPv4 and IPv6 address connections. In IPv4 or IPv6 only\nenvironments, set the environment variable ENVOY_IP_TEST_VERSIONS to \"v4only\" or\n\"v6only\", respectively.\n\n```\nbazel test //test/... --test_env=ENVOY_IP_TEST_VERSIONS=v4only\nbazel test //test/... --test_env=ENVOY_IP_TEST_VERSIONS=v6only\n```\n\nBy default, tests are run with the [gperftools](https://github.com/gperftools/gperftools) heap\nchecker enabled in \"normal\" mode to detect leaks. For other mode options, see the gperftools\nheap checker [documentation](https://gperftools.github.io/gperftools/heap_checker.html). To\ndisable the heap checker or change the mode, set the HEAPCHECK environment variable:\n\n```\n# Disables the heap checker\nbazel test //test/... 
--test_env=HEAPCHECK=\n# Changes the heap checker to \"minimal\" mode\nbazel test //test/... --test_env=HEAPCHECK=minimal\n```\n\nIf you see a leak detected, by default the reported offsets will require `addr2line` interpretation.\nYou can run under `--config=clang-asan` to have this automatically applied.\n\nBazel will by default cache successful test results. To force it to rerun tests:\n\n```\nbazel test //test/common/http:async_client_impl_test --cache_test_results=no\n```\n\nBazel will by default run all tests inside a sandbox, which disallows access to the\nlocal filesystem. If you need to break out of the sandbox (for example to run under a\nlocal script or tool with [`--run_under`](https://docs.bazel.build/versions/master/user-manual.html#flag--run_under)),\nyou can run the test with `--strategy=TestRunner=local`, e.g.:\n\n```\nbazel test //test/common/http:async_client_impl_test --strategy=TestRunner=local --run_under=/some/path/foobar.sh\n```\n# Stack trace symbol resolution\n\nEnvoy can produce backtraces on demand and from assertions and other fatal\nactions like segfaults. Where supported, stack traces will contain resolved\nsymbols, though not include line numbers. On systems where absl::Symbolization is\nnot supported, the stack traces written in the log or to stderr contain addresses rather\nthan resolved symbols. If the symbols were resolved, the address is also included at\nthe end of the line.\n\nThe `tools/stack_decode.py` script exists to process the output and do additional symbol\nresolution including file names and line numbers. It requires the `addr2line` program be\ninstalled and in your path. Any log lines not relevant to the backtrace capability are\npassed through the script unchanged (it acts like a filter). File and line information\nis appended to the stack trace lines.\n\nThe script runs in one of two modes. To process log input from stdin, pass `-s` as the first\nargument, followed by the executable file path. 
You can postprocess a log or pipe the output\nof an Envoy process. If you do not specify the `-s` argument it runs the arguments as a child\nprocess. This enables you to run a test with backtrace post processing. Bazel sandboxing must\nbe disabled by specifying local execution. Example command line with\n`run_under`:\n\n```\nbazel test -c dbg //test/server:backtrace_test\n--run_under=`pwd`/tools/stack_decode.py --strategy=TestRunner=local\n--cache_test_results=no --test_output=all\n```\n\nExample using input on stdin:\n\n```\nbazel test -c dbg //test/server:backtrace_test --cache_test_results=no --test_output=streamed |& tools/stack_decode.py -s bazel-bin/test/server/backtrace_test\n```\n\nYou will need to use either a `dbg` build type or the `opt` build type to get file and line\nsymbol information in the binaries.\n\nBy default main.cc will install signal handlers to print backtraces at the\nlocation where a fatal signal occurred. The signal handler will re-raise the\nfatal signal with the default handler so a core file will still be dumped after\nthe stack trace is logged. To inhibit this behavior use\n`--define=signal_trace=disabled` on the Bazel command line. No signal handlers will\nbe installed.\n\n# Running a single Bazel test under GDB\n\n```\nbazel build -c dbg //test/common/http:async_client_impl_test\nbazel build -c dbg //test/common/http:async_client_impl_test.dwp\ngdb bazel-bin/test/common/http/async_client_impl_test\n```\n\nWe need to use `-c dbg` Bazel option to generate debugging symbols and without\nthat GDB will not be very useful. The debugging symbols are stored as separate\ndebugging information files (`.dwo` files) and we can build a DWARF package file\nwith `.dwp ` target. The `.dwp` file need to be presented in the same folder with the\nbinary for a full debugging experience.\n\n# Running Bazel tests requiring privileges\n\nSome tests may require privileges (e.g. CAP_NET_ADMIN) in order to execute. 
One option is to run\nthem with elevated privileges, e.g. `sudo test`. However, that may not always be possible,\nparticularly if the test needs to run in a CI pipeline. `tools/bazel-test-docker.sh` may be used in\nsuch situations to run the tests in a privileged docker container.\n\nThe script works by wrapping the test execution in the current repository's circle ci build\ncontainer, then executing it either locally or on a remote docker container. In both cases, the\ncontainer runs with the `--privileged` flag, allowing it to execute operations which would otherwise\nbe restricted.\n\nThe command line format is:\n`tools/bazel-test-docker.sh <bazel-test-target> [optional-flags-to-bazel]`\n\nThe script uses two optional environment variables to control its behaviour:\n\n* `RUN_REMOTE=<yes|no>`: chooses whether to run on a remote docker server.\n* `LOCAL_MOUNT=<yes|no>`: copy/mount local libraries onto the docker container.\n\nUse `RUN_REMOTE=yes` when you don't want to run against your local docker instance. Note that you\nwill need to override a few environment variables to set up the remote docker. The list of variables\ncan be found in the [Documentation](https://docs.docker.com/engine/reference/commandline/cli/).\n\nUse `LOCAL_MOUNT=yes` when you are not building with the Envoy build container. 
This will ensure\nthat the libraries against which the tests dynamically link will be available and of the correct\nversion.\n\n## Examples\n\nRunning the http integration test in a privileged container:\n\n```bash\ntools/bazel-test-docker.sh  //test/integration:integration_test --jobs=4 -c dbg\n```\n\nRunning the http integration test compiled locally against a privileged remote container:\n\n```bash\nsetup_remote_docker_variables\nRUN_REMOTE=yes LOCAL_MOUNT=yes tools/bazel-test-docker.sh  //test/integration:integration_test \\\n  --jobs=4 -c dbg\n```\n\n# Additional Envoy build and test options\n\nIn general, there are 3 [compilation\nmodes](https://docs.bazel.build/versions/master/user-manual.html#flag--compilation_mode)\nthat Bazel supports:\n\n* `fastbuild`: `-O0`, aimed at developer speed (default).\n* `opt`: `-O2 -DNDEBUG -ggdb3 -gsplit-dwarf`, for production builds and performance benchmarking.\n* `dbg`: `-O0 -ggdb3 -gsplit-dwarf`, no optimization and debug symbols.\n\nYou can use the `-c <compilation_mode>` flag to control this, e.g.\n\n```\nbazel build -c opt //source/exe:envoy-static\n```\n\nTo override the compilation mode and optimize the build for binary size, you can\nuse the `sizeopt` configuration:\n\n```\nbazel build //source/exe:envoy-static --config=sizeopt\n```\n\n## Sanitizers\n\nTo build and run tests with the gcc compiler's [address sanitizer\n(ASAN)](https://github.com/google/sanitizers/wiki/AddressSanitizer) and\n[undefined behavior\n(UBSAN)](https://developers.redhat.com/blog/2014/10/16/gcc-undefined-behavior-sanitizer-ubsan) sanitizer enabled:\n\n```\nbazel test -c dbg --config=asan //test/...\n```\n\nThe ASAN failure stack traces include line numbers as a result of running ASAN with a `dbg` build above. 
If the\nstack trace is not symbolized, try setting the ASAN_SYMBOLIZER_PATH environment variable to point to the\nllvm-symbolizer binary (or make sure the llvm-symbolizer is in your $PATH).\n\nIf you have clang-5.0 or newer, additional checks are provided with:\n\n```\nbazel test -c dbg --config=clang-asan //test/...\n```\n\n[Thread sanitizer (TSAN)](https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual) tests rely on\na TSAN-instrumented version of libc++ and can be run under the docker sandbox:\n\n```\nbazel test -c dbg --config=docker-tsan //test/...\n```\n\nAlternatively, you can build a local copy of TSAN-instrumented libc++. Follow the [quick start](#quick-start-bazel-build-for-developers) instruction to setup Clang+LLVM environment. Download LLVM sources from the [LLVM official site](https://github.com/llvm/llvm-project)\n\n```\ncurl -sSfL \"https://github.com/llvm/llvm-project/archive/llvmorg-10.0.0.tar.gz\" | tar zx\n\n```\n\nConfigure and build a TSAN-instrumented libc++. 
Please note that `LLVM_USE_SANITIZER=Thread` preprocessor definition is used to enable TSAN instrumentation, and `CMAKE_INSTALL_PREFIX=\"/opt/libcxx_tsan\"` defines the installation directory path.\n\n```\nmkdir tsan\npushd tsan\n\ncmake -GNinja -DLLVM_ENABLE_PROJECTS=\"libcxxabi;libcxx\" -DLLVM_USE_LINKER=lld -DLLVM_USE_SANITIZER=Thread -DCMAKE_BUILD_TYPE=Release \\\n  -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_INSTALL_PREFIX=\"/opt/libcxx_tsan\" \"../llvm-project-llvmorg-10.0.0/llvm\"\nninja install-cxx install-cxxabi\n\nrm -rf /opt/libcxx_tsan/include\n```\n\nGenerate local_tsan.bazelrc containing bazel configuration for tsan tests:\n\n```\nbazel/setup_local_tsan.sh </path/to/instrumented/libc++/home>\n\n```\n\nTo execute TSAN tests using the local instrumented libc++ library pass `--config=local-tsan` to bazel:\n\n```\nbazel test --config=local-tsan //test/...\n```\n\nFor [memory sanitizer (MSAN)](https://github.com/google/sanitizers/wiki/MemorySanitizer) testing,\nit has to be run under the docker sandbox which comes with MSAN instrumented libc++:\n\n```\nbazel test -c dbg --config=docker-msan //test/...\n```\n\nTo run the sanitizers on OS X, prefix `macos-` to the config option, e.g.:\n\n```\nbazel test -c dbg --config=macos-asan //test/...\n```\n\n## Log Verbosity\n\nLog verbosity is controlled at runtime in all builds.\n\nTo obtain `nghttp2` traces, you can set `ENVOY_NGHTTP2_TRACE` in the environment for enhanced\nlogging at `-l trace`. 
For example, in tests:\n\n```\nbazel test //test/integration:protocol_integration_test --test_output=streamed \\\n  --test_arg=\"-l trace\" --test_env=\"ENVOY_NGHTTP2_TRACE=\"\n```\n\n## Disabling optional features\n\nThe following optional features can be disabled on the Bazel build command-line:\n\n* Hot restart with `--define hot_restart=disabled`\n* Google C++ gRPC client with `--define google_grpc=disabled`\n* Backtracing on signals with `--define signal_trace=disabled`\n* Active stream state dump on signals with `--define signal_trace=disabled` or `--define disable_object_dump_on_signal_trace=disabled`\n* tcmalloc with `--define tcmalloc=disabled`. Also you can choose Gperftools' implementation of\n  tcmalloc with `--define tcmalloc=gperftools` which is the default for non-x86 builds.\n* deprecated features with `--define deprecated_features=disabled`\n\n\n## Enabling optional features\n\nThe following optional features can be enabled on the Bazel build command-line:\n\n* Exported symbols during linking with `--define exported_symbols=enabled`.\n  This is useful in cases where you have a lua script that loads shared object libraries, such as\n  those installed via luarocks.\n* Perf annotation with `--define perf_annotation=enabled` (see\n  source/common/common/perf_annotation.h for details).\n* BoringSSL can be built in a FIPS-compliant mode with `--define boringssl=fips`\n  (see [FIPS 140-2](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ssl#fips-140-2) for details).\n* ASSERT() can be configured to log failures and increment a stat counter in a release build with\n  `--define log_debug_assert_in_release=enabled`. The default behavior is to compile debug assertions out of\n  release builds so that the condition is not evaluated. This option has no effect in debug builds.\n* memory-debugging (scribbling over memory after allocation and before freeing) with\n  `--define tcmalloc=debug`. 
Note this option cannot be used with FIPS-compliant mode BoringSSL and\n  tcmalloc is built from the sources of Gperftools.\n* Default [path normalization](https://github.com/envoyproxy/envoy/issues/6435) with\n  `--define path_normalization_by_default=true`. Note this still could be disabled by explicit xDS config.\n* Manual stamping via VersionInfo with `--define manual_stamp=manual_stamp`.\n  This is needed if the `version_info_lib` is compiled via a non-binary bazel rule, e.g. `envoy_cc_library`.\n  Otherwise, the linker will fail to resolve symbols that are included via the `linkstamp` rule, which is only available to binary targets.\n  This is being tracked as a feature in: https://github.com/envoyproxy/envoy/issues/6859.\n* Process logging for Android applications can be enabled with `--define logger=android`.\n* Excluding assertions for known issues with `--define disable_known_issue_asserts=true`.\n  A KNOWN_ISSUE_ASSERT is an assertion that should pass (like all assertions), but sometimes fails for some as-yet unidentified or unresolved reason. Because it is known to potentially fail, it can be compiled out even when DEBUG is true, when this flag is set. This allows Envoy to be run in production with assertions generally enabled, without crashing for known issues. KNOWN_ISSUE_ASSERT should only be used for newly-discovered issues that represent benign violations of expectations.\n* Envoy can be linked to [`zlib-ng`](https://github.com/zlib-ng/zlib-ng) instead of\n  [`zlib`](https://zlib.net) with `--define zlib=ng`.\n\n## Disabling extensions\n\nEnvoy uses a modular build which allows extensions to be removed if they are not needed or desired.\nExtensions that can be removed are contained in\n[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). 
Use the following\nprocedure to customize the extensions for your build:\n\n* The Envoy build assumes that a Bazel repository named `@envoy_build_config` exists which\n  contains the file `@envoy_build_config//:extensions_build_config.bzl`. In the default build,\n  a synthetic repository is created containing [extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl).\n  Thus, the default build has all extensions.\n* Start by creating a new Bazel workspace somewhere in the filesystem that your build can access.\n  This workspace should contain:\n  * Empty WORKSPACE file.\n  * Empty BUILD file.\n  * A copy of [extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl).\n  * Comment out any extensions that you don't want to build in your file copy.\n\nTo have your local build use your overridden configuration repository there are two options:\n\n1. Use the [`--override_repository`](https://docs.bazel.build/versions/master/command-line-reference.html)\n   CLI option to override the `@envoy_build_config` repo.\n2. Use the following snippet in your WORKSPACE before you load the Envoy repository. 
E.g.,\n\n```\nworkspace(name = \"envoy\")\n\nlocal_repository(\n    name = \"envoy_build_config\",\n    # Relative paths are also supported.\n    path = \"/somewhere/on/filesystem/envoy_build_config\",\n)\n\nlocal_repository(\n    name = \"envoy\",\n    # Relative paths are also supported.\n    path = \"/somewhere/on/filesystem/envoy\",\n)\n\n...\n```\n\n## Extra extensions\n\nIf you are building your own Envoy extensions or custom Envoy builds and encounter visibility\nproblems with, you may need to adjust the default visibility rules to be public,\nas documented in\n[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl).\nSee the instructions above about how to create your own custom version of\n[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl).\n\n# Release builds\n\nRelease builds should be built in `opt` mode, processed with `strip` and have a\n`.note.gnu.build-id` section with the Git SHA1 at which the build took place.\nThey should also ignore any local `.bazelrc` for reproducibility. This can be\nachieved with:\n\n```\nbazel --bazelrc=/dev/null build -c opt //source/exe:envoy-static.stripped\n```\n\nOne caveat to note is that the Git SHA1 is truncated to 16 bytes today as a\nresult of the workaround in place for\nhttps://github.com/bazelbuild/bazel/issues/2805.\n\n# Coverage builds\n\nTo generate coverage results, make sure you are using a clang toolchain and have `llvm-cov` and\n`llvm-profdata` in your `PATH`. 
Then run:\n\n```\ntest/run_envoy_bazel_coverage.sh\n```\n\nThe summary results are printed to the standard output and the full coverage\nreport is available in `generated/coverage/coverage.html`.\n\nTo generate coverage results for fuzz targets, use the `FUZZ_COVERAGE` environment variable, e.g.:\n```\nFUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh\n```\nThis generates a coverage report for fuzz targets after running the target for one minute against fuzzing engine libfuzzer using its corpus as initial seed inputs. The full coverage report will be available in `generated/fuzz_coverage/coverage.html`.\n\nCoverage for every PR is available in Circle in the \"artifacts\" tab of the coverage job. You will\nneed to navigate down and open \"coverage.html\" but then you can navigate per normal. NOTE: We\nhave seen some issues with seeing the artifacts tab. If you can't see it, log out of Circle, and\nthen log back in and it should start working.\n\nThe latest coverage report for master is available\n[here](https://storage.googleapis.com/envoy-postsubmit/master/coverage/index.html). The latest fuzz coverage report for master is available [here](https://storage.googleapis.com/envoy-postsubmit/master/fuzz_coverage/index.html).\n\nIt's also possible to specialize the coverage build to a specified test or test dir. This is useful\nwhen doing things like exploring the coverage of a fuzzer over its corpus. This can be done by\npassing coverage targets as the command-line arguments and using the `VALIDATE_COVERAGE` environment\nvariable, e.g. for a fuzz test:\n\n```\nFUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh //test/common/common:base64_fuzz_test\n```\n\n# Cleaning the build and test artifacts\n\n`bazel clean` will nuke all the build/test artifacts from the Bazel cache for\nEnvoy proper. 
To remove the artifacts for the external dependencies run\n`bazel clean --expunge`.\n\nIf something goes really wrong and none of the above work to resolve a stale build issue, you can\nalways remove your Bazel cache completely. It is likely located in `~/.cache/bazel`.\n\n# Adding or maintaining Envoy build rules\n\nSee the [developer guide for writing Envoy Bazel rules](DEVELOPER.md).\n\n# Bazel performance on (virtual) machines with low resources\n\nIf the (virtual) machine that is performing the build is low on memory or CPU\nresources, you can override Bazel's default job parallelism determination with\n`--jobs=N` to restrict the build to at most `N` simultaneous jobs, e.g.:\n\n```\nbazel build --jobs=2 //source/exe:envoy-static\n```\n\n# Debugging the Bazel build\n\nWhen trying to understand what Bazel is doing, the `-s` and `--explain` options\nare useful. To have Bazel provide verbose output on which commands it is executing:\n\n```\nbazel build -s //source/exe:envoy-static\n```\n\nTo have Bazel emit to a text file the rationale for rebuilding a target:\n\n```\nbazel build --explain=file.txt //source/exe:envoy-static\n```\n\nTo get more verbose explanations:\n\n```\nbazel build --explain=file.txt --verbose_explanations //source/exe:envoy-static\n```\n\n# Resolving paths in bazel build output\n\nSometimes it's useful to see real system paths in bazel error message output (vs. symbolic links).\n`tools/path_fix.sh` is provided to help with this. See the comments in that file.\n\n# Compilation database\n\nRun `tools/gen_compilation_database.py` to generate\na [JSON Compilation Database](https://clang.llvm.org/docs/JSONCompilationDatabase.html). This could be used\nwith any tools (e.g. clang-tidy) compatible with the format. 
It is recommended to run this script\nwith `TEST_TMPDIR` set, so the Bazel artifacts don't get cleaned up in the next `bazel build` or `bazel test`.\n\nThe compilation database could also be used to set up editors with cross reference, code completion.\nFor example, you can use [You Complete Me](https://valloric.github.io/YouCompleteMe/) or\n[clangd](https://clangd.llvm.org/) with supported editors.\n\nFor example, use the following command to prepare a compilation database:\n\n```\nTEST_TMPDIR=/tmp tools/gen_compilation_database.py\n```\n\n\n# Running clang-format without docker\n\nThe easiest way to run the clang-format check/fix commands is to run them via\ndocker, which helps ensure the right toolchain is set up. However you may prefer\nto run clang-format scripts on your workstation directly:\n * It's possible there is a speed advantage\n * Docker itself can sometimes go awry and you then have to deal with that\n * Type-ahead doesn't always work when running a command through docker\n\nTo run the tools directly, you must install the correct version of clang. This\nmay change over time, check the version of clang in the docker image. 
You must\nalso have 'buildifier' installed from the bazel distribution.\n\nEdit the paths shown here to reflect the installation locations on your system:\n\n```shell\nexport CLANG_FORMAT=\"$HOME/ext/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang-format\"\nexport BUILDIFIER_BIN=\"/usr/bin/buildifier\"\n```\n\nOnce this is set up, you can run clang-format without docker:\n\n```shell\n./tools/code_format/check_format.py check\n./tools/spelling/check_spelling.sh check\n./tools/code_format/check_format.py fix\n./tools/spelling/check_spelling.sh fix\n```\n\n# Advanced caching setup\n\nSetting up an HTTP cache for Bazel output helps optimize Bazel performance and resource usage when\nusing multiple compilation modes or multiple trees.\n\n## Setup local cache\n\nYou may use any [Remote Caching](https://docs.bazel.build/versions/master/remote-caching.html) backend\nas an alternative to this.\n\nThis requires Go 1.11+, follow the [instructions](https://golang.org/doc/install#install) to install\nif you don't have one. To start the cache, run the following from the root of the Envoy repository (or anywhere else\nthat the Go toolchain can find the necessary dependencies):\n\n```\ngo run github.com/buchgr/bazel-remote --dir ${HOME}/bazel_cache --host 127.0.0.1 --port 28080 --max_size 64\n```\n\nSee [Bazel remote cache](https://github.com/buchgr/bazel-remote) for more information on the parameters.\nThe command above will setup a maximum 64 GiB cache at `~/bazel_cache` on port 28080. You might\nwant to setup a larger cache if you run ASAN builds.\n\nNOTE: Using docker to run remote cache server described in remote cache docs will likely have\nslower cache performance on macOS due to slow disk performance on Docker for Mac.\n\nAdding the following parameter to Bazel everytime or persist them in `.bazelrc`.\n\n```\n--remote_http_cache=http://127.0.0.1:28080/\n```\n"
  },
  {
    "path": "bazel/antlr.patch",
    "content": "diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp\nindex c6cceda13..e86533759 100755\n--- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp\n+++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp\n@@ -104,7 +104,7 @@ void deserializeSets(\n\n }\n\n-ATNDeserializer::ATNDeserializer(): ATNDeserializer(ATNDeserializationOptions::getDefaultOptions()) {\n+ATNDeserializer::ATNDeserializer(): ATNDeserializer(ATNDeserializationOptions()) {\n }\n\n ATNDeserializer::ATNDeserializer(const ATNDeserializationOptions& dso): deserializationOptions(dso) {\ndiff --git a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp\nindex 827c3d59f..62914cf55 100755\n--- a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp\n+++ b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp\n@@ -69,7 +69,7 @@ void LexerATNSimulator::copyState(LexerATNSimulator *simulator) {\n }\n\n size_t LexerATNSimulator::match(CharStream *input, size_t mode) {\n-  match_calls++;\n+  // match_calls++;\n   _mode = mode;\n   ssize_t mark = input->mark();\n\n"
  },
  {
    "path": "bazel/api_binding.bzl",
    "content": "def _default_envoy_api_impl(ctx):\n    ctx.file(\"WORKSPACE\", \"\")\n    api_dirs = [\n        \"BUILD\",\n        \"bazel\",\n        \"envoy\",\n        \"examples\",\n        \"test\",\n        \"tools\",\n        \"versioning\",\n    ]\n    for d in api_dirs:\n        ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child(ctx.attr.reldir).get_child(d), d)\n\n_default_envoy_api = repository_rule(\n    implementation = _default_envoy_api_impl,\n    attrs = {\n        \"envoy_root\": attr.label(default = \"@envoy//:BUILD\"),\n        \"reldir\": attr.string(),\n    },\n)\n\ndef envoy_api_binding():\n    # Treat the data plane API as an external repo, this simplifies exporting\n    # the API to https://github.com/envoyproxy/data-plane-api. This is the\n    # shadow API for Envoy internal use, see #9479.\n    if \"envoy_api\" not in native.existing_rules().keys():\n        _default_envoy_api(name = \"envoy_api\", reldir = \"generated_api_shadow\")\n\n    # We also provide the non-shadowed API for developer use (see #9479).\n    if \"envoy_api_raw\" not in native.existing_rules().keys():\n        _default_envoy_api(name = \"envoy_api_canonical\", reldir = \"api\")\n\n    # TODO(https://github.com/envoyproxy/envoy/issues/7719) need to remove both bindings and use canonical rules\n    native.bind(\n        name = \"api_httpbody_protos\",\n        actual = \"@com_google_googleapis//google/api:httpbody_cc_proto\",\n    )\n    native.bind(\n        name = \"http_api_protos\",\n        actual = \"@com_google_googleapis//google/api:annotations_cc_proto\",\n    )\n"
  },
  {
    "path": "bazel/api_repositories.bzl",
    "content": "load(\"@envoy_api//bazel:repositories.bzl\", \"api_dependencies\")\n\ndef envoy_api_dependencies():\n    api_dependencies()\n"
  },
  {
    "path": "bazel/boringssl_static.patch",
    "content": "diff --git a/BUILD b/BUILD\nindex d7c731bf6..315cdeca0 100644\n--- a/BUILD\n+++ b/BUILD\n@@ -88,6 +88,7 @@ boringssl_copts = select({\n     \":windows_x86_64\": [\n         \"-DWIN32_LEAN_AND_MEAN\",\n         \"-DOPENSSL_NO_ASM\",\n+        \"-DBORINGSSL_IMPLEMENTATION\",\n     ],\n     \"//conditions:default\": [\"-DOPENSSL_NO_ASM\"],\n })\n@@ -141,6 +142,7 @@ cc_library(\n         \":windows_x86_64\": [\"-defaultlib:advapi32.lib\"],\n         \"//conditions:default\": [\"-lpthread\"],\n     }),\n+    linkstatic = True,\n     visibility = [\"//visibility:public\"],\n )\n \n@@ -150,6 +152,7 @@ cc_library(\n     hdrs = ssl_headers,\n     copts = boringssl_copts_cxx,\n     includes = [\"src/include\"],\n+    linkstatic = True,\n     visibility = [\"//visibility:public\"],\n     deps = [\n         \":crypto\",\n"
  },
  {
    "path": "bazel/coverage/BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n\n# TODO(lizan): Add test for this and upstream to upstream Bazel.\nfilegroup(\n    name = \"coverage_support\",\n    srcs = [\"collect_cc_coverage.sh\"],\n)\n\nexports_files([\"fuzz_coverage_wrapper.sh\"])\n"
  },
  {
    "path": "bazel/coverage/collect_cc_coverage.sh",
    "content": "#!/bin/bash -x\n#\n# This is a fork of https://github.com/bazelbuild/bazel/blob/3.1.0/tools/test/collect_cc_coverage.sh\n# to cover most of use cases in Envoy.\n# TODO(lizan): Move this to upstream Bazel\n#\n# Copyright 2016 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# This script collects code coverage data for C++ sources, after the tests\n# were executed.\n#\n# Bazel C++ code coverage collection support is poor and limited. There is\n# an ongoing effort to improve this (tracking issue #1118).\n#\n# Bazel uses the lcov tool for gathering coverage data. There is also\n# an experimental support for clang llvm coverage, which uses the .profraw\n# data files to compute the coverage report.\n#\n# This script assumes the following environment variables are set:\n# - COVERAGE_DIR            Directory containing metadata files needed for\n#                           coverage collection (e.g. gcda files, profraw).\n# - COVERAGE_MANIFEST       Location of the instrumented file manifest.\n# - COVERAGE_GCOV_PATH      Location of gcov. 
This is set by the TestRunner.\n# - COVERAGE_GCOV_OPTIONS   Additional options to pass to gcov.\n# - ROOT                    Location from where the code coverage collection\n#                           was invoked.\n#\n# The script looks in $COVERAGE_DIR for the C++ metadata coverage files (either\n# gcda or profraw) and uses either lcov or gcov to get the coverage data.\n# The coverage data is placed in $COVERAGE_OUTPUT_FILE.\n\nread -ra COVERAGE_GCOV_OPTIONS <<< \"${COVERAGE_GCOV_OPTIONS:-}\"\n\n# Checks if clang llvm coverage should be used instead of lcov.\nfunction uses_llvm() {\n  if stat \"${COVERAGE_DIR}\"/*.profraw >/dev/null 2>&1; then\n    return 0\n  fi\n  return 1\n}\n\n# Returns 0 if gcov must be used, 1 otherwise.\nfunction uses_gcov() {\n  [[ \"$GCOV_COVERAGE\" -eq \"1\"  ]] && return 0\n  return 1\n}\n\nfunction init_gcov() {\n  # Symlink the gcov tool such with a link called gcov. Clang comes with a tool\n  # called llvm-cov, which behaves like gcov if symlinked in this way (otherwise\n  # we would need to invoke it with \"llvm-cov gcov\").\n  # For more details see https://llvm.org/docs/CommandGuide/llvm-cov.html.\n  GCOV=\"${COVERAGE_DIR}/gcov\"\n  ln -s \"${COVERAGE_GCOV_PATH}\" \"${GCOV}\"\n}\n\n# Computes code coverage data using the clang generated metadata found under\n# $COVERAGE_DIR.\n# Writes the collected coverage into the given output file.\nfunction llvm_coverage() {\n  local output_file=\"${1}\" object_file object_files object_param=()\n  shift\n  export LLVM_PROFILE_FILE=\"${COVERAGE_DIR}/%h-%p-%m.profraw\"\n  \"${COVERAGE_GCOV_PATH}\" merge -output \"${output_file}.data\" \\\n      \"${COVERAGE_DIR}\"/*.profraw\n\n\n  object_files=\"$(find -L \"${RUNFILES_DIR}\" -type f -exec file -L {} \\; \\\n       | grep ELF | grep -v \"LSB core\" | sed 's,:.*,,')\"\n\n  for object_file in ${object_files}; do\n    object_param+=(-object \"${object_file}\")\n  done\n\n  llvm-cov export -instr-profile \"${output_file}.data\" -format=lcov \\\n    
  -ignore-filename-regex='.*external/.+' \\\n      -ignore-filename-regex='/tmp/.+' \\\n      \"${object_param[@]}\" | sed 's#/proc/self/cwd/##' > \"${output_file}\"\n}\n\n# Generates a code coverage report in gcov intermediate text format by invoking\n# gcov and using the profile data (.gcda) and notes (.gcno) files.\n#\n# The profile data files are expected to be found under $COVERAGE_DIR.\n# The notes file are expected to be found under $ROOT.\n#\n# - output_file     The location of the file where the generated code coverage\n#                   report is written.\nfunction gcov_coverage() {\n  local gcda gcno_path line output_file=\"${1}\"\n  shift\n\n  # Copy .gcno files next to their corresponding .gcda files in $COVERAGE_DIR\n  # because gcov expects them to be in the same directory.\n  while read -r line; do\n    if [[ ${line: -4} == \"gcno\" ]]; then\n      gcno_path=${line}\n      gcda=\"${COVERAGE_DIR}/$(dirname \"${gcno_path}\")/$(basename \"${gcno_path}\" .gcno).gcda\"\n      # If the gcda file was not found we skip generating coverage from the gcno\n      # file.\n      if [[ -f \"$gcda\" ]]; then\n          # gcov expects both gcno and gcda files to be in the same directory.\n          # We overcome this by copying the gcno to $COVERAGE_DIR where the gcda\n          # files are expected to be.\n          if [ ! 
-f \"${COVERAGE_DIR}/${gcno_path}\" ]; then\n              mkdir -p \"${COVERAGE_DIR}/$(dirname \"${gcno_path}\")\"\n              cp \"$ROOT/${gcno_path}\" \"${COVERAGE_DIR}/${gcno_path}\"\n          fi\n          # Invoke gcov to generate a code coverage report with the flags:\n          # -i              Output gcov file in an intermediate text format.\n          #                 The output is a single .gcov file per .gcda file.\n          #                 No source code is required.\n          # -o directory    The directory containing the .gcno and\n          #                 .gcda data files.\n          # \"${gcda\"}       The input file name. gcov is looking for data files\n          #                 named after the input filename without its extension.\n          # gcov produces files called <source file name>.gcov in the current\n          # directory. These contain the coverage information of the source file\n          # they correspond to. One .gcov file is produced for each source\n          # (or header) file containing code which was compiled to produce the\n          # .gcda files.\n          # Don't generate branch coverage (-b) because of a gcov issue that\n          # segfaults when both -i and -b are used (see\n          # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84879).\n          \"${GCOV}\" -i \"${COVERAGE_GCOV_OPTIONS[@]}\" -o \"$(dirname \"${gcda}\")\" \"${gcda}\"\n\n          # Append all .gcov files in the current directory to the output file.\n          cat ./*.gcov >> \"$output_file\"\n          # Delete the .gcov files.\n          rm ./*.gcov\n      fi\n    fi\n  done < \"${COVERAGE_MANIFEST}\"\n}\n\nfunction main() {\n  init_gcov\n\n  # If llvm code coverage is used, we output the raw code coverage report in\n  # the $COVERAGE_OUTPUT_FILE. 
This report will not be converted to any other\n  # format by LcovMerger.\n  # TODO(#5881): Convert profdata reports to lcov.\n  if uses_llvm; then\n    BAZEL_CC_COVERAGE_TOOL=\"PROFDATA\"\n  fi\n\n  # When using either gcov or lcov, have an output file specific to the test\n  # and format used. For lcov we generate a \".dat\" output file and for gcov\n  # a \".gcov\" output file. It is important that these files are generated under\n  # COVERAGE_DIR.\n  # When this script is invoked by tools/test/collect_coverage.sh either of\n  # these two coverage reports will be picked up by LcovMerger and their\n  # content will be converted and/or merged with other reports to an lcov\n  # format, generating the final code coverage report.\n  case \"$BAZEL_CC_COVERAGE_TOOL\" in\n        (\"GCOV\") gcov_coverage \"$COVERAGE_DIR/_cc_coverage.gcov\" ;;\n        (\"PROFDATA\") llvm_coverage \"$COVERAGE_DIR/_cc_coverage.dat\" ;;\n        (*) echo \"Coverage tool $BAZEL_CC_COVERAGE_TOOL not supported\" \\\n            && exit 1\n  esac\n}\n\nmain\n"
  },
  {
    "path": "bazel/coverage/fuzz_coverage_wrapper.sh",
    "content": "#!/bin/bash\n\nset -ex\n\nTEST_BINARY=$1\nshift\n\n# Clear existing corpus if previous run wasn't in sandbox\nrm -rf fuzz_corpus\n\nmkdir -p fuzz_corpus/seed_corpus\ncp -r \"$@\" fuzz_corpus/seed_corpus\n\n# TODO(asraa): When fuzz targets are stable, remove error suppression and run coverage while fuzzing.\nLLVM_PROFILE_FILE='' ${TEST_BINARY} fuzz_corpus -seed=\"${FUZZ_CORPUS_SEED:-1}\" -max_total_time=\"${FUZZ_CORPUS_TIME:-60}\" -max_len=2048 -rss_limit_mb=8192 -timeout=30 || :\n\n# Passing files instead of a directory will run fuzzing as a regression test.\n# TODO(asraa): Remove manual `|| :`, but this shouldn't be necessary.\n_CORPUS=\"$(find fuzz_corpus -type f)\"\nwhile read -r line; do CORPUS+=(\"$line\"); done \\\n    <<< \"$_CORPUS\"\n${TEST_BINARY} \"${CORPUS[@]}\" -rss_limit_mb=8192 || :\n"
  },
  {
    "path": "bazel/crates.bzl",
    "content": "\"\"\"\ncargo-raze crate workspace functions\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")\nload(\"@bazel_tools//tools/build_defs/repo:git.bzl\", \"new_git_repository\")\n\ndef _new_http_archive(name, **kwargs):\n    if not native.existing_rule(name):\n        http_archive(name = name, **kwargs)\n\ndef _new_git_repository(name, **kwargs):\n    if not native.existing_rule(name):\n        new_git_repository(name = name, **kwargs)\n\ndef raze_fetch_remote_crates():\n    _new_http_archive(\n        name = \"raze__ahash__0_3_8\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/ahash/ahash-0.3.8.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"ahash-0.3.8\",\n        build_file = Label(\"//bazel/external/cargo/remote:ahash-0.3.8.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__autocfg__1_0_0\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/autocfg/autocfg-1.0.0.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"autocfg-1.0.0\",\n        build_file = Label(\"//bazel/external/cargo/remote:autocfg-1.0.0.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__cfg_if__0_1_10\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/cfg-if/cfg-if-0.1.10.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"cfg-if-0.1.10\",\n        build_file = Label(\"//bazel/external/cargo/remote:cfg-if-0.1.10.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__hashbrown__0_7_2\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/hashbrown/hashbrown-0.7.2.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"hashbrown-0.7.2\",\n        build_file = Label(\"//bazel/external/cargo/remote:hashbrown-0.7.2.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__libc__0_2_74\",\n        url = 
\"https://crates-io.s3-us-west-1.amazonaws.com/crates/libc/libc-0.2.74.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"libc-0.2.74\",\n        build_file = Label(\"//bazel/external/cargo/remote:libc-0.2.74.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__log__0_4_11\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/log/log-0.4.11.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"log-0.4.11\",\n        build_file = Label(\"//bazel/external/cargo/remote:log-0.4.11.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__memory_units__0_4_0\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/memory_units/memory_units-0.4.0.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"memory_units-0.4.0\",\n        build_file = Label(\"//bazel/external/cargo/remote:memory_units-0.4.0.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__proxy_wasm__0_1_2\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/proxy-wasm/proxy-wasm-0.1.2.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"proxy-wasm-0.1.2\",\n        build_file = Label(\"//bazel/external/cargo/remote:proxy-wasm-0.1.2.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__wee_alloc__0_4_5\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/wee_alloc/wee_alloc-0.4.5.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"wee_alloc-0.4.5\",\n        build_file = Label(\"//bazel/external/cargo/remote:wee_alloc-0.4.5.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__winapi__0_3_9\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi/winapi-0.3.9.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"winapi-0.3.9\",\n        build_file = Label(\"//bazel/external/cargo/remote:winapi-0.3.9.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = 
\"raze__winapi_i686_pc_windows_gnu__0_4_0\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi-i686-pc-windows-gnu/winapi-i686-pc-windows-gnu-0.4.0.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"winapi-i686-pc-windows-gnu-0.4.0\",\n        build_file = Label(\"//bazel/external/cargo/remote:winapi-i686-pc-windows-gnu-0.4.0.BUILD\"),\n    )\n\n    _new_http_archive(\n        name = \"raze__winapi_x86_64_pc_windows_gnu__0_4_0\",\n        url = \"https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi-x86_64-pc-windows-gnu/winapi-x86_64-pc-windows-gnu-0.4.0.crate\",\n        type = \"tar.gz\",\n        strip_prefix = \"winapi-x86_64-pc-windows-gnu-0.4.0\",\n        build_file = Label(\"//bazel/external/cargo/remote:winapi-x86_64-pc-windows-gnu-0.4.0.BUILD\"),\n    )\n"
  },
  {
    "path": "bazel/dependency_imports.bzl",
    "content": "load(\"@rules_foreign_cc//:workspace_definitions.bzl\", \"rules_foreign_cc_dependencies\")\nload(\"@io_bazel_rules_go//go:deps.bzl\", \"go_register_toolchains\", \"go_rules_dependencies\")\nload(\"@envoy_build_tools//toolchains:rbe_toolchains_config.bzl\", \"rbe_toolchains_config\")\nload(\"@bazel_toolchains//rules/exec_properties:exec_properties.bzl\", \"create_rbe_exec_properties_dict\", \"custom_exec_properties\")\nload(\"@bazel_gazelle//:deps.bzl\", \"gazelle_dependencies\", \"go_repository\")\nload(\"@build_bazel_rules_apple//apple:repositories.bzl\", \"apple_rules_dependencies\")\nload(\"@upb//bazel:repository_defs.bzl\", upb_bazel_version_repository = \"bazel_version_repository\")\nload(\"@io_bazel_rules_rust//rust:repositories.bzl\", \"rust_repositories\")\nload(\"@io_bazel_rules_rust//:workspace.bzl\", \"bazel_version\")\nload(\"@config_validation_pip3//:requirements.bzl\", config_validation_pip_install = \"pip_install\")\nload(\"@configs_pip3//:requirements.bzl\", configs_pip_install = \"pip_install\")\nload(\"@headersplit_pip3//:requirements.bzl\", headersplit_pip_install = \"pip_install\")\nload(\"@kafka_pip3//:requirements.bzl\", kafka_pip_install = \"pip_install\")\nload(\"@protodoc_pip3//:requirements.bzl\", protodoc_pip_install = \"pip_install\")\nload(\"@thrift_pip3//:requirements.bzl\", thrift_pip_install = \"pip_install\")\nload(\"@rules_antlr//antlr:deps.bzl\", \"antlr_dependencies\")\n\n# go version for rules_go\nGO_VERSION = \"1.14.7\"\n\ndef envoy_dependency_imports(go_version = GO_VERSION):\n    rules_foreign_cc_dependencies()\n    go_rules_dependencies()\n    go_register_toolchains(go_version)\n    rbe_toolchains_config()\n    gazelle_dependencies()\n    apple_rules_dependencies()\n    rust_repositories()\n    bazel_version(name = \"bazel_version\")\n    upb_bazel_version_repository(name = \"upb_bazel_version\")\n    antlr_dependencies(472)\n\n    custom_exec_properties(\n        name = 
\"envoy_large_machine_exec_property\",\n        constants = {\n            \"LARGE_MACHINE\": create_rbe_exec_properties_dict(labels = dict(size = \"large\")),\n        },\n    )\n\n    go_repository(\n        name = \"org_golang_google_grpc\",\n        build_file_proto_mode = \"disable\",\n        importpath = \"google.golang.org/grpc\",\n        sum = \"h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=\",\n        version = \"v1.29.1\",\n    )\n\n    go_repository(\n        name = \"org_golang_x_net\",\n        importpath = \"golang.org/x/net\",\n        sum = \"h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=\",\n        version = \"v0.0.0-20190813141303-74dc4d7220e7\",\n    )\n\n    go_repository(\n        name = \"org_golang_x_text\",\n        importpath = \"golang.org/x/text\",\n        sum = \"h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=\",\n        version = \"v0.3.0\",\n    )\n\n    config_validation_pip_install()\n    configs_pip_install()\n    headersplit_pip_install()\n    kafka_pip_install()\n    protodoc_pip_install()\n    thrift_pip_install()\n"
  },
  {
    "path": "bazel/dev_binding.bzl",
    "content": "def _default_envoy_dev_impl(ctxt):\n    if \"LLVM_CONFIG\" in ctxt.os.environ:\n        ctxt.file(\"WORKSPACE\", \"\")\n        ctxt.file(\"BUILD.bazel\", \"\")\n        ctxt.symlink(ctxt.path(ctxt.attr.envoy_root).dirname.get_child(\"tools\").get_child(\"clang_tools\"), \"clang_tools\")\n\n_default_envoy_dev = repository_rule(\n    implementation = _default_envoy_dev_impl,\n    attrs = {\n        \"envoy_root\": attr.label(default = \"@envoy//:BUILD\"),\n    },\n)\n\ndef _clang_tools_impl(ctxt):\n    if \"LLVM_CONFIG\" in ctxt.os.environ:\n        llvm_config_path = ctxt.os.environ[\"LLVM_CONFIG\"]\n        exec_result = ctxt.execute([llvm_config_path, \"--includedir\"])\n        if exec_result.return_code != 0:\n            fail(llvm_config_path + \" --includedir returned %d\" % exec_result.return_code)\n        clang_tools_include_path = exec_result.stdout.rstrip()\n        exec_result = ctxt.execute([llvm_config_path, \"--libdir\"])\n        if exec_result.return_code != 0:\n            fail(llvm_config_path + \" --libdir returned %d\" % exec_result.return_code)\n        clang_tools_lib_path = exec_result.stdout.rstrip()\n        for include_dir in [\"clang\", \"clang-c\", \"llvm\", \"llvm-c\"]:\n            ctxt.symlink(clang_tools_include_path + \"/\" + include_dir, include_dir)\n        ctxt.symlink(clang_tools_lib_path, \"lib\")\n        ctxt.symlink(Label(\"@envoy_dev//clang_tools/support:BUILD.prebuilt\"), \"BUILD\")\n\n_clang_tools = repository_rule(\n    implementation = _clang_tools_impl,\n    environ = [\"LLVM_CONFIG\"],\n)\n\ndef envoy_dev_binding():\n    # Treat the Envoy developer tools that require llvm as an external repo, this avoids\n    # breaking bazel build //... when llvm is not installed.\n    if \"envoy_dev\" not in native.existing_rules().keys():\n        _default_envoy_dev(name = \"envoy_dev\")\n        _clang_tools(name = \"clang_tools\")\n"
  },
  {
    "path": "bazel/envoy_binary.bzl",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_binary\")\n\n# DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead.\n# Envoy binary targets\nload(\n    \":envoy_internal.bzl\",\n    \"envoy_copts\",\n    \"envoy_external_dep_path\",\n    \"envoy_stdlib_deps\",\n    \"tcmalloc_external_dep\",\n)\n\n# Envoy C++ binary targets should be specified with this function.\ndef envoy_cc_binary(\n        name,\n        srcs = [],\n        data = [],\n        testonly = 0,\n        visibility = None,\n        external_deps = [],\n        repository = \"\",\n        stamped = False,\n        deps = [],\n        linkopts = [],\n        tags = []):\n    if not linkopts:\n        linkopts = _envoy_linkopts()\n    if stamped:\n        linkopts = linkopts + _envoy_stamped_linkopts()\n        deps = deps + _envoy_stamped_deps()\n    deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + envoy_stdlib_deps()\n    cc_binary(\n        name = name,\n        srcs = srcs,\n        data = data,\n        copts = envoy_copts(repository),\n        linkopts = linkopts,\n        testonly = testonly,\n        linkstatic = 1,\n        visibility = visibility,\n        malloc = tcmalloc_external_dep(repository),\n        stamp = 1,\n        deps = deps,\n        tags = tags,\n    )\n\n# Select the given values if exporting is enabled in the current build.\ndef _envoy_select_exported_symbols(xs):\n    return select({\n        \"@envoy//bazel:enable_exported_symbols\": xs,\n        \"//conditions:default\": [],\n    })\n\n# Compute the final linkopts based on various options.\ndef _envoy_linkopts():\n    return select({\n        # The macOS system library transitively links common libraries (e.g., pthread).\n        \"@envoy//bazel:apple\": [\n            # See note here: https://luajit.org/install.html\n            \"-pagezero_size 10000\",\n            \"-image_base 100000000\",\n        ],\n        \"@envoy//bazel:windows_x86_64\": [\n            
\"-DEFAULTLIB:advapi32.lib\",\n            \"-DEFAULTLIB:ws2_32.lib\",\n            \"-DEFAULTLIB:iphlpapi.lib\",\n            \"-WX\",\n        ],\n        \"//conditions:default\": [\n            \"-pthread\",\n            \"-lrt\",\n            \"-ldl\",\n            \"-Wl,-z,relro,-z,now\",\n            \"-Wl,--hash-style=gnu\",\n        ],\n    }) + select({\n        \"@envoy//bazel:boringssl_fips\": [],\n        \"@envoy//bazel:windows_x86_64\": [],\n        \"//conditions:default\": [\"-pie\"],\n    }) + _envoy_select_exported_symbols([\"-Wl,-E\"])\n\ndef _envoy_stamped_deps():\n    return select({\n        \"@envoy//bazel:windows_x86_64\": [],\n        \"@envoy//bazel:apple\": [\n            \"@envoy//bazel:raw_build_id.ldscript\",\n        ],\n        \"//conditions:default\": [\n            \"@envoy//bazel:gnu_build_id.ldscript\",\n        ],\n    })\n\ndef _envoy_stamped_linkopts():\n    return select({\n        # Coverage builds in CI are failing to link when setting a build ID.\n        #\n        # /usr/bin/ld.gold: internal error in write_build_id, at ../../gold/layout.cc:5419\n        \"@envoy//bazel:coverage_build\": [],\n        \"@envoy//bazel:windows_x86_64\": [],\n\n        # macOS doesn't have an official equivalent to the `.note.gnu.build-id`\n        # ELF section, so just stuff the raw ID into a new text section.\n        \"@envoy//bazel:apple\": [\n            \"-sectcreate __TEXT __build_id\",\n            \"$(location @envoy//bazel:raw_build_id.ldscript)\",\n        ],\n\n        # Note: assumes GNU GCC (or compatible) handling of `--build-id` flag.\n        \"//conditions:default\": [\n            \"-Wl,@$(location @envoy//bazel:gnu_build_id.ldscript)\",\n        ],\n    })\n"
  },
  {
    "path": "bazel/envoy_build_system.bzl",
    "content": "# The main Envoy bazel file. Load this file for all Envoy-specific build macros\n# and rules that you'd like to use in your BUILD files.\nload(\"@rules_foreign_cc//tools/build_defs:cmake.bzl\", \"cmake_external\")\nload(\":envoy_binary.bzl\", _envoy_cc_binary = \"envoy_cc_binary\")\nload(\":envoy_internal.bzl\", \"envoy_external_dep_path\")\nload(\n    \":envoy_library.bzl\",\n    _envoy_basic_cc_library = \"envoy_basic_cc_library\",\n    _envoy_cc_extension = \"envoy_cc_extension\",\n    _envoy_cc_library = \"envoy_cc_library\",\n    _envoy_cc_posix_library = \"envoy_cc_posix_library\",\n    _envoy_cc_win32_library = \"envoy_cc_win32_library\",\n    _envoy_include_prefix = \"envoy_include_prefix\",\n    _envoy_proto_library = \"envoy_proto_library\",\n)\nload(\n    \":envoy_select.bzl\",\n    _envoy_select_boringssl = \"envoy_select_boringssl\",\n    _envoy_select_google_grpc = \"envoy_select_google_grpc\",\n    _envoy_select_hot_restart = \"envoy_select_hot_restart\",\n    _envoy_select_new_codecs_in_integration_tests = \"envoy_select_new_codecs_in_integration_tests\",\n    _envoy_select_wasm = \"envoy_select_wasm\",\n    _envoy_select_wasm_all_v8_wavm_none = \"envoy_select_wasm_all_v8_wavm_none\",\n    _envoy_select_wasm_v8 = \"envoy_select_wasm_v8\",\n    _envoy_select_wasm_wavm = \"envoy_select_wasm_wavm\",\n)\nload(\n    \":envoy_test.bzl\",\n    _envoy_benchmark_test = \"envoy_benchmark_test\",\n    _envoy_cc_benchmark_binary = \"envoy_cc_benchmark_binary\",\n    _envoy_cc_fuzz_test = \"envoy_cc_fuzz_test\",\n    _envoy_cc_mock = \"envoy_cc_mock\",\n    _envoy_cc_test = \"envoy_cc_test\",\n    _envoy_cc_test_binary = \"envoy_cc_test_binary\",\n    _envoy_cc_test_library = \"envoy_cc_test_library\",\n    _envoy_py_test_binary = \"envoy_py_test_binary\",\n    _envoy_sh_test = \"envoy_sh_test\",\n)\nload(\n    \"@envoy_build_config//:extensions_build_config.bzl\",\n    \"EXTENSION_PACKAGE_VISIBILITY\",\n)\n\ndef envoy_package():\n    
native.package(default_visibility = [\"//visibility:public\"])\n\ndef envoy_extension_package():\n    native.package(default_visibility = EXTENSION_PACKAGE_VISIBILITY)\n\n# A genrule variant that can output a directory. This is useful when doing things like\n# generating a fuzz corpus mechanically.\ndef _envoy_directory_genrule_impl(ctx):\n    tree = ctx.actions.declare_directory(ctx.attr.name + \".outputs\")\n    ctx.actions.run_shell(\n        inputs = ctx.files.srcs,\n        tools = ctx.files.tools,\n        outputs = [tree],\n        command = \"mkdir -p \" + tree.path + \" && \" + ctx.expand_location(ctx.attr.cmd),\n        env = {\"GENRULE_OUTPUT_DIR\": tree.path},\n    )\n    return [DefaultInfo(files = depset([tree]))]\n\nenvoy_directory_genrule = rule(\n    implementation = _envoy_directory_genrule_impl,\n    attrs = {\n        \"srcs\": attr.label_list(),\n        \"cmd\": attr.string(),\n        \"tools\": attr.label_list(),\n    },\n)\n\n# External CMake C++ library targets should be specified with this function. 
This defaults\n# to building the dependencies with ninja\ndef envoy_cmake_external(\n        name,\n        cache_entries = {},\n        debug_cache_entries = {},\n        cmake_options = [\"-GNinja\"],\n        make_commands = [\"ninja -v\", \"ninja -v install\"],\n        lib_source = \"\",\n        postfix_script = \"\",\n        static_libraries = [],\n        copy_pdb = False,\n        pdb_name = \"\",\n        cmake_files_dir = \"$BUILD_TMPDIR/CMakeFiles\",\n        generate_crosstool_file = False,\n        **kwargs):\n    cache_entries.update({\"CMAKE_BUILD_TYPE\": \"Bazel\"})\n    cache_entries_debug = dict(cache_entries)\n    cache_entries_debug.update(debug_cache_entries)\n\n    pf = \"\"\n    if copy_pdb:\n        # TODO: Add iterator of the first list presented of these options;\n        # static_libraries[.pdb], pdb_names, name[.pdb] files\n        if pdb_name == \"\":\n            pdb_name = name\n\n        copy_command = \"cp {cmake_files_dir}/{pdb_name}.dir/{pdb_name}.pdb $INSTALLDIR/lib/{pdb_name}.pdb\".format(cmake_files_dir = cmake_files_dir, pdb_name = pdb_name)\n        if postfix_script != \"\":\n            copy_command = copy_command + \" && \" + postfix_script\n\n        pf = select({\n            \"@envoy//bazel:windows_dbg_build\": copy_command,\n            \"//conditions:default\": postfix_script,\n        })\n    else:\n        pf = postfix_script\n\n    cmake_external(\n        name = name,\n        cache_entries = select({\n            \"@envoy//bazel:dbg_build\": cache_entries_debug,\n            \"//conditions:default\": cache_entries,\n        }),\n        cmake_options = cmake_options,\n        # TODO(lizan): Make this always true\n        generate_crosstool_file = select({\n            \"@envoy//bazel:windows_x86_64\": True,\n            \"//conditions:default\": generate_crosstool_file,\n        }),\n        lib_source = lib_source,\n        make_commands = make_commands,\n        postfix_script = pf,\n        static_libraries 
= static_libraries,\n        **kwargs\n    )\n\n# Used to select a dependency that has different implementations on POSIX vs Windows.\n# The platform-specific implementations should be specified with envoy_cc_posix_library\n# and envoy_cc_win32_library respectively\ndef envoy_cc_platform_dep(name):\n    return select({\n        \"@envoy//bazel:windows_x86_64\": [name + \"_win32\"],\n        \"//conditions:default\": [name + \"_posix\"],\n    })\n\n# Envoy proto descriptor targets should be specified with this function.\n# This is used for testing only.\ndef envoy_proto_descriptor(name, out, srcs = [], external_deps = []):\n    input_files = [\"$(location \" + src + \")\" for src in srcs]\n    include_paths = [\".\", native.package_name()]\n\n    if \"api_httpbody_protos\" in external_deps:\n        srcs.append(\"@com_google_googleapis//google/api:httpbody.proto\")\n        include_paths.append(\"external/com_google_googleapis\")\n\n    if \"http_api_protos\" in external_deps:\n        srcs.append(\"@com_google_googleapis//google/api:annotations.proto\")\n        srcs.append(\"@com_google_googleapis//google/api:http.proto\")\n        include_paths.append(\"external/com_google_googleapis\")\n\n    if \"well_known_protos\" in external_deps:\n        srcs.append(\"@com_google_protobuf//:well_known_protos\")\n        include_paths.append(\"external/com_google_protobuf/src\")\n\n    options = [\"--include_imports\"]\n    options.extend([\"-I\" + include_path for include_path in include_paths])\n    options.append(\"--descriptor_set_out=$@\")\n\n    cmd = \"$(location //external:protoc) \" + \" \".join(options + input_files)\n    native.genrule(\n        name = name,\n        srcs = srcs,\n        outs = [out],\n        cmd = cmd,\n        tools = [\"//external:protoc\"],\n    )\n\n# Dependencies on Google grpc should be wrapped with this function.\ndef envoy_google_grpc_external_deps():\n    return envoy_select_google_grpc([envoy_external_dep_path(\"grpc\")])\n\n# Here 
we create wrappers for each of the public targets within the separate bazel\n# files loaded above. This maintains envoy_build_system.bzl as the preferred import\n# for BUILD files that need these build macros. Do not use the imports directly\n# from the other bzl files (e.g. envoy_select.bzl, envoy_binary.bzl, etc.)\n\n# Select wrappers (from envoy_select.bzl)\nenvoy_select_boringssl = _envoy_select_boringssl\nenvoy_select_google_grpc = _envoy_select_google_grpc\nenvoy_select_hot_restart = _envoy_select_hot_restart\nenvoy_select_wasm = _envoy_select_wasm\nenvoy_select_wasm_all_v8_wavm_none = _envoy_select_wasm_all_v8_wavm_none\nenvoy_select_wasm_wavm = _envoy_select_wasm_wavm\nenvoy_select_wasm_v8 = _envoy_select_wasm_v8\nenvoy_select_new_codecs_in_integration_tests = _envoy_select_new_codecs_in_integration_tests\n\n# Binary wrappers (from envoy_binary.bzl)\nenvoy_cc_binary = _envoy_cc_binary\n\n# Library wrappers (from envoy_library.bzl)\nenvoy_basic_cc_library = _envoy_basic_cc_library\nenvoy_cc_extension = _envoy_cc_extension\nenvoy_cc_library = _envoy_cc_library\nenvoy_cc_posix_library = _envoy_cc_posix_library\nenvoy_cc_win32_library = _envoy_cc_win32_library\nenvoy_include_prefix = _envoy_include_prefix\nenvoy_proto_library = _envoy_proto_library\n\n# Test wrappers (from envoy_test.bzl)\nenvoy_cc_fuzz_test = _envoy_cc_fuzz_test\nenvoy_cc_mock = _envoy_cc_mock\nenvoy_cc_test = _envoy_cc_test\nenvoy_cc_test_binary = _envoy_cc_test_binary\nenvoy_cc_test_library = _envoy_cc_test_library\nenvoy_cc_benchmark_binary = _envoy_cc_benchmark_binary\nenvoy_benchmark_test = _envoy_benchmark_test\nenvoy_py_test_binary = _envoy_py_test_binary\nenvoy_sh_test = _envoy_sh_test\n"
  },
  {
    "path": "bazel/envoy_internal.bzl",
    "content": "# DO NOT LOAD THIS FILE. Targets from this file should be considered private\n# and not used outside of the @envoy//bazel package.\nload(\":envoy_select.bzl\", \"envoy_select_google_grpc\", \"envoy_select_hot_restart\")\n\n# Compute the final copts based on various options.\ndef envoy_copts(repository, test = False):\n    posix_options = [\n        \"-Wall\",\n        \"-Wextra\",\n        \"-Werror\",\n        \"-Wnon-virtual-dtor\",\n        \"-Woverloaded-virtual\",\n        \"-Wold-style-cast\",\n        \"-Wformat\",\n        \"-Wformat-security\",\n        \"-Wvla\",\n    ]\n\n    # Windows options for cleanest service compilation;\n    #   General MSVC C++ options for Envoy current expectations.\n    #   Target windows.h for all Windows 10 (0x0A) API prototypes (ntohll etc)\n    #   (See https://msdn.microsoft.com/en-us/library/windows/desktop/aa383745(v=vs.85).aspx )\n    #   Optimize Windows headers by dropping GUI-oriented features from compilation\n    msvc_options = [\n        \"-WX\",\n        \"-Zc:__cplusplus\",\n        \"-DWIN32\",\n        \"-D_WIN32_WINNT=0x0A00\",  # _WIN32_WINNT_WIN10\n        \"-DNTDDI_VERSION=0x0A000000\",  # NTDDI_WIN10\n        \"-DWIN32_LEAN_AND_MEAN\",\n        \"-DNOUSER\",\n        \"-DNOMCX\",\n        \"-DNOIME\",\n        \"-DNOCRYPT\",\n        # Ignore unguarded gcc pragmas in quiche (unrecognized by MSVC)\n        # TODO(wrowe): Drop this change when fixed in bazel/external/quiche.genrule_cmd\n        \"-wd4068\",\n        # this is to silence the incorrect MSVC compiler warning when trying to convert between\n        # std::optional data types while conversions between primitive types are producing no error\n        \"-wd4244\",\n    ]\n\n    return select({\n               repository + \"//bazel:windows_x86_64\": msvc_options,\n               \"//conditions:default\": posix_options,\n           }) + select({\n               # Bazel adds an implicit -DNDEBUG for opt.\n               repository + 
\"//bazel:opt_build\": [] if test else [\"-ggdb3\", \"-gsplit-dwarf\"],\n               repository + \"//bazel:fastbuild_build\": [],\n               repository + \"//bazel:dbg_build\": [\"-ggdb3\", \"-gsplit-dwarf\"],\n               repository + \"//bazel:windows_opt_build\": [],\n               repository + \"//bazel:windows_fastbuild_build\": [],\n               repository + \"//bazel:windows_dbg_build\": [],\n           }) + select({\n               repository + \"//bazel:clang_build\": [\"-fno-limit-debug-info\", \"-Wgnu-conditional-omitted-operand\", \"-Wc++2a-extensions\", \"-Wrange-loop-analysis\"],\n               repository + \"//bazel:gcc_build\": [\"-Wno-maybe-uninitialized\"],\n               \"//conditions:default\": [],\n           }) + select({\n               repository + \"//bazel:no_debug_info\": [\"-g0\"],\n               \"//conditions:default\": [],\n           }) + select({\n               repository + \"//bazel:disable_tcmalloc\": [\"-DABSL_MALLOC_HOOK_MMAP_DISABLE\"],\n               repository + \"//bazel:disable_tcmalloc_on_linux_x86_64\": [\"-DABSL_MALLOC_HOOK_MMAP_DISABLE\"],\n               repository + \"//bazel:gperftools_tcmalloc\": [\"-DGPERFTOOLS_TCMALLOC\"],\n               repository + \"//bazel:gperftools_tcmalloc_on_linux_x86_64\": [\"-DGPERFTOOLS_TCMALLOC\"],\n               repository + \"//bazel:debug_tcmalloc\": [\"-DENVOY_MEMORY_DEBUG_ENABLED=1\", \"-DGPERFTOOLS_TCMALLOC\"],\n               repository + \"//bazel:debug_tcmalloc_on_linux_x86_64\": [\"-DENVOY_MEMORY_DEBUG_ENABLED=1\", \"-DGPERFTOOLS_TCMALLOC\"],\n               repository + \"//bazel:linux_x86_64\": [\"-DTCMALLOC\"],\n               \"//conditions:default\": [\"-DGPERFTOOLS_TCMALLOC\"],\n           }) + select({\n               repository + \"//bazel:disable_signal_trace\": [],\n               \"//conditions:default\": [\"-DENVOY_HANDLE_SIGNALS\"],\n           }) + select({\n               repository + \"//bazel:disable_object_dump_on_signal_trace\": [],\n 
              \"//conditions:default\": [\"-DENVOY_OBJECT_TRACE_ON_DUMP\"],\n           }) + select({\n               repository + \"//bazel:disable_deprecated_features\": [\"-DENVOY_DISABLE_DEPRECATED_FEATURES\"],\n               \"//conditions:default\": [],\n           }) + select({\n               repository + \"//bazel:enable_log_debug_assert_in_release\": [\"-DENVOY_LOG_DEBUG_ASSERT_IN_RELEASE\"],\n               \"//conditions:default\": [],\n           }) + select({\n               repository + \"//bazel:disable_known_issue_asserts\": [\"-DENVOY_DISABLE_KNOWN_ISSUE_ASSERTS\"],\n               \"//conditions:default\": [],\n           }) + select({\n               # APPLE_USE_RFC_3542 is needed to support IPV6_PKTINFO in MAC OS.\n               repository + \"//bazel:apple\": [\"-D__APPLE_USE_RFC_3542\"],\n               \"//conditions:default\": [],\n           }) + envoy_select_hot_restart([\"-DENVOY_HOT_RESTART\"], repository) + \\\n           _envoy_select_perf_annotation([\"-DENVOY_PERF_ANNOTATION\"]) + \\\n           envoy_select_google_grpc([\"-DENVOY_GOOGLE_GRPC\"], repository) + \\\n           _envoy_select_path_normalization_by_default([\"-DENVOY_NORMALIZE_PATH_BY_DEFAULT\"], repository)\n\n# References to Envoy external dependencies should be wrapped with this function.\ndef envoy_external_dep_path(dep):\n    return \"//external:%s\" % dep\n\ndef envoy_linkstatic():\n    return select({\n        \"@envoy//bazel:dynamic_link_tests\": 0,\n        \"//conditions:default\": 1,\n    })\n\ndef envoy_select_force_libcpp(if_libcpp, default = None):\n    return select({\n        \"@envoy//bazel:force_libcpp\": if_libcpp,\n        \"@envoy//bazel:apple\": [],\n        \"@envoy//bazel:windows_x86_64\": [],\n        \"//conditions:default\": default or [],\n    })\n\ndef envoy_stdlib_deps():\n    return select({\n        \"@envoy//bazel:asan_build\": [\"@envoy//bazel:dynamic_stdlib\"],\n        \"@envoy//bazel:msan_build\": 
[\"@envoy//bazel:dynamic_stdlib\"],\n        \"@envoy//bazel:tsan_build\": [\"@envoy//bazel:dynamic_stdlib\"],\n        \"//conditions:default\": [\"@envoy//bazel:static_stdlib\"],\n    })\n\n# Dependencies on tcmalloc_and_profiler should be wrapped with this function.\ndef tcmalloc_external_dep(repository):\n    return select({\n        repository + \"//bazel:disable_tcmalloc\": None,\n        repository + \"//bazel:disable_tcmalloc_on_linux_x86_64\": None,\n        repository + \"//bazel:debug_tcmalloc\": envoy_external_dep_path(\"gperftools\"),\n        repository + \"//bazel:debug_tcmalloc_on_linux_x86_64\": envoy_external_dep_path(\"gperftools\"),\n        repository + \"//bazel:gperftools_tcmalloc\": envoy_external_dep_path(\"gperftools\"),\n        repository + \"//bazel:gperftools_tcmalloc_on_linux_x86_64\": envoy_external_dep_path(\"gperftools\"),\n        repository + \"//bazel:linux_x86_64\": envoy_external_dep_path(\"tcmalloc\"),\n        \"//conditions:default\": envoy_external_dep_path(\"gperftools\"),\n    })\n\n# Select the given values if default path normalization is on in the current build.\ndef _envoy_select_path_normalization_by_default(xs, repository = \"\"):\n    return select({\n        repository + \"//bazel:enable_path_normalization_by_default\": xs,\n        \"//conditions:default\": [],\n    })\n\ndef _envoy_select_perf_annotation(xs):\n    return select({\n        \"@envoy//bazel:enable_perf_annotation\": xs,\n        \"//conditions:default\": [],\n    })\n"
  },
  {
    "path": "bazel/envoy_library.bzl",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\n# DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead.\n# Envoy library targets\nload(\n    \":envoy_internal.bzl\",\n    \"envoy_copts\",\n    \"envoy_external_dep_path\",\n    \"envoy_linkstatic\",\n)\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_cc_py_proto_library\")\nload(\n    \"@envoy_build_config//:extensions_build_config.bzl\",\n    \"EXTENSION_CONFIG_VISIBILITY\",\n)\n\n# As above, but wrapped in list form for adding to dep lists. This smell seems needed as\n# SelectorValue values have to match the attribute type. See\n# https://github.com/bazelbuild/bazel/issues/2273.\ndef tcmalloc_external_deps(repository):\n    return select({\n        repository + \"//bazel:disable_tcmalloc\": [],\n        repository + \"//bazel:disable_tcmalloc_on_linux_x86_64\": [],\n        repository + \"//bazel:debug_tcmalloc\": [envoy_external_dep_path(\"gperftools\")],\n        repository + \"//bazel:debug_tcmalloc_on_linux_x86_64\": [envoy_external_dep_path(\"gperftools\")],\n        repository + \"//bazel:gperftools_tcmalloc\": [envoy_external_dep_path(\"gperftools\")],\n        repository + \"//bazel:gperftools_tcmalloc_on_linux_x86_64\": [envoy_external_dep_path(\"gperftools\")],\n        repository + \"//bazel:linux_x86_64\": [envoy_external_dep_path(\"tcmalloc\")],\n        \"//conditions:default\": [envoy_external_dep_path(\"gperftools\")],\n    })\n\n# Envoy C++ library targets that need no transformations or additional dependencies before being\n# passed to cc_library should be specified with this function. 
Note: this exists to ensure that\n# all envoy targets pass through an envoy-declared Starlark function where they can be modified\n# before being passed to a native bazel function.\ndef envoy_basic_cc_library(name, deps = [], external_deps = [], **kargs):\n    cc_library(\n        name = name,\n        deps = deps + [envoy_external_dep_path(dep) for dep in external_deps],\n        **kargs\n    )\n\n# All Envoy extensions must be tagged with their security hardening stance with\n# respect to downstream and upstream data plane threats. These are verbose\n# labels intended to make clear the trust that operators may place in\n# extensions.\nEXTENSION_SECURITY_POSTURES = [\n    # This extension is hardened against untrusted downstream traffic. It\n    # assumes that the upstream is trusted.\n    \"robust_to_untrusted_downstream\",\n    # This extension is hardened against both untrusted downstream and upstream\n    # traffic.\n    \"robust_to_untrusted_downstream_and_upstream\",\n    # This extension is not hardened and should only be used in deployments\n    # where both the downstream and upstream are trusted.\n    \"requires_trusted_downstream_and_upstream\",\n    # This is functionally equivalent to\n    # requires_trusted_downstream_and_upstream, but acts as a placeholder to\n    # allow us to identify extensions that need classifying.\n    \"unknown\",\n    # Not relevant to data plane threats, e.g. stats sinks.\n    \"data_plane_agnostic\",\n]\n\nEXTENSION_STATUS_VALUES = [\n    # This extension is stable and is expected to be production usable.\n    \"stable\",\n    # This extension is functional but has not had substantial production burn\n    # time, use only with this caveat.\n    \"alpha\",\n    # This extension is work-in-progress. 
Functionality is incomplete and it is\n    # not intended for production use.\n    \"wip\",\n]\n\ndef envoy_cc_extension(\n        name,\n        security_posture,\n        # Only set this for internal, undocumented extensions.\n        undocumented = False,\n        status = \"stable\",\n        tags = [],\n        extra_visibility = [],\n        visibility = EXTENSION_CONFIG_VISIBILITY,\n        **kwargs):\n    if security_posture not in EXTENSION_SECURITY_POSTURES:\n        fail(\"Unknown extension security posture: \" + security_posture)\n    if status not in EXTENSION_STATUS_VALUES:\n        fail(\"Unknown extension status: \" + status)\n    if \"//visibility:public\" not in visibility:\n        visibility = visibility + extra_visibility\n    envoy_cc_library(name, tags = tags, visibility = visibility, **kwargs)\n\n# Envoy C++ library targets should be specified with this function.\ndef envoy_cc_library(\n        name,\n        srcs = [],\n        hdrs = [],\n        copts = [],\n        visibility = None,\n        external_deps = [],\n        tcmalloc_dep = None,\n        repository = \"\",\n        tags = [],\n        deps = [],\n        strip_include_prefix = None,\n        textual_hdrs = None,\n        defines = []):\n    if tcmalloc_dep:\n        deps += tcmalloc_external_deps(repository)\n\n    cc_library(\n        name = name,\n        srcs = srcs,\n        hdrs = hdrs,\n        copts = envoy_copts(repository) + copts,\n        visibility = visibility,\n        tags = tags,\n        textual_hdrs = textual_hdrs,\n        deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + [\n            repository + \"//include/envoy/common:base_includes\",\n            repository + \"//source/common/common:fmt_lib\",\n            envoy_external_dep_path(\"abseil_flat_hash_map\"),\n            envoy_external_dep_path(\"abseil_flat_hash_set\"),\n            envoy_external_dep_path(\"abseil_strings\"),\n            envoy_external_dep_path(\"spdlog\"),\n 
           envoy_external_dep_path(\"fmtlib\"),\n        ],\n        include_prefix = envoy_include_prefix(native.package_name()),\n        alwayslink = 1,\n        linkstatic = envoy_linkstatic(),\n        strip_include_prefix = strip_include_prefix,\n        defines = defines,\n    )\n\n    # Intended for usage by external consumers. This allows them to disambiguate\n    # include paths via `external/envoy...`\n    cc_library(\n        name = name + \"_with_external_headers\",\n        hdrs = hdrs,\n        copts = envoy_copts(repository) + copts,\n        visibility = visibility,\n        tags = [\"nocompdb\"],\n        deps = [\":\" + name],\n        strip_include_prefix = strip_include_prefix,\n    )\n\n# Used to specify a library that only builds on POSIX\ndef envoy_cc_posix_library(name, srcs = [], hdrs = [], **kargs):\n    envoy_cc_library(\n        name = name + \"_posix\",\n        srcs = select({\n            \"@envoy//bazel:windows_x86_64\": [],\n            \"//conditions:default\": srcs,\n        }),\n        hdrs = select({\n            \"@envoy//bazel:windows_x86_64\": [],\n            \"//conditions:default\": hdrs,\n        }),\n        **kargs\n    )\n\n# Used to specify a library that only builds on Windows\ndef envoy_cc_win32_library(name, srcs = [], hdrs = [], **kargs):\n    envoy_cc_library(\n        name = name + \"_win32\",\n        srcs = select({\n            \"@envoy//bazel:windows_x86_64\": srcs,\n            \"//conditions:default\": [],\n        }),\n        hdrs = select({\n            \"@envoy//bazel:windows_x86_64\": hdrs,\n            \"//conditions:default\": [],\n        }),\n        **kargs\n    )\n\n# Transform the package path (e.g. include/envoy/common) into a path for\n# exporting the package headers at (e.g. envoy/common). Source files can then\n# include using this path scheme (e.g. 
#include \"envoy/common/time.h\").\ndef envoy_include_prefix(path):\n    if path.startswith(\"source/\") or path.startswith(\"include/\"):\n        return \"/\".join(path.split(\"/\")[1:])\n    return None\n\n# Envoy proto targets should be specified with this function.\ndef envoy_proto_library(name, external_deps = [], **kwargs):\n    api_cc_py_proto_library(\n        name,\n        # Avoid generating .so, we don't need it, can interfere with builds\n        # such as OSS-Fuzz.\n        linkstatic = 1,\n        visibility = [\"//visibility:public\"],\n        **kwargs\n    )\n"
  },
  {
    "path": "bazel/envoy_select.bzl",
    "content": "# DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead.\n# Envoy select targets. This is in a separate file to avoid a circular\n# dependency with envoy_build_system.bzl.\n\n# Used to select a dependency that has different implementations on POSIX vs Windows.\n# The platform-specific implementations should be specified with envoy_cc_posix_library\n# and envoy_cc_win32_library respectively\ndef envoy_cc_platform_dep(name):\n    return select({\n        \"@envoy//bazel:windows_x86_64\": [name + \"_win32\"],\n        \"//conditions:default\": [name + \"_posix\"],\n    })\n\ndef envoy_select_boringssl(if_fips, default = None, if_disabled = None):\n    return select({\n        \"@envoy//bazel:boringssl_fips\": if_fips,\n        \"@envoy//bazel:boringssl_disabled\": if_disabled or [],\n        \"//conditions:default\": default or [],\n    })\n\n# Selects the given values if Google gRPC is enabled in the current build.\ndef envoy_select_google_grpc(xs, repository = \"\"):\n    return select({\n        repository + \"//bazel:disable_google_grpc\": [],\n        \"//conditions:default\": xs,\n    })\n\n# Selects the given values if hot restart is enabled in the current build.\ndef envoy_select_hot_restart(xs, repository = \"\"):\n    return select({\n        repository + \"//bazel:disable_hot_restart_or_apple\": [],\n        \"//conditions:default\": xs,\n    })\n\n# Selects the given values depending on the WASM runtimes enabled in the current build.\ndef envoy_select_wasm(xs):\n    return select({\n        \"@envoy//bazel:wasm_none\": [],\n        \"//conditions:default\": xs,\n    })\n\ndef envoy_select_wasm_v8(xs):\n    return select({\n        \"@envoy//bazel:wasm_wavm\": [],\n        \"@envoy//bazel:wasm_none\": [],\n        \"//conditions:default\": xs,\n    })\n\ndef envoy_select_wasm_wavm(xs):\n    return select({\n        \"@envoy//bazel:wasm_all\": xs,\n        \"@envoy//bazel:wasm_wavm\": xs,\n        \"//conditions:default\": [],\n    
})\n\ndef envoy_select_wasm_all_v8_wavm_none(xs1, xs2, xs3, xs4):\n    return select({\n        \"@envoy//bazel:wasm_all\": xs1,\n        \"@envoy//bazel:wasm_v8\": xs2,\n        \"@envoy//bazel:wasm_wavm\": xs3,\n        \"@envoy//bazel:wasm_none\": xs4,\n        \"//conditions:default\": xs2,\n    })\n\n# Select the given values if use legacy codecs in test is on in the current build.\ndef envoy_select_new_codecs_in_integration_tests(xs, repository = \"\"):\n    return select({\n        repository + \"//bazel:enable_new_codecs_in_integration_tests\": xs,\n        \"//conditions:default\": [],\n    })\n"
  },
  {
    "path": "bazel/envoy_test.bzl",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_binary\")\nload(\"@rules_cc//cc:defs.bzl\", \"cc_binary\", \"cc_library\", \"cc_test\")\n\n# DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead.\n# Envoy test targets. This includes both test library and test binary targets.\nload(\"@bazel_tools//tools/build_defs/pkg:pkg.bzl\", \"pkg_tar\")\nload(\":envoy_binary.bzl\", \"envoy_cc_binary\")\nload(\":envoy_library.bzl\", \"tcmalloc_external_deps\")\nload(\n    \":envoy_internal.bzl\",\n    \"envoy_copts\",\n    \"envoy_external_dep_path\",\n    \"envoy_linkstatic\",\n    \"envoy_select_force_libcpp\",\n    \"envoy_stdlib_deps\",\n    \"tcmalloc_external_dep\",\n)\n\n# Envoy C++ related test infrastructure (that want gtest, gmock, but may be\n# relied on by envoy_cc_test_library) should use this function.\ndef _envoy_cc_test_infrastructure_library(\n        name,\n        srcs = [],\n        hdrs = [],\n        data = [],\n        external_deps = [],\n        deps = [],\n        repository = \"\",\n        tags = [],\n        include_prefix = None,\n        copts = [],\n        **kargs):\n    # Add implicit tcmalloc external dependency(if available) in order to enable CPU and heap profiling in tests.\n    deps += tcmalloc_external_deps(repository)\n    cc_library(\n        name = name,\n        srcs = srcs,\n        hdrs = hdrs,\n        data = data,\n        copts = envoy_copts(repository, test = True) + copts,\n        testonly = 1,\n        deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + [\n            envoy_external_dep_path(\"googletest\"),\n        ],\n        tags = tags,\n        include_prefix = include_prefix,\n        alwayslink = 1,\n        linkstatic = envoy_linkstatic(),\n        **kargs\n    )\n\n# Compute the test linkopts based on various options.\ndef _envoy_test_linkopts():\n    return select({\n        \"@envoy//bazel:apple\": [\n            # See note here: https://luajit.org/install.html\n            
\"-pagezero_size 10000\",\n            \"-image_base 100000000\",\n        ],\n        \"@envoy//bazel:windows_x86_64\": [\n            \"-DEFAULTLIB:advapi32.lib\",\n            \"-DEFAULTLIB:ws2_32.lib\",\n            \"-DEFAULTLIB:iphlpapi.lib\",\n            \"-WX\",\n        ],\n\n        # TODO(mattklein123): It's not great that we universally link against the following libs.\n        # In particular, -latomic and -lrt are not needed on all platforms. Make this more granular.\n        \"//conditions:default\": [\"-pthread\", \"-lrt\", \"-ldl\"],\n    }) + envoy_select_force_libcpp([], [\"-lstdc++fs\", \"-latomic\"])\n\n# Envoy C++ fuzz test targets. These are not included in coverage runs.\ndef envoy_cc_fuzz_test(\n        name,\n        corpus,\n        dictionaries = [],\n        repository = \"\",\n        size = \"medium\",\n        deps = [],\n        tags = [],\n        **kwargs):\n    if not (corpus.startswith(\"//\") or corpus.startswith(\":\") or corpus.startswith(\"@\")):\n        corpus_name = name + \"_corpus\"\n        corpus = native.glob([corpus + \"/**\"])\n        native.filegroup(\n            name = corpus_name,\n            srcs = corpus,\n        )\n    else:\n        corpus_name = corpus\n    tar_src = [corpus_name]\n    if dictionaries:\n        tar_src += dictionaries\n    pkg_tar(\n        name = name + \"_corpus_tar\",\n        srcs = tar_src,\n        testonly = 1,\n    )\n    fuzz_copts = [\"-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION\"]\n    test_lib_name = name + \"_lib\"\n    envoy_cc_test_library(\n        name = test_lib_name,\n        deps = deps + envoy_stdlib_deps() + [\n            repository + \"//test/fuzz:fuzz_runner_lib\",\n            repository + \"//test/test_common:test_version_linkstamp\",\n        ],\n        repository = repository,\n        tags = tags,\n        **kwargs\n    )\n    cc_test(\n        name = name,\n        copts = fuzz_copts + envoy_copts(\"@envoy\", test = True),\n        linkopts = 
_envoy_test_linkopts() + select({\n            \"@envoy//bazel:libfuzzer\": [\"-fsanitize=fuzzer\"],\n            \"//conditions:default\": [],\n        }),\n        linkstatic = envoy_linkstatic(),\n        args = select({\n            \"@envoy//bazel:libfuzzer_coverage\": [\"$(locations %s)\" % corpus_name],\n            \"@envoy//bazel:libfuzzer\": [],\n            \"//conditions:default\": [\"$(locations %s)\" % corpus_name],\n        }),\n        data = [corpus_name],\n        # No fuzzing on macOS or Windows\n        deps = select({\n            \"@envoy//bazel:apple\": [repository + \"//test:dummy_main\"],\n            \"@envoy//bazel:windows_x86_64\": [repository + \"//test:dummy_main\"],\n            \"@envoy//bazel:libfuzzer\": [\n                \":\" + test_lib_name,\n            ],\n            \"//conditions:default\": [\n                \":\" + test_lib_name,\n                repository + \"//test/fuzz:main\",\n            ],\n        }),\n        size = size,\n        tags = [\"fuzz_target\"] + tags,\n    )\n\n    # This target exists only for\n    # https://github.com/google/oss-fuzz/blob/master/projects/envoy/build.sh. 
It won't yield\n    # anything useful on its own, as it expects to be run in an environment where the linker options\n    # provide a path to FuzzingEngine.\n    cc_binary(\n        name = name + \"_driverless\",\n        copts = fuzz_copts + envoy_copts(\"@envoy\", test = True),\n        linkopts = [\"-lFuzzingEngine\"] + _envoy_test_linkopts(),\n        linkstatic = 1,\n        testonly = 1,\n        deps = [\":\" + test_lib_name],\n        tags = [\"manual\"] + tags,\n    )\n\n# Envoy C++ test targets should be specified with this function.\ndef envoy_cc_test(\n        name,\n        srcs = [],\n        data = [],\n        # List of pairs (Bazel shell script target, shell script args)\n        repository = \"\",\n        external_deps = [],\n        deps = [],\n        tags = [],\n        args = [],\n        copts = [],\n        shard_count = None,\n        coverage = True,\n        local = False,\n        size = \"medium\",\n        flaky = False):\n    coverage_tags = tags + ([] if coverage else [\"nocoverage\"])\n    cc_test(\n        name = name,\n        srcs = srcs,\n        data = data,\n        copts = envoy_copts(repository, test = True) + copts,\n        linkopts = _envoy_test_linkopts(),\n        linkstatic = envoy_linkstatic(),\n        malloc = tcmalloc_external_dep(repository),\n        deps = envoy_stdlib_deps() + deps + [envoy_external_dep_path(dep) for dep in external_deps + [\"googletest\"]] + [\n            repository + \"//test:main\",\n            repository + \"//test/test_common:test_version_linkstamp\",\n        ],\n        # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51\n        # 2 - by default, mocks act as StrictMocks.\n        args = args + [\"--gmock_default_mock_behavior=2\"],\n        tags = coverage_tags,\n        local = local,\n        shard_count = shard_count,\n        size = size,\n        flaky = flaky,\n    )\n\n# Envoy C++ test related libraries (that 
want gtest, gmock) should be specified\n# with this function.\ndef envoy_cc_test_library(\n        name,\n        srcs = [],\n        hdrs = [],\n        data = [],\n        external_deps = [],\n        deps = [],\n        repository = \"\",\n        tags = [],\n        include_prefix = None,\n        copts = [],\n        **kargs):\n    deps = deps + [\n        repository + \"//test/test_common:printers_includes\",\n    ]\n\n    _envoy_cc_test_infrastructure_library(\n        name,\n        srcs,\n        hdrs,\n        data,\n        external_deps,\n        deps,\n        repository,\n        tags,\n        include_prefix,\n        copts,\n        visibility = [\"//visibility:public\"],\n        **kargs\n    )\n\n# Envoy test binaries should be specified with this function.\ndef envoy_cc_test_binary(\n        name,\n        tags = [],\n        deps = [],\n        **kargs):\n    envoy_cc_binary(\n        name,\n        testonly = 1,\n        linkopts = _envoy_test_linkopts(),\n        tags = tags + [\"compilation_db_dep\"],\n        deps = deps + [\n            \"@envoy//test/test_common:test_version_linkstamp\",\n        ],\n        **kargs\n    )\n\n# Envoy benchmark binaries should be specified with this function. bazel run\n# these targets to measure performance.\ndef envoy_cc_benchmark_binary(\n        name,\n        deps = [],\n        repository = \"\",\n        **kargs):\n    envoy_cc_test_binary(\n        name,\n        deps = deps + [repository + \"//test/benchmark:main\"],\n        repository = repository,\n        **kargs\n    )\n\n# Tests to validate that Envoy benchmarks run successfully should be specified\n# with this function. 
Not for actual performance measurements: iteratons and\n# expensive benchmarks will be skipped in the interest of execution time.\ndef envoy_benchmark_test(\n        name,\n        benchmark_binary,\n        data = [],\n        tags = [],\n        **kargs):\n    native.sh_test(\n        name = name,\n        srcs = [\"//bazel:test_for_benchmark_wrapper.sh\"],\n        data = [\":\" + benchmark_binary] + data,\n        args = [\"%s/%s\" % (native.package_name(), benchmark_binary)],\n        tags = tags + [\"nocoverage\"],\n        **kargs\n    )\n\n# Envoy Python test binaries should be specified with this function.\ndef envoy_py_test_binary(\n        name,\n        external_deps = [],\n        deps = [],\n        **kargs):\n    py_binary(\n        name = name,\n        deps = deps + [envoy_external_dep_path(dep) for dep in external_deps],\n        **kargs\n    )\n\n# Envoy C++ mock targets should be specified with this function.\ndef envoy_cc_mock(name, **kargs):\n    envoy_cc_test_library(name = name, **kargs)\n\n# Envoy shell tests that need to be included in coverage run should be specified with this function.\ndef envoy_sh_test(\n        name,\n        srcs = [],\n        data = [],\n        coverage = True,\n        cc_binary = [],\n        tags = [],\n        **kargs):\n    if coverage:\n        if cc_binary == []:\n            fail(\"cc_binary is required for coverage-enabled test.\")\n        test_runner_cc = name + \"_test_runner.cc\"\n        native.genrule(\n            name = name + \"_gen_test_runner\",\n            srcs = srcs,\n            outs = [test_runner_cc],\n            cmd = \"$(location //bazel:gen_sh_test_runner.sh) $(SRCS) >> $@\",\n            tools = [\"//bazel:gen_sh_test_runner.sh\"],\n        )\n        envoy_cc_test(\n            name = name,\n            srcs = [test_runner_cc],\n            data = srcs + data + cc_binary,\n            tags = tags,\n            deps = [\"//test/test_common:environment_lib\"] + cc_binary,\n           
 **kargs\n        )\n\n    else:\n        native.sh_test(\n            name = name,\n            srcs = [\"//bazel:sh_test_wrapper.sh\"],\n            data = srcs + data + cc_binary,\n            args = srcs,\n            tags = tags + [\"nocoverage\"],\n            **kargs\n        )\n"
  },
  {
    "path": "bazel/external/BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\n# Use a wrapper cc_library with an empty source source file to force\n# compilation of other cc_library targets that only list *.a sources.\ncc_library(\n    name = \"all_external\",\n    srcs = [\":empty.cc\"],\n    defines = [\"OPENTRACING_STATIC\"],\n    # TODO: external/io_opentracing_cpp/BUILD.bazel:19:1: Executing genrule\n    # @io_opentracing_cpp//:generate_version_h failed - needs porting\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"@com_github_datadog_dd_opentracing_cpp//:dd_opentracing_cpp\",\n        \"@com_google_googletest//:gtest\",\n        \"@com_lightstep_tracer_cpp//:lightstep_tracer\",\n        \"@io_opentracing_cpp//:opentracing\",\n    ],\n)\n\ngenrule(\n    name = \"empty_cc\",\n    outs = [\"empty.cc\"],\n    cmd = \"touch \\\"$(@D)/empty.cc\\\"\",\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/external/boringssl_fips.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\nload(\":genrule_cmd.bzl\", \"genrule_cmd\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"crypto\",\n    srcs = [\n        \"crypto/libcrypto.a\",\n    ],\n    hdrs = glob([\"boringssl/include/openssl/*.h\"]),\n    defines = [\"BORINGSSL_FIPS\"],\n    includes = [\"boringssl/include\"],\n    visibility = [\"//visibility:public\"],\n)\n\ncc_library(\n    name = \"ssl\",\n    srcs = [\n        \"ssl/libssl.a\",\n    ],\n    hdrs = glob([\"boringssl/include/openssl/*.h\"]),\n    includes = [\"boringssl/include\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\":crypto\"],\n)\n\ngenrule(\n    name = \"build\",\n    srcs = glob([\"boringssl/**\"]),\n    outs = [\n        \"crypto/libcrypto.a\",\n        \"ssl/libssl.a\",\n    ],\n    cmd = genrule_cmd(\"@envoy//bazel/external:boringssl_fips.genrule_cmd\"),\n)\n"
  },
  {
    "path": "bazel/external/boringssl_fips.genrule_cmd",
    "content": "#!/bin/bash\n\nset -e\n\n# BoringSSL build as described in the Security Policy for BoringCrypto module (2020-07-02):\n# https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp3678.pdf\n\n# This works only on Linux-x86_64.\nif [[ `uname` != \"Linux\" || `uname -m` != \"x86_64\" ]]; then\n  echo \"ERROR: BoringSSL FIPS is currently supported only on Linux-x86_64.\"\n  exit 1\nfi\n\n# Bazel magic.\nROOT=$$(dirname $(rootpath boringssl/BUILDING.md))/..\npushd $$ROOT\n\n# Build tools requirements:\n# - Clang compiler version 7.0.1 (https://releases.llvm.org/download.html)\n# - Go programming language version 1.12.7 (https://golang.org/dl/)\n# - Ninja build system version 1.9.0 (https://github.com/ninja-build/ninja/releases)\n\n# Override $$PATH for build tools, to avoid picking up anything else.\nexport PATH=\"$$(dirname `which cmake`):/usr/bin:/bin\"\n\n# Clang 7.0.1\nVERSION=7.0.1\nSHA256=02ad925add5b2b934d64c3dd5cbd1b2002258059f7d962993ba7f16524c3089c\nPLATFORM=\"x86_64-linux-gnu-ubuntu-16.04\"\n\ncurl -sLO https://releases.llvm.org/\"$$VERSION\"/clang+llvm-\"$$VERSION\"-\"$$PLATFORM\".tar.xz \\\n  && echo \"$$SHA256\" clang+llvm-\"$$VERSION\"-\"$$PLATFORM\".tar.xz | sha256sum --check\ntar xf clang+llvm-\"$$VERSION\"-\"$$PLATFORM\".tar.xz\n\nexport HOME=\"$$PWD\"\nprintf \"set(CMAKE_C_COMPILER \\\"clang\\\")\\nset(CMAKE_CXX_COMPILER \\\"clang++\\\")\\n\" > $${HOME}/toolchain\nexport PATH=\"$$PWD/clang+llvm-$$VERSION-$$PLATFORM/bin:$$PATH\"\n\nif [[ `clang --version | head -1 | awk '{print $$3}'` != \"$$VERSION\" ]]; then\n  echo \"ERROR: Clang version doesn't match.\"\n  exit 1\nfi\n\n# Go 1.12.7\nVERSION=1.12.7\nSHA256=66d83bfb5a9ede000e33c6579a91a29e6b101829ad41fffb5c5bb6c900e109d9\nPLATFORM=\"linux-amd64\"\n\ncurl -sLO https://dl.google.com/go/go\"$$VERSION\".\"$$PLATFORM\".tar.gz \\\n  && echo \"$$SHA256\" go\"$$VERSION\".\"$$PLATFORM\".tar.gz | sha256sum --check\ntar xf 
go\"$$VERSION\".\"$$PLATFORM\".tar.gz\n\nexport GOPATH=\"$$PWD/gopath\"\nexport GOROOT=\"$$PWD/go\"\nexport PATH=\"$$GOPATH/bin:$$GOROOT/bin:$$PATH\"\n\nif [[ `go version | awk '{print $$3}'` != \"go$$VERSION\" ]]; then\n  echo \"ERROR: Go version doesn't match.\"\n  exit 1\nfi\n\n# Ninja 1.9.0\nVERSION=1.9.0\nSHA256=1b1235f2b0b4df55ac6d80bbe681ea3639c9d2c505c7ff2159a3daf63d196305\nPLATFORM=\"linux\"\n\ncurl -sLO https://github.com/ninja-build/ninja/releases/download/v\"$$VERSION\"/ninja-\"$$PLATFORM\".zip \\\n  && echo \"$$SHA256\" ninja-\"$$PLATFORM\".zip | sha256sum --check\nunzip -o ninja-\"$$PLATFORM\".zip\n\nexport PATH=\"$$PWD:$$PATH\"\n\nif [[ `ninja --version` != \"$$VERSION\" ]]; then\n  echo \"ERROR: Ninja version doesn't match.\"\n  exit 1\nfi\n\n# Clean after previous build.\nrm -rf boringssl/build\n\n# Build BoringSSL.\ncd boringssl\nmkdir build && cd build && cmake -GNinja -DCMAKE_TOOLCHAIN_FILE=$${HOME}/toolchain -DFIPS=1 -DCMAKE_BUILD_TYPE=Release ..\nninja\nninja run_tests\n\n# Verify correctness of the FIPS build.\nif [[ `tool/bssl isfips` != \"1\" ]]; then\n  echo \"ERROR: BoringSSL tool didn't report FIPS build.\"\n  exit 1\nfi\n\n# Move compiled libraries to the expected destinations.\npopd\nmv $$ROOT/boringssl/build/crypto/libcrypto.a $(execpath crypto/libcrypto.a)\nmv $$ROOT/boringssl/build/ssl/libssl.a $(execpath ssl/libssl.a)\n"
  },
  {
    "path": "bazel/external/boringssl_fips.patch",
    "content": "# Fix FIPS build (from BoringSSL commit 4ca15d5dcbe6e8051a4654df7c971ea8307abfe0).\n#\n# The modulewrapper is not a part of the FIPS module, so it can be patched without\n# concern about breaking the FIPS validation.\n--- boringssl/util/fipstools/acvp/modulewrapper/modulewrapper.cc\n+++ boringssl/util/fipstools/acvp/modulewrapper/modulewrapper.cc\n@@ -12,9 +12,11 @@\n  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN\n  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */\n \n+#include <string>\n #include <vector>\n \n #include <assert.h>\n+#include <errno.h>\n #include <string.h>\n #include <sys/uio.h>\n #include <unistd.h>\n"
  },
  {
    "path": "bazel/external/cargo/BUILD",
    "content": "\"\"\"\ncargo-raze workspace build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\npackage(default_visibility = [\"//visibility:public\"])\n\nlicenses([\n    \"notice\",  # See individual crates for specific licenses\n])\n\nalias(\n    name = \"log\",\n    actual = \"@raze__log__0_4_11//:log\",\n    tags = [\"cargo-raze\"],\n)\n\nalias(\n    name = \"proxy_wasm\",\n    actual = \"@raze__proxy_wasm__0_1_2//:proxy_wasm\",\n    tags = [\"cargo-raze\"],\n)\n"
  },
  {
    "path": "bazel/external/cargo/remote/BUILD",
    "content": ""
  },
  {
    "path": "bazel/external/cargo/remote/ahash-0.3.8.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # MIT from expression \"MIT OR Apache-2.0\"\n])\n\n# Unsupported target \"ahash\" with type \"bench\" omitted\n\nrust_library(\n    name = \"ahash\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2018\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.3.8\",\n    deps = [\n    ],\n)\n\n# Unsupported target \"bench\" with type \"test\" omitted\n# Unsupported target \"map\" with type \"bench\" omitted\n# Unsupported target \"map_tests\" with type \"test\" omitted\n# Unsupported target \"nopanic\" with type \"test\" omitted\n"
  },
  {
    "path": "bazel/external/cargo/remote/autocfg-1.0.0.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # Apache-2.0 from expression \"Apache-2.0 OR MIT\"\n])\n\nrust_library(\n    name = \"autocfg\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2015\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"1.0.0\",\n    deps = [\n    ],\n)\n\n# Unsupported target \"integers\" with type \"example\" omitted\n# Unsupported target \"paths\" with type \"example\" omitted\n# Unsupported target \"rustflags\" with type \"test\" omitted\n# Unsupported target \"traits\" with type \"example\" omitted\n# Unsupported target \"versions\" with type \"example\" omitted\n"
  },
  {
    "path": "bazel/external/cargo/remote/cfg-if-0.1.10.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # MIT from expression \"MIT OR Apache-2.0\"\n])\n\nrust_library(\n    name = \"cfg_if\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2018\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.1.10\",\n    deps = [\n    ],\n)\n\n# Unsupported target \"xcrate\" with type \"test\" omitted\n"
  },
  {
    "path": "bazel/external/cargo/remote/hashbrown-0.7.2.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # Apache-2.0 from expression \"Apache-2.0 OR MIT\"\n])\n\n# Unsupported target \"bench\" with type \"bench\" omitted\n# Unsupported target \"build-script-build\" with type \"custom-build\" omitted\n\nrust_library(\n    name = \"hashbrown\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n        \"ahash\",\n        \"inline-more\",\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2018\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.7.2\",\n    deps = [\n        \"@raze__ahash__0_3_8//:ahash\",\n    ],\n)\n\n# Unsupported target \"hasher\" with type \"test\" omitted\n# Unsupported target \"rayon\" with type \"test\" omitted\n# Unsupported target \"serde\" with type \"test\" omitted\n# Unsupported target \"set\" with type \"test\" omitted\n"
  },
  {
    "path": "bazel/external/cargo/remote/libc-0.2.74.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # MIT from expression \"MIT OR Apache-2.0\"\n])\n\n# Unsupported target \"build-script-build\" with type \"custom-build\" omitted\n# Unsupported target \"const_fn\" with type \"test\" omitted\n\nrust_library(\n    name = \"libc\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2015\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.2.74\",\n    deps = [\n    ],\n)\n"
  },
  {
    "path": "bazel/external/cargo/remote/log-0.4.11.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # MIT from expression \"MIT OR Apache-2.0\"\n])\n\n# Unsupported target \"build-script-build\" with type \"custom-build\" omitted\n# Unsupported target \"filters\" with type \"test\" omitted\n\nrust_library(\n    name = \"log\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2015\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n        \"--cfg=atomic_cas\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.4.11\",\n    deps = [\n        \"@raze__cfg_if__0_1_10//:cfg_if\",\n    ],\n)\n\n# Unsupported target \"macros\" with type \"test\" omitted\n"
  },
  {
    "path": "bazel/external/cargo/remote/memory_units-0.4.0.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"reciprocal\",  # MPL-2.0 from expression \"MPL-2.0\"\n])\n\nrust_library(\n    name = \"memory_units\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2015\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.4.0\",\n    deps = [\n    ],\n)\n"
  },
  {
    "path": "bazel/external/cargo/remote/proxy-wasm-0.1.2.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # Apache-2.0 from expression \"Apache-2.0\"\n])\n\n# Unsupported target \"hello_world\" with type \"example\" omitted\n# Unsupported target \"http_auth_random\" with type \"example\" omitted\n# Unsupported target \"http_body\" with type \"example\" omitted\n# Unsupported target \"http_headers\" with type \"example\" omitted\n\nrust_library(\n    name = \"proxy_wasm\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2018\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.1.2\",\n    deps = [\n        \"@raze__hashbrown__0_7_2//:hashbrown\",\n        \"@raze__log__0_4_11//:log\",\n        \"@raze__wee_alloc__0_4_5//:wee_alloc\",\n    ],\n)\n"
  },
  {
    "path": "bazel/external/cargo/remote/wee_alloc-0.4.5.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"reciprocal\",  # MPL-2.0 from expression \"MPL-2.0\"\n])\n\n# Unsupported target \"build-script-build\" with type \"custom-build\" omitted\n\nrust_library(\n    name = \"wee_alloc\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n        \"default\",\n        \"size_classes\",\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2015\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.4.5\",\n    deps = [\n        \"@raze__cfg_if__0_1_10//:cfg_if\",\n        \"@raze__libc__0_2_74//:libc\",\n        \"@raze__memory_units__0_4_0//:memory_units\",\n    ],\n)\n"
  },
  {
    "path": "bazel/external/cargo/remote/winapi-0.3.9.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # MIT from expression \"MIT OR Apache-2.0\"\n])\n\n# Unsupported target \"build-script-build\" with type \"custom-build\" omitted\n\nrust_library(\n    name = \"winapi\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n        \"memoryapi\",\n        \"synchapi\",\n        \"winbase\",\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2015\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.3.9\",\n    deps = [\n    ],\n)\n"
  },
  {
    "path": "bazel/external/cargo/remote/winapi-i686-pc-windows-gnu-0.4.0.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # MIT from expression \"MIT OR Apache-2.0\"\n])\n\n# Unsupported target \"build-script-build\" with type \"custom-build\" omitted\n\nrust_library(\n    name = \"winapi_i686_pc_windows_gnu\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2015\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.4.0\",\n    deps = [\n    ],\n)\n"
  },
  {
    "path": "bazel/external/cargo/remote/winapi-x86_64-pc-windows-gnu-0.4.0.BUILD",
    "content": "\"\"\"\ncargo-raze crate build file.\n\nDO NOT EDIT! Replaced on runs of cargo-raze\n\"\"\"\n\nload(\n    \"@io_bazel_rules_rust//rust:rust.bzl\",\n    \"rust_library\",\n)\n\npackage(default_visibility = [\n    # Public for visibility by \"@raze__crate__version//\" targets.\n    #\n    # Prefer access through \"//bazel/external/cargo\", which limits external\n    # visibility to explicit Cargo.toml dependencies.\n    \"//visibility:public\",\n])\n\nlicenses([\n    \"notice\",  # MIT from expression \"MIT OR Apache-2.0\"\n])\n\n# Unsupported target \"build-script-build\" with type \"custom-build\" omitted\n\nrust_library(\n    name = \"winapi_x86_64_pc_windows_gnu\",\n    srcs = glob([\"**/*.rs\"]),\n    crate_features = [\n    ],\n    crate_root = \"src/lib.rs\",\n    crate_type = \"lib\",\n    edition = \"2015\",\n    rustc_flags = [\n        \"--cap-lints=allow\",\n    ],\n    tags = [\"cargo-raze\"],\n    version = \"0.4.0\",\n    deps = [\n    ],\n)\n"
  },
  {
    "path": "bazel/external/compiler_rt.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"fuzzed_data_provider\",\n    hdrs = [\"include/fuzzer/FuzzedDataProvider.h\"],\n    strip_include_prefix = \"include\",\n    visibility = [\"//visibility:public\"],\n)\n\nlibfuzzer_copts = [\n    \"-fno-sanitize=address,thread,undefined\",\n    \"-fsanitize-coverage=0\",\n    \"-O3\",\n]\n\ncc_library(\n    name = \"libfuzzer_main\",\n    srcs = [\"lib/fuzzer/FuzzerMain.cpp\"],\n    copts = libfuzzer_copts,\n    visibility = [\"//visibility:public\"],\n    deps = [\":libfuzzer_no_main\"],\n    alwayslink = True,\n)\n\ncc_library(\n    name = \"libfuzzer_no_main\",\n    srcs = glob(\n        [\"lib/fuzzer/Fuzzer*.cpp\"],\n        exclude = [\"lib/fuzzer/FuzzerMain.cpp\"],\n    ),\n    hdrs = glob([\n        \"lib/fuzzer/Fuzzer*.h\",\n        \"lib/fuzzer/Fuzzer*.def\",\n    ]),\n    copts = libfuzzer_copts,\n    visibility = [\"//visibility:public\"],\n    alwayslink = True,\n)\n"
  },
  {
    "path": "bazel/external/fmtlib.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"fmtlib\",\n    hdrs = glob([\n        \"include/fmt/*.h\",\n    ]),\n    defines = [\"FMT_HEADER_ONLY\"],\n    includes = [\"include\"],\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/external/http-parser.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"http_parser\",\n    srcs = [\n        \"http_parser.c\",\n        \"http_parser.h\",\n    ],\n    hdrs = [\"http_parser.h\"],\n    # This compiler flag is set to an arbtitrarily high number so\n    # as to effectively disables the http_parser header limit, as\n    # we do our own checks in the conn manager and codec.\n    copts = [\"-DHTTP_MAX_HEADER_SIZE=0x2000000\"],\n    includes = [\".\"],\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/external/kafka_int32.patch",
    "content": "--- DescribeGroupsResponse.json\t2020-03-25 16:12:16.373302600 -0400\n+++ DescribeGroupsResponse.json\t2020-03-25 16:11:16.184156200 -0400\n@@ -63,7 +63,7 @@\n         { \"name\": \"MemberAssignment\", \"type\": \"bytes\", \"versions\": \"0+\",\n           \"about\": \"The current assignment provided by the group leader.\" }\n       ]},\n-      { \"name\": \"AuthorizedOperations\", \"type\": \"int32\", \"versions\": \"3+\",  \"default\": \"-2147483648\",\n+      { \"name\": \"AuthorizedOperations\", \"type\": \"int32\", \"versions\": \"3+\",  \"default\": \"INT32_MIN\",\n         \"about\": \"32-bit bitfield to represent authorized operations for this group.\" }\n     ]}\n   ]\n\n--- MetadataResponse.json\t2020-03-25 15:53:36.319161000 -0400\n+++ MetadataResponse.json\t2020-03-25 15:54:11.510400000 -0400\n@@ -81,10 +81,10 @@\n         { \"name\": \"OfflineReplicas\", \"type\": \"[]int32\", \"versions\": \"5+\", \"ignorable\": true,\n           \"about\": \"The set of offline replicas of this partition.\" }\n       ]},\n-      { \"name\": \"TopicAuthorizedOperations\", \"type\": \"int32\", \"versions\": \"8+\", \"default\": \"-2147483648\",\n+      { \"name\": \"TopicAuthorizedOperations\", \"type\": \"int32\", \"versions\": \"8+\", \"default\": \"INT32_MIN\",\n         \"about\": \"32-bit bitfield to represent authorized operations for this topic.\" }\n     ]},\n-    { \"name\": \"ClusterAuthorizedOperations\", \"type\": \"int32\", \"versions\": \"8+\", \"default\": \"-2147483648\",\n+    { \"name\": \"ClusterAuthorizedOperations\", \"type\": \"int32\", \"versions\": \"8+\", \"default\": \"INT32_MIN\",\n       \"about\": \"32-bit bitfield to represent authorized operations for this cluster.\" }\n   ]\n }\n"
  },
  {
    "path": "bazel/external/libcircllhist.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"libcircllhist\",\n    srcs = [\"src/circllhist.c\"],\n    hdrs = [\n        \"src/circllhist.h\",\n    ],\n    copts = select({\n        \"@envoy//bazel:windows_x86_64\": [\"-DWIN32\"],\n        \"//conditions:default\": [],\n    }),\n    includes = [\"src\"],\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/external/libprotobuf_mutator.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"libprotobuf_mutator\",\n    srcs = glob(\n        [\n            \"src/**/*.cc\",\n            \"src/**/*.h\",\n            \"port/protobuf.h\",\n        ],\n        exclude = [\"**/*_test.cc\"],\n    ),\n    hdrs = [\"src/libfuzzer/libfuzzer_macro.h\"],\n    include_prefix = \"libprotobuf_mutator\",\n    includes = [\".\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\"//external:protobuf\"],\n)\n"
  },
  {
    "path": "bazel/external/proxy_wasm_cpp_host.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\nload(\n    \"@envoy//bazel:envoy_build_system.bzl\",\n    \"envoy_select_wasm_all_v8_wavm_none\",\n    \"envoy_select_wasm_v8\",\n    \"envoy_select_wasm_wavm\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\npackage(default_visibility = [\"//visibility:public\"])\n\ncc_library(\n    name = \"include\",\n    hdrs = glob([\"include/proxy-wasm/**/*.h\"]),\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:common_lib\",\n    ],\n)\n\ncc_library(\n    name = \"lib\",\n    # Note that the select cannot appear in the glob.\n    srcs = envoy_select_wasm_all_v8_wavm_none(\n        glob(\n            [\n                \"src/**/*.h\",\n                \"src/**/*.cc\",\n            ],\n        ),\n        glob(\n            [\n                \"src/**/*.h\",\n                \"src/**/*.cc\",\n            ],\n            exclude = [\"src/wavm/*\"],\n        ),\n        glob(\n            [\n                \"src/**/*.h\",\n                \"src/**/*.cc\",\n            ],\n            exclude = [\"src/v8/*\"],\n        ),\n        glob(\n            [\n                \"src/**/*.h\",\n                \"src/**/*.cc\",\n            ],\n            exclude = [\n                \"src/wavm/*\",\n                \"src/v8/*\",\n            ],\n        ),\n    ),\n    copts = envoy_select_wasm_wavm([\n        '-DWAVM_API=\"\"',\n        \"-Wno-non-virtual-dtor\",\n        \"-Wno-old-style-cast\",\n    ]),\n    deps = [\n        \":include\",\n        \"//external:abseil_flat_hash_map\",\n        \"//external:abseil_optional\",\n        \"//external:abseil_strings\",\n        \"//external:protobuf\",\n        \"//external:ssl\",\n        \"//external:zlib\",\n        \"@proxy_wasm_cpp_sdk//:api_lib\",\n        \"@proxy_wasm_cpp_sdk//:common_lib\",\n    ] + envoy_select_wasm_wavm([\n        \"@envoy//bazel/foreign_cc:wavm\",\n    ]) + envoy_select_wasm_v8([\n        \"//external:wee8\",\n    ]),\n)\n"
  },
  {
    "path": "bazel/external/quiche.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_proto_library\")\nload(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\nload(\":genrule_cmd.bzl\", \"genrule_cmd\")\nload(\n    \"@envoy//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# QUICHE is Google's implementation of QUIC and related protocols. It is the\n# same code used in Chromium and Google's servers, but packaged in a form that\n# is intended to be easier to incorporate into third-party projects.\n#\n# QUICHE code falls into three groups:\n# 1. Platform-independent code. Most QUICHE code is in this category.\n# 2. APIs and type aliases to platform-dependent code/types, referenced by code\n#    in group 1. This group is called the \"Platform API\".\n# 3. Definitions of types declared in group 2. This group is called the\n#    \"Platform impl\", and must be provided by the codebase that embeds QUICHE.\n#\n# Concretely, header files in group 2 (the Platform API) #include header and\n# source files in group 3 (the Platform impl). Unfortunately, QUICHE does not\n# yet provide a built-in way to customize this dependency, e.g. to override the\n# directory or namespace in which Platform impl types are defined. Hence the\n# gross hacks in quiche.genrule_cmd, invoked from here to tweak QUICHE source\n# files into a form usable by Envoy.\n#\n# The mechanics of this will change as QUICHE evolves, supplies its own Bazel\n# buildfiles, and provides a built-in way to override platform impl directory\n# location. 
However, the end result (QUICHE files placed under\n# quiche/{http2,quic,spdy}/, with the Envoy-specific implementation of the\n# QUICHE platform APIs in //source/extensions/quic_listeners/quiche/platform/,\n# should remain largely the same.\n\nsrc_files = glob([\n    \"**/*.h\",\n    \"**/*.c\",\n    \"**/*.cc\",\n    \"**/*.inc\",\n    \"**/*.proto\",\n])\n\ngenrule(\n    name = \"quiche_files\",\n    srcs = src_files,\n    outs = [\"quiche/\" + f for f in src_files],\n    cmd = genrule_cmd(\"@envoy//bazel/external:quiche.genrule_cmd\"),\n    visibility = [\"//visibility:private\"],\n)\n\n# These options are only used to suppress errors in brought-in QUICHE tests.\n# Use #pragma GCC diagnostic ignored in integration code to suppress these errors.\nquiche_common_copts = [\n    \"-Wno-unused-function\",\n    # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames.\n    \"-Wno-invalid-offsetof\",\n    \"-Wno-range-loop-analysis\",\n]\n\nquiche_copts = select({\n    # Ignore unguarded #pragma GCC statements in QUICHE sources\n    \"@envoy//bazel:windows_x86_64\": [\"-wd4068\"],\n    # Remove these after upstream fix.\n    \"@envoy//bazel:gcc_build\": [\n        \"-Wno-sign-compare\",\n    ] + quiche_common_copts,\n    \"//conditions:default\": quiche_common_copts,\n})\n\ntest_suite(\n    name = \"ci_tests\",\n    tests = [\n        \"http2_platform_api_test\",\n        \"quic_platform_api_test\",\n        \"quiche_common_test\",\n        \"spdy_platform_api_test\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"http2_test_tools_random\",\n    srcs = [\"quiche/http2/test_tools/http2_random.cc\"],\n    hdrs = [\"quiche/http2/test_tools/http2_random.h\"],\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    deps = [\":http2_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"http2_platform\",\n    hdrs = [\n        \"quiche/http2/platform/api/http2_bug_tracker.h\",\n        \"quiche/http2/platform/api/http2_containers.h\",\n        
\"quiche/http2/platform/api/http2_estimate_memory_usage.h\",\n        \"quiche/http2/platform/api/http2_flag_utils.h\",\n        \"quiche/http2/platform/api/http2_flags.h\",\n        \"quiche/http2/platform/api/http2_logging.h\",\n        \"quiche/http2/platform/api/http2_macros.h\",\n        \"quiche/http2/platform/api/http2_string_utils.h\",\n        # TODO: uncomment the following files as implementations are added.\n        # \"quiche/http2/platform/api/http2_test_helpers.h\",\n    ],\n    repository = \"@envoy\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quiche_common_platform\",\n        \"@envoy//source/extensions/quic_listeners/quiche/platform:http2_platform_impl_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_constants_lib\",\n    srcs = [\"quiche/http2/http2_constants.cc\"],\n    hdrs = [\"quiche/http2/http2_constants.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\":http2_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"http2_structures_lib\",\n    srcs = [\"quiche/http2/http2_structures.cc\"],\n    hdrs = [\"quiche/http2/http2_structures.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_decode_buffer_lib\",\n    srcs = [\"quiche/http2/decoder/decode_buffer.cc\"],\n    hdrs = [\"quiche/http2/decoder/decode_buffer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\":http2_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_decode_http2_structures_lib\",\n    srcs = [\"quiche/http2/decoder/decode_http2_structures.cc\"],\n    hdrs = [\"quiche/http2/decoder/decode_http2_structures.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_platform\",\n        
\":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_decode_status_lib\",\n    srcs = [\"quiche/http2/decoder/decode_status.cc\"],\n    hdrs = [\"quiche/http2/decoder/decode_status.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\":http2_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_frame_decoder_state_lib\",\n    srcs = [\"quiche/http2/decoder/frame_decoder_state.cc\"],\n    hdrs = [\"quiche/http2/decoder/frame_decoder_state.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_structure_decoder_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_frame_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/http2_frame_decoder.cc\"],\n    hdrs = [\n        \"quiche/http2/decoder/frame_decoder_state.h\",\n        \"quiche/http2/decoder/http2_frame_decoder.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_decoder_payload_decoders_altsvc_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_continuation_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_data_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_goaway_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_headers_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_ping_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_priority_payload_decoder_lib\",\n    
    \":http2_decoder_payload_decoders_push_promise_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_rst_stream_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_settings_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_unknown_payload_decoder_lib\",\n        \":http2_decoder_payload_decoders_window_update_payload_decoder_lib\",\n        \":http2_decoder_structure_decoder_lib\",\n        \":http2_hpack_varint_hpack_varint_decoder_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_frame_decoder_listener_lib\",\n    srcs = [\"quiche/http2/decoder/http2_frame_decoder_listener.cc\"],\n    hdrs = [\"quiche/http2/decoder/http2_frame_decoder_listener.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_altsvc_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/altsvc_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/altsvc_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_continuation_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/continuation_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/continuation_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        
\":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_data_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/data_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/data_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_goaway_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/goaway_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/goaway_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_headers_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/headers_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/headers_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        
\":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_ping_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/ping_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/ping_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_priority_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/priority_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/priority_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_push_promise_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/push_promise_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/push_promise_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n  
      \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_rst_stream_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_settings_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/settings_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/settings_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_unknown_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/unknown_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/unknown_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        
\":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_payload_decoders_window_update_payload_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/payload_decoders/window_update_payload_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/payload_decoders/window_update_payload_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_http2_structures_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_decoder_frame_decoder_state_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_decoder_structure_decoder_lib\",\n    srcs = [\"quiche/http2/decoder/http2_structure_decoder.cc\"],\n    hdrs = [\"quiche/http2/decoder/http2_structure_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_http2_structures_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_block_decoder_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_block_decoder.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_block_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_hpack_decoder_hpack_decoding_error_lib\",\n        \":http2_hpack_decoder_hpack_entry_decoder_lib\",\n        \":http2_hpack_decoder_hpack_entry_decoder_listener_lib\",\n        
\":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_decoder_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_decoder.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_hpack_decoder_hpack_block_decoder_lib\",\n        \":http2_hpack_decoder_hpack_decoder_listener_lib\",\n        \":http2_hpack_decoder_hpack_decoder_state_lib\",\n        \":http2_hpack_decoder_hpack_decoder_tables_lib\",\n        \":http2_hpack_decoder_hpack_decoding_error_lib\",\n        \":http2_hpack_decoder_hpack_whole_entry_buffer_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_decoder_listener_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_decoder_listener.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_decoder_listener.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_hpack_hpack_constants_lib\",\n        \":http2_hpack_hpack_string_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_decoder_state_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_decoder_state.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_decoder_state.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_hpack_decoder_hpack_decoder_listener_lib\",\n        \":http2_hpack_decoder_hpack_decoder_string_buffer_lib\",\n        \":http2_hpack_decoder_hpack_decoder_tables_lib\",\n        \":http2_hpack_decoder_hpack_decoding_error_lib\",\n        \":http2_hpack_decoder_hpack_whole_entry_listener_lib\",\n        \":http2_hpack_hpack_constants_lib\",\n        \":http2_hpack_hpack_string_lib\",\n        \":http2_platform\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_decoder_string_buffer_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_decoder_string_buffer.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_decoder_string_buffer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_hpack_huffman_hpack_huffman_decoder_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_decoder_tables_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_decoder_tables.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_decoder_tables.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_hpack_hpack_constants_lib\",\n        \":http2_hpack_hpack_static_table_entries_lib\",\n        \":http2_hpack_hpack_string_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_decoding_error_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_decoding_error.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_decoding_error.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_entry_decoder_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_entry_decoder.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_entry_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_hpack_decoder_hpack_decoding_error_lib\",\n        \":http2_hpack_decoder_hpack_entry_decoder_listener_lib\",\n        \":http2_hpack_decoder_hpack_entry_type_decoder_lib\",\n        \":http2_hpack_decoder_hpack_string_decoder_lib\",\n        \":http2_hpack_hpack_constants_lib\",\n        \":http2_platform\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_entry_decoder_listener_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_entry_decoder_listener.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_entry_decoder_listener.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_hpack_hpack_constants_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_entry_type_decoder_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_entry_type_decoder.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_entry_type_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_hpack_hpack_constants_lib\",\n        \":http2_hpack_varint_hpack_varint_decoder_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_string_decoder_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_string_decoder.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_string_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_hpack_varint_hpack_varint_decoder_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_string_decoder_listener_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_string_decoder_listener.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_string_decoder_listener.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\":http2_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_whole_entry_buffer_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_whole_entry_buffer.cc\"],\n    hdrs = 
[\"quiche/http2/hpack/decoder/hpack_whole_entry_buffer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_hpack_decoder_hpack_decoder_string_buffer_lib\",\n        \":http2_hpack_decoder_hpack_decoding_error_lib\",\n        \":http2_hpack_decoder_hpack_entry_decoder_listener_lib\",\n        \":http2_hpack_decoder_hpack_whole_entry_listener_lib\",\n        \":http2_hpack_hpack_constants_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_decoder_hpack_whole_entry_listener_lib\",\n    srcs = [\"quiche/http2/hpack/decoder/hpack_whole_entry_listener.cc\"],\n    hdrs = [\"quiche/http2/hpack/decoder/hpack_whole_entry_listener.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_hpack_decoder_hpack_decoder_string_buffer_lib\",\n        \":http2_hpack_decoder_hpack_decoding_error_lib\",\n        \":http2_hpack_hpack_constants_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_huffman_hpack_huffman_decoder_lib\",\n    srcs = [\"quiche/http2/hpack/huffman/hpack_huffman_decoder.cc\"],\n    hdrs = [\"quiche/http2/hpack/huffman/hpack_huffman_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_platform\",\n        \":quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_huffman_hpack_huffman_encoder_lib\",\n    srcs = [\"quiche/http2/hpack/huffman/hpack_huffman_encoder.cc\"],\n    hdrs = [\"quiche/http2/hpack/huffman/hpack_huffman_encoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_hpack_huffman_huffman_spec_tables_lib\",\n        \":http2_platform\",\n        \":quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_huffman_huffman_spec_tables_lib\",\n    srcs = [\"quiche/http2/hpack/huffman/huffman_spec_tables.cc\"],\n    hdrs = 
[\"quiche/http2/hpack/huffman/huffman_spec_tables.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_hpack_constants_lib\",\n    srcs = [\"quiche/http2/hpack/http2_hpack_constants.cc\"],\n    hdrs = [\"quiche/http2/hpack/http2_hpack_constants.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\":http2_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_hpack_static_table_entries_lib\",\n    hdrs = [\"quiche/http2/hpack/hpack_static_table_entries.inc\"],\n    repository = \"@envoy\",\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_hpack_string_lib\",\n    srcs = [\"quiche/http2/hpack/hpack_string.cc\"],\n    hdrs = [\"quiche/http2/hpack/hpack_string.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_platform\",\n        \":quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_varint_hpack_varint_decoder_lib\",\n    srcs = [\"quiche/http2/hpack/varint/hpack_varint_decoder.cc\"],\n    hdrs = [\"quiche/http2/hpack/varint/hpack_varint_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_hpack_varint_hpack_varint_encoder_lib\",\n    srcs = [\"quiche/http2/hpack/varint/hpack_varint_encoder.cc\"],\n    hdrs = [\"quiche/http2/hpack/varint/hpack_varint_encoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\":http2_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"spdy_platform\",\n    hdrs = [\n        \"quiche/spdy/platform/api/spdy_bug_tracker.h\",\n        \"quiche/spdy/platform/api/spdy_containers.h\",\n        \"quiche/spdy/platform/api/spdy_endianness_util.h\",\n        \"quiche/spdy/platform/api/spdy_estimate_memory_usage.h\",\n        
\"quiche/spdy/platform/api/spdy_flags.h\",\n        \"quiche/spdy/platform/api/spdy_logging.h\",\n        \"quiche/spdy/platform/api/spdy_macros.h\",\n        \"quiche/spdy/platform/api/spdy_mem_slice.h\",\n        \"quiche/spdy/platform/api/spdy_string_utils.h\",\n    ],\n    repository = \"@envoy\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quiche_common_lib\",\n        \"@envoy//source/extensions/quic_listeners/quiche/platform:spdy_platform_impl_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_simple_arena_lib\",\n    srcs = [\"quiche/spdy/core/spdy_simple_arena.cc\"],\n    hdrs = [\"quiche/spdy/core/spdy_simple_arena.h\"],\n    repository = \"@envoy\",\n    visibility = [\"//visibility:public\"],\n    deps = [\":spdy_platform\"],\n)\n\nenvoy_cc_test_library(\n    name = \"spdy_platform_test_helpers\",\n    hdrs = [\"quiche/spdy/platform/api/spdy_test_helpers.h\"],\n    repository = \"@envoy\",\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:spdy_platform_test_helpers_impl_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_alt_svc_wire_format_lib\",\n    srcs = [\"quiche/spdy/core/spdy_alt_svc_wire_format.cc\"],\n    hdrs = [\"quiche/spdy/core/spdy_alt_svc_wire_format.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    visibility = [\"//visibility:public\"],\n    deps = [\":spdy_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_fifo_write_scheduler_lib\",\n    hdrs = [\"quiche/spdy/core/fifo_write_scheduler.h\"],\n    repository = \"@envoy\",\n    deps = [\n        \":spdy_core_write_scheduler_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_framer_lib\",\n    srcs = [\n        \"quiche/spdy/core/spdy_frame_builder.cc\",\n        \"quiche/spdy/core/spdy_framer.cc\",\n    ],\n    hdrs = [\n        \"quiche/spdy/core/spdy_frame_builder.h\",\n        \"quiche/spdy/core/spdy_framer.h\",\n    ],\n    copts = quiche_copts,\n    
repository = \"@envoy\",\n    deps = [\n        \":http2_platform\",\n        \":spdy_core_alt_svc_wire_format_lib\",\n        \":spdy_core_frame_reader_lib\",\n        \":spdy_core_header_block_lib\",\n        \":spdy_core_headers_handler_interface_lib\",\n        \":spdy_core_hpack_hpack_lib\",\n        \":spdy_core_protocol_lib\",\n        \":spdy_core_zero_copy_output_buffer_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_frame_reader_lib\",\n    srcs = [\"quiche/spdy/core/spdy_frame_reader.cc\"],\n    hdrs = [\"quiche/spdy/core/spdy_frame_reader.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":spdy_core_protocol_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_header_block_lib\",\n    srcs = [\"quiche/spdy/core/spdy_header_block.cc\"],\n    hdrs = [\"quiche/spdy/core/spdy_header_block.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":spdy_core_header_storage_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_header_storage_lib\",\n    srcs = [\"quiche/spdy/core/spdy_header_storage.cc\"],\n    hdrs = [\"quiche/spdy/core/spdy_header_storage.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \"spdy_simple_arena_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_headers_handler_interface_lib\",\n    hdrs = [\"quiche/spdy/core/spdy_headers_handler_interface.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    visibility = [\"//visibility:public\"],\n    deps = [\":spdy_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_http2_deframer_lib\",\n    srcs = [\"quiche/spdy/core/http2_frame_decoder_adapter.cc\"],\n    hdrs = [\"quiche/spdy/core/http2_frame_decoder_adapter.h\"],\n    copts = quiche_copts,\n    repository = 
\"@envoy\",\n    deps = [\n        \":http2_constants_lib\",\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_decoder_frame_decoder_lib\",\n        \":http2_decoder_frame_decoder_listener_lib\",\n        \":http2_platform\",\n        \":http2_structures_lib\",\n        \":spdy_core_alt_svc_wire_format_lib\",\n        \":spdy_core_header_block_lib\",\n        \":spdy_core_headers_handler_interface_lib\",\n        \":spdy_core_hpack_hpack_decoder_adapter_lib\",\n        \":spdy_core_hpack_hpack_lib\",\n        \":spdy_core_protocol_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_lifo_write_scheduler_lib\",\n    hdrs = [\"quiche/spdy/core/lifo_write_scheduler.h\"],\n    repository = \"@envoy\",\n    deps = [\n        \":spdy_core_write_scheduler_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_intrusive_list_lib\",\n    hdrs = [\"quiche/spdy/core/spdy_intrusive_list.h\"],\n    repository = \"@envoy\",\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_http2_priority_write_scheduler_lib\",\n    hdrs = [\"quiche/spdy/core/http2_priority_write_scheduler.h\"],\n    repository = \"@envoy\",\n    deps = [\n        \":spdy_core_intrusive_list_lib\",\n        \":spdy_core_protocol_lib\",\n        \":spdy_core_write_scheduler_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_hpack_hpack_lib\",\n    srcs = [\n        \"quiche/spdy/core/hpack/hpack_constants.cc\",\n        \"quiche/spdy/core/hpack/hpack_encoder.cc\",\n        \"quiche/spdy/core/hpack/hpack_entry.cc\",\n        \"quiche/spdy/core/hpack/hpack_header_table.cc\",\n        \"quiche/spdy/core/hpack/hpack_huffman_table.cc\",\n        \"quiche/spdy/core/hpack/hpack_output_stream.cc\",\n        \"quiche/spdy/core/hpack/hpack_static_table.cc\",\n    ],\n    hdrs = [\n        \"quiche/spdy/core/hpack/hpack_constants.h\",\n        
\"quiche/spdy/core/hpack/hpack_encoder.h\",\n        \"quiche/spdy/core/hpack/hpack_entry.h\",\n        \"quiche/spdy/core/hpack/hpack_header_table.h\",\n        \"quiche/spdy/core/hpack/hpack_huffman_table.h\",\n        \"quiche/spdy/core/hpack/hpack_output_stream.h\",\n        \"quiche/spdy/core/hpack/hpack_static_table.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":spdy_core_protocol_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_hpack_hpack_decoder_adapter_lib\",\n    srcs = [\"quiche/spdy/core/hpack/hpack_decoder_adapter.cc\"],\n    hdrs = [\"quiche/spdy/core/hpack/hpack_decoder_adapter.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":http2_hpack_decoder_hpack_decoder_lib\",\n        \":http2_hpack_decoder_hpack_decoder_listener_lib\",\n        \":http2_hpack_decoder_hpack_decoder_tables_lib\",\n        \":http2_hpack_hpack_constants_lib\",\n        \":http2_hpack_hpack_string_lib\",\n        \":spdy_core_header_block_lib\",\n        \":spdy_core_headers_handler_interface_lib\",\n        \":spdy_core_hpack_hpack_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_priority_write_scheduler_lib\",\n    srcs = [\"quiche/spdy/core/priority_write_scheduler.h\"],\n    repository = \"@envoy\",\n    deps = [\n        \":http2_platform\",\n        \":spdy_core_protocol_lib\",\n        \":spdy_core_write_scheduler_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_protocol_lib\",\n    srcs = [\"quiche/spdy/core/spdy_protocol.cc\"],\n    hdrs = [\n        \"quiche/spdy/core/spdy_bitmasks.h\",\n        \"quiche/spdy/core/spdy_protocol.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        
\":spdy_core_alt_svc_wire_format_lib\",\n        \":spdy_core_header_block_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_write_scheduler_lib\",\n    hdrs = [\"quiche/spdy/core/write_scheduler.h\"],\n    repository = \"@envoy\",\n    deps = [\n        \":spdy_core_protocol_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"spdy_core_test_utils_lib\",\n    srcs = [\"quiche/spdy/core/spdy_test_utils.cc\"],\n    hdrs = [\"quiche/spdy/core/spdy_test_utils.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":quiche_common_test_tools_test_utils_lib\",\n        \":spdy_core_header_block_lib\",\n        \":spdy_core_headers_handler_interface_lib\",\n        \":spdy_core_protocol_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_core_zero_copy_output_buffer_lib\",\n    hdrs = [\"quiche/spdy/core/zero_copy_output_buffer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n)\n\nenvoy_cc_library(\n    name = \"quic_platform\",\n    srcs = [\n        \"quiche/quic/platform/api/quic_file_utils.cc\",\n        \"quiche/quic/platform/api/quic_hostname_utils.cc\",\n        \"quiche/quic/platform/api/quic_mutex.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/platform/api/quic_cert_utils.h\",\n        \"quiche/quic/platform/api/quic_file_utils.h\",\n        \"quiche/quic/platform/api/quic_hostname_utils.h\",\n        \"quiche/quic/platform/api/quic_mutex.h\",\n        \"quiche/quic/platform/api/quic_pcc_sender.h\",\n    ],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_time_lib\",\n        \":quic_platform_base\",\n        \"@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_impl_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_base\",\n    hdrs = [\n        
\"quiche/quic/platform/api/quic_aligned.h\",\n        \"quiche/quic/platform/api/quic_bug_tracker.h\",\n        \"quiche/quic/platform/api/quic_client_stats.h\",\n        \"quiche/quic/platform/api/quic_containers.h\",\n        \"quiche/quic/platform/api/quic_error_code_wrappers.h\",\n        \"quiche/quic/platform/api/quic_estimate_memory_usage.h\",\n        \"quiche/quic/platform/api/quic_exported_stats.h\",\n        \"quiche/quic/platform/api/quic_fallthrough.h\",\n        \"quiche/quic/platform/api/quic_flag_utils.h\",\n        \"quiche/quic/platform/api/quic_flags.h\",\n        \"quiche/quic/platform/api/quic_iovec.h\",\n        \"quiche/quic/platform/api/quic_logging.h\",\n        \"quiche/quic/platform/api/quic_macros.h\",\n        \"quiche/quic/platform/api/quic_map_util.h\",\n        \"quiche/quic/platform/api/quic_mem_slice.h\",\n        \"quiche/quic/platform/api/quic_prefetch.h\",\n        \"quiche/quic/platform/api/quic_ptr_util.h\",\n        \"quiche/quic/platform/api/quic_reference_counted.h\",\n        \"quiche/quic/platform/api/quic_server_stats.h\",\n        \"quiche/quic/platform/api/quic_stack_trace.h\",\n        \"quiche/quic/platform/api/quic_stream_buffer_allocator.h\",\n        \"quiche/quic/platform/api/quic_string_utils.h\",\n        \"quiche/quic/platform/api/quic_uint128.h\",\n        # TODO: uncomment the following files as implementations are added.\n        # \"quiche/quic/platform/api/quic_fuzzed_data_provider.h\",\n        # \"quiche/quic/platform/api/quic_test_loopback.h\",\n    ],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_platform_export\",\n        \":quiche_common_lib\",\n        \"@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_base_impl_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_export\",\n    hdrs = [\"quiche/quic/platform/api/quic_export.h\"],\n    repository = \"@envoy\",\n    tags = 
[\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\"@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_export_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_epoll_lib\",\n    hdrs = [\"quiche/quic/platform/api/quic_epoll.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_epoll_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_expect_bug\",\n    hdrs = [\"quiche/quic/platform/api/quic_expect_bug.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_expect_bug_impl_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_ip_address_family\",\n    hdrs = [\"quiche/quic/platform/api/quic_ip_address_family.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_ip_address\",\n    srcs = [\"quiche/quic/platform/api/quic_ip_address.cc\"],\n    hdrs = [\"quiche/quic/platform/api/quic_ip_address.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_platform_base\",\n        \":quic_platform_export\",\n        \":quic_platform_ip_address_family\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_mock_log\",\n    hdrs = [\"quiche/quic/platform/api/quic_mock_log.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_mock_log_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_port_utils\",\n    hdrs = [\"quiche/quic/platform/api/quic_port_utils.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = 
[\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_port_utils_impl_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_udp_socket\",\n    hdrs = select({\n        \"@envoy//bazel:linux\": [\"quiche/quic/platform/api/quic_udp_socket_platform_api.h\"],\n        \"//conditions:default\": [],\n    }),\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_udp_socket_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_sleep\",\n    hdrs = [\"quiche/quic/platform/api/quic_sleep.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_sleep_impl_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_socket_address\",\n    srcs = [\"quiche/quic/platform/api/quic_socket_address.cc\"],\n    hdrs = [\"quiche/quic/platform/api/quic_socket_address.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_platform_export\",\n        \":quic_platform_ip_address\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_test\",\n    hdrs = [\"quiche/quic/platform/api/quic_test.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_test_output\",\n    hdrs = [\"quiche/quic/platform/api/quic_test_output.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_output_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_system_event_loop\",\n    hdrs = [\"quiche/quic/platform/api/quic_system_event_loop.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = 
[\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_system_event_loop_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_thread\",\n    hdrs = [\"quiche/quic/platform/api/quic_thread.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_thread_impl_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"quiche_common_platform_endian\",\n    hdrs = [\"quiche/common/platform/api/quiche_endian.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps =\n        [\n            \":quiche_common_platform_export\",\n            \"@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_endian_impl_lib\",\n        ],\n)\n\nenvoy_cc_library(\n    name = \"quiche_common_platform_export\",\n    hdrs = [\"quiche/common/platform/api/quiche_export.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps =\n        [\"@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_export_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quiche_common_test_tools_test_utils_lib\",\n    srcs = [\"quiche/common/test_tools/quiche_test_utils.cc\"],\n    hdrs = [\n        \"quiche/common/platform/api/quiche_test.h\",\n        \"quiche/common/test_tools/quiche_test_utils.h\",\n    ],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quiche_common_platform\",\n        \"@envoy//test/extensions/quic_listeners/quiche/platform:quiche_common_platform_test_impl_lib\",\n    ],\n)\n\n#TODO(danzh) Figure out why using envoy_proto_library() fails.\nproto_library(\n    name = \"quic_core_proto_cached_network_parameters_proto\",\n    srcs = [\"quiche/quic/core/proto/cached_network_parameters.proto\"],\n)\n\ncc_proto_library(\n    name = 
\"quic_core_proto_cached_network_parameters_proto_cc\",\n    deps = [\":quic_core_proto_cached_network_parameters_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_proto_cached_network_parameters_proto_header\",\n    hdrs = [\"quiche/quic/core/proto/cached_network_parameters_proto.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_core_proto_cached_network_parameters_proto_cc\"],\n)\n\nproto_library(\n    name = \"quic_core_proto_source_address_token_proto\",\n    srcs = [\"quiche/quic/core/proto/source_address_token.proto\"],\n    deps = [\":quic_core_proto_cached_network_parameters_proto\"],\n)\n\ncc_proto_library(\n    name = \"quic_core_proto_source_address_token_proto_cc\",\n    deps = [\":quic_core_proto_source_address_token_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_proto_source_address_token_proto_header\",\n    hdrs = [\"quiche/quic/core/proto/source_address_token_proto.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_core_proto_source_address_token_proto_cc\"],\n)\n\nproto_library(\n    name = \"quic_core_proto_crypto_server_config_proto\",\n    srcs = [\"quiche/quic/core/proto/crypto_server_config.proto\"],\n)\n\ncc_proto_library(\n    name = \"quic_core_proto_crypto_server_config_proto_cc\",\n    deps = [\":quic_core_proto_crypto_server_config_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_proto_crypto_server_config_proto_header\",\n    hdrs = [\"quiche/quic/core/proto/crypto_server_config_proto.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_core_proto_crypto_server_config_proto_cc\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_ack_listener_interface_lib\",\n    srcs = [\"quiche/quic/core/quic_ack_listener_interface.cc\"],\n    hdrs = [\"quiche/quic/core/quic_ack_listener_interface.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n 
   deps = [\n        \":quic_core_time_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_alarm_interface_lib\",\n    srcs = [\"quiche/quic/core/quic_alarm.cc\"],\n    hdrs = [\"quiche/quic/core/quic_alarm.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_arena_scoped_ptr_lib\",\n        \":quic_core_time_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_alarm_factory_interface_lib\",\n    hdrs = [\"quiche/quic/core/quic_alarm_factory.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_alarm_interface_lib\",\n        \":quic_core_one_block_arena_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_bandwidth_lib\",\n    srcs = [\"quiche/quic/core/quic_bandwidth.cc\"],\n    hdrs = [\"quiche/quic/core/quic_bandwidth.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_constants_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_batch_writer_batch_writer_buffer_lib\",\n    srcs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/quic/core/batch_writer/quic_batch_writer_buffer.cc\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/quic/core/batch_writer/quic_batch_writer_buffer.h\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_circular_deque_lib\",\n        
\":quic_core_linux_socket_utils_lib\",\n        \":quic_core_packet_writer_interface_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_batch_writer_batch_writer_base_lib\",\n    srcs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/quic/core/batch_writer/quic_batch_writer_base.cc\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/quic/core/batch_writer/quic_batch_writer_base.h\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_batch_writer_batch_writer_buffer_lib\",\n        \":quic_core_packet_writer_interface_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_core_batch_writer_batch_writer_test_lib\",\n    hdrs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/quic/core/batch_writer/quic_batch_writer_test.h\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_batch_writer_batch_writer_base_lib\",\n        \":quic_core_udp_socket_lib\",\n        \":quic_platform_test\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_batch_writer_gso_batch_writer_lib\",\n    srcs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/quic/core/batch_writer/quic_gso_batch_writer.cc\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/quic/core/batch_writer/quic_gso_batch_writer.h\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    
visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_batch_writer_batch_writer_base_lib\",\n        \":quic_core_linux_socket_utils_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_batch_writer_sendmmsg_batch_writer_lib\",\n    srcs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.cc\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.h\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_batch_writer_batch_writer_base_lib\",\n        \":quic_core_linux_socket_utils_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_blocked_writer_interface_lib\",\n    hdrs = [\"quiche/quic/core/quic_blocked_writer_interface.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_platform_export\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_arena_scoped_ptr_lib\",\n    hdrs = [\"quiche/quic/core/quic_arena_scoped_ptr.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\":quic_platform_base\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_buffer_allocator_lib\",\n    srcs = [\n        \"quiche/quic/core/quic_buffer_allocator.cc\",\n        \"quiche/quic/core/quic_simple_buffer_allocator.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/quic_buffer_allocator.h\",\n        \"quiche/quic/core/quic_simple_buffer_allocator.h\",\n    ],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\":quic_platform_export\"],\n)\n\nenvoy_cc_library(\n    name = 
\"quic_core_clock_lib\",\n    srcs = [\"quiche/quic/core/quic_clock.cc\"],\n    hdrs = [\"quiche/quic/core/quic_clock.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_time_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_coalesced_packet_lib\",\n    srcs = [\"quiche/quic/core/quic_coalesced_packet.cc\"],\n    hdrs = [\"quiche/quic/core/quic_coalesced_packet.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":quic_core_packets_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_config_lib\",\n    srcs = [\"quiche/quic/core/quic_config.cc\"],\n    hdrs = [\"quiche/quic/core/quic_config.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_constants_lib\",\n        \":quic_core_crypto_crypto_handshake_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_socket_address_coder_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_bandwidth_sampler_lib\",\n    srcs = [\"quiche/quic/core/congestion_control/bandwidth_sampler.cc\"],\n    hdrs = [\"quiche/quic/core/congestion_control/bandwidth_sampler.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_congestion_control_congestion_control_interface_lib\",\n        \":quic_core_congestion_control_windowed_filter_lib\",\n        \":quic_core_packet_number_indexed_queue_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_types_lib\",\n        
\":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_bbr_lib\",\n    srcs = [\"quiche/quic/core/congestion_control/bbr_sender.cc\"],\n    hdrs = [\"quiche/quic/core/congestion_control/bbr_sender.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_congestion_control_bandwidth_sampler_lib\",\n        \":quic_core_congestion_control_congestion_control_interface_lib\",\n        \":quic_core_congestion_control_rtt_stats_lib\",\n        \":quic_core_congestion_control_windowed_filter_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_unacked_packet_map_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_bbr2_lib\",\n    srcs = [\n        \"quiche/quic/core/congestion_control/bbr2_drain.cc\",\n        \"quiche/quic/core/congestion_control/bbr2_misc.cc\",\n        \"quiche/quic/core/congestion_control/bbr2_probe_bw.cc\",\n        \"quiche/quic/core/congestion_control/bbr2_probe_rtt.cc\",\n        \"quiche/quic/core/congestion_control/bbr2_sender.cc\",\n        \"quiche/quic/core/congestion_control/bbr2_startup.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/congestion_control/bbr2_drain.h\",\n        \"quiche/quic/core/congestion_control/bbr2_misc.h\",\n        \"quiche/quic/core/congestion_control/bbr2_probe_bw.h\",\n        \"quiche/quic/core/congestion_control/bbr2_probe_rtt.h\",\n        \"quiche/quic/core/congestion_control/bbr2_sender.h\",\n        \"quiche/quic/core/congestion_control/bbr2_startup.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_bandwidth_lib\",\n        
\":quic_core_congestion_control_bandwidth_sampler_lib\",\n        \":quic_core_congestion_control_bbr_lib\",\n        \":quic_core_congestion_control_congestion_control_interface_lib\",\n        \":quic_core_congestion_control_rtt_stats_lib\",\n        \":quic_core_congestion_control_windowed_filter_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_general_loss_algorithm_lib\",\n    srcs = [\"quiche/quic/core/congestion_control/general_loss_algorithm.cc\"],\n    hdrs = [\"quiche/quic/core/congestion_control/general_loss_algorithm.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_congestion_control_congestion_control_interface_lib\",\n        \":quic_core_congestion_control_rtt_stats_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_unacked_packet_map_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_congestion_control_interface_lib\",\n    hdrs = [\n        \"quiche/quic/core/congestion_control/loss_detection_interface.h\",\n        \"quiche/quic/core/congestion_control/send_algorithm_interface.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_clock_lib\",\n        \":quic_core_config_lib\",\n        \":quic_core_connection_stats_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_unacked_packet_map_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_congestion_control_lib\",\n    srcs = [\n        
\"quiche/quic/core/congestion_control/send_algorithm_interface.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/congestion_control/loss_detection_interface.h\",\n        \"quiche/quic/core/congestion_control/send_algorithm_interface.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_config_lib\",\n        \":quic_core_congestion_control_bbr2_lib\",\n        \":quic_core_congestion_control_bbr_lib\",\n        \":quic_core_congestion_control_tcp_cubic_bytes_lib\",\n        \":quic_core_connection_stats_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_unacked_packet_map_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_pacing_sender_lib\",\n    srcs = [\"quiche/quic/core/congestion_control/pacing_sender.cc\"],\n    hdrs = [\"quiche/quic/core/congestion_control/pacing_sender.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_config_lib\",\n        \":quic_core_congestion_control_congestion_control_interface_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_time_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_rtt_stats_lib\",\n    srcs = [\"quiche/quic/core/congestion_control/rtt_stats.cc\"],\n    hdrs = [\"quiche/quic/core/congestion_control/rtt_stats.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_packets_lib\",\n        \":quic_core_time_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_tcp_cubic_helper\",\n    srcs = [\n  
      \"quiche/quic/core/congestion_control/hybrid_slow_start.cc\",\n        \"quiche/quic/core/congestion_control/prr_sender.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/congestion_control/hybrid_slow_start.h\",\n        \"quiche/quic/core/congestion_control/prr_sender.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_time_lib\",\n        \":quic_platform_base\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_tcp_cubic_bytes_lib\",\n    srcs = [\n        \"quiche/quic/core/congestion_control/cubic_bytes.cc\",\n        \"quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/congestion_control/cubic_bytes.h\",\n        \"quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_congestion_control_congestion_control_interface_lib\",\n        \":quic_core_congestion_control_rtt_stats_lib\",\n        \":quic_core_congestion_control_tcp_cubic_helper\",\n        \":quic_core_connection_stats_lib\",\n        \":quic_core_constants_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_time_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_congestion_control_uber_loss_algorithm_lib\",\n    srcs = [\"quiche/quic/core/congestion_control/uber_loss_algorithm.cc\"],\n    hdrs = [\"quiche/quic/core/congestion_control/uber_loss_algorithm.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_core_congestion_control_general_loss_algorithm_lib\"],\n)\n\nenvoy_cc_library(\n 
   name = \"quic_core_congestion_control_windowed_filter_lib\",\n    hdrs = [\"quiche/quic/core/congestion_control/windowed_filter.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_core_time_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_connection_lib\",\n    srcs = [\"quiche/quic/core/quic_connection.cc\"],\n    hdrs = [\"quiche/quic/core/quic_connection.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_alarm_factory_interface_lib\",\n        \":quic_core_alarm_interface_lib\",\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_blocked_writer_interface_lib\",\n        \":quic_core_config_lib\",\n        \":quic_core_connection_stats_lib\",\n        \":quic_core_crypto_crypto_handshake_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_framer_lib\",\n        \":quic_core_idle_network_detector_lib\",\n        \":quic_core_legacy_version_encapsulator_lib\",\n        \":quic_core_mtu_discovery_lib\",\n        \":quic_core_network_blackhole_detector_lib\",\n        \":quic_core_one_block_arena_lib\",\n        \":quic_core_packet_creator_lib\",\n        \":quic_core_packet_writer_interface_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_proto_cached_network_parameters_proto_header\",\n        \":quic_core_sent_packet_manager_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_uber_received_packet_manager_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_connection_stats_lib\",\n    srcs = [\"quiche/quic/core/quic_connection_stats.cc\"],\n    hdrs = [\"quiche/quic/core/quic_connection_stats.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        
\":quic_core_bandwidth_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_time_accumulator_lib\",\n        \":quic_core_time_lib\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_constants_lib\",\n    srcs = [\"quiche/quic/core/quic_constants.cc\"],\n    hdrs = [\"quiche/quic/core/quic_constants.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_types_lib\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_crypto_crypto_handshake_lib\",\n    srcs = [\n        \"quiche/quic/core/crypto/cert_compressor.cc\",\n        \"quiche/quic/core/crypto/channel_id.cc\",\n        \"quiche/quic/core/crypto/common_cert_set.cc\",\n        \"quiche/quic/core/crypto/crypto_framer.cc\",\n        \"quiche/quic/core/crypto/crypto_handshake.cc\",\n        \"quiche/quic/core/crypto/crypto_handshake_message.cc\",\n        \"quiche/quic/core/crypto/crypto_secret_boxer.cc\",\n        \"quiche/quic/core/crypto/crypto_utils.cc\",\n        \"quiche/quic/core/crypto/curve25519_key_exchange.cc\",\n        \"quiche/quic/core/crypto/key_exchange.cc\",\n        \"quiche/quic/core/crypto/p256_key_exchange.cc\",\n        \"quiche/quic/core/crypto/quic_compressed_certs_cache.cc\",\n        \"quiche/quic/core/crypto/quic_crypto_client_config.cc\",\n        \"quiche/quic/core/crypto/quic_crypto_server_config.cc\",\n        \"quiche/quic/core/crypto/server_proof_verifier.h\",\n        \"quiche/quic/core/crypto/transport_parameters.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/crypto/cert_compressor.h\",\n        \"quiche/quic/core/crypto/channel_id.h\",\n        \"quiche/quic/core/crypto/common_cert_set.h\",\n        \"quiche/quic/core/crypto/crypto_framer.h\",\n        \"quiche/quic/core/crypto/crypto_handshake.h\",\n        
\"quiche/quic/core/crypto/crypto_handshake_message.h\",\n        \"quiche/quic/core/crypto/crypto_message_parser.h\",\n        \"quiche/quic/core/crypto/crypto_secret_boxer.h\",\n        \"quiche/quic/core/crypto/crypto_utils.h\",\n        \"quiche/quic/core/crypto/curve25519_key_exchange.h\",\n        \"quiche/quic/core/crypto/key_exchange.h\",\n        \"quiche/quic/core/crypto/p256_key_exchange.h\",\n        \"quiche/quic/core/crypto/proof_verifier.h\",\n        \"quiche/quic/core/crypto/quic_compressed_certs_cache.h\",\n        \"quiche/quic/core/crypto/quic_crypto_client_config.h\",\n        \"quiche/quic/core/crypto/quic_crypto_server_config.h\",\n        \"quiche/quic/core/crypto/transport_parameters.h\",\n    ],\n    copts = quiche_copts,\n    external_deps = [\n        \"ssl\",\n        \"zlib\",\n    ],\n    repository = \"@envoy\",\n    tags = [\n        \"nofips\",\n        \"pg3\",\n    ],\n    textual_hdrs = [\n        \"quiche/quic/core/crypto/common_cert_set_2.c\",\n        \"quiche/quic/core/crypto/common_cert_set_2a.inc\",\n        \"quiche/quic/core/crypto/common_cert_set_2b.inc\",\n        \"quiche/quic/core/crypto/common_cert_set_3.c\",\n        \"quiche/quic/core/crypto/common_cert_set_3a.inc\",\n        \"quiche/quic/core/crypto/common_cert_set_3b.inc\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_clock_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_crypto_hkdf_lib\",\n        \":quic_core_crypto_proof_source_interface_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_crypto_tls_handshake_lib\",\n        \":quic_core_data_lib\",\n        \":quic_core_error_codes_lib\",\n        \":quic_core_lru_cache_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_proto_cached_network_parameters_proto_header\",\n        \":quic_core_proto_crypto_server_config_proto_header\",\n        \":quic_core_proto_source_address_token_proto_header\",\n        
\":quic_core_server_id_lib\",\n        \":quic_core_socket_address_coder_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_crypto_boring_utils_lib\",\n    hdrs = [\"quiche/quic/core/crypto/boring_utils.h\"],\n    copts = quiche_copts,\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_export\",\n        \":quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_crypto_certificate_view_lib\",\n    srcs = [\"quiche/quic/core/crypto/certificate_view.cc\"],\n    hdrs = [\"quiche/quic/core/crypto/certificate_view.h\"],\n    copts = quiche_copts,\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_crypto_boring_utils_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform\",\n        \":quic_platform_ip_address\",\n        \":quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_crypto_encryption_lib\",\n    srcs = [\n        \"quiche/quic/core/crypto/aead_base_decrypter.cc\",\n        \"quiche/quic/core/crypto/aead_base_encrypter.cc\",\n        \"quiche/quic/core/crypto/aes_128_gcm_12_decrypter.cc\",\n        \"quiche/quic/core/crypto/aes_128_gcm_12_encrypter.cc\",\n        \"quiche/quic/core/crypto/aes_128_gcm_decrypter.cc\",\n        \"quiche/quic/core/crypto/aes_128_gcm_encrypter.cc\",\n        \"quiche/quic/core/crypto/aes_256_gcm_decrypter.cc\",\n        \"quiche/quic/core/crypto/aes_256_gcm_encrypter.cc\",\n        \"quiche/quic/core/crypto/aes_base_decrypter.cc\",\n        \"quiche/quic/core/crypto/aes_base_encrypter.cc\",\n        \"quiche/quic/core/crypto/chacha20_poly1305_decrypter.cc\",\n        
\"quiche/quic/core/crypto/chacha20_poly1305_encrypter.cc\",\n        \"quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter.cc\",\n        \"quiche/quic/core/crypto/chacha20_poly1305_tls_encrypter.cc\",\n        \"quiche/quic/core/crypto/chacha_base_decrypter.cc\",\n        \"quiche/quic/core/crypto/chacha_base_encrypter.cc\",\n        \"quiche/quic/core/crypto/null_decrypter.cc\",\n        \"quiche/quic/core/crypto/null_encrypter.cc\",\n        \"quiche/quic/core/crypto/quic_crypter.cc\",\n        \"quiche/quic/core/crypto/quic_decrypter.cc\",\n        \"quiche/quic/core/crypto/quic_encrypter.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/crypto/aead_base_decrypter.h\",\n        \"quiche/quic/core/crypto/aead_base_encrypter.h\",\n        \"quiche/quic/core/crypto/aes_128_gcm_12_decrypter.h\",\n        \"quiche/quic/core/crypto/aes_128_gcm_12_encrypter.h\",\n        \"quiche/quic/core/crypto/aes_128_gcm_decrypter.h\",\n        \"quiche/quic/core/crypto/aes_128_gcm_encrypter.h\",\n        \"quiche/quic/core/crypto/aes_256_gcm_decrypter.h\",\n        \"quiche/quic/core/crypto/aes_256_gcm_encrypter.h\",\n        \"quiche/quic/core/crypto/aes_base_decrypter.h\",\n        \"quiche/quic/core/crypto/aes_base_encrypter.h\",\n        \"quiche/quic/core/crypto/chacha20_poly1305_decrypter.h\",\n        \"quiche/quic/core/crypto/chacha20_poly1305_encrypter.h\",\n        \"quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter.h\",\n        \"quiche/quic/core/crypto/chacha20_poly1305_tls_encrypter.h\",\n        \"quiche/quic/core/crypto/chacha_base_decrypter.h\",\n        \"quiche/quic/core/crypto/chacha_base_encrypter.h\",\n        \"quiche/quic/core/crypto/crypto_protocol.h\",\n        \"quiche/quic/core/crypto/null_decrypter.h\",\n        \"quiche/quic/core/crypto/null_encrypter.h\",\n        \"quiche/quic/core/crypto/quic_crypter.h\",\n        \"quiche/quic/core/crypto/quic_decrypter.h\",\n        \"quiche/quic/core/crypto/quic_encrypter.h\",\n    ],\n    copts 
= quiche_copts,\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_crypto_hkdf_lib\",\n        \":quic_core_data_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_tag_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_crypto_hkdf_lib\",\n    srcs = [\"quiche/quic/core/crypto/quic_hkdf.cc\"],\n    hdrs = [\"quiche/quic/core/crypto/quic_hkdf.h\"],\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_crypto_proof_source_interface_lib\",\n    srcs = [\n        \"quiche/quic/core/crypto/proof_source.cc\",\n        \"quiche/quic/core/crypto/quic_crypto_proof.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/crypto/proof_source.h\",\n        \"quiche/quic/core/crypto/quic_crypto_proof.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_packets_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform_base\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_crypto_random_lib\",\n    srcs = [\"quiche/quic/core/crypto/quic_random.cc\"],\n    hdrs = [\"quiche/quic/core/crypto/quic_random.h\"],\n    copts = quiche_copts,\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\":quic_platform_base\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_crypto_tls_handshake_lib\",\n    srcs = [\n        \"quiche/quic/core/crypto/tls_client_connection.cc\",\n        \"quiche/quic/core/crypto/tls_connection.cc\",\n        
\"quiche/quic/core/crypto/tls_server_connection.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/crypto/tls_client_connection.h\",\n        \"quiche/quic/core/crypto/tls_connection.h\",\n        \"quiche/quic/core/crypto/tls_server_connection.h\",\n    ],\n    copts = quiche_copts,\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_crypto_proof_source_interface_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_circular_deque_lib\",\n    hdrs = [\"quiche/quic/core/quic_circular_deque.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_data_lib\",\n    srcs = [\n        \"quiche/quic/core/quic_data_reader.cc\",\n        \"quiche/quic/core/quic_data_writer.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/quic_data_reader.h\",\n        \"quiche/quic/core/quic_data_writer.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_constants_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_error_codes_lib\",\n    srcs = [\"quiche/quic/core/quic_error_codes.cc\"],\n    hdrs = [\"quiche/quic/core/quic_error_codes.h\"],\n    copts = quiche_copts,\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_platform_base\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_framer_lib\",\n    srcs = [\"quiche/quic/core/quic_framer.cc\"],\n    hdrs = 
[\"quiche/quic/core/quic_framer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_constants_lib\",\n        \":quic_core_crypto_crypto_handshake_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_data_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_socket_address_coder_lib\",\n        \":quic_core_stream_frame_data_producer_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_frames_frames_lib\",\n    srcs = [\n        \"quiche/quic/core/frames/quic_ack_frame.cc\",\n        \"quiche/quic/core/frames/quic_ack_frequency_frame.cc\",\n        \"quiche/quic/core/frames/quic_blocked_frame.cc\",\n        \"quiche/quic/core/frames/quic_connection_close_frame.cc\",\n        \"quiche/quic/core/frames/quic_crypto_frame.cc\",\n        \"quiche/quic/core/frames/quic_frame.cc\",\n        \"quiche/quic/core/frames/quic_goaway_frame.cc\",\n        \"quiche/quic/core/frames/quic_handshake_done_frame.cc\",\n        \"quiche/quic/core/frames/quic_max_streams_frame.cc\",\n        \"quiche/quic/core/frames/quic_message_frame.cc\",\n        \"quiche/quic/core/frames/quic_new_connection_id_frame.cc\",\n        \"quiche/quic/core/frames/quic_new_token_frame.cc\",\n        \"quiche/quic/core/frames/quic_padding_frame.cc\",\n        \"quiche/quic/core/frames/quic_path_challenge_frame.cc\",\n        \"quiche/quic/core/frames/quic_path_response_frame.cc\",\n        \"quiche/quic/core/frames/quic_ping_frame.cc\",\n        \"quiche/quic/core/frames/quic_retire_connection_id_frame.cc\",\n        \"quiche/quic/core/frames/quic_rst_stream_frame.cc\",\n        \"quiche/quic/core/frames/quic_stop_sending_frame.cc\",\n        \"quiche/quic/core/frames/quic_stop_waiting_frame.cc\",\n        
\"quiche/quic/core/frames/quic_stream_frame.cc\",\n        \"quiche/quic/core/frames/quic_streams_blocked_frame.cc\",\n        \"quiche/quic/core/frames/quic_window_update_frame.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/frames/quic_ack_frame.h\",\n        \"quiche/quic/core/frames/quic_ack_frequency_frame.h\",\n        \"quiche/quic/core/frames/quic_blocked_frame.h\",\n        \"quiche/quic/core/frames/quic_connection_close_frame.h\",\n        \"quiche/quic/core/frames/quic_crypto_frame.h\",\n        \"quiche/quic/core/frames/quic_frame.h\",\n        \"quiche/quic/core/frames/quic_goaway_frame.h\",\n        \"quiche/quic/core/frames/quic_handshake_done_frame.h\",\n        \"quiche/quic/core/frames/quic_inlined_frame.h\",\n        \"quiche/quic/core/frames/quic_max_streams_frame.h\",\n        \"quiche/quic/core/frames/quic_message_frame.h\",\n        \"quiche/quic/core/frames/quic_mtu_discovery_frame.h\",\n        \"quiche/quic/core/frames/quic_new_connection_id_frame.h\",\n        \"quiche/quic/core/frames/quic_new_token_frame.h\",\n        \"quiche/quic/core/frames/quic_padding_frame.h\",\n        \"quiche/quic/core/frames/quic_path_challenge_frame.h\",\n        \"quiche/quic/core/frames/quic_path_response_frame.h\",\n        \"quiche/quic/core/frames/quic_ping_frame.h\",\n        \"quiche/quic/core/frames/quic_retire_connection_id_frame.h\",\n        \"quiche/quic/core/frames/quic_rst_stream_frame.h\",\n        \"quiche/quic/core/frames/quic_stop_sending_frame.h\",\n        \"quiche/quic/core/frames/quic_stop_waiting_frame.h\",\n        \"quiche/quic/core/frames/quic_stream_frame.h\",\n        \"quiche/quic/core/frames/quic_streams_blocked_frame.h\",\n        \"quiche/quic/core/frames/quic_window_update_frame.h\",\n    ],\n    copts = quiche_copts,\n    # TODO: Work around initializer in anonymous union in fastbuild build.\n    # Remove this after upstream fix.\n    defines = select({\n        \"@envoy//bazel:windows_x86_64\": 
[\"QUIC_FRAME_DEBUG=0\"],\n        \"//conditions:default\": [],\n    }),\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_buffer_allocator_lib\",\n        \":quic_core_constants_lib\",\n        \":quic_core_error_codes_lib\",\n        \":quic_core_interval_lib\",\n        \":quic_core_interval_set_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform_base\",\n        \":quic_platform_mem_slice_span\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_http_http_constants_lib\",\n    hdrs = [\"quiche/quic/core/http/http_constants.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\":quic_core_types_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_http_client_lib\",\n    srcs = [\n        \"quiche/quic/core/http/quic_client_promised_info.cc\",\n        \"quiche/quic/core/http/quic_client_push_promise_index.cc\",\n        \"quiche/quic/core/http/quic_spdy_client_session.cc\",\n        \"quiche/quic/core/http/quic_spdy_client_session_base.cc\",\n        \"quiche/quic/core/http/quic_spdy_client_stream.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/http/quic_client_promised_info.h\",\n        \"quiche/quic/core/http/quic_client_push_promise_index.h\",\n        \"quiche/quic/core/http/quic_spdy_client_session.h\",\n        \"quiche/quic/core/http/quic_spdy_client_session_base.h\",\n        \"quiche/quic/core/http/quic_spdy_client_stream.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_alarm_interface_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_http_spdy_session_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_qpack_qpack_streams_lib\",\n        \":quic_core_server_id_lib\",\n        \":quic_core_session_lib\",\n    
    \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform_base\",\n        \":spdy_core_framer_lib\",\n        \":spdy_core_protocol_lib\",\n        \"@envoy//source/extensions/quic_listeners/quiche:spdy_server_push_utils_for_envoy_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_http_header_list_lib\",\n    srcs = [\"quiche/quic/core/http/quic_header_list.cc\"],\n    hdrs = [\"quiche/quic/core/http/quic_header_list.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_circular_deque_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_qpack_qpack_header_table_lib\",\n        \":quic_platform_base\",\n        \":spdy_core_header_block_lib\",\n        \":spdy_core_headers_handler_interface_lib\",\n        \":spdy_core_protocol_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_http_http_decoder_lib\",\n    srcs = [\"quiche/quic/core/http/http_decoder.cc\"],\n    hdrs = [\"quiche/quic/core/http/http_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_data_lib\",\n        \":quic_core_error_codes_lib\",\n        \":quic_core_http_http_frames_lib\",\n        \":quic_core_http_spdy_utils_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_http_http_encoder_lib\",\n    srcs = [\"quiche/quic/core/http/http_encoder.cc\"],\n    hdrs = [\"quiche/quic/core/http/http_encoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_data_lib\",\n        \":quic_core_error_codes_lib\",\n        \":quic_core_http_http_frames_lib\",\n        \":quic_core_http_spdy_utils_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = 
\"quic_core_http_http_frames_lib\",\n    hdrs = [\"quiche/quic/core/http/http_frames.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n        \":spdy_core_framer_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_http_spdy_server_push_utils_header\",\n    hdrs = [\"quiche/quic/core/http/spdy_server_push_utils.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_platform_base\",\n        \":spdy_core_header_block_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_http_spdy_session_lib\",\n    srcs = [\n        \"quiche/quic/core/http/quic_headers_stream.cc\",\n        \"quiche/quic/core/http/quic_receive_control_stream.cc\",\n        \"quiche/quic/core/http/quic_send_control_stream.cc\",\n        \"quiche/quic/core/http/quic_server_session_base.cc\",\n        \"quiche/quic/core/http/quic_spdy_server_stream_base.cc\",\n        \"quiche/quic/core/http/quic_spdy_session.cc\",\n        \"quiche/quic/core/http/quic_spdy_stream.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/http/quic_headers_stream.h\",\n        \"quiche/quic/core/http/quic_receive_control_stream.h\",\n        \"quiche/quic/core/http/quic_send_control_stream.h\",\n        \"quiche/quic/core/http/quic_server_session_base.h\",\n        \"quiche/quic/core/http/quic_spdy_server_stream_base.h\",\n        \"quiche/quic/core/http/quic_spdy_session.h\",\n        \"quiche/quic/core/http/quic_spdy_stream.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_connection_lib\",\n        \":quic_core_crypto_crypto_handshake_lib\",\n        \":quic_core_error_codes_lib\",\n        \":quic_core_http_header_list_lib\",\n        
\":quic_core_http_http_constants_lib\",\n        \":quic_core_http_http_decoder_lib\",\n        \":quic_core_http_http_encoder_lib\",\n        \":quic_core_http_spdy_stream_body_manager_lib\",\n        \":quic_core_http_spdy_utils_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_proto_cached_network_parameters_proto_header\",\n        \":quic_core_qpack_qpack_decoded_headers_accumulator_lib\",\n        \":quic_core_qpack_qpack_decoder_lib\",\n        \":quic_core_qpack_qpack_decoder_stream_sender_lib\",\n        \":quic_core_qpack_qpack_encoder_lib\",\n        \":quic_core_qpack_qpack_encoder_stream_sender_lib\",\n        \":quic_core_qpack_qpack_streams_lib\",\n        \":quic_core_session_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform_base\",\n        \":quic_platform_mem_slice_storage\",\n        \":spdy_core_framer_lib\",\n        \":spdy_core_http2_deframer_lib\",\n        \":spdy_core_protocol_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_http_spdy_stream_body_manager_lib\",\n    srcs = [\"quiche/quic/core/http/quic_spdy_stream_body_manager.cc\"],\n    hdrs = [\"quiche/quic/core/http/quic_spdy_stream_body_manager.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_http_http_decoder_lib\",\n        \":quic_core_session_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_http_spdy_utils_lib\",\n    srcs = [\"quiche/quic/core/http/spdy_utils.cc\"],\n    hdrs = [\"quiche/quic/core/http/spdy_utils.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_http_header_list_lib\",\n        \":quic_core_http_http_constants_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_platform_base\",\n        \":spdy_core_framer_lib\",\n        \":spdy_core_protocol_lib\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_idle_network_detector_lib\",\n    srcs = [\"quiche/quic/core/quic_idle_network_detector.cc\"],\n    hdrs = [\"quiche/quic/core/quic_idle_network_detector.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_alarm_factory_interface_lib\",\n        \":quic_core_alarm_interface_lib\",\n        \":quic_core_constants_lib\",\n        \":quic_core_one_block_arena_lib\",\n        \":quic_core_time_lib\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_interval_lib\",\n    hdrs = [\"quiche/quic/core/quic_interval.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_interval_deque_lib\",\n    hdrs = [\"quiche/quic/core/quic_interval_deque.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_circular_deque_lib\",\n        \":quic_core_interval_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_interval_set_lib\",\n    hdrs = [\"quiche/quic/core/quic_interval_set.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_interval_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_lru_cache_lib\",\n    hdrs = [\"quiche/quic/core/quic_lru_cache.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_platform_base\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_mtu_discovery_lib\",\n    srcs = [\"quiche/quic/core/quic_mtu_discovery.cc\"],\n    hdrs = [\"quiche/quic/core/quic_mtu_discovery.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    
deps = [\n        \":quic_core_constants_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_one_block_arena_lib\",\n    srcs = [\"quiche/quic/core/quic_one_block_arena.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_arena_scoped_ptr_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_syscall_wrapper_lib\",\n    srcs = select({\n        \"@envoy//bazel:linux\": [\"quiche/quic/core/quic_syscall_wrapper.cc\"],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"@envoy//bazel:linux\": [\"quiche/quic/core/quic_syscall_wrapper.h\"],\n        \"//conditions:default\": [],\n    }),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_legacy_version_encapsulator_lib\",\n    srcs = [\n        \"quiche/quic/core/quic_legacy_version_encapsulator.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/quic_legacy_version_encapsulator.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_crypto_crypto_handshake_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_packet_creator_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform\",\n        \":quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_linux_socket_utils_lib\",\n    srcs = select({\n        \"@envoy//bazel:linux\": [\"quiche/quic/core/quic_linux_socket_utils.cc\"],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"@envoy//bazel:linux\": [\"quiche/quic/core/quic_linux_socket_utils.h\"],\n        \"//conditions:default\": [],\n    
}),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_packet_writer_interface_lib\",\n        \":quic_core_syscall_wrapper_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_network_blackhole_detector_lib\",\n    srcs = [\"quiche/quic/core/quic_network_blackhole_detector.cc\"],\n    hdrs = [\"quiche/quic/core/quic_network_blackhole_detector.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_alarm_factory_interface_lib\",\n        \":quic_core_alarm_interface_lib\",\n        \":quic_core_constants_lib\",\n        \":quic_core_one_block_arena_lib\",\n        \":quic_core_time_lib\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_packet_creator_lib\",\n    srcs = [\"quiche/quic/core/quic_packet_creator.cc\"],\n    hdrs = [\"quiche/quic/core/quic_packet_creator.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_circular_deque_lib\",\n        \":quic_core_coalesced_packet_lib\",\n        \":quic_core_constants_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_data_lib\",\n        \":quic_core_framer_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_packet_number_indexed_queue_lib\",\n    hdrs = [\"quiche/quic/core/packet_number_indexed_queue.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_circular_deque_lib\",\n        \":quic_core_constants_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name 
= \"quic_core_packet_writer_interface_lib\",\n    srcs = [\"quiche/quic/core/quic_packet_writer_wrapper.cc\"],\n    hdrs = [\n        \"quiche/quic/core/quic_packet_writer.h\",\n        \"quiche/quic/core/quic_packet_writer_wrapper.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_packets_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_packets_lib\",\n    srcs = [\n        \"quiche/quic/core/quic_packets.cc\",\n        \"quiche/quic/core/quic_write_blocked_list.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/quic_packets.h\",\n        \"quiche/quic/core/quic_write_blocked_list.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_ack_listener_interface_lib\",\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_constants_lib\",\n        \":quic_core_error_codes_lib\",\n        \":quic_core_frames_frames_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform\",\n        \":quic_platform_socket_address\",\n        \":spdy_core_fifo_write_scheduler_lib\",\n        \":spdy_core_http2_priority_write_scheduler_lib\",\n        \":spdy_core_lifo_write_scheduler_lib\",\n        \":spdy_core_priority_write_scheduler_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_process_packet_interface_lib\",\n    hdrs = [\"quiche/quic/core/quic_process_packet_interface.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_packets_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_blocking_manager_lib\",\n    srcs = 
[\"quiche/quic/core/qpack/qpack_blocking_manager.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_blocking_manager.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_decoder_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_decoder.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_qpack_qpack_decoder_stream_sender_lib\",\n        \":quic_core_qpack_qpack_encoder_stream_receiver_lib\",\n        \":quic_core_qpack_qpack_header_table_lib\",\n        \":quic_core_qpack_qpack_progressive_decoder_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_encoder_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_encoder.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_encoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_qpack_blocking_manager_lib\",\n        \":quic_core_qpack_qpack_decoder_stream_receiver_lib\",\n        \":quic_core_qpack_qpack_encoder_stream_sender_lib\",\n        \":quic_core_qpack_qpack_header_table_lib\",\n        \":quic_core_qpack_qpack_index_conversions_lib\",\n        \":quic_core_qpack_qpack_instruction_encoder_lib\",\n        \":quic_core_qpack_qpack_instructions_lib\",\n        \":quic_core_qpack_qpack_required_insert_count_lib\",\n        \":quic_core_qpack_value_splitting_header_list_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_header_table_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_header_table.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_header_table.h\"],\n    copts = 
quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_qpack_qpack_static_table_lib\",\n        \":quic_platform_base\",\n        \":spdy_core_hpack_hpack_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_instruction_decoder_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_instruction_decoder.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_instruction_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":http2_hpack_huffman_hpack_huffman_decoder_lib\",\n        \":http2_hpack_varint_hpack_varint_decoder_lib\",\n        \":quic_core_qpack_qpack_instructions_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_instructions_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_instructions.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_instructions.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_platform_base\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_instruction_encoder_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_instruction_encoder.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_instruction_encoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":http2_hpack_huffman_hpack_huffman_encoder_lib\",\n        \":http2_hpack_varint_hpack_varint_encoder_lib\",\n        \":quic_core_qpack_qpack_instructions_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_progressive_decoder_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_progressive_decoder.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_progressive_decoder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        
\":quic_core_qpack_qpack_decoder_stream_sender_lib\",\n        \":quic_core_qpack_qpack_encoder_stream_receiver_lib\",\n        \":quic_core_qpack_qpack_header_table_lib\",\n        \":quic_core_qpack_qpack_index_conversions_lib\",\n        \":quic_core_qpack_qpack_instruction_decoder_lib\",\n        \":quic_core_qpack_qpack_instructions_lib\",\n        \":quic_core_qpack_qpack_required_insert_count_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_required_insert_count_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_required_insert_count.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_required_insert_count.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_platform_base\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_encoder_stream_sender_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_encoder_stream_sender.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_encoder_stream_sender.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_qpack_qpack_instruction_encoder_lib\",\n        \":quic_core_qpack_qpack_instructions_lib\",\n        \":quic_core_qpack_qpack_stream_sender_delegate_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_encoder_stream_receiver_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_encoder_stream_receiver.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_encoder_stream_receiver.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":quic_core_qpack_qpack_instruction_decoder_lib\",\n        \":quic_core_qpack_qpack_instructions_lib\",\n        
\":quic_core_qpack_qpack_stream_receiver_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_decoder_stream_sender_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_decoder_stream_sender.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_decoder_stream_sender.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_qpack_qpack_instruction_encoder_lib\",\n        \":quic_core_qpack_qpack_instructions_lib\",\n        \":quic_core_qpack_qpack_stream_sender_delegate_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_decoder_stream_receiver_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_decoder_stream_receiver.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_decoder_stream_receiver.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":http2_decoder_decode_buffer_lib\",\n        \":http2_decoder_decode_status_lib\",\n        \":quic_core_qpack_qpack_instruction_decoder_lib\",\n        \":quic_core_qpack_qpack_instructions_lib\",\n        \":quic_core_qpack_qpack_stream_receiver_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_index_conversions_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_index_conversions.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_index_conversions.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":quic_platform_base\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_static_table_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_static_table.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_static_table.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    
tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_base\",\n        \":spdy_core_hpack_hpack_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_stream_receiver_lib\",\n    hdrs = [\"quiche/quic/core/qpack/qpack_stream_receiver.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_platform_base\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_streams_lib\",\n    srcs = [\n        \"quiche/quic/core/qpack/qpack_receive_stream.cc\",\n        \"quiche/quic/core/qpack/qpack_send_stream.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/qpack/qpack_receive_stream.h\",\n        \"quiche/quic/core/qpack/qpack_send_stream.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    deps = [\n        \":quic_core_qpack_qpack_stream_receiver_lib\",\n        \":quic_core_qpack_qpack_stream_sender_delegate_lib\",\n        \":quic_core_session_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_decoded_headers_accumulator_lib\",\n    srcs = [\"quiche/quic/core/qpack/qpack_decoded_headers_accumulator.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/qpack_decoded_headers_accumulator.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_http_header_list_lib\",\n        \":quic_core_qpack_qpack_decoder_lib\",\n        \":quic_core_qpack_qpack_progressive_decoder_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_value_splitting_header_list_lib\",\n    srcs = [\"quiche/quic/core/qpack/value_splitting_header_list.cc\"],\n    hdrs = [\"quiche/quic/core/qpack/value_splitting_header_list.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_base\",\n        \":spdy_core_header_block_lib\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_qpack_qpack_stream_sender_delegate_lib\",\n    hdrs = [\"quiche/quic/core/qpack/qpack_stream_sender_delegate.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_platform_base\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_received_packet_manager_lib\",\n    srcs = [\"quiche/quic/core/quic_received_packet_manager.cc\"],\n    hdrs = [\"quiche/quic/core/quic_received_packet_manager.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_config_lib\",\n        \":quic_core_congestion_control_rtt_stats_lib\",\n        \":quic_core_connection_stats_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_framer_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_sent_packet_manager_lib\",\n    srcs = [\"quiche/quic/core/quic_sent_packet_manager.cc\"],\n    hdrs = [\"quiche/quic/core/quic_sent_packet_manager.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_congestion_control_congestion_control_lib\",\n        \":quic_core_congestion_control_general_loss_algorithm_lib\",\n        \":quic_core_congestion_control_pacing_sender_lib\",\n        \":quic_core_congestion_control_rtt_stats_lib\",\n        \":quic_core_congestion_control_uber_loss_algorithm_lib\",\n        \":quic_core_connection_stats_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_proto_cached_network_parameters_proto_header\",\n        \":quic_core_sustained_bandwidth_recorder_lib\",\n        \":quic_core_transmission_info_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_unacked_packet_map_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform_base\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_server_id_lib\",\n    srcs = [\"quiche/quic/core/quic_server_id.cc\"],\n    hdrs = [\"quiche/quic/core/quic_server_id.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_server_lib\",\n    srcs = [\n        \"quiche/quic/core/chlo_extractor.cc\",\n        \"quiche/quic/core/quic_buffered_packet_store.cc\",\n        \"quiche/quic/core/quic_dispatcher.cc\",\n        \"quiche/quic/core/tls_chlo_extractor.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/chlo_extractor.h\",\n        \"quiche/quic/core/quic_buffered_packet_store.h\",\n        \"quiche/quic/core/quic_dispatcher.h\",\n        \"quiche/quic/core/tls_chlo_extractor.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_alarm_factory_interface_lib\",\n        \":quic_core_alarm_interface_lib\",\n        \":quic_core_blocked_writer_interface_lib\",\n        \":quic_core_connection_lib\",\n        \":quic_core_crypto_crypto_handshake_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_framer_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_process_packet_interface_lib\",\n        \":quic_core_session_lib\",\n        \":quic_core_time_lib\",\n        \":quic_core_time_wait_list_manager_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_core_version_manager_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_session_lib\",\n    srcs = [\n        \"quiche/quic/core/legacy_quic_stream_id_manager.cc\",\n        \"quiche/quic/core/quic_control_frame_manager.cc\",\n        \"quiche/quic/core/quic_crypto_client_handshaker.cc\",\n        
\"quiche/quic/core/quic_crypto_client_stream.cc\",\n        \"quiche/quic/core/quic_crypto_handshaker.cc\",\n        \"quiche/quic/core/quic_crypto_server_stream.cc\",\n        \"quiche/quic/core/quic_crypto_server_stream_base.cc\",\n        \"quiche/quic/core/quic_crypto_stream.cc\",\n        \"quiche/quic/core/quic_datagram_queue.cc\",\n        \"quiche/quic/core/quic_flow_controller.cc\",\n        \"quiche/quic/core/quic_session.cc\",\n        \"quiche/quic/core/quic_stream.cc\",\n        \"quiche/quic/core/quic_stream_id_manager.cc\",\n        \"quiche/quic/core/quic_stream_sequencer.cc\",\n        \"quiche/quic/core/tls_client_handshaker.cc\",\n        \"quiche/quic/core/tls_handshaker.cc\",\n        \"quiche/quic/core/tls_server_handshaker.cc\",\n        \"quiche/quic/core/uber_quic_stream_id_manager.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/handshaker_delegate_interface.h\",\n        \"quiche/quic/core/legacy_quic_stream_id_manager.h\",\n        \"quiche/quic/core/quic_control_frame_manager.h\",\n        \"quiche/quic/core/quic_crypto_client_handshaker.h\",\n        \"quiche/quic/core/quic_crypto_client_stream.h\",\n        \"quiche/quic/core/quic_crypto_handshaker.h\",\n        \"quiche/quic/core/quic_crypto_server_stream.h\",\n        \"quiche/quic/core/quic_crypto_server_stream_base.h\",\n        \"quiche/quic/core/quic_crypto_stream.h\",\n        \"quiche/quic/core/quic_datagram_queue.h\",\n        \"quiche/quic/core/quic_flow_controller.h\",\n        \"quiche/quic/core/quic_session.h\",\n        \"quiche/quic/core/quic_stream.h\",\n        \"quiche/quic/core/quic_stream_id_manager.h\",\n        \"quiche/quic/core/quic_stream_sequencer.h\",\n        \"quiche/quic/core/stream_delegate_interface.h\",\n        \"quiche/quic/core/tls_client_handshaker.h\",\n        \"quiche/quic/core/tls_handshaker.h\",\n        \"quiche/quic/core/tls_server_handshaker.h\",\n        \"quiche/quic/core/uber_quic_stream_id_manager.h\",\n    ],\n    copts = 
quiche_copts,\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_config_lib\",\n        \":quic_core_connection_lib\",\n        \":quic_core_constants_lib\",\n        \":quic_core_crypto_crypto_handshake_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_crypto_tls_handshake_lib\",\n        \":quic_core_frames_frames_lib\",\n        \":quic_core_packet_creator_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_server_id_lib\",\n        \":quic_core_session_notifier_interface_lib\",\n        \":quic_core_stream_frame_data_producer_lib\",\n        \":quic_core_stream_send_buffer_lib\",\n        \":quic_core_stream_sequencer_buffer_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform\",\n        \":quic_platform_mem_slice_span\",\n        \":spdy_core_protocol_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_session_notifier_interface_lib\",\n    hdrs = [\"quiche/quic/core/session_notifier_interface.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_frames_frames_lib\",\n        \":quic_core_time_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_socket_address_coder_lib\",\n    srcs = [\"quiche/quic/core/quic_socket_address_coder.cc\"],\n    hdrs = [\"quiche/quic/core/quic_socket_address_coder.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_base\",\n        \":quic_platform_socket_address\",\n        \":spdy_core_priority_write_scheduler_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_stream_frame_data_producer_lib\",\n    hdrs = [\"quiche/quic/core/quic_stream_frame_data_producer.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = 
[\":quic_core_types_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_stream_send_buffer_lib\",\n    srcs = [\"quiche/quic/core/quic_stream_send_buffer.cc\"],\n    hdrs = [\"quiche/quic/core/quic_stream_send_buffer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_circular_deque_lib\",\n        \":quic_core_data_lib\",\n        \":quic_core_frames_frames_lib\",\n        \":quic_core_interval_deque_lib\",\n        \":quic_core_interval_lib\",\n        \":quic_core_interval_set_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform_base\",\n        \":quic_platform_mem_slice_span\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_stream_sequencer_buffer_lib\",\n    srcs = [\"quiche/quic/core/quic_stream_sequencer_buffer.cc\"],\n    hdrs = [\"quiche/quic/core/quic_stream_sequencer_buffer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_constants_lib\",\n        \":quic_core_interval_lib\",\n        \":quic_core_interval_set_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_sustained_bandwidth_recorder_lib\",\n    srcs = [\"quiche/quic/core/quic_sustained_bandwidth_recorder.cc\"],\n    hdrs = [\"quiche/quic/core/quic_sustained_bandwidth_recorder.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_bandwidth_lib\",\n        \":quic_core_time_lib\",\n        \":quic_platform_base\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_tag_lib\",\n    srcs = [\"quiche/quic/core/quic_tag.cc\"],\n    hdrs = [\"quiche/quic/core/quic_tag.h\"],\n    copts = quiche_copts,\n    repository = 
\"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\":quic_platform_base\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_time_lib\",\n    srcs = [\"quiche/quic/core/quic_time.cc\"],\n    hdrs = [\"quiche/quic/core/quic_time.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\":quic_platform_base\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_time_accumulator_lib\",\n    hdrs = [\"quiche/quic/core/quic_time_accumulator.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_time_wait_list_manager_lib\",\n    srcs = [\"quiche/quic/core/quic_time_wait_list_manager.cc\"],\n    hdrs = [\"quiche/quic/core/quic_time_wait_list_manager.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_blocked_writer_interface_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_framer_lib\",\n        \":quic_core_packet_writer_interface_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_session_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_transmission_info_lib\",\n    srcs = [\"quiche/quic/core/quic_transmission_info.cc\"],\n    hdrs = [\"quiche/quic/core/quic_transmission_info.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_ack_listener_interface_lib\",\n        \":quic_core_frames_frames_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_export\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_types_lib\",\n    srcs = [\n        \"quiche/quic/core/quic_connection_id.cc\",\n        \"quiche/quic/core/quic_packet_number.cc\",\n    
    \"quiche/quic/core/quic_types.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/core/quic_connection_id.h\",\n        \"quiche/quic/core/quic_packet_number.h\",\n        \"quiche/quic/core/quic_types.h\",\n    ],\n    copts = quiche_copts,\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_error_codes_lib\",\n        \":quic_core_time_lib\",\n        \":quic_platform_base\",\n        \":quiche_common_platform_endian\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_uber_received_packet_manager_lib\",\n    srcs = [\"quiche/quic/core/uber_received_packet_manager.cc\"],\n    hdrs = [\"quiche/quic/core/uber_received_packet_manager.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_received_packet_manager_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_udp_socket_lib\",\n    srcs = select({\n        \"@envoy//bazel:windows_x86_64\": [],\n        \"//conditions:default\": [\"quiche/quic/core/quic_udp_socket_posix.cc\"],\n    }),\n    hdrs = select({\n        \"@envoy//bazel:windows_x86_64\": [],\n        \"//conditions:default\": [\"quiche/quic/core/quic_udp_socket.h\"],\n    }),\n    copts = quiche_copts + select({\n        # On OSX/iOS, constants from RFC 3542 (e.g. 
IPV6_RECVPKTINFO) are not usable\n        # without this define.\n        \"@envoy//bazel:apple\": [\"-D__APPLE_USE_RFC_3542\"],\n        \"//conditions:default\": [],\n    }),\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_types_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform\",\n        \":quic_platform_udp_socket\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_unacked_packet_map_lib\",\n    srcs = [\"quiche/quic/core/quic_unacked_packet_map.cc\"],\n    hdrs = [\"quiche/quic/core/quic_unacked_packet_map.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_connection_stats_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_session_notifier_interface_lib\",\n        \":quic_core_transmission_info_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_utils_lib\",\n    srcs = [\"quiche/quic/core/quic_utils.cc\"],\n    hdrs = [\"quiche/quic/core/quic_utils.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_constants_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_error_codes_lib\",\n        \":quic_core_frames_frames_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform_base\",\n        \":quic_platform_socket_address\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_core_version_manager_lib\",\n    srcs = [\"quiche/quic/core/quic_version_manager.cc\"],\n    hdrs = [\"quiche/quic/core/quic_version_manager.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_versions_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = 
\"quic_core_versions_lib\",\n    srcs = [\"quiche/quic/core/quic_versions.cc\"],\n    hdrs = [\"quiche/quic/core/quic_versions.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_tag_lib\",\n        \":quic_core_types_lib\",\n        \":quic_platform_base\",\n        \":quiche_common_platform_endian\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_config_peer_lib\",\n    srcs = [\"quiche/quic/test_tools/quic_config_peer.cc\"],\n    hdrs = [\"quiche/quic/test_tools/quic_config_peer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_config_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_crypto_server_config_peer_lib\",\n    srcs = [\n        \"quiche/quic/test_tools/quic_crypto_server_config_peer.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/test_tools/quic_crypto_server_config_peer.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_crypto_crypto_handshake_lib\",\n        \":quic_test_tools_mock_clock_lib\",\n        \":quic_test_tools_mock_random_lib\",\n        \":quic_test_tools_test_utils_interface_lib\",\n        \":quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_first_flight_lib\",\n    srcs = [\n        \"quiche/quic/test_tools/first_flight.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/test_tools/first_flight.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_config_lib\",\n        \":quic_core_connection_lib\",\n        \":quic_core_crypto_crypto_handshake_lib\",\n        
\":quic_core_http_client_lib\",\n        \":quic_core_packet_writer_interface_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_types_lib\",\n        \":quic_core_versions_lib\",\n        \":quic_platform\",\n        \":quic_test_tools_test_utils_interface_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_test_tools_flow_controller_peer_lib\",\n    srcs = [\n        \"quiche/quic/test_tools/quic_flow_controller_peer.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/test_tools/quic_flow_controller_peer.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_packets_lib\",\n        \":quic_core_session_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_framer_peer_lib\",\n    srcs = [\"quiche/quic/test_tools/quic_framer_peer.cc\"],\n    hdrs = [\"quiche/quic/test_tools/quic_framer_peer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_framer_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_test_tools_interval_deque_peer_lib\",\n    hdrs = [\"quiche/quic/test_tools/quic_interval_deque_peer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_interval_deque_lib\",\n        \":quic_core_interval_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_mock_clock_lib\",\n    srcs = [\"quiche/quic/test_tools/mock_clock.cc\"],\n    hdrs = [\"quiche/quic/test_tools/mock_clock.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_clock_lib\",\n        \":quic_core_time_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_mock_random_lib\",\n    srcs = 
[\"quiche/quic/test_tools/mock_random.cc\"],\n    hdrs = [\"quiche/quic/test_tools/mock_random.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_core_crypto_random_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_mock_syscall_wrapper_lib\",\n    srcs = [\"quiche/quic/test_tools/quic_mock_syscall_wrapper.cc\"],\n    hdrs = [\"quiche/quic/test_tools/quic_mock_syscall_wrapper.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_syscall_wrapper_lib\",\n        \":quic_platform_base\",\n        \":quic_platform_test\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_sent_packet_manager_peer_lib\",\n    srcs = [\"quiche/quic/test_tools/quic_sent_packet_manager_peer.cc\"],\n    hdrs = [\"quiche/quic/test_tools/quic_sent_packet_manager_peer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_congestion_control_congestion_control_interface_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_sent_packet_manager_lib\",\n        \":quic_test_tools_unacked_packet_map_peer_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_server_session_base_peer\",\n    hdrs = [\n        \"quiche/quic/test_tools/quic_server_session_base_peer.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_http_spdy_session_lib\",\n        \":quic_core_utils_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_simple_quic_framer_lib\",\n    srcs = [\"quiche/quic/test_tools/simple_quic_framer.cc\"],\n    hdrs = [\"quiche/quic/test_tools/simple_quic_framer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_crypto_encryption_lib\",\n        
\":quic_core_framer_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_platform_base\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_stream_send_buffer_peer_lib\",\n    srcs = [\"quiche/quic/test_tools/quic_stream_send_buffer_peer.cc\"],\n    hdrs = [\"quiche/quic/test_tools/quic_stream_send_buffer_peer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_stream_send_buffer_lib\",\n        \":quic_test_tools_interval_deque_peer_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_stream_peer_lib\",\n    srcs = [\"quiche/quic/test_tools/quic_stream_peer.cc\"],\n    hdrs = [\"quiche/quic/test_tools/quic_stream_peer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_packets_lib\",\n        \":quic_core_session_lib\",\n        \":quic_core_stream_send_buffer_lib\",\n        \":quic_platform_base\",\n        \":quic_test_tools_flow_controller_peer_lib\",\n        \":quic_test_tools_stream_send_buffer_peer_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_test_certificates_lib\",\n    srcs = [\"quiche/quic/test_tools/test_certificates.cc\"],\n    hdrs = [\"quiche/quic/test_tools/test_certificates.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_base\",\n        \":quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_test_utils_interface_lib\",\n    srcs = [\n        \"quiche/quic/test_tools/crypto_test_utils.cc\",\n        \"quiche/quic/test_tools/mock_quic_session_visitor.cc\",\n        \"quiche/quic/test_tools/mock_quic_time_wait_list_manager.cc\",\n        \"quiche/quic/test_tools/quic_buffered_packet_store_peer.cc\",\n        \"quiche/quic/test_tools/quic_connection_peer.cc\",\n        
\"quiche/quic/test_tools/quic_dispatcher_peer.cc\",\n        \"quiche/quic/test_tools/quic_test_utils.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/test_tools/crypto_test_utils.h\",\n        \"quiche/quic/test_tools/mock_quic_session_visitor.h\",\n        \"quiche/quic/test_tools/mock_quic_time_wait_list_manager.h\",\n        \"quiche/quic/test_tools/quic_buffered_packet_store_peer.h\",\n        \"quiche/quic/test_tools/quic_connection_peer.h\",\n        \"quiche/quic/test_tools/quic_dispatcher_peer.h\",\n        \"quiche/quic/test_tools/quic_test_utils.h\",\n    ],\n    copts = quiche_copts,\n    external_deps = [\"ssl\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_buffer_allocator_lib\",\n        \":quic_core_congestion_control_congestion_control_interface_lib\",\n        \":quic_core_connection_lib\",\n        \":quic_core_connection_stats_lib\",\n        \":quic_core_crypto_crypto_handshake_lib\",\n        \":quic_core_crypto_encryption_lib\",\n        \":quic_core_crypto_proof_source_interface_lib\",\n        \":quic_core_crypto_random_lib\",\n        \":quic_core_data_lib\",\n        \":quic_core_framer_lib\",\n        \":quic_core_http_client_lib\",\n        \":quic_core_http_spdy_session_lib\",\n        \":quic_core_packet_creator_lib\",\n        \":quic_core_packet_writer_interface_lib\",\n        \":quic_core_packets_lib\",\n        \":quic_core_received_packet_manager_lib\",\n        \":quic_core_sent_packet_manager_lib\",\n        \":quic_core_server_id_lib\",\n        \":quic_core_server_lib\",\n        \":quic_core_session_lib\",\n        \":quic_core_time_wait_list_manager_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform\",\n        \":quic_platform_test\",\n        \":quic_test_tools_config_peer_lib\",\n        \":quic_test_tools_framer_peer_lib\",\n        \":quic_test_tools_mock_clock_lib\",\n        \":quic_test_tools_mock_random_lib\",\n        
\":quic_test_tools_sent_packet_manager_peer_lib\",\n        \":quic_test_tools_simple_quic_framer_lib\",\n        \":quic_test_tools_stream_peer_lib\",\n        \":quiche_common_test_tools_test_utils_lib\",\n        \":spdy_core_framer_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_session_peer_lib\",\n    srcs = [\n        \"quiche/quic/test_tools/quic_session_peer.cc\",\n    ],\n    hdrs = [\n        \"quiche/quic/test_tools/quic_session_peer.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_packets_lib\",\n        \":quic_core_session_lib\",\n        \":quic_core_utils_lib\",\n        \":quic_platform\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_tools_unacked_packet_map_peer_lib\",\n    srcs = [\"quiche/quic/test_tools/quic_unacked_packet_map_peer.cc\"],\n    hdrs = [\"quiche/quic/test_tools/quic_unacked_packet_map_peer.h\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":quic_core_unacked_packet_map_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"epoll_server_platform\",\n    hdrs = [\n        \"quiche/epoll_server/platform/api/epoll_address_test_utils.h\",\n        \"quiche/epoll_server/platform/api/epoll_bug.h\",\n        \"quiche/epoll_server/platform/api/epoll_expect_bug.h\",\n        \"quiche/epoll_server/platform/api/epoll_export.h\",\n        \"quiche/epoll_server/platform/api/epoll_logging.h\",\n        \"quiche/epoll_server/platform/api/epoll_ptr_util.h\",\n        \"quiche/epoll_server/platform/api/epoll_test.h\",\n        \"quiche/epoll_server/platform/api/epoll_thread.h\",\n        \"quiche/epoll_server/platform/api/epoll_time.h\",\n    ],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:epoll_server_platform_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"epoll_server_lib\",\n    srcs 
= select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/epoll_server/fake_simple_epoll_server.cc\",\n            \"quiche/epoll_server/simple_epoll_server.cc\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"@envoy//bazel:linux\": [\n            \"quiche/epoll_server/fake_simple_epoll_server.h\",\n            \"quiche/epoll_server/simple_epoll_server.h\",\n        ],\n        \"//conditions:default\": [],\n    }),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":epoll_server_platform\"],\n)\n\nenvoy_cc_library(\n    name = \"quiche_common_platform_optional\",\n    hdrs = [\"quiche/common/platform/api/quiche_optional.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quiche_common_platform_export\",\n        \"@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_optional_impl_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quiche_common_platform\",\n    hdrs = [\n        \"quiche/common/platform/api/quiche_arraysize.h\",\n        \"quiche/common/platform/api/quiche_logging.h\",\n        \"quiche/common/platform/api/quiche_optional.h\",\n        \"quiche/common/platform/api/quiche_ptr_util.h\",\n        \"quiche/common/platform/api/quiche_str_cat.h\",\n        \"quiche/common/platform/api/quiche_string_piece.h\",\n        \"quiche/common/platform/api/quiche_text_utils.h\",\n        \"quiche/common/platform/api/quiche_time_utils.h\",\n        \"quiche/common/platform/api/quiche_unordered_containers.h\",\n    ],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quiche_common_platform_export\",\n        \":quiche_common_platform_optional\",\n        \"@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_impl_lib\",\n    
],\n)\n\nenvoy_cc_test_library(\n    name = \"quiche_common_platform_test\",\n    srcs = [\n        \"quiche/common/platform/api/quiche_endian_test.cc\",\n        \"quiche/common/platform/api/quiche_str_cat_test.cc\",\n        \"quiche/common/platform/api/quiche_text_utils_test.cc\",\n        \"quiche/common/platform/api/quiche_time_utils_test.cc\",\n    ],\n    hdrs = [\"quiche/common/platform/api/quiche_test.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quiche_common_platform\",\n        \":quiche_common_platform_endian\",\n        \"@envoy//test/extensions/quic_listeners/quiche/platform:quiche_common_platform_test_impl_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quiche_common_lib\",\n    srcs = [\n        \"quiche/common/quiche_data_reader.cc\",\n        \"quiche/common/quiche_data_writer.cc\",\n    ],\n    hdrs = [\n        \"quiche/common/quiche_data_reader.h\",\n        \"quiche/common/quiche_data_writer.h\",\n        \"quiche/common/simple_linked_hash_map.h\",\n    ],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quiche_common_platform\",\n        \":quiche_common_platform_endian\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"epoll_server_test\",\n    srcs = select({\n        \"@envoy//bazel:linux\": [\"quiche/epoll_server/simple_epoll_server_test.cc\"],\n        \"//conditions:default\": [],\n    }),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\":epoll_server_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"quiche_common_test\",\n    srcs = [\"quiche/common/simple_linked_hash_map_test.cc\"],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quiche_common_lib\",\n        \":quiche_common_platform_test\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http2_platform_api_test\",\n    srcs = [\n        
\"quiche/http2/platform/api/http2_string_utils_test.cc\",\n        \"quiche/http2/test_tools/http2_random_test.cc\",\n    ],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":http2_platform\",\n        \":http2_test_tools_random\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"spdy_platform_api_test\",\n    srcs = [\"quiche/spdy/platform/api/spdy_string_utils_test.cc\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quiche_common_test_tools_test_utils_lib\",\n        \":spdy_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_mem_slice_span\",\n    hdrs = [\n        \"quiche/quic/platform/api/quic_mem_slice_span.h\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\"@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_mem_slice_span_impl_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_test_mem_slice_vector_lib\",\n    hdrs = [\"quiche/quic/platform/api/quic_test_mem_slice_vector.h\"],\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\"@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_mem_slice_vector_impl_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_mem_slice_storage\",\n    hdrs = [\"quiche/quic/platform/api/quic_mem_slice_storage.h\"],\n    repository = \"@envoy\",\n    visibility = [\"//visibility:public\"],\n    deps = [\"@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_mem_slice_storage_impl_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"spdy_core_header_block_test\",\n    srcs = [\"quiche/spdy/core/spdy_header_block_test.cc\"],\n    copts = quiche_copts,\n    coverage = False,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":spdy_core_header_block_lib\",\n        \":spdy_core_test_utils_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = 
\"quic_platform_api_test\",\n    srcs = [\n        \"quiche/quic/platform/api/quic_containers_test.cc\",\n        \"quiche/quic/platform/api/quic_mem_slice_span_test.cc\",\n        # Re-enable it when tests pass.\n        # \"quiche/quic/platform/api/quic_mem_slice_storage_test.cc\",\n        \"quiche/quic/platform/api/quic_mem_slice_test.cc\",\n        \"quiche/quic/platform/api/quic_reference_counted_test.cc\",\n        \"quiche/quic/platform/api/quic_string_utils_test.cc\",\n    ],\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_buffer_allocator_lib\",\n        \":quic_platform\",\n        \":quic_platform_mem_slice_span\",\n        \":quic_platform_mem_slice_storage\",\n        \":quic_platform_test\",\n        \":quic_platform_test_mem_slice_vector_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"quic_core_batch_writer_batch_writer_test\",\n    srcs = select({\n        \"@envoy//bazel:linux\": [\"quiche/quic/core/batch_writer/quic_batch_writer_test.cc\"],\n        \"//conditions:default\": [],\n    }),\n    copts = quiche_copts,\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_core_batch_writer_batch_writer_test_lib\",\n        \":quic_core_batch_writer_gso_batch_writer_lib\",\n        \":quic_core_batch_writer_sendmmsg_batch_writer_lib\",\n        \":quic_platform\",\n    ],\n)\n"
  },
  {
    "path": "bazel/external/quiche.genrule_cmd",
    "content": "#!/bin/bash\n\nset -e\n\n# This script is invoked from quiche.BUILD to tweak QUICHE source files into a\n# form usable by Envoy. Transformations performed here:\n#\n# - Move subtree under quiche/ base dir, for clarity in #include statements.\n# - Rewrite include directives for platform/impl files to point to the directory\n#   containing Envoy's QUICHE platform implementation.\n# - Fix include directives for non-platform/impl files to remove\n#   \"net/third_party\" from the path. (This is an artifact of Chromium source\n#   tree structure.)\n\n# Determine base directory of unmodified QUICHE source files. In practice, this\n# ends up being \"external/com_googlesource_quiche\".\nsrc_base_dir=$$(dirname $$(dirname $$(dirname $(rootpath quic/core/quic_constants.h))))\n\n# sed commands to apply to each source file.\ncat <<EOF >sed_commands\n# Rewrite include directives for testonly platform impl files.\n/^#include/ s!net/http2/platform/impl/http2_reconstruct_object_impl.h!test/extensions/quic_listeners/quiche/platform/http2_reconstruct_object_impl.h!\n/^#include/ s!net/quic/platform/impl/quic_expect_bug_impl.h!test/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h!\n/^#include/ s!net/quic/platform/impl/quic_mock_log_impl.h!test/extensions/quic_listeners/quiche/platform/quic_mock_log_impl.h!\n/^#include/ s!net/quic/platform/impl/quic_port_utils_impl.h!test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.h!\n/^#include/ s!net/quic/platform/impl/quic_sleep_impl.h!test/extensions/quic_listeners/quiche/platform/quic_sleep_impl.h!\n/^#include/ s!net/quic/platform/impl/quic_system_event_loop_impl.h!test/extensions/quic_listeners/quiche/platform/quic_system_event_loop_impl.h!\n/^#include/ s!net/quic/platform/impl/quic_test_impl.h!test/extensions/quic_listeners/quiche/platform/quic_test_impl.h!\n/^#include/ 
s!net/quic/platform/impl/quic_test_mem_slice_vector_impl.h!test/extensions/quic_listeners/quiche/platform/quic_test_mem_slice_vector_impl.h!\n/^#include/ s!net/quic/platform/impl/quic_test_output_impl.h!test/extensions/quic_listeners/quiche/platform/quic_test_output_impl.h!\n/^#include/ s!net/quic/platform/impl/quic_thread_impl.h!test/extensions/quic_listeners/quiche/platform/quic_thread_impl.h!\n/^#include/ s!net/quiche/common/platform/impl/quiche_test_impl.h!test/extensions/quic_listeners/quiche/platform/quiche_test_impl.h!\n/^#include/ s!net/spdy/platform/impl/spdy_test_helpers_impl.h!test/extensions/quic_listeners/quiche/platform/spdy_test_helpers_impl.h!\n/^#include/ s!net/spdy/platform/impl/spdy_test_impl.h!test/extensions/quic_listeners/quiche/platform/spdy_test_impl.h!\n\n# Rewrite include directives for platform impl files.\n/^#include/ s!net/(http2|spdy|quic|quiche/common)/platform/impl/!extensions/quic_listeners/quiche/platform/!\n\n# Rewrite include directives for epoll_server platform impl files.\n/^#include/ s!net/tools/epoll_server/platform/impl!test/extensions/quic_listeners/quiche/platform/!\n\n# Strip \"net/third_party\" from include directives to other QUICHE files.\n/^#include/ s!net/third_party/quiche/src/!quiche/!\n\n# Rewrite gmock & gtest includes.\n/^#include/ s!testing/gmock/include/gmock/!gmock/!\n/^#include/ s!testing/gtest/include/gtest/!gtest/!\n\n# Rewrite third_party includes.\n/^#include/ s!third_party/boringssl/src/include/!!\n/^#include/ s!third_party/zlib/zlib!zlib!\n\n/^import/ s!cached_network_parameters!quiche/quic/core/proto/cached_network_parameters!\n\n# Rewrite #pragma clang\n/^#pragma/ s!clang!GCC!\n/^#pragma/ s!-Weverything!-Wall!\nEOF\n\nfor src_file in $(SRCS); do\n  # Extract relative path (e.g. \"quic/core/quic_utils.cc\") from full path in\n  # src_path (e.g. 
\"external/com_googlesource_quiche/quic/core/quic_utils.cc\").\n  src_path=\"$${src_file#$$src_base_dir/}\"\n\n  # Map to output file with quiche/ base directory inserted in path.\n  out_file=\"$(@D)/quiche/$$src_path\"\n  mkdir -p \"$$(dirname \"$$out_file\")\"\n\n  # Apply text substitutions. -E ensures consistent behavior on Linux vs. OS X.\n  sed -E -f sed_commands \"$$src_file\" > \"$$out_file\"\ndone\n"
  },
  {
    "path": "bazel/external/rapidjson.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"rapidjson\",\n    hdrs = glob([\"include/rapidjson/**/*.h\"]),\n    defines = [\"RAPIDJSON_HAS_STDSTRING=1\"],\n    includes = [\"include\"],\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/external/spdlog.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"spdlog\",\n    hdrs = glob([\n        \"include/**/*.h\",\n    ]),\n    defines = [\"SPDLOG_FMT_EXTERNAL\"],\n    includes = [\"include\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\"@com_github_fmtlib_fmt//:fmtlib\"],\n)\n"
  },
  {
    "path": "bazel/external/sqlparser.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"sqlparser\",\n    srcs = glob([\"src/**/*.cpp\"]),\n    hdrs = glob([\n        \"include/**/*.h\",\n        \"src/**/*.h\",\n    ]),\n    defines = select({\n        \"@envoy//bazel:windows_x86_64\": [\"YY_NO_UNISTD_H\"],\n        \"//conditions:default\": [],\n    }),\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/external/tclap.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"tclap\",\n    hdrs = glob([\"include/tclap/*.h\"]),\n    includes = [\"include\"],\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/external/twitter_common_finagle_thrift.BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\npy_library(\n    name = \"twitter_common_finagle_thrift\",\n    srcs = glob([\n        \"gen/**/*.py\",\n    ]),\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/external/twitter_common_lang.BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\npy_library(\n    name = \"twitter_common_lang\",\n    srcs = glob([\n        \"twitter/**/*.py\",\n    ]),\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/external/twitter_common_rpc.BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\npy_library(\n    name = \"twitter_common_rpc\",\n    srcs = glob([\n        \"twitter/**/*.py\",\n    ]),\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"@com_github_twitter_common_finagle_thrift//:twitter_common_finagle_thrift\",\n        \"@com_github_twitter_common_lang//:twitter_common_lang\",\n    ],\n)\n"
  },
  {
    "path": "bazel/external/wee8.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\nload(\"@envoy_large_machine_exec_property//:constants.bzl\", \"LARGE_MACHINE\")\nload(\":genrule_cmd.bzl\", \"genrule_cmd\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"wee8\",\n    srcs = [\n        \"libwee8.a\",\n    ],\n    hdrs = [\n        \"wee8/include/v8-version.h\",\n        \"wee8/third_party/wasm-api/wasm.hh\",\n    ],\n    defines = [\"ENVOY_WASM_V8\"],\n    includes = [\n        \"wee8/include\",\n        \"wee8/third_party\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\ngenrule(\n    name = \"build\",\n    srcs = glob([\"wee8/**\"]),\n    outs = [\n        \"libwee8.a\",\n    ],\n    cmd = genrule_cmd(\"@envoy//bazel/external:wee8.genrule_cmd\"),\n    exec_properties = LARGE_MACHINE,\n)\n"
  },
  {
    "path": "bazel/external/wee8.genrule_cmd",
    "content": "#!/bin/bash\n\nset -e\n\n# This works only on Linux-{x86_64,s390x,aarch64} and macOS-x86_64.\ncase \"$$(uname -s)-$$(uname -m)\" in\nLinux-x86_64|Linux-s390x|Linux-aarch64|Darwin-x86_64)\n  ;;\n*)\n  echo \"ERROR: wee8 is currently supported only on Linux-{x86_64,s390x,aarch64} and macOS-x86_64.\" >&2\n  exit 1\nesac\n\n# Bazel magic.\nROOT=$$(dirname $(rootpath wee8/BUILD.gn))/..\npushd $$ROOT/wee8\n\n# Clean after previous build.\nrm -rf out/wee8\n\n# Export compiler configuration.\nexport CXXFLAGS=\"$${CXXFLAGS-} -Wno-sign-compare -Wno-deprecated-copy -Wno-unknown-warning-option\"\nif [[ ( `uname` == \"Darwin\" && $${CXX-} == \"\" ) || $${CXX-} == *\"clang\"* ]]; then\n  export IS_CLANG=true\n  export CC=$${CC:-clang}\n  export CXX=$${CXX:-clang++}\n  export CXXFLAGS=\"$${CXXFLAGS} -Wno-implicit-int-float-conversion -Wno-builtin-assume-aligned-alignment -Wno-final-dtor-non-final-class\"\nelse\n  export IS_CLANG=false\n  export CC=$${CC:-gcc}\n  export CXX=$${CXX:-g++}\nfi\n\nexport AR=$${AR:-ar}\nexport NM=$${NM:-nm}\n\n# Hook sanitizers.\nif [[ $${ENVOY_ASAN-} == \"1\" ]]; then\n  WEE8_BUILD_ARGS+=\" is_asan=true\"\n  WEE8_BUILD_ARGS+=\" is_lsan=true\"\nfi\nif [[ $${ENVOY_UBSAN_VPTR-} == \"1\" ]]; then\n  WEE8_BUILD_ARGS+=\" is_ubsan=true\"\n  WEE8_BUILD_ARGS+=\" is_ubsan_vptr=true\"\nfi\nif [[ $${ENVOY_MSAN-} == \"1\" ]]; then\n  WEE8_BUILD_ARGS+=\" is_msan=true\"\n  export LDFLAGS=\"$${LDFLAGS} -L/opt/libcxx_msan/lib -Wl,-rpath,/opt/libcxx_msan/lib\"\nfi\nif [[ $${ENVOY_TSAN-} == \"1\" ]]; then\n  WEE8_BUILD_ARGS+=\" is_tsan=true\"\nfi\n\n# Debug/release build.\nif [[ $(COMPILATION_MODE) == \"dbg\" && $${ENVOY_UBSAN_VPTR-} != \"1\" && $${ENVOY_MSAN-} != \"1\" && $${ENVOY_TSAN-} != \"1\" ]]; then\n  WEE8_BUILD_ARGS+=\" is_debug=true\"\n  WEE8_BUILD_ARGS+=\" v8_symbol_level=2\"\n  WEE8_BUILD_ARGS+=\" v8_optimized_debug=false\"\nelse\n  WEE8_BUILD_ARGS+=\" is_debug=false\"\n  WEE8_BUILD_ARGS+=\" v8_symbol_level=1\"\n  WEE8_BUILD_ARGS+=\" 
v8_enable_handle_zapping=false\"\nfi\n\n# Clang or not Clang, that is the question.\nWEE8_BUILD_ARGS+=\" is_clang=$$IS_CLANG\"\n# Hack to disable bleeding-edge compiler flags.\nWEE8_BUILD_ARGS+=\" use_xcode_clang=true\"\n# Use local toolchain.\nWEE8_BUILD_ARGS+=\" custom_toolchain=\\\"//build/toolchain/linux/unbundle:default\\\"\"\n# Use local stdlibc++ / libc++.\nWEE8_BUILD_ARGS+=\" use_custom_libcxx=false\"\n# Use local sysroot.\nWEE8_BUILD_ARGS+=\" use_sysroot=false\"\n# Disable unused GLib2 dependency.\nWEE8_BUILD_ARGS+=\" use_glib=false\"\n# Expose debug symbols.\nWEE8_BUILD_ARGS+=\" v8_expose_symbols=true\"\n# Build monolithic library.\nWEE8_BUILD_ARGS+=\" is_component_build=false\"\nWEE8_BUILD_ARGS+=\" v8_enable_i18n_support=false\"\nWEE8_BUILD_ARGS+=\" v8_enable_gdbjit=false\"\nWEE8_BUILD_ARGS+=\" v8_use_external_startup_data=false\"\n# Disable read-only heap, since it's leaky and HEAPCHECK complains about it.\n# TODO(PiotrSikora): remove when fixed upstream.\nWEE8_BUILD_ARGS+=\" v8_enable_shared_ro_heap=false\"\n# Support Arm64\nif [[ `uname -m` == \"aarch64\" ]]; then\n  WEE8_BUILD_ARGS+=\" target_cpu=\\\"arm64\\\"\"\nfi\n\n# Build wee8.\nif [[ -f /etc/centos-release ]] && [[ $$(cat /etc/centos-release) =~ \"CentOS Linux release 7\" ]] && [[ -x \"$$(command -v gn)\" ]]; then\n  # Using system default gn tools\n  # This is done only for CentOS 7, as it has an old version of GLIBC which is otherwise incompatible\n  gn=$$(command -v gn)\nelif [[ \"$$(uname -s)\" == \"Darwin\" ]]; then\n  gn=buildtools/mac/gn\nelif [[ \"$$(uname -s)-$$(uname -m)\" == \"Linux-x86_64\" ]]; then\n  gn=buildtools/linux64/gn\nelse\n  # Using system default gn tools\n  gn=$$(command -v gn)\nfi\n\nif [[ \"$$(uname -s)\" == \"Darwin\" ]]; then\n  ninja=third_party/depot_tools/ninja\nelif [[ \"$$(uname -s)-$$(uname -m)\" == \"Linux-x86_64\" ]]; then\n  ninja=third_party/depot_tools/ninja\nelse\n  # Using system default ninja tools\n  ninja=$$(command -v ninja)\nfi\n\n\"$$gn\" gen 
out/wee8 --args=\"$$WEE8_BUILD_ARGS\"\n\"$$ninja\" -C out/wee8 wee8\n\n# Move compiled library to the expected destinations.\npopd\nmv $$ROOT/wee8/out/wee8/obj/libwee8.a $(execpath libwee8.a)\n"
  },
  {
    "path": "bazel/external/wee8.patch",
    "content": "# 1. Fix linking with unbundled toolchain on macOS.\n# 2. Increase VSZ limit to 4TiB (allows us to start up to 409 VMs).\n# 3. Fix MSAN linking.\n--- wee8/build/toolchain/gcc_toolchain.gni\n+++ wee8/build/toolchain/gcc_toolchain.gni\n@@ -329,6 +329,8 @@ template(\"gcc_toolchain\") {\n         # AIX does not support either -D (deterministic output) or response\n         # files.\n         command = \"$ar -X64 {{arflags}} -r -c -s {{output}} {{inputs}}\"\n+      } else if (current_os == \"mac\") {\n+        command = \"\\\"$ar\\\" {{arflags}} -r -c -s {{output}} {{inputs}}\"\n       } else {\n         rspfile = \"{{output}}.rsp\"\n         rspfile_content = \"{{inputs}}\"\n@@ -507,7 +509,7 @@ template(\"gcc_toolchain\") {\n\n       start_group_flag = \"\"\n       end_group_flag = \"\"\n-      if (current_os != \"aix\") {\n+      if (current_os != \"aix\" && current_os != \"mac\") {\n         # the \"--start-group .. --end-group\" feature isn't available on the aix ld.\n         start_group_flag = \"-Wl,--start-group\"\n         end_group_flag = \"-Wl,--end-group \"\n--- wee8/src/objects/backing-store.cc\n+++ wee8/src/objects/backing-store.cc\n@@ -34,7 +34,7 @@ constexpr bool kUseGuardRegions = false;\n // address space limits needs to be smaller.\n constexpr size_t kAddressSpaceLimit = 0x8000000000L;  // 512 GiB\n #elif V8_TARGET_ARCH_64_BIT\n-constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4 GiB\n+constexpr size_t kAddressSpaceLimit = 0x40100000000L;  // 4 TiB + 4 GiB\n #else\n constexpr size_t kAddressSpaceLimit = 0xC0000000;  // 3 GiB\n #endif\n--- wee8/build/config/sanitizers/sanitizers.gni\n+++ wee8/build/config/sanitizers/sanitizers.gni\n@@ -150,7 +150,7 @@ if (!is_a_target_toolchain) {\n # standard system libraries. 
We have instrumented system libraries for msan,\n # which requires them to prevent false positives.\n # TODO(thakis): Maybe remove this variable.\n-use_prebuilt_instrumented_libraries = is_msan\n+use_prebuilt_instrumented_libraries = false\n\n # Whether we are doing a fuzzer build. Normally this should be checked instead\n # of checking \"use_libfuzzer || use_afl\" because often developers forget to\n@@ -198,8 +198,7 @@ assert(!using_sanitizer || is_clang,\n assert(!is_cfi || is_clang,\n        \"is_cfi requires setting is_clang = true in 'gn args'\")\n\n-prebuilt_instrumented_libraries_available =\n-    is_msan && (msan_track_origins == 0 || msan_track_origins == 2)\n+prebuilt_instrumented_libraries_available = false\n\n if (use_libfuzzer && is_linux) {\n   if (is_asan) {\n"
  },
  {
    "path": "bazel/external/xxhash.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_library(\n    name = \"xxhash\",\n    srcs = [\"xxhash.c\"],\n    hdrs = [\n        \"xxh3.h\",\n        \"xxhash.h\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "bazel/foreign_cc/BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_cmake_external\", \"envoy_package\")\nload(\"@rules_foreign_cc//tools/build_defs:configure.bzl\", \"configure_make\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\n# autotools packages are unusable on Windows as-is\n# TODO: Consider our own gperftools.BUILD file as we do with many other packages\nconfigure_make(\n    name = \"gperftools_build\",\n    configure_options = [\n        \"--enable-shared=no\",\n        \"--enable-frame-pointers\",\n        \"--disable-libunwind\",\n    ] + select({\n        \"//bazel:apple\": [\"AR=/usr/bin/ar\"],\n        \"//conditions:default\": [],\n    }),\n    lib_source = \"@com_github_gperftools_gperftools//:all\",\n    linkopts = [\"-lpthread\"],\n    make_commands = [\"make install-libLTLIBRARIES install-perftoolsincludeHEADERS\"],\n    static_libraries = select({\n        \"//bazel:debug_tcmalloc\": [\"libtcmalloc_debug.a\"],\n        \"//conditions:default\": [\"libtcmalloc_and_profiler.a\"],\n    }),\n    tags = [\"skip_on_windows\"],\n)\n\n# Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/227\ncc_library(\n    name = \"gperftools\",\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"gperftools_build\",\n    ],\n)\n\nconfigure_make(\n    name = \"luajit\",\n    configure_command = \"build.py\",\n    configure_env_vars = select({\n        # This shouldn't be needed! 
See\n        # https://github.com/envoyproxy/envoy/issues/6084\n        # TODO(htuch): Remove when #6084 is fixed\n        \"//bazel:asan_build\": {\"ENVOY_CONFIG_ASAN\": \"1\"},\n        \"//bazel:msan_build\": {\"ENVOY_CONFIG_MSAN\": \"1\"},\n        \"//bazel:windows_dbg_build\": {\"WINDOWS_DBG_BUILD\": \"debug\"},\n        \"//conditions:default\": {},\n    }),\n    lib_source = \"@com_github_luajit_luajit//:all\",\n    make_commands = [],\n    out_include_dir = \"include/luajit-2.1\",\n    static_libraries = select({\n        \"//bazel:windows_x86_64\": [\"lua51.lib\"],\n        \"//conditions:default\": [\"libluajit-5.1.a\"],\n    }),\n)\n\nconfigure_make(\n    name = \"moonjit\",\n    configure_command = \"build.py\",\n    configure_env_vars = select({\n        # This shouldn't be needed! See\n        # https://github.com/envoyproxy/envoy/issues/6084\n        # TODO(htuch): Remove when #6084 is fixed\n        \"//bazel:asan_build\": {\"ENVOY_CONFIG_ASAN\": \"1\"},\n        \"//bazel:msan_build\": {\"ENVOY_CONFIG_MSAN\": \"1\"},\n        \"//bazel:windows_dbg_build\": {\"WINDOWS_DBG_BUILD\": \"debug\"},\n        \"//conditions:default\": {},\n    }),\n    lib_source = \"@com_github_moonjit_moonjit//:all\",\n    make_commands = [],\n    out_include_dir = \"include/moonjit-2.2\",\n    static_libraries = select({\n        \"//bazel:windows_x86_64\": [\"lua51.lib\"],\n        \"//conditions:default\": [\"libluajit-5.1.a\"],\n    }),\n)\n\nenvoy_cmake_external(\n    name = \"ares\",\n    cache_entries = {\n        \"CARES_SHARED\": \"no\",\n        \"CARES_STATIC\": \"on\",\n        \"CMAKE_CXX_COMPILER_FORCED\": \"on\",\n        \"CMAKE_INSTALL_LIBDIR\": \"lib\",\n    },\n    defines = [\"CARES_STATICLIB\"],\n    lib_source = \"@com_github_c_ares_c_ares//:all\",\n    linkopts = select({\n        \"//bazel:apple\": [\"-lresolv\"],\n        \"//conditions:default\": [],\n    }),\n    postfix_script = select({\n        \"//bazel:windows_x86_64\": \"cp -L 
$EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/nameser.h $INSTALLDIR/include/nameser.h\",\n        \"//conditions:default\": \"\",\n    }),\n    static_libraries = select({\n        \"//bazel:windows_x86_64\": [\"cares.lib\"],\n        \"//conditions:default\": [\"libcares.a\"],\n    }),\n)\n\nenvoy_cmake_external(\n    name = \"curl\",\n    cache_entries = {\n        \"BUILD_CURL_EXE\": \"off\",\n        \"BUILD_TESTING\": \"off\",\n        \"BUILD_SHARED_LIBS\": \"off\",\n        \"CURL_HIDDEN_SYMBOLS\": \"off\",\n        \"CMAKE_USE_LIBSSH2\": \"off\",\n        \"CURL_BROTLI\": \"off\",\n        \"CMAKE_USE_GSSAPI\": \"off\",\n        \"HTTP_ONLY\": \"on\",\n        \"CMAKE_INSTALL_LIBDIR\": \"lib\",\n        # Explicitly enable Unix sockets, once afunix.h is correctly detected\n        # \"USE_UNIX_SOCKETS\": \"on\",\n        # Explicitly disable \"Windows\" crypto for Windows\n        \"CURL_DISABLE_CRYPTO_AUTH\": \"on\",\n        # C-Ares.\n        \"ENABLE_ARES\": \"on\",\n        \"CARES_LIBRARY\": \"$EXT_BUILD_DEPS/ares\",\n        \"CARES_INCLUDE_DIR\": \"$EXT_BUILD_DEPS/ares/include\",\n        # SSL (via Envoy's SSL dependency) is disabled, curl's CMake uses\n        # FindOpenSSL.cmake which fails at what looks like version parsing\n        # (the libraries are found ok).\n        \"CURL_CA_PATH\": \"none\",\n        \"CMAKE_USE_OPENSSL\": \"off\",\n        \"OPENSSL_ROOT_DIR\": \"$EXT_BUILD_DEPS\",\n        # NGHTTP2.\n        \"USE_NGHTTP2\": \"on\",\n        \"NGHTTP2_LIBRARY\": \"$EXT_BUILD_DEPS/nghttp2\",\n        \"NGHTTP2_INCLUDE_DIR\": \"$EXT_BUILD_DEPS/nghttp2/include\",\n        # ZLIB.\n        \"CURL_ZLIB\": \"on\",\n        \"ZLIB_LIBRARY\": \"$EXT_BUILD_DEPS/zlib\",\n        \"ZLIB_INCLUDE_DIR\": \"$EXT_BUILD_DEPS/zlib/include\",\n        \"CMAKE_CXX_COMPILER_FORCED\": \"on\",\n        \"CMAKE_C_FLAGS_BAZEL\": \"-fPIC\",\n        # Note we use Bazel's flags (not _RELEASE/_DEBUG CMake flags), but this toggle\n        # also works around 
a bug in CMP0091 logic which re-injected a badly placed -M flag.\n        # See https://github.com/bazelbuild/rules_foreign_cc/issues/426\n        \"CURL_STATIC_CRT\": \"on\",\n    },\n    defines = [\"CURL_STATICLIB\"],\n    generate_crosstool_file = True,\n    lib_source = \"@com_github_curl//:all\",\n    static_libraries = select({\n        \"//bazel:windows_x86_64\": [\"libcurl.lib\"],\n        \"//conditions:default\": [\"libcurl.a\"],\n    }),\n    deps = [\n        \":ares\",\n        \":nghttp2\",\n        \"//external:ssl\",\n        \"//external:zlib\",\n    ],\n)\n\nenvoy_cmake_external(\n    name = \"event\",\n    cache_entries = {\n        \"EVENT__DISABLE_OPENSSL\": \"on\",\n        \"EVENT__DISABLE_MBEDTLS\": \"on\",\n        \"EVENT__DISABLE_REGRESS\": \"on\",\n        \"EVENT__DISABLE_TESTS\": \"on\",\n        \"EVENT__LIBRARY_TYPE\": \"STATIC\",\n        # Force _GNU_SOURCE on for Android builds. This would be contained in\n        # a 'select' but the downstream macro uses a select on all of these\n        # options, and they cannot be nested.\n        # If https://github.com/bazelbuild/rules_foreign_cc/issues/289 is fixed\n        # this can be removed.\n        # More details https://github.com/lyft/envoy-mobile/issues/116\n        \"_GNU_SOURCE\": \"on\",\n    },\n    lib_source = \"@com_github_libevent_libevent//:all\",\n    static_libraries = select({\n        # macOS organization of libevent is different from Windows/Linux.\n        # Including libevent_core is a requirement on those platforms, but\n        # results in duplicate symbols when built on macOS.\n        # See https://github.com/lyft/envoy-mobile/issues/677 for details.\n        \"//bazel:apple\": [\n            \"libevent.a\",\n            \"libevent_pthreads.a\",\n        ],\n        \"//bazel:windows_x86_64\": [\n            \"event.lib\",\n            \"event_core.lib\",\n        ],\n        \"//conditions:default\": [\n            \"libevent.a\",\n            
\"libevent_pthreads.a\",\n            \"libevent_core.a\",\n        ],\n    }),\n)\n\nenvoy_cmake_external(\n    name = \"llvm\",\n    cache_entries = {\n        # Disable both: BUILD and INCLUDE, since some of the INCLUDE\n        # targets build code instead of only generating build files.\n        \"LLVM_BUILD_DOCS\": \"off\",\n        \"LLVM_INCLUDE_DOCS\": \"off\",\n        \"LLVM_BUILD_EXAMPLES\": \"off\",\n        \"LLVM_INCLUDE_EXAMPLES\": \"off\",\n        \"LLVM_BUILD_RUNTIME\": \"off\",\n        \"LLVM_BUILD_RUNTIMES\": \"off\",\n        \"LLVM_INCLUDE_RUNTIMES\": \"off\",\n        \"LLVM_BUILD_TESTS\": \"off\",\n        \"LLVM_INCLUDE_TESTS\": \"off\",\n        \"LLVM_BUILD_TOOLS\": \"off\",\n        \"LLVM_INCLUDE_TOOLS\": \"off\",\n        \"LLVM_BUILD_UTILS\": \"off\",\n        \"LLVM_INCLUDE_UTILS\": \"off\",\n        \"LLVM_ENABLE_LIBEDIT\": \"off\",\n        \"LLVM_ENABLE_LIBXML2\": \"off\",\n        \"LLVM_ENABLE_TERMINFO\": \"off\",\n        \"LLVM_ENABLE_ZLIB\": \"off\",\n        \"LLVM_TARGETS_TO_BUILD\": \"X86\",\n        \"CMAKE_CXX_COMPILER_FORCED\": \"on\",\n        # Workaround for the issue with statically linked libstdc++\n        # using -l:libstdc++.a.\n        \"CMAKE_CXX_FLAGS\": \"-lstdc++\",\n    },\n    env_vars = {\n        # Workaround for the -DDEBUG flag added in fastbuild on macOS,\n        # which conflicts with DEBUG macro used in LLVM.\n        \"CFLAGS\": \"-UDEBUG\",\n        \"CXXFLAGS\": \"-UDEBUG\",\n        \"ASMFLAGS\": \"-UDEBUG\",\n    },\n    lib_source = \"@org_llvm_llvm//:all\",\n    static_libraries = select({\n        \"//conditions:default\": [\n            # Order from llvm-config --libnames.\n            \"libLLVMLTO.a\",\n            \"libLLVMPasses.a\",\n            \"libLLVMObjCARCOpts.a\",\n            \"libLLVMSymbolize.a\",\n            \"libLLVMDebugInfoPDB.a\",\n            \"libLLVMDebugInfoDWARF.a\",\n            \"libLLVMFuzzMutate.a\",\n            \"libLLVMTableGen.a\",\n            
\"libLLVMDlltoolDriver.a\",\n            \"libLLVMLineEditor.a\",\n            \"libLLVMOrcJIT.a\",\n            \"libLLVMCoverage.a\",\n            \"libLLVMMIRParser.a\",\n            \"libLLVMObjectYAML.a\",\n            \"libLLVMLibDriver.a\",\n            \"libLLVMOption.a\",\n            \"libLLVMWindowsManifest.a\",\n            \"libLLVMX86Disassembler.a\",\n            \"libLLVMX86AsmParser.a\",\n            \"libLLVMX86CodeGen.a\",\n            \"libLLVMGlobalISel.a\",\n            \"libLLVMSelectionDAG.a\",\n            \"libLLVMAsmPrinter.a\",\n            \"libLLVMDebugInfoCodeView.a\",\n            \"libLLVMDebugInfoMSF.a\",\n            \"libLLVMX86Desc.a\",\n            \"libLLVMMCDisassembler.a\",\n            \"libLLVMX86Info.a\",\n            \"libLLVMX86Utils.a\",\n            \"libLLVMMCJIT.a\",\n            \"libLLVMInterpreter.a\",\n            \"libLLVMExecutionEngine.a\",\n            \"libLLVMRuntimeDyld.a\",\n            \"libLLVMCodeGen.a\",\n            \"libLLVMTarget.a\",\n            \"libLLVMCoroutines.a\",\n            \"libLLVMipo.a\",\n            \"libLLVMInstrumentation.a\",\n            \"libLLVMVectorize.a\",\n            \"libLLVMScalarOpts.a\",\n            \"libLLVMLinker.a\",\n            \"libLLVMIRReader.a\",\n            \"libLLVMAsmParser.a\",\n            \"libLLVMInstCombine.a\",\n            \"libLLVMTransformUtils.a\",\n            \"libLLVMBitWriter.a\",\n            \"libLLVMAnalysis.a\",\n            \"libLLVMProfileData.a\",\n            \"libLLVMObject.a\",\n            \"libLLVMMCParser.a\",\n            \"libLLVMMC.a\",\n            \"libLLVMBitReader.a\",\n            \"libLLVMBitstreamReader.a\",\n            \"libLLVMCore.a\",\n            \"libLLVMBinaryFormat.a\",\n            \"libLLVMSupport.a\",\n            \"libLLVMDemangle.a\",\n            \"libLLVMRemarks.a\",\n            \"libLLVMCFGuard.a\",\n            \"libLLVMTextAPI.a\",\n        ],\n    }),\n)\n\nenvoy_cmake_external(\n    name = 
\"nghttp2\",\n    cache_entries = {\n        \"ENABLE_LIB_ONLY\": \"on\",\n        \"ENABLE_SHARED_LIB\": \"off\",\n        \"ENABLE_STATIC_LIB\": \"on\",\n        \"CMAKE_INSTALL_LIBDIR\": \"lib\",\n        \"CMAKE_CXX_COMPILER_FORCED\": \"on\",\n    },\n    cmake_files_dir = \"$BUILD_TMPDIR/lib/CMakeFiles\",\n    debug_cache_entries = {\"ENABLE_DEBUG\": \"on\"},\n    defines = [\"NGHTTP2_STATICLIB\"],\n    lib_source = \"@com_github_nghttp2_nghttp2//:all\",\n    static_libraries = select({\n        \"//bazel:windows_x86_64\": [\"nghttp2.lib\"],\n        \"//conditions:default\": [\"libnghttp2.a\"],\n    }),\n)\n\nenvoy_cmake_external(\n    name = \"wavm\",\n    binaries = [\"wavm\"],\n    cache_entries = {\n        \"LLVM_DIR\": \"$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm\",\n        \"WAVM_ENABLE_STATIC_LINKING\": \"on\",\n        \"WAVM_ENABLE_RELEASE_ASSERTS\": \"on\",\n        \"WAVM_ENABLE_UNWIND\": \"no\",\n        # Workaround for the issue with statically linked libstdc++\n        # using -l:libstdc++.a.\n        \"CMAKE_CXX_FLAGS\": \"-lstdc++ -Wno-unused-command-line-argument\",\n    },\n    defines = [\"ENVOY_WASM_WAVM\"],\n    env_vars = {\n        # Workaround for the -DDEBUG flag added in fastbuild on macOS,\n        # which conflicts with DEBUG macro used in LLVM.\n        \"CFLAGS\": \"-UDEBUG\",\n        \"CXXFLAGS\": \"-UDEBUG\",\n        \"ASMFLAGS\": \"-UDEBUG\",\n    },\n    lib_source = \"@com_github_wavm_wavm//:all\",\n    static_libraries = select({\n        \"//conditions:default\": [\n            \"libWAVM.a\",\n        ],\n    }),\n    deps = [\":llvm\"],\n)\n\nenvoy_cmake_external(\n    name = \"zlib\",\n    cache_entries = {\n        \"BUILD_SHARED_LIBS\": \"off\",\n        \"CMAKE_CXX_COMPILER_FORCED\": \"on\",\n        \"CMAKE_C_COMPILER_FORCED\": \"on\",\n        \"SKIP_BUILD_EXAMPLES\": \"on\",\n\n        # The following entries are for zlib-ng. 
Since zlib and zlib-ng are compatible source\n        # codes and CMake ignores unknown cache entries, it is fine to combine it into one\n        # dictionary.\n        #\n        # Reference: https://github.com/zlib-ng/zlib-ng#build-options.\n        \"ZLIB_COMPAT\": \"on\",\n        \"ZLIB_ENABLE_TESTS\": \"off\",\n\n        # Warning: Turning WITH_OPTIM to \"on\" doesn't pass ZlibCompressorImplTest.CallingChecksum.\n        \"WITH_OPTIM\": \"on\",\n        # However turning off SSE4 fixes it.\n        \"WITH_SSE4\": \"off\",\n\n        # Warning: Turning WITH_NEW_STRATEGIES to \"on\" doesn't pass gzip compressor fuzz test.\n        # Turning this off means falling into NO_QUICK_STRATEGY route.\n        \"WITH_NEW_STRATEGIES\": \"off\",\n\n        # Only allow aligned address.\n        # Reference: https://github.com/zlib-ng/zlib-ng#advanced-build-options.\n        \"UNALIGNED_OK\": \"off\",\n    },\n    lib_source = select({\n        \"//bazel:zlib_ng\": \"@com_github_zlib_ng_zlib_ng//:all\",\n        \"//conditions:default\": \"@net_zlib//:all\",\n    }),\n    static_libraries = select({\n        \"//bazel:windows_x86_64\": [\"zlibstatic.lib\"],\n        \"//conditions:default\": [\"libz.a\"],\n    }),\n)\n"
  },
  {
    "path": "bazel/foreign_cc/llvm.patch",
    "content": "# Workaround for Envoy's CMAKE_BUILD_TYPE=Bazel.\n--- a/CMakeLists.txt\n+++ b/CMakeLists.txt\n@@ -247,7 +247,7 @@\n string(TOUPPER \"${CMAKE_BUILD_TYPE}\" uppercase_CMAKE_BUILD_TYPE)\n \n if (CMAKE_BUILD_TYPE AND\n-    NOT uppercase_CMAKE_BUILD_TYPE MATCHES \"^(DEBUG|RELEASE|RELWITHDEBINFO|MINSIZEREL)$\")\n+    NOT uppercase_CMAKE_BUILD_TYPE MATCHES \"^(DEBUG|RELEASE|RELWITHDEBINFO|MINSIZEREL|BAZEL)$\")\n   message(FATAL_ERROR \"Invalid value for CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}\")\n endif()\n \n# Workaround for a missing -fuse-ld flag in CXXFLAGS, which results in\n# different linkers being used during configure and compilation phases.\n--- a/cmake/modules/HandleLLVMOptions.cmake\n+++ b/cmake/modules/HandleLLVMOptions.cmake\n@@ -718,8 +718,6 @@ endif()\n if (UNIX AND CMAKE_GENERATOR STREQUAL \"Ninja\")\n   include(CheckLinkerFlag)\n   check_linker_flag(\"-Wl,--color-diagnostics\" LINKER_SUPPORTS_COLOR_DIAGNOSTICS)\n-  append_if(LINKER_SUPPORTS_COLOR_DIAGNOSTICS \"-Wl,--color-diagnostics\"\n-    CMAKE_EXE_LINKER_FLAGS CMAKE_MODULE_LINKER_FLAGS CMAKE_SHARED_LINKER_FLAGS)\n endif()\n \n # Add flags for add_dead_strip().\n"
  },
  {
    "path": "bazel/foreign_cc/luajit.patch",
    "content": "diff --git a/src/Makefile b/src/Makefile\nindex f56465d..5d91fa7 100644\n--- a/src/Makefile\n+++ b/src/Makefile\n@@ -27,7 +27,7 @@ NODOTABIVER= 51\n DEFAULT_CC = gcc\n #\n # LuaJIT builds as a native 32 or 64 bit binary by default.\n-CC= $(DEFAULT_CC)\n+CC ?= $(DEFAULT_CC)\n #\n # Use this if you want to force a 32 bit build on a 64 bit multilib OS.\n #CC= $(DEFAULT_CC) -m32\n@@ -71,10 +71,10 @@ CCWARN= -Wall\n # as dynamic mode.\n #\n # Mixed mode creates a static + dynamic library and a statically linked luajit.\n-BUILDMODE= mixed\n+#BUILDMODE= mixed\n #\n # Static mode creates a static library and a statically linked luajit.\n-#BUILDMODE= static\n+BUILDMODE= static\n #\n # Dynamic mode creates a dynamic library and a dynamically linked luajit.\n # Note: this executable will only run when the library is installed!\n@@ -99,7 +99,7 @@ XCFLAGS=\n # enabled by default. Some other features that *might* break some existing\n # code (e.g. __pairs or os.execute() return values) can be enabled here.\n # Note: this does not provide full compatibility with Lua 5.2 at this time.\n-#XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT\n+XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT\n #\n # Disable the JIT compiler, i.e. 
turn LuaJIT into a pure interpreter.\n #XCFLAGS+= -DLUAJIT_DISABLE_JIT\n@@ -111,7 +111,7 @@ XCFLAGS=\n #XCFLAGS+= -DLUAJIT_NUMMODE=2\n #\n # Enable GC64 mode for x64.\n-#XCFLAGS+= -DLUAJIT_ENABLE_GC64\n+XCFLAGS+= -DLUAJIT_ENABLE_GC64\n #\n ##############################################################################\n\n@@ -587,7 +587,7 @@ endif\n\n Q= @\n E= @echo\n-#Q=\n+Q=\n #E= @:\n\n ##############################################################################\nEOF\n--- a/src/msvcbuild.bat\t2020-08-13 18:42:05.667354300 +0000\n+++ b/src/msvcbuild.bat\t2020-08-13 19:03:25.092297900 +0000\n@@ -14,7 +14,7 @@\n @if not defined INCLUDE goto :FAIL\n \n @setlocal\n-@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline\n+@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT\n @set LJLINK=link /nologo\n @set LJMT=mt /nologo\n @set LJLIB=lib /nologo /nodefaultlib\n@@ -25,7 +25,7 @@\n @set LJLIBNAME=lua51.lib\n @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c\n \n-%LJCOMPILE% host\\minilua.c\n+%LJCOMPILE% /O2 host\\minilua.c\n @if errorlevel 1 goto :BAD\n %LJLINK% /out:minilua.exe minilua.obj\n @if errorlevel 1 goto :BAD\n@@ -48,7 +48,7 @@\n minilua %DASM% -LN %DASMFLAGS% -o host\\buildvm_arch.h %DASC%\n @if errorlevel 1 goto :BAD\n \n-%LJCOMPILE% /I \".\" /I %DASMDIR% host\\buildvm*.c\n+%LJCOMPILE% /O2 /I \".\" /I %DASMDIR% host\\buildvm*.c\n @if errorlevel 1 goto :BAD\n %LJLINK% /out:buildvm.exe buildvm*.obj\n @if errorlevel 1 goto :BAD\n@@ -72,24 +72,35 @@\n \n @if \"%1\" neq \"debug\" goto :NODEBUG\n @shift\n-@set LJCOMPILE=%LJCOMPILE% /Zi\n+@set LJCOMPILE=%LJCOMPILE% /O0 /Z7\n @set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no\n+@set LJCRTDBG=d\n+@goto :ENDDEBUG\n :NODEBUG\n+@set LJCOMPILE=%LJCOMPILE% /O2 /Z7\n+@set 
LJLINK=%LJLINK% /release /incremental:no\n+@set LJCRTDBG=\n+:ENDDEBUG\n @if \"%1\"==\"amalg\" goto :AMALGDLL\n @if \"%1\"==\"static\" goto :STATIC\n-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c\n+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% \n+%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c\n @if errorlevel 1 goto :BAD\n %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj\n @if errorlevel 1 goto :BAD\n @goto :MTDLL\n :STATIC\n+@shift\n+@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG%\n %LJCOMPILE% lj_*.c lib_*.c\n @if errorlevel 1 goto :BAD\n %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj\n @if errorlevel 1 goto :BAD\n @goto :MTDLL\n :AMALGDLL\n-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c\n+@shift\n+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% \n+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c\n @if errorlevel 1 goto :BAD\n %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj\n @if errorlevel 1 goto :BAD\ndiff --git a/build.py b/build.py\nnew file mode 100755\nindex 0000000..9c71271\n--- /dev/null\n+++ b/build.py\n@@ -0,0 +1,56 @@\n+#!/usr/bin/env python3\n+\n+import argparse\n+import os\n+import shutil\n+\n+def main():\n+    parser = argparse.ArgumentParser()\n+    parser.add_argument(\"--prefix\")\n+    args = parser.parse_args()\n+    src_dir = os.path.dirname(os.path.realpath(__file__))\n+    shutil.copytree(src_dir, os.path.basename(src_dir))\n+    os.chdir(os.path.basename(src_dir))\n+\n+    os.environ[\"MACOSX_DEPLOYMENT_TARGET\"] = \"10.6\"\n+    os.environ[\"DEFAULT_CC\"] = os.environ.get(\"CC\", \"\")\n+    os.environ[\"TARGET_CFLAGS\"] = os.environ.get(\"CFLAGS\", \"\") + \" -fno-function-sections -fno-data-sections\"\n+    os.environ[\"TARGET_LDFLAGS\"] = os.environ.get(\"CFLAGS\", \"\") + \" -fno-function-sections -fno-data-sections\"\n+    os.environ[\"CFLAGS\"] = \"\"\n+    # LuaJIT compile process build a tool `buildvm` and use it, building `buildvm` with ASAN\n+    # will cause LSAN detect its leak and fail the build, set exitcode to 0 to make LSAN doesn't\n+    # fail on 
it.\n+    os.environ[\"LSAN_OPTIONS\"] = \"exitcode=0\"\n+\n+    if \"ENVOY_MSAN\" in os.environ:\n+      os.environ[\"HOST_CFLAGS\"] = \"-fno-sanitize=memory\"\n+      os.environ[\"HOST_LDFLAGS\"] = \"-fno-sanitize=memory\"\n+\n+    # Remove LuaJIT from ASAN for now.\n+    # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved.\n+    if \"ENVOY_CONFIG_ASAN\" in os.environ or \"ENVOY_CONFIG_MSAN\" in os.environ:\n+      os.environ[\"TARGET_CFLAGS\"] += \" -fsanitize-blacklist=%s/com_github_luajit_luajit/clang-asan-blocklist.txt\" % os.environ[\"PWD\"]\n+      with open(\"clang-asan-blocklist.txt\", \"w\") as f:\n+        f.write(\"fun:*\\n\")\n+\n+    os.system('make -j{} V=1 PREFIX=\"{}\" install'.format(os.cpu_count(), args.prefix))\n+\n+def win_main():\n+    src_dir = os.path.dirname(os.path.realpath(__file__))\n+    dst_dir = os.getcwd() + \"/luajit\"\n+    shutil.copytree(src_dir, os.path.basename(src_dir))\n+    os.chdir(os.path.basename(src_dir) + \"/src\")\n+    os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static')\n+    os.makedirs(dst_dir + \"/lib\", exist_ok=True)\n+    shutil.copy(\"lua51.lib\", dst_dir + \"/lib\")\n+    os.makedirs(dst_dir + \"/include/luajit-2.1\", exist_ok=True)\n+    for header in [\"lauxlib.h\", \"luaconf.h\", \"lua.h\", \"lua.hpp\", \"luajit.h\", \"lualib.h\"]:\n+      shutil.copy(header, dst_dir + \"/include/luajit-2.1\")\n+    os.makedirs(dst_dir + \"/bin\", exist_ok=True)\n+    shutil.copy(\"luajit.exe\", dst_dir + \"/bin\")\n+\n+if os.name == 'nt':\n+  win_main()\n+else:\n+  main()\n+\n"
  },
  {
    "path": "bazel/foreign_cc/moonjit.patch",
    "content": "diff --git a/build.py b/build.py\nnew file mode 100644\nindex 00000000..dab3606c\n--- /dev/null\n+++ b/build.py\n@@ -0,0 +1,56 @@\n+#!/usr/bin/env python3\n+\n+import argparse\n+import os\n+import shutil\n+\n+def main():\n+    parser = argparse.ArgumentParser()\n+    parser.add_argument(\"--prefix\")\n+    args = parser.parse_args()\n+    src_dir = os.path.dirname(os.path.realpath(__file__))\n+    shutil.copytree(src_dir, os.path.basename(src_dir))\n+    os.chdir(os.path.basename(src_dir))\n+\n+    os.environ[\"MACOSX_DEPLOYMENT_TARGET\"] = \"10.6\"\n+    os.environ[\"DEFAULT_CC\"] = os.environ.get(\"CC\", \"\")\n+    os.environ[\"TARGET_CFLAGS\"] = os.environ.get(\"CFLAGS\", \"\") + \" -fno-function-sections -fno-data-sections\"\n+    os.environ[\"TARGET_LDFLAGS\"] = os.environ.get(\"CFLAGS\", \"\") + \" -fno-function-sections -fno-data-sections\"\n+    os.environ[\"CFLAGS\"] = \"\"\n+    # LuaJIT compile process build a tool `buildvm` and use it, building `buildvm` with ASAN\n+    # will cause LSAN detect its leak and fail the build, set exitcode to 0 to make LSAN doesn't\n+    # fail on it.\n+    os.environ[\"LSAN_OPTIONS\"] = \"exitcode=0\"\n+\n+    if \"ENVOY_MSAN\" in os.environ:\n+      os.environ[\"HOST_CFLAGS\"] = \"-fno-sanitize=memory\"\n+      os.environ[\"HOST_LDFLAGS\"] = \"-fno-sanitize=memory\"\n+\n+    # Remove LuaJIT from ASAN for now.\n+    # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved.\n+    if \"ENVOY_CONFIG_ASAN\" in os.environ or \"ENVOY_CONFIG_MSAN\" in os.environ:\n+      os.environ[\"TARGET_CFLAGS\"] += \" -fsanitize-blacklist=%s/com_github_moonjit_moonjit/clang-asan-blocklist.txt\" % os.environ[\"PWD\"]\n+      with open(\"clang-asan-blocklist.txt\", \"w\") as f:\n+        f.write(\"fun:*\\n\")\n+\n+    os.system('make -j{} V=1 PREFIX=\"{}\" install'.format(os.cpu_count(), args.prefix))\n+\n+def win_main():\n+    src_dir = os.path.dirname(os.path.realpath(__file__))\n+    
dst_dir = os.getcwd() + \"/moonjit\"\n+    shutil.copytree(src_dir, os.path.basename(src_dir))\n+    os.chdir(os.path.basename(src_dir) + \"/src\")\n+    os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static')\n+    os.makedirs(dst_dir + \"/lib\", exist_ok=True)\n+    shutil.copy(\"lua51.lib\", dst_dir + \"/lib\")\n+    os.makedirs(dst_dir + \"/include/moonjit-2.2\", exist_ok=True)\n+    for header in [\"lauxlib.h\", \"luaconf.h\", \"lua.h\", \"lua.hpp\", \"luajit.h\", \"lualib.h\"]:\n+      shutil.copy(header, dst_dir + \"/include/moonjit-2.2\")\n+    os.makedirs(dst_dir + \"/bin\", exist_ok=True)\n+    shutil.copy(\"luajit.exe\", dst_dir + \"/bin\")\n+\n+if os.name == 'nt':\n+  win_main()\n+else:\n+  main()\n+\ndiff --git a/src/Makefile b/src/Makefile\nindex dad9aeec..e10b3118 100644\n--- a/src/Makefile\n+++ b/src/Makefile\n@@ -27,7 +27,7 @@ NODOTABIVER= 51\n DEFAULT_CC = gcc\n #\n # LuaJIT builds as a native 32 or 64 bit binary by default.\n-CC= $(DEFAULT_CC)\n+CC ?= $(DEFAULT_CC)\n #\n # Use this if you want to force a 32 bit build on a 64 bit multilib OS.\n #CC= $(DEFAULT_CC) -m32\n@@ -71,10 +71,10 @@ CCWARN= -Wall\n # as dynamic mode.\n #\n # Mixed mode creates a static + dynamic library and a statically linked luajit.\n-BUILDMODE= mixed\n+#BUILDMODE= mixed\n #\n # Static mode creates a static library and a statically linked luajit.\n-#BUILDMODE= static\n+BUILDMODE= static\n #\n # Dynamic mode creates a dynamic library and a dynamically linked luajit.\n # Note: this executable will only run when the library is installed!\n@@ -99,7 +99,7 @@ XCFLAGS=\n # enabled by default. Some other features that *might* break some existing\n # code (e.g. __pairs or os.execute() return values) can be enabled here.\n # Note: this does not provide full compatibility with Lua 5.2 at this time.\n-#XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT\n+XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT\n #\n # Disable the JIT compiler, i.e. 
turn LuaJIT into a pure interpreter.\n #XCFLAGS+= -DLUAJIT_DISABLE_JIT\n@@ -612,7 +612,7 @@ endif\n \n Q= @\n E= @echo\n-#Q=\n+Q=\n #E= @:\n \n ##############################################################################\ndiff --git a/src/msvcbuild.bat b/src/msvcbuild.bat\nindex c2d2c212..71f24422 100644\n--- a/src/msvcbuild.bat\n+++ b/src/msvcbuild.bat\n@@ -15,7 +15,7 @@\n @setlocal\r\n @rem Add more debug flags here, e.g. DEBUGCFLAGS=/DLUA_USE_APICHECK\r\n @set DEBUGCFLAGS=\r\n-@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline\r\n+@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT\r\n @set LJLINK=link /nologo\r\n @set LJMT=mt /nologo\r\n @set LJLIB=lib /nologo /nodefaultlib\r\n@@ -24,10 +24,9 @@\n @set DASC=vm_x86.dasc\r\n @set LJDLLNAME=lua51.dll\r\n @set LJLIBNAME=lua51.lib\r\n-@set BUILDTYPE=release\r\n @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_utf8.c\r\n \r\n-%LJCOMPILE% host\\minilua.c\r\n+%LJCOMPILE% /O2 host\\minilua.c\r\n @if errorlevel 1 goto :BAD\r\n %LJLINK% /out:minilua.exe minilua.obj\r\n @if errorlevel 1 goto :BAD\r\n@@ -50,7 +49,7 @@ if exist minilua.exe.manifest^\n minilua %DASM% -LN %DASMFLAGS% -o host\\buildvm_arch.h %DASC%\r\n @if errorlevel 1 goto :BAD\r\n \r\n-%LJCOMPILE% /I \".\" /I %DASMDIR% host\\buildvm*.c\r\n+%LJCOMPILE% /O2 /I \".\" /I %DASMDIR% host\\buildvm*.c\r\n @if errorlevel 1 goto :BAD\r\n %LJLINK% /out:buildvm.exe buildvm*.obj\r\n @if errorlevel 1 goto :BAD\r\n@@ -74,25 +73,35 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c\n \r\n @if \"%1\" neq \"debug\" goto :NODEBUG\r\n @shift\r\n-@set BUILDTYPE=debug\r\n-@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS%\r\n+@set LJCOMPILE=%LJCOMPILE% /O0 /Z7\r\n+@set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no\r\n+@set 
LJCRTDBG=d\r\n+@goto :ENDDEBUG\r\n :NODEBUG\r\n-@set LJLINK=%LJLINK% /%BUILDTYPE%\r\n+@set LJCOMPILE=%LJCOMPILE% /O2 /Z7\r\n+@set LJLINK=%LJLINK% /release /incremental:no\r\n+@set LJCRTDBG=\r\n+:ENDDEBUG\r\n @if \"%1\"==\"amalg\" goto :AMALGDLL\r\n @if \"%1\"==\"static\" goto :STATIC\r\n-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c\r\n+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG%\r\n+%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c\r\n @if errorlevel 1 goto :BAD\r\n %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj\r\n @if errorlevel 1 goto :BAD\r\n @goto :MTDLL\r\n :STATIC\r\n+@shift\r\n+@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG%\r\n %LJCOMPILE% lj_*.c lib_*.c\r\n @if errorlevel 1 goto :BAD\r\n %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj\r\n @if errorlevel 1 goto :BAD\r\n @goto :MTDLL\r\n :AMALGDLL\r\n-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c\r\n+@shift\r\n+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG%\r\n+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c\r\n @if errorlevel 1 goto :BAD\r\n %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj\r\n @if errorlevel 1 goto :BAD\r\n"
  },
  {
    "path": "bazel/foreign_cc/nghttp2.patch",
    "content": "diff --git a/CMakeLists.txt b/CMakeLists.txt\nindex 35c77d1d..47bd63f5 100644\n--- a/CMakeLists.txt\n+++ b/CMakeLists.txt\n@@ -273,7 +273,11 @@ check_type_size(\"ssize_t\" SIZEOF_SSIZE_T)\n if(SIZEOF_SSIZE_T STREQUAL \"\")\n   # ssize_t is a signed type in POSIX storing at least -1.\n   # Set it to \"int\" to match the behavior of AC_TYPE_SSIZE_T (autotools).\n-  set(ssize_t int)\n+  if(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)\n+    set(ssize_t ptrdiff_t)\n+  else()\n+    set(ssize_t int)\n+  endif()\n endif()\n # AC_TYPE_UINT8_T\n # AC_TYPE_UINT16_T\n# https://github.com/nghttp2/nghttp2/pull/1468\ndiff --git a/lib/nghttp2_buf.c b/lib/nghttp2_buf.c\nindex 2a435bebf..92f97f7f2 100644\n--- a/lib/nghttp2_buf.c\n+++ b/lib/nghttp2_buf.c\n@@ -82,8 +82,10 @@ void nghttp2_buf_reset(nghttp2_buf *buf) {\n }\n \n void nghttp2_buf_wrap_init(nghttp2_buf *buf, uint8_t *begin, size_t len) {\n-  buf->begin = buf->pos = buf->last = buf->mark = begin;\n-  buf->end = begin + len;\n+  buf->begin = buf->pos = buf->last = buf->mark = buf->end = begin;\n+  if (buf->end != NULL) {\n+    buf->end += len;\n+  }\n }\n \n static int buf_chain_new(nghttp2_buf_chain **chain, size_t chunk_length,\ndiff --git a/lib/nghttp2_frame.c b/lib/nghttp2_frame.c\nindex 4821de408..940c723b0 100644\n--- a/lib/nghttp2_frame.c\n+++ b/lib/nghttp2_frame.c\n@@ -818,8 +818,10 @@ int nghttp2_frame_unpack_origin_payload(nghttp2_extension *frame,\n   size_t len = 0;\n \n   origin = frame->payload;\n-  p = payload;\n-  end = p + payloadlen;\n+  p = end = payload;\n+  if (end != NULL) {\n+    end += payloadlen;\n+  }\n \n   for (; p != end;) {\n     if (end - p < 2) {\ndiff --git a/lib/nghttp2_session.c b/lib/nghttp2_session.c\nindex 563ccd7de..794f141a1 100644\n--- a/lib/nghttp2_session.c\n+++ b/lib/nghttp2_session.c\n@@ -5349,7 +5349,7 @@ static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe,\n \n ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,\n      
                            size_t inlen) {\n-  const uint8_t *first = in, *last = in + inlen;\n+  const uint8_t *first = in, *last = in;\n   nghttp2_inbound_frame *iframe = &session->iframe;\n   size_t readlen;\n   ssize_t padlen;\n@@ -5360,6 +5360,10 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,\n   size_t pri_fieldlen;\n   nghttp2_mem *mem;\n \n+  if (in != NULL) {\n+    last += inlen;\n+  }\n+\n   DEBUGF(\"recv: connection recv_window_size=%d, local_window=%d\\n\",\n          session->recv_window_size, session->local_window_size);\n \n@@ -5389,7 +5393,9 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,\n       }\n \n       iframe->payloadleft -= readlen;\n-      in += readlen;\n+      if (in != NULL) {\n+        in += readlen;\n+      }\n \n       if (iframe->payloadleft == 0) {\n         session_inbound_frame_reset(session);\n"
  },
  {
    "path": "bazel/foreign_cc/zlib.patch",
    "content": "diff --git a/CMakeLists.txt b/CMakeLists.txt\nindex 0fe939d..2f0475a 100644\n--- a/CMakeLists.txt\n+++ b/CMakeLists.txt\n@@ -229,21 +229,22 @@ endif()\n #============================================================================\n # Example binaries\n #============================================================================\n-\n-add_executable(example test/example.c)\n-target_link_libraries(example zlib)\n-add_test(example example)\n-\n-add_executable(minigzip test/minigzip.c)\n-target_link_libraries(minigzip zlib)\n-\n-if(HAVE_OFF64_T)\n-    add_executable(example64 test/example.c)\n-    target_link_libraries(example64 zlib)\n-    set_target_properties(example64 PROPERTIES COMPILE_FLAGS \"-D_FILE_OFFSET_BITS=64\")\n-    add_test(example64 example64)\n-\n-    add_executable(minigzip64 test/minigzip.c)\n-    target_link_libraries(minigzip64 zlib)\n-    set_target_properties(minigzip64 PROPERTIES COMPILE_FLAGS \"-D_FILE_OFFSET_BITS=64\")\n+if(NOT SKIP_BUILD_EXAMPLES)\n+    add_executable(example test/example.c)\n+    target_link_libraries(example zlib)\n+    add_test(example example)\n+\n+    add_executable(minigzip test/minigzip.c)\n+    target_link_libraries(minigzip zlib)\n+\n+    if(HAVE_OFF64_T)\n+        add_executable(example64 test/example.c)\n+        target_link_libraries(example64 zlib)\n+        set_target_properties(example64 PROPERTIES COMPILE_FLAGS \"-D_FILE_OFFSET_BITS=64\")\n+        add_test(example64 example64)\n+\n+        add_executable(minigzip64 test/minigzip.c)\n+        target_link_libraries(minigzip64 zlib)\n+        set_target_properties(minigzip64 PROPERTIES COMPILE_FLAGS \"-D_FILE_OFFSET_BITS=64\")\n+    endif()\n endif()\n"
  },
  {
    "path": "bazel/gen_sh_test_runner.sh",
    "content": "#!/bin/bash\n\n# Used in a genrule to wrap sh_test script for execution in\n# //test/coverage:coverage_tests single binary.\n\n# Do not generate test suites for empty source files.\nif [ -z \"$1\" ]; then\n  exit 0\nfi\n\nRAW_TEST_NAME=\"$(basename \"$1\")\"\n# Normalize to something we can use in a TEST(ShTest, ...) name\nTEST_NAME=\"${RAW_TEST_NAME//./_}\"\n\nEXEC_ARGS=\"\\\"$1\\\"\"\nshift\nfor a in \"$@\"\ndo\n  EXEC_ARGS=\"${EXEC_ARGS}, \\\"$a\\\"\"\ndone\n\n(\n  cat << EOF\n#include \"test/test_common/environment.h\"\n\n#include \"gtest/gtest.h\"\n\nTEST(ShTest, ${TEST_NAME}) {\n  Envoy::TestEnvironment::exec({${EXEC_ARGS}});\n}\nEOF\n)\n"
  },
  {
    "path": "bazel/genrule_repository.bzl",
    "content": "def _genrule_repository(ctx):\n    ctx.download_and_extract(\n        ctx.attr.urls,\n        \"\",  # output\n        ctx.attr.sha256,\n        \"\",  # type\n        ctx.attr.strip_prefix,\n    )\n    for ii, patch in enumerate(ctx.attr.patches):\n        patch_input = \"patch-input-%d.patch\" % (ii,)\n        ctx.symlink(patch, patch_input)\n        patch_result = ctx.execute([\"patch\", \"-p0\", \"--input\", patch_input])\n        if patch_result.return_code != 0:\n            fail(\"Failed to apply patch %r: %s\" % (patch, patch_result.stderr))\n\n    # https://github.com/bazelbuild/bazel/issues/3766\n    genrule_cmd_file = Label(\"@envoy//bazel\").relative(str(ctx.attr.genrule_cmd_file))\n    ctx.symlink(genrule_cmd_file, \"_envoy_genrule_cmd.genrule_cmd\")\n    cat_genrule_cmd = ctx.execute([\"cat\", \"_envoy_genrule_cmd.genrule_cmd\"])\n    if cat_genrule_cmd.return_code != 0:\n        fail(\"Failed to read genrule command %r: %s\" % (\n            genrule_cmd_file,\n            cat_genrule_cmd.stderr,\n        ))\n\n    ctx.file(\"WORKSPACE\", \"workspace(name=%r)\" % (ctx.name,))\n    ctx.symlink(ctx.attr.build_file, \"BUILD.bazel\")\n\n    # Inject the genrule_cmd content into a .bzl file that can be loaded\n    # from the repository BUILD file. 
We force the user to look up the\n    # command content \"by label\" so the inclusion source is obvious.\n    ctx.file(\"genrule_cmd.bzl\", \"\"\"\n_GENRULE_CMD = {%r: %r}\ndef genrule_cmd(label):\n    return _GENRULE_CMD[label]\n\"\"\" % (str(genrule_cmd_file), cat_genrule_cmd.stdout))\n\ngenrule_repository = repository_rule(\n    attrs = {\n        \"urls\": attr.string_list(\n            mandatory = True,\n            allow_empty = False,\n        ),\n        \"sha256\": attr.string(),\n        \"strip_prefix\": attr.string(),\n        \"patches\": attr.label_list(\n            allow_files = [\".patch\"],\n            allow_empty = True,\n        ),\n        \"genrule_cmd_file\": attr.label(\n            mandatory = True,\n            allow_single_file = [\".genrule_cmd\"],\n        ),\n        \"build_file\": attr.label(\n            mandatory = True,\n            allow_single_file = [\".BUILD\"],\n        ),\n    },\n    implementation = _genrule_repository,\n)\n\ndef _genrule_cc_deps(ctx):\n    outs = depset()\n    for dep in ctx.attr.deps:\n        outs = dep.cc.transitive_headers + dep.cc.libs + outs\n    return DefaultInfo(files = outs)\n\ngenrule_cc_deps = rule(\n    attrs = {\n        \"deps\": attr.label_list(\n            providers = [],  # CcStarlarkApiProvider\n            mandatory = True,\n            allow_empty = False,\n        ),\n    },\n    implementation = _genrule_cc_deps,\n)\n\ndef _absolute_bin(path):\n    # If the binary path looks like it's relative to the current directory,\n    # transform it to be absolute by appending \"${PWD}\".\n    if \"/\" in path and not path.startswith(\"/\"):\n        return '\"${PWD}\"/%r' % (path,)\n    return \"%r\" % (path,)\n\ndef _genrule_environment(ctx):\n    lines = []\n\n    # Bazel uses the same command for C and C++ compilation.\n    c_compiler = ctx.var[\"CC\"]\n\n    # Bare minimum cflags to get included test binaries to link.\n    #\n    # See .bazelrc for the full set.\n    asan_flags = 
[\"-fsanitize=address,undefined\"]\n    tsan_flags = [\"-fsanitize=thread\"]\n\n    # Older versions of GCC in Ubuntu, including GCC 5 used in CI images,\n    # incorrectly invoke the older `/usr/bin/ld` with gold-specific options when\n    # building with sanitizers enabled. Work around this by forcing use of gold\n    # in sanitize mode.\n    #\n    # This is not a great solution because it doesn't detect GCC when Bazel has\n    # wrapped it in an intermediate script, but it works well enough to keep CI\n    # running.\n    #\n    # https://stackoverflow.com/questions/37603238/fsanitize-not-using-gold-linker-in-gcc-6-1\n    force_ld = []\n    if \"clang\" in c_compiler:\n        force_ld = [\"-fuse-ld=lld\"]\n    elif \"gcc\" in c_compiler or \"g++\" in c_compiler:\n        force_ld = [\"-fuse-ld=gold\"]\n\n    cc_flags = []\n    ld_flags = []\n    ld_libs = []\n    if ctx.var.get(\"ENVOY_CONFIG_COVERAGE\"):\n        ld_libs.append(\"-lgcov\")\n    if ctx.var.get(\"ENVOY_CONFIG_ASAN\"):\n        cc_flags += asan_flags\n        ld_flags += asan_flags\n        ld_flags += force_ld\n    if ctx.var.get(\"ENVOY_CONFIG_TSAN\"):\n        cc_flags += tsan_flags\n        ld_flags += tsan_flags\n        ld_flags += force_ld\n\n    lines.append(\"export CFLAGS=%r\" % (\" \".join(cc_flags),))\n    lines.append(\"export LDFLAGS=%r\" % (\" \".join(ld_flags),))\n    lines.append(\"export LIBS=%r\" % (\" \".join(ld_libs),))\n    lines.append(\"export CC=%s\" % (_absolute_bin(c_compiler),))\n    lines.append(\"export CXX=%s\" % (_absolute_bin(c_compiler),))\n\n    # Some Autoconf helper binaries leak, which makes ./configure think the\n    # system is unable to do anything. 
Turn off leak checking during part of\n    # the build.\n    lines.append(\"export ASAN_OPTIONS=detect_leaks=0\")\n\n    lines.append(\"\")\n    out = ctx.actions.declare_file(ctx.attr.name + \".sh\")\n    ctx.actions.write(out, \"\\n\".join(lines))\n    return DefaultInfo(files = depset([out]))\n\ngenrule_environment = rule(\n    implementation = _genrule_environment,\n)\n"
  },
  {
    "path": "bazel/get_workspace_status",
    "content": "#!/bin/bash\n\n# This file was imported from https://github.com/bazelbuild/bazel at d6fec93.\n\n# This script will be run bazel when building process starts to\n# generate key-value information that represents the status of the\n# workspace. The output should be like\n#\n# KEY1 VALUE1\n# KEY2 VALUE2\n#\n# If the script exits with non-zero code, it's considered as a failure\n# and the output will be discarded.\n\n# For Envoy in particular, we want to force binaries to relink when the Git\n# SHA changes (https://github.com/envoyproxy/envoy/issues/2551). This can be\n# done by prefixing keys with \"STABLE_\". To avoid breaking compatibility with\n# other status scripts, this one still echos the non-stable (\"volatile\") names.\n\n# If this SOURCE_VERSION file exists then it must have been placed here by a\n# distribution doing a non-git, source build.\n# Distributions would be expected to echo the commit/tag as BUILD_SCM_REVISION\nif [ -f SOURCE_VERSION ]\nthen\n    echo \"BUILD_SCM_REVISION $(cat SOURCE_VERSION)\"\n    echo \"STABLE_BUILD_SCM_REVISION $(cat SOURCE_VERSION)\"\n    echo \"BUILD_SCM_STATUS Distribution\"\n    exit 0\nfi\n\n# The code below presents an implementation that works for git repository\ngit_rev=$(git rev-parse HEAD) || exit 1\necho \"BUILD_SCM_REVISION ${git_rev}\"\necho \"STABLE_BUILD_SCM_REVISION ${git_rev}\"\n\n# Check whether there are any uncommitted changes\ntree_status=\"Clean\"\ngit diff-index --quiet HEAD -- || {\n    tree_status=\"Modified\"\n}\necho \"BUILD_SCM_STATUS ${tree_status}\"\necho \"STABLE_BUILD_SCM_STATUS ${tree_status}\"\n"
  },
  {
    "path": "bazel/io_opentracing_cpp.patch",
    "content": "diff --git a/mocktracer/BUILD b/mocktracer/BUILD\nindex 3b22bab..d425e2e 100644\n--- a/mocktracer/BUILD\n+++ b/mocktracer/BUILD\n@@ -7,11 +7,13 @@ cc_library(\n     deps = [\n         \"//:opentracing\",\n     ],\n+    alwayslink = 1,\n )\n\n cc_binary(\n     name = \"libmocktracer_plugin.so\",\n     linkshared = 1,\n+    linkstatic = 1,\n     visibility = [\"//visibility:public\"],\n     deps = [\n         \"//mocktracer:mocktracer\"\ndiff --git a/src/dynamic_load_unix.cpp b/src/dynamic_load_unix.cpp\nindex 17e08fd..7e8ac02 100644\n--- a/src/dynamic_load_unix.cpp\n+++ b/src/dynamic_load_unix.cpp\n@@ -35,7 +35,13 @@ DynamicallyLoadTracingLibrary(const char* shared_library,\n                               std::string& error_message) noexcept try {\n   dlerror();  // Clear any existing error.\n\n-  const auto handle = dlopen(shared_library, RTLD_NOW | RTLD_LOCAL);\n+  const auto handle = dlopen(shared_library, RTLD_NOW | RTLD_LOCAL\n+#if defined(__has_feature)\n+#if __has_feature(address_sanitizer)\n+      | RTLD_NODELETE\n+#endif\n+#endif\n+  );\n   if (handle == nullptr) {\n     error_message = dlerror();\n     return make_unexpected(dynamic_load_failure_error);\n# commit 3a6f049c123a1906c7381e824292c18fd8698293\n# Author: Christian Neumüller <cn00@gmx.at>\n# Date:   Wed Feb 27 01:48:17 2019 +0100\n#\n# Fix MSVC compiler flags. 
(#104)\n# \n#    * All debug specific flags would be replaced by release specific on MSVC.\n#    * The OPENTRACING_STATIC flag would be missing from OpenTracingConfig.cmake when linking against OpenTracing::opentracing-static\n#\ndiff --git a/CMakeLists.txt b/CMakeLists.txt\nindex 1721fb3..3873b3a 100644\n--- a/CMakeLists.txt\n+++ b/CMakeLists.txt\n@@ -52,7 +52,7 @@ if (\"${CMAKE_CXX_COMPILER_ID}\" MATCHES \"Clang\")\n elseif (\"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"GNU\")\n   set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wall -Wextra\")\n elseif (\"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"MSVC\")\n-  set(CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_RELEASE} -D_SCL_SECURE_NO_WARNINGS\")\n+  set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -D_SCL_SECURE_NO_WARNINGS\")\n endif()\n \n # ==============================================================================\n"
  },
  {
    "path": "bazel/protobuf.patch",
    "content": "# https://github.com/protocolbuffers/protobuf/pull/6720\ndiff --git a/third_party/BUILD b/third_party/BUILD\nnew file mode 100644\nindex 0000000000..b66101a39a\n--- /dev/null\n+++ b/third_party/BUILD\n@@ -0,0 +1 @@\n+exports_files([\"six.BUILD\", \"zlib.BUILD\"])\n\n# https://github.com/protocolbuffers/protobuf/pull/6896\ndiff --git a/src/google/protobuf/stubs/strutil.cc b/src/google/protobuf/stubs/strutil.cc\nindex 62b3f0a871..bb3df47ccf 100644\n--- a/src/google/protobuf/stubs/strutil.cc\n+++ b/src/google/protobuf/stubs/strutil.cc\n@@ -1435,32 +1435,44 @@ AlphaNum::AlphaNum(strings::Hex hex) {\n // after the area just overwritten.  It comes in multiple flavors to minimize\n // call overhead.\n static char *Append1(char *out, const AlphaNum &x) {\n-  memcpy(out, x.data(), x.size());\n-  return out + x.size();\n+  if (x.size() > 0) {\n+    memcpy(out, x.data(), x.size());\n+    out += x.size();\n+  }\n+  return out;\n }\n \n static char *Append2(char *out, const AlphaNum &x1, const AlphaNum &x2) {\n-  memcpy(out, x1.data(), x1.size());\n-  out += x1.size();\n-\n-  memcpy(out, x2.data(), x2.size());\n-  return out + x2.size();\n+  if (x1.size() > 0) {\n+    memcpy(out, x1.data(), x1.size());\n+    out += x1.size();\n+  }\n+  if (x2.size() > 0) {\n+    memcpy(out, x2.data(), x2.size());\n+    out += x2.size();\n+  }\n+  return out;\n }\n \n-static char *Append4(char *out,\n-                     const AlphaNum &x1, const AlphaNum &x2,\n+static char *Append4(char *out, const AlphaNum &x1, const AlphaNum &x2,\n                      const AlphaNum &x3, const AlphaNum &x4) {\n-  memcpy(out, x1.data(), x1.size());\n-  out += x1.size();\n-\n-  memcpy(out, x2.data(), x2.size());\n-  out += x2.size();\n-\n-  memcpy(out, x3.data(), x3.size());\n-  out += x3.size();\n-\n-  memcpy(out, x4.data(), x4.size());\n-  return out + x4.size();\n+  if (x1.size() > 0) {\n+    memcpy(out, x1.data(), x1.size());\n+    out += x1.size();\n+  }\n+  if (x2.size() > 0) {\n+    
memcpy(out, x2.data(), x2.size());\n+    out += x2.size();\n+  }\n+  if (x3.size() > 0) {\n+    memcpy(out, x3.data(), x3.size());\n+    out += x3.size();\n+  }\n+  if (x4.size() > 0) {\n+    memcpy(out, x4.data(), x4.size());\n+    out += x4.size();\n+  }\n+  return out;\n }\n \n string StrCat(const AlphaNum &a, const AlphaNum &b) {\n\n# patching for zlib binding\ndiff --git a/BUILD b/BUILD\nindex efc3d8e7f..746ad4851 100644\n--- a/BUILD\n+++ b/BUILD\n@@ -24,7 +24,7 @@ config_setting(\n # ZLIB configuration\n ################################################################################\n\n-ZLIB_DEPS = [\"@zlib//:zlib\"]\n+ZLIB_DEPS = [\"//external:zlib\"]\n\n ################################################################################\n # Protobuf Runtime Library\n\ndiff --git a/protobuf.bzl b/protobuf.bzl\nindex 5fa5543b1..484bc41a7 100644\n--- a/protobuf.bzl\n+++ b/protobuf.bzl\n@@ -75,18 +75,17 @@ def _RelativeOutputPath(path, include, dest = \"\"):\n def _proto_gen_impl(ctx):\n     \"\"\"General implementation for generating protos\"\"\"\n     srcs = ctx.files.srcs\n-    deps = []\n-    deps += ctx.files.srcs\n+    deps = depset(direct=ctx.files.srcs)\n     source_dir = _SourceDir(ctx)\n     gen_dir = _GenDir(ctx).rstrip(\"/\")\n     if source_dir:\n-        import_flags = [\"-I\" + source_dir, \"-I\" + gen_dir]\n+        import_flags = depset(direct=[\"-I\" + source_dir, \"-I\" + gen_dir])\n     else:\n-        import_flags = [\"-I.\"]\n+        import_flags = depset(direct=[\"-I.\"])\n \n     for dep in ctx.attr.deps:\n-        import_flags += dep.proto.import_flags\n-        deps += dep.proto.deps\n+        import_flags = depset(transitive=[import_flags, dep.proto.import_flags])\n+        deps = depset(transitive=[deps, dep.proto.deps])\n \n     if not ctx.attr.gen_cc and not ctx.attr.gen_py and not ctx.executable.plugin:\n         return struct(\n@@ -103,7 +102,7 @@ def _proto_gen_impl(ctx):\n         in_gen_dir = src.root.path == gen_dir\n         
if in_gen_dir:\n             import_flags_real = []\n-            for f in depset(import_flags).to_list():\n+            for f in import_flags.to_list():\n                 path = f.replace(\"-I\", \"\")\n                 import_flags_real.append(\"-I$(realpath -s %s)\" % path)\n \n@@ -118,7 +117,7 @@ def _proto_gen_impl(ctx):\n             outs.extend(_PyOuts([src.basename], use_grpc_plugin = use_grpc_plugin))\n \n         outs = [ctx.actions.declare_file(out, sibling = src) for out in outs]\n-        inputs = [src] + deps\n+        inputs = [src] + deps.to_list()\n         tools = [ctx.executable.protoc]\n         if ctx.executable.plugin:\n             plugin = ctx.executable.plugin\n@@ -141,7 +140,7 @@ def _proto_gen_impl(ctx):\n                 inputs = inputs,\n                 tools = tools,\n                 outputs = outs,\n-                arguments = args + import_flags + [src.path],\n+                arguments = args + import_flags.to_list() + [src.path],\n                 executable = ctx.executable.protoc,\n                 mnemonic = \"ProtoCompile\",\n                 use_default_shell_env = True,\n"
  },
  {
    "path": "bazel/repositories.bzl",
    "content": "load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")\nload(\":dev_binding.bzl\", \"envoy_dev_binding\")\nload(\":genrule_repository.bzl\", \"genrule_repository\")\nload(\"@envoy_api//bazel:envoy_http_archive.bzl\", \"envoy_http_archive\")\nload(\":repository_locations.bzl\", \"DEPENDENCY_ANNOTATIONS\", \"DEPENDENCY_REPOSITORIES\", \"USE_CATEGORIES\", \"USE_CATEGORIES_WITH_CPE_OPTIONAL\")\nload(\"@com_google_googleapis//:repository_rules.bzl\", \"switched_rules_by_language\")\nload(\":crates.bzl\", \"raze_fetch_remote_crates\")\n\nPPC_SKIP_TARGETS = [\"envoy.filters.http.lua\"]\n\nWINDOWS_SKIP_TARGETS = [\n    \"envoy.tracers.dynamic_ot\",\n    \"envoy.tracers.lightstep\",\n    \"envoy.tracers.datadog\",\n    \"envoy.tracers.opencensus\",\n    \"envoy.watchdog.abort_action\",\n]\n\n# Make all contents of an external repository accessible under a filegroup.  Used for external HTTP\n# archives, e.g. cares.\nBUILD_ALL_CONTENT = \"\"\"filegroup(name = \"all\", srcs = glob([\"**\"]), visibility = [\"//visibility:public\"])\"\"\"\n\ndef _build_all_content(exclude = []):\n    return \"\"\"filegroup(name = \"all\", srcs = glob([\"**\"], exclude={}), visibility = [\"//visibility:public\"])\"\"\".format(repr(exclude))\n\ndef _fail_missing_attribute(attr, key):\n    fail(\"The '%s' attribute must be defined for external dependency \" % attr + key)\n\n# Method for verifying content of the DEPENDENCY_REPOSITORIES defined in bazel/repository_locations.bzl\n# Verification is here so that bazel/repository_locations.bzl can be loaded into other tools written in Python,\n# and as such needs to be free of bazel specific constructs.\n#\n# We also remove the attributes for further consumption in this file, since rules such as http_archive\n# don't recognize them.\ndef _repository_locations():\n    locations = {}\n    for key, location in DEPENDENCY_REPOSITORIES.items():\n        mutable_location = dict(location)\n        locations[key] = 
mutable_location\n\n        if \"sha256\" not in location or len(location[\"sha256\"]) == 0:\n            _fail_missing_attribute(\"sha256\", key)\n\n        if \"project_name\" not in location:\n            _fail_missing_attribute(\"project_name\", key)\n        mutable_location.pop(\"project_name\")\n\n        if \"project_desc\" not in location:\n            _fail_missing_attribute(\"project_desc\", key)\n        mutable_location.pop(\"project_desc\")\n\n        if \"project_url\" not in location:\n            _fail_missing_attribute(\"project_url\", key)\n        project_url = mutable_location.pop(\"project_url\")\n        if not project_url.startswith(\"https://\") and not project_url.startswith(\"http://\"):\n            fail(\"project_url must start with https:// or http://: \" + project_url)\n\n        if \"version\" not in location:\n            _fail_missing_attribute(\"version\", key)\n        mutable_location.pop(\"version\")\n\n        if \"use_category\" not in location:\n            _fail_missing_attribute(\"use_category\", key)\n        use_category = mutable_location.pop(\"use_category\")\n\n        if \"dataplane_ext\" in use_category or \"observability_ext\" in use_category:\n            if \"extensions\" not in location:\n                _fail_missing_attribute(\"extensions\", key)\n            mutable_location.pop(\"extensions\")\n\n        if \"last_updated\" not in location:\n            _fail_missing_attribute(\"last_updated\", key)\n        last_updated = mutable_location.pop(\"last_updated\")\n\n        # Starlark doesn't have regexes.\n        if len(last_updated) != 10 or last_updated[4] != \"-\" or last_updated[7] != \"-\":\n            fail(\"last_updated must match YYYY-MM-DD: \" + last_updated)\n\n        if \"cpe\" in location:\n            cpe = mutable_location.pop(\"cpe\")\n\n            # Starlark doesn't have regexes.\n            cpe_matches = (cpe != \"N/A\" and (not cpe.startswith(\"cpe:2.3:a:\") or not cpe.endswith(\":*\") 
and len(cpe.split(\":\")) != 6))\n            if cpe_matches:\n                fail(\"CPE must match cpe:2.3:a:<facet>:<facet>:*: \" + cpe)\n        elif not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location[\"use_category\"]]:\n            _fail_missing_attribute(\"cpe\", key)\n\n        for category in location[\"use_category\"]:\n            if category not in USE_CATEGORIES:\n                fail(\"Unknown use_category value '\" + category + \"' for dependency \" + key)\n\n    return locations\n\nREPOSITORY_LOCATIONS = _repository_locations()\n\n# To initialize http_archive REPOSITORY_LOCATIONS dictionaries must be stripped of annotations.\n# See repository_locations.bzl for the list of annotation attributes.\ndef _get_location(dependency):\n    stripped = dict(REPOSITORY_LOCATIONS[dependency])\n    for attribute in DEPENDENCY_ANNOTATIONS:\n        stripped.pop(attribute, None)\n    return stripped\n\ndef _repository_impl(name, **kwargs):\n    envoy_http_archive(\n        name,\n        locations = REPOSITORY_LOCATIONS,\n        **kwargs\n    )\n\ndef _default_envoy_build_config_impl(ctx):\n    ctx.file(\"WORKSPACE\", \"\")\n    ctx.file(\"BUILD.bazel\", \"\")\n    ctx.symlink(ctx.attr.config, \"extensions_build_config.bzl\")\n\n_default_envoy_build_config = repository_rule(\n    implementation = _default_envoy_build_config_impl,\n    attrs = {\n        \"config\": attr.label(default = \"@envoy//source/extensions:extensions_build_config.bzl\"),\n    },\n)\n\n# Python dependencies.\ndef _python_deps():\n    # TODO(htuch): convert these to pip3_import.\n    _repository_impl(\n        name = \"com_github_twitter_common_lang\",\n        build_file = \"@envoy//bazel/external:twitter_common_lang.BUILD\",\n    )\n    _repository_impl(\n        name = \"com_github_twitter_common_rpc\",\n        build_file = \"@envoy//bazel/external:twitter_common_rpc.BUILD\",\n    )\n    _repository_impl(\n        name = 
\"com_github_twitter_common_finagle_thrift\",\n        build_file = \"@envoy//bazel/external:twitter_common_finagle_thrift.BUILD\",\n    )\n    _repository_impl(\n        name = \"six\",\n        build_file = \"@com_google_protobuf//third_party:six.BUILD\",\n    )\n\n# Bazel native C++ dependencies. For the dependencies that don't provide autoconf/automake builds.\ndef _cc_deps():\n    _repository_impl(\"grpc_httpjson_transcoding\")\n    native.bind(\n        name = \"path_matcher\",\n        actual = \"@grpc_httpjson_transcoding//src:path_matcher\",\n    )\n    native.bind(\n        name = \"grpc_transcoding\",\n        actual = \"@grpc_httpjson_transcoding//src:transcoding\",\n    )\n\ndef _go_deps(skip_targets):\n    # Keep the skip_targets check around until Istio Proxy has stopped using\n    # it to exclude the Go rules.\n    if \"io_bazel_rules_go\" not in skip_targets:\n        _repository_impl(\n            name = \"io_bazel_rules_go\",\n            # TODO(wrowe, sunjayBhatia): remove when Windows RBE supports batch file invocation\n            patch_args = [\"-p1\"],\n            patches = [\"@envoy//bazel:rules_go.patch\"],\n        )\n        _repository_impl(\"bazel_gazelle\")\n\ndef _rust_deps():\n    _repository_impl(\"io_bazel_rules_rust\")\n    raze_fetch_remote_crates()\n\ndef envoy_dependencies(skip_targets = []):\n    # Setup Envoy developer tools.\n    envoy_dev_binding()\n\n    # Treat Envoy's overall build config as an external repo, so projects that\n    # build Envoy as a subcomponent can easily override the config.\n    if \"envoy_build_config\" not in native.existing_rules().keys():\n        _default_envoy_build_config(name = \"envoy_build_config\")\n\n    # Setup external Bazel rules\n    _foreign_cc_dependencies()\n\n    # Binding to an alias pointing to the selected version of BoringSSL:\n    # - BoringSSL FIPS from @boringssl_fips//:ssl,\n    # - non-FIPS BoringSSL from @boringssl//:ssl.\n    _boringssl()\n    _boringssl_fips()\n    
native.bind(\n        name = \"ssl\",\n        actual = \"@envoy//bazel:boringssl\",\n    )\n\n    # The long repo names (`com_github_fmtlib_fmt` instead of `fmtlib`) are\n    # semi-standard in the Bazel community, intended to avoid both duplicate\n    # dependencies and name conflicts.\n    _com_github_c_ares_c_ares()\n    _com_github_circonus_labs_libcircllhist()\n    _com_github_cyan4973_xxhash()\n    _com_github_datadog_dd_opentracing_cpp()\n    _com_github_mirror_tclap()\n    _com_github_envoyproxy_sqlparser()\n    _com_github_fmtlib_fmt()\n    _com_github_gabime_spdlog()\n    _com_github_google_benchmark()\n    _com_github_google_jwt_verify()\n    _com_github_google_libprotobuf_mutator()\n    _com_github_google_tcmalloc()\n    _com_github_gperftools_gperftools()\n    _com_github_grpc_grpc()\n    _com_github_jbeder_yaml_cpp()\n    _com_github_libevent_libevent()\n    _com_github_luajit_luajit()\n    _com_github_moonjit_moonjit()\n    _com_github_nghttp2_nghttp2()\n    _com_github_nodejs_http_parser()\n    _com_github_tencent_rapidjson()\n    _com_google_absl()\n    _com_google_googletest()\n    _com_google_protobuf()\n    _io_opencensus_cpp()\n    _com_github_curl()\n    _com_github_envoyproxy_sqlparser()\n    _com_googlesource_chromium_v8()\n    _com_googlesource_quiche()\n    _com_googlesource_googleurl()\n    _com_lightstep_tracer_cpp()\n    _io_opentracing_cpp()\n    _net_zlib()\n    _com_github_zlib_ng_zlib_ng()\n    _upb()\n    _proxy_wasm_cpp_sdk()\n    _proxy_wasm_cpp_host()\n    _emscripten_toolchain()\n    _repository_impl(\"com_googlesource_code_re2\")\n    _com_google_cel_cpp()\n    _repository_impl(\"com_github_google_flatbuffers\")\n    _repository_impl(\"bazel_toolchains\")\n    _repository_impl(\"bazel_compdb\")\n    _repository_impl(\"envoy_build_tools\")\n    _repository_impl(\"rules_cc\")\n\n    # Unconditional, since we use this only for compiler-agnostic fuzzing utils.\n    _org_llvm_releases_compiler_rt()\n\n    _python_deps()\n    
_cc_deps()\n    _go_deps(skip_targets)\n    _rust_deps()\n    _kafka_deps()\n\n    _org_llvm_llvm()\n    _com_github_wavm_wavm()\n\n    switched_rules_by_language(\n        name = \"com_google_googleapis_imports\",\n        cc = True,\n        go = True,\n        grpc = True,\n        rules_override = {\n            \"py_proto_library\": \"@envoy_api//bazel:api_build_system.bzl\",\n        },\n    )\n    native.bind(\n        name = \"bazel_runfiles\",\n        actual = \"@bazel_tools//tools/cpp/runfiles\",\n    )\n\ndef _boringssl():\n    _repository_impl(\n        name = \"boringssl\",\n        patch_args = [\"-p1\"],\n        patches = [\"@envoy//bazel:boringssl_static.patch\"],\n    )\n\ndef _boringssl_fips():\n    location = REPOSITORY_LOCATIONS[\"boringssl_fips\"]\n    genrule_repository(\n        name = \"boringssl_fips\",\n        urls = location[\"urls\"],\n        sha256 = location[\"sha256\"],\n        genrule_cmd_file = \"@envoy//bazel/external:boringssl_fips.genrule_cmd\",\n        build_file = \"@envoy//bazel/external:boringssl_fips.BUILD\",\n        patches = [\"@envoy//bazel/external:boringssl_fips.patch\"],\n    )\n\ndef _com_github_circonus_labs_libcircllhist():\n    _repository_impl(\n        name = \"com_github_circonus_labs_libcircllhist\",\n        build_file = \"@envoy//bazel/external:libcircllhist.BUILD\",\n    )\n    native.bind(\n        name = \"libcircllhist\",\n        actual = \"@com_github_circonus_labs_libcircllhist//:libcircllhist\",\n    )\n\ndef _com_github_c_ares_c_ares():\n    location = _get_location(\"com_github_c_ares_c_ares\")\n    http_archive(\n        name = \"com_github_c_ares_c_ares\",\n        build_file_content = BUILD_ALL_CONTENT,\n        **location\n    )\n    native.bind(\n        name = \"ares\",\n        actual = \"@envoy//bazel/foreign_cc:ares\",\n    )\n\ndef _com_github_cyan4973_xxhash():\n    _repository_impl(\n        name = \"com_github_cyan4973_xxhash\",\n        build_file = 
\"@envoy//bazel/external:xxhash.BUILD\",\n    )\n    native.bind(\n        name = \"xxhash\",\n        actual = \"@com_github_cyan4973_xxhash//:xxhash\",\n    )\n\ndef _com_github_envoyproxy_sqlparser():\n    _repository_impl(\n        name = \"com_github_envoyproxy_sqlparser\",\n        build_file = \"@envoy//bazel/external:sqlparser.BUILD\",\n    )\n    native.bind(\n        name = \"sqlparser\",\n        actual = \"@com_github_envoyproxy_sqlparser//:sqlparser\",\n    )\n\ndef _com_github_mirror_tclap():\n    _repository_impl(\n        name = \"com_github_mirror_tclap\",\n        build_file = \"@envoy//bazel/external:tclap.BUILD\",\n        patch_args = [\"-p1\"],\n        # If and when we pick up tclap 1.4 or later release,\n        # this entire issue was refactored away 6 years ago;\n        # https://sourceforge.net/p/tclap/code/ci/5d4ffbf2db794af799b8c5727fb6c65c079195ac/\n        # https://github.com/envoyproxy/envoy/pull/8572#discussion_r337554195\n        patches = [\"@envoy//bazel:tclap-win64-ull-sizet.patch\"],\n    )\n    native.bind(\n        name = \"tclap\",\n        actual = \"@com_github_mirror_tclap//:tclap\",\n    )\n\ndef _com_github_fmtlib_fmt():\n    _repository_impl(\n        name = \"com_github_fmtlib_fmt\",\n        build_file = \"@envoy//bazel/external:fmtlib.BUILD\",\n    )\n    native.bind(\n        name = \"fmtlib\",\n        actual = \"@com_github_fmtlib_fmt//:fmtlib\",\n    )\n\ndef _com_github_gabime_spdlog():\n    _repository_impl(\n        name = \"com_github_gabime_spdlog\",\n        build_file = \"@envoy//bazel/external:spdlog.BUILD\",\n    )\n    native.bind(\n        name = \"spdlog\",\n        actual = \"@com_github_gabime_spdlog//:spdlog\",\n    )\n\ndef _com_github_google_benchmark():\n    location = _get_location(\"com_github_google_benchmark\")\n    http_archive(\n        name = \"com_github_google_benchmark\",\n        **location\n    )\n    native.bind(\n        name = \"benchmark\",\n        actual = 
\"@com_github_google_benchmark//:benchmark\",\n    )\n\ndef _com_github_google_libprotobuf_mutator():\n    _repository_impl(\n        name = \"com_github_google_libprotobuf_mutator\",\n        build_file = \"@envoy//bazel/external:libprotobuf_mutator.BUILD\",\n    )\n\ndef _com_github_jbeder_yaml_cpp():\n    _repository_impl(\n        name = \"com_github_jbeder_yaml_cpp\",\n    )\n    native.bind(\n        name = \"yaml_cpp\",\n        actual = \"@com_github_jbeder_yaml_cpp//:yaml-cpp\",\n    )\n\ndef _com_github_libevent_libevent():\n    location = _get_location(\"com_github_libevent_libevent\")\n    http_archive(\n        name = \"com_github_libevent_libevent\",\n        build_file_content = BUILD_ALL_CONTENT,\n        **location\n    )\n    native.bind(\n        name = \"event\",\n        actual = \"@envoy//bazel/foreign_cc:event\",\n    )\n\ndef _net_zlib():\n    _repository_impl(\n        name = \"net_zlib\",\n        build_file_content = BUILD_ALL_CONTENT,\n        patch_args = [\"-p1\"],\n        patches = [\"@envoy//bazel/foreign_cc:zlib.patch\"],\n    )\n\n    native.bind(\n        name = \"zlib\",\n        actual = \"@envoy//bazel/foreign_cc:zlib\",\n    )\n\n    # Bind for grpc.\n    native.bind(\n        name = \"madler_zlib\",\n        actual = \"@envoy//bazel/foreign_cc:zlib\",\n    )\n\ndef _com_github_zlib_ng_zlib_ng():\n    _repository_impl(\n        name = \"com_github_zlib_ng_zlib_ng\",\n        build_file_content = BUILD_ALL_CONTENT,\n    )\n\ndef _com_google_cel_cpp():\n    _repository_impl(\"com_google_cel_cpp\")\n    _repository_impl(\"rules_antlr\")\n    location = _get_location(\"antlr4_runtimes\")\n    http_archive(\n        name = \"antlr4_runtimes\",\n        build_file_content = \"\"\"\npackage(default_visibility = [\"//visibility:public\"])\ncc_library(\n    name = \"cpp\",\n    srcs = glob([\"runtime/Cpp/runtime/src/**/*.cpp\"]),\n    hdrs = glob([\"runtime/Cpp/runtime/src/**/*.h\"]),\n    includes = 
[\"runtime/Cpp/runtime/src\"],\n)\n\"\"\",\n        patch_args = [\"-p1\"],\n        # Patches ASAN violation of initialization fiasco\n        patches = [\"@envoy//bazel:antlr.patch\"],\n        **location\n    )\n\n    # Parser dependencies\n    # TODO: upgrade this when cel is upgraded to use the latest version\n    http_archive(\n        name = \"rules_antlr\",\n        sha256 = \"7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429\",\n        strip_prefix = \"rules_antlr-3cc2f9502a54ceb7b79b37383316b23c4da66f9a\",\n        urls = [\"https://github.com/marcohu/rules_antlr/archive/3cc2f9502a54ceb7b79b37383316b23c4da66f9a.tar.gz\"],\n    )\n\n    http_archive(\n        name = \"antlr4_runtimes\",\n        build_file_content = \"\"\"\npackage(default_visibility = [\"//visibility:public\"])\ncc_library(\n    name = \"cpp\",\n    srcs = glob([\"runtime/Cpp/runtime/src/**/*.cpp\"]),\n    hdrs = glob([\"runtime/Cpp/runtime/src/**/*.h\"]),\n    includes = [\"runtime/Cpp/runtime/src\"],\n)\n\"\"\",\n        sha256 = \"46f5e1af5f4bd28ade55cb632f9a069656b31fc8c2408f9aa045f9b5f5caad64\",\n        patch_args = [\"-p1\"],\n        # Patches ASAN violation of initialization fiasco\n        patches = [\"@envoy//bazel:antlr.patch\"],\n        strip_prefix = \"antlr4-4.7.2\",\n        urls = [\"https://github.com/antlr/antlr4/archive/4.7.2.tar.gz\"],\n    )\n\ndef _com_github_nghttp2_nghttp2():\n    location = _get_location(\"com_github_nghttp2_nghttp2\")\n    http_archive(\n        name = \"com_github_nghttp2_nghttp2\",\n        build_file_content = BUILD_ALL_CONTENT,\n        patch_args = [\"-p1\"],\n        # This patch cannot be picked up due to ABI rules. Better\n        # solve is likely at the next version-major. 
Discussion at;\n        # https://github.com/nghttp2/nghttp2/pull/1395\n        # https://github.com/envoyproxy/envoy/pull/8572#discussion_r334067786\n        patches = [\"@envoy//bazel/foreign_cc:nghttp2.patch\"],\n        **location\n    )\n    native.bind(\n        name = \"nghttp2\",\n        actual = \"@envoy//bazel/foreign_cc:nghttp2\",\n    )\n\ndef _io_opentracing_cpp():\n    _repository_impl(\n        name = \"io_opentracing_cpp\",\n        patch_args = [\"-p1\"],\n        # Workaround for LSAN false positive in https://github.com/envoyproxy/envoy/issues/7647\n        patches = [\"@envoy//bazel:io_opentracing_cpp.patch\"],\n    )\n    native.bind(\n        name = \"opentracing\",\n        actual = \"@io_opentracing_cpp//:opentracing\",\n    )\n\ndef _com_lightstep_tracer_cpp():\n    _repository_impl(\"com_lightstep_tracer_cpp\")\n    native.bind(\n        name = \"lightstep\",\n        actual = \"@com_lightstep_tracer_cpp//:manual_tracer_lib\",\n    )\n\ndef _com_github_datadog_dd_opentracing_cpp():\n    _repository_impl(\"com_github_datadog_dd_opentracing_cpp\")\n    _repository_impl(\n        name = \"com_github_msgpack_msgpack_c\",\n        build_file = \"@com_github_datadog_dd_opentracing_cpp//:bazel/external/msgpack.BUILD\",\n    )\n    native.bind(\n        name = \"dd_opentracing_cpp\",\n        actual = \"@com_github_datadog_dd_opentracing_cpp//:dd_opentracing_cpp\",\n    )\n\ndef _com_github_tencent_rapidjson():\n    _repository_impl(\n        name = \"com_github_tencent_rapidjson\",\n        build_file = \"@envoy//bazel/external:rapidjson.BUILD\",\n    )\n    native.bind(\n        name = \"rapidjson\",\n        actual = \"@com_github_tencent_rapidjson//:rapidjson\",\n    )\n\ndef _com_github_nodejs_http_parser():\n    _repository_impl(\n        name = \"com_github_nodejs_http_parser\",\n        build_file = \"@envoy//bazel/external:http-parser.BUILD\",\n    )\n    native.bind(\n        name = \"http_parser\",\n        actual = 
\"@com_github_nodejs_http_parser//:http_parser\",\n    )\n\ndef _com_google_googletest():\n    _repository_impl(\"com_google_googletest\")\n    native.bind(\n        name = \"googletest\",\n        actual = \"@com_google_googletest//:gtest\",\n    )\n\n# TODO(jmarantz): replace the use of bind and external_deps with just\n# the direct Bazel path at all sites.  This will make it easier to\n# pull in more bits of abseil as needed, and is now the preferred\n# method for pure Bazel deps.\ndef _com_google_absl():\n    _repository_impl(\"com_google_absl\")\n    native.bind(\n        name = \"abseil_any\",\n        actual = \"@com_google_absl//absl/types:any\",\n    )\n    native.bind(\n        name = \"abseil_base\",\n        actual = \"@com_google_absl//absl/base:base\",\n    )\n\n    # Bind for grpc.\n    native.bind(\n        name = \"absl-base\",\n        actual = \"@com_google_absl//absl/base\",\n    )\n    native.bind(\n        name = \"abseil_flat_hash_map\",\n        actual = \"@com_google_absl//absl/container:flat_hash_map\",\n    )\n    native.bind(\n        name = \"abseil_flat_hash_set\",\n        actual = \"@com_google_absl//absl/container:flat_hash_set\",\n    )\n    native.bind(\n        name = \"abseil_hash\",\n        actual = \"@com_google_absl//absl/hash:hash\",\n    )\n    native.bind(\n        name = \"abseil_hash_testing\",\n        actual = \"@com_google_absl//absl/hash:hash_testing\",\n    )\n    native.bind(\n        name = \"abseil_inlined_vector\",\n        actual = \"@com_google_absl//absl/container:inlined_vector\",\n    )\n    native.bind(\n        name = \"abseil_memory\",\n        actual = \"@com_google_absl//absl/memory:memory\",\n    )\n    native.bind(\n        name = \"abseil_node_hash_map\",\n        actual = \"@com_google_absl//absl/container:node_hash_map\",\n    )\n    native.bind(\n        name = \"abseil_node_hash_set\",\n        actual = \"@com_google_absl//absl/container:node_hash_set\",\n    )\n    native.bind(\n        name = 
\"abseil_str_format\",\n        actual = \"@com_google_absl//absl/strings:str_format\",\n    )\n    native.bind(\n        name = \"abseil_strings\",\n        actual = \"@com_google_absl//absl/strings:strings\",\n    )\n    native.bind(\n        name = \"abseil_int128\",\n        actual = \"@com_google_absl//absl/numeric:int128\",\n    )\n    native.bind(\n        name = \"abseil_optional\",\n        actual = \"@com_google_absl//absl/types:optional\",\n    )\n    native.bind(\n        name = \"abseil_synchronization\",\n        actual = \"@com_google_absl//absl/synchronization:synchronization\",\n    )\n    native.bind(\n        name = \"abseil_symbolize\",\n        actual = \"@com_google_absl//absl/debugging:symbolize\",\n    )\n    native.bind(\n        name = \"abseil_stacktrace\",\n        actual = \"@com_google_absl//absl/debugging:stacktrace\",\n    )\n\n    # Require abseil_time as an indirect dependency as it is needed by the\n    # direct dependency jwt_verify_lib.\n    native.bind(\n        name = \"abseil_time\",\n        actual = \"@com_google_absl//absl/time:time\",\n    )\n\n    # Bind for grpc.\n    native.bind(\n        name = \"absl-time\",\n        actual = \"@com_google_absl//absl/time:time\",\n    )\n\n    native.bind(\n        name = \"abseil_algorithm\",\n        actual = \"@com_google_absl//absl/algorithm:algorithm\",\n    )\n    native.bind(\n        name = \"abseil_variant\",\n        actual = \"@com_google_absl//absl/types:variant\",\n    )\n    native.bind(\n        name = \"abseil_status\",\n        actual = \"@com_google_absl//absl/status\",\n    )\n\ndef _com_google_protobuf():\n    _repository_impl(\"rules_python\")\n    _repository_impl(\n        \"com_google_protobuf\",\n        patches = [\"@envoy//bazel:protobuf.patch\"],\n        patch_args = [\"-p1\"],\n    )\n\n    native.bind(\n        name = \"protobuf\",\n        actual = \"@com_google_protobuf//:protobuf\",\n    )\n    native.bind(\n        name = \"protobuf_clib\",\n        
actual = \"@com_google_protobuf//:protoc_lib\",\n    )\n    native.bind(\n        name = \"protocol_compiler\",\n        actual = \"@com_google_protobuf//:protoc\",\n    )\n    native.bind(\n        name = \"protoc\",\n        actual = \"@com_google_protobuf//:protoc\",\n    )\n\n    # Needed for `bazel fetch` to work with @com_google_protobuf\n    # https://github.com/google/protobuf/blob/v3.6.1/util/python/BUILD#L6-L9\n    native.bind(\n        name = \"python_headers\",\n        actual = \"@com_google_protobuf//util/python:python_headers\",\n    )\n\ndef _io_opencensus_cpp():\n    location = _get_location(\"io_opencensus_cpp\")\n    http_archive(\n        name = \"io_opencensus_cpp\",\n        **location\n    )\n    native.bind(\n        name = \"opencensus_trace\",\n        actual = \"@io_opencensus_cpp//opencensus/trace\",\n    )\n    native.bind(\n        name = \"opencensus_trace_b3\",\n        actual = \"@io_opencensus_cpp//opencensus/trace:b3\",\n    )\n    native.bind(\n        name = \"opencensus_trace_cloud_trace_context\",\n        actual = \"@io_opencensus_cpp//opencensus/trace:cloud_trace_context\",\n    )\n    native.bind(\n        name = \"opencensus_trace_grpc_trace_bin\",\n        actual = \"@io_opencensus_cpp//opencensus/trace:grpc_trace_bin\",\n    )\n    native.bind(\n        name = \"opencensus_trace_trace_context\",\n        actual = \"@io_opencensus_cpp//opencensus/trace:trace_context\",\n    )\n    native.bind(\n        name = \"opencensus_exporter_ocagent\",\n        actual = \"@io_opencensus_cpp//opencensus/exporters/trace/ocagent:ocagent_exporter\",\n    )\n    native.bind(\n        name = \"opencensus_exporter_stdout\",\n        actual = \"@io_opencensus_cpp//opencensus/exporters/trace/stdout:stdout_exporter\",\n    )\n    native.bind(\n        name = \"opencensus_exporter_stackdriver\",\n        actual = \"@io_opencensus_cpp//opencensus/exporters/trace/stackdriver:stackdriver_exporter\",\n    )\n    native.bind(\n        name = 
\"opencensus_exporter_zipkin\",\n        actual = \"@io_opencensus_cpp//opencensus/exporters/trace/zipkin:zipkin_exporter\",\n    )\n\ndef _com_github_curl():\n    # Used by OpenCensus Zipkin exporter.\n    location = _get_location(\"com_github_curl\")\n    http_archive(\n        name = \"com_github_curl\",\n        build_file_content = BUILD_ALL_CONTENT + \"\"\"\ncc_library(name = \"curl\", visibility = [\"//visibility:public\"], deps = [\"@envoy//bazel/foreign_cc:curl\"])\n\"\"\",\n        **location\n    )\n    native.bind(\n        name = \"curl\",\n        actual = \"@envoy//bazel/foreign_cc:curl\",\n    )\n\ndef _com_googlesource_chromium_v8():\n    location = _get_location(\"com_googlesource_chromium_v8\")\n    genrule_repository(\n        name = \"com_googlesource_chromium_v8\",\n        genrule_cmd_file = \"@envoy//bazel/external:wee8.genrule_cmd\",\n        build_file = \"@envoy//bazel/external:wee8.BUILD\",\n        patches = [\"@envoy//bazel/external:wee8.patch\"],\n        **location\n    )\n    native.bind(\n        name = \"wee8\",\n        actual = \"@com_googlesource_chromium_v8//:wee8\",\n    )\n\ndef _com_googlesource_quiche():\n    location = REPOSITORY_LOCATIONS[\"com_googlesource_quiche\"]\n    genrule_repository(\n        name = \"com_googlesource_quiche\",\n        urls = location[\"urls\"],\n        sha256 = location[\"sha256\"],\n        genrule_cmd_file = \"@envoy//bazel/external:quiche.genrule_cmd\",\n        build_file = \"@envoy//bazel/external:quiche.BUILD\",\n    )\n    native.bind(\n        name = \"quiche_common_platform\",\n        actual = \"@com_googlesource_quiche//:quiche_common_platform\",\n    )\n    native.bind(\n        name = \"quiche_http2_platform\",\n        actual = \"@com_googlesource_quiche//:http2_platform\",\n    )\n    native.bind(\n        name = \"quiche_spdy_platform\",\n        actual = \"@com_googlesource_quiche//:spdy_platform\",\n    )\n    native.bind(\n        name = \"quiche_quic_platform\",\n        
actual = \"@com_googlesource_quiche//:quic_platform\",\n    )\n    native.bind(\n        name = \"quiche_quic_platform_base\",\n        actual = \"@com_googlesource_quiche//:quic_platform_base\",\n    )\n\ndef _com_googlesource_googleurl():\n    _repository_impl(\n        name = \"com_googlesource_googleurl\",\n    )\n    native.bind(\n        name = \"googleurl\",\n        actual = \"@com_googlesource_googleurl//url:url\",\n    )\n\ndef _org_llvm_releases_compiler_rt():\n    _repository_impl(\n        name = \"org_llvm_releases_compiler_rt\",\n        build_file = \"@envoy//bazel/external:compiler_rt.BUILD\",\n    )\n\ndef _com_github_grpc_grpc():\n    _repository_impl(\"com_github_grpc_grpc\")\n    _repository_impl(\"build_bazel_rules_apple\")\n\n    # Rebind some stuff to match what the gRPC Bazel is expecting.\n    native.bind(\n        name = \"protobuf_headers\",\n        actual = \"@com_google_protobuf//:protobuf_headers\",\n    )\n    native.bind(\n        name = \"libssl\",\n        actual = \"//external:ssl\",\n    )\n    native.bind(\n        name = \"cares\",\n        actual = \"//external:ares\",\n    )\n\n    native.bind(\n        name = \"grpc\",\n        actual = \"@com_github_grpc_grpc//:grpc++\",\n    )\n\n    native.bind(\n        name = \"grpc_health_proto\",\n        actual = \"@envoy//bazel:grpc_health_proto\",\n    )\n\n    native.bind(\n        name = \"grpc_alts_fake_handshaker_server\",\n        actual = \"@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:fake_handshaker_lib\",\n    )\n\n    native.bind(\n        name = \"grpc_alts_handshaker_proto\",\n        actual = \"@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:handshaker_proto\",\n    )\n\n    native.bind(\n        name = \"grpc_alts_transport_security_common_proto\",\n        actual = \"@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:transport_security_common_proto\",\n    )\n\ndef _upb():\n    _repository_impl(\n        name = \"upb\",\n        patches 
= [\"@envoy//bazel:upb.patch\"],\n        patch_args = [\"-p1\"],\n    )\n\n    native.bind(\n        name = \"upb_lib\",\n        actual = \"@upb//:upb\",\n    )\n\ndef _proxy_wasm_cpp_sdk():\n    _repository_impl(name = \"proxy_wasm_cpp_sdk\")\n\ndef _proxy_wasm_cpp_host():\n    _repository_impl(\n        name = \"proxy_wasm_cpp_host\",\n        build_file = \"@envoy//bazel/external:proxy_wasm_cpp_host.BUILD\",\n    )\n\ndef _emscripten_toolchain():\n    _repository_impl(\n        name = \"emscripten_toolchain\",\n        build_file_content = _build_all_content(exclude = [\n            \"upstream/emscripten/cache/is_vanilla.txt\",\n            \".emscripten_sanity\",\n        ]),\n        patch_cmds = REPOSITORY_LOCATIONS[\"emscripten_toolchain\"][\"patch_cmds\"],\n    )\n\ndef _com_github_google_jwt_verify():\n    _repository_impl(\"com_github_google_jwt_verify\")\n\n    native.bind(\n        name = \"jwt_verify_lib\",\n        actual = \"@com_github_google_jwt_verify//:jwt_verify_lib\",\n    )\n\ndef _com_github_luajit_luajit():\n    location = _get_location(\"com_github_luajit_luajit\")\n    http_archive(\n        name = \"com_github_luajit_luajit\",\n        build_file_content = BUILD_ALL_CONTENT,\n        patches = [\"@envoy//bazel/foreign_cc:luajit.patch\"],\n        patch_args = [\"-p1\"],\n        patch_cmds = [\"chmod u+x build.py\"],\n        **location\n    )\n\n    native.bind(\n        name = \"luajit\",\n        actual = \"@envoy//bazel/foreign_cc:luajit\",\n    )\n\ndef _com_github_moonjit_moonjit():\n    location = _get_location(\"com_github_moonjit_moonjit\")\n    http_archive(\n        name = \"com_github_moonjit_moonjit\",\n        build_file_content = BUILD_ALL_CONTENT,\n        patches = [\"@envoy//bazel/foreign_cc:moonjit.patch\"],\n        patch_args = [\"-p1\"],\n        patch_cmds = [\"chmod u+x build.py\"],\n        **location\n    )\n\n    native.bind(\n        name = \"moonjit\",\n        actual = 
\"@envoy//bazel/foreign_cc:moonjit\",\n    )\n\ndef _com_github_google_tcmalloc():\n    _repository_impl(\n        name = \"com_github_google_tcmalloc\",\n    )\n\n    native.bind(\n        name = \"tcmalloc\",\n        actual = \"@com_github_google_tcmalloc//tcmalloc\",\n    )\n\ndef _com_github_gperftools_gperftools():\n    location = _get_location(\"com_github_gperftools_gperftools\")\n    http_archive(\n        name = \"com_github_gperftools_gperftools\",\n        build_file_content = BUILD_ALL_CONTENT,\n        **location\n    )\n\n    native.bind(\n        name = \"gperftools\",\n        actual = \"@envoy//bazel/foreign_cc:gperftools\",\n    )\n\ndef _org_llvm_llvm():\n    location = _get_location(\"org_llvm_llvm\")\n    http_archive(\n        name = \"org_llvm_llvm\",\n        build_file_content = BUILD_ALL_CONTENT,\n        patch_args = [\"-p1\"],\n        patches = [\"@envoy//bazel/foreign_cc:llvm.patch\"],\n        **location\n    )\n    native.bind(\n        name = \"llvm\",\n        actual = \"@envoy//bazel/foreign_cc:llvm\",\n    )\n\ndef _com_github_wavm_wavm():\n    location = _get_location(\"com_github_wavm_wavm\")\n    http_archive(\n        name = \"com_github_wavm_wavm\",\n        build_file_content = BUILD_ALL_CONTENT,\n        **location\n    )\n    native.bind(\n        name = \"wavm\",\n        actual = \"@envoy//bazel/foreign_cc:wavm\",\n    )\n\ndef _kafka_deps():\n    # This archive contains Kafka client source code.\n    # We are using request/response message format files to generate parser code.\n    KAFKASOURCE_BUILD_CONTENT = \"\"\"\nfilegroup(\n    name = \"request_protocol_files\",\n    srcs = glob([\"*Request.json\"]),\n    visibility = [\"//visibility:public\"],\n)\nfilegroup(\n    name = \"response_protocol_files\",\n    srcs = glob([\"*Response.json\"]),\n    visibility = [\"//visibility:public\"],\n)\n    \"\"\"\n    http_archive(\n        name = \"kafka_source\",\n        build_file_content = KAFKASOURCE_BUILD_CONTENT,\n       
 patches = [\"@envoy//bazel/external:kafka_int32.patch\"],\n        **_get_location(\"kafka_source\")\n    )\n\n    # This archive provides Kafka (and Zookeeper) binaries, that are used during Kafka integration\n    # tests.\n    http_archive(\n        name = \"kafka_server_binary\",\n        build_file_content = BUILD_ALL_CONTENT,\n        **_get_location(\"kafka_server_binary\")\n    )\n\n    # This archive provides Kafka client in Python, so we can use it to interact with Kafka server\n    # during integration tests.\n    http_archive(\n        name = \"kafka_python_client\",\n        build_file_content = BUILD_ALL_CONTENT,\n        **_get_location(\"kafka_python_client\")\n    )\n\ndef _foreign_cc_dependencies():\n    _repository_impl(\"rules_foreign_cc\")\n\ndef _is_linux(ctxt):\n    return ctxt.os.name == \"linux\"\n\ndef _is_arch(ctxt, arch):\n    res = ctxt.execute([\"uname\", \"-m\"])\n    return arch in res.stdout\n\ndef _is_linux_ppc(ctxt):\n    return _is_linux(ctxt) and _is_arch(ctxt, \"ppc\")\n\ndef _is_linux_s390x(ctxt):\n    return _is_linux(ctxt) and _is_arch(ctxt, \"s390x\")\n\ndef _is_linux_x86_64(ctxt):\n    return _is_linux(ctxt) and _is_arch(ctxt, \"x86_64\")\n"
  },
  {
    "path": "bazel/repositories_extra.bzl",
    "content": "load(\"@rules_python//python:repositories.bzl\", \"py_repositories\")\nload(\"@rules_python//python:pip.bzl\", \"pip3_import\", \"pip_repositories\")\n\n# Python dependencies.\ndef _python_deps():\n    py_repositories()\n    pip_repositories()\n\n    pip3_import(\n        name = \"config_validation_pip3\",\n        requirements = \"@envoy//tools/config_validation:requirements.txt\",\n        extra_pip_args = [\"--require-hashes\"],\n\n        # project_name = \"PyYAML\",\n        # project_url = \"https://github.com/yaml/pyyaml\",\n        # version = \"5.3.1\",\n        # last_update = \"2020-03-18\"\n        # use_category = [\"other\"],\n        # cpe = \"cpe:2.3:a:pyyaml:pyyaml:*\",\n    )\n    pip3_import(\n        name = \"configs_pip3\",\n        requirements = \"@envoy//configs:requirements.txt\",\n        extra_pip_args = [\"--require-hashes\"],\n\n        # project_name = \"Jinja\",\n        # project_url = \"http://palletsprojects.com/p/jinja\",\n        # version = \"2.11.2\",\n        # last_update = \"2020-04-13\"\n        # use_category = [\"test\"],\n        # cpe = \"cpe:2.3:a:palletsprojects:jinja:*\",\n\n        # project_name = \"MarkupSafe\",\n        # project_url = \"https://markupsafe.palletsprojects.com/en/1.1.x/\",\n        # version = \"1.1.1\",\n        # last_update = \"2019-02-23\"\n        # use_category = [\"test\"],\n    )\n    pip3_import(\n        name = \"kafka_pip3\",\n        requirements = \"@envoy//source/extensions/filters/network/kafka:requirements.txt\",\n        extra_pip_args = [\"--require-hashes\"],\n\n        # project_name = \"Jinja\",\n        # project_url = \"http://palletsprojects.com/p/jinja\",\n        # version = \"2.11.2\",\n        # last_update = \"2020-04-13\"\n        # use_category = [\"test\"],\n        # cpe = \"cpe:2.3:a:palletsprojects:jinja:*\",\n\n        # project_name = \"MarkupSafe\",\n        # project_url = \"https://markupsafe.palletsprojects.com/en/1.1.x/\",\n        # 
version = \"1.1.1\",\n        # last_update = \"2019-02-23\"\n        # use_category = [\"test\"],\n    )\n    pip3_import(\n        name = \"headersplit_pip3\",\n        requirements = \"@envoy//tools/envoy_headersplit:requirements.txt\",\n        extra_pip_args = [\"--require-hashes\"],\n\n        # project_name = \"Clang\",\n        # project_url = \"https://clang.llvm.org/\",\n        # version = \"10.0.1\",\n        # last_update = \"2020-07-21\"\n        # use_category = [\"other\"],\n        # cpe = \"cpe:2.3:a:llvm:clang:*\",\n    )\n    pip3_import(\n        name = \"protodoc_pip3\",\n        requirements = \"@envoy//tools/protodoc:requirements.txt\",\n        extra_pip_args = [\"--require-hashes\"],\n\n        # project_name = \"PyYAML\",\n        # project_url = \"https://github.com/yaml/pyyaml\",\n        # version = \"5.3.1\",\n        # last_update = \"2020-03-18\"\n        # use_category = [\"other\"],\n        # cpe = \"cpe:2.3:a:pyyaml:pyyaml:*\",\n    )\n    pip3_import(\n        name = \"thrift_pip3\",\n        requirements = \"@envoy//test/extensions/filters/network/thrift_proxy:requirements.txt\",\n        extra_pip_args = [\"--require-hashes\"],\n\n        # project_name = \"Apache Thrift\",\n        # project_url = \"http://thrift.apache.org/\",\n        # version = \"0.11.0\",\n        # last_update = \"2017-12-07\"\n        # use_category = [\"dataplane\"],\n        # cpe = \"cpe:2.3:a:apache:thrift:*\",\n\n        # project_name = \"Six: Python 2 and 3 Compatibility Library\",\n        # project_url = \"https://six.readthedocs.io/\",\n        # version = \"1.15.0\",\n        # last_update = \"2020-05-21\"\n        # use_category = [\"dataplane\"],\n    )\n\n# Envoy deps that rely on a first stage of dependency loading in envoy_dependencies().\ndef envoy_dependencies_extra():\n    _python_deps()\n"
  },
  {
    "path": "bazel/repository_locations.bzl",
    "content": "# Validation of content in this file is done on the bazel/repositories.bzl file to make it free of bazel\n# constructs. This is to allow this file to be loaded into Python based build and maintenance tools.\n\n# Envoy dependencies may be annotated with the following attributes:\nDEPENDENCY_ANNOTATIONS = [\n    # List of the categories describing how the dependency is being used. This attribute is used\n    # for automatic tracking of security posture of Envoy's dependencies.\n    # Possible values are documented in the USE_CATEGORIES list below.\n    # This attribute is mandatory for each dependency.\n    \"use_category\",\n\n    # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID\n    # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is preferred. See\n    # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements\n    # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use \"N/A\" for dependencies without CPE assigned.\n    # This attribute is optional for components with use categories listed in the\n    # USE_CATEGORIES_WITH_CPE_OPTIONAL\n    \"cpe\",\n]\n\n# NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed\n# to be declared.\nUSE_CATEGORIES = [\n    # This dependency is used in API protos.\n    \"api\",\n    # This dependency is used in build process.\n    \"build\",\n    # This dependency is used to process xDS requests.\n    \"controlplane\",\n    # This dependency is used in processing downstream or upstream requests (core).\n    \"dataplane_core\",\n    # This dependency is used in processing downstream or upstream requests (extensions).\n    \"dataplane_ext\",\n    # This dependency is used for logging, metrics or tracing (core). It may process untrusted input.\n    \"observability_core\",\n    # This dependency is used for logging, metrics or tracing (extensions). 
It may process untrusted input.\n    \"observability_ext\",\n    # This dependency does not handle untrusted data and is used for various utility purposes.\n    \"other\",\n    # This dependency is used only in tests.\n    \"test_only\",\n]\n\n# Components with these use categories are not required to specify the 'cpe'\n# and 'last_updated' annotation.\nUSE_CATEGORIES_WITH_CPE_OPTIONAL = [\"build\", \"other\", \"test_only\"]\n\nDEPENDENCY_REPOSITORIES_SPEC = dict(\n    bazel_compdb = dict(\n        project_name = \"bazel-compilation-database\",\n        project_desc = \"Clang JSON compilation database support for Bazel\",\n        project_url = \"https://github.com/grailbio/bazel-compilation-database\",\n        version = \"0.4.5\",\n        sha256 = \"bcecfd622c4ef272fd4ba42726a52e140b961c4eac23025f18b346c968a8cfb4\",\n        strip_prefix = \"bazel-compilation-database-{version}\",\n        urls = [\"https://github.com/grailbio/bazel-compilation-database/archive/{version}.tar.gz\"],\n        last_updated = \"2020-08-01\",\n        use_category = [\"build\"],\n    ),\n    bazel_gazelle = dict(\n        project_name = \"Gazelle\",\n        project_desc = \"Bazel BUILD file generator for Go projects\",\n        project_url = \"https://github.com/bazelbuild/bazel-gazelle\",\n        version = \"0.21.1\",\n        sha256 = \"cdb02a887a7187ea4d5a27452311a75ed8637379a1287d8eeb952138ea485f7d\",\n        urls = [\"https://github.com/bazelbuild/bazel-gazelle/releases/download/v{version}/bazel-gazelle-v{version}.tar.gz\"],\n        last_updated = \"2020-05-28\",\n        use_category = [\"build\"],\n    ),\n    bazel_toolchains = dict(\n        project_name = \"bazel-toolchains\",\n        project_desc = \"Bazel toolchain configs for RBE\",\n        project_url = \"https://github.com/bazelbuild/bazel-toolchains\",\n        version = \"3.4.1\",\n        sha256 = \"7ebb200ed3ca3d1f7505659c7dfed01c4b5cb04c3a6f34140726fe22f5d35e86\",\n        strip_prefix = 
\"bazel-toolchains-{version}\",\n        urls = [\n            \"https://github.com/bazelbuild/bazel-toolchains/releases/download/{version}/bazel-toolchains-{version}.tar.gz\",\n            \"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/{version}.tar.gz\",\n        ],\n        last_updated = \"2020-08-10\",\n        use_category = [\"build\"],\n    ),\n    build_bazel_rules_apple = dict(\n        project_name = \"Apple Rules for Bazel\",\n        project_desc = \"Bazel rules for Apple platforms\",\n        project_url = \"https://github.com/bazelbuild/rules_apple\",\n        version = \"0.19.0\",\n        sha256 = \"7a7afdd4869bb201c9352eed2daf37294d42b093579b70423490c1b4d4f6ce42\",\n        urls = [\"https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz\"],\n        last_updated = \"2020-10-10\",\n        use_category = [\"build\"],\n    ),\n    envoy_build_tools = dict(\n        project_name = \"envoy-build-tools\",\n        project_desc = \"Common build tools shared by the Envoy/UDPA ecosystem\",\n        project_url = \"https://github.com/envoyproxy/envoy-build-tools\",\n        version = \"0ba5aa98a6e6c5efcc63f53602f69548d2417683\",\n        sha256 = \"dc3881d16e7b0c855a7279f5757d55e4aa55fe2befbd9e34215b971818622f9e\",\n        strip_prefix = \"envoy-build-tools-{version}\",\n        urls = [\"https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz\"],\n        last_updated = \"2020-10-01\",\n        use_category = [\"build\"],\n    ),\n    boringssl = dict(\n        project_name = \"BoringSSL\",\n        project_desc = \"Minimal OpenSSL fork\",\n        project_url = \"https://github.com/google/boringssl\",\n        version = \"597b810379e126ae05d32c1d94b1a9464385acd0\",\n        sha256 = \"1ea42456c020daf0a9b0f9e8d8bc3a403c9314f4f54230c617257af996cd5fa6\",\n        strip_prefix = \"boringssl-{version}\",\n        # To update BoringSSL, which tracks Chromium releases:\n 
       # 1. Open https://omahaproxy.appspot.com/ and note <current_version> of linux/stable release.\n        # 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags/<current_version>/DEPS and note <boringssl_revision>.\n        # 3. Find a commit in BoringSSL's \"master-with-bazel\" branch that merges <boringssl_revision>.\n        #\n        # chromium-85.0.4183.83\n        urls = [\"https://github.com/google/boringssl/archive/{version}.tar.gz\"],\n        use_category = [\"controlplane\", \"dataplane_core\"],\n        last_updated = \"2020-06-23\",\n        cpe = \"cpe:2.3:a:google:boringssl:*\",\n    ),\n    boringssl_fips = dict(\n        project_name = \"BoringSSL (FIPS)\",\n        project_desc = \"FIPS compliant BoringSSL\",\n        project_url = \"https://boringssl.googlesource.com/boringssl/+/master/crypto/fipsmodule/FIPS.md\",\n        version = \"fips-20190808\",\n        sha256 = \"3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8\",\n        urls = [\"https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz\"],\n        use_category = [\"controlplane\", \"dataplane_core\"],\n        last_updated = \"2019-08-08\",\n        cpe = \"cpe:2.3:a:google:boringssl:*\",\n    ),\n    com_google_absl = dict(\n        project_name = \"Abseil\",\n        project_desc = \"Open source collection of C++ libraries drawn from the most fundamental pieces of Google’s internal codebase\",\n        project_url = \"https://abseil.io/\",\n        version = \"093cc27604df1c4a179b73bc3f00d4d1ce2ce113\",\n        sha256 = \"55d33c75aff05a8c4a55bdf0eddad66c71a963107bc2add96cf8eb88ddb47a80\",\n        strip_prefix = \"abseil-cpp-{version}\",\n        urls = [\"https://github.com/abseil/abseil-cpp/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        last_updated = \"2020-10-01\",\n        cpe = \"N/A\",\n    ),\n    
com_github_c_ares_c_ares = dict(\n        project_name = \"c-ares\",\n        project_desc = \"C library for asynchronous DNS requests\",\n        project_url = \"https://c-ares.haxx.se/\",\n        version = \"1.16.1\",\n        sha256 = \"d08312d0ecc3bd48eee0a4cc0d2137c9f194e0a28de2028928c0f6cae85f86ce\",\n        strip_prefix = \"c-ares-{version}\",\n        urls = [\"https://github.com/c-ares/c-ares/releases/download/cares-{underscore_version}/c-ares-{version}.tar.gz\"],\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        last_updated = \"2020-05-11\",\n        cpe = \"cpe:2.3:a:c-ares_project:c-ares:*\",\n    ),\n    com_github_circonus_labs_libcircllhist = dict(\n        project_name = \"libcircllhist\",\n        project_desc = \"An implementation of Circonus log-linear histograms\",\n        project_url = \"https://github.com/circonus-labs/libcircllhist\",\n        version = \"63a16dd6f2fc7bc841bb17ff92be8318df60e2e1\",\n        sha256 = \"8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c\",\n        strip_prefix = \"libcircllhist-{version}\",\n        urls = [\"https://github.com/circonus-labs/libcircllhist/archive/{version}.tar.gz\"],\n        use_category = [\"controlplane\", \"observability_core\", \"dataplane_core\"],\n        last_updated = \"2019-02-11\",\n        cpe = \"N/A\",\n    ),\n    com_github_cyan4973_xxhash = dict(\n        project_name = \"xxHash\",\n        project_desc = \"Extremely fast hash algorithm\",\n        project_url = \"https://github.com/Cyan4973/xxHash\",\n        version = \"0.7.3\",\n        sha256 = \"952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7\",\n        strip_prefix = \"xxHash-{version}\",\n        urls = [\"https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz\"],\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        last_updated = \"2020-03-04\",\n        cpe = \"N/A\",\n    ),\n    com_github_envoyproxy_sqlparser = dict(\n        
project_name = \"C++ SQL Parser Library\",\n        project_desc = \"Forked from Hyrise SQL Parser\",\n        project_url = \"https://github.com/envoyproxy/sql-parser\",\n        version = \"3b40ba2d106587bdf053a292f7e3bb17e818a57f\",\n        sha256 = \"96c10c8e950a141a32034f19b19cdeb1da48fe859cf96ae5e19f894f36c62c71\",\n        strip_prefix = \"sql-parser-{version}\",\n        urls = [\"https://github.com/envoyproxy/sql-parser/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.filters.network.mysql_proxy\",\n            \"envoy.filters.network.postgres_proxy\",\n        ],\n        last_updated = \"2020-06-10\",\n        cpe = \"N/A\",\n    ),\n    com_github_mirror_tclap = dict(\n        project_name = \"tclap\",\n        project_desc = \"Small, flexible library that provides a simple interface for defining and accessing command line arguments\",\n        project_url = \"http://tclap.sourceforge.net\",\n        version = \"1-2-1\",\n        sha256 = \"f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f\",\n        strip_prefix = \"tclap-tclap-{version}-release-final\",\n        urls = [\"https://github.com/mirror/tclap/archive/tclap-{version}-release-final.tar.gz\"],\n        last_updated = \"2017-11-10\",\n        use_category = [\"other\"],\n    ),\n    com_github_fmtlib_fmt = dict(\n        project_name = \"fmt\",\n        project_desc = \"{fmt} is an open-source formatting library providing a fast and safe alternative to C stdio and C++ iostreams\",\n        project_url = \"https://fmt.dev\",\n        version = \"7.0.3\",\n        sha256 = \"decfdf9ad274070fa85f26407b816f5a4d82205ae86bac1990be658d0795ea4d\",\n        strip_prefix = \"fmt-{version}\",\n        urls = [\"https://github.com/fmtlib/fmt/releases/download/{version}/fmt-{version}.zip\"],\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        last_updated = \"2020-08-07\",\n        cpe = 
\"cpe:2.3:a:fmt:fmt:*\",\n    ),\n    com_github_gabime_spdlog = dict(\n        project_name = \"spdlog\",\n        project_desc = \"Very fast, header-only/compiled, C++ logging library\",\n        project_url = \"https://github.com/gabime/spdlog\",\n        version = \"1.7.0\",\n        sha256 = \"f0114a4d3c88be9e696762f37a7c379619443ce9d668546c61b21d41affe5b62\",\n        strip_prefix = \"spdlog-{version}\",\n        urls = [\"https://github.com/gabime/spdlog/archive/v{version}.tar.gz\"],\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        last_updated = \"2020-07-09\",\n        cpe = \"N/A\",\n    ),\n    com_github_google_libprotobuf_mutator = dict(\n        project_name = \"libprotobuf-mutator\",\n        project_desc = \"Library to randomly mutate protobuffers\",\n        project_url = \"https://github.com/google/libprotobuf-mutator\",\n        version = \"8942a9ba43d8bb196230c321d46d6a137957a719\",\n        sha256 = \"49a26dbe77c75f2eca1dd8a9fbdb31c4496d9af42df027ff57569c5a7a5d980d\",\n        strip_prefix = \"libprotobuf-mutator-{version}\",\n        urls = [\"https://github.com/google/libprotobuf-mutator/archive/{version}.tar.gz\"],\n        last_updated = \"2020-08-18\",\n        use_category = [\"test_only\"],\n    ),\n    com_github_google_tcmalloc = dict(\n        project_name = \"tcmalloc\",\n        project_desc = \"Fast, multi-threaded malloc implementation\",\n        project_url = \"https://github.com/google/tcmalloc\",\n        version = \"d1311bf409db47c3441d3de6ea07d768c6551dec\",\n        sha256 = \"e22444b6544edd81f11c987dd5e482a2e00bbff717badb388779ca57525dad50\",\n        strip_prefix = \"tcmalloc-{version}\",\n        urls = [\"https://github.com/google/tcmalloc/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        last_updated = \"2020-09-16\",\n        cpe = \"N/A\",\n    ),\n    com_github_gperftools_gperftools = dict(\n        project_name = \"gperftools\",\n        
project_desc = \"tcmalloc and profiling libraries\",\n        project_url = \"https://github.com/gperftools/gperftools\",\n        version = \"2.8\",\n        sha256 = \"240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e\",\n        strip_prefix = \"gperftools-{version}\",\n        urls = [\"https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz\"],\n        last_updated = \"2020-07-06\",\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        cpe = \"cpe:2.3:a:gperftools_project:gperftools:*\",\n    ),\n    com_github_grpc_grpc = dict(\n        project_name = \"gRPC\",\n        project_desc = \"gRPC C core library\",\n        project_url = \"https://grpc.io\",\n        # TODO(JimmyCYJ): Bump to release 1.27\n        # This sha on grpc:v1.25.x branch is specifically chosen to fix gRPC STS call credential options.\n        version = \"d8f4928fa779f6005a7fe55a176bdb373b0f910f\",\n        sha256 = \"bbc8f020f4e85ec029b047fab939b8c81f3d67254b5c724e1003a2bc49ddd123\",\n        strip_prefix = \"grpc-{version}\",\n        urls = [\"https://github.com/grpc/grpc/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        last_updated = \"2020-02-11\",\n        cpe = \"cpe:2.3:a:grpc:grpc:*\",\n    ),\n    com_github_luajit_luajit = dict(\n        project_name = \"LuaJIT\",\n        project_desc = \"Just-In-Time compiler for Lua\",\n        project_url = \"https://luajit.org\",\n        version = \"2.1.0-beta3\",\n        sha256 = \"409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8\",\n        strip_prefix = \"LuaJIT-{version}\",\n        urls = [\"https://github.com/LuaJIT/LuaJIT/archive/v{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\"envoy.filters.http.lua\"],\n        last_updated = \"2017-11-07\",\n        cpe = \"cpe:2.3:a:luajit:luajit:*\",\n    ),\n    com_github_moonjit_moonjit = 
dict(\n        project_name = \"Moonjit\",\n        project_desc = \"LuaJIT fork with wider platform support\",\n        project_url = \"https://github.com/moonjit/moonjit\",\n        version = \"2.2.0\",\n        sha256 = \"83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6\",\n        strip_prefix = \"moonjit-{version}\",\n        urls = [\"https://github.com/moonjit/moonjit/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\"envoy.filters.http.lua\"],\n        last_updated = \"2020-01-14\",\n        cpe = \"cpe:2.3:a:moonjit_project:moonjit:*\",\n    ),\n    com_github_nghttp2_nghttp2 = dict(\n        project_name = \"Nghttp2\",\n        project_desc = \"Implementation of HTTP/2 and its header compression algorithm HPACK in C\",\n        project_url = \"https://nghttp2.org\",\n        version = \"1.41.0\",\n        sha256 = \"eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8\",\n        strip_prefix = \"nghttp2-{version}\",\n        urls = [\"https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz\"],\n        use_category = [\"controlplane\", \"dataplane_core\"],\n        last_updated = \"2020-06-02\",\n        cpe = \"cpe:2.3:a:nghttp2:nghttp2:*\",\n    ),\n    io_opentracing_cpp = dict(\n        project_name = \"OpenTracing\",\n        project_desc = \"Vendor-neutral APIs and instrumentation for distributed tracing\",\n        project_url = \"https://opentracing.io\",\n        version = \"1.5.1\",\n        sha256 = \"015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301\",\n        strip_prefix = \"opentracing-cpp-{version}\",\n        urls = [\"https://github.com/opentracing/opentracing-cpp/archive/v{version}.tar.gz\"],\n        use_category = [\"observability_ext\"],\n        extensions = [\n            \"envoy.tracers.datadog\",\n            
\"envoy.tracers.dynamic_ot\",\n            \"envoy.tracers.lightstep\",\n        ],\n        last_updated = \"2019-01-16\",\n        cpe = \"N/A\",\n    ),\n    com_lightstep_tracer_cpp = dict(\n        project_name = \"lightstep-tracer-cpp\",\n        project_desc = \"LightStep distributed tracing library for C++\",\n        project_url = \"https://github.com/lightstep/lightstep-tracer-cpp\",\n        version = \"1942b3f142e218ebc143a043f32e3278dafec9aa\",\n        sha256 = \"3238921a8f578beb26c2215cd277e8f6752f3d29b020b881d60d96a240a38aed\",\n        strip_prefix = \"lightstep-tracer-cpp-{version}\",\n        urls = [\"https://github.com/lightstep/lightstep-tracer-cpp/archive/{version}.tar.gz\"],\n        use_category = [\"observability_ext\"],\n        extensions = [\"envoy.tracers.lightstep\"],\n        last_updated = \"2020-08-24\",\n        cpe = \"N/A\",\n    ),\n    com_github_datadog_dd_opentracing_cpp = dict(\n        project_name = \"Datadog OpenTracing C++ Client\",\n        project_desc = \"Datadog OpenTracing C++ Client\",\n        project_url = \"https://github.com/DataDog/dd-opentracing-cpp\",\n        version = \"1.1.5\",\n        sha256 = \"b84fd2fb0bb0578af4901db31d1c0ae909b532a1016fe6534cbe31a6c3ad6924\",\n        strip_prefix = \"dd-opentracing-cpp-{version}\",\n        urls = [\"https://github.com/DataDog/dd-opentracing-cpp/archive/v{version}.tar.gz\"],\n        use_category = [\"observability_ext\"],\n        extensions = [\"envoy.tracers.datadog\"],\n        last_updated = \"2020-05-15\",\n        cpe = \"N/A\",\n    ),\n    com_github_google_benchmark = dict(\n        project_name = \"Benchmark\",\n        project_desc = \"Library to benchmark code snippets\",\n        project_url = \"https://github.com/google/benchmark\",\n        version = \"1.5.1\",\n        sha256 = \"23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2\",\n        strip_prefix = \"benchmark-{version}\",\n        urls = 
[\"https://github.com/google/benchmark/archive/v{version}.tar.gz\"],\n        use_category = [\"test_only\"],\n        last_updated = \"2020-06-09\",\n    ),\n    com_github_libevent_libevent = dict(\n        project_name = \"libevent\",\n        project_desc = \"Event notification library\",\n        project_url = \"https://libevent.org\",\n        # This SHA includes the new \"prepare\" and \"check\" watchers, used for event loop performance\n        # stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition\n        # in the watchers (see https://github.com/libevent/libevent/pull/802).\n        # This also includes the fixes for https://github.com/libevent/libevent/issues/806\n        # and https://github.com/lyft/envoy-mobile/issues/215.\n        # This also includes the fixes for Phantom events with EV_ET (see\n        # https://github.com/libevent/libevent/issues/984).\n        # This also includes the wepoll backend for Windows (see\n        # https://github.com/libevent/libevent/pull/1006)\n        # TODO(adip): Update to v2.2 when it is released.\n        version = \"62c152d9a7cd264b993dad730c4163c6ede2e0a3\",\n        sha256 = \"4c80e5fe044ce5f8055b20a2f141ee32ec2614000f3e95d2aa81611a4c8f5213\",\n        strip_prefix = \"libevent-{version}\",\n        urls = [\"https://github.com/libevent/libevent/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        last_updated = \"2020-07-31\",\n        cpe = \"cpe:2.3:a:libevent_project:libevent:*\",\n    ),\n    # This should be removed, see https://github.com/envoyproxy/envoy/issues/13261.\n    net_zlib = dict(\n        project_name = \"zlib\",\n        project_desc = \"zlib compression library\",\n        project_url = \"https://zlib.net\",\n        version = \"79baebe50e4d6b73ae1f8b603f0ef41300110aa3\",\n        # Use the dev branch of zlib to resolve fuzz bugs and out of bound\n        # errors resulting in crashes in zlib 1.2.11.\n 
       # TODO(asraa): Remove when zlib > 1.2.11 is released.\n        sha256 = \"155a8f8c1a753fb05b16a1b0cc0a0a9f61a78e245f9e0da483d13043b3bcbf2e\",\n        strip_prefix = \"zlib-{version}\",\n        urls = [\"https://github.com/madler/zlib/archive/{version}.tar.gz\"],\n        use_category = [\"controlplane\", \"dataplane_core\"],\n        last_updated = \"2019-04-14\",\n        cpe = \"cpe:2.3:a:gnu:zlib:*\",\n    ),\n    com_github_zlib_ng_zlib_ng = dict(\n        project_name = \"zlib-ng\",\n        project_desc = \"zlib fork (higher performance)\",\n        project_url = \"https://github.com/zlib-ng/zlib-ng\",\n        version = \"193d8fd7dfb7927facab7a3034daa27ad5b9df1c\",\n        sha256 = \"5fe543e8d007b9e7b729f3d6b3a5ee1f9b68d0eef5f6af1393745a4dcd472a98\",\n        strip_prefix = \"zlib-ng-{version}\",\n        urls = [\"https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz\"],\n        use_category = [\"controlplane\", \"dataplane_core\"],\n        last_updated = \"2020-08-16\",\n        cpe = \"N/A\",\n    ),\n    com_github_jbeder_yaml_cpp = dict(\n        project_name = \"yaml-cpp\",\n        project_desc = \"YAML parser and emitter in C++ matching the YAML 1.2 spec\",\n        project_url = \"https://github.com/jbeder/yaml-cpp\",\n        version = \"98acc5a8874faab28b82c28936f4b400b389f5d6\",\n        sha256 = \"79ab7069ef1c7c3632e7ffe095f7185d4c77b64d8035db3c085c239d4fe96d5f\",\n        strip_prefix = \"yaml-cpp-{version}\",\n        urls = [\"https://github.com/jbeder/yaml-cpp/archive/{version}.tar.gz\"],\n        # YAML is also used for runtime as well as controlplane. 
It shouldn't appear on the\n        # dataplane but we can't verify this automatically due to code structure today.\n        use_category = [\"controlplane\", \"dataplane_core\"],\n        last_updated = \"2020-07-28\",\n        cpe = \"cpe:2.3:a:yaml-cpp_project:yaml-cpp:*\",\n    ),\n    com_github_msgpack_msgpack_c = dict(\n        project_name = \"msgpack for C/C++\",\n        project_desc = \"MessagePack is an efficient binary serialization format\",\n        project_url = \"https://github.com/msgpack/msgpack-c\",\n        version = \"3.3.0\",\n        sha256 = \"6e114d12a5ddb8cb11f669f83f32246e484a8addd0ce93f274996f1941c1f07b\",\n        strip_prefix = \"msgpack-{version}\",\n        urls = [\"https://github.com/msgpack/msgpack-c/releases/download/cpp-{version}/msgpack-{version}.tar.gz\"],\n        use_category = [\"observability_ext\"],\n        extensions = [\"envoy.tracers.datadog\"],\n        last_updated = \"2020-06-05\",\n        cpe = \"N/A\",\n    ),\n    com_github_google_jwt_verify = dict(\n        project_name = \"jwt_verify_lib\",\n        project_desc = \"JWT verification library for C++\",\n        project_url = \"https://github.com/google/jwt_verify_lib\",\n        version = \"7276a339af8426724b744216f619c99152f8c141\",\n        sha256 = \"f1fde4f3ebb3b2d841332c7a02a4b50e0529a19709934c63bc6208d1bbe28fb1\",\n        strip_prefix = \"jwt_verify_lib-{version}\",\n        urls = [\"https://github.com/google/jwt_verify_lib/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\"envoy.filters.http.jwt_authn\"],\n        last_updated = \"2020-07-09\",\n        cpe = \"N/A\",\n    ),\n    com_github_nodejs_http_parser = dict(\n        project_name = \"HTTP Parser\",\n        project_desc = \"Parser for HTTP messages written in C\",\n        project_url = \"https://github.com/nodejs/http-parser\",\n        # This SHA includes fix for https://github.com/nodejs/http-parser/issues/517 which allows (opt-in) to 
serve\n        # requests with both Content-Length and Transfer-Encoding: chunked headers set.\n        version = \"4f15b7d510dc7c6361a26a7c6d2f7c3a17f8d878\",\n        sha256 = \"6a12896313ce1ca630cf516a0ee43a79b5f13f5a5d8143f56560ac0b21c98fac\",\n        strip_prefix = \"http-parser-{version}\",\n        urls = [\"https://github.com/nodejs/http-parser/archive/{version}.tar.gz\"],\n        use_category = [\"controlplane\", \"dataplane_core\"],\n        last_updated = \"2020-07-10\",\n        cpe = \"cpe:2.3:a:nodejs:node.js:*\",\n    ),\n    com_github_tencent_rapidjson = dict(\n        project_name = \"RapidJSON\",\n        project_desc = \"Fast JSON parser/generator for C++\",\n        project_url = \"https://rapidjson.org\",\n        version = \"dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1\",\n        sha256 = \"a2faafbc402394df0fa94602df4b5e4befd734aad6bb55dfef46f62fcaf1090b\",\n        strip_prefix = \"rapidjson-{version}\",\n        urls = [\"https://github.com/Tencent/rapidjson/archive/{version}.tar.gz\"],\n        # We're mostly using com_google_protobuf for JSON, but there are some extensions and hard to\n        # disentangle uses on the dataplane, e.g. 
header_formatter, Squash filter.\n        use_category = [\"controlplane\", \"dataplane_core\"],\n        last_updated = \"2019-12-02\",\n        cpe = \"cpe:2.3:a:tencent:rapidjson:*\",\n    ),\n    com_github_twitter_common_lang = dict(\n        project_name = \"twitter.common.lang (Thrift)\",\n        project_desc = \"twitter.common Python language and compatibility facilities\",\n        project_url = \"https://pypi.org/project/twitter.common.lang\",\n        version = \"0.3.9\",\n        sha256 = \"56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1\",\n        strip_prefix = \"twitter.common.lang-{version}/src\",\n        urls = [\"https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-{version}.tar.gz\"],\n        last_updated = \"2018-06-26\",\n        use_category = [\"test_only\"],\n    ),\n    com_github_twitter_common_rpc = dict(\n        project_name = \"twitter.common.rpc (Thrift)\",\n        project_desc = \"twitter.common Thrift helpers including Finagle and SSL transports\",\n        project_url = \"https://pypi.org/project/twitter.common.rpc\",\n        version = \"0.3.9\",\n        sha256 = \"0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514\",\n        strip_prefix = \"twitter.common.rpc-{version}/src\",\n        urls = [\"https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-{version}.tar.gz\"],\n        last_updated = \"2018-06-26\",\n        use_category = [\"test_only\"],\n    ),\n    com_github_twitter_common_finagle_thrift = dict(\n        project_name = \"twitter.common.finagle-thrift\",\n        project_desc = \"twitter.common Thrift stubs for Zipkin RPC tracing support in Finagle\",\n        project_url = \"https://pypi.org/project/twitter.common.finagle-thrift\",\n        version = \"0.3.9\",\n        sha256 = 
\"1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a\",\n        strip_prefix = \"twitter.common.finagle-thrift-{version}/src\",\n        urls = [\"https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-{version}.tar.gz\"],\n        last_updated = \"2018-06-26\",\n        use_category = [\"test_only\"],\n    ),\n    com_google_googletest = dict(\n        project_name = \"Google Test\",\n        project_desc = \"Google's C++ test framework\",\n        project_url = \"https://github.com/google/googletest\",\n        # Pick up fix for MOCK_METHOD compilation with clang-cl for Windows (resolved after 1.10.0)\n        # see https://github.com/google/googletest/issues/2490\n        version = \"a4ab0abb93620ce26efad9de9296b73b16e88588\",\n        sha256 = \"7897bfaa5ad39a479177cfb5c3ce010184dbaee22a7c3727b212282871918751\",\n        strip_prefix = \"googletest-{version}\",\n        urls = [\"https://github.com/google/googletest/archive/{version}.tar.gz\"],\n        last_updated = \"2020-09-10\",\n        use_category = [\"test_only\"],\n    ),\n    com_google_protobuf = dict(\n        project_name = \"Protocol Buffers\",\n        project_desc = \"Language-neutral, platform-neutral extensible mechanism for serializing structured data\",\n        project_url = \"https://developers.google.com/protocol-buffers\",\n        version = \"3.10.1\",\n        sha256 = \"d7cfd31620a352b2ee8c1ed883222a0d77e44346643458e062e86b1d069ace3e\",\n        strip_prefix = \"protobuf-{version}\",\n        urls = [\"https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz\"],\n        use_category = [\"dataplane_core\", \"controlplane\"],\n        last_updated = \"2020-10-24\",\n        cpe = \"cpe:2.3:a:google:protobuf:*\",\n    ),\n    grpc_httpjson_transcoding = dict(\n        project_name = \"grpc-httpjson-transcoding\",\n        project_desc = 
\"Library that supports transcoding so that HTTP/JSON can be converted to gRPC\",\n        project_url = \"https://github.com/grpc-ecosystem/grpc-httpjson-transcoding\",\n        version = \"faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6\",\n        sha256 = \"62c8cb5ea2cca1142cde9d4a0778c52c6022345c3268c60ef81666946b958ad5\",\n        strip_prefix = \"grpc-httpjson-transcoding-{version}\",\n        urls = [\"https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\"envoy.filters.http.grpc_json_transcoder\"],\n        last_updated = \"2020-03-02\",\n        cpe = \"N/A\",\n    ),\n    io_bazel_rules_go = dict(\n        project_name = \"Go rules for Bazel\",\n        project_desc = \"Bazel rules for the Go language\",\n        project_url = \"https://github.com/bazelbuild/rules_go\",\n        version = \"0.23.7\",\n        sha256 = \"0310e837aed522875791750de44408ec91046c630374990edd51827cb169f616\",\n        urls = [\"https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz\"],\n        use_category = [\"build\"],\n        last_updated = \"2020-08-06\",\n    ),\n    rules_cc = dict(\n        project_name = \"C++ rules for Bazel\",\n        project_desc = \"Bazel rules for the C++ language\",\n        project_url = \"https://github.com/bazelbuild/rules_cc\",\n        # TODO(lizan): pin to a point releases when there's a released version.\n        version = \"818289e5613731ae410efb54218a4077fb9dbb03\",\n        sha256 = \"9d48151ea71b3e225adfb6867e6d2c7d0dce46cbdc8710d9a9a628574dfd40a0\",\n        strip_prefix = \"rules_cc-{version}\",\n        urls = [\"https://github.com/bazelbuild/rules_cc/archive/{version}.tar.gz\"],\n        last_updated = \"2020-05-13\",\n        use_category = [\"build\"],\n    ),\n    rules_foreign_cc = dict(\n        project_name = \"Rules for using foreign build systems in Bazel\",\n        project_desc = 
\"Rules for using foreign build systems in Bazel\",\n        project_url = \"https://github.com/bazelbuild/rules_foreign_cc\",\n        version = \"594bf4d7731e606a705f3ad787dd0a70c5a28b30\",\n        sha256 = \"2b1cf88de0b6e0195f6571cfde3a5bd406d11b42117d6adef2395c9525a1902e\",\n        strip_prefix = \"rules_foreign_cc-{version}\",\n        urls = [\"https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz\"],\n        last_updated = \"2020-08-21\",\n        use_category = [\"build\"],\n    ),\n    rules_python = dict(\n        project_name = \"Python rules for Bazel\",\n        project_desc = \"Bazel rules for the Python language\",\n        project_url = \"https://github.com/bazelbuild/rules_python\",\n        # TODO(htuch): revert back to a point releases when pip3_import appears.\n        version = \"a0fbf98d4e3a232144df4d0d80b577c7a693b570\",\n        sha256 = \"76a8fd4e7eca2a3590f816958faa0d83c9b2ce9c32634c5c375bcccf161d3bb5\",\n        strip_prefix = \"rules_python-{version}\",\n        urls = [\"https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz\"],\n        last_updated = \"2020-04-09\",\n        use_category = [\"build\"],\n    ),\n    six = dict(\n        project_name = \"Six\",\n        project_desc = \"Python 2 and 3 compatibility library\",\n        project_url = \"https://pypi.org/project/six\",\n        version = \"1.12.0\",\n        sha256 = \"d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73\",\n        urls = [\"https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-{version}.tar.gz\"],\n        last_updated = \"2019-11-17\",\n        use_category = [\"other\"],\n    ),\n    org_llvm_llvm = dict(\n        project_name = \"LLVM\",\n        project_desc = \"LLVM Compiler Infrastructure\",\n        project_url = \"https://llvm.org\",\n        version = \"10.0\",\n        sha256 = 
\"df83a44b3a9a71029049ec101fb0077ecbbdf5fe41e395215025779099a98fdf\",\n        strip_prefix = \"llvm-{version}.0.src\",\n        urls = [\"https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}.0/llvm-{version}.0.src.tar.xz\"],\n        last_updated = \"2020-03-24\",\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.access_loggers.wasm\",\n            \"envoy.bootstrap.wasm\",\n            \"envoy.filters.http.wasm\",\n            \"envoy.filters.network.wasm\",\n            \"envoy.stat_sinks.wasm\",\n        ],\n        cpe = \"N/A\",\n    ),\n    com_github_wavm_wavm = dict(\n        project_name = \"WAVM\",\n        project_desc = \"WebAssembly Virtual Machine\",\n        project_url = \"https://wavm.github.io\",\n        version = \"e8155f1f3af88b4d08802716a7054950ef18d827\",\n        sha256 = \"cc3fcaf05d57010c9cf8eb920234679dede6c780137b55001fd34e4d14806f7c\",\n        strip_prefix = \"WAVM-{version}\",\n        urls = [\"https://github.com/WAVM/WAVM/archive/{version}.tar.gz\"],\n        last_updated = \"2020-07-06\",\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.access_loggers.wasm\",\n            \"envoy.bootstrap.wasm\",\n            \"envoy.filters.http.wasm\",\n            \"envoy.filters.network.wasm\",\n            \"envoy.stat_sinks.wasm\",\n        ],\n        cpe = \"N/A\",\n    ),\n    io_opencensus_cpp = dict(\n        project_name = \"OpenCensus C++\",\n        project_desc = \"OpenCensus tracing library\",\n        project_url = \"https://github.com/census-instrumentation/opencensus-cpp\",\n        version = \"7877337633466358ed680f9b26967da5b310d7aa\",\n        sha256 = \"12ff300fa804f97bd07e2ff071d969e09d5f3d7bbffeac438c725fa52a51a212\",\n        strip_prefix = \"opencensus-cpp-{version}\",\n        urls = [\"https://github.com/census-instrumentation/opencensus-cpp/archive/{version}.tar.gz\"],\n        use_category = 
[\"observability_ext\"],\n        extensions = [\"envoy.tracers.opencensus\"],\n        last_updated = \"2020-06-01\",\n        cpe = \"N/A\",\n    ),\n    # This should be removed, see https://github.com/envoyproxy/envoy/issues/11816.\n    com_github_curl = dict(\n        project_name = \"curl\",\n        project_desc = \"Library for transferring data with URLs\",\n        project_url = \"https://curl.haxx.se\",\n        version = \"7.72.0\",\n        sha256 = \"d4d5899a3868fbb6ae1856c3e55a32ce35913de3956d1973caccd37bd0174fa2\",\n        strip_prefix = \"curl-{version}\",\n        urls = [\"https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\", \"observability_ext\"],\n        extensions = [\n            \"envoy.filters.http.aws_lambda\",\n            \"envoy.filters.http.aws_request_signing\",\n            \"envoy.grpc_credentials.aws_iam\",\n            \"envoy.tracers.opencensus\",\n        ],\n        last_updated = \"2020-08-19\",\n        cpe = \"cpe:2.3:a:haxx:curl:*\",\n    ),\n    com_googlesource_chromium_v8 = dict(\n        project_name = \"V8\",\n        project_desc = \"Google’s open source high-performance JavaScript and WebAssembly engine, written in C++\",\n        project_url = \"https://v8.dev\",\n        version = \"8.5.210.20\",\n        # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh\n        # and contains complete checkout of V8 with all dependencies necessary to build wee8.\n        sha256 = \"ef404643d7da6854b76b9fb9950a79a1acbd037b7a26f02c585ac379b0f7dee1\",\n        urls = [\"https://storage.googleapis.com/envoyproxy-wee8/wee8-{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.access_loggers.wasm\",\n            \"envoy.bootstrap.wasm\",\n            \"envoy.filters.http.wasm\",\n            \"envoy.filters.network.wasm\",\n            
\"envoy.stat_sinks.wasm\",\n        ],\n        last_updated = \"2020-08-31\",\n        cpe = \"cpe:2.3:a:google:v8:*\",\n    ),\n    com_googlesource_quiche = dict(\n        project_name = \"QUICHE\",\n        project_desc = \"QUICHE (QUIC, HTTP/2, Etc) is Google's implementation of QUIC and related protocols\",\n        project_url = \"https://quiche.googlesource.com/quiche\",\n        # Static snapshot of https://quiche.googlesource.com/quiche/+archive/f555d99a084cdd086a349548c70fb558ac5847cf.tar.gz\n        version = \"f555d99a084cdd086a349548c70fb558ac5847cf\",\n        sha256 = \"1833f08e7b0f18b49d7498b029b7f3e6559a82113ec82a98a9e945553756e351\",\n        urls = [\"https://storage.googleapis.com/quiche-envoy-integration/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\"envoy.transport_sockets.quic\"],\n        last_updated = \"2020-09-18\",\n        cpe = \"N/A\",\n    ),\n    com_googlesource_googleurl = dict(\n        project_name = \"Chrome URL parsing library\",\n        project_desc = \"Chrome URL parsing library\",\n        project_url = \"https://quiche.googlesource.com/googleurl\",\n        # Static snapshot of https://quiche.googlesource.com/googleurl/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz.\n        version = \"ef0d23689e240e6c8de4c3a5296b209128c87373\",\n        sha256 = \"d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176\",\n        urls = [\"https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [],\n        last_updated = \"2020-08-05\",\n        cpe = \"N/A\",\n    ),\n    com_google_cel_cpp = dict(\n        project_name = \"Common Expression Language (CEL) C++ library\",\n        project_desc = \"Common Expression Language (CEL) C++ library\",\n        project_url = \"https://opensource.google/projects/cel\",\n        version = \"b9453a09b28a1531c4917e8792b3ea61f6b1a447\",\n    
    sha256 = \"cad7d01139947d78e413d112cb8f7431fbb33cf66b0adf9c280824803fc2a72e\",\n        strip_prefix = \"cel-cpp-{version}\",\n        urls = [\"https://github.com/google/cel-cpp/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.access_loggers.wasm\",\n            \"envoy.bootstrap.wasm\",\n            \"envoy.filters.http.rbac\",\n            \"envoy.filters.http.wasm\",\n            \"envoy.filters.network.rbac\",\n            \"envoy.filters.network.wasm\",\n            \"envoy.stat_sinks.wasm\",\n        ],\n        last_updated = \"2020-07-14\",\n        cpe = \"N/A\",\n    ),\n    com_github_google_flatbuffers = dict(\n        project_name = \"FlatBuffers\",\n        project_desc = \"Cross platform serialization library architected for maximum memory efficiency\",\n        project_url = \"https://github.com/google/flatbuffers\",\n        version = \"a83caf5910644ba1c421c002ef68e42f21c15f9f\",\n        sha256 = \"b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a\",\n        strip_prefix = \"flatbuffers-{version}\",\n        urls = [\"https://github.com/google/flatbuffers/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.access_loggers.wasm\",\n            \"envoy.bootstrap.wasm\",\n            \"envoy.filters.http.wasm\",\n            \"envoy.filters.network.wasm\",\n            \"envoy.stat_sinks.wasm\",\n        ],\n        last_updated = \"2020-07-29\",\n        cpe = \"N/A\",\n    ),\n    com_googlesource_code_re2 = dict(\n        project_name = \"RE2\",\n        project_desc = \"RE2, a regular expression library\",\n        project_url = \"https://github.com/google/re2\",\n        version = \"2020-07-06\",\n        sha256 = \"2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f\",\n        strip_prefix = \"re2-{version}\",\n        urls = 
[\"https://github.com/google/re2/archive/{version}.tar.gz\"],\n        use_category = [\"controlplane\", \"dataplane_core\"],\n        last_updated = \"2020-07-06\",\n        cpe = \"N/A\",\n    ),\n    # Included to access FuzzedDataProvider.h. This is compiler agnostic but\n    # provided as part of the compiler-rt source distribution. We can't use the\n    # Clang variant as we are not a Clang-LLVM only shop today.\n    org_llvm_releases_compiler_rt = dict(\n        project_name = \"compiler-rt\",\n        project_desc = \"LLVM compiler runtime library\",\n        project_url = \"https://compiler-rt.llvm.org\",\n        version = \"10.0.0\",\n        sha256 = \"6a7da64d3a0a7320577b68b9ca4933bdcab676e898b759850e827333c3282c75\",\n        # Only allow peeking at fuzzer related files for now.\n        strip_prefix = \"compiler-rt-{version}.src\",\n        urls = [\"https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz\"],\n        last_updated = \"2020-03-24\",\n        use_category = [\"test_only\"],\n    ),\n    upb = dict(\n        project_name = \"upb\",\n        project_desc = \"A small protobuf implementation in C (gRPC dependency)\",\n        project_url = \"https://github.com/protocolbuffers/upb\",\n        version = \"8a3ae1ef3e3e3f26b45dec735c5776737fc7247f\",\n        sha256 = \"e9f281c56ab1eb1f97a80ca8a83bb7ef73d230eabb8591f83876f4e7b85d9b47\",\n        strip_prefix = \"upb-{version}\",\n        urls = [\"https://github.com/protocolbuffers/upb/archive/{version}.tar.gz\"],\n        use_category = [\"controlplane\"],\n        last_updated = \"2019-11-19\",\n        cpe = \"N/A\",\n    ),\n    kafka_source = dict(\n        project_name = \"Kafka (source)\",\n        project_desc = \"Open-source distributed event streaming platform\",\n        project_url = \"https://kafka.apache.org\",\n        version = \"2.4.1\",\n        sha256 = \"740236f44d66e33ea83382383b4fb7eabdab7093a644b525dd5ec90207f933bd\",\n   
     strip_prefix = \"kafka-{version}/clients/src/main/resources/common/message\",\n        urls = [\"https://github.com/apache/kafka/archive/{version}.zip\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\"envoy.filters.network.kafka_broker\"],\n        last_updated = \"2020-08-26\",\n        cpe = \"cpe:2.3:a:apache:kafka:*\",\n    ),\n    kafka_server_binary = dict(\n        project_name = \"Kafka (server binary)\",\n        project_desc = \"Open-source distributed event streaming platform\",\n        project_url = \"https://kafka.apache.org\",\n        version = \"2.4.1\",\n        sha256 = \"2177cbd14118999e1d76fec628ca78ace7e6f841219dbc6035027c796bbe1a2a\",\n        strip_prefix = \"kafka_2.12-{version}\",\n        urls = [\"https://mirrors.gigenet.com/apache/kafka/{version}/kafka_2.12-{version}.tgz\"],\n        last_updated = \"2020-08-26\",\n        use_category = [\"test_only\"],\n    ),\n    kafka_python_client = dict(\n        project_name = \"Kafka (Python client)\",\n        project_desc = \"Open-source distributed event streaming platform\",\n        project_url = \"https://kafka.apache.org\",\n        version = \"2.0.1\",\n        sha256 = \"05f7c6eecb402f11fcb7e524c903f1ba1c38d3bdc9bf42bc8ec3cf7567b9f979\",\n        strip_prefix = \"kafka-python-{version}\",\n        urls = [\"https://github.com/dpkp/kafka-python/archive/{version}.tar.gz\"],\n        last_updated = \"2020-08-26\",\n        use_category = [\"test_only\"],\n    ),\n    proxy_wasm_cpp_sdk = dict(\n        project_name = \"WebAssembly for Proxies (C++ SDK)\",\n        project_desc = \"WebAssembly for Proxies (C++ SDK)\",\n        project_url = \"https://github.com/proxy-wasm/proxy-wasm-cpp-sdk\",\n        version = \"7afb39d868a973caa6216a535c24e37fb666b6f3\",\n        sha256 = \"213d0b441bcc3df2c87933b24a593b5fd482fa8f4db158b707c60005b9e70040\",\n        strip_prefix = \"proxy-wasm-cpp-sdk-{version}\",\n        # 2020-09-10\n        urls = 
[\"https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.access_loggers.wasm\",\n            \"envoy.bootstrap.wasm\",\n            \"envoy.filters.http.wasm\",\n            \"envoy.filters.network.wasm\",\n            \"envoy.stat_sinks.wasm\",\n        ],\n        last_updated = \"2020-07-29\",\n        cpe = \"N/A\",\n    ),\n    proxy_wasm_cpp_host = dict(\n        project_name = \"WebAssembly for Proxies (C++ host implementation)\",\n        project_desc = \"WebAssembly for Proxies (C++ host implementation)\",\n        project_url = \"https://github.com/proxy-wasm/proxy-wasm-cpp-host\",\n        # 2020-09-10\n        version = \"49ed20e895b728aae6b811950a2939ecbaf76f7c\",\n        sha256 = \"fa03293d01450b9164f8f56ef9227301f7d1af4f373f996400f75c93f6ebc822\",\n        strip_prefix = \"proxy-wasm-cpp-host-{version}\",\n        urls = [\"https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.access_loggers.wasm\",\n            \"envoy.bootstrap.wasm\",\n            \"envoy.filters.http.wasm\",\n            \"envoy.filters.network.wasm\",\n            \"envoy.stat_sinks.wasm\",\n        ],\n        last_updated = \"2020-07-29\",\n        cpe = \"N/A\",\n    ),\n    # TODO: upgrade to the latest version (1.41 currently fails tests)\n    emscripten_toolchain = dict(\n        project_name = \"Emscripten SDK\",\n        project_desc = \"Emscripten SDK (use by Wasm)\",\n        project_url = \"https://github.com/emscripten-core/emsdk\",\n        version = \"1.39\",\n        sha256 = \"4ac0f1f3de8b3f1373d435cd7e58bd94de4146e751f099732167749a229b443b\",\n        patch_cmds = [\n            \"[[ \\\"$(uname -m)\\\" == \\\"x86_64\\\" ]] && ./emsdk install 1.39.6-upstream && ./emsdk activate --embedded 1.39.6-upstream || true\",\n        ],\n        
strip_prefix = \"emsdk-{version}.6\",\n        urls = [\"https://github.com/emscripten-core/emsdk/archive/{version}.6.tar.gz\"],\n        use_category = [\"build\"],\n        last_updated = \"2020-07-29\",\n    ),\n    io_bazel_rules_rust = dict(\n        project_name = \"Bazel rust rules\",\n        project_desc = \"Bazel rust rules (used by Wasm)\",\n        project_url = \"https://github.com/bazelbuild/rules_rust\",\n        version = \"fda9a1ce6482973adfda022cadbfa6b300e269c3\",\n        sha256 = \"484a2b2b67cd2d1fa1054876de7f8d291c4b203fd256bc8cbea14d749bb864ce\",\n        # Last commit where \"out_binary = True\" works.\n        # See: https://github.com/bazelbuild/rules_rust/issues/386\n        strip_prefix = \"rules_rust-{version}\",\n        urls = [\"https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz\"],\n        use_category = [\"build\"],\n        last_updated = \"2020-07-29\",\n    ),\n    rules_antlr = dict(\n        project_name = \"ANTLR Rules for Bazel\",\n        project_desc = \"Bazel rules for ANTLR\",\n        project_url = \"https://github.com/marcohu/rules_antlr\",\n        version = \"3cc2f9502a54ceb7b79b37383316b23c4da66f9a\",\n        sha256 = \"7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429\",\n        strip_prefix = \"rules_antlr-{version}\",\n        urls = [\"https://github.com/marcohu/rules_antlr/archive/{version}.tar.gz\"],\n        # This should be \"build\", but that trips the verification in the docs.\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.access_loggers.wasm\",\n            \"envoy.bootstrap.wasm\",\n            \"envoy.filters.http.wasm\",\n            \"envoy.filters.network.wasm\",\n            \"envoy.stat_sinks.wasm\",\n        ],\n        last_updated = \"2020-07-29\",\n        cpe = \"N/A\",\n    ),\n    antlr4_runtimes = dict(\n        project_name = \"ANTLR v4\",\n        project_desc = \"ANTLR (ANother Tool for Language Recognition) is a 
powerful parser generator for reading, processing, executing, or translating structured text or binary files\",\n        project_url = \"https://github.com/antlr/antlr4\",\n        version = \"4.7.1\",\n        sha256 = \"4d0714f441333a63e50031c9e8e4890c78f3d21e053d46416949803e122a6574\",\n        strip_prefix = \"antlr4-{version}\",\n        urls = [\"https://github.com/antlr/antlr4/archive/{version}.tar.gz\"],\n        use_category = [\"dataplane_ext\"],\n        extensions = [\n            \"envoy.access_loggers.wasm\",\n            \"envoy.bootstrap.wasm\",\n            \"envoy.filters.http.wasm\",\n            \"envoy.filters.network.wasm\",\n            \"envoy.stat_sinks.wasm\",\n        ],\n        last_updated = \"2020-07-29\",\n        cpe = \"N/A\",\n    ),\n)\n\ndef _format_version(s, version):\n    return s.format(version = version, dash_version = version.replace(\".\", \"-\"), underscore_version = version.replace(\".\", \"_\"))\n\n# Interpolate {version} in the above dependency specs. This code should be capable of running in both Python\n# and Starlark.\ndef _dependency_repositories():\n    locations = {}\n    for key, location in DEPENDENCY_REPOSITORIES_SPEC.items():\n        mutable_location = dict(location)\n        locations[key] = mutable_location\n\n        # Fixup with version information.\n        if \"version\" in location:\n            if \"strip_prefix\" in location:\n                mutable_location[\"strip_prefix\"] = _format_version(location[\"strip_prefix\"], location[\"version\"])\n            mutable_location[\"urls\"] = [_format_version(url, location[\"version\"]) for url in location[\"urls\"]]\n    return locations\n\nDEPENDENCY_REPOSITORIES = _dependency_repositories()\n"
  },
  {
    "path": "bazel/rules_go.patch",
    "content": "#\n# Bazel RBE on Windows GCP workers currently will not invoke cmd.exe batch files correctly\n#\n# Symptom is program not found 'bazel-out', because of the way that the CreateProcess command\n# is constructed by bazel with actions.run with forward slashes, e.g. the command\n#   cmd.exe /c \"bazel-out/host/bin/external/go_sdk/builder.exe.bat\"\n# where cmd.exe on GCP is treating 'bazel-out' as the target, and /host as a command line switch.\n# This problem was not observed on Azure CI pipelines or locally by the developers. The eventual\n# fix is not specific to rules_go; this patch simply addresses immediate breakage and can be removed\n# once the underlying issue within Bazel/RBE is fixed.\n# See:\n# - https://github.com/bazelbuild/rules_go/pull/2542\n# - https://github.com/envoyproxy/envoy/issues/11657\n#\ndiff --git a/go/private/rules/binary.bzl b/go/private/rules/binary.bzl\nindex b88dfd96..e68b5ece 100644\n--- a/go/private/rules/binary.bzl\n+++ b/go/private/rules/binary.bzl\n@@ -128,8 +128,9 @@ def _go_tool_binary_impl(ctx):\n             content = cmd,\n         )\n         ctx.actions.run(\n-            executable = bat,\n-            inputs = sdk.libs + sdk.headers + sdk.tools + ctx.files.srcs + [sdk.go],\n+            executable = \"cmd.exe\",\n+            arguments = [\"/S\", \"/C\", bat.path.replace(\"/\", \"\\\\\")],\n+            inputs = sdk.libs + sdk.headers + sdk.tools + ctx.files.srcs + [sdk.go, bat],\n             outputs = [cout],\n             env = {\"GOROOT\": sdk.root_file.dirname},  # NOTE(#2005): avoid realpath in sandbox\n             mnemonic = \"GoToolchainBinaryCompile\",\n"
  },
  {
    "path": "bazel/setup_clang.sh",
    "content": "#!/bin/bash\n\nBAZELRC_FILE=\"${BAZELRC_FILE:-$(bazel info workspace)/clang.bazelrc}\"\n\nLLVM_PREFIX=$1\n\nif [[ ! -e \"${LLVM_PREFIX}/bin/llvm-config\" ]]; then\n  echo \"Error: cannot find llvm-config in ${LLVM_PREFIX}.\"\n  exit 1\nfi\n\nPATH=\"$(\"${LLVM_PREFIX}\"/bin/llvm-config --bindir):${PATH}\"\nexport PATH\n\nRT_LIBRARY_PATH=\"$(dirname \"$(find \"$(llvm-config --libdir)\" -name libclang_rt.ubsan_standalone_cxx-x86_64.a | head -1)\")\"\n\necho \"# Generated file, do not edit. If you want to disable clang, just delete this file.\nbuild:clang --action_env='PATH=${PATH}'\nbuild:clang --action_env=CC=clang\nbuild:clang --action_env=CXX=clang++\nbuild:clang --action_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config'\nbuild:clang --repo_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config'\nbuild:clang --linkopt='-L$(llvm-config --libdir)'\nbuild:clang --linkopt='-Wl,-rpath,$(llvm-config --libdir)'\n\nbuild:clang-asan --action_env=ENVOY_UBSAN_VPTR=1\nbuild:clang-asan --copt=-fsanitize=vptr,function\nbuild:clang-asan --linkopt=-fsanitize=vptr,function\nbuild:clang-asan --linkopt='-L${RT_LIBRARY_PATH}'\nbuild:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a\nbuild:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a\n\" > \"${BAZELRC_FILE}\"\n"
  },
  {
    "path": "bazel/setup_local_tsan.sh",
    "content": "#!/bin/bash\n\nBAZELRC_FILE=\"${BAZELRC_FILE:-$(bazel info workspace)/local_tsan.bazelrc}\"\n\nLIBCXX_PREFIX=$1\n\nif [[ ! -e \"${LIBCXX_PREFIX}/lib\" ]]; then\n  echo \"Error: cannot find /lib in ${LIBCXX_PREFIX}.\"\n  exit 1\nfi\n\n\necho \"# Generated file, do not edit. Delete this file if you no longer use local tsan-instrumented libc++\nbuild:local-tsan --config=libc++\nbuild:local-tsan --config=clang-tsan\nbuild:local-tsan --linkopt=-L${LIBCXX_PREFIX}/lib\nbuild:local-tsan --linkopt=-Wl,-rpath,${LIBCXX_PREFIX}/lib\n\" > \"${BAZELRC_FILE}\"\n"
  },
  {
    "path": "bazel/sh_test_wrapper.sh",
    "content": "#!/bin/bash\n\n# Dummy shell implementation for nooping tests.\n# TODO(lizan): remove when we have a solution for\n# https://github.com/bazelbuild/bazel/issues/3510\n\ncd \"$(dirname \"$0\")\" || exit 1\n\nif [ $# -gt 0 ]; then\n  \"./${1}\" \"${@:2}\"\nfi\n"
  },
  {
    "path": "bazel/tclap-win64-ull-sizet.patch",
    "content": "diff --git a/include/tclap/StandardTraits.h b/include/tclap/StandardTraits.h\nindex 46d7f6f..117057b 100644\n--- a/include/tclap/StandardTraits.h\n+++ b/include/tclap/StandardTraits.h\n@@ -123,8 +123,9 @@ struct ArgTraits<unsigned char> {\n     typedef ValueLike ValueCategory;\n };\n \n-// Microsoft implements size_t awkwardly. \n-#if defined(_MSC_VER) && defined(_M_X64)\n+// Microsoft implements size_t awkwardly.\n+// Studio 2005 introduces unsigned long long, which conflicts with the size_t template\n+#if defined(_MSC_VER) && (_MSC_VER < 1400) && defined(_M_X64)\n /**\n  * size_ts have value-like semantics.\n  */\n"
  },
  {
    "path": "bazel/test/BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n\nexports_files([\"verify_tap_test.sh\"])\n"
  },
  {
    "path": "bazel/test/verify_tap_test.sh",
    "content": "#!/bin/bash\n\nset -ex\n\n# Clear existing tap directory if previous run wasn't in sandbox\nrm -rf tap\n\nmkdir -p tap\nTAP_TMP=\"$(realpath tap)\"\n\nTAP_PATH=\"${TAP_TMP}/tap\" \"$@\"\n\n# TODO(htuch): Check for pcap, now CI (with or without RBE) does have\n# enough capabilities.\n# Verify that some pb_text files have been created.\nls -l \"${TAP_TMP}\"/tap_*.pb_text > /dev/null\n"
  },
  {
    "path": "bazel/test_for_benchmark_wrapper.sh",
    "content": "#!/bin/bash\n\n# Set the benchmark time to 0 to just verify that the benchmark runs to\n# completion.  We're interacting with two different flag parsers, so the order\n# of flags and the -- matters.\n\"${TEST_SRCDIR}/envoy/${1}\" \"${@:2}\" --skip_expensive_benchmarks -- --benchmark_min_time=0\n"
  },
  {
    "path": "bazel/toolchains/BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n\nplatform(\n    name = \"rbe_ubuntu_clang_platform\",\n    parents = [\"@rbe_ubuntu_clang//config:platform\"],\n    remote_execution_properties = \"\"\"\n        {PARENT_REMOTE_EXECUTION_PROPERTIES}\n        properties: {\n          name: \"dockerAddCapabilities\"\n          value: \"SYS_PTRACE,NET_RAW,NET_ADMIN\"\n        }\n        properties: {\n          name: \"dockerNetwork\"\n          value: \"standard\"\n        }\n        \"\"\",\n)\n"
  },
  {
    "path": "bazel/upb.patch",
    "content": "# https://github.com/protocolbuffers/upb/pull/226\ndiff --git a/bazel/upb_proto_library.bzl b/bazel/upb_proto_library.bzl\nindex f148745be..21ed34b48 100644\n--- a/bazel/upb_proto_library.bzl\n+++ b/bazel/upb_proto_library.bzl\n@@ -8,7 +8,7 @@ load(\"@bazel_tools//tools/cpp:toolchain_utils.bzl\", \"find_cpp_toolchain\")\n \n # copybara:strip_for_google3_begin\n load(\"@bazel_skylib//lib:versions.bzl\", \"versions\")\n-load(\"@bazel_version//:bazel_version.bzl\", \"bazel_version\")\n+load(\"@upb_bazel_version//:bazel_version.bzl\", \"bazel_version\")\n # copybara:strip_end\n \n # Generic support code #########################################################\ndiff --git a/bazel/workspace_deps.bzl b/bazel/workspace_deps.bzl\nindex 39bf524a7..aabbc3411 100644\n--- a/bazel/workspace_deps.bzl\n+++ b/bazel/workspace_deps.bzl\n@@ -5,7 +5,7 @@ load(\"//bazel:repository_defs.bzl\", \"bazel_version_repository\")\n \n def upb_deps():\n     bazel_version_repository(\n-        name = \"bazel_version\",\n+        name = \"upb_bazel_version\",\n     )\n \n     git_repository(\n"
  },
  {
    "path": "bazel/wasm/BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n"
  },
  {
    "path": "bazel/wasm/wasm.bzl",
    "content": "load(\"@io_bazel_rules_rust//rust:rust.bzl\", \"rust_binary\")\nload(\"@rules_cc//cc:defs.bzl\", \"cc_binary\")\n\ndef _wasm_cc_transition_impl(settings, attr):\n    return {\n        \"//command_line_option:cpu\": \"wasm32\",\n        \"//command_line_option:crosstool_top\": \"@proxy_wasm_cpp_sdk//toolchain:emscripten\",\n\n        # Overriding copt/cxxopt/linkopt to prevent sanitizers/coverage options leak\n        # into WASM build configuration\n        \"//command_line_option:copt\": [],\n        \"//command_line_option:cxxopt\": [],\n        \"//command_line_option:linkopt\": [],\n        \"//command_line_option:collect_code_coverage\": \"false\",\n        \"//command_line_option:fission\": \"no\",\n    }\n\ndef _wasm_rust_transition_impl(settings, attr):\n    return {\n        \"//command_line_option:platforms\": \"@io_bazel_rules_rust//rust/platform:wasm\",\n    }\n\nwasm_cc_transition = transition(\n    implementation = _wasm_cc_transition_impl,\n    inputs = [],\n    outputs = [\n        \"//command_line_option:cpu\",\n        \"//command_line_option:crosstool_top\",\n        \"//command_line_option:copt\",\n        \"//command_line_option:cxxopt\",\n        \"//command_line_option:fission\",\n        \"//command_line_option:linkopt\",\n        \"//command_line_option:collect_code_coverage\",\n    ],\n)\n\nwasm_rust_transition = transition(\n    implementation = _wasm_rust_transition_impl,\n    inputs = [],\n    outputs = [\n        \"//command_line_option:platforms\",\n    ],\n)\n\ndef _wasm_binary_impl(ctx):\n    out = ctx.actions.declare_file(ctx.label.name)\n    if ctx.attr.precompile:\n        ctx.actions.run(\n            executable = ctx.executable._compile_tool,\n            arguments = [ctx.files.binary[0].path, out.path],\n            outputs = [out],\n            inputs = ctx.files.binary,\n        )\n    else:\n        ctx.actions.run(\n            executable = \"cp\",\n            arguments = [ctx.files.binary[0].path, 
out.path],\n            outputs = [out],\n            inputs = ctx.files.binary,\n        )\n\n    return [DefaultInfo(files = depset([out]), runfiles = ctx.runfiles([out]))]\n\ndef _wasm_attrs(transition):\n    return {\n        \"binary\": attr.label(mandatory = True, cfg = transition),\n        \"precompile\": attr.bool(default = False),\n        # This is deliberately in target configuration to avoid compiling v8 twice.\n        \"_compile_tool\": attr.label(default = \"@envoy//test/tools/wee8_compile:wee8_compile_tool\", executable = True, cfg = \"target\"),\n        \"_whitelist_function_transition\": attr.label(default = \"@bazel_tools//tools/whitelists/function_transition_whitelist\"),\n    }\n\n# WASM binary rule implementation.\n# This copies the binary specified in binary attribute in WASM configuration to\n# target configuration, so a binary in non-WASM configuration can depend on them.\nwasm_cc_binary_rule = rule(\n    implementation = _wasm_binary_impl,\n    attrs = _wasm_attrs(wasm_cc_transition),\n)\n\nwasm_rust_binary_rule = rule(\n    implementation = _wasm_binary_impl,\n    attrs = _wasm_attrs(wasm_rust_transition),\n)\n\ndef wasm_cc_binary(name, tags = [], repository = \"\", **kwargs):\n    wasm_name = \"_wasm_\" + name\n    kwargs.setdefault(\"additional_linker_inputs\", [\"@proxy_wasm_cpp_sdk//:jslib\", \"@envoy//source/extensions/common/wasm/ext:jslib\"])\n\n    if repository == \"@envoy\":\n        envoy_js = \"--js-library source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js\"\n    else:\n        envoy_js = \"--js-library external/envoy/source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js\"\n    kwargs.setdefault(\"linkopts\", [\n        envoy_js,\n        \"--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js\",\n    ])\n    kwargs.setdefault(\"visibility\", [\"//visibility:public\"])\n    cc_binary(\n        name = wasm_name,\n        # Adding manual tag it won't be built in non-WASM (e.g. 
x86_64 config)\n        # when an wildcard is specified, but it will be built in WASM configuration\n        # when the wasm_binary below is built.\n        tags = [\"manual\"],\n        **kwargs\n    )\n\n    wasm_cc_binary_rule(\n        name = name,\n        binary = \":\" + wasm_name,\n        tags = tags + [\"manual\"],\n    )\n\ndef envoy_wasm_cc_binary(name, tags = [], **kwargs):\n    wasm_cc_binary(name, tags, repository = \"@envoy\", **kwargs)\n\ndef wasm_rust_binary(name, tags = [], **kwargs):\n    wasm_name = \"_wasm_\" + (name if not \".wasm\" in name else name.strip(\".wasm\"))\n    kwargs.setdefault(\"visibility\", [\"//visibility:public\"])\n\n    rust_binary(\n        name = wasm_name,\n        edition = \"2018\",\n        crate_type = \"cdylib\",\n        out_binary = True,\n        tags = [\"manual\"],\n        **kwargs\n    )\n\n    wasm_rust_binary_rule(\n        name = name,\n        precompile = select({\n            \"@envoy//bazel:linux_x86_64\": True,\n            \"//conditions:default\": False,\n        }),\n        binary = \":\" + wasm_name,\n        tags = tags + [\"manual\"],\n    )\n"
  },
  {
    "path": "ci/Dockerfile-envoy",
    "content": "ARG BUILD_OS=ubuntu\nARG BUILD_TAG=18.04\n\n# Build stage\nFROM buildpack-deps:$BUILD_TAG as build\n\nRUN echo \"d6c40440609a23483f12eb6295b5191e94baf08298a856bab6e15b10c3b82891  /tmp/su-exec.c\" > /tmp/checksum \\\n    && curl -o /tmp/su-exec.c https://raw.githubusercontent.com/ncopa/su-exec/212b75144bbc06722fbd7661f651390dc47a43d1/su-exec.c \\\n    && sha256sum -c /tmp/checksum \\\n    && gcc -Wall /tmp/su-exec.c -o/usr/local/bin/su-exec \\\n    && chown root:root /usr/local/bin/su-exec \\\n    && chmod 0755 /usr/local/bin/su-exec\n\n# Final stage\nFROM $BUILD_OS:$BUILD_TAG\nARG TARGETPLATFORM\n\nRUN apt-get update && apt-get upgrade -y \\\n    && apt-get install --no-install-recommends -y ca-certificates \\\n    && apt-get autoremove -y && apt-get clean \\\n    && rm -rf /tmp/* /var/tmp/* \\\n    && rm -rf /var/lib/apt/lists/*\n\nCOPY --from=build /usr/local/bin/su-exec /usr/local/bin/su-exec\nRUN adduser --group --system envoy\n\nRUN mkdir -p /etc/envoy\n\nARG ENVOY_BINARY_SUFFIX=_stripped\nADD ${TARGETPLATFORM}/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/\nADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml\n\nEXPOSE 10000\n\nCOPY ci/docker-entrypoint.sh /\nENTRYPOINT [\"/docker-entrypoint.sh\"]\nCMD [\"envoy\", \"-c\", \"/etc/envoy/envoy.yaml\"]\n"
  },
  {
    "path": "ci/Dockerfile-envoy-alpine",
    "content": "FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31\nRUN mkdir -p /etc/envoy\n\nADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml\nRUN apk add --no-cache shadow su-exec \\\n        && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy\n\nARG ENVOY_BINARY_SUFFIX=_stripped\nADD linux/amd64/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/\n\nEXPOSE 10000\n\nCOPY ci/docker-entrypoint.sh /\nENTRYPOINT [\"/docker-entrypoint.sh\"]\nCMD [\"envoy\", \"-c\", \"/etc/envoy/envoy.yaml\"]\n"
  },
  {
    "path": "ci/Dockerfile-envoy-google-vrp",
    "content": "ARG ENVOY_VRP_BASE_IMAGE\nFROM $ENVOY_VRP_BASE_IMAGE\n\nRUN apt-get update \\\n    && apt-get upgrade -y \\\n    && apt-get install -y libc++1 supervisor gdb strace tshark \\\n    && apt-get autoremove -y \\\n    && apt-get clean \\\n    && rm -rf /tmp/* /var/tmp/* \\\n    && rm -rf /var/lib/apt/lists/*\n\nADD configs/google-vrp/envoy-edge.yaml /etc/envoy/envoy-edge.yaml\nADD configs/google-vrp/envoy-origin.yaml /etc/envoy/envoy-origin.yaml\nADD configs/google-vrp/launch_envoy.sh /usr/local/bin/launch_envoy.sh\nADD configs/google-vrp/supervisor.conf /etc/supervisor.conf\nADD test/config/integration/certs/serverkey.pem /etc/envoy/certs/serverkey.pem\nADD test/config/integration/certs/servercert.pem /etc/envoy/certs/servercert.pem\n# ADD %local envoy bin% /usr/local/bin/envoy\n\nEXPOSE 10000\nEXPOSE 10001\n\nCMD [\"supervisord\", \"-c\", \"/etc/supervisor.conf\"]\n"
  },
  {
    "path": "ci/README.md",
    "content": "# Developer use of CI Docker images\n\nThere are two available flavors of Envoy Docker images for Linux, based on Ubuntu and Alpine Linux\nand an image based on Windows2019.\n\n## Ubuntu Envoy image\n\nThe Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:<hash>`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CircleCI checks,\nwhere `<hash>` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). Developers\nmay work with the latest build image SHA in [envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8)\nrepo to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image.\nMoreover, the Docker image at [`envoyproxy/envoy-dev:<hash>`](https://hub.docker.com/r/envoyproxy/envoy-dev/) is an image that has an Envoy binary at `/usr/local/bin/envoy`.\nThe `<hash>` corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy-dev:latest` contains an Envoy\nbinary built from the latest tip of master that passed tests.\n\n## Alpine Envoy image\n\nMinimal images based on Alpine Linux allow for quicker deployment of Envoy. Two Alpine based images are built,\none with an Envoy binary with debug (`envoyproxy/envoy-alpine-debug`) symbols and one stripped of them (`envoyproxy/envoy-alpine`).\nBoth images are pushed with two different tags: `<hash>` and `latest`. 
Parallel to the Ubuntu images above, `<hash>` corresponds to the\nmaster commit at which the binary was compiled, and `latest` corresponds to a binary built from the latest tip of master that passed tests.\n\n## Windows 2019 Envoy image\n\nThe Windows 2019 based Envoy Docker image at [`envoyproxy/envoy-build-windows2019:<hash>`](https://hub.docker.com/r/envoyproxy/envoy-build-windows2019/)\nis used for CI checks, where `<hash>` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh).\nDevelopers may work with the most recent `envoyproxy/envoy-build-windows2019` image to provide a self-contained environment for building Envoy binaries and\nrunning tests that reflects the latest built Windows 2019 Envoy image.\n\n# Build image base and compiler versions\n\nCurrently there are three build images for Linux and one for Windows:\n\n* `envoyproxy/envoy-build` &mdash; alias to `envoyproxy/envoy-build-ubuntu`.\n* `envoyproxy/envoy-build-ubuntu` &mdash; based on Ubuntu 18.04 (Bionic) with GCC 9 and Clang 10 compiler.\n* `envoyproxy/envoy-build-centos` &mdash; based on CentOS 7 with GCC 9 and Clang 10 compiler, this image is experimental and not well tested.\n* `envoyproxy/envoy-build-windows2019` &mdash; based on Windows 2019 LTS with VS 2019 Build Tools.\n\nThe source for these images is located in the [envoyproxy/envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools)\nrepository.\n\nWe use the Clang compiler for all Linux CI runs with tests. 
We have an additional Linux CI run with GCC which builds binary only.\n\n# C++ standard library\n\nAs of November 2019 after [#8859](https://github.com/envoyproxy/envoy/pull/8859) the official released binary is\n[linked against libc++ on Linux](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#linking-against-libc-on-linux).\nTo override the C++ standard library in your build, set environment variable `ENVOY_STDLIB` to `libstdc++` or `libc++` and\nrun `./ci/do_ci.sh` as described below.\n\n# Building and running tests as a developer\n\nThe `./ci/run_envoy_docker.sh` script can be used to set up a Docker container on Linux and Windows\nto build an Envoy static binary and run tests.\n\nThe build image defaults to `envoyproxy/envoy-build-ubuntu` on Linux and\n`envoyproxy/envoy-build-windows2019` on Windows, but you can choose build image by setting\n`IMAGE_NAME` in the environment.\n\nIn case your setup is behind a proxy, set `http_proxy` and `https_proxy` to the proxy servers before\ninvoking the build.\n\n```bash\nIMAGE_NAME=envoyproxy/envoy-build-ubuntu http_proxy=http://proxy.foo.com:8080 https_proxy=http://proxy.bar.com:8080 ./ci/run_envoy_docker.sh <build_script_args>'\n```\n\n## On Linux\n\nAn example basic invocation to build a developer version of the Envoy static binary (using the Bazel `fastbuild` type) is:\n\n```bash\n./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'\n```\n\nThe Envoy binary can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy-fastbuild` on the Docker host. You\ncan control this by setting `ENVOY_DOCKER_BUILD_DIR` in the environment, e.g. 
to\ngenerate the binary in `~/build/envoy/source/exe/envoy-fastbuild` you can run:\n\n```bash\nENVOY_DOCKER_BUILD_DIR=~/build ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'\n```\n\nFor a release version of the Envoy binary you can run:\n\n```bash\n./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release.server_only'\n```\n\nThe build artifact can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy` (or wherever\n`$ENVOY_DOCKER_BUILD_DIR` points).\n\nFor a debug version of the Envoy binary you can run:\n\n```bash\n./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.debug.server_only'\n```\n\nThe build artifact can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy-debug` (or wherever\n`$ENVOY_DOCKER_BUILD_DIR` points).\n\nTo leverage a [bazel remote cache](https://github.com/envoyproxy/envoy/tree/master/bazel#advanced-caching-setup) add the http_remote_cache endpoint to\nthe BAZEL_BUILD_EXTRA_OPTIONS environment variable\n\n```bash\n./ci/run_envoy_docker.sh \"BAZEL_BUILD_EXTRA_OPTIONS='--remote_http_cache=http://127.0.0.1:28080' ./ci/do_ci.sh bazel.release\"\n```\n\nThe `./ci/run_envoy_docker.sh './ci/do_ci.sh <TARGET>'` targets are:\n\n* `bazel.api` &mdash; build and run API tests under `-c fastbuild` with clang.\n* `bazel.asan` &mdash; build and run tests under `-c dbg --config=clang-asan` with clang.\n* `bazel.asan <test>` &mdash; build and run a specified test or test dir under `-c dbg --config=clang-asan` with clang.\n* `bazel.debug` &mdash; build Envoy static binary and run tests under `-c dbg`.\n* `bazel.debug <test>` &mdash; build Envoy static binary and run a specified test or test dir under `-c dbg`.\n* `bazel.debug.server_only` &mdash; build Envoy static binary under `-c dbg`.\n* `bazel.dev` &mdash; build Envoy static binary and run tests under `-c fastbuild` with clang.\n* `bazel.dev <test>` &mdash; build Envoy static binary and run a specified test or test dir under `-c fastbuild` with clang.\n* `bazel.release` &mdash; build Envoy static 
binary and run tests under `-c opt` with clang.\n* `bazel.release <test>` &mdash; build Envoy static binary and run a specified test or test dir under `-c opt` with clang.\n* `bazel.release.server_only` &mdash; build Envoy static binary under `-c opt` with clang.\n* `bazel.sizeopt` &mdash; build Envoy static binary and run tests under `-c opt --config=sizeopt` with clang.\n* `bazel.sizeopt <test>` &mdash; build Envoy static binary and run a specified test or test dir under `-c opt --config=sizeopt` with clang.\n* `bazel.sizeopt.server_only` &mdash; build Envoy static binary under `-c opt --config=sizeopt` with clang.\n* `bazel.coverage` &mdash; build and run tests under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`.\n* `bazel.coverage <test>` &mdash; build and run a specified test or test dir under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`.\n* `bazel.coverity` &mdash; build Envoy static binary and run Coverity Scan static analysis.\n* `bazel.msan` &mdash; build and run tests under `-c dbg --config=clang-msan` with clang.\n* `bazel.msan <test>` &mdash; build and run a specified test or test dir under `-c dbg --config=clang-msan` with clang.\n* `bazel.tsan` &mdash; build and run tests under `-c dbg --config=clang-tsan` with clang.\n* `bazel.tsan <test>` &mdash; build and run a specified test or test dir under `-c dbg --config=clang-tsan` with clang.\n* `bazel.fuzz` &mdash; build and run fuzz tests under `-c dbg --config=asan-fuzzer` with clang.\n* `bazel.fuzz <test>` &mdash; build and run a specified fuzz test or test dir under `-c dbg --config=asan-fuzzer` with clang. 
If specifying a single fuzz test, must use the full target name with \"_with_libfuzzer\" for `<test>`.\n* `bazel.compile_time_options` &mdash; build Envoy and run tests with various compile-time options toggled to their non-default state, to ensure they still build.\n* `bazel.compile_time_options <test>` &mdash; build Envoy and run a specified test or test dir with various compile-time options toggled to their non-default state, to ensure they still build.\n* `bazel.clang_tidy <files>` &mdash; build and run clang-tidy specified source files, if no files specified, runs against the diff with the last GitHub commit.\n* `check_format`&mdash; run `clang-format` and `buildifier` on entire source tree.\n* `fix_format`&mdash; run and enforce `clang-format` and `buildifier` on entire source tree.\n* `check_spelling`&mdash; run `misspell` on entire project.\n* `fix_spelling`&mdash; run and enforce `misspell` on entire project.\n* `check_spelling_pedantic`&mdash; run `aspell` on C++ and proto comments.\n* `docs`&mdash; build documentation tree in `generated/docs`.\n\n## On Windows\n\nAn example basic invocation to build the Envoy static binary and run tests is:\n\n```bash\n./ci/run_envoy_docker.sh './ci/windows_ci_steps.sh'\n```\n\nYou can modify `./ci/windows_ci_steps.sh` to modify `bazel` arguments, tests to run, etc. as well\nas set environment variables to adjust your container build environment as described above.\n\nThe Envoy binary can be found in `C:\\Windows\\Temp\\envoy-docker-build\\envoy\\source\\exe` on the Docker host. You\ncan control this by setting `ENVOY_DOCKER_BUILD_DIR` in the environment, e.g. 
to\ngenerate the binary in `C:\\Users\\foo\\build\\envoy\\source\\exe` you can run:\n\n```bash\nENVOY_DOCKER_BUILD_DIR=\"C:\\Users\\foo\\build\" ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'\n```\n\nNote the quotations around the `ENVOY_DOCKER_BUILD_DIR` value to preserve the backslashes in the\npath.\n\nIf you would like to run an interactive session to keep the build container running (to persist your local build environment), run:\n\n```bash\n./ci/run_envoy_docker.sh 'bash'\n```\n\nFrom an interactive session, you can invoke `bazel` manually or use the `./ci/windows_ci_steps.sh` script to build and run tests.\n\n# Testing changes to the build image as a developer\n\nWhile all changes to the build image should eventually be upstreamed, it can be useful to\ntest those changes locally before sending out a pull request. To experiment\nwith a local clone of the upstream build image you can make changes to files such as\nbuild_container.sh locally and then run:\n\n```bash\nDISTRO=ubuntu\ncd ci/build_container\nLINUX_DISTRO=\"${DISTRO}\" CIRCLE_SHA1=my_tag ./docker_build.sh  # Wait patiently for quite some time\ncd ../..\nIMAGE_NAME=\"envoyproxy/envoy-build-${DISTRO}\" IMAGE_ID=my_tag ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.whatever'\n```\n\nThis builds the Ubuntu based `envoyproxy/envoy-build-ubuntu` image, and the final call will run against your local copy of the build image.\n\n# macOS Build Flow\n\nThe macOS CI build is part of the [CircleCI](https://circleci.com/gh/envoyproxy/envoy) workflow.\nDependencies are installed by the `ci/mac_ci_setup.sh` script, via [Homebrew](https://brew.sh),\nwhich is pre-installed on the CircleCI macOS image. The dependencies are cached and re-installed\non every build. The `ci/mac_ci_steps.sh` script executes the specific commands that\nbuild and test Envoy. 
Note that the full version of Xcode (not just Command Line Tools) is required.\n\n# Coverity Scan Build Flow\n\n[Coverity Scan Envoy Project](https://scan.coverity.com/projects/envoy-proxy)\n\nCoverity Scan static analysis is not run within Envoy CI. However, Envoy can be locally built and\nsubmitted for analysis. A Coverity Scan Envoy project token must be generated from the\n[Coverity Project Settings](https://scan.coverity.com/projects/envoy-proxy?tab=project_settings).\nOnly a Coverity Project Administrator can create a token. With this token, running\n`ci/do_coverity_local.sh` will use the Ubuntu based `envoyproxy/envoy-build-ubuntu` image to build the\nEnvoy static binary with the Coverity Scan tool chain. This process generates an artifact,\nenvoy-coverity-output.tgz, that is uploaded to Coverity for static analysis.\n\nTo build and submit for analysis:\n```bash\nCOVERITY_TOKEN={generated Coverity project token} ./ci/do_coverity_local.sh\n```\n"
  },
  {
    "path": "ci/WORKSPACE.filter.example",
    "content": "workspace(name = \"envoy_filter_example\")\n\nlocal_repository(\n    name = \"envoy\",\n    path = \"{ENVOY_SRCDIR}\",\n)\n\nload(\"@envoy//bazel:api_binding.bzl\", \"envoy_api_binding\")\n\nenvoy_api_binding()\n\nload(\"@envoy//bazel:api_repositories.bzl\", \"envoy_api_dependencies\")\n\nenvoy_api_dependencies()\n\nload(\"@envoy//bazel:repositories.bzl\", \"envoy_dependencies\")\n\nenvoy_dependencies()\n\nload(\"@envoy//bazel:repositories_extra.bzl\", \"envoy_dependencies_extra\")\n\nenvoy_dependencies_extra()\n\nload(\"@envoy//bazel:dependency_imports.bzl\", \"envoy_dependency_imports\")\n\nenvoy_dependency_imports()\n"
  },
  {
    "path": "ci/api_mirror.sh",
    "content": "#!/bin/bash\n\nset -e\n\nCHECKOUT_DIR=../data-plane-api\n\nif [ -z \"$CIRCLE_PULL_REQUEST\" ] && [ \"$CIRCLE_BRANCH\" == \"master\" ]\nthen\n  echo \"Cloning...\"\n  git clone git@github.com:envoyproxy/data-plane-api \"$CHECKOUT_DIR\"\n\n  git -C \"$CHECKOUT_DIR\" config user.name \"data-plane-api(CircleCI)\"\n  git -C \"$CHECKOUT_DIR\" config user.email data-plane-api@users.noreply.github.com\n  git -C \"$CHECKOUT_DIR\" fetch\n  git -C \"$CHECKOUT_DIR\" checkout -B master origin/master\n\n  # Determine last envoyproxy/envoy SHA in envoyproxy/data-plane-api\n  MIRROR_MSG=\"Mirrored from https://github.com/envoyproxy/envoy\"\n  LAST_ENVOY_SHA=$(git -C \"$CHECKOUT_DIR\" log --grep=\"$MIRROR_MSG\" -n 1 | grep \"$MIRROR_MSG\" | \\\n    tail -n 1 | sed -e \"s#.*$MIRROR_MSG @ ##\")\n\n  echo \"Last mirrored envoyproxy/envoy SHA is $LAST_ENVOY_SHA\"\n\n  # Compute SHA sequence to replay in envoyproxy/data-plane-api\n  SHAS=$(git rev-list --reverse \"$LAST_ENVOY_SHA\"..HEAD api/)\n\n  # For each SHA, hard reset, rsync api/ and generate commit in\n  # envoyproxy/data-plane-api\n  API_WORKING_DIR=\"../envoy-api-mirror\"\n  git worktree add \"$API_WORKING_DIR\"\n  for sha in $SHAS\n  do\n    git -C \"$API_WORKING_DIR\" reset --hard \"$sha\"\n    COMMIT_MSG=$(git -C \"$API_WORKING_DIR\" log --format=%B -n 1)\n    QUALIFIED_COMMIT_MSG=$(echo -e \"$COMMIT_MSG\\n\\n$MIRROR_MSG @ $sha\")\n    rsync -acv --delete --exclude \"ci/\" --exclude \".*\" --exclude LICENSE \\\n      \"$API_WORKING_DIR\"/api/ \"$CHECKOUT_DIR\"/\n    git -C \"$CHECKOUT_DIR\" add .\n    git -C \"$CHECKOUT_DIR\" commit -m \"$QUALIFIED_COMMIT_MSG\"\n  done\n\n  echo \"Pushing...\"\n  git -C \"$CHECKOUT_DIR\" push origin master\n  echo \"Done\"\nfi\n"
  },
  {
    "path": "ci/build_setup.sh",
    "content": "#!/bin/bash\n\n# Configure environment variables for Bazel build and test.\n\nset -e\n\nexport PPROF_PATH=/thirdparty_build/bin/pprof\n\n[ -z \"${NUM_CPUS}\" ] && NUM_CPUS=$(grep -c ^processor /proc/cpuinfo)\n[ -z \"${ENVOY_SRCDIR}\" ] && export ENVOY_SRCDIR=/source\n[ -z \"${ENVOY_BUILD_TARGET}\" ] && export ENVOY_BUILD_TARGET=//source/exe:envoy-static\n[ -z \"${ENVOY_BUILD_DEBUG_INFORMATION}\" ] && export ENVOY_BUILD_DEBUG_INFORMATION=//source/exe:envoy-static.dwp\n[ -z \"${ENVOY_BUILD_ARCH}\" ] && {\n    ENVOY_BUILD_ARCH=$(uname -m)\n    export ENVOY_BUILD_ARCH\n}\n\nread -ra BAZEL_BUILD_EXTRA_OPTIONS <<< \"${BAZEL_BUILD_EXTRA_OPTIONS:-}\"\nread -ra BAZEL_EXTRA_TEST_OPTIONS <<< \"${BAZEL_EXTRA_TEST_OPTIONS:-}\"\nread -ra BAZEL_OPTIONS <<< \"${BAZEL_OPTIONS:-}\"\n\necho \"ENVOY_SRCDIR=${ENVOY_SRCDIR}\"\necho \"ENVOY_BUILD_TARGET=${ENVOY_BUILD_TARGET}\"\necho \"ENVOY_BUILD_ARCH=${ENVOY_BUILD_ARCH}\"\n\nfunction setup_gcc_toolchain() {\n  if [[ -n \"${ENVOY_STDLIB}\" && \"${ENVOY_STDLIB}\" != \"libstdc++\" ]]; then\n    echo \"gcc toolchain doesn't support ${ENVOY_STDLIB}.\"\n    exit 1\n  fi\n  if [[ -z \"${ENVOY_RBE}\" ]]; then\n    export CC=gcc\n    export CXX=g++\n    export BAZEL_COMPILER=gcc\n    echo \"$CC/$CXX toolchain configured\"\n  else\n    BAZEL_BUILD_OPTIONS=(\"--config=remote-gcc\" \"${BAZEL_BUILD_OPTIONS[@]}\")\n  fi\n}\n\nfunction setup_clang_toolchain() {\n  ENVOY_STDLIB=\"${ENVOY_STDLIB:-libc++}\"\n  if [[ -z \"${ENVOY_RBE}\" ]]; then\n    if [[ \"${ENVOY_STDLIB}\" == \"libc++\" ]]; then\n      BAZEL_BUILD_OPTIONS=(\"--config=libc++\" \"${BAZEL_BUILD_OPTIONS[@]}\")\n    else\n      BAZEL_BUILD_OPTIONS=(\"--config=clang\" \"${BAZEL_BUILD_OPTIONS[@]}\")\n    fi\n  else\n    if [[ \"${ENVOY_STDLIB}\" == \"libc++\" ]]; then\n      BAZEL_BUILD_OPTIONS=(\"--config=remote-clang-libc++\" \"${BAZEL_BUILD_OPTIONS[@]}\")\n    else\n      BAZEL_BUILD_OPTIONS=(\"--config=remote-clang\" \"${BAZEL_BUILD_OPTIONS[@]}\")\n    fi\n  fi\n  echo 
\"clang toolchain with ${ENVOY_STDLIB} configured\"\n}\n\nexport BUILD_DIR=${BUILD_DIR:-/build}\nif [[ ! -d \"${BUILD_DIR}\" ]]\nthen\n  echo \"${BUILD_DIR} mount missing - did you forget -v <something>:${BUILD_DIR}? Creating.\"\n  mkdir -p \"${BUILD_DIR}\"\nfi\n\n# Environment setup.\nexport TEST_TMPDIR=${BUILD_DIR}/tmp\nexport PATH=/opt/llvm/bin:${PATH}\nexport CLANG_FORMAT=\"${CLANG_FORMAT:-clang-format}\"\n\nif [[ -f \"/etc/redhat-release\" ]]; then\n  BAZEL_BUILD_EXTRA_OPTIONS+=(\"--copt=-DENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1\")\nfi\n\nfunction cleanup() {\n  # Remove build artifacts. This doesn't mess with incremental builds as these\n  # are just symlinks.\n  rm -rf \"${ENVOY_SRCDIR}\"/bazel-* clang.bazelrc\n}\n\ncleanup\ntrap cleanup EXIT\n\nexport LLVM_ROOT=\"${LLVM_ROOT:-/opt/llvm}\"\n\"$(dirname \"$0\")\"/../bazel/setup_clang.sh \"${LLVM_ROOT}\"\n\n[[ \"${BUILD_REASON}\" != \"PullRequest\" ]] && BAZEL_EXTRA_TEST_OPTIONS+=(\"--nocache_test_results\")\n\n# TODO(phlax): deprecate/remove this - i believe it was made redundant here:\n#   https://github.com/envoyproxy/envoy/commit/3ebedeb708a23062332a6fcdf33b462b7070adba#diff-2fa22a1337effee365a51e6844be0ab3\nexport BAZEL_QUERY_OPTIONS=\"${BAZEL_OPTIONS[*]}\"\n# Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks\n# to save disk space.\nBAZEL_BUILD_OPTIONS=(\n  \"${BAZEL_OPTIONS[@]}\"\n  \"--verbose_failures\"\n  \"--show_task_finish\"\n  \"--experimental_generate_json_trace_profile\"\n  \"--test_output=errors\"\n  \"--repository_cache=${BUILD_DIR}/repository_cache\"\n  \"--experimental_repository_cache_hardlinks\"\n  \"${BAZEL_BUILD_EXTRA_OPTIONS[@]}\"\n  \"${BAZEL_EXTRA_TEST_OPTIONS[@]}\")\n\n[[ \"${ENVOY_BUILD_ARCH}\" == \"aarch64\" ]] && BAZEL_BUILD_OPTIONS+=(\n  \"--define\" \"wasm=disabled\"\n\t\"--flaky_test_attempts=2\"\n\t\"--test_env=HEAPCHECK=\")\n\n[[ \"${BAZEL_EXPUNGE}\" == \"1\" ]] && bazel clean --expunge\n\n# Also setup 
some space for building Envoy standalone.\nexport ENVOY_BUILD_DIR=\"${BUILD_DIR}\"/envoy\nmkdir -p \"${ENVOY_BUILD_DIR}\"\n\n# This is where we copy build deliverables to.\nexport ENVOY_DELIVERY_DIR=\"${ENVOY_BUILD_DIR}\"/source/exe\nmkdir -p \"${ENVOY_DELIVERY_DIR}\"\n\n# This is where we copy the coverage report to.\nexport ENVOY_COVERAGE_ARTIFACT=\"${ENVOY_BUILD_DIR}\"/generated/coverage.tar.gz\n\n# This is where we copy the fuzz coverage report to.\nexport ENVOY_FUZZ_COVERAGE_ARTIFACT=\"${ENVOY_BUILD_DIR}\"/generated/fuzz_coverage.tar.gz\n\n# This is where we dump failed test logs for CI collection.\nexport ENVOY_FAILED_TEST_LOGS=\"${ENVOY_BUILD_DIR}\"/generated/failed-testlogs\nmkdir -p \"${ENVOY_FAILED_TEST_LOGS}\"\n\n# This is where we copy the build profile to.\nexport ENVOY_BUILD_PROFILE=\"${ENVOY_BUILD_DIR}\"/generated/build-profile\nmkdir -p \"${ENVOY_BUILD_PROFILE}\"\n\nexport BUILDIFIER_BIN=\"${BUILDIFIER_BIN:-/usr/local/bin/buildifier}\"\nexport BUILDOZER_BIN=\"${BUILDOZER_BIN:-/usr/local/bin/buildozer}\"\n\n# We set up an Envoy consuming project for test builds only if '-nofetch'\n# is not set AND this is an Envoy build. For derivative builds where Envoy\n# source tree is different than the current workspace, the setup step is\n# skipped.\nif [[ \"$1\" != \"-nofetch\" && \"${ENVOY_SRCDIR}\" == \"$(bazel info workspace)\" ]]; then\n  # shellcheck source=ci/filter_example_setup.sh\n  . \"$(dirname \"$0\")\"/filter_example_setup.sh\nelse\n  echo \"Skip setting up Envoy Filter Example.\"\nfi\n\nexport ENVOY_BUILD_FILTER_EXAMPLE=\"${FILTER_WORKSPACE_SET:-0}\"\n"
  },
  {
    "path": "ci/check_and_fix_format.sh",
    "content": "#!/bin/bash\n\nset -e\n\nDIFF_OUTPUT=\"${DIFF_OUTPUT:-/build/fix_format.diff}\"\n\n# We set this for two reasons. First, we want to ensure belt-and-braces that we check these formats\n# in CI in case the skip-on-file-change heuristics in proto_format.sh etc. are buggy. Second, this\n# prevents AZP cache weirdness.\nexport FORCE_PROTO_FORMAT=yes\nexport FORCE_PYTHON_FORMAT=yes\n\nfunction fix {\n  set +e\n  ci/do_ci.sh fix_format\n  ci/do_ci.sh fix_spelling\n  ci/do_ci.sh fix_spelling_pedantic\n  echo \"Format check failed, try apply following patch to fix:\"\n  git add api\n  git diff HEAD | tee \"${DIFF_OUTPUT}\"\n\n  exit 1\n}\n\n# If any of the checks fail, run the fix function above.\ntrap fix ERR\n\nci/do_ci.sh check_format\nci/do_ci.sh check_repositories\nci/do_ci.sh check_spelling\nci/do_ci.sh check_spelling_pedantic\n"
  },
  {
    "path": "ci/do_ci.sh",
    "content": "#!/bin/bash\n\n# Run a CI build/test target, e.g. docs, asan.\n\nset -e\n\nbuild_setup_args=\"\"\nif [[ \"$1\" == \"fix_format\" || \"$1\" == \"check_format\" || \"$1\" == \"check_repositories\" || \\\n        \"$1\" == \"check_spelling\" || \"$1\" == \"fix_spelling\" || \"$1\" == \"bazel.clang_tidy\" || \\\n        \"$1\" == \"check_spelling_pedantic\" || \"$1\" == \"fix_spelling_pedantic\" ]]; then\n  build_setup_args=\"-nofetch\"\nfi\n\nSRCDIR=\"${PWD}\"\nNO_BUILD_SETUP=\"${NO_BUILD_SETUP:-}\"\nif [[ -z \"$NO_BUILD_SETUP\" ]]; then\n    # shellcheck source=ci/setup_cache.sh\n    . \"$(dirname \"$0\")\"/setup_cache.sh\n    # shellcheck source=ci/build_setup.sh\n    . \"$(dirname \"$0\")\"/build_setup.sh $build_setup_args\nfi\ncd \"${SRCDIR}\"\n\nif [[ \"${ENVOY_BUILD_ARCH}\" == \"x86_64\" ]]; then\n  BUILD_ARCH_DIR=\"/linux/amd64\"\nelif [[ \"${ENVOY_BUILD_ARCH}\" == \"aarch64\" ]]; then\n  BUILD_ARCH_DIR=\"/linux/arm64\"\nelse\n  # Fall back to use the ENVOY_BUILD_ARCH itself.\n  BUILD_ARCH_DIR=\"/linux/${ENVOY_BUILD_ARCH}\"\nfi\n\necho \"building using ${NUM_CPUS} CPUs\"\necho \"building for ${ENVOY_BUILD_ARCH}\"\n\nfunction collect_build_profile() {\n  declare -g build_profile_count=${build_profile_count:-1}\n  mv -f \"$(bazel info output_base)/command.profile.gz\" \"${ENVOY_BUILD_PROFILE}/${build_profile_count}-$1.profile.gz\" || true\n  ((build_profile_count++))\n}\n\nfunction bazel_with_collection() {\n  local failed_logs\n  declare -r BAZEL_OUTPUT=\"${ENVOY_SRCDIR}\"/bazel.output.txt\n  bazel \"$@\" | tee \"${BAZEL_OUTPUT}\"\n  declare BAZEL_STATUS=\"${PIPESTATUS[0]}\"\n  if [ \"${BAZEL_STATUS}\" != \"0\" ]\n  then\n    pushd bazel-testlogs\n    failed_logs=$(grep \"  /build.*test.log\" \"${BAZEL_OUTPUT}\" | sed -e 's/  \\/build.*\\/testlogs\\/\\(.*\\)/\\1/')\n    while read -r f; do\n      cp --parents -f \"$f\" \"${ENVOY_FAILED_TEST_LOGS}\"\n    done <<< \"$failed_logs\"\n    popd\n    exit \"${BAZEL_STATUS}\"\n  fi\n  
collect_build_profile \"$1\"\n  run_process_test_result\n}\n\nfunction cp_binary_for_outside_access() {\n  DELIVERY_LOCATION=\"$1\"\n  cp -f \\\n    bazel-bin/\"${ENVOY_BIN}\" \\\n    \"${ENVOY_DELIVERY_DIR}\"/\"${DELIVERY_LOCATION}\"\n}\n\nfunction cp_debug_info_for_outside_access() {\n  DELIVERY_LOCATION=\"$1\"\n  cp -f \\\n    bazel-bin/\"${ENVOY_BIN}\".dwp \\\n    \"${ENVOY_DELIVERY_DIR}\"/\"${DELIVERY_LOCATION}\".dwp\n}\n\n\nfunction cp_binary_for_image_build() {\n  # TODO(mattklein123): Replace this with caching and a different job which creates images.\n  local BASE_TARGET_DIR=\"${ENVOY_SRCDIR}${BUILD_ARCH_DIR}\"\n  echo \"Copying binary for image build...\"\n  COMPILE_TYPE=\"$2\"\n  mkdir -p \"${BASE_TARGET_DIR}\"/build_\"$1\"\n  cp -f \"${ENVOY_DELIVERY_DIR}\"/envoy \"${BASE_TARGET_DIR}\"/build_\"$1\"\n  if [[ \"${COMPILE_TYPE}\" == \"dbg\" || \"${COMPILE_TYPE}\" == \"opt\" ]]; then\n    cp -f \"${ENVOY_DELIVERY_DIR}\"/envoy.dwp \"${BASE_TARGET_DIR}\"/build_\"$1\"\n  fi\n  mkdir -p \"${BASE_TARGET_DIR}\"/build_\"$1\"_stripped\n  strip \"${ENVOY_DELIVERY_DIR}\"/envoy -o \"${BASE_TARGET_DIR}\"/build_\"$1\"_stripped/envoy\n\n  # Copy for azp which doesn't preserve permissions, creating a tar archive\n  tar czf \"${ENVOY_BUILD_DIR}\"/envoy_binary.tar.gz -C \"${BASE_TARGET_DIR}\" build_\"$1\" build_\"$1\"_stripped\n\n  # Remove binaries to save space, only if BUILD_REASON exists (running in AZP)\n  [[ -z \"${BUILD_REASON}\" ]] || \\\n    rm -rf \"${BASE_TARGET_DIR}\"/build_\"$1\" \"${BASE_TARGET_DIR}\"/build_\"$1\"_stripped \"${ENVOY_DELIVERY_DIR}\"/envoy{,.dwp} \\\n      bazel-bin/\"${ENVOY_BIN}\"{,.dwp}\n}\n\nfunction bazel_binary_build() {\n  BINARY_TYPE=\"$1\"\n  if [[ \"${BINARY_TYPE}\" == \"release\" ]]; then\n    COMPILE_TYPE=\"opt\"\n  elif [[ \"${BINARY_TYPE}\" == \"debug\" ]]; then\n    COMPILE_TYPE=\"dbg\"\n  elif [[ \"${BINARY_TYPE}\" == \"sizeopt\" ]]; then\n    # The COMPILE_TYPE variable is redundant in this case and is only here for\n    # 
readability. It is already set in the .bazelrc config for sizeopt.\n    COMPILE_TYPE=\"opt\"\n    CONFIG_ARGS=\"--config=sizeopt\"\n  elif [[ \"${BINARY_TYPE}\" == \"fastbuild\" ]]; then\n    COMPILE_TYPE=\"fastbuild\"\n  fi\n\n  echo \"Building...\"\n  ENVOY_BIN=$(echo \"${ENVOY_BUILD_TARGET}\" | sed -e 's#^@\\([^/]*\\)/#external/\\1#;s#^//##;s#:#/#')\n\n  # This is a workaround for https://github.com/bazelbuild/bazel/issues/11834\n  [[ -n \"${ENVOY_RBE}\" ]] && rm -rf bazel-bin/\"${ENVOY_BIN}\"*\n\n  bazel build \"${BAZEL_BUILD_OPTIONS[@]}\" -c \"${COMPILE_TYPE}\" \"${ENVOY_BUILD_TARGET}\" ${CONFIG_ARGS}\n  collect_build_profile \"${BINARY_TYPE}\"_build\n\n  # Copy the built envoy binary somewhere that we can access outside of the\n  # container.\n  cp_binary_for_outside_access envoy\n\n  if [[ \"${COMPILE_TYPE}\" == \"dbg\" || \"${COMPILE_TYPE}\" == \"opt\" ]]; then\n    # Generate dwp file for debugging since we used split DWARF to reduce binary\n    # size\n    bazel build \"${BAZEL_BUILD_OPTIONS[@]}\" -c \"${COMPILE_TYPE}\" \"${ENVOY_BUILD_DEBUG_INFORMATION}\" ${CONFIG_ARGS}\n    # Copy the debug information\n    cp_debug_info_for_outside_access envoy\n  fi\n\n  cp_binary_for_image_build \"${BINARY_TYPE}\" \"${COMPILE_TYPE}\"\n\n}\n\nfunction run_process_test_result() {\n  echo \"running flaky test reporting script\"\n  \"${ENVOY_SRCDIR}\"/ci/flaky_test/run_process_xml.sh \"$CI_TARGET\"\n}\n\nCI_TARGET=$1\nshift\n\nif [[ $# -ge 1 ]]; then\n  COVERAGE_TEST_TARGETS=(\"$@\")\n  TEST_TARGETS=(\"$@\")\nelse\n  # Coverage test will add QUICHE tests by itself.\n  COVERAGE_TEST_TARGETS=(\"//test/...\")\n  TEST_TARGETS=(\"${COVERAGE_TEST_TARGETS[@]}\" \"@com_googlesource_quiche//:ci_tests\")\nfi\n\nif [[ \"$CI_TARGET\" == \"bazel.release\" ]]; then\n  # When testing memory consumption, we want to test against exact byte-counts\n  # where possible. 
As these differ between platforms and compile options, we\n  # define the 'release' builds as canonical and test them only in CI, so the\n  # toolchain is kept consistent. This ifdef is checked in\n  # test/common/stats/stat_test_utility.cc when computing\n  # Stats::TestUtil::MemoryTest::mode().\n  [[ \"${ENVOY_BUILD_ARCH}\" == \"x86_64\" ]] && BAZEL_BUILD_OPTIONS+=(\"--test_env=ENVOY_MEMORY_TEST_EXACT=true\")\n\n  setup_clang_toolchain\n  echo \"Testing ${TEST_TARGETS[*]} with options: ${BAZEL_BUILD_OPTIONS[*]}\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" -c opt \"${TEST_TARGETS[@]}\"\n\n  echo \"bazel release build with tests...\"\n  bazel_binary_build release\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.release.server_only\" ]]; then\n  setup_clang_toolchain\n  echo \"bazel release build...\"\n  bazel_binary_build release\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.sizeopt.server_only\" ]]; then\n  setup_clang_toolchain\n  echo \"bazel size optimized build...\"\n  bazel_binary_build sizeopt\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.sizeopt\" ]]; then\n  setup_clang_toolchain\n  echo \"Testing ${TEST_TARGETS[*]}\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" --config=sizeopt \"${TEST_TARGETS[@]}\"\n\n  echo \"bazel size optimized build with tests...\"\n  bazel_binary_build sizeopt\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.gcc\" ]]; then\n  BAZEL_BUILD_OPTIONS+=(\"--test_env=HEAPCHECK=\")\n  setup_gcc_toolchain\n\n  echo \"Testing ${TEST_TARGETS[*]}\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" -c fastbuild \"${TEST_TARGETS[@]}\"\n\n  echo \"bazel release build with gcc...\"\n  bazel_binary_build fastbuild\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.debug\" ]]; then\n  setup_clang_toolchain\n  echo \"Testing ${TEST_TARGETS[*]}\"\n  bazel test \"${BAZEL_BUILD_OPTIONS[@]}\" -c dbg \"${TEST_TARGETS[@]}\"\n\n  echo \"bazel debug build with tests...\"\n  bazel_binary_build debug\n  exit 0\nelif [[ \"$CI_TARGET\" 
== \"bazel.debug.server_only\" ]]; then\n  setup_clang_toolchain\n  echo \"bazel debug build...\"\n  bazel_binary_build debug\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.asan\" ]]; then\n  setup_clang_toolchain\n  BAZEL_BUILD_OPTIONS+=(-c opt --copt -g \"--config=clang-asan\" \"--build_tests_only\")\n  echo \"bazel ASAN/UBSAN debug build with tests\"\n  echo \"Building and testing envoy tests ${TEST_TARGETS[*]}\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" \"${TEST_TARGETS[@]}\"\n  if [ \"${ENVOY_BUILD_FILTER_EXAMPLE}\" == \"1\" ]; then\n    echo \"Building and testing envoy-filter-example tests...\"\n    pushd \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"\n    bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" \"${ENVOY_FILTER_EXAMPLE_TESTS[@]}\"\n    popd\n  fi\n\n  # TODO(mattklein123): This part of the test is now flaky in CI and it's unclear why, possibly\n  # due to sandboxing issue. Debug and enable it again.\n  # if [ \"${CI_SKIP_INTEGRATION_TEST_TRAFFIC_TAPPING}\" != \"1\" ] ; then\n    # Also validate that integration test traffic tapping (useful when debugging etc.)\n    # works. This requires that we set TAP_PATH. 
We do this under bazel.asan to\n    # ensure a debug build in CI.\n    # echo \"Validating integration test traffic tapping...\"\n    # bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" \\\n    #   --run_under=@envoy//bazel/test:verify_tap_test.sh \\\n    #   //test/extensions/transport_sockets/tls/integration:ssl_integration_test\n  # fi\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.tsan\" ]]; then\n  setup_clang_toolchain\n  echo \"bazel TSAN debug build with tests\"\n  echo \"Building and testing envoy tests ${TEST_TARGETS[*]}\"\n  bazel_with_collection test --config=rbe-toolchain-tsan \"${BAZEL_BUILD_OPTIONS[@]}\" -c dbg --build_tests_only \"${TEST_TARGETS[@]}\"\n  if [ \"${ENVOY_BUILD_FILTER_EXAMPLE}\" == \"1\" ]; then\n    echo \"Building and testing envoy-filter-example tests...\"\n    pushd \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"\n    bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" -c dbg --config=clang-tsan \"${ENVOY_FILTER_EXAMPLE_TESTS[@]}\"\n    popd\n  fi\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.msan\" ]]; then\n  ENVOY_STDLIB=libc++\n  setup_clang_toolchain\n  # rbe-toolchain-msan must comes as first to win library link order.\n  BAZEL_BUILD_OPTIONS=(\"--config=rbe-toolchain-msan\" \"${BAZEL_BUILD_OPTIONS[@]}\" \"-c dbg\" \"--build_tests_only\")\n  echo \"bazel MSAN debug build with tests\"\n  echo \"Building and testing envoy tests ${TEST_TARGETS[*]}\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" \"${TEST_TARGETS[@]}\"\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.dev\" ]]; then\n  setup_clang_toolchain\n  # This doesn't go into CI but is available for developer convenience.\n  echo \"bazel fastbuild build with tests...\"\n  echo \"Building...\"\n  bazel_binary_build fastbuild\n\n  echo \"Building and testing ${TEST_TARGETS[*]}\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" -c fastbuild \"${TEST_TARGETS[@]}\"\n  # TODO(foreseeable): consolidate this and the API tool tests in a dedicated target.\n  
bazel_with_collection //tools/envoy_headersplit:headersplit_test --spawn_strategy=local\n  bazel_with_collection //tools/envoy_headersplit:replace_includes_test --spawn_strategy=local\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.compile_time_options\" ]]; then\n  # Right now, none of the available compile-time options conflict with each other. If this\n  # changes, this build type may need to be broken up.\n  # TODO(mpwarres): remove quiche=enabled once QUICHE is built by default.\n  COMPILE_TIME_OPTIONS=(\n    \"--define\" \"signal_trace=disabled\"\n    \"--define\" \"hot_restart=disabled\"\n    \"--define\" \"google_grpc=disabled\"\n    \"--define\" \"boringssl=fips\"\n    \"--define\" \"log_debug_assert_in_release=enabled\"\n    \"--define\" \"quiche=enabled\"\n    \"--define\" \"wasm=disabled\"\n    \"--define\" \"path_normalization_by_default=true\"\n    \"--define\" \"deprecated_features=disabled\"\n    \"--define\" \"use_new_codecs_in_integration_tests=true\"\n    \"--define\" \"tcmalloc=gperftools\"\n    \"--define\" \"zlib=ng\")\n\n  ENVOY_STDLIB=\"${ENVOY_STDLIB:-libstdc++}\"\n  setup_clang_toolchain\n  # This doesn't go into CI but is available for developer convenience.\n  echo \"bazel with different compiletime options build with tests...\"\n\n  if [[ \"${TEST_TARGETS[*]}\" == \"//test/...\" ]]; then\n    cd \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"\n    TEST_TARGETS=(\"@envoy//test/...\")\n  fi\n  # Building all the dependencies from scratch to link them against libc++.\n  echo \"Building and testing ${TEST_TARGETS[*]}\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" \"${COMPILE_TIME_OPTIONS[@]}\" -c dbg \"${TEST_TARGETS[@]}\" --test_tag_filters=-nofips --build_tests_only\n\n  # Legacy codecs \"--define legacy_codecs_in_integration_tests=true\" should also be tested in\n  # integration tests with asan.\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" \"${COMPILE_TIME_OPTIONS[@]}\" -c dbg @envoy//test/integration/... 
--config=clang-asan --build_tests_only\n\n  # \"--define log_debug_assert_in_release=enabled\" must be tested with a release build, so run only\n  # these tests under \"-c opt\" to save time in CI.\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" \"${COMPILE_TIME_OPTIONS[@]}\" -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test\n\n  echo \"Building binary...\"\n  bazel build \"${BAZEL_BUILD_OPTIONS[@]}\" \"${COMPILE_TIME_OPTIONS[@]}\" -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips\n  collect_build_profile build\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.api\" ]]; then\n  setup_clang_toolchain\n  echo \"Validating API structure...\"\n  ./tools/api/validate_structure.py\n  echo \"Building API...\"\n  bazel build \"${BAZEL_BUILD_OPTIONS[@]}\" -c fastbuild @envoy_api_canonical//envoy/...\n  echo \"Testing API...\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... 
\\\n    @envoy_api_canonical//tools:tap2pcap_test\n  echo \"Testing API boosting (unit tests)...\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" -c fastbuild @envoy_dev//clang_tools/api_booster/...\n  echo \"Testing API boosting (golden C++ tests)...\"\n  # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet.\n  LLVM_CONFIG=\"${LLVM_ROOT}\"/bin/llvm-config BAZEL_BUILD_OPTIONS=\"--config=clang\" python3.8 ./tools/api_boost/api_boost_test.py\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.coverage\" || \"$CI_TARGET\" == \"bazel.fuzz_coverage\" ]]; then\n  setup_clang_toolchain\n  echo \"${CI_TARGET} build with tests ${COVERAGE_TEST_TARGETS[*]}\"\n\n  [[ \"$CI_TARGET\" == \"bazel.fuzz_coverage\" ]] && export FUZZ_COVERAGE=true\n\n  # We use custom BAZEL_BUILD_OPTIONS here to cover profiler's code.\n  BAZEL_BUILD_OPTIONS=\"${BAZEL_BUILD_OPTIONS[*]} --define tcmalloc=gperftools\" test/run_envoy_bazel_coverage.sh \"${COVERAGE_TEST_TARGETS[@]}\"\n  collect_build_profile coverage\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.clang_tidy\" ]]; then\n  # clang-tidy will warn on standard library issues with libc++\n  ENVOY_STDLIB=\"libstdc++\"\n  setup_clang_toolchain\n  BAZEL_BUILD_OPTIONS=\"${BAZEL_BUILD_OPTIONS[*]}\" NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh \"$@\"\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.coverity\" ]]; then\n  # Coverity Scan version 2017.07 fails to analyze the entirety of the Envoy\n  # build when compiled with Clang 5. Revisit when Coverity Scan explicitly\n  # supports Clang 5. 
Until this issue is resolved, run Coverity Scan with\n  # the GCC toolchain.\n  setup_gcc_toolchain\n  echo \"bazel Coverity Scan build\"\n  echo \"Building...\"\n  /build/cov-analysis/bin/cov-build --dir \"${ENVOY_BUILD_DIR}\"/cov-int bazel build --action_env=LD_PRELOAD \"${BAZEL_BUILD_OPTIONS[@]}\" \\\n    -c opt \"${ENVOY_BUILD_TARGET}\"\n  # tar up the coverity results\n  tar czvf \"${ENVOY_BUILD_DIR}\"/envoy-coverity-output.tgz -C \"${ENVOY_BUILD_DIR}\" cov-int\n  # Copy the Coverity results somewhere that we can access outside of the container.\n  cp -f \\\n     \"${ENVOY_BUILD_DIR}\"/envoy-coverity-output.tgz \\\n     \"${ENVOY_DELIVERY_DIR}\"/envoy-coverity-output.tgz\n  exit 0\nelif [[ \"$CI_TARGET\" == \"bazel.fuzz\" ]]; then\n  setup_clang_toolchain\n  FUZZ_TEST_TARGETS=(\"$(bazel query \"attr('tags','fuzzer',${TEST_TARGETS[*]})\")\")\n  echo \"bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS[*]}\"\n  echo \"Building envoy fuzzers and executing 100 fuzz iterations...\"\n  bazel_with_collection test \"${BAZEL_BUILD_OPTIONS[@]}\" --config=asan-fuzzer \"${FUZZ_TEST_TARGETS[@]}\" --test_arg=\"-runs=10\"\n  exit 0\nelif [[ \"$CI_TARGET\" == \"fix_format\" ]]; then\n  # proto_format.sh needs to build protobuf.\n  setup_clang_toolchain\n  echo \"fix_format...\"\n  ./tools/code_format/check_format.py fix\n  ./tools/code_format/format_python_tools.sh fix\n  ./tools/proto_format/proto_format.sh fix --test\n  exit 0\nelif [[ \"$CI_TARGET\" == \"check_format\" ]]; then\n  # proto_format.sh needs to build protobuf.\n  setup_clang_toolchain\n  echo \"check_format_test...\"\n  ./tools/code_format/check_format_test_helper.sh --log=WARN\n  echo \"check_format...\"\n  ./tools/code_format/check_shellcheck_format.sh\n  ./tools/code_format/check_format.py check\n  ./tools/code_format/format_python_tools.sh check\n  ./tools/proto_format/proto_format.sh check --test\n  exit 0\nelif [[ \"$CI_TARGET\" == \"check_repositories\" ]]; then\n  echo 
\"check_repositories...\"\n  ./tools/check_repositories.sh\n  exit 0\nelif [[ \"$CI_TARGET\" == \"check_spelling\" ]]; then\n  echo \"check_spelling...\"\n  ./tools/spelling/check_spelling.sh check\n  exit 0\nelif [[ \"$CI_TARGET\" == \"fix_spelling\" ]];then\n  echo \"fix_spell...\"\n  ./tools/spelling/check_spelling.sh fix\n  exit 0\nelif [[ \"$CI_TARGET\" == \"check_spelling_pedantic\" ]]; then\n  echo \"check_spelling_pedantic...\"\n  ./tools/spelling/check_spelling_pedantic.py --mark check\n  exit 0\nelif [[ \"$CI_TARGET\" == \"fix_spelling_pedantic\" ]]; then\n  echo \"fix_spelling_pedantic...\"\n  ./tools/spelling/check_spelling_pedantic.py fix\n  exit 0\nelif [[ \"$CI_TARGET\" == \"docs\" ]]; then\n  echo \"generating docs...\"\n  # Validate dependency relationships between core/extensions and external deps.\n  tools/dependency/validate_test.py\n  tools/dependency/validate.py\n  # Build docs.\n  docs/build.sh\n  exit 0\nelif [[ \"$CI_TARGET\" == \"verify_examples\" ]]; then\n  echo \"verify examples...\"\n  docker load < \"$ENVOY_DOCKER_BUILD_DIR/docker/envoy-docker-images.tar.xz\"\n  _images=$(docker image list --format \"{{.Repository}}\")\n  while read -r line; do images+=(\"$line\"); done \\\n      <<< \"$_images\"\n  _tags=$(docker image list --format \"{{.Tag}}\")\n  while read -r line; do tags+=(\"$line\"); done \\\n      <<< \"$_tags\"\n  for i in \"${!images[@]}\"; do\n      if [[ \"${images[i]}\" =~ \"envoy\" ]]; then\n          docker tag \"${images[$i]}:${tags[$i]}\" \"${images[$i]}:latest\"\n      fi\n  done\n  docker images\n  sudo apt-get update -y\n  sudo apt-get install -y -qq --no-install-recommends redis-tools\n  export DOCKER_NO_PULL=1\n  umask 027\n  ci/verify_examples.sh\n  exit 0\nelse\n  echo \"Invalid do_ci.sh target, see ci/README.md for valid targets.\"\n  exit 1\nfi\n"
  },
  {
    "path": "ci/do_circle_ci.sh",
    "content": "#!/bin/bash\n\nset -e\n\n# Workaround for argument too long issue in protoc\nulimit -s 16384\n\n# bazel uses jgit internally and the default circle-ci .gitconfig says to\n# convert https://github.com to ssh://git@github.com, which jgit does not support.\nif [[ -e \"${HOME}/.gitconfig\" ]]; then\n  mv ~/.gitconfig ~/.gitconfig_save\nfi\n\n# Workaround for not using ci/run_envoy_docker.sh\n# Create a fake home. Python site libs tries to do getpwuid(3) if we don't and the CI\n# Docker image gets confused as it has no passwd entry when running non-root\n# unless we do this.\nFAKE_HOME=/tmp/fake_home\nmkdir -p \"${FAKE_HOME}\"\nexport HOME=\"${FAKE_HOME}\"\nexport PYTHONUSERBASE=\"${FAKE_HOME}\"\nexport USER=bazel\n\nENVOY_SRCDIR=\"$(pwd)\"\nexport ENVOY_SRCDIR\n\n# xlarge resource_class.\n# See note: https://circleci.com/docs/2.0/configuration-reference/#resource_class for why we\n# hard code this (basically due to how docker works).\nexport NUM_CPUS=6\n\n# CircleCI doesn't support IPv6 by default, so we run all tests with IPv4 only.\n# IPv6 tests are run with Azure Pipelines.\nexport BAZEL_BUILD_EXTRA_OPTIONS+=\" \\\n    --test_env=ENVOY_IP_TEST_VERSIONS=v4only \\\n    --local_cpu_resources=${NUM_CPUS} \\\n    --action_env=HOME \\\n    --action_env=PYTHONUSERBASE \\\n    --test_env=HOME \\\n    --test_env=PYTHONUSERBASE\"\n\nfunction finish {\n  echo \"disk space at end of build:\"\n  df -h\n}\ntrap finish EXIT\n\necho \"disk space at beginning of build:\"\ndf -h\n\nci/do_ci.sh \"$@\"\n"
  },
  {
    "path": "ci/do_coverity_local.sh",
    "content": "#!/bin/bash\n#\n#  do_coverity_local.sh\n#\n#  This script builds Envoy with the Coverity Scan Built Tool.\n#\n#  It expects the following environment variables to be set:\n#    COVERITY_TOKEN      - set to the user's Coverity Scan project token.\n#    COVERITY_USER_EMAIL - set to the email address used with the Coverity account.\n#                          defaults to the local git config user.email.\n\n\nset -e\n\n. ./ci/envoy_build_sha.sh\n\n[[ -z \"${ENVOY_DOCKER_BUILD_DIR}\" ]] && ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build\nmkdir -p \"${ENVOY_DOCKER_BUILD_DIR}\"\n\nTEST_TYPE=\"bazel.coverity\"\nCOVERITY_USER_EMAIL=\"${COVERITY_USER_EMAIL:-$(git config user.email)}\"\nCOVERITY_OUTPUT_FILE=\"${ENVOY_DOCKER_BUILD_DIR}\"/envoy/source/exe/envoy-coverity-output.tgz\n\nif [ -n \"${COVERITY_TOKEN}\" ]\nthen\n  pushd \"${ENVOY_DOCKER_BUILD_DIR}\"\n  rm -rf cov-analysis\n  wget https://scan.coverity.com/download/linux64 --post-data \"token=${COVERITY_TOKEN}&project=Envoy+Proxy\" -O coverity_tool.tgz\n  tar xvf coverity_tool.tgz\n  mv cov-analysis-linux* cov-analysis\n  popd\nelse\n  echo \"ERROR: COVERITY_TOKEN is required to download and run Coverity Scan.\"\n  exit 1\nfi\n\nci/run_envoy_docker.sh \"ci/do_ci.sh ${TEST_TYPE}\"\n\n# Check the artifact size as an approximation for determining if the scan tool was successful.\nif [[ $(find \"${COVERITY_OUTPUT_FILE}\" -type f -size +256M 2>/dev/null) ]]\nthen\n  echo \"Uploading Coverity Scan build\"\n  curl \\\n    --form token=\"${COVERITY_TOKEN}\" \\\n    --form email=\"${COVERITY_USER_EMAIL}\" \\\n    --form file=@\"${COVERITY_OUTPUT_FILE}\" \\\n    --form version=\"${ENVOY_BUILD_SHA}\" \\\n    --form description=\"Envoy Proxy Build ${ENVOY_BUILD_SHA}\" \\\n    https://scan.coverity.com/projects/envoy-proxy\nelse\n  echo \"Coverity Scan output file appears to be too small.\"\n  echo \"Not submitting build for analysis.\"\n  exit 1\nfi\n"
  },
  {
    "path": "ci/docker-entrypoint.sh",
    "content": "#!/usr/bin/env sh\nset -e\n\nloglevel=\"${loglevel:-}\"\n\n# if the first argument look like a parameter (i.e. start with '-'), run Envoy\nif [ \"${1#-}\" != \"$1\" ]; then\n\tset -- envoy \"$@\"\nfi\n\nif [ \"$1\" = 'envoy' ]; then\n\t# set the log level if the $loglevel variable is set\n\tif [ -n \"$loglevel\" ]; then\n\t\tset -- \"$@\" --log-level \"$loglevel\"\n\tfi\nfi\n\nif [ \"$ENVOY_UID\" != \"0\" ]; then\n    if [ -n \"$ENVOY_UID\" ]; then\n\tusermod -u \"$ENVOY_UID\" envoy\n    fi\n    if [ -n \"$ENVOY_GID\" ]; then\n\tgroupmod -g \"$ENVOY_GID\" envoy\n    fi\n    # Ensure the envoy user is able to write to container logs\n    chown envoy:envoy /dev/stdout /dev/stderr\n    su-exec envoy \"${@}\"\nelse\n    exec \"${@}\"\nfi\n"
  },
  {
    "path": "ci/docker_ci.sh",
    "content": "#!/bin/bash\n\n# Do not ever set -x here, it is a security hazard as it will place the credentials below in the\n# CI logs.\nset -e\n\nENVOY_DOCKER_IMAGE_DIRECTORY=\"${ENVOY_DOCKER_IMAGE_DIRECTORY:-${BUILD_STAGINGDIRECTORY:-.}/build_images}\"\n\n# Setting environments for buildx tools\nconfig_env() {\n  # Qemu configurations\n  docker run --rm --privileged multiarch/qemu-user-static --reset -p yes\n\n  # Remove older build instance\n  docker buildx rm multi-builder || :\n  docker buildx create --use --name multi-builder --platform linux/arm64,linux/amd64\n}\n\nbuild_platforms() {\n  TYPE=$1\n  FILE_SUFFIX=\"${TYPE/-debug/}\"\n\n  if [[ -z \"${FILE_SUFFIX}\" ]]; then\n    echo \"linux/arm64,linux/amd64\"\n  else\n    echo \"linux/amd64\"\n  fi\n}\n\nbuild_args() {\n  TYPE=$1\n  FILE_SUFFIX=\"${TYPE/-debug/}\"\n\n  printf ' -f ci/Dockerfile-envoy%s' \"${FILE_SUFFIX}\"\n  if [[ \"${TYPE}\" == *-debug ]]; then\n      printf ' --build-arg ENVOY_BINARY_SUFFIX='\n  elif [[ \"${TYPE}\" == \"-google-vrp\" ]]; then\n      printf ' --build-arg ENVOY_VRP_BASE_IMAGE=%s' \"${VRP_BASE_IMAGE}\"\n  fi\n}\n\nuse_builder() {\n  TYPE=$1\n  if [[ \"${TYPE}\" == \"-google-vrp\" ]]; then\n    docker buildx use default\n  else\n    docker buildx use multi-builder\n  fi\n}\n\nIMAGES_TO_SAVE=()\n\nbuild_images() {\n  local _args args=()\n  TYPE=$1\n  BUILD_TAG=$2\n\n  use_builder \"${TYPE}\"\n  _args=$(build_args \"${TYPE}\")\n  read -ra args <<< \"$_args\"\n  PLATFORM=\"$(build_platforms \"${TYPE}\")\"\n\n  docker buildx build --platform \"${PLATFORM}\" \"${args[@]}\" -t \"${BUILD_TAG}\" .\n\n  PLATFORM=\"$(build_platforms \"${TYPE}\" | tr ',' ' ')\"\n  # docker buildx load cannot have multiple platform, load individually\n  for ARCH in ${PLATFORM}; do\n    if [[ \"${ARCH}\" == \"linux/amd64\" ]]; then\n      IMAGE_TAG=\"${BUILD_TAG}\"\n    else\n      IMAGE_TAG=\"${BUILD_TAG}-${ARCH/linux\\//}\"\n    fi\n    docker buildx build --platform \"${ARCH}\" \"${args[@]}\" -t 
\"${IMAGE_TAG}\" . --load\n    IMAGES_TO_SAVE+=(\"${IMAGE_TAG}\")\n  done\n}\n\npush_images() {\n  local _args args=()\n  TYPE=$1\n  BUILD_TAG=$2\n\n  use_builder \"${TYPE}\"\n  _args=$(build_args \"${TYPE}\")\n  read -ra args <<< \"$_args\"\n  PLATFORM=\"$(build_platforms \"${TYPE}\")\"\n  # docker buildx doesn't do push with default builder\n  docker buildx build --platform \"${PLATFORM}\" \"${args[@]}\" -t \"${BUILD_TAG}\" . --push || \\\n    docker push \"${BUILD_TAG}\"\n}\n\nMASTER_BRANCH=\"refs/heads/master\"\nRELEASE_BRANCH_REGEX=\"^refs/heads/release/v.*\"\nRELEASE_TAG_REGEX=\"^refs/tags/v.*\"\n\n# For master builds and release branch builds use the dev repo. Otherwise we assume it's a tag and\n# we push to the primary repo.\nif [[ \"${AZP_BRANCH}\" =~ ${RELEASE_TAG_REGEX} ]]; then\n  IMAGE_POSTFIX=\"\"\n  IMAGE_NAME=\"${AZP_BRANCH/refs\\/tags\\//}\"\nelse\n  IMAGE_POSTFIX=\"-dev\"\n  IMAGE_NAME=\"${AZP_SHA1}\"\nfi\n\n# This prefix is altered for the private security images on setec builds.\nDOCKER_IMAGE_PREFIX=\"${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}\"\n\n# \"-google-vrp\" must come after \"\" to ensure we rebuild the local base image dependency.\nBUILD_TYPES=(\"\" \"-debug\" \"-alpine\" \"-alpine-debug\" \"-google-vrp\")\n\n# Configure docker-buildx tools\nconfig_env\n\n# VRP base image is only for amd64\nVRP_BASE_IMAGE=\"${DOCKER_IMAGE_PREFIX}${IMAGE_POSTFIX}:${IMAGE_NAME}\"\n\n# Test the docker build in all cases, but use a local tag that we will overwrite before push in the\n# cases where we do push.\nfor BUILD_TYPE in \"${BUILD_TYPES[@]}\"; do\n  build_images \"${BUILD_TYPE}\" \"${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}\"\ndone\n\nmkdir -p \"${ENVOY_DOCKER_IMAGE_DIRECTORY}\"\nENVOY_DOCKER_TAR=\"${ENVOY_DOCKER_IMAGE_DIRECTORY}/envoy-docker-images.tar.xz\"\necho \"Saving built images to ${ENVOY_DOCKER_TAR}.\"\ndocker save \"${IMAGES_TO_SAVE[@]}\" | xz -T0 -2 >\"${ENVOY_DOCKER_TAR}\"\n\n# Only push images for master builds, 
release branch builds, and tag builds.\nif [[ \"${AZP_BRANCH}\" != \"${MASTER_BRANCH}\" ]] &&\n  ! [[ \"${AZP_BRANCH}\" =~ ${RELEASE_BRANCH_REGEX} ]] &&\n  ! [[ \"${AZP_BRANCH}\" =~ ${RELEASE_TAG_REGEX} ]]; then\n  echo 'Ignoring non-master branch or tag for docker push.'\n  exit 0\nfi\n\ndocker login -u \"$DOCKERHUB_USERNAME\" -p \"$DOCKERHUB_PASSWORD\"\n\nfor BUILD_TYPE in \"${BUILD_TYPES[@]}\"; do\n  push_images \"${BUILD_TYPE}\" \"${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}\"\n\n  # Only push latest on master builds.\n  if [[ \"${AZP_BRANCH}\" == \"${MASTER_BRANCH}\" ]]; then\n    docker tag \"${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}\" \"${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest\"\n    push_images \"${BUILD_TYPE}\" \"${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest\"\n  fi\n\n  # Push vX.Y-latest to tag the latest image in a release line\n  if [[ \"${AZP_BRANCH}\" =~ ${RELEASE_TAG_REGEX} ]]; then\n    RELEASE_LINE=$(echo \"$IMAGE_NAME\" | sed -E 's/(v[0-9]+\\.[0-9]+)\\.[0-9]+/\\1-latest/')\n    docker tag \"${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}\" \"${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}\"\n    push_images \"${BUILD_TYPE}\" \"${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}\"\n  fi\ndone\n"
  },
  {
    "path": "ci/docker_rebuild_google-vrp.sh",
    "content": "#!/bin/bash\n\n# Script to rebuild Dockerfile-envoy-google-vrp locally (i.e. not in CI) for development purposes.\n# This makes use of the latest envoy-dev base image on Docker Hub as the base and takes an\n# optional local path for an Envoy binary. When a custom local Envoy binary is used, the script\n# switches to using ${BASE_DOCKER_IMAGE} for the build, which should be configured to provide\n# compatibility with your local build environment (specifically glibc).\n#\n# Usage:\n#\n# Basic rebuild of Docker image (tagged envoy-google-vrp:local):\n#\n#   ./ci/docker_rebuild_google-vrp.sh\n#\n# Basic rebuild of Docker image (tagged envoy-google-vrp:local) with some local Envoy binary:\n#\n#   bazel build //source/exe:envoy-static --config=libc++ -copt\n#   ./ci/docker_rebuild_google-vrp.sh bazel-bin/source/exe/envoy-static\n\nset -e\n\n# Don't use the local envoy-dev, but pull from Docker Hub instead, this avoids having to rebuild\n# this local dep which is fairly stable.\nBASE_DOCKER_IMAGE=\"envoyproxy/envoy-dev:latest\"\n\nBUILD_DIR=\"$(mktemp -d)\"\ndeclare -r BUILD_DIR\ncp ci/Dockerfile-envoy-google-vrp \"${BUILD_DIR}\"\ndeclare -r DOCKER_BUILD_FILE=\"${BUILD_DIR}\"/Dockerfile-envoy-google-vrp\n\n# If we have a local Envoy binary, use a variant of the build environment that supports it.\nif [[ -n \"$1\" ]]; then\n  # This should match your local machine if you are building custom Envoy binaries outside of Docker.\n  # This provides compatibility of locally built Envoy and glibc in the Docker env.\n  BASE_DOCKER_IMAGE=\"ubuntu:20.04\"\n  # Copy the binary to deal with symlinks in Bazel cache and Docker daemon confusion.\n  declare -r LOCAL_ENVOY=\"envoy-binary\"\n  cp -f \"$1\" \"${PWD}/${LOCAL_ENVOY}\"\n  sed -i -e \"s@# ADD %local envoy bin%@ADD ${LOCAL_ENVOY}@\" \"${DOCKER_BUILD_FILE}\"\nfi\n\ncat \"${DOCKER_BUILD_FILE}\"\n\ndocker build -t \"envoy-google-vrp:local\" --build-arg \"ENVOY_VRP_BASE_IMAGE=${BASE_DOCKER_IMAGE}\" -f 
\"${DOCKER_BUILD_FILE}\" .\n\nif [[ -n \"$1\" ]]; then\n  rm -f \"${LOCAL_ENVOY}\"\nfi\nrm -r \"${BUILD_DIR}\"\n"
  },
  {
    "path": "ci/envoy_build_sha.sh",
    "content": "#!/bin/bash\n\nENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu \"$(dirname \"$0\")\"/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu:\\(.*\\)#\\1#' | uniq)\n[[ $(wc -l <<< \"${ENVOY_BUILD_SHA}\" | awk '{$1=$1};1') == 1 ]] || (echo \".bazelrc envoyproxy/envoy-build-ubuntu hashes are inconsistent!\" && exit 1)\n"
  },
  {
    "path": "ci/filter_example_mirror.sh",
    "content": "#!/bin/bash\n\nset -e\n\nENVOY_SRCDIR=$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/../\" && pwd)\nCHECKOUT_DIR=../envoy-filter-example\n\nif [ -z \"$CIRCLE_PULL_REQUEST\" ] && [ \"$CIRCLE_BRANCH\" == \"master\" ]\nthen\n  echo \"Cloning...\"\n  git clone git@github.com:envoyproxy/envoy-filter-example \"$CHECKOUT_DIR\"\n\n  git -C \"$CHECKOUT_DIR\" config user.name \"envoy-filter-example(CircleCI)\"\n  git -C \"$CHECKOUT_DIR\" config user.email envoy-filter-example@users.noreply.github.com\n  git -C \"$CHECKOUT_DIR\" fetch\n  git -C \"$CHECKOUT_DIR\" checkout -B master origin/master\n\n  echo \"Updating Submodule...\"\n  # Update submodule to latest Envoy SHA\n  ENVOY_SHA=$(git rev-parse HEAD)\n  git -C \"$CHECKOUT_DIR\" submodule update --init\n  git -C \"$CHECKOUT_DIR/envoy\" checkout \"$ENVOY_SHA\"\n\n  echo \"Updating Workspace file.\"\n  sed -e \"s|{ENVOY_SRCDIR}|envoy|\" \"${ENVOY_SRCDIR}\"/ci/WORKSPACE.filter.example > \"${CHECKOUT_DIR}\"/WORKSPACE\n\n  echo \"Committing, and Pushing...\"\n  git -C \"$CHECKOUT_DIR\" commit -a -m \"Update Envoy submodule to $ENVOY_SHA\"\n  git -C \"$CHECKOUT_DIR\" push origin master\n  echo \"Done\"\nfi\n"
  },
  {
    "path": "ci/filter_example_setup.sh",
    "content": "#!/bin/bash\n\n# Configure environment for Envoy Filter Example build and test.\n\nset -e\n\n# This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to.\nENVOY_FILTER_EXAMPLE_GITSHA=\"493e2e5bee10bbed1c3c097e09d83d7f672a9f2e\"\nENVOY_FILTER_EXAMPLE_SRCDIR=\"${BUILD_DIR}/envoy-filter-example\"\n\n# shellcheck disable=SC2034\nENVOY_FILTER_EXAMPLE_TESTS=(\n    \"//:echo2_integration_test\"\n    \"//http-filter-example:http_filter_integration_test\"\n    \"//:envoy_binary_test\")\n\nif [[ ! -d \"${ENVOY_FILTER_EXAMPLE_SRCDIR}/.git\" ]]; then\n  rm -rf \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"\n  git clone https://github.com/envoyproxy/envoy-filter-example.git \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"\nfi\n\n(cd \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\" && git fetch origin && git checkout -f \"${ENVOY_FILTER_EXAMPLE_GITSHA}\")\nsed -e \"s|{ENVOY_SRCDIR}|${ENVOY_SRCDIR}|\" \"${ENVOY_SRCDIR}\"/ci/WORKSPACE.filter.example > \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"/WORKSPACE\n\nmkdir -p \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"/bazel\nln -sf \"${ENVOY_SRCDIR}\"/bazel/get_workspace_status \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"/bazel/\ncp -f \"${ENVOY_SRCDIR}\"/.bazelrc \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"/\ncp -f \"$(bazel info workspace)\"/*.bazelrc \"${ENVOY_FILTER_EXAMPLE_SRCDIR}\"/\n\nexport FILTER_WORKSPACE_SET=1\n"
  },
  {
    "path": "ci/flaky_test/process_xml.py",
    "content": "#!/usr/bin/env python3\n\nimport subprocess\nimport os\nimport xml.etree.ElementTree as ET\nimport slack\nimport sys\n\n\n# Check if a test suite reports failure.\ndef checkTestStatus(file):\n  tree = ET.parse(file)\n\n  root = tree.getroot()\n\n  for testsuite in root:\n    if (testsuite.attrib['failures'] != '0'):\n      return False\n  return True\n\n\ndef parseXML(file, visited):\n  log_file = file.split('.')\n  log_file_path = \"\"\n\n  # This is dependent on the fact that log files reside in the same directory\n  # as their corresponding xml files.\n  for token in log_file[:-1]:\n    log_file_path += token\n  log_file_path += \".log\"\n\n  tree = ET.parse(file)\n\n  root = tree.getroot()\n  ret = \"\"\n\n  # This loop is dependent on the structure of xml file emitted for test runs.\n  # Should this change in the future, appropriate adjustments need to be made.\n  for testsuite in root:\n    if (testsuite.attrib['failures'] != '0'):\n      for testcase in testsuite:\n        for failure_msg in testcase:\n          if (testcase.attrib['name'], testsuite.attrib['name']) not in visited:\n            ret += \"-----------------------Flaky Testcase: {} in TestSuite: {} -----------------------\\n\".format(\n                testcase.attrib['name'], testsuite.attrib['name'])\n            ret += log_file_path + \"\\n\" + failure_msg.text + \"\\n\"\n            visited.add((testcase.attrib['name'], testsuite.attrib['name']))\n  return ret\n\n\n# The following function links the filepath of 'test.xml' (the result for the last attempt) with\n# that of its 'attempt_n.xml' file and stores it in a dictionary for easy lookup.\ndef processFindOutput(f, problematic_tests):\n  for line in f:\n    lineList = line.split('/')\n    filepath = \"\"\n    for i in range(len(lineList)):\n      if i >= len(lineList) - 2:\n        break\n      filepath += lineList[i] + \"/\"\n    filepath += \"test.xml\"\n    problematic_tests[filepath] = line.strip('\\n')\n\n\n# Prints out 
helpful information on the run using Git.\n# Should Git changes the output of the used commands in the future,\n# this will likely need adjustments as well.\ndef getGitInfo(CI_TARGET):\n  ret = \"\"\n  os.system(\"git remote -v > ${TMP_OUTPUT_PROCESS_XML}\")\n  os.system(\"git describe --all >> ${TMP_OUTPUT_PROCESS_XML}\")\n  os.system(\"git show >> ${TMP_OUTPUT_PROCESS_XML}\")\n  f = open(os.environ['TMP_OUTPUT_PROCESS_XML'], 'r+', encoding='utf-8')\n  # Fetching the URL from predefined env variable\n  envoy_link = os.environ[\"REPO_URI\"]\n  for line in [next(f) for x in range(6)]:\n    if line.split('/')[0] == 'remotes':\n      for token in line.split('/')[1:-1]:\n        envoy_link += '/' + token\n    ret += line\n\n  ret += \"link for additional content: \" + envoy_link + \" \\n\"\n  ret += \"azure build URI: \" + os.environ[\"BUILD_URI\"] + \" \\n\"\n  if CI_TARGET != \"\":\n    ret += \"In \" + CI_TARGET + \" build\\n\"\n  return ret\n\n\nif __name__ == \"__main__\":\n  CI_TARGET = \"\"\n  if len(sys.argv) == 2:\n    CI_TARGET = sys.argv[1]\n  output_msg = \"``` \\n\"\n  has_flaky_test = False\n\n  if os.getenv(\"TEST_TMPDIR\") and os.getenv(\"REPO_URI\") and os.getenv(\"BUILD_URI\"):\n    os.environ[\"TMP_OUTPUT_PROCESS_XML\"] = os.getenv(\"TEST_TMPDIR\") + \"/tmp_output_process_xml.txt\"\n  else:\n    print(\"set the env variables first\")\n    sys.exit(0)\n  output_msg += getGitInfo(CI_TARGET)\n\n  if CI_TARGET == \"MacOS\":\n    os.system('find ${TEST_TMPDIR}/ -name \"attempt_*.xml\" > ${TMP_OUTPUT_PROCESS_XML}')\n  else:\n    os.system(\n        'find ${TEST_TMPDIR}/**/**/**/**/bazel-testlogs/ -name \"attempt_*.xml\" > ${TMP_OUTPUT_PROCESS_XML}'\n    )\n\n  f = open(os.environ['TMP_OUTPUT_PROCESS_XML'], 'r+')\n  if f.closed:\n    print(\"cannot open {}\".format(os.environ['TMP_OUTPUT_PROCESS_XML']))\n\n  # All output of find command should be either failed or flaky tests, as only then will\n  # a test be rerun and have an 'attempt_n.xml' file. 
problematic_tests holds a lookup\n  # table between the last_attempt xml filepath and the failed previous attempt filepath.\n  problematic_tests = {}\n  processFindOutput(f, problematic_tests)\n\n  # Needed to make sure no duplicate flaky tests are going to be reported.\n  visited = set()\n\n  # The logic here goes as follows: If there is a test suite that has run multiple times,\n  # which produces attempt_*.xml files, it means that the end result of that test\n  # is either flaky or failed. So if we find that the last run of the test succeeds\n  # we know for sure that this is a flaky test.\n  for k in problematic_tests.keys():\n    if checkTestStatus(k):\n      has_flaky_test = True\n      output_msg += parseXML(problematic_tests[k], visited)\n  output_msg += \"``` \\n\"\n\n  if has_flaky_test:\n    if os.getenv(\"SLACK_TOKEN\"):\n      SLACKTOKEN = os.environ[\"SLACK_TOKEN\"]\n      client = slack.WebClient(SLACKTOKEN)\n      client.chat_postMessage(channel='test-flaky', text=output_msg, as_user=\"true\")\n    else:\n      print(output_msg)\n\n  os.remove(os.environ[\"TMP_OUTPUT_PROCESS_XML\"])\n"
  },
  {
    "path": "ci/flaky_test/requirements.txt",
    "content": "aiohttp==3.6.2 \\\n    --hash=sha256:1e984191d1ec186881ffaed4581092ba04f7c61582a177b187d3a2f07ed9719e \\\n    --hash=sha256:259ab809ff0727d0e834ac5e8a283dc5e3e0ecc30c4d80b3cd17a4139ce1f326 \\\n    --hash=sha256:2f4d1a4fdce595c947162333353d4a44952a724fba9ca3205a3df99a33d1307a \\\n    --hash=sha256:32e5f3b7e511aa850829fbe5aa32eb455e5534eaa4b1ce93231d00e2f76e5654 \\\n    --hash=sha256:344c780466b73095a72c616fac5ea9c4665add7fc129f285fbdbca3cccf4612a \\\n    --hash=sha256:460bd4237d2dbecc3b5ed57e122992f60188afe46e7319116da5eb8a9dfedba4 \\\n    --hash=sha256:4c6efd824d44ae697814a2a85604d8e992b875462c6655da161ff18fd4f29f17 \\\n    --hash=sha256:50aaad128e6ac62e7bf7bd1f0c0a24bc968a0c0590a726d5a955af193544bcec \\\n    --hash=sha256:6206a135d072f88da3e71cc501c59d5abffa9d0bb43269a6dcd28d66bfafdbdd \\\n    --hash=sha256:65f31b622af739a802ca6fd1a3076fd0ae523f8485c52924a89561ba10c49b48 \\\n    --hash=sha256:ae55bac364c405caa23a4f2d6cfecc6a0daada500274ffca4a9230e7129eac59 \\\n    --hash=sha256:b778ce0c909a2653741cb4b1ac7015b5c130ab9c897611df43ae6a58523cb965\nasync-timeout==3.0.1 \\\n    --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \\\n    --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3\nattrs==20.2.0 \\\n    --hash=sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594 \\\n    --hash=sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc\nchardet==3.0.4 \\\n    --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \\\n    --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691\nidna==2.10 \\\n    --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \\\n    --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0\nidna_ssl==1.1.0 \\\n    --hash=sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c\nmultidict==4.7.6 \\\n    
--hash=sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a \\\n    --hash=sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000 \\\n    --hash=sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2 \\\n    --hash=sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507 \\\n    --hash=sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5 \\\n    --hash=sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7 \\\n    --hash=sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d \\\n    --hash=sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463 \\\n    --hash=sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19 \\\n    --hash=sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3 \\\n    --hash=sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b \\\n    --hash=sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c \\\n    --hash=sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87 \\\n    --hash=sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7 \\\n    --hash=sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430 \\\n    --hash=sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255 \\\n    --hash=sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d\nslackclient==2.9.1 \\\n    --hash=sha256:214edd4a494cc74353c8084ec184ff97a116d4b12cde287f805a9af948ef39ae \\\n    --hash=sha256:3a3e84fd4f13d9715740c13ce6c3c25b970147aeeeec22ef137d796124dfcf08\ntyping-extensions==3.7.4.3 \\\n    --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \\\n    --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \\\n    --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f\nwheel==0.35.1 \\\n    
--hash=sha256:497add53525d16c173c2c1c733b8f655510e909ea78cc0e29d374243544b77a2 \\\n    --hash=sha256:99a22d87add3f634ff917310a3d87e499f19e663413a52eb9232c447aa646c9f\nyarl==1.6.0 \\\n    --hash=sha256:04a54f126a0732af75e5edc9addeaa2113e2ca7c6fce8974a63549a70a25e50e \\\n    --hash=sha256:3cc860d72ed989f3b1f3abbd6ecf38e412de722fb38b8f1b1a086315cf0d69c5 \\\n    --hash=sha256:5d84cc36981eb5a8533be79d6c43454c8e6a39ee3118ceaadbd3c029ab2ee580 \\\n    --hash=sha256:5e447e7f3780f44f890360ea973418025e8c0cdcd7d6a1b221d952600fd945dc \\\n    --hash=sha256:61d3ea3c175fe45f1498af868879c6ffeb989d4143ac542163c45538ba5ec21b \\\n    --hash=sha256:67c5ea0970da882eaf9efcf65b66792557c526f8e55f752194eff8ec722c75c2 \\\n    --hash=sha256:6f6898429ec3c4cfbef12907047136fd7b9e81a6ee9f105b45505e633427330a \\\n    --hash=sha256:7ce35944e8e61927a8f4eb78f5bc5d1e6da6d40eadd77e3f79d4e9399e263921 \\\n    --hash=sha256:b7c199d2cbaf892ba0f91ed36d12ff41ecd0dde46cbf64ff4bfe997a3ebc925e \\\n    --hash=sha256:c15d71a640fb1f8e98a1423f9c64d7f1f6a3a168f803042eaf3a5b5022fde0c1 \\\n    --hash=sha256:c22607421f49c0cb6ff3ed593a49b6a99c6ffdeaaa6c944cdda83c2393c8864d \\\n    --hash=sha256:c604998ab8115db802cc55cb1b91619b2831a6128a62ca7eea577fc8ea4d3131 \\\n    --hash=sha256:d088ea9319e49273f25b1c96a3763bf19a882cff774d1792ae6fba34bd40550a \\\n    --hash=sha256:db9eb8307219d7e09b33bcb43287222ef35cbcf1586ba9472b0a4b833666ada1 \\\n    --hash=sha256:e31fef4e7b68184545c3d68baec7074532e077bd1906b040ecfba659737df188 \\\n    --hash=sha256:e32f0fb443afcfe7f01f95172b66f279938fbc6bdaebe294b0ff6747fb6db020 \\\n    --hash=sha256:fcbe419805c9b20db9a51d33b942feddbf6e7fb468cb20686fd7089d4164c12a\n"
  },
  {
    "path": "ci/flaky_test/run_process_xml.sh",
    "content": "#!/bin/bash\n\n# shellcheck source=tools/shell_utils.sh\n. \"${ENVOY_SRCDIR}\"/tools/shell_utils.sh\n\nif [[ \"${ENVOY_BUILD_ARCH}\" == \"aarch64\" ]]; then\n  export MULTIDICT_NO_EXTENSIONS=1\n  export YARL_NO_EXTENSIONS=1\nfi\n\npython_venv process_xml \"$1\"\n"
  },
  {
    "path": "ci/flaky_test/run_process_xml_mac.sh",
    "content": "#!/bin/bash\n\npip3 install slackclient\n./ci/flaky_test/process_xml.py\n"
  },
  {
    "path": "ci/go_mirror.sh",
    "content": "#!/bin/bash\n\nset -e\n\nif [ -z \"$CIRCLE_PULL_REQUEST\" ] && [ \"$CIRCLE_BRANCH\" == \"master\" ]\nthen\n  tools/api/generate_go_protobuf.py\nfi\n"
  },
  {
    "path": "ci/mac_ci_setup.sh",
    "content": "#!/bin/bash\n\n# Installs the dependencies required for a macOS build via homebrew.\n# Tools are not upgraded to new versions.\n# See:\n# https://github.com/actions/virtual-environments/blob/master/images/macos/macos-10.15-Readme.md for\n# a list of pre-installed tools in the macOS image.\n\nexport HOMEBREW_NO_AUTO_UPDATE=1\nHOMEBREW_RETRY_ATTEMPTS=10\nHOMEBREW_RETRY_INTERVAL=1\n\n\nfunction is_installed {\n    brew ls --versions \"$1\" >/dev/null\n}\n\nfunction install {\n    echo \"Installing $1\"\n    if ! brew install \"$1\"; then\n        echo \"Failed to install $1\"\n        exit 1\n    fi\n}\n\nfunction retry () {\n    local returns=1 i=1\n    while ((i<=HOMEBREW_RETRY_ATTEMPTS)); do\n\tif \"$@\"; then\n\t    returns=0\n\t    break\n\telse\n\t    sleep \"$HOMEBREW_RETRY_INTERVAL\";\n\t    ((i++))\n\tfi\n    done\n    return \"$returns\"\n}\n\nif ! retry brew update; then\n    echo \"Failed to update homebrew\"\n    exit 1\nfi\n\nDEPS=\"automake cmake coreutils go libtool wget ninja\"\nfor DEP in ${DEPS}\ndo\n    is_installed \"${DEP}\" || install \"${DEP}\"\ndone\n\nif [ -n \"$CIRCLECI\" ]; then\n    # bazel uses jgit internally and the default circle-ci .gitconfig says to\n    # convert https://github.com to ssh://git@github.com, which jgit does not support.\n    mv ~/.gitconfig ~/.gitconfig_save\nfi\n\n# Required as bazel and a foreign bazelisk are installed in the latest macos vm image, we have\n# to unlink/overwrite them to install bazelisk\necho \"Installing bazelisk\"\nbrew reinstall --force bazelisk\nif ! brew link --overwrite bazelisk; then\n    echo \"Failed to install and link bazelisk\"\n    exit 1\nfi\n\nbazel version\n\npip3 install slackclient\n"
  },
  {
    "path": "ci/mac_ci_steps.sh",
    "content": "#!/bin/bash\n\nset -e\n\nfunction finish {\n  echo \"disk space at end of build:\"\n  df -h\n}\ntrap finish EXIT\n\necho \"disk space at beginning of build:\"\ndf -h\n\n# shellcheck source=ci/setup_cache.sh\n. \"$(dirname \"$0\")\"/setup_cache.sh\n\nread -ra BAZEL_BUILD_EXTRA_OPTIONS <<< \"${BAZEL_BUILD_EXTRA_OPTIONS:-}\"\nread -ra BAZEL_EXTRA_TEST_OPTIONS <<< \"${BAZEL_EXTRA_TEST_OPTIONS:-}\"\n\n# TODO(zuercher): remove --flaky_test_attempts when https://github.com/envoyproxy/envoy/issues/2428\n# is resolved.\nBAZEL_BUILD_OPTIONS=(\n    \"--curses=no\"\n    --show_task_finish\n    --verbose_failures\n    \"--action_env=PATH=/usr/local/bin:/opt/local/bin:/usr/bin:/bin\"\n    \"--test_output=all\"\n    \"--flaky_test_attempts=integration@2\"\n    \"${BAZEL_BUILD_EXTRA_OPTIONS[@]}\"\n    \"${BAZEL_EXTRA_TEST_OPTIONS[@]}\")\n\n# Build envoy and run tests as separate steps so that failure output\n# is somewhat more deterministic (rather than interleaving the build\n# and test steps).\n\nif [[ $# -gt 0 ]]; then\n  TEST_TARGETS=$*\nelse\n  TEST_TARGETS='//test/integration/...'\nfi\n\nif [[ \"$TEST_TARGETS\" == \"//test/...\" || \"$TEST_TARGETS\" == \"//test/integration/...\" ]]; then\n  bazel build \"${BAZEL_BUILD_OPTIONS[@]}\" //source/exe:envoy-static\nfi\nbazel test \"${BAZEL_BUILD_OPTIONS[@]}\" \"${TEST_TARGETS}\"\n\n# Additionally run macOS specific test suites\nbazel test \"${BAZEL_BUILD_OPTIONS[@]}\" //test/common/network:apple_dns_impl_test\n"
  },
  {
    "path": "ci/repokitteh/modules/azure_pipelines.star",
    "content": "load(\"github.com/repokitteh/modules/lib/utils.star\", \"react\")\n\n_azp_context_prefix = \"ci/azp: \"\n\ndef _retry_azp(organization, project, build_id, token):\n    \"\"\"Makes an Azure Pipelines Build API request with retry\"\"\"\n\n    url = \"https://dev.azure.com/{organization}/{project}/_apis/build/builds/{buildId}?retry=true&api-version=5.1\".format(organization = organization, project = project, buildId = build_id)\n    return http(url = url, method = \"PATCH\", headers = {\n        \"authorization\": \"Basic \" + token,\n        \"content-type\": \"application/json;odata=verbose\",\n    })\n\ndef _get_azp_checks():\n    github_checks = github.check_list_runs()[\"check_runs\"]\n\n    check_ids = []\n    checks = []\n    for check in github_checks:\n        # Filter out job level GitHub check, which is not individually retriable.\n        if check[\"app\"][\"slug\"] == \"azure-pipelines\" and \"jobId\" not in check[\"details_url\"] and check[\"external_id\"] not in check_ids:\n            check_ids.append(check[\"external_id\"])\n            checks.append(check)\n\n    return checks\n\ndef _retry(config, comment_id, command):\n    msgs = \"Retrying Azure Pipelines, to retry CircleCI checks, use `/retest-circle`.\\n\"\n    checks = _get_azp_checks()\n\n    retried_checks = []\n    for check in checks:\n        name_with_link = \"[{}]({})\".format(check[\"name\"], check[\"details_url\"])\n        if check[\"status\"] != \"completed\":\n            msgs += \"Cannot retry non-completed check: {}, please wait.\\n\".format(name_with_link)\n        elif check[\"conclusion\"] != \"failure\":\n            msgs += \"Check {} didn't fail.\\n\".format(name_with_link)\n        else:\n            _, build_id, project = check[\"external_id\"].split(\"|\")\n            _retry_azp(\"cncf\", project, build_id, config[\"token\"])\n            retried_checks.append(name_with_link)\n\n    if len(retried_checks) == 0:\n        react(comment_id, msgs)\n    
else:\n        react(comment_id, None)\n        msgs += \"Retried failed jobs in: {}\".format(\", \".join(retried_checks))\n        github.issue_create_comment(msgs)\n\nhandlers.command(name = \"retry-azp\", func = _retry)\n"
  },
  {
    "path": "ci/repokitteh/modules/ownerscheck.star",
    "content": "# Ownership specified by list of specs, like so:\n#\n# use(\n#   \"github.com/repokitteh/modules/ownerscheck.star\",\n#   paths=[\n#     {\n#       \"owner\": \"envoyproxy/api-shepherds!\",\n#       \"path\": \"api/\",\n#       \"label\": \"api\",\n#       \"allow_global_approval\": True,\n#       \"github_status_label\" = \"any API change\",\n#     },\n#   ],\n# )\n#\n# This module will maintain a commit status per specified path regex (also aka as spec).\n#\n# Two types of approvals:\n# 1. Global approvals, done by approving the PR using Github's review approval feature.\n# 2. Partial approval, done by commenting \"/lgtm [label]\" where label is the label\n#    associated with the path. This does not affect GitHub's PR approve status, only\n#    this module's maintained commit status. This approval is automatically revoked\n#    if any further changes are done to the relevant files in this spec.\n#\n# By default, 'allow_global_approval' is true and either (1) or (2) above can unblock\n# merges. If 'allow_global_approval' is set false, then only (2) will unblock a merge.\n#\n# 'label' refers to a GitHub label applied to any matching PR. 
The GitHub check status\n# can be customized with `github_status_label`.\n\nload(\"text\", \"match\")\nload(\"github.com/repokitteh/modules/lib/utils.star\", \"react\")\n\ndef _store_partial_approval(who, files):\n  for f in files:\n    store_put('ownerscheck/partial/%s:%s' % (who, f['filename']), f['sha'])\n\n\ndef _is_partially_approved(who, files):\n  for f in files:\n    sha = store_get('ownerscheck/partial/%s:%s' % (who, f['filename']))\n    if sha != f['sha']:\n      return False\n\n  return True\n\n\ndef _get_relevant_specs(specs, changed_files):\n  if not specs:\n    print(\"no specs\")\n    return []\n\n  relevant = []\n\n  for spec in specs:\n    path_match = spec[\"path\"]\n\n    files = [f for f in changed_files if match(path_match, f['filename'])]\n    allow_global_approval = spec.get(\"allow_global_approval\", True)\n    status_label = spec.get(\"github_status_label\", \"\")\n    if files:\n      relevant.append(struct(files=files,\n                             owner=spec[\"owner\"],\n                             label=spec.get(\"label\", None),\n                             path_match=path_match,\n                             allow_global_approval=allow_global_approval,\n                             status_label=status_label))\n\n  print(\"specs: %s\" % relevant)\n\n  return relevant\n\n\ndef _get_global_approvers(): # -> List[str] (owners)\n  reviews = [{'login': r['user']['login'], 'state': r['state']} for r in github.pr_list_reviews()]\n\n  print(\"reviews=%s\" % reviews)\n\n  return [r['login'] for r in reviews if r['state'] == 'APPROVED']\n\n\ndef _is_approved(spec, approvers):\n  owner = spec.owner\n\n  if owner[-1] == '!':\n    owner = owner[:-1]\n\n  required = [owner]\n\n  if '/' in owner:\n    team_name = owner.split('/')[1]\n\n    # this is a team, parse it.\n    team_id = github.team_get_by_name(team_name)['id']\n    required = [m['login'] for m in github.team_list_members(team_id)]\n\n    print(\"team %s(%d) = %s\" % (team_name, team_id, 
required))\n\n  for r in required:\n    if spec.allow_global_approval and any([a for a in approvers if a == r]):\n      print(\"global approver: %s\" % r)\n      return True\n\n    if _is_partially_approved(r, spec.files):\n      print(\"partial approval: %s\" % r)\n      return True\n\n  return False\n\n\ndef _update_status(owner, status_label, path_match, approved):\n  changes_to = path_match or '/'\n  github.create_status(\n    state=approved and 'success' or 'pending',\n    context='%s must approve for %s' % (owner, status_label),\n    description='changes to %s' % changes_to,\n  )\n\ndef _get_specs(config):\n  return _get_relevant_specs(config.get('paths', []), github.pr_list_files())\n\ndef _reconcile(config, specs=None):\n  specs = specs or _get_specs(config)\n\n  if not specs:\n    return []\n\n  approvers = _get_global_approvers()\n\n  print(\"approvers: %s\" % approvers)\n\n  results = []\n\n  for spec in specs:\n    approved = _is_approved(spec, approvers)\n\n    print(\"%s -> %s\" % (spec, approved))\n\n    results.append((spec, approved))\n\n    if spec.owner[-1] == '!':\n      _update_status(spec.owner[:-1], spec.status_label, spec.path_match, approved)\n\n      if spec.label:\n        if approved:\n          github.issue_unlabel(spec.label)\n        else:\n          github.issue_label(spec.label)\n    elif spec.label: # fyis\n      github.issue_label(spec.label)\n\n  return results\n\n\ndef _comment(config, results, force=False):\n  lines = []\n\n  for spec, approved in results:\n    if approved:\n      continue\n\n    mention = spec.owner\n\n    if mention[0] != '@':\n      mention = '@' + mention\n\n    if mention[-1] == '!':\n      mention = mention[:-1]\n\n    match_description = spec.path_match\n    if match_description:\n      match_description = ' for changes made to `' + match_description + '`'\n\n    mode = spec.owner[-1] == '!' 
and 'approval' or 'fyi'\n\n    key = \"ownerscheck/%s/%s\" % (spec.owner, spec.path_match)\n\n    if (not force) and (store_get(key) == mode):\n      mode = 'skip'\n    else:\n      store_put(key, mode)\n\n    if mode == 'approval':\n      lines.append('CC %s: Your approval is needed%s.' % (mention, match_description))\n    elif mode == 'fyi':\n      lines.append('CC %s: FYI only%s.' % (mention, match_description))\n\n  if lines:\n    github.issue_create_comment('\\n'.join(lines))\n\n\ndef _reconcile_and_comment(config):\n  _comment(config, _reconcile(config))\n\n\ndef _force_reconcile_and_comment(config):\n  _comment(config, _reconcile(config), force=True)\n\n\ndef _pr(action, config):\n  if action in ['synchronize', 'opened']:\n    _reconcile_and_comment(config)\n\n\ndef _pr_review(action, review_state, config):\n  if action != 'submitted' or not review_state:\n    return\n\n  _reconcile(config)\n\n\n# Partial approvals are done by commenting \"/lgtm [label]\".\ndef _lgtm_by_comment(config, comment_id, command, sender, sha):\n  labels = command.args\n\n  if len(labels) != 1:\n    react(comment_id, 'please specify a single label')\n    return\n\n  label = labels[0]\n\n  specs = [s for s in _get_specs(config) if s.label and s.label == label]\n\n  if len(specs) == 0:\n    react(comment_id, 'no relevant owners for \"%s\"' % label)\n    return\n\n  for spec in specs:\n    _store_partial_approval(sender, spec.files)\n\n  react(comment_id, None)\n\n  _reconcile(config, specs)\n\n\nhandlers.pull_request(func=_pr)\nhandlers.pull_request_review(func=_pr_review)\n\nhandlers.command(name='checkowners', func=_reconcile)\nhandlers.command(name='checkowners!', func=_force_reconcile_and_comment)\nhandlers.command(name='lgtm', func=_lgtm_by_comment)\n"
  },
  {
    "path": "ci/run_clang_tidy.sh",
    "content": "#!/bin/bash\n\nset -eo pipefail\n\n# ENVOY_SRCDIR should point to where Envoy source lives, while SRCDIR could be a downstream build\n# (for example envoy-filter-example).\n[[ -z \"${ENVOY_SRCDIR}\" ]] && ENVOY_SRCDIR=\"${PWD}\"\n[[ -z \"${SRCDIR}\" ]] && SRCDIR=\"${ENVOY_SRCDIR}\"\n\nexport LLVM_CONFIG=${LLVM_CONFIG:-llvm-config}\nLLVM_PREFIX=${LLVM_PREFIX:-$(${LLVM_CONFIG} --prefix)}\nCLANG_TIDY=${CLANG_TIDY:-$(${LLVM_CONFIG} --bindir)/clang-tidy}\nCLANG_APPLY_REPLACEMENTS=${CLANG_APPLY_REPLACEMENTS:-$(${LLVM_CONFIG} --bindir)/clang-apply-replacements}\nFIX_YAML=clang-tidy-fixes.yaml\n\n# Quick syntax check of .clang-tidy.\n${CLANG_TIDY} -dump-config > /dev/null 2> clang-tidy-config-errors.txt\nif [[ -s clang-tidy-config-errors.txt ]]; then\n  cat clang-tidy-config-errors.txt\n  rm clang-tidy-config-errors.txt\n  exit 1\nfi\nrm clang-tidy-config-errors.txt\n\necho \"Generating compilation database...\"\n\n# bazel build need to be run to setup virtual includes, generating files which are consumed\n# by clang-tidy\n\"${ENVOY_SRCDIR}/tools/gen_compilation_database.py\" --include_headers\n\n# Do not run clang-tidy against win32 impl\n# TODO(scw00): We should run clang-tidy against win32 impl once we have clang-cl support for Windows\nfunction exclude_win32_impl() {\n  grep -v source/common/filesystem/win32/ | grep -v source/common/common/win32 | grep -v source/exe/win32 | grep -v source/common/api/win32\n}\n\n# Do not run clang-tidy against macOS impl\n# TODO: We should run clang-tidy against macOS impl for completeness\nfunction exclude_macos_impl() {\n  grep -v source/common/filesystem/kqueue/ | grep -v source/common/network/apple_dns_impl | grep -v test/common/network/apple_dns_impl_test\n}\n\n# Do not run incremental clang-tidy on check_format testdata files.\nfunction exclude_check_format_testdata() {\n  grep -v tools/testdata/check_format/\n}\n\n# Do not run clang-tidy on envoy_headersplit testdata files.\nfunction exclude_headersplit_testdata() 
{\n  grep -v tools/envoy_headersplit/\n}\n\n# Do not run clang-tidy against Chromium URL import, this needs to largely\n# reflect the upstream structure.\nfunction exclude_chromium_url() {\n  grep -v source/common/chromium_url/\n}\n\n# Exclude files in third_party which are temporary forks from other OSS projects.\nfunction exclude_third_party() {\n  grep -v third_party/\n}\n\n# Exclude files which are part of the Wasm emscripten environment\nfunction exclude_wasm_emscripten() {\n  grep -v source/extensions/common/wasm/ext\n}\n\n# Exclude files which are part of the Wasm SDK\nfunction exclude_wasm_sdk() {\n  grep -v proxy_wasm_cpp_sdk\n}\n\n# Exclude files which are part of the Wasm Host environment\nfunction exclude_wasm_host() {\n  grep -v proxy_wasm_cpp_host\n}\n\n# Exclude proxy-wasm test_data.\nfunction exclude_wasm_test_data() {\n  grep -v wasm/test_data\n}\n\nfunction filter_excludes() {\n  exclude_check_format_testdata | exclude_headersplit_testdata | exclude_chromium_url | exclude_win32_impl | exclude_macos_impl | exclude_third_party | exclude_wasm_emscripten | exclude_wasm_sdk | exclude_wasm_host | exclude_wasm_test_data\n}\n\nfunction run_clang_tidy() {\n  python3 \"${LLVM_PREFIX}/share/clang/run-clang-tidy.py\" \\\n    -clang-tidy-binary=\"${CLANG_TIDY}\" \\\n    -clang-apply-replacements-binary=\"${CLANG_APPLY_REPLACEMENTS}\" \\\n    -export-fixes=${FIX_YAML} -j \"${NUM_CPUS:-0}\" -p \"${SRCDIR}\" -quiet \\\n    ${APPLY_CLANG_TIDY_FIXES:+-fix} \"$@\"\n}\n\nfunction run_clang_tidy_diff() {\n  git diff \"$1\" | filter_excludes | \\\n    python3 \"${LLVM_PREFIX}/share/clang/clang-tidy-diff.py\" \\\n      -clang-tidy-binary=\"${CLANG_TIDY}\" \\\n      -export-fixes=\"${FIX_YAML}\" -j \"${NUM_CPUS:-0}\" -p 1 -quiet\n}\n\nif [[ $# -gt 0 ]]; then\n  echo \"Running clang-tidy on: $*\"\n  run_clang_tidy \"$@\"\nelif [[ \"${RUN_FULL_CLANG_TIDY}\" == 1 ]]; then\n  echo \"Running a full clang-tidy\"\n  run_clang_tidy\nelse\n  if [[ -z \"${DIFF_REF}\" ]]; then\n    
if [[ \"${BUILD_REASON}\" == \"PullRequest\" ]]; then\n      DIFF_REF=\"remotes/origin/${SYSTEM_PULLREQUEST_TARGETBRANCH}\"\n    elif [[ \"${BUILD_REASON}\" == *CI ]]; then\n      DIFF_REF=\"HEAD^\"\n    else\n      DIFF_REF=$(\"${ENVOY_SRCDIR}\"/tools/git/last_github_commit.sh)\n    fi\n  fi\n  echo \"Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse \"${DIFF_REF}\")), current HEAD ($(git rev-parse HEAD))\"\n  run_clang_tidy_diff \"${DIFF_REF}\"\nfi\n\nif [[ -s \"${FIX_YAML}\" ]]; then\n  echo \"clang-tidy check failed, potentially fixed by clang-apply-replacements:\"\n  cat \"${FIX_YAML}\"\n  exit 1\nfi\n"
  },
  {
    "path": "ci/run_envoy_docker.sh",
    "content": "#!/bin/bash\n\nset -e\n\n# shellcheck source=ci/envoy_build_sha.sh\n. \"$(dirname \"$0\")\"/envoy_build_sha.sh\n\nfunction is_windows() {\n  [[ \"$(uname -s)\" == *NT* ]]\n}\n\nread -ra ENVOY_DOCKER_OPTIONS <<< \"${ENVOY_DOCKER_OPTIONS:-}\"\n\n# TODO(phlax): uppercase these env vars\nexport HTTP_PROXY=\"${http_proxy:-}\"\nexport HTTPS_PROXY=\"${https_proxy:-}\"\nexport NO_PROXY=\"${no_proxy:-}\"\n\nif is_windows; then\n  [[ -z \"${IMAGE_NAME}\" ]] && IMAGE_NAME=\"envoyproxy/envoy-build-windows2019\"\n  # TODO(sunjayBhatia): Currently ENVOY_DOCKER_OPTIONS is ignored on Windows because\n  # CI sets it to a Linux-specific value. Undo this once https://github.com/envoyproxy/envoy/issues/13272\n  # is resolved.\n  ENVOY_DOCKER_OPTIONS=()\n  DEFAULT_ENVOY_DOCKER_BUILD_DIR=C:/Windows/Temp/envoy-docker-build\n  BUILD_DIR_MOUNT_DEST=C:/build\n  # Replace MSYS style drive letter (/c/) with driver letter designation (C:/)\n  SOURCE_DIR=$(echo \"${PWD}\" | sed -E \"s#/([a-zA-Z])/#\\1:/#\")\n  SOURCE_DIR_MOUNT_DEST=C:/source\n  START_COMMAND=(\"bash\" \"-c\" \"cd source && $*\")\nelse\n  [[ -z \"${IMAGE_NAME}\" ]] && IMAGE_NAME=\"envoyproxy/envoy-build-ubuntu\"\n  # We run as root and later drop permissions. 
This is required to set up the USER\n  # in useradd below, which is needed for correct Python execution in the Docker\n  # environment.\n  ENVOY_DOCKER_OPTIONS+=(-u root:root)\n  ENVOY_DOCKER_OPTIONS+=(-v /var/run/docker.sock:/var/run/docker.sock)\n  ENVOY_DOCKER_OPTIONS+=(--cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN)\n  DEFAULT_ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build\n  BUILD_DIR_MOUNT_DEST=/build\n  SOURCE_DIR=\"${PWD}\"\n  SOURCE_DIR_MOUNT_DEST=/source\n  START_COMMAND=(\"/bin/bash\" \"-lc\" \"groupadd --gid $(id -g) -f envoygroup \\\n    && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home --home-dir /build envoybuild \\\n    && usermod -a -G pcap envoybuild \\\n    && sudo -EHs -u envoybuild bash -c 'cd /source && $*'\")\nfi\n\n# The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker\n# images').\n[[ -z \"${IMAGE_ID}\" ]] && IMAGE_ID=\"${ENVOY_BUILD_SHA}\"\n[[ -z \"${ENVOY_DOCKER_BUILD_DIR}\" ]] && ENVOY_DOCKER_BUILD_DIR=\"${DEFAULT_ENVOY_DOCKER_BUILD_DIR}\"\n# Replace backslash with forward slash for Windows style paths\nENVOY_DOCKER_BUILD_DIR=\"${ENVOY_DOCKER_BUILD_DIR//\\\\//}\"\nmkdir -p \"${ENVOY_DOCKER_BUILD_DIR}\"\n\n[[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=(\"-it\")\n[[ -f .git ]] && [[ ! 
-d .git ]] && ENVOY_DOCKER_OPTIONS+=(-v \"$(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)\")\n\nexport ENVOY_BUILD_IMAGE=\"${IMAGE_NAME}:${IMAGE_ID}\"\n\n# Since we specify an explicit hash, docker-run will pull from the remote repo if missing.\ndocker run --rm \\\n       \"${ENVOY_DOCKER_OPTIONS[@]}\" \\\n       -v \"${ENVOY_DOCKER_BUILD_DIR}\":\"${BUILD_DIR_MOUNT_DEST}\" \\\n       -v \"${SOURCE_DIR}\":\"${SOURCE_DIR_MOUNT_DEST}\" \\\n       -e HTTP_PROXY \\\n       -e HTTPS_PROXY \\\n       -e NO_PROXY \\\n       -e BAZEL_STARTUP_OPTIONS \\\n       -e BAZEL_BUILD_EXTRA_OPTIONS \\\n       -e BAZEL_EXTRA_TEST_OPTIONS \\\n       -e BAZEL_REMOTE_CACHE \\\n       -e ENVOY_STDLIB \\\n       -e BUILD_REASON \\\n       -e BAZEL_REMOTE_INSTANCE \\\n       -e GCP_SERVICE_ACCOUNT_KEY \\\n       -e NUM_CPUS \\\n       -e ENVOY_RBE \\\n       -e FUZZIT_API_KEY \\\n       -e ENVOY_BUILD_IMAGE \\\n       -e ENVOY_SRCDIR \\\n       -e ENVOY_BUILD_TARGET \\\n       -e SYSTEM_PULLREQUEST_TARGETBRANCH \\\n       -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \\\n       -e GCS_ARTIFACT_BUCKET \\\n       -e BUILD_SOURCEBRANCHNAME \\\n       -e BAZELISK_BASE_URL \\\n       -e ENVOY_BUILD_ARCH \\\n       -e SLACK_TOKEN \\\n       -e BUILD_URI\\\n       -e REPO_URI \\\n       \"${ENVOY_BUILD_IMAGE}\" \\\n       \"${START_COMMAND[@]}\"\n"
  },
  {
    "path": "ci/setup_cache.sh",
    "content": "#!/bin/bash\n\nset -e\n\nif [[ -n \"${GCP_SERVICE_ACCOUNT_KEY:0:1}\" ]]; then\n  # mktemp will create a tempfile with u+rw permission minus umask, it will not be readable by all\n  # users by default.\n  GCP_SERVICE_ACCOUNT_KEY_FILE=$(mktemp -t gcp_service_account.XXXXXX.json)\n\n  gcp_service_account_cleanup() {\n    echo \"Deleting service account key file...\"\n    rm -rf \"${GCP_SERVICE_ACCOUNT_KEY_FILE}\"\n  }\n\n  trap gcp_service_account_cleanup EXIT\n\n  bash -c 'echo \"${GCP_SERVICE_ACCOUNT_KEY}\"' | base64 --decode > \"${GCP_SERVICE_ACCOUNT_KEY_FILE}\"\n\n  export BAZEL_BUILD_EXTRA_OPTIONS+=\" --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_FILE}\"\nfi\n\n\nif [[ -n \"${BAZEL_REMOTE_CACHE}\" ]]; then\n  export BAZEL_BUILD_EXTRA_OPTIONS+=\" --remote_cache=${BAZEL_REMOTE_CACHE}\"\n  echo \"Set up bazel remote read/write cache at ${BAZEL_REMOTE_CACHE}.\"\n\n  if [[ -n \"${BAZEL_REMOTE_INSTANCE}\" ]]; then\n    export BAZEL_BUILD_EXTRA_OPTIONS+=\" --remote_instance_name=${BAZEL_REMOTE_INSTANCE}\"\n    echo \"instance_name: ${BAZEL_REMOTE_INSTANCE}.\"\n  elif [[ -z \"${ENVOY_RBE}\" ]]; then\n    export BAZEL_BUILD_EXTRA_OPTIONS+=\" --jobs=HOST_CPUS*.9 --remote_timeout=600\"\n    echo \"using local build cache.\"\n  fi\n\nelse\n  echo \"No remote cache is set, skipping setup remote cache.\"\nfi\n"
  },
  {
    "path": "ci/upload_gcs_artifact.sh",
    "content": "#!/bin/bash\n\nset -e -o pipefail\n\nif [[ -z \"${GCS_ARTIFACT_BUCKET}\" ]]; then\n  echo \"Artifact bucket is not set, not uploading artifacts.\"\n  exit 0\nfi\n\n# Fail when service account key is not specified\nbash -c 'echo ${GCP_SERVICE_ACCOUNT_KEY}' | base64 --decode | gcloud auth activate-service-account --key-file=-\n\nSOURCE_DIRECTORY=\"$1\"\nTARGET_SUFFIX=\"$2\"\n\nif [ ! -d \"${SOURCE_DIRECTORY}\" ]; then\n  echo \"ERROR: ${SOURCE_DIRECTORY} is not found.\"\n  exit 1\nfi\n\nBRANCH=${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}}\nGCS_LOCATION=\"${GCS_ARTIFACT_BUCKET}/${BRANCH}/${TARGET_SUFFIX}\"\n\necho \"Uploading to gs://${GCS_LOCATION} ...\"\ngsutil -mq rsync -dr \"${SOURCE_DIRECTORY}\" \"gs://${GCS_LOCATION}\"\necho \"Artifacts uploaded to: https://storage.googleapis.com/${GCS_LOCATION}/index.html\"\n"
  },
  {
    "path": "ci/verify_examples.sh",
    "content": "#!/bin/bash -E\n\nTESTFILTER=\"${1:-*}\"\nFAILED=()\nSRCDIR=\"${SRCDIR:-$(pwd)}\"\nEXCLUDE_EXAMPLES=${EXCLUDED_EXAMPLES:-\"wasm\"}\n\n\ntrap_errors () {\n    local frame=0 command line sub file\n    if [[ -n \"$example\" ]]; then\n        command=\" (${example})\"\n    fi\n    set +v\n    while read -r line sub file < <(caller \"$frame\"); do\n        if [[ \"$frame\" -ne \"0\" ]]; then\n            FAILED+=(\"  > ${sub}@ ${file} :${line}\")\n        else\n            FAILED+=(\"${sub}@ ${file} :${line}${command}\")\n        fi\n        ((frame++))\n    done\n    set -v\n}\n\ntrap trap_errors ERR\ntrap exit 1 INT\n\n\nrun_examples () {\n    local examples example\n    cd \"${SRCDIR}/examples\" || exit 1\n    examples=$(find . -mindepth 1 -maxdepth 1 -type d -name \"$TESTFILTER\" | grep -vE \"${EXCLUDE_EXAMPLES}\" | sort)\n    for example in $examples; do\n        pushd \"$example\" > /dev/null || return 1\n        ./verify.sh\n        popd > /dev/null || return 1\n    done\n}\n\nrun_examples\n\nif [[ \"${#FAILED[@]}\" -ne \"0\" ]]; then\n    echo \"TESTS FAILED:\"\n    for failed in \"${FAILED[@]}\"; do\n        echo \"$failed\" >&2\n    done\n    exit 1\nfi\n"
  },
  {
    "path": "ci/windows_ci_steps.sh",
    "content": "#!/usr/bin/bash.exe\n\nset -e\n\nfunction finish {\n  echo \"disk space at end of build:\"\n  df -h\n}\ntrap finish EXIT\n\necho \"disk space at beginning of build:\"\ndf -h\n\n# shellcheck source=ci/setup_cache.sh\n. \"$(dirname \"$0\")\"/setup_cache.sh\n\nread -ra BAZEL_STARTUP_OPTIONS <<< \"${BAZEL_STARTUP_OPTIONS:-}\"\n# Default to msvc-cl if not overridden\nread -ra BAZEL_BUILD_EXTRA_OPTIONS <<< \"${BAZEL_BUILD_EXTRA_OPTIONS:---config=msvc-cl}\"\nread -ra BAZEL_EXTRA_TEST_OPTIONS <<< \"${BAZEL_EXTRA_TEST_OPTIONS:-}\"\n\n# Set up TMPDIR so bash and non-bash can access\n# e.g. TMPDIR=/d/tmp, make a link from /d/d to /d so both bash and Windows programs resolve the\n# same path\n# This is due to this issue: https://github.com/bazelbuild/rules_foreign_cc/issues/334\n# rules_foreign_cc does not currently use bazel output/temp directories by default, it uses mktemp\n# which respects the value of the TMPDIR environment variable\ndrive=\"$(readlink -f \"$TMPDIR\" | cut -d '/' -f2)\"\nif [ ! -e \"/$drive/$drive\" ]; then\n  /c/windows/system32/cmd.exe /c \"mklink /d $drive:\\\\$drive $drive:\\\\\"\nfi\n\nBUILD_DIR=${BUILD_DIR:-/c/build}\nif [[ ! -d \"${BUILD_DIR}\" ]]\nthen\n  echo \"${BUILD_DIR} mount missing - did you forget -v <something>:${BUILD_DIR}? 
Creating.\"\n  mkdir -p \"${BUILD_DIR}\"\nfi\n\n# Environment setup.\nexport TEST_TMPDIR=${BUILD_DIR}/tmp\n\n[[ \"${BUILD_REASON}\" != \"PullRequest\" ]] && BAZEL_EXTRA_TEST_OPTIONS+=(--nocache_test_results)\n\nBAZEL_STARTUP_OPTIONS+=(\"--output_base=c:/_eb\")\nBAZEL_BUILD_OPTIONS=(\n    -c opt\n    --show_task_finish\n    --verbose_failures\n    --define \"wasm=disabled\"\n    \"--test_output=errors\"\n    \"${BAZEL_BUILD_EXTRA_OPTIONS[@]}\"\n    \"${BAZEL_EXTRA_TEST_OPTIONS[@]}\")\n\n# Also setup some space for building Envoy standalone.\nENVOY_BUILD_DIR=\"${BUILD_DIR}\"/envoy\nmkdir -p \"${ENVOY_BUILD_DIR}\"\n\n# This is where we copy build deliverables to.\nENVOY_DELIVERY_DIR=\"${ENVOY_BUILD_DIR}\"/source/exe\nmkdir -p \"${ENVOY_DELIVERY_DIR}\"\n\n# Test to validate updates of all dependency libraries in bazel/external and bazel/foreign_cc\n# bazel \"${BAZEL_STARTUP_OPTIONS[@]}\" build \"${BAZEL_BUILD_OPTIONS[@]}\" //bazel/... --build_tag_filters=-skip_on_windows\n\n# Complete envoy-static build (nothing needs to be skipped, build failure indicates broken dependencies)\nbazel \"${BAZEL_STARTUP_OPTIONS[@]}\" build \"${BAZEL_BUILD_OPTIONS[@]}\" //source/exe:envoy-static\n\n# Copy binary to delivery directory\ncp -f bazel-bin/source/exe/envoy-static.exe \"${ENVOY_DELIVERY_DIR}/envoy.exe\"\n\n# Copy for azp, creating a tar archive\ntar czf \"${ENVOY_BUILD_DIR}\"/envoy_binary.tar.gz -C \"${ENVOY_DELIVERY_DIR}\" envoy.exe\n\n# Test invocations of known-working tests on Windows\nbazel \"${BAZEL_STARTUP_OPTIONS[@]}\" test \"${BAZEL_BUILD_OPTIONS[@]}\" //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows,-flaky_on_windows --build_tests_only\n\n# Build tests that are known-flaky or known-failing to ensure no compilation regressions\nbazel \"${BAZEL_STARTUP_OPTIONS[@]}\" build \"${BAZEL_BUILD_OPTIONS[@]}\" //test/... 
--test_tag_filters=-skip_on_windows,fails_on_windows,flaky_on_windows --build_tests_only\n\n# Summarize tests bypassed to monitor the progress of porting to Windows\necho \"Tests bypassed as skip_on_windows: $(bazel query 'kind(\".*test rule\", attr(\"tags\", \"skip_on_windows\", //test/...))' 2>/dev/null | sort | wc -l) known unbuildable or inapplicable tests\"\necho \"Tests bypassed as fails_on_windows: $(bazel query 'kind(\".*test rule\", attr(\"tags\", \"fails_on_windows\", //test/...))' 2>/dev/null | sort | wc -l) known incompatible tests\"\necho \"Tests bypassed as flaky_on_windows: $(bazel query 'kind(\".*test rule\", attr(\"tags\", \"flaky_on_windows\", //test/...))' 2>/dev/null | sort | wc -l) known unstable tests\"\n"
  },
  {
    "path": "configs/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\"@rules_python//python:defs.bzl\", \"py_binary\")\nload(\"@configs_pip3//:requirements.bzl\", \"requirement\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\npy_binary(\n    name = \"configgen\",\n    srcs = [\"configgen.py\"],\n    data = glob([\n        \"*.yaml\",\n    ]),\n    deps = [\n        requirement(\"Jinja2\"),\n        requirement(\"MarkupSafe\"),\n    ],\n)\n\nfilegroup(\n    name = \"configs\",\n    srcs = [\n        \"google-vrp/envoy-edge.yaml\",\n        \"google-vrp/envoy-origin.yaml\",\n        \"original-dst-cluster/proxy_config.yaml\",\n    ] + select({\n        \"//bazel:apple\": [],\n        \"//bazel:windows_x86_64\": [],\n        \"//conditions:default\": [\"freebind/freebind.yaml\"],\n    }),\n)\n\ngenrule(\n    name = \"example_configs\",\n    srcs = [\n        \":configs\",\n        \"//examples:configs\",\n        \"//docs:configs\",\n        \"//test/config/integration/certs\",\n    ],\n    outs = [\"example_configs.tar\"],\n    cmd = (\n        \"$(location configgen.sh) $(location configgen) $(@D) \" +\n        \"$(locations :configs) \" +\n        \"$(locations //examples:configs) \" +\n        \"$(locations //docs:configs) \" +\n        \"$(locations //test/config/integration/certs)\"\n    ),\n    tools = [\n        \"configgen.sh\",\n        \":configgen\",\n    ],\n)\n"
  },
  {
    "path": "configs/Dockerfile",
    "content": "# This configuration will build a Docker container containing\n# an Envoy proxy that routes to Google.\n\nFROM envoyproxy/envoy-dev:latest\nRUN apt-get update\nCOPY google_com_proxy.v2.yaml /etc/envoy.yaml\nCMD /usr/local/bin/envoy -c /etc/envoy.yaml\n"
  },
  {
    "path": "configs/access_log_format_helper_v2.template.yaml",
    "content": "{% macro ingress_sampled_log() -%}\n  format: \"[%START_TIME%] \\\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\\\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \\\"%REQ(X-FORWARDED-FOR)%\\\" \\\"%REQ(USER-AGENT)%\\\" \\\"%REQ(X-REQUEST-ID)%\\\" \\\"%REQ(:AUTHORITY)%\\\"\\n\"\n{% endmacro %}\n\n{% macro ingress_full() -%}\n  format: \"[%START_TIME%] \\\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \\\"%REQ(X-FORWARDED-FOR)%\\\" \\\"%REQ(USER-AGENT)%\\\" \\\"%REQ(X-REQUEST-ID)%\\\" \\\"%REQ(:AUTHORITY)%\\\"\\n\"\n{% endmacro %}\n\n{% macro egress_error_log() -%}\n  format: \"[%START_TIME%] \\\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\\\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \\\"%REQ(X-FORWARDED-FOR)%\\\" \\\"%REQ(USER-AGENT)%\\\" \\\"%REQ(X-REQUEST-ID)%\\\" \\\"%REQ(:AUTHORITY)%\\\" \\\"%UPSTREAM_HOST%\\\"\\n\"\n{% endmacro %}\n\n{% macro egress_error_amazon_service() -%}\n  format: \"[%START_TIME%] \\\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\\\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \\\"%REQ(X-FORWARDED-FOR)%\\\" \\\"%REQ(USER-AGENT)%\\\" \\\"%REQ(X-REQUEST-ID)%\\\" \\\"%REQ(:AUTHORITY)%\\\" \\\"%UPSTREAM_HOST%\\\" \\\"%RESP(X-AMZN-RequestId)%\\\"\\n\"\n{% endmacro %}\n"
  },
  {
    "path": "configs/configgen.py",
    "content": "import jinja2\nimport json\nfrom collections import OrderedDict\nimport os\nimport shutil\nimport sys\n\nSCRIPT_DIR = os.path.dirname(__file__)\nOUT_DIR = sys.argv[1]\n\n#\n# About this script: Envoy configurations needed for a complete infrastructure are complicated.\n# This script demonstrates how to programatically build Envoy configurations using jinja templates.\n# This is roughly how we build our configurations at Lyft. The three configurations demonstrated\n# here (front proxy, double proxy, and service to service) are also very close approximations to\n# what we use at Lyft in production. They give a demonstration of how to configure most Envoy\n# features. Along with the configuration guide it should be possible to modify them for different\n# use cases.\n#\n\n# This is the set of internal services that front Envoy will route to. Each cluster referenced\n# in envoy_router.template.json must be specified here. It is a dictionary of dictionaries.\n# Options can be specified for each cluster if needed. See make_route_internal() in\n# routing_helper.template.json for the types of options supported.\nfront_envoy_clusters = {'service1': {}, 'service2': {}, 'service3': {}, 'ratelimit': {}}\n\n# This is the set of internal services that local Envoys will route to. All services that will be\n# accessed via the 9001 egress port need to be listed here. It is a dictionary of dictionaries.\n# Options can be specified for each cluster if needed. See make_route_internal() in\n# routing_helper.template.json for the types of options supported.\nservice_to_service_envoy_clusters = {\n    'ratelimit': {},\n    'service1': {\n        'service_to_service_rate_limit': True\n    },\n    'service3': {}\n}\n\n# This is a list of external hosts that can be accessed from local Envoys. Each external service has\n# its own port. This is because some SDKs don't make it easy to use host based routing. Below\n# we demonstrate setting up proxying for DynamoDB. 
In the config, this ends up using the HTTP\n# DynamoDB statistics filter, as well as generating a special access log which includes the\n# X-AMZN-RequestId response header.\nexternal_virtual_hosts = [{\n    'name': 'dynamodb_iad',\n    'address': \"127.0.0.1\",\n    'protocol': \"TCP\",\n    'port_value': \"9204\",\n    'hosts': [{\n        'name': 'dynamodb_iad',\n        'domain': '*',\n        'remote_address': 'dynamodb.us-east-1.amazonaws.com',\n        'protocol': 'TCP',\n        'port_value': '443',\n        'verify_subject_alt_name': ['dynamodb.us-east-1.amazonaws.com'],\n        'ssl': True\n    }],\n    'is_amzn_service': True,\n    'cluster_type': 'logical_dns'\n}]\n\n# This is the set of mongo clusters that local Envoys can talk to. Each database defines a set of\n# mongos routers to talk to, and whether the global rate limit service should be called for new\n# connections. Many organizations will not be interested in the mongo feature. Setting this to\n# an empty dictionary will remove all mongo configuration. 
The configuration is a useful example\n# as it demonstrates how to setup TCP proxy and the network rate limit filter.\nmongos_servers = {\n    'somedb': {\n        'address': \"127.0.0.1\",\n        'protocol': \"TCP\",\n        'port_value': 27019,\n        'hosts': [\n            {\n                'port_value': 27817,\n                'address': 'router1.yourcompany.net',\n                'protocol': 'TCP'\n            },\n            {\n                'port_value': 27817,\n                'address': 'router2.yourcompany.net',\n                'protocol': 'TCP'\n            },\n            {\n                'port_value': 27817,\n                'address': 'router3.yourcompany.net',\n                'protocol': 'TCP'\n            },\n            {\n                'port_value': 27817,\n                'address': 'router4.yourcompany.net',\n                'protocol': 'TCP'\n            },\n        ],\n        'ratelimit': True\n    }\n}\n\n\ndef generate_config(template_path, template, output_file, **context):\n  \"\"\" Generate a final config file based on a template and some context. \"\"\"\n  env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path, followlinks=True),\n                           undefined=jinja2.StrictUndefined)\n  raw_output = env.get_template(template).render(**context)\n  with open(output_file, 'w') as fh:\n    fh.write(raw_output)\n\n\n# TODO(sunjayBhatia, wrowe): Avoiding tracing extensions until they build on Windows\ntracing_enabled = os.name != 'nt'\n\n# Generate a demo config for the main front proxy. This sets up both HTTP and HTTPS listeners,\n# as well as a listener for the double proxy to connect to via SSL client authentication.\ngenerate_config(SCRIPT_DIR,\n                'envoy_front_proxy_v2.template.yaml',\n                '{}/envoy_front_proxy.v2.yaml'.format(OUT_DIR),\n                clusters=front_envoy_clusters,\n                tracing=tracing_enabled)\n\n# Generate a demo config for the double proxy. 
This sets up both an HTTP and HTTPS listeners,\n# and backhauls the traffic to the main front proxy.\ngenerate_config(SCRIPT_DIR,\n                'envoy_double_proxy_v2.template.yaml',\n                '{}/envoy_double_proxy.v2.yaml'.format(OUT_DIR),\n                tracing=tracing_enabled)\n\n# Generate a demo config for the service to service (local) proxy. This sets up several different\n# listeners:\n# 9211: Main ingress listener for service to service traffic.\n# 9001: Main egress listener for service to service traffic. Applications use this port to send\n#       requests to other services.\n# optional external service ports: built from external_virtual_hosts above. Each external host\n#                                  that Envoy proxies to listens on its own port.\n# optional mongo ports: built from mongos_servers above.\ngenerate_config(SCRIPT_DIR,\n                'envoy_service_to_service_v2.template.yaml',\n                '{}/envoy_service_to_service.yaml'.format(OUT_DIR),\n                internal_virtual_hosts=service_to_service_envoy_clusters,\n                external_virtual_hosts=external_virtual_hosts,\n                mongos_servers=mongos_servers)\n\nfor google_ext in ['v2.yaml']:\n  shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.%s' % google_ext), OUT_DIR)\n\nshutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.v3.yaml'), OUT_DIR)\nshutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.v3.yaml'), OUT_DIR)\n"
  },
  {
    "path": "configs/configgen.sh",
    "content": "#!/bin/bash\n\nset -e\n\nCONFIGGEN=\"$1\"\nshift\nOUT_DIR=\"$1\"\nshift\n\nmkdir -p \"$OUT_DIR/certs\"\nmkdir -p \"$OUT_DIR/lib\"\n\"$CONFIGGEN\" \"$OUT_DIR\"\n\nfor FILE in \"$@\"; do\n  case \"$FILE\" in\n  *.pem)\n    cp \"$FILE\" \"$OUT_DIR/certs\"\n    ;;\n  *.lua)\n    cp \"$FILE\" \"$OUT_DIR/lib\"\n    ;;\n  *)\n\n    FILENAME=\"$(echo \"$FILE\" | sed -e 's/.*examples\\///g')\"\n    # Configuration filenames may conflict. To avoid this we use the full path.\n    cp -v \"$FILE\" \"$OUT_DIR/${FILENAME//\\//_}\"\n    ;;\n  esac\ndone\n\n# tar is having issues with -C for some reason so just cd into OUT_DIR.\n(cd \"$OUT_DIR\"; tar -hcvf example_configs.tar -- *.yaml certs/*.pem lib/*.lua)\n"
  },
  {
    "path": "configs/encapsulate_in_connect.v3.yaml",
    "content": "admin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address:\n      protocol: TCP\n      address: 127.0.0.1\n      port_value: 9903\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        protocol: TCP\n        address: 127.0.0.1\n        port_value: 10000\n    filter_chains:\n    - filters:\n      - name: tcp\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\n          stat_prefix: tcp_stats\n          cluster: \"cluster_0\"\n          tunneling_config:\n            hostname: host.com\n  clusters:\n    - name: cluster_0\n      connect_timeout: 5s\n      http2_protocol_options:\n        {}\n      load_assignment:\n        cluster_name: cluster_0\n        endpoints:\n          - lb_endpoints:\n              - endpoint:\n                  address:\n                    socket_address:\n                      address: 127.0.0.1\n                      port_value: 10001\n"
  },
  {
    "path": "configs/envoy_double_proxy_v2.template.yaml",
    "content": "{%- macro listener(protocol, address, port_value, tls, proxy_proto, tracing) -%}\n- name: listener_created_from_configgen\n  address:\n    socket_address:\n      protocol: {{protocol}}\n      address: {{address}}\n      port_value: {{port_value}}\n  filter_chains:\n  - filter_chain_match: {}\n    {% if tls %}\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n        common_tls_context:\n          tls_certificates:\n          - certificate_chain:\n              filename: certs/servercert.pem\n            private_key:\n              filename: certs/serverkey.pem\n          validation_context: {}\n          alpn_protocols:\n          - h2\n          - http/1.1\n    {% endif %}\n    {% if proxy_proto %}\n    use_proxy_proto: true\n    {%endif -%}\n    filters:\n    - name: envoy.filters.network.http_connection_manager\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n        codec_type: AUTO\n        stat_prefix: router\n        route_config:\n          name: local_route\n          virtual_hosts:\n          - name: local_service\n            domains: [\"*\"]\n            routes:\n            - match:\n                prefix: \"/\"\n              route:\n                cluster: backhaul\n                #Generally allow front proxy to control timeout and use this as a backstop\n                timeout: 20s\n        http_filters:\n        - name: envoy.filters.http.health_check\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n            pass_through_mode: false\n            headers:\n              - exact_match: /healthcheck\n                name: :path\n        - name: envoy.filters.http.buffer\n          typed_config:\n            \"@type\": 
type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer\n            max_request_bytes: 5242880\n        - name: envoy.filters.http.router\n          typed_config: {}\n        {% if tracing %}\n        tracing:\n          operation_name: INGRESS\n          provider:\n            name: envoy.tracers.lightstep\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.trace.v2.LightstepConfig\n              access_token_file: \"/etc/envoy/lightstep_access_token\"\n              collector_cluster: lightstep_saas\n        {% endif %}\n        common_http_protocol_options:\n          idle_timeout: 840s\n        access_log:\n        - name: envoy.access_loggers.file\n          filter:\n            or_filter:\n              filters:\n                - status_code_filter:\n                    comparison:\n                      op: GE\n                      value:\n                        default_value: 500\n                        runtime_key: access_log.access_error.status\n                - duration_filter:\n                    comparison:\n                      op: GE\n                      value:\n                        default_value: 1000\n                        runtime_key: access_log.access_error.duration\n                - traceable_filter: {}\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n            path: /var/log/envoy/access_error.log\n            format: \"[%START_TIME%] \\\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \\\"%REQ(X-FORWARDED-FOR)%\\\" \\\"%REQ(USER-AGENT)%\\\" \\\"%REQ(X-REQUEST-ID)%\\\" \\\"%REQ(:AUTHORITY)%\\\" \\\"%REQ(X-LYFT-USER-ID)%\\\" \\\"%RESP(GRPC-STATUS)%\\\"\\n\"\n        {% if proxy_proto %}\n        use_remote_address: true\n        {%endif -%}\n{% endmacro -%}\nstatic_resources:\n  listeners:\n  # TCP 
listener for external port 443 (TLS). Assumes a TCP LB in front such as ELB which\n  # supports proxy proto\n  {{ listener(\"TCP\", \"0.0.0.0\",9300,True, True, tracing)|indent(2) }}\n  # TCP listener for external port 80 (non-TLS). Assumes a TCP LB in front such as ELB which\n  # supports proxy proto.\n  {{ listener(\"TCP\", \"0.0.0.0\",9301,False, True, tracing)|indent(2) }}\n  clusters:\n  - name: statsd\n    type: STATIC\n    connect_timeout: 0.25s\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: statsd\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8125\n                protocol: TCP\n  - name: backhaul\n    type: STRICT_DNS\n    connect_timeout: 1s\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: backhaul\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: front-proxy.yourcompany.net\n                port_value: 9400\n                protocol: TCP\n    # There are so few connections going back\n    # that we can get some imbalance. 
Until we come up\n    # with a better solution just limit the requests\n    # so we can cycle and get better spread.\n    max_requests_per_connection: 25000\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n        common_tls_context:\n          tls_certificates:\n          - certificate_chain:\n              filename: certs/clientcert.pem\n            private_key:\n              filename: certs/clientkey.pem\n          validation_context:\n            trusted_ca:\n              filename: certs/cacert.pem\n            match_subject_alt_names: \n              exact: \"front-proxy.yourcompany.net\"\n    http2_protocol_options: {}\n  - name: lightstep_saas\n    type: LOGICAL_DNS\n    connect_timeout: 1s\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: lightstep_saas\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: collector-grpc.lightstep.com\n                port_value: 443\n                protocol: TCP\n    http2_protocol_options: {}\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n        common_tls_context:\n          validation_context:\n            trusted_ca:\n              filename: certs/cacert.pem\n            match_subject_alt_names: \n              exact: \"collector-grpc.lightstep.com\"\nflags_path: \"/etc/envoy/flags\"\nstats_sinks:\n- name: envoy.stat_sinks.statsd\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.metrics.v2.StatsdSink\n    tcp_cluster_name: statsd\nlayered_runtime:\n  layers:\n    - name: root\n      disk_layer:\n        symlink_root: /srv/configset/envoydata/current\n        subdirectory: envoy\n    - name: override\n      disk_layer:\n        symlink_root: 
/srv/configset/envoydata/current\n        subdirectory: envoy_override\n        append_service_cluster: true\n    - name: admin\n      admin_layer: {}\nadmin:\n  access_log_path: \"/var/log/envoy/admin_access.log\"\n  address:\n    socket_address:\n      protocol: TCP\n      address: 127.0.0.1\n      port_value: 9901\n"
  },
  {
    "path": "configs/envoy_front_proxy_v2.template.yaml",
    "content": "{% import 'routing_helper_v2.template.yaml' as helper -%}\n{% macro router_file_content() -%}{% include kwargs['router_file'] -%}{% endmacro -%}\n{% macro listener(protocol, address, port_value, proxy_proto, tls, tracing) -%}\n  name: not_required_for_static_listeners\n  address:\n    socket_address:\n      protocol: {{protocol}}\n      address: {{address}}\n      port_value: {{port_value}}\n  filter_chains:\n  {% if tls %}\n  - transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n        common_tls_context:\n          alpn_protocols: h2,http/1.1\n          tls_certificates:\n          - certificate_chain:\n              filename: certs/servercert.pem\n            private_key:\n              filename: certs/serverkey.pem\n          {% if kwargs.get('pin_double_proxy_client', False) %}\n          validation_context:\n            trusted_ca:\n              filename: certs/cacert.pem\n            #This should be the hash of the /etc/envoy/envoy-double-proxy.pem cert used in the\n            #double proxy configuration.\n            verify_certificate_hash: \"0000000000000000000000000000000000000000000000000000000000000000\"\n          {% endif %}\n    {%if proxy_proto%}\n    use_proxy_proto: true\n    {%endif%}\n  {%endif %}\n    filters:\n    - name: envoy.filters.network.http_connection_manager\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n        codec_type: AUTO\n        {% if proxy_proto -%}\n        use_remote_address: true\n        {%endif-%}\n        stat_prefix: ingress_http\n        route_config:\n          {{ router_file_content(router_file='envoy_router_v2.template.yaml')|indent(10) }}\n        http_filters:\n        - name: envoy.filters.http.health_check\n          typed_config:\n            \"@type\": 
type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n            pass_through_mode: false\n            headers:\n              - name: \":path\"\n                exact_match: \"/healthcheck\"\n        - name: envoy.filters.http.buffer\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer\n            max_request_bytes: 5242880\n        - name: envoy.filters.http.ratelimit\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit\n            domain: envoy_front\n            request_type: external\n            rate_limit_service:\n              grpc_service:\n                envoy_grpc:\n                  cluster_name: ratelimit\n        - name: envoy.filters.http.router\n          typed_config: {}\n        add_user_agent: true\n        {% if tracing %}\n        tracing:\n          operation_name: INGRESS\n          provider:\n            name: envoy.tracers.lightstep\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.trace.v2.LightstepConfig\n              collector_cluster: lightstep_saas\n              access_token_file: \"/etc/envoy/lightstep_access_token\"\n        {% endif %}\n        common_http_protocol_options:\n          idle_timeout: 840s\n        access_log:\n        - name: envoy.access_loggers.file\n          filter:\n            or_filter:\n              filters:\n                - status_code_filter:\n                    comparison:\n                      op: GE\n                      value:\n                        default_value: 500\n                        runtime_key: access_log.access_error.status\n                - duration_filter:\n                    comparison:\n                      op: GE\n                      value:\n                        default_value: 1000\n                        runtime_key: access_log.access_error.duration\n                - 
traceable_filter: {}\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog      \n            path: \"/var/log/envoy/access_error.log\"\n            format: \"[%START_TIME%] \\\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \\\"%REQ(X-FORWARDED-FOR)%\\\" \\\"%REQ(USER-AGENT)%\\\" \\\"%REQ(X-REQUEST-ID)%\\\" \\\"%REQ(:AUTHORITY)%\\\" \\\"%REQ(X-LYFT-USER-ID)%\\\" \\\"%RESP(GRPC-STATUS)%\\\"\\n\"\n{% endmacro -%}\nstatic_resources:\n  listeners:\n    # TCP listeners for public HTTP/HTTPS endpoints. Assumes a TCP LB in front such as ELB which\n    # supports proxy proto.\n  - {{ listener(\"TCP\", \"0.0.0.0\", \"9300\", True, True, tracing)|indent(2) }}\n  - {{ listener(\"TCP\", \"0.0.0.0\", \"9301\", True, True, tracing)|indent(2) }}\n    # TCP listener for backhaul traffic from the double proxy.\n    # See envoy_double_proxy.template.json\n  - {{ listener(\"TCP\", \"0.0.0.0\", \"9400\", True, True, tracing, pin_double_proxy_client=True)|indent(2) }}\n  clusters:\n  - name: sds\n    type: STRICT_DNS\n    connect_timeout: 0.25s\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: sds\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: discovery.yourcompany.net\n                port_value: 80\n                protocol: TCP\n  - name: statsd\n    type: STATIC\n    connect_timeout: 0.25s\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: statsd\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8125\n                protocol: TCP\n  - name: lightstep_saas\n    type: LOGICAL_DNS\n    connect_timeout: 1s\n    lb_policy: ROUND_ROBIN\n    
load_assignment:\n      cluster_name: lightstep_saas\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: collector-grpc.lightstep.com\n                port_value: 443\n                protocol: TCP\n    http2_protocol_options: {}\n  {% for service, options in clusters.items() -%}\n  - {{ helper.internal_cluster_definition(service, options)|indent(2) }}\n  {% endfor %}\ncluster_manager:\n  outlier_detection:\n    event_log_path: /var/log/envoy/outlier_events.log\nflags_path: /etc/envoy/flags\nlayered_runtime:\n  layers:\n    - name: root\n      disk_layer:\n        symlink_root: /srv/configset/envoydata/current\n        subdirectory: envoy\n    - name: override\n      disk_layer:\n        symlink_root: /srv/configset/envoydata/current\n        subdirectory: envoy_override\n        append_service_cluster: true\n    - name: admin\n      admin_layer: {}\nadmin:\n  access_log_path: /var/log/envoy/admin_access.log\n  address:\n    socket_address:\n      protocol: TCP\n      address: 0.0.0.0\n      port_value: 9901\n"
  },
  {
    "path": "configs/envoy_router_v2.template.yaml",
    "content": "{% import 'routing_helper_v2.template.yaml' as helper with context -%}\nname: local_route\nvirtual_hosts:\n- name: www\n  domains:\n  - www.yourcompany.com\n  routes:\n  - match:\n      prefix: \"/foo/bar\"\n      runtime_fraction:\n        default_value:\n          numerator: 0\n          denominator: HUNDRED\n        runtime_key: routing.www.use_service_2\n    route:\n      {{ helper.make_route('service2')|indent(4) }}\n  - match:\n      prefix: \"/\"\n    route:\n      {{ helper.make_route('service1')|indent(4) }}\n  require_tls: ALL\n  rate_limits:\n  - actions:\n      remote_address: {}\n- name: www_redirect\n  domains:\n  - wwww.yourcompany.net\n  routes:\n  - match:\n      prefix: \"/\"\n    redirect:\n      host_redirect: www.yourcompany.net\n  require_tls: ALL\n  rate_limits:\n  - actions:\n      remote_address: {}\n- name: api\n  domains:\n  - api.yourcompany.net\n  routes:\n  - match:\n      path: \"/foo/bar\"\n    route:\n      {{ helper.make_route('service3')|indent(4) }}\n  - match:\n      prefix: \"/\"\n    route:\n      {{ helper.make_route('service1')|indent(4) }}\n  require_tls: EXTERNAL_ONLY\n  rate_limits:\n  - actions:\n      remote_address: {}\n"
  },
  {
    "path": "configs/envoy_service_to_service_v2.template.yaml",
    "content": "{% import 'routing_helper_v2.template.yaml' as helper -%}\n{% import 'access_log_format_helper_v2.template.yaml' as access_log_helper -%}\n{% macro ingress_listener(protocol, address, port_value) -%}\n- address:\n    socket_address:\n      protocol: {{protocol}}\n      address: {{address}}\n      port_value: {{port_value}}\n  traffic_direction: INBOUND\n  filter_chains:\n  - filters:\n    - name: envoy.filters.network.http_connection_manager\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n        codec_type: AUTO\n        stat_prefix: ingress_http\n        route_config:\n          name: local_route\n          virtual_hosts:\n          - name: local_service\n            domains:\n            - \"*\"\n            routes:\n            - match:\n                prefix: \"/\"\n                headers:\n                - name: content-type\n                  exact_match: application/grpc\n              route:\n                cluster: local_service_grpc\n            - match:\n                prefix: \"/\"\n              route:\n                cluster: local_service\n        http_filters:\n        - name: envoy.filters.http.health_check\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n            pass_through_mode: true\n            headers:\n              - name: \":path\"\n                exact_match: \"/healthcheck\"\n            cache_time: 2.5s\n        - name: envoy.filters.http.buffer\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer\n            max_request_bytes: 5242880\n        - name: envoy.filters.http.router\n          typed_config: {}\n        access_log:\n        - name: envoy.access_loggers.file\n          filter:\n            not_health_check_filter:  {}\n          typed_config:\n            \"@type\": 
type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n            path: \"/var/log/envoy/ingress_http.log\"\n            {{ access_log_helper.ingress_full()|indent(10)}}\n        - name: envoy.access_loggers.file\n          filter:\n            and_filter:\n              filters:\n                - or_filter:\n                    filters:\n                    - status_code_filter:\n                        comparison:\n                          op: GE\n                          value:\n                            default_value: 400\n                            runtime_key: access_log.access_error.status\n                    - status_code_filter:\n                        comparison:\n                          op: EQ\n                          value:\n                            default_value: 0\n                            runtime_key: access_log.access_error.status\n                    - duration_filter:\n                        comparison:\n                          op: GE\n                          value:\n                            default_value: 2000\n                            runtime_key: access_log.access_error.duration\n                - not_health_check_filter: {}\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n            path: \"/var/log/envoy/ingress_http_error.log\"\n            {{ access_log_helper.ingress_sampled_log()|indent(10)}}\n        - name: envoy.access_loggers.file\n          filter:\n            and_filter:\n              filters:\n              - not_health_check_filter: {}\n              - runtime_filter:\n                  runtime_key:  access_log.ingress_http\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n            path: \"/var/log/envoy/ingress_http_sampled.log\"\n            {{ access_log_helper.ingress_sampled_log()|indent(10)}}\n        common_http_protocol_options:\n          idle_timeout: 840s\n{% 
endmacro -%}\nstatic_resources:\n  listeners:\n  {{ ingress_listener(\"tcp\", \"0.0.0.0\", 9211) | indent(2)}}\n  - address:\n      socket_address:\n        protocol: TCP\n        port_value: 9001\n        address: 127.0.0.1\n    traffic_direction: OUTBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: AUTO\n          stat_prefix: egress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            {% for service, options in internal_virtual_hosts.items() %}\n            - name: {{ service }}\n              domains:\n              - {{ service }}\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  {{ helper.make_route_internal(service, options)|indent(16) }}\n            {% endfor %}\n          add_user_agent: true\n          common_http_protocol_options:\n            idle_timeout: 840s\n          access_log:\n          - name: envoy.access_loggers.file\n            filter:\n              or_filter:\n                filters:\n                  - status_code_filter:\n                      comparison:\n                        op: GE\n                        value:\n                          default_value: 400\n                          runtime_key: access_log.access_error.status\n                  - duration_filter:\n                      comparison:\n                        op: GE\n                        value:\n                          default_value: 2000\n                          runtime_key: access_log.access_error.duration\n                  - traceable_filter: {}\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n              path: \"/var/log/envoy/egress_http_error.log\"\n  
            {{ access_log_helper.egress_error_log()|indent(10) }}\n          use_remote_address: true\n          http_filters:\n          - name: envoy.filters.http.ratelimit\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit\n              domain: envoy_service_to_service\n              rate_limit_service:\n                 grpc_service:\n                    envoy_grpc:\n                       cluster_name: ratelimit\n          - name: envoy.filters.http.grpc_http1_bridge\n            typed_config: {}\n          - name: envoy.filters.http.router\n            typed_config: {}\n\n  - address:\n      socket_address:\n        protocol: TCP\n        port_value: 9002\n        address: 127.0.0.1\n    traffic_direction: OUTBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: AUTO\n          stat_prefix: egress_http\n          rds:\n            config_source:\n               api_config_source:\n                 api_type: GRPC\n                 grpc_services:\n                   envoy_grpc:\n                     cluster_name: \"rds\"\n            route_config_name: rds_config_for_listener_1\n          add_user_agent: true\n          common_http_protocol_options:\n            idle_timeout: 840s\n          access_log:\n          - name: envoy.access_loggers.file\n            filter:\n              or_filter:\n                filters:\n                  - status_code_filter:\n                      comparison:\n                        op: GE\n                        value:\n                          default_value: 400\n                          runtime_key: access_log.access_error.status\n                  - duration_filter:\n                      comparison:\n                        op: GE\n  
                      value:\n                          default_value: 2000\n                          runtime_key: access_log.access_error.duration\n                  - traceable_filter: {}\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n              path: \"/var/log/envoy/egress_http_error.log\"\n              {{ access_log_helper.egress_error_log()|indent(10) }}\n          use_remote_address: true\n          http_filters:\n          - name: envoy.filters.http.ratelimit\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit\n              domain: envoy_service_to_service\n              rate_limit_service:\n                 grpc_service:\n                    envoy_grpc:\n                       cluster_name: ratelimit\n          - name: envoy.filters.http.grpc_http1_bridge\n            typed_config: {}\n          - name: envoy.filters.http.router\n            typed_config: {}\n  {% if external_virtual_hosts|length > 0 or mongos_servers|length > 0 %}{% endif -%}\n  {% for mapping in external_virtual_hosts -%}\n  - name: \"{{ mapping['address']}}\"\n    address:\n      socket_address:\n        address: \"{{ mapping['address'] }}\"\n        protocol: TCP\n        port_value: 9901\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: AUTO\n          common_http_protocol_options:\n            idle_timeout: 840s\n          stat_prefix: egress_{{ mapping['name'] }}\n          #update access_logs here\n          route_config:\n            virtual_hosts:\n            {% for host in mapping['hosts'] %}\n            - name: egress_{{ host['name'] }}\n              domains:\n              - \"{{ host['domain'] }}\"\n              
routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: egress_{{ host['name'] }}\n                  retry_policy:\n                    retry_on: connect-failure\n                  {% if host.get('host_rewrite', False) %}\n                  host_rewrite: \"{{host['host_rewrite']}}\"\n                  {% endif %}\n            {% endfor %}\n          http_filters:\n          {% if mapping['name'] in ['dynamodb_iad', 'dynamodb_legacy'] -%}\n          - name: envoy.filters.http.dynamo\n            typed_config: {}\n          {% endif -%}\n          - name: envoy.filters.http.router\n            typed_config: {}\n          access_log:\n          - name: envoy.access_loggers.file\n            filter:\n              or_filter:\n                filters:\n                  - status_code_filter:\n                      comparison:\n                        op: GE\n                        value:\n                          default_value: 400\n                          runtime_key: access_log.access_error.status\n                  - status_code_filter:\n                      comparison:\n                        op: EQ\n                        value:\n                          default_value: 0\n                          runtime_key: access_log.access_error.status\n                  {% if mapping.get('log_high_latency_requests', True) %}\n                  - duration_filter:\n                      comparison:\n                        op: GE\n                        value:\n                          default_value: 2000\n                          runtime_key: access_log.access_error.duration\n                  {% endif %}\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n              path: \"/var/log/envoy/egress_{{ mapping['name'] }}_http_error.log\"\n              {% if mapping.get('is_amzn_service', False) -%}\n              {{ 
access_log_helper.egress_error_amazon_service()|indent(10) }}\n              {% else -%}\n              {{ access_log_helper.egress_error_log()|indent(10) }}\n              {% endif %}\n  {% if (mongos_servers|length > 0) or (mongos_servers|length == 0 and not loop.last ) %}{% endif -%}\n  {% endfor -%}\n  {% for key, value in mongos_servers.items() -%}\n  - name : \"{{ value['address'] }}\"\n    address:\n      socket_address:\n        address: \"{{ value['address'] }}\"\n        protocol: TCP\n        port_value: 9003\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.tcp_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy\n          stat_prefix: mongo_{{ key }}\n          cluster: mongo_{{ key }}\n      - name: envoy.filters.network.mongo_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.mongo_proxy.v2.MongoProxy\n          stat_prefix: \"{{ key }}\"\n          access_log: \"/var/log/envoy/mongo_{{ key }}.log\"\n      {% if value.get('ratelimit', False) %}\n      - name: envoy.filters.network.ratelimit\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.ratelimit.v3.RateLimit\n          stat_prefix: \"{{ key }}\"\n          domain: envoy_mongo_cps\n          descriptors:\n            entries:\n            - key: database\n              value: \"{{ key }}\"\n      {% endif %}\n  {% endfor -%}\n  clusters:\n  {% for service, options in internal_virtual_hosts.items() -%}\n  - {{ helper.internal_cluster_definition(service, options)|indent(2)}}\n  {% endfor -%}\n  {% for mapping in external_virtual_hosts -%}\n  {% for host in mapping['hosts'] -%}\n  - name: egress_{{ host['name'] }}\n    {% if host.get('ssl', False) %}\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n      
  common_tls_context:\n          validation_context:\n            trusted_ca:\n              filename: certs/cacert.pem\n            {% if host.get('verify_subject_alt_name', False) %}\n            match_subject_alt_names:\n              exact: \"{{host['verify_subject_alt_name'] }}\"\n            {% endif %}\n        {% if host.get('sni', False) %}\n        sni: \"{{ host['sni'] }}\"\n        {% endif %}\n    connect_timeout: 1s\n    {% else %}\n    connect_timeout: 0.25s\n    {% endif %}\n    type: LOGICAL_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: egress_{{ host['name'] }}\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: {{ host['remote_address'] }}\n                port_value: {{ host['port_value'] }}\n                protocol: {{ host['protocol'] }}\n  {% endfor -%}\n  {% endfor -%}\n  {% for key, value in mongos_servers.items() -%}\n  - name: mongo_{{ key }}\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: RANDOM\n    load_assignment:\n      cluster_name: mongo_{{ key }}\n      endpoints:\n      - lb_endpoints:\n        {% for server in value['hosts'] -%}\n        - endpoint:\n            address:\n              socket_address:\n                address: {{ server['address'] }}\n                port_value: {{ server['port_value'] }}\n                protocol: {{ server['protocol'] }}\n        {% endfor -%}\n  {% endfor %}\n  - name: main_website\n    connect_timeout: 0.25s\n    type: LOGICAL_DNS\n    # Comment out the following line to test on v6 networks\n    dns_lookup_family: V4_ONLY\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: main_website\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: main_website.com\n                port_value: 443\n                protocol: TCP\n    transport_socket:\n      name: 
envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n        sni: www.main_website.com\n  - name: local_service\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\n                protocol: TCP\n    circuit_breakers:\n      thresholds:\n        max_pending_requests: 30\n        max_connections: 100\n  - name: local_service_grpc\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: local_service_grpc\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8081\n                protocol: TCP\n    circuit_breakers:\n      thresholds:\n        max_requests: 200\n    dns_lookup_family: V4_ONLY\n  - name: rds\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    http2_protocol_options:\n      connection_keepalive:\n        interval: 30s\n        timeout: 5s\n    load_assignment:\n      cluster_name: rds\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: rds.yourcompany.net\n                port_value: 80\n                protocol: TCP\n    dns_lookup_family: V4_ONLY\n  - name: statsd\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: statsd\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8125\n                
protocol: TCP\n    dns_lookup_family: V4_ONLY\n  - name: lightstep_saas\n    connect_timeout: 1s\n    type: LOGICAL_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: lightstep_saas\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: collector-grpc.lightstep.com\n                port_value: 443\n                protocol: TCP\n    http2_protocol_options:\n      max_concurrent_streams: 100\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n        common_tls_context:\n          validation_context:\n            trusted_ca:\n              filename: certs/cacert.pem\n            match_subject_alt_names:\n              exact: \"collector-grpc.lightstep.com\"\n  - name: cds_cluster\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: cds_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: cds.yourcompany.net\n                port_value: 80\n                protocol: TCP\n  - name: sds\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: sds\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: discovery.yourcompany.net\n                port_value: 80\n                protocol: TCP\ndynamic_resources:\n  cds_config:\n    api_config_source:\n      api_type: REST\n      cluster_names:\n      - cds_cluster\n      refresh_delay: 30s\ncluster_manager: {}\nflags_path: \"/etc/envoy/flags\"\nstats_sinks:\n  - name: envoy.stat_sinks.statsd\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.metrics.v2.StatsdSink\n      
tcp_cluster_name: statsd\nlayered_runtime:\n  layers:\n    - name: root\n      disk_layer:\n        symlink_root: /srv/configset/envoydata/current\n        subdirectory: envoy\n    - name: override\n      disk_layer:\n        symlink_root: /srv/configset/envoydata/current\n        subdirectory: envoy_override\n        append_service_cluster: true\n    - name: admin\n      admin_layer: {}\nadmin:\n  access_log_path: /var/log/envoy/admin_access.log\n  address:\n    socket_address:\n      protocol: TCP\n      address: 0.0.0.0\n      port_value: 9901\n"
  },
  {
    "path": "configs/freebind/README.md",
    "content": "# Freebind testing\n\nTo manually validate the `IP_FREEBIND` behavior in Envoy, you can launch Envoy with\n[freebind.yaml](freebind.yaml).\n\nThe listener free bind behavior can be verified with:\n\n1. `envoy -c ./configs/freebind/freebind.yaml -l trace`\n2. `sudo ifconfig lo:1 192.168.42.1/30 up`\n3. `nc -v -l 0.0.0.0 10001`\n\nTo cleanup run `sudo ifconfig lo:1 down`.\n\nTODO(htuch): Steps to verify upstream behavior.\n"
  },
  {
    "path": "configs/freebind/freebind.yaml",
    "content": "admin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 9901\n\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        address: 192.168.42.1\n        port_value: 10000\n    freebind: true\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains: [\"*\"]\n              routes:\n              - match: { prefix: \"/\" }\n                route: { cluster: service_local }\n          http_filters:\n          - name: envoy.filters.http.router\n  clusters:\n  - name: service_local\n    connect_timeout: 30s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: service_local\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 10001\n# TODO(htuch): Figure out how to do end-to-end testing with\n# outgoing connections and free bind.\n#    upstream_bind_config:\n#      source_address:\n#        address: 192.168.43.1\n#      freebind: true\n"
  },
  {
    "path": "configs/google-vrp/envoy-edge.yaml",
    "content": "overload_manager:\n  refresh_interval: 0.25s\n  resource_monitors:\n  - name: \"envoy.resource_monitors.fixed_heap\"\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig\n      # TODO: Tune for your system.\n      max_heap_size_bytes: 1073741824 # 1 GiB\n  actions:\n  - name: \"envoy.overload_actions.shrink_heap\"\n    triggers:\n    - name: \"envoy.resource_monitors.fixed_heap\"\n      threshold:\n        value: 0.90\n  - name: \"envoy.overload_actions.stop_accepting_requests\"\n    triggers:\n    - name: \"envoy.resource_monitors.fixed_heap\"\n      threshold:\n        value: 0.95\n\nstatic_resources:\n  listeners:\n  - name: listener_https\n    address:\n      socket_address:\n        protocol: TCP\n        address: 0.0.0.0\n        port_value: 10000\n    per_connection_buffer_limit_bytes: 32768 # 32 KiB\n    filter_chains:\n    - transport_socket:\n        name: envoy.transport_sockets.tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n            - certificate_chain: { filename: \"certs/servercert.pem\" }\n              private_key: { filename: \"certs/serverkey.pem\" }\n      # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol.\n      # use_proxy_proto: true\n      filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          stat_prefix: ingress_http\n          use_remote_address: true\n          common_http_protocol_options:\n            idle_timeout: 3600s # 1 hour\n            headers_with_underscores_action: REJECT_REQUEST\n          http2_protocol_options:\n            max_concurrent_streams: 100\n            
initial_stream_window_size: 65536 # 64 KiB\n            initial_connection_window_size: 1048576 # 1 MiB\n          stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests\n          request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains: [\"*\"]\n              # The exact route table is not super important in this example (this is the model\n              # for the Google VRP scenario).\n              routes:\n              - match:\n                  prefix: \"/content\"\n                route:\n                  cluster: service_foo\n                  idle_timeout: 15s # must be disabled for long-lived and streaming requests\n              - match:\n                  prefix: \"/\"\n                direct_response:\n                  status: 403\n                  body:\n                    inline_string: \"denied\\n\"\n          http_filters:\n          - name: envoy.filters.http.router\n  clusters:\n  - name: service_foo\n    connect_timeout: 5s\n    per_connection_buffer_limit_bytes: 32768 # 32 KiB\n    load_assignment:\n      cluster_name: service_foo\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 10002\n    http2_protocol_options:\n      initial_stream_window_size: 65536 # 64 KiB\n      initial_connection_window_size: 1048576 # 1 MiB\n"
  },
  {
    "path": "configs/google-vrp/envoy-origin.yaml",
    "content": "overload_manager:\n  refresh_interval: 0.25s\n  resource_monitors:\n  - name: \"envoy.resource_monitors.fixed_heap\"\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig\n      max_heap_size_bytes: 1073741824 # 1 GiB\n  actions:\n  - name: \"envoy.overload_actions.shrink_heap\"\n    triggers:\n    - name: \"envoy.resource_monitors.fixed_heap\"\n      threshold:\n        value: 0.95\n  - name: \"envoy.overload_actions.stop_accepting_requests\"\n    triggers:\n    - name: \"envoy.resource_monitors.fixed_heap\"\n      threshold:\n        value: 0.98\n\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        protocol: TCP\n        address: 0.0.0.0\n        port_value: 10002\n    per_connection_buffer_limit_bytes: 32768\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          stat_prefix: ingress_http\n          use_remote_address: true\n          common_http_protocol_options:\n            idle_timeout: 3600s # 1 hour\n            headers_with_underscores_action: REJECT_REQUEST\n          http2_protocol_options:\n            max_concurrent_streams: 100\n            initial_stream_window_size: 65536 # 64 KiB\n            initial_connection_window_size: 1048576 # 1 MiB\n          stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests\n          request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains: [\"*\"]\n              routes:\n              - match:\n                  path: \"/blockedz\"\n                direct_response:\n                  
status: 200\n                  body:\n                    inline_string: \"hidden treasure\\n\"\n              - match:\n                  prefix: \"/\"\n                direct_response:\n                  status: 200\n                  body:\n                    inline_string: \"normal\\n\"\n          http_filters:\n          - name: envoy.filters.http.router\n"
  },
  {
    "path": "configs/google-vrp/launch_envoy.sh",
    "content": "#!/bin/bash\n\ncd /etc/envoy || exit\nenvoy \"$@\"\n"
  },
  {
    "path": "configs/google-vrp/supervisor.conf",
    "content": "[supervisord]\nnodaemon=true\n\n[program:envoy-edge]\ncommand=launch_envoy.sh -c /etc/envoy/envoy-edge.yaml %(ENV_ENVOY_EDGE_EXTRA_ARGS)s\n  --log-format \"(edge)[%%Y-%%m-%%d %%T.%%e][%%t][%%l][%%n] %%v\" --base-id 0\nredirect_stderr=true\nstdout_logfile_maxbytes=0\nstdout_logfile=/dev/stdout\n\n[program:envoy-origin]\ncommand=launch_envoy.sh -c /etc/envoy/envoy-origin.yaml %(ENV_ENVOY_ORIGIN_EXTRA_ARGS)s\n  --log-format \"(origin)[%%Y-%%m-%%d %%T.%%e][%%t][%%l][%%n] %%v\" --base-id 1\nredirect_stderr=true\nstdout_logfile_maxbytes=0\nstdout_logfile=/dev/stdout\n"
  },
  {
    "path": "configs/google_com_proxy.v2.yaml",
    "content": "admin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address:\n      protocol: TCP\n      address: 127.0.0.1\n      port_value: 9901\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        protocol: TCP\n        address: 0.0.0.0\n        port_value: 10000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains: [\"*\"]\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  host_rewrite: www.google.com\n                  cluster: service_google\n          http_filters:\n          - name: envoy.filters.http.router\n  clusters:\n  - name: service_google\n    connect_timeout: 30s\n    type: LOGICAL_DNS\n    # Comment out the following line to test on v6 networks\n    dns_lookup_family: V4_ONLY\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: service_google\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: www.google.com\n                port_value: 443\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n        sni: www.google.com\n"
  },
  {
    "path": "configs/original-dst-cluster/README.md",
    "content": "# Original destination cluster configuration and testing\n\nAn original destination cluster forwards requests to the same destination\nthe request was going to before being redirected to Envoy using an\niptables REDIRECT rule. `proxy_config.json` contains an example Envoy\nconfiguration demonstrating the use of an original destination\ncluster. `netns_setup.sh` and `netns_cleanup.sh` are provided as\nexamples for setting up and cleaning up, respectively, a network\nnamespace and the required iptables rule to redirect traffic to Envoy.\n\n# Setting up\n\n`netns_setup.sh` takes two arguments: the name of the new network\nnamespace and the prefix that is to be redirected. Envoy listener port\nis set to 10000, which matches the configuration in\n`proxy_config.json`.\n\nThis creates a network namespace `ns1` and redirects traffic from\nthere to Envoy listening on port 10000 if the destination address of\nthe traffic matches `173.194.222.0/24` :\n\n```\nsudo ./configs/original-dst-cluster/netns_setup.sh ns1 173.194.222.0/24\n```\n\n# Building and running Envoy\n\nBuild Envoy with debug options, so that the behavior can be better\nobserved from the logs:\n\n```\nbazel build //source/exe:envoy-static -c dbg\n```\n\nThen you should run Envoy with the provided example configuration:\n\n```\nbazel-out/local-dbg/bin/source/exe/envoy-static -c configs/original-dst-cluster/proxy_config.json -l debug\n```\n\nWhen running you should see periodical messages like `Cleaning up\nstale original dst hosts.`\n\n# Generating traffic\n\nNext we generate traffic from the new network namespace hitting the\nredirect rule. Run this from another terminal:\n\n```\nsudo ip netns exec ns1 curl -v 173.194.222.106:80\n```\n\nMost likely you'll see `301 Moved` in the curl response. In the rare\ncase of upstream connection timeout you'll see `503 Service\nUnavailable` instead. 
The connection timeout setting on the\nproxy_config.json is set to 6 seconds to make this less likely, but if\nno host with the destination address exists then you will get this\nresponse no matter how long the timeout setting is.\n\nYou should see lines with `Adding host 173.194.222.106:80` being\nlogged by each Envoy thread, followed by `Keeping active host\n173.194.222.106:80` and eventually `Removing stale host\n173.194.222.106:80`, again multiple times, once from each Envoy\nthread.\n\n# Cleaning up\n\nTo properly remove the added network namespace and the iptables\nconfiguration run `netns_cleanup.sh` with the same arguments as\nthe setup before:\n\n```\nsudo ./configs/original-dst-cluster/netns_cleanup.sh ns1 173.194.222.0/24\n```\n\nFinally, stop Envoy with `^C`.\n"
  },
  {
    "path": "configs/original-dst-cluster/netns_cleanup.sh",
    "content": "#!/usr/bin/env bash\n#\n# Cleanup network namespace after testing Envoy original_dst cluster\n#\n\nNETNS=$1\nTARGET_IP=$2\nENVOY_PORT=10000\n\n# remove iptables rule\niptables -t nat -D PREROUTING --src 0/0 --dst \"$TARGET_IP\" -p tcp --dport 80 -j REDIRECT --to-ports \"$ENVOY_PORT\"\n\n# delete network namespace\nip netns delete \"$NETNS\"\n\n# delete veth pair\nip link del \"$NETNS-veth0\" type veth peer name \"$NETNS-veth1\"\n"
  },
  {
    "path": "configs/original-dst-cluster/netns_setup.sh",
    "content": "#!/usr/bin/env bash\n#\n# Example setup network namespace for testing Envoy original_dst cluster\n# Clean up with the cleanup script with the same arguments.\n#\n# Test with:\n# $sudo ip netns exec ${NETNS} curl -v ${TARGET_IP}:80\n#\nset -e\n\n# name of the network namespace\nNETNS=$1 \n\n# IP address or prefix that will be redirected\nTARGET_IP=$2\n\n# Local Envoy Listener port number\nENVOY_PORT=10000\n\n# Create veth pair\nip link add \"$NETNS-veth0\" type veth peer name \"$NETNS-veth1\"\nifconfig \"$NETNS-veth0\" 10.0.200.2/24 up\n\n# Create network namespace\nip netns add \"$NETNS\"\n# Move veth peer to the namespace\nip link set \"$NETNS-veth1\" netns \"$NETNS\"\n\n# Configure network namespace\nip netns exec \"$NETNS\" ifconfig lo 127.0.0.1 up\nip netns exec \"$NETNS\" ifconfig \"$NETNS-veth1\" 10.0.200.1/24 up\nip netns exec \"$NETNS\" ip route add default via 10.0.200.2\n\n#configure iptables REDIRECT in the PREROUTING hook of the root name space nat table.\niptables -t nat -I PREROUTING --src 0/0 --dst \"$TARGET_IP\" -p tcp --dport 80 -j REDIRECT --to-ports \"$ENVOY_PORT\"\n"
  },
  {
    "path": "configs/original-dst-cluster/proxy_config.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 10000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: ingress_http\n          route_config:\n            name: local_service\n            virtual_hosts:\n            - name: backend\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: cluster1\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n          codec_type: auto\n    listener_filters:\n    - name: envoy.filters.listener.original_dst\n      typed_config: {}\n  clusters:\n  - name: cluster1\n    type: ORIGINAL_DST\n    connect_timeout: 6s\n    lb_policy: CLUSTER_PROVIDED\n    dns_lookup_family: V4_ONLY\ncluster_manager: {}\nadmin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 9901\n"
  },
  {
    "path": "configs/requirements.txt",
    "content": "Jinja2==2.11.2 \\\n    --hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \\\n    --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035\nMarkupSafe==1.1.1 \\\n    --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \\\n    --hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \\\n    --hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \\\n    --hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \\\n    --hash=sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42 \\\n    --hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \\\n    --hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \\\n    --hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \\\n    --hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \\\n    --hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \\\n    --hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \\\n    --hash=sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b \\\n    --hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \\\n    --hash=sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15 \\\n    --hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \\\n    --hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \\\n    --hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \\\n    --hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \\\n    --hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \\\n    --hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \\\n    
--hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \\\n    --hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \\\n    --hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \\\n    --hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \\\n    --hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \\\n    --hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \\\n    --hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \\\n    --hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \\\n    --hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \\\n    --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \\\n    --hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \\\n    --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \\\n    --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be\n"
  },
  {
    "path": "configs/routing_helper_v2.template.yaml",
    "content": "{%- macro make_route_internal(cluster, options) %}\n  cluster: {{ cluster }}\n  {%- if 'timeout' in options -%}\n  timeout: {{ options['timeout'] }}\n  {% endif %}\n  retry_policy:\n    retry_on: 5xx\n{%- endmacro %}\n{%- macro make_route(cluster) -%}\n  {{ make_route_internal(cluster, clusters.get(cluster, {})) }}\n{%- endmacro -%}\n{%- macro internal_cluster_definition(service, options) -%}\n  name: {{ service }}\n  connect_timeout: 0.250s\n  type: EDS\n  eds_cluster_config:\n    eds_config:\n      api_config_source:\n        api_type: REST\n        cluster_names:\n        - sds\n        refresh_delay: 30s\n    service_name: {{ service }}\n  lb_policy: LEAST_REQUEST\n  {% if 'max_requests' in options -%}\n  circuit_breakers:\n    thresholds:\n    - priority: DEFAULT\n      max_requests: {{ options['max_requests'] }}\n  {% endif -%}\n  health_checks:\n  - http_health_check:\n      path: /healthcheck\n      service_name_matcher:\n        prefix: accidents\n    timeout: 2s\n    interval: 5s\n    interval_jitter: 5s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n  outlier_detection:\n    success_rate_stdev_factor: 1900\n  http2_protocol_options: {}\n{% endmacro -%}\n"
  },
  {
    "path": "configs/terminate_connect.v3.yaml",
    "content": "admin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address:\n      protocol: TCP\n      address: 127.0.0.1\n      port_value: 9902\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        protocol: TCP\n        address: 127.0.0.1\n        port_value: 10001\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains:\n                - \"*\"\n              routes:\n                - match:\n                    connect_matcher:\n                      {}\n                  route:\n                    cluster: service_google\n                    upgrade_configs:\n                      - upgrade_type: CONNECT\n                        connect_config:\n                          {}\n          http_filters:\n          - name: envoy.filters.http.router\n          http2_protocol_options:\n            allow_connect: true\n          upgrade_configs:\n            - upgrade_type: CONNECT\n  clusters:\n  - name: service_google\n    connect_timeout: 0.25s\n    type: LOGICAL_DNS\n    # Comment out the following line to test on v6 networks\n    dns_lookup_family: V4_ONLY\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: service_google\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: www.google.com\n                port_value: 443\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n        sni: 
www.google.com\n"
  },
  {
    "path": "configs/using_deprecated_config.v2.yaml",
    "content": "admin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address:\n      protocol: TCP\n      address: 127.0.0.1\n      port_value: 9901\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        protocol: TCP\n        address: 0.0.0.0\n        port_value: 10000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains: [\"*\"]\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  host_rewrite: www.google.com\n                  cluster: service_google\n                  cors:\n                    allow_origin:\n                    - \"test-origin-1\"\n          http_filters:\n          - name: envoy.filters.http.router\n  clusters:\n  - name: service_google\n    connect_timeout: 0.25s\n    type: LOGICAL_DNS\n    # Comment out the following line to test on v6 networks\n    dns_lookup_family: V4_ONLY\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: service_google\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: www.google.com\n                port_value: 443\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n        sni: www.google.com\ntracing:\n  http:\n    name: envoy.tracers.zipkin\n    config:\n      collector_cluster: service_google\n      collector_endpoint: /api/v1/spans\n      collector_endpoint_version: HTTP_JSON_V1\nlayered_runtime:\n  
layers:\n  - name: static_layer\n    static_layer:\n      envoy.deprecated_features:envoy.config.trace.v2.ZipkinConfig.HTTP_JSON_V1: true\n      envoy.deprecated_features:envoy.api.v2.route.CorsPolicy.allow_origin: true\n"
  },
  {
    "path": "docs/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nexports_files([\"protodoc_manifest.yaml\"])\n\nenvoy_package()\n\n# TODO(phlax): fix failing/excluded configs\n# the following config only fails on windows:\n#       dns-cache-circuit-breaker: \"Error: unable to read file: /etc/ssl/certs/ca-certificates.crt\"\n\nfilegroup(\n    name = \"configs\",\n    srcs = glob(\n        [\"root/**/*.yaml\"],\n        exclude = [\n            \"root/intro/_include/life-of-a-request.yaml\",\n            \"root/intro/arch_overview/security/_include/ssl.yaml\",\n            \"root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml\",\n            \"root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml\",\n            \"root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml\",\n        ],\n    ),\n)\n"
  },
  {
    "path": "docs/README.md",
    "content": "# Building documentation locally\n\nThere are two methods to build the documentation, described below.\n\nIn both cases, the generated output can be found in `generated/docs`.\n\n## Building in an existing Envoy development environment\n\nIf you have an [existing Envoy development environment](https://github.com/envoyproxy/envoy/tree/master/bazel#quick-start-bazel-build-for-developers), you should have the necessary dependencies and requirements and be able to build the documentation directly.\n\n```bash\n./docs/build.sh\n```\n\nBy default configuration examples are going to be validated during build. To disable validation,\nset `SPHINX_SKIP_CONFIG_VALIDATION` environment variable to `true`:\n\n```bash\nSPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh\n```\n\n## Using the Docker build container to build the documentation\n\nIf you *do not* have an existing development environment, you may wish to use the Docker build\nimage that is used in continuous integration.\n\nThis can be done as follows:\n\n```\n./ci/run_envoy_docker.sh 'docs/build.sh'\n```\n\nTo use this method you will need a minimum of 4-5GB of disk space available to accommodate the build image.\n\n# Creating a Pull Request with documentation changes\n\nWhen you create a Pull Request the documentation is rendered by CircleCI.\n\nIf you are logged in to CircleCI (it is possible to authenticate using your Github account), you can view\nthe rendered changes.\n\nTo do this:\n- click `Details` in the `ci/circleci: docs` check at the bottom of the Pull Request.\n- click `ARTIFACTS` in the CircleCI dashboard\n- browse to the documentation root at `generated/docs/index.html`.\n\n# How the Envoy website and docs are updated\n\n1. The docs are published to [docs/envoy/latest](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy/latest)\n   on every commit to master. 
This process is handled by CircleCI with the\n  [`publish.sh`](https://github.com/envoyproxy/envoy/blob/master/docs/publish.sh) script.\n\n2. The docs are published to [docs/envoy](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy)\n   in a directory named after every tagged commit in this repo. Thus, on every tagged release there\n   are snapped docs.\n"
  },
  {
    "path": "docs/_ext/validating_code_block.py",
    "content": "from typing import List\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom docutils.parsers.rst import directives\nfrom sphinx.application import Sphinx\nfrom sphinx.util.docutils import SphinxDirective\nfrom sphinx.directives.code import CodeBlock\nfrom sphinx.errors import ExtensionError\n\nimport os\nimport subprocess\n\n\nclass ValidatingCodeBlock(CodeBlock):\n  \"\"\"A directive that provides protobuf yaml formatting and validation.\n\n    'type-name' option is required and expected to conain full Envoy API type.\n    An ExtensionError is raised on validation failure.\n    Validation will be skipped if SPHINX_SKIP_CONFIG_VALIDATION environment variable is set.\n    \"\"\"\n  has_content = True\n  required_arguments = CodeBlock.required_arguments\n  optional_arguments = CodeBlock.optional_arguments\n  final_argument_whitespace = CodeBlock.final_argument_whitespace\n  option_spec = {\n      'type-name': directives.unchanged,\n  }\n  option_spec.update(CodeBlock.option_spec)\n  skip_validation = (os.getenv('SPHINX_SKIP_CONFIG_VALIDATION') or 'false').lower() == 'true'\n\n  def run(self):\n    source, line = self.state_machine.get_source_and_line(self.lineno)\n    # built-in directives.unchanged_required option validator produces a confusing error message\n    if self.options.get('type-name') == None:\n      raise ExtensionError(\"Expected type name in: {0} line: {1}\".format(source, line))\n\n    if not ValidatingCodeBlock.skip_validation:\n      args = [\n          'bazel-bin/tools/config_validation/validate_fragment',\n          self.options.get('type-name'), '-s', '\\n'.join(self.content)\n      ]\n      completed = subprocess.run(args,\n                                 stdout=subprocess.PIPE,\n                                 stderr=subprocess.PIPE,\n                                 encoding='utf-8')\n      if completed.returncode != 0:\n        raise ExtensionError(\n            \"Failed config validation for type: 
'{0}' in: {1} line: {2}:\\n {3}\".format(\n                self.options.get('type-name'), source, line, completed.stderr))\n\n    self.options.pop('type-name', None)\n    return list(CodeBlock.run(self))\n\n\ndef setup(app):\n  app.add_directive(\"validated-code-block\", ValidatingCodeBlock)\n\n  return {\n      'version': '0.1',\n      'parallel_read_safe': True,\n      'parallel_write_safe': True,\n  }\n"
  },
  {
    "path": "docs/build.sh",
    "content": "#!/usr/bin/env bash\n\n# set SPHINX_SKIP_CONFIG_VALIDATION environment variable to true to skip\n# validation of configuration examples\n\n. tools/shell_utils.sh\n\nset -e\n\n# We need to set ENVOY_DOCS_VERSION_STRING and ENVOY_DOCS_RELEASE_LEVEL for Sphinx.\n# We also validate that the tag and version match at this point if needed.\nif [ -n \"$CIRCLE_TAG\" ]\nthen\n  # Check the git tag matches the version number in the VERSION file.\n  VERSION_NUMBER=$(cat VERSION)\n  if [ \"v${VERSION_NUMBER}\" != \"${CIRCLE_TAG}\" ]; then\n    echo \"Given git tag does not match the VERSION file content:\"\n    echo \"${CIRCLE_TAG} vs $(cat VERSION)\"\n    exit 1\n  fi\n  # Check the version_history.rst contains current release version.\n  grep --fixed-strings \"$VERSION_NUMBER\" docs/root/version_history/current.rst \\\n    || (echo \"Git tag not found in version_history/current.rst\" && exit 1)\n\n  # Now that we know there is a match, we can use the tag.\n  export ENVOY_DOCS_VERSION_STRING=\"tag-$CIRCLE_TAG\"\n  export ENVOY_DOCS_RELEASE_LEVEL=tagged\n  export ENVOY_BLOB_SHA=\"$CIRCLE_TAG\"\nelse\n  BUILD_SHA=$(git rev-parse HEAD)\n  VERSION_NUM=$(cat VERSION)\n  export ENVOY_DOCS_VERSION_STRING=\"${VERSION_NUM}\"-\"${BUILD_SHA:0:6}\"\n  export ENVOY_DOCS_RELEASE_LEVEL=pre-release\n  export ENVOY_BLOB_SHA=\"$BUILD_SHA\"\nfi\n\nSCRIPT_DIR=\"$(dirname \"$0\")\"\nSRC_DIR=\"$(dirname \"$SCRIPT_DIR\")\"\nAPI_DIR=\"${SRC_DIR}\"/api\nCONFIGS_DIR=\"${SRC_DIR}\"/configs\nBUILD_DIR=build_docs\n[[ -z \"${DOCS_OUTPUT_DIR}\" ]] && DOCS_OUTPUT_DIR=generated/docs\n[[ -z \"${GENERATED_RST_DIR}\" ]] && GENERATED_RST_DIR=generated/rst\n\nrm -rf \"${DOCS_OUTPUT_DIR}\"\nmkdir -p \"${DOCS_OUTPUT_DIR}\"\n\nrm -rf \"${GENERATED_RST_DIR}\"\nmkdir -p \"${GENERATED_RST_DIR}\"\n\nsource_venv \"$BUILD_DIR\"\npip3 install --require-hashes -r \"${SCRIPT_DIR}\"/requirements.txt\n\n# Clean up any stale files in the API tree output. 
Bazel remembers valid cached\n# files still.\nrm -rf bazel-bin/external/envoy_api_canonical\n\nEXTENSION_DB_PATH=\"$(realpath \"${BUILD_DIR}/extension_db.json\")\"\nexport EXTENSION_DB_PATH\n\n# This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files.\nIFS=\" \" read -ra BAZEL_BUILD_OPTIONS <<< \"${BAZEL_BUILD_OPTIONS:-}\"\nBAZEL_BUILD_OPTIONS+=(\n    \"--remote_download_outputs=all\"\n    \"--strategy=protodoc=sandboxed,local\"\n    \"--action_env=ENVOY_BLOB_SHA\"\n    \"--action_env=EXTENSION_DB_PATH\")\n\n# Generate extension database. This maps from extension name to extension\n# metadata, based on the envoy_cc_extension() Bazel target attributes.\n./docs/generate_extension_db.py \"${EXTENSION_DB_PATH}\"\n\n# Generate RST for the lists of trusted/untrusted extensions in\n# intro/arch_overview/security docs.\nmkdir -p \"${GENERATED_RST_DIR}\"/intro/arch_overview/security\n./docs/generate_extension_rst.py \"${EXTENSION_DB_PATH}\" \"${GENERATED_RST_DIR}\"/intro/arch_overview/security\n\n# Generate RST for external dependency docs in intro/arch_overview/security.\n./docs/generate_external_dep_rst.py \"${GENERATED_RST_DIR}\"/intro/arch_overview/security\n\nfunction generate_api_rst() {\n  local proto_target\n  declare -r API_VERSION=$1\n  echo \"Generating ${API_VERSION} API RST...\"\n\n  # Generate the extensions docs\n  bazel build \"${BAZEL_BUILD_OPTIONS[@]}\" @envoy_api_canonical//:\"${API_VERSION}\"_protos --aspects \\\n    tools/protodoc/protodoc.bzl%protodoc_aspect --output_groups=rst\n\n  # Fill in boiler plate for extensions that have google.protobuf.Empty as their\n  # config.\n  bazel run \"${BAZEL_BUILD_OPTIONS[@]}\" //tools/protodoc:generate_empty \\\n    \"${PWD}\"/docs/empty_extensions.json \"${PWD}/${GENERATED_RST_DIR}/api-${API_VERSION}\"/config\n\n  # We do ** matching below to deal with Bazel cache blah (source proto artifacts\n  # are nested inside source package targets).\n  shopt -s globstar\n\n  # Find all 
source protos.\n  proto_target=$(bazel query \"labels(srcs, labels(deps, @envoy_api_canonical//:${API_VERSION}_protos))\")\n  declare -r proto_target\n\n  # Only copy in the protos we care about and know how to deal with in protodoc.\n  for p in ${proto_target}\n  do\n    declare PROTO_FILE_WITHOUT_PREFIX=\"${p#@envoy_api_canonical//}\"\n    declare PROTO_FILE_CANONICAL=\"${PROTO_FILE_WITHOUT_PREFIX/://}\"\n    # We use ** glob matching here to deal with the fact that we have something\n    # like\n    # bazel-bin/external/envoy_api_canonical/envoy/admin/v2alpha/pkg/envoy/admin/v2alpha/certs.proto.proto\n    # and we don't want to have to do a nested loop and slow bazel query to\n    # recover the canonical package part of the path.\n    declare SRCS=(bazel-bin/external/envoy_api_canonical/**/\"${PROTO_FILE_CANONICAL}.rst\")\n    # While we may have reformatted the file multiple times due to the transitive\n    # dependencies in the aspect above, they all look the same. So, just pick an\n    # arbitrary match and we're done.\n    declare SRC=\"${SRCS[0]}\"\n    declare DST=\"${GENERATED_RST_DIR}/api-${API_VERSION}/${PROTO_FILE_CANONICAL#envoy/}\".rst\n\n    mkdir -p \"$(dirname \"${DST}\")\"\n    cp -f \"${SRC}\" \"$(dirname \"${DST}\")\"\n  done\n}\n\ngenerate_api_rst v2\ngenerate_api_rst v3\n\n# Fixup anchors and references in v3 so they form a distinct namespace.\n# TODO(htuch): Do this in protodoc generation in the future.\nfind \"${GENERATED_RST_DIR}\"/api-v3 -name \"*.rst\" -print0 | xargs -0 sed -i -e \"s#envoy_api_#envoy_v3_api_#g\"\nfind \"${GENERATED_RST_DIR}\"/api-v3 -name \"*.rst\" -print0 | xargs -0 sed -i -e \"s#config_resource_monitors#v3_config_resource_monitors#g\"\n\n# xDS protocol spec.\nmkdir -p ${GENERATED_RST_DIR}/api-docs\ncp -f \"${API_DIR}\"/xds_protocol.rst \"${GENERATED_RST_DIR}/api-docs/xds_protocol.rst\"\n# Edge hardening example YAML.\nmkdir -p \"${GENERATED_RST_DIR}\"/configuration/best_practices\ncp -f 
\"${CONFIGS_DIR}\"/google-vrp/envoy-edge.yaml \"${GENERATED_RST_DIR}\"/configuration/best_practices\n\nrsync -rav  \"${API_DIR}/diagrams\" \"${GENERATED_RST_DIR}/api-docs\"\n\nrsync -av \"${SCRIPT_DIR}\"/root/ \"${SCRIPT_DIR}\"/conf.py \"${SCRIPT_DIR}\"/_ext \"${GENERATED_RST_DIR}\"\n\n# To speed up validate_fragment invocations in validating_code_block\nbazel build \"${BAZEL_BUILD_OPTIONS[@]}\" //tools/config_validation:validate_fragment\n\nsphinx-build -W --keep-going -b html \"${GENERATED_RST_DIR}\" \"${DOCS_OUTPUT_DIR}\"\n"
  },
  {
    "path": "docs/conf.py",
    "content": "# -*- coding: utf-8 -*-\n#\n# envoy documentation build configuration file, created by\n# sphinx-quickstart on Sat May 28 10:51:27 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nfrom datetime import datetime\nimport os\nfrom sphinx.directives.code import CodeBlock\nimport sphinx_rtd_theme\nimport sys\n\n\n# https://stackoverflow.com/questions/44761197/how-to-use-substitution-definitions-with-code-blocks\nclass SubstitutionCodeBlock(CodeBlock):\n  \"\"\"\n  Similar to CodeBlock but replaces placeholders with variables. See \"substitutions\" below.\n  \"\"\"\n\n  def run(self):\n    \"\"\"\n    Replace placeholders with given variables.\n    \"\"\"\n    app = self.state.document.settings.env.app\n    new_content = []\n    existing_content = self.content\n    for item in existing_content:\n      for pair in app.config.substitutions:\n        original, replacement = pair\n        item = item.replace(original, replacement)\n      new_content.append(item)\n\n    self.content = new_content\n    return list(CodeBlock.run(self))\n\n\ndef setup(app):\n  app.add_config_value('release_level', '', 'env')\n  app.add_config_value('substitutions', [], 'html')\n  app.add_directive('substitution-code-block', SubstitutionCodeBlock)\n\n\nif not os.environ.get('ENVOY_DOCS_RELEASE_LEVEL'):\n  raise Exception(\"ENVOY_DOCS_RELEASE_LEVEL env var must be defined\")\n\nrelease_level = os.environ['ENVOY_DOCS_RELEASE_LEVEL']\nblob_sha = os.environ['ENVOY_BLOB_SHA']\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\n\nsys.path.append(os.path.abspath(\"./_ext\"))\n\nextensions = [\n    'sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig', 'sphinx_tabs.tabs',\n    'sphinx_copybutton', 'validating_code_block'\n]\nextlinks = {\n    'repo': ('https://github.com/envoyproxy/envoy/blob/{}/%s'.format(blob_sha), ''),\n    'api': ('https://github.com/envoyproxy/envoy/blob/{}/api/%s'.format(blob_sha), ''),\n}\n\n# Setup global substitutions\nif 'pre-release' in release_level:\n  substitutions = [('|envoy_docker_image|', 'envoy-dev:{}'.format(blob_sha))]\nelse:\n  substitutions = [('|envoy_docker_image|', 'envoy:{}'.format(blob_sha))]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\ncopybutton_prompt_text = r\"\\$ |PS>\"\ncopybutton_prompt_is_regexp = True\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'envoy'\ncopyright = u'2016-{}, Envoy Project Authors'.format(datetime.now().year)\nauthor = u'Envoy Project Authors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\nif not 
os.environ.get('ENVOY_DOCS_VERSION_STRING'):\n  raise Exception(\"ENVOY_DOCS_VERSION_STRING env var must be defined\")\n\n# The short X.Y version.\nversion = os.environ['ENVOY_DOCS_VERSION_STRING']\n# The full version, including alpha/beta/rc tags.\nrelease = os.environ['ENVOY_DOCS_VERSION_STRING']\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\n    '_build',\n    '_venv',\n    'Thumbs.db',\n    '.DS_Store',\n    'api-v2/api/v2/endpoint/load_report.proto.rst',\n    'api-v2/service/discovery/v2/hds.proto.rst',\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\n#pygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n    'logo_only': True,\n    'includehidden': False,\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#html_title = u'envoy v1.0.0'\n\n# A shorter title for the navigation bar.  Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '_static/img/envoy-logo.png'\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = 'favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_style = 'css/envoy.css'\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'envoydoc'\n"
  },
  {
    "path": "docs/empty_extensions.json",
    "content": "{\n  \"envoy.filters.http.cors\": {\n    \"title\": \"CORS processing\",\n    \"path\": \"filter/http/cors\",\n    \"description\": \"https://en.wikipedia.org/wiki/Cross-origin_resource_sharing\",\n    \"ref\": \"config_http_filters_cors\"\n  },\n  \"envoy.filters.http.dynamo\": {\n    \"title\": \"AWS DynamoDB\",\n    \"path\": \"filter/http/dynamo\",\n    \"description\": \"https://aws.amazon.com/dynamodb/\",\n    \"ref\": \"config_http_filters_dynamo\"\n  },\n  \"envoy.filters.http.grpc_http1_bridge\": {\n    \"title\": \"gRPC HTTP/1 bridge\",\n    \"path\": \"filter/http/grpc_http1_bridge\",\n    \"description\": \"HTTP filter that bridges HTTP/1.1 unary gRPC to compliant HTTP/2 gRPC\",\n    \"ref\": \"config_http_filters_grpc_bridge\"\n  },\n  \"envoy.filters.http.grpc_web\": {\n    \"title\": \"gRPC Web\",\n    \"path\": \"filter/http/grpc_web\",\n    \"description\": \"https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md\",\n    \"ref\": \"config_http_filters_grpc_web\"\n  },\n  \"envoy.filters.listener.http_inspector\": {\n    \"title\": \"HTTP Inspector\",\n    \"path\": \"filter/listener/http_inspector\",\n    \"ref\": \"config_listener_filters_http_inspector\"\n  },\n  \"envoy.filters.listener.original_dst\": {\n    \"title\": \"Original Destination\",\n    \"path\": \"filter/listener/original_dst\",\n    \"ref\": \"config_listener_filters_original_dst\"\n  },\n  \"envoy.filters.listener.proxy_protocol\": {\n    \"title\": \"Proxy Protocol\",\n    \"path\": \"filter/listener/proxy_protocol\",\n    \"ref\": \"config_listener_filters_proxy_protocol\"\n  },\n  \"envoy.filters.listener.tls_inspector\": {\n    \"title\": \"TLS Inspector\",\n    \"path\": \"filter/listener/tls_inspector\",\n    \"ref\": \"config_listener_filters_tls_inspector\"\n  },\n  \"envoy.filters.network.echo\": {\n    \"title\": \"Echo\",\n    \"path\": \"filter/network/echo\",\n    \"ref\": \"config_network_filters_echo\"\n  },\n  
\"envoy.filters.network.sni_cluster\": {\n    \"title\": \"SNI Cluster\",\n    \"path\": \"filter/network/sni_cluster\",\n    \"ref\": \"config_network_filters_sni_cluster\"\n  },\n  \"envoy.retry_host_predicates.previous_hosts\": {\n    \"title\": \"Previous Hosts\",\n    \"path\": \"retry/previous_hosts\",\n    \"ref\": \"arch_overview_http_retry_plugins\"\n  },\n  \"envoy.retry_host_predicates.omit_canary_hosts\": {\n    \"title\": \"Omit Canary Hosts\",\n    \"path\": \"retry/omit_canary_hosts\",\n    \"ref\": \"arch_overview_http_retry_plugins\"\n  }\n}\n"
  },
  {
    "path": "docs/generate_extension_db.py",
    "content": "#!/usr/bin/env python3\n\n# Generate an extension database, a JSON file mapping from qualified well known\n# extension name to metadata derived from the envoy_cc_extension target.\n\nimport json\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nimport sys\n\nfrom importlib.util import spec_from_loader, module_from_spec\nfrom importlib.machinery import SourceFileLoader\n\nBUILDOZER_PATH = os.getenv(\"BUILDOZER_BIN\") or (os.path.expandvars(\"$GOPATH/bin/buildozer\") if\n                                                os.getenv(\"GOPATH\") else shutil.which(\"buildozer\"))\n\n# source/extensions/extensions_build_config.bzl must have a .bzl suffix for Starlark\n# import, so we are forced to do this workaround.\n_extensions_build_config_spec = spec_from_loader(\n    'extensions_build_config',\n    SourceFileLoader('extensions_build_config', 'source/extensions/extensions_build_config.bzl'))\nextensions_build_config = module_from_spec(_extensions_build_config_spec)\n_extensions_build_config_spec.loader.exec_module(extensions_build_config)\n\n\nclass ExtensionDbError(Exception):\n  pass\n\n\ndef IsMissing(value):\n  return value == '(missing)'\n\n\ndef GetExtensionMetadata(target):\n  r = subprocess.run(\n      [BUILDOZER_PATH, '-stdout', 'print security_posture status undocumented', target],\n      stdout=subprocess.PIPE,\n      stderr=subprocess.PIPE)\n  security_posture, status, undocumented = r.stdout.decode('utf-8').strip().split(' ')\n  if IsMissing(security_posture):\n    raise ExtensionDbError(\n        'Missing security posture for %s.  
Please make sure the target is an envoy_cc_extension and security_posture is set'\n        % target)\n  return {\n      'security_posture': security_posture,\n      'undocumented': False if IsMissing(undocumented) else bool(undocumented),\n      'status': 'stable' if IsMissing(status) else status,\n  }\n\n\nif __name__ == '__main__':\n  output_path = sys.argv[1]\n  extension_db = {}\n  for extension, target in extensions_build_config.EXTENSIONS.items():\n    extension_db[extension] = GetExtensionMetadata(target)\n  # The TLS and generic upstream extensions are hard-coded into the build, so\n  # not in source/extensions/extensions_build_config.bzl\n  extension_db['envoy.transport_sockets.tls'] = GetExtensionMetadata(\n      '//source/extensions/transport_sockets/tls:config')\n  extension_db['envoy.upstreams.http.generic'] = GetExtensionMetadata(\n      '//source/extensions/upstreams/http/generic:config')\n\n  pathlib.Path(output_path).write_text(json.dumps(extension_db))\n"
  },
  {
    "path": "docs/generate_extension_rst.py",
    "content": "#!/usr/bin/env python3\n\n# Generate RST lists of extensions grouped by their security posture.\n\nfrom collections import defaultdict\nimport json\nimport pathlib\nimport sys\n\n\ndef FormatItem(extension, metadata):\n  if metadata['undocumented']:\n    item = '* %s' % extension\n  else:\n    item = '* :ref:`%s <extension_%s>`' % (extension, extension)\n  if metadata['status'] == 'alpha':\n    item += ' (alpha)'\n  return item\n\n\nif __name__ == '__main__':\n  extension_db_path = sys.argv[1]\n  security_rst_root = sys.argv[2]\n\n  extension_db = json.loads(pathlib.Path(extension_db_path).read_text())\n  security_postures = defaultdict(list)\n  for extension, metadata in extension_db.items():\n    security_postures[metadata['security_posture']].append(extension)\n\n  for sp, extensions in security_postures.items():\n    output_path = pathlib.Path(security_rst_root, 'secpos_%s.rst' % sp)\n    content = '\\n'.join(\n        FormatItem(extension, extension_db[extension])\n        for extension in sorted(extensions)\n        if extension_db[extension]['status'] != 'wip')\n    output_path.write_text(content)\n"
  },
  {
    "path": "docs/generate_external_dep_rst.py",
    "content": "#!/usr/bin/env python3\n\n# Generate RST lists of external dependencies.\n\nfrom collections import defaultdict, namedtuple\nimport pathlib\nimport sys\nimport urllib.parse\n\nfrom importlib.util import spec_from_loader, module_from_spec\nfrom importlib.machinery import SourceFileLoader\n\n# bazel/repository_locations.bzl must have a .bzl suffix for Starlark import, so\n# we are forced to do this workaround.\n_repository_locations_spec = spec_from_loader(\n    'repository_locations',\n    SourceFileLoader('repository_locations', 'bazel/repository_locations.bzl'))\nrepository_locations = module_from_spec(_repository_locations_spec)\n_repository_locations_spec.loader.exec_module(repository_locations)\n\n\n# Render a CSV table given a list of table headers, widths and list of rows\n# (each a list of strings).\ndef CsvTable(headers, widths, rows):\n  csv_rows = '\\n  '.join(', '.join(row) for row in rows)\n  return f'''.. csv-table::\n  :header: {', '.join(headers)}\n  :widths: {', '.join(str(w) for w in widths) }\n\n  {csv_rows}\n\n'''\n\n\n# Anonymous external RST link for a given URL.\ndef RstLink(text, url):\n  return f'`{text} <{url}>`__'\n\n\n# NIST CPE database search URL for a given CPE.\ndef NistCpeUrl(cpe):\n  encoded_cpe = urllib.parse.quote(cpe)\n  return 'https://nvd.nist.gov/products/cpe/search/results?keyword=%s&status=FINAL&orderBy=CPEURI&namingFormat=2.3' % encoded_cpe\n\n\n# Render version strings human readable.\ndef RenderVersion(version):\n  # Heuristic, almost certainly a git SHA\n  if len(version) == 40:\n    # Abbreviate git SHA\n    return version[:7]\n  return version\n\n\ndef RenderTitle(title):\n  underline = '~' * len(title)\n  return f'\\n{title}\\n{underline}\\n\\n'\n\n\nif __name__ == '__main__':\n  security_rst_root = sys.argv[1]\n\n  Dep = namedtuple('Dep', ['name', 'sort_name', 'version', 'cpe', 'last_updated'])\n  use_categories = defaultdict(lambda: defaultdict(list))\n  # Bin rendered dependencies into per-use 
category lists.\n  for k, v in repository_locations.DEPENDENCY_REPOSITORIES.items():\n    cpe = v.get('cpe', '')\n    if cpe == 'N/A':\n      cpe = ''\n    if cpe:\n      cpe = RstLink(cpe, NistCpeUrl(cpe))\n    project_name = v['project_name']\n    project_url = v['project_url']\n    name = RstLink(project_name, project_url)\n    version = RstLink(RenderVersion(v['version']), v['urls'][0])\n    last_updated = v['last_updated']\n    dep = Dep(name, project_name.lower(), version, cpe, last_updated)\n    for category in v['use_category']:\n      for ext in v.get('extensions', ['core']):\n        use_categories[category][ext].append(dep)\n\n  def CsvRow(dep):\n    return [dep.name, dep.version, dep.last_updated, dep.cpe]\n\n  # Generate per-use category RST with CSV tables.\n  for category, exts in use_categories.items():\n    content = ''\n    for ext_name, deps in sorted(exts.items()):\n      if ext_name != 'core':\n        content += RenderTitle(ext_name)\n      output_path = pathlib.Path(security_rst_root, f'external_dep_{category}.rst')\n      content += CsvTable(['Name', 'Version', 'Last updated', 'CPE'], [2, 1, 1, 2],\n                          [CsvRow(dep) for dep in sorted(deps, key=lambda d: d.sort_name)])\n    output_path.write_text(content)\n"
  },
  {
    "path": "docs/protodoc_manifest.yaml",
    "content": "fields:\n  envoy.config.bootstrap.v3.Bootstrap.overload_manager:\n    edge_config:\n      example:\n        refresh_interval: 0.25s\n        resource_monitors:\n        - name: \"envoy.resource_monitors.fixed_heap\"\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig\n            max_heap_size_bytes: 1073741824\n        actions:\n        - name: \"envoy.overload_actions.shrink_heap\"\n          triggers:\n          - name: \"envoy.resource_monitors.fixed_heap\"\n            threshold:\n              value: 0.90\n        - name: \"envoy.overload_actions.stop_accepting_requests\"\n          triggers:\n          - name: \"envoy.resource_monitors.fixed_heap\"\n            threshold:\n              value: 0.95\n  envoy.config.cluster.v3.Cluster.per_connection_buffer_limit_bytes:\n    edge_config: { example: 32768 }\n  envoy.config.cluster.v3.Cluster.http2_protocol_options:\n    edge_config:\n      example:\n        initial_stream_window_size: 65536 # 64 KiB\n        initial_connection_window_size: 1048576 # 1 MiB\n  envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes:\n    edge_config: { example: 32768 }\n  envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options:\n    edge_config:\n      example:\n        idle_timeout: 900s # 15 mins\n        headers_with_underscores_action: REJECT_REQUEST\n  envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http2_protocol_options:\n    edge_config:\n      example:\n        max_concurrent_streams: 100\n        initial_stream_window_size: 65536 # 64 KiB\n        initial_connection_window_size: 1048576 # 1 MiB\n  envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout:\n    edge_config:\n      example: 300s # 5 mins\n  
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout:\n    edge_config:\n      note: >\n        This timeout is not compatible with streaming requests.\n      example: 300s # 5 mins\n  envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address:\n    edge_config: { example: true }\n"
  },
  {
    "path": "docs/publish.sh",
    "content": "#!/bin/bash\n\n# This is run on every commit that CircleCI picks up. It assumes that docs have already been built\n# via docs/build.sh. The push behavior differs depending on the nature of the commit:\n# * Tag commit (e.g. v1.6.0): pushes docs to versioned location, e.g.\n#   https://www.envoyproxy.io/docs/envoy/v1.6.0/.\n# * Master commit: pushes docs to https://www.envoyproxy.io/docs/envoy/latest/.\n# * Otherwise: noop.\n\nset -e\n\nDOCS_DIR=generated/docs\nCHECKOUT_DIR=../envoy-docs\nBUILD_SHA=$(git rev-parse HEAD)\n\nif [ -n \"$CIRCLE_TAG\" ]\nthen\n  PUBLISH_DIR=\"$CHECKOUT_DIR\"/docs/envoy/\"$CIRCLE_TAG\"\nelif [ -z \"$CIRCLE_PULL_REQUEST\" ] && [ \"$CIRCLE_BRANCH\" == \"master\" ]\nthen\n  PUBLISH_DIR=\"$CHECKOUT_DIR\"/docs/envoy/latest\nelse\n  echo \"Ignoring docs push\"\n  exit 0\nfi\n\necho 'cloning'\ngit clone git@github.com:envoyproxy/envoyproxy.github.io \"$CHECKOUT_DIR\"\n\ngit -C \"$CHECKOUT_DIR\" fetch\ngit -C \"$CHECKOUT_DIR\" checkout -B master origin/master\nrm -fr \"$PUBLISH_DIR\"\nmkdir -p \"$PUBLISH_DIR\"\ncp -r \"$DOCS_DIR\"/* \"$PUBLISH_DIR\"\ncd \"$CHECKOUT_DIR\"\n\ngit config user.name \"envoy-docs(travis)\"\ngit config user.email envoy-docs@users.noreply.github.com\necho 'add'\ngit add .\necho 'commit'\ngit commit -m \"docs envoy@$BUILD_SHA\"\necho 'push'\ngit push origin master\n"
  },
  {
    "path": "docs/requirements.txt",
    "content": "alabaster==0.7.12 \\\n    --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \\\n    --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02\nBabel==2.8.0 \\\n    --hash=sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38 \\\n    --hash=sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4\ncertifi==2020.6.20 \\\n    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \\\n    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41\nchardet==3.0.4 \\\n    --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \\\n    --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691\ndocutils==0.16 \\\n    --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \\\n    --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc\ngitdb==4.0.5 \\\n    --hash=sha256:91f36bfb1ab7949b3b40e23736db18231bf7593edada2ba5c3a174a7b23657ac \\\n    --hash=sha256:c9e1f2d0db7ddb9a704c2a0217be31214e91a4fe1dea1efad19ae42ba0c285c9\nGitPython==3.1.8 \\\n    --hash=sha256:080bf8e2cf1a2b907634761c2eaefbe83b69930c94c66ad11b65a8252959f912 \\\n    --hash=sha256:1858f4fd089abe92ae465f01d5aaaf55e937eca565fb2c1fce35a51b5f85c910\nidna==2.10 \\\n    --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \\\n    --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0\nimagesize==1.2.0 \\\n    --hash=sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1 \\\n    --hash=sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1\nJinja2==2.11.2 \\\n    --hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \\\n    --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035\nMarkupSafe==1.1.1 \\\n    
--hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \\\n    --hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \\\n    --hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \\\n    --hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \\\n    --hash=sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42 \\\n    --hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \\\n    --hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \\\n    --hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \\\n    --hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \\\n    --hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \\\n    --hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \\\n    --hash=sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b \\\n    --hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \\\n    --hash=sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15 \\\n    --hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \\\n    --hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \\\n    --hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \\\n    --hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \\\n    --hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \\\n    --hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \\\n    --hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \\\n    --hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \\\n    --hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \\\n   
 --hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \\\n    --hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \\\n    --hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \\\n    --hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \\\n    --hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \\\n    --hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \\\n    --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \\\n    --hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \\\n    --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \\\n    --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be\npackaging==20.4 \\\n    --hash=sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8 \\\n    --hash=sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181\nPygments==2.7.1 \\\n    --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \\\n    --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7\npyparsing==2.4.7 \\\n    --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \\\n    --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b\npytz==2020.1 \\\n    --hash=sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed \\\n    --hash=sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048\nrequests==2.24.0 \\\n    --hash=sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b \\\n    --hash=sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898\nsix==1.15.0 \\\n    --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \\\n    
--hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced\nsmmap==3.0.4 \\\n    --hash=sha256:54c44c197c819d5ef1991799a7e30b662d1e520f2ac75c9efbeb54a742214cf4 \\\n    --hash=sha256:9c98bbd1f9786d22f14b3d4126894d56befb835ec90cef151af566c7e19b5d24\nsnowballstemmer==2.0.0 \\\n    --hash=sha256:209f257d7533fdb3cb73bdbd24f436239ca3b2fa67d56f6ff88e86be08cc5ef0 \\\n    --hash=sha256:df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52\nSphinx==3.2.1 \\\n    --hash=sha256:321d6d9b16fa381a5306e5a0b76cd48ffbc588e6340059a729c6fdd66087e0e8 \\\n    --hash=sha256:ce6fd7ff5b215af39e2fcd44d4a321f6694b4530b6f2b2109b64d120773faea0\nsphinx-copybutton==0.3.0 \\\n    --hash=sha256:4becad3a1e7c50211f1477e34fd4b6d027680e1612f497cb5b88cf85bccddaaa \\\n    --hash=sha256:4cd06afd0588aa43eba968bfc6105e1ec6546c50a51f880af1d89afaebc6fb58\nsphinx-rtd-theme==0.5.0 \\\n    --hash=sha256:22c795ba2832a169ca301cd0a083f7a434e09c538c70beb42782c073651b707d \\\n    --hash=sha256:373413d0f82425aaa28fb288009bf0d0964711d347763af2f1b65cafcb028c82\nsphinx-tabs==1.3.0 \\\n    --hash=sha256:537857f91f1b371f7b45eb8ac83001618b3e3178c78df073d2cc4558a8e66ef5 \\\n    --hash=sha256:54132c8a57aa19bba6e17fe26eb94ea9df531708ff3f509b119313b32d0d5aff\nsphinxcontrib-applehelp==1.0.2 \\\n    --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \\\n    --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58\nsphinxcontrib-devhelp==1.0.2 \\\n    --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \\\n    --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4\nsphinxcontrib-htmlhelp==1.0.3 \\\n    --hash=sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f \\\n    --hash=sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b\nsphinxcontrib-httpdomain==1.7.0 \\\n    --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \\\n    
--hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335\nsphinxcontrib-jsmath==1.0.1 \\\n    --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \\\n    --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8\nsphinxcontrib-qthelp==1.0.3 \\\n    --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \\\n    --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6\nsphinxcontrib-serializinghtml==1.1.4 \\\n    --hash=sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc \\\n    --hash=sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a\nurllib3==1.25.10 \\\n    --hash=sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a \\\n    --hash=sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461\n"
  },
  {
    "path": "docs/root/_static/css/envoy.css",
    "content": "@import url(\"theme.css\");\n\n/* Splits a long line descriptions in tables in to multiple lines */\n.wy-table-responsive table td, .wy-table-responsive table th {\n  white-space: normal !important;\n}\n\n/* align multi line csv table columns */\ntable.docutils div.line-block {\n    margin-left: 0;\n}\n/* Breaking long words */\n.wy-nav-content {\n  overflow-wrap: break-word;\n  max-width: 1000px;\n}\n\n/* To style the API version label of a search result item */\n.api-version-label {\n  border-radius: 20%;\n  background-color: #c0c0c0;\n  color: #ffffff;\n  margin-left: 4px;\n  padding: 4px;\n}\n"
  },
  {
    "path": "docs/root/_static/placeholder",
    "content": ""
  },
  {
    "path": "docs/root/_static/searchtools.js",
    "content": "/*\n * searchtools.js\n * ~~~~~~~~~~~~~~~~\n *\n * Sphinx JavaScript utilities for the full-text search.\n *\n * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.\n * :license: BSD, see LICENSE for details.\n *\n */\n\n// Modified from https://raw.githubusercontent.com/sphinx-doc/sphinx/3.x/sphinx/themes/basic/static/searchtools.js\n// to have renderApiVersionLabel to render the API version for each search result item.\n\nif (!Scorer) {\n  /**\n   * Simple result scoring code.\n   */\n  var Scorer = {\n    // Implement the following function to further tweak the score for each result\n    // The function takes a result array [filename, title, anchor, descr, score]\n    // and returns the new score.\n    /*\n      score: function(result) {\n        return result[4];\n      },\n      */\n\n    // query matches the full name of an object\n    objNameMatch: 11,\n    // or matches in the last dotted part of the object name\n    objPartialMatch: 6,\n    // Additive scores depending on the priority of the object\n    objPrio: {\n      0: 15, // used to be importantResults\n      1: 5, // used to be objectResults\n      2: -5,\n    }, // used to be unimportantResults\n    //  Used when the priority is not in the mapping.\n    objPrioDefault: 0,\n\n    // query found in title\n    title: 15,\n    partialTitle: 7,\n    // query found in terms\n    term: 5,\n    partialTerm: 2,\n  };\n}\n\nif (!splitQuery) {\n  function splitQuery(query) {\n    return query.split(/\\s+/);\n  }\n}\n\n/**\n * Search Module\n */\nvar Search = {\n  _index: null,\n  _queued_query: null,\n  _pulse_status: -1,\n\n  htmlToText: function (htmlString) {\n    var htmlElement = document.createElement(\"span\");\n    htmlElement.innerHTML = htmlString;\n    $(htmlElement).find(\".headerlink\").remove();\n    docContent = $(htmlElement).find(\"[role=main]\")[0];\n    if (docContent === undefined) {\n      console.warn(\n        \"Content block not found. 
Sphinx search tries to obtain it \" +\n          \"via '[role=main]'. Could you check your theme or template.\"\n      );\n      return \"\";\n    }\n    return docContent.textContent || docContent.innerText;\n  },\n\n  init: function () {\n    var params = $.getQueryParameters();\n    if (params.q) {\n      var query = params.q[0];\n      $('input[name=\"q\"]')[0].value = query;\n      this.performSearch(query);\n    }\n  },\n\n  loadIndex: function (url) {\n    $.ajax({\n      type: \"GET\",\n      url: url,\n      data: null,\n      dataType: \"script\",\n      cache: true,\n      complete: function (jqxhr, textstatus) {\n        if (textstatus != \"success\") {\n          document.getElementById(\"searchindexloader\").src = url;\n        }\n      },\n    });\n  },\n\n  setIndex: function (index) {\n    var q;\n    this._index = index;\n    if ((q = this._queued_query) !== null) {\n      this._queued_query = null;\n      Search.query(q);\n    }\n  },\n\n  hasIndex: function () {\n    return this._index !== null;\n  },\n\n  deferQuery: function (query) {\n    this._queued_query = query;\n  },\n\n  stopPulse: function () {\n    this._pulse_status = 0;\n  },\n\n  startPulse: function () {\n    if (this._pulse_status >= 0) return;\n    function pulse() {\n      var i;\n      Search._pulse_status = (Search._pulse_status + 1) % 4;\n      var dotString = \"\";\n      for (i = 0; i < Search._pulse_status; i++) dotString += \".\";\n      Search.dots.text(dotString);\n      if (Search._pulse_status > -1) window.setTimeout(pulse, 500);\n    }\n    pulse();\n  },\n\n  /**\n   * perform a search for something (or wait until index is loaded)\n   */\n  performSearch: function (query) {\n    // create the required interface elements\n    this.out = $(\"#search-results\");\n    this.title = $(\"<h2>\" + _(\"Searching\") + \"</h2>\").appendTo(this.out);\n    this.dots = $(\"<span></span>\").appendTo(this.title);\n    this.status = $('<p 
class=\"search-summary\">&nbsp;</p>').appendTo(this.out);\n    this.output = $('<ul class=\"search\"/>').appendTo(this.out);\n\n    $(\"#search-progress\").text(_(\"Preparing search...\"));\n    this.startPulse();\n\n    // index already loaded, the browser was quick!\n    if (this.hasIndex()) this.query(query);\n    else this.deferQuery(query);\n  },\n\n  /**\n   * execute search (requires search index to be loaded)\n   */\n  query: function (query) {\n    var i;\n\n    // stem the searchterms and add them to the correct list\n    var stemmer = new Stemmer();\n    var searchterms = [];\n    var excluded = [];\n    var hlterms = [];\n    var tmp = splitQuery(query);\n    var objectterms = [];\n    for (i = 0; i < tmp.length; i++) {\n      if (tmp[i] !== \"\") {\n        objectterms.push(tmp[i].toLowerCase());\n      }\n\n      if ($u.indexOf(stopwords, tmp[i].toLowerCase()) != -1 || tmp[i] === \"\") {\n        // skip this \"word\"\n        continue;\n      }\n      // stem the word\n      var word = stemmer.stemWord(tmp[i].toLowerCase());\n      // prevent stemmer from cutting word smaller than two chars\n      if (word.length < 3 && tmp[i].length >= 3) {\n        word = tmp[i];\n      }\n      var toAppend;\n      // select the correct list\n      if (word[0] == \"-\") {\n        toAppend = excluded;\n        word = word.substr(1);\n      } else {\n        toAppend = searchterms;\n        hlterms.push(tmp[i].toLowerCase());\n      }\n      // only add if not already in the list\n      if (!$u.contains(toAppend, word)) toAppend.push(word);\n    }\n    var highlightstring = \"?highlight=\" + $.urlencode(hlterms.join(\" \"));\n\n    // console.debug('SEARCH: searching for:');\n    // console.info('required: ', searchterms);\n    // console.info('excluded: ', excluded);\n\n    // prepare search\n    var terms = this._index.terms;\n    var titleterms = this._index.titleterms;\n\n    // array of [filename, title, anchor, descr, score]\n    var results = [];\n    
$(\"#search-progress\").empty();\n\n    // lookup as object\n    for (i = 0; i < objectterms.length; i++) {\n      var others = [].concat(\n        objectterms.slice(0, i),\n        objectterms.slice(i + 1, objectterms.length)\n      );\n      results = results.concat(\n        this.performObjectSearch(objectterms[i], others)\n      );\n    }\n\n    // lookup as search terms in fulltext\n    results = results.concat(\n      this.performTermsSearch(searchterms, excluded, terms, titleterms)\n    );\n\n    // let the scorer override scores with a custom scoring function\n    if (Scorer.score) {\n      for (i = 0; i < results.length; i++)\n        results[i][4] = Scorer.score(results[i]);\n    }\n\n    // now sort the results by score (in opposite order of appearance, since the\n    // display function below uses pop() to retrieve items) and then\n    // alphabetically\n    results.sort(function (a, b) {\n      var left = a[4];\n      var right = b[4];\n      if (left > right) {\n        return 1;\n      } else if (left < right) {\n        return -1;\n      } else {\n        // same score: sort alphabetically\n        left = a[1].toLowerCase();\n        right = b[1].toLowerCase();\n        return left > right ? -1 : left < right ? 1 : 0;\n      }\n    });\n\n    // for debugging\n    //Search.lastresults = results.slice();  // a copy\n    //console.info('search results:', Search.lastresults);\n\n    // renderApiVersionLabel renders API version for each search result item.\n    function renderApiVersionLabel(linkUrl) {\n      const filtered = linkUrl\n        .split(\"/\")\n        .filter((part) => part.startsWith(\"api-v\"));\n      return filtered.length === 1\n        ? 
'&nbsp;<sup class=\"api-version-label\">' + filtered.pop() + \"</sup>\"\n        : \"\";\n    }\n\n    // print the results\n    var resultCount = results.length;\n    function displayNextItem() {\n      // results left, load the summary and display it\n      if (results.length) {\n        var item = results.pop();\n        var listItem = $('<li style=\"display:none\"></li>');\n        var requestUrl = \"\";\n        var linkUrl = \"\";\n        if (DOCUMENTATION_OPTIONS.BUILDER === \"dirhtml\") {\n          // dirhtml builder\n          var dirname = item[0] + \"/\";\n          if (dirname.match(/\\/index\\/$/)) {\n            dirname = dirname.substring(0, dirname.length - 6);\n          } else if (dirname == \"index/\") {\n            dirname = \"\";\n          }\n          requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + dirname;\n          linkUrl = requestUrl;\n        } else {\n          // normal html builders\n          requestUrl =\n            DOCUMENTATION_OPTIONS.URL_ROOT +\n            item[0] +\n            DOCUMENTATION_OPTIONS.FILE_SUFFIX;\n          linkUrl = item[0] + DOCUMENTATION_OPTIONS.LINK_SUFFIX;\n        }\n        listItem.append(\n          $(\"<a/>\")\n            .attr(\"href\", linkUrl + highlightstring + item[2])\n            .html(item[1])\n        );\n        var apiVersion = renderApiVersionLabel(linkUrl);\n        if (apiVersion !== \"\") {\n          listItem.append(apiVersion);\n        }\n        if (item[3]) {\n          listItem.append($(\"<span> (\" + item[3] + \")</span>\"));\n          Search.output.append(listItem);\n          listItem.slideDown(5, function () {\n            displayNextItem();\n          });\n        } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {\n          $.ajax({\n            url: requestUrl,\n            dataType: \"text\",\n            complete: function (jqxhr, textstatus) {\n              var data = jqxhr.responseText;\n              if (data !== \"\" && data !== undefined) {\n                
listItem.append(\n                  Search.makeSearchSummary(data, searchterms, hlterms)\n                );\n              }\n              Search.output.append(listItem);\n              listItem.slideDown(5, function () {\n                displayNextItem();\n              });\n            },\n          });\n        } else {\n          // no source available, just display title\n          Search.output.append(listItem);\n          listItem.slideDown(5, function () {\n            displayNextItem();\n          });\n        }\n      }\n      // search finished, update title and status message\n      else {\n        Search.stopPulse();\n        Search.title.text(_(\"Search Results\"));\n        if (!resultCount)\n          Search.status.text(\n            _(\n              \"Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories.\"\n            )\n          );\n        else\n          Search.status.text(\n            _(\n              \"Search finished, found %s page(s) matching the search query.\"\n            ).replace(\"%s\", resultCount)\n          );\n        Search.status.fadeIn(500);\n      }\n    }\n    displayNextItem();\n  },\n\n  /**\n   * search for object names\n   */\n  performObjectSearch: function (object, otherterms) {\n    var filenames = this._index.filenames;\n    var docnames = this._index.docnames;\n    var objects = this._index.objects;\n    var objnames = this._index.objnames;\n    var titles = this._index.titles;\n\n    var i;\n    var results = [];\n\n    for (var prefix in objects) {\n      for (var name in objects[prefix]) {\n        var fullname = (prefix ? 
prefix + \".\" : \"\") + name;\n        var fullnameLower = fullname.toLowerCase();\n        if (fullnameLower.indexOf(object) > -1) {\n          var score = 0;\n          var parts = fullnameLower.split(\".\");\n          // check for different match types: exact matches of full name or\n          // \"last name\" (i.e. last dotted part)\n          if (fullnameLower == object || parts[parts.length - 1] == object) {\n            score += Scorer.objNameMatch;\n            // matches in last name\n          } else if (parts[parts.length - 1].indexOf(object) > -1) {\n            score += Scorer.objPartialMatch;\n          }\n          var match = objects[prefix][name];\n          var objname = objnames[match[1]][2];\n          var title = titles[match[0]];\n          // If more than one term searched for, we require other words to be\n          // found in the name/title/description\n          if (otherterms.length > 0) {\n            var haystack = (\n              prefix +\n              \" \" +\n              name +\n              \" \" +\n              objname +\n              \" \" +\n              title\n            ).toLowerCase();\n            var allfound = true;\n            for (i = 0; i < otherterms.length; i++) {\n              if (haystack.indexOf(otherterms[i]) == -1) {\n                allfound = false;\n                break;\n              }\n            }\n            if (!allfound) {\n              continue;\n            }\n          }\n          var descr = objname + _(\", in \") + title;\n\n          var anchor = match[3];\n          if (anchor === \"\") anchor = fullname;\n          else if (anchor == \"-\")\n            anchor = objnames[match[1]][1] + \"-\" + fullname;\n          // add custom score for some objects according to scorer\n          if (Scorer.objPrio.hasOwnProperty(match[2])) {\n            score += Scorer.objPrio[match[2]];\n          } else {\n            score += Scorer.objPrioDefault;\n          }\n          results.push([\n 
           docnames[match[0]],\n            fullname,\n            \"#\" + anchor,\n            descr,\n            score,\n            filenames[match[0]],\n          ]);\n        }\n      }\n    }\n\n    return results;\n  },\n\n  /**\n   * search for full-text terms in the index\n   */\n  performTermsSearch: function (searchterms, excluded, terms, titleterms) {\n    var docnames = this._index.docnames;\n    var filenames = this._index.filenames;\n    var titles = this._index.titles;\n\n    var i, j, file;\n    var fileMap = {};\n    var scoreMap = {};\n    var results = [];\n\n    // perform the search on the required terms\n    for (i = 0; i < searchterms.length; i++) {\n      var word = searchterms[i];\n      var files = [];\n      var _o = [\n        { files: terms[word], score: Scorer.term },\n        { files: titleterms[word], score: Scorer.title },\n      ];\n      // add support for partial matches\n      if (word.length > 2) {\n        for (var w in terms) {\n          if (w.match(word) && !terms[word]) {\n            _o.push({ files: terms[w], score: Scorer.partialTerm });\n          }\n        }\n        for (var w in titleterms) {\n          if (w.match(word) && !titleterms[word]) {\n            _o.push({ files: titleterms[w], score: Scorer.partialTitle });\n          }\n        }\n      }\n\n      // no match but word was a required one\n      if (\n        $u.every(_o, function (o) {\n          return o.files === undefined;\n        })\n      ) {\n        break;\n      }\n      // found search word in contents\n      $u.each(_o, function (o) {\n        var _files = o.files;\n        if (_files === undefined) return;\n\n        if (_files.length === undefined) _files = [_files];\n        files = files.concat(_files);\n\n        // set score for the word in each file to Scorer.term\n        for (j = 0; j < _files.length; j++) {\n          file = _files[j];\n          if (!(file in scoreMap)) scoreMap[file] = {};\n          scoreMap[file][word] = 
o.score;\n        }\n      });\n\n      // create the mapping\n      for (j = 0; j < files.length; j++) {\n        file = files[j];\n        if (file in fileMap && fileMap[file].indexOf(word) === -1)\n          fileMap[file].push(word);\n        else fileMap[file] = [word];\n      }\n    }\n\n    // now check if the files don't contain excluded terms\n    for (file in fileMap) {\n      var valid = true;\n\n      // check if all requirements are matched\n      var filteredTermCount = searchterms.filter(function (term) { // as search terms with length < 3 are discarded: ignore\n        return term.length > 2;\n      }).length;\n      if (\n        fileMap[file].length != searchterms.length &&\n        fileMap[file].length != filteredTermCount\n      )\n        continue;\n\n      // ensure that none of the excluded terms is in the search result\n      for (i = 0; i < excluded.length; i++) {\n        if (\n          terms[excluded[i]] == file ||\n          titleterms[excluded[i]] == file ||\n          $u.contains(terms[excluded[i]] || [], file) ||\n          $u.contains(titleterms[excluded[i]] || [], file)\n        ) {\n          valid = false;\n          break;\n        }\n      }\n\n      // if we have still a valid result we can add it to the result list\n      if (valid) {\n        // select one (max) score for the file.\n        // for better ranking, we should calculate ranking by using words statistics like basic tf-idf...\n        var score = $u.max(\n          $u.map(fileMap[file], function (w) {\n            return scoreMap[file][w];\n          })\n        );\n        results.push([\n          docnames[file],\n          titles[file],\n          \"\",\n          null,\n          score,\n          filenames[file],\n        ]);\n      }\n    }\n    return results;\n  },\n\n  /**\n   * helper function to return a node containing the\n   * search summary for a given text. 
keywords is a list\n   * of stemmed words, hlwords is the list of normal, unstemmed\n   * words. the first one is used to find the occurrence, the\n   * latter for highlighting it.\n   */\n  makeSearchSummary: function (htmlText, keywords, hlwords) {\n    var text = Search.htmlToText(htmlText);\n    var textLower = text.toLowerCase();\n    var start = 0;\n    $.each(keywords, function () {\n      var i = textLower.indexOf(this.toLowerCase());\n      if (i > -1) start = i;\n    });\n    start = Math.max(start - 120, 0);\n    var excerpt =\n      (start > 0 ? \"...\" : \"\") +\n      $.trim(text.substr(start, 240)) +\n      (start + 240 - text.length ? \"...\" : \"\");\n    var rv = $('<div class=\"context\"></div>').text(excerpt);\n    $.each(hlwords, function () {\n      rv = rv.highlightText(this, \"highlighted\");\n    });\n    return rv;\n  },\n};\n\n$(document).ready(function () {\n  Search.init();\n});\n"
  },
  {
    "path": "docs/root/about_docs.rst",
    "content": "About the documentation\n=======================\n\nThe Envoy documentation is composed of a few major sections:\n\n* :ref:`Introduction <intro>`: This section covers a general overview of what Envoy is, an\n  architecture overview, how it is typically deployed, etc.\n* :ref:`Getting Started <start>`: Quickly get started with Envoy using Docker.\n* :ref:`Installation <install>`: How to build/install Envoy using Docker.\n* :ref:`Version history <version_history>`: Per-version release notes.\n* :ref:`Configuration <config>`: Detailed configuration instructions for Envoy.\n  Where relevant, the configuration guide also contains information on statistics, runtime\n  configuration, and APIs.\n* :ref:`Operations <operations>`: General information on how to operate Envoy including the command\n  line interface, hot restart wrapper, administration interface, a general statistics overview,\n  etc.\n* :ref:`Extending Envoy <extending>`: Information on how to write custom filters for Envoy.\n* :ref:`API reference <envoy_api_reference>`: Envoy API detailed reference.\n* :ref:`Envoy FAQ <faq_overview>`: Have questions? We have answers. Hopefully.\n"
  },
  {
    "path": "docs/root/api/api.rst",
    "content": ".. _api:\n\nAPI \n===\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  api_supported_versions\n  ../api-v2/api\n  ../api-v3/api\n  ../api-docs/xds_protocol\n  client_features\n"
  },
  {
    "path": "docs/root/api/api_supported_versions.rst",
    "content": ".. _api_supported_versions:\n\nSupported API versions\n======================\n\nEnvoy's APIs follow a :repo:`versioning scheme <api/API_VERSIONING.md>` in which Envoy supports\nmultiple major API versions at any point in time. The following versions are currently supported:\n\n* :ref:`v2 xDS API <envoy_api_reference>` (*deprecated*, end-of-life EOY 2020). This API will not\n  accept new features after the end of Q1 2020.\n* :ref:`v3 xDS API <envoy_v3_api_reference>` (*active*, end-of-life unknown). Envoy developers and\n  operators are encouraged to be actively adopting and working with v3 xDS.\n\nThe following API versions are no longer supported by Envoy:\n\n* v1 xDS API. This was the legacy REST-JSON API that preceded the current Protobuf and dual\n  REST/gRPC xDS APIs.\n"
  },
  {
    "path": "docs/root/api/client_features.rst",
    "content": ".. _client_features:\n\nWell Known Client Features\n==========================\n\nAuthoritative list of features that an xDS client may support. An xDS client supplies the list of\nfeatures it supports in the :ref:`client_features <envoy_api_field_core.Node.client_features>` field.\nClient features use reverse DNS naming scheme, for example `com.acme.feature`.\n\nCurrently Defined Client Features\n---------------------------------\n\n.. It would be nice to use an RST ref here for service.load_stats.v2.LoadStatsResponse.send_all_clusters, but we can't due to https://github.com/envoyproxy/envoy/issues/3091.\n\n- **envoy.config.require-any-fields-contain-struct**: This feature indicates that xDS client\n  requires that the configuration entries of type  *google.protobuf.Any* contain messages of type\n  *udpa.type.v1.TypedStruct* only.\n- **envoy.lb.does_not_support_overprovisioning**: This feature indicates that the client does not\n  support overprovisioning for priority failover and locality weighting as configured by the\n  :ref:`overprovisioning_factor<envoy_api_field_ClusterLoadAssignment.Policy.overprovisioning_factor>`\n  field. If graceful failover functionality is required, it must be supplied by the management\n  server.\n- **envoy.lrs.supports_send_all_clusters**: This feature indicates that the client supports\n  the *envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters*\n  field in the LRS response.\n"
  },
  {
    "path": "docs/root/api-v2/admin/admin.rst",
    "content": "Admin\n========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../admin/v2alpha/*\n"
  },
  {
    "path": "docs/root/api-v2/api.rst",
    "content": ".. _envoy_api_reference:\n\nv2 API reference\n================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  bootstrap/bootstrap\n  listeners/listeners\n  clusters/clusters\n  http_routes/http_routes\n  config/config\n  admin/admin\n  data/data\n  service/service\n  common_messages/common_messages\n  types/types\n"
  },
  {
    "path": "docs/root/api-v2/bootstrap/bootstrap.rst",
    "content": "Bootstrap\n=========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../config/bootstrap/v2/bootstrap.proto\n  ../config/metrics/v2/stats.proto\n  ../config/metrics/v2/metrics_service.proto\n  ../config/overload/v2alpha/overload.proto\n  ../config/ratelimit/v2/rls.proto\n"
  },
  {
    "path": "docs/root/api-v2/clusters/clusters.rst",
    "content": "Clusters\n========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../api/v2/cluster.proto\n  ../api/v2/cluster/outlier_detection.proto\n  ../api/v2/cluster/circuit_breaker.proto\n  ../api/v2/cluster/filter.proto\n  ../api/v2/endpoint.proto\n  ../api/v2/endpoint/endpoint_components.proto\n  ../api/v2/core/health_check.proto\n"
  },
  {
    "path": "docs/root/api-v2/common_messages/common_messages.rst",
    "content": "Common messages\n===============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../api/v2/core/base.proto\n  ../api/v2/core/address.proto\n  ../api/v2/core/backoff.proto\n  ../api/v2/core/protocol.proto\n  ../api/v2/discovery.proto\n  ../api/v2/core/config_source.proto\n  ../api/v2/core/grpc_service.proto\n  ../api/v2/core/grpc_method_list.proto\n  ../api/v2/core/http_uri.proto\n  ../api/v2/core/socket_option.proto\n  ../api/v2/auth/common.proto\n  ../api/v2/auth/secret.proto\n  ../api/v2/ratelimit/ratelimit.proto\n"
  },
  {
    "path": "docs/root/api-v2/config/accesslog/accesslog.rst",
    "content": "Access loggers\n==============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2/*\n"
  },
  {
    "path": "docs/root/api-v2/config/cluster/cluster.rst",
    "content": "Cluster\n=======\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  aggregate/v2alpha/*\n  dynamic_forward_proxy/v2alpha/*\n  redis/*\n"
  },
  {
    "path": "docs/root/api-v2/config/common/common.rst",
    "content": "Common\n======\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  dynamic_forward_proxy/v2alpha/*\n  tap/v2alpha/*\n"
  },
  {
    "path": "docs/root/api-v2/config/config.rst",
    "content": "Extensions\n==========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  filter/filter\n  accesslog/accesslog\n  rbac/rbac\n  health_checker/health_checker\n  transport_socket/transport_socket\n  resource_monitor/resource_monitor\n  common/common\n  cluster/cluster\n  listener/listener\n  grpc_credential/grpc_credential\n  retry/retry\n  trace/trace\n"
  },
  {
    "path": "docs/root/api-v2/config/filter/dubbo/dubbo.rst",
    "content": "Dubbo filters\n==============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */v2alpha1/*\n"
  },
  {
    "path": "docs/root/api-v2/config/filter/filter.rst",
    "content": "Filters\n=======\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  listener/listener\n  network/network\n  udp/udp\n  http/http\n  accesslog/v2/accesslog.proto\n  fault/v2/fault.proto\n  dubbo/dubbo\n  thrift/thrift\n"
  },
  {
    "path": "docs/root/api-v2/config/filter/http/http.rst",
    "content": "HTTP filters\n============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */empty/*\n  */v2/*\n  */v2alpha/*\n  */v2alpha1/*\n"
  },
  {
    "path": "docs/root/api-v2/config/filter/listener/listener.rst",
    "content": "Listener filters\n================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */empty/*\n  */v2alpha1/*\n  */v2/*\n"
  },
  {
    "path": "docs/root/api-v2/config/filter/network/network.rst",
    "content": "Network filters\n===============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */empty/*\n  */v1alpha1/*\n  */v2/*\n  */v2alpha/*\n  */v2alpha1/*\n"
  },
  {
    "path": "docs/root/api-v2/config/filter/thrift/thrift.rst",
    "content": "Thrift filters\n==============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */v2alpha1/*\n"
  },
  {
    "path": "docs/root/api-v2/config/filter/udp/udp.rst",
    "content": "UDP listener filters\n====================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */v2alpha/*\n"
  },
  {
    "path": "docs/root/api-v2/config/grpc_credential/grpc_credential.rst",
    "content": "Grpc Credentials\n================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2alpha/*\n"
  },
  {
    "path": "docs/root/api-v2/config/health_checker/health_checker.rst",
    "content": "Health checkers\n===============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */v2/*"
  },
  {
    "path": "docs/root/api-v2/config/listener/listener.rst",
    "content": "Listener\n========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2/*\n"
  },
  {
    "path": "docs/root/api-v2/config/rbac/rbac.rst",
    "content": "RBAC\n====\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2/*\n"
  },
  {
    "path": "docs/root/api-v2/config/resource_monitor/resource_monitor.rst",
    "content": ".. _config_resource_monitors:\n\nResource monitors\n=================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */v2alpha/*\n"
  },
  {
    "path": "docs/root/api-v2/config/retry/retry.rst",
    "content": "Retry Predicates\n================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */empty/*\n  omit_host_metadata/v2/*\n  */*\n  */v2/*\n"
  },
  {
    "path": "docs/root/api-v2/config/trace/trace.rst",
    "content": "HTTP Tracers\n==============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2/*\n  v2alpha/*\n"
  },
  {
    "path": "docs/root/api-v2/config/transport_socket/transport_socket.rst",
    "content": "Transport sockets\n=================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */v2alpha/*\n  */v2/*\n  ../../api/v2/auth/tls.proto\n"
  },
  {
    "path": "docs/root/api-v2/data/accesslog/accesslog.rst",
    "content": "Access logs\n===========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2/*\n"
  },
  {
    "path": "docs/root/api-v2/data/cluster/cluster.rst",
    "content": "Cluster data\n============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2alpha/outlier_detection_event.proto\n"
  },
  {
    "path": "docs/root/api-v2/data/core/core.rst",
    "content": "Core data\n=========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2alpha/health_check_event.proto\n"
  },
  {
    "path": "docs/root/api-v2/data/data.rst",
    "content": "Envoy data\n==========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  accesslog/accesslog\n  cluster/cluster\n  core/core\n  dns/dns\n  tap/tap\n"
  },
  {
    "path": "docs/root/api-v2/data/dns/dns.rst",
    "content": "Extensions objects\n==================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2alpha/*\n"
  },
  {
    "path": "docs/root/api-v2/data/tap/tap.rst",
    "content": "Tap\n===\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v2alpha/*\n"
  },
  {
    "path": "docs/root/api-v2/http_routes/http_routes.rst",
    "content": "HTTP route management\n=====================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../api/v2/route.proto\n  ../api/v2/scoped_route.proto\n  ../api/v2/route/route_components.proto\n"
  },
  {
    "path": "docs/root/api-v2/listeners/listeners.rst",
    "content": "Listeners\n=========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../api/v2/listener.proto\n  ../api/v2/listener/listener_components.proto\n  ../api/v2/listener/udp_listener_config.proto\n  ../api/v2/listener/quic_config.proto\n"
  },
  {
    "path": "docs/root/api-v2/service/service.rst",
    "content": "Services\n========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  accesslog/v2/*\n  load_stats/v2/*\n  auth/v2/*\n  discovery/v2/*\n  metrics/v2/*\n  ratelimit/v2/*\n  status/v2/*\n  tap/v2alpha/*\n  trace/v2/*\n"
  },
  {
    "path": "docs/root/api-v2/types/types.rst",
    "content": "Types\n=====\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../type/hash_policy.proto\n  ../type/http.proto\n  ../type/http_status.proto\n  ../type/percent.proto\n  ../type/range.proto\n  ../type/semantic_version.proto\n  ../type/token_bucket.proto\n  ../type/matcher/metadata.proto\n  ../type/matcher/node.proto\n  ../type/matcher/number.proto\n  ../type/matcher/path.proto\n  ../type/matcher/regex.proto\n  ../type/matcher/string.proto\n  ../type/matcher/struct.proto\n  ../type/matcher/value.proto\n  ../type/metadata/v2/metadata.proto\n  ../type/tracing/v2/custom_tag.proto\n"
  },
  {
    "path": "docs/root/api-v3/admin/admin.rst",
    "content": "Admin\n========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../admin/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/api.rst",
    "content": ".. _envoy_v3_api_reference:\n\nv3 API reference\n================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  bootstrap/bootstrap\n  listeners/listeners\n  clusters/clusters\n  http_routes/http_routes\n  config/config\n  admin/admin\n  data/data\n  service/service\n  common_messages/common_messages\n  types/types\n"
  },
  {
    "path": "docs/root/api-v3/bootstrap/bootstrap.rst",
    "content": "Bootstrap\n=========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../config/bootstrap/v3/bootstrap.proto\n  ../config/metrics/v3/stats.proto\n  ../config/metrics/v3/metrics_service.proto\n  ../config/overload/v3/overload.proto\n  ../config/ratelimit/v3/rls.proto\n  ../extensions/wasm/v3/wasm.proto\n"
  },
  {
    "path": "docs/root/api-v3/clusters/clusters.rst",
    "content": "Clusters\n========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../config/cluster/v3/cluster.proto\n  ../config/cluster/v3/outlier_detection.proto\n  ../config/cluster/v3/circuit_breaker.proto\n  ../config/cluster/v3/filter.proto\n  ../config/endpoint/v3/endpoint.proto\n  ../config/endpoint/v3/endpoint_components.proto\n  ../config/core/v3/health_check.proto\n"
  },
  {
    "path": "docs/root/api-v3/common_messages/common_messages.rst",
    "content": "Common messages\n===============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../config/core/v3/base.proto\n  ../config/core/v3/extension.proto\n  ../config/core/v3/address.proto\n  ../config/core/v3/backoff.proto\n  ../config/core/v3/protocol.proto\n  ../config/core/v3/proxy_protocol.proto\n  ../service/discovery/v3/discovery.proto\n  ../config/core/v3/config_source.proto\n  ../config/core/v3/grpc_service.proto\n  ../config/core/v3/grpc_method_list.proto\n  ../config/core/v3/http_uri.proto\n  ../config/core/v3/socket_option.proto\n  ../config/core/v3/substitution_format_string.proto\n  ../extensions/common/ratelimit/v3/ratelimit.proto\n  ../extensions/filters/common/fault/v3/fault.proto\n  ../extensions/network/socket_interface/v3/default_socket_interface.proto\n"
  },
  {
    "path": "docs/root/api-v3/config/accesslog/accesslog.rst",
    "content": "Access loggers\n==============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/*\n  ../../extensions/access_loggers/*/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/cluster/cluster.rst",
    "content": "Cluster\n=======\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../../extensions/clusters/*/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/common/common.rst",
    "content": "Common\n======\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  matcher/v3/*\n  ../../extensions/common/dynamic_forward_proxy/v3/*\n  ../../extensions/common/tap/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/compression/compression.rst",
    "content": "Compression\n===========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../../extensions/compression/gzip/*/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/config.rst",
    "content": "Extensions\n==========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  filter/filter\n  accesslog/accesslog\n  rbac/rbac\n  health_checker/health_checker\n  transport_socket/transport_socket\n  resource_monitor/resource_monitor\n  common/common\n  compression/compression\n  cluster/cluster\n  grpc_credential/grpc_credential\n  retry/retry\n  trace/trace\n  internal_redirect/internal_redirect\n  endpoint/endpoint\n  upstream/upstream\n  wasm/wasm\n  watchdog/watchdog\n"
  },
  {
    "path": "docs/root/api-v3/config/endpoint/endpoint.rst",
    "content": "Endpoint\n========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/*"
  },
  {
    "path": "docs/root/api-v3/config/filter/dubbo/dubbo.rst",
    "content": "Dubbo filters\n==============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../../../extensions/filters/network/dubbo_proxy/*/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/filter/filter.rst",
    "content": "Filters\n=======\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  listener/listener\n  network/network\n  udp/udp\n  http/http\n  dubbo/dubbo\n  thrift/thrift\n"
  },
  {
    "path": "docs/root/api-v3/config/filter/http/http.rst",
    "content": "HTTP filters\n============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */empty/*\n  ../../../extensions/filters/http/*/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/filter/listener/listener.rst",
    "content": "Listener filters\n================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */empty/*\n  ../../../extensions/filters/listener/*/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/filter/network/network.rst",
    "content": "Network filters\n===============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */empty/*\n  ../../../extensions/filters/network/*/v3*/*\n"
  },
  {
    "path": "docs/root/api-v3/config/filter/thrift/thrift.rst",
    "content": "Thrift filters\n==============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  router/v2alpha1/*\n  ../../../extensions/filters/network/thrift_proxy/**/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/filter/udp/udp.rst",
    "content": "UDP listener filters\n====================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../../../extensions/filters/udp/*/v3/*\n  ../../../extensions/filters/udp/*/v3alpha/*\n"
  },
  {
    "path": "docs/root/api-v3/config/grpc_credential/grpc_credential.rst",
    "content": "Grpc Credentials\n================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/health_checker/health_checker.rst",
    "content": "Health checkers\n===============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */v2/*"
  },
  {
    "path": "docs/root/api-v3/config/internal_redirect/internal_redirect.rst",
    "content": "Internal Redirect Predicates\n============================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../../extensions/internal_redirect/**\n"
  },
  {
    "path": "docs/root/api-v3/config/rbac/rbac.rst",
    "content": "RBAC\n====\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/resource_monitor/resource_monitor.rst",
    "content": ".. _v3_config_resource_monitors:\n\nResource monitors\n=================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */v2alpha/*\n"
  },
  {
    "path": "docs/root/api-v3/config/retry/retry.rst",
    "content": "Retry Predicates\n================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  */empty/*\n  */v2/*\n  ../../extensions/retry/**/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/trace/trace.rst",
    "content": "HTTP Tracers\n==============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/transport_socket/transport_socket.rst",
    "content": "Transport sockets\n=================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../../extensions/transport_sockets/*/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/upstream/upstream.rst",
    "content": "Upstream Configuration\n======================\n\n.. toctree::\n  :glob:\n  :maxdepth: 3\n\n  ../../extensions/upstreams/http/*/v3/**\n"
  },
  {
    "path": "docs/root/api-v3/config/wasm/wasm.rst",
    "content": "WASM\n====\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../../extensions/wasm/v3/*\n  ../../extensions/stat_sinks/wasm/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/config/watchdog/watchdog.rst",
    "content": "Watchdog\n========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../../extensions/watchdog/profile_action/v3alpha/*\n  ../../extensions/watchdog/abort_action/v3alpha/*\n"
  },
  {
    "path": "docs/root/api-v3/data/accesslog/accesslog.rst",
    "content": "Access logs\n===========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/*\n"
  },
  {
    "path": "docs/root/api-v3/data/cluster/cluster.rst",
    "content": "Cluster data\n============\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/outlier_detection_event.proto\n"
  },
  {
    "path": "docs/root/api-v3/data/core/core.rst",
    "content": "Core data\n=========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/health_check_event.proto\n"
  },
  {
    "path": "docs/root/api-v3/data/data.rst",
    "content": "Envoy data\n==========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  accesslog/accesslog\n  cluster/cluster\n  core/core\n  dns/dns\n  tap/tap\n"
  },
  {
    "path": "docs/root/api-v3/data/dns/dns.rst",
    "content": "Extensions objects\n==================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/*\n"
  },
  {
    "path": "docs/root/api-v3/data/tap/tap.rst",
    "content": "Tap\n===\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  v3/*\n"
  },
  {
    "path": "docs/root/api-v3/http_routes/http_routes.rst",
    "content": "HTTP route management\n=====================\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../config/route/v3/route.proto\n  ../config/route/v3/scoped_route.proto\n  ../config/route/v3/route_components.proto\n"
  },
  {
    "path": "docs/root/api-v3/listeners/listeners.rst",
    "content": "Listeners\n=========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../config/listener/v3/listener.proto\n  ../config/listener/v3/listener_components.proto\n  ../config/listener/v3/api_listener.proto\n  ../config/listener/v3/udp_listener_config.proto\n  ../config/listener/v3/quic_config.proto\n"
  },
  {
    "path": "docs/root/api-v3/service/service.rst",
    "content": "Services\n========\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  accesslog/v3/*\n  load_stats/v3/*\n  auth/v3/*\n  health/v3/*\n  metrics/v3/*\n  ratelimit/v3/*\n  runtime/v3/*\n  status/v3/*\n  tap/v3/*\n  ../config/tap/v3/*\n  trace/v3/*\n  extension/v3/*\n"
  },
  {
    "path": "docs/root/api-v3/types/types.rst",
    "content": "Types\n=====\n\n.. toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../type/v3/hash_policy.proto\n  ../type/v3/http.proto\n  ../type/v3/http_status.proto\n  ../type/v3/percent.proto\n  ../type/v3/range.proto\n  ../type/v3/ratelimit_unit.proto\n  ../type/v3/semantic_version.proto\n  ../type/v3/token_bucket.proto\n  ../type/matcher/v3/metadata.proto\n  ../type/matcher/v3/node.proto\n  ../type/matcher/v3/number.proto\n  ../type/matcher/v3/path.proto\n  ../type/matcher/v3/regex.proto\n  ../type/matcher/v3/string.proto\n  ../type/matcher/v3/struct.proto\n  ../type/matcher/v3/value.proto\n  ../type/metadata/v3/metadata.proto\n  ../type/tracing/v3/custom_tag.proto\n"
  },
  {
    "path": "docs/root/configuration/advanced/advanced.rst",
    "content": "Advanced\n========\n\n.. toctree::\n  :maxdepth: 2\n\n  well_known_dynamic_metadata\n"
  },
  {
    "path": "docs/root/configuration/advanced/well_known_dynamic_metadata.rst",
    "content": ".. _well_known_dynamic_metadata:\n\nWell Known Dynamic Metadata\n===========================\n\nFilters can emit dynamic metadata via the *setDynamicMetadata* routine in the\n:repo:`StreamInfo <include/envoy/stream_info/stream_info.h>` interface on a\n:repo:`Connection <include/envoy/network/connection.h>`. This metadata emitted by a filter can be\nconsumed by other filters and useful features can be built by stacking such filters. For example,\na logging filter can consume dynamic metadata from an RBAC filter to log details about runtime\nshadow rule behavior. Another example is where an RBAC filter permits/restricts MySQL/MongoDB operations\nby looking at the operational metadata emitted by the MongoDB filter.\n\nThe following Envoy filters emit dynamic metadata that other filters can leverage.\n\n* :ref:`External Authorization Filter <config_http_filters_ext_authz_dynamic_metadata>`\n* :ref:`External Authorization Network Filter <config_network_filters_ext_authz_dynamic_metadata>`\n* :ref:`Mongo Proxy Filter <config_network_filters_mongo_proxy_dynamic_metadata>`\n* :ref:`MySQL Proxy Filter <config_network_filters_mysql_proxy_dynamic_metadata>`\n* :ref:`Postgres Proxy Filter <config_network_filters_postgres_proxy_dynamic_metadata>`\n* :ref:`Role Based Access Control (RBAC) Filter <config_http_filters_rbac_dynamic_metadata>`\n* :ref:`Role Based Access Control (RBAC) Network Filter <config_network_filters_rbac_dynamic_metadata>`\n* :ref:`ZooKeeper Proxy Filter <config_network_filters_zookeeper_proxy_dynamic_metadata>`\n\nThe following Envoy filters can be configured to consume dynamic metadata emitted by other filters.\n\n* :ref:`External Authorization Filter via the metadata context namespaces\n  <envoy_v3_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.metadata_context_namespaces>`\n* :ref:`RateLimit Filter limit override <config_http_filters_rate_limit_override_dynamic_metadata>`\n\n.. 
_shared_dynamic_metadata:\n\nShared Dynamic Metadata\n-----------------------\nDynamic metadata that is set by multiple filters is placed in the common key namespace `envoy.common`. Refer to the corresponding rules when setting this metadata.\n\n.. csv-table::\n  :header: Name, Type, Description, Rules\n  :widths: 1, 1, 3, 3\n\n  access_log_hint, boolean, Whether access loggers should log the request., \"When this metadata is already set: A `true` value should not be overwritten by a `false` value, while a `false` value can be overwritten by a `true` value.\"\n\nThe following Envoy filters emit shared dynamic metadata.\n\n* :ref:`Role Based Access Control (RBAC) Filter <config_http_filters_rbac_dynamic_metadata>`\n* :ref:`Role Based Access Control (RBAC) Network Filter <config_network_filters_rbac_dynamic_metadata>`\n\nThe following filters consume shared dynamic metadata.\n\n* :ref:`Metadata Access Log Filter<envoy_v3_api_msg_config.accesslog.v3.MetadataFilter>`\n"
  },
  {
    "path": "docs/root/configuration/best_practices/_include/edge.yaml",
    "content": "overload_manager:\n  refresh_interval: 0.25s\n  resource_monitors:\n  - name: \"envoy.resource_monitors.fixed_heap\"\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig\n      # TODO: Tune for your system.\n      max_heap_size_bytes: 2147483648 # 2 GiB\n  actions:\n  - name: \"envoy.overload_actions.shrink_heap\"\n    triggers:\n    - name: \"envoy.resource_monitors.fixed_heap\"\n      threshold:\n        value: 0.95\n  - name: \"envoy.overload_actions.stop_accepting_requests\"\n    triggers:\n    - name: \"envoy.resource_monitors.fixed_heap\"\n      threshold:\n        value: 0.98\n\nadmin:\n  access_log_path: \"/var/log/envoy_admin.log\"\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 9090\n\nstatic_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 443\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    per_connection_buffer_limit_bytes: 32768 # 32 KiB\n    filter_chains:\n    - filter_chain_match:\n        server_names: [\"example.com\", \"www.example.com\"]\n      transport_socket:\n        name: envoy.transport_sockets.tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n            - certificate_chain: { filename: \"certs/servercert.pem\" }\n              private_key: { filename: \"certs/serverkey.pem\" }\n      # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol.\n      # use_proxy_proto: true\n      filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          stat_prefix: 
ingress_http\n          use_remote_address: true\n          common_http_protocol_options:\n            idle_timeout: 3600s # 1 hour\n            headers_with_underscores_action: REJECT_REQUEST\n          http2_protocol_options:\n            max_concurrent_streams: 100\n            initial_stream_window_size: 65536 # 64 KiB\n            initial_connection_window_size: 1048576 # 1 MiB\n          stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests\n          request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests\n          route_config:\n            virtual_hosts:\n            - name: default\n              domains: \"*\"\n              routes:\n              - match: { prefix: \"/\" }\n                route:\n                  cluster: service_foo\n                  idle_timeout: 15s # must be disabled for long-lived and streaming requests\n  clusters:\n    name: service_foo\n    connect_timeout: 15s\n    per_connection_buffer_limit_bytes: 32768 # 32 KiB\n    load_assignment:\n      cluster_name: some_service\n      endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 8080\n    http2_protocol_options:\n      initial_stream_window_size: 65536 # 64 KiB\n      initial_connection_window_size: 1048576 # 1 MiB\n\nlayered_runtime:\n  layers:\n    - name: static_layer_0\n      static_layer:\n        envoy:\n          resource_limits:\n            listener:\n              example_listener_name:\n                connection_limit: 10000\n        overload:\n          global_downstream_max_connections: 50000\n"
  },
  {
    "path": "docs/root/configuration/best_practices/best_practices.rst",
    "content": "Configuration best practices\n============================\n\n.. toctree::\n  :maxdepth: 2\n\n  edge\n  level_two\n\n"
  },
  {
    "path": "docs/root/configuration/best_practices/edge.rst",
    "content": ".. _best_practices_edge:\n\nConfiguring Envoy as an edge proxy\n==================================\n\nEnvoy is a production-ready edge proxy, however, the default settings are tailored\nfor the service mesh use case, and some values need to be adjusted when using Envoy\nas an edge proxy.\n\nTCP proxies should configure:\n\n* restrict access to the admin endpoint,\n* :ref:`overload_manager <config_overload_manager>`,\n* :ref:`listener buffer limits <envoy_v3_api_field_config.listener.v3.Listener.per_connection_buffer_limit_bytes>` to 32 KiB,\n* :ref:`cluster buffer limits <envoy_v3_api_field_config.cluster.v3.Cluster.per_connection_buffer_limit_bytes>` to 32 KiB.\n\nHTTP proxies should additionally configure:\n\n* :ref:`use_remote_address <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address>`\n  to true (to avoid consuming HTTP headers from external clients, see :ref:`HTTP header sanitizing <config_http_conn_man_header_sanitizing>`\n  for details),\n* :ref:`connection and stream timeouts <faq_configuration_timeouts>`,\n* :ref:`HTTP/2 maximum concurrent streams limit <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.max_concurrent_streams>` to 100,\n* :ref:`HTTP/2 initial stream window size limit <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.initial_stream_window_size>` to 64 KiB,\n* :ref:`HTTP/2 initial connection window size limit <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.initial_connection_window_size>` to 1 MiB.\n* :ref:`headers_with_underscores_action setting <envoy_v3_api_field_config.core.v3.HttpProtocolOptions.headers_with_underscores_action>` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable.\n* :ref:`Listener connection limits. 
<config_listeners_runtime>`\n* :ref:`Global downstream connection limits <config_overload_manager>`.\n\nThe following is a YAML example of the above recommendation (taken from the :ref:`Google VRP\n<arch_overview_google_vrp>` edge server configuration):\n\n.. literalinclude:: _include/edge.yaml\n    :language: yaml\n"
  },
  {
    "path": "docs/root/configuration/best_practices/level_two.rst",
    "content": ".. _best_practices_level2:\n\nConfiguring Envoy as a level two proxy\n======================================\n\nEnvoy is a production-ready proxy, however, the default settings that are tailored for the\nedge use case may need to be adjusted when using Envoy in a multi-level deployment as a\n\"level two\" proxy.\n\n.. image:: /_static/multilevel_deployment.svg\n\n**In summary, if you run level two Envoy version 1.11.1 or greater which terminates \nHTTP/2, we strongly advise you to change the HttpConnectionManager configuration of your level\ntwo Envoy, by setting its downstream**\n:ref:`validation of HTTP messaging option <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`\n**to true.**\n\nIf there is an invalid HTTP/2 request and this option is not set, the Envoy in \nquestion will reset the entire connection. This behavior was changed as part of \nthe 1.11.1 security release, to increase the security of Edge Envoys. Unfortunately, \nbecause there are no guarantees that edge proxies will enforce HTTP/1 or HTTP/2 \nstandards compliance as rigorously as Envoy’s HTTP/2 stack does, this can result \nin a problem as follows. If one client sends a request that for example passes \nlevel one proxy's validation checks, and it is forwarded over an upstream multiplexed \nHTTP/2 connection (potentially shared with other clients) the strict enforcement on \nthe level two Envoy HTTP/2 will reset all the streams on that connection, causing \na service disruption to the clients sharing that L1-L2 connection. If a malicious \nuser has insight into what traffic will bypass level one checks, they could spray\n“bad” traffic across the level one fleet, causing serious disruption to other users’ \ntraffic.\n\nThis configuration option also has implications for invalid HTTP/1.1 though slightly less\nsevere ones. For Envoy L1s, invalid HTTP/1 requests will also result in connection\nreset. 
If the option is set to true, and the request is completely read, the connection\nwill persist and can be reused for a subsequent request.\n"
  },
  {
    "path": "docs/root/configuration/configuration.rst",
    "content": ".. _config:\n\nConfiguration reference\n=======================\n\n.. toctree::\n  :maxdepth: 2\n\n  overview/overview\n  listeners/listeners\n  http/http\n  upstream/upstream\n  observability/observability\n  security/security\n  operations/operations\n  other_features/other_features\n  other_protocols/other_protocols\n  advanced/advanced\n  best_practices/best_practices\n"
  },
  {
    "path": "docs/root/configuration/http/http.rst",
    "content": "HTTP\n====\n\n.. toctree::\n  :maxdepth: 2\n\n  http_conn_man/http_conn_man\n  http_filters/http_filters\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/header_casing.rst",
    "content": "HTTP/1.1 Header Casing\n======================\n\nWhen handling HTTP/1.1, Envoy will normalize the header keys to be all lowercase. While this is\ncompliant with the HTTP/1.1 spec, in practice this can result in issues when migrating\nexisting systems that might rely on specific header casing.\n\nTo support these use cases, Envoy allows configuring a formatting scheme for the headers, which\nwill have Envoy transform the header keys during serialization. To configure this formatting on\nresponse headers, specify the format in the :ref:`http_protocol_options <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_protocol_options>`.\nTo configure this for upstream request headers, specify the formatting on the :ref:`Cluster <envoy_v3_api_field_config.cluster.v3.Cluster.http_protocol_options>`.\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/header_sanitizing.rst",
    "content": ".. _config_http_conn_man_header_sanitizing:\n\nHTTP header sanitizing\n======================\n\nFor security reasons, Envoy will \"sanitize\" various incoming HTTP headers depending on whether the\nrequest is an internal or external request. The sanitizing action depends on the header and may\nresult in addition, removal, or modification. Ultimately, whether the request is considered internal\nor external is governed by the :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`\nheader (please read the linked section carefully as how Envoy populates the header is complex and depends on the\n:ref:`use_remote_address\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address>`\nsetting). In addition, the\n:ref:`internal_address_config\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.internal_address_config>`\nsetting can be used to configure the internal/external determination.\n\nEnvoy will potentially sanitize the following headers:\n\n* :ref:`x-envoy-decorator-operation <config_http_filters_router_x-envoy-decorator-operation>`\n* :ref:`x-envoy-downstream-service-cluster\n  <config_http_conn_man_headers_downstream-service-cluster>`\n* :ref:`x-envoy-downstream-service-node <config_http_conn_man_headers_downstream-service-node>`\n* :ref:`x-envoy-expected-rq-timeout-ms <config_http_filters_router_x-envoy-expected-rq-timeout-ms>`\n* :ref:`x-envoy-external-address <config_http_conn_man_headers_x-envoy-external-address>`\n* :ref:`x-envoy-force-trace <config_http_conn_man_headers_x-envoy-force-trace>`\n* :ref:`x-envoy-internal <config_http_conn_man_headers_x-envoy-internal>`\n* :ref:`x-envoy-ip-tags <config_http_filters_ip_tagging>`\n* :ref:`x-envoy-max-retries <config_http_filters_router_x-envoy-max-retries>`\n* :ref:`x-envoy-retry-grpc-on <config_http_filters_router_x-envoy-retry-grpc-on>`\n* :ref:`x-envoy-retry-on 
<config_http_filters_router_x-envoy-retry-on>`\n* :ref:`x-envoy-upstream-alt-stat-name <config_http_filters_router_x-envoy-upstream-alt-stat-name>`\n* :ref:`x-envoy-upstream-rq-per-try-timeout-ms\n  <config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms>`\n* :ref:`x-envoy-upstream-rq-timeout-alt-response\n  <config_http_filters_router_x-envoy-upstream-rq-timeout-alt-response>`\n* :ref:`x-envoy-upstream-rq-timeout-ms <config_http_filters_router_x-envoy-upstream-rq-timeout-ms>`\n* :ref:`x-forwarded-client-cert <config_http_conn_man_headers_x-forwarded-client-cert>`\n* :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`\n* :ref:`x-forwarded-proto <config_http_conn_man_headers_x-forwarded-proto>`\n* :ref:`x-request-id <config_http_conn_man_headers_x-request-id>`\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/headers.rst",
    "content": ".. _config_http_conn_man_headers:\n\nHTTP header manipulation\n========================\n\nThe HTTP connection manager manipulates several HTTP headers both during decoding (when the request\nis being received) as well as during encoding (when the response is being sent).\n\n.. contents::\n  :local:\n\n.. _config_http_conn_man_headers_user-agent:\n\nuser-agent\n----------\n\nThe *user-agent* header may be set by the connection manager during decoding if the :ref:`add_user_agent\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_user_agent>` option is\nenabled. The header is only modified if it is not already set. If the connection manager does set the header, the value\nis determined by the :option:`--service-cluster` command line option.\n\n.. _config_http_conn_man_headers_server:\n\nserver\n------\n\nThe *server* header will be set during encoding to the value in the :ref:`server_name\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.server_name>` option.\n\n.. _config_http_conn_man_headers_x-client-trace-id:\n\nx-client-trace-id\n-----------------\n\nIf an external client sets this header, Envoy will join the provided trace ID with the internally\ngenerated :ref:`config_http_conn_man_headers_x-request-id`. x-client-trace-id needs to be globally\nunique and generating a uuid4 is recommended. If this header is set, it has similar effect to\n:ref:`config_http_conn_man_headers_x-envoy-force-trace`. See the :ref:`tracing.client_enabled\n<config_http_conn_man_runtime_client_enabled>` runtime configuration setting.\n\n.. _config_http_conn_man_headers_downstream-service-cluster:\n\nx-envoy-downstream-service-cluster\n----------------------------------\n\nInternal services often want to know which service is calling them. This header is cleaned from\nexternal requests, but for internal requests will contain the service cluster of the caller. 
Note\nthat in the current implementation, this should be considered a hint as it is set by the caller and\ncould be easily spoofed by any internal entity. In the future Envoy will support a mutual\nauthentication TLS mesh which will make this header fully secure. Like *user-agent*, the value\nis determined by the :option:`--service-cluster` command line option. In order to enable this\nfeature you need to set the :ref:`user_agent <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_user_agent>` option to true.\n\n.. _config_http_conn_man_headers_downstream-service-node:\n\nx-envoy-downstream-service-node\n-------------------------------\n\nInternal services may want to know the downstream node the request comes from. This header\nis quite similar to :ref:`config_http_conn_man_headers_downstream-service-cluster`, except the value is taken from\nthe :option:`--service-node` option.\n\n.. _config_http_conn_man_headers_x-envoy-external-address:\n\nx-envoy-external-address\n------------------------\n\nIt is a common case where a service wants to perform analytics based on the origin client's IP\naddress. Per the lengthy discussion on :ref:`XFF <config_http_conn_man_headers_x-forwarded-for>`,\nthis can get quite complicated, so Envoy simplifies this by setting *x-envoy-external-address*\nto the :ref:`trusted client address <config_http_conn_man_headers_x-forwarded-for_trusted_client_address>`\nif the request is from an external client. *x-envoy-external-address* is not set or overwritten\nfor internal requests. This header can be safely forwarded between internal services for analytics\npurposes without having to deal with the complexities of XFF.\n\n.. 
_config_http_conn_man_headers_x-envoy-force-trace:\n\nx-envoy-force-trace\n-------------------\n\nIf an internal request sets this header, Envoy will modify the generated\n:ref:`config_http_conn_man_headers_x-request-id` such that it forces traces to be collected.\nThis also forces :ref:`config_http_conn_man_headers_x-request-id` to be returned in the response\nheaders. If this request ID is then propagated to other hosts, traces will also be collected on\nthose hosts which will provide a consistent trace for an entire request flow. See the\n:ref:`tracing.global_enabled <config_http_conn_man_runtime_global_enabled>` and\n:ref:`tracing.random_sampling <config_http_conn_man_runtime_random_sampling>` runtime\nconfiguration settings.\n\n.. _config_http_conn_man_headers_x-envoy-internal:\n\nx-envoy-internal\n----------------\n\nIt is a common case where a service wants to know whether a request is internal origin or not. Envoy\nuses :ref:`XFF <config_http_conn_man_headers_x-forwarded-for>` to determine this and then will set\nthe header value to *true*.\n\nThis is a convenience to avoid having to parse and understand XFF.\n\n.. _config_http_conn_man_headers_x-envoy-original-dst-host:\n\nx-envoy-original-dst-host\n-------------------------\n\nThe header used to override destination address when using the\n:ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`\nload balancing policy.\n\nIt is ignored, unless the use of it is enabled via\n:ref:`use_http_header <envoy_v3_api_field_config.cluster.v3.Cluster.OriginalDstLbConfig.use_http_header>`.\n\n.. _config_http_conn_man_headers_x-forwarded-client-cert:\n\nx-forwarded-client-cert\n-----------------------\n\n*x-forwarded-client-cert* (XFCC) is a proxy header which indicates certificate information of part\nor all of the clients or proxies that a request has flowed through, on its way from the client to the\nserver. 
A proxy may choose to sanitize/append/forward the XFCC header before proxying the request.\n\nThe XFCC header value is a comma (\",\") separated string. Each substring is an XFCC element, which\nholds information added by a single proxy. A proxy can append the current client certificate\ninformation as an XFCC element, to the end of the request's XFCC header after a comma.\n\nEach XFCC element is a semicolon \";\" separated string. Each substring is a key-value pair, grouped\ntogether by an equals (\"=\") sign. The keys are case-insensitive, the values are case-sensitive. If\n\",\", \";\" or \"=\" appear in a value, the value should be double-quoted. Double-quotes in the value\nshould be replaced by backslash-double-quote (\\\").\n\nThe following keys are supported:\n\n1. ``By`` The Subject Alternative Name (URI type) of the current proxy's certificate.\n2. ``Hash`` The SHA 256 digest of the current client certificate.\n3. ``Cert`` The entire client certificate in URL encoded PEM format.\n4. ``Chain`` The entire client certificate chain (including the leaf certificate) in URL encoded PEM format.\n5. ``Subject`` The Subject field of the current client certificate. The value is always double-quoted.\n6. ``URI`` The URI type Subject Alternative Name field of the current client certificate.\n7. ``DNS`` The DNS type Subject Alternative Name field of the current client certificate. A client certificate may contain multiple DNS type Subject Alternative Names, each will be a separate key-value pair.\n\nA client certificate may contain multiple Subject Alternative Name types. For details on different Subject Alternative Name types, please refer to `RFC 2459`_.\n\n.. _RFC 2459: https://tools.ietf.org/html/rfc2459#section-4.2.1.7\n\nSome examples of the XFCC header are:\n\n1. 
For one client certificate with only URI type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;Subject=\"/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=Test Client\";URI=http://testclient.lyft.com``\n2. For two client certificates with only URI type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;URI=http://testclient.lyft.com,By=http://backend.lyft.com;Hash=9ba61d6425303443c0748a02dd8de688468ed33be74eee6556d90c0149c1309e;URI=http://frontend.lyft.com``\n3. For one client certificate with both URI type and DNS type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;Subject=\"/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=Test Client\";URI=http://testclient.lyft.com;DNS=lyft.com;DNS=www.lyft.com``\n\nHow Envoy processes XFCC is specified by the\n:ref:`forward_client_cert_details<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.forward_client_cert_details>`\nand the\n:ref:`set_current_client_cert_details<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.set_current_client_cert_details>`\nHTTP connection manager options. If *forward_client_cert_details* is unset, the XFCC header will be sanitized by\ndefault.\n\n.. _config_http_conn_man_headers_x-forwarded-for:\n\nx-forwarded-for\n---------------\n\n*x-forwarded-for* (XFF) is a standard proxy header which indicates the IP addresses that a request has\nflowed through on its way from the client to the server. A compliant proxy will *append* the IP\naddress of the nearest client to the XFF list before proxying the request. Some examples of XFF are:\n\n1. ``x-forwarded-for: 50.0.0.1`` (single client)\n2. 
``x-forwarded-for: 50.0.0.1, 40.0.0.1`` (external proxy hop)\n3. ``x-forwarded-for: 50.0.0.1, 10.0.0.1`` (internal proxy hop)\n\nEnvoy will only append to XFF if the :ref:`use_remote_address\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address>`\nHTTP connection manager option is set to true and the :ref:`skip_xff_append\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.skip_xff_append>`\nis set false. This means that if *use_remote_address* is false (which is the default) or\n*skip_xff_append* is true, the connection manager operates in a transparent mode where it does not\nmodify XFF.\n\n.. attention::\n\n  In general, *use_remote_address* should be set to true when Envoy is deployed as an edge\n  node (aka a front proxy), whereas it may need to be set to false when Envoy is used as\n  an internal service node in a mesh deployment.\n\n.. _config_http_conn_man_headers_x-forwarded-for_trusted_client_address:\n\nThe value of *use_remote_address* controls how Envoy determines the *trusted client address*.\nGiven an HTTP request that has traveled through a series of zero or more proxies to reach\nEnvoy, the trusted client address is the earliest source IP address that is known to be\naccurate. The source IP address of the immediate downstream node's connection to Envoy is\ntrusted. XFF *sometimes* can be trusted. 
Malicious clients can forge XFF, but the last\naddress in XFF can be trusted if it was put there by a trusted proxy.\n\nEnvoy's default rules for determining the trusted client address (*before* appending anything\nto XFF) are:\n\n* If *use_remote_address* is false and an XFF containing at least one IP address is\n  present in the request, the trusted client address is the *last* (rightmost) IP address in XFF.\n* Otherwise, the trusted client address is the source IP address of the immediate downstream\n  node's connection to Envoy.\n\nIn an environment where there are one or more trusted proxies in front of an edge\nEnvoy instance, the *xff_num_trusted_hops* configuration option can be used to trust\nadditional addresses from XFF:\n\n* If *use_remote_address* is false and *xff_num_trusted_hops* is set to a value *N* that is\n  greater than zero, the trusted client address is the (N+1)th address from the right end\n  of XFF. (If the XFF contains fewer than N+1 addresses, Envoy falls back to using the\n  immediate downstream connection's source address as trusted client address.)\n* If *use_remote_address* is true and *xff_num_trusted_hops* is set to a value *N* that is\n  greater than zero, the trusted client address is the Nth address from the right end\n  of XFF. (If the XFF contains fewer than N addresses, Envoy falls back to using the\n  immediate downstream connection's source address as trusted client address.)\n\nEnvoy uses the trusted client address contents to determine whether a request originated\nexternally or internally. 
This influences whether the\n:ref:`config_http_conn_man_headers_x-envoy-internal` header is set.\n\nExample 1: Envoy as edge proxy, without a trusted proxy in front of it\n    Settings:\n      | use_remote_address = true\n      | xff_num_trusted_hops = 0\n\n    Request details:\n      | Downstream IP address = 192.0.2.5\n      | XFF = \"203.0.113.128, 203.0.113.10, 203.0.113.1\"\n\n    Result:\n      | Trusted client address = 192.0.2.5 (XFF is ignored)\n      | X-Envoy-External-Address is set to 192.0.2.5\n      | XFF is changed to \"203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5\"\n      | X-Envoy-Internal is removed (if it was present in the incoming request)\n\nExample 2: Envoy as internal proxy, with the Envoy edge proxy from Example 1 in front of it\n    Settings:\n      | use_remote_address = false\n      | xff_num_trusted_hops = 0\n\n    Request details:\n      | Downstream IP address = 10.11.12.13 (address of the Envoy edge proxy)\n      | XFF = \"203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5\"\n\n    Result:\n      | Trusted client address = 192.0.2.5 (last address in XFF is trusted)\n      | X-Envoy-External-Address is not modified\n      | X-Envoy-Internal is removed (if it was present in the incoming request)\n\nExample 3: Envoy as edge proxy, with two trusted external proxies in front of it\n    Settings:\n      | use_remote_address = true\n      | xff_num_trusted_hops = 2\n\n    Request details:\n      | Downstream IP address = 192.0.2.5\n      | XFF = \"203.0.113.128, 203.0.113.10, 203.0.113.1\"\n\n    Result:\n      | Trusted client address = 203.0.113.10 (2nd to last address in XFF is trusted)\n      | X-Envoy-External-Address is set to 203.0.113.10\n      | XFF is changed to \"203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5\"\n      | X-Envoy-Internal is removed (if it was present in the incoming request)\n\nExample 4: Envoy as internal proxy, with the edge proxy from Example 3 in front of it\n    Settings:\n      | 
use_remote_address = false\n      | xff_num_trusted_hops = 2\n\n    Request details:\n      | Downstream IP address = 10.11.12.13 (address of the Envoy edge proxy)\n      | XFF = \"203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5\"\n\n    Result:\n      | Trusted client address = 203.0.113.10\n      | X-Envoy-External-Address is not modified\n      | X-Envoy-Internal is removed (if it was present in the incoming request)\n\nExample 5: Envoy as an internal proxy, receiving a request from an internal client\n    Settings:\n      | use_remote_address = false\n      | xff_num_trusted_hops = 0\n\n    Request details:\n      | Downstream IP address = 10.20.30.40 (address of the internal client)\n      | XFF is not present\n\n    Result:\n      | Trusted client address = 10.20.30.40\n      | X-Envoy-External-Address remains unset\n      | X-Envoy-Internal is set to \"false\"\n\nExample 6: The internal Envoy from Example 5, receiving a request proxied by another Envoy\n    Settings:\n      | use_remote_address = false\n      | xff_num_trusted_hops = 0\n\n    Request details:\n      | Downstream IP address = 10.20.30.50 (address of the Envoy instance proxying to this one)\n      | XFF = \"10.20.30.40\"\n\n    Result:\n      | Trusted client address = 10.20.30.40\n      | X-Envoy-External-Address remains unset\n      | X-Envoy-Internal is set to \"true\"\n\nA few very important notes about XFF:\n\n1. If *use_remote_address* is set to true, Envoy sets the\n   :ref:`config_http_conn_man_headers_x-envoy-external-address` header to the trusted\n   client address.\n\n.. _config_http_conn_man_headers_x-forwarded-for_internal_origin:\n\n2. XFF is what Envoy uses to determine whether a request is internal origin or external origin.\n   If *use_remote_address* is set to true, the request is internal if and only if the\n   request contains no XFF and the immediate downstream node's connection to Envoy has\n   an internal (RFC1918 or RFC4193) source address. 
If *use_remote_address* is false, the\n   request is internal if and only if XFF contains a single RFC1918 or RFC4193 address.\n\n   * **NOTE**: If an internal service proxies an external request to another internal service, and\n     includes the original XFF header, Envoy will append to it on egress if\n     :ref:`use_remote_address <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address>` is set. This will cause\n     the other side to think the request is external. Generally, this is what is intended if XFF is\n     being forwarded. If it is not intended, do not forward XFF, and forward\n     :ref:`config_http_conn_man_headers_x-envoy-internal` instead.\n   * **NOTE**: If an internal service call is forwarded to another internal service (preserving XFF),\n     Envoy will not consider it internal. This is a known \"bug\" due to the simplification of how\n     XFF is parsed to determine if a request is internal. In this scenario, do not forward XFF and\n     allow Envoy to generate a new one with a single internal origin IP.\n\n.. _config_http_conn_man_headers_x-forwarded-proto:\n\nx-forwarded-proto\n-----------------\n\nIt is a common case where a service wants to know what the originating protocol (HTTP or HTTPS) was\nof the connection terminated by front/edge Envoy. *x-forwarded-proto* contains this information. It\nwill be set to either *http* or *https*.\n\n.. _config_http_conn_man_headers_x-request-id:\n\nx-request-id\n------------\n\nThe *x-request-id* header is used by Envoy to uniquely identify a request as well as perform stable\naccess logging and tracing. Envoy will generate an *x-request-id* header for all external origin\nrequests (the header is sanitized). It will also generate an *x-request-id* header for internal\nrequests that do not already have one. 
This means that *x-request-id* can and should be propagated\nbetween client applications in order to have stable IDs across the entire mesh. Due to the out of\nprocess architecture of Envoy, the header can not be automatically forwarded by Envoy itself. This\nis one of the few areas where a thin client library is needed to perform this duty. How that is done\nis out of scope for this documentation. If *x-request-id* is propagated across all hosts, the\nfollowing features are available:\n\n* Stable :ref:`access logging <config_access_log>` via the\n  :ref:`v3 API runtime filter<envoy_v3_api_field_config.accesslog.v3.AccessLogFilter.runtime_filter>`.\n* Stable tracing when performing random sampling via the :ref:`tracing.random_sampling\n  <config_http_conn_man_runtime_random_sampling>` runtime setting or via forced tracing using the\n  :ref:`config_http_conn_man_headers_x-envoy-force-trace` and\n  :ref:`config_http_conn_man_headers_x-client-trace-id` headers.\n\n.. _config_http_conn_man_headers_x-ot-span-context:\n\nx-ot-span-context\n-----------------\n\nThe *x-ot-span-context* HTTP header is used by Envoy to establish proper parent-child relationships\nbetween tracing spans when used with the LightStep tracer.\nFor example, an egress span is a child of an ingress\nspan (if the ingress span was present). Envoy injects the *x-ot-span-context* header on ingress requests and\nforwards it to the local service. Envoy relies on the application to propagate *x-ot-span-context* on\nthe egress call to an upstream. See more on tracing :ref:`here <arch_overview_tracing>`.\n\n.. _config_http_conn_man_headers_x-b3-traceid:\n\nx-b3-traceid\n------------\n\nThe *x-b3-traceid* HTTP header is used by the Zipkin tracer in Envoy.\nThe TraceId is 64-bit in length and indicates the overall ID of the\ntrace. Every span in a trace shares this ID. See more on zipkin tracing\n`here <https://github.com/openzipkin/b3-propagation>`__.\n\n.. 
_config_http_conn_man_headers_x-b3-spanid:\n\nx-b3-spanid\n-----------\n\nThe *x-b3-spanid* HTTP header is used by the Zipkin tracer in Envoy.\nThe SpanId is 64-bit in length and indicates the position of the current\noperation in the trace tree. The value should not be interpreted: it may or\nmay not be derived from the value of the TraceId. See more on zipkin tracing\n`here <https://github.com/openzipkin/b3-propagation>`__.\n\n.. _config_http_conn_man_headers_x-b3-parentspanid:\n\nx-b3-parentspanid\n-----------------\n\nThe *x-b3-parentspanid* HTTP header is used by the Zipkin tracer in Envoy.\nThe ParentSpanId is 64-bit in length and indicates the position of the\nparent operation in the trace tree. When the span is the root of the trace\ntree, the ParentSpanId is absent. See more on zipkin tracing\n`here <https://github.com/openzipkin/b3-propagation>`__.\n\n.. _config_http_conn_man_headers_x-b3-sampled:\n\nx-b3-sampled\n------------\n\nThe *x-b3-sampled* HTTP header is used by the Zipkin tracer in Envoy.\nWhen the Sampled flag is either not specified or set to 1, the span will be reported to the tracing\nsystem. Once Sampled is set to 0 or 1, the same\nvalue should be consistently sent downstream. See more on zipkin tracing\n`here <https://github.com/openzipkin/b3-propagation>`__.\n\n.. _config_http_conn_man_headers_x-b3-flags:\n\nx-b3-flags\n----------\n\nThe *x-b3-flags* HTTP header is used by the Zipkin tracer in Envoy.\nThey encode one or more options. For example, Debug is encoded as\n``X-B3-Flags: 1``. See more on zipkin tracing\n`here <https://github.com/openzipkin/b3-propagation>`__.\n\n.. _config_http_conn_man_headers_b3:\n\nb3\n----------\n\nThe *b3* HTTP header is used by the Zipkin tracer in Envoy.\nIt is a more compressed header format. See more on zipkin tracing\n`here <https://github.com/openzipkin/b3-propagation#single-header>`__.\n\n.. 
_config_http_conn_man_headers_x-datadog-trace-id:\n\nx-datadog-trace-id\n------------------\n\nThe *x-datadog-trace-id* HTTP header is used by the Datadog tracer in Envoy.\nThe 64-bit value represents the ID of the overall trace, and is used to correlate\nthe spans.\n\n.. _config_http_conn_man_headers_x-datadog-parent-id:\n\nx-datadog-parent-id\n-------------------\n\nThe *x-datadog-parent-id* HTTP header is used by the Datadog tracer in Envoy.\nThe 64-bit value uniquely identifies the span within the trace, and is used to\ncreate parent-child relationships between spans.\n\n.. _config_http_conn_man_headers_x-datadog-sampling-priority:\n\nx-datadog-sampling-priority\n---------------------------\n\nThe *x-datadog-sampling-priority* HTTP header is used by the Datadog tracer in Envoy.\nThe integer value indicates the sampling decision that has been made for this trace.\nA value of 0 indicates that the trace should not be collected, and a value of 1\nrequests that spans are sampled and reported.\n\n.. _config_http_conn_man_headers_custom_request_headers:\n\nCustom request/response headers\n-------------------------------\n\nCustom request/response headers can be added to a request/response at the weighted cluster,\nroute, virtual host, and/or global route configuration level. See the\n:ref:`v3 <envoy_v3_api_msg_config.route.v3.RouteConfiguration>` API documentation.\n\nNo *:-prefixed* pseudo-header may be modified via this mechanism. 
The *:path*\nand *:authority* headers may instead be modified via mechanisms such as\n:ref:`prefix_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.prefix_rewrite>`,\n:ref:`regex_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.regex_rewrite>`, and\n:ref:`host_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.host_rewrite_literal>`.\n\nHeaders are appended to requests/responses in the following order: weighted cluster level headers,\nroute level headers, virtual host level headers and finally global level headers.\n\nEnvoy supports adding dynamic values to request and response headers. The percent symbol (%) is\nused to delimit variable names.\n\n.. attention::\n\n  If a literal percent symbol (%) is desired in a request/response header, it must be escaped by\n  doubling it. For example, to emit a header with the value ``100%``, the custom header value in\n  the Envoy configuration must be ``100%%``.\n\nSupported variable names are:\n\n%DOWNSTREAM_REMOTE_ADDRESS%\n    Remote address of the downstream connection. If the address is an IP address it includes both\n    address and port.\n\n    .. note::\n\n      This may not be the physical remote address of the peer if the address has been inferred from\n      :ref:`proxy proto <envoy_v3_api_field_config.listener.v3.FilterChain.use_proxy_proto>` or :ref:`x-forwarded-for\n      <config_http_conn_man_headers_x-forwarded-for>`.\n\n%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\n    Same as **%DOWNSTREAM_REMOTE_ADDRESS%** excluding port if the address is an IP address.\n\n%DOWNSTREAM_LOCAL_ADDRESS%\n    Local address of the downstream connection. 
If the address is an IP address it includes both\n    address and port.\n    If the original connection was redirected by iptables REDIRECT, this represents\n    the original destination address restored by the\n    :ref:`Original Destination Filter <config_listener_filters_original_dst>` using SO_ORIGINAL_DST socket option.\n    If the original connection was redirected by iptables TPROXY, and the listener's transparent\n    option was set to true, this represents the original destination address and port.\n\n%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%\n    Same as **%DOWNSTREAM_LOCAL_ADDRESS%** excluding port if the address is an IP address.\n\n%DOWNSTREAM_LOCAL_PORT%\n    Similar to **%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%**, but only extracts the port portion of the **%DOWNSTREAM_LOCAL_ADDRESS%**\n\n%DOWNSTREAM_LOCAL_URI_SAN%\n  HTTP\n    The URIs present in the SAN of the local certificate used to establish the downstream TLS connection.\n  TCP\n    The URIs present in the SAN of the local certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_URI_SAN%\n  HTTP\n    The URIs present in the SAN of the peer certificate used to establish the downstream TLS connection.\n  TCP\n    The URIs present in the SAN of the peer certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_LOCAL_SUBJECT%\n  HTTP\n    The subject present in the local certificate used to establish the downstream TLS connection.\n  TCP\n    The subject present in the local certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_SUBJECT%\n  HTTP\n    The subject present in the peer certificate used to establish the downstream TLS connection.\n  TCP\n    The subject present in the peer certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_ISSUER%\n  HTTP\n    The issuer present in the peer certificate used to establish the downstream TLS connection.\n  TCP\n    The issuer present in the peer certificate used 
to establish the downstream TLS connection.\n\n%DOWNSTREAM_TLS_SESSION_ID%\n  HTTP\n    The session ID for the established downstream TLS connection.\n  TCP\n    The session ID for the established downstream TLS connection.\n\n%DOWNSTREAM_TLS_CIPHER%\n  HTTP\n    The OpenSSL name for the set of ciphers used to establish the downstream TLS connection.\n  TCP\n    The OpenSSL name for the set of ciphers used to establish the downstream TLS connection.\n\n%DOWNSTREAM_TLS_VERSION%\n  HTTP\n    The TLS version (e.g., ``TLSv1.2``, ``TLSv1.3``) used to establish the downstream TLS connection.\n  TCP\n    The TLS version (e.g., ``TLSv1.2``, ``TLSv1.3``) used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_FINGERPRINT_256%\n  HTTP\n    The hex-encoded SHA256 fingerprint of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The hex-encoded SHA256 fingerprint of the client certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_FINGERPRINT_1%\n  HTTP\n    The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_SERIAL%\n  HTTP\n    The serial number of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The serial number of the client certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_CERT%\n  HTTP\n    The client certificate in the URL-encoded PEM format used to establish the downstream TLS connection.\n  TCP\n    The client certificate in the URL-encoded PEM format used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_CERT_V_START%\n  HTTP\n    The validity start date of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The validity start date of the client certificate used to establish the downstream TLS 
connection.\n\n%DOWNSTREAM_PEER_CERT_V_END%\n  HTTP\n    The validity end date of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The validity end date of the client certificate used to establish the downstream TLS connection.\n\n%HOSTNAME%\n    The system hostname.\n\n%PROTOCOL%\n    The original protocol which is already added by Envoy as a\n    :ref:`x-forwarded-proto <config_http_conn_man_headers_x-forwarded-proto>` request header.\n\n%UPSTREAM_METADATA([\"namespace\", \"key\", ...])%\n    Populates the header with :ref:`EDS endpoint metadata <envoy_v3_api_field_config.endpoint.v3.LbEndpoint.metadata>` from the\n    upstream host selected by the router. Metadata may be selected from any namespace. In general,\n    metadata values may be strings, numbers, booleans, lists, nested structures, or null. Upstream\n    metadata values may be selected from nested structs by specifying multiple keys. Otherwise,\n    only string, boolean, and numeric values are supported. If the namespace or key(s) are not\n    found, or if the selected value is not a supported type, then no header is emitted. The\n    namespace and key(s) are specified as a JSON array of strings. Finally, percent symbols in the\n    parameters **do not** need to be escaped by doubling them.\n\n    Upstream metadata cannot be added to request headers as the upstream host has not been selected\n    when custom request headers are generated.\n\n%DYNAMIC_METADATA([\"namespace\", \"key\", ...])%\n    Similar to UPSTREAM_METADATA, populates the header with dynamic metadata available in a request\n    (e.g.: added by filters like the header-to-metadata filter).\n\n    This works both on request and response headers.\n\n%UPSTREAM_REMOTE_ADDRESS%\n    Remote address of the upstream host. If the address is an IP address it includes both address\n    and port. 
The upstream remote address cannot be added to request headers as the upstream host\n    has not been selected when custom request headers are generated.\n\n%PER_REQUEST_STATE(reverse.dns.data.name)%\n    Populates the header with values set on the stream info filterState() object. To be\n    usable in custom request/response headers, these values must be of type\n    Envoy::Router::StringAccessor. These values should be named in standard reverse DNS style,\n    identifying the organization that created the value and ending in a unique name for the data.\n\n%REQ(header-name)%\n    Populates the header with a value of the request header.\n\n%START_TIME%\n    Request start time. START_TIME can be customized with specifiers as specified in\n    :ref:`access log format rules<config_access_log_format_start_time>`.\n\n    An example of setting a custom header with current time in seconds with the milliseconds resolution:\n\n    .. code-block:: none\n\n      route:\n        cluster: www\n      request_headers_to_add:\n        - header:\n            key: \"x-request-start\"\n            value: \"%START_TIME(%s.%3f)%\"\n          append: true\n\n%RESPONSE_FLAGS%\n    Additional details about the response or connection, if any. Possible values and their meanings\n    are listed in the access log formatter :ref:`documentation<config_access_log_format_response_flags>`.\n\n%RESPONSE_CODE_DETAILS%\n    Response code details provides additional information about the HTTP response code, such as\n    who set it (the upstream or envoy) and why.\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/http_conn_man.rst",
    "content": ".. _config_http_conn_man:\n\nHTTP connection manager\n=======================\n\n.. toctree::\n  :maxdepth: 2\n\n  overview\n  route_matching\n  traffic_splitting\n  header_casing\n  headers\n  header_sanitizing\n  local_reply\n  response_code_details\n  stats\n  runtime\n  rds\n  vhds\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/local_reply.rst",
    "content": ".. _config_http_conn_man_local_reply:\n\nLocal reply modification\n========================\n\nThe :ref:`HTTP connection manager <arch_overview_http_conn_man>` supports modification of local reply which is response returned by Envoy itself.\n\nFeatures:\n\n* :ref:`Local reply content modification<config_http_conn_man_local_reply_modification>`.\n* :ref:`Local reply format modification<config_http_conn_man_local_reply_format>`.\n\n.. _config_http_conn_man_local_reply_modification:\n\nLocal reply content modification\n--------------------------------\n\nThe local response content returned by Envoy can be customized. A list of :ref:`mappers <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.mappers>` can be specified. Each mapper must have a :ref:`filter <envoy_v3_api_field_config.accesslog.v3.AccessLog.filter>`. It may have following rewrite rules; a :ref:`status_code <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.ResponseMapper.status_code>` rule to rewrite response code, a :ref:`headers_to_add <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.ResponseMapper.headers_to_add>` rule to add/override/append response HTTP headers, a :ref:`body <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.ResponseMapper.body>` rule to rewrite the local reply body and a :ref:`body_format_override <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.ResponseMapper.body_format_override>` to specify the response body format. Envoy checks each `mapper` according to the specified order until the first one is matched. If a `mapper` is matched, all its rewrite rules will apply.\n\nExample of a LocalReplyConfig\n\n.. 
code-block::\n\n  mappers:\n  - filter:\n      status_code_filter:\n        comparison:\n          op: EQ\n          value:\n            default_value: 400\n            runtime_key: key_b\n    headers_to_add:\n      - header:\n          key: \"foo\"\n          value: \"bar\"\n        append: false\n    status_code: 401\n    body:\n      inline_string: \"not allowed\"\n\nIn the above example, if the status_code is 400, it will be rewritten to 401, the response body will be rewritten to \"not allowed\".\n\n.. _config_http_conn_man_local_reply_format:\n\nLocal reply format modification\n-------------------------------\n\nThe response body content type can be customized. If not specified, the content type is text/plain. There are two `body_format` fields; one is the :ref:`body_format <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.body_format>` field in the :ref:`LocalReplyConfig <envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.LocalReplyConfig>` message and the other :ref:`body_format_override <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.ResponseMapper.body_format_override>` field in the `mapper`. The latter is only used when its mapper is matched. The former is used if there are no matched mappers, or the matched mapper doesn't have the `body_format` specified.\n\nLocal reply format can be specified as :ref:`SubstitutionFormatString <envoy_v3_api_msg_config.core.v3.SubstitutionFormatString>`. It supports :ref:`text_format <envoy_v3_api_field_config.core.v3.SubstitutionFormatString.text_format>` and :ref:`json_format <envoy_v3_api_field_config.core.v3.SubstitutionFormatString.json_format>`.\n\nOptionally, content-type can be modified further via :ref:`content_type <envoy_v3_api_field_config.core.v3.SubstitutionFormatString.content_type>` field. 
If not specified, the default content-type is `text/plain` for :ref:`text_format <envoy_v3_api_field_config.core.v3.SubstitutionFormatString.text_format>` and `application/json` for :ref:`json_format <envoy_v3_api_field_config.core.v3.SubstitutionFormatString.json_format>`.\n\nExample of a LocalReplyConfig with `body_format` field.\n\n.. code-block::\n\n  mappers:\n  - filter:\n      status_code_filter:\n        comparison:\n          op: EQ\n          value:\n            default_value: 400\n            runtime_key: key_b\n    status_code: 401\n    body_format_override:\n      text_format: \"<h1>%LOCAL_REPLY_BODY% %REQ(:path)%</h1>\"\n      content_type: \"text/html; charset=UTF-8\"\n  - filter:\n      status_code_filter:\n        comparison:\n          op: EQ\n          value:\n            default_value: 500\n            runtime_key: key_b\n    status_code: 501\n  body_format:\n    text_format: \"%LOCAL_REPLY_BODY% %RESPONSE_CODE%\"\n\nIn the above example, there is a `body_format_override` inside the first `mapper` with a filter matching `status_code == 400`. It generates the response body in plain text format by concatenating %LOCAL_REPLY_BODY% with the `:path` request header. It is only used when the first mapper is matched. There is a `body_format` at the bottom of the config and at the same level as field `mappers`. It is used when none of the mappers is matched or the matched mapper doesn't have its own `body_format_override` specified.\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/overview.rst",
    "content": "Overview\n========\n\n* HTTP connection manager :ref:`architecture overview <arch_overview_http_conn_man>`\n* HTTP protocols :ref:`architecture overview <arch_overview_http_protocols>`\n* :ref:`v3 API reference\n  <envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager>`\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/rds.rst",
    "content": ".. _config_http_conn_man_rds:\n\nRoute discovery service (RDS)\n=============================\n\nThe route discovery service (RDS) API is an optional API that Envoy will call to dynamically fetch\n:ref:`route configurations <envoy_v3_api_msg_config.route.v3.RouteConfiguration>`. A route configuration includes both\nHTTP header modifications, virtual hosts, and the individual route entries contained within each\nvirtual host. Each :ref:`HTTP connection manager filter <config_http_conn_man>` can independently\nfetch its own route configuration via the API. Optionally, the \n:ref:`virtual host discovery service <config_http_conn_man_vhds>`\ncan be used to fetch virtual hosts separately from the route configuration.\n\n* :ref:`v2 API reference <v2_grpc_streaming_endpoints>`\n\nStatistics\n----------\n\nRDS has a :ref:`statistics <subscription_statistics>` tree rooted at *http.<stat_prefix>.rds.<route_config_name>.*.\nAny ``:`` character in the ``route_config_name`` name gets replaced with ``_`` in the\nstats tree.\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/response_code_details.rst",
    "content": ".. _config_http_conn_man_details:\n\nResponse Code Details\n=====================\n\nIf _%RESPONSE_CODE_DETAILS%_ is configured on via :ref:`access logging<config_access_log_format_response_code_details>`,\nor :ref:`custom headers<config_http_conn_man_headers_custom_request_headers>` Envoy will communicate the detailed\nreason a given stream ended.\nThis page lists the details sent by the HttpConnectionManager, Router filter, and codecs. It is not comprehensive as\nany other filters may send their own local replies with custom details.\n\nBelow are the list of reasons the HttpConnectionManager or Router filter may send responses or reset streams.\n\n.. warning::\n  The following list is not guaranteed to be stable, since the details are subject to change.\n\n.. csv-table::\n   :header: Name, Description\n   :widths: 1, 2\n\n   absolute_path_rejected, The request was rejected due to using an absolute path on a route not supporting them.\n   admin_filter_response, The response was generated by the admin filter.\n   cluster_not_found, The request was rejected by the router filter because there was no cluster found for the selected route.\n   downstream_local_disconnect, The client connection was locally closed for an unspecified reason.\n   downstream_remote_disconnect, The client disconnected unexpectedly.\n   duration_timeout, The max connection duration was exceeded.\n   direct_response, A direct response was generated by the router filter.\n   filter_chain_not_found, The request was rejected due to no matching filter chain.\n   internal_redirect, The original stream was replaced with an internal redirect.\n   low_version, The HTTP/1.0 or HTTP/0.9 request was rejected due to HTTP/1.0 support not being configured.\n   maintenance_mode, The request was rejected by the router filter because the cluster was in maintenance mode.\n   max_duration_timeout, The per-stream max duration timeout was exceeded.\n   missing_host_header, The request was rejected 
due to a missing Host: or :authority field.\n   missing_path_rejected, The request was rejected due to a missing Path or :path header field.\n   no_healthy_upstream, The request was rejected by the router filter because there was no healthy upstream found.\n   overload, The request was rejected due to the Overload Manager reaching configured resource limits.\n   path_normalization_failed, \"The request was rejected because path normalization was configured on and failed, probably due to an invalid path.\"\n   request_headers_failed_strict_check, The request was rejected due to x-envoy-* headers failing strict header validation.\n   request_overall_timeout, The per-stream total request timeout was exceeded.\n   request_payload_exceeded_retry_buffer_limit, Envoy is doing streaming proxying but too much data arrived while waiting to attempt a retry.\n   request_payload_too_large, Envoy is doing non-streaming proxying and the request payload exceeded configured limits.\n   response_payload_too_large, Envoy is doing non-streaming proxying and the response payload exceeded configured limits.\n   route_configuration_not_found, The request was rejected because there was no route configuration found.\n   route_not_found, The request was rejected because there was no route found.\n   stream_idle_timeout, The per-stream keepalive timeout was exceeded.\n   upgrade_failed, The request was rejected because it attempted an unsupported upgrade.\n   upstream_max_stream_duration_reached, The request was destroyed because it exceeded the configured max stream duration.\n   upstream_per_try_timeout, The final upstream try timed out.\n   upstream_reset_after_response_started{details}, The upstream connection was reset after a response was started. 
This may include further details about the cause of the disconnect.\n   upstream_reset_before_response_started{details}, The upstream connection was reset before a response was started. This may include further details about the cause of the disconnect.\n   upstream_response_timeout, The upstream response timed out.\n   via_upstream, The response code was set by the upstream.\n\n\n.. _config_http_conn_man_details_per_codec:\n\nPer codec details\n-----------------\n\nEach codec may send codec-specific details when encountering errors.\n\nHttp1 details\n~~~~~~~~~~~~~\n\nAll http1 details are rooted at *http1.*\n\n.. csv-table::\n   :header: Name, Description\n   :widths: 1, 2\n\n   http1.body_disallowed, A body was sent on a request where bodies are not allowed.\n   http1.codec_error, Some error was encountered in the http_parser internals.\n   http1.connection_header_rejected, The Connection header was malformed or overly long.\n   http1.content_length_and_chunked_not_allowed, A request was sent with both Transfer-Encoding: chunked and a Content-Length header when disallowed by configuration.\n   http1.content_length_not_allowed, A content length was sent on a response it was disallowed on.\n   http1.headers_too_large, The overall byte size of request headers was larger than the configured limits.\n   http1.invalid_characters, The headers contained illegal characters.\n   http1.invalid_transfer_encoding, The Transfer-Encoding header was not valid.\n   http1.invalid_url, The request URL was not valid.\n   http1.too_many_headers, Too many headers were sent with this request.\n   http1.transfer_encoding_not_allowed, A transfer encoding was sent on a response it was disallowed on.\n   http1.unexpected_underscore, An underscore was sent in a header key when disallowed by configuration.\n\n\nHttp2 details\n~~~~~~~~~~~~~\n\nAll http2 details are rooted at *http2.*\n\n.. 
csv-table::\n   :header: Name, Description\n   :widths: 1, 2\n\n    http2.inbound_empty_frames_flood, Envoy detected an inbound HTTP/2 frame flood.\n    http2.invalid.header.field, One of the HTTP/2 headers was invalid\n    http2.outbound_frames_flood, Envoy detected an HTTP/2 frame flood from the server.\n    http2.too_many_headers, The number of headers (or trailers) exceeded the configured limits\n    http2.unexpected_underscore, Envoy was configured to drop requests with header keys beginning with underscores.\n    http2.unknown.nghttp2.error, An unknown error was encountered by nghttp2\n    http2.violation.of.messaging.rule, The stream was in violation of a HTTP/2 messaging rule.\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/route_matching.rst",
    "content": ".. _config_http_conn_man_route_table_route_matching:\n\nRoute matching\n==============\n\nWhen Envoy matches a route, it uses the following procedure:\n\n#. The HTTP request's *host* or *:authority* header is matched to a :ref:`virtual host\n   <envoy_v3_api_msg_config.route.v3.VirtualHost>`.\n#. Each :ref:`route entry <envoy_v3_api_msg_config.route.v3.Route>` in the virtual host is checked,\n   *in order*. If there is a match, the route is used and no further route checks are made.\n#. Independently, each :ref:`virtual cluster <envoy_v3_api_msg_config.route.v3.VirtualCluster>` in the\n   virtual host is checked, *in order*. If there is a match, the virtual cluster is used and no\n   further virtual cluster checks are made.\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/runtime.rst",
    "content": ".. _config_http_conn_man_runtime:\n\nRuntime\n=======\n\nThe HTTP connection manager supports the following runtime settings:\n\n.. _config_http_conn_man_runtime_normalize_path:\n\nhttp_connection_manager.normalize_path\n  % of requests that will have path normalization applied if not already configured in\n  :ref:`normalize_path <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.normalize_path>`.\n  This is evaluated at configuration load time and will apply to all requests for a given\n  configuration.\n\n.. _config_http_conn_man_runtime_client_enabled:\n\ntracing.client_enabled\n  % of requests that will be force traced if the\n  :ref:`config_http_conn_man_headers_x-client-trace-id` header is set. Defaults to 100.\n\n.. _config_http_conn_man_runtime_global_enabled:\n\ntracing.global_enabled\n  % of requests that will be traced after all other checks have been applied (force tracing,\n  sampling, etc.). Defaults to 100.\n\n.. _config_http_conn_man_runtime_random_sampling:\n\ntracing.random_sampling\n  % of requests that will be randomly traced. See :ref:`here <arch_overview_tracing>` for more\n  information. This runtime control is specified in the range 0-10000 and defaults to 10000. Thus,\n  trace sampling can be specified in 0.01% increments.\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/stats.rst",
    "content": ".. _config_http_conn_man_stats:\n\nStatistics\n==========\n\nEvery connection manager has a statistics tree rooted at *http.<stat_prefix>.* with the following\nstatistics:\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   downstream_cx_total, Counter, Total connections\n   downstream_cx_ssl_total, Counter, Total TLS connections\n   downstream_cx_http1_total, Counter, Total HTTP/1.1 connections\n   downstream_cx_upgrades_total, Counter, Total successfully upgraded connections. These are also counted as total http1/http2 connections.\n   downstream_cx_http2_total, Counter, Total HTTP/2 connections\n   downstream_cx_destroy, Counter, Total connections destroyed\n   downstream_cx_destroy_remote, Counter, Total connections destroyed due to remote close\n   downstream_cx_destroy_local, Counter, Total connections destroyed due to local close\n   downstream_cx_destroy_active_rq, Counter, Total connections destroyed with 1+ active request\n   downstream_cx_destroy_local_active_rq, Counter, Total connections destroyed locally with 1+ active request\n   downstream_cx_destroy_remote_active_rq, Counter, Total connections destroyed remotely with 1+ active request\n   downstream_cx_active, Gauge, Total active connections\n   downstream_cx_ssl_active, Gauge, Total active TLS connections\n   downstream_cx_http1_active, Gauge, Total active HTTP/1.1 connections\n   downstream_cx_upgrades_active, Gauge, Total active upgraded connections. 
These are also counted as active http1/http2 connections.\n   downstream_cx_http2_active, Gauge, Total active HTTP/2 connections\n   downstream_cx_protocol_error, Counter, Total protocol errors\n   downstream_cx_length_ms, Histogram, Connection length milliseconds\n   downstream_cx_rx_bytes_total, Counter, Total bytes received\n   downstream_cx_rx_bytes_buffered, Gauge, Total received bytes currently buffered\n   downstream_cx_tx_bytes_total, Counter, Total bytes sent\n   downstream_cx_tx_bytes_buffered, Gauge, Total sent bytes currently buffered\n   downstream_cx_drain_close, Counter, Total connections closed due to draining\n   downstream_cx_idle_timeout, Counter, Total connections closed due to idle timeout\n   downstream_cx_max_duration_reached, Counter, Total connections closed due to max connection duration\n   downstream_cx_overload_disable_keepalive, Counter, Total connections for which HTTP 1.x keepalive has been disabled due to Envoy overload\n   downstream_flow_control_paused_reading_total, Counter, Total number of times reads were disabled due to flow control\n   downstream_flow_control_resumed_reading_total, Counter, Total number of times reads were enabled on the connection due to flow control\n   downstream_rq_total, Counter, Total requests\n   downstream_rq_http1_total, Counter, Total HTTP/1.1 requests\n   downstream_rq_http2_total, Counter, Total HTTP/2 requests\n   downstream_rq_active, Gauge, Total active requests\n   downstream_rq_response_before_rq_complete, Counter, Total responses sent before the request was complete\n   downstream_rq_rx_reset, Counter, Total request resets received\n   downstream_rq_tx_reset, Counter, Total request resets sent\n   downstream_rq_non_relative_path, Counter, Total requests with a non-relative HTTP path\n   downstream_rq_too_large, Counter, Total requests resulting in a 413 due to buffering an overly large body\n   downstream_rq_completed, Counter, Total requests that resulted in a response (e.g. 
does not include aborted requests)\n   downstream_rq_1xx, Counter, Total 1xx responses\n   downstream_rq_2xx, Counter, Total 2xx responses\n   downstream_rq_3xx, Counter, Total 3xx responses\n   downstream_rq_4xx, Counter, Total 4xx responses\n   downstream_rq_5xx, Counter, Total 5xx responses\n   downstream_rq_ws_on_non_ws_route, Counter, Total upgrade requests rejected by non upgrade routes. This now applies both to WebSocket and non-WebSocket upgrades\n   downstream_rq_time, Histogram, Total time for request and response (milliseconds)\n   downstream_rq_idle_timeout, Counter, Total requests closed due to idle timeout\n   downstream_rq_max_duration_reached, Counter, Total requests closed due to max duration reached\n   downstream_rq_timeout, Counter, Total requests closed due to a timeout on the request path\n   downstream_rq_overload_close, Counter, Total requests closed due to Envoy overload\n   rs_too_large, Counter, Total response errors due to buffering an overly large body\n\nPer user agent statistics\n-------------------------\n\nAdditional per user agent statistics are rooted at *http.<stat_prefix>.user_agent.<user_agent>.*\nCurrently Envoy matches user agent for both iOS (*ios*) and Android (*android*) and produces\nthe following statistics:\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   downstream_cx_total, Counter, Total connections\n   downstream_cx_destroy_remote_active_rq, Counter, Total connections destroyed remotely with 1+ active requests\n   downstream_rq_total, Counter, Total requests\n\n.. _config_http_conn_man_stats_per_listener:\n\nPer listener statistics\n-----------------------\n\nAdditional per listener statistics are rooted at *listener.<address>.http.<stat_prefix>.* with the\nfollowing statistics:\n\n.. 
csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   downstream_rq_completed, Counter, Total responses\n   downstream_rq_1xx, Counter, Total 1xx responses\n   downstream_rq_2xx, Counter, Total 2xx responses\n   downstream_rq_3xx, Counter, Total 3xx responses\n   downstream_rq_4xx, Counter, Total 4xx responses\n   downstream_rq_5xx, Counter, Total 5xx responses\n\n.. _config_http_conn_man_stats_per_codec:\n\nPer codec statistics\n-----------------------\n\nEach codec has the option of adding per-codec statistics. Both http1 and http2 have codec stats.\n\nHttp1 codec statistics\n~~~~~~~~~~~~~~~~~~~~~~\n\nAll http1 statistics are rooted at *http1.*\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting <envoy_v3_api_field_config.core.v3.HttpProtocolOptions.headers_with_underscores_action>`.\n   metadata_not_supported_error, Counter, Total number of metadata dropped during HTTP/1 encoding\n   response_flood, Counter, Total number of connections closed due to response flooding\n   requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting <envoy_v3_api_field_config.core.v3.HttpProtocolOptions.headers_with_underscores_action>`.\n\nHttp2 codec statistics\n~~~~~~~~~~~~~~~~~~~~~~\n\nAll http2 statistics are rooted at *http2.*\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. 
This action is configured by setting the :ref:`headers_with_underscores_action config setting <envoy_v3_api_field_config.core.v3.HttpProtocolOptions.headers_with_underscores_action>`.\n   header_overflow, Counter, Total number of connections reset due to the headers being larger than the :ref:`configured value <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.max_request_headers_kb>`.\n   headers_cb_no_stream, Counter, Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug\n   inbound_empty_frames_flood, Counter, Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload>`.\n   inbound_priority_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type PRIORITY. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream>`.\n   inbound_window_update_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type WINDOW_UPDATE. The limit is configured by setting the :ref:`max_inbound_window_update_frames_per_data_frame_sent config setting <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent>`.\n   outbound_flood, Counter, Total number of connections terminated for exceeding the limit on outbound frames of all types. 
The limit is configured by setting the :ref:`max_outbound_frames config setting <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.max_outbound_frames>`.\n   outbound_control_flood, Counter, \"Total number of connections terminated for exceeding the limit on outbound frames of types PING, SETTINGS and RST_STREAM. The limit is configured by setting the :ref:`max_outbound_control_frames config setting <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.max_outbound_control_frames>`.\"\n   requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting <envoy_v3_api_field_config.core.v3.HttpProtocolOptions.headers_with_underscores_action>`.\n   rx_messaging_error, Counter, Total number of invalid received frames that violated `section 8 <https://tools.ietf.org/html/rfc7540#section-8>`_ of the HTTP/2 spec. This will result in a *tx_reset*\n   rx_reset, Counter, Total number of reset stream frames received by Envoy\n   trailers, Counter, Total number of trailers seen on requests coming from downstream\n   tx_flush_timeout, Counter, Total number of :ref:`stream idle timeouts <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>` waiting for open stream window to flush the remainder of a stream\n   tx_reset, Counter, Total number of reset stream frames transmitted by Envoy\n   keepalive_timeout, Counter, Total number of connections closed due to :ref:`keepalive timeout <envoy_v3_api_field_config.core.v3.KeepaliveSettings.timeout>`\n   streams_active, Gauge, Active streams as observed by the codec\n   pending_send_bytes, Gauge, Currently buffered body data in bytes waiting to be written when stream/connection window is opened.\n\n.. 
attention::\n\n  The HTTP/2 `streams_active` gauge may be greater than the HTTP connection manager\n  `downstream_rq_active` gauge due to differences in stream accounting between the codec and the\n  HTTP connection manager.\n\nTracing statistics\n------------------\n\nTracing statistics are emitted when tracing decisions are made. All tracing statistics are rooted at *http.<stat_prefix>.tracing.* with the following statistics:\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   random_sampling, Counter, Total number of traceable decisions by random sampling\n   service_forced, Counter, Total number of traceable decisions by server runtime flag *tracing.global_enabled*\n   client_enabled, Counter, Total number of traceable decisions by request header *x-envoy-force-trace*\n   not_traceable, Counter, Total number of non-traceable decisions by request id\n   health_check, Counter, Total number of non-traceable decisions by health check\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/traffic_splitting.rst",
    "content": ".. _config_http_conn_man_route_table_traffic_splitting:\n\nTraffic Shifting/Splitting\n===========================================\n\n.. contents::\n  :local:\n\nEnvoy's router can split traffic to a route in a virtual host across\ntwo or more upstream clusters. There are two common use cases.\n\n1. Version upgrades: traffic to a route is shifted gradually\nfrom one cluster to another. The\n:ref:`traffic shifting <config_http_conn_man_route_table_traffic_splitting_shift>`\nsection describes this scenario in more detail.\n\n2. A/B testing or multivariate testing: ``two or more versions`` of\nthe same service are tested simultaneously. The traffic to the route has to\nbe *split* between clusters running different versions of the same\nservice. The\n:ref:`traffic splitting <config_http_conn_man_route_table_traffic_splitting_split>`\nsection describes this scenario in more detail.\n\n.. _config_http_conn_man_route_table_traffic_splitting_shift:\n\nTraffic shifting between two upstreams\n--------------------------------------\n\nThe :ref:`runtime <envoy_v3_api_field_config.route.v3.RouteMatch.runtime_fraction>` object\nin the route configuration determines the probability of selecting a\nparticular route (and hence its cluster). By using the *runtime_fraction*\nconfiguration, traffic to a particular route in a virtual host can be\ngradually shifted from one cluster to another. Consider the following\nexample configuration, where two versions ``helloworld_v1`` and\n``helloworld_v2`` of a service named ``helloworld`` are declared in the\nenvoy configuration file.\n\n.. 
code-block:: yaml\n\n  virtual_hosts:\n     - name: www2\n       domains:\n       - '*'\n       routes:\n         - match:\n             prefix: /\n             runtime_fraction:\n               default_value:\n                 numerator: 50\n                 denominator: HUNDRED\n               runtime_key: routing.traffic_shift.helloworld\n           route:\n             cluster: helloworld_v1\n         - match:\n             prefix: /\n           route:\n             cluster: helloworld_v2\n\n\nEnvoy matches routes with a :ref:`first match <config_http_conn_man_route_table_route_matching>` policy.\nIf the route has a runtime_fraction object, the request will be additionally matched based on the runtime_fraction\n:ref:`value <envoy_v3_api_field_config.route.v3.RouteMatch.runtime_fraction>`\n(or the default, if no value is specified). Thus, by placing routes\nback-to-back in the above example and specifying a runtime_fraction object in the\nfirst route, traffic shifting can be accomplished by changing the runtime_fraction\nvalue. The following are the approximate sequence of actions required to\naccomplish the task.\n\n1. In the beginning, set ``routing.traffic_shift.helloworld`` to ``100``,\n   so that all requests to the ``helloworld`` virtual host would match with\n   the v1 route and be served by the ``helloworld_v1`` cluster.\n2. To start shifting traffic to ``helloworld_v2`` cluster, set\n   ``routing.traffic_shift.helloworld`` to values ``0 < x < 100``. For\n   instance at ``90``, 1 out of every 10 requests to the ``helloworld``\n   virtual host will not match the v1 route and will fall through to the v2\n   route.\n3. Gradually decrease the value set in ``routing.traffic_shift.helloworld``\n   so that a larger percentage of requests match the v2 route.\n4. When ``routing.traffic_shift.helloworld`` is set to ``0``, no requests\n   to the ``helloworld`` virtual host will match to the v1 route. 
All\n   traffic would now fall through to the v2 route and be served by the\n   ``helloworld_v2`` cluster.\n\n\n.. _config_http_conn_man_route_table_traffic_splitting_split:\n\nTraffic splitting across multiple upstreams\n-------------------------------------------\n\nConsider the ``helloworld`` example again, now with three versions (v1, v2 and\nv3) instead of two. To split traffic evenly across the three versions\n(i.e., ``33%, 33%, 34%``), the ``weighted_clusters`` option can be used to\nspecify the weight for each upstream cluster.\n\nUnlike the previous example, a **single** :ref:`route\n<envoy_v3_api_msg_config.route.v3.Route>` entry is sufficient. The\n:ref:`weighted_clusters <envoy_v3_api_field_config.route.v3.RouteAction.weighted_clusters>`\nconfiguration block in a route can be used to specify multiple upstream clusters\nalong with weights that indicate the **percentage** of traffic to be sent\nto each upstream cluster.\n\n.. code-block:: yaml\n\n  virtual_hosts:\n     - name: www2\n       domains:\n       - '*'\n       routes:\n         - match: { prefix: / }\n           route:\n             weighted_clusters:\n               runtime_key_prefix: routing.traffic_split.helloworld\n               clusters:\n                 - name: helloworld_v1\n                   weight: 33\n                 - name: helloworld_v2\n                   weight: 33\n                 - name: helloworld_v3\n                   weight: 34\n\n\nBy default, the weights must sum to exactly 100. In the V2 API, the\n:ref:`total weight <envoy_v3_api_field_config.route.v3.WeightedCluster.total_weight>` defaults to 100, but can\nbe modified to allow finer granularity.\n\nThe weights assigned to each cluster can be dynamically adjusted using the\nfollowing runtime variables: ``routing.traffic_split.helloworld.helloworld_v1``,\n``routing.traffic_split.helloworld.helloworld_v2`` and\n``routing.traffic_split.helloworld.helloworld_v3``.\n"
  },
  {
    "path": "docs/root/configuration/http/http_conn_man/vhds.rst",
    "content": ".. _config_http_conn_man_vhds:\n\nVirtual Host Discovery Service (VHDS)\n=====================================\n\nThe virtual host discovery service (VHDS) API is an optional API that Envoy will call to\ndynamically fetch :ref:`virtual hosts <envoy_v3_api_msg_config.route.v3.VirtualHost>`. A virtual host includes\na name and set of domains that get routed to it based on the incoming request's host header.\n\nBy default in RDS, all routes for a cluster are sent to every Envoy instance in the mesh. This\ncauses scaling issues as the size of the cluster grows. The majority of this complexity can be\nfound in the virtual host configurations, of which most are not needed by any individual proxy.\n\nIn order to fix this issue, the Virtual Host Discovery Service (VHDS) protocol uses the delta xDS\nprotocol to allow a route configuration to be subscribed to and the necessary virtual hosts to be\nrequested as needed. Instead of sending all virtual hosts with a route config, using VHDS will\nallow an Envoy instance to subscribe and unsubscribe from a list of virtual hosts stored internally\nin the xDS management server. 
The xDS management server will monitor this list and use it to filter\nthe configuration sent to an individual Envoy instance to only contain the subscribed virtual hosts.\n\nVirtual Host Resource Naming Convention\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nVirtual hosts in VHDS are identified by a combination of the name of the route configuration to\nwhich the virtual host belongs as well as the HTTP *host* header (*:authority* for HTTP2) entry.\nResources should be named as follows::\n\n<route configuration name>/<host entry>\n\nNote that matching should be done from right to left since a host entry cannot contain slashes while\na route configuration name can.\n\nSubscribing to Resources\n^^^^^^^^^^^^^^^^^^^^^^^^\nVHDS allows resources to be :ref:`subscribed <xds_protocol_delta_subscribe>` to using a\n:ref:`DeltaDiscoveryRequest <envoy_v3_api_msg_service.discovery.v3.DeltaDiscoveryRequest>` with the\n:ref:`type_url <envoy_v3_api_field_service.discovery.v3.DeltaDiscoveryRequest.type_url>` set to\n`type.googleapis.com/envoy.config.route.v3.VirtualHost`\nand :ref:`resource_names_subscribe <envoy_v3_api_field_service.discovery.v3.DeltaDiscoveryRequest.resource_names_subscribe>`\nset to a list of virtual host resource names for which it would like configuration.\n\nIf a route for the contents of a host/authority header cannot be resolved, the active stream is\npaused while a\n:ref:`DeltaDiscoveryRequest <envoy_v3_api_msg_service.discovery.v3.DeltaDiscoveryRequest>` is sent.\nWhen a :ref:`DeltaDiscoveryResponse <envoy_v3_api_msg_service.discovery.v3.DeltaDiscoveryResponse>` is received where one of\nthe :ref:`aliases <envoy_v3_api_field_service.discovery.v3.Resource.aliases>` or the \n:ref:`name <envoy_v3_api_field_service.discovery.v3.Resource.name>` in the response exactly matches the\n:ref:`resource_names_subscribe <envoy_v3_api_field_service.discovery.v3.DeltaDiscoveryRequest.resource_names_subscribe>`\nentry from the :ref:`DeltaDiscoveryRequest 
<envoy_v3_api_msg_service.discovery.v3.DeltaDiscoveryRequest>`, the route\nconfiguration is updated, the stream is resumed, and processing of the filter chain continues.\n\nUpdates to virtual hosts occur in two ways. If a virtual host was originally sent over RDS, then the\nvirtual host should be updated over RDS. If a virtual host was subscribed to over VHDS, then updates\nwill take place over VHDS.\n\nWhen a route configuration entry is updated, if the \n:ref:`vhds field <envoy_v3_api_field_config.route.v3.RouteConfiguration.vhds>` has changed, the virtual host table for\nthat route configuration is cleared, which will require that all virtual hosts be sent again.\n\nCompatibility with Scoped RDS\n-----------------------------\n\nVHDS shouldn't present any compatibility issues with\n:ref:`scoped RDS <envoy_v3_api_msg_config.route.v3.ScopedRouteConfiguration>`.\nRoute configuration names can still be used for virtual host matching, but with\nscoped RDS configured it would point to a scoped route configuration.\n\nHowever, it is important to note that using\non-demand :ref:`scoped RDS <envoy_v3_api_msg_config.route.v3.ScopedRouteConfiguration>`\nand VHDS together will require two on-demand subscriptions per routing scope.\n\n\n* :ref:`v2 API reference <v2_grpc_streaming_endpoints>`\n\nStatistics\n----------\n\nVHDS has a statistics tree rooted at *http.<stat_prefix>.vhds.<virtual_host_name>.*.\nAny ``:`` character in the ``virtual_host_name`` name gets replaced with ``_`` in the\nstats tree. The stats tree contains the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  config_reload, Counter, Total API fetches that resulted in a config reload due to a different config\n  empty_update, Counter, Total count of empty updates received\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml",
    "content": "admin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address:\n      protocol: TCP\n      address: 127.0.0.1\n      port_value: 9901\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        protocol: TCP\n        address: 0.0.0.0\n        port_value: 10000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains: [\"*\"]\n              routes:\n              - match:\n                  prefix: \"/force-host-rewrite\"\n                route:\n                  cluster: dynamic_forward_proxy_cluster\n                typed_per_filter_config:\n                  envoy.filters.http.dynamic_forward_proxy:\n                    \"@type\": type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.PerRouteConfig\n                    host_rewrite_literal: www.example.org\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: dynamic_forward_proxy_cluster\n          http_filters:\n          - name: envoy.filters.http.dynamic_forward_proxy\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig\n              dns_cache_config:\n                name: dynamic_forward_proxy_cache_config\n                dns_lookup_family: V4_ONLY\n          - name: envoy.filters.http.router\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n  clusters:\n  - name: dynamic_forward_proxy_cluster\n    connect_timeout: 1s\n    lb_policy: 
CLUSTER_PROVIDED\n    cluster_type:\n      name: envoy.clusters.dynamic_forward_proxy\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig\n        dns_cache_config:\n          name: dynamic_forward_proxy_cache_config\n          dns_lookup_family: V4_ONLY\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n        common_tls_context:\n          validation_context:\n            trusted_ca: {filename: /etc/ssl/certs/ca-certificates.crt}\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml",
    "content": "admin:\n  access_log_path: /dev/stdout\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 9901\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 80\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          access_log:\n          - name: envoy.access_loggers.file\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog\n              path: /dev/stdout\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains: [\"*\"]\n              routes:\n              - match:\n                  prefix: \"/route-with-filter-disabled\"\n                route:\n                  host_rewrite: localhost\n                  cluster: grpc\n                  timeout: 5.00s\n                # per_filter_config disables the filter for this route\n                typed_per_filter_config:\n                  envoy.filters.http.grpc_http1_reverse_bridge:\n                    \"@type\": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfigPerRoute\n                    disabled: true\n              - match:\n                  prefix: \"/route-with-filter-enabled\"\n                route:\n                  host_rewrite: localhost\n                  cluster: other\n                  timeout: 5.00s\n          http_filters:\n          - name: envoy.filters.http.grpc_http1_reverse_bridge\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfig\n              
content_type: application/grpc+proto\n              withhold_grpc_frames: true\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: other\n    connect_timeout: 5.00s\n    type: LOGICAL_DNS\n    dns_lookup_family: V4_ONLY\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: some_service\n      endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: localhost\n                  port_value: 4630\n  - name: grpc\n    connect_timeout: 5.00s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: grpc\n      endpoints:\n        - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: localhost\n                    port_value: 10005\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml",
    "content": "admin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address: { address: 0.0.0.0, port_value: 9901 }\n\nstatic_resources:\n  listeners:\n  - name: listener1\n    address:\n      socket_address: { address: 0.0.0.0, port_value: 51051 }\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          stat_prefix: grpc_json\n          codec_type: AUTO\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains: [\"*\"]\n              routes:\n              # NOTE: by default, matching happens based on the gRPC route, and not on the incoming request path.\n              # Reference: https://www.envoyproxy.io/docs/envoy/latest/configuration/http_filters/grpc_json_transcoder_filter#route-configs-for-transcoded-requests\n              - match: { prefix: \"/helloworld.Greeter\" }\n                route: { cluster: grpc, timeout: { seconds: 60 } }\n          http_filters:\n          - name: envoy.filters.http.grpc_json_transcoder\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder\n              proto_descriptor: \"/tmp/envoy/proto.pb\"\n              services: [\"helloworld.Greeter\"]\n              print_options:\n                add_whitespace: true\n                always_print_primitive_fields: true\n                always_print_enums_as_ints: false\n                preserve_proto_field_names: false\n          - name: envoy.filters.http.router\n\n  clusters:\n  - name: grpc\n    connect_timeout: 1.25s\n    type: logical_dns\n    lb_policy: round_robin\n    dns_lookup_family: V4_ONLY\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: grpc\n      
endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                # WARNING: \"docker.for.mac.localhost\" has been deprecated from Docker v18.03.0.\n                # If you're running an older version of Docker, please use \"docker.for.mac.localhost\" instead.\n                # Reference: https://docs.docker.com/docker-for-mac/release-notes/#docker-community-edition-18030-ce-mac59-2018-03-26\n                address: host.docker.internal\n                port_value: 50051\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst",
    "content": ".. _config_http_filters_adaptive_concurrency:\n\nAdaptive Concurrency\n====================\n\n.. attention::\n\n  The adaptive concurrency filter is experimental and is currently under active development.\n\nThis filter should be configured with the name `envoy.filters.http.adaptive_concurrency`.\n\nSee the :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.adaptive_concurrency.v3.AdaptiveConcurrency>` for details on each configuration parameter.\n\nOverview\n--------\nThe adaptive concurrency filter dynamically adjusts the allowed number of requests that can be\noutstanding (concurrency) to all hosts in a given cluster at any time. Concurrency values are\ncalculated using latency sampling of completed requests and comparing the measured samples in a time\nwindow against the expected latency for hosts in the cluster.\n\nConcurrency Controllers\n-----------------------\nConcurrency controllers implement the algorithm responsible for making forwarding decisions for each\nrequest and recording latency samples to use in the calculation of the concurrency limit.\n\nGradient Controller\n~~~~~~~~~~~~~~~~~~~\nThe gradient controller makes forwarding decisions based on a periodically measured ideal round-trip\ntime (minRTT) for an upstream.\n\n:ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.adaptive_concurrency.v3.GradientControllerConfig>`\n\nCalculating the minRTT\n^^^^^^^^^^^^^^^^^^^^^^\n\nThe minRTT is periodically measured by only allowing a very low outstanding request count to an\nupstream cluster and measuring the latency under these ideal conditions. The calculation is also\ntriggered in scenarios where the concurrency limit is determined to be the minimum possible value\nfor 5 consecutive sampling windows. 
The length of this minRTT calculation window is variable\ndepending on the number of requests the filter is configured to aggregate to represent the expected\nlatency of an upstream.\n\nA configurable *jitter* value is used to randomly delay the start of the minRTT calculation window\nby some amount of time. This is not necessary and can be disabled; however, it is recommended to\nprevent all hosts in a cluster from being in a minRTT calculation window (and having a concurrency\nlimit of 3 by default) at the same time. The jitter helps negate the effect of the minRTT\ncalculation on the downstream success rate if retries are enabled.\n\nIt is possible that there is a noticeable increase in request 503s during the minRTT measurement\nwindow because of the potentially significant drop in the concurrency limit. This is expected and it\nis recommended to enable retries for resets/503s.\n\n.. note::\n\n    It is recommended to use :ref:`the previous_hosts retry predicate\n    <arch_overview_http_retry_plugins>`. Due to the minRTT recalculation jitter, it's unlikely that\n    all hosts in the cluster will be in a minRTT calculation window, so retrying on a different host\n    in the cluster will have a higher likelihood of success in this scenario.\n\nOnce calculated, the minRTT is then used in the calculation of a value referred to as the\n*gradient*.\n\nThe Gradient\n^^^^^^^^^^^^\nThe gradient is calculated using summarized sampled request latencies (sampleRTT):\n\n.. 
math::\n\n    gradient = \\frac{minRTT + B}{sampleRTT}\n\nThis gradient value has a useful property, such that it decreases as the sampled latencies increase.\nNotice that *B*, the buffer value added to the minRTT, allows for normal variance in the sampled\nlatencies by requiring the sampled latencies to exceed the minRTT by some configurable threshold\nbefore decreasing the gradient value.\n\nThe buffer will be a percentage of the measured minRTT value whose value is modified via the buffer field in the :ref:`minRTT calculation parameters <envoy_v3_api_msg_extensions.filters.http.adaptive_concurrency.v3.GradientControllerConfig.MinimumRTTCalculationParams>`. The buffer is calculated as follows:\n\n.. math::\n\n    B = minRTT * buffer_{pct}\n\nThe gradient value is then used to update the concurrency limit via:\n\n.. math::\n\n    limit_{new} = gradient * limit_{old} + headroom\n\nConcurrency Limit Headroom\n^^^^^^^^^^^^^^^^^^^^^^^^^^\nThe headroom value is necessary as a driving factor to increase the concurrency limit when the\nsampleRTT is in the same ballpark as the minRTT. This value must be present in the limit\ncalculation, since it forces the concurrency limit to increase until there is a deviation from the\nminRTT latency. In the absence of a headroom value, the concurrency limit could potentially stagnate\nat an unnecessarily small value if the sampleRTT and minRTT are close to each other.\n\nBecause the headroom value is so necessary to the proper functioning of the gradient controller, the\nheadroom value is unconfigurable and pinned to the square-root of the concurrency limit.\n\nLimitations\n-----------\nThe adaptive concurrency filter's control loop relies on latency measurements\nand adjustments to the concurrency limit based on those measurements. Because of\nthis, the filter must operate in conditions where it has full control over\nrequest concurrency. This means that:\n\n    1. 
The filter works as intended in the filter chain for a local cluster.\n\n    2. The filter must be able to limit the concurrency for a cluster. This means\n       there must not be requests destined for a cluster that are not decoded by\n       the adaptive concurrency filter.\n\nExample Configuration\n---------------------\nAn example filter configuration can be found below. Not all fields are required and many of the\nfields can be overridden via runtime settings.\n\n.. code-block:: yaml\n\n  name: envoy.filters.http.adaptive_concurrency\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.adaptive_concurrency.v3.AdaptiveConcurrency\n    gradient_controller_config:\n      sample_aggregate_percentile:\n        value: 90\n      concurrency_limit_params:\n        concurrency_update_interval: 0.1s\n      min_rtt_calc_params:\n        jitter:\n          value: 10\n        interval: 60s\n        request_count: 50\n    enabled:\n      default_value: true\n      runtime_key: \"adaptive_concurrency.enabled\"\n\nThe above configuration can be understood as follows:\n\n* Gather latency samples for a time window of 100ms. When entering a new window, summarize the\n  requests (sampleRTT) and update the concurrency limit using this sampleRTT.\n* When calculating the sampleRTT, use the p90 of all sampled latencies for that window.\n* Recalculate the minRTT every 60s and add a jitter (random delay) of 0s-6s to the start of the\n  minRTT recalculation. The delay is dictated by the jitter value.\n* Collect 50 request samples to calculate the minRTT and use the p90 to summarize them.\n* The filter is enabled by default.\n\n.. note::\n\n    It is recommended that the adaptive concurrency filter come after the healthcheck filter in the\n    filter chain to prevent latency sampling of health checks. 
If health check traffic is sampled,\n    it could potentially affect the accuracy of the minRTT measurements.\n\nRuntime\n-------\n\nThe adaptive concurrency filter supports the following runtime settings:\n\nadaptive_concurrency.enabled\n    Overrides whether the adaptive concurrency filter will use the concurrency controller for\n    forwarding decisions. If set to `false`, the filter will be a no-op. Defaults to what is\n    specified for `enabled` in the filter configuration.\n\nadaptive_concurrency.gradient_controller.min_rtt_calc_interval_ms\n    Overrides the interval in which the ideal round-trip time (minRTT) will be recalculated.\n\nadaptive_concurrency.gradient_controller.min_rtt_aggregate_request_count\n    Overrides the number of requests sampled for calculation of the minRTT.\n\nadaptive_concurrency.gradient_controller.jitter\n    Overrides the random delay introduced to the minRTT calculation start time. A value of `10`\n    indicates a random delay of 10% of the configured interval. The runtime value specified is\n    clamped to the range [0,100].\n\nadaptive_concurrency.gradient_controller.sample_rtt_calc_interval_ms\n    Overrides the interval in which the concurrency limit is recalculated based on sampled latencies.\n\nadaptive_concurrency.gradient_controller.max_concurrency_limit\n    Overrides the maximum allowed concurrency limit.\n\nadaptive_concurrency.gradient_controller.min_rtt_buffer\n    Overrides the padding added to the minRTT when calculating the concurrency limit.\n\nadaptive_concurrency.gradient_controller.sample_aggregate_percentile\n    Overrides the percentile value used to represent the collection of latency samples in\n    calculations. A value of `95` indicates the 95th percentile. 
The runtime value specified is\n    clamped to the range [0,100].\n\nadaptive_concurrency.gradient_controller.min_concurrency\n    Overrides the concurrency that is pinned while measuring the minRTT.\n\nStatistics\n----------\nThe adaptive concurrency filter outputs statistics in the\n*http.<stat_prefix>.adaptive_concurrency.* namespace. The :ref:`stat prefix\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stat_prefix>`\ncomes from the owning HTTP connection manager. Statistics are specific to the concurrency\ncontrollers.\n\nGradient Controller Statistics\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nThe gradient controller uses the namespace\n*http.<stat_prefix>.adaptive_concurrency.gradient_controller*.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: auto\n\n  rq_blocked, Counter, Total requests that were blocked by the filter.\n  min_rtt_calculation_active, Gauge, Set to 1 if the controller is in the process of a minRTT calculation. 0 otherwise.\n  concurrency_limit, Gauge, The current concurrency limit.\n  gradient, Gauge, The current gradient value.\n  burst_queue_size, Gauge, The current headroom value in the concurrency limit calculation.\n  min_rtt_msecs, Gauge, The current measured minRTT value.\n  sample_rtt_msecs, Gauge, The current measured sampleRTT aggregate.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/admission_control_filter.rst",
    "content": ".. _config_http_filters_admission_control:\n\nAdmission Control\n=================\n\n.. attention::\n\n  The admission control filter is experimental and is currently under active development.\n\nSee the :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.admission_control.v3alpha.AdmissionControl>` for details on each configuration parameter.\n\nOverview\n--------\n\nThe admission control filter probabilistically rejects requests based on the success rate of\nprevious requests in a configurable sliding time window. It is based on `client-side\nthrottling <https://landing.google.com/sre/sre-book/chapters/handling-overload/>`_ from the `Google SRE handbook <https://landing.google.com/sre/sre-book/toc/index.html>`_. The only notable difference between the admission control\nfilter's load shedding and load shedding defined in client-side throttling is that users may\nconfigure how aggressively load shedding starts at a target request success rate. Users may also\nconfigure the definition of a successful request for the purposes of the rejection probability\ncalculation.\n\nThe probability that the filter will reject a request is as follows:\n\n.. math::\n\n   P_{reject} = {(\\frac{n_{total} - s}{n_{total} + 1})}^\\frac{1}{aggression}\n\nwhere,\n\n.. math::\n\n   s = \\frac{n_{success}}{threshold}\n\n\n- *n* refers to a request count gathered in the sliding window.\n- *threshold* is a configurable value that dictates the lowest request success rate at which the\n  filter will **not reject** requests. The value is normalized to [0,1] for the calculation.\n- *aggression* controls the rejection probability curve such that 1.0 is a linear increase in\n  rejection probability as the success rate decreases. As the **aggression** increases, the\n  rejection probability will be higher for higher success rates. See `Aggression`_ for a more\n  detailed explanation.\n\n.. 
note::\n   The success rate calculations are performed on a per-thread basis for increased performance. In\n   addition, the per-thread isolation decreases the blast radius of a single bad connection\n   with an anomalous success rate. Therefore, the rejection probability may vary between worker\n   threads.\n\n.. note::\n   Health check traffic does not count towards any of the filter's measurements.\n\nSee the :ref:`v3 API reference\n<envoy_v3_api_msg_extensions.filters.http.admission_control.v3alpha.AdmissionControl>` for more\ndetails on this parameter.\n\nThe definition of a successful request is a :ref:`configurable parameter\n<envoy_v3_api_msg_extensions.filters.http.admission_control.v3alpha.AdmissionControl.SuccessCriteria>`\nfor both HTTP and gRPC requests.\n\nAggression\n~~~~~~~~~~\n\nThe aggression value affects the rejection probabilities as shown in the following figures:\n\n.. image:: images/aggression_graph.png\n\nSince the success rate threshold in the first figure is set to 95%, the rejection probability\nremains 0 until then. In the second figure, the rejection probability remains 0 until the success\nrate reaches 50%. In both cases, as success rate drops to 0%, the rejection probability approaches a\nvalue just under 100%. The aggression values dictate how high the rejection probability will be at a\ngiven request success rate, so it will shed load more *aggressively*.\n\nExample Configuration\n---------------------\nAn example filter configuration can be found below. Not all fields are required and many of the\nfields can be overridden via runtime settings.\n\n.. 
code-block:: yaml\n\n  name: envoy.filters.http.admission_control\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl\n    enabled:\n      default_value: true\n      runtime_key: \"admission_control.enabled\"\n    sampling_window: 120s\n    sr_threshold:\n      default_value: 95.0\n      runtime_key: \"admission_control.sr_threshold\"\n    aggression:\n      default_value: 1.5\n      runtime_key: \"admission_control.aggression\"\n    success_criteria:\n      http_criteria:\n        http_success_status:\n          - start: 100\n            end:   400\n          - start: 404\n            end:   404\n      grpc_criteria:\n        grpc_success_status:\n          - 0\n          - 1\n\nThe above configuration can be understood as follows:\n\n* Calculate the request success-rate over a 120s sliding window.\n* Do not begin shedding any load until the request success-rate drops below 95% in the sliding\n  window.\n* HTTP requests are considered successful if they are 1xx, 2xx, 3xx, or a 404.\n* gRPC requests are considered successful if they are OK or CANCELLED.\n\nStatistics\n----------\nThe admission control filter outputs statistics in the\n*http.<stat_prefix>.admission_control.* namespace. The :ref:`stat prefix\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stat_prefix>`\ncomes from the owning HTTP connection manager.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: auto\n\n  rq_rejected, Counter, Total requests that were not admitted by the filter.\n  rq_success, Counter, Total requests that were considered a success.\n  rq_failure, Counter, Total requests that were considered a failure.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/aws_lambda_filter.rst",
    "content": "\n.. _config_http_filters_aws_lambda:\n\nAWS Lambda\n==========\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.aws_lambda.v3.Config>`\n* This filter should be configured with the name *envoy.filters.http.aws_lambda*.\n\n.. attention::\n\n  The AWS Lambda filter is currently under active development.\n\nThe HTTP AWS Lambda filter is used to trigger an AWS Lambda function from a standard HTTP/1.x or HTTP/2 request.\nIt supports a few options to control whether to pass through the HTTP request payload as is or to wrap it in a JSON\nschema.\n\nIf :ref:`payload_passthrough <envoy_v3_api_field_extensions.filters.http.aws_lambda.v3.Config.payload_passthrough>` is set to\n``true``, then the payload is sent to Lambda without any transformations.\n*Note*: This means you lose access to all the HTTP headers in the Lambda function.\n\nHowever, if :ref:`payload_passthrough <envoy_v3_api_field_extensions.filters.http.aws_lambda.v3.Config.payload_passthrough>`\nis set to ``false``, then the HTTP request is transformed to a JSON payload with the following schema:\n\n.. code-block::\n\n    {\n        \"rawPath\": \"/path/to/resource\",\n        \"method\": \"GET|POST|HEAD|...\",\n        \"headers\": {\"header-key\": \"header-value\", ... },\n        \"queryStringParameters\": {\"key\": \"value\", ...},\n        \"body\": \"...\",\n        \"isBase64Encoded\": true|false\n    }\n\n- ``rawPath`` is the HTTP request resource path (including the query string)\n- ``method`` is the HTTP request method. For example ``GET``, ``PUT``, etc.\n- ``headers`` are the HTTP request headers. If multiple headers share the same name, their values are\n  coalesced into a single comma-separated value.\n- ``queryStringParameters`` are the HTTP request query string parameters. If multiple parameters share the same name,\n  the last one wins. 
That is, parameters are _not_ coalesced into a single value if they share the same key name.\n- ``body`` the body of the HTTP request is base64-encoded by the filter if the ``content-type`` header exists and is _not_ one of the following:\n\n    -  text/*\n    -  application/json\n    -  application/xml\n    -  application/javascript\n\nOtherwise, the body of the HTTP request is added to the JSON payload as is.\n\nOn the other end, the response of the Lambda function must conform to the following schema:\n\n.. code-block::\n\n    {\n        \"statusCode\": ...\n        \"headers\": {\"header-key\": \"header-value\", ... },\n        \"cookies\": [\"key1=value1; HttpOnly; ...\", \"key2=value2; Secure; ...\", ...],\n        \"body\": \"...\",\n        \"isBase64Encoded\": true|false\n    }\n\n- The ``statusCode`` field is an integer used as the HTTP response code. If this key is missing, Envoy returns a ``200\n  OK``.\n- The ``headers`` are used as the HTTP response headers.\n- The ``cookies`` are used as ``Set-Cookie`` response headers. Unlike the request headers, cookies are _not_ part of the\n  response headers because the ``Set-Cookie`` header cannot contain more than one value per the `RFC`_. Therefore, each\n  key/value pair in this JSON array will translate to a single ``Set-Cookie`` header.\n- The ``body`` is base64-decoded if it is marked as base64-encoded and sent as the body of the HTTP response.\n\n.. _RFC: https://tools.ietf.org/html/rfc6265#section-4.1\n\n.. note::\n\n    The target cluster must have its endpoint set to the `regional Lambda endpoint`_. Use the same region as the Lambda\n    function.\n\n    AWS IAM credentials must be defined in either environment variables, EC2 metadata or ECS task metadata.\n\n\n.. 
_regional Lambda endpoint: https://docs.aws.amazon.com/general/latest/gr/lambda-service.html\n\nThe filter supports :ref:`per-filter configuration\n<envoy_v3_api_msg_extensions.filters.http.aws_lambda.v3.PerRouteConfig>`.\n\nIf you use the per-filter configuration, the target cluster _must_ have the following metadata:\n\n.. code-block:: yaml\n\n    metadata:\n      filter_metadata:\n        com.amazonaws.lambda:\n          egress_gateway: true\n\n\nBelow are some examples that show how the filter can be used in different deployment scenarios.\n\nExample configuration\n---------------------\n\nIn this configuration, the filter applies to all routes in the filter chain of the http connection manager:\n\n.. code-block:: yaml\n\n  http_filters:\n  - name: envoy.filters.http.aws_lambda\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.aws_lambda.v3.Config\n      arn: \"arn:aws:lambda:us-west-2:987654321:function:hello_envoy\"\n      payload_passthrough: true\n\nThe corresponding regional endpoint must be specified in the target cluster. So, for example if the Lambda function is\nin us-west-2:\n\n.. code-block:: yaml\n\n  clusters:\n  - name: lambda_egress_gateway\n    connect_timeout: 0.25s\n    type: LOGICAL_DNS\n    dns_lookup_family: V4_ONLY\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: lambda_egress_gateway\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: lambda.us-west-2.amazonaws.com\n                port_value: 443\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n        sni: \"*.amazonaws.com\"\n\n\nThe filter can also be configured per virtual-host, route or weighted-cluster. In that case, the target cluster *must*\nhave specific Lambda metadata.\n\n.. 
code-block:: yaml\n\n    weighted_clusters:\n    clusters:\n    - name: lambda_egress_gateway\n      weight: 42\n      typed_per_filter_config:\n        envoy.filters.http.aws_lambda:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.http.aws_lambda.v3.PerRouteConfig\n          invoke_config:\n            arn: \"arn:aws:lambda:us-west-2:987654321:function:hello_envoy\"\n            payload_passthrough: false\n\n\nAn example with the Lambda metadata applied to a weighted-cluster:\n\n.. code-block:: yaml\n\n  clusters:\n  - name: lambda_egress_gateway\n    connect_timeout: 0.25s\n    type: LOGICAL_DNS\n    dns_lookup_family: V4_ONLY\n    lb_policy: ROUND_ROBIN\n    metadata:\n      filter_metadata:\n        com.amazonaws.lambda:\n          egress_gateway: true\n    load_assignment:\n      cluster_name: lambda_egress_gateway\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: lambda.us-west-2.amazonaws.com\n                port_value: 443\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n        sni: \"*.amazonaws.com\"\n\n\nStatistics\n----------\n\nThe AWS Lambda filter outputs statistics in the *http.<stat_prefix>.aws_lambda.* namespace. The\n:ref:`stat prefix <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stat_prefix>`\ncomes from the owning HTTP connection manager.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  server_error, Counter, Total requests that returned invalid JSON response (see :ref:`payload_passthrough <envoy_api_msg_config.filter.http.aws_lambda.v2alpha.config>`)\n  upstream_rq_payload_size, Histogram, Size in bytes of the request after JSON-transformation (if any).\n\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/aws_request_signing_filter.rst",
    "content": "\n.. _config_http_filters_aws_request_signing:\n\nAWS Request Signing\n===================\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.aws_request_signing.v3.AwsRequestSigning>`\n* This filter should be configured with the name *envoy.filters.http.aws_request_signing*.\n\n.. attention::\n\n  The AWS request signing filter is experimental and is currently under active development.\n\nThe HTTP AWS request signing filter is used to access authenticated AWS services. It uses the\nexisting AWS Credential Provider to get the secrets used for generating the required\nheaders.\n\nExample configuration\n---------------------\n\nExample filter configuration:\n\n.. code-block:: yaml\n\n  name: envoy.filters.http.aws_request_signing\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.aws_request_signing.v3.AwsRequestSigning\n    service_name: s3\n    region: us-west-2\n\n\nStatistics\n----------\n\nThe AWS request signing filter outputs statistics in the *http.<stat_prefix>.aws_request_signing.* namespace. The\n:ref:`stat prefix <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stat_prefix>`\ncomes from the owning HTTP connection manager.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  signing_added, Counter, Total authentication headers added to requests\n  signing_failed, Counter, Total requests for which a signature was not added\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/buffer_filter.rst",
    "content": ".. _config_http_filters_buffer:\n\nBuffer\n======\n\nThe buffer filter is used to stop filter iteration and wait for a fully buffered complete request.\nThis is useful in different situations including protecting some applications from having to deal\nwith partial requests and high network latency.\n\nIf enabled the buffer filter populates content-length header if it is not present in the request\nalready. The behavior can be disabled using the runtime feature\n`envoy.reloadable_features.buffer_filter_populate_content_length`.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.buffer.v3.Buffer>`\n* This filter should be configured with the name *envoy.filters.http.buffer*.\n\nPer-Route Configuration\n-----------------------\n\nThe buffer filter configuration can be overridden or disabled on a per-route basis by providing a\n:ref:`BufferPerRoute <envoy_v3_api_msg_extensions.filters.http.buffer.v3.BufferPerRoute>` configuration on\nthe virtual host, route, or weighted cluster.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/cdn_loop_filter.rst",
    "content": ".. _config_http_filters_cdn_loop:\n\nCDN-Loop header\n===============\n\nThe CDN-Loop header filter participates in the cross-CDN loop detection protocol specified by `RFC\n8586 <https://tools.ietf.org/html/rfc8586>`_. The CDN-Loop header filter performs two actions.\nFirst, the filter checks to see how many times a particular CDN identifier has appeared in the\nCDN-Loop header. Next, if the check passes, the filter then appends the CDN identifier to the\nCDN-Loop header and passes the request to the next upstream filter. If the check fails, the filter\nstops processing on the request and returns an error response.\n\nRFC 8586 is particular in how the CDN-Loop header should be modified. As such:\n\n* other filters in the filter chain should not modify the CDN-Loop header and\n* the HTTP route configuration's :ref:`request_headers_to_add\n  <envoy_v3_api_field_config.route.v3.RouteConfiguration.request_headers_to_add>` or\n  :ref:`request_headers_to_remove\n  <envoy_v3_api_field_config.route.v3.RouteConfiguration.request_headers_to_remove>` fields should\n  not contain the CDN-Loop header.\n\nThe filter will coalesce multiple CDN-Loop headers into a single, comma-separated header.\n\nConfiguration\n-------------\n\nThe filter is configured with the name *envoy.filters.http.cdn_loop*.\n\nThe `filter config <config_http_filters_cdn_loop>`_ has two fields.\n\n* The *cdn_id* field sets the identifier that the filter will look for within and append to the\n  CDN-Loop header. RFC 8586 calls this field the \"cdn-id\"; \"cdn-id\" can either be a pseudonym or a\n  hostname the CDN provider has control of. The *cdn_id* field must not be empty.\n* The *max_allowed_occurrences* field controls how many times *cdn_id* can appear in the CDN-Loop\n  header on downstream requests (before the filter appends *cdn_id* to the header). If the *cdn_id*\n  appears more than *max_allowed_occurrences* times in the header, the filter will reject the\n  downstream's request. 
Most users should configure *max_allowed_occurrences* to be 0 (the\n  default).\n\nResponse Code Details\n---------------------\n\n.. list-table::\n   :header-rows: 1\n\n   * - Name\n     - HTTP Status\n     - Description\n   * - invalid_cdn_loop_header\n     - 400 (Bad Request)\n     - The CDN-Loop header in the downstream is invalid or unparseable.\n   * - cdn_loop_detected\n     - 502 (Bad Gateway)\n     - The *cdn_id* value appears more than *max_allowed_occurrences* in the CDN-Loop header,\n       indicating a loop between CDNs.\n\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/compressor_filter.rst",
    "content": ".. _config_http_filters_compressor:\n\nCompressor\n==========\nCompressor is an HTTP filter which enables Envoy to compress dispatched data\nfrom an upstream service upon client request. Compression is useful in\nsituations when bandwidth is scarce and large payloads can be effectively compressed\nat the expense of higher CPU load or offloading it to a compression accelerator.\n\n.. note::\n\n This filter deprecates the :ref:`HTTP Gzip filter <config_http_filters_gzip>`.\n\nConfiguration\n-------------\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.compressor.v3.Compressor>`\n* This filter should be configured with the name *envoy.filters.http.compressor*.\n\nHow it works\n------------\nWhen compressor filter is enabled, request and response headers are inspected to\ndetermine whether or not the content should be compressed. The content is\ncompressed and then sent to the client with the appropriate headers, if\nresponse and request allow.\n\nCurrently the filter supports :ref:`gzip compression <envoy_v3_api_msg_extensions.compression.gzip.compressor.v3.Gzip>`\nonly. Other compression libraries can be supported as extensions.\n\nAn example configuration of the filter may look like the following:\n\n.. 
code-block:: yaml\n\n    http_filters:\n    - name: envoy.filters.http.compressor\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor\n        disable_on_etag_header: true\n        content_length: 100\n        content_type:\n          - text/html\n          - application/json\n        compressor_library:\n          name: text_optimized\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\n            memory_level: 3\n            window_bits: 10\n            compression_level: best_compression\n            compression_strategy: default_strategy\n\nBy *default* compression will be *skipped* when:\n\n- A request does NOT contain *accept-encoding* header.\n- A request includes *accept-encoding* header, but it does not contain \"gzip\" or \"\\*\".\n- A request includes *accept-encoding* with \"gzip\" or \"\\*\" with the weight \"q=0\". Note\n  that the \"gzip\" will have a higher weight than \"\\*\". For example, if *accept-encoding*\n  is \"gzip;q=0,\\*;q=1\", the filter will not compress. 
But if the header is set to\n  \"\\*;q=0,gzip;q=1\", the filter will compress.\n- A request whose *accept-encoding* header includes any encoding type with a higher\n  weight than \"gzip\"'s given the corresponding compression filter is present in the chain.\n- A response contains a *content-encoding* header.\n- A response contains a *cache-control* header whose value includes \"no-transform\".\n- A response contains a *transfer-encoding* header whose value includes \"gzip\".\n- A response does not contain a *content-type* value that matches one of the selected\n  mime-types, which default to *application/javascript*, *application/json*,\n  *application/xhtml+xml*, *image/svg+xml*, *text/css*, *text/html*, *text/plain*,\n  *text/xml*.\n- Neither *content-length* nor *transfer-encoding* headers are present in\n  the response.\n- Response size is smaller than 30 bytes (only applicable when *transfer-encoding*\n  is not chunked).\n\nPlease note that in case the filter is configured to use a compression library extension\nother than gzip it looks for content encoding in the *accept-encoding* header provided by\nthe extension.\n\nWhen compression is *applied*:\n\n- The *content-length* is removed from response headers.\n- Response headers contain \"*transfer-encoding: chunked*\" and do not contain\n  \"*content-encoding*\" header.\n- The \"*vary: accept-encoding*\" header is inserted on every response.\n\nAlso the \"*vary: accept-encoding*\" header may be inserted even if compression is *not*\napplied due to incompatible \"*accept-encoding*\" header in a request. This happens\nwhen the requested resource still can be compressed given compatible \"*accept-encoding*\".\nOtherwise, if an uncompressed response is cached by a caching proxy in front of Envoy,\nthe proxy won't know to fetch a new incoming request with compatible \"*accept-encoding*\"\nfrom upstream.\n\n.. 
_compressor-statistics:\n\nStatistics\n----------\n\nEvery configured Compressor filter has statistics rooted at\n<stat_prefix>.compressor.<compressor_library.name>.<compressor_library_stat_prefix>.*\nwith the following:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  compressed, Counter, Number of requests compressed.\n  not_compressed, Counter, Number of requests not compressed.\n  no_accept_header, Counter, Number of requests with no accept header sent.\n  header_identity, Counter, Number of requests sent with \"identity\" set as the *accept-encoding*.\n  header_compressor_used, Counter, Number of requests sent with \"gzip\" set as the *accept-encoding*.\n  header_compressor_overshadowed, Counter, Number of requests skipped by this filter instance because they were handled by another filter in the same filter chain.\n  header_wildcard, Counter, Number of requests sent with \"\\*\" set as the *accept-encoding*.\n  header_not_valid, Counter, Number of requests sent with a not valid *accept-encoding* header (aka \"q=0\" or an unsupported encoding type).\n  total_uncompressed_bytes, Counter, The total uncompressed bytes of all the requests that were marked for compression.\n  total_compressed_bytes, Counter, The total compressed bytes of all the requests that were marked for compression.\n  content_length_too_small, Counter, Number of requests that accepted gzip encoding but did not compress because the payload was too small.\n  not_compressed_etag, Counter, Number of requests that were not compressed due to the etag header. *disable_on_etag_header* must be turned on for this to happen.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/cors_filter.rst",
    "content": ".. _config_http_filters_cors:\n\nCORS\n====\n\nThis is a filter which handles Cross-Origin Resource Sharing requests based on route or virtual host settings.\nFor the meaning of the headers please refer to the pages below.\n\n* https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS\n* https://www.w3.org/TR/cors/\n* :ref:`v3 API reference <envoy_v3_api_msg_config.route.v3.CorsPolicy>`\n* This filter should be configured with the name *envoy.filters.http.cors*.\n\n.. _cors-runtime:\n\nRuntime\n-------\nThe fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key\n<envoy_v3_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` value of the :ref:`filter_enabled\n<envoy_v3_api_field_config.route.v3.CorsPolicy.filter_enabled>` field.\n\nThe fraction of requests for which the filter is enabled in shadow-only mode can be configured via\nthe :ref:`runtime_key <envoy_v3_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` value of the\n:ref:`shadow_enabled <envoy_v3_api_field_config.route.v3.CorsPolicy.shadow_enabled>` field. When enabled in\nshadow-only mode, the filter will evaluate the request's *Origin* to determine if it's valid but\nwill not enforce any policies.\n\n.. note::\n\n  If both ``filter_enabled`` and ``shadow_enabled`` are on, the ``filter_enabled``\n  flag will take precedence.\n\n.. _cors-statistics:\n\nStatistics\n----------\n\nThe CORS filter outputs statistics in the <stat_prefix>.cors.* namespace.\n\n.. note::\n  Requests that do not have an Origin header will be omitted from statistics.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  origin_valid, Counter, Number of requests that have a valid Origin header.\n  origin_invalid, Counter, Number of requests that have an invalid Origin header.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/csrf_filter.rst",
    "content": ".. _config_http_filters_csrf:\n\nCSRF\n====\n\nThis is a filter which prevents Cross-Site Request Forgery based on route or virtual host settings.\nAt its simplest, CSRF is an attack that occurs when a malicious third-party\nexploits a vulnerability that allows them to submit an undesired request on the\nuser's behalf.\n\nA real-life example is cited in section 1 of `Robust Defenses for Cross-Site Request Forgery <https://seclab.stanford.edu/websec/csrf/csrf.pdf>`_:\n\n    \"For example, in late 2007 [42], Gmail had a CSRF vulnerability. When a Gmail user visited\n    a malicious site, the malicious site could generate a request to Gmail that Gmail treated\n    as part of its ongoing session with the victim. In November 2007, a web attacker exploited\n    this CSRF vulnerability to inject an email filter into David Airey’s Gmail account [1].\"\n\nThere are many ways to mitigate CSRF, some of which have been outlined in the\n`OWASP Prevention Cheat Sheet <https://github.com/OWASP/CheatSheetSeries/blob/5a1044e38778b42a19c6adbb4dfef7a0fb071099/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.md>`_.\nThis filter employs a stateless mitigation pattern known as origin verification.\n\nThis pattern relies on two pieces of information used in determining if\na request originated from the same host.\n* The origin that caused the user agent to issue the request (source origin).\n* The origin that the request is going to (target origin).\n\nWhen the filter is evaluating a request, it ensures both pieces of information are present\nand compares their values. If the source origin is missing or the origins do not match\nthe request is rejected. The exception to this being if the source origin has been\nadded to the policy as valid. Because CSRF attacks specifically target state-changing\nrequests, the filter only acts on HTTP requests that have a state-changing method\n(POST, PUT, etc.).\n\n  .. 
note::\n    Due to differing functionality between browsers this filter will determine\n    a request's source origin from the Origin header. If that is not present it will\n    fall back to the host and port value from the requests Referer header.\n\n\nFor more information on CSRF please refer to the pages below.\n\n* https://www.owasp.org/index.php/Cross-Site_Request_Forgery_%28CSRF%29\n* https://seclab.stanford.edu/websec/csrf/csrf.pdf\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.csrf.v3.CsrfPolicy>`\n\n  .. note::\n\n    This filter should be configured with the name *envoy.filters.http.csrf*.\n\n.. _csrf-configuration:\n\nConfiguration\n-------------\n\nThe CSRF filter supports the ability to extend the source origins it will consider\nvalid. The reason it is able to do this while still mitigating cross-site request\nforgery attempts is because the target origin has already been reached by the time\nfront-envoy is applying the filter. This means that while endpoints may support\ncross-origin requests they are still protected from malicious third-parties who\nhave not been allowlisted.\n\nIt's important to note that requests should generally originate from the same\norigin as the target but there are use cases where that may not be possible.\nFor example, if you are hosting a static site on a third-party vendor but need\nto make requests for tracking purposes.\n\n.. warning::\n\n  Additional origins can be either an exact string, regex pattern, prefix string,\n  or suffix string. It's advised to be cautious when adding regex, prefix, or suffix\n  origins since an ambiguous origin can pose a security vulnerability.\n\n.. 
_csrf-runtime:\n\nRuntime\n-------\n\nThe fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key\n<envoy_v3_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` value of the :ref:`filter_enabled\n<envoy_v3_api_field_extensions.filters.http.csrf.v3.CsrfPolicy.filter_enabled>` field.\n\nThe fraction of requests for which the filter is enabled in shadow-only mode can be configured via\nthe :ref:`runtime_key <envoy_v3_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` value of the\n:ref:`shadow_enabled <envoy_v3_api_field_extensions.filters.http.csrf.v3.CsrfPolicy.shadow_enabled>` field.\nWhen enabled in shadow-only mode, the filter will evaluate the request's *Origin* and *Destination*\nto determine if it's valid but will not enforce any policies.\n\n.. note::\n\n  If both ``filter_enabled`` and ``shadow_enabled`` are on, the ``filter_enabled``\n  flag will take precedence.\n\n.. _csrf-statistics:\n\nStatistics\n----------\n\nThe CSRF filter outputs statistics in the <stat_prefix>.csrf.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  missing_source_origin, Counter, Number of requests that are missing a source origin header.\n  request_invalid, Counter, Number of requests whose source and target origins do not match.\n  request_valid, Counter, Number of requests whose source and target origins match.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/decompressor_filter.rst",
    "content": ".. _config_http_filters_decompressor:\n\nDecompressor\n============\nDecompressor is an HTTP filter which enables Envoy to bidirectionally decompress data.\n\n\nConfiguration\n-------------\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.decompressor.v3.Decompressor>`\n\nHow it works\n------------\nWhen the decompressor filter is enabled, headers are inspected to\ndetermine whether or not the content should be decompressed. The content is\ndecompressed and passed on to the rest of the filter chain. Note that decompression happens\nindependently for request and responses based on the rules described below.\n\nCurrently the filter supports :ref:`gzip compression <envoy_v3_api_msg_extensions.compression.gzip.decompressor.v3.Gzip>`\nonly. Other compression libraries can be supported as extensions.\n\nAn example configuration of the filter may look like the following:\n\n.. code-block:: yaml\n\n    http_filters:\n    - name: decompressor\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor\n        decompressor_library:\n          name: basic\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\n            window_bits: 10\n\nBy *default* decompression will be *skipped* when:\n\n- A request/response does NOT contain *content-encoding* header.\n- A request/response includes *content-encoding* header, but it does not contain the configured\n  decompressor's content-encoding.\n- A request/response contains a *cache-control* header whose value includes \"no-transform\".\n\nWhen decompression is *applied*:\n\n- The *content-length* is removed from headers.\n\n  .. note::\n\n    If an updated *content-length* header is desired, the buffer filter can be installed as part\n    of the filter chain to buffer decompressed frames, and ultimately update the header. 
Due to\n    :ref:`filter ordering <arch_overview_http_filters_ordering>` a buffer filter needs to be\n    installed after the decompressor for requests and prior to the decompressor for responses.\n\n- The *content-encoding* header is modified to remove the decompression that was applied.\n\n- *x-envoy-decompressor-<decompressor_name>-<compressed/uncompressed>-bytes* trailers are added to\n  the request/response to relay information about decompression.\n\n.. _decompressor-statistics:\n\nUsing different decompressors for requests and responses\n--------------------------------------------------------\n\nIf different compression libraries are desired for requests and responses, it is possible to install\nmultiple decompressor filters enabled only for requests or responses. For instance:\n\n.. code-block:: yaml\n\n  http_filters:\n  # This filter is only enabled for requests.\n  - name: envoy.filters.http.decompressor\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor\n      decompressor_library:\n        name: small\n        typed_config:\n          \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\"\n          window_bits: 9\n          chunk_size: 8192\n      response_direction_config:\n        common_config:\n          enabled:\n            default_value: false\n            runtime_key: response_decompressor_enabled\n  # This filter is only enabled for responses.\n  - name: envoy.filters.http.decompressor\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor\n      decompressor_library:\n        name: large\n        typed_config:\n          \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\"\n          window_bits: 12\n          chunk_size: 16384\n      request_direction_config:\n        common_config:\n          enabled:\n            default_value: false\n         
   runtime_key: request_decompressor_enabled\n\nStatistics\n----------\n\nEvery configured Decompressor filter has statistics rooted at\n<stat_prefix>.decompressor.<decompressor_library.name>.<decompressor_library_stat_prefix>.<request/response>*\nwith the following:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  decompressed, Counter, Number of request/responses decompressed.\n  not_decompressed, Counter, Number of request/responses not decompressed.\n  total_uncompressed_bytes, Counter, The total uncompressed bytes of all the request/responses that were marked for decompression.\n  total_compressed_bytes, Counter, The total compressed bytes of all the request/responses that were marked for decompression.\n\nAdditional stats for the decompressor library are rooted at\n<stat_prefix>.decompressor.<decompressor_library.name>.<decompressor_library_stat_prefix>.decompressor_library.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst",
    "content": ".. _config_http_filters_dynamic_forward_proxy:\n\nDynamic forward proxy\n=====================\n\n* HTTP dynamic forward proxy :ref:`architecture overview <arch_overview_http_dynamic_forward_proxy>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig>`\n* This filter should be configured with the name *envoy.filters.http.dynamic_forward_proxy*\n\nThe following is a complete configuration that configures both the\n:ref:`dynamic forward proxy HTTP filter\n<envoy_v3_api_msg_extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig>`\nas well as the :ref:`dynamic forward proxy cluster\n<envoy_v3_api_msg_extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig>`. Both filter and cluster\nmust be configured together and point to the same DNS cache parameters for Envoy to operate as an\nHTTP dynamic forward proxy.\n\nThis filter supports :ref:`host rewrite <envoy_v3_api_msg_extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig>`\nvia the :ref:`virtual host's typed_per_filter_config <envoy_v3_api_field_config.route.v3.VirtualHost.typed_per_filter_config>` or the\n:ref:`route's typed_per_filter_config <envoy_v3_api_field_config.route.v3.Route.typed_per_filter_config>`. This can be used to rewrite\nthe host header with the provided value before DNS lookup, thus allowing to route traffic to the rewritten\nhost when forwarding. See the example below within the configured routes.\n\n.. note::\n\n  Configuring a :ref:`transport_socket with name envoy.transport_sockets.tls <envoy_v3_api_field_config.cluster.v3.Cluster.transport_socket>` on the cluster with\n  *trusted_ca* certificates instructs Envoy to use TLS when connecting to upstream hosts and verify\n  the certificate chain. Additionally, Envoy will automatically perform SAN verification for the\n  resolved host name as well as specify the host name via SNI.\n\n.. 
_dns_cache_circuit_breakers:\n\n  Dynamic forward proxy uses circuit breakers built in to the DNS cache with the configuration\n  of :ref:`DNS cache circuit breakers <envoy_v3_api_field_extensions.common.dynamic_forward_proxy.v3.DnsCacheConfig.dns_cache_circuit_breaker>`. By default, this behavior is enabled by the runtime feature `envoy.reloadable_features.enable_dns_cache_circuit_breakers`.\n  If this runtime feature is disabled, cluster circuit breakers will be used even when setting the configuration\n  of :ref:`DNS cache circuit breakers <envoy_v3_api_field_extensions.common.dynamic_forward_proxy.v3.DnsCacheConfig.dns_cache_circuit_breaker>`.\n\n.. literalinclude:: _include/dns-cache-circuit-breaker.yaml\n    :language: yaml\n\nStatistics\n----------\n\nThe dynamic forward proxy DNS cache outputs statistics in the dns_cache.<dns_cache_name>.*\nnamespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  dns_query_attempt, Counter, Number of DNS query attempts.\n  dns_query_success, Counter, Number of DNS query successes.\n  dns_query_failure, Counter, Number of DNS query failures.\n  host_address_changed, Counter, Number of DNS queries that resulted in a host address change.\n  host_added, Counter, Number of hosts that have been added to the cache.\n  host_removed, Counter, Number of hosts that have been removed from the cache.\n  num_hosts, Gauge, Number of hosts that are currently in the cache.\n  dns_rq_pending_overflow, Counter, Number of dns pending request overflow.\n\nThe dynamic forward proxy DNS cache circuit breakers outputs statistics in the dns_cache.<dns_cache_name>.circuit_breakers*\nnamespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  rq_pending_open, Gauge, Whether the requests circuit breaker is closed (0) or open (1)\n  rq_pending_remaining, Gauge, Number of remaining requests until the circuit breaker opens\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/dynamodb_filter.rst",
    "content": ".. _config_http_filters_dynamo:\n\nDynamoDB\n========\n\n* DynamoDB :ref:`architecture overview <arch_overview_dynamo>`\n* :ref:`v3 API reference <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`\n* This filter should be configured with the name *envoy.filters.http.dynamo*.\n\nStatistics\n----------\n\nThe DynamoDB filter outputs statistics in the *http.<stat_prefix>.dynamodb.* namespace. The :ref:`stat prefix\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stat_prefix>` comes from the\nowning HTTP connection manager.\n\nPer operation stats can be found in the *http.<stat_prefix>.dynamodb.operation.<operation_name>.*\nnamespace.\n\n  .. csv-table::\n    :header: Name, Type, Description\n    :widths: 1, 1, 2\n\n    upstream_rq_total, Counter, Total number of requests with <operation_name>\n    upstream_rq_time, Histogram, Time spent on <operation_name>\n    upstream_rq_total_xxx, Counter, Total number of requests with <operation_name> per response code (503/2xx/etc)\n    upstream_rq_time_xxx, Histogram, Time spent on <operation_name> per response code (400/3xx/etc)\n\nPer table stats can be found in the *http.<stat_prefix>.dynamodb.table.<table_name>.* namespace.\nMost of the operations to DynamoDB involve a single table, but BatchGetItem and BatchWriteItem can\ninclude several tables, Envoy tracks per table stats in this case only if it is the same table used\nin all operations from the batch.\n\n  .. 
csv-table::\n    :header: Name, Type, Description\n    :widths: 1, 1, 2\n\n    upstream_rq_total, Counter, Total number of requests on <table_name> table\n    upstream_rq_time, Histogram, Time spent on <table_name> table\n    upstream_rq_total_xxx, Counter, Total number of requests on <table_name> table per response code (503/2xx/etc)\n    upstream_rq_time_xxx, Histogram, Time spent on <table_name> table per response code (400/3xx/etc)\n\n*Disclaimer: Please note that this is a pre-release Amazon DynamoDB feature that is not yet widely available.*\nPer partition and operation stats can be found in the *http.<stat_prefix>.dynamodb.table.<table_name>.*\nnamespace. For batch operations, Envoy tracks per partition and operation stats only if it is the same\ntable used in all operations.\n\n  .. csv-table::\n    :header: Name, Type, Description\n    :widths: 1, 1, 2\n\n    capacity.<operation_name>.__partition_id=<last_seven_characters_from_partition_id>, Counter, Total number of capacity for <operation_name> on <table_name> table for a given <partition_id>\n\nAdditional detailed stats:\n\n* For 4xx responses and partial batch operation failures, the total number of failures for a given\n  table and failure are tracked in the *http.<stat_prefix>.dynamodb.error.<table_name>.* namespace.\n\n  .. csv-table::\n    :header: Name, Type, Description\n    :widths: 1, 1, 2\n\n    <error_type>, Counter, Total number of specific <error_type> for a given <table_name>\n    BatchFailureUnprocessedKeys, Counter, Total number of partial batch failures for a given <table_name>\n\nRuntime\n-------\n\nThe DynamoDB filter supports the following runtime settings:\n\ndynamodb.filter_enabled\n  The % of requests for which the filter is enabled. Default is 100%.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/ext_authz_filter.rst",
    "content": ".. _config_http_filters_ext_authz:\n\nExternal Authorization\n======================\n* External authorization :ref:`architecture overview <arch_overview_ext_authz>`\n* :ref:`HTTP filter v3 API reference <envoy_v3_api_msg_extensions.filters.http.ext_authz.v3.ExtAuthz>`\n* This filter should be configured with the name *envoy.filters.http.ext_authz*.\n\nThe external authorization filter calls an external gRPC or HTTP service to check whether an incoming\nHTTP request is authorized or not.\nIf the request is deemed unauthorized, then the request will be denied normally with 403 (Forbidden) response.\nNote that sending additional custom metadata from the authorization service to the upstream, to the downstream or to the authorization service is\nalso possible. This is explained in more details at :ref:`HTTP filter <envoy_v3_api_msg_extensions.filters.http.ext_authz.v3.ExtAuthz>`.\n\nThe content of the requests that are passed to an authorization service is specified by\n:ref:`CheckRequest <envoy_v3_api_msg_service.auth.v3.CheckRequest>`.\n\n.. _config_http_filters_ext_authz_http_configuration:\n\nThe HTTP filter, using a gRPC/HTTP service, can be configured as follows. You can see all the\nconfiguration options at\n:ref:`HTTP filter <envoy_v3_api_msg_extensions.filters.http.ext_authz.v3.ExtAuthz>`.\n\nConfiguration Examples\n----------------------\n\nA sample filter configuration for a gRPC authorization server:\n\n.. code-block:: yaml\n\n  http_filters:\n    - name: envoy.filters.http.ext_authz\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n        grpc_service:\n          envoy_grpc:\n            cluster_name: ext-authz\n\n          # Default is 200ms; override if your server needs e.g. warmup time.\n          timeout: 0.5s\n        include_peer_certificate: true\n\n.. 
code-block:: yaml\n\n  clusters:\n    - name: ext-authz\n      type: static\n      http2_protocol_options: {}\n      load_assignment:\n        cluster_name: ext-authz\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 10003\n\n      # This timeout controls the initial TCP handshake timeout - not the timeout for the\n      # entire request.\n      connect_timeout: 0.25s\n\n.. note::\n\n  One of the features of this filter is to send HTTP request body to the configured gRPC\n  authorization server as part of the :ref:`check request\n  <envoy_v3_api_msg_service.auth.v3.CheckRequest>`.\n\n  A sample configuration is as follows:\n\n  .. code:: yaml\n\n    http_filters:\n      - name: envoy.filters.http.ext_authz\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n          grpc_service:\n            envoy_grpc:\n              cluster_name: ext-authz\n          with_request_body:\n            max_request_bytes: 1024\n            allow_partial_message: true\n            pack_as_bytes: true\n\n  Please note that by default :ref:`check request<envoy_v3_api_msg_service.auth.v3.CheckRequest>`\n  carries the HTTP request body as UTF-8 string and it fills the :ref:`body\n  <envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` field. To pack the request\n  body as raw bytes, it is needed to set :ref:`pack_as_bytes\n  <envoy_v3_api_field_extensions.filters.http.ext_authz.v3.BufferSettings.pack_as_bytes>` field to\n  true. In effect to that, the :ref:`raw_body\n  <envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.raw_body>`\n  field will be set and :ref:`body\n  <envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` field will be empty.\n\nA sample filter configuration for a raw HTTP authorization server:\n\n.. 
code-block:: yaml\n\n  http_filters:\n    - name: envoy.filters.http.ext_authz\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n        http_service:\n            server_uri:\n              uri: 127.0.0.1:10003\n              cluster: ext-authz\n              timeout: 0.25s\n              failure_mode_allow: false\n        include_peer_certificate: true\n\n.. code-block:: yaml\n\n  clusters:\n    - name: ext-authz\n      connect_timeout: 0.25s\n      type: logical_dns\n      lb_policy: round_robin\n      load_assignment:\n        cluster_name: ext-authz\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 10003\n\nPer-Route Configuration\n-----------------------\n\nA sample virtual host and route filter configuration.\nIn this example we add additional context on the virtual host, and disabled the filter for `/static` prefixed routes.\n\n.. code-block:: yaml\n\n  route_config:\n    name: local_route\n    virtual_hosts:\n    - name: local_service\n      domains: [\"*\"]\n      typed_per_filter_config:\n        envoy.filters.http.ext_authz:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n          check_settings:\n            context_extensions:\n              virtual_host: local_service\n      routes:\n      - match: { prefix: \"/static\" }\n        route: { cluster: some_service }\n        typed_per_filter_config:\n          envoy.filters.http.ext_authz:\n            \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n            disabled: true\n      - match: { prefix: \"/\" }\n        route: { cluster: some_service }\n\nStatistics\n----------\n.. 
_config_http_filters_ext_authz_stats:\n\nThe HTTP filter outputs statistics in the *cluster.<route target cluster>.ext_authz.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  ok, Counter, Total responses from the filter.\n  error, Counter, Total errors (including timeouts) contacting the external service.\n  timeout, Counter, Total timeouts contacting the external service (only counted when timeout is measured when check request is created).\n  denied, Counter, Total responses from the authorizations service that were to deny the traffic.\n  disabled, Counter, Total requests that are allowed without calling external services due to the filter is disabled.\n  failure_mode_allowed, Counter, \"Total requests that were error(s) but were allowed through because\n  of failure_mode_allow set to true.\"\n\nDynamic Metadata\n----------------\n.. _config_http_filters_ext_authz_dynamic_metadata:\n\n.. note::\n\n  The External Authorization filter emits dynamic metadata only when it is configured to use\n  gRPC service as the authorization server.\n\nThe External Authorization filter emits dynamic metadata as an opaque ``google.protobuf.Struct``\n*only* when the gRPC authorization server returns a :ref:`CheckResponse\n<envoy_v3_api_msg_service.auth.v3.CheckResponse>` with a filled :ref:`dynamic_metadata\n<envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>` field.\n\nRuntime\n-------\nThe fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key\n<envoy_v3_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` value of the :ref:`filter_enabled\n<envoy_v3_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.filter_enabled>` field.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/fault_filter.rst",
    "content": ".. _config_http_filters_fault_injection:\n\nFault Injection\n===============\n\nThe fault injection filter can be used to test the resiliency of\nmicroservices to different forms of failures. The filter can be used to\ninject delays and abort requests with user-specified error codes, thereby\nproviding the ability to stage different failure scenarios such as service\nfailures, service overloads, high network latency, network partitions,\netc. Faults injection can be limited to a specific set of requests based on\nthe (destination) upstream cluster of a request and/or a set of pre-defined\nrequest headers.\n\nThe scope of failures is restricted to those that are observable by an\napplication communicating over the network. CPU and disk failures on the\nlocal host cannot be emulated.\n\nConfiguration\n-------------\n\n.. note::\n\n  The fault injection filter must be inserted before any other filter,\n  including the router filter.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.fault.v3.HTTPFault>`\n* This filter should be configured with the name *envoy.filters.http.fault*.\n\n.. _config_http_filters_fault_injection_http_header:\n\nControlling fault injection via HTTP headers\n--------------------------------------------\n\nThe fault filter has the capability to allow fault configuration to be specified by the caller.\nThis is useful in certain scenarios in which it is desired to allow the client to specify its own\nfault configuration. The currently supported header controls are:\n\nx-envoy-fault-abort-request\n  HTTP status code to abort a request with. The header value should be an integer that specifies\n  the HTTP status code to return in response to a request and must be in the range [200, 600). 
\n  In order for the header to work, :ref:`header_abort\n  <envoy_v3_api_field_extensions.filters.http.fault.v3.FaultAbort.header_abort>` needs to be set.\n\nx-envoy-fault-abort-grpc-request\n  gRPC status code to abort a request with. The header value should be a non-negative integer that specifies\n  the gRPC status code to return in response to a request. Its value range is [0, UInt32.Max] instead of [0, 16]\n  to allow testing even not well-defined gRPC status codes. When this header is set, the HTTP response status code\n  will be set to 200. In order for the header to work, :ref:`header_abort\n  <envoy_api_field_config.filter.http.fault.v2.FaultAbort.header_abort>` needs to be set. If both \n  *x-envoy-fault-abort-request* and *x-envoy-fault-abort-grpc-request* headers are set then \n  *x-envoy-fault-abort-grpc-request* header will be **ignored** and fault response http status code will be\n  set to *x-envoy-fault-abort-request* header value.\n\nx-envoy-fault-abort-request-percentage\n  The percentage of requests that should be failed with a status code that's defined\n  by the value of *x-envoy-fault-abort-request* or *x-envoy-fault-abort-grpc-request* HTTP headers.\n  The header value should be an integer that specifies the numerator of the percentage of request to apply aborts\n  to and must be greater or equal to 0 and its maximum value is capped by the value of the numerator of\n  :ref:`percentage <envoy_v3_api_field_extensions.filters.http.fault.v3.FaultAbort.percentage>` field.\n  Percentage's denominator is equal to default percentage's denominator\n  :ref:`percentage <envoy_v3_api_field_extensions.filters.http.fault.v3.FaultAbort.percentage>` field.\n  In order for the header to work, :ref:`header_abort\n  <envoy_v3_api_field_extensions.filters.http.fault.v3.FaultAbort.header_abort>` needs to be set and\n  either *x-envoy-fault-abort-request* or *x-envoy-fault-abort-grpc-request* HTTP header needs to be a part of the 
request.\n\nx-envoy-fault-delay-request\n  The duration to delay a request by. The header value should be an integer that specifies the number\n  of milliseconds to throttle the latency for. In order for the header to work, :ref:`header_delay\n  <envoy_v3_api_field_extensions.filters.common.fault.v3.FaultDelay.header_delay>` needs to be set.\n\nx-envoy-fault-delay-request-percentage\n  The percentage of requests that should be delayed by a duration that's defined by the value of\n  *x-envoy-fault-delay-request* HTTP header. The header value should be an integer that\n  specifies the percentage of request to apply delays to and must be greater\n  or equal to 0 and its maximum value is capped by the value of the numerator of\n  :ref:`percentage <envoy_v3_api_field_extensions.filters.common.fault.v3.FaultDelay.percentage>` field.\n  Percentage's denominator is equal to default percentage's denominator\n  :ref:`percentage <envoy_v3_api_field_extensions.filters.common.fault.v3.FaultDelay.percentage>` field.\n  In order for the header to work, :ref:`header_delay\n  <envoy_v3_api_field_extensions.filters.common.fault.v3.FaultDelay.header_delay>` needs to be set and\n  *x-envoy-fault-delay-request* HTTP header needs to be a part of a request.\n\nx-envoy-fault-throughput-response\n  The rate limit to use when a response to a caller is sent. The header value should be an integer\n  that specifies the limit in KiB/s and must be > 0. In order for the header to work, :ref:`header_limit\n  <envoy_v3_api_field_extensions.filters.common.fault.v3.FaultRateLimit.header_limit>` needs to be set.\n\nx-envoy-fault-throughput-response-percentage\n  The percentage of requests whose response rate should be limited to the value of\n  *x-envoy-fault-throughput-response* HTTP header. 
The header value should be an integer that\n  specifies the percentage of request to apply rate limits to and must be greater\n  or equal to 0 and its maximum value is capped by the value of the numerator of\n  :ref:`percentage <envoy_v3_api_field_extensions.filters.common.fault.v3.FaultRateLimit.percentage>` field.\n  Percentage's denominator is equal to default percentage's denominator\n  :ref:`percentage <envoy_v3_api_field_extensions.filters.common.fault.v3.FaultRateLimit.percentage>` field.\n  In order for the header to work, :ref:`header_limit\n  <envoy_v3_api_field_extensions.filters.common.fault.v3.FaultRateLimit.header_limit>` needs to be set and\n  *x-envoy-fault-throughput-response* HTTP header needs to be a part of a request.\n\n.. attention::\n\n  Allowing header control is inherently dangerous if exposed to untrusted clients. In this case,\n  it is suggested to use the :ref:`max_active_faults\n  <envoy_v3_api_field_extensions.filters.http.fault.v3.HTTPFault.max_active_faults>` setting to limit the\n  maximum concurrent faults that can be active at any given time.\n\nThe following is an example configuration that enables header control for both of the above\noptions:\n\n.. code-block:: yaml\n\n  name: envoy.filters.http.fault\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault\n    max_active_faults: 100\n    abort:\n      header_abort: {}\n      percentage:\n        numerator: 100\n    delay:\n      header_delay: {}\n      percentage:\n        numerator: 100\n    response_rate_limit:\n      header_limit: {}\n      percentage:\n        numerator: 100\n\n.. _config_http_filters_fault_injection_runtime:\n\nRuntime\n-------\n\nThe HTTP fault injection filter supports the following global runtime settings:\n\n.. attention::\n\n  Some of the following runtime keys require the filter to be configured for the specific fault\n  type and some do not. 
Please consult the documentation for each key for more information.\n\nfault.http.abort.abort_percent\n  % of requests that will be aborted if the headers match. Defaults to the\n  *abort_percent* specified in config. If the config does not contain an\n  *abort* block, then *abort_percent* defaults to 0. For historic reasons, this runtime key is\n  available regardless of whether the filter is :ref:`configured for abort\n  <envoy_v3_api_field_extensions.filters.http.fault.v3.HTTPFault.abort>`.\n\nfault.http.abort.http_status\n  HTTP status code that will be used as the response status code of requests that will be\n  aborted if the headers match. Defaults to the HTTP status code specified\n  in the config. If the config does not contain an *abort* block, then\n  *http_status* defaults to 0. For historic reasons, this runtime key is\n  available regardless of whether the filter is :ref:`configured for abort\n  <envoy_v3_api_field_extensions.filters.http.fault.v3.HTTPFault.abort>`.\n\nfault.http.abort.grpc_status\n  gRPC status code that will be used as the response status code of requests that will be\n  aborted if the headers match. Defaults to the gRPC status code specified in the config.\n  If this field is missing from both the runtime and the config, gRPC status code in the response\n  will be derived from *fault.http.abort.http_status* field. This runtime key is only available when\n  the filter is :ref:`configured for abort <envoy_api_field_config.filter.http.fault.v2.HTTPFault.abort>`.\n\nfault.http.delay.fixed_delay_percent\n  % of requests that will be delayed if the headers match. Defaults to the\n  *delay_percent* specified in the config or 0 otherwise. This runtime key is only available when\n  the filter is :ref:`configured for delay\n  <envoy_v3_api_field_extensions.filters.http.fault.v3.HTTPFault.delay>`.\n\nfault.http.delay.fixed_duration_ms\n  The delay duration in milliseconds. 
If not specified, the\n  *fixed_duration_ms* specified in the config will be used. If this field\n  is missing from both the runtime and the config, no delays will be\n  injected. This runtime key is only available when the filter is :ref:`configured for delay\n  <envoy_v3_api_field_extensions.filters.http.fault.v3.HTTPFault.delay>`.\n\nfault.http.max_active_faults\n  The maximum number of active faults (of all types) that Envoy will inject via the fault\n  filter. This can be used in cases where it is desired that faults are 100% injected,\n  but the user wants to avoid a situation in which too many unexpected concurrent faulting requests\n  cause resource constraint issues. If not specified, the :ref:`max_active_faults\n  <envoy_v3_api_field_extensions.filters.http.fault.v3.HTTPFault.max_active_faults>` setting will be used.\n\nfault.http.rate_limit.response_percent\n  % of requests which will have a response rate limit fault injected. Defaults to the value set in\n  the :ref:`percentage <envoy_v3_api_field_extensions.filters.common.fault.v3.FaultRateLimit.percentage>` field.\n  This runtime key is only available when the filter is :ref:`configured for response rate limiting\n  <envoy_v3_api_field_extensions.filters.http.fault.v3.HTTPFault.response_rate_limit>`.\n\n*Note*, fault filter runtime settings for the specific downstream cluster\noverride the default ones if present. The following are downstream specific\nruntime keys:\n\n* fault.http.<downstream-cluster>.abort.abort_percent\n* fault.http.<downstream-cluster>.abort.http_status\n* fault.http.<downstream-cluster>.delay.fixed_delay_percent\n* fault.http.<downstream-cluster>.delay.fixed_duration_ms\n\nDownstream cluster name is taken from\n:ref:`the HTTP x-envoy-downstream-service-cluster <config_http_conn_man_headers_downstream-service-cluster>`\nheader. If the following settings are not found in the runtime it defaults to the global runtime settings\nwhich defaults to the config settings.\n\n.. 
_config_http_filters_fault_injection_stats:\n\nStatistics\n----------\n\nThe fault filter outputs statistics in the *http.<stat_prefix>.fault.* namespace. The :ref:`stat prefix\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stat_prefix>` comes from the\nowning HTTP connection manager.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  delays_injected, Counter, Total requests that were delayed\n  aborts_injected, Counter, Total requests that were aborted\n  response_rl_injected, Counter, \"Total requests that had a response rate limit selected for injection (actually injection may not occur due to disconnect, reset, no body, etc.)\"\n  faults_overflow, Counter, Total number of faults that were not injected due to overflowing the :ref:`max_active_faults <envoy_v3_api_field_extensions.filters.http.fault.v3.HTTPFault.max_active_faults>` setting\n  active_faults, Gauge, Total number of faults active at the current time\n  <downstream-cluster>.delays_injected, Counter, Total delayed requests for the given downstream cluster\n  <downstream-cluster>.aborts_injected, Counter, Total aborted requests for the given downstream cluster\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/grpc_http1_bridge_filter.rst",
    "content": ".. _config_http_filters_grpc_bridge:\n\ngRPC HTTP/1.1 bridge\n====================\n\n* gRPC :ref:`architecture overview <arch_overview_grpc>`\n* :ref:`v3 API reference <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`\n* This filter should be configured with the name *envoy.filters.http.grpc_http1_bridge*.\n\nThis is a simple filter which enables the bridging of an HTTP/1.1 client which does not support\nresponse trailers to a compliant gRPC server. It works by doing the following:\n\n* When a request is sent, the filter sees if the connection is HTTP/1.1 and the request content type\n  is *application/grpc*.\n* If so, when the response is received, the filter buffers it and waits for trailers and then checks the\n  *grpc-status* code. If it is not zero, the filter switches the HTTP response code to 503. It also copies\n  the *grpc-status* and *grpc-message* trailers into the response headers so that the client can look\n  at them if it wishes.\n* The client should send HTTP/1.1 requests that translate to the following pseudo headers:\n\n  * *\\:method*: POST\n  * *\\:path*: <gRPC-METHOD-NAME>\n  * *content-type*: application/grpc\n\n* The body should be the serialized grpc body which is:\n\n  * 1 byte of zero (not compressed).\n  * network order 4 bytes of proto message length.\n  * serialized proto message.\n\n* Because this scheme must buffer the response to look for the *grpc-status* trailer it will only\n  work with unary gRPC APIs.\n\nThis filter also collects stats for all gRPC requests that transit, even if those requests are\nnormal gRPC requests over HTTP/2.\n\nMore info: wire format in `gRPC over HTTP/2 <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_.\n\n.. attention::\n\n   Note that statistics are also collected by the dedicated :ref:`gRPC stats filter\n   <config_http_filters_grpc_stats>`. 
The use of this filter for gRPC telemetry\n   has been deprecated.\n\nStatistics\n----------\n\nThe filter emits statistics in the *cluster.<route target cluster>.grpc.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  <grpc service>.<grpc method>.success, Counter, Total successful service/method calls\n  <grpc service>.<grpc method>.failure, Counter, Total failed service/method calls\n  <grpc service>.<grpc method>.total, Counter, Total service/method calls\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst",
    "content": ".. _config_http_filters_grpc_http1_reverse_bridge:\n\ngRPC HTTP/1.1 reverse bridge\n============================\n\n* gRPC :ref:`architecture overview <arch_overview_grpc>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfig>`\n* This filter should be configured with the name *envoy.filters.http.grpc_http1_reverse_bridge*.\n\nThis is a filter that enables converting an incoming gRPC request into a HTTP/1.1 request to allow\na server that does not understand HTTP/2 or gRPC semantics to handle the request.\n\nThe filter works by:\n\n* Checking the content type of the incoming request. If it's a gRPC request, the filter is enabled.\n* The content type is modified to a configurable value. This can be a noop by configuring\n  ``application/grpc``.\n* The gRPC frame header is optionally stripped from the request body. The content length header\n  will be adjusted if so.\n* On receiving a response, the content type of the response is validated and the status code is\n  mapped to a grpc-status which is inserted into the response trailers.\n* The response body is optionally prefixed by the gRPC frame header, again adjusting the content\n  length header if necessary.\n\nDue to being mapped to HTTP/1.1, this filter will only work with unary gRPC calls.\n\ngRPC frame header management\n----------------------------\n\nBy setting the withhold_grpc_frame option, the filter will assume that the upstream does not\nunderstand any gRPC semantics and will convert the request body into a simple binary encoding\nof the request body and perform the reverse conversion on the response body. 
This ends up\nsimplifying the server side handling of these requests, as they no longer need to be concerned\nwith parsing and generating gRPC formatted data.\n\nThis works by stripping the gRPC frame header from the request body, while injecting a gRPC\nframe header in the response.\n\nIf this feature is not used, the upstream must be ready to receive HTTP/1.1 requests prefixed\nwith the gRPC frame header and respond with gRPC formatted responses.\n\nHow to disable HTTP/1.1 reverse bridge filter per route\n-------------------------------------------------------\n\n.. literalinclude:: _include/grpc-reverse-bridge-filter.yaml\n    :language: yaml\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst",
    "content": ".. _config_http_filters_grpc_json_transcoder:\n\ngRPC-JSON transcoder\n====================\n\n* gRPC :ref:`architecture overview <arch_overview_grpc>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder>`\n* This filter should be configured with the name *envoy.filters.http.grpc_json_transcoder*.\n\nThis is a filter which allows a RESTful JSON API client to send requests to Envoy over HTTP\nand get proxied to a gRPC service. The HTTP mapping for the gRPC service has to be defined by\n`custom options <https://cloud.google.com/service-management/reference/rpc/google.api#http>`_.\n\nJSON mapping\n------------\n\nThe protobuf to JSON mapping is defined `here <https://developers.google.com/protocol-buffers/docs/proto3#json>`_. For\ngRPC stream request parameters, Envoy expects an array of messages, and it returns an array of messages for stream\nresponse parameters.\n\n.. _config_grpc_json_generate_proto_descriptor_set:\n\nHow to generate proto descriptor set\n------------------------------------\n\nEnvoy has to know the proto descriptor of your gRPC service in order to do the transcoding.\n\nTo generate a protobuf descriptor set for the gRPC service, you'll also need to clone the\ngoogleapis repository from GitHub before running protoc, as you'll need annotations.proto\nin your include path, to define the HTTP mapping.\n\n.. code-block:: bash\n\n  git clone https://github.com/googleapis/googleapis\n  GOOGLEAPIS_DIR=<your-local-googleapis-folder>\n\nThen run protoc to generate the descriptor set from bookstore.proto:\n\n.. code-block:: bash\n\n  protoc -I$(GOOGLEAPIS_DIR) -I. 
--include_imports --include_source_info \\\n    --descriptor_set_out=proto.pb test/proto/bookstore.proto\n\nIf you have more than one proto source files, you can pass all of them in one command.\n\nRoute configs for transcoded requests\n-------------------------------------\n\nThe route configs to be used with the gRPC-JSON transcoder should be identical to the gRPC route.\nThe requests processed by the transcoder filter will have `/<package>.<service>/<method>` path and\n`POST` method. The route configs for those requests should match on `/<package>.<service>/<method>`,\nnot the incoming request path. This allows the routes to be used for both gRPC requests and\ngRPC-JSON transcoded requests.\n\nFor example, with the following proto example, the router will process `/helloworld.Greeter/SayHello`\nas the path, so the route config prefix `/say` won't match requests to `SayHello`. If you want to\nmatch the incoming request path, set `match_incoming_request_route` to true.\n\n.. code-block:: proto\n\n  package helloworld;\n\n  // The greeting service definition.\n  service Greeter {\n    // Sends a greeting\n    rpc SayHello (HelloRequest) returns (HelloReply) {\n      option (google.api.http) = {\n        get: \"/say\"\n      };\n    }\n  }\n\nSending arbitrary content\n-------------------------\n\nBy default, when transcoding occurs, gRPC-JSON encodes the message output of a gRPC service method into\nJSON and sets the HTTP response `Content-Type` header to `application/json`. To send arbitrary content,\na gRPC service method can use\n`google.api.HttpBody <https://github.com/googleapis/googleapis/blob/master/google/api/httpbody.proto>`_\nas its output message type. 
The implementation needs to set\n`content_type <https://github.com/googleapis/googleapis/blob/master/google/api/httpbody.proto#L68>`_\n(which sets the value of the HTTP response `Content-Type` header) and\n`data <https://github.com/googleapis/googleapis/blob/master/google/api/httpbody.proto#L71>`_\n(which sets the HTTP response body) accordingly.\nMultiple `google.api.HttpBody <https://github.com/googleapis/googleapis/blob/master/google/api/httpbody.proto>`_\ncan be sent by the gRPC server in the server streaming case.\nIn this case, HTTP response header `Content-Type` will use the `content-type` from the first\n`google.api.HttpBody <https://github.com/googleapis/googleapis/blob/master/google/api/httpbody.proto>`_.\n\nHeaders\n--------\n\ngRPC-JSON forwards the following headers to the gRPC server:\n\n* `x-envoy-original-path`, containing the value of the original path of HTTP request\n* `x-envoy-original-method`, containing the value of the original method of HTTP request\n\n\nSample Envoy configuration\n--------------------------\n\nHere's a sample Envoy configuration that proxies to a gRPC server running on localhost:50051. Port 51051 proxies\ngRPC requests and uses the gRPC-JSON transcoder filter to provide the RESTful JSON mapping. I.e., you can make either\ngRPC or RESTful JSON requests to localhost:51051.\n\n.. literalinclude:: _include/grpc-transcoder-filter.yaml\n    :language: yaml\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/grpc_stats_filter.rst",
    "content": ".. _config_http_filters_grpc_stats:\n\ngRPC Statistics\n===============\n\n* gRPC :ref:`architecture overview <arch_overview_grpc>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.grpc_stats.v3.FilterConfig>`\n* This filter should be configured with the name *envoy.filters.http.grpc_stats*.\n* This filter can be enabled to emit a :ref:`filter state object\n  <envoy_v3_api_msg_extensions.filters.http.grpc_stats.v3.FilterObject>`\n\nThis is a filter which enables telemetry of gRPC calls. Additionally, the\nfilter detects message boundaries in streaming gRPC calls and emits the message\ncounts for both the request and the response. \n\nMore info: wire format in `gRPC over HTTP/2 <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_.\n\nThe filter emits statistics in the *cluster.<route target cluster>.grpc.* namespace. Depending on the\nconfiguration, the stats may be prefixed with `<grpc service>.<grpc method>.`; the stats in the table below\nare shown in this form. See the documentation for\n:ref:`individual_method_stats_allowlist <envoy_v3_api_field_extensions.filters.http.grpc_stats.v3.FilterConfig.individual_method_stats_allowlist>`\nand :ref:`stats_for_all_methods <envoy_v3_api_field_extensions.filters.http.grpc_stats.v3.FilterConfig.stats_for_all_methods>`.\n\nTo enable *upstream_rq_time* (v3 API only) see :ref:`enable_upstream_stats <envoy_v3_api_field_extensions.filters.http.grpc_stats.v3.FilterConfig.enable_upstream_stats>`.\n\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  <grpc service>.<grpc method>.success, Counter, Total successful service/method calls\n  <grpc service>.<grpc method>.failure, Counter, Total failed service/method calls\n  <grpc service>.<grpc method>.total, Counter, Total service/method calls\n  <grpc service>.<grpc method>.request_message_count, Counter, Total request message count for service/method calls\n  <grpc service>.<grpc method>.response_message_count, Counter, Total response message count for service/method calls\n  <grpc service>.<grpc method>.upstream_rq_time, Histogram, Request time milliseconds\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/grpc_web_filter.rst",
    "content": ".. _config_http_filters_grpc_web:\n\ngRPC-Web\n========\n\n* gRPC :ref:`architecture overview <arch_overview_grpc>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.grpc_web.v3.GrpcWeb>`\n* This filter should be configured with the name *envoy.filters.http.grpc_web*.\n\nThis is a filter which enables the bridging of a gRPC-Web client to a compliant gRPC server by\nfollowing https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/gzip_filter.rst",
    "content": ".. _config_http_filters_gzip:\n\n.. warning::\n\n  This filter has been deprecated in favor of the\n  :ref:`HTTP Compressor filter <config_http_filters_compressor>`.\n\nGzip\n====\nGzip is an HTTP filter which enables Envoy to compress dispatched data\nfrom an upstream service upon client request. Compression is useful in\nsituations where large payloads need to be transmitted without\ncompromising the response time.\n\nConfiguration\n-------------\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.gzip.v3.Gzip>`\n* This filter should be configured with the name *envoy.filters.http.gzip*.\n\n.. attention::\n\n  The *window bits* is a number that tells the compressor how far ahead in the\n  text the algorithm should be looking for repeated sequence of characters.\n  Due to a known bug in the underlying zlib library, *window bits* with value\n  eight does not work as expected. Therefore any number below that will be\n  automatically set to 9. This issue might be solved in future releases of\n  the library.\n\nRuntime\n-------\n\nThe Gzip filter can be runtime feature flagged via the :ref:`runtime_enabled\n<envoy_v3_api_field_extensions.filters.http.gzip.v3.Gzip.compressor>`\nconfiguration field within the compressor field.\n\nHow it works\n------------\nWhen gzip filter is enabled, request and response headers are inspected to\ndetermine whether or not the content should be compressed. The content is\ncompressed and then sent to the client with the appropriate headers, if\nresponse and request allow.\n\nBy *default* compression will be *skipped* when:\n\n- A request does NOT contain *accept-encoding* header.\n- A request includes *accept-encoding* header, but it does not contain \"gzip\" or \"\\*\".\n- A request includes *accept-encoding* with \"gzip\" or \"\\*\" with the weight \"q=0\". Note\n  that the \"gzip\" will have a higher weight than \"\\*\". 
For example, if *accept-encoding*\n  is \"gzip;q=0,\\*;q=1\", the filter will not compress. But if the header is set to\n  \"\\*;q=0,gzip;q=1\", the filter will compress.\n- A request whose *accept-encoding* header includes any encoding type with a higher\n  weight than \"gzip\"'s given the corresponding compression filter is present in the chain.\n- A response contains a *content-encoding* header.\n- A response contains a *cache-control* header whose value includes \"no-transform\".\n- A response contains a *transfer-encoding* header whose value includes \"gzip\".\n- A response does not contain a *content-type* value that matches one of the selected\n  mime-types, which default to *application/javascript*, *application/json*,\n  *application/xhtml+xml*, *image/svg+xml*, *text/css*, *text/html*, *text/plain*,\n  *text/xml*.\n- Neither *content-length* nor *transfer-encoding* headers are present in\n  the response.\n- Response size is smaller than 30 bytes (only applicable when *transfer-encoding*\n  is not chunked).\n\nWhen compression is *applied*:\n\n- The *content-length* is removed from response headers.\n- Response headers contain \"*transfer-encoding: chunked*\" and do not contain\n  \"*content-encoding*\" header.\n- The \"*vary: accept-encoding*\" header is inserted on every response.\n\n.. _gzip-statistics:\n\nStatistics\n----------\n\nEvery configured Gzip filter has statistics rooted at <stat_prefix>.gzip.* with the following:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  compressed, Counter, Number of requests compressed.\n  not_compressed, Counter, Number of requests not compressed.\n  no_accept_header, Counter, Number of requests with no accept header sent.\n  header_identity, Counter, Number of requests sent with \"identity\" set as the *accept-encoding*.\n  header_gzip, Counter, Number of requests sent with \"gzip\" set as the *accept-encoding*. 
This counter is deprecated in favour of *header_compressor_used*.\n  header_compressor_used, Counter, Number of requests sent with \"gzip\" set as the *accept-encoding*.\n  header_compressor_overshadowed, Counter, Number of requests skipped by this filter instance because they were handled by another filter in the same filter chain.\n  header_wildcard, Counter, Number of requests sent with \"\\*\" set as the *accept-encoding*.\n  header_not_valid, Counter, Number of requests sent with a not valid *accept-encoding* header (aka \"q=0\" or an unsupported encoding type).\n  total_uncompressed_bytes, Counter, The total uncompressed bytes of all the requests that were marked for compression.\n  total_compressed_bytes, Counter, The total compressed bytes of all the requests that were marked for compression.\n  content_length_too_small, Counter, Number of requests that accepted gzip encoding but did not compress because the payload was too small.\n  not_compressed_etag, Counter, Number of requests that were not compressed due to the etag header. *disable_on_etag_header* must be turned on for this to happen.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/header_to_metadata_filter.rst",
    "content": ".. _config_http_filters_header_to_metadata:\n\nEnvoy Header-To-Metadata Filter\n===============================\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.header_to_metadata.v3.Config>`\n* This filter should be configured with the name *envoy.filters.http.header_to_metadata*.\n\nThis filter is configured with rules that will be matched against requests and responses.\nEach rule has either a cookie or a header and can be triggered either when the header\nor cookie is present or missing.\n\nWhen a rule is triggered, dynamic metadata will be added based on the configuration of the rule.\nIf the header or cookie is present, its value is extracted and used along with the specified\nkey as metadata. If the header or cookie is missing, the *on missing* case is triggered and the value\nspecified is used for adding metadata.\n\nThe metadata can then be used for load balancing decisions, consumed from logs, etc.\n\nA typical use case for this filter is to dynamically match requests with load balancer\nsubsets. For this, a given header's value would be extracted and attached to the request\nas dynamic metadata which would then be used to match a subset of endpoints.\n\nExample\n-------\n\nA sample filter configuration to route traffic to endpoints based on the presence or\nabsence of a version header could be:\n\n.. 
code-block:: yaml\n\n  http_filters:\n    - name: envoy.filters.http.header_to_metadata\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config\n        request_rules:\n          - header: x-version\n            on_header_present:\n              metadata_namespace: envoy.lb\n              key: version\n              type: STRING\n            on_header_missing:\n              metadata_namespace: envoy.lb\n              key: default\n              value: 'true'\n              type: STRING\n            remove: false\n\nAs with headers, the value of the specified cookie will be extracted from the request\nand added as metadata with the key specified.\nRemoving a cookie when a rule matches is unsupported.\n\n.. code-block:: yaml\n\n  http_filters:\n    - name: envoy.filters.http.header_to_metadata\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config\n        request_rules:\n          - cookie: cookie\n            on_header_present:\n              metadata_namespace: envoy.lb\n              key: version\n              type: STRING\n            on_header_missing:\n              metadata_namespace: envoy.lb\n              key: default\n              value: 'true'\n              type: STRING\n            remove: false\n\n\nA corresponding upstream cluster configuration could be:\n\n.. code-block:: yaml\n\n  clusters:\n    - name: versioned-cluster\n      type: EDS\n      lb_policy: ROUND_ROBIN\n      lb_subset_config:\n        fallback_policy: ANY_ENDPOINT\n        subset_selectors:\n          - keys:\n              - default\n          - keys:\n              - version\n\nThis would then allow requests with the `x-version` header set to be matched against\nendpoints with the corresponding version. 
Whereas requests with that header missing\nwould be matched with the default endpoints.\n\nIf the header's value needs to be transformed before it's added to the request as\ndynamic metadata, this filter supports regex matching and substitution:\n\n.. code-block:: yaml\n\n  http_filters:\n    - name: envoy.filters.http.header_to_metadata\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config\n        request_rules:\n          - header: \":path\"\n            on_header_present:\n              metadata_namespace: envoy.lb\n              key: cluster\n              regex_value_rewrite:\n                pattern:\n                  google_re2: {}\n                  regex: \"^/(cluster[\\\\d\\\\w-]+)/?.*$\"\n                substitution: \"\\\\1\"\n\nNote that this filter also supports per route configuration:\n\n.. code-block:: yaml\n\n  route_config:\n    name: local_route\n    virtual_hosts:\n    - name: local_service\n      domains: [\"*\"]\n      routes:\n      - match: { prefix: \"/version-to-metadata\" }\n        route: { cluster: service }\n        typed_per_filter_config:\n          envoy.filters.http.header_to_metadata:\n            \"@type\": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config\n            request_rules:\n              - header: x-version\n                on_header_present:\n                  metadata_namespace: envoy.lb\n                  key: version\n                  type: STRING\n                remove: false\n      - match: { prefix: \"/\" }\n        route: { cluster: some_service }\n\nThis can be used to either override the global configuration or if the global configuration\nis empty (no rules), it can be used to only enable the filter at a per route level.\n\nStatistics\n----------\n\nCurrently, this filter generates no statistics.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/health_check_filter.rst",
    "content": ".. _config_http_filters_health_check:\n\nHealth check\n============\n\n* Health check filter :ref:`architecture overview <arch_overview_health_checking_filter>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.health_check.v3.HealthCheck>`\n* This filter should be configured with the name *envoy.filters.http.health_check*.\n\n.. note::\n\n  Note that the filter will automatically fail health checks and set the\n  :ref:`x-envoy-immediate-health-check-fail\n  <config_http_filters_router_x-envoy-immediate-health-check-fail>` header if the\n  :ref:`/healthcheck/fail <operations_admin_interface_healthcheck_fail>` admin endpoint has been\n  called. (The :ref:`/healthcheck/ok <operations_admin_interface_healthcheck_ok>` admin endpoint\n  reverses this behavior).\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/http_filters.rst",
    "content": ".. _config_http_filters:\n\nHTTP filters\n============\n\n.. toctree::\n  :maxdepth: 2\n\n  adaptive_concurrency_filter\n  admission_control_filter\n  aws_lambda_filter\n  aws_request_signing_filter\n  buffer_filter\n  cdn_loop_filter\n  compressor_filter\n  cors_filter\n  csrf_filter\n  decompressor_filter\n  dynamic_forward_proxy_filter\n  dynamodb_filter\n  ext_authz_filter\n  fault_filter\n  grpc_http1_bridge_filter\n  grpc_http1_reverse_bridge_filter\n  grpc_json_transcoder_filter\n  grpc_stats_filter\n  grpc_web_filter\n  gzip_filter\n  health_check_filter\n  header_to_metadata_filter\n  ip_tagging_filter\n  jwt_authn_filter\n  local_rate_limit_filter\n  lua_filter\n  oauth2_filter\n  on_demand_updates_filter\n  original_src_filter\n  rate_limit_filter\n  rbac_filter\n  router_filter\n  squash_filter\n  tap_filter\n  wasm_filter\n\n.. TODO(toddmgreer): Remove this hack and add user-visible CacheFilter docs when CacheFilter is production-ready.\n.. toctree::\n  :hidden:\n\n  ../../../api-v3/extensions/filters/http/admission_control/v3alpha/admission_control.proto\n  ../../../api-v3/extensions/filters/http/oauth2/v3alpha/oauth.proto\n  ../../../api-v3/extensions/filters/http/cache/v3alpha/cache.proto\n  ../../../api-v3/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/ip_tagging_filter.rst",
    "content": ".. _config_http_filters_ip_tagging:\n\nIP Tagging\n==========\n\nThe HTTP IP Tagging filter sets the header *x-envoy-ip-tags* with the string tags for the trusted address from\n:ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`. If there are no tags for an address,\nthe header is not set.\n\nThe implementation for IP Tagging provides a scalable way to compare an IP address to a large list of CIDR\nranges efficiently. The underlying algorithm for storing tags and IP address subnets is a Level-Compressed trie\ndescribed in the paper `IP-address lookup using\nLC-tries <https://www.nada.kth.se/~snilsson/publications/IP-address-lookup-using-LC-tries/>`_ by S. Nilsson and\nG. Karlsson.\n\n\nConfiguration\n-------------\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.ip_tagging.v3.IPTagging>`\n* This filter should be configured with the name *envoy.filters.http.ip_tagging*.\n\nStatistics\n----------\n\nThe IP Tagging filter outputs statistics in the *http.<stat_prefix>.ip_tagging.* namespace. The stat prefix comes from\nthe owning HTTP connection manager.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n        <tag_name>.hit, Counter, Total number of requests that have the <tag_name> applied to it\n        no_hit, Counter, Total number of requests with no applicable IP tags\n        total, Counter, Total number of requests the IP Tagging Filter operated on\n\nRuntime\n-------\n\nThe IP Tagging filter supports the following runtime settings:\n\nip_tagging.http_filter_enabled\n    The % of requests for which the filter is enabled. Default is 100.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/jwt_authn_filter.rst",
    "content": ".. _config_http_filters_jwt_authn:\n\nJWT Authentication\n==================\n\nThis HTTP filter can be used to verify JSON Web Token (JWT). It will verify its signature, audiences and issuer. It will also check its time restrictions, such as expiration and nbf (not before) time. If the JWT verification fails, its request will be rejected. If the JWT verification succeeds, its payload can be forwarded to the upstream for further authorization if desired.\n\nJWKS is needed to verify JWT signatures. They can be specified in the filter config or can be fetched remotely from a JWKS server.\n\nFollowing are supported JWT alg:\n\n.. code-block::\n\n   ES256, ES384, ES512,\n   HS256, HS384, HS512,\n   RS256, RS384, RS512,\n   PS256, PS384, PS512,\n   EdDSA\n\nConfiguration\n-------------\n\nThis filter should be configured with the name *envoy.filters.http.jwt_authn*.\n\nThis HTTP :ref:`filter config <envoy_v3_api_msg_extensions.filters.http.jwt_authn.v3.JwtAuthentication>` has two fields:\n\n* Field *providers* specifies how a JWT should be verified, such as where to extract the token, where to fetch the public key (JWKS) and where to output its payload.\n* Field *rules* specifies matching rules and their requirements. If a request matches a rule, its requirement applies. The requirement specifies which JWT providers should be used.\n\nJwtProvider\n~~~~~~~~~~~\n\n:ref:`JwtProvider <envoy_v3_api_msg_extensions.filters.http.jwt_authn.v3.JwtProvider>` specifies how a JWT should be verified. It has the following fields:\n\n* *issuer*: the principal that issued the JWT, usually a URL or an email address.\n* *audiences*: a list of JWT audiences allowed to access. 
A JWT containing any of these audiences will be accepted.\n  If not specified, the audiences in JWT will not be checked.\n* *local_jwks*: fetch JWKS in local data source, either in a local file or embedded in the inline string.\n* *remote_jwks*: fetch JWKS from a remote HTTP server, also specify cache duration.\n* *forward*: if true, JWT will be forwarded to the upstream.\n* *from_headers*: extract JWT from HTTP headers.\n* *from_params*: extract JWT from query parameters.\n* *forward_payload_header*: forward the JWT payload in the specified HTTP header.\n\nDefault Extract Location\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nIf *from_headers* and *from_params* is empty,  the default location to extract JWT is from HTTP header::\n\n  Authorization: Bearer <token>\n\nand query parameter key *access_token* as::\n\n  /path?access_token=<JWT>\n\nIf a request has two tokens, one from the header and the other from the query parameter, all of them must be valid.\n\nIn the :ref:`filter config <envoy_v3_api_msg_extensions.filters.http.jwt_authn.v3.JwtAuthentication>`, *providers* is a map, to map *provider_name* to a :ref:`JwtProvider <envoy_v3_api_msg_extensions.filters.http.jwt_authn.v3.JwtProvider>`. The *provider_name* must be unique, it is referred in the `JwtRequirement <envoy_v3_api_msg_extensions.filters.http.jwt_authn.v3.JwtRequirement>` in its *provider_name* field.\n\n.. important::\n   For *remote_jwks*, a **jwks_cluster** cluster is required.\n\nDue to above requirement, `OpenID Connect Discovery <https://openid.net/specs/openid-connect-discovery-1_0.html>`_ is not supported since the URL to fetch JWKS is in the response of the discovery. It is not easy to setup a cluster config for a dynamic URL.\n\nRemote JWKS config example\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. 
code-block:: yaml\n\n  providers:\n    provider_name1:\n      issuer: https://example.com\n      audiences:\n      - bookstore_android.apps.googleusercontent.com\n      - bookstore_web.apps.googleusercontent.com\n      remote_jwks:\n        http_uri:\n          uri: https://example.com/jwks.json\n          cluster: example_jwks_cluster\n        cache_duration:\n          seconds: 300\n\nThe above example fetches JWKS from a remote server with URL https://example.com/jwks.json. The token will be extracted from the default extract locations. The token will not be forwarded to upstream. JWT payload will not be added to the request header.\n\nThe following cluster **example_jwks_cluster** is needed to fetch JWKS.\n\n.. code-block:: yaml\n\n  cluster:\n    name: example_jwks_cluster\n    type: STRICT_DNS\n    load_assignment:\n      cluster_name: example_jwks_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: example.com\n                port_value: 80\n\n\nInline JWKS config example\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nAnother config example using inline JWKS:\n\n.. code-block:: yaml\n\n  providers:\n    provider_name2:\n      issuer: https://example2.com\n      local_jwks:\n        inline_string: PUBLIC-KEY\n      from_headers:\n      - name: jwt-assertion\n      forward: true\n      forward_payload_header: x-jwt-payload\n\nThe above example uses config inline string to specify JWKS. The JWT token will be extracted from HTTP headers as::\n\n     jwt-assertion: <JWT>.\n\nJWT payload will be added to the request header as following format::\n\n    x-jwt-payload: base64url_encoded(jwt_payload_in_JSON)\n\nRequirementRule\n~~~~~~~~~~~~~~~\n\n:ref:`RequirementRule <envoy_v3_api_msg_extensions.filters.http.jwt_authn.v3.RequirementRule>` has two fields:\n\n* Field *match* specifies how a request can be matched; e.g. 
by HTTP headers, or by query parameters, or by path prefixes.\n* Field *requires* specifies the JWT requirement, e.g. which provider is required.\n\n.. important::\n   - **If a request matches multiple rules, the first matched rule will apply**.\n   - If the matched rule has empty *requires* field, **JWT verification is not required**.\n   - If a request doesn't match any rules, **JWT verification is not required**.\n\nSingle requirement config example\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. code-block:: yaml\n\n  providers:\n    jwt_provider1:\n      issuer: https://example.com\n      audiences:\n        audience1\n      local_jwks:\n        inline_string: PUBLIC-KEY\n  rules:\n  - match:\n      prefix: /health\n  - match:\n      prefix: /api\n    requires:\n      provider_and_audiences:\n        provider_name: jwt_provider1\n        audiences:\n          api_audience\n  - match:\n      prefix: /\n    requires:\n      provider_name: jwt_provider1\n\nAbove config uses single requirement rule, each rule may have either an empty requirement or a single requirement with one provider name.\n\nGroup requirement config example\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. 
code-block:: yaml\n\n  providers:\n    provider1:\n      issuer: https://provider1.com\n      local_jwks:\n        inline_string: PUBLIC-KEY\n    provider2:\n      issuer: https://provider2.com\n      local_jwks:\n        inline_string: PUBLIC-KEY\n  rules:\n  - match:\n      prefix: /any\n    requires:\n      requires_any:\n        requirements:\n        - provider_name: provider1\n        - provider_name: provider2\n  - match:\n      prefix: /all\n    requires:\n      requires_all:\n        requirements:\n        - provider_name: provider1\n        - provider_name: provider2\n\nAbove config uses more complex *group* requirements:\n\n* The first *rule* specifies *requires_any*; if any of **provider1** or **provider2** requirement is satisfied, the request is OK to proceed.\n* The second *rule* specifies *requires_all*; only if both **provider1** and **provider2** requirements are satisfied, the request is OK to proceed.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/local_rate_limit_filter.rst",
    "content": ".. _config_http_filters_local_rate_limit:\n\nLocal rate limit\n================\n\n* Local rate limiting :ref:`architecture overview <arch_overview_local_rate_limit>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.local_ratelimit.v3.LocalRateLimit>`\n* This filter should be configured with the name *envoy.filters.http.local_ratelimit*.\n\nThe HTTP local rate limit filter applies a :ref:`token bucket\n<envoy_v3_api_field_extensions.filters.http.local_ratelimit.v3.LocalRateLimit.token_bucket>` rate\nlimit when the request's route or virtual host has a per filter\n:ref:`local rate limit configuration <envoy_v3_api_msg_extensions.filters.http.local_ratelimit.v3.LocalRateLimit>`.\n\nIf the local rate limit token bucket is checked, and there are no tokens available, a 429 response is returned\n(the response is configurable). The local rate limit filter also sets the\n:ref:`x-envoy-ratelimited<config_http_filters_router_x-envoy-ratelimited>` header. Additional response\nheaders may be configured.\n\nExample configuration\n---------------------\n\nExample filter configuration for a globally set rate limiter (e.g.: all vhosts/routes share the same token bucket):\n\n.. 
code-block:: yaml\n\n  name: envoy.filters.http.local_ratelimit\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit\n    stat_prefix: http_local_rate_limiter\n    token_bucket:\n      max_tokens: 10000\n      tokens_per_fill: 1000\n      fill_interval: 1s\n    filter_enabled:\n      runtime_key: local_rate_limit_enabled\n      default_value:\n        numerator: 100\n        denominator: HUNDRED\n    filter_enforced:\n      runtime_key: local_rate_limit_enforced\n      default_value:\n        numerator: 100\n        denominator: HUNDRED\n    response_headers_to_add:\n      - append: false\n        header:\n          key: x-local-rate-limit\n          value: 'true'\n\n\nExample filter configuration for a globally disabled rate limiter but enabled for a specific route:\n\n.. code-block:: yaml\n\n  name: envoy.filters.http.local_ratelimit\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit\n    stat_prefix: http_local_rate_limiter\n\n\nThe route specific configuration:\n\n.. 
code-block:: yaml\n\n  route_config:\n    name: local_route\n    virtual_hosts:\n    - name: local_service\n      domains: [\"*\"]\n      routes:\n      - match: { prefix: \"/path/with/rate/limit\" }\n        route: { cluster: service_protected_by_rate_limit }\n        typed_per_filter_config:\n          envoy.filters.http.local_ratelimit:\n            \"@type\": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit\n            token_bucket:\n              max_tokens: 10000\n              tokens_per_fill: 1000\n              fill_interval: 1s\n            filter_enabled:\n              runtime_key: local_rate_limit_enabled\n              default_value:\n                numerator: 100\n                denominator: HUNDRED\n            filter_enforced:\n              runtime_key: local_rate_limit_enforced\n              default_value:\n                numerator: 100\n                denominator: HUNDRED\n            response_headers_to_add:\n              - append: false\n                header:\n                  key: x-local-rate-limit\n                  value: 'true'\n      - match: { prefix: \"/\" }\n        route: { cluster: default_service }\n\n\nNote that if this filter is configured as globally disabled and there are no virtual host or route level\ntoken buckets, no rate limiting will be applied.\n\nStatistics\n----------\n\nThe local rate limit filter outputs statistics in the *<stat_prefix>.http_local_rate_limit.* namespace.\n429 responses -- or the configured status code -- are emitted to the normal cluster :ref:`dynamic HTTP statistics\n<config_cluster_manager_cluster_stats_dynamic_http>`.\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  enabled, Counter, Total number of requests for which the rate limiter was consulted\n  ok, Counter, Total under limit responses from the token bucket\n  rate_limited, Counter, Total responses without an available token (but not necessarily enforced)\n  enforced, Counter, Total number of requests for which rate limiting was applied (e.g.: 429 returned)\n\n.. _config_http_filters_local_rate_limit_runtime:\n\nRuntime\n-------\n\nThe HTTP rate limit filter supports the following runtime fractional settings:\n\nhttp_filter_enabled\n  % of requests that will check the local rate limit decision, but not enforce, for a given *route_key* specified\n  in the :ref:`local rate limit configuration <envoy_v3_api_msg_extensions.filters.http.local_ratelimit.v3.LocalRateLimit>`.\n  Defaults to 0.\n\nhttp_filter_enforcing\n  % of requests that will enforce the local rate limit decision for a given *route_key* specified in the\n  :ref:`local rate limit configuration <envoy_v3_api_msg_extensions.filters.http.local_ratelimit.v3.LocalRateLimit>`.\n  Defaults to 0. This can be used to test what would happen before fully enforcing the outcome.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/lua_filter.rst",
    "content": ".. _config_http_filters_lua:\n\nLua\n===\n\n.. attention::\n\n  By default Envoy is built without exporting symbols that you may need when interacting with Lua\n  modules installed as shared objects. Envoy may need to be built with support for exported symbols.\n  Please see the :repo:`Bazel docs <bazel/README.md>` for more information.\n\nOverview\n--------\n\nThe HTTP Lua filter allows `Lua <https://www.lua.org/>`_ scripts to be run during both the request\nand response flows. `LuaJIT <https://luajit.org/>`_ is used as the runtime. Because of this, the\nsupported Lua version is mostly 5.1 with some 5.2 features. See the `LuaJIT documentation\n<https://luajit.org/extensions.html>`_ for more details.\n\n.. note::\n\n  `moonjit <https://github.com/moonjit/moonjit/>`_ is a continuation of LuaJIT development, which\n  supports more 5.2 features and additional architectures. Envoy can be built with moonjit support\n  by using the following bazel option: ``--//source/extensions/filters/common/lua:moonjit=1``.\n\nThe design of the filter and Lua support at a high level is as follows:\n\n* All Lua environments are :ref:`per worker thread <arch_overview_threading>`. This means that\n  there is no truly global data. Any globals created and populated at load time will be visible\n  from each worker thread in isolation. True global support may be added via an API in the future.\n* All scripts are run as coroutines. This means that they are written in a synchronous style even\n  though they may perform complex asynchronous tasks. This makes the scripts substantially easier\n  to write. All network/async processing is performed by Envoy via a set of APIs. 
Envoy will\n  suspend execution of the script as appropriate and resume it when async tasks are complete.\n* **Do not perform blocking operations from scripts.** It is critical for performance that\n  Envoy APIs are used for all IO.\n\nCurrently supported high level features\n---------------------------------------\n\n**NOTE:** It is expected that this list will expand over time as the filter is used in production.\nThe API surface has been kept small on purpose. The goal is to make scripts extremely simple and\nsafe to write. Very complex or high performance use cases are assumed to use the native C++ filter\nAPI.\n\n* Inspection of headers, body, and trailers while streaming in either the request flow, response\n  flow, or both.\n* Modification of headers and trailers.\n* Blocking and buffering the full request/response body for inspection.\n* Performing an outbound async HTTP call to an upstream host. Such a call can be performed while\n  buffering body data so that when the call completes upstream headers can be modified.\n* Performing a direct response and skipping further filter iteration. For example, a script\n  could make an upstream HTTP call for authentication, and then directly respond with a 403\n  response code.\n\nConfiguration\n-------------\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.lua.v3.Lua>`\n* This filter should be configured with the name *envoy.filters.http.lua*.\n\nA simple example of configuring Lua HTTP filter that contains only :ref:`inline_code\n<envoy_v3_api_field_extensions.filters.http.lua.v3.Lua.inline_code>` is as follow:\n\n.. 
code-block:: yaml\n\n  name: envoy.filters.http.lua\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua\n    inline_code: |\n      -- Called on the request path.\n      function envoy_on_request(request_handle)\n        -- Do something.\n      end\n      -- Called on the response path.\n      function envoy_on_response(response_handle)\n        -- Do something.\n      end\n\nBy default, Lua script defined in ``inline_code`` will be treated as a ``GLOBAL`` script. Envoy will\nexecute it for every HTTP request.\n\nPer-Route Configuration\n-----------------------\n\nThe Lua HTTP filter also can be disabled or overridden on a per-route basis by providing a\n:ref:`LuaPerRoute <envoy_v3_api_msg_extensions.filters.http.lua.v3.LuaPerRoute>` configuration\non the virtual host, route, or weighted cluster.\n\nLuaPerRoute provides two ways of overriding the `GLOBAL` Lua script:\n\n* By providing a name reference to the defined :ref:`named Lua source codes map\n  <envoy_v3_api_field_extensions.filters.http.lua.v3.Lua.source_codes>`.\n* By providing inline :ref:`source code\n  <envoy_v3_api_field_extensions.filters.http.lua.v3.LuaPerRoute.source_code>` (This allows the\n  code to be sent through RDS).\n\nAs a concrete example, given the following Lua filter configuration:\n\n.. 
code-block:: yaml\n\n  name: envoy.filters.http.lua\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua\n    inline_code: |\n      function envoy_on_request(request_handle)\n        -- do something\n      end\n    source_codes:\n      hello.lua:\n        inline_string: |\n          function envoy_on_request(request_handle)\n            request_handle:logInfo(\"Hello World.\")\n          end\n      bye.lua:\n        inline_string: |\n          function envoy_on_response(response_handle)\n            response_handle:logInfo(\"Bye Bye.\")\n          end\n\nThe HTTP Lua filter can be disabled on some virtual host, route, or weighted cluster by the\n:ref:`LuaPerRoute <envoy_v3_api_msg_extensions.filters.http.lua.v3.LuaPerRoute>` configuration as\nfollow:\n\n.. code-block:: yaml\n\n  per_filter_config:\n    envoy.filters.http.lua:\n      disabled: true\n\nWe can also refer to a Lua script in the filter configuration by specifying a name in LuaPerRoute.\nThe ``GLOBAL`` Lua script will be overridden by the referenced script:\n\n.. code-block:: yaml\n\n  per_filter_config:\n    envoy.filters.http.lua:\n      name: hello.lua\n\n.. attention::\n\n  The name ``GLOBAL`` is reserved for :ref:`Lua.inline_code\n  <envoy_v3_api_field_extensions.filters.http.lua.v3.Lua.inline_code>`. Therefore, do not use\n  ``GLOBAL`` as name for other Lua scripts.\n\nOr we can define a new Lua script in the LuaPerRoute configuration directly to override the `GLOBAL`\nLua script as follows:\n\n.. code-block:: yaml\n\n  per_filter_config:\n    envoy.filters.http.lua:\n      source_code:\n        inline_string: |\n          function envoy_on_response(response_handle)\n            response_handle:logInfo(\"Goodbye.\")\n          end\n\n\nScript examples\n---------------\n\nThis section provides some concrete examples of Lua scripts as a more gentle introduction and quick\nstart. 
Please refer to the :ref:`stream handle API <config_http_filters_lua_stream_handle_api>` for\nmore details on the supported API.\n\n.. code-block:: lua\n\n  -- Called on the request path.\n  function envoy_on_request(request_handle)\n    -- Wait for the entire request body and add a request header with the body size.\n    request_handle:headers():add(\"request_body_size\", request_handle:body():length())\n  end\n\n  -- Called on the response path.\n  function envoy_on_response(response_handle)\n    -- Wait for the entire response body and add a response header with the body size.\n    response_handle:headers():add(\"response_body_size\", response_handle:body():length())\n    -- Remove a response header named 'foo'\n    response_handle:headers():remove(\"foo\")\n  end\n\n.. code-block:: lua\n\n  function envoy_on_request(request_handle)\n    -- Make an HTTP call to an upstream host with the following headers, body, and timeout.\n    local headers, body = request_handle:httpCall(\n    \"lua_cluster\",\n    {\n      [\":method\"] = \"POST\",\n      [\":path\"] = \"/\",\n      [\":authority\"] = \"lua_cluster\"\n    },\n    \"hello world\",\n    5000)\n\n    -- Add information from the HTTP call into the headers that are about to be sent to the next\n    -- filter in the filter chain.\n    request_handle:headers():add(\"upstream_foo\", headers[\"foo\"])\n    request_handle:headers():add(\"upstream_body_size\", #body)\n  end\n\n.. code-block:: lua\n\n  function envoy_on_request(request_handle)\n    -- Make an HTTP call.\n    local headers, body = request_handle:httpCall(\n    \"lua_cluster\",\n    {\n      [\":method\"] = \"POST\",\n      [\":path\"] = \"/\",\n      [\":authority\"] = \"lua_cluster\",\n      [\"set-cookie\"] = { \"lang=lua; Path=/\", \"type=binding; Path=/\" }\n    },\n    \"hello world\",\n    5000)\n\n    -- Response directly and set a header from the HTTP call. 
No further filter iteration\n    -- occurs.\n    request_handle:respond(\n      {[\":status\"] = \"403\",\n       [\"upstream_foo\"] = headers[\"foo\"]},\n      \"nope\")\n  end\n\n.. code-block:: lua\n\n  function envoy_on_request(request_handle)\n    -- Log information about the request\n    request_handle:logInfo(\"Authority: \"..request_handle:headers():get(\":authority\"))\n    request_handle:logInfo(\"Method: \"..request_handle:headers():get(\":method\"))\n    request_handle:logInfo(\"Path: \"..request_handle:headers():get(\":path\"))\n  end\n\n  function envoy_on_response(response_handle)\n    -- Log response status code\n    response_handle:logInfo(\"Status: \"..response_handle:headers():get(\":status\"))\n  end\n\nA common use-case is to rewrite upstream response body, for example: an upstream sends non-2xx\nresponse with JSON data, but the application requires HTML page to be sent to browsers.\n\nThere are two ways of doing this, the first one is via the `body()` API.\n\n.. code-block:: lua\n\n    function envoy_on_response(response_handle)\n      local content_length = response_handle:body():setBytes(\"<html><b>Not Found<b></html>\")\n      response_handle:headers():replace(\"content-length\", content_length)\n      response_handle:headers():replace(\"content-type\", \"text/html\")\n    end\n\n\nOr, through `bodyChunks()` API, which let Envoy to skip buffering the upstream response data.\n\n.. code-block:: lua\n\n    function envoy_on_response(response_handle)\n\n      -- Sets the content-length.\n      response_handle:headers():replace(\"content-length\", 28)\n      response_handle:headers():replace(\"content-type\", \"text/html\")\n\n      local last\n      for chunk in response_handle:bodyChunks() do\n        -- Clears each received chunk.\n        chunk:setBytes(\"\")\n        last = chunk\n      end\n\n      last:setBytes(\"<html><b>Not Found<b></html>\")\n    end\n\n.. 
_config_http_filters_lua_stream_handle_api:\n\nComplete example\n----------------\n\nA complete example using Docker is available in :repo:`/examples/lua`.\n\nStream handle API\n-----------------\n\nWhen Envoy loads the script in the configuration, it looks for two global functions that the\nscript defines:\n\n.. code-block:: lua\n\n  function envoy_on_request(request_handle)\n  end\n\n  function envoy_on_response(response_handle)\n  end\n\nA script can define either or both of these functions. During the request path, Envoy will\nrun *envoy_on_request* as a coroutine, passing a handle to the request API. During the\nresponse path, Envoy will run *envoy_on_response* as a coroutine, passing handle to the\nresponse API.\n\n.. attention::\n\n  It is critical that all interaction with Envoy occur through the passed stream handle. The stream\n  handle should not be assigned to any global variable and should not be used outside of the\n  coroutine. Envoy will fail your script if the handle is used incorrectly.\n\nThe following methods on the stream handle are supported:\n\nheaders()\n^^^^^^^^^\n\n.. code-block:: lua\n\n  local headers = handle:headers()\n\nReturns the stream's headers. The headers can be modified as long as they have not been sent to\nthe next filter in the header chain. For example, they can be modified after an *httpCall()* or\nafter a *body()* call returns. The script will fail if the headers are modified in any other\nsituation.\n\nReturns a :ref:`header object <config_http_filters_lua_header_wrapper>`.\n\nbody()\n^^^^^^\n\n.. code-block:: lua\n\n  local body = handle:body()\n\nReturns the stream's body. This call will cause Envoy to suspend execution of the script until\nthe entire body has been received in a buffer. Note that all buffering must adhere to the\nflow-control policies in place. 
Envoy will not buffer more data than is allowed by the connection\nmanager.\n\nReturns a :ref:`buffer object <config_http_filters_lua_buffer_wrapper>`.\n\nbodyChunks()\n^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  local iterator = handle:bodyChunks()\n\nReturns an iterator that can be used to iterate through all received body chunks as they arrive.\nEnvoy will suspend executing the script in between chunks, but *will not buffer* them. This can be\nused by a script to inspect data as it is streaming by.\n\n.. code-block:: lua\n\n  for chunk in request_handle:bodyChunks() do\n    request_handle:log(0, chunk:length())\n  end\n\nEach chunk the iterator returns is a :ref:`buffer object <config_http_filters_lua_buffer_wrapper>`.\n\ntrailers()\n^^^^^^^^^^\n\n.. code-block:: lua\n\n  local trailers = handle:trailers()\n\nReturns the stream's trailers. May return nil if there are no trailers. The trailers may be\nmodified before they are sent to the next filter.\n\nReturns a :ref:`header object <config_http_filters_lua_header_wrapper>`.\n\nlog*()\n^^^^^^\n\n.. code-block:: lua\n\n  handle:logTrace(message)\n  handle:logDebug(message)\n  handle:logInfo(message)\n  handle:logWarn(message)\n  handle:logErr(message)\n  handle:logCritical(message)\n\nLogs a message using Envoy's application logging. *message* is a string to log.\n\nhttpCall()\n^^^^^^^^^^\n\n.. code-block:: lua\n\n  local headers, body = handle:httpCall(cluster, headers, body, timeout, asynchronous)\n\nMakes an HTTP call to an upstream host. *cluster* is a string which maps to a configured cluster manager cluster. *headers*\nis a table of key/value pairs to send (the value can be a string or table of strings). Note that\nthe *:method*, *:path*, and *:authority* headers must be set. *body* is an optional string of body\ndata to send. *timeout* is an integer that specifies the call timeout in milliseconds.\n\n*asynchronous* is a boolean flag. 
If asynchronous is set to true, Envoy will make the HTTP request and continue,\nregardless of the response success or failure. If this is set to false, or not set, Envoy will suspend executing the script\nuntil the call completes or has an error.\n\nReturns *headers* which is a table of response headers. Returns *body* which is the string response\nbody. May be nil if there is no body.\n\nrespond()\n^^^^^^^^^^\n\n.. code-block:: lua\n\n  handle:respond(headers, body)\n\nRespond immediately and do not continue further filter iteration. This call is *only valid in\nthe request flow*. Additionally, a response is only possible if the request headers have not yet been\npassed to subsequent filters. Meaning, the following Lua code is invalid:\n\n.. code-block:: lua\n\n  function envoy_on_request(request_handle)\n    for chunk in request_handle:bodyChunks() do\n      request_handle:respond(\n        {[\":status\"] = \"100\"},\n        \"nope\")\n    end\n  end\n\n*headers* is a table of key/value pairs to send (the value can be a string or table of strings).\nNote that the *:status* header must be set. *body* is a string and supplies the optional response\nbody. May be nil.\n\nmetadata()\n^^^^^^^^^^\n\n.. code-block:: lua\n\n  local metadata = handle:metadata()\n\nReturns the current route entry metadata. Note that the metadata should be specified\nunder the filter name i.e. *envoy.filters.http.lua*. Below is an example of a *metadata* in a\n:ref:`route entry <envoy_v3_api_msg_config.route.v3.Route>`.\n\n.. code-block:: yaml\n\n  metadata:\n    filter_metadata:\n      envoy.filters.http.lua:\n        foo: bar\n        baz:\n          - bad\n          - baz\n\nReturns a :ref:`metadata object <config_http_filters_lua_metadata_wrapper>`.\n\nstreamInfo()\n^^^^^^^^^^^^^\n\n.. 
code-block:: lua\n\n  local streamInfo = handle:streamInfo()\n\nReturns :repo:`information <include/envoy/stream_info/stream_info.h>` related to the current request.\n\nReturns a :ref:`stream info object <config_http_filters_lua_stream_info_wrapper>`.\n\nconnection()\n^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  local connection = handle:connection()\n\nReturns the current request's underlying :repo:`connection <include/envoy/network/connection.h>`.\n\nReturns a :ref:`connection object <config_http_filters_lua_connection_wrapper>`.\n\nimportPublicKey()\n^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  local pubkey = handle:importPublicKey(keyder, keyderLength)\n\nReturns public key which is used by :ref:`verifySignature <verify_signature>` to verify digital signature.\n\n.. _verify_signature:\n\nverifySignature()\n^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  local ok, error = verifySignature(hashFunction, pubkey, signature, signatureLength, data, dataLength)\n\nVerify signature using provided parameters. *hashFunction* is the variable for the hash function which be used\nfor verifying signature. *SHA1*, *SHA224*, *SHA256*, *SHA384* and *SHA512* are supported.\n*pubkey* is the public key. *signature* is the signature to be verified. *signatureLength* is\nthe length of the signature. *data* is the content which will be hashed. *dataLength* is the length of data.\n\nThe function returns a pair. If the first element is *true*, the second element will be empty\nwhich means signature is verified; otherwise, the second element will store the error message.\n\n.. _config_http_filters_lua_stream_handle_api_base64_escape:\n\nbase64Escape()\n^^^^^^^^^^^^^^\n.. code-block:: lua\n\n  local base64_encoded = handle:base64Escape(\"input string\")\n\nEncodes the input string as base64. This can be useful for escaping binary data.\n\n.. _config_http_filters_lua_header_wrapper:\n\nHeader object API\n-----------------\n\nadd()\n^^^^^\n\n.. 
code-block:: lua\n\n  headers:add(key, value)\n\nAdds a header. *key* is a string that supplies the header key. *value* is a string that supplies\nthe header value.\n\nget()\n^^^^^\n\n.. code-block:: lua\n\n  headers:get(key)\n\nGets a header. *key* is a string that supplies the header key. Returns a string that is the header\nvalue or nil if there is no such header.\n\n__pairs()\n^^^^^^^^^\n\n.. code-block:: lua\n\n  for key, value in pairs(headers) do\n  end\n\nIterates through every header. *key* is a string that supplies the header key. *value* is a string\nthat supplies the header value.\n\n.. attention::\n\n  In the current implementation, headers cannot be modified during iteration. Additionally, if\n  it is necessary to modify headers after an iteration, the iteration must first be completed. This means that\n  `break` or any other way to exit the loop early must not be used. This may be more flexible in the future.\n\nremove()\n^^^^^^^^\n\n.. code-block:: lua\n\n  headers:remove(key)\n\nRemoves a header. *key* supplies the header key to remove.\n\nreplace()\n^^^^^^^^^\n\n.. code-block:: lua\n\n  headers:replace(key, value)\n\nReplaces a header. *key* is a string that supplies the header key. *value* is a string that supplies\nthe header value. If the header does not exist, it is added as per the *add()* function.\n\n.. _config_http_filters_lua_buffer_wrapper:\n\nBuffer API\n----------\n\nlength()\n^^^^^^^^^^\n\n.. code-block:: lua\n\n  local size = buffer:length()\n\nGets the size of the buffer in bytes. Returns an integer.\n\ngetBytes()\n^^^^^^^^^^\n\n.. code-block:: lua\n\n  buffer:getBytes(index, length)\n\nGet bytes from the buffer. By default Envoy will not copy all buffer bytes to Lua. This will\ncause a buffer segment to be copied. *index* is an integer and supplies the buffer start index to\ncopy. *length* is an integer and supplies the buffer length to copy. *index* + *length* must be\nless than the buffer length.\n\n.. 
_config_http_filters_lua_buffer_wrapper_api_set_bytes:\n\nsetBytes()\n^^^^^^^^^^\n\n.. code-block:: lua\n\n  buffer:setBytes(string)\n\nSet the content of wrapped buffer with the input string.\n\n.. _config_http_filters_lua_metadata_wrapper:\n\nMetadata object API\n-------------------\n\nget()\n^^^^^\n\n.. code-block:: lua\n\n  metadata:get(key)\n\nGets a metadata. *key* is a string that supplies the metadata key. Returns the corresponding\nvalue of the given metadata key. The type of the value can be: *nil*, *boolean*, *number*,\n*string* and *table*.\n\n__pairs()\n^^^^^^^^^\n\n.. code-block:: lua\n\n  for key, value in pairs(metadata) do\n  end\n\nIterates through every *metadata* entry. *key* is a string that supplies a *metadata*\nkey. *value* is a *metadata* entry value.\n\n.. _config_http_filters_lua_stream_info_wrapper:\n\nStream info object API\n-----------------------\n\nprotocol()\n^^^^^^^^^^\n\n.. code-block:: lua\n\n  streamInfo:protocol()\n\nReturns the string representation of :repo:`HTTP protocol <include/envoy/http/protocol.h>`\nused by the current request. The possible values are: *HTTP/1.0*, *HTTP/1.1*, and *HTTP/2*.\n\ndynamicMetadata()\n^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  streamInfo:dynamicMetadata()\n\nReturns a :ref:`dynamic metadata object <config_http_filters_lua_stream_info_dynamic_metadata_wrapper>`.\n\ndownstreamSslConnection()\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  streamInfo:downstreamSslConnection()\n\nReturns :repo:`information <include/envoy/ssl/connection.h>` related to the current SSL connection.\n\nReturns a downstream :ref:`SSL connection info object <config_http_filters_lua_ssl_socket_info>`.\n\n.. _config_http_filters_lua_stream_info_dynamic_metadata_wrapper:\n\nDynamic metadata object API\n---------------------------\n\nget()\n^^^^^\n\n.. 
code-block:: lua\n\n  dynamicMetadata:get(filterName)\n\n  -- to get a value from a returned table.\n  dynamicMetadata:get(filterName)[key]\n\nGets an entry in dynamic metadata struct. *filterName* is a string that supplies the filter name, e.g. *envoy.lb*.\nReturns the corresponding *table* of a given *filterName*.\n\nset()\n^^^^^\n\n.. code-block:: lua\n\n  dynamicMetadata:set(filterName, key, value)\n\nSets key-value pair of a *filterName*'s metadata. *filterName* is a key specifying the target filter name,\ne.g. *envoy.lb*. The type of *key* is *string*. The type of *value* is any Lua type that can be mapped\nto a metadata value: *table*, *numeric*, *boolean*, *string* or *nil*. When using a *table* as an argument,\nits keys can only be *string* or *numeric*.\n\n.. code-block:: lua\n\n  function envoy_on_request(request_handle)\n    local headers = request_handle:headers()\n    request_handle:streamInfo():dynamicMetadata():set("envoy.filters.http.lua", "request.info", {\n      auth = headers:get("authorization"),\n      token = headers:get("x-request-token"),\n    })\n  end\n\n  function envoy_on_response(response_handle)\n    local meta = response_handle:streamInfo():dynamicMetadata():get("envoy.filters.http.lua")["request.info"]\n    response_handle:logInfo("Auth: "..meta.auth..", token: "..meta.token)\n  end\n\n\n__pairs()\n^^^^^^^^^\n\n.. code-block:: lua\n\n  for key, value in pairs(dynamicMetadata) do\n  end\n\nIterates through every *dynamicMetadata* entry. *key* is a string that supplies a *dynamicMetadata*\nkey. *value* is a *dynamicMetadata* entry value.\n\n.. _config_http_filters_lua_connection_wrapper:\n\nConnection object API\n---------------------\n\nssl()\n^^^^^\n\n.. 
code-block:: lua\n\n  if connection:ssl() == nil then\n    print("plain")\n  else\n    print("secure")\n  end\n\nReturns :repo:`SSL connection <include/envoy/ssl/connection.h>` object when the connection is\nsecured and *nil* when it is not.\n\nReturns an :ref:`SSL connection info object <config_http_filters_lua_ssl_socket_info>`.\n\n.. _config_http_filters_lua_ssl_socket_info:\n\nSSL connection object API\n-------------------------\n\npeerCertificatePresented()\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  if downstreamSslConnection:peerCertificatePresented() then\n    print("peer certificate is presented")\n  end\n\nReturns a bool representing whether the peer certificate is presented.\n\npeerCertificateValidated()\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  if downstreamSslConnection:peerCertificateValidated() then\n    print("peer certificate is validated")\n  end\n\nReturns bool whether the peer certificate was validated.\n\nuriSanLocalCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  -- For example, uriSanLocalCertificate contains {"san1", "san2"}\n  local certs = downstreamSslConnection:uriSanLocalCertificate()\n\n  -- The following prints san1,san2\n  handle:logTrace(table.concat(certs, ","))\n\nReturns the URIs (as a table) in the SAN field of the local certificate. Returns an empty table if\nthere is no local certificate, or no SAN field, or no URI SAN entries.\n\nsha256PeerCertificateDigest()\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:sha256PeerCertificateDigest()\n\nReturns the SHA256 digest of the peer certificate. Returns ``""`` if there is no peer certificate\nwhich can happen in TLS (non-mTLS) connections.\n\nserialNumberPeerCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:serialNumberPeerCertificate()\n\nReturns the serial number field of the peer certificate. 
Returns ``""`` if there is no peer\ncertificate, or no serial number.\n\nissuerPeerCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:issuerPeerCertificate()\n\nReturns the issuer field of the peer certificate in RFC 2253 format. Returns ``""`` if there is no\npeer certificate, or no issuer.\n\nsubjectPeerCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:subjectPeerCertificate()\n\nReturns the subject field of the peer certificate in RFC 2253 format. Returns ``""`` if there is no\npeer certificate, or no subject.\n\nuriSanPeerCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:uriSanPeerCertificate()\n\nReturns the URIs (as a table) in the SAN field of the peer certificate. Returns an empty table if\nthere is no peer certificate, or no SAN field, or no URI SAN entries.\n\nsubjectLocalCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:subjectLocalCertificate()\n\nReturns the subject field of the local certificate in RFC 2253 format. Returns ``""`` if there is no\nlocal certificate, or no subject.\n\nurlEncodedPemEncodedPeerCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:urlEncodedPemEncodedPeerCertificate()\n\nReturns the URL-encoded PEM-encoded representation of the peer certificate. Returns ``""`` if there\nis no peer certificate or encoding fails.\n\nurlEncodedPemEncodedPeerCertificateChain()\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:urlEncodedPemEncodedPeerCertificateChain()\n\nReturns the URL-encoded PEM-encoded representation of the full peer certificate chain including the\nleaf certificate. Returns ``""`` if there is no peer certificate or encoding fails.\n\ndnsSansPeerCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. 
code-block:: lua\n\n  downstreamSslConnection:dnsSansPeerCertificate()\n\nReturns the DNS entries (as a table) in the SAN field of the peer certificate. Returns an empty\ntable if there is no peer certificate, or no SAN field, or no DNS SAN entries.\n\ndnsSansLocalCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:dnsSansLocalCertificate()\n\nReturns the DNS entries (as a table) in the SAN field of the local certificate. Returns an empty\ntable if there is no local certificate, or no SAN field, or no DNS SAN entries.\n\nvalidFromPeerCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:validFromPeerCertificate()\n\nReturns the time (timestamp-since-epoch in seconds) that the peer certificate was issued and should\nbe considered valid from. Returns ``0`` if there is no peer certificate.\n\nIn Lua, we usually use ``os.time(os.date(\"!*t\"))`` to get current timestamp-since-epoch in seconds.\n\nexpirationPeerCertificate()\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:expirationPeerCertificate()\n\nReturns the time (timestamp-since-epoch in seconds) that the peer certificate expires and should not\nbe considered valid after. Returns ``0`` if there is no peer certificate.\n\nIn Lua, we usually use ``os.time(os.date(\"!*t\"))`` to get current timestamp-since-epoch in seconds.\n\nsessionId()\n^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:sessionId()\n\nReturns the hex-encoded TLS session ID as defined in RFC 5246.\n\nciphersuiteId()\n^^^^^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:ciphersuiteId()\n\nReturns the standard ID (hex-encoded) for the ciphers used in the established TLS connection.\nReturns ``\"0xffff\"`` if there is no current negotiated ciphersuite.\n\nciphersuiteString()\n^^^^^^^^^^^^^^^^^^^\n\n.. 
code-block:: lua\n\n  downstreamSslConnection:ciphersuiteString()\n\nReturns the OpenSSL name for the set of ciphers used in the established TLS connection. Returns\n``""`` if there is no current negotiated ciphersuite.\n\ntlsVersion()\n^^^^^^^^^^^^\n\n.. code-block:: lua\n\n  downstreamSslConnection:tlsVersion()\n\nReturns the TLS version (e.g., TLSv1.2, TLSv1.3) used in the established TLS connection.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/oauth2_filter.rst",
    "content": "\n.. _config_http_filters_oauth:\n\nOAuth2\n======\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.oauth2.v3alpha.OAuth2>`\n* This filter should be configured with the name *envoy.filters.http.oauth2*.\n\n.. attention::\n\n  The OAuth2 filter is currently under active development.\n\nExample configuration\n---------------------\n\n.. code-block::\n\n   http_filters:\n   - name: oauth2\n     typed_config:\n       \"@type\": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2\n       token_endpoint:\n         cluster: oauth\n         uri: oauth.com/token\n         timeout: 3s\n       authorization_endpoint: https://oauth.com/oauth/authorize/\n       redirect_uri: \"%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback\"\n       redirect_path_matcher:\n         path:\n           exact: /callback\n       signout_path:\n         path:\n           exact: /signout\n      credentials:\n        client_id: foo\n        token_secret:\n          name: token\n        hmac_secret:\n          name: hmac\n      timeout: 3s\n   - name: envoy.router\n\n  clusters:\n  - name: service\n    ...\n  - name: auth\n    connect_timeout: 5s\n    type: LOGICAL_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: auth\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address: { socket_address: { address: auth.example.com, port_value: 443 }}\n    tls_context: { sni: auth.example.com }\n\nNotes\n-----\n\nThis module does not currently provide much Cross-Site-Request-Forgery protection for the redirect loop\nto the OAuth server and back.\n\nThe service must be served over HTTPS for this filter to work, as the cookies use `;secure`.\n\nStatistics\n----------\n\nThe OAuth filter outputs statistics in the *<stat_prefix>.* namespace.\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  oauth_failure, Counter, Total requests that were denied.\n  oauth_success, Counter, Total requests that were allowed.\n  oauth_unauthorization_rq, Counter, Total unauthorized requests.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/on_demand_updates_filter.rst",
    "content": ".. _config_http_filters_on_demand:\n\nOn-demand VHDS and S/RDS Updates\n================================\n\nThe on demand filter can be used to support either on demand VHDS or S/RDS update if configured in the filter chain.\n\nThe on-demand update filter can be used to request a :ref:`virtual host <envoy_v3_api_msg_config.route.v3.VirtualHost>`\ndata if it's not already present in the :ref:`Route Configuration <envoy_v3_api_msg_config.route.v3.RouteConfiguration>`. The\ncontents of the *Host* or *:authority* header is used to create the on-demand request. For an on-demand\nrequest to be created, :ref:`VHDS <envoy_v3_api_field_config.route.v3.RouteConfiguration.vhds>` must be enabled and either *Host*\nor *:authority* header be present.\n\nThe on-demand update filter can also be used to request a *Route Configuration* data if RouteConfiguration is specified to be \nloaded on demand in the :ref:`Scoped RouteConfiguration <envoy_v3_api_msg_config.route.v3.ScopedRouteConfiguration>`. \nThe contents of the HTTP header is used to find the scope and create the on-demand request. \n\nOn-demand VHDS and on-demand S/RDS can not be used at the same time at this point.\n\nConfiguration\n-------------\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.on_demand.v3.OnDemand>`\n* This filter should be configured with the name *envoy.filters.http.on_demand*.\n* The filter should be placed before *envoy.filters.http.router* filter in the HttpConnectionManager's filter chain.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/original_src_filter.rst",
    "content": ".. _config_http_filters_original_src:\n\nOriginal Source\n===============\n\n* :ref:`HTTP filter v3 API reference <envoy_v3_api_msg_extensions.filters.http.original_src.v3.OriginalSrc>`\n* This filter should be configured with the name *envoy.filters.http.original_src*.\n\nThe original source http filter replicates the downstream remote address of the connection on\nthe upstream side of Envoy. For example, if a downstream connection connects to Envoy with IP\naddress ``10.1.2.3``, then Envoy will connect to the upstream with source IP ``10.1.2.3``. The\ndownstream remote address is determined based on the logic for the \"trusted client address\"\noutlined in :ref:`XFF <config_http_conn_man_headers_x-forwarded-for>`.\n\n\nNote that the filter is intended to be used in conjunction with the\n:ref:`Router <config_http_filters_router>` filter. In particular, it must run prior to the router\nfilter so that it may add the desired source IP to the state of the filter chain.\n\nIP Version Support\n------------------\nThe filter supports both IPv4 and IPv6 as addresses. Note that the upstream connection must support\nthe version used.\n\nExtra Setup\n-----------\n\nThe downstream remote address used will likely be globally routable. By default, packets returning\nfrom the upstream host to that address will not route through Envoy. The network must be configured\nto forcefully route any traffic whose IP was replicated by Envoy back through the Envoy host.\n\nIf Envoy and the upstream are on the same host -- e.g. in an sidecar deployment --, then iptables\nand routing rules can be used to ensure correct behaviour. The filter has an unsigned integer\nconfiguration,\n:ref:`mark <envoy_v3_api_field_extensions.filters.http.original_src.v3.OriginalSrc.mark>`. Setting\nthis to *X* causes Envoy to *mark* all upstream packets originating from this http with value\n*X*. 
Note that if\n:ref:`mark <envoy_v3_api_field_extensions.filters.http.original_src.v3.OriginalSrc.mark>` is set\nto 0, Envoy will not mark upstream packets.\n\nWe can use the following set of commands to ensure that all ipv4 and ipv6 traffic marked with *X*\n(assumed to be 123 in the example) routes correctly. Note that this example assumes that *eth0* is\nthe default outbound interface.\n\n.. code-block:: text\n\n  iptables  -t mangle -I PREROUTING -m mark     --mark 123 -j CONNMARK --save-mark\n  iptables  -t mangle -I OUTPUT     -m connmark --mark 123 -j CONNMARK --restore-mark\n  ip6tables -t mangle -I PREROUTING -m mark     --mark 123 -j CONNMARK --save-mark\n  ip6tables -t mangle -I OUTPUT     -m connmark --mark 123 -j CONNMARK --restore-mark\n  ip rule add fwmark 123 lookup 100\n  ip route add local 0.0.0.0/0 dev lo table 100\n  ip -6 rule add fwmark 123 lookup 100\n  ip -6 route add local ::/0 dev lo table 100\n  echo 1 > /proc/sys/net/ipv4/conf/eth0/route_localnet\n\n\nExample HTTP configuration\n------------------------------\n\nThe following example configures Envoy to use the original source for all connections made on port\n8888. All upstream packets are marked with 123.\n\n.. code-block:: yaml\n\n  http_filters:\n    - name: envoy.filters.http.original_src\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.original_src.v3.OriginalSrc\n        mark: 123\n    - name: envoy.filters.http.router\n      typed_config: {}\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/rate_limit_filter.rst",
    "content": ".. _config_http_filters_rate_limit:\n\nRate limit\n==========\n\n* Global rate limiting :ref:`architecture overview <arch_overview_global_rate_limit>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.ratelimit.v3.RateLimit>`\n* This filter should be configured with the name *envoy.filters.http.ratelimit*.\n\nThe HTTP rate limit filter will call the rate limit service when the request's route or virtual host\nhas one or more :ref:`rate limit configurations<envoy_v3_api_field_config.route.v3.VirtualHost.rate_limits>`\nthat match the filter stage setting. The :ref:`route<envoy_v3_api_field_config.route.v3.RouteAction.include_vh_rate_limits>`\ncan optionally include the virtual host rate limit configurations. More than one configuration can\napply to a request. Each configuration results in a descriptor being sent to the rate limit service.\n\nIf the rate limit service is called, and the response for any of the descriptors is over limit, a\n429 response is returned. The rate limit filter also sets the :ref:`x-envoy-ratelimited<config_http_filters_router_x-envoy-ratelimited>` header.\n\nIf there is an error in calling rate limit service or rate limit service returns an error and :ref:`failure_mode_deny <envoy_v3_api_field_extensions.filters.http.ratelimit.v3.RateLimit.failure_mode_deny>` is \nset to true, a 500 response is returned.\n\n.. _config_http_filters_rate_limit_composing_actions:\n\nComposing Actions\n-----------------\n\nEach :ref:`rate limit action <envoy_v3_api_msg_config.route.v3.RateLimit>` on the route or\nvirtual host populates a descriptor entry. A vector of descriptor entries compose a descriptor. To\ncreate more complex rate limit descriptors, actions can be composed in any order. The descriptor\nwill be populated in the order the actions are specified in the configuration.\n\nExample 1\n^^^^^^^^^\n\nFor example, to generate the following descriptor:\n\n.. 
code-block:: cpp\n\n  (\"generic_key\", \"some_value\")\n  (\"source_cluster\", \"from_cluster\")\n\nThe configuration would be:\n\n.. code-block:: yaml\n\n  actions:\n      - {source_cluster: {}}\n      - {generic_key: {descriptor_value: some_value}}\n\nExample 2\n^^^^^^^^^\n\nIf an action doesn't append a descriptor entry, no descriptor is generated for\nthe configuration.\n\nFor the following configuration:\n\n.. code-block:: yaml\n\n  actions:\n      - {source_cluster: {}}\n      - {remote_address: {}}\n      - {generic_key: {descriptor_value: some_value}}\n\n\nIf a request did not set :ref:`x-forwarded-for<config_http_conn_man_headers_x-forwarded-for>`,\nno descriptor is generated.\n\nIf a request sets :ref:`x-forwarded-for<config_http_conn_man_headers_x-forwarded-for>`, the\nfollowing descriptor is generated:\n\n.. code-block:: cpp\n\n  (\"generic_key\", \"some_value\")\n  (\"remote_address\", \"<trusted address from x-forwarded-for>\")\n  (\"source_cluster\", \"from_cluster\")\n\n.. _config_http_filters_rate_limit_rate_limit_override:\n\nRate Limit Override\n-------------------\n\nA :ref:`rate limit action <envoy_v3_api_msg_config.route.v3.RateLimit>` can optionally contain\na :ref:`limit override <envoy_v3_api_msg_config.route.v3.RateLimit.Override>`. The limit value\nwill be appended to the descriptor produced by the action and sent to the ratelimit service,\noverriding the static service configuration.\n\nThe override can be configured to be taken from the :ref:`Dynamic Metadata\n<envoy_v3_api_msg_config.core.v3.Metadata>` under a specified :ref:`key\n<envoy_v3_api_msg_config.type.metadata.v3.MetadataKey>`. If the value is misconfigured\nor key does not exist, the override configuration is ignored.\n\nExample 3\n^^^^^^^^^\n\nThe following configuration\n\n.. code-block:: yaml\n\n  actions:\n      - {generic_key: {descriptor_value: some_value}}\n  limit:\n     metadata_key:\n         key: test.filter.key\n         path:\n             - key: test\n\n.. 
_config_http_filters_rate_limit_override_dynamic_metadata:\n\nWill lookup the value of the dynamic metadata. The value must be a structure with integer field\n\"requests_per_unit\" and a string field \"unit\" which is parseable to :ref:`RateLimitUnit enum\n<envoy_v3_api_enum_type.v3.RateLimitUnit>`. For example, with the following dynamic metadata\nthe rate limit override of 42 requests per hour will be appended to the rate limit descriptor.\n\n.. code-block:: yaml\n\n  test.filter.key:\n      test:\n          requests_per_unit: 42\n          unit: HOUR\n\nStatistics\n----------\n\nThe rate limit filter outputs statistics in the *cluster.<route target cluster>.ratelimit.* namespace.\n429 responses are emitted to the normal cluster :ref:`dynamic HTTP statistics\n<config_cluster_manager_cluster_stats_dynamic_http>`.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  ok, Counter, Total under limit responses from the rate limit service\n  error, Counter, Total errors contacting the rate limit service\n  over_limit, Counter, total over limit responses from the rate limit service\n  failure_mode_allowed, Counter, \"Total requests that were error(s) but were allowed through because\n  of :ref:`failure_mode_deny <envoy_v3_api_field_extensions.filters.http.ratelimit.v3.RateLimit.failure_mode_deny>` set to false.\"\n\nRuntime\n-------\n\nThe HTTP rate limit filter supports the following runtime settings:\n\nratelimit.http_filter_enabled\n  % of requests that will call the rate limit service. Defaults to 100.\n\nratelimit.http_filter_enforcing\n  % of requests that will have the rate limit service decision enforced. Defaults to 100.\n  This can be used to test what would happen before fully enforcing the outcome.\n\nratelimit.<route_key>.http_filter_enabled\n  % of requests that will call the rate limit service for a given *route_key* specified in the\n  :ref:`rate limit configuration <envoy_v3_api_msg_config.route.v3.RateLimit>`. 
Defaults to 100.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/rbac_filter.rst",
    "content": ".. _config_http_filters_rbac:\n\nRole Based Access Control (RBAC) Filter\n=======================================\n\nThe RBAC filter is used to authorize actions (permissions) by identified downstream clients\n(principals). This is useful to explicitly manage callers to an application and protect it from\nunexpected or forbidden agents. The filter supports configuration with either a safe-list (ALLOW) or\nblock-list (DENY) set of policies based off properties of the connection (IPs, ports, SSL subject)\nas well as the incoming request's HTTP headers. This filter also supports policy in both enforcement\nand shadow mode, shadow mode won't effect real users, it is used to test that a new set of policies\nwork before rolling out to production.\n\nWhen a request is denied, the :ref:`RESPONSE_CODE_DETAILS<config_access_log_format_response_code_details>`\nwill include the name of the matched policy that caused the deny in the format of `rbac_access_denied_matched_policy[policy_name]`\n(policy_name will be `none` if no policy matched), this helps to distinguish the deny from Envoy RBAC\nfilter and the upstream backend.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.rbac.v3.RBAC>`\n* This filter should be configured with the name *envoy.filters.http.rbac*.\n\nPer-Route Configuration\n-----------------------\n\nThe RBAC filter configuration can be overridden or disabled on a per-route basis by providing a\n:ref:`RBACPerRoute <envoy_v3_api_msg_extensions.filters.http.rbac.v3.RBACPerRoute>` configuration on\nthe virtual host, route, or weighted cluster.\n\nStatistics\n----------\n\nThe RBAC filter outputs statistics in the *http.<stat_prefix>.rbac.* namespace. The :ref:`stat prefix\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stat_prefix>` comes from the\nowning HTTP connection manager.\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  allowed, Counter, Total requests that were allowed access\n  denied, Counter, Total requests that were denied access\n  shadow_allowed, Counter, Total requests that would be allowed access by the filter's shadow rules\n  shadow_denied, Counter, Total requests that would be denied access by the filter's shadow rules\n  logged, Counter, Total requests that should be logged\n  not_logged, Counter, Total requests that should not be logged\n\n.. _config_http_filters_rbac_dynamic_metadata:\n\nDynamic Metadata\n----------------\n\nThe RBAC filter emits the following dynamic metadata.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  shadow_effective_policy_id, string, The effective shadow policy ID matching the action (if any).\n  shadow_engine_result, string, The engine result for the shadow rules (i.e. either `allowed` or `denied`).\n  access_log_hint, boolean, Whether the request should be logged. This metadata is shared and set under the key namespace 'envoy.common' (See :ref:`Shared Dynamic Metadata<shared_dynamic_metadata>`).\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/router_filter.rst",
    "content": ".. _config_http_filters_router:\n\nRouter\n======\n\nThe router filter implements HTTP forwarding. It will be used in almost all HTTP proxy scenarios\nthat Envoy is deployed for. The filter's main job is to follow the instructions specified in the\nconfigured :ref:`route table <envoy_v3_api_msg_config.route.v3.RouteConfiguration>`. In addition to forwarding and\nredirection, the filter also handles retry, statistics, etc.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.router.v3.Router>`\n* This filter should be configured with the name *envoy.filters.http.router*.\n\n.. _config_http_filters_router_headers_consumed:\n\nHTTP headers (consumed from downstreams)\n----------------------------------------\n\nThe router consumes and sets various HTTP headers both on the egress/request path as well as on the\ningress/response path. They are documented in this section.\n\n.. contents::\n  :local:\n\n.. _config_http_filters_router_x-envoy-max-retries:\n\nx-envoy-max-retries\n^^^^^^^^^^^^^^^^^^^\nIf a :ref:`route config retry policy <envoy_v3_api_field_config.route.v3.RouteAction.retry_policy>` or a\n:ref:`virtual host retry policy <envoy_v3_api_field_config.route.v3.VirtualHost.retry_policy>` is in place, Envoy will default to retrying\none time unless explicitly specified. The number of retries can be explicitly set in the virtual host retry config,\nthe route retry config, or by using this header. If this header is used, its value takes precedence over the number of\nretries set in either retry policy. 
If a retry policy is not configured and :ref:`config_http_filters_router_x-envoy-retry-on`\nor :ref:`config_http_filters_router_x-envoy-retry-grpc-on` headers are not specified, Envoy will not retry a failed request.\n\nA few notes on how Envoy does retries:\n\n* The route timeout (set via :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or the\n  :ref:`timeout <envoy_v3_api_field_config.route.v3.RouteAction.timeout>` in route configuration or set via\n  `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_  by specifying\n  :ref:`max_grpc_timeout <envoy_v3_api_field_config.route.v3.RouteAction.timeout>` in route configuration) **includes** all\n  retries. Thus if the request timeout is set to 3s, and the first request attempt takes 2.7s, the\n  retry (including back-off) has .3s to complete. This is by design to avoid an exponential\n  retry/timeout explosion.\n* By default, Envoy uses a fully jittered exponential back-off algorithm for retries with a default base\n  interval of 25ms. Given a base interval B and retry number N, the back-off for the retry is in\n  the range :math:`\\big[0, (2^N-1)B\\big)`. For example, given the default interval, the first retry\n  will be delayed randomly by 0-24ms, the 2nd by 0-74ms, the 3rd by 0-174ms, and so on. The\n  interval is capped at a maximum interval, which defaults to 10 times the base interval (250ms).\n  The default base interval (and therefore the maximum interval) can be manipulated by setting the\n  upstream.base_retry_backoff_ms runtime parameter. The back-off intervals can also be modified\n  by configuring the retry policy's\n  :ref:`retry back-off <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_back_off>`.\n* Envoy can also be configured to use feedback from the upstream server to decide the interval between\n  retries. Response headers like ``Retry-After`` or ``X-RateLimit-Reset`` instruct the client how long\n  to wait before re-trying. 
The retry policy's\n  :ref:`rate limited retry back off <envoy_v3_api_field_config.route.v3.RetryPolicy.rate_limited_retry_back_off>`\n  strategy can be configured to expect a particular header, and if that header is present in the response Envoy\n  will use its value to decide the back-off. If the header is not present, or if it cannot be parsed\n  successfully, Envoy will use the default exponential back-off algorithm instead.\n\n.. _config_http_filters_router_x-envoy-retry-on:\n\nx-envoy-retry-on\n^^^^^^^^^^^^^^^^\n\nSetting this header will cause Envoy to attempt to retry failed requests (number\nof retries defaults to 1 and can be controlled by :ref:`x-envoy-max-retries\n<config_http_filters_router_x-envoy-max-retries>` header or the :ref:`route config retry policy\n<envoy_v3_api_field_config.route.v3.RouteAction.retry_policy>` or the :ref:`virtual host retry policy <envoy_v3_api_field_config.route.v3.VirtualHost.retry_policy>`).\nThe value to which the x-envoy-retry-on header is set indicates the retry policy. One or more policies\ncan be specified using a ',' delimited list. The supported policies are:\n\n5xx\n  Envoy will attempt a retry if the upstream server responds with any 5xx response code, or does not\n  respond at all (disconnect/reset/read timeout). (Includes *connect-failure* and *refused-stream*)\n\n  * **NOTE:** Envoy will not retry when a request exceeds\n    :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` (resulting in a 504 error\n    code). 
Use :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` if you want\n    to retry when individual attempts take too long.\n    :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` is an outer time limit for a\n    request, including any retries that take place.\n\ngateway-error\n  This policy is similar to the *5xx* policy but will only retry requests that result in a 502, 503,\n  or 504.\n\nreset\n  Envoy will attempt a retry if the upstream server does not respond at all (disconnect/reset/read timeout.)\n\nconnect-failure\n  Envoy will attempt a retry if a request is failed because of a connection failure to the upstream\n  server (connect timeout, etc.). (Included in *5xx*)\n\n  * **NOTE:** A connection failure/timeout is at the TCP level, not the request level. This does not\n    include upstream request timeouts specified via\n    :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or via :ref:`route\n    configuration <envoy_v3_api_field_config.route.v3.RouteAction.retry_policy>` or via\n    :ref:`virtual host retry policy <envoy_v3_api_field_config.route.v3.VirtualHost.retry_policy>`.\n\n.. _config_http_filters_router_retry_policy-envoy-ratelimited:\n\nenvoy-ratelimited\n  Envoy will retry if the header :ref:`x-envoy-ratelimited<config_http_filters_router_x-envoy-ratelimited>`\n  is present.\n\nretriable-4xx\n  Envoy will attempt a retry if the upstream server responds with a retriable 4xx response code.\n  Currently, the only response code in this category is 409.\n\n  * **NOTE:** Be careful turning on this retry type. There are certain cases where a 409 can indicate\n    that an optimistic locking revision needs to be updated. Thus, the caller should not retry and\n    needs to read then attempt another write. 
If a retry happens in this type of case it will always\n    fail with another 409.\n\nrefused-stream\n  Envoy will attempt a retry if the upstream server resets the stream with a REFUSED_STREAM error\n  code. This reset type indicates that a request is safe to retry. (Included in *5xx*)\n\nretriable-status-codes\n  Envoy will attempt a retry if the upstream server responds with any response code matching one defined\n  in either :ref:`the retry policy <envoy_v3_api_field_config.route.v3.RetryPolicy.retriable_status_codes>`\n  or in the :ref:`config_http_filters_router_x-envoy-retriable-status-codes` header.\n\nretriable-headers\n  Envoy will attempt a retry if the upstream server response includes any headers matching in either\n  :ref:`the retry policy <envoy_v3_api_field_config.route.v3.RetryPolicy.retriable_headers>` or in the\n  :ref:`config_http_filters_router_x-envoy-retriable-header-names` header.\n\nThe number of retries can be controlled via the\n:ref:`config_http_filters_router_x-envoy-max-retries` header or via the :ref:`route\nconfiguration <envoy_v3_api_field_config.route.v3.RouteAction.retry_policy>` or via the\n:ref:`virtual host retry policy <envoy_v3_api_field_config.route.v3.VirtualHost.retry_policy>`.\n\nNote that retry policies can also be applied at the :ref:`route level\n<envoy_v3_api_field_config.route.v3.RouteAction.retry_policy>` or the\n:ref:`virtual host level <envoy_v3_api_field_config.route.v3.VirtualHost.retry_policy>`.\n\nBy default, Envoy will *not* perform retries unless you've configured them per above.\n\n.. 
_config_http_filters_router_x-envoy-retry-grpc-on:\n\nx-envoy-retry-grpc-on\n^^^^^^^^^^^^^^^^^^^^^\nSetting this header will cause Envoy to attempt to retry failed requests (number of retries defaults\nto 1, and can be controlled by :ref:`x-envoy-max-retries <config_http_filters_router_x-envoy-max-retries>`\nheader or the :ref:`route config retry policy <envoy_v3_api_field_config.route.v3.RouteAction.retry_policy>`) or the\n:ref:`virtual host retry policy <envoy_v3_api_field_config.route.v3.VirtualHost.retry_policy>`.\ngRPC retries are currently only supported for gRPC status codes in response headers. gRPC status codes in\ntrailers will not trigger retry logic. One or more policies can be specified  using a ',' delimited\nlist. The supported policies are:\n\ncancelled\n  Envoy will attempt a retry if the gRPC status code in the response headers is \"cancelled\" (1)\n\ndeadline-exceeded\n  Envoy will attempt a retry if the gRPC status code in the response headers is \"deadline-exceeded\" (4)\n\ninternal\n  Envoy will attempt to retry if the gRPC status code in the response headers is \"internal\" (13)\n\nresource-exhausted\n  Envoy will attempt a retry if the gRPC status code in the response headers is \"resource-exhausted\" (8)\n\nunavailable\n  Envoy will attempt a retry if the gRPC status code in the response headers is \"unavailable\" (14)\n\nAs with the x-envoy-retry-grpc-on header, the number of retries can be controlled via the\n:ref:`config_http_filters_router_x-envoy-max-retries` header\n\nNote that retry policies can also be applied at the :ref:`route level\n<envoy_v3_api_field_config.route.v3.RouteAction.retry_policy>` or the\n:ref:`virtual host level <envoy_v3_api_field_config.route.v3.VirtualHost.retry_policy>`.\n\nBy default, Envoy will *not* perform retries unless you've configured them per above.\n\n.. 
_config_http_filters_router_x-envoy-retriable-header-names:\n\nx-envoy-retriable-header-names\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nSetting this header informs Envoy about what response headers should be considered retriable. It is used\nin conjunction with the :ref:`retriable-headers <config_http_filters_router_x-envoy-retry-on>` retry policy.\nWhen the corresponding retry policy is set, the response headers provided by this list header value will be\nconsidered retriable in addition to the response headers enabled for retry through other retry policies.\n\nThe list is a comma-separated list of header names: \"X-Upstream-Retry,X-Try-Again\" would cause any upstream\nresponses containing either one of the specified headers to be retriable if 'retriable-headers' retry policy\nis enabled. Header names are case-insensitive.\n\nOnly the names of retriable response headers can be specified via the request header. A more sophisticated\nretry policy based on the response headers can be specified by using arbitrary header matching rules\nvia :ref:`retry policy configuration <envoy_v3_api_field_config.route.v3.RetryPolicy.retriable_headers>`.\n\nThis header will only be honored for requests from internal clients.\n\n.. _config_http_filters_router_x-envoy-retriable-status-codes:\n\nx-envoy-retriable-status-codes\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nSetting this header informs Envoy about what status codes should be considered retriable when used in\nconjunction with the :ref:`retriable-status-code <config_http_filters_router_x-envoy-retry-on>` retry policy.\nWhen the corresponding retry policy is set, the list of retriable status codes will be considered retriable\nin addition to the status codes enabled for retry through other retry policies.\n\nThe list is a comma delimited list of integers: \"409\" would cause 409 to be considered retriable, while \"504,409\"\nwould consider both 504 and 409 retriable.\n\nThis header will only be honored for requests from internal clients.\n\n.. 
_config_http_filters_router_x-envoy-upstream-alt-stat-name:\n\nx-envoy-upstream-alt-stat-name\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSetting this header will cause Envoy to emit upstream response code/timing statistics to a dual stat tree.\nThis can be useful for application level categories that Envoy doesn't know about. The output tree\nis documented :ref:`here <config_cluster_manager_cluster_stats_alt_tree>`.\n\nThis should not be confused with :ref:`alt_stat_name <envoy_v3_api_field_config.cluster.v3.Cluster.alt_stat_name>` which\nis specified while defining the cluster and when provided specifies an alternative name for the\ncluster at the root of the statistic tree.\n\n.. _config_http_filters_router_x-envoy-upstream-rq-timeout-alt-response:\n\nx-envoy-upstream-rq-timeout-alt-response\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSetting this header will cause Envoy to set a 204 response code (instead of 504) in the event of a request timeout.\nThe actual value of the header is ignored; only its presence is considered. See also \n:ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`.\n\n.. _config_http_filters_router_x-envoy-upstream-rq-timeout-ms:\n\nx-envoy-upstream-rq-timeout-ms\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSetting this header will cause Envoy to override the :ref:`route configuration timeout\n<envoy_v3_api_field_config.route.v3.RouteAction.timeout>` or gRPC client timeout set via `grpc-timeout header\n<https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_  by specifying :ref:`max_grpc_timeout\n<envoy_v3_api_field_config.route.v3.RouteAction.timeout>`. The timeout must be specified in millisecond\nunits. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`.\n\n.. 
_config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms:\n\nx-envoy-upstream-rq-per-try-timeout-ms\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSetting this header will cause Envoy to set a *per try* timeout on routed requests.\nIf a global route timeout is configured, this timeout must be less than the global route\ntimeout (see :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`) or it is ignored.\nThis allows a caller to set a tight per try timeout to allow for retries while maintaining a\nreasonable overall timeout. This timeout only applies before any part of the response is sent to\nthe downstream, which normally happens after the upstream has sent response headers.\n\nx-envoy-hedge-on-per-try-timeout\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSetting this header will cause Envoy to use a request hedging strategy in the case of a per try timeout.\nThis overrides the value set in the :ref:`route configuration\n<envoy_v3_api_field_config.route.v3.HedgePolicy.hedge_on_per_try_timeout>`. This means that a retry\nwill be issued without resetting the original request, leaving multiple upstream requests\nin flight.\n\nThe value of the header should be \"true\" or \"false\", and is ignored if invalid.\n\n.. 
_config_http_filters_router_x-envoy-decorator-operation:\n\nx-envoy-decorator-operation\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe value of this header will override any locally defined operation (span) name on the\nserver span generated by the tracing mechanism.\n\nHTTP response headers consumed from upstream\n--------------------------------------------\n\nx-envoy-decorator-operation\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe value of this header will override any locally defined operation (span) name on the\nclient span generated by the tracing mechanism.\n\nx-envoy-upstream-canary\n^^^^^^^^^^^^^^^^^^^^^^^\n\nIf an upstream host sets this header, the router will use it to generate canary specific statistics.\nThe output tree is documented :ref:`here <config_cluster_manager_cluster_stats_dynamic_http>`.\n\n.. _config_http_filters_router_x-envoy-immediate-health-check-fail:\n\nx-envoy-immediate-health-check-fail\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIf the upstream host returns this header (set to any value), Envoy will immediately assume the\nupstream host has failed :ref:`active health checking <arch_overview_health_checking>` (if the\ncluster has been :ref:`configured <config_cluster_manager_cluster_hc>` for active health checking).\nThis can be used to fast fail an upstream host via standard data plane processing without waiting\nfor the next health check interval. The host can become healthy again via standard active health\nchecks. See the :ref:`health checking overview <arch_overview_health_checking>` for more\ninformation.\n\n.. _config_http_filters_router_x-envoy-ratelimited:\n\nx-envoy-ratelimited\n^^^^^^^^^^^^^^^^^^^\n\nIf this header is set by upstream, Envoy will not retry unless the retry policy\n:ref:`envoy-ratelimited<config_http_filters_router_retry_policy-envoy-ratelimited>`\nis enabled. Currently, the value of the header is not looked at, only its\npresence. 
This header is set by :ref:`rate limit\nfilter<config_http_filters_rate_limit>` when the request is rate limited.\n\n.. _config_http_filters_router_headers_set:\n\nHTTP request headers set on upstream calls\n------------------------------------------\n\nThe router sets various HTTP headers both on the egress/request path as well as on the\ningress/response path. They are documented in this section.\n\n.. contents::\n  :local:\n\n.. _config_http_filters_router_x-envoy-attempt-count:\n\nx-envoy-attempt-count\n^^^^^^^^^^^^^^^^^^^^^\n\nSent to the upstream to indicate which attempt the current request is in a series of retries. The value\nwill be \"1\" on the initial request, incrementing by one for each retry. Only set if the\n:ref:`include_request_attempt_count <envoy_v3_api_field_config.route.v3.VirtualHost.include_request_attempt_count>`\nflag is set to true.\n\nSent to the downstream to indicate how many upstream requests took place. The header will be absent if\nthe router did not send any upstream requests. The value will be \"1\" if only the original upstream\nrequest was sent, incrementing by one for each retry. Only set if the\n:ref:`include_attempt_count_in_response <envoy_v3_api_field_config.route.v3.VirtualHost.include_attempt_count_in_response>`\nflag is set to true.\n\n.. _config_http_filters_router_x-envoy-expected-rq-timeout-ms:\n\nx-envoy-expected-rq-timeout-ms\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThis is the time in milliseconds the router expects the request to be completed. Envoy sets this\nheader so that the upstream host receiving the request can make decisions based on the request\ntimeout, e.g., early exit. This is set on internal requests and is either taken from the\n:ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` header or the :ref:`route timeout\n<envoy_v3_api_field_config.route.v3.RouteAction.timeout>`, in that order.\n\n.. 
_config_http_filters_router_x-envoy-original-path:\n\nx-envoy-original-path\n^^^^^^^^^^^^^^^^^^^^^\n\nIf the route utilizes :ref:`prefix_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.prefix_rewrite>`\nor :ref:`regex_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.regex_rewrite>`,\nEnvoy will put the original path header in this header. This can be useful for logging and\ndebugging.\n\nHTTP response headers set on downstream responses\n-------------------------------------------------\n\n.. _config_http_filters_router_x-envoy-upstream-service-time:\n\nx-envoy-upstream-service-time\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nContains the time in milliseconds spent by the upstream host processing the request and the network\nlatency between Envoy and upstream host. This is useful if the client wants to determine service time\ncompared to network latency between client and Envoy. This header is set on responses.\n\n.. _config_http_filters_router_x-envoy-overloaded_set:\n\nx-envoy-overloaded\n^^^^^^^^^^^^^^^^^^\n\nEnvoy will set this header on the downstream response\nif a request was dropped due to either :ref:`maintenance mode\n<config_http_filters_router_runtime_maintenance_mode>` or upstream :ref:`circuit breaking\n<arch_overview_circuit_break>`.\n\n.. _config_http_filters_router_stats:\n\nStatistics\n----------\n\nThe router outputs many statistics in the cluster namespace (depending on the cluster specified in\nthe chosen route). See :ref:`here <config_cluster_manager_cluster_stats>` for more information.\n\nThe router filter outputs statistics in the *http.<stat_prefix>.* namespace. The :ref:`stat prefix\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stat_prefix>` comes from the\nowning HTTP connection manager.\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  no_route, Counter, Total requests that had no route and resulted in a 404\n  no_cluster, Counter, Total requests in which the target cluster did not exist and which by default result in a 503\n  rq_redirect, Counter, Total requests that resulted in a redirect response\n  rq_direct_response, Counter, Total requests that resulted in a direct response\n  rq_total, Counter, Total routed requests\n  rq_reset_after_downstream_response_started, Counter, Total requests that were reset after downstream response had started\n\n.. _config_http_filters_router_vcluster_stats:\n\nVirtual Clusters\n^^^^^^^^^^^^^^^^\n\nVirtual cluster statistics are output in the\n*vhost.<virtual host name>.vcluster.<virtual cluster name>.* namespace and include the following\nstatistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  upstream_rq_<\\*xx>, Counter, \"Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)\"\n  upstream_rq_<\\*>, Counter, \"Specific HTTP response codes (e.g., 201, 302, etc.)\"\n  upstream_rq_retry, Counter, Total request retries\n  upstream_rq_retry_limit_exceeded, Counter, Total requests not retried due to exceeding :ref:`the configured number of maximum retries <config_http_filters_router_x-envoy-max-retries>`\n  upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budgets <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.retry_budget>`\n  upstream_rq_retry_success, Counter, Total request retry successes\n  upstream_rq_time, Histogram, Request time milliseconds\n  upstream_rq_timeout, Counter, Total requests that timed out waiting for a response\n  upstream_rq_total, Counter, Total requests initiated by the router to the upstream\n\nRuntime\n-------\n\nThe router filter supports the following runtime settings:\n\nupstream.base_retry_backoff_ms\n  Base exponential retry back-off time. 
See :ref:`here <arch_overview_http_routing_retry>` and\n  :ref:`config_http_filters_router_x-envoy-max-retries` for more information. Defaults to 25ms.\n  The default maximum retry back-off time is 10 times this value.\n\n.. _config_http_filters_router_runtime_maintenance_mode:\n\nupstream.maintenance_mode.<cluster name>\n  % of requests that will result in an immediate 503 response. This overrides any routing behavior\n  for requests that would have been destined for <cluster name>. This can be used for load\n  shedding, failure injection, etc. Defaults to disabled.\n\nupstream.use_retry\n  % of requests that are eligible for retry. This configuration is checked before any other retry\n  configuration and can be used to fully disable retries across all Envoys if needed.\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/squash_filter.rst",
    "content": ".. _config_http_filters_squash:\n\nSquash\n======\n\nSquash is an HTTP filter which enables Envoy to integrate with Squash microservices debugger.\nCode: https://github.com/solo-io/squash, API Docs: https://squash.solo.io/\n\nOverview\n--------\n\nThe main use case for this filter is in a service mesh, where Envoy is deployed as a sidecar.\nOnce a request marked for debugging enters the mesh, the Squash Envoy filter reports its 'location'\nin the cluster to the Squash server - as there is a 1-1 mapping between Envoy sidecars and\napplication containers, the Squash server can find and attach a debugger to the application container.\nThe Squash filter also holds the request until a debugger is attached (or a timeout occurs). This\nenables developers (via Squash) to attach a native debugger to the container that will handle the\nrequest, before the request arrives at the application code, without any changes to the cluster.\n\nConfiguration\n-------------\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.squash.v3.Squash>`\n* This filter should be configured with the name *envoy.filters.http.squash*.\n\nHow it works\n------------\n\nWhen the Squash filter encounters a request containing the header 'x-squash-debug' it will:\n\n1. Delay the incoming request.\n2. Contact the Squash server and request the creation of a DebugAttachment\n\n   - On the Squash server side, Squash will attempt to attach a debugger to the application Envoy\n     proxies to. On success, it changes the state of the DebugAttachment\n     to attached.\n\n3. Wait until the Squash server updates the DebugAttachment object's state to attached (or\n   error state)\n4. Resume the incoming request\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/tap_filter.rst",
    "content": ".. _config_http_filters_tap:\n\nTap\n===\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.tap.v3.Tap>`\n* This filter should be configured with the name *envoy.filters.http.tap*.\n\n.. attention::\n\n  The tap filter is experimental and is currently under active development. There is currently a\n  very limited set of match conditions, output configuration, output sinks, etc. Capabilities will\n  be expanded over time and the configuration structures are likely to change.\n\nThe HTTP tap filter is used to interpose on and record HTTP traffic. At a high level, the\nconfiguration is composed of two pieces:\n\n1. :ref:`Match configuration <envoy_v3_api_msg_config.tap.v3.MatchPredicate>`: a list of\n   conditions under which the filter will match an HTTP request and begin a tap session.\n2. :ref:`Output configuration <envoy_v3_api_msg_config.tap.v3.OutputConfig>`: a list of output\n   sinks that the filter will write the matched and tapped data to.\n\nEach of these concepts will be covered incrementally over the course of several example\nconfigurations in the following section.\n\nExample configuration\n---------------------\n\nExample filter configuration:\n\n.. code-block:: yaml\n\n  name: envoy.filters.http.tap\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\n    common_config:\n      admin_config:\n        config_id: test_config_id\n\nThe previous snippet configures the filter for control via the :http:post:`/tap` admin handler.\nSee the following section for more details.\n\n.. _config_http_filters_tap_admin_handler:\n\nAdmin handler\n-------------\n\nWhen the HTTP filter specifies an :ref:`admin_config\n<envoy_v3_api_msg_extensions.common.tap.v3.AdminConfig>`, it is configured for admin control and\nthe :http:post:`/tap` admin handler will be installed. The admin handler can be used for live\ntapping and debugging of HTTP traffic. It works as follows:\n\n1. 
A POST request is used to provide a valid tap configuration. The POST request body can be either\n   the JSON or YAML representation of the :ref:`TapConfig\n   <envoy_v3_api_msg_config.tap.v3.TapConfig>` message.\n2. If the POST request is accepted, Envoy will stream :ref:`HttpBufferedTrace\n   <envoy_v3_api_msg_data.tap.v3.HttpBufferedTrace>` messages (serialized to JSON) until the admin\n   request is terminated.\n\nAn example POST body:\n\n.. code-block:: yaml\n\n  config_id: test_config_id\n  tap_config:\n    match_config:\n      and_match:\n        rules:\n          - http_request_headers_match:\n              headers:\n                - name: foo\n                  exact_match: bar\n          - http_response_headers_match:\n              headers:\n                - name: bar\n                  exact_match: baz\n    output_config:\n      sinks:\n        - streaming_admin: {}\n\nThe preceding configuration instructs the tap filter to match any HTTP requests in which a request\nheader ``foo: bar`` is present AND a response header ``bar: baz`` is present. If both of these\nconditions are met, the request will be tapped and streamed out the admin endpoint.\n\nAnother example POST body:\n\n.. code-block:: yaml\n\n  config_id: test_config_id\n  tap_config:\n    match_config:\n      or_match:\n        rules:\n          - http_request_headers_match:\n              headers:\n                - name: foo\n                  exact_match: bar\n          - http_response_headers_match:\n              headers:\n                - name: bar\n                  exact_match: baz\n    output_config:\n      sinks:\n        - streaming_admin: {}\n\nThe preceding configuration instructs the tap filter to match any HTTP requests in which a request\nheader ``foo: bar`` is present OR a response header ``bar: baz`` is present. If either of these\nconditions are met, the request will be tapped and streamed out the admin endpoint.\n\nAnother example POST body:\n\n.. 
code-block:: yaml\n\n  config_id: test_config_id\n  tap_config:\n    match_config:\n      any_match: true\n    output_config:\n      sinks:\n        - streaming_admin: {}\n\nThe preceding configuration instructs the tap filter to match any HTTP requests. All requests will\nbe tapped and streamed out the admin endpoint.\n\nAnother example POST body:\n\n.. code-block:: yaml\n\n  config_id: test_config_id\n  tap_config:\n    match_config:\n      and_match:\n        rules:\n          - http_request_headers_match:\n              headers:\n                - name: foo\n                  exact_match: bar\n          - http_request_generic_body_match:\n              patterns:\n                - string_match: test\n                - binary_match: 3q2+7w==\n              bytes_limit: 128\n          - http_response_generic_body_match:\n              patterns:\n                - binary_match: vu8=\n              bytes_limit: 64\n    output_config:\n      sinks:\n        - streaming_admin: {}\n\nThe preceding configuration instructs the tap filter to match any HTTP requests in which a request\nheader ``foo: bar`` is present AND request body contains string ``test`` and hex bytes ``deadbeef`` (``3q2+7w==`` in base64 format)\nin the first 128 bytes AND response body contains hex bytes ``beef`` (``vu8=`` in base64 format) in the first 64 bytes. If all of these\nconditions are met, the request will be tapped and streamed out to the admin endpoint.\n\n.. attention::\n\n  Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.\n  If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified\n  to scan only part of the http body.\n\nOutput format\n-------------\n\nEach output sink has an associated :ref:`format\n<envoy_v3_api_enum_config.tap.v3.OutputSink.Format>`. 
The default format is\n:ref:`JSON_BODY_AS_BYTES\n<envoy_v3_api_enum_value_config.tap.v3.OutputSink.Format.JSON_BODY_AS_BYTES>`. This format is\neasy to read JSON, but has the downside that body data is base64 encoded. In the case that the tap\nis known to be on human readable data, the :ref:`JSON_BODY_AS_STRING\n<envoy_v3_api_enum_value_config.tap.v3.OutputSink.Format.JSON_BODY_AS_STRING>` format may be\nmore user friendly. See the reference documentation for more information on other available formats.\n\nAn example of a streaming admin tap configuration that uses the :ref:`JSON_BODY_AS_STRING\n<envoy_v3_api_enum_value_config.tap.v3.OutputSink.Format.JSON_BODY_AS_STRING>` format:\n\n.. code-block:: yaml\n\n  config_id: test_config_id\n  tap_config:\n    match_config:\n      any_match: true\n    output_config:\n      sinks:\n        - format: JSON_BODY_AS_STRING\n          streaming_admin: {}\n\nBuffered body limits\n--------------------\n\nFor buffered taps, Envoy will limit the amount of body data that is tapped to avoid OOM situations.\nThe default limit is 1KiB for both received (request) and transmitted (response) data. This is\nconfigurable via the :ref:`max_buffered_rx_bytes\n<envoy_v3_api_field_config.tap.v3.OutputConfig.max_buffered_rx_bytes>` and\n:ref:`max_buffered_tx_bytes\n<envoy_v3_api_field_config.tap.v3.OutputConfig.max_buffered_tx_bytes>` settings.\n\n.. _config_http_filters_tap_streaming:\n\nStreaming matching\n------------------\n\nThe tap filter supports \"streaming matching.\" This means that instead of waiting until the end of\nthe request/response sequence, the filter will match incrementally as the request proceeds. 
I.e.,\nfirst the request headers will be matched, then the request body if present, then the request\ntrailers if present, then the response headers if present, etc.\n\nThe filter additionally supports optional streamed output which is governed by the :ref:`streaming\n<envoy_v3_api_field_config.tap.v3.OutputConfig.streaming>` setting. If this setting is false\n(the default), Envoy will emit :ref:`fully buffered traces\n<envoy_v3_api_msg_data.tap.v3.HttpBufferedTrace>`. Users are likely to find this format easier\nto interact with for simple cases.\n\nIn cases where fully buffered traces are not practical (e.g., very large request and responses,\nlong lived streaming APIs, etc.), the streaming setting can be set to true, and Envoy will emit\nmultiple :ref:`streamed trace segments <envoy_v3_api_msg_data.tap.v3.HttpStreamedTraceSegment>` for\neach tap. In this case, it is required that post-processing is performed to stitch all of the trace\nsegments back together into a usable form. Also note that binary protobuf is not a self-delimiting\nformat. If binary protobuf output is desired, the :ref:`PROTO_BINARY_LENGTH_DELIMITED\n<envoy_v3_api_enum_value_config.tap.v3.OutputSink.Format.PROTO_BINARY_LENGTH_DELIMITED>` output\nformat should be used.\n\nA static filter configuration to enable streaming output looks like:\n\n.. 
code-block:: yaml\n\n  name: envoy.filters.http.tap\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\n    common_config:\n      static_config:\n        match_config:\n          http_response_headers_match:\n            headers:\n              - name: bar\n                exact_match: baz\n        output_config:\n          streaming: true\n          sinks:\n            - format: PROTO_BINARY_LENGTH_DELIMITED\n              file_per_tap:\n                path_prefix: /tmp/\n\nThe previous configuration will match response headers, and as such will buffer request headers,\nbody, and trailers until a match can be determined (buffered data limits still apply as described\nin the previous section). If a match is determined, buffered data will be flushed in individual\ntrace segments and then the rest of the tap will be streamed as data arrives. The messages output\nmight look like this:\n\n.. code-block:: yaml\n\n  http_streamed_trace_segment:\n    trace_id: 1\n    request_headers:\n      headers:\n        - key: a\n          value: b\n\n.. code-block:: yaml\n\n  http_streamed_trace_segment:\n    trace_id: 1\n    request_body_chunk:\n      as_bytes: aGVsbG8=\n\nEtc.\n\nStatistics\n----------\n\nThe tap filter outputs statistics in the *http.<stat_prefix>.tap.* namespace. The :ref:`stat prefix\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stat_prefix>`\ncomes from the owning HTTP connection manager.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  rq_tapped, Counter, Total requests that matched and were tapped\n"
  },
  {
    "path": "docs/root/configuration/http/http_filters/wasm_filter.rst",
    "content": ".. _config_http_filters_wasm:\n\nWasm\n====\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.wasm.v3.Wasm>`\n\n.. attention::\n\n  The Wasm filter is experimental and is currently under active development. Capabilities will\n  be expanded over time and the configuration structures are likely to change.\n\nThe HTTP Wasm filter is used to implement an HTTP filter with a Wasm plugin.\n\nExample configuration\n---------------------\n\nExample filter configuration:\n\n.. code-block:: yaml\n\n  name: envoy.filters.http.wasm\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm\n    config:\n      config:\n        name: \"my_plugin\"\n        vm_config:\n          runtime: \"envoy.wasm.runtime.v8\"\n          code:\n            local:\n              filename: \"/etc/envoy_filter_http_wasm_example.wasm\"\n          allow_precompiled: true\n \n\nThe preceding snippet configures a filter from a Wasm binary on local disk.\n"
  },
  {
    "path": "docs/root/configuration/listeners/lds.rst",
    "content": ".. _config_listeners_lds:\n\nListener discovery service (LDS)\n================================\n\nThe listener discovery service (LDS) is an optional API that Envoy will call to dynamically fetch\nlisteners. Envoy will reconcile the API response and add, modify, or remove known listeners\ndepending on what is required.\n\nThe semantics of listener updates are as follows:\n\n* Every listener must have a unique :ref:`name <envoy_v3_api_field_config.listener.v3.Listener.name>`. If a name is not\n  provided, Envoy will create a UUID. Listeners that are to be dynamically updated should have a\n  unique name supplied by the management server.\n* When a listener is added, it will be \"warmed\" before taking traffic. For example, if the listener\n  references an :ref:`RDS <config_http_conn_man_rds>` configuration, that configuration will be\n  resolved and fetched before the listener is moved to \"active.\"\n* Listeners are effectively constant once created. Thus, when a listener is updated, an entirely\n  new listener is created (with the same listen socket). This listener goes through the same\n  warming process described above for a newly added listener.\n* When a listener is updated or removed, the old listener will be placed into a \"draining\" state\n  much like when the entire server is drained for restart. Connections owned by the listener will\n  be gracefully closed (if possible) for some period of time before the listener is removed and any\n  remaining connections are closed. The drain time is set via the :option:`--drain-time-s` option.\n\n  .. note::\n\n    Any listeners that are statically defined within the Envoy configuration cannot be modified or\n    removed via the LDS API.\n\nConfiguration\n-------------\n\n* :ref:`v3 LDS API <v2_grpc_streaming_endpoints>`\n\nStatistics\n----------\n\nLDS has a :ref:`statistics <subscription_statistics>` tree rooted at *listener_manager.lds.*\n"
  },
  {
    "path": "docs/root/configuration/listeners/listener_filters/http_inspector.rst",
    "content": ".. _config_listener_filters_http_inspector:\n\nHTTP Inspector\n==============\n\nHTTP Inspector listener filter allows detecting whether the application protocol appears to be HTTP, \nand if it is HTTP, it detects the HTTP protocol (HTTP/1.x or HTTP/2) further. This can be used to select a\n:ref:`FilterChain <envoy_v3_api_msg_config.listener.v3.FilterChain>` via the :ref:`application_protocols <envoy_v3_api_field_config.listener.v3.FilterChainMatch.application_protocols>`\nof a :ref:`FilterChainMatch <envoy_v3_api_msg_config.listener.v3.FilterChainMatch>`.\n\n* :ref:`Listener filter v3 API reference <envoy_v3_api_msg_extensions.filters.listener.http_inspector.v3.HttpInspector>`\n* This filter should be configured with the name *envoy.filters.listener.http_inspector*.\n\nExample\n-------\n\nA sample filter configuration could be:\n\n.. code-block:: yaml\n\n  listener_filters:\n    - name: \"envoy.filters.listener.http_inspector\"\n      typed_config: {}\n\nStatistics\n----------\n\nThis filter has a statistics tree rooted at *http_inspector* with the following statistics: \n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  read_error, Counter, Total read errors\n  http10_found, Counter, Total number of times HTTP/1.0 was found\n  http11_found, Counter, Total number of times HTTP/1.1 was found\n  http2_found, Counter, Total number of times HTTP/2 was found\n  http_not_found, Counter, Total number of times HTTP protocol was not found\n"
  },
  {
    "path": "docs/root/configuration/listeners/listener_filters/listener_filters.rst",
    "content": ".. _config_listener_filters:\n\nListener filters\n================\n\nEnvoy has the following builtin listener filters.\n\n.. toctree::\n  :maxdepth: 2\n\n  http_inspector\n  original_dst_filter\n  original_src_filter\n  proxy_protocol\n  tls_inspector\n"
  },
  {
    "path": "docs/root/configuration/listeners/listener_filters/original_dst_filter.rst",
    "content": ".. _config_listener_filters_original_dst:\n\nOriginal Destination\n====================\n\nOriginal destination listener filter reads the SO_ORIGINAL_DST socket option set when a connection\nhas been redirected by an iptables REDIRECT target, or by an iptables TPROXY target in combination\nwith setting the listener's :ref:`transparent <envoy_v3_api_field_config.listener.v3.Listener.transparent>` option.\nLater processing in Envoy sees the restored destination address as the connection's local address,\nrather than the address at which the listener is listening. Furthermore, :ref:`an original\ndestination cluster <arch_overview_service_discovery_types_original_destination>` may be used to\nforward HTTP requests or TCP connections to the restored destination address.\n\n* :ref:`v2 API reference <envoy_v3_api_field_config.listener.v3.ListenerFilter.name>`\n* This filter should be configured with the name *envoy.filters.listener.original_dst*.\n"
  },
  {
    "path": "docs/root/configuration/listeners/listener_filters/original_src_filter.rst",
    "content": ".. _config_listener_filters_original_src:\n\nOriginal Source\n===============\n\n* :ref:`Listener filter v3 API reference <envoy_v3_api_msg_extensions.filters.listener.original_src.v3.OriginalSrc>`\n* This filter should be configured with the name *envoy.filters.listener.original_src*.\n\nThe original source listener filter replicates the downstream remote address of the connection on\nthe upstream side of Envoy. For example, if a downstream connection connects to Envoy with IP\naddress ``10.1.2.3``, then Envoy will connect to the upstream with source IP ``10.1.2.3``.\n\nInteraction with Proxy Protocol\n--------------------------------\n\nIf the connection has not had its source address translated or proxied, then Envoy can simply use\nthe existing connection information to build the correct downstream remote address. However, if this\nis not true, a :ref:`Proxy Protocol filter <config_listener_filters_proxy_protocol>` may be used to\nextract the downstream remote address.\n\nIP Version Support\n------------------\nThe filter supports both IPv4 and IPv6 as addresses. Note that the upstream connection must support\nthe version used.\n\nExtra Setup\n-----------\n\nThe downstream remote address used will likely be globally routable. By default, packets returning\nfrom the upstream host to that address will not route through Envoy. The network must be configured\nto forcefully route any traffic whose IP was replicated by Envoy back through the Envoy host.\n\nIf Envoy and the upstream are on the same host -- e.g. in a sidecar deployment --, then iptables\nand routing rules can be used to ensure correct behaviour. The filter has an unsigned integer\nconfiguration,\n:ref:`mark <envoy_v3_api_field_extensions.filters.listener.original_src.v3.OriginalSrc.mark>`. Setting\nthis to *X* causes Envoy to *mark* all upstream packets originating from this listener with value\n*X*. 
Note that if\n:ref:`mark <envoy_v3_api_field_extensions.filters.listener.original_src.v3.OriginalSrc.mark>` is set\nto 0, Envoy will not mark upstream packets.\n\nWe can use the following set of commands to ensure that all ipv4 and ipv6 traffic marked with *X*\n(assumed to be 123 in the example) routes correctly. Note that this example assumes that *eth0* is\nthe default outbound interface.\n\n.. code-block:: text\n\n  iptables  -t mangle -I PREROUTING -m mark     --mark 123 -j CONNMARK --save-mark\n  iptables  -t mangle -I OUTPUT     -m connmark --mark 123 -j CONNMARK --restore-mark\n  ip6tables -t mangle -I PREROUTING -m mark     --mark 123 -j CONNMARK --save-mark\n  ip6tables -t mangle -I OUTPUT     -m connmark --mark 123 -j CONNMARK --restore-mark\n  ip rule add fwmark 123 lookup 100\n  ip route add local 0.0.0.0/0 dev lo table 100\n  ip -6 rule add fwmark 123 lookup 100\n  ip -6 route add local ::/0 dev lo table 100\n  echo 1 > /proc/sys/net/ipv4/conf/eth0/route_localnet\n\n\nExample Listener configuration\n------------------------------\n\nThe following example configures Envoy to use the original source for all connections made on port\n8888. It uses Proxy Protocol to determine the downstream remote address. All upstream packets are\nmarked with 123.\n\n.. code-block:: yaml\n\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8888\n    listener_filters:\n      - name: envoy.filters.listener.proxy_protocol\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol\n      - name: envoy.filters.listener.original_src\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.listener.original_src.v3.OriginalSrc\n          mark: 123\n"
  },
  {
    "path": "docs/root/configuration/listeners/listener_filters/proxy_protocol.rst",
    "content": ".. _config_listener_filters_proxy_protocol:\n\nProxy Protocol\n==============\n\nThis listener filter adds support for\n`HAProxy Proxy Protocol <https://www.haproxy.org/download/1.9/doc/proxy-protocol.txt>`_.\n\nIn this mode, the downstream connection is assumed to come from a proxy\nwhich places the original coordinates (IP, PORT) into a connection-string.\nEnvoy then extracts these and uses them as the remote address.\n\nIn Proxy Protocol v2 there exists the concept of extensions (TLV)\ntags that are optional. If the type of the TLV is added to the filter's configuration,\nthe TLV will be emitted as dynamic metadata with user-specified key.\n\nThis implementation supports both version 1 and version 2, it\nautomatically determines on a per-connection basis which of the two\nversions is present. Note: if the filter is enabled, the Proxy Protocol\nmust be present on the connection (either version 1 or version 2),\nthe standard does not allow parsing to determine if it is present or not.\n\nIf there is a protocol error or an unsupported address family\n(e.g. AF_UNIX) the connection will be closed and an error thrown.\n\n* :ref:`v3 API reference <envoy_v3_api_field_config.listener.v3.Filter.name>`\n* This filter should be configured with the name *envoy.filters.listener.proxy_protocol*.\n\nStatistics\n----------\n\nThis filter emits the following statistics: \n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  downstream_cx_proxy_proto_error, Counter, Total proxy protocol errors"
  },
  {
    "path": "docs/root/configuration/listeners/listener_filters/tls_inspector.rst",
    "content": ".. _config_listener_filters_tls_inspector:\n\nTLS Inspector\n=============\n\nTLS Inspector listener filter allows detecting whether the transport appears to be\nTLS or plaintext, and if it is TLS, it detects the\n`Server Name Indication <https://en.wikipedia.org/wiki/Server_Name_Indication>`_\nand/or `Application-Layer Protocol Negotiation\n<https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_\nfrom the client. This can be used to select a\n:ref:`FilterChain <envoy_v3_api_msg_config.listener.v3.FilterChain>` via the\n:ref:`server_names <envoy_v3_api_field_config.listener.v3.FilterChainMatch.server_names>` and/or\n:ref:`application_protocols <envoy_v3_api_field_config.listener.v3.FilterChainMatch.application_protocols>`\nof a :ref:`FilterChainMatch <envoy_v3_api_msg_config.listener.v3.FilterChainMatch>`.\n\n* :ref:`SNI <faq_how_to_setup_sni>`\n* :ref:`v2 API reference <envoy_v3_api_field_config.listener.v3.ListenerFilter.name>`\n* This filter should be configured with the name *envoy.filters.listener.tls_inspector*.\n\nExample\n-------\n\nA sample filter configuration could be:\n\n.. code-block:: yaml\n\n  listener_filters:\n  - name: \"envoy.filters.listener.tls_inspector\"\n    typed_config: {}\n\nStatistics\n----------\n\nThis filter has a statistics tree rooted at *tls_inspector* with the following statistics: \n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  connection_closed, Counter, Total connections closed\n  client_hello_too_large, Counter, Total unreasonably large Client Hello received\n  read_error, Counter, Total read errors\n  tls_found, Counter, Total number of times TLS was found\n  tls_not_found, Counter, Total number of times TLS was not found\n  alpn_found, Counter, Total number of times `Application-Layer Protocol Negotiation <https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_ was successful\n  alpn_not_found, Counter, Total number of times `Application-Layer Protocol Negotiation <https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_ has failed\n  sni_found, Counter, Total number of times `Server Name Indication <https://en.wikipedia.org/wiki/Server_Name_Indication>`_ was found\n  sni_not_found, Counter, Total number of times `Server Name Indication <https://en.wikipedia.org/wiki/Server_Name_Indication>`_ was not found\n\n"
  },
  {
    "path": "docs/root/configuration/listeners/listeners.rst",
    "content": ".. _config_listeners:\n\nListeners\n=========\n\n.. toctree::\n  :maxdepth: 2\n\n  overview\n  stats\n  runtime\n  listener_filters/listener_filters\n  network_filters/network_filters\n  udp_filters/udp_filters\n  lds\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml",
    "content": "admin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address:\n      protocol: TCP\n      address: 127.0.0.1\n      port_value: 9901\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        protocol: TCP\n        address: 0.0.0.0\n        port_value: 10000\n    listener_filters:\n      - name: envoy.filters.listener.tls_inspector\n    filter_chains:\n      - filters:\n          - name: envoy.filters.network.sni_dynamic_forward_proxy\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig\n              port_value: 443\n              dns_cache_config:\n                name: dynamic_forward_proxy_cache_config\n                dns_lookup_family: V4_ONLY\n          - name: envoy.tcp_proxy\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\n              stat_prefix: tcp\n              cluster: dynamic_forward_proxy_cluster\n  clusters:\n  - name: dynamic_forward_proxy_cluster\n    connect_timeout: 1s\n    lb_policy: CLUSTER_PROVIDED\n    cluster_type:\n      name: envoy.clusters.dynamic_forward_proxy\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig\n        dns_cache_config:\n          name: dynamic_forward_proxy_cache_config\n          dns_lookup_family: V4_ONLY\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst",
    "content": ".. _config_network_filters_client_ssl_auth:\n\nClient TLS authentication\n=========================\n\n* Client TLS authentication filter :ref:`architecture overview <arch_overview_ssl_auth_filter>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth>`\n* This filter should be configured with the name *envoy.filters.network.client_ssl_auth*.\n\n.. _config_network_filters_client_ssl_auth_stats:\n\nStatistics\n----------\n\nEvery configured client TLS authentication filter has statistics rooted at\n*auth.clientssl.<stat_prefix>.* with the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  update_success, Counter, Total principal update successes\n  update_failure, Counter, Total principal update failures\n  auth_no_ssl, Counter, Total connections ignored due to no TLS\n  auth_ip_allowlist, Counter, Total connections allowed due to the IP allowlist\n  auth_digest_match, Counter, Total connections allowed due to certificate match\n  auth_digest_no_match, Counter, Total connections denied due to no certificate match\n  total_principals, Gauge, Total loaded principals\n\n.. _config_network_filters_client_ssl_auth_rest_api:\n\nREST API\n--------\n\n.. http:get:: /v1/certs/list/approved\n\n  The authentication filter will call this API every refresh interval to fetch the current list\n  of approved certificates/principals. The expected JSON response looks like:\n\n  .. code-block:: json\n\n    {\n      \"certificates\": []\n    }\n\n  certificates\n    *(required, array)* list of approved certificates/principals.\n\n  Each certificate object is defined as:\n\n  .. code-block:: json\n\n    {\n      \"fingerprint_sha256\": \"...\",\n    }\n\n  fingerprint_sha256\n    *(required, string)* The SHA256 hash of the approved client certificate. Envoy will match this\n    hash to the presented client certificate to determine whether there is a digest match.\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/direct_response_filter.rst",
    "content": ".. _config_network_filters_direct_response:\n\nDirect response\n===============\n\nThe direct response filter is a trivial network filter used to respond\nimmediately to new downstream connections with an optional canned response. It\ncan be used, for example, as a terminal filter in filter chains to collect\ntelemetry for blocked traffic. This filter should be configured with the name\n*envoy.filters.network.direct_response*.\n\n* :ref:`v3 API reference <envoy_v3_api_field_config.listener.v3.Filter.name>`\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst",
    "content": ".. _config_network_filters_dubbo_proxy:\n\nDubbo proxy\n============\n\nThe dubbo proxy filter decodes the RPC protocol between dubbo clients\nand servers. the decoded RPC information is converted to metadata.\nthe metadata includes the basic request ID, request type, serialization type,\nand the required service name, method name, parameter name,\nand parameter value for routing.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.dubbo_proxy.v3.DubboProxy>`\n* This filter should be configured with the name *envoy.filters.network.dubbo_proxy*.\n\n.. _config_network_filters_dubbo_proxy_stats:\n\nStatistics\n----------\n\nEvery configured dubbo proxy filter has statistics rooted at *dubbo.<stat_prefix>.* with the\nfollowing statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  request, Counter, Total requests\n  request_twoway, Counter, Total twoway requests\n  request_oneway, Counter, Total oneway requests\n  request_event, Counter, Total event requests\n  request_decoding_error, Counter, Total decoding error requests\n  request_decoding_success, Counter, Total decoding success requests\n  request_active, Gauge, Total active requests\n  response, Counter, Total responses\n  response_success, Counter, Total success responses\n  response_error, Counter, Total responses that protocol parse error\n  response_error_caused_connection_close, Counter, Total responses that caused by the downstream connection close\n  response_business_exception, Counter, Total responses that the protocol contains exception information returned by the business layer\n  response_decoding_error, Counter, Total decoding error responses\n  response_decoding_success, Counter, Total decoding success responses\n  response_error, Counter, Total responses that protocol parse error\n  local_response_success, Counter, Total local responses\n  local_response_error, Counter, Total local responses that encoding error\n  
local_response_business_exception, Counter, Total local responses that the protocol contains business exception\n  cx_destroy_local_with_active_rq, Counter, Connections destroyed locally with an active query\n  cx_destroy_remote_with_active_rq, Counter, Connections destroyed remotely with an active query\n\n\nImplement custom filter based on the dubbo proxy filter\n--------------------------------------------------------\n\nIf you want to implement a custom filter based on the dubbo protocol,\nthe dubbo proxy filter like HTTP also provides a very convenient way to expand,\nthe first step is to implement the DecoderFilter interface, and give the filter named, such as testFilter,\nthe second step is to add your configuration, configuration method refer to the following sample\n\n.. code-block:: yaml\n\n  filter_chains:\n  - filters:\n    - name: envoy.filters.network.dubbo_proxy\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\n        stat_prefix: dubbo_incomming_stats\n        protocol_type: Dubbo\n        serialization_type: Hessian2\n        route_config:\n          name: local_route\n          interface: org.apache.dubbo.demo.DemoService\n          routes:\n          - match:\n              method:\n                name:\n                  exact: sayHello\n            route:\n              cluster: user_service_dubbo_server\n        dubbo_filters:\n        - name: envoy.filters.dubbo.testFilter\n          typed_config:\n            \"@type\": type.googleapis.com/google.protobuf.Struct\n            value:\n              name: test_service\n        - name: envoy.filters.dubbo.router\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/echo_filter.rst",
    "content": ".. _config_network_filters_echo:\n\nEcho\n====\n\nThe echo is a trivial network filter mainly meant to demonstrate the network filter API. If\ninstalled it will echo (write) all received data back to the connected downstream client.\nThis filter should be configured with the name *envoy.filters.network.echo*.\n\n* :ref:`v3 API reference <envoy_v3_api_field_config.listener.v3.Filter.name>`\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/ext_authz_filter.rst",
    "content": ".. _config_network_filters_ext_authz:\n\nExternal Authorization\n======================\n\n* External authorization :ref:`architecture overview <arch_overview_ext_authz>`\n* :ref:`Network filter v3 API reference <envoy_v3_api_msg_extensions.filters.network.ext_authz.v3.ExtAuthz>`\n* This filter should be configured with the name *envoy.filters.network.ext_authz*.\n\nThe external authorization network filter calls an external authorization service to check if the\nincoming request is authorized or not. If the request is deemed unauthorized by the network filter\nthen the connection will be closed.\n\n.. tip::\n  It is recommended that this filter is configured first in the filter chain so that requests are\n  authorized prior to rest of the filters processing the request.\n\nThe content of the request that are passed to an authorization service is specified by\n:ref:`CheckRequest <envoy_v3_api_msg_service.auth.v3.CheckRequest>`.\n\n.. _config_network_filters_ext_authz_network_configuration:\n\nThe network filter, gRPC service, can be configured as follows. You can see all the configuration\noptions at :ref:`Network filter <envoy_v3_api_msg_extensions.filters.network.ext_authz.v3.ExtAuthz>`.\n\nExample\n-------\n\nA sample filter configuration could be:\n\n.. 
code-block:: yaml\n\n  filters:\n    - name: envoy.filters.network.ext_authz\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz\n        stat_prefix: ext_authz\n        grpc_service:\n          envoy_grpc:\n            cluster_name: ext-authz\n        include_peer_certificate: true\n\n  clusters:\n    - name: ext-authz\n      type: static\n      http2_protocol_options: {}\n      load_assignment:\n        cluster_name: ext-authz\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 10003\n\nStatistics\n----------\n\nThe network filter outputs statistics in the *config.ext_authz.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  total, Counter, Total responses from the filter.\n  error, Counter, Total errors contacting the external service.\n  denied, Counter, Total responses from the authorization service that were to deny the traffic.\n  disabled, Counter, Total requests that are allowed without calling external services because the filter is disabled.\n  failure_mode_allowed, Counter, \"Total requests that were error(s) but were allowed through\n  because of failure_mode_allow set to true.\"\n  ok, Counter, Total responses from the authorization service that were to allow the traffic.\n  cx_closed, Counter, Total connections that were closed.\n  active, Gauge, Total currently active requests in transit to the authorization service.\n\nDynamic Metadata\n----------------\n.. 
_config_network_filters_ext_authz_dynamic_metadata:\n\nThe External Authorization filter emits dynamic metadata as an opaque ``google.protobuf.Struct``\n*only* when the gRPC authorization server returns a :ref:`CheckResponse\n<envoy_v3_api_msg_service.auth.v3.CheckResponse>` with a filled :ref:`dynamic_metadata\n<envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>` field.\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst",
    "content": ".. _config_network_filters_kafka_broker:\n\nKafka Broker filter\n===================\n\nThe Apache Kafka broker filter decodes the client protocol for\n`Apache Kafka <https://kafka.apache.org/>`_, both the requests and responses in the payload.\nThe message versions in `Kafka 2.4.0 <http://kafka.apache.org/24/protocol.html#protocol_api_keys>`_\nare supported.\nThe filter attempts not to influence the communication between client and brokers, so the messages\nthat could not be decoded (due to Kafka client or broker running a newer version than supported by\nthis filter) are forwarded as-is.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.kafka_broker.v3.KafkaBroker>`\n* This filter should be configured with the name *envoy.filters.network.kafka_broker*.\n\n.. attention::\n\n   The kafka_broker filter is experimental and is currently under active development.\n   Capabilities will be expanded over time and the configuration structures are likely to change.\n\n.. _config_network_filters_kafka_broker_config:\n\nConfiguration\n-------------\n\nThe Kafka Broker filter should be chained with the TCP proxy filter as shown\nin the configuration snippet below:\n\n.. 
code-block:: yaml\n\n  listeners:\n  - address:\n      socket_address:\n        address: 127.0.0.1 # Host that Kafka clients should connect to.\n        port_value: 19092  # Port that Kafka clients should connect to.\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.kafka_broker\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker\n          stat_prefix: exampleprefix\n      - name: envoy.filters.network.tcp_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\n          stat_prefix: tcp\n          cluster: localkafka\n  clusters:\n  - name: localkafka\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: some_service\n      endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1 # Kafka broker's host\n                  port_value: 9092 # Kafka broker's port.\n\nThe Kafka broker needs to advertise the Envoy listener port instead of its own.\n\n.. code-block:: text\n\n  # Listener value needs to be equal to cluster value in Envoy config\n  # (will receive payloads from Envoy).\n  listeners=PLAINTEXT://127.0.0.1:9092\n\n  # Advertised listener value needs to be equal to Envoy's listener\n  # (will make clients discovering this broker talk to it through Envoy).\n  advertised.listeners=PLAINTEXT://127.0.0.1:19092\n\n.. _config_network_filters_kafka_broker_stats:\n\nStatistics\n----------\n\nEvery configured Kafka Broker filter has statistics rooted at *kafka.<stat_prefix>.*, with multiple\nstatistics per message type.\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  request.TYPE, Counter, Number of times a request of particular type was received from Kafka client\n  request.unknown, Counter, Number of times a request with format not recognized by this filter was received\n  request.failure, Counter, Number of times a request with invalid format was received or other processing exception occurred\n  response.TYPE, Counter, Number of times a response of particular type was received from Kafka broker\n  response.TYPE_duration, Histogram, Response generation time in milliseconds\n  response.unknown, Counter, Number of times a response with format not recognized by this filter was received\n  response.failure, Counter, Number of times a response with invalid format was received or other processing exception occurred\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst",
    "content": ".. _config_network_filters_local_rate_limit:\n\nLocal rate limit\n================\n\n* Local rate limiting :ref:`architecture overview <arch_overview_local_rate_limit>`\n* :ref:`v3 API reference\n  <envoy_v3_api_msg_extensions.filters.network.local_ratelimit.v3.LocalRateLimit>`\n* This filter should be configured with the name *envoy.filters.network.local_ratelimit*.\n\n.. note::\n  Global rate limiting is also supported via the :ref:`global rate limit filter\n  <config_network_filters_rate_limit>`.\n\nOverview\n--------\n\nThe local rate limit filter applies a :ref:`token bucket\n<envoy_v3_api_field_extensions.filters.network.local_ratelimit.v3.LocalRateLimit.token_bucket>` rate\nlimit to incoming connections that are processed by the filter's filter chain. Each connection\nprocessed by the filter utilizes a single token, and if no tokens are available, the connection will\nbe immediately closed without further filter iteration.\n\n.. note::\n  In the current implementation each filter and filter chain has an independent rate limit.\n\n.. _config_network_filters_local_rate_limit_stats:\n\nStatistics\n----------\n\nEvery configured local rate limit filter has statistics rooted at *local_ratelimit.<stat_prefix>.*\nwith the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  rate_limited, Counter, Total connections that have been closed due to rate limit exceeded\n\nRuntime\n-------\n\nThe local rate limit filter can be runtime feature flagged via the :ref:`enabled\n<envoy_v3_api_field_extensions.filters.network.local_ratelimit.v3.LocalRateLimit.runtime_enabled>`\nconfiguration field.\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst",
    "content": ".. _config_network_filters_mongo_proxy:\n\nMongo proxy\n===========\n\n* MongoDB :ref:`architecture overview <arch_overview_mongo>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.mongo_proxy.v3.MongoProxy>`\n* This filter should be configured with the name *envoy.filters.network.mongo_proxy*.\n\n.. _config_network_filters_mongo_proxy_fault_injection:\n\nFault injection\n---------------\n\nThe Mongo proxy filter supports fault injection. See the v3 API reference for how to\nconfigure.\n\n.. _config_network_filters_mongo_proxy_stats:\n\nStatistics\n----------\n\nEvery configured MongoDB proxy filter has statistics rooted at *mongo.<stat_prefix>.* with the\nfollowing statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  decoding_error, Counter, Number of MongoDB protocol decoding errors\n  delay_injected, Counter, Number of times the delay is injected\n  op_get_more, Counter, Number of OP_GET_MORE messages\n  op_insert, Counter, Number of OP_INSERT messages\n  op_kill_cursors, Counter, Number of OP_KILL_CURSORS messages\n  op_query, Counter, Number of OP_QUERY messages\n  op_query_tailable_cursor, Counter, Number of OP_QUERY with tailable cursor flag set\n  op_query_no_cursor_timeout, Counter, Number of OP_QUERY with no cursor timeout flag set\n  op_query_await_data, Counter, Number of OP_QUERY with await data flag set\n  op_query_exhaust, Counter, Number of OP_QUERY with exhaust flag set\n  op_query_no_max_time, Counter, Number of queries without maxTimeMS set\n  op_query_scatter_get, Counter, Number of scatter get queries\n  op_query_multi_get, Counter, Number of multi get queries\n  op_query_active, Gauge, Number of active queries\n  op_reply, Counter, Number of OP_REPLY messages\n  op_reply_cursor_not_found, Counter, Number of OP_REPLY with cursor not found flag set\n  op_reply_query_failure, Counter, Number of OP_REPLY with query failure flag set\n  op_reply_valid_cursor, Counter, Number 
of OP_REPLY with a valid cursor\n  cx_destroy_local_with_active_rq, Counter, Connections destroyed locally with an active query\n  cx_destroy_remote_with_active_rq, Counter, Connections destroyed remotely with an active query\n  cx_drain_close, Counter, Connections gracefully closed on reply boundaries during server drain\n\nScatter gets\n^^^^^^^^^^^^\n\nEnvoy defines a *scatter get* as any query that does not use an *_id* field as a query parameter.\nEnvoy looks in both the top level document as well as within a *$query* field for *_id*.\n\nMulti gets\n^^^^^^^^^^\n\nEnvoy defines a *multi get* as any query that does use an *_id* field as a query parameter, but\nwhere *_id* is not a scalar value (i.e., a document or an array). Envoy looks in both the top level\ndocument as well as within a *$query* field for *_id*.\n\n.. _config_network_filters_mongo_proxy_comment_parsing:\n\n$comment parsing\n^^^^^^^^^^^^^^^^\n\nIf a query has a top level *$comment* field (typically in addition to a *$query* field), Envoy will\nparse it as JSON and look for the following structure:\n\n.. code-block:: json\n\n  {\n    \"callingFunction\": \"...\"\n  }\n\ncallingFunction\n  *(required, string)* the function that made the query. If available, the function will be used\n  in :ref:`callsite <config_network_filters_mongo_proxy_callsite_stats>` query statistics.\n\nPer command statistics\n^^^^^^^^^^^^^^^^^^^^^^\n\nThe MongoDB filter will gather statistics for commands in the *mongo.<stat_prefix>.cmd.<cmd>.*\nnamespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  total, Counter, Number of commands\n  reply_num_docs, Histogram, Number of documents in reply\n  reply_size, Histogram, Size of the reply in bytes\n  reply_time_ms, Histogram, Command time in milliseconds\n\n.. 
_config_network_filters_mongo_proxy_collection_stats:\n\nPer collection query statistics\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe MongoDB filter will gather statistics for queries in the\n*mongo.<stat_prefix>.collection.<collection>.query.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  total, Counter, Number of queries\n  scatter_get, Counter, Number of scatter gets\n  multi_get, Counter, Number of multi gets\n  reply_num_docs, Histogram, Number of documents in reply\n  reply_size, Histogram, Size of the reply in bytes\n  reply_time_ms, Histogram, Query time in milliseconds\n\n.. _config_network_filters_mongo_proxy_callsite_stats:\n\nPer collection and callsite query statistics\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIf the application provides the :ref:`calling function\n<config_network_filters_mongo_proxy_comment_parsing>` in the *$comment* field, Envoy will generate\nper callsite statistics. These statistics match the :ref:`per collection statistics\n<config_network_filters_mongo_proxy_collection_stats>` but are found in the\n*mongo.<stat_prefix>.collection.<collection>.callsite.<callsite>.query.* namespace.\n\n.. _config_network_filters_mongo_proxy_runtime:\n\nRuntime\n-------\n\nThe Mongo proxy filter supports the following runtime settings:\n\nmongo.connection_logging_enabled\n  % of connections that will have logging enabled. Defaults to 100. This allows only a % of\n  connections to have logging, but for all messages on those connections to be logged.\n\nmongo.proxy_enabled\n  % of connections that will have the proxy enabled at all. Defaults to 100.\n\nmongo.logging_enabled\n  % of messages that will be logged. Defaults to 100. If less than 100, queries may be logged\n  without replies, etc.\n\nmongo.drain_close_enabled\n  % of connections that will be drain closed if the server is draining and would otherwise\n  attempt a drain close. 
Defaults to 100.\n\nmongo.fault.fixed_delay.percent\n  Probability of an eligible MongoDB operation to be affected by\n  the injected fault when there is no active fault.\n  Defaults to the *percentage* specified in the config.\n\nmongo.fault.fixed_delay.duration_ms\n  The delay duration in milliseconds. Defaults to the *duration_ms* specified in the config.\n\nAccess log format\n-----------------\n\nThe access log format is not customizable and has the following layout:\n\n.. code-block:: json\n\n  {\"time\": \"...\", \"message\": \"...\", \"upstream_host\": \"...\"}\n\ntime\n  System time that complete message was parsed, including milliseconds.\n\nmessage\n  Textual expansion of the message. Whether the message is fully expanded depends on the context.\n  Sometimes summary data is presented to avoid extremely large log sizes.\n\nupstream_host\n  The upstream host that the connection is proxying to, if available. This is populated if the\n  filter is used along with the :ref:`TCP proxy filter <config_network_filters_tcp_proxy>`.\n\n.. _config_network_filters_mongo_proxy_dynamic_metadata:\n\nDynamic Metadata\n----------------\n\nThe Mongo filter emits the following dynamic metadata when enabled via the\n:ref:`configuration <envoy_v3_api_field_extensions.filters.network.mongo_proxy.v3.MongoProxy.emit_dynamic_metadata>`.\nThis dynamic metadata is available as key-value pairs where the key\nrepresents the database and the collection being accessed, and the value is\na list of operations performed on the collection.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  key, string, The resource name in *db.collection* format.\n  value, array, A list of strings representing the operations executed on the resource (insert/update/query/delete).\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/mysql_proxy_filter.rst",
    "content": ".. _config_network_filters_mysql_proxy:\n\nMySQL proxy\n===========\n\nThe MySQL proxy filter decodes the wire protocol between the MySQL client\nand server. It decodes the SQL queries in the payload (SQL99 format only).\nThe decoded info is emitted as dynamic metadata that can be combined with\naccess log filters to get detailed information on tables accessed as well\nas operations performed on each table.\n\n.. attention::\n\n   The mysql_proxy filter is experimental and is currently under active\n   development. Capabilities will be expanded over time and the\n   configuration structures are likely to change.\n\n.. warning::\n\n   The mysql_proxy filter was tested with MySQL v5.5. The filter may not work\n   with other versions of MySQL due to differences in the protocol implementation.\n\n.. _config_network_filters_mysql_proxy_config:\n\nConfiguration\n-------------\n\nThe MySQL proxy filter should be chained with the TCP proxy filter as shown\nin the configuration snippet below:\n\n.. code-block:: yaml\n\n  filter_chains:\n  - filters:\n    - name: envoy.filters.network.mysql_proxy\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy\n        stat_prefix: mysql\n    - name: envoy.filters.network.tcp_proxy\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\n        stat_prefix: tcp\n        cluster: ...\n\n\n.. _config_network_filters_mysql_proxy_stats:\n\nStatistics\n----------\n\nEvery configured MySQL proxy filter has statistics rooted at *mysql.<stat_prefix>.* with the\nfollowing statistics:\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  auth_switch_request, Counter, Number of times the upstream server requested clients to switch to a different authentication method\n  decoder_errors, Counter, Number of MySQL protocol decoding errors\n  login_attempts, Counter, Number of login attempts\n  login_failures, Counter, Number of login failures\n  protocol_errors, Counter, Number of out of sequence protocol messages encountered in a session\n  queries_parse_error, Counter, Number of MySQL queries parsed with errors\n  queries_parsed, Counter, Number of MySQL queries successfully parsed\n  sessions, Counter, Number of MySQL sessions since start\n  upgraded_to_ssl, Counter, Number of sessions/connections that were upgraded to SSL\n\n.. _config_network_filters_mysql_proxy_dynamic_metadata:\n\nDynamic Metadata\n----------------\n\nThe MySQL filter emits the following dynamic metadata for each SQL query parsed:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  <table.db>, string, The resource name in *table.db* format. The resource name defaults to the table being accessed if the database cannot be inferred.\n  [], list, A list of strings representing the operations executed on the resource. Operations can be one of insert/update/select/drop/delete/create/alter/show.\n\n.. _config_network_filters_mysql_proxy_rbac:\n\nRBAC Enforcement on Table Accesses\n----------------------------------\n\nThe dynamic metadata emitted by the MySQL filter can be used in conjunction\nwith the RBAC filter to control accesses to individual tables in a\ndatabase. The following configuration snippet shows an example RBAC filter\nconfiguration that denies SQL queries with *update* statements to the\n*catalog* table in the *productdb* database.\n\n.. 
code-block:: yaml\n\n  filter_chains:\n  - filters:\n    - name: envoy.filters.network.mysql_proxy\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy\n        stat_prefix: mysql\n    - name: envoy.filters.network.rbac\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC\n        stat_prefix: rbac\n        rules:\n          action: DENY\n          policies:\n            \"product-viewer\":\n              permissions:\n              - metadata:\n                  filter: envoy.filters.network.mysql_proxy\n                  path:\n                  - key: catalog.productdb\n                  value:\n                    list_match:\n                      one_of:\n                        string_match:\n                          exact: update\n              principals:\n              - any: true\n    - name: envoy.filters.network.tcp_proxy\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\n        stat_prefix: tcp\n        cluster: mysql\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/network_filters.rst",
    "content": ".. _config_network_filters:\n\nNetwork filters\n===============\n\nIn addition to the :ref:`HTTP connection manager <config_http_conn_man>` which is large\nenough to have its own section in the configuration guide, Envoy has the following builtin network\nfilters.\n\n.. toctree::\n  :maxdepth: 2\n\n  dubbo_proxy_filter\n  client_ssl_auth_filter\n  echo_filter\n  direct_response_filter\n  ext_authz_filter\n  kafka_broker_filter\n  local_rate_limit_filter\n  mongo_proxy_filter\n  mysql_proxy_filter\n  postgres_proxy_filter\n  rate_limit_filter\n  rbac_filter\n  redis_proxy_filter\n  rocketmq_proxy_filter\n  tcp_proxy_filter\n  thrift_proxy_filter\n  sni_cluster_filter\n  sni_dynamic_forward_proxy_filter\n  wasm_filter\n  zookeeper_proxy_filter\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst",
    "content": ".. _config_network_filters_postgres_proxy:\n\nPostgres proxy\n================\n\nThe Postgres proxy filter decodes the wire protocol between a Postgres client (downstream) and a Postgres server\n(upstream). The decoded information is used to produce Postgres level statistics like sessions,\nstatements or transactions executed, among others. The Postgres proxy filter parses SQL queries carried in ``Query`` and ``Parse`` messages.\nWhen an SQL query has been parsed successfully, the :ref:`metadata <config_network_filters_postgres_proxy_dynamic_metadata>` is created,\nwhich may be used by other filters like :ref:`RBAC <config_network_filters_rbac>`.\nWhen the Postgres filter detects that a session is encrypted, the messages are ignored and no decoding takes\nplace. More information:\n\n* Postgres :ref:`architecture overview <arch_overview_postgres>`\n\n.. attention::\n\n   The `postgres_proxy` filter is experimental and is currently under active development.\n   Capabilities will be expanded over time and the configuration structures are likely to change.\n\n\n.. warning::\n\n   The `postgres_proxy` filter was tested only with\n   `Postgres frontend/backend protocol version 3.0`_, which was introduced in\n   Postgres 7.4. Earlier versions are thus not supported. Testing is limited\n   anyway to not EOL-ed versions.\n\n   .. _Postgres frontend/backend protocol version 3.0: https://www.postgresql.org/docs/current/protocol.html\n\n\n\nConfiguration\n-------------\n\nThe Postgres proxy filter should be chained with the TCP proxy as shown in the configuration\nexample below:\n\n.. 
code-block:: yaml\n\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.postgres_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.postgres_proxy.v3alpha.PostgresProxy\n          stat_prefix: postgres\n      - name: envoy.tcp_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\n          stat_prefix: tcp\n          cluster: postgres_cluster\n\n\n.. _config_network_filters_postgres_proxy_stats:\n\nStatistics\n----------\n\nEvery configured Postgres proxy filter has statistics rooted at postgres.<stat_prefix> with the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 2, 1, 2\n\n  errors, Counter, Number of times the server replied with ERROR message\n  errors_error, Counter, Number of times the server replied with ERROR message with ERROR severity\n  errors_fatal, Counter, Number of times the server replied with ERROR message with FATAL severity\n  errors_panic, Counter, Number of times the server replied with ERROR message with PANIC severity\n  errors_unknown, Counter, Number of times the server replied with ERROR message but the decoder could not parse it\n  messages, Counter, Total number of messages processed by the filter\n  messages_backend, Counter, Total number of backend messages detected by the filter\n  messages_frontend, Counter, Number of frontend messages detected by the filter\n  messages_unknown, Counter, Number of times the filter successfully decoded a message but did not know what to do with it\n  sessions, Counter, Total number of successful logins\n  sessions_encrypted, Counter, Number of times the filter detected encrypted sessions\n  sessions_unencrypted, Counter, Number of messages indicating unencrypted successful login\n  statements, Counter, Total number of SQL statements\n  statements_delete, Counter, Number of DELETE statements\n  statements_insert, 
Counter, Number of INSERT statements\n  statements_select, Counter, Number of SELECT statements\n  statements_update, Counter, Number of UPDATE statements\n  statements_other, Counter, \"Number of statements other than DELETE, INSERT, SELECT or UPDATE\"\n  statements_parsed, Counter, Number of SQL queries parsed successfully\n  statements_parse_error, Counter, Number of SQL queries not parsed successfully\n  transactions, Counter, Total number of SQL transactions\n  transactions_commit, Counter, Number of COMMIT transactions\n  transactions_rollback, Counter, Number of ROLLBACK transactions\n  notices, Counter, Total number of NOTICE messages\n  notices_notice, Counter, Number of NOTICE messages with NOTICE subtype\n  notices_log, Counter, Number of NOTICE messages with LOG subtype\n  notices_warning, Counter, Number of NOTICE messages with WARNING severity\n  notices_debug, Counter, Number of NOTICE messages with DEBUG severity\n  notices_info, Counter, Number of NOTICE messages with INFO severity\n  notices_unknown, Counter, Number of NOTICE messages which could not be recognized\n\n\n.. _config_network_filters_postgres_proxy_dynamic_metadata:\n\nDynamic Metadata\n----------------\n\nThe Postgres filter emits Dynamic Metadata based on SQL statements carried in ``Query`` and ``Parse`` messages. ``statements_parsed`` statistics Counter tracks how many times\nSQL statement was parsed successfully and metadata was created. The metadata is emitted in the following format:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  <table.db>, string, The resource name in *table.db* format.\n  [], list, A list of strings representing the operations executed on the resource. Operations can be one of insert/update/select/drop/delete/create/alter/show.\n\n.. 
attention::\n\n   Currently used parser does not successfully parse all SQL statements and it cannot be assumed that all SQL queries will successfully produce Dynamic Metadata.\n   Creating Dynamic Metadata from SQL queries is on best-effort basis at the moment. If parsing of an SQL query fails, ``statements_parse_error`` counter is increased, log message is created, Dynamic Metadata is not\n   produced, but the Postgres message is still forwarded to upstream Postgres server.\n\nParsing SQL statements and emitting Dynamic Metadata can be disabled by setting :ref:`enable_sql_parsing<envoy_v3_api_field_extensions.filters.network.postgres_proxy.v3alpha.PostgresProxy.enable_sql_parsing>` to false.\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/rate_limit_filter.rst",
    "content": ".. _config_network_filters_rate_limit:\n\nRate limit\n==========\n\n* Global rate limiting :ref:`architecture overview <arch_overview_global_rate_limit>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.ratelimit.v3.RateLimit>`\n* This filter should be configured with the name *envoy.filters.network.ratelimit*.\n\n.. note::\n  Local rate limiting is also supported via the :ref:`local rate limit filter\n  <config_network_filters_local_rate_limit>`.\n\n.. _config_network_filters_rate_limit_stats:\n\nStatistics\n----------\n\nEvery configured rate limit filter has statistics rooted at *ratelimit.<stat_prefix>.* with the\nfollowing statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  total, Counter, Total requests to the rate limit service\n  error, Counter, Total errors contacting the rate limit service\n  over_limit, Counter, Total over limit responses from the rate limit service\n  ok, Counter, Total under limit responses from the rate limit service\n  cx_closed, Counter, Total connections closed due to an over limit response from the rate limit service\n  active, Gauge, Total active requests to the rate limit service\n  failure_mode_allowed, Counter, \"Total requests that were error(s) but were allowed through because\n  of :ref:`failure_mode_deny <envoy_v3_api_field_extensions.filters.network.ratelimit.v3.RateLimit.failure_mode_deny>` set to false.\"\n\nRuntime\n-------\n\nThe network rate limit filter supports the following runtime settings:\n\nratelimit.tcp_filter_enabled\n  % of connections that will call the rate limit service. Defaults to 100.\n\nratelimit.tcp_filter_enforcing\n  % of connections that will call the rate limit service and enforce the decision. Defaults to 100.\n  This can be used to test what would happen before fully enforcing the outcome.\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/rbac_filter.rst",
    "content": ".. _config_network_filters_rbac:\n\nRole Based Access Control (RBAC) Network Filter\n===============================================\n\nThe RBAC network filter is used to authorize actions (permissions) by identified downstream clients\n(principals). This is useful to explicitly manage callers to an application and protect it from\nunexpected or forbidden agents. The filter supports configuration with either a safe-list (ALLOW) or\nblock-list (DENY) set of policies based on properties of the connection (IPs, ports, SSL subject).\nThis filter also supports policy in both enforcement and shadow modes. Shadow mode won't effect real\nusers, it is used to test that a new set of policies work before rolling out to production.\n\nWhen a request is denied, the :ref:`CONNECTION_TERMINATION_DETAILS<config_access_log_format_connection_termination_details>`\nwill include the name of the matched policy that caused the deny in the format of `rbac_access_denied_matched_policy[policy_name]`\n(policy_name will be `none` if no policy matched), this helps to distinguish the deny from Envoy\nRBAC filter and the upstream backend.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.rbac.v3.RBAC>`\n* This filter should be configured with the name *envoy.filters.network.rbac*.\n\nStatistics\n----------\n\nThe RBAC network filter outputs statistics in the *<stat_prefix>.rbac.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  allowed, Counter, Total requests that were allowed access\n  denied, Counter, Total requests that were denied access\n  shadow_allowed, Counter, Total requests that would be allowed access by the filter's shadow rules\n  shadow_denied, Counter, Total requests that would be denied access by the filter's shadow rules\n  logged, Counter, Total requests that should be logged\n  not_logged, Counter, Total requests that should not be logged\n\n.. 
_config_network_filters_rbac_dynamic_metadata:\n\nDynamic Metadata\n----------------\n\nThe RBAC filter emits the following dynamic metadata.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  shadow_effective_policy_id, string, The effective shadow policy ID matching the action (if any).\n  shadow_engine_result, string, The engine result for the shadow rules (i.e. either `allowed` or `denied`).\n  access_log_hint, boolean, Whether the request should be logged. This metadata is shared and set under the key namespace 'envoy.common' (See :ref:`Shared Dynamic Metadata<shared_dynamic_metadata>`).\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst",
    "content": ".. _config_network_filters_redis_proxy:\n\nRedis proxy\n===========\n\n* Redis :ref:`architecture overview <arch_overview_redis>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.redis_proxy.v3.RedisProxy>`\n* This filter should be configured with the name *envoy.filters.network.redis_proxy*.\n\n.. _config_network_filters_redis_proxy_stats:\n\nStatistics\n----------\n\nEvery configured Redis proxy filter has statistics rooted at *redis.<stat_prefix>.* with the\nfollowing statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  downstream_cx_active, Gauge, Total active connections\n  downstream_cx_protocol_error, Counter, Total protocol errors\n  downstream_cx_rx_bytes_buffered, Gauge, Total received bytes currently buffered\n  downstream_cx_rx_bytes_total, Counter, Total bytes received\n  downstream_cx_total, Counter, Total connections\n  downstream_cx_tx_bytes_buffered, Gauge, Total sent bytes currently buffered\n  downstream_cx_tx_bytes_total, Counter, Total bytes sent\n  downstream_cx_drain_close, Counter, Number of connections closed due to draining\n  downstream_rq_active, Gauge, Total active requests\n  downstream_rq_total, Counter, Total requests\n\n\nSplitter statistics\n-------------------\n\nThe Redis filter will gather statistics for the command splitter in the\n*redis.<stat_prefix>.splitter.* with the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  invalid_request, Counter, Number of requests with an incorrect number of arguments\n  unsupported_command, Counter, Number of commands issued which are not recognized by the command splitter\n\nPer command statistics\n----------------------\n\nThe Redis filter will gather statistics for commands in the\n*redis.<stat_prefix>.command.<command>.* namespace. 
By default latency stats are in milliseconds and can be\nchanged to microseconds by setting the configuration parameter :ref:`latency_in_micros <envoy_v3_api_field_extensions.filters.network.redis_proxy.v3.RedisProxy.latency_in_micros>` to true.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  total, Counter, Number of commands\n  success, Counter, Number of commands that were successful\n  error, Counter, Number of commands that returned a partial or complete error response\n  latency, Histogram, Command execution time in milliseconds (including delay faults)\n  error_fault, Counter, Number of commands that had an error fault injected\n  delay_fault, Counter, Number of commands that had a delay fault injected\n  \n.. _config_network_filters_redis_proxy_per_command_stats:\n\nRuntime\n-------\n\nThe Redis proxy filter supports the following runtime settings:\n\nredis.drain_close_enabled\n  % of connections that will be drain closed if the server is draining and would otherwise\n  attempt a drain close. Defaults to 100.\n\n.. _config_network_filters_redis_proxy_fault_injection:\n\nFault Injection\n---------------\n\nThe Redis filter can perform fault injection. Currently, Delay and Error faults are supported.\nDelay faults delay a request, and Error faults respond with an error. Moreover, errors can be delayed.\n\nNote that the Redis filter does not check for correctness in your configuration - it is the user's\nresponsibility to make sure both the default and runtime percentages are correct! This is because\npercentages can be changed during runtime, and validating correctness at request time is expensive.\nIf multiple faults are specified, the fault injection percentage should not exceed 100% for a given \nfault and Redis command combination. For example, if two faults are specified; one applying to GET at 60\n%, and one applying to all commands at 50%, that is a bad configuration as GET now has 110% chance of\napplying a fault. 
This means that every request will have a fault.\n\nIf a delay is injected, the delay is additive - if the request took 400ms and a delay of 100ms\nis injected, then the total request latency is 500ms. Also, due to implementation of the redis protocol,\na delayed request will delay everything that comes in after it, due to the proxy's need to respect the \norder of commands it receives.\n\nNote that faults must have a `fault_enabled` field, and are not enabled by default (if no default value\nor runtime key are set).\n\nExample configuration:\n\n.. code-block:: yaml\n\n  faults:\n  - fault_type: ERROR\n    fault_enabled:\n      default_value:\n        numerator: 10\n        denominator: HUNDRED\n      runtime_key: \"bogus_key\"\n    commands:\n    - GET\n  - fault_type: DELAY\n    fault_enabled:\n      default_value:\n        numerator: 10\n        denominator: HUNDRED\n      runtime_key: \"bogus_key\"\n    delay: 2s\n\nThis creates two faults- an error, applying only to GET commands at 10%, and a delay, applying to all\ncommands at 10%. This means that 20% of GET commands will have a fault applied, as discussed earlier.\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst",
    "content": ".. _config_network_filters_rocketmq_proxy:\n\nRocketMQ proxy\n==============\n\nApache RocketMQ is a distributed messaging system, which is composed of four types of roles: producer, consumer, name\nserver and broker server. The former two are embedded into user application in form of SDK; whilst the latter are\nstandalone servers.\n\nA message in RocketMQ carries a topic as its destination and optionally one or more tags as application specific labels.\n\nProducers are used to send messages to brokers according to their topics. Similar to many distributed systems,\nproducers need to know how to connect to these serving brokers. To achieve this goal, RocketMQ provides name server\nclusters for producers to lookup. Namely, when producers attempts to send messages with a new topic, it first\ntries to lookup the addresses(called route info) of brokers that serve the topic from name servers. Once producers\nget the route info of the topic, they actively cache them in memory and renew them periodically thereafter. This\nmechanism, though simple, effectively keeps service availability high without demanding availability of name server\nservice.\n\nBrokers provides messaging service to end users. In addition to various messaging services, they also periodically\nreport health status and route info of topics currently served to name servers.\n\nMajor role of the name server is to serve querying of route info  for a topic. Additionally, it also purges route info\nentries once the belonging brokers fail to report their health info for a configured period of time. This ensures\nclients almost always connect to brokers that are online and ready to serve.\n\nConsumers are used by application to pull message from brokers. They perform similar heartbeats to maintain alive\nstatus. RocketMQ brokers support two message-fetch approaches: long-pulling and pop.\n\nUsing the first approach, consumers have to implement load-balancing algorithm. 
The pop approach, in the perspective of\nconsumers, is stateless.\n\nEnvoy RocketMQ filter proxies requests and responses between producers/consumer and brokers. Various statistical items\nare collected to enhance observability.\n\nAt present, pop-based message fetching is implemented. Long-pulling will be implemented in the next pull request.\n\n.. _config_network_filters_rocketmq_proxy_stats:\n\nStatistics\n----------\n\nEvery configured rocketmq proxy filter has statistics rooted at *rocketmq.<stat_prefix>.* with the\nfollowing statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  request, Counter, Total requests\n  request_decoding_error, Counter, Total decoding error requests\n  request_decoding_success, Counter, Total decoding success requests\n  response, Counter, Total responses\n  response_decoding_error, Counter, Total decoding error responses\n  response_decoding_success, Counter, Total decoding success responses\n  response_error, Counter, Total error responses\n  response_success, Counter, Total success responses\n  heartbeat, Counter, Total heartbeat requests\n  unregister, Counter, Total unregister requests\n  get_topic_route, Counter, Total getting topic route requests\n  send_message_v1, Counter, Total sending message v1 requests\n  send_message_v2, Counter, Total sending message v2 requests\n  pop_message, Counter, Total popping message requests\n  ack_message, Counter, Total acking message requests\n  get_consumer_list, Counter, Total getting consumer list requests\n  maintenance_failure, Counter, Total maintenance failure\n  request_active, Gauge, Total active requests\n  send_message_v1_active, Gauge, Total active sending message v1 requests\n  send_message_v2_active, Gauge, Total active sending message v2 requests\n  pop_message_active, Gauge, Total active popping message requests\n  get_topic_route_active, Gauge, Total active getting topic route requests\n  send_message_pending, Gauge, Total
 pending sending message requests\n  pop_message_pending, Gauge, Total pending popping message requests\n  get_topic_route_pending, Gauge, Total pending getting topic route requests\n  total_pending, Gauge, Total pending requests\n  request_time_ms, Histogram, Request time in milliseconds"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/sni_cluster_filter.rst",
    "content": ".. _config_network_filters_sni_cluster:\n\nUpstream Cluster from SNI\n=========================\n\nThe `sni_cluster` is a network filter that uses the SNI value in a TLS\nconnection as the upstream cluster name. The filter will not modify the\nupstream cluster for non-TLS connections. This filter should be configured \nwith the name *envoy.filters.network.sni_cluster*.\n\nThis filter has no configuration. It must be installed before the\n:ref:`tcp_proxy <config_network_filters_tcp_proxy>` filter.\n\n* :ref:`v3 API reference <envoy_v3_api_field_config.listener.v3.Filter.name>`\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst",
    "content": ".. _config_network_filters_sni_dynamic_forward_proxy:\n\nSNI dynamic forward proxy\n=========================\n\n.. attention::\n\n  SNI dynamic forward proxy support should be considered alpha and not production ready.\n\nThrough the combination of :ref:`TLS inspector <config_listener_filters_tls_inspector>` listener filter,\nthis network filter and the\n:ref:`dynamic forward proxy cluster <envoy_api_msg_config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig>`,\nEnvoy supports SNI based dynamic forward proxy. The implementation works just like the\n:ref:`HTTP dynamic forward proxy <arch_overview_http_dynamic_forward_proxy>`, but using the value in\nSNI as target host instead.\n\nThe following is a complete configuration that configures both this filter\nas well as the :ref:`dynamic forward proxy cluster\n<envoy_api_msg_config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig>`. Both filter and cluster\nmust be configured together and point to the same DNS cache parameters for Envoy to operate as an\nSNI dynamic forward proxy.\n\n.. note::\n\n  The following config doesn't terminate TLS in listener, so there is no need to configure TLS context\n  in cluster. The TLS handshake is passed through by Envoy.\n\n.. literalinclude:: _include/sni-dynamic-forward-proxy-filter.yaml\n    :language: yaml\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst",
    "content": ".. _config_network_filters_tcp_proxy:\n\nTCP proxy\n=========\n\n* TCP proxy :ref:`architecture overview <arch_overview_tcp_proxy>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.tcp_proxy.v3.TcpProxy>`\n* This filter should be configured with the name *envoy.filters.network.tcp_proxy*.\n\n.. _config_network_filters_tcp_proxy_dynamic_cluster:\n\nDynamic cluster selection\n-------------------------\n\nThe upstream cluster used by the TCP proxy filter can be dynamically set by\nother network filters on a per-connection basis by setting a per-connection\nstate object under the key `envoy.tcp_proxy.cluster`. See the\nimplementation for the details.\n\n.. _config_network_filters_tcp_proxy_subset_lb:\n\nRouting to a subset of hosts\n----------------------------\n\nTCP proxy can be configured to route to a subset of hosts within an upstream cluster.\n\nTo define metadata that a suitable upstream host must match, use one of the following fields:\n\n#. Use :ref:`TcpProxy.metadata_match<envoy_v3_api_field_extensions.filters.network.tcp_proxy.v3.TcpProxy.metadata_match>`\n   to define required metadata for a single upstream cluster.\n#. Use :ref:`ClusterWeight.metadata_match<envoy_v3_api_field_extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight.metadata_match>`\n   to define required metadata for a weighted upstream cluster.\n#. Use combination of :ref:`TcpProxy.metadata_match<envoy_v3_api_field_extensions.filters.network.tcp_proxy.v3.TcpProxy.metadata_match>`\n   and :ref:`ClusterWeight.metadata_match<envoy_v3_api_field_extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight.metadata_match>`\n   to define required metadata for a weighted upstream cluster (metadata from the latter will be merged on top of the former).\n\nIn addition, dynamic metadata can be set by earlier network filters on the `StreamInfo`. 
Setting the dynamic metadata\nmust happen before `onNewConnection()` is called on the `TcpProxy` filter to affect load balancing.\n\n.. _config_network_filters_tcp_proxy_stats:\n\nStatistics\n----------\n\nThe TCP proxy filter emits both its own downstream statistics as well as many of the :ref:`cluster\nupstream statistics <config_cluster_manager_cluster_stats>` where applicable. The downstream\nstatistics are rooted at *tcp.<stat_prefix>.* with the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  downstream_cx_total, Counter, Total number of connections handled by the filter\n  downstream_cx_no_route, Counter, Number of connections for which no matching route was found or the cluster for the route was not found\n  downstream_cx_tx_bytes_total, Counter, Total bytes written to the downstream connection\n  downstream_cx_tx_bytes_buffered, Gauge, Total bytes currently buffered to the downstream connection\n  downstream_cx_rx_bytes_total, Counter, Total bytes read from the downstream connection\n  downstream_cx_rx_bytes_buffered, Gauge, Total bytes currently buffered from the downstream connection\n  downstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from downstream\n  downstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from downstream\n  idle_timeout, Counter, Total number of connections closed due to idle timeout\n  max_downstream_connection_duration, Counter, Total number of connections closed due to max_downstream_connection_duration timeout\n  upstream_flush_total, Counter, Total number of connections that continued to flush upstream data after the downstream connection was closed\n  upstream_flush_active, Gauge, Total connections currently continuing to flush upstream data after the downstream connection was closed\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/thrift_proxy_filter.rst",
    "content": ".. _config_network_filters_thrift_proxy:\n\nThrift proxy\n============\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.thrift_proxy.v3.ThriftProxy>`\n* This filter should be configured with the name *envoy.filters.network.thrift_proxy*.\n\nCluster Protocol Options\n------------------------\n\nThrift connections to upstream hosts can be configured by adding an entry to the appropriate\nCluster's :ref:`extension_protocol_options<envoy_v3_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`\nkeyed by `envoy.filters.network.thrift_proxy`. The\n:ref:`ThriftProtocolOptions<envoy_v3_api_msg_extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions>`\nmessage describes the available options.\n\nThrift Request Metadata\n-----------------------\n\nThe :ref:`HEADER transport<envoy_v3_api_enum_value_extensions.filters.network.thrift_proxy.v3.TransportType.HEADER>`\nand :ref:`TWITTER protocol<envoy_v3_api_enum_value_extensions.filters.network.thrift_proxy.v3.ProtocolType.TWITTER>`\nsupport metadata. 
In particular, the\n`Header transport <https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md>`_\nsupports informational key/value pairs and the Twitter protocol transmits\n`tracing and request context data <https://github.com/twitter/finagle/blob/master/finagle-thrift/src/main/thrift/tracing.thrift>`_.\n\nHeader Transport Metadata\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\nHeader transport key/value pairs are available for routing as\n:ref:`headers <envoy_v3_api_field_extensions.filters.network.thrift_proxy.v3.RouteMatch.headers>`.\n\nTwitter Protocol Metadata\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\nTwitter protocol request contexts are converted into headers which are available for routing as\n:ref:`headers <envoy_v3_api_field_extensions.filters.network.thrift_proxy.v3.RouteMatch.headers>`.\nIn addition, the following fields are presented as headers:\n\nClient Identifier\n    The ClientId's `name` field (nested in the RequestHeader `client_id` field) becomes the\n    `:client-id` header.\n\nDestination\n    The RequestHeader `dest` field becomes the `:dest` header.\n\nDelegations\n    Each Delegation from the RequestHeader `delegations` field is added as a header. The header\n    name is the prefix `:d:` followed by the Delegation's `src`. The value is the Delegation's\n    `dst` field.\n\nMetadata Interoperability\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\nRequest metadata that is available for routing (see above) is automatically converted between wire\nformats when translation between downstream and upstream connections occurs. Twitter protocol\nrequest contexts, client id, destination, and delegations are therefore presented as Header\ntransport key/value pairs, named as above. Similarly, Header transport key/value pairs are\npresented as Twitter protocol RequestContext values, unless they match the special names described\nabove. 
For instance, a downstream Header transport request with the info key \":client-id\" is\ntranslated to an upstream Twitter protocol request with a ClientId value.\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/wasm_filter.rst",
    "content": ".. _config_network_filters_wasm:\n\nWasm Network Filter\n===============================================\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.rbac.v3.RBAC>`\n\n.. attention::\n\n  The Wasm filter is experimental and is currently under active development. Capabilities will\n  be expanded over time and the configuration structures are likely to change.\n\nThe Wasm network filter is used to implement a network filter with a Wasm plugin. \n\n\nExample configuration\n---------------------\n\nExample filter configuration:\n\n.. code-block:: yaml\n\n  name: envoy.filters.network.wasm\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm\n    config:\n      config:\n        name: \"my_plugin\"\n        vm_config:\n          runtime: \"envoy.wasm.runtime.v8\"\n          code:\n            local:\n              filename: \"/etc/envoy_filter_http_wasm_example.wasm\"\n          allow_precompiled: true\n\n\nThe preceding snippet configures a filter from a Wasm binary on local disk.\n"
  },
  {
    "path": "docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst",
    "content": ".. _config_network_filters_zookeeper_proxy:\n\nZooKeeper proxy\n===============\n\nThe ZooKeeper proxy filter decodes the client protocol for\n`Apache ZooKeeper <https://zookeeper.apache.org/>`_. It decodes the requests,\nresponses and events in the payload. Most opcodes known in\n`ZooKeeper 3.5 <https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/ZooDefs.java>`_\nare supported. The unsupported ones are related to SASL authentication.\n\n.. attention::\n\n   The zookeeper_proxy filter is experimental and is currently under active\n   development. Capabilities will be expanded over time and the\n   configuration structures are likely to change.\n\n.. _config_network_filters_zookeeper_proxy_config:\n\nConfiguration\n-------------\n\nThe ZooKeeper proxy filter should be chained with the TCP proxy filter as shown\nin the configuration snippet below:\n\n.. code-block:: yaml\n\n  filter_chains:\n  - filters:\n    - name: envoy.filters.network.zookeeper_proxy\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\n        stat_prefix: zookeeper\n    - name: envoy.filters.network.tcp_proxy\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\n        stat_prefix: tcp\n        cluster: ...\n\n\n.. _config_network_filters_zookeeper_proxy_stats:\n\nStatistics\n----------\n\nEvery configured ZooKeeper proxy filter has statistics rooted at *<stat_prefix>.zookeeper.*. The\nfollowing counters are available:\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  decoder_error, Counter, Number of times a message wasn't decoded\n  request_bytes, Counter, Number of bytes in decoded request messages\n  connect_rq, Counter, Number of regular connect (non-readonly) requests\n  connect_readonly_rq, Counter, Number of connect requests with the readonly flag set\n  ping_rq, Counter, Number of ping requests\n  auth.<type>_rq, Counter, Number of auth requests for a given type\n  getdata_rq, Counter, Number of getdata requests\n  create_rq, Counter, Number of create requests\n  create2_rq, Counter, Number of create2 requests\n  setdata_rq, Counter, Number of setdata requests\n  getchildren_rq, Counter, Number of getchildren requests\n  getchildren2_rq, Counter, Number of getchildren2 requests\n  remove_rq, Counter, Number of delete requests\n  exists_rq, Counter, Number of stat requests\n  getacl_rq, Counter, Number of getacl requests\n  setacl_rq, Counter, Number of setacl requests\n  sync_rq, Counter, Number of sync requests\n  multi_rq, Counter, Number of multi transaction requests\n  reconfig_rq, Counter, Number of reconfig requests\n  close_rq, Counter, Number of close requests\n  setwatches_rq, Counter, Number of setwatches requests\n  checkwatches_rq, Counter, Number of checkwatches requests\n  removewatches_rq, Counter, Number of removewatches requests\n  check_rq, Counter, Number of check requests\n  response_bytes, Counter, Number of bytes in decoded response messages\n  connect_resp, Counter, Number of connect responses\n  ping_resp, Counter, Number of ping responses\n  auth_resp, Counter, Number of auth responses\n  watch_event, Counter, Number of watch events fired by the server\n  getdata_resp, Counter, Number of getdata responses\n  create_resp, Counter, Number of create responses\n  create2_resp, Counter, Number of create2 responses\n  createcontainer_resp, Counter, Number of createcontainer responses\n  createttl_resp, Counter, Number of 
createttl responses\n  setdata_resp, Counter, Number of setdata responses\n  getchildren_resp, Counter, Number of getchildren responses\n  getchildren2_resp, Counter, Number of getchildren2 responses\n  getephemerals_resp, Counter, Number of getephemerals responses\n  getallchildrennumber_resp, Counter, Number of getallchildrennumber responses\n  remove_resp, Counter, Number of remove responses\n  exists_resp, Counter, Number of exists responses\n  getacl_resp, Counter, Number of getacl responses\n  setacl_resp, Counter, Number of setacl responses\n  sync_resp, Counter, Number of sync responses\n  multi_resp, Counter, Number of multi responses\n  reconfig_resp, Counter, Number of reconfig responses\n  close_resp, Counter, Number of close responses\n  setauth_resp, Counter, Number of setauth responses\n  setwatches_resp, Counter, Number of setwatches responses\n  checkwatches_resp, Counter, Number of checkwatches responses\n  removewatches_resp, Counter, Number of removewatches responses\n  check_resp, Counter, Number of check responses\n\n\n.. _config_network_filters_zookeeper_proxy_latency_stats:\n\nPer opcode latency statistics\n-----------------------------\n\nThe filter will gather latency statistics in the *<stat_prefix>.zookeeper.<opcode>_response_latency* namespace.\nLatency stats are in milliseconds:\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  connect_response_latency, Histogram, Opcode execution time in milliseconds\n  ping_response_latency, Histogram, Opcode execution time in milliseconds\n  auth_response_latency, Histogram, Opcode execution time in milliseconds\n  watch_event, Histogram, Opcode execution time in milliseconds\n  getdata_response_latency, Histogram, Opcode execution time in milliseconds\n  create_response_latency, Histogram, Opcode execution time in milliseconds\n  create2_response_latency, Histogram, Opcode execution time in milliseconds\n  createcontainer_response_latency, Histogram, Opcode execution time in milliseconds\n  createttl_response_latency, Histogram, Opcode execution time in milliseconds\n  setdata_response_latency, Histogram, Opcode execution time in milliseconds\n  getchildren_response_latency, Histogram, Opcode execution time in milliseconds\n  getchildren2_response_latency, Histogram, Opcode execution time in milliseconds\n  getephemerals_response_latency, Histogram, Opcode execution time in milliseconds\n  getallchildrennumber_response_latency, Histogram, Opcode execution time in milliseconds\n  remove_response_latency, Histogram, Opcode execution time in milliseconds\n  exists_response_latency, Histogram, Opcode execution time in milliseconds\n  getacl_response_latency, Histogram, Opcode execution time in milliseconds\n  setacl_response_latency, Histogram, Opcode execution time in milliseconds\n  sync_response_latency, Histogram, Opcode execution time in milliseconds\n  multi_response_latency, Histogram, Opcode execution time in milliseconds\n  reconfig_response_latency, Histogram, Opcode execution time in milliseconds\n  close_response_latency, Histogram, Opcode execution time in milliseconds\n  setauth_response_latency, Histogram, Opcode execution time in milliseconds\n  setwatches_response_latency, Histogram, Opcode execution time in milliseconds\n  checkwatches_response_latency, Histogram, 
Opcode execution time in milliseconds\n  removewatches_response_latency, Histogram, Opcode execution time in milliseconds\n  check_response_latency, Histogram, Opcode execution time in milliseconds\n\n\n.. _config_network_filters_zookeeper_proxy_dynamic_metadata:\n\nDynamic Metadata\n----------------\n\nThe ZooKeeper filter emits the following dynamic metadata for each message parsed:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  <path>, string, \"The path associated with the request, response or event\"\n  <opname>, string, \"The opname for the request, response or event\"\n  <create_type>, string, \"The string representation of the flags applied to the znode\"\n  <bytes>, string, \"The size of the request message in bytes\"\n  <watch>, string, \"True if a watch is being set, false otherwise\"\n  <version>, string, \"The version parameter, if any, given with the request\"\n  <timeout>, string, \"The timeout parameter in a connect response\"\n  <protocol_version>, string, \"The protocol version in a connect response\"\n  <readonly>, string, \"The readonly flag in a connect response\"\n  <zxid>, string, \"The zxid field in a response header\"\n  <error>, string, \"The error field in a response header\"\n  <client_state>, string, \"The state field in a watch event\"\n  <event_type>, string, \"The event type in a watch event\"\n"
  },
  {
    "path": "docs/root/configuration/listeners/overview.rst",
    "content": "Overview\n========\n\nThe top level Envoy configuration contains a list of :ref:`listeners <arch_overview_listeners>`.\nEach individual listener configuration has the following format:\n\n* :ref:`v3 API reference <envoy_v3_api_msg_config.listener.v3.Listener>`\n"
  },
  {
    "path": "docs/root/configuration/listeners/runtime.rst",
    "content": ".. _config_listeners_runtime:\n\nRuntime\n-------\nThe following runtime settings are supported:\n\nenvoy.resource_limits.listener.<name of listener>.connection_limit\n    Sets a limit on the number of active connections to the specified listener.\n"
  },
  {
    "path": "docs/root/configuration/listeners/stats.rst",
    "content": ".. _config_listener_stats:\n\nStatistics\n==========\n\nListener\n--------\n\nEvery listener has a statistics tree rooted at *listener.<address>.* with the following statistics:\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   downstream_cx_total, Counter, Total connections\n   downstream_cx_destroy, Counter, Total destroyed connections\n   downstream_cx_active, Gauge, Total active connections\n   downstream_cx_length_ms, Histogram, Connection length milliseconds\n   downstream_cx_overflow, Counter, Total connections rejected due to enforcement of listener connection limit\n   downstream_pre_cx_timeout, Counter, Sockets that timed out during listener filter processing\n   downstream_pre_cx_active, Gauge, Sockets currently undergoing listener filter processing\n   global_cx_overflow, Counter, Total connections rejected due to enforcement of the global connection limit\n   no_filter_chain_match, Counter, Total connections that didn't match any filter chain\n   ssl.connection_error, Counter, Total TLS connection errors not including failed certificate verifications\n   ssl.handshake, Counter, Total successful TLS connection handshakes\n   ssl.session_reused, Counter, Total successful TLS session resumptions\n   ssl.no_certificate, Counter, Total successful TLS connections with no client certificate\n   ssl.fail_verify_no_cert, Counter, Total TLS connections that failed because of missing client certificate\n   ssl.fail_verify_error, Counter, Total TLS connections that failed CA verification\n   ssl.fail_verify_san, Counter, Total TLS connections that failed SAN verification\n   ssl.fail_verify_cert_hash, Counter, Total TLS connections that failed certificate pinning verification\n   ssl.ocsp_staple_failed, Counter, Total TLS connections that failed compliance with the OCSP policy\n   ssl.ocsp_staple_omitted, Counter, Total TLS connections that succeeded without stapling an OCSP response\n   ssl.ocsp_staple_responses, 
Counter, Total TLS connections where a valid OCSP response was available (irrespective of whether the client requested stapling)\n   ssl.ocsp_staple_requests, Counter, Total TLS connections where the client requested an OCSP staple\n   ssl.ciphers.<cipher>, Counter, Total successful TLS connections that used cipher <cipher>\n   ssl.curves.<curve>, Counter, Total successful TLS connections that used ECDHE curve <curve>\n   ssl.sigalgs.<sigalg>, Counter, Total successful TLS connections that used signature algorithm <sigalg>\n   ssl.versions.<version>, Counter, Total successful TLS connections that used protocol version <version>\n\n.. _config_listener_stats_per_handler:\n\nPer-handler Listener Stats\n--------------------------\n\nEvery listener additionally has a statistics tree rooted at *listener.<address>.<handler>.* which\ncontains *per-handler* statistics. As described in the\n:ref:`threading model <arch_overview_threading>` documentation, Envoy has a threading model which\nincludes the *main thread* as well as a number of *worker threads* which are controlled by the\n:option:`--concurrency` option. Along these lines, *<handler>* is equal to *main_thread*,\n*worker_0*, *worker_1*, etc. These statistics can be used to look for per-handler/worker imbalance\non either accepted or active connections.\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   downstream_cx_total, Counter, Total connections on this handler.\n   downstream_cx_active, Gauge, Total active connections on this handler.\n\n.. _config_listener_manager_stats:\n\nListener manager\n----------------\n\nThe listener manager has a statistics tree rooted at *listener_manager.* with the following\nstatistics. Any ``:`` character in the stats name is replaced with ``_``.\n\n.. 
csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   listener_added, Counter, Total listeners added (either via static config or LDS).\n   listener_modified, Counter, Total listeners modified (via LDS).\n   listener_removed, Counter, Total listeners removed (via LDS).\n   listener_stopped, Counter, Total listeners stopped.\n   listener_create_success, Counter, Total listener objects successfully added to workers.\n   listener_create_failure, Counter, Total failed listener object additions to workers.\n   listener_in_place_updated, Counter, Total listener objects created to execute filter chain update path.\n   total_filter_chains_draining, Gauge, Number of currently draining filter chains.\n   total_listeners_warming, Gauge, Number of currently warming listeners.\n   total_listeners_active, Gauge, Number of currently active listeners.\n   total_listeners_draining, Gauge, Number of currently draining listeners.\n   workers_started, Gauge, A boolean (1 if started and 0 otherwise) that indicates whether listeners have been initialized on workers.\n"
  },
  {
    "path": "docs/root/configuration/listeners/udp_filters/_include/udp-proxy.yaml",
    "content": "admin:\n  access_log_path: /tmp/admin_access.log\n  address:\n    socket_address:\n      protocol: TCP\n      address: 127.0.0.1\n      port_value: 9901\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        protocol: UDP\n        address: 127.0.0.1\n        port_value: 1234\n    listener_filters:\n      name: envoy.filters.udp_listener.udp_proxy\n      typed_config:\n        '@type': type.googleapis.com/envoy.extensions.filters.udp.udp_proxy.v3.UdpProxyConfig\n        stat_prefix: service\n        cluster: service_udp\n  clusters:\n  - name: service_udp\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: service_udp\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 1235\n"
  },
  {
    "path": "docs/root/configuration/listeners/udp_filters/dns_filter.rst",
    "content": ".. _config_udp_listener_filters_dns_filter:\n\nDNS Filter\n==========\n\n.. attention::\n\n  DNS Filter is under active development and should be considered alpha and not production ready.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig>`\n* This filter should be configured with the name *envoy.filters.udp_listener.dns_filter*\n\nOverview\n--------\n\nThe DNS filter allows Envoy to resolve forward DNS queries as an authoritative server for any\nconfigured domains. The filter's configuration specifies the names and addresses for which Envoy\nwill answer as well as the configuration needed to send queries externally for unknown domains.\n\nThe filter supports local and external DNS resolution. If a lookup for a name does not match a\nstatically configured domain, or a provisioned cluster name, Envoy can refer the query to an\nexternal resolver for an answer. Users have the option of specifying the DNS servers that Envoy\nwill use for external resolution. Users can disable external DNS resolution by omitting the\nclient configuration object.\n\nThe filter supports :ref:`per-filter configuration\n<envoy_v3_api_msg_extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig>`.\nAn Example configuration follows that illustrates how the filter can be used.\n\nExample Configuration\n---------------------\n\n.. 
code-block:: yaml\n\n  listener_filters:\n    name: envoy.filters.udp.dns_filter\n    typed_config:\n      \"@type\": \"type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig\"\n      stat_prefix: \"dns_filter_prefix\"\n      client_config:\n        resolution_timeout: 5s\n        upstream_resolvers:\n        - socket_address:\n            address: \"8.8.8.8\"\n            port_value: 53\n        - socket_address:\n            address: \"8.8.4.4\"\n            port_value: 53\n        max_pending_lookups: 256\n      server_config:\n        inline_dns_table:\n          known_suffixes:\n          - suffix: \"domain1.com\"\n          - suffix: \"domain2.com\"\n          - suffix: \"domain3.com\"\n          - suffix: \"domain4.com\"\n          - suffix: \"domain5.com\"\n          virtual_domains:\n            - name: \"www.domain1.com\"\n              endpoint:\n                address_list:\n                  address:\n                  - 10.0.0.1\n                  - 10.0.0.2\n            - name: \"www.domain2.com\"\n              endpoint:\n                address_list:\n                  address:\n                    - 2001:8a:c1::2800:7\n            - name: \"www.domain3.com\"\n              endpoint:\n                address_list:\n                  address:\n                  - 10.0.3.1\n            - name: \"www.domain4.com\"\n              endpoint:\n                cluster_name: cluster_0\n            - name: \"voip.domain5.com\"\n              endpoint:\n                service_list:\n                  services:\n                    - service_name: \"sip\"\n                      protocol: { number: 6 }\n                      ttl: 86400s\n                      targets:\n                      - host_name: \"primary.voip.domain5.com\"\n                        priority: 10\n                        weight: 30\n                        port: 5060\n                      - host_name: \"secondary.voip.domain5.com\"\n                        
priority: 10\n                        weight: 20\n                        port: 5060\n                      - host_name: \"backup.voip.domain5.com\"\n                        priority: 10\n                        weight: 10\n                        port: 5060\n\n\nIn this example, Envoy is configured to respond to client queries for four domains. For any\nother query, it will forward upstream to external resolvers. The filter will return an address\nmatching the input query type. If the query is for type A records and no A records are configured,\nEnvoy will return no addresses and set the response code appropriately. Conversely, if there are\nmatching records for the query type, each configured address is returned. This is also true for\nAAAA records. Only A, AAAA, and SRV records are supported. If the filter parses queries for other\nrecord types, the filter immediately responds indicating that the type is not supported. The\nfilter can also redirect a query for a DNS name to the endpoints of a cluster. \"www.domain4.com\"\nin the configuration demonstrates this. Along with an address list, a cluster name is a valid\nendpoint for a DNS name.\n\nThe DNS filter also supports responding to queries for service records. The records for \"domain5.com\"\nillustrate the configuration necessary to support responding to SRV records. The target names\npopulated in the configuration must be fully qualified domain names, unless the target is a cluster.\nFor non-cluster targets, each referenced target name must be defined in the DNS Filter table so that\nEnvoy can resolve the target hosts' IP addresses. For a cluster, Envoy will return an address for\neach cluster endpoint.\n\nEach service record's protocol can be defined by a name or number. As configured in the example,\nthe filter will successfully respond to SRV record requests for \"_sip._tcp.voip.domain5.com\". If a\nnumerical value is specified, Envoy will attempt to resolve the number to a name. 
String values for\nprotocols are used as they appear. An underscore is prepended to both the service and protocol to\nadhere to the convention outlined in the RFC.\n\nThe filter can also consume its domain configuration from an external DNS table. The same entities\nappearing in the static configuration can be stored as JSON or YAML in a separate file and referenced\nusing the :ref:`external_dns_table DataSource <envoy_api_msg_core.DataSource>` directive:\n\nExample External DnsTable Configuration\n---------------------------------------\n\n.. code-block:: yaml\n\n    listener_filters:\n      name: \"envoy.filters.udp.dns_filter\"\n      typed_config:\n        '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig'\n        stat_prefix: \"my_prefix\"\n        server_config:\n          external_dns_table:\n            filename: \"/home/ubuntu/configs/dns_table.json\"\n\nIn the file, the table can be defined as follows:\n\nDnsTable JSON Configuration\n---------------------------\n\n.. code-block:: json\n\n  {\n    \"known_suffixes\": [\n      { \"suffix\": \"suffix1.com\" },\n      { \"suffix\": \"suffix2.com\" }\n    ],\n    \"virtual_domains\": [\n      {\n        \"name\": \"www.suffix1.com\",\n        \"endpoint\": {\n          \"address_list\": {\n            \"address\": [ \"10.0.0.1\", \"10.0.0.2\" ]\n          }\n        }\n      },\n      {\n        \"name\": \"www.suffix2.com\",\n        \"endpoint\": {\n          \"address_list\": {\n            \"address\": [ \"2001:8a:c1::2800:7\" ]\n          }\n        }\n      }\n    ]\n  }\n\n\nBy utilizing this configuration, the DNS responses can be configured separately from the Envoy\nconfiguration.\n"
  },
  {
    "path": "docs/root/configuration/listeners/udp_filters/udp_filters.rst",
    "content": ".. _config_udp_listener_filters:\n\nUDP listener filters\n====================\n\nEnvoy has the following builtin UDP listener filters.\n\n.. toctree::\n  :maxdepth: 2\n\n  udp_proxy\n  dns_filter\n\n"
  },
  {
    "path": "docs/root/configuration/listeners/udp_filters/udp_proxy.rst",
    "content": ".. _config_udp_listener_filters_udp_proxy:\n\nUDP proxy\n=========\n\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.udp.udp_proxy.v3.UdpProxyConfig>`\n* This filter should be configured with the name *envoy.filters.udp_listener.udp_proxy*\n\nOverview\n--------\n\nThe UDP proxy listener filter allows Envoy to operate as a *non-transparent* proxy between a\nUDP client and server. The lack of transparency means that the upstream server will see the\nsource IP and port of the Envoy instance versus the client. All datagrams flow from the client, to\nEnvoy, to the upstream server, back to Envoy, and back to the client.\n\nBecause UDP is not a connection oriented protocol, Envoy must keep track of a client's *session*\nsuch that the response datagrams from an upstream server can be routed back to the correct client.\nEach session is indexed by the 4-tuple consisting of source IP/port and local IP/port that the\ndatagram is received on. Sessions last until the :ref:`idle timeout\n<envoy_v3_api_field_extensions.filters.udp.udp_proxy.v3.UdpProxyConfig.idle_timeout>` is reached.\n\nThe UDP proxy listener filter also can operate as a *transparent* proxy if the\n:ref:`use_original_src_ip <envoy_v3_api_msg_extensions.filters.udp.udp_proxy.v3.UdpProxyConfig>`\nfield is set. But please keep in mind that it does not forward the port to upstreams. It forwards only the IP address to upstreams.\n\nLoad balancing and unhealthy host handling\n------------------------------------------\n\nEnvoy will fully utilize the configured load balancer for the configured upstream cluster when\nload balancing UDP datagrams. When a new session is created, Envoy will associate the session\nwith an upstream host selected using the configured load balancer. 
All future datagrams that\nbelong to the session will be routed to the same upstream host.\n\nWhen an upstream host becomes unhealthy (due to :ref:`active health checking\n<arch_overview_health_checking>`), Envoy will attempt to create a new session to a healthy host\nwhen the next datagram is received.\n\nCircuit breaking\n----------------\n\nThe number of sessions that can be created per upstream cluster is limited by the cluster's\n:ref:`maximum connection circuit breaker <arch_overview_circuit_break_cluster_maximum_connections>`.\nBy default this is 1024.\n\nExample configuration\n---------------------\n\nThe following example configuration will cause Envoy to listen on UDP port 1234 and proxy to a UDP\nserver listening on port 1235.\n\n.. literalinclude:: _include/udp-proxy.yaml\n    :language: yaml\n\n\nStatistics\n----------\n\nThe UDP proxy filter emits both its own downstream statistics as well as many of the :ref:`cluster\nupstream statistics <config_cluster_manager_cluster_stats>` where applicable. The downstream\nstatistics are rooted at *udp.<stat_prefix>.* with the following statistics:\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  downstream_sess_no_route, Counter, Number of datagrams not routed due to no cluster\n  downstream_sess_rx_bytes, Counter, Number of bytes received\n  downstream_sess_rx_datagrams, Counter, Number of datagrams received\n  downstream_sess_rx_errors, Counter, Number of datagram receive errors\n  downstream_sess_total, Counter, Number of sessions created in total\n  downstream_sess_tx_bytes, Counter, Number of bytes transmitted\n  downstream_sess_tx_datagrams, Counter, Number of datagrams transmitted\n  downstream_sess_tx_errors, Counter, Number of datagram transmission errors\n  idle_timeout, Counter, Number of sessions destroyed due to idle timeout\n  downstream_sess_active, Gauge, Number of sessions currently active\n\nThe following standard :ref:`upstream cluster stats <config_cluster_manager_cluster_stats>` are used\nby the UDP proxy:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  upstream_cx_none_healthy, Counter, Number of datagrams dropped due to no healthy hosts\n  upstream_cx_overflow, Counter, Number of datagrams dropped due to hitting the session circuit breaker\n  upstream_cx_rx_bytes_total, Counter, Number of bytes received\n  upstream_cx_tx_bytes_total, Counter, Number of bytes transmitted\n\nThe UDP proxy filter also emits custom upstream cluster stats prefixed with\n*cluster.<cluster_name>.udp.*:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  sess_rx_datagrams, Counter, Number of datagrams received\n  sess_rx_errors, Counter, Number of datagram receive errors\n  sess_tx_datagrams, Counter, Number of datagrams transmitted\n  sess_tx_errors, Counter, Number of datagram transmission errors\n"
  },
  {
    "path": "docs/root/configuration/observability/access_log/access_log.rst",
    "content": "Access Logs\n===========\n\n.. toctree::\n  :maxdepth: 2\n\n  overview\n  stats\n  usage\n"
  },
  {
    "path": "docs/root/configuration/observability/access_log/overview.rst",
    "content": "Overview\n========\n\n* Access logging :ref:`architecture overview <arch_overview_access_logs>`\n* :ref:`Configuration overview <config_access_log>`\n* :ref:`v2 API reference <envoy_api_msg_config.filter.accesslog.v2.AccessLog>`\n"
  },
  {
    "path": "docs/root/configuration/observability/access_log/stats.rst",
    "content": ".. _config_access_log_stats:\n\nStatistics\n==========\n\nCurrently only the gRPC and file based access logs have statistics.\n\ngRPC access log statistics\n--------------------------\n\nThe gRPC access log has statistics rooted at *access_logs.grpc_access_log.* with the following statistics:\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   logs_written, Counter, Total log entries sent to the logger which were not dropped. This does not imply the logs have been flushed to the gRPC endpoint yet.\n   logs_dropped, Counter, Total log entries dropped due to network or HTTP/2 back up.\n\n\nFile access log statistics\n--------------------------\n\nThe file access log has statistics rooted at the *filesystem.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  write_buffered, Counter, Total number of times file data is moved to Envoy's internal flush buffer\n  write_completed, Counter, Total number of times a file was successfully written\n  write_failed, Counter, Total number of times an error occurred during a file write operation\n  flushed_by_timer, Counter, Total number of times internal flush buffers are written to a file due to flush timeout\n  reopen_failed, Counter, Total number of times a file was failed to be opened\n  write_total_buffered, Gauge, Current total size of internal flush buffer in bytes\n"
  },
  {
    "path": "docs/root/configuration/observability/access_log/usage.rst",
    "content": ".. _config_access_log:\n\nAccess logging\n==============\n\nConfiguration\n-------------------------\n\nAccess logs are configured as part of the :ref:`HTTP connection manager config\n<config_http_conn_man>` or :ref:`TCP Proxy <config_network_filters_tcp_proxy>`.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_config.accesslog.v3.AccessLog>`\n\n.. _config_access_log_format:\n\nFormat Rules\n------------\n\nAccess log formats contain command operators that extract the relevant data and insert it.\nThey support two formats: :ref:`\"format strings\" <config_access_log_format_strings>` and\n:ref:`\"format dictionaries\" <config_access_log_format_dictionaries>`. In both cases, the command operators\nare used to extract the relevant data, which is then inserted into the specified log format.\nOnly one access log format may be specified at a time.\n\n.. _config_access_log_format_strings:\n\nFormat Strings\n--------------\n\nFormat strings are plain strings, specified using the ``format`` key. They may contain\neither command operators or other characters interpreted as a plain string.\nThe access log formatter does not make any assumptions about a new line separator, so one\nhas to be specified as part of the format string.\nSee the :ref:`default format <config_access_log_default_format>` for an example.\n\n.. _config_access_log_default_format:\n\nDefault Format String\n---------------------\n\nIf a custom format string is not specified, Envoy uses the following default format:\n\n.. code-block:: none\n\n  [%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\"\n  %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION%\n  %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\"\n  \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\\n\n\nExample of the default Envoy access log format:\n\n.. 
code-block:: none\n\n  [2016-04-15T20:17:00.310Z] \"POST /api/v1/locations HTTP/2\" 204 - 154 0 226 100 \"10.0.35.28\"\n  \"nsq2http\" \"cc21d9b0-cf5c-432b-8c7e-98aeb7988cd2\" \"locations\" \"tcp://10.0.2.1:80\"\n\n.. _config_access_log_format_dictionaries:\n\nFormat Dictionaries\n-------------------\n\nFormat dictionaries are dictionaries that specify a structured access log output format,\nspecified using the ``json_format`` or ``typed_json_format`` keys. This allows logs to be output in\na structured format such as JSON. Similar to format strings, command operators are evaluated and\ntheir values inserted into the format dictionary to construct the log output.\n\nFor example, with the following format provided in the configuration as ``json_format``:\n\n.. code-block:: json\n\n  {\n    \"config\": {\n      \"json_format\": {\n          \"protocol\": \"%PROTOCOL%\",\n          \"duration\": \"%DURATION%\",\n          \"my_custom_header\": \"%REQ(MY_CUSTOM_HEADER)%\"\n      }\n    }\n  }\n\nThe following JSON object would be written to the log file:\n\n.. code-block:: json\n\n  {\"protocol\": \"HTTP/1.1\", \"duration\": \"123\", \"my_custom_header\": \"value_of_MY_CUSTOM_HEADER\"}\n\nThis allows you to specify a custom key for each command operator.\n\nThe ``typed_json_format`` differs from ``json_format`` in that values are rendered as JSON numbers,\nbooleans, and nested objects or lists where applicable. In the example, the request duration\nwould be rendered as the number ``123``.\n\nFormat dictionaries have the following restrictions:\n\n* The dictionary must map strings to strings (specifically, strings to command operators). Nesting\n  is supported.\n* When using the ``typed_json_format`` command operators will only produce typed output if the\n  command operator is the only string that appears in the dictionary value. For example,\n  ``\"%DURATION%\"`` will log a numeric duration value, but ``\"%DURATION%.0\"`` will log a string\n  value.\n\n.. 
note::\n\n  When using the ``typed_json_format``, integer values that exceed :math:`2^{53}` will be\n  represented with reduced precision as they must be converted to floating point numbers.\n\n.. _config_access_log_command_operators:\n\nCommand Operators\n-----------------\n\nCommand operators are used to extract values that will be inserted into the access logs.\nThe same operators are used by different types of access logs (such as HTTP and TCP). Some\nfields may have slightly different meanings, depending on what type of log it is. Differences\nare noted.\n\nNote that if a value is not set/empty, the logs will contain a ``-`` character or, for JSON logs,\nthe string ``\"-\"``. For typed JSON logs unset values are represented as ``null`` values and empty\nstrings are rendered as ``\"\"``. :ref:`omit_empty_values\n<envoy_v3_api_field_config.core.v3.SubstitutionFormatString.omit_empty_values>` option could be used\nto omit empty values entirely.\n\nUnless otherwise noted, command operators produce string outputs for typed JSON logs.\n\nThe following command operators are supported:\n\n.. 
_config_access_log_format_start_time:\n\n%START_TIME%\n  HTTP\n    Request start time including milliseconds.\n\n  TCP\n    Downstream connection start time including milliseconds.\n\n  START_TIME can be customized using a `format string <https://en.cppreference.com/w/cpp/io/manip/put_time>`_.\n  In addition to that, START_TIME also accepts following specifiers:\n\n  +------------------------+-------------------------------------------------------------+\n  | Specifier              | Explanation                                                 |\n  +========================+=============================================================+\n  | ``%s``                 | The number of seconds since the Epoch                       |\n  +------------------------+-------------------------------------------------------------+\n  | ``%f``, ``%[1-9]f``    | Fractional seconds digits, default is 9 digits (nanosecond) |\n  |                        +-------------------------------------------------------------+\n  |                        | - ``%3f`` millisecond (3 digits)                            |\n  |                        | - ``%6f`` microsecond (6 digits)                            |\n  |                        | - ``%9f`` nanosecond (9 digits)                             |\n  +------------------------+-------------------------------------------------------------+\n\n  Examples of formatting START_TIME is as follows:\n\n  .. code-block:: none\n\n    %START_TIME(%Y/%m/%dT%H:%M:%S%z %s)%\n\n    # To include millisecond fraction of the second (.000 ... .999). E.g. 1527590590.528.\n    %START_TIME(%s.%3f)%\n\n    %START_TIME(%s.%6f)%\n\n    %START_TIME(%s.%9f)%\n\n  In typed JSON logs, START_TIME is always rendered as a string.\n\n%BYTES_RECEIVED%\n  HTTP\n    Body bytes received.\n\n  TCP\n    Downstream bytes received on connection.\n\n  Renders a numeric value in typed JSON logs.\n\n%PROTOCOL%\n  HTTP\n    Protocol. 
Currently either *HTTP/1.1* or *HTTP/2*.\n\n  TCP\n    Not implemented (\"-\").\n\n  In typed JSON logs, PROTOCOL will render the string ``\"-\"`` if the protocol is not\n  available (e.g. in TCP logs).\n\n%RESPONSE_CODE%\n  HTTP\n    HTTP response code. Note that a response code of '0' means that the server never sent the\n    beginning of a response. This generally means that the (downstream) client disconnected.\n\n  TCP\n    Not implemented (\"-\").\n\n  Renders a numeric value in typed JSON logs.\n\n.. _config_access_log_format_response_code_details:\n\n%RESPONSE_CODE_DETAILS%\n  HTTP\n    HTTP response code details provides additional information about the response code, such as\n    who set it (the upstream or envoy) and why.\n\n  TCP\n    Not implemented (\"-\")\n\n.. _config_access_log_format_connection_termination_details:\n\n%CONNECTION_TERMINATION_DETAILS%\n  HTTP and TCP\n    Connection termination details may provide additional information about why the connection was\n    terminated by Envoy for L4 reasons.\n\n%BYTES_SENT%\n  HTTP\n    Body bytes sent. 
For WebSocket connection it will also include response header bytes.\n\n  TCP\n    Downstream bytes sent on connection.\n\n  Renders a numeric value in typed JSON logs.\n\n%DURATION%\n  HTTP\n    Total duration in milliseconds of the request from the start time to the last byte out.\n\n  TCP\n    Total duration in milliseconds of the downstream connection.\n\n  Renders a numeric value in typed JSON logs.\n\n%REQUEST_DURATION%\n  HTTP\n    Total duration in milliseconds of the request from the start time to the last byte of\n    the request received from the downstream.\n\n  TCP\n    Not implemented (\"-\").\n\n  Renders a numeric value in typed JSON logs.\n\n%RESPONSE_DURATION%\n  HTTP\n    Total duration in milliseconds of the request from the start time to the first byte read from the\n    upstream host.\n\n  TCP\n    Not implemented (\"-\").\n\n  Renders a numeric value in typed JSON logs.\n\n%RESPONSE_TX_DURATION%\n  HTTP\n    Total duration in milliseconds of the request from the first byte read from the upstream host to the last\n    byte sent downstream.\n\n  TCP\n    Not implemented (\"-\").\n\n  Renders a numeric value in typed JSON logs.\n\n.. _config_access_log_format_response_flags:\n\n%RESPONSE_FLAGS%\n  Additional details about the response or connection, if any. For TCP connections, the response codes mentioned in\n  the descriptions do not apply. 
Possible values are:\n\n  HTTP and TCP\n    * **UH**: No healthy upstream hosts in upstream cluster in addition to 503 response code.\n    * **UF**: Upstream connection failure in addition to 503 response code.\n    * **UO**: Upstream overflow (:ref:`circuit breaking <arch_overview_circuit_break>`) in addition to 503 response code.\n    * **NR**: No :ref:`route configured <arch_overview_http_routing>` for a given request in addition to 404 response code, or no matching filter chain for a downstream connection.\n    * **URX**: The request was rejected because the :ref:`upstream retry limit (HTTP) <envoy_v3_api_field_config.route.v3.RetryPolicy.num_retries>`  or :ref:`maximum connect attempts (TCP) <envoy_v3_api_field_extensions.filters.network.tcp_proxy.v3.TcpProxy.max_connect_attempts>` was reached.\n  HTTP only\n    * **DC**: Downstream connection termination.\n    * **LH**: Local service failed :ref:`health check request <arch_overview_health_checking>` in addition to 503 response code.\n    * **UT**: Upstream request timeout in addition to 504 response code.\n    * **LR**: Connection local reset in addition to 503 response code.\n    * **UR**: Upstream remote reset in addition to 503 response code.\n    * **UC**: Upstream connection termination in addition to 503 response code.\n    * **DI**: The request processing was delayed for a period specified via :ref:`fault injection <config_http_filters_fault_injection>`.\n    * **FI**: The request was aborted with a response code specified via :ref:`fault injection <config_http_filters_fault_injection>`.\n    * **RL**: The request was ratelimited locally by the :ref:`HTTP rate limit filter <config_http_filters_rate_limit>` in addition to 429 response code.\n    * **UAEX**: The request was denied by the external authorization service.\n    * **RLSE**: The request was rejected because there was an error in rate limit service.\n    * **IH**: The request was rejected because it set an invalid value for a\n      
:ref:`strictly-checked header <envoy_v3_api_field_extensions.filters.http.router.v3.Router.strict_check_headers>` in addition to 400 response code.\n    * **SI**: Stream idle timeout in addition to 408 response code.\n    * **DPE**: The downstream request had an HTTP protocol error.\n    * **UMSDR**: The upstream request reached to max stream duration.\n\n%ROUTE_NAME%\n  Name of the route.\n\n%UPSTREAM_HOST%\n  Upstream host URL (e.g., tcp://ip:port for TCP connections).\n\n%UPSTREAM_CLUSTER%\n  Upstream cluster to which the upstream host belongs to.\n\n%UPSTREAM_LOCAL_ADDRESS%\n  Local address of the upstream connection. If the address is an IP address it includes both\n  address and port.\n\n.. _config_access_log_format_upstream_transport_failure_reason:\n\n%UPSTREAM_TRANSPORT_FAILURE_REASON%\n  HTTP\n    If upstream connection failed due to transport socket (e.g. TLS handshake), provides the failure\n    reason from the transport socket. The format of this field depends on the configured upstream\n    transport socket. Common TLS failures are in :ref:`TLS trouble shooting <arch_overview_ssl_trouble_shooting>`.\n\n  TCP\n    Not implemented (\"-\")\n\n%DOWNSTREAM_REMOTE_ADDRESS%\n  Remote address of the downstream connection. If the address is an IP address it includes both\n  address and port.\n\n  .. note::\n\n    This may not be the physical remote address of the peer if the address has been inferred from\n    :ref:`proxy proto <envoy_v3_api_field_config.listener.v3.FilterChain.use_proxy_proto>` or :ref:`x-forwarded-for\n    <config_http_conn_man_headers_x-forwarded-for>`.\n\n%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\n  Remote address of the downstream connection. If the address is an IP address the output does\n  *not* include port.\n\n  .. 
note::\n\n    This may not be the physical remote address of the peer if the address has been inferred from\n    :ref:`proxy proto <envoy_v3_api_field_config.listener.v3.FilterChain.use_proxy_proto>` or :ref:`x-forwarded-for\n    <config_http_conn_man_headers_x-forwarded-for>`.\n\n%DOWNSTREAM_DIRECT_REMOTE_ADDRESS%\n  Direct remote address of the downstream connection. If the address is an IP address it includes both\n  address and port.\n\n  .. note::\n\n    This is always the physical remote address of the peer even if the downstream remote address has\n    been inferred from :ref:`proxy proto <envoy_v3_api_field_config.listener.v3.FilterChain.use_proxy_proto>`\n    or :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`.\n\n%DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT%\n  The direct remote address of the downstream connection. If the address is an IP address the output does\n  *not* include port.\n\n  .. note::\n\n    This is always the physical remote address of the peer even if the downstream remote address has\n    been inferred from :ref:`proxy proto <envoy_v3_api_field_config.listener.v3.FilterChain.use_proxy_proto>`\n    or :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`.\n\n%DOWNSTREAM_LOCAL_ADDRESS%\n  Local address of the downstream connection. If the address is an IP address it includes both\n  address and port.\n  If the original connection was redirected by iptables REDIRECT, this represents\n  the original destination address restored by the\n  :ref:`Original Destination Filter <config_listener_filters_original_dst>` using SO_ORIGINAL_DST socket option.\n  If the original connection was redirected by iptables TPROXY, and the listener's transparent\n  option was set to true, this represents the original destination address and port.\n\n%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%\n    Same as **%DOWNSTREAM_LOCAL_ADDRESS%** excluding port if the address is an IP address.\n\n.. 
_config_access_log_format_connection_id:\n\n%CONNECTION_ID%\n  An identifier for the downstream connection. It can be used to\n  cross-reference TCP access logs across multiple log sinks, or to\n  cross-reference timer-based reports for the same connection. The identifier\n  is unique with high likelihood within an execution, but can duplicate across\n  multiple instances or between restarts.\n\n%GRPC_STATUS%\n  gRPC status code which is easy to interpret with text message corresponding with number.\n\n%DOWNSTREAM_LOCAL_PORT%\n    Similar to **%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%**, but only extracts the port portion of the **%DOWNSTREAM_LOCAL_ADDRESS%**\n\n%REQ(X?Y):Z%\n  HTTP\n    An HTTP request header where X is the main HTTP header, Y is the alternative one, and Z is an\n    optional parameter denoting string truncation up to Z characters long. The value is taken from\n    the HTTP request header named X first and if it's not set, then request header Y is used. If\n    none of the headers are present '-' symbol will be in the log.\n\n  TCP\n    Not implemented (\"-\").\n\n%RESP(X?Y):Z%\n  HTTP\n    Same as **%REQ(X?Y):Z%** but taken from HTTP response headers.\n\n  TCP\n    Not implemented (\"-\").\n\n%TRAILER(X?Y):Z%\n  HTTP\n    Same as **%REQ(X?Y):Z%** but taken from HTTP response trailers.\n\n  TCP\n    Not implemented (\"-\").\n\n%DYNAMIC_METADATA(NAMESPACE:KEY*):Z%\n  HTTP\n    :ref:`Dynamic Metadata <envoy_v3_api_msg_config.core.v3.Metadata>` info,\n    where NAMESPACE is the filter namespace used when setting the metadata, KEY is an optional\n    lookup up key in the namespace with the option of specifying nested keys separated by ':',\n    and Z is an optional parameter denoting string truncation up to Z characters long. Dynamic Metadata\n    can be set by filters using the :repo:`StreamInfo <include/envoy/stream_info/stream_info.h>` API:\n    *setDynamicMetadata*. The data will be logged as a JSON string. 
For example, for the following dynamic metadata:\n\n    ``com.test.my_filter: {\"test_key\": \"foo\", \"test_object\": {\"inner_key\": \"bar\"}}``\n\n    * %DYNAMIC_METADATA(com.test.my_filter)% will log: ``{\"test_key\": \"foo\", \"test_object\": {\"inner_key\": \"bar\"}}``\n    * %DYNAMIC_METADATA(com.test.my_filter:test_key)% will log: ``\"foo\"``\n    * %DYNAMIC_METADATA(com.test.my_filter:test_object)% will log: ``{\"inner_key\": \"bar\"}``\n    * %DYNAMIC_METADATA(com.test.my_filter:test_object:inner_key)% will log: ``\"bar\"``\n    * %DYNAMIC_METADATA(com.unknown_filter)% will log: ``-``\n    * %DYNAMIC_METADATA(com.test.my_filter:unknown_key)% will log: ``-``\n    * %DYNAMIC_METADATA(com.test.my_filter):25% will log (truncation at 25 characters): ``{\"test_key\": \"foo\", \"test``\n\n  TCP\n    Not implemented (\"-\").\n\n  .. note::\n\n    For typed JSON logs, this operator renders a single value with string, numeric, or boolean type\n    when the referenced key is a simple value. If the referenced key is a struct or list value, a\n    JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum\n    length is ignored\n\n.. _config_access_log_format_filter_state:\n\n%FILTER_STATE(KEY:F):Z%\n  HTTP\n    :ref:`Filter State <arch_overview_data_sharing_between_filters>` info, where the KEY is required to\n    look up the filter state object. The serialized proto will be logged as JSON string if possible.\n    If the serialized proto is unknown to Envoy it will be logged as protobuf debug string.\n    Z is an optional parameter denoting string truncation up to Z characters long.\n    F is an optional parameter used to indicate which method FilterState uses for serialization. \n    If 'PLAIN' is set, the filter state object will be serialized as an unstructured string. 
\n    If 'TYPED' is set or no F provided, the filter state object will be serialized as a JSON string.\n\n  TCP\n    Same as HTTP, the filter state is from the connection instead of an L7 request.\n\n  .. note::\n\n    For typed JSON logs, this operator renders a single value with string, numeric, or boolean type\n    when the referenced key is a simple value. If the referenced key is a struct or list value, a\n    JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum\n    length is ignored.\n\n%REQUESTED_SERVER_NAME%\n  HTTP\n    String value set on ssl connection socket for Server Name Indication (SNI)\n  TCP\n    String value set on ssl connection socket for Server Name Indication (SNI)\n\n%DOWNSTREAM_LOCAL_URI_SAN%\n  HTTP\n    The URIs present in the SAN of the local certificate used to establish the downstream TLS connection.\n  TCP\n    The URIs present in the SAN of the local certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_URI_SAN%\n  HTTP\n    The URIs present in the SAN of the peer certificate used to establish the downstream TLS connection.\n  TCP\n    The URIs present in the SAN of the peer certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_LOCAL_SUBJECT%\n  HTTP\n    The subject present in the local certificate used to establish the downstream TLS connection.\n  TCP\n    The subject present in the local certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_SUBJECT%\n  HTTP\n    The subject present in the peer certificate used to establish the downstream TLS connection.\n  TCP\n    The subject present in the peer certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_ISSUER%\n  HTTP\n    The issuer present in the peer certificate used to establish the downstream TLS connection.\n  TCP\n    The issuer present in the peer certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_TLS_SESSION_ID%\n
 HTTP\n    The session ID for the established downstream TLS connection.\n  TCP\n    The session ID for the established downstream TLS connection.\n\n%DOWNSTREAM_TLS_CIPHER%\n  HTTP\n    The OpenSSL name for the set of ciphers used to establish the downstream TLS connection.\n  TCP\n    The OpenSSL name for the set of ciphers used to establish the downstream TLS connection.\n\n%DOWNSTREAM_TLS_VERSION%\n  HTTP\n    The TLS version (e.g., ``TLSv1.2``, ``TLSv1.3``) used to establish the downstream TLS connection.\n  TCP\n    The TLS version (e.g., ``TLSv1.2``, ``TLSv1.3``) used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_FINGERPRINT_256%\n  HTTP\n    The hex-encoded SHA256 fingerprint of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The hex-encoded SHA256 fingerprint of the client certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_FINGERPRINT_1%\n  HTTP\n    The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_SERIAL%\n  HTTP\n    The serial number of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The serial number of the client certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_CERT%\n  HTTP\n    The client certificate in the URL-encoded PEM format used to establish the downstream TLS connection.\n  TCP\n    The client certificate in the URL-encoded PEM format used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_CERT_V_START%\n  HTTP\n    The validity start date of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The validity start date of the client certificate used to establish the downstream TLS connection.\n\n%DOWNSTREAM_PEER_CERT_V_END%\n  HTTP\n    The validity end 
date of the client certificate used to establish the downstream TLS connection.\n  TCP\n    The validity end date of the client certificate used to establish the downstream TLS connection.\n\n%HOSTNAME%\n  The system hostname.\n\n%LOCAL_REPLY_BODY%\n  The body text for the requests rejected by Envoy.\n"
  },
  {
    "path": "docs/root/configuration/observability/application_logging.rst",
    "content": ".. _config_application_logs:\n\nApplication logging\n===================\n\nEnvoy and its filters write application logs for debuggability.\nEnvoy can be configured to output application logs in a format that is compatible with common log viewers.\nThis section documents how Envoy can be configured to enable integration with each log viewer.\n\nStackdriver Logging with GKE\n----------------------------\n\n`Stackdriver Logging <https://cloud.google.com/logging/>`_ can read logs from containers running on\n`Google Kubernetes Engine <https://cloud.google.com/kubernetes-engine/>`_. Envoy should be configured\nwith the following :ref:`command line options <operations_cli>`:\n\n* ``--log-format '%L%m%d %T.%e %t envoy] [%t][%n]%v'``: Logs are formatted in `glog <https://github.com/google/glog>`_\n  format, allowing Stackdriver to parse the log severity and timestamp.\n* ``--log-format-escaped``: Each string that is logged will be printed in a single line.\n  C-style escape sequences (such as ``\\n``) will be escaped and prevent a single string\n  from spanning multiple lines. This ensures each log line is structured with the glog prefix.\n* The ``--log-path`` flag **does not** need to be set, since Stackdriver can read logs from STDERR.\n* The ``--log-level`` flag can be set to control the log severity logged to Stackdriver.\n\n`Reference documentation <https://cloud.google.com/run/docs/logging#container-logs>`_ for Stackdriver on GKE."
  },
  {
    "path": "docs/root/configuration/observability/observability.rst",
    "content": "Observability\n=============\n\n.. toctree::\n  :maxdepth: 2\n\n  statistics\n  application_logging\n  access_log/access_log.rst\n"
  },
  {
    "path": "docs/root/configuration/observability/statistics.rst",
    "content": ".. _statistics:\n\nStatistics\n==========\n\n.. _server_statistics:\n\nServer\n------\n\nServer related statistics are rooted at *server.* with following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  uptime, Gauge, Current server uptime in seconds\n  concurrency, Gauge, Number of worker threads\n  memory_allocated, Gauge, Current amount of allocated memory in bytes. Total of both new and old Envoy processes on hot restart.\n  memory_heap_size, Gauge, Current reserved heap size in bytes. New Envoy process heap size on hot restart.\n  memory_physical_size, Gauge, Current estimate of total bytes of the physical memory. New Envoy process physical memory size on hot restart.\n  live, Gauge, \"1 if the server is not currently draining, 0 otherwise\"\n  state, Gauge, Current :ref:`State <envoy_v3_api_field_admin.v3.ServerInfo.state>` of the Server.\n  parent_connections, Gauge, Total connections of the old Envoy process on hot restart\n  total_connections, Gauge, Total connections of both new and old Envoy processes\n  version, Gauge, Integer represented version number based on SCM revision or :ref:`stats_server_version_override <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.stats_server_version_override>` if set.\n  days_until_first_cert_expiring, Gauge, Number of days until the next certificate being managed will expire\n  seconds_until_first_ocsp_response_expiring, Gauge, Number of seconds until the next OCSP response being managed will expire\n  hot_restart_epoch, Gauge, Current hot restart epoch -- an integer passed via command line flag `--restart-epoch` usually indicating generation.\n  hot_restart_generation, Gauge, Current hot restart generation -- like hot_restart_epoch but computed automatically by incrementing from parent.\n  initialization_time_ms, Histogram, Total time taken for Envoy initialization in milliseconds. 
This is the time from server start-up until the worker threads are ready to accept new connections\n  debug_assertion_failures, Counter, Number of debug assertion failures detected in a release build if compiled with `--define log_debug_assert_in_release=enabled` or zero otherwise\n  envoy_bug_failures, Counter, Number of envoy bug failures detected in a release build. File or report the issue if this increments as this may be serious.\n  static_unknown_fields, Counter, Number of messages in static configuration with unknown fields\n  dynamic_unknown_fields, Counter, Number of messages in dynamic configuration with unknown fields\n\n"
  },
  {
    "path": "docs/root/configuration/operations/operations.rst",
    "content": "Operations\n==========\n\n.. toctree::\n  :maxdepth: 2\n\n  runtime\n  overload_manager/overload_manager\n  tools/router_check\n"
  },
  {
    "path": "docs/root/configuration/operations/overload_manager/overload_manager.rst",
    "content": ".. _config_overload_manager:\n\nOverload manager\n================\n\nThe :ref:`overload manager <arch_overview_overload_manager>` is configured in the Bootstrap\n:ref:`overload_manager <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.overload_manager>`\nfield.\n\nAn example configuration of the overload manager is shown below. It shows a configuration to\ndisable HTTP/1.x keepalive when heap memory usage reaches 95% and to stop accepting\nrequests when heap memory usage reaches 99%.\n\n.. code-block:: yaml\n\n   refresh_interval:\n     seconds: 0\n     nanos: 250000000\n   resource_monitors:\n     - name: \"envoy.resource_monitors.fixed_heap\"\n       typed_config:\n         \"@type\": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig\n         max_heap_size_bytes: 2147483648\n   actions:\n     - name: \"envoy.overload_actions.disable_http_keepalive\"\n       triggers:\n         - name: \"envoy.resource_monitors.fixed_heap\"\n           threshold:\n             value: 0.95\n     - name: \"envoy.overload_actions.stop_accepting_requests\"\n       triggers:\n         - name: \"envoy.resource_monitors.fixed_heap\"\n           threshold:\n             value: 0.99\n\nResource monitors\n-----------------\n\nThe overload manager uses Envoy's :ref:`extension <extending>` framework for defining\nresource monitors. Envoy's builtin resource monitors are listed\n:ref:`here <config_resource_monitors>`.\n\nTriggers\n--------\n\nTriggers connect resource monitors to actions. There are two types of triggers supported:\n\n.. 
list-table::\n  :header-rows: 1\n  :widths: 1, 2\n\n  * - Type\n    - Description\n  * - :ref:`threshold <envoy_v3_api_msg_config.overload.v3.ThresholdTrigger>`\n    - Sets the action state to 1 (= *saturated*) when the resource pressure is above a threshold, and to 0 otherwise.\n  * - :ref:`scaled <envoy_v3_api_msg_config.overload.v3.ScaledTrigger>`\n    - Sets the action state to 0 when the resource pressure is below the\n      :ref:`scaling_threshold <envoy_v3_api_field_config.overload.v3.ScaledTrigger.scaling_threshold>`,\n      `(pressure - scaling_threshold)/(saturation_threshold - scaling_threshold)` when\n      `scaling_threshold < pressure < saturation_threshold`, and to 1 (*saturated*) when the\n      pressure is above the\n      :ref:`saturation_threshold <envoy_v3_api_field_config.overload.v3.ScaledTrigger.saturation_threshold>`.\"\n\n.. _config_overload_manager_overload_actions:\n\nOverload actions\n----------------\n\nThe following overload actions are supported:\n\n.. csv-table::\n  :header: Name, Description\n  :widths: 1, 2\n\n  envoy.overload_actions.stop_accepting_requests, Envoy will immediately respond with a 503 response code to new requests\n  envoy.overload_actions.disable_http_keepalive, Envoy will stop accepting streams on incoming HTTP connections\n  envoy.overload_actions.stop_accepting_connections, Envoy will stop accepting new network connections on its configured listeners\n  envoy.overload_actions.shrink_heap, Envoy will periodically try to shrink the heap by releasing free memory to the system\n\nLimiting Active Connections\n---------------------------\n\nCurrently, the only supported way to limit the total number of active connections allowed across all\nlisteners is via specifying an integer through the runtime key\n``overload.global_downstream_max_connections``. 
The connection limit is recommended to be less than\nhalf of the system's file descriptor limit, to account for upstream connections, files, and other\nusage of file descriptors.\nIf the value is unspecified, there is no global limit on the number of active downstream connections\nand Envoy will emit a warning indicating this at startup. To disable the warning without setting a\nlimit on the number of active downstream connections, the runtime value may be set to a very large\nlimit (~2e9).\n\nIf it is desired to only limit the number of downstream connections for a particular listener,\nper-listener limits can be set via the :ref:`listener configuration <config_listeners>`.\n\nOne may simultaneously specify both per-listener and global downstream connection limits and the\nconditions will be enforced independently. For instance, if it is known that a particular listener\nshould have a smaller number of open connections than others, one may specify a smaller connection\nlimit for that specific listener and allow the global limit to enforce resource utilization among\nall listeners.\n\nAn example configuration can be found in the :ref:`edge best practices document <best_practices_edge>`.\n\nStatistics\n----------\n\nEach configured resource monitor has a statistics tree rooted at *overload.<name>.*\nwith the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  pressure, Gauge, Resource pressure as a percent\n  failed_updates, Counter, Total failed attempts to update the resource pressure\n  skipped_updates, Counter, Total skipped attempts to update the resource pressure due to a pending update\n\nEach configured overload action has a statistics tree rooted at *overload.<name>.*\nwith the following statistics:\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  active, Gauge, \"Active state of the action (0=scaling, 1=saturated)\"\n  scale_percent, Gauge, \"Scaled value of the action as a percent (0-99=scaling, 100=saturated)\"\n"
  },
  {
    "path": "docs/root/configuration/operations/runtime.rst",
    "content": ".. _config_runtime:\n\nRuntime\n=======\n\nThe :ref:`runtime configuration <arch_overview_runtime>` specifies a virtual file system tree that\ncontains re-loadable configuration elements. This virtual file system can be realized via a series\nof local file system, static bootstrap configuration, RTDS and admin console derived overlays.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_config.bootstrap.v3.Runtime>`\n\n.. _config_virtual_filesystem:\n\nVirtual file system\n-------------------\n\n.. _config_runtime_layering:\n\nLayering\n++++++++\n\nThe runtime can be viewed as a virtual file system consisting of multiple layers. The :ref:`layered\nruntime <envoy_v3_api_msg_config.bootstrap.v3.LayeredRuntime>` bootstrap configuration specifies this\nlayering. Runtime settings in later layers override earlier layers. A typical configuration might\nbe:\n\n.. validated-code-block:: yaml\n  :type-name: envoy.config.bootstrap.v3.LayeredRuntime\n\n  layers:\n  - name: static_layer_0\n    static_layer:\n      health_check:\n        min_interval: 5\n  - name: disk_layer_0\n    disk_layer: { symlink_root: /srv/runtime/current, subdirectory: envoy }\n  - name: disk_layer_1\n    disk_layer: { symlink_root: /srv/runtime/current, subdirectory: envoy_override, append_service_cluster: true }\n  - name: admin_layer_0\n    admin_layer: {}\n\nIn the deprecated :ref:`runtime <envoy_v3_api_msg_config.bootstrap.v3.Runtime>` bootstrap\nconfiguration, the layering was implicit and fixed:\n\n1. :ref:`Static bootstrap configuration <config_runtime_bootstrap>`\n2. :ref:`Local disk file system <config_runtime_local_disk>`\n3. :ref:`Local disk file system *override_subdirectory* <config_runtime_local_disk>`\n4. :ref:`Admin console overrides <config_runtime_admin>`\n\nwith values in higher layers overriding corresponding values in lower layers.\n\n.. 
_config_runtime_file_system:\n\nFile system layout\n++++++++++++++++++\n\nVarious sections of the configuration guide describe the runtime settings that are available.\nFor example, :ref:`here <config_cluster_manager_cluster_runtime>` are the runtime settings for\nupstream clusters.\n\nEach '.' in a runtime key indicates a new directory in the hierarchy,\nThe terminal portion of a path is the file. The contents of the file constitute the runtime value.\nWhen reading numeric values from a file, spaces and new lines will be ignored.\n\n*numerator* or *denominator* are reserved keywords and may not appear in any directory.\n\n.. _config_runtime_bootstrap:\n\nStatic bootstrap\n++++++++++++++++\n\nA static base runtime may be specified in the :ref:`bootstrap configuration\n<envoy_v3_api_field_config.bootstrap.v3.Runtime.base>` via a :ref:`protobuf JSON representation\n<config_runtime_proto_json>`.\n\n.. _config_runtime_local_disk:\n\nLocal disk file system\n++++++++++++++++++++++\n\nWhen the :ref:`runtime virtual file system <config_runtime_file_system>` is realized on a local\ndisk, it is rooted at *symlink_root* +\n*subdirectory*. For example, the *health_check.min_interval* key would have the following full\nfile system path (using the symbolic link):\n\n``/srv/runtime/current/envoy/health_check/min_interval``\n\n.. _config_runtime_local_disk_overrides:\n\nOverrides\n~~~~~~~~~\n\nAn arbitrary number of disk file system layers can be overlaid in the :ref:`layered\nruntime <envoy_v3_api_msg_config.bootstrap.v3.LayeredRuntime>` bootstrap configuration.\n\nIn the deprecated :ref:`runtime <envoy_v3_api_msg_config.bootstrap.v3.Runtime>` bootstrap configuration,\nthere was a distinguished file system override. Assume that the folder ``/srv/runtime/v1`` points to\nthe actual file system path where global runtime configurations are stored. 
The following would be a\ntypical configuration setting for runtime:\n\n* *symlink_root*: ``/srv/runtime/current``\n* *subdirectory*: ``envoy``\n* *override_subdirectory*: ``envoy_override``\n\nWhere ``/srv/runtime/current`` is a symbolic link to ``/srv/runtime/v1``.\n\n.. _config_runtime_local_disk_service_cluster_subdirs:\n\nCluster-specific subdirectories\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nIn the deprecated :ref:`runtime <envoy_v3_api_msg_config.bootstrap.v3.Runtime>` bootstrap configuration,\nthe *override_subdirectory* is used along with the :option:`--service-cluster` CLI option. Assume\nthat :option:`--service-cluster` has been set to ``my-cluster``. Envoy will first look for the\n*health_check.min_interval* key in the following full file system path:\n\n``/srv/runtime/current/envoy_override/my-cluster/health_check/min_interval``\n\nIf found, the value will override any value found in the primary lookup path. This allows the user\nto customize the runtime values for individual clusters on top of global defaults.\n\nWith the :ref:`layered runtime <envoy_v3_api_msg_config.bootstrap.v3.LayeredRuntime>` bootstrap\nconfiguration, it is possible to specialize on service cluster via the :ref:`append_service_cluster\n<envoy_v3_api_field_config.bootstrap.v3.RuntimeLayer.DiskLayer.append_service_cluster>` option at any\ndisk layer.\n\n.. _config_runtime_symbolic_link_swap:\n\nUpdating runtime values via symbolic link swap\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThere are two steps to update any runtime value. First, create a hard copy of the entire runtime\ntree and update the desired runtime values. Second, atomically swap the symbolic link root from the\nold tree to the new runtime tree, using the equivalent of the following command:\n\n.. code-block:: console\n\n  /srv/runtime:~$ ln -s /srv/runtime/v2 new && mv -Tf new current\n\nIt's beyond the scope of this document how the file system data is deployed, garbage collected, etc.\n\n.. 
_config_runtime_rtds:\n\nRuntime Discovery Service (RTDS)\n++++++++++++++++++++++++++++++++\n\nOne or more runtime layers may be specified and delivered by specifying a :ref:`rtds_layer\n<envoy_v3_api_field_config.bootstrap.v3.RuntimeLayer.rtds_layer>`. This points the runtime layer at a\nregular :ref:`xDS <xds_protocol>` endpoint, subscribing to a single xDS resource for the given\nlayer. The resource type for these layers is a :ref:`Runtime message\n<envoy_v3_api_msg_service.runtime.v3.Runtime>`.\n\n.. _config_runtime_admin:\n\nAdmin console\n+++++++++++++\n\nValues can be viewed at the\n:ref:`/runtime admin endpoint <operations_admin_interface_runtime>`. Values can be modified and\nadded at the :ref:`/runtime_modify admin endpoint <operations_admin_interface_runtime_modify>`. If\nruntime is not configured, an empty provider is used which has the effect of using all defaults\nbuilt into the code, except for any values added via `/runtime_modify`.\n\n.. attention::\n\n  Use the :ref:`/runtime_modify<operations_admin_interface_runtime_modify>` endpoint with care.\n  Changes are effectively immediately. It is **critical** that the admin interface is :ref:`properly\n  secured <operations_admin_interface_security>`.\n\nAt most one admin layer may be specified. If a non-empty :ref:`layered runtime\n<envoy_v3_api_msg_config.bootstrap.v3.LayeredRuntime>` bootstrap configuration is specified with an\nabsent admin layer, any mutating admin console actions will elicit a 503 response.\n\n.. _config_runtime_atomicity:\n\nAtomicity\n---------\n\nThe runtime will reload and a new snapshot will be generated in a variety of situations, i.e.:\n\n* When a file move operation is detected under the symlink root or the symlink root changes.\n* When an admin console override is added or modified.\n\nAll runtime layers are evaluated during a snapshot. Layers with errors are ignored and excluded from\nthe effective layers, see :ref:`num_layers <runtime_stats>`. 
Walking the symlink root will take a\nnon-zero amount of time, so if true atomicity is desired, the runtime directory should be immutable\nand symlink changes should be used to orchestrate updates.\n\nDisk layers with the same symlink root will only trigger a single refresh when a file movement is\ndetected. Disk layers with overlapping symlink root paths that are not identical may trigger\nmultiple reloads when a file movement is detected.\n\n.. _config_runtime_proto_json:\n\nProtobuf and JSON representation\n--------------------------------\n\nThe runtime :ref:`file system <config_runtime_file_system>` can be represented inside a proto3\nmessage as a `google.protobuf.Struct\n<https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Struct>`_\nmodeling a JSON object with the following rules:\n\n* Dot separators map to tree edges.\n* Scalar leaves (integer, strings, booleans, doubles) are represented with their respective JSON type.\n* :ref:`FractionalPercent <envoy_v3_api_msg_type.v3.FractionalPercent>` is represented with via its\n  `canonical JSON encoding <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n\nAn example representation of a setting for the *health_check.min_interval* key in YAML is:\n\n.. code-block:: yaml\n\n  health_check:\n    min_interval: 5\n\n.. note::\n\n  Integer values that are parsed from doubles are rounded down to the nearest whole number.\n\n.. _config_runtime_comments:\n\nComments\n--------\n\nLines starting with ``#`` as the first character are treated as comments.\n\nComments can be used to provide context on an existing value. Comments are also useful in an\notherwise empty file to keep a placeholder for deployment in a time of need.\n\n.. 
_config_runtime_deprecation:\n\nUsing runtime overrides for deprecated features\n-----------------------------------------------\n\nThe Envoy runtime is also a part of the Envoy feature deprecation process.\n\nAs described in the Envoy :repo:`breaking change policy <CONTRIBUTING.md#breaking-change-policy>`,\nfeature deprecation in Envoy is in 3 phases: warn-by-default, fail-by-default, and code removal.\n\nIn the first phase, Envoy logs a warning to the warning log that the feature is deprecated and\nincrements the :ref:`deprecated_feature_use <runtime_stats>` runtime stat.\nUsers are encouraged to go to :ref:`deprecated <deprecated>` to see how to\nmigrate to the new code path and make sure it is suitable for their use case.\n\nIn the second phase the field will be tagged as disallowed_by_default\nand use of that configuration field will cause the config to be rejected by default.\nThis disallowed mode can be overridden in runtime configuration by setting\nenvoy.deprecated_features:full_fieldname or envoy.deprecated_features:full_enum_value\nto true. For example, for a deprecated field\n``Foo.Bar.Eep`` set ``envoy.deprecated_features:Foo.bar.Eep`` to\n``true``. There is a production example using static runtime to allow both fail-by-default fields here:\n:repo:`configs/using_deprecated_config.v2.yaml`\nUse of these override is **strongly discouraged** so please use with caution and switch to the new fields\nas soon as possible. Fatal-by-default configuration indicates that the removal of the old code paths is\nimminent. It is far better for both Envoy users and for Envoy contributors if any bugs or feature gaps\nwith the new code paths are flushed out ahead of time, rather than after the code is removed!\n\n.. _runtime_stats:\n\n.. attention::\n\n   Versions of Envoy prior to 1.14.1 cannot parse runtime booleans from integer values and require\n   an explicit \"true\" or \"false\". 
Mistakenly placing an integer such as \"0\" to represent \"false\"\n   will lead to usage of the default value. This is especially important to keep in mind for the case of\n   runtime overrides for :ref:`deprecated features<deprecated>`, as it can potentially result\n   in unexpected Envoy behaviors.\n\nStatistics\n----------\n\nThe file system runtime provider emits some statistics in the *runtime.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  admin_overrides_active, Gauge, 1 if any admin overrides are active otherwise 0\n  deprecated_feature_use, Counter, Total number of times deprecated features were used. Detailed information about the feature used will be logged to warning logs in the form \"Using deprecated option 'X' from file Y\".\n  deprecated_feature_seen_since_process_start, Gauge, Number of times deprecated features were used. This is not carried over during hot restarts.\n  load_error, Counter, Total number of load attempts that resulted in an error in any layer\n  load_success, Counter, Total number of load attempts that were successful at all layers\n  num_keys, Gauge, Number of keys currently loaded\n  num_layers, Gauge, Number of layers currently active (without loading errors)\n  override_dir_exists, Counter, Total number of loads that did use an override directory\n  override_dir_not_exists, Counter, Total number of loads that did not use an override directory\n"
  },
  {
    "path": "docs/root/configuration/operations/tools/router_check.rst",
    "content": ".. _config_tools_router_check_tool:\n\nRoute table check tool\n======================\n\n.. note::\n\n  The following configuration is for the route table check tool only and is not part of the Envoy binary.\n  The route table check tool is a standalone binary that can be used to verify Envoy's routing for a given configuration\n  file.\n\nThe following specifies input to the route table check tool. The route table check tool checks if\nthe route returned by a :ref:`router <envoy_v3_api_msg_config.route.v3.RouteConfiguration>` matches what is expected.\nThe tool can be used to check cluster name, virtual cluster name,\nvirtual host name, manual path rewrite, manual host rewrite, path redirect, and\nheader field matches. Extensions for other test cases can be added. Details about installing the tool\nand sample tool input/output can be found at :ref:`installation <install_tools_route_table_check_tool>`.\n\nThe route table check tool config is composed of an array of json test objects. Each test object is composed of\nthree parts.\n\nTest name\n  This field specifies the name of each test object.\n\nInput values\n  The input value fields specify the parameters to be passed to the router. Example input fields include\n  the :authority, :path, and :method header fields. The :authority and :path fields specify the url\n  sent to the router and are required. All other input fields are optional.\n\nValidate\n  The validate fields specify the expected values and test cases to check. At least one test\n  case is required.\n\nA simple tool configuration json has one test case and is written as follows. The test\nexpects a cluster name match of \"instant-server\".::\n\n   tests\n   - test_name: Cluster_name_test,\n     input:\n       authority: api.lyft.com,\n       path: /api/locations\n     validate:\n       cluster_name: instant-server\n\n.. 
code-block:: yaml\n\n  tests\n  - test_name: ...,\n    input:\n      authority: ...,\n      path: ...,\n      method: ...,\n      internal: ...,\n      random_value: ...,\n      ssl: ...,\n      runtime: ...,\n      additional_request_headers:\n        - key: ...,\n          value: ...\n      additional_response_headers:\n        - key: ...,\n          value: ...\n    validate:\n      cluster_name: ...,\n      virtual_cluster_name: ...,\n      virtual_host_name: ...,\n      host_rewrite: ...,\n      path_rewrite: ...,\n      path_redirect: ...,\n      request_header_matches:\n        - name: ...,\n          exact_match: ...\n      response_header_matches:\n        - name: ...,\n          exact_match: ...\n        - name: ...,\n          presence_match: ...\n\ntest_name\n  *(required, string)* The name of a test object.\n\ninput\n  *(required, object)* Input values sent to the router that determine the returned route.\n\n  authority\n    *(required, string)* The url authority. This value along with the path parameter define\n    the url to be matched. An example authority value is \"api.lyft.com\".\n\n  path\n    *(required, string)* The url path. An example path value is \"/foo\".\n\n  method\n    *(required, string)* The request method. If not specified, the default method is GET. The options\n    are GET, PUT, or POST.\n\n  internal\n    *(optional, boolean)* A flag that determines whether to set x-envoy-internal to \"true\".\n    If not specified, or if internal is equal to false, x-envoy-internal is not set.\n\n  random_value\n    *(optional, integer)* An integer used to identify the target for weighted cluster selection\n    and as a factor for the routing engine to decide whether a runtime based route takes effect.\n    The default value of random_value is 0. 
For routes with runtime fraction numerators of 0, \n    the route checker tool changes the numerators to 1 so they can be tested with random_value\n    set to 0 to simulate the route being enabled and random_value set to any int >= 1 to\n    simulate the route being disabled.\n\n  ssl\n    *(optional, boolean)* A flag that determines whether to set x-forwarded-proto to https or http.\n    By setting x-forwarded-proto to a given protocol, the tool is able to simulate the behavior of\n    a client issuing a request via http or https. By default ssl is false which corresponds to\n    x-forwarded-proto set to http.\n\n  runtime\n    *(optional, string)* A string representing the runtime setting to enable for the test. The runtime\n    setting along with the random_value is used by the router to decide if the route should be enabled.\n    Only a random_value lesser than the fractional percentage defined on the route entry enables the\n    route.\n\n  additional_request_headers, additional_response_headers\n    *(optional, array)*  Additional headers to be added as input for route determination. The \"authority\",\n    \"path\", \"method\", \"x-forwarded-proto\", and \"x-envoy-internal\" fields are specified by the other config\n    options and should not be set here.\n\n    key\n      *(required, string)* The name of the header field to add.\n\n    value\n      *(required, string)* The value of the header field to add.\n\nvalidate\n  *(required, object)* The validate object specifies the returned route parameters to match. At least one\n  test parameter must be specified. 
Use \"\" (empty string) to indicate that no return value is expected.\n  For example, to test that no cluster match is expected use {\"cluster_name\": \"\"}.\n\n  cluster_name\n    *(optional, string)* Match the cluster name.\n\n  virtual_cluster_name\n    *(optional, string)* Match the virtual cluster name.\n\n  virtual_host_name\n    *(optional, string)* Match the virtual host name.\n\n  host_rewrite\n    *(optional, string)* Match the host header field after rewrite.\n\n  path_rewrite\n    *(optional, string)* Match the path header field after rewrite.\n\n  path_redirect\n    *(optional, string)* Match the returned redirect path.\n\n  request_header_fields, response_header_fields\n    *(optional, array, deprecated)*  Match the listed header fields. Example header fields include the \"path\", \"cookie\",\n    and \"date\" fields. The header fields are checked after all other test cases. Thus, the header fields checked\n    will be those of the redirected or rewritten routes when applicable.\n    These fields are deprecated. Use request_header_matches, response_header_matches instead.\n\n    key\n      *(required, string)* The name of the header field to match.\n\n    value\n      *(required, string)* The value of the header field to match.\n\n  request_header_matches, response_header_matches\n    *(optional, array)*  Matchers for the listed headers. Example header fields include the \"path\", \"cookie\",\n    and \"date\" fields, as well as custom headers set in the input or by the route. The header fields are checked\n    after all other test cases. Thus, the header fields checked will be those of the redirected or rewritten\n    routes when applicable.\n    - Matchers are specified as :ref:`HeaderMatchers <envoy_api_msg_route.HeaderMatcher>`, and behave the same way.\n\nCoverage\n--------\n\nThe router check tool will report route coverage at the end of a successful test run.\n\n.. 
code:: bash\n\n  > bazel-bin/test/tools/router_check/router_check_tool --config-path ... --test-path ...\n  Current route coverage: 0.0744863\n\nThis reporting can be leveraged to enforce a minimum coverage percentage by using\nthe `-f` or `--fail-under` flag. If coverage falls below this percentage the test\nrun will fail.\n\n.. code:: bash\n\n  > bazel-bin/test/tools/router_check/router_check_tool --config-path ... --test-path ... --fail-under 8\n  Current route coverage: 7.44863%\n  Failed to meet coverage requirement: 8%\n\n\nBy default the coverage report measures test coverage by checking that at least one field is\nverified for every route. However, this can leave holes in the tests where fields\naren't validated and later changed. For more comprehensive coverage you can add a flag,\n`--covall`, which will calculate coverage taking into account all of the possible\nfields that could be tested.\n\n.. code:: bash\n\n  > bazel-bin/test/tools/router_check/router_check_tool --config-path ... --test-path ... --f 7 --covall\n  Current route coverage: 6.2948%\n  Failed to meet coverage requirement: 7%\n"
  },
  {
    "path": "docs/root/configuration/other_features/other_features.rst",
    "content": "Other features\n==============\n\n.. toctree::\n  :maxdepth: 2\n\n  rate_limit\n  wasm\n  wasm_stat_sink\n"
  },
  {
    "path": "docs/root/configuration/other_features/rate_limit.rst",
    "content": ".. _config_rate_limit_service:\n\nRate limit service\n==================\n\nThe :ref:`rate limit service <arch_overview_global_rate_limit>` configuration specifies the global rate\nlimit service Envoy should talk to when it needs to make global rate limit decisions. If no rate\nlimit service is configured, a \"null\" service will be used which will always return OK if called.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_config.ratelimit.v3.RateLimitServiceConfig>`\n\ngRPC service IDL\n----------------\n\nEnvoy expects the rate limit service to support the gRPC IDL specified in\n:ref:`rls.proto <envoy_v3_api_file_envoy/service/ratelimit/v3/rls.proto>`. See the IDL documentation\nfor more information on how the API works. See Lyft's reference implementation\n`here <https://github.com/lyft/ratelimit>`_.\n"
  },
  {
    "path": "docs/root/configuration/other_features/wasm.rst",
    "content": ".. _config_wasm_service:\n\nWasm service\n============\n\nThe :ref:`WasmService <envoy_v3_api_msg_extensions.wasm.v3.WasmService>` configuration specifies a\nsingleton or per-worker Wasm service for background or on-demand activities.\n\nExample plugin configuration:\n\n.. code-block:: yaml\n\n  wasm:\n    config:\n      config:\n        name: \"my_plugin\"\n        vm_config:\n          runtime: \"envoy.wasm.runtime.v8\"\n          code:\n            local:\n              filename: \"/etc/envoy_filter_http_wasm_example.wasm\"\n      singleton: true\n\nThe preceding snippet configures a plugin singleton service from a Wasm binary on local disk.\n"
  },
  {
    "path": "docs/root/configuration/other_features/wasm_stat_sink.rst",
    "content": ".. _config_stat_sinks_wasm:\n\nWasm Stat Sink\n==============\n\nThe :ref:`WasmService <envoy_v3_api_msg_extensions.stat_sinks.wasm.v3.Wasm>` configuration specifies a\nsingleton or per-worker Wasm stat sink service.\n"
  },
  {
    "path": "docs/root/configuration/other_protocols/dubbo_filters/dubbo_filters.rst",
    "content": ".. _config_dubbo_filters:\n\nDubbo filters\n===============\n\nEnvoy has the following builtin Dubbo filters.\n\n.. toctree::\n  :maxdepth: 2\n\n  router_filter\n"
  },
  {
    "path": "docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst",
    "content": ".. _config_dubbo_filters_router:\n\nRouter\n======\n\nThe router filter implements Dubbo forwarding. It will be used in almost all Dubbo proxying\nscenarios. The filter's main job is to follow the instructions specified in the configured\n:ref:`route table <envoy_v3_api_msg_extensions.filters.network.dubbo_proxy.v3.RouteConfiguration>`.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_config.filter.thrift.router.v2alpha1.Router>`\n* This filter should be configured with the name *envoy.filters.dubbo.router*.\n"
  },
  {
    "path": "docs/root/configuration/other_protocols/other_protocols.rst",
    "content": "Other protocols\n===============\n\n.. toctree::\n  :maxdepth: 2\n\n  thrift_filters/thrift_filters\n  dubbo_filters/dubbo_filters\n"
  },
  {
    "path": "docs/root/configuration/other_protocols/thrift_filters/rate_limit_filter.rst",
    "content": ".. _config_thrift_filters_rate_limit:\n\nRate limit\n==========\n\n* Global rate limiting :ref:`architecture overview <arch_overview_global_rate_limit>`\n* :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.network.thrift_proxy.filters.ratelimit.v3.RateLimit>`\n* This filter should be configured with the name *envoy.filters.thrift.rate_limit*.\n\nThe Thrift rate limit filter will call the rate limit service when the request's route has one or\nmore :ref:`rate limit configurations\n<envoy_v3_api_field_extensions.filters.network.thrift_proxy.v3.RouteAction.rate_limits>` that\nmatch the filter's stage setting. More than one configuration can apply to a request. Each\nconfiguration results in a descriptor being sent to the rate limit service.\n\nIf the rate limit service is called, and the response for any of the descriptors is over limit, an\napplication exception indicating an internal error is returned.\n\nIf there is an error in calling the rate limit service or it returns an error and\n:ref:`failure_mode_deny\n<envoy_v3_api_field_extensions.filters.network.thrift_proxy.filters.ratelimit.v3.RateLimit.failure_mode_deny>` is set to\ntrue, an application exception indicating an internal error is returned.\n\n.. _config_thrift_filters_rate_limit_stats:\n\nStatistics\n----------\n\nThe filter outputs statistics in the *cluster.<route target cluster>.ratelimit.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  ok, Counter, Total under limit responses from the rate limit service.\n  error, Counter, Total errors contacting the rate limit service.\n  over_limit, Counter, Total over limit responses from the rate limit service.\n  failure_mode_allowed, Counter, \"Total requests that were error(s) but were allowed through because\n  of :ref:`failure_mode_deny\n  <envoy_v3_api_field_extensions.filters.network.thrift_proxy.filters.ratelimit.v3.RateLimit.failure_mode_deny>` set to\n  false.\"\n"
  },
  {
    "path": "docs/root/configuration/other_protocols/thrift_filters/router_filter.rst",
    "content": ".. _config_thrift_filters_router:\n\nRouter\n======\n\nThe router filter implements Thrift forwarding. It will be used in almost all Thrift proxying\nscenarios. The filter's main job is to follow the instructions specified in the configured\n:ref:`route table <envoy_v3_api_msg_extensions.filters.network.thrift_proxy.v3.RouteConfiguration>`.\n\n* :ref:`v3 API reference <envoy_v3_api_msg_config.filter.thrift.router.v2alpha1.Router>`\n* This filter should be configured with the name *envoy.filters.thrift.router*.\n\nStatistics\n----------\n\nThe filter outputs statistics in the *thrift.<stat_prefix>.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  route_missing, Counter, Total requests with no route found.\n  unknown_cluster, Counter, Total requests with a route that has an unknown cluster.\n  upstream_rq_maintenance_mode, Counter, Total requests with a destination cluster in maintenance mode.\n  no_healthy_upstream, Counter, Total requests with no healthy upstream endpoints available.\n"
  },
  {
    "path": "docs/root/configuration/other_protocols/thrift_filters/thrift_filters.rst",
    "content": ".. _config_thrift_filters:\n\nThrift filters\n===============\n\nEnvoy has the following builtin Thrift filters.\n\n.. toctree::\n  :maxdepth: 2\n\n  rate_limit_filter\n  router_filter\n"
  },
  {
    "path": "docs/root/configuration/overview/bootstrap.rst",
    "content": ".. _config_overview_bootstrap:\n\nBootstrap configuration\n-----------------------\n\nTo use the xDS API, it's necessary to supply a bootstrap configuration file. This\nprovides static server configuration and configures Envoy to access :ref:`dynamic\nconfiguration if needed <arch_overview_dynamic_config>`. This is supplied on the command-line via\nthe :option:`-c` flag, i.e.:\n\n.. code-block:: console\n\n  ./envoy -c <path to config>.{json,yaml,pb,pb_text}\n\nwhere the extension reflects the underlying config representation.\n\nThe :ref:`Bootstrap <envoy_v3_api_msg_config.bootstrap.v3.Bootstrap>` message is the root of the\nconfiguration. A key concept in the :ref:`Bootstrap <envoy_v3_api_msg_config.bootstrap.v3.Bootstrap>`\nmessage is the distinction between static and dynamic resources. Resources such\nas a :ref:`Listener <envoy_v3_api_msg_config.listener.v3.Listener>` or :ref:`Cluster\n<envoy_v3_api_msg_config.cluster.v3.Cluster>` may be supplied either statically in\n:ref:`static_resources <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.static_resources>` or have\nan xDS service such as :ref:`LDS\n<config_listeners_lds>` or :ref:`CDS <config_cluster_manager_cds>` configured in\n:ref:`dynamic_resources <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.dynamic_resources>`.\n"
  },
  {
    "path": "docs/root/configuration/overview/examples.rst",
    "content": "Examples\n--------\n\nBelow we will use YAML representation of the config protos and a running example\nof a service proxying HTTP from 127.0.0.1:10000 to 127.0.0.2:1234.\n\nStatic\n^^^^^^\n\nA minimal fully static bootstrap config is provided below:\n\n.. validated-code-block:: yaml\n  :type-name: envoy.config.bootstrap.v3.Bootstrap\n\n  admin:\n    access_log_path: /tmp/admin_access.log\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 9901 }\n\n  static_resources:\n    listeners:\n    - name: listener_0\n      address:\n        socket_address: { address: 127.0.0.1, port_value: 10000 }\n      filter_chains:\n      - filters:\n        - name: envoy.filters.network.http_connection_manager\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n            stat_prefix: ingress_http\n            codec_type: AUTO\n            route_config:\n              name: local_route\n              virtual_hosts:\n              - name: local_service\n                domains: [\"*\"]\n                routes:\n                - match: { prefix: \"/\" }\n                  route: { cluster: some_service }\n            http_filters:\n            - name: envoy.filters.http.router\n    clusters:\n    - name: some_service\n      connect_timeout: 0.25s\n      type: STATIC\n      lb_policy: ROUND_ROBIN\n      load_assignment:\n        cluster_name: some_service\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 1234\n\nMostly static with dynamic EDS\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA bootstrap config that continues from the above example with :ref:`dynamic endpoint\ndiscovery <arch_overview_dynamic_config_eds>` via an\n:ref:`EDS<envoy_v3_api_file_envoy/service/endpoint/v3/eds.proto>` gRPC management server 
listening\non 127.0.0.1:5678 is provided below:\n\n.. validated-code-block:: yaml\n  :type-name: envoy.config.bootstrap.v3.Bootstrap\n\n  admin:\n    access_log_path: /tmp/admin_access.log\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 9901 }\n\n  static_resources:\n    listeners:\n    - name: listener_0\n      address:\n        socket_address: { address: 127.0.0.1, port_value: 10000 }\n      filter_chains:\n      - filters:\n        - name: envoy.filters.network.http_connection_manager\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n            stat_prefix: ingress_http\n            codec_type: AUTO\n            route_config:\n              name: local_route\n              virtual_hosts:\n              - name: local_service\n                domains: [\"*\"]\n                routes:\n                - match: { prefix: \"/\" }\n                  route: { cluster: some_service }\n            http_filters:\n            - name: envoy.filters.http.router\n    clusters:\n    - name: some_service\n      connect_timeout: 0.25s\n      lb_policy: ROUND_ROBIN\n      type: EDS\n      eds_cluster_config:\n        eds_config:\n          api_config_source:\n            api_type: GRPC\n            grpc_services:\n              - envoy_grpc:\n                  cluster_name: xds_cluster\n    - name: xds_cluster\n      connect_timeout: 0.25s\n      type: STATIC\n      lb_policy: ROUND_ROBIN\n      http2_protocol_options:\n        connection_keepalive:\n          interval: 30s\n          timeout: 5s\n      upstream_connection_options:\n        # configure a TCP keep-alive to detect and reconnect to the admin\n        # server in the event of a TCP socket half open connection\n        tcp_keepalive: {}\n      load_assignment:\n        cluster_name: xds_cluster\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n         
       socket_address:\n                  address: 127.0.0.1\n                  port_value: 5678\n\nNotice above that *xds_cluster* is defined to point Envoy at the management server. Even in\nan otherwise completely dynamic configurations, some static resources need to\nbe defined to point Envoy at its xDS management server(s).\n\nIt's important to set appropriate :ref:`TCP Keep-Alive options <envoy_v3_api_msg_config.core.v3.TcpKeepalive>`\nin the `tcp_keepalive` block. This will help detect TCP half open connections to the xDS management\nserver and re-establish a full connection.\n\nIn the above example, the EDS management server could then return a proto encoding of a\n:ref:`DiscoveryResponse <envoy_v3_api_msg_service.discovery.v3.DiscoveryResponse>`:\n\n.. code-block:: yaml\n\n  version_info: \"0\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\n    cluster_name: some_service\n    endpoints:\n    - lb_endpoints:\n      - endpoint:\n          address:\n            socket_address:\n              address: 127.0.0.2\n              port_value: 1234\n\n\nThe versioning and type URL scheme that appear above are explained in more\ndetail in the :ref:`streaming gRPC subscription protocol\n<xds_protocol_streaming_grpc_subscriptions>`\ndocumentation.\n\nDynamic\n^^^^^^^\n\nA fully dynamic bootstrap configuration, in which all resources other than\nthose belonging to the management server are discovered via xDS is provided\nbelow:\n\n.. 
validated-code-block:: yaml\n  :type-name: envoy.config.bootstrap.v3.Bootstrap\n\n  admin:\n    access_log_path: /tmp/admin_access.log\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 9901 }\n\n  dynamic_resources:\n    lds_config:\n      api_config_source:\n        api_type: GRPC\n        grpc_services:\n          - envoy_grpc:\n              cluster_name: xds_cluster\n    cds_config:\n      api_config_source:\n        api_type: GRPC\n        grpc_services:\n          - envoy_grpc:\n              cluster_name: xds_cluster\n\n  static_resources:\n    clusters:\n    - name: xds_cluster\n      connect_timeout: 0.25s\n      type: STATIC\n      lb_policy: ROUND_ROBIN\n      http2_protocol_options:\n        # Configure an HTTP/2 keep-alive to detect connection issues and reconnect\n        # to the admin server if the connection is no longer responsive.\n        connection_keepalive:\n          interval: 30s\n          timeout: 5s\n      load_assignment:\n        cluster_name: xds_cluster\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 5678\n\nThe management server could respond to LDS requests with:\n\n.. 
code-block:: yaml\n\n  version_info: \"0\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n    name: listener_0\n    address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 10000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          stat_prefix: ingress_http\n          codec_type: AUTO\n          rds:\n            route_config_name: local_route\n            config_source:\n              api_config_source:\n                api_type: GRPC\n                grpc_services:\n                  - envoy_grpc:\n                      cluster_name: xds_cluster\n          http_filters:\n          - name: envoy.filters.http.router\n\nThe management server could respond to RDS requests with:\n\n.. code-block:: yaml\n\n  version_info: \"0\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.route.v3.RouteConfiguration\n    name: local_route\n    virtual_hosts:\n    - name: local_service\n      domains: [\"*\"]\n      routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: some_service }\n\nThe management server could respond to CDS requests with:\n\n.. code-block:: yaml\n\n  version_info: \"0\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n    name: some_service\n    connect_timeout: 0.25s\n    lb_policy: ROUND_ROBIN\n    type: EDS\n    eds_cluster_config:\n      eds_config:\n        api_config_source:\n          api_type: GRPC\n          grpc_services:\n            - envoy_grpc:\n                cluster_name: xds_cluster\n\nThe management server could respond to EDS requests with:\n\n.. 
code-block:: yaml\n\n  version_info: \"0\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\n    cluster_name: some_service\n    endpoints:\n    - lb_endpoints:\n      - endpoint:\n          address:\n            socket_address:\n              address: 127.0.0.2\n              port_value: 1234\n"
  },
  {
    "path": "docs/root/configuration/overview/extension.rst",
    "content": ".. _config_overview_extension_configuration:\n\nExtension configuration\n-----------------------\n\nEach configuration resource in Envoy has a type URL in the `typed_config`. This\ntype corresponds to a versioned schema. If the type URL uniquely identifies an\nextension capable of interpreting the configuration, then the extension is\nselected regardless of the `name` field. In this case the `name` field becomes\noptional and can be used as an identifier or as an annotation for the\nparticular instance of the extension configuration. For example, the following\nfilter configuration snippet is permitted:\n\n.. code-block:: yaml\n\n  name: front-http-proxy\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n    stat_prefix: ingress_http\n    codec_type: AUTO\n    rds:\n      route_config_name: local_route\n      config_source:\n        api_config_source:\n          api_type: GRPC\n          grpc_services:\n            envoy_grpc:\n              cluster_name: xds_cluster\n    http_filters:\n    - name: front-router\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n        dynamic_stats: true\n\nIn case the control plane lacks the schema definitions for an extension,\n`udpa.type.v1.TypedStruct` should be used as a generic container. The type URL\ninside it is then used by a client to convert the contents to a typed\nconfiguration resource. For example, the above example could be written as\nfollows:\n\n.. 
code-block:: yaml\n\n  name: front-http-proxy\n  typed_config:\n    \"@type\": type.googleapis.com/udpa.type.v1.TypedStruct\n    type_url: type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n    value:\n      stat_prefix: ingress_http\n      codec_type: AUTO\n      rds:\n        route_config_name: local_route\n        config_source:\n          api_config_source:\n            api_type: GRPC\n            grpc_services:\n              envoy_grpc:\n                cluster_name: xds_cluster\n      http_filters:\n      - name: front-router\n        typed_config:\n          \"@type\": type.googleapis.com/udpa.type.v1.TypedStruct\n          type_url: type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n\n.. _config_overview_extension_discovery:\n\nDiscovery service\n^^^^^^^^^^^^^^^^^\n\nExtension configuration can be supplied dynamically from an :ref:`xDS\nmanagement server<xds_protocol>` using :ref:`ExtensionConfiguration discovery\nservice<envoy_v3_api_file_envoy/service/extension/v3/config_discovery.proto>`.\nThe name field in the extension configuration acts as the resource identifier.\nFor example, HTTP connection manager supports :ref:`dynamic filter\nre-configuration<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.config_discovery>`\nfor HTTP filters.\n\nExtension config discovery service has a :ref:`statistics\n<subscription_statistics>` tree rooted at\n*<stat_prefix>.extension_config_discovery.<extension_config_name>*. In addition\nto the common subscription statistics, it also provides the following:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  config_reload, Counter, Total number of successful configuration updates\n  config_fail, Counter, Total number of failed configuration updates\n"
  },
  {
    "path": "docs/root/configuration/overview/introduction.rst",
    "content": "Introduction\n============\n\nThe Envoy xDS APIs are defined as `proto3\n<https://developers.google.com/protocol-buffers/docs/proto3>`_ `Protocol Buffers\n<https://developers.google.com/protocol-buffers/>`_ in the :repo:`api tree <api/>`. They\nsupport:\n\n* Streaming delivery of :ref:`xDS <xds_protocol>` API updates via gRPC. This reduces\n  resource requirements and can lower the update latency.\n* A new REST-JSON API in which the JSON/YAML formats are derived mechanically via the `proto3\n  canonical JSON mapping\n  <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n* Delivery of updates via the filesystem, REST-JSON or gRPC endpoints.\n* Advanced load balancing through an extended endpoint assignment API and load\n  and resource utilization reporting to management servers.\n* :ref:`Stronger consistency and ordering properties\n  <xds_protocol_eventual_consistency_considerations>`\n  when needed. The APIs still maintain a baseline eventual consistency model.\n\nSee the :ref:`xDS protocol description <xds_protocol>` for\nfurther details on aspects of xDS message exchange between Envoy and the management server.\n"
  },
  {
    "path": "docs/root/configuration/overview/mgmt_server.rst",
    "content": "Management Server\n-----------------\n\n.. _config_overview_mgmt_con_issues:\n\nManagement Server Unreachability\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nWhen an Envoy instance loses connectivity with the management server, Envoy will latch on to\nthe previous configuration while actively retrying in the background to reestablish the\nconnection with the management server.\n\nIt is important that Envoy detects when a connection to a management server is unhealthy so that\nit can try to establish a new connection. Configuring either\n:ref:`TCP keep-alives <envoy_v3_api_field_config.cluster.v3.UpstreamConnectionOptions.tcp_keepalive>`\nor :ref:`HTTP/2 keepalives <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.connection_keepalive>`\nin the cluster that connects to the management server is recommended.\n\nEnvoy debug logs the fact that it is not able to establish a connection with the management server\nevery time it attempts a connection.\n\n:ref:`connected_state <management_server_stats>` statistic provides a signal for monitoring this behavior.\n\n.. _management_server_stats:\n\nStatistics\n^^^^^^^^^^\n\nManagement Server has a statistics tree rooted at *control_plane.* with the following statistics:\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server\n   rate_limit_enforced, Counter, Total number of times rate limit was enforced for management server requests\n   pending_requests, Gauge, Total number of pending requests when the rate limit was enforced\n   identifier, TextReadout, The identifier of the control plane instance that sent the last discovery response\n\n.. _subscription_statistics:\n\nxDS subscription statistics\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nEnvoy discovers its various dynamic resources via discovery\nservices referred to as *xDS*. 
Resources are requested via :ref:`subscriptions <xds_protocol>`,\nby specifying a filesystem path to watch, initiating gRPC streams or polling a REST-JSON URL.\n\nThe following statistics are generated for all subscriptions.\n\n.. csv-table::\n :header: Name, Type, Description\n :widths: 1, 1, 2\n\n config_reload, Counter, Total API fetches that resulted in a config reload due to a different config\n init_fetch_timeout, Counter, Total :ref:`initial fetch timeouts <envoy_v3_api_field_config.core.v3.ConfigSource.initial_fetch_timeout>`\n update_attempt, Counter, Total API fetches attempted\n update_success, Counter, Total API fetches completed successfully\n update_failure, Counter, Total API fetches that failed because of network errors\n update_rejected, Counter, Total API fetches that failed because of schema/validation errors\n update_time, Gauge, Timestamp of the last successful API fetch attempt as milliseconds since the epoch. Refreshed even after a trivial configuration reload that contained no configuration changes.\n version, Gauge, Hash of the contents from the last successful API fetch\n version_text, TextReadout, The version text from the last successful API fetch\n control_plane.connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server\n"
  },
  {
    "path": "docs/root/configuration/overview/overview.rst",
    "content": ".. _config_overview:\n\nOverview\n========\n\n.. toctree::\n  :maxdepth: 2\n\n  introduction\n  versioning\n  bootstrap\n  examples\n  extension\n  xds_api\n  mgmt_server\n"
  },
  {
    "path": "docs/root/configuration/overview/versioning.rst",
    "content": "Versioning\n----------\n\nThe Envoy xDS APIs follow a well defined :repo:`versioning scheme <api/API_VERSIONING.md>`. Envoy\nsupports :ref:`multiple major versions <api_supported_versions>` at any point in time. The examples\nin this section are taken from the v2 xDS API.\n\nEnvoy has API versions for both the xDS transport, i.e. the wire protocol for moving resources\nbetween a management server and Envoy, and for resources. These are known as the transport and\nresource API version respectively.\n\nThe transport and resource version may be mixed. For example, v3 resources may be transferred over\nthe v2 transport protocol. In addition, an Envoy may consume mixed resource versions for distinct\nresource types. For example, :ref:`v3 Clusters <envoy_v3_api_msg_config.cluster.v3.Cluster>` may be\nused alongside :ref:`v2 Listeners <envoy_api_msg_Listener>`.\n\nBoth the transport and resource API versions follow the API versioning support and deprecation\n:repo:`policy <api/API_VERSIONING.md>`.\n\n.. note::\n\n    Envoy will internally operate at the latest xDS resource version and all supported versioned\n    resources will be transparently upgraded to this latest version on configuration ingestion. For\n    example, v2 and v3 resources, delivered over either a v2 or v3 transport, or any mix thereof,\n    will be internally converted into v3 resources.\n"
  },
  {
    "path": "docs/root/configuration/overview/xds_api.rst",
    "content": ".. _config_overview_management_server:\n\nxDS API endpoints\n-----------------\n\nAn xDS management server will implement the below endpoints as required for\ngRPC and/or REST serving. In both streaming gRPC and\nREST-JSON cases, a :ref:`DiscoveryRequest <envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest>` is sent and a\n:ref:`DiscoveryResponse <envoy_v3_api_msg_service.discovery.v3.DiscoveryResponse>` received following the\n:ref:`xDS protocol <xds_protocol>`.\n\nBelow we describe endpoints for the v2 and v3 transport API versions.\n\n.. _v2_grpc_streaming_endpoints:\n\ngRPC streaming endpoints\n^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. http:post:: /envoy.api.v2.ClusterDiscoveryService/StreamClusters\n.. http:post:: /envoy.service.cluster.v3.ClusterDiscoveryService/StreamClusters\n\nSee :repo:`cds.proto <api/service/cluster/v3/cds.proto>` for the service definition. This is used by Envoy\nas a client when\n\n.. code-block:: yaml\n\n    cds_config:\n      api_config_source:\n        api_type: GRPC\n        transport_api_version: <V2|V3>\n        grpc_services:\n          envoy_grpc:\n            cluster_name: some_xds_cluster\n\nis set in the :ref:`dynamic_resources\n<envoy_v3_api_field_config.bootstrap.v3.Bootstrap.dynamic_resources>` of the :ref:`Bootstrap\n<envoy_v3_api_msg_config.bootstrap.v3.Bootstrap>` config.\n\n.. http:post:: /envoy.api.v2.EndpointDiscoveryService/StreamEndpoints\n.. http:post:: /envoy.service.endpoint.v3.EndpointDiscoveryService/StreamEndpoints\n\nSee :repo:`eds.proto\n<api/envoy/service/endpoint/v3/eds.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. 
code-block:: yaml\n\n    eds_config:\n      api_config_source:\n        api_type: GRPC\n        transport_api_version: <V2|V3>\n        grpc_services:\n          envoy_grpc:\n            cluster_name: some_xds_cluster\n\nis set in the :ref:`eds_cluster_config\n<envoy_v3_api_field_config.cluster.v3.Cluster.eds_cluster_config>` field of the :ref:`Cluster\n<envoy_v3_api_msg_config.cluster.v3.Cluster>` config.\n\n.. http:post:: /envoy.api.v2.ListenerDiscoveryService/StreamListeners\n.. http:post:: /envoy.service.listener.v3.ListenerDiscoveryService/StreamListeners\n\nSee :repo:`lds.proto\n<api/envoy/service/listener/v3/lds.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. code-block:: yaml\n\n    lds_config:\n      api_config_source:\n        api_type: GRPC\n        transport_api_version: <V2|V3>\n        grpc_services:\n          envoy_grpc:\n            cluster_name: some_xds_cluster\n\nis set in the :ref:`dynamic_resources\n<envoy_v3_api_field_config.bootstrap.v3.Bootstrap.dynamic_resources>` of the :ref:`Bootstrap\n<envoy_v3_api_msg_config.bootstrap.v3.Bootstrap>` config.\n\n.. http:post:: /envoy.api.v2.RouteDiscoveryService/StreamRoutes\n.. http:post:: /envoy.service.route.v3.RouteDiscoveryService/StreamRoutes\n\nSee :repo:`rds.proto\n<api/envoy/service/route/v3/rds.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. code-block:: yaml\n\n    route_config_name: some_route_name\n    config_source:\n      api_config_source:\n        api_type: GRPC\n        transport_api_version: <V2|V3>\n        grpc_services:\n          envoy_grpc:\n            cluster_name: some_xds_cluster\n\nis set in the :ref:`rds\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.rds>` field\nof the :ref:`HttpConnectionManager\n<envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager>` config.\n\n.. 
http:post:: /envoy.api.v2.ScopedRoutesDiscoveryService/StreamScopedRoutes\n.. http:post:: /envoy.service.route.v3.ScopedRoutesDiscoveryService/StreamScopedRoutes\n\nSee :repo:`srds.proto\n<api/envoy/service/route/v3/srds.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. code-block:: yaml\n\n    name: some_scoped_route_name\n    scoped_rds:\n      config_source:\n        api_config_source:\n          api_type: GRPC\n          transport_api_version: <V2|V3>\n          grpc_services:\n            envoy_grpc:\n              cluster_name: some_xds_cluster\n\nis set in the :ref:`scoped_routes\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.scoped_routes>`\nfield of the :ref:`HttpConnectionManager\n<envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager>` config.\n\n.. http:post:: /envoy.service.discovery.v2.SecretDiscoveryService/StreamSecrets\n.. http:post:: /envoy.service.secret.v3.SecretDiscoveryService/StreamSecrets\n\nSee :repo:`sds.proto\n<api/envoy/service/secret/v3/sds.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. code-block:: yaml\n\n    name: some_secret_name\n    config_source:\n      api_config_source:\n        api_type: GRPC\n        transport_api_version: <V2|V3>\n        grpc_services:\n          envoy_grpc:\n            cluster_name: some_xds_cluster\n\nis set inside a :ref:`SdsSecretConfig <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.SdsSecretConfig>` message. This message\nis used in various places such as the :ref:`CommonTlsContext <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.CommonTlsContext>`.\n\n.. http:post:: /envoy.service.discovery.v2.RuntimeDiscoveryService/StreamRuntime\n.. http:post:: /envoy.service.runtime.v3.RuntimeDiscoveryService/StreamRuntime\n\nSee :repo:`rtds.proto\n<api/envoy/service/runtime/v3/rtds.proto>`\nfor the service definition. 
This is used by Envoy as a client when\n\n.. code-block:: yaml\n\n    name: some_runtime_layer_name\n    config_source:\n      api_config_source:\n        api_type: GRPC\n        transport_api_version: <V2|V3>\n        grpc_services:\n          envoy_grpc:\n            cluster_name: some_xds_cluster\n\nis set inside the :ref:`rtds_layer <envoy_v3_api_field_config.bootstrap.v3.RuntimeLayer.rtds_layer>`\nfield.\n\nREST endpoints\n^^^^^^^^^^^^^^\n\n.. http:post:: /v2/discovery:clusters\n.. http:post:: /v3/discovery:clusters\n\nSee :repo:`cds.proto\n<api/envoy/service/cluster/v3/cds.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. code-block:: yaml\n\n    cds_config:\n      api_config_source:\n        api_type: REST\n        transport_api_version: <V2|V3>\n        cluster_names: [some_xds_cluster]\n\nis set in the :ref:`dynamic_resources\n<envoy_v3_api_field_config.bootstrap.v3.Bootstrap.dynamic_resources>` of the :ref:`Bootstrap\n<envoy_v3_api_msg_config.bootstrap.v3.Bootstrap>` config.\n\n.. http:post:: /v2/discovery:endpoints\n.. http:post:: /v3/discovery:endpoints\n\nSee :repo:`eds.proto\n<api/envoy/service/endpoint/v3/eds.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. code-block:: yaml\n\n    eds_config:\n      api_config_source:\n        api_type: REST\n        transport_api_version: <V2|V3>\n        cluster_names: [some_xds_cluster]\n\nis set in the :ref:`eds_cluster_config\n<envoy_v3_api_field_config.cluster.v3.Cluster.eds_cluster_config>` field of the :ref:`Cluster\n<envoy_v3_api_msg_config.cluster.v3.Cluster>` config.\n\n.. http:post:: /v2/discovery:listeners\n.. http:post:: /v3/discovery:listeners\n\nSee :repo:`lds.proto\n<api/envoy/service/listener/v3/lds.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. 
code-block:: yaml\n\n    lds_config:\n      api_config_source:\n        api_type: REST\n        transport_api_version: <V2|V3>\n        cluster_names: [some_xds_cluster]\n\nis set in the :ref:`dynamic_resources\n<envoy_v3_api_field_config.bootstrap.v3.Bootstrap.dynamic_resources>` of the :ref:`Bootstrap\n<envoy_v3_api_msg_config.bootstrap.v3.Bootstrap>` config.\n\n.. http:post:: /v2/discovery:routes\n.. http:post:: /v3/discovery:routes\n\nSee :repo:`rds.proto\n<api/envoy/service/route/v3/rds.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. code-block:: yaml\n\n    route_config_name: some_route_name\n    config_source:\n      api_config_source:\n        api_type: REST\n        transport_api_version: <V2|V3>\n        cluster_names: [some_xds_cluster]\n\nis set in the :ref:`rds\n<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.rds>` field of the :ref:`HttpConnectionManager\n<envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager>` config.\n\n.. note::\n\n    The management server responding to these endpoints must respond with a :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\n    along with a HTTP status of 200. Additionally, if the configuration that would be supplied has not changed (as indicated by the version\n    supplied by the Envoy client) then the management server can respond with an empty body and a HTTP status of 304.\n\n.. _config_overview_ads:\n\nAggregated Discovery Service\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nWhile Envoy fundamentally employs an eventual consistency model, ADS provides an\nopportunity to sequence API update pushes and ensure affinity of a single\nmanagement server for an Envoy node for API updates. ADS allows one or more APIs\nand their resources to be delivered on a single, bidirectional gRPC stream by\nthe management server. 
Without this, some APIs such as RDS and EDS may require\nthe management of multiple streams and connections to distinct management\nservers.\n\nADS will allow for hitless updates of configuration by appropriate sequencing.\nFor example, suppose *foo.com* was mapped to cluster *X*. We wish to change the\nmapping in the route table to point *foo.com* at cluster *Y*. In order to do\nthis, a CDS/EDS update must first be delivered containing both clusters *X* and\n*Y*.\n\nWithout ADS, the CDS/EDS/RDS streams may point at distinct management servers,\nor when on the same management server at distinct gRPC streams/connections that\nrequire coordination. The EDS resource requests may be split across two distinct\nstreams, one for *X* and one for *Y*. ADS allows these to be coalesced to a\nsingle stream to a single management server, avoiding the need for distributed\nsynchronization to correctly sequence the update. With ADS, the management\nserver would deliver the CDS, EDS and then RDS updates on a single stream.\n\nADS is only available for gRPC streaming (not REST) and is described more fully\nin :ref:`xDS <xds_protocol_ads>`\ndocument. The gRPC endpoint is:\n\n.. http:post:: /envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources\n.. http:post:: /envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources\n\nSee :repo:`discovery.proto\n<api/envoy/service/discovery/v3/discovery.proto>`\nfor the service definition. This is used by Envoy as a client when\n\n.. 
code-block:: yaml\n\n    ads_config:\n      api_type: GRPC\n      transport_api_version: <V2|V3>\n      grpc_services:\n        envoy_grpc:\n          cluster_name: some_ads_cluster\n\nis set in the :ref:`dynamic_resources\n<envoy_v3_api_field_config.bootstrap.v3.Bootstrap.dynamic_resources>` of the :ref:`Bootstrap\n<envoy_v3_api_msg_config.bootstrap.v3.Bootstrap>` config.\n\nWhen this is set, any of the configuration sources :ref:`above <v2_grpc_streaming_endpoints>` can\nbe set to use the ADS channel. For example, a LDS config could be changed from\n\n.. code-block:: yaml\n\n    lds_config:\n      api_config_source:\n        api_type: REST\n        cluster_names: [some_xds_cluster]\n\nto\n\n.. code-block:: yaml\n\n    lds_config: {ads: {}}\n\nwith the effect that the LDS stream will be directed to *some_ads_cluster* over\nthe shared ADS channel.\n\n.. _config_overview_delta:\n\nDelta endpoints\n^^^^^^^^^^^^^^^\n\nThe REST, filesystem, and original gRPC xDS implementations all deliver \"state of the world\" updates:\nevery CDS update must contain every cluster, with the absence of a cluster from an update implying\nthat the cluster is gone. For Envoy deployments with huge amounts of resources and even a trickle of\nchurn, these state-of-the-world updates can be cumbersome.\n\nAs of 1.12.0, Envoy supports a \"delta\" variant of xDS (including ADS), where updates only contain\nresources added/changed/removed. Delta xDS is a gRPC (only) protocol. Delta uses different\nrequest/response protos than SotW (DeltaDiscovery{Request,Response}); see\n:repo:`discovery.proto <api/envoy/service/discovery/v3/discovery.proto>`. Conceptually, delta should be viewed as\na new xDS transport type: there is static, filesystem, REST, gRPC-SotW, and now gRPC-delta.\n(Envoy's implementation of the gRPC-SotW/delta client happens to share most of its code between the\ntwo, and something similar is likely possible on the server side. 
However, they are in fact\nincompatible protocols.\n:ref:`The specification of the delta xDS protocol's behavior is here <xds_protocol_delta>`.)\n\nTo use delta, simply set the api_type field of your\n:ref:`ApiConfigSource <envoy_v3_api_msg_config.core.v3.ApiConfigSource>` proto(s) to DELTA_GRPC.\nThat works for both xDS and ADS; for ADS, it's the api_type field of\n:ref:`DynamicResources.ads_config <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.dynamic_resources>`,\nas described in the previous section.\n"
  },
  {
    "path": "docs/root/configuration/security/secret.rst",
    "content": ".. _config_secret_discovery_service:\n\nSecret discovery service (SDS)\n==============================\n\nTLS certificates, the secrets, can be specified in the bootstrap.static_resource\n:ref:`secrets <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.StaticResources.secrets>`.\nBut they can also be fetched remotely by secret discovery service (SDS).\n\nThe most important benefit of SDS is to simplify the certificate management. Without this feature, in k8s deployment, certificates must be created as secrets and mounted into the proxy containers. If certificates are expired, the secrets need to be updated and the proxy containers need to be re-deployed. With SDS, a central SDS server will push certificates to all Envoy instances. If certificates are expired, the server just pushes new certificates to Envoy instances, Envoy will use the new ones right away without re-deployment.\n\nIf a listener server certificate needs to be fetched by SDS remotely, it will NOT be marked as active, its port will not be opened before the certificates are fetched. If Envoy fails to fetch the certificates due to connection failures, or bad response data, the listener will be marked as active, and the port will be open, but the connection to the port will be reset.\n\nUpstream clusters are handled in a similar way, if a cluster client certificate needs to be fetched by SDS remotely, it will NOT be marked as active and it will not be used before the certificates are fetched. If Envoy fails to fetch the certificates due to connection failures, or bad response data, the cluster will be marked as active, it can be used to handle the requests, but the requests routed to that cluster will be rejected.\n\nIf a static cluster is using SDS, and it needs to define a SDS cluster (unless Google gRPC is used which doesn't need a cluster), the SDS cluster has to be defined before the static clusters using it.\n\nThe connection between Envoy proxy and SDS server has to be secure. 
One option is to run the SDS server on the same host and use Unix Domain Socket for the connection. Otherwise the connection requires TLS with authentication between the proxy and SDS server. Credential types in use today for authentication are:\n\n* mTLS -- In this case, the client certificates for the SDS connection must be statically configured.\n* AWS IAM SigV4\n\nSDS server\n----------\n\nA SDS server needs to implement the gRPC service :repo:`SecretDiscoveryService <api/envoy/service/secret/v3/sds.proto>`.\nIt follows the same protocol as other :ref:`xDS <xds_protocol>`.\n\nSDS Configuration\n-----------------\n\n:ref:`SdsSecretConfig <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.SdsSecretConfig>` is used to specify the secret. Its field *name* is a required field. If its *sds_config* field is empty, the *name* field specifies the secret in the bootstrap static_resource :ref:`secrets <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.StaticResources.secrets>`. Otherwise, it specifies the SDS server as :ref:`ConfigSource <envoy_v3_api_msg_config.core.v3.ConfigSource>`. Only gRPC is supported for the SDS service so its *api_config_source* must specify a **grpc_service**.\n\n*SdsSecretConfig* is used in two fields in :ref:`CommonTlsContext <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.CommonTlsContext>`. The first field is *tls_certificate_sds_secret_configs* to use SDS to get :ref:`TlsCertificate <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.TlsCertificate>`. The second field is *validation_context_sds_secret_config* to use SDS to get :ref:`CertificateValidationContext <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.CertificateValidationContext>`.\n\nExample one: static_resource\n-----------------------------\n\nThis example shows how to configure secrets in the static_resource:\n\n.. 
code-block:: yaml\n\n  static_resources:\n    secrets:\n      - name: server_cert\n        tls_certificate:\n          certificate_chain:\n            filename: certs/servercert.pem\n          private_key:\n            filename: certs/serverkey.pem\n      - name: client_cert\n        tls_certificate:\n          certificate_chain:\n            filename: certs/clientcert.pem\n          private_key:\n            filename: certs/clientkey.pem\n      - name: validation_context\n        validation_context:\n          trusted_ca:\n            filename: certs/cacert.pem\n          verify_certificate_hash:\n            E0:F3:C8:CE:5E:2E:A3:05:F0:70:1F:F5:12:E3:6E:2E:97:92:82:84:A2:28:BC:F7:73:32:D3:39:30:A1:B6:FD\n    clusters:\n      - connect_timeout: 0.25s\n        load_assignment:\n          cluster_name: local_service_tls\n          ...\n          transport_socket:\n            name: envoy.transport_sockets.tls\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n              common_tls_context:\n                tls_certificate_sds_secret_configs:\n                - name: client_cert\n    listeners:\n      ....\n      filter_chains:\n        transport_socket:\n          name: envoy.transport_sockets.tls\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n            common_tls_context:\n              tls_certificate_sds_secret_configs:\n              - name: server_cert\n              validation_context_sds_secret_config:\n                name: validation_context\n\n\nIn this example, certificates are specified in the bootstrap static_resource, they are not fetched remotely. In the config, *secrets* static resource has 3 secrets: **client_cert**, **server_cert** and **validation_context**. In the cluster config, one of hosts uses **client_cert** in its *tls_certificate_sds_secret_configs*. 
In the listeners section, one of them uses **server_cert** in its *tls_certificate_sds_secret_configs* and **validation_context** for its *validation_context_sds_secret_config*.\n\n.. _sds_server_example:\n\nExample two: SDS server\n------------------------\n\nThis example shows how to configure secrets fetched from remote SDS servers:\n\n.. code-block:: yaml\n\n    clusters:\n      - name: sds_server_mtls\n        http2_protocol_options:\n          connection_keepalive:\n            interval: 30s\n            timeout: 5s\n        load_assignment:\n          cluster_name: sds_server_mtls\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 8234\n        transport_socket:\n          name: envoy.transport_sockets.tls\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n            common_tls_context:\n            - tls_certificate:\n              certificate_chain:\n                filename: certs/sds_cert.pem\n              private_key:\n                filename: certs/sds_key.pem\n      - name: sds_server_uds\n        http2_protocol_options: {}\n        load_assignment:\n          cluster_name: sds_server_uds\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  pipe:\n                    path: /tmp/uds_path\n      - name: example_cluster\n        connect_timeout: 0.25s\n        load_assignment:\n          cluster_name: local_service_tls\n          ...\n          transport_socket:\n          name: envoy.transport_sockets.tls\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n              common_tls_context:\n                tls_certificate_sds_secret_configs:\n                - name: 
client_cert\n                  sds_config:\n                    api_config_source:\n                      api_type: GRPC\n                      grpc_services:\n                        google_grpc:\n                          target_uri: unix:/tmp/uds_path\n    listeners:\n      ....\n      filter_chains:\n      - transport_socket:\n          name: envoy.transport_sockets.tls\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n            common_tls_context:\n              tls_certificate_sds_secret_configs:\n              - name: server_cert\n                sds_config:\n                  api_config_source:\n                    api_type: GRPC\n                    grpc_services:\n                      envoy_grpc:\n                        cluster_name: sds_server_mtls\n              validation_context_sds_secret_config:\n                name: validation_context\n                sds_config:\n                  api_config_source:\n                    api_type: GRPC\n                    grpc_services:\n                      envoy_grpc:\n                        cluster_name: sds_server_uds\n\n\nFor illustration, above example uses three methods to access the SDS server. A gRPC SDS server can be reached by Unix Domain Socket path **/tmp/uds_path** and **127.0.0.1:8234** by mTLS. It provides three secrets, **client_cert**, **server_cert** and **validation_context**. In the config, cluster **example_cluster** certificate **client_cert** is configured to use Google gRPC with UDS to talk to the SDS server. The Listener needs to fetch **server_cert** and **validation_context** from the SDS server. The **server_cert** is using Envoy gRPC with cluster **sds_server_mtls** configured with client certificate to use mTLS to talk to SDS server. The **validation_context** is using Envoy gRPC with cluster **sds_server_uds** configured with UDS path to talk to the SDS server.\n\n.. 
_xds_certificate_rotation:\n\nExample three: certificate rotation for xDS gRPC connection\n------------------------------------------------------------\n\nManaging certificates for xDS gRPC connection between Envoy and xDS server introduces a bootstrapping problem: SDS server cannot manage certificates that are required to connect to the server.\n\nThis example shows how to set up xDS connection by sourcing SDS configuration from the filesystem.\nThe certificate and key files are watched with inotify and reloaded automatically without restart.\nIn contrast, :ref:`sds_server_example` requires a restart to reload xDS certificates and key after update.\n\n.. code-block:: yaml\n\n    clusters:\n    - name: control_plane\n      type: LOGICAL_DNS\n      connect_timeout: 1s\n      load_assignment:\n        cluster_name: control_plane\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: controlplane\n                  port_value: 8443\n      http2_protocol_options: {}\n      transport_socket:\n        name: \"envoy.transport_sockets.tls\"\n        typed_config:\n          \"@type\": \"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\"\n          common_tls_context:\n            tls_certificate_sds_secret_configs:\n              sds_config:\n                path: /etc/envoy/tls_certificate_sds_secret.yaml\n            validation_context_sds_secret_config:\n              sds_config:\n                path: /etc/envoy/validation_context_sds_secret.yaml\n\nPaths to client certificate, including client's certificate chain and private key are given in SDS config file ``/etc/envoy/tls_certificate_sds_secret.yaml``:\n\n.. 
code-block:: yaml\n\n    resources:\n      - \"@type\": \"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\"\n        tls_certificate:\n          certificate_chain:\n            filename: /certs/sds_cert.pem\n          private_key:\n            filename: /certs/sds_key.pem\n\nPath to CA certificate bundle for validating the xDS server certificate is given in SDS config file ``/etc/envoy/validation_context_sds_secret.yaml``:\n\n.. code-block:: yaml\n\n    resources:\n      - \"@type\": \"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\"\n        validation_context:\n          trusted_ca:\n            filename: /certs/cacert.pem\n\n\nStatistics\n----------\nSSL socket factory outputs following SDS related statistics. They are all counter type.\n\nFor downstream listeners, they are in the *listener.<LISTENER_IP>.server_ssl_socket_factory.* namespace.\n\n.. csv-table::\n     :header: Name, Description\n     :widths: 1, 2\n\n     ssl_context_update_by_sds, Total number of ssl context has been updated.\n     downstream_context_secrets_not_ready, Total number of downstream connections reset due to empty ssl certificate.\n\nFor upstream clusters, they are in the *cluster.<CLUSTER_NAME>.client_ssl_socket_factory.* namespace.\n\n.. csv-table::\n     :header: Name, Description\n     :widths: 1, 2\n\n     ssl_context_update_by_sds, Total number of ssl context has been updated.\n     upstream_context_secrets_not_ready, Total number of upstream connections reset due to empty ssl certificate.\n"
  },
  {
    "path": "docs/root/configuration/security/security.rst",
    "content": "Security\n========\n\n.. toctree::\n  :maxdepth: 2\n\n  secret\n"
  },
  {
    "path": "docs/root/configuration/upstream/cluster_manager/cds.rst",
    "content": ".. _config_cluster_manager_cds:\n\nCluster discovery service\n=========================\n\nThe cluster discovery service (CDS) is an optional API that Envoy will call to dynamically fetch\ncluster manager members. Envoy will reconcile the API response and add, modify, or remove known\nclusters depending on what is required.\n\n.. note::\n\n  Any clusters that are statically defined within the Envoy configuration cannot be modified or\n  removed via the CDS API.\n\n* :ref:`v3 CDS API <v2_grpc_streaming_endpoints>`\n\nStatistics\n----------\n\nCDS has a :ref:`statistics <subscription_statistics>` tree rooted at *cluster_manager.cds.*\n"
  },
  {
    "path": "docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst",
    "content": ".. _config_cluster_manager_cluster_circuit_breakers:\n\nCircuit breaking\n================\n\n* Circuit Breaking :ref:`architecture overview <arch_overview_circuit_break>`.\n* :ref:`v3 API documentation <envoy_v3_api_msg_config.cluster.v3.CircuitBreakers>`.\n\nThe following is an example circuit breaker configuration:\n\n.. code-block:: yaml\n\n  circuit_breakers:\n    thresholds:\n    - priority: \"DEFAULT\"\n      max_requests: 75\n      max_pending_requests: 35\n      retry_budget:\n        budget_percent:\n          value: 25.0\n        min_retry_concurrency: 10\n\nRuntime\n-------\n\nAll circuit breaking settings are runtime configurable for all defined priorities based on cluster\nname. They follow the following naming scheme ``circuit_breakers.<cluster_name>.<priority>.<setting>``.\n``cluster_name`` is the name field in each cluster's configuration, which is set in the Envoy\n:ref:`config file <envoy_v3_api_field_config.cluster.v3.Cluster.name>`. Available runtime settings will override\nsettings set in the Envoy config file.\n"
  },
  {
    "path": "docs/root/configuration/upstream/cluster_manager/cluster_hc.rst",
    "content": ".. _config_cluster_manager_cluster_hc:\n\nHealth checking\n===============\n\n* Health checking :ref:`architecture overview <arch_overview_health_checking>`.\n* If health checking is configured for a cluster, additional statistics are emitted. They are\n  documented :ref:`here <config_cluster_manager_cluster_stats>`.\n* :ref:`v3 API documentation <envoy_v3_api_msg_config.core.v3.HealthCheck>`.\n\n.. _config_cluster_manager_cluster_hc_tcp_health_checking:\n\nTCP health checking\n-------------------\n\nThe type of matching performed is the following:\n\n.. code-block:: yaml\n\n\n  tcp_health_check:\n      send: {text: '0101'}\n      receive: [{text: '02'}, {text: '03'}]\n\nDuring each health check cycle, all of the \"send\" bytes are sent to the target server.\n\nWhen checking the response, \"fuzzy\" matching is performed such that each block must be found,\nand in the order specified, but not necessarily contiguous. Thus, in the example above,\n\"04\" could be inserted in the response between \"02\" and \"03\" and the check\nwould still pass. This is done to support protocols that insert non-deterministic data, such as\ntime, into the response.\n\nHealth checks that require a more complex pattern such as send/receive/send/receive are not\ncurrently possible.\n\nIf \"receive\" is an empty array, Envoy will perform \"connect only\" TCP health checking. During each\ncycle, Envoy will attempt to connect to the upstream host, and consider it a success if the\nconnection succeeds. A new connection is created for each health check cycle.\n"
  },
  {
    "path": "docs/root/configuration/upstream/cluster_manager/cluster_manager.rst",
    "content": ".. _config_cluster_manager:\n\nCluster manager\n===============\n\n.. toctree::\n  :maxdepth: 2\n\n  overview\n  cluster_stats\n  cluster_runtime\n  cds\n  cluster_hc\n  cluster_circuit_breakers\n"
  },
  {
    "path": "docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst",
    "content": ".. _config_cluster_manager_cluster_runtime:\n\nRuntime\n=======\n\nUpstream clusters support the following runtime settings:\n\nActive health checking\n----------------------\n\nhealth_check.min_interval\n  Min value for the health checking :ref:`interval <envoy_v3_api_field_config.core.v3.HealthCheck.interval>`.\n  Default value is 1 ms. The effective health check interval will be no less than 1ms. The health\n  checking interval will be between *min_interval* and *max_interval*.\n\nhealth_check.max_interval\n  Max value for the health checking :ref:`interval <envoy_v3_api_field_config.core.v3.HealthCheck.interval>`.\n  Default value is MAX_INT. The effective health check interval will be no less than 1ms. The health\n  checking interval will be between *min_interval* and *max_interval*.\n\nhealth_check.verify_cluster\n  What % of health check requests will be verified against the :ref:`expected upstream service\n  <envoy_v3_api_field_config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher>` as the :ref:`health check filter\n  <arch_overview_health_checking_filter>` will write the remote service cluster into the response.\n\n.. _config_cluster_manager_cluster_runtime_outlier_detection:\n\nOutlier detection\n-----------------\n\nSee the outlier detection :ref:`architecture overview <arch_overview_outlier_detection>` for more\ninformation on outlier detection. 
The runtime parameters supported by outlier detection are the\nsame as the :ref:`static configuration parameters <envoy_v3_api_msg_config.cluster.v3.OutlierDetection>`, namely:\n\noutlier_detection.consecutive_5xx\n  :ref:`consecutive_5XX\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.consecutive_5xx>`\n  setting in outlier detection\n\noutlier_detection.consecutive_gateway_failure\n  :ref:`consecutive_gateway_failure\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.consecutive_gateway_failure>`\n  setting in outlier detection\n\noutlier_detection.consecutive_local_origin_failure\n  :ref:`consecutive_local_origin_failure\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.consecutive_local_origin_failure>`\n  setting in outlier detection\n\noutlier_detection.interval_ms\n  :ref:`interval_ms\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.interval>`\n  setting in outlier detection\n\noutlier_detection.base_ejection_time_ms\n  :ref:`base_ejection_time_ms\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.base_ejection_time>`\n  setting in outlier detection\n\noutlier_detection.max_ejection_percent\n  :ref:`max_ejection_percent\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.max_ejection_percent>`\n  setting in outlier detection\n\noutlier_detection.enforcing_consecutive_5xx\n  :ref:`enforcing_consecutive_5xx\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_consecutive_5xx>`\n  setting in outlier detection\n\noutlier_detection.enforcing_consecutive_gateway_failure\n  :ref:`enforcing_consecutive_gateway_failure\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_consecutive_gateway_failure>`\n  setting in outlier detection\n\noutlier_detection.enforcing_consecutive_local_origin_failure\n  :ref:`enforcing_consecutive_local_origin_failure\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_consecutive_local_origin_failure>`\n  setting in outlier 
detection\n\noutlier_detection.enforcing_success_rate\n  :ref:`enforcing_success_rate\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_success_rate>`\n  setting in outlier detection\n\noutlier_detection.enforcing_local_origin_success_rate\n  :ref:`enforcing_local_origin_success_rate\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_local_origin_success_rate>`\n  setting in outlier detection\n\noutlier_detection.success_rate_minimum_hosts\n  :ref:`success_rate_minimum_hosts\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_minimum_hosts>`\n  setting in outlier detection\n\noutlier_detection.success_rate_request_volume\n  :ref:`success_rate_request_volume\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_request_volume>`\n  setting in outlier detection\n\noutlier_detection.success_rate_stdev_factor\n  :ref:`success_rate_stdev_factor\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_stdev_factor>`\n  setting in outlier detection\n\noutlier_detection.enforcing_failure_percentage\n  :ref:`enforcing_failure_percentage\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_failure_percentage>`\n  setting in outlier detection\n\noutlier_detection.enforcing_failure_percentage_local_origin\n  :ref:`enforcing_failure_percentage_local_origin\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_failure_percentage_local_origin>`\n  setting in outlier detection\n\noutlier_detection.failure_percentage_request_volume\n  :ref:`failure_percentage_request_volume\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.failure_percentage_request_volume>`\n  setting in outlier detection\n\noutlier_detection.failure_percentage_minimum_hosts\n  :ref:`failure_percentage_minimum_hosts\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.failure_percentage_minimum_hosts>`\n  setting in outlier detection\n\noutlier_detection.failure_percentage_threshold\n  
:ref:`failure_percentage_threshold\n  <envoy_v3_api_field_config.cluster.v3.OutlierDetection.failure_percentage_threshold>`\n  setting in outlier detection\n\nCore\n----\n\nupstream.healthy_panic_threshold\n  Sets the :ref:`panic threshold <arch_overview_load_balancing_panic_threshold>` percentage.\n  Defaults to 50%.\n\nupstream.use_http2\n  Whether the cluster utilizes the *http2* :ref:`protocol options <envoy_v3_api_field_config.cluster.v3.Cluster.http2_protocol_options>`\n  if configured. Set to 0 to disable HTTP/2 even if the feature is configured. Defaults to enabled.\n\n.. _config_cluster_manager_cluster_runtime_zone_routing:\n\nZone aware load balancing\n-------------------------\n\nupstream.zone_routing.enabled\n  % of requests that will be routed to the same upstream zone. Defaults to 100% of requests.\n\nupstream.zone_routing.min_cluster_size\n  Minimal size of the upstream cluster for which zone aware routing can be attempted. Default value\n  is 6. If the upstream cluster size is smaller than *min_cluster_size* zone aware routing will not\n  be performed.\n\nCircuit breaking\n----------------\n\ncircuit_breakers.<cluster_name>.<priority>.max_connections\n  :ref:`Max connections circuit breaker setting <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.max_connections>`\n\ncircuit_breakers.<cluster_name>.<priority>.max_pending_requests\n  :ref:`Max pending requests circuit breaker setting <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.max_pending_requests>`\n\ncircuit_breakers.<cluster_name>.<priority>.max_requests\n  :ref:`Max requests circuit breaker setting <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.max_requests>`\n\ncircuit_breakers.<cluster_name>.<priority>.max_retries\n  :ref:`Max retries circuit breaker setting <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.max_retries>`\n\ncircuit_breakers.<cluster_name>.<priority>.retry_budget.budget_percent\n  :ref:`Max retries circuit 
breaker setting <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.budget_percent>`\n\ncircuit_breakers.<cluster_name>.<priority>.retry_budget.min_retry_concurrency\n  :ref:`Min retry concurrency circuit breaker setting <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.min_retry_concurrency>`\n"
  },
  {
    "path": "docs/root/configuration/upstream/cluster_manager/cluster_stats.rst",
    "content": ".. _config_cluster_manager_cluster_stats:\n\nStatistics\n==========\n\n.. contents::\n  :local:\n\nGeneral\n-------\n\nThe cluster manager has a statistics tree rooted at *cluster_manager.* with the following\nstatistics. Any ``:`` character in the stats name is replaced with ``_``. Stats include\nall clusters managed by the cluster manager, including both clusters used for data plane\nupstreams and control plane xDS clusters.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  cluster_added, Counter, Total clusters added (either via static config or CDS)\n  cluster_modified, Counter, Total clusters modified (via CDS)\n  cluster_removed, Counter, Total clusters removed (via CDS)\n  cluster_updated, Counter, Total cluster updates\n  cluster_updated_via_merge, Counter, Total cluster updates applied as merged updates\n  update_merge_cancelled, Counter, Total merged updates that got cancelled and delivered early\n  update_out_of_merge_window, Counter, Total updates which arrived out of a merge window\n  active_clusters, Gauge, Number of currently active (warmed) clusters\n  warming_clusters, Gauge, Number of currently warming (not active) clusters\n\nEvery cluster has a statistics tree rooted at *cluster.<name>.* with the following statistics:\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  upstream_cx_total, Counter, Total connections\n  upstream_cx_active, Gauge, Total active connections\n  upstream_cx_http1_total, Counter, Total HTTP/1.1 connections\n  upstream_cx_http2_total, Counter, Total HTTP/2 connections\n  upstream_cx_connect_fail, Counter, Total connection failures\n  upstream_cx_connect_timeout, Counter, Total connection connect timeouts\n  upstream_cx_idle_timeout, Counter, Total connection idle timeouts\n  upstream_cx_connect_attempts_exceeded, Counter, Total consecutive connection failures exceeding configured connection attempts\n  upstream_cx_overflow, Counter, Total times that the cluster's connection circuit breaker overflowed\n  upstream_cx_connect_ms, Histogram, Connection establishment milliseconds\n  upstream_cx_length_ms, Histogram, Connection length milliseconds\n  upstream_cx_destroy, Counter, Total destroyed connections\n  upstream_cx_destroy_local, Counter, Total connections destroyed locally\n  upstream_cx_destroy_remote, Counter, Total connections destroyed remotely\n  upstream_cx_destroy_with_active_rq, Counter, Total connections destroyed with 1+ active request\n  upstream_cx_destroy_local_with_active_rq, Counter, Total connections destroyed locally with 1+ active request\n  upstream_cx_destroy_remote_with_active_rq, Counter, Total connections destroyed remotely with 1+ active request\n  upstream_cx_close_notify, Counter, Total connections closed via HTTP/1.1 connection close header or HTTP/2 GOAWAY\n  upstream_cx_rx_bytes_total, Counter, Total received connection bytes\n  upstream_cx_rx_bytes_buffered, Gauge, Received connection bytes currently buffered\n  upstream_cx_tx_bytes_total, Counter, Total sent connection bytes\n  upstream_cx_tx_bytes_buffered, Gauge, Send connection bytes currently buffered\n  upstream_cx_pool_overflow, Counter, Total times that the cluster's connection pool circuit breaker overflowed\n  upstream_cx_protocol_error, 
Counter, Total connection protocol errors\n  upstream_cx_max_requests, Counter, Total connections closed due to maximum requests\n  upstream_cx_none_healthy, Counter, Total times connection not established due to no healthy hosts\n  upstream_rq_total, Counter, Total requests\n  upstream_rq_active, Gauge, Total active requests\n  upstream_rq_pending_total, Counter, Total requests pending a connection pool connection\n  upstream_rq_pending_overflow, Counter, Total requests that overflowed connection pool or requests (mainly for HTTP/2) circuit breaking and were failed\n  upstream_rq_pending_failure_eject, Counter, Total requests that were failed due to a connection pool connection failure or remote connection termination \n  upstream_rq_pending_active, Gauge, Total active requests pending a connection pool connection\n  upstream_rq_cancelled, Counter, Total requests cancelled before obtaining a connection pool connection\n  upstream_rq_maintenance_mode, Counter, Total requests that resulted in an immediate 503 due to :ref:`maintenance mode<config_http_filters_router_runtime_maintenance_mode>`\n  upstream_rq_timeout, Counter, Total requests that timed out waiting for a response\n  upstream_rq_max_duration_reached, Counter, Total requests closed due to max duration reached\n  upstream_rq_per_try_timeout, Counter, Total requests that hit the per try timeout (except when request hedging is enabled)\n  upstream_rq_rx_reset, Counter, Total requests that were reset remotely\n  upstream_rq_tx_reset, Counter, Total requests that were reset locally\n  upstream_rq_retry, Counter, Total request retries\n  upstream_rq_retry_backoff_exponential, Counter, Total retries using the exponential backoff strategy\n  upstream_rq_retry_backoff_ratelimited, Counter, Total retries using the ratelimited backoff strategy\n  upstream_rq_retry_limit_exceeded, Counter, Total requests not retried due to exceeding :ref:`the configured number of maximum retries 
<config_http_filters_router_x-envoy-max-retries>`\n  upstream_rq_retry_success, Counter, Total request retry successes\n  upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budget <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.retry_budget>`\n  upstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from upstream\n  upstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from upstream\n  upstream_flow_control_backed_up_total, Counter, Total number of times the upstream connection backed up and paused reads from downstream\n  upstream_flow_control_drained_total, Counter, Total number of times the upstream connection drained and resumed reads from downstream\n  upstream_internal_redirect_failed_total, Counter, Total number of times failed internal redirects resulted in redirects being passed downstream.\n  upstream_internal_redirect_succeed_total, Counter, Total number of times internal redirects resulted in a second upstream request.\n  membership_change, Counter, Total cluster membership changes\n  membership_healthy, Gauge, Current cluster healthy total (inclusive of both health checking and outlier detection)\n  membership_degraded, Gauge, Current cluster degraded total\n  membership_total, Gauge, Current cluster membership total\n  retry_or_shadow_abandoned, Counter, Total number of times shadowing or retry buffering was canceled due to buffer limits\n  config_reload, Counter, Total API fetches that resulted in a config reload due to a different config\n  update_attempt, Counter, Total attempted cluster membership updates by service discovery\n  update_success, Counter, Total successful cluster membership updates by service discovery\n  update_failure, Counter, Total failed cluster membership updates by service discovery\n  update_empty, Counter, Total cluster membership updates ending 
with empty cluster load assignment and continuing with previous config\n  update_no_rebuild, Counter, Total successful cluster membership updates that didn't result in any cluster load balancing structure rebuilds\n  version, Gauge, Hash of the contents from the last successful API fetch\n  max_host_weight, Gauge, Maximum weight of any host in the cluster\n  bind_errors, Counter, Total errors binding the socket to the configured source address\n  assignment_timeout_received, Counter, Total assignments received with endpoint lease information.\n  assignment_stale, Counter, Number of times the received assignments went stale before new assignments arrived.\n\nHealth check statistics\n-----------------------\n\nIf health check is configured, the cluster has an additional statistics tree rooted at\n*cluster.<name>.health_check.* with the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  attempt, Counter, Number of health checks\n  success, Counter, Number of successful health checks\n  failure, Counter, Number of immediately failed health checks (e.g. HTTP 503) as well as network failures\n  passive_failure, Counter, Number of health check failures due to passive events (e.g. x-envoy-immediate-health-check-fail)\n  network_failure, Counter, Number of health check failures due to network error\n  verify_cluster, Counter, Number of health checks that attempted cluster name verification\n  healthy, Gauge, Number of healthy members\n\n.. _config_cluster_manager_cluster_stats_outlier_detection:\n\nOutlier detection statistics\n----------------------------\n\nIf :ref:`outlier detection <arch_overview_outlier_detection>` is configured for a cluster,\nstatistics will be rooted at *cluster.<name>.outlier_detection.* and contain the following:\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  ejections_enforced_total, Counter, Number of enforced ejections due to any outlier type\n  ejections_active, Gauge, Number of currently ejected hosts\n  ejections_overflow, Counter, Number of ejections aborted due to the max ejection %\n  ejections_enforced_consecutive_5xx, Counter, Number of enforced consecutive 5xx ejections\n  ejections_detected_consecutive_5xx, Counter, Number of detected consecutive 5xx ejections (even if unenforced)\n  ejections_enforced_success_rate, Counter, Number of enforced success rate outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` config item. Refer to :ref:`Outlier Detection documentation<arch_overview_outlier_detection>` for details.\n  ejections_detected_success_rate, Counter, Number of detected success rate outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` config item. 
Refer to :ref:`Outlier Detection documentation<arch_overview_outlier_detection>` for details.\n  ejections_enforced_consecutive_gateway_failure, Counter, Number of enforced consecutive gateway failure ejections\n  ejections_detected_consecutive_gateway_failure, Counter, Number of detected consecutive gateway failure ejections (even if unenforced)\n  ejections_enforced_consecutive_local_origin_failure, Counter, Number of enforced consecutive local origin failure ejections\n  ejections_detected_consecutive_local_origin_failure, Counter, Number of detected consecutive local origin failure ejections (even if unenforced)\n  ejections_enforced_local_origin_success_rate, Counter, Number of enforced success rate outlier ejections for locally originated failures\n  ejections_detected_local_origin_success_rate, Counter, Number of detected success rate outlier ejections for locally originated failures (even if unenforced)\n  ejections_enforced_failure_percentage, Counter, Number of enforced failure percentage outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` config item. Refer to :ref:`Outlier Detection documentation<arch_overview_outlier_detection>` for details.\n  ejections_detected_failure_percentage, Counter, Number of detected failure percentage outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` config item. 
Refer to :ref:`Outlier Detection documentation<arch_overview_outlier_detection>` for details.\n  ejections_enforced_failure_percentage_local_origin, Counter, Number of enforced failure percentage outlier ejections for locally originated failures\n  ejections_detected_failure_percentage_local_origin, Counter, Number of detected failure percentage outlier ejections for locally originated failures (even if unenforced)\n  ejections_total, Counter, Deprecated. Number of ejections due to any outlier type (even if unenforced)\n  ejections_consecutive_5xx, Counter, Deprecated. Number of consecutive 5xx ejections (even if unenforced)\n\n.. _config_cluster_manager_cluster_stats_circuit_breakers:\n\nCircuit breakers statistics\n---------------------------\n\nCircuit breakers statistics will be rooted at *cluster.<name>.circuit_breakers.<priority>.* and contain the following:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  cx_open, Gauge, Whether the connection circuit breaker is closed (0) or open (1)\n  cx_pool_open, Gauge, Whether the connection pool circuit breaker is closed (0) or open (1)\n  rq_pending_open, Gauge, Whether the pending requests circuit breaker is closed (0) or open (1)\n  rq_open, Gauge, Whether the requests circuit breaker is closed (0) or open (1)\n  rq_retry_open, Gauge, Whether the retry circuit breaker is closed (0) or open (1)\n  remaining_cx, Gauge, Number of remaining connections until the circuit breaker opens\n  remaining_pending, Gauge, Number of remaining pending requests until the circuit breaker opens\n  remaining_rq, Gauge, Number of remaining requests until the circuit breaker opens\n  remaining_retries, Gauge, Number of remaining retries until the circuit breaker opens\n\n.. 
_config_cluster_manager_cluster_stats_timeout_budgets:\n\nTimeout budget statistics\n-------------------------\n\nIf :ref:`timeout budget statistic tracking <envoy_v3_api_field_config.cluster.v3.Cluster.track_timeout_budgets>` is\nturned on, statistics will be added to *cluster.<name>* and contain the following:\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   upstream_rq_timeout_budget_percent_used, Histogram, What percentage of the global timeout was used waiting for a response\n   upstream_rq_timeout_budget_per_try_percent_used, Histogram, What percentage of the per try timeout was used waiting for a response\n\n.. _config_cluster_manager_cluster_stats_dynamic_http:\n\nDynamic HTTP statistics\n-----------------------\n\nIf HTTP is used, dynamic HTTP response code statistics are also available. These are emitted by\nvarious internal systems as well as some filters such as the :ref:`router filter\n<config_http_filters_router>` and :ref:`rate limit filter <config_http_filters_rate_limit>`. They\nare rooted at *cluster.<name>.* and contain the following statistics:\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  upstream_rq_completed, Counter, \"Total upstream requests completed\"\n  upstream_rq_<\\*xx>, Counter, \"Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)\"\n  upstream_rq_<\\*>, Counter, \"Specific HTTP response codes (e.g., 201, 302, etc.)\"\n  upstream_rq_time, Histogram, Request time milliseconds\n  canary.upstream_rq_completed, Counter, \"Total upstream canary requests completed\"\n  canary.upstream_rq_<\\*xx>, Counter, Upstream canary aggregate HTTP response codes\n  canary.upstream_rq_<\\*>, Counter, Upstream canary specific HTTP response codes\n  canary.upstream_rq_time, Histogram, Upstream canary request time milliseconds\n  internal.upstream_rq_completed, Counter, \"Total internal origin requests completed\"\n  internal.upstream_rq_<\\*xx>, Counter, Internal origin aggregate HTTP response codes\n  internal.upstream_rq_<\\*>, Counter, Internal origin specific HTTP response codes\n  internal.upstream_rq_time, Histogram, Internal origin request time milliseconds\n  external.upstream_rq_completed, Counter, \"Total external origin requests completed\"\n  external.upstream_rq_<\\*xx>, Counter, External origin aggregate HTTP response codes\n  external.upstream_rq_<\\*>, Counter, External origin specific HTTP response codes\n  external.upstream_rq_time, Histogram, External origin request time milliseconds\n\n.. _config_cluster_manager_cluster_stats_alt_tree:\n\nAlternate tree dynamic HTTP statistics\n--------------------------------------\n\nIf alternate tree statistics are configured, they will be present in the\n*cluster.<name>.<alt name>.* namespace. The statistics produced are the same as documented in\nthe dynamic HTTP statistics section :ref:`above\n<config_cluster_manager_cluster_stats_dynamic_http>`.\n\n.. 
_config_cluster_manager_cluster_per_az_stats:\n\nPer service zone dynamic HTTP statistics\n----------------------------------------\n\nIf the service zone is available for the local service (via :option:`--service-zone`)\nand the :ref:`upstream cluster <arch_overview_service_discovery_types_eds>`,\nEnvoy will track the following statistics in *cluster.<name>.zone.<from_zone>.<to_zone>.* namespace.\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  upstream_rq_<\\*xx>, Counter, \"Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)\"\n  upstream_rq_<\\*>, Counter, \"Specific HTTP response codes (e.g., 201, 302, etc.)\"\n  upstream_rq_time, Histogram, Request time milliseconds\n\nLoad balancer statistics\n------------------------\n\nStatistics for monitoring load balancer decisions. Stats are rooted at *cluster.<name>.* and contain\nthe following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  lb_recalculate_zone_structures, Counter, The number of times locality aware routing structures are regenerated for fast decisions on upstream locality selection\n  lb_healthy_panic, Counter, Total requests load balanced with the load balancer in panic mode\n  lb_zone_cluster_too_small, Counter, No zone aware routing because of small upstream cluster size\n  lb_zone_routing_all_directly, Counter, Sending all requests directly to the same zone\n  lb_zone_routing_sampled, Counter, Sending some requests to the same zone\n  lb_zone_routing_cross_zone, Counter, Zone aware routing mode but have to send cross zone\n  lb_local_cluster_not_ok, Counter, Local host set is not set or it is panic mode for local cluster\n  lb_zone_number_differs, Counter, Number of zones in local and upstream cluster different\n  lb_zone_no_capacity_left, Counter, Total number of times ended with random zone selection due to rounding error\n  original_dst_host_invalid, Counter, Total number of invalid hosts passed to original destination 
load balancer\n\n.. _config_cluster_manager_cluster_stats_subset_lb:\n\nLoad balancer subset statistics\n-------------------------------\n\nStatistics for monitoring :ref:`load balancer subset <arch_overview_load_balancer_subsets>`\ndecisions. Stats are rooted at *cluster.<name>.* and contain the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  lb_subsets_active, Gauge, Number of currently available subsets\n  lb_subsets_created, Counter, Number of subsets created\n  lb_subsets_removed, Counter, Number of subsets removed due to no hosts\n  lb_subsets_selected, Counter, Number of times any subset was selected for load balancing\n  lb_subsets_fallback, Counter, Number of times the fallback policy was invoked\n  lb_subsets_fallback_panic, Counter, Number of times the subset panic mode triggered\n  lb_subsets_single_host_per_subset_duplicate, Gauge, Number of duplicate (unused) hosts when using :ref:`single_host_per_subset <envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.single_host_per_subset>`\n\n.. _config_cluster_manager_cluster_stats_ring_hash_lb:\n\nRing hash load balancer statistics\n----------------------------------\n\nStatistics for monitoring the size and effective distribution of hashes when using the\n:ref:`ring hash load balancer <arch_overview_load_balancing_types_ring_hash>`. Stats are rooted at\n*cluster.<name>.ring_hash_lb.* and contain the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  size, Gauge, Total number of host hashes on the ring\n  min_hashes_per_host, Gauge, Minimum number of hashes for a single host\n  max_hashes_per_host, Gauge, Maximum number of hashes for a single host\n\n.. 
_config_cluster_manager_cluster_stats_maglev_lb:\n\nMaglev load balancer statistics\n-------------------------------\n\nStatistics for monitoring effective host weights when using the\n:ref:`Maglev load balancer <arch_overview_load_balancing_types_maglev>`. Stats are rooted at\n*cluster.<name>.maglev_lb.* and contain the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  min_entries_per_host, Gauge, Minimum number of entries for a single host\n  max_entries_per_host, Gauge, Maximum number of entries for a single host\n\n.. _config_cluster_manager_cluster_stats_request_response_sizes:\n\nRequest Response Size statistics\n--------------------------------\n\nIf :ref:`request response size statistics <envoy_v3_api_field_config.cluster.v3.Cluster.track_cluster_stats>` are tracked,\nstatistics will be added to *cluster.<name>* and contain the following:\n\n.. csv-table::\n   :header: Name, Type, Description\n   :widths: 1, 1, 2\n\n   upstream_rq_headers_size, Histogram, Request headers size in bytes per upstream\n   upstream_rq_body_size, Histogram, Request body size in bytes per upstream\n   upstream_rs_headers_size, Histogram, Response headers size in bytes per upstream\n   upstream_rs_body_size, Histogram, Response body size in bytes per upstream\n"
  },
  {
    "path": "docs/root/configuration/upstream/cluster_manager/overview.rst",
    "content": "Overview\n========\n\n* Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`\n* :ref:`v3 API reference <envoy_v3_api_msg_config.bootstrap.v3.ClusterManager>`\n"
  },
  {
    "path": "docs/root/configuration/upstream/health_checkers/health_checkers.rst",
    "content": ".. _config_health_checkers:\n\nHealth checkers\n===============\n\n.. toctree::\n  :maxdepth: 2\n\n  redis\n"
  },
  {
    "path": "docs/root/configuration/upstream/health_checkers/redis.rst",
    "content": ".. _config_health_checkers_redis:\n\nRedis\n=====\n\nThe Redis health checker is a custom health checker (with :code:`envoy.health_checkers.redis` as name)\nwhich checks Redis upstream hosts. It sends a Redis PING command and expects a PONG response. The upstream\nRedis server can respond with anything other than PONG to cause an immediate active health check failure.\nOptionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist it is considered a\npassing health check. This allows the user to mark a Redis instance for maintenance by setting the\nspecified :ref:`key <envoy_v3_api_field_config.health_checker.redis.v2.Redis.key>` to any value and waiting\nfor traffic to drain.\n\nAn example setting for :ref:`custom_health_check <envoy_v3_api_msg_config.core.v3.HealthCheck.CustomHealthCheck>` as a\nRedis health checker is shown below:\n\n.. code-block:: yaml\n\n  custom_health_check:\n    name: envoy.health_checkers.redis\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n      key: foo\n\n* :ref:`v3 API reference <envoy_v3_api_msg_config.core.v3.HealthCheck.CustomHealthCheck>`\n"
  },
  {
    "path": "docs/root/configuration/upstream/upstream.rst",
    "content": "Upstream clusters\n=================\n\n.. toctree::\n  :maxdepth: 2\n\n  cluster_manager/cluster_manager\n  health_checkers/health_checkers\n"
  },
  {
    "path": "docs/root/extending/extending.rst",
    "content": ".. _extending:\n\nExtending Envoy for custom use cases\n====================================\n\nThe Envoy architecture makes it fairly easily extensible via a variety of different extension\ntypes including:\n\n* :ref:`Access loggers <arch_overview_access_logs>`\n* :ref:`Access log filters <arch_overview_access_log_filters>`\n* :ref:`Clusters <arch_overview_service_discovery>`\n* :ref:`Listener filters <arch_overview_listener_filters>`\n* :ref:`Network filters <arch_overview_network_filters>`\n* :ref:`HTTP filters <arch_overview_http_filters>`\n* :ref:`gRPC credential providers <arch_overview_grpc>`\n* :ref:`Health checkers <arch_overview_health_checking>`\n* :ref:`Resource monitors <arch_overview_overload_manager>`\n* :ref:`Retry implementations <arch_overview_http_routing_retry>`\n* :ref:`Stat sinks <arch_overview_statistics>`\n* :ref:`Tracers <arch_overview_tracing>`\n* :ref:`Request ID <arch_overview_tracing>`\n* :ref:`Transport sockets <envoy_v3_api_field_extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider.typed_config>`\n* BoringSSL private key methods\n* :ref:`Watchdog action <envoy_v3_api_msg_config.bootstrap.v3.Watchdog.WatchdogAction>`\n* :ref:`Internal redirect policy <envoy_v3_api_field_config.route.v3.InternalRedirectPolicy.predicates>`\n* :ref:`Compression libraries <arch_overview_compression_libraries>`\n\nAs of this writing there is no high level extension developer documentation. The\n:repo:`existing extensions <source/extensions>` are a good way to learn what is possible.\n\nAn example of how to add a network filter and structure the repository and build dependencies can\nbe found at `envoy-filter-example <https://github.com/envoyproxy/envoy-filter-example>`_.\n"
  },
  {
    "path": "docs/root/faq/api/control_plane.rst",
    "content": ".. _control_plane:\n\nHow do I support multiple xDS API major versions in my control plane?\n=====================================================================\n\nWhere possible, it is highly recommended that control planes support a single major version at a\ngiven point in time for simplicity. This works in situations where control planes need to only\nsupport a window of Envoy versions which spans less than a year. Temporary support for multiple\nversions during rollout in this scenario is described :ref:`here <control_plane_version_support>`.\n\nFor control planes that need to support a wider range of versions, there are a few approaches:\n\n1. Independent vN/v(N+1) configuration generation pipelines. This is simple to understand but\n   involves significant duplication of code and can be expensive engineering wise. This may work\n   well if the API surface in use is small.\n2. Have the control plane use vN canonically and mechanically transform vN messages to their v(N+1)\n   equivalents. This does not allow for the use of any new v(N+1) features. It is necessary to avoid\n   the use of any deprecated vN fields. With these caveats aside, a simple transformation is\n   possible where the vN proto message is serialized and then deserialized as a v(N+1) proto message\n   (this binary compatibility is guaranteed). An optimization when serving *google.protobuf.Any*\n   resources in a *DiscoveryResponse* is to simply rewrite the type URL.\n3. Have the control plane use v(N+1) canonically and mechanically transform v(N+1) messages to their\n   vN equivalents when serving vN-only Envoys. This works provided it is safe to ignore v(N+1)-only\n   fields from the perspective of the operator's intent when providing input to the config pipeline\n   (e.g. if a new regex type is requested and silently ignored in a *RouteMatch* for vN Envoys, this\n   is problematic). 
Similar to (2), the v(N+1) message may be transformed to a vN equivalent by a\n   serialization round-trip. If the goal is to support the widest range of vN clients, it's\n   necessary to transform, using hand-written code, fields that are present in both vN/v(N+1) to\n   their vN deprecated counterparts, since some earlier vN Envoy clients will not have the newer\n   fields common to vN and v(N+1).\n"
  },
  {
    "path": "docs/root/faq/api/control_plane_version_support.rst",
    "content": ".. _control_plane_version_support:\n\nWhich xDS transport and resource versions does my control plane need to support?\n================================================================================\n\nIf a control plane is serving a well known set of clients at a given API major version, it only\nneeds to support that version (both transport and resource version). However, even in this\nrelatively basic scenario, if the set of clients straddles a major version drop or the control plane\nwishes to move from v2/v3, there are considerations around rollout of client and server binaries.\n\nOne approach to this problem is to add temporary support to the management server for both v2 and v3\ntransport versions (see https://github.com/envoyproxy/go-control-plane). For resources, messages\nare binary compatible modulo deprecated or new fields between API major versions. If the control\nplane no longer emits resources with deprecated fields, this allows for a trivial replacement of\ntype URL based on the requested resource from the client to serve the same resource for v2 and v3. A\ntypical rollout sequence might look like:\n\n1. Clients with a mix of v2 and v3 support are in operation, with a v2 management server. The\n   client bootstraps will reference v2 API transport endpoints.\n\n2. A management server with dual v2/v3 API support is rolled out. Both v2 and v3 transport endpoints\n   are supported, while a trivial type URL replacement in the returned resource is sufficient for\n   matching the requested v2 or v3 resource type URL with the existing v2 resource in the control\n   plane. When returning resources with embedded `ConfigSource` messages pointing at xDS resources\n   for a v3 request, it will be necessary to set the `transport_api_version` and\n   `resource_api_version` to v3. No deprecated v2 fields or new v3 fields can be used at this point.\n\n3. 
Client bootstraps are upgraded to v3 API transport endpoints and v3 API resource versions.\n\n4. Support for v2 is removed in the management server. The management server moves to v3 exclusively\n   internally and can support newer fields.\n\nAnother approach for type url version migration will be to enable the support of mixed type url \nprotected by a runtime guard *envoy.reloadable_features.enable_type_url_downgrade_and_upgrade*.\nClient can send discovery request with v2 resource type url and process discovery response with \nv3 resource type url. Client can also send discovery request with v3 resource type url and process \ndiscovery response with v2 resource type url. The upgrade and downgrade of type url is performed automatically.\nIf your management server does not support both v2/v3 at the same time, you can have clients \nwith type url upgrade and downgrade feature enabled. These clients can talk to a mix of management servers\nthat support either v2 or v3 exclusively. Just like the first approach, no deprecated v2 fields or new v3 fields \ncan be used at this point.\n\nIf you are operating a managed control plane as-a-service, you will likely need to support a wide\nrange of client versions. In this scenario, you will require long term support for multiple major\nAPI transport and resource versions. Strategies for managing this support are described :ref:`here\n<control_plane>`.\n"
  },
  {
    "path": "docs/root/faq/api/envoy_upgrade_v3.rst",
    "content": "If I upgrade to Envoy 1.13+, do I need to use the v3 API?\n=========================================================\n\nThe v2 API is deprecated in the 1.13.0 release (January 2020). It will be fully supported for the\nduration of 2020 and then all support for v2 will be removed from Envoy at EOY 2020.\n\nAll existing v2 boostrap and xDS configuration should continue to work seamlessly in 1.13.0 and for the\nduration of 2020. Envoy internally operates at v3+, but does so by transparently upgrading\nconfiguration from v2 at ingestion time.\n\nSince EOQ1 2020, we have frozen the v2 API and no new features will be added. To consume these\nnewer features, you will need to migrate to the v3 API.\n\nIt is highly recommend that operators with self-managed and/or self-developed control planes migrate\nto v3 well before Q4 2020 in order to avoid hitting the hard deadline for v3 support at EOY.\n\n"
  },
  {
    "path": "docs/root/faq/api/envoy_v2_support.rst",
    "content": "How long will the v2 APIs be supported?\n=======================================\n\nThe v2 xDS APIs are deprecated and will be removed form Envoy at the end of 2020, as per the\n:repo:`API versioning policy </api/API_VERSIONING.md>`.\n\n"
  },
  {
    "path": "docs/root/faq/api/envoy_v3.rst",
    "content": "How do I configure Envoy to use the v3 API?\n===========================================\n\nBy default, Envoy will attempt to parse any YAML, JSON or text proto as v2, and if it fails to do\nso, will consider it as v3. So, if you have a simple static Envoy consuming a text-based bootstrap,\nyou just need to start using the new configuration. For binary proto bootstrap configuration, please\nuse a :ref:`v3 Bootstrap <envoy_v3_api_msg_config.bootstrap.v3.Bootstrap>` proto.\n\nFor dynamic configuration, we have introduced two new fields to :ref:`config sources\n<envoy_v3_api_msg_config.core.v3.ConfigSource>`, transport API version and resource API version. The\ndistinction is as follows:\n\n* The :ref:`transport API version\n  <envoy_v3_api_field_config.core.v3.ApiConfigSource.transport_api_version>` indicates the API\n  endpoint and version of *DiscoveryRequest*/*DiscoveryResponse* messages used.\n\n* The :ref:`resource API version\n  <envoy_v3_api_field_config.core.v3.ConfigSource.resource_api_version>` indicates whether a v2 or\n  v3 resource, e.g. v2 *RouteConfiguration* or v3 *RouteConfiguration*, is delivered.\n\nIt is possible to use a mixture of transport API and resource API versions, e.g. to deliver v2\n*Listener* resources and v3 *RouteConfiguration* resources over a v2 ADS transport. This is an\nintentional feature designed to provide for gradual migration of Envoy deployments from v2 to v3.\n\nThere may be some operational advantage in having vM resources delivered over vN endpoints, so we\nprovide the flexibility to make this call by appropriate configuration of :ref:`config sources\n<envoy_v3_api_msg_config.core.v3.ConfigSource>`.\n"
  },
  {
    "path": "docs/root/faq/api/extensions.rst",
    "content": "How does API versioning interact with a new extension?\n======================================================\n\nFor extension configuration API, please follow the :repo:`new extension configuration steps\n<api/STYLE.md#adding-an-extension-configuration-to-the-api>` in the style guide.\n\nExtension implementations should operate with v3 messages internally, for both their own\nconfiguration and other Envoy configuration messages. Unit tests should be written against v3\nconfiguration.\n"
  },
  {
    "path": "docs/root/faq/api/incremental.rst",
    "content": "What is the status of incremental xDS support?\n==============================================\n\nThe :ref:`incremental xDS <xds_protocol_delta>` protocol is designed to improve efficiency,\nscalability and functional use of xDS updates via two mechanisms:\n\n* Delta xDS. Resource deltas are delivered rather than state-of-the-world.\n* On-demand xDS. Resource can be lazy loaded depending on request contents.\n\nCurrently, all xDS protocols (including ADS) support delta xDS. On-demand xDS is supported for\n:ref:`VHDS <config_http_conn_man_vhds>` only.\n"
  },
  {
    "path": "docs/root/faq/api/package_naming.rst",
    "content": "What do the v2, v3, vN etc. mean in API package names?\n======================================================\n\nSee the :repo:`API versioning guidelines <api/API_VERSIONING.md>`.\n"
  },
  {
    "path": "docs/root/faq/api/why_versioning.rst",
    "content": "Why are the Envoy xDS APIs versioned? What is the benefit?\n==========================================================\n\nEnvoy is a platform and needs to allow its APIs to grow and evolve to encompass new features,\nimprove ergonomics and address new use cases. At the same time, we need a disciplined approach to\nturning down stale functionality and removing APIs and their supporting code that are no longer\nmaintained. If we don't do this, we lose the ability in the long term to provide a reliable,\nmaintainable, scalable code base and set of APIs for our users.\n\nWe had previously put in place policies around :repo:`breaking changes\n<CONTRIBUTING.md#breaking-change-policy>` across releases, the :repo:`API versioning policy\n<api/API_VERSIONING.md>` takes this a step further, articulating a guaranteed multi-year support\nwindow for APIs that provides control plane authors a predictable clock when considering support\nfor a range of Envoy versions.\n\nFor the v3 xDS APIs, a brief list of the key improvements that were made with a clean break from v2:\n\n* Packages organization was improved to reflect a more logical grouping of related APIs:\n\n  - The legacy `envoy.api.v2` tree was eliminated, with protos moved to their logical groupings,\n    e.g. `envoy.config.core.v3`, `envoy.server.listener.v3`.\n  - All packages are now versioned with a `vN` at the end. This allows for type-level identification\n    of major version.\n  - xDS service endpoints/transport and configuration are split between `envoy.service` and\n    `envoy.config`.\n  - Extensions now reflect the Envoy source tree layout under `envoy.extensions`.\n* `std::regex` regular expressions were dropped from the API, in favor of RE2. The former have dangerous\n  security implications.\n* `google.protobuf.Struct` configuration of extensions was dropped from the API, in favor of\n  typed configuration. 
This provides for better support for multiple instances of extensions, e.g.\n  in filter chains, and more flexible naming of extension instances.\n* Over 60 deprecated fields were removed from the API.\n* Tooling and processes were established for API versioning support. This has now been reflected in\n  the bootstrap `Node`, providing a long term notion of API support that control planes can depend\n  upon for client negotiation.\n"
  },
  {
    "path": "docs/root/faq/build/binaries.rst",
    "content": "Where do I get binaries?\n========================\n\nPlease see :ref:`here <install_binaries>`.\n"
  },
  {
    "path": "docs/root/faq/build/boringssl.rst",
    "content": "Why does Envoy use BoringSSL?\n=============================\n\n`BoringSSL <https://boringssl.googlesource.com/boringssl/>`_ is a slimmed down TLS implementation\nmaintained by Google. Getting TLS right is very, very hard. Envoy has chosen to align with\nBoringSSL so as to obtain access to the world class experts that Google employs to work on this\ncode base. In short: if BoringSSL is good enough for Google's production systems it is good enough\nfor Envoy and the project will not offer first class support for any other TLS implementation.\n"
  },
  {
    "path": "docs/root/faq/configuration/deprecation.rst",
    "content": ".. _faq_deprecation:\n\nHow are configuration deprecations handled?\n===========================================\n\nAs documented in the \"Breaking change policy\" in :repo:`CONTRIBUTING.md`, features can be marked\nas deprecated at any point as long as there is a replacement available. Each deprecation is\nannotated in the API proto itself and explained in detail in the\n:ref:`Envoy documentation <deprecated>`.\n\nFor the first 3 months following deprecation, use of deprecated fields will result in a logged\nwarning and incrementing the :ref:`deprecated_feature_use <runtime_stats>` counter.\nAfter that point, the field will be annotated as fatal-by-default and further use of the field\nwill be treated as invalid configuration unless\n:ref:`runtime overrides <config_runtime_deprecation>` are employed to re-enable use.\n"
  },
  {
    "path": "docs/root/faq/configuration/edge.rst",
    "content": ".. _faq_edge:\n\nHow do I configure Envoy as an edge proxy?\n==========================================\n\nRefer to :ref:`configuring Envoy as an edge proxy <best_practices_edge>`\nfor an example of the edge proxy configuration.\n"
  },
  {
    "path": "docs/root/faq/configuration/flow_control.rst",
    "content": ".. _faq_flow_control:\n\nHow do I configure flow control?\n================================\n\nFlow control may cause problems where Envoy is using non-streaming L7 filters, and request or\nresponse bodies exceed the L7 buffer limits. For requests where the body must be buffered and\nexceeds the configured limits, Envoy will serve a 413 to the user and increment the\n:ref:`downstream_rq_too_large <config_http_conn_man_stats>` metric. On the response path if the\nresponse body must be buffered and exceeds the limit, Envoy will increment the\n:ref:`rs_too_large <config_http_conn_man_stats>` metric and either disconnect mid-response\n(if headers have already been sent downstream) or send a 500 response.\n\nThere are three knobs for configuring Envoy flow control:\n:ref:`listener limits <envoy_v3_api_field_config.listener.v3.Listener.per_connection_buffer_limit_bytes>`,\n:ref:`cluster limits <envoy_v3_api_field_config.cluster.v3.Cluster.per_connection_buffer_limit_bytes>` and\n:ref:`http2 stream limits <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.initial_connection_window_size>`\n\nThe listener limits apply to how much raw data will be read per read() call from\ndownstream, as well as how much data may be buffered in userspace between Envoy\nand downstream.\n\nThe listener limits are also propogated to the HttpConnectionManager, and applied on a per-stream\nbasis to HTTP/1.1 L7 buffers described below. As such they limit the size of HTTP/1 requests and\nresponse bodies that can be buffered. For HTTP/2, as many streams can be multiplexed over one TCP\nconnection, the L7 and L4 buffer limits can be tuned separately, and the configuration option\n:ref:`http2 stream limits <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.initial_connection_window_size>`\nis applied to all of the L7 buffers. 
Note that for both HTTP/1 and\nHTTP/2 Envoy can and will proxy arbitrarily large bodies on routes where all L7 filters are\nstreaming, but many filters such as the transcoder or buffer filters require the full HTTP body to\nbe buffered, so limit the request and response size based on the listener limit.\n\nThe cluster limits affect how much raw data will be read per read() call from upstream, as\nwell as how much data may be buffered in userspace between Envoy and upstream.\n\nThe following code block shows how to adjust all three fields mentioned above, though generally\nthe only one which needs to be amended is the listener\n:ref:`per_connection_buffer_limit_bytes <envoy_v3_api_field_config.listener.v3.Listener.per_connection_buffer_limit_bytes>`\n\n.. code-block:: yaml\n\n  static_resources:\n    listeners:\n      name: http\n      address:\n        socket_address:\n          address: '::1'\n          port_value: 0\n      filter_chains:\n        filters:\n          name: envoy.filters.network.http_connection_manager\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n            http2_protocol_options:\n              initial_stream_window_size: 65535\n            route_config: {}\n            codec_type: HTTP2\n            http_filters: []\n            stat_prefix: config_test\n      per_connection_buffer_limit_bytes: 1024\n    clusters:\n      name: cluster_0\n      connect_timeout: 5s\n      per_connection_buffer_limit_bytes: 1024\n      load_assignment:\n        cluster_name: some_service\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: ::1\n                    port_value: 46685\n"
  },
  {
    "path": "docs/root/faq/configuration/level_two.rst",
    "content": ".. _faq_level2:\n\nHow do I configure Envoy as a level two proxy?\n==============================================\n\nRefer to :ref:`configuring Envoy as a level two proxy <best_practices_level2>`\nfor an example of the level 2 proxy configuration.\n"
  },
  {
    "path": "docs/root/faq/configuration/resource_limits.rst",
    "content": ".. _faq_resource_limits:\n\nHow does Envoy prevent file descriptor exhaustion?\n==================================================\n\n:ref:`Per-listener connection limits <config_listeners_runtime>` may be configured as an upper bound\non the number of active connections a particular listener will accept. The listener may accept more\nconnections than the configured value on the order of the number of worker threads.\n\nIn addition, one may configure a :ref:`global limit <config_overload_manager>` on the number of\nconnections that will apply across all listeners.\n\nOn Unix-based systems, it is recommended to keep the sum of all connection limits less than half of\nthe system's file descriptor limit to account for upstream connections, files, and other usage of\nfile descriptors.\n\n.. note::\n\n    This per-listener connection limiting will eventually be handled by the :ref:`overload manager\n    <arch_overview_overload_manager>`.\n"
  },
  {
    "path": "docs/root/faq/configuration/sni.rst",
    "content": ".. _faq_how_to_setup_sni:\n\nHow do I configure SNI for listeners?\n=====================================\n\n`SNI <https://en.wikipedia.org/wiki/Server_Name_Indication>`_ is only supported in the :ref:`v3\nconfiguration/API <config_overview>`.\n\n.. attention::\n\n  :ref:`TLS Inspector <config_listener_filters_tls_inspector>` listener filter must be configured\n  in order to detect requested SNI.\n\nThe following is a YAML example of the above requirement.\n\n.. code-block:: yaml\n\n  address:\n    socket_address: { address: 127.0.0.1, port_value: 1234 }\n  listener_filters:\n  - name: \"envoy.filters.listener.tls_inspector\"\n    typed_config: {}\n  filter_chains:\n  - filter_chain_match:\n      server_names: [\"example.com\", \"www.example.com\"]\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n        common_tls_context:\n          tls_certificates:\n          - certificate_chain: { filename: \"example_com_cert.pem\" }\n            private_key: { filename: \"example_com_key.pem\" }\n    filters:\n    - name: envoy.filters.network.http_connection_manager\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n        stat_prefix: ingress_http\n        route_config:\n          virtual_hosts:\n          - name: default\n            domains: \"*\"\n            routes:\n            - match: { prefix: \"/\" }\n              route: { cluster: service_foo }\n  - filter_chain_match:\n      server_names: \"api.example.com\"\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n        common_tls_context:\n          tls_certificates:\n          - certificate_chain: { filename: 
\"api_example_com_cert.pem\" }\n            private_key: { filename: \"api_example_com_key.pem\" }\n    filters:\n    - name: envoy.filters.network.http_connection_manager\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n        stat_prefix: ingress_http\n        route_config:\n          virtual_hosts:\n          - name: default\n            domains: \"*\"\n            routes:\n            - match: { prefix: \"/\" }\n              route: { cluster: service_foo }\n\n\nHow do I configure SNI for clusters?\n====================================\n\nFor clusters, a fixed SNI can be set in :ref:`UpstreamTlsContext <envoy_v3_api_field_extensions.transport_sockets.tls.v3.UpstreamTlsContext.sni>`.\nTo derive SNI from HTTP `host` or `:authority` header, turn on\n:ref:`auto_sni <envoy_v3_api_field_config.core.v3.UpstreamHttpProtocolOptions.auto_sni>` to override the fixed SNI in\n`UpstreamTlsContext`. If upstream will present certificates with the hostname in SAN, turn on\n:ref:`auto_san_validation <envoy_v3_api_field_config.core.v3.UpstreamHttpProtocolOptions.auto_san_validation>` too.\nIt still needs a trust CA in validation context in `UpstreamTlsContext` for trust anchor.\n"
  },
  {
    "path": "docs/root/faq/configuration/timeouts.rst",
    "content": ".. _faq_configuration_timeouts:\n\nHow do I configure timeouts?\n============================\n\nEnvoy supports a wide range of timeouts that may need to be configured depending on the deployment.\nThis page summarizes the most important timeouts used in various scenarios.\n\n.. attention::\n\n  This is not an exhaustive list of all of the configurable timeouts that Envoy supports. Depending\n  on the deployment additional configuration may be required.\n\nHTTP/gRPC\n---------\n\nConnection timeouts\n^^^^^^^^^^^^^^^^^^^\n\nConnection timeouts apply to the entire HTTP connection and all streams the connection carries.\n\n* The HTTP protocol :ref:`idle timeout <envoy_v3_api_field_config.core.v3.HttpProtocolOptions.idle_timeout>`\n  is defined in a generic message used by both the HTTP connection manager as well as upstream\n  cluster HTTP connections. The idle timeout is the time at which a downstream or upstream\n  connection will be terminated if there are no active streams. The default idle timeout if not\n  otherwise specified is *1 hour*. To modify the idle timeout for downstream connections use the\n  :ref:`common_http_protocol_options\n  <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options>`\n  field in the HTTP connection manager configuration. To modify the idle timeout for upstream\n  connections use the\n  :ref:`common_http_protocol_options <envoy_v3_api_field_config.cluster.v3.Cluster.common_http_protocol_options>` field\n  in the cluster configuration.\n\nStream timeouts\n^^^^^^^^^^^^^^^\n\nStream timeouts apply to individual streams carried by an HTTP connection. 
Note that a stream is\nan HTTP/2 and HTTP/3 concept, however internally Envoy maps HTTP/1 requests to streams so in this\ncontext request/stream is interchangeable.\n\n* The HTTP connection manager :ref:`request_timeout\n  <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout>`\n  is the amount of time the connection manager will allow for the *entire request stream* to be\n  received from the client.\n\n  .. attention::\n\n    This timeout is not enforced by default as it is not compatible with streaming requests\n    (requests that never end). See the stream idle timeout that follows. However, if using the\n    :ref:`buffer filter <config_http_filters_buffer>`, it is recommended to configure this timeout.\n* The HTTP connection manager :ref:`stream_idle_timeout\n  <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout>`\n  is the amount of time that the connection manager will allow a stream to exist with no upstream\n  or downstream activity. The default stream idle timeout is *5 minutes*. This timeout is strongly\n  recommended for all requests (not just streaming requests/responses) as it additionally defends\n  against an HTTP/2 peer that does not open stream window once an entire response has been buffered\n  to be sent to a downstream client.\n* The HTTP protocol :ref:`max_stream_duration <envoy_v3_api_field_config.core.v3.HttpProtocolOptions.max_stream_duration>` \n  is defined in a generic message used by the HTTP connection manager. The max stream duration is the \n  maximum time that a stream's lifetime will span. You can use this functionality when you want to reset \n  HTTP request/response streams periodically. 
You can't use :ref:`request_timeout \n  <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout>`\n  in this situation because this timer will be disarmed if a response header is received on the request/response streams.\n  This timeout is available on both upstream and downstream connections.\n\nRoute timeouts\n^^^^^^^^^^^^^^\n\nEnvoy supports additional stream timeouts at the route level, as well as overriding some of the\nstream timeouts already introduced above.\n\n* A route :ref:`timeout <envoy_v3_api_field_config.route.v3.RouteAction.timeout>` is the amount of time that\n  Envoy will wait for the upstream to respond with a complete response. *This timeout does not\n  start until the entire downstream request stream has been received*.\n\n  .. attention::\n\n    This timeout defaults to *15 seconds*, however, it is not compatible with streaming responses\n    (responses that never end), and will need to be disabled. Stream idle timeouts should be used\n    in the case of streaming APIs as described elsewhere on this page.\n* The route :ref:`idle_timeout <envoy_v3_api_field_config.route.v3.RouteAction.idle_timeout>` allows overriding\n  of the HTTP connection manager :ref:`stream_idle_timeout\n  <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout>`\n  and does the same thing.\n* The route :ref:`per_try_timeout <envoy_v3_api_field_config.route.v3.RetryPolicy.per_try_timeout>` can be\n  configured when using retries so that individual tries using a shorter timeout than the overall\n  request timeout described above. 
This timeout only applies before any part of the response\n  is sent to the downstream, which normally happens after the upstream has sent response headers.\n  This timeout can be used with streaming endpoints to retry if the upstream fails to begin a\n  response within the timeout.\n* The route :ref:`MaxStreamDuration proto <envoy_v3_api_msg_config.route.v3.RouteAction.MaxStreamDuration>`\n  can be used to override the HttpConnectionManager's\n  :ref:`max_stream_duration <envoy_v3_api_field_config.core.v3.HttpProtocolOptions.max_stream_duration>`\n  for individual routes as well as setting both limits and a fixed time offset on grpc-timeout headers.\n\nTCP\n---\n\n* The cluster :ref:`connect_timeout <envoy_v3_api_field_config.cluster.v3.Cluster.connect_timeout>` specifies the amount\n  of time Envoy will wait for an upstream TCP connection to be established. This timeout has no\n  default, but is required in the configuration.\n\n  .. attention::\n\n    For TLS connections, the connect timeout includes the TLS handshake.\n* The TCP proxy :ref:`idle_timeout\n  <envoy_v3_api_field_extensions.filters.network.tcp_proxy.v3.TcpProxy.idle_timeout>`\n  is the amount of time that the TCP proxy will allow a connection to exist with no upstream\n  or downstream activity. The default idle timeout if not otherwise specified is *1 hour*.\n"
  },
  {
    "path": "docs/root/faq/configuration/zipkin_tracing.rst",
    "content": ".. _common_configuration_zipkin_tracing:\n\nHow do I configure Zipkin tracing?\n==================================\n\nRefer to the :ref:`zipkin sandbox setup <install_sandboxes_zipkin_tracing>`\nfor an example of zipkin tracing configuration.\n"
  },
  {
    "path": "docs/root/faq/configuration/zone_aware_routing.rst",
    "content": ".. _common_configuration_zone_aware_routing:\n\nHow do I configure zone aware routing?\n======================================\n\nThere are several steps required for enabling :ref:`zone aware routing <arch_overview_load_balancing_zone_aware_routing>`\nbetween source service (\"cluster_a\") and destination service (\"cluster_b\").\n\nEnvoy configuration on the source service\n-----------------------------------------\nThis section describes the specific configuration for the Envoy running side by side with the source service.\nThese are the requirements:\n\n* Envoy must be launched with :option:`--service-zone` option which defines the zone for the current host.\n* Both definitions of the source and the destination clusters must have :ref:`EDS <envoy_v3_api_field_config.cluster.v3.Cluster.type>` type.\n* :ref:`local_cluster_name <envoy_v3_api_field_config.bootstrap.v3.ClusterManager.local_cluster_name>` must be set to the\n  source cluster.\n\n  Only essential parts are listed in the configuration below for the cluster manager.\n\n.. code-block:: yaml\n\n  cluster_manager:\n    local_cluster_name: cluster_a\n  static_resources:\n    clusters:\n    - name: cluster_a\n      type: EDS\n      eds_cluster_config: ...\n    - name: cluster_b\n      type: EDS\n      eds_cluster_config: ...\n\nEnvoy configuration on the destination service\n----------------------------------------------\nIt's not necessary to run Envoy side by side with the destination service, but it's important that each host in the\ndestination cluster registers with the discovery service :ref:`queried by the source service Envoy\n<config_overview_management_server>`. :ref:`Zone <envoy_v3_api_msg_config.endpoint.v3.LocalityLbEndpoints>`\ninformation must be available as part of that response.\n\nOnly zone related data is listed in the response below.\n\n.. 
code-block:: yaml\n\n  locality:\n    zone: us-east-1d\n\nInfrastructure setup\n--------------------\nThe above configuration is necessary for zone aware routing, but there are certain conditions\nwhen zone aware routing is :ref:`not performed <arch_overview_load_balancing_zone_aware_routing_preconditions>`.\n\nVerification steps\n------------------\n* Use :ref:`per zone <config_cluster_manager_cluster_per_az_stats>` Envoy stats to monitor cross zone traffic.\n"
  },
  {
    "path": "docs/root/faq/debugging/why_is_envoy_404ing_connect_requests.rst",
    "content": ".. _faq_why_is_envoy_404ing_connect_requests:\n\nWhy is Envoy sending 404s to CONNECT requests?\n==============================================\n\nEnvoy's default matchers match based on host and path. Because CONNECT requests (generally) do not have a path, most matchers will fail to match CONNECT requests, and Envoy will send a 404 because the route is not found. The solution for HTTP/1.1 CONNECT requests, is to use a :ref:`connect_matcher <envoy_v3_api_msg_config.route.v3.RouteMatch.ConnectMatcher>` as described in the CONNECT section of the :ref:`upgrade documentation<arch_overview_upgrades>`.\n"
  },
  {
    "path": "docs/root/faq/debugging/why_is_envoy_sending_413s.rst",
    "content": ".. _faq_why_is_envoy_sending_413:\n\nWhy is Envoy sending 413s?\n==========================\n\nEnvoy by default imposes limits to how much it will buffer for a given request. Generally, Envoy filters are designed to be streaming, and will pass data from downstream to upstream, or will simply pause processing while waiting for an external event (e.g. doing auth checks). Some filters, for example the buffer filter, require buffering the full request or response. If a request body is too large to buffer, but buffering is required by the filter, Envoy will send a 413. The buffer limits can be increased at the risk of making OOMs more possible. Please see the ref:`flow control docs <faq_flow_control>` for details.\n"
  },
  {
    "path": "docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst",
    "content": ".. _why_is_envoy_sending_http2_resets:\n\nWhy is Envoy sending HTTP/2 resets?\n===================================\n\nThe HTTP/2 reset path is mostly governed by the codec Envoy uses to frame HTTP/2, nghttp2. nghttp2 has\nextremely good adherence to the HTTP/2 spec, but as many clients are not exactly as compliant, this\nmismatch can cause unexpected resets. Unfortunately, unlike the debugging the \n:ref:`internal response path <why_is_envoy_sending_internal_responses>`, Envoy has limited visibility into\nthe specific reason nghttp2 reset a given stream.\n\nIf you have a reproducible failure case, you can run it against a debug Envoy with \"-l trace\" to get\ndetailed nghttp2 error logs, which often indicate which header failed compliance checks. Alternately,\nif you can afford to run with \"-l trace\" on a machine encountering the errors, you can look for logs\nfrom the file \"source/common/http/http2/codec_impl.cc\" of the form\n`invalid http2: [nghttp2 error detail]`\nfor example:\n`invalid http2: Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], value: [3]`\n\nYou can also check :ref:`HTTP/2 stats`<config_http_conn_man_stats_per_codec>`: in many cases where\nEnvoy resets streams, for example if there are more headers than allowed by configuration or flood\ndetection kicks in, http2 counters will be incremented as the streams are reset.\n\n\n"
  },
  {
    "path": "docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst",
    "content": ".. _why_is_envoy_sending_internal_responses:\n\nWhy is Envoy sending internal responses?\n========================================\n\nOne of the easiest ways to get an understanding of why Envoy sends a given local response, is to turn on trace logging. If you can run your instance with “-l trace” you will slow Envoy down significantly, but get detailed information on various events in the lifetime of each stream and connection. Any time Envoy sends an internally generated response it will log to the _debug_ level “Sending local reply with details [unique reason]” which gives you information about why the local response was sent. Each individual response detail is used at one point in the code base, be it a codec validation check or a failed route match.\n\nIf turning on debug logging is not plausible, the response details can be added to the access logs using _%RESPONSE_CODE_DETAILS%_, and again it will let you pinpoint the exact reason a given response was generated. Documentation on response code details can be found :ref:`here<config_http_conn_man_details>`\n\n"
  },
  {
    "path": "docs/root/faq/debugging/why_is_my_route_not_found.rst",
    "content": ".. _why_is_my_route_not_found:\n\nWhy is my route not found?\n==========================\n\nOnce you've drilled down into Envoy responses and discovered Envoy generating local responses with the message\n\"Sending local reply with details route_not_found\" the next question is _why_?\n\nOften you can look at your route configuration and the headers sent, and see what is missing.\nOne often overlooked problem is host:port matching. If your route configuration matches the domain\nwww.host.com but the client is sending requests to www.host.com:443, it will not match.\n\nIf this is the problem you are encountering you can solve it one of two ways. First by changing your\nconfiguration to match host:port pairs, going from\n\n.. code-block:: yaml\n\n  domains:\n    - \"www.host.com\"\n\nto\n\n.. code-block:: yaml\n\n  domains:\n    - \"www.host.com\"\n    - \"www.host.com:80\"\n    - \"www.host.com:443\"\n\nThe other is to strip ports entirely using :ref:`stripping port from host header <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.strip_matching_host_port>`. Not that this will only stip port 80 from insecure requests and 443 from secure request. It does\nnot just stop ports when routes are matched, but changes\nthe host sent downstream to also not include the port.\n\n"
  },
  {
    "path": "docs/root/faq/extensions/contract.rst",
    "content": ".. _faq_filter_contract:\n\nIs there a contract my HTTP filter must adhere to?\n--------------------------------------------------\n\n* Headers encoding/decoding\n\n  * During encoding/decoding of headers if a filter returns ``FilterHeadersStatus::StopIteration``,\n    the processing can be resumed if ``encodeData()``/``decodeData()`` return\n    ``FilterDataStatus::Continue`` or by explicitly calling\n    ``continueEncoding()``/``continueDecoding()``.\n\n  * During encoding/decoding of headers if a filter returns\n    ``FilterHeadersStatus::StopAllIterationAndBuffer`` or\n    ``FilterHeadersStatus::StopAllIterationAndWatermark``, the processing can be resumed by calling\n    ``continueEncoding()``/``continueDecoding()``.\n\n  * A filter's ``decodeHeaders()`` implementation must not return\n    ``FilterHeadersStatus::ContinueAndEndStream`` when called with ``end_stream`` set to *true*. In this case\n    ``FilterHeadersStatus::Continue`` should be returned.\n\n  * A filter's ``encode100ContinueHeaders()`` must return ``FilterHeadersStatus::Continue`` or\n    ``FilterHeadersStatus::StopIteration``.\n\n* Data encoding/decoding\n\n  * During encoding/decoding of data if a filter returns\n    ``FilterDataStatus::StopIterationAndBuffer``, ``FilterDataStatus::StopIterationAndWatermark``,\n    or ``FilterDataStatus::StopIterationNoBuffer``, the processing can be resumed if\n    ``encodeData()``/``decodeData()`` return ``FilterDataStatus::Continue`` or by explicitly\n    calling ``continueEncoding()``/``continueDecoding()``.\n\n* Trailers encoding/decoding\n\n  * During encoding/decoding of trailers if a filter returns ``FilterTrailersStatus::StopIteration``,\n    the processing can be resumed by explicitly calling ``continueEncoding()``/``continueDecoding()``.\n\nAre there well-known headers that will appear in the given headers map of ``decodeHeaders()``?\n----------------------------------------------------------------------------------------------\n\nThe 
first filter of the decoding filter chain will have the following headers in the map:\n\n* ``Host``\n* ``Path`` (this might be omitted for CONNECT requests).\n\nAlthough these headers may be omitted by one of the filters on the decoding filter chain,\nthey should be reinserted before the terminal filter is triggered.\n\n"
  },
  {
    "path": "docs/root/faq/load_balancing/concurrency_lb.rst",
    "content": "Why doesn't RR load balancing appear to be even?\n================================================\n\nEnvoy utilizes a siloed :ref:`threading model <arch_overview_threading>`. This means that worker\nthreads and the load balancers that run on them do not coordinate with each other. When utilizing\nload balancing policies such as :ref:`round robin <arch_overview_load_balancing_types_round_robin>`,\nit may thus appear that load balancing is not working properly when using multiple workers. The\n:option:`--concurrency` option can be used to adjust the number of workers if desired.\n\nThe siloed execution model is also the reason why multiple HTTP/2 connections may be established to\neach upstream; :ref:`connection pools <arch_overview_conn_pool>` are not shared between workers.\n"
  },
  {
    "path": "docs/root/faq/load_balancing/disable_circuit_breaking.rst",
    "content": ".. _faq_disable_circuit_breaking:\n\nIs there a way to disable circuit breaking?\n===========================================\n\nEnvoy comes with :ref:`certain defaults <envoy_v3_api_msg_config.cluster.v3.CircuitBreakers.Thresholds>`\nfor each kind of circuit breaking. Currently, there isn't a switch to turn\ncircuit breaking off completely; however, you could achieve a similar behavior\nby setting these thresholds very high, for example, to `std::numeric_limits<uint32_t>::max()`.\n\nFollowing is a sample configuration that tries to effectively disable all kinds\nof circuit breaking by setting the thresholds to a value of `1000000000`.\n\n.. code-block:: yaml\n\n  circuit_breakers:\n    thresholds:\n      - priority: DEFAULT\n        max_connections: 1000000000\n        max_pending_requests: 1000000000\n        max_requests: 1000000000\n        max_retries: 1000000000\n      - priority: HIGH\n        max_connections: 1000000000\n        max_pending_requests: 1000000000\n        max_requests: 1000000000\n        max_retries: 1000000000\n\nEnvoy supports priority routing at the route level. You may adjust the thresholds accordingly.\n"
  },
  {
    "path": "docs/root/faq/load_balancing/lb_panic_threshold.rst",
    "content": "I setup health checking. When I fail some hosts, Envoy starts routing to all of them again. Why?\n================================================================================================\n\nThis feature is known as the load balancer :ref:`panic threshold\n<arch_overview_load_balancing_panic_threshold>`. It is used to prevent cascading failure when\nupstream hosts start failing health checks in large numbers.\n"
  },
  {
    "path": "docs/root/faq/load_balancing/region_failover.rst",
    "content": "How do I make Envoy fail over to another region during service degradation?\n===========================================================================\n\nEnvoy uses the concept of\n`priorities <arch_overview_load_balancing_priority_levels>` to express\nthe idea that a certain set of endpoints should be preferred over others.\n\nBy putting the preferred endpoints into the lower priority, Envoy will\nalways select one of these endpoints as long as that priority is sufficiently\navailable. This means that common failover scenarios can be expressed by\nputting the fallback endpoints in a different priority. See the\n`priority <arch_overview_load_balancing_priority_levels>` for more information\nabout this.\n"
  },
  {
    "path": "docs/root/faq/load_balancing/transient_failures.rst",
    "content": ".. _common_configuration_transient_failures:\n\nHow do I handle transient failures?\n===================================\n\nOne of the biggest advantages of using Envoy in a service mesh is that it frees up services\nfrom implementing complex resiliency features like circuit breaking, outlier detection and retries\nthat enable services to be resilient to realities such as rolling upgrades, dynamic infrastructure,\nand network failures. Having these features implemented at Envoy not only improves the availability\nand resiliency of services but also brings in consistency in terms of the behaviour and observability.\n\nThis section explains at a high level the configuration supported by Envoy and how these features can be\nused together to handle these scenarios.\n\nCircuit Breaking\n----------------\n\n:ref:`Circuit Breaking <arch_overview_circuit_break>` is a critical component of distributed systems.\nCircuit breaking lets applications configure failure thresholds that ensure safe maximums, allowing components\nto fail quickly and apply back pressure as soon as possible. Applying correct circuit breaking thresholds helps\nto save resources which otherwise are wasted in waiting for requests (timeouts) or retrying requests unnecessarily.\nOne of the main advantages of the circuit breaking implementation in Envoy is that the circuit breaking limits are applied\nat the network level.\n\n.. _common_configuration_transient_failures_retries:\n\nRetries\n-------\n\nAutomatic :ref:`request retries <config_http_filters_router>` is another method of ensuring service resilience. Request retries should\ntypically be used to guard against transient failures. 
Envoy supports very rich set of configurable parameters that dictate what type\nof requests are retried, how many times the request should be retried, timeouts for retries, etc.\n\nRetries in gRPC services\n------------------------\n\nFor gRPC services, Envoy looks at the gRPC status in the response and attempts a retry based on the statuses configured in\n:ref:`x-envoy-retry-grpc-on <config_http_filters_router_x-envoy-retry-grpc-on>`.\n\nThe following application status codes in gRPC are considered safe for automatic retry.\n\n* *CANCELLED* - Return this code if there is an error that can be retried in the service.\n* *RESOURCE_EXHAUSTED* - Return this code if some of the resources that service depends on are exhausted in that instance so that retrying\n  to another instance would help. Please note that for shared resource exhaustion, returning this will not help. Instead :ref:`rate limiting <arch_overview_global_rate_limit>`\n  should be used to handle such cases.\n\nThe HTTP Status codes *502 (Bad Gateway)*, *503 (Service Unavailable)* and *504 (Gateway Timeout)* are all mapped to gRPC status code *UNAVAILABLE*.\nThis can also be considered safe for automatic retry.\n\nThe idempotency of a request is an important consideration when configuring retries.\n\nEnvoy also supports extensions to its retry policies. The :ref:`retry plugins <arch_overview_http_retry_plugins>`\nallow you to customize the Envoy retry implementation to your application.\n\nOutlier Detection\n-----------------\n\n:ref:`Outlier detection <arch_overview_outlier_detection>` is a way of dynamically detecting misbehaving hosts\nin the upstream cluster. By detecting such hosts and ejecting them for a temporary period of time from the healthy\nload balancing set, Envoy can increase the success rate of a cluster. 
Envoy supports configuring outlier detection\nbased on continuous *5xx*, continuous gateway failures and success rate.\n\nEnvoy also allows you to configure the ejection period.\n\n**Configuration**\n\nThe following settings help to optimize some combination of:\n\n* Maximum request success for common scenarios (i.e. rolling upgrade)\n* Speed\n* Avoid cascading failures\n\n\n*Circuit Breaker*\n\n.. code-block:: json\n\n  {\n     \"thresholds\": [\n       {\n         \"max_retries\": 10\n       }\n    ]\n  }\n\nFor the purpose of this specific use case, the retry budget for upstream cluster should be configured to\nenable and control concurrent retries. If the value configured is too low, some requests will not be retried,\nwhich can be measured via :ref:`upstream_rq_retry_overflow <config_cluster_manager_cluster_stats>`.\nIf the value configured is too high, the service can be overwhelmed with retry requests.\n\n\n*Outlier Detection*\n\n.. code-block:: json\n\n  {\n     \"consecutive_5xx\": 5,\n     \"base_ejection_time\": \"30s\",\n     \"max_ejection_percent\": 50,\n     \"consecutive_gateway_failure\": 5\n  }\n\nThis setting enables outlier detection if there are 5 consecutive *5xx* or *gateway failures*\nand limits the number of hosts that are ejected to 50% of the upstream cluster size. This configuration\nplaces a safe limit on the number of hosts removed. Please note that once a host is ejected, it will be returned\nto the pool after the ejection time has elapsed (which is equal to the *base_ejection_time* multiplied by the number\nof times the host has been ejected).\n\n*Request Retry*\n\n.. 
code-block:: json\n\n  {\n     \"retry_on\": \"cancelled,connect-failure,gateway-error,refused-stream,reset,resource-exhausted,unavailable\",\n     \"num_retries\": 1,\n     \"retry_host_predicate\": [\n     {\n        \"name\": \"envoy.retry_host_predicates.previous_hosts\"\n     }\n    ],\n    \"host_selection_retry_max_attempts\": \"5\"\n  }\n\nThe request will be retried based on the conditions documented in *retry_on*. This setting also configures Envoy to use\n:ref:`Previous Host Retry Predicate <arch_overview_http_retry_plugins>` that allows it to choose a different\nhost than the host where previous request has failed, because typically failures on that same host are likely to continue\nfor some time and immediate retry would have less chance of success.\n"
  },
  {
    "path": "docs/root/faq/overview.rst",
    "content": ".. _faq_overview:\n\nFAQ\n===\n\nBuild\n-----\n\n.. toctree::\n  :maxdepth: 2\n\n  build/binaries\n  build/boringssl\n\nAPI\n---\n.. toctree::\n  :maxdepth: 2\n\n  api/envoy_v2_support\n  api/envoy_v3\n  api/envoy_upgrade_v3\n  api/extensions\n  api/control_plane_version_support\n  api/control_plane\n  api/package_naming\n  api/why_versioning\n  api/incremental\n\n.. _faq_overview_debug:\n\nDebugging\n---------\n.. toctree::\n  :maxdepth: 2\n\n  debugging/why_is_envoy_sending_internal_responses\n  debugging/why_is_envoy_sending_http2_resets\n  debugging/why_is_envoy_404ing_connect_requests\n  debugging/why_is_envoy_sending_413s\n  debugging/why_is_my_route_not_found\n\nPerformance\n-----------\n\n.. toctree::\n  :maxdepth: 2\n\n  performance/how_fast_is_envoy\n  performance/how_to_benchmark_envoy\n\nConfiguration\n-------------\n\n.. toctree::\n  :maxdepth: 2\n\n  configuration/edge\n  configuration/level_two\n  configuration/sni\n  configuration/zone_aware_routing\n  configuration/zipkin_tracing\n  configuration/flow_control\n  configuration/timeouts\n  configuration/deprecation\n  configuration/resource_limits\n\nLoad balancing\n--------------\n\n.. toctree::\n  :maxdepth: 2\n\n  load_balancing/lb_panic_threshold\n  load_balancing/concurrency_lb\n  load_balancing/disable_circuit_breaking\n  load_balancing/transient_failures\n  load_balancing/region_failover\n\nExtensions\n----------\n\n.. toctree::\n  :maxdepth: 2\n\n  extensions/contract\n"
  },
  {
    "path": "docs/root/faq/performance/how_fast_is_envoy.rst",
    "content": ".. _faq_how_fast_is_envoy:\n\nHow fast is Envoy?\n==================\n\nWe are frequently asked *how fast is Envoy?* or *how much latency will Envoy add to my requests?*\nThe answer is: *it depends*. Performance depends a great deal on which Envoy features are being\nused and the environment in which Envoy is run. In addition, doing accurate performance testing\nis an incredibly difficult task that the project does not currently have resources for.\n\nAlthough we have done quite a bit of performance tuning of Envoy in the critical path and we\nbelieve it performs extremely well, because of the previous points we do not currently publish\nany official benchmarks. We encourage users to benchmark Envoy in their own environments with a\nconfiguration similar to what they plan on using in production.\n"
  },
  {
    "path": "docs/root/faq/performance/how_to_benchmark_envoy.rst",
    "content": "What are best practices for benchmarking Envoy?\n===============================================\n\nThere is :ref:`no single QPS, latency or throughput overhead <faq_how_fast_is_envoy>` that can\ncharacterize a network proxy such as Envoy. Instead, any measurements need to be contextually aware,\nensuring an apples-to-apples comparison with other systems by configuring and load testing Envoy\nappropriately. As a result, we can't provide a canonical benchmark configuration, but instead offer\nthe following guidance:\n\n* A release Envoy binary should be used. If building, please ensure that `-c opt`\n  is used on the Bazel command line. When consuming Envoy point releases, make\n  sure you are using the latest point release; given the pace of Envoy development\n  it's not reasonable to pick older versions when making a statement about Envoy\n  performance. Similarly, if working on a master build, please perform due diligence\n  and ensure no regressions or performance improvements have landed proximal to your\n  benchmark work and that your are close to HEAD.\n\n* The :option:`--concurrency` Envoy CLI flag should be unset (providing one worker thread per\n  logical core on your machine) or set to match the number of cores/threads made available to other\n  network proxies in your comparison.\n\n* Disable :ref:`circuit breaking <faq_disable_circuit_breaking>`. A common issue during benchmarking\n  is that Envoy's default circuit breaker limits are low, leading to connection and request queuing.\n\n* Disable :ref:`generate_request_id\n  <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.generate_request_id>`.\n\n* Disable :ref:`dynamic_stats\n  <envoy_v3_api_field_extensions.filters.http.router.v3.Router.dynamic_stats>`. If you are measuring\n  the overhead vs. 
a direct connection, you might want to consider disabling all stats via\n  :ref:`reject_all <envoy_v3_api_field_config.metrics.v3.StatsMatcher.reject_all>`.\n\n* Ensure that the networking and HTTP filter chains are reflective of comparable features\n  in the systems that Envoy is being compared with.\n\n* Ensure that TLS settings (if any) are realistic and that consistent cyphers are used in\n  any comparison. Session reuse may have a significant impact on results and should be tracked via\n  :ref:`listener SSL stats <config_listener_stats>`.\n\n* Ensure that :ref:`HTTP/2 settings <envoy_v3_api_msg_config.core.v3.Http2ProtocolOptions>`, in\n  particular those that affect flow control and stream concurrency, are consistent in any\n  comparison. Ideally taking into account BDP and network link latencies when optimizing any\n  HTTP/2 settings.\n\n* Verify in the listener and cluster stats that the number of streams, connections and errors\n  matches what is expected in any given experiment.\n\n* Make sure you are aware of how connections created by your load generator are\n  distributed across Envoy worker threads. This is especially important for\n  benchmarks that use low connection counts and perfect keep-alive. You should be aware that\n  Envoy will allocate all streams for a given connection to a single worker thread. This means,\n  for example, that if you have 72 logical cores and worker threads, but only a single HTTP/2\n  connection from your load generator, then only 1 worker thread will be active.\n\n* Make sure request-release timing expectations line up with what is intended.\n  Some load generators produce naturally jittery and/or batchy timings. This\n  might end up being an unintended dominant factor in certain tests.\n\n* The specifics of how your load generator reuses connections is an important factor (e.g. MRU,\n  random, LRU, etc.) 
as this impacts work distribution.\n\n* If you're trying to measure small (say < 1ms) latencies, make sure the measurement tool and\n  environment have the required sensitivity and the noise floor is sufficiently low.\n\n* Be critical of your bootstrap or xDS configuration. Ideally every line has a motivation and is\n  necessary for the benchmark under consideration.\n\n* Consider using `Nighthawk <https://github.com/envoyproxy/nighthawk>`_ as your\n  load generator and measurement tool. We are committed to building out\n  benchmarking and latency measurement best practices in this tool.\n\n* Examine `perf` profiles of Envoy during the benchmark run, e.g. with `flame graphs\n  <http://www.brendangregg.com/flamegraphs.html>`_. Verify that Envoy is spending its time\n  doing the expected essential work under test, rather than some unrelated or tangential\n  work.\n\n* Familiarize yourself with `latency measurement best practices\n  <https://www.youtube.com/watch?v=lJ8ydIuPFeU>`_. In particular, never measure latency at\n  max load, this is not generally meaningful or reflecting of real system performance; aim\n  to measure below the knee of the QPS-latency curve. Prefer open vs. closed loop load\n  generators.\n\n* Avoid `benchmarking crimes <https://www.cse.unsw.edu.au/~gernot/benchmarking-crimes.html>`_.\n"
  },
  {
    "path": "docs/root/index.rst",
    "content": "Envoy documentation\n=================================\n\n.. ifconfig:: release_level in ('pre-release')\n\n  .. attention::\n\n    This is pre-release documentation. There is risk of it not being consistent with what is\n    currently implemented in Envoy, though we try to make things consistent as quickly as possible.\n\n.. toctree::\n  :maxdepth: 2\n\n  about_docs\n  intro/intro\n  start/start\n  install/install\n  version_history/version_history\n  configuration/configuration\n  operations/operations\n  extending/extending\n  api/api\n  faq/overview\n"
  },
  {
    "path": "docs/root/install/building.rst",
    "content": ".. _building:\n\n\nBuilding\n========\n\nThe Envoy build system uses Bazel. In order to ease initial building and for a quick start, we\nprovide an Ubuntu 16 based docker container that has everything needed inside of it to build\nand *statically link* Envoy, see :repo:`ci/README.md`.\n\nIn order to build manually, follow the instructions at :repo:`bazel/README.md`.\n\n.. _install_requirements:\n\nRequirements\n------------\n\nEnvoy was initially developed and deployed on Ubuntu 14.04 LTS. It should work on any reasonably\nrecent Linux including Ubuntu 18.04 LTS.\n\nBuilding Envoy has the following requirements:\n\n* GCC 7+ or Clang/LLVM 7+ (for C++14 support). Clang/LLVM 9+ preferred where Clang is used (see below).\n* These :repo:`Bazel native <bazel/repository_locations.bzl>` dependencies.\n\nPlease see the linked :repo:`CI <ci/README.md>` and :repo:`Bazel <bazel/README.md>` documentation\nfor more information on performing manual builds.\nPlease note that for Clang/LLVM 8 and lower, Envoy may need to be built with `--define tcmalloc=gperftools`\nas the new tcmalloc code is not guaranteed to compile with lower versions of Clang.\n\n.. _install_binaries:\n\nPre-built binaries\n------------------\n\nWe build and tag Docker images with release versions when we do official releases. 
These images can\nbe found in the following repositories:\n\n* `envoyproxy/envoy <https://hub.docker.com/r/envoyproxy/envoy/tags/>`_: Release binary with\n  symbols stripped on top of an Ubuntu Bionic base.\n* `envoyproxy/envoy-debug <https://hub.docker.com/r/envoyproxy/envoy-debug/tags/>`_: Release\n  binary with debug symbols on top of an Ubuntu Bionic base.\n* `envoyproxy/envoy-alpine <https://hub.docker.com/r/envoyproxy/envoy-alpine/tags/>`_: Release\n  binary with symbols stripped on top of a **glibc** alpine base.\n* `envoyproxy/envoy-alpine-debug <https://hub.docker.com/r/envoyproxy/envoy-alpine-debug/tags/>`_:\n  *Deprecated in favor of envoyproxy/envoy-debug.* Release binary with debug symbols on top of a\n  **glibc** alpine base.\n\n.. note::\n\n  In the above repositories, we tag a *vX.Y-latest* image for each security/stable release line.\n\nOn every master commit we additionally create a set of development Docker images. These images can\nbe found in the following repositories:\n\n* `envoyproxy/envoy-dev <https://hub.docker.com/r/envoyproxy/envoy-dev/tags/>`_: Release binary with\n  symbols stripped on top of an Ubuntu Bionic base.\n* `envoyproxy/envoy-debug-dev <https://hub.docker.com/r/envoyproxy/envoy-debug-dev/tags/>`_: Release\n  binary with debug symbols on top of an Ubuntu Bionic base.\n* `envoyproxy/envoy-alpine-dev <https://hub.docker.com/r/envoyproxy/envoy-alpine-dev/tags/>`_: Release\n  binary with symbols stripped on top of a **glibc** alpine base.\n* `envoyproxy/envoy-alpine-debug-dev <https://hub.docker.com/r/envoyproxy/envoy-alpine-debug-dev/tags/>`_:\n  *Deprecated in favor of envoyproxy/envoy-debug-dev.* Release binary with debug symbols on top of a\n  **glibc** alpine base.\n\nIn the above *dev* repositories, the *latest* tag points to the last Envoy SHA in master that passed\ntests.\n\n.. 
note::\n\n  The Envoy project considers master to be release candidate quality at all times, and many\n  organizations track and deploy master in production. We encourage you to do the same so that\n  issues can be reported as early as possible in the development process.\n\nPackaged Envoy pre-built binaries for a variety of platforms are available via\n`GetEnvoy.io <https://www.getenvoy.io/>`_.\n\nWe will consider producing additional binary types depending on community interest in helping with\nCI, packaging, etc. Please open an `issue in GetEnvoy <https://github.com/tetratelabs/getenvoy/issues>`_\nfor pre-built binaries for different platforms.\n\n.. _arm_binaries:\n\nARM64 binaries\n^^^^^^^^^^^^^^\n\n`envoyproxy/envoy <https://hub.docker.com/r/envoyproxy/envoy/tags/>`_,\n`envoyproxy/envoy-debug <https://hub.docker.com/r/envoyproxy/envoy-debug/tags/>`_,\n`envoyproxy/envoy-dev <https://hub.docker.com/r/envoyproxy/envoy-dev/tags/>`_ and\n`envoyproxy/envoy-debug-dev <https://hub.docker.com/r/envoyproxy/envoy-debug-dev/tags/>`_ are Docker\n`multi-arch <https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/>`_ images\nand should run transparently on compatible ARM64 hosts.\n\nModifying Envoy\n---------------\n\nIf you're interested in modifying Envoy and testing your changes, one approach\nis to use Docker. This guide will walk through the process of building your own\nEnvoy binary, and putting the binary in an Ubuntu container.\n\n.. toctree::\n    :maxdepth: 2\n\n    sandboxes/local_docker_build\n"
  },
  {
    "path": "docs/root/install/install.rst",
    "content": ".. _install:\n\nBuilding and installation\n=========================\n\n.. toctree::\n  :maxdepth: 2\n\n  building\n  ref_configs\n  tools/tools\n"
  },
  {
    "path": "docs/root/install/ref_configs.rst",
    "content": ".. _install_ref_configs:\n\nReference configurations\n========================\n\nThe source distribution includes a set of example configuration templates for each of the three\nmajor Envoy deployment types:\n\n* :ref:`Service to service <deployment_type_service_to_service>`\n* :ref:`Front proxy <deployment_type_front_proxy>`\n* :ref:`Double proxy <deployment_type_double_proxy>`\n\nThe goal of this set of example configurations is to demonstrate the full capabilities of Envoy in\na complex deployment. All features will not be applicable to all use cases. For full documentation\nsee the :ref:`configuration reference <config>`.\n\nConfiguration generator\n-----------------------\n\nEnvoy configurations can become relatively complicated. At Lyft we use `jinja\n<http://jinja.pocoo.org/>`_ templating to make the configurations easier to create and manage. The\nsource distribution includes a version of the configuration generator that loosely approximates what\nwe use at Lyft. We have also included three example configuration templates for each of the above\nthree scenarios.\n\n* Generator script: :repo:`configs/configgen.py`\n* Service to service template: :repo:`configs/envoy_service_to_service_v2.template.yaml`\n* Front proxy template: :repo:`configs/envoy_front_proxy_v2.template.yaml`\n* Double proxy template: :repo:`configs/envoy_double_proxy_v2.template.yaml`\n\nTo generate the example configurations run the following from the root of the repo:\n\n.. code-block:: console\n\n  mkdir -p generated/configs\n  bazel build //configs:example_configs\n  tar xvf $PWD/bazel-out/k8-fastbuild/bin/configs/example_configs.tar -C generated/configs\n\nThe previous command will produce three fully expanded configurations using some variables\ndefined inside of `configgen.py`. 
See the comments inside of `configgen.py` for detailed\ninformation on how the different expansions work.\n\nA few notes about the example configurations:\n\n* An instance of :ref:`endpoint discovery service <arch_overview_service_discovery_types_eds>` is assumed\n  to be running at `discovery.yourcompany.net`.\n* DNS for `yourcompany.net` is assumed to be set up for various things. Search the configuration\n  templates for different instances of this.\n* Tracing is configured for `LightStep <https://lightstep.com/>`_. To\n  disable this or enable `Zipkin <https://zipkin.io>`_ or `Datadog <https://datadoghq.com>`_ tracing, delete or\n  change the :ref:`tracing configuration <envoy_api_file_envoy/config/trace/v2/trace.proto>` accordingly.\n* The configuration demonstrates the use of a :ref:`global rate limiting service\n  <arch_overview_global_rate_limit>`. To disable this delete the :ref:`rate limit configuration\n  <config_rate_limit_service>`.\n* :ref:`Route discovery service <config_http_conn_man_rds>` is configured for the service to service\n  reference configuration and it is assumed to be running at `rds.yourcompany.net`.\n* :ref:`Cluster discovery service <config_cluster_manager_cds>` is configured for the service to\n  service reference configuration and it is assumed to be running at `cds.yourcompany.net`.\n"
  },
  {
    "path": "docs/root/install/sandboxes/local_docker_build.rst",
    "content": ".. _install_sandboxes_local_docker_build:\n\nBuilding an Envoy Docker image\n==============================\n\nThe following steps guide you through building your own Envoy binary, and\nputting that in a clean Ubuntu container.\n\n**Step 1: Build Envoy**\n\nUsing ``envoyproxy/envoy-build`` you will compile Envoy.\nThis image has all software needed to build Envoy. From your Envoy directory::\n\n  $ pwd\n  src/envoy\n  $ ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release'\n\nThat command will take some time to run because it is compiling an Envoy binary and running tests.\n\nFor more information on building and different build targets, please refer to :repo:`ci/README.md`.\n\n**Step 2: Build image with only Envoy binary**\n\nIn this step we'll build an image that only has the Envoy binary, and none\nof the software used to build it.::\n\n  $ pwd\n  src/envoy/\n  $ docker build -f ci/Dockerfile-envoy -t envoy .\n\nNow you can use this ``envoy`` image to build any of the sandboxes if you change\nthe ``FROM`` line in any Dockerfile.\n\nThis will be particularly useful if you are interested in modifying Envoy, and testing\nyour changes.\n"
  },
  {
    "path": "docs/root/install/tools/config_load_check_tool.rst",
    "content": ".. _install_tools_config_load_check_tool:\n\nConfig load check tool\n======================\n\nThe config load check tool checks that a configuration file in JSON format is written using valid JSON\nand conforms to the Envoy JSON schema. This tool leverages the configuration test in\n``test/config_test/config_test.cc``. The test loads the JSON configuration file and runs server configuration\ninitialization with it.\n\nInput\n  The tool expects a PATH to the root of a directory that holds JSON Envoy configuration files. The tool\n  will recursively go through the file system tree and run a configuration test for each file found. Keep in mind that\n  the tool will try to load all files found in the path.\n\nOutput\n  The tool will output Envoy logs as it initializes the server configuration with the config it is currently testing.\n  If there are configuration files where the JSON file is malformed or does not conform to the Envoy JSON schema, the\n  tool will exit with status EXIT_FAILURE. If the tool successfully loads all configuration files found it will\n  exit with status EXIT_SUCCESS.\n\nBuilding\n  The tool can be built locally using Bazel. ::\n\n    bazel build //test/tools/config_load_check:config_load_check_tool\n\nRunning\n  The tool takes a path as described above. ::\n\n    bazel-bin/test/tools/config_load_check/config_load_check_tool PATH\n"
  },
  {
    "path": "docs/root/install/tools/route_table_check_tool.rst",
    "content": ".. _install_tools_route_table_check_tool:\n\nRoute table check tool\n=======================\n\nThe route table check tool checks whether the route parameters returned by a router match what is expected.\nThe tool can also be used to check whether a path redirect, path rewrite, or host rewrite\nmatch what is expected.\n\nUsage\n  router_check_tool [-t <string>] [-c <string>] [-d] [-p] [--] [--version] [-h] <unlabelledConfigStrings>\n    -t <string>,  --test-path <string>\n      Path to a tool config JSON file. The tool config JSON file schema is found in\n      :ref:`config <config_tools_router_check_tool>`.\n      The tool config input file specifies urls (composed of authorities and paths)\n      and expected route parameter values. Additional parameters such as additional headers are optional.\n      \n      Schema: All internal schemas in the tool are based on :repo:`proto3 <test/tools/router_check/validation.proto>`.\n\n    -c <string>,  --config-path <string>\n      Path to a v2 router config file (YAML or JSON). The router config file schema is found in\n      :ref:`config <envoy_api_file_envoy/api/v2/route/route.proto>` and the config file extension\n      must reflect its file type (for instance, .json for JSON and .yaml for YAML).\n\n    -d,  --details\n      Show detailed test execution results. The first line indicates the test name.\n\n    --only-show-failures\n      Displays test results for failed tests. Omits test names for passing tests if the details flag is set.\n\n    -f, --fail-under\n      Represents a percent value for route test coverage under which the run should fail.\n\n    --covall\n      Enables comprehensive code coverage percent calculation taking into account all the possible\n      asserts. 
Displays missing tests.\n\n    --disable-deprecation-check\n      Disables the deprecation check for RouteConfiguration proto.\n\n    -h,  --help\n      Displays usage information and exits.\n\nOutput\n  The program exits with status EXIT_FAILURE if any test case does not match the expected route parameter\n  value.\n\n  If a test fails, details of the failed test cases are printed if ``--details`` flag is provided. \n  The first field is the expected route parameter value. The second field is the actual route parameter value. \n  The third field indicates the parameter that is compared.\n  In the following example, Test_2 and Test_5 failed while the other tests\n  passed. In the failed test cases, conflict details are printed. ::\n\n    Test_1\n    Test_2\n    default other virtual_host_name\n    Test_3\n    Test_4\n    Test_5\n    locations ats cluster_name\n    Test_6\n\nBuilding\n  The tool can be built locally using Bazel. ::\n\n    bazel build //test/tools/router_check:router_check_tool\n\nRunning\n  Example ::\n\n    bazel-bin/test/tools/router_check/router_check_tool -c router_config.(yaml|json) -t tool_config.json --details\n\nTesting\n  A bash shell script test can be run with bazel. The test compares routes using different router and\n  tool configuration files. The configuration files can be found in\n  test/tools/router_check/test/config/... . ::\n\n    bazel test //test/tools/router_check/...\n"
  },
  {
    "path": "docs/root/install/tools/schema_validator_check_tool.rst",
    "content": ".. _install_tools_schema_validator_check_tool:\n\nSchema Validator check tool\n===========================\n\nThe schema validator tool validates that the passed in configuration conforms to\na given schema. The configuration may be JSON or YAML. To validate the entire\nconfig, please refer to the\n:ref:`config load check tool<install_tools_config_load_check_tool>`.\n\nInput\n  The tool expects two inputs:\n\n  1. The schema type to check the passed in configuration against. The supported types are:\n\n    * `route` - for :ref:`route configuration<envoy_v3_api_msg_config.route.v3.RouteConfiguration>` validation.\n    * `discovery_response` for :ref:`discovery response<envoy_v3_api_msg_service.discovery.v3.DiscoveryResponse>` validation.\n\n  2. The path to the configuration file.\n\nOutput\n  If the configuration conforms to the schema, the tool will exit with status\n  EXIT_SUCCESS. If the configuration does not conform to the schema, an error\n  message is outputted detailing what doesn't conform to the schema. The tool\n  will exit with status EXIT_FAILURE.\n\nBuilding\n  The tool can be built locally using Bazel. ::\n\n    bazel build //test/tools/schema_validator:schema_validator_tool\n\nRunning\n  The tool takes a path as described above. ::\n\n    bazel-bin/test/tools/schema_validator/schema_validator_tool  --schema-type SCHEMA_TYPE  --config-path PATH\n"
  },
  {
    "path": "docs/root/install/tools/tools.rst",
    "content": "Tools\n=====\n\n.. toctree::\n  :maxdepth: 2\n\n  config_load_check_tool\n  route_table_check_tool\n  schema_validator_check_tool\n"
  },
  {
    "path": "docs/root/intro/_include/life-of-a-request.yaml",
    "content": "\nstatic_resources:\n  listeners:\n  # There is a single listener bound to port 443.\n  - name: listener_https\n    address:\n      socket_address:\n        protocol: TCP\n        address: 0.0.0.0\n        port_value: 443\n    # A single listener filter exists for TLS inspector.\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    # On the listener, there is a single filter chain that matches SNI for acme.com.\n    filter_chains:\n    - filter_chain_match:\n        # This will match the SNI extracted by the TLS Inspector filter.\n        server_names: [\"acme.com\"]\n      # Downstream TLS configuration.\n      transport_socket:\n        name: envoy.transport_sockets.tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n            - certificate_chain: { filename: \"certs/servercert.pem\" }\n              private_key: { filename: \"certs/serverkey.pem\" }\n      filters:\n      # The HTTP connection manager is the only network filter.\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          stat_prefix: ingress_http\n          use_remote_address: true\n          http2_protocol_options:\n            max_concurrent_streams: 100\n          # File system based access logging.\n          access_log:\n            - name: envoy.access_loggers.file\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog\n                path: \"/var/log/envoy/access.log\"\n          # The route table, mapping /foo to some_service.\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n   
           domains: [\"acme.com\"]\n              routes:\n              - match:\n                  path: \"/foo\"\n                route:\n                  cluster: some_service\n      # CustomFilter and the HTTP router filter are the HTTP filter chain.\n      http_filters:\n          - name: some.customer.filter\n          - name: envoy.filters.http.router\n  clusters:\n  - name: some_service\n    connect_timeout: 5s\n    # Upstream TLS configuration.\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n    load_assignment:\n      cluster_name: some_service\n      # Static endpoint assignment.\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 10.1.2.10\n                port_value: 10002\n        - endpoint:\n            address:\n              socket_address:\n                address: 10.1.2.11\n                port_value: 10002\n    http2_protocol_options:\n      max_concurrent_streams: 100\n  - name: some_statsd_sink\n    connect_timeout: 5s\n    # The rest of the configuration for statsd sink cluster.\n# statsd sink.\nstats_sinks:\n   - name: envoy.stat_sinks.statsd\n     typed_config:\n       \"@type\": type.googleapis.com/envoy.config.metrics.v3.StatsdSink\n       tcp_cluster_name: some_statsd_cluster\n"
  },
  {
    "path": "docs/root/intro/arch_overview/advanced/advanced.rst",
    "content": "Advanced\n========\n\n.. toctree::\n  :maxdepth: 2\n\n  data_sharing_between_filters\n"
  },
  {
    "path": "docs/root/intro/arch_overview/advanced/data_sharing_between_filters.rst",
    "content": ".. _arch_overview_data_sharing_between_filters:\n\nSharing data between filters\n============================\n\nEnvoy provides the following mechanisms for the transfer of configuration,\nmetadata and per-request/connection state to, from and between filters, as\nwell as to other core subsystems (e.g., access logging).\n\nStatic State\n^^^^^^^^^^^^\n\nStatic state is any immutable state specified at configuration load time\n(e.g., through xDS). There are three categories of static state:\n\nMetadata\n--------\n\nSeveral parts of Envoy configuration (e.g. listeners, routes, clusters)\ncontain a :ref:`metadata <envoy_v3_api_msg_config.core.v3.Metadata>` where arbitrary\nkey-value pairs can be encoded. The typical pattern is to use the filter\nnames in reverse DNS format as the key and encode filter specific\nconfiguration metadata in the value. This metadata is immutable and shared\nacross all requests/connections. Such config metadata is usually provided\nduring bootstrap time or as part of xDS. For example, weighted clusters in\nHTTP routes use the metadata to indicate the labels on the endpoints\ncorresponding to the weighted cluster. Another example, the subset load\nbalancer uses the metadata from the route entry corresponding to the\nweighted cluster to select appropriate endpoints in a cluster\n\nTyped Metadata\n--------------\n\n:ref:`Metadata <envoy_v3_api_msg_config.core.v3.Metadata>` as such is untyped. Before\nacting on the metadata, callers typically convert it to a typed class\nobject. The cost of conversion becomes non-negligible when performed\nrepeatedly (e.g., for each request stream or connection). Typed Metadata\nsolves this problem by allowing filters to register a one time conversion\nlogic for a specific key. Incoming config metadata (via xDS) is converted\nto class objects at config load time. 
Filters can then obtain a typed\nvariant of the metadata at runtime (per request or connection), thereby\neliminating the need for filters to repeatedly convert from\n`ProtobufWkt::Struct` to some internal object during request/connection\nprocessing.\n\nFor example, a filter that desires to have a convenience wrapper class over\nan opaque metadata with key `xxx.service.policy` in `ClusterInfo` could\nregister a factory `ServicePolicyFactory` that inherits from\n`ClusterTypedMetadataFactory`. The factory translates the `ProtobufWkt::Struct`\ninto an instance of `ServicePolicy` class (inherited from\n`FilterState::Object`). When a `Cluster` is created, the associated\n`ServicePolicy` instance will be created and cached. Note that typed\nmetadata is not a new source of metadata. It is obtained from metadata that\nis specified as part of the configuration. A `FilterState::Object` that implements the\n`serializeAsProto` method can be configured in access loggers to log it.\n\nHTTP Per-Route Filter Configuration\n-----------------------------------\n\nIn HTTP routes, :ref:`typed_per_filter_config\n<envoy_v3_api_field_config.route.v3.VirtualHost.typed_per_filter_config>` allows HTTP filters\nto have virtualhost/route-specific configuration in addition to a global\nfilter config common to all virtual hosts. This configuration is converted\nand embedded into the route table. It is up to the HTTP filter\nimplementation to treat the route-specific filter config as a replacement\nto global config or an enhancement. For example, the HTTP fault filter uses\nthis technique to provide per-route fault configuration.\n\n`typed_per_filter_config` is a `map<string, google.protobuf.Any>`. The Connection\nmanager iterates over this map and invokes the filter factory interface\n`createRouteSpecificFilterConfigTyped` to parse/validate the struct value and\nconvert it into a typed class object that’s stored with the route\nitself. 
HTTP filters can then query the route-specific filter config during\nrequest processing.\n\nDynamic State\n^^^^^^^^^^^^^\n\nDynamic state is generated per network connection or per HTTP\nstream. Dynamic state can be mutable if desired by the filter generating\nthe state.\n\n`Envoy::Network::Connection` and `Envoy::Http::Filter` provide a\n`StreamInfo` object that contains information about the current TCP\nconnection and HTTP stream (i.e., HTTP request/response pair)\nrespectively. `StreamInfo` contains a set of fixed attributes as part of\nthe class definition (e.g., HTTP protocol, requested server name, etc.). In\naddition, it provides a facility to store typed objects in a map\n(`map<string, FilterState::Object>`). The state stored per filter can be\neither write-once (immutable), or write-many (mutable).\n"
  },
  {
    "path": "docs/root/intro/arch_overview/arch_overview.rst",
    "content": "Architecture overview\n=====================\n\n.. toctree::\n  :maxdepth: 2\n\n  intro/intro\n  listeners/listeners_toc\n  http/http\n  upstream/upstream\n  observability/observability\n  security/security\n  operations/operations\n  other_features/other_features\n  other_protocols/other_protocols\n  advanced/advanced\n"
  },
  {
    "path": "docs/root/intro/arch_overview/http/http.rst",
    "content": "HTTP\n====\n\n.. toctree::\n  :maxdepth: 2\n\n  http_connection_management\n  http_filters\n  http_routing\n  upgrades\n  http_proxy\n"
  },
  {
    "path": "docs/root/intro/arch_overview/http/http_connection_management.rst",
    "content": ".. _arch_overview_http_conn_man:\n\nHTTP connection management\n==========================\n\nHTTP is such a critical component of modern service oriented architectures that Envoy implements a\nlarge amount of HTTP specific functionality. Envoy has a built in network level filter called the\n:ref:`HTTP connection manager <config_http_conn_man>`. This filter translates raw bytes into HTTP\nlevel messages and events (e.g., headers received, body data received, trailers received, etc.). It\nalso handles functionality common to all HTTP connections and requests such as :ref:`access logging\n<arch_overview_access_logs>`, :ref:`request ID generation and tracing <arch_overview_tracing>`,\n:ref:`request/response header manipulation <config_http_conn_man_headers>`, :ref:`route table\n<arch_overview_http_routing>` management, and :ref:`statistics <config_http_conn_man_stats>`.\n\nHTTP connection manager :ref:`configuration <config_http_conn_man>`.\n\n.. _arch_overview_http_protocols:\n\nHTTP protocols\n--------------\n\nEnvoy’s HTTP connection manager has native support for HTTP/1.1, WebSockets, and HTTP/2. It does not support\nSPDY. Envoy’s HTTP support was designed to first and foremost be an HTTP/2 multiplexing proxy.\nInternally, HTTP/2 terminology is used to describe system components. For example, an HTTP request\nand response take place on a *stream*. A codec API is used to translate from different wire\nprotocols into a protocol agnostic form for streams, requests, responses, etc. In the case of\nHTTP/1.1, the codec translates the serial/pipelining capabilities of the protocol into something\nthat looks like HTTP/2 to higher layers. 
This means that the majority of the code does not need to\nunderstand whether a stream originated on an HTTP/1.1 or HTTP/2 connection.\n\nHTTP header sanitizing\n----------------------\n\nThe HTTP connection manager performs various :ref:`header sanitizing\n<config_http_conn_man_header_sanitizing>` actions for security reasons.\n\nRoute table configuration\n-------------------------\n\nEach :ref:`HTTP connection manager filter <config_http_conn_man>` has an associated :ref:`route\ntable <arch_overview_http_routing>`. The route table can be specified in one of two ways:\n\n* Statically.\n* Dynamically via the :ref:`RDS API <config_http_conn_man_rds>`.\n\n.. _arch_overview_http_retry_plugins:\n\nRetry plugin configuration\n--------------------------\n\nNormally during retries, host selection follows the same process as the original request. Retry plugins\ncan be used to modify this behavior, and they fall into two categories:\n\n* :ref:`Host Predicates <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_host_predicate>`:\n  These predicates can be used to \"reject\" a host, which will cause host selection to be reattempted.\n  Any number of these predicates can be specified, and the host will be rejected if any of the predicates reject the host.\n\n  Envoy supports the following built-in host predicates\n\n  * *envoy.retry_host_predicates.previous_hosts*: This will keep track of previously attempted hosts, and rejects\n    hosts that have already been attempted.\n\n  * *envoy.retry_host_predicates.omit_canary_hosts*: This will reject any host that is marked as a canary host.\n    Hosts are marked by setting ``canary: true`` for the ``envoy.lb`` filter in the endpoint's filter metadata.\n    See :ref:`LbEndpoint <envoy_v3_api_msg_config.endpoint.v3.LbEndpoint>` for more details.\n\n  * *envoy.retry_host_predicates.omit_host_metadata*: This will reject any host based on predefined metadata match criteria. 
\n    See the configuration example below for more details.\n\n* :ref:`Priority Predicates<envoy_v3_api_field_config.route.v3.RetryPolicy.retry_priority>`: These predicates can\n  be used to adjust the priority load used when selecting a priority for a retry attempt. Only one such\n  predicate may be specified.\n\n  Envoy supports the following built-in priority predicates\n\n  * *envoy.retry_priorities.previous_priorities*: This will keep track of previously attempted priorities,\n    and adjust the priority load such that other priorities will be targeted in subsequent retry attempts.\n\nHost selection will continue until either the configured predicates accept the host or a configurable\n:ref:`max attempts <envoy_v3_api_field_config.route.v3.RetryPolicy.host_selection_retry_max_attempts>` has been reached.\n\nThese plugins can be combined to affect both host selection and priority load. Envoy can also be extended\nwith custom retry plugins similar to how custom filters can be added.\n\n\n**Configuration Example**\n\nFor example, to configure retries to prefer hosts that haven't been attempted already, the built-in\n``envoy.retry_host_predicates.previous_hosts`` predicate can be used:\n\n.. code-block:: yaml\n\n  retry_policy:\n    retry_host_predicate:\n    - name: envoy.retry_host_predicates.previous_hosts\n    host_selection_retry_max_attempts: 3\n\nThis will reject hosts previously attempted, retrying host selection a maximum of 3 times. The bound\non attempts is necessary in order to deal with scenarios in which finding an acceptable host is either\nimpossible (no hosts satisfy the predicate) or very unlikely (the only suitable host has a very low\nrelative weight).\n\nTo reject a host based on its metadata, ``envoy.retry_host_predicates.omit_host_metadata`` can be used:\n\n.. 
code-block:: yaml\n\n  retry_policy:\n    retry_host_predicate:\n    - name: envoy.retry_host_predicates.omit_host_metadata\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.retry.host.omit_host_metadata.v3.OmitHostMetadataConfig\n        metadata_match:\n          filter_metadata:\n            envoy.lb:\n              key: value\n\nThis will reject any host with matching (key, value) in its metadata.\n\nTo configure retries to attempt other priorities during retries, the built-in\n``envoy.retry_priorities.previous_priorities`` can be used.\n\n.. code-block:: yaml\n\n  retry_policy:\n    retry_priority:\n      name: envoy.retry_priorities.previous_priorities\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.retry.priority.previous_priorities.v3.PreviousPrioritiesConfig\n        update_frequency: 2\n\nThis will target priorities in subsequent retry attempts that haven't been already used. The ``update_frequency`` parameter decides how\noften the priority load should be recalculated.\n\nThese plugins can be combined, which will exclude both previously attempted hosts as well as\npreviously attempted priorities.\n\n.. code-block:: yaml\n\n  retry_policy:\n    retry_host_predicate:\n    - name: envoy.retry_host_predicates.previous_hosts\n    host_selection_retry_max_attempts: 3\n    retry_priority:\n      name: envoy.retry_priorities.previous_priorities\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.retry.priority.previous_priorities.v3.PreviousPrioritiesConfig\n        update_frequency: 2\n\n.. 
_arch_overview_internal_redirects:\n\nInternal redirects\n--------------------------\n\nEnvoy supports handling 3xx redirects internally, that is capturing a configurable 3xx redirect\nresponse, synthesizing a new request, sending it to the upstream specified by the new route match,\nand returning the redirected response as the response to the original request.\n\nInternal redirects are configured via the :ref:`internal redirect policy\n<envoy_v3_api_field_config.route.v3.RouteAction.internal_redirect_policy>` field in route configuration.\nWhen redirect handling is on, any 3xx response from upstream, that matches\n:ref:`redirect_response_codes\n<envoy_v3_api_field_config.route.v3.InternalRedirectPolicy.redirect_response_codes>`\nis subject to the redirect being handled by Envoy.\n\nFor a redirect to be handled successfully it must pass the following checks:\n\n1. Have a response code matching one of :ref:`redirect_response_codes\n   <envoy_v3_api_field_config.route.v3.InternalRedirectPolicy.redirect_response_codes>`, which is\n   either 302 (by default), or a set of 3xx codes (301, 302, 303, 307, 308).\n2. Have a *location* header with a valid, fully qualified URL.\n3. The request must have been fully processed by Envoy.\n4. The request must not have a body.\n5. :ref:`allow_cross_scheme_redirect\n   <envoy_v3_api_field_config.route.v3.InternalRedirectPolicy.allow_cross_scheme_redirect>` is true (default to false),\n   or the scheme of the downstream request and the *location* header are the same.\n6. The number of previously handled internal redirect within a given downstream request does not\n   exceed :ref:`max internal redirects\n   <envoy_v3_api_field_config.route.v3.InternalRedirectPolicy.max_internal_redirects>`\n   of the route that the request or redirected request is hitting.\n7. 
All :ref:`predicates <envoy_v3_api_field_config.route.v3.InternalRedirectPolicy.predicates>` accept\n   the target route.\n\nAny failure will result in redirect being passed downstream instead.\n\nSince a redirected request may be bounced between different routes, any route in the chain of redirects that\n\n1. does not have internal redirect enabled\n2. or has a :ref:`max internal redirects\n   <envoy_v3_api_field_config.route.v3.InternalRedirectPolicy.max_internal_redirects>`\n   smaller or equal to the redirect chain length when the redirect chain hits it\n3. or is disallowed by any of the :ref:`predicates\n   <envoy_v3_api_field_config.route.v3.InternalRedirectPolicy.predicates>`\n\nwill cause the redirect to be passed downstream.\n\nTwo predicates can be used to create a DAG that defines the redirect chain, the :ref:`previous routes\n<envoy_v3_api_msg_extensions.internal_redirect.previous_routes.v3.PreviousRoutesConfig>` predicate, and\nthe :ref:`allow_listed_routes\n<envoy_v3_api_msg_extensions.internal_redirect.allow_listed_routes.v3.AllowListedRoutesConfig>`.\nSpecifically, the *allow listed routes* predicate defines edges of individual node in the DAG\nand the *previous routes* predicate defines \"visited\" state of the edges, so that loop can be avoided\nif so desired.\n\nA third predicate :ref:`safe_cross_scheme                                      \n<envoy_v3_api_msg_extensions.internal_redirect.safe_cross_scheme.v3.SafeCrossSchemeConfig>`\ncan be used to prevent HTTP -> HTTPS redirect.\n\nOnce the redirect has passed these checks, the request headers which were shipped to the original\nupstream will be modified by:\n\n1. Putting the fully qualified original request URL in the x-envoy-original-url header.\n2. 
Replacing the Authority/Host, Scheme, and Path headers with the values from the Location header.\n\nThe altered request headers will then have a new route selected, be sent through a new filter chain,\nand then shipped upstream with all of the normal Envoy request sanitization taking place.\n\n.. warning::\n  Note that HTTP connection manager sanitization such as clearing untrusted headers will only be\n  applied once. Per-route header modifications will be applied on both the original route and the\n  second route, even if they are the same, so be careful configuring header modification rules to\n  avoid duplicating undesired header values.\n\nA sample redirect flow might look like this:\n\n1. Client sends a GET request for *\\http://foo.com/bar*\n2. Upstream 1 sends a 302 with  *\"location: \\http://baz.com/eep\"*\n3. Envoy is configured to allow redirects on the original route, and sends a new GET request to\n   Upstream 2, to fetch *\\http://baz.com/eep* with the additional request header\n   *\"x-envoy-original-url: \\http://foo.com/bar\"*\n4. Envoy proxies the response data for *\\http://baz.com/eep* to the client as the response to the original\n   request.\n\n\nTimeouts\n--------\n\nVarious configurable timeouts apply to an HTTP connection and its constituent streams. Please see\n:ref:`this FAQ entry <faq_configuration_timeouts>` for an overview of important timeout\nconfiguration.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/http/http_filters.rst",
    "content": ".. _arch_overview_http_filters:\n\nHTTP filters\n============\n\nMuch like the :ref:`network level filter <arch_overview_network_filters>` stack, Envoy supports an\nHTTP level filter stack within the connection manager. Filters can be written that operate on HTTP\nlevel messages without knowledge of the underlying physical protocol (HTTP/1.1, HTTP/2, etc.) or\nmultiplexing capabilities. There are three types of HTTP level filters:\n\n* **Decoder**: Decoder filters are invoked when the connection manager is decoding parts of the\n  request stream (headers, body, and trailers).\n* **Encoder**: Encoder filters are invoked when the connection manager is about to encode parts of\n  the response stream (headers, body, and trailers).\n* **Decoder/Encoder**: Decoder/Encoder filters are invoked both when the connection manager is\n  decoding parts of the request stream and when the connection manager is about to encode parts of\n  the response stream.\n\nThe API for HTTP level filters allows the filters to operate without knowledge of the underlying\nprotocol. Like network level filters, HTTP filters can stop and continue iteration to subsequent\nfilters. This allows for more complex scenarios such as health check handling, calling a rate\nlimiting service, buffering, routing, generating statistics for application traffic such as\nDynamoDB, etc. HTTP level filters can also share state (static and dynamic) among\nthemselves within the context of a single request stream. Refer to :ref:`data sharing\nbetween filters <arch_overview_data_sharing_between_filters>` for more details. Envoy already\nincludes several HTTP level filters that are documented in this architecture overview as well as\nthe :ref:`configuration reference <config_http_filters>`.\n\n.. 
_arch_overview_http_filters_ordering:\n\nFilter ordering\n---------------\n\nFilter ordering in the :ref:`http_filters field <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_filters>`\nmatters. If filters are configured in the following order (and assuming all three filters are\ndecoder/encoder filters):\n\n.. code-block:: yaml\n\n  http_filters:\n    - A\n    - B\n    # The last configured filter has to be a terminal filter, as determined by the\n    # NamedHttpFilterConfigFactory::isTerminalFilter() function. This is most likely the router\n    # filter.\n    - C\n\nThe connection manager will invoke decoder filters in the order: ``A``, ``B``, ``C``.\nOn the other hand, the connection manager will invoke encoder filters in the **reverse**\norder: ``C``, ``B``, ``A``.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/http/http_proxy.rst",
    "content": ".. _arch_overview_http_dynamic_forward_proxy:\n\nHTTP dynamic forward proxy\n==========================\n\nThrough the combination of both an :ref:`HTTP filter <config_http_filters_dynamic_forward_proxy>` and\n:ref:`custom cluster <envoy_v3_api_msg_extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig>`,\nEnvoy supports HTTP dynamic forward proxy. This means that Envoy can perform the role of an HTTP\nproxy without prior knowledge of all configured DNS addresses, while still retaining the vast\nmajority of Envoy's benefits including asynchronous DNS resolution. The implementation works as\nfollows:\n\n* The dynamic forward proxy HTTP filter is used to pause requests if the target DNS host is not\n  already in cache.\n* Envoy will begin asynchronously resolving the DNS address, unblocking any requests waiting on\n  the response when the resolution completes.\n* Any future requests will not be blocked as the DNS address is already in cache. The resolution\n  process works similarly to the :ref:`logical DNS\n  <arch_overview_service_discovery_types_logical_dns>` service discovery type with a single target\n  address being remembered at any given time.\n* All known hosts are stored in the dynamic forward proxy cluster such that they can be displayed\n  in :ref:`admin output <operations_admin_interface>`.\n* A special load balancer will select the right host to use based on the HTTP host/authority header\n  during forwarding.\n* Hosts that have not been used for a period of time are subject to a TTL that will purge them.\n* When the upstream cluster has been configured with a TLS context, Envoy will automatically perform\n  SAN verification for the resolved host name as well as specify the host name via SNI.\n\nThe above implementation details mean that at steady state Envoy can forward a large volume of\nHTTP proxy traffic while all DNS resolution happens asynchronously in the background. 
Additionally,\nall other Envoy filters and extensions can be used in conjunction with dynamic forward proxy support\nincluding authentication, RBAC, rate limiting, etc.\n\nFor further configuration information see the :ref:`HTTP filter configuration documentation\n<config_http_filters_dynamic_forward_proxy>`.\n\nMemory usage details\n--------------------\n\nMemory usage details for Envoy's dynamic forward proxy support are as follows:\n\n* Each resolved host/port pair uses a fixed amount of memory global to the server and shared\n  amongst all workers.\n* Address changes are performed inline using read/write locks and require no host reallocations.\n* Hosts removed via TTL are purged once all active connections stop referring to them and all used\n  memory is regained.\n* The :ref:`max_hosts\n  <envoy_v3_api_field_extensions.common.dynamic_forward_proxy.v3.DnsCacheConfig.max_hosts>` field can\n  be used to limit the number of hosts that the DNS cache will store at any given time.\n* The cluster's :ref:`max_pending_requests\n  <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.max_pending_requests>` circuit breaker can\n  be used to limit the number of requests that are pending waiting for the DNS cache to load\n  a host.\n* Long lived upstream connections can have the underlying logical host expire via TTL while the\n  connection is still open. Upstream requests and connections are still bound by other cluster\n  circuit breakers such as :ref:`max_requests\n  <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.max_requests>`. The current assumption is that\n  host data shared between connections uses a marginal amount of memory compared to the connections\n  and requests themselves, making it not worth controlling independently.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/http/http_routing.rst",
    "content": ".. _arch_overview_http_routing:\n\nHTTP routing\n============\n\nEnvoy includes an HTTP :ref:`router filter <config_http_filters_router>` which can be installed to\nperform advanced routing tasks. This is useful both for handling edge traffic (traditional reverse\nproxy request handling) as well as for building a service to service Envoy mesh (typically via\nrouting on the host/authority HTTP header to reach a particular upstream service cluster). Envoy\nalso has the ability to be configured as forward proxy. In the forward proxy configuration, mesh\nclients can participate by appropriately configuring their http proxy to be an Envoy. At a high\nlevel the router takes an incoming HTTP request, matches it to an upstream cluster, acquires a\n:ref:`connection pool <arch_overview_conn_pool>` to a host in the upstream cluster, and forwards the\nrequest. The router filter supports the following features:\n\n* Virtual hosts that map domains/authorities to a set of routing rules.\n* Prefix and exact path matching rules (both :ref:`case sensitive\n  <envoy_v3_api_field_config.route.v3.RouteMatch.case_sensitive>` and case insensitive). Regex/slug\n  matching is not currently supported, mainly because it makes it difficult/impossible to\n  programmatically determine whether routing rules conflict with each other. 
For this reason we\n  don’t recommend regex/slug routing at the reverse proxy level, however we may add support in the\n  future depending on demand.\n* :ref:`TLS redirection <envoy_v3_api_field_config.route.v3.VirtualHost.require_tls>` at the virtual host\n  level.\n* :ref:`Path <envoy_v3_api_field_config.route.v3.RedirectAction.path_redirect>`/:ref:`host\n  <envoy_v3_api_field_config.route.v3.RedirectAction.host_redirect>` redirection at the route level.\n* :ref:`Direct (non-proxied) HTTP responses <arch_overview_http_routing_direct_response>`\n  at the route level.\n* :ref:`Explicit host rewriting <envoy_v3_api_field_extensions.filters.http.aws_request_signing.v3.AwsRequestSigning.host_rewrite>`.\n* :ref:`Automatic host rewriting <envoy_v3_api_field_config.route.v3.RouteAction.auto_host_rewrite>` based on\n  the DNS name of the selected upstream host.\n* :ref:`Prefix rewriting <envoy_v3_api_field_config.route.v3.RedirectAction.prefix_rewrite>`.\n* :ref:`Path rewriting using a regular expression and capture groups <envoy_v3_api_field_config.route.v3.RouteAction.regex_rewrite>`.\n* :ref:`Request retries <arch_overview_http_routing_retry>` specified either via HTTP header or via\n  route configuration.\n* Request timeout specified either via :ref:`HTTP\n  header <config_http_filters_router_headers_consumed>` or via :ref:`route configuration\n  <envoy_v3_api_field_config.route.v3.RouteAction.timeout>`.\n* :ref:`Request hedging <arch_overview_http_routing_hedging>` for retries in response to a request (per try) timeout.\n* Traffic shifting from one upstream cluster to another via :ref:`runtime values\n  <envoy_v3_api_field_config.route.v3.RouteMatch.runtime_fraction>` (see :ref:`traffic shifting/splitting\n  <config_http_conn_man_route_table_traffic_splitting>`).\n* Traffic splitting across multiple upstream clusters using :ref:`weight/percentage-based routing\n  <envoy_v3_api_field_config.route.v3.RouteAction.weighted_clusters>` (see :ref:`traffic 
shifting/splitting\n  <config_http_conn_man_route_table_traffic_splitting_split>`).\n* Arbitrary header matching :ref:`routing rules <envoy_v3_api_msg_config.route.v3.HeaderMatcher>`.\n* Virtual cluster specifications. A virtual cluster is specified at the virtual host level and is\n  used by Envoy to generate additional statistics on top of the standard cluster level ones. Virtual\n  clusters can use regex matching.\n* :ref:`Priority <arch_overview_http_routing_priority>` based routing.\n* :ref:`Hash policy <envoy_v3_api_field_config.route.v3.RouteAction.hash_policy>` based routing.\n* :ref:`Absolute urls <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_protocol_options>` are supported for non-tls forward proxies.\n\n.. _arch_overview_http_routing_route_scope:\n\nRoute Scope\n-----------\n\nScoped routing enables Envoy to put constraints on search space of domains and route rules.\nA :ref:`Route Scope<envoy_api_msg_ScopedRouteConfiguration>` associates a key with a :ref:`route table <arch_overview_http_routing_route_table>`.\nFor each request, a scope key is computed dynamically by the HTTP connection manager to pick the :ref:`route table<envoy_api_msg_RouteConfiguration>`.\nRouteConfiguration associated with scope can be loaded on demand with :ref:`v3 API reference <envoy_v3_api_msg_extensions.filters.http.on_demand.v3.OnDemand>` configured and on demand field in protobuf set to true.\n\nThe Scoped RDS (SRDS) API contains a set of :ref:`Scopes <envoy_v3_api_msg_config.route.v3.ScopedRouteConfiguration>` resources, each defining independent routing configuration,\nalong with a :ref:`ScopeKeyBuilder <envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder>`\ndefining the key construction algorithm used by Envoy to look up the scope corresponding to each request. 
\n\nFor example, for the following scoped route configuration, Envoy will look into the \"addr\" header value, split the header value by \";\" first, and use the first value for key 'x-foo-key' as the scope key.\nIf the \"addr\" header value is \"foo=1;x-foo-key=127.0.0.1;x-bar-key=1.1.1.1\", then \"127.0.0.1\" will be computed as the scope key to look up for corresponding route configuration.\n\n.. code-block:: yaml\n\n  name: scope_by_addr\n  fragments:\n    - header_value_extractor:\n        name: Addr\n        element_separator: ;\n        element:\n          key: x-foo-key\n          separator: =\n\n.. _arch_overview_http_routing_route_table:\n\nFor a key to match a :ref:`ScopedRouteConfiguration<envoy_v3_api_msg_config.route.v3.ScopedRouteConfiguration>`, the number of fragments in the computed key has to match that of\nthe :ref:`ScopedRouteConfiguration<envoy_v3_api_msg_config.route.v3.ScopedRouteConfiguration>`.\nThen fragments are matched in order. A missing fragment(treated as NULL) in the built key makes the request unable to match any scope,\ni.e. no route entry can be found for the request.\n\nRoute table\n-----------\n\nThe :ref:`configuration <config_http_conn_man>` for the HTTP connection manager owns the :ref:`route\ntable <envoy_v3_api_msg_config.route.v3.RouteConfiguration>` that is used by all configured HTTP filters. Although the\nrouter filter is the primary consumer of the route table, other filters also have access in case\nthey want to make decisions based on the ultimate destination of the request. For example, the built\nin rate limit filter consults the route table to determine whether the global rate limit service\nshould be called based on the route. The connection manager makes sure that all calls to acquire a\nroute are stable for a particular request, even if the decision involves randomness (e.g. in the\ncase of a runtime configuration route rule).\n\n.. 
_arch_overview_http_routing_retry:\n\nRetry semantics\n---------------\n\nEnvoy allows retries to be configured both in the :ref:`route configuration\n<envoy_v3_api_field_config.route.v3.RouteAction.retry_policy>` as well as for specific requests via :ref:`request\nheaders <config_http_filters_router_headers_consumed>`. The following configurations are possible:\n\n* **Maximum number of retries**: Envoy will continue to retry any number of times. The intervals between\n  retries are decided either by an exponential backoff algorithm (the default), or based on feedback\n  from the upstream server via headers (if present). Additionally, *all retries are contained within the\n  overall request timeout*. This avoids long request times due to a large number of retries.\n* **Retry conditions**: Envoy can retry on different types of conditions depending on application\n  requirements. For example, network failure, all 5xx response codes, idempotent 4xx response codes,\n  etc.\n* **Retry budgets**: Envoy can limit the proportion of active requests via :ref:`retry budgets <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.retry_budget>` that can be retried to\n  prevent their contribution to large increases in traffic volume.\n* **Host selection retry plugins**: Envoy can be configured to apply additional logic to the host\n  selection logic when selecting hosts for retries. Specifying a\n  :ref:`retry host predicate <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_host_predicate>`\n  allows for reattempting host selection when certain hosts are selected (e.g. when an already\n  attempted host is selected), while a\n  :ref:`retry priority <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_priority>` can be\n  configured to adjust the priority load used when selecting a priority for retries.\n\nNote that Envoy retries requests when :ref:`x-envoy-overloaded\n<config_http_filters_router_x-envoy-overloaded_set>` is present. 
It is recommended to either configure\n:ref:`retry budgets (preferred) <envoy_api_field_cluster.CircuitBreakers.Thresholds.retry_budget>` or set\n:ref:`maximum active retries circuit breaker <arch_overview_circuit_break>` to an appropriate value to avoid retry storms.\n\n.. _arch_overview_http_routing_hedging:\n\nRequest Hedging\n---------------\n\nEnvoy supports request hedging which can be enabled by specifying a :ref:`hedge\npolicy <envoy_v3_api_msg_config.route.v3.HedgePolicy>`. This means that Envoy will race\nmultiple simultaneous upstream requests and return the response associated with\nthe first acceptable response headers to the downstream. The retry policy is\nused to determine whether a response should be returned or whether more\nresponses should be awaited.\n\nCurrently hedging can only be performed in response to a request timeout. This\nmeans that a retry request will be issued without canceling the initial\ntimed-out request and a late response will be awaited. The first \"good\"\nresponse according to retry policy will be returned downstream.\n\nThe implementation ensures that the same upstream request is not retried twice.\nThis might otherwise occur if a request times out and then results in a 5xx\nresponse, creating two retriable events.\n\n.. _arch_overview_http_routing_priority:\n\nPriority routing\n----------------\n\nEnvoy supports priority routing at the :ref:`route <envoy_v3_api_msg_config.route.v3.Route>` level.\nThe current priority implementation uses different :ref:`connection pool <arch_overview_conn_pool>`\nand :ref:`circuit breaking <config_cluster_manager_cluster_circuit_breakers>` settings for each\npriority level. This means that even for HTTP/2 requests, two physical connections will be used to\nan upstream host. In the future Envoy will likely support true HTTP/2 priority over a single\nconnection.\n\nThe currently supported priorities are *default* and *high*.\n\n.. 
_arch_overview_http_routing_direct_response:\n\nDirect responses\n----------------\n\nEnvoy supports the sending of \"direct\" responses. These are preconfigured HTTP responses\nthat do not require proxying to an upstream server.\n\nThere are two ways to specify a direct response in a Route:\n\n* Set the :ref:`direct_response <envoy_v3_api_field_config.route.v3.Route.direct_response>` field.\n  This works for all HTTP response statuses.\n* Set the :ref:`redirect <envoy_v3_api_field_config.route.v3.Route.redirect>` field. This works for\n  redirect response statuses only, but it simplifies the setting of the *Location* header.\n\nA direct response has an HTTP status code and an optional body. The Route configuration\ncan specify the response body inline or specify the pathname of a file containing the\nbody. If the Route configuration specifies a file pathname, Envoy will read the file\nupon configuration load and cache the contents.\n\n.. attention::\n\n   If a response body is specified, it must be no more than 4KB in size, regardless of\n   whether it is provided inline or in a file. Envoy currently holds the entirety of the\n   body in memory, so the 4KB limit is intended to keep the proxy's memory footprint\n   from growing too large.\n\nIf **response_headers_to_add** has been set for the Route or the enclosing Virtual Host,\nEnvoy will include the specified headers in the direct HTTP response.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/http/upgrades.rst",
    "content": ".. _arch_overview_upgrades:\n\nHTTP upgrades\n===========================\n\nEnvoy Upgrade support is intended mainly for WebSocket and CONNECT support, but may be used for\narbitrary upgrades as well. Upgrades pass both the HTTP headers and the upgrade payload\nthrough an HTTP filter chain. One may configure the\n:ref:`upgrade_configs <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.upgrade_configs>`\nwith or without custom filter chains. If only the\n:ref:`upgrade_type <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.upgrade_type>`\nis specified, both the upgrade headers, any request and response body, and HTTP data payload will\npass through the default HTTP filter chain. To avoid the use of HTTP-only filters for upgrade payload,\none can set up custom\n:ref:`filters <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.filters>`\nfor the given upgrade type, up to and including only using the router filter to send the HTTP\ndata upstream.\n\nUpgrades can be enabled or disabled on a :ref:`per-route <envoy_v3_api_field_config.route.v3.RouteAction.upgrade_configs>` basis.\nAny per-route enabling/disabling automatically overrides HttpConnectionManager configuration as\nlaid out below, but custom filter chains can only be configured on a per-HttpConnectionManager basis.\n\n+-----------------------+-------------------------+-------------------+\n| *HCM Upgrade Enabled* | *Route Upgrade Enabled* | *Upgrade Enabled* |\n+=======================+=========================+===================+\n| T (Default)           | T (Default)             | T                 |\n+-----------------------+-------------------------+-------------------+\n| T (Default)           | F                       | F                 |\n+-----------------------+-------------------------+-------------------+\n| F              
       | T (Default)             | T                 |\n+-----------------------+-------------------------+-------------------+\n| F                     | F                       | F                 |\n+-----------------------+-------------------------+-------------------+\n\nNote that the statistics for upgrades are all bundled together so WebSocket and other upgrades\n:ref:`statistics <config_http_conn_man_stats>` are tracked by stats such as\ndownstream_cx_upgrades_total and downstream_cx_upgrades_active\n\nWebsocket over HTTP/2 hops\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nWhile HTTP/2 support for WebSockets is off by default, Envoy does support tunneling WebSockets over\nHTTP/2 streams for deployments that prefer a uniform HTTP/2 mesh throughout; this enables, for example,\na deployment of the form:\n\n[Client] ---- HTTP/1.1 ---- [Front Envoy] ---- HTTP/2 ---- [Sidecar Envoy ---- H1  ---- App]\n\nIn this case, if a client is for example using WebSocket, we want the Websocket to arrive at the\nupstream server functionally intact, which means it needs to traverse the HTTP/2 hop.\n\nThis is accomplished via `Extended CONNECT (RFC8441) <https://tools.ietf.org/html/rfc8441>`_ support,\nturned on by setting :ref:`allow_connect <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.allow_connect>`\ntrue at the second layer Envoy. The\nWebSocket request will be transformed into an HTTP/2 CONNECT stream, with :protocol header\nindicating the original upgrade, traverse the HTTP/2 hop, and be downgraded back into an HTTP/1\nWebSocket Upgrade. This same Upgrade-CONNECT-Upgrade transformation will be performed on any\nHTTP/2 hop, with the documented flaw that the HTTP/1.1 method is always assumed to be GET.\nNon-WebSocket upgrades are allowed to use any valid HTTP method (i.e. 
POST) and the current\nupgrade/downgrade mechanism will drop the original method and transform the Upgrade request to\na GET method on the final Envoy-Upstream hop.\n\nNote that the HTTP/2 upgrade path has very strict HTTP/1.1 compliance, so will not proxy WebSocket\nupgrade requests or responses with bodies.\n\nCONNECT support\n^^^^^^^^^^^^^^^\n\nEnvoy CONNECT support is off by default (Envoy will send an internally generated 403 in response to\nCONNECT requests). CONNECT support can be enabled via the upgrade options described above, setting\nthe upgrade value to the special keyword \"CONNECT\".\n\nWhile for HTTP/2, CONNECT request may have a path, in general and for HTTP/1.1 CONNECT requests do\nnot have a path, and can only be matched using a\n:ref:`connect_matcher <envoy_v3_api_msg_config.route.v3.RouteMatch.ConnectMatcher>`\n\nEnvoy can handle CONNECT in one of two ways, either proxying the CONNECT headers through as if they\nwere any other request, and letting the upstream terminate the CONNECT request, or by terminating the\nCONNECT request, and forwarding the payload as raw TCP data. When CONNECT upgrade configuration is\nset up, the default behavior is to proxy the CONNECT request, treating it like any other request using\nthe upgrade path.\nIf termination is desired, this can be accomplished by setting\n:ref:`connect_config <envoy_v3_api_field_config.route.v3.RouteAction.UpgradeConfig.connect_config>`\nIf that message is present for CONNECT requests, the router filter will strip the request headers,\nand forward the HTTP payload upstream. On receipt of initial TCP data from upstream, the router\nwill synthesize 200 response headers, and then forward the TCP data as the HTTP response body.\n\n.. warning::\n  This mode of CONNECT support can create major security holes if not configured correctly, as the upstream\n  will be forwarded *unsanitized* headers if they are in the body payload. 
Please use with caution\n\nTunneling TCP over HTTP/2\n^^^^^^^^^^^^^^^^^^^^^^^^^\nEnvoy also has support for transforming raw TCP into HTTP/2 CONNECT requests. This can be used to\nproxy multiplexed TCP over pre-warmed secure connections and amortize the cost of any TLS handshake.\nAn example set up proxying SMTP would look something like this\n\n[SMTP Upstream] --- raw SMTP --- [L2 Envoy]  --- SMTP tunneled over HTTP/2  --- [L1 Envoy]  --- raw SMTP  --- [Client]\n\nExamples of such a set up can be found in the Envoy example config :repo:`directory <configs/>`\nIf you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.v3.yaml --base-id 1`\nand `bazel-bin/source/exe/envoy-static --config-path  configs/terminate_connect.v3.yaml`\nyou will be running two Envoys, the first listening for TCP traffic on port 10000 and encapsulating it in an HTTP/2\nCONNECT request, and the second listening for HTTP/2 on 10001, stripping the CONNECT headers, and forwarding the\noriginal TCP upstream, in this case to google.com.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/intro/intro.rst",
    "content": "Introduction\n============\n\n.. toctree::\n  :maxdepth: 2\n\n  terminology\n  threading_model\n"
  },
  {
    "path": "docs/root/intro/arch_overview/intro/terminology.rst",
    "content": "Terminology\n===========\n\nA few definitions before we dive into the main architecture documentation. Some of the definitions\nare slightly contentious within the industry, however they are how Envoy uses them throughout the\ndocumentation and codebase, so *c'est la vie*.\n\n**Host**: An entity capable of network communication (application on a mobile phone, server, etc.).\nIn this documentation a host is a logical network application. A physical piece of hardware could\npossibly have multiple hosts running on it as long as each of them can be independently addressed.\n\n**Downstream**: A downstream host connects to Envoy, sends requests, and receives responses.\n\n**Upstream**: An upstream host receives connections and requests from Envoy and returns responses.\n\n**Listener**: A listener is a named network location (e.g., port, unix domain socket, etc.) that can\nbe connected to by downstream clients. Envoy exposes one or more listeners that downstream hosts\nconnect to.\n\n**Cluster**: A cluster is a group of logically similar upstream hosts that Envoy connects to. Envoy\ndiscovers the members of a cluster via :ref:`service discovery <arch_overview_service_discovery>`.\nIt optionally determines the health of cluster members via :ref:`active health checking\n<arch_overview_health_checking>`. The cluster member that Envoy routes a request to is determined\nby the :ref:`load balancing policy <arch_overview_load_balancing>`.\n\n**Mesh**: A group of hosts that coordinate to provide a consistent network topology. In this\ndocumentation, an “Envoy mesh” is a group of Envoy proxies that form a message passing substrate for\na distributed system comprised of many different services and application platforms.\n\n**Runtime configuration**: Out of band realtime configuration system deployed alongside Envoy.\nConfiguration settings can be altered that will affect operation without needing to restart Envoy or\nchange the primary configuration.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/intro/threading_model.rst",
    "content": ".. _arch_overview_threading:\n\nThreading model\n===============\n\nEnvoy uses a single process with multiple threads architecture. A single *primary* thread controls\nvarious sporadic coordination tasks while some number of *worker* threads perform listening,\nfiltering, and forwarding. Once a connection is accepted by a listener, the connection spends the\nrest of its lifetime bound to a single worker thread. This allows the majority of Envoy to be\nlargely single threaded (embarrassingly parallel) with a small amount of more complex code handling\ncoordination between the worker threads. Generally Envoy is written to be 100% non-blocking and for\nmost workloads we recommend configuring the number of worker threads to be equal to the number of\nhardware threads on the machine.\n\nListener connection balancing\n-----------------------------\n\nBy default, there is no coordination between worker threads. This means that all worker threads\nindependently attempt to accept connections on each listener and rely on the kernel to perform\nadequate balancing between threads. For most workloads, the kernel does a very good job of\nbalancing incoming connections. However, for some workloads, particularly those that have a small\nnumber of very long lived connections (e.g., service mesh HTTP2/gRPC egress), it may be desirable\nto have Envoy forcibly balance connections between worker threads. To support this behavior,\nEnvoy allows for different types of :ref:`connection balancing\n<envoy_v3_api_field_config.listener.v3.Listener.connection_balance_config>` to be configured on each :ref:`listener\n<arch_overview_listeners>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/listeners/dns_filter.rst",
    "content": "DNS Filter\n==========\n\nEnvoy supports responding to DNS requests by configuring a :ref:`UDP listener DNS Filter\n<config_udp_listener_filters_dns_filter>`.\n\nThe DNS filter supports responding to forward queries for A and AAAA records. The answers are\ndiscovered from statically configured resources, clusters, or external DNS servers. The filter\nwill return DNS responses up to to 512 bytes. If domains are configured with multiple addresses,\nor clusters with multiple endpoints, Envoy will return each discovered address up to the\naforementioned size limit.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/listeners/listener_filters.rst",
    "content": ".. _arch_overview_listener_filters:\n\nListener filters\n================\n\nAs discussed in the :ref:`listener <arch_overview_listeners>` section, listener filters may be\nused to manipulate connection metadata. The main purpose of listener filters is to make adding\nfurther system integration functions easier by not requiring changes to Envoy core functionality,\nand also make interaction between multiple such features more explicit.\n\nThe API for listener filters is relatively simple since ultimately these filters operate on newly\naccepted sockets. Filters in the chain can stop and subsequently continue iteration to\nfurther filters. This allows for more complex scenarios such as calling a :ref:`rate limiting\nservice <arch_overview_global_rate_limit>`, etc. Envoy already includes several listener filters that\nare documented in this architecture overview as well as the :ref:`configuration reference\n<config_listener_filters>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/listeners/listeners.rst",
    "content": ".. _arch_overview_listeners:\n\nListeners\n=========\n\nThe Envoy configuration supports any number of listeners within a single process. Generally we\nrecommend running a single Envoy per machine regardless of the number of configured listeners. This\nallows for easier operation and a single source of statistics. Envoy supports both TCP and UDP\nlisteners.\n\nTCP\n---\n\nEach listener is independently configured with some number :ref:`filter chains\n<envoy_v3_api_msg_config.listener.v3.FilterChain>`, where an individual chain is selected based on its\n:ref:`match criteria <envoy_v3_api_msg_config.listener.v3.FilterChainMatch>`. An individual filter chain is\ncomposed of one or more network level (L3/L4) :ref:`filters <arch_overview_network_filters>`. When\na new connection is received on a listener, the appropriate filter chain is selected, and the\nconfigured connection local filter stack is instantiated and begins processing subsequent events.\nThe generic listener architecture is used to perform the vast majority of different proxy tasks that\nEnvoy is used for (e.g., :ref:`rate limiting <arch_overview_global_rate_limit>`, :ref:`TLS client\nauthentication <arch_overview_ssl_auth_filter>`, :ref:`HTTP connection management\n<arch_overview_http_conn_man>`, MongoDB :ref:`sniffing <arch_overview_mongo>`, raw :ref:`TCP proxy\n<arch_overview_tcp_proxy>`, etc.).\n\nListeners are optionally also configured with some number of :ref:`listener filters\n<arch_overview_listener_filters>`. These filters are processed before the network level filters,\nand have the opportunity to manipulate the connection metadata, usually to influence how the\nconnection is processed by later filters or clusters.\n\nListeners can also be fetched dynamically via the :ref:`listener discovery service (LDS)\n<config_listeners_lds>`.\n\nListener :ref:`configuration <config_listeners>`.\n\n.. 
_arch_overview_listeners_udp:\n\nUDP\n---\n\nEnvoy also supports UDP listeners and specifically :ref:`UDP listener filters\n<config_udp_listener_filters>`. UDP listener filters are instantiated once per worker and are global\nto that worker. Each listener filter processes each UDP datagram that is received by the worker\nlistening on the port. In practice, UDP listeners are configured with the SO_REUSEPORT kernel option\nwhich will cause the kernel to consistently hash each UDP 4-tuple to the same worker. This allows a\nUDP listener filter to be \"session\" oriented if it so desires. A built-in example of this\nfunctionality is the :ref:`UDP proxy <config_udp_listener_filters_udp_proxy>` listener filter.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/listeners/listeners_toc.rst",
    "content": "Listeners\n=========\n\n.. toctree::\n  :maxdepth: 2\n\n  listeners\n  listener_filters\n  network_filters\n  tcp_proxy\n  udp_proxy\n  dns_filter\n"
  },
  {
    "path": "docs/root/intro/arch_overview/listeners/network_filters.rst",
    "content": ".. _arch_overview_network_filters:\n\nNetwork (L3/L4) filters\n=======================\n\nAs discussed in the :ref:`listener <arch_overview_listeners>` section, network level (L3/L4) filters\nform the core of Envoy connection handling. The filter API allows for different sets of filters to\nbe mixed and matched and attached to a given listener. There are three different types of network\nfilters:\n\n* **Read**: Read filters are invoked when Envoy receives data from a downstream connection.\n* **Write**: Write filters are invoked when Envoy is about to send data to a downstream connection.\n* **Read/Write**: Read/Write filters are invoked both when Envoy receives data from a downstream\n  connection and when it is about to send data to a downstream connection.\n\nThe API for network level filters is relatively simple since ultimately the filters operate on raw\nbytes and a small number of connection events (e.g., TLS handshake complete, connection disconnected\nlocally or remotely, etc.). Filters in the chain can stop and subsequently continue iteration to\nfurther filters. This allows for more complex scenarios such as calling a :ref:`rate limiting\nservice <arch_overview_global_rate_limit>`, etc. Network level filters can also share state (static and\ndynamic) among themselves within the context of a single downstream connection. Refer to\n:ref:`data sharing between filters <arch_overview_data_sharing_between_filters>` for more details.\nEnvoy already includes several network level filters that are documented in this architecture\noverview as well as the :ref:`configuration reference <config_network_filters>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/listeners/tcp_proxy.rst",
    "content": ".. _arch_overview_tcp_proxy:\n\nTCP proxy\n=========\n\nSince Envoy is fundamentally written as an L3/L4 server, basic L3/L4 proxy is easily implemented. The\nTCP proxy filter performs basic 1:1 network connection proxy between downstream clients and upstream\nclusters. It can be used by itself as an stunnel replacement, or in conjunction with other filters\nsuch as the :ref:`MongoDB filter <arch_overview_mongo>` or the :ref:`rate limit\n<config_network_filters_rate_limit>` filter.\n\nThe TCP proxy filter will respect the\n:ref:`connection limits <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.max_connections>`\nimposed by each upstream cluster's global resource manager. The TCP proxy filter checks with the\nupstream cluster's resource manager if it can create a connection without going over that cluster's\nmaximum number of connections; if it can't, the TCP proxy will not make the connection.\n\nTCP proxy filter :ref:`configuration reference <config_network_filters_tcp_proxy>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/listeners/udp_proxy.rst",
    "content": ".. _arch_overview_udp_proxy:\n\nUDP proxy\n=========\n\nEnvoy supports UDP proxy via the :ref:`UDP proxy listener filter\n<config_udp_listener_filters_udp_proxy>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/observability/access_logging.rst",
    "content": ".. _arch_overview_access_logs:\n\nAccess logging\n==============\n\nThe :ref:`HTTP connection manager <arch_overview_http_conn_man>` and\n:ref:`tcp proxy <arch_overview_tcp_proxy>` support extensible access logging with the following\nfeatures:\n\n* Any number of access logs per connection stream.\n* Customizable access log filters that allow different types of requests and responses to be written\n  to different access logs.\n\nDownstream connection access logging can be enabled using :ref:`listener access\nlogs<envoy_v3_api_field_config.listener.v3.Listener.access_log>`. The listener access logs complement\nHTTP request access logging and can be enabled separately and independently from\nfilter access logs.\n\n.. _arch_overview_access_log_filters:\n\nAccess log filters\n------------------\n\nEnvoy supports several built-in\n:ref:`access log filters<envoy_v3_api_msg_config.accesslog.v3.AccessLogFilter>` and\n:ref:`extension filters<envoy_v3_api_field_config.accesslog.v3.AccessLogFilter.extension_filter>`\nthat are registered at runtime.\n\n.. _arch_overview_access_logs_sinks:\n\nAccess logging sinks\n--------------------\n\nEnvoy supports pluggable access logging sinks. The currently supported sinks are:\n\nFile\n****\n\n* Asynchronous IO flushing architecture. Access logging will never block the main network processing\n  threads.\n* Customizable access log formats using predefined fields as well as arbitrary HTTP request and\n  response headers.\n\ngRPC\n****\n\n* Envoy can send access log messages to a gRPC access logging service.\n\nFurther reading\n---------------\n\n* Access log :ref:`configuration <config_access_log>`.\n* File :ref:`access log sink <envoy_v3_api_msg_extensions.access_loggers.file.v3.FileAccessLog>`.\n* gRPC :ref:`Access Log Service (ALS) <envoy_v3_api_msg_extensions.access_loggers.grpc.v3.HttpGrpcAccessLogConfig>`\n  sink.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/observability/observability.rst",
    "content": "Observability\n=============\n\n.. toctree::\n  :maxdepth: 2\n\n  statistics\n  access_logging\n  tracing\n"
  },
  {
    "path": "docs/root/intro/arch_overview/observability/statistics.rst",
    "content": ".. _arch_overview_statistics:\n\nStatistics\n==========\n\nOne of the primary goals of Envoy is to make the network understandable. Envoy emits a large number\nof statistics depending on how it is configured. Generally the statistics fall into three categories:\n\n* **Downstream**: Downstream statistics relate to incoming connections/requests. They are emitted by\n  listeners, the HTTP connection manager, the TCP proxy filter, etc.\n* **Upstream**: Upstream statistics relate to outgoing connections/requests. They are emitted by\n  connection pools, the router filter, the TCP proxy filter, etc.\n* **Server**: Server statistics describe how the Envoy server instance is working. Statistics like\n  server uptime or amount of allocated memory are categorized here.\n\nA single proxy scenario typically involves both downstream and upstream statistics. The two types\ncan be used to get a detailed picture of that particular network hop. Statistics from the entire\nmesh give a very detailed picture of each hop and overall network health. The statistics emitted are\ndocumented in detail in the operations guide.\n\nAs of the v2 API, Envoy has the ability to support custom, pluggable sinks. :ref:`A\nfew standard sink implementations<envoy_v3_api_msg_config.metrics.v3.StatsSink>` are included in Envoy.\nSome sinks also support emitting statistics with tags/dimensions.\n\nWithin Envoy and throughout the documentation, statistics are identified by a canonical string\nrepresentation. The dynamic portions of these strings are stripped to become tags. Users can\nconfigure this behavior via :ref:`the Tag Specifier configuration <envoy_v3_api_msg_config.metrics.v3.TagSpecifier>`.\n\nEnvoy emits three types of values as statistics:\n\n* **Counters**: Unsigned integers that only increase and never decrease. E.g., total requests.\n* **Gauges**: Unsigned integers that both increase and decrease. 
E.g., currently active requests.\n* **Histograms**: Unsigned integers that are part of a stream of values that are then aggregated by\n  the collector to ultimately yield summarized percentile values. E.g., upstream request time.\n\nInternally, counters and gauges are batched and periodically flushed to improve performance.\nHistograms are written as they are received. Note: what were previously referred to as timers have\nbecome histograms as the only difference between the two representations was the units.\n\n* :ref:`v3 API reference <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.stats_sinks>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/observability/tracing.rst",
    "content": ".. _arch_overview_tracing:\n\nTracing\n=======\n\nOverview\n--------\nDistributed tracing allows developers to obtain visualizations of call flows in large service\noriented architectures. It can be invaluable in understanding serialization, parallelism, and\nsources of latency. Envoy supports three features related to system wide tracing:\n\n* **Request ID generation**: Envoy will generate UUIDs when needed and populate the\n  :ref:`config_http_conn_man_headers_x-request-id` HTTP header. Applications can forward the\n  x-request-id header for unified logging as well as tracing. The behavior can be configured on per :ref:`HTTP connection manager<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_id_extension>` basis using an extension.\n* **Client trace ID joining**: The :ref:`config_http_conn_man_headers_x-client-trace-id` header can\n  be used to join untrusted request IDs to the trusted internal\n  :ref:`config_http_conn_man_headers_x-request-id`.\n* **External trace service integration**: Envoy supports pluggable external trace visualization\n  providers, that are divided into two subgroups:\n\n  - External tracers which are part of the Envoy code base, like `LightStep <https://lightstep.com/>`_,\n    `Zipkin <https://zipkin.io/>`_  or any Zipkin compatible backends (e.g. `Jaeger <https://github.com/jaegertracing/>`_), and\n    `Datadog <https://datadoghq.com>`_.\n  - External tracers which come as a third party plugin, like `Instana <https://www.instana.com/blog/monitoring-envoy-proxy-microservices/>`_.\n\nSupport for other tracing providers would not be difficult to add.\n\nHow to initiate a trace\n-----------------------\nThe HTTP connection manager that handles the request must have the :ref:`tracing\n<envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing>` object set. 
There are several ways tracing can be\ninitiated:\n\n* By an external client via the :ref:`config_http_conn_man_headers_x-client-trace-id`\n  header.\n* By an internal service via the :ref:`config_http_conn_man_headers_x-envoy-force-trace`\n  header.\n* Randomly sampled via the :ref:`random_sampling <config_http_conn_man_runtime_random_sampling>`\n  runtime setting.\n\nThe router filter is also capable of creating a child span for egress calls via the\n:ref:`start_child_span <envoy_v3_api_field_extensions.filters.http.router.v3.Router.start_child_span>` option.\n\nTrace context propagation\n-------------------------\nEnvoy provides the capability for reporting tracing information regarding communications between\nservices in the mesh. However, to be able to correlate the pieces of tracing information generated\nby the various proxies within a call flow, the services must propagate certain trace context between\nthe inbound and outbound requests.\n\nWhichever tracing provider is being used, the service should propagate the\n:ref:`config_http_conn_man_headers_x-request-id` to enable logging across the invoked services\nto be correlated.\n\nThe tracing providers also require additional context, to enable the parent/child relationships\nbetween the spans (logical units of work) to be understood. This can be achieved by using the\nLightStep (via OpenTracing API) or Zipkin tracer directly within the service itself, to extract the\ntrace context from the inbound request and inject it into any subsequent outbound requests. 
This\napproach would also enable the service to create additional spans, describing work being done\ninternally within the service, that may be useful when examining the end-to-end trace.\n\nAlternatively the trace context can be manually propagated by the service:\n\n* When using the LightStep tracer, Envoy relies on the service to propagate the\n  :ref:`config_http_conn_man_headers_x-ot-span-context` HTTP header\n  while sending HTTP requests to other services.\n\n* When using the Zipkin tracer, Envoy relies on the service to propagate the\n  B3 HTTP headers (\n  :ref:`config_http_conn_man_headers_x-b3-traceid`,\n  :ref:`config_http_conn_man_headers_x-b3-spanid`,\n  :ref:`config_http_conn_man_headers_x-b3-parentspanid`,\n  :ref:`config_http_conn_man_headers_x-b3-sampled`, and\n  :ref:`config_http_conn_man_headers_x-b3-flags`). The :ref:`config_http_conn_man_headers_x-b3-sampled`\n  header can also be supplied by an external client to either enable or disable tracing for a particular\n  request. In addition, the single :ref:`config_http_conn_man_headers_b3` header propagation format is\n  supported, which is a more compressed format.\n\n* When using the Datadog tracer, Envoy relies on the service to propagate the\n  Datadog-specific HTTP headers (\n  :ref:`config_http_conn_man_headers_x-datadog-trace-id`,\n  :ref:`config_http_conn_man_headers_x-datadog-parent-id`,\n  :ref:`config_http_conn_man_headers_x-datadog-sampling-priority`).\n\nWhat data each trace contains\n-----------------------------\nAn end-to-end trace is comprised of one or more spans. A\nspan represents a logical unit of work that has a start time and duration and can contain metadata\nassociated with it. 
Each span generated by Envoy contains the following data:\n\n* Originating service cluster set via :option:`--service-cluster`.\n* Start time and duration of the request.\n* Originating host set via :option:`--service-node`.\n* Downstream cluster set via the :ref:`config_http_conn_man_headers_downstream-service-cluster`\n  header.\n* HTTP request URL, method, protocol and user-agent.\n* Additional custom tags set via :ref:`custom_tags\n  <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.custom_tags>`.\n* Upstream cluster name and address.\n* HTTP response status code.\n* GRPC response status and message (if available).\n* An error tag when HTTP status is 5xx or GRPC status is not \"OK\".\n* Tracing system-specific metadata.\n\nThe span also includes a name (or operation) which by default is defined as the host of the invoked\nservice. However this can be customized using a :ref:`envoy_v3_api_msg_config.route.v3.Decorator` on\nthe route. The name can also be overridden using the\n:ref:`config_http_filters_router_x-envoy-decorator-operation` header.\n\nEnvoy automatically sends spans to tracing collectors. Depending on the tracing collector,\nmultiple spans are stitched together using common information such as the globally unique\nrequest ID :ref:`config_http_conn_man_headers_x-request-id` (LightStep) or\nthe trace ID configuration (Zipkin and Datadog). See\n:ref:`v3 API reference <envoy_v3_api_msg_config.trace.v3.Tracing>`\nfor more information on how to setup tracing in Envoy.\n\nBaggage\n-----------------------------\nBaggage provides a mechanism for data to be available throughout the entirety of a trace.\nWhile metadata such as tags are usually communicated to collectors out-of-band, baggage data is injected into the actual\nrequest context and available to applications during the duration of the request. 
This enables metadata to transparently\ntravel from the beginning of the request throughout your entire mesh without relying on application-specific modifications for\npropagation. See `OpenTracing's documentation <https://opentracing.io/docs/overview/tags-logs-baggage/>`_ for more information about baggage.\n\nTracing providers have varying levels of support for getting and setting baggage:\n\n* Lightstep (and any OpenTracing-compliant tracer) can read/write baggage\n* Zipkin support is not yet implemented\n* X-Ray and OpenCensus don't support baggage\n"
  },
  {
    "path": "docs/root/intro/arch_overview/operations/draining.rst",
    "content": ".. _arch_overview_draining:\n\nDraining\n========\n\nIn a few different scenarios, Envoy will attempt to gracefully shed connections. For instance,\nduring server shutdown, existing requests can be discouraged and listeners set to stop accepting,\nto reduce the number of open connections when the server shuts down. Draining behaviour is defined\nby the server options in addition to individual listener configs.\n\nDraining occurs at the following times:\n\n* The server is being :ref:`hot restarted <arch_overview_hot_restart>`.\n* The server begins the graceful drain sequence via the :ref:`drain_listeners?graceful\n  <operations_admin_interface_drain>` admin endpoint.\n* The server has been manually health check failed via the :ref:`healthcheck/fail\n  <operations_admin_interface_healthcheck_fail>` admin endpoint. See the :ref:`health check filter\n  <arch_overview_health_checking_filter>` architecture overview for more information.\n* Individual listeners are being modified or removed via :ref:`LDS\n  <arch_overview_dynamic_config_lds>`.\n\nBy default, the Envoy server will close listeners immediately on server shutdown. To drain listeners\nfor some duration of time prior to server shutdown, use :ref:`drain_listeners <operations_admin_interface_drain>`\nbefore shutting down the server. The listeners will be directly stopped without any graceful draining behaviour,\nand cease accepting new connections immediately.\n\nTo add a graceful drain period prior to listeners being closed, use the query parameter\n:ref:`drain_listeners?graceful <operations_admin_interface_drain>`. By default, Envoy\nwill discourage requests for some period of time (as determined by :option:`--drain-time-s`). \nThe behaviour of request discouraging is determined by the drain manager.\n\nNote that although draining is a per-listener concept, it must be supported at the network filter\nlevel. 
Currently the only filters that support graceful draining are\n:ref:`Redis <config_network_filters_redis_proxy>`,\n:ref:`Mongo <config_network_filters_mongo_proxy>`,\nand :ref:`HTTP connection manager <config_http_conn_man>`.\n\nBy default, the :ref:`HTTP connection manager <config_http_conn_man>` filter will\nadd \"Connection: close\" to HTTP1 requests, send HTTP2 GOAWAY, and terminate connections\non request completion (after the delayed close period).\n\nEach :ref:`configured listener <arch_overview_listeners>` has a :ref:`drain_type\n<envoy_v3_api_enum_config.listener.v3.Listener.DrainType>` setting which controls when draining takes place. The currently\nsupported values are:\n\ndefault\n  Envoy will drain listeners in response to all three cases above (admin health fail, hot restart, and\n  LDS update/remove). This is the default setting.\n\nmodify_only\n  Envoy will drain listeners only in response to the 2nd and 3rd cases above (hot restart and\n  LDS update/remove). This setting is useful if Envoy is hosting both ingress and egress listeners.\n  It may be desirable to set *modify_only* on egress listeners so they only drain during\n  modifications while relying on ingress listener draining to perform full server draining when\n  attempting to do a controlled shutdown.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/operations/dynamic_configuration.rst",
    "content": ".. _arch_overview_dynamic_config:\n\nxDS configuration API overview\n==============================\n\nEnvoy is architected such that different types of configuration management approaches are possible.\nThe approach taken in a deployment will be dependent on the needs of the implementor. Simple\ndeployments are possible with a fully static configuration. More complicated deployments can\nincrementally add more complex dynamic configuration, the downside being that the implementor must\nprovide one or more external gRPC/REST based configuration provider APIs. These APIs are\ncollectively known as :ref:`\"xDS\" <xds_protocol>` (* discovery service). This document gives an\noverview of the options currently available.\n\n* Top level configuration :ref:`reference <config>`.\n* :ref:`Reference configurations <install_ref_configs>`.\n* Envoy :ref:`v3 API overview <config_overview>`.\n* :ref:`xDS API endpoints <config_overview_management_server>`.\n\nFully static\n------------\n\nIn a fully static configuration, the implementor provides a set of :ref:`listeners\n<config_listeners>` (and :ref:`filter chains <envoy_v3_api_msg_config.listener.v3.Filter>`), :ref:`clusters\n<config_cluster_manager>`, etc. Dynamic host discovery is only possible via DNS based\n:ref:`service discovery <arch_overview_service_discovery>`. Configuration reloads must take place\nvia the built in :ref:`hot restart <arch_overview_hot_restart>` mechanism.\n\nThough simplistic, fairly complicated deployments can be created using static configurations and\ngraceful hot restarts.\n\n.. _arch_overview_dynamic_config_eds:\n\nEDS\n---\n\nThe :ref:`Endpoint Discovery Service (EDS) API <arch_overview_service_discovery_types_eds>` provides\na more advanced mechanism by which Envoy can discover members of an upstream cluster. Layered on top\nof a static configuration, EDS allows an Envoy deployment to circumvent the limitations of DNS\n(maximum records in a response, etc.) 
as well as consume more information used in load balancing and\nrouting (e.g., canary status, zone, etc.).\n\n.. _arch_overview_dynamic_config_cds:\n\nCDS\n---\n\nThe :ref:`Cluster Discovery Service (CDS) API <config_cluster_manager_cds>` layers on a mechanism by\nwhich Envoy can discover upstream clusters used during routing. Envoy will gracefully add, update,\nand remove clusters as specified by the API. This API allows implementors to build a topology in\nwhich Envoy does not need to be aware of all upstream clusters at initial configuration time.\nTypically, when doing HTTP routing along with CDS (but without route discovery service),\nimplementors will make use of the router's ability to forward requests to a cluster specified in an\n:ref:`HTTP request header <envoy_v3_api_field_config.route.v3.RouteAction.cluster_header>`.\n\nAlthough it is possible to use CDS without EDS by specifying fully static clusters, we recommend\nstill using the EDS API for clusters specified via CDS. Internally, when a cluster definition is\nupdated, the operation is graceful. However, all existing connection pools will be drained and\nreconnected. EDS does not suffer from this limitation. When hosts are added and removed via EDS, the\nexisting hosts in the cluster are unaffected.\n\n.. _arch_overview_dynamic_config_rds:\n\nRDS\n---\n\nThe :ref:`Route Discovery Service (RDS) API <config_http_conn_man_rds>` layers on a mechanism by\nwhich Envoy can discover the entire route configuration for an HTTP connection manager filter at\nruntime. 
The route configuration will be gracefully swapped in without affecting existing requests.\nThis API, when used alongside EDS and CDS, allows implementors to build a complex routing topology\n(:ref:`traffic shifting <config_http_conn_man_route_table_traffic_splitting>`, blue/green\ndeployment, etc).\n\nVHDS\n----\nThe :ref:`Virtual Host Discovery Service <config_http_conn_man_vhds>` allows the virtual hosts belonging\nto a route configuration to be requested as needed separately from the route configuration itself. This\nAPI is typically used in deployments in which there are a large number of virtual hosts in a route\nconfiguration.\n\nSRDS\n----\n\nThe :ref:`Scoped Route Discovery Service (SRDS) API <arch_overview_http_routing_route_scope>` allows\na route table to be broken up into multiple pieces. This API is typically used in deployments of\nHTTP routing with massive route tables in which simple linear searches are not feasible.\n\n.. _arch_overview_dynamic_config_lds:\n\nLDS\n---\n\nThe :ref:`Listener Discovery Service (LDS) API <config_listeners_lds>` layers on a mechanism by which\nEnvoy can discover entire listeners at runtime. This includes all filter stacks, up to and including\nHTTP filters with embedded references to :ref:`RDS <config_http_conn_man_rds>`. Adding LDS into\nthe mix allows almost every aspect of Envoy to be dynamically configured. 
Hot restart should\nonly be required for very rare configuration changes (admin, tracing driver, etc.), certificate\nrotation, or binary updates.\n\nSDS\n---\n\nThe :ref:`Secret Discovery Service (SDS) API <config_secret_discovery_service>` layers on a mechanism\nby which Envoy can discover cryptographic secrets (certificate plus private key, TLS session\nticket keys) for its listeners, as well as configuration of peer certificate validation logic\n(trusted root certs, revocations, etc).\n\nRTDS\n----\n\nThe :ref:`RunTime Discovery Service (RTDS) API <config_runtime_rtds>` allows\n:ref:`runtime <config_runtime>` layers to be fetched via an xDS API. This may be favorable to,\nor augmented by, file system layers.\n\nECDS\n----\n\nThe :ref:`Extension Config Discovery Service (ECDS) API <config_overview_extension_discovery>`\nallows extension configurations (e.g. HTTP filter configuration) to be served independently from\nthe listener. This is useful when building systems that are more appropriately split from the\nprimary control plane such as WAF, fault testing, etc.\n\nAggregated xDS (\"ADS\")\n----------------------\n\nEDS, CDS, etc. are each separate services, with different REST/gRPC service names, e.g.\nStreamListeners, StreamSecrets. For users looking to enforce the order in which resources of\ndifferent types reach Envoy, there is aggregated xDS, a single gRPC service that carries all\nresource types in a single gRPC stream. (ADS is only supported by gRPC).\n:ref:`More details about ADS <config_overview_ads>`.\n\n.. _arch_overview_dynamic_config_delta:\n\nDelta gRPC xDS\n--------------\n\nStandard xDS is \"state-of-the-world\": every update must contain every resource, with the absence of\na resource from an update implying that the resource is gone. Envoy supports a \"delta\" variant of\nxDS (including ADS), where updates only contain resources added/changed/removed. 
Delta xDS is a\nnew protocol, with request/response APIs different from SotW.\n:ref:`More details about delta <config_overview_delta>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/operations/hot_restart.rst",
    "content": ".. _arch_overview_hot_restart:\n\nHot restart\n===========\n\nEase of operation is one of the primary goals of Envoy. In addition to robust statistics and a local\nadministration interface, Envoy has the ability to “hot” or “live” restart itself. This means that\nEnvoy can fully reload itself (both code and configuration) without dropping any connections. The\nhot restart functionality has the following general architecture:\n\n* Statistics and some locks are kept in a shared memory region. This means that gauges will be\n  consistent across both processes as restart is taking place.\n* The two active processes communicate with each other over unix domain sockets using a basic RPC\n  protocol.\n* The new process fully initializes itself (loads the configuration, does an initial service\n  discovery and health checking phase, etc.) before it asks for copies of the listen sockets from\n  the old process. The new process starts listening and then tells the old process to start\n  draining.\n* During the draining phase, the old process attempts to gracefully close existing connections. How\n  this is done depends on the configured filters. The drain time is configurable via the\n  :option:`--drain-time-s` option and as more time passes draining becomes more aggressive.\n* After drain sequence, the new Envoy process tells the old Envoy process to shut itself down.\n  This time is configurable via the :option:`--parent-shutdown-time-s` option.\n* Envoy’s hot restart support was designed so that it will work correctly even if the new Envoy\n  process and the old Envoy process are running inside different containers. Communication between\n  the processes takes place only using unix domain sockets.\n* An example restarter/parent process written in Python is included in the source distribution. 
This\n  parent process is usable with standard process control utilities such as monit/runit/etc.\n\nEnvoy's default command line options assume that only a single set of Envoy processes is running on\na given host: an active Envoy server process and, potentially, a draining Envoy server process that\nwill exit as described above. The :option:`--base-id` or :option:`--use-dynamic-base-id` options\nmay be used to allow multiple, distinctly configured Envoys to run on the same host and hot restart\nindependently.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/operations/init.rst",
    "content": ".. _arch_overview_initialization:\n\nInitialization\n==============\n\nHow Envoy initializes itself when it starts up is complex. This section explains at a high level\nhow the process works. All of the following happens before any listeners start listening and\naccepting new connections.\n\n* During startup, the :ref:`cluster manager <arch_overview_cluster_manager>` goes through a\n  multi-phase initialization where it first initializes static/DNS clusters, then predefined\n  :ref:`EDS <arch_overview_dynamic_config_eds>` clusters. Then it initializes\n  :ref:`CDS <arch_overview_dynamic_config_cds>` if applicable, waits for one response (or failure) \n  for a :ref:`bounded period of time <envoy_v3_api_field_config.core.v3.ConfigSource.initial_fetch_timeout>`,\n  and does the same primary/secondary initialization of CDS provided clusters.\n* If clusters use :ref:`active health checking <arch_overview_health_checking>`, Envoy also does a\n  single active health check round.\n* Once cluster manager initialization is done, :ref:`RDS <arch_overview_dynamic_config_rds>` and\n  :ref:`LDS <arch_overview_dynamic_config_lds>` initialize (if applicable). The server waits\n  for a :ref:`bounded period of time <envoy_v3_api_field_config.core.v3.ConfigSource.initial_fetch_timeout>` \n  for at least one response (or failure) for LDS/RDS requests. After which, it starts accepting connections.\n* If LDS itself returns a listener that needs an RDS response, Envoy further waits for \n  a :ref:`bounded period of time <envoy_v3_api_field_config.core.v3.ConfigSource.initial_fetch_timeout>` until an RDS\n  response (or failure) is received. 
Note that this process takes place on every future listener\n  addition via LDS and is known as :ref:`listener warming <config_listeners_lds>`.\n* After all of the previous steps have taken place, the listeners start accepting new connections.\n  This flow ensures that during hot restart the new process is fully capable of accepting and\n  processing new connections before the draining of the old process begins.\n\nA key design principle of initialization is that an Envoy is always guaranteed to initialize within \n:ref:`initial_fetch_timeout <envoy_v3_api_field_config.core.v3.ConfigSource.initial_fetch_timeout>`, \nwith a best effort made to obtain the complete set of xDS configuration within that subject to the \nmanagement server availability.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/operations/operations.rst",
    "content": "Operations & configuration\n==========================\n\n.. toctree::\n  :maxdepth: 2\n\n  dynamic_configuration\n  init\n  draining\n  runtime\n  hot_restart\n  overload_manager\n"
  },
  {
    "path": "docs/root/intro/arch_overview/operations/overload_manager.rst",
    "content": ".. _arch_overview_overload_manager:\n\nOverload manager\n================\n\nThe overload manager is an extensible component for protecting the Envoy server from overload\nwith respect to various system resources (such as memory, cpu or file descriptors) due to too\nmany client connections or requests. This is distinct from\n:ref:`circuit breaking <arch_overview_circuit_break>` which is primarily aimed at protecting\nupstream services.\n\nThe overload manager is :ref:`configured <config_overload_manager>` by specifying a set of\nresources to monitor and a set of overload actions that will be taken when some of those\nresources exceed certain pressure thresholds.\n\nArchitecture\n------------\n\nThe overload manager works by periodically polling the *pressure* of a set of **resources**,\nfeeding those through **triggers**, and taking **actions** based on the triggers. The set of\nresource monitors, triggers, and actions are specified at startup.\n\nResources\n~~~~~~~~~\n\nA resource is a thing that can be monitored by the overload manager, and whose *pressure* is\nrepresented by a real value in the range [0, 1]. The pressure of a resource is evaluated by a\n*resource monitor*. See the :ref:`configuration page <config_overload_manager>` for setting up\nresource monitors.\n\nTriggers\n~~~~~~~~\n\nTriggers are evaluated on each resource pressure update, and convert a resource pressure value\ninto an action state. An action state has a value in the range [0, 1], and is categorized into one of two groups:\n\n.. _arch_overview_overload_manager-triggers-state:\n\n.. csv-table::\n  :header: action state, value, description\n  :widths: 1, 1, 2\n\n  scaling,   \"[0, 1)\", the resource pressure is below the configured saturation point; action may be taken\n  saturated, 1, the resource pressure is at or above the configured saturation point; drastic action should be taken\n\nWhen a resource pressure value is updated, the relevant triggers are reevaluated. 
For each action\nwith at least one trigger, the resulting action state is the maximum value over the configured\ntriggers. What effect the action state has depends on the action's configuration and implementation.\n\nActions\n~~~~~~~\n\nWhen a trigger changes state, the value is sent to registered actions, which can then affect how\nconnections and requests are processed. Each action interprets the input states differently, and\nsome may ignore the *scaling* state altogether, taking effect only when *saturated*."
  },
  {
    "path": "docs/root/intro/arch_overview/operations/runtime.rst",
    "content": ".. _arch_overview_runtime:\n\nRuntime configuration\n=====================\n\nEnvoy supports “runtime” configuration (also known as \"feature flags\" and \"decider\"). Configuration\nsettings can be altered that will affect operation without needing to restart Envoy or change the\nprimary configuration. The currently supported implementation uses a tree of file system files.\nEnvoy watches for a symbolic link swap in a configured directory and reloads the tree when that\nhappens. This type of system is very commonly deployed in large distributed systems. Other\nimplementations would not be difficult to implement. Supported runtime configuration settings are\ndocumented in the relevant sections of the operations guide. Envoy will operate correctly with\ndefault runtime values and a “null” provider so it is not required that such a system exists to run\nEnvoy.\n\nRuntime :ref:`configuration <config_runtime>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_features/compression/libraries.rst",
    "content": ".. _arch_overview_compression_libraries:\n\nCompression Libraries\n=====================\n\nUnderlying implementation\n-------------------------\n\nCurrently Envoy uses `zlib <http://zlib.net>`_ as a compression library.\n\n.. note::\n\n  `zlib-ng <https://github.com/zlib-ng/zlib-ng>`_ is a fork that hosts several 3rd-party\n  contributions containing new optimizations. Those optimizations are considered useful for\n  `improving compression performance <https://github.com/envoyproxy/envoy/issues/8448#issuecomment-667152013>`_.\n  Envoy can be built to use `zlib-ng <https://github.com/zlib-ng/zlib-ng>`_ instead of regular\n  `zlib <http://zlib.net>`_ by using ``--define zlib=ng`` Bazel option. The relevant build options\n  used to build `zlib-ng <https://github.com/zlib-ng/zlib-ng>`_ can be evaluated in :repo:`here\n  <bazel/foreign_cc/BUILD>`. Currently, this option is only available on Linux.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_features/global_rate_limiting.rst",
    "content": ".. _arch_overview_global_rate_limit:\n\nGlobal rate limiting\n====================\n\nAlthough distributed :ref:`circuit breaking <arch_overview_circuit_break>` is generally extremely\neffective in controlling throughput in distributed systems, there are times when it is not very\neffective and global rate limiting is desired. The most common case is when a large number of hosts\nare forwarding to a small number of hosts and the average request latency is low (e.g.,\nconnections/requests to a database server). If the target hosts become backed up, the downstream\nhosts will overwhelm the upstream cluster. In this scenario it is extremely difficult to configure a\ntight enough circuit breaking limit on each downstream host such that the system will operate\nnormally during typical request patterns but still prevent cascading failure when the system starts\nto fail. Global rate limiting is a good solution for this case.\n\nEnvoy integrates directly with a global gRPC rate limiting service. Although any service that\nimplements the defined RPC/IDL protocol can be used, Lyft provides a `reference implementation <https://github.com/lyft/ratelimit>`_\nwritten in Go which uses a Redis backend. Envoy’s rate limit integration has the following features:\n\n* **Network level rate limit filter**: Envoy will call the rate limit service for every new\n  connection on the listener where the filter is installed. The configuration specifies a specific\n  domain and descriptor set to rate limit on. This has the ultimate effect of rate limiting the\n  connections per second that transit the listener. :ref:`Configuration reference\n  <config_network_filters_rate_limit>`.\n* **HTTP level rate limit filter**: Envoy will call the rate limit service for every new request on\n  the listener where the filter is installed and where the route table specifies that the global\n  rate limit service should be called. 
All requests to the target upstream cluster as well as all\n  requests from the originating cluster to the target cluster can be rate limited.\n  :ref:`Configuration reference <config_http_filters_rate_limit>`\n\nRate limit service :ref:`configuration <config_rate_limit_service>`.\n\nNote that Envoy also supports :ref:`local rate limiting <config_network_filters_local_rate_limit>`.\nLocal rate limiting can be used in conjunction with global rate limiting to reduce load on the\nglobal rate limit service. For example, a local token bucket rate limit can absorb very large bursts\nin load that might otherwise overwhelm a global rate limit service. Thus, the rate limit is applied\nin two stages. The initial coarse grained limiting is performed by the token bucket limit before\na fine grained global limit finishes the job.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_features/ip_transparency.rst",
    "content": ".. _arch_overview_ip_transparency:\n\nIP Transparency\n===============\n\nWhat is IP Transparency\n-----------------------\n\nAs a proxy, Envoy is an IP endpoint: it has its own IP address, distinct from that of any downstream\nrequests. Consequently, when Envoy establishes connections to upstream hosts, the IP address of that\nconnection will be different from that of any proxied connections.\n\nSometimes the upstream server or network may need to know the original IP address of the connection,\ncalled the *downstream remote address*, for many reasons. Some examples include:\n\n* the IP address being used to form part of an identity,\n* the IP address being used to enforce network policy, or\n* the IP address being included in an audit.\n\nEnvoy supports multiple methods for providing the downstream remote address to the upstream host.\nThese techniques vary in complexity and applicability.\n\nHTTP Headers\n------------\n\nHTTP headers may carry the original IP address of the request in the\n:ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>` header. The upstream server\ncan use this header to determine the downstream remote address. Envoy may also use this header to\nchoose the IP address used by the\n:ref:`Original Src HTTP Filter <arch_overview_ip_transparency_original_src_http>`.\n\nThe HTTP header approach has a few downsides:\n\n* It is only applicable to HTTP.\n* It may not be supported by the upstream host.\n* It requires careful configuration.\n\nProxy Protocol\n--------------\n\n`HAProxy Proxy Protocol <http://www.haproxy.org/download/1.9/doc/proxy-protocol.txt>`_ defines a\nprotocol for communicating metadata about a connection over TCP, prior to the main TCP stream. This\nmetadata includes the source IP. 
Envoy supports consuming this information using\n:ref:`Proxy Protocol filter <config_listener_filters_proxy_protocol>`, which may be used to recover\nthe downstream remote address for propagation into an\n:ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>` header. It can also be used in\nconjunction with the\n:ref:`Original Src Listener Filter <arch_overview_ip_transparency_original_src_listener>`. Finally,\nEnvoy supports generating this header using the :ref:`Proxy Protocol Transport Socket <extension_envoy.transport_sockets.upstream_proxy_protocol>`.\nHere is an example config for setting up the socket:\n\n.. code-block:: yaml\n\n    clusters:\n    - name: service1\n      connect_timeout: 0.25s\n      type: strict_dns\n      lb_policy: round_robin\n      transport_socket:\n        name: envoy.transport_sockets.upstream_proxy_protocol\n        typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.proxy_protocol.v3.ProxyProtocolUpstreamTransport\n        config:\n          version: V1\n        transport_socket:\n          name: envoy.transport_sockets.raw_buffer\n      ...\n\nNote: If you are wrapping a TLS socket, the header will be sent before the TLS handshake occurs.\n\nSome drawbacks to Proxy Protocol:\n\n* It only supports TCP protocols.\n* It requires upstream host support.\n\n.. _arch_overview_ip_transparency_original_src_listener:\n\nOriginal Source Listener Filter\n-------------------------------\n\nIn controlled deployments, it may be possible to replicate the downstream remote address on the\nupstream connection by using a\n:ref:`Original Source listener filter <config_listener_filters_original_src>`. No metadata is added\nto the upstream request or stream. Rather, the upstream connection itself will be established with\nthe downstream remote address as its source address. This filter will work with any upstream\nprotocol or host. 
However, it requires fairly complex configuration, and it may not be supported in\nall deployments due to routing constraints.\n\nSome drawbacks to the Original Source filter:\n\n* It requires that Envoy have access to the downstream remote address.\n* Its configuration is relatively complex.\n* It may introduce a slight performance hit due to restrictions on connection pooling.\n\n.. _arch_overview_ip_transparency_original_src_http:\n\nOriginal Source HTTP Filter\n---------------------------\n\nIn controlled deployments, it may be possible to replicate the downstream remote address on the\nupstream connection by using a\n:ref:`Original Source HTTP filter <config_http_filters_original_src>`. This filter operates much like\nthe :ref:`Original Src Listener Filter <arch_overview_ip_transparency_original_src_listener>`. The\nmain difference is that it can infer the original source address from HTTP headers, which is important\nfor cases where a single downstream connection carries multiple HTTP requests from different original\nsource addresses. Deployments with a front proxy forwarding to sidecar proxies are examples where this case\napplies.\n\nThis filter will work with any upstream HTTP host. However, it requires fairly complex configuration,\nand it may not be supported in all deployments due to routing constraints.\n\nSome drawbacks to the Original Source filter:\n\n* It requires that Envoy be properly configured to extract the downstream remote address from the\n  :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>` header.\n* Its configuration is relatively complex.\n* It may introduce a slight performance hit due to restrictions on connection pooling.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_features/local_rate_limiting.rst",
    "content": ".. _arch_overview_local_rate_limit:\n\nLocal rate limiting\n===================\n\nEnvoy supports local (non-distributed) rate limiting of L4 connections via the\n:ref:`local rate limit filter <config_network_filters_local_rate_limit>`.\n\nEnvoy additionally supports local rate limiting of HTTP requests via the\n:ref:`HTTP local rate limit filter <config_http_filters_local_rate_limit>`. This can\nbe activated globally at the listener level or at a more specific level (e.g.: the virtual\nhost or route level).\n\nFinally, Envoy also supports :ref:`global rate limiting <arch_overview_global_rate_limit>`. Local\nrate limiting can be used in conjunction with global rate limiting to reduce load on the global\nrate limit service.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_features/other_features.rst",
    "content": "Other features\n==============\n\n.. toctree::\n  :maxdepth: 2\n\n  local_rate_limiting\n  global_rate_limiting\n  scripting\n  ip_transparency\n  compression/libraries\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_features/scripting.rst",
    "content": "Scripting\n=========\n\nEnvoy supports `Lua <https://www.lua.org/>`_ scripting as part of a dedicated\n:ref:`HTTP filter <config_http_filters_lua>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_protocols/dynamo.rst",
    "content": ".. _arch_overview_dynamo:\n\nDynamoDB\n========\n\nEnvoy supports an HTTP level DynamoDB sniffing filter with the following features:\n\n* DynamoDB API request/response parser.\n* DynamoDB per operation/per table/per partition and operation statistics.\n* Failure type statistics for 4xx responses, parsed from response JSON,\n  e.g., ProvisionedThroughputExceededException.\n* Batch operation partial failure statistics.\n\nThe DynamoDB filter is a good example of Envoy’s extensibility and core abstractions at the HTTP\nlayer. At Lyft we use this filter for all application communication with DynamoDB. It provides an\ninvaluable source of data agnostic to the application platform and specific AWS SDK in use.\n\nDynamoDB filter :ref:`configuration <config_http_filters_dynamo>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_protocols/grpc.rst",
    "content": ".. _arch_overview_grpc:\n\ngRPC\n====\n\n`gRPC <https://www.grpc.io/>`_ is an RPC framework from Google. It uses protocol buffers as the\nunderlying serialization/IDL format. At the transport layer it uses HTTP/2 for request/response\nmultiplexing. Envoy has first class support for gRPC both at the transport layer as well as at the\napplication layer:\n\n* gRPC makes use of HTTP/2 trailers to convey request status. Envoy is one of very few HTTP proxies\n  that correctly supports HTTP/2 trailers and is thus one of the few proxies that can transport\n  gRPC requests and responses.\n* The gRPC runtime for some languages is relatively immature. See :ref:`below <arch_overview_grpc_bridging>`\n  for an overview of filters that can help bring gRPC to more languages.\n* gRPC-Web is supported by a :ref:`filter <config_http_filters_grpc_web>` that allows a gRPC-Web\n  client to send requests to Envoy over HTTP/1.1 and get proxied to a gRPC server. It's under\n  active development and is expected to be the successor to the gRPC :ref:`bridge filter\n  <config_http_filters_grpc_bridge>`.\n* gRPC-JSON transcoder is supported by a :ref:`filter <config_http_filters_grpc_json_transcoder>`\n  that allows a RESTful JSON API client to send requests to Envoy over HTTP and get proxied to a\n  gRPC service.\n\n.. _arch_overview_grpc_bridging:\n\ngRPC bridging\n-------------\n\nEnvoy supports two gRPC bridges:\n\n* :ref:`grpc_http1_bridge filter <config_http_filters_grpc_bridge>` which allows gRPC requests to be sent to Envoy over\n  HTTP/1.1. Envoy then translates the requests to HTTP/2 for transport to the target server. 
The response is translated back to HTTP/1.1.\n  When installed, the bridge filter gathers per RPC statistics in addition to the standard array of global HTTP statistics.\n* :ref:`grpc_http1_reverse_bridge filter <config_http_filters_grpc_http1_reverse_bridge>` which allows gRPC requests to be sent to Envoy\n  and then translated to HTTP/1.1 when sent to the upstream. The response is then converted back into gRPC when sent to the downstream.\n  This filter can also optionally manage the gRPC frame header, allowing the upstream to not have to be gRPC aware at all.\n\n.. _arch_overview_grpc_services:\n\ngRPC services\n-------------\n\nIn addition to proxying gRPC on the data plane, Envoy makes use of gRPC for its\ncontrol plane, where it :ref:`fetches configuration from management server(s)\n<config_overview>` and in filters, such as for :ref:`rate limiting\n<config_http_filters_rate_limit>` or authorization checks. We refer to these as\n*gRPC services*.\n\nWhen specifying gRPC services, it's necessary to specify the use of either the\n:ref:`Envoy gRPC client <envoy_v3_api_field_config.core.v3.GrpcService.envoy_grpc>` or the\n:ref:`Google C++ gRPC client <envoy_v3_api_field_config.core.v3.GrpcService.google_grpc>`. We\ndiscuss the tradeoffs in this choice below.\n\nThe Envoy gRPC client is a minimal custom implementation of gRPC that makes use\nof Envoy's HTTP/2 upstream connection management. Services are specified as\nregular Envoy :ref:`clusters <arch_overview_cluster_manager>`, with regular\ntreatment of :ref:`timeouts, retries <arch_overview_http_conn_man>`, endpoint\n:ref:`discovery <arch_overview_dynamic_config_eds>`/:ref:`load\nbalancing/failover <arch_overview_load_balancing>`/load reporting, :ref:`circuit\nbreaking <arch_overview_circuit_break>`, :ref:`health checks\n<arch_overview_health_checking>`, :ref:`outlier detection\n<arch_overview_outlier_detection>`. 
They share the same :ref:`connection pooling\n<arch_overview_conn_pool>` mechanism as the Envoy data plane. Similarly, cluster\n:ref:`statistics <arch_overview_statistics>` are available for gRPC services.\nSince the client is minimal, it does not include advanced gRPC features such as\n`OAuth2 <https://oauth.net/2/>`_ or `gRPC-LB\n<https://grpc.io/blog/loadbalancing>`_ lookaside.\n\nThe Google C++ gRPC client is based on the reference implementation of gRPC\nprovided by Google at https://github.com/grpc/grpc. It provides advanced gRPC\nfeatures that are missing in the Envoy gRPC client. The Google C++ gRPC client\nperforms its own load balancing, retries, timeouts, endpoint management, etc,\nindependent of Envoy's cluster management. The Google C++ gRPC client also\nsupports `custom authentication plugins\n<https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms>`_.\n\nIt is recommended to use the Envoy gRPC client in most cases, where the advanced\nfeatures in the Google C++ gRPC client are not required. This provides\nconfiguration and monitoring simplicity. Where necessary features are missing\nin the Envoy gRPC client, the Google C++ gRPC client should be used instead.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_protocols/mongo.rst",
    "content": ".. _arch_overview_mongo:\n\nMongoDB\n=======\n\nEnvoy supports a network level MongoDB sniffing filter with the following features:\n\n* MongoDB wire format BSON parser.\n* Detailed MongoDB query/operation statistics including timings and scatter/multi-get counts for\n  routed clusters.\n* Query logging.\n* Per callsite statistics via the $comment query parameter.\n* Fault injection.\n\nThe MongoDB filter is a good example of Envoy’s extensibility and core abstractions. At Lyft we use\nthis filter between all applications and our databases. It provides an invaluable source of data\nthat is agnostic to the application platform and specific MongoDB driver in use.\n\nMongoDB proxy filter :ref:`configuration reference <config_network_filters_mongo_proxy>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_protocols/other_protocols.rst",
    "content": "Other protocols\n===============\n\n.. toctree::\n  :maxdepth: 2\n\n  grpc\n  mongo\n  dynamo\n  redis\n  postgres\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_protocols/postgres.rst",
    "content": ".. _arch_overview_postgres:\n\nPostgres\n========\n\nEnvoy supports a network level Postgres sniffing filter to add network observability. By using the\nPostgres proxy, Envoy is able to decode `Postgres frontend/backend protocol`_ and gather\nstatistics from the decoded information.\n\nThe main goal of the Postgres filter is to capture runtime statistics without impacting or\ngenerating any load on the Postgres upstream server, it is transparent to it. The filter currently\noffers the following features:\n\n* Decode non SSL traffic, ignore SSL traffic.\n* Decode session information.\n* Capture transaction information, including commits and rollbacks.\n* Expose counters for different types of statements (INSERTs, DELETEs, UPDATEs, etc).\n  The counters are updated based on decoding backend CommandComplete messages not by decoding SQL statements sent by a client.\n* Count frontend, backend and unknown messages.\n* Identify errors and notices backend responses.\n\nThe Postgres filter solves a notable problem for Postgres deployments:\ngathering this information either imposes additional load to the server; or\nrequires pull-based querying for metadata from the server, sometimes requiring\nexternal components or extensions. This filter provides valuable observability\ninformation, without impacting the performance of the upstream Postgres\nserver or requiring the installation of any software.\n\nPostgres proxy filter :ref:`configuration reference <config_network_filters_postgres_proxy>`.\n\n.. _Postgres frontend/backend protocol: https://www.postgresql.org/docs/current/protocol.html\n"
  },
  {
    "path": "docs/root/intro/arch_overview/other_protocols/redis.rst",
    "content": ".. _arch_overview_redis:\n\nRedis\n=======\n\nEnvoy can act as a Redis proxy, partitioning commands among instances in a cluster.\nIn this mode, the goals of Envoy are to maintain availability and partition tolerance\nover consistency. This is the key point when comparing Envoy to `Redis Cluster\n<https://redis.io/topics/cluster-spec>`_. Envoy is designed as a best-effort cache,\nmeaning that it will not try to reconcile inconsistent data or keep a globally consistent\nview of cluster membership. It also supports routing commands from different workload to\ndifferent to different upstream clusters based on their access patterns, eviction, or isolation\nrequirements.\n\nThe Redis project offers a thorough reference on partitioning as it relates to Redis. See\n\"`Partitioning: how to split data among multiple Redis instances\n<https://redis.io/topics/partitioning>`_\".\n\n**Features of Envoy Redis**:\n\n* `Redis protocol <https://redis.io/topics/protocol>`_ codec.\n* Hash-based partitioning.\n* Ketama distribution.\n* Detailed command statistics.\n* Active and passive healthchecking.\n* Hash tagging.\n* Prefix routing.\n* Separate downstream client and upstream server authentication.\n* Request mirroring for all requests or write requests only.\n* Control :ref:`read requests routing<envoy_v3_api_field_extensions.filters.network.redis_proxy.v3.RedisProxy.ConnPoolSettings.read_policy>`. This only works with Redis Cluster.\n\n**Planned future enhancements**:\n\n* Additional timing stats.\n* Circuit breaking.\n* Request collapsing for fragmented commands.\n* Replication.\n* Built-in retry.\n* Tracing.\n\n.. 
_arch_overview_redis_configuration:\n\nConfiguration\n-------------\n\nFor filter configuration details, see the Redis proxy filter\n:ref:`configuration reference <config_network_filters_redis_proxy>`.\n\nThe corresponding cluster definition should be configured with\n:ref:`ring hash load balancing <envoy_v3_api_field_config.cluster.v3.Cluster.lb_policy>`.\n\nIf :ref:`active health checking <arch_overview_health_checking>` is desired, the\ncluster should be configured with a :ref:`custom health check\n<envoy_v3_api_field_config.core.v3.HealthCheck.custom_health_check>` which is configured as a\n:ref:`Redis health checker <config_health_checkers_redis>`.\n\nIf passive healthchecking is desired, also configure\n:ref:`outlier detection <arch_overview_outlier_detection>`.\n\nFor the purposes of passive healthchecking, connect timeouts, command timeouts, and connection\nclose map to 5xx. All other responses from Redis are counted as a success.\n\n.. _arch_overview_redis_cluster_support:\n\nRedis Cluster Support (Experimental)\n----------------------------------------\n\nEnvoy currently offers experimental support for `Redis Cluster <https://redis.io/topics/cluster-spec>`_.\n\nWhen using Envoy as a sidecar proxy for a Redis Cluster, the service can use a non-cluster Redis client\nimplemented in any language to connect to the proxy as if it's a single node Redis instance.\nThe Envoy proxy will keep track of the cluster topology and send commands to the correct Redis node in the\ncluster according to the `spec <https://redis.io/topics/cluster-spec>`_. 
Advanced features such as reading\nfrom replicas can also be added to the Envoy proxy instead of updating redis clients in each language.\n\nEnvoy proxy tracks the topology of the cluster by sending periodic\n`cluster slots <https://redis.io/commands/cluster-slots>`_ commands to a random node in the cluster, and maintains the\nfollowing information:\n\n* List of known nodes.\n* The primaries for each shard.\n* Nodes entering or leaving the cluster.\n\nFor topology configuration details, see the Redis Cluster\n:ref:`v2 API reference <envoy_v3_api_msg_extensions.clusters.redis.v3.RedisClusterConfig>`.\n\nEvery Redis cluster has its own extra statistics tree rooted at *cluster.<name>.redis_cluster.* with the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  max_upstream_unknown_connections_reached, Counter, Total number of times that an upstream connection to an unknown host is not created after redirection having reached the connection pool's max_upstream_unknown_connections limit\n  upstream_cx_drained, Counter, Total number of upstream connections drained of active requests before being closed\n  upstream_commands.upstream_rq_time, Histogram, Histogram of upstream request times for all types of requests\n\n.. _arch_overview_redis_cluster_command_stats:\n\nPer-cluster command statistics can be enabled via the setting :ref:`enable_command_stats <envoy_v3_api_field_extensions.filters.network.redis_proxy.v3.RedisProxy.ConnPoolSettings.enable_command_stats>`.:\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  upstream_commands.[command].success, Counter, Total number of successful requests for a specific Redis command\n  upstream_commands.[command].failure, Counter, Total number of failed or cancelled requests for a specific Redis command\n  upstream_commands.[command].total, Counter, Total number of requests for a specific Redis command (sum of success and failure)\n  upstream_commands.[command].latency, Histogram, Latency of requests for a specific Redis command\n\nSupported commands\n------------------\n\nAt the protocol level, pipelines are supported. MULTI (transaction block) is not.\nUse pipelining wherever possible for the best performance.\n\nAt the command level, Envoy only supports commands that can be reliably hashed to a server. AUTH and PING\nare the only exceptions. AUTH is processed locally by Envoy if a downstream password has been configured,\nand no other commands will be processed until authentication is successful when a password has been\nconfigured. Envoy will transparently issue AUTH commands upon connecting to upstream servers, if upstream\nauthentication passwords are configured for the cluster. Envoy responds to PING immediately with PONG.\nArguments to PING are not allowed. All other supported commands must contain a key. Supported commands are\nfunctionally identical to the original Redis command except possibly in failure scenarios.\n\nFor details on each command's usage see the official\n`Redis command reference <https://redis.io/commands>`_.\n\n.. 
csv-table::\n  :header: Command, Group\n  :widths: 1, 1\n\n  AUTH, Authentication\n  PING, Connection\n  DEL, Generic\n  DUMP, Generic\n  EXISTS, Generic\n  EXPIRE, Generic\n  EXPIREAT, Generic\n  PERSIST, Generic\n  PEXPIRE, Generic\n  PEXPIREAT, Generic\n  PTTL, Generic\n  RESTORE, Generic\n  TOUCH, Generic\n  TTL, Generic\n  TYPE, Generic\n  UNLINK, Generic\n  GEOADD, Geo\n  GEODIST, Geo\n  GEOHASH, Geo\n  GEOPOS, Geo\n  GEORADIUS_RO, Geo\n  GEORADIUSBYMEMBER_RO, Geo\n  HDEL, Hash\n  HEXISTS, Hash\n  HGET, Hash\n  HGETALL, Hash\n  HINCRBY, Hash\n  HINCRBYFLOAT, Hash\n  HKEYS, Hash\n  HLEN, Hash\n  HMGET, Hash\n  HMSET, Hash\n  HSCAN, Hash\n  HSET, Hash\n  HSETNX, Hash\n  HSTRLEN, Hash\n  HVALS, Hash\n  LINDEX, List\n  LINSERT, List\n  LLEN, List\n  LPOP, List\n  LPUSH, List\n  LPUSHX, List\n  LRANGE, List\n  LREM, List\n  LSET, List\n  LTRIM, List\n  RPOP, List\n  RPUSH, List\n  RPUSHX, List\n  EVAL, Scripting\n  EVALSHA, Scripting\n  SADD, Set\n  SCARD, Set\n  SISMEMBER, Set\n  SMEMBERS, Set\n  SPOP, Set\n  SRANDMEMBER, Set\n  SREM, Set\n  SSCAN, Set\n  ZADD, Sorted Set\n  ZCARD, Sorted Set\n  ZCOUNT, Sorted Set\n  ZINCRBY, Sorted Set\n  ZLEXCOUNT, Sorted Set\n  ZRANGE, Sorted Set\n  ZRANGEBYLEX, Sorted Set\n  ZRANGEBYSCORE, Sorted Set\n  ZRANK, Sorted Set\n  ZREM, Sorted Set\n  ZREMRANGEBYLEX, Sorted Set\n  ZREMRANGEBYRANK, Sorted Set\n  ZREMRANGEBYSCORE, Sorted Set\n  ZREVRANGE, Sorted Set\n  ZREVRANGEBYLEX, Sorted Set\n  ZREVRANGEBYSCORE, Sorted Set\n  ZREVRANK, Sorted Set\n  ZPOPMIN, Sorted Set\n  ZPOPMAX, Sorted Set\n  ZSCAN, Sorted Set\n  ZSCORE, Sorted Set\n  APPEND, String\n  BITCOUNT, String\n  BITFIELD, String\n  BITPOS, String\n  DECR, String\n  DECRBY, String\n  GET, String\n  GETBIT, String\n  GETRANGE, String\n  GETSET, String\n  INCR, String\n  INCRBY, String\n  INCRBYFLOAT, String\n  MGET, String\n  MSET, String\n  PSETEX, String\n  SET, String\n  SETBIT, String\n  SETEX, String\n  SETNX, String\n  SETRANGE, String\n  STRLEN, String\n\nFailure 
modes\n-------------\n\nIf Redis throws an error, we pass that error along as the response to the command. Envoy treats a\nresponse from Redis with the error datatype as a normal response and passes it through to the\ncaller.\n\nEnvoy can also generate its own errors in response to the client.\n\n.. csv-table::\n  :header: Error, Meaning\n  :widths: 1, 1\n\n  no upstream host, \"The ring hash load balancer did not have a healthy host available at the\n  ring position chosen for the key.\"\n  upstream failure, \"The backend did not respond within the timeout period or closed\n  the connection.\"\n  invalid request, \"Command was rejected by the first stage of the command splitter due to\n  datatype or length.\"\n  unsupported command, \"The command was not recognized by Envoy and therefore cannot be serviced\n  because it cannot be hashed to a backend server.\"\n  finished with n errors, \"Fragmented commands which sum the response (e.g. DEL) will return the\n  total number of errors received if any were received.\"\n  upstream protocol error, \"A fragmented command received an unexpected datatype or a backend\n  responded with a response that does not conform to the Redis protocol.\"\n  wrong number of arguments for command, \"Certain commands check in Envoy that the number of\n  arguments is correct.\"\n  \"NOAUTH Authentication required.\", \"The command was rejected because a downstream authentication\n  password has been set and the client has not successfully authenticated.\"\n  ERR invalid password, \"The authentication command failed due to an invalid password.\"\n  \"ERR Client sent AUTH, but no password is set\", \"An authentication command was received, but no\n  downstream authentication password has been configured.\"\n\n\nIn the case of MGET, each individual key that cannot be fetched will generate an error response.\nFor example, if we fetch five keys and two of the keys' backends time out, we would get an error\nresponse for each in place of the 
value.\n\n.. code-block:: none\n\n  $ redis-cli MGET a b c d e\n  1) \"alpha\"\n  2) \"bravo\"\n  3) (error) upstream failure\n  4) (error) upstream failure\n  5) \"echo\"\n"
  },
  {
    "path": "docs/root/intro/arch_overview/security/_include/ssl.yaml",
    "content": "static_resources:\n  listeners:\n  - name: listener_0\n    address: { socket_address: { address: 127.0.0.1, port_value: 10000 } }\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        # ...\n      transport_socket:\n        name: envoy.transport_sockets.tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n          common_tls_context:\n            validation_context:\n              trusted_ca:\n                filename: /usr/local/my-client-ca.crt\n  clusters:\n  - name: some_service\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: some_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.2\n                port_value: 1234\n    transport_socket:\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n        common_tls_context:\n          tls_certificates:\n            certificate_chain: { \"filename\": \"/cert.crt\" }\n            private_key: { \"filename\": \"/cert.key\" }\n            ocsp_response: { \"filename\": \"/ocsp_response.der\" }\n          validation_context:\n            match_subject_alt_names:\n              exact: \"foo\"\n            trusted_ca:\n              filename: /etc/ssl/certs/ca-certificates.crt\n"
  },
  {
    "path": "docs/root/intro/arch_overview/security/ext_authz_filter.rst",
    "content": ".. _arch_overview_ext_authz:\n\nExternal Authorization\n======================\n\n* :ref:`Network filter configuration <config_network_filters_ext_authz>`.\n* :ref:`HTTP filter configuration <config_http_filters_ext_authz>`.\n\nThe External authorization filter calls an authorization service to check if the incoming request\nis authorized or not. The filter can be either configured as a\n:ref:`network filter <config_network_filters_ext_authz>`, or as a\n:ref:`HTTP filter <config_http_filters_ext_authz>` or both. If the request is deemed\nunauthorized by the network filter then the connection will be closed. If the request is deemed\nunauthorized at the HTTP filter the request will be denied with 403 (Forbidden) response.\n\n.. tip::\n  It is recommended that these filters are configured as the first filter in the filter chain so\n  that requests are authorized prior to rest of the filters processing the request.\n\nThe external authorization service cluster may be either statically configured or configured via\nthe :ref:`Cluster Discovery Service <config_cluster_manager_cds>`. If the external service is not\navailable when a request comes in then whether the request is authorized or not is defined by the\nconfiguration setting of *failure_mode_allow* configuration in the applicable\n:ref:`network filter <envoy_v3_api_msg_extensions.filters.network.ext_authz.v3.ExtAuthz>` or\n:ref:`HTTP filter <envoy_v3_api_msg_extensions.filters.http.ext_authz.v3.ExtAuthz>`. If it is set to\ntrue then the request will be permitted (fail open) otherwise it will be denied.\nThe default setting is *false*.\n\nService Definition\n------------------\n\nThe context of the traffic is passed on to an external authorization service using the service\ndefinition listed here.\nThe content of the request that are passed to an authorization service is specified by\n:ref:`CheckRequest <envoy_v3_api_msg_service.auth.v3.CheckRequest>`.\n\n.. 
toctree::\n  :glob:\n  :maxdepth: 2\n\n  ../../../api-v3/service/auth/v3/*\n"
  },
  {
    "path": "docs/root/intro/arch_overview/security/external_deps.rst",
    "content": ".. _arch_overview_external_deps:\n\nExternal dependencies\n=====================\n\nBelow we enumerate the external dependencies that may be linked into the Envoy binary. We exclude\ndependencies that only are used in CI or developer tooling above.\n\nData plane (core)\n-----------------\n\n.. include:: external_dep_dataplane_core.rst\n\nData plane (extensions)\n-----------------------\n\n.. include:: external_dep_dataplane_ext.rst\n\nControl plane\n-------------\n\n.. include:: external_dep_controlplane.rst\n\nObservability (core)\n--------------------\n\n.. include:: external_dep_observability_core.rst\n\nObservability (extensions)\n--------------------------\n\n.. include:: external_dep_observability_ext.rst\n\nTest only\n---------\n\n.. include:: external_dep_test_only.rst\n\nBuild\n-----\n\n.. include:: external_dep_build.rst\n\nMiscellaneous\n-------------\n\n.. include:: external_dep_other.rst\n"
  },
  {
    "path": "docs/root/intro/arch_overview/security/google_vrp.rst",
    "content": ".. _arch_overview_google_vrp:\n\nGoogle Vulnerability Reward Program (VRP)\n=========================================\n\nEnvoy is a participant in `Google's Vulnerability Reward Program (VRP)\n<https://www.google.com/about/appsecurity/reward-program/>`_. This is open to all security\nresearchers and will provide rewards for vulnerabilities discovered and reported according to the\nrules below.\n\n.. _arch_overview_google_vrp_rules:\n\nRules\n-----\n\nThe goal of the VRP is to provide a formal process to honor contributions from external\nsecurity researchers to Envoy's security. Vulnerabilities should meet the following conditions\nto be eligible for the program:\n\n1. Vulnerabilities must meet one of the below :ref:`objectives\n   <arch_overview_google_vrp_objectives>`, demonstrated with the supplied Docker-based\n   :ref:`execution environment <arch_overview_google_vrp_ee>` and be consistent with the\n   program's :ref:`threat model <arch_overview_google_vrp_threat_model>`.\n\n2. Vulnerabilities must be reported to envoy-security@googlegroups.com and be kept under embargo\n   while triage and potential security releases occur. Please follow the :repo:`disclosure guidance\n   <SECURITY.md#disclosures>` when submitting reports. Disclosure SLOs are documented :repo:`here\n   <SECURITY.md#fix-and-disclosure-slos>`. In general, security disclosures are subject to the\n   `Linux Foundation's privacy policy <https://www.linuxfoundation.org/privacy/>`_ with the added\n   proviso that VRP reports (including reporter e-mail address and name) may be freely shared with\n   Google for VRP purposes.\n\n3. Vulnerabilities must not be previously known in a public forum, e.g. GitHub issues trackers,\n   CVE databases (when previously associated with Envoy), etc. Existing CVEs that have not been\n   previously associated with an Envoy vulnerability are fair game.\n\n4. 
Vulnerabilities must not be also submitted to a parallel reward program run by Google or\n   `Lyft <https://www.lyft.com/security>`_.\n\nRewards are at the discretion of the Envoy OSS security team and Google. They will be conditioned on\nthe above criteria. If multiple instances of the same vulnerability are reported at the same time by\nindependent researchers or the vulnerability is already tracked under embargo by the OSS Envoy\nsecurity team, we will aim to fairly divide the reward amongst reporters.\n\n.. _arch_overview_google_vrp_threat_model:\n\nThreat model\n------------\n\nThe base threat model matches that of Envoy's :ref:`OSS security posture\n<arch_overview_threat_model>`. We add a number of temporary restrictions to provide a constrained\nattack surface for the initial stages of this program. We exclude any threat from:\n\n* Untrusted control planes.\n* Runtime services such as access logging, external authorization, etc.\n* Untrusted upstreams.\n* DoS attacks except as stipulated below.\n* Any filters apart from the HTTP connection manager network filter and HTTP router filter.\n* Admin console; this is disabled in the execution environment.\n\nWe also explicitly exclude any local attacks (e.g. via local processes, shells, etc.) against\nthe Envoy process. All attacks must occur via the network data plane on port 10000. Similarly,\nkernel and Docker vulnerabilities are outside the threat model.\n\nIn the future we may relax some of these restrictions as we increase the sophistication of the\nprogram's execution environment.\n\n.. _arch_overview_google_vrp_ee:\n\nExecution environment\n---------------------\n\nWe supply Docker images that act as the reference environment for this program:\n\n* `envoyproxy/envoy-google-vrp <https://hub.docker.com/r/envoyproxy/envoy-google-vrp/tags/>`_ images\n  are based on Envoy point releases. Only the latest point release at the time of vulnerability\n  submission is eligible for the program. 
The first point release available for VRP will be the\n  1.15.0 Envoy release.\n\n* `envoyproxy/envoy-google-vrp-dev <https://hub.docker.com/r/envoyproxy/envoy-google-vrp-dev/tags/>`_\n  images are based on Envoy master builds. Only builds within the last 5 days at the time of\n  vulnerability submission are eligible for the program. They must not be subject to any\n  publicly disclosed vulnerability at that point in time.\n\nTwo Envoy processes are available when these images are launched via `docker run`:\n\n* The *edge* Envoy is listening on ports 10000 (HTTPS). It has a :repo:`static configuration\n  </configs/google-vrp/envoy-edge.yaml>` that is configured according to Envoy's :ref:`edge hardening\n  principles <faq_edge>`. It has sinkhole, direct response and request forwarding routing rules (in\n  order):\n\n  1. `/content/*`: route to the origin Envoy server.\n  2. `/*`: return 403 (denied).\n\n\n* The *origin* Envoy is an upstream of the edge Envoy. It has a :repo:`static configuration\n  </configs/google-vrp/envoy-origin.yaml>` that features only direct responses, effectively acting\n  as an HTTP origin server. There are two route rules (in order):\n\n  1. `/blockedz`: return 200 `hidden treasure`. It should never be possible to have\n     traffic on the Envoy edge server's 10000 port receive this response unless a\n     qualifying vulnerability is present.\n  2. `/*`: return 200 `normal`.\n\nWhen running the Docker images, the following command line options should be supplied:\n\n* `-m 3g` to ensure that memory is bounded to 3GB. At least this much memory should be available\n  to the execution environment. Each Envoy process has an overload manager configured to limit\n  at 1GB.\n\n* `-e ENVOY_EDGE_EXTRA_ARGS=\"<...>\"` supplies additional CLI args for the edge Envoy. This\n  needs to be set but can be empty.\n\n* `-e ENVOY_ORIGIN_EXTRA_ARGS=\"<...>\"` supplies additional CLI args for the origin Envoy. This\n  needs to be set but can be empty.\n\n.. 
_arch_overview_google_vrp_objectives:\n\nObjectives\n----------\n\nVulnerabilities will be evidenced by requests on 10000 that trigger a failure mode\nthat falls into one of these categories:\n\n* Query-of-death: requests that cause the Envoy process to segfault or abort\n  in some immediate way.\n* OOM: requests that cause the edge Envoy process to OOM. There should be no more than\n  100 connections and streams in total involved to cause this to happen (i.e. brute force\n  connection/stream DoS is excluded).\n* Routing rule bypass: requests that are able to access `hidden treasure`.\n* TLS certificate exfiltration: requests that are able to obtain the edge Envoy's\n  `serverkey.pem`.\n* Remote code exploits: any root shell obtained via the network data plane.\n* At the discretion of the OSS Envoy security team, sufficiently interesting vulnerabilities that\n  don't fit the above categories but are likely to fall into the category of high or critical\n  vulnerabilities.\n\nWorking with the Docker images\n------------------------------\n\nA basic invocation of the execution environment that will bring up the edge Envoy on local\nport 10000 looks like:\n\n.. code-block:: bash\n\n   docker run -m 3g -p 10000:10000 --name envoy-google-vrp \\\n     -e ENVOY_EDGE_EXTRA_ARGS=\"\" \\\n     -e ENVOY_ORIGIN_EXTRA_ARGS=\"\" \\\n     envoyproxy/envoy-google-vrp-dev:latest\n\nWhen debugging, additional args may prove useful, e.g. in order to obtain trace logs, make\nuse of `wireshark` and `gdb`:\n\n.. code-block:: bash\n\n   docker run -m 3g -p 10000:10000 --name envoy-google-vrp \\\n     -e ENVOY_EDGE_EXTRA_ARGS=\"-l trace\" \\\n     -e ENVOY_ORIGIN_EXTRA_ARGS=\"-l trace\" \\\n     --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN \\\n     envoyproxy/envoy-google-vrp-dev:latest\n\nYou can obtain a shell in the Docker container with:\n\n.. 
code-block:: bash\n\n  docker exec -it envoy-google-vrp /bin/bash\n\nThe Docker images include `gdb`, `strace`, `tshark` (feel free to contribute other\nsuggestions via PRs updating the :repo:`Docker build file </ci/Dockerfile-envoy-google-vrp>`).\n\nRebuilding the Docker image\n---------------------------\n\nIt's helpful to be able to regenerate your own Docker base image for research purposes.\nTo do this without relying on CI, follow the instructions at the top of\n:repo:`ci/docker_rebuild_google-vrp.sh`. An example of this flow looks like:\n\n.. code-block:: bash\n\n   bazel build //source/exe:envoy-static\n   ./ci/docker_rebuild_google-vrp.sh bazel-bin/source/exe/envoy-static\n   docker run -m 3g -p 10000:10000 --name envoy-google-vrp \\\n     -e ENVOY_EDGE_EXTRA_ARGS=\"\" \\\n     -e ENVOY_ORIGIN_EXTRA_ARGS=\"\" \\\n     envoy-google-vrp:local\n"
  },
  {
    "path": "docs/root/intro/arch_overview/security/jwt_authn_filter.rst",
    "content": ".. _arch_overview_jwt_authn:\n\nJSON Web Token (JWT) Authentication\n===================================\n\n* :ref:`HTTP filter configuration <config_http_filters_jwt_authn>`.\n\nThe JSON Web Token (JWT) Authentication filter checks if the incoming request has a valid\n`JSON Web Token (JWT) <https://tools.ietf.org/html/rfc7519>`_. It checks the validity of the JWT by\nverifying the JWT signature, audiences and issuer based on the\n:ref:`HTTP filter configuration <config_http_filters_jwt_authn>`. The JWT Authentication filter\ncould be configured to either reject the request with invalid JWT immediately or defer the decision\nto later filters by passing the JWT payload to other filters.\n\nThe JWT Authentication filter supports to check the JWT under various conditions of the request, it\ncould be configured to check JWT only on specific paths so that you could allowlist some paths from\nthe JWT authentication, which is useful if a path is accessible publicly and doesn't require any JWT\nauthentication.\n\nThe JWT Authentication filter supports to extract the JWT from various locations of the request and\ncould combine multiple JWT requirements for the same request. The\n`JSON Web Key Set (JWKS) <https://tools.ietf.org/html/rfc7517>`_ needed for the JWT signature\nverification could be either specified inline in the filter config or fetched from remote server\nvia HTTP/HTTPS.\n\nThe JWT Authentication filter also supports to write the payloads of the successfully verified JWT\nto :ref:`Dynamic State <arch_overview_data_sharing_between_filters>` so that later filters could use\nit to make their own decisions based on the JWT payloads.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/security/rbac_filter.rst",
    "content": ".. _arch_overview_rbac:\n\nRole Based Access Control\n=========================\n\n* :ref:`Network filter configuration <config_network_filters_rbac>`.\n* :ref:`HTTP filter configuration <config_http_filters_rbac>`.\n\nThe Role Based Access Control (RBAC) filter checks if the incoming request is authorized or not.\nUnlike external authorization, the check of RBAC filter happens in the Envoy process and is\nbased on a list of policies from the filter config.\n\nThe RBAC filter can be either configured as a :ref:`network filter <config_network_filters_rbac>`,\nor as a :ref:`HTTP filter <config_http_filters_rbac>` or both. If the request is deemed unauthorized\nby the network filter then the connection will be closed. If the request is deemed unauthorized by\nthe HTTP filter the request will be denied with 403 (Forbidden) response.\n\nPolicy\n------\n\nThe RBAC filter checks the request based on a list of\n:ref:`policies <envoy_v3_api_field_config.rbac.v3.RBAC.policies>`. A policy consists of a list of\n:ref:`permissions <envoy_v3_api_msg_config.rbac.v3.Permission>` and\n:ref:`principals <envoy_v3_api_msg_config.rbac.v3.Principal>`. The permission specifies the actions of\nthe request, for example, the method and path of a HTTP request. The principal specifies the\ndownstream client identities of the request, for example, the URI SAN of the downstream client\ncertificate. A policy is matched if its permissions and principals are matched at the same time.\n\nShadow Policy\n-------------\n\nThe filter can be configured with a\n:ref:`shadow policy <envoy_v3_api_field_extensions.filters.http.rbac.v3.RBAC.shadow_rules>` that doesn't\nhave any effect (i.e. not deny the request) but only emit stats and log the result. This is useful\nfor testing a rule before applying in production.\n\n.. 
_arch_overview_condition:\n\nCondition\n---------\n\nIn addition to the pre-defined permissions and principals, a policy may optionally provide an\nauthorization condition written in the `Common Expression Language\n<https://github.com/google/cel-spec/blob/master/doc/intro.md>`_. The condition specifies an extra\nclause that must be satisfied for the policy to match. For example, the following condition checks\nwhether the request path starts with `/v1/`:\n\n.. code-block:: yaml\n\n  call_expr:\n    function: startsWith\n    args:\n    - select_expr:\n       operand:\n         ident_expr:\n           name: request\n       field: path\n    - const_expr:\n       string_value: /v1/\n\nThe following attributes are exposed to the language runtime:\n\n.. csv-table::\n   :header: Attribute, Type, Description\n   :widths: 1, 1, 2\n\n   request.path, string, The path portion of the URL\n   request.url_path, string, The path portion of the URL without the query string\n   request.host, string, The host portion of the URL\n   request.scheme, string, The scheme portion of the URL\n   request.method, string, Request method\n   request.headers, string map, All request headers\n   request.referer, string, Referer request header\n   request.useragent, string, User agent request header\n   request.time, timestamp, Time of the first byte received\n   request.duration, duration, Total duration of the request\n   request.id, string, Request ID\n   request.size, int, Size of the request body\n   request.total_size, int, Total size of the request including the headers\n   request.protocol, string, Request protocol e.g. 
\"HTTP/2\"\n   response.code, int, Response HTTP status code\n   response.code_details, string, Internal response code details (subject to change)\n   response.grpc_status, int, Response gRPC status code\n   response.headers, string map, All response headers\n   response.trailers, string map, All response trailers\n   response.size, int, Size of the response body\n   response.total_size, int, Total size of the response including the approximate uncompressed size of the headers and the trailers\n   response.flags, int, Additional details about the response beyond the standard response code\n   source.address, string, Downstream connection remote address\n   source.port, int, Downstream connection remote port\n   destination.address, string, Downstream connection local address\n   destination.port, int, Downstream connection local port\n   metadata, :ref:`Metadata<envoy_api_msg_core.Metadata>`, Dynamic metadata\n   filter_state, map string to bytes, Filter state mapping data names to their serialized string value\n   connection.mtls, bool, Indicates whether TLS is applied to the downstream connection and the peer ceritificate is presented\n   connection.requested_server_name, string, Requested server name in the downstream TLS connection\n   connection.tls_version, string, TLS version of the downstream TLS connection\n   connection.subject_local_certificate, string, The subject field of the local certificate in the downstream TLS connection\n   connection.subject_peer_certificate, string, The subject field of the peer certificate in the downstream TLS connection\n   connection.dns_san_local_certificate, string, The first DNS entry in the SAN field of the local certificate in the downstream TLS connection\n   connection.dns_san_peer_certificate, string, The first DNS entry in the SAN field of the peer certificate in the downstream TLS connection\n   connection.uri_san_local_certificate, string, The first URI entry in the SAN field of the local certificate in the 
downstream TLS connection\n   connection.uri_san_peer_certificate, string, The first URI entry in the SAN field of the peer certificate in the downstream TLS connection\n   connection.id, uint, Downstream connection ID\n   upstream.address, string, Upstream connection remote address\n   upstream.port, int, Upstream connection remote port\n   upstream.tls_version, string, TLS version of the upstream TLS connection\n   upstream.subject_local_certificate, string, The subject field of the local certificate in the upstream TLS connection\n   upstream.subject_peer_certificate, string, The subject field of the peer certificate in the upstream TLS connection\n   upstream.dns_san_local_certificate, string, The first DNS entry in the SAN field of the local certificate in the upstream TLS connection\n   upstream.dns_san_peer_certificate, string, The first DNS entry in the SAN field of the peer certificate in the upstream TLS connection\n   upstream.uri_san_local_certificate, string, The first URI entry in the SAN field of the local certificate in the upstream TLS connection\n   upstream.uri_san_peer_certificate, string, The first URI entry in the SAN field of the peer certificate in the upstream TLS connection\n   upstream.local_address, string, The local address of the upstream connection\n   upstream.transport_failure_reason, string, The upstream transport failure reason e.g. certificate validation failed\n\n\nMost attributes are optional and provide the default value based on the type of the attribute.\nCEL supports presence checks for attributes and maps using `has()` syntax, e.g.\n`has(request.referer)`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/security/security.rst",
    "content": "Security\n========\n\n.. toctree::\n  :maxdepth: 2\n\n  ssl\n  jwt_authn_filter\n  ext_authz_filter\n  rbac_filter\n  threat_model\n  external_deps\n  google_vrp\n"
  },
  {
    "path": "docs/root/intro/arch_overview/security/ssl.rst",
    "content": ".. _arch_overview_ssl:\n\nTLS\n===\n\nEnvoy supports both :ref:`TLS termination <envoy_v3_api_field_config.listener.v3.FilterChain.transport_socket>` in listeners as well as\n:ref:`TLS origination <envoy_v3_api_field_config.cluster.v3.Cluster.transport_socket>` when making connections to upstream\nclusters. Support is sufficient for Envoy to perform standard edge proxy duties for modern web\nservices as well as to initiate connections with external services that have advanced TLS\nrequirements (TLS1.2, SNI, etc.). Envoy supports the following TLS features:\n\n* **Configurable ciphers**: Each TLS listener and client can specify the ciphers that it supports.\n* **Client certificates**: Upstream/client connections can present a client certificate in addition\n  to server certificate verification.\n* **Certificate verification and pinning**: Certificate verification options include basic chain\n  verification, subject name verification, and hash pinning.\n* **Certificate revocation**: Envoy can check peer certificates against a certificate revocation list\n  (CRL) if one is :ref:`provided <envoy_v3_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.crl>`.\n* **ALPN**: TLS listeners support ALPN. The HTTP connection manager uses this information (in\n  addition to protocol inference) to determine whether a client is speaking HTTP/1.1 or HTTP/2.\n* **SNI**: SNI is supported for both server (listener) and client (upstream) connections.\n* **Session resumption**: Server connections support resuming previous sessions via TLS session\n  tickets (see `RFC 5077 <https://www.ietf.org/rfc/rfc5077.txt>`_). 
Resumption can be performed\n  across hot restarts and between parallel Envoy instances (typically useful in a front proxy\n  configuration).\n* **BoringSSL private key methods**: TLS private key operations (signing and decrypting) can be\n  performed asynchronously from :ref:`an extension <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.PrivateKeyProvider>`. This allows extending Envoy to support various key\n  management schemes (such as TPM) and TLS acceleration. This mechanism uses\n  `BoringSSL private key method interface <https://github.com/google/boringssl/blob/c0b4c72b6d4c6f4828a373ec454bd646390017d4/include/openssl/ssl.h#L1169>`_.\n* **OCSP Stapling**: Online Certificate Stapling Protocol responses may be stapled to certificates.\n\nUnderlying implementation\n-------------------------\n\nCurrently Envoy is written to use `BoringSSL <https://boringssl.googlesource.com/boringssl>`_ as the\nTLS provider.\n\n.. _arch_overview_ssl_fips:\n\nFIPS 140-2\n----------\n\nBoringSSL can be built in a\n`FIPS-compliant mode <https://boringssl.googlesource.com/boringssl/+/master/crypto/fipsmodule/FIPS.md>`_,\nfollowing the build instructions from the `Security Policy for BoringCrypto module\n<https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp3678.pdf>`_,\nusing ``--define boringssl=fips`` Bazel option. Currently, this option is only available on Linux-x86_64.\n\nThe correctness of the FIPS build can be verified by checking the presence of ``BoringSSL-FIPS``\nin the :option:`--version` output.\n\nIt's important to note that while using FIPS-compliant module is necessary for FIPS compliance,\nit's not sufficient by itself, and depending on the context, additional steps might be necessary.\nThe extra requirements may include using only approved algorithms and/or using only private keys\ngenerated by a module operating in FIPS-approved mode. 
For more information, please refer to the\n`Security Policy for BoringCrypto module\n<https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp3678.pdf>`_\nand/or an `accredited CMVP laboratory <https://csrc.nist.gov/projects/testing-laboratories>`_.\n\nPlease note that the FIPS-compliant build is based on an older version of BoringSSL than\nthe non-FIPS build, and it doesn't support the most recent QUIC APIs.\n\n.. _arch_overview_ssl_enabling_verification:\n\nEnabling certificate verification\n---------------------------------\n\nCertificate verification of both upstream and downstream connections is not enabled unless the\nvalidation context specifies one or more trusted authority certificates.\n\nExample configuration\n^^^^^^^^^^^^^^^^^^^^^\n\n.. literalinclude:: _include/ssl.yaml\n    :language: yaml\n\n*/etc/ssl/certs/ca-certificates.crt* is the default path for the system CA bundle on Debian systems.\n:ref:`trusted_ca <envoy_v3_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca>` along with\n:ref:`match_subject_alt_names <envoy_v3_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.match_subject_alt_names>`\nmakes Envoy verify the server identity of *127.0.0.2:1234* as \"foo\" in the same way as e.g. cURL\ndoes on standard Debian installations. 
Common paths for system CA bundles on Linux and BSD are:\n\n* /etc/ssl/certs/ca-certificates.crt (Debian/Ubuntu/Gentoo etc.)\n* /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem (CentOS/RHEL 7)\n* /etc/pki/tls/certs/ca-bundle.crt (Fedora/RHEL 6)\n* /etc/ssl/ca-bundle.pem (OpenSUSE)\n* /usr/local/etc/ssl/cert.pem (FreeBSD)\n* /etc/ssl/cert.pem (OpenBSD)\n\nSee the reference for :ref:`UpstreamTlsContexts <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.UpstreamTlsContext>` and\n:ref:`DownstreamTlsContexts <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>` for other TLS options.\n\n.. attention::\n\n  If only :ref:`trusted_ca <envoy_v3_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca>` is\n  specified, Envoy will verify the certificate chain of the presented certificate, but not its\n  subject name, hash, etc. Other validation context configuration is typically required depending\n  on the deployment.\n\n.. _arch_overview_ssl_cert_select:\n\nCertificate selection\n---------------------\n\n:ref:`DownstreamTlsContexts <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>` support multiple TLS\ncertificates. These may be a mix of RSA and P-256 ECDSA certificates. The following rules apply:\n\n* Only one certificate of a particular type (RSA or ECDSA) may be specified.\n* Non-P-256 server ECDSA certificates are rejected.\n* If the client supports P-256 ECDSA, a P-256 ECDSA certificate will be selected if one is present in the\n  :ref:`DownstreamTlsContext <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>`\n  and it is in compliance with the OCSP policy.\n* If the client only supports RSA certificates, a RSA certificate will be selected if present in the\n  :ref:`DownstreamTlsContext <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>`.\n* Otherwise, the first certificate listed is used. 
This will result in a failed handshake if the\n  client only supports RSA certificates and the server only has ECDSA certificates.\n* Static and SDS certificates may not be mixed in a given :ref:`DownstreamTlsContext\n  <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>`.\n* The selected certificate must adhere to the OCSP policy. If no\n  such certificate is found, the connection is refused.\n\nOnly a single TLS certificate is supported today for :ref:`UpstreamTlsContexts\n<envoy_v3_api_msg_extensions.transport_sockets.tls.v3.UpstreamTlsContext>`.\n\nSecret discovery service (SDS)\n------------------------------\n\nTLS certificates can be specified in the static resource or can be fetched remotely.\nCertificate rotation is supported for static resources by sourcing :ref:`SDS configuration from the filesystem <xds_certificate_rotation>` or by pushing updates from the SDS server.\nPlease see :ref:`SDS <config_secret_discovery_service>` for details.\n\n.. _arch_overview_ssl_ocsp_stapling:\n\nOCSP Stapling\n-------------\n\n:ref:`DownstreamTlsContexts <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>` support\nstapling an Online Certificate Status Protocol (OCSP) response to a TLS certificate during the handshake. The\n``ocsp_staple`` field allows the operator to supply a pre-computed OCSP response per-certificate in the context.\nA single response may not pertain to multiple certificates. If provided, OCSP responses must be valid and\naffirm the certificate has not been revoked. 
Expired OCSP responses are accepted, but may cause downstream\nconnection errors depending on the OCSP staple policy.\n\n:ref:`DownstreamTlsContexts <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>`\nsupport an ``ocsp_staple_policy`` field to control whether Envoy should stop using a certificate or\ncontinue without stapling when its associated OCSP response is missing or expired.\nCertificates marked as `must-staple <https://tools.ietf.org/html/rfc7633>`_ require a\nvalid OCSP response regardless of the OCSP staple policy. In practice, a must-staple certificate causes\nEnvoy to behave as if the OCSP staple policy is :ref:`MUST_STAPLE<envoy_v3_api_enum_value_extensions.transport_sockets.tls.v3.DownstreamTlsContext.OcspStaplePolicy.MUST_STAPLE>`.\nEnvoy will not use a must-staple certificate for new connections after its OCSP response expires.\n\nOCSP responses are never stapled to TLS requests that do not indicate support for OCSP stapling\nvia the ``status_request`` extension.\n\nThe following runtime flags are provided to adjust the requirements of OCSP responses and override\nthe OCSP policy. These flags default to ``true``.\n\n* ``envoy.reloadable_features.require_ocsp_response_for_must_staple_certs``: Disabling this allows\n  the operator to omit an OCSP response for must-staple certs in the config.\n* ``envoy.reloadable_features.check_ocsp_policy``: Disabling this will disable OCSP policy\n  checking. OCSP responses are stapled when available if the client supports it, even if the\n  response is expired. Stapling is skipped if no response is present.\n\nOCSP responses are ignored for :ref:`UpstreamTlsContexts\n<envoy_v3_api_msg_extensions.transport_sockets.tls.v3.UpstreamTlsContext>`.\n\n.. _arch_overview_ssl_auth_filter:\n\nAuthentication filter\n---------------------\n\nEnvoy provides a network filter that performs TLS client authentication via principals fetched from\na REST VPN service. 
This filter matches the presented client certificate hash against the principal\nlist to determine whether the connection should be allowed or not. Optional IP allowlisting can\nalso be configured. This functionality can be used to build edge proxy VPN support for web\ninfrastructure.\n\nClient TLS authentication filter :ref:`configuration reference\n<config_network_filters_client_ssl_auth>`.\n\n.. _arch_overview_ssl_custom_handshaker:\n\nCustom handshaker extension\n---------------------------\n\nThe :ref:`CommonTlsContext <envoy_v3_api_field_extensions.transport_sockets.tls.v3.CommonTlsContext.custom_handshaker>`\nhas a ``custom_handshaker`` extension which can be used to override SSL handshake\nbehavior entirely. This is useful for implementing any TLS behavior which is\ndifficult to express with callbacks. It is not necessary to write a custom\nhandshaker to use private key methods, see the\n:ref:`private key method interface <arch_overview_ssl>` described above.\n\nTo avoid reimplementing all of the `Ssl::ConnectionInfo <https://github.com/envoyproxy/envoy/blob/64bd6311bcc8f5b18ce44997ae22ff07ecccfe04/include/envoy/ssl/connection.h#L19>`_ interface, a custom\nimplementation might choose to extend\n`Envoy::Extensions::TransportSockets::Tls::SslHandshakerImpl <https://github.com/envoyproxy/envoy/blob/64bd6311bcc8f5b18ce44997ae22ff07ecccfe04/source/extensions/transport_sockets/tls/ssl_handshaker.h#L40>`_.\n\nCustom handshakers need to explicitly declare via `HandshakerCapabilities <https://github.com/envoyproxy/envoy/blob/64bd6311bcc8f5b18ce44997ae22ff07ecccfe04/include/envoy/ssl/handshaker.h#L68-L89>`_\nwhich TLS features they are responsible for. 
The default Envoy handshaker will\nmanage the remainder.\n\nA useful example handshaker, named ``SslHandshakerImplForTest``, lives in\n`this test <https://github.com/envoyproxy/envoy/blob/64bd6311bcc8f5b18ce44997ae22ff07ecccfe04/test/extensions/transport_sockets/tls/handshaker_test.cc#L174-L184>`_\nand demonstrates special-case ``SSL_ERROR`` handling and callbacks.\n\n.. _arch_overview_ssl_trouble_shooting:\n\nTrouble shooting\n----------------\n\nWhen Envoy originates TLS when making connections to upstream clusters, any errors will be logged into\n:ref:`UPSTREAM_TRANSPORT_FAILURE_REASON<config_access_log_format_upstream_transport_failure_reason>` field or\n:ref:`AccessLogCommon.upstream_transport_failure_reason<envoy_v3_api_field_data.accesslog.v3.AccessLogCommon.upstream_transport_failure_reason>` field.\nCommon errors are:\n\n* ``Secret is not supplied by SDS``: Envoy is still waiting for SDS to deliver key/cert or root CA.\n* ``SSLV3_ALERT_CERTIFICATE_EXPIRED``: Peer certificate is expired and not allowed in config.\n* ``SSLV3_ALERT_CERTIFICATE_UNKNOWN``: Peer certificate is not in config specified SPKI.\n* ``SSLV3_ALERT_HANDSHAKE_FAILURE``: Handshake failed, usually because the upstream requires a client certificate that was not presented.\n* ``TLSV1_ALERT_PROTOCOL_VERSION``: TLS protocol version mismatch.\n* ``TLSV1_ALERT_UNKNOWN_CA``: Peer certificate CA is not in trusted CA.\n\nA more detailed list of errors that can be raised by BoringSSL can be found\n`here <https://github.com/google/boringssl/blob/master/crypto/err/ssl.errordata>`_\n"
  },
  {
    "path": "docs/root/intro/arch_overview/security/threat_model.rst",
    "content": ".. _arch_overview_threat_model:\n\nThreat model\n============\n\nBelow we articulate the Envoy threat model, which is of relevance to Envoy operators, developers and\nsecurity researchers. We detail our security release process at\nhttps://github.com/envoyproxy/envoy/security/policy.\n\nConfidentiality, integrity and availability\n-------------------------------------------\n\nWe consider vulnerabilities leading to the compromise of data confidentiality or integrity to be our\nhighest priority concerns. Availability, in particular in areas relating to DoS and resource\nexhaustion, is also a serious security concern for Envoy operators, in particular those utilizing\nEnvoy in edge deployments.\n\nWe will activate the security release process for disclosures that meet the following criteria:\n\n* All issues that lead to loss of data confidentiality or integrity trigger the security release process.\n* An availability issue, such as Query-of-Death (QoD) or resource exhaustion needs to meet all of the\n  following criteria to trigger the security release process:\n  \n  - A component tagged as hardened is affected (see `Core and extensions`_ for the list of hardened components).\n    \n  - The type of traffic (upstream or downstream) that exhibits the issue matches the component's hardening tag.\n    I.e. component tagged as “hardened to untrusted downstream” is affected by downstream request.\n    \n  - A resource exhaustion issue needs to meet these additional criteria:\n    \n    + Not covered by an existing timeout or where applying short timeout values is impractical and either\n      \n      + Memory exhaustion, including out of memory conditions, where per-request memory use 100x or more above\n\tthe configured header or high watermark limit. I.e. 
10 KiB client request leading to 1 MiB of\n\tmemory consumed by Envoy;\n      \n      + Highly asymmetric CPU utilization where Envoy uses 100x or more CPU compared to client.\n\n\nThe Envoy availability stance around CPU and memory DoS is still evolving, especially for brute force\nattacks. We acknowledge that brute force (i.e. those with an amplification factor less than 100) attacks are\nlikely for Envoy deployments as part of cloud infrastructure or with the use of botnets. We will continue\nto iterate and fix well known resource issues in the open, e.g. overload manager and watermark improvements.\nWe will activate the security process for brute force disclosures that appear to present a risk to\nexisting Envoy deployments.\n\nNote that we do not currently consider the default settings for Envoy to be safe from an availability\nperspective. It is necessary for operators to explicitly :ref:`configure <best_practices_edge>`\nwatermarks, the overload manager, circuit breakers and other resource related features in Envoy to\nprovide a robust availability story. We will not act on any security disclosure that relates to a\nlack of safe defaults. Over time, we will work towards improved safe-by-default configuration, but\ndue to backwards compatibility and performance concerns, this will require following the breaking\nchange deprecation policy.\n\nData and control plane\n----------------------\n\nWe divide our threat model into data and control plane, reflecting the internal division in Envoy of\nthese concepts from an architectural perspective. Our highest priority in risk assessment is the\nthreat posed by untrusted downstream client traffic on the data plane. This reflects the use of\nEnvoy in an edge serving capacity and also the use of Envoy as an inbound destination in a service\nmesh deployment.\n\nIn addition, we have an evolving position towards any vulnerability that might be exploitable by\nuntrusted upstreams. 
We recognize that these constitute a serious security consideration, given the\nuse of Envoy as an egress proxy. We will activate the security release process for disclosures that\nappear to present a risk profile that is significantly greater than the current Envoy upstream\nhardening status quo.\n\nThe control plane management server is generally trusted. We do not consider wire-level exploits\nagainst the xDS transport protocol to be a concern as a result. However, the configuration delivered\nto Envoy over xDS may originate from untrusted sources and may not be fully sanitized. An example of\nthis might be a service operator that hosts multiple tenants on an Envoy, where tenants may specify\na regular expression on a header match in `RouteConfiguration`. In this case, we expect that Envoy\nis resilient against the risks posed by malicious configuration from a confidentiality, integrity\nand availability perspective, as described above.\n\nWe generally assume that services utilized for side calls during the request processing, e.g.\nexternal authorization, credential suppliers, rate limit services, are trusted. When this is not the\ncase, an extension will explicitly state this in its documentation.\n\nCore and extensions\n-------------------\n\nAnything in the Envoy core may be used in both untrusted and trusted deployments. As a consequence,\nit should be hardened with this model in mind. Security issues related to core code will usually\ntrigger the security release process as described in this document.\n\nThe following extensions are intended to be hardened against untrusted downstream and upstreams:\n\n.. include:: secpos_robust_to_untrusted_downstream_and_upstream.rst\n\nThe following extensions should not be exposed to data plane attack vectors and hence are intended\nto be robust to untrusted downstreams and upstreams:\n\n.. 
include:: secpos_data_plane_agnostic.rst\n\nThe following extensions are intended to be hardened against untrusted downstreams but assume trusted\nupstreams:\n\n.. include:: secpos_robust_to_untrusted_downstream.rst\n\nThe following extensions should only be used when both the downstream and upstream are trusted:\n\n.. include:: secpos_requires_trusted_downstream_and_upstream.rst\n\n\nThe following extensions have an unknown security posture:\n\n.. include:: secpos_unknown.rst\n\nEnvoy currently has two dynamic filter extensions that support loadable code; WASM and Lua. In both\ncases, we assume that the dynamically loaded code is trusted. We expect the runtime for Lua to be\nrobust to untrusted data plane traffic with the assumption of a trusted script. WASM is still in\ndevelopment, but will eventually have a similar security stance.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/aggregate_cluster.rst",
    "content": ".. _arch_overview_aggregate_cluster:\n\nAggregate Cluster\n=================\n\nAggregate cluster is used for failover between clusters with different configuration, e.g., from EDS\nupstream cluster to STRICT_DNS upstream cluster, from cluster using ROUND_ROBIN load balancing \npolicy to cluster using MAGLEV, from cluster with 0.1s connection timeout to cluster with 1s \nconnection timeout, etc. Aggregate cluster loosely couples multiple clusters by referencing their \nname in the :ref:`configuration <envoy_v3_api_msg_extensions.clusters.aggregate.v3.ClusterConfig>`. The\nfallback priority is defined implicitly by the ordering in the :ref:`clusters list <envoy_v3_api_field_extensions.clusters.aggregate.v3.ClusterConfig.clusters>`.\nAggregate cluster uses tiered load balancing. The load balancer chooses cluster and priority first \nand then delegates the load balancing to the load balancer of the selected cluster. The top level \nload balancer reuses the existing load balancing algorithm by linearizing the priority set of \nmultiple clusters into one. \n\nLinearize Priority Set\n----------------------\n\nUpstream hosts are divided into multiple :ref:`priority levels <arch_overview_load_balancing_priority_levels>` \nand each priority level contains a list of healthy, degraded and unhealthy hosts. Linearization is \nused to simplify the host selection during load balancing by merging priority levels from multiple \nclusters. For example, primary cluster has 3 priority levels, secondary has 2 and tertiary has 2 and\nthe failover ordering is primary, secondary, tertiary. 
\n\n+-----------+----------------+-------------------------------------+\n| Cluster   | Priority Level |  Priority Level after Linearization |\n+===========+================+=====================================+\n| Primary   | 0              |  0                                  |\n+-----------+----------------+-------------------------------------+\n| Primary   | 1              |  1                                  |\n+-----------+----------------+-------------------------------------+\n| Primary   | 2              |  2                                  |\n+-----------+----------------+-------------------------------------+\n| Secondary | 0              |  3                                  |\n+-----------+----------------+-------------------------------------+\n| Secondary | 1              |  4                                  |\n+-----------+----------------+-------------------------------------+\n| Tertiary  | 0              |  5                                  |\n+-----------+----------------+-------------------------------------+\n| Tertiary  | 1              |  6                                  |\n+-----------+----------------+-------------------------------------+\n\nExample\n-------\n\nA sample aggregate cluster configuration could be:\n\n.. 
code-block:: yaml\n\n  name: aggregate_cluster\n  connect_timeout: 0.25s\n  lb_policy: CLUSTER_PROVIDED\n  cluster_type:\n    name: envoy.clusters.aggregate\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig\n      clusters:\n      # cluster primary, secondary and tertiary should be defined outside.\n      - primary\n      - secondary\n      - tertiary\n\nNote: :ref:`PriorityLoad retry plugins <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_priority>` won't \nwork for aggregate cluster because the aggregate load balancer will override the *PriorityLoad* \nduring load balancing.\n\n\nLoad Balancing Example\n----------------------\n\nAggregate cluster uses tiered load balancing algorithm and the top tier is distributing traffic to \ndifferent clusters according to the health score across all :ref:`priorities <arch_overview_load_balancing_priority_levels>` \nin each cluster. The aggregate cluster in this section includes two clusters which is different from\nwhat the above configuration describes.\n \n+-----------------------------------------------------------------------------------------------------------------------+--------------------+----------------------+\n| Cluster                                                                                                               | Traffic to Primary | Traffic to Secondary |                                                \n+=======================================================================+===============================================+====================+======================+\n| Primary                                                               | Secondary                                     |                                           |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+                                           +\n| P=0 Healthy Endpoints | P=1 Healthy 
Endpoints | P=2 Healthy Endpoints | P=0 Healthy Endpoints | P=1 Healthy Endpoints |                                           |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n| 100%                  | 100%                  | 100%                  | 100%                  | 100%                  | 100%               | 0%                   |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n| 72%                   | 100%                  | 100%                  | 100%                  | 100%                  | 100%               | 0%                   |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n| 71%                   | 1%                    | 0%                    | 100%                  | 100%                  | 100%               | 0%                   |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n| 71%                   | 0%                    | 0%                    | 100%                  | 100%                  | 99%                | 1%                   |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n| 50%                   | 0%                    | 0%                    | 50%                   | 0%                    | 70%                | 30%                  |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n| 20%                   | 20%     
              | 10%                   | 25%                   | 25%                   | 70%                | 30%                  |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n| 20%                   | 0%                    | 0%                    | 20%                   | 0%                    | 50%                | 50%                  |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n| 0%                    | 0%                    | 0%                    | 100%                  | 0%                    | 0%                 | 100%                 |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n| 0%                    | 0%                    | 0%                    | 72%                   | 0%                    | 0%                 | 100%                 |\n+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+\n\nNote: The above load balancing uses default :ref:`overprovisioning factor <arch_overview_load_balancing_overprovisioning_factor>` \nwhich is 1.4 which means if 80% of the endpoints in a priority level are healthy, that level is \nstill considered fully healthy because 80 * 1.4 > 100.\n\nThe example shows how the aggregate cluster level load balancer selects the cluster. E.g., healths \nof {{20, 20, 10}, {25, 25}} would result in a priority load of {{28%, 28%, 14%}, {30%, 0%}} of \ntraffic. When normalized total health drops below 100, traffic is distributed after normalizing the \nlevels' health scores to that sub-100 total. E.g. 
healths of {{20, 0, 0}, {20, 0}} (yielding a \nnormalized total health of 56) would be normalized and each cluster will receive 20 * 1.4 / 56 = 50%\nof the traffic which results in a priority load of {{50%, 0%, 0%}, {50%, 0%}} of traffic.\n\nThe load balancer reuses priority level logic to help with the cluster selection. The priority level\nlogic works with integer health scores. The health score of a level is (percent of healthy hosts in \nthe level) * (overprovisioning factor), capped at 100%. P=0 endpoints receive level 0's health \nscore percent of the traffic, with the rest flowing to P=1 (assuming P=1 is 100% healthy - more on \nthat later). The integer percents of traffic that each cluster receives are collectively called the \nsystem's \"cluster priority load\". For instance, for primary cluster, when 20% of P=0 endpoints are \nhealthy, 20% of P=1 endpoints are healthy, and 10% of P=2 endpoints are healthy; for secondary, when\n25% of P=0 endpoints are healthy and 25% of P=1 endpoints are healthy. The primary cluster will \nreceive 20% * 1.4 + 20% * 1.4 + 10% * 1.4 = 70% of the traffic. The secondary cluster will receive \nmin(100 - 70, 25% * 1.4 + 25% * 1.4) = 30% of the traffic. The traffic to all clusters sum up to \n100. The normalized health score and priority load are pre-computed before selecting the cluster and \npriority. 
\n\nTo sum this up in pseudo algorithms:\n\n::\n\n  health(P_X) = min(100, 1.4 * 100 * healthy_P_X_backends / total_P_X_backends), where \n                  total_P_X_backends is the number of backends for priority P_X after linearization\n  normalized_total_health = min(100, Σ(health(P_0)...health(P_X)))\n  cluster_priority_load(C_0) = min(100, Σ(health(P_0)...health(P_k)) * 100 / normalized_total_health), \n                  where P_0...P_k belong to C_0\n  cluster_priority_load(C_X) = min(100 - Σ(priority_load(C_0)..priority_load(C_X-1)),\n                           Σ(health(P_x)...health(P_X)) * 100 / normalized_total_health), \n                           where P_x...P_X belong to C_X\n  map from priorities to clusters:\n    P_0 ... P_k ... ...P_x ... P_X\n    ^       ^          ^       ^\n    cluster C_0        cluster C_X\n\nThe second tier is delegating the load balancing to the cluster selected in the first step and the \ncluster could use any load balancing algorithms specified by :ref:`load balancer type <arch_overview_load_balancing_types>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/circuit_breaking.rst",
    "content": ".. _arch_overview_circuit_break:\n\nCircuit breaking\n================\n\nCircuit breaking is a critical component of distributed systems. It’s nearly always better to fail\nquickly and apply back pressure downstream as soon as possible. One of the main benefits of an Envoy\nmesh is that Envoy enforces circuit breaking limits at the network level as opposed to having to\nconfigure and code each application independently. Envoy supports various types of fully distributed\n(not coordinated) circuit breaking:\n\n.. _arch_overview_circuit_break_cluster_maximum_connections:\n\n* **Cluster maximum connections**: The maximum number of connections that Envoy will establish to\n  all hosts in an upstream cluster. If this circuit breaker overflows the :ref:`upstream_cx_overflow\n  <config_cluster_manager_cluster_stats>` counter for the cluster will increment. All connections,\n  whether active or draining, count against this limit. Even if this circuit breaker has overflowed,\n  Envoy will ensure that a host selected by cluster load balancing has at least one connection\n  allocated. This has the implication that the :ref:`upstream_cx_active\n  <config_cluster_manager_cluster_stats>` count for a cluster may be higher than the cluster maximum\n  connection circuit breaker, with an upper bound of\n  `cluster maximum connections + (number of endpoints in a cluster) * (connection pools for the\n  cluster)`. This bound applies to the sum of connections across all workers threads. See\n  :ref:`connection pooling <arch_overview_conn_pool_how_many>` for details on how many connection\n  pools a cluster may have.\n* **Cluster maximum pending requests**: The maximum number of requests that will be queued while\n  waiting for a ready connection pool connection. Requests are added to the list\n  of pending requests whenever there aren't enough upstream connections available to immediately dispatch\n  the request. 
For HTTP/2 connections, if :ref:`max concurrent streams <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.max_concurrent_streams>`\n  and :ref:`max requests per connection <envoy_v3_api_field_config.cluster.v3.Cluster.max_requests_per_connection>` are not\n  configured, all requests will be multiplexed over the same connection so this circuit breaker\n  will only be hit when no connection is already established. If this circuit breaker overflows the\n  :ref:`upstream_rq_pending_overflow <config_cluster_manager_cluster_stats>` counter for the cluster will\n  increment.\n* **Cluster maximum requests**: The maximum number of requests that can be outstanding to all hosts\n  in a cluster at any given time. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow <config_cluster_manager_cluster_stats>`\n  counter for the cluster will increment.\n* **Cluster maximum active retries**: The maximum number of retries that can be outstanding to all\n  hosts in a cluster at any given time. In general we recommend using :ref:`retry budgets <envoy_v3_api_field_config.cluster.v3.CircuitBreakers.Thresholds.retry_budget>`; however, if static circuit breaking is preferred it should aggressively circuit break\n  retries. This is so that retries for sporadic failures are allowed, but the overall retry volume cannot\n  explode and cause large scale cascading failure. If this circuit breaker overflows the\n  :ref:`upstream_rq_retry_overflow <config_cluster_manager_cluster_stats>` counter for the cluster\n  will increment.\n\n  .. _arch_overview_circuit_break_cluster_maximum_connection_pools:\n\n* **Cluster maximum concurrent connection pools**: The maximum number of connection pools that can be\n  concurrently instantiated. Some features, such as the\n  :ref:`Original Src Listener Filter <arch_overview_ip_transparency_original_src_listener>`, can\n  create an unbounded number of connection pools. 
When a cluster has exhausted its concurrent\n  connection pools, it will attempt to reclaim an idle one. If it cannot, then the circuit breaker\n  will overflow. This differs from\n  :ref:`Cluster maximum connections <arch_overview_circuit_break_cluster_maximum_connections>` in that\n  connection pools never time out, whereas connections typically will. Connections automatically\n  clean up; connection pools do not. Note that in order for a connection pool to function it needs\n  at least one upstream connection, so this value should likely be no greater than\n  :ref:`Cluster maximum connections <arch_overview_circuit_break_cluster_maximum_connections>`.\n  If this circuit breaker overflows the\n  :ref:`upstream_cx_pool_overflow <config_cluster_manager_cluster_stats>` counter for the cluster\n  will increment.\n\n\nEach circuit breaking limit is :ref:`configurable <config_cluster_manager_cluster_circuit_breakers>`\nand tracked on a per upstream cluster and per priority basis. This allows different components of\nthe distributed system to be tuned independently and have different limits. The live state of these\ncircuit breakers, including the number of resources remaining until a circuit breaker opens, can\nbe observed via :ref:`statistics <config_cluster_manager_cluster_stats_circuit_breakers>`.\n\nWorker threads share circuit breaker limits, i.e. if the active connection threshold is 500, worker\nthread 1 has 498 connections active, then worker thread 2 can only allocate 2 more connections.\nSince the implementation is eventually consistent, races between threads may allow limits to be\npotentially exceeded.\n\nCircuit breakers are enabled by default and have modest default values, e.g. 1024 connections per\ncluster. 
To disable circuit breakers, set the :ref:`thresholds <faq_disable_circuit_breaking>` to\nthe highest allowed values.\n\nNote that circuit breaking will cause the :ref:`x-envoy-overloaded\n<config_http_filters_router_x-envoy-overloaded_set>` header to be set by the router filter in the\ncase of HTTP requests.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/cluster_manager.rst",
    "content": ".. _arch_overview_cluster_manager:\n\nCluster manager\n===============\n\nEnvoy’s cluster manager manages all configured upstream clusters. Just as the Envoy configuration\ncan contain any number of listeners, the configuration can also contain any number of independently\nconfigured upstream clusters.\n\nUpstream clusters and hosts are abstracted from the network/HTTP filter stack given that upstream\nclusters and hosts may be used for any number of different proxy tasks. The cluster manager exposes\nAPIs to the filter stack that allow filters to obtain a L3/L4 connection to an upstream cluster, or\na handle to an abstract HTTP connection pool to an upstream cluster (whether the upstream host\nsupports HTTP/1.1 or HTTP/2 is hidden). A filter stage determines whether it needs an L3/L4\nconnection or a new HTTP stream and the cluster manager handles all of the complexity of knowing\nwhich hosts are available and healthy, load balancing, thread local storage of upstream connection\ndata (since most Envoy code is written to be single threaded), upstream connection type (TCP/IP,\nUDS), upstream protocol where applicable (HTTP/1.1, HTTP/2), etc.\n\nClusters known to the cluster manager can be configured either statically, or fetched dynamically\nvia the cluster discovery service (CDS) API. Dynamic cluster fetches allow more configuration to\nbe stored in a central configuration server and thus requires fewer Envoy restarts and configuration\ndistribution.\n\n* Cluster manager :ref:`configuration <config_cluster_manager>`.\n* CDS :ref:`configuration <config_cluster_manager_cds>`.\n\n.. 
_arch_overview_cluster_warming:\n\nCluster warming\n---------------\n\nWhen clusters are initialized both at server boot as well as via CDS, they are \"warmed.\" This means\nthat clusters do not become available until the following operations have taken place.\n\n* Initial service discovery load (e.g., DNS resolution, EDS update, etc.).\n* Initial active :ref:`health check <arch_overview_health_checking>` pass if active health checking\n  is configured. Envoy will send a health check request to each discovered host to determine its\n  initial health status.\n\nThe previous items ensure that Envoy has an accurate view of a cluster before it begins using it\nfor traffic serving.\n\nWhen discussing cluster warming, the cluster \"becoming available\" means:\n\n* For newly added clusters, the cluster will not appear to exist to the rest of Envoy until it has\n  been warmed. I.e., HTTP routes that reference the cluster will result in either a 404 or 503\n  (depending on configuration).\n* For updated clusters, the old cluster will continue to exist and serve traffic. When the new\n  cluster has been warmed, it will be atomically swapped with the old cluster such that no\n  traffic interruptions take place.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/connection_pooling.rst",
    "content": ".. _arch_overview_conn_pool:\n\nConnection pooling\n==================\n\nFor HTTP traffic, Envoy supports abstract connection pools that are layered on top of the underlying\nwire protocol (HTTP/1.1 or HTTP/2). The utilizing filter code does not need to be aware of whether\nthe underlying protocol supports true multiplexing or not. In practice the underlying\nimplementations have the following high level properties:\n\nHTTP/1.1\n--------\n\nThe HTTP/1.1 connection pool acquires connections as needed to an upstream host (up to the circuit\nbreaking limit). Requests are bound to connections as they become available, either because a\nconnection is done processing a previous request or because a new connection is ready to receive its\nfirst request. The HTTP/1.1 connection pool does not make use of pipelining so that only a single\ndownstream request must be reset if the upstream connection is severed.\n\nHTTP/2\n------\n\nThe HTTP/2 connection pool multiplexes multiple requests over a single connection, up to the limits\nimposed by :ref:`max concurrent streams\n<envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.max_concurrent_streams>` and :ref:`max\nrequests per connection <envoy_v3_api_field_config.cluster.v3.Cluster.max_requests_per_connection>`.\nThe HTTP/2 connection pool establishes as many connections as are needed to serve requests. With no\nlimits, this will be only a single connection. If a GOAWAY frame is received or if the connection\nreaches the :ref:`maximum requests per connection\n<envoy_v3_api_field_config.cluster.v3.Cluster.max_requests_per_connection>` limit, the connection\npool will drain the affected connection. Once a connection reaches its :ref:`maximum concurrent\nstream limit <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.max_concurrent_streams>`, it\nwill be marked as busy until a stream is available. 
New connections are established anytime there is\na pending request without a connection that can be dispatched to (up to circuit breaker limits for\nconnections). HTTP/2 is the preferred communication protocol, as connections rarely, if ever, get\nsevered.\n\n.. _arch_overview_conn_pool_how_many:\n\nNumber of connection pools\n--------------------------\n\nEach host in each cluster will have one or more connection pools. If the cluster is HTTP/1 or HTTP/2\nonly, then the host may have only a single connection pool. However, if the cluster supports multiple\nupstream protocols, then at least one connection pool per protocol will be allocated. Separate\nconnection pools are also allocated for each of the following features:\n\n* :ref:`Routing priority <arch_overview_http_routing_priority>`\n* :ref:`Socket options <envoy_v3_api_field_config.core.v3.BindConfig.socket_options>`\n* :ref:`Transport socket (e.g. TLS) options <envoy_v3_api_msg_config.core.v3.TransportSocket>`\n\nEach worker thread maintains its own connection pools for each cluster, so if an Envoy has two\nthreads and a cluster with both HTTP/1 and HTTP/2 support, there will be at least 4 connection pools.\n\n.. _arch_overview_conn_pool_health_checking:\n\nHealth checking interactions\n----------------------------\n\nIf Envoy is configured for either active or passive :ref:`health checking\n<arch_overview_health_checking>`, all connection pool connections will be closed on behalf of a host\nthat transitions from an available state to an unavailable state. If the host reenters the load\nbalancing rotation it will create fresh connections which will maximize the chance of working\naround a bad flow (due to ECMP route or something else).\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/health_checking.rst",
    "content": ".. _arch_overview_health_checking:\n\nHealth checking\n===============\n\nActive health checking can be :ref:`configured <config_cluster_manager_cluster_hc>` on a per\nupstream cluster basis. As described in the :ref:`service discovery\n<arch_overview_service_discovery>` section, active health checking and the EDS service discovery\ntype go hand in hand. However, there are other scenarios where active health checking is desired\neven when using the other service discovery types. Envoy supports three different types of health\nchecking along with various settings (check interval, failures required before marking a host\nunhealthy, successes required before marking a host healthy, etc.):\n\n* **HTTP**: During HTTP health checking Envoy will send an HTTP request to the upstream host. By\n  default, it expects a 200 response if the host is healthy. Expected response codes are\n  :ref:`configurable <envoy_v3_api_msg_config.core.v3.HealthCheck.HttpHealthCheck>`. The\n  upstream host can return 503 if it wants to immediately notify downstream hosts to no longer\n  forward traffic to it.\n* **L3/L4**: During L3/L4 health checking, Envoy will send a configurable byte buffer to the\n  upstream host. It expects the byte buffer to be echoed in the response if the host is to be\n  considered healthy. Envoy also supports connect only L3/L4 health checking.\n* **Redis**: Envoy will send a Redis PING command and expect a PONG response. The upstream Redis\n  server can respond with anything other than PONG to cause an immediate active health check\n  failure. Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist\n  it is considered a passing healthcheck. This allows the user to mark a Redis instance for\n  maintenance by setting the specified key to any value and waiting for traffic to drain. 
See\n  :ref:`redis_key <envoy_v3_api_msg_config.health_checker.redis.v2.Redis>`.\n\nHealth checks occur over the transport socket specified for the cluster. This implies that if a cluster is\nusing a TLS-enabled transport socket, the health check will also occur over TLS. The\n:ref:`TLS options <envoy_v3_api_msg_config.core.v3.HealthCheck.TlsOptions>` used for health check connections\ncan be specified, which is useful if the corresponding upstream is using ALPN-based\n:ref:`FilterChainMatch <envoy_v3_api_msg_config.listener.v3.FilterChainMatch>` with different protocols for\nhealth checks versus data connections.\n\n.. _arch_overview_per_cluster_health_check_config:\n\nPer cluster member health check config\n--------------------------------------\n\nIf active health checking is configured for an upstream cluster, a specific additional configuration\nfor each registered member can be specified by setting the\n:ref:`HealthCheckConfig<envoy_v3_api_msg_config.endpoint.v3.Endpoint.HealthCheckConfig>`\nin the :ref:`Endpoint<envoy_v3_api_msg_config.endpoint.v3.Endpoint>` of an :ref:`LbEndpoint<envoy_v3_api_msg_config.endpoint.v3.LbEndpoint>`\nof each defined :ref:`LocalityLbEndpoints<envoy_v3_api_msg_config.endpoint.v3.LocalityLbEndpoints>` in a\n:ref:`ClusterLoadAssignment<envoy_v3_api_msg_config.endpoint.v3.ClusterLoadAssignment>`.\n\nAn example of setting up :ref:`health check config<envoy_v3_api_msg_config.endpoint.v3.Endpoint.HealthCheckConfig>`\nto set a :ref:`cluster member<envoy_v3_api_msg_config.endpoint.v3.Endpoint>`'s alternative health check\n:ref:`port<envoy_v3_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.port_value>` is:\n\n.. code-block:: yaml\n\n  load_assignment:\n    endpoints:\n    - lb_endpoints:\n      - endpoint:\n          health_check_config:\n            port_value: 8080\n          address:\n            socket_address:\n              address: localhost\n              port_value: 80\n\n.. 
_arch_overview_health_check_logging:\n\nHealth check event logging\n--------------------------\n\nA per-healthchecker log of ejection and addition events can optionally be produced by Envoy by\nspecifying a log file path in :ref:`the HealthCheck config <envoy_v3_api_field_config.core.v3.HealthCheck.event_log_path>`.\nThe log is structured as JSON dumps of\n:ref:`HealthCheckEvent messages <envoy_v3_api_msg_data.core.v3.HealthCheckEvent>`.\n\nEnvoy can be configured to log all health check failure events by setting the :ref:`always_log_health_check_failures\nflag <envoy_v3_api_field_config.core.v3.HealthCheck.always_log_health_check_failures>` to true.\n\nPassive health checking\n-----------------------\n\nEnvoy also supports passive health checking via :ref:`outlier detection\n<arch_overview_outlier_detection>`.\n\nConnection pool interactions\n----------------------------\n\nSee :ref:`here <arch_overview_conn_pool_health_checking>` for more information.\n\n.. _arch_overview_health_checking_filter:\n\nHTTP health checking filter\n---------------------------\n\nWhen an Envoy mesh is deployed with active health checking between clusters, a large amount of\nhealth checking traffic can be generated. Envoy includes an HTTP health checking filter that can be\ninstalled in a configured HTTP listener. This filter is capable of a few different modes of\noperation:\n\n* **No pass through**: In this mode, the health check request is never passed to the local service.\n  Envoy will respond with a 200 or a 503 depending on the current draining state of the server.\n* **No pass through, computed from upstream cluster health**: In this mode, the health checking\n  filter will return a 200 or a 503 depending on whether at least a :ref:`specified percentage\n  <envoy_v3_api_field_extensions.filters.http.health_check.v3.HealthCheck.cluster_min_healthy_percentages>`\n  of the servers are available (healthy + degraded) in one or more upstream clusters. 
(If the Envoy\n  server is in a draining state, though, it will respond with a 503 regardless of the upstream\n  cluster health.)\n* **Pass through**: In this mode, Envoy will pass every health check request to the local service.\n  The service is expected to return a 200 or a 503 depending on its health state.\n* **Pass through with caching**: In this mode, Envoy will pass health check requests to the local\n  service, but then cache the result for some period of time. Subsequent health check requests will\n  return the cached value up to the cache time. When the cache time is reached, the next health\n  check request will be passed to the local service. This is the recommended mode of operation when\n  operating a large mesh. Envoy uses persistent connections for health checking traffic and health\n  check requests have very little cost to Envoy itself. Thus, this mode of operation yields an\n  eventually consistent view of the health state of each upstream host without overwhelming the\n  local service with a large number of health check requests.\n\nFurther reading:\n\n* Health check filter :ref:`configuration <config_http_filters_health_check>`.\n* :ref:`/healthcheck/fail <operations_admin_interface_healthcheck_fail>` admin endpoint.\n* :ref:`/healthcheck/ok <operations_admin_interface_healthcheck_ok>` admin endpoint.\n\nActive health checking fast failure\n-----------------------------------\n\nWhen using active health checking along with passive health checking (:ref:`outlier detection\n<arch_overview_outlier_detection>`), it is common to use a long health checking interval to avoid a\nlarge amount of active health checking traffic. In this case, it is still useful to be able to\nquickly drain an upstream host when using the :ref:`/healthcheck/fail\n<operations_admin_interface_healthcheck_fail>` admin endpoint. 
To support this, the :ref:`router\nfilter <config_http_filters_router>` will respond to the :ref:`x-envoy-immediate-health-check-fail\n<config_http_filters_router_x-envoy-immediate-health-check-fail>` header. If this header is set by\nan upstream host, Envoy will immediately mark the host as being failed for active health check. Note\nthat this only occurs if the host's cluster has active health checking :ref:`configured\n<config_cluster_manager_cluster_hc>`. The :ref:`health checking filter\n<config_http_filters_health_check>` will automatically set this header if Envoy has been marked as\nfailed via the :ref:`/healthcheck/fail <operations_admin_interface_healthcheck_fail>` admin\nendpoint.\n\n.. _arch_overview_health_checking_identity:\n\nHealth check identity\n---------------------\n\nJust verifying that an upstream host responds to a particular health check URL does not necessarily\nmean that the upstream host is valid. For example, when using eventually consistent service\ndiscovery in a cloud auto scaling or container environment, it's possible for a host to go away and\nthen come back with the same IP address, but as a different host type. One solution to this problem\nis having a different HTTP health checking URL for every service type. The downside of that approach\nis that overall configuration becomes more complicated as every health check URL is fully custom.\n\nThe Envoy HTTP health checker supports the :ref:`service_name_matcher\n<envoy_v3_api_field_config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher>` option. If this option is set,\nthe health checker additionally compares the value of the *x-envoy-upstream-healthchecked-cluster* \nresponse header to *service_name_matcher*. If the values do not match, the health check does not pass.\nThe upstream health check filter appends *x-envoy-upstream-healthchecked-cluster* to the response headers.\nThe appended value is determined by the :option:`--service-cluster` command line option.\n\n.. 
_arch_overview_health_checking_degraded:\n\nDegraded health\n---------------\nWhen using the HTTP health checker, an upstream host can return ``x-envoy-degraded`` to inform the\nhealth checker that the host is degraded. See :ref:`here <arch_overview_load_balancing_degraded>` for\nhow this affects load balancing.\n\n\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/degraded.rst",
    "content": ".. _arch_overview_load_balancing_degraded:\n\nDegraded endpoints\n------------------\n\nEnvoy supports marking certain endpoints as degraded, meaning that they are able to receive\ntraffic, but should only receive traffic once there are not sufficient healthy hosts available.\n\nRouting to degraded hosts can be thought of as similar to routing to hosts in a\nlower :ref:`priority <arch_overview_load_balancing_priority_levels>`, although\ndegraded hosts will count against their original priority's health percentage\nfor the purposes of computing traffic spillover. As the amount of healthy hosts\navailable is no longer sufficient to handle 100% of the load, traffic will\nspill over to degraded hosts using the same mechanism as priority spillover for\nhealthy hosts. This ensures that traffic is gradually shifted to degraded hosts\nas it becomes necessary.\n\n\n+--------------------------------+------------------------------+-------------------------------+\n| P=0 healthy/degraded/unhealthy | Traffic to P=0 healthy hosts | Traffic to P=0 degraded hosts |\n+================================+==============================+===============================+\n| 100%/0%/0%                     | 100%                         |   0%                          |\n+--------------------------------+------------------------------+-------------------------------+\n| 71%/0%/29%                     | 100%                         |   0%                          |\n+--------------------------------+------------------------------+-------------------------------+\n| 71%/29%/0%                     | 99%                          |   1%                          |\n+--------------------------------+------------------------------+-------------------------------+\n| 25%/65%/10%                    | 35%                          |   65%                         |\n+--------------------------------+------------------------------+-------------------------------+\n| 5%/0%/95%               
       | 100%                         |   0%                          |\n+--------------------------------+------------------------------+-------------------------------+\n\nEndpoints can be marked as degraded by using active health checking and having the upstream host\nreturn a :ref:`special header <arch_overview_health_checking_degraded>`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst",
    "content": ".. _arch_overview_load_balancing_types:\n\nSupported load balancers\n------------------------\n\nWhen a filter needs to acquire a connection to a host in an upstream cluster, the cluster manager\nuses a load balancing policy to determine which host is selected. The load balancing policies are\npluggable and are specified on a per upstream cluster basis in the :ref:`configuration\n<envoy_v3_api_msg_config.cluster.v3.Cluster>`. Note that if no active health checking policy is :ref:`configured\n<config_cluster_manager_cluster_hc>` for a cluster, all upstream cluster members are considered\nhealthy, unless otherwise specified through\n:ref:`health_status <envoy_v3_api_field_config.endpoint.v3.LbEndpoint.health_status>`.\n\n.. _arch_overview_load_balancing_types_round_robin:\n\nWeighted round robin\n^^^^^^^^^^^^^^^^^^^^\n\nThis is a simple policy in which each available upstream host is selected in round\nrobin order. If :ref:`weights\n<envoy_v3_api_field_config.endpoint.v3.LbEndpoint.load_balancing_weight>` are assigned to\nendpoints in a locality, then a weighted round robin schedule is used, where\nhigher weighted endpoints will appear more often in the rotation to achieve the\neffective weighting.\n\n.. _arch_overview_load_balancing_types_least_request:\n\nWeighted least request\n^^^^^^^^^^^^^^^^^^^^^^\n\nThe least request load balancer uses different algorithms depending on whether hosts have the\nsame or different weights.\n\n* *all weights equal*: An O(1) algorithm which selects N random available hosts as specified in the\n  :ref:`configuration <envoy_v3_api_msg_config.cluster.v3.Cluster.LeastRequestLbConfig>` (2 by default) and picks the\n  host which has the fewest active requests (`Mitzenmacher et al.\n  <https://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf>`_ has shown that this\n  approach is nearly as good as an O(N) full scan). This is also known as P2C (power of two\n  choices). 
The P2C load balancer has the property that a host with the highest number of active\n  requests in the cluster will never receive new requests. It will be allowed to drain until it is\n  less than or equal to all of the other hosts.\n* *all weights not equal*:  If two or more hosts in the cluster have different load balancing\n  weights, the load balancer shifts into a mode where it uses a weighted round robin schedule in\n  which weights are dynamically adjusted based on the host's request load at the time of selection.\n\n  In this case the weights are calculated at the time a host is picked using the following formula:\n\n  `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`.\n\n  :ref:`active_request_bias<envoy_v3_api_field_config.cluster.v3.Cluster.LeastRequestLbConfig.active_request_bias>`\n  can be configured via runtime and defaults to 1.0. It must be greater than or equal to 0.0.\n\n  The larger the active request bias is, the more aggressively active requests will lower the\n  effective weight.\n\n  If `active_request_bias` is set to 0.0, the least request load balancer behaves like the round\n  robin load balancer and ignores the active request count at the time of picking.\n\n  For example, if active_request_bias is 1.0, a host with weight 2 and an active request count of 4\n  will have an effective weight of 2 / (4 + 1)^1 = 0.4. This algorithm provides good balance at\n  steady state but may not adapt to load imbalance as quickly. Additionally, unlike P2C, a host will\n  never truly drain, though it will receive fewer requests over time.\n\n.. _arch_overview_load_balancing_types_ring_hash:\n\nRing hash\n^^^^^^^^^\n\nThe ring/modulo hash load balancer implements consistent hashing to upstream hosts. Each host is\nmapped onto a circle (the \"ring\") by hashing its address; each request is then routed to a host by\nhashing some property of the request, and finding the nearest corresponding host clockwise around\nthe ring. 
This technique is also commonly known as `\"Ketama\" <https://github.com/RJ/ketama>`_\nhashing, and like all hash-based load balancers, it is only effective when protocol routing is used\nthat specifies a value to hash on.\n\nEach host is hashed and placed on the ring some number of times proportional to its weight. For\nexample, if host A has a weight of 1 and host B has a weight of 2, then there might be three entries\non the ring: one for host A and two for host B. This doesn't actually provide the desired 2:1\npartitioning of the circle, however, since the computed hashes could be coincidentally very close to\none another; so it is necessary to multiply the number of hashes per host---for example inserting\n100 entries on the ring for host A and 200 entries for host B---to better approximate the desired\ndistribution. Best practice is to explicitly set\n:ref:`minimum_ring_size<envoy_v3_api_field_config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size>` and\n:ref:`maximum_ring_size<envoy_v3_api_field_config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size>`, and monitor\nthe :ref:`min_hashes_per_host and max_hashes_per_host\ngauges<config_cluster_manager_cluster_stats_ring_hash_lb>` to ensure good distribution. With the\nring partitioned appropriately, the addition or removal of one host from a set of N hosts will\naffect only 1/N requests.\n\nWhen priority based load balancing is in use, the priority level is also chosen by hash, so the\nendpoint selected will still be consistent when the set of backends is stable.\n\n.. _arch_overview_load_balancing_types_maglev:\n\nMaglev\n^^^^^^\n\nThe Maglev load balancer implements consistent hashing to upstream hosts. It uses the algorithm\ndescribed in section 3.4 of `this paper <https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/44824.pdf>`_\nwith a fixed table size of 65537 (see section 5.3 of the same paper). 
Maglev can be used as a drop\nin replacement for the :ref:`ring hash load balancer <arch_overview_load_balancing_types_ring_hash>`\nany place in which consistent hashing is desired. Like the ring hash load balancer, a consistent\nhashing load balancer is only effective when protocol routing is used that specifies a value to\nhash on.\n\nThe table construction algorithm places each host in the table some number of times proportional\nto its weight, until the table is completely filled. For example, if host A has a weight of 1 and\nhost B has a weight of 2, then host A will have 21,846 entries and host B will have 43,691 entries\n(totaling 65,537 entries). The algorithm attempts to place each host in the table at least once,\nregardless of the configured host and locality weights, so in some extreme cases the actual\nproportions may differ from the configured weights. For example, if the total number of hosts is\nlarger than the fixed table size, then some hosts will get 1 entry each and the rest will get 0,\nregardless of weight. Best practice is to monitor the :ref:`min_entries_per_host and\nmax_entries_per_host gauges <config_cluster_manager_cluster_stats_maglev_lb>` to ensure no hosts\nare underrepresented or missing.\n\nIn general, when compared to the ring hash (\"ketama\") algorithm, Maglev has substantially faster\ntable lookup build times as well as host selection times (approximately 10x and 5x respectively\nwhen using a large ring size of 256K entries). The downside of Maglev is that it is not as stable\nas ring hash. More keys will move position when hosts are removed (simulations show approximately\ndouble the keys will move). With that said, for many applications including Redis, Maglev is very\nlikely a superior drop in replacement for ring hash. The advanced reader can use\n:repo:`this benchmark </test/common/upstream/load_balancer_benchmark.cc>` to compare ring hash\nversus Maglev with different parameters.\n\n.. 
_arch_overview_load_balancing_types_random:\n\nRandom\n^^^^^^\n\nThe random load balancer selects a random available host. The random load balancer generally performs\nbetter than round robin if no health checking policy is configured. Random selection avoids bias\ntowards the host in the set that comes after a failed host.\n\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/load_balancing.rst",
    "content": "Load Balancing\n==============\n\n.. toctree::\n  :maxdepth: 2\n\n  overview\n  load_balancers\n  priority\n  degraded\n  locality_weight\n  overprovisioning\n  panic_threshold\n  original_dst\n  zone_aware\n  subsets\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst",
    "content": ".. _arch_overview_load_balancing_locality_weighted_lb:\n\nLocality weighted load balancing\n--------------------------------\n\nOne approach to determining how to weight assignments across different zones\nand geographical locations is by using explicit weights supplied via EDS in the\n:ref:`LocalityLbEndpoints <envoy_v3_api_msg_config.endpoint.v3.LocalityLbEndpoints>` message.\nThis approach is mutually exclusive with \n:ref:`zone aware routing <arch_overview_load_balancing_zone_aware_routing>`, since\nin the case of locality aware LB, we rely on the management server to provide the\nlocality weighting, rather than the Envoy-side heuristics used in zone aware\nrouting.\n\nWhen all endpoints are available, the locality is picked using a weighted\nround-robin schedule, where the locality weight is used for weighting. When some\nendpoints in a locality are unavailable, we adjust the locality weight to reflect\nthis. As with :ref:`priority levels\n<arch_overview_load_balancing_priority_levels>`, we assume an\n:ref:`over-provision factor <arch_overview_load_balancing_overprovisioning_factor>`\n(default value 1.4), which means we do not perform any weight\nadjustment when only a small number of endpoints in a locality are unavailable.\n\nAssume a simple set-up with 2 localities X and Y, where X has a locality weight\nof 1 and Y has a locality weight of 2, L=Y 100% available,\nwith default overprovisioning factor 1.4.\n\n+----------------------------+---------------------------+----------------------------+\n| L=X healthy endpoints      | Percent of traffic to L=X |  Percent of traffic to L=Y |\n+============================+===========================+============================+\n| 100%                       | 33%                       |   67%                      |\n+----------------------------+---------------------------+----------------------------+\n| 70%                        | 33%                       |   67%                      
|\n+----------------------------+---------------------------+----------------------------+\n| 69%                        | 32%                       |   68%                      |\n+----------------------------+---------------------------+----------------------------+\n| 50%                        | 26%                       |   74%                      |\n+----------------------------+---------------------------+----------------------------+\n| 25%                        | 15%                       |   85%                      |\n+----------------------------+---------------------------+----------------------------+\n| 0%                         | 0%                        |   100%                     |\n+----------------------------+---------------------------+----------------------------+\n\n\nTo sum this up in pseudo algorithms:\n\n::\n\n  availability(L_X) = 140 * available_X_upstreams / total_X_upstreams\n  effective_weight(L_X) = locality_weight_X * min(100, availability(L_X))\n  load to L_X = effective_weight(L_X) / Σ_c(effective_weight(L_c))\n\nNote that the locality weighted pick takes place after the priority level is\npicked. The load balancer follows these steps:\n\n1. Pick :ref:`priority level <arch_overview_load_balancing_priority_levels>`.\n2. Pick locality (as described in this section) within priority level from (1).\n3. 
Pick endpoint using cluster specified load balancer within locality from (2).\n\nLocality weighted load balancing is configured by setting\n:ref:`locality_weighted_lb_config\n<envoy_v3_api_field_config.cluster.v3.Cluster.CommonLbConfig.locality_weighted_lb_config>` in the\ncluster configuration and by providing weights via :ref:`load_balancing_weight\n<envoy_v3_api_field_config.endpoint.v3.LocalityLbEndpoints.load_balancing_weight>` and\nidentifying the location of the upstream hosts via :ref:`locality\n<envoy_v3_api_field_config.endpoint.v3.LocalityLbEndpoints.locality>` in\n:ref:`LocalityLbEndpoints <envoy_v3_api_msg_config.endpoint.v3.LocalityLbEndpoints>`.\n\nThis feature is not compatible with :ref:`load balancer subsetting\n<arch_overview_load_balancer_subsets>`, since it is not straightforward to\nreconcile locality level weighting with sensible weights for individual subsets.\n\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/original_dst.rst",
    "content": ".. _arch_overview_load_balancing_types_original_destination:\n\nOriginal destination\n--------------------\n\nThis is a special purpose load balancer that can only be used with :ref:`an original destination\ncluster <arch_overview_service_discovery_types_original_destination>`. Upstream host is selected\nbased on the downstream connection metadata, i.e., connections are opened to the same address as the\ndestination address of the incoming connection was before the connection was redirected to\nEnvoy. New destinations are added to the cluster by the load balancer on-demand, and the cluster\n:ref:`periodically <envoy_v3_api_field_config.cluster.v3.Cluster.cleanup_interval>` cleans out unused hosts\nfrom the cluster. No other :ref:`load balancing policy <envoy_v3_api_field_config.cluster.v3.Cluster.lb_policy>` can\nbe used with original destination clusters.\n\n.. _arch_overview_load_balancing_types_original_destination_request_header:\n\nOriginal destination host request header\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nEnvoy can also pick up the original destination from an HTTP header called\n:ref:`x-envoy-original-dst-host <config_http_conn_man_headers_x-envoy-original-dst-host>`.\nPlease note that the fully resolved IP address should be passed in this header. For example if a request has to be\nrouted to a host with IP address 10.195.16.237 at port 8888, the request header value should be set as\n``10.195.16.237:8888``.\n\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst",
    "content": ".. _arch_overview_load_balancing_overprovisioning_factor:\n\nOverprovisioning Factor\n-----------------------\nPriority levels and localities are considered overprovisioned with\n:ref:`this percentage <envoy_v3_api_field_config.endpoint.v3.ClusterLoadAssignment.Policy.overprovisioning_factor>`.\nEnvoy doesn't consider a priority level or locality unavailable until the\nfraction of available hosts multiplied by the overprovisioning factor drops\nbelow 100. The default value is 140 (in percentage, which means 140%), so a priority level or locality will not be\nconsidered unavailable until the percentage of available endpoints goes below 72%.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/overview.rst",
    "content": ".. _arch_overview_load_balancing:\n\nOverview\n========\n\nWhat is Load Balancing?\n-----------------------\n\nLoad balancing is a way of distributing traffic between multiple hosts within a single upstream cluster in order to effectively make use of available resources. There are many different ways\nof accomplishing this, so Envoy provides several different load balancing strategies.\nAt a high level, we can break these strategies into two categories: global\nload balancing and distributed load balancing.\n\n.. _arch_overview_load_balancing_distributed_lb:\n\nDistributed Load Balancing\n--------------------------\n\nDistributed load balancing refers to having Envoy itself determine how load should be distributed\nto the endpoints based on knowing the location of the upstream hosts.\n\nExamples\n^^^^^^^^\n\n* :ref:`Active health checking <arch_overview_health_checking>`: by health checking upstream\n  hosts, Envoy can adjust the weights of priorities and localities to account for unavailable\n  hosts.\n* :ref:`Zone aware routing <arch_overview_load_balancing_zone_aware_routing>`: this can be used\n  to make Envoy prefer closer endpoints without having to explicitly configure priorities in the\n  control plane.\n* :ref:`Load balancing algorithms <arch_overview_load_balancing_types>`: Envoy can use several\n  different algorithms to use the provided weights to determine which host to select.\n\n.. _arch_overview_load_balancing_global_lb:\n\nGlobal Load Balancing\n---------------------\n\nGlobal load balancing refers to having a single, global authority that decides how load should\nbe distributed between hosts. 
For Envoy, this would be done by the control plane, which is able\nto adjust the load applied to individual endpoints by specifying various parameters, such as\npriority, locality weight, endpoint weight and endpoint health.\n\nA simple example would be to have the control plane assign hosts to different\n:ref:`priorities <arch_overview_load_balancing_priority_levels>` based on network topology\nto ensure that hosts that require fewer network hops are preferred. This is similar to\nzone-aware routing, but is handled by the control plane instead of by Envoy. A benefit of doing\nit in the control plane is that it gets around some of the\n:ref:`limitations <arch_overview_load_balancing_zone_aware_routing_preconditions>` of zone aware routing.\n\nA more complicated setup could have resource usage being reported to the control plane, allowing\nit to adjust the weight of endpoints or :ref:`localities <arch_overview_load_balancing_locality_weighted_lb>`\nto account for the current resource usage, attempting to route new requests to idle hosts over busy ones.\n\nBoth Distributed and Global\n---------------------------\n\nMost sophisticated deployments will make use of features from both categories. For instance, global load\nbalancing could be used to define the high level routing priorities and weights, while distributed load balancing\ncould be used to react to changes in the system (e.g. using active health checking). By combining these you can\nget the best of both worlds: a globally aware authority that can control the flow of traffic on the macro\nlevel while still having the individual proxies be able to react to changes on the micro level.\n\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst",
    "content": ".. _arch_overview_load_balancing_panic_threshold:\n\nPanic threshold\n---------------\n\nDuring load balancing, Envoy will generally only consider available (healthy or degraded) hosts in\nan upstream cluster. However, if the percentage of available hosts in the cluster becomes too low,\nEnvoy will disregard health status and balance either amongst all hosts or no hosts. This is known\nas the *panic threshold*. The default panic threshold is 50%. This is\n:ref:`configurable <config_cluster_manager_cluster_runtime>` via runtime as well as in the\n:ref:`cluster configuration <envoy_v3_api_field_config.cluster.v3.Cluster.CommonLbConfig.healthy_panic_threshold>`.\nThe panic threshold is used to avoid a situation in which host failures cascade throughout the\ncluster as load increases.\n\nThere are two modes Envoy can choose from when in a panic state: traffic will either be sent to all\nhosts, or will be sent to no hosts (and therefore will always fail). This is configured in the\n:ref:`cluster configuration <envoy_v3_api_field_config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.fail_traffic_on_panic>`.\nChoosing to fail traffic during panic scenarios can help avoid overwhelming potentially failing\nupstream services, as it will reduce the load on the upstream service before all hosts have been\ndetermined to be unhealthy. However, it eliminates the possibility of *some* requests succeeding\neven when many or all hosts in a cluster are unhealthy. This may be a good tradeoff to make if a\ngiven service is observed to fail in an all-or-nothing pattern, as it will more quickly cut off\nrequests to the cluster. Conversely, if a cluster typically continues to successfully service *some*\nrequests even when degraded, enabling this option is probably unhelpful.\n\nPanic thresholds work in conjunction with priorities. If the number of available hosts in a given\npriority goes down, Envoy will try to shift some traffic to lower priorities. 
If it succeeds in\nfinding enough available hosts in lower priorities, Envoy will disregard panic thresholds. In\nmathematical terms, if normalized total availability across all priority levels is 100%, Envoy\ndisregards panic thresholds and continues to distribute traffic load across priorities according to\nthe algorithm described :ref:`here <arch_overview_load_balancing_priority_levels>`.\nHowever, when normalized total availability drops below 100%, Envoy assumes that there are not enough\navailable hosts across all priority levels. It continues to distribute traffic load across priorities,\nbut if a given priority level's availability is below the panic threshold, traffic will go to all\n(or no) hosts in that priority level regardless of their availability.\n\nThe following examples explain the relationship between normalized total availability and panic threshold.\nIt is assumed that the default value of 50% is used for the panic threshold.\n\nAssume a simple set-up with 2 priority levels, P=1 100% healthy. 
In this scenario normalized total\nhealth is always 100%, P=0 never enters panic mode, and Envoy is able to shift as much traffic as\nnecessary to P=1.\n\n+-------------+------------+--------------+------------+--------------+--------------+\n| P=0 healthy | Traffic    | P=0 in panic | Traffic    | P=1 in panic | normalized   |\n| endpoints   |  to P=0    |              | to P=1     |              | total health |\n+=============+============+==============+============+==============+==============+\n| 72%         |  100%      | NO           |    0%      | NO           |  100%        |\n+-------------+------------+--------------+------------+--------------+--------------+\n| 71%         |   99%      | NO           |    1%      | NO           |  100%        |\n+-------------+------------+--------------+------------+--------------+--------------+\n| 50%         |   70%      | NO           |   30%      | NO           |  100%        |\n+-------------+------------+--------------+------------+--------------+--------------+\n| 25%         |   35%      | NO           |   65%      | NO           |  100%        |\n+-------------+------------+--------------+------------+--------------+--------------+\n| 0%          |    0%      | NO           |  100%      | NO           |  100%        |\n+-------------+------------+--------------+------------+--------------+--------------+\n\nIf P=1 becomes unhealthy, panic threshold continues to be disregarded until the sum of the health\nP=0 + P=1 goes below 100%. 
At this point Envoy starts checking panic threshold value for each\npriority.\n\n+-------------+-------------+----------+--------------+----------+--------------+-------------+\n| P=0 healthy | P=1 healthy | Traffic  | P=0 in panic | Traffic  | P=1 in panic | normalized  |\n| endpoints   | endpoints   | to P=0   |              | to P=1   |              | total health|\n+=============+=============+==========+==============+==========+==============+=============+\n| 72%         |  72%        |  100%    | NO           |   0%     | NO           |  100%       |\n+-------------+-------------+----------+--------------+----------+--------------+-------------+\n| 71%         |  71%        |  99%     | NO           |   1%     | NO           |  100%       |\n+-------------+-------------+----------+--------------+----------+--------------+-------------+\n| 50%         |  60%        |  70%     | NO           |   30%    | NO           |  100%       |\n+-------------+-------------+----------+--------------+----------+--------------+-------------+\n| 25%         |  100%       |  35%     | NO           |   65%    | NO           |  100%       |\n+-------------+-------------+----------+--------------+----------+--------------+-------------+\n| 25%         |  25%        |  50%     | YES          |   50%    | YES          |  70%        |\n+-------------+-------------+----------+--------------+----------+--------------+-------------+\n| 5%          |  65%        |  7%      | YES          |   93%    | NO           |  98%        |\n+-------------+-------------+----------+--------------+----------+--------------+-------------+\n\nPanic mode can be disabled by setting the panic threshold to 0%.\n\nLoad distribution is calculated as described above as long as there are priority levels not in panic mode.\nWhen all priority levels enter the panic mode, load calculation algorithm changes.\nIn this case each priority level receives traffic relative to the number of hosts in that priority 
level\nin relation to the number of hosts in all priority levels.\nFor example, if there are 2 priorities P=0 and P=1 and each of them consists of 5 hosts, each level will \nreceive 50% of the traffic.\nIf there are 2 hosts in priority P=0 and 8 hosts in priority P=1, priority P=0 will receive 20% of the \ntraffic and priority P=1 will receive 80% of the traffic.\n\nHowever, if the panic threshold is 0% for any priority, that priority will never enter panic mode.\nIn this case if all hosts are unhealthy, Envoy will fail to select a host and will instead immediately\nreturn error responses with \"503 - no healthy upstream\".\n\nNote that panic thresholds can be configured *per-priority*.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/priority.rst",
    "content": ".. _arch_overview_load_balancing_priority_levels:\n\nPriority levels\n------------------\n\nDuring load balancing, Envoy will generally only consider hosts configured at the highest priority\nlevel. For each EDS :ref:`LocalityLbEndpoints<envoy_v3_api_msg_config.endpoint.v3.LocalityLbEndpoints>` an optional\npriority may also be specified. When endpoints at the highest priority level (P=0) are healthy, all\ntraffic will land on endpoints in that priority level. As endpoints for the highest priority level\nbecome unhealthy, traffic will begin to trickle to lower priority levels.\n\nThe system can be overprovisioned with a configurable\n:ref:`overprovisioning factor <arch_overview_load_balancing_overprovisioning_factor>`, which\ncurrently defaults to 1.4 (this document will assume this value). If 80% of the endpoints in a\npriority level are healthy, that level is still considered fully healthy because 80*1.4 > 100.\nSo, level 0 endpoints will continue to receive all traffic until less than ~71.4% of them are\nhealthy.\n\nThe priority level logic works with integer health scores. The health score of a level is\n(percent of healthy hosts in the level) * (overprovisioning factor), capped at 100%. P=0\nendpoints receive (level 0's health score) percent of the traffic, with the rest flowing\nto P=1 (assuming P=1 is 100% healthy - more on that later). For instance, when 50% of P=0\nendpoints are healthy, they will receive 50 * 1.4 = 70% of the traffic.\nThe integer percents of traffic that each priority level receives are collectively called the\nsystem's \"priority load\". 
More examples (with 2 priority levels, P=1 100% healthy):\n\n+----------------------------+----------------+-----------------+\n| P=0 healthy endpoints      | Traffic to P=0 |  Traffic to P=1 |\n+============================+================+=================+\n| 100%                       | 100%           |   0%            |\n+----------------------------+----------------+-----------------+\n| 72%                        | 100%           |   0%            |\n+----------------------------+----------------+-----------------+\n| 71%                        | 99%            |   1%            |\n+----------------------------+----------------+-----------------+\n| 50%                        | 70%            |   30%           |\n+----------------------------+----------------+-----------------+\n| 25%                        | 35%            |   65%           |\n+----------------------------+----------------+-----------------+\n| 0%                         | 0%             |   100%          |\n+----------------------------+----------------+-----------------+\n\n.. attention::\n\n  In order for the load distribution algorithm and normalized total health calculation to work\n  properly, each priority level must be able to handle (100% * overprovision factor) of the\n  traffic: Envoy assumes a 100% healthy P=1 can take over entirely for an unhealthy P=0, etc.\n  If P=0 has 10 hosts but P=1 only has 2 hosts, that assumption probably will not hold.\n\nThe health score represents a level's current ability to handle traffic, after factoring in how\noverprovisioned the level originally was, and how many endpoints are currently unhealthy.\nTherefore, if the sum across all levels' health scores is < 100, then Envoy believes there are not\nenough healthy endpoints to fully handle the traffic. This sum is called the \"normalized total\nhealth.\" When normalized total health drops below 100, traffic is distributed after normalizing\nthe levels' health scores to that sub-100 total. E.g. 
healths of {20, 30} (yielding a normalized\ntotal health of 50) would be normalized, and result in a priority load of {40%, 60%} of traffic.\n\n+------------------------+-------------------------+-----------------+----------------+\n| P=0 healthy endpoints  | P=1 healthy endpoints   | Traffic to  P=0 | Traffic to P=1 |\n+========================+=========================+=================+================+\n| 100%                   |  100%                   | 100%            |   0%           |\n+------------------------+-------------------------+-----------------+----------------+\n| 72%                    |  72%                    | 100%            |   0%           |\n+------------------------+-------------------------+-----------------+----------------+\n| 71%                    |  71%                    | 99%             |   1%           |\n+------------------------+-------------------------+-----------------+----------------+\n| 50%                    |  50%                    | 70%             |   30%          |\n+------------------------+-------------------------+-----------------+----------------+\n| 25%                    |  100%                   | 35%             |   65%          |\n+------------------------+-------------------------+-----------------+----------------+\n| 25%                    |  25%                    | 50%             |   50%          |\n+------------------------+-------------------------+-----------------+----------------+\n\nAs more priorities are added, each level consumes load equal to its normalized effective health,\nunless the healths of the levels above it sum to 100%, in which case it receives no load.\n\n+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+\n| P=0 healthy endpoints | P=1 healthy endpoints | P=2 healthy endpoints | Traffic to P=0 | Traffic to P=1 | Traffic to P=2 
|\n+=======================+=======================+=======================+================+================+================+\n| 100%                  |  100%                 |  100%                 | 100%           |   0%           |   0%           |\n+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+\n| 72%                   |  72%                  |  100%                 | 100%           |   0%           |   0%           |\n+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+\n| 71%                   |  71%                  |  100%                 | 99%            |   1%           |   0%           |\n+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+\n| 50%                   |  50%                  |  100%                 | 70%            |   30%          |   0%           |\n+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+\n| 25%                   |  100%                 |  100%                 | 35%            |   65%          |   0%           |\n+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+\n| 25%                   |  25%                  |  100%                 | 35%            |   35%          |   30%          |\n+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+\n| 25%                   |  25%                  |   20%                 | 36%            |   36%          |   28%          |\n+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+\n\nTo sum this up in pseudo algorithms:\n\n::\n\n  health(P_X) = min(100, 1.4 * 100 * healthy_P_X_backends 
/ total_P_X_backends)\n  normalized_total_health = min(100, Σ(health(P_0)...health(P_X)))\n  priority_load(P_0) = min(100, health(P_0) * 100 / normalized_total_health)\n  priority_load(P_X) = min(100 - Σ(priority_load(P_0)..priority_load(P_X-1)),\n                           health(P_X) * 100 / normalized_total_health)\n\nNote: This section talked about healthy priorities, but this also extends to\n:ref:`degraded priorities <arch_overview_load_balancing_degraded>`.\n\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/subsets.rst",
    "content": ".. _arch_overview_load_balancer_subsets:\n\nLoad Balancer Subsets\n---------------------\n\nEnvoy may be configured to divide hosts within an upstream cluster into subsets based on metadata\nattached to the hosts. Routes may then specify the metadata that a host must match in order to be\nselected by the load balancer, with the option of falling back to a predefined set of hosts,\nincluding any host.\n\nSubsets use the load balancer policy specified by the cluster. The original destination policy may\nnot be used with subsets because the upstream hosts are not known in advance. Subsets are compatible\nwith zone aware routing, but be aware that the use of subsets may easily violate the minimum hosts\ncondition described above.\n\nIf subsets are :ref:`configured <envoy_v3_api_field_config.cluster.v3.Cluster.lb_subset_config>` and a route\nspecifies no metadata or no subset matching the metadata exists, the subset load balancer initiates\nits fallback policy. The default policy is ``NO_FALLBACK``, in which case the request fails as if\nthe cluster had no hosts. Conversely, the ``ANY_ENDPOINT`` fallback policy load balances across all\nhosts in the cluster, without regard to host metadata. Finally, the ``DEFAULT_SUBSET`` causes\nfallback to load balance among hosts that match a specific set of metadata. It is possible to\noverride fallback policy for specific subset selector.\n\nSubsets must be predefined to allow the subset load balancer to efficiently select the correct\nsubset of hosts. Each definition is a set of keys, which translates to zero or more\nsubsets. Conceptually, each host that has a metadata value for all of the keys in a definition is\nadded to a subset specific to its key-value pairs. If no host has all the keys, no subsets result\nfrom the definition. 
Multiple definitions may be provided, and a single host may appear in multiple\nsubsets if it matches multiple definitions.\n\nDuring routing, the route's metadata match configuration is used to find a specific subset. If there\nis a subset with the exact keys and values specified by the route, the subset is used for load\nbalancing. Otherwise, the fallback policy is used. The cluster's subset configuration must,\ntherefore, contain a definition that has the same keys as a given route in order for subset load\nbalancing to occur.\n\nSubsets can be configured with only a single host in each subset, which can be used in use cases\nsimilar to :ref:`Maglev <arch_overview_load_balancing_types_maglev>` or\n:ref:`ring hash <arch_overview_load_balancing_types_ring_hash>`, such as load balancing based on a cookie,\nbut when it is important to select the same host even after new hosts are added to the cluster. Endpoint\nconfiguration changes may use less CPU if :ref:`single_host_per_subset <envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.single_host_per_subset>`\nis enabled.\n\nHost metadata is only supported when hosts are defined using\n:ref:`ClusterLoadAssignments <envoy_v3_api_msg_config.endpoint.v3.ClusterLoadAssignment>`. ClusterLoadAssignments are\navailable via EDS or the Cluster :ref:`load_assignment <envoy_v3_api_field_config.cluster.v3.Cluster.load_assignment>`\nfield. Host metadata for subset load balancing must be placed under the filter name ``\"envoy.lb\"``.\nSimilarly, route metadata match criteria use ``\"envoy.lb\"`` filter name. Host metadata may be\nhierarchical (e.g., the value for a top-level key may be a structured value or list), but the\nsubset load balancer only compares top-level keys and values. 
Therefore when using structured\nvalues, a route's match criteria will only match if an identical structured value appears in the\nhost's metadata.\n\nFinally, note that subset load balancing is not available for the\n:ref:`CLUSTER_PROVIDED <envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbPolicy.CLUSTER_PROVIDED>` load balancer\npolicies.\n\nExamples\n^^^^^^^^\n\nWe'll use simple metadata where all values are strings. Assume the following hosts are defined and\nassociated with a cluster:\n\n======  ======================\nHost    Metadata\n======  ======================\nhost1   v: 1.0, stage: prod\nhost2   v: 1.0, stage: prod\nhost3   v: 1.1, stage: canary\nhost4   v: 1.2-pre, stage: dev\n======  ======================\n\nThe cluster may enable subset load balancing like this:\n\n::\n\n  ---\n  name: cluster-name\n  type: EDS\n  eds_cluster_config:\n    eds_config:\n      path: '.../eds.conf'\n  connect_timeout:\n    seconds: 10\n  lb_policy: LEAST_REQUEST\n  lb_subset_config:\n    fallback_policy: DEFAULT_SUBSET\n    default_subset:\n      stage: prod\n    subset_selectors:\n    - keys:\n      - v\n      - stage\n    - keys:\n      - stage\n      fallback_policy: NO_FALLBACK\n\nThe following table describes some routes and the result of their application to the\ncluster. 
Typically the match criteria would be used with routes matching specific aspects of the\nrequest, such as the path or header information.\n\n======================  =============  ======================================================================\nMatch Criteria          Balances Over  Reason\n======================  =============  ======================================================================\nstage: canary           host3          Subset of hosts selected\nv: 1.2-pre, stage: dev  host4          Subset of hosts selected\nv: 1.0                  host1, host2   Fallback: No subset selector for \"v\" alone\nother: x                host1, host2   Fallback: No subset selector for \"other\"\n(none)                  host1, host2   Fallback: No subset requested\nstage: test             empty cluster  As fallback policy is overridden per selector with \"NO_FALLBACK\" value\n======================  =============  ======================================================================\n\nMetadata match criteria may also be specified on a route's weighted clusters. 
Metadata match\ncriteria from the selected weighted cluster are merged with and override the criteria from the\nroute:\n\n====================  ===============================  ====================\nRoute Match Criteria  Weighted Cluster Match Criteria  Final Match Criteria\n====================  ===============================  ====================\nstage: canary         stage: prod                      stage: prod\nv: 1.0                stage: prod                      v: 1.0, stage: prod\nv: 1.0, stage: prod   stage: canary                    v: 1.0, stage: canary\nv: 1.0, stage: prod   v: 1.1, stage: canary            v: 1.1, stage: canary\n(none)                v: 1.0                           v: 1.0\nv: 1.0                (none)                           v: 1.0\n====================  ===============================  ====================\n\n\nExample Host With Metadata\n**************************\n\nAn EDS ``LbEndpoint`` with host metadata:\n\n::\n\n  ---\n  endpoint:\n    address:\n      socket_address:\n        protocol: TCP\n        address: 127.0.0.1\n        port_value: 8888\n  metadata:\n    filter_metadata:\n      envoy.lb:\n        version: '1.0'\n        stage: 'prod'\n\n\nExample Route With Metadata Criteria\n************************************\n\nAn RDS ``Route`` with metadata match criteria:\n\n::\n\n  ---\n  match:\n    prefix: /\n  route:\n    cluster: cluster-name\n    metadata_match:\n      filter_metadata:\n        envoy.lb:\n          version: '1.0'\n          stage: 'prod'\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_balancing/zone_aware.rst",
    "content": ".. _arch_overview_load_balancing_zone_aware_routing:\n\nZone aware routing\n------------------\n\nWe use the following terminology:\n\n* **Originating/Upstream cluster**: Envoy routes requests from an originating cluster to an upstream\n  cluster.\n* **Local zone**: The same zone that contains a subset of hosts in both the originating and\n  upstream clusters.\n* **Zone aware routing**: Best effort routing of requests to an upstream cluster host in the local\n  zone.\n\nIn deployments where hosts in originating and upstream clusters belong to different zones\nEnvoy performs zone aware routing. There are several preconditions before zone aware routing can be\nperformed:\n\n.. _arch_overview_load_balancing_zone_aware_routing_preconditions:\n\n* Both originating and upstream cluster are not in\n  :ref:`panic mode <arch_overview_load_balancing_panic_threshold>`.\n* Zone aware :ref:`routing is enabled <config_cluster_manager_cluster_runtime_zone_routing>`.\n* The originating cluster has the same number of zones as the upstream cluster.\n* The upstream cluster has enough hosts. See\n  :ref:`here <config_cluster_manager_cluster_runtime_zone_routing>` for more information.\n\nThe purpose of zone aware routing is to send as much traffic to the local zone in the upstream\ncluster as possible while roughly maintaining the same number of requests per second across all\nupstream hosts (depending on load balancing policy).\n\nEnvoy tries to push as much traffic as possible to the local upstream zone as long as\nroughly the same number of requests per host in the upstream cluster are maintained. The decision of\nwhether Envoy routes to the local zone or performs cross zone routing depends on the percentage of\nhealthy hosts in the originating cluster and upstream cluster in the local zone. 
There are two cases\nwith regard to percentage relations in the local zone between originating and upstream clusters:\n\n* The originating cluster local zone percentage is greater than the one in the upstream cluster.\n  In this case we cannot route all requests from the local zone of the originating cluster to the\n  local zone of the upstream cluster because that will lead to request imbalance across all upstream\n  hosts. Instead, Envoy calculates the percentage of requests that can be routed directly to the\n  local zone of the upstream cluster. The rest of the requests are routed cross zone. The specific\n  zone is selected based on the residual capacity of the zone (that zone will get some local zone\n  traffic and may have additional capacity Envoy can use for cross zone traffic).\n* The originating cluster local zone percentage is smaller than the one in upstream cluster.\n  In this case the local zone of the upstream cluster can get all of the requests from the\n  local zone of the originating cluster and also have some space to allow traffic from other zones\n  in the originating cluster (if needed).\n\nNote that when using multiple priorities, zone aware routing is currently only supported for P=0.\n\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/load_reporting_service.rst",
    "content": ".. _arch_overview_load_reporting_service:\n\nLoad Reporting Service (LRS)\n============================\n\nThe Load Reporting Service provides a mechanism by which Envoy can emit Load Reports to a management\nserver at a regular cadence.\n\nThis will initiate a bi-directional stream with a management server. Upon connecting, the management\nserver can send a :ref:`LoadStatsResponse <envoy_v3_api_msg_service.load_stats.v3.LoadStatsResponse>`\nto a node it is interested in getting the load reports for. Envoy in this node will start sending\n:ref:`LoadStatsRequest <envoy_v3_api_msg_service.load_stats.v3.LoadStatsRequest>`. This is done periodically\nbased on the :ref:`load reporting interval <envoy_v3_api_field_service.load_stats.v3.LoadStatsResponse.load_reporting_interval>`.\n\nEnvoy config with LRS can be found at :repo:`/examples/load-reporting-service/service-envoy-w-lrs.yaml`.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/outlier.rst",
    "content": ".. _arch_overview_outlier_detection:\n\nOutlier detection\n=================\n\nOutlier detection and ejection is the process of dynamically determining whether some number of\nhosts in an upstream cluster are performing unlike the others and removing them from the healthy\n:ref:`load balancing <arch_overview_load_balancing>` set. Performance might be along different axes\nsuch as consecutive failures, temporal success rate, temporal latency, etc. Outlier detection is a\nform of *passive* health checking. Envoy also supports :ref:`active health checking\n<arch_overview_health_checking>`. *Passive* and *active* health checking can be enabled together or\nindependently, and form the basis for an overall upstream health checking solution.\nOutlier detection is part of the :ref:`cluster configuration <envoy_v3_api_msg_config.cluster.v3.OutlierDetection>`\nand it needs filters to report errors, timeouts, and resets. Currently, the following filters support\noutlier detection: :ref:`http router <config_http_filters_router>`, \n:ref:`tcp proxy <config_network_filters_tcp_proxy>`  and :ref:`redis proxy <config_network_filters_redis_proxy>`.\n\nDetected errors fall into two categories: externally and locally originated errors. Externally generated errors\nare transaction specific and occur on the upstream server in response to the received request. For example, an HTTP server returning error code 500 or a redis server returning a payload which cannot be decoded. Those errors are generated on the upstream host after Envoy has connected to it successfully.\nLocally originated errors are generated by Envoy in response to an event which interrupted or prevented communication with the upstream host. Examples of locally originated errors are timeout, TCP reset, inability to connect to a specified port, etc.\n\nThe type of detected errors depends on the filter type. 
The :ref:`http router <config_http_filters_router>` filter, for example,\ndetects locally originated errors (timeouts, resets - errors related to connection to upstream host) and because it \nalso understands the HTTP protocol it reports\nerrors returned by the HTTP server (externally generated errors). In such a scenario, even when the connection to the upstream HTTP server is successful,\nthe transaction with the server may fail.\nBy contrast, the :ref:`tcp proxy <config_network_filters_tcp_proxy>` filter does not understand any protocol above\nthe TCP layer and reports only locally originated errors.\n\nUnder the default configuration (:ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` is *false*)\nlocally originated errors are not distinguished from externally generated (transaction) errors, all end up\nin the same bucket, and are compared against the\n:ref:`outlier_detection.consecutive_5xx<envoy_v3_api_field_config.cluster.v3.OutlierDetection.consecutive_5xx>`,\n:ref:`outlier_detection.consecutive_gateway_failure<envoy_v3_api_field_config.cluster.v3.OutlierDetection.consecutive_gateway_failure>` and\n:ref:`outlier_detection.success_rate_stdev_factor<envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_stdev_factor>` \nconfiguration items. For example, if connection to an upstream HTTP server fails twice because of timeout and \nthen, after successful connection establishment, the server returns error code 500 then the total error count will be 3.\n\nOutlier detection may also be configured to distinguish locally originated errors from externally originated (transaction) errors. 
\nIt is done via the\n:ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` configuration item.\nIn that mode locally originated errors are tracked by separate counters than externally originated \n(transaction) errors and \nthe outlier detector may be configured to react to locally originated errors and ignore externally originated errors \nor vice-versa.\n\nIt is important to understand that a cluster may be shared among several filter chains. If one filter chain\nejects a host based on its outlier detection type, other filter chains will be also affected even though their\noutlier detection type would not have ejected that host.\n\nEjection algorithm\n------------------\n\nDepending on the type of outlier detection, ejection either runs inline (for example in the case of\nconsecutive 5xx) or at a specified interval (for example in the case of periodic success rate). The\nejection algorithm works as follows:\n\n#. A host is determined to be an outlier.\n#. If no hosts have been ejected, Envoy will eject the host immediately. Otherwise, it checks to make\n   sure the number of ejected hosts is below the allowed threshold (specified via the\n   :ref:`outlier_detection.max_ejection_percent<envoy_v3_api_field_config.cluster.v3.OutlierDetection.max_ejection_percent>`\n   setting). If the number of ejected hosts is above the threshold, the host is not ejected.\n#. The host is ejected for some number of milliseconds. Ejection means that the host is marked\n   unhealthy and will not be used during load balancing unless the load balancer is in a\n   :ref:`panic <arch_overview_load_balancing_panic_threshold>` scenario. The number of milliseconds\n   is equal to the :ref:`outlier_detection.base_ejection_time_ms\n   <envoy_v3_api_field_config.cluster.v3.OutlierDetection.base_ejection_time>` value\n   multiplied by the number of times the host has been ejected. 
This causes hosts to get ejected\n   for longer and longer periods if they continue to fail.\n#. An ejected host will automatically be brought back into service after the ejection time has\n   been satisfied. Generally, outlier detection is used alongside :ref:`active health checking\n   <arch_overview_health_checking>` for a comprehensive health checking solution.\n\nDetection types\n---------------\n\nEnvoy supports the following outlier detection types:\n\nConsecutive 5xx\n^^^^^^^^^^^^^^^\n\nIn the default mode (:ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` is *false*) this detection type takes into account all generated errors: locally\noriginated and externally originated (transaction) errors.\nErrors generated by non-HTTP filters, like :ref:`tcp proxy <config_network_filters_tcp_proxy>` or  \n:ref:`redis proxy <config_network_filters_redis_proxy>` are internally mapped to HTTP 5xx codes and treated as such.\n\nIn split mode (:ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` is *true*) this detection type takes into account only externally originated (transaction) errors, ignoring locally originated errors.\nIf an upstream host is an HTTP-server, only 5xx types of error are taken into account (see :ref:`Consecutive Gateway Failure<consecutive_gateway_failure>` for exceptions).\nFor redis servers, served via   \n:ref:`redis proxy <config_network_filters_redis_proxy>` only malformed responses from the server are taken into account. \nProperly formatted responses, even when they carry an operational error (like index not found, access denied) are not taken into account.\n\nIf an upstream host returns some number of errors which are treated as consecutive 5xx type errors, it will be ejected. 
\nThe number of consecutive 5xx required for ejection is controlled by \nthe :ref:`outlier_detection.consecutive_5xx<envoy_v3_api_field_config.cluster.v3.OutlierDetection.consecutive_5xx>` value.\n\n.. _consecutive_gateway_failure:\n\nConsecutive Gateway Failure\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThis detection type takes into account a subset of 5xx errors, called \"gateway errors\" (502, 503 or 504 status code)\nand is supported only by the :ref:`http router <config_http_filters_router>`.\n\nIf an upstream host returns some number of consecutive \"gateway errors\" (502, 503 or 504 status\ncode), it will be ejected.\nThe number of consecutive gateway failures required for ejection is controlled by\nthe :ref:`outlier_detection.consecutive_gateway_failure\n<envoy_v3_api_field_config.cluster.v3.OutlierDetection.consecutive_gateway_failure>` value.\n\nConsecutive Local Origin Failure\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThis detection type is enabled only when :ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` is *true* and takes into account only locally originated errors (timeout, reset, etc). \nIf Envoy repeatedly cannot connect to an upstream host or communication with the upstream host is repeatedly interrupted, it will be ejected.\nVarious locally originated problems are detected: timeout, TCP reset, ICMP errors, etc. 
The number of consecutive\nlocally originated failures required for ejection is controlled \nby the :ref:`outlier_detection.consecutive_local_origin_failure \n<envoy_v3_api_field_config.cluster.v3.OutlierDetection.consecutive_local_origin_failure>` value.\nThis detection type is supported by :ref:`http router <config_http_filters_router>`, \n:ref:`tcp proxy <config_network_filters_tcp_proxy>`  and :ref:`redis proxy <config_network_filters_redis_proxy>`.\n\nSuccess Rate\n^^^^^^^^^^^^\n\nSuccess Rate based outlier detection aggregates success rate data from every host in a cluster. Then at given\nintervals ejects hosts based on statistical outlier detection. Success Rate outlier detection will not be\ncalculated for a host if its request volume over the aggregation interval is less than the\n:ref:`outlier_detection.success_rate_request_volume<envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_request_volume>`\nvalue. Moreover, detection will not be performed for a cluster if the number of hosts\nwith the minimum required request volume in an interval is less than the\n:ref:`outlier_detection.success_rate_minimum_hosts<envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_minimum_hosts>`\nvalue. \n\nIn the default configuration mode (:ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` is *false*)\nthis detection type takes into account all types of errors: locally and externally originated. 
The\n:ref:`outlier_detection.enforcing_local_origin_success<envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_local_origin_success_rate>` config item is ignored.\n\nIn split mode (:ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` is *true*), \nlocally originated errors and externally originated (transaction) errors are counted and treated separately. \nMost configuration items, namely\n:ref:`outlier_detection.success_rate_minimum_hosts<envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_minimum_hosts>`,\n:ref:`outlier_detection.success_rate_request_volume<envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_request_volume>`,\n:ref:`outlier_detection.success_rate_stdev_factor<envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_stdev_factor>` apply to both\ntypes of errors, but :ref:`outlier_detection.enforcing_success_rate<envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_success_rate>` applies\nto externally originated errors only and :ref:`outlier_detection.enforcing_local_origin_success_rate<envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_local_origin_success_rate>`  applies to locally originated errors only.\n\n.. _arch_overview_outlier_detection_failure_percentage:\n\nFailure Percentage\n^^^^^^^^^^^^^^^^^^\n\nFailure Percentage based outlier detection functions similarly to success rate detection, in\nthat it relies on success rate data from each host in a cluster. However, rather than compare those\nvalues to the mean success rate of the cluster as a whole, they are compared to a flat\nuser-configured threshold. 
This threshold is configured via the\n:ref:`outlier_detection.failure_percentage_threshold<envoy_v3_api_field_config.cluster.v3.OutlierDetection.failure_percentage_threshold>`\nfield.\n\nThe other configuration fields for failure percentage based detection are similar to the fields for\nsuccess rate detection. Failure percentage based detection also obeys\n:ref:`outlier_detection.split_external_local_origin_errors<envoy_v3_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`;\nthe enforcement percentages for externally- and locally-originated errors are controlled by\n:ref:`outlier_detection.enforcing_failure_percentage<envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_failure_percentage>`\nand\n:ref:`outlier_detection.enforcing_failure_percentage_local_origin<envoy_v3_api_field_config.cluster.v3.OutlierDetection.enforcing_failure_percentage_local_origin>`,\nrespectively. As with success rate detection, detection will not be performed for a host if its\nrequest volume over the aggregation interval is less than the\n:ref:`outlier_detection.failure_percentage_request_volume<envoy_v3_api_field_config.cluster.v3.OutlierDetection.failure_percentage_request_volume>`\nvalue. Detection also will not be performed for a cluster if the number of hosts with the minimum\nrequired request volume in an interval is less than the\n:ref:`outlier_detection.failure_percentage_minimum_hosts<envoy_v3_api_field_config.cluster.v3.OutlierDetection.failure_percentage_minimum_hosts>`\nvalue.\n\n.. _arch_overview_outlier_detection_grpc:\n\ngRPC\n----------------------\n\nFor gRPC requests, the outlier detection will use the HTTP status mapped from the `grpc-status <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#responses>`_ response header.\n\n\n.. _arch_overview_outlier_detection_logging:\n\nEjection event logging\n----------------------\n\nA log of outlier ejection events can optionally be produced by Envoy. 
This is extremely useful\nduring daily operations since global stats do not provide enough information on which hosts are\nbeing ejected and for what reasons. The log is structured as protobuf-based dumps of\n:ref:`OutlierDetectionEvent messages <envoy_v3_api_msg_data.cluster.v3.OutlierDetectionEvent>`.\nEjection event logging is configured in the Cluster manager :ref:`outlier detection configuration <envoy_v3_api_field_config.bootstrap.v3.ClusterManager.outlier_detection>`.\n\nConfiguration reference\n-----------------------\n\n* Cluster manager :ref:`global configuration <envoy_v3_api_field_config.bootstrap.v3.ClusterManager.outlier_detection>`\n* Per cluster :ref:`configuration <envoy_v3_api_msg_config.cluster.v3.OutlierDetection>`\n* Runtime :ref:`settings <config_cluster_manager_cluster_runtime_outlier_detection>`\n* Statistics :ref:`reference <config_cluster_manager_cluster_stats_outlier_detection>`\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/service_discovery.rst",
    "content": ".. _arch_overview_service_discovery:\n\nService discovery\n=================\n\nWhen an upstream cluster is defined in the :ref:`configuration <envoy_v3_api_msg_config.cluster.v3.Cluster>`,\nEnvoy needs to know how to resolve the members of the cluster. This is known as *service discovery*.\n\n.. _arch_overview_service_discovery_types:\n\nSupported service discovery types\n---------------------------------\n\n.. _arch_overview_service_discovery_types_static:\n\nStatic\n^^^^^^\n\nStatic is the simplest service discovery type. The configuration explicitly specifies the resolved\nnetwork name (IP address/port, unix domain socket, etc.) of each upstream host.\n\n.. _arch_overview_service_discovery_types_strict_dns:\n\nStrict DNS\n^^^^^^^^^^\n\nWhen using strict DNS service discovery, Envoy will continuously and asynchronously resolve the\nspecified DNS targets. Each returned IP address in the DNS result will be considered an explicit\nhost in the upstream cluster. This means that if the query returns three IP addresses, Envoy will\nassume the cluster has three hosts, and all three should be load balanced to. If a host is removed\nfrom the result Envoy assumes it no longer exists and will drain traffic from any existing\nconnection pools. Consequently, if a successful DNS resolution returns 0 hosts, Envoy will assume\nthat the cluster does not have any hosts. Note that Envoy never synchronously resolves DNS in the\nforwarding path. 
At the expense of eventual consistency, there is never a worry of blocking on a\nlong running DNS query.\n\nIf a single DNS name resolves to the same IP multiple times, these IPs will be de-duplicated.\n\nIf multiple DNS names resolve to the same IP, health checking will *not* be shared.\nThis means that care should be taken if active health checking is used with DNS names that resolve\nto the same IPs: if an IP is repeated many times between DNS names it might cause undue load on the\nupstream host.\n\nIf :ref:`respect_dns_ttl <envoy_v3_api_field_config.cluster.v3.Cluster.respect_dns_ttl>` is enabled, DNS record TTLs and\n:ref:`dns_refresh_rate <envoy_v3_api_field_config.cluster.v3.Cluster.dns_refresh_rate>` are used to control DNS refresh rate.\nFor strict DNS cluster, if the minimum of all record TTLs is 0, :ref:`dns_refresh_rate <envoy_v3_api_field_config.cluster.v3.Cluster.dns_refresh_rate>`\nwill be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate <envoy_v3_api_field_config.cluster.v3.Cluster.dns_refresh_rate>`\ndefaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate <envoy_v3_api_field_config.cluster.v3.Cluster.dns_failure_refresh_rate>`\ncontrols the refresh frequency during failures, and, if not configured, the DNS refresh rate will be used.\n\nDNS resolving emits :ref:`cluster statistics <config_cluster_manager_cluster_stats>` fields *update_attempt*, *update_success* and *update_failure*.\n\n.. _arch_overview_service_discovery_types_logical_dns:\n\nLogical DNS\n^^^^^^^^^^^\n\nLogical DNS uses a similar asynchronous resolution mechanism to strict DNS. However, instead of\nstrictly taking the results of the DNS query and assuming that they comprise the entire upstream\ncluster, a logical DNS cluster only uses the first IP address returned *when a new connection needs\nto be initiated*. Thus, a single logical connection pool may contain physical connections to a\nvariety of different upstream hosts. 
Connections are never drained,\nincluding on a successful DNS resolution that returns 0 hosts.\n\nThis service discovery type is\noptimal for large scale web services that must be accessed via DNS. Such services typically use\nround robin DNS to return many different IP addresses. Typically a different result is returned for\neach query. If strict DNS were used in this scenario, Envoy would assume that the cluster’s members\nwere changing during every resolution interval which would lead to draining connection pools,\nconnection cycling, etc. Instead, with logical DNS, connections stay alive until they get cycled.\nWhen interacting with large scale web services, this is the best of all possible worlds:\nasynchronous/eventually consistent DNS resolution, long lived connections, and zero blocking in the\nforwarding path.\n\nIf :ref:`respect_dns_ttl <envoy_v3_api_field_config.cluster.v3.Cluster.respect_dns_ttl>` is enabled, DNS record TTLs and\n:ref:`dns_refresh_rate <envoy_v3_api_field_config.cluster.v3.Cluster.dns_refresh_rate>` are used to control DNS refresh rate.\nFor logical DNS cluster, if the TTL of first record is 0, :ref:`dns_refresh_rate <envoy_v3_api_field_config.cluster.v3.Cluster.dns_refresh_rate>`\nwill be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate <envoy_v3_api_field_config.cluster.v3.Cluster.dns_refresh_rate>`\ndefaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate <envoy_v3_api_field_config.cluster.v3.Cluster.dns_failure_refresh_rate>`\ncontrols the refresh frequency during failures, and, if not configured, the DNS refresh rate will be used.\n\nDNS resolving emits :ref:`cluster statistics <config_cluster_manager_cluster_stats>` fields *update_attempt*, *update_success* and *update_failure*.\n\n.. 
_arch_overview_service_discovery_types_original_destination:\n\nOriginal destination\n^^^^^^^^^^^^^^^^^^^^\n\nOriginal destination cluster can be used when incoming connections are redirected to Envoy either\nvia an iptables REDIRECT or TPROXY target or with Proxy Protocol. In these cases requests routed\nto an original destination cluster are forwarded to upstream hosts as addressed by the redirection\nmetadata, without any explicit host configuration or upstream host discovery.\nConnections to upstream hosts are pooled and unused hosts are flushed out when they have been idle longer than\n:ref:`cleanup_interval <envoy_v3_api_field_config.cluster.v3.Cluster.cleanup_interval>`, which defaults to\n5000ms. If the original destination address is not available, no upstream connection is opened.\nEnvoy can also pick up the original destination from an :ref:`HTTP header\n<arch_overview_load_balancing_types_original_destination_request_header>`.\nOriginal destination service discovery must be used with the original destination :ref:`load\nbalancer <arch_overview_load_balancing_types_original_destination>`.\n\n.. _arch_overview_service_discovery_types_eds:\n\nEndpoint discovery service (EDS)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe *endpoint discovery service* is an :ref:`xDS management server based on gRPC or REST-JSON API server\n<config_overview_management_server>` used by Envoy to fetch cluster members. The cluster members are called\n\"endpoint\" in Envoy terminology. For each cluster, Envoy fetches the endpoints from the discovery service. EDS is the\npreferred service discovery mechanism for a few reasons:\n\n* Envoy has explicit knowledge of each upstream host (vs. routing through a DNS resolved load\n  balancer) and can make more intelligent load balancing decisions.\n* Extra attributes carried in the discovery API response for each host inform Envoy of the host’s\n  load balancing weight, canary status, zone, etc. 
These additional attributes are used globally\n  by the Envoy mesh during load balancing, statistic gathering, etc.\n\nThe Envoy project provides reference gRPC implementations of EDS and\n:ref:`other discovery services <arch_overview_dynamic_config>`\nin both `Java <https://github.com/envoyproxy/java-control-plane>`_\nand `Go <https://github.com/envoyproxy/go-control-plane>`_.\n\n.. _arch_overview_service_discovery_types_custom:\n\nCustom cluster\n^^^^^^^^^^^^^^\n\nEnvoy also supports a custom cluster discovery mechanism. Custom clusters are specified using\n:ref:`cluster_type field <envoy_v3_api_field_config.cluster.v3.Cluster.cluster_type>` on the cluster configuration.\n\nGenerally active health checking is used in conjunction with the eventually consistent service\ndiscovery service data to make load balancing and routing decisions. This is discussed further in\nthe following section.\n\n.. _arch_overview_service_discovery_eventually_consistent:\n\nOn eventually consistent service discovery\n------------------------------------------\n\nMany existing RPC systems treat service discovery as a fully consistent process. To this end, they\nuse fully consistent leader election backing stores such as Zookeeper, etcd, Consul, etc. Our\nexperience has been that operating these backing stores at scale is painful.\n\nEnvoy was designed from the beginning with the idea that service discovery does not require full\nconsistency. Instead, Envoy assumes that hosts come and go from the mesh in an eventually consistent\nway. Our recommended way of deploying a service to service Envoy mesh configuration uses eventually\nconsistent service discovery along with :ref:`active health checking <arch_overview_health_checking>`\n(Envoy explicitly health checking upstream cluster members) to determine cluster health. This\nparadigm has a number of benefits:\n\n* All health decisions are fully distributed. 
Thus, network partitions are gracefully handled\n  (whether the application gracefully handles the partition is a different story).\n* When health checking is configured for an upstream cluster, Envoy uses a 2x2 matrix to determine\n  whether to route to a host:\n\n.. csv-table::\n  :header: Discovery Status, Health Check OK, Health Check Failed\n  :widths: 1, 1, 2\n\n  Discovered, Route, Don't Route\n  Absent, Route, Don't Route / Delete\n\nHost discovered / health check OK\n  Envoy **will route** to the target host.\n\nHost absent / health check OK:\n  Envoy **will route** to the target host. This is very important since the design assumes that the\n  discovery service can fail at any time. If a host continues to pass health check even after becoming\n  absent from the discovery data, Envoy will still route. Although it would be impossible to add new\n  hosts in this scenario, existing hosts will continue to operate normally. When the discovery service\n  is operating normally again the data will eventually re-converge.\n\nHost discovered / health check FAIL\n  Envoy **will not route** to the target host. Health check data is assumed to be more accurate than\n  discovery data.\n\nHost absent / health check FAIL\n  Envoy **will not route and will delete** the target host. This\n  is the only state in which Envoy will purge host data.\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/upstream.rst",
    "content": "Upstream clusters\n=================\n\n.. toctree::\n  :maxdepth: 2\n\n  cluster_manager\n  service_discovery\n  health_checking\n  connection_pooling\n  load_balancing/load_balancing\n  aggregate_cluster\n  outlier\n  circuit_breaking\n  upstream_filters\n  load_reporting_service\n"
  },
  {
    "path": "docs/root/intro/arch_overview/upstream/upstream_filters.rst",
    "content": ".. _arch_overview_upstream_filters:\n\nUpstream network filters\n========================\n\nUpstream clusters provide an ability to inject network level (L3/L4)\nfilters. It should be noted that a network filter needs to\nbe registered in code as an upstream filter before usage. Currently,\nthere are no upstream filters available in Envoy out of the box.\nThe filters apply to the connection to the upstream hosts, using the same API presented by listeners for\nthe downstream connections. The write-callbacks are invoked for any chunk of\ndata sent to the upstream host, and the read-callbacks are invoked for data\nreceived from the upstream host.\n"
  },
  {
    "path": "docs/root/intro/deployment_types/deployment_types.rst",
    "content": "Deployment types\n================\n\nEnvoy is usable in a variety of different scenarios, however it's most useful when deployed as a\n*mesh* across all hosts in an infrastructure. This section describes three recommended deployment\ntypes in increasing order of complexity.\n\n.. toctree::\n\n  service_to_service\n  front_proxy\n  double_proxy\n"
  },
  {
    "path": "docs/root/intro/deployment_types/double_proxy.rst",
    "content": ".. _deployment_type_double_proxy:\n\nService to service, front proxy, and double proxy\n-------------------------------------------------\n\n.. image:: /_static/double_proxy.svg\n  :width: 70%\n\nThe above diagram shows the :ref:`front proxy <deployment_type_front_proxy>` configuration alongside\nanother Envoy cluster running as a *double proxy*. The idea behind the double proxy is that it is\nmore efficient to terminate TLS and client connections as close as possible to the user (shorter\nround trip times for the TLS handshake, faster TCP CWND expansion, less chance for packet loss,\netc.). Connections that terminate in the double proxy are then multiplexed onto long lived HTTP/2\nconnections running in the main data center.\n\nIn the above diagram, the front Envoy proxy running in region 1 authenticates itself with the front\nEnvoy proxy running in region 2 via TLS mutual authentication and pinned certificates. This allows\nthe front Envoy instances running in region 2 to trust elements of the incoming requests that\nordinarily would not be trustable (such as the x-forwarded-for HTTP header).\n\nConfiguration template\n^^^^^^^^^^^^^^^^^^^^^^\n\nThe source distribution includes an example double proxy configuration that is very similar to\nthe version that Lyft runs in production. See :ref:`here <install_ref_configs>` for more\ninformation.\n"
  },
  {
    "path": "docs/root/intro/deployment_types/front_proxy.rst",
    "content": ".. _deployment_type_front_proxy:\n\nService to service plus front proxy\n-----------------------------------\n\n.. image:: /_static/front_proxy.svg\n\nThe above diagram shows the :ref:`service to service <deployment_type_service_to_service>`\nconfiguration sitting behind an Envoy cluster used as an HTTP L7 edge reverse proxy. The\nreverse proxy provides the following features:\n\n* Terminates TLS.\n* Supports both HTTP/1.1 and HTTP/2.\n* Full HTTP L7 routing support.\n* Talks to the service to service Envoy clusters via the standard :ref:`ingress port\n  <deployment_type_service_to_service_ingress>` and using the discovery service for host\n  lookup. Thus, the front Envoy hosts work identically to any other Envoy host, other than the\n  fact that they do not run collocated with another service. This means that are operated in the\n  same way and emit the same statistics.\n\nConfiguration template\n^^^^^^^^^^^^^^^^^^^^^^\n\nThe source distribution includes an example front proxy configuration that is very similar to\nthe version that Lyft runs in production. See :ref:`here <install_ref_configs>` for more\ninformation.\n"
  },
  {
    "path": "docs/root/intro/deployment_types/service_to_service.rst",
    "content": ".. _deployment_type_service_to_service:\n\nService to service only\n-----------------------\n\n.. image:: /_static/service_to_service.svg\n  :width: 60%\n\nThe above diagram shows the simplest Envoy deployment which uses Envoy as a communication bus for\nall traffic internal to a service oriented architecture (SOA). In this scenario, Envoy exposes\nseveral listeners that are used for local origin traffic as well as service-to-service traffic.\n\nService to service egress listener\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThis is the port used by applications to talk to other services in the infrastructure. For example,\n*http://localhost:9001*. HTTP and gRPC requests use the HTTP/1.1 *host* header or the HTTP/2\n*:authority* header to indicate which remote cluster the request is destined for. Envoy handles\nservice discovery, load balancing, rate limiting, etc. depending on the details in the\nconfiguration. Services only need to know about the local Envoy and do not need to concern\nthemselves with network topology, whether they are running in development or production, etc.\n\nThis listener supports both HTTP/1.1 or HTTP/2 depending on the capabilities of the application.\n\n.. image:: /_static/service_to_service_egress_listener.svg\n  :width: 40%\n\n.. _deployment_type_service_to_service_ingress:\n\nService to service ingress listener\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThis is the port used by remote Envoys when they want to talk to the local Envoy. For example,\n*http://servicename:9211*. Envoy routes incoming requests to the local service on the configured\nport(s). Multiple application ports may be involved depending on application or load balancing\nneeds (for example if the service needs both an HTTP port and a gRPC port). The local Envoy\nperforms buffering, circuit breaking, etc. 
as needed.\n\nOur default configurations use HTTP/2 for all Envoy to Envoy communication, regardless of whether\nthe application uses HTTP/1.1 or HTTP/2 when egressing out of a local Envoy. HTTP/2 provides\nbetter performance via long lived connections and explicit reset notifications.\n\n.. image:: /_static/service_to_service_ingress_listener.svg\n  :width: 55%\n\n\nOptional external service egress listeners\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nGenerally, an explicit egress port is used for each external service that a local service wants\nto talk to. This is done because some external service SDKs do not easily support overriding the\n*host* header to allow for standard HTTP reverse proxy behavior. For example,\n*http://localhost:9250* might be allocated for connections destined for DynamoDB. Instead of using\n*host* routing for some external services and dedicated local port routing for others, we recommend\nbeing consistent and using local port routing for all external services.\n\nDiscovery service integration\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe recommended service-to-service configuration uses an external discovery service for all cluster\nlookups. This provides Envoy with the most detailed information possible for use when performing\nload balancing, statistics gathering, etc.\n\nConfiguration template\n^^^^^^^^^^^^^^^^^^^^^^\n\nThe source distribution includes :ref:`an example service-to-service configuration<install_ref_configs>`\nthat is very similar to the version that Lyft runs in production.\n"
  },
  {
    "path": "docs/root/intro/deprecated.rst",
    "content": "Deprecated\n==========\n\nThe deprecations for each version have moved :ref:`here <version_history>`.\n\n.. This page only exists because previous versions of Envoy link here.\n"
  },
  {
    "path": "docs/root/intro/getting_help.rst",
    "content": ".. _getting_help:\n\nGetting help\n============\n\nWe are very interested in building a community around Envoy. Please reach out to us if you are\ninterested in using it and need help or want to contribute.\n\nPlease see `contact info <https://github.com/envoyproxy/envoy#contact>`_.\n\nReporting security vulnerabilities\n----------------------------------\n\nPlease see `security contact info\n<https://github.com/envoyproxy/envoy#reporting-security-vulnerabilities>`_.\n"
  },
  {
    "path": "docs/root/intro/intro.rst",
    "content": ".. _intro:\n\nIntroduction\n============\n\n.. toctree::\n  :maxdepth: 2\n\n  what_is_envoy\n  arch_overview/arch_overview\n  life_of_a_request\n  deployment_types/deployment_types\n  getting_help\n\n.. These pages are only here for redirects from log lines from shipping versions of Envoy, so hide them.\n.. toctree::\n  :hidden:\n\n  version_history\n  deprecated\n"
  },
  {
    "path": "docs/root/intro/life_of_a_request.rst",
    "content": ".. _life_of_a_request:\n\nLife of a Request\n=================\n\nBelow we describe the events in the life of a request passing through an Envoy proxy. We first\ndescribe how Envoy fits into the request path for a request and then the internal events that take\nplace following the arrival of a request at the Envoy proxy from downstream. We follow the request\nuntil the corresponding dispatch upstream and the response path.\n\n\nTerminology\n-----------\n\nEnvoy uses the following terms through its codebase and documentation:\n\n* *Cluster*: a logical service with a set of endpoints that Envoy forwards requests to.\n* *Downstream*: an entity connecting to Envoy. This may be a local application (in a sidecar model) or\n  a network node. In non-sidecar models, this is a remote client.\n* *Endpoints*: network nodes that implement a logical service. They are grouped into clusters.\n  Endpoints in a cluster are *upstream* of an Envoy proxy.\n* *Filter*: a module in the connection or request processing pipeline providing some aspect of\n  request handling. An analogy from Unix is the composition of small utilities (filters) with Unix\n  pipes (filter chains).\n* *Filter chain*: a series of filters.\n* *Listeners*: Envoy module responsible for binding to an IP/port, accepting new TCP connections (or\n  UDP datagrams) and orchestrating the downstream facing aspects of request processing.\n* *Upstream*: an endpoint (network node) that Envoy connects to when forwarding requests for a\n  service. This may be a local application (in a sidecar model) or a network node. In non-sidecar\n  models, this corresponds with a remote backend.\n\nNetwork topology\n----------------\n\nHow a request flows through the components in a network (including Envoy) depends on the network’s\ntopology. Envoy can be used in a wide variety of networking topologies. 
We focus on the inner\noperation of Envoy below, but briefly we address how Envoy relates to the rest of the network in\nthis section.\n\nEnvoy originated as a `service mesh\n<https://blog.envoyproxy.io/service-mesh-data-plane-vs-control-plane-2774e720f7fc>`_ sidecar proxy,\nfactoring out load balancing, routing, observability, security and discovery services from\napplications. In the service mesh model, requests flow through Envoys as a gateway to the network.\nRequests arrive at an Envoy via either ingress or egress listeners:\n\n* Ingress listeners take requests from other nodes in the service mesh and forward them to the\n  local application. Responses from the local application flow back through Envoy to the downstream.\n* Egress listeners take requests from the local application and forward them to other nodes in the\n  network. These receiving nodes will also be typically running Envoy and accepting the request via\n  their ingress listeners.\n\n.. image:: /_static/lor-topology-service-mesh.svg\n   :width: 80%\n   :align: center\n\n.. image:: /_static/lor-topology-service-mesh-node.svg\n   :width: 40%\n   :align: center\n\n\nEnvoy is used in a variety of configurations beyond the service mesh. For example, it can also act\nas an internal load balancer:\n\n.. image:: /_static/lor-topology-ilb.svg\n   :width: 65%\n   :align: center\n\nOr as an ingress/egress proxy on the network edge:\n\n.. image:: /_static/lor-topology-edge.svg\n   :width: 90%\n   :align: center\n\nIn practice, a hybrid of these is often used, where Envoy features in a service mesh, on the edge\nand as an internal load balancer. A request path may traverse multiple Envoys.\n\n.. image:: /_static/lor-topology-hybrid.svg\n   :width: 90%\n   :align: center\n\nEnvoy may be configured in multi-tier topologies for scalability and reliability, with a request\nfirst passing through an edge Envoy prior to passing through a second Envoy tier:\n\n.. 
image:: /_static/lor-topology-tiered.svg\n   :width: 80%\n   :align: center\n\nIn all the above cases, a request will arrive at a specific Envoy via TCP, UDP or Unix domain\nsockets from downstream. Envoy will forward requests upstream via TCP, UDP or Unix domain sockets.\nWe focus on a single Envoy proxy below.\n\nConfiguration\n-------------\n\nEnvoy is a very extensible platform. This results in a combinatorial explosion of possible request\npaths, depending on:\n\n* L3/4 protocol, e.g. TCP, UDP, Unix domain sockets.\n* L7 protocol, e.g. HTTP/1, HTTP/2, HTTP/3, gRPC, Thrift, Dubbo, Kafka, Redis and various databases.\n* Transport socket, e.g. plain text, TLS, ALTS.\n* Connection routing, e.g. PROXY protocol, original destination, dynamic forwarding.\n* Authentication and authorization.\n* Circuit breakers and outlier detection configuration and activation state.\n* Many other configurations for networking, HTTP, listener, access logging, health checking, tracing\n  and stats extensions.\n\nIt's helpful to focus on one at a time, so this example covers the following:\n\n* An HTTP/2 request with :ref:`TLS <arch_overview_ssl>` over a TCP connection for both downstream\n  and upstream.\n* The :ref:`HTTP connection manager <arch_overview_http_conn_man>` as the only :ref:`network filter\n  <arch_overview_network_filters>`.\n* A hypothetical CustomFilter and the :ref:`router <arch_overview_http_routing>` filter as the :ref:`HTTP\n  filter <arch_overview_http_filters>` chain.\n* :ref:`Filesystem access logging <arch_overview_access_logs_sinks>`.\n* :ref:`Statsd sink <envoy_v3_api_msg_config.metrics.v3.StatsSink>`.\n* A single :ref:`cluster <arch_overview_cluster_manager>` with static endpoints.\n\nWe assume a static bootstrap configuration file for simplicity:\n\n.. 
literalinclude:: _include/life-of-a-request.yaml\n    :language: yaml\n\nHigh level architecture\n-----------------------\n\nThe request processing path in Envoy has two main parts:\n\n* :ref:`Listener subsystem <arch_overview_listeners>` which handles **downstream** request\n  processing. It is also responsible for managing the downstream request lifecycle and for the\n  response path to the client. The downstream HTTP/2 codec lives here.\n* :ref:`Cluster subsystem <arch_overview_cluster_manager>` which is responsible for selecting and\n  configuring the **upstream** connection to an endpoint. This is where knowledge of cluster and\n  endpoint health, load balancing and connection pooling exists. The upstream HTTP/2 codec lives\n  here.\n\nThe two subsystems are bridged with the HTTP router filter, which forwards the HTTP request from\ndownstream to upstream.\n\n.. image:: /_static/lor-architecture.svg\n   :width: 80%\n   :align: center\n\nWe use the terms :ref:`listener subsystem <arch_overview_listeners>` and :ref:`cluster subsystem\n<arch_overview_cluster_manager>` above to refer to the group of modules and instance classes that\nare created by the top level `ListenerManager` and `ClusterManager` classes. There are many\ncomponents that we discuss below that are instantiated before and during the course of a request by\nthese management systems, for example listeners, filter chains, codecs, connection pools and load\nbalancing data structures.\n\nEnvoy has an `event-based thread model\n<https://blog.envoyproxy.io/envoy-threading-model-a8d44b922310>`_. A main thread is responsible for\nthe server lifecycle, configuration processing, stats, etc. and some number of :ref:`worker threads\n<arch_overview_threading>` process requests. All threads operate around an event loop (`libevent\n<https://libevent.org/>`_) and any given downstream TCP connection (including all the multiplexed\nstreams on it) will be handled by exactly one worker thread for its lifetime. 
Each worker thread\nmaintains its own pool of TCP connections to upstream endpoints. :ref:`UDP\n<arch_overview_listeners_udp>` handling makes use of SO_REUSEPORT to have the kernel consistently\nhash the source/destination IP:port tuples to the same worker thread. UDP filter state is shared for\na given worker thread, with the filter responsible for providing session semantics as needed. This\nis in contrast to the connection oriented TCP filters we discuss below, where filter state exists on\na per connection and, in the case of HTTP filters, per-request basis.\n\nWorker threads rarely share state and operate in a trivially parallel fashion. This threading model\nenables scaling to very high core count CPUs.\n\nRequest flow\n------------\n\nOverview\n^^^^^^^^\n\nA brief outline of the life cycle of a request and response using the example configuration above:\n\n1. A TCP connection from downstream is accepted by an Envoy :ref:`listener\n   <arch_overview_listeners>` running on a :ref:`worker thread <arch_overview_threading>`.\n2. The :ref:`listener filter <arch_overview_listener_filters>` chain is created and runs. It can\n   provide SNI and other pre-TLS info. Once completed, the listener will match a network filter\n   chain. Each listener may have multiple filter chains which match on some combination of\n   destination IP CIDR range, SNI, ALPN, source ports, etc. A transport socket, in our case the TLS\n   transport socket, is associated with this filter chain.\n3. On network reads, the :ref:`TLS <arch_overview_ssl>` transport socket decrypts the data read from\n   the TCP connection to a decrypted data stream for further processing.\n4. The :ref:`network filter <arch_overview_network_filters>` chain is created and runs. The most\n   important filter for HTTP is the HTTP connection manager, which is the last network filter in the\n   chain.\n5. 
The HTTP/2 codec in :ref:`HTTP connection manager <arch_overview_http_conn_man>` deframes and\n   demultiplexes the decrypted data stream from the TLS connection to a number of independent\n   streams. Each stream handles a single request and response.\n6. For each HTTP stream, an :ref:`HTTP filter <arch_overview_http_filters>` chain is created and\n   runs. The request first passes through CustomFilter which may read and modify the request. The\n   most important HTTP filter is the router filter which sits at the end of the HTTP filter chain.\n   When `decodeHeaders` is invoked on the router filter, the route is selected and a cluster is\n   picked. The request headers on the stream are forwarded to an upstream endpoint in that cluster.\n   The :ref:`router <arch_overview_http_routing>` filter obtains an HTTP :ref:`connection pool\n   <arch_overview_conn_pool>` from the cluster manager for the matched cluster to do this.\n7. Cluster specific :ref:`load balancing <arch_overview_load_balancing>` is performed to find an\n   endpoint. The cluster’s circuit breakers are checked to determine if a new stream is allowed. A\n   new connection to the endpoint is created if the endpoint's connection pool is empty or lacks\n   capacity.\n8. The upstream endpoint connection's HTTP/2 codec multiplexes and frames the request’s stream with\n   any other streams going to that upstream over a single TCP connection.\n9. The upstream endpoint connection's TLS transport socket encrypts these bytes and writes them to a\n   TCP socket for the upstream connection.\n10. The request, consisting of headers, and optional body and trailers, is proxied upstream, and the\n    response is proxied downstream. The response passes through the HTTP filters in the\n    :ref:`opposite order <arch_overview_http_filters_ordering>` from the request, starting at the\n    router filter and passing through CustomFilter, before being sent downstream.\n11. 
When the response is complete, the stream is destroyed. Post-request processing will update\n    stats, write to the access log and finalize trace spans.\n\nWe elaborate on each of these steps in the sections below.\n\n1. Listener TCP accept\n^^^^^^^^^^^^^^^^^^^^^^\n\n.. image:: /_static/lor-listeners.svg\n   :width: 90%\n   :align: center\n\nThe *ListenerManager* is responsible for taking configuration representing :ref:`listeners\n<arch_overview_listeners>` and instantiating a number of *Listener* instances bound to their\nrespective IP/ports. Listeners may be in one of three states:\n\n* *Warming*: the listener is waiting for configuration dependencies (e.g. route configuration,\n  dynamic secrets). The listener is not yet ready to accept TCP connections.\n* *Active*: the listener is bound to its IP/port and accepts TCP connections.\n* *Draining*: the listener no longer accepts new TCP connections while its existing TCP connections\n  are allowed to continue for a drain period.\n\nEach :ref:`worker thread <arch_overview_threading>` maintains its own *Listener* instance for each\nof the configured listeners. Each listener may bind to the same port via SO_REUSEPORT or share a\nsingle socket bound to this port. When a new TCP connection arrives, the kernel decides which\nworker thread will accept the connection and the *Listener* for this worker thread will have its\n``Server::ConnectionHandlerImpl::ActiveTcpListener::onAccept()`` callback invoked.\n\n2. Listener filter chains and network filter chain matching\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe worker thread’s *Listener* then creates and runs the :ref:`listener filter\n<arch_overview_listener_filters>` chain. Filter chains are created by applying each filter’s *filter\nfactory*. 
The factory is aware of the filter’s configuration and creates a new instance of the\nfilter for each connection or stream.\n\nIn the case of our TLS listener configuration, the listener filter chain consists of the :ref:`TLS\ninspector <config_listener_filters_tls_inspector>` filter\n(``envoy.filters.listener.tls_inspector``). This filter examines the initial TLS handshake and\nextracts the server name (SNI). The SNI is then made available for filter chain matching. While the\nTLS inspector appears explicitly in the listener filter chain configuration, Envoy is also capable\nof inserting this automatically whenever there is a need for SNI (or ALPN) in a listener’s filter\nchain.\n\n.. image:: /_static/lor-listener-filters.svg\n   :width: 80%\n   :align: center\n\nThe TLS inspector filter implements the :repo:`ListenerFilter <include/envoy/network/filter.h>`\ninterface. All filter interfaces, whether listener or network/HTTP, require that filters implement\ncallbacks for specific connection or stream events. In the case of `ListenerFilter`, this is:\n\n\n.. code-block:: cpp\n\n  virtual FilterStatus onAccept(ListenerFilterCallbacks& cb) PURE;\n\n``onAccept()`` allows a filter to run during the TCP accept processing. The ``FilterStatus``\nreturned by the callback controls how the listener filter chain will continue. Listener filters may\npause the filter chain and then later resume, e.g. in response to an RPC made to another service.\n\nInformation extracted from the listener filters and connection properties is then used to match a\nfilter chain, giving the network filter chain and transport socket that will be used to handle the\nconnection.\n\n.. image:: /_static/lor-filter-chain-match.svg\n   :width: 50%\n   :align: center\n\n.. _life_of_a_request_tls_decryption:\n\n3. 
TLS transport socket decryption\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nEnvoy offers pluggable transport sockets via the\n:repo:`TransportSocket <include/envoy/network/transport_socket.h>`\nextension interface. Transport sockets follow the lifecycle events of a TCP connection and\nread/write into network buffers. Some key methods that transport sockets must implement are:\n\n.. code-block:: cpp\n\n  virtual void onConnected() PURE;\n  virtual IoResult doRead(Buffer::Instance& buffer) PURE;\n  virtual IoResult doWrite(Buffer::Instance& buffer, bool end_stream) PURE;\n  virtual void closeSocket(Network::ConnectionEvent event) PURE;\n\nWhen data is available on a TCP connection, ``Network::ConnectionImpl::onReadReady()`` invokes the\n:ref:`TLS <arch_overview_ssl>` transport socket via ``SslSocket::doRead()``. The transport socket\nthen performs a TLS handshake on the TCP connection. When the handshake completes,\n``SslSocket::doRead()`` provides a decrypted byte stream to an instance of\n``Network::FilterManagerImpl``, responsible for managing the network filter chain.\n\n.. image:: /_static/lor-transport-socket.svg\n   :width: 80%\n   :align: center\n\nIt’s important to note that no operation, whether it’s a TLS handshake or a pause of a filter\npipeline is truly blocking. Since Envoy is event-based, any situation in which processing requires\nadditional data will lead to early event completion and yielding of the CPU to another event. When\nthe network makes more data available to read, a read event will trigger the resumption of a TLS\nhandshake.\n\n4. Network filter chain processing\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nAs with the listener filter chain, Envoy, via `Network::FilterManagerImpl`, will instantiate a\nseries of :ref:`network filters <arch_overview_network_filters>` from their filter factories. The\ninstance is fresh for each new connection. 
Network filters, like transport sockets, follow TCP\nlifecycle events and are invoked as data becomes available from the transport socket.\n\n.. image:: /_static/lor-network-filters.svg\n   :width: 80%\n   :align: center\n\nNetwork filters are composed as a pipeline, unlike transport sockets which are one-per-connection.\nNetwork filters come in three varieties:\n\n* :repo:`ReadFilter <include/envoy/network/filter.h>` implementing ``onData()``, called when data is\n  available from the connection (due to some request).\n* :repo:`WriteFilter <include/envoy/network/filter.h>` implementing ``onWrite()``, called when data\n  is about to be written to the connection (due to some response).\n* :repo:`Filter <include/envoy/network/filter.h>` implementing both *ReadFilter* and *WriteFilter*.\n\nThe method signatures for the key filter methods are:\n\n.. code-block:: cpp\n\n  virtual FilterStatus onNewConnection() PURE;\n  virtual FilterStatus onData(Buffer::Instance& data, bool end_stream) PURE;\n  virtual FilterStatus onWrite(Buffer::Instance& data, bool end_stream) PURE;\n\nAs with the listener filter, the ``FilterStatus`` allows filters to pause execution of the filter\nchain. For example, if a rate limiting service needs to be queried, a rate limiting network filter\nwould return ``Network::FilterStatus::StopIteration`` from ``onData()`` and later invoke\n``continueReading()`` when the query completes.\n\nThe last network filter for a listener dealing with HTTP is :ref:`HTTP connection manager\n<arch_overview_http_conn_man>` (HCM). This is responsible for creating the HTTP/2 codec and managing\nthe HTTP filter chain. In our example, this is the only network filter. An example network filter\nchain making use of multiple network filters would look like:\n\n.. image:: /_static/lor-network-read.svg\n   :width: 80%\n   :align: center\n\nOn the response path, the network filter chain is executed in the reverse order to the request path.\n\n.. 
image:: /_static/lor-network-write.svg\n   :width: 80%\n   :align: center\n\n.. _life_of_a_request_http2_decoding:\n\n5. HTTP/2 codec decoding\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe HTTP/2 codec in Envoy is based on `nghttp2 <https://nghttp2.org/>`_. It is invoked by the HCM\nwith plaintext bytes from the TCP connection (after network filter chain transformation). The codec\ndecodes the byte stream as a series of HTTP/2 frames and demultiplexes the connection into a number\nof independent HTTP streams. Stream multiplexing is a key feature in HTTP/2, providing significant\nperformance advantages over HTTP/1. Each HTTP stream handles a single request and response.\n\nThe codec is also responsible for handling HTTP/2 setting frames and both stream and connection\nlevel :repo:`flow control <source/docs/flow_control.md>`.\n\nThe codecs are responsible for abstracting the specifics of the HTTP connection, presenting a\nstandard view to the HTTP connection manager and HTTP filter chain of a connection split into\nstreams, each with request/response headers/body/trailers. This is true regardless of whether the\nprotocol is HTTP/1, HTTP/2 or HTTP/3.\n\n6. HTTP filter chain processing\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nFor each HTTP stream, the HCM instantiates an :ref:`HTTP filter <arch_overview_http_filters>` chain,\nfollowing the pattern established above for listener and network filter chains.\n\n.. image:: /_static/lor-http-filters.svg\n   :width: 80%\n   :align: center\n\nThere are three kinds of HTTP filter interfaces:\n\n* :repo:`StreamDecoderFilter <include/envoy/http/filter.h>` with callbacks for request processing.\n* :repo:`StreamEncoderFilter <include/envoy/http/filter.h>` with callbacks for response processing.\n* :repo:`StreamFilter <include/envoy/http/filter.h>` implementing both `StreamDecoderFilter` and\n  `StreamEncoderFilter`.\n\nLooking at the decoder filter interface:\n\n.. 
code-block:: cpp\n\n  virtual FilterHeadersStatus decodeHeaders(RequestHeaderMap& headers, bool end_stream) PURE;\n  virtual FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) PURE;\n  virtual FilterTrailersStatus decodeTrailers(RequestTrailerMap& trailers) PURE;\n\nRather than operating on connection buffers and events, HTTP filters follow the lifecycle of an HTTP\nrequest, e.g. ``decodeHeaders()`` takes HTTP headers as an argument rather than a byte buffer. The\nreturned ``FilterStatus`` provides, as with network and listener filters, the ability to manage filter\nchain control flow.\n\nWhen the HTTP/2 codec makes available the HTTP requests headers, these are first passed to\n``decodeHeaders()`` in CustomFilter. If the returned ``FilterHeadersStatus`` is ``Continue``, HCM\nthen passes the headers (possibly mutated by CustomFilter) to the router filter.\n\nDecoder and encoder-decoder filters are executed on the request path. Encoder and encoder-decoder\nfilters are executed on the response path, in :ref:`reverse direction\n<arch_overview_http_filters_ordering>`. Consider the following example filter chain:\n\n.. image:: /_static/lor-http.svg\n   :width: 80%\n   :align: center\n\nThe request path will look like:\n\n.. image:: /_static/lor-http-decode.svg\n   :width: 80%\n   :align: center\n\nWhile the response path will look like:\n\n.. image:: /_static/lor-http-encode.svg\n   :width: 80%\n   :align: center\n\nWhen ``decodeHeaders()`` is invoked on the :ref:`router <arch_overview_http_routing>` filter, the\nroute selection is finalized and a cluster is picked. The HCM selects a route from its\n``RouteConfiguration`` at the start of HTTP filter chain execution. This is referred to as the\n*cached route*. Filters may modify headers and cause a new route to be selected, by asking HCM to\nclear the route cache and requesting HCM to reevaluate the route selection. When the router filter\nis invoked, the route is finalized. 
The selected route’s configuration will point at an upstream\ncluster name. The router filter then asks the `ClusterManager` for an HTTP :ref:`connection pool\n<arch_overview_conn_pool>` for the cluster. This involves load balancing and the connection pool,\ndiscussed in the next section.\n\n.. image:: /_static/lor-route-config.svg\n   :width: 70%\n   :align: center\n\nThe resulting HTTP connection pool is used to build an `UpstreamRequest` object in the router, which\nencapsulates the HTTP encoding and decoding callback methods for the upstream HTTP request. Once a\nstream is allocated on a connection in the HTTP connection pool, the request headers are forwarded\nto the upstream endpoint by the invocation of ``UpstreamRequest::encoderHeaders()``.\n\nThe router filter is responsible for all aspects of upstream request lifecycle management on the\nstream allocated from the HTTP connection pool. It also is responsible for request timeouts, retries\nand affinity.\n\n7. Load balancing\n^^^^^^^^^^^^^^^^^\n\nEach cluster has a :ref:`load balancer <arch_overview_load_balancing>` which picks an endpoint when\na new request arrives. Envoy supports a variety of load balancing algorithms, e.g. weighted\nround-robin, Maglev, least-loaded, random. Load balancers obtain their effective assignments from a\ncombination of static bootstrap configuration, DNS, dynamic xDS (the CDS and EDS discovery services)\nand active/passive health checks. Further details on how load balancing works in Envoy are provided\nin the :ref:`load balancing documentation <arch_overview_load_balancing>`.\n\nOnce an endpoint is selected, the :ref:`connection pool <arch_overview_conn_pool>` for this endpoint\nis used to find a connection to forward the request on. 
If no connection to the host exists, or all\nconnections are at their maximum concurrent stream limit, a new connection is established and placed\nin the connection pool, unless the circuit breaker for maximum connections for the cluster has\ntripped. If a maximum lifetime stream limit for a connection is configured and reached, a new\nconnection is allocated in the pool and the affected HTTP/2 connection is drained. Other circuit\nbreakers, e.g. maximum concurrent requests to a cluster are also checked. See :repo:`circuit\nbreakers <arch_overview_circuit_breakers>` and :ref:`connection pools <arch_overview_conn_pool>` for\nfurther details.\n\n.. image:: /_static/lor-lb.svg\n   :width: 80%\n   :align: center\n\n8. HTTP/2 codec encoding\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe selected connection's HTTP/2 codec multiplexes the request stream with any other streams going\nto the same upstream over a single TCP connection. This is the reverse of :ref:`HTTP/2 codec\ndecoding <life_of_a_request_http2_decoding>`.\n\nAs with the downstream HTTP/2 codec, the upstream codec is responsible for taking Envoy’s standard\nabstraction of HTTP, i.e. multiple streams multiplexed on a single connection with request/response\nheaders/body/trailers, and mapping this to the specifics of HTTP/2 by generating a series of HTTP/2\nframes.\n\n9. TLS transport socket encryption\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe upstream endpoint connection's TLS transport socket encrypts the bytes from the HTTP/2 codec\noutput and writes them to a TCP socket for the upstream connection. As with :ref:`TLS transport\nsocket decryption <life_of_a_request_tls_decryption>`, in our example the cluster has a transport\nsocket configured that provides TLS transport security. The same interfaces exist for upstream and\ndownstream transport socket extensions.\n\n.. image:: /_static/lor-client.svg\n   :width: 70%\n   :align: center\n\n10. 
Response path and HTTP lifecycle\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe request, consisting of headers, and optional body and trailers, is proxied upstream, and the\nresponse is proxied downstream. The response passes through the HTTP and network filters in the\n:ref:`opposite order <arch_overview_http_filters_ordering>`. from the request.\n\nVarious callbacks for decoder/encoder request lifecycle events will be invoked in HTTP filters, e.g.\nwhen response trailers are being forwarded or the request body is streamed. Similarly, read/write\nnetwork filters will also have their respective callbacks invoked as data continues to flow in both\ndirections during a request.\n\n:ref:`Outlier detection <arch_overview_outlier_detection>` status for the endpoint is revised as the\nrequest progresses.\n\nA request completes when the upstream response reaches its end-of-stream, i.e. when trailers or the\nresponse header/body with end-stream set are received. This is handled in\n``Router::Filter::onUpstreamComplete()``.\n\nIt is possible for a request to terminate early. This may be due to (but not limited to):\n\n* Request timeout.\n* Upstream endpoint steam reset.\n* HTTP filter stream reset.\n* Circuit breaking.\n* Unavailability of upstream resources, e.g. missing a cluster for a route.\n* No healthy endpoints.\n* DoS protection.\n* HTTP protocol violations.\n* Local reply from either the HCM or an HTTP filter. E.g. a rate limit HTTP filter returning a 429\n  response.\n\nIf any of these occur, Envoy may either send an internally generated response, if upstream response\nheaders have not yet been sent, or will reset the stream, if response headers have already been\nforwarded downstream. The Envoy :ref:`debugging FAQ <faq_overview_debug>` has further information on\ninterpreting these early stream terminations.\n\n11. Post-request processing\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nOnce a request completes, the stream is destroyed. 
The following also takes place:
  },
  {
    "path": "docs/root/intro/version_history.rst",
    "content": "Version History\n===============\n\nThe changes for each version have moved :ref:`here <version_history>`.\n\n.. This page only exists because previous versions of Envoy link here.\n"
  },
  {
    "path": "docs/root/intro/what_is_envoy.rst",
    "content": "What is Envoy\n--------------\n\nEnvoy is an L7 proxy and communication bus designed for large modern service oriented architectures.\nThe project was born out of the belief that:\n\n  *The network should be transparent to applications. When network and application problems do occur\n  it should be easy to determine the source of the problem.*\n\nIn practice, achieving the previously stated goal is incredibly difficult. Envoy attempts to do so\nby providing the following high level features:\n\n**Out of process architecture:** Envoy is a self contained process that is designed to run\nalongside every application server. All of the Envoys form a transparent communication mesh in which\neach application sends and receives messages to and from localhost and is unaware of the network\ntopology. The out of process architecture has two substantial benefits over the traditional library\napproach to service to service communication:\n\n* Envoy works with any application language. A single Envoy deployment can form a mesh between\n  Java, C++, Go, PHP, Python, etc. It is becoming increasingly common for service oriented\n  architectures to use multiple application frameworks and languages. Envoy transparently bridges\n  the gap.\n* As anyone that has worked with a large service oriented architecture knows, deploying library\n  upgrades can be incredibly painful. Envoy can be deployed and upgraded quickly across an\n  entire infrastructure transparently.\n\n**L3/L4 filter architecture:** At its core, Envoy is an L3/L4 network proxy. A pluggable\n:ref:`filter <arch_overview_network_filters>` chain mechanism allows filters to be written to\nperform different TCP/UDP proxy tasks and inserted into the main server. 
Filters have already been\nwritten to support various tasks such as raw :ref:`TCP proxy <arch_overview_tcp_proxy>`, :ref:`UDP\nproxy <arch_overview_udp_proxy>`, :ref:`HTTP proxy <arch_overview_http_conn_man>`, :ref:`TLS client\ncertificate authentication <arch_overview_ssl_auth_filter>`, :ref:`Redis <arch_overview_redis>`,\n:ref:`MongoDB <arch_overview_mongo>`, :ref:`Postgres <arch_overview_postgres>`, etc.\n\n**HTTP L7 filter architecture:** HTTP is such a critical component of modern application\narchitectures that Envoy :ref:`supports <arch_overview_http_filters>` an additional HTTP L7 filter\nlayer. HTTP filters can be plugged into the HTTP connection management subsystem that perform\ndifferent tasks such as :ref:`buffering <config_http_filters_buffer>`, :ref:`rate limiting\n<arch_overview_global_rate_limit>`, :ref:`routing/forwarding <arch_overview_http_routing>`, sniffing\nAmazon's :ref:`DynamoDB <arch_overview_dynamo>`, etc.\n\n**First class HTTP/2 support:** When operating in HTTP mode, Envoy :ref:`supports\n<arch_overview_http_protocols>` both HTTP/1.1 and HTTP/2. Envoy can operate as a transparent\nHTTP/1.1 to HTTP/2 proxy in both directions. This means that any combination of HTTP/1.1 and HTTP/2\nclients and target servers can be bridged. The recommended service to service configuration uses\nHTTP/2 between all Envoys to create a mesh of persistent connections that requests and responses can\nbe multiplexed over.\n\n**HTTP L7 routing:** When operating in HTTP mode, Envoy supports a\n:ref:`routing <arch_overview_http_routing>` subsystem that is capable of routing and redirecting\nrequests based on path, authority, content type, :ref:`runtime <arch_overview_runtime>` values, etc.\nThis functionality is most useful when using Envoy as a front/edge proxy but is also leveraged when\nbuilding a service to service mesh.\n\n**gRPC support:** `gRPC <https://www.grpc.io/>`_ is an RPC framework from Google that uses HTTP/2\nas the underlying multiplexed transport. 
Envoy :ref:`supports <arch_overview_grpc>` all of the\nHTTP/2 features required to be used as the routing and load balancing substrate for gRPC requests\nand responses. The two systems are very complementary.\n\n**Service discovery and dynamic configuration:** Envoy optionally consumes a layered set of\n:ref:`dynamic configuration APIs <arch_overview_dynamic_config>` for centralized management.\nThe layers provide an Envoy with dynamic updates about: hosts within a backend cluster, the\nbackend clusters themselves, HTTP routing, listening sockets, and cryptographic material.\nFor a simpler deployment, backend host discovery can be\n:ref:`done through DNS resolution <arch_overview_service_discovery_types_strict_dns>`\n(or even\n:ref:`skipped entirely <arch_overview_service_discovery_types_static>`),\nwith the further layers replaced by static config files.\n\n**Health checking:** The :ref:`recommended <arch_overview_service_discovery_eventually_consistent>`\nway of building an Envoy mesh is to treat service discovery as an eventually consistent process.\nEnvoy includes a :ref:`health checking <arch_overview_health_checking>` subsystem which can\noptionally perform active health checking of upstream service clusters. Envoy then uses the union of\nservice discovery and health checking information to determine healthy load balancing targets. Envoy\nalso supports passive health checking via an :ref:`outlier detection\n<arch_overview_outlier_detection>` subsystem.\n\n**Advanced load balancing:** :ref:`Load balancing <arch_overview_load_balancing>` among different\ncomponents in a distributed system is a complex problem. Because Envoy is a self contained proxy\ninstead of a library, it is able to implement advanced load balancing techniques in a single place\nand have them be accessible to any application. 
Currently Envoy includes support for :ref:`automatic\nretries <arch_overview_http_routing_retry>`, :ref:`circuit breaking <arch_overview_circuit_break>`,\n:ref:`global rate limiting <arch_overview_global_rate_limit>` via an external rate limiting service,\n:ref:`request shadowing <envoy_v3_api_msg_config.route.v3.RouteAction.RequestMirrorPolicy>`, and\n:ref:`outlier detection <arch_overview_outlier_detection>`. Future support is planned for request\nracing.\n\n**Front/edge proxy support:** There is substantial benefit in using the same software at the edge\n(observability, management, identical service discovery and load balancing algorithms, etc.). Envoy\nhas a feature set that makes it well suited as an edge proxy for most modern web application use\ncases. This includes :ref:`TLS <arch_overview_ssl>` termination, HTTP/1.1 and HTTP/2 :ref:`support\n<arch_overview_http_protocols>`, as well as HTTP L7 :ref:`routing <arch_overview_http_routing>`.\n\n**Best in class observability:** As stated above, the primary goal of Envoy is to make the network\ntransparent. However, problems occur both at the network level and at the application level. Envoy\nincludes robust :ref:`statistics <arch_overview_statistics>` support for all subsystems. `statsd\n<https://github.com/etsy/statsd>`_ (and compatible providers) is the currently supported statistics\nsink, though plugging in a different one would not be difficult. Statistics are also viewable via\nthe :ref:`administration <operations_admin_interface>` port. Envoy also supports distributed\n:ref:`tracing <arch_overview_tracing>` via thirdparty providers.\n"
  },
  {
    "path": "docs/root/operations/admin.rst",
    "content": ".. _operations_admin_interface:\n\nAdministration interface\n========================\n\nEnvoy exposes a local administration interface that can be used to query and\nmodify different aspects of the server:\n\n* :ref:`v3 API reference <envoy_v3_api_msg_config.bootstrap.v3.Admin>`\n\n.. _operations_admin_interface_security:\n\n.. attention::\n\n  The administration interface in its current form both allows destructive operations to be\n  performed (e.g., shutting down the server) as well as potentially exposes private information\n  (e.g., stats, cluster names, cert info, etc.). It is **critical** that access to the\n  administration interface is only allowed via a secure network. It is also **critical** that hosts\n  that access the administration interface are **only** attached to the secure network (i.e., to\n  avoid CSRF attacks). This involves setting up an appropriate firewall or optimally only allowing\n  access to the administration listener via localhost. This can be accomplished with a v2\n  configuration like the following:\n\n  .. code-block:: yaml\n\n    admin:\n      access_log_path: /tmp/admin_access.log\n      profile_path: /tmp/envoy.prof\n      address:\n        socket_address: { address: 127.0.0.1, port_value: 9901 }\n\n  In the future additional security options will be added to the administration interface. This\n  work is tracked in `this <https://github.com/envoyproxy/envoy/issues/2763>`_ issue.\n\n  All mutations must be sent as HTTP POST operations. When a mutation is requested via GET,\n  the request has no effect, and an HTTP 400 (Invalid Request) response is returned.\n\n.. note::\n\n  For an endpoint with *?format=json*, it dumps data as a JSON-serialized proto. Fields with default\n  values are not rendered. 
For example for */clusters?format=json*, the circuit breakers thresholds\n  priority field is omitted when its value is :ref:`DEFAULT priority\n  <envoy_v3_api_enum_value_config.core.v3.RoutingPriority.DEFAULT>` as shown below:\n\n  .. code-block:: json\n\n    {\n     \"thresholds\": [\n      {\n       \"max_connections\": 1,\n       \"max_pending_requests\": 1024,\n       \"max_requests\": 1024,\n       \"max_retries\": 1\n      },\n      {\n       \"priority\": \"HIGH\",\n       \"max_connections\": 1,\n       \"max_pending_requests\": 1024,\n       \"max_requests\": 1024,\n       \"max_retries\": 1\n      }\n     ]\n    }\n\n.. http:get:: /\n\n  Render an HTML home page with a table of links to all available options.\n\n.. http:get:: /help\n\n  Print a textual table of all available options.\n\n.. _operations_admin_interface_certs:\n\n.. http:get:: /certs\n\n  List out all loaded TLS certificates, including file name, serial number, subject alternate names and days until\n  expiration in JSON format conforming to the :ref:`certificate proto definition <envoy_v3_api_msg_admin.v3.Certificates>`.\n\n.. _operations_admin_interface_clusters:\n\n.. http:get:: /clusters\n\n  List out all configured :ref:`cluster manager <arch_overview_cluster_manager>` clusters. This\n  information includes all discovered upstream hosts in each cluster along with per host statistics.\n  This is useful for debugging service discovery issues.\n\n  Cluster manager information\n    - ``version_info`` string -- the version info string of the last loaded\n      :ref:`CDS<config_cluster_manager_cds>` update.\n      If Envoy does not have :ref:`CDS<config_cluster_manager_cds>` setup, the\n      output will read ``version_info::static``.\n\n  Cluster wide information\n    - :ref:`circuit breakers<config_cluster_manager_cluster_circuit_breakers>` settings for all priority settings.\n\n    - Information about :ref:`outlier detection<arch_overview_outlier_detection>` if a detector is installed. 
Currently\n      :ref:`average success rate <envoy_v3_api_field_data.cluster.v3.OutlierEjectSuccessRate.cluster_average_success_rate>`,\n      and :ref:`ejection threshold<envoy_v3_api_field_data.cluster.v3.OutlierEjectSuccessRate.cluster_success_rate_ejection_threshold>`\n      are presented. Both of these values could be ``-1`` if there was not enough data to calculate them in the last\n      :ref:`interval<envoy_v3_api_field_config.cluster.v3.OutlierDetection.interval>`.\n\n    - ``added_via_api`` flag -- ``false`` if the cluster was added via static configuration, ``true``\n      if it was added via the :ref:`CDS<config_cluster_manager_cds>` api.\n\n  Per host statistics\n    .. csv-table::\n      :header: Name, Type, Description\n      :widths: 1, 1, 2\n\n      cx_total, Counter, Total connections\n      cx_active, Gauge, Total active connections\n      cx_connect_fail, Counter, Total connection failures\n      rq_total, Counter, Total requests\n      rq_timeout, Counter, Total timed out requests\n      rq_success, Counter, Total requests with non-5xx responses\n      rq_error, Counter, Total requests with 5xx responses\n      rq_active, Gauge, Total active requests\n      healthy, String, The health status of the host. See below\n      weight, Integer, Load balancing weight (1-100)\n      zone, String, Service zone\n      canary, Boolean, Whether the host is a canary\n      success_rate, Double, \"Request success rate (0-100). 
-1 if there was not enough\n      :ref:`request volume<envoy_v3_api_field_config.cluster.v3.OutlierDetection.success_rate_request_volume>`\n      in the :ref:`interval<envoy_v3_api_field_config.cluster.v3.OutlierDetection.interval>`\n      to calculate it\"\n\n  Host health status\n    A host is either healthy or unhealthy because of one or more different failing health states.\n    If the host is healthy the ``healthy`` output will be equal to *healthy*.\n\n    If the host is not healthy, the ``healthy`` output will be composed of one or more of the\n    following strings:\n\n    */failed_active_hc*: The host has failed an :ref:`active health check\n    <config_cluster_manager_cluster_hc>`.\n\n    */failed_eds_health*: The host was marked unhealthy by EDS.\n\n    */failed_outlier_check*: The host has failed an outlier detection check.\n\n.. http:get:: /clusters?format=json\n\n  Dump the */clusters* output in a JSON-serialized proto. See the\n  :ref:`definition <envoy_v3_api_msg_admin.v3.Clusters>` for more information.\n\n.. _operations_admin_interface_config_dump:\n\n.. http:get:: /config_dump\n\n  Dump currently loaded configuration from various Envoy components as JSON-serialized proto\n  messages. See the :ref:`response definition <envoy_v3_api_msg_admin.v3.ConfigDump>` for more\n  information.\n\n.. warning::\n  Configuration may include :ref:`TLS certificates <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.TlsCertificate>`. Before\n  dumping the configuration, Envoy will attempt to redact the ``private_key`` and ``password``\n  fields from any certificates it finds. This relies on the configuration being a strongly-typed\n  protobuf message. If your Envoy configuration uses deprecated ``config`` fields (of type\n  ``google.protobuf.Struct``), please update to the recommended ``typed_config`` fields (of type\n  ``google.protobuf.Any``) to ensure sensitive data is redacted properly.\n\n.. 
warning::\n  The underlying proto is marked v2alpha and hence its contents, including the JSON representation,\n  are not guaranteed to be stable.\n\n.. _operations_admin_interface_config_dump_include_eds:\n\n.. http:get:: /config_dump?include_eds\n\n  Dump currently loaded configuration including EDS. See the :ref:`response definition <envoy_v3_api_msg_admin.v3.EndpointsConfigDump>` for more\n  information.\n\n.. _operations_admin_interface_config_dump_by_mask:\n\n.. http:get:: /config_dump?mask={}\n\n  Specify a subset of fields that you would like to be returned. The mask is parsed as a\n  ``ProtobufWkt::FieldMask`` and applied to each top level dump such as\n  :ref:`BootstrapConfigDump <envoy_v3_api_msg_admin.v3.BootstrapConfigDump>` and\n  :ref:`ClustersConfigDump <envoy_v3_api_msg_admin.v3.ClustersConfigDump>`.\n  This behavior changes if both resource and mask query parameters are specified. See\n  below for details.\n\n.. _operations_admin_interface_config_dump_by_resource:\n\n.. http:get:: /config_dump?resource={}\n\n  Dump only the currently loaded configuration that matches the specified resource. The resource must\n  be a repeated field in one of the top level config dumps such as\n  :ref:`static_listeners <envoy_v3_api_field_admin.v3.ListenersConfigDump.static_listeners>` from\n  :ref:`ListenersConfigDump <envoy_v3_api_msg_admin.v3.ListenersConfigDump>` or\n  :ref:`dynamic_active_clusters <envoy_v3_api_field_admin.v3.ClustersConfigDump.dynamic_active_clusters>` from\n  :ref:`ClustersConfigDump <envoy_v3_api_msg_admin.v3.ClustersConfigDump>`. If you need a non-repeated\n  field, use the mask query parameter documented above. If you want only a subset of fields from the repeated\n  resource, use both as documented below.\n\n.. _operations_admin_interface_config_dump_by_resource_and_mask:\n\n.. 
http:get:: /config_dump?resource={}&mask={}\n\n  When both resource and mask query parameters are specified, the mask is applied to every element\n  in the desired repeated field so that only a subset of fields are returned. The mask is parsed\n  as a ``ProtobufWkt::FieldMask``.\n\n  For example, get the names of all active dynamic clusters with\n  ``/config_dump?resource=dynamic_active_clusters&mask=cluster.name``\n\n.. http:get:: /contention\n\n  Dump current Envoy mutex contention stats (:ref:`MutexStats <envoy_v3_api_msg_admin.v3.MutexStats>`) in JSON\n  format, if mutex tracing is enabled. See :option:`--enable-mutex-tracing`.\n\n.. http:post:: /cpuprofiler\n\n  Enable or disable the CPU profiler. Requires compiling with gperftools. The output file can be configured by admin.profile_path.\n\n.. http:post:: /heapprofiler\n\n  Enable or disable the Heap profiler. Requires compiling with gperftools. The output file can be configured by admin.profile_path.\n\n.. _operations_admin_interface_healthcheck_fail:\n\n.. http:post:: /healthcheck/fail\n\n  Fail inbound health checks. This requires the use of the HTTP :ref:`health check filter\n  <config_http_filters_health_check>`. This is useful for draining a server prior to shutting it\n  down or doing a full restart. Invoking this command will universally fail health check requests\n  regardless of how the filter is configured (pass through, etc.).\n\n.. _operations_admin_interface_healthcheck_ok:\n\n.. http:post:: /healthcheck/ok\n\n  Negate the effect of :http:post:`/healthcheck/fail`. This requires the use of the HTTP\n  :ref:`health check filter <config_http_filters_health_check>`.\n\n.. http:get:: /hot_restart_version\n\n  See :option:`--hot-restart-version`.\n\n.. _operations_admin_interface_init_dump:\n\n.. http:get:: /init_dump\n\n  Dump current information of unready targets of various Envoy components as JSON-serialized proto\n  messages. 
See the :ref:`response definition <envoy_v3_api_msg_admin.v3.UnreadyTargetsDumps>` for more\n  information.\n\n.. _operations_admin_interface_init_dump_by_mask:\n\n.. http:get:: /init_dump?mask={}\n\n  When the mask query parameter is specified, the mask value is the desired component to dump unready targets.\n  The mask is parsed as a ``ProtobufWkt::FieldMask``.\n\n  For example, get the unready targets of all listeners with\n  ``/init_dump?mask=listener``\n\n.. _operations_admin_interface_listeners:\n\n.. http:get:: /listeners\n\n  List out all configured :ref:`listeners <arch_overview_listeners>`. This information includes the names of listeners as well as\n  the addresses that they are listening on. If a listener is configured to listen on port 0, then the output will contain the actual\n  port that was allocated by the OS.\n\n.. http:get:: /listeners?format=json\n\n  Dump the */listeners* output in a JSON-serialized proto. See the\n  :ref:`definition <envoy_v3_api_msg_admin.v3.Listeners>` for more information.\n\n.. _operations_admin_interface_logging:\n\n.. http:post:: /logging\n\n  Enable/disable different logging levels on a particular logger or all loggers.\n\n  - To change the logging level across all loggers, set the query parameter as level=<desired_level>.\n  - To change a particular logger's level, set the query parameter like so, <logger_name>=<desired_level>.\n  - To list the loggers, send a POST request to the /logging endpoint without a query parameter.\n\n  .. note::\n\n    Generally only used during development. With `--enable-fine-grain-logging` being set, the logger is represented\n    by the path of the file it belongs to (to be specific, the path determined by `__FILE__`), so the logger list\n    will show a list of file paths, and the specific path should be used as <logger_name> to change the log level.\n\n.. http:get:: /memory\n\n  Prints current memory allocation / heap usage, in bytes. 
Useful in lieu of printing all `/stats` and filtering to get the memory-related statistics.\n\n.. http:post:: /quitquitquit\n\n  Cleanly exit the server.\n\n.. http:post:: /reset_counters\n\n  Reset all counters to zero. This is useful along with :http:get:`/stats` during debugging. Note\n  that this does not drop any data sent to statsd. It just affects local output of the\n  :http:get:`/stats` command.\n\n.. _operations_admin_interface_drain:\n\n.. http:post:: /drain_listeners\n\n   :ref:`Drains <arch_overview_draining>` all listeners.\n\n   .. http:post:: /drain_listeners?inboundonly\n\n   :ref:`Drains <arch_overview_draining>` all inbound listeners. `traffic_direction` field in\n   :ref:`Listener <envoy_v3_api_msg_config.listener.v3.Listener>` is used to determine whether a listener\n   is inbound or outbound.\n\n   .. http:post:: /drain_listeners?graceful\n\n   When draining listeners, enter a graceful drain period prior to closing listeners.\n   This behaviour and duration is configurable via server options or CLI\n   (:option:`--drain-time-s` and :option:`--drain-strategy`).\n\n.. attention::\n\n   This operation directly stops the matched listeners on workers. Once listeners in a given\n   traffic direction are stopped, listener additions and modifications in that direction\n   are not allowed.\n\n.. http:get:: /server_info\n\n  Outputs a JSON message containing information about the running server.\n\n  Sample output looks like:\n\n  .. 
code-block:: json\n\n    {\n      \"version\": \"b050513e840aa939a01f89b07c162f00ab3150eb/1.9.0-dev/Modified/DEBUG\",\n      \"state\": \"LIVE\",\n      \"command_line_options\": {\n        \"base_id\": \"0\",\n        \"concurrency\": 8,\n        \"config_path\": \"config.yaml\",\n        \"config_yaml\": \"\",\n        \"allow_unknown_static_fields\": false,\n        \"admin_address_path\": \"\",\n        \"local_address_ip_version\": \"v4\",\n        \"log_level\": \"info\",\n        \"component_log_level\": \"\",\n        \"log_format\": \"[%Y-%m-%d %T.%e][%t][%l][%n] %v\",\n        \"log_path\": \"\",\n        \"hot_restart_version\": false,\n        \"service_cluster\": \"\",\n        \"service_node\": \"\",\n        \"service_zone\": \"\",\n        \"mode\": \"Serve\",\n        \"disable_hot_restart\": false,\n        \"enable_mutex_tracing\": false,\n        \"restart_epoch\": 0,\n        \"file_flush_interval\": \"10s\",\n        \"drain_time\": \"600s\",\n        \"parent_shutdown_time\": \"900s\",\n        \"cpuset_threads\": false\n      },\n      \"uptime_current_epoch\": \"6s\",\n      \"uptime_all_epochs\": \"6s\",\n      \"node\": {\n        \"id\": \"node1\",\n        \"cluster\": \"cluster1\",\n        \"user_agent_name\": \"envoy\",\n        \"user_agent_build_version\": {\n          \"version\": {\n            \"major_number\": 1,\n            \"minor_number\": 15,\n            \"patch\": 0\n          }\n        },\n        \"metadata\": {},\n        \"extensions\": [],\n        \"client_features\": [],\n        \"listening_addresses\": []\n      }\n    }\n\n  See the :ref:`ServerInfo proto <envoy_v3_api_msg_admin.v3.ServerInfo>` for an\n  explanation of the output.\n\n.. http:get:: /ready\n\n  Outputs a string and error code reflecting the state of the server. 200 is returned for the LIVE state,\n  and 503 otherwise. This can be used as a readiness check.\n\n  Example output:\n\n  .. 
code-block:: none\n\n    LIVE\n\n  See the `state` field of the :ref:`ServerInfo proto <envoy_v3_api_msg_admin.v3.ServerInfo>` for an\n  explanation of the output.\n\n.. _operations_admin_interface_stats:\n\n.. http:get:: /stats\n\n  Outputs all statistics on demand. This command is very useful for local debugging.\n  Histograms will output the computed quantiles i.e P0,P25,P50,P75,P90,P99,P99.9 and P100.\n  The output for each quantile will be in the form of (interval,cumulative) where interval value\n  represents the summary since last flush interval and cumulative value represents the\n  summary since the start of Envoy instance. \"No recorded values\" in the histogram output indicates\n  that it has not been updated with a value.\n  See :ref:`here <operations_stats>` for more information.\n\n  .. http:get:: /stats?usedonly\n\n  Outputs statistics that Envoy has updated (counters incremented at least once, gauges changed at\n  least once, and histograms added to at least once).\n\n  .. http:get:: /stats?filter=regex\n\n  Filters the returned stats to those with names matching the regular expression\n  `regex`. Compatible with `usedonly`. Performs partial matching by default, so\n  `/stats?filter=server` will return all stats containing the word `server`.\n  Full-string matching can be specified with begin- and end-line anchors. (i.e.\n  `/stats?filter=^server.concurrency$`)\n\n.. http:get:: /stats?format=json\n\n  Outputs /stats in JSON format. This can be used for programmatic access of stats. Counters and Gauges\n  will be in the form of a set of (name,value) pairs. Histograms will be under the element \"histograms\",\n  that contains \"supported_quantiles\" which lists the quantiles supported and an array of computed_quantiles\n  that has the computed quantile for each histogram.\n\n  If a histogram is not updated during an interval, the output will have null for all the quantiles.\n\n  Example histogram output:\n\n  .. 
code-block:: json\n\n    {\n      \"histograms\": {\n        \"supported_quantiles\": [\n          0, 25, 50, 75, 90, 95, 99, 99.9, 100\n        ],\n        \"computed_quantiles\": [\n          {\n            \"name\": \"cluster.external_auth_cluster.upstream_cx_length_ms\",\n            \"values\": [\n              {\"interval\": 0, \"cumulative\": 0},\n              {\"interval\": 0, \"cumulative\": 0},\n              {\"interval\": 1.0435787, \"cumulative\": 1.0435787},\n              {\"interval\": 1.0941565, \"cumulative\": 1.0941565},\n              {\"interval\": 2.0860023, \"cumulative\": 2.0860023},\n              {\"interval\": 3.0665233, \"cumulative\": 3.0665233},\n              {\"interval\": 6.046609, \"cumulative\": 6.046609},\n              {\"interval\": 229.57333,\"cumulative\": 229.57333},\n              {\"interval\": 260,\"cumulative\": 260}\n            ]\n          },\n          {\n            \"name\": \"http.admin.downstream_rq_time\",\n            \"values\": [\n              {\"interval\": null, \"cumulative\": 0},\n              {\"interval\": null, \"cumulative\": 0},\n              {\"interval\": null, \"cumulative\": 1.0435787},\n              {\"interval\": null, \"cumulative\": 1.0941565},\n              {\"interval\": null, \"cumulative\": 2.0860023},\n              {\"interval\": null, \"cumulative\": 3.0665233},\n              {\"interval\": null, \"cumulative\": 6.046609},\n              {\"interval\": null, \"cumulative\": 229.57333},\n              {\"interval\": null, \"cumulative\": 260}\n            ]\n          }\n        ]\n      }\n    }\n\n  .. http:get:: /stats?format=json&usedonly\n\n  Outputs statistics that Envoy has updated (counters incremented at least once,\n  gauges changed at least once, and histograms added to at least once) in JSON format.\n\n.. http:get:: /stats?format=prometheus\n\n  or alternatively,\n\n  .. 
http:get:: /stats/prometheus\n\n  Outputs /stats in `Prometheus <https://prometheus.io/docs/instrumenting/exposition_formats/>`_\n  v0.0.4 format. This can be used to integrate with a Prometheus server.\n\n  You can optionally pass the `usedonly` URL query argument to only get statistics that\n  Envoy has updated (counters incremented at least once, gauges changed at least once,\n  and histograms added to at least once)\n\n  .. http:get:: /stats/recentlookups\n\n  This endpoint helps Envoy developers debug potential contention\n  issues in the stats system. Initially, only the count of StatName\n  lookups is accumulated, not the specific names that are being looked\n  up. In order to see specific recent requests, you must enable the\n  feature by POSTing to `/stats/recentlookups/enable`. There may be\n  approximately 40-100 nanoseconds of added overhead per lookup.\n\n  When enabled, this endpoint emits a table of stat names that were\n  recently accessed as strings by Envoy. Ideally, strings should be\n  converted into StatNames, counters, gauges, and histograms by Envoy\n  code only during startup or when receiving a new configuration via\n  xDS. This is because when stats are looked up as strings they must\n  take a global symbol table lock. During startup this is acceptable,\n  but in response to user requests on high core-count machines, this\n  can cause performance issues due to mutex contention.\n\n  This admin endpoint requires Envoy to be started with option\n  `--use-fake-symbol-table 0`.\n\n  See :repo:`source/docs/stats.md` for more details.\n\n  Note also that actual mutex contention can be tracked via :http:get:`/contention`.\n\n  .. http:post:: /stats/recentlookups/enable\n\n  Turns on collection of recent lookup of stat-names, thus enabling\n  `/stats/recentlookups`.\n\n  See :repo:`source/docs/stats.md` for more details.\n\n  .. 
http:post:: /stats/recentlookups/disable\n\n  Turns off collection of recent lookup of stat-names, thus disabling\n  `/stats/recentlookups`. It also clears the list of lookups. However,\n  the total count, visible as stat `server.stats_recent_lookups`, is\n  not cleared, and continues to accumulate.\n\n  See :repo:`source/docs/stats.md` for more details.\n\n  .. http:post:: /stats/recentlookups/clear\n\n  Clears all outstanding lookups and counts. This clears all recent\n  lookups data as well as the count, but collection continues if\n  it is enabled.\n\n  See :repo:`source/docs/stats.md` for more details.\n\n.. _operations_admin_interface_runtime:\n\n.. http:get:: /runtime\n\n  Outputs all runtime values on demand in JSON format. See :ref:`here <arch_overview_runtime>` for\n  more information on how these values are configured and utilized. The output includes the list of\n  the active runtime override layers and the stack of layer values for each key. Empty strings\n  indicate no value, and the final active value from the stack also is included in a separate key.\n  Example output:\n\n.. code-block:: json\n\n  {\n    \"layers\": [\n      \"disk\",\n      \"override\",\n      \"admin\"\n    ],\n    \"entries\": {\n      \"my_key\": {\n        \"layer_values\": [\n          \"my_disk_value\",\n          \"\",\n          \"\"\n        ],\n        \"final_value\": \"my_disk_value\"\n      },\n      \"my_second_key\": {\n        \"layer_values\": [\n          \"my_second_disk_value\",\n          \"my_disk_override_value\",\n          \"my_admin_override_value\"\n        ],\n        \"final_value\": \"my_admin_override_value\"\n      }\n    }\n  }\n\n.. _operations_admin_interface_runtime_modify:\n\n.. http:post:: /runtime_modify?key1=value1&key2=value2&keyN=valueN\n\n  Adds or modifies runtime values as passed in query parameters. To delete a previously added key,\n  use an empty string as the value. 
Note that deletion only applies to overrides added via this\n  endpoint; values loaded from disk can be modified via override but not deleted.\n\n.. attention::\n\n  Use the /runtime_modify endpoint with care. Changes are effective immediately. It is\n  **critical** that the admin interface is :ref:`properly secured\n  <operations_admin_interface_security>`.\n\n  .. _operations_admin_interface_hystrix_event_stream:\n\n.. http:get:: /hystrix_event_stream\n\n  This endpoint is intended to be used as the stream source for\n  `Hystrix dashboard <https://github.com/Netflix-Skunkworks/hystrix-dashboard/wiki>`_.\n  A GET to this endpoint will trigger a stream of statistics from Envoy in\n  `text/event-stream <https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events>`_\n  format, as expected by the Hystrix dashboard.\n\n  If invoked from a browser or a terminal, the response will be shown as a continuous stream,\n  sent in intervals defined by the :ref:`Bootstrap <envoy_v3_api_msg_config.bootstrap.v3.Bootstrap>`\n  :ref:`stats_flush_interval <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.stats_flush_interval>`\n\n  This handler is enabled only when a Hystrix sink is enabled in the config file as documented\n  :ref:`here <envoy_v3_api_msg_config.metrics.v3.HystrixSink>`.\n\n  As Envoy's and Hystrix resiliency mechanisms differ, some of the statistics shown in the dashboard\n  had to be adapted:\n\n  * **Thread pool rejections** - Generally similar to what's called short circuited in Envoy,\n    and counted by *upstream_rq_pending_overflow*, although the term thread pool is not accurate for\n    Envoy. Both in Hystrix and Envoy, the result is rejected requests which are not passed upstream.\n  * **circuit breaker status (closed or open)** - Since in Envoy, a circuit is opened based on the\n    current number of connections/requests in queue, there is no sleeping window for circuit breaker,\n    circuit open/closed is momentary. 
Hence, we set the circuit breaker status to \"forced closed\".\n  * **Short-circuited (rejected)** - The term exists in Envoy but refers to requests not sent because\n    of passing a limit (queue or connections), while in Hystrix it refers to requests not sent because\n    of high percentage of service unavailable responses during some time frame.\n    In Envoy, service unavailable response will cause **outlier detection** - removing a node off the\n    load balancer pool, but requests are not rejected as a result. Therefore, this counter is always\n    set to '0'.\n  * Latency information represents data since last flush.\n    Mean latency is currently not available.\n\n.. http:post:: /tap\n\n  This endpoint is used for configuring an active tap session. It is only\n  available if a valid tap extension has been configured, and that extension has\n  been configured to accept admin configuration. See:\n\n  * :ref:`HTTP tap filter configuration <config_http_filters_tap_admin_handler>`\n\n.. http:post:: /reopen_logs\n\n  Triggers reopen of all access logs. Behavior is similar to SIGUSR1 handling.\n"
  },
  {
    "path": "docs/root/operations/certificates.rst",
    "content": ".. _operations_certificates:\n\nCertificate Management\n======================\n\nEnvoy provides several mechanisms for cert management. At a high level they can be broken into\n\n1. Static :ref:`CommonTlsContext <envoy_api_msg_auth.CommonTlsContext>` referenced certificates.\n   These will *not* reload automatically, and requires either a restart of the proxy or\n   reloading the clusters/listeners that reference them.\n   :ref:`Hot restarting <arch_overview_hot_restart>` can be used here to pick up the new\n   certificates without dropping traffic.\n2. :ref:`Secret Discovery Service <config_secret_discovery_service>` referenced certificates.\n   By using SDS, certificates can either be referenced as files (reloading the certs when the\n   parent directory is moved) or through an external SDS server that can push new certificates.\n"
  },
  {
    "path": "docs/root/operations/cli.rst",
    "content": ".. _operations_cli:\n\nCommand line options\n====================\n\nEnvoy is driven both by a JSON configuration file as well as a set of command line options. The\nfollowing are the command line options that Envoy supports.\n\n.. option:: -c <path string>, --config-path <path string>\n\n  *(optional)* The path to the v2 :ref:`JSON/YAML/proto3 configuration\n  file <config>`. If this flag is missing, :option:`--config-yaml` is required.\n  This will be parsed as a :ref:`v2 bootstrap configuration file\n  <config_overview_bootstrap>`.\n  Valid extensions are ``.json``, ``.yaml``, ``.pb`` and ``.pb_text``, which indicate\n  JSON, YAML, `binary proto3\n  <https://developers.google.com/protocol-buffers/docs/encoding>`_ and `text\n  proto3\n  <https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.text_format>`_\n  formats respectively.\n\n.. option:: --config-yaml <yaml string>\n\n  *(optional)* The YAML string for a bootstrap configuration. If :option:`--config-path` is also set,\n  the values in this YAML string will override and merge with the bootstrap loaded from :option:`--config-path`.\n  Because YAML is a superset of JSON, a JSON string may also be passed to :option:`--config-yaml`.\n\n  Example overriding the node id on the command line:\n\n    .. code-block:: console\n\n      ./envoy -c bootstrap.yaml --config-yaml \"node: {id: 'node1'}\"\n\n.. option:: --bootstrap-version <integer>\n\n   *(optional)* The API version to load the bootstrap as. The value should be a single integer, e.g.\n   to parse the bootstrap configuration as V3, specify ``--bootstrap-version 3``. If unset, Envoy will\n   attempt to load the bootstrap as the previous API version and upgrade it to the latest. If that fails,\n   Envoy will attempt to load the configuration as the latest version.\n\n.. 
option:: --mode <string>\n\n  *(optional)* One of the operating modes for Envoy:\n\n  * ``serve``: *(default)* Validate the JSON configuration and then serve traffic normally.\n\n  * ``validate``: Validate the JSON configuration and then exit, printing either an \"OK\" message (in\n    which case the exit code is 0) or any errors generated by the configuration file (exit code 1).\n    No network traffic is generated, and the hot restart process is not performed, so no other Envoy\n    process on the machine will be disturbed.\n\n.. option:: --admin-address-path <path string>\n\n  *(optional)* The output file path where the admin address and port will be written.\n\n.. option:: --local-address-ip-version <string>\n\n  *(optional)* The IP address version that is used to populate the server local IP address. This\n  parameter affects various headers including what is appended to the X-Forwarded-For (XFF) header.\n  The options are ``v4`` or ``v6``. The default is ``v4``.\n\n.. option:: --base-id <integer>\n\n  *(optional)* The base ID to use when allocating shared memory regions. Envoy uses shared memory\n  regions during :ref:`hot restart <arch_overview_hot_restart>`. Most users will never have to\n  set this option. However, if Envoy needs to be run multiple times on the same machine, each\n  running Envoy will need a unique base ID so that the shared memory regions do not conflict.\n\n.. option:: --use-dynamic-base-id\n\n  *(optional)* Selects an unused base ID to use when allocating shared memory regions. Using\n  preselected values with :option:`--base-id` is preferred, however. If this option is enabled,\n  it supersedes the :option:`--base-id` value. This flag may not be used when the value of\n  :option:`--restart-epoch` is non-zero. Instead, for subsequent hot restarts, set\n  :option:`--base-id` option with the selected base ID. See :option:`--base-id-path`.\n\n.. option:: --base-id-path <path_string>\n\n  *(optional)* Writes the base ID to the given path. 
While this option is compatible with\n  :option:`--base-id`, its intended use is to provide access to the dynamic base ID selected by\n  :option:`--use-dynamic-base-id`.\n\n.. option:: --concurrency <integer>\n\n  *(optional)* The number of :ref:`worker threads <arch_overview_threading>` to run. If not\n  specified defaults to the number of hardware threads on the machine. If set to zero, Envoy will\n  still run one worker thread.\n\n.. option:: -l <string>, --log-level <string>\n\n  *(optional)* The logging level. Non developers should generally never set this option. See the\n  help text for the available log levels and the default.\n\n.. option:: --component-log-level <string>\n\n  *(optional)* The comma separated list of logging level per component. Non developers should generally\n  never set this option. For example, if you want `upstream` component to run at `debug` level and\n  `connection` component to run at `trace` level, you should pass ``upstream:debug,connection:trace`` to\n  this flag. See ``ALL_LOGGER_IDS`` in :repo:`/source/common/common/logger.h` for a list of components.\n\n.. option:: --cpuset-threads\n\n   *(optional)* This flag is used to control the number of worker threads if :option:`--concurrency` is\n   not set. If enabled, the assigned cpuset size is used to determine the number of worker threads on\n   Linux-based systems. Otherwise the number of worker threads is set to the number of hardware threads\n   on the machine. You can read more about cpusets in the\n   `kernel documentation <https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt>`_.\n\n.. option:: --log-path <path string>\n\n   *(optional)* The output file path where logs should be written. This file will be re-opened\n   when SIGUSR1 is handled. If this is not set, log to stderr.\n\n.. option:: --log-format <format string>\n\n   *(optional)* The format string to use for laying out the log message metadata. 
If this is not\n   set, a default format string ``\"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v\"`` is used.\n\n   When used in conjunction with :option:`--log-format-prefix-with-location` set to 1, the logger can be\n   configured to prefix ``%v`` by a file path and a line number.\n\n   When used in conjunction with :option:`--log-format-escaped`, the logger can be configured\n   to log in a format that is parsable by log viewers. Known integrations are documented\n   in the :ref:`application logging configuration <config_application_logs>` section.\n\n   The supported format flags are (with example output):\n\n   :%v:\tThe actual message to log (\"some user text\")\n   :%t:\tThread id (\"1232\")\n   :%P:\tProcess id (\"3456\")\n   :%n:\tLogger's name (\"filter\")\n   :%l:\tThe log level of the message (\"debug\", \"info\", etc.)\n   :%L:\tShort log level of the message (\"D\", \"I\", etc.)\n   :%a:\tAbbreviated weekday name (\"Tue\")\n   :%A:\tFull weekday name (\"Tuesday\")\n   :%b:\tAbbreviated month name (\"Mar\")\n   :%B:\tFull month name (\"March\")\n   :%c:\tDate and time representation (\"Tue Mar 27 15:25:06 2018\")\n   :%C:\tYear in 2 digits (\"18\")\n   :%Y:\tYear in 4 digits (\"2018\")\n   :%D, %x:\tShort MM/DD/YY date (\"03/27/18\")\n   :%m:\tMonth 01-12 (\"03\")\n   :%d:\tDay of month 01-31 (\"27\")\n   :%H:\tHours in 24 format 00-23 (\"15\")\n   :%I:\tHours in 12 format 01-12 (\"03\")\n   :%M:\tMinutes 00-59 (\"25\")\n   :%S:\tSeconds 00-59 (\"06\")\n   :%e:\tMillisecond part of the current second 000-999 (\"008\")\n   :%f:\tMicrosecond part of the current second 000000-999999 (\"008789\")\n   :%F:\tNanosecond part of the current second 000000000-999999999 (\"008789123\")\n   :%p:\tAM/PM (\"AM\")\n   :%r:\t12-hour clock (\"03:25:06 PM\")\n   :%R:\t24-hour HH:MM time, equivalent to %H:%M (\"15:25\")\n   :%T, %X:\tISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S (\"13:25:06\")\n   :%z:\tISO 8601 offset from UTC in timezone ([+/-]HH:MM) 
(\"-07:00\")\n   :%%:\tThe % sign (\"%\")\n   :%@: Source file and line (\"my_file.cc:123\")\n   :%s: Basename of the source file (\"my_file.cc\")\n   :%g: Full relative path of the source file (\"/some/dir/my_file.cc\")\n   :%#: Source line (\"123\")\n   :%!: Source function (\"myFunc\")\n\n.. option:: --log-format-prefix-with-location <1|0>\n\n   *(optional)* This temporary flag allows replacing all entries of ``\"%v\"`` in the log format by\n   ``\"[%g:%#] %v\"``. This flag is provided for migration purposes only. If this is not set, a\n   default value 0 is used.\n\n   **NOTE**: The flag will be removed at 1.17.0 release.\n\n.. option:: --log-format-escaped\n\n  *(optional)* This flag enables application log sanitization to escape C-style escape sequences.\n  This can be used to prevent a single log line from spanning multiple lines in the underlying log.\n  This sanitizes all escape sequences in `this list <https://en.cppreference.com/w/cpp/language/escape>`_.\n  Note that each line's trailing whitespace characters (such as EOL characters) will not be escaped.\n\n.. option:: --restart-epoch <integer>\n\n  *(optional)* The :ref:`hot restart <arch_overview_hot_restart>` epoch. (The number of times\n  Envoy has been hot restarted instead of a fresh start). Defaults to 0 for the first start. This\n  option tells Envoy whether to attempt to create the shared memory region needed for hot restart,\n  or whether to open an existing one. It should be incremented every time a hot restart takes place.\n  The :ref:`hot restart wrapper <operations_hot_restarter>` sets the *RESTART_EPOCH* environment\n  variable which should be passed to this option in most cases.\n\n.. option:: --enable-fine-grain-logging\n\n  *(optional)* Enables fine-grain logger with file level log control and runtime update at administration\n  interface. 
If enabled, main log macros including `ENVOY_LOG`, `ENVOY_CONN_LOG`, `ENVOY_STREAM_LOG` and\n  `ENVOY_FLUSH_LOG` will use a per-file logger, and the usage doesn't need `Envoy::Logger::Loggable` any \n  more. The administration interface usage is similar. Please see `Administration interface \n  <https://www.envoyproxy.io/docs/envoy/latest/operations/admin>`_ for more detail.\n\n.. option:: --socket-path <path string>\n\n  *(optional)* The output file path to the socket address for :ref:`hot restart <arch_overview_hot_restart>`.\n  Defaults to \"@envoy_domain_socket\" which will be created in the abstract namespace. Suffix _{role}_{id}\n  is appended to provide the name. All envoy processes wanting to participate in hot-restart together must\n  use the same value for this option.\n\n  **NOTE**: A path starting with \"@\" will be created in the abstract namespace.\n\n.. option:: --socket-mode <string>\n\n  *(optional)* The socket file permission for :ref:`hot restart <arch_overview_hot_restart>`.\n  This must be a valid octal file permission, such as 644. The default value is 600.\n  This flag may not be used when :option:`--socket-path` starts with \"@\" or is not set.\n\n.. option:: --hot-restart-version\n\n  *(optional)* Outputs an opaque hot restart compatibility version for the binary. This can be\n  matched against the output of the :http:get:`/hot_restart_version` admin endpoint to determine\n  whether the new binary and the running binary are hot restart compatible.\n\n.. option:: --service-cluster <string>\n\n  *(optional)* Defines the local service cluster name where Envoy is running. The\n  local service cluster name is first sourced from the :ref:`Bootstrap node\n  <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.node>` message's :ref:`cluster\n  <envoy_v3_api_field_config.core.v3.Node.cluster>` field. This CLI option provides an alternative\n  method for specifying this value and will override any value set in bootstrap\n  configuration. 
It should be set if any of the following features are used:\n  :ref:`statsd <arch_overview_statistics>`, :ref:`health check cluster\n  verification <envoy_v3_api_field_config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher>`,\n  :ref:`runtime override directory <envoy_v3_api_msg_config.bootstrap.v3.Runtime>`,\n  :ref:`user agent addition\n  <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_user_agent>`,\n  :ref:`HTTP global rate limiting <config_http_filters_rate_limit>`,\n  :ref:`CDS <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  <arch_overview_tracing>`, either via this CLI option or in the bootstrap\n  configuration.\n\n.. option:: --service-node <string>\n\n  *(optional)* Defines the local service node name where Envoy is running. The\n  local service node name is first sourced from the :ref:`Bootstrap node\n  <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.node>` message's :ref:`id\n  <envoy_v3_api_field_config.core.v3.Node.id>` field. This CLI option provides an alternative\n  method for specifying this value and will override any value set in bootstrap\n  configuration. It should be set if any of the following features are used:\n  :ref:`statsd <arch_overview_statistics>`, :ref:`CDS\n  <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  <arch_overview_tracing>`, either via this CLI option or in the bootstrap\n  configuration.\n\n.. option:: --service-zone <string>\n\n  *(optional)* Defines the local service zone where Envoy is running. The local\n  service zone is first sourced from the :ref:`Bootstrap node\n  <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.node>` message's :ref:`locality.zone\n  <envoy_v3_api_field_config.core.v3.Locality.zone>` field. This CLI option provides an\n  alternative method for specifying this value and will override any value set\n  in bootstrap configuration. 
It should be set if discovery service routing is\n  used and the discovery service exposes :ref:`zone data\n  <envoy_v3_api_msg_config.endpoint.v3.LocalityLbEndpoints>`, either via this CLI option or in\n  the bootstrap configuration. The meaning of zone is context dependent, e.g.\n  `Availability Zone (AZ)\n  <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html>`_\n  on AWS, `Zone <https://cloud.google.com/compute/docs/regions-zones/>`_ on GCP,\n  etc.\n\n\n.. option:: --file-flush-interval-msec <integer>\n\n  *(optional)* The file flushing interval in milliseconds. Defaults to 10 seconds.\n  This setting is used during file creation to determine the duration between flushes\n  of buffers to files. The buffer will flush every time it gets full, or every time\n  the interval has elapsed, whichever comes first. Adjusting this setting is useful\n  when tailing :ref:`access logs <arch_overview_access_logs>` in order to\n  get more (or less) immediate flushing.\n\n.. option:: --drain-time-s <integer>\n\n  *(optional)* The time in seconds that Envoy will drain connections during\n  a :ref:`hot restart <arch_overview_hot_restart>` or when individual listeners are being\n  modified or removed via :ref:`LDS <arch_overview_dynamic_config_lds>`.\n  Defaults to 600 seconds (10 minutes). Generally the drain time should be less than\n  the parent shutdown time set via the :option:`--parent-shutdown-time-s` option. How the two\n  settings are configured depends on the specific deployment. In edge scenarios, it might be\n  desirable to have a very long drain time. In service to service scenarios, it might be possible\n  to make the drain and shutdown time much shorter (e.g., 60s/90s).\n\n.. option:: --drain-strategy <string>\n\n  *(optional)* Determine behaviour of Envoy during the hot restart drain sequence. 
During the drain sequence, the drain manager encourages draining through terminating connections on request completion, sending \"Connection: CLOSE\" on HTTP1, and sending GOAWAY on HTTP2.\n\n  * ``gradual``: *(default)* The percentage of requests encouraged to drain increases to 100% as the drain time elapses.\n\n  * ``immediate``: All requests are encouraged to drain as soon as the drain sequence begins.\n\n.. option:: --parent-shutdown-time-s <integer>\n\n  *(optional)* The time in seconds that Envoy will wait before shutting down the parent process\n  during a hot restart. See the :ref:`hot restart overview <arch_overview_hot_restart>` for more\n  information. Defaults to 900 seconds (15 minutes).\n\n.. option:: --disable-hot-restart\n\n  *(optional)* This flag disables Envoy hot restart for builds that have it enabled. By default, hot\n  restart is enabled.\n\n.. option:: --enable-mutex-tracing\n\n  *(optional)* This flag enables the collection of mutex contention statistics\n  (:ref:`MutexStats <envoy_v3_api_msg_admin.v3.MutexStats>`) as well as a contention endpoint\n  (:http:get:`/contention`). Mutex tracing is not enabled by default, since it incurs a slight performance\n  penalty for those Envoys which already experience mutex contention.\n\n.. option:: --allow-unknown-fields\n\n  *(optional)* Deprecated alias for :option:`--allow-unknown-static-fields`.\n\n.. option:: --allow-unknown-static-fields\n\n  *(optional)* This flag disables validation of protobuf configurations for unknown fields. By default, the\n  validation is enabled. For most deployments, the default should be used which ensures configuration errors\n  are caught upfront and Envoy is configured as intended. Warnings are logged for the first use of\n  any unknown field and these occurrences are counted in the :ref:`server.static_unknown_fields\n  <server_statistics>` statistic.\n\n.. 
option:: --reject-unknown-dynamic-fields\n\n  *(optional)* This flag disables validation of protobuf configuration for unknown fields in\n  dynamic configuration. By default, this flag is set false, disabling validation for fields beyond\n  bootstrap. This allows newer xDS configurations to be delivered to older Envoys. This can be set\n  true for strict dynamic checking when this behavior is not wanted but the default should be\n  desirable for most Envoy deployments. Warnings are logged for the first use of any unknown field\n  and these occurrences are counted in the :ref:`server.dynamic_unknown_fields <server_statistics>`\n  statistic.\n\n.. option:: --ignore-unknown-dynamic-fields\n\n  *(optional)* This flag disables validation of protobuf configuration for unknown fields in dynamic\n  configuration. Unlike setting :option:`--reject-unknown-dynamic-fields` to false, it does not log warnings\n  or count occurrences of unknown fields, in the interest of configuration processing speed. If\n  :option:`--reject-unknown-dynamic-fields` is set to true, this flag has no effect.\n\n.. option:: --disable-extensions <extension list>\n\n  *(optional)* This flag disables the provided list of comma-separated extension names. Disabled\n  extensions cannot be used by static or dynamic configuration, though they are still linked into\n  Envoy and may run start-up code or have other runtime effects. Extension names are created by\n  joining the extension category and name with a forward slash,\n  e.g. ``grpc_credentials/envoy.grpc_credentials.file_based_metadata``.\n\n.. option:: --version\n\n  *(optional)* This flag is used to display Envoy version and build information, e.g.\n  ``c93f9f6c1e5adddd10a3e3646c7e049c649ae177/1.9.0-dev/Clean/RELEASE/BoringSSL-FIPS``.\n\n  It consists of five slash-separated fields:\n\n  * source revision - git commit from which Envoy was built,\n\n  * release number - either release (e.g. ``1.9.0``) or a development build (e.g. 
``1.9.0-dev``),\n\n  * status of the source tree at the build time - either ``Clean`` or ``Modified``,\n\n  * build mode - either ``RELEASE`` or ``DEBUG``,\n\n  * TLS library - either ``BoringSSL`` or ``BoringSSL-FIPS``.\n"
  },
  {
    "path": "docs/root/operations/fs_flags.rst",
    "content": ".. _operations_file_system_flags:\n\nFile system flags\n=================\n\nEnvoy supports file system \"flags\" that alter state at startup. This is used to\npersist changes between restarts if necessary. The flag files should be placed\nin the directory specified in the :ref:`flags_path\n<envoy_v3_api_field_config.bootstrap.v3.Bootstrap.flags_path>` configuration\noption. The currently supported flag files are:\n\ndrain\n  If this file exists, Envoy will start in health check failing mode, similar to after the\n  :http:post:`/healthcheck/fail` command has been executed.\n"
  },
  {
    "path": "docs/root/operations/hot_restarter.rst",
    "content": ".. _operations_hot_restarter:\n\nHot restart Python wrapper\n==========================\n\nTypically, Envoy will be :ref:`hot restarted <arch_overview_hot_restart>` for config changes and\nbinary updates. However, in many cases, users will wish to use a standard process manager such as\nmonit, runit, etc. We provide :repo:`/restarter/hot-restarter.py` to make this straightforward.\n\nThe restarter is invoked like so:\n\n.. code-block:: console\n\n  hot-restarter.py start_envoy.sh\n\n`start_envoy.sh` might be defined like so (using salt/jinja like syntax):\n\n.. code-block:: jinja\n\n  #!/bin/bash\n\n  ulimit -n {{ pillar.get('envoy_max_open_files', '102400') }}\n  sysctl fs.inotify.max_user_watches={{ pillar.get('envoy_max_inotify_watches', '524288') }}\n\n  exec /usr/sbin/envoy -c /etc/envoy/envoy.cfg --restart-epoch $RESTART_EPOCH --service-cluster {{ grains['cluster_name'] }} --service-node {{ grains['service_node'] }} --service-zone {{ grains.get('ec2_availability-zone', 'unknown') }}\n\nNote on `inotify.max_user_watches`: If Envoy is being configured to watch many files for configuration in a directory\non a Linux machine, increase this value as Linux enforces limits on the maximum number of files that can be watched.\n\nThe *RESTART_EPOCH* environment variable is set by the restarter on each restart and must be passed\nto the :option:`--restart-epoch` option.\n\n.. warning::\n\n   Special care must be taken if you wish to use the :option:`--use-dynamic-base-id` option. 
That\n   flag may only be set when the *RESTART_EPOCH* is 0 and your *start_envoy.sh* must obtain the\n   chosen base ID (via :option:`--base-id-path`), store it, and use it as the :option:`--base-id`\n   value on subsequent invocations (when *RESTART_EPOCH* is greater than 0).\n\nThe restarter handles the following signals:\n\n* **SIGTERM** or **SIGINT** (Ctrl-C): Will cleanly terminate all child processes and exit.\n* **SIGHUP**: Will hot restart by re-invoking whatever is passed as the first argument to the\n  hot restart script.\n* **SIGCHLD**: If any of the child processes shut down unexpectedly, the restart script will shut\n  everything down and exit to avoid being in an unexpected state. The controlling process manager\n  should then restart the restarter script to start Envoy again.\n* **SIGUSR1**: Will be forwarded to Envoy as a signal to reopen all access logs. This is used for\n  atomic move and reopen log rotation.\n"
  },
  {
    "path": "docs/root/operations/operations.rst",
    "content": ".. _operations:\n\nOperations and administration\n=============================\n\n.. toctree::\n  :maxdepth: 2\n\n  cli\n  hot_restarter\n  admin\n  stats_overview\n  runtime\n  fs_flags\n  traffic_tapping\n  certificates\n  performance\n"
  },
  {
    "path": "docs/root/operations/performance.rst",
    "content": ".. _operations_performance:\n\nPerformance\n===========\n\nEnvoy is architected to optimize scalability and resource utilization by running an event loop on a\n:ref:`small number of threads <arch_overview_threading>`. The \"main\" thread is responsible for\ncontrol plane processing, and each \"worker\" thread handles a portion of the data plane processing.\nEnvoy exposes two statistics to monitor performance of the event loops on all these threads.\n\n* **Loop duration:** Some amount of processing is done on each iteration of the event loop. This\n  amount will naturally vary with changes in load. However, if one or more threads have an unusually\n  long-tailed loop duration, it may indicate a performance issue. For example, work might not be\n  distributed fairly across the worker threads, or there may be a long blocking operation in an\n  extension that's impeding progress.\n\n* **Poll delay:** On each iteration of the event loop, the event dispatcher polls for I/O events\n  and \"wakes up\" either when some I/O events are ready to be processed or when a timeout fires,\n  whichever occurs first. In the case of a timeout, we can measure the difference between the\n  expected wakeup time and the actual wakeup time after polling; this difference is called the \"poll\n  delay.\" It's normal to see some small poll delay, usually equal to the kernel scheduler's \"time\n  slice\" or \"quantum\"---this depends on the specific operating system on which Envoy is\n  running---but if this number elevates substantially above its normal observed baseline, it likely\n  indicates kernel scheduler delays.\n\nThese statistics can be enabled by setting :ref:`enable_dispatcher_stats <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.enable_dispatcher_stats>`\nto true.\n\n.. warning::\n\n  Note that enabling dispatcher stats records a value for each iteration of the event loop on every\n  thread. 
This should normally be minimal overhead, but when using\n  :ref:`statsd <envoy_v3_api_msg_config.metrics.v3.StatsdSink>`, it will send each observed value over\n  the wire individually because the statsd protocol doesn't have any way to represent a histogram\n  summary. Be aware that this can be a very large volume of data.\n\nEvent loop statistics\n---------------------\n\nThe event dispatcher for the main thread has a statistics tree rooted at *server.dispatcher.*, and\nthe event dispatcher for each worker thread has a statistics tree rooted at\n*listener_manager.worker_<id>.dispatcher.*, each with the following statistics:\n\n.. csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  loop_duration_us, Histogram, Event loop durations in microseconds\n  poll_delay_us, Histogram, Polling delays in microseconds\n\nNote that any auxiliary threads are not included here.\n\n.. _operations_performance_watchdog:\n\nWatchdog\n--------\n\nIn addition to event loop statistics, Envoy also includes a configurable\n:ref:`watchdog <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.watchdogs>`\nsystem that can increment statistics when Envoy is not responsive and\noptionally kill the server. The system has two separate watchdog configs, one\nfor the main thread and one for worker threads; this is helpful as the different\nthreads have different workloads. The system also has an extension point\nallowing for custom actions to be taken based on watchdog events. The\nstatistics are useful for understanding at a high level whether Envoy's event\nloop is not responsive either because it is doing too much work, blocking, or\nnot being scheduled by the OS.\n\nThe watchdog emits aggregated statistics in both *main_thread* and *workers*.\nIn addition, it emits individual statistics under *server.<thread_name>.* trees.\n*<thread_name>* is equal to *main_thread*, *worker_0*, *worker_1*, etc.\n\n.. 
csv-table::\n  :header: Name, Type, Description\n  :widths: 1, 1, 2\n\n  watchdog_miss, Counter, Number of standard misses\n  watchdog_mega_miss, Counter, Number of mega misses\n"
  },
  {
    "path": "docs/root/operations/runtime.rst",
    "content": ".. _operations_runtime:\n\nRuntime\n=======\n\n:ref:`Runtime configuration <config_runtime>` can be used to modify various server settings\nwithout restarting Envoy. The runtime settings that are available depend on how the server is\nconfigured. They are documented in the relevant sections of the :ref:`configuration guide <config>`.\n\nRuntime guards are also used as a mechanism to disable new behavior or risky changes not otherwise\nguarded by configuration. Such changes will tend to introduce a runtime guard that can be used to\ndisable the new behavior/code path. The names of these runtime guards will be included in the\nrelease notes alongside an explanation of the change that warrented the runtime guard.\n\nDue to this usage of runtime guards, some deployments might find it useful to set up\ndynamic runtime configuration as a safety measure to be able to quickly disable the new behavior\nwithout having to revert to an older version of Envoy or redeploy it with a new set of static\nruntime flags.\n"
  },
  {
    "path": "docs/root/operations/stats_overview.rst",
    "content": ".. _operations_stats:\n\nStatistics overview\n===================\n\nEnvoy outputs numerous statistics which depend on how the server is configured. They can be seen\nlocally via the :http:get:`/stats` command and are typically sent to a :ref:`statsd cluster\n<arch_overview_statistics>`. The statistics that are output are documented in the relevant\nsections of the :ref:`configuration guide <config>`. Some of the more important statistics that will\nalmost always be used can be found in the following sections:\n\n* :ref:`HTTP connection manager <config_http_conn_man_stats>`\n* :ref:`Upstream cluster <config_cluster_manager_cluster_stats>`\n"
  },
  {
    "path": "docs/root/operations/traffic_tapping.rst",
    "content": ".. _operations_traffic_tapping:\n\nTraffic tapping\n===============\n\nEnvoy currently provides two experimental extensions that can tap traffic:\n\n  * :ref:`HTTP tap filter <config_http_filters_tap>`. See the linked filter documentation for more\n    information.\n  * :ref:`Tap transport socket extension <envoy_v3_api_msg_config.core.v3.TransportSocket>` that can intercept\n    traffic and write to a :ref:`protobuf trace file\n    <envoy_v3_api_msg_data.tap.v3.TraceWrapper>`. The remainder of this document describes\n    the configuration of the tap transport socket.\n\nTap transport socket configuration\n----------------------------------\n\n.. attention::\n\n  The tap transport socket is experimental and is currently under active development. There is\n  currently a very limited set of match conditions, output configuration, output sinks, etc.\n  Capabilities will be expanded over time and the configuration structures are likely to change.\n\nTapping can be configured on :ref:`Listener\n<envoy_v3_api_field_config.listener.v3.FilterChain.transport_socket>` and :ref:`Cluster\n<envoy_v3_api_field_config.cluster.v3.Cluster.transport_socket>` transport sockets, providing the ability to interpose on\ndownstream and upstream L4 connections respectively.\n\nTo configure traffic tapping, add an `envoy.transport_sockets.tap` transport socket\n:ref:`configuration <envoy_v3_api_msg_extensions.filters.http.tap.v3.Tap>` to the listener\nor cluster. For a plain text socket this might look like:\n\n.. 
code-block:: yaml\n\n  transport_socket:\n    name: envoy.transport_sockets.tap\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tap.v3.Tap\n      common_config:\n        static_config:\n          match_config:\n            any_match: true\n          output_config:\n            sinks:\n              - format: PROTO_BINARY\n                file_per_tap:\n                  path_prefix: /some/tap/path\n      transport_socket:\n        name: envoy.transport_sockets.raw_buffer\n\nFor a TLS socket, this will be:\n\n.. code-block:: yaml\n\n  transport_socket:\n    name: envoy.transport_sockets.tap\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tap.v3.Tap\n      common_config:\n        static_config:\n          match_config:\n            any_match: true\n          output_config:\n            sinks:\n              - format: PROTO_BINARY\n                file_per_tap:\n                  path_prefix: /some/tap/path\n      transport_socket:\n        name: envoy.transport_sockets.tls\n        typed_config: <TLS context>\n\nwhere the TLS context configuration replaces any existing :ref:`downstream\n<envoy_v3_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>` or :ref:`upstream\n<envoy_v3_api_msg_extensions.transport_sockets.tls.v3.UpstreamTlsContext>`\nTLS configuration on the listener or cluster, respectively.\n\nEach unique socket instance will generate a trace file prefixed with `path_prefix`. E.g.\n`/some/tap/path_0.pb`.\n\nBuffered data limits\n--------------------\n\nFor buffered socket taps, Envoy will limit the amount of body data that is tapped to avoid OOM\nsituations. 
The default limit is 1KiB for both received and transmitted data.\nThis is configurable via the :ref:`max_buffered_rx_bytes\n<envoy_v3_api_field_config.tap.v3.OutputConfig.max_buffered_rx_bytes>` and\n:ref:`max_buffered_tx_bytes\n<envoy_v3_api_field_config.tap.v3.OutputConfig.max_buffered_tx_bytes>` settings. When a buffered\nsocket tap is truncated, the trace will indicate truncation via the :ref:`read_truncated\n<envoy_v3_api_field_data.tap.v3.SocketBufferedTrace.read_truncated>` and :ref:`write_truncated\n<envoy_v3_api_field_data.tap.v3.SocketBufferedTrace.write_truncated>` fields as well as the body\n:ref:`truncated <envoy_v3_api_field_data.tap.v3.Body.truncated>` field.\n\nStreaming\n---------\n\nThe tap transport socket supports both buffered and streaming, controlled by the :ref:`streaming\n<envoy_v3_api_field_config.tap.v3.OutputConfig.streaming>` setting. When buffering,\n:ref:`SocketBufferedTrace <envoy_v3_api_msg_data.tap.v3.SocketBufferedTrace>` messages are\nemitted. When streaming, a series of :ref:`SocketStreamedTraceSegment\n<envoy_v3_api_msg_data.tap.v3.SocketStreamedTraceSegment>` are emitted.\n\nSee the :ref:`HTTP tap filter streaming <config_http_filters_tap_streaming>` documentation for more\ninformation. Most of the concepts overlap between the HTTP filter and the transport socket.\n\nPCAP generation\n---------------\n\nThe generated trace file can be converted to `libpcap format\n<https://wiki.wireshark.org/Development/LibpcapFileFormat>`_, suitable for\nanalysis with tools such as `Wireshark <https://www.wireshark.org/>`_ with the\n`tap2pcap` utility, e.g.:\n\n.. 
code-block:: bash\n\n  bazel run @envoy_api_canonical//tools:tap2pcap /some/tap/path_0.pb path_0.pcap\n  tshark -r path_0.pcap -d \"tcp.port==10000,http2\" -P\n    1   0.000000    127.0.0.1 → 127.0.0.1    HTTP2 157 Magic, SETTINGS, WINDOW_UPDATE, HEADERS\n    2   0.013713    127.0.0.1 → 127.0.0.1    HTTP2 91 SETTINGS, SETTINGS, WINDOW_UPDATE\n    3   0.013820    127.0.0.1 → 127.0.0.1    HTTP2 63 SETTINGS\n    4   0.128649    127.0.0.1 → 127.0.0.1    HTTP2 5586 HEADERS\n    5   0.130006    127.0.0.1 → 127.0.0.1    HTTP2 7573 DATA\n    6   0.131044    127.0.0.1 → 127.0.0.1    HTTP2 3152 DATA, DATA\n"
  },
  {
    "path": "docs/root/start/sandboxes/_include/docker-env-setup.rst",
    "content": "The following documentation runs through the setup of Envoy described above.\n\nStep 1: Install Docker\n**********************\n\nEnsure that you have a recent versions of ``docker`` and ``docker-compose`` installed.\n\nA simple way to achieve this is via the `Docker Desktop <https://www.docker.com/products/docker-desktop>`_.\n\nStep 2: Clone the Envoy repo\n****************************\n\nIf you have not cloned the Envoy repo, clone it with:\n\n.. tabs::\n\n   .. code-tab:: console SSH\n\n      git clone git@github.com:envoyproxy/envoy\n\n   .. code-tab:: console HTTPS\n\n      git clone https://github.com/envoyproxy/envoy.git\n"
  },
  {
    "path": "docs/root/start/sandboxes/cache.rst",
    "content": ".. _install_sandboxes_cache_filter:\n\nCache Filter\n============\n.. TODO(yosrym93): When a documentation is written for a production-ready Cache Filter, link to it through this doc.\n\nIn this example, we demonstrate how HTTP caching can be utilized in Envoy by using the Cache Filter.\nThe setup of this sandbox is based on the setup of the :ref:`Front Proxy sandbox <install_sandboxes_front_proxy>`.\n\nAll incoming requests are routed via the front Envoy, which acts as a reverse proxy sitting on\nthe edge of the ``envoymesh`` network. Ports ``8000`` and ``8001`` are exposed by docker\ncompose (see :repo:`/examples/cache/docker-compose.yaml`) to handle ``HTTP`` calls\nto the services, and requests to ``/admin`` respectively. Two backend services are deployed behind the front Envoy, each with a sidecar Envoy.\n\nThe front Envoy is configured to run the Cache Filter, which stores cacheable responses in an in-memory cache,\nand serves it to subsequent requests. In this demo, the responses that are served by the deployed services are stored in :repo:`/examples/cache/responses.yaml`.\nThis file is mounted to both services' containers, so any changes made to the stored responses while the services are running should be instantly effective (no need to rebuild or rerun).\n\nFor the purposes of the demo, a response's date of creation is appended to its body before being served.\nAn Etag is computed for every response for validation purposes, which only depends on the response body in the yaml file (i.e. the appended date is not taken into account).\nCached responses can be identified by having an ``age`` header. Validated responses can be identified by having a generation date older than the ``date`` header;\nas when a response is validated the ``date`` header is updated, while the body stays the same. 
Validated responses do not have an ``age`` header.\nResponses served from the backend service have no ``age`` header, and their ``date`` header is the same as their generation date.\n\nRunning the Sandbox\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Start all of our containers\n***********************************\n\n.. code-block:: console\n\n    $ pwd\n    envoy/examples/cache\n    $ docker-compose build --pull\n    $ docker-compose up -d\n    $ docker-compose ps\n\n           Name                      Command            State                             Ports\n    ------------------------------------------------------------------------------------------------------------------------\n    cache_front-envoy_1   /docker-entrypoint.sh /bin ... Up      10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp\n    cache_service1_1      /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n    cache_service2_1      /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n\nStep 4: Test Envoy's HTTP caching capabilities\n**********************************************\n\nYou can now send a request to both services via the ``front-envoy``. Note that since the two services have different routes,\nidentical requests to different services have different cache entries (i.e. a request sent to service 2 will not be served by a cached\nresponse produced by service 1).\n\nTo send a request:\n\n``curl -i localhost:8000/service/<service_no>/<response>``\n\n``service_no``: The service to send the request to, 1 or 2.\n\n``response``: The response that is being requested. The responses are found in :repo:`/examples/cache/responses.yaml`.\n\n\nThe provided example responses are:\n\n- ``valid-for-minute``\n    This response remains fresh in the cache for a minute. After which, the response gets validated by the backend service before being served from the cache.\n    If found to be updated, the new response is served (and cached). 
Otherwise, the cached response is served and refreshed.\n\n- ``private``\n    This response is private; it cannot be stored by shared caches (such as proxies). It will always be served from the backend service.\n\n- ``no-cache``\n    This response has to be validated every time before being served.\n\nYou can change the responses' headers and bodies (or add new ones) while the sandbox is running to experiment.\n\nExample responses\n-----------------\n\n1. valid-for-minute\n^^^^^^^^^^^^^^^^^^^\n\n.. code-block:: console\n\n    $ curl -i localhost:8000/service/1/valid-for-minute\n    HTTP/1.1 200 OK\n    content-type: text/html; charset=utf-8\n    content-length: 103\n    cache-control: max-age=60\n    custom-header: any value\n    etag: \"172ae25df822c3299cf2248694b4ce23\"\n    date: Fri, 11 Sep 2020 03:20:40 GMT\n    server: envoy\n    x-envoy-upstream-service-time: 11\n\n    This response will stay fresh for one minute\n    Response body generated at: Fri, 11 Sep 2020 03:20:40 GMT\n\nNaturally, response ``date`` header is the same time as the generated time.\nSending the same request after 30 seconds gives the same exact response with the same generation date,\nbut with an ``age`` header as it was served from cache:\n\n.. code-block:: console\n\n    $ curl -i localhost:8000/service/1/valid-for-minute\n    HTTP/1.1 200 OK\n    content-type: text/html; charset=utf-8\n    content-length: 103\n    cache-control: max-age=60\n    custom-header: any value\n    etag: \"172ae25df822c3299cf2248694b4ce23\"\n    date: Fri, 11 Sep 2020 03:20:40 GMT\n    server: envoy\n    x-envoy-upstream-service-time: 11\n    age: 30\n\n    This response will stay fresh for one minute\n    Response body generated at: Fri, 11 Sep 2020 03:20:40 GMT\n\nAfter 1 minute and 1 second:\n\n.. 
code-block:: console\n\n    $ curl -i localhost:8000/service/1/valid-for-minute\n    HTTP/1.1 200 OK\n    cache-control: max-age=60\n    custom-header: any value\n    etag: \"172ae25df822c3299cf2248694b4ce23\"\n    date: Fri, 11 Sep 2020 03:21:41 GMT\n    server: envoy\n    x-envoy-upstream-service-time: 8\n    content-length: 103\n    content-type: text/html; charset=utf-8\n\n    This response will stay fresh for one minute\n    Response body generated at: Fri, 11 Sep 2020 03:20:40 GMT\n\nThe same response was served after being validated with the backend service.\nYou can verify this as the response generation time is the same,\nbut the response ``date`` header was updated with the validation response date.\nAlso, no ``age`` header.\n\nEvery time the response is validated, it stays fresh for another minute.\nIf the response body changes while the cached response is still fresh,\nthe cached response will still be served. The cached response will only be updated when it is no longer fresh.\n\n2. private\n^^^^^^^^^^\n\n.. code-block:: console\n\n    $ curl -i localhost:8000/service/1/private\n    HTTP/1.1 200 OK\n    content-type: text/html; charset=utf-8\n    content-length: 117\n    cache-control: private\n    etag: \"6bd80b59b2722606abf2b8d83ed2126d\"\n    date: Fri, 11 Sep 2020 03:22:28 GMT\n    server: envoy\n    x-envoy-upstream-service-time: 7\n\n    This is a private response, it will not be cached by Envoy\n    Response body generated at: Fri, 11 Sep 2020 03:22:28 GMT\n\nNo matter how many times you make this request, you will always receive a new response;\nnew date of generation, new ``date`` header, and no ``age`` header.\n\n3. no-cache\n^^^^^^^^^^^\n\n.. 
code-block:: console\n\n    $ curl -i localhost:8000/service/1/no-cache\n    HTTP/1.1 200 OK\n    content-type: text/html; charset=utf-8\n    content-length: 130\n    cache-control: max-age=0, no-cache\n    etag: \"ce39a53bd6bb8abdb2488a5a375397e4\"\n    date: Fri, 11 Sep 2020 03:23:07 GMT\n    server: envoy\n    x-envoy-upstream-service-time: 7\n\n    This response can be cached, but it has to be validated on each request\n    Response body generated at: Fri, 11 Sep 2020 03:23:07 GMT\n\nAfter a few seconds:\n\n.. code-block:: console\n\n    $ curl -i localhost:8000/service/1/no-cache\n    HTTP/1.1 200 OK\n    cache-control: max-age=0, no-cache\n    etag: \"ce39a53bd6bb8abdb2488a5a375397e4\"\n    date: Fri, 11 Sep 2020 03:23:12 GMT\n    server: envoy\n    x-envoy-upstream-service-time: 7\n    content-length: 130\n    content-type: text/html; charset=utf-8\n\n    This response can be cached, but it has to be validated on each request\n    Response body generated at: Fri, 11 Sep 2020 03:23:07 GMT\n\nYou will receive a cached response that has the same generation time.\nHowever, the ``date`` header will always be updated as this response will always be validated first.\nAlso, no ``age`` header.\n\nIf you change the response body in the yaml file:\n\n.. 
code-block:: console\n\n    $ curl -i localhost:8000/service/1/no-cache\n    HTTP/1.1 200 OK\n    content-type: text/html; charset=utf-8\n    content-length: 133\n    cache-control: max-age=0, no-cache\n    etag: \"f4768af0ac9f6f54f88169a1f3ecc9f3\"\n    date: Fri, 11 Sep 2020 03:24:10 GMT\n    server: envoy\n    x-envoy-upstream-service-time: 7\n\n    This response can be cached, but it has to be validated on each request!!!\n    Response body generated at: Fri, 11 Sep 2020 03:24:10 GMT\n\nYou will receive a new response that's served from the backend service.\nThe new response will be cached for subsequent requests.\n\nYou can also add new responses to the yaml file with different ``cache-control`` headers and start experimenting!\nTo learn more about caching and ``cache-control`` headers visit\nthe `MDN Web Docs <https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching>`_.\n"
  },
  {
    "path": "docs/root/start/sandboxes/cors.rst",
    "content": ".. _install_sandboxes_cors:\n\nCORS Filter\n===========\n\nCross-Origin Resource Sharing (CORS) is a method of enforcing client-side\naccess controls on resources by specifying external domains that are able to\naccess certain or all routes of your domain. Browsers use the presence of HTTP\nheaders to determine if a response from a different origin is allowed.\n\nTo help demonstrate how front-envoy can enforce CORS policies, we are\nreleasing a set of `docker compose <https://docs.docker.com/compose/>`_ sandboxes\nthat deploy a frontend and backend service on different origins, both behind\nfront-envoy.\n\nThe frontend service has a field to input the remote domain of your backend\nservice along with radio buttons to select the remote domain's CORS enforcement.\nThe CORS enforcement choices are:\n\n  * Disabled: CORS is disabled on the route requested. This will result in a\n    client-side CORS error since the required headers to be considered a\n    valid CORS request are not present.\n  * Open: CORS is enabled on the route requested but the allowed origin is set\n    to ``*``. This is a very permissive policy and means that any origin can request\n    data from this endpoint.\n  * Restricted: CORS is enabled on the route requested and the only allowed\n    origin is ``envoyproxy.io``. This will result in a client-side CORS error.\n\nRunning the Sandboxes\n~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Start all of our containers\n***********************************\n\nSwitch to the ``frontend`` directory in the ``cors`` example, and start the containers:\n\n.. 
code-block:: console\n\n  $ pwd\n  envoy/examples/cors/frontend\n  $ docker-compose pull\n  $ docker-compose up --build -d\n  $ docker-compose ps\n\n            Name                          Command              State                            Ports\n  ------------------------------------------------------------------------------------------------------------------------------\n  frontend_front-envoy_1        /docker-entrypoint.sh /bin ... Up      10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp\n  frontend_frontend-service_1   /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n\nNow, switch to the ``backend`` directory in the ``cors`` example, and start the containers:\n\n.. code-block:: console\n\n  $ pwd\n  envoy/examples/cors/backend\n  $ docker-compose pull\n  $ docker-compose up --build -d\n  $ docker-compose ps\n\n            Name                         Command             State                            Ports\n  ----------------------------------------------------------------------------------------------------------------------------\n  backend_backend-service_1   /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n  backend_front-envoy_1       /docker-entrypoint.sh /bin ... Up      10000/tcp, 0.0.0.0:8002->8000/tcp, 0.0.0.0:8003->8001/tcp\n\nStep 4: Test Envoy's CORS capabilities\n**************************************\n\nYou can now open a browser to view your frontend service at http://localhost:8000.\n\nResults of the cross-origin request will be shown on the page under *Request Results*.\n\nYour browser's ``CORS`` enforcement logs can be found in the browser console.\n\nFor example:\n\n.. 
code-block:: console\n\n  Access to XMLHttpRequest at 'http://192.168.99.100:8002/cors/disabled' from origin 'http://192.168.99.101:8000'\n  has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource.\n\nStep 5: Check stats of backend via admin\n****************************************\n\nWhen Envoy runs, it can listen to ``admin`` requests if a port is configured.\n\nIn the example configs, the backend admin is bound to port ``8003``.\n\nIf you browse to http://localhost:8003/stats you will be able to view\nall of the Envoy stats for the backend. You should see the ``CORS`` stats for\ninvalid and valid origins increment as you make requests from the frontend cluster.\n\n.. code-block:: none\n\n  http.ingress_http.cors.origin_invalid: 2\n  http.ingress_http.cors.origin_valid: 7\n"
  },
  {
    "path": "docs/root/start/sandboxes/csrf.rst",
    "content": ".. _install_sandboxes_csrf:\n\nCSRF Filter\n===========\n\nCross-Site Request Forgery (CSRF) is an attack that occurs when a malicious\nthird-party website exploits a vulnerability that allows them to submit an\nundesired request on a user's behalf. To mitigate this attack this filter\nchecks where a request is coming from to determine if the request's origin\nis the same as its destination.\n\nTo help demonstrate how front-envoy can enforce CSRF policies, we are releasing\na `docker compose <https://docs.docker.com/compose/>`_ sandbox that\ndeploys a service with both a frontend and backend. This service will be started\non two different virtual machines with different origins.\n\nThe frontend has a field to input the remote domain of where you would like to\nsend POST requests along with radio buttons to select the remote domain's CSRF\nenforcement. The CSRF enforcement choices are:\n\n  * Disabled: CSRF is disabled on the requested route. This will result in a\n    successful request since there is no CSRF enforcement.\n  * Shadow Mode: CSRF is not enforced on the requested route but will record\n    if the request contains a valid source origin.\n  * Enabled: CSRF is enabled and will return a 403 (Forbidden) status code when\n    a request is made from a different origin.\n  * Ignored: CSRF is enabled but the request type is a GET. This should bypass\n    the CSRF filter and return successfully.\n\nRunning the Sandboxes\n~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Start all of our containers\n***********************************\n\nSwitch to the ``samesite`` directory in the ``csrf`` example, and start the containers:\n\n.. 
code-block:: console\n\n  $ pwd\n  envoy/examples/csrf/samesite\n  $ docker-compose pull\n  $ docker-compose up --build -d\n  $ docker-compose ps\n\n            Name                        Command              State                            Ports\n  ----------------------------------------------------------------------------------------------------------------------\n  samesite_front-envoy_1      /docker-entrypoint.sh /bin ... Up      10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp\n  samesite_service_1          /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n\nNow, switch to the ``crosssite`` directory in the ``csrf`` example, and start the containers:\n\n.. code-block:: console\n\n  $ pwd\n  envoy/examples/csrf/crosssite\n  $ docker-compose up --build -d\n  $ docker-compose ps\n\n            Name                       Command                State                            Ports\n  ----------------------------------------------------------------------------------------------------------------------\n  crosssite_front-envoy_1      /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 0.0.0.0:8002->8000/tcp, 0.0.0.0:8003->8001/tcp\n  crosssite_service_1          /docker-entrypoint.sh /bin ... Up      10000/tcp, 8000/tcp\n\nStep 4: Test Envoy's CSRF capabilities\n**************************************\n\nYou can now open a browser at http://localhost:8002 to view your ``crosssite`` frontend service.\n\nEnter the IP of the ``samesite`` machine to demonstrate cross-site requests. Requests\nwith the enabled enforcement will fail. 
By default this field will be populated\nwith ``localhost``.\n\nTo demonstrate same-site requests open the frontend service for ``samesite`` at http://localhost:8000\nand enter the IP address of the ``samesite`` machine as the destination.\n\nResults of the cross-site request will be shown on the page under *Request Results*.\nYour browser's ``CSRF`` enforcement logs can be found in the browser console and in the\nnetwork tab.\n\nFor example:\n\n.. code-block:: console\n\n  Failed to load resource: the server responded with a status of 403 (Forbidden)\n\nIf you change the destination to be the same as one displaying the website and\nset the ``CSRF`` enforcement to enabled the request will go through successfully.\n\nStep 5: Check stats of backend via admin\n****************************************\n\nWhen Envoy runs, it can listen to ``admin`` requests if a port is configured. In\nthe example configs, the backend admin is bound to port ``8001``.\n\nIf you browse to http://localhost:8001/stats you will be able to view\nall of the Envoy stats for the backend. You should see the ``CSRF`` stats for\ninvalid and valid requests increment as you make requests from the frontend cluster.\n\n.. code-block:: none\n\n  http.ingress_http.csrf.missing_source_origin: 0\n  http.ingress_http.csrf.request_invalid: 1\n  http.ingress_http.csrf.request_valid: 0\n"
  },
  {
    "path": "docs/root/start/sandboxes/ext_authz.rst",
    "content": ".. _install_sandboxes_ext_authz:\n\nExternal Authorization Filter\n=============================\n\nThe External Authorization sandbox demonstrates Envoy's :ref:`ext_authz filter <config_http_filters_ext_authz>`\ncapability to delegate authorization of incoming requests through Envoy to an external service.\n\nWhile ext_authz can also be employed as a network filter, this sandbox is limited to exhibit\next_authz HTTP Filter, which supports calling an HTTP or gRPC service.\n\nThe setup of this sandbox is very similar to front-proxy deployment, however calls to upstream\nservice behind the proxy will be checked by an external HTTP or gRPC service. In this sandbox,\nfor every authorized call, the external authorization service adds additional ``x-current-user``\nheader entry to the original request headers to be forwarded to the upstream service.\n\nRunning the Sandbox\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Start all of our containers\n***********************************\n\nTo build this sandbox example and start the example services, run the following commands:\n\n.. code-block:: console\n\n    $ pwd\n    envoy/examples/ext_authz\n    $ docker-compose pull\n    $ docker-compose up --build -d\n    $ docker-compose ps\n\n                   Name                             Command               State                             Ports\n    ---------------------------------------------------------------------------------------------------------------------------------------\n    ext_authz_ext_authz-grpc-service_1   /app/server -users /etc/us       Up\n    ext_authz_ext_authz-http-service_1   docker-entrypoint.sh node        Up\n    ext_authz_front-envoy_1              /docker-entrypoint.sh /bin       Up      10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp\n    ext_authz_upstream-service_1         python3 /app/service/server.py   Up\n\n.. 
note::\n\n    This sandbox has multiple setups controlled by ``FRONT_ENVOY_YAML`` environment variable which\n    points to the effective Envoy configuration to be used. The default value of ``FRONT_ENVOY_YAML``\n    can be defined in the ``.env`` file or provided inline when running the ``docker-compose up``\n    command. For more information, please take a look at `environment variables in Compose documentation <https://docs.docker.com/compose/environment-variables>`_.\n\nBy default, ``FRONT_ENVOY_YAML`` points to ``config/grpc-service/v3.yaml`` file which bootstraps\nfront-envoy with ext_authz HTTP filter with gRPC service ``V3`` (this is specified by :ref:`transport_api_version field<envoy_v3_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.transport_api_version>`).\nThe possible values of ``FRONT_ENVOY_YAML`` can be found inside the ``envoy/examples/ext_authz/config``\ndirectory.\n\nFor example, to run Envoy with ext_authz HTTP filter with an HTTP service, the commands will be:\n\n.. code-block:: console\n\n    $ pwd\n    envoy/examples/ext_authz\n    $ docker-compose pull\n    $ # Tearing down the currently running setup\n    $ docker-compose down\n    $ FRONT_ENVOY_YAML=config/http-service.yaml docker-compose up --build -d\n    $ # Or you can update the .env file with the above FRONT_ENVOY_YAML value, so you don't have to specify it when running the \"up\" command.\n\nStep 4: Access the upstream-service behind the Front Envoy\n**********************************************************\n\nYou can now try to send a request to upstream-service via the front-envoy as follows:\n\n.. 
code-block:: console\n\n    $ curl -v localhost:8000/service\n    *   Trying 127.0.0.1...\n    * TCP_NODELAY set\n    * Connected to localhost (127.0.0.1) port 8000 (#0)\n    > GET /service HTTP/1.1\n    > Host: localhost:8000\n    > User-Agent: curl/7.58.0\n    > Accept: */*\n    >\n    < HTTP/1.1 403 Forbidden\n    < date: Fri, 19 Jun 2020 15:02:24 GMT\n    < server: envoy\n    < content-length: 0\n\nAs observed, the request failed with ``403 Forbidden`` status code. This happened since the ext_authz\nfilter employed by Envoy rejected the call. To let the request reach the upstream service, you need\nto provide a ``Bearer`` token via the ``Authorization`` header.\n\n.. note::\n\n    A complete list of users is defined in ``envoy/examples/ext_authz/auth/users.json`` file. For\n    example, the ``token1`` used in the below example corresponds to ``user1``.\n\nAn example of successful requests can be observed as follows:\n\n.. code-block:: console\n\n    $ curl -v -H \"Authorization: Bearer token1\" localhost:8000/service\n    *   Trying 127.0.0.1...\n    * TCP_NODELAY set\n    * Connected to localhost (127.0.0.1) port 8000 (#0)\n    > GET /service HTTP/1.1\n    > Host: localhost:8000\n    > User-Agent: curl/7.58.0\n    > Accept: */*\n    > Authorization: Bearer token1\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 24\n    < server: envoy\n    < date: Fri, 19 Jun 2020 15:04:29 GMT\n    < x-envoy-upstream-service-time: 2\n    <\n    * Connection #0 to host localhost left intact\n    Hello user1 from behind Envoy!\n\nWe can also employ `Open Policy Agent <https://www.openpolicyagent.org/>`_ server\n(with `envoy_ext_authz_grpc <https://github.com/open-policy-agent/opa-istio-plugin>`_ plugin enabled)\nas the authorization server. To run this example:\n\n.. 
code-block:: console\n\n    $ pwd\n    envoy/examples/ext_authz\n    $ docker-compose pull\n    $ # Tearing down the currently running setup\n    $ docker-compose down\n    $ FRONT_ENVOY_YAML=config/opa-service/v2.yaml docker-compose up --build -d\n\nAnd sending a request to the upstream service (via the Front Envoy) gives:\n\n.. code-block:: console\n\n    $ curl localhost:8000/service --verbose\n    *   Trying ::1...\n    * TCP_NODELAY set\n    * Connected to localhost (::1) port 8000 (#0)\n    > GET /service HTTP/1.1\n    > Host: localhost:8000\n    > User-Agent: curl/7.64.1\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 28\n    < server: envoy\n    < date: Thu, 02 Jul 2020 06:29:58 GMT\n    < x-envoy-upstream-service-time: 2\n    <\n    * Connection #0 to host localhost left intact\n    Hello OPA from behind Envoy!\n\nFrom the logs, we can observe the policy decision message from the Open Policy Agent server (for\nthe above request against the defined policy in ``config/opa-service/policy.rego``):\n\n.. 
code-block:: console\n\n    $ docker-compose logs ext_authz-opa-service | grep decision_id -A 30\n    ext_authz-opa-service_1   |   \"decision_id\": \"8143ca68-42d8-43e6-ade6-d1169bf69110\",\n    ext_authz-opa-service_1   |   \"input\": {\n    ext_authz-opa-service_1   |     \"attributes\": {\n    ext_authz-opa-service_1   |       \"destination\": {\n    ext_authz-opa-service_1   |         \"address\": {\n    ext_authz-opa-service_1   |           \"Address\": {\n    ext_authz-opa-service_1   |             \"SocketAddress\": {\n    ext_authz-opa-service_1   |               \"PortSpecifier\": {\n    ext_authz-opa-service_1   |                 \"PortValue\": 8000\n    ext_authz-opa-service_1   |               },\n    ext_authz-opa-service_1   |               \"address\": \"172.28.0.6\"\n    ext_authz-opa-service_1   |             }\n    ext_authz-opa-service_1   |           }\n    ext_authz-opa-service_1   |         }\n    ext_authz-opa-service_1   |       },\n    ext_authz-opa-service_1   |       \"metadata_context\": {},\n    ext_authz-opa-service_1   |       \"request\": {\n    ext_authz-opa-service_1   |         \"http\": {\n    ext_authz-opa-service_1   |           \"headers\": {\n    ext_authz-opa-service_1   |             \":authority\": \"localhost:8000\",\n    ext_authz-opa-service_1   |             \":method\": \"GET\",\n    ext_authz-opa-service_1   |             \":path\": \"/service\",\n    ext_authz-opa-service_1   |             \"accept\": \"*/*\",\n    ext_authz-opa-service_1   |             \"user-agent\": \"curl/7.64.1\",\n    ext_authz-opa-service_1   |             \"x-forwarded-proto\": \"http\",\n    ext_authz-opa-service_1   |             \"x-request-id\": \"b77919c0-f1d4-4b06-b444-5a8b32d5daf4\"\n    ext_authz-opa-service_1   |           },\n    ext_authz-opa-service_1   |           \"host\": \"localhost:8000\",\n    ext_authz-opa-service_1   |           \"id\": \"16617514055874272263\",\n    ext_authz-opa-service_1   |           \"method\": 
\"GET\",\n    ext_authz-opa-service_1   |           \"path\": \"/service\",\n\nTrying to send a request with a method other than ``GET`` gives a rejection:\n\n.. code-block:: console\n\n    $ curl -X POST localhost:8000/service --verbose\n    *   Trying ::1...\n    * TCP_NODELAY set\n    * Connected to localhost (::1) port 8000 (#0)\n    > POST /service HTTP/1.1\n    > Host: localhost:8000\n    > User-Agent: curl/7.64.1\n    > Accept: */*\n    >\n    < HTTP/1.1 403 Forbidden\n    < date: Thu, 02 Jul 2020 06:46:13 GMT\n    < server: envoy\n    < content-length: 0\n"
  },
  {
    "path": "docs/root/start/sandboxes/fault_injection.rst",
    "content": ".. _install_sandboxes_fault_injection:\n\nFault Injection Filter\n======================\n\nThis simple example demonstrates Envoy's :ref:`fault injection <config_http_filters_fault_injection>` capability using Envoy's :ref:`runtime support <config_runtime>` to control the feature.\n\nRunning the Sandboxes\n~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Start all of our containers\n***********************************\n\nTerminal 1\n\n.. code-block:: console\n\n  $ pwd\n  envoy/examples/fault-injection\n  $ docker-compose pull\n  $ docker-compose up --build -d\n  $ docker-compose ps\n\n            Name                         Command               State                             Ports\n  ------------------------------------------------------------------------------------------------------------------------------\n  fault-injection_backend_1   gunicorn -b 0.0.0.0:80 htt       Up      0.0.0.0:8080->80/tcp\n  fault-injection_envoy_1     /docker-entrypoint.sh /usr       Up      10000/tcp, 0.0.0.0:9211->9211/tcp, 0.0.0.0:9901->9901/tcp\n\nStep 4: Start sending continuous stream of HTTP requests\n********************************************************\n\nTerminal 2\n\n.. code-block:: console\n\n  $ pwd\n  envoy/examples/fault-injection\n  $ docker-compose exec envoy bash\n  $ bash send_request.sh\n\nThe script above (``send_request.sh``) sends a continuous stream of HTTP requests to Envoy, which in turn forwards the requests to the backend container. Fault injection is configured in Envoy but turned off (i.e. affects 0% of requests). Consequently, you should see a continuous sequence of HTTP 200 response codes.\n\nStep 5: Test Envoy's abort fault injection\n******************************************\n\nTurn on *abort* fault injection via the runtime using the commands below.\n\nTerminal 3\n\n.. 
code-block:: console\n\n  $ docker-compose exec envoy bash\n  $ bash enable_abort_fault_injection.sh\n\nThe script above enables HTTP aborts for 100% of requests. So, you should now see a continuous sequence of HTTP 503\nresponses for all requests.\n\nTo disable the abort injection:\n\nTerminal 3\n\n.. code-block:: console\n\n  $ bash disable_abort_fault_injection.sh\n\nStep 6: Test Envoy's delay fault injection\n******************************************\n\nTurn on *delay* fault injection via the runtime using the commands below.\n\nTerminal 3\n\n.. code-block:: console\n\n  $ docker-compose exec envoy bash\n  $ bash enable_delay_fault_injection.sh\n\nThe script above will add a 3-second delay to 50% of HTTP requests. You should now see a continuous sequence of HTTP 200 responses for all requests, but half of the requests will take 3 seconds to complete.\n\nTo disable the delay injection:\n\nTerminal 3\n\n.. code-block:: console\n\n  $ bash disable_delay_fault_injection.sh\n\nStep 7: Check the current runtime filesystem\n********************************************\n\nTo see the current runtime filesystem overview:\n\nTerminal 3\n\n.. code-block:: console\n\n  $ tree /srv/runtime\n"
  },
  {
    "path": "docs/root/start/sandboxes/front_proxy.rst",
    "content": ".. _install_sandboxes_front_proxy:\n\nFront Proxy\n===========\n\nTo get a flavor of what Envoy has to offer as a front proxy, we are releasing a `docker compose <https://docs.docker.com/compose/>`_\nsandbox that deploys a front Envoy and a couple of services (simple Flask apps) colocated with a\nrunning service Envoy. The three containers will be deployed inside a virtual network called\n``envoymesh``.\n\nBelow you can see a graphic showing the docker compose deployment:\n\n.. image:: /_static/docker_compose_front_proxy.svg\n  :width: 100%\n\nAll incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on\nthe edge of the ``envoymesh`` network. Ports ``8080``, ``8443``, and ``8001`` are exposed by docker\ncompose (see :repo:`/examples/front-proxy/docker-compose.yaml`) to handle ``HTTP``, ``HTTPS`` calls\nto the services and requests to ``/admin`` respectively.\n\nMoreover, notice that all traffic routed by the front Envoy to the service containers is actually\nrouted to the service Envoys (routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`).\n\nIn turn the service Envoys route the request to the Flask app via the loopback\naddress (routes setup in :repo:`/examples/front-proxy/service-envoy.yaml`). This\nsetup illustrates the advantage of running service Envoys collocated with your services: all\nrequests are handled by the service Envoy, and efficiently routed to your services.\n\nRunning the Sandbox\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Start all of our containers\n***********************************\n\n.. 
code-block:: console\n\n    $ pwd\n    envoy/examples/front-proxy\n    $ docker-compose build --pull\n    $ docker-compose up -d\n    $ docker-compose ps\n\n              Name                         Command               State                                         Ports\n    ------------------------------------------------------------------------------------------------------------------------------------------------------\n    front-proxy_front-envoy_1   /docker-entrypoint.sh /bin ... Up      10000/tcp, 0.0.0.0:8080->8080/tcp, 0.0.0.0:8001->8001/tcp, 0.0.0.0:8443->8443/tcp\n    front-proxy_service1_1      /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n    front-proxy_service2_1      /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n\nStep 4: Test Envoy's routing capabilities\n*****************************************\n\nYou can now send a request to both services via the ``front-envoy``.\n\nFor ``service1``:\n\n.. code-block:: console\n\n    $ curl -v localhost:8080/service/1\n    *   Trying ::1...\n    * TCP_NODELAY set\n    * Connected to localhost (::1) port 8080 (#0)\n    > GET /service/1 HTTP/1.1\n    > Host: localhost:8080\n    > User-Agent: curl/7.64.1\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 92\n    < server: envoy\n    < date: Mon, 06 Jul 2020 06:20:00 GMT\n    < x-envoy-upstream-service-time: 2\n    <\n    Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4\n\nFor ``service2``:\n\n.. 
code-block:: console\n\n    $ curl -v localhost:8080/service/2\n    *   Trying ::1...\n    * TCP_NODELAY set\n    * Connected to localhost (::1) port 8080 (#0)\n    > GET /service/2 HTTP/1.1\n    > Host: localhost:8080\n    > User-Agent: curl/7.64.1\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 92\n    < server: envoy\n    < date: Mon, 06 Jul 2020 06:23:13 GMT\n    < x-envoy-upstream-service-time: 2\n    <\n    Hello from behind Envoy (service 2)! hostname: ea6165ee4fee resolvedhostname: 192.168.160.2\n\nNotice that each request, while sent to the front Envoy, was correctly routed to the respective\napplication.\n\nWe can also use ``HTTPS`` to call services behind the front Envoy. For example, calling ``service1``:\n\n.. code-block:: console\n\n    $ curl https://localhost:8443/service/1 -k -v\n    *   Trying ::1...\n    * TCP_NODELAY set\n    * Connected to localhost (::1) port 8443 (#0)\n    * ALPN, offering h2\n    * ALPN, offering http/1.1\n    * successfully set certificate verify locations:\n    *   CAfile: /etc/ssl/cert.pem\n      CApath: none\n    * TLSv1.2 (OUT), TLS handshake, Client hello (1):\n    * TLSv1.2 (IN), TLS handshake, Server hello (2):\n    * TLSv1.2 (IN), TLS handshake, Certificate (11):\n    * TLSv1.2 (IN), TLS handshake, Server key exchange (12):\n    * TLSv1.2 (IN), TLS handshake, Server finished (14):\n    * TLSv1.2 (OUT), TLS handshake, Client key exchange (16):\n    * TLSv1.2 (OUT), TLS change cipher, Change cipher spec (1):\n    * TLSv1.2 (OUT), TLS handshake, Finished (20):\n    * TLSv1.2 (IN), TLS change cipher, Change cipher spec (1):\n    * TLSv1.2 (IN), TLS handshake, Finished (20):\n    * SSL connection using TLSv1.2 / ECDHE-RSA-CHACHA20-POLY1305\n    * ALPN, server did not agree to a protocol\n    * Server certificate:\n    *  subject: CN=front-envoy\n    *  start date: Jul  5 15:18:44 2020 GMT\n    *  expire date: Jul  5 15:18:44 2021 GMT\n    *  issuer: 
CN=front-envoy\n    *  SSL certificate verify result: self signed certificate (18), continuing anyway.\n    > GET /service/1 HTTP/1.1\n    > Host: localhost:8443\n    > User-Agent: curl/7.64.1\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 92\n    < server: envoy\n    < date: Mon, 06 Jul 2020 06:17:14 GMT\n    < x-envoy-upstream-service-time: 3\n    <\n    Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4\n\nStep 5: Test Envoy's load balancing capabilities\n************************************************\n\nNow let's scale up our ``service1`` nodes to demonstrate the load balancing abilities of Envoy:\n\n.. code-block:: console\n\n    $ docker-compose scale service1=3\n    Creating and starting example_service1_2 ... done\n    Creating and starting example_service1_3 ... done\n\nNow if we send a request to ``service1`` multiple times, the front Envoy will load balance the\nrequests by doing a round robin of the three ``service1`` machines:\n\n.. code-block:: console\n\n    $ curl -v localhost:8080/service/1\n    *   Trying ::1...\n    * TCP_NODELAY set\n    * Connected to localhost (::1) port 8080 (#0)\n    > GET /service/1 HTTP/1.1\n    > Host: localhost:8080\n    > User-Agent: curl/7.64.1\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 92\n    < server: envoy\n    < date: Mon, 06 Jul 2020 06:21:47 GMT\n    < x-envoy-upstream-service-time: 6\n    <\n    Hello from behind Envoy (service 1)! 
hostname: 3dc787578c23 resolvedhostname: 192.168.160.6\n\n    $ curl -v localhost:8080/service/1\n    *   Trying 192.168.99.100...\n    * Connected to 192.168.99.100 (192.168.99.100) port 8080 (#0)\n    > GET /service/1 HTTP/1.1\n    > Host: 192.168.99.100:8080\n    > User-Agent: curl/7.54.0\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 89\n    < x-envoy-upstream-service-time: 1\n    < server: envoy\n    < date: Fri, 26 Aug 2018 19:40:22 GMT\n    <\n    Hello from behind Envoy (service 1)! hostname: 3a93ece62129 resolvedhostname: 192.168.160.5\n\n    $ curl -v localhost:8080/service/1\n    *   Trying 192.168.99.100...\n    * Connected to 192.168.99.100 (192.168.99.100) port 8080 (#0)\n    > GET /service/1 HTTP/1.1\n    > Host: 192.168.99.100:8080\n    > User-Agent: curl/7.43.0\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 89\n    < x-envoy-upstream-service-time: 1\n    < server: envoy\n    < date: Fri, 26 Aug 2018 19:40:24 GMT\n    < x-envoy-protocol-version: HTTP/1.1\n    <\n    Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4\n\nStep 6: enter containers and curl services\n******************************************\n\nIn addition to using ``curl`` from your host machine, you can also enter the\ncontainers themselves and ``curl`` from inside them. To enter a container you\ncan use ``docker-compose exec <container_name> /bin/bash``. For example we can\nenter the ``front-envoy`` container, and ``curl`` for services locally:\n\n.. code-block:: console\n\n    $ docker-compose exec front-envoy /bin/bash\n    root@81288499f9d7:/# curl localhost:8080/service/1\n    Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3\n    root@81288499f9d7:/# curl localhost:8080/service/1\n    Hello from behind Envoy (service 1)! 
hostname: 20da22cfc955 resolvedhostname: 172.19.0.5\n    root@81288499f9d7:/# curl localhost:8080/service/1\n    Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6\n    root@81288499f9d7:/# curl localhost:8080/service/2\n    Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2\n\nStep 7: enter container and curl admin\n**************************************\n\nWhen Envoy runs it also attaches an ``admin`` to your desired port.\n\nIn the example configs the admin is bound to port ``8001``.\n\nWe can ``curl`` it to gain useful information:\n\n- ``/server_info`` provides information about the Envoy version you are running.\n- ``/stats`` provides statistics about the Envoy server.\n\nIn the example we can enter the ``front-envoy`` container to query admin:\n\n.. code-block:: console\n\n    $ docker-compose exec front-envoy /bin/bash\n    root@e654c2c83277:/# curl localhost:8001/server_info\n\n.. code-block:: json\n\n  {\n    \"version\": \"093e2ffe046313242144d0431f1bb5cf18d82544/1.15.0-dev/Clean/RELEASE/BoringSSL\",\n    \"state\": \"LIVE\",\n    \"hot_restart_version\": \"11.104\",\n    \"command_line_options\": {\n      \"base_id\": \"0\",\n      \"use_dynamic_base_id\": false,\n      \"base_id_path\": \"\",\n      \"concurrency\": 8,\n      \"config_path\": \"/etc/front-envoy.yaml\",\n      \"config_yaml\": \"\",\n      \"allow_unknown_static_fields\": false,\n      \"reject_unknown_dynamic_fields\": false,\n      \"ignore_unknown_dynamic_fields\": false,\n      \"admin_address_path\": \"\",\n      \"local_address_ip_version\": \"v4\",\n      \"log_level\": \"info\",\n      \"component_log_level\": \"\",\n      \"log_format\": \"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v\",\n      \"log_format_escaped\": false,\n      \"log_path\": \"\",\n      \"service_cluster\": \"front-proxy\",\n      \"service_node\": \"\",\n      \"service_zone\": \"\",\n      \"drain_strategy\": \"Gradual\",\n     
 \"mode\": \"Serve\",\n      \"disable_hot_restart\": false,\n      \"enable_mutex_tracing\": false,\n      \"restart_epoch\": 0,\n      \"cpuset_threads\": false,\n      \"disabled_extensions\": [],\n      \"bootstrap_version\": 0,\n      \"hidden_envoy_deprecated_max_stats\": \"0\",\n      \"hidden_envoy_deprecated_max_obj_name_len\": \"0\",\n      \"file_flush_interval\": \"10s\",\n      \"drain_time\": \"600s\",\n      \"parent_shutdown_time\": \"900s\"\n    },\n    \"uptime_current_epoch\": \"188s\",\n    \"uptime_all_epochs\": \"188s\"\n  }\n\n.. code-block:: console\n\n    root@e654c2c83277:/# curl localhost:8001/stats\n    cluster.service1.external.upstream_rq_200: 7\n    ...\n    cluster.service1.membership_change: 2\n    cluster.service1.membership_total: 3\n    ...\n    cluster.service1.upstream_cx_http2_total: 3\n    ...\n    cluster.service1.upstream_rq_total: 7\n    ...\n    cluster.service2.external.upstream_rq_200: 2\n    ...\n    cluster.service2.membership_change: 1\n    cluster.service2.membership_total: 1\n    ...\n    cluster.service2.upstream_cx_http2_total: 1\n    ...\n    cluster.service2.upstream_rq_total: 2\n    ...\n\nNotice that we can get the number of members of upstream clusters, number of requests fulfilled by\nthem, information about http ingress, and a plethora of other useful stats.\n"
  },
  {
    "path": "docs/root/start/sandboxes/grpc_bridge.rst",
    "content": ".. _install_sandboxes_grpc_bridge:\n\ngRPC Bridge\n===========\n\nEnvoy gRPC\n~~~~~~~~~~\n\nThe gRPC bridge sandbox is an example usage of Envoy's\n:ref:`gRPC bridge filter <config_http_filters_grpc_bridge>`.\n\nThis is an example of a key-value store where an ``http``-based client CLI, written in ``Python``,\nupdates a remote store, written in ``Go``, using the stubs generated for both languages.\n\nThe client send messages through a proxy that upgrades the HTTP requests from ``http/1.1`` to ``http/2``.\n\n``[client](http/1.1) -> [client-egress-proxy](http/2) -> [server-ingress-proxy](http/2) -> [server]``\n\nAnother Envoy feature demonstrated in this example is Envoy's ability to do authority\nbase routing via its route configuration.\n\n\nRunning the Sandbox\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Generate the protocol stubs\n***********************************\n\nA docker-compose file is provided that generates the stubs for both ``client`` and ``server`` from the\nspecification in the ``protos`` directory.\n\nInspecting the ``docker-compose-protos.yaml`` file, you will see that it contains both the ``python``\nand ``go`` gRPC protoc commands necessary for generating the protocol stubs.\n\nGenerate the stubs as follows:\n\n.. code-block:: console\n\n  $ pwd\n  envoy/examples/grpc-bridge\n  $ docker-compose -f docker-compose-protos.yaml up\n  Starting grpc-bridge_stubs_python_1 ... done\n  Starting grpc-bridge_stubs_go_1     ... done\n  Attaching to grpc-bridge_stubs_go_1, grpc-bridge_stubs_python_1\n  grpc-bridge_stubs_go_1 exited with code 0\n  grpc-bridge_stubs_python_1 exited with code 0\n\nYou may wish to clean up left over containers with the following command:\n\n.. code-block:: console\n\n  $ docker container prune\n\nYou can view the generated ``kv`` modules for both the client and server in their\nrespective directories:\n\n.. 
code-block:: console\n\n  $ ls -la client/kv/kv_pb2.py\n  -rw-r--r--  1 mdesales  CORP\\Domain Users  9527 Nov  6 21:59 client/kv/kv_pb2.py\n\n  $ ls -la server/kv/kv.pb.go\n  -rw-r--r--  1 mdesales  CORP\\Domain Users  9994 Nov  6 21:59 server/kv/kv.pb.go\n\nThese generated ``python`` and ``go`` stubs can be included as external modules.\n\nStep 4: Start all of our containers\n***********************************\n\nTo build this sandbox example and start the example services, run the following commands:\n\n.. code-block:: console\n\n    $ pwd\n    envoy/examples/grpc-bridge\n    $ docker-compose pull\n    $ docker-compose up --build -d\n    $ docker-compose ps\n\n                   Name                             Command               State                         Ports\n    ---------------------------------------------------------------------------------------------------------------------------------------\n    grpc-bridge_grpc-client-proxy_1        /docker-entrypoint.sh /bin ... Up      10000/tcp, 0.0.0.0:9911->9911/tcp, 0.0.0.0:9991->9991/tcp\n    grpc-bridge_grpc-client_1              /bin/sh -c tail -f /dev/null   Up\n    grpc-bridge_grpc-server-proxy_1        /docker-entrypoint.sh /bin ... Up      10000/tcp, 0.0.0.0:8811->8811/tcp, 0.0.0.0:8881->8881/tcp\n    grpc-bridge_grpc-server_1              /bin/sh -c /bin/server         Up      0.0.0.0:8081->8081/tcp\n\n\nSending requests to the Key/Value store\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nTo use the Python service and send gRPC requests:\n\n.. code-block:: console\n\n  $ pwd\n  envoy/examples/grpc-bridge\n\nSet a key:\n\n.. code-block:: console\n\n  $ docker-compose exec python /client/client.py set foo bar\n  setf foo to bar\n\n\nGet a key:\n\n.. code-block:: console\n\n  $ docker-compose exec python /client/client.py get foo\n  bar\n\nModify an existing key:\n\n.. code-block:: console\n\n  $ docker-compose exec python /client/client.py set foo baz\n  setf foo to baz\n\nGet the modified key:\n\n.. 
code-block:: console\n\n  $ docker-compose exec python /client/client.py get foo\n  baz\n\nIn the running docker-compose container, you should see the gRPC service printing a record of its activity:\n\n.. code-block:: console\n\n  $ docker-compose logs grpc-server\n  grpc_1    | 2017/05/30 12:05:09 set: foo = bar\n  grpc_1    | 2017/05/30 12:05:12 get: foo\n  grpc_1    | 2017/05/30 12:05:18 set: foo = baz\n"
  },
  {
    "path": "docs/root/start/sandboxes/jaeger_native_tracing.rst",
    "content": ".. _install_sandboxes_jaeger_native_tracing:\n\nJaeger Native Tracing\n=====================\n\nThe Jaeger tracing sandbox demonstrates Envoy's :ref:`request tracing <arch_overview_tracing>`\ncapabilities using `Jaeger <https://jaegertracing.io/>`_ as the tracing provider and Jaeger's native\n`C++ client <https://github.com/jaegertracing/jaeger-client-cpp>`_ as a plugin. Using Jaeger with its\nnative client instead of with Envoy's builtin Zipkin client has the following advantages:\n\n- Trace propagation will work with the other services using Jaeger without needing to make\n  configuration `changes <https://github.com/jaegertracing/jaeger-client-go#zipkin-http-b3-compatible-header-propagation>`_.\n- A variety of different `sampling strategies <https://www.jaegertracing.io/docs/sampling/#client-sampling-configuration>`_\n  can be used, including probabilistic or remote where sampling can be centrally controlled from Jaeger's backend.\n- Spans are sent to the collector in a more efficient binary encoding.\n\n\nThis sandbox is very similar to the front proxy architecture described above, with one difference:\nservice1 makes an API call to service2 before returning a response.\nThe three containers will be deployed inside a virtual network called ``envoymesh``.\n\n.. note::\n\n   The jaeger native tracing sandbox only works on x86-64.\n\nAll incoming requests are routed via the front Envoy, which is acting as a reverse proxy\nsitting on the edge of the ``envoymesh`` network. Port ``8000`` is exposed\nby docker compose (see :repo:`/examples/jaeger-native-tracing/docker-compose.yaml`). 
Notice that\nall Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in\n:repo:`/examples/jaeger-native-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated\nby the Jaeger tracer to a Jaeger cluster (trace driver setup\nin :repo:`/examples/jaeger-native-tracing/front-envoy-jaeger.yaml`).\n\nBefore routing a request to the appropriate service Envoy or the application, Envoy will take\ncare of generating the appropriate spans for tracing (parent/child context spans).\nAt a high-level, each span records the latency of upstream API calls as well as information\nneeded to correlate the span with other related spans (e.g., the trace ID).\n\nOne of the most important benefits of tracing from Envoy is that it will take care of\npropagating the traces to the Jaeger service cluster. However, in order to fully take advantage\nof tracing, the application has to propagate trace headers that Envoy generates, while making\ncalls to other services. In the sandbox we have provided, the simple flask app\n(see trace function in :repo:`/examples/front-proxy/service.py`) acting as service1 propagates\nthe trace headers while making an outbound call to service2.\n\n\nRunning the Sandbox\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Build the sandbox\n*************************\n\nTo build this sandbox example, and start the example apps run the following commands:\n\n.. 
code-block:: console\n\n    $ pwd\n    envoy/examples/jaeger-native-tracing\n    $ docker-compose pull\n    $ docker-compose up --build -d\n    $ docker-compose ps\n\n                Name                              Command                State                                                      Ports\n    -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n    jaeger-native-tracing_front-envoy_1   /start-front.sh                Up      10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp\n    jaeger-native-tracing_jaeger_1        /go/bin/all-in-one-linux - ... Up      14250/tcp, 14268/tcp, 0.0.0.0:16686->16686/tcp, 5775/udp, 5778/tcp, 6831/udp, 6832/udp, 0.0.0.0:9411->9411/tcp\n    jaeger-native-tracing_service1_1      /start-service.sh              Up      10000/tcp, 8000/tcp\n    jaeger-native-tracing_service2_1      /start-service.sh              Up      10000/tcp, 8000/tcp\n\nStep 4: Generate some load\n**************************\n\nYou can now send a request to service1 via the front-envoy as follows:\n\n.. code-block:: console\n\n    $ curl -v localhost:8000/trace/1\n    *   Trying 192.168.99.100...\n    * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0)\n    > GET /trace/1 HTTP/1.1\n    > Host: 192.168.99.100:8000\n    > User-Agent: curl/7.54.0\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 89\n    < x-envoy-upstream-service-time: 9\n    < server: envoy\n    < date: Fri, 26 Aug 2018 19:39:19 GMT\n    <\n    Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6\n    * Connection #0 to host 192.168.99.100 left intact\n\nStep 5: View the traces in Jaeger UI\n************************************\n\nPoint your browser to http://localhost:16686 . 
You should see the Jaeger dashboard.\nSet the service to \"front-proxy\" and hit 'Find Traces'. You should see traces from the front-proxy.\nClick on a trace to explore the path taken by the request from front-proxy to service1\nto service2, as well as the latency incurred at each hop.\n"
  },
  {
    "path": "docs/root/start/sandboxes/jaeger_tracing.rst",
    "content": ".. _install_sandboxes_jaeger_tracing:\n\nJaeger Tracing\n==============\n\nThe Jaeger tracing sandbox demonstrates Envoy's :ref:`request tracing <arch_overview_tracing>`\ncapabilities using `Jaeger <https://jaegertracing.io/>`_ as the tracing provider. This sandbox\nis very similar to the front proxy architecture described above, with one difference:\nservice1 makes an API call to service2 before returning a response.\nThe three containers will be deployed inside a virtual network called ``envoymesh``.\n\nAll incoming requests are routed via the front Envoy, which is acting as a reverse proxy\nsitting on the edge of the ``envoymesh`` network. Port ``8000`` is exposed\nby docker compose (see :repo:`/examples/jaeger-tracing/docker-compose.yaml`). Notice that\nall Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in\n:repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated\nby the Jaeger tracer to a Jaeger cluster (trace driver setup\nin :repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`).\n\nBefore routing a request to the appropriate service Envoy or the application, Envoy will take\ncare of generating the appropriate spans for tracing (parent/child context spans).\nAt a high-level, each span records the latency of upstream API calls as well as information\nneeded to correlate the span with other related spans (e.g., the trace ID).\n\nOne of the most important benefits of tracing from Envoy is that it will take care of\npropagating the traces to the Jaeger service cluster. However, in order to fully take advantage\nof tracing, the application has to propagate trace headers that Envoy generates, while making\ncalls to other services. 
In the sandbox we have provided, the simple flask app\n(see trace function in :repo:`/examples/front-proxy/service.py`) acting as service1 propagates\nthe trace headers while making an outbound call to service2.\n\n\nRunning the Sandbox\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Build the sandbox\n*************************\n\nTo build this sandbox example, and start the example apps run the following commands:\n\n.. code-block:: console\n\n    $ pwd\n    envoy/examples/jaeger-tracing\n    $ docker-compose pull\n    $ docker-compose up --build -d\n    $ docker-compose ps\n\n                Name                          Command             State                                                       Ports\n    ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n    jaeger-tracing_front-envoy_1   /docker-entrypoint.sh /bin ... Up      10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp\n    jaeger-tracing_jaeger_1        /go/bin/all-in-one-linux - ... Up      14250/tcp, 14268/tcp, 0.0.0.0:16686->16686/tcp, 5775/udp, 5778/tcp, 6831/udp, 6832/udp, 0.0.0.0:9411->9411/tcp\n    jaeger-tracing_service1_1      /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n    jaeger-tracing_service2_1      /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n\nStep 4: Generate some load\n**************************\n\nYou can now send a request to service1 via the front-envoy as follows:\n\n.. 
code-block:: console\n\n    $ curl -v localhost:8000/trace/1\n    *   Trying 192.168.99.100...\n    * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0)\n    > GET /trace/1 HTTP/1.1\n    > Host: 192.168.99.100:8000\n    > User-Agent: curl/7.54.0\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 89\n    < x-envoy-upstream-service-time: 9\n    < server: envoy\n    < date: Fri, 26 Aug 2018 19:39:19 GMT\n    <\n    Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6\n    * Connection #0 to host 192.168.99.100 left intact\n\nStep 5: View the traces in Jaeger UI\n************************************\n\nPoint your browser to http://localhost:16686 . You should see the Jaeger dashboard.\nSet the service to \"front-proxy\" and hit 'Find Traces'. You should see traces from the front-proxy.\nClick on a trace to explore the path taken by the request from front-proxy to service1\nto service2, as well as the latency incurred at each hop.\n"
  },
  {
    "path": "docs/root/start/sandboxes/load_reporting_service.rst",
    "content": ".. _install_sandboxes_load_reporting_service:\n\nLoad Reporting Service (LRS)\n============================\n\nThis simple example demonstrates Envoy's Load Reporting Service (LRS) capability and how to use it.\n\nLets say Cluster A (downstream) talks to Cluster B (Upstream) and Cluster C (Upstream). When enabling Load Report for\nCluster A, LRS server should be sending LoadStatsResponse to Cluster A with LoadStatsResponse.Clusters to be B and C.\nLRS server will then receive LoadStatsRequests (with total requests, successful requests etc) from Cluster A to Cluster B and\nfrom Cluster A to Cluster C.\n\nIn this example, all incoming requests are routed via Envoy to a simple goLang web server aka http_server.\nWe scale up two containers and randomly send requests to each. Envoy is configured to initiate the connection with LRS Server.\nLRS Server enables the stats by sending LoadStatsResponse. Sending requests to http_server will be counted towards successful requests and will be visible in LRS Server logs.\n\n\nRunning the Sandbox\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Build the sandbox\n*************************\n\nTerminal 1 ::\n\n    $ pwd\n    envoy/examples/load_reporting_service\n    $ docker-compose pull\n    $ docker-compose up --scale http_service=2\n\n\nTerminal 2 ::\n\n    $ pwd\n    envoy/examples/load_reporting_service\n    $ docker-compose ps\n\n                                Name                               Command               State                           Ports\n    --------------------------------------------------------------------------------------------------------------------------------------\n    load-reporting-service_http_service_1   /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 0.0.0.0:80->80/tcp, 0.0.0.0:8081->8081/tcp\n    load-reporting-service_http_service_2   /bin/sh -c /usr/local/bin/ ... 
Up      10000/tcp, 0.0.0.0:81->80/tcp, 0.0.0.0:8082->8081/tcp\n    load-reporting-service_lrs_server_1     go run main.go                   Up      0.0.0.0:18000->18000/tcp\n\nStep 4: Start sending stream of HTTP requests\n*********************************************\n\nTerminal 2 ::\n\n  $ pwd\n  envoy/examples/load_reporting_service\n  $ bash send_requests.sh\n\nThe script above (``send_requests.sh``) sends requests randomly to each Envoy, which in turn forwards the requests to the backend service.\n\nStep 5: See Envoy Stats\n***********************\n\nYou should see\n\nTerminal 1 ::\n\n    ............................\n    lrs_server_1    | 2020/02/12 17:08:20 LRS Server is up and running on :18000\n    lrs_server_1    | 2020/02/12 17:08:23 Adding new cluster to cache `http_service` with node `0022a319e1e2`\n    lrs_server_1    | 2020/02/12 17:08:24 Adding new node `2417806c9d9a` to existing cluster `http_service`\n    lrs_server_1    | 2020/02/12 17:08:25 Creating LRS response for cluster http_service, node 2417806c9d9a with frequency 2 secs\n    lrs_server_1    | 2020/02/12 17:08:25 Creating LRS response for cluster http_service, node 0022a319e1e2 with frequency 2 secs\n    http_service_2  | 127.0.0.1 - - [12/Feb/2020 17:09:06] \"GET /service HTTP/1.1\" 200 -\n    http_service_1  | 127.0.0.1 - - [12/Feb/2020 17:09:06] \"GET /service HTTP/1.1\" 200 -\n    ............................\n    lrs_server_1    | 2020/02/12 17:09:07 Got stats from cluster `http_service` node `0022a319e1e2` - cluster_name:\"local_service\" upstream_locality_stats:<locality:<> total_successful_requests:21 total_issued_requests:21 > load_report_interval:<seconds:1 nanos:998411000 >\n    lrs_server_1    | 2020/02/12 17:09:07 Got stats from cluster `http_service` node `2417806c9d9a` - cluster_name:\"local_service\" upstream_locality_stats:<locality:<> total_successful_requests:17 total_issued_requests:17 > load_report_interval:<seconds:1 nanos:994529000 >\n    http_service_2  | 127.0.0.1 
- - [12/Feb/2020 17:09:07] \"GET /service HTTP/1.1\" 200 -\n    http_service_1  | 127.0.0.1 - - [12/Feb/2020 17:09:07] \"GET /service HTTP/1.1\" 200 -\n    ............................\n    lrs_server_1    | 2020/02/12 17:09:09 Got stats from cluster `http_service` node `0022a319e1e2` - cluster_name:\"local_service\" upstream_locality_stats:<locality:<> total_successful_requests:3 total_issued_requests:3 > load_report_interval:<seconds:2 nanos:2458000 >\n    lrs_server_1    | 2020/02/12 17:09:09 Got stats from cluster `http_service` node `2417806c9d9a` - cluster_name:\"local_service\" upstream_locality_stats:<locality:<> total_successful_requests:9 total_issued_requests:9 > load_report_interval:<seconds:2 nanos:6487000 >\n"
  },
  {
    "path": "docs/root/start/sandboxes/lua.rst",
    "content": ".. _install_sandboxes_lua:\n\nLua Filter\n==========\n\nIn this example, we show how a Lua filter can be used with the Envoy\nproxy. The Envoy proxy configuration includes a Lua\nfilter that contains two functions namely\n``envoy_on_request(request_handle)`` and\n``envoy_on_response(response_handle)`` as documented :ref:`here <config_http_filters_lua>`.\n\nRunning the Sandboxes\n~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Build the sandbox\n*************************\n\n.. code-block:: console\n\n  $ pwd\n  envoy/examples/lua\n  $ docker-compose pull\n  $ docker-compose up --build -d\n  $ docker-compose ps\n\n      Name                     Command               State                            Ports\n  --------------------------------------------------------------------------------------------------------------------\n  lua_proxy_1         /docker-entrypoint.sh /bin       Up      10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp\n  lua_web_service_1   node ./index.js                  Up      0.0.0.0:8080->80/tcp\n\nStep 4: Send a request to the service\n*************************************\n\nThe output from the ``curl`` command below should include the headers ``foo``.\n\nTerminal 1\n\n.. code-block:: console\n\n  $ curl -v localhost:8000\n\n     Trying ::1...\n  * TCP_NODELAY set\n  * Connected to localhost (::1) port 8000 (#0)\n  > GET / HTTP/1.1\n  > Host: localhost:8000\n  > User-Agent: curl/7.64.1\n  > Accept: */*\n  >\n  < HTTP/1.1 200 OK\n  < x-powered-by: Express\n  < content-type: application/json; charset=utf-8\n  < content-length: 544\n  < etag: W/\"220-IhsqVTh4HjcpuJQ3C+rEL1Cw1jA\"\n  < date: Thu, 31 Oct 2019 03:13:24 GMT\n  < x-envoy-upstream-service-time: 1\n  < response-body-size: 544                      <-- This is added to the response header by our Lua script. 
--<\n  < server: envoy\n  <\n  {\n    \"path\": \"/\",\n    \"headers\": {\n      \"host\": \"localhost:8000\",\n      \"user-agent\": \"curl/7.64.1\",\n      \"accept\": \"*/*\",\n      \"x-forwarded-proto\": \"http\",\n      \"x-request-id\": \"a78fcce7-2d67-4eeb-890a-73eebb942a17\",\n      \"foo\": \"bar\",                              <-- This is added to the request header by our Lua script. --<\n      \"x-envoy-expected-rq-timeout-ms\": \"15000\",\n      \"content-length\": \"0\"\n    },\n    \"method\": \"GET\",\n    \"body\": \"\",\n    \"fresh\": false,\n    \"hostname\": \"localhost\",\n    \"ip\": \"::ffff:172.20.0.2\",\n    \"ips\": [],\n    \"protocol\": \"http\",\n    \"query\": {},\n    \"subdomains\": [],\n    \"xhr\": false,\n    \"os\": {\n      \"hostname\": \"7ca39ead805a\"\n    }\n  * Connection #0 to host localhost left intact\n  }* Closing connection 0\n"
  },
  {
    "path": "docs/root/start/sandboxes/mysql.rst",
    "content": ".. _install_sandboxes_mysql:\n\nMySQL Filter\n============\n\nIn this example, we show how the :ref:`MySQL filter <config_network_filters_mysql_proxy>` can be used with the Envoy proxy. The Envoy proxy configuration includes a MySQL filter that parses queries and collects MySQL-specific\nmetrics.\n\n\nRunning the Sandboxes\n~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Build the sandbox\n*************************\n\nTerminal 1\n\n.. code-block:: console\n\n  $ pwd\n  envoy/examples/mysql\n  $ docker-compose pull\n  $ docker-compose up --build -d\n  $ docker-compose ps\n\n      Name                   Command               State                             Ports\n  ------------------------------------------------------------------------------------------------------------------\n  mysql_mysql_1   docker-entrypoint.sh mysqld      Up      0.0.0.0:3306->3306/tcp\n  mysql_proxy_1   /docker-entrypoint.sh /bin       Up      10000/tcp, 0.0.0.0:1999->1999/tcp, 0.0.0.0:8001->8001/tcp\n\n\nStep 4: Issue commands using mysql\n**********************************\n\nUse ``mysql`` to issue some commands and verify they are routed via Envoy. Note\nthat the current implementation of the protocol filter was tested with MySQL\nv5.5. It may, however, not work with other versions of MySQL due to differences\nin the protocol implementation.\n\nTerminal 1\n\n.. code-block:: console\n\n  $ docker run --rm -it --network envoymesh mysql:5.5 mysql -h envoy -P 1999 -u root\n  ... 
snip ...\n\n  mysql> CREATE DATABASE test;\n  Query OK, 1 row affected (0.00 sec)\n\n  mysql> USE test;\n  Database changed\n  mysql> CREATE TABLE test ( text VARCHAR(255) );\n  Query OK, 0 rows affected (0.01 sec)\n\n  mysql> SELECT COUNT(*) FROM test;\n  +----------+\n  | COUNT(*) |\n  +----------+\n  |        0 |\n  +----------+\n  1 row in set (0.01 sec)\n\n  mysql> INSERT INTO test VALUES ('hello, world!');\n  Query OK, 1 row affected (0.00 sec)\n\n  mysql> SELECT COUNT(*) FROM test;\n  +----------+\n  | COUNT(*) |\n  +----------+\n  |        1 |\n  +----------+\n  1 row in set (0.00 sec)\n\n  mysql> exit\n  Bye\n\nStep 5: Check egress stats\n**************************\n\nCheck egress stats were updated.\n\nTerminal 1\n\n.. code-block:: console\n\n  $ curl -s http://localhost:8001/stats?filter=egress_mysql\n  mysql.egress_mysql.auth_switch_request: 0\n  mysql.egress_mysql.decoder_errors: 0\n  mysql.egress_mysql.login_attempts: 1\n  mysql.egress_mysql.login_failures: 0\n  mysql.egress_mysql.protocol_errors: 0\n  mysql.egress_mysql.queries_parse_error: 0\n  mysql.egress_mysql.queries_parsed: 7\n  mysql.egress_mysql.sessions: 1\n  mysql.egress_mysql.upgraded_to_ssl: 0\n\nStep 6: Check TCP stats\n***********************\n\nCheck TCP stats were updated.\n\nTerminal 1\n\n.. code-block:: console\n\n  $ curl -s http://localhost:8001/stats?filter=mysql_tcp\n  tcp.mysql_tcp.downstream_cx_no_route: 0\n  tcp.mysql_tcp.downstream_cx_rx_bytes_buffered: 0\n  tcp.mysql_tcp.downstream_cx_rx_bytes_total: 347\n  tcp.mysql_tcp.downstream_cx_total: 1\n  tcp.mysql_tcp.downstream_cx_tx_bytes_buffered: 0\n  tcp.mysql_tcp.downstream_cx_tx_bytes_total: 702\n  tcp.mysql_tcp.downstream_flow_control_paused_reading_total: 0\n  tcp.mysql_tcp.downstream_flow_control_resumed_reading_total: 0\n  tcp.mysql_tcp.idle_timeout: 0\n  tcp.mysql_tcp.upstream_flush_active: 0\n  tcp.mysql_tcp.upstream_flush_total: 0\n"
  },
  {
    "path": "docs/root/start/sandboxes/redis.rst",
    "content": ".. _install_sandboxes_redis_filter:\n\nRedis Filter\n============\n\nIn this example, we show how a :ref:`Redis filter <config_network_filters_redis_proxy>` can be used with the Envoy proxy. The Envoy proxy configuration includes a Redis filter that routes egress requests to redis server.\n\n\nRunning the Sandboxes\n~~~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Build the sandbox\n*************************\n\nTerminal 1\n\n.. code-block:: console\n\n  $ pwd\n  envoy/examples/redis\n  $ docker-compose pull\n  $ docker-compose up --build -d\n  $ docker-compose ps\n\n      Name                   Command               State                             Ports\n  ------------------------------------------------------------------------------------------------------------------\n  redis_proxy_1   /docker-entrypoint.sh /bin       Up      10000/tcp, 0.0.0.0:1999->1999/tcp, 0.0.0.0:8001->8001/tcp\n  redis_redis_1   docker-entrypoint.sh redis       Up      0.0.0.0:6379->6379/tcp\n\nStep 4: Issue Redis commands\n****************************\n\nIssue Redis commands using your favourite Redis client, such as ``redis-cli``, and verify they are routed via Envoy.\n\nTerminal 1\n\n.. code-block:: console\n\n  $ redis-cli -h localhost -p 1999 set foo foo\n  OK\n  $ redis-cli -h localhost -p 1999 set bar bar\n  OK\n  $ redis-cli -h localhost -p 1999 get foo\n  \"foo\"\n  $ redis-cli -h localhost -p 1999 get bar\n  \"bar\"\n\nStep 5: Verify egress stats\n***************************\n\nGo to ``http://localhost:8001/stats?usedonly&filter=redis.egress_redis.command`` and verify the following stats:\n\n.. code-block:: none\n\n  redis.egress_redis.command.get.total: 2\n  redis.egress_redis.command.set.total: 2\n"
  },
  {
    "path": "docs/root/start/sandboxes/zipkin_tracing.rst",
    "content": ".. _install_sandboxes_zipkin_tracing:\n\nZipkin Tracing\n==============\n\nThe Zipkin tracing sandbox demonstrates Envoy's :ref:`request tracing <arch_overview_tracing>`\ncapabilities using `Zipkin <https://zipkin.io/>`_ as the tracing provider. This sandbox\nis very similar to the front proxy architecture described above, with one difference:\nservice1 makes an API call to service2 before returning a response.\nThe three containers will be deployed inside a virtual network called ``envoymesh``.\n\nAll incoming requests are routed via the front Envoy, which is acting as a reverse proxy\nsitting on the edge of the ``envoymesh`` network. Port ``8000`` is exposed\nby docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yaml`). Notice that\nall Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in\n:repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`) and setup to propagate the spans generated\nby the Zipkin tracer to a Zipkin cluster (trace driver setup\nin :repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`).\n\nBefore routing a request to the appropriate service Envoy or the application, Envoy will take\ncare of generating the appropriate spans for tracing (parent/child/shared context spans).\nAt a high-level, each span records the latency of upstream API calls as well as information\nneeded to correlate the span with other related spans (e.g., the trace ID).\n\nOne of the most important benefits of tracing from Envoy is that it will take care of\npropagating the traces to the Zipkin service cluster. However, in order to fully take advantage\nof tracing, the application has to propagate trace headers that Envoy generates, while making\ncalls to other services. 
In the sandbox we have provided, the simple flask app\n(see trace function in :repo:`/examples/front-proxy/service.py`) acting as service1 propagates\nthe trace headers while making an outbound call to service2.\n\n\nRunning the Sandbox\n~~~~~~~~~~~~~~~~~~~\n\n.. include:: _include/docker-env-setup.rst\n\nStep 3: Build the sandbox\n*************************\n\nTo build this sandbox example, and start the example apps run the following commands:\n\n.. code-block:: console\n\n    $ pwd\n    envoy/examples/zipkin-tracing\n    $ docker-compose pull\n    $ docker-compose up --build -d\n    $ docker-compose ps\n\n                Name                          Command             State                            Ports\n    -----------------------------------------------------------------------------------------------------------------------------\n    zipkin-tracing_front-envoy_1   /docker-entrypoint.sh /bin ... Up      10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp\n    zipkin-tracing_service1_1      /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n    zipkin-tracing_service2_1      /bin/sh -c /usr/local/bin/ ... Up      10000/tcp, 8000/tcp\n    zipkin-tracing_zipkin_1        /busybox/sh run.sh             Up      9410/tcp, 0.0.0.0:9411->9411/tcp\n\nStep 4: Generate some load\n**************************\n\nYou can now send a request to service1 via the front-envoy as follows:\n\n.. code-block:: console\n\n    $ curl -v localhost:8000/trace/1\n    *   Trying 192.168.99.100...\n    * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0)\n    > GET /trace/1 HTTP/1.1\n    > Host: 192.168.99.100:8000\n    > User-Agent: curl/7.43.0\n    > Accept: */*\n    >\n    < HTTP/1.1 200 OK\n    < content-type: text/html; charset=utf-8\n    < content-length: 89\n    < x-envoy-upstream-service-time: 1\n    < server: envoy\n    < date: Fri, 26 Aug 2018 19:39:19 GMT\n    <\n    Hello from behind Envoy (service 1)! 
hostname: f26027f1ce28 resolvedhostname: 172.19.0.6\n    * Connection #0 to host 192.168.99.100 left intact\n\nStep 5: View the traces in Zipkin UI\n************************************\n\nPoint your browser to http://localhost:9411 . You should see the Zipkin dashboard.\nSet the service to \"front-proxy\" and set the start time to a few minutes before\nthe start of the test (step 2) and hit enter. You should see traces from the front-proxy.\nClick on a trace to explore the path taken by the request from front-proxy to service1\nto service2, as well as the latency incurred at each hop.\n"
  },
  {
    "path": "docs/root/start/start.rst",
    "content": ".. _start:\n\nGetting Started\n===============\n\nThis section gets you started with a very simple configuration and provides some example configurations.\n\nThe fastest way to get started using Envoy is :ref:`installing pre-built binaries <install_binaries>`.\nYou can also :ref:`build it <building>` from source.\n\nThese examples use the :ref:`v3 Envoy API <envoy_api_reference>`, but use only the static configuration\nfeature of the API, which is most useful for simple requirements. For more complex requirements\n:ref:`Dynamic Configuration <arch_overview_dynamic_config>` is supported.\n\nQuick Start to Run Simple Example\n---------------------------------\n\nThese instructions run from files in the Envoy repo. The sections below give a\nmore detailed explanation of the configuration file and execution steps for\nthe same configuration.\n\nA very minimal Envoy configuration that can be used to validate basic plain HTTP\nproxying is available in :repo:`configs/google_com_proxy.v2.yaml`. This is not\nintended to represent a realistic Envoy deployment:\n\n.. substitution-code-block:: none\n\n  $ docker pull envoyproxy/|envoy_docker_image|\n  $ docker run --rm -d -p 10000:10000 envoyproxy/|envoy_docker_image|\n  $ curl -v localhost:10000\n\nThe Docker image used will contain the latest version of Envoy\nand a basic Envoy configuration. This basic configuration tells\nEnvoy to route incoming requests to \\*.google.com.\n\nSimple Configuration\n--------------------\n\nEnvoy can be configured using a single YAML file passed in as an argument on the command line.\n\nThe :ref:`admin message <envoy_v3_api_msg_config.bootstrap.v3.Admin>` is required to configure\nthe administration server. The `address` key specifies the\nlistening :ref:`address <envoy_v3_api_file_envoy/config/core/v3/address.proto>`\nwhich in this case is simply `0.0.0.0:9901`.\n\n.. 
code-block:: yaml\n\n  admin:\n    access_log_path: /tmp/admin_access.log\n    address:\n      socket_address: { address: 0.0.0.0, port_value: 9901 }\n\nThe :ref:`static_resources <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.static_resources>` contains everything that is configured statically when Envoy starts,\nas opposed to the means of configuring resources dynamically when Envoy is running.\nThe :ref:`v2 API Overview <config_overview>` describes this.\n\n.. code-block:: yaml\n\n    static_resources:\n\nThe specification of the :ref:`listeners <envoy_v3_api_file_envoy/config/listener/v3/listener.proto>`.\n\n.. code-block:: yaml\n\n      listeners:\n      - name: listener_0\n        address:\n          socket_address: { address: 0.0.0.0, port_value: 10000 }\n        filter_chains:\n        - filters:\n          - name: envoy.filters.network.http_connection_manager\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n              stat_prefix: ingress_http\n              codec_type: AUTO\n              route_config:\n                name: local_route\n                virtual_hosts:\n                - name: local_service\n                  domains: [\"*\"]\n                  routes:\n                  - match: { prefix: \"/\" }\n                    route: { host_rewrite_literal: www.google.com, cluster: service_google }\n              http_filters:\n              - name: envoy.filters.http.router\n\nThe specification of the :ref:`clusters <envoy_v3_api_file_envoy/service/cluster/v3/cds.proto>`.\n\n.. 
code-block:: yaml\n\n      clusters:\n      - name: service_google\n        connect_timeout: 0.25s\n        type: LOGICAL_DNS\n        # Comment out the following line to test on v6 networks\n        dns_lookup_family: V4_ONLY\n        lb_policy: ROUND_ROBIN\n        load_assignment:\n          cluster_name: service_google\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: www.google.com\n                    port_value: 443\n        transport_socket:\n          name: envoy.transport_sockets.tls\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n            sni: www.google.com\n\n\nUsing the Envoy Docker Image\n----------------------------\n\nCreate a simple Dockerfile to execute Envoy, which assumes that envoy.yaml (described above) is in your local directory.\nYou can refer to the :ref:`Command line options <operations_cli>`.\n\n.. substitution-code-block:: none\n\n  FROM envoyproxy/|envoy_docker_image|\n  COPY envoy.yaml /etc/envoy/envoy.yaml\n\nBuild the Docker image that runs your configuration using::\n\n  $ docker build -t envoy:v1 .\n\nAnd now you can execute it with::\n\n  $ docker run -d --name envoy -p 9901:9901 -p 10000:10000 envoy:v1\n\nAnd finally, test it using::\n\n  $ curl -v localhost:10000\n\nIf you would like to use Envoy with docker-compose you can overwrite the provided configuration file\nby using a volume.\n\n.. substitution-code-block: yaml\n\n  version: '3'\n  services:\n    envoy:\n      image: envoyproxy/|envoy_docker_image|\n      ports:\n        - \"10000:10000\"\n      volumes:\n        - ./envoy.yaml:/etc/envoy/envoy.yaml\n\nBy default the Docker image will run as the ``envoy`` user created at build time.\n\nThe ``uid`` and ``gid`` of this user can be set at runtime using the ``ENVOY_UID`` and ``ENVOY_GID``\nenvironment variables. 
This can be done, for example, on the Docker command line::\n\n  $ docker run -d --name envoy -e ENVOY_UID=777 -e ENVOY_GID=777 -p 9901:9901 -p 10000:10000 envoy:v1\n\nThis can be useful if you wish to restrict or provide access to ``unix`` sockets inside the container, or\nfor controlling access to an ``envoy`` socket from outside of the container.\n\nIf you wish to run the container as the ``root`` user you can set ``ENVOY_UID`` to ``0``.\n\nThe ``envoy`` image sends application logs to ``/dev/stdout`` and ``/dev/stderr`` by default, and these\ncan be viewed in the container log.\n\nIf you send application, admin or access logs to a file output, the ``envoy`` user will require the\nnecessary permissions to write to this file. This can be achieved by setting the ``ENVOY_UID`` and/or\nby making the file writeable by the envoy user.\n\nFor example, to mount a log folder from the host and make it writable, you can:\n\n.. substitution-code-block:: none\n\n  $ mkdir logs\n  $ chown 777 logs\n  $ docker run -d -v `pwd`/logs:/var/log --name envoy -e ENVOY_UID=777 -p 9901:9901 -p 10000:10000 envoy:v1\n\nYou can then configure ``envoy`` to log to files in ``/var/log``\n\nThe default ``envoy`` ``uid`` and ``gid`` are ``101``.\n\nThe ``envoy`` user also needs to have permission to access any required configuration files mounted\ninto the container.\n\nIf you are running in an environment with a strict ``umask`` setting, you may need to provide envoy with\naccess either by setting the ``uid`` or ``gid`` of the file, or by making the configuration file readable\nby the envoy user.\n\nOne method of doing this without changing any file permissions or running as root inside the container\nis to start the container with the host user's ``uid``, for example:\n\n.. 
substitution-code-block:: none\n\n  $ docker run -d --name envoy -e ENVOY_UID=`id -u` -p 9901:9901 -p 10000:10000 envoy:v1\n\n\nSandboxes\n---------\n\nWe've created a number of sandboxes using Docker Compose that set up different\nenvironments to test out Envoy's features and show sample configurations. As we\ngauge peoples' interests we will add more sandboxes demonstrating different\nfeatures. The following sandboxes are available:\n\n.. toctree::\n    :maxdepth: 2\n\n    sandboxes/cache\n    sandboxes/cors\n    sandboxes/csrf\n    sandboxes/ext_authz\n    sandboxes/fault_injection\n    sandboxes/front_proxy\n    sandboxes/grpc_bridge\n    sandboxes/jaeger_native_tracing\n    sandboxes/jaeger_tracing\n    sandboxes/load_reporting_service\n    sandboxes/lua\n    sandboxes/mysql\n    sandboxes/redis\n    sandboxes/zipkin_tracing\n"
  },
  {
    "path": "docs/root/version_history/current.rst",
    "content": "1.16.0 (October 8, 2020)\n========================\n\nIncompatible Behavior Changes\n-----------------------------\n*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required*\n\n* build: added visibility rules for upstream. If these cause visibility related breakage, see notes in :repo:`BUILD <BUILD>`.\n* build: tcmalloc changes require Clang 9. This requirement change can be avoided by building with `--define tcmalloc=gperftools` to use the older tcmalloc code.\n* config: additional warnings have been added for the use of v2 APIs. These appear as log messages\n  and are also captured in the :ref:`deprecated_feature_use <runtime_stats>` counter after server\n  initialization.\n* dns: `envoy.restart_features.use_apple_api_for_dns_lookups` is on by default. This flag only affects Apple platforms (macOS, iOS). It is incompatible to have the runtime flag set to true at the same time as specifying the ``use_tcp_for_dns_lookups`` option or custom dns resolvers. Doing so will cause failure.\n* watchdog: added two guarddogs, breaking the aggregated stats for the single guarddog system. The aggregated stats for the guarddogs will have the following prefixes: `main_thread` and `workers`. Concretely, anything monitoring `server.watchdog_miss` and `server.watchdog_mega_miss` will need to be updated.\n\nMinor Behavior Changes\n----------------------\n*Changes that may cause incompatibilities for some users, but should not for most*\n\n* adaptive concurrency: added a response body / grpc-message header for rejected requests.\n* async_client: minor change to handling header only responses more similar to header-with-empty-body responses.\n* build: an :ref:`Ubuntu based debug image <install_binaries>` is built and published in DockerHub.\n* build: the debug information will be generated separately to reduce target size and reduce compilation time when build in compilation mode `dbg` and `opt`. 
Users will need to build dwp file to debug with gdb.\n* compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied.\n* decompressor: headers-only requests were incorrectly not advertising accept-encoding when configured to do so. This is now fixed.\n* ext_authz filter: request timeout will now count from the time the check request is created, instead of when it becomes active. This makes sure that the timeout is enforced even if the ext_authz cluster's circuit breaker is engaged.\n  This behavior can be reverted by setting runtime feature `envoy.reloadable_features.ext_authz_measure_timeout_on_check_created` to false. When enabled, a new `ext_authz.timeout` stat is counted when timeout occurs. See :ref:`stats <config_http_filters_ext_authz_stats>`.\n* grpc reverse bridge: upstream headers will no longer be propagated when the response is missing or contains an unexpected content-type.\n* http: added :ref:`contains <envoy_api_msg_type.matcher.StringMatcher>`, a new string matcher type which matches if the value of the string has the substring mentioned in contains matcher.\n* http: added :ref:`contains <envoy_api_msg_route.HeaderMatcher>`, a new header matcher type which matches if the value of the header has the substring mentioned in contains matcher.\n* http: added :ref:`headers_to_add <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.ResponseMapper.headers_to_add>` to :ref:`local reply mapper <config_http_conn_man_local_reply>` to allow its users to add/append/override response HTTP headers to local replies.\n* http: added HCM level configuration of :ref:`error handling on invalid messaging 
<envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message <envoy_v3_api_field_config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message>` to true to restore prior HTTP/1.1 behavior (i.e. connection isn't terminated) and to retain prior HTTP/2 behavior (i.e. connection is terminated).\n* http: added HCM level configuration of :ref:`error handling on invalid messaging <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>` to true to restore prior HTTP/1.1 behavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>` to false to retain prior HTTP/2 behavior.\n* http: applying route level header modifications to local replies sent on that route. 
This behavior may be temporarily reverted by setting `envoy.reloadable_features.always_apply_route_header_rules` to false.\n* http: changed Envoy to send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive <config_overload_manager_overload_actions>` overload action is active. This behavior may be temporarily reverted by setting `envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2` to false.\n* http: changed Envoy to send error headers and body when possible. This behavior may be temporarily reverted by setting `envoy.reloadable_features.allow_response_for_timeout` to false.\n* http: changed empty trailers encoding behavior by sending empty data with ``end_stream`` true (instead of sending empty trailers) for HTTP/2. This behavior can be reverted temporarily by setting runtime feature `envoy.reloadable_features.http2_skip_encoding_empty_trailers` to false.\n* http: changed how local replies are processed for requests which transform from grpc to not-grpc, or not-grpc to grpc. Previously the initial generated reply depended on which filter sent the reply, but now the reply is consistently generated the way the downstream expects. This behavior can be temporarily reverted by setting `envoy.reloadable_features.unify_grpc_handling` to false.\n* http: clarified and enforced 1xx handling. Multiple 100-continue headers are coalesced when proxying. 1xx headers other than {100, 101} are dropped.\n* http: fixed a bug in access logs where early stream termination could be incorrectly tagged as a downstream disconnect, and disconnects after partial response were not flagged.\n* http: fixed the 100-continue response path to properly handle upstream failure by sending 5xx responses. This behavior can be temporarily reverted by setting `envoy.reloadable_features.allow_500_after_100` to false.\n* http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. 
As such, code that relies on interacting with this might\n  see a change in behavior.\n* logging: added fine-grain logging for file level log control with logger management at administration interface. It can be enabled by option :option:`--enable-fine-grain-logging`.\n* logging: changed default log format to `\"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v\"` and default value of :option:`--log-format-prefix-with-location` to `0`.\n* logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set\n  in the environment.\n* lua: changed the response body returned by `httpCall()` API to raw data. Previously, the returned data was string.\n* memory: switched to the `new tcmalloc <https://github.com/google/tcmalloc>`_ for linux_x86_64 builds. The `old tcmalloc <https://github.com/gperftools/gperftools>`_ can still be enabled with the `--define tcmalloc=gperftools` option.\n* postgres: changed log format to tokenize fields of Postgres messages.\n* router: added transport failure reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:{}, transport failure reason:{}`. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.http_transport_failure_reason_in_body` to false.\n* router: now consumes all retry related headers to prevent them from being propagated to the upstream. 
This behavior may be reverted by setting runtime feature `envoy.reloadable_features.consume_all_retry_headers` to false.\n* stats: the fake symbol table implementation has been removed from the binary, and the option `--use-fake-symbol-table` is now a no-op with a warning.\n* thrift_proxy: special characters {'\\0', '\\r', '\\n'} will be stripped from thrift headers.\n* watchdog: replaced single watchdog with separate watchdog configuration for worker threads and for the main thread configured via :ref:`Watchdogs<envoy_v3_api_field_config.bootstrap.v3.Bootstrap.watchdogs>`. It works with :ref:`watchdog<envoy_v3_api_field_config.bootstrap.v3.Bootstrap.watchdog>` by having the worker thread and main thread watchdogs have same config.\n\nBug Fixes\n---------\n*Changes expected to improve the state of the world and are unlikely to have negative effects*\n\n* csrf: fixed issues with regards to origin and host header parsing.\n* dynamic_forward_proxy: only perform DNS lookups for routes to Dynamic Forward Proxy clusters since other cluster types handle DNS lookup themselves.\n* fault: fixed an issue with `active_faults` gauge not being decremented for when abort faults were injected.\n* fault: made the HeaderNameValues::prefix() method const.\n* grpc-web: fixed an issue with failing HTTP/2 requests on some browsers. 
Notably, WebKit-based browsers (https://bugs.webkit.org/show_bug.cgi?id=210108), Internet Explorer 11, and Edge (pre-Chromium).\n* http: fixed CVE-2020-25018 by rolling back the ``GURL`` dependency to previous state (reverted: ``2d69e30``, ``d828958``, and ``c9c4709`` commits) due to potential of crashing when Unicode URIs are present in requests.\n* http: fixed bugs in datadog and squash filter's handling of responses with no bodies.\n* http: made the HeaderValues::prefix() method const.\n* jwt_authn: supports jwt payload without \"iss\" field.\n* listener: fixed crash at listener inplace update when connection load balancer is set.\n* rocketmq_proxy: fixed an issue involving incorrect header lengths. In debug mode it causes crash and in release mode it causes underflow.\n* thrift_proxy: fixed crashing bug on request overflow.\n* udp_proxy: fixed a crash due to UDP packets being processed after listener removal.\n\nRemoved Config or Runtime\n-------------------------\n*Normally occurs at the end of the* :ref:`deprecation period <deprecated>`\n\n* http: removed legacy header sanitization and the runtime guard `envoy.reloadable_features.strict_header_validation`.\n* http: removed legacy transfer-encoding enforcement and runtime guard `envoy.reloadable_features.reject_unsupported_transfer_encodings`.\n* http: removed configurable strict host validation and runtime guard `envoy.reloadable_features.strict_authority_validation`.\n* http: removed the connection header sanitization runtime guard `envoy.reloadable_features.connection_header_sanitization`.\n\nNew Features\n------------\n* access log: added a :ref:`dynamic metadata filter<envoy_v3_api_msg_config.accesslog.v3.MetadataFilter>` for access logs, which filters whether to log based on matching dynamic metadata.\n* access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% <config_access_log_format_response_flags>` as a response flag.\n* access log: added support for :ref:`%CONNECTION_TERMINATION_DETAILS% 
<config_access_log_format_connection_termination_details>` as a log command operator about why the connection is terminated by Envoy.\n* access log: added support for nested objects in :ref:`JSON logging mode <config_access_log_format_dictionaries>`.\n* access log: added :ref:`omit_empty_values<envoy_v3_api_field_config.core.v3.SubstitutionFormatString.omit_empty_values>` option to omit unset value from formatted log.\n* access log: added support for :ref:`%CONNECTION_ID% <config_access_log_format_connection_id>` for the downstream connection identifier.\n* admin: added :ref:`circuit breakers settings <envoy_v3_api_msg_config.cluster.v3.CircuitBreakers>` information to GET /clusters?format=json :ref:`cluster status <envoy_v3_api_msg_admin.v3.ClusterStatus>`.\n* admin: added :ref:`node <envoy_v3_api_msg_config.core.v3.Node>` information to GET /server_info :ref:`response object <envoy_v3_api_msg_admin.v3.ServerInfo>`.\n* admin: added the ability to dump init manager unready targets information :ref:`/init_dump <operations_admin_interface_init_dump>` and :ref:`/init_dump?mask={} <operations_admin_interface_init_dump_by_mask>`.\n* admission control: added the :ref:`admission control <envoy_v3_api_msg_extensions.filters.http.admission_control.v3alpha.AdmissionControl>` filter for client-side request throttling.\n* build: enable building envoy :ref:`arm64 images <arm_binaries>` by buildx tool in x86 CI platform.\n* cluster: added new :ref:`connection_pool_per_downstream_connection <envoy_v3_api_field_config.cluster.v3.Cluster.connection_pool_per_downstream_connection>` flag, which enable creation of a new connection pool for each downstream connection.\n* decompressor filter: reports compressed and uncompressed bytes in trailers.\n* dns: added support for doing DNS resolution using Apple's DnsService APIs in Apple platforms (macOS, iOS). This feature is ON by default, and is only configurable via the `envoy.restart_features.use_apple_api_for_dns_lookups` runtime key. 
Note that this value is latched during server startup and changing the runtime key is a no-op during the lifetime of the process.\n* dns_filter: added support for answering :ref:`service record<envoy_v3_api_msg_data.dns.v3.DnsTable.DnsService>` queries.\n* dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups<envoy_v3_api_field_extensions.common.dynamic_forward_proxy.v3.DnsCacheConfig.use_tcp_for_dns_lookups>` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters<envoy_v3_api_msg_config.cluster.v3.Cluster>`.\n* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP <config_http_filters_ext_authz_dynamic_metadata>` and :ref:`network <config_network_filters_ext_authz_dynamic_metadata>` filters.\n  The emitted dynamic metadata is set by :ref:`dynamic metadata <envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>` field in a returned :ref:`CheckResponse <envoy_v3_api_msg_service.auth.v3.CheckResponse>`.\n* ext_authz filter: added :ref:`stat_prefix <envoy_v3_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.stat_prefix>` as an optional additional prefix for the statistics emitted from `ext_authz` HTTP filter.\n* ext_authz filter: added support for enabling the filter based on :ref:`dynamic metadata <envoy_v3_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.filter_enabled_metadata>`.\n* ext_authz filter: added support for letting the authorization server instruct Envoy to remove headers from the original request by setting the new field :ref:`headers_to_remove <envoy_v3_api_field_service.auth.v3.OkHttpResponse.headers_to_remove>` before forwarding it to the upstream.\n* ext_authz filter: added support for sending :ref:`raw bytes as request body <envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.raw_body>` of a gRPC check request by setting :ref:`pack_as_bytes <envoy_v3_api_field_extensions.filters.http.ext_authz.v3.BufferSettings.pack_as_bytes>` to true.\n* 
ext_authz_filter: added :ref:`disable_request_body_buffering <envoy_v3_api_field_extensions.filters.http.ext_authz.v3.CheckSettings.disable_request_body_buffering>` to disable request data buffering per-route.\n* grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message.\n* hds: added :ref:`cluster_endpoints_health <envoy_v3_api_field_service.health.v3.EndpointHealthResponse.cluster_endpoints_health>` to HDS responses, keeping endpoints in the same groupings as they were configured in the HDS specifier by cluster and locality instead of as a flat list.\n* hds: added :ref:`transport_socket_matches <envoy_v3_api_field_service.health.v3.ClusterHealthCheck.transport_socket_matches>` to HDS cluster health check specifier, so the existing match filter :ref:`transport_socket_match_criteria <envoy_v3_api_field_config.core.v3.HealthCheck.transport_socket_match_criteria>` in the repeated field :ref:`health_checks <envoy_v3_api_field_service.health.v3.ClusterHealthCheck.health_checks>` has context to match against. This unblocks support for health checks over HTTPS and HTTP/2.\n* hot restart: added :option:`--socket-path` and :option:`--socket-mode` to configure UDS path in the filesystem and set permission to it.\n* http: added HTTP/2 support for :ref:`connection keepalive <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.connection_keepalive>` via PING.\n* http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% <config_http_conn_man_headers_custom_request_headers>` as custom header.\n* http: added :ref:`allow_chunked_length <envoy_v3_api_field_config.core.v3.Http1ProtocolOptions.allow_chunked_length>` configuration option for HTTP/1 codec to allow processing requests/responses with both Content-Length and Transfer-Encoding: chunked headers. 
If such message is served and option is enabled - per RFC Content-Length is ignored and removed.\n* http: added :ref:`CDN Loop filter <envoy_v3_api_msg_extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig>` and :ref:`documentation <config_http_filters_cdn_loop>`.\n* http: added :ref:`MaxStreamDuration proto <envoy_v3_api_msg_config.route.v3.RouteAction.MaxStreamDuration>` for configuring per-route downstream duration timeouts.\n* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is used by default for HTTP/1.1 and HTTP/2 server connections. The new codecs can be enabled for testing by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to true. The new codecs will be in development for one month, and then enabled by default while the old codecs are deprecated.\n* http: modified the HTTP header-map data-structure to use an underlying dictionary and a list (no change to the header-map API). To conform with previous versions, the use of a dictionary is currently disabled. It can be enabled by setting the `envoy.http.headermap.lazy_map_min_size` runtime feature to a non-negative number which defines the minimal number of headers in a request/response/trailers required for using a dictionary in addition to the list. 
Our current benchmarks suggest that the value 3 is a good threshold for most workloads.\n* load balancer: added :ref:`MaglevLbConfig<envoy_v3_api_msg_config.cluster.v3.Cluster.MaglevLbConfig>` to configure the table size of Maglev consistent hash.\n* load balancer: added a :ref:`configuration<envoy_v3_api_msg_config.cluster.v3.Cluster.LeastRequestLbConfig>` option to specify the active request bias used by the least request load balancer.\n* load balancer: added an :ref:`option <envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.single_host_per_subset>` to optimize subset load balancing when there is only one host per subset.\n* load balancer: added support for bounded load per host for consistent hash load balancers via :ref:`hash_balance_factor <envoy_api_field_Cluster.CommonLbConfig.consistent_hashing_lb_config>`.\n* local_ratelimit: added new :ref:`HTTP local ratelimit filter <config_http_filters_local_rate_limit>`.\n* local_reply config: added :ref:`content_type<envoy_v3_api_field_config.core.v3.SubstitutionFormatString.content_type>` field to set content-type.\n* lua: added Lua APIs to access :ref:`SSL connection info <config_http_filters_lua_ssl_socket_info>` object.\n* lua: added Lua API for :ref:`base64 escaping a string <config_http_filters_lua_stream_handle_api_base64_escape>`.\n* lua: added Lua API for :ref:`setting the current buffer content <config_http_filters_lua_buffer_wrapper_api_set_bytes>`.\n* lua: added new :ref:`source_code <envoy_v3_api_field_extensions.filters.http.lua.v3.LuaPerRoute.source_code>` field to support the dispatching of inline Lua code in per route configuration of Lua filter.\n* overload management: add :ref:`scaling <envoy_v3_api_field_config.overload.v3.Trigger.scaled>` trigger for OverloadManager actions.\n* postgres network filter: :ref:`metadata <config_network_filters_postgres_proxy_dynamic_metadata>` is produced based on SQL query.\n* proxy protocol: added support for generating the header 
upstream using :ref:`Proxy Protocol Transport Socket <extension_envoy.transport_sockets.upstream_proxy_protocol>`.\n* ratelimit: added :ref:`enable_x_ratelimit_headers <envoy_v3_api_msg_extensions.filters.http.ratelimit.v3.RateLimit>` option to enable `X-RateLimit-*` headers as defined in `draft RFC <https://tools.ietf.org/id/draft-polli-ratelimit-headers-03.html>`_.\n* ratelimit: added :ref:`per route config <envoy_v3_api_msg_extensions.filters.http.ratelimit.v3.RateLimitPerRoute>` for rate limit filter.\n* ratelimit: added support for optional :ref:`descriptor_key <envoy_v3_api_field_config.route.v3.RateLimit.Action.generic_key>` to Generic Key action.\n* rbac filter: added the name of the matched policy to the response code detail when a request is rejected by the RBAC filter.\n* rbac filter: added a log action to the :ref:`RBAC filter <envoy_v3_api_msg_config.rbac.v3.RBAC>` which sets dynamic metadata to inform access loggers whether to log.\n* redis: added fault injection support :ref:`fault injection for redis proxy <envoy_v3_api_field_extensions.filters.network.redis_proxy.v3.RedisProxy.faults>`, described further in :ref:`configuration documentation <config_network_filters_redis_proxy>`.\n* router: added a new :ref:`rate limited retry back off <envoy_v3_api_msg_config.route.v3.RetryPolicy.RateLimitedRetryBackOff>` strategy that uses headers like `Retry-After` or `X-RateLimit-Reset` to decide the back off interval.\n* router: added new\n  :ref:`envoy-ratelimited<config_http_filters_router_retry_policy-envoy-ratelimited>`\n  retry policy, which allows retrying envoy's own rate limited responses.\n* router: added new :ref:`host_rewrite_path_regex <envoy_v3_api_field_config.route.v3.RouteAction.host_rewrite_path_regex>`\n  option, which allows rewriting Host header based on path.\n* router: added support for DYNAMIC_METADATA :ref:`header formatter <config_http_conn_man_headers_custom_request_headers>`.\n* router_check_tool: added support for 
`request_header_matches`, `response_header_matches` to :ref:`router check tool <config_tools_router_check_tool>`.\n* signal: added support for calling fatal error handlers without envoy's signal handler, via FatalErrorHandler::callFatalErrorHandlers().\n* stats: added optional histograms to :ref:`cluster stats <config_cluster_manager_cluster_stats_request_response_sizes>`\n  that track headers and body sizes of requests and responses.\n* stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it.\n* tap: added :ref:`generic body matcher<envoy_v3_api_msg_config.tap.v3.HttpGenericBodyMatch>` to scan http requests and responses for text or hex patterns.\n* tcp_proxy: added :ref:`max_downstream_connection_duration<envoy_v3_api_field_extensions.filters.network.tcp_proxy.v3.TcpProxy.max_downstream_connection_duration>` for downstream connection. When max duration is reached the connection will be closed.\n* tcp_proxy: allow earlier network filters to set metadataMatchCriteria on the connection StreamInfo to influence load balancing.\n* tls: added OCSP stapling support through the :ref:`ocsp_staple <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.TlsCertificate>` and :ref:`ocsp_staple_policy <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>` configuration options. See :ref:`OCSP Stapling <arch_overview_ssl_ocsp_stapling>` for usage and runtime flags.\n* tls: introduce new :ref:`extension point<envoy_v3_api_field_extensions.transport_sockets.tls.v3.CommonTlsContext.custom_handshaker>` for overriding :ref:`TLS handshaker <arch_overview_ssl>` behavior.\n* tls: switched from using socket BIOs to using custom BIOs that know how to interact with IoHandles. 
The feature can be disabled by setting runtime feature `envoy.reloadable_features.tls_use_io_handle_bio` to false.\n* tracing: added ability to set some :ref:`optional segment fields<envoy_v3_api_field_config.trace.v3.XRayConfig.segment_fields>` in the AWS X-Ray tracer.\n* udp_proxy: added :ref:`hash_policies <envoy_v3_api_msg_extensions.filters.udp.udp_proxy.v3.UdpProxyConfig>` to support hash based routing.\n* udp_proxy: added :ref:`use_original_src_ip <envoy_v3_api_msg_extensions.filters.udp.udp_proxy.v3.UdpProxyConfig>` option to replicate the downstream remote address of the packets on the upstream side of Envoy. It is similar to :ref:`original source filter <envoy_v3_api_msg_extensions.filters.listener.original_src.v3.OriginalSrc>`.\n* watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximum jitter parameter :ref:`max_kill_timeout_jitter<envoy_v3_api_field_config.bootstrap.v3.Watchdog.max_kill_timeout_jitter>`.\n* watchdog: supports an extension point where actions can be registered to fire on watchdog events such as miss, megamiss, kill and multikill. See :ref:`watchdog actions<envoy_v3_api_field_config.bootstrap.v3.Watchdog.actions>`.\n* watchdog: watchdog action extension that does cpu profiling. See :ref:`Profile Action <envoy_v3_api_file_envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto>`.\n* watchdog: watchdog action extension that sends SIGABRT to the stuck thread to terminate the process. See :ref:`Abort Action <envoy_v3_api_file_envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto>`.\n* xds: added :ref:`extension config discovery<envoy_v3_api_msg_config.core.v3.ExtensionConfigSource>` support for HTTP filters.\n* xds: added support for mixed v2/v3 discovery response, which enables type url downgrade and upgrade. 
This feature is disabled by default and is controlled by runtime guard `envoy.reloadable_features.enable_type_url_downgrade_and_upgrade`.\n* zlib: added option to use `zlib-ng <https://github.com/zlib-ng/zlib-ng>`_ as zlib library.\n\nDeprecated\n----------\n\n* build: alpine based debug image is deprecated in favor of :ref:`Ubuntu based debug image <install_binaries>`.\n* cluster: the :ref:`track_timeout_budgets <envoy_v3_api_field_config.cluster.v3.Cluster.track_timeout_budgets>`\n  field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration <envoy_v3_api_field_config.cluster.v3.Cluster.track_cluster_stats>`.\n* ext_authz: the :ref:`dynamic metadata <envoy_v3_api_field_service.auth.v3.OkHttpResponse.dynamic_metadata>` field in :ref:`OkHttpResponse <envoy_v3_api_msg_service.auth.v3.OkHttpResponse>` has been deprecated in favor of :ref:`dynamic metadata <envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>` field in :ref:`CheckResponse <envoy_v3_api_msg_service.auth.v3.CheckResponse>`.\n* hds: the :ref:`endpoints_health <envoy_v3_api_field_service.health.v3.EndpointHealthResponse.endpoints_health>`\n  field has been deprecated in favor of :ref:`cluster_endpoints_health <envoy_v3_api_field_service.health.v3.EndpointHealthResponse.cluster_endpoints_health>` to maintain\n  grouping by cluster and locality.\n* router: the :ref:`include_vh_rate_limits <envoy_v3_api_field_config.route.v3.RouteAction.include_vh_rate_limits>` field has been deprecated in favor of :ref:`vh_rate_limits <envoy_v3_api_field_extensions.filters.http.ratelimit.v3.RateLimitPerRoute.vh_rate_limits>`.\n* router: the :ref:`max_grpc_timeout <envoy_v3_api_field_config.route.v3.RouteAction.max_grpc_timeout>` field has been deprecated in favor of :ref:`grpc_timeout_header_max <envoy_v3_api_field_config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max>`.\n* router: the :ref:`grpc_timeout_offset 
<envoy_v3_api_field_config.route.v3.RouteAction.grpc_timeout_offset>` field has been deprecated in favor of :ref:`grpc_timeout_header_offset <envoy_v3_api_field_config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_offset>`.\n* tap: the :ref:`match_config <envoy_v3_api_field_config.tap.v3.TapConfig.match_config>` field has been deprecated in favor of\n  :ref:`match <envoy_v3_api_field_config.tap.v3.TapConfig.match>` field.\n* router_check_tool: `request_header_fields`, `response_header_fields` config deprecated in favor of `request_header_matches`, `response_header_matches`.\n* watchdog: :ref:`watchdog <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.watchdog>` deprecated in favor of :ref:`watchdogs <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.watchdogs>`.\n"
  },
  {
    "path": "docs/root/version_history/v1.0.0.rst",
    "content": "1.0.0 (September 12, 2016)\n==========================\n\nChanges\n-------\n\nInitial open source release.\n"
  },
  {
    "path": "docs/root/version_history/v1.1.0.rst",
    "content": "1.1.0 (November 30, 2016)\n=========================\n\nChanges\n-------\n\n* Switch from Jannson to RapidJSON for our JSON library (allowing for a configuration schema in\n  1.2.0).\n* Upgrade :ref:`recommended version <install_requirements>` of various other libraries.\n* Configurable DNS refresh rate for DNS service discovery types.\n* Upstream circuit breaker configuration can be :ref:`overridden via runtime\n  <config_cluster_manager_cluster_runtime>`.\n* :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n* Generic header matching routing rule.\n* HTTP/2 graceful connection draining (double GOAWAY).\n* DynamoDB filter :ref:`per shard statistics <config_http_filters_dynamo>` (pre-release AWS\n  feature).\n* Initial release of the :ref:`fault injection HTTP filter <config_http_filters_fault_injection>`.\n* HTTP :ref:`rate limit filter <config_http_filters_rate_limit>` enhancements (note that the\n  configuration for HTTP rate limiting is going to be overhauled in 1.2.0).\n* Added :ref:`refused-stream retry policy <config_http_filters_router_x-envoy-retry-on>`.\n* Multiple :ref:`priority queues <arch_overview_http_routing_priority>` for upstream clusters\n  (configurable on a per route basis, with separate connection pools, circuit breakers, etc.).\n* Added max connection circuit breaking to the :ref:`TCP proxy filter <arch_overview_tcp_proxy>`.\n* Added :ref:`CLI <operations_cli>` options for setting the logging file flush interval as well\n  as the drain/shutdown time during hot restart.\n* A very large number of performance enhancements for core HTTP/TCP proxy flows as well as a\n  few new configuration flags to allow disabling expensive features if they are not needed\n  (specifically request ID generation and dynamic response code stats).\n* Support Mongo 3.2 in the :ref:`Mongo sniffing filter <config_network_filters_mongo_proxy>`.\n* Lots of other small fixes and enhancements not listed.\n"
  },
  {
    "path": "docs/root/version_history/v1.10.0.rst",
    "content": "1.10.0 (Apr 5, 2019)\n====================\n\nChanges\n-------\n\n* access log: added a new flag for upstream retry count exceeded.\n* access log: added a :ref:`gRPC filter <envoy_api_msg_config.filter.accesslog.v2.GrpcStatusFilter>` to allow filtering on gRPC status.\n* access log: added a new flag for stream idle timeout.\n* access log: added a new field for upstream transport failure reason in :ref:`file access logger<config_access_log_format_upstream_transport_failure_reason>` and\n  :ref:`gRPC access logger<envoy_api_field_data.accesslog.v2.AccessLogCommon.upstream_transport_failure_reason>` for HTTP access logs.\n* access log: added new fields for downstream x509 information (URI sans and subject) to file and gRPC access logger.\n* admin: the admin server can now be accessed via HTTP/2 (prior knowledge).\n* admin: changed HTTP response status code from 400 to 405 when attempting to GET a POST-only route (such as /quitquitquit).\n* buffer: fix vulnerabilities when allocation fails.\n* build: releases are built with GCC-7 and linked with LLD.\n* build: dev docker images :ref:`have been split <install_binaries>` from tagged images for easier\n  discoverability in Docker Hub. Additionally, we now build images for point releases.\n* config: added support of using google.protobuf.Any in opaque configs for extensions.\n* config: logging warnings when deprecated fields are in use.\n* config: removed deprecated --v2-config-only from command line config.\n* config: removed deprecated_v1 sds_config from :ref:`Bootstrap config <config_overview_bootstrap>`.\n* config: removed the deprecated_v1 config option from :ref:`ring hash <envoy_api_msg_Cluster.RingHashLbConfig>`.\n* config: removed REST_LEGACY as a valid :ref:`ApiType <envoy_api_field_core.ApiConfigSource.api_type>`.\n* config: finish cluster warming only when a named response i.e. ClusterLoadAssignment associated to the cluster being warmed comes in the EDS response. 
This is a behavioural change from the current implementation where warming of cluster completes on missing load assignments also.\n* config: use Envoy cpuset size to set the default number or worker threads if :option:`--cpuset-threads` is enabled.\n* config: added support for :ref:`initial_fetch_timeout <envoy_api_field_core.ConfigSource.initial_fetch_timeout>`. The timeout is disabled by default.\n* cors: added :ref:`filter_enabled & shadow_enabled RuntimeFractionalPercent flags <cors-runtime>` to filter.\n* csrf: added :ref:`CSRF filter <config_http_filters_csrf>`.\n* ext_authz: added support for buffering request body.\n* ext_authz: migrated from v2alpha to v2 and improved docs.\n* ext_authz: added a configurable option to make the gRPC service cross-compatible with V2Alpha. Note that this feature is already deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version.\n* ext_authz: migrated from v2alpha to v2 and improved the documentation.\n* ext_authz: authorization request and response configuration has been separated into two distinct objects: :ref:`authorization request\n  <envoy_api_field_config.filter.http.ext_authz.v2.HttpService.authorization_request>` and :ref:`authorization response\n  <envoy_api_field_config.filter.http.ext_authz.v2.HttpService.authorization_response>`. 
In addition, :ref:`client headers\n  <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationResponse.allowed_client_headers>` and :ref:`upstream headers\n  <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationResponse.allowed_upstream_headers>` replaces the previous *allowed_authorization_headers* object.\n  All the control header lists now support :ref:`string matcher <envoy_api_msg_type.matcher.StringMatcher>` instead of standard string.\n* fault: added the :ref:`max_active_faults\n  <envoy_api_field_config.filter.http.fault.v2.HTTPFault.max_active_faults>` setting, as well as\n  :ref:`statistics <config_http_filters_fault_injection_stats>` for the number of active faults\n  and the number of faults the overflowed.\n* fault: added :ref:`response rate limit\n  <envoy_api_field_config.filter.http.fault.v2.HTTPFault.response_rate_limit>` fault injection.\n* fault: added :ref:`HTTP header fault configuration\n  <config_http_filters_fault_injection_http_header>` to the HTTP fault filter.\n* governance: extending Envoy deprecation policy from 1 release (0-3 months) to 2 releases (3-6 months).\n* health check: expected response codes in http health checks are now :ref:`configurable <envoy_api_msg_core.HealthCheck.HttpHealthCheck>`.\n* http: added new grpc_http1_reverse_bridge filter for converting gRPC requests into HTTP/1.1 requests.\n* http: fixed a bug where Content-Length:0 was added to HTTP/1 204 responses.\n* http: added :ref:`max request headers size <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.max_request_headers_kb>`. The default behaviour is unchanged.\n* http: added modifyDecodingBuffer/modifyEncodingBuffer to allow modifying the buffered request/response data.\n* http: added encodeComplete/decodeComplete. These are invoked at the end of the stream, after all data has been encoded/decoded respectively. 
Default implementation is a no-op.\n* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging <arch_overview_outlier_detection_logging>`.\n* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy<config_network_filters_mysql_proxy>` for more details.\n* performance: new buffer implementation (disabled by default; to test it, add \"--use-libevent-buffers 0\" to the command-line arguments when starting Envoy).\n* jwt_authn: added :ref:`filter_state_rules <envoy_api_field_config.filter.http.jwt_authn.v2alpha.JwtAuthentication.filter_state_rules>` to allow specifying requirements from filterState by other filters.\n* ratelimit: removed deprecated rate limit configuration from bootstrap.\n* redis: added :ref:`hashtagging <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings.enable_hashtagging>` to guarantee a given key's upstream.\n* redis: added :ref:`latency stats <config_network_filters_redis_proxy_per_command_stats>` for commands.\n* redis: added :ref:`success and error stats <config_network_filters_redis_proxy_per_command_stats>` for commands.\n* redis: migrate hash function for host selection to `MurmurHash2 <https://sites.google.com/site/murmurhash>`_ from std::hash. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS.\n* redis: added :ref:`latency_in_micros <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.latency_in_micros>` to specify the redis commands stats time unit in microseconds.\n* router: added ability to configure a :ref:`retry policy <envoy_api_msg_route.RetryPolicy>` at the\n  virtual host level.\n* router: added reset reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. 
reset reason:`\n* router: added :ref:`rq_reset_after_downstream_response_started <config_http_filters_router_stats>` counter stat to router stats.\n* router: added per-route configuration of :ref:`internal redirects <envoy_api_field_route.RouteAction.internal_redirect_action>`.\n* router: removed deprecated route-action level headers_to_add/remove.\n* router: made :ref:`max retries header <config_http_filters_router_x-envoy-max-retries>` take precedence over the number of retries in route and virtual host retry policies.\n* router: added support for prefix wildcards in :ref:`virtual host domains<envoy_api_field_route.VirtualHost.domains>`\n* stats: added support for histograms in prometheus\n* stats: added usedonly flag to prometheus stats to only output metrics which have been\n  updated at least once.\n* stats: added gauges tracking remaining resources before circuit breakers open.\n* tap: added new alpha :ref:`HTTP tap filter <config_http_filters_tap>`.\n* tls: enabled TLS 1.3 on the server-side (non-FIPS builds).\n* upstream: add hash_function to specify the hash function for :ref:`ring hash<envoy_api_msg_Cluster.RingHashLbConfig>` as either xxHash or `murmurHash2 <https://sites.google.com/site/murmurhash>`_. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. 
This is typically the case when compiled on Linux and not macOS.\n* upstream: added :ref:`degraded health value<arch_overview_load_balancing_degraded>` which allows\n  routing to certain hosts only when there are insufficient healthy hosts available.\n* upstream: add cluster factory to allow creating and registering :ref:`custom cluster type<arch_overview_service_discovery_types_custom>`.\n* upstream: added a :ref:`circuit breaker <arch_overview_circuit_break_cluster_maximum_connection_pools>` to limit the number of concurrent connection pools in use.\n* tracing: added :ref:`verbose <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing>` to support logging annotations on spans.\n* upstream: added support for host weighting and :ref:`locality weighting <arch_overview_load_balancing_locality_weighted_lb>` in the :ref:`ring hash load balancer <arch_overview_load_balancing_types_ring_hash>`, and added a :ref:`maximum_ring_size<envoy_api_field_Cluster.RingHashLbConfig.maximum_ring_size>` config parameter to strictly bound the ring size.\n* zookeeper: added a ZooKeeper proxy filter that parses ZooKeeper messages (requests/responses/events).\n  Refer to :ref:`ZooKeeper proxy<config_network_filters_zookeeper_proxy>` for more details.\n* upstream: added configuration option to select any host when the fallback policy fails.\n* upstream: stopped incrementing upstream_rq_total for HTTP/1 conn pool when request is circuit broken.\n\nDeprecated\n----------\n\n* Use of `use_alpha` in :ref:`Ext-Authz Authorization Service <envoy_api_file_envoy/service/auth/v2/external_auth.proto>` is deprecated. 
It should be used for a short time, and only when transitioning from alpha to V2 release version.\n* Use of `enabled` in `CorsPolicy`, found in\n  :ref:`route.proto <envoy_api_file_envoy/api/v2/route/route.proto>`.\n  Set the `filter_enabled` field instead.\n* Use of the `type` field in the `FaultDelay` message (found in\n  :ref:`fault.proto <envoy_api_file_envoy/config/filter/fault/v2/fault.proto>`)\n  has been deprecated. It was never used and setting it has no effect. It will be removed in the\n  following release.\n"
  },
  {
    "path": "docs/root/version_history/v1.11.0.rst",
    "content": "1.11.0 (July 11, 2019)\n======================\n\nChanges\n-------\n\n* access log: added a new field for downstream TLS session ID to file and gRPC access logger.\n* access log: added a new field for route name to file and gRPC access logger.\n* access log: added a new field for response code details in :ref:`file access logger<config_access_log_format_response_code_details>` and :ref:`gRPC access logger<envoy_api_field_data.accesslog.v2.HTTPResponseProperties.response_code_details>`.\n* access log: added several new variables for exposing information about the downstream TLS connection to :ref:`file access logger<config_access_log_format_response_code_details>` and :ref:`gRPC access logger<envoy_api_field_data.accesslog.v2.AccessLogCommon.tls_properties>`.\n* access log: added a new flag for request rejected due to failed strict header check.\n* admin: the administration interface now includes a :ref:`/ready endpoint <operations_admin_interface>` for easier readiness checks.\n* admin: extend :ref:`/runtime_modify endpoint <operations_admin_interface_runtime_modify>` to support parameters within the request body.\n* admin: the :ref:`/listener endpoint <operations_admin_interface_listeners>` now returns :ref:`listeners.proto<envoy_api_msg_admin.v2alpha.Listeners>` which includes listener names and ports.\n* admin: added host priority to :http:get:`/clusters` and :http:get:`/clusters?format=json` endpoint response\n* admin: the :ref:`/clusters endpoint <operations_admin_interface_clusters>` now shows hostname\n  for each host, useful for DNS based clusters.\n* api: track and report requests issued since last load report.\n* build: releases are built with Clang and linked with LLD.\n* config: added :ref:stats_server_version_override` <envoy_api_field_config.bootstrap.v2.Bootstrap.stats_server_version_override>` in bootstrap, that can be used to override :ref:`server.version statistic <server_statistics>`.\n* control-plane: management servers can 
respond with HTTP 304 to indicate that config is up to date for Envoy proxies polling a :ref:`REST API Config Type <envoy_api_field_core.ApiConfigSource.api_type>`\n* csrf: added support for allowlisting additional source origins.\n* dns: added support for getting DNS record TTL which is used by STRICT_DNS/LOGICAL_DNS cluster as DNS refresh rate.\n* dubbo_proxy: support the :ref:`dubbo proxy filter <config_network_filters_dubbo_proxy>`.\n* dynamo_request_parser: adding support for transactions. Adds check for new types of dynamodb operations (TransactWriteItems, TransactGetItems) and awareness for new types of dynamodb errors (IdempotentParameterMismatchException, TransactionCanceledException, TransactionInProgressException).\n* eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter <envoy_api_msg_ClusterLoadAssignment.Policy>`.\n* eds: removed max limit for `load_balancing_weight`.\n* event: added :ref:`loop duration and poll delay statistics <operations_performance>`.\n* ext_authz: added a `x-envoy-auth-partial-body` metadata header set to `false|true` indicating if there is a partial body sent in the authorization request message.\n* ext_authz: added configurable status code that allows customizing HTTP responses on filter check status errors.\n* ext_authz: added option to `ext_authz` that allows the filter clearing route cache.\n* grpc-json: added support for :ref:`auto mapping\n  <envoy_api_field_config.filter.http.transcoder.v2.GrpcJsonTranscoder.auto_mapping>`.\n* health check: added :ref:`initial jitter <envoy_api_field_core.HealthCheck.initial_jitter>` to add jitter to the first health check in order to prevent thundering herd on Envoy startup.\n* hot restart: stats are no longer shared between hot restart parent/child via shared memory, but rather by RPC. 
Hot restart version incremented to 11.\n* http: added the ability to pass a URL encoded PEM encoded peer certificate chain in the\n  :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header.\n* http: fixed a bug where large unbufferable responses were not tracked in stats and logs correctly.\n* http: fixed a crashing bug where gRPC local replies would cause segfaults when upstream access logging was on.\n* http: mitigated a race condition with the :ref:`delayed_close_timeout<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.delayed_close_timeout>` where it could trigger while actively flushing a pending write buffer for a downstream connection.\n* http: added support for :ref:`preserve_external_request_id<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.preserve_external_request_id>` that represents whether the x-request-id should not be reset on edge entry inside mesh\n* http: changed `sendLocalReply` to send percent-encoded `GrpcMessage`.\n* http: added a :ref:`header_prefix <envoy_api_field_config.bootstrap.v2.Bootstrap.header_prefix>` configuration option to allow Envoy to send and process x-custom- prefixed headers rather than x-envoy.\n* http: added :ref:`dynamic forward proxy <arch_overview_http_dynamic_forward_proxy>` support.\n* http: tracking the active stream and dumping state in Envoy crash handlers. 
This can be disabled by building with `--define disable_object_dump_on_signal_trace=disabled`\n* jwt_authn: make filter's parsing of JWT more flexible, allowing syntax like ``jwt=eyJhbGciOiJS...ZFnFIw,extra=7,realm=123``\n* listener: added :ref:`source IP <envoy_api_field_listener.FilterChainMatch.source_prefix_ranges>`\n  and :ref:`source port <envoy_api_field_listener.FilterChainMatch.source_ports>` filter\n  chain matching.\n* lua: exposed functions to Lua to verify digital signature.\n* original_src filter: added the :ref:`filter<config_http_filters_original_src>`.\n* outlier_detector: added configuration :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>` to distinguish locally and externally generated errors. See :ref:`arch_overview_outlier_detection` for full details.\n* rbac: migrated from v2alpha to v2.\n* redis: add support for Redis cluster custom cluster type.\n* redis: automatically route commands using cluster slots for Redis cluster.\n* redis: added :ref:`prefix routing <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.prefix_routes>` to enable routing commands based on their key's prefix to different upstream.\n* redis: added :ref:`request mirror policy <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route.request_mirror_policy>` to enable shadow traffic and/or dual writes.\n* redis: add support for zpopmax and zpopmin commands.\n* redis: added\n  :ref:`max_buffer_size_before_flush <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings.max_buffer_size_before_flush>` to batch commands together until the encoder buffer hits a certain size, and\n  :ref:`buffer_flush_timeout <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings.buffer_flush_timeout>` to control how quickly the buffer is flushed if it is not full.\n* redis: added auth support :ref:`downstream_auth_password 
<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.downstream_auth_password>` for downstream client authentication, and :ref:`auth_password <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProtocolOptions.auth_password>` to configure authentication passwords for upstream server clusters.\n* retry: added a retry predicate that :ref:`rejects canary hosts. <envoy_api_field_route.RetryPolicy.retry_host_predicate>`\n* router: add support for configuring a :ref:`gRPC timeout offset <envoy_api_field_route.RouteAction.grpc_timeout_offset>` on incoming requests.\n* router: added ability to control retry back-off intervals via :ref:`retry policy <envoy_api_msg_route.RetryPolicy.RetryBackOff>`.\n* router: added ability to issue a hedged retry in response to a per try timeout via a :ref:`hedge policy <envoy_api_msg_route.HedgePolicy>`.\n* router: added a route name field to each http route in route.Route list\n* router: added several new variables for exposing information about the downstream TLS connection via :ref:`header\n  formatters <config_http_conn_man_headers_custom_request_headers>`.\n* router: per try timeouts will no longer start before the downstream request has been received in full by the router.This ensures that the per try timeout does not account for slow downstreams and that will not start before the global timeout.\n* router: added :ref:`RouteAction's auto_host_rewrite_header <envoy_api_field_route.RouteAction.auto_host_rewrite_header>` to allow upstream host header substitution with some other header's value\n* router: added support for UPSTREAM_REMOTE_ADDRESS :ref:`header formatter\n  <config_http_conn_man_headers_custom_request_headers>`.\n* router: add ability to reject a request that includes invalid values for\n  headers configured in :ref:`strict_check_headers <envoy_api_field_config.filter.http.router.v2.Router.strict_check_headers>`\n* runtime: added support for :ref:`flexible layering configuration\n  
<envoy_api_field_config.bootstrap.v2.Bootstrap.layered_runtime>`.\n* runtime: added support for statically :ref:`specifying the runtime in the bootstrap configuration\n  <envoy_api_field_config.bootstrap.v2.Runtime.base>`.\n* runtime: :ref:`Runtime Discovery Service (RTDS) <config_runtime_rtds>` support added to layered runtime configuration.\n* sandbox: added :ref:`CSRF sandbox <install_sandboxes_csrf>`.\n* server: ``--define manual_stamp=manual_stamp`` was added to allow server stamping outside of binary rules.\n  more info in the `bazel docs <https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#enabling-optional-features>`_.\n* server: added :ref:`server state <statistics>` statistic.\n* server: added :ref:`initialization_time_ms<statistics>` statistic.\n* subset: added :ref:`list_as_any<envoy_api_field_Cluster.LbSubsetConfig.list_as_any>` option to\n  the subset lb which allows matching metadata against any of the values in a list value\n  on the endpoints.\n* tools: added :repo:`proto <test/tools/router_check/validation.proto>` support for :ref:`router check tool <install_tools_route_table_check_tool>` tests.\n* tracing: add trace sampling configuration to the route, to override the route level.\n* upstream: added :ref:`upstream_cx_pool_overflow <config_cluster_manager_cluster_stats>` for the connection pool circuit breaker.\n* upstream: an EDS management server can now force removal of a host that is still passing active\n  health checking by first marking the host as failed via EDS health check and subsequently removing\n  it in a future update. 
This is a mechanism to work around a race condition in which an EDS\n  implementation may remove a host before it has stopped passing active HC, thus causing the host\n  to become stranded until a future update.\n* upstream: added :ref:`an option <envoy_api_field_Cluster.CommonLbConfig.ignore_new_hosts_until_first_hc>`\n  that allows ignoring new hosts for the purpose of load balancing calculations until they have\n  been health checked for the first time.\n* upstream: added runtime error checking to prevent setting dns type to STRICT_DNS or LOGICAL_DNS when custom resolver name is specified.\n* upstream: added possibility to override fallback_policy per specific selector in :ref:`subset load balancer <arch_overview_load_balancer_subsets>`.\n* upstream: the :ref:`logical DNS cluster <arch_overview_service_discovery_types_logical_dns>` now\n  displays the current resolved IP address in admin output instead of 0.0.0.0.\n\nDeprecated\n----------\n\n* The --max-stats and --max-obj-name-len flags no longer have any effect.\n* Use of :ref:`cluster <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.cluster>` in :ref:`redis_proxy.proto <envoy_api_file_envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto>` is deprecated. Set a :ref:`catch_all_route <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_route>` instead.\n* Use of :ref:`catch_all_cluster <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_cluster>` in :ref:`redis_proxy.proto <envoy_api_file_envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto>` is deprecated. Set a :ref:`catch_all_route <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_route>` instead.\n* Use of json based schema in router check tool tests. 
The tests should follow validation :repo:`schema<test/tools/router_check/validation.proto>`.\n* Use of the v1 style route configuration for the :ref:`TCP proxy filter <config_network_filters_tcp_proxy>`\n  is now fully replaced with listener :ref:`filter chain matching <envoy_api_msg_listener.FilterChainMatch>`.\n  Use this instead.\n* Use of :ref:`runtime <envoy_api_field_config.bootstrap.v2.Bootstrap.runtime>` in :ref:`Bootstrap\n  <envoy_api_msg_config.bootstrap.v2.Bootstrap>`. Use :ref:`layered_runtime\n  <envoy_api_field_config.bootstrap.v2.Bootstrap.layered_runtime>` instead.\n* Specifying \"deprecated_v1: true\" in HTTP and network filter configuration to allow loading JSON\n  configuration is now deprecated and will be removed in a following release. Update any custom\n  filters to use protobuf configuration. A struct can be used for a mostly 1:1 conversion if needed.\n  The `envoy.deprecated_features.v1_filter_json_config` runtime key can be used to temporarily\n  enable this feature once the deprecation becomes fail by default.\n"
  },
  {
    "path": "docs/root/version_history/v1.11.1.rst",
    "content": "1.11.1 (August 13, 2019)\n========================\n\nChanges\n-------\n\n* http: added mitigation of client initiated attacks that result in flooding of the downstream HTTP/2 connections. Those attacks can be logged at the \"warning\" level when the runtime feature `http.connection_manager.log_flood_exception` is enabled. The runtime setting defaults to disabled to avoid log spam when under attack.\n* http: added :ref:`inbound_empty_frames_flood <config_http_conn_man_stats_per_codec>` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting <envoy_api_field_core.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload>`.\n  Runtime feature `envoy.reloadable_features.http2_protocol_options.max_consecutive_inbound_frames_with_empty_payload` overrides :ref:`max_consecutive_inbound_frames_with_empty_payload setting <envoy_api_field_core.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload>`. Large override value (i.e. 2147483647) effectively disables mitigation of inbound frames with empty payload.\n* http: added :ref:`inbound_priority_frames_flood <config_http_conn_man_stats_per_codec>` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound PRIORITY frames. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting <envoy_api_field_core.Http2ProtocolOptions.max_inbound_priority_frames_per_stream>`.\n  Runtime feature `envoy.reloadable_features.http2_protocol_options.max_inbound_priority_frames_per_stream` overrides :ref:`max_inbound_priority_frames_per_stream setting <envoy_api_field_core.Http2ProtocolOptions.max_inbound_priority_frames_per_stream>`. 
Large override value effectively disables flood mitigation of inbound PRIORITY frames.\n* http: added :ref:`inbound_window_update_frames_flood <config_http_conn_man_stats_per_codec>` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound WINDOW_UPDATE frames. The limit is configured by setting the :ref:`max_inbound_window_update_frames_per_data_frame_sent config setting <envoy_api_field_core.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent>`.\n  Runtime feature `envoy.reloadable_features.http2_protocol_options.max_inbound_window_update_frames_per_data_frame_sent` overrides :ref:`max_inbound_window_update_frames_per_data_frame_sent setting <envoy_api_field_core.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent>`. Large override value effectively disables flood mitigation of inbound WINDOW_UPDATE frames.\n* http: added :ref:`outbound_flood <config_http_conn_man_stats_per_codec>` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit. The limit is configured by setting the :ref:`max_outbound_frames config setting <envoy_api_field_core.Http2ProtocolOptions.max_outbound_frames>`\n  Runtime feature `envoy.reloadable_features.http2_protocol_options.max_outbound_frames` overrides :ref:`max_outbound_frames config setting <envoy_api_field_core.Http2ProtocolOptions.max_outbound_frames>`. Large override value effectively disables flood mitigation of outbound frames of all types.\n* http: added :ref:`outbound_control_flood <config_http_conn_man_stats_per_codec>` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit for PING, SETTINGS and RST_STREAM frames. 
The limit is configured by setting the :ref:`max_outbound_control_frames config setting <envoy_api_field_core.Http2ProtocolOptions.max_outbound_control_frames>`.\n  Runtime feature `envoy.reloadable_features.http2_protocol_options.max_outbound_control_frames` overrides :ref:`max_outbound_control_frames config setting <envoy_api_field_core.Http2ProtocolOptions.max_outbound_control_frames>`. Large override value effectively disables flood mitigation of outbound frames of types PING, SETTINGS and RST_STREAM.\n* http: enabled strict validation of HTTP/2 messaging. Previous behavior can be restored using :ref:`stream_error_on_invalid_http_messaging config setting <envoy_api_field_core.Http2ProtocolOptions.stream_error_on_invalid_http_messaging>`.\n  Runtime feature `envoy.reloadable_features.http2_protocol_options.stream_error_on_invalid_http_messaging` overrides :ref:`stream_error_on_invalid_http_messaging config setting <envoy_api_field_core.Http2ProtocolOptions.stream_error_on_invalid_http_messaging>`.\n"
  },
  {
    "path": "docs/root/version_history/v1.11.2.rst",
    "content": "1.11.2 (October 8, 2019)\n========================\n\nChanges\n-------\n\n* http: fixed CVE-2019-15226 by adding a cached byte size in HeaderMap.\n* http: added :ref:`max headers count <envoy_api_field_core.HttpProtocolOptions.max_headers_count>` for http connections. The default limit is 100.\n* upstream: runtime feature `envoy.reloadable_features.max_response_headers_count` overrides the default limit for upstream :ref:`max headers count <envoy_api_field_Cluster.common_http_protocol_options>`\n* http: added :ref:`common_http_protocol_options <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.common_http_protocol_options>`\n  Runtime feature `envoy.reloadable_features.max_request_headers_count` overrides the default limit for downstream :ref:`max headers count <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.common_http_protocol_options>`\n* regex: backported safe regex matcher fix for CVE-2019-15225.\n\nDeprecated\n----------\n\n* Use of :ref:`idle_timeout\n  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.idle_timeout>`\n  is deprecated. Use :ref:`common_http_protocol_options\n  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.common_http_protocol_options>`\n  instead.\n"
  },
  {
    "path": "docs/root/version_history/v1.12.0.rst",
    "content": "1.12.0 (October 31, 2019)\n=========================\n\nChanges\n-------\n\n* access log: added a new flag for :ref:`downstream protocol error <envoy_api_field_data.accesslog.v2.ResponseFlags.downstream_protocol_error>`.\n* access log: added :ref:`buffering <envoy_api_field_config.accesslog.v2.CommonGrpcAccessLogConfig.buffer_size_bytes>` and :ref:`periodical flushing <envoy_api_field_config.accesslog.v2.CommonGrpcAccessLogConfig.buffer_flush_interval>` support to gRPC access logger. Defaults to 16KB buffer and flushing every 1 second.\n* access log: added DOWNSTREAM_DIRECT_REMOTE_ADDRESS and DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT :ref:`access log formatters <config_access_log_format>` and gRPC access logger.\n* access log: gRPC Access Log Service (ALS) support added for :ref:`TCP access logs <envoy_api_msg_config.accesslog.v2.TcpGrpcAccessLogConfig>`.\n* access log: reintroduced :ref:`filesystem <config_access_log_stats>` stats and added the `write_failed` counter to track failed log writes.\n* admin: added ability to configure listener :ref:`socket options <envoy_api_field_config.bootstrap.v2.Admin.socket_options>`.\n* admin: added config dump support for Secret Discovery Service :ref:`SecretConfigDump <envoy_api_msg_admin.v2alpha.SecretsConfigDump>`.\n* admin: added support for :ref:`draining <operations_admin_interface_drain>` listeners via admin interface.\n* admin: added :http:get:`/stats/recentlookups`, :http:post:`/stats/recentlookups/clear`, :http:post:`/stats/recentlookups/disable`, and :http:post:`/stats/recentlookups/enable` endpoints.\n* api: added :ref:`set_node_on_first_message_only <envoy_api_field_core.ApiConfigSource.set_node_on_first_message_only>` option to omit the node identifier from the subsequent discovery requests on the same stream.\n* buffer filter: now populates content-length header if not present. 
This behavior can be temporarily disabled using the runtime feature `envoy.reloadable_features.buffer_filter_populate_content_length`.\n* build: official released binary is now PIE so it can be run with ASLR.\n* config: added support for :ref:`delta xDS <arch_overview_dynamic_config_delta>` (including ADS) delivery.\n* config: enforcing that terminal filters (e.g. HttpConnectionManager for L4, router for L7) be the last in their respective filter chains.\n* config: added access log :ref:`extension filter<envoy_api_field_config.filter.accesslog.v2.AccessLogFilter.extension_filter>`.\n* config: added support for :option:`--reject-unknown-dynamic-fields`, providing independent control\n  over whether unknown fields are rejected in static and dynamic configuration. By default, unknown\n  fields in static configuration are rejected and are allowed in dynamic configuration. Warnings\n  are logged for the first use of any unknown field and these occurrences are counted in the\n  :ref:`server.static_unknown_fields <server_statistics>` and :ref:`server.dynamic_unknown_fields\n  <server_statistics>` statistics.\n* config: added async data access for local and remote data sources.\n* config: changed the default value of :ref:`initial_fetch_timeout <envoy_api_field_core.ConfigSource.initial_fetch_timeout>` from 0s to 15s. This is a change in behaviour in the sense that Envoy will move to the next initialization phase, even if the first config is not delivered in 15s. Refer to :ref:`initialization process <arch_overview_initialization>` for more details.\n* config: added stat :ref:`init_fetch_timeout <config_cluster_manager_cds>`.\n* config: tls_context in Cluster and FilterChain are deprecated in favor of transport socket. 
See :ref:`deprecated documentation<deprecated>` for more information.\n* csrf: added PATCH to supported methods.\n* dns: added support for configuring :ref:`dns_failure_refresh_rate <envoy_api_field_Cluster.dns_failure_refresh_rate>` to set the DNS refresh rate during failures.\n* ext_authz: added :ref:`configurable ability <envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.metadata_context_namespaces>` to send dynamic metadata to the `ext_authz` service.\n* ext_authz: added :ref:`filter_enabled RuntimeFractionalPercent flag <envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.filter_enabled>` to filter.\n* ext_authz: added tracing to the HTTP client.\n* ext_authz: deprecated :ref:`cluster scope stats <config_http_filters_ext_authz_stats>` in favour of filter scope stats.\n* fault: added overrides for default runtime keys in :ref:`HTTPFault <envoy_api_msg_config.filter.http.fault.v2.HTTPFault>` filter.\n* grpc: added :ref:`AWS IAM grpc credentials extension <envoy_api_file_envoy/config/grpc_credential/v2alpha/aws_iam.proto>` for AWS-managed xDS.\n* grpc: added :ref:`gRPC stats filter <config_http_filters_grpc_stats>` for collecting stats about gRPC calls and streaming message counts.\n* grpc-json: added support for :ref:`ignoring unknown query parameters<envoy_api_field_config.filter.http.transcoder.v2.GrpcJsonTranscoder.ignore_unknown_query_parameters>`.\n* grpc-json: added support for :ref:`the grpc-status-details-bin header<envoy_api_field_config.filter.http.transcoder.v2.GrpcJsonTranscoder.convert_grpc_status>`.\n* header to metadata: added :ref:`PROTOBUF_VALUE <envoy_api_enum_value_config.filter.http.header_to_metadata.v2.Config.ValueType.PROTOBUF_VALUE>` and :ref:`ValueEncode <envoy_api_enum_config.filter.http.header_to_metadata.v2.Config.ValueEncode>` to support protobuf Value and Base64 encoding.\n* http: added a default one hour idle timeout to upstream and downstream connections. 
HTTP connections with no streams and no activity will be closed after one hour unless the default idle_timeout is overridden. To disable upstream idle timeouts, set the :ref:`idle_timeout <envoy_api_field_core.HttpProtocolOptions.idle_timeout>` to zero in Cluster :ref:`http_protocol_options<envoy_api_field_Cluster.common_http_protocol_options>`. To disable downstream idle timeouts, either set :ref:`idle_timeout <envoy_api_field_core.HttpProtocolOptions.idle_timeout>` to zero in the HttpConnectionManager :ref:`common_http_protocol_options <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.common_http_protocol_options>` or set the deprecated :ref:`connection manager <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.idle_timeout>` field to zero.\n* http: added the ability to format HTTP/1.1 header keys using :ref:`header_key_format <envoy_api_field_core.Http1ProtocolOptions.header_key_format>`.\n* http: added the ability to reject HTTP/1.1 requests with invalid HTTP header values, using the runtime feature `envoy.reloadable_features.strict_header_validation`.\n* http: changed Envoy to forward existing x-forwarded-proto from upstream trusted proxies. 
Guarded by `envoy.reloadable_features.trusted_forwarded_proto` which defaults true.\n* http: added the ability to configure the behavior of the server response header, via the :ref:`server_header_transformation<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.server_header_transformation>` field.\n* http: added the ability to :ref:`merge adjacent slashes<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.merge_slashes>` in the path.\n* http: :ref:`AUTO <envoy_api_enum_value_config.filter.network.http_connection_manager.v2.HttpConnectionManager.CodecType.AUTO>` codec protocol inference now requires the H2 magic bytes to be the first bytes transmitted by a downstream client.\n* http: remove h2c upgrade headers for HTTP/1 as h2c upgrades are currently not supported.\n* http: absolute URL support is now on by default. The prior behavior can be reinstated by setting :ref:`allow_absolute_url <envoy_api_field_core.Http1ProtocolOptions.allow_absolute_url>` to false.\n* http: support :ref:`host rewrite <envoy_api_msg_config.filter.http.dynamic_forward_proxy.v2alpha.PerRouteConfig>` in the dynamic forward proxy.\n* http: support :ref:`disabling the filter per route <envoy_api_msg_config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfigPerRoute>` in the grpc http1 reverse bridge filter.\n* http: added the ability to :ref:`configure max connection duration <envoy_api_field_core.HttpProtocolOptions.max_connection_duration>` for downstream connections.\n* listeners: added :ref:`continue_on_listener_filters_timeout <envoy_api_field_Listener.continue_on_listener_filters_timeout>` to configure whether a listener will still create a connection when listener filters time out.\n* listeners: added :ref:`HTTP inspector listener filter <config_listener_filters_http_inspector>`.\n* listeners: added :ref:`connection balancer <envoy_api_field_Listener.connection_balance_config>`\n  configuration for TCP 
listeners.\n* listeners: listeners now close the listening socket as part of the draining stage as soon as workers stop accepting their connections.\n* lua: extended `httpCall()` and `respond()` APIs to accept headers with entry values that can be a string or table of strings.\n* lua: extended `dynamicMetadata:set()` to allow setting complex values.\n* metrics_service: added support for flushing histogram buckets.\n* outlier_detector: added :ref:`support for the grpc-status response header <arch_overview_outlier_detection_grpc>` by mapping it to HTTP status. Guarded by envoy.reloadable_features.outlier_detection_support_for_grpc_status which defaults to true.\n* performance: new buffer implementation enabled by default (to disable add \"--use-libevent-buffers 1\" to the command-line arguments when starting Envoy).\n* performance: stats symbol table implementation (disabled by default; to test it, add \"--use-fake-symbol-table 0\" to the command-line arguments when starting Envoy).\n* rbac: added support for DNS SAN as :ref:`principal_name <envoy_api_field_config.rbac.v2.Principal.Authenticated.principal_name>`.\n* redis: added :ref:`enable_command_stats <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings.enable_command_stats>` to enable :ref:`per command statistics <arch_overview_redis_cluster_command_stats>` for upstream clusters.\n* redis: added :ref:`read_policy <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings.read_policy>` to allow reading from redis replicas for Redis Cluster deployments.\n* redis: fixed a bug where the redis health checker ignored the upstream auth password.\n* redis: enable_hashtagging is always enabled when the upstream uses open source Redis cluster protocol.\n* regex: introduced new :ref:`RegexMatcher <envoy_api_msg_type.matcher.RegexMatcher>` type that\n  provides a safe regex implementation for untrusted user input. 
This type is now used in all\n  configuration that processes user provided input. See :ref:`deprecated configuration details\n  <deprecated>` for more information.\n* rbac: added conditions to the policy, see :ref:`condition <envoy_api_field_config.rbac.v2.Policy.condition>`.\n* router: added :ref:`rq_retry_skipped_request_not_complete <config_http_filters_router_stats>` counter stat to router stats.\n* router: :ref:`scoped routing <arch_overview_http_routing_route_scope>` is supported.\n* router: added new :ref:`retriable-headers <config_http_filters_router_x-envoy-retry-on>` retry policy. Retries can now be configured to trigger by arbitrary response header matching.\n* router: added ability for most specific header mutations to take precedence, see :ref:`route configuration's most specific\n  header mutations wins flag <envoy_api_field_RouteConfiguration.most_specific_header_mutations_wins>`.\n* router: added :ref:`respect_expected_rq_timeout <envoy_api_field_config.filter.http.router.v2.Router.respect_expected_rq_timeout>` that instructs ingress Envoy to respect :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress Envoy, when deriving timeout for upstream cluster.\n* router: added new :ref:`per_request_buffer_limit_bytes <envoy_api_field_route.Route.per_request_buffer_limit_bytes>` to route configuration, to allow limiting buffering for retries and shadowing.\n* router: added new :ref:`retriable request headers <envoy_api_field_route.RetryPolicy.retriable_request_headers>` to retry policies. 
Retries can now be configured to only trigger on request header match.\n* router: added the ability to match a route based on whether a TLS certificate has been\n  :ref:`presented <envoy_api_field_route.RouteMatch.TlsContextMatchOptions.presented>` by the\n  downstream connection.\n* router check tool: added coverage reporting & enforcement.\n* router check tool: added comprehensive coverage reporting.\n* router check tool: added deprecated field check.\n* router check tool: added flag for only printing results of failed tests.\n* router check tool: added support for outputting missing tests in the detailed coverage report.\n* router check tool: added coverage reporting for direct response routes.\n* runtime: allows for the ability to parse boolean values.\n* runtime: allows for the ability to parse integers as double values and vice-versa.\n* sds: added :ref:`session_ticket_keys_sds_secret_config <envoy_api_field_auth.DownstreamTlsContext.session_ticket_keys_sds_secret_config>` for loading TLS Session Ticket Encryption Keys using SDS API.\n* server: added a post initialization lifecycle event, in addition to the existing startup and shutdown events.\n* server: added :ref:`per-handler listener stats <config_listener_stats_per_handler>` and\n  :ref:`per-worker watchdog stats <operations_performance_watchdog>` to help diagnosing event\n  loop imbalance and general performance issues.\n* stats: added unit support to histogram.\n* tcp_proxy: the default :ref:`idle_timeout\n  <envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.idle_timeout>` is now 1 hour.\n* thrift_proxy: fixed crashing bug on invalid transport/protocol framing.\n* thrift_proxy: added support for stripping service name from method when using the multiplexed protocol.\n* tls: added verification of IP address SAN fields in certificates against configured SANs in the certificate validation context.\n* tracing: added support to the Zipkin reporter for sending list of spans as Zipkin JSON v2 and 
protobuf message over HTTP.\n* tracing: added tags for gRPC response status and message.\n* tracing: added :ref:`max_path_tag_length <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing>` to support customizing the length of the request path included in the extracted `http.url <https://github.com/opentracing/specification/blob/master/semantic_conventions.md#standard-span-tags-and-log-fields>`_ tag.\n* upstream: added :ref:`an option <envoy_api_field_Cluster.CommonLbConfig.close_connections_on_host_set_change>` that allows draining HTTP, TCP connection pools on cluster membership change.\n* upstream: added :ref:`transport_socket_matches <envoy_api_field_Cluster.transport_socket_matches>`, support using different transport socket config when connecting to different upstream endpoints within a cluster.\n* upstream: added network filter chains to upstream connections, see :ref:`filters<envoy_api_field_Cluster.filters>`.\n* upstream: added new :ref:`failure-percentage based outlier detection<arch_overview_outlier_detection_failure_percentage>` mode.\n* upstream: uses p2c to select hosts for least-requests load balancers if all host weights are the same, even in cases where weights are not equal to 1.\n* upstream: added :ref:`fail_traffic_on_panic <envoy_api_field_Cluster.CommonLbConfig.ZoneAwareLbConfig.fail_traffic_on_panic>` to allow failing all requests to a cluster during panic state.\n* zookeeper: parses responses and emits latency stats.\n\nDeprecated\n----------\n\n* The ORIGINAL_DST_LB :ref:`load balancing policy <envoy_api_field_Cluster.lb_policy>` is\n  deprecated, use CLUSTER_PROVIDED policy instead when configuring an :ref:`original destination\n  cluster <envoy_api_field_Cluster.type>`.\n* The `regex` field in :ref:`StringMatcher <envoy_api_msg_type.matcher.StringMatcher>` has been\n  deprecated in favor of the `safe_regex` field.\n* The `regex` field in :ref:`RouteMatch 
<envoy_api_msg_route.RouteMatch>` has been\n  deprecated in favor of the `safe_regex` field.\n* The `allow_origin` and `allow_origin_regex` fields in :ref:`CorsPolicy\n  <envoy_api_msg_route.CorsPolicy>` have been deprecated in favor of the\n  `allow_origin_string_match` field.\n* The `pattern` and `method` fields in :ref:`VirtualCluster <envoy_api_msg_route.VirtualCluster>`\n  have been deprecated in favor of the `headers` field.\n* The `regex_match` field in :ref:`HeaderMatcher <envoy_api_msg_route.HeaderMatcher>` has been\n  deprecated in favor of the `safe_regex_match` field.\n* The `value` and `regex` fields in :ref:`QueryParameterMatcher\n  <envoy_api_msg_route.QueryParameterMatcher>` has been deprecated in favor of the `string_match`\n  and `present_match` fields.\n* The :option:`--allow-unknown-fields` command-line option,\n  use :option:`--allow-unknown-static-fields` instead.\n* The use of HTTP_JSON_V1 :ref:`Zipkin collector endpoint version\n  <envoy_api_field_config.trace.v2.ZipkinConfig.collector_endpoint_version>` or not explicitly\n  specifying it is deprecated, use HTTP_JSON or HTTP_PROTO instead.\n* The `operation_name` field in :ref:`HTTP connection manager\n  <envoy_api_msg_config.filter.network.http_connection_manager.v2.HttpConnectionManager>`\n  has been deprecated in favor of the `traffic_direction` field in\n  :ref:`Listener <envoy_api_msg_Listener>`. The latter takes priority if\n  specified.\n* The `tls_context` field in :ref:`Filter chain <envoy_api_field_listener.FilterChain.tls_context>` message\n  and :ref:`Cluster <envoy_api_field_Cluster.tls_context>` message have been deprecated in favor of\n  `transport_socket` with name `envoy.transport_sockets.tls`. 
The latter takes priority if specified.\n* The `use_http2` field in\n  :ref:`HTTP health checker <envoy_api_msg_core.HealthCheck.HttpHealthCheck>` has been deprecated in\n  favor of the `codec_client_type` field.\n* The use of :ref:`gRPC bridge filter <config_http_filters_grpc_bridge>` for\n  gRPC stats has been deprecated in favor of the dedicated :ref:`gRPC stats\n  filter <config_http_filters_grpc_stats>`\n* Ext_authz filter stats `ok`, `error`, `denied`, `failure_mode_allowed` in\n  *cluster.<route target cluster>.ext_authz.* namespace is deprecated.\n  Use *http.<stat_prefix>.ext_authz.* namespace to access same counters instead.\n* Use of google.protobuf.Struct for extension opaque configs is deprecated. Use google.protobuf.Any instead or pack\n  udpa.type.v1.TypedStruct in google.protobuf.Any.\n"
  },
  {
    "path": "docs/root/version_history/v1.12.1.rst",
    "content": "1.12.1 (November 8, 2019)\n=========================\n\nChanges\n-------\n\n* listener: fixed CVE-2019-18836 by clearing accept filters before connection creation.\n"
  },
  {
    "path": "docs/root/version_history/v1.12.2.rst",
    "content": "1.12.2 (December 10, 2019)\n==========================\n\nChanges\n-------\n\n* http: fixed CVE-2019-18801 by allocating sufficient memory for request headers.\n* http: fixed CVE-2019-18802 by implementing stricter validation of HTTP/1 headers.\n* http: trim LWS at the end of header keys, for correct HTTP/1.1 header parsing.\n* http: added strict authority checking. This can be reversed temporarily by setting the runtime feature `envoy.reloadable_features.strict_authority_validation` to false.\n* route config: fixed CVE-2019-18838 by checking for presence of host/path headers.\n"
  },
  {
    "path": "docs/root/version_history/v1.12.3.rst",
    "content": "1.12.3 (March 3, 2020)\n======================\n\nChanges\n-------\n\n* buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation.\n* http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection`.\n* listeners: fixed issue where :ref:`TLS inspector listener filter <config_listener_filters_tls_inspector>` could have been bypassed by a client using only TLS 1.3.\n* rbac: added :ref:`url_path <envoy_api_field_config.rbac.v2.Permission.url_path>` for matching URL path without the query and fragment string.\n* sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases.\n"
  },
  {
    "path": "docs/root/version_history/v1.12.4.rst",
    "content": "1.12.4 (June 8, 2020)\n=====================\n\nChanges\n-------\n\n* http: added :ref:`headers_with_underscores_action setting <envoy_api_field_core.HttpProtocolOptions.headers_with_underscores_action>` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior.\n* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters.\n"
  },
  {
    "path": "docs/root/version_history/v1.12.5.rst",
    "content": "1.12.5 (June 30, 2020)\n======================\n\nChanges\n-------\n* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer.\n* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client.\n* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits.\n* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits <config_listeners_runtime>` on active/accepted connections.\n* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits <config_overload_manager>` on active/accepted connections.\n"
  },
  {
    "path": "docs/root/version_history/v1.12.6.rst",
    "content": "1.12.6 (July 7, 2020)\n=====================\n* tls: fixed a bug where wilcard matching for \"\\*.foo.com\" also matched domains of the form \"a.b.foo.com\". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false."
  },
  {
    "path": "docs/root/version_history/v1.12.7.rst",
    "content": "1.12.7 (September 29, 2020)\n===========================\nChanges\n-------\n* http: fixed CVE-2020-25017. Previously header matching did not match on all headers for non-inline headers. This patch\n  changes the default behavior to always logically match on all headers. Multiple individual\n  headers will be logically concatenated with ',' similar to what is done with inline headers. This\n  makes the behavior effectively consistent. This behavior can be temporary reverted by setting\n  the runtime value \"envoy.reloadable_features.header_match_on_all_headers\" to \"false\".\n\n  Targeted fixes have been additionally performed on the following extensions which make them\n  consider all duplicate headers by default as a comma concatenated list:\n\n    1. Any extension using CEL matching on headers.\n    2. The header to metadata filter.\n    3. The JWT filter.\n    4. The Lua filter.\n\n  Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting\n  the runtime value \"envoy.reloadable_features.header_match_on_all_headers\" to false."
  },
  {
    "path": "docs/root/version_history/v1.13.0.rst",
    "content": "1.13.0 (January 20, 2020)\n=========================\n\nChanges\n-------\n\n* access log: added FILTER_STATE :ref:`access log formatters <config_access_log_format>` and gRPC access logger.\n* admin: added the ability to filter :ref:`/config_dump <operations_admin_interface_config_dump>`.\n* access log: added a :ref:`typed JSON logging mode <config_access_log_format_dictionaries>` to output access logs in JSON format with non-string values\n* access log: fixed UPSTREAM_LOCAL_ADDRESS :ref:`access log formatters <config_access_log_format>` to work for http requests\n* access log: added HOSTNAME.\n* api: remove all support for v1\n* api: added ability to specify `mode` for :ref:`Pipe <envoy_api_field_core.Pipe.mode>`.\n* api: support for the v3 xDS API added. See :ref:`api_supported_versions`.\n* aws_request_signing: added new alpha :ref:`HTTP AWS request signing filter <config_http_filters_aws_request_signing>`.\n* buffer: remove old implementation\n* build: official released binary is now built against libc++.\n* cluster: added :ref:`aggregate cluster <arch_overview_aggregate_cluster>` that allows load balancing between clusters.\n* config: all category names of internal envoy extensions are prefixed with the 'envoy.' 
prefix to follow the reverse DNS naming notation.\n* decompressor: remove decompressor hard assert failure and replace with an error flag.\n* ext_authz: added :ref:`configurable ability<envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.include_peer_certificate>` to send the :ref:`certificate<envoy_api_field_service.auth.v2.AttributeContext.Peer.certificate>` to the `ext_authz` service.\n* fault: fixed an issue where the http fault filter would repeatedly check the percentage of abort/delay when the `x-envoy-downstream-service-cluster` header was included in the request to ensure that the actual percentage of abort/delay matches the configuration of the filter.\n* health check: gRPC health checker sets the gRPC deadline to the configured timeout duration.\n* health check: added :ref:`TlsOptions <envoy_api_msg_core.HealthCheck.TlsOptions>` to allow TLS configuration overrides.\n* health check: added :ref:`service_name_matcher <envoy_api_field_core.HealthCheck.HttpHealthCheck.service_name_matcher>` to better compare the service name patterns for health check identity.\n* http: added strict validation that CONNECT is refused as it is not yet implemented. This can be reversed temporarily by setting the runtime feature `envoy.reloadable_features.strict_method_validation` to false.\n* http: added support for http1 trailers. To enable use :ref:`enable_trailers <envoy_api_field_core.Http1ProtocolOptions.enable_trailers>`.\n* http: added the ability to sanitize headers nominated by the Connection header. This new behavior is guarded by envoy.reloadable_features.connection_header_sanitization which defaults to true.\n* http: blocks unsupported transfer-encodings. 
Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.reject_unsupported_transfer_encodings` to false.\n* http: support :ref:`auto_host_rewrite_header<envoy_api_field_config.filter.http.dynamic_forward_proxy.v2alpha.PerRouteConfig.auto_host_rewrite_header>` in the dynamic forward proxy.\n* jwt_authn: added :ref:`allow_missing<envoy_api_field_config.filter.http.jwt_authn.v2alpha.JwtRequirement.allow_missing>` option that accepts request without token but rejects bad request with bad tokens.\n* jwt_authn: added :ref:`bypass_cors_preflight<envoy_api_field_config.filter.http.jwt_authn.v2alpha.JwtAuthentication.bypass_cors_preflight>` to allow bypassing the CORS preflight request.\n* lb_subset_config: new fallback policy for selectors: :ref:`KEYS_SUBSET<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`\n* listeners: added :ref:`reuse_port<envoy_api_field_Listener.reuse_port>` option.\n* logger: added :ref:`--log-format-escaped <operations_cli>` command line option to escape newline characters in application logs.\n* ratelimit: added :ref:`local rate limit <config_network_filters_local_rate_limit>` network filter.\n* rbac: added support for matching all subject alt names instead of first in :ref:`principal_name <envoy_api_field_config.rbac.v2.Principal.Authenticated.principal_name>`.\n* redis: performance improvement for larger split commands by avoiding string copies.\n* redis: correctly follow MOVE/ASK redirection for mirrored clusters.\n* redis: add :ref:`host_degraded_refresh_threshold <envoy_api_field_config.cluster.redis.RedisClusterConfig.host_degraded_refresh_threshold>` and :ref:`failure_refresh_threshold <envoy_api_field_config.cluster.redis.RedisClusterConfig.failure_refresh_threshold>` to refresh topology when nodes are degraded or when requests fails.\n* router: added histograms to show timeout budget usage to the :ref:`cluster stats 
<config_cluster_manager_cluster_stats>`.\n* router check tool: added support for testing and marking coverage for routes of runtime fraction 0.\n* router: added :ref:`request_mirror_policies<envoy_api_field_route.RouteAction.request_mirror_policies>` to support sending multiple mirrored requests in one route.\n* router: added support for REQ(header-name) :ref:`header formatter <config_http_conn_man_headers_custom_request_headers>`.\n* router: added support for percentage-based :ref:`retry budgets <envoy_api_field_cluster.CircuitBreakers.Thresholds.retry_budget>`\n* router: allow using a :ref:`query parameter <envoy_api_field_route.RouteAction.HashPolicy.query_parameter>` for HTTP consistent hashing.\n* router: exposed DOWNSTREAM_REMOTE_ADDRESS as custom HTTP request/response headers.\n* router: added support for :ref:`max_internal_redirects <envoy_api_field_route.RouteAction.max_internal_redirects>` for configurable maximum internal redirect hops.\n* router: skip the Location header when the response code is not a 201 or a 3xx.\n* router: added :ref:`auto_sni <envoy_api_field_core.UpstreamHttpProtocolOptions.auto_sni>` to support setting SNI to transport socket for new upstream connections based on the downstream HTTP host/authority header.\n* router: added support for HOSTNAME :ref:`header formatter\n  <config_http_conn_man_headers_custom_request_headers>`.\n* server: added the :option:`--disable-extensions` CLI option, to disable extensions at startup.\n* server: fixed a bug in config validation for configs with runtime layers.\n* server: added :ref:`workers_started <config_listener_manager_stats>` that indicates whether listeners have been fully initialized on workers.\n* tcp_proxy: added :ref:`ClusterWeight.metadata_match<envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight.metadata_match>`.\n* tcp_proxy: added :ref:`hash_policy<envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.hash_policy>`.\n* thrift_proxy: added 
support for cluster header based routing.\n* thrift_proxy: added stats to the router filter.\n* tls: remove TLS 1.0 and 1.1 from client defaults\n* tls: added support for :ref:`generic string matcher <envoy_api_field_auth.CertificateValidationContext.match_subject_alt_names>` for subject alternative names.\n* tracing: added the ability to set custom tags on both the :ref:`HTTP connection manager<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing>` and the :ref:`HTTP route <envoy_api_field_route.Route.tracing>`.\n* tracing: added upstream_address tag.\n* tracing: added initial support for AWS X-Ray (local sampling rules only) :ref:`X-Ray Tracing <envoy_api_msg_config.trace.v2alpha.XRayConfig>`.\n* tracing: added tags for gRPC request path, authority, content-type and timeout.\n* udp: added initial support for :ref:`UDP proxy <config_udp_listener_filters_udp_proxy>`\n\nDeprecated\n----------\n\n* The `request_headers_for_tags` field in :ref:`HTTP connection manager\n  <envoy_api_msg_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing>`\n  has been deprecated in favor of the :ref:`custom_tags\n  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing.custom_tags>` field.\n* The `verify_subject_alt_name` field in :ref:`Certificate Validation Context\n  <envoy_api_field_auth.CertificateValidationContext.verify_subject_alt_name>`\n  has been deprecated in favor of the :ref:`match_subject_alt_names\n  <envoy_api_field_auth.CertificateValidationContext.match_subject_alt_names>` field.\n* The `request_mirror_policy` field in :ref:`RouteMatch <envoy_api_msg_route.RouteAction>` has been deprecated in\n  favor of the `request_mirror_policies` field.\n* The `service_name` field in\n  :ref:`HTTP health checker <envoy_api_msg_core.HealthCheck.HttpHealthCheck>` has been deprecated in\n  favor of the `service_name_matcher` field.\n* The v2 xDS API is deprecated. 
It will be supported by Envoy until EOY 2020. See\n  :ref:`api_supported_versions`.\n"
  },
  {
    "path": "docs/root/version_history/v1.13.1.rst",
    "content": "1.13.1 (March 3, 2020)\n======================\n\nChanges\n-------\n\n* buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation.\n* http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection`.\n* listeners: fixed issue where :ref:`TLS inspector listener filter <config_listener_filters_tls_inspector>` could have been bypassed by a client using only TLS 1.3.\n* rbac: added :ref:`url_path <envoy_api_field_config.rbac.v2.Permission.url_path>` for matching URL path without the query and fragment string.\n* sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases.\n"
  },
  {
    "path": "docs/root/version_history/v1.13.2.rst",
    "content": "1.13.2 (June 8, 2020)\n=====================\n\nChanges\n-------\n\n* http: added :ref:`headers_with_underscores_action setting <envoy_api_field_core.HttpProtocolOptions.headers_with_underscores_action>` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior.\n* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters.\n"
  },
  {
    "path": "docs/root/version_history/v1.13.3.rst",
    "content": "1.13.3 (June 30, 2020)\n======================\n\nChanges\n-------\n\n* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer.\n* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client.\n* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits.\n* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits <config_listeners_runtime>` on active/accepted connections.\n* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits <config_overload_manager>` on active/accepted connections.\n"
  },
  {
    "path": "docs/root/version_history/v1.13.4.rst",
    "content": "1.13.4 (July 7, 2020)\n=====================\n* tls: fixed a bug where wilcard matching for \"\\*.foo.com\" also matched domains of the form \"a.b.foo.com\". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false."
  },
  {
    "path": "docs/root/version_history/v1.13.5.rst",
    "content": "1.13.5 (September 29, 2020)\n===========================\nChanges\n-------\n* http: fixed CVE-2020-25017. Previously header matching did not match on all headers for non-inline headers. This patch\n  changes the default behavior to always logically match on all headers. Multiple individual\n  headers will be logically concatenated with ',' similar to what is done with inline headers. This\n  makes the behavior effectively consistent. This behavior can be temporary reverted by setting\n  the runtime value \"envoy.reloadable_features.header_match_on_all_headers\" to \"false\".\n\n  Targeted fixes have been additionally performed on the following extensions which make them\n  consider all duplicate headers by default as a comma concatenated list:\n\n    1. Any extension using CEL matching on headers.\n    2. The header to metadata filter.\n    3. The JWT filter.\n    4. The Lua filter.\n\n  Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting\n  the runtime value \"envoy.reloadable_features.header_match_on_all_headers\" to false.\n* http: fixed CVE-2020-25017. The setCopy() header map API previously only set the first header in the case of duplicate\n  non-inline headers. setCopy() now behaves similarly to the other set*() APIs and replaces all found\n  headers with a single value. This may have had security implications in the extauth filter which\n  uses this API. This behavior can be disabled by setting the runtime value\n  \"envoy.reloadable_features.http_set_copy_replace_all_headers\" to false."
  },
  {
    "path": "docs/root/version_history/v1.13.6.rst",
    "content": "1.13.6 (September 29, 2020)\n===========================\nChanges\n-------\n* test: fix flaky test."
  },
  {
    "path": "docs/root/version_history/v1.14.0.rst",
    "content": "1.14.0 (April 8, 2020)\n======================\n\nChanges\n-------\n\n* access log: access logger extensions use the \"envoy.access_loggers\" name space. A mapping\n  of extension names is available in the :ref:`deprecated <deprecated>` documentation.\n* access log: added support for `%DOWNSTREAM_LOCAL_PORT%` :ref:`access log formatters <config_access_log_format>`.\n* access log: fixed `%DOWSTREAM_DIRECT_REMOTE_ADDRESS%` when used with PROXY protocol listener filter.\n* access log: introduced :ref:`connection-level access loggers<envoy_api_field_Listener.access_log>`.\n* adaptive concurrency: fixed bug that allowed concurrency limits to drop below the configured\n  minimum.\n* adaptive concurrency: minRTT is now triggered when the minimum concurrency is maintained for 5\n  consecutive sampling intervals.\n* admin: added support for displaying ip address subject alternate names in :ref:`certs<operations_admin_interface_certs>` end point.\n* admin: added :http:post:`/reopen_logs` endpoint to control log rotation.\n* api: froze v2 xDS API. New feature development in the API should occur in v3 xDS. While the v2 xDS API has\n  been deprecated since 1.13.0, it will continue to be supported by Envoy until EOY 2020. See\n  :ref:`api_supported_versions`.\n* aws_lambda: added :ref:`AWS Lambda filter <config_http_filters_aws_lambda>` that converts HTTP requests to Lambda\n  invokes. 
This effectively makes Envoy act as an egress gateway to AWS Lambda.\n* aws_request_signing: a few fixes so that it works with S3.\n* config: added stat :ref:`update_time <config_cluster_manager_cds>`.\n* config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration <config_overview_extension_configuration>`.\n* datasource: added retry policy for remote async data source.\n* dns: added support for :ref:`dns_failure_refresh_rate <envoy_api_field_config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig.dns_failure_refresh_rate>` for the :ref:`dns cache <envoy_api_msg_config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig>` to set the DNS refresh rate during failures.\n* dns: the STRICT_DNS cluster now only resolves to 0 hosts if DNS resolution successfully returns 0 hosts.\n* eds: added :ref:`hostname <envoy_v3_api_field_config.endpoint.v3.Endpoint.hostname>` field for endpoints and :ref:`hostname <envoy_v3_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field for endpoint's health check config. This enables auto host rewrite and customizing the host header during health checks for eds endpoints.\n* ext_authz: disabled the use of lowercase string matcher for headers matching in HTTP-based `ext_authz`.\n  Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher` to false.\n* fault: added support for controlling abort faults with :ref:`HTTP header fault configuration <config_http_filters_fault_injection_http_header>` to the HTTP fault filter.\n* grpc-json: added support for building HTTP request into\n  `google.api.HttpBody <https://github.com/googleapis/googleapis/blob/master/google/api/httpbody.proto>`_.\n* grpc-stats: added option to limit which messages stats are created for.\n* http: added HTTP/1.1 flood protection. 
Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection`.\n* http: added :ref:`headers_with_underscores_action setting <envoy_api_field_core.HttpProtocolOptions.headers_with_underscores_action>` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior.\n* http: added :ref:`max_stream_duration <envoy_api_field_core.HttpProtocolOptions.max_stream_duration>` to specify the duration of existing streams. See :ref:`connection and stream timeouts <faq_configuration_timeouts>`.\n* http: connection header sanitizing has been modified to always sanitize if there is no upgrade, including when an h2c upgrade attempt has been removed.\n* http: fixed a bug that could send extra METADATA frames and underflow memory when encoding METADATA frames on a connection that was dispatching data.\n* http: fixing a bug in HTTP/1.0 responses where Connection: keep-alive was not appended for connections which were kept alive.\n* http: http filter extensions use the \"envoy.filters.http\" name space. A mapping\n  of extension names is available in the :ref:`deprecated <deprecated>` documentation.\n* http: the runtime feature `http.connection_manager.log_flood_exception` is removed and replaced with a connection access log response code.\n* http: upgrade parser library, which removes support for \"identity\" transfer-encoding value.\n* listener filters: listener filter extensions use the \"envoy.filters.listener\" name space. 
A\n  mapping of extension names is available in the :ref:`deprecated <deprecated>` documentation.\n* listeners: added :ref:`listener filter matcher api <envoy_api_field_listener.ListenerFilter.filter_disabled>` to disable individual listener filter on matching downstream connections.\n* loadbalancing: added support for using hostname for consistent hash loadbalancing via :ref:`consistent_hash_lb_config <envoy_api_field_Cluster.CommonLbConfig.consistent_hashing_lb_config>`.\n* loadbalancing: added support for :ref:`retry host predicates <envoy_api_field_route.RetryPolicy.retry_host_predicate>` in conjunction with consistent hashing load balancers (ring hash and maglev).\n* lua: added a parameter to `httpCall` that makes it possible to have the call be asynchronous.\n* lua: added moonjit support.\n* mongo: the stat emitted for queries without a max time set in the :ref:`MongoDB filter<config_network_filters_mongo_proxy>` was modified to emit correctly for Mongo v3.2+.\n* network filters: added a :ref:`direct response filter <config_network_filters_direct_response>`.\n* network filters: network filter extensions use the \"envoy.filters.network\" name space. 
A mapping\n  of extension names is available in the :ref:`deprecated <deprecated>` documentation.\n* rbac: added :ref:`remote_ip <envoy_api_field_config.rbac.v2.Principal.remote_ip>` and :ref:`direct_remote_ip <envoy_api_field_config.rbac.v2.Principal.direct_remote_ip>` for matching downstream remote IP address.\n* rbac: deprecated :ref:`source_ip <envoy_api_field_config.rbac.v2.Principal.source_ip>` with :ref:`direct_remote_ip <envoy_api_field_config.rbac.v2.Principal.direct_remote_ip>` and :ref:`remote_ip <envoy_api_field_config.rbac.v2.Principal.remote_ip>`.\n* request_id_extension: added an ability to extend request ID handling at :ref:`HTTP connection manager<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.request_id_extension>`.\n* retry: added a retry predicate that :ref:`rejects hosts based on metadata. <envoy_api_field_route.RetryPolicy.retry_host_predicate>`.\n* router: added ability to set attempt count in downstream response, see :ref:`virtual host's include response\n  attempt count config <envoy_api_field_route.VirtualHost.include_attempt_count_in_response>`.\n* router: added additional stats for :ref:`virtual clusters <config_http_filters_router_vcluster_stats>`.\n* router: added :ref:`auto_san_validation <envoy_api_field_core.UpstreamHttpProtocolOptions.auto_san_validation>` to support overrriding SAN validation to transport socket for new upstream connections based on the downstream HTTP host/authority header.\n* router: added the ability to match a route based on whether a downstream TLS connection certificate has been\n  :ref:`validated <envoy_api_field_route.RouteMatch.TlsContextMatchOptions.validated>`.\n* router: added support for :ref:`regex_rewrite\n  <envoy_api_field_route.RouteAction.regex_rewrite>` for path rewriting using regular expressions and capture groups.\n* router: added support for `%DOWNSTREAM_LOCAL_PORT%` :ref:`header formatter <config_http_conn_man_headers_custom_request_headers>`.\n* 
router: don't ignore :ref:`per_try_timeout <envoy_api_field_route.RetryPolicy.per_try_timeout>` when the :ref:`global route timeout <envoy_api_field_route.RouteAction.timeout>` is disabled.\n* router: strip whitespace for :ref:`retry_on <envoy_api_field_route.RetryPolicy.retry_on>`, :ref:`grpc-retry-on header <config_http_filters_router_x-envoy-retry-grpc-on>` and :ref:`retry-on header <config_http_filters_router_x-envoy-retry-on>`.\n* runtime: enabling the runtime feature `envoy.deprecated_features.allow_deprecated_extension_names`\n  disables the use of deprecated extension names.\n* runtime: integer values may now be parsed as booleans.\n* sds: added :ref:`GenericSecret <envoy_api_msg_auth.GenericSecret>` to support secret of generic type.\n* sds: added :ref:`certificate rotation <xds_certificate_rotation>` support for certificates in static resources.\n* server: the SIGUSR1 access log reopen warning now is logged at info level.\n* stat sinks: stat sink extensions use the \"envoy.stat_sinks\" name space. A mapping of extension\n  names is available in the :ref:`deprecated <deprecated>` documentation.\n* thrift_proxy: added router filter stats to docs.\n* tls: added configuration to disable stateless TLS session resumption :ref:`disable_stateless_session_resumption <envoy_api_field_auth.DownstreamTlsContext.disable_stateless_session_resumption>`.\n* tracing: added gRPC service configuration to the OpenCensus Stackdriver and OpenCensus Agent tracers.\n* tracing: tracer extensions use the \"envoy.tracers\" name space. 
A mapping of extension names is\n  available in the :ref:`deprecated <deprecated>` documentation.\n* upstream: added ``upstream_rq_retry_limit_exceeded`` to :ref:`cluster <config_cluster_manager_cluster_stats>`, and :ref:`virtual cluster <config_http_filters_router_vcluster_stats>` stats.\n* upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode<arch_overview_load_balancing_panic_threshold>`.\n* upstream: combined HTTP/1 and HTTP/2 connection pool code. This means that circuit breaker\n  limits for both requests and connections apply to both pool types. Also, HTTP/2 now has\n  the option to limit concurrent requests on a connection, and allow multiple draining\n  connections. The old behavior is deprecated, but can be used during the deprecation\n  period by disabling runtime feature `envoy.reloadable_features.new_http1_connection_pool_behavior` or\n  `envoy.reloadable_features.new_http2_connection_pool_behavior` and then re-configure your clusters or\n  restart Envoy. The behavior will not switch until the connection pools are recreated. The new\n  circuit breaker behavior is described :ref:`here <arch_overview_circuit_break>`.\n* zlib: by default zlib is initialized to use its default strategy (Z_DEFAULT_STRATEGY)\n  instead of the fixed one (Z_FIXED). The difference is that the use of dynamic\n  Huffman codes is enabled now resulting in better compression ratio for normal data.\n\nDeprecated\n----------\n\n* The previous behavior for upstream connection pool circuit breaking described\n  `here <https://www.envoyproxy.io/docs/envoy/v1.13.0/intro/arch_overview/upstream/circuit_breaking>`_ has\n  been deprecated in favor of the new behavior described :ref:`here <arch_overview_circuit_break>`.\n* Access Logger, Listener Filter, HTTP Filter, Network Filter, Stats Sink, and Tracer names have\n  been deprecated in favor of the extension name from the envoy build system. 
Disable the runtime\n  feature \"envoy.deprecated_features.allow_deprecated_extension_names\" to disallow the deprecated\n  names. Use of these extension names generates a log message and increments the\n  \"deprecated_feature_use\" metric in stats.\n\n  .. csv-table::\n    :header: Canonical Names, Deprecated Names\n    :widths: 1, 1\n\n    envoy.access_loggers.file, envoy.file_access_log\n    envoy.access_loggers.http_grpc, envoy.http_grpc_access_log\n    envoy.access_loggers.tcp_grpc, envoy.tcp_grpc_access_log\n    envoy.filters.http.buffer, envoy.buffer\n    envoy.filters.http.cors, envoy.cors\n    envoy.filters.http.csrf, envoy.csrf\n    envoy.filters.http.dynamo, envoy.http_dynamo_filter\n    envoy.filters.http.ext_authz, envoy.ext_authz\n    envoy.filters.http.fault, envoy.fault\n    envoy.filters.http.grpc_http1_bridge, envoy.grpc_http1_bridge\n    envoy.filters.http.grpc_json_transcoder, envoy.grpc_json_transcoder\n    envoy.filters.http.grpc_web, envoy.grpc_web\n    envoy.filters.http.gzip, envoy.gzip\n    envoy.filters.http.health_check, envoy.health_check\n    envoy.filters.http.ip_tagging, envoy.ip_tagging\n    envoy.filters.http.lua, envoy.lua\n    envoy.filters.http.ratelimit, envoy.rate_limit\n    envoy.filters.http.router, envoy.router\n    envoy.filters.http.squash, envoy.squash\n    envoy.filters.listener.http_inspector, envoy.listener.http_inspector\n    envoy.filters.listener.original_dst, envoy.listener.original_dst\n    envoy.filters.listener.original_src, envoy.listener.original_src\n    envoy.filters.listener.proxy_protocol, envoy.listener.proxy_protocol\n    envoy.filters.listener.tls_inspector, envoy.listener.tls_inspector\n    envoy.filters.network.client_ssl_auth, envoy.client_ssl_auth\n    envoy.filters.network.echo, envoy.echo\n    envoy.filters.network.ext_authz, envoy.ext_authz\n    envoy.filters.network.http_connection_manager, envoy.http_connection_manager\n    envoy.filters.network.mongo_proxy, envoy.mongo_proxy\n    
envoy.filters.network.ratelimit, envoy.ratelimit\n    envoy.filters.network.redis_proxy, envoy.redis_proxy\n    envoy.filters.network.tcp_proxy, envoy.tcp_proxy\n    envoy.stat_sinks.dog_statsd, envoy.dog_statsd\n    envoy.stat_sinks.metrics_service, envoy.metrics_service\n    envoy.stat_sinks.statsd, envoy.statsd\n    envoy.tracers.dynamic_ot, envoy.dynamic.ot\n    envoy.tracers.lightstep, envoy.lightstep\n    envoy.tracers.zipkin, envoy.zipkin\n\n  .. note::\n    Some renamed filters produce metadata using their filter name as the metadata namespace:\n\n    * Mongo Proxy Filter\n    * Zookeeper Filter\n\n    The metadata generated by these filters may be consumed by the following extensions, whose\n    configurations may need to be adjusted to use the new names.\n\n    * Access Loggers\n    * HTTP and Network Ext Authz filters\n    * HTTP and Network RBAC filters\n    * Tracers\n\n* The previous behavior of auto ignoring case in headers matching:\n  :ref:`allowed_headers <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationRequest.allowed_headers>`,\n  :ref:`allowed_upstream_headers <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationResponse.allowed_upstream_headers>`,\n  and :ref:`allowed_client_headers <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationResponse.allowed_client_headers>`\n  of HTTP-based `ext_authz` has been deprecated in favor of explicitly setting the\n  :ref:`ignore_case <envoy_api_field_type.matcher.StringMatcher.ignore_case>` field.\n* The `header_fields`, `custom_header_fields`, and `additional_headers` fields for the route checker\n  tool have been deprecated in favor of `request_header_fields`, `response_header_fields`,\n  `additional_request_headers`, and `additional_response_headers`.\n* The `content_length`, `content_type`, `disable_on_etag_header` and `remove_accept_encoding_header`\n  fields in :ref:`HTTP Gzip filter config <envoy_api_msg_config.filter.http.gzip.v2.Gzip>` have\n  been deprecated in 
favor of `compressor`.\n* The statistics counter `header_gzip` in :ref:`HTTP Gzip filter <config_http_filters_gzip>`\n  has been deprecated in favor of `header_compressor_used`.\n* Support for the undocumented HTTP/1.1 `:no-chunks` pseudo-header has been removed. If an extension\n  was using this it can achieve the same behavior via the new `http1StreamEncoderOptions()` API.\n* The grpc_stats filter behavior of by default creating a new stat for every message type seen is deprecated.\n  The default will switch to only creating a fixed set of stats. The previous behavior can be enabled by enabling\n  :ref:`stats_for_all_methods <envoy_api_field_config.filter.http.grpc_stats.v2alpha.FilterConfig.stats_for_all_methods>`,\n  and the previous default can be enabled until the end of the deprecation period by enabling runtime feature\n  `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`.\n* The :ref:`source_ip <envoy_api_field_config.rbac.v2.Principal.source_ip>` field in\n  `RBAC <https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/rbac/v2/rbac.proto>`_ has been deprecated\n  in favor of :ref:`direct_remote_ip <envoy_api_field_config.rbac.v2.Principal.direct_remote_ip>` and\n  :ref:`remote_ip <envoy_api_field_config.rbac.v2.Principal.remote_ip>`.\n"
  },
  {
    "path": "docs/root/version_history/v1.14.1.rst",
    "content": "1.14.1 (April 8, 2020)\n======================\n\nChanges\n-------\n\n* request_id_extension: fixed static initialization for noop request id extension.\n"
  },
  {
    "path": "docs/root/version_history/v1.14.2.rst",
    "content": "1.14.2 (June 8, 2020)\n=====================\n\nChanges\n-------\n\n* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters.\n* http: the :ref:`stream_idle_timeout <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  now also defends against an HTTP/2 peer that does not open stream window once an entire response\n  has been buffered to be sent to a downstream client.\n* listener: Add runtime support for `per-listener limits <config_listeners_runtime>` on\n  active/accepted connections.\n* overload management: Add runtime support for :ref:`global limits <config_overload_manager>`\n  on active/accepted connections.\n"
  },
  {
    "path": "docs/root/version_history/v1.14.3.rst",
    "content": "1.14.3 (June 30, 2020)\n======================\n\nChanges\n-------\n* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer.\n* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client.\n* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits.\n* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits <config_listeners_runtime>` on active/accepted connections.\n* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits <config_overload_manager>` on active/accepted connections.\n"
  },
  {
    "path": "docs/root/version_history/v1.14.4.rst",
    "content": "1.14.4 (July 7, 2020)\n=====================\n* tls: fixed a bug where wilcard matching for \"\\*.foo.com\" also matched domains of the form \"a.b.foo.com\". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false."
  },
  {
    "path": "docs/root/version_history/v1.14.5.rst",
    "content": "1.14.5 (September 29, 2020)\n===========================\nChanges\n-------\n* http: fixed CVE-2020-25017. Previously header matching did not match on all headers for non-inline headers.\n  This patch changes the default behavior to always logically match on all headers. Multiple individual\n  headers will be logically concatenated with ',' similar to what is done with inline headers. This\n  makes the behavior effectively consistent. This behavior can be temporary reverted by setting\n  the runtime value \"envoy.reloadable_features.header_match_on_all_headers\" to \"false\".\n\n  Targeted fixes have been additionally performed on the following extensions which make them\n  consider all duplicate headers by default as a comma concatenated list:\n\n    1. Any extension using CEL matching on headers.\n    2. The header to metadata filter.\n    3. The JWT filter.\n    4. The Lua filter.\n\n  Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting\n  the runtime value \"envoy.reloadable_features.header_match_on_all_headers\" to false.\n* http: fixed CVE-2020-25017. The setCopy() header map API previously only set the first header in the case of duplicate\n  non-inline headers. setCopy() now behaves similarly to the other set*() APIs and replaces all found\n  headers with a single value. This may have had security implications in the extauth filter which\n  uses this API. This behavior can be disabled by setting the runtime value\n  \"envoy.reloadable_features.http_set_copy_replace_all_headers\" to false."
  },
  {
    "path": "docs/root/version_history/v1.15.0.rst",
    "content": "1.15.0 (July 7, 2020)\n=====================\n\n\nIncompatible Behavior Changes\n-----------------------------\n*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required*\n\n* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27.\n* client_ssl_auth: the `auth_ip_white_list` stat has been renamed to\n  :ref:`auth_ip_allowlist <config_network_filters_client_ssl_auth_stats>`.\n* header to metadata: on_header_missing rules with empty values are now rejected (they were skipped before).\n* router: path_redirect now keeps query string by default. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.preserve_query_string_in_path_redirects` to false.\n* tls: fixed a bug where wilcard matching for \"\\*.foo.com\" also matched domains of the form \"a.b.foo.com\". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false.\n\nMinor Behavior Changes\n----------------------\n*Changes that may cause incompatibilities for some users, but should not for most*\n\n* access loggers: applied existing buffer limits to access logs, as well as :ref:`stats <config_access_log_stats>` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false.\n* build: runs as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. 
`ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively.\n* health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters <envoy_api_field_config.filter.http.health_check.v2.HealthCheck.cluster_min_healthy_percentages>` is now interpreted as an integer.\n* hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts).\n* http: changed early error path for HTTP/1.1 so that responses consistently flow through the http connection manager, and the http filter chains. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.early_errors_via_hcm` to false.\n* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false.\n* http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests.\n  Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false.\n* http: stopped overwriting `date` response headers. Responses without a `date` header will still have the header properly set. This behavior can be temporarily reverted by setting `envoy.reloadable_features.preserve_upstream_date` to false.\n* http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false.\n* http: stopped allowing upstream 1xx or 204 responses with Transfer-Encoding or non-zero Content-Length headers. Content-Length of 0 is allowed, but stripped. 
This behavior can be temporarily reverted by setting `envoy.reloadable_features.strict_1xx_and_204_response_headers` to false.\n* http: upstream connections will now automatically set ALPN when this value is not explicitly set elsewhere (e.g. on the upstream TLS config). This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.http_default_alpn` to false.\n* listener: fixed a bug where when a static listener fails to be added to a worker, the listener was not removed from the active listener list.\n* router: extended to allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`.\n* router: extended to allow retries by default when upstream responds with :ref:`x-envoy-overloaded <config_http_filters_router_x-envoy-overloaded_set>`.\n\nBug Fixes\n---------\n*Changes expected to improve the state of the world and are unlikely to have negative effects*\n\n* adaptive concurrency: fixed a minRTT calculation bug where requests started before the concurrency\n  limit was pinned to the minimum would skew the new minRTT value if the replies arrived after the\n  start of the new minRTT window.\n* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer.\n* grpc-json: fixed a bug when in trailers only gRPC response (e.g. 
error) HTTP status code is not being re-written.\n* http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length.\n* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.merge_slashes>` is enabled.\n* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client.\n* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits.\n* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false.\n* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits <config_listeners_runtime>` on active/accepted connections.\n* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits <config_overload_manager>` on active/accepted connections.\n* prometheus stats: fixed the sort order of output lines to comply with the standard.\n* udp: the :ref:`reuse_port <envoy_api_field_Listener.reuse_port>` listener option must now be\n  specified for UDP listeners if concurrency is > 1. 
This previously crashed so is considered a\n  bug fix.\n* upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check.\n\nRemoved Config or Runtime\n-------------------------\n*Normally occurs at the end of the* :ref:`deprecation period <deprecated>`\n\n* http: removed legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and\n  `envoy.reloadable_features.new_http2_connection_pool_behavior`.\n\nNew Features\n------------\n\n* access loggers: added file access logger config :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n* access loggers: added GRPC_STATUS operator on logging format.\n* access loggers: added gRPC access logger config added :ref:`API version <envoy_v3_api_field_extensions.access_loggers.grpc.v3.CommonGrpcAccessLogConfig.transport_api_version>` to explicitly set the version of gRPC service endpoint and message to be used.\n* access loggers: extended specifier for FilterStateFormatter to output :ref:`unstructured log string <config_access_log_format_filter_state>`.\n* admin: added support for dumping EDS config at :ref:`/config_dump?include_eds <operations_admin_interface_config_dump_include_eds>`.\n* aggregate cluster: made route :ref:`retry_priority <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_priority>` predicates work with :ref:`this cluster type <envoy_v3_api_msg_extensions.clusters.aggregate.v3.ClusterConfig>`.\n* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27.\n* build: official released binary is now built with Clang 10.0.0.\n* cluster: added an extension point for configurable :ref:`upstreams <envoy_v3_api_field_config.cluster.v3.Cluster.upstream_config>`.\n* compressor: exposed generic :ref:`compressor <config_http_filters_compressor>` filter to users.\n* config: added :ref:`identifier <config_cluster_manager_cds>` stat that 
reflects control plane identifier.\n* config: added :ref:`version_text <config_cluster_manager_cds>` stat that reflects xDS version.\n* decompressor: exposed generic :ref:`decompressor <config_http_filters_decompressor>` filter to users.\n* dynamic forward proxy: added :ref:`SNI based dynamic forward proxy <config_network_filters_sni_dynamic_forward_proxy>` support.\n* dynamic forward proxy: added configurable :ref:`circuit breakers <dns_cache_circuit_breakers>` for resolver on DNS cache.\n  This behavior can be temporarily disabled by the runtime feature `envoy.reloadable_features.enable_dns_cache_circuit_breakers`.\n  If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers <dns_cache_circuit_breakers>` are configured.\n* dynamic forward proxy: added :ref:`allow_insecure_cluster_options<envoy_v3_api_field_extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig.allow_insecure_cluster_options>` to allow disabling of auto_san_validation and auto_sni.\n* ext_authz filter: added :ref:`v2 deny_at_disable <envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.deny_at_disable>`, :ref:`v3 deny_at_disable <envoy_v3_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.deny_at_disable>`. 
This allows force denying protected paths while filter gets disabled, by setting this key to true.\n* ext_authz filter: added API version field for both :ref:`HTTP <envoy_v3_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.transport_api_version>`\n  and :ref:`Network <envoy_v3_api_field_extensions.filters.network.ext_authz.v3.ExtAuthz.transport_api_version>` filters to explicitly set the version of gRPC service endpoint and message to be used.\n* ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append <envoy_v3_api_field_extensions.filters.http.ext_authz.v3.AuthorizationResponse.allowed_upstream_headers_to_append>` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers.\n* fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults\n  are applied to using :ref:`HTTP headers <config_http_filters_fault_injection_http_header>` to the HTTP fault filter.\n* fault: added support for specifying grpc_status code in abort faults using\n  :ref:`HTTP header <config_http_filters_fault_injection_http_header>` or abort fault configuration in HTTP fault filter.\n* filter: added `upstream_rq_time` stats to the GPRC stats filter.\n  Disabled by default and can be enabled via :ref:`enable_upstream_stats <envoy_v3_api_field_extensions.filters.http.grpc_stats.v3.FilterConfig.enable_upstream_stats>`.\n* grpc: added support for Google gRPC :ref:`custom channel arguments <envoy_v3_api_field_config.core.v3.GrpcService.GoogleGrpc.channel_args>`.\n* grpc-json: added support for streaming response using\n  `google.api.HttpBody <https://github.com/googleapis/googleapis/blob/master/google/api/httpbody.proto>`_.\n* grpc-json: send a `x-envoy-original-method` header to grpc services.\n* gzip filter: added option to set zlib's next output buffer size.\n* hds: updated to allow to explicitly set the API version of gRPC service endpoint and message to 
be used.\n* header to metadata: added support for regex substitutions on header values.\n* health checks: allowed configuring health check transport sockets by specifying :ref:`transport socket match criteria <envoy_v3_api_field_config.core.v3.HealthCheck.transport_socket_match_criteria>`.\n* http: added :ref:`local_reply config <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.local_reply_config>` to http_connection_manager to customize :ref:`local reply <config_http_conn_man_local_reply>`.\n* http: added :ref:`stripping port from host header <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.strip_matching_host_port>` support.\n* http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation<arch_overview_upgrades>` for details.\n* listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update.\n  Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false.\n  Also added additional draining filter chain stat for :ref:`listener manager <config_listener_manager_stats>` to track the number of draining filter chains and the number of in place update attempts.\n* logger: added :option:`--log-format-prefix-with-location` command line option to prefix '%v' with file path and line number.\n* lrs: added new *envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters* field\n  in LRS response, which allows management servers to avoid explicitly listing all clusters it is\n  interested in; behavior is allowed based on new \"envoy.lrs.supports_send_all_clusters\" capability\n  in :ref:`client_features<envoy_v3_api_field_config.core.v3.Node.client_features>` field.\n* lrs: updated to 
allow to explicitly set the API version of gRPC service endpoint and message to be used.\n* lua: added :ref:`per route config <envoy_v3_api_msg_extensions.filters.http.lua.v3.LuaPerRoute>` for Lua filter.\n* lua: added tracing to the ``httpCall()`` API.\n* metrics service: added :ref:`API version <envoy_v3_api_field_config.metrics.v3.MetricsServiceConfig.transport_api_version>` to explicitly set the version of gRPC service endpoint and message to be used.\n* network filters: added a :ref:`postgres proxy filter <config_network_filters_postgres_proxy>`.\n* network filters: added a :ref:`rocketmq proxy filter <config_network_filters_rocketmq_proxy>`.\n* performance: enabled stats symbol table implementation by default. To disable it, add\n  `--use-fake-symbol-table 1` to the command-line arguments when starting Envoy.\n* ratelimit: added support for use of dynamic metadata :ref:`dynamic_metadata <envoy_v3_api_field_config.route.v3.RateLimit.Action.dynamic_metadata>` as a ratelimit action.\n* ratelimit: added :ref:`API version <envoy_v3_api_field_config.ratelimit.v3.RateLimitServiceConfig.transport_api_version>` to explicitly set the version of gRPC service endpoint and message to be used.\n* ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override <envoy_v3_api_field_config.route.v3.RateLimit.limit>` config.\n* redis: added acl support :ref:`downstream_auth_username <envoy_v3_api_field_extensions.filters.network.redis_proxy.v3.RedisProxy.downstream_auth_username>` for downstream client ACL authentication, and :ref:`auth_username <envoy_v3_api_field_extensions.filters.network.redis_proxy.v3.RedisProtocolOptions.auth_username>` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled.\n* regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 <envoy_v3_api_field_type.matcher.v3.RegexMatcher.GoogleRE2.max_program_size>`.\n* 
request_id: added to :ref:`always_set_request_id_in_response setting <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.always_set_request_id_in_response>`\n  to set :ref:`x-request-id <config_http_conn_man_headers_x-request-id>` header in response even if\n  tracing is not forced.\n* router: added more fine grained internal redirect configs to the :ref:`internal_redirect_policy\n  <envoy_v3_api_field_config.route.v3.RouteAction.internal_redirect_policy>` field.\n* router: added regex substitution support for header based hashing.\n* router: added support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters\n  <config_http_conn_man_headers_custom_request_headers>`.\n* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent <envoy_v3_api_field_config.route.v3.RateLimit.Action.RequestHeaders.skip_if_absent>` field is set to true.\n* runtime: added new gauge :ref:`deprecated_feature_seen_since_process_start <runtime_stats>` that gets reset across hot restarts.\n* server: added the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose().\n* server: added :ref:`server.envoy_bug_failures <server_statistics>` statistic to count ENVOY_BUG failures.\n* stats: added the option to :ref:`report counters as deltas <envoy_v3_api_field_config.metrics.v3.MetricsServiceConfig.report_counters_as_deltas>` to the metrics service stats sink.\n* tracing: made tracing configuration fully dynamic and every HTTP connection manager\n  can now have a separate :ref:`tracing provider <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider>`.\n* udp: upgraded :ref:`udp_proxy <config_udp_listener_filters_udp_proxy>` filter to v3 and promoted it out of alpha.\n\nDeprecated\n----------\n\n* Tracing provider configuration as part of :ref:`bootstrap config 
<envoy_v3_api_field_config.bootstrap.v3.Bootstrap.tracing>`\n  has been deprecated in favor of configuration as part of :ref:`HTTP connection manager\n  <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider>`.\n* The :ref:`HTTP Gzip filter <config_http_filters_gzip>` has been deprecated in favor of\n  :ref:`Compressor <config_http_filters_compressor>`.\n* The * :ref:`GoogleRE2.max_program_size<envoy_v3_api_field_type.matcher.v3.RegexMatcher.GoogleRE2.max_program_size>`\n  field is now deprecated. Management servers are expected to validate regexp program sizes\n  instead of expecting the client to do it. Alternatively, the max program size can be enforced by Envoy via runtime.\n* The :ref:`internal_redirect_action <envoy_v3_api_field_config.route.v3.RouteAction.internal_redirect_action>`\n  field and :ref:`max_internal_redirects <envoy_v3_api_field_config.route.v3.RouteAction.max_internal_redirects>` field\n  are now deprecated. This changes the implemented default cross scheme redirect behavior.\n  All cross scheme redirects are disallowed by default. To restore\n  the previous behavior, set allow_cross_scheme_redirect=true and use\n  :ref:`safe_cross_scheme<envoy_v3_api_msg_extensions.internal_redirect.safe_cross_scheme.v3.SafeCrossSchemeConfig>`,\n  in :ref:`predicates <envoy_v3_api_field_config.route.v3.InternalRedirectPolicy.predicates>`.\n* File access logger fields :ref:`format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.format>`, :ref:`json_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.json_format>` and :ref:`typed_json_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.typed_json_format>` are deprecated in favor of :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n* A warning is now logged when v2 xDS api is used. 
This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_deprecated_v2_api_warning` to `false`.\n* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers <dns_cache_circuit_breakers>`. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_dns_cache_circuit_breakers` to `false`.\n"
  },
  {
    "path": "docs/root/version_history/v1.15.1.rst",
    "content": "1.15.1 (September 29, 2020)\n===========================\n\nChanges\n-------\n* http: fixed CVE-2020-25017. Previously header matching did not match on all headers for non-inline\n  headers. This patch changes the default behavior to always logically match on all headers.\n  Multiple individual headers will be logically concatenated with ',' similar to what is done with\n  inline headers. This makes the behavior effectively consistent. This behavior can be temporary\n  reverted by setting the runtime value \"envoy.reloadable_features.header_match_on_all_headers\" to\n  \"false\".\n\n  Targeted fixes have been additionally performed on the following extensions which make them\n  consider all duplicate headers by default as a comma concatenated list:\n\n    1. Any extension using CEL matching on headers.\n    2. The header to metadata filter.\n    3. The JWT filter.\n    4. The Lua filter.\n\n  Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting\n  the runtime value \"envoy.reloadable_features.header_match_on_all_headers\" to false.\n* http: The setCopy() header map API previously only set the first header in the case of duplicate\n  non-inline headers. setCopy() now behaves similarly to the other set*() APIs and replaces all found\n  headers with a single value. This may have had security implications in the extauth filter which\n  uses this API. This behavior can be disabled by setting the runtime value\n  \"envoy.reloadable_features.http_set_copy_replace_all_headers\" to false.\n"
  },
  {
    "path": "docs/root/version_history/v1.15.2.rst",
    "content": "1.15.2 (September 29, 2020)\n===========================\n\nChanges\n-------\n* docs: fix docs for v1.15.1."
  },
  {
    "path": "docs/root/version_history/v1.2.0.rst",
    "content": "1.2.0 (March 7, 2017)\n=====================\n\nChanges\n-------\n\n* :ref:`Cluster discovery service (CDS) API <config_cluster_manager_cds>`.\n* :ref:`Outlier detection <arch_overview_outlier_detection>` (passive health checking).\n* Envoy configuration is now checked against a JSON schema.\n* :ref:`Ring hash <arch_overview_load_balancing_types>` consistent load balancer, as well as HTTP\n  consistent hash routing based on a policy.\n* Vastly :ref:`enhanced global rate limit configuration <arch_overview_global_rate_limit>` via the HTTP\n  rate limiting filter.\n* HTTP routing to a cluster retrieved from a header.\n* Weighted cluster HTTP routing.\n* Auto host rewrite during HTTP routing.\n* Regex header matching during HTTP routing.\n* HTTP access log runtime filter.\n* LightStep tracer :ref:`parent/child span association <arch_overview_tracing>`.\n* :ref:`Route discovery service (RDS) API <config_http_conn_man_rds>`.\n* HTTP router :ref:`x-envoy-upstream-rq-timeout-alt-response header\n  <config_http_filters_router_x-envoy-upstream-rq-timeout-alt-response>` support.\n* *use_original_dst* and *bind_to_port* :ref:`listener options <config_listeners>` (useful for\n  iptables based transparent proxy support).\n* TCP proxy filter :ref:`route table support <config_network_filters_tcp_proxy>`.\n* Configurable stats flush interval.\n* Various :ref:`third party library upgrades <install_requirements>`, including using BoringSSL as\n  the default SSL provider.\n* No longer maintain closed HTTP/2 streams for priority calculations. Leads to substantial memory\n  savings for large meshes.\n* Numerous small changes and fixes not listed here.\n"
  },
  {
    "path": "docs/root/version_history/v1.3.0.rst",
    "content": "1.3.0 (May 17, 2017)\n====================\n\nChanges\n-------\n\n* As of this release, we now have an official :repo:`breaking change policy\n  </CONTRIBUTING.md#breaking-change-policy>`. Note that there are numerous breaking configuration\n  changes in this release. They are not listed here. Future releases will adhere to the policy and\n  have clear documentation on deprecations and changes.\n* Bazel is now the canonical build system (replacing CMake). There have been a huge number of\n  changes to the development/build/test flow. See :repo:`/bazel/README.md` and\n  :repo:`/ci/README.md` for more information.\n* :ref:`Outlier detection <arch_overview_outlier_detection>` has been expanded to include success\n  rate variance, and all parameters are now configurable in both runtime and in the JSON\n  configuration.\n* TCP level listener and cluster connections now have configurable receive buffer\n  limits at which point connection level back pressure is applied.\n  Full end to end flow control will be available in a future release.\n* :ref:`Redis health checking <config_cluster_manager_cluster_hc>` has been added as an active\n  health check type. Full Redis support will be documented/supported in 1.4.0.\n* :ref:`TCP health checking <config_cluster_manager_cluster_hc_tcp_health_checking>` now supports a\n  \"connect only\" mode that only checks if the remote server can be connected to without\n  writing/reading any data.\n* `BoringSSL <https://boringssl.googlesource.com/boringssl>`_ is now the only supported TLS provider.\n  The default cipher suites and ECDH curves have been updated with more modern defaults for both\n  listener and cluster connections.\n* The `header value match` rate limit action has been expanded to include an `expect\n  match` parameter.\n* Route level HTTP rate limit configurations now do not inherit the virtual host level\n  configurations by default. 
Use `include_vh_rate_limits` to inherit the virtual host\n  level options if desired.\n* HTTP routes can now add request headers on a per route and per virtual host basis via the\n  :ref:`request_headers_to_add <config_http_conn_man_headers_custom_request_headers>` option.\n* The :ref:`example configurations <install_ref_configs>` have been refreshed to demonstrate the\n  latest features.\n* `per_try_timeout_ms` can now be configured in\n  a route's retry policy in addition to via the :ref:`x-envoy-upstream-rq-per-try-timeout-ms\n  <config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms>` HTTP header.\n* HTTP virtual host matching now includes support for prefix wildcard domains (e.g., `*.lyft.com`).\n* The default for tracing random sampling has been changed to 100% and is still configurable in\n  :ref:`runtime <config_http_conn_man_runtime>`.\n* HTTP tracing configuration has been extended to allow tags\n  to be populated from arbitrary HTTP headers.\n* The :ref:`HTTP rate limit filter <config_http_filters_rate_limit>` can now be applied to internal,\n  external, or all requests via the `request_type` option.\n* :ref:`Listener binding <config_listeners>` now requires specifying an `address` field. 
This can be\n  used to bind a listener to both a specific address as well as a port.\n* The :ref:`MongoDB filter <config_network_filters_mongo_proxy>` now emits a stat for queries that\n  do not have `$maxTimeMS` set.\n* The :ref:`MongoDB filter <config_network_filters_mongo_proxy>` now emits logs that are fully valid\n  JSON.\n* The CPU profiler output path is now configurable.\n* A watchdog system has been added that can kill the server if a deadlock is detected.\n* A :ref:`route table checking tool <install_tools_route_table_check_tool>` has been added that can\n  be used to test route tables before use.\n* We have added an :ref:`example repo <extending>` that shows how to compile/link a custom filter.\n* Added additional cluster wide information related to outlier detection to the :ref:`/clusters\n  admin endpoint <operations_admin_interface>`.\n* Multiple SANs can now be verified via the `verify_subject_alt_name` setting.\n  Additionally, URI type SANs can be verified.\n* HTTP filters can now be passed opaque configuration specified on a per route basis.\n* By default Envoy now has a built in crash handler that will print a back trace. This behavior can\n  be disabled if desired via the ``--define=signal_trace=disabled`` Bazel option.\n* Zipkin has been added as a supported :ref:`tracing provider <arch_overview_tracing>`.\n* Numerous small changes and fixes not listed here.\n"
  },
  {
    "path": "docs/root/version_history/v1.4.0.rst",
    "content": "1.4.0 (August 24, 2017)\n=======================\n\nChanges\n-------\n\n* macOS is :repo:`now supported </bazel#quick-start-bazel-build-for-developers>`. (A few features\n  are missing such as hot restart and original destination routing).\n* YAML is now directly supported for config files.\n* Added /routes admin endpoint.\n* End-to-end flow control is now supported for TCP proxy, HTTP/1, and HTTP/2. HTTP flow control\n  that includes filter buffering is incomplete and will be implemented in 1.5.0.\n* Log verbosity :repo:`compile time flag </bazel#log-verbosity>` added.\n* Hot restart :repo:`compile time flag </bazel#hot-restart>` added.\n* Original destination :ref:`cluster <arch_overview_service_discovery_types_original_destination>`\n  and :ref:`load balancer <arch_overview_load_balancing_types_original_destination>` added.\n* :ref:`WebSocket <arch_overview_upgrades>` is now supported.\n* Virtual cluster priorities have been hard removed without deprecation as we are reasonably sure\n  no one is using this feature.\n* Route `validate_clusters` option added.\n* :ref:`x-envoy-downstream-service-node <config_http_conn_man_headers_downstream-service-node>`\n  header added.\n* :ref:`x-forwarded-client-cert <config_http_conn_man_headers_x-forwarded-client-cert>` header\n  added.\n* Initial HTTP/1 forward proxy support for absolute URLs has been added.\n* HTTP/2 codec settings are now configurable.\n* gRPC/JSON transcoder :ref:`filter <config_http_filters_grpc_json_transcoder>` added.\n* gRPC web :ref:`filter <config_http_filters_grpc_web>` added.\n* Configurable timeout for the rate limit service call in the :ref:`network\n  <config_network_filters_rate_limit>` and :ref:`HTTP <config_http_filters_rate_limit>` rate limit\n  filters.\n* :ref:`x-envoy-retry-grpc-on <config_http_filters_router_x-envoy-retry-grpc-on>` header added.\n* :ref:`LDS API <arch_overview_dynamic_config_lds>` added.\n* TLS :`require_client_certificate` option added.\n* 
:ref:`Configuration check tool <install_tools_config_load_check_tool>` added.\n* :ref:`JSON schema check tool <install_tools_schema_validator_check_tool>` added.\n* Config validation mode added via the :option:`--mode` option.\n* :option:`--local-address-ip-version` option added.\n* IPv6 support is now complete.\n* UDP `statsd_ip_address` option added.\n* Per-cluster DNS resolvers added.\n* :ref:`Fault filter <config_http_filters_fault_injection>` enhancements and fixes.\n* Several features are :ref:`deprecated as of the 1.4.0 release <deprecated>`. They\n  will be removed at the beginning of the 1.5.0 release cycle. We explicitly call out that the\n  `HttpFilterConfigFactory` filter API has been deprecated in favor of\n  `NamedHttpFilterConfigFactory`.\n* Many small bug fixes and performance improvements not listed.\n\nDeprecated\n----------\n\n* Config option `statsd_local_udp_port` has been deprecated and has been replaced with\n  `statsd_udp_ip_address`.\n* `HttpFilterConfigFactory` filter API has been deprecated in favor of `NamedHttpFilterConfigFactory`.\n* Config option `http_codec_options` has been deprecated and has been replaced with `http2_settings`.\n* The following log macros have been deprecated: `log_trace`, `log_debug`, `conn_log`,\n  `conn_log_info`, `conn_log_debug`, `conn_log_trace`, `stream_log`, `stream_log_info`,\n  `stream_log_debug`, `stream_log_trace`. 
For replacements, please see\n  `logger.h <https://github.com/envoyproxy/envoy/blob/master/source/common/common/logger.h>`_.\n* The connectionId() and ssl() callbacks of StreamFilterCallbacks have been deprecated and\n  replaced with a more general connection() callback, which, when not returning a nullptr, can be\n  used to get the connection id and SSL connection from the returned Connection object pointer.\n* The protobuf stub gRPC support via `Grpc::RpcChannelImpl` is now replaced with `Grpc::AsyncClientImpl`.\n  This no longer uses `protoc` generated stubs but instead utilizes C++ template generation of the\n  RPC stubs. `Grpc::AsyncClientImpl` supports streaming, in addition to the previous unary, RPCs.\n* The direction of network and HTTP filters in the configuration will be ignored from 1.4.0 and\n  later removed from the configuration in the v2 APIs. Filter direction is now implied at the C++ type\n  level. The `type()` methods on the `NamedNetworkFilterConfigFactory` and\n  `NamedHttpFilterConfigFactory` interfaces have been removed to reflect this.\n"
  },
  {
    "path": "docs/root/version_history/v1.5.0.rst",
    "content": "1.5.0 (December 4, 2017)\n========================\n\nChanges\n-------\n\n* access log: added fields for :ref:`UPSTREAM_LOCAL_ADDRESS and DOWNSTREAM_ADDRESS\n  <config_access_log_format>`.\n* admin: added :ref:`JSON output <operations_admin_interface_stats>` for stats admin endpoint.\n* admin: added basic :ref:`Prometheus output <operations_admin_interface_stats>` for stats admin\n  endpoint. Histograms are not currently output.\n* admin: added ``version_info`` to the :ref:`/clusters admin endpoint<operations_admin_interface_clusters>`.\n* config: the :ref:`v2 API <config_overview>` is now considered production ready.\n* config: added --v2-config-only CLI flag.\n* cors: added :ref:`CORS filter <config_http_filters_cors>`.\n* health check: added :ref:`x-envoy-immediate-health-check-fail\n  <config_http_filters_router_x-envoy-immediate-health-check-fail>` header support.\n* health check: added :ref:`reuse_connection <envoy_api_field_core.HealthCheck.reuse_connection>` option.\n* http: added :ref:`per-listener stats <config_http_conn_man_stats_per_listener>`.\n* http: end-to-end HTTP flow control is now complete across both connections, streams, and filters.\n* load balancer: added :ref:`subset load balancer <arch_overview_load_balancer_subsets>`.\n* load balancer: added ring size and hash :ref:`configuration options\n  <envoy_api_msg_Cluster.RingHashLbConfig>`. This used to be configurable via runtime. 
The runtime\n  configuration was deleted without deprecation as we are fairly certain no one is using it.\n* log: added the ability to optionally log to a file instead of stderr via the\n  :option:`--log-path` option.\n* listeners: added :ref:`drain_type <envoy_api_field_Listener.drain_type>` option.\n* lua: added experimental :ref:`Lua filter <config_http_filters_lua>`.\n* mongo filter: added :ref:`fault injection <config_network_filters_mongo_proxy_fault_injection>`.\n* mongo filter: added :ref:`\"drain close\" <arch_overview_draining>` support.\n* outlier detection: added :ref:`HTTP gateway failure type <arch_overview_outlier_detection>`.\n  See :ref:`deprecated log <deprecated>`\n  for outlier detection stats deprecations in this release.\n* redis: the :ref:`redis proxy filter <config_network_filters_redis_proxy>` is now considered\n  production ready.\n* redis: added :ref:`\"drain close\" <arch_overview_draining>` functionality.\n* router: added :ref:`x-envoy-overloaded <config_http_filters_router_x-envoy-overloaded_set>` support.\n* router: added :ref:`regex <envoy_api_field_route.RouteMatch.regex>` route matching.\n* router: added :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`\n  for upstream requests.\n* router: added :ref:`downstream IP hashing\n  <envoy_api_field_route.RouteAction.HashPolicy.connection_properties>` for HTTP ketama routing.\n* router: added :ref:`cookie hashing <envoy_api_field_route.RouteAction.HashPolicy.cookie>`.\n* router: added :ref:`start_child_span <envoy_api_field_config.filter.http.router.v2.Router.start_child_span>` option\n  to create child span for egress calls.\n* router: added optional :ref:`upstream logs <envoy_api_field_config.filter.http.router.v2.Router.upstream_log>`.\n* router: added complete :ref:`custom append/override/remove support\n  <config_http_conn_man_headers_custom_request_headers>` of request/response headers.\n* router: added support to :ref:`specify response code during 
redirect\n  <envoy_api_field_route.RedirectAction.response_code>`.\n* router: added :ref:`configuration <envoy_api_field_route.RouteAction.cluster_not_found_response_code>`\n  to return either a 404 or 503 if the upstream cluster does not exist.\n* runtime: added :ref:`comment capability <config_runtime_comments>`.\n* server: change default log level (:option:`-l`) to `info`.\n* stats: maximum stat/name sizes and maximum number of stats are now variable via the\n  `--max-obj-name-len` and `--max-stats` options.\n* tcp proxy: added :ref:`access logging <envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.access_log>`.\n* tcp proxy: added :ref:`configurable connect retries\n  <envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.max_connect_attempts>`.\n* tcp proxy: enable use of :ref:`outlier detector <arch_overview_outlier_detection>`.\n* tls: added :ref:`SNI support <faq_how_to_setup_sni>`.\n* tls: added support for specifying :ref:`TLS session ticket keys\n  <envoy_api_field_auth.DownstreamTlsContext.session_ticket_keys>`.\n* tls: allow configuration of the :ref:`min\n  <envoy_api_field_auth.TlsParameters.tls_minimum_protocol_version>` and :ref:`max\n  <envoy_api_field_auth.TlsParameters.tls_maximum_protocol_version>` TLS protocol versions.\n* tracing: added :ref:`custom trace span decorators <envoy_api_field_route.Route.decorator>`.\n* Many small bug fixes and performance improvements not listed.\n\nDeprecated\n----------\n\n* The outlier detection `ejections_total` stats counter has been deprecated and not replaced. 
Monitor\n  the individual `ejections_detected_*` counters for the detectors of interest, or\n  `ejections_enforced_total` for the total number of ejections that actually occurred.\n* The outlier detection `ejections_consecutive_5xx` stats counter has been deprecated in favour of\n  `ejections_detected_consecutive_5xx` and `ejections_enforced_consecutive_5xx`.\n* The outlier detection `ejections_success_rate` stats counter has been deprecated in favour of\n  `ejections_detected_success_rate` and `ejections_enforced_success_rate`.\n"
  },
  {
    "path": "docs/root/version_history/v1.6.0.rst",
    "content": "1.6.0 (March 20, 2018)\n======================\n\nChanges\n-------\n\n* access log: added DOWNSTREAM_REMOTE_ADDRESS, DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, and\n  DOWNSTREAM_LOCAL_ADDRESS :ref:`access log formatters <config_access_log_format>`.\n  DOWNSTREAM_ADDRESS access log formatter has been deprecated.\n* access log: added less than or equal (LE) :ref:`comparison filter\n  <envoy_api_msg_config.filter.accesslog.v2.ComparisonFilter>`.\n* access log: added configuration to :ref:`runtime filter\n  <envoy_api_msg_config.filter.accesslog.v2.RuntimeFilter>` to set default sampling rate, divisor,\n  and whether to use independent randomness or not.\n* admin: added :ref:`/runtime <operations_admin_interface_runtime>` admin endpoint to read the\n  current runtime values.\n* build: added support for :repo:`building Envoy with exported symbols\n  <bazel#enabling-optional-features>`. This change allows scripts loaded with the Lua filter to\n  load shared object libraries such as those installed via `LuaRocks <https://luarocks.org/>`_.\n* config: added support for sending error details as\n  `grpc.rpc.Status <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`_\n  in :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>`.\n* config: added support for :ref:`inline delivery <envoy_api_msg_core.DataSource>` of TLS\n  certificates and private keys.\n* config: added restrictions for the backing :ref:`config sources <envoy_api_msg_core.ConfigSource>`\n  of xDS resources. For filesystem based xDS the file must exist at configuration time. 
For cluster\n  based xDS the backing cluster must be statically defined and be of non-EDS type.\n* grpc: the Google gRPC C++ library client is now supported as specified in the :ref:`gRPC services\n  overview <arch_overview_grpc_services>` and :ref:`GrpcService <envoy_api_msg_core.GrpcService>`.\n* grpc-json: added support for :ref:`inline descriptors\n  <envoy_api_field_config.filter.http.transcoder.v2.GrpcJsonTranscoder.proto_descriptor_bin>`.\n* health check: added :ref:`gRPC health check <envoy_api_field_core.HealthCheck.grpc_health_check>`\n  based on `grpc.health.v1.Health <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto>`_\n  service.\n* health check: added ability to set :ref:`host header value\n  <envoy_api_field_core.HealthCheck.HttpHealthCheck.host>` for http health check.\n* health check: extended the health check filter to support computation of the health check response\n  based on the :ref:`percentage of healthy servers in upstream clusters\n  <envoy_api_field_config.filter.http.health_check.v2.HealthCheck.cluster_min_healthy_percentages>`.\n* health check: added setting for :ref:`no-traffic\n  interval<envoy_api_field_core.HealthCheck.no_traffic_interval>`.\n* http: added idle timeout for :ref:`upstream http connections\n  <envoy_api_field_core.HttpProtocolOptions.idle_timeout>`.\n* http: added support for :ref:`proxying 100-Continue responses\n  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.proxy_100_continue>`.\n* http: added the ability to pass a URL encoded PEM encoded peer certificate in the\n  :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header.\n* http: added support for trusting additional hops in the\n  :ref:`config_http_conn_man_headers_x-forwarded-for` request header.\n* http: added support for :ref:`incoming HTTP/1.0\n  <envoy_api_field_core.Http1ProtocolOptions.accept_http_10>`.\n* hot restart: added SIGTERM propagation to children to 
:ref:`hot-restarter.py\n  <operations_hot_restarter>`, which enables using it as a parent of containers.\n* ip tagging: added :ref:`HTTP IP Tagging filter<config_http_filters_ip_tagging>`.\n* listeners: added support for :ref:`listening for both IPv4 and IPv6\n  <envoy_api_field_core.SocketAddress.ipv4_compat>` when binding to ::.\n* listeners: added support for listening on :ref:`UNIX domain sockets\n  <envoy_api_field_core.Address.pipe>`.\n* listeners: added support for :ref:`abstract unix domain sockets <envoy_api_msg_core.Pipe>` on\n  Linux. The abstract namespace can be used by prepending '@' to a socket path.\n* load balancer: added cluster configuration for :ref:`healthy panic threshold\n  <envoy_api_field_Cluster.CommonLbConfig.healthy_panic_threshold>` percentage.\n* load balancer: added :ref:`Maglev <arch_overview_load_balancing_types_maglev>` consistent hash\n  load balancer.\n* load balancer: added support for\n  :ref:`LocalityLbEndpoints<envoy_api_msg_endpoint.LocalityLbEndpoints>` priorities.\n* lua: added headers :ref:`replace() <config_http_filters_lua_header_wrapper>` API.\n* lua: extended to support :ref:`metadata object <config_http_filters_lua_metadata_wrapper>` API.\n* redis: added local `PING` support to the :ref:`Redis filter <arch_overview_redis>`.\n* redis: added `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` to the :ref:`Redis command splitter\n  <arch_overview_redis>` allowlist.\n* router: added DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, DOWNSTREAM_LOCAL_ADDRESS,\n  DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT, PROTOCOL, and UPSTREAM_METADATA :ref:`header\n  formatters <config_http_conn_man_headers_custom_request_headers>`. 
The CLIENT_IP header formatter\n  has been deprecated.\n* router: added gateway-error :ref:`retry-on <config_http_filters_router_x-envoy-retry-on>` policy.\n* router: added support for route matching based on :ref:`URL query string parameters\n  <envoy_api_msg_route.QueryParameterMatcher>`.\n* router: added support for more granular weighted cluster routing by allowing the :ref:`total_weight\n  <envoy_api_field_route.WeightedCluster.total_weight>` to be specified in configuration.\n* router: added support for :ref:`custom request/response headers\n  <config_http_conn_man_headers_custom_request_headers>` with mixed static and dynamic values.\n* router: added support for :ref:`direct responses <envoy_api_field_route.Route.direct_response>`.\n  I.e., sending a preconfigured HTTP response without proxying anywhere.\n* router: added support for :ref:`HTTPS redirects\n  <envoy_api_field_route.RedirectAction.https_redirect>` on specific routes.\n* router: added support for :ref:`prefix_rewrite\n  <envoy_api_field_route.RedirectAction.prefix_rewrite>` for redirects.\n* router: added support for :ref:`stripping the query string\n  <envoy_api_field_route.RedirectAction.strip_query>` for redirects.\n* router: added support for downstream request/upstream response\n  :ref:`header manipulation <config_http_conn_man_headers_custom_request_headers>` in :ref:`weighted\n  cluster <envoy_api_msg_route.WeightedCluster>`.\n* router: added support for :ref:`range based header matching\n  <envoy_api_field_route.HeaderMatcher.range_match>` for request routing.\n* squash: added support for the :ref:`Squash microservices debugger <config_http_filters_squash>`.\n  Allows debugging an incoming request to a microservice in the mesh.\n* stats: added metrics service API implementation.\n* stats: added native :ref:`DogStatsd <envoy_api_msg_config.metrics.v2.DogStatsdSink>` support.\n* stats: added support for :ref:`fixed stats tag values\n  
<envoy_api_field_config.metrics.v2.TagSpecifier.fixed_value>` which will be added to all metrics.\n* tcp proxy: added support for specifying a :ref:`metadata matcher\n  <envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.metadata_match>` for upstream\n  clusters in the tcp filter.\n* tcp proxy: improved TCP proxy to correctly proxy TCP half-close.\n* tcp proxy: added :ref:`idle timeout\n  <envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.idle_timeout>`.\n* tcp proxy: access logs now bring an IP address without a port when using DOWNSTREAM_ADDRESS.\n  Use :ref:`DOWNSTREAM_REMOTE_ADDRESS <config_access_log_format>` instead.\n* tracing: added support for dynamically loading an :ref:`OpenTracing tracer\n  <envoy_api_msg_config.trace.v2.DynamicOtConfig>`.\n* tracing: when using the Zipkin tracer, it is now possible for clients to specify the sampling\n  decision (using the :ref:`x-b3-sampled <config_http_conn_man_headers_x-b3-sampled>` header) and\n  have the decision propagated through to subsequently invoked services.\n* tracing: when using the Zipkin tracer, it is no longer necessary to propagate the\n  :ref:`x-ot-span-context <config_http_conn_man_headers_x-ot-span-context>` header.\n  See more on trace context propagation :ref:`here <arch_overview_tracing>`.\n* transport sockets: added transport socket interface to allow custom implementations of transport\n  sockets. A transport socket provides read and write logic with buffer encryption and decryption\n  (if applicable). The existing TLS implementation has been refactored with the interface.\n* upstream: added support for specifying an :ref:`alternate stats name\n  <envoy_api_field_Cluster.alt_stat_name>` while emitting stats for clusters.\n* Many small bug fixes and performance improvements not listed.\n\nDeprecated\n----------\n\n* DOWNSTREAM_ADDRESS log formatter is deprecated. Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT\n  instead.\n* CLIENT_IP header formatter is deprecated. 
Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT instead.\n* 'use_original_dst' field in the v2 LDS API is deprecated. Use listener filters and filter chain\n  matching instead.\n* `value` and `regex` fields in the `HeaderMatcher` message is deprecated. Use the `exact_match`\n  or `regex_match` oneof instead.\n"
  },
  {
    "path": "docs/root/version_history/v1.7.0.rst",
    "content": "1.7.0 (Jun 21, 2018)\n====================\n\nChanges\n-------\n\n* access log: added ability to log response trailers.\n* access log: added ability to format START_TIME.\n* access log: added DYNAMIC_METADATA :ref:`access log formatter <config_access_log_format>`.\n* access log: added :ref:`HeaderFilter <envoy_api_msg_config.filter.accesslog.v2.HeaderFilter>`\n  to filter logs based on request headers.\n* access log: added `%([1-9])?f` as one of START_TIME specifiers to render subseconds.\n* access log: gRPC Access Log Service (ALS) support added for :ref:`HTTP access logs\n  <envoy_api_msg_config.accesslog.v2.HttpGrpcAccessLogConfig>`.\n* access log: improved WebSocket logging.\n* admin: added :http:get:`/config_dump` for dumping the current configuration and associated xDS\n  version information (if applicable).\n* admin: added :http:get:`/clusters?format=json` for outputing a JSON-serialized proto detailing\n  the current status of all clusters.\n* admin: added :http:get:`/stats/prometheus` as an alternative endpoint for getting stats in prometheus format.\n* admin: added :ref:`/runtime_modify endpoint <operations_admin_interface_runtime_modify>` to add or change runtime values.\n* admin: mutations must be sent as POSTs, rather than GETs. 
Mutations include:\n  :http:post:`/cpuprofiler`, :http:post:`/healthcheck/fail`, :http:post:`/healthcheck/ok`,\n  :http:post:`/logging`, :http:post:`/quitquitquit`, :http:post:`/reset_counters`,\n  :http:post:`/runtime_modify?key1=value1&key2=value2&keyN=valueN`.\n* admin: removed `/routes` endpoint; route configs can now be found at the :ref:`/config_dump endpoint <operations_admin_interface_config_dump>`.\n* buffer filter: the buffer filter can be optionally\n  :ref:`disabled <envoy_api_field_config.filter.http.buffer.v2.BufferPerRoute.disabled>` or\n  :ref:`overridden <envoy_api_field_config.filter.http.buffer.v2.BufferPerRoute.buffer>` with\n  route-local configuration.\n* cli: added --config-yaml flag to the Envoy binary. When set its value is interpreted as a yaml\n  representation of the bootstrap config and overrides --config-path.\n* cluster: added :ref:`option <envoy_api_field_Cluster.close_connections_on_host_health_failure>`\n  to close tcp_proxy upstream connections when health checks fail.\n* cluster: added :ref:`option <envoy_api_field_Cluster.drain_connections_on_host_removal>` to drain\n  connections from hosts after they are removed from service discovery, regardless of health status.\n* cluster: fixed bug preventing the deletion of all endpoints in a priority\n* debug: added symbolized stack traces (where supported)\n* ext-authz filter: added support to raw HTTP authorization.\n* ext-authz filter: added support to gRPC responses to carry HTTP attributes.\n* grpc: support added for the full set of :ref:`Google gRPC call credentials\n  <envoy_api_msg_core.GrpcService.GoogleGrpc.CallCredentials>`.\n* gzip filter: added :ref:`stats <gzip-statistics>` to the filter.\n* gzip filter: sending *accept-encoding* header as *identity* no longer compresses the payload.\n* health check: added ability to set :ref:`additional HTTP headers\n  <envoy_api_field_core.HealthCheck.HttpHealthCheck.request_headers_to_add>` for HTTP health check.\n* health check: added 
support for EDS delivered :ref:`endpoint health status\n  <envoy_api_field_endpoint.LbEndpoint.health_status>`.\n* health check: added interval overrides for health state transitions from :ref:`healthy to unhealthy\n  <envoy_api_field_core.HealthCheck.unhealthy_edge_interval>`, :ref:`unhealthy to healthy\n  <envoy_api_field_core.HealthCheck.healthy_edge_interval>` and for subsequent checks on\n  :ref:`unhealthy hosts <envoy_api_field_core.HealthCheck.unhealthy_interval>`.\n* health check: added support for :ref:`custom health check <envoy_api_field_core.HealthCheck.custom_health_check>`.\n* health check: health check connections can now be configured to use http/2.\n* health check http filter: added\n  :ref:`generic header matching <envoy_api_field_config.filter.http.health_check.v2.HealthCheck.headers>`\n  to trigger health check response. Deprecated the endpoint option.\n* http: filters can now optionally support\n  :ref:`virtual host <envoy_api_field_route.VirtualHost.per_filter_config>`,\n  :ref:`route <envoy_api_field_route.Route.per_filter_config>`, and\n  :ref:`weighted cluster <envoy_api_field_route.WeightedCluster.ClusterWeight.per_filter_config>`\n  local configuration.\n* http: added the ability to pass DNS type Subject Alternative Names of the client certificate in the\n  :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header.\n* http: local responses to gRPC requests are now sent as trailers-only gRPC responses instead of plain HTTP responses.\n  Notably the HTTP response code is always \"200\" in this case, and the gRPC error code is carried in \"grpc-status\"\n  header, optionally accompanied with a text message in \"grpc-message\" header.\n* http: added support for :ref:`via header\n  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.via>`\n  append.\n* http: added a :ref:`configuration option\n  
<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.skip_xff_append>`\n  to elide *x-forwarded-for* header modifications.\n* http: fixed a bug in inline headers where addCopy and addViaMove didn't add header values when\n  encountering inline headers with multiple instances.\n* listeners: added :ref:`tcp_fast_open_queue_length <envoy_api_field_Listener.tcp_fast_open_queue_length>` option.\n* listeners: added the ability to match :ref:`FilterChain <envoy_api_msg_listener.FilterChain>` using\n  :ref:`application_protocols <envoy_api_field_listener.FilterChainMatch.application_protocols>`\n  (e.g. ALPN for TLS protocol).\n* listeners: `sni_domains` has been deprecated/renamed to :ref:`server_names <envoy_api_field_listener.FilterChainMatch.server_names>`.\n* listeners: removed restriction on all filter chains having identical filters.\n* load balancer: added :ref:`weighted round robin\n  <arch_overview_load_balancing_types_round_robin>` support. The round robin\n  scheduler now respects endpoint weights and also has improved fidelity across\n  picks.\n* load balancer: :ref:`locality weighted load balancing\n  <arch_overview_load_balancer_subsets>` is now supported.\n* load balancer: ability to configure zone aware load balancer settings :ref:`through the API\n  <envoy_api_field_Cluster.CommonLbConfig.zone_aware_lb_config>`.\n* load balancer: the :ref:`weighted least request\n  <arch_overview_load_balancing_types_least_request>` load balancing algorithm has been improved\n  to have better balance when operating in weighted mode.\n* logger: added the ability to optionally set the log format via the :option:`--log-format` option.\n* logger: all :ref:`logging levels <operations_admin_interface_logging>` can be configured\n  at run-time: trace debug info warning error critical.\n* rbac http filter: a :ref:`role-based access control http filter <config_http_filters_rbac>` has been added.\n* router: the behavior of per-try timeouts have 
changed in the case where a portion of the response has\n  already been proxied downstream when the timeout occurs. Previously, the response would be reset\n  leading to either an HTTP/2 reset or an HTTP/1 closed connection and a partial response. Now, the\n  timeout will be ignored and the response will continue to proxy up to the global request timeout.\n* router: changed the behavior of :ref:`source IP routing <envoy_api_field_route.RouteAction.HashPolicy.ConnectionProperties.source_ip>`\n  to ignore the source port.\n* router: added an :ref:`prefix_match <envoy_api_field_route.HeaderMatcher.prefix_match>` match type\n  to explicitly match based on the prefix of a header value.\n* router: added an :ref:`suffix_match <envoy_api_field_route.HeaderMatcher.suffix_match>` match type\n  to explicitly match based on the suffix of a header value.\n* router: added an :ref:`present_match <envoy_api_field_route.HeaderMatcher.present_match>` match type\n  to explicitly match based on a header's presence.\n* router: added an :ref:`invert_match <envoy_api_field_route.HeaderMatcher.invert_match>` config option\n  which supports inverting all other match types to match based on headers which are not a desired value.\n* router: allow :ref:`cookie routing <envoy_api_msg_route.RouteAction.HashPolicy.Cookie>` to\n  generate session cookies.\n* router: added START_TIME as one of supported variables in :ref:`header\n  formatters <config_http_conn_man_headers_custom_request_headers>`.\n* router: added a :ref:`max_grpc_timeout <envoy_api_field_route.RouteAction.max_grpc_timeout>`\n  config option to specify the maximum allowable value for timeouts decoded from gRPC header field\n  `grpc-timeout`.\n* router: added a :ref:`configuration option\n  <envoy_api_field_config.filter.http.router.v2.Router.suppress_envoy_headers>` to disable *x-envoy-*\n  header generation.\n* router: added 'unavailable' to the retriable gRPC status codes that can be specified\n  through 
:ref:`x-envoy-retry-grpc-on <config_http_filters_router_x-envoy-retry-grpc-on>`.\n* sockets: added :ref:`tap transport socket extension <operations_traffic_tapping>` to support\n  recording plain text traffic and PCAP generation.\n* sockets: added `IP_FREEBIND` socket option support for :ref:`listeners\n  <envoy_api_field_Listener.freebind>` and upstream connections via\n  :ref:`cluster manager wide\n  <envoy_api_field_config.bootstrap.v2.ClusterManager.upstream_bind_config>` and\n  :ref:`cluster specific <envoy_api_field_Cluster.upstream_bind_config>` options.\n* sockets: added `IP_TRANSPARENT` socket option support for :ref:`listeners\n  <envoy_api_field_Listener.transparent>`.\n* sockets: added `SO_KEEPALIVE` socket option for upstream connections\n  :ref:`per cluster <envoy_api_field_Cluster.upstream_connection_options>`.\n* stats: added support for histograms.\n* stats: added :ref:`option to configure the statsd prefix<envoy_api_field_config.metrics.v2.StatsdSink.prefix>`.\n* stats: updated stats sink interface to flush through a single call.\n* tls: added support for\n  :ref:`verify_certificate_spki <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>`.\n* tls: added support for multiple\n  :ref:`verify_certificate_hash <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>`\n  values.\n* tls: added support for using\n  :ref:`verify_certificate_spki <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>`\n  and :ref:`verify_certificate_hash <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>`\n  without :ref:`trusted_ca <envoy_api_field_auth.CertificateValidationContext.trusted_ca>`.\n* tls: added support for allowing expired certificates with\n  :ref:`allow_expired_certificate <envoy_api_field_auth.CertificateValidationContext.allow_expired_certificate>`.\n* tls: added support for :ref:`renegotiation <envoy_api_field_auth.UpstreamTlsContext.allow_renegotiation>`\n  when 
acting as a client.\n* tls: removed support for legacy SHA-2 CBC cipher suites.\n* tracing: the sampling decision is now delegated to the tracers, allowing the tracer to decide when and if\n  to use it. For example, if the :ref:`x-b3-sampled <config_http_conn_man_headers_x-b3-sampled>` header\n  is supplied with the client request, its value will override any sampling decision made by the Envoy proxy.\n* websocket: support configuring idle_timeout and max_connect_attempts.\n* upstream: added support for host override for a request in :ref:`Original destination host request header <arch_overview_load_balancing_types_original_destination_request_header>`.\n* header to metadata: added :ref:`HTTP Header to Metadata filter<config_http_filters_header_to_metadata>`.\n\nDeprecated\n----------\n\n* Admin mutations should be sent as POSTs rather than GETs. HTTP GETs will result in an error\n  status code and will not have their intended effect. Prior to 1.7, GETs can be used for\n  admin mutations, but a warning is logged.\n* Rate limit service configuration via the `cluster_name` field is deprecated. Use `grpc_service`\n  instead.\n* gRPC service configuration via the `cluster_names` field in `ApiConfigSource` is deprecated. Use\n  `grpc_services` instead. Prior to 1.7, a warning is logged.\n* Redis health checker configuration via the `redis_health_check` field in `HealthCheck` is\n  deprecated. Use `custom_health_check` with name `envoy.health_checkers.redis` instead. Prior\n  to 1.7, `redis_health_check` can be used, but warning is logged.\n* `SAN` is replaced by `URI` in the `x-forwarded-client-cert` header.\n* The `endpoint` field in the http health check filter is deprecated in favor of the `headers`\n  field where one can specify HeaderMatch objects to match on.\n* The `sni_domains` field in the filter chain match was deprecated/renamed to `server_names`.\n"
  },
  {
    "path": "docs/root/version_history/v1.8.0.rst",
    "content": "1.8.0 (Oct 4, 2018)\n===================\n\nChanges\n-------\n\n* access log: added :ref:`response flag filter <envoy_api_msg_config.filter.accesslog.v2.ResponseFlagFilter>`\n  to filter based on the presence of Envoy response flags.\n* access log: added RESPONSE_DURATION and RESPONSE_TX_DURATION.\n* access log: added REQUESTED_SERVER_NAME for SNI to tcp_proxy and http\n* admin: added :http:get:`/hystrix_event_stream` as an endpoint for monitoring envoy's statistics\n  through `Hystrix dashboard <https://github.com/Netflix-Skunkworks/hystrix-dashboard/wiki>`_.\n* cli: added support for :ref:`component log level <operations_cli>` command line option for configuring log levels of individual components.\n* cluster: added :ref:`option <envoy_api_field_Cluster.CommonLbConfig.update_merge_window>` to merge\n  health check/weight/metadata updates within the given duration.\n* config: regex validation added to limit to a maximum of 1024 characters.\n* config: v1 disabled by default. v1 support remains available until October via flipping --v2-config-only=false.\n* config: v1 disabled by default. v1 support remains available until October via deprecated flag --allow-deprecated-v1-api.\n* config: fixed stat inconsistency between xDS and ADS implementation. 
:ref:`update_failure <config_cluster_manager_cds>`\n  stat is incremented in case of network failure and :ref:`update_rejected <config_cluster_manager_cds>` stat is incremented\n  in case of schema/validation error.\n* config: added a stat :ref:`connected_state <management_server_stats>` that indicates current connected state of Envoy with\n  management server.\n* ext_authz: added support for configuring additional :ref:`authorization headers <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationRequest.headers_to_add>`\n  to be sent from Envoy to the authorization service.\n* fault: added support for fractional percentages in :ref:`FaultDelay <envoy_api_field_config.filter.fault.v2.FaultDelay.percentage>`\n  and in :ref:`FaultAbort <envoy_api_field_config.filter.http.fault.v2.FaultAbort.percentage>`.\n* grpc-json: added support for building HTTP response from\n  `google.api.HttpBody <https://github.com/googleapis/googleapis/blob/master/google/api/httpbody.proto>`_.\n* health check: added support for :ref:`custom health check <envoy_api_field_core.HealthCheck.custom_health_check>`.\n* health check: added support for :ref:`specifying jitter as a percentage <envoy_api_field_core.HealthCheck.interval_jitter_percent>`.\n* health_check: added support for :ref:`health check event logging <arch_overview_health_check_logging>`.\n* health_check: added :ref:`timestamp <envoy_api_field_data.core.v2alpha.HealthCheckEvent.timestamp>`\n  to the :ref:`health check event <envoy_api_msg_data.core.v2alpha.HealthCheckEvent>` definition.\n* health_check: added support for specifying :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`\n  to HTTP health checker requests.\n* http: added support for a :ref:`per-stream idle timeout\n  <envoy_api_field_route.RouteAction.idle_timeout>`. 
This applies at both :ref:`connection manager\n  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  and :ref:`per-route granularity <envoy_api_field_route.RouteAction.idle_timeout>`. The timeout\n  defaults to 5 minutes; if you have other timeouts (e.g. connection idle timeout, upstream\n  response per-retry) that are longer than this in duration, you may want to consider setting a\n  non-default per-stream idle timeout.\n* http: added upstream_rq_completed counter for :ref:`total requests completed <config_cluster_manager_cluster_stats_dynamic_http>` to dynamic HTTP counters.\n* http: added downstream_rq_completed counter for :ref:`total requests completed <config_http_conn_man_stats>`, including on a :ref:`per-listener basis <config_http_conn_man_stats_per_listener>`.\n* http: added generic :ref:`Upgrade support\n  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.upgrade_configs>`.\n* http: better handling of HEAD requests. Now sending transfer-encoding: chunked rather than content-length: 0.\n* http: fixed missing support for appending to predefined inline headers, e.g.\n  *authorization*, in features that interact with request and response headers,\n  e.g. :ref:`request_headers_to_add\n  <envoy_api_field_route.Route.request_headers_to_add>`. 
For example, a\n  request header *authorization: token1* will appear as *authorization:\n  token1,token2*, after having :ref:`request_headers_to_add\n  <envoy_api_field_route.Route.request_headers_to_add>` with *authorization:\n  token2* applied.\n* http: response filters not applied to early error paths such as http_parser generated 400s.\n* http: restrictions added to reject *:*-prefixed pseudo-headers in :ref:`custom\n  request headers <config_http_conn_man_headers_custom_request_headers>`.\n* http: :ref:`hpack_table_size <envoy_api_field_core.Http2ProtocolOptions.hpack_table_size>` now controls\n  dynamic table size of both: encoder and decoder.\n* http: added support for removing request headers using :ref:`request_headers_to_remove\n  <envoy_api_field_route.Route.request_headers_to_remove>`.\n* http: added support for a :ref:`delayed close timeout<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.delayed_close_timeout>` to mitigate race conditions when closing connections to downstream HTTP clients. 
The timeout defaults to 1 second.\n* jwt-authn filter: add support for per route JWT requirements.\n* listeners: added the ability to match :ref:`FilterChain <envoy_api_msg_listener.FilterChain>` using\n  :ref:`destination_port <envoy_api_field_listener.FilterChainMatch.destination_port>` and\n  :ref:`prefix_ranges <envoy_api_field_listener.FilterChainMatch.prefix_ranges>`.\n* lua: added :ref:`connection() <config_http_filters_lua_connection_wrapper>` wrapper and *ssl()* API.\n* lua: added :ref:`streamInfo() <config_http_filters_lua_stream_info_wrapper>` wrapper and *protocol()* API.\n* lua: added :ref:`streamInfo():dynamicMetadata() <config_http_filters_lua_stream_info_dynamic_metadata_wrapper>` API.\n* network: introduced :ref:`sni_cluster <config_network_filters_sni_cluster>` network filter that forwards connections to the\n  upstream cluster specified by the SNI value presented by the client during a TLS handshake.\n* proxy_protocol: added support for HAProxy Proxy Protocol v2 (AF_INET/AF_INET6 only).\n* ratelimit: added support for :repo:`api/envoy/service/ratelimit/v2/rls.proto`.\n  Lyft's reference implementation of the `ratelimit <https://github.com/lyft/ratelimit>`_ service also supports the data-plane-api proto as of v1.1.0.\n  Envoy can use either proto to send client requests to a ratelimit server with the use of the\n  `use_data_plane_proto` boolean flag in the ratelimit configuration.\n  Support for the legacy proto `source/common/ratelimit/ratelimit.proto` is deprecated and will be removed at the start of the 1.9.0 release cycle.\n* ratelimit: added :ref:`failure_mode_deny <envoy_api_msg_config.filter.http.rate_limit.v2.RateLimit>` option to control traffic flow in\n  case of rate limit service error.\n* rbac config: added a :ref:`principal_name <envoy_api_field_config.rbac.v2.Principal.Authenticated.principal_name>` field and\n  removed the old `name` field to give more flexibility for matching certificate identity.\n* rbac network filter: a 
:ref:`role-based access control network filter <config_network_filters_rbac>` has been added.\n* rest-api: added ability to set the :ref:`request timeout <envoy_api_field_core.ApiConfigSource.request_timeout>` for REST API requests.\n* route checker: added v2 config support and removed support for v1 configs.\n* router: added ability to set request/response headers at the :ref:`envoy_api_msg_route.Route` level.\n* stats: added :ref:`option to configure the DogStatsD metric name prefix<envoy_api_field_config.metrics.v2.DogStatsdSink.prefix>` to DogStatsdSink.\n* tcp_proxy: added support for :ref:`weighted clusters <envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.weighted_clusters>`.\n* thrift_proxy: introduced thrift routing, moved configuration to correct location\n* thrift_proxy: introduced thrift configurable decoder filters\n* tls: implemented :ref:`Secret Discovery Service <config_secret_discovery_service>`.\n* tracing: added support for configuration of :ref:`tracing sampling\n  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing>`.\n* upstream: added configuration option to the subset load balancer to take locality weights into account when\n  selecting a host from a subset.\n* upstream: require opt-in to use the :ref:`x-envoy-original-dst-host <config_http_conn_man_headers_x-envoy-original-dst-host>` header\n  for overriding destination address when using the :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`\n  load balancing policy.\n\nDeprecated\n----------\n\n* Use of the v1 API (including `*.deprecated_v1` fields in the v2 API) is deprecated.\n  See envoy-announce `email <https://groups.google.com/forum/#!topic/envoy-announce/oPnYMZw8H4U>`_.\n* Use of the legacy\n  `ratelimit.proto <https://github.com/envoyproxy/envoy/blob/b0a518d064c8255e0e20557a8f909b6ff457558f/source/common/ratelimit/ratelimit.proto>`_\n  is deprecated, in favor of the proto defined in\n  
`data-plane-api <https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/ratelimit/v2/rls.proto>`_\n  Prior to 1.8.0, Envoy can use either proto to send client requests to a ratelimit server with the use of the\n  `use_data_plane_proto` boolean flag in the `ratelimit configuration <https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/ratelimit/v2/rls.proto>`_.\n  However, when using the deprecated client a warning is logged.\n* Use of the --v2-config-only flag.\n* Use of both `use_websocket` and `websocket_config` in\n  `route.proto <https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto>`_\n  is deprecated. Please use the new `upgrade_configs` in the\n  `HttpConnectionManager <https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto>`_\n  instead.\n* Use of the integer `percent` field in `FaultDelay <https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/fault/v2/fault.proto>`_\n  and in `FaultAbort <https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/http/fault/v2/fault.proto>`_ is deprecated in favor\n  of the new `FractionalPercent` based `percentage` field.\n* Setting hosts via `hosts` field in `Cluster` is deprecated. Use `load_assignment` instead.\n* Use of `response_headers_to_*` and `request_headers_to_add` are deprecated at the `RouteAction`\n  level. Please use the configuration options at the `Route` level.\n* Use of `runtime` in `RouteMatch`, found in\n  `route.proto <https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto>`_.\n  Set the `runtime_fraction` field instead.\n* Use of the string `user` field in `Authenticated` in `rbac.proto <https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/rbac/v2alpha/rbac.proto>`_\n  is deprecated in favor of the new `StringMatcher` based `principal_name` field.\n"
  },
  {
    "path": "docs/root/version_history/v1.9.0.rst",
    "content": "1.9.0 (Dec 20, 2018)\n====================\n\nChanges\n-------\n\n* access log: added a :ref:`JSON logging mode <config_access_log_format_dictionaries>` to output access logs in JSON format.\n* access log: added dynamic metadata to access log messages streamed over gRPC.\n* access log: added DOWNSTREAM_CONNECTION_TERMINATION.\n* admin: :http:post:`/logging` now responds with 200 while there are no params.\n* admin: added support for displaying subject alternate names in :ref:`certs<operations_admin_interface_certs>` end point.\n* admin: added host weight to the :http:get:`/clusters?format=json` end point response.\n* admin: :http:get:`/server_info` now responds with a JSON object instead of a single string.\n* admin: :http:get:`/server_info` now exposes what stage of initialization the server is currently in.\n* admin: added support for displaying command line options in :http:get:`/server_info` end point.\n* circuit-breaker: added cx_open, rq_pending_open, rq_open and rq_retry_open gauges to expose live\n  state via :ref:`circuit breakers statistics <config_cluster_manager_cluster_stats_circuit_breakers>`.\n* cluster: set a default of 1s for :ref:`option <envoy_api_field_Cluster.CommonLbConfig.update_merge_window>`.\n* config: removed support for the v1 API.\n* config: added support for :ref:`rate limiting<envoy_api_msg_core.RateLimitSettings>` discovery request calls.\n* cors: added :ref:`invalid/valid stats <cors-statistics>` to filter.\n* ext-authz: added support for providing per route config - optionally disable the filter and provide context extensions.\n* fault: removed integer percentage support.\n* grpc-json: added support for :ref:`ignoring query parameters\n  <envoy_api_field_config.filter.http.transcoder.v2.GrpcJsonTranscoder.ignored_query_parameters>`.\n* health check: added :ref:`logging health check failure events <envoy_api_field_core.HealthCheck.always_log_health_check_failures>`.\n* health check: added ability to set 
:ref:`authority header value\n  <envoy_api_field_core.HealthCheck.GrpcHealthCheck.authority>` for gRPC health check.\n* http: added HTTP/2 WebSocket proxying via :ref:`extended CONNECT <envoy_api_field_core.Http2ProtocolOptions.allow_connect>`.\n* http: added limits to the number and length of header modifications in all fields request_headers_to_add and response_headers_to_add. These limits are very high and should only be used as a last-resort safeguard.\n* http: added support for a :ref:`request timeout <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.request_timeout>`. The timeout is disabled by default.\n* http: no longer adding whitespace when appending X-Forwarded-For headers. **Warning**: this is not\n  compatible with 1.7.0 builds prior to `9d3a4eb4ac44be9f0651fcc7f87ad98c538b01ee <https://github.com/envoyproxy/envoy/pull/3610>`_.\n  See `#3611 <https://github.com/envoyproxy/envoy/issues/3611>`_ for details.\n* http: augmented the `sendLocalReply` filter API to accept an optional `GrpcStatus`\n  value to override the default HTTP to gRPC status mapping.\n* http: no longer close the TCP connection when a HTTP/1 request is retried due\n  to a response with empty body.\n* http: added support for more gRPC content-type headers in :ref:`gRPC bridge filter <config_http_filters_grpc_bridge>`, like application/grpc+proto.\n* listeners: all listener filters are now governed by the :ref:`listener_filters_timeout\n  <envoy_api_field_Listener.listener_filters_timeout>` setting. 
The hard coded 15s timeout in\n  the :ref:`TLS inspector listener filter <config_listener_filters_tls_inspector>` is superseded by\n  this setting.\n* listeners: added the ability to match :ref:`FilterChain <envoy_api_msg_listener.FilterChain>` using :ref:`source_type <envoy_api_field_listener.FilterChainMatch.source_type>`.\n* load balancer: added a `configuration <envoy_api_msg_Cluster.LeastRequestLbConfig>` option to specify the number of choices made in P2C.\n* logging: added missing [ in log prefix.\n* mongo_proxy: added :ref:`dynamic metadata <config_network_filters_mongo_proxy_dynamic_metadata>`.\n* network: removed the reference to `FilterState` in `Connection` in favor of `StreamInfo`.\n* rate-limit: added :ref:`configuration <envoy_api_field_config.filter.http.rate_limit.v2.RateLimit.rate_limited_as_resource_exhausted>`\n  to specify whether the `GrpcStatus` status returned should be `RESOURCE_EXHAUSTED` or\n  `UNAVAILABLE` when a gRPC call is rate limited.\n* rate-limit: removed support for the legacy ratelimit service and made the data-plane-api\n  :ref:`rls.proto <envoy_api_file_envoy/service/ratelimit/v2/rls.proto>` based implementation default.\n* rate-limit: removed the deprecated cluster_name attribute in :ref:`rate limit service configuration <envoy_api_file_envoy/config/ratelimit/v2/rls.proto>`.\n* rate-limit: added :ref:`rate_limit_service <envoy_api_msg_config.filter.http.rate_limit.v2.RateLimit>` configuration to filters.\n* rbac: added dynamic metadata to the network level filter.\n* rbac: added support for permission matching by :ref:`requested server name <envoy_api_field_config.rbac.v2.Permission.requested_server_name>`.\n* redis: static cluster configuration is no longer required. Redis proxy will work with clusters\n  delivered via CDS.\n* router: added ability to configure arbitrary :ref:`retriable status codes. 
<envoy_api_field_route.RetryPolicy.retriable_status_codes>`\n* router: added ability to set attempt count in upstream requests, see :ref:`virtual host's include request\n  attempt count flag <envoy_api_field_route.VirtualHost.include_request_attempt_count>`.\n* router: added internal :ref:`grpc-retry-on <config_http_filters_router_x-envoy-retry-grpc-on>` policy.\n* router: added :ref:`scheme_redirect <envoy_api_field_route.RedirectAction.scheme_redirect>` and\n  :ref:`port_redirect <envoy_api_field_route.RedirectAction.port_redirect>` to define the respective\n  scheme and port rewriting RedirectAction.\n* router: when :ref:`max_grpc_timeout <envoy_api_field_route.RouteAction.max_grpc_timeout>`\n  is set, Envoy will now add or update the grpc-timeout header to reflect Envoy's expected timeout.\n* router: per try timeouts now starts when an upstream stream is ready instead of when the request has\n  been fully decoded by Envoy.\n* router: added support for not retrying :ref:`rate limited requests<config_http_filters_router_x-envoy-ratelimited>`. 
Rate limit filter now sets the :ref:`x-envoy-ratelimited<config_http_filters_router_x-envoy-ratelimited>`\n  header so the rate limited requests that may have been retried earlier will not be retried with this change.\n* router: added support for enabling upgrades on a :ref:`per-route <envoy_api_field_route.RouteAction.upgrade_configs>` basis.\n* router: support configuring a default fraction of mirror traffic via\n  :ref:`runtime_fraction <envoy_api_field_route.RouteAction.RequestMirrorPolicy.runtime_key>`.\n* sandbox: added :ref:`cors sandbox <install_sandboxes_cors>`.\n* server: added `SIGINT` (Ctrl-C) handler to gracefully shutdown Envoy like `SIGTERM`.\n* stats: added :ref:`stats_matcher <envoy_api_field_config.metrics.v2.StatsConfig.stats_matcher>` to the bootstrap config for granular control of stat instantiation.\n* stream: renamed the `RequestInfo` namespace to `StreamInfo` to better match\n  its behaviour within TCP and HTTP implementations.\n* stream: renamed `perRequestState` to `filterState` in `StreamInfo`.\n* stream: added `downstreamDirectRemoteAddress` to `StreamInfo`.\n* thrift_proxy: introduced thrift rate limiter filter.\n* tls: added ssl.curves.<curve>, ssl.sigalgs.<sigalg> and ssl.versions.<version> to\n  :ref:`listener metrics <config_listener_stats>` to track TLS algorithms and versions in use.\n* tls: added support for :ref:`client-side session resumption <envoy_api_field_auth.UpstreamTlsContext.max_session_keys>`.\n* tls: added support for CRLs in :ref:`trusted_ca <envoy_api_field_auth.CertificateValidationContext.trusted_ca>`.\n* tls: added support for :ref:`multiple server TLS certificates <arch_overview_ssl_cert_select>`.\n* tls: added support for :ref:`password encrypted private keys <envoy_api_field_auth.TlsCertificate.password>`.\n* tls: added the ability to build :ref:`BoringSSL FIPS <arch_overview_ssl_fips>` using ``--define boringssl=fips`` Bazel option.\n* tls: removed support for ECDSA certificates with curves other than 
P-256.\n* tls: removed support for RSA certificates with keys smaller than 2048-bits.\n* tracing: added support to the Zipkin tracer for the :ref:`b3 <config_http_conn_man_headers_b3>` single header format.\n* tracing: added support for :ref:`Datadog <arch_overview_tracing>` tracer.\n* upstream: added :ref:`scale_locality_weight<envoy_api_field_Cluster.LbSubsetConfig.scale_locality_weight>` to enable\n  scaling locality weights by number of hosts removed by subset lb predicates.\n* upstream: changed how load calculation for :ref:`priority levels<arch_overview_load_balancing_priority_levels>` and :ref:`panic thresholds<arch_overview_load_balancing_panic_threshold>` interact. As long as normalized total health is 100% panic thresholds are disregarded.\n* upstream: changed the default hash for :ref:`ring hash <envoy_api_msg_Cluster.RingHashLbConfig>` from std::hash to `xxHash <https://github.com/Cyan4973/xxHash>`_.\n* upstream: when using active health checking and STRICT_DNS with several addresses that resolve\n  to the same hosts, Envoy will now health check each host independently.\n\nDeprecated\n----------\n\n* Order of execution of the network write filter chain has been reversed. Prior to this release cycle it was incorrect, see `#4599 <https://github.com/envoyproxy/envoy/issues/4599>`_. In the 1.9.0 release cycle we introduced `bugfix_reverse_write_filter_order` in `lds.proto <https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/lds.proto>`_ to temporarily support both old and new behaviors. Note this boolean field is deprecated.\n* Order of execution of the HTTP encoder filter chain has been reversed. Prior to this release cycle it was incorrect, see `#4599 <https://github.com/envoyproxy/envoy/issues/4599>`_. 
In the 1.9.0 release cycle we introduced `bugfix_reverse_encode_order` in `http_connection_manager.proto <https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto>`_ to temporarily support both old and new behaviors. Note this boolean field is deprecated.\n* Use of the v1 REST_LEGACY ApiConfigSource is deprecated.\n* Use of std::hash in the ring hash load balancer is deprecated.\n* Use of `rate_limit_service` configuration in the `bootstrap configuration <https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/bootstrap/v2/bootstrap.proto>`_ is deprecated.\n* Use of `runtime_key` in `RequestMirrorPolicy`, found in\n  `route.proto <https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto>`_\n  is deprecated. Set the `runtime_fraction` field instead.\n* Use of buffer filter `max_request_time` is deprecated in favor of the request timeout found in `HttpConnectionManager <https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto>`_\n"
  },
  {
    "path": "docs/root/version_history/v1.9.1.rst",
    "content": "1.9.1 (Apr 2, 2019)\n===================\n\nChanges\n-------\n\n* http: fixed CVE-2019-9900 by rejecting HTTP/1.x headers with embedded NUL characters.\n* http: fixed CVE-2019-9901 by normalizing HTTP paths prior to routing or L7 data plane processing.\n  This defaults off and is configurable via either HTTP connection manager :ref:`normalize_path\n  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.normalize_path>`\n  or the :ref:`runtime <config_http_conn_man_runtime_normalize_path>`.\n"
  },
  {
    "path": "docs/root/version_history/version_history.rst",
    "content": ".. _version_history:\n\nVersion history\n---------------\n\n.. toctree::\n  :titlesonly:\n\n  current\n  v1.15.2\n  v1.15.1\n  v1.15.0\n  v1.14.5\n  v1.14.4\n  v1.14.3\n  v1.14.2\n  v1.14.1\n  v1.14.0\n  v1.13.6\n  v1.13.5\n  v1.13.4\n  v1.13.3\n  v1.13.2\n  v1.13.1\n  v1.13.0\n  v1.12.7\n  v1.12.6\n  v1.12.5\n  v1.12.4\n  v1.12.3\n  v1.12.2\n  v1.12.1\n  v1.12.0\n  v1.11.2\n  v1.11.1\n  v1.11.0\n  v1.10.0\n  v1.9.1\n  v1.9.0\n  v1.8.0\n  v1.7.0\n  v1.6.0\n  v1.5.0\n  v1.4.0\n  v1.3.0\n  v1.2.0\n  v1.1.0\n  v1.0.0\n\n.. _deprecated:\n\nDeprecation Policy\n^^^^^^^^^^^^^^^^^^\n\nAs of release 1.3.0, Envoy will follow a\n`Breaking Change Policy <https://github.com/envoyproxy/envoy/blob/master//CONTRIBUTING.md#breaking-change-policy>`_.\n\nFeatures in the deprecated list for each version have been DEPRECATED\nand will be removed in the specified release cycle. A logged warning\nis expected for each deprecated item that is in deprecation window.\n"
  },
  {
    "path": "examples/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nfilegroup(\n    name = \"configs\",\n    srcs = glob(\n        [\n            \"**/*.yaml\",\n            \"**/*.lua\",\n        ],\n        exclude = [\n            \"cache/responses.yaml\",\n            \"jaeger-native-tracing/*\",\n            \"**/*docker-compose*.yaml\",\n        ],\n    ),\n)\n"
  },
  {
    "path": "examples/DEVELOPER.md",
    "content": "# Adding a sandbox example\n\n## Add a `verify.sh` test to your sandbox\n\nSandboxes are tested as part of the continuous integration process, which expects\neach sandbox to have a `verify.sh` script containing tests for the example.\n\n### Basic layout of the `verify.sh` script\n\nAt a minimum the `verify.sh` script should include the necessary parts to start\nand stop your sandbox.\n\nGiven a sandbox with a single `docker` composition, adding the following\nto `verify.sh` will test that the sandbox can be started and stopped.\n\n```bash\n#!/bin/bash -e\n\nexport NAME=example-sandbox\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n# add example tests here...\n\n```\n\nThe `$NAME` variable is used for logging when testing the example, and will\noften be the same as the directory name.\n\n### Log before running each test\n\nThere is a utility function `run_log` that can be used to indicate in test logs what\nis being executed and why, for example:\n\n```bash\nrun_log \"Checking foo.txt was created\"\nls foo.txt\n\nrun_log \"Checking bar.txt was created\"\nls bar.txt\n```\n\n### Add tests reflecting the documented examples\n\nThe tests should follow the steps laid out in the documentation.\n\nFor example, if the documentation provides a series of `bash` commands to execute, add these in order to `verify.sh`.\n\nYou may wish to grep the responses, or check return codes to ensure the commands respond as expected.\n\nLikewise, if the documentation asks the user to browse to a page - for example http://localhost:8000 -\nthen you should add a test to ensure that the given URL responds as expected.\n\nIf an example web page is also expected to make further JavaScript `HTTP` requests in order to function, then add\ntests for requests that mimick this interaction.\n\nA number of utility functions have been added to simplify browser testing.\n\n#### Utility functions: `responds_with`\n\nThe 
`responds_with` function can be used to ensure a request to a given URL responds with\nexpected `HTTP` content.\n\nIt follows the form `responds_with <expected_content> <url> [<curl_args>]`\n\nFor example, a simple `GET` request:\n\n```bash\nresponds_with \\\n    \"Hello, world\" \\\n    http://localhost:8000\n```\n\nThis is a more complicated example that uses an `HTTPS` `POST` request and sends some\nadditional headers:\n\n```bash\nresponds_with \\\n    \"Hello, postie\" \\\n    https://localhost:8000/some-endpoint \\\n    -k \\\n    -X POST \\\n    -d 'data=hello,rcpt=service' \\\n    -H 'Origin: https://example-service.com'\n```\n\n#### Utility functions: `responds_with_header`\n\nYou can check that a request responds with an expected header as follows:\n\n```bash\nresponds_with_header \\\n    \"HTTP/1.1 403 Forbidden\" \\\n    \"http://localhost:8000/?name=notdown\"\n```\n\n`responds_with_header` can accept additional curl arguments like `responds_with`\n\n#### Utility functions: `responds_without_header`\n\nYou can also check that a request *does not* respond with a given header:\n\n```bash\nresponds_without_header \\\n    \"X-Secret: treasure\" \\\n    \"http://localhost:8000\"\n```\n\n`responds_without_header` can accept additional curl arguments like `responds_with`\n\n### Slow starting `docker` compositions\n\nUnless your example provides a way for ensuring that all containers are healthy by\nthe time `docker-compose up -d` returns, you may need to add a `DELAY` before running\nthe steps in your `verify.sh`\n\nFor example, to wait 10 seconds after `docker-compose up -d` has been called, set the\nfollowing:\n\n```bash\n#!/bin/bash -e\n\nexport NAME=example-sandbox\nexport DELAY=10\n\n# shellcheck source=examples/verify-common.sh\n. 
\"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n# add example tests here...\n```\n\n### Examples with multiple `docker` compositions\n\nFor your example to work it may need more than one `docker` composition to be run.\n\nYou can set where to find the `docker-compose.yaml` files with the `PATHS` argument.\n\nBy default `PATHS=.`, but you can change this to a comma-separated list of paths.\n\nFor example a sandbox containing `frontend/docker-compose.yaml` and `backend/docker-compose.yaml`,\nmight use a `verify.sh` with:\n\n```bash\n#!/bin/bash -e\n\nexport NAME=example-sandbox\nexport PATHS=frontend,backend\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n# add example tests here...\n```\n\n### Bringing stacks up manually\n\nYou may need to bring up the stack manually, in order to run some steps beforehand.\n\nSourcing `verify-common.sh` will always leave you in the sandbox directory, and from there\nyou can use the `bring_up_example` function.\n\nFor example:\n\n```bash\n#!/bin/bash -e\n\nexport NAME=example-sandbox\nexport MANUAL=true\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\nrun_log \"Creating bar.txt before starting containers\"\necho foo > bar.txt\n\nbring_up_example\n\n# add example tests here...\n```\n\nIf your sandbox has multiple compositions, and uses the `$PATHS` env var described above,\n`bring_up_example` will bring all of your compositions up.\n\n### Additional arguments to `docker-compose up -d`\n\nIf you need to pass additional arguments to compose you can set the `UPARGS`\nenv var.\n\nFor example, to scale a composition with a service named `http_service`, you\nshould add the following:\n\n```bash\n#!/bin/bash -e\n\nexport NAME=example-sandbox\nexport UPARGS=\"--scale http_service=2\"\n\n# shellcheck source=examples/verify-common.sh\n. 
\"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n# add example tests here...\n```\n\n### Running commands inside `docker` containers\n\nIf your example asks the user to run commands inside containers, you can\nmimic this using `docker-compose exec -T`. The `-T` flag is necessary as the\ntests do not have access to a `tty` in the CI pipeline.\n\n### Note on permissions and configuration\n\nThe sandbox tests are run with a `umask` setting of `027` to ensure they will run in environments\nwhere this is the case.\n\nAs the Envoy containers run as non-root, it is essential that any configurations required\nby the daemon are included in the relevant example `Dockerfile` rather than mounted in\nany `docker-compose.yaml` files.\n\nThe Docker recipe should also ensure that added configurations are world-readable.\n\nFor example, with an added configuration file named `front-envoy.yaml`, you should add\nthe following in the Docker recipe:\n\n```\nRUN chmod go+r /etc/front-envoy.yaml\n```\n\n## Sandbox configuration tests\n\nExample configuration files are tested to ensure they are valid and well-formed, and do\nnot contain deprecated features.\n\n### Exclude configs from example configuration tests\n\nThe CI script searches for all files in the examples folders with a `yaml` or `lua` extension.\n\nThese files are bundled into a test and the `yaml` files are used to try to start an Envoy server.\n\nIf your example includes `yaml` files that are either not Envoy configuration, or for some reason\ncannot be tested in this way, you should add the files to the `exclude` list in the `filegroup.srcs`\nsection of the `examples/BUILD` file.\n\nThe `exclude` patterns are evaluated as `globs` in the context of the `examples` folder.\n\n\n## Verifying your sandbox\n\nOnce you have built your sandbox, and added the `verify.sh` script you can run it directly in the\nsandbox folder.\n\nFor example:\n\n```\ncd examples/example-sandbox\n./verify.sh\n\n```\n\nYou should see the 
docker composition brought up, your tests run, and the composition brought down again.\n\nThe script should exit with `0` for the tests to pass.\n\n\n## Verifying multiple/all sandboxes\n\nIn continuous integration, all of the sandboxes are checked using the `ci/verify-examples.sh`.\n\nThis can also be called with a filter argument, which is a `glob` evaluated in the context of the `examples` folder.\n\nFor example, to run all sandboxes with names beginning `jaeger`:\n\n```\n./ci/verify-examples.sh jaeger*\n```\n\n---\n\n**NOTE**\n\nYou can use this script locally to test the sandboxes on your platform, but you should be aware that it requires\na lot of resources as it downloads and builds many Docker images, and then runs them in turn.\n\n---\n\nOne way to run the tests in an isolated environment is to mount the `envoy` source into a `docker-in-docker` container\nor similar, and then run the script from inside that container.\n"
  },
  {
    "path": "examples/cache/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./front-envoy.yaml /etc/front-envoy.yaml\nRUN chmod go+r /etc/front-envoy.yaml\nCMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy\n"
  },
  {
    "path": "examples/cache/Dockerfile-service",
    "content": "FROM envoyproxy/envoy-alpine-dev:latest\n\nRUN apk update && apk add py3-pip bash curl\nRUN pip3 install -q Flask==0.11.1 requests==2.18.4 pyyaml\nRUN mkdir /code\nCOPY ./start_service.sh /usr/local/bin/start_service.sh\nCOPY ./service-envoy.yaml /etc/service-envoy.yaml\nCOPY ./service.py /code\nRUN chmod u+x /usr/local/bin/start_service.sh\nENTRYPOINT /usr/local/bin/start_service.sh\n"
  },
  {
    "path": "examples/cache/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/http_cache.html)\n"
  },
  {
    "path": "examples/cache/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8000:8000\"\n      - \"8001:8001\"\n    environment:\n      - ENVOY_UID=0\n\n  service1:\n    build:\n      context: .\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./responses.yaml:/etc/responses.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service1\n    environment:\n      - SERVICE_NAME=1\n    expose:\n      - \"8000\"\n\n  service2:\n    build:\n      context: .\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./responses.yaml:/etc/responses.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service2\n    environment:\n      - SERVICE_NAME=2\n    expose:\n      - \"8000\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/cache/front-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: backend\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/service/1\"\n                route:\n                  cluster: service1\n              - match:\n                  prefix: \"/service/2\"\n                route:\n                  cluster: service2\n          http_filters:\n          - name: \"envoy.filters.http.cache\"\n            typed_config:\n              \"@type\": \"type.googleapis.com/envoy.extensions.filters.http.cache.v3alpha.CacheConfig\"\n              typed_config:\n                \"@type\": \"type.googleapis.com/envoy.source.extensions.filters.http.cache.SimpleHttpCacheConfig\"\n          - name: envoy.filters.http.router\n            typed_config: {}\n\n  clusters:\n  - name: service1\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service1\n                port_value: 8000\n  - name: service2\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service2\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n           
     address: service2\n                port_value: 8000\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/cache/responses.yaml",
    "content": "valid-for-minute:\n  body: This response will stay fresh for one minute\n  headers:\n    cache-control: max-age=60\n    custom-header: any value\nprivate:\n  body: This is a private response, it will not be cached by Envoy\n  headers:\n    cache-control: private\nno-cache:\n  body: This response can be cached, but it has to be validated on each request\n  headers:\n    cache-control: max-age=0, no-cache\n"
  },
  {
    "path": "examples/cache/service-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/service\"\n                route:\n                  cluster: local_service\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8081\n"
  },
  {
    "path": "examples/cache/service.py",
    "content": "from flask import Flask\nfrom flask import request\nfrom flask import make_response, abort\nimport yaml\nimport os\nimport requests\nimport socket\nimport sys\nimport datetime\n\napp = Flask(__name__)\n\n\n@app.route('/service/<service_number>/<response_id>')\ndef get(service_number, response_id):\n  stored_response = yaml.load(open('/etc/responses.yaml', 'r')).get(response_id)\n\n  if stored_response is None:\n    abort(404, 'No response found with the given id')\n\n  response = make_response(stored_response.get('body') + '\\n')\n  if stored_response.get('headers'):\n    response.headers.update(stored_response.get('headers'))\n\n  # Generate etag header\n  response.add_etag()\n\n  # Append the date of response generation\n  body_with_date = \"{}\\nResponse generated at: {}\\n\".format(\n      response.get_data(as_text=True),\n      datetime.datetime.utcnow().strftime(\"%a, %d %b %Y %H:%M:%S GMT\"))\n\n  response.set_data(body_with_date)\n\n  # response.make_conditional() will change the response to a 304 response\n  # if a 'if-none-match' header exists in the request and matches the etag\n  return response.make_conditional(request)\n\n\nif __name__ == \"__main__\":\n  if not os.path.isfile('/etc/responses.yaml'):\n    print('Responses file not found at /etc/responses.yaml')\n    exit(1)\n  app.run(host='127.0.0.1', port=8080, debug=True)\n"
  },
  {
    "path": "examples/cache/start_service.sh",
    "content": "#!/bin/sh\npython3 /code/service.py &\nenvoy -c /etc/service-envoy.yaml --service-cluster \"service${SERVICE_NAME}\"\n"
  },
  {
    "path": "examples/cache/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=cache\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\ncheck_validated() {\n    # Get the date header and the response generation timestamp\n    local _dates dates\n    _dates=$(grep -oP '\\d\\d:\\d\\d:\\d\\d' <<< \"$1\")\n    while read -r line; do dates+=(\"$line\"); done \\\n\t<<< \"$_dates\"\n    # Make sure they are different\n    if [[ ${dates[0]} == \"${dates[1]}\" ]]; then\n       echo \"ERROR: validated responses should have a date AFTER the generation timestamp\" >&2\n       return 1\n    fi\n    # Make sure there is no age header\n    if grep -q \"age:\" <<< \"$1\"; then\n        echo \"ERROR: validated responses should not have an age header\" >&2\n        return 1\n    fi\n}\n\ncheck_cached() {\n    # Make sure there is an age header\n    if ! grep -q \"age:\" <<< \"$1\"; then\n        echo \"ERROR: cached responses should have an age header\" >&2\n        return 1\n    fi\n}\n\ncheck_from_origin() {\n    # Get the date header and the response generation timestamp\n    local _dates dates\n    _dates=$(grep -oP '\\d\\d:\\d\\d:\\d\\d' <<< \"$1\")\n    while read -r line; do dates+=(\"$line\"); done \\\n\t<<< \"$_dates\"\n    # Make sure they are equal\n    if [[ ${dates[0]} != \"${dates[1]}\" ]]; then\n       echo \"ERROR: responses from origin should have a date equal to the generation timestamp\" >&2\n       return 1\n    fi\n    # Make sure there is no age header\n    if grep -q \"age:\" <<< \"$1\" ; then\n        echo \"ERROR: responses from origin should not have an age header\" >&2\n        return 1\n    fi\n}\n\n\nrun_log \"Valid-for-minute: First request should be served by the origin\"\nresponse=$(curl -si localhost:8000/service/1/valid-for-minute)\ncheck_from_origin \"$response\"\n\nrun_log \"Snooze for 30 seconds\"\nsleep 30\n\nrun_log \"Valid-for-minute: Second request should be served from cache\"\nresponse=$(curl -si 
localhost:8000/service/1/valid-for-minute)\ncheck_cached \"$response\"\n\nrun_log \"Snooze for 31 more seconds\"\nsleep 31\n\nrun_log \"Valid-for-minute: More than a minute has passed, this request should get a validated response\"\nresponse=$(curl -si localhost:8000/service/1/valid-for-minute)\ncheck_validated \"$response\"\n\nrun_log \"Private: Make 4 requests make sure they are all served by the origin\"\nfor _ in {0..3}\ndo\n    response=$(curl -si localhost:8000/service/1/private)\n    check_from_origin \"$response\"\ndone\n\nrun_log \"No-cache: First request should be served by the origin\"\nresponse=$(curl -si localhost:8000/service/1/no-cache)\ncheck_from_origin \"$response\"\n\nrun_log \"No-cache: Make 4 more requests and make sure they are all validated before being served from cache\"\nfor _ in {0..3}\ndo\n    sleep 1\n    response=$(curl -si localhost:8000/service/1/no-cache)\n    check_validated \"$response\"\ndone\n"
  },
  {
    "path": "examples/cors/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [Envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/cors.html).\n"
  },
  {
    "path": "examples/cors/backend/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./front-envoy.yaml /etc/front-envoy.yaml\nRUN chmod go+r /etc/front-envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/front-envoy.yaml\", \"--service-cluster\", \"front-proxy\"]\n"
  },
  {
    "path": "examples/cors/backend/Dockerfile-service",
    "content": "FROM envoyproxy/envoy-alpine-dev:latest\n\nRUN apk update && apk add py3-pip bash\nRUN pip3 install -q Flask==0.11.1\nRUN mkdir /code\nADD ./service.py /code/\nADD ./start_service.sh /usr/local/bin/start_service.sh\nRUN chmod u+x /usr/local/bin/start_service.sh\nENTRYPOINT [\"/bin/sh\", \"/usr/local/bin/start_service.sh\"]\n"
  },
  {
    "path": "examples/cors/backend/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8002:8000\"\n      - \"8003:8001\"\n\n  backend-service:\n    build:\n      context: .\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./service-envoy.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - backendservice\n    expose:\n      - \"8000\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/cors/backend/front-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          access_log:\n            - name: envoy.access_loggers.file\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n                path: /dev/stdout\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: www\n              domains:\n              - \"*\"\n              cors:\n                allow_origin_string_match:\n                - safe_regex:\n                    google_re2: {}\n                    regex: \\*\n                allow_methods: \"GET\"\n                filter_enabled:\n                  default_value:\n                    numerator: 100\n                    denominator: HUNDRED\n                  runtime_key: cors.www.enabled\n                shadow_enabled:\n                  default_value:\n                    numerator: 0\n                    denominator: HUNDRED\n                  runtime_key: cors.www.shadow_enabled\n              routes:\n              - match:\n                  prefix: \"/cors/open\"\n                route:\n                  cluster: backend_service\n              - match:\n                  prefix: \"/cors/disabled\"\n                route:\n                  cluster: backend_service\n                  cors:\n                    filter_enabled:\n                      default_value:\n                        numerator: 0\n                        denominator: HUNDRED\n              - match:\n                  prefix: \"/cors/restricted\"\n                route:\n        
          cluster: backend_service\n                  cors:\n                    allow_origin_string_match:\n                    - safe_regex:\n                        google_re2: {}\n                        regex: .*\\.envoyproxy\\.io\n                    allow_methods: \"GET\"\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: backend_service\n          http_filters:\n          - name: envoy.filters.http.cors\n            typed_config: {}\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: backend_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: backend_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: backendservice\n                port_value: 8000\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/cors/backend/service-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: local_service\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8081\n"
  },
  {
    "path": "examples/cors/backend/service.py",
    "content": "from flask import Flask, request, send_from_directory\nimport os\n\napp = Flask(__name__)\n\n\n@app.route('/cors/<status>')\ndef cors_enabled(status):\n  return 'Success!'\n\n\nif __name__ == \"__main__\":\n  app.run(host='127.0.0.1', port=8080, debug=True)\n"
  },
  {
    "path": "examples/cors/backend/start_service.sh",
    "content": "#!/bin/sh\npython3 /code/service.py &\nenvoy -c /etc/service-envoy.yaml --service-cluster backend-service\n"
  },
  {
    "path": "examples/cors/frontend/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./front-envoy.yaml /etc/front-envoy.yaml\nRUN chmod go+r /etc/front-envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/front-envoy.yaml\", \"--service-cluster\", \"front-proxy\"]\n"
  },
  {
    "path": "examples/cors/frontend/Dockerfile-service",
    "content": "FROM envoyproxy/envoy-alpine-dev:latest\n\nRUN apk update && apk add py3-pip bash\nRUN pip3 install -q Flask==0.11.1\nRUN mkdir /code\nADD ./service.py ./index.html /code/\nADD ./start_service.sh /usr/local/bin/start_service.sh\nRUN chmod u+x /usr/local/bin/start_service.sh\nENTRYPOINT [\"/bin/sh\", \"/usr/local/bin/start_service.sh\"]\n"
  },
  {
    "path": "examples/cors/frontend/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8000:8000\"\n      - \"8001:8001\"\n\n  frontend-service:\n    build:\n      context: .\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./service-envoy.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - frontendservice\n    expose:\n      - \"8000\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/cors/frontend/front-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          access_log:\n            - name: envoy.access_loggers.file\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n                path: /dev/stdout\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: services\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: frontend_service\n          http_filters:\n          - name: envoy.filters.http.cors\n            typed_config: {}\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: frontend_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: frontend_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: frontendservice\n                port_value: 8000\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/cors/frontend/index.html",
    "content": "<!DOCTYPE html>\n<html>\n<head>\n    <title>Envoy CORS Webpage</title>\n    <link rel=\"shortcut icon\" href=\"https://www.envoyproxy.io/img/favicon.ico\">\n    <script type=\"text/javascript\">\n        var client = new XMLHttpRequest();\n        var resultText;\n\n        function invokeRemoteDomain() {\n            var remoteIP = document.getElementById(\"remoteip\").value;\n            var enforcement = document.querySelector('input[name=\"cors\"]:checked').value;\n            if(client) {\n                var url = `http://${remoteIP}:8002/cors/${enforcement}`;\n                client.open('GET', url, true);\n                client.onreadystatechange = handler;\n                client.send();\n            } else {\n                resultText = \"Could not find client to make request.\";\n                document.getElementById(\"results\").textContent = resultText;\n            }\n        }\n\n        function handler() {\n            var responseHeaders = client.getAllResponseHeaders()\n            if (responseHeaders === \"\") {\n                document.getElementById(\"results\").textContent = 'CORS Error';\n            }\n            if (client.readyState == 4 && client.status == 200) {\n                resultText = client.responseText;\n                document.getElementById(\"results\").textContent = resultText;\n            }\n        }\n    </script>\n</head>\n<body>\n    <h1>\n        Envoy CORS Demo\n    </h1>\n    <p>\n        This page requests an asset from another domain via cross-site XMLHttpRequest mitigated by Access Control.<br/>\n        This scenario demonstrates a <a href=\"https://www.w3.org/TR/cors/#simple-method\">simple method</a>.<br/>\n        It does <b>NOT</b> dispatch a preflight request.\n    </p>\n    <p>\n        Enter the IP address of backend Docker container. 
As we are running Docker Compose this should just be localhost.<br/>\n    </p>\n    <div>\n        <input id=\"remoteip\" type=\"text\" placeholder=\"Remote IP\" value=\"localhost\"/>\n        <button id=\"submit\" onclick=\"invokeRemoteDomain()\">Fetch asset</button><br/>\n        <div style=\"width:20%;float:left;\">\n            <h5>CORS Enforcement</h5>\n            <input type=\"radio\" name=\"cors\" value=\"disabled\" checked=\"checked\"/> Disabled<br/>\n            <input type=\"radio\" name=\"cors\" value=\"open\"/> Open<br/>\n            <input type=\"radio\" name=\"cors\" value=\"restricted\"/> Restricted<br/>\n            <br/>\n        </div>\n        <div style=\"float:left;\">\n            <h3>Request Results</h3>\n            <p id=\"results\"></p>\n        </div>\n    </div>\n</body>\n<script>\n    var input = document.getElementById(\"remoteip\");\n    input.addEventListener(\"keyup\", function(event) {\n        event.preventDefault();\n        if (event.keyCode === 13) {\n            document.getElementById(\"submit\").click();\n        }\n    });\n</script>\n</html>\n"
  },
  {
    "path": "examples/cors/frontend/service-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: local_service\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8081\n"
  },
  {
    "path": "examples/cors/frontend/service.py",
    "content": "from flask import Flask, send_from_directory\nimport os\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n  file_dir = os.path.dirname(os.path.realpath(__file__))\n  return send_from_directory(file_dir, 'index.html')\n\n\nif __name__ == \"__main__\":\n  app.run(host='127.0.0.1', port=8080, debug=True)\n"
  },
  {
    "path": "examples/cors/frontend/start_service.sh",
    "content": "#!/bin/sh\npython3 /code/service.py &\nenvoy -c /etc/service-envoy.yaml --service-cluster frontend-service\n"
  },
  {
    "path": "examples/cors/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=cors\nexport PATHS=frontend,backend\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test service\"\nresponds_with \\\n    \"Envoy CORS Webpage\" \\\n    http://localhost:8000\n\nrun_log \"Test cors server: disabled\"\nresponds_with \\\n    Success \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8002/cors/disabled\nresponds_without_header \\\n    access-control-allow-origin \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8002/cors/disabled\n\nrun_log \"Test cors server: open\"\nresponds_with \\\n    Success \\\n    -H 'Origin: http://example.com' \\\n    http://localhost:8002/cors/open\nresponds_with_header \\\n    \"access-control-allow-origin: http://example.com\" \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8002/cors/open\n\nrun_log \"Test cors server: restricted\"\nresponds_with \\\n    Success \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8002/cors/restricted\nresponds_without_header \\\n    access-control-allow-origin \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8002/cors/restricted\nresponds_with_header \\\n    \"access-control-allow-origin: http://foo.envoyproxy.io\" \\\n    -H \"Origin: http://foo.envoyproxy.io\" \\\n    http://localhost:8002/cors/restricted\n"
  },
  {
    "path": "examples/csrf/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [Envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/csrf.html).\n"
  },
  {
    "path": "examples/csrf/crosssite/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./front-envoy.yaml /etc/front-envoy.yaml\nRUN chmod go+r /etc/front-envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/front-envoy.yaml\", \"--service-cluster\", \"front-proxy\"]\n"
  },
  {
    "path": "examples/csrf/crosssite/Dockerfile-service",
    "content": "FROM envoyproxy/envoy-alpine-dev:latest\n\nRUN apk update && apk add py3-pip bash\nRUN pip3 install -q Flask==0.11.1\nRUN mkdir /code\nADD ./crosssite/service.py ./index.html /code/\nADD ./start_service.sh /usr/local/bin/start_service.sh\nRUN chmod u+x /usr/local/bin/start_service.sh\nENTRYPOINT [\"/bin/sh\", \"/usr/local/bin/start_service.sh\"]\n"
  },
  {
    "path": "examples/csrf/crosssite/docker-compose.yml",
    "content": "version: '3.7'\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8002:8000\"\n      - \"8003:8001\"\n\n  service:\n    build:\n      context: ..\n      dockerfile: crosssite/Dockerfile-service\n    volumes:\n      - ../service-envoy.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service\n    expose:\n      - \"8000\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/csrf/crosssite/front-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          access_log:\n            - name: envoy.access_loggers.file\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n                path: \"/dev/stdout\"\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: www\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: generic_service\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: generic_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: generic_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service\n                port_value: 8000\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/csrf/crosssite/service.py",
    "content": "import os\n\nfrom flask import Flask, send_from_directory\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.route('/', methods=['GET'])\ndef index():\n  file_dir = os.path.dirname(os.path.realpath(__file__))\n  return send_from_directory(file_dir, 'index.html')\n\n\nif __name__ == \"__main__\":\n  app.run(host='127.0.0.1', port=8080, debug=True)\n"
  },
  {
    "path": "examples/csrf/index.html",
    "content": "<!DOCTYPE html>\n<html>\n<head>\n    <title>Envoy CSRF Webpage</title>\n    <link rel=\"shortcut icon\" href=\"https://www.envoyproxy.io/img/favicon.ico\">\n    <script type=\"text/javascript\">\n        var client = new XMLHttpRequest();\n        var resultText;\n\n        function submitToDomain() {\n            var remoteIP = document.getElementById(\"destinationip\").value;\n            var enforcement = document.querySelector('input[name=\"csrf\"]:checked').value;\n            var method = enforcement !== 'ignored' ? 'POST' : 'GET';\n            if(client) {\n                var url = `http://${remoteIP}:8000/csrf/${enforcement}`;\n                client.open(method, url, true);\n                client.onreadystatechange = handler;\n                client.send();\n            } else {\n                resultText = \"Could not find client to make request.\";\n                document.getElementById(\"results\").textContent = resultText;\n            }\n        }\n\n        function handler() {\n            var responseCode = client.status;\n            if (client.readyState == 4 && responseCode == 403) {\n                resultText = 'Rejected by CSRF';\n            }\n            else if (client.readyState == 4 && responseCode == 200) {\n                resultText = client.responseText;\n            }\n            else if (client.readyState == 4) {\n                resultText = 'Unknown Error. 
Check the console.';\n            }\n            document.getElementById(\"results\").textContent = resultText;\n        }\n    </script>\n</head>\n<body>\n    <h1>\n        Envoy CSRF Demo\n    </h1>\n    <p>\n        This page demonstrates a few scenarios for CSRF.\n    </p>\n    <p>\n        Enter the IP address of the destination Docker container.<br/>\n    </p>\n    <div>\n        <input id=\"destinationip\" type=\"text\" placeholder=\"Destination IP\" value=\"localhost\"/>\n        <button id=\"submit\" onclick=\"submitToDomain()\">Post to destination</button><br/>\n        <div style=\"width:20%;float:left;\">\n            <h5>CSRF Enforcement</h5>\n            <input type=\"radio\" name=\"csrf\" value=\"disabled\" checked=\"checked\"/> Disabled<br/>\n            <input type=\"radio\" name=\"csrf\" value=\"shadow\"/> Shadow Mode<br/>\n            <input type=\"radio\" name=\"csrf\" value=\"enabled\"/> Enabled<br/>\n            <input type=\"radio\" name=\"csrf\" value=\"ignored\"/> Ignored<br/>\n            <input type=\"radio\" name=\"csrf\" value=\"additional_origin\"/> Additional Origin<br/>\n            <br/>\n        </div>\n        <div style=\"float:left;\">\n            <h3>Request Results</h3>\n            <p id=\"results\"></p>\n        </div>\n    </div>\n</body>\n<script>\n    var input = document.getElementById(\"destinationip\");\n    if (input) {\n        input.addEventListener(\"keyup\", function(event) {\n            event.preventDefault();\n            if (event.keyCode === 13) {\n                document.getElementById(\"submit\").click();\n            }\n        });\n    }\n</script>\n</html>\n"
  },
  {
    "path": "examples/csrf/samesite/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./front-envoy.yaml /etc/front-envoy.yaml\nRUN chmod go+r /etc/front-envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\",  \"/etc/front-envoy.yaml\", \"--service-cluster\", \"front-proxy\"]\n"
  },
  {
    "path": "examples/csrf/samesite/Dockerfile-service",
    "content": "FROM envoyproxy/envoy-alpine-dev:latest\n\nRUN apk update && apk add py3-pip bash\nRUN pip3 install -q Flask==0.11.1\nRUN mkdir /code\nADD ./samesite/service.py ./index.html /code/\nADD ./start_service.sh /usr/local/bin/start_service.sh\nRUN chmod u+x /usr/local/bin/start_service.sh\nENTRYPOINT [\"/bin/sh\", \"/usr/local/bin/start_service.sh\"]\n"
  },
  {
    "path": "examples/csrf/samesite/docker-compose.yml",
    "content": "version: '3.7'\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8000:8000\"\n      - \"8001:8001\"\n\n  service:\n    build:\n      context: ..\n      dockerfile: samesite/Dockerfile-service\n    volumes:\n      - ../service-envoy.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service\n    expose:\n      - \"8000\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/csrf/samesite/front-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          access_log:\n            - name: envoy.access_loggers.file\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n                path: \"/dev/stdout\"\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: www\n              domains:\n              - \"*\"\n              cors:\n                allow_origin_string_match:\n                - safe_regex:\n                    google_re2: {}\n                    regex: \\*\n                filter_enabled:\n                  default_value:\n                    numerator: 100\n                    denominator: HUNDRED\n              typed_per_filter_config:\n                envoy.filters.http.csrf:\n                  \"@type\": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy\n                  filter_enabled:\n                    default_value:\n                      numerator: 100\n                      denominator: HUNDRED\n                    runtime_key: csrf.www.enabled\n                  shadow_enabled:\n                    default_value:\n                      numerator: 0\n                      denominator: HUNDRED\n                    runtime_key: csrf.www.shadow_enabled\n              routes:\n              - match:\n                  prefix: \"/csrf/disabled\"\n                route:\n                  cluster: generic_service\n                typed_per_filter_config:\n                  envoy.filters.http.csrf:\n                    
\"@type\": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy\n                    filter_enabled:\n                      default_value:\n                        numerator: 0\n                        denominator: HUNDRED\n              - match:\n                  prefix: \"/csrf/shadow\"\n                route:\n                  cluster: generic_service\n                typed_per_filter_config:\n                  envoy.filters.http.csrf:\n                    \"@type\": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy\n                    filter_enabled:\n                      default_value:\n                        numerator: 0\n                        denominator: HUNDRED\n                    shadow_enabled:\n                      default_value:\n                        numerator: 100\n                        denominator: HUNDRED\n              - match:\n                  prefix: \"/csrf/additional_origin\"\n                route:\n                  cluster: generic_service\n                typed_per_filter_config:\n                  envoy.filters.http.csrf:\n                    \"@type\": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy\n                    filter_enabled:\n                      default_value:\n                        numerator: 100\n                        denominator: HUNDRED\n                    additional_origins:\n                    - safe_regex:\n                        google_re2: {}\n                        regex: .*\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: generic_service\n          http_filters:\n          - name: envoy.filters.http.cors\n            typed_config: {}\n          - name: envoy.filters.http.csrf\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy\n              filter_enabled:\n                default_value:\n                  
numerator: 0\n                  denominator: HUNDRED\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: generic_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: generic_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service\n                port_value: 8000\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/csrf/samesite/service.py",
    "content": "import os\n\nfrom flask import Flask, send_from_directory\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.route('/csrf/ignored', methods=['GET'])\ndef csrf_ignored():\n  return 'Success!'\n\n\n@app.route('/csrf/<status>', methods=['POST'])\ndef csrf_with_status(status):\n  return 'Success!'\n\n\n@app.route('/', methods=['GET'])\ndef index():\n  file_dir = os.path.dirname(os.path.realpath(__file__))\n  return send_from_directory(file_dir, 'index.html')\n\n\nif __name__ == \"__main__\":\n  app.run(host='127.0.0.1', port=8080, debug=True)\n"
  },
  {
    "path": "examples/csrf/service-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: local_service\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8081\n"
  },
  {
    "path": "examples/csrf/start_service.sh",
    "content": "#!/bin/sh\npython3 /code/service.py &\nenvoy -c /etc/service-envoy.yaml --service-cluster service\n"
  },
  {
    "path": "examples/csrf/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=csrf\nexport PATHS=samesite,crosssite\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test services\"\nresponds_with \\\n    \"Envoy CSRF Demo\" \\\n    http://localhost:8002\nresponds_with \\\n    \"Envoy CSRF Demo\" \\\n    http://localhost:8000\n\nrun_log \"Test stats server\"\nresponds_with \\\n    \":\" \\\n    http://localhost:8001/stats\n\nrun_log \"Test csrf server: disabled\"\nresponds_with \\\n    Success \\\n    -X POST \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8000/csrf/disabled\nresponds_with_header \\\n    \"access-control-allow-origin: http://example.com\" \\\n    -X POST \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8000/csrf/disabled\n\nrun_log \"Test csrf server: shadow\"\nresponds_with \\\n    Success \\\n    -X POST \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8000/csrf/shadow\nresponds_with_header \\\n    \"access-control-allow-origin: http://example.com\" \\\n    -X POST \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8000/csrf/shadow\n\nrun_log \"Test csrf server: enabled\"\nresponds_with \\\n    \"Invalid origin\" \\\n    -X POST \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8000/csrf/enabled\nresponds_with_header \\\n    \"HTTP/1.1 403 Forbidden\" \\\n    -X POST \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8000/csrf/enabled\n\nrun_log \"Test csrf server: additional_origin\"\nresponds_with \\\n    Success \\\n    -X POST \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8000/csrf/additional_origin\nresponds_with_header \\\n    \"access-control-allow-origin: http://example.com\" \\\n    -X POST \\\n    -H \"Origin: http://example.com\" \\\n    http://localhost:8000/csrf/additional_origin\n"
  },
  {
    "path": "examples/ext_authz/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./config /etc/envoy-config\nCOPY ./run_envoy.sh /run_envoy.sh\nRUN chmod go+r -R /etc/envoy-config \\\n    && chmod go+rx /run_envoy.sh /etc/envoy-config /etc/envoy-config/*\nCMD [\"/bin/sh\", \"/run_envoy.sh\"]\n"
  },
  {
    "path": "examples/ext_authz/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/ext_authz)\n"
  },
  {
    "path": "examples/ext_authz/auth/grpc-service/Dockerfile",
    "content": "FROM golang:alpine AS builder\n\nRUN apk --no-cache add make\nCOPY . /app\nRUN make -C /app/grpc-service\n\nFROM alpine\n\nCOPY --from=builder /app/grpc-service/server /app/server\nCMD [\"/app/server\", \"-users\", \"/etc/users.json\"]\n"
  },
  {
    "path": "examples/ext_authz/auth/grpc-service/Makefile",
    "content": "all: server\n\nserver:\n\t@CGO_ENABLED=0 GOOS=linux go build -a --ldflags '-extldflags \"-static\"' \\\n\t\t-tags \"netgo\" -installsuffix netgo \\\n\t\t-o server\nclean:\n\t@rm -fr server\n"
  },
  {
    "path": "examples/ext_authz/auth/grpc-service/go.mod",
    "content": "module github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service\n\ngo 1.14\n\nrequire (\n\tgithub.com/envoyproxy/go-control-plane v0.9.5\n\tgithub.com/golang/protobuf v1.3.2\n\tgoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55\n\tgoogle.golang.org/grpc v1.25.1\n)\n"
  },
  {
    "path": "examples/ext_authz/auth/grpc-service/go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU=\ngithub.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=\ngithub.com/envoyproxy/go-control-plane v0.9.0 h1:67WMNTvGrl7V1dWdKCeTwxDr7nio9clKoTlLhwIPnT4=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU=\ngithub.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod 
h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=\ngithub.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net 
v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=\ngoogle.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\n"
  },
  {
    "path": "examples/ext_authz/auth/grpc-service/main.go",
    "content": "package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\n\tenvoy_service_auth_v2 \"github.com/envoyproxy/go-control-plane/envoy/service/auth/v2\"\n\tenvoy_service_auth_v3 \"github.com/envoyproxy/go-control-plane/envoy/service/auth/v3\"\n\t\"google.golang.org/grpc\"\n\n\t\"github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth\"\n\tauth_v2 \"github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth/v2\"\n\tauth_v3 \"github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth/v3\"\n)\n\nfunc main() {\n\tport := flag.Int(\"port\", 9001, \"gRPC port\")\n\tdata := flag.String(\"users\", \"../../users.json\", \"users file\")\n\n\tflag.Parse()\n\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", *port))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen to %d: %v\", *port, err)\n\t}\n\n\tusers, err := auth.LoadUsers(*data)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to load user data:%s %v\", *data, err)\n\t}\n\tgs := grpc.NewServer()\n\n\t// Serve v3 and v2.\n\tenvoy_service_auth_v3.RegisterAuthorizationServer(gs, auth_v3.New(users))\n\tenvoy_service_auth_v2.RegisterAuthorizationServer(gs, auth_v2.New(users))\n\n\tlog.Printf(\"starting gRPC server on: %d\\n\", *port)\n\n\tgs.Serve(lis)\n}\n"
  },
  {
    "path": "examples/ext_authz/auth/grpc-service/pkg/auth/users.go",
    "content": "package auth\n\nimport (\n\t\"encoding/json\"\n\t\"io/ioutil\"\n)\n\n// Users holds a list of users.\ntype Users map[string]string\n\n// Check checks if a key could retrieve a user from a list of users.\nfunc (u Users) Check(key string) (bool, string) {\n\tvalue, ok := u[key]\n\tif !ok {\n\t\treturn false, \"\"\n\t}\n\treturn ok, value\n}\n\n// LoadUsers load users data from a JSON file.\nfunc LoadUsers(jsonFile string) (Users, error) {\n\tvar users Users\n\tdata, err := ioutil.ReadFile(jsonFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := json.Unmarshal(data, &users); err != nil {\n\t\treturn nil, err\n\t}\n\treturn users, nil\n}\n"
  },
  {
    "path": "examples/ext_authz/auth/grpc-service/pkg/auth/v2/auth.go",
    "content": "package v2\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"strings\"\n\n\tenvoy_api_v2_core \"github.com/envoyproxy/go-control-plane/envoy/api/v2/core\"\n\tenvoy_service_auth_v2 \"github.com/envoyproxy/go-control-plane/envoy/service/auth/v2\"\n\t\"github.com/golang/protobuf/ptypes/wrappers\"\n\t\"google.golang.org/genproto/googleapis/rpc/code\"\n\t\"google.golang.org/genproto/googleapis/rpc/status\"\n\n\t\"github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth\"\n)\n\ntype server struct {\n\tusers auth.Users\n}\n\nvar _ envoy_service_auth_v2.AuthorizationServer = &server{}\n\n// New creates a new authorization server.\nfunc New(users auth.Users) envoy_service_auth_v2.AuthorizationServer {\n\treturn &server{users}\n}\n\n// Check implements authorization's Check interface which performs authorization check based on the\n// attributes associated with the incoming request.\nfunc (s *server) Check(\n\tctx context.Context,\n\treq *envoy_service_auth_v2.CheckRequest) (*envoy_service_auth_v2.CheckResponse, error) {\n\tauthorization := req.Attributes.Request.Http.Headers[\"authorization\"]\n\tlog.Println(authorization)\n\n\textracted := strings.Fields(authorization)\n\tif len(extracted) == 2 && extracted[0] == \"Bearer\" {\n\t\tvalid, user := s.users.Check(extracted[1])\n\t\tif valid {\n\t\t\treturn &envoy_service_auth_v2.CheckResponse{\n\t\t\t\tHttpResponse: &envoy_service_auth_v2.CheckResponse_OkResponse{\n\t\t\t\t\tOkResponse: &envoy_service_auth_v2.OkHttpResponse{\n\t\t\t\t\t\tHeaders: []*envoy_api_v2_core.HeaderValueOption{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAppend: &wrappers.BoolValue{Value: false},\n\t\t\t\t\t\t\t\tHeader: &envoy_api_v2_core.HeaderValue{\n\t\t\t\t\t\t\t\t\t// For a successful request, the authorization server sets the\n\t\t\t\t\t\t\t\t\t// x-current-user value.\n\t\t\t\t\t\t\t\t\tKey:   \"x-current-user\",\n\t\t\t\t\t\t\t\t\tValue: 
user,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tStatus: &status.Status{\n\t\t\t\t\tCode: int32(code.Code_OK),\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t}\n\n\treturn &envoy_service_auth_v2.CheckResponse{\n\t\tStatus: &status.Status{\n\t\t\tCode: int32(code.Code_PERMISSION_DENIED),\n\t\t},\n\t}, nil\n}\n"
  },
  {
    "path": "examples/ext_authz/auth/grpc-service/pkg/auth/v3/auth.go",
    "content": "package v3\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"strings\"\n\n\tenvoy_api_v3_core \"github.com/envoyproxy/go-control-plane/envoy/config/core/v3\"\n\tenvoy_service_auth_v3 \"github.com/envoyproxy/go-control-plane/envoy/service/auth/v3\"\n\t\"github.com/golang/protobuf/ptypes/wrappers\"\n\t\"google.golang.org/genproto/googleapis/rpc/code\"\n\t\"google.golang.org/genproto/googleapis/rpc/status\"\n\n\t\"github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth\"\n)\n\ntype server struct {\n\tusers auth.Users\n}\n\nvar _ envoy_service_auth_v3.AuthorizationServer = &server{}\n\n// New creates a new authorization server.\nfunc New(users auth.Users) envoy_service_auth_v3.AuthorizationServer {\n\treturn &server{users}\n}\n\n// Check implements authorization's Check interface which performs authorization check based on the\n// attributes associated with the incoming request.\nfunc (s *server) Check(\n\tctx context.Context,\n\treq *envoy_service_auth_v3.CheckRequest) (*envoy_service_auth_v3.CheckResponse, error) {\n\tauthorization := req.Attributes.Request.Http.Headers[\"authorization\"]\n\tlog.Println(authorization)\n\n\textracted := strings.Fields(authorization)\n\tif len(extracted) == 2 && extracted[0] == \"Bearer\" {\n\t\tvalid, user := s.users.Check(extracted[1])\n\t\tif valid {\n\t\t\treturn &envoy_service_auth_v3.CheckResponse{\n\t\t\t\tHttpResponse: &envoy_service_auth_v3.CheckResponse_OkResponse{\n\t\t\t\t\tOkResponse: &envoy_service_auth_v3.OkHttpResponse{\n\t\t\t\t\t\tHeaders: []*envoy_api_v3_core.HeaderValueOption{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAppend: &wrappers.BoolValue{Value: false},\n\t\t\t\t\t\t\t\tHeader: &envoy_api_v3_core.HeaderValue{\n\t\t\t\t\t\t\t\t\t// For a successful request, the authorization server sets the\n\t\t\t\t\t\t\t\t\t// x-current-user value.\n\t\t\t\t\t\t\t\t\tKey:   \"x-current-user\",\n\t\t\t\t\t\t\t\t\tValue: 
user,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tStatus: &status.Status{\n\t\t\t\t\tCode: int32(code.Code_OK),\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t}\n\n\treturn &envoy_service_auth_v3.CheckResponse{\n\t\tStatus: &status.Status{\n\t\t\tCode: int32(code.Code_PERMISSION_DENIED),\n\t\t},\n\t}, nil\n}\n"
  },
  {
    "path": "examples/ext_authz/auth/http-service/Dockerfile",
    "content": "FROM node:alpine\n\nCOPY . /app\nCMD [\"node\", \"/app/http-service/server\"]\n"
  },
  {
    "path": "examples/ext_authz/auth/http-service/server.js",
    "content": "const Http = require(\"http\");\nconst path = require(\"path\");\n\nconst tokens = require(process.env.USERS ||\n  path.join(__dirname, \"..\", \"users.json\"));\n\nconst server = new Http.Server((req, res) => {\n  const authorization = req.headers[\"authorization\"] || \"\";\n  const extracted = authorization.split(\" \");\n  if (extracted.length === 2 && extracted[0] === \"Bearer\") {\n    const user = checkToken(extracted[1]);\n    if (user !== undefined) {\n      // The authorization server returns a response with \"x-current-user\" header for a successful\n      // request.\n      res.writeHead(200, { \"x-current-user\": user });\n      return res.end();\n    }\n  }\n  res.writeHead(403);\n  res.end();\n});\n\nconst port = process.env.PORT || 9002;\nserver.listen(port);\nconsole.log(`starting HTTP server on: ${port}`);\n\nfunction checkToken(token) {\n  return tokens[token];\n}\n"
  },
  {
    "path": "examples/ext_authz/auth/users.json",
    "content": "{\n  \"token1\": \"user1\",\n  \"token2\": \"user2\",\n  \"token3\": \"user3\"\n}\n"
  },
  {
    "path": "examples/ext_authz/config/grpc-service/v2.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: upstream\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: upstream-service\n          http_filters:\n          - name: envoy.filters.http.ext_authz\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n              grpc_service:\n                envoy_grpc:\n                  cluster_name: ext_authz-grpc-service\n                timeout: 0.250s\n              transport_api_version: V2\n          - name: envoy.filters.http.router\n            typed_config: {}\n\n  clusters:\n  - name: upstream-service\n    connect_timeout: 0.250s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: upstream-service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: upstream-service\n                port_value: 8080\n\n  - name: ext_authz-grpc-service\n    connect_timeout: 0.250s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: ext_authz-grpc-service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: ext_authz-grpc-service\n                port_value: 
9001\n\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/ext_authz/config/grpc-service/v3.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: upstream\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: upstream-service\n          http_filters:\n          - name: envoy.filters.http.ext_authz\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n              grpc_service:\n                envoy_grpc:\n                  cluster_name: ext_authz-grpc-service\n                timeout: 0.250s\n              transport_api_version: V3\n          - name: envoy.filters.http.router\n            typed_config: {}\n\n  clusters:\n  - name: upstream-service\n    connect_timeout: 0.250s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: upstream-service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: upstream-service\n                port_value: 8080\n\n  - name: ext_authz-grpc-service\n    connect_timeout: 0.250s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: ext_authz-grpc-service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: ext_authz-grpc-service\n                port_value: 
9001\n\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/ext_authz/config/http-service.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: upstream\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: upstream-service\n          http_filters:\n          - name: envoy.filters.http.ext_authz\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n              http_service:\n                server_uri:\n                  uri: ext_authz\n                  cluster: ext_authz-http-service\n                  timeout:  0.250s\n                authorization_response:\n                  allowed_upstream_headers:\n                    patterns:\n                    - exact: x-current-user\n          - name: envoy.filters.http.router\n            typed_config: {}\n\n  clusters:\n  - name: upstream-service\n    connect_timeout: 0.250s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: upstream-service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: upstream-service\n                port_value: 8080\n\n  - name: ext_authz-http-service\n    connect_timeout: 0.250s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: ext_authz-http-service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            
address:\n              socket_address:\n                address: ext_authz-http-service\n                port_value: 9002\n\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/ext_authz/config/opa-service/policy.rego",
    "content": "package istio.authz\n\ndefault allow = false\n\nallow = response {\n  input.attributes.request.http.method == \"GET\"\n  response := {\n    \"allowed\": true,\n    \"headers\": {\"x-current-user\": \"OPA\"}\n  }\n}\n"
  },
  {
    "path": "examples/ext_authz/config/opa-service/v2.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: upstream\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: upstream-service\n          http_filters:\n          - name: envoy.filters.http.ext_authz\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n              grpc_service:\n                envoy_grpc:\n                  cluster_name: ext_authz-opa-service\n                timeout: 0.250s\n              transport_api_version: V2\n          - name: envoy.filters.http.router\n            typed_config: {}\n\n  clusters:\n  - name: upstream-service\n    connect_timeout: 0.250s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: upstream-service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: upstream-service\n                port_value: 8080\n\n  - name: ext_authz-opa-service\n    connect_timeout: 0.250s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: ext_authz-opa-service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: ext_authz-opa-service\n                port_value: 
9002\n\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/ext_authz/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    environment:\n      - FRONT_ENVOY_YAML\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8000:8000\"\n      - \"8001:8001\"\n\n  ext_authz-http-service:\n    build:\n      context: ./auth\n      dockerfile: http-service/Dockerfile\n    volumes:\n      - ./users.json:/etc/users.json\n    environment:\n      - USERS=/etc/users.json\n    networks:\n      envoymesh:\n        aliases:\n          - ext_authz-http-service\n\n  ext_authz-grpc-service:\n    build:\n      context: ./auth\n      dockerfile: grpc-service/Dockerfile\n    volumes:\n      - ./users.json:/etc/users.json\n    networks:\n      envoymesh:\n        aliases:\n          - ext_authz-grpc-service\n\n  ext_authz-opa-service:\n    image: openpolicyagent/opa:0.21.0-istio\n    volumes:\n      - ./config/opa-service/policy.rego:/etc/policy.rego\n    command:\n      - run\n      - --log-level=debug\n      - --server\n      - --log-format=json-pretty\n      - --set=plugins.envoy_ext_authz_grpc.addr=:9002\n      - --set=decision_logs.console=true\n      - /etc/policy.rego\n    networks:\n      envoymesh:\n        aliases:\n          - ext_authz-opa-service\n\n  upstream-service:\n    build:\n      context: ./upstream\n      dockerfile: service/Dockerfile\n    networks:\n      envoymesh:\n        aliases:\n          - upstream-service\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/ext_authz/run_envoy.sh",
    "content": "#!/bin/sh\n\n/usr/local/bin/envoy -c \"/etc/envoy-${FRONT_ENVOY_YAML}\" --service-cluster front-proxy\n"
  },
  {
    "path": "examples/ext_authz/upstream/service/Dockerfile",
    "content": "FROM python:3-alpine\n\nRUN pip3 install -q Flask==0.11.1\nCOPY . ./app\nCMD [\"python3\", \"/app/service/server.py\"]\n"
  },
  {
    "path": "examples/ext_authz/upstream/service/server.py",
    "content": "from flask import Flask, request\n\napp = Flask(__name__)\n\n\n@app.route('/service')\ndef hello():\n  return 'Hello ' + request.headers.get('x-current-user') + ' from behind Envoy!'\n\n\nif __name__ == \"__main__\":\n  app.run(host='0.0.0.0', port=8080, debug=False)\n"
  },
  {
    "path": "examples/ext_authz/users.json",
    "content": "{\n  \"token1\": \"user1\",\n  \"token2\": \"user2\",\n  \"token3\": \"user3\"\n}\n"
  },
  {
    "path": "examples/ext_authz/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=ext_authz\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test services responds with 403\"\nresponds_with_header \\\n    \"HTTP/1.1 403 Forbidden\"\\\n    http://localhost:8000/service\n\nrun_log \"Restart front-envoy with FRONT_ENVOY_YAML=config/http-service.yaml\"\ndocker-compose down\nFRONT_ENVOY_YAML=config/http-service.yaml docker-compose up -d\nsleep 10\n\nrun_log \"Test service responds with 403\"\nresponds_with_header \\\n    \"HTTP/1.1 403 Forbidden\"\\\n    http://localhost:8000/service\n\nrun_log \"Test authenticated service responds with 200\"\nresponds_with_header \\\n    \"HTTP/1.1 200 OK\" \\\n    -H \"Authorization: Bearer token1\" \\\n    http://localhost:8000/service\n\nrun_log \"Restart front-envoy with FRONT_ENVOY_YAML=config/opa-service/v2.yaml\"\ndocker-compose down\nFRONT_ENVOY_YAML=config/opa-service/v2.yaml docker-compose up -d\nsleep 10\n\nrun_log \"Test OPA service responds with 200\"\nresponds_with_header \\\n    \"HTTP/1.1 200 OK\" \\\n    http://localhost:8000/service\n\nrun_log \"Check OPA logs\"\ndocker-compose logs ext_authz-opa-service | grep decision_id -A 30\n\nrun_log \"Check OPA service rejects POST\"\nresponds_with_header \\\n    \"HTTP/1.1 403 Forbidden\" \\\n    -X POST \\\n    http://localhost:8000/service\n"
  },
  {
    "path": "examples/fault-injection/.gitignore",
    "content": "/runtime/\n"
  },
  {
    "path": "examples/fault-injection/Dockerfile-envoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get install -y curl tree\nCOPY ./envoy.yaml /etc/envoy.yaml\nRUN chmod go+r /etc/envoy.yaml\nCOPY enable_delay_fault_injection.sh disable_delay_fault_injection.sh enable_abort_fault_injection.sh disable_abort_fault_injection.sh send_request.sh /\n"
  },
  {
    "path": "examples/fault-injection/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [Envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/fault_injection.html).\n"
  },
  {
    "path": "examples/fault-injection/disable_abort_fault_injection.sh",
    "content": "#!/usr/bin/env bash\nset -ex\n\nrm /srv/runtime/v1/envoy/fault/http/abort/abort_percent\nrm /srv/runtime/v1/envoy/fault/http/abort/http_status\n\npushd /srv/runtime\nln -s /srv/runtime/v1 new && mv -Tf new current\npopd\n"
  },
  {
    "path": "examples/fault-injection/disable_delay_fault_injection.sh",
    "content": "#!/usr/bin/env bash\nset -ex\n\nrm /srv/runtime/v1/envoy/fault/http/delay/fixed_delay_percent\nrm /srv/runtime/v1/envoy/fault/http/delay/fixed_duration_ms\n\npushd /srv/runtime\nln -s /srv/runtime/v1 new && mv -Tf new current\npopd\n"
  },
  {
    "path": "examples/fault-injection/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n  envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-envoy\n    command: /usr/local/bin/envoy -c /etc/envoy.yaml\n    volumes:\n      - ./runtime:/srv/runtime\n    networks:\n      - envoymesh\n    ports:\n      - 9211:9211\n      - 9901:9901\n  backend:\n    image: kennethreitz/httpbin@sha256:2c7abc4803080c22928265744410173b6fea3b898872c01c5fd0f0f9df4a59fb\n    networks:\n      - envoymesh\n    ports:\n      - 8080:80\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/fault-injection/enable_abort_fault_injection.sh",
    "content": "#!/usr/bin/env bash\nset -ex\n\nmkdir -p /srv/runtime/v1/envoy/fault/http/abort\necho '100' > /srv/runtime/v1/envoy/fault/http/abort/abort_percent\necho '503' > /srv/runtime/v1/envoy/fault/http/abort/http_status\n\npushd /srv/runtime\nln -s /srv/runtime/v1 new && mv -Tf new current\npopd\n"
  },
  {
    "path": "examples/fault-injection/enable_delay_fault_injection.sh",
    "content": "#!/usr/bin/env bash\nset -ex\n\nmkdir -p /srv/runtime/v1/envoy/fault/http/delay\necho '50' > /srv/runtime/v1/envoy/fault/http/delay/fixed_delay_percent\necho '3000' > /srv/runtime/v1/envoy/fault/http/delay/fixed_duration_ms\n\npushd /srv/runtime\nln -s /srv/runtime/v1 new && mv -Tf new current\npopd\n"
  },
  {
    "path": "examples/fault-injection/envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 9211\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          access_log:\n            name: envoy.access_loggers.file\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n              path: /dev/stdout\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: /\n                route:\n                  cluster: local_service\n          http_filters:\n          - name: envoy.filters.http.fault\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault\n              abort:\n                http_status: 503\n                percentage:\n                  numerator: 0\n                  denominator: HUNDRED\n              delay:\n                fixed_delay: 3s\n                percentage:\n                  numerator: 0\n                  denominator: HUNDRED\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 1s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n          endpoint:\n            address:\n              socket_address:\n                address: backend\n                port_value: 80\nadmin:\n  access_log_path: /dev/stdout\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 
9901\nlayered_runtime:\n  layers:\n  - name: disk_layer_0\n    disk_layer:\n      symlink_root: /srv/runtime/current\n      subdirectory: envoy\n"
  },
  {
    "path": "examples/fault-injection/send_request.sh",
    "content": "#!/usr/bin/env bash\nset -ex\n\nwhile :; do\n  curl -v localhost:9211/status/200\n  sleep 1\ndone\n"
  },
  {
    "path": "examples/fault-injection/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=fault-injection\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Send requests for 20 seconds\"\ndocker-compose exec -T envoy bash -c \\\n               \"bash send_request.sh & export pid=\\$! && sleep 20 && kill \\$pid\" \\\n    &> /dev/null\n\nrun_log \"Check logs\"\ndocker-compose logs | grep \"HTTP/1.1\\\" 200\"\n\n\n_fault_injection_test () {\n    local action code existing_200s existing_codes\n    action=\"$1\"\n    code=\"$2\"\n    existing_codes=0\n\n    # enable fault injection and check for http hits of type $code\n    existing_codes=$(docker-compose logs | grep -c \"HTTP/1.1\\\" ${code}\" || :)\n    run_log \"Enable ${action} fault injection\"\n    docker-compose exec -T envoy bash \"enable_${action}_fault_injection.sh\"\n    run_log \"Send requests for 20 seconds\"\n    docker-compose exec -T envoy bash -c \\\n                   \"bash send_request.sh & export pid=\\$! && sleep 20 && kill \\$pid\" \\\n        &> /dev/null\n    run_log \"Check logs again\"\n    new_codes=$(docker-compose logs | grep -c \"HTTP/1.1\\\" ${code}\")\n    if [[ \"$new_codes\" -le \"$existing_codes\" ]]; then\n        echo \"ERROR: expected to find new logs with response code $code\" >&2\n        return 1\n    fi\n\n    # disable fault injection and check for http hits of type 200\n    existing_200s=$(docker-compose logs | grep -c \"HTTP/1.1\\\" 200\")\n    run_log \"Disable ${action} fault injection\"\n    docker-compose exec -T envoy bash \"disable_${action}_fault_injection.sh\"\n    run_log \"Send requests for 20 seconds\"\n    docker-compose exec -T envoy bash -c \\\n                   \"bash send_request.sh & export pid=\\$! 
&& sleep 20 && kill \\$pid\" \\\n        &> /dev/null\n    run_log \"Check logs again\"\n    new_200s=$(docker-compose logs | grep -c \"HTTP/1.1\\\" 200\")\n    if [[ \"$new_200s\" -le \"$existing_200s\" ]]; then\n        echo \"ERROR: expected to find new logs with response code 200\" >&2\n        return 1\n    fi\n}\n\n_fault_injection_test abort 503\n_fault_injection_test delay 200\n\nrun_log \"Check tree\"\ndocker-compose exec -T envoy tree /srv/runtime\n"
  },
  {
    "path": "examples/front-proxy/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./front-envoy.yaml /etc/front-envoy.yaml\nRUN chmod go+r /etc/front-envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/front-envoy.yaml\", \"--service-cluster\", \"front-proxy\"]\n"
  },
  {
    "path": "examples/front-proxy/Dockerfile-jaeger-service",
    "content": "FROM envoyproxy/envoy-alpine-dev:latest\n\nRUN apk update && apk add py3-pip bash curl\nRUN pip3 install -q Flask==0.11.1 requests==2.18.4\nRUN mkdir /code\nADD ./service.py /code\nADD ./start_service.sh /usr/local/bin/start_service.sh\nRUN chmod u+x /usr/local/bin/start_service.sh\n#\n# for discussion on jaeger binary compatibility, and the source of the file, see here:\n#  https://github.com/envoyproxy/envoy/issues/11382#issuecomment-638012072\n#\nRUN echo \"4a7d17d4724ee890490bcd6cfdedb12a02316a3d33214348d30979abd201f1ca  /usr/local/lib/libjaegertracing_plugin.so\" > /tmp/checksum \\\n         && curl -Ls https://github.com/tetratelabs/getenvoy-package/files/3518103/getenvoy-centos-jaegertracing-plugin.tar.gz \\\n              | tar zxf - -C /usr/local/lib \\\n         && mv /usr/local/lib/libjaegertracing.so.0.4.2 /usr/local/lib/libjaegertracing_plugin.so \\\n         && sha256sum -c /tmp/checksum \\\n         && rm /tmp/checksum\nENTRYPOINT [\"/bin/sh\", \"/usr/local/bin/start_service.sh\"]\n"
  },
  {
    "path": "examples/front-proxy/Dockerfile-service",
    "content": "FROM envoyproxy/envoy-alpine-dev:latest\n\nRUN apk update && apk add py3-pip bash curl\nRUN pip3 install -q Flask==0.11.1 requests==2.18.4\nRUN mkdir /code\nADD ./service.py /code\nADD ./start_service.sh /usr/local/bin/start_service.sh\nRUN chmod u+x /usr/local/bin/start_service.sh\nENTRYPOINT [\"/bin/sh\", \"/usr/local/bin/start_service.sh\"]\n"
  },
  {
    "path": "examples/front-proxy/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/front_proxy.html)\n"
  },
  {
    "path": "examples/front-proxy/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    networks:\n      - envoymesh\n    expose:\n      - \"8080\"\n      - \"8443\"\n      - \"8001\"\n    ports:\n      - \"8080:8080\"\n      - \"8443:8443\"\n      - \"8001:8001\"\n\n  service1:\n    build:\n      context: .\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./service-envoy.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service1\n    environment:\n      - SERVICE_NAME=1\n    expose:\n      - \"8000\"\n\n  service2:\n    build:\n      context: .\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./service-envoy.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service2\n    environment:\n      - SERVICE_NAME=2\n    expose:\n      - \"8000\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/front-proxy/front-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8080\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: backend\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/service/1\"\n                route:\n                  cluster: service1\n              - match:\n                  prefix: \"/service/2\"\n                route:\n                  cluster: service2\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8443\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: backend\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/service/1\"\n                route:\n                  cluster: service1\n              - match:\n                  prefix: \"/service/2\"\n                route:\n                  cluster: service2\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n\n      transport_socket:\n        name: envoy.transport_sockets.tls\n        
typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              # The following self-signed certificate pair is generated using:\n              # $ openssl req -x509 -newkey rsa:2048 -keyout a/front-proxy-key.pem -out  a/front-proxy-crt.pem -days 3650 -nodes -subj '/CN=front-envoy'\n              #\n              # Instead of feeding it as an inline_string, certificate pair can also be fed to Envoy\n              # via filename. Reference: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/base.proto#config-core-v3-datasource.\n              #\n              # Or in a dynamic configuration scenario, certificate pair can be fetched remotely via\n              # Secret Discovery Service (SDS). Reference: https://www.envoyproxy.io/docs/envoy/latest/configuration/security/secret.\n              certificate_chain:\n                inline_string: |\n                  -----BEGIN CERTIFICATE-----\n                  MIICqDCCAZACCQCquzpHNpqBcDANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtm\n                  cm9udC1lbnZveTAeFw0yMDA3MDgwMTMxNDZaFw0zMDA3MDYwMTMxNDZaMBYxFDAS\n                  BgNVBAMMC2Zyb250LWVudm95MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC\n                  AQEAthnYkqVQBX+Wg7aQWyCCb87hBce1hAFhbRM8Y9dQTqxoMXZiA2n8G089hUou\n                  oQpEdJgitXVS6YMFPFUUWfwcqxYAynLK4X5im26Yfa1eO8La8sZUS+4Bjao1gF5/\n                  VJxSEo2yZ7fFBo8M4E44ZehIIocipCRS+YZehFs6dmHoq/MGvh2eAHIa+O9xssPt\n                  ofFcQMR8rwBHVbKy484O10tNCouX4yUkyQXqCRy6HRu7kSjOjNKSGtjfG+h5M8bh\n                  10W7ZrsJ1hWhzBulSaMZaUY3vh5ngpws1JATQVSK1Jm/dmMRciwlTK7KfzgxHlSX\n                  58ENpS7yPTISkEICcLbXkkKGEQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCmj6Hg\n                  vwOxWz0xu+6fSfRL6PGJUGq6wghCfUvjfwZ7zppDUqU47fk+yqPIOzuGZMdAqi7N\n                  v1DXkeO4A3hnMD22Rlqt25vfogAaZVToBeQxCPd/ALBLFrvLUFYuSlS3zXSBpQqQ\n                  
Ny2IKFYsMllz5RSROONHBjaJOn5OwqenJ91MPmTAG7ujXKN6INSBM0PjX9Jy4Xb9\n                  zT+I85jRDQHnTFce1WICBDCYidTIvJtdSSokGSuy4/xyxAAc/BpZAfOjBQ4G1QRe\n                  9XwOi790LyNUYFJVyeOvNJwveloWuPLHb9idmY5YABwikUY6QNcXwyHTbRCkPB2I\n                  m+/R4XnmL4cKQ+5Z\n                  -----END CERTIFICATE-----\n              private_key:\n                inline_string: |\n                  -----BEGIN PRIVATE KEY-----\n                  MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC2GdiSpVAFf5aD\n                  tpBbIIJvzuEFx7WEAWFtEzxj11BOrGgxdmIDafwbTz2FSi6hCkR0mCK1dVLpgwU8\n                  VRRZ/ByrFgDKcsrhfmKbbph9rV47wtryxlRL7gGNqjWAXn9UnFISjbJnt8UGjwzg\n                  Tjhl6EgihyKkJFL5hl6EWzp2Yeir8wa+HZ4Achr473Gyw+2h8VxAxHyvAEdVsrLj\n                  zg7XS00Ki5fjJSTJBeoJHLodG7uRKM6M0pIa2N8b6HkzxuHXRbtmuwnWFaHMG6VJ\n                  oxlpRje+HmeCnCzUkBNBVIrUmb92YxFyLCVMrsp/ODEeVJfnwQ2lLvI9MhKQQgJw\n                  tteSQoYRAgMBAAECggEAeDGdEkYNCGQLe8pvg8Z0ccoSGpeTxpqGrNEKhjfi6NrB\n                  NwyVav10iq4FxEmPd3nobzDPkAftfvWc6hKaCT7vyTkPspCMOsQJ39/ixOk+jqFx\n                  lNa1YxyoZ9IV2DIHR1iaj2Z5gB367PZUoGTgstrbafbaNY9IOSyojCIO935ubbcx\n                  DWwL24XAf51ez6sXnI8V5tXmrFlNXhbhJdH8iIxNyM45HrnlUlOk0lCK4gmLJjy9\n                  10IS2H2Wh3M5zsTpihH1JvM56oAH1ahrhMXs/rVFXXkg50yD1KV+HQiEbglYKUxO\n                  eMYtfaY9i2CuLwhDnWp3oxP3HfgQQhD09OEN3e0IlQKBgQDZ/3poG9TiMZSjfKqL\n                  xnCABMXGVQsfFWNC8THoW6RRx5Rqi8q08yJrmhCu32YKvccsOljDQJQQJdQO1g09\n                  e/adJmCnTrqxNtjPkX9txV23Lp6Ak7emjiQ5ICu7iWxrcO3zf7hmKtj7z+av8sjO\n                  mDI7NkX5vnlE74nztBEjp3eC0wKBgQDV2GeJV028RW3b/QyP3Gwmax2+cKLR9PKR\n                  nJnmO5bxAT0nQ3xuJEAqMIss/Rfb/macWc2N/6CWJCRT6a2vgy6xBW+bqG6RdQMB\n                  xEZXFZl+sSKhXPkc5Wjb4lQ14YWyRPrTjMlwez3k4UolIJhJmwl+D7OkMRrOUERO\n                  EtUvc7odCwKBgBi+nhdZKWXveM7B5N3uzXBKmmRz3MpPdC/yDtcwJ8u8msUpTv4R\n                  
JxQNrd0bsIqBli0YBmFLYEMg+BwjAee7vXeDFq+HCTv6XMva2RsNryCO4yD3I359\n                  XfE6DJzB8ZOUgv4Dvluie3TB2Y6ZQV/p+LGt7G13yG4hvofyJYvlg3RPAoGAcjDg\n                  +OH5zLN2eqah8qBN0CYa9/rFt0AJ19+7/smLTJ7QvQq4g0gwS1couplcCEnNGWiK\n                  72y1n/ckvvplmPeAE19HveMvR9UoCeV5ej86fACy8V/oVpnaaLBvL2aCMjPLjPP9\n                  DWeCIZp8MV86cvOrGfngf6kJG2qZTueXl4NAuwkCgYEArKkhlZVXjwBoVvtHYmN2\n                  o+F6cGMlRJTLhNc391WApsgDZfTZSdeJsBsvvzS/Nc0burrufJg0wYioTlpReSy4\n                  ohhtprnQQAddfjHP7rh2LGt+irFzhdXXQ1ybGaGM9D764KUNCXLuwdly0vzXU4HU\n                  q5sGxGrC1RECGB5Zwx2S2ZY=\n                  -----END PRIVATE KEY-----\n\n  clusters:\n  - name: service1\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service1\n                port_value: 8000\n  - name: service2\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service2\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service2\n                port_value: 8000\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\nlayered_runtime:\n  layers:\n    - name: static_layer_0\n      static_layer:\n        envoy:\n          resource_limits:\n            listener:\n              example_listener_name:\n                connection_limit: 10000\n"
  },
  {
    "path": "examples/front-proxy/service-envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/service\"\n                route:\n                  cluster: local_service\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8081\n"
  },
  {
    "path": "examples/front-proxy/service.py",
    "content": "from flask import Flask\nfrom flask import request\nimport os\nimport requests\nimport socket\nimport sys\n\napp = Flask(__name__)\n\nTRACE_HEADERS_TO_PROPAGATE = [\n    'X-Ot-Span-Context',\n    'X-Request-Id',\n\n    # Zipkin headers\n    'X-B3-TraceId',\n    'X-B3-SpanId',\n    'X-B3-ParentSpanId',\n    'X-B3-Sampled',\n    'X-B3-Flags',\n\n    # Jaeger header (for native client)\n    \"uber-trace-id\"\n]\n\n\n@app.route('/service/<service_number>')\ndef hello(service_number):\n  return ('Hello from behind Envoy (service {})! hostname: {} resolved'\n          'hostname: {}\\n'.format(os.environ['SERVICE_NAME'], socket.gethostname(),\n                                  socket.gethostbyname(socket.gethostname())))\n\n\n@app.route('/trace/<service_number>')\ndef trace(service_number):\n  headers = {}\n  # call service 2 from service 1\n  if int(os.environ['SERVICE_NAME']) == 1:\n    for header in TRACE_HEADERS_TO_PROPAGATE:\n      if header in request.headers:\n        headers[header] = request.headers[header]\n    requests.get(\"http://localhost:9000/trace/2\", headers=headers)\n  return ('Hello from behind Envoy (service {})! hostname: {} resolved'\n          'hostname: {}\\n'.format(os.environ['SERVICE_NAME'], socket.gethostname(),\n                                  socket.gethostbyname(socket.gethostname())))\n\n\nif __name__ == \"__main__\":\n  app.run(host='127.0.0.1', port=8080, debug=True)\n"
  },
  {
    "path": "examples/front-proxy/start_service.sh",
    "content": "#!/bin/sh\npython3 /code/service.py &\nenvoy -c /etc/service-envoy.yaml --service-cluster \"service${SERVICE_NAME}\"\n"
  },
  {
    "path": "examples/front-proxy/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=front-proxy\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test service: localhost:8080/service/1\"\nresponds_with \\\n    \"Hello from behind Envoy (service 1)!\" \\\n    http://localhost:8080/service/1\n\nrun_log \"Test service: localhost:8080/service/2\"\nresponds_with \\\n    \"Hello from behind Envoy (service 2)!\" \\\n    http://localhost:8080/service/2\n\nrun_log \"Test service: https://localhost:8443/service/1\"\nresponds_with \\\n    \"Hello from behind Envoy (service 1)!\" \\\n    -k https://localhost:8443/service/1\n\nrun_log \"Scale up docker service1=3\"\ndocker-compose scale service1=3\nrun_log \"Snooze for 5 while docker-compose scales...\"\nsleep 5\n\nrun_log \"Test round-robin localhost:8080/service/1\"\ndocker-compose exec -T front-envoy bash -c \"\\\n                   curl -s http://localhost:8080/service/1 \\\n                   && curl -s http://localhost:8080/service/1 \\\n                   && curl -s http://localhost:8080/service/1\" \\\n                   | grep Hello | grep \"service 1\"\n\n\nrun_log \"Test service inside front-envoy: localhost:8080/service/2\"\ndocker-compose exec -T front-envoy curl -s http://localhost:8080/service/2 | grep Hello | grep \"service 2\"\n\nrun_log \"Test service info: localhost:8080/server_info\"\ndocker-compose exec -T front-envoy curl -s http://localhost:8001/server_info | jq '.'\n\nrun_log \"Test service stats: localhost:8080/stats\"\ndocker-compose exec -T front-envoy curl -s http://localhost:8001/stats | grep \":\"\n"
  },
  {
    "path": "examples/grpc-bridge/.gitignore",
    "content": ".idea\nserver/kv/kv.pb.go\nclient/kv/kv_pb2.py\n"
  },
  {
    "path": "examples/grpc-bridge/Dockerfile-client",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nCOPY ./client/envoy-proxy.yaml /etc/client-envoy-proxy.yaml\nRUN chmod go+r /etc/client-envoy-proxy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/client-envoy-proxy.yaml\"]\n"
  },
  {
    "path": "examples/grpc-bridge/Dockerfile-server",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nCOPY ./server/envoy-proxy.yaml /etc/server-envoy-proxy.yaml\nRUN chmod go+r /etc/server-envoy-proxy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/server-envoy-proxy.yaml\", \"--service-cluster\", \"backend-proxy\"]\n"
  },
  {
    "path": "examples/grpc-bridge/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/grpc_bridge)\n\n# gRPC HTTP/1.1 to HTTP/2 bridge\n\nThis is an example of a key-value store where a client CLI, written in Python, updates a remote store, written in Go, using the stubs generated for both languages. \n \nRunning clients that uses gRPC stubs and sends messages through a proxy\nthat upgrades the HTTP requests from http/1.1 to http/2. This is a more detailed\nimplementation of the Envoy documentation at https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/grpc_bridge\n\n* Client: talks in python and sends HTTP/1.1 requests (gRPC stubs)\n  * Client-Proxy: Envoy setup that acts as an egress and converts the HTTP/1.1 call to HTTP/2.\n* Server: talks in golang and receives HTTP/2 requests (gRPC stubs)\n  * Server-Proxy: Envoy setup that acts as an ingress and receives the HTTP/2 calls\n\n`[client](http/1.1) -> [client-egress-proxy](http/2) -> [server-ingress-proxy](http/2) -> [server]`\n\n# Running in 3 Steps\n\n* Generate Stubs: both the `client` and `server` stubs in `python` and `go` respectively to be used by each server.\n* Start Both Client and Server Servers and Proxies: `\n* Use the Client CLI to make calls to the kv server.\n\n## Generate Stubs\n\n* Uses the `protos` dir and generates the stubs for both `client` and `server`\n* Inspect the file `docker-compose-protos.yaml` with the gRPC protoc commands to generate the stubs.\n\n```console\n$ docker-compose -f docker-compose-protos.yaml up --remove-orphans\nStarting grpc-bridge_stubs_python_1 ... done\nStarting grpc-bridge_stubs_go_1     ... 
done\nAttaching to grpc-bridge_stubs_go_1, grpc-bridge_stubs_python_1\ngrpc-bridge_stubs_go_1 exited with code 0\ngrpc-bridge_stubs_python_1 exited with code 0\n```\n\n* The files created were the `kv` modules for both the client and server respective dir.\n  * Note that both stubs are their respective languages.\n  * For each language, use its ways to include the stubs as an external module.\n\n```console\n$ ls -la client/kv/kv_pb2.py\n-rw-r--r--  1 mdesales  CORP\\Domain Users  9527 Nov  6 21:59 client/kv/kv_pb2.py\n\n$ ls -la server/kv/kv.pb.go\n-rw-r--r--  1 mdesales  CORP\\Domain Users  9994 Nov  6 21:59 server/kv/kv.pb.go\n```\n\n## Start Both Client and Server and Proxies\n\n* After the stubs are in place, start the containers described in `docker-compose.yaml`.\n\n```console\n$ docker-compose up --build\n```\n\n* Inspect the files `client/envoy-proxy.yaml` and `server/envoy-proxy.yaml`, as they define configs for their respective container, comparing port numbers and other specific settings.\n\nNotice that you will be interacting with the client container, which hosts\nthe client python CLI. The port numbers for the proxies and the containers are displayed\nby the `docker-compose ps`, so it's easier to compare with the `\\*/envoy-proxy.yaml` config files for each\nof the containers how they match.\n\nNote that the client container to use is `grpc-bridge_grpc-client_1` and binds to no port\nas it will use the `python` CLI.\n\n```console\n$ docker-compose ps\n             Name                            Command               State                             Ports\n------------------------------------------------------------------------------------------------------------------------------------\ngrpc-bridge_grpc-client-proxy_1   /docker-entrypoint.sh /usr ... Up      10000/tcp, 0.0.0.0:9911->9911/tcp, 0.0.0.0:9991->9991/tcp\ngrpc-bridge_grpc-client_1         /bin/sh -c tail -f /dev/null     Up\ngrpc-bridge_grpc-server-proxy_1   /docker-entrypoint.sh /usr ... 
Up      10000/tcp, 0.0.0.0:8811->8811/tcp, 0.0.0.0:8881->8881/tcp\ngrpc-bridge_grpc-server_1         /bin/sh -c /bin/server           Up      0.0.0.0:8081->8081/tcp\n```\n\n## Use the Client CLI\n\n* Since the containers are running, you can use the client container to interact with the gRPC server through the proxies\n* The client has the methods `set key value` and `get key` to use the in-memory key-value store.\n\n```console\n$ docker-compose exec grpc-client /client/grpc-kv-client.py set foo bar\nsetf foo to bar\n```\n\n> NOTE: You could also run docker instead of docker-compose `docker exec -ti grpc-bridge_grpc-client_1 /client/grpc-kv-client.py set foo bar`\n\n* The server will display the gRPC call received by the server, and then the access logs from the proxy for the SET method.\n  * Note that the proxy is propagating the headers of the request\n\n```console\ngrpc-server_1        | 2019/11/07 16:33:58 set: foo = bar\ngrpc-server-proxy_1  | [2019-11-07T16:33:58.856Z] \"POST /kv.KV/Set HTTP/1.1\" 200 - 15 7 3 1 \"172.24.0.3\" \"python-requests/2.22.0\" \"c11cf735-0647-4e67-965c-5b1e362a5532\" \"grpc\" \"172.24.0.2:8081\"\ngrpc-client-proxy_1  | [2019-11-07T16:33:58.855Z] \"POST /kv.KV/Set HTTP/1.1\" 200 - 15 7 5 3 \"172.24.0.3\" \"python-requests/2.22.0\" \"c11cf735-0647-4e67-965c-5b1e362a5532\" \"grpc\" \"172.24.0.5:8811\"\n```\n\n* Getting the value is no different\n\n```console\n$ docker-compose exec grpc-client /client/grpc-kv-client.py get foo\nbar\n```\n\n> NOTE: You could also run docker instead of docker-compose `docker exec -ti grpc-bridge_grpc-client_1 /client/grpc-kv-client.py get foo`\n\n* The logs in the server will show the same for the GET method.\n  * Note that again the request ID is proxied through\n\n```console\ngrpc-server_1        | 2019/11/07 16:34:50 get: foo\ngrpc-server-proxy_1  | [2019-11-07T16:34:50.456Z] \"POST /kv.KV/Get HTTP/1.1\" 200 - 10 10 2 1 \"172.24.0.3\" \"python-requests/2.22.0\" \"727d4dcd-a276-4bb2-b4cc-494ae7119c24\" 
\"grpc\" \"172.24.0.2:8081\"\ngrpc-client-proxy_1  | [2019-11-07T16:34:50.455Z] \"POST /kv.KV/Get HTTP/1.1\" 200 - 10 10 3 2 \"172.24.0.3\" \"python-requests/2.22.0\" \"727d4dcd-a276-4bb2-b4cc-494ae7119c24\" \"grpc\" \"172.24.0.5:8811\"\n```\n\n# Troubleshooting\n\n* Errors building the `client` or `server` are related to the missing gRPC stubs.\n* Make sure to produce the stubs before building\n  * The error below is when the server is missing the stubs in the kv dir.\n\n```console\n$ go build -o server\ngo: finding github.com/envoyproxy/envoy/examples/grpc-bridge latest\ngo: finding github.com/envoyproxy/envoy/examples latest\ngo: finding github.com/envoyproxy/envoy/examples/grpc-bridge/server/kv latest\ngo: finding github.com/envoyproxy/envoy/examples/grpc-bridge/server latest\nbuild github.com/envoyproxy/envoy: cannot load github.com/envoyproxy/envoy/examples/grpc-bridge/server/kv: no matching versions for query \"latest\"\n```\n"
  },
  {
    "path": "examples/grpc-bridge/client/Dockerfile",
    "content": "FROM grpc/python\n\nWORKDIR /client\n\nCOPY requirements.txt /client/requirements.txt\n\n# Cache the dependencies\nRUN pip install -r /client/requirements.txt\n\n# Copy the sources, including the stubs\nCOPY client.py /client/grpc-kv-client.py\nCOPY kv /client/kv\n\nRUN chmod a+x /client/grpc-kv-client.py\n\n# http://bigdatums.net/2017/11/07/how-to-keep-docker-containers-running/\n# Call docker exec /client/grpc.py set | get\nCMD tail -f /dev/null\n"
  },
  {
    "path": "examples/grpc-bridge/client/client.py",
    "content": "#!/usr/bin/env python\n\nimport requests, sys\nimport os\n\n# Stubs generated by protoc\nfrom kv import kv_pb2 as kv\n\nfrom struct import pack\n\nHOST = os.getenv('CLIENT_PROXY', \"http://localhost:9001\")\nHEADERS = {'content-type': 'application/grpc', 'Host': 'grpc'}\nUSAGE = \"\"\"\ngrpc-client usage [{host}]:\n  ./client.py set <key> <value> - sets the <key> and <value>\n  ./client.py get <key>         - gets the value for <key>\n\n  Set env var CLIENT_PROXY to change to a different host\n  \"\"\".format(host=HOST)\n\n\nclass KVClient():\n\n  def get(self, key):\n    r = kv.GetRequest(key=key)\n\n    # Build the gRPC frame\n    data = r.SerializeToString()\n    data = pack('!cI', b'\\0', len(data)) + data\n\n    resp = requests.post(HOST + \"/kv.KV/Get\", data=data, headers=HEADERS)\n\n    return kv.GetResponse().FromString(resp.content[5:])\n\n  def set(self, key, value):\n    r = kv.SetRequest(key=key, value=value)\n    data = r.SerializeToString()\n    data = pack('!cI', b'\\0', len(data)) + data\n\n    return requests.post(HOST + \"/kv.KV/Set\", data=data, headers=HEADERS)\n\n\ndef run():\n  if len(sys.argv) == 1:\n    print(USAGE)\n\n    sys.exit(0)\n\n  cmd = sys.argv[1]\n\n  client = KVClient()\n\n  if cmd == \"get\":\n    # ensure a key was provided\n    if len(sys.argv) != 3:\n      print(USAGE)\n      sys.exit(1)\n\n    # get the key to fetch\n    key = sys.argv[2]\n\n    # send the request to the server\n    response = client.get(key)\n\n    print(response.value)\n    sys.exit(0)\n\n  elif cmd == \"set\":\n    # ensure a key and value were provided\n    if len(sys.argv) < 4:\n      print(USAGE)\n      sys.exit(1)\n\n    # get the key and the full text of value\n    key = sys.argv[2]\n    value = \" \".join(sys.argv[3:])\n\n    # send the request to the server\n    response = client.set(key, value)\n\n    print(\"setf %s to %s\" % (key, value))\n\n\nif __name__ == '__main__':\n  run()\n"
  },
  {
    "path": "examples/grpc-bridge/client/envoy-proxy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 9911\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          add_user_agent: true\n          access_log:\n          - name: envoy.access_loggers.file\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n              path: \"/dev/stdout\"\n          stat_prefix: egress_http\n          common_http_protocol_options:\n            idle_timeout: 0.840s\n          use_remote_address: true\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: backend\n              domains:\n              - grpc\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: backend-proxy\n          http_filters:\n          - name: envoy.filters.http.grpc_http1_bridge\n            typed_config: {}\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: backend-proxy\n    type: logical_dns\n    dns_lookup_family: V4_ONLY\n    lb_policy: round_robin\n    connect_timeout: 0.250s\n    http_protocol_options: {}\n    load_assignment:\n      cluster_name: backend-proxy\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: kv-backend-proxy\n                port_value: 8811\n\nadmin:\n  access_log_path: \"/tmp/admin_access.log\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 9991\n"
  },
  {
    "path": "examples/grpc-bridge/client/kv/__init__.py",
    "content": ""
  },
  {
    "path": "examples/grpc-bridge/client/requirements.txt",
    "content": "requests>=2.22.0\ngrpcio\ngrpcio-tools\nprotobuf==3.10.0\n"
  },
  {
    "path": "examples/grpc-bridge/docker-compose-protos.yaml",
    "content": "version: \"3.7\"\n\n# This is the conversion from a script to a dockerized version of the script\n# https://github.com/envoyproxy/envoy/blob/master/examples/grpc-bridge/service/script/gen\nservices:\n\n  # $ docker run -ti -v $(pwd):/protos -v $(pwd)/stubs:/stubs grpc/go protoc --go_out=plugins=grpc:/stubs -I/protos /protos/kv.proto\n  stubs_go:\n    image: grpc/go\n    command: protoc --go_out=plugins=grpc:/stubs -I/protos /protos/kv.proto\n    volumes:\n      - ./protos:/protos\n      - ./server/kv:/stubs\n\n  # $ docker run -ti -v $(pwd):/protos -v $(pwd)/stubs:/stubs grpc/python python -m grpc.tools.protoc --python_out=/stubs --grpc_python_out=/stubs -I/protos /protos/kv.proto\n  stubs_python:\n    image: grpc/python\n    command: python -m grpc.tools.protoc --python_out=/stubs --grpc_python_out=/stubs -I/protos /protos/kv.proto\n    volumes:\n      - ./protos:/protos\n      - ./client/kv:/stubs\n"
  },
  {
    "path": "examples/grpc-bridge/docker-compose.yaml",
    "content": "version: \"3.7\"\n\nservices:\n\n  # Requires the build of the stubs first\n  grpc-server:\n    image: envoyproxy/example-kv-server\n    build:\n      context: server\n    expose:\n      - \"8081\"\n    ports:\n      - \"8081:8081\"\n    networks:\n      envoymesh:\n        aliases:\n          - kv-backend-service\n\n  grpc-server-proxy:\n    build:\n      context: .\n      dockerfile: Dockerfile-server\n    networks:\n      envoymesh:\n        aliases:\n          - kv-backend-proxy\n    expose:\n      - \"8811\"\n      - \"8881\"\n    ports:\n      - \"8811:8811\"\n      - \"8881:8881\"\n\n  # Requires the build of the stubs first\n  grpc-client:\n    image: envoyproxy/example-kv-client\n    build:\n      context: client\n    environment:\n      - CLIENT_PROXY=http://kv-client-proxy:9911\n    networks:\n      envoymesh:\n        aliases:\n          - grpc-client\n\n  grpc-client-proxy:\n    build:\n      context: .\n      dockerfile: Dockerfile-client\n    networks:\n      envoymesh:\n        aliases:\n          - kv-client-proxy\n    expose:\n      - \"9911\"\n      - \"9991\"\n    ports:\n      - \"9911:9911\"\n      - \"9991:9991\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/grpc-bridge/protos/kv.proto",
    "content": "syntax = \"proto3\";\n\npackage kv;\n\nmessage GetRequest {\n  string key = 1;\n}\n\nmessage GetResponse {\n  string value = 1;\n}\n\nmessage SetRequest {\n  string key = 1;\n  string value = 2;\n}\n\nmessage SetResponse {\n  bool ok = 1;\n}\n\nservice KV {\n  rpc Get(GetRequest) returns (GetResponse);\n  rpc Set(SetRequest) returns (SetResponse);\n}\n"
  },
  {
    "path": "examples/grpc-bridge/server/Dockerfile",
    "content": "FROM golang:1.13.0-stretch as builder\n\nWORKDIR /build\n\n# Resolve and build Go dependencies as Docker cache\nCOPY go.mod /build/go.mod\nCOPY go.sum /build/go.sum\nCOPY kv/go.mod /build/kv/go.mod\n\nENV GO111MODULE=on\nRUN go mod download\n\nCOPY service.go /build/main.go\nCOPY kv/ /build/kv\n\n# Build for linux\nENV GOOS=linux\nENV GOARCH=amd64\nENV CGO_ENABLED=0 \nRUN go build -o server\n\n# Build the main container (Linux Runtime)\nFROM alpine:latest\nWORKDIR /root/\n\n# Copy the linux amd64 binary\nCOPY --from=builder /build/server /bin/\n\nENTRYPOINT /bin/server\n"
  },
  {
    "path": "examples/grpc-bridge/server/envoy-proxy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8811\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          access_log:\n          - name: envoy.access_loggers.file\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n              path: \"/dev/stdout\"\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: backend\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                  grpc: {}\n                route:\n                  cluster: backend_grpc_service\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: backend_grpc_service\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: backend_grpc_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: kv-backend-service\n                port_value: 8081\n\nadmin:\n  access_log_path: \"/tmp/admin_access.log\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8881\n"
  },
  {
    "path": "examples/grpc-bridge/server/go.mod",
    "content": "module github.com/envoyproxy/envoy\n\ngo 1.13\n\nrequire (\n\tgithub.com/envoyproxy/envoy/examples/grpc-bridge/server/kv v0.0.0-00010101000000-000000000000\n\tgolang.org/x/net v0.0.0-20191105084925-a882066a44e0\n\tgoogle.golang.org/grpc v1.25.0\n)\n\nreplace github.com/envoyproxy/envoy/examples/grpc-bridge/server/kv => ./kv\n"
  },
  {
    "path": "examples/grpc-bridge/server/go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20191105084925-a882066a44e0 h1:QPlSTtPE2k6PZPasQUbzuK3p9JbS+vMXYVto8g/yrsg=\ngolang.org/x/net v0.0.0-20191105084925-a882066a44e0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.0 h1:ItERT+UbGdX+s4u+nQNlVM/Q7cbmf7icKfvzbWqVtq0=\ngoogle.golang.org/grpc v1.25.0/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\n"
  },
  {
    "path": "examples/grpc-bridge/server/kv/go.mod",
    "content": ""
  },
  {
    "path": "examples/grpc-bridge/server/service.go",
    "content": "package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\n\t\"sync\"\n\n  \"github.com/envoyproxy/envoy/examples/grpc-bridge/server/kv\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/grpc\"\n)\n\ntype KV struct {\n\tsync.Mutex\n\tstore map[string]string\n}\n\nfunc (k *KV) Get(ctx context.Context, in *kv.GetRequest) (*kv.GetResponse, error) {\n\tlog.Printf(\"get: %s\", in.Key)\n\tresp := new(kv.GetResponse)\n\tif val, ok := k.store[in.Key]; ok {\n\t\tresp.Value = val\n\t}\n\n\treturn resp, nil\n}\n\nfunc (k *KV) Set(ctx context.Context, in *kv.SetRequest) (*kv.SetResponse, error) {\n\tlog.Printf(\"set: %s = %s\", in.Key, in.Value)\n\tk.Lock()\n\tdefer k.Unlock()\n\n\tk.store[in.Key] = in.Value\n\n\treturn &kv.SetResponse{Ok: true}, nil\n}\n\nfunc NewKVStore() (kv *KV) {\n\tkv = &KV{\n\t\tstore: make(map[string]string),\n\t}\n\n\treturn\n}\n\nfunc main() {\n\tport := flag.Int(\"port\", 8081, \"grpc port\")\n\n\tflag.Parse()\n\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", *port))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\tgs := grpc.NewServer()\n\tkv.RegisterKVServer(gs, NewKVStore())\n\n\tlog.Printf(\"starting grpc on :%d\\n\", *port)\n\n\tgs.Serve(lis)\n}\n"
  },
  {
    "path": "examples/grpc-bridge/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=grpc-bridge\n# this allows us to bring up the stack manually after generating stubs\nexport MANUAL=true\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Generate protocol stubs\"\ndocker-compose -f docker-compose-protos.yaml up\ndocker rm grpc-bridge_stubs_go_1 grpc-bridge_stubs_python_1\n\nls client/kv/kv_pb2.py\nls server/kv/kv.pb.go\n\nbring_up_example\n\nrun_log \"Set key value foo=bar\"\ndocker-compose exec -T grpc-client /client/grpc-kv-client.py set foo bar | grep setf\n\nrun_log \"Get key foo\"\ndocker-compose exec -T grpc-client /client/grpc-kv-client.py get foo | grep bar\n"
  },
  {
    "path": "examples/jaeger-native-tracing/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./front-envoy-jaeger.yaml /etc/front-envoy.yaml\n#\n# for discussion on jaeger binary compatibility, and the source of the file, see here:\n#  https://github.com/envoyproxy/envoy/issues/11382#issuecomment-638012072\n#\nRUN echo \"4a7d17d4724ee890490bcd6cfdedb12a02316a3d33214348d30979abd201f1ca /usr/local/lib/libjaegertracing_plugin.so\" > /tmp/checksum \\\n         && curl -Ls https://github.com/tetratelabs/getenvoy-package/files/3518103/getenvoy-centos-jaegertracing-plugin.tar.gz \\\n              | tar zxf - -C /usr/local/lib \\\n         && mv /usr/local/lib/libjaegertracing.so.0.4.2 /usr/local/lib/libjaegertracing_plugin.so \\\n         && sha256sum -c /tmp/checksum \\\n         && rm /tmp/checksum \\\n\t && chmod go+r /etc/front-envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/front-envoy.yaml\", \"--service-cluster\", \"front-proxy\"]\n"
  },
  {
    "path": "examples/jaeger-native-tracing/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/jaeger_native_tracing)\n"
  },
  {
    "path": "examples/jaeger-native-tracing/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8000:8000\"\n      - \"8001:8001\"\n    dns:\n      - 8.8.8.8\n      - 8.8.4.4\n\n  service1:\n    build:\n      context: ../front-proxy\n      dockerfile: Dockerfile-jaeger-service\n    volumes:\n      - ./service1-envoy-jaeger.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service1\n    environment:\n      - SERVICE_NAME=1\n    expose:\n      - \"8000\"\n    dns:\n      - 8.8.8.8\n      - 8.8.4.4\n\n  service2:\n    build:\n      context: ../front-proxy\n      dockerfile: Dockerfile-jaeger-service\n    volumes:\n      - ./service2-envoy-jaeger.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service2\n    environment:\n      - SERVICE_NAME=2\n    expose:\n      - \"8000\"\n    dns:\n      - 8.8.8.8\n      - 8.8.4.4\n\n  jaeger:\n    image: jaegertracing/all-in-one\n    environment:\n      - COLLECTOR_ZIPKIN_HTTP_PORT=9411\n    networks:\n      envoymesh:\n        aliases:\n          - jaeger\n    expose:\n      - \"9411\"\n      - \"16686\"\n    ports:\n      - \"9411:9411\"\n      - \"16686:16686\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/jaeger-native-tracing/front-envoy-jaeger.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    traffic_direction: OUTBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          generate_request_id: true\n          tracing:\n            provider:\n              name: envoy.tracers.dynamic_ot\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.DynamicOtConfig\n                library: /usr/local/lib/libjaegertracing_plugin.so\n                config:\n                  service_name: front-proxy\n                  sampler:\n                    type: const\n                    param: 1\n                  reporter:\n                    localAgentHostPort: jaeger:6831\n                  headers:\n                    jaegerDebugHeader: jaeger-debug-id\n                    jaegerBaggageHeader: jaeger-baggage\n                    traceBaggageHeaderPrefix: uberctx-\n                  baggage_restrictions:\n                    denyBaggageOnInitializationFailure: false\n                    hostPort: \"\"\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: backend\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: service1\n                decorator:\n                  operation: checkAvailability\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n          use_remote_address: true\n  clusters:\n  - name: service1\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    
http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service1\n                port_value: 8000\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/jaeger-native-tracing/install-jaeger-plugin.sh",
    "content": "#!/usr/bin/env bash\nJAEGER_VERSION=v0.4.2\ncurl -Lo /usr/local/lib/libjaegertracing_plugin.so https://github.com/jaegertracing/jaeger-client-cpp/releases/download/$JAEGER_VERSION/libjaegertracing_plugin.linux_amd64.so\n"
  },
  {
    "path": "examples/jaeger-native-tracing/service1-envoy-jaeger.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    traffic_direction: INBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: service1_route\n            virtual_hosts:\n            - name: service1\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: local_service\n                decorator:\n                  operation: checkAvailability\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 9000\n    traffic_direction: OUTBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          tracing:\n            provider:\n              name: envoy.tracers.dynamic_ot\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.DynamicOtConfig\n                library: /usr/local/lib/libjaegertracing_plugin.so\n                config:\n                  service_name: service1\n                  sampler:\n                    type: const\n                    param: 1\n                  reporter:\n                    localAgentHostPort: jaeger:6831\n                  headers:\n                    jaegerDebugHeader: jaeger-debug-id\n                    jaegerBaggageHeader: jaeger-baggage\n                    
traceBaggageHeaderPrefix: uberctx-\n                  baggage_restrictions:\n                    denyBaggageOnInitializationFailure: false\n                    hostPort: \"\"\n          codec_type: auto\n          stat_prefix: egress_http\n          route_config:\n            name: service2_route\n            virtual_hosts:\n            - name: service2\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/trace/2\"\n                route:\n                  cluster: service2\n                decorator:\n                  operation: checkStock\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\n  - name: service2\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service2\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service2\n                port_value: 8000\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/jaeger-native-tracing/service2-envoy-jaeger.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    traffic_direction: INBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          tracing:\n            provider:\n              name: envoy.tracers.dynamic_ot\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.DynamicOtConfig\n                library: /usr/local/lib/libjaegertracing_plugin.so\n                config:\n                  service_name: service2\n                  sampler:\n                    type: const\n                    param: 1\n                  reporter:\n                    localAgentHostPort: jaeger:6831\n                  headers:\n                    jaegerDebugHeader: jaeger-debug-id\n                    jaegerBaggageHeader: jaeger-baggage\n                    traceBaggageHeaderPrefix: uberctx-\n                  baggage_restrictions:\n                    denyBaggageOnInitializationFailure: false\n                    hostPort: \"\"\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service2\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: local_service\n                decorator:\n                  operation: checkStock\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - 
lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/jaeger-native-tracing/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=jaeger-native\nexport DELAY=10\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test services\"\nresponds_with \\\n    Hello \\\n    http://localhost:8000/trace/1\n\nrun_log \"Test Jaeger UI\"\nresponds_with \\\n    \"<!doctype html>\" \\\n    http://localhost:16686\n"
  },
  {
    "path": "examples/jaeger-tracing/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./front-envoy-jaeger.yaml /etc/front-envoy.yaml\nRUN chmod go+r /etc/front-envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/front-envoy.yaml\", \"--service-cluster\", \"front-proxy\"]\n"
  },
  {
    "path": "examples/jaeger-tracing/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/jaeger_tracing)\n"
  },
  {
    "path": "examples/jaeger-tracing/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8000:8000\"\n      - \"8001:8001\"\n\n  service1:\n    build:\n      context: ../front-proxy\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./service1-envoy-jaeger.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service1\n    environment:\n      - SERVICE_NAME=1\n    expose:\n      - \"8000\"\n\n  service2:\n    build:\n      context: ../front-proxy\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./service2-envoy-jaeger.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service2\n    environment:\n      - SERVICE_NAME=2\n    expose:\n      - \"8000\"\n\n  jaeger:\n    image: jaegertracing/all-in-one\n    environment:\n      - COLLECTOR_ZIPKIN_HTTP_PORT=9411\n    networks:\n      envoymesh:\n        aliases:\n          - jaeger\n    expose:\n      - \"9411\"\n      - \"16686\"\n    ports:\n      - \"9411:9411\"\n      - \"16686:16686\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/jaeger-tracing/front-envoy-jaeger.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    traffic_direction: OUTBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          generate_request_id: true\n          tracing:\n            provider:\n              name: envoy.tracers.zipkin\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n                collector_cluster: jaeger\n                collector_endpoint: \"/api/v2/spans\"\n                shared_span_context: false\n                collector_endpoint_version: HTTP_JSON\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: backend\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: service1\n                decorator:\n                  operation: checkAvailability\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n          use_remote_address: true\n  clusters:\n  - name: service1\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service1\n                port_value: 8000\n  - name: jaeger\n    connect_timeout: 1s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: jaeger\n      endpoints:\n      - lb_endpoints:\n        - 
endpoint:\n            address:\n              socket_address:\n                address: jaeger\n                port_value: 9411\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/jaeger-tracing/service1-envoy-jaeger.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    traffic_direction: INBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          tracing:\n            provider:\n              name: envoy.tracers.zipkin\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n                collector_cluster: jaeger\n                collector_endpoint: \"/api/v2/spans\"\n                shared_span_context: false\n                collector_endpoint_version: HTTP_JSON\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: service1_route\n            virtual_hosts:\n            - name: service1\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: local_service\n                decorator:\n                  operation: checkAvailability\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 9000\n    traffic_direction: OUTBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          tracing:\n            provider:\n              name: envoy.tracers.zipkin\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n                collector_cluster: jaeger\n                collector_endpoint: 
\"/api/v2/spans\"\n                shared_span_context: false\n                collector_endpoint_version: HTTP_JSON\n          codec_type: auto\n          stat_prefix: egress_http\n          route_config:\n            name: service2_route\n            virtual_hosts:\n            - name: service2\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/trace/2\"\n                route:\n                  cluster: service2\n                decorator:\n                  operation: checkStock\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\n  - name: service2\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service2\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service2\n                port_value: 8000\n  - name: jaeger\n    connect_timeout: 1s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: jaeger\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: jaeger\n                port_value: 9411\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/jaeger-tracing/service2-envoy-jaeger.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    traffic_direction: INBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          tracing:\n            provider:\n              name: envoy.tracers.zipkin\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n                collector_cluster: jaeger\n                collector_endpoint: \"/api/v2/spans\"\n                shared_span_context: false\n                collector_endpoint_version: HTTP_JSON\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service2\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: local_service\n                decorator:\n                  operation: checkStock\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\n  - name: jaeger\n    connect_timeout: 1s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: jaeger\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: jaeger\n      
          port_value: 9411\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/jaeger-tracing/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=jaeger-tracing\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test services\"\nresponds_with \\\n    Hello \\\n    http://localhost:8000/trace/1\n\nrun_log \"Test Jaeger UI\"\nresponds_with \\\n    \"<!doctype html>\" \\\n    http://localhost:16686\n"
  },
  {
    "path": "examples/load-reporting-service/Dockerfile-http-server",
    "content": "FROM envoyproxy/envoy-alpine-dev:latest\n\nRUN apk update && apk add py3-pip bash curl\nRUN mkdir /code\nADD ./start_service.sh /usr/local/bin/start_service.sh\nCOPY . ./code\n\nRUN pip3 install -q Flask==0.11.1\n\nRUN chmod u+x /usr/local/bin/start_service.sh\nENTRYPOINT [\"/bin/sh\", \"/usr/local/bin/start_service.sh\"]\n"
  },
  {
    "path": "examples/load-reporting-service/Dockerfile-lrs",
    "content": "FROM golang\nCOPY ./server /go/src/github.com/envoyproxy/envoy/example/load-reporting-service/server\nCOPY *.go /go/src/github.com/envoyproxy/envoy/example/load-reporting-service/\nCOPY go.sum /go/src/github.com/envoyproxy/envoy/example/load-reporting-service\nCOPY go.mod /go/src/github.com/envoyproxy/envoy/example/load-reporting-service\n\nWORKDIR /go/src/github.com/envoyproxy/envoy/example/load-reporting-service\nRUN go mod download\nRUN go install /go/src/github.com/envoyproxy/envoy/example/load-reporting-service\n\nCMD [\"go\",\"run\",\"main.go\"]\n"
  },
  {
    "path": "examples/load-reporting-service/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/load_reporting_service.html)\n"
  },
  {
    "path": "examples/load-reporting-service/docker-compose.yaml",
    "content": "version: '3.7'\nservices:\n\n  http_service:\n    build:\n      context: .\n      dockerfile: Dockerfile-http-server\n    volumes:\n      - ./service-envoy-w-lrs.yaml:/etc/service-envoy-w-lrs.yaml\n    environment:\n      ENVOY_UID: 0\n    networks:\n      envoymesh:\n        aliases:\n          - http_service\n    expose:\n      - \"80\"\n      - \"8081\"\n    ports:\n      - \"80-81:80\"\n      - \"8081-8082:8081\"\n\n  lrs_server:\n    build:\n      context: .\n      dockerfile: Dockerfile-lrs\n    networks:\n      envoymesh:\n        aliases:\n          - lrs_server\n    volumes:\n      - /go/src/github.com/envoyproxy/envoy/examples/load-reporting-service\n    expose:\n      - \"18000\"\n    ports:\n      - \"18000:18000\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/load-reporting-service/go.mod",
    "content": "module github.com/envoyproxy/envoy/examples/load-reporting-service\n\ngo 1.13\n\nrequire (\n\tgithub.com/envoyproxy/go-control-plane v0.9.0\n\tgithub.com/golang/protobuf v1.3.2\n\tgolang.org/x/net v0.0.0-20200226121028-0de0cce0169b // indirect\n\tgolang.org/x/sys v0.0.0-20190412213103-97732733099d // indirect\n\tgoogle.golang.org/grpc v1.25.1\n)\n"
  },
  {
    "path": "examples/load-reporting-service/go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/envoyproxy/go-control-plane v0.9.0 h1:67WMNTvGrl7V1dWdKCeTwxDr7nio9clKoTlLhwIPnT4=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/text v0.3.0 
h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=\ngoogle.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\n"
  },
  {
    "path": "examples/load-reporting-service/http_server.py",
    "content": "from flask import Flask\n\napp = Flask(__name__)\n\n\n@app.route('/service')\ndef hello():\n  return 'Hello from behind Envoy!'\n\n\nif __name__ == \"__main__\":\n  app.run(host='0.0.0.0', port=8082, debug=False)\n"
  },
  {
    "path": "examples/load-reporting-service/main.go",
    "content": "package main\n\nimport (\n\t\"log\"\n\t\"net\"\n\n\t\"github.com/envoyproxy/envoy/examples/load-reporting-service/server\"\n\tgcpLoadStats \"github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2\"\n\t\"google.golang.org/grpc\"\n)\n\nfunc main() {\n\t// Listening on port 18000\n\taddress := \":18000\"\n\tlis, err := net.Listen(\"tcp\", address)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\txdsServer := server.NewServer()\n\tgcpLoadStats.RegisterLoadReportingServiceServer(grpcServer, xdsServer)\n\n\tlog.Printf(\"LRS Server is up and running on %s\", address)\n\terr = grpcServer.Serve(lis)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "examples/load-reporting-service/send_requests.sh",
    "content": "#!/usr/bin/env bash\n\ncounter=1\nwhile [ $counter -le 50 ]\ndo\n  # generate random Port number to send requests\n  ports=(\"80\" \"81\")\n  port=${ports[$RANDOM % ${#ports[@]} ]}\n\n  curl -v \"localhost:${port}/service\"\n  ((counter++))\ndone\n"
  },
  {
    "path": "examples/load-reporting-service/server/lrs_server.go",
    "content": "package server\n\nimport (\n\t\"log\"\n\t\"sync\"\n\n\tgcpLoadStats \"github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2\"\n\t\"github.com/golang/protobuf/ptypes/duration\"\n)\n\n// This is how often Envoy will send the load report\nconst StatsFrequencyInSeconds = 2\n\n// Server handling Load Stats communication\ntype Server interface {\n\tgcpLoadStats.LoadReportingServiceServer\n\tHandleRequest(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer, request *gcpLoadStats.LoadStatsRequest)\n}\n\nfunc NewServer() Server {\n\treturn &server{nodesConnected: make(map[string]bool)}\n}\n\ntype server struct {\n\t// protects nodesConnected\n\tmu sync.Mutex\n\n\t// This cache stores nodes connected to the LRS server\n\tnodesConnected map[string]bool\n}\n\n// Handles incoming stream connections and LoadStatsRequests\nfunc (s *server) StreamLoadStats(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\t// input stream ended or errored out\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.HandleRequest(stream, req)\n\t}\n}\n\nfunc (s *server) HandleRequest(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer, request *gcpLoadStats.LoadStatsRequest) {\n\tnodeID := request.GetNode().GetId()\n\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\t// Check whether any Node has already connected or not.\n\t// If not, add the NodeID to nodesConnected and enable Load Report with given frequency\n\t// If yes, log stats\n\tif _, exist := s.nodesConnected[nodeID]; !exist {\n\t\t// Add NodeID to the nodesConnected\n\t\tlog.Printf(\"Adding new node to cache `%s`\", nodeID)\n\t\ts.nodesConnected[nodeID] = true\n\n\t\t// Initialize Load Reporting\n\t\terr := stream.Send(&gcpLoadStats.LoadStatsResponse{\n\t\t\tClusters:                  []string{\"local_service\"},\n\t\t\tLoadReportingInterval:     &duration.Duration{Seconds: 
StatsFrequencyInSeconds},\n\t\t\tReportEndpointGranularity: true,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Unable to send response to node %s due to err: %s\", nodeID, err)\n\t\t}\n\t\treturn\n\t}\n\n\t// After Load Report is enabled, log the Load Report stats received\n\tfor _, clusterStats := range request.ClusterStats {\n\t\tif len(clusterStats.UpstreamLocalityStats) > 0 {\n\t\t\tlog.Printf(\"Got stats from cluster `%s` node `%s` - %s\", request.Node.Cluster, request.Node.Id, clusterStats)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "examples/load-reporting-service/service-envoy-w-lrs.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 80\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/service\"\n                route:\n                  cluster: local_service\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8082\n  - name: load_reporting_cluster\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: load_reporting_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: lrs_server\n                port_value: 18000\ncluster_manager:\n  load_stats_config:\n    api_type: GRPC\n    grpc_services:\n      envoy_grpc:\n        cluster_name: load_reporting_cluster\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/load-reporting-service/start_service.sh",
    "content": "#!/bin/bash\npython3 /code/http_server.py &\n/usr/local/bin/envoy -c /etc/service-envoy-w-lrs.yaml --service-node \"${HOSTNAME}\" --service-cluster http_service\n"
  },
  {
    "path": "examples/load-reporting-service/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=load-reporting\nexport UPARGS=\"--scale http_service=2\"\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\nrun_log \"Send requests\"\nbash send_requests.sh 2> /dev/null\nrun_log \"Check logs: http 1\"\ndocker-compose logs http_service | grep http_service_1 | grep HTTP | grep 200\n\nrun_log \"Check logs: http 2\"\ndocker-compose logs http_service | grep http_service_2 | grep HTTP | grep 200\n\nrun_log \"Check logs: lrs_server\"\ndocker-compose logs lrs_server | grep \"up and running\"\n"
  },
  {
    "path": "examples/lua/Dockerfile-proxy",
    "content": "FROM envoyproxy/envoy-dev:latest\nADD ./lib/mylibrary.lua /lib/mylibrary.lua\nCOPY ./envoy.yaml /etc/envoy.yaml\nRUN chmod go+r /etc/envoy.yaml /lib/mylibrary.lua\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/envoy.yaml\", \"-l\", \"debug\", \"--service-cluster\", \"proxy\"]\n"
  },
  {
    "path": "examples/lua/Dockerfile-web-service",
    "content": "FROM solsson/http-echo\n"
  },
  {
    "path": "examples/lua/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [Envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/lua.html).\n"
  },
  {
    "path": "examples/lua/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  proxy:\n    build:\n      context: .\n      dockerfile: Dockerfile-proxy\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8000:8000\"\n      - \"8001:8001\"\n\n  web_service:\n    build:\n      context: .\n      dockerfile: Dockerfile-web-service\n    networks:\n      envoymesh:\n        aliases:\n          - web_service\n    expose:\n      - \"80\"\n    ports:\n      - \"8080:80\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/lua/envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - name: main\n    address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: ingress_http\n          codec_type: auto\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: web_service\n          http_filters:\n          - name: envoy.filters.http.lua\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n              inline_code: |\n                local mylibrary = require(\"lib.mylibrary\")\n\n                function envoy_on_request(request_handle)\n                  request_handle:headers():add(\"foo\", mylibrary.foobar())\n                end\n                function envoy_on_response(response_handle)\n                  body_size = response_handle:body():length()\n                  response_handle:headers():add(\"response-body-size\", tostring(body_size))\n                end\n          - name: envoy.filters.http.router\n            typed_config: {}\n\n  clusters:\n  - name: web_service\n    connect_timeout: 0.25s\n    type: strict_dns # static\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: web_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: web_service\n                port_value: 80\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/lua/lib/mylibrary.lua",
    "content": "M = {}\n\nfunction M.foobar()\n    return \"bar\"\nend\n\nreturn M\n"
  },
  {
    "path": "examples/lua/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=lua\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test connection\"\nresponds_with \\\n    foo \\\n    http://localhost:8000\n"
  },
  {
    "path": "examples/mysql/Dockerfile-mysql",
    "content": "FROM mysql:5.5\n"
  },
  {
    "path": "examples/mysql/Dockerfile-proxy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nCOPY ./envoy.yaml /etc/envoy.yaml\nRUN chmod go+r /etc/envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c /etc/envoy.yaml\", \"-l\", \"debug\"]\n"
  },
  {
    "path": "examples/mysql/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [Envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/mysql.html).\n"
  },
  {
    "path": "examples/mysql/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  proxy:\n    build:\n      context: .\n      dockerfile: Dockerfile-proxy\n    networks:\n      envoymesh:\n        aliases:\n          - envoy\n    expose:\n      - \"1999\"\n      - \"8001\"\n    ports:\n      - \"1999:1999\"\n      - \"8001:8001\"\n\n  mysql:\n    build:\n      context: .\n      dockerfile: Dockerfile-mysql\n    networks:\n      envoymesh:\n        aliases:\n          - mysql\n    environment:\n      - MYSQL_ALLOW_EMPTY_PASSWORD=yes\n    expose:\n      - \"3306\"\n    ports:\n      - \"3306:3306\"\n\nnetworks:\n  envoymesh:\n    name: envoymesh\n"
  },
  {
    "path": "examples/mysql/envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - name: mysql_listener\n    address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 1999\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.mysql_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy\n          stat_prefix: egress_mysql\n      - name: envoy.filters.network.tcp_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy\n          stat_prefix: mysql_tcp\n          cluster: mysql_cluster\n\n  clusters:\n  - name: mysql_cluster\n    connect_timeout: 1s\n    type: strict_dns\n    load_assignment:\n      cluster_name: mysql_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: mysql\n                port_value: 3306\n\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/mysql/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=mysql\nexport DELAY=10\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n_mysql () {\n    local mysql_client\n    mysql_client=(docker run --network envoymesh mysql:5.5 mysql -h envoy -P 1999 -u root)\n    \"${mysql_client[@]}\" \"${@}\"\n}\n\nrun_log \"Create a mysql database\"\n_mysql -e \"CREATE DATABASE test;\"\n_mysql -e \"show databases;\" | grep test\n\nrun_log \"Create a mysql table\"\n_mysql -e \"USE test; CREATE TABLE test ( text VARCHAR(255) ); INSERT INTO test VALUES ('hello, world!');\"\n_mysql -e \"SELECT COUNT(*) from test.test;\" | grep 1\n\nrun_log \"Check mysql egress stats\"\nresponds_with \\\n    egress_mysql \\\n    \"http://localhost:8001/stats?filter=egress_mysql\"\n\nrun_log \"Check mysql TCP stats\"\nresponds_with \\\n    mysql_tcp \\\n    \"http://localhost:8001/stats?filter=mysql_tcp\"\n"
  },
  {
    "path": "examples/redis/Dockerfile-proxy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nCOPY ./envoy.yaml /etc/envoy.yaml\nRUN chmod go+r /etc/envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/envoy.yaml\", \"-l\", \"debug\", \"--service-cluster\", \"proxy\"]\n"
  },
  {
    "path": "examples/redis/Dockerfile-redis",
    "content": "FROM redis\n"
  },
  {
    "path": "examples/redis/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [Envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/redis.html).\n"
  },
  {
    "path": "examples/redis/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  proxy:\n    build:\n      context: .\n      dockerfile: Dockerfile-proxy\n    networks:\n      - envoymesh\n    expose:\n      - \"1999\"\n      - \"8001\"\n    ports:\n      - \"1999:1999\"\n      - \"8001:8001\"\n\n  redis:\n    build:\n      context: .\n      dockerfile: Dockerfile-redis\n    networks:\n      envoymesh:\n        aliases:\n          - redis_server\n    expose:\n      - \"6379\"\n    ports:\n      - \"6379:6379\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/redis/envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - name: redis_listener\n    address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 1999\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.redis_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy\n          stat_prefix: egress_redis\n          settings:\n            op_timeout: 5s\n          prefix_routes:\n            catch_all_route:\n              cluster: redis_cluster\n  clusters:\n  - name: redis_cluster\n    connect_timeout: 1s\n    type: strict_dns # static\n    lb_policy: MAGLEV\n    load_assignment:\n      cluster_name: redis_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: redis_server\n                port_value: 6379\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/redis/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=redis\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test set\"\nredis-cli -h localhost -p 1999 set foo FOO | grep OK\nredis-cli -h localhost -p 1999 set bar BAR | grep OK\n\nrun_log \"Test get\"\nredis-cli -h localhost -p 1999 get foo | grep FOO\nredis-cli -h localhost -p 1999 get bar | grep BAR\n\nrun_log \"Test redis stats\"\nresponds_with \\\n    egress_redis \\\n    \"http://localhost:8001/stats?usedonly&filter=redis.egress_redis.command\"\n"
  },
  {
    "path": "examples/verify-common.sh",
    "content": "#!/bin/bash -e\n\nDELAY=\"${DELAY:-0}\"\nDOCKER_NO_PULL=\"${DOCKER_NO_PULL:-}\"\nMANUAL=\"${MANUAL:-}\"\nNAME=\"${NAME:-}\"\nPATHS=\"${PATHS:-.}\"\nUPARGS=\"${UPARGS:-}\"\n\n\nrun_log () {\n    echo -e \"\\n> [${NAME}] ${*}\"\n}\n\nbring_up_example_stack () {\n    local args path up_args\n    args=(\"${UPARGS[@]}\")\n    path=\"$1\"\n    read -ra up_args <<< \"up --build -d ${args[*]}\"\n    if [[ -z \"$DOCKER_NO_PULL\" ]]; then\n\trun_log \"Pull the images ($path)\"\n\tdocker-compose pull\n\techo\n    fi\n    run_log \"Bring up services ($path)\"\n    docker-compose \"${up_args[@]}\" || return 1\n    echo\n}\n\nbring_up_example () {\n    local path paths\n    read -ra paths <<< \"$(echo \"$PATHS\" | tr ',' ' ')\"\n    for path in \"${paths[@]}\"; do\n        pushd \"$path\" > /dev/null || return 1\n        bring_up_example_stack \"$path\" || {\n            echo \"ERROR: starting ${NAME} ${path}\" >&2\n            return 1\n        }\n        popd > /dev/null || return 1\n    done\n    if [[ \"$DELAY\" -ne \"0\" ]]; then\n        run_log \"Snooze for ${DELAY} while ${NAME} gets started\"\n        sleep \"$DELAY\"\n    fi\n    for path in \"${paths[@]}\"; do\n        pushd \"$path\" > /dev/null || return 1\n        docker-compose ps\n        docker-compose logs\n        popd > /dev/null || return 1\n    done\n}\n\ncleanup_stack () {\n    local path\n    path=\"$1\"\n    run_log \"Cleanup ($path)\"\n    docker-compose down\n}\n\ncleanup () {\n    local path paths\n    read -ra paths <<< \"$(echo \"$PATHS\" | tr ',' ' ')\"\n    for path in \"${paths[@]}\"; do\n        pushd \"$path\" > /dev/null || return 1\n        cleanup_stack \"$path\" || {\n            echo \"ERROR: cleanup ${NAME} ${path}\" >&2\n            return 1\n        }\n        popd > /dev/null\n    done\n}\n\n_curl () {\n    local arg curl_command\n    curl_command=(curl -s)\n    if [[ ! 
\"$*\" =~ \"-X\" ]]; then\n        curl_command+=(-X GET)\n    fi\n    for arg in \"${@}\"; do\n        curl_command+=(\"$arg\")\n    done\n    \"${curl_command[@]}\" || {\n        echo \"ERROR: curl (${curl_command[*]})\" >&2\n        return 1\n    }\n}\n\nresponds_with () {\n    local expected\n    expected=\"$1\"\n    shift\n    _curl \"${@}\" | grep \"$expected\" || {\n        echo \"ERROR: curl expected (${*}): $expected\" >&2\n        return 1\n    }\n}\n\nresponds_with_header () {\n    local expected\n    expected=\"$1\"\n    shift\n    _curl --head \"${@}\" | grep \"$expected\"  || {\n        echo \"ERROR: curl header (${*}): $expected\" >&2\n        return 1\n    }\n}\n\nresponds_without_header () {\n    local expected\n    expected=\"$1\"\n    shift\n    _curl --head \"${@}\" | grep \"$expected\" | [[ \"$(wc -l)\" -eq 0 ]] || {\n        echo \"ERROR: curl without header (${*}): $expected\" >&2\n        return 1\n    }\n}\n\n\ntrap 'cleanup' EXIT\n\nif [[ -z \"$NAME\" ]]; then\n    echo \"ERROR: You must set the '$NAME' variable before sourcing this script\" >&2\n    exit 1\nfi\n\nif [[ -z \"$MANUAL\" ]]; then\n    bring_up_example\nfi\n"
  },
  {
    "path": "examples/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\"//bazel/wasm:wasm.bzl\", \"wasm_cc_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nwasm_cc_binary(\n    name = \"envoy_filter_http_wasm_example.wasm\",\n    srcs = [\"envoy_filter_http_wasm_example.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n"
  },
  {
    "path": "examples/wasm/Dockerfile-proxy",
    "content": "FROM envoyproxy/envoy-dev:latest\nCOPY ./envoy.yaml /etc/envoy.yaml\nCOPY ./envoy_filter_http_wasm_example.wasm /etc/envoy_filter_http_wasm_example.wasm\nRUN chmod go+r /etc/envoy.yaml\nCMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy\n"
  },
  {
    "path": "examples/wasm/Dockerfile-web-service",
    "content": "FROM solsson/http-echo\n"
  },
  {
    "path": "examples/wasm/README.md",
    "content": "# Envoy WebAssembly Filter\n\nIn this example, we show how a WebAssembly(WASM) filter can be used with the Envoy\nproxy. The Envoy proxy [configuration](./envoy.yaml) includes a Webassembly filter\nas documented [here](https://www.envoyproxy.io/docs/envoy/latest/).\n<!--TODO(bianpengyuan): change to the url of Wasm filter once the doc is ready.-->\n\n\n\n## Quick Start\n\n1. `docker-compose build`\n2. `docker-compose up`\n3. `curl -v localhost:18000`\n\nCurl output should include our headers:\n\n```\n# <b> curl -v localhost:8000</b>\n* Rebuilt URL to: localhost:18000/\n*   Trying 127.0.0.1...\n* TCP_NODELAY set\n* Connected to localhost (127.0.0.1) port 18000 (#0)\n> GET / HTTP/1.1\n> Host: localhost:18000\n> User-Agent: curl/7.58.0\n> Accept: */*\n> \n< HTTP/1.1 200 OK\n< content-length: 13\n< content-type: text/plain\n< location: envoy-wasm\n< date: Tue, 09 Jul 2019 00:47:14 GMT\n< server: envoy\n< x-envoy-upstream-service-time: 0\n< newheader: newheadervalue\n< \nexample body\n* Connection #0 to host localhost left intact\n```\n\n## Build WASM Module\n\nNow you want to make changes to the C++ filter ([envoy_filter_http_wasm_example.cc](envoy_filter_http_wasm_example.cc))\nand build the WASM module ([envoy_filter_http_wasm_example.wasm](envoy_filter_http_wasm_example.wasm)).\n\n1. Build WASM module\n   ```shell\n   bazel build //examples/wasm:envoy_filter_http_wasm_example.wasm\n   ```\n\n## Build the Envoy WASM Image\n\n<!--TODO(incfly): remove this once we upstream WASM to envoyproxy main repo.-->\n\nFor Envoy WASM runtime developers, if you want to make changes, please\n\n1. Follow [instructions](https://github.com/envoyproxy/envoy-wasm/blob/master/WASM.md).\n2. Modify `docker-compose.yaml` to mount your own Envoy.\n"
  },
  {
    "path": "examples/wasm/docker-compose.yaml",
    "content": "version: '3.7'\nservices:\n\n  proxy:\n    build:\n      context: .\n      dockerfile: Dockerfile-proxy\n    volumes:\n      - ./envoy.yaml:/etc/envoy.yaml\n      - ./envoy_wasm_example.wasm:/etc/envoy_wasm_example.wasm\n      - ./envoy_filter_http_wasm_example.wasm:/etc/envoy_filter_http_wasm_example.wasm\n      # Uncomment this line if you want to use your own Envoy with WASM enabled.\n      #- /tmp/envoy-docker-build/envoy/source/exe/envoy:/usr/local/bin/envoy\n    networks:\n      - envoymesh\n    expose:\n      - \"80\"\n      - \"8001\"\n    ports:\n      - \"18000:80\"\n      - \"18001:8001\"\n\n  web_service:\n    build:\n      context: .\n      dockerfile: Dockerfile-web-service\n    networks:\n      envoymesh:\n        aliases:\n          - web_service\n    expose:\n      - \"80\"\n    ports:\n      - \"18080:80\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/wasm/envoy.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: web_service\n          http_filters:\n          - name: envoy.filters.http.wasm\n            typed_config:\n              \"@type\": type.googleapis.com/udpa.type.v1.TypedStruct\n              type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm\n              value:\n                config:\n                  name: \"my_plugin\"\n                  root_id: \"my_root_id\"\n                  configuration:\n                    \"@type\": \"type.googleapis.com/google.protobuf.StringValue\"\n                    value: |\n                      {}\n                  vm_config:\n                    runtime: \"envoy.wasm.runtime.v8\"\n                    vm_id: \"my_vm_id\"\n                    code:\n                      local:\n                        filename: \"/etc/envoy_filter_http_wasm_example.wasm\"\n                    configuration: {}\n          - name: envoy.filters.http.router\n            typed_config: {}\n  - name: staticreply\n    address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 8099\n    filter_chains:\n    - filters:\n      - name: envoy.http_connection_manager\n        typed_config:\n          \"@type\": 
type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: ingress_http\n          codec_type: auto\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                direct_response:\n                  status: 200\n                  body:\n                    inline_string: \"foo\\n\"\n          http_filters:\n          - name: envoy.router\n            config: {}\n  clusters:\n  - name: web_service\n    connect_timeout: 0.25s\n    type: static\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: service1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8099\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/wasm/envoy_filter_http_wasm_example.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <string>\n#include <string_view>\n#include <unordered_map>\n\n#include \"proxy_wasm_intrinsics.h\"\n\nclass ExampleRootContext : public RootContext {\npublic:\n  explicit ExampleRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {}\n\n  bool onStart(size_t) override;\n  bool onConfigure(size_t) override;\n  void onTick() override;\n};\n\nclass ExampleContext : public Context {\npublic:\n  explicit ExampleContext(uint32_t id, RootContext* root) : Context(id, root) {}\n\n  void onCreate() override;\n  FilterHeadersStatus onRequestHeaders(uint32_t headers, bool end_of_stream) override;\n  FilterDataStatus onRequestBody(size_t body_buffer_length, bool end_of_stream) override;\n  FilterHeadersStatus onResponseHeaders(uint32_t headers, bool end_of_stream) override;\n  FilterDataStatus onResponseBody(size_t body_buffer_length, bool end_of_stream) override;\n  void onDone() override;\n  void onLog() override;\n  void onDelete() override;\n};\nstatic RegisterContextFactory register_ExampleContext(CONTEXT_FACTORY(ExampleContext),\n                                                      ROOT_FACTORY(ExampleRootContext),\n                                                      \"my_root_id\");\n\nbool ExampleRootContext::onStart(size_t) {\n  LOG_TRACE(\"onStart\");\n  return true;\n}\n\nbool ExampleRootContext::onConfigure(size_t) {\n  LOG_TRACE(\"onConfigure\");\n  proxy_set_tick_period_milliseconds(1000); // 1 sec\n  return true;\n}\n\nvoid ExampleRootContext::onTick() { LOG_TRACE(\"onTick\"); }\n\nvoid ExampleContext::onCreate() { LOG_WARN(std::string(\"onCreate \" + std::to_string(id()))); }\n\nFilterHeadersStatus ExampleContext::onRequestHeaders(uint32_t, bool) {\n  LOG_DEBUG(std::string(\"onRequestHeaders \") + std::to_string(id()));\n  auto result = getRequestHeaderPairs();\n  auto pairs = result->pairs();\n  LOG_INFO(std::string(\"headers: \") + std::to_string(pairs.size()));\n  for (auto& p : 
pairs) {\n    LOG_INFO(std::string(p.first) + std::string(\" -> \") + std::string(p.second));\n  }\n  return FilterHeadersStatus::Continue;\n}\n\nFilterHeadersStatus ExampleContext::onResponseHeaders(uint32_t, bool) {\n  LOG_DEBUG(std::string(\"onResponseHeaders \") + std::to_string(id()));\n  auto result = getResponseHeaderPairs();\n  auto pairs = result->pairs();\n  LOG_INFO(std::string(\"headers: \") + std::to_string(pairs.size()));\n  for (auto& p : pairs) {\n    LOG_INFO(std::string(p.first) + std::string(\" -> \") + std::string(p.second));\n  }\n  addResponseHeader(\"newheader\", \"newheadervalue\");\n  replaceResponseHeader(\"location\", \"envoy-wasm\");\n  return FilterHeadersStatus::Continue;\n}\n\nFilterDataStatus ExampleContext::onRequestBody(size_t body_buffer_length,\n                                               bool /* end_of_stream */) {\n  auto body = getBufferBytes(WasmBufferType::HttpRequestBody, 0, body_buffer_length);\n  LOG_ERROR(std::string(\"onRequestBody \") + std::string(body->view()));\n  return FilterDataStatus::Continue;\n}\n\nFilterDataStatus ExampleContext::onResponseBody(size_t /* body_buffer_length */,\n                                                bool /* end_of_stream */) {\n  setBuffer(WasmBufferType::HttpResponseBody, 0, 3, \"foo\");\n  return FilterDataStatus::Continue;\n}\n\nvoid ExampleContext::onDone() { LOG_WARN(std::string(\"onDone \" + std::to_string(id()))); }\n\nvoid ExampleContext::onLog() { LOG_WARN(std::string(\"onLog \" + std::to_string(id()))); }\n\nvoid ExampleContext::onDelete() { LOG_WARN(std::string(\"onDelete \" + std::to_string(id()))); }\n"
  },
  {
    "path": "examples/wasm/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=wasm\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test connection\"\nresponds_with \\\n    foo \\\n    http://localhost:8000\n\nrun_log \"Test header\"\nresponds_with_header \\\n    \"newheader: newheadervalue\" \\\n    http://localhost:8000\n"
  },
  {
    "path": "examples/zipkin-tracing/Dockerfile-frontenvoy",
    "content": "FROM envoyproxy/envoy-dev:latest\n\nRUN apt-get update && apt-get -q install -y \\\n    curl\nCOPY ./front-envoy-zipkin.yaml /etc/front-envoy.yaml\nRUN chmod go+r /etc/front-envoy.yaml\nCMD [\"/usr/local/bin/envoy\", \"-c\", \"/etc/front-envoy.yaml\", \"--service-cluster\", \"front-proxy\"]\n"
  },
  {
    "path": "examples/zipkin-tracing/README.md",
    "content": "To learn about this sandbox and for instructions on how to run it please head over\nto the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/zipkin_tracing)\n"
  },
  {
    "path": "examples/zipkin-tracing/docker-compose.yaml",
    "content": "version: \"3.7\"\nservices:\n\n  front-envoy:\n    build:\n      context: .\n      dockerfile: Dockerfile-frontenvoy\n    networks:\n      - envoymesh\n    expose:\n      - \"8000\"\n      - \"8001\"\n    ports:\n      - \"8000:8000\"\n      - \"8001:8001\"\n\n  service1:\n    build:\n      context: ../front-proxy\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./service1-envoy-zipkin.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service1\n    environment:\n      - SERVICE_NAME=1\n    expose:\n      - \"8000\"\n\n  service2:\n    build:\n      context: ../front-proxy\n      dockerfile: Dockerfile-service\n    volumes:\n      - ./service2-envoy-zipkin.yaml:/etc/service-envoy.yaml\n    networks:\n      envoymesh:\n        aliases:\n          - service2\n    environment:\n      - SERVICE_NAME=2\n    expose:\n      - \"8000\"\n\n  zipkin:\n    image: openzipkin/zipkin\n    networks:\n      envoymesh:\n        aliases:\n          - zipkin\n    expose:\n      - \"9411\"\n    ports:\n      - \"9411:9411\"\n\nnetworks:\n  envoymesh: {}\n"
  },
  {
    "path": "examples/zipkin-tracing/front-envoy-zipkin.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    traffic_direction: OUTBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          generate_request_id: true\n          tracing:\n            provider:\n              name: envoy.tracers.zipkin\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n                collector_cluster: zipkin\n                collector_endpoint: \"/api/v2/spans\"\n                collector_endpoint_version: HTTP_JSON\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: backend\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: service1\n                decorator:\n                  operation: checkAvailability\n              response_headers_to_add:\n              - header:\n                  key: \"x-b3-traceid\"\n                  value: \"%REQ(x-b3-traceid)%\"\n              - header:\n                  key: \"x-request-id\"\n                  value: \"%REQ(x-request-id)%\"\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: service1\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service1\n                port_value: 8000\n  - 
name: zipkin\n    connect_timeout: 1s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: zipkin\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: zipkin\n                port_value: 9411\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/zipkin-tracing/service1-envoy-zipkin.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    traffic_direction: INBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          tracing:\n            provider:\n              name: envoy.tracers.zipkin\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n                collector_cluster: zipkin\n                collector_endpoint: \"/api/v2/spans\"\n                collector_endpoint_version: HTTP_JSON\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: service1_route\n            virtual_hosts:\n            - name: service1\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: local_service\n                decorator:\n                  operation: checkAvailability\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 9000\n    traffic_direction: OUTBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          tracing:\n            provider:\n              name: envoy.tracers.zipkin\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n                collector_cluster: zipkin\n                collector_endpoint: \"/api/v2/spans\"\n                
collector_endpoint_version: HTTP_JSON\n          codec_type: auto\n          stat_prefix: egress_http\n          route_config:\n            name: service2_route\n            virtual_hosts:\n            - name: service2\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/trace/2\"\n                route:\n                  cluster: service2\n                decorator:\n                  operation: checkStock\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\n  - name: service2\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: service2\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: service2\n                port_value: 8000\n  - name: zipkin\n    connect_timeout: 1s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: zipkin\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: zipkin\n                port_value: 9411\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/zipkin-tracing/service2-envoy-zipkin.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 8000\n    traffic_direction: INBOUND\n    filter_chains:\n    - filters:\n      - name: envoy.filters.network.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          tracing:\n            provider:\n              name: envoy.tracers.zipkin\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n                collector_cluster: zipkin\n                collector_endpoint: \"/api/v2/spans\"\n                collector_endpoint_version: HTTP_JSON\n          codec_type: auto\n          stat_prefix: ingress_http\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: service2\n              domains:\n              - \"*\"\n              routes:\n              - match:\n                  prefix: \"/\"\n                route:\n                  cluster: local_service\n                decorator:\n                  operation: checkStock\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config: {}\n  clusters:\n  - name: local_service\n    connect_timeout: 0.250s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: local_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8080\n  - name: zipkin\n    connect_timeout: 1s\n    type: strict_dns\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: zipkin\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: zipkin\n                port_value: 9411\nadmin:\n  
access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 8001\n"
  },
  {
    "path": "examples/zipkin-tracing/verify.sh",
    "content": "#!/bin/bash -e\n\nexport NAME=zipkin\n\n# shellcheck source=examples/verify-common.sh\n. \"$(dirname \"${BASH_SOURCE[0]}\")/../verify-common.sh\"\n\n\nrun_log \"Test connection\"\nresponds_with \\\n    \"Hello from behind Envoy (service 1)!\" \\\n    http://localhost:8000/trace/1\n\nrun_log \"Test dashboard\"\n# this could do with using the healthcheck and waiting\nsleep 20\nresponds_with \\\n    \"<!doctype html>\" \\\n    http://localhost:9411/zipkin/\n"
  },
  {
    "path": "generated_api_shadow/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//envoy/admin/v2alpha:pkg\",\n        \"//envoy/admin/v3:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/api/v2/cluster:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"//envoy/api/v2/listener:pkg\",\n        \"//envoy/api/v2/ratelimit:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/config/accesslog/v2:pkg\",\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/bootstrap/v2:pkg\",\n        \"//envoy/config/bootstrap/v3:pkg\",\n        \"//envoy/config/cluster/aggregate/v2alpha:pkg\",\n        \"//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/config/cluster/redis:pkg\",\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/config/common/tap/v2alpha:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/config/filter/dubbo/router/v2alpha1:pkg\",\n        \"//envoy/config/filter/fault/v2:pkg\",\n        \"//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg\",\n        \"//envoy/config/filter/http/admission_control/v2alpha:pkg\",\n        \"//envoy/config/filter/http/buffer/v2:pkg\",\n        \"//envoy/config/filter/http/compressor/v2:pkg\",\n        \"//envoy/config/filter/http/cors/v2:pkg\",\n        \"//envoy/config/filter/http/csrf/v2:pkg\",\n        \"//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/config/filter/http/dynamo/v2:pkg\",\n        
\"//envoy/config/filter/http/ext_authz/v2:pkg\",\n        \"//envoy/config/filter/http/fault/v2:pkg\",\n        \"//envoy/config/filter/http/grpc_http1_bridge/v2:pkg\",\n        \"//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg\",\n        \"//envoy/config/filter/http/grpc_stats/v2alpha:pkg\",\n        \"//envoy/config/filter/http/grpc_web/v2:pkg\",\n        \"//envoy/config/filter/http/gzip/v2:pkg\",\n        \"//envoy/config/filter/http/header_to_metadata/v2:pkg\",\n        \"//envoy/config/filter/http/health_check/v2:pkg\",\n        \"//envoy/config/filter/http/ip_tagging/v2:pkg\",\n        \"//envoy/config/filter/http/jwt_authn/v2alpha:pkg\",\n        \"//envoy/config/filter/http/lua/v2:pkg\",\n        \"//envoy/config/filter/http/on_demand/v2:pkg\",\n        \"//envoy/config/filter/http/original_src/v2alpha1:pkg\",\n        \"//envoy/config/filter/http/rate_limit/v2:pkg\",\n        \"//envoy/config/filter/http/rbac/v2:pkg\",\n        \"//envoy/config/filter/http/router/v2:pkg\",\n        \"//envoy/config/filter/http/squash/v2:pkg\",\n        \"//envoy/config/filter/http/tap/v2alpha:pkg\",\n        \"//envoy/config/filter/http/transcoder/v2:pkg\",\n        \"//envoy/config/filter/listener/http_inspector/v2:pkg\",\n        \"//envoy/config/filter/listener/original_dst/v2:pkg\",\n        \"//envoy/config/filter/listener/original_src/v2alpha1:pkg\",\n        \"//envoy/config/filter/listener/proxy_protocol/v2:pkg\",\n        \"//envoy/config/filter/listener/tls_inspector/v2:pkg\",\n        \"//envoy/config/filter/network/client_ssl_auth/v2:pkg\",\n        \"//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg\",\n        \"//envoy/config/filter/network/echo/v2:pkg\",\n        \"//envoy/config/filter/network/ext_authz/v2:pkg\",\n        \"//envoy/config/filter/network/http_connection_manager/v2:pkg\",\n        \"//envoy/config/filter/network/kafka_broker/v2alpha1:pkg\",\n        \"//envoy/config/filter/network/local_rate_limit/v2alpha:pkg\",\n    
    \"//envoy/config/filter/network/mongo_proxy/v2:pkg\",\n        \"//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg\",\n        \"//envoy/config/filter/network/rate_limit/v2:pkg\",\n        \"//envoy/config/filter/network/rbac/v2:pkg\",\n        \"//envoy/config/filter/network/redis_proxy/v2:pkg\",\n        \"//envoy/config/filter/network/rocketmq_proxy/v3:pkg\",\n        \"//envoy/config/filter/network/sni_cluster/v2:pkg\",\n        \"//envoy/config/filter/network/tcp_proxy/v2:pkg\",\n        \"//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg\",\n        \"//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg\",\n        \"//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg\",\n        \"//envoy/config/filter/thrift/router/v2alpha1:pkg\",\n        \"//envoy/config/filter/udp/udp_proxy/v2alpha:pkg\",\n        \"//envoy/config/grpc_credential/v2alpha:pkg\",\n        \"//envoy/config/grpc_credential/v3:pkg\",\n        \"//envoy/config/health_checker/redis/v2:pkg\",\n        \"//envoy/config/listener/v2:pkg\",\n        \"//envoy/config/listener/v3:pkg\",\n        \"//envoy/config/metrics/v2:pkg\",\n        \"//envoy/config/metrics/v3:pkg\",\n        \"//envoy/config/overload/v2alpha:pkg\",\n        \"//envoy/config/overload/v3:pkg\",\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"//envoy/config/ratelimit/v3:pkg\",\n        \"//envoy/config/rbac/v2:pkg\",\n        \"//envoy/config/rbac/v3:pkg\",\n        \"//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg\",\n        \"//envoy/config/resource_monitor/injected_resource/v2alpha:pkg\",\n        \"//envoy/config/retry/omit_canary_hosts/v2:pkg\",\n        \"//envoy/config/retry/omit_host_metadata/v2:pkg\",\n        \"//envoy/config/retry/previous_hosts/v2:pkg\",\n        \"//envoy/config/retry/previous_priorities:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/config/tap/v3:pkg\",\n        \"//envoy/config/trace/v2:pkg\",\n        
\"//envoy/config/trace/v2alpha:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"//envoy/config/transport_socket/alts/v2alpha:pkg\",\n        \"//envoy/config/transport_socket/raw_buffer/v2:pkg\",\n        \"//envoy/config/transport_socket/tap/v2alpha:pkg\",\n        \"//envoy/data/accesslog/v2:pkg\",\n        \"//envoy/data/accesslog/v3:pkg\",\n        \"//envoy/data/cluster/v2alpha:pkg\",\n        \"//envoy/data/core/v2alpha:pkg\",\n        \"//envoy/data/core/v3:pkg\",\n        \"//envoy/data/tap/v2alpha:pkg\",\n        \"//envoy/data/tap/v3:pkg\",\n        \"//envoy/extensions/access_loggers/file/v3:pkg\",\n        \"//envoy/extensions/access_loggers/grpc/v3:pkg\",\n        \"//envoy/extensions/clusters/aggregate/v3:pkg\",\n        \"//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg\",\n        \"//envoy/extensions/clusters/redis/v3:pkg\",\n        \"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg\",\n        \"//envoy/extensions/common/ratelimit/v3:pkg\",\n        \"//envoy/extensions/common/tap/v3:pkg\",\n        \"//envoy/extensions/filters/common/fault/v3:pkg\",\n        \"//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg\",\n        \"//envoy/extensions/filters/http/admission_control/v3alpha:pkg\",\n        \"//envoy/extensions/filters/http/buffer/v3:pkg\",\n        \"//envoy/extensions/filters/http/compressor/v3:pkg\",\n        \"//envoy/extensions/filters/http/cors/v3:pkg\",\n        \"//envoy/extensions/filters/http/csrf/v3:pkg\",\n        \"//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/http/dynamo/v3:pkg\",\n        \"//envoy/extensions/filters/http/ext_authz/v3:pkg\",\n        \"//envoy/extensions/filters/http/fault/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg\",\n        
\"//envoy/extensions/filters/http/grpc_stats/v3:pkg\",\n        \"//envoy/extensions/filters/http/grpc_web/v3:pkg\",\n        \"//envoy/extensions/filters/http/gzip/v3:pkg\",\n        \"//envoy/extensions/filters/http/header_to_metadata/v3:pkg\",\n        \"//envoy/extensions/filters/http/health_check/v3:pkg\",\n        \"//envoy/extensions/filters/http/ip_tagging/v3:pkg\",\n        \"//envoy/extensions/filters/http/jwt_authn/v3:pkg\",\n        \"//envoy/extensions/filters/http/lua/v3:pkg\",\n        \"//envoy/extensions/filters/http/on_demand/v3:pkg\",\n        \"//envoy/extensions/filters/http/original_src/v3:pkg\",\n        \"//envoy/extensions/filters/http/ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/http/rbac/v3:pkg\",\n        \"//envoy/extensions/filters/http/router/v3:pkg\",\n        \"//envoy/extensions/filters/http/squash/v3:pkg\",\n        \"//envoy/extensions/filters/http/tap/v3:pkg\",\n        \"//envoy/extensions/filters/listener/http_inspector/v3:pkg\",\n        \"//envoy/extensions/filters/listener/original_dst/v3:pkg\",\n        \"//envoy/extensions/filters/listener/original_src/v3:pkg\",\n        \"//envoy/extensions/filters/listener/proxy_protocol/v3:pkg\",\n        \"//envoy/extensions/filters/listener/tls_inspector/v3:pkg\",\n        \"//envoy/extensions/filters/network/client_ssl_auth/v3:pkg\",\n        \"//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg\",\n        \"//envoy/extensions/filters/network/dubbo_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/echo/v3:pkg\",\n        \"//envoy/extensions/filters/network/ext_authz/v3:pkg\",\n        \"//envoy/extensions/filters/network/http_connection_manager/v3:pkg\",\n        \"//envoy/extensions/filters/network/kafka_broker/v3:pkg\",\n        \"//envoy/extensions/filters/network/local_ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/network/mongo_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/mysql_proxy/v3:pkg\",\n        
\"//envoy/extensions/filters/network/ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/network/rbac/v3:pkg\",\n        \"//envoy/extensions/filters/network/redis_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/sni_cluster/v3:pkg\",\n        \"//envoy/extensions/filters/network/tcp_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg\",\n        \"//envoy/extensions/filters/network/thrift_proxy/v3:pkg\",\n        \"//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg\",\n        \"//envoy/extensions/retry/host/omit_host_metadata/v3:pkg\",\n        \"//envoy/extensions/retry/priority/previous_priorities/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/alts/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/raw_buffer/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tap/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"//envoy/service/accesslog/v2:pkg\",\n        \"//envoy/service/accesslog/v3:pkg\",\n        \"//envoy/service/auth/v2:pkg\",\n        \"//envoy/service/auth/v3:pkg\",\n        \"//envoy/service/cluster/v3:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"//envoy/service/endpoint/v3:pkg\",\n        \"//envoy/service/health/v3:pkg\",\n        \"//envoy/service/listener/v3:pkg\",\n        \"//envoy/service/load_stats/v2:pkg\",\n        \"//envoy/service/load_stats/v3:pkg\",\n        \"//envoy/service/metrics/v2:pkg\",\n        \"//envoy/service/metrics/v3:pkg\",\n        \"//envoy/service/ratelimit/v2:pkg\",\n        \"//envoy/service/ratelimit/v3:pkg\",\n        \"//envoy/service/route/v3:pkg\",\n        \"//envoy/service/runtime/v3:pkg\",\n        \"//envoy/service/secret/v3:pkg\",\n        \"//envoy/service/status/v2:pkg\",\n        \"//envoy/service/status/v3:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n      
  \"//envoy/service/tap/v3:pkg\",\n        \"//envoy/service/trace/v2:pkg\",\n        \"//envoy/service/trace/v3:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/metadata/v2:pkg\",\n        \"//envoy/type/tracing/v2:pkg\",\n        \"//envoy/type/v3:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/README.md",
    "content": "This directory is for generated Envoy internal artifacts (via `proto_format`).\n\nDo not hand edit any file under `envoy/`. This shadow API may only be used in\nthe Envoy source tree.\n\nThe `bazel/` tree is a symlink back to the official API Bazel rules.\n"
  },
  {
    "path": "generated_api_shadow/bazel/BUILD",
    "content": "load(\"@io_bazel_rules_go//proto:compiler.bzl\", \"go_proto_compiler\")\n\nlicenses([\"notice\"])  # Apache 2\n\ngo_proto_compiler(\n    name = \"pgv_plugin_go\",\n    options = [\"lang=go\"],\n    plugin = \"@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate\",\n    suffix = \".pb.validate.go\",\n    valid_archive = False,\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/bazel/api_build_system.bzl",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_test\")\nload(\"@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl\", \"pgv_cc_proto_library\")\nload(\"@com_github_grpc_grpc//bazel:cc_grpc_library.bzl\", \"cc_grpc_library\")\nload(\"@com_google_protobuf//:protobuf.bzl\", _py_proto_library = \"py_proto_library\")\nload(\"@io_bazel_rules_go//proto:def.bzl\", \"go_proto_library\")\nload(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\nload(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\nload(\n    \"//bazel:external_proto_deps.bzl\",\n    \"EXTERNAL_PROTO_CC_BAZEL_DEP_MAP\",\n    \"EXTERNAL_PROTO_GO_BAZEL_DEP_MAP\",\n    \"EXTERNAL_PROTO_PY_BAZEL_DEP_MAP\",\n)\n\n_PY_PROTO_SUFFIX = \"_py_proto\"\n_CC_PROTO_SUFFIX = \"_cc_proto\"\n_CC_GRPC_SUFFIX = \"_cc_grpc\"\n_GO_PROTO_SUFFIX = \"_go_proto\"\n_GO_IMPORTPATH_PREFIX = \"github.com/envoyproxy/go-control-plane/\"\n\n_COMMON_PROTO_DEPS = [\n    \"@com_google_protobuf//:any_proto\",\n    \"@com_google_protobuf//:descriptor_proto\",\n    \"@com_google_protobuf//:duration_proto\",\n    \"@com_google_protobuf//:empty_proto\",\n    \"@com_google_protobuf//:struct_proto\",\n    \"@com_google_protobuf//:timestamp_proto\",\n    \"@com_google_protobuf//:wrappers_proto\",\n    \"@com_google_googleapis//google/api:http_proto\",\n    \"@com_google_googleapis//google/api:httpbody_proto\",\n    \"@com_google_googleapis//google/api:annotations_proto\",\n    \"@com_google_googleapis//google/rpc:status_proto\",\n    \"@com_envoyproxy_protoc_gen_validate//validate:validate_proto\",\n]\n\ndef _proto_mapping(dep, proto_dep_map, proto_suffix):\n    mapped = proto_dep_map.get(dep)\n    if mapped == None:\n        prefix = \"@\" + Label(dep).workspace_name if not dep.startswith(\"//\") else \"\"\n        return prefix + \"//\" + Label(dep).package + \":\" + Label(dep).name + proto_suffix\n    return mapped\n\ndef _go_proto_mapping(dep):\n    return _proto_mapping(dep, EXTERNAL_PROTO_GO_BAZEL_DEP_MAP, 
_GO_PROTO_SUFFIX)\n\ndef _cc_proto_mapping(dep):\n    return _proto_mapping(dep, EXTERNAL_PROTO_CC_BAZEL_DEP_MAP, _CC_PROTO_SUFFIX)\n\ndef _py_proto_mapping(dep):\n    return _proto_mapping(dep, EXTERNAL_PROTO_PY_BAZEL_DEP_MAP, _PY_PROTO_SUFFIX)\n\n# TODO(htuch): Convert this to native py_proto_library once\n# https://github.com/bazelbuild/bazel/issues/3935 and/or\n# https://github.com/bazelbuild/bazel/issues/2626 are resolved.\ndef _api_py_proto_library(name, srcs = [], deps = []):\n    _py_proto_library(\n        name = name + _PY_PROTO_SUFFIX,\n        srcs = srcs,\n        default_runtime = \"@com_google_protobuf//:protobuf_python\",\n        protoc = \"@com_google_protobuf//:protoc\",\n        deps = [_py_proto_mapping(dep) for dep in deps] + [\n            \"@com_envoyproxy_protoc_gen_validate//validate:validate_py\",\n            \"@com_google_googleapis//google/rpc:status_py_proto\",\n            \"@com_google_googleapis//google/api:annotations_py_proto\",\n            \"@com_google_googleapis//google/api:http_py_proto\",\n            \"@com_google_googleapis//google/api:httpbody_py_proto\",\n        ],\n        visibility = [\"//visibility:public\"],\n    )\n\n# This defines googleapis py_proto_library. 
The repository does not provide its definition and requires\n# overriding it in the consuming project (see https://github.com/grpc/grpc/issues/19255 for more details).\ndef py_proto_library(name, deps = [], plugin = None):\n    srcs = [dep[:-6] + \".proto\" if dep.endswith(\"_proto\") else dep for dep in deps]\n    proto_deps = []\n\n    # py_proto_library in googleapis specifies *_proto rules in dependencies.\n    # By rewriting *_proto to *.proto above, the dependencies in *_proto rules are not preserved.\n    # As a workaround, manually specify the proto dependencies for the imported python rules.\n    if name == \"annotations_py_proto\":\n        proto_deps = proto_deps + [\":http_py_proto\"]\n\n    # checked.proto depends on syntax.proto, we have to add this dependency manually as well.\n    if name == \"checked_py_proto\":\n        proto_deps = proto_deps + [\":syntax_py_proto\"]\n\n    # py_proto_library does not support plugin as an argument yet at gRPC v1.25.0:\n    # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72.\n    # plugin should also be passed in here when gRPC version is greater than v1.25.x.\n    _py_proto_library(\n        name = name,\n        srcs = srcs,\n        default_runtime = \"@com_google_protobuf//:protobuf_python\",\n        protoc = \"@com_google_protobuf//:protoc\",\n        deps = proto_deps + [\"@com_google_protobuf//:protobuf_python\"],\n        visibility = [\"//visibility:public\"],\n    )\n\ndef _api_cc_grpc_library(name, proto, deps = []):\n    cc_grpc_library(\n        name = name,\n        srcs = [proto],\n        deps = deps,\n        proto_only = False,\n        grpc_only = True,\n        visibility = [\"//visibility:public\"],\n    )\n\ndef api_cc_py_proto_library(\n        name,\n        visibility = [\"//visibility:private\"],\n        srcs = [],\n        deps = [],\n        linkstatic = 0,\n        has_services = 0):\n    relative_name = \":\" + name\n    proto_library(\n        name = name,\n     
   srcs = srcs,\n        deps = deps + _COMMON_PROTO_DEPS,\n        visibility = visibility,\n    )\n    cc_proto_library_name = name + _CC_PROTO_SUFFIX\n    pgv_cc_proto_library(\n        name = cc_proto_library_name,\n        linkstatic = linkstatic,\n        cc_deps = [_cc_proto_mapping(dep) for dep in deps] + [\n            \"@com_google_googleapis//google/api:http_cc_proto\",\n            \"@com_google_googleapis//google/api:httpbody_cc_proto\",\n            \"@com_google_googleapis//google/api:annotations_cc_proto\",\n            \"@com_google_googleapis//google/rpc:status_cc_proto\",\n        ],\n        deps = [relative_name],\n        visibility = [\"//visibility:public\"],\n    )\n    _api_py_proto_library(name, srcs, deps)\n\n    # Optionally define gRPC services\n    if has_services:\n        # TODO: when Python services are required, add to the below stub generations.\n        cc_grpc_name = name + _CC_GRPC_SUFFIX\n        cc_proto_deps = [cc_proto_library_name] + [_cc_proto_mapping(dep) for dep in deps]\n        _api_cc_grpc_library(name = cc_grpc_name, proto = relative_name, deps = cc_proto_deps)\n\ndef api_cc_test(name, **kwargs):\n    cc_test(\n        name = name,\n        **kwargs\n    )\n\ndef api_go_test(name, **kwargs):\n    go_test(\n        name = name,\n        **kwargs\n    )\n\ndef api_proto_package(\n        name = \"pkg\",\n        srcs = [],\n        deps = [],\n        has_services = False,\n        visibility = [\"//visibility:public\"]):\n    if srcs == []:\n        srcs = native.glob([\"*.proto\"])\n\n    name = \"pkg\"\n    api_cc_py_proto_library(\n        name = name,\n        visibility = visibility,\n        srcs = srcs,\n        deps = deps,\n        has_services = has_services,\n    )\n\n    compilers = [\"@io_bazel_rules_go//proto:go_proto\", \"@envoy_api//bazel:pgv_plugin_go\"]\n    if has_services:\n        compilers = [\"@io_bazel_rules_go//proto:go_grpc\", \"@envoy_api//bazel:pgv_plugin_go\"]\n\n    # Because RBAC proto 
depends on googleapis syntax.proto and checked.proto,\n    # which share the same go proto library, it causes duplicative dependencies.\n    # Thus, we use depset().to_list() to remove duplicated dependencies.\n    go_proto_library(\n        name = name + _GO_PROTO_SUFFIX,\n        compilers = compilers,\n        importpath = _GO_IMPORTPATH_PREFIX + native.package_name(),\n        proto = name,\n        visibility = [\"//visibility:public\"],\n        deps = depset([_go_proto_mapping(dep) for dep in deps] + [\n            \"@com_github_golang_protobuf//ptypes:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/any:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/duration:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/struct:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/timestamp:go_default_library\",\n            \"@com_github_golang_protobuf//ptypes/wrappers:go_default_library\",\n            \"@com_envoyproxy_protoc_gen_validate//validate:go_default_library\",\n            \"@com_google_googleapis//google/api:annotations_go_proto\",\n            \"@com_google_googleapis//google/rpc:status_go_proto\",\n        ]).to_list(),\n    )\n"
  },
  {
    "path": "generated_api_shadow/bazel/envoy_http_archive.bzl",
    "content": "load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")\n\ndef envoy_http_archive(name, locations, **kwargs):\n    # `existing_rule_keys` contains the names of repositories that have already\n    # been defined in the Bazel workspace. By skipping repos with existing keys,\n    # users can override dependency versions by using standard Bazel repository\n    # rules in their WORKSPACE files.\n    existing_rule_keys = native.existing_rules().keys()\n    if name in existing_rule_keys:\n        # This repository has already been defined, probably because the user\n        # wants to override the version. Do nothing.\n        return\n    loc_key = kwargs.pop(\"repository_key\", name)\n    location = locations[loc_key]\n\n    # HTTP tarball at a given URL. Add a BUILD file if requested.\n    http_archive(\n        name = name,\n        urls = location[\"urls\"],\n        sha256 = location[\"sha256\"],\n        strip_prefix = location.get(\"strip_prefix\", \"\"),\n        **kwargs\n    )\n"
  },
  {
    "path": "generated_api_shadow/bazel/external_proto_deps.bzl",
    "content": "# Any external dependency imported in the api/ .protos requires entries in\n# the maps below, to allow the Bazel proto and language specific bindings to be\n# inferred from the import directives.\n#\n# This file needs to be interpreted as both Python 3 and Starlark, so only the\n# common subset of Python should be used.\n\n# This maps from .proto import directive path to the Bazel dependency path for\n# external dependencies. Since BUILD files are generated, this is the canonical\n# place to define this mapping.\nEXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = {\n    \"google/api/expr/v1alpha1/checked.proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\",\n    \"google/api/expr/v1alpha1/syntax.proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\",\n    \"metrics.proto\": \"@prometheus_metrics_model//:client_model\",\n    \"opencensus/proto/trace/v1/trace.proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\",\n    \"opencensus/proto/trace/v1/trace_config.proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\",\n}\n\n# This maps from the Bazel proto_library target to the Go language binding target for external dependencies.\nEXTERNAL_PROTO_GO_BAZEL_DEP_MAP = {\n    \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto\",\n    \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go\",\n}\n\n# This maps from the Bazel proto_library target to the C++ language binding target for external dependencies.\nEXTERNAL_PROTO_CC_BAZEL_DEP_MAP = {\n    
\"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:checked_cc_proto\",\n    \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto_cc\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_cc\",\n}\n\n# This maps from the Bazel proto_library target to the Python language binding target for external dependencies.\nEXTERNAL_PROTO_PY_BAZEL_DEP_MAP = {\n    \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:checked_py_proto\",\n    \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\": \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto_py\",\n    \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\": \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_py\",\n}\n"
  },
  {
    "path": "generated_api_shadow/bazel/repositories.bzl",
    "content": "load(\":envoy_http_archive.bzl\", \"envoy_http_archive\")\nload(\":repository_locations.bzl\", \"REPOSITORY_LOCATIONS\")\n\ndef api_dependencies():\n    envoy_http_archive(\n        \"bazel_skylib\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        \"com_envoyproxy_protoc_gen_validate\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        name = \"com_google_googleapis\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        name = \"com_github_cncf_udpa\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n\n    envoy_http_archive(\n        name = \"prometheus_metrics_model\",\n        locations = REPOSITORY_LOCATIONS,\n        build_file_content = PROMETHEUSMETRICS_BUILD_CONTENT,\n    )\n    envoy_http_archive(\n        name = \"opencensus_proto\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        name = \"rules_proto\",\n        locations = REPOSITORY_LOCATIONS,\n    )\n    envoy_http_archive(\n        name = \"com_github_openzipkin_zipkinapi\",\n        locations = REPOSITORY_LOCATIONS,\n        build_file_content = ZIPKINAPI_BUILD_CONTENT,\n    )\n\nPROMETHEUSMETRICS_BUILD_CONTENT = \"\"\"\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_cc_py_proto_library\")\nload(\"@io_bazel_rules_go//proto:def.bzl\", \"go_proto_library\")\n\napi_cc_py_proto_library(\n    name = \"client_model\",\n    srcs = [\n        \"metrics.proto\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\ngo_proto_library(\n    name = \"client_model_go_proto\",\n    importpath = \"github.com/prometheus/client_model/go\",\n    proto = \":client_model\",\n    visibility = [\"//visibility:public\"],\n)\n\"\"\"\n\nOPENCENSUSTRACE_BUILD_CONTENT = \"\"\"\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_cc_py_proto_library\")\nload(\"@io_bazel_rules_go//proto:def.bzl\", \"go_proto_library\")\n\napi_cc_py_proto_library(\n    name = 
\"trace_model\",\n    srcs = [\n        \"trace.proto\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\ngo_proto_library(\n    name = \"trace_model_go_proto\",\n    importpath = \"trace_model\",\n    proto = \":trace_model\",\n    visibility = [\"//visibility:public\"],\n)\n\"\"\"\n\nZIPKINAPI_BUILD_CONTENT = \"\"\"\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_cc_py_proto_library\")\nload(\"@io_bazel_rules_go//proto:def.bzl\", \"go_proto_library\")\n\napi_cc_py_proto_library(\n    name = \"zipkin\",\n    srcs = [\n        \"zipkin-jsonv2.proto\",\n        \"zipkin.proto\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\ngo_proto_library(\n    name = \"zipkin_go_proto\",\n    proto = \":zipkin\",\n    visibility = [\"//visibility:public\"],\n)\n\"\"\"\n"
  },
  {
    "path": "generated_api_shadow/bazel/repository_locations.bzl",
    "content": "DEPENDENCY_REPOSITORIES_SPEC = dict(\n    bazel_skylib = dict(\n        project_name = \"bazel-skylib\",\n        project_desc = \"Common useful functions and rules for Bazel\",\n        project_url = \"https://github.com/bazelbuild/bazel-skylib\",\n        version = \"1.0.3\",\n        sha256 = \"1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c\",\n        urls = [\"https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz\"],\n        last_updated = \"2020-08-27\",\n        use_category = [\"api\"],\n    ),\n    com_envoyproxy_protoc_gen_validate = dict(\n        project_name = \"protoc-gen-validate (PGV)\",\n        project_desc = \"protoc plugin to generate polyglot message validators\",\n        project_url = \"https://github.com/envoyproxy/protoc-gen-validate\",\n        version = \"278964a8052f96a2f514add0298098f63fb7f47f\",\n        sha256 = \"e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8\",\n        strip_prefix = \"protoc-gen-validate-{version}\",\n        urls = [\"https://github.com/envoyproxy/protoc-gen-validate/archive/{version}.tar.gz\"],\n        last_updated = \"2020-06-09\",\n        use_category = [\"api\"],\n    ),\n    com_github_cncf_udpa = dict(\n        project_name = \"Universal Data Plane API\",\n        project_desc = \"Universal Data Plane API Working Group (UDPA-WG)\",\n        project_url = \"https://github.com/cncf/udpa\",\n        version = \"0.0.1\",\n        sha256 = \"83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8\",\n        strip_prefix = \"udpa-{version}\",\n        urls = [\"https://github.com/cncf/udpa/archive/v{version}.tar.gz\"],\n        last_updated = \"2020-09-23\",\n        use_category = [\"api\"],\n    ),\n    com_github_openzipkin_zipkinapi = dict(\n        project_name = \"Zipkin API\",\n        project_desc = \"Zipkin's language independent model and HTTP Api Definitions\",\n        project_url = 
\"https://github.com/openzipkin/zipkin-api\",\n        version = \"0.2.2\",\n        sha256 = \"688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b\",\n        strip_prefix = \"zipkin-api-{version}\",\n        urls = [\"https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz\"],\n        last_updated = \"2020-09-23\",\n        use_category = [\"api\"],\n    ),\n    com_google_googleapis = dict(\n        # TODO(dio): Consider writing a Starlark macro for importing Google API proto.\n        project_name = \"Google APIs\",\n        project_desc = \"Public interface definitions of Google APIs\",\n        project_url = \"https://github.com/googleapis/googleapis\",\n        version = \"82944da21578a53b74e547774cf62ed31a05b841\",\n        sha256 = \"a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405\",\n        strip_prefix = \"googleapis-{version}\",\n        urls = [\"https://github.com/googleapis/googleapis/archive/{version}.tar.gz\"],\n        last_updated = \"2019-12-02\",\n        use_category = [\"api\"],\n    ),\n    opencensus_proto = dict(\n        project_name = \"OpenCensus Proto\",\n        project_desc = \"Language Independent Interface Types For OpenCensus\",\n        project_url = \"https://github.com/census-instrumentation/opencensus-proto\",\n        version = \"0.3.0\",\n        sha256 = \"b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0\",\n        strip_prefix = \"opencensus-proto-{version}/src\",\n        urls = [\"https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz\"],\n        last_updated = \"2020-06-20\",\n        use_category = [\"api\"],\n    ),\n    prometheus_metrics_model = dict(\n        project_name = \"Prometheus client model\",\n        project_desc = \"Data model artifacts for Prometheus\",\n        project_url = \"https://github.com/prometheus/client_model\",\n        version = \"60555c9708c786597e6b07bf846d0dc5c2a46f54\",\n        sha256 = 
\"6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e\",\n        strip_prefix = \"client_model-{version}\",\n        urls = [\"https://github.com/prometheus/client_model/archive/{version}.tar.gz\"],\n        last_updated = \"2020-06-23\",\n        use_category = [\"api\"],\n    ),\n    rules_proto = dict(\n        project_name = \"Protobuf Rules for Bazel\",\n        project_desc = \"Protocol buffer rules for Bazel\",\n        project_url = \"https://github.com/bazelbuild/rules_proto\",\n        version = \"40298556293ae502c66579620a7ce867d5f57311\",\n        sha256 = \"aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5\",\n        strip_prefix = \"rules_proto-{version}\",\n        urls = [\"https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz\"],\n        last_updated = \"2020-08-17\",\n        use_category = [\"api\"],\n    ),\n)\n\ndef _format_version(s, version):\n    return s.format(version = version, dash_version = version.replace(\".\", \"-\"), underscore_version = version.replace(\".\", \"_\"))\n\n# Interpolate {version} in the above dependency specs. This code should be capable of running in both Python\n# and Starlark.\ndef _dependency_repositories():\n    locations = {}\n    for key, location in DEPENDENCY_REPOSITORIES_SPEC.items():\n        mutable_location = dict(location)\n        locations[key] = mutable_location\n\n        # Fixup with version information.\n        if \"version\" in location:\n            if \"strip_prefix\" in location:\n                mutable_location[\"strip_prefix\"] = _format_version(location[\"strip_prefix\"], location[\"version\"])\n            mutable_location[\"urls\"] = [_format_version(url, location[\"version\"]) for url in location[\"urls\"]]\n    return locations\n\nREPOSITORY_LOCATIONS = _dependency_repositories()\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/bootstrap/v2:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/certs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"CertsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Certificates]\n\n// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to\n// display certificate information. See :ref:`/certs <operations_admin_interface_certs>` for more\n// information.\nmessage Certificates {\n  // List of certificates known to an Envoy.\n  repeated Certificate certificates = 1;\n}\n\nmessage Certificate {\n  // Details of CA certificate.\n  repeated CertificateDetails ca_cert = 1;\n\n  // Details of Certificate Chain\n  repeated CertificateDetails cert_chain = 2;\n}\n\n// [#next-free-field: 7]\nmessage CertificateDetails {\n  // Path of the certificate.\n  string path = 1;\n\n  // Certificate Serial Number.\n  string serial_number = 2;\n\n  // List of Subject Alternate names.\n  repeated SubjectAlternateName subject_alt_names = 3;\n\n  // Minimum of days until expiration of certificate and it's chain.\n  uint64 days_until_expiration = 4;\n\n  // Indicates the time from which the certificate is valid.\n  google.protobuf.Timestamp valid_from = 5;\n\n  // Indicates the time at which the certificate expires.\n  google.protobuf.Timestamp expiration_time = 6;\n}\n\nmessage SubjectAlternateName {\n  // Subject Alternate Name.\n  oneof name {\n    string dns = 1;\n\n    string uri = 2;\n\n    string ip_address = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/clusters.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"envoy/admin/v2alpha/metrics.proto\";\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/health_check.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"ClustersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Clusters]\n\n// Admin endpoint uses this wrapper for `/clusters` to display cluster status information.\n// See :ref:`/clusters <operations_admin_interface_clusters>` for more information.\nmessage Clusters {\n  // Mapping from cluster name to each cluster's status.\n  repeated ClusterStatus cluster_statuses = 1;\n}\n\n// Details an individual cluster's current status.\n// [#next-free-field: 6]\nmessage ClusterStatus {\n  // Name of the cluster.\n  string name = 1;\n\n  // Denotes whether this cluster was added via API or configured statically.\n  bool added_via_api = 2;\n\n  // The success rate threshold used in the last interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used to calculate the threshold.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used to calculate the threshold.\n  // The threshold is used to eject hosts based on their success rate. See\n  // :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. 
There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. Outlier detection is not enabled for this cluster.\n  type.Percent success_rate_ejection_threshold = 3;\n\n  // Mapping from host address to the host's current status.\n  repeated HostStatus host_statuses = 4;\n\n  // The success rate threshold used in the last interval when only locally originated failures were\n  // taken into account and externally originated errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*. The threshold is used to eject hosts based on their success rate.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. 
Outlier detection is not enabled for this cluster.\n  type.Percent local_origin_success_rate_ejection_threshold = 5;\n}\n\n// Current state of a particular host.\n// [#next-free-field: 10]\nmessage HostStatus {\n  // Address of this host.\n  api.v2.core.Address address = 1;\n\n  // List of stats specific to this host.\n  repeated SimpleMetric stats = 2;\n\n  // The host's current health status.\n  HostHealthStatus health_status = 3;\n\n  // Request success rate for this host over the last calculated interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used in success rate\n  // calculation. If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used in success rate calculation.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.Percent success_rate = 4;\n\n  // The host's weight. If not configured, the value defaults to 1.\n  uint32 weight = 5;\n\n  // The hostname of the host, if applicable.\n  string hostname = 6;\n\n  // The host's priority. 
If not configured, the value defaults to 0 (highest priority).\n  uint32 priority = 7;\n\n  // Request success rate for this host over the last calculated\n  // interval when only locally originated errors are taken into account and externally originated\n  // errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.Percent local_origin_success_rate = 8;\n\n  // locality of the host.\n  api.v2.core.Locality locality = 9;\n}\n\n// Health status for a host.\n// [#next-free-field: 7]\nmessage HostHealthStatus {\n  // The host is currently failing active health checks.\n  bool failed_active_health_check = 1;\n\n  // The host is currently considered an outlier and has been ejected.\n  bool failed_outlier_check = 2;\n\n  // The host is currently being marked as degraded through active health checking.\n  bool failed_active_degraded_check = 4;\n\n  // The host has been removed from service discovery, but is being stabilized due to active\n  // health checking.\n  bool pending_dynamic_removal = 5;\n\n  // The host has not yet been health checked.\n  bool pending_active_hc = 6;\n\n  // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported\n  // here.\n  // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.]\n  api.v2.core.HealthStatus eds_health_status = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/config_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"envoy/config/bootstrap/v2/bootstrap.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"ConfigDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: ConfigDump]\n\n// The :ref:`/config_dump <operations_admin_interface_config_dump>` admin endpoint uses this wrapper\n// message to maintain and serve arbitrary configuration information from any component in Envoy.\nmessage ConfigDump {\n  // This list is serialized and dumped in its entirety at the\n  // :ref:`/config_dump <operations_admin_interface_config_dump>` endpoint.\n  //\n  // The following configurations are currently supported and will be dumped in the order given\n  // below:\n  //\n  // * *bootstrap*: :ref:`BootstrapConfigDump <envoy_api_msg_admin.v2alpha.BootstrapConfigDump>`\n  // * *clusters*: :ref:`ClustersConfigDump <envoy_api_msg_admin.v2alpha.ClustersConfigDump>`\n  // * *listeners*: :ref:`ListenersConfigDump <envoy_api_msg_admin.v2alpha.ListenersConfigDump>`\n  // * *routes*:  :ref:`RoutesConfigDump <envoy_api_msg_admin.v2alpha.RoutesConfigDump>`\n  //\n  // You can filter output with the resource and mask query parameters.\n  // See :ref:`/config_dump?resource={} <operations_admin_interface_config_dump_by_resource>`,\n  // :ref:`/config_dump?mask={} <operations_admin_interface_config_dump_by_mask>`,\n  // or :ref:`/config_dump?resource={},mask={}\n  // <operations_admin_interface_config_dump_by_resource_and_mask>` for more information.\n  repeated google.protobuf.Any configs = 1;\n}\n\nmessage UpdateFailureState {\n  // What the component configuration would have been if the update had succeeded.\n  google.protobuf.Any failed_configuration = 
1;\n\n  // Time of the latest failed update attempt.\n  google.protobuf.Timestamp last_update_attempt = 2;\n\n  // Details about the last failed update attempt.\n  string details = 3;\n}\n\n// This message describes the bootstrap configuration that Envoy was started with. This includes\n// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate\n// the static portions of an Envoy configuration by reusing the output as the bootstrap\n// configuration for another Envoy.\nmessage BootstrapConfigDump {\n  config.bootstrap.v2.Bootstrap bootstrap = 1;\n\n  // The timestamp when the BootstrapConfig was last updated.\n  google.protobuf.Timestamp last_updated = 2;\n}\n\n// Envoy's listener manager fills this message with all currently known listeners. Listener\n// configuration information can be used to recreate an Envoy configuration by populating all\n// listeners as static listeners or by returning them in a LDS response.\nmessage ListenersConfigDump {\n  // Describes a statically loaded listener.\n  message StaticListener {\n    // The listener config.\n    google.protobuf.Any listener = 1;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicListenerState {\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` field at the time\n    // that the listener was loaded. 
In the future, discrete per-listener versions may be supported\n    // by the API.\n    string version_info = 1;\n\n    // The listener config.\n    google.protobuf.Any listener = 2;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // Describes a dynamically loaded listener via the LDS API.\n  // [#next-free-field: 6]\n  message DynamicListener {\n    // The name or unique id of this listener, pulled from the DynamicListenerState config.\n    string name = 1;\n\n    // The listener state for any active listener by this name.\n    // These are listeners that are available to service data plane traffic.\n    DynamicListenerState active_state = 2;\n\n    // The listener state for any warming listener by this name.\n    // These are listeners that are currently undergoing warming in preparation to service data\n    // plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the warming listeners should generally be discarded.\n    DynamicListenerState warming_state = 3;\n\n    // The listener state for any draining listener by this name.\n    // These are listeners that are currently undergoing draining in preparation to stop servicing\n    // data plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the draining listeners should generally be discarded.\n    DynamicListenerState draining_state = 4;\n\n    // Set if the last update failed, cleared after the next successful update.\n    UpdateFailureState error_state = 5;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` in the\n  // last processed LDS discovery response. 
If there are only static bootstrap listeners, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded listener configs.\n  repeated StaticListener static_listeners = 2;\n\n  // State for any warming, active, or draining listeners.\n  repeated DynamicListener dynamic_listeners = 3;\n}\n\n// Envoy's cluster manager fills this message with all currently known clusters. Cluster\n// configuration information can be used to recreate an Envoy configuration by populating all\n// clusters as static clusters or by returning them in a CDS response.\nmessage ClustersConfigDump {\n  // Describes a statically loaded cluster.\n  message StaticCluster {\n    // The cluster config.\n    google.protobuf.Any cluster = 1;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  // Describes a dynamically loaded cluster via the CDS API.\n  message DynamicCluster {\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` field at the time\n    // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by\n    // the API.\n    string version_info = 1;\n\n    // The cluster config.\n    google.protobuf.Any cluster = 2;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` in the\n  // last processed CDS discovery response. If there are only static bootstrap clusters, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded cluster configs.\n  repeated StaticCluster static_clusters = 2;\n\n  // The dynamically loaded active clusters. 
These are clusters that are available to service\n  // data plane traffic.\n  repeated DynamicCluster dynamic_active_clusters = 3;\n\n  // The dynamically loaded warming clusters. These are clusters that are currently undergoing\n  // warming in preparation to service data plane traffic. Note that if attempting to recreate an\n  // Envoy configuration from a configuration dump, the warming clusters should generally be\n  // discarded.\n  repeated DynamicCluster dynamic_warming_clusters = 4;\n}\n\n// Envoy's RDS implementation fills this message with all currently loaded routes, as described by\n// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration\n// or defined inline while configuring listeners are separated from those configured dynamically via RDS.\n// Route configuration information can be used to recreate an Envoy configuration by populating all routes\n// as static routes or by returning them in RDS responses.\nmessage RoutesConfigDump {\n  message StaticRouteConfig {\n    // The route config.\n    google.protobuf.Any route_config = 1;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicRouteConfig {\n    // This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` field at the time that\n    // the route configuration was loaded.\n    string version_info = 1;\n\n    // The route config.\n    google.protobuf.Any route_config = 2;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded route configs.\n  repeated StaticRouteConfig static_route_configs = 2;\n\n  // The dynamically loaded route configs.\n  repeated DynamicRouteConfig dynamic_route_configs = 3;\n}\n\n// Envoy's scoped RDS implementation fills this message with all currently loaded route\n// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both\n// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the\n// dynamically obtained scopes via the SRDS API.\nmessage ScopedRoutesConfigDump {\n  message InlineScopedRouteConfigs {\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 2;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  message DynamicScopedRouteConfigs {\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` field at the time that\n    // the scoped routes configuration was loaded.\n    string version_info = 2;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 3;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 4;\n  }\n\n  // The statically loaded scoped route configs.\n  repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1;\n\n  // The dynamically loaded scoped route configs.\n  repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2;\n}\n\n// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS.\nmessage SecretsConfigDump {\n  // DynamicSecret contains secret information fetched via SDS.\n  message DynamicSecret {\n    // The name assigned to the secret.\n    string name = 1;\n\n    // This is the per-resource version information.\n    string version_info = 2;\n\n    // The timestamp when the secret was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 4;\n  }\n\n  // StaticSecret specifies statically loaded secret in bootstrap.\n  message StaticSecret {\n    // The name assigned to the secret.\n    string name = 1;\n\n    // The timestamp when the secret was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 3;\n  }\n\n  // The statically loaded secrets.\n  repeated StaticSecret static_secrets = 1;\n\n  // The 
dynamically loaded active secrets. These are secrets that are available to service\n  // clusters or listeners.\n  repeated DynamicSecret dynamic_active_secrets = 2;\n\n  // The dynamically loaded warming secrets. These are secrets that are currently undergoing\n  // warming in preparation to service clusters or listeners.\n  repeated DynamicSecret dynamic_warming_secrets = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/listeners.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"envoy/api/v2/core/address.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"ListenersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Listeners]\n\n// Admin endpoint uses this wrapper for `/listeners` to display listener status information.\n// See :ref:`/listeners <operations_admin_interface_listeners>` for more information.\nmessage Listeners {\n  // List of listener statuses.\n  repeated ListenerStatus listener_statuses = 1;\n}\n\n// Details an individual listener's current status.\nmessage ListenerStatus {\n  // Name of the listener\n  string name = 1;\n\n  // The actual local address that the listener is listening on. If a listener was configured\n  // to listen on port 0, then this address has the port that was allocated by the OS.\n  api.v2.core.Address local_address = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/memory.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"MemoryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Memory]\n\n// Proto representation of the internal memory consumption of an Envoy instance. These represent\n// values extracted from an internal TCMalloc instance. For more information, see the section of the\n// docs entitled [\"Generic Tcmalloc Status\"](https://gperftools.github.io/gperftools/tcmalloc.html).\n// [#next-free-field: 7]\nmessage Memory {\n  // The number of bytes allocated by the heap for Envoy. This is an alias for\n  // `generic.current_allocated_bytes`.\n  uint64 allocated = 1;\n\n  // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for\n  // `generic.heap_size`.\n  uint64 heap_size = 2;\n\n  // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and depending on the OS, typically do not count towards physical memory\n  // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`.\n  uint64 pageheap_unmapped = 3;\n\n  // The number of bytes in free, mapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also\n  // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`.\n  uint64 pageheap_free = 4;\n\n  // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias\n  // for `tcmalloc.current_total_thread_cache_bytes`.\n  uint64 total_thread_cache = 5;\n\n  // The number of bytes of the physical memory usage by the allocator. 
This is an alias for\n  // `generic.total_physical_bytes`.\n  uint64 total_physical_bytes = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/metrics.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"MetricsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metrics]\n\n// Proto representation of an Envoy Counter or Gauge value.\nmessage SimpleMetric {\n  enum Type {\n    COUNTER = 0;\n    GAUGE = 1;\n  }\n\n  // Type of the metric represented.\n  Type type = 1;\n\n  // Current metric value.\n  uint64 value = 2;\n\n  // Name of the metric.\n  string name = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"MutexStatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: MutexStats]\n\n// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run\n// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex`\n// [docs](https://abseil.io/about/design/mutex#extra-features).\n//\n// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not\n// correspond to core clock frequency. For more information, see the `CycleClock`\n// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).\nmessage MutexStats {\n  // The number of individual mutex contentions which have occurred since startup.\n  uint64 num_contentions = 1;\n\n  // The length of the current contention wait cycle.\n  uint64 current_wait_cycles = 2;\n\n  // The lifetime total of all contention wait cycles.\n  uint64 lifetime_wait_cycles = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/server_info.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"ServerInfoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Server State]\n\n// Proto representation of the value returned by /server_info, containing\n// server version/server status information.\n// [#next-free-field: 7]\nmessage ServerInfo {\n  enum State {\n    // Server is live and serving traffic.\n    LIVE = 0;\n\n    // Server is draining listeners in response to external health checks failing.\n    DRAINING = 1;\n\n    // Server has not yet completed cluster manager initialization.\n    PRE_INITIALIZING = 2;\n\n    // Server is running the cluster manager initialization callbacks (e.g., RDS).\n    INITIALIZING = 3;\n  }\n\n  // Server version.\n  string version = 1;\n\n  // State of the server.\n  State state = 2;\n\n  // Uptime since current epoch was started.\n  google.protobuf.Duration uptime_current_epoch = 3;\n\n  // Uptime since the start of the first epoch.\n  google.protobuf.Duration uptime_all_epochs = 4;\n\n  // Hot restart version.\n  string hot_restart_version = 5;\n\n  // Command line options the server is currently running with.\n  CommandLineOptions command_line_options = 6;\n}\n\n// [#next-free-field: 29]\nmessage CommandLineOptions {\n  enum IpVersion {\n    v4 = 0;\n    v6 = 1;\n  }\n\n  enum Mode {\n    // Validate configs and then serve traffic normally.\n    Serve = 0;\n\n    // Validate configs and exit.\n    Validate = 1;\n\n    // Completely load and initialize the config, and then exit without running the listener loop.\n    InitOnly = 2;\n  }\n\n  reserved 12;\n\n  // See :option:`--base-id` for details.\n  uint64 base_id = 1;\n\n  // See 
:option:`--concurrency` for details.\n  uint32 concurrency = 2;\n\n  // See :option:`--config-path` for details.\n  string config_path = 3;\n\n  // See :option:`--config-yaml` for details.\n  string config_yaml = 4;\n\n  // See :option:`--allow-unknown-static-fields` for details.\n  bool allow_unknown_static_fields = 5;\n\n  // See :option:`--reject-unknown-dynamic-fields` for details.\n  bool reject_unknown_dynamic_fields = 26;\n\n  // See :option:`--admin-address-path` for details.\n  string admin_address_path = 6;\n\n  // See :option:`--local-address-ip-version` for details.\n  IpVersion local_address_ip_version = 7;\n\n  // See :option:`--log-level` for details.\n  string log_level = 8;\n\n  // See :option:`--component-log-level` for details.\n  string component_log_level = 9;\n\n  // See :option:`--log-format` for details.\n  string log_format = 10;\n\n  // See :option:`--log-format-escaped` for details.\n  bool log_format_escaped = 27;\n\n  // See :option:`--log-path` for details.\n  string log_path = 11;\n\n  // See :option:`--service-cluster` for details.\n  string service_cluster = 13;\n\n  // See :option:`--service-node` for details.\n  string service_node = 14;\n\n  // See :option:`--service-zone` for details.\n  string service_zone = 15;\n\n  // See :option:`--file-flush-interval-msec` for details.\n  google.protobuf.Duration file_flush_interval = 16;\n\n  // See :option:`--drain-time-s` for details.\n  google.protobuf.Duration drain_time = 17;\n\n  // See :option:`--parent-shutdown-time-s` for details.\n  google.protobuf.Duration parent_shutdown_time = 18;\n\n  // See :option:`--mode` for details.\n  Mode mode = 19;\n\n  // max_stats and max_obj_name_len are now unused and have no effect.\n  uint64 max_stats = 20 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  uint64 max_obj_name_len = 21\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // See :option:`--disable-hot-restart` for details.\n  
bool disable_hot_restart = 22;\n\n  // See :option:`--enable-mutex-tracing` for details.\n  bool enable_mutex_tracing = 23;\n\n  // See :option:`--restart-epoch` for details.\n  uint32 restart_epoch = 24;\n\n  // See :option:`--cpuset-threads` for details.\n  bool cpuset_threads = 25;\n\n  // See :option:`--disable-extensions` for details.\n  repeated string disabled_extensions = 28;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v2alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v2alpha;\n\nimport \"envoy/service/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v2alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap]\n\n// The /tap admin request body that is used to configure an active tap session.\nmessage TapRequest {\n  // The opaque configuration ID used to match the configuration to a loaded extension.\n  // A tap extension configures a similar opaque ID that is used to match.\n  string config_id = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The tap configuration to load.\n  service.tap.v2alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/admin/v2alpha:pkg\",\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/bootstrap/v3:pkg\",\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/tap/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/certs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"CertsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Certificates]\n\n// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to\n// display certificate information. See :ref:`/certs <operations_admin_interface_certs>` for more\n// information.\nmessage Certificates {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Certificates\";\n\n  // List of certificates known to an Envoy.\n  repeated Certificate certificates = 1;\n}\n\nmessage Certificate {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Certificate\";\n\n  // Details of CA certificate.\n  repeated CertificateDetails ca_cert = 1;\n\n  // Details of Certificate Chain\n  repeated CertificateDetails cert_chain = 2;\n}\n\n// [#next-free-field: 8]\nmessage CertificateDetails {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.CertificateDetails\";\n\n  message OcspDetails {\n    // Indicates the time from which the OCSP response is valid.\n    google.protobuf.Timestamp valid_from = 1;\n\n    // Indicates the time at which the OCSP response expires.\n    google.protobuf.Timestamp expiration = 2;\n  }\n\n  // Path of the certificate.\n  string path = 1;\n\n  // Certificate Serial Number.\n  string serial_number = 2;\n\n  // List of Subject Alternate names.\n  repeated SubjectAlternateName subject_alt_names = 3;\n\n  // Minimum of days until expiration of certificate and it's chain.\n  uint64 days_until_expiration = 4;\n\n  // Indicates the time from which the certificate is 
valid.\n  google.protobuf.Timestamp valid_from = 5;\n\n  // Indicates the time at which the certificate expires.\n  google.protobuf.Timestamp expiration_time = 6;\n\n  // Details related to the OCSP response associated with this certificate, if any.\n  OcspDetails ocsp_details = 7;\n}\n\nmessage SubjectAlternateName {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.SubjectAlternateName\";\n\n  // Subject Alternate Name.\n  oneof name {\n    string dns = 1;\n\n    string uri = 2;\n\n    string ip_address = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/clusters.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/admin/v3/metrics.proto\";\nimport \"envoy/config/cluster/v3/circuit_breaker.proto\";\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/health_check.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"ClustersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Clusters]\n\n// Admin endpoint uses this wrapper for `/clusters` to display cluster status information.\n// See :ref:`/clusters <operations_admin_interface_clusters>` for more information.\nmessage Clusters {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Clusters\";\n\n  // Mapping from cluster name to each cluster's status.\n  repeated ClusterStatus cluster_statuses = 1;\n}\n\n// Details an individual cluster's current status.\n// [#next-free-field: 7]\nmessage ClusterStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.ClusterStatus\";\n\n  // Name of the cluster.\n  string name = 1;\n\n  // Denotes whether this cluster was added via API or configured statically.\n  bool added_via_api = 2;\n\n  // The success rate threshold used in the last interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used to calculate the threshold.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated 
errors were used to calculate the threshold.\n  // The threshold is used to eject hosts based on their success rate. See\n  // :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. Outlier detection is not enabled for this cluster.\n  type.v3.Percent success_rate_ejection_threshold = 3;\n\n  // Mapping from host address to the host's current status.\n  repeated HostStatus host_statuses = 4;\n\n  // The success rate threshold used in the last interval when only locally originated failures were\n  // taken into account and externally originated errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*. The threshold is used to eject hosts based on their success rate.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. 
Outlier detection is not enabled for this cluster.\n  type.v3.Percent local_origin_success_rate_ejection_threshold = 5;\n\n  // :ref:`Circuit breaking <arch_overview_circuit_break>` settings of the cluster.\n  config.cluster.v3.CircuitBreakers circuit_breakers = 6;\n}\n\n// Current state of a particular host.\n// [#next-free-field: 10]\nmessage HostStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.HostStatus\";\n\n  // Address of this host.\n  config.core.v3.Address address = 1;\n\n  // List of stats specific to this host.\n  repeated SimpleMetric stats = 2;\n\n  // The host's current health status.\n  HostHealthStatus health_status = 3;\n\n  // Request success rate for this host over the last calculated interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used in success rate\n  // calculation. If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used in success rate calculation.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.v3.Percent success_rate = 4;\n\n  // The host's weight. If not configured, the value defaults to 1.\n  uint32 weight = 5;\n\n  // The hostname of the host, if applicable.\n  string hostname = 6;\n\n  // The host's priority. 
If not configured, the value defaults to 0 (highest priority).\n  uint32 priority = 7;\n\n  // Request success rate for this host over the last calculated\n  // interval when only locally originated errors are taken into account and externally originated\n  // errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.v3.Percent local_origin_success_rate = 8;\n\n  // locality of the host.\n  config.core.v3.Locality locality = 9;\n}\n\n// Health status for a host.\n// [#next-free-field: 7]\nmessage HostHealthStatus {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.HostHealthStatus\";\n\n  // The host is currently failing active health checks.\n  bool failed_active_health_check = 1;\n\n  // The host is currently considered an outlier and has been ejected.\n  bool failed_outlier_check = 2;\n\n  // The host is currently being marked as degraded through active health checking.\n  bool failed_active_degraded_check = 4;\n\n  // The host has been removed from service discovery, but is being stabilized due to active\n  // health checking.\n  bool pending_dynamic_removal = 5;\n\n  // The host has not yet been health checked.\n  bool pending_active_hc = 6;\n\n  // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported\n  // here.\n  // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.]\n  config.core.v3.HealthStatus eds_health_status = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/config_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/config/bootstrap/v3/bootstrap.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"ConfigDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: ConfigDump]\n\n// The :ref:`/config_dump <operations_admin_interface_config_dump>` admin endpoint uses this wrapper\n// message to maintain and serve arbitrary configuration information from any component in Envoy.\nmessage ConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.ConfigDump\";\n\n  // This list is serialized and dumped in its entirety at the\n  // :ref:`/config_dump <operations_admin_interface_config_dump>` endpoint.\n  //\n  // The following configurations are currently supported and will be dumped in the order given\n  // below:\n  //\n  // * *bootstrap*: :ref:`BootstrapConfigDump <envoy_api_msg_admin.v3.BootstrapConfigDump>`\n  // * *clusters*: :ref:`ClustersConfigDump <envoy_api_msg_admin.v3.ClustersConfigDump>`\n  // * *endpoints*:  :ref:`EndpointsConfigDump <envoy_api_msg_admin.v3.EndpointsConfigDump>`\n  // * *listeners*: :ref:`ListenersConfigDump <envoy_api_msg_admin.v3.ListenersConfigDump>`\n  // * *routes*:  :ref:`RoutesConfigDump <envoy_api_msg_admin.v3.RoutesConfigDump>`\n  //\n  // EDS Configuration will only be dumped by using parameter `?include_eds`\n  //\n  // You can filter output with the resource and mask query parameters.\n  // See :ref:`/config_dump?resource={} <operations_admin_interface_config_dump_by_resource>`,\n  // :ref:`/config_dump?mask={} <operations_admin_interface_config_dump_by_mask>`,\n  // or :ref:`/config_dump?resource={},mask={}\n  // 
<operations_admin_interface_config_dump_by_resource_and_mask>` for more information.\n  repeated google.protobuf.Any configs = 1;\n}\n\nmessage UpdateFailureState {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.UpdateFailureState\";\n\n  // What the component configuration would have been if the update had succeeded.\n  google.protobuf.Any failed_configuration = 1;\n\n  // Time of the latest failed update attempt.\n  google.protobuf.Timestamp last_update_attempt = 2;\n\n  // Details about the last failed update attempt.\n  string details = 3;\n}\n\n// This message describes the bootstrap configuration that Envoy was started with. This includes\n// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate\n// the static portions of an Envoy configuration by reusing the output as the bootstrap\n// configuration for another Envoy.\nmessage BootstrapConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.BootstrapConfigDump\";\n\n  config.bootstrap.v3.Bootstrap bootstrap = 1;\n\n  // The timestamp when the BootstrapConfig was last updated.\n  google.protobuf.Timestamp last_updated = 2;\n}\n\n// Envoy's listener manager fills this message with all currently known listeners. 
Listener\n// configuration information can be used to recreate an Envoy configuration by populating all\n// listeners as static listeners or by returning them in a LDS response.\nmessage ListenersConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.ListenersConfigDump\";\n\n  // Describes a statically loaded listener.\n  message StaticListener {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ListenersConfigDump.StaticListener\";\n\n    // The listener config.\n    google.protobuf.Any listener = 1;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicListenerState {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ListenersConfigDump.DynamicListenerState\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time\n    // that the listener was loaded. 
In the future, discrete per-listener versions may be supported\n    // by the API.\n    string version_info = 1;\n\n    // The listener config.\n    google.protobuf.Any listener = 2;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // Describes a dynamically loaded listener via the LDS API.\n  // [#next-free-field: 6]\n  message DynamicListener {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ListenersConfigDump.DynamicListener\";\n\n    // The name or unique id of this listener, pulled from the DynamicListenerState config.\n    string name = 1;\n\n    // The listener state for any active listener by this name.\n    // These are listeners that are available to service data plane traffic.\n    DynamicListenerState active_state = 2;\n\n    // The listener state for any warming listener by this name.\n    // These are listeners that are currently undergoing warming in preparation to service data\n    // plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the warming listeners should generally be discarded.\n    DynamicListenerState warming_state = 3;\n\n    // The listener state for any draining listener by this name.\n    // These are listeners that are currently undergoing draining in preparation to stop servicing\n    // data plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the draining listeners should generally be discarded.\n    DynamicListenerState draining_state = 4;\n\n    // Set if the last update failed, cleared after the next successful update.\n    UpdateFailureState error_state = 5;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` in the\n  // last processed LDS discovery response. 
If there are only static bootstrap listeners, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded listener configs.\n  repeated StaticListener static_listeners = 2;\n\n  // State for any warming, active, or draining listeners.\n  repeated DynamicListener dynamic_listeners = 3;\n}\n\n// Envoy's cluster manager fills this message with all currently known clusters. Cluster\n// configuration information can be used to recreate an Envoy configuration by populating all\n// clusters as static clusters or by returning them in a CDS response.\nmessage ClustersConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.ClustersConfigDump\";\n\n  // Describes a statically loaded cluster.\n  message StaticCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ClustersConfigDump.StaticCluster\";\n\n    // The cluster config.\n    google.protobuf.Any cluster = 1;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  // Describes a dynamically loaded cluster via the CDS API.\n  message DynamicCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ClustersConfigDump.DynamicCluster\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time\n    // that the cluster was loaded. 
In the future, discrete per-cluster versions may be supported by\n    // the API.\n    string version_info = 1;\n\n    // The cluster config.\n    google.protobuf.Any cluster = 2;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` in the\n  // last processed CDS discovery response. If there are only static bootstrap clusters, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded cluster configs.\n  repeated StaticCluster static_clusters = 2;\n\n  // The dynamically loaded active clusters. These are clusters that are available to service\n  // data plane traffic.\n  repeated DynamicCluster dynamic_active_clusters = 3;\n\n  // The dynamically loaded warming clusters. These are clusters that are currently undergoing\n  // warming in preparation to service data plane traffic. Note that if attempting to recreate an\n  // Envoy configuration from a configuration dump, the warming clusters should generally be\n  // discarded.\n  repeated DynamicCluster dynamic_warming_clusters = 4;\n}\n\n// Envoy's RDS implementation fills this message with all currently loaded routes, as described by\n// their RouteConfiguration objects. 
Static routes that are either defined in the bootstrap configuration\n// or defined inline while configuring listeners are separated from those configured dynamically via RDS.\n// Route configuration information can be used to recreate an Envoy configuration by populating all routes\n// as static routes or by returning them in RDS responses.\nmessage RoutesConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.RoutesConfigDump\";\n\n  message StaticRouteConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.RoutesConfigDump.StaticRouteConfig\";\n\n    // The route config.\n    google.protobuf.Any route_config = 1;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicRouteConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.RoutesConfigDump.DynamicRouteConfig\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time that\n    // the route configuration was loaded.\n    string version_info = 1;\n\n    // The route config.\n    google.protobuf.Any route_config = 2;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded route configs.\n  repeated StaticRouteConfig static_route_configs = 2;\n\n  // The dynamically loaded route configs.\n  repeated DynamicRouteConfig dynamic_route_configs = 3;\n}\n\n// Envoy's scoped RDS implementation fills this message with all currently loaded route\n// configuration scopes (defined via ScopedRouteConfigurationsSet protos). 
This message lists both\n// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the\n// dynamically obtained scopes via the SRDS API.\nmessage ScopedRoutesConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.ScopedRoutesConfigDump\";\n\n  message InlineScopedRouteConfigs {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ScopedRoutesConfigDump.InlineScopedRouteConfigs\";\n\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 2;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  message DynamicScopedRouteConfigs {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.ScopedRoutesConfigDump.DynamicScopedRouteConfigs\";\n\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time that\n    // the scoped routes configuration was loaded.\n    string version_info = 2;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 3;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 4;\n  }\n\n  // The statically loaded scoped route configs.\n  repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1;\n\n  // The dynamically loaded scoped route configs.\n  repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2;\n}\n\n// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS.\nmessage SecretsConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.SecretsConfigDump\";\n\n  // DynamicSecret contains secret information fetched via SDS.\n  message DynamicSecret {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.SecretsConfigDump.DynamicSecret\";\n\n    // The name assigned to the secret.\n    string name = 1;\n\n    // This is the per-resource version information.\n    string version_info = 2;\n\n    // The timestamp when the secret was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 4;\n  }\n\n  // StaticSecret specifies statically loaded secret in bootstrap.\n  message StaticSecret {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v2alpha.SecretsConfigDump.StaticSecret\";\n\n    // The name assigned to the secret.\n    string name = 1;\n\n    // The timestamp when the 
secret was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 3;\n  }\n\n  // The statically loaded secrets.\n  repeated StaticSecret static_secrets = 1;\n\n  // The dynamically loaded active secrets. These are secrets that are available to service\n  // clusters or listeners.\n  repeated DynamicSecret dynamic_active_secrets = 2;\n\n  // The dynamically loaded warming secrets. These are secrets that are currently undergoing\n  // warming in preparation to service clusters or listeners.\n  repeated DynamicSecret dynamic_warming_secrets = 3;\n}\n\n// Envoy's admin fill this message with all currently known endpoints. Endpoint\n// configuration information can be used to recreate an Envoy configuration by populating all\n// endpoints as static endpoints or by returning them in an EDS response.\nmessage EndpointsConfigDump {\n  message StaticEndpointConfig {\n    // The endpoint config.\n    google.protobuf.Any endpoint_config = 1;\n\n    // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicEndpointConfig {\n    // [#not-implemented-hide:] This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time that\n    // the endpoint configuration was loaded.\n    string version_info = 1;\n\n    // The endpoint config.\n    google.protobuf.Any endpoint_config = 2;\n\n    // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded endpoint configs.\n  repeated StaticEndpointConfig static_endpoint_configs = 2;\n\n  // The dynamically loaded endpoint configs.\n  repeated DynamicEndpointConfig dynamic_endpoint_configs = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/init_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"InitDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: InitDump]\n\n// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers,\n// which provides the information of their unready targets.\n// The :ref:`/init_dump <operations_admin_interface_init_dump>` will dump all unready targets information.\nmessage UnreadyTargetsDumps {\n  // Message of unready targets information of an init manager.\n  message UnreadyTargetsDump {\n    // Name of the init manager. Example: \"init_manager_xxx\".\n    string name = 1;\n\n    // Names of unready targets of the init manager. Example: \"target_xxx\".\n    repeated string target_names = 2;\n  }\n\n  // You can choose specific component to dump unready targets with mask query parameter.\n  // See :ref:`/init_dump?mask={} <operations_admin_interface_init_dump_by_mask>` for more information.\n  // The dumps of unready targets of all init managers.\n  repeated UnreadyTargetsDump unready_targets_dumps = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/listeners.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"ListenersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Listeners]\n\n// Admin endpoint uses this wrapper for `/listeners` to display listener status information.\n// See :ref:`/listeners <operations_admin_interface_listeners>` for more information.\nmessage Listeners {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Listeners\";\n\n  // List of listener statuses.\n  repeated ListenerStatus listener_statuses = 1;\n}\n\n// Details an individual listener's current status.\nmessage ListenerStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.ListenerStatus\";\n\n  // Name of the listener\n  string name = 1;\n\n  // The actual local address that the listener is listening on. If a listener was configured\n  // to listen on port 0, then this address has the port that was allocated by the OS.\n  config.core.v3.Address local_address = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/memory.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"MemoryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Memory]\n\n// Proto representation of the internal memory consumption of an Envoy instance. These represent\n// values extracted from an internal TCMalloc instance. For more information, see the section of the\n// docs entitled [\"Generic Tcmalloc Status\"](https://gperftools.github.io/gperftools/tcmalloc.html).\n// [#next-free-field: 7]\nmessage Memory {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.Memory\";\n\n  // The number of bytes allocated by the heap for Envoy. This is an alias for\n  // `generic.current_allocated_bytes`.\n  uint64 allocated = 1;\n\n  // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for\n  // `generic.heap_size`.\n  uint64 heap_size = 2;\n\n  // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and depending on the OS, typically do not count towards physical memory\n  // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`.\n  uint64 pageheap_unmapped = 3;\n\n  // The number of bytes in free, mapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also\n  // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`.\n  uint64 pageheap_free = 4;\n\n  // The amount of memory used by the TCMalloc thread caches (for small objects). 
This is an alias\n  // for `tcmalloc.current_total_thread_cache_bytes`.\n  uint64 total_thread_cache = 5;\n\n  // The number of bytes of the physical memory usage by the allocator. This is an alias for\n  // `generic.total_physical_bytes`.\n  uint64 total_physical_bytes = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/metrics.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"MetricsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metrics]\n\n// Proto representation of an Envoy Counter or Gauge value.\nmessage SimpleMetric {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.SimpleMetric\";\n\n  enum Type {\n    COUNTER = 0;\n    GAUGE = 1;\n  }\n\n  // Type of the metric represented.\n  Type type = 1;\n\n  // Current metric value.\n  uint64 value = 2;\n\n  // Name of the metric.\n  string name = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/mutex_stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"MutexStatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: MutexStats]\n\n// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run\n// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex`\n// [docs](https://abseil.io/about/design/mutex#extra-features).\n//\n// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not\n// correspond to core clock frequency. For more information, see the `CycleClock`\n// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).\nmessage MutexStats {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.MutexStats\";\n\n  // The number of individual mutex contentions which have occurred since startup.\n  uint64 num_contentions = 1;\n\n  // The length of the current contention wait cycle.\n  uint64 current_wait_cycles = 2;\n\n  // The lifetime total of all contention wait cycles.\n  uint64 lifetime_wait_cycles = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/server_info.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"ServerInfoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Server State]\n\n// Proto representation of the value returned by /server_info, containing\n// server version/server status information.\n// [#next-free-field: 8]\nmessage ServerInfo {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.ServerInfo\";\n\n  enum State {\n    // Server is live and serving traffic.\n    LIVE = 0;\n\n    // Server is draining listeners in response to external health checks failing.\n    DRAINING = 1;\n\n    // Server has not yet completed cluster manager initialization.\n    PRE_INITIALIZING = 2;\n\n    // Server is running the cluster manager initialization callbacks (e.g., RDS).\n    INITIALIZING = 3;\n  }\n\n  // Server version.\n  string version = 1;\n\n  // State of the server.\n  State state = 2;\n\n  // Uptime since current epoch was started.\n  google.protobuf.Duration uptime_current_epoch = 3;\n\n  // Uptime since the start of the first epoch.\n  google.protobuf.Duration uptime_all_epochs = 4;\n\n  // Hot restart version.\n  string hot_restart_version = 5;\n\n  // Command line options the server is currently running with.\n  CommandLineOptions command_line_options = 6;\n\n  // Populated node identity of this server.\n  config.core.v3.Node node = 7;\n}\n\n// [#next-free-field: 37]\nmessage CommandLineOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v2alpha.CommandLineOptions\";\n\n  enum IpVersion {\n    v4 = 0;\n    v6 
= 1;\n  }\n\n  enum Mode {\n    // Validate configs and then serve traffic normally.\n    Serve = 0;\n\n    // Validate configs and exit.\n    Validate = 1;\n\n    // Completely load and initialize the config, and then exit without running the listener loop.\n    InitOnly = 2;\n  }\n\n  enum DrainStrategy {\n    // Gradually discourage connections over the course of the drain period.\n    Gradual = 0;\n\n    // Discourage all connections for the duration of the drain sequence.\n    Immediate = 1;\n  }\n\n  reserved 12;\n\n  // See :option:`--base-id` for details.\n  uint64 base_id = 1;\n\n  // See :option:`--use-dynamic-base-id` for details.\n  bool use_dynamic_base_id = 31;\n\n  // See :option:`--base-id-path` for details.\n  string base_id_path = 32;\n\n  // See :option:`--concurrency` for details.\n  uint32 concurrency = 2;\n\n  // See :option:`--config-path` for details.\n  string config_path = 3;\n\n  // See :option:`--config-yaml` for details.\n  string config_yaml = 4;\n\n  // See :option:`--allow-unknown-static-fields` for details.\n  bool allow_unknown_static_fields = 5;\n\n  // See :option:`--reject-unknown-dynamic-fields` for details.\n  bool reject_unknown_dynamic_fields = 26;\n\n  // See :option:`--ignore-unknown-dynamic-fields` for details.\n  bool ignore_unknown_dynamic_fields = 30;\n\n  // See :option:`--admin-address-path` for details.\n  string admin_address_path = 6;\n\n  // See :option:`--local-address-ip-version` for details.\n  IpVersion local_address_ip_version = 7;\n\n  // See :option:`--log-level` for details.\n  string log_level = 8;\n\n  // See :option:`--component-log-level` for details.\n  string component_log_level = 9;\n\n  // See :option:`--log-format` for details.\n  string log_format = 10;\n\n  // See :option:`--log-format-escaped` for details.\n  bool log_format_escaped = 27;\n\n  // See :option:`--log-path` for details.\n  string log_path = 11;\n\n  // See :option:`--service-cluster` for details.\n  string service_cluster = 
13;\n\n  // See :option:`--service-node` for details.\n  string service_node = 14;\n\n  // See :option:`--service-zone` for details.\n  string service_zone = 15;\n\n  // See :option:`--file-flush-interval-msec` for details.\n  google.protobuf.Duration file_flush_interval = 16;\n\n  // See :option:`--drain-time-s` for details.\n  google.protobuf.Duration drain_time = 17;\n\n  // See :option:`--drain-strategy` for details.\n  DrainStrategy drain_strategy = 33;\n\n  // See :option:`--parent-shutdown-time-s` for details.\n  google.protobuf.Duration parent_shutdown_time = 18;\n\n  // See :option:`--mode` for details.\n  Mode mode = 19;\n\n  // See :option:`--disable-hot-restart` for details.\n  bool disable_hot_restart = 22;\n\n  // See :option:`--enable-mutex-tracing` for details.\n  bool enable_mutex_tracing = 23;\n\n  // See :option:`--restart-epoch` for details.\n  uint32 restart_epoch = 24;\n\n  // See :option:`--cpuset-threads` for details.\n  bool cpuset_threads = 25;\n\n  // See :option:`--disable-extensions` for details.\n  repeated string disabled_extensions = 28;\n\n  // See :option:`--bootstrap-version` for details.\n  uint32 bootstrap_version = 29;\n\n  // See :option:`--enable-fine-grain-logging` for details.\n  bool enable_fine_grain_logging = 34;\n\n  // See :option:`--socket-path` for details.\n  string socket_path = 35;\n\n  // See :option:`--socket-mode` for details.\n  uint32 socket_mode = 36;\n\n  uint64 hidden_envoy_deprecated_max_stats = 20\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  uint64 hidden_envoy_deprecated_max_obj_name_len = 21\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v3/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v3;\n\nimport \"envoy/config/tap/v3/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v3\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap]\n\n// The /tap admin request body that is used to configure an active tap session.\nmessage TapRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v2alpha.TapRequest\";\n\n  // The opaque configuration ID used to match the configuration to a loaded extension.\n  // A tap extension configures a similar opaque ID that is used to match.\n  string config_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The tap configuration to load.\n  config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/admin/v3:pkg\",\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/bootstrap/v4alpha:pkg\",\n        \"//envoy/config/cluster/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/tap/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/certs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"CertsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Certificates]\n\n// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to\n// display certificate information. See :ref:`/certs <operations_admin_interface_certs>` for more\n// information.\nmessage Certificates {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Certificates\";\n\n  // List of certificates known to an Envoy.\n  repeated Certificate certificates = 1;\n}\n\nmessage Certificate {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Certificate\";\n\n  // Details of CA certificate.\n  repeated CertificateDetails ca_cert = 1;\n\n  // Details of Certificate Chain\n  repeated CertificateDetails cert_chain = 2;\n}\n\n// [#next-free-field: 8]\nmessage CertificateDetails {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.CertificateDetails\";\n\n  message OcspDetails {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.CertificateDetails.OcspDetails\";\n\n    // Indicates the time from which the OCSP response is valid.\n    google.protobuf.Timestamp valid_from = 1;\n\n    // Indicates the time at which the OCSP response expires.\n    google.protobuf.Timestamp expiration = 2;\n  }\n\n  // Path of the certificate.\n  string path = 1;\n\n  // Certificate Serial Number.\n  string serial_number = 2;\n\n  // List of Subject Alternate names.\n  repeated SubjectAlternateName subject_alt_names = 3;\n\n  // Minimum of days until 
expiration of certificate and it's chain.\n  uint64 days_until_expiration = 4;\n\n  // Indicates the time from which the certificate is valid.\n  google.protobuf.Timestamp valid_from = 5;\n\n  // Indicates the time at which the certificate expires.\n  google.protobuf.Timestamp expiration_time = 6;\n\n  // Details related to the OCSP response associated with this certificate, if any.\n  OcspDetails ocsp_details = 7;\n}\n\nmessage SubjectAlternateName {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v3.SubjectAlternateName\";\n\n  // Subject Alternate Name.\n  oneof name {\n    string dns = 1;\n\n    string uri = 2;\n\n    string ip_address = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/clusters.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/admin/v4alpha/metrics.proto\";\nimport \"envoy/config/cluster/v4alpha/circuit_breaker.proto\";\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/health_check.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"ClustersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Clusters]\n\n// Admin endpoint uses this wrapper for `/clusters` to display cluster status information.\n// See :ref:`/clusters <operations_admin_interface_clusters>` for more information.\nmessage Clusters {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Clusters\";\n\n  // Mapping from cluster name to each cluster's status.\n  repeated ClusterStatus cluster_statuses = 1;\n}\n\n// Details an individual cluster's current status.\n// [#next-free-field: 7]\nmessage ClusterStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ClusterStatus\";\n\n  // Name of the cluster.\n  string name = 1;\n\n  // Denotes whether this cluster was added via API or configured statically.\n  bool added_via_api = 2;\n\n  // The success rate threshold used in the last interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used to calculate the threshold.\n  // If\n  // 
:ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used to calculate the threshold.\n  // The threshold is used to eject hosts based on their success rate. See\n  // :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. Outlier detection is not enabled for this cluster.\n  type.v3.Percent success_rate_ejection_threshold = 3;\n\n  // Mapping from host address to the host's current status.\n  repeated HostStatus host_statuses = 4;\n\n  // The success rate threshold used in the last interval when only locally originated failures were\n  // taken into account and externally originated errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*. The threshold is used to eject hosts based on their success rate.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: this field may be omitted in any of the three following cases:\n  //\n  // 1. There were not enough hosts with enough request volume to proceed with success rate based\n  //    outlier ejection.\n  // 2. The threshold is computed to be < 0 because a negative value implies that there was no\n  //    threshold for that interval.\n  // 3. 
Outlier detection is not enabled for this cluster.\n  type.v3.Percent local_origin_success_rate_ejection_threshold = 5;\n\n  // :ref:`Circuit breaking <arch_overview_circuit_break>` settings of the cluster.\n  config.cluster.v4alpha.CircuitBreakers circuit_breakers = 6;\n}\n\n// Current state of a particular host.\n// [#next-free-field: 10]\nmessage HostStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.HostStatus\";\n\n  // Address of this host.\n  config.core.v4alpha.Address address = 1;\n\n  // List of stats specific to this host.\n  repeated SimpleMetric stats = 2;\n\n  // The host's current health status.\n  HostHealthStatus health_status = 3;\n\n  // Request success rate for this host over the last calculated interval.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors: externally and locally generated were used in success rate\n  // calculation. If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*, only externally generated errors were used in success rate calculation.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.v3.Percent success_rate = 4;\n\n  // The host's weight. If not configured, the value defaults to 1.\n  uint32 weight = 5;\n\n  // The hostname of the host, if applicable.\n  string hostname = 6;\n\n  // The host's priority. 
If not configured, the value defaults to 0 (highest priority).\n  uint32 priority = 7;\n\n  // Request success rate for this host over the last calculated\n  // interval when only locally originated errors are taken into account and externally originated\n  // errors were treated as success.\n  // This field should be interpreted only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  //\n  // Note: the message will not be present if host did not have enough request volume to calculate\n  // success rate or the cluster did not have enough hosts to run through success rate outlier\n  // ejection.\n  type.v3.Percent local_origin_success_rate = 8;\n\n  // locality of the host.\n  config.core.v4alpha.Locality locality = 9;\n}\n\n// Health status for a host.\n// [#next-free-field: 7]\nmessage HostHealthStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.HostHealthStatus\";\n\n  // The host is currently failing active health checks.\n  bool failed_active_health_check = 1;\n\n  // The host is currently considered an outlier and has been ejected.\n  bool failed_outlier_check = 2;\n\n  // The host is currently being marked as degraded through active health checking.\n  bool failed_active_degraded_check = 4;\n\n  // The host has been removed from service discovery, but is being stabilized due to active\n  // health checking.\n  bool pending_dynamic_removal = 5;\n\n  // The host has not yet been health checked.\n  bool pending_active_hc = 6;\n\n  // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported\n  // here.\n  // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.]\n  config.core.v4alpha.HealthStatus eds_health_status = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/config_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/config/bootstrap/v4alpha/bootstrap.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"ConfigDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: ConfigDump]\n\n// The :ref:`/config_dump <operations_admin_interface_config_dump>` admin endpoint uses this wrapper\n// message to maintain and serve arbitrary configuration information from any component in Envoy.\nmessage ConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ConfigDump\";\n\n  // This list is serialized and dumped in its entirety at the\n  // :ref:`/config_dump <operations_admin_interface_config_dump>` endpoint.\n  //\n  // The following configurations are currently supported and will be dumped in the order given\n  // below:\n  //\n  // * *bootstrap*: :ref:`BootstrapConfigDump <envoy_api_msg_admin.v4alpha.BootstrapConfigDump>`\n  // * *clusters*: :ref:`ClustersConfigDump <envoy_api_msg_admin.v4alpha.ClustersConfigDump>`\n  // * *endpoints*:  :ref:`EndpointsConfigDump <envoy_api_msg_admin.v4alpha.EndpointsConfigDump>`\n  // * *listeners*: :ref:`ListenersConfigDump <envoy_api_msg_admin.v4alpha.ListenersConfigDump>`\n  // * *routes*:  :ref:`RoutesConfigDump <envoy_api_msg_admin.v4alpha.RoutesConfigDump>`\n  //\n  // EDS Configuration will only be dumped by using parameter `?include_eds`\n  //\n  // You can filter output with the resource and mask query parameters.\n  // See :ref:`/config_dump?resource={} <operations_admin_interface_config_dump_by_resource>`,\n  // :ref:`/config_dump?mask={} 
<operations_admin_interface_config_dump_by_mask>`,\n  // or :ref:`/config_dump?resource={},mask={}\n  // <operations_admin_interface_config_dump_by_resource_and_mask>` for more information.\n  repeated google.protobuf.Any configs = 1;\n}\n\nmessage UpdateFailureState {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.UpdateFailureState\";\n\n  // What the component configuration would have been if the update had succeeded.\n  google.protobuf.Any failed_configuration = 1;\n\n  // Time of the latest failed update attempt.\n  google.protobuf.Timestamp last_update_attempt = 2;\n\n  // Details about the last failed update attempt.\n  string details = 3;\n}\n\n// This message describes the bootstrap configuration that Envoy was started with. This includes\n// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate\n// the static portions of an Envoy configuration by reusing the output as the bootstrap\n// configuration for another Envoy.\nmessage BootstrapConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.BootstrapConfigDump\";\n\n  config.bootstrap.v4alpha.Bootstrap bootstrap = 1;\n\n  // The timestamp when the BootstrapConfig was last updated.\n  google.protobuf.Timestamp last_updated = 2;\n}\n\n// Envoy's listener manager fills this message with all currently known listeners. 
Listener\n// configuration information can be used to recreate an Envoy configuration by populating all\n// listeners as static listeners or by returning them in a LDS response.\nmessage ListenersConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ListenersConfigDump\";\n\n  // Describes a statically loaded listener.\n  message StaticListener {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ListenersConfigDump.StaticListener\";\n\n    // The listener config.\n    google.protobuf.Any listener = 1;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicListenerState {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ListenersConfigDump.DynamicListenerState\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time\n    // that the listener was loaded. 
In the future, discrete per-listener versions may be supported\n    // by the API.\n    string version_info = 1;\n\n    // The listener config.\n    google.protobuf.Any listener = 2;\n\n    // The timestamp when the Listener was last successfully updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // Describes a dynamically loaded listener via the LDS API.\n  // [#next-free-field: 6]\n  message DynamicListener {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ListenersConfigDump.DynamicListener\";\n\n    // The name or unique id of this listener, pulled from the DynamicListenerState config.\n    string name = 1;\n\n    // The listener state for any active listener by this name.\n    // These are listeners that are available to service data plane traffic.\n    DynamicListenerState active_state = 2;\n\n    // The listener state for any warming listener by this name.\n    // These are listeners that are currently undergoing warming in preparation to service data\n    // plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the warming listeners should generally be discarded.\n    DynamicListenerState warming_state = 3;\n\n    // The listener state for any draining listener by this name.\n    // These are listeners that are currently undergoing draining in preparation to stop servicing\n    // data plane traffic. Note that if attempting to recreate an Envoy configuration from a\n    // configuration dump, the draining listeners should generally be discarded.\n    DynamicListenerState draining_state = 4;\n\n    // Set if the last update failed, cleared after the next successful update.\n    UpdateFailureState error_state = 5;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` in the\n  // last processed LDS discovery response. 
If there are only static bootstrap listeners, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded listener configs.\n  repeated StaticListener static_listeners = 2;\n\n  // State for any warming, active, or draining listeners.\n  repeated DynamicListener dynamic_listeners = 3;\n}\n\n// Envoy's cluster manager fills this message with all currently known clusters. Cluster\n// configuration information can be used to recreate an Envoy configuration by populating all\n// clusters as static clusters or by returning them in a CDS response.\nmessage ClustersConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ClustersConfigDump\";\n\n  // Describes a statically loaded cluster.\n  message StaticCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ClustersConfigDump.StaticCluster\";\n\n    // The cluster config.\n    google.protobuf.Any cluster = 1;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  // Describes a dynamically loaded cluster via the CDS API.\n  message DynamicCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ClustersConfigDump.DynamicCluster\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time\n    // that the cluster was loaded. 
In the future, discrete per-cluster versions may be supported by\n    // the API.\n    string version_info = 1;\n\n    // The cluster config.\n    google.protobuf.Any cluster = 2;\n\n    // The timestamp when the Cluster was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // This is the :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` in the\n  // last processed CDS discovery response. If there are only static bootstrap clusters, this field\n  // will be \"\".\n  string version_info = 1;\n\n  // The statically loaded cluster configs.\n  repeated StaticCluster static_clusters = 2;\n\n  // The dynamically loaded active clusters. These are clusters that are available to service\n  // data plane traffic.\n  repeated DynamicCluster dynamic_active_clusters = 3;\n\n  // The dynamically loaded warming clusters. These are clusters that are currently undergoing\n  // warming in preparation to service data plane traffic. Note that if attempting to recreate an\n  // Envoy configuration from a configuration dump, the warming clusters should generally be\n  // discarded.\n  repeated DynamicCluster dynamic_warming_clusters = 4;\n}\n\n// Envoy's RDS implementation fills this message with all currently loaded routes, as described by\n// their RouteConfiguration objects. 
Static routes that are either defined in the bootstrap configuration\n// or defined inline while configuring listeners are separated from those configured dynamically via RDS.\n// Route configuration information can be used to recreate an Envoy configuration by populating all routes\n// as static routes or by returning them in RDS responses.\nmessage RoutesConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.RoutesConfigDump\";\n\n  message StaticRouteConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.RoutesConfigDump.StaticRouteConfig\";\n\n    // The route config.\n    google.protobuf.Any route_config = 1;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicRouteConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig\";\n\n    // This is the per-resource version information. This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time that\n    // the route configuration was loaded.\n    string version_info = 1;\n\n    // The route config.\n    google.protobuf.Any route_config = 2;\n\n    // The timestamp when the Route was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded route configs.\n  repeated StaticRouteConfig static_route_configs = 2;\n\n  // The dynamically loaded route configs.\n  repeated DynamicRouteConfig dynamic_route_configs = 3;\n}\n\n// Envoy's scoped RDS implementation fills this message with all currently loaded route\n// configuration scopes (defined via ScopedRouteConfigurationsSet protos). 
This message lists both\n// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the\n// dynamically obtained scopes via the SRDS API.\nmessage ScopedRoutesConfigDump {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.admin.v3.ScopedRoutesConfigDump\";\n\n  message InlineScopedRouteConfigs {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs\";\n\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 2;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  message DynamicScopedRouteConfigs {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs\";\n\n    // The name assigned to the scoped route configurations.\n    string name = 1;\n\n    // This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time that\n    // the scoped routes configuration was loaded.\n    string version_info = 2;\n\n    // The scoped route configurations.\n    repeated google.protobuf.Any scoped_route_configs = 3;\n\n    // The timestamp when the scoped route config set was last updated.\n    google.protobuf.Timestamp last_updated = 4;\n  }\n\n  // The statically loaded scoped route configs.\n  repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1;\n\n  // The dynamically loaded scoped route configs.\n  repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2;\n}\n\n// Envoy's SDS implementation fills this message with all secrets fetched dynamically via SDS.\nmessage SecretsConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.SecretsConfigDump\";\n\n  // DynamicSecret contains secret information fetched via SDS.\n  message DynamicSecret {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.SecretsConfigDump.DynamicSecret\";\n\n    // The name assigned to the secret.\n    string name = 1;\n\n    // This is the per-resource version information.\n    string version_info = 2;\n\n    // The timestamp when the secret was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 4;\n  }\n\n  // StaticSecret specifies statically loaded secret in bootstrap.\n  message StaticSecret {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.SecretsConfigDump.StaticSecret\";\n\n    // The name assigned to the secret.\n    string name = 1;\n\n    // The timestamp when the secret was last 
updated.\n    google.protobuf.Timestamp last_updated = 2;\n\n    // The actual secret information.\n    // Security sensitive information is redacted (replaced with \"[redacted]\") for\n    // private keys and passwords in TLS certificates.\n    google.protobuf.Any secret = 3;\n  }\n\n  // The statically loaded secrets.\n  repeated StaticSecret static_secrets = 1;\n\n  // The dynamically loaded active secrets. These are secrets that are available to service\n  // clusters or listeners.\n  repeated DynamicSecret dynamic_active_secrets = 2;\n\n  // The dynamically loaded warming secrets. These are secrets that are currently undergoing\n  // warming in preparation to service clusters or listeners.\n  repeated DynamicSecret dynamic_warming_secrets = 3;\n}\n\n// Envoy's admin fills this message with all currently known endpoints. Endpoint\n// configuration information can be used to recreate an Envoy configuration by populating all\n// endpoints as static endpoints or by returning them in an EDS response.\nmessage EndpointsConfigDump {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.EndpointsConfigDump\";\n\n  message StaticEndpointConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig\";\n\n    // The endpoint config.\n    google.protobuf.Any endpoint_config = 1;\n\n    // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicEndpointConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig\";\n\n    // [#not-implemented-hide:] This is the per-resource version information. 
This version is currently taken from the\n    // :ref:`version_info <envoy_api_field_service.discovery.v4alpha.DiscoveryResponse.version_info>` field at the time that\n    // the endpoint configuration was loaded.\n    string version_info = 1;\n\n    // The endpoint config.\n    google.protobuf.Any endpoint_config = 2;\n\n    // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  // The statically loaded endpoint configs.\n  repeated StaticEndpointConfig static_endpoint_configs = 2;\n\n  // The dynamically loaded endpoint configs.\n  repeated DynamicEndpointConfig dynamic_endpoint_configs = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/init_dump.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"InitDumpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: InitDump]\n\n// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers,\n// which provides the information of their unready targets.\n// The :ref:`/init_dump <operations_admin_interface_init_dump>` will dump all unready targets information.\nmessage UnreadyTargetsDumps {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.UnreadyTargetsDumps\";\n\n  // Message of unready targets information of an init manager.\n  message UnreadyTargetsDump {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump\";\n\n    // Name of the init manager. Example: \"init_manager_xxx\".\n    string name = 1;\n\n    // Names of unready targets of the init manager. Example: \"target_xxx\".\n    repeated string target_names = 2;\n  }\n\n  // You can choose specific component to dump unready targets with mask query parameter.\n  // See :ref:`/init_dump?mask={} <operations_admin_interface_init_dump_by_mask>` for more information.\n  // The dumps of unready targets of all init managers.\n  repeated UnreadyTargetsDump unready_targets_dumps = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/listeners.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"ListenersProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Listeners]\n\n// Admin endpoint uses this wrapper for `/listeners` to display listener status information.\n// See :ref:`/listeners <operations_admin_interface_listeners>` for more information.\nmessage Listeners {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Listeners\";\n\n  // List of listener statuses.\n  repeated ListenerStatus listener_statuses = 1;\n}\n\n// Details an individual listener's current status.\nmessage ListenerStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ListenerStatus\";\n\n  // Name of the listener\n  string name = 1;\n\n  // The actual local address that the listener is listening on. If a listener was configured\n  // to listen on port 0, then this address has the port that was allocated by the OS.\n  config.core.v4alpha.Address local_address = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/memory.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"MemoryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Memory]\n\n// Proto representation of the internal memory consumption of an Envoy instance. These represent\n// values extracted from an internal TCMalloc instance. For more information, see the section of the\n// docs entitled [\"Generic Tcmalloc Status\"](https://gperftools.github.io/gperftools/tcmalloc.html).\n// [#next-free-field: 7]\nmessage Memory {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.Memory\";\n\n  // The number of bytes allocated by the heap for Envoy. This is an alias for\n  // `generic.current_allocated_bytes`.\n  uint64 allocated = 1;\n\n  // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for\n  // `generic.heap_size`.\n  uint64 heap_size = 2;\n\n  // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and depending on the OS, typically do not count towards physical memory\n  // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`.\n  uint64 pageheap_unmapped = 3;\n\n  // The number of bytes in free, mapped pages in the page heap. These bytes always count towards\n  // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also\n  // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`.\n  uint64 pageheap_free = 4;\n\n  // The amount of memory used by the TCMalloc thread caches (for small objects). 
This is an alias\n  // for `tcmalloc.current_total_thread_cache_bytes`.\n  uint64 total_thread_cache = 5;\n\n  // The number of bytes of the physical memory usage by the allocator. This is an alias for\n  // `generic.total_physical_bytes`.\n  uint64 total_physical_bytes = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/metrics.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"MetricsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Metrics]\n\n// Proto representation of an Envoy Counter or Gauge value.\nmessage SimpleMetric {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.SimpleMetric\";\n\n  enum Type {\n    COUNTER = 0;\n    GAUGE = 1;\n  }\n\n  // Type of the metric represented.\n  Type type = 1;\n\n  // Current metric value.\n  uint64 value = 2;\n\n  // Name of the metric.\n  string name = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/mutex_stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"MutexStatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: MutexStats]\n\n// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run\n// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex`\n// [docs](https://abseil.io/about/design/mutex#extra-features).\n//\n// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not\n// correspond to core clock frequency. For more information, see the `CycleClock`\n// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).\nmessage MutexStats {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.MutexStats\";\n\n  // The number of individual mutex contentions which have occurred since startup.\n  uint64 num_contentions = 1;\n\n  // The length of the current contention wait cycle.\n  uint64 current_wait_cycles = 2;\n\n  // The lifetime total of all contention wait cycles.\n  uint64 lifetime_wait_cycles = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/server_info.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"ServerInfoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Server State]\n\n// Proto representation of the value returned by /server_info, containing\n// server version/server status information.\n// [#next-free-field: 8]\nmessage ServerInfo {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.ServerInfo\";\n\n  enum State {\n    // Server is live and serving traffic.\n    LIVE = 0;\n\n    // Server is draining listeners in response to external health checks failing.\n    DRAINING = 1;\n\n    // Server has not yet completed cluster manager initialization.\n    PRE_INITIALIZING = 2;\n\n    // Server is running the cluster manager initialization callbacks (e.g., RDS).\n    INITIALIZING = 3;\n  }\n\n  // Server version.\n  string version = 1;\n\n  // State of the server.\n  State state = 2;\n\n  // Uptime since current epoch was started.\n  google.protobuf.Duration uptime_current_epoch = 3;\n\n  // Uptime since the start of the first epoch.\n  google.protobuf.Duration uptime_all_epochs = 4;\n\n  // Hot restart version.\n  string hot_restart_version = 5;\n\n  // Command line options the server is currently running with.\n  CommandLineOptions command_line_options = 6;\n\n  // Populated node identity of this server.\n  config.core.v4alpha.Node node = 7;\n}\n\n// [#next-free-field: 37]\nmessage CommandLineOptions {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.CommandLineOptions\";\n\n  enum 
IpVersion {\n    v4 = 0;\n    v6 = 1;\n  }\n\n  enum Mode {\n    // Validate configs and then serve traffic normally.\n    Serve = 0;\n\n    // Validate configs and exit.\n    Validate = 1;\n\n    // Completely load and initialize the config, and then exit without running the listener loop.\n    InitOnly = 2;\n  }\n\n  enum DrainStrategy {\n    // Gradually discourage connections over the course of the drain period.\n    Gradual = 0;\n\n    // Discourage all connections for the duration of the drain sequence.\n    Immediate = 1;\n  }\n\n  reserved 12, 20, 21;\n\n  reserved \"max_stats\", \"max_obj_name_len\";\n\n  // See :option:`--base-id` for details.\n  uint64 base_id = 1;\n\n  // See :option:`--use-dynamic-base-id` for details.\n  bool use_dynamic_base_id = 31;\n\n  // See :option:`--base-id-path` for details.\n  string base_id_path = 32;\n\n  // See :option:`--concurrency` for details.\n  uint32 concurrency = 2;\n\n  // See :option:`--config-path` for details.\n  string config_path = 3;\n\n  // See :option:`--config-yaml` for details.\n  string config_yaml = 4;\n\n  // See :option:`--allow-unknown-static-fields` for details.\n  bool allow_unknown_static_fields = 5;\n\n  // See :option:`--reject-unknown-dynamic-fields` for details.\n  bool reject_unknown_dynamic_fields = 26;\n\n  // See :option:`--ignore-unknown-dynamic-fields` for details.\n  bool ignore_unknown_dynamic_fields = 30;\n\n  // See :option:`--admin-address-path` for details.\n  string admin_address_path = 6;\n\n  // See :option:`--local-address-ip-version` for details.\n  IpVersion local_address_ip_version = 7;\n\n  // See :option:`--log-level` for details.\n  string log_level = 8;\n\n  // See :option:`--component-log-level` for details.\n  string component_log_level = 9;\n\n  // See :option:`--log-format` for details.\n  string log_format = 10;\n\n  // See :option:`--log-format-escaped` for details.\n  bool log_format_escaped = 27;\n\n  // See :option:`--log-path` for details.\n  string log_path 
= 11;\n\n  // See :option:`--service-cluster` for details.\n  string service_cluster = 13;\n\n  // See :option:`--service-node` for details.\n  string service_node = 14;\n\n  // See :option:`--service-zone` for details.\n  string service_zone = 15;\n\n  // See :option:`--file-flush-interval-msec` for details.\n  google.protobuf.Duration file_flush_interval = 16;\n\n  // See :option:`--drain-time-s` for details.\n  google.protobuf.Duration drain_time = 17;\n\n  // See :option:`--drain-strategy` for details.\n  DrainStrategy drain_strategy = 33;\n\n  // See :option:`--parent-shutdown-time-s` for details.\n  google.protobuf.Duration parent_shutdown_time = 18;\n\n  // See :option:`--mode` for details.\n  Mode mode = 19;\n\n  // See :option:`--disable-hot-restart` for details.\n  bool disable_hot_restart = 22;\n\n  // See :option:`--enable-mutex-tracing` for details.\n  bool enable_mutex_tracing = 23;\n\n  // See :option:`--restart-epoch` for details.\n  uint32 restart_epoch = 24;\n\n  // See :option:`--cpuset-threads` for details.\n  bool cpuset_threads = 25;\n\n  // See :option:`--disable-extensions` for details.\n  repeated string disabled_extensions = 28;\n\n  // See :option:`--bootstrap-version` for details.\n  uint32 bootstrap_version = 29;\n\n  // See :option:`--enable-fine-grain-logging` for details.\n  bool enable_fine_grain_logging = 34;\n\n  // See :option:`--socket-path` for details.\n  string socket_path = 35;\n\n  // See :option:`--socket-mode` for details.\n  uint32 socket_mode = 36;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/admin/v4alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.admin.v4alpha;\n\nimport \"envoy/config/tap/v4alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.admin.v4alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tap]\n\n// The /tap admin request body that is used to configure an active tap session.\nmessage TapRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.admin.v3.TapRequest\";\n\n  // The opaque configuration ID used to match the configuration to a loaded extension.\n  // A tap extension configures a similar opaque ID that is used to match.\n  string config_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The tap configuration to load.\n  config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/annotations/BUILD",
    "content": "load(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package()\n"
  },
  {
    "path": "generated_api_shadow/envoy/annotations/deprecation.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.annotations;\n\nimport \"google/protobuf/descriptor.proto\";\n\n// Allows tagging proto fields as fatal by default. One Envoy release after\n// deprecation, deprecated fields will be disallowed by default, a state which\n// is reversible with :ref:`runtime overrides <config_runtime_deprecation>`.\n\n// Magic number in this file derived from top 28bit of SHA256 digest of\n// \"envoy.annotation.disallowed_by_default\"\nextend google.protobuf.FieldOptions {\n  bool disallowed_by_default = 189503207;\n}\n\n// Magic number in this file derived from top 28bit of SHA256 digest of\n// \"envoy.annotation.disallowed_by_default_enum\"\nextend google.protobuf.EnumValueOptions {\n  bool disallowed_by_default_enum = 70100853;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/annotations/resource.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.annotations;\n\nimport \"google/protobuf/descriptor.proto\";\n\n// Magic number in this file derived from top 28bit of SHA256 digest of \"envoy.annotation.resource\".\nextend google.protobuf.ServiceOptions {\n  ResourceAnnotation resource = 265073217;\n}\n\nmessage ResourceAnnotation {\n  // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource\n  // type.\n  string type = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/api/v2/cluster:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"//envoy/api/v2/listener:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/config/listener/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/README.md",
    "content": "Protocol buffer definitions for xDS and top-level resource API messages.\n\nPackage group `//envoy/api/v2:friends` enumerates all consumers of the shared\nAPI messages. That includes package envoy.api.v2 itself, which contains several\nxDS definitions. Default visibility for all shared definitions should be set to\n`//envoy/api/v2:friends`.\n\nAdditionally, packages envoy.api.v2.core and envoy.api.v2.auth are also\nconsumed throughout the subpackages of `//envoy/api/v2`.\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/auth/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/auth/cert.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.auth;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/auth/common.proto\";\nimport public \"envoy/api/v2/auth/secret.proto\";\nimport public \"envoy/api/v2/auth/tls.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.auth\";\noption java_outer_classname = \"CertProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tls.v3\";\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/auth/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.auth;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.auth\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tls.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common TLS configuration]\n\nmessage TlsParameters {\n  enum TlsProtocol {\n    // Envoy will choose the optimal TLS version.\n    TLS_AUTO = 0;\n\n    // TLS 1.0\n    TLSv1_0 = 1;\n\n    // TLS 1.1\n    TLSv1_1 = 2;\n\n    // TLS 1.2\n    TLSv1_2 = 3;\n\n    // TLS 1.3\n    TLSv1_3 = 4;\n  }\n\n  // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for\n  // servers.\n  TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for\n  // servers.\n  TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // If specified, the TLS listener will only support the specified `cipher list\n  // <https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#Cipher-suite-configuration>`_\n  // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not\n  // specified, the default list will be used.\n  //\n  // In non-FIPS builds, the default cipher list is:\n  //\n  // .. 
code-block:: none\n  //\n  //   [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]\n  //   [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default cipher list is:\n  //\n  // .. code-block:: none\n  //\n  //   ECDHE-ECDSA-AES128-GCM-SHA256\n  //   ECDHE-RSA-AES128-GCM-SHA256\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  repeated string cipher_suites = 3;\n\n  // If specified, the TLS connection will only support the specified ECDH\n  // curves. If not specified, the default curves will be used.\n  //\n  // In non-FIPS builds, the default curves are:\n  //\n  // .. code-block:: none\n  //\n  //   X25519\n  //   P-256\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default curve is:\n  //\n  // .. code-block:: none\n  //\n  //   P-256\n  repeated string ecdh_curves = 4;\n}\n\n// BoringSSL private key method configuration. The private key methods are used for external\n// (potentially asynchronous) signing and decryption operations. Some use cases for private key\n// methods would be TPM support and TLS acceleration.\nmessage PrivateKeyProvider {\n  // Private key method provider name. 
The name must match a\n  // supported private key method provider type.\n  string provider_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Private key method provider specific configuration.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true];\n\n    google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true];\n  }\n}\n\n// [#next-free-field: 7]\nmessage TlsCertificate {\n  // The TLS certificate chain.\n  core.DataSource certificate_chain = 1;\n\n  // The TLS private key.\n  core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n  // BoringSSL private key method provider. This is an alternative to :ref:`private_key\n  // <envoy_api_field_auth.TlsCertificate.private_key>` field. This can't be\n  // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key\n  // <envoy_api_field_auth.TlsCertificate.private_key>` and\n  // :ref:`private_key_provider\n  // <envoy_api_field_auth.TlsCertificate.private_key_provider>` fields will result in an\n  // error.\n  PrivateKeyProvider private_key_provider = 6;\n\n  // The password to decrypt the TLS private key. If this field is not set, it is assumed that the\n  // TLS private key is not password encrypted.\n  core.DataSource password = 3 [(udpa.annotations.sensitive) = true];\n\n  // [#not-implemented-hide:]\n  core.DataSource ocsp_staple = 4;\n\n  // [#not-implemented-hide:]\n  repeated core.DataSource signed_certificate_timestamp = 5;\n}\n\nmessage TlsSessionTicketKeys {\n  // Keys for encrypting and decrypting TLS session tickets. The\n  // first key in the array contains the key to encrypt all new sessions created by this context.\n  // All keys are candidates for decrypting received tickets. 
This allows for easy rotation of keys\n  // by, for example, putting the new key first, and the previous key second.\n  //\n  // If :ref:`session_ticket_keys <envoy_api_field_auth.DownstreamTlsContext.session_ticket_keys>`\n  // is not specified, the TLS library will still support resuming sessions via tickets, but it will\n  // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts\n  // or on different hosts.\n  //\n  // Each key must contain exactly 80 bytes of cryptographically-secure random data. For\n  // example, the output of ``openssl rand 80``.\n  //\n  // .. attention::\n  //\n  //   Using this feature has serious security considerations and risks. Improper handling of keys\n  //   may result in loss of secrecy in connections, even if ciphers supporting perfect forward\n  //   secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some\n  //   discussion. To minimize the risk, you must:\n  //\n  //   * Keep the session ticket keys at least as secure as your TLS certificate private keys\n  //   * Rotate session ticket keys at least daily, and preferably hourly\n  //   * Always generate keys using a cryptographically-secure random data source\n  repeated core.DataSource keys = 1\n      [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true];\n}\n\n// [#next-free-field: 11]\nmessage CertificateValidationContext {\n  // Peer certificate verification mode.\n  enum TrustChainVerification {\n    // Perform default certificate verification (e.g., against CA / verification lists)\n    VERIFY_TRUST_CHAIN = 0;\n\n    // Connections where the certificate fails verification will be permitted.\n    // For HTTP connections, the result of certificate verification can be used in route matching. 
(\n    // see :ref:`validated <envoy_api_field_route.RouteMatch.TlsContextMatchOptions.validated>` ).\n    ACCEPT_UNTRUSTED = 1;\n  }\n\n  // TLS certificate data containing certificate authority certificates to use in verifying\n  // a presented peer certificate (e.g. server certificate for clusters or client certificate\n  // for listeners). If not specified and a peer certificate is presented it will not be\n  // verified. By default, a client certificate is optional, unless one of the additional\n  // options (:ref:`require_client_certificate\n  // <envoy_api_field_auth.DownstreamTlsContext.require_client_certificate>`,\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>`,\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>`, or\n  // :ref:`match_subject_alt_names\n  // <envoy_api_field_auth.CertificateValidationContext.match_subject_alt_names>`) is also\n  // specified.\n  //\n  // It can optionally contain certificate revocation lists, in which case Envoy will verify\n  // that the presented peer certificate has not been revoked by one of the included CRLs.\n  //\n  // See :ref:`the TLS overview <arch_overview_ssl_enabling_verification>` for a list of common\n  // system CA locations.\n  core.DataSource trusted_ca = 1;\n\n  // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the\n  // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate\n  // matches one of the specified values.\n  //\n  // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -pubkey\n  //     | openssl pkey -pubin -outform DER\n  //     | openssl dgst -sha256 -binary\n  //     | openssl enc -base64\n  //   NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A=\n  //\n  // This is the format used in HTTP Public Key Pinning.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  //\n  // .. attention::\n  //\n  //   This option is preferred over :ref:`verify_certificate_hash\n  //   <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>`,\n  //   because SPKI is tied to a private key, so it doesn't change when the certificate\n  //   is renewed using the same private key.\n  repeated string verify_certificate_spki = 3\n      [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}];\n\n  // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that\n  // the SHA-256 of the DER-encoded presented certificate matches one of the specified values.\n  //\n  // A hex-encoded SHA-256 of the certificate can be generated with the following command:\n  //\n  // .. code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d\" \" -f2\n  //   df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a\n  //\n  // A long hex-encoded and colon-separated SHA-256 (a.k.a. \"fingerprint\") of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d\"=\" -f2\n  //   DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A\n  //\n  // Both of those formats are acceptable.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  repeated string verify_certificate_hash = 2\n      [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}];\n\n  // An optional list of Subject Alternative Names. If specified, Envoy will verify that the\n  // Subject Alternative Name of the presented certificate matches one of the specified values.\n  //\n  // .. attention::\n  //\n  //   Subject Alternative Names are easily spoofable and verifying only them is insecure,\n  //   therefore this option must be used together with :ref:`trusted_ca\n  //   <envoy_api_field_auth.CertificateValidationContext.trusted_ca>`.\n  repeated string verify_subject_alt_name = 4 [deprecated = true];\n\n  // An optional list of Subject Alternative name matchers. Envoy will verify that the\n  // Subject Alternative Name of the presented certificate matches one of the specified matches.\n  //\n  // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be\n  // configured with exact match type in the :ref:`string matcher <envoy_api_msg_type.matcher.StringMatcher>`.\n  // For example if the certificate has \"\\*.example.com\" as DNS SAN entry, to allow only \"api.example.com\",\n  // it should be configured as shown below.\n  //\n  // .. code-block:: yaml\n  //\n  //  match_subject_alt_names:\n  //    exact: \"api.example.com\"\n  //\n  // .. 
attention::\n  //\n  //   Subject Alternative Names are easily spoofable and verifying only them is insecure,\n  //   therefore this option must be used together with :ref:`trusted_ca\n  //   <envoy_api_field_auth.CertificateValidationContext.trusted_ca>`.\n  repeated type.matcher.StringMatcher match_subject_alt_names = 9;\n\n  // [#not-implemented-hide:] Must present a signed time-stamped OCSP response.\n  google.protobuf.BoolValue require_ocsp_staple = 5;\n\n  // [#not-implemented-hide:] Must present signed certificate time-stamp.\n  google.protobuf.BoolValue require_signed_certificate_timestamp = 6;\n\n  // An optional `certificate revocation list\n  // <https://en.wikipedia.org/wiki/Certificate_revocation_list>`_\n  // (in PEM format). If specified, Envoy will verify that the presented peer\n  // certificate has not been revoked by this CRL. If this DataSource contains\n  // multiple CRLs, all of them will be used.\n  core.DataSource crl = 7;\n\n  // If specified, Envoy will not reject expired certificates.\n  bool allow_expired_certificate = 8;\n\n  // Certificate trust chain verification mode.\n  TrustChainVerification trust_chain_verification = 10\n      [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/auth/secret.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.auth;\n\nimport \"envoy/api/v2/auth/common.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/config_source.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.auth\";\noption java_outer_classname = \"SecretProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tls.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Secrets configuration]\n\nmessage GenericSecret {\n  // Secret of generic type and is available to filters.\n  core.DataSource secret = 1 [(udpa.annotations.sensitive) = true];\n}\n\nmessage SdsSecretConfig {\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  // When both name and config are specified, then secret can be fetched and/or reloaded via\n  // SDS. When only name is specified, then secret will be loaded from static resources.\n  string name = 1;\n\n  core.ConfigSource sds_config = 2;\n}\n\n// [#next-free-field: 6]\nmessage Secret {\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  string name = 1;\n\n  oneof type {\n    TlsCertificate tls_certificate = 2;\n\n    TlsSessionTicketKeys session_ticket_keys = 3;\n\n    CertificateValidationContext validation_context = 4;\n\n    GenericSecret generic_secret = 5;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/auth/tls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.auth;\n\nimport \"envoy/api/v2/auth/common.proto\";\nimport \"envoy/api/v2/auth/secret.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.auth\";\noption java_outer_classname = \"TlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tls.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: TLS transport socket]\n// [#extension: envoy.transport_sockets.tls]\n// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS.\n\nmessage UpstreamTlsContext {\n  // Common TLS context settings.\n  //\n  // .. attention::\n  //\n  //   Server certificate verification is not enabled by default. Configure\n  //   :ref:`trusted_ca<envoy_api_field_auth.CertificateValidationContext.trusted_ca>` to enable\n  //   verification.\n  CommonTlsContext common_tls_context = 1;\n\n  // SNI string to use when creating TLS backend connections.\n  string sni = 2 [(validate.rules).string = {max_bytes: 255}];\n\n  // If true, server-initiated TLS renegotiation will be allowed.\n  //\n  // .. 
attention::\n  //\n  //   TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary.\n  bool allow_renegotiation = 3;\n\n  // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets\n  // for TLSv1.2 and older) to store for the purpose of session resumption.\n  //\n  // Defaults to 1, setting this to 0 disables session resumption.\n  google.protobuf.UInt32Value max_session_keys = 4;\n}\n\n// [#next-free-field: 8]\nmessage DownstreamTlsContext {\n  // Common TLS context settings.\n  CommonTlsContext common_tls_context = 1;\n\n  // If specified, Envoy will reject connections without a valid client\n  // certificate.\n  google.protobuf.BoolValue require_client_certificate = 2;\n\n  // If specified, Envoy will reject connections without a valid and matching SNI.\n  // [#not-implemented-hide:]\n  google.protobuf.BoolValue require_sni = 3;\n\n  oneof session_ticket_keys_type {\n    // TLS session ticket key settings.\n    TlsSessionTicketKeys session_ticket_keys = 4;\n\n    // Config for fetching TLS session ticket keys via SDS API.\n    SdsSecretConfig session_ticket_keys_sds_secret_config = 5;\n\n    // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS\n    // server to not issue TLS session tickets for the purposes of stateless TLS session resumption.\n    // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using\n    // the keys specified through either :ref:`session_ticket_keys <envoy_api_field_auth.DownstreamTlsContext.session_ticket_keys>`\n    // or :ref:`session_ticket_keys_sds_secret_config <envoy_api_field_auth.DownstreamTlsContext.session_ticket_keys_sds_secret_config>`.\n    // If this config is set to false and no keys are explicitly configured, the TLS server will issue\n    // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the\n    // implication that 
sessions cannot be resumed across hot restarts or on different hosts.\n    bool disable_stateless_session_resumption = 7;\n  }\n\n  // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session\n  // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2)\n  // <https://tools.ietf.org/html/rfc5077#section-5.6>`\n  // only seconds could be specified (fractional seconds are going to be ignored).\n  google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = {\n    lt {seconds: 4294967296}\n    gte {}\n  }];\n}\n\n// TLS context shared by both client and server TLS contexts.\n// [#next-free-field: 9]\nmessage CommonTlsContext {\n  message CombinedCertificateValidationContext {\n    // How to validate peer certificates.\n    CertificateValidationContext default_validation_context = 1\n        [(validate.rules).message = {required: true}];\n\n    // Config for fetching validation context via SDS API.\n    SdsSecretConfig validation_context_sds_secret_config = 2\n        [(validate.rules).message = {required: true}];\n  }\n\n  reserved 5;\n\n  // TLS protocol versions, cipher suites etc.\n  TlsParameters tls_params = 1;\n\n  // :ref:`Multiple TLS certificates <arch_overview_ssl_cert_select>` can be associated with the\n  // same context to allow both RSA and ECDSA certificates.\n  //\n  // Only a single TLS certificate is supported in client contexts. 
In server contexts, the first\n  // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is\n  // used for clients that support ECDSA.\n  repeated TlsCertificate tls_certificates = 2;\n\n  // Configs for fetching TLS certificates via SDS API.\n  repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6\n      [(validate.rules).repeated = {max_items: 1}];\n\n  oneof validation_context_type {\n    // How to validate peer certificates.\n    CertificateValidationContext validation_context = 3;\n\n    // Config for fetching validation context via SDS API.\n    SdsSecretConfig validation_context_sds_secret_config = 7;\n\n    // Combined certificate validation context holds a default CertificateValidationContext\n    // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic\n    // and default CertificateValidationContext are merged into a new CertificateValidationContext\n    // for validation. This merge is done by Message::MergeFrom(), so dynamic\n    // CertificateValidationContext overwrites singular fields in default\n    // CertificateValidationContext, and concatenates repeated fields to default\n    // CertificateValidationContext, and logical OR is applied to boolean fields.\n    CombinedCertificateValidationContext combined_validation_context = 8;\n  }\n\n  // Supplies the list of ALPN protocols that the listener should expose. In\n  // practice this is likely to be set to one of two values (see the\n  // :ref:`codec_type\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.codec_type>`\n  // parameter in the HTTP connection manager for more information):\n  //\n  // * \"h2,http/1.1\" If the listener is going to support both HTTP/2 and HTTP/1.1.\n  // * \"http/1.1\" If the listener is only going to support HTTP/1.1.\n  //\n  // There is no default for this parameter. 
If empty, Envoy will not expose ALPN.\n  repeated string alpn_protocols = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/cds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/cluster.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"CdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: CDS]\n\n// Return list of all clusters this proxy will load balance to.\nservice ClusterDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.Cluster\";\n\n  rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:clusters\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage CdsDummy {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/cluster/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.cluster;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.cluster\";\noption java_outer_classname = \"CircuitBreakerProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ClusterNS\";\noption ruby_package = \"Envoy.Api.V2.ClusterNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Circuit breakers]\n\n// :ref:`Circuit breaking<arch_overview_circuit_break>` settings can be\n// specified individually for each defined priority.\nmessage CircuitBreakers {\n  // A Thresholds defines CircuitBreaker settings for a\n  // :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`.\n  // [#next-free-field: 9]\n  message Thresholds {\n    message RetryBudget {\n      // Specifies the limit on concurrent retries as a percentage of the sum of active requests and\n      // active pending requests. For example, if there are 100 active requests and the\n      // budget_percent is set to 25, there may be 25 active retries.\n      //\n      // This parameter is optional. Defaults to 20%.\n      type.Percent budget_percent = 1;\n\n      // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the\n      // number of active retries may never go below this number.\n      //\n      // This parameter is optional. 
Defaults to 3.\n      google.protobuf.UInt32Value min_retry_concurrency = 2;\n    }\n\n    // The :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`\n    // the specified CircuitBreaker settings apply to.\n    core.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // The maximum number of connections that Envoy will make to the upstream\n    // cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_connections = 2;\n\n    // The maximum number of pending requests that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_pending_requests = 3;\n\n    // The maximum number of parallel requests that Envoy will make to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_requests = 4;\n\n    // The maximum number of parallel retries that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 3.\n    google.protobuf.UInt32Value max_retries = 5;\n\n    // Specifies a limit on concurrent retries in relation to the number of active requests. This\n    // parameter is optional.\n    //\n    // .. note::\n    //\n    //    If this field is set, the retry budget will override any configured retry circuit\n    //    breaker.\n    RetryBudget retry_budget = 8;\n\n    // If track_remaining is true, then stats will be published that expose\n    // the number of resources remaining until the circuit breakers open. If\n    // not specified, the default is false.\n    //\n    // .. note::\n    //\n    //    If a retry budget is used in lieu of the max_retries circuit breaker,\n    //    the remaining retry resources remaining will not be tracked.\n    bool track_remaining = 6;\n\n    // The maximum number of connection pools per cluster that Envoy will concurrently support at\n    // once. If not specified, the default is unlimited. 
Set this for clusters which create a\n    // large number of connection pools. See\n    // :ref:`Circuit Breaking <arch_overview_circuit_break_cluster_maximum_connection_pools>` for\n    // more details.\n    google.protobuf.UInt32Value max_connection_pools = 7;\n  }\n\n  // If multiple :ref:`Thresholds<envoy_api_msg_cluster.CircuitBreakers.Thresholds>`\n  // are defined with the same :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`,\n  // the first one in the list is used. If no Thresholds is defined for a given\n  // :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`, the default values\n  // are used.\n  repeated Thresholds thresholds = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/cluster/filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.cluster;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.cluster\";\noption java_outer_classname = \"FilterProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ClusterNS\";\noption ruby_package = \"Envoy.Api.V2.ClusterNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Upstream filters]\n// Upstream filters apply to the connections to the upstream cluster hosts.\n\nmessage Filter {\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any typed_config = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.cluster;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.cluster\";\noption java_outer_classname = \"OutlierDetectionProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ClusterNS\";\noption ruby_package = \"Envoy.Api.V2.ClusterNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Outlier detection]\n\n// See the :ref:`architecture overview <arch_overview_outlier_detection>` for\n// more information on outlier detection.\n// [#next-free-field: 21]\nmessage OutlierDetection {\n  // The number of consecutive 5xx responses or local origin errors that are mapped\n  // to 5xx error codes before a consecutive 5xx ejection\n  // occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_5xx = 1;\n\n  // The time interval between ejection analysis sweeps. This can result in\n  // both new ejections as well as hosts being returned to service. Defaults\n  // to 10000ms or 10s.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}];\n\n  // The base time that a host is ejected for. The real time is equal to the\n  // base time multiplied by the number of times the host has been ejected.\n  // Defaults to 30000ms or 30s.\n  google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];\n\n  // The maximum % of an upstream cluster that can be ejected due to outlier\n  // detection. 
Defaults to 10% but will eject at least one host regardless of the value.\n  google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive 5xx. This setting can be used to disable\n  // ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics. This setting can be used to\n  // disable ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}];\n\n  // The number of hosts in a cluster that must have enough request volume to\n  // detect success rate outliers. If the number of hosts is less than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for any host in the cluster. Defaults to 5.\n  google.protobuf.UInt32Value success_rate_minimum_hosts = 7;\n\n  // The minimum number of total requests that must be collected in one\n  // interval (as defined by the interval duration above) to include this host\n  // in success rate based outlier detection. If the volume is lower than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for that host. Defaults to 100.\n  google.protobuf.UInt32Value success_rate_request_volume = 8;\n\n  // This factor is used to determine the ejection threshold for success rate\n  // outlier ejection. The ejection threshold is the difference between the\n  // mean success rate, and the product of this factor and the standard\n  // deviation of the mean success rate: mean - (stdev *\n  // success_rate_stdev_factor). This factor is divided by a thousand to get a\n  // double. 
That is, if the desired factor is 1.9, the runtime value should\n  // be 1900. Defaults to 1900.\n  google.protobuf.UInt32Value success_rate_stdev_factor = 9;\n\n  // The number of consecutive gateway failures (502, 503, 504 status codes)\n  // before a consecutive gateway failure ejection occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_gateway_failure = 10;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive gateway failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. Defaults to 0.\n  google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // Determines whether to distinguish local origin failures from external errors. If set to true\n  // the following configuration parameters are taken into account:\n  // :ref:`consecutive_local_origin_failure<envoy_api_field_cluster.OutlierDetection.consecutive_local_origin_failure>`,\n  // :ref:`enforcing_consecutive_local_origin_failure<envoy_api_field_cluster.OutlierDetection.enforcing_consecutive_local_origin_failure>`\n  // and\n  // :ref:`enforcing_local_origin_success_rate<envoy_api_field_cluster.OutlierDetection.enforcing_local_origin_success_rate>`.\n  // Defaults to false.\n  bool split_external_local_origin_errors = 12;\n\n  // The number of consecutive locally originated failures before ejection\n  // occurs. Defaults to 5. Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value consecutive_local_origin_failure = 13;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive locally originated failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. 
Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics for locally originated errors.\n  // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The failure percentage to use when determining failure percentage-based outlier detection. If\n  // the failure percentage of a given host is greater than or equal to this value, it will be\n  // ejected. Defaults to 85.\n  google.protobuf.UInt32Value failure_percentage_threshold = 16\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // failure percentage statistics. This setting can be used to disable ejection or to ramp it up\n  // slowly. Defaults to 0.\n  //\n  // [#next-major-version: setting this without setting failure_percentage_threshold should be\n  // invalid in v4.]\n  google.protobuf.UInt32Value enforcing_failure_percentage = 17\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // local-origin failure percentage statistics. This setting can be used to disable ejection or to\n  // ramp it up slowly. 
Defaults to 0.\n  google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection.\n  // If the total number of hosts in the cluster is less than this value, failure percentage-based\n  // ejection will not be performed. Defaults to 5.\n  google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19;\n\n  // The minimum number of total requests that must be collected in one interval (as defined by the\n  // interval duration above) to perform failure percentage-based ejection for this host. If the\n  // volume is lower than this setting, failure percentage-based ejection will not be performed for\n  // this host. Defaults to 50.\n  google.protobuf.UInt32Value failure_percentage_request_volume = 20;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/auth/tls.proto\";\nimport \"envoy/api/v2/cluster/circuit_breaker.proto\";\nimport \"envoy/api/v2/cluster/filter.proto\";\nimport \"envoy/api/v2/cluster/outlier_detection.proto\";\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/api/v2/core/health_check.proto\";\nimport \"envoy/api/v2/core/protocol.proto\";\nimport \"envoy/api/v2/endpoint.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Cluster configuration]\n\n// Configuration for a single upstream cluster.\n// [#next-free-field: 48]\nmessage Cluster {\n  // Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`\n  // for an explanation on each type.\n  enum DiscoveryType {\n    // Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`\n    // for an explanation.\n    STATIC = 0;\n\n    // Refer to the :ref:`strict DNS discovery\n    // type<arch_overview_service_discovery_types_strict_dns>`\n    // for an explanation.\n    STRICT_DNS = 1;\n\n    // Refer to the :ref:`logical DNS discovery\n    // type<arch_overview_service_discovery_types_logical_dns>`\n    // for an explanation.\n    LOGICAL_DNS = 2;\n\n    // 
Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`\n    // for an explanation.\n    EDS = 3;\n\n    // Refer to the :ref:`original destination discovery\n    // type<arch_overview_service_discovery_types_original_destination>`\n    // for an explanation.\n    ORIGINAL_DST = 4;\n  }\n\n  // Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture\n  // overview section for information on each type.\n  enum LbPolicy {\n    // Refer to the :ref:`round robin load balancing\n    // policy<arch_overview_load_balancing_types_round_robin>`\n    // for an explanation.\n    ROUND_ROBIN = 0;\n\n    // Refer to the :ref:`least request load balancing\n    // policy<arch_overview_load_balancing_types_least_request>`\n    // for an explanation.\n    LEAST_REQUEST = 1;\n\n    // Refer to the :ref:`ring hash load balancing\n    // policy<arch_overview_load_balancing_types_ring_hash>`\n    // for an explanation.\n    RING_HASH = 2;\n\n    // Refer to the :ref:`random load balancing\n    // policy<arch_overview_load_balancing_types_random>`\n    // for an explanation.\n    RANDOM = 3;\n\n    // Refer to the :ref:`original destination load balancing\n    // policy<arch_overview_load_balancing_types_original_destination>`\n    // for an explanation.\n    //\n    // .. attention::\n    //\n    //   **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead.\n    //\n    ORIGINAL_DST_LB = 4 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`\n    // for an explanation.\n    MAGLEV = 5;\n\n    // This load balancer type must be specified if the configured cluster provides a cluster\n    // specific load balancer. 
Consult the configured cluster's documentation for whether to set\n    // this option or not.\n    CLUSTER_PROVIDED = 6;\n\n    // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy\n    // <envoy_api_field_Cluster.load_balancing_policy>` field to determine the LB policy.\n    // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field\n    // and instead using the new load_balancing_policy field as the one and only mechanism for\n    // configuring this.]\n    LOAD_BALANCING_POLICY_CONFIG = 7;\n  }\n\n  // When V4_ONLY is selected, the DNS resolver will only perform a lookup for\n  // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will\n  // only perform a lookup for addresses in the IPv6 family. If AUTO is\n  // specified, the DNS resolver will first perform a lookup for addresses in\n  // the IPv6 family and fallback to a lookup for addresses in the IPv4 family.\n  // For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this setting is\n  // ignored.\n  enum DnsLookupFamily {\n    AUTO = 0;\n    V4_ONLY = 1;\n    V6_ONLY = 2;\n  }\n\n  enum ClusterProtocolSelection {\n    // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).\n    // If :ref:`http2_protocol_options <envoy_api_field_Cluster.http2_protocol_options>` are\n    // present, HTTP2 will be used, otherwise HTTP1.1 will be used.\n    USE_CONFIGURED_PROTOCOL = 0;\n\n    // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.\n    USE_DOWNSTREAM_PROTOCOL = 1;\n  }\n\n  // TransportSocketMatch specifies what transport socket config will be used\n  // when the match conditions are satisfied.\n  message TransportSocketMatch {\n    // The name of the match, used in stats generation.\n    string name = 1 [(validate.rules).string = 
{min_len: 1}];\n\n    // Optional endpoint metadata match criteria.\n    // The connection to the endpoint with metadata matching what is set in this field\n    // will use the transport socket configuration specified here.\n    // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match\n    // against the values specified in this field.\n    google.protobuf.Struct match = 2;\n\n    // The configuration of the transport socket.\n    core.TransportSocket transport_socket = 3;\n  }\n\n  // Extended cluster type.\n  message CustomClusterType {\n    // The type of the cluster to instantiate. The name must match a supported cluster type.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Cluster specific configuration which depends on the cluster being instantiated.\n    // See the supported cluster for further documentation.\n    google.protobuf.Any typed_config = 2;\n  }\n\n  // Only valid when discovery type is EDS.\n  message EdsClusterConfig {\n    // Configuration for the source of EDS updates for this Cluster.\n    core.ConfigSource eds_config = 1;\n\n    // Optional alternative to cluster name to present to EDS. This does not\n    // have the same restrictions as cluster name, i.e. it may be arbitrary\n    // length.\n    string service_name = 2;\n  }\n\n  // Optionally divide the endpoints in this cluster into subsets defined by\n  // endpoint metadata and selected by route and weighted cluster metadata.\n  // [#next-free-field: 8]\n  message LbSubsetConfig {\n    // If NO_FALLBACK is selected, a result\n    // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,\n    // any cluster endpoint may be returned (subject to policy, health checks,\n    // etc). 
If DEFAULT_SUBSET is selected, load balancing is performed over the\n    // endpoints matching the values from the default_subset field.\n    enum LbSubsetFallbackPolicy {\n      NO_FALLBACK = 0;\n      ANY_ENDPOINT = 1;\n      DEFAULT_SUBSET = 2;\n    }\n\n    // Specifications for subsets.\n    message LbSubsetSelector {\n      // Allows to override top level fallback policy per selector.\n      enum LbSubsetSelectorFallbackPolicy {\n        // If NOT_DEFINED top level config fallback policy is used instead.\n        NOT_DEFINED = 0;\n\n        // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.\n        NO_FALLBACK = 1;\n\n        // If ANY_ENDPOINT is selected, any cluster endpoint may be returned\n        // (subject to policy, health checks, etc).\n        ANY_ENDPOINT = 2;\n\n        // If DEFAULT_SUBSET is selected, load balancing is performed over the\n        // endpoints matching the values from the default_subset field.\n        DEFAULT_SUBSET = 3;\n\n        // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata\n        // keys reduced to\n        // :ref:`fallback_keys_subset<envoy_api_field_Cluster.LbSubsetConfig.LbSubsetSelector.fallback_keys_subset>`.\n        // It allows for a fallback to a different, less specific selector if some of the keys of\n        // the selector are considered optional.\n        KEYS_SUBSET = 4;\n      }\n\n      // List of keys to match with the weighted cluster metadata.\n      repeated string keys = 1;\n\n      // The behavior used when no endpoint subset matches the selected route's\n      // metadata.\n      LbSubsetSelectorFallbackPolicy fallback_policy = 2\n          [(validate.rules).enum = {defined_only: true}];\n\n      // Subset of\n      // :ref:`keys<envoy_api_field_Cluster.LbSubsetConfig.LbSubsetSelector.keys>` used by\n      // 
:ref:`KEYS_SUBSET<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`\n      // fallback policy.\n      // It has to be a non empty list if KEYS_SUBSET fallback policy is selected.\n      // For any other fallback policy the parameter is not used and should not be set.\n      // Only values also present in\n      // :ref:`keys<envoy_api_field_Cluster.LbSubsetConfig.LbSubsetSelector.keys>` are allowed, but\n      // `fallback_keys_subset` cannot be equal to `keys`.\n      repeated string fallback_keys_subset = 3;\n    }\n\n    // The behavior used when no endpoint subset matches the selected route's\n    // metadata. The value defaults to\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // Specifies the default subset of endpoints used during fallback if\n    // fallback_policy is\n    // :ref:`DEFAULT_SUBSET<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.\n    // Each field in default_subset is\n    // compared to the matching LbEndpoint.Metadata under the *envoy.lb*\n    // namespace. It is valid for no hosts to match, in which case the behavior\n    // is the same as a fallback_policy of\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    google.protobuf.Struct default_subset = 2;\n\n    // For each entry, LbEndpoint.Metadata's\n    // *envoy.lb* namespace is traversed and a subset is created for each unique\n    // combination of key and value. For example:\n    //\n    // .. 
code-block:: json\n    //\n    //   { \"subset_selectors\": [\n    //       { \"keys\": [ \"version\" ] },\n    //       { \"keys\": [ \"stage\", \"hardware_type\" ] }\n    //   ]}\n    //\n    // A subset is matched when the metadata from the selected route and\n    // weighted cluster contains the same keys and values as the subset's\n    // metadata. The same host may appear in multiple subsets.\n    repeated LbSubsetSelector subset_selectors = 3;\n\n    // If true, routing to subsets will take into account the localities and locality weights of the\n    // endpoints when making the routing decision.\n    //\n    // There are some potential pitfalls associated with enabling this feature, as the resulting\n    // traffic split after applying both a subset match and locality weights might be undesirable.\n    //\n    // Consider for example a situation in which you have 50/50 split across two localities X/Y\n    // which have 100 hosts each without subsetting. If the subset LB results in X having only 1\n    // host selected but Y having 100, then a lot more load is being dumped on the single host in X\n    // than originally anticipated in the load balancing assignment delivered via EDS.\n    bool locality_weight_aware = 4;\n\n    // When used with locality_weight_aware, scales the weight of each locality by the ratio\n    // of hosts in the subset vs hosts in the original subset. This aims to even out the load\n    // going to an individual locality if said locality is disproportionately affected by the\n    // subset predicate.\n    bool scale_locality_weight = 5;\n\n    // If true, when a fallback policy is configured and its corresponding subset fails to find\n    // a host this will cause any host to be selected instead.\n    //\n    // This is useful when using the default subset as the fallback policy, given the default\n    // subset might become empty. 
With this option enabled, if that happens the LB will attempt\n    // to select a host from the entire cluster.\n    bool panic_mode_any = 6;\n\n    // If true, metadata specified for a metadata key will be matched against the corresponding\n    // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value\n    // and any of the elements in the list matches the criteria.\n    bool list_as_any = 7;\n  }\n\n  // Specific configuration for the LeastRequest load balancing policy.\n  message LeastRequestLbConfig {\n    // The number of random healthy hosts from which the host with the fewest active requests will\n    // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.\n    google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];\n  }\n\n  // Specific configuration for the :ref:`RingHash<arch_overview_load_balancing_types_ring_hash>`\n  // load balancing policy.\n  message RingHashLbConfig {\n    // The hash function used to hash hosts onto the ketama ring.\n    enum HashFunction {\n      // Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.\n      XX_HASH = 0;\n\n      // Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with\n      // std:hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled\n      // on Linux and not macOS.\n      MURMUR_HASH_2 = 1;\n    }\n\n    reserved 2;\n\n    // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each\n    // provided host) the better the request distribution will reflect the desired weights. Defaults\n    // to 1024 entries, and limited to 8M entries. 
See also\n    // :ref:`maximum_ring_size<envoy_api_field_Cluster.RingHashLbConfig.maximum_ring_size>`.\n    google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];\n\n    // The hash function used to hash hosts onto the ketama ring. The value defaults to\n    // :ref:`XX_HASH<envoy_api_enum_value_Cluster.RingHashLbConfig.HashFunction.XX_HASH>`.\n    HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];\n\n    // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered\n    // to further constrain resource use. See also\n    // :ref:`minimum_ring_size<envoy_api_field_Cluster.RingHashLbConfig.minimum_ring_size>`.\n    google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];\n  }\n\n  // Specific configuration for the\n  // :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`\n  // load balancing policy.\n  message OriginalDstLbConfig {\n    // When true, :ref:`x-envoy-original-dst-host\n    // <config_http_conn_man_headers_x-envoy-original-dst-host>` can be used to override destination\n    // address.\n    //\n    // .. attention::\n    //\n    //   This header isn't sanitized by default, so enabling this feature allows HTTP clients to\n    //   route traffic to arbitrary hosts and/or ports, which may have serious security\n    //   consequences.\n    bool use_http_header = 1;\n  }\n\n  // Common configuration for all load balancer implementations.\n  // [#next-free-field: 8]\n  message CommonLbConfig {\n    // Configuration for :ref:`zone aware routing\n    // <arch_overview_load_balancing_zone_aware_routing>`.\n    message ZoneAwareLbConfig {\n      // Configures percentage of requests that will be considered for zone aware routing\n      // if zone aware routing is configured. 
If not specified, the default is 100%.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      type.Percent routing_enabled = 1;\n\n      // Configures minimum upstream cluster size required for zone aware routing\n      // If upstream cluster size is less than specified, zone aware routing is not performed\n      // even if zone aware routing is configured. If not specified, the default is 6.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      google.protobuf.UInt64Value min_cluster_size = 2;\n\n      // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic\n      // mode<arch_overview_load_balancing_panic_threshold>`. Instead, the cluster will fail all\n      // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a\n      // failing service.\n      bool fail_traffic_on_panic = 3;\n    }\n\n    // Configuration for :ref:`locality weighted load balancing\n    // <arch_overview_load_balancing_locality_weighted_lb>`\n    message LocalityWeightedLbConfig {\n    }\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    message ConsistentHashingLbConfig {\n      // If set to `true`, the cluster will use hostname instead of the resolved\n      // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.\n      bool use_hostname_for_hashing = 1;\n    }\n\n    // Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.\n    // If not specified, the default is 50%.\n    // To disable panic mode, set to 0%.\n    //\n    // .. 
note::\n    //   The specified percent will be truncated to the nearest 1%.\n    type.Percent healthy_panic_threshold = 1;\n\n    oneof locality_config_specifier {\n      ZoneAwareLbConfig zone_aware_lb_config = 2;\n\n      LocalityWeightedLbConfig locality_weighted_lb_config = 3;\n    }\n\n    // If set, all health check/weight/metadata updates that happen within this duration will be\n    // merged and delivered in one shot when the duration expires. The start of the duration is when\n    // the first update happens. This is useful for big clusters, with potentially noisy deploys\n    // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes\n    // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new\n    // cluster). Please always keep in mind that the use of sandbox technologies may change this\n    // behavior.\n    //\n    // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge\n    // window to 0.\n    //\n    // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is\n    // because merging those updates isn't currently safe. See\n    // https://github.com/envoyproxy/envoy/pull/3941.\n    google.protobuf.Duration update_merge_window = 4;\n\n    // If set to true, Envoy will not consider new hosts when computing load balancing weights until\n    // they have been health checked for the first time. 
This will have no effect unless\n    // active health checking is also configured.\n    //\n    // Ignoring a host means that for any load balancing calculations that adjust weights based\n    // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and\n    // panic mode) Envoy will exclude these hosts in the denominator.\n    //\n    // For example, with hosts in two priorities P0 and P1, where P0 looks like\n    // {healthy, unhealthy (new), unhealthy (new)}\n    // and where P1 looks like\n    // {healthy, healthy}\n    // all traffic will still hit P0, as 1 / (3 - 2) = 1.\n    //\n    // Enabling this will allow scaling up the number of hosts for a given cluster without entering\n    // panic mode or triggering priority spillover, assuming the hosts pass the first health check.\n    //\n    // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not\n    // contribute to the calculation when deciding whether panic mode is enabled or not.\n    bool ignore_new_hosts_until_first_hc = 5;\n\n    // If set to `true`, the cluster manager will drain all existing\n    // connections to upstream hosts whenever hosts are added or removed from the cluster.\n    bool close_connections_on_host_set_change = 6;\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    ConsistentHashingLbConfig consistent_hashing_lb_config = 7;\n  }\n\n  message RefreshRate {\n    // Specifies the base interval between refreshes. This parameter is required and must be greater\n    // than zero and less than\n    // :ref:`max_interval <envoy_api_field_Cluster.RefreshRate.max_interval>`.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {nanos: 1000000}\n    }];\n\n    // Specifies the maximum interval between refreshes. 
This parameter is optional, but must be\n    // greater than or equal to the\n    // :ref:`base_interval <envoy_api_field_Cluster.RefreshRate.base_interval>`  if set. The default\n    // is 10 times the :ref:`base_interval <envoy_api_field_Cluster.RefreshRate.base_interval>`.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}];\n  }\n\n  reserved 12, 15;\n\n  // Configuration to use different transport sockets for different endpoints.\n  // The entry of *envoy.transport_socket_match* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_endpoint.LbEndpoint.metadata>`\n  // is used to match against the transport sockets as they appear in the list. The first\n  // :ref:`match <envoy_api_msg_Cluster.TransportSocketMatch>` is used.\n  // For example, with the following match\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"enableMTLS\"\n  //    match:\n  //      acceptMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... } # tls socket configuration\n  //  - name: \"defaultToPlaintext\"\n  //    match: {}\n  //    transport_socket:\n  //      name: envoy.transport_sockets.raw_buffer\n  //\n  // Connections to the endpoints whose metadata value under *envoy.transport_socket_match*\n  // having \"acceptMTLS\"/\"true\" key/value pair use the \"enableMTLS\" socket configuration.\n  //\n  // If a :ref:`socket match <envoy_api_msg_Cluster.TransportSocketMatch>` with empty match\n  // criteria is provided, that always matches any endpoint. 
For example, the \"defaultToPlaintext\"\n  // socket match in case above.\n  //\n  // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any\n  // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or\n  // *transport_socket* specified in this cluster.\n  //\n  // This field allows gradual and flexible transport socket configuration changes.\n  //\n  // The metadata of endpoints in EDS can indicate transport socket capabilities. For example,\n  // an endpoint's metadata can have two key value pairs as \"acceptMTLS\": \"true\",\n  // \"acceptPlaintext\": \"true\". While some other endpoints, only accepting plaintext traffic\n  // has \"acceptPlaintext\": \"true\" metadata information.\n  //\n  // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS\n  // traffic for endpoints with \"acceptMTLS\": \"true\", by adding a corresponding\n  // *TransportSocketMatch* in this field. Other client Envoys receive CDS without\n  // *transport_socket_match* set, and still send plain text traffic to the same cluster.\n  //\n  // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]\n  repeated TransportSocketMatch transport_socket_matches = 43;\n\n  // Supplies the name of the cluster which must be unique across all clusters.\n  // The cluster name is used when emitting\n  // :ref:`statistics <config_cluster_manager_cluster_stats>` if :ref:`alt_stat_name\n  // <envoy_api_field_Cluster.alt_stat_name>` is not provided.\n  // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // An optional alternative to the cluster name to be used while emitting stats.\n  // Any ``:`` in the name will be converted to ``_`` when emitting statistics. 
This should not be\n  // confused with :ref:`Router Filter Header\n  // <config_http_filters_router_x-envoy-upstream-alt-stat-name>`.\n  string alt_stat_name = 28;\n\n  oneof cluster_discovery_type {\n    // The :ref:`service discovery type <arch_overview_service_discovery_types>`\n    // to use for resolving the cluster.\n    DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];\n\n    // The custom cluster type.\n    CustomClusterType cluster_type = 38;\n  }\n\n  // Configuration to use for EDS updates for the Cluster.\n  EdsClusterConfig eds_cluster_config = 3;\n\n  // The timeout for new network connections to hosts in the cluster.\n  google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];\n\n  // Soft limit on size of the cluster’s connections read and write buffers. If\n  // unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5;\n\n  // The :ref:`load balancer type <arch_overview_load_balancing_types>` to use\n  // when picking a host in the cluster.\n  LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}];\n\n  // If the service discovery type is\n  // :ref:`STATIC<envoy_api_enum_value_Cluster.DiscoveryType.STATIC>`,\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // then hosts is required.\n  //\n  // .. attention::\n  //\n  //   **This field is deprecated**. 
Set the\n  //   :ref:`load_assignment<envoy_api_field_Cluster.load_assignment>` field instead.\n  //\n  repeated core.Address hosts = 7 [deprecated = true];\n\n  // Setting this is required for specifying members of\n  // :ref:`STATIC<envoy_api_enum_value_Cluster.DiscoveryType.STATIC>`,\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>` clusters.\n  // This field supersedes the *hosts* field in the v2 API.\n  //\n  // .. attention::\n  //\n  //   Setting this allows non-EDS cluster types to contain embedded EDS equivalent\n  //   :ref:`endpoint assignments<envoy_api_msg_ClusterLoadAssignment>`.\n  //\n  ClusterLoadAssignment load_assignment = 33;\n\n  // Optional :ref:`active health checking <arch_overview_health_checking>`\n  // configuration for the cluster. If no\n  // configuration is specified no health checking will be done and all cluster\n  // members will be considered healthy at all times.\n  repeated core.HealthCheck health_checks = 8;\n\n  // Optional maximum requests for a single upstream connection. This parameter\n  // is respected by both the HTTP/1.1 and HTTP/2 connection pool\n  // implementations. If not specified, there is no limit. Setting this\n  // parameter to 1 will effectively disable keep alive.\n  google.protobuf.UInt32Value max_requests_per_connection = 9;\n\n  // Optional :ref:`circuit breaking <arch_overview_circuit_break>` for the cluster.\n  cluster.CircuitBreakers circuit_breakers = 10;\n\n  // The TLS configuration for connections to the upstream cluster.\n  //\n  // .. attention::\n  //\n  //   **This field is deprecated**. Use `transport_socket` with name `tls` instead. 
If both are\n  //   set, `transport_socket` takes priority.\n  auth.UpstreamTlsContext tls_context = 11\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // HTTP protocol options that are applied only to upstream HTTP connections.\n  // These options apply to all HTTP versions.\n  core.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46;\n\n  // Additional options when handling HTTP requests upstream. These options will be applicable to\n  // both HTTP1 and HTTP2 requests.\n  core.HttpProtocolOptions common_http_protocol_options = 29;\n\n  // Additional options when handling HTTP1 requests.\n  core.Http1ProtocolOptions http_protocol_options = 13;\n\n  // Even if default HTTP2 protocol options are desired, this field must be\n  // set so that Envoy will assume that the upstream supports HTTP/2 when\n  // making new HTTP connection pool connections. Currently, Envoy only\n  // supports prior knowledge for upstream connections. Even if TLS is used\n  // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2\n  // connections to happen over plain text.\n  core.Http2ProtocolOptions http2_protocol_options = 14;\n\n  // The extension_protocol_options field is used to provide extension-specific protocol options\n  // for upstream connections. The key should match the extension filter name, such as\n  // \"envoy.filters.network.thrift_proxy\". See the extension's documentation for details on\n  // specific options.\n  map<string, google.protobuf.Struct> extension_protocol_options = 35\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // The extension_protocol_options field is used to provide extension-specific protocol options\n  // for upstream connections. The key should match the extension filter name, such as\n  // \"envoy.filters.network.thrift_proxy\". 
See the extension's documentation for details on\n  // specific options.\n  map<string, google.protobuf.Any> typed_extension_protocol_options = 36;\n\n  // If the DNS refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used as the cluster’s DNS refresh\n  // rate. The value configured must be at least 1ms. If this setting is not specified, the\n  // value defaults to 5000ms. For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  google.protobuf.Duration dns_refresh_rate = 16\n      [(validate.rules).duration = {gt {nanos: 1000000}}];\n\n  // If the DNS failure refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is\n  // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types\n  // other than :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>` this setting is\n  // ignored.\n  RefreshRate dns_failure_refresh_rate = 44;\n\n  // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true,\n  // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS\n  // resolution.\n  bool respect_dns_ttl = 39;\n\n  // The DNS IP address resolution policy. 
If this setting is not specified, the\n  // value defaults to\n  // :ref:`AUTO<envoy_api_enum_value_Cluster.DnsLookupFamily.AUTO>`.\n  DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}];\n\n  // If DNS resolvers are specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used to specify the cluster’s dns resolvers.\n  // If this setting is not specified, the value defaults to the default\n  // resolver, which uses /etc/resolv.conf for configuration. For cluster types\n  // other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple's API only allows overriding DNS resolvers via system settings.\n  repeated core.Address dns_resolvers = 18;\n\n  // [#next-major-version: Reconcile DNS options in a single message.]\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. 
Apple's API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 45;\n\n  // If specified, outlier detection will be enabled for this upstream cluster.\n  // Each of the configuration values can be overridden via\n  // :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.\n  cluster.OutlierDetection outlier_detection = 19;\n\n  // The interval for removing stale hosts from a cluster type\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_Cluster.DiscoveryType.ORIGINAL_DST>`.\n  // Hosts are considered stale if they have not been used\n  // as upstream destinations during this interval. New hosts are added\n  // to original destination clusters on demand as new connections are\n  // redirected to Envoy, causing the number of hosts in the cluster to\n  // grow over time. Hosts that are not stale (they are actively used as\n  // destinations) are kept in the cluster, which allows connections to\n  // them to remain open, saving the latency that would otherwise be spent\n  // on opening new connections. If this setting is not specified, the\n  // value defaults to 5000ms. For cluster types other than\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_Cluster.DiscoveryType.ORIGINAL_DST>`\n  // this setting is ignored.\n  google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This overrides any bind_config specified in the bootstrap proto.\n  // If the address and port are empty, no bind will be performed.\n  core.BindConfig upstream_bind_config = 21;\n\n  // Configuration for load balancing subsetting.\n  LbSubsetConfig lb_subset_config = 22;\n\n  // Optional configuration for the load balancing algorithm selected by\n  // LbPolicy. 
Currently only\n  // :ref:`RING_HASH<envoy_api_enum_value_Cluster.LbPolicy.RING_HASH>` and\n  // :ref:`LEAST_REQUEST<envoy_api_enum_value_Cluster.LbPolicy.LEAST_REQUEST>`\n  // has additional configuration options.\n  // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding\n  // LbPolicy will generate an error at runtime.\n  oneof lb_config {\n    // Optional configuration for the Ring Hash load balancing policy.\n    RingHashLbConfig ring_hash_lb_config = 23;\n\n    // Optional configuration for the Original Destination load balancing policy.\n    OriginalDstLbConfig original_dst_lb_config = 34;\n\n    // Optional configuration for the LeastRequest load balancing policy.\n    LeastRequestLbConfig least_request_lb_config = 37;\n  }\n\n  // Common configuration for all load balancer implementations.\n  CommonLbConfig common_lb_config = 27;\n\n  // Optional custom transport socket implementation to use for upstream connections.\n  // To setup TLS, set a transport socket with name `tls` and\n  // :ref:`UpstreamTlsContexts <envoy_api_msg_auth.UpstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.TransportSocket transport_socket = 24;\n\n  // The Metadata field can be used to provide additional information about the\n  // cluster. It can be used for stats, logging, and varying filter behavior.\n  // Fields should use reverse DNS notation to denote which entity within Envoy\n  // will need the information. 
For instance, if the metadata is intended for\n  // the Router filter, the filter name should be specified as *envoy.filters.http.router*.\n  core.Metadata metadata = 25;\n\n  // Determines how Envoy selects the protocol used to speak to upstream hosts.\n  ClusterProtocolSelection protocol_selection = 26;\n\n  // Optional options for upstream connections.\n  UpstreamConnectionOptions upstream_connection_options = 30;\n\n  // If an upstream host becomes unhealthy (as determined by the configured health checks\n  // or outlier detection), immediately close all connections to the failed host.\n  //\n  // .. note::\n  //\n  //   This is currently only supported for connections created by tcp_proxy.\n  //\n  // .. note::\n  //\n  //   The current implementation of this feature closes all connections immediately when\n  //   the unhealthy status is detected. If there are a large number of connections open\n  //   to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of\n  //   time exclusively closing these connections, and not processing any other traffic.\n  bool close_connections_on_host_health_failure = 31;\n\n  // If set to true, Envoy will ignore the health value of a host when processing its removal\n  // from service discovery. This means that if active health checking is used, Envoy will *not*\n  // wait for the endpoint to go unhealthy before removing it.\n  bool drain_connections_on_host_removal = 32\n      [(udpa.annotations.field_migrate).rename = \"ignore_health_on_host_removal\"];\n\n  // An (optional) network filter chain, listed in the order the filters should be applied.\n  // The chain will be applied to all outgoing connections that Envoy makes to the upstream\n  // servers of this cluster.\n  repeated cluster.Filter filters = 40;\n\n  // [#not-implemented-hide:] New mechanism for LB policy configuration. 
Used only if the\n  // :ref:`lb_policy<envoy_api_field_Cluster.lb_policy>` field has the value\n  // :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>`.\n  LoadBalancingPolicy load_balancing_policy = 41;\n\n  // [#not-implemented-hide:]\n  // If present, tells the client where to send load reports via LRS. If not present, the\n  // client will fall back to a client-side default, which may be either (a) don't send any\n  // load reports or (b) send load reports for all clusters to a single default server\n  // (which may be configured in the bootstrap file).\n  //\n  // Note that if multiple clusters point to the same LRS server, the client may choose to\n  // create a separate stream for each cluster or it may choose to coalesce the data for\n  // multiple clusters onto a single stream. Either way, the client must make sure to send\n  // the data for any given cluster on no more than one stream.\n  //\n  // [#next-major-version: In the v3 API, we should consider restructuring this somehow,\n  // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation\n  // from the LRS stream here.]\n  core.ConfigSource lrs_server = 42;\n\n  // If track_timeout_budgets is true, the :ref:`timeout budget histograms\n  // <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each\n  // request. These show what percentage of a request's per try and global timeout was used. A value\n  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value\n  // of 100 would indicate that the request took the entirety of the timeout given to it.\n  bool track_timeout_budgets = 47;\n}\n\n// [#not-implemented-hide:] Extensible load balancing policy configuration.\n//\n// Every LB policy defined via this mechanism will be identified via a unique name using reverse\n// DNS notation. 
If the policy needs configuration parameters, it must define a message for its\n// own configuration, which will be stored in the config field. The name of the policy will tell\n// clients which type of message they should expect to see in the config field.\n//\n// Note that there are cases where it is useful to be able to independently select LB policies\n// for choosing a locality and for choosing an endpoint within that locality. For example, a\n// given deployment may always use the same policy to choose the locality, but for choosing the\n// endpoint within the locality, some clusters may use weighted-round-robin, while others may\n// use some sort of session-based balancing.\n//\n// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a\n// child LB policy for each locality. For each request, the parent chooses the locality and then\n// delegates to the child policy for that locality to choose the endpoint within the locality.\n//\n// To facilitate this, the config message for the top-level LB policy may include a field of\n// type LoadBalancingPolicy that specifies the child policy.\nmessage LoadBalancingPolicy {\n  message Policy {\n    // Required. The name of the LB policy.\n    string name = 1;\n\n    // Optional config for the LB policy.\n    // No more than one of these two fields may be populated.\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Each client will iterate over the list in order and stop at the first policy that it\n  // supports. 
This provides a mechanism for starting to use new LB policies that are not yet\n  // supported by all clients.\n  repeated Policy policies = 1;\n}\n\n// An extensible structure containing the address Envoy should bind to when\n// establishing upstream connections.\nmessage UpstreamBindConfig {\n  // The address Envoy should bind to when establishing upstream connections.\n  core.Address source_address = 1;\n}\n\nmessage UpstreamConnectionOptions {\n  // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.\n  core.TcpKeepalive tcp_keepalive = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/address.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/socket_option.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"AddressProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Network addresses]\n\nmessage Pipe {\n  // Unix Domain Socket path. On Linux, paths starting with '@' will use the\n  // abstract namespace. The starting '@' is replaced by a null byte by Envoy.\n  // Paths starting with '@' will result in an error in environments other than\n  // Linux.\n  string path = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The mode for the Pipe. Not applicable for abstract sockets.\n  uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}];\n}\n\n// [#next-free-field: 7]\nmessage SocketAddress {\n  enum Protocol {\n    TCP = 0;\n    UDP = 1;\n  }\n\n  Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The address for this socket. :ref:`Listeners <config_listeners>` will bind\n  // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::``\n  // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented:\n  // It is possible to distinguish a Listener address via the prefix/suffix matching\n  // in :ref:`FilterChainMatch <envoy_api_msg_listener.FilterChainMatch>`.] When used\n  // within an upstream :ref:`BindConfig <envoy_api_msg_core.BindConfig>`, the address\n  // controls the source address of outbound connections. 
For :ref:`clusters\n  // <envoy_api_msg_Cluster>`, the cluster type determines whether the\n  // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS\n  // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized\n  // via :ref:`resolver_name <envoy_api_field_core.SocketAddress.resolver_name>`.\n  string address = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof port_specifier {\n    option (validate.required) = true;\n\n    uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}];\n\n    // This is only valid if :ref:`resolver_name\n    // <envoy_api_field_core.SocketAddress.resolver_name>` is specified below and the\n    // named resolver is capable of named port resolution.\n    string named_port = 4;\n  }\n\n  // The name of the custom resolver. This must have been registered with Envoy. If\n  // this is empty, a context dependent default applies. If the address is a concrete\n  // IP address, no resolution will occur. If address is a hostname this\n  // should be set for resolution other than DNS. Specifying a custom resolver with\n  // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime.\n  string resolver_name = 5;\n\n  // When binding to an IPv6 address above, this enables `IPv4 compatibility\n  // <https://tools.ietf.org/html/rfc3493#page-11>`_. Binding to ``::`` will\n  // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into\n  // IPv6 space as ``::FFFF:<IPv4-address>``.\n  bool ipv4_compat = 6;\n}\n\nmessage TcpKeepalive {\n  // Maximum number of keepalive probes to send without response before deciding\n  // the connection is dead. Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 9.)\n  google.protobuf.UInt32Value keepalive_probes = 1;\n\n  // The number of seconds a connection needs to be idle before keep-alive probes\n  // start being sent. 
Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 7200s (i.e., 2 hours.)\n  google.protobuf.UInt32Value keepalive_time = 2;\n\n  // The number of seconds between keep-alive probes. Default is to use the OS\n  // level configuration (unless overridden, Linux defaults to 75s.)\n  google.protobuf.UInt32Value keepalive_interval = 3;\n}\n\nmessage BindConfig {\n  // The address to bind to when creating a socket.\n  SocketAddress source_address = 1 [(validate.rules).message = {required: true}];\n\n  // Whether to set the *IP_FREEBIND* option when creating the socket. When this\n  // flag is set to true, allows the :ref:`source_address\n  // <envoy_api_field_UpstreamBindConfig.source_address>` to be an IP address\n  // that is not configured on the system running Envoy. When this flag is set\n  // to false, the option *IP_FREEBIND* is disabled on the socket. When this\n  // flag is not set (default), the socket is not modified, i.e. the option is\n  // neither enabled nor disabled.\n  google.protobuf.BoolValue freebind = 2;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated SocketOption socket_options = 3;\n}\n\n// Addresses specify either a logical or physical address and port, which are\n// used to tell Envoy where to bind/listen, connect to upstream and find\n// management servers.\nmessage Address {\n  oneof address {\n    option (validate.required) = true;\n\n    SocketAddress socket_address = 1;\n\n    Pipe pipe = 2;\n  }\n}\n\n// CidrRange specifies an IP Address and a prefix length to construct\n// the subnet mask for a `CIDR <https://tools.ietf.org/html/rfc4632>`_ range.\nmessage CidrRange {\n  // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.\n  string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Length of prefix, e.g. 0, 32.\n  google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/backoff.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"BackoffProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Backoff Strategy]\n\n// Configuration defining a jittered exponential back off strategy.\nmessage BackoffStrategy {\n  // The base interval to be used for the next back off computation. It should\n  // be greater than zero and less than or equal to :ref:`max_interval\n  // <envoy_api_field_core.BackoffStrategy.max_interval>`.\n  google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // Specifies the maximum interval between retries. This parameter is optional,\n  // but must be greater than or equal to the :ref:`base_interval\n  // <envoy_api_field_core.BackoffStrategy.base_interval>` if set. The default\n  // is 10 times the :ref:`base_interval\n  // <envoy_api_field_core.BackoffStrategy.base_interval>`.\n  google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/base.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/backoff.proto\";\nimport \"envoy/api/v2/core/http_uri.proto\";\nimport \"envoy/type/percent.proto\";\nimport \"envoy/type/semantic_version.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\nimport public \"envoy/api/v2/core/socket_option.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"BaseProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common types]\n\n// Envoy supports :ref:`upstream priority routing\n// <arch_overview_http_routing_priority>` both at the route and the virtual\n// cluster level. The current priority implementation uses different connection\n// pool and circuit breaking settings for each priority level. This means that\n// even for HTTP/2 requests, two physical connections will be used to an\n// upstream host. 
In the future Envoy will likely support true HTTP/2 priority\n// over a single upstream connection.\nenum RoutingPriority {\n  DEFAULT = 0;\n  HIGH = 1;\n}\n\n// HTTP request method.\nenum RequestMethod {\n  METHOD_UNSPECIFIED = 0;\n  GET = 1;\n  HEAD = 2;\n  POST = 3;\n  PUT = 4;\n  DELETE = 5;\n  CONNECT = 6;\n  OPTIONS = 7;\n  TRACE = 8;\n  PATCH = 9;\n}\n\n// Identifies the direction of the traffic relative to the local Envoy.\nenum TrafficDirection {\n  // Default option is unspecified.\n  UNSPECIFIED = 0;\n\n  // The transport is used for incoming traffic.\n  INBOUND = 1;\n\n  // The transport is used for outgoing traffic.\n  OUTBOUND = 2;\n}\n\n// Identifies location of where either Envoy runs or where upstream hosts run.\nmessage Locality {\n  // Region this :ref:`zone <envoy_api_field_core.Locality.zone>` belongs to.\n  string region = 1;\n\n  // Defines the local service zone where Envoy is running. Though optional, it\n  // should be set if discovery service routing is used and the discovery\n  // service exposes :ref:`zone data <envoy_api_field_endpoint.LocalityLbEndpoints.locality>`,\n  // either in this message or via :option:`--service-zone`. The meaning of zone\n  // is context dependent, e.g. `Availability Zone (AZ)\n  // <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html>`_\n  // on AWS, `Zone <https://cloud.google.com/compute/docs/regions-zones/>`_ on\n  // GCP, etc.\n  string zone = 2;\n\n  // When used for locality of upstream hosts, this field further splits zone\n  // into smaller chunks of sub-zones so they can be load balanced\n  // independently.\n  string sub_zone = 3;\n}\n\n// BuildVersion combines SemVer version of extension with free-form build information\n// (i.e. 
'alpha', 'private-build') as a set of strings.\nmessage BuildVersion {\n  // SemVer version of extension.\n  type.SemanticVersion version = 1;\n\n  // Free-form build information.\n  // Envoy defines several well known keys in the source/common/version/version.h file\n  google.protobuf.Struct metadata = 2;\n}\n\n// Version and identification for an Envoy extension.\n// [#next-free-field: 6]\nmessage Extension {\n  // This is the name of the Envoy filter as specified in the Envoy\n  // configuration, e.g. envoy.filters.http.router, com.acme.widget.\n  string name = 1;\n\n  // Category of the extension.\n  // Extension category names use reverse DNS notation. For instance \"envoy.filters.listener\"\n  // for Envoy's built-in listener filters or \"com.acme.filters.http\" for HTTP filters from\n  // acme.com vendor.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.]\n  string category = 2;\n\n  // [#not-implemented-hide:] Type descriptor of extension configuration proto.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.]\n  // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.]\n  string type_descriptor = 3;\n\n  // The version is a property of the extension and maintained independently\n  // of other extensions and the Envoy API.\n  // This field is not set when extension did not provide version information.\n  BuildVersion version = 4;\n\n  // Indicates that the extension is present but was disabled via dynamic configuration.\n  bool disabled = 5;\n}\n\n// Identifies a specific Envoy instance. The node identifier is presented to the\n// management server, which may use this identifier to distinguish per Envoy\n// configuration for serving.\n// [#next-free-field: 12]\nmessage Node {\n  // An opaque node identifier for the Envoy node. This also provides the local\n  // service node name. 
It should be set if any of the following features are\n  // used: :ref:`statsd <arch_overview_statistics>`, :ref:`CDS\n  // <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-node`.\n  string id = 1;\n\n  // Defines the local service cluster name where Envoy is running. Though\n  // optional, it should be set if any of the following features are used:\n  // :ref:`statsd <arch_overview_statistics>`, :ref:`health check cluster\n  // verification\n  // <envoy_api_field_core.HealthCheck.HttpHealthCheck.service_name_matcher>`,\n  // :ref:`runtime override directory <envoy_api_msg_config.bootstrap.v2.Runtime>`,\n  // :ref:`user agent addition\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.add_user_agent>`,\n  // :ref:`HTTP global rate limiting <config_http_filters_rate_limit>`,\n  // :ref:`CDS <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-cluster`.\n  string cluster = 2;\n\n  // Opaque metadata extending the node identifier. Envoy will pass this\n  // directly to the management server.\n  google.protobuf.Struct metadata = 3;\n\n  // Locality specifying where the Envoy instance is running.\n  Locality locality = 4;\n\n  // This is motivated by informing a management server during canary which\n  // version of Envoy is being tested in a heterogeneous fleet. This will be set\n  // by Envoy in management server RPCs.\n  // This field is deprecated in favor of the user_agent_name and user_agent_version values.\n  string build_version = 5 [deprecated = true];\n\n  // Free-form string that identifies the entity requesting config.\n  // E.g. \"envoy\" or \"grpc\"\n  string user_agent_name = 6;\n\n  oneof user_agent_version_type {\n    // Free-form string that identifies the version of the entity requesting config.\n    // E.g. 
\"1.12.2\" or \"abcd1234\", or \"SpecialEnvoyBuild\"\n    string user_agent_version = 7;\n\n    // Structured version of the entity requesting config.\n    BuildVersion user_agent_build_version = 8;\n  }\n\n  // List of extensions and their versions supported by the node.\n  repeated Extension extensions = 9;\n\n  // Client feature support list. These are well known features described\n  // in the Envoy API repository for a given major version of an API. Client features\n  // use reverse DNS naming scheme, for example `com.acme.feature`.\n  // See :ref:`the list of features <client_features>` that xDS client may\n  // support.\n  repeated string client_features = 10;\n\n  // Known listening ports on the node as a generic hint to the management server\n  // for filtering :ref:`listeners <config_listeners>` to be returned. For example,\n  // if there is a listener bound to port 80, the list can optionally contain the\n  // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint.\n  repeated Address listening_addresses = 11;\n}\n\n// Metadata provides additional inputs to filters based on matched listeners,\n// filter chains, routes and endpoints. It is structured as a map, usually from\n// filter name (in reverse DNS format) to metadata specific to the filter. Metadata\n// key-values for a filter are merged as connection and request handling occurs,\n// with later values for the same key overriding earlier values.\n//\n// An example use of metadata is providing additional values to\n// http_connection_manager in the envoy.http_connection_manager.access_log\n// namespace.\n//\n// Another example use of metadata is to per service config info in cluster metadata, which may get\n// consumed by multiple filters.\n//\n// For load balancing, Metadata provides a means to subset cluster endpoints.\n// Endpoints have a Metadata object associated and routes contain a Metadata\n// object to match against. 
There are some well defined metadata used today for\n// this purpose:\n//\n// * ``{\"envoy.lb\": {\"canary\": <bool> }}`` This indicates the canary status of an\n//   endpoint and is also used during header processing\n//   (x-envoy-upstream-canary) and for stats purposes.\n// [#next-major-version: move to type/metadata/v2]\nmessage Metadata {\n  // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*\n  // namespace is reserved for Envoy's built-in filters.\n  map<string, google.protobuf.Struct> filter_metadata = 1;\n}\n\n// Runtime derived uint32 with a default when not specified.\nmessage RuntimeUInt32 {\n  // Default value if runtime value is not available.\n  uint32 default_value = 2;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// Runtime derived double with a default when not specified.\nmessage RuntimeDouble {\n  // Default value if runtime value is not available.\n  double default_value = 1;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// Runtime derived bool with a default when not specified.\nmessage RuntimeFeatureFlag {\n  // Default value if runtime value is not available.\n  google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key to get value for comparison. This value is used if defined. 
The boolean value must\n  // be represented via its\n  // `canonical JSON encoding <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n  string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// Header name/value pair.\nmessage HeaderValue {\n  // Header name.\n  string key = 1\n      [(validate.rules).string =\n           {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Header value.\n  //\n  // The same :ref:`format specifier <config_access_log_format>` as used for\n  // :ref:`HTTP access logging <config_access_log>` applies here, however\n  // unknown header values are replaced with the empty string instead of `-`.\n  string value = 2 [\n    (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}\n  ];\n}\n\n// Header name/value pair plus option to control append behavior.\nmessage HeaderValueOption {\n  // Header name/value pair that this option applies to.\n  HeaderValue header = 1 [(validate.rules).message = {required: true}];\n\n  // Should the value be appended? 
If true (default), the value is appended to\n  // existing values.\n  google.protobuf.BoolValue append = 2;\n}\n\n// Wrapper for a set of headers.\nmessage HeaderMap {\n  repeated HeaderValue headers = 1;\n}\n\n// Data source consisting of either a file or an inline value.\nmessage DataSource {\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local filesystem data source.\n    string filename = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Bytes inlined in the configuration.\n    bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];\n\n    // String inlined in the configuration.\n    string inline_string = 3 [(validate.rules).string = {min_bytes: 1}];\n  }\n}\n\n// The message specifies the retry policy of remote data source when fetching fails.\nmessage RetryPolicy {\n  // Specifies parameters that control :ref:`retry backoff strategy <envoy_api_msg_core.BackoffStrategy>`.\n  // This parameter is optional, in which case the default base interval is 1000 milliseconds. The\n  // default maximum interval is 10 times the base interval.\n  BackoffStrategy retry_back_off = 1;\n\n  // Specifies the allowed number of retries. 
This parameter is optional and\n  // defaults to 1.\n  google.protobuf.UInt32Value num_retries = 2;\n}\n\n// The message specifies how to fetch data from remote and how to verify it.\nmessage RemoteDataSource {\n  // The HTTP URI to fetch the remote data.\n  HttpUri http_uri = 1 [(validate.rules).message = {required: true}];\n\n  // SHA256 string for verifying data.\n  string sha256 = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Retry policy for fetching remote data.\n  RetryPolicy retry_policy = 3;\n}\n\n// Async data source which support async data fetch.\nmessage AsyncDataSource {\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local async data source.\n    DataSource local = 1;\n\n    // Remote async data source.\n    RemoteDataSource remote = 2;\n  }\n}\n\n// Configuration for transport socket in :ref:`listeners <config_listeners>` and\n// :ref:`clusters <envoy_api_msg_Cluster>`. If the configuration is\n// empty, a default transport socket implementation and configuration will be\n// chosen based on the platform and existence of tls_context.\nmessage TransportSocket {\n  // The name of the transport socket to instantiate. The name must match a supported transport\n  // socket implementation.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Implementation specific configuration which depends on the implementation being instantiated.\n  // See the supported transport socket implementations for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not\n// specified via a runtime key.\n//\n// .. 
note::\n//\n//   Parsing of the runtime key's data is implemented such that it may be represented as a\n//   :ref:`FractionalPercent <envoy_api_msg_type.FractionalPercent>` proto represented as JSON/YAML\n//   and may also be represented as an integer with the assumption that the value is an integral\n//   percentage out of 100. For instance, a runtime key lookup returning the value \"42\" would parse\n//   as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.\nmessage RuntimeFractionalPercent {\n  // Default value if the runtime value's for the numerator/denominator keys are not available.\n  type.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key for a YAML representation of a FractionalPercent.\n  string runtime_key = 2;\n}\n\n// Identifies a specific ControlPlane instance that Envoy is connected to.\nmessage ControlPlane {\n  // An opaque control plane identifier that uniquely identifies an instance\n  // of control plane. This can be used to identify which control plane instance,\n  // the Envoy is connected to.\n  string identifier = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/config_source.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"ConfigSourceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Configuration sources]\n\n// xDS API version. This is used to describe both resource and transport\n// protocol versions (in distinct configuration fields).\nenum ApiVersion {\n  // When not specified, we assume v2, to ease migration to Envoy's stable API\n  // versioning. If a client does not support v2 (e.g. due to deprecation), this\n  // is an invalid value.\n  AUTO = 0;\n\n  // Use xDS v2 API.\n  V2 = 1;\n\n  // Use xDS v3 API.\n  V3 = 2;\n}\n\n// API configuration source. This identifies the API type and cluster that Envoy\n// will use to fetch an xDS API.\n// [#next-free-field: 9]\nmessage ApiConfigSource {\n  // APIs may be fetched via either REST or gRPC.\n  enum ApiType {\n    // Ideally this would be 'reserved 0' but one can't reserve the default\n    // value. Instead we throw an exception if this is ever used.\n    UNSUPPORTED_REST_LEGACY = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // REST-JSON v2 API. The `canonical JSON encoding\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_ for\n    // the v2 protos is used.\n    REST = 1;\n\n    // gRPC v2 API.\n    GRPC = 2;\n\n    // Using the delta xDS gRPC service, i.e. 
DeltaDiscovery{Request,Response}\n    // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state\n    // with every update, the xDS server only sends what has changed since the last update.\n    DELTA_GRPC = 3;\n  }\n\n  // API type (gRPC, REST, delta gRPC)\n  ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}];\n\n  // Cluster names should be used only with REST. If > 1\n  // cluster is defined, clusters will be cycled through if any kind of failure\n  // occurs.\n  //\n  // .. note::\n  //\n  //  The cluster with name ``cluster_name`` must be statically defined and its\n  //  type must not be ``EDS``.\n  repeated string cluster_names = 2;\n\n  // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined,\n  // services will be cycled through if any kind of failure occurs.\n  repeated GrpcService grpc_services = 4;\n\n  // For REST APIs, the delay between successive polls.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // For REST APIs, the request timeout. If not set, a default value of 1s will be used.\n  google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}];\n\n  // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be\n  // rate limited.\n  RateLimitSettings rate_limit_settings = 6;\n\n  // Skip the node identifier in subsequent discovery requests for streaming gRPC config types.\n  bool set_node_on_first_message_only = 7;\n}\n\n// Aggregated Discovery Service (ADS) options. 
This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` can be used to\n// specify that ADS is to be used.\nmessage AggregatedConfigSource {\n}\n\n// [#not-implemented-hide:]\n// Self-referencing config source options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` can be used to\n// specify that other data can be obtained from the same server.\nmessage SelfConfigSource {\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Rate Limit settings to be applied for discovery requests made by Envoy.\nmessage RateLimitSettings {\n  // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a\n  // default value of 100 will be used.\n  google.protobuf.UInt32Value max_tokens = 1;\n\n  // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens\n  // per second will be used.\n  google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}];\n}\n\n// Configuration for :ref:`listeners <config_listeners>`, :ref:`clusters\n// <config_cluster_manager>`, :ref:`routes\n// <envoy_api_msg_RouteConfiguration>`, :ref:`endpoints\n// <arch_overview_service_discovery>` etc. may either be sourced from the\n// filesystem or from an xDS API source. Filesystem configs are watched with\n// inotify for updates.\n// [#next-free-field: 7]\nmessage ConfigSource {\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Path on the filesystem to source and watch for configuration updates.\n    // When sourcing configuration for :ref:`secret <envoy_api_msg_auth.Secret>`,\n    // the certificate and key files are also watched for updates.\n    //\n    // .. 
note::\n    //\n    //  The path to the source must exist at config load time.\n    //\n    // .. note::\n    //\n    //   Envoy will only watch the file path for *moves.* This is because in general only moves\n    //   are atomic. The same method of swapping files as is demonstrated in the\n    //   :ref:`runtime documentation <config_runtime_symbolic_link_swap>` can be used here also.\n    string path = 1;\n\n    // API configuration source.\n    ApiConfigSource api_config_source = 2;\n\n    // When set, ADS will be used to fetch resources. The ADS API configuration\n    // source in the bootstrap configuration is used.\n    AggregatedConfigSource ads = 3;\n\n    // [#not-implemented-hide:]\n    // When set, the client will access the resources from the same server it got the\n    // ConfigSource from, although not necessarily from the same stream. This is similar to the\n    // :ref:`ads<envoy_api_field.ConfigSource.ads>` field, except that the client may use a\n    // different stream to the same server. As a result, this field can be used for things\n    // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.)\n    // LDS to RDS on the same server without requiring the management server to know its name\n    // or required credentials.\n    // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since\n    // this field can implicitly mean to use the same stream in the case where the ConfigSource\n    // is provided via ADS and the specified data can also be obtained via ADS.]\n    SelfConfigSource self = 5;\n  }\n\n  // When this timeout is specified, Envoy will wait no longer than the specified time for first\n  // config response on this xDS subscription during the :ref:`initialization process\n  // <arch_overview_initialization>`. After reaching the timeout, Envoy will move to the next\n  // initialization phase, even if the first config is not delivered yet. 
The timer is activated\n  // when the xDS API subscription starts, and is disarmed on first config update or on error. 0\n  // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another\n  // timeout applies). The default is 15s.\n  google.protobuf.Duration initial_fetch_timeout = 4;\n\n  // API version for xDS resources. This implies the type URLs that the client\n  // will request for resources and the resource type that the client will in\n  // turn expect to be delivered.\n  ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/event_service_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"EventServiceConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#not-implemented-hide:]\n// Configuration of the event reporting service endpoint.\nmessage EventServiceConfig {\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Specifies the gRPC service that hosts the event reporting service.\n    GrpcService grpc_service = 1;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"GrpcMethodListProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC method list]\n\n// A list of gRPC methods which can be used as an allowlist, for example.\nmessage GrpcMethodList {\n  message Service {\n    // The name of the gRPC service.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // The names of the gRPC methods in this service.\n    repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  repeated Service services = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/grpc_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"GrpcServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC services]\n\n// gRPC service configuration. This is used by :ref:`ApiConfigSource\n// <envoy_api_msg_core.ApiConfigSource>` and filter configurations.\n// [#next-free-field: 6]\nmessage GrpcService {\n  message EnvoyGrpc {\n    // The name of the upstream gRPC cluster. SSL credentials will be supplied\n    // in the :ref:`Cluster <envoy_api_msg_Cluster>` :ref:`transport_socket\n    // <envoy_api_field_Cluster.transport_socket>`.\n    string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // [#next-free-field: 7]\n  message GoogleGrpc {\n    // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.\n    message SslCredentials {\n      // PEM encoded server root certificates.\n      DataSource root_certs = 1;\n\n      // PEM encoded client private key.\n      DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n      // PEM encoded client certificate chain.\n      DataSource cert_chain = 3;\n    }\n\n    // Local channel credentials. 
Only UDS is supported for now.\n    // See https://github.com/grpc/grpc/pull/15909.\n    message GoogleLocalCredentials {\n    }\n\n    // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call\n    // credential types.\n    message ChannelCredentials {\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        SslCredentials ssl_credentials = 1;\n\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_default = 2;\n\n        GoogleLocalCredentials local_credentials = 3;\n      }\n    }\n\n    // [#next-free-field: 8]\n    message CallCredentials {\n      message ServiceAccountJWTAccessCredentials {\n        string json_key = 1;\n\n        uint64 token_lifetime_seconds = 2;\n      }\n\n      message GoogleIAMCredentials {\n        string authorization_token = 1;\n\n        string authority_selector = 2;\n      }\n\n      message MetadataCredentialsFromPlugin {\n        string name = 1;\n\n        oneof config_type {\n          google.protobuf.Struct config = 2 [deprecated = true];\n\n          google.protobuf.Any typed_config = 3;\n        }\n      }\n\n      // Security token service configuration that allows Google gRPC to\n      // fetch security token from an OAuth 2.0 authorization server.\n      // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and\n      // https://github.com/grpc/grpc/pull/19587.\n      // [#next-free-field: 10]\n      message StsService {\n        // URI of the token exchange service that handles token exchange requests.\n        // [#comment:TODO(asraa): Add URI validation when implemented. 
Tracked by\n        // https://github.com/envoyproxy/protoc-gen-validate/issues/303]\n        string token_exchange_service_uri = 1;\n\n        // Location of the target service or resource where the client\n        // intends to use the requested security token.\n        string resource = 2;\n\n        // Logical name of the target service where the client intends to\n        // use the requested security token.\n        string audience = 3;\n\n        // The desired scope of the requested security token in the\n        // context of the service or resource where the token will be used.\n        string scope = 4;\n\n        // Type of the requested security token.\n        string requested_token_type = 5;\n\n        // The path of subject token, a security token that represents the\n        // identity of the party on behalf of whom the request is being made.\n        string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}];\n\n        // Type of the subject token.\n        string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}];\n\n        // The path of actor token, a security token that represents the identity\n        // of the acting party. 
The acting party is authorized to use the\n        // requested security token and act on behalf of the subject.\n        string actor_token_path = 8;\n\n        // Type of the actor token.\n        string actor_token_type = 9;\n      }\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        // Access token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.\n        string access_token = 1;\n\n        // Google Compute Engine credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_compute_engine = 2;\n\n        // Google refresh token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.\n        string google_refresh_token = 3;\n\n        // Service Account JWT Access credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.\n        ServiceAccountJWTAccessCredentials service_account_jwt_access = 4;\n\n        // Google IAM credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.\n        GoogleIAMCredentials google_iam = 5;\n\n        // Custom authenticator credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.\n        // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.\n        MetadataCredentialsFromPlugin from_plugin = 6;\n\n        // Custom security token service which implements OAuth 2.0 token exchange.\n        // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16\n        // See https://github.com/grpc/grpc/pull/19587.\n        StsService sts_service = 7;\n      }\n    }\n\n    // The target URI when using the `Google C++ gRPC client\n    // <https://github.com/grpc/grpc>`_. 
SSL credentials will be supplied in\n    // :ref:`channel_credentials <envoy_api_field_core.GrpcService.GoogleGrpc.channel_credentials>`.\n    string target_uri = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    ChannelCredentials channel_credentials = 2;\n\n    // A set of call credentials that can be composed with `channel credentials\n    // <https://grpc.io/docs/guides/auth.html#credential-types>`_.\n    repeated CallCredentials call_credentials = 3;\n\n    // The human readable prefix to use when emitting statistics for the gRPC\n    // service.\n    //\n    // .. csv-table::\n    //    :header: Name, Type, Description\n    //    :widths: 1, 1, 2\n    //\n    //    streams_total, Counter, Total number of streams opened\n    //    streams_closed_<gRPC status code>, Counter, Total streams closed with <gRPC status code>\n    string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}];\n\n    // The name of the Google gRPC credentials factory to use. This must have been registered with\n    // Envoy. If this is empty, a default credentials factory will be used that sets up channel\n    // credentials based on other configuration parameters.\n    string credentials_factory_name = 5;\n\n    // Additional configuration for site-specific customizations of the Google\n    // gRPC library.\n    google.protobuf.Struct config = 6;\n  }\n\n  reserved 4;\n\n  oneof target_specifier {\n    option (validate.required) = true;\n\n    // Envoy's in-built gRPC client.\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    EnvoyGrpc envoy_grpc = 1;\n\n    // `Google C++ gRPC client <https://github.com/grpc/grpc>`_\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    GoogleGrpc google_grpc = 2;\n  }\n\n  // The timeout for the gRPC request. 
This is the timeout for a specific\n  // request.\n  google.protobuf.Duration timeout = 3;\n\n  // Additional metadata to include in streams initiated to the GrpcService.\n  // This can be used for scenarios in which additional ad hoc authorization\n  // headers (e.g. ``x-foo-bar: baz-key``) are to be injected.\n  repeated HeaderValue initial_metadata = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/event_service_config.proto\";\nimport \"envoy/type/http.proto\";\nimport \"envoy/type/matcher/string.proto\";\nimport \"envoy/type/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Health check]\n// * Health checking :ref:`architecture overview <arch_overview_health_checking>`.\n// * If health checking is configured for a cluster, additional statistics are emitted. They are\n//   documented :ref:`here <config_cluster_manager_cluster_stats>`.\n\n// Endpoint health status.\nenum HealthStatus {\n  // The health status is not known. This is interpreted by Envoy as *HEALTHY*.\n  UNKNOWN = 0;\n\n  // Healthy.\n  HEALTHY = 1;\n\n  // Unhealthy.\n  UNHEALTHY = 2;\n\n  // Connection draining in progress. E.g.,\n  // `<https://aws.amazon.com/blogs/aws/elb-connection-draining-remove-instances-from-service-with-care/>`_\n  // or\n  // `<https://cloud.google.com/compute/docs/load-balancing/enabling-connection-draining>`_.\n  // This is interpreted by Envoy as *UNHEALTHY*.\n  DRAINING = 3;\n\n  // Health check timed out. 
This is part of HDS and is interpreted by Envoy as\n  // *UNHEALTHY*.\n  TIMEOUT = 4;\n\n  // Degraded.\n  DEGRADED = 5;\n}\n\n// [#next-free-field: 23]\nmessage HealthCheck {\n  // Describes the encoding of the payload bytes in the payload.\n  message Payload {\n    oneof payload {\n      option (validate.required) = true;\n\n      // Hex encoded payload. E.g., \"000000FF\".\n      string text = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // [#not-implemented-hide:] Binary payload.\n      bytes binary = 2;\n    }\n  }\n\n  // [#next-free-field: 12]\n  message HttpHealthCheck {\n    // The value of the host header in the HTTP health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The host header can be customized for a specific endpoint by setting the\n    // :ref:`hostname <envoy_api_field_endpoint.Endpoint.HealthCheckConfig.hostname>` field.\n    string host = 1;\n\n    // Specifies the HTTP path that will be requested during health checking. For example\n    // */healthcheck*.\n    string path = 2 [(validate.rules).string = {min_bytes: 1}];\n\n    // [#not-implemented-hide:] HTTP specific payload.\n    Payload send = 3;\n\n    // [#not-implemented-hide:] HTTP specific response.\n    Payload receive = 4;\n\n    // An optional service name parameter which is used to validate the identity of\n    // the health checked cluster. See the :ref:`architecture overview\n    // <arch_overview_health_checking_identity>` for more information.\n    //\n    // .. attention::\n    //\n    //   This field has been deprecated in favor of `service_name_matcher` for better flexibility\n    //   over matching with service-cluster name.\n    string service_name = 5 [deprecated = true];\n\n    // Specifies a list of HTTP headers that should be added to each request that is sent to the\n    // health checked cluster. 
For more information, including details on header value syntax, see\n    // the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated HeaderValueOption request_headers_to_add = 6\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request that is sent to the\n    // health checked cluster.\n    repeated string request_headers_to_remove = 8;\n\n    // If set, health checks will be made using http/2.\n    // Deprecated, use :ref:`codec_client_type\n    // <envoy_api_field_core.HealthCheck.HttpHealthCheck.codec_client_type>` instead.\n    bool use_http2 = 7 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n    // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default\n    // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open\n    // semantics of :ref:`Int64Range <envoy_api_msg_type.Int64Range>`. The start and end of each\n    // range are required. Only statuses in the range [100, 600) are allowed.\n    repeated type.Int64Range expected_statuses = 9;\n\n    // Use specified application protocol for health checks.\n    type.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}];\n\n    // An optional service name parameter which is used to validate the identity of\n    // the health checked cluster using a :ref:`StringMatcher\n    // <envoy_api_msg_type.matcher.StringMatcher>`. 
See the :ref:`architecture overview\n    // <arch_overview_health_checking_identity>` for more information.\n    type.matcher.StringMatcher service_name_matcher = 11;\n  }\n\n  message TcpHealthCheck {\n    // Empty payloads imply a connect-only health check.\n    Payload send = 1;\n\n    // When checking the response, “fuzzy” matching is performed such that each\n    // binary block must be found, and in the order specified, but not\n    // necessarily contiguous.\n    repeated Payload receive = 2;\n  }\n\n  message RedisHealthCheck {\n    // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value\n    // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other\n    // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance\n    // by setting the specified key to any value and waiting for traffic to drain.\n    string key = 1;\n  }\n\n  // `grpc.health.v1.Health\n  // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto>`_-based\n  // healthcheck. See `gRPC doc <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_\n  // for details.\n  message GrpcHealthCheck {\n    // An optional service name parameter which will be sent to gRPC service in\n    // `grpc.health.v1.HealthCheckRequest\n    // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto#L20>`_.\n    // message. See `gRPC health-checking overview\n    // <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_ for more information.\n    string service_name = 1;\n\n    // The value of the :authority header in the gRPC health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. 
The authority header can be customized for a specific endpoint by setting\n    // the :ref:`hostname <envoy_api_field_endpoint.Endpoint.HealthCheckConfig.hostname>` field.\n    string authority = 2;\n  }\n\n  // Custom health check.\n  message CustomHealthCheck {\n    // The registered name of the custom health checker.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // A custom health checker specific configuration which depends on the custom health checker\n    // being instantiated. See :api:`envoy/config/health_checker` for reference.\n    oneof config_type {\n      google.protobuf.Struct config = 2 [deprecated = true];\n\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Health checks occur over the transport socket specified for the cluster. This implies that if a\n  // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS.\n  //\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  message TlsOptions {\n    // Specifies the ALPN protocols for health check connections. This is useful if the\n    // corresponding upstream is using ALPN-based :ref:`FilterChainMatch\n    // <envoy_api_msg_listener.FilterChainMatch>` along with different protocols for health checks\n    // versus data connections. If empty, no ALPN protocols will be set on health check connections.\n    repeated string alpn_protocols = 1;\n  }\n\n  reserved 10;\n\n  // The time to wait for a health check response. If the timeout is reached the\n  // health check attempt will be considered a failure.\n  google.protobuf.Duration timeout = 1 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // The interval between health checks.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // An optional jitter amount in milliseconds. 
If specified, Envoy will start health\n  // checking after for a random time in ms between 0 and initial_jitter. This only\n  // applies to the first health check.\n  google.protobuf.Duration initial_jitter = 20;\n\n  // An optional jitter amount in milliseconds. If specified, during every\n  // interval Envoy will add interval_jitter to the wait time.\n  google.protobuf.Duration interval_jitter = 3;\n\n  // An optional jitter amount as a percentage of interval_ms. If specified,\n  // during every interval Envoy will add interval_ms *\n  // interval_jitter_percent / 100 to the wait time.\n  //\n  // If interval_jitter_ms and interval_jitter_percent are both set, both of\n  // them will be used to increase the wait time.\n  uint32 interval_jitter_percent = 18;\n\n  // The number of unhealthy health checks required before a host is marked\n  // unhealthy. Note that for *http* health checking if a host responds with 503\n  // this threshold is ignored and the host is considered unhealthy immediately.\n  google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}];\n\n  // The number of healthy health checks required before a host is marked\n  // healthy. Note that during startup, only a single successful health check is\n  // required to mark a host healthy.\n  google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Non-serving port for health checking.\n  google.protobuf.UInt32Value alt_port = 6;\n\n  // Reuse health check connection between health checks. 
Default is true.\n  google.protobuf.BoolValue reuse_connection = 7;\n\n  oneof health_checker {\n    option (validate.required) = true;\n\n    // HTTP health check.\n    HttpHealthCheck http_health_check = 8;\n\n    // TCP health check.\n    TcpHealthCheck tcp_health_check = 9;\n\n    // gRPC health check.\n    GrpcHealthCheck grpc_health_check = 11;\n\n    // Custom health check.\n    CustomHealthCheck custom_health_check = 13;\n  }\n\n  // The \"no traffic interval\" is a special health check interval that is used when a cluster has\n  // never had traffic routed to it. This lower interval allows cluster information to be kept up to\n  // date, without sending a potentially large amount of active health checking traffic for no\n  // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the\n  // standard health check interval that is defined. Note that this interval takes precedence over\n  // any other.\n  //\n  // The default value for \"no traffic interval\" is 60 seconds.\n  google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy interval\" is a health check interval that is used for hosts that are marked as\n  // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the\n  // standard health check interval that is defined.\n  //\n  // The default value for \"unhealthy interval\" is the same as \"interval\".\n  google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as unhealthy. 
For subsequent health checks\n  // Envoy will shift back to using either \"unhealthy interval\" if present or the standard health\n  // check interval that is defined.\n  //\n  // The default value for \"unhealthy edge interval\" is the same as \"unhealthy interval\".\n  google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}];\n\n  // The \"healthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as healthy. For subsequent health checks\n  // Envoy will shift back to using the standard health check interval that is defined.\n  //\n  // The default value for \"healthy edge interval\" is the same as the default interval.\n  google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}];\n\n  // Specifies the path to the :ref:`health check event log <arch_overview_health_check_logging>`.\n  // If empty, no event log will be written.\n  string event_log_path = 17;\n\n  // [#not-implemented-hide:]\n  // The gRPC service for the health check event service.\n  // If empty, health check events won't be sent to a remote endpoint.\n  EventServiceConfig event_service = 22;\n\n  // If set to true, health check failure events will always be logged. If set to false, only the\n  // initial health check failure event will be logged.\n  // The default value is false.\n  bool always_log_health_check_failures = 19;\n\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  TlsOptions tls_options = 21;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/http_uri.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"HttpUriProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP Service URI ]\n\n// Envoy external URI descriptor\nmessage HttpUri {\n  // The HTTP server URI. It should be a full FQDN with protocol, host and path.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    uri: https://www.googleapis.com/oauth2/v1/certs\n  //\n  string uri = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Specify how `uri` is to be fetched. Today, this requires an explicit\n  // cluster, but in the future we may support dynamic cluster creation or\n  // inline DNS resolution. See `issue\n  // <https://github.com/envoyproxy/envoy/issues/1606>`_.\n  oneof http_upstream_type {\n    option (validate.required) = true;\n\n    // A cluster is created in the Envoy \"cluster_manager\" config\n    // section. This field specifies the cluster name.\n    //\n    // Example:\n    //\n    // .. code-block:: yaml\n    //\n    //    cluster: jwks_cluster\n    //\n    string cluster = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Sets the maximum duration in milliseconds that a response can take to arrive upon request.\n  google.protobuf.Duration timeout = 3 [(validate.rules).duration = {\n    required: true\n    gte {}\n  }];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"ProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Protocol options]\n\n// [#not-implemented-hide:]\nmessage TcpProtocolOptions {\n}\n\nmessage UpstreamHttpProtocolOptions {\n  // Set transport socket `SNI <https://en.wikipedia.org/wiki/Server_Name_Indication>`_ for new\n  // upstream connections based on the downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  bool auto_sni = 1;\n\n  // Automatic validate upstream presented certificate for new upstream connections based on the\n  // downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  // This field is intended to set with `auto_sni` field.\n  bool auto_san_validation = 2;\n}\n\n// [#next-free-field: 6]\nmessage HttpProtocolOptions {\n  // Action to take when Envoy receives client request with header names containing underscore\n  // characters.\n  // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented\n  // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore\n  // characters.\n  enum HeadersWithUnderscoresAction {\n    // Allow headers with underscores. This is the default behavior.\n    ALLOW = 0;\n\n    // Reject client request. HTTP/1 requests are rejected with the 400 status. 
HTTP/2 requests\n    // end with the stream reset. The \"httpN.requests_rejected_with_underscores_in_headers\" counter\n    // is incremented for each rejected request.\n    REJECT_REQUEST = 1;\n\n    // Drop the header with name containing underscores. The header is dropped before the filter chain is\n    // invoked and as such filters will not see dropped headers. The\n    // \"httpN.dropped_headers_with_underscores\" is incremented for each dropped header.\n    DROP_HEADER = 2;\n  }\n\n  // The idle timeout for connections. The idle timeout is defined as the\n  // period in which there are no active requests. When the\n  // idle timeout is reached the connection will be closed. If the connection is an HTTP/2\n  // downstream connection a drain sequence will occur prior to closing the connection, see\n  // :ref:`drain_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.drain_timeout>`.\n  // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive.\n  // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0.\n  //\n  // .. warning::\n  //   Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 1;\n\n  // The maximum duration of a connection. The duration is defined as a period since a connection\n  // was established. If not set, there is no max duration. When max_connection_duration is reached\n  // the connection will be closed. Drain sequence will occur prior to closing the connection if\n  // if's applicable. See :ref:`drain_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.drain_timeout>`.\n  // Note: not implemented for upstream connections.\n  google.protobuf.Duration max_connection_duration = 3;\n\n  // The maximum number of headers. 
If unconfigured, the default\n  // maximum number of request headers allowed is 100. Requests that exceed this limit will receive\n  // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.\n  google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}];\n\n  // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be\n  // reset independent of any other timeouts. If not specified, this value is not set.\n  google.protobuf.Duration max_stream_duration = 4;\n\n  // Action to take when a client request with a header name containing underscore characters is received.\n  // If this setting is not specified, the value defaults to ALLOW.\n  // Note: upstream responses are not affected by this setting.\n  HeadersWithUnderscoresAction headers_with_underscores_action = 5;\n}\n\n// [#next-free-field: 6]\nmessage Http1ProtocolOptions {\n  message HeaderKeyFormat {\n    message ProperCaseWords {\n    }\n\n    oneof header_format {\n      option (validate.required) = true;\n\n      // Formats the header by proper casing words: the first character and any character following\n      // a special character will be capitalized if it's an alpha character. For example,\n      // \"content-type\" becomes \"Content-Type\", and \"foo$b#$are\" becomes \"Foo$B#$Are\".\n      // Note that while this results in most headers following conventional casing, certain headers\n      // are not covered. For example, the \"TE\" header will be formatted as \"Te\".\n      ProperCaseWords proper_case_words = 1;\n    }\n  }\n\n  // Handle HTTP requests with absolute URLs in the requests. These requests\n  // are generally sent by clients to forward/explicit proxies. This allows clients to configure\n  // envoy as their HTTP proxy. 
In Unix, for example, this is typically done by setting the\n  // *http_proxy* environment variable.\n  google.protobuf.BoolValue allow_absolute_url = 1;\n\n  // Handle incoming HTTP/1.0 and HTTP 0.9 requests.\n  // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1\n  // style connect logic, dechunking, and handling lack of client host iff\n  // *default_host_for_http_10* is configured.\n  bool accept_http_10 = 2;\n\n  // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as\n  // Envoy does not otherwise support HTTP/1.0 without a Host header.\n  // This is a no-op if *accept_http_10* is not true.\n  string default_host_for_http_10 = 3;\n\n  // Describes how the keys for response headers should be formatted. By default, all header keys\n  // are lower cased.\n  HeaderKeyFormat header_key_format = 4;\n\n  // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.\n  //\n  // .. attention::\n  //\n  //   Note that this only happens when Envoy is chunk encoding which occurs when:\n  //   - The request is HTTP/1.1.\n  //   - Is neither a HEAD only request nor a HTTP Upgrade.\n  //   - Not a response to a HEAD request.\n  //   - The content length header is not present.\n  bool enable_trailers = 5;\n}\n\n// [#next-free-field: 14]\nmessage Http2ProtocolOptions {\n  // Defines a parameter to be sent in the SETTINGS frame.\n  // See `RFC7540, sec. 
6.5.1 <https://tools.ietf.org/html/rfc7540#section-6.5.1>`_ for details.\n  message SettingsParameter {\n    // The 16 bit parameter identifier.\n    google.protobuf.UInt32Value identifier = 1 [\n      (validate.rules).uint32 = {lte: 65536 gte: 1},\n      (validate.rules).message = {required: true}\n    ];\n\n    // The 32 bit parameter value.\n    google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}];\n  }\n\n  // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_\n  // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values\n  // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header\n  // compression.\n  google.protobuf.UInt32Value hpack_table_size = 1;\n\n  // `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_\n  // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1)\n  // and defaults to 2147483647.\n  //\n  // For upstream connections, this also limits how many streams Envoy will initiate concurrently\n  // on a single connection. If the limit is reached, Envoy may queue requests or establish\n  // additional connections (as allowed per circuit breaker limits).\n  google.protobuf.UInt32Value max_concurrent_streams = 2\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 1}];\n\n  // `Initial stream-level flow-control window\n  // <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size. Valid values range from 65535\n  // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456\n  // (256 * 1024 * 1024).\n  //\n  // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default\n  // window size now, so it's also the minimum.\n  //\n  // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the\n  // HTTP/2 codec buffers. 
Once the buffer reaches this pointer, watermark callbacks will fire to\n  // stop the flow of data to the codec buffers.\n  google.protobuf.UInt32Value initial_stream_window_size = 3\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Similar to *initial_stream_window_size*, but for connection-level flow-control\n  // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*.\n  google.protobuf.UInt32Value initial_connection_window_size = 4\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Allows proxying Websocket and other upgrades over H2 connect.\n  bool allow_connect = 5;\n\n  // [#not-implemented-hide:] Hiding until envoy has full metadata support.\n  // Still under implementation. DO NOT USE.\n  //\n  // Allows metadata. See [metadata\n  // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more\n  // information.\n  bool allow_metadata = 6;\n\n  // Limit the number of pending outbound downstream frames of all types (frames that are waiting to\n  // be written into the socket). Exceeding this limit triggers flood mitigation and connection is\n  // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due\n  // to flood mitigation. The default limit is 10000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,\n  // preventing high memory utilization when receiving continuous stream of these frames. Exceeding\n  // this limit triggers flood mitigation and connection is terminated. The\n  // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood\n  // mitigation. 
The default limit is 1000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an\n  // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but\n  // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood``\n  // stat tracks the number of connections terminated due to flood mitigation.\n  // Setting this to 0 will terminate connection upon receiving first frame with an empty payload\n  // and no end stream flag. The default limit is 1.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9;\n\n  // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number\n  // of PRIORITY frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     max_inbound_priority_frames_per_stream * (1 + inbound_streams)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 100.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10;\n\n  // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number\n  // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     1 + 2 * (inbound_streams +\n  //              max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)\n  //\n  // the connection is terminated. 
The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 10.\n  // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,\n  // but more complex implementations that try to estimate available bandwidth require at least 2.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11\n      [(validate.rules).uint32 = {gte: 1}];\n\n  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then\n  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,\n  // when this option is enabled, only the offending stream is terminated.\n  //\n  // See `RFC7540, sec. 8.1 <https://tools.ietf.org/html/rfc7540#section-8.1>`_ for details.\n  bool stream_error_on_invalid_http_messaging = 12;\n\n  // [#not-implemented-hide:]\n  // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions:\n  //\n  // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by\n  // Envoy.\n  //\n  // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field\n  // 'allow_connect'.\n  //\n  // Note that custom parameters specified through this field can not also be set in the\n  // corresponding named parameters:\n  //\n  // .. code-block:: text\n  //\n  //   ID    Field Name\n  //   ----------------\n  //   0x1   hpack_table_size\n  //   0x3   max_concurrent_streams\n  //   0x4   initial_stream_window_size\n  //\n  // Collisions will trigger config validation failure on load/update. 
Likewise, inconsistencies\n  // between custom parameters with the same identifier will trigger a failure.\n  //\n  // See `IANA HTTP/2 Settings\n  // <https://www.iana.org/assignments/http2-parameters/http2-parameters.xhtml#settings>`_ for\n  // standardized identifiers.\n  repeated SettingsParameter custom_settings_parameters = 13;\n}\n\n// [#not-implemented-hide:]\nmessage GrpcProtocolOptions {\n  Http2ProtocolOptions http2_protocol_options = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/core/socket_option.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.core;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.core\";\noption java_outer_classname = \"SocketOptionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.core.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Socket Option ]\n\n// Generic socket option message. This would be used to set socket options that\n// might not exist in upstream kernels or precompiled Envoy binaries.\n// [#next-free-field: 7]\nmessage SocketOption {\n  enum SocketState {\n    // Socket options are applied after socket creation but before binding the socket to a port\n    STATE_PREBIND = 0;\n\n    // Socket options are applied after binding the socket to a port but before calling listen()\n    STATE_BOUND = 1;\n\n    // Socket options are applied after calling listen()\n    STATE_LISTENING = 2;\n  }\n\n  // An optional name to give this socket option for debugging, etc.\n  // Uniqueness is not required and no special meaning is assumed.\n  string description = 1;\n\n  // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP\n  int64 level = 2;\n\n  // The numeric name as passed to setsockopt\n  int64 name = 3;\n\n  oneof value {\n    option (validate.required) = true;\n\n    // Because many sockopts take an int value.\n    int64 int_value = 4;\n\n    // Otherwise it's a byte buffer.\n    bytes buf_value = 5;\n  }\n\n  // The state in which the option will be applied. When used in BindConfig\n  // STATE_PREBIND is currently the only valid value.\n  SocketState state = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/discovery.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"DiscoveryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.discovery.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common discovery API components]\n\n// A DiscoveryRequest requests a set of versioned resources of the same type for\n// a given Envoy node on some API.\n// [#next-free-field: 7]\nmessage DiscoveryRequest {\n  // The version_info provided in the request messages will be the version_info\n  // received with the most recent successfully processed response or empty on\n  // the first request. It is expected that no new request is sent after a\n  // response is received until the Envoy instance is ready to ACK/NACK the new\n  // configuration. ACK/NACK takes place by returning the new API config version\n  // as applied or the previous API config version respectively. Each type_url\n  // (see below) has an independent version associated with it.\n  string version_info = 1;\n\n  // The node making the request.\n  core.Node node = 2;\n\n  // List of resources to subscribe to, e.g. list of cluster names or a route\n  // configuration name. If this is empty, all resources for the API are\n  // returned. LDS/CDS may have empty resource_names, which will cause all\n  // resources for the Envoy instance to be returned. 
The LDS and CDS responses\n  // will then imply a number of resources that need to be fetched via EDS/RDS,\n  // which will be explicitly enumerated in resource_names.\n  repeated string resource_names = 3;\n\n  // Type of the resource that is being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This is implicit\n  // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is\n  // required for ADS.\n  string type_url = 4;\n\n  // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above\n  // discussion on version_info and the DiscoveryResponse nonce comment. This\n  // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP,\n  // or 2) the client has not yet accepted an update in this xDS stream (unlike\n  // delta, where it is populated only for new explicit ACKs).\n  string response_nonce = 5;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\n  // failed to update configuration. The *message* field in *error_details* provides the Envoy\n  // internal exception related to the failure. It is only intended for consumption during manual\n  // debugging, the string provided is not guaranteed to be stable across Envoy versions.\n  google.rpc.Status error_detail = 6;\n}\n\n// [#next-free-field: 7]\nmessage DiscoveryResponse {\n  // The version of the response data.\n  string version_info = 1;\n\n  // The response resources. These resources are typed and depend on the API being called.\n  repeated google.protobuf.Any resources = 2;\n\n  // [#not-implemented-hide:]\n  // Canary is used to support two Envoy command line flags:\n  //\n  // * --terminate-on-canary-transition-failure. When set, Envoy is able to\n  //   terminate if it detects that configuration is stuck at canary. 
Consider\n  //   this example sequence of updates:\n  //   - Management server applies a canary config successfully.\n  //   - Management server rolls back to a production config.\n  //   - Envoy rejects the new production config.\n  //   Since there is no sensible way to continue receiving configuration\n  //   updates, Envoy will then terminate and apply production config from a\n  //   clean slate.\n  // * --dry-run-canary. When set, a canary response will never be applied, only\n  //   validated via a dry run.\n  bool canary = 3;\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty).\n  string type_url = 4;\n\n  // For gRPC based subscriptions, the nonce provides a way to explicitly ack a\n  // specific DiscoveryResponse in a following DiscoveryRequest. Additional\n  // messages may have been sent by Envoy to the management server for the\n  // previous version on the stream prior to this DiscoveryResponse, that were\n  // unprocessed at response send time. The nonce allows the management server\n  // to ignore any further DiscoveryRequests for the previous version until a\n  // DiscoveryRequest bearing the nonce. The nonce is optional and is not\n  // required for non-stream based xDS implementations.\n  string nonce = 5;\n\n  // [#not-implemented-hide:]\n  // The control plane instance that sent the response.\n  core.ControlPlane control_plane = 6;\n}\n\n// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC\n// endpoint for Delta xDS.\n//\n// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full\n// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a\n// diff to the state of a xDS client.\n// In Delta XDS there are per-resource versions, which allow tracking state at\n// the resource granularity.\n// An xDS Delta session is always in the context of a gRPC bidirectional\n// stream. 
This allows the xDS server to keep track of the state of xDS clients\n// connected to it.\n//\n// In Delta xDS the nonce field is required and used to pair\n// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK.\n// Optionally, a response message level system_version_info is present for\n// debugging purposes only.\n//\n// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest\n// can be either or both of: [1] informing the server of what resources the\n// client has gained/lost interest in (using resource_names_subscribe and\n// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from\n// the server (using response_nonce, with presence of error_detail making it a NACK).\n// Additionally, the first message (for a given type_url) of a reconnected gRPC stream\n// has a third role: informing the server of the resources (and their versions)\n// that the client already possesses, using the initial_resource_versions field.\n//\n// As with state-of-the-world, when multiple resource types are multiplexed (ADS),\n// all requests/acknowledgments/updates are logically walled off by type_url:\n// a Cluster ACK exists in a completely separate world from a prior Route NACK.\n// In particular, initial_resource_versions being sent at the \"start\" of every\n// gRPC stream actually entails a message for each type_url, each with its own\n// initial_resource_versions.\n// [#next-free-field: 8]\nmessage DeltaDiscoveryRequest {\n  // The node making the request.\n  core.Node node = 1;\n\n  // Type of the resource that is being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\".\n  string type_url = 2;\n\n  // DeltaDiscoveryRequests allow the client to add or remove individual\n  // resources to the set of tracked resources in the context of a stream.\n  // All resource names in the resource_names_subscribe list are added to the\n  // set of tracked resources and all resource names in the 
resource_names_unsubscribe\n  // list are removed from the set of tracked resources.\n  //\n  // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or\n  // resource_names_unsubscribe list simply means that no resources are to be\n  // added or removed to the resource list.\n  // *Like* state-of-the-world xDS, the server must send updates for all tracked\n  // resources, but can also send updates for resources the client has not subscribed to.\n  //\n  // NOTE: the server must respond with all resources listed in resource_names_subscribe,\n  // even if it believes the client has the most recent version of them. The reason:\n  // the client may have dropped them, but then regained interest before it had a chance\n  // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd.\n  //\n  // These two fields can be set in any DeltaDiscoveryRequest, including ACKs\n  // and initial_resource_versions.\n  //\n  // A list of Resource names to add to the list of tracked resources.\n  repeated string resource_names_subscribe = 3;\n\n  // A list of Resource names to remove from the list of tracked resources.\n  repeated string resource_names_unsubscribe = 4;\n\n  // Informs the server of the versions of the resources the xDS client knows of, to enable the\n  // client to continue the same logical xDS session even in the face of gRPC stream reconnection.\n  // It will not be populated: [1] in the very first stream of a session, since the client will\n  // not yet have any resources,  [2] in any message after the first in a stream (for a given\n  // type_url), since the server will already be correctly tracking the client's state.\n  // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.)\n  // The map's keys are names of xDS resources known to the xDS client.\n  // The map's values are opaque resource versions.\n  map<string, string> initial_resource_versions = 5;\n\n  // When the DeltaDiscoveryRequest is 
a ACK or NACK message in response\n  // to a previous DeltaDiscoveryResponse, the response_nonce must be the\n  // nonce in the DeltaDiscoveryResponse.\n  // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted.\n  string response_nonce = 6;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`\n  // failed to update configuration. The *message* field in *error_details*\n  // provides the Envoy internal exception related to the failure.\n  google.rpc.Status error_detail = 7;\n}\n\n// [#next-free-field: 7]\nmessage DeltaDiscoveryResponse {\n  // The version of the response data (used for debugging).\n  string system_version_info = 1;\n\n  // The response resources. These are typed resources, whose types must match\n  // the type_url field.\n  repeated Resource resources = 2;\n\n  // field id 3 IS available!\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty.\n  string type_url = 4;\n\n  // Resources names of resources that have be deleted and to be removed from the xDS Client.\n  // Removed resources for missing resources can be ignored.\n  repeated string removed_resources = 6;\n\n  // The nonce provides a way for DeltaDiscoveryRequests to uniquely\n  // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required.\n  string nonce = 5;\n}\n\nmessage Resource {\n  // The resource's name, to distinguish it from others of the same type of resource.\n  string name = 3;\n\n  // The aliases are a list of other names that this resource can go by.\n  repeated string aliases = 4;\n\n  // The resource level version. It allows xDS to track the state of individual\n  // resources.\n  string version = 1;\n\n  // The resource being tracked.\n  google.protobuf.Any resource = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/eds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\nimport public \"envoy/api/v2/endpoint.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"EdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.endpoint.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: EDS]\n// Endpoint discovery :ref:`architecture overview <arch_overview_service_discovery_types_eds>`\n\nservice EndpointDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.ClusterLoadAssignment\";\n\n  // The resource_names field in DiscoveryRequest specifies a list of clusters\n  // to subscribe to updates for.\n  rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaEndpoints(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:endpoints\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage EdsDummy {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/endpoint/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.endpoint;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/endpoint/endpoint_components.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.endpoint\";\noption java_outer_classname = \"EndpointProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.endpoint;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/health_check.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.endpoint\";\noption java_outer_classname = \"EndpointComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.endpoint.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Endpoints]\n\n// Upstream host identifier.\nmessage Endpoint {\n  // The optional health check configuration.\n  message HealthCheckConfig {\n    // Optional alternative health check port value.\n    //\n    // By default the health check address port of an upstream host is the same\n    // as the host's serving address port. This provides an alternative health\n    // check port. Setting this with a non-zero value allows an upstream host\n    // to have different health check address port.\n    uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}];\n\n    // By default, the host header for L7 health checks is controlled by cluster level configuration\n    // (see: :ref:`host <envoy_api_field_core.HealthCheck.HttpHealthCheck.host>` and\n    // :ref:`authority <envoy_api_field_core.HealthCheck.GrpcHealthCheck.authority>`). Setting this\n    // to a non-empty value allows overriding the cluster level configuration for a specific\n    // endpoint.\n    string hostname = 2;\n  }\n\n  // The upstream host address.\n  //\n  // .. attention::\n  //\n  //   The form of host address depends on the given cluster type. 
For STATIC or EDS,\n  //   it is expected to be a direct IP address (or something resolvable by the\n  //   specified :ref:`resolver <envoy_api_field_core.SocketAddress.resolver_name>`\n  //   in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname,\n  //   and will be resolved via DNS.\n  core.Address address = 1;\n\n  // The optional health check configuration is used as configuration for the\n  // health checker to contact the health checked host.\n  //\n  // .. attention::\n  //\n  //   This takes into effect only for upstream clusters with\n  //   :ref:`active health checking <arch_overview_health_checking>` enabled.\n  HealthCheckConfig health_check_config = 2;\n\n  // The hostname associated with this endpoint. This hostname is not used for routing or address\n  // resolution. If provided, it will be associated with the endpoint, and can be used for features\n  // that require a hostname, like\n  // :ref:`auto_host_rewrite <envoy_api_field_route.RouteAction.auto_host_rewrite>`.\n  string hostname = 3;\n}\n\n// An Endpoint that Envoy can route traffic to.\n// [#next-free-field: 6]\nmessage LbEndpoint {\n  // Upstream host identifier or a named reference.\n  oneof host_identifier {\n    Endpoint endpoint = 1;\n\n    // [#not-implemented-hide:]\n    string endpoint_name = 5;\n  }\n\n  // Optional health status when known and supplied by EDS server.\n  core.HealthStatus health_status = 2;\n\n  // The endpoint metadata specifies values that may be used by the load\n  // balancer to select endpoints in a cluster for a given request. The filter\n  // name should be specified as *envoy.lb*. 
An example boolean key-value pair\n  // is *canary*, providing the optional canary status of the upstream host.\n  // This may be matched against in a route's\n  // :ref:`RouteAction <envoy_api_msg_route.RouteAction>` metadata_match field\n  // to subset the endpoints considered in cluster load balancing.\n  core.Metadata metadata = 3;\n\n  // The optional load balancing weight of the upstream host; at least 1.\n  // Envoy uses the load balancing weight in some of the built in load\n  // balancers. The load balancing weight for an endpoint is divided by the sum\n  // of the weights of all endpoints in the endpoint's locality to produce a\n  // percentage of traffic for the endpoint. This percentage is then further\n  // weighted by the endpoint's locality's load balancing weight from\n  // LocalityLbEndpoints. If unspecified, each host is presumed to have equal\n  // weight in a locality. The sum of the weights of all endpoints in the\n  // endpoint's locality must not exceed uint32_t maximal value (4294967295).\n  google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}];\n}\n\n// A group of endpoints belonging to a Locality.\n// One can have multiple LocalityLbEndpoints for a locality, but this is\n// generally only done if the different groups need to have different load\n// balancing weights or different priorities.\n// [#next-free-field: 7]\nmessage LocalityLbEndpoints {\n  // Identifies location of where the upstream hosts run.\n  core.Locality locality = 1;\n\n  // The group of endpoints belonging to the locality specified.\n  repeated LbEndpoint lb_endpoints = 2;\n\n  // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load\n  // balancing weight for a locality is divided by the sum of the weights of all\n  // localities  at the same priority level to produce the effective percentage\n  // of traffic for the locality. 
The sum of the weights of all localities at\n  // the same priority level must not exceed uint32_t maximal value (4294967295).\n  //\n  // Locality weights are only considered when :ref:`locality weighted load\n  // balancing <arch_overview_load_balancing_locality_weighted_lb>` is\n  // configured. These weights are ignored otherwise. If no weights are\n  // specified when locality weighted load balancing is enabled, the locality is\n  // assigned no load.\n  google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional: the priority for this LocalityLbEndpoints. If unspecified this will\n  // default to the highest priority (0).\n  //\n  // Under usual circumstances, Envoy will only select endpoints for the highest\n  // priority (0). In the event all endpoints for a particular priority are\n  // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the\n  // next highest priority group.\n  //\n  // Priorities should range from 0 (highest) to N (lowest) without skipping.\n  uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}];\n\n  // Optional: Per locality proximity value which indicates how close this\n  // locality is from the source locality. This value only provides ordering\n  // information (lower the value, closer it is to the source locality).\n  // This will be consumed by load balancing schemes that need proximity order\n  // to determine where to route the requests.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value proximity = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/endpoint/load_report.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.endpoint;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.endpoint\";\noption java_outer_classname = \"LoadReportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.endpoint.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// These are stats Envoy reports to GLB every so often. Report frequency is\n// defined by\n// :ref:`LoadStatsResponse.load_reporting_interval<envoy_api_field_service.load_stats.v2.LoadStatsResponse.load_reporting_interval>`.\n// Stats per upstream region/zone and optionally per subzone.\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\n// [#next-free-field: 9]\nmessage UpstreamLocalityStats {\n  // Name of zone, region and optionally endpoint group these metrics were\n  // collected from. Zone and region names could be empty if unknown.\n  core.Locality locality = 1;\n\n  // The total number of requests successfully completed by the endpoints in the\n  // locality.\n  uint64 total_successful_requests = 2;\n\n  // The total number of unfinished requests\n  uint64 total_requests_in_progress = 3;\n\n  // The total number of requests that failed due to errors at the endpoint,\n  // aggregated over all endpoints in the locality.\n  uint64 total_error_requests = 4;\n\n  // The total number of requests that were issued by this Envoy since\n  // the last report. 
This information is aggregated over all the\n  // upstream endpoints in the locality.\n  uint64 total_issued_requests = 8;\n\n  // Stats for multi-dimensional load balancing.\n  repeated EndpointLoadMetricStats load_metric_stats = 5;\n\n  // Endpoint granularity stats information for this locality. This information\n  // is populated if the Server requests it by setting\n  // :ref:`LoadStatsResponse.report_endpoint_granularity<envoy_api_field_service.load_stats.v2.LoadStatsResponse.report_endpoint_granularity>`.\n  repeated UpstreamEndpointStats upstream_endpoint_stats = 7;\n\n  // [#not-implemented-hide:] The priority of the endpoint group these metrics\n  // were collected from.\n  uint32 priority = 6;\n}\n\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\n// [#next-free-field: 8]\nmessage UpstreamEndpointStats {\n  // Upstream host address.\n  core.Address address = 1;\n\n  // Opaque and implementation dependent metadata of the\n  // endpoint. Envoy will pass this directly to the management server.\n  google.protobuf.Struct metadata = 6;\n\n  // The total number of requests successfully completed by the endpoints in the\n  // locality. These include non-5xx responses for HTTP, where errors\n  // originate at the client and the endpoint responded successfully. 
For gRPC,\n  // the grpc-status values are those not covered by total_error_requests below.\n  uint64 total_successful_requests = 2;\n\n  // The total number of unfinished requests for this endpoint.\n  uint64 total_requests_in_progress = 3;\n\n  // The total number of requests that failed due to errors at the endpoint.\n  // For HTTP these are responses with 5xx status codes and for gRPC the\n  // grpc-status values:\n  //\n  //   - DeadlineExceeded\n  //   - Unimplemented\n  //   - Internal\n  //   - Unavailable\n  //   - Unknown\n  //   - DataLoss\n  uint64 total_error_requests = 4;\n\n  // The total number of requests that were issued to this endpoint\n  // since the last report. A single TCP connection, HTTP or gRPC\n  // request or stream is counted as one request.\n  uint64 total_issued_requests = 7;\n\n  // Stats for multi-dimensional load balancing.\n  repeated EndpointLoadMetricStats load_metric_stats = 5;\n}\n\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\nmessage EndpointLoadMetricStats {\n  // Name of the metric; may be empty.\n  string metric_name = 1;\n\n  // Number of calls that finished and included this metric.\n  uint64 num_requests_finished_with_metric = 2;\n\n  // Sum of metric values across all calls that finished with this metric for\n  // load_reporting_interval.\n  double total_metric_value = 3;\n}\n\n// Per cluster load stats. Envoy reports these stats a management server in a\n// :ref:`LoadStatsRequest<envoy_api_msg_service.load_stats.v2.LoadStatsRequest>`\n// [#not-implemented-hide:] Not configuration. 
TBD how to doc proto APIs.\n// Next ID: 7\n// [#next-free-field: 7]\nmessage ClusterStats {\n  message DroppedRequests {\n    // Identifier for the policy specifying the drop.\n    string category = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Total number of deliberately dropped requests for the category.\n    uint64 dropped_count = 2;\n  }\n\n  // The name of the cluster.\n  string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The eds_cluster_config service_name of the cluster.\n  // It's possible that two clusters send the same service_name to EDS,\n  // in that case, the management server is supposed to do aggregation on the load reports.\n  string cluster_service_name = 6;\n\n  // Need at least one.\n  repeated UpstreamLocalityStats upstream_locality_stats = 2\n      [(validate.rules).repeated = {min_items: 1}];\n\n  // Cluster-level stats such as total_successful_requests may be computed by\n  // summing upstream_locality_stats. In addition, below there are additional\n  // cluster-wide stats.\n  //\n  // The total number of dropped requests. This covers requests\n  // deliberately dropped by the drop_overload policy and circuit breaking.\n  uint64 total_dropped_requests = 3;\n\n  // Information about deliberately dropped requests for each category specified\n  // in the DropOverload policy.\n  repeated DroppedRequests dropped_requests = 5;\n\n  // Period over which the actual load report occurred. This will be guaranteed to include every\n  // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy\n  // and the *LoadStatsResponse* message sent from the management server, this may be longer than\n  // the requested load reporting interval in the *LoadStatsResponse*.\n  google.protobuf.Duration load_report_interval = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/endpoint.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/endpoint/endpoint_components.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"EndpointProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.endpoint.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Endpoint configuration]\n// Endpoint discovery :ref:`architecture overview <arch_overview_service_discovery_types_eds>`\n\n// Each route from RDS will map to a single cluster or traffic split across\n// clusters using weights expressed in the RDS WeightedCluster.\n//\n// With EDS, each cluster is treated independently from a LB perspective, with\n// LB taking place between the Localities within a cluster and at a finer\n// granularity between the hosts within a locality. The percentage of traffic\n// for each endpoint is determined by both its load_balancing_weight, and the\n// load_balancing_weight of its locality. 
First, a locality will be selected,\n// then an endpoint within that locality will be chose based on its weight.\n// [#next-free-field: 6]\nmessage ClusterLoadAssignment {\n  // Load balancing policy settings.\n  // [#next-free-field: 6]\n  message Policy {\n    // [#not-implemented-hide:]\n    message DropOverload {\n      // Identifier for the policy specifying the drop.\n      string category = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // Percentage of traffic that should be dropped for the category.\n      type.FractionalPercent drop_percentage = 2;\n    }\n\n    reserved 1;\n\n    // Action to trim the overall incoming traffic to protect the upstream\n    // hosts. This action allows protection in case the hosts are unable to\n    // recover from an outage, or unable to autoscale or unable to handle\n    // incoming traffic volume for any reason.\n    //\n    // At the client each category is applied one after the other to generate\n    // the 'actual' drop percentage on all outgoing traffic. For example:\n    //\n    // .. code-block:: json\n    //\n    //  { \"drop_overloads\": [\n    //      { \"category\": \"throttle\", \"drop_percentage\": 60 }\n    //      { \"category\": \"lb\", \"drop_percentage\": 50 }\n    //  ]}\n    //\n    // The actual drop percentages applied to the traffic at the clients will be\n    //    \"throttle\"_drop = 60%\n    //    \"lb\"_drop = 20%  // 50% of the remaining 'actual' load, which is 40%.\n    //    actual_outgoing_load = 20% // remaining after applying all categories.\n    // [#not-implemented-hide:]\n    repeated DropOverload drop_overloads = 2;\n\n    // Priority levels and localities are considered overprovisioned with this\n    // factor (in percentage). 
This means that we don't consider a priority\n    // level or locality unhealthy until the percentage of healthy hosts\n    // multiplied by the overprovisioning factor drops below 100.\n    // With the default value 140(1.4), Envoy doesn't consider a priority level\n    // or a locality unhealthy until their percentage of healthy hosts drops\n    // below 72%. For example:\n    //\n    // .. code-block:: json\n    //\n    //  { \"overprovisioning_factor\": 100 }\n    //\n    // Read more at :ref:`priority levels <arch_overview_load_balancing_priority_levels>` and\n    // :ref:`localities <arch_overview_load_balancing_locality_weighted_lb>`.\n    google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}];\n\n    // The max time until which the endpoints from this assignment can be used.\n    // If no new assignments are received before this time expires the endpoints\n    // are considered stale and should be marked unhealthy.\n    // Defaults to 0 which means endpoints never go stale.\n    google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}];\n\n    // The flag to disable overprovisioning. If it is set to true,\n    // :ref:`overprovisioning factor\n    // <arch_overview_load_balancing_overprovisioning_factor>` will be ignored\n    // and Envoy will not perform graceful failover between priority levels or\n    // localities as endpoints become unhealthy. Otherwise Envoy will perform\n    // graceful failover as :ref:`overprovisioning factor\n    // <arch_overview_load_balancing_overprovisioning_factor>` suggests.\n    // [#not-implemented-hide:]\n    bool disable_overprovisioning = 5 [deprecated = true];\n  }\n\n  // Name of the cluster. 
This will be the :ref:`service_name\n  // <envoy_api_field_Cluster.EdsClusterConfig.service_name>` value if specified\n  // in the cluster :ref:`EdsClusterConfig\n  // <envoy_api_msg_Cluster.EdsClusterConfig>`.\n  string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // List of endpoints to load balance to.\n  repeated endpoint.LocalityLbEndpoints endpoints = 2;\n\n  // Map of named endpoints that can be referenced in LocalityLbEndpoints.\n  // [#not-implemented-hide:]\n  map<string, endpoint.Endpoint> named_endpoints = 5;\n\n  // Load balancing policy settings.\n  Policy policy = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/lds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\nimport public \"envoy/api/v2/listener.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"LdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Listener]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// The Envoy instance initiates an RPC at startup to discover a list of\n// listeners. Updates are delivered via streaming from the LDS server and\n// consist of a complete update of all listeners. Existing connections will be\n// allowed to drain from listeners that are no longer present.\nservice ListenerDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.Listener\";\n\n  rpc DeltaListeners(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc StreamListeners(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:listeners\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage LdsDummy {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/listener/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/listener/listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.listener;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/listener/listener_components.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.listener\";\noption java_outer_classname = \"ListenerProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ListenerNS\";\noption ruby_package = \"Envoy.Api.V2.ListenerNS\";\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/listener/listener_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.listener;\n\nimport \"envoy/api/v2/auth/tls.proto\";\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.listener\";\noption java_outer_classname = \"ListenerComponentsProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ListenerNS\";\noption ruby_package = \"Envoy.Api.V2.ListenerNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Listener components]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage Filter {\n  reserved 3;\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// Specifies the match criteria for selecting a specific filter chain for a\n// listener.\n//\n// In order for a filter chain to be selected, *ALL* of its criteria must be\n// fulfilled by the incoming connection, properties of which are set by the\n// networking stack and/or listener filters.\n//\n// The following order applies:\n//\n// 1. Destination port.\n// 2. Destination IP address.\n// 3. Server name (e.g. SNI for TLS protocol),\n// 4. 
Transport protocol.\n// 5. Application protocols (e.g. ALPN for TLS protocol).\n// 6. Source type (e.g. any, local or external network).\n// 7. Source IP address.\n// 8. Source port.\n//\n// For criteria that allow ranges or wildcards, the most specific value in any\n// of the configured filter chains that matches the incoming connection is going\n// to be used (e.g. for SNI ``www.example.com`` the most specific match would be\n// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter\n// chain without ``server_names`` requirements).\n//\n// [#comment: Implemented rules are kept in the preference order, with deprecated fields\n// listed at the end, because that's how we want to list them in the docs.\n//\n// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules]\n// [#next-free-field: 13]\nmessage FilterChainMatch {\n  enum ConnectionSourceType {\n    // Any connection source matches.\n    ANY = 0;\n\n    // Match a connection originating from the same host.\n    LOCAL = 1 [(udpa.annotations.enum_value_migrate).rename = \"SAME_IP_OR_LOOPBACK\"];\n\n    // Match a connection originating from a different host.\n    EXTERNAL = 2;\n  }\n\n  reserved 1;\n\n  // Optional destination port to consider when use_original_dst is set on the\n  // listener in determining a filter chain match.\n  google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}];\n\n  // If non-empty, an IP address and prefix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  repeated core.CidrRange prefix_ranges = 3;\n\n  // If non-empty, an IP address and suffix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  // [#not-implemented-hide:]\n  string address_suffix = 4;\n\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value suffix_len = 5;\n\n  // Specifies the connection source IP 
match type. Can be any, local or external network.\n  ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}];\n\n  // The criteria is satisfied if the source IP address of the downstream\n  // connection is contained in at least one of the specified subnets. If the\n  // parameter is not specified or the list is empty, the source IP address is\n  // ignored.\n  repeated core.CidrRange source_prefix_ranges = 6;\n\n  // The criteria is satisfied if the source port of the downstream connection\n  // is contained in at least one of the specified ports. If the parameter is\n  // not specified, the source port is ignored.\n  repeated uint32 source_ports = 7\n      [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}];\n\n  // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining\n  // a filter chain match. Those values will be compared against the server names of a new\n  // connection, when detected by one of the listener filters.\n  //\n  // The server name will be matched against all wildcard domains, i.e. ``www.example.com``\n  // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``.\n  //\n  // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.\n  //\n  // .. 
attention::\n  //\n  //   See the :ref:`FAQ entry <faq_how_to_setup_sni>` on how to configure SNI for more\n  //   information.\n  repeated string server_names = 11;\n\n  // If non-empty, a transport protocol to consider when determining a filter chain match.\n  // This value will be compared against the transport protocol of a new connection, when\n  // it's detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``raw_buffer`` - default, used when no transport protocol is detected,\n  // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //   when TLS protocol is detected.\n  string transport_protocol = 9;\n\n  // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when\n  // determining a filter chain match. Those values will be compared against the application\n  // protocols of a new connection, when detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector\n  //   <config_listener_filters_tls_inspector>`,\n  // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //\n  // .. 
attention::\n  //\n  //   Currently, only :ref:`TLS Inspector <config_listener_filters_tls_inspector>` provides\n  //   application protocol detection based on the requested\n  //   `ALPN <https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_ values.\n  //\n  //   However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet,\n  //   and matching on values other than ``h2`` is going to lead to a lot of false negatives,\n  //   unless all connecting clients are known to use ALPN.\n  repeated string application_protocols = 10;\n}\n\n// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and\n// various other parameters.\n// [#next-free-field: 8]\nmessage FilterChain {\n  // The criteria to use when matching a connection to this filter chain.\n  FilterChainMatch filter_chain_match = 1;\n\n  // The TLS context for this filter chain.\n  //\n  // .. attention::\n  //\n  //   **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are\n  //   set, `transport_socket` takes priority.\n  auth.DownstreamTlsContext tls_context = 2 [deprecated = true];\n\n  // A list of individual network filters that make up the filter chain for\n  // connections established with the listener. Order matters as the filters are\n  // processed sequentially as connection events happen. Note: If the filter\n  // list is empty, the connection will close by default.\n  repeated Filter filters = 3;\n\n  // Whether the listener should expect a PROXY protocol V1 header on new\n  // connections. If this option is enabled, the listener will assume that that\n  // remote address of the connection is the one specified in the header. Some\n  // load balancers including the AWS ELB support this option. 
If the option is\n  // absent or set to false, Envoy will use the physical peer address of the\n  // connection as the remote address.\n  google.protobuf.BoolValue use_proxy_proto = 4;\n\n  // [#not-implemented-hide:] filter chain metadata.\n  core.Metadata metadata = 5;\n\n  // Optional custom transport socket implementation to use for downstream connections.\n  // To setup TLS, set a transport socket with name `tls` and\n  // :ref:`DownstreamTlsContext <envoy_api_msg_auth.DownstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.TransportSocket transport_socket = 6;\n\n  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no\n  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter\n  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.\n  string name = 7;\n}\n\n// Listener filter chain match configuration. This is a recursive structure which allows complex\n// nested match configurations to be built using various logical operators.\n//\n// Examples:\n//\n// * Matches if the destination port is 3306.\n//\n// .. code-block:: yaml\n//\n//  destination_port_range:\n//   start: 3306\n//   end: 3307\n//\n// * Matches if the destination port is 3306 or 15000.\n//\n// .. 
code-block:: yaml\n//\n//  or_match:\n//    rules:\n//      - destination_port_range:\n//          start: 3306\n//          end: 3307\n//      - destination_port_range:\n//          start: 15000\n//          end: 15001\n//\n// [#next-free-field: 6]\nmessage ListenerFilterChainMatchPredicate {\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    // The list of rules that make up the set.\n    repeated ListenerFilterChainMatchPredicate rules = 1\n        [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. The match configuration will match if the negated match condition matches.\n    ListenerFilterChainMatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // Match destination port. Particularly, the match evaluation must use the recovered local port if\n    // the owning listener filter is after :ref:`an original_dst listener filter <config_listener_filters_original_dst>`.\n    type.Int32Range destination_port_range = 5;\n  }\n}\n\nmessage ListenerFilter {\n  // The name of the filter to instantiate. 
The name must match a\n  // :ref:`supported filter <config_listener_filters>`.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated.\n  // See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Optional match predicate used to disable the filter. The filter is enabled when this field is empty.\n  // See :ref:`ListenerFilterChainMatchPredicate <envoy_api_msg_listener.ListenerFilterChainMatchPredicate>`\n  // for further examples.\n  ListenerFilterChainMatchPredicate filter_disabled = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/listener/quic_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.listener;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.listener\";\noption java_outer_classname = \"QuicConfigProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ListenerNS\";\noption ruby_package = \"Envoy.Api.V2.ListenerNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: QUIC listener Config]\n\n// Configuration specific to the QUIC protocol.\n// Next id: 4\nmessage QuicProtocolOptions {\n  // Maximum number of streams that the client can negotiate per connection. 100\n  // if not specified.\n  google.protobuf.UInt32Value max_concurrent_streams = 1;\n\n  // Maximum number of milliseconds that connection will be alive when there is\n  // no network activity. 300000ms if not specified.\n  google.protobuf.Duration idle_timeout = 2;\n\n  // Connection timeout in milliseconds before the crypto handshake is finished.\n  // 20000ms if not specified.\n  google.protobuf.Duration crypto_handshake_timeout = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.listener;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.listener\";\noption java_outer_classname = \"UdpListenerConfigProto\";\noption java_multiple_files = true;\noption csharp_namespace = \"Envoy.Api.V2.ListenerNS\";\noption ruby_package = \"Envoy.Api.V2.ListenerNS\";\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: UDP Listener Config]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage UdpListenerConfig {\n  // Used to look up UDP listener factory, matches \"raw_udp_listener\" or\n  // \"quic_listener\" to create a specific udp listener.\n  // If not specified, treat as \"raw_udp_listener\".\n  string udp_listener_name = 1;\n\n  // Used to create a specific listener factory. To some factory, e.g.\n  // \"raw_udp_listener\", config is not needed.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\nmessage ActiveRawUdpListenerConfig {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/socket_option.proto\";\nimport \"envoy/api/v2/listener/listener_components.proto\";\nimport \"envoy/api/v2/listener/udp_listener_config.proto\";\nimport \"envoy/config/filter/accesslog/v2/accesslog.proto\";\nimport \"envoy/config/listener/v2/api_listener.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"ListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Listener configuration]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// [#next-free-field: 23]\nmessage Listener {\n  enum DrainType {\n    // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check\n    // filter), listener removal/modification, and hot restart.\n    DEFAULT = 0;\n\n    // Drain in response to listener removal/modification and hot restart. This setting does not\n    // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress\n    // and egress listeners.\n    MODIFY_ONLY = 1;\n  }\n\n  // [#not-implemented-hide:]\n  message DeprecatedV1 {\n    // Whether the listener should bind to the port. A listener that doesn't\n    // bind can only receive connections redirected from other listeners that\n    // set use_original_dst parameter to true. Default is true.\n    //\n    // This is deprecated in v2, all Listeners will bind to their port. 
An\n    // additional filter chain must be created for every original destination\n    // port this listener may redirect to in v2, with the original port\n    // specified in the FilterChainMatch destination_port field.\n    //\n    // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.]\n    google.protobuf.BoolValue bind_to_port = 1;\n  }\n\n  // Configuration for listener connection balancing.\n  message ConnectionBalanceConfig {\n    // A connection balancer implementation that does exact balancing. This means that a lock is\n    // held during balancing so that connection counts are nearly exactly balanced between worker\n    // threads. This is \"nearly\" exact in the sense that a connection might close in parallel thus\n    // making the counts incorrect, but this should be rectified on the next accept. This balancer\n    // sacrifices accept throughput for accuracy and should be used when there are a small number of\n    // connections that rarely cycle (e.g., service mesh gRPC egress).\n    message ExactBalance {\n    }\n\n    oneof balance_type {\n      option (validate.required) = true;\n\n      // If specified, the listener will use the exact connection balancer.\n      ExactBalance exact_balance = 1;\n    }\n  }\n\n  reserved 14;\n\n  // The unique name by which this listener is known. If no name is provided,\n  // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically\n  // updated or removed via :ref:`LDS <config_listeners_lds>` a unique name must be provided.\n  string name = 1;\n\n  // The address that the listener should listen on. In general, the address must be unique, though\n  // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on\n  // Linux as the actual port will be allocated by the OS.\n  core.Address address = 2 [(validate.rules).message = {required: true}];\n\n  // A list of filter chains to consider for this listener. 
The\n  // :ref:`FilterChain <envoy_api_msg_listener.FilterChain>` with the most specific\n  // :ref:`FilterChainMatch <envoy_api_msg_listener.FilterChainMatch>` criteria is used on a\n  // connection.\n  //\n  // Example using SNI for filter chain selection can be found in the\n  // :ref:`FAQ entry <faq_how_to_setup_sni>`.\n  repeated listener.FilterChain filter_chains = 3;\n\n  // If a connection is redirected using *iptables*, the port on which the proxy\n  // receives it might be different from the original destination address. When this flag is set to\n  // true, the listener hands off redirected connections to the listener associated with the\n  // original destination address. If there is no listener associated with the original destination\n  // address, the connection is handled by the listener that receives it. Defaults to false.\n  //\n  // .. attention::\n  //\n  //   This field is deprecated. Use :ref:`an original_dst <config_listener_filters_original_dst>`\n  //   :ref:`listener filter <envoy_api_field_Listener.listener_filters>` instead.\n  //\n  //   Note that hand off to another listener is *NOT* performed without this flag. 
Once\n  //   :ref:`FilterChainMatch <envoy_api_msg_listener.FilterChainMatch>` is implemented this flag\n  //   will be removed, as filter chain matching can be used to select a filter chain based on the\n  //   restored destination address.\n  google.protobuf.BoolValue use_original_dst = 4 [deprecated = true];\n\n  // Soft limit on size of the listener’s new connection read and write buffers.\n  // If unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5;\n\n  // Listener metadata.\n  core.Metadata metadata = 6;\n\n  // [#not-implemented-hide:]\n  DeprecatedV1 deprecated_v1 = 7;\n\n  // The type of draining to perform at a listener-wide level.\n  DrainType drain_type = 8;\n\n  // Listener filters have the opportunity to manipulate and augment the connection metadata that\n  // is used in connection filter chain matching, for example. These filters are run before any in\n  // :ref:`filter_chains <envoy_api_field_Listener.filter_chains>`. Order matters as the\n  // filters are processed sequentially right after a socket has been accepted by the listener, and\n  // before a connection is created.\n  // UDP Listener filters can be specified when the protocol in the listener socket address in\n  // :ref:`protocol <envoy_api_field_core.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_core.SocketAddress.Protocol.UDP>`.\n  // UDP listeners currently support a single filter.\n  repeated listener.ListenerFilter listener_filters = 9;\n\n  // The timeout to wait for all listener filters to complete operation. If the timeout is reached,\n  // the accepted socket is closed without a connection being created unless\n  // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the\n  // timeout. 
If not specified, a default timeout of 15s is used.\n  google.protobuf.Duration listener_filters_timeout = 15;\n\n  // Whether a connection should be created when listener filters timeout. Default is false.\n  //\n  // .. attention::\n  //\n  //   Some listener filters, such as :ref:`Proxy Protocol filter\n  //   <config_listener_filters_proxy_protocol>`, should not be used with this option. It will cause\n  //   unexpected behavior when a connection is created.\n  bool continue_on_listener_filters_timeout = 17;\n\n  // Whether the listener should be set as a transparent socket.\n  // When this flag is set to true, connections can be redirected to the listener using an\n  // *iptables* *TPROXY* target, in which case the original source and destination addresses and\n  // ports are preserved on accepted connections. This flag should be used in combination with\n  // :ref:`an original_dst <config_listener_filters_original_dst>` :ref:`listener filter\n  // <envoy_api_field_Listener.listener_filters>` to mark the connections' local addresses as\n  // \"restored.\" This can be used to hand off each redirected connection to another listener\n  // associated with the connection's destination address. Direct connections to the socket without\n  // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are\n  // therefore treated as if they were redirected.\n  // When this flag is set to false, the listener's socket is explicitly reset as non-transparent.\n  // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability.\n  // When this flag is not set (default), the socket is not modified, i.e. the transparent option\n  // is neither set nor reset.\n  google.protobuf.BoolValue transparent = 10;\n\n  // Whether the listener should set the *IP_FREEBIND* socket option. When this\n  // flag is set to true, listeners can be bound to an IP address that is not\n  // configured on the system running Envoy. 
When this flag is set to false, the\n  // option *IP_FREEBIND* is disabled on the socket. When this flag is not set\n  // (default), the socket is not modified, i.e. the option is neither enabled\n  // nor disabled.\n  google.protobuf.BoolValue freebind = 11;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.SocketOption socket_options = 13;\n\n  // Whether the listener should accept TCP Fast Open (TFO) connections.\n  // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on\n  // the socket, with a queue length of the specified size\n  // (see `details in RFC7413 <https://tools.ietf.org/html/rfc7413#section-5.1>`_).\n  // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket.\n  // When this flag is not set (default), the socket is not modified,\n  // i.e. the option is neither enabled nor disabled.\n  //\n  // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable\n  // TCP_FASTOPEN.\n  // See `ip-sysctl.txt <https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt>`_.\n  //\n  // On macOS, only values of 0, 1, and unset are valid; other values may result in an error.\n  // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter.\n  google.protobuf.UInt32Value tcp_fast_open_queue_length = 12;\n\n  // Specifies the intended direction of the traffic relative to the local Envoy.\n  core.TrafficDirection traffic_direction = 16;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_core.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_core.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // listener to create, i.e. :ref:`udp_listener_name\n  // <envoy_api_field_listener.UdpListenerConfig.udp_listener_name>` = \"raw_udp_listener\" for\n  // creating a packet-oriented UDP listener. 
If not present, treat it as \"raw_udp_listener\".\n  listener.UdpListenerConfig udp_listener_config = 18;\n\n  // Used to represent an API listener, which is used in non-proxy clients. The type of API\n  // exposed to the non-proxy application depends on the type of API listener.\n  // When this field is set, no other field except for :ref:`name<envoy_api_field_Listener.name>`\n  // should be set.\n  //\n  // .. note::\n  //\n  //  Currently only one ApiListener can be installed; and it can only be done via bootstrap config,\n  //  not LDS.\n  //\n  // [#next-major-version: In the v3 API, instead of this messy approach where the socket\n  // listener fields are directly in the top-level Listener message and the API listener types\n  // are in the ApiListener message, the socket listener messages should be in their own message,\n  // and the top-level Listener should essentially be a oneof that selects between the\n  // socket listener and the various types of API listener. That way, a given Listener message\n  // can structurally only contain the fields of the relevant type.]\n  config.listener.v2.ApiListener api_listener = 19;\n\n  // The listener's connection balancer configuration, currently only applicable to TCP listeners.\n  // If no configuration is specified, Envoy will not attempt to balance active connections between\n  // worker threads.\n  ConnectionBalanceConfig connection_balance_config = 20;\n\n  // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and\n  // create one socket for each worker thread. This makes inbound connections\n  // distribute among worker threads roughly evenly in cases where there are a high number\n  // of connections. 
When this flag is set to false, all worker threads share one socket.\n  //\n  // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart\n  // (see `3rd paragraph in 'soreuseport' commit message\n  // <https://github.com/torvalds/linux/commit/c617f398edd4db2b8567a28e89>`_).\n  // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket\n  // <https://github.com/torvalds/linux/commit/40a1227ea845a37ab197dd1caffb60b047fa36b1>`_.\n  bool reuse_port = 21;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by this listener.\n  repeated config.filter.accesslog.v2.AccessLog access_log = 22;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/ratelimit/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.ratelimit;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.ratelimit\";\noption java_outer_classname = \"RatelimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.common.ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common rate limit components]\n\n// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to\n// determine the final rate limit key and overall allowed limit. Here are some examples of how\n// they might be used for the domain \"envoy\".\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"remote_address\": \"10.0.0.1\"]\n//\n// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The\n// configuration supplies a default limit for the *remote_address* key. If there is a desire to\n// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the\n// configuration.\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"path\": \"/foo/bar\"]\n//\n// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if\n// configured that way in the service).\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"path\": \"/foo/bar\"], [\"remote_address\": \"10.0.0.1\"]\n//\n// What it does: Limits unauthenticated traffic to a specific path for a specific IP address.\n// Like (1) we can raise/block specific IP addresses if we want with an override configuration.\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"true\"], [\"client_id\": \"foo\"]\n//\n// What it does: Limits all traffic for an authenticated client \"foo\"\n//\n// .. 
code-block:: cpp\n//\n//   [\"authenticated\": \"true\"], [\"client_id\": \"foo\"], [\"path\": \"/foo/bar\"]\n//\n// What it does: Limits traffic to a specific path for an authenticated client \"foo\"\n//\n// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired.\n// This enables building complex application scenarios with a generic backend.\nmessage RateLimitDescriptor {\n  message Entry {\n    // Descriptor key.\n    string key = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Descriptor value.\n    string value = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Descriptor entries.\n  repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/rds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\nimport public \"envoy/api/v2/route.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"RdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: RDS]\n\n// The resource_names field in DiscoveryRequest specifies a route configuration.\n// This allows an Envoy configuration with multiple HTTP listeners (and\n// associated HTTP connection manager filters) to use different route\n// configurations. Each listener will bind its HTTP connection manager filter to\n// a route table via this identifier.\nservice RouteDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.RouteConfiguration\";\n\n  rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:routes\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for\n// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered\n// during the processing of an HTTP request if a route for the request cannot be resolved. 
The\n// :ref:`resource_names_subscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>`\n// field contains a list of virtual host names or aliases to track. The contents of an alias would\n// be the contents of a *host* or *authority* header used to make an http request. An xDS server\n// will match an alias to a virtual host based on the content of :ref:`domains'\n// <envoy_api_field_route.VirtualHost.domains>` field. The *resource_names_unsubscribe* field\n// contains a list of virtual host names that have been :ref:`unsubscribed\n// <xds_protocol_unsubscribe>` from the routing table associated with the RouteConfiguration.\nservice VirtualHostDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.route.VirtualHost\";\n\n  rpc DeltaVirtualHosts(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage RdsDummy {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/route/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"//envoy/type/tracing/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/route/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.route;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/route/route_components.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.route\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/route/route_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2.route;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/matcher/regex.proto\";\nimport \"envoy/type/matcher/string.proto\";\nimport \"envoy/type/percent.proto\";\nimport \"envoy/type/range.proto\";\nimport \"envoy/type/tracing/v2/custom_tag.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2.route\";\noption java_outer_classname = \"RouteComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP route components]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// The top level element in the routing configuration is a virtual host. Each virtual host has\n// a logical name as well as a set of domains that get routed to it based on the incoming request's\n// host header. This allows a single listener to service multiple top level domain path trees. Once\n// a virtual host is selected based on the domain, the routes are processed in order to see which\n// upstream cluster to route to or whether to perform a redirect.\n// [#next-free-field: 21]\nmessage VirtualHost {\n  enum TlsRequirementType {\n    // No TLS requirement for the virtual host.\n    NONE = 0;\n\n    // External requests must use TLS. If a request is external and it is not\n    // using TLS, a 301 redirect will be sent telling the client to use HTTPS.\n    EXTERNAL_ONLY = 1;\n\n    // All requests must use TLS. 
If a request is not using TLS, a 301 redirect\n    // will be sent telling the client to use HTTPS.\n    ALL = 2;\n  }\n\n  reserved 9;\n\n  // The logical name of the virtual host. This is used when emitting certain\n  // statistics but is not relevant for routing.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // A list of domains (host/authority header) that will be matched to this\n  // virtual host. Wildcard hosts are supported in the suffix or prefix form.\n  //\n  // Domain search order:\n  //  1. Exact domain names: ``www.foo.com``.\n  //  2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``.\n  //  3. Prefix domain wildcards: ``foo.*`` or ``foo-*``.\n  //  4. Special wildcard ``*`` matching any domain.\n  //\n  // .. note::\n  //\n  //   The wildcard will not match the empty string.\n  //   e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``.\n  //   The longest wildcards match first.\n  //   Only a single virtual host in the entire route configuration can match on ``*``. A domain\n  //   must be unique across all virtual hosts or the config will fail to load.\n  //\n  // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE.\n  repeated string domains = 2 [(validate.rules).repeated = {\n    min_items: 1\n    items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}}\n  }];\n\n  // The list of routes that will be matched, in order, for incoming requests.\n  // The first route that matches will be used.\n  repeated Route routes = 3;\n\n  // Specifies the type of TLS enforcement the virtual host expects. If this option is not\n  // specified, there is no TLS requirement for the virtual host.\n  TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // A list of virtual clusters defined for this virtual host. 
Virtual clusters\n  // are used for additional statistics gathering.\n  repeated VirtualCluster virtual_clusters = 5;\n\n  // Specifies a set of rate limit configurations that will be applied to the\n  // virtual host.\n  repeated RateLimit rate_limits = 6;\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption request_headers_to_add = 7\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // handled by this virtual host.\n  repeated string request_headers_to_remove = 13;\n\n  // Specifies a list of HTTP headers that should be added to each response\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_RouteConfiguration`. 
For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // handled by this virtual host.\n  repeated string response_headers_to_remove = 11;\n\n  // Indicates that the virtual host has a CORS policy.\n  CorsPolicy cors = 8;\n\n  // The per_filter_config field can be used to provide virtual host-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n  // for if and how it is utilized.\n  map<string, google.protobuf.Struct> per_filter_config = 12 [deprecated = true];\n\n  // The per_filter_config field can be used to provide virtual host-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n  // for if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 15;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the upstream request. Setting this option will cause it to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the upstream\n  // will see the attempt count as perceived by the second Envoy. 
Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_config.filter.http.router.v2.Router.suppress_envoy_headers>` flag.\n  //\n  // [#next-major-version: rename to include_attempt_count_in_request.]\n  bool include_request_attempt_count = 14;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the downstream response. Setting this option will cause the router to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the downstream\n  // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_config.filter.http.router.v2.Router.suppress_envoy_headers>` flag.\n  bool include_attempt_count_in_response = 19;\n\n  // Indicates the retry policy for all routes in this virtual host. Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  RetryPolicy retry_policy = 16;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. Note that setting a route level entry\n  // will take precedence over this config and it'll be treated independently (e.g.: values are not\n  // inherited). :ref:`Retry policy <envoy_api_field_route.VirtualHost.retry_policy>` should not be\n  // set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 20;\n\n  // Indicates the hedge policy for all routes in this virtual host. 
Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  HedgePolicy hedge_policy = 17;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum\n  // value of this and the listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18;\n}\n\n// A filter-defined action type.\nmessage FilterAction {\n  google.protobuf.Any action = 1;\n}\n\n// A route is both a specification of how to match a request as well as an indication of what to do\n// next (e.g., redirect, forward, rewrite, etc.).\n//\n// .. attention::\n//\n//   Envoy supports routing on HTTP method via :ref:`header matching\n//   <envoy_api_msg_route.HeaderMatcher>`.\n// [#next-free-field: 18]\nmessage Route {\n  reserved 6;\n\n  // Name for the route.\n  string name = 14;\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  oneof action {\n    option (validate.required) = true;\n\n    // Route request to some upstream cluster.\n    RouteAction route = 2;\n\n    // Return a redirect.\n    RedirectAction redirect = 3;\n\n    // Return an arbitrary HTTP response directly, without proxying.\n    DirectResponseAction direct_response = 7;\n\n    // [#not-implemented-hide:]\n    // If true, a filter will define the action (e.g., it could dynamically generate the\n    // RouteAction).\n    FilterAction filter_action = 17;\n  }\n\n  // The Metadata field can be used to provide additional information\n  // about the route. 
It can be used for configuration, stats, and logging.\n  // The metadata should go under the filter namespace that will need it.\n  // For instance, if the metadata is intended for the Router filter,\n  // the filter name should be specified as *envoy.filters.http.router*.\n  core.Metadata metadata = 4;\n\n  // Decorator for the matched route.\n  Decorator decorator = 5;\n\n  // The per_filter_config field can be used to provide route-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>` for\n  // if and how it is utilized.\n  map<string, google.protobuf.Struct> per_filter_config = 8 [deprecated = true];\n\n  // The typed_per_filter_config field can be used to provide route-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>` for\n  // if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 13;\n\n  // Specifies a set of headers that will be added to requests matching this\n  // route. Headers specified at this level are applied before headers from the\n  // enclosing :ref:`envoy_api_msg_route.VirtualHost` and\n  // :ref:`envoy_api_msg_RouteConfiguration`. 
For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption request_headers_to_add = 9\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // matching this route.\n  repeated string request_headers_to_remove = 12;\n\n  // Specifies a set of headers that will be added to responses to requests\n  // matching this route. Headers specified at this level are applied before\n  // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and\n  // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on\n  // :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // to requests matching this route.\n  repeated string response_headers_to_remove = 11;\n\n  // Presence of the object defines whether the connection manager's tracing configuration\n  // is overridden by this route specific instance.\n  Tracing tracing = 15;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set, the bytes actually buffered will be the minimum value of this and the\n  // listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16;\n}\n\n// Compared to the :ref:`cluster <envoy_api_field_route.RouteAction.cluster>` field that specifies a\n// single upstream cluster as the target of a request, the :ref:`weighted_clusters\n// <envoy_api_field_route.RouteAction.weighted_clusters>` option allows for specification of\n// multiple upstream clusters along 
with weights that indicate the percentage of\n// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the\n// weights.\nmessage WeightedCluster {\n  // [#next-free-field: 11]\n  message ClusterWeight {\n    reserved 7;\n\n    // Name of the upstream cluster. The cluster must exist in the\n    // :ref:`cluster manager configuration <config_cluster_manager>`.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // An integer between 0 and :ref:`total_weight\n    // <envoy_api_field_route.WeightedCluster.total_weight>`. When a request matches the route,\n    // the choice of an upstream cluster is determined by its weight. The sum of weights across all\n    // entries in the clusters array must add up to the total_weight, which defaults to 100.\n    google.protobuf.UInt32Value weight = 2;\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field will be considered for\n    // load balancing. Note that this will be merged with what's provided in\n    // :ref:`RouteAction.metadata_match <envoy_api_field_route.RouteAction.metadata_match>`, with\n    // values here taking precedence. The filter name should be specified as *envoy.lb*.\n    core.Metadata metadata_match = 3;\n\n    // Specifies a list of headers to be added to requests when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_route.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and\n    // :ref:`envoy_api_msg_RouteConfiguration`. 
For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.HeaderValueOption request_headers_to_add = 4\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request when\n    // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`.\n    repeated string request_headers_to_remove = 9;\n\n    // Specifies a list of headers to be added to responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_route.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and\n    // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.HeaderValueOption response_headers_to_add = 5\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of headers to be removed from responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_route.RouteAction`.\n    repeated string response_headers_to_remove = 6;\n\n    // The per_filter_config field can be used to provide weighted cluster-specific\n    // configurations for filters. The key should match the filter name, such as\n    // *envoy.filters.http.buffer* for the HTTP buffer filter. 
Use of this field is filter\n    // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n    // for if and how it is utilized.\n    map<string, google.protobuf.Struct> per_filter_config = 8 [deprecated = true];\n\n    // The per_filter_config field can be used to provide weighted cluster-specific\n    // configurations for filters. The key should match the filter name, such as\n    // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n    // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n    // for if and how it is utilized.\n    map<string, google.protobuf.Any> typed_per_filter_config = 10;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Specifies the total weight across all clusters. The sum of all cluster weights must equal this\n  // value, which must be greater than 0. Defaults to 100.\n  google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies the runtime key prefix that should be used to construct the\n  // runtime keys associated with each cluster. When the *runtime_key_prefix* is\n  // specified, the router will look for weights associated with each upstream\n  // cluster under the key *runtime_key_prefix* + \".\" + *cluster[i].name* where\n  // *cluster[i]* denotes an entry in the clusters array field. If the runtime\n  // key for the cluster does not exist, the value specified in the\n  // configuration file will be used as the default weight. 
See the :ref:`runtime documentation\n  // <operations_runtime>` for how key names map to the underlying implementation.\n  string runtime_key_prefix = 2;\n}\n\n// [#next-free-field: 12]\nmessage RouteMatch {\n  message GrpcRouteMatchOptions {\n  }\n\n  message TlsContextMatchOptions {\n    // If specified, the route will match against whether or not a certificate is presented.\n    // If not specified, certificate presentation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue presented = 1;\n\n    // If specified, the route will match against whether or not a certificate is validated.\n    // If not specified, certificate validation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue validated = 2;\n  }\n\n  reserved 5;\n\n  oneof path_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route is a prefix rule meaning that the prefix must\n    // match the beginning of the *:path* header.\n    string prefix = 1;\n\n    // If specified, the route is an exact path rule meaning that the path must\n    // exactly match the *:path* header once the query string is removed.\n    string path = 2;\n\n    // If specified, the route is a regular expression rule meaning that the\n    // regex must match the *:path* header once the query string is removed. The entire path\n    // (without the query string) must match the regex. The rule will not match if only a\n    // subsequence of the *:path* header matches the regex. The regex grammar is defined `here\n    // <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.\n    //\n    // Examples:\n    //\n    // * The regex ``/b[io]t`` matches the path */bit*\n    // * The regex ``/b[io]t`` matches the path */bot*\n    // * The regex ``/b[io]t`` does not match the path */bite*\n    // * The regex ``/b[io]t`` does not match the path */bit/bot*\n    //\n    // .. 
attention::\n    //   This field has been deprecated in favor of `safe_regex` as it is not safe for use with\n    //   untrusted input in all cases.\n    string regex = 3 [\n      deprecated = true,\n      (validate.rules).string = {max_bytes: 1024},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n\n    // If specified, the route is a regular expression rule meaning that the\n    // regex must match the *:path* header once the query string is removed. The entire path\n    // (without the query string) must match the regex. The rule will not match if only a\n    // subsequence of the *:path* header matches the regex.\n    //\n    // [#next-major-version: In the v3 API we should redo how path specification works such\n    // that we utilize StringMatcher, and additionally have consistent options around whether we\n    // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive\n    // to deprecate the existing options. We should even consider whether we want to do away with\n    // path_specifier entirely and just rely on a set of header matchers which can already match\n    // on :path, etc. The issue with that is it is unclear how to generically deal with query string\n    // stripping. This needs more thought.]\n    type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];\n  }\n\n  // Indicates that prefix/path matching should be case sensitive. The default\n  // is true.\n  google.protobuf.BoolValue case_sensitive = 4;\n\n  // Indicates that the route should additionally match on a runtime key. Every time the route\n  // is considered for a match, it must also fall under the percentage of matches indicated by\n  // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the\n  // number is <= the value of the numerator N, or if the key is not present, the default\n  // value, the router continues to evaluate the remaining match criteria. 
A runtime_fraction\n  // route configuration can be used to roll out route changes in a gradual manner without full\n  // code/config deploys. Refer to the :ref:`traffic shifting\n  // <config_http_conn_man_route_table_traffic_splitting_shift>` docs for additional documentation.\n  //\n  // .. note::\n  //\n  //    Parsing this field is implemented such that the runtime key's data may be represented\n  //    as a FractionalPercent proto represented as JSON/YAML and may also be represented as an\n  //    integer with the assumption that the value is an integral percentage out of 100. For\n  //    instance, a runtime key lookup returning the value \"42\" would parse as a FractionalPercent\n  //    whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics.\n  core.RuntimeFractionalPercent runtime_fraction = 9;\n\n  // Specifies a set of headers that the route should match on. The router will\n  // check the request’s headers against all the specified headers in the route\n  // config. A match will happen if all the headers in the route are present in\n  // the request with the same values (or based on presence if the value field\n  // is not in the config).\n  repeated HeaderMatcher headers = 6;\n\n  // Specifies a set of URL query parameters on which the route should\n  // match. The router will check the query string from the *path* header\n  // against all the specified query parameters. If the number of specified\n  // query parameters is nonzero, they all must match the *path* header's\n  // query string for a match to occur.\n  repeated QueryParameterMatcher query_parameters = 7;\n\n  // If specified, only gRPC requests will be matched. 
The router will check\n  // that the content-type header has a application/grpc or one of the various\n  // application/grpc+ values.\n  GrpcRouteMatchOptions grpc = 8;\n\n  // If specified, the client tls context will be matched against the defined\n  // match options.\n  //\n  // [#next-major-version: unify with RBAC]\n  TlsContextMatchOptions tls_context = 11;\n}\n\n// [#next-free-field: 12]\nmessage CorsPolicy {\n  // Specifies the origins that will be allowed to do CORS requests.\n  //\n  // An origin is allowed if either allow_origin or allow_origin_regex match.\n  //\n  // .. attention::\n  //  This field has been deprecated in favor of `allow_origin_string_match`.\n  repeated string allow_origin = 1\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Specifies regex patterns that match allowed origins.\n  //\n  // An origin is allowed if either allow_origin or allow_origin_regex match.\n  //\n  // .. attention::\n  //   This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for\n  //   use with untrusted input in all cases.\n  repeated string allow_origin_regex = 8\n      [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}];\n\n  // Specifies string patterns that match allowed origins. 
An origin is allowed if any of the\n  // string matchers match.\n  repeated type.matcher.StringMatcher allow_origin_string_match = 11;\n\n  // Specifies the content for the *access-control-allow-methods* header.\n  string allow_methods = 2;\n\n  // Specifies the content for the *access-control-allow-headers* header.\n  string allow_headers = 3;\n\n  // Specifies the content for the *access-control-expose-headers* header.\n  string expose_headers = 4;\n\n  // Specifies the content for the *access-control-max-age* header.\n  string max_age = 5;\n\n  // Specifies whether the resource allows credentials.\n  google.protobuf.BoolValue allow_credentials = 6;\n\n  oneof enabled_specifier {\n    // Specifies if the CORS filter is enabled. Defaults to true. Only effective on route.\n    //\n    // .. attention::\n    //\n    //   **This field is deprecated**. Set the\n    //   :ref:`filter_enabled<envoy_api_field_route.CorsPolicy.filter_enabled>` field instead.\n    google.protobuf.BoolValue enabled = 7\n        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n    // Specifies the % of requests for which the CORS filter is enabled.\n    //\n    // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS\n    // filter will be enabled for 100% of the requests.\n    //\n    // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is\n    // specified, Envoy will lookup the runtime key to get the percentage of requests to filter.\n    core.RuntimeFractionalPercent filter_enabled = 9;\n  }\n\n  // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not\n  // enforced.\n  //\n  // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. 
One of those\n  // fields have to explicitly disable the filter in order for this setting to take effect.\n  //\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* to determine if it's valid but will not enforce any policies.\n  core.RuntimeFractionalPercent shadow_enabled = 10;\n}\n\n// [#next-free-field: 34]\nmessage RouteAction {\n  enum ClusterNotFoundResponseCode {\n    // HTTP status code - 503 Service Unavailable.\n    SERVICE_UNAVAILABLE = 0;\n\n    // HTTP status code - 404 Not Found.\n    NOT_FOUND = 1;\n  }\n\n  // Configures :ref:`internal redirect <arch_overview_internal_redirects>` behavior.\n  enum InternalRedirectAction {\n    PASS_THROUGH_INTERNAL_REDIRECT = 0;\n    HANDLE_INTERNAL_REDIRECT = 1;\n  }\n\n  // The router is capable of shadowing traffic from one cluster to another. The current\n  // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n  // respond before returning the response from the primary cluster. All normal statistics are\n  // collected for the shadow cluster making this feature useful for testing.\n  //\n  // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is\n  // useful for logging. For example, *cluster1* becomes *cluster1-shadow*.\n  //\n  // .. note::\n  //\n  //   Shadowing will not be triggered if the primary cluster does not exist.\n  message RequestMirrorPolicy {\n    // Specifies the cluster that requests will be mirrored to. The cluster must\n    // exist in the cluster manager configuration.\n    string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // If not specified, all requests to the target cluster will be mirrored. If\n    // specified, Envoy will lookup the runtime key to get the % of requests to\n    // mirror. 
Valid values are from 0 to 10000, allowing for increments of\n    // 0.01% of requests to be mirrored. If the runtime key is specified in the\n    // configuration but not present in runtime, 0 is the default and thus 0% of\n    // requests will be mirrored.\n    //\n    // .. attention::\n    //\n    //   **This field is deprecated**. Set the\n    //   :ref:`runtime_fraction\n    //   <envoy_api_field_route.RouteAction.RequestMirrorPolicy.runtime_fraction>`\n    //   field instead. Mirroring occurs if both this and\n    //   <envoy_api_field_route.RouteAction.RequestMirrorPolicy.runtime_fraction>`\n    //   are not set.\n    string runtime_key = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n    // If not specified, all requests to the target cluster will be mirrored.\n    //\n    // If specified, this field takes precedence over the `runtime_key` field and requests must also\n    // fall under the percentage of matches indicated by this field.\n    //\n    // For some fraction N/D, a random number in the range [0,D) is selected. If the\n    // number is <= the value of the numerator N, or if the key is not present, the default\n    // value, the request will be mirrored.\n    core.RuntimeFractionalPercent runtime_fraction = 3;\n\n    // Determines if the trace span should be sampled. Defaults to true.\n    google.protobuf.BoolValue trace_sampled = 4;\n  }\n\n  // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer\n  // <arch_overview_load_balancing_types>`.\n  // [#next-free-field: 7]\n  message HashPolicy {\n    message Header {\n      // The name of the request header that will be used to obtain the hash\n      // key. 
If the request header is not present, no hash will be produced.\n      string header_name = 1 [\n        (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}\n      ];\n    }\n\n    // Envoy supports two types of cookie affinity:\n    //\n    // 1. Passive. Envoy takes a cookie that's present in the cookies header and\n    //    hashes on its value.\n    //\n    // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL)\n    //    on the first request from the client in its response to the client,\n    //    based on the endpoint the request gets sent to. The client then\n    //    presents this on the next and all subsequent requests. The hash of\n    //    this is sufficient to ensure these requests get sent to the same\n    //    endpoint. The cookie is generated by hashing the source and\n    //    destination ports and addresses so that multiple independent HTTP2\n    //    streams on the same connection will independently receive the same\n    //    cookie, even if they arrive at the Envoy simultaneously.\n    message Cookie {\n      // The name of the cookie that will be used to obtain the hash key. If the\n      // cookie is not present and ttl below is not set, no hash will be\n      // produced.\n      string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // If specified, a cookie with the TTL will be generated if the cookie is\n      // not present. If the TTL is present and zero, the generated cookie will\n      // be a session cookie.\n      google.protobuf.Duration ttl = 2;\n\n      // The name of the path for the cookie. If no path is specified here, no path\n      // will be set for the cookie.\n      string path = 3;\n    }\n\n    message ConnectionProperties {\n      // Hash on source IP address.\n      bool source_ip = 1;\n    }\n\n    message QueryParameter {\n      // The name of the URL query parameter that will be used to obtain the hash\n      // key. 
If the parameter is not present, no hash will be produced. Query\n      // parameter names are case-sensitive.\n      string name = 1 [(validate.rules).string = {min_bytes: 1}];\n    }\n\n    message FilterState {\n      // The name of the Object in the per-request filterState, which is an\n      // Envoy::Http::Hashable object. If there is no data associated with the key,\n      // or the stored object is not Envoy::Http::Hashable, no hash will be produced.\n      string key = 1 [(validate.rules).string = {min_bytes: 1}];\n    }\n\n    oneof policy_specifier {\n      option (validate.required) = true;\n\n      // Header hash policy.\n      Header header = 1;\n\n      // Cookie hash policy.\n      Cookie cookie = 2;\n\n      // Connection properties hash policy.\n      ConnectionProperties connection_properties = 3;\n\n      // Query parameter hash policy.\n      QueryParameter query_parameter = 5;\n\n      // Filter state hash policy.\n      FilterState filter_state = 6;\n    }\n\n    // The flag that short-circuits the hash computing. 
This field provides a\n    // 'fallback' style of configuration: \"if a terminal policy doesn't work,\n    // fallback to rest of the policy list\", it saves time when the terminal\n    // policy works.\n    //\n    // If true, and there is already a hash computed, ignore rest of the\n    // list of hash polices.\n    // For example, if the following hash methods are configured:\n    //\n    //  ========= ========\n    //  specifier terminal\n    //  ========= ========\n    //  Header A  true\n    //  Header B  false\n    //  Header C  false\n    //  ========= ========\n    //\n    // The generateHash process ends if policy \"header A\" generates a hash, as\n    // it's a terminal policy.\n    bool terminal = 4;\n  }\n\n  // Allows enabling and disabling upgrades on a per-route basis.\n  // This overrides any enabled/disabled upgrade filter chain specified in the\n  // HttpConnectionManager\n  // :ref:`upgrade_configs\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.upgrade_configs>`\n  // but does not affect any custom filter chain specified there.\n  message UpgradeConfig {\n    // The case-insensitive name of this upgrade, e.g. \"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type] will be proxied upstream.\n    string upgrade_type = 1\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Determines if upgrades are available on this route. 
Defaults to true.\n    google.protobuf.BoolValue enabled = 2;\n  }\n\n  reserved 12, 18, 19, 16, 22, 21;\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // HTTP header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist, Envoy will\n    // return a 404 response.\n    //\n    // .. attention::\n    //\n    //   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1\n    //   *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n    string cluster_header = 2\n        [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster. See\n    // :ref:`traffic splitting <config_http_conn_man_route_table_traffic_splitting_split>`\n    // for additional documentation.\n    WeightedCluster weighted_clusters = 3;\n  }\n\n  // The HTTP status code to use when configured cluster is not found.\n  // The default response code is 503 Service Unavailable.\n  ClusterNotFoundResponseCode cluster_not_found_response_code = 20\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n  // in the upstream cluster with metadata matching what's set in this field will be considered\n  // for load balancing. If using :ref:`weighted_clusters\n  // <envoy_api_field_route.RouteAction.weighted_clusters>`, metadata will be merged, with values\n  // provided there taking precedence. 
The filter name should be specified as *envoy.lb*.\n  core.Metadata metadata_match = 4;\n\n  // Indicates that during forwarding, the matched prefix (or path) should be\n  // swapped with this value. This option allows application URLs to be rooted\n  // at a different path from those exposed at the reverse proxy layer. The router filter will\n  // place the original path before rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of *prefix_rewrite* or\n  // :ref:`regex_rewrite <envoy_api_field_route.RouteAction.regex_rewrite>`\n  // may be specified.\n  //\n  // .. attention::\n  //\n  //   Pay careful attention to the use of trailing slashes in the\n  //   :ref:`route's match <envoy_api_field_route.Route.match>` prefix value.\n  //   Stripping a prefix from a path requires multiple Routes to handle all cases. For example,\n  //   rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single\n  //   :ref:`Route <envoy_api_msg_route.Route>`, as shown by the below config entries:\n  //\n  //   .. code-block:: yaml\n  //\n  //     - match:\n  //         prefix: \"/prefix/\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //     - match:\n  //         prefix: \"/prefix\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //\n  //   Having above entries in the config, requests to */prefix* will be stripped to */*, while\n  //   requests to */prefix/etc* will be stripped to */etc*.\n  string prefix_rewrite = 5\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Indicates that during forwarding, portions of the path that match the\n  // pattern should be rewritten, even allowing the substitution of capture\n  // groups from the pattern into the new path as specified by the rewrite\n  // substitution string. 
This is useful to allow application paths to be\n  // rewritten in a way that is aware of segments with variable content like\n  // identifiers. The router filter will place the original path as it was\n  // before the rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of :ref:`prefix_rewrite <envoy_api_field_route.RouteAction.prefix_rewrite>`\n  // or *regex_rewrite* may be specified.\n  //\n  // Examples using Google's `RE2 <https://github.com/google/re2>`_ engine:\n  //\n  // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution\n  //   string of ``\\2/instance/\\1`` would transform ``/service/foo/v1/api``\n  //   into ``/v1/api/instance/foo``.\n  //\n  // * The pattern ``one`` paired with a substitution string of ``two`` would\n  //   transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``.\n  //\n  // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of\n  //   ``\\1two\\2`` would replace only the first occurrence of ``one``,\n  //   transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``.\n  //\n  // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/``\n  //   would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to\n  //   ``/aaa/yyy/bbb``.\n  type.matcher.RegexMatchAndSubstitute regex_rewrite = 32;\n\n  oneof host_rewrite_specifier {\n    // Indicates that during forwarding, the host header will be swapped with\n    // this value.\n    string host_rewrite = 6 [\n      (validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false},\n      (udpa.annotations.field_migrate).rename = \"host_rewrite_literal\"\n    ];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the hostname of the upstream host chosen by the cluster manager. 
This\n    // option is applicable only when the destination cluster for a route is of\n    // type *strict_dns* or *logical_dns*. Setting this to true with other cluster\n    // types has no effect.\n    google.protobuf.BoolValue auto_host_rewrite = 7;\n\n    // Indicates that during forwarding, the host header will be swapped with the content of given\n    // downstream or :ref:`custom <config_http_conn_man_headers_custom_request_headers>` header.\n    // If header value is empty, host header is left intact.\n    //\n    // .. attention::\n    //\n    //   Pay attention to the potential security implications of using this option. Provided header\n    //   must come from trusted source.\n    string auto_host_rewrite_header = 29 [\n      (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false},\n      (udpa.annotations.field_migrate).rename = \"host_rewrite_header\"\n    ];\n  }\n\n  // Specifies the upstream timeout for the route. If not specified, the default is 15s. This\n  // spans between the point at which the entire downstream request (i.e. end-of-stream) has been\n  // processed and when the upstream response has been completely processed. A value of 0 will\n  // disable the route's timeout.\n  //\n  // .. note::\n  //\n  //   This timeout includes all retries. See also\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //   :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration timeout = 8;\n\n  // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout,\n  // although the connection manager wide :ref:`stream_idle_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  // will still apply. 
A value of 0 will completely disable the route's idle timeout, even if a\n  // connection manager stream idle timeout is configured.\n  //\n  // The idle timeout is distinct to :ref:`timeout\n  // <envoy_api_field_route.RouteAction.timeout>`, which provides an upper bound\n  // on the upstream response time; :ref:`idle_timeout\n  // <envoy_api_field_route.RouteAction.idle_timeout>` instead bounds the amount\n  // of time the request's stream may be idle.\n  //\n  // After header decoding, the idle timeout will apply on downstream and\n  // upstream request events. Each time an encode/decode event for headers or\n  // data is processed for the stream, the timer will be reset. If the timeout\n  // fires, the stream is terminated with a 408 Request Timeout error code if no\n  // upstream response header has been received, otherwise a stream reset\n  // occurs.\n  google.protobuf.Duration idle_timeout = 24;\n\n  // Indicates that the route has a retry policy. Note that if this is set,\n  // it'll take precedence over the virtual host level retry policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  RetryPolicy retry_policy = 9;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. Note that if this is set, it'll take\n  // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged,\n  // most internal one becomes the enforced policy). :ref:`Retry policy <envoy_api_field_route.VirtualHost.retry_policy>`\n  // should not be set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 33;\n\n  // Indicates that the route has a request mirroring policy.\n  //\n  // .. 
attention::\n  //   This field has been deprecated in favor of `request_mirror_policies` which supports one or\n  //   more mirroring policies.\n  RequestMirrorPolicy request_mirror_policy = 10 [deprecated = true];\n\n  // Indicates that the route has request mirroring policies.\n  repeated RequestMirrorPolicy request_mirror_policies = 30;\n\n  // Optionally specifies the :ref:`routing priority <arch_overview_http_routing_priority>`.\n  core.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies a set of rate limit configurations that could be applied to the\n  // route.\n  repeated RateLimit rate_limits = 13;\n\n  // Specifies if the rate limit filter should include the virtual host rate\n  // limits. By default, if the route configured rate limits, the virtual host\n  // :ref:`rate_limits <envoy_api_field_route.VirtualHost.rate_limits>` are not applied to the\n  // request.\n  google.protobuf.BoolValue include_vh_rate_limits = 14;\n\n  // Specifies a list of hash policies to use for ring hash load balancing. Each\n  // hash policy is evaluated individually and the combined result is used to\n  // route the request. The method of combination is deterministic such that\n  // identical lists of hash policies will produce the same hash. Since a hash\n  // policy examines specific parts of a request, it can fail to produce a hash\n  // (i.e. if the hashed header is not present). If (and only if) all configured\n  // hash policies fail to generate a hash, no hash will be produced for\n  // the route. In this case, the behavior is the same as if no hash policies\n  // were specified (i.e. the ring hash load balancer will choose a random\n  // backend). 
If a hash policy has the \"terminal\" attribute set to true, and\n  // there is already a hash generated, the hash is returned immediately,\n  // ignoring the rest of the hash policy list.\n  repeated HashPolicy hash_policy = 15;\n\n  // Indicates that the route has a CORS policy.\n  CorsPolicy cors = 17;\n\n  // If present, and the request is a gRPC request, use the\n  // `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_,\n  // or its default value (infinity) instead of\n  // :ref:`timeout <envoy_api_field_route.RouteAction.timeout>`, but limit the applied timeout\n  // to the maximum value specified here. If configured as 0, the maximum allowed timeout for\n  // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used\n  // and gRPC requests time out like any other requests using\n  // :ref:`timeout <envoy_api_field_route.RouteAction.timeout>` or its default.\n  // This can be used to prevent unexpected upstream request timeouts due to potentially long\n  // time gaps between gRPC request and response in gRPC streaming mode.\n  //\n  // .. note::\n  //\n  //    If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes\n  //    precedence over `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, when\n  //    both are present. See also\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //    :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration max_grpc_timeout = 23;\n\n  // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting\n  // the provided duration from the header. 
This is useful in allowing Envoy to set its global\n  // timeout to be less than that of the deadline imposed by the calling client, which makes it more\n  // likely that Envoy will handle the timeout instead of having the call canceled by the client.\n  // The offset will only be applied if the provided grpc_timeout is greater than the offset. This\n  // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning\n  // infinity).\n  google.protobuf.Duration grpc_timeout_offset = 28;\n\n  repeated UpgradeConfig upgrade_configs = 25;\n\n  InternalRedirectAction internal_redirect_action = 26;\n\n  // An internal redirect is handled, iff the number of previous internal redirects that a\n  // downstream request has encountered is lower than this value, and\n  // :ref:`internal_redirect_action <envoy_api_field_route.RouteAction.internal_redirect_action>`\n  // is set to :ref:`HANDLE_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_route.RouteAction.InternalRedirectAction.HANDLE_INTERNAL_REDIRECT>`\n  // In the case where a downstream request is bounced among multiple routes by internal redirect,\n  // the first route that hits this threshold, or has\n  // :ref:`internal_redirect_action <envoy_api_field_route.RouteAction.internal_redirect_action>`\n  // set to\n  // :ref:`PASS_THROUGH_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_route.RouteAction.InternalRedirectAction.PASS_THROUGH_INTERNAL_REDIRECT>`\n  // will pass the redirect back to downstream.\n  //\n  // If not specified, at most one redirect will be followed.\n  google.protobuf.UInt32Value max_internal_redirects = 31;\n\n  // Indicates that the route has a hedge policy. 
Note that if this is set,\n  // it'll take precedence over the virtual host level hedge policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  HedgePolicy hedge_policy = 27;\n}\n\n// HTTP retry :ref:`architecture overview <arch_overview_http_routing_retry>`.\n// [#next-free-field: 11]\nmessage RetryPolicy {\n  message RetryPriority {\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    oneof config_type {\n      google.protobuf.Struct config = 2 [deprecated = true];\n\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryHostPredicate {\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    oneof config_type {\n      google.protobuf.Struct config = 2 [deprecated = true];\n\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryBackOff {\n    // Specifies the base interval between retries. This parameter is required and must be greater\n    // than zero. Values less than 1 ms are rounded up to 1 ms.\n    // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's\n    // back-off algorithm.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n\n    // Specifies the maximum interval between retries. This parameter is optional, but must be\n    // greater than or equal to the `base_interval` if set. The default is 10 times the\n    // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion\n    // of Envoy's back-off algorithm.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  // Specifies the conditions under which retry takes place. 
These are the same\n  // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and\n  // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`.\n  string retry_on = 1;\n\n  // Specifies the allowed number of retries. This parameter is optional and\n  // defaults to 1. These are the same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-max-retries`.\n  google.protobuf.UInt32Value num_retries = 2;\n\n  // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The\n  // same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.\n  //\n  // .. note::\n  //\n  //   If left unspecified, Envoy will use the global\n  //   :ref:`route timeout <envoy_api_field_route.RouteAction.timeout>` for the request.\n  //   Consequently, when using a :ref:`5xx <config_http_filters_router_x-envoy-retry-on>` based\n  //   retry policy, a request that times out will not be retried as the total timeout budget\n  //   would have been exhausted.\n  google.protobuf.Duration per_try_timeout = 3;\n\n  // Specifies an implementation of a RetryPriority which is used to determine the\n  // distribution of load across priorities used for retries. Refer to\n  // :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more details.\n  RetryPriority retry_priority = 4;\n\n  // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host\n  // for retries. If any of the predicates reject the host, host selection will be reattempted.\n  // Refer to :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more\n  // details.\n  repeated RetryHostPredicate retry_host_predicate = 5;\n\n  // The maximum number of times host selection will be reattempted before giving up, at which\n  // point the host that was last selected will be routed to. 
If unspecified, this will default to\n  // retrying once.\n  int64 host_selection_retry_max_attempts = 6;\n\n  // HTTP status codes that should trigger a retry in addition to those specified by retry_on.\n  repeated uint32 retriable_status_codes = 7;\n\n  // Specifies parameters that control retry back off. This parameter is optional, in which case the\n  // default base interval is 25 milliseconds or, if set, the current value of the\n  // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times\n  // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries`\n  // describes Envoy's back-off algorithm.\n  RetryBackOff retry_back_off = 8;\n\n  // HTTP response headers that trigger a retry if present in the response. A retry will be\n  // triggered if any of the header matches match the upstream response headers.\n  // The field is only consulted if 'retriable-headers' retry policy is active.\n  repeated HeaderMatcher retriable_headers = 9;\n\n  // HTTP headers which must be present in the request for retries to be attempted.\n  repeated HeaderMatcher retriable_request_headers = 10;\n}\n\n// HTTP request hedging :ref:`architecture overview <arch_overview_http_routing_hedging>`.\nmessage HedgePolicy {\n  // Specifies the number of initial requests that should be sent upstream.\n  // Must be at least 1.\n  // Defaults to 1.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies a probability that an additional upstream request should be sent\n  // on top of what is specified by initial_requests.\n  // Defaults to 0.\n  // [#not-implemented-hide:]\n  type.FractionalPercent additional_request_chance = 2;\n\n  // Indicates that a hedged request should be sent when the per-try timeout\n  // is hit. 
This will only occur if the retry policy also indicates that a\n  // timed out request should be retried.\n  // Once a timed out request is retried due to per try timeout, the router\n  // filter will ensure that it is not retried again even if the returned\n  // response headers would otherwise be retried according the specified\n  // :ref:`RetryPolicy <envoy_api_msg_route.RetryPolicy>`.\n  // Defaults to false.\n  bool hedge_on_per_try_timeout = 3;\n}\n\n// [#next-free-field: 9]\nmessage RedirectAction {\n  enum RedirectResponseCode {\n    // Moved Permanently HTTP Status Code - 301.\n    MOVED_PERMANENTLY = 0;\n\n    // Found HTTP Status Code - 302.\n    FOUND = 1;\n\n    // See Other HTTP Status Code - 303.\n    SEE_OTHER = 2;\n\n    // Temporary Redirect HTTP Status Code - 307.\n    TEMPORARY_REDIRECT = 3;\n\n    // Permanent Redirect HTTP Status Code - 308.\n    PERMANENT_REDIRECT = 4;\n  }\n\n  // When the scheme redirection take place, the following rules apply:\n  //  1. If the source URI scheme is `http` and the port is explicitly\n  //     set to `:80`, the port will be removed after the redirection\n  //  2. 
If the source URI scheme is `https` and the port is explicitly\n  //     set to `:443`, the port will be removed after the redirection\n  oneof scheme_rewrite_specifier {\n    // The scheme portion of the URL will be swapped with \"https\".\n    bool https_redirect = 4;\n\n    // The scheme portion of the URL will be swapped with this value.\n    string scheme_redirect = 7;\n  }\n\n  // The host portion of the URL will be swapped with this value.\n  string host_redirect = 1\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // The port value of the URL will be swapped with this value.\n  uint32 port_redirect = 8;\n\n  oneof path_rewrite_specifier {\n    // The path portion of the URL will be swapped with this value.\n    // Please note that query string in path_redirect will override the\n    // request's query string and will not be stripped.\n    //\n    // For example, let's say we have the following routes:\n    //\n    // - match: { path: \"/old-path-1\" }\n    //   redirect: { path_redirect: \"/new-path-1\" }\n    // - match: { path: \"/old-path-2\" }\n    //   redirect: { path_redirect: \"/new-path-2\", strip-query: \"true\" }\n    // - match: { path: \"/old-path-3\" }\n    //   redirect: { path_redirect: \"/new-path-3?foo=1\", strip_query: \"true\" }\n    //\n    // 1. if request uri is \"/old-path-1?bar=1\", users will be redirected to \"/new-path-1?bar=1\"\n    // 2. if request uri is \"/old-path-2?bar=1\", users will be redirected to \"/new-path-2\"\n    // 3. if request uri is \"/old-path-3?bar=1\", users will be redirected to \"/new-path-3?foo=1\"\n    string path_redirect = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during redirection, the matched prefix (or path)\n    // should be swapped with this value. This option allows redirect URLs be dynamically created\n    // based on the request.\n    //\n    // .. 
attention::\n    //\n    //   Pay attention to the use of trailing slashes as mentioned in\n    //   :ref:`RouteAction's prefix_rewrite <envoy_api_field_route.RouteAction.prefix_rewrite>`.\n    string prefix_rewrite = 5\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // The HTTP status code to use in the redirect response. The default response\n  // code is MOVED_PERMANENTLY (301).\n  RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // Indicates that during redirection, the query portion of the URL will\n  // be removed. Default value is false.\n  bool strip_query = 6;\n}\n\nmessage DirectResponseAction {\n  // Specifies the HTTP response status to be returned.\n  uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}];\n\n  // Specifies the content of the response body. If this setting is omitted,\n  // no body is included in the generated response.\n  //\n  // .. note::\n  //\n  //   Headers can be specified using *response_headers_to_add* in the enclosing\n  //   :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or\n  //   :ref:`envoy_api_msg_route.VirtualHost`.\n  core.DataSource body = 2;\n}\n\nmessage Decorator {\n  // The operation name associated with the request matched to this route. If tracing is\n  // enabled, this information will be used as the span name reported for this request.\n  //\n  // .. note::\n  //\n  //   For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden\n  //   by the :ref:`x-envoy-decorator-operation\n  //   <config_http_filters_router_x-envoy-decorator-operation>` header.\n  string operation = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Whether the decorated details should be propagated to the other party. 
The default is true.\n  google.protobuf.BoolValue propagate = 2;\n}\n\nmessage Tracing {\n  // Target percentage of requests managed by this HTTP connection manager that will be force\n  // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n  // header is set. This field is a direct analog for the runtime variable\n  // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n  // <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.FractionalPercent client_sampling = 1;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be randomly\n  // selected for trace generation, if not requested by the client or not forced. This field is\n  // a direct analog for the runtime variable 'tracing.random_sampling' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.FractionalPercent random_sampling = 2;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be traced\n  // after all other sampling checks have been applied (client-directed, force tracing, random\n  // sampling). This field functions as an upper limit on the total configured sampling rate. For\n  // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n  // of client requests with the appropriate headers to be force traced. This field is a direct\n  // analog for the runtime variable 'tracing.global_enabled' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.FractionalPercent overall_sampling = 3;\n\n  // A list of custom tags with unique tag name to create tags for the active span.\n  // It will take effect after merging with the :ref:`corresponding configuration\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing.custom_tags>`\n  // configured in the HTTP connection manager. 
If two tags with the same name are configured\n  // each in the HTTP connection manager and the route level, the one configured here takes\n  // priority.\n  repeated type.tracing.v2.CustomTag custom_tags = 4;\n}\n\n// A virtual cluster is a way of specifying a regex matching rule against\n// certain important endpoints such that statistics are generated explicitly for\n// the matched requests. The reason this is useful is that when doing\n// prefix/path matching Envoy does not always know what the application\n// considers to be an endpoint. Thus, it’s impossible for Envoy to generically\n// emit per endpoint statistics. However, often systems have highly critical\n// endpoints that they wish to get “perfect” statistics on. Virtual cluster\n// statistics are perfect in the sense that they are emitted on the downstream\n// side such that they include network level failures.\n//\n// Documentation for :ref:`virtual cluster statistics <config_http_filters_router_vcluster_stats>`.\n//\n// .. note::\n//\n//    Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for\n//    every application endpoint. This is both not easily maintainable and as well the matching and\n//    statistics output are not free.\nmessage VirtualCluster {\n  // Specifies a regex pattern to use for matching requests. The entire path of the request\n  // must match the regex. The regex grammar used is defined `here\n  // <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.\n  //\n  // Examples:\n  //\n  // * The regex ``/rides/\\d+`` matches the path */rides/0*\n  // * The regex ``/rides/\\d+`` matches the path */rides/123*\n  // * The regex ``/rides/\\d+`` does not match the path */rides/123/456*\n  //\n  // .. 
attention::\n  //   This field has been deprecated in favor of `headers` as it is not safe for use with\n  //   untrusted input in all cases.\n  string pattern = 1 [\n    deprecated = true,\n    (validate.rules).string = {max_bytes: 1024},\n    (envoy.annotations.disallowed_by_default) = true\n  ];\n\n  // Specifies a list of header matchers to use for matching requests. Each specified header must\n  // match. The pseudo-headers `:path` and `:method` can be used to match the request path and\n  // method, respectively.\n  repeated HeaderMatcher headers = 4;\n\n  // Specifies the name of the virtual cluster. The virtual cluster name as well\n  // as the virtual host name are used when emitting statistics. The statistics are emitted by the\n  // router filter and are documented :ref:`here <config_http_filters_router_stats>`.\n  string name = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Optionally specifies the HTTP method to match on. For example GET, PUT,\n  // etc.\n  //\n  // .. attention::\n  //   This field has been deprecated in favor of `headers`.\n  core.RequestMethod method = 3\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// Global rate limiting :ref:`architecture overview <arch_overview_global_rate_limit>`.\nmessage RateLimit {\n  // [#next-free-field: 7]\n  message Action {\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"source_cluster\", \"<local service cluster>\")\n    //\n    // <local service cluster> is derived from the :option:`--service-cluster` option.\n    message SourceCluster {\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"destination_cluster\", \"<routed target cluster>\")\n    //\n    // Once a request matches against a route table rule, a routed cluster is determined by one of\n    // the following :ref:`route table configuration <envoy_api_msg_RouteConfiguration>`\n    // settings:\n    //\n    // * :ref:`cluster <envoy_api_field_route.RouteAction.cluster>` indicates the upstream cluster\n    //   to route to.\n    // * :ref:`weighted_clusters <envoy_api_field_route.RouteAction.weighted_clusters>`\n    //   chooses a cluster randomly from a set of clusters with attributed weight.\n    // * :ref:`cluster_header <envoy_api_field_route.RouteAction.cluster_header>` indicates which\n    //   header in the request contains the target cluster.\n    message DestinationCluster {\n    }\n\n    // The following descriptor entry is appended when a header contains a key that matches the\n    // *header_name*:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<header_value_queried_from_header>\")\n    message RequestHeaders {\n      // The header name to be queried from the request headers. The header’s\n      // value is used to populate the value of the descriptor entry for the\n      // descriptor_key.\n      string header_name = 1 [\n        (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}\n      ];\n\n      // The key to use in the descriptor entry.\n      string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}];\n    }\n\n    // The following descriptor entry is appended to the descriptor and is populated using the\n    // trusted address from :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"remote_address\", \"<trusted address from x-forwarded-for>\")\n    message RemoteAddress {\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"generic_key\", \"<descriptor_value>\")\n    message GenericKey {\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"header_match\", \"<descriptor_value>\")\n    message HeaderValueMatch {\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // If set to true, the action will append a descriptor entry when the\n      // request matches the headers. If set to false, the action will append a\n      // descriptor entry when the request does not match the headers. The\n      // default value is true.\n      google.protobuf.BoolValue expect_match = 2;\n\n      // Specifies a set of headers that the rate limit action should match\n      // on. The action will check the request’s headers against all the\n      // specified headers in the config. 
A match will happen if all the\n      // headers in the config are present in the request with the same values\n      // (or based on presence if the value field is not in the config).\n      repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    oneof action_specifier {\n      option (validate.required) = true;\n\n      // Rate limit on source cluster.\n      SourceCluster source_cluster = 1;\n\n      // Rate limit on destination cluster.\n      DestinationCluster destination_cluster = 2;\n\n      // Rate limit on request headers.\n      RequestHeaders request_headers = 3;\n\n      // Rate limit on remote address.\n      RemoteAddress remote_address = 4;\n\n      // Rate limit on a generic key.\n      GenericKey generic_key = 5;\n\n      // Rate limit on the existence of request headers.\n      HeaderValueMatch header_value_match = 6;\n    }\n  }\n\n  // Refers to the stage set in the filter. The rate limit configuration only\n  // applies to filters with the same stage number. The default stage number is\n  // 0.\n  //\n  // .. note::\n  //\n  //   The filter supports a range of 0 - 10 inclusively for stage numbers.\n  google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}];\n\n  // The key to be set in runtime to disable this rate limit configuration.\n  string disable_key = 2;\n\n  // A list of actions that are to be applied for this rate limit configuration.\n  // Order matters as the actions are processed sequentially and the descriptor\n  // is composed by appending descriptor entries in that sequence. If an action\n  // cannot append a descriptor entry, no descriptor is generated for the\n  // configuration. See :ref:`composing actions\n  // <config_http_filters_rate_limit_composing_actions>` for additional documentation.\n  repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// .. 
attention::\n//\n//   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host*\n//   header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n//\n// .. attention::\n//\n//   To route on HTTP method, use the special HTTP/2 *:method* header. This works for both\n//   HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g.,\n//\n//   .. code-block:: json\n//\n//     {\n//       \"name\": \":method\",\n//       \"exact_match\": \"POST\"\n//     }\n//\n// .. attention::\n//   In the absence of any header match specifier, match will default to :ref:`present_match\n//   <envoy_api_field_route.HeaderMatcher.present_match>`. i.e, a request that has the :ref:`name\n//   <envoy_api_field_route.HeaderMatcher.name>` header will match, regardless of the header's\n//   value.\n//\n//  [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.]\n// [#next-free-field: 12]\nmessage HeaderMatcher {\n  reserved 2, 3;\n\n  // Specifies the name of the header in the request.\n  string name = 1\n      [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Specifies how the header match will be performed to route the request.\n  oneof header_match_specifier {\n    // If specified, header match will be performed based on the value of the header.\n    string exact_match = 4;\n\n    // If specified, this regex string is a regular expression rule which implies the entire request\n    // header value must match the regex. The rule will not match if only a subsequence of the\n    // request header value matches the regex. The regex grammar used in the value field is defined\n    // `here <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.\n    //\n    // Examples:\n    //\n    // * The regex ``\\d{3}`` matches the value *123*\n    // * The regex ``\\d{3}`` does not match the value *1234*\n    // * The regex ``\\d{3}`` does not match the value *123.456*\n    //\n    // .. 
attention::\n    //   This field has been deprecated in favor of `safe_regex_match` as it is not safe for use\n    //   with untrusted input in all cases.\n    string regex_match = 5 [\n      deprecated = true,\n      (validate.rules).string = {max_bytes: 1024},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n\n    // If specified, this regex string is a regular expression rule which implies the entire request\n    // header value must match the regex. The rule will not match if only a subsequence of the\n    // request header value matches the regex.\n    type.matcher.RegexMatcher safe_regex_match = 11;\n\n    // If specified, header match will be performed based on range.\n    // The rule will match if the request header value is within this range.\n    // The entire request header value must represent an integer in base 10 notation: consisting of\n    // an optional plus or minus sign followed by a sequence of digits. The rule will not match if\n    // the header value does not represent an integer. 
Match will fail for empty values, floating\n    // point numbers or if only a subsequence of the header value is an integer.\n    //\n    // Examples:\n    //\n    // * For range [-10,0), route will match for header value -1, but not for 0, \"somestring\", 10.9,\n    //   \"-1somestring\"\n    type.Int64Range range_match = 6;\n\n    // If specified, header match will be performed based on whether the header is in the\n    // request.\n    bool present_match = 7;\n\n    // If specified, header match will be performed based on the prefix of the header value.\n    // Note: empty prefix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.\n    string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}];\n\n    // If specified, header match will be performed based on the suffix of the header value.\n    // Note: empty suffix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.\n    string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // If specified, the match result will be inverted before checking. Defaults to false.\n  //\n  // Examples:\n  //\n  // * The regex ``\\d{3}`` does not match the value *1234*, so it will match when inverted.\n  // * The range [-10,0) will match the value -1, so it will not match when inverted.\n  bool invert_match = 8;\n}\n\n// Query parameter matching treats the query string of a request's :path header\n// as an ampersand-separated list of keys and/or key=value elements.\n// [#next-free-field: 7]\nmessage QueryParameterMatcher {\n  // Specifies the name of a key that must be present in the requested\n  // *path*'s query string.\n  string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}];\n\n  // Specifies the value of the key. 
If the value is absent, a request\n  // that contains the key in its query string will match, whether the\n  // key appears with a value (e.g., \"?debug=true\") or not (e.g., \"?debug\")\n  //\n  // .. attention::\n  //   This field is deprecated. Use an `exact` match inside the `string_match` field.\n  string value = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Specifies whether the query parameter value is a regular expression.\n  // Defaults to false. The entire query parameter value (i.e., the part to\n  // the right of the equals sign in \"key=value\") must match the regex.\n  // E.g., the regex ``\\d+$`` will match *123* but not *a123* or *123a*.\n  //\n  // .. attention::\n  //   This field is deprecated. Use a `safe_regex` match inside the `string_match` field.\n  google.protobuf.BoolValue regex = 4\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  oneof query_parameter_match_specifier {\n    // Specifies whether a query parameter value should match against a string.\n    type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}];\n\n    // Specifies whether a query parameter should be present.\n    bool present_match = 6;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP route configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// [#next-free-field: 11]\nmessage RouteConfiguration {\n  // The name of the route configuration. For example, it might match\n  // :ref:`route_config_name\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.Rds.route_config_name>` in\n  // :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`.\n  string name = 1;\n\n  // An array of virtual hosts that make up the route table.\n  repeated route.VirtualHost virtual_hosts = 2;\n\n  // An array of virtual hosts will be dynamically loaded via the VHDS API.\n  // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used\n  // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for\n  // on-demand discovery of virtual hosts. The contents of these two fields will be merged to\n  // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration\n  // taking precedence.\n  Vhds vhds = 9;\n\n  // Optionally specifies a list of HTTP headers that the connection manager\n  // will consider to be internal only. 
If they are found on external requests they will be cleaned\n  // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information.\n  repeated string internal_only_headers = 3 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each response that\n  // the connection manager encodes. Headers specified at this level are applied\n  // after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or\n  // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption response_headers_to_add = 4\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // that the connection manager encodes.\n  repeated string response_headers_to_remove = 5 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // routed by the HTTP connection manager. Headers specified at this level are\n  // applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or\n  // :ref:`envoy_api_msg_route.RouteAction`. 
For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.HeaderValueOption request_headers_to_add = 6\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // routed by the HTTP connection manager.\n  repeated string request_headers_to_remove = 8 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // By default, headers that should be added/removed are evaluated from most to least specific:\n  //\n  // * route level\n  // * virtual host level\n  // * connection manager level\n  //\n  // To allow setting overrides at the route or virtual host level, this order can be reversed\n  // by setting this option to true. Defaults to false.\n  //\n  // [#next-major-version: In the v3 API, this will default to true.]\n  bool most_specific_header_mutations_wins = 10;\n\n  // An optional boolean that specifies whether the clusters that the route\n  // table refers to will be validated by the cluster manager. If set to true\n  // and a route refers to a non-existent cluster, the route table will not\n  // load. If set to false and a route refers to a non-existent cluster, the\n  // route table will load and the router filter will return a 404 if the route\n  // is selected at runtime. This setting defaults to true if the route table\n  // is statically defined via the :ref:`route_config\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.route_config>`\n  // option. This setting defaults to false if the route table is loaded dynamically via the\n  // :ref:`rds\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.rds>`\n  // option. 
Users may wish to override the default behavior in certain cases (for example when\n  // using CDS with a static route table).\n  google.protobuf.BoolValue validate_clusters = 7;\n}\n\nmessage Vhds {\n  // Configuration source specifier for VHDS.\n  core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/scoped_route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"ScopedRouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP scoped routing configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// Specifies a routing scope, which associates a\n// :ref:`Key<envoy_api_msg_ScopedRouteConfiguration.Key>` to a\n// :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name).\n//\n// The HTTP connection manager builds up a table consisting of these Key to\n// RouteConfiguration mappings, and looks up the RouteConfiguration to use per\n// request according to the algorithm specified in the\n// :ref:`scope_key_builder<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scope_key_builder>`\n// assigned to the HttpConnectionManager.\n//\n// For example, with the following configurations (in YAML):\n//\n// HttpConnectionManager config:\n//\n// .. code::\n//\n//   ...\n//   scoped_routes:\n//     name: foo-scoped-routes\n//     scope_key_builder:\n//       fragments:\n//         - header_value_extractor:\n//             name: X-Route-Selector\n//             element_separator: ,\n//             element:\n//               separator: =\n//               key: vip\n//\n// ScopedRouteConfiguration resources (specified statically via\n// :ref:`scoped_route_configurations_list<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scoped_route_configurations_list>`\n// or obtained dynamically via SRDS):\n//\n// .. 
code::\n//\n//  (1)\n//   name: route-scope1\n//   route_configuration_name: route-config1\n//   key:\n//      fragments:\n//        - string_key: 172.10.10.20\n//\n//  (2)\n//   name: route-scope2\n//   route_configuration_name: route-config2\n//   key:\n//     fragments:\n//       - string_key: 172.20.20.30\n//\n// A request from a client such as:\n//\n// .. code::\n//\n//     GET / HTTP/1.1\n//     Host: foo.com\n//     X-Route-Selector: vip=172.10.10.20\n//\n// would result in the routing table defined by the `route-config1`\n// RouteConfiguration being assigned to the HTTP request/stream.\n//\nmessage ScopedRouteConfiguration {\n  // Specifies a key which is matched against the output of the\n  // :ref:`scope_key_builder<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scope_key_builder>`\n  // specified in the HttpConnectionManager. The matching is done per HTTP\n  // request and is dependent on the order of the fragments contained in the\n  // Key.\n  message Key {\n    message Fragment {\n      oneof type {\n        option (validate.required) = true;\n\n        // A string to match against.\n        string string_key = 1;\n      }\n    }\n\n    // The ordered set of fragments to match against. 
The order must match the\n    // fragments in the corresponding\n    // :ref:`scope_key_builder<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scope_key_builder>`.\n    repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // The name assigned to the routing scope.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an\n  // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated\n  // with this scope.\n  string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // The key to match against.\n  Key key = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/api/v2/srds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.api.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/api/v2/scoped_route.proto\";\n\noption java_package = \"io.envoyproxy.envoy.api.v2\";\noption java_outer_classname = \"SrdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.route.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: SRDS]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// The Scoped Routes Discovery Service (SRDS) API distributes\n// :ref:`ScopedRouteConfiguration<envoy_api_msg_ScopedRouteConfiguration>`\n// resources. Each ScopedRouteConfiguration resource represents a \"routing\n// scope\" containing a mapping that allows the HTTP connection manager to\n// dynamically assign a routing table (specified via a\n// :ref:`RouteConfiguration<envoy_api_msg_RouteConfiguration>` message) to each\n// HTTP request.\nservice ScopedRoutesDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.ScopedRouteConfiguration\";\n\n  rpc StreamScopedRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaScopedRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:scoped-routes\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage SrdsDummy {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/README.md",
    "content": "Protocol buffer definitions for Envoy's bootstrap, filter, and service configuration.\n\nVisibility should be constrained to none or `//envoy/config/bootstrap/v2` by default.\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/accesslog/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/accesslog/v2/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.accesslog.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.accesslog.v2\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.access_loggers.grpc.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Configuration for the built-in *envoy.access_loggers.http_grpc*\n// :ref:`AccessLog <envoy_api_msg_config.filter.accesslog.v2.AccessLog>`. This configuration will\n// populate :ref:`StreamAccessLogsMessage.http_logs\n// <envoy_api_field_service.accesslog.v2.StreamAccessLogsMessage.http_logs>`.\n// [#extension: envoy.access_loggers.http_grpc]\nmessage HttpGrpcAccessLogConfig {\n  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];\n\n  // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers\n  // <envoy_api_field_data.accesslog.v2.HTTPRequestProperties.request_headers>`.\n  repeated string additional_request_headers_to_log = 2;\n\n  // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers\n  // <envoy_api_field_data.accesslog.v2.HTTPResponseProperties.response_headers>`.\n  repeated string additional_response_headers_to_log = 3;\n\n  // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers\n  // <envoy_api_field_data.accesslog.v2.HTTPResponseProperties.response_trailers>`.\n  repeated string additional_response_trailers_to_log = 4;\n}\n\n// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. 
This configuration will\n// populate *StreamAccessLogsMessage.tcp_logs*.\n// [#extension: envoy.access_loggers.tcp_grpc]\nmessage TcpGrpcAccessLogConfig {\n  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];\n}\n\n// Common configuration for gRPC access logs.\n// [#next-free-field: 6]\nmessage CommonGrpcAccessLogConfig {\n  // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier\n  // <envoy_api_msg_service.accesslog.v2.StreamAccessLogsMessage.Identifier>`. This allows the\n  // access log server to differentiate between different access logs coming from the same Envoy.\n  string log_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The gRPC service for the access log service.\n  api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n\n  // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time\n  // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to\n  // 1 second.\n  google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}];\n\n  // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until\n  // this limit is hit, or every time flush interval is elapsed, whichever comes first. Setting it\n  // to zero effectively disables the batching. Defaults to 16384.\n  google.protobuf.UInt32Value buffer_size_bytes = 4;\n\n  // Additional filter state objects to log in :ref:`filter_state_objects\n  // <envoy_api_field_data.accesslog.v2.AccessLogCommon.filter_state_objects>`.\n  // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object.\n  repeated string filter_state_objects_to_log = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/accesslog/v2/file.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.accesslog.v2;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.accesslog.v2\";\noption java_outer_classname = \"FileProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.access_loggers.file.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: File access log]\n// [#extension: envoy.access_loggers.file]\n\n// Custom configuration for an :ref:`AccessLog <envoy_api_msg_config.filter.accesslog.v2.AccessLog>`\n// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file*\n// AccessLog.\nmessage FileAccessLog {\n  // A path to a local file to which to write the access log entries.\n  string path = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof access_log_format {\n    // Access log :ref:`format string<config_access_log_format_strings>`.\n    // Envoy supports :ref:`custom access log formats <config_access_log_format>` as well as a\n    // :ref:`default format <config_access_log_default_format>`.\n    string format = 2;\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. All values\n    // are rendered as strings.\n    google.protobuf.Struct json_format = 3;\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. Values are\n    // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may\n    // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the\n    // documentation for a specific command operator for details.\n    google.protobuf.Struct typed_json_format = 4;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/accesslog/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.accesslog.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/metadata.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.accesslog.v3\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common access log types]\n\nmessage AccessLog {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.AccessLog\";\n\n  // The name of the access log implementation to instantiate. The name must\n  // match a statically registered access log. Current built-in loggers include:\n  //\n  // #. \"envoy.access_loggers.file\"\n  // #. \"envoy.access_loggers.http_grpc\"\n  // #. \"envoy.access_loggers.tcp_grpc\"\n  string name = 1;\n\n  // Filter which is used to determine if the access log needs to be written.\n  AccessLogFilter filter = 2;\n\n  // Custom configuration that depends on the access log being instantiated.\n  // Built-in configurations include:\n  //\n  // #. \"envoy.access_loggers.file\": :ref:`FileAccessLog\n  //    <envoy_api_msg_extensions.access_loggers.file.v3.FileAccessLog>`\n  // #. \"envoy.access_loggers.http_grpc\": :ref:`HttpGrpcAccessLogConfig\n  //    <envoy_api_msg_extensions.access_loggers.grpc.v3.HttpGrpcAccessLogConfig>`\n  // #. 
\"envoy.access_loggers.tcp_grpc\": :ref:`TcpGrpcAccessLogConfig\n  //    <envoy_api_msg_extensions.access_loggers.grpc.v3.TcpGrpcAccessLogConfig>`\n  oneof config_type {\n    google.protobuf.Any typed_config = 4;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 3 [deprecated = true];\n  }\n}\n\n// [#next-free-field: 13]\nmessage AccessLogFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.AccessLogFilter\";\n\n  oneof filter_specifier {\n    option (validate.required) = true;\n\n    // Status code filter.\n    StatusCodeFilter status_code_filter = 1;\n\n    // Duration filter.\n    DurationFilter duration_filter = 2;\n\n    // Not health check filter.\n    NotHealthCheckFilter not_health_check_filter = 3;\n\n    // Traceable filter.\n    TraceableFilter traceable_filter = 4;\n\n    // Runtime filter.\n    RuntimeFilter runtime_filter = 5;\n\n    // And filter.\n    AndFilter and_filter = 6;\n\n    // Or filter.\n    OrFilter or_filter = 7;\n\n    // Header filter.\n    HeaderFilter header_filter = 8;\n\n    // Response flag filter.\n    ResponseFlagFilter response_flag_filter = 9;\n\n    // gRPC status filter.\n    GrpcStatusFilter grpc_status_filter = 10;\n\n    // Extension filter.\n    ExtensionFilter extension_filter = 11;\n\n    // Metadata Filter\n    MetadataFilter metadata_filter = 12;\n  }\n}\n\n// Filter on an integer comparison.\nmessage ComparisonFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.ComparisonFilter\";\n\n  enum Op {\n    // =\n    EQ = 0;\n\n    // >=\n    GE = 1;\n\n    // <=\n    LE = 2;\n  }\n\n  // Comparison operator.\n  Op op = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Value to compare against.\n  core.v3.RuntimeUInt32 value = 2;\n}\n\n// Filters on HTTP response/status code.\nmessage StatusCodeFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      
\"envoy.config.filter.accesslog.v2.StatusCodeFilter\";\n\n  // Comparison.\n  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters on total request duration in milliseconds.\nmessage DurationFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.DurationFilter\";\n\n  // Comparison.\n  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters for requests that are not health check requests. A health check\n// request is marked by the health check filter.\nmessage NotHealthCheckFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.NotHealthCheckFilter\";\n}\n\n// Filters for requests that are traceable. See the tracing overview for more\n// information on how a request becomes traceable.\nmessage TraceableFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.TraceableFilter\";\n}\n\n// Filters for random sampling of requests.\nmessage RuntimeFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.RuntimeFilter\";\n\n  // Runtime key to get an optional overridden numerator for use in the\n  // *percent_sampled* field. If found in runtime, this value will replace the\n  // default numerator.\n  string runtime_key = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The default sampling percentage. If not specified, defaults to 0% with\n  // denominator of 100.\n  type.v3.FractionalPercent percent_sampled = 2;\n\n  // By default, sampling pivots on the header\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` being\n  // present. 
If :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`\n  // is present, the filter will consistently sample across multiple hosts based\n  // on the runtime key value and the value extracted from\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`. If it is\n  // missing, or *use_independent_randomness* is set to true, the filter will\n  // randomly sample based on the runtime key value alone.\n  // *use_independent_randomness* can be used for logging kill switches within\n  // complex nested :ref:`AndFilter\n  // <envoy_api_msg_config.accesslog.v3.AndFilter>` and :ref:`OrFilter\n  // <envoy_api_msg_config.accesslog.v3.OrFilter>` blocks that are easier to\n  // reason about from a probability perspective (i.e., setting to true will\n  // cause the filter to behave like an independent random variable when\n  // composed within logical operator filters).\n  bool use_independent_randomness = 3;\n}\n\n// Performs a logical “and” operation on the result of each filter in filters.\n// Filters are evaluated sequentially and if one of them returns false, the\n// filter returns false immediately.\nmessage AndFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.AndFilter\";\n\n  repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Performs a logical “or” operation on the result of each individual filter.\n// Filters are evaluated sequentially and if one of them returns true, the\n// filter returns true immediately.\nmessage OrFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.OrFilter\";\n\n  repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Filters requests based on the presence or value of a request header.\nmessage HeaderFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      
\"envoy.config.filter.accesslog.v2.HeaderFilter\";\n\n  // Only requests with a header which matches the specified HeaderMatcher will\n  // pass the filter check.\n  route.v3.HeaderMatcher header = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters requests that received responses with an Envoy response flag set.\n// A list of the response flags can be found\n// in the access log formatter\n// :ref:`documentation<config_access_log_format_response_flags>`.\nmessage ResponseFlagFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.ResponseFlagFilter\";\n\n  // Only responses with the any of the flags listed in this field will be\n  // logged. This field is optional. If it is not specified, then any response\n  // flag will pass the filter check.\n  repeated string flags = 1 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"LH\"\n        in: \"UH\"\n        in: \"UT\"\n        in: \"LR\"\n        in: \"UR\"\n        in: \"UF\"\n        in: \"UC\"\n        in: \"UO\"\n        in: \"NR\"\n        in: \"DI\"\n        in: \"FI\"\n        in: \"RL\"\n        in: \"UAEX\"\n        in: \"RLSE\"\n        in: \"DC\"\n        in: \"URX\"\n        in: \"SI\"\n        in: \"IH\"\n        in: \"DPE\"\n        in: \"UMSDR\"\n        in: \"RFCF\"\n        in: \"NFCF\"\n        in: \"DT\"\n      }\n    }\n  }];\n}\n\n// Filters gRPC requests based on their response status. 
If a gRPC status is not\n// provided, the filter will infer the status from the HTTP status code.\nmessage GrpcStatusFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.GrpcStatusFilter\";\n\n  enum Status {\n    OK = 0;\n    CANCELED = 1;\n    UNKNOWN = 2;\n    INVALID_ARGUMENT = 3;\n    DEADLINE_EXCEEDED = 4;\n    NOT_FOUND = 5;\n    ALREADY_EXISTS = 6;\n    PERMISSION_DENIED = 7;\n    RESOURCE_EXHAUSTED = 8;\n    FAILED_PRECONDITION = 9;\n    ABORTED = 10;\n    OUT_OF_RANGE = 11;\n    UNIMPLEMENTED = 12;\n    INTERNAL = 13;\n    UNAVAILABLE = 14;\n    DATA_LOSS = 15;\n    UNAUTHENTICATED = 16;\n  }\n\n  // Logs only responses that have any one of the gRPC statuses in this field.\n  repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n\n  // If included and set to true, the filter will instead block all responses\n  // with a gRPC status or inferred gRPC status enumerated in statuses, and\n  // allow all other responses.\n  bool exclude = 2;\n}\n\n// Filters based on matching dynamic metadata.\n// If the matcher path and key correspond to an existing key in dynamic\n// metadata, the request is logged only if the matcher value is equal to the\n// metadata value. If the matcher path and key *do not* correspond to an\n// existing key in dynamic metadata, the request is logged only if\n// match_if_key_not_found is \"true\" or unset.\nmessage MetadataFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.MetadataFilter\";\n\n  // Matcher to check metadata for specified value. 
For example, to match on the\n  // access_log_hint metadata, set the filter to \"envoy.common\" and the path to\n  // \"access_log_hint\", and the value to \"true\".\n  type.matcher.v3.MetadataMatcher matcher = 1;\n\n  // Default result if the key does not exist in dynamic metadata: if unset or\n  // true, then log; if false, then don't log.\n  google.protobuf.BoolValue match_if_key_not_found = 2;\n}\n\n// Extension filter is statically registered at runtime.\nmessage ExtensionFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.accesslog.v2.ExtensionFilter\";\n\n  // The name of the filter implementation to instantiate. The name must\n  // match a statically registered filter.\n  string name = 1;\n\n  // Custom configuration that depends on the filter being instantiated.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.accesslog.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/metadata.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.accesslog.v4alpha\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common access log types]\n\nmessage AccessLog {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.AccessLog\";\n\n  reserved 3;\n\n  reserved \"config\";\n\n  // The name of the access log implementation to instantiate. The name must\n  // match a statically registered access log. Current built-in loggers include:\n  //\n  // #. \"envoy.access_loggers.file\"\n  // #. \"envoy.access_loggers.http_grpc\"\n  // #. \"envoy.access_loggers.tcp_grpc\"\n  string name = 1;\n\n  // Filter which is used to determine if the access log needs to be written.\n  AccessLogFilter filter = 2;\n\n  // Custom configuration that depends on the access log being instantiated.\n  // Built-in configurations include:\n  //\n  // #. \"envoy.access_loggers.file\": :ref:`FileAccessLog\n  //    <envoy_api_msg_extensions.access_loggers.file.v4alpha.FileAccessLog>`\n  // #. \"envoy.access_loggers.http_grpc\": :ref:`HttpGrpcAccessLogConfig\n  //    <envoy_api_msg_extensions.access_loggers.grpc.v3.HttpGrpcAccessLogConfig>`\n  // #. 
\"envoy.access_loggers.tcp_grpc\": :ref:`TcpGrpcAccessLogConfig\n  //    <envoy_api_msg_extensions.access_loggers.grpc.v3.TcpGrpcAccessLogConfig>`\n  oneof config_type {\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// [#next-free-field: 13]\nmessage AccessLogFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.AccessLogFilter\";\n\n  oneof filter_specifier {\n    option (validate.required) = true;\n\n    // Status code filter.\n    StatusCodeFilter status_code_filter = 1;\n\n    // Duration filter.\n    DurationFilter duration_filter = 2;\n\n    // Not health check filter.\n    NotHealthCheckFilter not_health_check_filter = 3;\n\n    // Traceable filter.\n    TraceableFilter traceable_filter = 4;\n\n    // Runtime filter.\n    RuntimeFilter runtime_filter = 5;\n\n    // And filter.\n    AndFilter and_filter = 6;\n\n    // Or filter.\n    OrFilter or_filter = 7;\n\n    // Header filter.\n    HeaderFilter header_filter = 8;\n\n    // Response flag filter.\n    ResponseFlagFilter response_flag_filter = 9;\n\n    // gRPC status filter.\n    GrpcStatusFilter grpc_status_filter = 10;\n\n    // Extension filter.\n    ExtensionFilter extension_filter = 11;\n\n    // Metadata Filter\n    MetadataFilter metadata_filter = 12;\n  }\n}\n\n// Filter on an integer comparison.\nmessage ComparisonFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.ComparisonFilter\";\n\n  enum Op {\n    // =\n    EQ = 0;\n\n    // >=\n    GE = 1;\n\n    // <=\n    LE = 2;\n  }\n\n  // Comparison operator.\n  Op op = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Value to compare against.\n  core.v4alpha.RuntimeUInt32 value = 2;\n}\n\n// Filters on HTTP response/status code.\nmessage StatusCodeFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.StatusCodeFilter\";\n\n  // Comparison.\n  ComparisonFilter comparison = 
1 [(validate.rules).message = {required: true}];\n}\n\n// Filters on total request duration in milliseconds.\nmessage DurationFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.DurationFilter\";\n\n  // Comparison.\n  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters for requests that are not health check requests. A health check\n// request is marked by the health check filter.\nmessage NotHealthCheckFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.NotHealthCheckFilter\";\n}\n\n// Filters for requests that are traceable. See the tracing overview for more\n// information on how a request becomes traceable.\nmessage TraceableFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.TraceableFilter\";\n}\n\n// Filters for random sampling of requests.\nmessage RuntimeFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.RuntimeFilter\";\n\n  // Runtime key to get an optional overridden numerator for use in the\n  // *percent_sampled* field. If found in runtime, this value will replace the\n  // default numerator.\n  string runtime_key = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The default sampling percentage. If not specified, defaults to 0% with\n  // denominator of 100.\n  type.v3.FractionalPercent percent_sampled = 2;\n\n  // By default, sampling pivots on the header\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` being\n  // present. If :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`\n  // is present, the filter will consistently sample across multiple hosts based\n  // on the runtime key value and the value extracted from\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`. 
If it is\n  // missing, or *use_independent_randomness* is set to true, the filter will\n  // randomly sample based on the runtime key value alone.\n  // *use_independent_randomness* can be used for logging kill switches within\n  // complex nested :ref:`AndFilter\n  // <envoy_api_msg_config.accesslog.v4alpha.AndFilter>` and :ref:`OrFilter\n  // <envoy_api_msg_config.accesslog.v4alpha.OrFilter>` blocks that are easier to\n  // reason about from a probability perspective (i.e., setting to true will\n  // cause the filter to behave like an independent random variable when\n  // composed within logical operator filters).\n  bool use_independent_randomness = 3;\n}\n\n// Performs a logical “and” operation on the result of each filter in filters.\n// Filters are evaluated sequentially and if one of them returns false, the\n// filter returns false immediately.\nmessage AndFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.AndFilter\";\n\n  repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Performs a logical “or” operation on the result of each individual filter.\n// Filters are evaluated sequentially and if one of them returns true, the\n// filter returns true immediately.\nmessage OrFilter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.accesslog.v3.OrFilter\";\n\n  repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Filters requests based on the presence or value of a request header.\nmessage HeaderFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.HeaderFilter\";\n\n  // Only requests with a header which matches the specified HeaderMatcher will\n  // pass the filter check.\n  route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters requests that received responses with an Envoy response flag 
set.\n// A list of the response flags can be found\n// in the access log formatter\n// :ref:`documentation<config_access_log_format_response_flags>`.\nmessage ResponseFlagFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.ResponseFlagFilter\";\n\n  // Only responses with the any of the flags listed in this field will be\n  // logged. This field is optional. If it is not specified, then any response\n  // flag will pass the filter check.\n  repeated string flags = 1 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"LH\"\n        in: \"UH\"\n        in: \"UT\"\n        in: \"LR\"\n        in: \"UR\"\n        in: \"UF\"\n        in: \"UC\"\n        in: \"UO\"\n        in: \"NR\"\n        in: \"DI\"\n        in: \"FI\"\n        in: \"RL\"\n        in: \"UAEX\"\n        in: \"RLSE\"\n        in: \"DC\"\n        in: \"URX\"\n        in: \"SI\"\n        in: \"IH\"\n        in: \"DPE\"\n        in: \"UMSDR\"\n        in: \"RFCF\"\n        in: \"NFCF\"\n        in: \"DT\"\n      }\n    }\n  }];\n}\n\n// Filters gRPC requests based on their response status. 
If a gRPC status is not\n// provided, the filter will infer the status from the HTTP status code.\nmessage GrpcStatusFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.GrpcStatusFilter\";\n\n  enum Status {\n    OK = 0;\n    CANCELED = 1;\n    UNKNOWN = 2;\n    INVALID_ARGUMENT = 3;\n    DEADLINE_EXCEEDED = 4;\n    NOT_FOUND = 5;\n    ALREADY_EXISTS = 6;\n    PERMISSION_DENIED = 7;\n    RESOURCE_EXHAUSTED = 8;\n    FAILED_PRECONDITION = 9;\n    ABORTED = 10;\n    OUT_OF_RANGE = 11;\n    UNIMPLEMENTED = 12;\n    INTERNAL = 13;\n    UNAVAILABLE = 14;\n    DATA_LOSS = 15;\n    UNAUTHENTICATED = 16;\n  }\n\n  // Logs only responses that have any one of the gRPC statuses in this field.\n  repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n\n  // If included and set to true, the filter will instead block all responses\n  // with a gRPC status or inferred gRPC status enumerated in statuses, and\n  // allow all other responses.\n  bool exclude = 2;\n}\n\n// Filters based on matching dynamic metadata.\n// If the matcher path and key correspond to an existing key in dynamic\n// metadata, the request is logged only if the matcher value is equal to the\n// metadata value. If the matcher path and key *do not* correspond to an\n// existing key in dynamic metadata, the request is logged only if\n// match_if_key_not_found is \"true\" or unset.\nmessage MetadataFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.MetadataFilter\";\n\n  // Matcher to check metadata for specified value. 
For example, to match on the\n  // access_log_hint metadata, set the filter to \"envoy.common\" and the path to\n  // \"access_log_hint\", and the value to \"true\".\n  type.matcher.v4alpha.MetadataMatcher matcher = 1;\n\n  // Default result if the key does not exist in dynamic metadata: if unset or\n  // true, then log; if false, then don't log.\n  google.protobuf.BoolValue match_if_key_not_found = 2;\n}\n\n// Extension filter is statically registered at runtime.\nmessage ExtensionFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v3.ExtensionFilter\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the filter implementation to instantiate. The name must\n  // match a statically registered filter.\n  string name = 1;\n\n  // Custom configuration that depends on the filter being instantiated.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/bootstrap/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/metrics/v2:pkg\",\n        \"//envoy/config/overload/v2alpha:pkg\",\n        \"//envoy/config/trace/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.bootstrap.v2;\n\nimport \"envoy/api/v2/auth/secret.proto\";\nimport \"envoy/api/v2/cluster.proto\";\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/api/v2/core/event_service_config.proto\";\nimport \"envoy/api/v2/core/socket_option.proto\";\nimport \"envoy/api/v2/listener.proto\";\nimport \"envoy/config/metrics/v2/stats.proto\";\nimport \"envoy/config/overload/v2alpha/overload.proto\";\nimport \"envoy/config/trace/v2/http_tracer.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.bootstrap.v2\";\noption java_outer_classname = \"BootstrapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Bootstrap]\n// This proto is supplied via the :option:`-c` CLI flag and acts as the root\n// of the Envoy v2 configuration. See the :ref:`v2 configuration overview\n// <config_overview_bootstrap>` for more detail.\n\n// Bootstrap :ref:`configuration overview <config_overview_bootstrap>`.\n// [#next-free-field: 21]\nmessage Bootstrap {\n  message StaticResources {\n    // Static :ref:`Listeners <envoy_api_msg_Listener>`. These listeners are\n    // available regardless of LDS configuration.\n    repeated api.v2.Listener listeners = 1;\n\n    // If a network based configuration source is specified for :ref:`cds_config\n    // <envoy_api_field_config.bootstrap.v2.Bootstrap.DynamicResources.cds_config>`, it's necessary\n    // to have some initial cluster definitions available to allow Envoy to know\n    // how to speak to the management server. 
These cluster definitions may not\n    // use :ref:`EDS <arch_overview_dynamic_config_eds>` (i.e. they should be static\n    // IP or DNS-based).\n    repeated api.v2.Cluster clusters = 2;\n\n    // These static secrets can be used by :ref:`SdsSecretConfig\n    // <envoy_api_msg_auth.SdsSecretConfig>`\n    repeated api.v2.auth.Secret secrets = 3;\n  }\n\n  message DynamicResources {\n    reserved 4;\n\n    // All :ref:`Listeners <envoy_api_msg_Listener>` are provided by a single\n    // :ref:`LDS <arch_overview_dynamic_config_lds>` configuration source.\n    api.v2.core.ConfigSource lds_config = 1;\n\n    // All post-bootstrap :ref:`Cluster <envoy_api_msg_Cluster>` definitions are\n    // provided by a single :ref:`CDS <arch_overview_dynamic_config_cds>`\n    // configuration source.\n    api.v2.core.ConfigSource cds_config = 2;\n\n    // A single :ref:`ADS <config_overview_ads>` source may be optionally\n    // specified. This must have :ref:`api_type\n    // <envoy_api_field_core.ApiConfigSource.api_type>` :ref:`GRPC\n    // <envoy_api_enum_value_core.ApiConfigSource.ApiType.GRPC>`. Only\n    // :ref:`ConfigSources <envoy_api_msg_core.ConfigSource>` that have\n    // the :ref:`ads <envoy_api_field_core.ConfigSource.ads>` field set will be\n    // streamed on the ADS channel.\n    api.v2.core.ApiConfigSource ads_config = 3;\n  }\n\n  reserved 10;\n\n  // Node identity to present to the management server and for instance\n  // identification purposes (e.g. 
in generated headers).\n  api.v2.core.Node node = 1;\n\n  // Statically specified resources.\n  StaticResources static_resources = 2;\n\n  // xDS configuration sources.\n  DynamicResources dynamic_resources = 3;\n\n  // Configuration for the cluster manager which owns all upstream clusters\n  // within the server.\n  ClusterManager cluster_manager = 4;\n\n  // Health discovery service config option.\n  // (:ref:`core.ApiConfigSource <envoy_api_msg_core.ApiConfigSource>`)\n  api.v2.core.ApiConfigSource hds_config = 14;\n\n  // Optional file system path to search for startup flag files.\n  string flags_path = 5;\n\n  // Optional set of stats sinks.\n  repeated metrics.v2.StatsSink stats_sinks = 6;\n\n  // Configuration for internal processing of stats.\n  metrics.v2.StatsConfig stats_config = 13;\n\n  // Optional duration between flushes to configured stats sinks. For\n  // performance reasons Envoy latches counters and only flushes counters and\n  // gauges at a periodic interval. If not specified the default is 5000ms (5\n  // seconds).\n  // Duration must be at least 1ms and at most 5 min.\n  google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = {\n    lt {seconds: 300}\n    gte {nanos: 1000000}\n  }];\n\n  // Optional watchdog configuration.\n  Watchdog watchdog = 8;\n\n  // Configuration for an external tracing provider.\n  //\n  // .. attention::\n  //  This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider\n  //  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing.provider>`.\n  trace.v2.Tracing tracing = 9;\n\n  // Configuration for the runtime configuration provider (deprecated). If not\n  // specified, a “null” provider will be used which will result in all defaults\n  // being used.\n  Runtime runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Configuration for the runtime configuration provider. 
If not\n  // specified, a “null” provider will be used which will result in all defaults\n  // being used.\n  LayeredRuntime layered_runtime = 17;\n\n  // Configuration for the local administration HTTP server.\n  Admin admin = 12;\n\n  // Optional overload manager configuration.\n  overload.v2alpha.OverloadManager overload_manager = 15;\n\n  // Enable :ref:`stats for event dispatcher <operations_performance>`, defaults to false.\n  // Note that this records a value for each iteration of the event loop on every thread. This\n  // should normally be minimal overhead, but when using\n  // :ref:`statsd <envoy_api_msg_config.metrics.v2.StatsdSink>`, it will send each observed value\n  // over the wire individually because the statsd protocol doesn't have any way to represent a\n  // histogram summary. Be aware that this can be a very large volume of data.\n  bool enable_dispatcher_stats = 16;\n\n  // Optional string which will be used in lieu of x-envoy in prefixing headers.\n  //\n  // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be\n  // transformed into x-foo-retry-on etc.\n  //\n  // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the\n  // headers Envoy will trust for core code and core extensions only. Be VERY careful making\n  // changes to this string, especially in multi-layer Envoy deployments or deployments using\n  // extensions which are not upstream.\n  string header_prefix = 18;\n\n  // Optional proxy version which will be used to set the value of :ref:`server.version statistic\n  // <server_statistics>` if specified. 
Envoy will not process this value, it will be sent as is to\n  // :ref:`stats sinks <envoy_api_msg_config.metrics.v2.StatsSink>`.\n  google.protobuf.UInt64Value stats_server_version_override = 19;\n\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // This may be overridden on a per-cluster basis in cds_config,\n  // when :ref:`dns_resolvers <envoy_api_field_Cluster.dns_resolvers>` and\n  // :ref:`use_tcp_for_dns_lookups <envoy_api_field_Cluster.use_tcp_for_dns_lookups>` are\n  // specified.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple' API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 20;\n}\n\n// Administration interface :ref:`operations documentation\n// <operations_admin_interface>`.\nmessage Admin {\n  // The path to write the access log for the administration server. If no\n  // access log is desired specify ‘/dev/null’. This is only required if\n  // :ref:`address <envoy_api_field_config.bootstrap.v2.Admin.address>` is set.\n  string access_log_path = 1;\n\n  // The cpu profiler output path for the administration server. 
If no profile\n  // path is specified, the default is ‘/var/log/envoy/envoy.prof’.\n  string profile_path = 2;\n\n  // The TCP address that the administration server will listen on.\n  // If not specified, Envoy will not start an administration server.\n  api.v2.core.Address address = 3;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated api.v2.core.SocketOption socket_options = 4;\n}\n\n// Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`.\nmessage ClusterManager {\n  message OutlierDetection {\n    // Specifies the path to the outlier event log.\n    string event_log_path = 1;\n\n    // [#not-implemented-hide:]\n    // The gRPC service for the outlier detection event service.\n    // If empty, outlier detection events won't be sent to a remote endpoint.\n    api.v2.core.EventServiceConfig event_service = 2;\n  }\n\n  // Name of the local cluster (i.e., the cluster that owns the Envoy running\n  // this configuration). In order to enable :ref:`zone aware routing\n  // <arch_overview_load_balancing_zone_aware_routing>` this option must be set.\n  // If *local_cluster_name* is defined then :ref:`clusters\n  // <envoy_api_msg_Cluster>` must be defined in the :ref:`Bootstrap\n  // static cluster resources\n  // <envoy_api_field_config.bootstrap.v2.Bootstrap.StaticResources.clusters>`. 
This is unrelated to\n  // the :option:`--service-cluster` option which does not `affect zone aware\n  // routing <https://github.com/envoyproxy/envoy/issues/774>`_.\n  string local_cluster_name = 1;\n\n  // Optional global configuration for outlier detection.\n  OutlierDetection outlier_detection = 2;\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config.\n  api.v2.core.BindConfig upstream_bind_config = 3;\n\n  // A management server endpoint to stream load stats to via\n  // *StreamLoadStats*. This must have :ref:`api_type\n  // <envoy_api_field_core.ApiConfigSource.api_type>` :ref:`GRPC\n  // <envoy_api_enum_value_core.ApiConfigSource.ApiType.GRPC>`.\n  api.v2.core.ApiConfigSource load_stats_config = 4;\n}\n\n// Envoy process watchdog configuration. When configured, this monitors for\n// nonresponsive threads and kills the process after the configured thresholds.\n// See the :ref:`watchdog documentation <operations_performance_watchdog>` for more information.\nmessage Watchdog {\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_miss* statistic. If not specified the default is 200ms.\n  google.protobuf.Duration miss_timeout = 1;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_mega_miss* statistic. If not specified the default is\n  // 1000ms.\n  google.protobuf.Duration megamiss_timeout = 2;\n\n  // If a watched thread has been nonresponsive for this duration, assume a\n  // programming error and kill the entire Envoy process. Set to 0 to disable\n  // kill behavior. If not specified the default is 0 (disabled).\n  google.protobuf.Duration kill_timeout = 3;\n\n  // If at least two watched threads have been nonresponsive for at least this\n  // duration assume a true deadlock and kill the entire Envoy process. Set to 0\n  // to disable this behavior. 
If not specified the default is 0 (disabled).\n  google.protobuf.Duration multikill_timeout = 4;\n}\n\n// Runtime :ref:`configuration overview <config_runtime>` (deprecated).\nmessage Runtime {\n  // The implementation assumes that the file system tree is accessed via a\n  // symbolic link. An atomic link swap is used when a new tree should be\n  // switched to. This parameter specifies the path to the symbolic link. Envoy\n  // will watch the location for changes and reload the file system tree when\n  // they happen. If this parameter is not set, there will be no disk based\n  // runtime.\n  string symlink_root = 1;\n\n  // Specifies the subdirectory to load within the root directory. This is\n  // useful if multiple systems share the same delivery mechanism. Envoy\n  // configuration elements can be contained in a dedicated subdirectory.\n  string subdirectory = 2;\n\n  // Specifies an optional subdirectory to load within the root directory. If\n  // specified and the directory exists, configuration values within this\n  // directory will override those found in the primary subdirectory. This is\n  // useful when Envoy is deployed across many different types of servers.\n  // Sometimes it is useful to have a per service cluster directory for runtime\n  // configuration. See below for exactly how the override directory is used.\n  string override_subdirectory = 3;\n\n  // Static base runtime. This will be :ref:`overridden\n  // <config_runtime_layering>` by other runtime layers, e.g.\n  // disk or admin. This follows the :ref:`runtime protobuf JSON representation\n  // encoding <config_runtime_proto_json>`.\n  google.protobuf.Struct base = 4;\n}\n\n// [#next-free-field: 6]\nmessage RuntimeLayer {\n  // :ref:`Disk runtime <config_runtime_local_disk>` layer.\n  message DiskLayer {\n    // The implementation assumes that the file system tree is accessed via a\n    // symbolic link. An atomic link swap is used when a new tree should be\n    // switched to. 
This parameter specifies the path to the symbolic link.\n    // Envoy will watch the location for changes and reload the file system tree\n    // when they happen. See documentation on runtime :ref:`atomicity\n    // <config_runtime_atomicity>` for further details on how reloads are\n    // treated.\n    string symlink_root = 1;\n\n    // Specifies the subdirectory to load within the root directory. This is\n    // useful if multiple systems share the same delivery mechanism. Envoy\n    // configuration elements can be contained in a dedicated subdirectory.\n    string subdirectory = 3;\n\n    // :ref:`Append <config_runtime_local_disk_service_cluster_subdirs>` the\n    // service cluster to the path under symlink root.\n    bool append_service_cluster = 2;\n  }\n\n  // :ref:`Admin console runtime <config_runtime_admin>` layer.\n  message AdminLayer {\n  }\n\n  // :ref:`Runtime Discovery Service (RTDS) <config_runtime_rtds>` layer.\n  message RtdsLayer {\n    // Resource to subscribe to at *rtds_config* for the RTDS layer.\n    string name = 1;\n\n    // RTDS configuration source.\n    api.v2.core.ConfigSource rtds_config = 2;\n  }\n\n  // Descriptive name for the runtime layer. This is only used for the runtime\n  // :http:get:`/runtime` output.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof layer_specifier {\n    option (validate.required) = true;\n\n    // :ref:`Static runtime <config_runtime_bootstrap>` layer.\n    // This follows the :ref:`runtime protobuf JSON representation encoding\n    // <config_runtime_proto_json>`. 
Unlike static xDS resources, this static\n    // layer is overridable by later layers in the runtime virtual filesystem.\n    google.protobuf.Struct static_layer = 2;\n\n    DiskLayer disk_layer = 3;\n\n    AdminLayer admin_layer = 4;\n\n    RtdsLayer rtds_layer = 5;\n  }\n}\n\n// Runtime :ref:`configuration overview <config_runtime>`.\nmessage LayeredRuntime {\n  // The :ref:`layers <config_runtime_layering>` of the runtime. This is ordered\n  // such that later layers in the list overlay earlier entries.\n  repeated RuntimeLayer layers = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/bootstrap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/bootstrap/v2:pkg\",\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/listener/v3:pkg\",\n        \"//envoy/config/metrics/v3:pkg\",\n        \"//envoy/config/overload/v3:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.bootstrap.v3;\n\nimport \"envoy/config/cluster/v3/cluster.proto\";\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/event_service_config.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/socket_option.proto\";\nimport \"envoy/config/listener/v3/listener.proto\";\nimport \"envoy/config/metrics/v3/stats.proto\";\nimport \"envoy/config/overload/v3/overload.proto\";\nimport \"envoy/config/trace/v3/http_tracer.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/secret.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.bootstrap.v3\";\noption java_outer_classname = \"BootstrapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Bootstrap]\n// This proto is supplied via the :option:`-c` CLI flag and acts as the root\n// of the Envoy v2 configuration. 
See the :ref:`v2 configuration overview\n// <config_overview_bootstrap>` for more detail.\n\n// Bootstrap :ref:`configuration overview <config_overview_bootstrap>`.\n// [#next-free-field: 28]\nmessage Bootstrap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v2.Bootstrap\";\n\n  message StaticResources {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.Bootstrap.StaticResources\";\n\n    // Static :ref:`Listeners <envoy_api_msg_config.listener.v3.Listener>`. These listeners are\n    // available regardless of LDS configuration.\n    repeated listener.v3.Listener listeners = 1;\n\n    // If a network based configuration source is specified for :ref:`cds_config\n    // <envoy_api_field_config.bootstrap.v3.Bootstrap.DynamicResources.cds_config>`, it's necessary\n    // to have some initial cluster definitions available to allow Envoy to know\n    // how to speak to the management server. These cluster definitions may not\n    // use :ref:`EDS <arch_overview_dynamic_config_eds>` (i.e. 
they should be static\n    // IP or DNS-based).\n    repeated cluster.v3.Cluster clusters = 2;\n\n    // These static secrets can be used by :ref:`SdsSecretConfig\n    // <envoy_api_msg_extensions.transport_sockets.tls.v3.SdsSecretConfig>`\n    repeated envoy.extensions.transport_sockets.tls.v3.Secret secrets = 3;\n  }\n\n  // [#next-free-field: 7]\n  message DynamicResources {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.Bootstrap.DynamicResources\";\n\n    reserved 4;\n\n    // All :ref:`Listeners <envoy_api_msg_config.listener.v3.Listener>` are provided by a single\n    // :ref:`LDS <arch_overview_dynamic_config_lds>` configuration source.\n    core.v3.ConfigSource lds_config = 1;\n\n    // Resource locator for listener collection.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator lds_resources_locator = 5;\n\n    // All post-bootstrap :ref:`Cluster <envoy_api_msg_config.cluster.v3.Cluster>` definitions are\n    // provided by a single :ref:`CDS <arch_overview_dynamic_config_cds>`\n    // configuration source.\n    core.v3.ConfigSource cds_config = 2;\n\n    // Resource locator for cluster collection.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator cds_resources_locator = 6;\n\n    // A single :ref:`ADS <config_overview_ads>` source may be optionally\n    // specified. This must have :ref:`api_type\n    // <envoy_api_field_config.core.v3.ApiConfigSource.api_type>` :ref:`GRPC\n    // <envoy_api_enum_value_config.core.v3.ApiConfigSource.ApiType.GRPC>`. Only\n    // :ref:`ConfigSources <envoy_api_msg_config.core.v3.ConfigSource>` that have\n    // the :ref:`ads <envoy_api_field_config.core.v3.ConfigSource.ads>` field set will be\n    // streamed on the ADS channel.\n    core.v3.ApiConfigSource ads_config = 3;\n  }\n\n  reserved 10;\n\n  // Node identity to present to the management server and for instance\n  // identification purposes (e.g. 
in generated headers).\n  core.v3.Node node = 1;\n\n  // A list of :ref:`Node <envoy_v3_api_msg_config.core.v3.Node>` field names\n  // that will be included in the context parameters of the effective\n  // *UdpaResourceLocator* that is sent in a discovery request when resource\n  // locators are used for LDS/CDS. Any non-string field will have its JSON\n  // encoding set as the context parameter value, with the exception of\n  // metadata, which will be flattened (see example below). The supported field\n  // names are:\n  // - \"cluster\"\n  // - \"id\"\n  // - \"locality.region\"\n  // - \"locality.sub_zone\"\n  // - \"locality.zone\"\n  // - \"metadata\"\n  // - \"user_agent_build_version.metadata\"\n  // - \"user_agent_build_version.version\"\n  // - \"user_agent_name\"\n  // - \"user_agent_version\"\n  //\n  // The node context parameters act as a base layer dictionary for the context\n  // parameters (i.e. more specific resource specific context parameters will\n  // override). Field names will be prefixed with “udpa.node.” when included in\n  // context parameters.\n  //\n  // For example, if node_context_params is ``[\"user_agent_name\", \"metadata\"]``,\n  // the implied context parameters might be::\n  //\n  //   node.user_agent_name: \"envoy\"\n  //   node.metadata.foo: \"{\\\"bar\\\": \\\"baz\\\"}\"\n  //   node.metadata.some: \"42\"\n  //   node.metadata.thing: \"\\\"thing\\\"\"\n  //\n  // [#not-implemented-hide:]\n  repeated string node_context_params = 26;\n\n  // Statically specified resources.\n  StaticResources static_resources = 2;\n\n  // xDS configuration sources.\n  DynamicResources dynamic_resources = 3;\n\n  // Configuration for the cluster manager which owns all upstream clusters\n  // within the server.\n  ClusterManager cluster_manager = 4;\n\n  // Health discovery service config option.\n  // (:ref:`core.ApiConfigSource <envoy_api_msg_config.core.v3.ApiConfigSource>`)\n  core.v3.ApiConfigSource hds_config = 14;\n\n  // Optional file 
system path to search for startup flag files.\n  string flags_path = 5;\n\n  // Optional set of stats sinks.\n  repeated metrics.v3.StatsSink stats_sinks = 6;\n\n  // Configuration for internal processing of stats.\n  metrics.v3.StatsConfig stats_config = 13;\n\n  // Optional duration between flushes to configured stats sinks. For\n  // performance reasons Envoy latches counters and only flushes counters and\n  // gauges at a periodic interval. If not specified the default is 5000ms (5\n  // seconds).\n  // Duration must be at least 1ms and at most 5 min.\n  google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = {\n    lt {seconds: 300}\n    gte {nanos: 1000000}\n  }];\n\n  // Optional watchdog configuration.\n  // This is for a single watchdog configuration for the entire system.\n  // Deprecated in favor of *watchdogs* which has finer granularity.\n  Watchdog watchdog = 8 [deprecated = true];\n\n  // Optional watchdogs configuration.\n  // This is used for specifying different watchdogs for the different subsystems.\n  Watchdogs watchdogs = 27;\n\n  // Configuration for an external tracing provider.\n  //\n  // .. attention::\n  //  This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider\n  //  <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider>`.\n  trace.v3.Tracing tracing = 9 [deprecated = true];\n\n  // Configuration for the runtime configuration provider. 
If not\n  // specified, a “null” provider will be used which will result in all defaults\n  // being used.\n  LayeredRuntime layered_runtime = 17;\n\n  // Configuration for the local administration HTTP server.\n  Admin admin = 12;\n\n  // Optional overload manager configuration.\n  overload.v3.OverloadManager overload_manager = 15 [\n    (udpa.annotations.security).configure_for_untrusted_downstream = true,\n    (udpa.annotations.security).configure_for_untrusted_upstream = true\n  ];\n\n  // Enable :ref:`stats for event dispatcher <operations_performance>`, defaults to false.\n  // Note that this records a value for each iteration of the event loop on every thread. This\n  // should normally be minimal overhead, but when using\n  // :ref:`statsd <envoy_api_msg_config.metrics.v3.StatsdSink>`, it will send each observed value\n  // over the wire individually because the statsd protocol doesn't have any way to represent a\n  // histogram summary. Be aware that this can be a very large volume of data.\n  bool enable_dispatcher_stats = 16;\n\n  // Optional string which will be used in lieu of x-envoy in prefixing headers.\n  //\n  // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be\n  // transformed into x-foo-retry-on etc.\n  //\n  // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the\n  // headers Envoy will trust for core code and core extensions only. Be VERY careful making\n  // changes to this string, especially in multi-layer Envoy deployments or deployments using\n  // extensions which are not upstream.\n  string header_prefix = 18;\n\n  // Optional proxy version which will be used to set the value of :ref:`server.version statistic\n  // <server_statistics>` if specified. 
Envoy will not process this value, it will be sent as is to\n  // :ref:`stats sinks <envoy_api_msg_config.metrics.v3.StatsSink>`.\n  google.protobuf.UInt64Value stats_server_version_override = 19;\n\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // This may be overridden on a per-cluster basis in cds_config,\n  // when :ref:`dns_resolvers <envoy_api_field_config.cluster.v3.Cluster.dns_resolvers>` and\n  // :ref:`use_tcp_for_dns_lookups <envoy_api_field_config.cluster.v3.Cluster.use_tcp_for_dns_lookups>` are\n  // specified.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple' API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 20;\n\n  // Specifies optional bootstrap extensions to be instantiated at startup time.\n  // Each item contains extension specific configuration.\n  repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21;\n\n  // Configuration sources that will participate in\n  // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as\n  // follows:\n  // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call\n  //    this *resource_authority*.\n  // 2. *resource_authority* is compared against the authorities in any peer\n  //    *ConfigSource*. The peer *ConfigSource* is the configuration source\n  //    message which would have been used unconditionally for resolution\n  //    with opaque resource names. If there is a match with an authority, the\n  //    peer *ConfigSource* message is used.\n  // 3. *resource_authority* is compared sequentially with the authorities in\n  //    each configuration source in *config_sources*. The first *ConfigSource*\n  //    to match wins.\n  // 4. As a fallback, if no configuration source matches, then\n  //    *default_config_source* is used.\n  // 5. 
If *default_config_source* is not specified, resolution fails.\n  // [#not-implemented-hide:]\n  repeated core.v3.ConfigSource config_sources = 22;\n\n  // Default configuration source for *udpa.core.v1.ResourceLocator* if all\n  // other resolution fails.\n  // [#not-implemented-hide:]\n  core.v3.ConfigSource default_config_source = 23;\n\n  // Optional overriding of default socket interface. The value must be the name of one of the\n  // socket interface factories initialized through a bootstrap extension\n  string default_socket_interface = 24;\n\n  // Global map of CertificateProvider instances. These instances are referred to by name in the\n  // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance.instance_name>`\n  // field.\n  // [#not-implemented-hide:]\n  map<string, core.v3.TypedExtensionConfig> certificate_provider_instances = 25;\n\n  Runtime hidden_envoy_deprecated_runtime = 11\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// Administration interface :ref:`operations documentation\n// <operations_admin_interface>`.\nmessage Admin {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v2.Admin\";\n\n  // The path to write the access log for the administration server. If no\n  // access log is desired specify ‘/dev/null’. This is only required if\n  // :ref:`address <envoy_api_field_config.bootstrap.v3.Admin.address>` is set.\n  string access_log_path = 1;\n\n  // The cpu profiler output path for the administration server. 
If no profile\n  // path is specified, the default is ‘/var/log/envoy/envoy.prof’.\n  string profile_path = 2;\n\n  // The TCP address that the administration server will listen on.\n  // If not specified, Envoy will not start an administration server.\n  core.v3.Address address = 3;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.v3.SocketOption socket_options = 4;\n}\n\n// Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`.\nmessage ClusterManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v2.ClusterManager\";\n\n  message OutlierDetection {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.ClusterManager.OutlierDetection\";\n\n    // Specifies the path to the outlier event log.\n    string event_log_path = 1;\n\n    // [#not-implemented-hide:]\n    // The gRPC service for the outlier detection event service.\n    // If empty, outlier detection events won't be sent to a remote endpoint.\n    core.v3.EventServiceConfig event_service = 2;\n  }\n\n  // Name of the local cluster (i.e., the cluster that owns the Envoy running\n  // this configuration). In order to enable :ref:`zone aware routing\n  // <arch_overview_load_balancing_zone_aware_routing>` this option must be set.\n  // If *local_cluster_name* is defined then :ref:`clusters\n  // <envoy_api_msg_config.cluster.v3.Cluster>` must be defined in the :ref:`Bootstrap\n  // static cluster resources\n  // <envoy_api_field_config.bootstrap.v3.Bootstrap.StaticResources.clusters>`. 
This is unrelated to\n  // the :option:`--service-cluster` option which does not `affect zone aware\n  // routing <https://github.com/envoyproxy/envoy/issues/774>`_.\n  string local_cluster_name = 1;\n\n  // Optional global configuration for outlier detection.\n  OutlierDetection outlier_detection = 2;\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config.\n  core.v3.BindConfig upstream_bind_config = 3;\n\n  // A management server endpoint to stream load stats to via\n  // *StreamLoadStats*. This must have :ref:`api_type\n  // <envoy_api_field_config.core.v3.ApiConfigSource.api_type>` :ref:`GRPC\n  // <envoy_api_enum_value_config.core.v3.ApiConfigSource.ApiType.GRPC>`.\n  core.v3.ApiConfigSource load_stats_config = 4;\n}\n\n// Allows you to specify different watchdog configs for different subsystems.\n// This allows finer tuned policies for the watchdog. If a subsystem is omitted\n// the default values for that system will be used.\nmessage Watchdogs {\n  // Watchdog for the main thread.\n  Watchdog main_thread_watchdog = 1;\n\n  // Watchdog for the worker threads.\n  Watchdog worker_watchdog = 2;\n}\n\n// Envoy process watchdog configuration. 
When configured, this monitors for\n// nonresponsive threads and kills the process after the configured thresholds.\n// See the :ref:`watchdog documentation <operations_performance_watchdog>` for more information.\n// [#next-free-field: 8]\nmessage Watchdog {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v2.Watchdog\";\n\n  message WatchdogAction {\n    // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS.\n    // Within an event type, actions execute in the order they are configured.\n    // For KILL/MULTIKILL there is a default PANIC that will run after the\n    // registered actions and kills the process if it wasn't already killed.\n    // It might be useful to specify several debug actions, and possibly an\n    // alternate FATAL action.\n    enum WatchdogEvent {\n      UNKNOWN = 0;\n      KILL = 1;\n      MULTIKILL = 2;\n      MEGAMISS = 3;\n      MISS = 4;\n    }\n\n    // Extension specific configuration for the action.\n    core.v3.TypedExtensionConfig config = 1;\n\n    WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // Register actions that will fire on given WatchDog events.\n  // See *WatchDogAction* for priority of events.\n  repeated WatchdogAction actions = 7;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_miss* statistic. If not specified the default is 200ms.\n  google.protobuf.Duration miss_timeout = 1;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_mega_miss* statistic. If not specified the default is\n  // 1000ms.\n  google.protobuf.Duration megamiss_timeout = 2;\n\n  // If a watched thread has been nonresponsive for this duration, assume a\n  // programming error and kill the entire Envoy process. Set to 0 to disable\n  // kill behavior. 
If not specified the default is 0 (disabled).\n  google.protobuf.Duration kill_timeout = 3;\n\n  // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is\n  // enabled. Enabling this feature would help to reduce risk of synchronized\n  // watchdog kill events across proxies due to external triggers. Set to 0 to\n  // disable. If not specified the default is 0 (disabled).\n  google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}];\n\n  // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))\n  // threads have been nonresponsive for at least this duration kill the entire\n  // Envoy process. Set to 0 to disable this behavior. If not specified the\n  // default is 0 (disabled).\n  google.protobuf.Duration multikill_timeout = 4;\n\n  // Sets the threshold for *multikill_timeout* in terms of the percentage of\n  // nonresponsive threads required for the *multikill_timeout*.\n  // If not specified the default is 0.\n  type.v3.Percent multikill_threshold = 5;\n}\n\n// Runtime :ref:`configuration overview <config_runtime>` (deprecated).\nmessage Runtime {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v2.Runtime\";\n\n  // The implementation assumes that the file system tree is accessed via a\n  // symbolic link. An atomic link swap is used when a new tree should be\n  // switched to. This parameter specifies the path to the symbolic link. Envoy\n  // will watch the location for changes and reload the file system tree when\n  // they happen. If this parameter is not set, there will be no disk based\n  // runtime.\n  string symlink_root = 1;\n\n  // Specifies the subdirectory to load within the root directory. This is\n  // useful if multiple systems share the same delivery mechanism. 
Envoy\n  // configuration elements can be contained in a dedicated subdirectory.\n  string subdirectory = 2;\n\n  // Specifies an optional subdirectory to load within the root directory. If\n  // specified and the directory exists, configuration values within this\n  // directory will override those found in the primary subdirectory. This is\n  // useful when Envoy is deployed across many different types of servers.\n  // Sometimes it is useful to have a per service cluster directory for runtime\n  // configuration. See below for exactly how the override directory is used.\n  string override_subdirectory = 3;\n\n  // Static base runtime. This will be :ref:`overridden\n  // <config_runtime_layering>` by other runtime layers, e.g.\n  // disk or admin. This follows the :ref:`runtime protobuf JSON representation\n  // encoding <config_runtime_proto_json>`.\n  google.protobuf.Struct base = 4;\n}\n\n// [#next-free-field: 6]\nmessage RuntimeLayer {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v2.RuntimeLayer\";\n\n  // :ref:`Disk runtime <config_runtime_local_disk>` layer.\n  message DiskLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.RuntimeLayer.DiskLayer\";\n\n    // The implementation assumes that the file system tree is accessed via a\n    // symbolic link. An atomic link swap is used when a new tree should be\n    // switched to. This parameter specifies the path to the symbolic link.\n    // Envoy will watch the location for changes and reload the file system tree\n    // when they happen. See documentation on runtime :ref:`atomicity\n    // <config_runtime_atomicity>` for further details on how reloads are\n    // treated.\n    string symlink_root = 1;\n\n    // Specifies the subdirectory to load within the root directory. This is\n    // useful if multiple systems share the same delivery mechanism. 
Envoy\n    // configuration elements can be contained in a dedicated subdirectory.\n    string subdirectory = 3;\n\n    // :ref:`Append <config_runtime_local_disk_service_cluster_subdirs>` the\n    // service cluster to the path under symlink root.\n    bool append_service_cluster = 2;\n  }\n\n  // :ref:`Admin console runtime <config_runtime_admin>` layer.\n  message AdminLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.RuntimeLayer.AdminLayer\";\n  }\n\n  // :ref:`Runtime Discovery Service (RTDS) <config_runtime_rtds>` layer.\n  message RtdsLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer\";\n\n    // Resource to subscribe to at *rtds_config* for the RTDS layer.\n    string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n    // Resource locator for RTDS layer. This is mutually exclusive to *name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator rtds_resource_locator = 3\n        [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n    // RTDS configuration source.\n    core.v3.ConfigSource rtds_config = 2;\n  }\n\n  // Descriptive name for the runtime layer. This is only used for the runtime\n  // :http:get:`/runtime` output.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof layer_specifier {\n    option (validate.required) = true;\n\n    // :ref:`Static runtime <config_runtime_bootstrap>` layer.\n    // This follows the :ref:`runtime protobuf JSON representation encoding\n    // <config_runtime_proto_json>`. 
Unlike static xDS resources, this static\n    // layer is overridable by later layers in the runtime virtual filesystem.\n    google.protobuf.Struct static_layer = 2;\n\n    DiskLayer disk_layer = 3;\n\n    AdminLayer admin_layer = 4;\n\n    RtdsLayer rtds_layer = 5;\n  }\n}\n\n// Runtime :ref:`configuration overview <config_runtime>`.\nmessage LayeredRuntime {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v2.LayeredRuntime\";\n\n  // The :ref:`layers <config_runtime_layering>` of the runtime. This is ordered\n  // such that later layers in the list overlay earlier entries.\n  repeated RuntimeLayer layers = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/bootstrap/v3:pkg\",\n        \"//envoy/config/cluster/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/listener/v4alpha:pkg\",\n        \"//envoy/config/metrics/v4alpha:pkg\",\n        \"//envoy/config/overload/v3:pkg\",\n        \"//envoy/config/trace/v4alpha:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.bootstrap.v4alpha;\n\nimport \"envoy/config/cluster/v4alpha/cluster.proto\";\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/event_service_config.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/socket_option.proto\";\nimport \"envoy/config/listener/v4alpha/listener.proto\";\nimport \"envoy/config/metrics/v4alpha/stats.proto\";\nimport \"envoy/config/overload/v3/overload.proto\";\nimport \"envoy/config/trace/v4alpha/http_tracer.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/secret.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.bootstrap.v4alpha\";\noption java_outer_classname = \"BootstrapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Bootstrap]\n// This proto is supplied via the :option:`-c` CLI flag and acts as the root\n// of the Envoy v2 configuration. 
See the :ref:`v2 configuration overview\n// <config_overview_bootstrap>` for more detail.\n\n// Bootstrap :ref:`configuration overview <config_overview_bootstrap>`.\n// [#next-free-field: 28]\nmessage Bootstrap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.Bootstrap\";\n\n  message StaticResources {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.Bootstrap.StaticResources\";\n\n    // Static :ref:`Listeners <envoy_api_msg_config.listener.v4alpha.Listener>`. These listeners are\n    // available regardless of LDS configuration.\n    repeated listener.v4alpha.Listener listeners = 1;\n\n    // If a network based configuration source is specified for :ref:`cds_config\n    // <envoy_api_field_config.bootstrap.v4alpha.Bootstrap.DynamicResources.cds_config>`, it's necessary\n    // to have some initial cluster definitions available to allow Envoy to know\n    // how to speak to the management server. These cluster definitions may not\n    // use :ref:`EDS <arch_overview_dynamic_config_eds>` (i.e. 
they should be static\n    // IP or DNS-based).\n    repeated cluster.v4alpha.Cluster clusters = 2;\n\n    // These static secrets can be used by :ref:`SdsSecretConfig\n    // <envoy_api_msg_extensions.transport_sockets.tls.v4alpha.SdsSecretConfig>`\n    repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3;\n  }\n\n  // [#next-free-field: 7]\n  message DynamicResources {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.Bootstrap.DynamicResources\";\n\n    reserved 4;\n\n    // All :ref:`Listeners <envoy_api_msg_config.listener.v4alpha.Listener>` are provided by a single\n    // :ref:`LDS <arch_overview_dynamic_config_lds>` configuration source.\n    core.v4alpha.ConfigSource lds_config = 1;\n\n    // Resource locator for listener collection.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator lds_resources_locator = 5;\n\n    // All post-bootstrap :ref:`Cluster <envoy_api_msg_config.cluster.v4alpha.Cluster>` definitions are\n    // provided by a single :ref:`CDS <arch_overview_dynamic_config_cds>`\n    // configuration source.\n    core.v4alpha.ConfigSource cds_config = 2;\n\n    // Resource locator for cluster collection.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator cds_resources_locator = 6;\n\n    // A single :ref:`ADS <config_overview_ads>` source may be optionally\n    // specified. This must have :ref:`api_type\n    // <envoy_api_field_config.core.v4alpha.ApiConfigSource.api_type>` :ref:`GRPC\n    // <envoy_api_enum_value_config.core.v4alpha.ApiConfigSource.ApiType.GRPC>`. 
Only\n    // :ref:`ConfigSources <envoy_api_msg_config.core.v4alpha.ConfigSource>` that have\n    // the :ref:`ads <envoy_api_field_config.core.v4alpha.ConfigSource.ads>` field set will be\n    // streamed on the ADS channel.\n    core.v4alpha.ApiConfigSource ads_config = 3;\n  }\n\n  reserved 10, 11;\n\n  reserved \"runtime\";\n\n  // Node identity to present to the management server and for instance\n  // identification purposes (e.g. in generated headers).\n  core.v4alpha.Node node = 1;\n\n  // A list of :ref:`Node <envoy_v3_api_msg_config.core.v3.Node>` field names\n  // that will be included in the context parameters of the effective\n  // *UdpaResourceLocator* that is sent in a discovery request when resource\n  // locators are used for LDS/CDS. Any non-string field will have its JSON\n  // encoding set as the context parameter value, with the exception of\n  // metadata, which will be flattened (see example below). The supported field\n  // names are:\n  // - \"cluster\"\n  // - \"id\"\n  // - \"locality.region\"\n  // - \"locality.sub_zone\"\n  // - \"locality.zone\"\n  // - \"metadata\"\n  // - \"user_agent_build_version.metadata\"\n  // - \"user_agent_build_version.version\"\n  // - \"user_agent_name\"\n  // - \"user_agent_version\"\n  //\n  // The node context parameters act as a base layer dictionary for the context\n  // parameters (i.e. more specific resource specific context parameters will\n  // override). 
Field names will be prefixed with “udpa.node.” when included in\n  // context parameters.\n  //\n  // For example, if node_context_params is ``[\"user_agent_name\", \"metadata\"]``,\n  // the implied context parameters might be::\n  //\n  //   node.user_agent_name: \"envoy\"\n  //   node.metadata.foo: \"{\\\"bar\\\": \\\"baz\\\"}\"\n  //   node.metadata.some: \"42\"\n  //   node.metadata.thing: \"\\\"thing\\\"\"\n  //\n  // [#not-implemented-hide:]\n  repeated string node_context_params = 26;\n\n  // Statically specified resources.\n  StaticResources static_resources = 2;\n\n  // xDS configuration sources.\n  DynamicResources dynamic_resources = 3;\n\n  // Configuration for the cluster manager which owns all upstream clusters\n  // within the server.\n  ClusterManager cluster_manager = 4;\n\n  // Health discovery service config option.\n  // (:ref:`core.ApiConfigSource <envoy_api_msg_config.core.v4alpha.ApiConfigSource>`)\n  core.v4alpha.ApiConfigSource hds_config = 14;\n\n  // Optional file system path to search for startup flag files.\n  string flags_path = 5;\n\n  // Optional set of stats sinks.\n  repeated metrics.v4alpha.StatsSink stats_sinks = 6;\n\n  // Configuration for internal processing of stats.\n  metrics.v4alpha.StatsConfig stats_config = 13;\n\n  // Optional duration between flushes to configured stats sinks. For\n  // performance reasons Envoy latches counters and only flushes counters and\n  // gauges at a periodic interval. 
If not specified the default is 5000ms (5\n  // seconds).\n  // Duration must be at least 1ms and at most 5 min.\n  google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = {\n    lt {seconds: 300}\n    gte {nanos: 1000000}\n  }];\n\n  // Optional watchdog configuration.\n  // This is for a single watchdog configuration for the entire system.\n  // Deprecated in favor of *watchdogs* which has finer granularity.\n  Watchdog hidden_envoy_deprecated_watchdog = 8 [deprecated = true];\n\n  // Optional watchdogs configuration.\n  // This is used for specifying different watchdogs for the different subsystems.\n  Watchdogs watchdogs = 27;\n\n  // Configuration for an external tracing provider.\n  //\n  // .. attention::\n  //  This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider\n  //  <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.Tracing.provider>`.\n  trace.v4alpha.Tracing hidden_envoy_deprecated_tracing = 9 [deprecated = true];\n\n  // Configuration for the runtime configuration provider. If not\n  // specified, a “null” provider will be used which will result in all defaults\n  // being used.\n  LayeredRuntime layered_runtime = 17;\n\n  // Configuration for the local administration HTTP server.\n  Admin admin = 12;\n\n  // Optional overload manager configuration.\n  overload.v3.OverloadManager overload_manager = 15 [\n    (udpa.annotations.security).configure_for_untrusted_downstream = true,\n    (udpa.annotations.security).configure_for_untrusted_upstream = true\n  ];\n\n  // Enable :ref:`stats for event dispatcher <operations_performance>`, defaults to false.\n  // Note that this records a value for each iteration of the event loop on every thread. 
This\n  // should normally be minimal overhead, but when using\n  // :ref:`statsd <envoy_api_msg_config.metrics.v4alpha.StatsdSink>`, it will send each observed value\n  // over the wire individually because the statsd protocol doesn't have any way to represent a\n  // histogram summary. Be aware that this can be a very large volume of data.\n  bool enable_dispatcher_stats = 16;\n\n  // Optional string which will be used in lieu of x-envoy in prefixing headers.\n  //\n  // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be\n  // transformed into x-foo-retry-on etc.\n  //\n  // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the\n  // headers Envoy will trust for core code and core extensions only. Be VERY careful making\n  // changes to this string, especially in multi-layer Envoy deployments or deployments using\n  // extensions which are not upstream.\n  string header_prefix = 18;\n\n  // Optional proxy version which will be used to set the value of :ref:`server.version statistic\n  // <server_statistics>` if specified. Envoy will not process this value, it will be sent as is to\n  // :ref:`stats sinks <envoy_api_msg_config.metrics.v4alpha.StatsSink>`.\n  google.protobuf.UInt64Value stats_server_version_override = 19;\n\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // This may be overridden on a per-cluster basis in cds_config,\n  // when :ref:`dns_resolvers <envoy_api_field_config.cluster.v4alpha.Cluster.dns_resolvers>` and\n  // :ref:`use_tcp_for_dns_lookups <envoy_api_field_config.cluster.v4alpha.Cluster.use_tcp_for_dns_lookups>` are\n  // specified.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. 
Apple's API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 20;\n\n  // Specifies optional bootstrap extensions to be instantiated at startup time.\n  // Each item contains extension specific configuration.\n  repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21;\n\n  // Configuration sources that will participate in\n  // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as\n  // follows:\n  // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call\n  //    this *resource_authority*.\n  // 2. *resource_authority* is compared against the authorities in any peer\n  //    *ConfigSource*. The peer *ConfigSource* is the configuration source\n  //    message which would have been used unconditionally for resolution\n  //    with opaque resource names. If there is a match with an authority, the\n  //    peer *ConfigSource* message is used.\n  // 3. *resource_authority* is compared sequentially with the authorities in\n  //    each configuration source in *config_sources*. The first *ConfigSource*\n  //    to match wins.\n  // 4. As a fallback, if no configuration source matches, then\n  //    *default_config_source* is used.\n  // 5. If *default_config_source* is not specified, resolution fails.\n  // [#not-implemented-hide:]\n  repeated core.v4alpha.ConfigSource config_sources = 22;\n\n  // Default configuration source for *udpa.core.v1.ResourceLocator* if all\n  // other resolution fails.\n  // [#not-implemented-hide:]\n  core.v4alpha.ConfigSource default_config_source = 23;\n\n  // Optional overriding of default socket interface. The value must be the name of one of the\n  // socket interface factories initialized through a bootstrap extension\n  string default_socket_interface = 24;\n\n  // Global map of CertificateProvider instances. 
These instances are referred to by name in the\n  // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CommonTlsContext.CertificateProviderInstance.instance_name>`\n  // field.\n  // [#not-implemented-hide:]\n  map<string, core.v4alpha.TypedExtensionConfig> certificate_provider_instances = 25;\n}\n\n// Administration interface :ref:`operations documentation\n// <operations_admin_interface>`.\nmessage Admin {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v3.Admin\";\n\n  // The path to write the access log for the administration server. If no\n  // access log is desired specify ‘/dev/null’. This is only required if\n  // :ref:`address <envoy_api_field_config.bootstrap.v4alpha.Admin.address>` is set.\n  string access_log_path = 1;\n\n  // The cpu profiler output path for the administration server. If no profile\n  // path is specified, the default is ‘/var/log/envoy/envoy.prof’.\n  string profile_path = 2;\n\n  // The TCP address that the administration server will listen on.\n  // If not specified, Envoy will not start an administration server.\n  core.v4alpha.Address address = 3;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.v4alpha.SocketOption socket_options = 4;\n}\n\n// Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`.\nmessage ClusterManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.ClusterManager\";\n\n  message OutlierDetection {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.ClusterManager.OutlierDetection\";\n\n    // Specifies the path to the outlier event log.\n    string event_log_path = 1;\n\n    // [#not-implemented-hide:]\n    // The gRPC service for the outlier detection event service.\n    // If empty, 
outlier detection events won't be sent to a remote endpoint.\n    core.v4alpha.EventServiceConfig event_service = 2;\n  }\n\n  // Name of the local cluster (i.e., the cluster that owns the Envoy running\n  // this configuration). In order to enable :ref:`zone aware routing\n  // <arch_overview_load_balancing_zone_aware_routing>` this option must be set.\n  // If *local_cluster_name* is defined then :ref:`clusters\n  // <envoy_api_msg_config.cluster.v4alpha.Cluster>` must be defined in the :ref:`Bootstrap\n  // static cluster resources\n  // <envoy_api_field_config.bootstrap.v4alpha.Bootstrap.StaticResources.clusters>`. This is unrelated to\n  // the :option:`--service-cluster` option which does not `affect zone aware\n  // routing <https://github.com/envoyproxy/envoy/issues/774>`_.\n  string local_cluster_name = 1;\n\n  // Optional global configuration for outlier detection.\n  OutlierDetection outlier_detection = 2;\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config.\n  core.v4alpha.BindConfig upstream_bind_config = 3;\n\n  // A management server endpoint to stream load stats to via\n  // *StreamLoadStats*. This must have :ref:`api_type\n  // <envoy_api_field_config.core.v4alpha.ApiConfigSource.api_type>` :ref:`GRPC\n  // <envoy_api_enum_value_config.core.v4alpha.ApiConfigSource.ApiType.GRPC>`.\n  core.v4alpha.ApiConfigSource load_stats_config = 4;\n}\n\n// Allows you to specify different watchdog configs for different subsystems.\n// This allows finer tuned policies for the watchdog. 
If a subsystem is omitted\n// the default values for that system will be used.\nmessage Watchdogs {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.Watchdogs\";\n\n  // Watchdog for the main thread.\n  Watchdog main_thread_watchdog = 1;\n\n  // Watchdog for the worker threads.\n  Watchdog worker_watchdog = 2;\n}\n\n// Envoy process watchdog configuration. When configured, this monitors for\n// nonresponsive threads and kills the process after the configured thresholds.\n// See the :ref:`watchdog documentation <operations_performance_watchdog>` for more information.\n// [#next-free-field: 8]\nmessage Watchdog {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v3.Watchdog\";\n\n  message WatchdogAction {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.Watchdog.WatchdogAction\";\n\n    // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS.\n    // Within an event type, actions execute in the order they are configured.\n    // For KILL/MULTIKILL there is a default PANIC that will run after the\n    // registered actions and kills the process if it wasn't already killed.\n    // It might be useful to specify several debug actions, and possibly an\n    // alternate FATAL action.\n    enum WatchdogEvent {\n      UNKNOWN = 0;\n      KILL = 1;\n      MULTIKILL = 2;\n      MEGAMISS = 3;\n      MISS = 4;\n    }\n\n    // Extension specific configuration for the action.\n    core.v4alpha.TypedExtensionConfig config = 1;\n\n    WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // Register actions that will fire on given WatchDog events.\n  // See *WatchDogAction* for priority of events.\n  repeated WatchdogAction actions = 7;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_miss* statistic. 
If not specified the default is 200ms.\n  google.protobuf.Duration miss_timeout = 1;\n\n  // The duration after which Envoy counts a nonresponsive thread in the\n  // *watchdog_mega_miss* statistic. If not specified the default is\n  // 1000ms.\n  google.protobuf.Duration megamiss_timeout = 2;\n\n  // If a watched thread has been nonresponsive for this duration, assume a\n  // programming error and kill the entire Envoy process. Set to 0 to disable\n  // kill behavior. If not specified the default is 0 (disabled).\n  google.protobuf.Duration kill_timeout = 3;\n\n  // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is\n  // enabled. Enabling this feature would help to reduce risk of synchronized\n  // watchdog kill events across proxies due to external triggers. Set to 0 to\n  // disable. If not specified the default is 0 (disabled).\n  google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}];\n\n  // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))\n  // threads have been nonresponsive for at least this duration kill the entire\n  // Envoy process. Set to 0 to disable this behavior. If not specified the\n  // default is 0 (disabled).\n  google.protobuf.Duration multikill_timeout = 4;\n\n  // Sets the threshold for *multikill_timeout* in terms of the percentage of\n  // nonresponsive threads required for the *multikill_timeout*.\n  // If not specified the default is 0.\n  type.v3.Percent multikill_threshold = 5;\n}\n\n// Runtime :ref:`configuration overview <config_runtime>` (deprecated).\nmessage Runtime {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.bootstrap.v3.Runtime\";\n\n  // The implementation assumes that the file system tree is accessed via a\n  // symbolic link. An atomic link swap is used when a new tree should be\n  // switched to. This parameter specifies the path to the symbolic link. 
Envoy\n  // will watch the location for changes and reload the file system tree when\n  // they happen. If this parameter is not set, there will be no disk based\n  // runtime.\n  string symlink_root = 1;\n\n  // Specifies the subdirectory to load within the root directory. This is\n  // useful if multiple systems share the same delivery mechanism. Envoy\n  // configuration elements can be contained in a dedicated subdirectory.\n  string subdirectory = 2;\n\n  // Specifies an optional subdirectory to load within the root directory. If\n  // specified and the directory exists, configuration values within this\n  // directory will override those found in the primary subdirectory. This is\n  // useful when Envoy is deployed across many different types of servers.\n  // Sometimes it is useful to have a per service cluster directory for runtime\n  // configuration. See below for exactly how the override directory is used.\n  string override_subdirectory = 3;\n\n  // Static base runtime. This will be :ref:`overridden\n  // <config_runtime_layering>` by other runtime layers, e.g.\n  // disk or admin. This follows the :ref:`runtime protobuf JSON representation\n  // encoding <config_runtime_proto_json>`.\n  google.protobuf.Struct base = 4;\n}\n\n// [#next-free-field: 6]\nmessage RuntimeLayer {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.RuntimeLayer\";\n\n  // :ref:`Disk runtime <config_runtime_local_disk>` layer.\n  message DiskLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer\";\n\n    // The implementation assumes that the file system tree is accessed via a\n    // symbolic link. An atomic link swap is used when a new tree should be\n    // switched to. This parameter specifies the path to the symbolic link.\n    // Envoy will watch the location for changes and reload the file system tree\n    // when they happen. 
See documentation on runtime :ref:`atomicity\n    // <config_runtime_atomicity>` for further details on how reloads are\n    // treated.\n    string symlink_root = 1;\n\n    // Specifies the subdirectory to load within the root directory. This is\n    // useful if multiple systems share the same delivery mechanism. Envoy\n    // configuration elements can be contained in a dedicated subdirectory.\n    string subdirectory = 3;\n\n    // :ref:`Append <config_runtime_local_disk_service_cluster_subdirs>` the\n    // service cluster to the path under symlink root.\n    bool append_service_cluster = 2;\n  }\n\n  // :ref:`Admin console runtime <config_runtime_admin>` layer.\n  message AdminLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer\";\n  }\n\n  // :ref:`Runtime Discovery Service (RTDS) <config_runtime_rtds>` layer.\n  message RtdsLayer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer\";\n\n    oneof name_specifier {\n      // Resource to subscribe to at *rtds_config* for the RTDS layer.\n      string name = 1;\n\n      // Resource locator for RTDS layer. This is mutually exclusive to *name*.\n      // [#not-implemented-hide:]\n      udpa.core.v1.ResourceLocator rtds_resource_locator = 3;\n    }\n\n    // RTDS configuration source.\n    core.v4alpha.ConfigSource rtds_config = 2;\n  }\n\n  // Descriptive name for the runtime layer. This is only used for the runtime\n  // :http:get:`/runtime` output.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof layer_specifier {\n    option (validate.required) = true;\n\n    // :ref:`Static runtime <config_runtime_bootstrap>` layer.\n    // This follows the :ref:`runtime protobuf JSON representation encoding\n    // <config_runtime_proto_json>`. 
Unlike static xDS resources, this static\n    // layer is overridable by later layers in the runtime virtual filesystem.\n    google.protobuf.Struct static_layer = 2;\n\n    DiskLayer disk_layer = 3;\n\n    AdminLayer admin_layer = 4;\n\n    RtdsLayer rtds_layer = 5;\n  }\n}\n\n// Runtime :ref:`configuration overview <config_runtime>`.\nmessage LayeredRuntime {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.bootstrap.v3.LayeredRuntime\";\n\n  // The :ref:`layers <config_runtime_layering>` of the runtime. This is ordered\n  // such that later layers in the list overlay earlier entries.\n  repeated RuntimeLayer layers = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.aggregate.v2alpha;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.aggregate.v2alpha\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.clusters.aggregate.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Aggregate cluster configuration]\n\n// Configuration for the aggregate cluster. See the :ref:`architecture overview\n// <arch_overview_aggregate_cluster>` for more information.\n// [#extension: envoy.clusters.aggregate]\nmessage ClusterConfig {\n  // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they\n  // appear in this list.\n  repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.dynamic_forward_proxy.v2alpha;\n\nimport \"envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.clusters.dynamic_forward_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamic forward proxy cluster configuration]\n\n// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#extension: envoy.clusters.dynamic_forward_proxy]\nmessage ClusterConfig {\n  // The DNS cache configuration that the cluster will attach to. Note this configuration must\n  // match that of associated :ref:`dynamic forward proxy HTTP filter configuration\n  // <envoy_api_field_config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/redis/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.redis;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.redis\";\noption java_outer_classname = \"RedisClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Redis Cluster Configuration]\n// This cluster adds support for `Redis Cluster <https://redis.io/topics/cluster-spec>`_, as part\n// of :ref:`Envoy's support for Redis Cluster <arch_overview_redis>`.\n//\n// Redis Cluster is an extension of Redis which supports sharding and high availability (where a\n// shard that loses its primary fails over to a replica, and designates it as the new primary).\n// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client\n// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the\n// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS\n// command <https://redis.io/commands/cluster-slots>`_. This result is then stored locally, and\n// updated at user-configured intervals.\n//\n// Additionally, if\n// :ref:`enable_redirection<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings.enable_redirection>`\n// is true, then moved and ask redirection errors from upstream servers will trigger a topology\n// refresh when they exceed a user-configured error threshold.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     name: name\n//     connect_timeout: 0.25s\n//     dns_lookup_family: V4_ONLY\n//     hosts:\n//     - socket_address:\n//       address: foo.bar.com\n//       port_value: 22120\n//     cluster_type:\n//     name: envoy.clusters.redis\n//     typed_config:\n//       \"@type\": type.googleapis.com/google.protobuf.Struct\n//       value:\n//         cluster_refresh_rate: 30s\n//         cluster_refresh_timeout: 0.5s\n//         redirect_refresh_interval: 10s\n//         redirect_refresh_threshold: 10\n// [#extension: envoy.clusters.redis]\n\n// [#next-free-field: 7]\nmessage RedisClusterConfig {\n  // Interval between successive topology refresh requests. If not set, this defaults to 5s.\n  google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}];\n\n  // Timeout for topology refresh request. If not set, this defaults to 3s.\n  google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}];\n\n  // The minimum interval that must pass after triggering a topology refresh request before a new\n  // request can possibly be triggered again. Any errors received during one of these\n  // time intervals are ignored. If not set, this defaults to 5s.\n  google.protobuf.Duration redirect_refresh_interval = 3;\n\n  // The number of redirection errors that must be received before\n  // triggering a topology refresh request. 
If not set, this defaults to 5.\n  // If this is set to 0, topology refresh after redirect is disabled.\n  google.protobuf.UInt32Value redirect_refresh_threshold = 4;\n\n  // The number of failures that must be received before triggering a topology refresh request.\n  // If not set, this defaults to 0, which disables the topology refresh due to failure.\n  uint32 failure_refresh_threshold = 5;\n\n  // The number of hosts became degraded or unhealthy before triggering a topology refresh request.\n  // If not set, this defaults to 0, which disables the topology refresh due to degraded or\n  // unhealthy host.\n  uint32 host_degraded_refresh_threshold = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/cluster:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v3\";\noption java_outer_classname = \"CircuitBreakerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Circuit breakers]\n\n// :ref:`Circuit breaking<arch_overview_circuit_break>` settings can be\n// specified individually for each defined priority.\nmessage CircuitBreakers {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.cluster.CircuitBreakers\";\n\n  // A Thresholds defines CircuitBreaker settings for a\n  // :ref:`RoutingPriority<envoy_api_enum_config.core.v3.RoutingPriority>`.\n  // [#next-free-field: 9]\n  message Thresholds {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.cluster.CircuitBreakers.Thresholds\";\n\n    message RetryBudget {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.cluster.CircuitBreakers.Thresholds.RetryBudget\";\n\n      // Specifies the limit on concurrent retries as a percentage of the sum of active requests and\n      // active pending requests. For example, if there are 100 active requests and the\n      // budget_percent is set to 25, there may be 25 active retries.\n      //\n      // This parameter is optional. Defaults to 20%.\n      type.v3.Percent budget_percent = 1;\n\n      // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the\n      // number of active retries may never go below this number.\n      //\n      // This parameter is optional. 
Defaults to 3.\n      google.protobuf.UInt32Value min_retry_concurrency = 2;\n    }\n\n    // The :ref:`RoutingPriority<envoy_api_enum_config.core.v3.RoutingPriority>`\n    // the specified CircuitBreaker settings apply to.\n    core.v3.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // The maximum number of connections that Envoy will make to the upstream\n    // cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_connections = 2;\n\n    // The maximum number of pending requests that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_pending_requests = 3;\n\n    // The maximum number of parallel requests that Envoy will make to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_requests = 4;\n\n    // The maximum number of parallel retries that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 3.\n    google.protobuf.UInt32Value max_retries = 5;\n\n    // Specifies a limit on concurrent retries in relation to the number of active requests. This\n    // parameter is optional.\n    //\n    // .. note::\n    //\n    //    If this field is set, the retry budget will override any configured retry circuit\n    //    breaker.\n    RetryBudget retry_budget = 8;\n\n    // If track_remaining is true, then stats will be published that expose\n    // the number of resources remaining until the circuit breakers open. If\n    // not specified, the default is false.\n    //\n    // .. note::\n    //\n    //    If a retry budget is used in lieu of the max_retries circuit breaker,\n    //    the remaining retry resources remaining will not be tracked.\n    bool track_remaining = 6;\n\n    // The maximum number of connection pools per cluster that Envoy will concurrently support at\n    // once. If not specified, the default is unlimited. 
Set this for clusters which create a\n    // large number of connection pools. See\n    // :ref:`Circuit Breaking <arch_overview_circuit_break_cluster_maximum_connection_pools>` for\n    // more details.\n    google.protobuf.UInt32Value max_connection_pools = 7;\n  }\n\n  // If multiple :ref:`Thresholds<envoy_api_msg_config.cluster.v3.CircuitBreakers.Thresholds>`\n  // are defined with the same :ref:`RoutingPriority<envoy_api_enum_config.core.v3.RoutingPriority>`,\n  // the first one in the list is used. If no Thresholds is defined for a given\n  // :ref:`RoutingPriority<envoy_api_enum_config.core.v3.RoutingPriority>`, the default values\n  // are used.\n  repeated Thresholds thresholds = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v3/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v3;\n\nimport \"envoy/config/cluster/v3/circuit_breaker.proto\";\nimport \"envoy/config/cluster/v3/filter.proto\";\nimport \"envoy/config/cluster/v3/outlier_detection.proto\";\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/health_check.proto\";\nimport \"envoy/config/core/v3/protocol.proto\";\nimport \"envoy/config/endpoint/v3/endpoint.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/tls.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/collection_entry.proto\";\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v3\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Cluster configuration]\n\n// Cluster list collections. 
Entries are *Cluster* resources or references.\n// [#not-implemented-hide:]\nmessage ClusterCollection {\n  udpa.core.v1.CollectionEntry entries = 1;\n}\n\n// Configuration for a single upstream cluster.\n// [#next-free-field: 53]\nmessage Cluster {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Cluster\";\n\n  // Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`\n  // for an explanation on each type.\n  enum DiscoveryType {\n    // Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`\n    // for an explanation.\n    STATIC = 0;\n\n    // Refer to the :ref:`strict DNS discovery\n    // type<arch_overview_service_discovery_types_strict_dns>`\n    // for an explanation.\n    STRICT_DNS = 1;\n\n    // Refer to the :ref:`logical DNS discovery\n    // type<arch_overview_service_discovery_types_logical_dns>`\n    // for an explanation.\n    LOGICAL_DNS = 2;\n\n    // Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`\n    // for an explanation.\n    EDS = 3;\n\n    // Refer to the :ref:`original destination discovery\n    // type<arch_overview_service_discovery_types_original_destination>`\n    // for an explanation.\n    ORIGINAL_DST = 4;\n  }\n\n  // Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture\n  // overview section for information on each type.\n  enum LbPolicy {\n    // Refer to the :ref:`round robin load balancing\n    // policy<arch_overview_load_balancing_types_round_robin>`\n    // for an explanation.\n    ROUND_ROBIN = 0;\n\n    // Refer to the :ref:`least request load balancing\n    // policy<arch_overview_load_balancing_types_least_request>`\n    // for an explanation.\n    LEAST_REQUEST = 1;\n\n    // Refer to the :ref:`ring hash load balancing\n    // policy<arch_overview_load_balancing_types_ring_hash>`\n    // for an explanation.\n    RING_HASH = 2;\n\n    // Refer to the :ref:`random 
load balancing\n    // policy<arch_overview_load_balancing_types_random>`\n    // for an explanation.\n    RANDOM = 3;\n\n    // Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`\n    // for an explanation.\n    MAGLEV = 5;\n\n    // This load balancer type must be specified if the configured cluster provides a cluster\n    // specific load balancer. Consult the configured cluster's documentation for whether to set\n    // this option or not.\n    CLUSTER_PROVIDED = 6;\n\n    // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy\n    // <envoy_api_field_config.cluster.v3.Cluster.load_balancing_policy>` field to determine the LB policy.\n    // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field\n    // and instead using the new load_balancing_policy field as the one and only mechanism for\n    // configuring this.]\n    LOAD_BALANCING_POLICY_CONFIG = 7;\n\n    hidden_envoy_deprecated_ORIGINAL_DST_LB = 4\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n  }\n\n  // When V4_ONLY is selected, the DNS resolver will only perform a lookup for\n  // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will\n  // only perform a lookup for addresses in the IPv6 family. 
If AUTO is\n  // specified, the DNS resolver will first perform a lookup for addresses in\n  // the IPv6 family and fallback to a lookup for addresses in the IPv4 family.\n  // For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this setting is\n  // ignored.\n  enum DnsLookupFamily {\n    AUTO = 0;\n    V4_ONLY = 1;\n    V6_ONLY = 2;\n  }\n\n  enum ClusterProtocolSelection {\n    // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).\n    // If :ref:`http2_protocol_options <envoy_api_field_config.cluster.v3.Cluster.http2_protocol_options>` are\n    // present, HTTP2 will be used, otherwise HTTP1.1 will be used.\n    USE_CONFIGURED_PROTOCOL = 0;\n\n    // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.\n    USE_DOWNSTREAM_PROTOCOL = 1;\n  }\n\n  // TransportSocketMatch specifies what transport socket config will be used\n  // when the match conditions are satisfied.\n  message TransportSocketMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.TransportSocketMatch\";\n\n    // The name of the match, used in stats generation.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Optional endpoint metadata match criteria.\n    // The connection to the endpoint with metadata matching what is set in this field\n    // will use the transport socket configuration specified here.\n    // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match\n    // against the values specified in this field.\n    google.protobuf.Struct match = 2;\n\n    // The configuration of the transport socket.\n    core.v3.TransportSocket transport_socket = 3;\n  }\n\n  // Extended cluster type.\n  message CustomClusterType {\n    option 
(udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.CustomClusterType\";\n\n    // The type of the cluster to instantiate. The name must match a supported cluster type.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Cluster specific configuration which depends on the cluster being instantiated.\n    // See the supported cluster for further documentation.\n    google.protobuf.Any typed_config = 2;\n  }\n\n  // Only valid when discovery type is EDS.\n  message EdsClusterConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.EdsClusterConfig\";\n\n    // Configuration for the source of EDS updates for this Cluster.\n    core.v3.ConfigSource eds_config = 1;\n\n    // Optional alternative to cluster name to present to EDS. This does not\n    // have the same restrictions as cluster name, i.e. it may be arbitrary\n    // length.\n    string service_name = 2 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n    // Resource locator for EDS. This is mutually exclusive to *service_name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator eds_resource_locator = 3\n        [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n  }\n\n  // Optionally divide the endpoints in this cluster into subsets defined by\n  // endpoint metadata and selected by route and weighted cluster metadata.\n  // [#next-free-field: 8]\n  message LbSubsetConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.LbSubsetConfig\";\n\n    // If NO_FALLBACK is selected, a result\n    // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,\n    // any cluster endpoint may be returned (subject to policy, health checks,\n    // etc). 
If DEFAULT_SUBSET is selected, load balancing is performed over the\n    // endpoints matching the values from the default_subset field.\n    enum LbSubsetFallbackPolicy {\n      NO_FALLBACK = 0;\n      ANY_ENDPOINT = 1;\n      DEFAULT_SUBSET = 2;\n    }\n\n    // Specifications for subsets.\n    message LbSubsetSelector {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Cluster.LbSubsetConfig.LbSubsetSelector\";\n\n      // Allows to override top level fallback policy per selector.\n      enum LbSubsetSelectorFallbackPolicy {\n        // If NOT_DEFINED top level config fallback policy is used instead.\n        NOT_DEFINED = 0;\n\n        // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.\n        NO_FALLBACK = 1;\n\n        // If ANY_ENDPOINT is selected, any cluster endpoint may be returned\n        // (subject to policy, health checks, etc).\n        ANY_ENDPOINT = 2;\n\n        // If DEFAULT_SUBSET is selected, load balancing is performed over the\n        // endpoints matching the values from the default_subset field.\n        DEFAULT_SUBSET = 3;\n\n        // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata\n        // keys reduced to\n        // :ref:`fallback_keys_subset<envoy_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_keys_subset>`.\n        // It allows for a fallback to a different, less specific selector if some of the keys of\n        // the selector are considered optional.\n        KEYS_SUBSET = 4;\n      }\n\n      // List of keys to match with the weighted cluster metadata.\n      repeated string keys = 1;\n\n      // Selects a mode of operation in which each subset has only one host. 
This mode uses the same rules for\n      // choosing a host, but updating hosts is faster, especially for large numbers of hosts.\n      //\n      // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy.\n      //\n      // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains\n      // only one entry.\n      //\n      // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys`\n      // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge\n      // :ref:`lb_subsets_single_host_per_subset_duplicate<config_cluster_manager_cluster_stats_subset_lb>` indicates how many duplicates are\n      // present in the current configuration.\n      bool single_host_per_subset = 4;\n\n      // The behavior used when no endpoint subset matches the selected route's\n      // metadata.\n      LbSubsetSelectorFallbackPolicy fallback_policy = 2\n          [(validate.rules).enum = {defined_only: true}];\n\n      // Subset of\n      // :ref:`keys<envoy_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` used by\n      // :ref:`KEYS_SUBSET<envoy_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`\n      // fallback policy.\n      // It has to be a non empty list if KEYS_SUBSET fallback policy is selected.\n      // For any other fallback policy the parameter is not used and should not be set.\n      // Only values also present in\n      // :ref:`keys<envoy_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` are allowed, but\n      // `fallback_keys_subset` cannot be equal to `keys`.\n      repeated string fallback_keys_subset = 3;\n    }\n\n    // The behavior used when no endpoint subset matches the selected route's\n    // metadata. 
The value defaults to\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // Specifies the default subset of endpoints used during fallback if\n    // fallback_policy is\n    // :ref:`DEFAULT_SUBSET<envoy_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.\n    // Each field in default_subset is\n    // compared to the matching LbEndpoint.Metadata under the *envoy.lb*\n    // namespace. It is valid for no hosts to match, in which case the behavior\n    // is the same as a fallback_policy of\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    google.protobuf.Struct default_subset = 2;\n\n    // For each entry, LbEndpoint.Metadata's\n    // *envoy.lb* namespace is traversed and a subset is created for each unique\n    // combination of key and value. For example:\n    //\n    // .. code-block:: json\n    //\n    //   { \"subset_selectors\": [\n    //       { \"keys\": [ \"version\" ] },\n    //       { \"keys\": [ \"stage\", \"hardware_type\" ] }\n    //   ]}\n    //\n    // A subset is matched when the metadata from the selected route and\n    // weighted cluster contains the same keys and values as the subset's\n    // metadata. 
The same host may appear in multiple subsets.\n    repeated LbSubsetSelector subset_selectors = 3;\n\n    // If true, routing to subsets will take into account the localities and locality weights of the\n    // endpoints when making the routing decision.\n    //\n    // There are some potential pitfalls associated with enabling this feature, as the resulting\n    // traffic split after applying both a subset match and locality weights might be undesirable.\n    //\n    // Consider for example a situation in which you have 50/50 split across two localities X/Y\n    // which have 100 hosts each without subsetting. If the subset LB results in X having only 1\n    // host selected but Y having 100, then a lot more load is being dumped on the single host in X\n    // than originally anticipated in the load balancing assignment delivered via EDS.\n    bool locality_weight_aware = 4;\n\n    // When used with locality_weight_aware, scales the weight of each locality by the ratio\n    // of hosts in the subset vs hosts in the original subset. This aims to even out the load\n    // going to an individual locality if said locality is disproportionately affected by the\n    // subset predicate.\n    bool scale_locality_weight = 5;\n\n    // If true, when a fallback policy is configured and its corresponding subset fails to find\n    // a host this will cause any host to be selected instead.\n    //\n    // This is useful when using the default subset as the fallback policy, given the default\n    // subset might become empty. 
With this option enabled, if that happens the LB will attempt\n    // to select a host from the entire cluster.\n    bool panic_mode_any = 6;\n\n    // If true, metadata specified for a metadata key will be matched against the corresponding\n    // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value\n    // and any of the elements in the list matches the criteria.\n    bool list_as_any = 7;\n  }\n\n  // Specific configuration for the LeastRequest load balancing policy.\n  message LeastRequestLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.LeastRequestLbConfig\";\n\n    // The number of random healthy hosts from which the host with the fewest active requests will\n    // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.\n    google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];\n\n    // The following formula is used to calculate the dynamic weights when hosts have different load\n    // balancing weights:\n    //\n    // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`\n    //\n    // The larger the active request bias is, the more aggressively active requests will lower the\n    // effective weight when all host weights are not equal.\n    //\n    // `active_request_bias` must be greater than or equal to 0.0.\n    //\n    // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number\n    // of active requests at the time it picks a host and behaves like the Round Robin Load\n    // Balancer.\n    //\n    // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing\n    // weight by the number of active requests at the time it does a pick.\n    //\n    // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's\n    // host sets changes, e.g., whenever there is a host 
membership update or a host load balancing\n    // weight change.\n    //\n    // .. note::\n    //   This setting only takes effect if all host weights are not equal.\n    core.v3.RuntimeDouble active_request_bias = 2;\n  }\n\n  // Specific configuration for the :ref:`RingHash<arch_overview_load_balancing_types_ring_hash>`\n  // load balancing policy.\n  message RingHashLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.RingHashLbConfig\";\n\n    // The hash function used to hash hosts onto the ketama ring.\n    enum HashFunction {\n      // Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.\n      XX_HASH = 0;\n\n      // Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with\n      // std:hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled\n      // on Linux and not macOS.\n      MURMUR_HASH_2 = 1;\n    }\n\n    reserved 2;\n\n    // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each\n    // provided host) the better the request distribution will reflect the desired weights. Defaults\n    // to 1024 entries, and limited to 8M entries. See also\n    // :ref:`maximum_ring_size<envoy_api_field_config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size>`.\n    google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];\n\n    // The hash function used to hash hosts onto the ketama ring. The value defaults to\n    // :ref:`XX_HASH<envoy_api_enum_value_config.cluster.v3.Cluster.RingHashLbConfig.HashFunction.XX_HASH>`.\n    HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];\n\n    // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered\n    // to further constrain resource use. 
See also\n    // :ref:`minimum_ring_size<envoy_api_field_config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size>`.\n    google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];\n  }\n\n  // Specific configuration for the :ref:`Maglev<arch_overview_load_balancing_types_maglev>`\n  // load balancing policy.\n  message MaglevLbConfig {\n    // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee.\n    // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same\n    // upstream as it was before. Increasing the table size reduces the amount of disruption.\n    // The table size must be a prime number. If it is not specified, the default is 65537.\n    google.protobuf.UInt64Value table_size = 1;\n  }\n\n  // Specific configuration for the\n  // :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`\n  // load balancing policy.\n  message OriginalDstLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.OriginalDstLbConfig\";\n\n    // When true, :ref:`x-envoy-original-dst-host\n    // <config_http_conn_man_headers_x-envoy-original-dst-host>` can be used to override destination\n    // address.\n    //\n    // .. 
attention::\n    //\n    //   This header isn't sanitized by default, so enabling this feature allows HTTP clients to\n    //   route traffic to arbitrary hosts and/or ports, which may have serious security\n    //   consequences.\n    bool use_http_header = 1;\n  }\n\n  // Common configuration for all load balancer implementations.\n  // [#next-free-field: 8]\n  message CommonLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Cluster.CommonLbConfig\";\n\n    // Configuration for :ref:`zone aware routing\n    // <arch_overview_load_balancing_zone_aware_routing>`.\n    message ZoneAwareLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Cluster.CommonLbConfig.ZoneAwareLbConfig\";\n\n      // Configures percentage of requests that will be considered for zone aware routing\n      // if zone aware routing is configured. If not specified, the default is 100%.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      type.v3.Percent routing_enabled = 1;\n\n      // Configures minimum upstream cluster size required for zone aware routing\n      // If upstream cluster size is less than specified, zone aware routing is not performed\n      // even if zone aware routing is configured. If not specified, the default is 6.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      google.protobuf.UInt64Value min_cluster_size = 2;\n\n      // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic\n      // mode<arch_overview_load_balancing_panic_threshold>`. Instead, the cluster will fail all\n      // requests as if all hosts are unhealthy. 
This can help avoid potentially overwhelming a\n      // failing service.\n      bool fail_traffic_on_panic = 3;\n    }\n\n    // Configuration for :ref:`locality weighted load balancing\n    // <arch_overview_load_balancing_locality_weighted_lb>`\n    message LocalityWeightedLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig\";\n    }\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    message ConsistentHashingLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Cluster.CommonLbConfig.ConsistentHashingLbConfig\";\n\n      // If set to `true`, the cluster will use hostname instead of the resolved\n      // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.\n      bool use_hostname_for_hashing = 1;\n\n      // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150\n      // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster.\n      // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200.\n      // Minimum is 100.\n      //\n      // Applies to both Ring Hash and Maglev load balancers.\n      //\n      // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified\n      // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests\n      // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing\n      // is used to identify an eligible host. 
Further, the linear probe is implemented using a random jump in hosts ring/table to identify\n      // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the\n      // cascading overflow effect when choosing the next host in the ring/table).\n      //\n      // If weights are specified on the hosts, they are respected.\n      //\n      // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts\n      // being probed, so use a higher value if you require better performance.\n      google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}];\n    }\n\n    // Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.\n    // If not specified, the default is 50%.\n    // To disable panic mode, set to 0%.\n    //\n    // .. note::\n    //   The specified percent will be truncated to the nearest 1%.\n    type.v3.Percent healthy_panic_threshold = 1;\n\n    oneof locality_config_specifier {\n      ZoneAwareLbConfig zone_aware_lb_config = 2;\n\n      LocalityWeightedLbConfig locality_weighted_lb_config = 3;\n    }\n\n    // If set, all health check/weight/metadata updates that happen within this duration will be\n    // merged and delivered in one shot when the duration expires. The start of the duration is when\n    // the first update happens. This is useful for big clusters, with potentially noisy deploys\n    // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes\n    // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new\n    // cluster). Please always keep in mind that the use of sandbox technologies may change this\n    // behavior.\n    //\n    // If this is not set, we default to a merge window of 1000ms. 
To disable it, set the merge\n    // window to 0.\n    //\n    // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is\n    // because merging those updates isn't currently safe. See\n    // https://github.com/envoyproxy/envoy/pull/3941.\n    google.protobuf.Duration update_merge_window = 4;\n\n    // If set to true, Envoy will not consider new hosts when computing load balancing weights until\n    // they have been health checked for the first time. This will have no effect unless\n    // active health checking is also configured.\n    //\n    // Ignoring a host means that for any load balancing calculations that adjust weights based\n    // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and\n    // panic mode) Envoy will exclude these hosts in the denominator.\n    //\n    // For example, with hosts in two priorities P0 and P1, where P0 looks like\n    // {healthy, unhealthy (new), unhealthy (new)}\n    // and where P1 looks like\n    // {healthy, healthy}\n    // all traffic will still hit P0, as 1 / (3 - 2) = 1.\n    //\n    // Enabling this will allow scaling up the number of hosts for a given cluster without entering\n    // panic mode or triggering priority spillover, assuming the hosts pass the first health check.\n    //\n    // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not\n    // contribute to the calculation when deciding whether panic mode is enabled or not.\n    bool ignore_new_hosts_until_first_hc = 5;\n\n    // If set to `true`, the cluster manager will drain all existing\n    // connections to upstream hosts whenever hosts are added or removed from the cluster.\n    bool close_connections_on_host_set_change = 6;\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    ConsistentHashingLbConfig consistent_hashing_lb_config = 7;\n  }\n\n  message RefreshRate {\n    option 
(udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Cluster.RefreshRate\";\n\n    // Specifies the base interval between refreshes. This parameter is required and must be greater\n    // than zero and less than\n    // :ref:`max_interval <envoy_api_field_config.cluster.v3.Cluster.RefreshRate.max_interval>`.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {nanos: 1000000}\n    }];\n\n    // Specifies the maximum interval between refreshes. This parameter is optional, but must be\n    // greater than or equal to the\n    // :ref:`base_interval <envoy_api_field_config.cluster.v3.Cluster.RefreshRate.base_interval>`  if set. The default\n    // is 10 times the :ref:`base_interval <envoy_api_field_config.cluster.v3.Cluster.RefreshRate.base_interval>`.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}];\n  }\n\n  // [#not-implemented-hide:]\n  message PrefetchPolicy {\n    // Indicates how many streams (rounded up) can be anticipated per-upstream for each\n    // incoming stream. This is useful for high-QPS or latency-sensitive services. Prefetching\n    // will only be done if the upstream is healthy.\n    //\n    // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be\n    // established, one for the new incoming stream, and one for a presumed follow-up stream. 
For\n    // HTTP/2, only one connection would be established by default as one connection can\n    // serve both the original and presumed follow-up stream.\n    //\n    // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100\n    // active streams, there would be 100 connections in use, and 50 connections prefetched.\n    // This might be a useful value for something like short lived single-use connections,\n    // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection\n    // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP\n    // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more\n    // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue\n    // in case of unexpected disconnects where the connection could not be reused.\n    //\n    // If this value is not set, or set explicitly to one, Envoy will fetch as many connections\n    // as needed to serve streams in flight. This means in steady state if a connection is torn down,\n    // subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be\n    // prefetched.\n    //\n    // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can\n    // harm latency more than the prefetching helps.\n    google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1\n        [(validate.rules).double = {lte: 3.0 gte: 1.0}];\n\n    // Indicates how many streams (rounded up) can be anticipated across a cluster for each\n    // stream, useful for low QPS services. 
This is currently supported for a subset of\n    // deterministic non-hash-based load-balancing algorithms (weighted round robin, random).\n    // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a\n    // cluster, doing best effort predictions of what upstream would be picked next and\n    // pre-establishing a connection.\n    //\n    // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first\n    // incoming stream, 2 connections will be prefetched - one to the first upstream for this\n    // cluster, one to the second on the assumption there will be a follow-up stream.\n    //\n    // Prefetching will be limited to one prefetch per configured upstream in the cluster.\n    //\n    // If this value is not set, or set explicitly to one, Envoy will fetch as many connections\n    // as needed to serve streams in flight, so during warm up and in steady state if a connection\n    // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for\n    // connection establishment.\n    //\n    // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met,\n    // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream.\n    // TODO(alyssawilk) per LB docs and LB overview docs when unhiding.\n    google.protobuf.DoubleValue predictive_prefetch_ratio = 2\n        [(validate.rules).double = {lte: 3.0 gte: 1.0}];\n  }\n\n  reserved 12, 15;\n\n  // Configuration to use different transport sockets for different endpoints.\n  // The entry of *envoy.transport_socket_match* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`\n  // is used to match against the transport sockets as they appear in the list. The first\n  // :ref:`match <envoy_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` is used.\n  // For example, with the following match\n  //\n  // .. 
code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"enableMTLS\"\n  //    match:\n  //      acceptMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... } # tls socket configuration\n  //  - name: \"defaultToPlaintext\"\n  //    match: {}\n  //    transport_socket:\n  //      name: envoy.transport_sockets.raw_buffer\n  //\n  // Connections to the endpoints whose metadata value under *envoy.transport_socket_match*\n  // having \"acceptMTLS\"/\"true\" key/value pair use the \"enableMTLS\" socket configuration.\n  //\n  // If a :ref:`socket match <envoy_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` with empty match\n  // criteria is provided, that always matches any endpoint. For example, the \"defaultToPlaintext\"\n  // socket match in case above.\n  //\n  // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any\n  // *TransportSocketMatch*, socket configuration falls back to use the *tls_context* or\n  // *transport_socket* specified in this cluster.\n  //\n  // This field allows gradual and flexible transport socket configuration changes.\n  //\n  // The metadata of endpoints in EDS can indicate transport socket capabilities. For example,\n  // an endpoint's metadata can have two key value pairs as \"acceptMTLS\": \"true\",\n  // \"acceptPlaintext\": \"true\". While some other endpoints, only accepting plaintext traffic\n  // has \"acceptPlaintext\": \"true\" metadata information.\n  //\n  // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS\n  // traffic for endpoints with \"acceptMTLS\": \"true\", by adding a corresponding\n  // *TransportSocketMatch* in this field. 
Other client Envoys receive CDS without\n  // *transport_socket_match* set, and still send plain text traffic to the same cluster.\n  //\n  // This field can be used to specify custom transport socket configurations for health\n  // checks by adding matching key/value pairs in a health check's\n  // :ref:`transport socket match criteria <envoy_api_field_config.core.v3.HealthCheck.transport_socket_match_criteria>` field.\n  //\n  // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]\n  repeated TransportSocketMatch transport_socket_matches = 43;\n\n  // Supplies the name of the cluster which must be unique across all clusters.\n  // The cluster name is used when emitting\n  // :ref:`statistics <config_cluster_manager_cluster_stats>` if :ref:`alt_stat_name\n  // <envoy_api_field_config.cluster.v3.Cluster.alt_stat_name>` is not provided.\n  // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // An optional alternative to the cluster name to be used while emitting stats.\n  // Any ``:`` in the name will be converted to ``_`` when emitting statistics. 
This should not be\n  // confused with :ref:`Router Filter Header\n  // <config_http_filters_router_x-envoy-upstream-alt-stat-name>`.\n  string alt_stat_name = 28;\n\n  oneof cluster_discovery_type {\n    // The :ref:`service discovery type <arch_overview_service_discovery_types>`\n    // to use for resolving the cluster.\n    DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];\n\n    // The custom cluster type.\n    CustomClusterType cluster_type = 38;\n  }\n\n  // Configuration to use for EDS updates for the Cluster.\n  EdsClusterConfig eds_cluster_config = 3;\n\n  // The timeout for new network connections to hosts in the cluster.\n  google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];\n\n  // Soft limit on size of the cluster’s connections read and write buffers. If\n  // unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5\n      [(udpa.annotations.security).configure_for_untrusted_upstream = true];\n\n  // The :ref:`load balancer type <arch_overview_load_balancing_types>` to use\n  // when picking a host in the cluster.\n  // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>` when implemented.]\n  LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}];\n\n  // Setting this is required for specifying members of\n  // :ref:`STATIC<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STATIC>`,\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>` clusters.\n  // This field supersedes the *hosts* field in the v2 API.\n  //\n  // .. 
attention::\n  //\n  //   Setting this allows non-EDS cluster types to contain embedded EDS equivalent\n  //   :ref:`endpoint assignments<envoy_api_msg_config.endpoint.v3.ClusterLoadAssignment>`.\n  //\n  endpoint.v3.ClusterLoadAssignment load_assignment = 33;\n\n  // Optional :ref:`active health checking <arch_overview_health_checking>`\n  // configuration for the cluster. If no\n  // configuration is specified no health checking will be done and all cluster\n  // members will be considered healthy at all times.\n  repeated core.v3.HealthCheck health_checks = 8;\n\n  // Optional maximum requests for a single upstream connection. This parameter\n  // is respected by both the HTTP/1.1 and HTTP/2 connection pool\n  // implementations. If not specified, there is no limit. Setting this\n  // parameter to 1 will effectively disable keep alive.\n  google.protobuf.UInt32Value max_requests_per_connection = 9;\n\n  // Optional :ref:`circuit breaking <arch_overview_circuit_break>` for the cluster.\n  CircuitBreakers circuit_breakers = 10;\n\n  // HTTP protocol options that are applied only to upstream HTTP connections.\n  // These options apply to all HTTP versions.\n  core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46;\n\n  // Additional options when handling HTTP requests upstream. These options will be applicable to\n  // both HTTP1 and HTTP2 requests.\n  core.v3.HttpProtocolOptions common_http_protocol_options = 29;\n\n  // Additional options when handling HTTP1 requests.\n  core.v3.Http1ProtocolOptions http_protocol_options = 13;\n\n  // Even if default HTTP2 protocol options are desired, this field must be\n  // set so that Envoy will assume that the upstream supports HTTP/2 when\n  // making new HTTP connection pool connections. Currently, Envoy only\n  // supports prior knowledge for upstream connections. Even if TLS is used\n  // with ALPN, `http2_protocol_options` must be specified. 
As an aside this allows HTTP/2\n  // connections to happen over plain text.\n  core.v3.Http2ProtocolOptions http2_protocol_options = 14\n      [(udpa.annotations.security).configure_for_untrusted_upstream = true];\n\n  // The extension_protocol_options field is used to provide extension-specific protocol options\n  // for upstream connections. The key should match the extension filter name, such as\n  // \"envoy.filters.network.thrift_proxy\". See the extension's documentation for details on\n  // specific options.\n  map<string, google.protobuf.Any> typed_extension_protocol_options = 36;\n\n  // If the DNS refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used as the cluster’s DNS refresh\n  // rate. The value configured must be at least 1ms. If this setting is not specified, the\n  // value defaults to 5000ms. For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  google.protobuf.Duration dns_refresh_rate = 16\n      [(validate.rules).duration = {gt {nanos: 1000000}}];\n\n  // If the DNS failure refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is\n  // not specified, the failure refresh rate defaults to the DNS refresh rate. 
For cluster types\n  // other than :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>` this setting is\n  // ignored.\n  RefreshRate dns_failure_refresh_rate = 44;\n\n  // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true,\n  // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS\n  // resolution.\n  bool respect_dns_ttl = 39;\n\n  // The DNS IP address resolution policy. If this setting is not specified, the\n  // value defaults to\n  // :ref:`AUTO<envoy_api_enum_value_config.cluster.v3.Cluster.DnsLookupFamily.AUTO>`.\n  DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}];\n\n  // If DNS resolvers are specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used to specify the cluster’s dns resolvers.\n  // If this setting is not specified, the value defaults to the default\n  // resolver, which uses /etc/resolv.conf for configuration. For cluster types\n  // other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. 
Apple's API only allows overriding DNS resolvers via system settings.\n  repeated core.v3.Address dns_resolvers = 18;\n\n  // [#next-major-version: Reconcile DNS options in a single message.]\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple's API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 45;\n\n  // If specified, outlier detection will be enabled for this upstream cluster.\n  // Each of the configuration values can be overridden via\n  // :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.\n  OutlierDetection outlier_detection = 19;\n\n  // The interval for removing stale hosts from a cluster type\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.ORIGINAL_DST>`.\n  // Hosts are considered stale if they have not been used\n  // as upstream destinations during this interval. New hosts are added\n  // to original destination clusters on demand as new connections are\n  // redirected to Envoy, causing the number of hosts in the cluster to\n  // grow over time. Hosts that are not stale (they are actively used as\n  // destinations) are kept in the cluster, which allows connections to\n  // them to remain open, saving the latency that would otherwise be spent\n  // on opening new connections. If this setting is not specified, the\n  // value defaults to 5000ms. 
For cluster types other than\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.ORIGINAL_DST>`\n  // this setting is ignored.\n  google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This overrides any bind_config specified in the bootstrap proto.\n  // If the address and port are empty, no bind will be performed.\n  core.v3.BindConfig upstream_bind_config = 21;\n\n  // Configuration for load balancing subsetting.\n  LbSubsetConfig lb_subset_config = 22;\n\n  // Optional configuration for the load balancing algorithm selected by\n  // LbPolicy. Currently only\n  // :ref:`RING_HASH<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.RING_HASH>`,\n  // :ref:`MAGLEV<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.MAGLEV>` and\n  // :ref:`LEAST_REQUEST<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.LEAST_REQUEST>`\n  // has additional configuration options.\n  // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding\n  // LbPolicy will generate an error at runtime.\n  oneof lb_config {\n    // Optional configuration for the Ring Hash load balancing policy.\n    RingHashLbConfig ring_hash_lb_config = 23;\n\n    // Optional configuration for the Maglev load balancing policy.\n    MaglevLbConfig maglev_lb_config = 52;\n\n    // Optional configuration for the Original Destination load balancing policy.\n    OriginalDstLbConfig original_dst_lb_config = 34;\n\n    // Optional configuration for the LeastRequest load balancing policy.\n    LeastRequestLbConfig least_request_lb_config = 37;\n  }\n\n  // Common configuration for all load balancer implementations.\n  CommonLbConfig common_lb_config = 27;\n\n  // Optional custom transport socket implementation to use for upstream connections.\n  // To setup TLS, set a transport socket with name 
`tls` and\n  // :ref:`UpstreamTlsContexts <envoy_api_msg_extensions.transport_sockets.tls.v3.UpstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.v3.TransportSocket transport_socket = 24;\n\n  // The Metadata field can be used to provide additional information about the\n  // cluster. It can be used for stats, logging, and varying filter behavior.\n  // Fields should use reverse DNS notation to denote which entity within Envoy\n  // will need the information. For instance, if the metadata is intended for\n  // the Router filter, the filter name should be specified as *envoy.filters.http.router*.\n  core.v3.Metadata metadata = 25;\n\n  // Determines how Envoy selects the protocol used to speak to upstream hosts.\n  ClusterProtocolSelection protocol_selection = 26;\n\n  // Optional options for upstream connections.\n  UpstreamConnectionOptions upstream_connection_options = 30;\n\n  // If an upstream host becomes unhealthy (as determined by the configured health checks\n  // or outlier detection), immediately close all connections to the failed host.\n  //\n  // .. note::\n  //\n  //   This is currently only supported for connections created by tcp_proxy.\n  //\n  // .. note::\n  //\n  //   The current implementation of this feature closes all connections immediately when\n  //   the unhealthy status is detected. If there are a large number of connections open\n  //   to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of\n  //   time exclusively closing these connections, and not processing any other traffic.\n  bool close_connections_on_host_health_failure = 31;\n\n  // If set to true, Envoy will ignore the health value of a host when processing its removal\n  // from service discovery. 
This means that if active health checking is used, Envoy will *not*\n  // wait for the endpoint to go unhealthy before removing it.\n  bool ignore_health_on_host_removal = 32;\n\n  // An (optional) network filter chain, listed in the order the filters should be applied.\n  // The chain will be applied to all outgoing connections that Envoy makes to the upstream\n  // servers of this cluster.\n  repeated Filter filters = 40;\n\n  // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the\n  // :ref:`lb_policy<envoy_api_field_config.cluster.v3.Cluster.lb_policy>` field has the value\n  // :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_config.cluster.v3.Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>`.\n  LoadBalancingPolicy load_balancing_policy = 41;\n\n  // [#not-implemented-hide:]\n  // If present, tells the client where to send load reports via LRS. If not present, the\n  // client will fall back to a client-side default, which may be either (a) don't send any\n  // load reports or (b) send load reports for all clusters to a single default server\n  // (which may be configured in the bootstrap file).\n  //\n  // Note that if multiple clusters point to the same LRS server, the client may choose to\n  // create a separate stream for each cluster or it may choose to coalesce the data for\n  // multiple clusters onto a single stream. Either way, the client must make sure to send\n  // the data for any given cluster on no more than one stream.\n  //\n  // [#next-major-version: In the v3 API, we should consider restructuring this somehow,\n  // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation\n  // from the LRS stream here.]\n  core.v3.ConfigSource lrs_server = 42;\n\n  // If track_timeout_budgets is true, the :ref:`timeout budget histograms\n  // <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each\n  // request. 
These show what percentage of a request's per try and global timeout was used. A value\n  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value\n  // of 100 would indicate that the request took the entirety of the timeout given to it.\n  //\n  // .. attention::\n  //\n  //   This field has been deprecated in favor of `timeout_budgets`, part of\n  //   :ref:`track_cluster_stats <envoy_api_field_config.cluster.v3.Cluster.track_cluster_stats>`.\n  bool track_timeout_budgets = 47 [deprecated = true];\n\n  // Optional customization and configuration of upstream connection pool, and upstream type.\n  //\n  // Currently this field only applies for HTTP traffic but is designed for eventual use for custom\n  // TCP upstreams.\n  //\n  // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream\n  // HTTP, using the http connection pool and the codec from `http2_protocol_options`\n  //\n  // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT\n  // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool.\n  //\n  // The default pool used is the generic connection pool which creates the HTTP upstream for most\n  // HTTP requests, and the TCP upstream if CONNECT termination is configured.\n  //\n  // If users desire custom connection pool or upstream behavior, for example terminating\n  // CONNECT only if a custom filter indicates it is appropriate, the custom factories\n  // can be registered and configured here.\n  core.v3.TypedExtensionConfig upstream_config = 48;\n\n  // Configuration to track optional cluster stats.\n  TrackClusterStats track_cluster_stats = 49;\n\n  // [#not-implemented-hide:]\n  // Prefetch configuration for this cluster.\n  PrefetchPolicy prefetch_policy = 50;\n\n  // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate\n  // connection pool for every downstream 
connection\n  bool connection_pool_per_downstream_connection = 51;\n\n  repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true];\n\n  envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context =\n      11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  map<string, google.protobuf.Struct> hidden_envoy_deprecated_extension_protocol_options = 35\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// [#not-implemented-hide:] Extensible load balancing policy configuration.\n//\n// Every LB policy defined via this mechanism will be identified via a unique name using reverse\n// DNS notation. If the policy needs configuration parameters, it must define a message for its\n// own configuration, which will be stored in the config field. The name of the policy will tell\n// clients which type of message they should expect to see in the config field.\n//\n// Note that there are cases where it is useful to be able to independently select LB policies\n// for choosing a locality and for choosing an endpoint within that locality. For example, a\n// given deployment may always use the same policy to choose the locality, but for choosing the\n// endpoint within the locality, some clusters may use weighted-round-robin, while others may\n// use some sort of session-based balancing.\n//\n// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a\n// child LB policy for each locality. 
For each request, the parent chooses the locality and then\n// delegates to the child policy for that locality to choose the endpoint within the locality.\n//\n// To facilitate this, the config message for the top-level LB policy may include a field of\n// type LoadBalancingPolicy that specifies the child policy.\nmessage LoadBalancingPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.LoadBalancingPolicy\";\n\n  message Policy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.LoadBalancingPolicy.Policy\";\n\n    // Required. The name of the LB policy.\n    string name = 1;\n\n    google.protobuf.Any typed_config = 3;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n\n  // Each client will iterate over the list in order and stop at the first policy that it\n  // supports. This provides a mechanism for starting to use new LB policies that are not yet\n  // supported by all clients.\n  repeated Policy policies = 1;\n}\n\n// An extensible structure containing the address Envoy should bind to when\n// establishing upstream connections.\nmessage UpstreamBindConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.UpstreamBindConfig\";\n\n  // The address Envoy should bind to when establishing upstream connections.\n  core.v3.Address source_address = 1;\n}\n\nmessage UpstreamConnectionOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.UpstreamConnectionOptions\";\n\n  // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.\n  core.v3.TcpKeepalive tcp_keepalive = 1;\n}\n\nmessage TrackClusterStats {\n  // If timeout_budgets is true, the :ref:`timeout budget histograms\n  // <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each\n  // request. These show what percentage of a request's per try and global timeout was used. 
A value\n  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value\n  // of 100 would indicate that the request took the entirety of the timeout given to it.\n  bool timeout_budgets = 1;\n\n  // If request_response_sizes is true, then the :ref:`histograms\n  // <config_cluster_manager_cluster_stats_request_response_sizes>`  tracking header and body sizes\n  // of requests and responses will be published.\n  bool request_response_sizes = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v3/filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v3;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v3\";\noption java_outer_classname = \"FilterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Upstream filters]\n// Upstream filters apply to the connections to the upstream cluster hosts.\n\nmessage Filter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.cluster.Filter\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any typed_config = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v3\";\noption java_outer_classname = \"OutlierDetectionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Outlier detection]\n\n// See the :ref:`architecture overview <arch_overview_outlier_detection>` for\n// more information on outlier detection.\n// [#next-free-field: 21]\nmessage OutlierDetection {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.cluster.OutlierDetection\";\n\n  // The number of consecutive 5xx responses or local origin errors that are mapped\n  // to 5xx error codes before a consecutive 5xx ejection\n  // occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_5xx = 1;\n\n  // The time interval between ejection analysis sweeps. This can result in\n  // both new ejections as well as hosts being returned to service. Defaults\n  // to 10000ms or 10s.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}];\n\n  // The base time that a host is ejected for. The real time is equal to the\n  // base time multiplied by the number of times the host has been ejected.\n  // Defaults to 30000ms or 30s.\n  google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];\n\n  // The maximum % of an upstream cluster that can be ejected due to outlier\n  // detection. 
Defaults to 10% but will eject at least one host regardless of the value.\n  google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive 5xx. This setting can be used to disable\n  // ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics. This setting can be used to\n  // disable ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}];\n\n  // The number of hosts in a cluster that must have enough request volume to\n  // detect success rate outliers. If the number of hosts is less than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for any host in the cluster. Defaults to 5.\n  google.protobuf.UInt32Value success_rate_minimum_hosts = 7;\n\n  // The minimum number of total requests that must be collected in one\n  // interval (as defined by the interval duration above) to include this host\n  // in success rate based outlier detection. If the volume is lower than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for that host. Defaults to 100.\n  google.protobuf.UInt32Value success_rate_request_volume = 8;\n\n  // This factor is used to determine the ejection threshold for success rate\n  // outlier ejection. The ejection threshold is the difference between the\n  // mean success rate, and the product of this factor and the standard\n  // deviation of the mean success rate: mean - (stdev *\n  // success_rate_stdev_factor). This factor is divided by a thousand to get a\n  // double. 
That is, if the desired factor is 1.9, the runtime value should\n  // be 1900. Defaults to 1900.\n  google.protobuf.UInt32Value success_rate_stdev_factor = 9;\n\n  // The number of consecutive gateway failures (502, 503, 504 status codes)\n  // before a consecutive gateway failure ejection occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_gateway_failure = 10;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive gateway failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. Defaults to 0.\n  google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // Determines whether to distinguish local origin failures from external errors. If set to true\n  // the following configuration parameters are taken into account:\n  // :ref:`consecutive_local_origin_failure<envoy_api_field_config.cluster.v3.OutlierDetection.consecutive_local_origin_failure>`,\n  // :ref:`enforcing_consecutive_local_origin_failure<envoy_api_field_config.cluster.v3.OutlierDetection.enforcing_consecutive_local_origin_failure>`\n  // and\n  // :ref:`enforcing_local_origin_success_rate<envoy_api_field_config.cluster.v3.OutlierDetection.enforcing_local_origin_success_rate>`.\n  // Defaults to false.\n  bool split_external_local_origin_errors = 12;\n\n  // The number of consecutive locally originated failures before ejection\n  // occurs. Defaults to 5. Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value consecutive_local_origin_failure = 13;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive locally originated failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. 
Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics for locally originated errors.\n  // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The failure percentage to use when determining failure percentage-based outlier detection. If\n  // the failure percentage of a given host is greater than or equal to this value, it will be\n  // ejected. Defaults to 85.\n  google.protobuf.UInt32Value failure_percentage_threshold = 16\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // failure percentage statistics. This setting can be used to disable ejection or to ramp it up\n  // slowly. Defaults to 0.\n  //\n  // [#next-major-version: setting this without setting failure_percentage_threshold should be\n  // invalid in v4.]\n  google.protobuf.UInt32Value enforcing_failure_percentage = 17\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // local-origin failure percentage statistics. This setting can be used to disable ejection or to\n  // ramp it up slowly. 
Defaults to 0.\n  google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection.\n  // If the total number of hosts in the cluster is less than this value, failure percentage-based\n  // ejection will not be performed. Defaults to 5.\n  google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19;\n\n  // The minimum number of total requests that must be collected in one interval (as defined by the\n  // interval duration above) to perform failure percentage-based ejection for this host. If the\n  // volume is lower than this setting, failure percentage-based ejection will not be performed for\n  // this host. Defaults to 50.\n  google.protobuf.UInt32Value failure_percentage_request_volume = 20;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v4alpha/circuit_breaker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v4alpha\";\noption java_outer_classname = \"CircuitBreakerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Circuit breakers]\n\n// :ref:`Circuit breaking<arch_overview_circuit_break>` settings can be\n// specified individually for each defined priority.\nmessage CircuitBreakers {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.CircuitBreakers\";\n\n  // A Thresholds defines CircuitBreaker settings for a\n  // :ref:`RoutingPriority<envoy_api_enum_config.core.v4alpha.RoutingPriority>`.\n  // [#next-free-field: 9]\n  message Thresholds {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.CircuitBreakers.Thresholds\";\n\n    message RetryBudget {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget\";\n\n      // Specifies the limit on concurrent retries as a percentage of the sum of active requests and\n      // active pending requests. For example, if there are 100 active requests and the\n      // budget_percent is set to 25, there may be 25 active retries.\n      //\n      // This parameter is optional. Defaults to 20%.\n      type.v3.Percent budget_percent = 1;\n\n      // Specifies the minimum retry concurrency allowed for the retry budget. 
The limit on the\n      // number of active retries may never go below this number.\n      //\n      // This parameter is optional. Defaults to 3.\n      google.protobuf.UInt32Value min_retry_concurrency = 2;\n    }\n\n    // The :ref:`RoutingPriority<envoy_api_enum_config.core.v4alpha.RoutingPriority>`\n    // the specified CircuitBreaker settings apply to.\n    core.v4alpha.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // The maximum number of connections that Envoy will make to the upstream\n    // cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_connections = 2;\n\n    // The maximum number of pending requests that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_pending_requests = 3;\n\n    // The maximum number of parallel requests that Envoy will make to the\n    // upstream cluster. If not specified, the default is 1024.\n    google.protobuf.UInt32Value max_requests = 4;\n\n    // The maximum number of parallel retries that Envoy will allow to the\n    // upstream cluster. If not specified, the default is 3.\n    google.protobuf.UInt32Value max_retries = 5;\n\n    // Specifies a limit on concurrent retries in relation to the number of active requests. This\n    // parameter is optional.\n    //\n    // .. note::\n    //\n    //    If this field is set, the retry budget will override any configured retry circuit\n    //    breaker.\n    RetryBudget retry_budget = 8;\n\n    // If track_remaining is true, then stats will be published that expose\n    // the number of resources remaining until the circuit breakers open. If\n    // not specified, the default is false.\n    //\n    // .. 
note::\n    //\n    //    If a retry budget is used in lieu of the max_retries circuit breaker,\n    //    the remaining retry resources remaining will not be tracked.\n    bool track_remaining = 6;\n\n    // The maximum number of connection pools per cluster that Envoy will concurrently support at\n    // once. If not specified, the default is unlimited. Set this for clusters which create a\n    // large number of connection pools. See\n    // :ref:`Circuit Breaking <arch_overview_circuit_break_cluster_maximum_connection_pools>` for\n    // more details.\n    google.protobuf.UInt32Value max_connection_pools = 7;\n  }\n\n  // If multiple :ref:`Thresholds<envoy_api_msg_config.cluster.v4alpha.CircuitBreakers.Thresholds>`\n  // are defined with the same :ref:`RoutingPriority<envoy_api_enum_config.core.v4alpha.RoutingPriority>`,\n  // the first one in the list is used. If no Thresholds is defined for a given\n  // :ref:`RoutingPriority<envoy_api_enum_config.core.v4alpha.RoutingPriority>`, the default values\n  // are used.\n  repeated Thresholds thresholds = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v4alpha;\n\nimport \"envoy/config/cluster/v4alpha/circuit_breaker.proto\";\nimport \"envoy/config/cluster/v4alpha/filter.proto\";\nimport \"envoy/config/cluster/v4alpha/outlier_detection.proto\";\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/health_check.proto\";\nimport \"envoy/config/core/v4alpha/protocol.proto\";\nimport \"envoy/config/endpoint/v3/endpoint.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/collection_entry.proto\";\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v4alpha\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Cluster configuration]\n\n// Cluster list collections. 
Entries are *Cluster* resources or references.\n// [#not-implemented-hide:]\nmessage ClusterCollection {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.ClusterCollection\";\n\n  udpa.core.v1.CollectionEntry entries = 1;\n}\n\n// Configuration for a single upstream cluster.\n// [#next-free-field: 53]\nmessage Cluster {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.cluster.v3.Cluster\";\n\n  // Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`\n  // for an explanation on each type.\n  enum DiscoveryType {\n    // Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`\n    // for an explanation.\n    STATIC = 0;\n\n    // Refer to the :ref:`strict DNS discovery\n    // type<arch_overview_service_discovery_types_strict_dns>`\n    // for an explanation.\n    STRICT_DNS = 1;\n\n    // Refer to the :ref:`logical DNS discovery\n    // type<arch_overview_service_discovery_types_logical_dns>`\n    // for an explanation.\n    LOGICAL_DNS = 2;\n\n    // Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`\n    // for an explanation.\n    EDS = 3;\n\n    // Refer to the :ref:`original destination discovery\n    // type<arch_overview_service_discovery_types_original_destination>`\n    // for an explanation.\n    ORIGINAL_DST = 4;\n  }\n\n  // Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture\n  // overview section for information on each type.\n  enum LbPolicy {\n    reserved 4;\n\n    reserved \"ORIGINAL_DST_LB\";\n\n    // Refer to the :ref:`round robin load balancing\n    // policy<arch_overview_load_balancing_types_round_robin>`\n    // for an explanation.\n    ROUND_ROBIN = 0;\n\n    // Refer to the :ref:`least request load balancing\n    // policy<arch_overview_load_balancing_types_least_request>`\n    // for an explanation.\n    LEAST_REQUEST = 1;\n\n    // Refer 
to the :ref:`ring hash load balancing\n    // policy<arch_overview_load_balancing_types_ring_hash>`\n    // for an explanation.\n    RING_HASH = 2;\n\n    // Refer to the :ref:`random load balancing\n    // policy<arch_overview_load_balancing_types_random>`\n    // for an explanation.\n    RANDOM = 3;\n\n    // Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`\n    // for an explanation.\n    MAGLEV = 5;\n\n    // This load balancer type must be specified if the configured cluster provides a cluster\n    // specific load balancer. Consult the configured cluster's documentation for whether to set\n    // this option or not.\n    CLUSTER_PROVIDED = 6;\n\n    // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy\n    // <envoy_api_field_config.cluster.v4alpha.Cluster.load_balancing_policy>` field to determine the LB policy.\n    // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field\n    // and instead using the new load_balancing_policy field as the one and only mechanism for\n    // configuring this.]\n    LOAD_BALANCING_POLICY_CONFIG = 7;\n  }\n\n  // When V4_ONLY is selected, the DNS resolver will only perform a lookup for\n  // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will\n  // only perform a lookup for addresses in the IPv6 family. 
If AUTO is\n  // specified, the DNS resolver will first perform a lookup for addresses in\n  // the IPv6 family and fallback to a lookup for addresses in the IPv4 family.\n  // For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this setting is\n  // ignored.\n  enum DnsLookupFamily {\n    AUTO = 0;\n    V4_ONLY = 1;\n    V6_ONLY = 2;\n  }\n\n  enum ClusterProtocolSelection {\n    // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).\n    // If :ref:`http2_protocol_options <envoy_api_field_config.cluster.v4alpha.Cluster.http2_protocol_options>` are\n    // present, HTTP2 will be used, otherwise HTTP1.1 will be used.\n    USE_CONFIGURED_PROTOCOL = 0;\n\n    // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.\n    USE_DOWNSTREAM_PROTOCOL = 1;\n  }\n\n  // TransportSocketMatch specifies what transport socket config will be used\n  // when the match conditions are satisfied.\n  message TransportSocketMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.TransportSocketMatch\";\n\n    // The name of the match, used in stats generation.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Optional endpoint metadata match criteria.\n    // The connection to the endpoint with metadata matching what is set in this field\n    // will use the transport socket configuration specified here.\n    // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match\n    // against the values specified in this field.\n    google.protobuf.Struct match = 2;\n\n    // The configuration of the transport socket.\n    core.v4alpha.TransportSocket transport_socket = 3;\n  }\n\n  // Extended cluster type.\n  message CustomClusterType 
{\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.CustomClusterType\";\n\n    // The type of the cluster to instantiate. The name must match a supported cluster type.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Cluster specific configuration which depends on the cluster being instantiated.\n    // See the supported cluster for further documentation.\n    google.protobuf.Any typed_config = 2;\n  }\n\n  // Only valid when discovery type is EDS.\n  message EdsClusterConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.EdsClusterConfig\";\n\n    // Configuration for the source of EDS updates for this Cluster.\n    core.v4alpha.ConfigSource eds_config = 1;\n\n    oneof name_specifier {\n      // Optional alternative to cluster name to present to EDS. This does not\n      // have the same restrictions as cluster name, i.e. it may be arbitrary\n      // length.\n      string service_name = 2;\n\n      // Resource locator for EDS. This is mutually exclusive to *service_name*.\n      // [#not-implemented-hide:]\n      udpa.core.v1.ResourceLocator eds_resource_locator = 3;\n    }\n  }\n\n  // Optionally divide the endpoints in this cluster into subsets defined by\n  // endpoint metadata and selected by route and weighted cluster metadata.\n  // [#next-free-field: 8]\n  message LbSubsetConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.LbSubsetConfig\";\n\n    // If NO_FALLBACK is selected, a result\n    // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,\n    // any cluster endpoint may be returned (subject to policy, health checks,\n    // etc). 
If DEFAULT_SUBSET is selected, load balancing is performed over the\n    // endpoints matching the values from the default_subset field.\n    enum LbSubsetFallbackPolicy {\n      NO_FALLBACK = 0;\n      ANY_ENDPOINT = 1;\n      DEFAULT_SUBSET = 2;\n    }\n\n    // Specifications for subsets.\n    message LbSubsetSelector {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector\";\n\n      // Allows to override top level fallback policy per selector.\n      enum LbSubsetSelectorFallbackPolicy {\n        // If NOT_DEFINED top level config fallback policy is used instead.\n        NOT_DEFINED = 0;\n\n        // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.\n        NO_FALLBACK = 1;\n\n        // If ANY_ENDPOINT is selected, any cluster endpoint may be returned\n        // (subject to policy, health checks, etc).\n        ANY_ENDPOINT = 2;\n\n        // If DEFAULT_SUBSET is selected, load balancing is performed over the\n        // endpoints matching the values from the default_subset field.\n        DEFAULT_SUBSET = 3;\n\n        // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata\n        // keys reduced to\n        // :ref:`fallback_keys_subset<envoy_api_field_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_keys_subset>`.\n        // It allows for a fallback to a different, less specific selector if some of the keys of\n        // the selector are considered optional.\n        KEYS_SUBSET = 4;\n      }\n\n      // List of keys to match with the weighted cluster metadata.\n      repeated string keys = 1;\n\n      // Selects a mode of operation in which each subset has only one host. 
This mode uses the same rules for\n      // choosing a host, but updating hosts is faster, especially for large numbers of hosts.\n      //\n      // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy.\n      //\n      // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains\n      // only one entry.\n      //\n      // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys`\n      // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge\n      // :ref:`lb_subsets_single_host_per_subset_duplicate<config_cluster_manager_cluster_stats_subset_lb>` indicates how many duplicates are\n      // present in the current configuration.\n      bool single_host_per_subset = 4;\n\n      // The behavior used when no endpoint subset matches the selected route's\n      // metadata.\n      LbSubsetSelectorFallbackPolicy fallback_policy = 2\n          [(validate.rules).enum = {defined_only: true}];\n\n      // Subset of\n      // :ref:`keys<envoy_api_field_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` used by\n      // :ref:`KEYS_SUBSET<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`\n      // fallback policy.\n      // It has to be a non empty list if KEYS_SUBSET fallback policy is selected.\n      // For any other fallback policy the parameter is not used and should not be set.\n      // Only values also present in\n      // :ref:`keys<envoy_api_field_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` are allowed, but\n      // `fallback_keys_subset` cannot be equal to `keys`.\n      repeated string fallback_keys_subset = 3;\n    }\n\n    // The behavior used when no endpoint subset matches the selected route's\n    // 
metadata. The value defaults to\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // Specifies the default subset of endpoints used during fallback if\n    // fallback_policy is\n    // :ref:`DEFAULT_SUBSET<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.\n    // Each field in default_subset is\n    // compared to the matching LbEndpoint.Metadata under the *envoy.lb*\n    // namespace. It is valid for no hosts to match, in which case the behavior\n    // is the same as a fallback_policy of\n    // :ref:`NO_FALLBACK<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.\n    google.protobuf.Struct default_subset = 2;\n\n    // For each entry, LbEndpoint.Metadata's\n    // *envoy.lb* namespace is traversed and a subset is created for each unique\n    // combination of key and value. For example:\n    //\n    // .. code-block:: json\n    //\n    //   { \"subset_selectors\": [\n    //       { \"keys\": [ \"version\" ] },\n    //       { \"keys\": [ \"stage\", \"hardware_type\" ] }\n    //   ]}\n    //\n    // A subset is matched when the metadata from the selected route and\n    // weighted cluster contains the same keys and values as the subset's\n    // metadata. 
The same host may appear in multiple subsets.\n    repeated LbSubsetSelector subset_selectors = 3;\n\n    // If true, routing to subsets will take into account the localities and locality weights of the\n    // endpoints when making the routing decision.\n    //\n    // There are some potential pitfalls associated with enabling this feature, as the resulting\n    // traffic split after applying both a subset match and locality weights might be undesirable.\n    //\n    // Consider for example a situation in which you have 50/50 split across two localities X/Y\n    // which have 100 hosts each without subsetting. If the subset LB results in X having only 1\n    // host selected but Y having 100, then a lot more load is being dumped on the single host in X\n    // than originally anticipated in the load balancing assignment delivered via EDS.\n    bool locality_weight_aware = 4;\n\n    // When used with locality_weight_aware, scales the weight of each locality by the ratio\n    // of hosts in the subset vs hosts in the original subset. This aims to even out the load\n    // going to an individual locality if said locality is disproportionately affected by the\n    // subset predicate.\n    bool scale_locality_weight = 5;\n\n    // If true, when a fallback policy is configured and its corresponding subset fails to find\n    // a host this will cause any host to be selected instead.\n    //\n    // This is useful when using the default subset as the fallback policy, given the default\n    // subset might become empty. 
With this option enabled, if that happens the LB will attempt\n    // to select a host from the entire cluster.\n    bool panic_mode_any = 6;\n\n    // If true, metadata specified for a metadata key will be matched against the corresponding\n    // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value\n    // and any of the elements in the list matches the criteria.\n    bool list_as_any = 7;\n  }\n\n  // Specific configuration for the LeastRequest load balancing policy.\n  message LeastRequestLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.LeastRequestLbConfig\";\n\n    // The number of random healthy hosts from which the host with the fewest active requests will\n    // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.\n    google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];\n\n    // The following formula is used to calculate the dynamic weights when hosts have different load\n    // balancing weights:\n    //\n    // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`\n    //\n    // The larger the active request bias is, the more aggressively active requests will lower the\n    // effective weight when all host weights are not equal.\n    //\n    // `active_request_bias` must be greater than or equal to 0.0.\n    //\n    // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number\n    // of active requests at the time it picks a host and behaves like the Round Robin Load\n    // Balancer.\n    //\n    // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing\n    // weight by the number of active requests at the time it does a pick.\n    //\n    // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's\n    // host sets changes, e.g., whenever there 
is a host membership update or a host load balancing\n    // weight change.\n    //\n    // .. note::\n    //   This setting only takes effect if all host weights are not equal.\n    core.v4alpha.RuntimeDouble active_request_bias = 2;\n  }\n\n  // Specific configuration for the :ref:`RingHash<arch_overview_load_balancing_types_ring_hash>`\n  // load balancing policy.\n  message RingHashLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.RingHashLbConfig\";\n\n    // The hash function used to hash hosts onto the ketama ring.\n    enum HashFunction {\n      // Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.\n      XX_HASH = 0;\n\n      // Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with\n      // std:hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled\n      // on Linux and not macOS.\n      MURMUR_HASH_2 = 1;\n    }\n\n    reserved 2;\n\n    // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each\n    // provided host) the better the request distribution will reflect the desired weights. Defaults\n    // to 1024 entries, and limited to 8M entries. See also\n    // :ref:`maximum_ring_size<envoy_api_field_config.cluster.v4alpha.Cluster.RingHashLbConfig.maximum_ring_size>`.\n    google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];\n\n    // The hash function used to hash hosts onto the ketama ring. The value defaults to\n    // :ref:`XX_HASH<envoy_api_enum_value_config.cluster.v4alpha.Cluster.RingHashLbConfig.HashFunction.XX_HASH>`.\n    HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];\n\n    // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered\n    // to further constrain resource use. 
See also\n    // :ref:`minimum_ring_size<envoy_api_field_config.cluster.v4alpha.Cluster.RingHashLbConfig.minimum_ring_size>`.\n    google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];\n  }\n\n  // Specific configuration for the :ref:`Maglev<arch_overview_load_balancing_types_maglev>`\n  // load balancing policy.\n  message MaglevLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.MaglevLbConfig\";\n\n    // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee.\n    // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same\n    // upstream as it was before. Increasing the table size reduces the amount of disruption.\n    // The table size must be prime number. If it is not specified, the default is 65537.\n    google.protobuf.UInt64Value table_size = 1;\n  }\n\n  // Specific configuration for the\n  // :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`\n  // load balancing policy.\n  message OriginalDstLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.OriginalDstLbConfig\";\n\n    // When true, :ref:`x-envoy-original-dst-host\n    // <config_http_conn_man_headers_x-envoy-original-dst-host>` can be used to override destination\n    // address.\n    //\n    // .. 
attention::\n    //\n    //   This header isn't sanitized by default, so enabling this feature allows HTTP clients to\n    //   route traffic to arbitrary hosts and/or ports, which may have serious security\n    //   consequences.\n    bool use_http_header = 1;\n  }\n\n  // Common configuration for all load balancer implementations.\n  // [#next-free-field: 8]\n  message CommonLbConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.CommonLbConfig\";\n\n    // Configuration for :ref:`zone aware routing\n    // <arch_overview_load_balancing_zone_aware_routing>`.\n    message ZoneAwareLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig\";\n\n      // Configures percentage of requests that will be considered for zone aware routing\n      // if zone aware routing is configured. If not specified, the default is 100%.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      type.v3.Percent routing_enabled = 1;\n\n      // Configures minimum upstream cluster size required for zone aware routing\n      // If upstream cluster size is less than specified, zone aware routing is not performed\n      // even if zone aware routing is configured. If not specified, the default is 6.\n      // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.\n      // * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.\n      google.protobuf.UInt64Value min_cluster_size = 2;\n\n      // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic\n      // mode<arch_overview_load_balancing_panic_threshold>`. Instead, the cluster will fail all\n      // requests as if all hosts are unhealthy. 
This can help avoid potentially overwhelming a\n      // failing service.\n      bool fail_traffic_on_panic = 3;\n    }\n\n    // Configuration for :ref:`locality weighted load balancing\n    // <arch_overview_load_balancing_locality_weighted_lb>`\n    message LocalityWeightedLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig\";\n    }\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    message ConsistentHashingLbConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig\";\n\n      // If set to `true`, the cluster will use hostname instead of the resolved\n      // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.\n      bool use_hostname_for_hashing = 1;\n\n      // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150\n      // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster.\n      // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200.\n      // Minimum is 100.\n      //\n      // Applies to both Ring Hash and Maglev load balancers.\n      //\n      // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified\n      // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests\n      // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing\n      // is used to identify an eligible host. 
Further, the linear probe is implemented using a random jump in hosts ring/table to identify\n      // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the\n      // cascading overflow effect when choosing the next host in the ring/table).\n      //\n      // If weights are specified on the hosts, they are respected.\n      //\n      // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts\n      // being probed, so use a higher value if you require better performance.\n      google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}];\n    }\n\n    // Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.\n    // If not specified, the default is 50%.\n    // To disable panic mode, set to 0%.\n    //\n    // .. note::\n    //   The specified percent will be truncated to the nearest 1%.\n    type.v3.Percent healthy_panic_threshold = 1;\n\n    oneof locality_config_specifier {\n      ZoneAwareLbConfig zone_aware_lb_config = 2;\n\n      LocalityWeightedLbConfig locality_weighted_lb_config = 3;\n    }\n\n    // If set, all health check/weight/metadata updates that happen within this duration will be\n    // merged and delivered in one shot when the duration expires. The start of the duration is when\n    // the first update happens. This is useful for big clusters, with potentially noisy deploys\n    // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes\n    // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new\n    // cluster). Please always keep in mind that the use of sandbox technologies may change this\n    // behavior.\n    //\n    // If this is not set, we default to a merge window of 1000ms. 
To disable it, set the merge\n    // window to 0.\n    //\n    // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is\n    // because merging those updates isn't currently safe. See\n    // https://github.com/envoyproxy/envoy/pull/3941.\n    google.protobuf.Duration update_merge_window = 4;\n\n    // If set to true, Envoy will not consider new hosts when computing load balancing weights until\n    // they have been health checked for the first time. This will have no effect unless\n    // active health checking is also configured.\n    //\n    // Ignoring a host means that for any load balancing calculations that adjust weights based\n    // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and\n    // panic mode) Envoy will exclude these hosts in the denominator.\n    //\n    // For example, with hosts in two priorities P0 and P1, where P0 looks like\n    // {healthy, unhealthy (new), unhealthy (new)}\n    // and where P1 looks like\n    // {healthy, healthy}\n    // all traffic will still hit P0, as 1 / (3 - 2) = 1.\n    //\n    // Enabling this will allow scaling up the number of hosts for a given cluster without entering\n    // panic mode or triggering priority spillover, assuming the hosts pass the first health check.\n    //\n    // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not\n    // contribute to the calculation when deciding whether panic mode is enabled or not.\n    bool ignore_new_hosts_until_first_hc = 5;\n\n    // If set to `true`, the cluster manager will drain all existing\n    // connections to upstream hosts whenever hosts are added or removed from the cluster.\n    bool close_connections_on_host_set_change = 6;\n\n    // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)\n    ConsistentHashingLbConfig consistent_hashing_lb_config = 7;\n  }\n\n  message RefreshRate {\n    option 
(udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.RefreshRate\";\n\n    // Specifies the base interval between refreshes. This parameter is required and must be greater\n    // than zero and less than\n    // :ref:`max_interval <envoy_api_field_config.cluster.v4alpha.Cluster.RefreshRate.max_interval>`.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {nanos: 1000000}\n    }];\n\n    // Specifies the maximum interval between refreshes. This parameter is optional, but must be\n    // greater than or equal to the\n    // :ref:`base_interval <envoy_api_field_config.cluster.v4alpha.Cluster.RefreshRate.base_interval>`  if set. The default\n    // is 10 times the :ref:`base_interval <envoy_api_field_config.cluster.v4alpha.Cluster.RefreshRate.base_interval>`.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}];\n  }\n\n  // [#not-implemented-hide:]\n  message PrefetchPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.Cluster.PrefetchPolicy\";\n\n    // Indicates how many streams (rounded up) can be anticipated per-upstream for each\n    // incoming stream. This is useful for high-QPS or latency-sensitive services. Prefetching\n    // will only be done if the upstream is healthy.\n    //\n    // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be\n    // established, one for the new incoming stream, and one for a presumed follow-up stream. 
For\n    // HTTP/2, only one connection would be established by default as one connection can\n    // serve both the original and presumed follow-up stream.\n    //\n    // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100\n    // active streams, there would be 100 connections in use, and 50 connections prefetched.\n    // This might be a useful value for something like short lived single-use connections,\n    // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection\n    // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP\n    // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more\n    // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue\n    // in case of unexpected disconnects where the connection could not be reused.\n    //\n    // If this value is not set, or set explicitly to one, Envoy will fetch as many connections\n    // as needed to serve streams in flight. This means in steady state if a connection is torn down,\n    // subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be\n    // prefetched.\n    //\n    // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can\n    // harm latency more than the prefetching helps.\n    google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1\n        [(validate.rules).double = {lte: 3.0 gte: 1.0}];\n\n    // Indicates how many streams (rounded up) can be anticipated across a cluster for each\n    // stream, useful for low QPS services. 
This is currently supported for a subset of\n    // deterministic non-hash-based load-balancing algorithms (weighted round robin, random).\n    // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a\n    // cluster, doing best effort predictions of what upstream would be picked next and\n    // pre-establishing a connection.\n    //\n    // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first\n    // incoming stream, 2 connections will be prefetched - one to the first upstream for this\n    // cluster, one to the second on the assumption there will be a follow-up stream.\n    //\n    // Prefetching will be limited to one prefetch per configured upstream in the cluster.\n    //\n    // If this value is not set, or set explicitly to one, Envoy will fetch as many connections\n    // as needed to serve streams in flight, so during warm up and in steady state if a connection\n    // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for\n    // connection establishment.\n    //\n    // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met,\n    // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream.\n    // TODO(alyssawilk) per LB docs and LB overview docs when unhiding.\n    google.protobuf.DoubleValue predictive_prefetch_ratio = 2\n        [(validate.rules).double = {lte: 3.0 gte: 1.0}];\n  }\n\n  reserved 12, 15, 7, 11, 35;\n\n  reserved \"hosts\", \"tls_context\", \"extension_protocol_options\";\n\n  // Configuration to use different transport sockets for different endpoints.\n  // The entry of *envoy.transport_socket_match* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`\n  // is used to match against the transport sockets as they appear in the list. 
The first\n  // :ref:`match <envoy_api_msg_config.cluster.v4alpha.Cluster.TransportSocketMatch>` is used.\n  // For example, with the following match\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"enableMTLS\"\n  //    match:\n  //      acceptMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... } # tls socket configuration\n  //  - name: \"defaultToPlaintext\"\n  //    match: {}\n  //    transport_socket:\n  //      name: envoy.transport_sockets.raw_buffer\n  //\n  // Connections to the endpoints whose metadata value under *envoy.transport_socket_match*\n  // having \"acceptMTLS\"/\"true\" key/value pair use the \"enableMTLS\" socket configuration.\n  //\n  // If a :ref:`socket match <envoy_api_msg_config.cluster.v4alpha.Cluster.TransportSocketMatch>` with empty match\n  // criteria is provided, that always matches any endpoint. For example, the \"defaultToPlaintext\"\n  // socket match in case above.\n  //\n  // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any\n  // *TransportSocketMatch*, socket configuration falls back to using the *tls_context* or\n  // *transport_socket* specified in this cluster.\n  //\n  // This field allows gradual and flexible transport socket configuration changes.\n  //\n  // The metadata of endpoints in EDS can indicate transport socket capabilities. For example,\n  // an endpoint's metadata can have two key value pairs as \"acceptMTLS\": \"true\",\n  // \"acceptPlaintext\": \"true\". While some other endpoints, only accepting plaintext traffic\n  // has \"acceptPlaintext\": \"true\" metadata information.\n  //\n  // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS\n  // traffic for endpoints with \"acceptMTLS\": \"true\", by adding a corresponding\n  // *TransportSocketMatch* in this field. 
Other client Envoys receive CDS without\n  // *transport_socket_match* set, and still send plain text traffic to the same cluster.\n  //\n  // This field can be used to specify custom transport socket configurations for health\n  // checks by adding matching key/value pairs in a health check's\n  // :ref:`transport socket match criteria <envoy_api_field_config.core.v4alpha.HealthCheck.transport_socket_match_criteria>` field.\n  //\n  // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]\n  repeated TransportSocketMatch transport_socket_matches = 43;\n\n  // Supplies the name of the cluster which must be unique across all clusters.\n  // The cluster name is used when emitting\n  // :ref:`statistics <config_cluster_manager_cluster_stats>` if :ref:`alt_stat_name\n  // <envoy_api_field_config.cluster.v4alpha.Cluster.alt_stat_name>` is not provided.\n  // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // An optional alternative to the cluster name to be used while emitting stats.\n  // Any ``:`` in the name will be converted to ``_`` when emitting statistics. 
This should not be\n  // confused with :ref:`Router Filter Header\n  // <config_http_filters_router_x-envoy-upstream-alt-stat-name>`.\n  string alt_stat_name = 28;\n\n  oneof cluster_discovery_type {\n    // The :ref:`service discovery type <arch_overview_service_discovery_types>`\n    // to use for resolving the cluster.\n    DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];\n\n    // The custom cluster type.\n    CustomClusterType cluster_type = 38;\n  }\n\n  // Configuration to use for EDS updates for the Cluster.\n  EdsClusterConfig eds_cluster_config = 3;\n\n  // The timeout for new network connections to hosts in the cluster.\n  google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];\n\n  // Soft limit on size of the cluster’s connections read and write buffers. If\n  // unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5\n      [(udpa.annotations.security).configure_for_untrusted_upstream = true];\n\n  // The :ref:`load balancer type <arch_overview_load_balancing_types>` to use\n  // when picking a host in the cluster.\n  // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>` when implemented.]\n  LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}];\n\n  // Setting this is required for specifying members of\n  // :ref:`STATIC<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STATIC>`,\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>` clusters.\n  // This field supersedes the *hosts* field in the v2 API.\n  //\n  // .. 
attention::\n  //\n  //   Setting this allows non-EDS cluster types to contain embedded EDS equivalent\n  //   :ref:`endpoint assignments<envoy_api_msg_config.endpoint.v3.ClusterLoadAssignment>`.\n  //\n  endpoint.v3.ClusterLoadAssignment load_assignment = 33;\n\n  // Optional :ref:`active health checking <arch_overview_health_checking>`\n  // configuration for the cluster. If no\n  // configuration is specified no health checking will be done and all cluster\n  // members will be considered healthy at all times.\n  repeated core.v4alpha.HealthCheck health_checks = 8;\n\n  // Optional maximum requests for a single upstream connection. This parameter\n  // is respected by both the HTTP/1.1 and HTTP/2 connection pool\n  // implementations. If not specified, there is no limit. Setting this\n  // parameter to 1 will effectively disable keep alive.\n  google.protobuf.UInt32Value max_requests_per_connection = 9;\n\n  // Optional :ref:`circuit breaking <arch_overview_circuit_break>` for the cluster.\n  CircuitBreakers circuit_breakers = 10;\n\n  // HTTP protocol options that are applied only to upstream HTTP connections.\n  // These options apply to all HTTP versions.\n  core.v4alpha.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46;\n\n  // Additional options when handling HTTP requests upstream. These options will be applicable to\n  // both HTTP1 and HTTP2 requests.\n  core.v4alpha.HttpProtocolOptions common_http_protocol_options = 29;\n\n  // Additional options when handling HTTP1 requests.\n  core.v4alpha.Http1ProtocolOptions http_protocol_options = 13;\n\n  // Even if default HTTP2 protocol options are desired, this field must be\n  // set so that Envoy will assume that the upstream supports HTTP/2 when\n  // making new HTTP connection pool connections. Currently, Envoy only\n  // supports prior knowledge for upstream connections. Even if TLS is used\n  // with ALPN, `http2_protocol_options` must be specified. 
As an aside this allows HTTP/2\n  // connections to happen over plain text.\n  core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14\n      [(udpa.annotations.security).configure_for_untrusted_upstream = true];\n\n  // The extension_protocol_options field is used to provide extension-specific protocol options\n  // for upstream connections. The key should match the extension filter name, such as\n  // \"envoy.filters.network.thrift_proxy\". See the extension's documentation for details on\n  // specific options.\n  map<string, google.protobuf.Any> typed_extension_protocol_options = 36;\n\n  // If the DNS refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used as the cluster’s DNS refresh\n  // rate. The value configured must be at least 1ms. If this setting is not specified, the\n  // value defaults to 5000ms. For cluster types other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  google.protobuf.Duration dns_refresh_rate = 16\n      [(validate.rules).duration = {gt {nanos: 1000000}}];\n\n  // If the DNS failure refresh rate is specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is\n  // not specified, the failure refresh rate defaults to the DNS refresh rate. 
For cluster types\n  // other than :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>` and\n  // :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>` this setting is\n  // ignored.\n  RefreshRate dns_failure_refresh_rate = 44;\n\n  // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true,\n  // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS\n  // resolution.\n  bool respect_dns_ttl = 39;\n\n  // The DNS IP address resolution policy. If this setting is not specified, the\n  // value defaults to\n  // :ref:`AUTO<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DnsLookupFamily.AUTO>`.\n  DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}];\n\n  // If DNS resolvers are specified and the cluster type is either\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`,\n  // or :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`,\n  // this value is used to specify the cluster’s dns resolvers.\n  // If this setting is not specified, the value defaults to the default\n  // resolver, which uses /etc/resolv.conf for configuration. For cluster types\n  // other than\n  // :ref:`STRICT_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.STRICT_DNS>`\n  // and :ref:`LOGICAL_DNS<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.LOGICAL_DNS>`\n  // this setting is ignored.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. 
Apple's API only allows overriding DNS resolvers via system settings.\n  repeated core.v4alpha.Address dns_resolvers = 18;\n\n  // [#next-major-version: Reconcile DNS options in a single message.]\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple's API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 45;\n\n  // If specified, outlier detection will be enabled for this upstream cluster.\n  // Each of the configuration values can be overridden via\n  // :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.\n  OutlierDetection outlier_detection = 19;\n\n  // The interval for removing stale hosts from a cluster type\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.ORIGINAL_DST>`.\n  // Hosts are considered stale if they have not been used\n  // as upstream destinations during this interval. New hosts are added\n  // to original destination clusters on demand as new connections are\n  // redirected to Envoy, causing the number of hosts in the cluster to\n  // grow over time. Hosts that are not stale (they are actively used as\n  // destinations) are kept in the cluster, which allows connections to\n  // them to remain open, saving the latency that would otherwise be spent\n  // on opening new connections. If this setting is not specified, the\n  // value defaults to 5000ms. 
For cluster types other than\n  // :ref:`ORIGINAL_DST<envoy_api_enum_value_config.cluster.v4alpha.Cluster.DiscoveryType.ORIGINAL_DST>`\n  // this setting is ignored.\n  google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];\n\n  // Optional configuration used to bind newly established upstream connections.\n  // This overrides any bind_config specified in the bootstrap proto.\n  // If the address and port are empty, no bind will be performed.\n  core.v4alpha.BindConfig upstream_bind_config = 21;\n\n  // Configuration for load balancing subsetting.\n  LbSubsetConfig lb_subset_config = 22;\n\n  // Optional configuration for the load balancing algorithm selected by\n  // LbPolicy. Currently only\n  // :ref:`RING_HASH<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.RING_HASH>`,\n  // :ref:`MAGLEV<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.MAGLEV>` and\n  // :ref:`LEAST_REQUEST<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.LEAST_REQUEST>`\n  // has additional configuration options.\n  // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding\n  // LbPolicy will generate an error at runtime.\n  oneof lb_config {\n    // Optional configuration for the Ring Hash load balancing policy.\n    RingHashLbConfig ring_hash_lb_config = 23;\n\n    // Optional configuration for the Maglev load balancing policy.\n    MaglevLbConfig maglev_lb_config = 52;\n\n    // Optional configuration for the Original Destination load balancing policy.\n    OriginalDstLbConfig original_dst_lb_config = 34;\n\n    // Optional configuration for the LeastRequest load balancing policy.\n    LeastRequestLbConfig least_request_lb_config = 37;\n  }\n\n  // Common configuration for all load balancer implementations.\n  CommonLbConfig common_lb_config = 27;\n\n  // Optional custom transport socket implementation to use for upstream connections.\n  // To setup TLS, set a 
transport socket with name `tls` and\n  // :ref:`UpstreamTlsContexts <envoy_api_msg_extensions.transport_sockets.tls.v4alpha.UpstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.v4alpha.TransportSocket transport_socket = 24;\n\n  // The Metadata field can be used to provide additional information about the\n  // cluster. It can be used for stats, logging, and varying filter behavior.\n  // Fields should use reverse DNS notation to denote which entity within Envoy\n  // will need the information. For instance, if the metadata is intended for\n  // the Router filter, the filter name should be specified as *envoy.filters.http.router*.\n  core.v4alpha.Metadata metadata = 25;\n\n  // Determines how Envoy selects the protocol used to speak to upstream hosts.\n  ClusterProtocolSelection protocol_selection = 26;\n\n  // Optional options for upstream connections.\n  UpstreamConnectionOptions upstream_connection_options = 30;\n\n  // If an upstream host becomes unhealthy (as determined by the configured health checks\n  // or outlier detection), immediately close all connections to the failed host.\n  //\n  // .. note::\n  //\n  //   This is currently only supported for connections created by tcp_proxy.\n  //\n  // .. note::\n  //\n  //   The current implementation of this feature closes all connections immediately when\n  //   the unhealthy status is detected. If there are a large number of connections open\n  //   to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of\n  //   time exclusively closing these connections, and not processing any other traffic.\n  bool close_connections_on_host_health_failure = 31;\n\n  // If set to true, Envoy will ignore the health value of a host when processing its removal\n  // from service discovery. 
This means that if active health checking is used, Envoy will *not*\n  // wait for the endpoint to go unhealthy before removing it.\n  bool ignore_health_on_host_removal = 32;\n\n  // An (optional) network filter chain, listed in the order the filters should be applied.\n  // The chain will be applied to all outgoing connections that Envoy makes to the upstream\n  // servers of this cluster.\n  repeated Filter filters = 40;\n\n  // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the\n  // :ref:`lb_policy<envoy_api_field_config.cluster.v4alpha.Cluster.lb_policy>` field has the value\n  // :ref:`LOAD_BALANCING_POLICY_CONFIG<envoy_api_enum_value_config.cluster.v4alpha.Cluster.LbPolicy.LOAD_BALANCING_POLICY_CONFIG>`.\n  LoadBalancingPolicy load_balancing_policy = 41;\n\n  // [#not-implemented-hide:]\n  // If present, tells the client where to send load reports via LRS. If not present, the\n  // client will fall back to a client-side default, which may be either (a) don't send any\n  // load reports or (b) send load reports for all clusters to a single default server\n  // (which may be configured in the bootstrap file).\n  //\n  // Note that if multiple clusters point to the same LRS server, the client may choose to\n  // create a separate stream for each cluster or it may choose to coalesce the data for\n  // multiple clusters onto a single stream. Either way, the client must make sure to send\n  // the data for any given cluster on no more than one stream.\n  //\n  // [#next-major-version: In the v3 API, we should consider restructuring this somehow,\n  // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation\n  // from the LRS stream here.]\n  core.v4alpha.ConfigSource lrs_server = 42;\n\n  // If track_timeout_budgets is true, the :ref:`timeout budget histograms\n  // <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each\n  // request. 
These show what percentage of a request's per try and global timeout was used. A value\n  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value\n  // of 100 would indicate that the request took the entirety of the timeout given to it.\n  //\n  // .. attention::\n  //\n  //   This field has been deprecated in favor of `timeout_budgets`, part of\n  //   :ref:`track_cluster_stats <envoy_api_field_config.cluster.v4alpha.Cluster.track_cluster_stats>`.\n  bool hidden_envoy_deprecated_track_timeout_budgets = 47 [deprecated = true];\n\n  // Optional customization and configuration of upstream connection pool, and upstream type.\n  //\n  // Currently this field only applies for HTTP traffic but is designed for eventual use for custom\n  // TCP upstreams.\n  //\n  // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream\n  // HTTP, using the http connection pool and the codec from `http2_protocol_options`\n  //\n  // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT\n  // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool.\n  //\n  // The default pool used is the generic connection pool which creates the HTTP upstream for most\n  // HTTP requests, and the TCP upstream if CONNECT termination is configured.\n  //\n  // If users desire custom connection pool or upstream behavior, for example terminating\n  // CONNECT only if a custom filter indicates it is appropriate, the custom factories\n  // can be registered and configured here.\n  core.v4alpha.TypedExtensionConfig upstream_config = 48;\n\n  // Configuration to track optional cluster stats.\n  TrackClusterStats track_cluster_stats = 49;\n\n  // [#not-implemented-hide:]\n  // Prefetch configuration for this cluster.\n  PrefetchPolicy prefetch_policy = 50;\n\n  // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate\n  // 
connection pool for every downstream connection\n  bool connection_pool_per_downstream_connection = 51;\n}\n\n// [#not-implemented-hide:] Extensible load balancing policy configuration.\n//\n// Every LB policy defined via this mechanism will be identified via a unique name using reverse\n// DNS notation. If the policy needs configuration parameters, it must define a message for its\n// own configuration, which will be stored in the config field. The name of the policy will tell\n// clients which type of message they should expect to see in the config field.\n//\n// Note that there are cases where it is useful to be able to independently select LB policies\n// for choosing a locality and for choosing an endpoint within that locality. For example, a\n// given deployment may always use the same policy to choose the locality, but for choosing the\n// endpoint within the locality, some clusters may use weighted-round-robin, while others may\n// use some sort of session-based balancing.\n//\n// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a\n// child LB policy for each locality. For each request, the parent chooses the locality and then\n// delegates to the child policy for that locality to choose the endpoint within the locality.\n//\n// To facilitate this, the config message for the top-level LB policy may include a field of\n// type LoadBalancingPolicy that specifies the child policy.\nmessage LoadBalancingPolicy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.LoadBalancingPolicy\";\n\n  message Policy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.cluster.v3.LoadBalancingPolicy.Policy\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    // Required. 
The name of the LB policy.\n    string name = 1;\n\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Each client will iterate over the list in order and stop at the first policy that it\n  // supports. This provides a mechanism for starting to use new LB policies that are not yet\n  // supported by all clients.\n  repeated Policy policies = 1;\n}\n\n// An extensible structure containing the address Envoy should bind to when\n// establishing upstream connections.\nmessage UpstreamBindConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.UpstreamBindConfig\";\n\n  // The address Envoy should bind to when establishing upstream connections.\n  core.v4alpha.Address source_address = 1;\n}\n\nmessage UpstreamConnectionOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.UpstreamConnectionOptions\";\n\n  // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.\n  core.v4alpha.TcpKeepalive tcp_keepalive = 1;\n}\n\nmessage TrackClusterStats {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.TrackClusterStats\";\n\n  // If timeout_budgets is true, the :ref:`timeout budget histograms\n  // <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each\n  // request. These show what percentage of a request's per try and global timeout was used. A value\n  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value\n  // of 100 would indicate that the request took the entirety of the timeout given to it.\n  bool timeout_budgets = 1;\n\n  // If request_response_sizes is true, then the :ref:`histograms\n  // <config_cluster_manager_cluster_stats_request_response_sizes>`  tracking header and body sizes\n  // of requests and responses will be published.\n  bool request_response_sizes = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v4alpha;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v4alpha\";\noption java_outer_classname = \"FilterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Upstream filters]\n// Upstream filters apply to the connections to the upstream cluster hosts.\n\nmessage Filter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.cluster.v3.Filter\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any typed_config = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.cluster.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.cluster.v4alpha\";\noption java_outer_classname = \"OutlierDetectionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Outlier detection]\n\n// See the :ref:`architecture overview <arch_overview_outlier_detection>` for\n// more information on outlier detection.\n// [#next-free-field: 21]\nmessage OutlierDetection {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.v3.OutlierDetection\";\n\n  // The number of consecutive 5xx responses or local origin errors that are mapped\n  // to 5xx error codes before a consecutive 5xx ejection\n  // occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_5xx = 1;\n\n  // The time interval between ejection analysis sweeps. This can result in\n  // both new ejections as well as hosts being returned to service. Defaults\n  // to 10000ms or 10s.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}];\n\n  // The base time that a host is ejected for. The real time is equal to the\n  // base time multiplied by the number of times the host has been ejected.\n  // Defaults to 30000ms or 30s.\n  google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];\n\n  // The maximum % of an upstream cluster that can be ejected due to outlier\n  // detection. 
Defaults to 10% but will eject at least one host regardless of the value.\n  google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive 5xx. This setting can be used to disable\n  // ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics. This setting can be used to\n  // disable ejection or to ramp it up slowly. Defaults to 100.\n  google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}];\n\n  // The number of hosts in a cluster that must have enough request volume to\n  // detect success rate outliers. If the number of hosts is less than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for any host in the cluster. Defaults to 5.\n  google.protobuf.UInt32Value success_rate_minimum_hosts = 7;\n\n  // The minimum number of total requests that must be collected in one\n  // interval (as defined by the interval duration above) to include this host\n  // in success rate based outlier detection. If the volume is lower than this\n  // setting, outlier detection via success rate statistics is not performed\n  // for that host. Defaults to 100.\n  google.protobuf.UInt32Value success_rate_request_volume = 8;\n\n  // This factor is used to determine the ejection threshold for success rate\n  // outlier ejection. The ejection threshold is the difference between the\n  // mean success rate, and the product of this factor and the standard\n  // deviation of the mean success rate: mean - (stdev *\n  // success_rate_stdev_factor). This factor is divided by a thousand to get a\n  // double. 
That is, if the desired factor is 1.9, the runtime value should\n  // be 1900. Defaults to 1900.\n  google.protobuf.UInt32Value success_rate_stdev_factor = 9;\n\n  // The number of consecutive gateway failures (502, 503, 504 status codes)\n  // before a consecutive gateway failure ejection occurs. Defaults to 5.\n  google.protobuf.UInt32Value consecutive_gateway_failure = 10;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive gateway failures. This setting can be\n  // used to disable ejection or to ramp it up slowly. Defaults to 0.\n  google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // Determines whether to distinguish local origin failures from external errors. If set to true\n  // the following configuration parameters are taken into account:\n  // :ref:`consecutive_local_origin_failure<envoy_api_field_config.cluster.v4alpha.OutlierDetection.consecutive_local_origin_failure>`,\n  // :ref:`enforcing_consecutive_local_origin_failure<envoy_api_field_config.cluster.v4alpha.OutlierDetection.enforcing_consecutive_local_origin_failure>`\n  // and\n  // :ref:`enforcing_local_origin_success_rate<envoy_api_field_config.cluster.v4alpha.OutlierDetection.enforcing_local_origin_success_rate>`.\n  // Defaults to false.\n  bool split_external_local_origin_errors = 12;\n\n  // The number of consecutive locally originated failures before ejection\n  // occurs. Defaults to 5. Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value consecutive_local_origin_failure = 13;\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through consecutive locally originated failures. 
This setting can be\n  // used to disable ejection or to ramp it up slowly. Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status\n  // is detected through success rate statistics for locally originated errors.\n  // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.\n  // Parameter takes effect only when\n  // :ref:`split_external_local_origin_errors<envoy_api_field_config.cluster.v4alpha.OutlierDetection.split_external_local_origin_errors>`\n  // is set to true.\n  google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The failure percentage to use when determining failure percentage-based outlier detection. If\n  // the failure percentage of a given host is greater than or equal to this value, it will be\n  // ejected. Defaults to 85.\n  google.protobuf.UInt32Value failure_percentage_threshold = 16\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // failure percentage statistics. This setting can be used to disable ejection or to ramp it up\n  // slowly. Defaults to 0.\n  //\n  // [#next-major-version: setting this without setting failure_percentage_threshold should be\n  // invalid in v4.]\n  google.protobuf.UInt32Value enforcing_failure_percentage = 17\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The % chance that a host will be actually ejected when an outlier status is detected through\n  // local-origin failure percentage statistics. 
This setting can be used to disable ejection or to\n  // ramp it up slowly. Defaults to 0.\n  google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18\n      [(validate.rules).uint32 = {lte: 100}];\n\n  // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection.\n  // If the total number of hosts in the cluster is less than this value, failure percentage-based\n  // ejection will not be performed. Defaults to 5.\n  google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19;\n\n  // The minimum number of total requests that must be collected in one interval (as defined by the\n  // interval duration above) to perform failure percentage-based ejection for this host. If the\n  // volume is lower than this setting, failure percentage-based ejection will not be performed for\n  // this host. Defaults to 50.\n  google.protobuf.UInt32Value failure_percentage_request_volume = 20;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.common.dynamic_forward_proxy.v2alpha;\n\nimport \"envoy/api/v2/cluster.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v2alpha\";\noption java_outer_classname = \"DnsCacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.common.dynamic_forward_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamic forward proxy common configuration]\n\n// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#next-free-field: 7]\nmessage DnsCacheConfig {\n  // The name of the cache. Multiple named caches allow independent dynamic forward proxy\n  // configurations to operate within a single Envoy process using different configurations. All\n  // configurations with the same name *must* otherwise have the same settings when referenced\n  // from different configuration components. Configuration will fail to load if this is not\n  // the case.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The DNS lookup family to use during resolution.\n  //\n  // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 \"happy eyeballs\" mode. The\n  // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and\n  // then configures a host to have a primary and fall back address. With this, we could very\n  // likely build a \"happy eyeballs\" connection pool which would race the primary / fall back\n  // address and return the one that wins. 
This same method could potentially also be used for\n  // QUIC to TCP fall back.]\n  api.v2.Cluster.DnsLookupFamily dns_lookup_family = 2\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s.\n  //\n  // .. note:\n  //\n  //  The returned DNS TTL is not currently used to alter the refresh rate. This feature will be\n  //  added in a future change.\n  //\n  // .. note:\n  //\n  // The refresh rate is rounded to the closest millisecond, and must be at least 1ms.\n  google.protobuf.Duration dns_refresh_rate = 3\n      [(validate.rules).duration = {gte {nanos: 1000000}}];\n\n  // The TTL for hosts that are unused. Hosts that have not been used in the configured time\n  // interval will be purged. If not specified defaults to 5m.\n  //\n  // .. note:\n  //\n  //   The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This\n  //   means that if the configured TTL is shorter than the refresh rate the host may not be removed\n  //   immediately.\n  //\n  //  .. note:\n  //\n  //   The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage.\n  google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}];\n\n  // The maximum number of hosts that the cache will hold. If not specified defaults to 1024.\n  //\n  // .. note:\n  //\n  //   The implementation is approximate and enforced independently on each worker thread, thus\n  //   it is possible for the maximum hosts in the cache to go slightly above the configured\n  //   value depending on timing. This is similar to how other circuit breakers work.\n  google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}];\n\n  // If the DNS failure refresh rate is specified,\n  // this is used as the cache's DNS refresh rate when DNS requests are failing. 
If this setting is\n  // not specified, the failure refresh rate defaults to the dns_refresh_rate.\n  api.v2.Cluster.RefreshRate dns_failure_refresh_rate = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/common/matcher/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.common.matcher.v3;\n\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.common.matcher.v3\";\noption java_outer_classname = \"MatcherProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Unified Matcher API]\n\n// Match configuration. This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 11]\nmessage MatchPredicate {\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. 
The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n\n    // HTTP request generic body match configuration.\n    HttpGenericBodyMatch http_request_generic_body_match = 9;\n\n    // HTTP response generic body match configuration.\n    HttpGenericBodyMatch http_response_generic_body_match = 10;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  // HTTP headers to match.\n  repeated route.v3.HeaderMatcher headers = 1;\n}\n\n// HTTP generic body match configuration.\n// List of text strings and hex strings to be located in HTTP body.\n// All specified strings must be found in the HTTP body for positive match.\n// The search may be limited to specified number of bytes from the body start.\n//\n// .. attention::\n//\n//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.\n//   If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified\n//   to scan only part of the http body.\nmessage HttpGenericBodyMatch {\n  message GenericTextMatch {\n    oneof rule {\n      option (validate.required) = true;\n\n      // Text string to be located in HTTP body.\n      string string_match = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Sequence of bytes to be located in HTTP body.\n      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];\n    }\n  }\n\n  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).\n  uint32 bytes_limit = 1;\n\n  // List of patterns to match.\n  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/matcher/v3:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.common.matcher.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.common.matcher.v4alpha\";\noption java_outer_classname = \"MatcherProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Unified Matcher API]\n\n// Match configuration. This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 11]\nmessage MatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.matcher.v3.MatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.common.matcher.v3.MatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. 
The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n\n    // HTTP request generic body match configuration.\n    HttpGenericBodyMatch http_request_generic_body_match = 9;\n\n    // HTTP response generic body match configuration.\n    HttpGenericBodyMatch http_response_generic_body_match = 10;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.matcher.v3.HttpHeadersMatch\";\n\n  // HTTP headers to match.\n  repeated route.v4alpha.HeaderMatcher headers = 1;\n}\n\n// HTTP generic body match configuration.\n// List of text strings and hex strings to be located in HTTP body.\n// All specified strings must be found in the HTTP body for positive match.\n// The search may be limited to specified number of bytes from the body start.\n//\n// .. attention::\n//\n//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.\n//   If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified\n//   to scan only part of the http body.\nmessage HttpGenericBodyMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.matcher.v3.HttpGenericBodyMatch\";\n\n  message GenericTextMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch\";\n\n    oneof rule {\n      option (validate.required) = true;\n\n      // Text string to be located in HTTP body.\n      string string_match = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Sequence of bytes to be located in HTTP body.\n      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];\n    }\n  }\n\n  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).\n  uint32 bytes_limit = 1;\n\n  // List of patterns to match.\n  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.common.tap.v2alpha;\n\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/service/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.common.tap.v2alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.common.tap.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common tap extension configuration]\n\n// Common configuration for all tap extensions.\nmessage CommonExtensionConfig {\n  // [#not-implemented-hide:]\n  message TapDSConfig {\n    // Configuration for the source of TapDS updates for this Cluster.\n    api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n    // Tap config to request from XDS server.\n    string name = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  oneof config_type {\n    option (validate.required) = true;\n\n    // If specified, the tap filter will be configured via an admin handler.\n    AdminConfig admin_config = 1;\n\n    // If specified, the tap filter will be configured via a static configuration that cannot be\n    // changed.\n    service.tap.v2alpha.TapConfig static_config = 2;\n\n    // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter.\n    TapDSConfig tapds_config = 3;\n  }\n}\n\n// Configuration for the admin handler. See :ref:`here <config_http_filters_tap_admin_handler>` for\n// more information.\nmessage AdminConfig {\n  // Opaque configuration ID. 
When requests are made to the admin handler, the passed opaque ID is\n  // matched to the configured filter opaque ID to determine which filter to configure.\n  string config_id = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/address.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/socket_option.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"AddressProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Network addresses]\n\nmessage Pipe {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Pipe\";\n\n  // Unix Domain Socket path. On Linux, paths starting with '@' will use the\n  // abstract namespace. The starting '@' is replaced by a null byte by Envoy.\n  // Paths starting with '@' will result in an error in environments other than\n  // Linux.\n  string path = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The mode for the Pipe. Not applicable for abstract sockets.\n  uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}];\n}\n\n// [#not-implemented-hide:] The address represents an envoy internal listener.\n// TODO(lambdai): Make this address available for listener and endpoint.\n// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.\nmessage EnvoyInternalAddress {\n  oneof address_name_specifier {\n    option (validate.required) = true;\n\n    // [#not-implemented-hide:] The :ref:`listener name <envoy_api_field_config.listener.v3.Listener.name>` of the destination internal listener.\n    string server_listener_name = 1;\n  }\n}\n\n// [#next-free-field: 7]\nmessage SocketAddress {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.SocketAddress\";\n\n  enum Protocol {\n    TCP = 0;\n    UDP = 1;\n  }\n\n  Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The address for this socket. 
:ref:`Listeners <config_listeners>` will bind\n  // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::``\n  // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented:\n  // It is possible to distinguish a Listener address via the prefix/suffix matching\n  // in :ref:`FilterChainMatch <envoy_api_msg_config.listener.v3.FilterChainMatch>`.] When used\n  // within an upstream :ref:`BindConfig <envoy_api_msg_config.core.v3.BindConfig>`, the address\n  // controls the source address of outbound connections. For :ref:`clusters\n  // <envoy_api_msg_config.cluster.v3.Cluster>`, the cluster type determines whether the\n  // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS\n  // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized\n  // via :ref:`resolver_name <envoy_api_field_config.core.v3.SocketAddress.resolver_name>`.\n  string address = 2 [(validate.rules).string = {min_len: 1}];\n\n  oneof port_specifier {\n    option (validate.required) = true;\n\n    uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}];\n\n    // This is only valid if :ref:`resolver_name\n    // <envoy_api_field_config.core.v3.SocketAddress.resolver_name>` is specified below and the\n    // named resolver is capable of named port resolution.\n    string named_port = 4;\n  }\n\n  // The name of the custom resolver. This must have been registered with Envoy. If\n  // this is empty, a context dependent default applies. If the address is a concrete\n  // IP address, no resolution will occur. If address is a hostname this\n  // should be set for resolution other than DNS. Specifying a custom resolver with\n  // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime.\n  string resolver_name = 5;\n\n  // When binding to an IPv6 address above, this enables `IPv4 compatibility\n  // <https://tools.ietf.org/html/rfc3493#page-11>`_. 
Binding to ``::`` will\n  // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into\n  // IPv6 space as ``::FFFF:<IPv4-address>``.\n  bool ipv4_compat = 6;\n}\n\nmessage TcpKeepalive {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.TcpKeepalive\";\n\n  // Maximum number of keepalive probes to send without response before deciding\n  // the connection is dead. Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 9.)\n  google.protobuf.UInt32Value keepalive_probes = 1;\n\n  // The number of seconds a connection needs to be idle before keep-alive probes\n  // start being sent. Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 7200s (i.e., 2 hours.)\n  google.protobuf.UInt32Value keepalive_time = 2;\n\n  // The number of seconds between keep-alive probes. Default is to use the OS\n  // level configuration (unless overridden, Linux defaults to 75s.)\n  google.protobuf.UInt32Value keepalive_interval = 3;\n}\n\nmessage BindConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.BindConfig\";\n\n  // The address to bind to when creating a socket.\n  SocketAddress source_address = 1 [(validate.rules).message = {required: true}];\n\n  // Whether to set the *IP_FREEBIND* option when creating the socket. When this\n  // flag is set to true, allows the :ref:`source_address\n  // <envoy_api_field_config.cluster.v3.UpstreamBindConfig.source_address>` to be an IP address\n  // that is not configured on the system running Envoy. When this flag is set\n  // to false, the option *IP_FREEBIND* is disabled on the socket. When this\n  // flag is not set (default), the socket is not modified, i.e. 
the option is\n  // neither enabled nor disabled.\n  google.protobuf.BoolValue freebind = 2;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated SocketOption socket_options = 3;\n}\n\n// Addresses specify either a logical or physical address and port, which are\n// used to tell Envoy where to bind/listen, connect to upstream and find\n// management servers.\nmessage Address {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Address\";\n\n  oneof address {\n    option (validate.required) = true;\n\n    SocketAddress socket_address = 1;\n\n    Pipe pipe = 2;\n\n    // [#not-implemented-hide:]\n    EnvoyInternalAddress envoy_internal_address = 3;\n  }\n}\n\n// CidrRange specifies an IP Address and a prefix length to construct\n// the subnet mask for a `CIDR <https://tools.ietf.org/html/rfc4632>`_ range.\nmessage CidrRange {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.CidrRange\";\n\n  // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.\n  string address_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Length of prefix, e.g. 0, 32.\n  google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/backoff.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"BackoffProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Backoff Strategy]\n\n// Configuration defining a jittered exponential back off strategy.\nmessage BackoffStrategy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.BackoffStrategy\";\n\n  // The base interval to be used for the next back off computation. It should\n  // be greater than zero and less than or equal to :ref:`max_interval\n  // <envoy_api_field_config.core.v3.BackoffStrategy.max_interval>`.\n  google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // Specifies the maximum interval between retries. This parameter is optional,\n  // but must be greater than or equal to the :ref:`base_interval\n  // <envoy_api_field_config.core.v3.BackoffStrategy.base_interval>` if set. The default\n  // is 10 times the :ref:`base_interval\n  // <envoy_api_field_config.core.v3.BackoffStrategy.base_interval>`.\n  google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/base.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/backoff.proto\";\nimport \"envoy/config/core/v3/http_uri.proto\";\nimport \"envoy/type/v3/percent.proto\";\nimport \"envoy/type/v3/semantic_version.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"BaseProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common types]\n\n// Envoy supports :ref:`upstream priority routing\n// <arch_overview_http_routing_priority>` both at the route and the virtual\n// cluster level. The current priority implementation uses different connection\n// pool and circuit breaking settings for each priority level. This means that\n// even for HTTP/2 requests, two physical connections will be used to an\n// upstream host. 
In the future Envoy will likely support true HTTP/2 priority\n// over a single upstream connection.\nenum RoutingPriority {\n  DEFAULT = 0;\n  HIGH = 1;\n}\n\n// HTTP request method.\nenum RequestMethod {\n  METHOD_UNSPECIFIED = 0;\n  GET = 1;\n  HEAD = 2;\n  POST = 3;\n  PUT = 4;\n  DELETE = 5;\n  CONNECT = 6;\n  OPTIONS = 7;\n  TRACE = 8;\n  PATCH = 9;\n}\n\n// Identifies the direction of the traffic relative to the local Envoy.\nenum TrafficDirection {\n  // Default option is unspecified.\n  UNSPECIFIED = 0;\n\n  // The transport is used for incoming traffic.\n  INBOUND = 1;\n\n  // The transport is used for outgoing traffic.\n  OUTBOUND = 2;\n}\n\n// Identifies location of where either Envoy runs or where upstream hosts run.\nmessage Locality {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Locality\";\n\n  // Region this :ref:`zone <envoy_api_field_config.core.v3.Locality.zone>` belongs to.\n  string region = 1;\n\n  // Defines the local service zone where Envoy is running. Though optional, it\n  // should be set if discovery service routing is used and the discovery\n  // service exposes :ref:`zone data <envoy_api_field_config.endpoint.v3.LocalityLbEndpoints.locality>`,\n  // either in this message or via :option:`--service-zone`. The meaning of zone\n  // is context dependent, e.g. `Availability Zone (AZ)\n  // <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html>`_\n  // on AWS, `Zone <https://cloud.google.com/compute/docs/regions-zones/>`_ on\n  // GCP, etc.\n  string zone = 2;\n\n  // When used for locality of upstream hosts, this field further splits zone\n  // into smaller chunks of sub-zones so they can be load balanced\n  // independently.\n  string sub_zone = 3;\n}\n\n// BuildVersion combines SemVer version of extension with free-form build information\n// (i.e. 
'alpha', 'private-build') as a set of strings.\nmessage BuildVersion {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.BuildVersion\";\n\n  // SemVer version of extension.\n  type.v3.SemanticVersion version = 1;\n\n  // Free-form build information.\n  // Envoy defines several well known keys in the source/common/version/version.h file\n  google.protobuf.Struct metadata = 2;\n}\n\n// Version and identification for an Envoy extension.\n// [#next-free-field: 6]\nmessage Extension {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Extension\";\n\n  // This is the name of the Envoy filter as specified in the Envoy\n  // configuration, e.g. envoy.filters.http.router, com.acme.widget.\n  string name = 1;\n\n  // Category of the extension.\n  // Extension category names use reverse DNS notation. For instance \"envoy.filters.listener\"\n  // for Envoy's built-in listener filters or \"com.acme.filters.http\" for HTTP filters from\n  // acme.com vendor.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.]\n  string category = 2;\n\n  // [#not-implemented-hide:] Type descriptor of extension configuration proto.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.]\n  // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.]\n  string type_descriptor = 3;\n\n  // The version is a property of the extension and maintained independently\n  // of other extensions and the Envoy API.\n  // This field is not set when extension did not provide version information.\n  BuildVersion version = 4;\n\n  // Indicates that the extension is present but was disabled via dynamic configuration.\n  bool disabled = 5;\n}\n\n// Identifies a specific Envoy instance. 
The node identifier is presented to the\n// management server, which may use this identifier to distinguish per Envoy\n// configuration for serving.\n// [#next-free-field: 12]\nmessage Node {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Node\";\n\n  // An opaque node identifier for the Envoy node. This also provides the local\n  // service node name. It should be set if any of the following features are\n  // used: :ref:`statsd <arch_overview_statistics>`, :ref:`CDS\n  // <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-node`.\n  string id = 1;\n\n  // Defines the local service cluster name where Envoy is running. Though\n  // optional, it should be set if any of the following features are used:\n  // :ref:`statsd <arch_overview_statistics>`, :ref:`health check cluster\n  // verification\n  // <envoy_api_field_config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher>`,\n  // :ref:`runtime override directory <envoy_api_msg_config.bootstrap.v3.Runtime>`,\n  // :ref:`user agent addition\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_user_agent>`,\n  // :ref:`HTTP global rate limiting <config_http_filters_rate_limit>`,\n  // :ref:`CDS <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-cluster`.\n  string cluster = 2;\n\n  // Opaque metadata extending the node identifier. Envoy will pass this\n  // directly to the management server.\n  google.protobuf.Struct metadata = 3;\n\n  // Locality specifying where the Envoy instance is running.\n  Locality locality = 4;\n\n  // Free-form string that identifies the entity requesting config.\n  // E.g. 
\"envoy\" or \"grpc\"\n  string user_agent_name = 6;\n\n  oneof user_agent_version_type {\n    // Free-form string that identifies the version of the entity requesting config.\n    // E.g. \"1.12.2\" or \"abcd1234\", or \"SpecialEnvoyBuild\"\n    string user_agent_version = 7;\n\n    // Structured version of the entity requesting config.\n    BuildVersion user_agent_build_version = 8;\n  }\n\n  // List of extensions and their versions supported by the node.\n  repeated Extension extensions = 9;\n\n  // Client feature support list. These are well known features described\n  // in the Envoy API repository for a given major version of an API. Client features\n  // use reverse DNS naming scheme, for example `com.acme.feature`.\n  // See :ref:`the list of features <client_features>` that xDS client may\n  // support.\n  repeated string client_features = 10;\n\n  // Known listening ports on the node as a generic hint to the management server\n  // for filtering :ref:`listeners <config_listeners>` to be returned. For example,\n  // if there is a listener bound to port 80, the list can optionally contain the\n  // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint.\n  repeated Address listening_addresses = 11 [deprecated = true];\n\n  string hidden_envoy_deprecated_build_version = 5 [deprecated = true];\n}\n\n// Metadata provides additional inputs to filters based on matched listeners,\n// filter chains, routes and endpoints. It is structured as a map, usually from\n// filter name (in reverse DNS format) to metadata specific to the filter. 
Metadata\n// key-values for a filter are merged as connection and request handling occurs,\n// with later values for the same key overriding earlier values.\n//\n// An example use of metadata is providing additional values to\n// http_connection_manager in the envoy.http_connection_manager.access_log\n// namespace.\n//\n// Another example use of metadata is to per service config info in cluster metadata, which may get\n// consumed by multiple filters.\n//\n// For load balancing, Metadata provides a means to subset cluster endpoints.\n// Endpoints have a Metadata object associated and routes contain a Metadata\n// object to match against. There are some well defined metadata used today for\n// this purpose:\n//\n// * ``{\"envoy.lb\": {\"canary\": <bool> }}`` This indicates the canary status of an\n//   endpoint and is also used during header processing\n//   (x-envoy-upstream-canary) and for stats purposes.\n// [#next-major-version: move to type/metadata/v2]\nmessage Metadata {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.Metadata\";\n\n  // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*\n  // namespace is reserved for Envoy's built-in filters.\n  map<string, google.protobuf.Struct> filter_metadata = 1;\n}\n\n// Runtime derived uint32 with a default when not specified.\nmessage RuntimeUInt32 {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.RuntimeUInt32\";\n\n  // Default value if runtime value is not available.\n  uint32 default_value = 2;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 3 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived percentage with a default when not specified.\nmessage RuntimePercent {\n  // Default value if runtime value is not available.\n  type.v3.Percent default_value = 1;\n\n  // Runtime key to get value for comparison. 
This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived double with a default when not specified.\nmessage RuntimeDouble {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.RuntimeDouble\";\n\n  // Default value if runtime value is not available.\n  double default_value = 1;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived bool with a default when not specified.\nmessage RuntimeFeatureFlag {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.RuntimeFeatureFlag\";\n\n  // Default value if runtime value is not available.\n  google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key to get value for comparison. This value is used if defined. The boolean value must\n  // be represented via its\n  // `canonical JSON encoding <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Header name/value pair.\nmessage HeaderValue {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.HeaderValue\";\n\n  // Header name.\n  string key = 1\n      [(validate.rules).string =\n           {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Header value.\n  //\n  // The same :ref:`format specifier <config_access_log_format>` as used for\n  // :ref:`HTTP access logging <config_access_log>` applies here, however\n  // unknown header values are replaced with the empty string instead of `-`.\n  string value = 2 [\n    (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}\n  ];\n}\n\n// Header name/value pair plus option to control append behavior.\nmessage HeaderValueOption {\n  
option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.HeaderValueOption\";\n\n  // Header name/value pair that this option applies to.\n  HeaderValue header = 1 [(validate.rules).message = {required: true}];\n\n  // Should the value be appended? If true (default), the value is appended to\n  // existing values. Otherwise it replaces any existing values.\n  google.protobuf.BoolValue append = 2;\n}\n\n// Wrapper for a set of headers.\nmessage HeaderMap {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.HeaderMap\";\n\n  repeated HeaderValue headers = 1;\n}\n\n// Data source consisting of either a file or an inline value.\nmessage DataSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.DataSource\";\n\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local filesystem data source.\n    string filename = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Bytes inlined in the configuration.\n    bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];\n\n    // String inlined in the configuration.\n    string inline_string = 3 [(validate.rules).string = {min_len: 1}];\n  }\n}\n\n// The message specifies the retry policy of remote data source when fetching fails.\nmessage RetryPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.RetryPolicy\";\n\n  // Specifies parameters that control :ref:`retry backoff strategy <envoy_api_msg_config.core.v3.BackoffStrategy>`.\n  // This parameter is optional, in which case the default base interval is 1000 milliseconds. The\n  // default maximum interval is 10 times the base interval.\n  BackoffStrategy retry_back_off = 1;\n\n  // Specifies the allowed number of retries. 
This parameter is optional and\n  // defaults to 1.\n  google.protobuf.UInt32Value num_retries = 2\n      [(udpa.annotations.field_migrate).rename = \"max_retries\"];\n}\n\n// The message specifies how to fetch data from remote and how to verify it.\nmessage RemoteDataSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.RemoteDataSource\";\n\n  // The HTTP URI to fetch the remote data.\n  HttpUri http_uri = 1 [(validate.rules).message = {required: true}];\n\n  // SHA256 string for verifying data.\n  string sha256 = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Retry policy for fetching remote data.\n  RetryPolicy retry_policy = 3;\n}\n\n// Async data source which support async data fetch.\nmessage AsyncDataSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.AsyncDataSource\";\n\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local async data source.\n    DataSource local = 1;\n\n    // Remote async data source.\n    RemoteDataSource remote = 2;\n  }\n}\n\n// Configuration for transport socket in :ref:`listeners <config_listeners>` and\n// :ref:`clusters <envoy_api_msg_config.cluster.v3.Cluster>`. If the configuration is\n// empty, a default transport socket implementation and configuration will be\n// chosen based on the platform and existence of tls_context.\nmessage TransportSocket {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.TransportSocket\";\n\n  // The name of the transport socket to instantiate. 
The name must match a supported transport\n  // socket implementation.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Implementation specific configuration which depends on the implementation being instantiated.\n  // See the supported transport socket implementations for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n}\n\n// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not\n// specified via a runtime key.\n//\n// .. note::\n//\n//   Parsing of the runtime key's data is implemented such that it may be represented as a\n//   :ref:`FractionalPercent <envoy_api_msg_type.v3.FractionalPercent>` proto represented as JSON/YAML\n//   and may also be represented as an integer with the assumption that the value is an integral\n//   percentage out of 100. For instance, a runtime key lookup returning the value \"42\" would parse\n//   as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.\nmessage RuntimeFractionalPercent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.RuntimeFractionalPercent\";\n\n  // Default value if the runtime value's for the numerator/denominator keys are not available.\n  type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key for a YAML representation of a FractionalPercent.\n  string runtime_key = 2;\n}\n\n// Identifies a specific ControlPlane instance that Envoy is connected to.\nmessage ControlPlane {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.ControlPlane\";\n\n  // An opaque control plane identifier that uniquely identifies an instance\n  // of control plane. This can be used to identify which control plane instance,\n  // the Envoy is connected to.\n  string identifier = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/config_source.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/authority.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"ConfigSourceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Configuration sources]\n\n// xDS API and non-xDS services version. This is used to describe both resource and transport\n// protocol versions (in distinct configuration fields).\nenum ApiVersion {\n  // When not specified, we assume v2, to ease migration to Envoy's stable API\n  // versioning. If a client does not support v2 (e.g. due to deprecation), this\n  // is an invalid value.\n  AUTO = 0;\n\n  // Use xDS v2 API.\n  V2 = 1;\n\n  // Use xDS v3 API.\n  V3 = 2;\n}\n\n// API configuration source. This identifies the API type and cluster that Envoy\n// will use to fetch an xDS API.\n// [#next-free-field: 9]\nmessage ApiConfigSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.ApiConfigSource\";\n\n  // APIs may be fetched via either REST or gRPC.\n  enum ApiType {\n    // Ideally this would be 'reserved 0' but one can't reserve the default\n    // value. Instead we throw an exception if this is ever used.\n    hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // REST-JSON v2 API. 
The `canonical JSON encoding\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_ for\n    // the v2 protos is used.\n    REST = 1;\n\n    // SotW gRPC service.\n    GRPC = 2;\n\n    // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}\n    // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state\n    // with every update, the xDS server only sends what has changed since the last update.\n    DELTA_GRPC = 3;\n\n    // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be\n    // multiplexed on a single connection to an ADS endpoint.\n    // [#not-implemented-hide:]\n    AGGREGATED_GRPC = 5;\n\n    // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be\n    // multiplexed on a single connection to an ADS endpoint.\n    // [#not-implemented-hide:]\n    AGGREGATED_DELTA_GRPC = 6;\n  }\n\n  // API type (gRPC, REST, delta gRPC)\n  ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}];\n\n  // Cluster names should be used only with REST. If > 1\n  // cluster is defined, clusters will be cycled through if any kind of failure\n  // occurs.\n  //\n  // .. note::\n  //\n  //  The cluster with name ``cluster_name`` must be statically defined and its\n  //  type must not be ``EDS``.\n  repeated string cluster_names = 2;\n\n  // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined,\n  // services will be cycled through if any kind of failure occurs.\n  repeated GrpcService grpc_services = 4;\n\n  // For REST APIs, the delay between successive polls.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // For REST APIs, the request timeout. 
If not set, a default value of 1s will be used.\n  google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}];\n\n  // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be\n  // rate limited.\n  RateLimitSettings rate_limit_settings = 6;\n\n  // Skip the node identifier in subsequent discovery requests for streaming gRPC config types.\n  bool set_node_on_first_message_only = 7;\n}\n\n// Aggregated Discovery Service (ADS) options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_config.core.v3.ConfigSource>` can be used to\n// specify that ADS is to be used.\nmessage AggregatedConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.AggregatedConfigSource\";\n}\n\n// [#not-implemented-hide:]\n// Self-referencing config source options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_config.core.v3.ConfigSource>` can be used to\n// specify that other data can be obtained from the same server.\nmessage SelfConfigSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.SelfConfigSource\";\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Rate Limit settings to be applied for discovery requests made by Envoy.\nmessage RateLimitSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.RateLimitSettings\";\n\n  // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a\n  // default value of 100 will be used.\n  google.protobuf.UInt32Value max_tokens = 1;\n\n  // Rate at which tokens will be filled per second. 
If not set, a default fill rate of 10 tokens\n  // per second will be used.\n  google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}];\n}\n\n// Configuration for :ref:`listeners <config_listeners>`, :ref:`clusters\n// <config_cluster_manager>`, :ref:`routes\n// <envoy_api_msg_config.route.v3.RouteConfiguration>`, :ref:`endpoints\n// <arch_overview_service_discovery>` etc. may either be sourced from the\n// filesystem or from an xDS API source. Filesystem configs are watched with\n// inotify for updates.\n// [#next-free-field: 8]\nmessage ConfigSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.ConfigSource\";\n\n  // Authorities that this config source may be used for. An authority specified\n  // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior\n  // to configuration fetch. This field provides the association between\n  // authority name and configuration source.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.Authority authorities = 7;\n\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Path on the filesystem to source and watch for configuration updates.\n    // When sourcing configuration for :ref:`secret <envoy_api_msg_extensions.transport_sockets.tls.v3.Secret>`,\n    // the certificate and key files are also watched for updates.\n    //\n    // .. note::\n    //\n    //  The path to the source must exist at config load time.\n    //\n    // .. note::\n    //\n    //   Envoy will only watch the file path for *moves.* This is because in general only moves\n    //   are atomic. The same method of swapping files as is demonstrated in the\n    //   :ref:`runtime documentation <config_runtime_symbolic_link_swap>` can be used here also.\n    string path = 1;\n\n    // API configuration source.\n    ApiConfigSource api_config_source = 2;\n\n    // When set, ADS will be used to fetch resources. 
The ADS API configuration\n    // source in the bootstrap configuration is used.\n    AggregatedConfigSource ads = 3;\n\n    // [#not-implemented-hide:]\n    // When set, the client will access the resources from the same server it got the\n    // ConfigSource from, although not necessarily from the same stream. This is similar to the\n    // :ref:`ads<envoy_api_field.ConfigSource.ads>` field, except that the client may use a\n    // different stream to the same server. As a result, this field can be used for things\n    // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.)\n    // LDS to RDS on the same server without requiring the management server to know its name\n    // or required credentials.\n    // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since\n    // this field can implicitly mean to use the same stream in the case where the ConfigSource\n    // is provided via ADS and the specified data can also be obtained via ADS.]\n    SelfConfigSource self = 5;\n  }\n\n  // When this timeout is specified, Envoy will wait no longer than the specified time for first\n  // config response on this xDS subscription during the :ref:`initialization process\n  // <arch_overview_initialization>`. After reaching the timeout, Envoy will move to the next\n  // initialization phase, even if the first config is not delivered yet. The timer is activated\n  // when the xDS API subscription starts, and is disarmed on first config update or on error. 0\n  // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another\n  // timeout applies). The default is 15s.\n  google.protobuf.Duration initial_fetch_timeout = 4;\n\n  // API version for xDS resources. 
This implies the type URLs that the client\n  // will request for resources and the resource type that the client will in\n  // turn expect to be delivered.\n  ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/event_service_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"EventServiceConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#not-implemented-hide:]\n// Configuration of the event reporting service endpoint.\nmessage EventServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.EventServiceConfig\";\n\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Specifies the gRPC service that hosts the event reporting service.\n    GrpcService grpc_service = 1;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/extension.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"ExtensionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Extension configuration]\n\n// Message type for extension configuration.\n// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.].\nmessage TypedExtensionConfig {\n  // The name of an extension. This is not used to select the extension, instead\n  // it serves the role of an opaque identifier.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The typed config for the extension. The type URL will be used to identify\n  // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*,\n  // the inner type URL of *TypedStruct* will be utilized. See the\n  // :ref:`extension configuration overview\n  // <config_overview_extension_configuration>` for further details.\n  google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}];\n}\n\n// Configuration source specifier for a late-bound extension configuration. The\n// parent resource is warmed until all the initial extension configurations are\n// received, unless the flag to apply the default configuration is set.\n// Subsequent extension updates are atomic on a per-worker basis. Once an\n// extension configuration is applied to a request or a connection, it remains\n// constant for the duration of processing. If the initial delivery of the\n// extension configuration fails, due to a timeout for example, the optional\n// default configuration is applied. 
Without a default configuration, the\n// extension is disabled, until an extension configuration is received. The\n// behavior of a disabled extension depends on the context. For example, a\n// filter chain with a disabled extension filter rejects all incoming streams.\nmessage ExtensionConfigSource {\n  ConfigSource config_source = 1 [(validate.rules).any = {required: true}];\n\n  // Optional default configuration to use as the initial configuration if\n  // there is a failure to receive the initial extension configuration or if\n  // `apply_default_config_without_warming` flag is set.\n  google.protobuf.Any default_config = 2;\n\n  // Use the default config as the initial configuration without warming and\n  // waiting for the first discovery response. Requires the default configuration\n  // to be supplied.\n  bool apply_default_config_without_warming = 3;\n\n  // A set of permitted extension type URLs. Extension configuration updates are rejected\n  // if they do not match any type URL in the set.\n  repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"GrpcMethodListProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC method list]\n\n// A list of gRPC methods which can be used as an allowlist, for example.\nmessage GrpcMethodList {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.GrpcMethodList\";\n\n  message Service {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.GrpcMethodList.Service\";\n\n    // The name of the gRPC service.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // The names of the gRPC methods in this service.\n    repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  repeated Service services = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/grpc_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"GrpcServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC services]\n\n// gRPC service configuration. This is used by :ref:`ApiConfigSource\n// <envoy_api_msg_config.core.v3.ApiConfigSource>` and filter configurations.\n// [#next-free-field: 6]\nmessage GrpcService {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.GrpcService\";\n\n  message EnvoyGrpc {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.GrpcService.EnvoyGrpc\";\n\n    // The name of the upstream gRPC cluster. SSL credentials will be supplied\n    // in the :ref:`Cluster <envoy_api_msg_config.cluster.v3.Cluster>` :ref:`transport_socket\n    // <envoy_api_field_config.cluster.v3.Cluster.transport_socket>`.\n    string cluster_name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`.\n    // Note that this authority does not override the SNI. 
The SNI is provided by the transport socket of the cluster.\n    string authority = 2\n        [(validate.rules).string =\n             {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // [#next-free-field: 9]\n  message GoogleGrpc {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.GrpcService.GoogleGrpc\";\n\n    // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.\n    message SslCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials\";\n\n      // PEM encoded server root certificates.\n      DataSource root_certs = 1;\n\n      // PEM encoded client private key.\n      DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n      // PEM encoded client certificate chain.\n      DataSource cert_chain = 3;\n    }\n\n    // Local channel credentials. Only UDS is supported for now.\n    // See https://github.com/grpc/grpc/pull/15909.\n    message GoogleLocalCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials\";\n    }\n\n    // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call\n    // credential types.\n    message ChannelCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials\";\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        SslCredentials ssl_credentials = 1;\n\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_default = 2;\n\n        GoogleLocalCredentials local_credentials = 3;\n      }\n    }\n\n    // [#next-free-field: 8]\n    message CallCredentials {\n      option 
(udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials\";\n\n      message ServiceAccountJWTAccessCredentials {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.\"\n            \"ServiceAccountJWTAccessCredentials\";\n\n        string json_key = 1;\n\n        uint64 token_lifetime_seconds = 2;\n      }\n\n      message GoogleIAMCredentials {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials\";\n\n        string authorization_token = 1;\n\n        string authority_selector = 2;\n      }\n\n      message MetadataCredentialsFromPlugin {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.\"\n            \"MetadataCredentialsFromPlugin\";\n\n        string name = 1;\n\n        oneof config_type {\n          google.protobuf.Any typed_config = 3;\n\n          google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n        }\n      }\n\n      // Security token service configuration that allows Google gRPC to\n      // fetch security token from an OAuth 2.0 authorization server.\n      // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and\n      // https://github.com/grpc/grpc/pull/19587.\n      // [#next-free-field: 10]\n      message StsService {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService\";\n\n        // URI of the token exchange service that handles token exchange requests.\n        // [#comment:TODO(asraa): Add URI validation when implemented. 
Tracked by\n        // https://github.com/envoyproxy/protoc-gen-validate/issues/303]\n        string token_exchange_service_uri = 1;\n\n        // Location of the target service or resource where the client\n        // intends to use the requested security token.\n        string resource = 2;\n\n        // Logical name of the target service where the client intends to\n        // use the requested security token.\n        string audience = 3;\n\n        // The desired scope of the requested security token in the\n        // context of the service or resource where the token will be used.\n        string scope = 4;\n\n        // Type of the requested security token.\n        string requested_token_type = 5;\n\n        // The path of subject token, a security token that represents the\n        // identity of the party on behalf of whom the request is being made.\n        string subject_token_path = 6 [(validate.rules).string = {min_len: 1}];\n\n        // Type of the subject token.\n        string subject_token_type = 7 [(validate.rules).string = {min_len: 1}];\n\n        // The path of actor token, a security token that represents the identity\n        // of the acting party. 
The acting party is authorized to use the\n        // requested security token and act on behalf of the subject.\n        string actor_token_path = 8;\n\n        // Type of the actor token.\n        string actor_token_type = 9;\n      }\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        // Access token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.\n        string access_token = 1;\n\n        // Google Compute Engine credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_compute_engine = 2;\n\n        // Google refresh token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.\n        string google_refresh_token = 3;\n\n        // Service Account JWT Access credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.\n        ServiceAccountJWTAccessCredentials service_account_jwt_access = 4;\n\n        // Google IAM credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.\n        GoogleIAMCredentials google_iam = 5;\n\n        // Custom authenticator credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.\n        // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.\n        MetadataCredentialsFromPlugin from_plugin = 6;\n\n        // Custom security token service which implements OAuth 2.0 token exchange.\n        // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16\n        // See https://github.com/grpc/grpc/pull/19587.\n        StsService sts_service = 7;\n      }\n    }\n\n    // Channel arguments.\n    message ChannelArgs {\n      message Value {\n        // Pointer values are not supported, since they don't make any sense when\n   
     // delivered via the API.\n        oneof value_specifier {\n          option (validate.required) = true;\n\n          string string_value = 1;\n\n          int64 int_value = 2;\n        }\n      }\n\n      // See grpc_types.h GRPC_ARG #defines for keys that work here.\n      map<string, Value> args = 1;\n    }\n\n    // The target URI when using the `Google C++ gRPC client\n    // <https://github.com/grpc/grpc>`_. SSL credentials will be supplied in\n    // :ref:`channel_credentials <envoy_api_field_config.core.v3.GrpcService.GoogleGrpc.channel_credentials>`.\n    string target_uri = 1 [(validate.rules).string = {min_len: 1}];\n\n    ChannelCredentials channel_credentials = 2;\n\n    // A set of call credentials that can be composed with `channel credentials\n    // <https://grpc.io/docs/guides/auth.html#credential-types>`_.\n    repeated CallCredentials call_credentials = 3;\n\n    // The human readable prefix to use when emitting statistics for the gRPC\n    // service.\n    //\n    // .. csv-table::\n    //    :header: Name, Type, Description\n    //    :widths: 1, 1, 2\n    //\n    //    streams_total, Counter, Total number of streams opened\n    //    streams_closed_<gRPC status code>, Counter, Total streams closed with <gRPC status code>\n    string stat_prefix = 4 [(validate.rules).string = {min_len: 1}];\n\n    // The name of the Google gRPC credentials factory to use. This must have been registered with\n    // Envoy. 
If this is empty, a default credentials factory will be used that sets up channel\n    // credentials based on other configuration parameters.\n    string credentials_factory_name = 5;\n\n    // Additional configuration for site-specific customizations of the Google\n    // gRPC library.\n    google.protobuf.Struct config = 6;\n\n    // How many bytes each stream can buffer internally.\n    // If not set an implementation defined default is applied (1MiB).\n    google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7;\n\n    // Custom channels args.\n    ChannelArgs channel_args = 8;\n  }\n\n  reserved 4;\n\n  oneof target_specifier {\n    option (validate.required) = true;\n\n    // Envoy's in-built gRPC client.\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    EnvoyGrpc envoy_grpc = 1;\n\n    // `Google C++ gRPC client <https://github.com/grpc/grpc>`_\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    GoogleGrpc google_grpc = 2;\n  }\n\n  // The timeout for the gRPC request. This is the timeout for a specific\n  // request.\n  google.protobuf.Duration timeout = 3;\n\n  // Additional metadata to include in streams initiated to the GrpcService.\n  // This can be used for scenarios in which additional ad hoc authorization\n  // headers (e.g. ``x-foo-bar: baz-key``) are to be injected.\n  repeated HeaderValue initial_metadata = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/event_service_config.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/v3/http.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Health check]\n// * Health checking :ref:`architecture overview <arch_overview_health_checking>`.\n// * If health checking is configured for a cluster, additional statistics are emitted. They are\n//   documented :ref:`here <config_cluster_manager_cluster_stats>`.\n\n// Endpoint health status.\nenum HealthStatus {\n  // The health status is not known. This is interpreted by Envoy as *HEALTHY*.\n  UNKNOWN = 0;\n\n  // Healthy.\n  HEALTHY = 1;\n\n  // Unhealthy.\n  UNHEALTHY = 2;\n\n  // Connection draining in progress. E.g.,\n  // `<https://aws.amazon.com/blogs/aws/elb-connection-draining-remove-instances-from-service-with-care/>`_\n  // or\n  // `<https://cloud.google.com/compute/docs/load-balancing/enabling-connection-draining>`_.\n  // This is interpreted by Envoy as *UNHEALTHY*.\n  DRAINING = 3;\n\n  // Health check timed out. 
This is part of HDS and is interpreted by Envoy as\n  // *UNHEALTHY*.\n  TIMEOUT = 4;\n\n  // Degraded.\n  DEGRADED = 5;\n}\n\n// [#next-free-field: 24]\nmessage HealthCheck {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.HealthCheck\";\n\n  // Describes the encoding of the payload bytes in the payload.\n  message Payload {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.Payload\";\n\n    oneof payload {\n      option (validate.required) = true;\n\n      // Hex encoded payload. E.g., \"000000FF\".\n      string text = 1 [(validate.rules).string = {min_len: 1}];\n\n      // [#not-implemented-hide:] Binary payload.\n      bytes binary = 2;\n    }\n  }\n\n  // [#next-free-field: 12]\n  message HttpHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.HttpHealthCheck\";\n\n    // The value of the host header in the HTTP health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The host header can be customized for a specific endpoint by setting the\n    // :ref:`hostname <envoy_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.\n    string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Specifies the HTTP path that will be requested during health checking. For example\n    // */healthcheck*.\n    string path = 2\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // [#not-implemented-hide:] HTTP specific payload.\n    Payload send = 3;\n\n    // [#not-implemented-hide:] HTTP specific response.\n    Payload receive = 4;\n\n    // Specifies a list of HTTP headers that should be added to each request that is sent to the\n    // health checked cluster. 
For more information, including details on header value syntax, see\n    // the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated HeaderValueOption request_headers_to_add = 6\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request that is sent to the\n    // health checked cluster.\n    repeated string request_headers_to_remove = 8 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default\n    // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open\n    // semantics of :ref:`Int64Range <envoy_api_msg_type.v3.Int64Range>`. The start and end of each\n    // range are required. Only statuses in the range [100, 600) are allowed.\n    repeated type.v3.Int64Range expected_statuses = 9;\n\n    // Use specified application protocol for health checks.\n    type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}];\n\n    // An optional service name parameter which is used to validate the identity of\n    // the health checked cluster using a :ref:`StringMatcher\n    // <envoy_api_msg_type.matcher.v3.StringMatcher>`. 
See the :ref:`architecture overview\n    // <arch_overview_health_checking_identity>` for more information.\n    type.matcher.v3.StringMatcher service_name_matcher = 11;\n\n    string hidden_envoy_deprecated_service_name = 5 [deprecated = true];\n\n    bool hidden_envoy_deprecated_use_http2 = 7\n        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n  }\n\n  message TcpHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.TcpHealthCheck\";\n\n    // Empty payloads imply a connect-only health check.\n    Payload send = 1;\n\n    // When checking the response, “fuzzy” matching is performed such that each\n    // binary block must be found, and in the order specified, but not\n    // necessarily contiguous.\n    repeated Payload receive = 2;\n  }\n\n  message RedisHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.RedisHealthCheck\";\n\n    // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value\n    // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other\n    // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance\n    // by setting the specified key to any value and waiting for traffic to drain.\n    string key = 1;\n  }\n\n  // `grpc.health.v1.Health\n  // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto>`_-based\n  // healthcheck. 
See `gRPC doc <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_\n  // for details.\n  message GrpcHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.GrpcHealthCheck\";\n\n    // An optional service name parameter which will be sent to gRPC service in\n    // `grpc.health.v1.HealthCheckRequest\n    // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto#L20>`_.\n    // message. See `gRPC health-checking overview\n    // <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_ for more information.\n    string service_name = 1;\n\n    // The value of the :authority header in the gRPC health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The authority header can be customized for a specific endpoint by setting\n    // the :ref:`hostname <envoy_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.\n    string authority = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // Custom health check.\n  message CustomHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.CustomHealthCheck\";\n\n    // The registered name of the custom health checker.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // A custom health checker specific configuration which depends on the custom health checker\n    // being instantiated. See :api:`envoy/config/health_checker` for reference.\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n\n      google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n    }\n  }\n\n  // Health checks occur over the transport socket specified for the cluster. 
This implies that if a\n  // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS.\n  //\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  message TlsOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.HealthCheck.TlsOptions\";\n\n    // Specifies the ALPN protocols for health check connections. This is useful if the\n    // corresponding upstream is using ALPN-based :ref:`FilterChainMatch\n    // <envoy_api_msg_config.listener.v3.FilterChainMatch>` along with different protocols for health checks\n    // versus data connections. If empty, no ALPN protocols will be set on health check connections.\n    repeated string alpn_protocols = 1;\n  }\n\n  reserved 10;\n\n  // The time to wait for a health check response. If the timeout is reached the\n  // health check attempt will be considered a failure.\n  google.protobuf.Duration timeout = 1 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // The interval between health checks.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // An optional jitter amount in milliseconds. If specified, Envoy will start health\n  // checking after for a random time in ms between 0 and initial_jitter. This only\n  // applies to the first health check.\n  google.protobuf.Duration initial_jitter = 20;\n\n  // An optional jitter amount in milliseconds. If specified, during every\n  // interval Envoy will add interval_jitter to the wait time.\n  google.protobuf.Duration interval_jitter = 3;\n\n  // An optional jitter amount as a percentage of interval_ms. 
If specified,\n  // during every interval Envoy will add interval_ms *\n  // interval_jitter_percent / 100 to the wait time.\n  //\n  // If interval_jitter_ms and interval_jitter_percent are both set, both of\n  // them will be used to increase the wait time.\n  uint32 interval_jitter_percent = 18;\n\n  // The number of unhealthy health checks required before a host is marked\n  // unhealthy. Note that for *http* health checking if a host responds with 503\n  // this threshold is ignored and the host is considered unhealthy immediately.\n  google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}];\n\n  // The number of healthy health checks required before a host is marked\n  // healthy. Note that during startup, only a single successful health check is\n  // required to mark a host healthy.\n  google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Non-serving port for health checking.\n  google.protobuf.UInt32Value alt_port = 6;\n\n  // Reuse health check connection between health checks. Default is true.\n  google.protobuf.BoolValue reuse_connection = 7;\n\n  oneof health_checker {\n    option (validate.required) = true;\n\n    // HTTP health check.\n    HttpHealthCheck http_health_check = 8;\n\n    // TCP health check.\n    TcpHealthCheck tcp_health_check = 9;\n\n    // gRPC health check.\n    GrpcHealthCheck grpc_health_check = 11;\n\n    // Custom health check.\n    CustomHealthCheck custom_health_check = 13;\n  }\n\n  // The \"no traffic interval\" is a special health check interval that is used when a cluster has\n  // never had traffic routed to it. This lower interval allows cluster information to be kept up to\n  // date, without sending a potentially large amount of active health checking traffic for no\n  // reason. 
Once a cluster has been used for traffic routing, Envoy will shift back to using the\n  // standard health check interval that is defined. Note that this interval takes precedence over\n  // any other.\n  //\n  // The default value for \"no traffic interval\" is 60 seconds.\n  google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy interval\" is a health check interval that is used for hosts that are marked as\n  // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the\n  // standard health check interval that is defined.\n  //\n  // The default value for \"unhealthy interval\" is the same as \"interval\".\n  google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as unhealthy. For subsequent health checks\n  // Envoy will shift back to using either \"unhealthy interval\" if present or the standard health\n  // check interval that is defined.\n  //\n  // The default value for \"unhealthy edge interval\" is the same as \"unhealthy interval\".\n  google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}];\n\n  // The \"healthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as healthy. 
For subsequent health checks\n  // Envoy will shift back to using the standard health check interval that is defined.\n  //\n  // The default value for \"healthy edge interval\" is the same as the default interval.\n  google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}];\n\n  // Specifies the path to the :ref:`health check event log <arch_overview_health_check_logging>`.\n  // If empty, no event log will be written.\n  string event_log_path = 17;\n\n  // [#not-implemented-hide:]\n  // The gRPC service for the health check event service.\n  // If empty, health check events won't be sent to a remote endpoint.\n  EventServiceConfig event_service = 22;\n\n  // If set to true, health check failure events will always be logged. If set to false, only the\n  // initial health check failure event will be logged.\n  // The default value is false.\n  bool always_log_health_check_failures = 19;\n\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  TlsOptions tls_options = 21;\n\n  // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's\n  // :ref:`tranport socket matches <envoy_api_field_config.cluster.v3.Cluster.transport_socket_matches>`.\n  // For example, the following match criteria\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_match_criteria:\n  //    useMTLS: true\n  //\n  // Will match the following :ref:`cluster socket match <envoy_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>`\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"useMTLS\"\n  //    match:\n  //      useMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... 
} # tls socket configuration\n  //\n  // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`.\n  // This allows using different transport socket capabilities for health checking versus proxying to the\n  // endpoint.\n  //\n  // If the key/values pairs specified do not match any\n  // :ref:`transport socket matches <envoy_api_field_config.cluster.v3.Cluster.transport_socket_matches>`,\n  // the cluster's :ref:`transport socket <envoy_api_field_config.cluster.v3.Cluster.transport_socket>`\n  // will be used for health check socket configuration.\n  google.protobuf.Struct transport_socket_match_criteria = 23;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/http_uri.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"HttpUriProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP Service URI ]\n\n// Envoy external URI descriptor\nmessage HttpUri {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.HttpUri\";\n\n  // The HTTP server URI. It should be a full FQDN with protocol, host and path.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    uri: https://www.googleapis.com/oauth2/v1/certs\n  //\n  string uri = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Specify how `uri` is to be fetched. Today, this requires an explicit\n  // cluster, but in the future we may support dynamic cluster creation or\n  // inline DNS resolution. See `issue\n  // <https://github.com/envoyproxy/envoy/issues/1606>`_.\n  oneof http_upstream_type {\n    option (validate.required) = true;\n\n    // A cluster is created in the Envoy \"cluster_manager\" config\n    // section. This field specifies the cluster name.\n    //\n    // Example:\n    //\n    // .. code-block:: yaml\n    //\n    //    cluster: jwks_cluster\n    //\n    string cluster = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Sets the maximum duration in milliseconds that a response can take to arrive upon request.\n  google.protobuf.Duration timeout = 3 [(validate.rules).duration = {\n    required: true\n    gte {}\n  }];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"ProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Protocol options]\n\n// [#not-implemented-hide:]\nmessage TcpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.TcpProtocolOptions\";\n}\n\nmessage UpstreamHttpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.UpstreamHttpProtocolOptions\";\n\n  // Set transport socket `SNI <https://en.wikipedia.org/wiki/Server_Name_Indication>`_ for new\n  // upstream connections based on the downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  bool auto_sni = 1;\n\n  // Automatic validate upstream presented certificate for new upstream connections based on the\n  // downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  // This field is intended to set with `auto_sni` field.\n  bool auto_san_validation = 2;\n}\n\n// [#next-free-field: 6]\nmessage HttpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.HttpProtocolOptions\";\n\n  // Action to take when Envoy receives client request with header names containing underscore\n  // characters.\n  // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented\n  // as a security measure due to systems that treat '_' and '-' as interchangeable. 
Envoy by default allows client request headers with underscore\n  // characters.\n  enum HeadersWithUnderscoresAction {\n    // Allow headers with underscores. This is the default behavior.\n    ALLOW = 0;\n\n    // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests\n    // end with the stream reset. The \"httpN.requests_rejected_with_underscores_in_headers\" counter\n    // is incremented for each rejected request.\n    REJECT_REQUEST = 1;\n\n    // Drop the header with name containing underscores. The header is dropped before the filter chain is\n    // invoked and as such filters will not see dropped headers. The\n    // \"httpN.dropped_headers_with_underscores\" is incremented for each dropped header.\n    DROP_HEADER = 2;\n  }\n\n  // The idle timeout for connections. The idle timeout is defined as the\n  // period in which there are no active requests. When the\n  // idle timeout is reached the connection will be closed. If the connection is an HTTP/2\n  // downstream connection a drain sequence will occur prior to closing the connection, see\n  // :ref:`drain_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout>`.\n  // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive.\n  // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0.\n  //\n  // .. warning::\n  //   Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 1;\n\n  // The maximum duration of a connection. The duration is defined as a period since a connection\n  // was established. If not set, there is no max duration. When max_connection_duration is reached\n  // the connection will be closed. Drain sequence will occur prior to closing the connection if\n  // if's applicable. 
See :ref:`drain_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout>`.\n  // Note: not implemented for upstream connections.\n  google.protobuf.Duration max_connection_duration = 3;\n\n  // The maximum number of headers. If unconfigured, the default\n  // maximum number of request headers allowed is 100. Requests that exceed this limit will receive\n  // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.\n  google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}];\n\n  // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be\n  // reset independent of any other timeouts. If not specified, this value is not set.\n  google.protobuf.Duration max_stream_duration = 4;\n\n  // Action to take when a client request with a header name containing underscore characters is received.\n  // If this setting is not specified, the value defaults to ALLOW.\n  // Note: upstream responses are not affected by this setting.\n  HeadersWithUnderscoresAction headers_with_underscores_action = 5;\n}\n\n// [#next-free-field: 8]\nmessage Http1ProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.Http1ProtocolOptions\";\n\n  message HeaderKeyFormat {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat\";\n\n    message ProperCaseWords {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords\";\n    }\n\n    oneof header_format {\n      option (validate.required) = true;\n\n      // Formats the header by proper casing words: the first character and any character following\n      // a special character will be capitalized if it's an alpha character. 
For example,\n      // \"content-type\" becomes \"Content-Type\", and \"foo$b#$are\" becomes \"Foo$B#$Are\".\n      // Note that while this results in most headers following conventional casing, certain headers\n      // are not covered. For example, the \"TE\" header will be formatted as \"Te\".\n      ProperCaseWords proper_case_words = 1;\n    }\n  }\n\n  // Handle HTTP requests with absolute URLs in the requests. These requests\n  // are generally sent by clients to forward/explicit proxies. This allows clients to configure\n  // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the\n  // *http_proxy* environment variable.\n  google.protobuf.BoolValue allow_absolute_url = 1;\n\n  // Handle incoming HTTP/1.0 and HTTP 0.9 requests.\n  // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1\n  // style connect logic, dechunking, and handling lack of client host iff\n  // *default_host_for_http_10* is configured.\n  bool accept_http_10 = 2;\n\n  // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as\n  // Envoy does not otherwise support HTTP/1.0 without a Host header.\n  // This is a no-op if *accept_http_10* is not true.\n  string default_host_for_http_10 = 3;\n\n  // Describes how the keys for response headers should be formatted. By default, all header keys\n  // are lower cased.\n  HeaderKeyFormat header_key_format = 4;\n\n  // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.\n  //\n  // .. 
attention::\n  //\n  //   Note that this only happens when Envoy is chunk encoding which occurs when:\n  //   - The request is HTTP/1.1.\n  //   - Is neither a HEAD only request nor a HTTP Upgrade.\n  //   - Not a response to a HEAD request.\n  //   - The content length header is not present.\n  bool enable_trailers = 5;\n\n  // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding`\n  // headers set. By default such messages are rejected, but if option is enabled - Envoy will\n  // remove Content-Length header and process message.\n  // See `RFC7230, sec. 3.3.3 <https://tools.ietf.org/html/rfc7230#section-3.3.3>` for details.\n  //\n  // .. attention::\n  //   Enabling this option might lead to request smuggling vulnerability, especially if traffic\n  //   is proxied via multiple layers of proxies.\n  bool allow_chunked_length = 6;\n\n  // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate\n  // HTTP/1.1 connections upon receiving an invalid HTTP message. However,\n  // when this option is true, then Envoy will leave the HTTP/1.1 connection\n  // open where possible.\n  // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`.\n  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7;\n}\n\nmessage KeepaliveSettings {\n  // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive.\n  google.protobuf.Duration interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // How long to wait for a response to a keepalive PING. 
If a response is not received within this\n  // time period, the connection will be aborted.\n  google.protobuf.Duration timeout = 2 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // A random jitter amount as a percentage of interval that will be added to each interval.\n  // A value of zero means there will be no jitter.\n  // The default value is 15%.\n  type.v3.Percent interval_jitter = 3;\n}\n\n// [#next-free-field: 16]\nmessage Http2ProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.Http2ProtocolOptions\";\n\n  // Defines a parameter to be sent in the SETTINGS frame.\n  // See `RFC7540, sec. 6.5.1 <https://tools.ietf.org/html/rfc7540#section-6.5.1>`_ for details.\n  message SettingsParameter {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter\";\n\n    // The 16 bit parameter identifier.\n    google.protobuf.UInt32Value identifier = 1 [\n      (validate.rules).uint32 = {lte: 65535 gte: 0},\n      (validate.rules).message = {required: true}\n    ];\n\n    // The 32 bit parameter value.\n    google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}];\n  }\n\n  // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_\n  // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values\n  // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header\n  // compression.\n  google.protobuf.UInt32Value hpack_table_size = 1;\n\n  // `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_\n  // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1)\n  // and defaults to 2147483647.\n  //\n  // For upstream connections, this also limits how many streams Envoy will initiate concurrently\n  // on a single connection. 
If the limit is reached, Envoy may queue requests or establish\n  // additional connections (as allowed per circuit breaker limits).\n  google.protobuf.UInt32Value max_concurrent_streams = 2\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 1}];\n\n  // `Initial stream-level flow-control window\n  // <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size. Valid values range from 65535\n  // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456\n  // (256 * 1024 * 1024).\n  //\n  // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default\n  // window size now, so it's also the minimum.\n  //\n  // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the\n  // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to\n  // stop the flow of data to the codec buffers.\n  google.protobuf.UInt32Value initial_stream_window_size = 3\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Similar to *initial_stream_window_size*, but for connection-level flow-control\n  // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*.\n  google.protobuf.UInt32Value initial_connection_window_size = 4\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Allows proxying Websocket and other upgrades over H2 connect.\n  bool allow_connect = 5;\n\n  // [#not-implemented-hide:] Hiding until envoy has full metadata support.\n  // Still under implementation. DO NOT USE.\n  //\n  // Allows metadata. See [metadata\n  // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more\n  // information.\n  bool allow_metadata = 6;\n\n  // Limit the number of pending outbound downstream frames of all types (frames that are waiting to\n  // be written into the socket). 
Exceeding this limit triggers flood mitigation and connection is\n  // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due\n  // to flood mitigation. The default limit is 10000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,\n  // preventing high memory utilization when receiving continuous stream of these frames. Exceeding\n  // this limit triggers flood mitigation and connection is terminated. The\n  // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood\n  // mitigation. The default limit is 1000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an\n  // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but\n  // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood``\n  // stat tracks the number of connections terminated due to flood mitigation.\n  // Setting this to 0 will terminate connection upon receiving first frame with an empty payload\n  // and no end stream flag. The default limit is 1.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9;\n\n  // Limit the number of inbound PRIORITY frames allowed per each opened stream. 
If the number\n  // of PRIORITY frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     max_inbound_priority_frames_per_stream * (1 + inbound_streams)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 100.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10;\n\n  // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number\n  // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     1 + 2 * (inbound_streams +\n  //              max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 10.\n  // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,\n  // but more complex implementations that try to estimate available bandwidth require at least 2.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11\n      [(validate.rules).uint32 = {gte: 1}];\n\n  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then\n  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. 
However,\n  // when this option is enabled, only the offending stream is terminated.\n  //\n  // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`\n  // iff present.\n  //\n  // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>`\n  //\n  // See `RFC7540, sec. 8.1 <https://tools.ietf.org/html/rfc7540#section-8.1>`_ for details.\n  bool stream_error_on_invalid_http_messaging = 12 [deprecated = true];\n\n  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then\n  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,\n  // when this option is enabled, only the offending stream is terminated.\n  //\n  // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`\n  //\n  // See `RFC7540, sec. 8.1 <https://tools.ietf.org/html/rfc7540#section-8.1>`_ for details.\n  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14;\n\n  // [#not-implemented-hide:]\n  // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions:\n  //\n  // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by\n  // Envoy.\n  //\n  // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field\n  // 'allow_connect'.\n  //\n  // Note that custom parameters specified through this field can not also be set in the\n  // corresponding named parameters:\n  //\n  // .. 
code-block:: text\n  //\n  //   ID    Field Name\n  //   ----------------\n  //   0x1   hpack_table_size\n  //   0x3   max_concurrent_streams\n  //   0x4   initial_stream_window_size\n  //\n  // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies\n  // between custom parameters with the same identifier will trigger a failure.\n  //\n  // See `IANA HTTP/2 Settings\n  // <https://www.iana.org/assignments/http2-parameters/http2-parameters.xhtml#settings>`_ for\n  // standardized identifiers.\n  repeated SettingsParameter custom_settings_parameters = 13;\n\n  // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer\n  // does not respond within the configured timeout, the connection will be aborted.\n  KeepaliveSettings connection_keepalive = 15;\n}\n\n// [#not-implemented-hide:]\nmessage GrpcProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.core.GrpcProtocolOptions\";\n\n  Http2ProtocolOptions http2_protocol_options = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"ProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Proxy Protocol]\n\nmessage ProxyProtocolConfig {\n  enum Version {\n    // PROXY protocol version 1. Human readable format.\n    V1 = 0;\n\n    // PROXY protocol version 2. Binary format.\n    V2 = 1;\n  }\n\n  // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details\n  Version version = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/socket_option.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"SocketOptionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Socket Option ]\n\n// Generic socket option message. This would be used to set socket options that\n// might not exist in upstream kernels or precompiled Envoy binaries.\n// [#next-free-field: 7]\nmessage SocketOption {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.core.SocketOption\";\n\n  enum SocketState {\n    // Socket options are applied after socket creation but before binding the socket to a port\n    STATE_PREBIND = 0;\n\n    // Socket options are applied after binding the socket to a port but before calling listen()\n    STATE_BOUND = 1;\n\n    // Socket options are applied after calling listen()\n    STATE_LISTENING = 2;\n  }\n\n  // An optional name to give this socket option for debugging, etc.\n  // Uniqueness is not required and no special meaning is assumed.\n  string description = 1;\n\n  // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP\n  int64 level = 2;\n\n  // The numeric name as passed to setsockopt\n  int64 name = 3;\n\n  oneof value {\n    option (validate.required) = true;\n\n    // Because many sockopts take an int value.\n    int64 int_value = 4;\n\n    // Otherwise it's a byte buffer.\n    bytes buf_value = 5;\n  }\n\n  // The state in which the option will be applied. When used in BindConfig\n  // STATE_PREBIND is currently the only valid value.\n  SocketState state = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v3;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v3\";\noption java_outer_classname = \"SubstitutionFormatStringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Substitution format string]\n\n// Configuration to use multiple :ref:`command operators <config_access_log_command_operators>`\n// to generate a new string in either plain text or JSON format.\nmessage SubstitutionFormatString {\n  oneof format {\n    option (validate.required) = true;\n\n    // Specify a format with command operators to form a text string.\n    // Its details is described in :ref:`format string<config_access_log_format_strings>`.\n    //\n    // For example, setting ``text_format`` like below,\n    //\n    // .. validated-code-block:: yaml\n    //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n    //\n    //   text_format: \"%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\\n\"\n    //\n    // generates plain text similar to:\n    //\n    // .. code-block:: text\n    //\n    //   upstream connect error:503:path=/foo\n    //\n    string text_format = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Specify a format with command operators to form a JSON string.\n    // Its details is described in :ref:`format dictionary<config_access_log_format_dictionaries>`.\n    // Values are rendered as strings, numbers, or boolean values as appropriate.\n    // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA).\n    // See the documentation for a specific command operator for details.\n    //\n    // .. 
validated-code-block:: yaml\n    //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n    //\n    //   json_format:\n    //     status: \"%RESPONSE_CODE%\"\n    //     message: \"%LOCAL_REPLY_BODY%\"\n    //\n    // The following JSON object would be created:\n    //\n    // .. code-block:: json\n    //\n    //  {\n    //    \"status\": 500,\n    //    \"message\": \"My error message\"\n    //  }\n    //\n    google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}];\n  }\n\n  // If set to true, when command operators are evaluated to null,\n  //\n  // * for ``text_format``, the output of the empty operator is changed from ``-`` to an\n  //   empty string, so that empty values are omitted entirely.\n  // * for ``json_format`` the keys with null values are omitted in the output structure.\n  bool omit_empty_values = 3;\n\n  // Specify a *content_type* field.\n  // If this field is not set then ``text/plain`` is used for *text_format* and\n  // ``application/json`` is used for *json_format*.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   content_type: \"text/html; charset=UTF-8\"\n  //\n  string content_type = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/address.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/socket_option.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"AddressProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Network addresses]\n\nmessage Pipe {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Pipe\";\n\n  // Unix Domain Socket path. On Linux, paths starting with '@' will use the\n  // abstract namespace. The starting '@' is replaced by a null byte by Envoy.\n  // Paths starting with '@' will result in an error in environments other than\n  // Linux.\n  string path = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The mode for the Pipe. 
Not applicable for abstract sockets.\n  uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}];\n}\n\n// [#not-implemented-hide:] The address represents an envoy internal listener.\n// TODO(lambdai): Make this address available for listener and endpoint.\n// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.\nmessage EnvoyInternalAddress {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.EnvoyInternalAddress\";\n\n  oneof address_name_specifier {\n    option (validate.required) = true;\n\n    // [#not-implemented-hide:] The :ref:`listener name <envoy_api_field_config.listener.v4alpha.Listener.name>` of the destination internal listener.\n    string server_listener_name = 1;\n  }\n}\n\n// [#next-free-field: 7]\nmessage SocketAddress {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.SocketAddress\";\n\n  enum Protocol {\n    TCP = 0;\n    UDP = 1;\n  }\n\n  Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The address for this socket. :ref:`Listeners <config_listeners>` will bind\n  // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::``\n  // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented:\n  // It is possible to distinguish a Listener address via the prefix/suffix matching\n  // in :ref:`FilterChainMatch <envoy_api_msg_config.listener.v4alpha.FilterChainMatch>`.] When used\n  // within an upstream :ref:`BindConfig <envoy_api_msg_config.core.v4alpha.BindConfig>`, the address\n  // controls the source address of outbound connections. For :ref:`clusters\n  // <envoy_api_msg_config.cluster.v4alpha.Cluster>`, the cluster type determines whether the\n  // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS\n  // (*STRICT_DNS* or *LOGICAL_DNS* clusters). 
Address resolution can be customized\n  // via :ref:`resolver_name <envoy_api_field_config.core.v4alpha.SocketAddress.resolver_name>`.\n  string address = 2 [(validate.rules).string = {min_len: 1}];\n\n  oneof port_specifier {\n    option (validate.required) = true;\n\n    uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}];\n\n    // This is only valid if :ref:`resolver_name\n    // <envoy_api_field_config.core.v4alpha.SocketAddress.resolver_name>` is specified below and the\n    // named resolver is capable of named port resolution.\n    string named_port = 4;\n  }\n\n  // The name of the custom resolver. This must have been registered with Envoy. If\n  // this is empty, a context dependent default applies. If the address is a concrete\n  // IP address, no resolution will occur. If address is a hostname this\n  // should be set for resolution other than DNS. Specifying a custom resolver with\n  // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime.\n  string resolver_name = 5;\n\n  // When binding to an IPv6 address above, this enables `IPv4 compatibility\n  // <https://tools.ietf.org/html/rfc3493#page-11>`_. Binding to ``::`` will\n  // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into\n  // IPv6 space as ``::FFFF:<IPv4-address>``.\n  bool ipv4_compat = 6;\n}\n\nmessage TcpKeepalive {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.TcpKeepalive\";\n\n  // Maximum number of keepalive probes to send without response before deciding\n  // the connection is dead. Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 9.)\n  google.protobuf.UInt32Value keepalive_probes = 1;\n\n  // The number of seconds a connection needs to be idle before keep-alive probes\n  // start being sent. 
Default is to use the OS level configuration (unless\n  // overridden, Linux defaults to 7200s (i.e., 2 hours.)\n  google.protobuf.UInt32Value keepalive_time = 2;\n\n  // The number of seconds between keep-alive probes. Default is to use the OS\n  // level configuration (unless overridden, Linux defaults to 75s.)\n  google.protobuf.UInt32Value keepalive_interval = 3;\n}\n\nmessage BindConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.BindConfig\";\n\n  // The address to bind to when creating a socket.\n  SocketAddress source_address = 1 [(validate.rules).message = {required: true}];\n\n  // Whether to set the *IP_FREEBIND* option when creating the socket. When this\n  // flag is set to true, allows the :ref:`source_address\n  // <envoy_api_field_config.cluster.v4alpha.UpstreamBindConfig.source_address>` to be an IP address\n  // that is not configured on the system running Envoy. When this flag is set\n  // to false, the option *IP_FREEBIND* is disabled on the socket. When this\n  // flag is not set (default), the socket is not modified, i.e. 
the option is\n  // neither enabled nor disabled.\n  google.protobuf.BoolValue freebind = 2;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated SocketOption socket_options = 3;\n}\n\n// Addresses specify either a logical or physical address and port, which are\n// used to tell Envoy where to bind/listen, connect to upstream and find\n// management servers.\nmessage Address {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Address\";\n\n  oneof address {\n    option (validate.required) = true;\n\n    SocketAddress socket_address = 1;\n\n    Pipe pipe = 2;\n\n    // [#not-implemented-hide:]\n    EnvoyInternalAddress envoy_internal_address = 3;\n  }\n}\n\n// CidrRange specifies an IP Address and a prefix length to construct\n// the subnet mask for a `CIDR <https://tools.ietf.org/html/rfc4632>`_ range.\nmessage CidrRange {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.CidrRange\";\n\n  // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.\n  string address_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Length of prefix, e.g. 0, 32.\n  google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/backoff.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"BackoffProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Backoff Strategy]\n\n// Configuration defining a jittered exponential back off strategy.\nmessage BackoffStrategy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.BackoffStrategy\";\n\n  // The base interval to be used for the next back off computation. It should\n  // be greater than zero and less than or equal to :ref:`max_interval\n  // <envoy_api_field_config.core.v4alpha.BackoffStrategy.max_interval>`.\n  google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // Specifies the maximum interval between retries. This parameter is optional,\n  // but must be greater than or equal to the :ref:`base_interval\n  // <envoy_api_field_config.core.v4alpha.BackoffStrategy.base_interval>` if set. The default\n  // is 10 times the :ref:`base_interval\n  // <envoy_api_field_config.core.v4alpha.BackoffStrategy.base_interval>`.\n  google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/base.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/backoff.proto\";\nimport \"envoy/config/core/v4alpha/http_uri.proto\";\nimport \"envoy/type/v3/percent.proto\";\nimport \"envoy/type/v3/semantic_version.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"BaseProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common types]\n\n// Envoy supports :ref:`upstream priority routing\n// <arch_overview_http_routing_priority>` both at the route and the virtual\n// cluster level. The current priority implementation uses different connection\n// pool and circuit breaking settings for each priority level. This means that\n// even for HTTP/2 requests, two physical connections will be used to an\n// upstream host. 
In the future Envoy will likely support true HTTP/2 priority\n// over a single upstream connection.\nenum RoutingPriority {\n  DEFAULT = 0;\n  HIGH = 1;\n}\n\n// HTTP request method.\nenum RequestMethod {\n  METHOD_UNSPECIFIED = 0;\n  GET = 1;\n  HEAD = 2;\n  POST = 3;\n  PUT = 4;\n  DELETE = 5;\n  CONNECT = 6;\n  OPTIONS = 7;\n  TRACE = 8;\n  PATCH = 9;\n}\n\n// Identifies the direction of the traffic relative to the local Envoy.\nenum TrafficDirection {\n  // Default option is unspecified.\n  UNSPECIFIED = 0;\n\n  // The transport is used for incoming traffic.\n  INBOUND = 1;\n\n  // The transport is used for outgoing traffic.\n  OUTBOUND = 2;\n}\n\n// Identifies location of where either Envoy runs or where upstream hosts run.\nmessage Locality {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Locality\";\n\n  // Region this :ref:`zone <envoy_api_field_config.core.v4alpha.Locality.zone>` belongs to.\n  string region = 1;\n\n  // Defines the local service zone where Envoy is running. Though optional, it\n  // should be set if discovery service routing is used and the discovery\n  // service exposes :ref:`zone data <envoy_api_field_config.endpoint.v3.LocalityLbEndpoints.locality>`,\n  // either in this message or via :option:`--service-zone`. The meaning of zone\n  // is context dependent, e.g. `Availability Zone (AZ)\n  // <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html>`_\n  // on AWS, `Zone <https://cloud.google.com/compute/docs/regions-zones/>`_ on\n  // GCP, etc.\n  string zone = 2;\n\n  // When used for locality of upstream hosts, this field further splits zone\n  // into smaller chunks of sub-zones so they can be load balanced\n  // independently.\n  string sub_zone = 3;\n}\n\n// BuildVersion combines SemVer version of extension with free-form build information\n// (i.e. 
'alpha', 'private-build') as a set of strings.\nmessage BuildVersion {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.BuildVersion\";\n\n  // SemVer version of extension.\n  type.v3.SemanticVersion version = 1;\n\n  // Free-form build information.\n  // Envoy defines several well known keys in the source/common/version/version.h file\n  google.protobuf.Struct metadata = 2;\n}\n\n// Version and identification for an Envoy extension.\n// [#next-free-field: 6]\nmessage Extension {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Extension\";\n\n  // This is the name of the Envoy filter as specified in the Envoy\n  // configuration, e.g. envoy.filters.http.router, com.acme.widget.\n  string name = 1;\n\n  // Category of the extension.\n  // Extension category names use reverse DNS notation. For instance \"envoy.filters.listener\"\n  // for Envoy's built-in listener filters or \"com.acme.filters.http\" for HTTP filters from\n  // acme.com vendor.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.]\n  string category = 2;\n\n  // [#not-implemented-hide:] Type descriptor of extension configuration proto.\n  // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.]\n  // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.]\n  string type_descriptor = 3;\n\n  // The version is a property of the extension and maintained independently\n  // of other extensions and the Envoy API.\n  // This field is not set when extension did not provide version information.\n  BuildVersion version = 4;\n\n  // Indicates that the extension is present but was disabled via dynamic configuration.\n  bool disabled = 5;\n}\n\n// Identifies a specific Envoy instance. 
The node identifier is presented to the\n// management server, which may use this identifier to distinguish per Envoy\n// configuration for serving.\n// [#next-free-field: 12]\nmessage Node {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Node\";\n\n  reserved 5;\n\n  reserved \"build_version\";\n\n  // An opaque node identifier for the Envoy node. This also provides the local\n  // service node name. It should be set if any of the following features are\n  // used: :ref:`statsd <arch_overview_statistics>`, :ref:`CDS\n  // <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-node`.\n  string id = 1;\n\n  // Defines the local service cluster name where Envoy is running. Though\n  // optional, it should be set if any of the following features are used:\n  // :ref:`statsd <arch_overview_statistics>`, :ref:`health check cluster\n  // verification\n  // <envoy_api_field_config.core.v4alpha.HealthCheck.HttpHealthCheck.service_name_matcher>`,\n  // :ref:`runtime override directory <envoy_api_msg_config.bootstrap.v4alpha.Runtime>`,\n  // :ref:`user agent addition\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.add_user_agent>`,\n  // :ref:`HTTP global rate limiting <config_http_filters_rate_limit>`,\n  // :ref:`CDS <config_cluster_manager_cds>`, and :ref:`HTTP tracing\n  // <arch_overview_tracing>`, either in this message or via\n  // :option:`--service-cluster`.\n  string cluster = 2;\n\n  // Opaque metadata extending the node identifier. Envoy will pass this\n  // directly to the management server.\n  google.protobuf.Struct metadata = 3;\n\n  // Locality specifying where the Envoy instance is running.\n  Locality locality = 4;\n\n  // Free-form string that identifies the entity requesting config.\n  // E.g. 
\"envoy\" or \"grpc\"\n  string user_agent_name = 6;\n\n  oneof user_agent_version_type {\n    // Free-form string that identifies the version of the entity requesting config.\n    // E.g. \"1.12.2\" or \"abcd1234\", or \"SpecialEnvoyBuild\"\n    string user_agent_version = 7;\n\n    // Structured version of the entity requesting config.\n    BuildVersion user_agent_build_version = 8;\n  }\n\n  // List of extensions and their versions supported by the node.\n  repeated Extension extensions = 9;\n\n  // Client feature support list. These are well known features described\n  // in the Envoy API repository for a given major version of an API. Client features\n  // use reverse DNS naming scheme, for example `com.acme.feature`.\n  // See :ref:`the list of features <client_features>` that xDS client may\n  // support.\n  repeated string client_features = 10;\n\n  // Known listening ports on the node as a generic hint to the management server\n  // for filtering :ref:`listeners <config_listeners>` to be returned. For example,\n  // if there is a listener bound to port 80, the list can optionally contain the\n  // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint.\n  repeated Address hidden_envoy_deprecated_listening_addresses = 11 [deprecated = true];\n}\n\n// Metadata provides additional inputs to filters based on matched listeners,\n// filter chains, routes and endpoints. It is structured as a map, usually from\n// filter name (in reverse DNS format) to metadata specific to the filter. 
Metadata\n// key-values for a filter are merged as connection and request handling occurs,\n// with later values for the same key overriding earlier values.\n//\n// An example use of metadata is providing additional values to\n// http_connection_manager in the envoy.http_connection_manager.access_log\n// namespace.\n//\n// Another example use of metadata is to per service config info in cluster metadata, which may get\n// consumed by multiple filters.\n//\n// For load balancing, Metadata provides a means to subset cluster endpoints.\n// Endpoints have a Metadata object associated and routes contain a Metadata\n// object to match against. There are some well defined metadata used today for\n// this purpose:\n//\n// * ``{\"envoy.lb\": {\"canary\": <bool> }}`` This indicates the canary status of an\n//   endpoint and is also used during header processing\n//   (x-envoy-upstream-canary) and for stats purposes.\n// [#next-major-version: move to type/metadata/v2]\nmessage Metadata {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.Metadata\";\n\n  // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*\n  // namespace is reserved for Envoy's built-in filters.\n  map<string, google.protobuf.Struct> filter_metadata = 1;\n}\n\n// Runtime derived uint32 with a default when not specified.\nmessage RuntimeUInt32 {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.RuntimeUInt32\";\n\n  // Default value if runtime value is not available.\n  uint32 default_value = 2;\n\n  // Runtime key to get value for comparison. 
This value is used if defined.\n  string runtime_key = 3 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived percentage with a default when not specified.\nmessage RuntimePercent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RuntimePercent\";\n\n  // Default value if runtime value is not available.\n  type.v3.Percent default_value = 1;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived double with a default when not specified.\nmessage RuntimeDouble {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.RuntimeDouble\";\n\n  // Default value if runtime value is not available.\n  double default_value = 1;\n\n  // Runtime key to get value for comparison. This value is used if defined.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Runtime derived bool with a default when not specified.\nmessage RuntimeFeatureFlag {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RuntimeFeatureFlag\";\n\n  // Default value if runtime value is not available.\n  google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key to get value for comparison. This value is used if defined. 
The boolean value must\n  // be represented via its\n  // `canonical JSON encoding <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Header name/value pair.\nmessage HeaderValue {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.HeaderValue\";\n\n  // Header name.\n  string key = 1\n      [(validate.rules).string =\n           {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Header value.\n  //\n  // The same :ref:`format specifier <config_access_log_format>` as used for\n  // :ref:`HTTP access logging <config_access_log>` applies here, however\n  // unknown header values are replaced with the empty string instead of `-`.\n  string value = 2 [\n    (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}\n  ];\n}\n\n// Header name/value pair plus option to control append behavior.\nmessage HeaderValueOption {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.HeaderValueOption\";\n\n  // Header name/value pair that this option applies to.\n  HeaderValue header = 1 [(validate.rules).message = {required: true}];\n\n  // Should the value be appended? If true (default), the value is appended to\n  // existing values. 
Otherwise it replaces any existing values.\n  google.protobuf.BoolValue append = 2;\n}\n\n// Wrapper for a set of headers.\nmessage HeaderMap {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.HeaderMap\";\n\n  repeated HeaderValue headers = 1;\n}\n\n// Data source consisting of either a file or an inline value.\nmessage DataSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.DataSource\";\n\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local filesystem data source.\n    string filename = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Bytes inlined in the configuration.\n    bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];\n\n    // String inlined in the configuration.\n    string inline_string = 3 [(validate.rules).string = {min_len: 1}];\n  }\n}\n\n// The message specifies the retry policy of remote data source when fetching fails.\nmessage RetryPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.RetryPolicy\";\n\n  // Specifies parameters that control :ref:`retry backoff strategy <envoy_api_msg_config.core.v4alpha.BackoffStrategy>`.\n  // This parameter is optional, in which case the default base interval is 1000 milliseconds. The\n  // default maximum interval is 10 times the base interval.\n  BackoffStrategy retry_back_off = 1;\n\n  // Specifies the allowed number of retries. 
This parameter is optional and\n  // defaults to 1.\n  google.protobuf.UInt32Value max_retries = 2;\n}\n\n// The message specifies how to fetch data from remote and how to verify it.\nmessage RemoteDataSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RemoteDataSource\";\n\n  // The HTTP URI to fetch the remote data.\n  HttpUri http_uri = 1 [(validate.rules).message = {required: true}];\n\n  // SHA256 string for verifying data.\n  string sha256 = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Retry policy for fetching remote data.\n  RetryPolicy retry_policy = 3;\n}\n\n// Async data source which support async data fetch.\nmessage AsyncDataSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.AsyncDataSource\";\n\n  oneof specifier {\n    option (validate.required) = true;\n\n    // Local async data source.\n    DataSource local = 1;\n\n    // Remote async data source.\n    RemoteDataSource remote = 2;\n  }\n}\n\n// Configuration for transport socket in :ref:`listeners <config_listeners>` and\n// :ref:`clusters <envoy_api_msg_config.cluster.v4alpha.Cluster>`. If the configuration is\n// empty, a default transport socket implementation and configuration will be\n// chosen based on the platform and existence of tls_context.\nmessage TransportSocket {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.TransportSocket\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the transport socket to instantiate. 
The name must match a supported transport\n  // socket implementation.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Implementation specific configuration which depends on the implementation being instantiated.\n  // See the supported transport socket implementations for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not\n// specified via a runtime key.\n//\n// .. note::\n//\n//   Parsing of the runtime key's data is implemented such that it may be represented as a\n//   :ref:`FractionalPercent <envoy_api_msg_type.v3.FractionalPercent>` proto represented as JSON/YAML\n//   and may also be represented as an integer with the assumption that the value is an integral\n//   percentage out of 100. For instance, a runtime key lookup returning the value \"42\" would parse\n//   as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.\nmessage RuntimeFractionalPercent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RuntimeFractionalPercent\";\n\n  // Default value if the runtime value's for the numerator/denominator keys are not available.\n  type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}];\n\n  // Runtime key for a YAML representation of a FractionalPercent.\n  string runtime_key = 2;\n}\n\n// Identifies a specific ControlPlane instance that Envoy is connected to.\nmessage ControlPlane {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.ControlPlane\";\n\n  // An opaque control plane identifier that uniquely identifies an instance\n  // of control plane. This can be used to identify which control plane instance,\n  // the Envoy is connected to.\n  string identifier = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/config_source.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/authority.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"ConfigSourceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Configuration sources]\n\n// xDS API and non-xDS services version. This is used to describe both resource and transport\n// protocol versions (in distinct configuration fields).\nenum ApiVersion {\n  // When not specified, we assume v2, to ease migration to Envoy's stable API\n  // versioning. If a client does not support v2 (e.g. due to deprecation), this\n  // is an invalid value.\n  AUTO = 0;\n\n  // Use xDS v2 API.\n  V2 = 1;\n\n  // Use xDS v3 API.\n  V3 = 2;\n}\n\n// API configuration source. This identifies the API type and cluster that Envoy\n// will use to fetch an xDS API.\n// [#next-free-field: 9]\nmessage ApiConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.ApiConfigSource\";\n\n  // APIs may be fetched via either REST or gRPC.\n  enum ApiType {\n    // Ideally this would be 'reserved 0' but one can't reserve the default\n    // value. Instead we throw an exception if this is ever used.\n    hidden_envoy_deprecated_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // REST-JSON v2 API. 
The `canonical JSON encoding\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_ for\n    // the v2 protos is used.\n    REST = 1;\n\n    // SotW gRPC service.\n    GRPC = 2;\n\n    // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}\n    // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state\n    // with every update, the xDS server only sends what has changed since the last update.\n    DELTA_GRPC = 3;\n\n    // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be\n    // multiplexed on a single connection to an ADS endpoint.\n    // [#not-implemented-hide:]\n    AGGREGATED_GRPC = 5;\n\n    // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be\n    // multiplexed on a single connection to an ADS endpoint.\n    // [#not-implemented-hide:]\n    AGGREGATED_DELTA_GRPC = 6;\n  }\n\n  // API type (gRPC, REST, delta gRPC)\n  ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}];\n\n  // Cluster names should be used only with REST. If > 1\n  // cluster is defined, clusters will be cycled through if any kind of failure\n  // occurs.\n  //\n  // .. note::\n  //\n  //  The cluster with name ``cluster_name`` must be statically defined and its\n  //  type must not be ``EDS``.\n  repeated string cluster_names = 2;\n\n  // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined,\n  // services will be cycled through if any kind of failure occurs.\n  repeated GrpcService grpc_services = 4;\n\n  // For REST APIs, the delay between successive polls.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // For REST APIs, the request timeout. 
If not set, a default value of 1s will be used.\n  google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}];\n\n  // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be\n  // rate limited.\n  RateLimitSettings rate_limit_settings = 6;\n\n  // Skip the node identifier in subsequent discovery requests for streaming gRPC config types.\n  bool set_node_on_first_message_only = 7;\n}\n\n// Aggregated Discovery Service (ADS) options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_config.core.v4alpha.ConfigSource>` can be used to\n// specify that ADS is to be used.\nmessage AggregatedConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.AggregatedConfigSource\";\n}\n\n// [#not-implemented-hide:]\n// Self-referencing config source options. This is currently empty, but when\n// set in :ref:`ConfigSource <envoy_api_msg_config.core.v4alpha.ConfigSource>` can be used to\n// specify that other data can be obtained from the same server.\nmessage SelfConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.SelfConfigSource\";\n\n  // API version for xDS transport protocol. This describes the xDS gRPC/REST\n  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.\n  ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Rate Limit settings to be applied for discovery requests made by Envoy.\nmessage RateLimitSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.RateLimitSettings\";\n\n  // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a\n  // default value of 100 will be used.\n  google.protobuf.UInt32Value max_tokens = 1;\n\n  // Rate at which tokens will be filled per second. 
If not set, a default fill rate of 10 tokens\n  // per second will be used.\n  google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}];\n}\n\n// Configuration for :ref:`listeners <config_listeners>`, :ref:`clusters\n// <config_cluster_manager>`, :ref:`routes\n// <envoy_api_msg_config.route.v4alpha.RouteConfiguration>`, :ref:`endpoints\n// <arch_overview_service_discovery>` etc. may either be sourced from the\n// filesystem or from an xDS API source. Filesystem configs are watched with\n// inotify for updates.\n// [#next-free-field: 8]\nmessage ConfigSource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.ConfigSource\";\n\n  // Authorities that this config source may be used for. An authority specified\n  // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior\n  // to configuration fetch. This field provides the association between\n  // authority name and configuration source.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.Authority authorities = 7;\n\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Path on the filesystem to source and watch for configuration updates.\n    // When sourcing configuration for :ref:`secret <envoy_api_msg_extensions.transport_sockets.tls.v4alpha.Secret>`,\n    // the certificate and key files are also watched for updates.\n    //\n    // .. note::\n    //\n    //  The path to the source must exist at config load time.\n    //\n    // .. note::\n    //\n    //   Envoy will only watch the file path for *moves.* This is because in general only moves\n    //   are atomic. The same method of swapping files as is demonstrated in the\n    //   :ref:`runtime documentation <config_runtime_symbolic_link_swap>` can be used here also.\n    string path = 1;\n\n    // API configuration source.\n    ApiConfigSource api_config_source = 2;\n\n    // When set, ADS will be used to fetch resources. 
The ADS API configuration\n    // source in the bootstrap configuration is used.\n    AggregatedConfigSource ads = 3;\n\n    // [#not-implemented-hide:]\n    // When set, the client will access the resources from the same server it got the\n    // ConfigSource from, although not necessarily from the same stream. This is similar to the\n    // :ref:`ads<envoy_api_field.ConfigSource.ads>` field, except that the client may use a\n    // different stream to the same server. As a result, this field can be used for things\n    // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.)\n    // LDS to RDS on the same server without requiring the management server to know its name\n    // or required credentials.\n    // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since\n    // this field can implicitly mean to use the same stream in the case where the ConfigSource\n    // is provided via ADS and the specified data can also be obtained via ADS.]\n    SelfConfigSource self = 5;\n  }\n\n  // When this timeout is specified, Envoy will wait no longer than the specified time for first\n  // config response on this xDS subscription during the :ref:`initialization process\n  // <arch_overview_initialization>`. After reaching the timeout, Envoy will move to the next\n  // initialization phase, even if the first config is not delivered yet. The timer is activated\n  // when the xDS API subscription starts, and is disarmed on first config update or on error. 0\n  // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another\n  // timeout applies). The default is 15s.\n  google.protobuf.Duration initial_fetch_timeout = 4;\n\n  // API version for xDS resources. 
This implies the type URLs that the client\n  // will request for resources and the resource type that the client will in\n  // turn expect to be delivered.\n  ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/event_service_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"EventServiceConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#not-implemented-hide:]\n// Configuration of the event reporting service endpoint.\nmessage EventServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.EventServiceConfig\";\n\n  oneof config_source_specifier {\n    option (validate.required) = true;\n\n    // Specifies the gRPC service that hosts the event reporting service.\n    GrpcService grpc_service = 1;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/extension.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/config_source.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"ExtensionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Extension configuration]\n\n// Message type for extension configuration.\n// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.].\nmessage TypedExtensionConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.TypedExtensionConfig\";\n\n  // The name of an extension. This is not used to select the extension, instead\n  // it serves the role of an opaque identifier.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The typed config for the extension. The type URL will be used to identify\n  // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*,\n  // the inner type URL of *TypedStruct* will be utilized. See the\n  // :ref:`extension configuration overview\n  // <config_overview_extension_configuration>` for further details.\n  google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}];\n}\n\n// Configuration source specifier for a late-bound extension configuration. The\n// parent resource is warmed until all the initial extension configurations are\n// received, unless the flag to apply the default configuration is set.\n// Subsequent extension updates are atomic on a per-worker basis. Once an\n// extension configuration is applied to a request or a connection, it remains\n// constant for the duration of processing. 
If the initial delivery of the\n// extension configuration fails, due to a timeout for example, the optional\n// default configuration is applied. Without a default configuration, the\n// extension is disabled, until an extension configuration is received. The\n// behavior of a disabled extension depends on the context. For example, a\n// filter chain with a disabled extension filter rejects all incoming streams.\nmessage ExtensionConfigSource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.ExtensionConfigSource\";\n\n  ConfigSource config_source = 1 [(validate.rules).any = {required: true}];\n\n  // Optional default configuration to use as the initial configuration if\n  // there is a failure to receive the initial extension configuration or if\n  // `apply_default_config_without_warming` flag is set.\n  google.protobuf.Any default_config = 2;\n\n  // Use the default config as the initial configuration without warming and\n  // waiting for the first discovery response. Requires the default configuration\n  // to be supplied.\n  bool apply_default_config_without_warming = 3;\n\n  // A set of permitted extension type URLs. Extension configuration updates are rejected\n  // if they do not match any type URL in the set.\n  repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"GrpcMethodListProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: gRPC method list]\n\n// A list of gRPC methods which can be used as an allowlist, for example.\nmessage GrpcMethodList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.GrpcMethodList\";\n\n  message Service {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.GrpcMethodList.Service\";\n\n    // The name of the gRPC service.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // The names of the gRPC methods in this service.\n    repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  repeated Service services = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"GrpcServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: gRPC services]\n\n// gRPC service configuration. This is used by :ref:`ApiConfigSource\n// <envoy_api_msg_config.core.v4alpha.ApiConfigSource>` and filter configurations.\n// [#next-free-field: 6]\nmessage GrpcService {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.GrpcService\";\n\n  message EnvoyGrpc {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.GrpcService.EnvoyGrpc\";\n\n    // The name of the upstream gRPC cluster. SSL credentials will be supplied\n    // in the :ref:`Cluster <envoy_api_msg_config.cluster.v4alpha.Cluster>` :ref:`transport_socket\n    // <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket>`.\n    string cluster_name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`.\n    // Note that this authority does not override the SNI. 
The SNI is provided by the transport socket of the cluster.\n    string authority = 2\n        [(validate.rules).string =\n             {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // [#next-free-field: 9]\n  message GoogleGrpc {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.GrpcService.GoogleGrpc\";\n\n    // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.\n    message SslCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials\";\n\n      // PEM encoded server root certificates.\n      DataSource root_certs = 1;\n\n      // PEM encoded client private key.\n      DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n      // PEM encoded client certificate chain.\n      DataSource cert_chain = 3;\n    }\n\n    // Local channel credentials. Only UDS is supported for now.\n    // See https://github.com/grpc/grpc/pull/15909.\n    message GoogleLocalCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials\";\n    }\n\n    // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call\n    // credential types.\n    message ChannelCredentials {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials\";\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        SslCredentials ssl_credentials = 1;\n\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_default = 2;\n\n        GoogleLocalCredentials local_credentials = 3;\n      }\n    }\n\n    // [#next-free-field: 8]\n    message CallCredentials {\n      option 
(udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials\";\n\n      message ServiceAccountJWTAccessCredentials {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.\"\n            \"ServiceAccountJWTAccessCredentials\";\n\n        string json_key = 1;\n\n        uint64 token_lifetime_seconds = 2;\n      }\n\n      message GoogleIAMCredentials {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials\";\n\n        string authorization_token = 1;\n\n        string authority_selector = 2;\n      }\n\n      message MetadataCredentialsFromPlugin {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.\"\n            \"MetadataCredentialsFromPlugin\";\n\n        reserved 2;\n\n        reserved \"config\";\n\n        string name = 1;\n\n        oneof config_type {\n          google.protobuf.Any typed_config = 3;\n        }\n      }\n\n      // Security token service configuration that allows Google gRPC to\n      // fetch security token from an OAuth 2.0 authorization server.\n      // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and\n      // https://github.com/grpc/grpc/pull/19587.\n      // [#next-free-field: 10]\n      message StsService {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService\";\n\n        // URI of the token exchange service that handles token exchange requests.\n        // [#comment:TODO(asraa): Add URI validation when implemented. 
Tracked by\n        // https://github.com/envoyproxy/protoc-gen-validate/issues/303]\n        string token_exchange_service_uri = 1;\n\n        // Location of the target service or resource where the client\n        // intends to use the requested security token.\n        string resource = 2;\n\n        // Logical name of the target service where the client intends to\n        // use the requested security token.\n        string audience = 3;\n\n        // The desired scope of the requested security token in the\n        // context of the service or resource where the token will be used.\n        string scope = 4;\n\n        // Type of the requested security token.\n        string requested_token_type = 5;\n\n        // The path of subject token, a security token that represents the\n        // identity of the party on behalf of whom the request is being made.\n        string subject_token_path = 6 [(validate.rules).string = {min_len: 1}];\n\n        // Type of the subject token.\n        string subject_token_type = 7 [(validate.rules).string = {min_len: 1}];\n\n        // The path of actor token, a security token that represents the identity\n        // of the acting party. 
The acting party is authorized to use the\n        // requested security token and act on behalf of the subject.\n        string actor_token_path = 8;\n\n        // Type of the actor token.\n        string actor_token_type = 9;\n      }\n\n      oneof credential_specifier {\n        option (validate.required) = true;\n\n        // Access token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.\n        string access_token = 1;\n\n        // Google Compute Engine credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61\n        google.protobuf.Empty google_compute_engine = 2;\n\n        // Google refresh token credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.\n        string google_refresh_token = 3;\n\n        // Service Account JWT Access credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.\n        ServiceAccountJWTAccessCredentials service_account_jwt_access = 4;\n\n        // Google IAM credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.\n        GoogleIAMCredentials google_iam = 5;\n\n        // Custom authenticator credentials.\n        // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.\n        // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.\n        MetadataCredentialsFromPlugin from_plugin = 6;\n\n        // Custom security token service which implements OAuth 2.0 token exchange.\n        // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16\n        // See https://github.com/grpc/grpc/pull/19587.\n        StsService sts_service = 7;\n      }\n    }\n\n    // Channel arguments.\n    message ChannelArgs {\n      option (udpa.annotations.versioning).previous_message_type =\n          
\"envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs\";\n\n      message Value {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value\";\n\n        // Pointer values are not supported, since they don't make any sense when\n        // delivered via the API.\n        oneof value_specifier {\n          option (validate.required) = true;\n\n          string string_value = 1;\n\n          int64 int_value = 2;\n        }\n      }\n\n      // See grpc_types.h GRPC_ARG #defines for keys that work here.\n      map<string, Value> args = 1;\n    }\n\n    // The target URI when using the `Google C++ gRPC client\n    // <https://github.com/grpc/grpc>`_. SSL credentials will be supplied in\n    // :ref:`channel_credentials <envoy_api_field_config.core.v4alpha.GrpcService.GoogleGrpc.channel_credentials>`.\n    string target_uri = 1 [(validate.rules).string = {min_len: 1}];\n\n    ChannelCredentials channel_credentials = 2;\n\n    // A set of call credentials that can be composed with `channel credentials\n    // <https://grpc.io/docs/guides/auth.html#credential-types>`_.\n    repeated CallCredentials call_credentials = 3;\n\n    // The human readable prefix to use when emitting statistics for the gRPC\n    // service.\n    //\n    // .. csv-table::\n    //    :header: Name, Type, Description\n    //    :widths: 1, 1, 2\n    //\n    //    streams_total, Counter, Total number of streams opened\n    //    streams_closed_<gRPC status code>, Counter, Total streams closed with <gRPC status code>\n    string stat_prefix = 4 [(validate.rules).string = {min_len: 1}];\n\n    // The name of the Google gRPC credentials factory to use. This must have been registered with\n    // Envoy. 
If this is empty, a default credentials factory will be used that sets up channel\n    // credentials based on other configuration parameters.\n    string credentials_factory_name = 5;\n\n    // Additional configuration for site-specific customizations of the Google\n    // gRPC library.\n    google.protobuf.Struct config = 6;\n\n    // How many bytes each stream can buffer internally.\n    // If not set an implementation defined default is applied (1MiB).\n    google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7;\n\n    // Custom channels args.\n    ChannelArgs channel_args = 8;\n  }\n\n  reserved 4;\n\n  oneof target_specifier {\n    option (validate.required) = true;\n\n    // Envoy's in-built gRPC client.\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    EnvoyGrpc envoy_grpc = 1;\n\n    // `Google C++ gRPC client <https://github.com/grpc/grpc>`_\n    // See the :ref:`gRPC services overview <arch_overview_grpc_services>`\n    // documentation for discussion on gRPC client selection.\n    GoogleGrpc google_grpc = 2;\n  }\n\n  // The timeout for the gRPC request. This is the timeout for a specific\n  // request.\n  google.protobuf.Duration timeout = 3;\n\n  // Additional metadata to include in streams initiated to the GrpcService.\n  // This can be used for scenarios in which additional ad hoc authorization\n  // headers (e.g. ``x-foo-bar: baz-key``) are to be injected.\n  repeated HeaderValue initial_metadata = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/event_service_config.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/v3/http.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Health check]\n// * Health checking :ref:`architecture overview <arch_overview_health_checking>`.\n// * If health checking is configured for a cluster, additional statistics are emitted. They are\n//   documented :ref:`here <config_cluster_manager_cluster_stats>`.\n\n// Endpoint health status.\nenum HealthStatus {\n  // The health status is not known. This is interpreted by Envoy as *HEALTHY*.\n  UNKNOWN = 0;\n\n  // Healthy.\n  HEALTHY = 1;\n\n  // Unhealthy.\n  UNHEALTHY = 2;\n\n  // Connection draining in progress. E.g.,\n  // `<https://aws.amazon.com/blogs/aws/elb-connection-draining-remove-instances-from-service-with-care/>`_\n  // or\n  // `<https://cloud.google.com/compute/docs/load-balancing/enabling-connection-draining>`_.\n  // This is interpreted by Envoy as *UNHEALTHY*.\n  DRAINING = 3;\n\n  // Health check timed out. 
This is part of HDS and is interpreted by Envoy as\n  // *UNHEALTHY*.\n  TIMEOUT = 4;\n\n  // Degraded.\n  DEGRADED = 5;\n}\n\n// [#next-free-field: 24]\nmessage HealthCheck {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.HealthCheck\";\n\n  // Describes the encoding of the payload bytes in the payload.\n  message Payload {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.Payload\";\n\n    oneof payload {\n      option (validate.required) = true;\n\n      // Hex encoded payload. E.g., \"000000FF\".\n      string text = 1 [(validate.rules).string = {min_len: 1}];\n\n      // [#not-implemented-hide:] Binary payload.\n      bytes binary = 2;\n    }\n  }\n\n  // [#next-free-field: 12]\n  message HttpHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.HttpHealthCheck\";\n\n    reserved 5, 7;\n\n    reserved \"service_name\", \"use_http2\";\n\n    // The value of the host header in the HTTP health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The host header can be customized for a specific endpoint by setting the\n    // :ref:`hostname <envoy_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.\n    string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Specifies the HTTP path that will be requested during health checking. 
For example\n    // */healthcheck*.\n    string path = 2\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // [#not-implemented-hide:] HTTP specific payload.\n    Payload send = 3;\n\n    // [#not-implemented-hide:] HTTP specific response.\n    Payload receive = 4;\n\n    // Specifies a list of HTTP headers that should be added to each request that is sent to the\n    // health checked cluster. For more information, including details on header value syntax, see\n    // the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated HeaderValueOption request_headers_to_add = 6\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request that is sent to the\n    // health checked cluster.\n    repeated string request_headers_to_remove = 8 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default\n    // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open\n    // semantics of :ref:`Int64Range <envoy_api_msg_type.v3.Int64Range>`. The start and end of each\n    // range are required. Only statuses in the range [100, 600) are allowed.\n    repeated type.v3.Int64Range expected_statuses = 9;\n\n    // Use specified application protocol for health checks.\n    type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}];\n\n    // An optional service name parameter which is used to validate the identity of\n    // the health checked cluster using a :ref:`StringMatcher\n    // <envoy_api_msg_type.matcher.v4alpha.StringMatcher>`. 
See the :ref:`architecture overview\n    // <arch_overview_health_checking_identity>` for more information.\n    type.matcher.v4alpha.StringMatcher service_name_matcher = 11;\n  }\n\n  message TcpHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.TcpHealthCheck\";\n\n    // Empty payloads imply a connect-only health check.\n    Payload send = 1;\n\n    // When checking the response, “fuzzy” matching is performed such that each\n    // binary block must be found, and in the order specified, but not\n    // necessarily contiguous.\n    repeated Payload receive = 2;\n  }\n\n  message RedisHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.RedisHealthCheck\";\n\n    // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value\n    // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other\n    // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance\n    // by setting the specified key to any value and waiting for traffic to drain.\n    string key = 1;\n  }\n\n  // `grpc.health.v1.Health\n  // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto>`_-based\n  // healthcheck. See `gRPC doc <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_\n  // for details.\n  message GrpcHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.GrpcHealthCheck\";\n\n    // An optional service name parameter which will be sent to gRPC service in\n    // `grpc.health.v1.HealthCheckRequest\n    // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto#L20>`_.\n    // message. 
See `gRPC health-checking overview\n    // <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_ for more information.\n    string service_name = 1;\n\n    // The value of the :authority header in the gRPC health check request. If\n    // left empty (default value), the name of the cluster this health check is associated\n    // with will be used. The authority header can be customized for a specific endpoint by setting\n    // the :ref:`hostname <envoy_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.\n    string authority = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // Custom health check.\n  message CustomHealthCheck {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.CustomHealthCheck\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    // The registered name of the custom health checker.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // A custom health checker specific configuration which depends on the custom health checker\n    // being instantiated. See :api:`envoy/config/health_checker` for reference.\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Health checks occur over the transport socket specified for the cluster. This implies that if a\n  // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS.\n  //\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  message TlsOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.HealthCheck.TlsOptions\";\n\n    // Specifies the ALPN protocols for health check connections. 
This is useful if the\n    // corresponding upstream is using ALPN-based :ref:`FilterChainMatch\n    // <envoy_api_msg_config.listener.v4alpha.FilterChainMatch>` along with different protocols for health checks\n    // versus data connections. If empty, no ALPN protocols will be set on health check connections.\n    repeated string alpn_protocols = 1;\n  }\n\n  reserved 10;\n\n  // The time to wait for a health check response. If the timeout is reached the\n  // health check attempt will be considered a failure.\n  google.protobuf.Duration timeout = 1 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // The interval between health checks.\n  google.protobuf.Duration interval = 2 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n\n  // An optional jitter amount in milliseconds. If specified, Envoy will start health\n  // checking after for a random time in ms between 0 and initial_jitter. This only\n  // applies to the first health check.\n  google.protobuf.Duration initial_jitter = 20;\n\n  // An optional jitter amount in milliseconds. If specified, during every\n  // interval Envoy will add interval_jitter to the wait time.\n  google.protobuf.Duration interval_jitter = 3;\n\n  // An optional jitter amount as a percentage of interval_ms. If specified,\n  // during every interval Envoy will add interval_ms *\n  // interval_jitter_percent / 100 to the wait time.\n  //\n  // If interval_jitter_ms and interval_jitter_percent are both set, both of\n  // them will be used to increase the wait time.\n  uint32 interval_jitter_percent = 18;\n\n  // The number of unhealthy health checks required before a host is marked\n  // unhealthy. 
Note that for *http* health checking if a host responds with 503\n  // this threshold is ignored and the host is considered unhealthy immediately.\n  google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}];\n\n  // The number of healthy health checks required before a host is marked\n  // healthy. Note that during startup, only a single successful health check is\n  // required to mark a host healthy.\n  google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Non-serving port for health checking.\n  google.protobuf.UInt32Value alt_port = 6;\n\n  // Reuse health check connection between health checks. Default is true.\n  google.protobuf.BoolValue reuse_connection = 7;\n\n  oneof health_checker {\n    option (validate.required) = true;\n\n    // HTTP health check.\n    HttpHealthCheck http_health_check = 8;\n\n    // TCP health check.\n    TcpHealthCheck tcp_health_check = 9;\n\n    // gRPC health check.\n    GrpcHealthCheck grpc_health_check = 11;\n\n    // Custom health check.\n    CustomHealthCheck custom_health_check = 13;\n  }\n\n  // The \"no traffic interval\" is a special health check interval that is used when a cluster has\n  // never had traffic routed to it. This lower interval allows cluster information to be kept up to\n  // date, without sending a potentially large amount of active health checking traffic for no\n  // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the\n  // standard health check interval that is defined. Note that this interval takes precedence over\n  // any other.\n  //\n  // The default value for \"no traffic interval\" is 60 seconds.\n  google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy interval\" is a health check interval that is used for hosts that are marked as\n  // unhealthy. 
As soon as the host is marked as healthy, Envoy will shift back to using the\n  // standard health check interval that is defined.\n  //\n  // The default value for \"unhealthy interval\" is the same as \"interval\".\n  google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}];\n\n  // The \"unhealthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as unhealthy. For subsequent health checks\n  // Envoy will shift back to using either \"unhealthy interval\" if present or the standard health\n  // check interval that is defined.\n  //\n  // The default value for \"unhealthy edge interval\" is the same as \"unhealthy interval\".\n  google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}];\n\n  // The \"healthy edge interval\" is a special health check interval that is used for the first\n  // health check right after a host is marked as healthy. For subsequent health checks\n  // Envoy will shift back to using the standard health check interval that is defined.\n  //\n  // The default value for \"healthy edge interval\" is the same as the default interval.\n  google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}];\n\n  // Specifies the path to the :ref:`health check event log <arch_overview_health_check_logging>`.\n  // If empty, no event log will be written.\n  string event_log_path = 17;\n\n  // [#not-implemented-hide:]\n  // The gRPC service for the health check event service.\n  // If empty, health check events won't be sent to a remote endpoint.\n  EventServiceConfig event_service = 22;\n\n  // If set to true, health check failure events will always be logged. 
If set to false, only the\n  // initial health check failure event will be logged.\n  // The default value is false.\n  bool always_log_health_check_failures = 19;\n\n  // This allows overriding the cluster TLS settings, just for health check connections.\n  TlsOptions tls_options = 21;\n\n  // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's\n  // :ref:`tranport socket matches <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket_matches>`.\n  // For example, the following match criteria\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_match_criteria:\n  //    useMTLS: true\n  //\n  // Will match the following :ref:`cluster socket match <envoy_api_msg_config.cluster.v4alpha.Cluster.TransportSocketMatch>`\n  //\n  // .. code-block:: yaml\n  //\n  //  transport_socket_matches:\n  //  - name: \"useMTLS\"\n  //    match:\n  //      useMTLS: true\n  //    transport_socket:\n  //      name: envoy.transport_sockets.tls\n  //      config: { ... } # tls socket configuration\n  //\n  // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the\n  // :ref:`LbEndpoint.Metadata <envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`.\n  // This allows using different transport socket capabilities for health checking versus proxying to the\n  // endpoint.\n  //\n  // If the key/values pairs specified do not match any\n  // :ref:`transport socket matches <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket_matches>`,\n  // the cluster's :ref:`transport socket <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket>`\n  // will be used for health check socket configuration.\n  google.protobuf.Struct transport_socket_match_criteria = 23;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"HttpUriProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP Service URI ]\n\n// Envoy external URI descriptor\nmessage HttpUri {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.HttpUri\";\n\n  // The HTTP server URI. It should be a full FQDN with protocol, host and path.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    uri: https://www.googleapis.com/oauth2/v1/certs\n  //\n  string uri = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Specify how `uri` is to be fetched. Today, this requires an explicit\n  // cluster, but in the future we may support dynamic cluster creation or\n  // inline DNS resolution. See `issue\n  // <https://github.com/envoyproxy/envoy/issues/1606>`_.\n  oneof http_upstream_type {\n    option (validate.required) = true;\n\n    // A cluster is created in the Envoy \"cluster_manager\" config\n    // section. This field specifies the cluster name.\n    //\n    // Example:\n    //\n    // .. code-block:: yaml\n    //\n    //    cluster: jwks_cluster\n    //\n    string cluster = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Sets the maximum duration in milliseconds that a response can take to arrive upon request.\n  google.protobuf.Duration timeout = 3 [(validate.rules).duration = {\n    required: true\n    gte {}\n  }];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"ProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Protocol options]\n\n// [#not-implemented-hide:]\nmessage TcpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.TcpProtocolOptions\";\n}\n\nmessage UpstreamHttpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.UpstreamHttpProtocolOptions\";\n\n  // Set transport socket `SNI <https://en.wikipedia.org/wiki/Server_Name_Indication>`_ for new\n  // upstream connections based on the downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  bool auto_sni = 1;\n\n  // Automatically validate upstream presented certificate for new upstream connections based on the\n  // downstream HTTP host/authority header, as seen by the\n  // :ref:`router filter <config_http_filters_router>`.\n  // This field is intended to be set with the `auto_sni` field.\n  bool auto_san_validation = 2;\n}\n\n// [#next-free-field: 6]\nmessage HttpProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.HttpProtocolOptions\";\n\n  // Action to take when Envoy receives client request with header names containing underscore\n  // characters.\n  // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented\n  // as a security measure due to systems 
that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore\n  // characters.\n  enum HeadersWithUnderscoresAction {\n    // Allow headers with underscores. This is the default behavior.\n    ALLOW = 0;\n\n    // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests\n    // end with the stream reset. The \"httpN.requests_rejected_with_underscores_in_headers\" counter\n    // is incremented for each rejected request.\n    REJECT_REQUEST = 1;\n\n    // Drop the header with name containing underscores. The header is dropped before the filter chain is\n    // invoked and as such filters will not see dropped headers. The\n    // \"httpN.dropped_headers_with_underscores\" is incremented for each dropped header.\n    DROP_HEADER = 2;\n  }\n\n  // The idle timeout for connections. The idle timeout is defined as the\n  // period in which there are no active requests. When the\n  // idle timeout is reached the connection will be closed. If the connection is an HTTP/2\n  // downstream connection a drain sequence will occur prior to closing the connection, see\n  // :ref:`drain_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.drain_timeout>`.\n  // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive.\n  // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0.\n  //\n  // .. warning::\n  //   Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 1;\n\n  // The maximum duration of a connection. The duration is defined as a period since a connection\n  // was established. If not set, there is no max duration. When max_connection_duration is reached\n  // the connection will be closed. 
Drain sequence will occur prior to closing the connection if\n  // it's applicable. See :ref:`drain_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.drain_timeout>`.\n  // Note: not implemented for upstream connections.\n  google.protobuf.Duration max_connection_duration = 3;\n\n  // The maximum number of headers. If unconfigured, the default\n  // maximum number of request headers allowed is 100. Requests that exceed this limit will receive\n  // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.\n  google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}];\n\n  // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be\n  // reset independent of any other timeouts. If not specified, this value is not set.\n  google.protobuf.Duration max_stream_duration = 4;\n\n  // Action to take when a client request with a header name containing underscore characters is received.\n  // If this setting is not specified, the value defaults to ALLOW.\n  // Note: upstream responses are not affected by this setting.\n  HeadersWithUnderscoresAction headers_with_underscores_action = 5;\n}\n\n// [#next-free-field: 8]\nmessage Http1ProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.Http1ProtocolOptions\";\n\n  message HeaderKeyFormat {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat\";\n\n    message ProperCaseWords {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords\";\n    }\n\n    oneof header_format {\n      option (validate.required) = true;\n\n      // Formats the header by proper casing words: the first character and any character following\n      // a special character will 
be capitalized if it's an alpha character. For example,\n      // \"content-type\" becomes \"Content-Type\", and \"foo$b#$are\" becomes \"Foo$B#$Are\".\n      // Note that while this results in most headers following conventional casing, certain headers\n      // are not covered. For example, the \"TE\" header will be formatted as \"Te\".\n      ProperCaseWords proper_case_words = 1;\n    }\n  }\n\n  // Handle HTTP requests with absolute URLs in the requests. These requests\n  // are generally sent by clients to forward/explicit proxies. This allows clients to configure\n  // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the\n  // *http_proxy* environment variable.\n  google.protobuf.BoolValue allow_absolute_url = 1;\n\n  // Handle incoming HTTP/1.0 and HTTP 0.9 requests.\n  // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1\n  // style connect logic, dechunking, and handling lack of client host iff\n  // *default_host_for_http_10* is configured.\n  bool accept_http_10 = 2;\n\n  // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as\n  // Envoy does not otherwise support HTTP/1.0 without a Host header.\n  // This is a no-op if *accept_http_10* is not true.\n  string default_host_for_http_10 = 3;\n\n  // Describes how the keys for response headers should be formatted. By default, all header keys\n  // are lower cased.\n  HeaderKeyFormat header_key_format = 4;\n\n  // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.\n  //\n  // .. 
attention::\n  //\n  //   Note that this only happens when Envoy is chunk encoding which occurs when:\n  //   - The request is HTTP/1.1.\n  //   - Is neither a HEAD only request nor a HTTP Upgrade.\n  //   - Not a response to a HEAD request.\n  //   - The content length header is not present.\n  bool enable_trailers = 5;\n\n  // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding`\n  // headers set. By default such messages are rejected, but if option is enabled - Envoy will\n  // remove Content-Length header and process message.\n  // See `RFC7230, sec. 3.3.3 <https://tools.ietf.org/html/rfc7230#section-3.3.3>` for details.\n  //\n  // .. attention::\n  //   Enabling this option might lead to request smuggling vulnerability, especially if traffic\n  //   is proxied via multiple layers of proxies.\n  bool allow_chunked_length = 6;\n\n  // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate\n  // HTTP/1.1 connections upon receiving an invalid HTTP message. However,\n  // when this option is true, then Envoy will leave the HTTP/1.1 connection\n  // open where possible.\n  // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`.\n  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7;\n}\n\nmessage KeepaliveSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.KeepaliveSettings\";\n\n  // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive.\n  google.protobuf.Duration interval = 1 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // How long to wait for a response to a keepalive PING. 
If a response is not received within this\n  // time period, the connection will be aborted.\n  google.protobuf.Duration timeout = 2 [(validate.rules).duration = {\n    required: true\n    gte {nanos: 1000000}\n  }];\n\n  // A random jitter amount as a percentage of interval that will be added to each interval.\n  // A value of zero means there will be no jitter.\n  // The default value is 15%.\n  type.v3.Percent interval_jitter = 3;\n}\n\n// [#next-free-field: 16]\nmessage Http2ProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.Http2ProtocolOptions\";\n\n  // Defines a parameter to be sent in the SETTINGS frame.\n  // See `RFC7540, sec. 6.5.1 <https://tools.ietf.org/html/rfc7540#section-6.5.1>`_ for details.\n  message SettingsParameter {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter\";\n\n    // The 16 bit parameter identifier.\n    google.protobuf.UInt32Value identifier = 1 [\n      (validate.rules).uint32 = {lte: 65535 gte: 0},\n      (validate.rules).message = {required: true}\n    ];\n\n    // The 32 bit parameter value.\n    google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}];\n  }\n\n  // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_\n  // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values\n  // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header\n  // compression.\n  google.protobuf.UInt32Value hpack_table_size = 1;\n\n  // `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_\n  // allowed for peer on one HTTP/2 connection. 
Valid values range from 1 to 2147483647 (2^31 - 1)\n  // and defaults to 2147483647.\n  //\n  // For upstream connections, this also limits how many streams Envoy will initiate concurrently\n  // on a single connection. If the limit is reached, Envoy may queue requests or establish\n  // additional connections (as allowed per circuit breaker limits).\n  google.protobuf.UInt32Value max_concurrent_streams = 2\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 1}];\n\n  // `Initial stream-level flow-control window\n  // <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size. Valid values range from 65535\n  // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456\n  // (256 * 1024 * 1024).\n  //\n  // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default\n  // window size now, so it's also the minimum.\n  //\n  // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the\n  // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to\n  // stop the flow of data to the codec buffers.\n  google.protobuf.UInt32Value initial_stream_window_size = 3\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Similar to *initial_stream_window_size*, but for connection-level flow-control\n  // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*.\n  google.protobuf.UInt32Value initial_connection_window_size = 4\n      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];\n\n  // Allows proxying Websocket and other upgrades over H2 connect.\n  bool allow_connect = 5;\n\n  // [#not-implemented-hide:] Hiding until envoy has full metadata support.\n  // Still under implementation. DO NOT USE.\n  //\n  // Allows metadata. 
See [metadata\n  // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more\n  // information.\n  bool allow_metadata = 6;\n\n  // Limit the number of pending outbound downstream frames of all types (frames that are waiting to\n  // be written into the socket). Exceeding this limit triggers flood mitigation and connection is\n  // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due\n  // to flood mitigation. The default limit is 10000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,\n  // preventing high memory utilization when receiving continuous stream of these frames. Exceeding\n  // this limit triggers flood mitigation and connection is terminated. The\n  // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood\n  // mitigation. The default limit is 1000.\n  // [#comment:TODO: implement same limits for upstream outbound frames as well.]\n  google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}];\n\n  // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an\n  // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but\n  // might be a result of a broken HTTP/2 implementation. The ``http2.inbound_empty_frames_flood``\n  // stat tracks the number of connections terminated due to flood mitigation.\n  // Setting this to 0 will terminate connection upon receiving first frame with an empty payload\n  // and no end stream flag. 
The default limit is 1.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9;\n\n  // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number\n  // of PRIORITY frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     max_inbound_priority_frames_per_stream * (1 + inbound_streams)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 100.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10;\n\n  // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number\n  // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated\n  // using this formula::\n  //\n  //     1 + 2 * (inbound_streams +\n  //              max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)\n  //\n  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks\n  // the number of connections terminated due to flood mitigation. The default limit is 10.\n  // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,\n  // but more complex implementations that try to estimate available bandwidth require at least 2.\n  // [#comment:TODO: implement same limits for upstream inbound frames as well.]\n  google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11\n      [(validate.rules).uint32 = {gte: 1}];\n\n  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then\n  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. 
However,\n  // when this option is enabled, only the offending stream is terminated.\n  //\n  // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`\n  // iff present.\n  //\n  // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>`\n  //\n  // See `RFC7540, sec. 8.1 <https://tools.ietf.org/html/rfc7540#section-8.1>`_ for details.\n  bool hidden_envoy_deprecated_stream_error_on_invalid_http_messaging = 12 [deprecated = true];\n\n  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then\n  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,\n  // when this option is enabled, only the offending stream is terminated.\n  //\n  // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`\n  //\n  // See `RFC7540, sec. 8.1 <https://tools.ietf.org/html/rfc7540#section-8.1>`_ for details.\n  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14;\n\n  // [#not-implemented-hide:]\n  // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions:\n  //\n  // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by\n  // Envoy.\n  //\n  // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field\n  // 'allow_connect'.\n  //\n  // Note that custom parameters specified through this field can not also be set in the\n  // corresponding named parameters:\n  //\n  // .. 
code-block:: text\n  //\n  //   ID    Field Name\n  //   ----------------\n  //   0x1   hpack_table_size\n  //   0x3   max_concurrent_streams\n  //   0x4   initial_stream_window_size\n  //\n  // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies\n  // between custom parameters with the same identifier will trigger a failure.\n  //\n  // See `IANA HTTP/2 Settings\n  // <https://www.iana.org/assignments/http2-parameters/http2-parameters.xhtml#settings>`_ for\n  // standardized identifiers.\n  repeated SettingsParameter custom_settings_parameters = 13;\n\n  // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer\n  // does not respond within the configured timeout, the connection will be aborted.\n  KeepaliveSettings connection_keepalive = 15;\n}\n\n// [#not-implemented-hide:]\nmessage GrpcProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.GrpcProtocolOptions\";\n\n  Http2ProtocolOptions http2_protocol_options = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"ProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Proxy Protocol]\n\nmessage ProxyProtocolConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.ProxyProtocolConfig\";\n\n  enum Version {\n    // PROXY protocol version 1. Human readable format.\n    V1 = 0;\n\n    // PROXY protocol version 2. Binary format.\n    V2 = 1;\n  }\n\n  // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details\n  Version version = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/socket_option.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"SocketOptionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Socket Option ]\n\n// Generic socket option message. This would be used to set socket options that\n// might not exist in upstream kernels or precompiled Envoy binaries.\n// [#next-free-field: 7]\nmessage SocketOption {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.core.v3.SocketOption\";\n\n  enum SocketState {\n    // Socket options are applied after socket creation but before binding the socket to a port\n    STATE_PREBIND = 0;\n\n    // Socket options are applied after binding the socket to a port but before calling listen()\n    STATE_BOUND = 1;\n\n    // Socket options are applied after calling listen()\n    STATE_LISTENING = 2;\n  }\n\n  // An optional name to give this socket option for debugging, etc.\n  // Uniqueness is not required and no special meaning is assumed.\n  string description = 1;\n\n  // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP\n  int64 level = 2;\n\n  // The numeric name as passed to setsockopt\n  int64 name = 3;\n\n  oneof value {\n    option (validate.required) = true;\n\n    // Because many sockopts take an int value.\n    int64 int_value = 4;\n\n    // Otherwise it's a byte buffer.\n    bytes buf_value = 5;\n  }\n\n  // The state in which the option will be applied. When used in BindConfig\n  // STATE_PREBIND is currently the only valid value.\n  SocketState state = 6 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.core.v4alpha;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.core.v4alpha\";\noption java_outer_classname = \"SubstitutionFormatStringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Substitution format string]\n\n// Configuration to use multiple :ref:`command operators <config_access_log_command_operators>`\n// to generate a new string in either plain text or JSON format.\nmessage SubstitutionFormatString {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.core.v3.SubstitutionFormatString\";\n\n  oneof format {\n    option (validate.required) = true;\n\n    // Specify a format with command operators to form a text string.\n    // Its details is described in :ref:`format string<config_access_log_format_strings>`.\n    //\n    // For example, setting ``text_format`` like below,\n    //\n    // .. validated-code-block:: yaml\n    //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n    //\n    //   text_format: \"%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\\n\"\n    //\n    // generates plain text similar to:\n    //\n    // .. code-block:: text\n    //\n    //   upstream connect error:503:path=/foo\n    //\n    string text_format = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Specify a format with command operators to form a JSON string.\n    // Its details is described in :ref:`format dictionary<config_access_log_format_dictionaries>`.\n    // Values are rendered as strings, numbers, or boolean values as appropriate.\n    // Nested JSON objects may be produced by some command operators (e.g. 
FILTER_STATE or DYNAMIC_METADATA).\n    // See the documentation for a specific command operator for details.\n    //\n    // .. validated-code-block:: yaml\n    //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n    //\n    //   json_format:\n    //     status: \"%RESPONSE_CODE%\"\n    //     message: \"%LOCAL_REPLY_BODY%\"\n    //\n    // The following JSON object would be created:\n    //\n    // .. code-block:: json\n    //\n    //  {\n    //    \"status\": 500,\n    //    \"message\": \"My error message\"\n    //  }\n    //\n    google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}];\n  }\n\n  // If set to true, when command operators are evaluated to null,\n  //\n  // * for ``text_format``, the output of the empty operator is changed from ``-`` to an\n  //   empty string, so that empty values are omitted entirely.\n  // * for ``json_format`` the keys with null values are omitted in the output structure.\n  bool omit_empty_values = 3;\n\n  // Specify a *content_type* field.\n  // If this field is not set then ``text/plain`` is used for *text_format* and\n  // ``application/json`` is used for *json_format*.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   content_type: \"text/html; charset=UTF-8\"\n  //\n  string content_type = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/endpoint/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.endpoint.v3;\n\nimport \"envoy/config/endpoint/v3/endpoint_components.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.endpoint.v3\";\noption java_outer_classname = \"EndpointProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Endpoint configuration]\n// Endpoint discovery :ref:`architecture overview <arch_overview_service_discovery_types_eds>`\n\n// Each route from RDS will map to a single cluster or traffic split across\n// clusters using weights expressed in the RDS WeightedCluster.\n//\n// With EDS, each cluster is treated independently from a LB perspective, with\n// LB taking place between the Localities within a cluster and at a finer\n// granularity between the hosts within a locality. The percentage of traffic\n// for each endpoint is determined by both its load_balancing_weight, and the\n// load_balancing_weight of its locality. 
First, a locality will be selected,\n// then an endpoint within that locality will be chosen based on its weight.\n// [#next-free-field: 6]\nmessage ClusterLoadAssignment {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.ClusterLoadAssignment\";\n\n  // Load balancing policy settings.\n  // [#next-free-field: 6]\n  message Policy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.ClusterLoadAssignment.Policy\";\n\n    // [#not-implemented-hide:]\n    message DropOverload {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload\";\n\n      // Identifier for the policy specifying the drop.\n      string category = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Percentage of traffic that should be dropped for the category.\n      type.v3.FractionalPercent drop_percentage = 2;\n    }\n\n    reserved 1;\n\n    // Action to trim the overall incoming traffic to protect the upstream\n    // hosts. This action allows protection in case the hosts are unable to\n    // recover from an outage, or unable to autoscale or unable to handle\n    // incoming traffic volume for any reason.\n    //\n    // At the client each category is applied one after the other to generate\n    // the 'actual' drop percentage on all outgoing traffic. For example:\n    //\n    // .. 
code-block:: json\n    //\n    //  { \"drop_overloads\": [\n    //      { \"category\": \"throttle\", \"drop_percentage\": 60 }\n    //      { \"category\": \"lb\", \"drop_percentage\": 50 }\n    //  ]}\n    //\n    // The actual drop percentages applied to the traffic at the clients will be\n    //    \"throttle\"_drop = 60%\n    //    \"lb\"_drop = 20%  // 50% of the remaining 'actual' load, which is 40%.\n    //    actual_outgoing_load = 20% // remaining after applying all categories.\n    // [#not-implemented-hide:]\n    repeated DropOverload drop_overloads = 2;\n\n    // Priority levels and localities are considered overprovisioned with this\n    // factor (in percentage). This means that we don't consider a priority\n    // level or locality unhealthy until the fraction of healthy hosts\n    // multiplied by the overprovisioning factor drops below 100.\n    // With the default value 140(1.4), Envoy doesn't consider a priority level\n    // or a locality unhealthy until their percentage of healthy hosts drops\n    // below 72%. For example:\n    //\n    // .. code-block:: json\n    //\n    //  { \"overprovisioning_factor\": 100 }\n    //\n    // Read more at :ref:`priority levels <arch_overview_load_balancing_priority_levels>` and\n    // :ref:`localities <arch_overview_load_balancing_locality_weighted_lb>`.\n    google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}];\n\n    // The max time until which the endpoints from this assignment can be used.\n    // If no new assignments are received before this time expires the endpoints\n    // are considered stale and should be marked unhealthy.\n    // Defaults to 0 which means endpoints never go stale.\n    google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}];\n\n    bool hidden_envoy_deprecated_disable_overprovisioning = 5 [deprecated = true];\n  }\n\n  // Name of the cluster. 
This will be the :ref:`service_name\n  // <envoy_api_field_config.cluster.v3.Cluster.EdsClusterConfig.service_name>` value if specified\n  // in the cluster :ref:`EdsClusterConfig\n  // <envoy_api_msg_config.cluster.v3.Cluster.EdsClusterConfig>`.\n  string cluster_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // List of endpoints to load balance to.\n  repeated LocalityLbEndpoints endpoints = 2;\n\n  // Map of named endpoints that can be referenced in LocalityLbEndpoints.\n  // [#not-implemented-hide:]\n  map<string, Endpoint> named_endpoints = 5;\n\n  // Load balancing policy settings.\n  Policy policy = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.endpoint.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/health_check.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.endpoint.v3\";\noption java_outer_classname = \"EndpointComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Endpoints]\n\n// Upstream host identifier.\nmessage Endpoint {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.endpoint.Endpoint\";\n\n  // The optional health check configuration.\n  message HealthCheckConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.endpoint.Endpoint.HealthCheckConfig\";\n\n    // Optional alternative health check port value.\n    //\n    // By default the health check address port of an upstream host is the same\n    // as the host's serving address port. This provides an alternative health\n    // check port. Setting this with a non-zero value allows an upstream host\n    // to have different health check address port.\n    uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}];\n\n    // By default, the host header for L7 health checks is controlled by cluster level configuration\n    // (see: :ref:`host <envoy_api_field_config.core.v3.HealthCheck.HttpHealthCheck.host>` and\n    // :ref:`authority <envoy_api_field_config.core.v3.HealthCheck.GrpcHealthCheck.authority>`). Setting this\n    // to a non-empty value allows overriding the cluster level configuration for a specific\n    // endpoint.\n    string hostname = 2;\n  }\n\n  // The upstream host address.\n  //\n  // .. 
attention::\n  //\n  //   The form of host address depends on the given cluster type. For STATIC or EDS,\n  //   it is expected to be a direct IP address (or something resolvable by the\n  //   specified :ref:`resolver <envoy_api_field_config.core.v3.SocketAddress.resolver_name>`\n  //   in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname,\n  //   and will be resolved via DNS.\n  core.v3.Address address = 1;\n\n  // The optional health check configuration is used as configuration for the\n  // health checker to contact the health checked host.\n  //\n  // .. attention::\n  //\n  //   This takes into effect only for upstream clusters with\n  //   :ref:`active health checking <arch_overview_health_checking>` enabled.\n  HealthCheckConfig health_check_config = 2;\n\n  // The hostname associated with this endpoint. This hostname is not used for routing or address\n  // resolution. If provided, it will be associated with the endpoint, and can be used for features\n  // that require a hostname, like\n  // :ref:`auto_host_rewrite <envoy_api_field_config.route.v3.RouteAction.auto_host_rewrite>`.\n  string hostname = 3;\n}\n\n// An Endpoint that Envoy can route traffic to.\n// [#next-free-field: 6]\nmessage LbEndpoint {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.endpoint.LbEndpoint\";\n\n  // Upstream host identifier or a named reference.\n  oneof host_identifier {\n    Endpoint endpoint = 1;\n\n    // [#not-implemented-hide:]\n    string endpoint_name = 5;\n  }\n\n  // Optional health status when known and supplied by EDS server.\n  core.v3.HealthStatus health_status = 2;\n\n  // The endpoint metadata specifies values that may be used by the load\n  // balancer to select endpoints in a cluster for a given request. The filter\n  // name should be specified as *envoy.lb*. 
An example boolean key-value pair\n  // is *canary*, providing the optional canary status of the upstream host.\n  // This may be matched against in a route's\n  // :ref:`RouteAction <envoy_api_msg_config.route.v3.RouteAction>` metadata_match field\n  // to subset the endpoints considered in cluster load balancing.\n  core.v3.Metadata metadata = 3;\n\n  // The optional load balancing weight of the upstream host; at least 1.\n  // Envoy uses the load balancing weight in some of the built in load\n  // balancers. The load balancing weight for an endpoint is divided by the sum\n  // of the weights of all endpoints in the endpoint's locality to produce a\n  // percentage of traffic for the endpoint. This percentage is then further\n  // weighted by the endpoint's locality's load balancing weight from\n  // LocalityLbEndpoints. If unspecified, each host is presumed to have equal\n  // weight in a locality. The sum of the weights of all endpoints in the\n  // endpoint's locality must not exceed uint32_t maximal value (4294967295).\n  google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}];\n}\n\n// A group of endpoints belonging to a Locality.\n// One can have multiple LocalityLbEndpoints for a locality, but this is\n// generally only done if the different groups need to have different load\n// balancing weights or different priorities.\n// [#next-free-field: 7]\nmessage LocalityLbEndpoints {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.endpoint.LocalityLbEndpoints\";\n\n  // Identifies location of where the upstream hosts run.\n  core.v3.Locality locality = 1;\n\n  // The group of endpoints belonging to the locality specified.\n  repeated LbEndpoint lb_endpoints = 2;\n\n  // Optional: Per priority/region/zone/sub_zone weight; at least 1. 
The load\n  // balancing weight for a locality is divided by the sum of the weights of all\n  // localities  at the same priority level to produce the effective percentage\n  // of traffic for the locality. The sum of the weights of all localities at\n  // the same priority level must not exceed uint32_t maximal value (4294967295).\n  //\n  // Locality weights are only considered when :ref:`locality weighted load\n  // balancing <arch_overview_load_balancing_locality_weighted_lb>` is\n  // configured. These weights are ignored otherwise. If no weights are\n  // specified when locality weighted load balancing is enabled, the locality is\n  // assigned no load.\n  google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional: the priority for this LocalityLbEndpoints. If unspecified this will\n  // default to the highest priority (0).\n  //\n  // Under usual circumstances, Envoy will only select endpoints for the highest\n  // priority (0). In the event all endpoints for a particular priority are\n  // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the\n  // next highest priority group.\n  //\n  // Priorities should range from 0 (highest) to N (lowest) without skipping.\n  uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}];\n\n  // Optional: Per locality proximity value which indicates how close this\n  // locality is from the source locality. This value only provides ordering\n  // information (lower the value, closer it is to the source locality).\n  // This will be consumed by load balancing schemes that need proximity order\n  // to determine where to route the requests.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value proximity = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/endpoint/v3/load_report.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.endpoint.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.endpoint.v3\";\noption java_outer_classname = \"LoadReportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Load Report]\n\n// These are stats Envoy reports to the management server at a frequency defined by\n// :ref:`LoadStatsResponse.load_reporting_interval<envoy_api_field_service.load_stats.v3.LoadStatsResponse.load_reporting_interval>`.\n// Stats per upstream region/zone and optionally per subzone.\n// [#next-free-field: 9]\nmessage UpstreamLocalityStats {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.endpoint.UpstreamLocalityStats\";\n\n  // Name of zone, region and optionally endpoint group these metrics were\n  // collected from. Zone and region names could be empty if unknown.\n  core.v3.Locality locality = 1;\n\n  // The total number of requests successfully completed by the endpoints in the\n  // locality.\n  uint64 total_successful_requests = 2;\n\n  // The total number of unfinished requests\n  uint64 total_requests_in_progress = 3;\n\n  // The total number of requests that failed due to errors at the endpoint,\n  // aggregated over all endpoints in the locality.\n  uint64 total_error_requests = 4;\n\n  // The total number of requests that were issued by this Envoy since\n  // the last report. 
This information is aggregated over all the\n  // upstream endpoints in the locality.\n  uint64 total_issued_requests = 8;\n\n  // Stats for multi-dimensional load balancing.\n  repeated EndpointLoadMetricStats load_metric_stats = 5;\n\n  // Endpoint granularity stats information for this locality. This information\n  // is populated if the Server requests it by setting\n  // :ref:`LoadStatsResponse.report_endpoint_granularity<envoy_api_field_service.load_stats.v3.LoadStatsResponse.report_endpoint_granularity>`.\n  repeated UpstreamEndpointStats upstream_endpoint_stats = 7;\n\n  // [#not-implemented-hide:] The priority of the endpoint group these metrics\n  // were collected from.\n  uint32 priority = 6;\n}\n\n// [#next-free-field: 8]\nmessage UpstreamEndpointStats {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.endpoint.UpstreamEndpointStats\";\n\n  // Upstream host address.\n  core.v3.Address address = 1;\n\n  // Opaque and implementation dependent metadata of the\n  // endpoint. Envoy will pass this directly to the management server.\n  google.protobuf.Struct metadata = 6;\n\n  // The total number of requests successfully completed by the endpoints in the\n  // locality. These include non-5xx responses for HTTP, where errors\n  // originate at the client and the endpoint responded successfully. 
For gRPC,\n  // the grpc-status values are those not covered by total_error_requests below.\n  uint64 total_successful_requests = 2;\n\n  // The total number of unfinished requests for this endpoint.\n  uint64 total_requests_in_progress = 3;\n\n  // The total number of requests that failed due to errors at the endpoint.\n  // For HTTP these are responses with 5xx status codes and for gRPC the\n  // grpc-status values:\n  //\n  //   - DeadlineExceeded\n  //   - Unimplemented\n  //   - Internal\n  //   - Unavailable\n  //   - Unknown\n  //   - DataLoss\n  uint64 total_error_requests = 4;\n\n  // The total number of requests that were issued to this endpoint\n  // since the last report. A single TCP connection, HTTP or gRPC\n  // request or stream is counted as one request.\n  uint64 total_issued_requests = 7;\n\n  // Stats for multi-dimensional load balancing.\n  repeated EndpointLoadMetricStats load_metric_stats = 5;\n}\n\nmessage EndpointLoadMetricStats {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.endpoint.EndpointLoadMetricStats\";\n\n  // Name of the metric; may be empty.\n  string metric_name = 1;\n\n  // Number of calls that finished and included this metric.\n  uint64 num_requests_finished_with_metric = 2;\n\n  // Sum of metric values across all calls that finished with this metric for\n  // load_reporting_interval.\n  double total_metric_value = 3;\n}\n\n// Per cluster load stats. 
Envoy reports these stats to a management server in a\n// :ref:`LoadStatsRequest<envoy_api_msg_service.load_stats.v3.LoadStatsRequest>`\n// Next ID: 7\n// [#next-free-field: 7]\nmessage ClusterStats {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.endpoint.ClusterStats\";\n\n  message DroppedRequests {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.endpoint.ClusterStats.DroppedRequests\";\n\n    // Identifier for the policy specifying the drop.\n    string category = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Total number of deliberately dropped requests for the category.\n    uint64 dropped_count = 2;\n  }\n\n  // The name of the cluster.\n  string cluster_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The eds_cluster_config service_name of the cluster.\n  // It's possible that two clusters send the same service_name to EDS,\n  // in that case, the management server is supposed to do aggregation on the load reports.\n  string cluster_service_name = 6;\n\n  // Need at least one.\n  repeated UpstreamLocalityStats upstream_locality_stats = 2\n      [(validate.rules).repeated = {min_items: 1}];\n\n  // Cluster-level stats such as total_successful_requests may be computed by\n  // summing upstream_locality_stats. In addition, below there are additional\n  // cluster-wide stats.\n  //\n  // The total number of dropped requests. This covers requests\n  // deliberately dropped by the drop_overload policy and circuit breaking.\n  uint64 total_dropped_requests = 3;\n\n  // Information about deliberately dropped requests for each category specified\n  // in the DropOverload policy.\n  repeated DroppedRequests dropped_requests = 5;\n\n  // Period over which the actual load report occurred. This will be guaranteed to include every\n  // request reported. 
Due to system load and delays between the *LoadStatsRequest* sent from Envoy\n  // and the *LoadStatsResponse* message sent from the management server, this may be longer than\n  // the requested load reporting interval in the *LoadStatsResponse*.\n  google.protobuf.Duration load_report_interval = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/README.md",
    "content": "Protocol buffer definitions for filters.\n\nVisibility of the definitions should be constrained to none except for\nshared definitions between explicitly enumerated filters (e.g. accesslog and fault definitions).\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/accesslog/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.accesslog.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.accesslog.v2\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.accesslog.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common access log types]\n\nmessage AccessLog {\n  // The name of the access log implementation to instantiate. The name must\n  // match a statically registered access log. Current built-in loggers include:\n  //\n  // #. \"envoy.access_loggers.file\"\n  // #. \"envoy.access_loggers.http_grpc\"\n  // #. \"envoy.access_loggers.tcp_grpc\"\n  string name = 1;\n\n  // Filter which is used to determine if the access log needs to be written.\n  AccessLogFilter filter = 2;\n\n  // Custom configuration that depends on the access log being instantiated. Built-in\n  // configurations include:\n  //\n  // #. \"envoy.access_loggers.file\": :ref:`FileAccessLog\n  //    <envoy_api_msg_config.accesslog.v2.FileAccessLog>`\n  // #. \"envoy.access_loggers.http_grpc\": :ref:`HttpGrpcAccessLogConfig\n  //    <envoy_api_msg_config.accesslog.v2.HttpGrpcAccessLogConfig>`\n  // #. 
\"envoy.access_loggers.tcp_grpc\": :ref:`TcpGrpcAccessLogConfig\n  //    <envoy_api_msg_config.accesslog.v2.TcpGrpcAccessLogConfig>`\n  oneof config_type {\n    google.protobuf.Struct config = 3 [deprecated = true];\n\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// [#next-free-field: 12]\nmessage AccessLogFilter {\n  oneof filter_specifier {\n    option (validate.required) = true;\n\n    // Status code filter.\n    StatusCodeFilter status_code_filter = 1;\n\n    // Duration filter.\n    DurationFilter duration_filter = 2;\n\n    // Not health check filter.\n    NotHealthCheckFilter not_health_check_filter = 3;\n\n    // Traceable filter.\n    TraceableFilter traceable_filter = 4;\n\n    // Runtime filter.\n    RuntimeFilter runtime_filter = 5;\n\n    // And filter.\n    AndFilter and_filter = 6;\n\n    // Or filter.\n    OrFilter or_filter = 7;\n\n    // Header filter.\n    HeaderFilter header_filter = 8;\n\n    // Response flag filter.\n    ResponseFlagFilter response_flag_filter = 9;\n\n    // gRPC status filter.\n    GrpcStatusFilter grpc_status_filter = 10;\n\n    // Extension filter.\n    ExtensionFilter extension_filter = 11;\n  }\n}\n\n// Filter on an integer comparison.\nmessage ComparisonFilter {\n  enum Op {\n    // =\n    EQ = 0;\n\n    // >=\n    GE = 1;\n\n    // <=\n    LE = 2;\n  }\n\n  // Comparison operator.\n  Op op = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Value to compare against.\n  api.v2.core.RuntimeUInt32 value = 2;\n}\n\n// Filters on HTTP response/status code.\nmessage StatusCodeFilter {\n  // Comparison.\n  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters on total request duration in milliseconds.\nmessage DurationFilter {\n  // Comparison.\n  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters for requests that are not health check requests. 
A health check\n// request is marked by the health check filter.\nmessage NotHealthCheckFilter {\n}\n\n// Filters for requests that are traceable. See the tracing overview for more\n// information on how a request becomes traceable.\nmessage TraceableFilter {\n}\n\n// Filters for random sampling of requests.\nmessage RuntimeFilter {\n  // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field.\n  // If found in runtime, this value will replace the default numerator.\n  string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The default sampling percentage. If not specified, defaults to 0% with denominator of 100.\n  type.FractionalPercent percent_sampled = 2;\n\n  // By default, sampling pivots on the header\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` being present. If\n  // :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` is present, the filter will\n  // consistently sample across multiple hosts based on the runtime key value and the value\n  // extracted from :ref:`x-request-id<config_http_conn_man_headers_x-request-id>`. If it is\n  // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based\n  // on the runtime key value alone. 
*use_independent_randomness* can be used for logging kill\n  // switches within complex nested :ref:`AndFilter\n  // <envoy_api_msg_config.filter.accesslog.v2.AndFilter>` and :ref:`OrFilter\n  // <envoy_api_msg_config.filter.accesslog.v2.OrFilter>` blocks that are easier to reason about\n  // from a probability perspective (i.e., setting to true will cause the filter to behave like\n  // an independent random variable when composed within logical operator filters).\n  bool use_independent_randomness = 3;\n}\n\n// Performs a logical “and” operation on the result of each filter in filters.\n// Filters are evaluated sequentially and if one of them returns false, the\n// filter returns false immediately.\nmessage AndFilter {\n  repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Performs a logical “or” operation on the result of each individual filter.\n// Filters are evaluated sequentially and if one of them returns true, the\n// filter returns true immediately.\nmessage OrFilter {\n  repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// Filters requests based on the presence or value of a request header.\nmessage HeaderFilter {\n  // Only requests with a header which matches the specified HeaderMatcher will pass the filter\n  // check.\n  api.v2.route.HeaderMatcher header = 1 [(validate.rules).message = {required: true}];\n}\n\n// Filters requests that received responses with an Envoy response flag set.\n// A list of the response flags can be found\n// in the access log formatter :ref:`documentation<config_access_log_format_response_flags>`.\nmessage ResponseFlagFilter {\n  // Only responses with the any of the flags listed in this field will be logged.\n  // This field is optional. 
If it is not specified, then any response flag will pass\n  // the filter check.\n  repeated string flags = 1 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"LH\"\n        in: \"UH\"\n        in: \"UT\"\n        in: \"LR\"\n        in: \"UR\"\n        in: \"UF\"\n        in: \"UC\"\n        in: \"UO\"\n        in: \"NR\"\n        in: \"DI\"\n        in: \"FI\"\n        in: \"RL\"\n        in: \"UAEX\"\n        in: \"RLSE\"\n        in: \"DC\"\n        in: \"URX\"\n        in: \"SI\"\n        in: \"IH\"\n        in: \"DPE\"\n      }\n    }\n  }];\n}\n\n// Filters gRPC requests based on their response status. If a gRPC status is not provided, the\n// filter will infer the status from the HTTP status code.\nmessage GrpcStatusFilter {\n  enum Status {\n    OK = 0;\n    CANCELED = 1;\n    UNKNOWN = 2;\n    INVALID_ARGUMENT = 3;\n    DEADLINE_EXCEEDED = 4;\n    NOT_FOUND = 5;\n    ALREADY_EXISTS = 6;\n    PERMISSION_DENIED = 7;\n    RESOURCE_EXHAUSTED = 8;\n    FAILED_PRECONDITION = 9;\n    ABORTED = 10;\n    OUT_OF_RANGE = 11;\n    UNIMPLEMENTED = 12;\n    INTERNAL = 13;\n    UNAVAILABLE = 14;\n    DATA_LOSS = 15;\n    UNAUTHENTICATED = 16;\n  }\n\n  // Logs only responses that have any one of the gRPC statuses in this field.\n  repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n\n  // If included and set to true, the filter will instead block all responses with a gRPC status or\n  // inferred gRPC status enumerated in statuses, and allow all other responses.\n  bool exclude = 2;\n}\n\n// Extension filter is statically registered at runtime.\nmessage ExtensionFilter {\n  // The name of the filter implementation to instantiate. 
The name must\n  // match a statically registered filter.\n  string name = 1;\n\n  // Custom configuration that depends on the filter being instantiated.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.dubbo.router.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.dubbo_proxy.router.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Router]\n// Dubbo router :ref:`configuration overview <config_dubbo_filters_router>`.\n\nmessage Router {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/fault/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/fault/v2/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.fault.v2;\n\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.fault.v2\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.common.fault.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common fault injection types]\n\n// Delay specification is used to inject latency into the\n// HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections.\n// [#next-free-field: 6]\nmessage FaultDelay {\n  enum FaultDelayType {\n    // Unused and deprecated.\n    FIXED = 0;\n  }\n\n  // Fault delays are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderDelay {\n  }\n\n  reserved 2;\n\n  // Unused and deprecated. Will be removed in the next release.\n  FaultDelayType type = 1 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  oneof fault_delay_secifier {\n    option (validate.required) = true;\n\n    // Add a fixed delay before forwarding the operation upstream. See\n    // https://developers.google.com/protocol-buffers/docs/proto3#json for\n    // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified\n    // delay will be injected before a new request/operation. For TCP\n    // connections, the proxying of the connection upstream will be delayed\n    // for the specified period. 
This is required if type is FIXED.\n    google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}];\n\n    // Fault delays are controlled via an HTTP header (if applicable).\n    HeaderDelay header_delay = 5;\n  }\n\n  // The percentage of operations/connections/requests on which the delay will be injected.\n  type.FractionalPercent percentage = 4;\n}\n\n// Describes a rate limit to be applied.\nmessage FaultRateLimit {\n  // Describes a fixed/constant rate limit.\n  message FixedLimit {\n    // The limit supplied in KiB/s.\n    uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}];\n  }\n\n  // Rate limits are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderLimit {\n  }\n\n  oneof limit_type {\n    option (validate.required) = true;\n\n    // A fixed rate limit.\n    FixedLimit fixed_limit = 1;\n\n    // Rate limits are controlled via an HTTP header (if applicable).\n    HeaderLimit header_limit = 3;\n  }\n\n  // The percentage of operations/connections/requests on which the rate limit will be injected.\n  type.FractionalPercent percentage = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.adaptive_concurrency.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha\";\noption java_outer_classname = \"AdaptiveConcurrencyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.adaptive_concurrency.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Adaptive Concurrency]\n// Adaptive Concurrency Control :ref:`configuration overview\n// <config_http_filters_adaptive_concurrency>`.\n// [#extension: envoy.filters.http.adaptive_concurrency]\n\n// Configuration parameters for the gradient controller.\nmessage GradientControllerConfig {\n  // Parameters controlling the periodic recalculation of the concurrency limit from sampled request\n  // latencies.\n  message ConcurrencyLimitCalculationParams {\n    // The allowed upper-bound on the calculated concurrency limit. 
Defaults to 1000.\n    google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}];\n\n    // The period of time samples are taken to recalculate the concurrency limit.\n    google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n  }\n\n  // Parameters controlling the periodic minRTT recalculation.\n  // [#next-free-field: 6]\n  message MinimumRTTCalculationParams {\n    // The time interval between recalculating the minimum request round-trip time.\n    google.protobuf.Duration interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n\n    // The number of requests to aggregate/sample during the minRTT recalculation window before\n    // updating. Defaults to 50.\n    google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}];\n\n    // Randomized time delta that will be introduced to the start of the minRTT calculation window.\n    // This is represented as a percentage of the interval duration. Defaults to 15%.\n    //\n    // Example: If the interval is 10s and the jitter is 15%, the next window will begin\n    // somewhere in the range (10s - 11.5s).\n    type.Percent jitter = 3;\n\n    // The concurrency limit set while measuring the minRTT. Defaults to 3.\n    google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}];\n\n    // Amount added to the measured minRTT to add stability to the concurrency limit during natural\n    // variability in latency. This is expressed as a percentage of the measured value and can be\n    // adjusted to allow more or less tolerance to the sampled latency values.\n    //\n    // Defaults to 25%.\n    type.Percent buffer = 5;\n  }\n\n  // The percentile to use when summarizing aggregated samples. 
Defaults to p50.\n  type.Percent sample_aggregate_percentile = 1;\n\n  ConcurrencyLimitCalculationParams concurrency_limit_params = 2\n      [(validate.rules).message = {required: true}];\n\n  MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}];\n}\n\nmessage AdaptiveConcurrency {\n  oneof concurrency_controller_config {\n    option (validate.required) = true;\n\n    // Gradient concurrency control will be used.\n    GradientControllerConfig gradient_controller_config = 1\n        [(validate.rules).message = {required: true}];\n  }\n\n  // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the\n  // message is unspecified, the filter will be enabled.\n  api.v2.core.RuntimeFeatureFlag enabled = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.aws_lambda.v2alpha;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.aws_lambda.v2alpha\";\noption java_outer_classname = \"AwsLambdaProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.aws_lambda.v3\";\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: AWS Lambda]\n// AWS Lambda :ref:`configuration overview <config_http_filters_aws_lambda>`.\n// [#extension: envoy.filters.http.aws_lambda]\n\n// AWS Lambda filter config\nmessage Config {\n  enum InvocationMode {\n    // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In\n    // this mode the output of the Lambda function becomes the response of the HTTP request.\n    SYNCHRONOUS = 0;\n\n    // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be\n    // used to signal events for example. 
In this mode, Lambda responds with an acknowledgment that it received the\n    // call which is translated to an HTTP 200 OK by the filter.\n    ASYNCHRONOUS = 1;\n  }\n\n  // The ARN of the AWS Lambda to invoke when the filter is engaged\n  // Must be in the following format:\n  // arn:<partition>:lambda:<region>:<account-number>:function:<function-name>\n  string arn = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Whether to transform the request (headers and body) to a JSON payload or pass it as is.\n  bool payload_passthrough = 2;\n\n  // Determines the way to invoke the Lambda function.\n  InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different\n// version of the same Lambda depending on the route.\nmessage PerRouteConfig {\n  Config invoke_config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.aws_request_signing.v2alpha;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.aws_request_signing.v2alpha\";\noption java_outer_classname = \"AwsRequestSigningProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.aws_request_signing.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: AwsRequestSigning]\n// AwsRequestSigning :ref:`configuration overview <config_http_filters_aws_request_signing>`.\n// [#extension: envoy.filters.http.aws_request_signing]\n\n// Top level configuration for the AWS request signing filter.\nmessage AwsRequestSigning {\n  // The `service namespace\n  // <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_\n  // of the HTTP endpoint.\n  //\n  // Example: s3\n  string service_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the HTTP\n  // endpoint.\n  //\n  // Example: us-west-2\n  string region = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Indicates that before signing headers, the host header will be swapped with\n  // this value. If not set or empty, the original host header value\n  // will be used and no rewrite will happen.\n  //\n  // Note: this rewrite affects both signing and host header forwarding. However, this\n  // option shouldn't be used with\n  // :ref:`HCM host rewrite <envoy_api_field_route.RouteAction.host_rewrite>` given that the\n  // value set here would be used for signing whereas the value set in the HCM would be used\n  // for host header forwarding which is not the desired outcome.\n  string host_rewrite = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/buffer/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.buffer.v2;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.buffer.v2\";\noption java_outer_classname = \"BufferProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.buffer.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Buffer]\n// Buffer :ref:`configuration overview <config_http_filters_buffer>`.\n// [#extension: envoy.filters.http.buffer]\n\nmessage Buffer {\n  reserved 2;\n\n  // The maximum request size that the filter will buffer before the connection\n  // manager will stop buffering and return a 413 response.\n  google.protobuf.UInt32Value max_request_bytes = 1\n      [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}];\n}\n\nmessage BufferPerRoute {\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the buffer filter for this particular vhost or route.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Override the global configuration of the filter with this new config.\n    Buffer buffer = 2 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/cache/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.cache.v2alpha;\n\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.cache.v2alpha\";\noption java_outer_classname = \"CacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.cache.v3alpha\";\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP Cache Filter]\n// [#extension: envoy.filters.http.cache]\n\nmessage CacheConfig {\n  // [#not-implemented-hide:]\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  message KeyCreatorParams {\n    // If true, exclude the URL scheme from the cache key. Set to true if your origins always\n    // produce the same response for http and https requests.\n    bool exclude_scheme = 1;\n\n    // If true, exclude the host from the cache key. Set to true if your origins' responses don't\n    // ever depend on host.\n    bool exclude_host = 2;\n\n    // If *query_parameters_included* is nonempty, only query parameters matched\n    // by one or more of its matchers are included in the cache key. 
Any other\n    // query params will not affect cache lookup.\n    repeated api.v2.route.QueryParameterMatcher query_parameters_included = 3;\n\n    // If *query_parameters_excluded* is nonempty, query parameters matched by one\n    // or more of its matchers are excluded from the cache key (even if also\n    // matched by *query_parameters_included*), and will not affect cache lookup.\n    repeated api.v2.route.QueryParameterMatcher query_parameters_excluded = 4;\n  }\n\n  // Config specific to the cache storage implementation.\n  google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];\n\n  // List of matching rules that defines allowed *Vary* headers.\n  //\n  // The *vary* response header holds a list of header names that affect the\n  // contents of a response, as described by\n  // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.\n  //\n  // During insertion, *allowed_vary_headers* acts as an allowlist: if a\n  // response's *vary* header mentions any header names that aren't matched by any rules in\n  // *allowed_vary_headers*, that response will not be cached.\n  //\n  // During lookup, *allowed_vary_headers* controls what request headers will be\n  // sent to the cache storage implementation.\n  repeated type.matcher.StringMatcher allowed_vary_headers = 2;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement key customization>\n  //\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  KeyCreatorParams key_creator_params = 3;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement size limit>\n  //\n  // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache\n  // storage implementation may have its own limit beyond which it will reject insertions).\n  uint32 max_body_bytes = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/compressor/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.compressor.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.compressor.v2\";\noption java_outer_classname = \"CompressorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.compressor.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Compressor]\n\n// [#next-free-field: 6]\nmessage Compressor {\n  // Minimum response length, in bytes, which will trigger compression. The default value is 30.\n  google.protobuf.UInt32Value content_length = 1;\n\n  // Set of strings that allows specifying which mime-types yield compression; e.g.,\n  // application/json, text/html, etc. When this field is not defined, compression will be applied\n  // to the following mime-types: \"application/javascript\", \"application/json\",\n  // \"application/xhtml+xml\", \"image/svg+xml\", \"text/css\", \"text/html\", \"text/plain\", \"text/xml\"\n  // and their synonyms.\n  repeated string content_type = 2;\n\n  // If true, disables compression when the response contains an etag header. When it is false, the\n  // filter will preserve weak etags and remove the ones that require strong validation.\n  bool disable_on_etag_header = 3;\n\n  // If true, removes accept-encoding from the request headers before dispatching it to the upstream\n  // so that responses do not get compressed before reaching the filter.\n  // .. attention:\n  //\n  //    To avoid interfering with other compression filters in the same chain use this option in\n  //    the filter closest to the upstream.\n  bool remove_accept_encoding_header = 4;\n\n  // Runtime flag that controls whether the filter is enabled or not. 
If set to false, the\n  // filter will operate as a pass-through filter. If not specified, defaults to enabled.\n  api.v2.core.RuntimeFeatureFlag runtime_enabled = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/cors/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.cors.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.cors.v2\";\noption java_outer_classname = \"CorsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.cors.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Cors]\n// CORS Filter :ref:`configuration overview <config_http_filters_cors>`.\n// [#extension: envoy.filters.http.cors]\n\n// Cors filter config.\nmessage Cors {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/csrf/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.csrf.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.csrf.v2\";\noption java_outer_classname = \"CsrfProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.csrf.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: CSRF]\n// Cross-Site Request Forgery :ref:`configuration overview <config_http_filters_csrf>`.\n// [#extension: envoy.filters.http.csrf]\n\n// CSRF filter config.\nmessage CsrfPolicy {\n  // Specifies the % of requests for which the CSRF filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // .. 
note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.FractionalPercent.DenominatorType>`.\n  api.v2.core.RuntimeFractionalPercent filter_enabled = 1\n      [(validate.rules).message = {required: true}];\n\n  // Specifies that CSRF policies will be evaluated and tracked, but not enforced.\n  //\n  // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise.\n  //\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* and *Destination* to determine if it's valid, but will not\n  // enforce any policies.\n  api.v2.core.RuntimeFractionalPercent shadow_enabled = 2;\n\n  // Specifies additional source origins that will be allowed in addition to\n  // the destination origin.\n  //\n  // More information on how this can be configured via runtime can be found\n  // :ref:`here <csrf-configuration>`.\n  repeated type.matcher.StringMatcher additional_origins = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.dynamic_forward_proxy.v2alpha;\n\nimport \"envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha\";\noption java_outer_classname = \"DynamicForwardProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.dynamic_forward_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamic forward proxy]\n\n// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#extension: envoy.filters.http.dynamic_forward_proxy]\nmessage FilterConfig {\n  // The DNS cache configuration that the filter will attach to. Note this configuration must\n  // match that of associated :ref:`dynamic forward proxy cluster configuration\n  // <envoy_api_field_config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// Per route Configuration for the dynamic forward proxy HTTP filter.\nmessage PerRouteConfig {\n  oneof host_rewrite_specifier {\n    // Indicates that before DNS lookup, the host header will be swapped with\n    // this value. If not set or empty, the original host header value\n    // will be used and no rewrite will happen.\n    //\n    // Note: this rewrite affects both DNS lookup and host header forwarding. 
However, this\n    // option shouldn't be used with\n    // :ref:`HCM host rewrite <envoy_api_field_route.RouteAction.host_rewrite>` given that the\n    // value set here would be used for DNS lookups whereas the value set in the HCM would be used\n    // for host header forwarding which is not the desired outcome.\n    string host_rewrite = 1 [(udpa.annotations.field_migrate).rename = \"host_rewrite_literal\"];\n\n    // Indicates that before DNS lookup, the host header will be swapped with\n    // the value of this header. If not set or empty, the original host header\n    // value will be used and no rewrite will happen.\n    //\n    // Note: this rewrite affects both DNS lookup and host header forwarding. However, this\n    // option shouldn't be used with\n    // :ref:`HCM host rewrite header <envoy_api_field_route.RouteAction.auto_host_rewrite_header>`\n    // given that the value set here would be used for DNS lookups whereas the value set in the HCM\n    // would be used for host header forwarding which is not the desired outcome.\n    string auto_host_rewrite_header = 2\n        [(udpa.annotations.field_migrate).rename = \"host_rewrite_header\"];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/dynamo/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.dynamo.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.dynamo.v2\";\noption java_outer_classname = \"DynamoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.dynamo.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamo]\n// Dynamo :ref:`configuration overview <config_http_filters_dynamo>`.\n// [#extension: envoy.filters.http.dynamo]\n\n// Dynamo filter config.\nmessage Dynamo {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/ext_authz/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.ext_authz.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/grpc_service.proto\";\nimport \"envoy/api/v2/core/http_uri.proto\";\nimport \"envoy/type/http_status.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.ext_authz.v2\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.ext_authz.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: External Authorization]\n// External Authorization :ref:`configuration overview <config_http_filters_ext_authz>`.\n// [#extension: envoy.filters.http.ext_authz]\n\n// [#next-free-field: 12]\nmessage ExtAuthz {\n  // External authorization service configuration.\n  oneof services {\n    // gRPC service configuration (default timeout: 200ms).\n    api.v2.core.GrpcService grpc_service = 1;\n\n    // HTTP service configuration (default timeout: 200ms).\n    HttpService http_service = 3;\n  }\n\n  //  Changes filter's behaviour on errors:\n  //\n  //  1. When set to true, the filter will *accept* client request even if the communication with\n  //  the authorization service has failed, or if the authorization service has returned a HTTP 5xx\n  //  error.\n  //\n  //  2. 
When set to false, ext-authz will *reject* client requests and return a *Forbidden*\n  //  response if the communication with the authorization service has failed, or if the\n  //  authorization service has returned a HTTP 5xx error.\n  //\n  // Note that errors can be *always* tracked in the :ref:`stats\n  // <config_http_filters_ext_authz_stats>`.\n  bool failure_mode_allow = 2;\n\n  // Sets the package version the gRPC service should use. This is particularly\n  // useful when transitioning from alpha to release versions assuming that both definitions are\n  // semantically compatible. Deprecation note: This field is deprecated and should only be used for\n  // version upgrade. See release notes for more details.\n  bool use_alpha = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Enables filter to buffer the client request body and send it within the authorization request.\n  // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization\n  // request message indicating if the body data is partial.\n  BufferSettings with_request_body = 5;\n\n  // Clears route cache in order to allow the external authorization service to correctly affect\n  // routing decisions. Filter clears all cached routes when:\n  //\n  // 1. The field is set to *true*.\n  //\n  // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0.\n  //\n  // 3. At least one *authorization response header* is added to the client request, or is used for\n  // altering another client request header.\n  //\n  bool clear_route_cache = 6;\n\n  // Sets the HTTP status that is returned to the client when there is a network error between the\n  // filter and the authorization server. 
The default status is HTTP 403 Forbidden.\n  type.HttpStatus status_on_error = 7;\n\n  // Specifies a list of metadata namespaces whose values, if present, will be passed to the\n  // ext_authz service as an opaque *protobuf::Struct*.\n  //\n  // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata\n  // <envoy_api_field_config.filter.http.jwt_authn.v2alpha.JwtProvider.payload_in_metadata>` is set,\n  // then the following will pass the jwt payload to the authorization server.\n  //\n  // .. code-block:: yaml\n  //\n  //    metadata_context_namespaces:\n  //    - envoy.filters.http.jwt_authn\n  //\n  repeated string metadata_context_namespaces = 8;\n\n  // Specifies if the filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // If this field is not specified, the filter will be enabled for all requests.\n  api.v2.core.RuntimeFractionalPercent filter_enabled = 9;\n\n  // Specifies whether to deny the requests, when the filter is disabled.\n  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFeatureFlag.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to determine whether to deny request for\n  // filter protected path at filter disabling. 
If filter is disabled in\n  // typed_per_filter_config for the path, requests will not be denied.\n  //\n  // If this field is not specified, all requests will be allowed when disabled.\n  api.v2.core.RuntimeFeatureFlag deny_at_disable = 11;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v2.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 10;\n}\n\n// Configuration for buffering the request data.\nmessage BufferSettings {\n  // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return\n  // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number\n  // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow\n  // <envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.failure_mode_allow>`.\n  uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached.\n  // The authorization request will be dispatched and no 413 HTTP error will be returned by the\n  // filter.\n  bool allow_partial_message = 2;\n}\n\n// HttpService is used for raw HTTP communication between the filter and the authorization service.\n// When configured, the filter will parse the client request and use these attributes to call the\n// authorization server. Depending on the response, the filter may reject or accept the client\n// request. Note that in any of these events, metadata can be added, removed or overridden by the\n// filter:\n//\n// *On authorization request*, a list of allowed request headers may be supplied. See\n// :ref:`allowed_headers\n// <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationRequest.allowed_headers>`\n// for details. 
Additional headers metadata may be added to the authorization request. See\n// :ref:`headers_to_add\n// <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationRequest.headers_to_add>` for\n// details.\n//\n// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and\n// additional headers metadata may be added to the original client request. See\n// :ref:`allowed_upstream_headers\n// <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationResponse.allowed_upstream_headers>`\n// for details.\n//\n// On other authorization response statuses, the filter will not allow traffic. Additional headers\n// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers\n// <envoy_api_field_config.filter.http.ext_authz.v2.AuthorizationResponse.allowed_client_headers>`\n// for details.\n// [#next-free-field: 9]\nmessage HttpService {\n  reserved 3, 4, 5, 6;\n\n  // Sets the HTTP server URI which the authorization requests must be sent to.\n  api.v2.core.HttpUri server_uri = 1;\n\n  // Sets a prefix to the value of authorization request header *Path*.\n  string path_prefix = 2;\n\n  // Settings used for controlling authorization request metadata.\n  AuthorizationRequest authorization_request = 7;\n\n  // Settings used for controlling authorization response metadata.\n  AuthorizationResponse authorization_response = 8;\n}\n\nmessage AuthorizationRequest {\n  // Authorization request will include the client request headers that have a correspondent match\n  // in the :ref:`list <envoy_api_msg_type.matcher.ListStringMatcher>`. Note that in addition to the\n  // user's supplied matchers:\n  //\n  // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list.\n  //\n  // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have\n  // a message body. 
However, the authorization request can include the buffered client request body\n  // (controlled by :ref:`with_request_body\n  // <envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.with_request_body>` setting),\n  // consequently the value of *Content-Length* of the authorization request reflects the size of\n  // its payload.\n  //\n  type.matcher.ListStringMatcher allowed_headers = 1;\n\n  // Sets a list of headers that will be included to the request to authorization service. Note that\n  // client request of the same key will be overridden.\n  repeated api.v2.core.HeaderValue headers_to_add = 2;\n}\n\nmessage AuthorizationResponse {\n  // When this :ref:`list <envoy_api_msg_type.matcher.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the original client request.\n  // Note that coexistent headers will be overridden.\n  type.matcher.ListStringMatcher allowed_upstream_headers = 1;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the client's response. Note\n  // that when this list is *not* set, all the authorization response headers, except *Authority\n  // (Host)* will be in the response to the client. 
When a header is included in this list, *Path*,\n  // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added.\n  type.matcher.ListStringMatcher allowed_client_headers = 2;\n}\n\n// Extra settings on a per virtualhost/route/weighted-cluster level.\nmessage ExtAuthzPerRoute {\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the ext auth filter for this particular vhost or route.\n    // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Check request settings for this route.\n    CheckSettings check_settings = 2 [(validate.rules).message = {required: true}];\n  }\n}\n\n// Extra settings for the check request. You can use this to provide extra context for the\n// external authorization server on specific virtual hosts \\ routes. For example, adding a context\n// extension on the virtual host level can give the ext-authz server information on what virtual\n// host is used without needing to parse the host header. If CheckSettings is specified in multiple\n// per-filter-configs, they will be merged in order, and the result will be used.\nmessage CheckSettings {\n  // Context extensions to set on the CheckRequest's\n  // :ref:`AttributeContext.context_extensions<envoy_api_field_service.auth.v2.AttributeContext.context_extensions>`\n  //\n  // Merge semantics for this field are such that keys from more specific configs override.\n  //\n  // .. note::\n  //\n  //   These settings are only applied to a filter configured with a\n  //   :ref:`grpc_service<envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.grpc_service>`.\n  map<string, string> context_extensions = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/fault/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/config/filter/fault/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.fault.v2;\n\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/config/filter/fault/v2/fault.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.fault.v2\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.fault.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Fault Injection]\n// Fault Injection :ref:`configuration overview <config_http_filters_fault_injection>`.\n// [#extension: envoy.filters.http.fault]\n\nmessage FaultAbort {\n  // Fault aborts are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderAbort {\n  }\n\n  reserved 1;\n\n  oneof error_type {\n    option (validate.required) = true;\n\n    // HTTP status code to use to abort the HTTP request.\n    uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n    // Fault aborts are controlled via an HTTP header (if applicable).\n    HeaderAbort header_abort = 4;\n  }\n\n  // The percentage of requests/operations/connections that will be aborted with the error code\n  // provided.\n  type.FractionalPercent percentage = 3;\n}\n\n// [#next-free-field: 14]\nmessage HTTPFault {\n  // If specified, the filter will inject delays based on the values in the\n  // object.\n  filter.fault.v2.FaultDelay delay = 1;\n\n  // If specified, the filter will abort requests based on the values in\n  // the object. 
At least *abort* or *delay* must be specified.\n  FaultAbort abort = 2;\n\n  // Specifies the name of the (destination) upstream cluster that the\n  // filter should match on. Fault injection will be restricted to requests\n  // bound to the specific upstream cluster.\n  string upstream_cluster = 3;\n\n  // Specifies a set of headers that the filter should match on. The fault\n  // injection filter can be applied selectively to requests that match a set of\n  // headers specified in the fault filter config. The chances of actual fault\n  // injection further depend on the value of the :ref:`percentage\n  // <envoy_api_field_config.filter.http.fault.v2.FaultAbort.percentage>` field.\n  // The filter will check the request's headers against all the specified\n  // headers in the filter config. A match will happen if all the headers in the\n  // config are present in the request with the same values (or based on\n  // presence if the *value* field is not in the config).\n  repeated api.v2.route.HeaderMatcher headers = 4;\n\n  // Faults are injected for the specified list of downstream hosts. If this\n  // setting is not set, faults are injected for all downstream nodes.\n  // Downstream node name is taken from :ref:`the HTTP\n  // x-envoy-downstream-service-node\n  // <config_http_conn_man_headers_downstream-service-node>` header and compared\n  // against downstream_nodes list.\n  repeated string downstream_nodes = 5;\n\n  // The maximum number of faults that can be active at a single time via the configured fault\n  // filter. Note that because this setting can be overridden at the route level, it's possible\n  // for the number of active faults to be greater than this value (if injected via a different\n  // route). If not specified, defaults to unlimited. 
This setting can be overridden via\n  // :ref:`runtime <config_http_filters_fault_injection_runtime>` and any faults that are not injected\n  // due to overflow will be indicated via the :ref:`faults_overflow\n  // <config_http_filters_fault_injection_stats>` stat.\n  //\n  // .. attention::\n  //   Like other :ref:`circuit breakers <arch_overview_circuit_break>` in Envoy, this is a fuzzy\n  //   limit. It's possible for the number of active faults to rise slightly above the configured\n  //   amount due to the implementation details.\n  google.protobuf.UInt32Value max_active_faults = 6;\n\n  // The response rate limit to be applied to the response body of the stream. When configured,\n  // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent\n  // <config_http_filters_fault_injection_runtime>` runtime key.\n  //\n  // .. attention::\n  //  This is a per-stream limit versus a connection level limit. This means that concurrent streams\n  //  will each get an independent limit.\n  filter.fault.v2.FaultRateLimit response_rate_limit = 7;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_delay_percent\n  string delay_percent_runtime = 8;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.abort_percent\n  string abort_percent_runtime = 9;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_duration_ms\n  string delay_duration_runtime = 10;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. 
The default is: fault.http.abort.http_status\n  string abort_http_status_runtime = 11;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.max_active_faults\n  string max_active_faults_runtime = 12;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.rate_limit.response_percent\n  string response_rate_limit_percent_runtime = 13;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.grpc_http1_bridge.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.grpc_http1_bridge.v2\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_http1_bridge.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC HTTP/1.1 Bridge]\n// gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview <config_http_filters_grpc_bridge>`.\n// [#extension: envoy.filters.http.grpc_http1_bridge]\n\n// gRPC HTTP/1.1 Bridge filter config.\nmessage Config {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge]\n// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview\n// <config_http_filters_grpc_http1_reverse_bridge>`.\n// [#extension: envoy.filters.http.grpc_http1_reverse_bridge]\n\n// gRPC reverse bridge filter configuration\nmessage FilterConfig {\n  // The content-type to pass to the upstream when the gRPC bridge filter is applied.\n  // The filter will also validate that the upstream responds with the same content type.\n  string content_type = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // If true, Envoy will assume that the upstream doesn't understand gRPC frames and\n  // strip the gRPC frame from the request, and add it back in to the response. This will\n  // hide the gRPC semantics from the upstream, allowing it to receive and respond with a\n  // simple binary encoded protobuf.\n  bool withhold_grpc_frames = 2;\n}\n\n// gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level.\nmessage FilterConfigPerRoute {\n  // If true, disables gRPC reverse bridge filter for this particular vhost or route.\n  // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n  bool disabled = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.grpc_stats.v2alpha;\n\nimport \"envoy/api/v2/core/grpc_method_list.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.grpc_stats.v2alpha\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_stats.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC statistics] gRPC statistics filter\n// :ref:`configuration overview <config_http_filters_grpc_stats>`.\n// [#extension: envoy.filters.http.grpc_stats]\n\n// gRPC statistics filter configuration\nmessage FilterConfig {\n  // If true, the filter maintains a filter state object with the request and response message\n  // counts.\n  bool emit_filter_state = 1;\n\n  oneof per_method_stat_specifier {\n    // If set, specifies an allowlist of service/methods that will have individual stats\n    // emitted for them. Any call that does not match the allowlist will be counted\n    // in a stat with no method specifier: `cluster.<name>.grpc.*`.\n    api.v2.core.GrpcMethodList individual_method_stats_allowlist = 2;\n\n    // If set to true, emit stats for all service/method names.\n    //\n    // If set to false, emit stats for all service/message types to the same stats without including\n    // the service/method in the name, with prefix `cluster.<name>.grpc`. This can be useful if\n    // service/method granularity is not needed, or if each cluster only receives a single method.\n    //\n    // .. attention::\n    //   This option is only safe if all clients are trusted. 
If this option is enabled\n    //   with untrusted clients, the clients could cause unbounded growth in the number of stats in\n    //   Envoy, using unbounded memory and potentially slowing down stats pipelines.\n    //\n    // .. attention::\n    //   If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the\n    //   behavior will default to `stats_for_all_methods=true`. This default value is deprecated,\n    //   and in a future release, if neither field is set, it will default to\n    //   `stats_for_all_methods=false` in order to be safe by default. This behavior can be\n    //   controlled with runtime override\n    //   `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`.\n    google.protobuf.BoolValue stats_for_all_methods = 3;\n  }\n}\n\n// gRPC statistics filter state object in protobuf form.\nmessage FilterObject {\n  // Count of request messages in the request stream.\n  uint64 request_message_count = 1;\n\n  // Count of response messages in the response stream.\n  uint64 response_message_count = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/grpc_web/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.grpc_web.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.grpc_web.v2\";\noption java_outer_classname = \"GrpcWebProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_web.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC Web]\n// gRPC Web :ref:`configuration overview <config_http_filters_grpc_web>`.\n// [#extension: envoy.filters.http.grpc_web]\n\n// gRPC Web filter config.\nmessage GrpcWeb {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/gzip/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/compressor/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.gzip.v2;\n\nimport \"envoy/config/filter/http/compressor/v2/compressor.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.gzip.v2\";\noption java_outer_classname = \"GzipProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.gzip.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Gzip]\n// Gzip :ref:`configuration overview <config_http_filters_gzip>`.\n// [#extension: envoy.filters.http.gzip]\n\n// [#next-free-field: 11]\nmessage Gzip {\n  enum CompressionStrategy {\n    DEFAULT = 0;\n    FILTERED = 1;\n    HUFFMAN = 2;\n    RLE = 3;\n  }\n\n  message CompressionLevel {\n    enum Enum {\n      DEFAULT = 0;\n      BEST = 1;\n      SPEED = 2;\n    }\n  }\n\n  // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values\n  // use more memory, but are faster and produce better compression results. The default value is 5.\n  google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}];\n\n  // Minimum response length, in bytes, which will trigger compression. The default value is 30.\n  // .. attention:\n  //\n  //    **This field is deprecated**. Set the `compressor` field instead.\n  google.protobuf.UInt32Value content_length = 2 [deprecated = true];\n\n  // A value used for selecting the zlib compression level. This setting will affect speed and\n  // amount of compression applied to the content. 
\"BEST\" provides higher compression at the cost of\n  // higher latency, \"SPEED\" provides lower compression with minimum impact on response time.\n  // \"DEFAULT\" provides an optimal result between speed and compression. This field will be set to\n  // \"DEFAULT\" if not specified.\n  CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // A value used for selecting the zlib compression strategy which is directly related to the\n  // characteristics of the content. Most of the time \"DEFAULT\" will be the best choice, though\n  // there are situations which changing this parameter might produce better results. For example,\n  // run-length encoding (RLE) is typically used when the content is known for having sequences\n  // which same data occurs many consecutive times. For more information about each strategy, please\n  // refer to zlib manual.\n  CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // Set of strings that allows specifying which mime-types yield compression; e.g.,\n  // application/json, text/html, etc. When this field is not defined, compression will be applied\n  // to the following mime-types: \"application/javascript\", \"application/json\",\n  // \"application/xhtml+xml\", \"image/svg+xml\", \"text/css\", \"text/html\", \"text/plain\", \"text/xml\".\n  // .. attention:\n  //\n  //    **This field is deprecated**. Set the `compressor` field instead.\n  repeated string content_type = 6 [deprecated = true];\n\n  // If true, disables compression when the response contains an etag header. When it is false, the\n  // filter will preserve weak etags and remove the ones that require strong validation.\n  // .. attention:\n  //\n  //    **This field is deprecated**. 
Set the `compressor` field instead.\n  bool disable_on_etag_header = 7 [deprecated = true];\n\n  // If true, removes accept-encoding from the request headers before dispatching it to the upstream\n  // so that responses do not get compressed before reaching the filter.\n  // .. attention:\n  //\n  //    **This field is deprecated**. Set the `compressor` field instead.\n  bool remove_accept_encoding_header = 8 [deprecated = true];\n\n  // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size.\n  // Larger window results in better compression at the expense of memory usage. The default is 12\n  // which will produce a 4096 bytes window. For more details about this parameter, please refer to\n  // zlib manual > deflateInit2.\n  google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}];\n\n  // Set of configuration parameters common for all compression filters. If this field is set then\n  // the fields `content_length`, `content_type`, `disable_on_etag_header` and\n  // `remove_accept_encoding_header` are ignored.\n  compressor.v2.Compressor compressor = 10;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.header_to_metadata.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2\";\noption java_outer_classname = \"HeaderToMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.header_to_metadata.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Header-To-Metadata Filter]\n//\n// The configuration for transforming headers into metadata. This is useful\n// for matching load balancer subsets, logging, etc.\n//\n// Header to Metadata :ref:`configuration overview <config_http_filters_header_to_metadata>`.\n// [#extension: envoy.filters.http.header_to_metadata]\n\nmessage Config {\n  enum ValueType {\n    STRING = 0;\n\n    NUMBER = 1;\n\n    // The value is a serialized `protobuf.Value\n    // <https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/struct.proto#L62>`_.\n    PROTOBUF_VALUE = 2;\n  }\n\n  // ValueEncode defines the encoding algorithm.\n  enum ValueEncode {\n    // The value is not encoded.\n    NONE = 0;\n\n    // The value is encoded in `Base64 <https://tools.ietf.org/html/rfc4648#section-4>`_.\n    // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the\n    // non-ASCII characters in the header.\n    BASE64 = 1;\n  }\n\n  // [#next-free-field: 6]\n  message KeyValuePair {\n    // The namespace — if this is empty, the filter's namespace will be used.\n    string metadata_namespace = 1;\n\n    // The key to use within the namespace.\n    string key = 2 [(validate.rules).string = {min_bytes: 1}];\n\n    // The value to pair with the given key.\n    //\n    // When used for a `on_header_present` case, if value is non-empty it'll be used\n    // 
instead of the header value. If both are empty, no metadata is added.\n    //\n    // When used for a `on_header_missing` case, a non-empty value must be provided\n    // otherwise no metadata is added.\n    string value = 3;\n\n    // The value's type — defaults to string.\n    ValueType type = 4;\n\n    // How is the value encoded, default is NONE (not encoded).\n    // The value will be decoded accordingly before storing to metadata.\n    ValueEncode encode = 5;\n  }\n\n  // A Rule defines what metadata to apply when a header is present or missing.\n  message Rule {\n    // The header that triggers this rule — required.\n    string header = 1\n        [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // If the header is present, apply this metadata KeyValuePair.\n    //\n    // If the value in the KeyValuePair is non-empty, it'll be used instead\n    // of the header value.\n    KeyValuePair on_header_present = 2;\n\n    // If the header is not present, apply this metadata KeyValuePair.\n    //\n    // The value in the KeyValuePair must be set, since it'll be used in lieu\n    // of the missing header value.\n    KeyValuePair on_header_missing = 3;\n\n    // Whether or not to remove the header after a rule is applied.\n    //\n    // This prevents headers from leaking.\n    bool remove = 4;\n  }\n\n  // The list of rules to apply to requests.\n  repeated Rule request_rules = 1;\n\n  // The list of rules to apply to responses.\n  repeated Rule response_rules = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/health_check/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.health_check.v2;\n\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.health_check.v2\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.health_check.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Health check]\n// Health check :ref:`configuration overview <config_http_filters_health_check>`.\n// [#extension: envoy.filters.http.health_check]\n\n// [#next-free-field: 6]\nmessage HealthCheck {\n  reserved 2;\n\n  // Specifies whether the filter operates in pass through mode or not.\n  google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}];\n\n  // If operating in pass through mode, the amount of time in milliseconds\n  // that the filter should cache the upstream response.\n  google.protobuf.Duration cache_time = 3;\n\n  // If operating in non-pass-through mode, specifies a set of upstream cluster\n  // names and the minimum percentage of servers in each of those clusters that\n  // must be healthy or degraded in order for the filter to return a 200.\n  //\n  // .. note::\n  //\n  //    This value is interpreted as an integer by truncating, so 12.50% will be calculated\n  //    as if it were 12%.\n  map<string, type.Percent> cluster_min_healthy_percentages = 4;\n\n  // Specifies a set of health check request headers to match on. The health check filter will\n  // check a request’s headers against all the specified headers. 
To specify the health check\n  // endpoint, set the ``:path`` header to match on.\n  repeated api.v2.route.HeaderMatcher headers = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.ip_tagging.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.ip_tagging.v2\";\noption java_outer_classname = \"IpTaggingProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.ip_tagging.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: IP tagging]\n// IP tagging :ref:`configuration overview <config_http_filters_ip_tagging>`.\n// [#extension: envoy.filters.http.ip_tagging]\n\nmessage IPTagging {\n  // The type of requests the filter should apply to. The supported types\n  // are internal, external or both. The\n  // :ref:`x-forwarded-for<config_http_conn_man_headers_x-forwarded-for_internal_origin>` header is\n  // used to determine if a request is internal and will result in\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>`\n  // being set. The filter defaults to both, and it will apply to all request types.\n  enum RequestType {\n    // Both external and internal requests will be tagged. This is the default value.\n    BOTH = 0;\n\n    // Only internal requests will be tagged.\n    INTERNAL = 1;\n\n    // Only external requests will be tagged.\n    EXTERNAL = 2;\n  }\n\n  // Supplies the IP tag name and the IP address subnets.\n  message IPTag {\n    // Specifies the IP tag name to apply.\n    string ip_tag_name = 1;\n\n    // A list of IP address subnets that will be tagged with\n    // ip_tag_name. 
Both IPv4 and IPv6 are supported.\n    repeated api.v2.core.CidrRange ip_list = 2;\n  }\n\n  // The type of request the filter should apply to.\n  RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system.\n  // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695]\n  // The set of IP tags for the filter.\n  repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/README.md",
    "content": "# JWT Authentication HTTP filter config\n\n## Overview\n\n1. The proto file in this folder defines an HTTP filter config for \"jwt_authn\" filter.\n\n2. This filter will verify the JWT in the HTTP request as:\n    - The signature should be valid\n    - JWT should not be expired\n    - Issuer and audiences are valid and specified in the filter config.\n\n3. [JWK](https://tools.ietf.org/html/rfc7517#appendix-A) is needed to verify JWT signature. It can be fetched from a remote server or read from a local file. If the JWKS is fetched remotely, it will be cached by the filter.\n\n3. If a JWT is valid, the user is authenticated and the request will be forwarded to the backend server. If a JWT is not valid, the request will be rejected with an error message.\n\n## The locations to extract JWT\n\nJWT will be extracted from the HTTP headers or query parameters. The default location is the HTTP header:\n```\nAuthorization: Bearer <token>\n```\nThe next default location is in the query parameter as:\n```\n?access_token=<TOKEN>\n```\n\nIf a custom location is desired, `from_headers` or `from_params` can be used to specify custom locations to extract JWT.\n\n## HTTP header to pass successfully verified JWT\n\nIf a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. 
Its value is base64url-encoded JWT payload in JSON.\n\n\n## Further header options\n\nIn addition to the `name` field, which specifies the HTTP header name,\nthe `from_headers` section can specify an optional `value_prefix` value, as in:\n\n```yaml\n    from_headers:\n      - name: bespoke\n        value_prefix: jwt_value\n```\n\nThe above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following the tag `jwt_value`.\n\nAny non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`, and `.`) will be skipped,\nand all following, contiguous, JWT-legal chars will be taken as the JWT.\n\nThis means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`:\n\n```text\nbespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk\n\nbespoke: {\"jwt_value\": \"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk\"}\n\nbespoke: beta:true,jwt_value:\"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk\",trace=1234\n```\n\nThe header `name` may be `Authorization`.\n\nThe `value_prefix` must match exactly, i.e., case-sensitively.\nIf the `value_prefix` is not found, the header is skipped: not considered as a source for a JWT token.\n\nIf there are no JWT-legal characters after the `value_prefix`, the entire string after it\nis taken to be the JWT token. This is unlikely to succeed; the error will be reported by the JWT parser."
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.jwt_authn.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/http_uri.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.jwt_authn.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: JWT Authentication]\n// JWT Authentication :ref:`configuration overview <config_http_filters_jwt_authn>`.\n// [#extension: envoy.filters.http.jwt_authn]\n\n// Please see following for JWT authentication flow:\n//\n// * `JSON Web Token (JWT) <https://tools.ietf.org/html/rfc7519>`_\n// * `The OAuth 2.0 Authorization Framework <https://tools.ietf.org/html/rfc6749>`_\n// * `OpenID Connect <http://openid.net/connect>`_\n//\n// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies:\n//\n// * issuer: the principal that issues the JWT. It has to match the one from the token.\n// * allowed audiences: the ones in the token have to be listed here.\n// * how to fetch public key JWKS to verify the token signature.\n// * how to extract JWT token in the request.\n// * how to pass successfully verified token payload.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     issuer: https://example.com\n//     audiences:\n//     - bookstore_android.apps.googleusercontent.com\n//     - bookstore_web.apps.googleusercontent.com\n//     remote_jwks:\n//       http_uri:\n//         uri: https://example.com/.well-known/jwks.json\n//         cluster: example_jwks_cluster\n//       cache_duration:\n//         seconds: 300\n//\n// [#next-free-field: 10]\nmessage JwtProvider {\n  // Specify the `principal <https://tools.ietf.org/html/rfc7519#section-4.1.1>`_ that issued\n  // the JWT, usually a URL or an email address.\n  //\n  // Example: https://securetoken.google.com\n  // Example: 1234567-compute@developer.gserviceaccount.com\n  //\n  string issuer = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The list of JWT `audiences <https://tools.ietf.org/html/rfc7519#section-4.1.3>`_ are\n  // allowed to access. A JWT containing any of these audiences will be accepted. If not specified,\n  // will not check audiences in the token.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //     audiences:\n  //     - bookstore_android.apps.googleusercontent.com\n  //     - bookstore_web.apps.googleusercontent.com\n  //\n  repeated string audiences = 2;\n\n  // `JSON Web Key Set (JWKS) <https://tools.ietf.org/html/rfc7517#appendix-A>`_ is needed to\n  // validate signature of a JWT. This field specifies where to fetch JWKS.\n  oneof jwks_source_specifier {\n    option (validate.required) = true;\n\n    // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP\n    // URI and how the fetched JWKS should be cached.\n    //\n    // Example:\n    //\n    // .. 
code-block:: yaml\n    //\n    //    remote_jwks:\n    //      http_uri:\n    //        uri: https://www.googleapis.com/oauth2/v1/certs\n    //        cluster: jwt.www.googleapis.com|443\n    //      cache_duration:\n    //        seconds: 300\n    //\n    RemoteJwks remote_jwks = 3;\n\n    // JWKS is in local data source. It could be either in a local file or embedded in the\n    // inline_string.\n    //\n    // Example: local file\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      filename: /etc/envoy/jwks/jwks1.txt\n    //\n    // Example: inline_string\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      inline_string: ACADADADADA\n    //\n    api.v2.core.DataSource local_jwks = 4;\n  }\n\n  // If false, the JWT is removed in the request after a success verification. If true, the JWT is\n  // not removed in the request. Default value is false.\n  bool forward = 5;\n\n  // Two fields below define where to extract the JWT from an HTTP request.\n  //\n  // If no explicit location is specified, the following default locations are tried in order:\n  //\n  // 1. The Authorization header using the `Bearer schema\n  // <https://tools.ietf.org/html/rfc6750#section-2.1>`_. Example::\n  //\n  //    Authorization: Bearer <token>.\n  //\n  // 2. `access_token <https://tools.ietf.org/html/rfc6750#section-2.3>`_ query parameter.\n  //\n  // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations\n  // its provider specified or from the default locations.\n  //\n  // Specify the HTTP headers to extract JWT token. For examples, following config:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_headers:\n  //   - name: x-goog-iap-jwt-assertion\n  //\n  // can be used to extract token from header::\n  //\n  //   ``x-goog-iap-jwt-assertion: <JWT>``.\n  //\n  repeated JwtHeader from_headers = 6;\n\n  // JWT is sent in a query parameter. 
`jwt_params` represents the query parameter names.\n  //\n  // For example, if config is:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_params:\n  //   - jwt_token\n  //\n  // The JWT format in query parameter is::\n  //\n  //    /path?jwt_token=<JWT>\n  //\n  repeated string from_params = 7;\n\n  // This field specifies the header name to forward a successfully verified JWT payload to the\n  // backend. The forwarded data is::\n  //\n  //    base64url_encoded(jwt_payload_in_JSON)\n  //\n  // If it is not specified, the payload will not be forwarded.\n  string forward_payload_header = 8;\n\n  // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata\n  // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn**\n  // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields*\n  // and the value is the *protobuf::Struct* converted from JWT JSON payload.\n  //\n  // For example, if payload_in_metadata is *my_payload*:\n  //\n  // .. code-block:: yaml\n  //\n  //   envoy.filters.http.jwt_authn:\n  //     my_payload:\n  //       iss: https://example.com\n  //       sub: test@example.com\n  //       aud: https://example.com\n  //       exp: 1501281058\n  //\n  string payload_in_metadata = 9;\n}\n\n// This message specifies how to fetch JWKS from remote and how to cache it.\nmessage RemoteJwks {\n  // The HTTP URI to fetch the JWKS. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //    http_uri:\n  //      uri: https://www.googleapis.com/oauth2/v1/certs\n  //      cluster: jwt.www.googleapis.com|443\n  //\n  api.v2.core.HttpUri http_uri = 1;\n\n  // Duration after which the cached JWKS should be expired. 
If not specified, default cache\n  // duration is 5 minutes.\n  google.protobuf.Duration cache_duration = 2;\n}\n\n// This message specifies a header location to extract JWT token.\nmessage JwtHeader {\n  // The HTTP header name.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The value prefix. The value format is \"value_prefix<token>\"\n  // For example, for \"Authorization: Bearer <token>\", value_prefix=\"Bearer \" with a space at the\n  // end.\n  string value_prefix = 2;\n}\n\n// Specify a required provider with audiences.\nmessage ProviderWithAudiences {\n  // Specify a required provider name.\n  string provider_name = 1;\n\n  // This field overrides the one specified in the JwtProvider.\n  repeated string audiences = 2;\n}\n\n// This message specifies a Jwt requirement. An empty message means JWT verification is not\n// required. Here are some config examples:\n//\n// .. code-block:: yaml\n//\n//  # Example 1: not required with an empty message\n//\n//  # Example 2: require A\n//  provider_name: provider-A\n//\n//  # Example 3: require A or B\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 4: require A and B\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 5: require A and (B or C)\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_any:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 6: require A or (B and C)\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_all:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 7: A is optional (if token from A is provided, it must be valid, but also allows\n//  missing token.)\n//  
requires_any:\n//    requirements:\n//    - provider_name: provider-A\n//    - allow_missing: {}\n//\n//  # Example 8: A is optional and B is required.\n//  requires_all:\n//    requirements:\n//    - requires_any:\n//        requirements:\n//        - provider_name: provider-A\n//        - allow_missing: {}\n//    - provider_name: provider-B\n//\n// [#next-free-field: 7]\nmessage JwtRequirement {\n  oneof requires_type {\n    // Specify a required provider name.\n    string provider_name = 1;\n\n    // Specify a required provider with audiences.\n    ProviderWithAudiences provider_and_audiences = 2;\n\n    // Specify list of JwtRequirement. Their results are OR-ed.\n    // If any one of them passes, the result is passed.\n    JwtRequirementOrList requires_any = 3;\n\n    // Specify list of JwtRequirement. Their results are AND-ed.\n    // All of them must pass, if one of them fails or missing, it fails.\n    JwtRequirementAndList requires_all = 4;\n\n    // The requirement is always satisfied even if JWT is missing or the JWT\n    // verification fails. A typical usage is: this filter is used to only verify\n    // JWTs and pass the verified JWT payloads to another filter, the other filter\n    // will make decision. In this mode, all JWT tokens will be verified.\n    google.protobuf.Empty allow_missing_or_failed = 5;\n\n    // The requirement is satisfied if JWT is missing, but failed if JWT is\n    // presented but invalid. Similar to allow_missing_or_failed, this is used\n    // to only verify JWTs and pass the verified payload to another filter. 
The\n    // different is this mode will reject requests with invalid tokens.\n    google.protobuf.Empty allow_missing = 6;\n  }\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are OR-ed; if any one of them passes, the result is passed\nmessage JwtRequirementOrList {\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails.\nmessage JwtRequirementAndList {\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a Jwt requirement for a specific Route condition.\n// Example 1:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /healthz\n//\n// In above example, \"requires\" field is empty for /healthz prefix match,\n// it means that requests matching the path prefix don't require JWT authentication.\n//\n// Example 2:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /\n//      requires: { provider_name: provider-A }\n//\n// In above example, all requests matched the path prefix require jwt authentication\n// from \"provider-A\".\nmessage RequirementRule {\n  // The route matching parameter. Only when the match is satisfied, the \"requires\" field will\n  // apply.\n  //\n  // For example: following match will match all requests.\n  //\n  // .. code-block:: yaml\n  //\n  //    match:\n  //      prefix: /\n  //\n  api.v2.route.RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Specify a Jwt Requirement. 
Please detail comment in message JwtRequirement.\n  JwtRequirement requires = 2;\n}\n\n// This message specifies Jwt requirements based on stream_info.filterState.\n// This FilterState should use `Router::StringAccessor` object to set a string value.\n// Other HTTP filters can use it to specify Jwt requirements dynamically.\n//\n// Example:\n//\n// .. code-block:: yaml\n//\n//    name: jwt_selector\n//    requires:\n//      issuer_1:\n//        provider_name: issuer1\n//      issuer_2:\n//        provider_name: issuer2\n//\n// If a filter set \"jwt_selector\" with \"issuer_1\" to FilterState for a request,\n// jwt_authn filter will use JwtRequirement{\"provider_name\": \"issuer1\"} to verify.\nmessage FilterStateRule {\n  // The filter state name to retrieve the `Router::StringAccessor` object.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // A map of string keys to requirements. The string key is the string value\n  // in the FilterState with the name specified in the *name* field above.\n  map<string, JwtRequirement> requires = 3;\n}\n\n// This is the Envoy HTTP filter config for JWT authentication.\n//\n// For example:\n//\n// .. 
code-block:: yaml\n//\n//   providers:\n//      provider1:\n//        issuer: issuer1\n//        audiences:\n//        - audience1\n//        - audience2\n//        remote_jwks:\n//          http_uri:\n//            uri: https://example.com/.well-known/jwks.json\n//            cluster: example_jwks_cluster\n//      provider2:\n//        issuer: issuer2\n//        local_jwks:\n//          inline_string: jwks_string\n//\n//   rules:\n//      # Not jwt verification is required for /health path\n//      - match:\n//          prefix: /health\n//\n//      # Jwt verification for provider1 is required for path prefixed with \"prefix\"\n//      - match:\n//          prefix: /prefix\n//        requires:\n//          provider_name: provider1\n//\n//      # Jwt verification for either provider1 or provider2 is required for all other requests.\n//      - match:\n//          prefix: /\n//        requires:\n//          requires_any:\n//            requirements:\n//              - provider_name: provider1\n//              - provider_name: provider2\n//\nmessage JwtAuthentication {\n  // Map of provider names to JwtProviders.\n  //\n  // .. code-block:: yaml\n  //\n  //   providers:\n  //     provider1:\n  //        issuer: issuer1\n  //        audiences:\n  //        - audience1\n  //        - audience2\n  //        remote_jwks:\n  //          http_uri:\n  //            uri: https://example.com/.well-known/jwks.json\n  //            cluster: example_jwks_cluster\n  //      provider2:\n  //        issuer: provider2\n  //        local_jwks:\n  //          inline_string: jwks_string\n  //\n  map<string, JwtProvider> providers = 1;\n\n  // Specifies requirements based on the route matches. The first matched requirement will be\n  // applied. If there are overlapped match conditions, please put the most specific match first.\n  //\n  // Examples\n  //\n  // .. 
code-block:: yaml\n  //\n  //   rules:\n  //     - match:\n  //         prefix: /healthz\n  //     - match:\n  //         prefix: /baz\n  //       requires:\n  //         provider_name: provider1\n  //     - match:\n  //         prefix: /foo\n  //       requires:\n  //         requires_any:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //     - match:\n  //         prefix: /bar\n  //       requires:\n  //         requires_all:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //\n  repeated RequirementRule rules = 2;\n\n  // This message specifies Jwt requirements based on stream_info.filterState.\n  // Other HTTP filters can use it to specify Jwt requirements dynamically.\n  // The *rules* field above is checked first, if it could not find any matches,\n  // check this one.\n  FilterStateRule filter_state_rules = 3;\n\n  // When set to true, bypass the `CORS preflight request\n  // <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight>`_ regardless of JWT\n  // requirements specified in the rules.\n  bool bypass_cors_preflight = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/lua/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.lua.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.lua.v2\";\noption java_outer_classname = \"LuaProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.lua.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Lua]\n// Lua :ref:`configuration overview <config_http_filters_lua>`.\n// [#extension: envoy.filters.http.lua]\n\nmessage Lua {\n  // The Lua code that Envoy will execute. This can be a very small script that\n  // further loads code from disk if desired. Note that if JSON configuration is used, the code must\n  // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line\n  // strings so complex scripts can be easily expressed inline in the configuration.\n  string inline_code = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/on_demand/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.on_demand.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.on_demand.v2\";\noption java_outer_classname = \"OnDemandProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.on_demand.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: OnDemand]\n// IP tagging :ref:`configuration overview <config_http_filters_on_demand>`.\n// [#extension: envoy.filters.http.on_demand]\n\nmessage OnDemand {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.original_src.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1\";\noption java_outer_classname = \"OriginalSrcProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.original_src.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Original Src Filter]\n// Use the Original source address on upstream connections.\n\n// The Original Src filter binds upstream connections to the original source address determined\n// for the request. This address could come from something like the Proxy Protocol filter, or it\n// could come from trusted http headers.\n// [#extension: envoy.filters.http.original_src]\nmessage OriginalSrc {\n  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to\n  // ensure that non-local addresses may be routed back through envoy when binding to the original\n  // source address. The option will not be applied if the mark is 0.\n  uint32 mark = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/rate_limit/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.rate_limit.v2;\n\nimport \"envoy/config/ratelimit/v2/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.rate_limit.v2\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_http_filters_rate_limit>`.\n// [#extension: envoy.filters.http.ratelimit]\n\n// [#next-free-field: 8]\nmessage RateLimit {\n  // The rate limit domain to use when calling the rate limit service.\n  string domain = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Specifies the rate limit configurations to be applied with the same\n  // stage number. If not set, the default stage number is 0.\n  //\n  // .. note::\n  //\n  //  The filter supports a range of 0 - 10 inclusively for stage numbers.\n  uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}];\n\n  // The type of requests the filter should apply to. The supported\n  // types are *internal*, *external* or *both*. A request is considered internal if\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>` is set to true. If\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>` is not set or false, a\n  // request is considered external. The filter defaults to *both*, and it will apply to all request\n  // types.\n  string request_type = 3\n      [(validate.rules).string = {in: \"internal\" in: \"external\" in: \"both\" in: \"\"}];\n\n  // The timeout in milliseconds for the rate limit service RPC. 
If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 4;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 5;\n\n  // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead\n  // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The\n  // HTTP code will be 200 for a gRPC response.\n  bool rate_limited_as_resource_exhausted = 6;\n\n  // Configuration for an external rate limit service provider. If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/rbac/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/rbac/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.rbac.v2;\n\nimport \"envoy/config/rbac/v2/rbac.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.rbac.v2\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.rbac.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_http_filters_rbac>`.\n// [#extension: envoy.filters.http.rbac]\n\n// RBAC filter config.\nmessage RBAC {\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v2.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter (i.e., returning a 403)\n  // but will emit stats and logs and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v2.RBAC shadow_rules = 2;\n}\n\nmessage RBACPerRoute {\n  reserved 1;\n\n  // Override the global configuration of the filter with this new config.\n  // If absent, the global RBAC policy will be disabled for this route.\n  RBAC rbac = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/router/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/router/v2/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.router.v2;\n\nimport \"envoy/config/filter/accesslog/v2/accesslog.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.router.v2\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.router.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Router]\n// Router :ref:`configuration overview <config_http_filters_router>`.\n// [#extension: envoy.filters.http.router]\n\n// [#next-free-field: 7]\nmessage Router {\n  // Whether the router generates dynamic cluster statistics. Defaults to\n  // true. Can be disabled in high performance scenarios.\n  google.protobuf.BoolValue dynamic_stats = 1;\n\n  // Whether to start a child span for egress routed calls. This can be\n  // useful in scenarios where other filters (auth, ratelimit, etc.) make\n  // outbound calls and have child spans rooted at the same ingress\n  // parent. Defaults to false.\n  bool start_child_span = 2;\n\n  // Configuration for HTTP upstream logs emitted by the router. Upstream logs\n  // are configured in the same way as access logs, but each log entry represents\n  // an upstream request. Presuming retries are configured, multiple upstream\n  // requests may be made for each downstream (inbound) request.\n  repeated accesslog.v2.AccessLog upstream_log = 3;\n\n  // Do not add any additional *x-envoy-* headers to requests or responses. 
This\n  // only affects the :ref:`router filter generated *x-envoy-* headers\n  // <config_http_filters_router_headers_set>`, other Envoy filters and the HTTP\n  // connection manager may continue to set *x-envoy-* headers.\n  bool suppress_envoy_headers = 4;\n\n  // Specifies a list of HTTP headers to strictly validate. Envoy will reject a\n  // request and respond with HTTP status 400 if the request contains an invalid\n  // value for any of the headers listed in this field. Strict header checking\n  // is only supported for the following headers:\n  //\n  // Value must be a ','-delimited list (i.e. no spaces) of supported retry\n  // policy values:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`\n  // * :ref:`config_http_filters_router_x-envoy-retry-on`\n  //\n  // Value must be an integer:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-max-retries`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`\n  repeated string strict_check_headers = 5 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"x-envoy-upstream-rq-timeout-ms\"\n        in: \"x-envoy-upstream-rq-per-try-timeout-ms\"\n        in: \"x-envoy-max-retries\"\n        in: \"x-envoy-retry-grpc-on\"\n        in: \"x-envoy-retry-on\"\n      }\n    }\n  }];\n\n  // If not set, ingress Envoy will ignore\n  // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress\n  // Envoy, when deriving timeout for upstream cluster.\n  bool respect_expected_rq_timeout = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/squash/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.squash.v2;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.squash.v2\";\noption java_outer_classname = \"SquashProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.squash.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Squash]\n// Squash :ref:`configuration overview <config_http_filters_squash>`.\n// [#extension: envoy.filters.http.squash]\n\n// [#next-free-field: 6]\nmessage Squash {\n  // The name of the cluster that hosts the Squash server.\n  string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // When the filter requests the Squash server to create a DebugAttachment, it will use this\n  // structure as template for the body of the request. It can contain reference to environment\n  // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server\n  // with more information to find the process to attach the debugger to. For example, in a\n  // Istio/k8s environment, this will contain information on the pod:\n  //\n  // .. code-block:: json\n  //\n  //  {\n  //    \"spec\": {\n  //      \"attachment\": {\n  //        \"pod\": \"{{ POD_NAME }}\",\n  //        \"namespace\": \"{{ POD_NAMESPACE }}\"\n  //      },\n  //      \"match_request\": true\n  //    }\n  //  }\n  //\n  // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API)\n  google.protobuf.Struct attachment_template = 2;\n\n  // The timeout for individual requests sent to the Squash cluster. 
Defaults to 1 second.\n  google.protobuf.Duration request_timeout = 3;\n\n  // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60\n  // seconds.\n  google.protobuf.Duration attachment_timeout = 4;\n\n  // Amount of time to poll for the status of the attachment object in the Squash server\n  // (to check if has been attached). Defaults to 1 second.\n  google.protobuf.Duration attachment_poll_period = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.tap.v2alpha;\n\nimport \"envoy/config/common/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.tap.v2alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.http.tap.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap]\n// Tap :ref:`configuration overview <config_http_filters_tap>`.\n// [#extension: envoy.filters.http.tap]\n\n// Top level configuration for the tap filter.\nmessage Tap {\n  // Common configuration for the HTTP tap filter.\n  common.tap.v2alpha.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/transcoder/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.http.transcoder.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.http.transcoder.v2\";\noption java_outer_classname = \"TranscoderProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.http.grpc_json_transcoder.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC-JSON transcoder]\n// gRPC-JSON transcoder :ref:`configuration overview <config_http_filters_grpc_json_transcoder>`.\n// [#extension: envoy.filters.http.grpc_json_transcoder]\n\n// [#next-free-field: 10]\nmessage GrpcJsonTranscoder {\n  message PrintOptions {\n    // Whether to add spaces, line breaks and indentation to make the JSON\n    // output easy to read. Defaults to false.\n    bool add_whitespace = 1;\n\n    // Whether to always print primitive fields. By default primitive\n    // fields with default values will be omitted in JSON output. For\n    // example, an int32 field set to 0 will be omitted. Setting this flag to\n    // true will override the default behavior and print primitive fields\n    // regardless of their values. Defaults to false.\n    bool always_print_primitive_fields = 2;\n\n    // Whether to always print enums as ints. By default they are rendered\n    // as strings. Defaults to false.\n    bool always_print_enums_as_ints = 3;\n\n    // Whether to preserve proto field names. By default protobuf will\n    // generate JSON field names using the ``json_name`` option, or lower camel case,\n    // in that order. Setting this flag will preserve the original field names. 
Defaults to false.\n    bool preserve_proto_field_names = 4;\n  }\n\n  oneof descriptor_set {\n    option (validate.required) = true;\n\n    // Supplies the filename of\n    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC\n    // services.\n    string proto_descriptor = 1;\n\n    // Supplies the binary content of\n    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC\n    // services.\n    bytes proto_descriptor_bin = 4;\n  }\n\n  // A list of strings that\n  // supplies the fully qualified service names (i.e. \"package_name.service_name\") that\n  // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``,\n  // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than\n  // the service names specified here, but they won't be translated.\n  repeated string services = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // Control options for response JSON. These options are passed directly to\n  // `JsonPrintOptions <https://developers.google.com/protocol-buffers/docs/reference/cpp/\n  // google.protobuf.util.json_util#JsonPrintOptions>`_.\n  PrintOptions print_options = 3;\n\n  // Whether to keep the incoming request route after the outgoing headers have been transformed to\n  // the match the upstream gRPC service. Note: This means that routes for gRPC services that are\n  // not transcoded cannot be used in combination with *match_incoming_request_route*.\n  bool match_incoming_request_route = 5;\n\n  // A list of query parameters to be ignored for transcoding method mapping.\n  // By default, the transcoder filter will not transcode a request if there are any\n  // unknown/invalid query parameters.\n  //\n  // Example :\n  //\n  // .. 
code-block:: proto\n  //\n  //     service Bookstore {\n  //       rpc GetShelf(GetShelfRequest) returns (Shelf) {\n  //         option (google.api.http) = {\n  //           get: \"/shelves/{shelf}\"\n  //         };\n  //       }\n  //     }\n  //\n  //     message GetShelfRequest {\n  //       int64 shelf = 1;\n  //     }\n  //\n  //     message Shelf {}\n  //\n  // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable\n  // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow\n  // the same request to be mapped to ``GetShelf``.\n  repeated string ignored_query_parameters = 6;\n\n  // Whether to route methods without the ``google.api.http`` option.\n  //\n  // Example :\n  //\n  // .. code-block:: proto\n  //\n  //     package bookstore;\n  //\n  //     service Bookstore {\n  //       rpc GetShelf(GetShelfRequest) returns (Shelf) {}\n  //     }\n  //\n  //     message GetShelfRequest {\n  //       int64 shelf = 1;\n  //     }\n  //\n  //     message Shelf {}\n  //\n  // The client could ``post`` a json body ``{\"shelf\": 1234}`` with the path of\n  // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``.\n  bool auto_mapping = 7;\n\n  // Whether to ignore query parameters that cannot be mapped to a corresponding\n  // protobuf field. Use this if you cannot control the query parameters and do\n  // not know them beforehand. 
Otherwise use ``ignored_query_parameters``.\n  // Defaults to false.\n  bool ignore_unknown_query_parameters = 8;\n\n  // Whether to convert gRPC status headers to JSON.\n  // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status``\n  // from the ``grpc-status-details-bin`` header and use it as JSON body.\n  // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and\n  // ``grpc-message`` headers.\n  // The error details types must be present in the ``proto_descriptor``.\n  //\n  // For example, if an upstream server replies with headers:\n  //\n  // .. code-block:: none\n  //\n  //     grpc-status: 5\n  //     grpc-status-details-bin:\n  //         CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ\n  //\n  // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message\n  // ``google.rpc.Status``. It will be transcoded into:\n  //\n  // .. code-block:: none\n  //\n  //     HTTP/1.1 404 Not Found\n  //     content-type: application/json\n  //\n  //     {\"code\":5,\"details\":[{\"@type\":\"type.googleapis.com/google.rpc.RequestInfo\",\"requestId\":\"r-1\"}]}\n  //\n  //  In order to transcode the message, the ``google.rpc.RequestInfo`` type from\n  //  the ``google/rpc/error_details.proto`` should be included in the configured\n  //  :ref:`proto descriptor set <config_grpc_json_generate_proto_descriptor_set>`.\n  bool convert_grpc_status = 9;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.http_inspector.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.http_inspector.v2\";\noption java_outer_classname = \"HttpInspectorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.http_inspector.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP Inspector Filter]\n// Detect whether the application protocol is HTTP.\n// [#extension: envoy.filters.listener.http_inspector]\n\nmessage HttpInspector {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/original_dst/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.original_dst.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.original_dst.v2\";\noption java_outer_classname = \"OriginalDstProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.original_dst.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Original Dst Filter]\n// Use the Original destination address on downstream connections.\n// [#extension: envoy.filters.listener.original_dst]\n\nmessage OriginalDst {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.original_src.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1\";\noption java_outer_classname = \"OriginalSrcProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.original_src.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Original Src Filter]\n// Use the Original source address on upstream connections.\n// [#extension: envoy.filters.listener.original_src]\n\n// The Original Src filter binds upstream connections to the original source address determined\n// for the connection. This address could come from something like the Proxy Protocol filter, or it\n// could come from trusted http headers.\nmessage OriginalSrc {\n  // Whether to bind the port to the one used in the original downstream connection.\n  // [#not-implemented-hide:]\n  bool bind_port = 1;\n\n  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to\n  // ensure that non-local addresses may be routed back through envoy when binding to the original\n  // source address. The option will not be applied if the mark is 0.\n  uint32 mark = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.proxy_protocol.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.proxy_protocol.v2\";\noption java_outer_classname = \"ProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.proxy_protocol.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Proxy Protocol Filter]\n// PROXY protocol listener filter.\n// [#extension: envoy.filters.listener.proxy_protocol]\n\nmessage ProxyProtocol {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.listener.tls_inspector.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.listener.tls_inspector.v2\";\noption java_outer_classname = \"TlsInspectorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.listener.tls_inspector.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: TLS Inspector Filter]\n// Allows detecting whether the transport appears to be TLS or plaintext.\n// [#extension: envoy.filters.listener.tls_inspector]\n\nmessage TlsInspector {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.client_ssl_auth.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2\";\noption java_outer_classname = \"ClientSslAuthProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.client_ssl_auth.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Client TLS authentication]\n// Client TLS authentication\n// :ref:`configuration overview <config_network_filters_client_ssl_auth>`.\n// [#extension: envoy.filters.network.client_ssl_auth]\n\nmessage ClientSSLAuth {\n  // The :ref:`cluster manager <arch_overview_cluster_manager>` cluster that runs\n  // the authentication service. The filter will connect to the service every 60s to fetch the list\n  // of principals. The service must support the expected :ref:`REST API\n  // <config_network_filters_client_ssl_auth_rest_api>`.\n  string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_client_ssl_auth_stats>`.\n  string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Time in milliseconds between principal refreshes from the\n  // authentication service. Default is 60000 (60s). The actual fetch time\n  // will be this value plus a random jittered value between\n  // 0-refresh_delay_ms milliseconds.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // An optional list of IP address and subnet masks that should be white\n  // listed for access by the filter. 
If no list is provided, there is no\n  // IP allowlist.\n  repeated api.v2.core.CidrRange ip_white_list = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/direct_response/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.direct_response.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.direct_response.v2\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.direct_response.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Direct response]\n// Direct response :ref:`configuration overview <config_network_filters_direct_response>`.\n// [#extension: envoy.filters.network.direct_response]\n\nmessage Config {\n  // Response data as a data source.\n  api.v2.core.DataSource response = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md",
    "content": "Protocol buffer definitions for the Dubbo proxy.\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.dubbo_proxy.v2alpha1;\n\nimport \"envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1\";\noption java_outer_classname = \"DubboProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.dubbo_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dubbo Proxy]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n// [#extension: envoy.filters.network.dubbo_proxy]\n\n// Dubbo Protocol types supported by Envoy.\nenum ProtocolType {\n  // the default protocol.\n  Dubbo = 0;\n}\n\n// Dubbo Serialization types supported by Envoy.\nenum SerializationType {\n  // the default serialization protocol.\n  Hessian2 = 0;\n}\n\n// [#next-free-field: 6]\nmessage DubboProxy {\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Configure the protocol used.\n  ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Configure the serialization protocol used.\n  SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  repeated RouteConfiguration route_config = 4;\n\n  // A list of individual Dubbo filters that make up the filter chain for requests made to the\n  // Dubbo proxy. Order matters as the filters are processed sequentially. 
For backwards\n  // compatibility, if no dubbo_filters are specified, a default Dubbo router filter\n  // (`envoy.filters.dubbo.router`) is used.\n  repeated DubboFilter dubbo_filters = 5;\n}\n\n// DubboFilter configures a Dubbo filter.\nmessage DubboFilter {\n  // The name of the filter to instantiate. The name must match a supported\n  // filter.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any config = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.dubbo_proxy.v2alpha1;\n\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/matcher/string.proto\";\nimport \"envoy/type/range.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.dubbo_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dubbo Proxy Route Configuration]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n\n// [#next-free-field: 6]\nmessage RouteConfiguration {\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The interface name of the service.\n  string interface = 2;\n\n  // Which group does the interface belong to.\n  string group = 3;\n\n  // The version number of the interface.\n  string version = 4;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 5;\n}\n\nmessage Route {\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  // Method level routing matching.\n  MethodMatch method = 1;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. 
A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated api.v2.route.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed.\n    string cluster = 1;\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    // Currently ClusterWeight only supports the name and weight fields.\n    api.v2.route.WeightedCluster weighted_clusters = 2;\n  }\n}\n\nmessage MethodMatch {\n  // The parameter matching type.\n  message ParameterMatchSpecifier {\n    oneof parameter_match_specifier {\n      // If specified, header match will be performed based on the value of the header.\n      string exact_match = 3;\n\n      // If specified, header match will be performed based on range.\n      // The rule will match if the request header value is within this range.\n      // The entire request header value must represent an integer in base 10 notation: consisting\n      // of an optional plus or minus sign followed by a sequence of digits. The rule will not match\n      // if the header value does not represent an integer. 
Match will fail for empty values,\n      // floating point numbers or if only a subsequence of the header value is an integer.\n      //\n      // Examples:\n      //\n      // * For range [-10,0), route will match for header value -1, but not for 0,\n      //   \"somestring\", 10.9, \"-1somestring\"\n      type.Int64Range range_match = 4;\n    }\n  }\n\n  // The name of the method.\n  type.matcher.StringMatcher name = 1;\n\n  // Method parameter definition.\n  // The key is the parameter index, starting from 0.\n  // The value is the parameter matching type.\n  map<uint32, ParameterMatchSpecifier> params_match = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/echo/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.echo.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.echo.v2\";\noption java_outer_classname = \"EchoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.network.echo.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Echo]\n// Echo :ref:`configuration overview <config_network_filters_echo>`.\n// [#extension: envoy.filters.network.echo]\n\nmessage Echo {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/ext_authz/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.ext_authz.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.ext_authz.v2\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.ext_authz.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Network External Authorization ]\n// The network layer external authorization service configuration\n// :ref:`configuration overview <config_network_filters_ext_authz>`.\n// [#extension: envoy.filters.network.ext_authz]\n\n// External Authorization filter calls out to an external service over the\n// gRPC Authorization API defined by\n// :ref:`CheckRequest <envoy_api_msg_service.auth.v2.CheckRequest>`.\n// A failed check will cause this filter to close the TCP connection.\nmessage ExtAuthz {\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The external authorization gRPC service configuration.\n  // The default timeout is set to 200ms by this filter.\n  api.v2.core.GrpcService grpc_service = 2;\n\n  // The filter's behaviour in case the external authorization service does\n  // not respond back. 
When it is set to true, Envoy will also allow traffic in case of\n  // communication failure between authorization service and the proxy.\n  // Defaults to false.\n  bool failure_mode_allow = 3;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v2.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/config/trace/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"//envoy/type/tracing/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.http_connection_manager.v2;\n\nimport \"envoy/api/v2/core/config_source.proto\";\nimport \"envoy/api/v2/core/protocol.proto\";\nimport \"envoy/api/v2/route.proto\";\nimport \"envoy/api/v2/scoped_route.proto\";\nimport \"envoy/config/filter/accesslog/v2/accesslog.proto\";\nimport \"envoy/config/trace/v2/http_tracer.proto\";\nimport \"envoy/type/percent.proto\";\nimport \"envoy/type/tracing/v2/custom_tag.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2\";\noption java_outer_classname = \"HttpConnectionManagerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.http_connection_manager.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP connection manager]\n// HTTP connection manager :ref:`configuration overview <config_http_conn_man>`.\n// [#extension: envoy.filters.network.http_connection_manager]\n\n// [#next-free-field: 37]\nmessage HttpConnectionManager {\n  enum CodecType {\n    // For every new connection, the connection manager will determine which\n    // codec to use. This mode supports both ALPN for TLS listeners as well as\n    // protocol inference for plaintext listeners. If ALPN data is available, it\n    // is preferred, otherwise protocol inference is used. 
In almost all cases,\n    // this is the right option to choose for this setting.\n    AUTO = 0;\n\n    // The connection manager will assume that the client is speaking HTTP/1.1.\n    HTTP1 = 1;\n\n    // The connection manager will assume that the client is speaking HTTP/2\n    // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN.\n    // Prior knowledge is allowed).\n    HTTP2 = 2;\n\n    // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n    // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n    // to distinguish HTTP1 and HTTP2 traffic.\n    HTTP3 = 3;\n  }\n\n  enum ServerHeaderTransformation {\n    // Overwrite any Server header with the contents of server_name.\n    OVERWRITE = 0;\n\n    // If no Server header is present, append Server server_name\n    // If a Server header is present, pass it through.\n    APPEND_IF_ABSENT = 1;\n\n    // Pass through the value of the server header, and do not append a header\n    // if none is present.\n    PASS_THROUGH = 2;\n  }\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  enum ForwardClientCertDetails {\n    // Do not send the XFCC header to the next hop. 
This is the default value.\n    SANITIZE = 0;\n\n    // When the client connection is mTLS (Mutual TLS), forward the XFCC header\n    // in the request.\n    FORWARD_ONLY = 1;\n\n    // When the client connection is mTLS, append the client certificate\n    // information to the request’s XFCC header and forward it.\n    APPEND_FORWARD = 2;\n\n    // When the client connection is mTLS, reset the XFCC header with the client\n    // certificate information and send it to the next hop.\n    SANITIZE_SET = 3;\n\n    // Always forward the XFCC header in the request, regardless of whether the\n    // client connection is mTLS.\n    ALWAYS_FORWARD_ONLY = 4;\n  }\n\n  // [#next-free-field: 10]\n  message Tracing {\n    enum OperationName {\n      // The HTTP listener is used for ingress/incoming requests.\n      INGRESS = 0;\n\n      // The HTTP listener is used for egress/outgoing requests.\n      EGRESS = 1;\n    }\n\n    // The span name will be derived from this field. If\n    // :ref:`traffic_direction <envoy_api_field_Listener.traffic_direction>` is\n    // specified on the parent listener, then it is used instead of this field.\n    //\n    // .. attention::\n    //  This field has been deprecated in favor of `traffic_direction`.\n    OperationName operation_name = 1 [\n      deprecated = true,\n      (validate.rules).enum = {defined_only: true},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n\n    // A list of header names used to create tags for the active span. The header name is used to\n    // populate the tag name, and the header value is used to populate the tag value. The tag is\n    // created if the specified header name is present in the request's headers.\n    //\n    // .. 
attention::\n    //  This field has been deprecated in favor of :ref:`custom_tags\n    //  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing.custom_tags>`.\n    repeated string request_headers_for_tags = 2 [deprecated = true];\n\n    // Target percentage of requests managed by this HTTP connection manager that will be force\n    // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n    // header is set. This field is a direct analog for the runtime variable\n    // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n    // <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.Percent client_sampling = 3;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be randomly\n    // selected for trace generation, if not requested by the client or not forced. This field is\n    // a direct analog for the runtime variable 'tracing.random_sampling' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.Percent random_sampling = 4;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be traced\n    // after all other sampling checks have been applied (client-directed, force tracing, random\n    // sampling). This field functions as an upper limit on the total configured sampling rate. For\n    // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n    // of client requests with the appropriate headers to be force traced. This field is a direct\n    // analog for the runtime variable 'tracing.global_enabled' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.Percent overall_sampling = 5;\n\n    // Whether to annotate spans with additional data. 
If true, spans will include logs for stream\n    // events.\n    bool verbose = 6;\n\n    // Maximum length of the request path to extract and include in the HttpUrl tag. Used to\n    // truncate lengthy request paths to meet the needs of a tracing backend.\n    // Default: 256\n    google.protobuf.UInt32Value max_path_tag_length = 7;\n\n    // A list of custom tags with unique tag name to create tags for the active span.\n    repeated type.tracing.v2.CustomTag custom_tags = 8;\n\n    // Configuration for an external tracing provider.\n    // If not specified, no tracing will be performed.\n    //\n    // .. attention::\n    //   Please be aware that *envoy.tracers.opencensus* provider can only be configured once\n    //   in Envoy lifetime.\n    //   Any attempts to reconfigure it or to use different configurations for different HCM filters\n    //   will be rejected.\n    //   Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes\n    //   on OpenCensus side.\n    trace.v2.Tracing.Http provider = 9;\n  }\n\n  message InternalAddressConfig {\n    // Whether unix socket addresses should be considered internal.\n    bool unix_sockets = 1;\n  }\n\n  // [#next-free-field: 7]\n  message SetCurrentClientCertDetails {\n    reserved 2;\n\n    // Whether to forward the subject of the client cert. Defaults to false.\n    google.protobuf.BoolValue subject = 1;\n\n    // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the\n    // XFCC header comma separated from other values with the value Cert=\"PEM\".\n    // Defaults to false.\n    bool cert = 3;\n\n    // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM\n    // format. 
This will appear in the XFCC header comma separated from other values with the value\n    // Chain=\"PEM\".\n    // Defaults to false.\n    bool chain = 6;\n\n    // Whether to forward the DNS type Subject Alternative Names of the client cert.\n    // Defaults to false.\n    bool dns = 4;\n\n    // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to\n    // false.\n    bool uri = 5;\n  }\n\n  // The configuration for HTTP upgrades.\n  // For each upgrade type desired, an UpgradeConfig must be added.\n  //\n  // .. warning::\n  //\n  //    The current implementation of upgrade headers does not handle\n  //    multi-valued upgrade headers. Support for multi-valued headers may be\n  //    added in the future if needed.\n  //\n  // .. warning::\n  //    The current implementation of upgrade headers does not work with HTTP/2\n  //    upstreams.\n  message UpgradeConfig {\n    // The case-insensitive name of this upgrade, e.g. \"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type]\n    // will be proxied upstream.\n    string upgrade_type = 1;\n\n    // If present, this represents the filter chain which will be created for\n    // this type of upgrade. If no filters are present, the filter chain for\n    // HTTP connections will be used for this upgrade type.\n    repeated HttpFilter filters = 2;\n\n    // Determines if upgrades are enabled or disabled by default. 
Defaults to true.\n    // This can be overridden on a per-route basis with :ref:`cluster\n    // <envoy_api_field_route.RouteAction.upgrade_configs>` as documented in the\n    // :ref:`upgrade documentation <arch_overview_upgrades>`.\n    google.protobuf.BoolValue enabled = 3;\n  }\n\n  reserved 27;\n\n  // Supplies the type of codec that the connection manager should use.\n  CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics for the\n  // connection manager. See the :ref:`statistics documentation <config_http_conn_man_stats>` for\n  // more information.\n  string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The connection manager’s route table will be dynamically loaded via the RDS API.\n    Rds rds = 3;\n\n    // The route table for the connection manager is static and is specified in this property.\n    api.v2.RouteConfiguration route_config = 4;\n\n    // A route table will be dynamically assigned to each request based on request attributes\n    // (e.g., the value of a header). The \"routing scopes\" (i.e., route tables) and \"scope keys\" are\n    // specified in this message.\n    ScopedRoutes scoped_routes = 31;\n  }\n\n  // A list of individual HTTP filters that make up the filter chain for\n  // requests made to the connection manager. :ref:`Order matters <arch_overview_http_filters_ordering>`\n  // as the filters are processed sequentially as request events happen.\n  repeated HttpFilter http_filters = 5;\n\n  // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent`\n  // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked\n  // documentation for more information. 
Defaults to false.\n  google.protobuf.BoolValue add_user_agent = 6;\n\n  // Presence of the object defines whether the connection manager\n  // emits :ref:`tracing <arch_overview_tracing>` data to the :ref:`configured tracing provider\n  // <envoy_api_msg_config.trace.v2.Tracing>`.\n  Tracing tracing = 7;\n\n  // Additional settings for HTTP requests handled by the connection manager. These will be\n  // applicable to both HTTP1 and HTTP2 requests.\n  api.v2.core.HttpProtocolOptions common_http_protocol_options = 35;\n\n  // Additional HTTP/1 settings that are passed to the HTTP/1 codec.\n  api.v2.core.Http1ProtocolOptions http_protocol_options = 8;\n\n  // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec.\n  api.v2.core.Http2ProtocolOptions http2_protocol_options = 9;\n\n  // An optional override that the connection manager will write to the server\n  // header in responses. If not set, the default is *envoy*.\n  string server_name = 10;\n\n  // Defines the action to be applied to the Server header on the response path.\n  // By default, Envoy will overwrite the header with the value specified in\n  // server_name.\n  ServerHeaderTransformation server_header_transformation = 34\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The maximum request headers size for incoming connections.\n  // If unconfigured, the default max request headers allowed is 60 KiB.\n  // Requests that exceed this limit will receive a 431 response.\n  // The max configurable limit is 96 KiB, based on current implementation\n  // constraints.\n  google.protobuf.UInt32Value max_request_headers_kb = 29\n      [(validate.rules).uint32 = {lte: 96 gt: 0}];\n\n  // The idle timeout for connections managed by the connection manager. The\n  // idle timeout is defined as the period in which there are no active\n  // requests. If not set, there is no idle timeout. When the idle timeout is\n  // reached the connection will be closed. 
If the connection is an HTTP/2\n  // connection a drain sequence will occur prior to closing the connection.\n  // This field is deprecated. Use :ref:`idle_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.common_http_protocol_options>`\n  // instead.\n  google.protobuf.Duration idle_timeout = 11\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // The stream idle timeout for connections managed by the connection manager.\n  // If not specified, this defaults to 5 minutes. The default value was selected\n  // so as not to interfere with any smaller configured timeouts that may have\n  // existed in configurations prior to the introduction of this feature, while\n  // introducing robustness to TCP connections that terminate without a FIN.\n  //\n  // This idle timeout applies to new streams and is overridable by the\n  // :ref:`route-level idle_timeout\n  // <envoy_api_field_route.RouteAction.idle_timeout>`. Even on a stream in\n  // which the override applies, prior to receipt of the initial request\n  // headers, the :ref:`stream_idle_timeout\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`\n  // applies. Each time an encode/decode event for headers or data is processed\n  // for the stream, the timer will be reset. If the timeout fires, the stream\n  // is terminated with a 408 Request Timeout error code if no upstream response\n  // header has been received, otherwise a stream reset occurs.\n  //\n  // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough\n  // window to write any remaining stream data once the entirety of stream data (local end stream is\n  // true) has been buffered pending available window. 
In other words, this timeout defends against\n  // a peer that does not release enough window to completely write the stream, even though all\n  // data has been proxied within available flow control windows. If the timeout is hit in this\n  // case, the :ref:`tx_flush_timeout <config_http_conn_man_stats_per_codec>` counter will be\n  // incremented. Note that :ref:`max_stream_duration\n  // <envoy_api_field_core.HttpProtocolOptions.max_stream_duration>` does not apply to this corner\n  // case.\n  //\n  // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due\n  // to the granularity of events presented to the connection manager. For example, while receiving\n  // very large request headers, it may be the case that there is traffic regularly arriving on the\n  // wire while the connection manager is only able to observe the end-of-headers event, hence the\n  // stream may still idle timeout.\n  //\n  // A value of 0 will completely disable the connection manager stream idle\n  // timeout, although per-route idle timeout overrides will continue to apply.\n  google.protobuf.Duration stream_idle_timeout = 24;\n\n  // The amount of time that Envoy will wait for the entire request to be received.\n  // The timer is activated when the request is initiated, and is disarmed when the last byte of the\n  // request is sent upstream (i.e. all decoding filters have processed the request), OR when the\n  // response is initiated. If not specified or set to 0, this timeout is disabled.\n  google.protobuf.Duration request_timeout = 28;\n\n  // The time that Envoy will wait between sending an HTTP/2 “shutdown\n  // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame.\n  // This is used so that Envoy provides a grace period for new streams that\n  // race with the final GOAWAY frame. During this grace period, Envoy will\n  // continue to accept new streams. 
After the grace period, a final GOAWAY\n  // frame is sent and Envoy will start refusing new streams. Draining occurs\n  // both when a connection hits the idle timeout or during general server\n  // draining. The default grace period is 5000 milliseconds (5 seconds) if this\n  // option is not specified.\n  google.protobuf.Duration drain_timeout = 12;\n\n  // The delayed close timeout is for downstream connections managed by the HTTP connection manager.\n  // It is defined as a grace period after connection close processing has been locally initiated\n  // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy\n  // from the downstream connection) prior to Envoy closing the socket associated with that\n  // connection.\n  // NOTE: This timeout is enforced even when the socket associated with the downstream connection\n  // is pending a flush of the write buffer. However, any progress made writing data to the socket\n  // will restart the timer associated with this timeout. This means that the total grace period for\n  // a socket in this state will be\n  // <total_time_waiting_for_write_buffer_flushes>+<delayed_close_timeout>.\n  //\n  // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close\n  // sequence mitigates a race condition that exists when downstream clients do not drain/process\n  // data in a connection's receive buffer after a remote close has been detected via a socket\n  // write(). This race leads to such clients failing to process the response code sent by Envoy,\n  // which could result in erroneous downstream processing.\n  //\n  // If the timeout triggers, Envoy will close the connection's socket.\n  //\n  // The default timeout is 1000 ms if this option is not specified.\n  //\n  // .. 
NOTE::\n  //    To be useful in avoiding the race condition described above, this timeout must be set\n  //    to *at least* <max round trip time expected between clients and Envoy>+<100ms to account for\n  //    a reasonable \"worst\" case processing time for a full iteration of Envoy's event loop>.\n  //\n  // .. WARNING::\n  //    A value of 0 will completely disable delayed close processing. When disabled, the downstream\n  //    connection's socket will be closed immediately after the write flush is completed or will\n  //    never close if the write flush does not complete.\n  google.protobuf.Duration delayed_close_timeout = 26;\n\n  // Configuration for :ref:`HTTP access logs <arch_overview_access_logs>`\n  // emitted by the connection manager.\n  repeated accesslog.v2.AccessLog access_log = 13;\n\n  // If set to true, the connection manager will use the real remote address\n  // of the client connection when determining internal versus external origin and manipulating\n  // various headers. If set to false or absent, the connection manager will use the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for`,\n  // :ref:`config_http_conn_man_headers_x-envoy-internal`, and\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information.\n  google.protobuf.BoolValue use_remote_address = 14;\n\n  // The number of additional ingress proxy hops from the right side of the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when\n  // determining the origin client's IP address. The default is zero if this option\n  // is not specified. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information.\n  uint32 xff_num_trusted_hops = 19;\n\n  // Configures what network addresses are considered internal for stats and header sanitation\n  // purposes. 
If unspecified, only RFC1918 IP addresses will be considered internal.\n  // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information about internal/external addresses.\n  InternalAddressConfig internal_address_config = 25;\n\n  // If set, Envoy will not append the remote address to the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in\n  // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager\n  // has mutated the request headers. While :ref:`use_remote_address\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.use_remote_address>`\n  // will also suppress XFF addition, it has consequences for logging and other\n  // Envoy uses of the remote address, so *skip_xff_append* should be used\n  // when only an elision of XFF addition is intended.\n  bool skip_xff_append = 21;\n\n  // Via header value to append to request and response headers. If this is\n  // empty, no via header will be appended.\n  string via = 22;\n\n  // Whether the connection manager will generate the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to\n  // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature\n  // is not desired it can be disabled.\n  google.protobuf.BoolValue generate_request_id = 15;\n\n  // Whether the connection manager will keep the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if passed for a request that is edge\n  // (Edge request is the request from external clients to front Envoy) and not reset it, which\n  // is the current Envoy behaviour. 
This defaults to false.\n  bool preserve_external_request_id = 32;\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  ForwardClientCertDetails forward_client_cert_details = 16\n      [(validate.rules).enum = {defined_only: true}];\n\n  // This field is valid only when :ref:`forward_client_cert_details\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.forward_client_cert_details>`\n  // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in\n  // the client certificate to be forwarded. Note that in the\n  // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and\n  // *By* is always set when the client certificate presents the URI type Subject Alternative Name\n  // value.\n  SetCurrentClientCertDetails set_current_client_cert_details = 17;\n\n  // If proxy_100_continue is true, Envoy will proxy incoming \"Expect:\n  // 100-continue\" headers upstream, and forward \"100 Continue\" responses\n  // downstream. If this is false or not set, Envoy will instead strip the\n  // \"Expect: 100-continue\" header, and send a \"100 Continue\" response itself.\n  bool proxy_100_continue = 18;\n\n  // If\n  // :ref:`use_remote_address\n  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.use_remote_address>`\n  // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is\n  // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*.\n  // This is useful for testing compatibility of upstream services that parse the header value. For\n  // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses\n  // <https://tools.ietf.org/html/rfc4291#section-2.5.5.2>`_ for details. 
This will also affect the\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See\n  // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6\n  // <config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6>` for runtime\n  // control.\n  // [#not-implemented-hide:]\n  bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20;\n\n  repeated UpgradeConfig upgrade_configs = 23;\n\n  // Should paths be normalized according to RFC 3986 before any processing of\n  // requests by HTTP filters or routing? This affects the upstream *:path* header\n  // as well. For paths that fail this check, Envoy will respond with 400 to\n  // paths that are malformed. This defaults to false currently but will default\n  // true in the future. When not specified, this value may be overridden by the\n  // runtime variable\n  // :ref:`http_connection_manager.normalize_path<config_http_conn_man_runtime_normalize_path>`.\n  // See `Normalization and Comparison <https://tools.ietf.org/html/rfc3986#section-6>`_\n  // for details of normalization.\n  // Note that Envoy does not perform\n  // `case normalization <https://tools.ietf.org/html/rfc3986#section-6.2.2.1>`_\n  google.protobuf.BoolValue normalize_path = 30;\n\n  // Determines if adjacent slashes in the path are merged into one before any processing of\n  // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without\n  // setting this option, incoming requests with path `//dir///file` will not match against route\n  // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of\n  // `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool merge_slashes = 33;\n\n  // The configuration of the request ID extension. 
This includes operations such as\n  // generation, validation, and associated tracing operations.\n  //\n  // If not set, Envoy uses the default UUID-based behavior:\n  //\n  // 1. Request ID is propagated using *x-request-id* header.\n  //\n  // 2. Request ID is a universally unique identifier (UUID).\n  //\n  // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID.\n  RequestIDExtension request_id_extension = 36;\n}\n\nmessage Rds {\n  // Configuration source specifier for RDS.\n  api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n  // The name of the route configuration. This name will be passed to the RDS\n  // API. This allows an Envoy configuration with multiple HTTP listeners (and\n  // associated HTTP connection manager filters) to use different route\n  // configurations.\n  string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// This message is used to work around the limitations with 'oneof' and repeated fields.\nmessage ScopedRouteConfigurationsList {\n  repeated api.v2.ScopedRouteConfiguration scoped_route_configurations = 1\n      [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#next-free-field: 6]\nmessage ScopedRoutes {\n  // Specifies the mechanism for constructing \"scope keys\" based on HTTP request attributes. These\n  // keys are matched against a set of :ref:`Key<envoy_api_msg_ScopedRouteConfiguration.Key>`\n  // objects assembled from :ref:`ScopedRouteConfiguration<envoy_api_msg_ScopedRouteConfiguration>`\n  // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via\n  // :ref:`scoped_route_configurations_list<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scoped_route_configurations_list>`.\n  //\n  // Upon receiving a request's headers, the Router will build a key using the algorithm specified\n  // by this message. 
This key will be used to look up the routing table (i.e., the\n  // :ref:`RouteConfiguration<envoy_api_msg_RouteConfiguration>`) to use for the request.\n  message ScopeKeyBuilder {\n    // Specifies the mechanism for constructing key fragments which are composed into scope keys.\n    message FragmentBuilder {\n      // Specifies how the value of a header should be extracted.\n      // The following example maps the structure of a header to the fields in this message.\n      //\n      // .. code::\n      //\n      //              <0> <1>   <-- index\n      //    X-Header: a=b;c=d\n      //    |         || |\n      //    |         || \\----> <element_separator>\n      //    |         ||\n      //    |         |\\----> <element.separator>\n      //    |         |\n      //    |         \\----> <element.key>\n      //    |\n      //    \\----> <name>\n      //\n      //    Each 'a=b' key-value pair constitutes an 'element' of the header field.\n      message HeaderValueExtractor {\n        // Specifies a header field's key value pair to match on.\n        message KvElement {\n          // The separator between key and value (e.g., '=' separates 'k=v;...').\n          // If an element is an empty string, the element is ignored.\n          // If an element contains no separator, the whole element is parsed as key and the\n          // fragment value is an empty string.\n          // If there are multiple values for a matched key, the first value is returned.\n          string separator = 1 [(validate.rules).string = {min_bytes: 1}];\n\n          // The key to match on.\n          string key = 2 [(validate.rules).string = {min_bytes: 1}];\n        }\n\n        // The name of the header field to extract the value from.\n        string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n        // The element separator (e.g., ';' separates 'a;b;c;d').\n        // Default: empty string. 
This causes the entirety of the header field to be extracted.\n        // If this field is set to an empty string and 'index' is used in the oneof below, 'index'\n        // must be set to 0.\n        string element_separator = 2;\n\n        oneof extract_type {\n          // Specifies the zero based index of the element to extract.\n          // Note Envoy concatenates multiple values of the same header key into a comma separated\n          // string, the splitting always happens after the concatenation.\n          uint32 index = 3;\n\n          // Specifies the key value pair to extract the value from.\n          KvElement element = 4;\n        }\n      }\n\n      oneof type {\n        option (validate.required) = true;\n\n        // Specifies how a header field's value should be extracted.\n        HeaderValueExtractor header_value_extractor = 1;\n      }\n    }\n\n    // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the\n    // fragments of a :ref:`ScopedRouteConfiguration<envoy_api_msg_ScopedRouteConfiguration>`.\n    // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key.\n    repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // The name assigned to the scoped routing configuration.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The algorithm to use for constructing a scope key for each request.\n  ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];\n\n  // Configuration source specifier for RDS.\n  // This config source is used to subscribe to RouteConfiguration resources specified in\n  // ScopedRouteConfiguration messages.\n  api.v2.core.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}];\n\n  oneof config_specifier {\n    option (validate.required) = true;\n\n    // The set of routing scopes corresponding 
to the HCM. A scope is assigned to a request by\n    // matching a key constructed from the request's attributes according to the algorithm specified\n    // by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRouteConfigurationsList scoped_route_configurations_list = 4;\n\n    // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS\n    // API. A scope is assigned to a request by matching a key constructed from the request's\n    // attributes according to the algorithm specified by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRds scoped_rds = 5;\n  }\n}\n\nmessage ScopedRds {\n  // Configuration source specifier for scoped RDS.\n  api.v2.core.ConfigSource scoped_rds_config_source = 1\n      [(validate.rules).message = {required: true}];\n}\n\nmessage HttpFilter {\n  reserved 3;\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_http_filters>`.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated. See the supported\n  // filters for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\nmessage RequestIDExtension {\n  // Request ID extension specific configuration.\n  google.protobuf.Any typed_config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.kafka_broker.v2alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.kafka_broker.v2alpha1\";\noption java_outer_classname = \"KafkaBrokerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.kafka_broker.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Kafka Broker]\n// Kafka Broker :ref:`configuration overview <config_network_filters_kafka_broker>`.\n// [#extension: envoy.filters.network.kafka_broker]\n\nmessage KafkaBroker {\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_kafka_broker_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.local_rate_limit.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/token_bucket.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.local_rate_limit.v2alpha\";\noption java_outer_classname = \"LocalRateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.local_ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Local rate limit]\n// Local rate limit :ref:`configuration overview <config_network_filters_local_rate_limit>`.\n// [#extension: envoy.filters.network.local_ratelimit]\n\nmessage LocalRateLimit {\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_local_rate_limit_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The token bucket configuration to use for rate limiting connections that are processed by the\n  // filter's filter chain. Each incoming connection processed by the filter consumes a single\n  // token. If the token is available, the connection will be allowed. If no tokens are available,\n  // the connection will be immediately closed.\n  //\n  // .. note::\n  //   In the current implementation each filter and filter chain has an independent rate limit.\n  //\n  // .. note::\n  //   In the current implementation the token bucket's :ref:`fill_interval\n  //   <envoy_api_field_type.TokenBucket.fill_interval>` must be >= 50ms to avoid too aggressive\n  //   refills.\n  type.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}];\n\n  // Runtime flag that controls whether the filter is enabled or not. 
If not specified, defaults\n  // to enabled.\n  api.v2.core.RuntimeFeatureFlag runtime_enabled = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/fault/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.mongo_proxy.v2;\n\nimport \"envoy/config/filter/fault/v2/fault.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2\";\noption java_outer_classname = \"MongoProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.mongo_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Mongo proxy]\n// MongoDB :ref:`configuration overview <config_network_filters_mongo_proxy>`.\n// [#extension: envoy.filters.network.mongo_proxy]\n\nmessage MongoProxy {\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_mongo_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The optional path to use for writing Mongo access logs. If not access log\n  // path is specified no access logs will be written. Note that access log is\n  // also gated :ref:`runtime <config_network_filters_mongo_proxy_runtime>`.\n  string access_log = 2;\n\n  // Inject a fixed delay before proxying a Mongo operation. Delays are\n  // applied to the following MongoDB operations: Query, Insert, GetMore,\n  // and KillCursors. Once an active delay is in progress, all incoming\n  // data up until the timer event fires will be a part of the delay.\n  fault.v2.FaultDelay delay = 3;\n\n  // Flag to specify whether :ref:`dynamic metadata\n  // <config_network_filters_mongo_proxy_dynamic_metadata>` should be emitted. Defaults to false.\n  bool emit_dynamic_metadata = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.mysql_proxy.v1alpha1;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1\";\noption java_outer_classname = \"MysqlProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.mysql_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: MySQL proxy]\n// MySQL Proxy :ref:`configuration overview <config_network_filters_mysql_proxy>`.\n// [#extension: envoy.filters.network.mysql_proxy]\n\nmessage MySQLProxy {\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_mysql_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // [#not-implemented-hide:] The optional path to use for writing MySQL access logs.\n  // If the access log field is empty, access logs will not be written.\n  string access_log = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/rate_limit/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/ratelimit:pkg\",\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.rate_limit.v2;\n\nimport \"envoy/api/v2/ratelimit/ratelimit.proto\";\nimport \"envoy/config/ratelimit/v2/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.rate_limit.v2\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_network_filters_rate_limit>`.\n// [#extension: envoy.filters.network.ratelimit]\n\n// [#next-free-field: 7]\nmessage RateLimit {\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_rate_limit_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The rate limit domain to use in the rate limit service request.\n  string domain = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // The rate limit descriptor list to use in the rate limit service request.\n  repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 3\n      [(validate.rules).repeated = {min_items: 1}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 4;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 5;\n\n  // Configuration for an external rate limit service provider. 
If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/rbac/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/rbac/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.rbac.v2;\n\nimport \"envoy/config/rbac/v2/rbac.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.rbac.v2\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.filters.network.rbac.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_network_filters_rbac>`.\n// [#extension: envoy.filters.network.rbac]\n\n// RBAC network filter config.\n//\n// Header should not be used in rules/shadow_rules in RBAC network filter as\n// this information is only available in :ref:`RBAC http filter <config_http_filters_rbac>`.\nmessage RBAC {\n  enum EnforcementType {\n    // Apply RBAC policies when the first byte of data arrives on the connection.\n    ONE_TIME_ON_FIRST_BYTE = 0;\n\n    // Continuously apply RBAC policies as data arrives. Use this mode when\n    // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka,\n    // etc. when the protocol decoders emit dynamic metadata such as the\n    // resources being accessed and the operations on the resources.\n    CONTINUOUS = 1;\n  }\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v2.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter but will emit stats and logs\n  // and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v2.RBAC shadow_rules = 2;\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}];\n\n  // RBAC enforcement strategy. 
By default RBAC will be enforced only once\n  // when the first byte of data arrives from the downstream. When used in\n  // conjunction with filters that emit dynamic metadata after decoding\n  // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to\n  // CONTINUOUS to enforce RBAC policies on every message boundary.\n  EnforcementType enforcement_type = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.redis_proxy.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.redis_proxy.v2\";\noption java_outer_classname = \"RedisProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.redis_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Redis Proxy]\n// Redis Proxy :ref:`configuration overview <config_network_filters_redis_proxy>`.\n// [#extension: envoy.filters.network.redis_proxy]\n\n// [#next-free-field: 7]\nmessage RedisProxy {\n  // Redis connection pool settings.\n  // [#next-free-field: 9]\n  message ConnPoolSettings {\n    // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently\n    // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data\n    // because replication is asynchronous and requires some delay. You need to ensure that your\n    // application can tolerate stale data.\n    enum ReadPolicy {\n      // Default mode. Read from the current primary node.\n      MASTER = 0;\n\n      // Read from the primary, but if it is unavailable, read from replica nodes.\n      PREFER_MASTER = 1;\n\n      // Read from replica nodes. If multiple replica nodes are present within a shard, a random\n      // node is selected. 
Healthy nodes have precedent over unhealthy nodes.\n      REPLICA = 2;\n\n      // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not\n      // present or unhealthy), read from the primary.\n      PREFER_REPLICA = 3;\n\n      // Read from any node of the cluster. A random node is selected among the primary and\n      // replicas, healthy nodes have precedent over unhealthy nodes.\n      ANY = 4;\n    }\n\n    // Per-operation timeout in milliseconds. The timer starts when the first\n    // command of a pipeline is written to the backend connection. Each response received from Redis\n    // resets the timer since it signifies that the next command is being processed by the backend.\n    // The only exception to this behavior is when a connection to a backend is not yet established.\n    // In that case, the connect timeout on the cluster will govern the timeout until the connection\n    // is ready.\n    google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}];\n\n    // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be\n    // forwarded to the same upstream. The hash key used for determining the upstream in a\n    // consistent hash ring configuration will be computed from the hash tagged key instead of the\n    // whole key. The algorithm used to compute the hash tag is identical to the `redis-cluster\n    // implementation <https://redis.io/topics/cluster-spec#keys-hash-tags>`_.\n    //\n    // Examples:\n    //\n    // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream\n    // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream\n    bool enable_hashtagging = 2;\n\n    // Accept `moved and ask redirection\n    // <https://redis.io/topics/cluster-spec#redirection-and-resharding>`_ errors from upstream\n    // redis servers, and retry commands to the specified target server. 
The target server does not\n    // need to be known to the cluster manager. If the command cannot be redirected, then the\n    // original error is passed downstream unchanged. By default, this support is not enabled.\n    bool enable_redirection = 3;\n\n    // Maximum size of encoded request buffer before flush is triggered and encoded requests\n    // are sent upstream. If this is unset, the buffer flushes whenever it receives data\n    // and performs no batching.\n    // This feature makes it possible for multiple clients to send requests to Envoy and have\n    // them batched- for example if one is running several worker processes, each with its own\n    // Redis connection. There is no benefit to using this with a single downstream process.\n    // Recommended size (if enabled) is 1024 bytes.\n    uint32 max_buffer_size_before_flush = 4;\n\n    // The encoded request buffer is flushed N milliseconds after the first request has been\n    // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`.\n    // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise,\n    // the timer should be set according to the number of clients, overall request rate and\n    // desired maximum latency for a single command. For example, if there are many requests\n    // being batched together at a high rate, the buffer will likely be filled before the timer\n    // fires. Alternatively, if the request rate is lower the buffer will not be filled as often\n    // before the timer fires.\n    // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter\n    // defaults to 3ms.\n    google.protobuf.Duration buffer_flush_timeout = 5;\n\n    // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts\n    // can be created at any given time by any given worker thread (see `enable_redirection` for\n    // more details). 
If the host is unknown and a connection cannot be created due to enforcing\n    // this limit, then redirection will fail and the original redirection error will be passed\n    // downstream unchanged. This limit defaults to 100.\n    google.protobuf.UInt32Value max_upstream_unknown_connections = 6;\n\n    // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate\n    // count.\n    bool enable_command_stats = 8;\n\n    // Read policy. The default is to read from the primary.\n    ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  message PrefixRoutes {\n    message Route {\n      // The router is capable of shadowing traffic from one cluster to another. The current\n      // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n      // respond before returning the response from the primary cluster. All normal statistics are\n      // collected for the shadow cluster making this feature useful for testing.\n      message RequestMirrorPolicy {\n        // Specifies the cluster that requests will be mirrored to. The cluster must\n        // exist in the cluster manager configuration.\n        string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n        // If not specified or the runtime key is not present, all requests to the target cluster\n        // will be mirrored.\n        //\n        // If specified, Envoy will lookup the runtime key to get the percentage of requests to the\n        // mirror.\n        api.v2.core.RuntimeFractionalPercent runtime_fraction = 2;\n\n        // Set this to TRUE to only mirror write commands, this is effectively replicating the\n        // writes in a \"fire and forget\" manner.\n        bool exclude_read_commands = 3;\n      }\n\n      // String prefix that must match the beginning of the keys. 
Envoy will always favor the\n      // longest match.\n      string prefix = 1;\n\n      // Indicates if the prefix needs to be removed from the key when forwarded.\n      bool remove_prefix = 2;\n\n      // Upstream cluster to forward the command to.\n      string cluster = 3 [(validate.rules).string = {min_bytes: 1}];\n\n      // Indicates that the route has a request mirroring policy.\n      repeated RequestMirrorPolicy request_mirror_policy = 4;\n    }\n\n    // List of prefix routes.\n    repeated Route routes = 1;\n\n    // Indicates that prefix matching should be case insensitive.\n    bool case_insensitive = 2;\n\n    // Optional catch-all route to forward commands that doesn't match any of the routes. The\n    // catch-all route becomes required when no routes are specified.\n    // .. attention::\n    //\n    //   This field is deprecated. Use a :ref:`catch_all\n    //   route<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_route>`\n    //   instead.\n    string catch_all_cluster = 3\n        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n    // Optional catch-all route to forward commands that doesn't match any of the routes. The\n    // catch-all route becomes required when no routes are specified.\n    Route catch_all_route = 4;\n  }\n\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_redis_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Name of cluster from cluster manager. See the :ref:`configuration section\n  // <arch_overview_redis_configuration>` of the architecture overview for recommendations on\n  // configuring the backing cluster.\n  //\n  // .. attention::\n  //\n  //   This field is deprecated. 
Use a :ref:`catch_all\n  //   route<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_route>`\n  //   instead.\n  string cluster = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  // Network settings for the connection pool to the upstream clusters.\n  ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}];\n\n  // Indicates that latency stat should be computed in microseconds. By default it is computed in\n  // milliseconds.\n  bool latency_in_micros = 4;\n\n  // List of **unique** prefixes used to separate keys from different workloads to different\n  // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all\n  // cluster can be used to forward commands when there is no match. Time complexity of the\n  // lookups are in O(min(longest key prefix, key length)).\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    prefix_routes:\n  //      routes:\n  //        - prefix: \"ab\"\n  //          cluster: \"cluster_a\"\n  //        - prefix: \"abc\"\n  //          cluster: \"cluster_b\"\n  //\n  // When using the above routes, the following prefixes would be sent to:\n  //\n  // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b.\n  // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a.\n  // * ``get z:users`` would return a NoUpstreamHost error. 
A :ref:`catch-all\n  //   route<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_route>`\n  //   would have retrieved the key from that cluster instead.\n  //\n  // See the :ref:`configuration section\n  // <arch_overview_redis_configuration>` of the architecture overview for recommendations on\n  // configuring the backing clusters.\n  PrefixRoutes prefix_routes = 5;\n\n  // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis\n  // AUTH command <https://redis.io/commands/auth>`_ with this password before enabling any other\n  // command. If an AUTH command's password matches this password, an \"OK\" response will be returned\n  // to the client. If the AUTH command password does not match this password, then an \"ERR invalid\n  // password\" error will be returned. If any other command is received before AUTH when this\n  // password is set, then a \"NOAUTH Authentication required.\" error response will be sent to the\n  // client. If an AUTH command is received when the password is not set, then an \"ERR Client sent\n  // AUTH, but no password is set\" error will be returned.\n  api.v2.core.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true];\n}\n\n// RedisProtocolOptions specifies Redis upstream protocol options. This object is used in\n// :ref:`typed_extension_protocol_options<envoy_api_field_Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.redis_proxy`.\nmessage RedisProtocolOptions {\n  // Upstream server password as defined by the `requirepass directive\n  // <https://redis.io/topics/config>`_ in the server's configuration file.\n  api.v2.core.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.sni_cluster.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.sni_cluster.v2\";\noption java_outer_classname = \"SniClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.sni_cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: SNI Cluster Filter]\n// Set the upstream cluster name from the SNI field in the TLS connection.\n// [#extension: envoy.filters.network.sni_cluster]\n\nmessage SniCluster {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/filter/accesslog/v2:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.tcp_proxy.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/config/filter/accesslog/v2/accesslog.proto\";\nimport \"envoy/type/hash_policy.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2\";\noption java_outer_classname = \"TcpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.tcp_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: TCP Proxy]\n// TCP Proxy :ref:`configuration overview <config_network_filters_tcp_proxy>`.\n// [#extension: envoy.filters.network.tcp_proxy]\n\n// [#next-free-field: 13]\nmessage TcpProxy {\n  // [#not-implemented-hide:] Deprecated.\n  // TCP Proxy filter configuration using V1 format.\n  message DeprecatedV1 {\n    option deprecated = true;\n\n    // A TCP proxy route consists of a set of optional L4 criteria and the\n    // name of a cluster. If a downstream connection matches all the\n    // specified criteria, the cluster in the route is used for the\n    // corresponding upstream connection. Routes are tried in the order\n    // specified until a match is found. If no match is found, the connection\n    // is closed. 
A route with no criteria is valid and always produces a\n    // match.\n    // [#next-free-field: 6]\n    message TCPRoute {\n      // The cluster to connect to when a the downstream network connection\n      // matches the specified criteria.\n      string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // An optional list of IP address subnets in the form\n      // “ip_address/xx”. The criteria is satisfied if the destination IP\n      // address of the downstream connection is contained in at least one of\n      // the specified subnets. If the parameter is not specified or the list\n      // is empty, the destination IP address is ignored. The destination IP\n      // address of the downstream connection might be different from the\n      // addresses on which the proxy is listening if the connection has been\n      // redirected.\n      repeated api.v2.core.CidrRange destination_ip_list = 2;\n\n      // An optional string containing a comma-separated list of port numbers\n      // or ranges. The criteria is satisfied if the destination port of the\n      // downstream connection is contained in at least one of the specified\n      // ranges. If the parameter is not specified, the destination port is\n      // ignored. The destination port address of the downstream connection\n      // might be different from the port on which the proxy is listening if\n      // the connection has been redirected.\n      string destination_ports = 3;\n\n      // An optional list of IP address subnets in the form\n      // “ip_address/xx”. The criteria is satisfied if the source IP address\n      // of the downstream connection is contained in at least one of the\n      // specified subnets. If the parameter is not specified or the list is\n      // empty, the source IP address is ignored.\n      repeated api.v2.core.CidrRange source_ip_list = 4;\n\n      // An optional string containing a comma-separated list of port numbers\n      // or ranges. 
The criteria is satisfied if the source port of the\n      // downstream connection is contained in at least one of the specified\n      // ranges. If the parameter is not specified, the source port is\n      // ignored.\n      string source_ports = 5;\n    }\n\n    // The route table for the filter. All filter instances must have a route\n    // table, even if it is empty.\n    repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Allows for specification of multiple upstream clusters along with weights\n  // that indicate the percentage of traffic to be forwarded to each cluster.\n  // The router selects an upstream cluster based on these weights.\n  message WeightedCluster {\n    message ClusterWeight {\n      // Name of the upstream cluster.\n      string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      // When a request matches the route, the choice of an upstream cluster is\n      // determined by its weight. The sum of weights across all entries in the\n      // clusters array determines the total weight.\n      uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n      // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n      // in the upstream cluster with metadata matching what is set in this field will be considered\n      // for load balancing. Note that this will be merged with what's provided in\n      // :ref:`TcpProxy.metadata_match\n      // <envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.metadata_match>`, with values\n      // here taking precedence. 
The filter name should be specified as *envoy.lb*.\n      api.v2.core.Metadata metadata_match = 3;\n    }\n\n    // Specifies one or more upstream clusters associated with the route.\n    repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Configuration for tunneling TCP over other transports or application layers.\n  // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will\n  // remain the default.\n  message TunnelingConfig {\n    // The hostname to send in the synthesized CONNECT headers to the upstream proxy.\n    string hostname = 1 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_tcp_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2;\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 10;\n  }\n\n  // Optional endpoint metadata match criteria. Only endpoints in the upstream\n  // cluster with metadata matching that set in metadata_match will be\n  // considered. The filter name should be specified as *envoy.lb*.\n  api.v2.core.Metadata metadata_match = 9;\n\n  // The idle timeout for connections managed by the TCP proxy filter. The idle timeout\n  // is defined as the period in which there are no bytes sent or received on either\n  // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set\n  // to 0s, the timeout will be disabled.\n  //\n  // .. 
warning::\n  //   Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 8;\n\n  // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy\n  // filter. The idle timeout is defined as the period in which there is no\n  // active traffic. If not set, there is no idle timeout. When the idle timeout\n  // is reached the connection will be closed. The distinction between\n  // downstream_idle_timeout/upstream_idle_timeout provides a means to set\n  // timeout based on the last byte sent on the downstream/upstream connection.\n  google.protobuf.Duration downstream_idle_timeout = 3;\n\n  // [#not-implemented-hide:]\n  google.protobuf.Duration upstream_idle_timeout = 4;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by the this tcp_proxy.\n  repeated accesslog.v2.AccessLog access_log = 5;\n\n  // [#not-implemented-hide:] Deprecated.\n  DeprecatedV1 deprecated_v1 = 6 [deprecated = true];\n\n  // The maximum number of unsuccessful connection attempts that will be made before\n  // giving up. If the parameter is not specified, 1 connection attempt will be made.\n  google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based\n  // load balancing algorithms will select a host randomly. Currently the number of hash policies is\n  // limited to 1.\n  repeated type.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}];\n\n  // [#not-implemented-hide:] feature in progress\n  // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP\n  // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload\n  // will be proxied upstream as per usual.\n  TunnelingConfig tunneling_config = 12;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md",
    "content": "Protocol buffer definitions for the Thrift proxy.\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.thrift_proxy.v2alpha1;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.thrift_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Thrift Proxy Route Configuration]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n\nmessage RouteConfiguration {\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  oneof match_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route must exactly match the request method name. As a special case, an\n    // empty string matches any request method name.\n    string method_name = 1;\n\n    // If specified, the route must have the service name as the request method name prefix. As a\n    // special case, an empty string matches any service name. 
Only relevant when service\n    // multiplexing.\n    string service_name = 2;\n  }\n\n  // Inverts whatever matching is done in the :ref:`method_name\n  // <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.RouteMatch.method_name>` or\n  // :ref:`service_name\n  // <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.RouteMatch.service_name>` fields.\n  // Cannot be combined with wildcard matching as that would result in routes never being matched.\n  //\n  // .. note::\n  //\n  //   This does not invert matching done as part of the :ref:`headers field\n  //   <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.RouteMatch.headers>` field. To\n  //   invert header matching, see :ref:`invert_match\n  //   <envoy_api_field_route.HeaderMatcher.invert_match>`.\n  bool invert = 3;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config). Note that this only applies for Thrift transports and/or\n  // protocols that support headers.\n  repeated api.v2.route.HeaderMatcher headers = 4;\n}\n\n// [#next-free-field: 7]\nmessage RouteAction {\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates a single upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 2;\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // Thrift header named by cluster_header from the request headers. 
If the\n    // header is not found or the referenced cluster does not exist Envoy will\n    // respond with an unknown method exception or an internal error exception,\n    // respectively.\n    string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n  // the upstream cluster with metadata matching what is set in this field will be considered.\n  // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match\n  // <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight.metadata_match>`,\n  // with values there taking precedence. Keys and values should be provided under the \"envoy.lb\"\n  // metadata key.\n  api.v2.core.Metadata metadata_match = 3;\n\n  // Specifies a set of rate limit configurations that could be applied to the route.\n  // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders\n  // action with the header name \":method-name\".\n  repeated api.v2.route.RateLimit rate_limits = 4;\n\n  // Strip the service prefix from the method name, if there's a prefix. For\n  // example, the method call Service:method would end up being just method.\n  bool strip_service_name = 5;\n}\n\n// Allows for specification of multiple upstream clusters along with weights that indicate the\n// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster\n// based on these weights.\nmessage WeightedCluster {\n  message ClusterWeight {\n    // Name of the upstream cluster.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // When a request matches the route, the choice of an upstream cluster is determined by its\n    // weight. 
The sum of weights across all entries in the clusters array determines the total\n    // weight.\n    google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field, combined with what's\n    // provided in :ref:`RouteAction's metadata_match\n    // <envoy_api_field_config.filter.network.thrift_proxy.v2alpha1.RouteAction.metadata_match>`,\n    // will be considered. Values here will take precedence. Keys and values should be provided\n    // under the \"envoy.lb\" metadata key.\n    api.v2.core.Metadata metadata_match = 3;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.thrift_proxy.v2alpha1;\n\nimport \"envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1\";\noption java_outer_classname = \"ThriftProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.thrift_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Thrift Proxy]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n// [#extension: envoy.filters.network.thrift_proxy]\n\n// Thrift transport types supported by Envoy.\nenum TransportType {\n  // For downstream connections, the Thrift proxy will attempt to determine which transport to use.\n  // For upstream connections, the Thrift proxy will use same transport as the downstream\n  // connection.\n  AUTO_TRANSPORT = 0;\n\n  // The Thrift proxy will use the Thrift framed transport.\n  FRAMED = 1;\n\n  // The Thrift proxy will use the Thrift unframed transport.\n  UNFRAMED = 2;\n\n  // The Thrift proxy will assume the client is using the Thrift header transport.\n  HEADER = 3;\n}\n\n// Thrift Protocol types supported by Envoy.\nenum ProtocolType {\n  // For downstream connections, the Thrift proxy will attempt to determine which protocol to use.\n  // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol\n  // detection. 
For upstream connections, the Thrift proxy will use the same protocol as the\n  // downstream connection.\n  AUTO_PROTOCOL = 0;\n\n  // The Thrift proxy will use the Thrift binary protocol.\n  BINARY = 1;\n\n  // The Thrift proxy will use Thrift non-strict binary protocol.\n  LAX_BINARY = 2;\n\n  // The Thrift proxy will use the Thrift compact protocol.\n  COMPACT = 3;\n\n  // The Thrift proxy will use the Thrift \"Twitter\" protocol implemented by the finagle library.\n  TWITTER = 4;\n}\n\n// [#next-free-field: 6]\nmessage ThriftProxy {\n  // Supplies the type of transport that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_config.filter.network.thrift_proxy.v2alpha1.TransportType.AUTO_TRANSPORT>`.\n  TransportType transport = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_config.filter.network.thrift_proxy.v2alpha1.ProtocolType.AUTO_PROTOCOL>`.\n  ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  RouteConfiguration route_config = 4;\n\n  // A list of individual Thrift filters that make up the filter chain for requests made to the\n  // Thrift proxy. Order matters as the filters are processed sequentially. For backwards\n  // compatibility, if no thrift_filters are specified, a default Thrift router filter\n  // (`envoy.filters.thrift.router`) is used.\n  repeated ThriftFilter thrift_filters = 5;\n}\n\n// ThriftFilter configures a Thrift filter.\nmessage ThriftFilter {\n  // The name of the filter to instantiate. The name must match a supported\n  // filter. 
The built-in filters are:\n  //\n  // [#comment:TODO(zuercher): Auto generate the following list]\n  // * :ref:`envoy.filters.thrift.router <config_thrift_filters_router>`\n  // * :ref:`envoy.filters.thrift.rate_limit <config_thrift_filters_rate_limit>`\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated. See the supported\n  // filters for further documentation.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in\n// in\n// :ref:`typed_extension_protocol_options<envoy_api_field_Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.thrift_proxy`.\nmessage ThriftProtocolOptions {\n  // Supplies the type of transport that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_config.filter.network.thrift_proxy.v2alpha1.TransportType.AUTO_TRANSPORT>`,\n  // which is the default, causes the proxy to use the same transport as the downstream connection.\n  TransportType transport = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_config.filter.network.thrift_proxy.v2alpha1.ProtocolType.AUTO_PROTOCOL>`,\n  // which is the default, causes the proxy to use the same protocol as the downstream connection.\n  ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.network.zookeeper_proxy.v1alpha1;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1\";\noption java_outer_classname = \"ZookeeperProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.zookeeper_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: ZooKeeper proxy]\n// ZooKeeper Proxy :ref:`configuration overview <config_network_filters_zookeeper_proxy>`.\n// [#extension: envoy.filters.network.zookeeper_proxy]\n\nmessage ZooKeeperProxy {\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_zookeeper_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs.\n  // If the access log field is empty, access logs will not be written.\n  string access_log = 2;\n\n  // Messages — requests, responses and events — that are bigger than this value will\n  // be ignored. If it is not set, the default value is 1Mb.\n  //\n  // The value here should match the jute.maxbuffer property in your cluster configuration:\n  //\n  // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options\n  //\n  // if that is set. If it isn't, ZooKeeper's default is also 1Mb.\n  google.protobuf.UInt32Value max_packet_bytes = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.thrift.rate_limit.v2alpha1;\n\nimport \"envoy/config/ratelimit/v2/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_thrift_filters_rate_limit>`.\n// [#extension: envoy.filters.thrift.ratelimit]\n\n// [#next-free-field: 6]\nmessage RateLimit {\n  // The rate limit domain to use in the rate limit service request.\n  string domain = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Specifies the rate limit configuration stage. Each configured rate limit filter performs a\n  // rate limit check using descriptors configured in the\n  // :ref:`envoy_api_msg_config.filter.network.thrift_proxy.v2alpha1.RouteAction` for the request.\n  // Only those entries with a matching stage number are used for a given filter. If not set, the\n  // default stage number is 0.\n  //\n  // .. note::\n  //\n  //  The filter supports a range of 0 - 10 inclusively for stage numbers.\n  uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 3;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. 
When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 4;\n\n  // Configuration for an external rate limit service provider. If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.thrift.router.v2alpha1;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Router]\n// Thrift router :ref:`configuration overview <config_thrift_filters_router>`.\n// [#extension: envoy.filters.thrift.router]\n\nmessage Router {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.filter.udp.udp_proxy.v2alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha\";\noption java_outer_classname = \"UdpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.filters.udp.udp_proxy.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: UDP proxy]\n// UDP proxy :ref:`configuration overview <config_udp_listener_filters_udp_proxy>`.\n// [#extension: envoy.filters.udp_listener.udp_proxy]\n\n// Configuration for the UDP proxy filter.\nmessage UdpProxyConfig {\n  // The stat prefix used when emitting UDP proxy filter stats.\n  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by\n  // the session. The default if not specified is 1 minute.\n  google.protobuf.Duration idle_timeout = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/grpc_credential/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.grpc_credential.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.grpc_credential.v2alpha\";\noption java_outer_classname = \"AwsIamProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Grpc Credentials AWS IAM]\n// Configuration for AWS IAM Grpc Credentials Plugin\n// [#extension: envoy.grpc_credentials.aws_iam]\n\nmessage AwsIamConfig {\n  // The `service namespace\n  // <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_\n  // of the Grpc endpoint.\n  //\n  // Example: appmesh\n  string service_name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the Grpc\n  // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment\n  // variable.\n  //\n  // Example: us-west-2\n  string region = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.grpc_credential.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.grpc_credential.v2alpha\";\noption java_outer_classname = \"FileBasedMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Grpc Credentials File Based Metadata]\n// Configuration for File Based Metadata Grpc Credentials Plugin\n// [#extension: envoy.grpc_credentials.file_based_metadata]\n\nmessage FileBasedMetadataConfig {\n  // Location or inline data of secret to use for authentication of the Google gRPC connection\n  // this secret will be attached to a header of the gRPC connection\n  api.v2.core.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true];\n\n  // Metadata header key to use for sending the secret data\n  // if no header key is set, \"authorization\" header will be used\n  string header_key = 2;\n\n  // Prefix to prepend to the secret in the metadata header\n  // if no prefix is set, the default is to use no prefix\n  string header_prefix = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/grpc_credential/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/grpc_credential/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.grpc_credential.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.grpc_credential.v3\";\noption java_outer_classname = \"AwsIamProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Grpc Credentials AWS IAM]\n// Configuration for AWS IAM Grpc Credentials Plugin\n// [#extension: envoy.grpc_credentials.aws_iam]\n\nmessage AwsIamConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.grpc_credential.v2alpha.AwsIamConfig\";\n\n  // The `service namespace\n  // <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_\n  // of the Grpc endpoint.\n  //\n  // Example: appmesh\n  string service_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the Grpc\n  // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment\n  // variable.\n  //\n  // Example: us-west-2\n  string region = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.grpc_credential.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.grpc_credential.v3\";\noption java_outer_classname = \"FileBasedMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Grpc Credentials File Based Metadata]\n// Configuration for File Based Metadata Grpc Credentials Plugin\n// [#extension: envoy.grpc_credentials.file_based_metadata]\n\nmessage FileBasedMetadataConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig\";\n\n  // Location or inline data of secret to use for authentication of the Google gRPC connection\n  // this secret will be attached to a header of the gRPC connection\n  core.v3.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true];\n\n  // Metadata header key to use for sending the secret data\n  // if no header key is set, \"authorization\" header will be used\n  string header_key = 2;\n\n  // Prefix to prepend to the secret in the metadata header\n  // if no prefix is set, the default is to use no prefix\n  string header_prefix = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.health_checker.redis.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.health_checker.redis.v2\";\noption java_outer_classname = \"RedisProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Redis]\n// Redis health checker :ref:`configuration overview <config_health_checkers_redis>`.\n// [#extension: envoy.health_checkers.redis]\n\nmessage Redis {\n  // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value\n  // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other\n  // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance\n  // by setting the specified key to any value and waiting for traffic to drain.\n  string key = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v2/api_listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v2;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v2\";\noption java_outer_classname = \"ApiListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.listener.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: API listener]\n\n// Describes a type of API listener, which is used in non-proxy clients. The type of API\n// exposed to the non-proxy application depends on the type of API listener.\nmessage ApiListener {\n  // The type in this field determines the type of API listener. At present, the following\n  // types are supported:\n  // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP)\n  // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the\n  // specific config message for each type of API listener. We could not do this in v2 because\n  // it would have caused circular dependencies for go protos: lds.proto depends on this file,\n  // and http_connection_manager.proto depends on rds.proto, which is in the same directory as\n  // lds.proto, so lds.proto cannot depend on this file.]\n  google.protobuf.Any api_listener = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/listener:pkg\",\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/listener/v2:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v3/api_listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"ApiListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: API listener]\n\n// Describes a type of API listener, which is used in non-proxy clients. The type of API\n// exposed to the non-proxy application depends on the type of API listener.\nmessage ApiListener {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v2.ApiListener\";\n\n  // The type in this field determines the type of API listener. At present, the following\n  // types are supported:\n  // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP)\n  // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the\n  // specific config message for each type of API listener. We could not do this in v2 because\n  // it would have caused circular dependencies for go protos: lds.proto depends on this file,\n  // and http_connection_manager.proto depends on rds.proto, which is in the same directory as\n  // lds.proto, so lds.proto cannot depend on this file.]\n  google.protobuf.Any api_listener = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v3/listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"envoy/config/accesslog/v3/accesslog.proto\";\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/socket_option.proto\";\nimport \"envoy/config/listener/v3/api_listener.proto\";\nimport \"envoy/config/listener/v3/listener_components.proto\";\nimport \"envoy/config/listener/v3/udp_listener_config.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/collection_entry.proto\";\n\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"ListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Listener configuration]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// Listener list collections. Entries are *Listener* resources or references.\n// [#not-implemented-hide:]\nmessage ListenerCollection {\n  repeated udpa.core.v1.CollectionEntry entries = 1;\n}\n\n// [#next-free-field: 25]\nmessage Listener {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Listener\";\n\n  enum DrainType {\n    // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check\n    // filter), listener removal/modification, and hot restart.\n    DEFAULT = 0;\n\n    // Drain in response to listener removal/modification and hot restart. This setting does not\n    // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress\n    // and egress listeners.\n    MODIFY_ONLY = 1;\n  }\n\n  // [#not-implemented-hide:]\n  message DeprecatedV1 {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Listener.DeprecatedV1\";\n\n    // Whether the listener should bind to the port. A listener that doesn't\n    // bind can only receive connections redirected from other listeners that\n    // set use_original_dst parameter to true. Default is true.\n    //\n    // This is deprecated in v2, all Listeners will bind to their port. An\n    // additional filter chain must be created for every original destination\n    // port this listener may redirect to in v2, with the original port\n    // specified in the FilterChainMatch destination_port field.\n    //\n    // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.]\n    google.protobuf.BoolValue bind_to_port = 1;\n  }\n\n  // Configuration for listener connection balancing.\n  message ConnectionBalanceConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.Listener.ConnectionBalanceConfig\";\n\n    // A connection balancer implementation that does exact balancing. This means that a lock is\n    // held during balancing so that connection counts are nearly exactly balanced between worker\n    // threads. This is \"nearly\" exact in the sense that a connection might close in parallel thus\n    // making the counts incorrect, but this should be rectified on the next accept. This balancer\n    // sacrifices accept throughput for accuracy and should be used when there are a small number of\n    // connections that rarely cycle (e.g., service mesh gRPC egress).\n    message ExactBalance {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.Listener.ConnectionBalanceConfig.ExactBalance\";\n    }\n\n    oneof balance_type {\n      option (validate.required) = true;\n\n      // If specified, the listener will use the exact connection balancer.\n      ExactBalance exact_balance = 1;\n    }\n  }\n\n  reserved 14;\n\n  // The unique name by which this listener is known. If no name is provided,\n  // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically\n  // updated or removed via :ref:`LDS <config_listeners_lds>` a unique name must be provided.\n  string name = 1;\n\n  // The address that the listener should listen on. In general, the address must be unique, though\n  // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on\n  // Linux as the actual port will be allocated by the OS.\n  core.v3.Address address = 2 [(validate.rules).message = {required: true}];\n\n  // A list of filter chains to consider for this listener. The\n  // :ref:`FilterChain <envoy_api_msg_config.listener.v3.FilterChain>` with the most specific\n  // :ref:`FilterChainMatch <envoy_api_msg_config.listener.v3.FilterChainMatch>` criteria is used on a\n  // connection.\n  //\n  // Example using SNI for filter chain selection can be found in the\n  // :ref:`FAQ entry <faq_how_to_setup_sni>`.\n  repeated FilterChain filter_chains = 3;\n\n  // Soft limit on size of the listener’s new connection read and write buffers.\n  // If unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // Listener metadata.\n  core.v3.Metadata metadata = 6;\n\n  // [#not-implemented-hide:]\n  DeprecatedV1 deprecated_v1 = 7;\n\n  // The type of draining to perform at a listener-wide level.\n  DrainType drain_type = 8;\n\n  // Listener filters have the opportunity to manipulate and augment the connection metadata that\n  // is used in connection filter chain matching, for example. These filters are run before any in\n  // :ref:`filter_chains <envoy_api_field_config.listener.v3.Listener.filter_chains>`. Order matters as the\n  // filters are processed sequentially right after a socket has been accepted by the listener, and\n  // before a connection is created.\n  // UDP Listener filters can be specified when the protocol in the listener socket address in\n  // :ref:`protocol <envoy_api_field_config.core.v3.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>`.\n  // UDP listeners currently support a single filter.\n  repeated ListenerFilter listener_filters = 9;\n\n  // The timeout to wait for all listener filters to complete operation. If the timeout is reached,\n  // the accepted socket is closed without a connection being created unless\n  // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the\n  // timeout. If not specified, a default timeout of 15s is used.\n  google.protobuf.Duration listener_filters_timeout = 15;\n\n  // Whether a connection should be created when listener filters timeout. Default is false.\n  //\n  // .. attention::\n  //\n  //   Some listener filters, such as :ref:`Proxy Protocol filter\n  //   <config_listener_filters_proxy_protocol>`, should not be used with this option. It will cause\n  //   unexpected behavior when a connection is created.\n  bool continue_on_listener_filters_timeout = 17;\n\n  // Whether the listener should be set as a transparent socket.\n  // When this flag is set to true, connections can be redirected to the listener using an\n  // *iptables* *TPROXY* target, in which case the original source and destination addresses and\n  // ports are preserved on accepted connections. This flag should be used in combination with\n  // :ref:`an original_dst <config_listener_filters_original_dst>` :ref:`listener filter\n  // <envoy_api_field_config.listener.v3.Listener.listener_filters>` to mark the connections' local addresses as\n  // \"restored.\" This can be used to hand off each redirected connection to another listener\n  // associated with the connection's destination address. Direct connections to the socket without\n  // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are\n  // therefore treated as if they were redirected.\n  // When this flag is set to false, the listener's socket is explicitly reset as non-transparent.\n  // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability.\n  // When this flag is not set (default), the socket is not modified, i.e. the transparent option\n  // is neither set nor reset.\n  google.protobuf.BoolValue transparent = 10;\n\n  // Whether the listener should set the *IP_FREEBIND* socket option. When this\n  // flag is set to true, listeners can be bound to an IP address that is not\n  // configured on the system running Envoy. When this flag is set to false, the\n  // option *IP_FREEBIND* is disabled on the socket. When this flag is not set\n  // (default), the socket is not modified, i.e. the option is neither enabled\n  // nor disabled.\n  google.protobuf.BoolValue freebind = 11;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.v3.SocketOption socket_options = 13;\n\n  // Whether the listener should accept TCP Fast Open (TFO) connections.\n  // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on\n  // the socket, with a queue length of the specified size\n  // (see `details in RFC7413 <https://tools.ietf.org/html/rfc7413#section-5.1>`_).\n  // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket.\n  // When this flag is not set (default), the socket is not modified,\n  // i.e. the option is neither enabled nor disabled.\n  //\n  // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable\n  // TCP_FASTOPEN.\n  // See `ip-sysctl.txt <https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt>`_.\n  //\n  // On macOS, only values of 0, 1, and unset are valid; other values may result in an error.\n  // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter.\n  google.protobuf.UInt32Value tcp_fast_open_queue_length = 12;\n\n  // Specifies the intended direction of the traffic relative to the local Envoy.\n  core.v3.TrafficDirection traffic_direction = 16;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_config.core.v3.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // listener to create, i.e. :ref:`udp_listener_name\n  // <envoy_api_field_config.listener.v3.UdpListenerConfig.udp_listener_name>` = \"raw_udp_listener\" for\n  // creating a packet-oriented UDP listener. If not present, treat it as \"raw_udp_listener\".\n  UdpListenerConfig udp_listener_config = 18;\n\n  // Used to represent an API listener, which is used in non-proxy clients. The type of API\n  // exposed to the non-proxy application depends on the type of API listener.\n  // When this field is set, no other field except for :ref:`name<envoy_api_field_config.listener.v3.Listener.name>`\n  // should be set.\n  //\n  // .. note::\n  //\n  //  Currently only one ApiListener can be installed; and it can only be done via bootstrap config,\n  //  not LDS.\n  //\n  // [#next-major-version: In the v3 API, instead of this messy approach where the socket\n  // listener fields are directly in the top-level Listener message and the API listener types\n  // are in the ApiListener message, the socket listener messages should be in their own message,\n  // and the top-level Listener should essentially be a oneof that selects between the\n  // socket listener and the various types of API listener. That way, a given Listener message\n  // can structurally only contain the fields of the relevant type.]\n  ApiListener api_listener = 19;\n\n  // The listener's connection balancer configuration, currently only applicable to TCP listeners.\n  // If no configuration is specified, Envoy will not attempt to balance active connections between\n  // worker threads.\n  ConnectionBalanceConfig connection_balance_config = 20;\n\n  // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and\n  // create one socket for each worker thread. This makes inbound connections\n  // distribute among worker threads roughly evenly in cases where there are a high number\n  // of connections. When this flag is set to false, all worker threads share one socket.\n  //\n  // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart\n  // (see `3rd paragraph in 'soreuseport' commit message\n  // <https://github.com/torvalds/linux/commit/c617f398edd4db2b8567a28e89>`_).\n  // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket\n  // <https://github.com/torvalds/linux/commit/40a1227ea845a37ab197dd1caffb60b047fa36b1>`_.\n  bool reuse_port = 21;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by this listener.\n  repeated accesslog.v3.AccessLog access_log = 22;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_config.core.v3.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // writer to create, i.e. :ref:`name <envoy_api_field_config.core.v3.TypedExtensionConfig.name>`\n  //    = \"udp_default_writer\" for creating a udp writer with writing in passthrough mode,\n  //    = \"udp_gso_batch_writer\" for creating a udp writer with writing in batch mode.\n  // If not present, treat it as \"udp_default_writer\".\n  // [#not-implemented-hide:]\n  core.v3.TypedExtensionConfig udp_writer_config = 23;\n\n  // The maximum length a tcp listener's pending connections queue can grow to. If no value is\n  // provided net.core.somaxconn will be used on Linux and 128 otherwise.\n  google.protobuf.UInt32Value tcp_backlog_size = 24;\n\n  google.protobuf.BoolValue hidden_envoy_deprecated_use_original_dst = 4 [deprecated = true];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v3/listener_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/tls.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"ListenerComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Listener components]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage Filter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.listener.Filter\";\n\n  reserved 3;\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 4;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n}\n\n// Specifies the match criteria for selecting a specific filter chain for a\n// listener.\n//\n// In order for a filter chain to be selected, *ALL* of its criteria must be\n// fulfilled by the incoming connection, properties of which are set by the\n// networking stack and/or listener filters.\n//\n// The following order applies:\n//\n// 1. Destination port.\n// 2. Destination IP address.\n// 3. Server name (e.g. SNI for TLS protocol),\n// 4. Transport protocol.\n// 5. Application protocols (e.g. ALPN for TLS protocol).\n// 6. Source type (e.g. any, local or external network).\n// 7. Source IP address.\n// 8. Source port.\n//\n// For criteria that allow ranges or wildcards, the most specific value in any\n// of the configured filter chains that matches the incoming connection is going\n// to be used (e.g. for SNI ``www.example.com`` the most specific match would be\n// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter\n// chain without ``server_names`` requirements).\n//\n// [#comment: Implemented rules are kept in the preference order, with deprecated fields\n// listed at the end, because that's how we want to list them in the docs.\n//\n// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules]\n// [#next-free-field: 13]\nmessage FilterChainMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.FilterChainMatch\";\n\n  enum ConnectionSourceType {\n    // Any connection source matches.\n    ANY = 0;\n\n    // Match a connection originating from the same host.\n    SAME_IP_OR_LOOPBACK = 1;\n\n    // Match a connection originating from a different host.\n    EXTERNAL = 2;\n  }\n\n  reserved 1;\n\n  // Optional destination port to consider when use_original_dst is set on the\n  // listener in determining a filter chain match.\n  google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}];\n\n  // If non-empty, an IP address and prefix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  repeated core.v3.CidrRange prefix_ranges = 3;\n\n  // If non-empty, an IP address and suffix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  // [#not-implemented-hide:]\n  string address_suffix = 4;\n\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value suffix_len = 5;\n\n  // Specifies the connection source IP match type. Can be any, local or external network.\n  ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}];\n\n  // The criteria is satisfied if the source IP address of the downstream\n  // connection is contained in at least one of the specified subnets. If the\n  // parameter is not specified or the list is empty, the source IP address is\n  // ignored.\n  repeated core.v3.CidrRange source_prefix_ranges = 6;\n\n  // The criteria is satisfied if the source port of the downstream connection\n  // is contained in at least one of the specified ports. If the parameter is\n  // not specified, the source port is ignored.\n  repeated uint32 source_ports = 7\n      [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}];\n\n  // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining\n  // a filter chain match. Those values will be compared against the server names of a new\n  // connection, when detected by one of the listener filters.\n  //\n  // The server name will be matched against all wildcard domains, i.e. ``www.example.com``\n  // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``.\n  //\n  // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.\n  //\n  // .. attention::\n  //\n  //   See the :ref:`FAQ entry <faq_how_to_setup_sni>` on how to configure SNI for more\n  //   information.\n  repeated string server_names = 11;\n\n  // If non-empty, a transport protocol to consider when determining a filter chain match.\n  // This value will be compared against the transport protocol of a new connection, when\n  // it's detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``raw_buffer`` - default, used when no transport protocol is detected,\n  // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //   when TLS protocol is detected.\n  string transport_protocol = 9;\n\n  // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when\n  // determining a filter chain match. Those values will be compared against the application\n  // protocols of a new connection, when detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector\n  //   <config_listener_filters_tls_inspector>`,\n  // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //\n  // .. attention::\n  //\n  //   Currently, only :ref:`TLS Inspector <config_listener_filters_tls_inspector>` provides\n  //   application protocol detection based on the requested\n  //   `ALPN <https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_ values.\n  //\n  //   However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet,\n  //   and matching on values other than ``h2`` is going to lead to a lot of false negatives,\n  //   unless all connecting clients are known to use ALPN.\n  repeated string application_protocols = 10;\n}\n\n// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and\n// various other parameters.\n// [#next-free-field: 9]\nmessage FilterChain {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.listener.FilterChain\";\n\n  // The configuration for on-demand filter chain. If this field is not empty in FilterChain message,\n  // a filter chain will be built on-demand.\n  // On-demand filter chains help speedup the warming up of listeners since the building and initialization of\n  // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain.\n  // Filter chains that are not often used can be set as on-demand.\n  message OnDemandConfiguration {\n    // The timeout to wait for filter chain placeholders to complete rebuilding.\n    // 1. If this field is set to 0, timeout is disabled.\n    // 2. 
If not specified, a default timeout of 15s is used.\n    // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached.\n    // Upon failure or timeout, all connections related to this filter chain will be closed.\n    // Rebuilding will start again on the next new connection.\n    google.protobuf.Duration rebuild_timeout = 1;\n  }\n\n  // The criteria to use when matching a connection to this filter chain.\n  FilterChainMatch filter_chain_match = 1;\n\n  // A list of individual network filters that make up the filter chain for\n  // connections established with the listener. Order matters as the filters are\n  // processed sequentially as connection events happen. Note: If the filter\n  // list is empty, the connection will close by default.\n  repeated Filter filters = 3;\n\n  // Whether the listener should expect a PROXY protocol V1 header on new\n  // connections. If this option is enabled, the listener will assume that that\n  // remote address of the connection is the one specified in the header. Some\n  // load balancers including the AWS ELB support this option. If the option is\n  // absent or set to false, Envoy will use the physical peer address of the\n  // connection as the remote address.\n  google.protobuf.BoolValue use_proxy_proto = 4;\n\n  // [#not-implemented-hide:] filter chain metadata.\n  core.v3.Metadata metadata = 5;\n\n  // Optional custom transport socket implementation to use for downstream connections.\n  // To setup TLS, set a transport socket with name `tls` and\n  // :ref:`DownstreamTlsContext <envoy_api_msg_extensions.transport_sockets.tls.v3.DownstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.v3.TransportSocket transport_socket = 6;\n\n  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. 
If no\n  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter\n  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.\n  string name = 7;\n\n  // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand.\n  // If this field is not empty, the filter chain will be built on-demand.\n  // Otherwise, the filter chain will be built normally and block listener warming.\n  OnDemandConfiguration on_demand_configuration = 8;\n\n  envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n      hidden_envoy_deprecated_tls_context = 2 [deprecated = true];\n}\n\n// Listener filter chain match configuration. This is a recursive structure which allows complex\n// nested match configurations to be built using various logical operators.\n//\n// Examples:\n//\n// * Matches if the destination port is 3306.\n//\n// .. code-block:: yaml\n//\n//  destination_port_range:\n//   start: 3306\n//   end: 3307\n//\n// * Matches if the destination port is 3306 or 15000.\n//\n// .. 
code-block:: yaml\n//\n//  or_match:\n//    rules:\n//      - destination_port_range:\n//          start: 3306\n//          end: 3306\n//      - destination_port_range:\n//          start: 15000\n//          end: 15001\n//\n// [#next-free-field: 6]\nmessage ListenerFilterChainMatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.ListenerFilterChainMatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.listener.ListenerFilterChainMatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated ListenerFilterChainMatchPredicate rules = 1\n        [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. The match configuration will match if the negated match condition matches.\n    ListenerFilterChainMatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // Match destination port. Particularly, the match evaluation must use the recovered local port if\n    // the owning listener filter is after :ref:`an original_dst listener filter <config_listener_filters_original_dst>`.\n    type.v3.Int32Range destination_port_range = 5;\n  }\n}\n\nmessage ListenerFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.ListenerFilter\";\n\n  // The name of the filter to instantiate. 
The name must match a\n  // :ref:`supported filter <config_listener_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated.\n  // See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n\n  // Optional match predicate used to disable the filter. The filter is enabled when this field is empty.\n  // See :ref:`ListenerFilterChainMatchPredicate <envoy_api_msg_config.listener.v3.ListenerFilterChainMatchPredicate>`\n  // for further examples.\n  ListenerFilterChainMatchPredicate filter_disabled = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v3/quic_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"QuicConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: QUIC listener Config]\n\n// Configuration specific to the QUIC protocol.\n// Next id: 5\nmessage QuicProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.QuicProtocolOptions\";\n\n  // Maximum number of streams that the client can negotiate per connection. 100\n  // if not specified.\n  google.protobuf.UInt32Value max_concurrent_streams = 1;\n\n  // Maximum number of milliseconds that the connection will be alive when there is\n  // no network activity. 300000ms if not specified.\n  google.protobuf.Duration idle_timeout = 2;\n\n  // Connection timeout in milliseconds before the crypto handshake is finished.\n  // 20000ms if not specified.\n  google.protobuf.Duration crypto_handshake_timeout = 3;\n\n  // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults\n  // to enabled.\n  core.v3.RuntimeFeatureFlag enabled = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v3/udp_default_writer_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"UdpDefaultWriterConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Udp Default Writer Config]\n\n// [#not-implemented-hide:]\n// Configuration specific to the Udp Default Writer.\nmessage UdpDefaultWriterOptions {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v3/udp_gso_batch_writer_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"UdpGsoBatchWriterConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Udp Gso Batch Writer Config]\n\n// [#not-implemented-hide:]\n// Configuration specific to the Udp Gso Batch Writer.\nmessage UdpGsoBatchWriterOptions {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v3;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v3\";\noption java_outer_classname = \"UdpListenerConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: UDP Listener Config]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage UdpListenerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.UdpListenerConfig\";\n\n  // Used to look up UDP listener factory, matches \"raw_udp_listener\" or\n  // \"quic_listener\" to create a specific udp listener.\n  // If not specified, treat as \"raw_udp_listener\".\n  string udp_listener_name = 1;\n\n  // Used to create a specific listener factory. For some factories, e.g.\n  // \"raw_udp_listener\", config is not needed.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n}\n\nmessage ActiveRawUdpListenerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.listener.ActiveRawUdpListenerConfig\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/listener/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"ApiListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: API listener]\n\n// Describes a type of API listener, which is used in non-proxy clients. The type of API\n// exposed to the non-proxy application depends on the type of API listener.\nmessage ApiListener {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ApiListener\";\n\n  // The type in this field determines the type of API listener. At present, the following\n  // types are supported:\n  // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP)\n  // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the\n  // specific config message for each type of API listener. We could not do this in v2 because\n  // it would have caused circular dependencies for go protos: lds.proto depends on this file,\n  // and http_connection_manager.proto depends on rds.proto, which is in the same directory as\n  // lds.proto, so lds.proto cannot depend on this file.]\n  google.protobuf.Any api_listener = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v4alpha/listener.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"envoy/config/accesslog/v4alpha/accesslog.proto\";\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/socket_option.proto\";\nimport \"envoy/config/listener/v4alpha/api_listener.proto\";\nimport \"envoy/config/listener/v4alpha/listener_components.proto\";\nimport \"envoy/config/listener/v4alpha/udp_listener_config.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/collection_entry.proto\";\n\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"ListenerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Listener configuration]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// Listener list collections. 
Entries are *Listener* resources or references.\n// [#not-implemented-hide:]\nmessage ListenerCollection {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ListenerCollection\";\n\n  repeated udpa.core.v1.CollectionEntry entries = 1;\n}\n\n// [#next-free-field: 25]\nmessage Listener {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.listener.v3.Listener\";\n\n  enum DrainType {\n    // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check\n    // filter), listener removal/modification, and hot restart.\n    DEFAULT = 0;\n\n    // Drain in response to listener removal/modification and hot restart. This setting does not\n    // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress\n    // and egress listeners.\n    MODIFY_ONLY = 1;\n  }\n\n  // [#not-implemented-hide:]\n  message DeprecatedV1 {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.listener.v3.Listener.DeprecatedV1\";\n\n    // Whether the listener should bind to the port. A listener that doesn't\n    // bind can only receive connections redirected from other listeners that\n    // set use_original_dst parameter to true. Default is true.\n    //\n    // This is deprecated in v2, all Listeners will bind to their port. 
An\n    // additional filter chain must be created for every original destination\n    // port this listener may redirect to in v2, with the original port\n    // specified in the FilterChainMatch destination_port field.\n    //\n    // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.]\n    google.protobuf.BoolValue bind_to_port = 1;\n  }\n\n  // Configuration for listener connection balancing.\n  message ConnectionBalanceConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.listener.v3.Listener.ConnectionBalanceConfig\";\n\n    // A connection balancer implementation that does exact balancing. This means that a lock is\n    // held during balancing so that connection counts are nearly exactly balanced between worker\n    // threads. This is \"nearly\" exact in the sense that a connection might close in parallel thus\n    // making the counts incorrect, but this should be rectified on the next accept. This balancer\n    // sacrifices accept throughput for accuracy and should be used when there are a small number of\n    // connections that rarely cycle (e.g., service mesh gRPC egress).\n    message ExactBalance {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance\";\n    }\n\n    oneof balance_type {\n      option (validate.required) = true;\n\n      // If specified, the listener will use the exact connection balancer.\n      ExactBalance exact_balance = 1;\n    }\n  }\n\n  reserved 14, 4;\n\n  reserved \"use_original_dst\";\n\n  // The unique name by which this listener is known. If no name is provided,\n  // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically\n  // updated or removed via :ref:`LDS <config_listeners_lds>` a unique name must be provided.\n  string name = 1;\n\n  // The address that the listener should listen on. 
In general, the address must be unique, though\n  // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on\n  // Linux as the actual port will be allocated by the OS.\n  core.v4alpha.Address address = 2 [(validate.rules).message = {required: true}];\n\n  // A list of filter chains to consider for this listener. The\n  // :ref:`FilterChain <envoy_api_msg_config.listener.v4alpha.FilterChain>` with the most specific\n  // :ref:`FilterChainMatch <envoy_api_msg_config.listener.v4alpha.FilterChainMatch>` criteria is used on a\n  // connection.\n  //\n  // Example using SNI for filter chain selection can be found in the\n  // :ref:`FAQ entry <faq_how_to_setup_sni>`.\n  repeated FilterChain filter_chains = 3;\n\n  // Soft limit on size of the listener’s new connection read and write buffers.\n  // If unspecified, an implementation defined default is applied (1MiB).\n  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // Listener metadata.\n  core.v4alpha.Metadata metadata = 6;\n\n  // [#not-implemented-hide:]\n  DeprecatedV1 deprecated_v1 = 7;\n\n  // The type of draining to perform at a listener-wide level.\n  DrainType drain_type = 8;\n\n  // Listener filters have the opportunity to manipulate and augment the connection metadata that\n  // is used in connection filter chain matching, for example. These filters are run before any in\n  // :ref:`filter_chains <envoy_api_field_config.listener.v4alpha.Listener.filter_chains>`. 
Order matters as the\n  // filters are processed sequentially right after a socket has been accepted by the listener, and\n  // before a connection is created.\n  // UDP Listener filters can be specified when the protocol in the listener socket address in\n  // :ref:`protocol <envoy_api_field_config.core.v4alpha.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v4alpha.SocketAddress.Protocol.UDP>`.\n  // UDP listeners currently support a single filter.\n  repeated ListenerFilter listener_filters = 9;\n\n  // The timeout to wait for all listener filters to complete operation. If the timeout is reached,\n  // the accepted socket is closed without a connection being created unless\n  // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the\n  // timeout. If not specified, a default timeout of 15s is used.\n  google.protobuf.Duration listener_filters_timeout = 15;\n\n  // Whether a connection should be created when listener filters timeout. Default is false.\n  //\n  // .. attention::\n  //\n  //   Some listener filters, such as :ref:`Proxy Protocol filter\n  //   <config_listener_filters_proxy_protocol>`, should not be used with this option. It will cause\n  //   unexpected behavior when a connection is created.\n  bool continue_on_listener_filters_timeout = 17;\n\n  // Whether the listener should be set as a transparent socket.\n  // When this flag is set to true, connections can be redirected to the listener using an\n  // *iptables* *TPROXY* target, in which case the original source and destination addresses and\n  // ports are preserved on accepted connections. 
This flag should be used in combination with\n  // :ref:`an original_dst <config_listener_filters_original_dst>` :ref:`listener filter\n  // <envoy_api_field_config.listener.v4alpha.Listener.listener_filters>` to mark the connections' local addresses as\n  // \"restored.\" This can be used to hand off each redirected connection to another listener\n  // associated with the connection's destination address. Direct connections to the socket without\n  // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are\n  // therefore treated as if they were redirected.\n  // When this flag is set to false, the listener's socket is explicitly reset as non-transparent.\n  // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability.\n  // When this flag is not set (default), the socket is not modified, i.e. the transparent option\n  // is neither set nor reset.\n  google.protobuf.BoolValue transparent = 10;\n\n  // Whether the listener should set the *IP_FREEBIND* socket option. When this\n  // flag is set to true, listeners can be bound to an IP address that is not\n  // configured on the system running Envoy. When this flag is set to false, the\n  // option *IP_FREEBIND* is disabled on the socket. When this flag is not set\n  // (default), the socket is not modified, i.e. 
the option is neither enabled\n  // nor disabled.\n  google.protobuf.BoolValue freebind = 11;\n\n  // Additional socket options that may not be present in Envoy source code or\n  // precompiled binaries.\n  repeated core.v4alpha.SocketOption socket_options = 13;\n\n  // Whether the listener should accept TCP Fast Open (TFO) connections.\n  // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on\n  // the socket, with a queue length of the specified size\n  // (see `details in RFC7413 <https://tools.ietf.org/html/rfc7413#section-5.1>`_).\n  // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket.\n  // When this flag is not set (default), the socket is not modified,\n  // i.e. the option is neither enabled nor disabled.\n  //\n  // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable\n  // TCP_FASTOPEN.\n  // See `ip-sysctl.txt <https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt>`_.\n  //\n  // On macOS, only values of 0, 1, and unset are valid; other values may result in an error.\n  // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter.\n  google.protobuf.UInt32Value tcp_fast_open_queue_length = 12;\n\n  // Specifies the intended direction of the traffic relative to the local Envoy.\n  core.v4alpha.TrafficDirection traffic_direction = 16;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_config.core.v4alpha.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v4alpha.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // listener to create, i.e. :ref:`udp_listener_name\n  // <envoy_api_field_config.listener.v4alpha.UdpListenerConfig.udp_listener_name>` = \"raw_udp_listener\" for\n  // creating a packet-oriented UDP listener. 
If not present, treat it as \"raw_udp_listener\".\n  UdpListenerConfig udp_listener_config = 18;\n\n  // Used to represent an API listener, which is used in non-proxy clients. The type of API\n  // exposed to the non-proxy application depends on the type of API listener.\n  // When this field is set, no other field except for :ref:`name<envoy_api_field_config.listener.v4alpha.Listener.name>`\n  // should be set.\n  //\n  // .. note::\n  //\n  //  Currently only one ApiListener can be installed; and it can only be done via bootstrap config,\n  //  not LDS.\n  //\n  // [#next-major-version: In the v3 API, instead of this messy approach where the socket\n  // listener fields are directly in the top-level Listener message and the API listener types\n  // are in the ApiListener message, the socket listener messages should be in their own message,\n  // and the top-level Listener should essentially be a oneof that selects between the\n  // socket listener and the various types of API listener. That way, a given Listener message\n  // can structurally only contain the fields of the relevant type.]\n  ApiListener api_listener = 19;\n\n  // The listener's connection balancer configuration, currently only applicable to TCP listeners.\n  // If no configuration is specified, Envoy will not attempt to balance active connections between\n  // worker threads.\n  ConnectionBalanceConfig connection_balance_config = 20;\n\n  // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and\n  // create one socket for each worker thread. This makes inbound connections\n  // distribute among worker threads roughly evenly in cases where there are a high number\n  // of connections. 
When this flag is set to false, all worker threads share one socket.\n  //\n  // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart\n  // (see `3rd paragraph in 'soreuseport' commit message\n  // <https://github.com/torvalds/linux/commit/c617f398edd4db2b8567a28e89>`_).\n  // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket\n  // <https://github.com/torvalds/linux/commit/40a1227ea845a37ab197dd1caffb60b047fa36b1>`_.\n  bool reuse_port = 21;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by this listener.\n  repeated accesslog.v4alpha.AccessLog access_log = 22;\n\n  // If the protocol in the listener socket address in :ref:`protocol\n  // <envoy_api_field_config.core.v4alpha.SocketAddress.protocol>` is :ref:`UDP\n  // <envoy_api_enum_value_config.core.v4alpha.SocketAddress.Protocol.UDP>`, this field specifies the actual udp\n  // writer to create, i.e. :ref:`name <envoy_api_field_config.core.v4alpha.TypedExtensionConfig.name>`\n  //    = \"udp_default_writer\" for creating a udp writer with writing in passthrough mode,\n  //    = \"udp_gso_batch_writer\" for creating a udp writer with writing in batch mode.\n  // If not present, treat it as \"udp_default_writer\".\n  // [#not-implemented-hide:]\n  core.v4alpha.TypedExtensionConfig udp_writer_config = 23;\n\n  // The maximum length a tcp listener's pending connections queue can grow to. If no value is\n  // provided net.core.somaxconn will be used on Linux and 128 otherwise.\n  google.protobuf.UInt32Value tcp_backlog_size = 24;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"ListenerComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Listener components]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage Filter {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.listener.v3.Filter\";\n\n  reserved 3, 2;\n\n  reserved \"config\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_network_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 4;\n  }\n}\n\n// Specifies the match criteria for selecting a specific filter chain for a\n// listener.\n//\n// In order for a filter chain to be selected, *ALL* of its criteria must be\n// fulfilled by the incoming connection, properties of which are set by the\n// networking stack and/or listener filters.\n//\n// The following order applies:\n//\n// 1. Destination port.\n// 2. Destination IP address.\n// 3. Server name (e.g. SNI for TLS protocol),\n// 4. Transport protocol.\n// 5. Application protocols (e.g. 
ALPN for TLS protocol).\n// 6. Source type (e.g. any, local or external network).\n// 7. Source IP address.\n// 8. Source port.\n//\n// For criteria that allow ranges or wildcards, the most specific value in any\n// of the configured filter chains that matches the incoming connection is going\n// to be used (e.g. for SNI ``www.example.com`` the most specific match would be\n// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter\n// chain without ``server_names`` requirements).\n//\n// [#comment: Implemented rules are kept in the preference order, with deprecated fields\n// listed at the end, because that's how we want to list them in the docs.\n//\n// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules]\n// [#next-free-field: 13]\nmessage FilterChainMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.FilterChainMatch\";\n\n  enum ConnectionSourceType {\n    // Any connection source matches.\n    ANY = 0;\n\n    // Match a connection originating from the same host.\n    SAME_IP_OR_LOOPBACK = 1;\n\n    // Match a connection originating from a different host.\n    EXTERNAL = 2;\n  }\n\n  reserved 1;\n\n  // Optional destination port to consider when use_original_dst is set on the\n  // listener in determining a filter chain match.\n  google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}];\n\n  // If non-empty, an IP address and prefix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  repeated core.v4alpha.CidrRange prefix_ranges = 3;\n\n  // If non-empty, an IP address and suffix length to match addresses when the\n  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.\n  // [#not-implemented-hide:]\n  string address_suffix = 4;\n\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value suffix_len = 5;\n\n  // Specifies the 
connection source IP match type. Can be any, local or external network.\n  ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}];\n\n  // The criteria is satisfied if the source IP address of the downstream\n  // connection is contained in at least one of the specified subnets. If the\n  // parameter is not specified or the list is empty, the source IP address is\n  // ignored.\n  repeated core.v4alpha.CidrRange source_prefix_ranges = 6;\n\n  // The criteria is satisfied if the source port of the downstream connection\n  // is contained in at least one of the specified ports. If the parameter is\n  // not specified, the source port is ignored.\n  repeated uint32 source_ports = 7\n      [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}];\n\n  // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining\n  // a filter chain match. Those values will be compared against the server names of a new\n  // connection, when detected by one of the listener filters.\n  //\n  // The server name will be matched against all wildcard domains, i.e. ``www.example.com``\n  // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``.\n  //\n  // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.\n  //\n  // .. 
attention::\n  //\n  //   See the :ref:`FAQ entry <faq_how_to_setup_sni>` on how to configure SNI for more\n  //   information.\n  repeated string server_names = 11;\n\n  // If non-empty, a transport protocol to consider when determining a filter chain match.\n  // This value will be compared against the transport protocol of a new connection, when\n  // it's detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``raw_buffer`` - default, used when no transport protocol is detected,\n  // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //   when TLS protocol is detected.\n  string transport_protocol = 9;\n\n  // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when\n  // determining a filter chain match. Those values will be compared against the application\n  // protocols of a new connection, when detected by one of the listener filters.\n  //\n  // Suggested values include:\n  //\n  // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector\n  //   <config_listener_filters_tls_inspector>`,\n  // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`\n  //\n  // .. 
attention::\n  //\n  //   Currently, only :ref:`TLS Inspector <config_listener_filters_tls_inspector>` provides\n  //   application protocol detection based on the requested\n  //   `ALPN <https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_ values.\n  //\n  //   However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet,\n  //   and matching on values other than ``h2`` is going to lead to a lot of false negatives,\n  //   unless all connecting clients are known to use ALPN.\n  repeated string application_protocols = 10;\n}\n\n// A filter chain wraps a set of match criteria, an optional TLS context, a set of filters, and\n// various other parameters.\n// [#next-free-field: 9]\nmessage FilterChain {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.FilterChain\";\n\n  // The configuration for on-demand filter chain. If this field is not empty in FilterChain message,\n  // a filter chain will be built on-demand.\n  // On-demand filter chains help speed up the warming up of listeners since the building and initialization of\n  // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain.\n  // Filter chains that are not often used can be set as on-demand.\n  message OnDemandConfiguration {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.listener.v3.FilterChain.OnDemandConfiguration\";\n\n    // The timeout to wait for filter chain placeholders to complete rebuilding.\n    // 1. If this field is set to 0, timeout is disabled.\n    // 2. 
If not specified, a default timeout of 15s is used.\n    // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached.\n    // Upon failure or timeout, all connections related to this filter chain will be closed.\n    // Rebuilding will start again on the next new connection.\n    google.protobuf.Duration rebuild_timeout = 1;\n  }\n\n  reserved 2;\n\n  reserved \"tls_context\";\n\n  // The criteria to use when matching a connection to this filter chain.\n  FilterChainMatch filter_chain_match = 1;\n\n  // A list of individual network filters that make up the filter chain for\n  // connections established with the listener. Order matters as the filters are\n  // processed sequentially as connection events happen. Note: If the filter\n  // list is empty, the connection will close by default.\n  repeated Filter filters = 3;\n\n  // Whether the listener should expect a PROXY protocol V1 header on new\n  // connections. If this option is enabled, the listener will assume that the\n  // remote address of the connection is the one specified in the header. Some\n  // load balancers including the AWS ELB support this option. If the option is\n  // absent or set to false, Envoy will use the physical peer address of the\n  // connection as the remote address.\n  google.protobuf.BoolValue use_proxy_proto = 4;\n\n  // [#not-implemented-hide:] filter chain metadata.\n  core.v4alpha.Metadata metadata = 5;\n\n  // Optional custom transport socket implementation to use for downstream connections.\n  // To set up TLS, set a transport socket with name `tls` and\n  // :ref:`DownstreamTlsContext <envoy_api_msg_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext>` in the `typed_config`.\n  // If no transport socket configuration is specified, new connections\n  // will be set up with plaintext.\n  core.v4alpha.TransportSocket transport_socket = 6;\n\n  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. 
If no\n  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter\n  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.\n  string name = 7;\n\n  // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand.\n  // If this field is not empty, the filter chain will be built on-demand.\n  // Otherwise, the filter chain will be built normally and block listener warming.\n  OnDemandConfiguration on_demand_configuration = 8;\n}\n\n// Listener filter chain match configuration. This is a recursive structure which allows complex\n// nested match configurations to be built using various logical operators.\n//\n// Examples:\n//\n// * Matches if the destination port is 3306.\n//\n// .. code-block:: yaml\n//\n//  destination_port_range:\n//   start: 3306\n//   end: 3307\n//\n// * Matches if the destination port is 3306 or 15000.\n//\n// .. code-block:: yaml\n//\n//  or_match:\n//    rules:\n//      - destination_port_range:\n//          start: 3306\n//          end: 3307\n//      - destination_port_range:\n//          start: 15000\n//          end: 15001\n//\n// [#next-free-field: 6]\nmessage ListenerFilterChainMatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ListenerFilterChainMatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated ListenerFilterChainMatchPredicate rules = 1\n        [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. 
If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. The match configuration will match if the negated match condition matches.\n    ListenerFilterChainMatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // Match destination port. Particularly, the match evaluation must use the recovered local port if\n    // the owning listener filter is after :ref:`an original_dst listener filter <config_listener_filters_original_dst>`.\n    type.v3.Int32Range destination_port_range = 5;\n  }\n}\n\nmessage ListenerFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ListenerFilter\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the filter to instantiate. The name must match a\n  // :ref:`supported filter <config_listener_filters>`.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated.\n  // See the supported filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n\n  // Optional match predicate used to disable the filter. The filter is enabled when this field is empty.\n  // See :ref:`ListenerFilterChainMatchPredicate <envoy_api_msg_config.listener.v4alpha.ListenerFilterChainMatchPredicate>`\n  // for further examples.\n  ListenerFilterChainMatchPredicate filter_disabled = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"QuicConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: QUIC listener Config]\n\n// Configuration specific to the QUIC protocol.\n// Next id: 5\nmessage QuicProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.QuicProtocolOptions\";\n\n  // Maximum number of streams that the client can negotiate per connection. 100\n  // if not specified.\n  google.protobuf.UInt32Value max_concurrent_streams = 1;\n\n  // Maximum number of milliseconds that connection will be alive when there is\n  // no network activity. 300000ms if not specified.\n  google.protobuf.Duration idle_timeout = 2;\n\n  // Connection timeout in milliseconds before the crypto handshake is finished.\n  // 20000ms if not specified.\n  google.protobuf.Duration crypto_handshake_timeout = 3;\n\n  // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults\n  // to enabled.\n  core.v4alpha.RuntimeFeatureFlag enabled = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v4alpha/udp_default_writer_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"UdpDefaultWriterConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Udp Default Writer Config]\n\n// [#not-implemented-hide:]\n// Configuration specific to the Udp Default Writer.\nmessage UdpDefaultWriterOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.UdpDefaultWriterOptions\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"UdpGsoBatchWriterConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Udp Gso Batch Writer Config]\n\n// [#not-implemented-hide:]\n// Configuration specific to the Udp Gso Batch Writer.\nmessage UdpGsoBatchWriterOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.UdpGsoBatchWriterOptions\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.listener.v4alpha;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.listener.v4alpha\";\noption java_outer_classname = \"UdpListenerConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: UDP Listener Config]\n// Listener :ref:`configuration overview <config_listeners>`\n\nmessage UdpListenerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.UdpListenerConfig\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // Used to look up UDP listener factory, matches \"raw_udp_listener\" or\n  // \"quic_listener\" to create a specific udp listener.\n  // If not specified, treat as \"raw_udp_listener\".\n  string udp_listener_name = 1;\n\n  // Used to create a specific listener factory. To some factory, e.g.\n  // \"raw_udp_listener\", config is not needed.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\nmessage ActiveRawUdpListenerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.listener.v3.ActiveRawUdpListenerConfig\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/metrics/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v2\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metrics service]\n\n// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink\n// <envoy_api_msg_config.metrics.v2.StatsSink>`. This opaque configuration will be used to create\n// Metrics Service.\n// [#extension: envoy.stat_sinks.metrics_service]\nmessage MetricsServiceConfig {\n  // The upstream gRPC cluster that hosts the metrics service.\n  api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/metrics/v2/stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v2\";\noption java_outer_classname = \"StatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Stats]\n// Statistics :ref:`architecture overview <arch_overview_statistics>`.\n\n// Configuration for pluggable stats sinks.\nmessage StatsSink {\n  // The name of the stats sink to instantiate. The name must match a supported\n  // stats sink. The built-in stats sinks are:\n  //\n  // * :ref:`envoy.stat_sinks.statsd <envoy_api_msg_config.metrics.v2.StatsdSink>`\n  // * :ref:`envoy.stat_sinks.dog_statsd <envoy_api_msg_config.metrics.v2.DogStatsdSink>`\n  // * :ref:`envoy.stat_sinks.metrics_service <envoy_api_msg_config.metrics.v2.MetricsServiceConfig>`\n  // * :ref:`envoy.stat_sinks.hystrix <envoy_api_msg_config.metrics.v2.HystrixSink>`\n  //\n  // Sinks optionally support tagged/multiple dimensional metrics.\n  string name = 1;\n\n  // Stats sink specific configuration which depends on the sink being instantiated. 
See\n  // :ref:`StatsdSink <envoy_api_msg_config.metrics.v2.StatsdSink>` for an example.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Statistics configuration such as tagging.\nmessage StatsConfig {\n  // Each stat name is iteratively processed through these tag specifiers.\n  // When a tag is matched, the first capture group is removed from the name so\n  // later :ref:`TagSpecifiers <envoy_api_msg_config.metrics.v2.TagSpecifier>` cannot match that\n  // same portion of the match.\n  repeated TagSpecifier stats_tags = 1;\n\n  // Use all default tag regexes specified in Envoy. These can be combined with\n  // custom tags specified in :ref:`stats_tags\n  // <envoy_api_field_config.metrics.v2.StatsConfig.stats_tags>`. They will be processed before\n  // the custom tags.\n  //\n  // .. note::\n  //\n  //   If any default tags are specified twice, the config will be considered\n  //   invalid.\n  //\n  // See :repo:`well_known_names.h <source/common/config/well_known_names.h>` for a list of the\n  // default tags in Envoy.\n  //\n  // If not provided, the value is assumed to be true.\n  google.protobuf.BoolValue use_all_default_tags = 2;\n\n  // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated\n  // as normal. Preventing the instantiation of certain families of stats can improve memory\n  // performance for Envoys running especially large configs.\n  //\n  // .. warning::\n  //   Excluding stats may affect Envoy's behavior in undocumented ways. See\n  //   `issue #8771 <https://github.com/envoyproxy/envoy/issues/8771>`_ for more information.\n  //   If any unexpected behavior changes are observed, please open a new issue immediately.\n  StatsMatcher stats_matcher = 3;\n}\n\n// Configuration for disabling stat instantiation.\nmessage StatsMatcher {\n  // The instantiation of stats is unrestricted by default. 
If the goal is to configure Envoy to\n  // instantiate all stats, there is no need to construct a StatsMatcher.\n  //\n  // However, StatsMatcher can be used to limit the creation of families of stats in order to\n  // conserve memory. Stats can either be disabled entirely, or they can be\n  // limited by either an exclusion or an inclusion list of :ref:`StringMatcher\n  // <envoy_api_msg_type.matcher.StringMatcher>` protos:\n  //\n  // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to\n  //   `false`, all stats will be instantiated.\n  //\n  // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the\n  //   list will not instantiate.\n  //\n  // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of\n  //   the StringMatchers in the list.\n  //\n  //\n  // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex.\n  // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based\n  // matcher rather than a regex-based matcher.\n  //\n  // Example 1. Excluding all stats.\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"rejectAll\": \"true\"\n  //     }\n  //   }\n  //\n  // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"exclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n  // Example 3. Including only manager-related stats:\n  //\n  // .. 
code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"inclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster_manager.\"\n  //           },\n  //           {\n  //             \"prefix\": \"listener_manager.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n\n  oneof stats_matcher {\n    option (validate.required) = true;\n\n    // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all\n    // stats are enabled.\n    bool reject_all = 1;\n\n    // Exclusive match. All stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.ListStringMatcher exclusion_list = 2;\n\n    // Inclusive match. No stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.ListStringMatcher inclusion_list = 3;\n  }\n}\n\n// Designates a tag name and value pair. The value may be either a fixed value\n// or a regex providing the value via capture groups. The specified tag will be\n// unconditionally set if a fixed value, otherwise it will only be set if one\n// or more capture groups in the regex match.\nmessage TagSpecifier {\n  // Attaches an identifier to the tag values to identify the tag being in the\n  // sink. Envoy has a set of default names and regexes to extract dynamic\n  // portions of existing stats, which can be found in :repo:`well_known_names.h\n  // <source/common/config/well_known_names.h>` in the Envoy repository. If a :ref:`tag_name\n  // <envoy_api_field_config.metrics.v2.TagSpecifier.tag_name>` is provided in the config and\n  // neither :ref:`regex <envoy_api_field_config.metrics.v2.TagSpecifier.regex>` or\n  // :ref:`fixed_value <envoy_api_field_config.metrics.v2.TagSpecifier.fixed_value>` were specified,\n  // Envoy will attempt to find that name in its set of defaults and use the accompanying regex.\n  //\n  // .. 
note::\n  //\n  //   It is invalid to specify the same tag name twice in a config.\n  string tag_name = 1;\n\n  oneof tag_value {\n    // Designates a tag to strip from the tag extracted name and provide as a named\n    // tag value for all statistics. This will only occur if any part of the name\n    // matches the regex provided with one or more capture groups.\n    //\n    // The first capture group identifies the portion of the name to remove. The\n    // second capture group (which will normally be nested inside the first) will\n    // designate the value of the tag for the statistic. If no second capture\n    // group is provided, the first will also be used to set the value of the tag.\n    // All other capture groups will be ignored.\n    //\n    // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and\n    // one tag specifier:\n    //\n    // .. code-block:: json\n    //\n    //   {\n    //     \"tag_name\": \"envoy.cluster_name\",\n    //     \"regex\": \"^cluster\\\\.((.+?)\\\\.)\"\n    //   }\n    //\n    // Note that the regex will remove ``foo_cluster.`` making the tag extracted\n    // name ``cluster.upstream_rq_timeout`` and the tag value for\n    // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no\n    // ``.`` character because of the second capture group).\n    //\n    // Example 2. a stat name\n    // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two\n    // tag specifiers:\n    //\n    // .. 
code-block:: json\n    //\n    //   [\n    //     {\n    //       \"tag_name\": \"envoy.http_user_agent\",\n    //       \"regex\": \"^http(?=\\\\.).*?\\\\.user_agent\\\\.((.+?)\\\\.)\\\\w+?$\"\n    //     },\n    //     {\n    //       \"tag_name\": \"envoy.http_conn_manager_prefix\",\n    //       \"regex\": \"^http\\\\.((.*?)\\\\.)\"\n    //     }\n    //   ]\n    //\n    // The two regexes of the specifiers will be processed in the definition order.\n    //\n    // The first regex will remove ``ios.``, leaving the tag extracted name\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag\n    // ``envoy.http_user_agent`` will be added with tag value ``ios``.\n    //\n    // The second regex will remove ``connection_manager_1.`` from the tag\n    // extracted name produced by the first regex\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving\n    // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag\n    // ``envoy.http_conn_manager_prefix`` will be added with the tag value\n    // ``connection_manager_1``.\n    string regex = 2 [(validate.rules).string = {max_bytes: 1024}];\n\n    // Specifies a fixed tag value for the ``tag_name``.\n    string fixed_value = 3;\n  }\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support\n// tagged metrics.\n// [#extension: envoy.stat_sinks.statsd]\nmessage StatsdSink {\n  oneof statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running `statsd <https://github.com/etsy/statsd>`_\n    // compliant listener. If specified, statistics will be flushed to this\n    // address.\n    api.v2.core.Address address = 1;\n\n    // The name of a cluster that is running a TCP `statsd\n    // <https://github.com/etsy/statsd>`_ compliant listener. 
If specified,\n    // Envoy will connect to this cluster to flush statistics.\n    string tcp_cluster_name = 2;\n  }\n\n  // Optional custom prefix for StatsdSink. If\n  // specified, this will override the default prefix.\n  // For example:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"prefix\" : \"envoy-prod\"\n  //   }\n  //\n  // will change emitted stats to\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy-prod.test_counter:1|c\n  //   envoy-prod.test_timer:5|ms\n  //\n  // Note that the default prefix, \"envoy\", will be used if a prefix is not\n  // specified.\n  //\n  // Stats with default prefix:\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy.test_counter:1|c\n  //   envoy.test_timer:5|ms\n  string prefix = 3;\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink.\n// The sink emits stats with `DogStatsD <https://docs.datadoghq.com/guides/dogstatsd/>`_\n// compatible tags. Tags are configurable via :ref:`StatsConfig\n// <envoy_api_msg_config.metrics.v2.StatsConfig>`.\n// [#extension: envoy.stat_sinks.dog_statsd]\nmessage DogStatsdSink {\n  reserved 2;\n\n  oneof dog_statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running DogStatsD compliant listener. If specified,\n    // statistics will be flushed to this address.\n    api.v2.core.Address address = 1;\n  }\n\n  // Optional custom metric name prefix. 
See :ref:`StatsdSink's prefix field\n  // <envoy_api_field_config.metrics.v2.StatsdSink.prefix>` for more details.\n  string prefix = 3;\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink.\n// The sink emits stats in `text/event-stream\n// <https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events>`_\n// formatted stream for use by `Hystrix dashboard\n// <https://github.com/Netflix-Skunkworks/hystrix-dashboard/wiki>`_.\n//\n// Note that only a single HystrixSink should be configured.\n//\n// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`.\n// [#extension: envoy.stat_sinks.hystrix]\nmessage HystrixSink {\n  // The number of buckets the rolling statistical window is divided into.\n  //\n  // Each time the sink is flushed, all relevant Envoy statistics are sampled and\n  // added to the rolling window (removing the oldest samples in the window\n  // in the process). The sink then outputs the aggregate statistics across the\n  // current rolling window to the event stream(s).\n  //\n  // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets\n  //\n  // More detailed explanation can be found in `Hystrix wiki\n  // <https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#hystrixrollingnumber>`_.\n  int64 num_buckets = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/metrics/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/metrics/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v3\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metrics service]\n\n// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink\n// <envoy_api_msg_config.metrics.v3.StatsSink>`. This opaque configuration will be used to create\n// Metrics Service.\n// [#extension: envoy.stat_sinks.metrics_service]\nmessage MetricsServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.MetricsServiceConfig\";\n\n  // The upstream gRPC cluster that hosts the metrics service.\n  core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n\n  // API version for metric service transport protocol. This describes the metric service gRPC\n  // endpoint and version of messages used on the wire.\n  core.v3.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // If true, counters are reported as the delta between flushing intervals. Otherwise, the current\n  // counter value is reported. Defaults to false.\n  // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the\n  // sink will take updates from the :ref:`MetricsResponse <envoy_api_msg_service.metrics.v3.StreamMetricsResponse>`.\n  google.protobuf.BoolValue report_counters_as_deltas = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/metrics/v3/stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v3\";\noption java_outer_classname = \"StatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Stats]\n// Statistics :ref:`architecture overview <arch_overview_statistics>`.\n\n// Configuration for pluggable stats sinks.\nmessage StatsSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.metrics.v2.StatsSink\";\n\n  // The name of the stats sink to instantiate. The name must match a supported\n  // stats sink. The built-in stats sinks are:\n  //\n  // * :ref:`envoy.stat_sinks.statsd <envoy_api_msg_config.metrics.v3.StatsdSink>`\n  // * :ref:`envoy.stat_sinks.dog_statsd <envoy_api_msg_config.metrics.v3.DogStatsdSink>`\n  // * :ref:`envoy.stat_sinks.metrics_service <envoy_api_msg_config.metrics.v3.MetricsServiceConfig>`\n  // * :ref:`envoy.stat_sinks.hystrix <envoy_api_msg_config.metrics.v3.HystrixSink>`\n  //\n  // Sinks optionally support tagged/multiple dimensional metrics.\n  string name = 1;\n\n  // Stats sink specific configuration which depends on the sink being instantiated. 
See\n  // :ref:`StatsdSink <envoy_api_msg_config.metrics.v3.StatsdSink>` for an example.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n}\n\n// Statistics configuration such as tagging.\nmessage StatsConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.StatsConfig\";\n\n  // Each stat name is iteratively processed through these tag specifiers.\n  // When a tag is matched, the first capture group is removed from the name so\n  // later :ref:`TagSpecifiers <envoy_api_msg_config.metrics.v3.TagSpecifier>` cannot match that\n  // same portion of the match.\n  repeated TagSpecifier stats_tags = 1;\n\n  // Use all default tag regexes specified in Envoy. These can be combined with\n  // custom tags specified in :ref:`stats_tags\n  // <envoy_api_field_config.metrics.v3.StatsConfig.stats_tags>`. They will be processed before\n  // the custom tags.\n  //\n  // .. note::\n  //\n  //   If any default tags are specified twice, the config will be considered\n  //   invalid.\n  //\n  // See :repo:`well_known_names.h <source/common/config/well_known_names.h>` for a list of the\n  // default tags in Envoy.\n  //\n  // If not provided, the value is assumed to be true.\n  google.protobuf.BoolValue use_all_default_tags = 2;\n\n  // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated\n  // as normal. Preventing the instantiation of certain families of stats can improve memory\n  // performance for Envoys running especially large configs.\n  //\n  // .. warning::\n  //   Excluding stats may affect Envoy's behavior in undocumented ways. 
See\n  //   `issue #8771 <https://github.com/envoyproxy/envoy/issues/8771>`_ for more information.\n  //   If any unexpected behavior changes are observed, please open a new issue immediately.\n  StatsMatcher stats_matcher = 3;\n\n  // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first\n  // match is applied. If no match is found (or if no rules are set), the following default buckets\n  // are used:\n  //\n  //   .. code-block:: json\n  //\n  //     [\n  //       0.5,\n  //       1,\n  //       5,\n  //       10,\n  //       25,\n  //       50,\n  //       100,\n  //       250,\n  //       500,\n  //       1000,\n  //       2500,\n  //       5000,\n  //       10000,\n  //       30000,\n  //       60000,\n  //       300000,\n  //       600000,\n  //       1800000,\n  //       3600000\n  //     ]\n  repeated HistogramBucketSettings histogram_bucket_settings = 4;\n}\n\n// Configuration for disabling stat instantiation.\nmessage StatsMatcher {\n  // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to\n  // instantiate all stats, there is no need to construct a StatsMatcher.\n  //\n  // However, StatsMatcher can be used to limit the creation of families of stats in order to\n  // conserve memory. Stats can either be disabled entirely, or they can be\n  // limited by either an exclusion or an inclusion list of :ref:`StringMatcher\n  // <envoy_api_msg_type.matcher.v3.StringMatcher>` protos:\n  //\n  // * If `reject_all` is set to `true`, no stats will be instantiated. 
If `reject_all` is set to\n  //   `false`, all stats will be instantiated.\n  //\n  // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the\n  //   list will not instantiate.\n  //\n  // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of\n  //   the StringMatchers in the list.\n  //\n  //\n  // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex.\n  // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based\n  // matcher rather than a regex-based matcher.\n  //\n  // Example 1. Excluding all stats.\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"rejectAll\": \"true\"\n  //     }\n  //   }\n  //\n  // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"exclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n  // Example 3. Including only manager-related stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"inclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster_manager.\"\n  //           },\n  //           {\n  //             \"prefix\": \"listener_manager.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.StatsMatcher\";\n\n  oneof stats_matcher {\n    option (validate.required) = true;\n\n    // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all\n    // stats are enabled.\n    bool reject_all = 1;\n\n    // Exclusive match. 
All stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.v3.ListStringMatcher exclusion_list = 2;\n\n    // Inclusive match. No stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.v3.ListStringMatcher inclusion_list = 3;\n  }\n}\n\n// Designates a tag name and value pair. The value may be either a fixed value\n// or a regex providing the value via capture groups. The specified tag will be\n// unconditionally set if a fixed value, otherwise it will only be set if one\n// or more capture groups in the regex match.\nmessage TagSpecifier {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.TagSpecifier\";\n\n  // Attaches an identifier to the tag values to identify the tag being in the\n  // sink. Envoy has a set of default names and regexes to extract dynamic\n  // portions of existing stats, which can be found in :repo:`well_known_names.h\n  // <source/common/config/well_known_names.h>` in the Envoy repository. If a :ref:`tag_name\n  // <envoy_api_field_config.metrics.v3.TagSpecifier.tag_name>` is provided in the config and\n  // neither :ref:`regex <envoy_api_field_config.metrics.v3.TagSpecifier.regex>` or\n  // :ref:`fixed_value <envoy_api_field_config.metrics.v3.TagSpecifier.fixed_value>` were specified,\n  // Envoy will attempt to find that name in its set of defaults and use the accompanying regex.\n  //\n  // .. note::\n  //\n  //   It is invalid to specify the same tag name twice in a config.\n  string tag_name = 1;\n\n  oneof tag_value {\n    // Designates a tag to strip from the tag extracted name and provide as a named\n    // tag value for all statistics. This will only occur if any part of the name\n    // matches the regex provided with one or more capture groups.\n    //\n    // The first capture group identifies the portion of the name to remove. 
The\n    // second capture group (which will normally be nested inside the first) will\n    // designate the value of the tag for the statistic. If no second capture\n    // group is provided, the first will also be used to set the value of the tag.\n    // All other capture groups will be ignored.\n    //\n    // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and\n    // one tag specifier:\n    //\n    // .. code-block:: json\n    //\n    //   {\n    //     \"tag_name\": \"envoy.cluster_name\",\n    //     \"regex\": \"^cluster\\\\.((.+?)\\\\.)\"\n    //   }\n    //\n    // Note that the regex will remove ``foo_cluster.`` making the tag extracted\n    // name ``cluster.upstream_rq_timeout`` and the tag value for\n    // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no\n    // ``.`` character because of the second capture group).\n    //\n    // Example 2. a stat name\n    // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two\n    // tag specifiers:\n    //\n    // .. code-block:: json\n    //\n    //   [\n    //     {\n    //       \"tag_name\": \"envoy.http_user_agent\",\n    //       \"regex\": \"^http(?=\\\\.).*?\\\\.user_agent\\\\.((.+?)\\\\.)\\\\w+?$\"\n    //     },\n    //     {\n    //       \"tag_name\": \"envoy.http_conn_manager_prefix\",\n    //       \"regex\": \"^http\\\\.((.*?)\\\\.)\"\n    //     }\n    //   ]\n    //\n    // The two regexes of the specifiers will be processed in the definition order.\n    //\n    // The first regex will remove ``ios.``, leaving the tag extracted name\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``. 
The tag\n    // ``envoy.http_user_agent`` will be added with tag value ``ios``.\n    //\n    // The second regex will remove ``connection_manager_1.`` from the tag\n    // extracted name produced by the first regex\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving\n    // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag\n    // ``envoy.http_conn_manager_prefix`` will be added with the tag value\n    // ``connection_manager_1``.\n    string regex = 2 [(validate.rules).string = {max_bytes: 1024}];\n\n    // Specifies a fixed tag value for the ``tag_name``.\n    string fixed_value = 3;\n  }\n}\n\n// Specifies a matcher for stats and the buckets that matching stats should use.\nmessage HistogramBucketSettings {\n  // The stats that this rule applies to. The match is applied to the original stat name\n  // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`.\n  type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}];\n\n  // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique.\n  // The order of the buckets does not matter.\n  repeated double buckets = 2 [(validate.rules).repeated = {\n    min_items: 1\n    unique: true\n    items {double {gt: 0.0}}\n  }];\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support\n// tagged metrics.\n// [#extension: envoy.stat_sinks.statsd]\nmessage StatsdSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.metrics.v2.StatsdSink\";\n\n  oneof statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running `statsd <https://github.com/etsy/statsd>`_\n    // compliant listener. 
If specified, statistics will be flushed to this\n    // address.\n    core.v3.Address address = 1;\n\n    // The name of a cluster that is running a TCP `statsd\n    // <https://github.com/etsy/statsd>`_ compliant listener. If specified,\n    // Envoy will connect to this cluster to flush statistics.\n    string tcp_cluster_name = 2;\n  }\n\n  // Optional custom prefix for StatsdSink. If\n  // specified, this will override the default prefix.\n  // For example:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"prefix\" : \"envoy-prod\"\n  //   }\n  //\n  // will change emitted stats to\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy-prod.test_counter:1|c\n  //   envoy-prod.test_timer:5|ms\n  //\n  // Note that the default prefix, \"envoy\", will be used if a prefix is not\n  // specified.\n  //\n  // Stats with default prefix:\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy.test_counter:1|c\n  //   envoy.test_timer:5|ms\n  string prefix = 3;\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink.\n// The sink emits stats with `DogStatsD <https://docs.datadoghq.com/guides/dogstatsd/>`_\n// compatible tags. Tags are configurable via :ref:`StatsConfig\n// <envoy_api_msg_config.metrics.v3.StatsConfig>`.\n// [#extension: envoy.stat_sinks.dog_statsd]\nmessage DogStatsdSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.DogStatsdSink\";\n\n  reserved 2;\n\n  oneof dog_statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running DogStatsD compliant listener. If specified,\n    // statistics will be flushed to this address.\n    core.v3.Address address = 1;\n  }\n\n  // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field\n  // <envoy_api_field_config.metrics.v3.StatsdSink.prefix>` for more details.\n  string prefix = 3;\n\n  // Optional max datagram size to use when sending UDP messages. 
By default Envoy\n  // will emit one metric per datagram. By specifying a max-size larger than a single\n  // metric, Envoy will emit multiple, new-line separated metrics. The max datagram\n  // size should not exceed your network's MTU.\n  //\n  // Note that this value may not be respected if smaller than a single metric.\n  google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}];\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink.\n// The sink emits stats in `text/event-stream\n// <https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events>`_\n// formatted stream for use by `Hystrix dashboard\n// <https://github.com/Netflix-Skunkworks/hystrix-dashboard/wiki>`_.\n//\n// Note that only a single HystrixSink should be configured.\n//\n// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`.\n// [#extension: envoy.stat_sinks.hystrix]\nmessage HystrixSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v2.HystrixSink\";\n\n  // The number of buckets the rolling statistical window is divided into.\n  //\n  // Each time the sink is flushed, all relevant Envoy statistics are sampled and\n  // added to the rolling window (removing the oldest samples in the window\n  // in the process). The sink then outputs the aggregate statistics across the\n  // current rolling window to the event stream(s).\n  //\n  // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets\n  //\n  // More detailed explanation can be found in `Hystrix wiki\n  // <https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#hystrixrollingnumber>`_.\n  int64 num_buckets = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/metrics/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/metrics/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v4alpha;\n\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v4alpha\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Metrics service]\n\n// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink\n// <envoy_api_msg_config.metrics.v4alpha.StatsSink>`. This opaque configuration will be used to create\n// Metrics Service.\n// [#extension: envoy.stat_sinks.metrics_service]\nmessage MetricsServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.MetricsServiceConfig\";\n\n  // The upstream gRPC cluster that hosts the metrics service.\n  core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n\n  // API version for metric service transport protocol. This describes the metric service gRPC\n  // endpoint and version of messages used on the wire.\n  core.v4alpha.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // If true, counters are reported as the delta between flushing intervals. Otherwise, the current\n  // counter value is reported. Defaults to false.\n  // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the\n  // sink will take updates from the :ref:`MetricsResponse <envoy_api_msg_service.metrics.v4alpha.StreamMetricsResponse>`.\n  google.protobuf.BoolValue report_counters_as_deltas = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.metrics.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.metrics.v4alpha\";\noption java_outer_classname = \"StatsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Stats]\n// Statistics :ref:`architecture overview <arch_overview_statistics>`.\n\n// Configuration for pluggable stats sinks.\nmessage StatsSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.metrics.v3.StatsSink\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the stats sink to instantiate. The name must match a supported\n  // stats sink. The built-in stats sinks are:\n  //\n  // * :ref:`envoy.stat_sinks.statsd <envoy_api_msg_config.metrics.v4alpha.StatsdSink>`\n  // * :ref:`envoy.stat_sinks.dog_statsd <envoy_api_msg_config.metrics.v4alpha.DogStatsdSink>`\n  // * :ref:`envoy.stat_sinks.metrics_service <envoy_api_msg_config.metrics.v4alpha.MetricsServiceConfig>`\n  // * :ref:`envoy.stat_sinks.hystrix <envoy_api_msg_config.metrics.v4alpha.HystrixSink>`\n  //\n  // Sinks optionally support tagged/multiple dimensional metrics.\n  string name = 1;\n\n  // Stats sink specific configuration which depends on the sink being instantiated. 
See\n  // :ref:`StatsdSink <envoy_api_msg_config.metrics.v4alpha.StatsdSink>` for an example.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// Statistics configuration such as tagging.\nmessage StatsConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.StatsConfig\";\n\n  // Each stat name is iteratively processed through these tag specifiers.\n  // When a tag is matched, the first capture group is removed from the name so\n  // later :ref:`TagSpecifiers <envoy_api_msg_config.metrics.v4alpha.TagSpecifier>` cannot match that\n  // same portion of the match.\n  repeated TagSpecifier stats_tags = 1;\n\n  // Use all default tag regexes specified in Envoy. These can be combined with\n  // custom tags specified in :ref:`stats_tags\n  // <envoy_api_field_config.metrics.v4alpha.StatsConfig.stats_tags>`. They will be processed before\n  // the custom tags.\n  //\n  // .. note::\n  //\n  //   If any default tags are specified twice, the config will be considered\n  //   invalid.\n  //\n  // See :repo:`well_known_names.h <source/common/config/well_known_names.h>` for a list of the\n  // default tags in Envoy.\n  //\n  // If not provided, the value is assumed to be true.\n  google.protobuf.BoolValue use_all_default_tags = 2;\n\n  // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated\n  // as normal. Preventing the instantiation of certain families of stats can improve memory\n  // performance for Envoys running especially large configs.\n  //\n  // .. warning::\n  //   Excluding stats may affect Envoy's behavior in undocumented ways. See\n  //   `issue #8771 <https://github.com/envoyproxy/envoy/issues/8771>`_ for more information.\n  //   If any unexpected behavior changes are observed, please open a new issue immediately.\n  StatsMatcher stats_matcher = 3;\n\n  // Defines rules for setting the histogram buckets. 
Rules are evaluated in order, and the first\n  // match is applied. If no match is found (or if no rules are set), the following default buckets\n  // are used:\n  //\n  //   .. code-block:: json\n  //\n  //     [\n  //       0.5,\n  //       1,\n  //       5,\n  //       10,\n  //       25,\n  //       50,\n  //       100,\n  //       250,\n  //       500,\n  //       1000,\n  //       2500,\n  //       5000,\n  //       10000,\n  //       30000,\n  //       60000,\n  //       300000,\n  //       600000,\n  //       1800000,\n  //       3600000\n  //     ]\n  repeated HistogramBucketSettings histogram_bucket_settings = 4;\n}\n\n// Configuration for disabling stat instantiation.\nmessage StatsMatcher {\n  // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to\n  // instantiate all stats, there is no need to construct a StatsMatcher.\n  //\n  // However, StatsMatcher can be used to limit the creation of families of stats in order to\n  // conserve memory. Stats can either be disabled entirely, or they can be\n  // limited by either an exclusion or an inclusion list of :ref:`StringMatcher\n  // <envoy_api_msg_type.matcher.v4alpha.StringMatcher>` protos:\n  //\n  // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to\n  //   `false`, all stats will be instantiated.\n  //\n  // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the\n  //   list will not instantiate.\n  //\n  // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of\n  //   the StringMatchers in the list.\n  //\n  //\n  // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex.\n  // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based\n  // matcher rather than a regex-based matcher.\n  //\n  // Example 1. Excluding all stats.\n  //\n  // .. 
code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"rejectAll\": \"true\"\n  //     }\n  //   }\n  //\n  // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"exclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n  // Example 3. Including only manager-related stats:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"statsMatcher\": {\n  //       \"inclusionList\": {\n  //         \"patterns\": [\n  //           {\n  //             \"prefix\": \"cluster_manager.\"\n  //           },\n  //           {\n  //             \"prefix\": \"listener_manager.\"\n  //           }\n  //         ]\n  //       }\n  //     }\n  //   }\n  //\n\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.StatsMatcher\";\n\n  oneof stats_matcher {\n    option (validate.required) = true;\n\n    // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all\n    // stats are enabled.\n    bool reject_all = 1;\n\n    // Exclusive match. All stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.v4alpha.ListStringMatcher exclusion_list = 2;\n\n    // Inclusive match. No stats are enabled except for those matching one of the supplied\n    // StringMatcher protos.\n    type.matcher.v4alpha.ListStringMatcher inclusion_list = 3;\n  }\n}\n\n// Designates a tag name and value pair. The value may be either a fixed value\n// or a regex providing the value via capture groups. 
The specified tag will be\n// unconditionally set if a fixed value, otherwise it will only be set if one\n// or more capture groups in the regex match.\nmessage TagSpecifier {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.TagSpecifier\";\n\n  // Attaches an identifier to the tag values to identify the tag being in the\n  // sink. Envoy has a set of default names and regexes to extract dynamic\n  // portions of existing stats, which can be found in :repo:`well_known_names.h\n  // <source/common/config/well_known_names.h>` in the Envoy repository. If a :ref:`tag_name\n  // <envoy_api_field_config.metrics.v4alpha.TagSpecifier.tag_name>` is provided in the config and\n  // neither :ref:`regex <envoy_api_field_config.metrics.v4alpha.TagSpecifier.regex>` or\n  // :ref:`fixed_value <envoy_api_field_config.metrics.v4alpha.TagSpecifier.fixed_value>` were specified,\n  // Envoy will attempt to find that name in its set of defaults and use the accompanying regex.\n  //\n  // .. note::\n  //\n  //   It is invalid to specify the same tag name twice in a config.\n  string tag_name = 1;\n\n  oneof tag_value {\n    // Designates a tag to strip from the tag extracted name and provide as a named\n    // tag value for all statistics. This will only occur if any part of the name\n    // matches the regex provided with one or more capture groups.\n    //\n    // The first capture group identifies the portion of the name to remove. The\n    // second capture group (which will normally be nested inside the first) will\n    // designate the value of the tag for the statistic. If no second capture\n    // group is provided, the first will also be used to set the value of the tag.\n    // All other capture groups will be ignored.\n    //\n    // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and\n    // one tag specifier:\n    //\n    // .. 
code-block:: json\n    //\n    //   {\n    //     \"tag_name\": \"envoy.cluster_name\",\n    //     \"regex\": \"^cluster\\\\.((.+?)\\\\.)\"\n    //   }\n    //\n    // Note that the regex will remove ``foo_cluster.`` making the tag extracted\n    // name ``cluster.upstream_rq_timeout`` and the tag value for\n    // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no\n    // ``.`` character because of the second capture group).\n    //\n    // Example 2. a stat name\n    // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two\n    // tag specifiers:\n    //\n    // .. code-block:: json\n    //\n    //   [\n    //     {\n    //       \"tag_name\": \"envoy.http_user_agent\",\n    //       \"regex\": \"^http(?=\\\\.).*?\\\\.user_agent\\\\.((.+?)\\\\.)\\\\w+?$\"\n    //     },\n    //     {\n    //       \"tag_name\": \"envoy.http_conn_manager_prefix\",\n    //       \"regex\": \"^http\\\\.((.*?)\\\\.)\"\n    //     }\n    //   ]\n    //\n    // The two regexes of the specifiers will be processed in the definition order.\n    //\n    // The first regex will remove ``ios.``, leaving the tag extracted name\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag\n    // ``envoy.http_user_agent`` will be added with tag value ``ios``.\n    //\n    // The second regex will remove ``connection_manager_1.`` from the tag\n    // extracted name produced by the first regex\n    // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving\n    // ``http.user_agent.downstream_cx_total`` as the tag extracted name. 
The tag\n    // ``envoy.http_conn_manager_prefix`` will be added with the tag value\n    // ``connection_manager_1``.\n    string regex = 2 [(validate.rules).string = {max_bytes: 1024}];\n\n    // Specifies a fixed tag value for the ``tag_name``.\n    string fixed_value = 3;\n  }\n}\n\n// Specifies a matcher for stats and the buckets that matching stats should use.\nmessage HistogramBucketSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.HistogramBucketSettings\";\n\n  // The stats that this rule applies to. The match is applied to the original stat name\n  // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`.\n  type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}];\n\n  // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique.\n  // The order of the buckets does not matter.\n  repeated double buckets = 2 [(validate.rules).repeated = {\n    min_items: 1\n    unique: true\n    items {double {gt: 0.0}}\n  }];\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support\n// tagged metrics.\n// [#extension: envoy.stat_sinks.statsd]\nmessage StatsdSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.metrics.v3.StatsdSink\";\n\n  oneof statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running `statsd <https://github.com/etsy/statsd>`_\n    // compliant listener. If specified, statistics will be flushed to this\n    // address.\n    core.v4alpha.Address address = 1;\n\n    // The name of a cluster that is running a TCP `statsd\n    // <https://github.com/etsy/statsd>`_ compliant listener. If specified,\n    // Envoy will connect to this cluster to flush statistics.\n    string tcp_cluster_name = 2;\n  }\n\n  // Optional custom prefix for StatsdSink. 
If\n  // specified, this will override the default prefix.\n  // For example:\n  //\n  // .. code-block:: json\n  //\n  //   {\n  //     \"prefix\" : \"envoy-prod\"\n  //   }\n  //\n  // will change emitted stats to\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy-prod.test_counter:1|c\n  //   envoy-prod.test_timer:5|ms\n  //\n  // Note that the default prefix, \"envoy\", will be used if a prefix is not\n  // specified.\n  //\n  // Stats with default prefix:\n  //\n  // .. code-block:: cpp\n  //\n  //   envoy.test_counter:1|c\n  //   envoy.test_timer:5|ms\n  string prefix = 3;\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink.\n// The sink emits stats with `DogStatsD <https://docs.datadoghq.com/guides/dogstatsd/>`_\n// compatible tags. Tags are configurable via :ref:`StatsConfig\n// <envoy_api_msg_config.metrics.v4alpha.StatsConfig>`.\n// [#extension: envoy.stat_sinks.dog_statsd]\nmessage DogStatsdSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.DogStatsdSink\";\n\n  reserved 2;\n\n  oneof dog_statsd_specifier {\n    option (validate.required) = true;\n\n    // The UDP address of a running DogStatsD compliant listener. If specified,\n    // statistics will be flushed to this address.\n    core.v4alpha.Address address = 1;\n  }\n\n  // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field\n  // <envoy_api_field_config.metrics.v4alpha.StatsdSink.prefix>` for more details.\n  string prefix = 3;\n\n  // Optional max datagram size to use when sending UDP messages. By default Envoy\n  // will emit one metric per datagram. By specifying a max-size larger than a single\n  // metric, Envoy will emit multiple, new-line separated metrics. 
The max datagram\n  // size should not exceed your network's MTU.\n  //\n  // Note that this value may not be respected if smaller than a single metric.\n  google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}];\n}\n\n// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink.\n// The sink emits stats in `text/event-stream\n// <https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events>`_\n// formatted stream for use by `Hystrix dashboard\n// <https://github.com/Netflix-Skunkworks/hystrix-dashboard/wiki>`_.\n//\n// Note that only a single HystrixSink should be configured.\n//\n// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`.\n// [#extension: envoy.stat_sinks.hystrix]\nmessage HystrixSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.metrics.v3.HystrixSink\";\n\n  // The number of buckets the rolling statistical window is divided into.\n  //\n  // Each time the sink is flushed, all relevant Envoy statistics are sampled and\n  // added to the rolling window (removing the oldest samples in the window\n  // in the process). The sink then outputs the aggregate statistics across the\n  // current rolling window to the event stream(s).\n  //\n  // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets\n  //\n  // More detailed explanation can be found in `Hystrix wiki\n  // <https://github.com/Netflix/Hystrix/wiki/Metrics-and-Monitoring#hystrixrollingnumber>`_.\n  int64 num_buckets = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/overload/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/overload/v2alpha/overload.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.overload.v2alpha;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.overload.v2alpha\";\noption java_outer_classname = \"OverloadProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Overload Manager]\n\n// The Overload Manager provides an extensible framework to protect Envoy instances\n// from overload of various resources (memory, cpu, file descriptors, etc).\n// It monitors a configurable set of resources and notifies registered listeners\n// when triggers related to those resources fire.\n\nmessage ResourceMonitor {\n  // The name of the resource monitor to instantiate. Must match a registered\n  // resource monitor type. The built-in resource monitors are:\n  //\n  // * :ref:`envoy.resource_monitors.fixed_heap\n  //   <envoy_api_msg_config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig>`\n  // * :ref:`envoy.resource_monitors.injected_resource\n  //   <envoy_api_msg_config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig>`\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Configuration for the resource monitor being instantiated.\n  oneof config_type {\n    google.protobuf.Struct config = 2 [deprecated = true];\n\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\nmessage ThresholdTrigger {\n  // If the resource pressure is greater than or equal to this value, the trigger\n  // will fire.\n  double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}];\n}\n\nmessage Trigger {\n  // The name of the resource this is a trigger for.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof trigger_oneof {\n    option (validate.required) = 
true;\n\n    ThresholdTrigger threshold = 2;\n  }\n}\n\nmessage OverloadAction {\n  // The name of the overload action. This is just a well-known string that listeners can\n  // use for registering callbacks. Custom overload actions should be named using reverse\n  // DNS to ensure uniqueness.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // A set of triggers for this action. If any of these triggers fire the overload action\n  // is activated. Listeners are notified when the overload action transitions from\n  // inactivated to activated, or vice versa.\n  repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\nmessage OverloadManager {\n  // The interval for refreshing resource usage.\n  google.protobuf.Duration refresh_interval = 1;\n\n  // The set of resources to monitor.\n  repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The set of overload actions.\n  repeated OverloadAction actions = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/overload/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/overload/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/overload/v3/overload.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.overload.v3;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.overload.v3\";\noption java_outer_classname = \"OverloadProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Overload Manager]\n\n// The Overload Manager provides an extensible framework to protect Envoy instances\n// from overload of various resources (memory, cpu, file descriptors, etc).\n// It monitors a configurable set of resources and notifies registered listeners\n// when triggers related to those resources fire.\n\nmessage ResourceMonitor {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.overload.v2alpha.ResourceMonitor\";\n\n  // The name of the resource monitor to instantiate. Must match a registered\n  // resource monitor type. 
The built-in resource monitors are:\n  //\n  // * :ref:`envoy.resource_monitors.fixed_heap\n  //   <envoy_api_msg_config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig>`\n  // * :ref:`envoy.resource_monitors.injected_resource\n  //   <envoy_api_msg_config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig>`\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Configuration for the resource monitor being instantiated.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n}\n\nmessage ThresholdTrigger {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.overload.v2alpha.ThresholdTrigger\";\n\n  // If the resource pressure is greater than or equal to this value, the trigger\n  // will enter saturation.\n  double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}];\n}\n\nmessage ScaledTrigger {\n  // If the resource pressure is greater than this value, the trigger will be in the\n  // :ref:`scaling <arch_overview_overload_manager-triggers-state>` state with value\n  // `(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)`.\n  double scaling_threshold = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}];\n\n  // If the resource pressure is greater than this value, the trigger will enter saturation.\n  double saturation_threshold = 2 [(validate.rules).double = {lte: 1.0 gte: 0.0}];\n}\n\nmessage Trigger {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.overload.v2alpha.Trigger\";\n\n  // The name of the resource this is a trigger for.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof trigger_oneof {\n    option (validate.required) = true;\n\n    ThresholdTrigger threshold = 2;\n\n    ScaledTrigger scaled = 3;\n  }\n}\n\nmessage OverloadAction {\n  option (udpa.annotations.versioning).previous_message_type =\n    
  \"envoy.config.overload.v2alpha.OverloadAction\";\n\n  // The name of the overload action. This is just a well-known string that listeners can\n  // use for registering callbacks. Custom overload actions should be named using reverse\n  // DNS to ensure uniqueness.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A set of triggers for this action. The state of the action is the maximum\n  // state of all triggers, which can be scaling between 0 and 1 or saturated. Listeners\n  // are notified when the overload action changes state.\n  repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\nmessage OverloadManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.overload.v2alpha.OverloadManager\";\n\n  // The interval for refreshing resource usage.\n  google.protobuf.Duration refresh_interval = 1;\n\n  // The set of resources to monitor.\n  repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The set of overload actions.\n  repeated OverloadAction actions = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/ratelimit/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/ratelimit/v2/rls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.ratelimit.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.ratelimit.v2\";\noption java_outer_classname = \"RlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate limit service]\n\n// Rate limit :ref:`configuration overview <config_rate_limit_service>`.\nmessage RateLimitServiceConfig {\n  reserved 1, 3;\n\n  // Specifies the gRPC service that hosts the rate limit service. The client\n  // will connect to this cluster when it needs to make rate limit service\n  // requests.\n  api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/ratelimit/v3/rls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.ratelimit.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.ratelimit.v3\";\noption java_outer_classname = \"RlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate limit service]\n\n// Rate limit :ref:`configuration overview <config_rate_limit_service>`.\nmessage RateLimitServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.ratelimit.v2.RateLimitServiceConfig\";\n\n  reserved 1, 3;\n\n  // Specifies the gRPC service that hosts the rate limit service. The client\n  // will connect to this cluster when it needs to make rate limit service\n  // requests.\n  core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n\n  // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and\n  // version of messages used on the wire.\n  core.v3.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/rbac/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/rbac/v2/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.rbac.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\nimport \"envoy/type/matcher/metadata.proto\";\nimport \"envoy/type/matcher/path.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/api/expr/v1alpha1/syntax.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.rbac.v2\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Role Based Access Control (RBAC)]\n\n// Role Based Access Control (RBAC) provides service-level and method-level access control for a\n// service. RBAC policies are additive. The policies are examined in order. A request is allowed\n// once a matching policy is found (suppose the `action` is ALLOW).\n//\n// Here is an example of RBAC configuration. It has two policies:\n//\n// * Service account \"cluster.local/ns/default/sa/admin\" has full access to the service, and so\n//   does \"cluster.local/ns/default/sa/superuser\".\n//\n// * Any user can read (\"GET\") the service at paths with prefix \"/products\", so long as the\n//   destination port is either 80 or 443.\n//\n//  .. 
code-block:: yaml\n//\n//   action: ALLOW\n//   policies:\n//     \"service-admin\":\n//       permissions:\n//         - any: true\n//       principals:\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/admin\"\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/superuser\"\n//     \"product-viewer\":\n//       permissions:\n//           - and_rules:\n//               rules:\n//                 - header: { name: \":method\", exact_match: \"GET\" }\n//                 - url_path:\n//                     path: { prefix: \"/products\" }\n//                 - or_rules:\n//                     rules:\n//                       - destination_port: 80\n//                       - destination_port: 443\n//       principals:\n//         - any: true\n//\nmessage RBAC {\n  // Should we do safe-list or block-list style access control?\n  enum Action {\n    // The policies grant access to principals. The rest is denied. This is safe-list style\n    // access control. This is the default type.\n    ALLOW = 0;\n\n    // The policies deny access to principals. The rest is allowed. This is block-list style\n    // access control.\n    DENY = 1;\n  }\n\n  // The action to take if a policy matches. The request is allowed if and only if:\n  //\n  //   * `action` is \"ALLOWED\" and at least one policy matches\n  //   * `action` is \"DENY\" and none of the policies match\n  Action action = 1;\n\n  // Maps from policy name to policy. A match occurs when at least one policy matches the request.\n  map<string, Policy> policies = 2;\n}\n\n// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if\n// and only if at least one of its permissions match the action taking place AND at least one of its\n// principals match the downstream AND the condition is true if specified.\nmessage Policy {\n  // Required. 
The set of permissions that define a role. Each permission is matched with OR\n  // semantics. To match all actions for this policy, a single Permission with the `any` field set\n  // to true should be used.\n  repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Required. The set of principals that are assigned/denied the role based on “action”. Each\n  // principal is matched with OR semantics. To match all downstreams for this policy, a single\n  // Principal with the `any` field set to true should be used.\n  repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // An optional symbolic expression specifying an access control\n  // :ref:`condition <arch_overview_condition>`. The condition is combined\n  // with the permissions and the principals as a clause with AND semantics.\n  google.api.expr.v1alpha1.Expr condition = 3;\n}\n\n// Permission defines an action (or actions) that a principal can take.\n// [#next-free-field: 11]\nmessage Permission {\n  // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context,\n  // each are applied with the associated behavior.\n  message Set {\n    repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set of rules that all must match in order to define the action.\n    Set and_rules = 1;\n\n    // A set of rules where at least one must match in order to define the action.\n    Set or_rules = 2;\n\n    // When any is set, it matches any action.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only\n    // available for HTTP request.\n    // Note: the pseudo-header :path includes the query and fragment string. 
Use the `url_path`\n    // field if you want to match the URL path without the query and fragment string.\n    api.v2.route.HeaderMatcher header = 4;\n\n    // A URL path on the incoming HTTP request. Only available for HTTP.\n    type.matcher.PathMatcher url_path = 10;\n\n    // A CIDR block that describes the destination IP.\n    api.v2.core.CidrRange destination_ip = 5;\n\n    // A port number that describes the destination port connecting to.\n    uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}];\n\n    // Metadata that describes additional information about the action.\n    type.matcher.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided permission. For instance, if the value of `not_rule` would\n    // match, this permission would not match. Conversely, if the value of `not_rule` would not\n    // match, this permission would match.\n    Permission not_rule = 8;\n\n    // The request server from the client's connection request. This is\n    // typically TLS SNI.\n    //\n    // .. 
attention::\n    //\n    //   The behavior of this field may be affected by how Envoy is configured\n    //   as explained below.\n    //\n    //   * If the :ref:`TLS Inspector <config_listener_filters_tls_inspector>`\n    //     filter is not added, and if a `FilterChainMatch` is not defined for\n    //     the :ref:`server name <envoy_api_field_listener.FilterChainMatch.server_names>`,\n    //     a TLS connection's requested SNI server name will be treated as if it\n    //     wasn't present.\n    //\n    //   * A :ref:`listener filter <arch_overview_listener_filters>` may\n    //     overwrite a connection's requested server name within Envoy.\n    //\n    // Please refer to :ref:`this FAQ entry <faq_how_to_setup_sni>` to learn to\n    // setup SNI.\n    type.matcher.StringMatcher requested_server_name = 9;\n  }\n}\n\n// Principal defines an identity or a group of identities for a downstream subject.\n// [#next-free-field: 12]\nmessage Principal {\n  // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context,\n  // each are applied with the associated behavior.\n  message Set {\n    repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Authentication attributes for a downstream.\n  message Authenticated {\n    reserved 1;\n\n    // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the\n    // certificate, otherwise the subject field is used. 
If unset, it applies to any user that is\n    // authenticated.\n    type.matcher.StringMatcher principal_name = 2;\n  }\n\n  oneof identifier {\n    option (validate.required) = true;\n\n    // A set of identifiers that all must match in order to define the downstream.\n    Set and_ids = 1;\n\n    // A set of identifiers at least one must match in order to define the downstream.\n    Set or_ids = 2;\n\n    // When any is set, it matches any downstream.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // Authenticated attributes that identify the downstream.\n    Authenticated authenticated = 4;\n\n    // A CIDR block that describes the downstream IP.\n    // This address will honor proxy protocol, but will not honor XFF.\n    api.v2.core.CidrRange source_ip = 5 [deprecated = true];\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This is always the physical peer even if the\n    // :ref:`remote_ip <envoy_api_field_config.rbac.v2.Principal.remote_ip>` is inferred\n    // from for example the x-forwarder-for header, proxy protocol, etc.\n    api.v2.core.CidrRange direct_remote_ip = 10;\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This may not be the physical peer and could be different from the\n    // :ref:`direct_remote_ip <envoy_api_field_config.rbac.v2.Principal.direct_remote_ip>`.\n    // E.g, if the remote ip is inferred from for example the x-forwarder-for header,\n    // proxy protocol, etc.\n    api.v2.core.CidrRange remote_ip = 11;\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only\n    // available for HTTP request.\n    // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path`\n    // field if you want to match the URL path without the query and fragment string.\n    api.v2.route.HeaderMatcher header = 6;\n\n    // A URL path on the incoming HTTP request. 
Only available for HTTP.\n    type.matcher.PathMatcher url_path = 9;\n\n    // Metadata that describes additional information about the principal.\n    type.matcher.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided principal. For instance, if the value of `not_id` would match,\n    // this principal would not match. Conversely, if the value of `not_id` would not match, this\n    // principal would match.\n    Principal not_id = 8;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/rbac/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/rbac/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/rbac/v3/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.rbac.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/metadata.proto\";\nimport \"envoy/type/matcher/v3/path.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/api/expr/v1alpha1/checked.proto\";\nimport \"google/api/expr/v1alpha1/syntax.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.rbac.v3\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Role Based Access Control (RBAC)]\n\n// Role Based Access Control (RBAC) provides service-level and method-level access control for a\n// service. RBAC policies are additive. The policies are examined in order. Requests are allowed\n// or denied based on the `action` and whether a matching policy is found. For instance, if the\n// action is ALLOW and a matching policy is found the request should be allowed.\n//\n// RBAC can also be used to make access logging decisions by communicating with access loggers\n// through dynamic metadata. When the action is LOG and at least one policy matches, the\n// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating\n// the request should be logged.\n//\n// Here is an example of RBAC configuration. It has two policies:\n//\n// * Service account \"cluster.local/ns/default/sa/admin\" has full access to the service, and so\n//   does \"cluster.local/ns/default/sa/superuser\".\n//\n// * Any user can read (\"GET\") the service at paths with prefix \"/products\", so long as the\n//   destination port is either 80 or 443.\n//\n//  .. 
code-block:: yaml\n//\n//   action: ALLOW\n//   policies:\n//     \"service-admin\":\n//       permissions:\n//         - any: true\n//       principals:\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/admin\"\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/superuser\"\n//     \"product-viewer\":\n//       permissions:\n//           - and_rules:\n//               rules:\n//                 - header: { name: \":method\", exact_match: \"GET\" }\n//                 - url_path:\n//                     path: { prefix: \"/products\" }\n//                 - or_rules:\n//                     rules:\n//                       - destination_port: 80\n//                       - destination_port: 443\n//       principals:\n//         - any: true\n//\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v2.RBAC\";\n\n  // Should we do safe-list or block-list style access control?\n  enum Action {\n    // The policies grant access to principals. The rest are denied. This is safe-list style\n    // access control. This is the default type.\n    ALLOW = 0;\n\n    // The policies deny access to principals. The rest are allowed. This is block-list style\n    // access control.\n    DENY = 1;\n\n    // The policies set the `access_log_hint` dynamic metadata key based on if requests match.\n    // All requests are allowed.\n    LOG = 2;\n  }\n\n  // The action to take if a policy matches. Every action either allows or denies a request,\n  // and can also carry out action-specific operations.\n  //\n  // Actions:\n  //\n  //  * ALLOW: Allows the request if and only if there is a policy that matches\n  //    the request.\n  //  * DENY: Allows the request if and only if there are no policies that\n  //    match the request.\n  //  * LOG: Allows all requests. 
If at least one policy matches, the dynamic\n  //    metadata key `access_log_hint` is set to the value `true` under the shared\n  //    key namespace 'envoy.common'. If no policies match, it is set to `false`.\n  //    Other actions do not modify this key.\n  //\n  Action action = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maps from policy name to policy. A match occurs when at least one policy matches the request.\n  map<string, Policy> policies = 2;\n}\n\n// Policy specifies a role and the principals that are assigned/denied the role.\n// A policy matches if and only if at least one of its permissions match the\n// action taking place AND at least one of its principals match the downstream\n// AND the condition is true if specified.\nmessage Policy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v2.Policy\";\n\n  // Required. The set of permissions that define a role. Each permission is\n  // matched with OR semantics. To match all actions for this policy, a single\n  // Permission with the `any` field set to true should be used.\n  repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Required. The set of principals that are assigned/denied the role based on\n  // “action”. Each principal is matched with OR semantics. To match all\n  // downstreams for this policy, a single Principal with the `any` field set to\n  // true should be used.\n  repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // An optional symbolic expression specifying an access control\n  // :ref:`condition <arch_overview_condition>`. 
The condition is combined\n  // with the permissions and the principals as a clause with AND semantics.\n  // Only be used when checked_condition is not used.\n  google.api.expr.v1alpha1.Expr condition = 3\n      [(udpa.annotations.field_migrate).oneof_promotion = \"expression_specifier\"];\n\n  // [#not-implemented-hide:]\n  // An optional symbolic expression that has been successfully type checked.\n  // Only be used when condition is not used.\n  google.api.expr.v1alpha1.CheckedExpr checked_condition = 4\n      [(udpa.annotations.field_migrate).oneof_promotion = \"expression_specifier\"];\n}\n\n// Permission defines an action (or actions) that a principal can take.\n// [#next-free-field: 11]\nmessage Permission {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v2.Permission\";\n\n  // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context,\n  // each are applied with the associated behavior.\n  message Set {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v2.Permission.Set\";\n\n    repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set of rules that all must match in order to define the action.\n    Set and_rules = 1;\n\n    // A set of rules where at least one must match in order to define the action.\n    Set or_rules = 2;\n\n    // When any is set, it matches any action.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only\n    // available for HTTP request.\n    // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path`\n    // field if you want to match the URL path without the query and fragment string.\n    route.v3.HeaderMatcher header = 4;\n\n    // A URL path on the incoming HTTP request. 
Only available for HTTP.\n    type.matcher.v3.PathMatcher url_path = 10;\n\n    // A CIDR block that describes the destination IP.\n    core.v3.CidrRange destination_ip = 5;\n\n    // A port number that describes the destination port connecting to.\n    uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}];\n\n    // Metadata that describes additional information about the action.\n    type.matcher.v3.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided permission. For instance, if the value of\n    // `not_rule` would match, this permission would not match. Conversely, if\n    // the value of `not_rule` would not match, this permission would match.\n    Permission not_rule = 8;\n\n    // The request server from the client's connection request. This is\n    // typically TLS SNI.\n    //\n    // .. attention::\n    //\n    //   The behavior of this field may be affected by how Envoy is configured\n    //   as explained below.\n    //\n    //   * If the :ref:`TLS Inspector <config_listener_filters_tls_inspector>`\n    //     filter is not added, and if a `FilterChainMatch` is not defined for\n    //     the :ref:`server name\n    //     <envoy_api_field_config.listener.v3.FilterChainMatch.server_names>`,\n    //     a TLS connection's requested SNI server name will be treated as if it\n    //     wasn't present.\n    //\n    //   * A :ref:`listener filter <arch_overview_listener_filters>` may\n    //     overwrite a connection's requested server name within Envoy.\n    //\n    // Please refer to :ref:`this FAQ entry <faq_how_to_setup_sni>` to learn to\n    // setup SNI.\n    type.matcher.v3.StringMatcher requested_server_name = 9;\n  }\n}\n\n// Principal defines an identity or a group of identities for a downstream\n// subject.\n// [#next-free-field: 12]\nmessage Principal {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v2.Principal\";\n\n  // Used in the `and_ids` and `or_ids` fields in the 
`identifier` oneof.\n  // Depending on the context, each are applied with the associated behavior.\n  message Set {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v2.Principal.Set\";\n\n    repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Authentication attributes for a downstream.\n  message Authenticated {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v2.Principal.Authenticated\";\n\n    reserved 1;\n\n    // The name of the principal. If set, The URI SAN or DNS SAN in that order\n    // is used from the certificate, otherwise the subject field is used. If\n    // unset, it applies to any user that is authenticated.\n    type.matcher.v3.StringMatcher principal_name = 2;\n  }\n\n  oneof identifier {\n    option (validate.required) = true;\n\n    // A set of identifiers that all must match in order to define the\n    // downstream.\n    Set and_ids = 1;\n\n    // A set of identifiers at least one must match in order to define the\n    // downstream.\n    Set or_ids = 2;\n\n    // When any is set, it matches any downstream.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // Authenticated attributes that identify the downstream.\n    Authenticated authenticated = 4;\n\n    // A CIDR block that describes the downstream IP.\n    // This address will honor proxy protocol, but will not honor XFF.\n    core.v3.CidrRange source_ip = 5 [deprecated = true];\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This is always the physical peer even if the\n    // :ref:`remote_ip <envoy_api_field_config.rbac.v3.Principal.remote_ip>` is\n    // inferred from for example the x-forwarder-for header, proxy protocol,\n    // etc.\n    core.v3.CidrRange direct_remote_ip = 10;\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This may not be the physical peer 
and could be different from the\n    // :ref:`direct_remote_ip\n    // <envoy_api_field_config.rbac.v3.Principal.direct_remote_ip>`. E.g, if the\n    // remote ip is inferred from for example the x-forwarder-for header, proxy\n    // protocol, etc.\n    core.v3.CidrRange remote_ip = 11;\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP\n    // request. Only available for HTTP request. Note: the pseudo-header :path\n    // includes the query and fragment string. Use the `url_path` field if you\n    // want to match the URL path without the query and fragment string.\n    route.v3.HeaderMatcher header = 6;\n\n    // A URL path on the incoming HTTP request. Only available for HTTP.\n    type.matcher.v3.PathMatcher url_path = 9;\n\n    // Metadata that describes additional information about the principal.\n    type.matcher.v3.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided principal. For instance, if the value of\n    // `not_id` would match, this principal would not match. Conversely, if the\n    // value of `not_id` would not match, this principal would match.\n    Principal not_id = 8;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/rbac/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/rbac/v3:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:checked_proto\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.rbac.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/metadata.proto\";\nimport \"envoy/type/matcher/v4alpha/path.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/api/expr/v1alpha1/checked.proto\";\nimport \"google/api/expr/v1alpha1/syntax.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.rbac.v4alpha\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Role Based Access Control (RBAC)]\n\n// Role Based Access Control (RBAC) provides service-level and method-level access control for a\n// service. RBAC policies are additive. The policies are examined in order. Requests are allowed\n// or denied based on the `action` and whether a matching policy is found. For instance, if the\n// action is ALLOW and a matching policy is found the request should be allowed.\n//\n// RBAC can also be used to make access logging decisions by communicating with access loggers\n// through dynamic metadata. When the action is LOG and at least one policy matches, the\n// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating\n// the request should be logged.\n//\n// Here is an example of RBAC configuration. It has two policies:\n//\n// * Service account \"cluster.local/ns/default/sa/admin\" has full access to the service, and so\n//   does \"cluster.local/ns/default/sa/superuser\".\n//\n// * Any user can read (\"GET\") the service at paths with prefix \"/products\", so long as the\n//   destination port is either 80 or 443.\n//\n//  .. 
code-block:: yaml\n//\n//   action: ALLOW\n//   policies:\n//     \"service-admin\":\n//       permissions:\n//         - any: true\n//       principals:\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/admin\"\n//         - authenticated:\n//             principal_name:\n//               exact: \"cluster.local/ns/default/sa/superuser\"\n//     \"product-viewer\":\n//       permissions:\n//           - and_rules:\n//               rules:\n//                 - header: { name: \":method\", exact_match: \"GET\" }\n//                 - url_path:\n//                     path: { prefix: \"/products\" }\n//                 - or_rules:\n//                     rules:\n//                       - destination_port: 80\n//                       - destination_port: 443\n//       principals:\n//         - any: true\n//\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v3.RBAC\";\n\n  // Should we do safe-list or block-list style access control?\n  enum Action {\n    // The policies grant access to principals. The rest are denied. This is safe-list style\n    // access control. This is the default type.\n    ALLOW = 0;\n\n    // The policies deny access to principals. The rest are allowed. This is block-list style\n    // access control.\n    DENY = 1;\n\n    // The policies set the `access_log_hint` dynamic metadata key based on if requests match.\n    // All requests are allowed.\n    LOG = 2;\n  }\n\n  // The action to take if a policy matches. Every action either allows or denies a request,\n  // and can also carry out action-specific operations.\n  //\n  // Actions:\n  //\n  //  * ALLOW: Allows the request if and only if there is a policy that matches\n  //    the request.\n  //  * DENY: Allows the request if and only if there are no policies that\n  //    match the request.\n  //  * LOG: Allows all requests. 
If at least one policy matches, the dynamic\n  //    metadata key `access_log_hint` is set to the value `true` under the shared\n  //    key namespace 'envoy.common'. If no policies match, it is set to `false`.\n  //    Other actions do not modify this key.\n  //\n  Action action = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maps from policy name to policy. A match occurs when at least one policy matches the request.\n  map<string, Policy> policies = 2;\n}\n\n// Policy specifies a role and the principals that are assigned/denied the role.\n// A policy matches if and only if at least one of its permissions match the\n// action taking place AND at least one of its principals match the downstream\n// AND the condition is true if specified.\nmessage Policy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v3.Policy\";\n\n  // Required. The set of permissions that define a role. Each permission is\n  // matched with OR semantics. To match all actions for this policy, a single\n  // Permission with the `any` field set to true should be used.\n  repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Required. The set of principals that are assigned/denied the role based on\n  // “action”. Each principal is matched with OR semantics. To match all\n  // downstreams for this policy, a single Principal with the `any` field set to\n  // true should be used.\n  repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  oneof expression_specifier {\n    // An optional symbolic expression specifying an access control\n    // :ref:`condition <arch_overview_condition>`. 
The condition is combined\n    // with the permissions and the principals as a clause with AND semantics.\n    // Only be used when checked_condition is not used.\n    google.api.expr.v1alpha1.Expr condition = 3;\n\n    // [#not-implemented-hide:]\n    // An optional symbolic expression that has been successfully type checked.\n    // Only be used when condition is not used.\n    google.api.expr.v1alpha1.CheckedExpr checked_condition = 4;\n  }\n}\n\n// Permission defines an action (or actions) that a principal can take.\n// [#next-free-field: 11]\nmessage Permission {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v3.Permission\";\n\n  // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context,\n  // each are applied with the associated behavior.\n  message Set {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v3.Permission.Set\";\n\n    repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set of rules that all must match in order to define the action.\n    Set and_rules = 1;\n\n    // A set of rules where at least one must match in order to define the action.\n    Set or_rules = 2;\n\n    // When any is set, it matches any action.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only\n    // available for HTTP request.\n    // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path`\n    // field if you want to match the URL path without the query and fragment string.\n    route.v4alpha.HeaderMatcher header = 4;\n\n    // A URL path on the incoming HTTP request. 
Only available for HTTP.\n    type.matcher.v4alpha.PathMatcher url_path = 10;\n\n    // A CIDR block that describes the destination IP.\n    core.v4alpha.CidrRange destination_ip = 5;\n\n    // A port number that describes the destination port connecting to.\n    uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}];\n\n    // Metadata that describes additional information about the action.\n    type.matcher.v4alpha.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided permission. For instance, if the value of\n    // `not_rule` would match, this permission would not match. Conversely, if\n    // the value of `not_rule` would not match, this permission would match.\n    Permission not_rule = 8;\n\n    // The request server from the client's connection request. This is\n    // typically TLS SNI.\n    //\n    // .. attention::\n    //\n    //   The behavior of this field may be affected by how Envoy is configured\n    //   as explained below.\n    //\n    //   * If the :ref:`TLS Inspector <config_listener_filters_tls_inspector>`\n    //     filter is not added, and if a `FilterChainMatch` is not defined for\n    //     the :ref:`server name\n    //     <envoy_api_field_config.listener.v4alpha.FilterChainMatch.server_names>`,\n    //     a TLS connection's requested SNI server name will be treated as if it\n    //     wasn't present.\n    //\n    //   * A :ref:`listener filter <arch_overview_listener_filters>` may\n    //     overwrite a connection's requested server name within Envoy.\n    //\n    // Please refer to :ref:`this FAQ entry <faq_how_to_setup_sni>` to learn to\n    // setup SNI.\n    type.matcher.v4alpha.StringMatcher requested_server_name = 9;\n  }\n}\n\n// Principal defines an identity or a group of identities for a downstream\n// subject.\n// [#next-free-field: 12]\nmessage Principal {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.rbac.v3.Principal\";\n\n  // Used in the `and_ids` and 
`or_ids` fields in the `identifier` oneof.\n  // Depending on the context, each are applied with the associated behavior.\n  message Set {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v3.Principal.Set\";\n\n    repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Authentication attributes for a downstream.\n  message Authenticated {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.rbac.v3.Principal.Authenticated\";\n\n    reserved 1;\n\n    // The name of the principal. If set, The URI SAN or DNS SAN in that order\n    // is used from the certificate, otherwise the subject field is used. If\n    // unset, it applies to any user that is authenticated.\n    type.matcher.v4alpha.StringMatcher principal_name = 2;\n  }\n\n  oneof identifier {\n    option (validate.required) = true;\n\n    // A set of identifiers that all must match in order to define the\n    // downstream.\n    Set and_ids = 1;\n\n    // A set of identifiers at least one must match in order to define the\n    // downstream.\n    Set or_ids = 2;\n\n    // When any is set, it matches any downstream.\n    bool any = 3 [(validate.rules).bool = {const: true}];\n\n    // Authenticated attributes that identify the downstream.\n    Authenticated authenticated = 4;\n\n    // A CIDR block that describes the downstream IP.\n    // This address will honor proxy protocol, but will not honor XFF.\n    core.v4alpha.CidrRange hidden_envoy_deprecated_source_ip = 5 [deprecated = true];\n\n    // A CIDR block that describes the downstream remote/origin address.\n    // Note: This is always the physical peer even if the\n    // :ref:`remote_ip <envoy_api_field_config.rbac.v4alpha.Principal.remote_ip>` is\n    // inferred from for example the x-forwarder-for header, proxy protocol,\n    // etc.\n    core.v4alpha.CidrRange direct_remote_ip = 10;\n\n    // A CIDR block that describes the downstream 
remote/origin address.\n    // Note: This may not be the physical peer and could be different from the\n    // :ref:`direct_remote_ip\n    // <envoy_api_field_config.rbac.v4alpha.Principal.direct_remote_ip>`. E.g, if the\n    // remote ip is inferred from for example the x-forwarder-for header, proxy\n    // protocol, etc.\n    core.v4alpha.CidrRange remote_ip = 11;\n\n    // A header (or pseudo-header such as :path or :method) on the incoming HTTP\n    // request. Only available for HTTP request. Note: the pseudo-header :path\n    // includes the query and fragment string. Use the `url_path` field if you\n    // want to match the URL path without the query and fragment string.\n    route.v4alpha.HeaderMatcher header = 6;\n\n    // A URL path on the incoming HTTP request. Only available for HTTP.\n    type.matcher.v4alpha.PathMatcher url_path = 9;\n\n    // Metadata that describes additional information about the principal.\n    type.matcher.v4alpha.MetadataMatcher metadata = 7;\n\n    // Negates matching the provided principal. For instance, if the value of\n    // `not_id` would match, this principal would not match. Conversely, if the\n    // value of `not_id` would not match, this principal would match.\n    Principal not_id = 8;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.resource_monitor.fixed_heap.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha\";\noption java_outer_classname = \"FixedHeapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Fixed heap]\n// [#extension: envoy.resource_monitors.fixed_heap]\n\n// The fixed heap resource monitor reports the Envoy process memory pressure, computed as a\n// fraction of currently reserved heap memory divided by a statically configured maximum\n// specified in the FixedHeapConfig.\nmessage FixedHeapConfig {\n  uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.resource_monitor.injected_resource.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha\";\noption java_outer_classname = \"InjectedResourceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Injected resource]\n// [#extension: envoy.resource_monitors.injected_resource]\n\n// The injected resource monitor allows injecting a synthetic resource pressure into Envoy\n// via a text file, which must contain a floating-point number in the range [0..1] representing\n// the resource pressure and be updated atomically by a symbolic link swap.\n// This is intended primarily for integration tests to force Envoy into an overloaded state.\nmessage InjectedResourceConfig {\n  string filename = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.retry.omit_canary_hosts.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.retry.omit_canary_hosts.v2\";\noption java_outer_classname = \"OmitCanaryHostsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Omit Canary Hosts Predicate]\n// [#extension: envoy.retry_host_predicates.omit_canary_hosts]\n\nmessage OmitCanaryHostsPredicate {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.retry.omit_host_metadata.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.retry.omit_host_metadata.v2\";\noption java_outer_classname = \"OmitHostMetadataConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.retry.host.omit_host_metadata.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Omit host metadata retry predicate]\n\n// A retry host predicate that can be used to reject a host based on\n// predefined metadata match criteria.\n// [#extension: envoy.retry_host_predicates.omit_host_metadata]\nmessage OmitHostMetadataConfig {\n  // Retry host predicate metadata match criteria. The hosts in\n  // the upstream cluster with matching metadata will be omitted while\n  // attempting a retry of a failed request. The metadata should be specified\n  // under the *envoy.lb* key.\n  api.v2.core.Metadata metadata_match = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.retry.previous_hosts.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.retry.previous_hosts.v2\";\noption java_outer_classname = \"PreviousHostsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Previous Hosts Predicate]\n// [#extension: envoy.retry_host_predicates.previous_hosts]\n\nmessage PreviousHostsPredicate {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/retry/previous_priorities/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.retry.previous_priorities;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.retry.previous_priorities\";\noption java_outer_classname = \"PreviousPrioritiesConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Previous priorities retry selector]\n\n// A retry host selector that attempts to spread retries between priorities, even if certain\n// priorities would not normally be attempted due to higher priorities being available.\n//\n// As priorities get excluded, load will be distributed amongst the remaining healthy priorities\n// based on the relative health of the priorities, matching how load is distributed during regular\n// host selection. For example, given priority healths of {100, 50, 50}, the original load will be\n// {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). 
If P0 is excluded, the load\n// changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the\n// remaining to spill over to P2.\n//\n// Each priority attempted will be excluded until there are no healthy priorities left, at which\n// point the list of attempted priorities will be reset, essentially starting from the beginning.\n// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the\n// following sequence of priorities would be selected (assuming update_frequency = 1):\n// Attempt 1: P0 (P0 is 100% healthy)\n// Attempt 2: P2 (P0 already attempted, P2 only healthy priority)\n// Attempt 3: P0 (no healthy priorities, reset)\n// Attempt 4: P2\n//\n// In the case of all upstream hosts being unhealthy, no adjustments will be made to the original\n// priority load, so behavior should be identical to not using this plugin.\n//\n// Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of\n// priorities), which might incur significant overhead for clusters with many priorities.\n// [#extension: envoy.retry_priorities.previous_priorities]\nmessage PreviousPrioritiesConfig {\n  // How often the priority load should be updated based on previously attempted priorities. Useful\n  // to allow each priorities to receive more than one request before being excluded or to reduce\n  // the number of times that the priority load has to be recomputed.\n  //\n  // For example, by setting this to 2, then the first two attempts (initial attempt and first\n  // retry) will use the unmodified priority load. The third and fourth attempt will use priority\n  // load which excludes the priorities routed to with the first two attempts, and the fifth and\n  // sixth attempt will use the priority load excluding the priorities used for the first four\n  // attempts.\n  //\n  // Must be greater than 0.\n  int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/route/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/metadata/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/route/v3/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v3\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP route configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// [#next-free-field: 11]\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.RouteConfiguration\";\n\n  // The name of the route configuration. For example, it might match\n  // :ref:`route_config_name\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.Rds.route_config_name>` in\n  // :ref:`envoy_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`.\n  string name = 1;\n\n  // An array of virtual hosts that make up the route table.\n  repeated VirtualHost virtual_hosts = 2;\n\n  // An array of virtual hosts will be dynamically loaded via the VHDS API.\n  // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used\n  // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for\n  // on-demand discovery of virtual hosts. 
The contents of these two fields will be merged to\n  // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration\n  // taking precedence.\n  Vhds vhds = 9;\n\n  // Optionally specifies a list of HTTP headers that the connection manager\n  // will consider to be internal only. If they are found on external requests they will be cleaned\n  // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information.\n  repeated string internal_only_headers = 3 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each response that\n  // the connection manager encodes. Headers specified at this level are applied\n  // after headers from any enclosed :ref:`envoy_api_msg_config.route.v3.VirtualHost` or\n  // :ref:`envoy_api_msg_config.route.v3.RouteAction`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption response_headers_to_add = 4\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // that the connection manager encodes.\n  repeated string response_headers_to_remove = 5 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // routed by the HTTP connection manager. Headers specified at this level are\n  // applied after headers from any enclosed :ref:`envoy_api_msg_config.route.v3.VirtualHost` or\n  // :ref:`envoy_api_msg_config.route.v3.RouteAction`. 
For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption request_headers_to_add = 6\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // routed by the HTTP connection manager.\n  repeated string request_headers_to_remove = 8 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // By default, headers that should be added/removed are evaluated from most to least specific:\n  //\n  // * route level\n  // * virtual host level\n  // * connection manager level\n  //\n  // To allow setting overrides at the route or virtual host level, this order can be reversed\n  // by setting this option to true. Defaults to false.\n  //\n  // [#next-major-version: In the v3 API, this will default to true.]\n  bool most_specific_header_mutations_wins = 10;\n\n  // An optional boolean that specifies whether the clusters that the route\n  // table refers to will be validated by the cluster manager. If set to true\n  // and a route refers to a non-existent cluster, the route table will not\n  // load. If set to false and a route refers to a non-existent cluster, the\n  // route table will load and the router filter will return a 404 if the route\n  // is selected at runtime. This setting defaults to true if the route table\n  // is statically defined via the :ref:`route_config\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.route_config>`\n  // option. This setting default to false if the route table is loaded dynamically via the\n  // :ref:`rds\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.rds>`\n  // option. 
Users may wish to override the default behavior in certain cases (for example when\n  // using CDS with a static route table).\n  google.protobuf.BoolValue validate_clusters = 7;\n}\n\nmessage Vhds {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Vhds\";\n\n  // Configuration source specifier for VHDS.\n  core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/route/v3/route_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/proxy_protocol.proto\";\nimport \"envoy/type/matcher/v3/regex.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/metadata/v3/metadata.proto\";\nimport \"envoy/type/tracing/v3/custom_tag.proto\";\nimport \"envoy/type/v3/percent.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v3\";\noption java_outer_classname = \"RouteComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP route components]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// The top level element in the routing configuration is a virtual host. Each virtual host has\n// a logical name as well as a set of domains that get routed to it based on the incoming request's\n// host header. This allows a single listener to service multiple top level domain path trees. 
Once\n// a virtual host is selected based on the domain, the routes are processed in order to see which\n// upstream cluster to route to or whether to perform a redirect.\n// [#next-free-field: 21]\nmessage VirtualHost {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.VirtualHost\";\n\n  enum TlsRequirementType {\n    // No TLS requirement for the virtual host.\n    NONE = 0;\n\n    // External requests must use TLS. If a request is external and it is not\n    // using TLS, a 301 redirect will be sent telling the client to use HTTPS.\n    EXTERNAL_ONLY = 1;\n\n    // All requests must use TLS. If a request is not using TLS, a 301 redirect\n    // will be sent telling the client to use HTTPS.\n    ALL = 2;\n  }\n\n  reserved 9;\n\n  // The logical name of the virtual host. This is used when emitting certain\n  // statistics but is not relevant for routing.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A list of domains (host/authority header) that will be matched to this\n  // virtual host. Wildcard hosts are supported in the suffix or prefix form.\n  //\n  // Domain search order:\n  //  1. Exact domain names: ``www.foo.com``.\n  //  2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``.\n  //  3. Prefix domain wildcards: ``foo.*`` or ``foo-*``.\n  //  4. Special wildcard ``*`` matching any domain.\n  //\n  // .. note::\n  //\n  //   The wildcard will not match the empty string.\n  //   e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``.\n  //   The longest wildcards match first.\n  //   Only a single virtual host in the entire route configuration can match on ``*``. A domain\n  //   must be unique across all virtual hosts or the config will fail to load.\n  //\n  // Domains cannot contain control characters. 
This is validated by the well_known_regex HTTP_HEADER_VALUE.\n  repeated string domains = 2 [(validate.rules).repeated = {\n    min_items: 1\n    items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}}\n  }];\n\n  // The list of routes that will be matched, in order, for incoming requests.\n  // The first route that matches will be used.\n  repeated Route routes = 3;\n\n  // Specifies the type of TLS enforcement the virtual host expects. If this option is not\n  // specified, there is no TLS requirement for the virtual host.\n  TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // A list of virtual clusters defined for this virtual host. Virtual clusters\n  // are used for additional statistics gathering.\n  repeated VirtualCluster virtual_clusters = 5;\n\n  // Specifies a set of rate limit configurations that will be applied to the\n  // virtual host.\n  repeated RateLimit rate_limits = 6;\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_config.route.v3.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption request_headers_to_add = 7\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // handled by this virtual host.\n  repeated string request_headers_to_remove = 13 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Specifies a list of HTTP headers that should be added to each response\n  // handled by this virtual host. 
Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_config.route.v3.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // handled by this virtual host.\n  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Indicates that the virtual host has a CORS policy.\n  CorsPolicy cors = 8;\n\n  // The per_filter_config field can be used to provide virtual host-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n  // for if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 15;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the upstream request. Setting this option will cause it to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the upstream\n  // will see the attempt count as perceived by the second Envoy. 
Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_extensions.filters.http.router.v3.Router.suppress_envoy_headers>` flag.\n  //\n  // [#next-major-version: rename to include_attempt_count_in_request.]\n  bool include_request_attempt_count = 14;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the downstream response. Setting this option will cause the router to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the downstream\n  // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_extensions.filters.http.router.v3.Router.suppress_envoy_headers>` flag.\n  bool include_attempt_count_in_response = 19;\n\n  // Indicates the retry policy for all routes in this virtual host. Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  RetryPolicy retry_policy = 16;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. Note that setting a route level entry\n  // will take precedence over this config and it'll be treated independently (e.g.: values are not\n  // inherited). :ref:`Retry policy <envoy_api_field_config.route.v3.VirtualHost.retry_policy>` should not be\n  // set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 20;\n\n  // Indicates the hedge policy for all routes in this virtual host. 
Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  HedgePolicy hedge_policy = 17;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum\n  // value of this and the listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18;\n\n  map<string, google.protobuf.Struct> hidden_envoy_deprecated_per_filter_config = 12\n      [deprecated = true];\n}\n\n// A filter-defined action type.\nmessage FilterAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.FilterAction\";\n\n  google.protobuf.Any action = 1;\n}\n\n// A route is both a specification of how to match a request as well as an indication of what to do\n// next (e.g., redirect, forward, rewrite, etc.).\n//\n// .. attention::\n//\n//   Envoy supports routing on HTTP method via :ref:`header matching\n//   <envoy_api_msg_config.route.v3.HeaderMatcher>`.\n// [#next-free-field: 18]\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.Route\";\n\n  reserved 6;\n\n  // Name for the route.\n  string name = 14;\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  oneof action {\n    option (validate.required) = true;\n\n    // Route request to some upstream cluster.\n    RouteAction route = 2;\n\n    // Return a redirect.\n    RedirectAction redirect = 3;\n\n    // Return an arbitrary HTTP response directly, without proxying.\n    DirectResponseAction direct_response = 7;\n\n    // [#not-implemented-hide:]\n    // If true, a filter will define the action (e.g., it could dynamically generate the\n    // RouteAction).\n    // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when\n    // 
implemented]\n    FilterAction filter_action = 17;\n  }\n\n  // The Metadata field can be used to provide additional information\n  // about the route. It can be used for configuration, stats, and logging.\n  // The metadata should go under the filter namespace that will need it.\n  // For instance, if the metadata is intended for the Router filter,\n  // the filter name should be specified as *envoy.filters.http.router*.\n  core.v3.Metadata metadata = 4;\n\n  // Decorator for the matched route.\n  Decorator decorator = 5;\n\n  // The typed_per_filter_config field can be used to provide route-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>` for\n  // if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 13;\n\n  // Specifies a set of headers that will be added to requests matching this\n  // route. Headers specified at this level are applied before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v3.VirtualHost` and\n  // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption request_headers_to_add = 9\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // matching this route.\n  repeated string request_headers_to_remove = 12 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Specifies a set of headers that will be added to responses to requests\n  // matching this route. 
Headers specified at this level are applied before\n  // headers from the enclosing :ref:`envoy_api_msg_config.route.v3.VirtualHost` and\n  // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on\n  // :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v3.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // to requests matching this route.\n  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Presence of the object defines whether the connection manager's tracing configuration\n  // is overridden by this route specific instance.\n  Tracing tracing = 15;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set, the bytes actually buffered will be the minimum value of this and the\n  // listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16;\n\n  map<string, google.protobuf.Struct> hidden_envoy_deprecated_per_filter_config = 8\n      [deprecated = true];\n}\n\n// Compared to the :ref:`cluster <envoy_api_field_config.route.v3.RouteAction.cluster>` field that specifies a\n// single upstream cluster as the target of a request, the :ref:`weighted_clusters\n// <envoy_api_field_config.route.v3.RouteAction.weighted_clusters>` option allows for specification of\n// multiple upstream clusters along with weights that indicate the percentage of\n// traffic to be forwarded to each cluster. 
The router selects an upstream cluster based on the\n// weights.\nmessage WeightedCluster {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.WeightedCluster\";\n\n  // [#next-free-field: 11]\n  message ClusterWeight {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.WeightedCluster.ClusterWeight\";\n\n    reserved 7;\n\n    // Name of the upstream cluster. The cluster must exist in the\n    // :ref:`cluster manager configuration <config_cluster_manager>`.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // An integer between 0 and :ref:`total_weight\n    // <envoy_api_field_config.route.v3.WeightedCluster.total_weight>`. When a request matches the route,\n    // the choice of an upstream cluster is determined by its weight. The sum of weights across all\n    // entries in the clusters array must add up to the total_weight, which defaults to 100.\n    google.protobuf.UInt32Value weight = 2;\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field will be considered for\n    // load balancing. Note that this will be merged with what's provided in\n    // :ref:`RouteAction.metadata_match <envoy_api_field_config.route.v3.RouteAction.metadata_match>`, with\n    // values here taking precedence. The filter name should be specified as *envoy.lb*.\n    core.v3.Metadata metadata_match = 3;\n\n    // Specifies a list of headers to be added to requests when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_config.route.v3.Route`, :ref:`envoy_api_msg_config.route.v3.VirtualHost`, and\n    // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. 
For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.v3.HeaderValueOption request_headers_to_add = 4\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request when\n    // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.\n    repeated string request_headers_to_remove = 9 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // Specifies a list of headers to be added to responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_config.route.v3.Route`, :ref:`envoy_api_msg_config.route.v3.VirtualHost`, and\n    // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.v3.HeaderValueOption response_headers_to_add = 5\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of headers to be removed from responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.\n    repeated string response_headers_to_remove = 6 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // The per_filter_config field can be used to provide weighted cluster-specific\n    // configurations for filters. The key should match the filter name, such as\n    // *envoy.filters.http.buffer* for the HTTP buffer filter. 
Use of this field is filter\n    // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n    // for if and how it is utilized.\n    map<string, google.protobuf.Any> typed_per_filter_config = 10;\n\n    map<string, google.protobuf.Struct> hidden_envoy_deprecated_per_filter_config = 8\n        [deprecated = true];\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Specifies the total weight across all clusters. The sum of all cluster weights must equal this\n  // value, which must be greater than 0. Defaults to 100.\n  google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies the runtime key prefix that should be used to construct the\n  // runtime keys associated with each cluster. When the *runtime_key_prefix* is\n  // specified, the router will look for weights associated with each upstream\n  // cluster under the key *runtime_key_prefix* + \".\" + *cluster[i].name* where\n  // *cluster[i]* denotes an entry in the clusters array field. If the runtime\n  // key for the cluster does not exist, the value specified in the\n  // configuration file will be used as the default weight. 
See the :ref:`runtime documentation\n  // <operations_runtime>` for how key names map to the underlying implementation.\n  string runtime_key_prefix = 2;\n}\n\n// [#next-free-field: 13]\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RouteMatch\";\n\n  message GrpcRouteMatchOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteMatch.GrpcRouteMatchOptions\";\n  }\n\n  message TlsContextMatchOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteMatch.TlsContextMatchOptions\";\n\n    // If specified, the route will match against whether or not a certificate is presented.\n    // If not specified, certificate presentation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue presented = 1;\n\n    // If specified, the route will match against whether or not a certificate is validated.\n    // If not specified, certificate validation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue validated = 2;\n  }\n\n  // An extensible message for matching CONNECT requests.\n  message ConnectMatcher {\n  }\n\n  reserved 5;\n\n  oneof path_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route is a prefix rule meaning that the prefix must\n    // match the beginning of the *:path* header.\n    string prefix = 1;\n\n    // If specified, the route is an exact path rule meaning that the path must\n    // exactly match the *:path* header once the query string is removed.\n    string path = 2;\n\n    // If specified, the route is a regular expression rule meaning that the\n    // regex must match the *:path* header once the query string is removed. The entire path\n    // (without the query string) must match the regex. 
The rule will not match if only a\n    // subsequence of the *:path* header matches the regex.\n    //\n    // [#next-major-version: In the v3 API we should redo how path specification works such\n    // that we utilize StringMatcher, and additionally have consistent options around whether we\n    // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive\n    // to deprecate the existing options. We should even consider whether we want to do away with\n    // path_specifier entirely and just rely on a set of header matchers which can already match\n    // on :path, etc. The issue with that is it is unclear how to generically deal with query string\n    // stripping. This needs more thought.]\n    type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];\n\n    // If this is used as the matcher, the matcher will only match CONNECT requests.\n    // Note that this will not match HTTP/2 upgrade-style CONNECT requests\n    // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style\n    // upgrades.\n    // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2,\n    // where Extended CONNECT requests may have a path, the path matchers will work if\n    // there is a path present.\n    // Note that CONNECT support is currently considered alpha in Envoy.\n    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.\n    ConnectMatcher connect_matcher = 12;\n\n    string hidden_envoy_deprecated_regex = 3 [\n      deprecated = true,\n      (validate.rules).string = {max_bytes: 1024},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n  }\n\n  // Indicates that prefix/path matching should be case sensitive. The default\n  // is true.\n  google.protobuf.BoolValue case_sensitive = 4;\n\n  // Indicates that the route should additionally match on a runtime key. 
Every time the route\n  // is considered for a match, it must also fall under the percentage of matches indicated by\n  // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the\n  // number is <= the value of the numerator N, or if the key is not present, the default\n  // value, the router continues to evaluate the remaining match criteria. A runtime_fraction\n  // route configuration can be used to roll out route changes in a gradual manner without full\n  // code/config deploys. Refer to the :ref:`traffic shifting\n  // <config_http_conn_man_route_table_traffic_splitting_shift>` docs for additional documentation.\n  //\n  // .. note::\n  //\n  //    Parsing this field is implemented such that the runtime key's data may be represented\n  //    as a FractionalPercent proto represented as JSON/YAML and may also be represented as an\n  //    integer with the assumption that the value is an integral percentage out of 100. For\n  //    instance, a runtime key lookup returning the value \"42\" would parse as a FractionalPercent\n  //    whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics.\n  core.v3.RuntimeFractionalPercent runtime_fraction = 9;\n\n  // Specifies a set of headers that the route should match on. The router will\n  // check the request’s headers against all the specified headers in the route\n  // config. A match will happen if all the headers in the route are present in\n  // the request with the same values (or based on presence if the value field\n  // is not in the config).\n  repeated HeaderMatcher headers = 6;\n\n  // Specifies a set of URL query parameters on which the route should\n  // match. The router will check the query string from the *path* header\n  // against all the specified query parameters. 
If the number of specified\n  // query parameters is nonzero, they all must match the *path* header's\n  // query string for a match to occur.\n  repeated QueryParameterMatcher query_parameters = 7;\n\n  // If specified, only gRPC requests will be matched. The router will check\n  // that the content-type header has a application/grpc or one of the various\n  // application/grpc+ values.\n  GrpcRouteMatchOptions grpc = 8;\n\n  // If specified, the client tls context will be matched against the defined\n  // match options.\n  //\n  // [#next-major-version: unify with RBAC]\n  TlsContextMatchOptions tls_context = 11;\n}\n\n// [#next-free-field: 12]\nmessage CorsPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.CorsPolicy\";\n\n  // Specifies string patterns that match allowed origins. An origin is allowed if any of the\n  // string matchers match.\n  repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11;\n\n  // Specifies the content for the *access-control-allow-methods* header.\n  string allow_methods = 2;\n\n  // Specifies the content for the *access-control-allow-headers* header.\n  string allow_headers = 3;\n\n  // Specifies the content for the *access-control-expose-headers* header.\n  string expose_headers = 4;\n\n  // Specifies the content for the *access-control-max-age* header.\n  string max_age = 5;\n\n  // Specifies whether the resource allows credentials.\n  google.protobuf.BoolValue allow_credentials = 6;\n\n  oneof enabled_specifier {\n    // Specifies the % of requests for which the CORS filter is enabled.\n    //\n    // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS\n    // filter will be enabled for 100% of the requests.\n    //\n    // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is\n    // specified, Envoy will lookup the runtime key to get the percentage of requests to filter.\n    
core.v3.RuntimeFractionalPercent filter_enabled = 9;\n\n    google.protobuf.BoolValue hidden_envoy_deprecated_enabled = 7\n        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n  }\n\n  // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not\n  // enforced.\n  //\n  // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those\n  // fields have to explicitly disable the filter in order for this setting to take effect.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* to determine if it's valid but will not enforce any policies.\n  core.v3.RuntimeFractionalPercent shadow_enabled = 10;\n\n  repeated string hidden_envoy_deprecated_allow_origin = 1\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  repeated string hidden_envoy_deprecated_allow_origin_regex = 8\n      [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}];\n}\n\n// [#next-free-field: 37]\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RouteAction\";\n\n  enum ClusterNotFoundResponseCode {\n    // HTTP status code - 503 Service Unavailable.\n    SERVICE_UNAVAILABLE = 0;\n\n    // HTTP status code - 404 Not Found.\n    NOT_FOUND = 1;\n  }\n\n  // Configures :ref:`internal redirect <arch_overview_internal_redirects>` behavior.\n  // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.]\n  enum InternalRedirectAction {\n    option deprecated = true;\n\n    PASS_THROUGH_INTERNAL_REDIRECT = 0;\n    HANDLE_INTERNAL_REDIRECT = 1;\n  }\n\n  // The router is capable of shadowing traffic from one cluster to another. 
The current\n  // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n  // respond before returning the response from the primary cluster. All normal statistics are\n  // collected for the shadow cluster making this feature useful for testing.\n  //\n  // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is\n  // useful for logging. For example, *cluster1* becomes *cluster1-shadow*.\n  //\n  // .. note::\n  //\n  //   Shadowing will not be triggered if the primary cluster does not exist.\n  message RequestMirrorPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteAction.RequestMirrorPolicy\";\n\n    // Specifies the cluster that requests will be mirrored to. The cluster must\n    // exist in the cluster manager configuration.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // If not specified, all requests to the target cluster will be mirrored.\n    //\n    // If specified, this field takes precedence over the `runtime_key` field and requests must also\n    // fall under the percentage of matches indicated by this field.\n    //\n    // For some fraction N/D, a random number in the range [0,D) is selected. If the\n    // number is <= the value of the numerator N, or if the key is not present, the default\n    // value, the request will be mirrored.\n    core.v3.RuntimeFractionalPercent runtime_fraction = 3;\n\n    // Determines if the trace span should be sampled. 
Defaults to true.\n    google.protobuf.BoolValue trace_sampled = 4;\n\n    string hidden_envoy_deprecated_runtime_key = 2\n        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n  }\n\n  // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer\n  // <arch_overview_load_balancing_types>`.\n  // [#next-free-field: 7]\n  message HashPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteAction.HashPolicy\";\n\n    message Header {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.Header\";\n\n      // The name of the request header that will be used to obtain the hash\n      // key. If the request header is not present, no hash will be produced.\n      string header_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // If specified, the request header value will be rewritten and used\n      // to produce the hash key.\n      type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2;\n    }\n\n    // Envoy supports two types of cookie affinity:\n    //\n    // 1. Passive. Envoy takes a cookie that's present in the cookies header and\n    //    hashes on its value.\n    //\n    // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL)\n    //    on the first request from the client in its response to the client,\n    //    based on the endpoint the request gets sent to. The client then\n    //    presents this on the next and all subsequent requests. The hash of\n    //    this is sufficient to ensure these requests get sent to the same\n    //    endpoint. 
The cookie is generated by hashing the source and\n    //    destination ports and addresses so that multiple independent HTTP2\n    //    streams on the same connection will independently receive the same\n    //    cookie, even if they arrive at the Envoy simultaneously.\n    message Cookie {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.Cookie\";\n\n      // The name of the cookie that will be used to obtain the hash key. If the\n      // cookie is not present and ttl below is not set, no hash will be\n      // produced.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n\n      // If specified, a cookie with the TTL will be generated if the cookie is\n      // not present. If the TTL is present and zero, the generated cookie will\n      // be a session cookie.\n      google.protobuf.Duration ttl = 2;\n\n      // The name of the path for the cookie. If no path is specified here, no path\n      // will be set for the cookie.\n      string path = 3;\n    }\n\n    message ConnectionProperties {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.ConnectionProperties\";\n\n      // Hash on source IP address.\n      bool source_ip = 1;\n    }\n\n    message QueryParameter {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.QueryParameter\";\n\n      // The name of the URL query parameter that will be used to obtain the hash\n      // key. If the parameter is not present, no hash will be produced. 
Query\n      // parameter names are case-sensitive.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n    }\n\n    message FilterState {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RouteAction.HashPolicy.FilterState\";\n\n      // The name of the Object in the per-request filterState, which is an\n      // Envoy::Http::Hashable object. If there is no data associated with the key,\n      // or the stored object is not Envoy::Http::Hashable, no hash will be produced.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n\n    oneof policy_specifier {\n      option (validate.required) = true;\n\n      // Header hash policy.\n      Header header = 1;\n\n      // Cookie hash policy.\n      Cookie cookie = 2;\n\n      // Connection properties hash policy.\n      ConnectionProperties connection_properties = 3;\n\n      // Query parameter hash policy.\n      QueryParameter query_parameter = 5;\n\n      // Filter state hash policy.\n      FilterState filter_state = 6;\n    }\n\n    // The flag that short-circuits the hash computing. 
This field provides a\n    // 'fallback' style of configuration: \"if a terminal policy doesn't work,\n    // fallback to rest of the policy list\", it saves time when the terminal\n    // policy works.\n    //\n    // If true, and there is already a hash computed, ignore rest of the\n    // list of hash polices.\n    // For example, if the following hash methods are configured:\n    //\n    //  ========= ========\n    //  specifier terminal\n    //  ========= ========\n    //  Header A  true\n    //  Header B  false\n    //  Header C  false\n    //  ========= ========\n    //\n    // The generateHash process ends if policy \"header A\" generates a hash, as\n    // it's a terminal policy.\n    bool terminal = 4;\n  }\n\n  // Allows enabling and disabling upgrades on a per-route basis.\n  // This overrides any enabled/disabled upgrade filter chain specified in the\n  // HttpConnectionManager\n  // :ref:`upgrade_configs\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.upgrade_configs>`\n  // but does not affect any custom filter chain specified there.\n  message UpgradeConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RouteAction.UpgradeConfig\";\n\n    // Configuration for sending data upstream as a raw data payload. This is used for\n    // CONNECT requests, when forwarding CONNECT payload as raw TCP.\n    message ConnectConfig {\n      // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream.\n      core.v3.ProxyProtocolConfig proxy_protocol_config = 1;\n    }\n\n    // The case-insensitive name of this upgrade, e.g. 
\"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type] will be proxied upstream.\n    string upgrade_type = 1\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Determines if upgrades are available on this route. Defaults to true.\n    google.protobuf.BoolValue enabled = 2;\n\n    // Configuration for sending data upstream as a raw data payload. This is used for\n    // CONNECT requests, when forwarding CONNECT payload as raw TCP.\n    // Note that CONNECT support is currently considered alpha in Envoy.\n    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.\n    ConnectConfig connect_config = 3;\n  }\n\n  message MaxStreamDuration {\n    // Specifies the maximum duration allowed for streams on the route. If not specified, the value\n    // from the :ref:`max_stream_duration\n    // <envoy_api_field_config.core.v3.HttpProtocolOptions.max_stream_duration>` field in\n    // :ref:`HttpConnectionManager.common_http_protocol_options\n    // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options>`\n    // is used. If this field is set explicitly to zero, any\n    // HttpConnectionManager max_stream_duration timeout will be disabled for\n    // this route.\n    google.protobuf.Duration max_stream_duration = 1;\n\n    // If present, and the request contains a `grpc-timeout header\n    // <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, use that value as the\n    // *max_stream_duration*, but limit the applied timeout to the maximum value specified here.\n    // If set to 0, the `grpc-timeout` header is used without modification.\n    google.protobuf.Duration grpc_timeout_header_max = 2;\n\n    // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by\n    // subtracting the provided duration from the header. 
This is useful for allowing Envoy to set\n    // its global timeout to be less than that of the deadline imposed by the calling client, which\n    // makes it more likely that Envoy will handle the timeout instead of having the call canceled\n    // by the client. If, after applying the offset, the resulting timeout is zero or negative,\n    // the stream will timeout immediately.\n    google.protobuf.Duration grpc_timeout_header_offset = 3;\n  }\n\n  reserved 12, 18, 19, 16, 22, 21;\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // HTTP header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist, Envoy will\n    // return a 404 response.\n    //\n    // .. attention::\n    //\n    //   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1\n    //   *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n    string cluster_header = 2\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster. 
See\n    // :ref:`traffic splitting <config_http_conn_man_route_table_traffic_splitting_split>`\n    // for additional documentation.\n    WeightedCluster weighted_clusters = 3;\n  }\n\n  // The HTTP status code to use when configured cluster is not found.\n  // The default response code is 503 Service Unavailable.\n  ClusterNotFoundResponseCode cluster_not_found_response_code = 20\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n  // in the upstream cluster with metadata matching what's set in this field will be considered\n  // for load balancing. If using :ref:`weighted_clusters\n  // <envoy_api_field_config.route.v3.RouteAction.weighted_clusters>`, metadata will be merged, with values\n  // provided there taking precedence. The filter name should be specified as *envoy.lb*.\n  core.v3.Metadata metadata_match = 4;\n\n  // Indicates that during forwarding, the matched prefix (or path) should be\n  // swapped with this value. This option allows application URLs to be rooted\n  // at a different path from those exposed at the reverse proxy layer. The router filter will\n  // place the original path before rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of *prefix_rewrite* or\n  // :ref:`regex_rewrite <envoy_api_field_config.route.v3.RouteAction.regex_rewrite>`\n  // may be specified.\n  //\n  // .. attention::\n  //\n  //   Pay careful attention to the use of trailing slashes in the\n  //   :ref:`route's match <envoy_api_field_config.route.v3.Route.match>` prefix value.\n  //   Stripping a prefix from a path requires multiple Routes to handle all cases. For example,\n  //   rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single\n  //   :ref:`Route <envoy_api_msg_config.route.v3.Route>`, as shown by the below config entries:\n  //\n  //   .. 
code-block:: yaml\n  //\n  //     - match:\n  //         prefix: \"/prefix/\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //     - match:\n  //         prefix: \"/prefix\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //\n  //   Having above entries in the config, requests to */prefix* will be stripped to */*, while\n  //   requests to */prefix/etc* will be stripped to */etc*.\n  string prefix_rewrite = 5\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Indicates that during forwarding, portions of the path that match the\n  // pattern should be rewritten, even allowing the substitution of capture\n  // groups from the pattern into the new path as specified by the rewrite\n  // substitution string. This is useful to allow application paths to be\n  // rewritten in a way that is aware of segments with variable content like\n  // identifiers. The router filter will place the original path as it was\n  // before the rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of :ref:`prefix_rewrite <envoy_api_field_config.route.v3.RouteAction.prefix_rewrite>`\n  // or *regex_rewrite* may be specified.\n  //\n  // Examples using Google's `RE2 <https://github.com/google/re2>`_ engine:\n  //\n  // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution\n  //   string of ``\\2/instance/\\1`` would transform ``/service/foo/v1/api``\n  //   into ``/v1/api/instance/foo``.\n  //\n  // * The pattern ``one`` paired with a substitution string of ``two`` would\n  //   transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``.\n  //\n  // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of\n  //   ``\\1two\\2`` would replace only the first occurrence of ``one``,\n  //   transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``.\n  //\n  // * The pattern ``(?i)/xxx/`` paired with a 
substitution string of ``/yyy/``\n  //   would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to\n  //   ``/aaa/yyy/bbb``.\n  type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32;\n\n  oneof host_rewrite_specifier {\n    // Indicates that during forwarding, the host header will be swapped with\n    // this value.\n    string host_rewrite_literal = 6\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the hostname of the upstream host chosen by the cluster manager. This\n    // option is applicable only when the destination cluster for a route is of\n    // type *strict_dns* or *logical_dns*. Setting this to true with other cluster\n    // types has no effect.\n    google.protobuf.BoolValue auto_host_rewrite = 7;\n\n    // Indicates that during forwarding, the host header will be swapped with the content of given\n    // downstream or :ref:`custom <config_http_conn_man_headers_custom_request_headers>` header.\n    // If header value is empty, host header is left intact.\n    //\n    // .. attention::\n    //\n    //   Pay attention to the potential security implications of using this option. Provided header\n    //   must come from trusted source.\n    string host_rewrite_header = 29\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the result of the regex substitution executed on path value with query and fragment removed.\n    // This is useful for transitioning variable content between path segment and subdomain.\n    //\n    // For example with the following config:\n    //\n    //   .. 
code-block:: yaml\n    //\n    //     host_rewrite_path_regex:\n    //       pattern:\n    //         google_re2: {}\n    //         regex: \"^/(.+)/.+$\"\n    //       substitution: \\1\n    //\n    // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`.\n    type.matcher.v3.RegexMatchAndSubstitute host_rewrite_path_regex = 35;\n  }\n\n  // Specifies the upstream timeout for the route. If not specified, the default is 15s. This\n  // spans between the point at which the entire downstream request (i.e. end-of-stream) has been\n  // processed and when the upstream response has been completely processed. A value of 0 will\n  // disable the route's timeout.\n  //\n  // .. note::\n  //\n  //   This timeout includes all retries. See also\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //   :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration timeout = 8;\n\n  // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout,\n  // although the connection manager wide :ref:`stream_idle_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout>`\n  // will still apply. A value of 0 will completely disable the route's idle timeout, even if a\n  // connection manager stream idle timeout is configured.\n  //\n  // The idle timeout is distinct to :ref:`timeout\n  // <envoy_api_field_config.route.v3.RouteAction.timeout>`, which provides an upper bound\n  // on the upstream response time; :ref:`idle_timeout\n  // <envoy_api_field_config.route.v3.RouteAction.idle_timeout>` instead bounds the amount\n  // of time the request's stream may be idle.\n  //\n  // After header decoding, the idle timeout will apply on downstream and\n  // upstream request events. 
Each time an encode/decode event for headers or\n  // data is processed for the stream, the timer will be reset. If the timeout\n  // fires, the stream is terminated with a 408 Request Timeout error code if no\n  // upstream response header has been received, otherwise a stream reset\n  // occurs.\n  google.protobuf.Duration idle_timeout = 24;\n\n  // Indicates that the route has a retry policy. Note that if this is set,\n  // it'll take precedence over the virtual host level retry policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  RetryPolicy retry_policy = 9;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. Note that if this is set, it'll take\n  // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged,\n  // most internal one becomes the enforced policy). :ref:`Retry policy <envoy_api_field_config.route.v3.VirtualHost.retry_policy>`\n  // should not be set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 33;\n\n  // Indicates that the route has request mirroring policies.\n  repeated RequestMirrorPolicy request_mirror_policies = 30;\n\n  // Optionally specifies the :ref:`routing priority <arch_overview_http_routing_priority>`.\n  core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies a set of rate limit configurations that could be applied to the\n  // route.\n  repeated RateLimit rate_limits = 13;\n\n  // Specifies if the rate limit filter should include the virtual host rate\n  // limits. By default, if the route configured rate limits, the virtual host\n  // :ref:`rate_limits <envoy_api_field_config.route.v3.VirtualHost.rate_limits>` are not applied to the\n  // request.\n  //\n  // This field is deprecated. 
Please use :ref:`vh_rate_limits <envoy_v3_api_field_extensions.filters.http.ratelimit.v3.RateLimitPerRoute.vh_rate_limits>`\n  google.protobuf.BoolValue include_vh_rate_limits = 14 [deprecated = true];\n\n  // Specifies a list of hash policies to use for ring hash load balancing. Each\n  // hash policy is evaluated individually and the combined result is used to\n  // route the request. The method of combination is deterministic such that\n  // identical lists of hash policies will produce the same hash. Since a hash\n  // policy examines specific parts of a request, it can fail to produce a hash\n  // (i.e. if the hashed header is not present). If (and only if) all configured\n  // hash policies fail to generate a hash, no hash will be produced for\n  // the route. In this case, the behavior is the same as if no hash policies\n  // were specified (i.e. the ring hash load balancer will choose a random\n  // backend). If a hash policy has the \"terminal\" attribute set to true, and\n  // there is already a hash generated, the hash is returned immediately,\n  // ignoring the rest of the hash policy list.\n  repeated HashPolicy hash_policy = 15;\n\n  // Indicates that the route has a CORS policy.\n  CorsPolicy cors = 17;\n\n  // Deprecated by :ref:`grpc_timeout_header_max <envoy_api_field_config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max>`\n  // If present, and the request is a gRPC request, use the\n  // `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_,\n  // or its default value (infinity) instead of\n  // :ref:`timeout <envoy_api_field_config.route.v3.RouteAction.timeout>`, but limit the applied timeout\n  // to the maximum value specified here. If configured as 0, the maximum allowed timeout for\n  // gRPC requests is infinity. 
If not configured at all, the `grpc-timeout` header is not used\n  // and gRPC requests time out like any other requests using\n  // :ref:`timeout <envoy_api_field_config.route.v3.RouteAction.timeout>` or its default.\n  // This can be used to prevent unexpected upstream request timeouts due to potentially long\n  // time gaps between gRPC request and response in gRPC streaming mode.\n  //\n  // .. note::\n  //\n  //    If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes\n  //    precedence over `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, when\n  //    both are present. See also\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //    :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration max_grpc_timeout = 23 [deprecated = true];\n\n  // Deprecated by :ref:`grpc_timeout_header_offset <envoy_api_field_config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_offset>`.\n  // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting\n  // the provided duration from the header. This is useful in allowing Envoy to set its global\n  // timeout to be less than that of the deadline imposed by the calling client, which makes it more\n  // likely that Envoy will handle the timeout instead of having the call canceled by the client.\n  // The offset will only be applied if the provided grpc_timeout is greater than the offset. 
This\n  // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning\n  // infinity).\n  google.protobuf.Duration grpc_timeout_offset = 28 [deprecated = true];\n\n  repeated UpgradeConfig upgrade_configs = 25;\n\n  // If present, Envoy will try to follow an upstream redirect response instead of proxying the\n  // response back to the downstream. An upstream redirect response is defined\n  // by :ref:`redirect_response_codes\n  // <envoy_api_field_config.route.v3.InternalRedirectPolicy.redirect_response_codes>`.\n  InternalRedirectPolicy internal_redirect_policy = 34;\n\n  InternalRedirectAction internal_redirect_action = 26 [deprecated = true];\n\n  // An internal redirect is handled, iff the number of previous internal redirects that a\n  // downstream request has encountered is lower than this value, and\n  // :ref:`internal_redirect_action <envoy_api_field_config.route.v3.RouteAction.internal_redirect_action>`\n  // is set to :ref:`HANDLE_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_config.route.v3.RouteAction.InternalRedirectAction.HANDLE_INTERNAL_REDIRECT>`\n  // In the case where a downstream request is bounced among multiple routes by internal redirect,\n  // the first route that hits this threshold, or has\n  // :ref:`internal_redirect_action <envoy_api_field_config.route.v3.RouteAction.internal_redirect_action>`\n  // set to\n  // :ref:`PASS_THROUGH_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_config.route.v3.RouteAction.InternalRedirectAction.PASS_THROUGH_INTERNAL_REDIRECT>`\n  // will pass the redirect back to downstream.\n  //\n  // If not specified, at most one redirect will be followed.\n  google.protobuf.UInt32Value max_internal_redirects = 31 [deprecated = true];\n\n  // Indicates that the route has a hedge policy. 
Note that if this is set,\n  // it'll take precedence over the virtual host level hedge policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  HedgePolicy hedge_policy = 27;\n\n  // Specifies the maximum stream duration for this route.\n  MaxStreamDuration max_stream_duration = 36;\n\n  RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 [deprecated = true];\n}\n\n// HTTP retry :ref:`architecture overview <arch_overview_http_routing_retry>`.\n// [#next-free-field: 12]\nmessage RetryPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RetryPolicy\";\n\n  enum ResetHeaderFormat {\n    SECONDS = 0;\n    UNIX_TIMESTAMP = 1;\n  }\n\n  message RetryPriority {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RetryPolicy.RetryPriority\";\n\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n\n      google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n    }\n  }\n\n  message RetryHostPredicate {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RetryPolicy.RetryHostPredicate\";\n\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n\n      google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n    }\n  }\n\n  message RetryBackOff {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RetryPolicy.RetryBackOff\";\n\n    // Specifies the base interval between retries. This parameter is required and must be greater\n    // than zero. 
Values less than 1 ms are rounded up to 1 ms.\n    // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's\n    // back-off algorithm.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n\n    // Specifies the maximum interval between retries. This parameter is optional, but must be\n    // greater than or equal to the `base_interval` if set. The default is 10 times the\n    // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion\n    // of Envoy's back-off algorithm.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  message ResetHeader {\n    string name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // A retry back-off strategy that applies when the upstream server rate limits\n  // the request.\n  //\n  // Given this configuration:\n  //\n  // .. code-block:: yaml\n  //\n  //   rate_limited_retry_back_off:\n  //     reset_headers:\n  //     - name: Retry-After\n  //       format: SECONDS\n  //     - name: X-RateLimit-Reset\n  //       format: UNIX_TIMESTAMP\n  //     max_interval: \"300s\"\n  //\n  // The following algorithm will apply:\n  //\n  //  1. If the response contains the header ``Retry-After`` its value must be on\n  //     the form ``120`` (an integer that represents the number of seconds to\n  //     wait before retrying). If so, this value is used as the back-off interval.\n  //  2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its\n  //     value must be on the form ``1595320702`` (an integer that represents the\n  //     point in time at which to retry, as a Unix timestamp in seconds). 
If so,\n  //     the current time is subtracted from this value and the result is used as\n  //     the back-off interval.\n  //  3. Otherwise, Envoy will use the default\n  //     :ref:`exponential back-off <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_back_off>`\n  //     strategy.\n  //\n  // No matter which format is used, if the resulting back-off interval exceeds\n  // ``max_interval`` it is discarded and the next header in ``reset_headers``\n  // is tried. If a request timeout is configured for the route it will further\n  // limit how long the request will be allowed to run.\n  //\n  // To prevent many clients retrying at the same point in time jitter is added\n  // to the back-off interval, so the resulting interval is decided by taking:\n  // ``random(interval, interval * 1.5)``.\n  //\n  // .. attention::\n  //\n  //   Configuring ``rate_limited_retry_back_off`` will not by itself cause a request\n  //   to be retried. You will still need to configure the right retry policy to match\n  //   the responses from the upstream server.\n  message RateLimitedRetryBackOff {\n    // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``)\n    // to match against the response. Headers are tried in order, and matched case\n    // insensitive. The first header to be parsed successfully is used. If no headers\n    // match the default exponential back-off is used instead.\n    repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}];\n\n    // Specifies the maximum back off interval that Envoy will allow. If a reset\n    // header contains an interval longer than this then it will be discarded and\n    // the next header will be tried. Defaults to 300 seconds.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  // Specifies the conditions under which retry takes place. 
These are the same\n  // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and\n  // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`.\n  string retry_on = 1;\n\n  // Specifies the allowed number of retries. This parameter is optional and\n  // defaults to 1. These are the same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-max-retries`.\n  google.protobuf.UInt32Value num_retries = 2\n      [(udpa.annotations.field_migrate).rename = \"max_retries\"];\n\n  // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The\n  // same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.\n  //\n  // .. note::\n  //\n  //   If left unspecified, Envoy will use the global\n  //   :ref:`route timeout <envoy_api_field_config.route.v3.RouteAction.timeout>` for the request.\n  //   Consequently, when using a :ref:`5xx <config_http_filters_router_x-envoy-retry-on>` based\n  //   retry policy, a request that times out will not be retried as the total timeout budget\n  //   would have been exhausted.\n  google.protobuf.Duration per_try_timeout = 3;\n\n  // Specifies an implementation of a RetryPriority which is used to determine the\n  // distribution of load across priorities used for retries. Refer to\n  // :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more details.\n  RetryPriority retry_priority = 4;\n\n  // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host\n  // for retries. 
If any of the predicates reject the host, host selection will be reattempted.\n  // Refer to :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more\n  // details.\n  repeated RetryHostPredicate retry_host_predicate = 5;\n\n  // The maximum number of times host selection will be reattempted before giving up, at which\n  // point the host that was last selected will be routed to. If unspecified, this will default to\n  // retrying once.\n  int64 host_selection_retry_max_attempts = 6;\n\n  // HTTP status codes that should trigger a retry in addition to those specified by retry_on.\n  repeated uint32 retriable_status_codes = 7;\n\n  // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the\n  // default base interval is 25 milliseconds or, if set, the current value of the\n  // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times\n  // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries`\n  // describes Envoy's back-off algorithm.\n  RetryBackOff retry_back_off = 8;\n\n  // Specifies parameters that control a retry back-off strategy that is used\n  // when the request is rate limited by the upstream server. The server may\n  // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to\n  // provide feedback to the client on how long to wait before retrying. If\n  // configured, this back-off strategy will be used instead of the\n  // default exponential back off strategy (configured using `retry_back_off`)\n  // whenever a response includes the matching headers.\n  RateLimitedRetryBackOff rate_limited_retry_back_off = 11;\n\n  // HTTP response headers that trigger a retry if present in the response. 
A retry will be\n  // triggered if any of the header matches match the upstream response headers.\n  // The field is only consulted if 'retriable-headers' retry policy is active.\n  repeated HeaderMatcher retriable_headers = 9;\n\n  // HTTP headers which must be present in the request for retries to be attempted.\n  repeated HeaderMatcher retriable_request_headers = 10;\n}\n\n// HTTP request hedging :ref:`architecture overview <arch_overview_http_routing_hedging>`.\nmessage HedgePolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.HedgePolicy\";\n\n  // Specifies the number of initial requests that should be sent upstream.\n  // Must be at least 1.\n  // Defaults to 1.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies a probability that an additional upstream request should be sent\n  // on top of what is specified by initial_requests.\n  // Defaults to 0.\n  // [#not-implemented-hide:]\n  type.v3.FractionalPercent additional_request_chance = 2;\n\n  // Indicates that a hedged request should be sent when the per-try timeout\n  // is hit. 
This will only occur if the retry policy also indicates that a\n  // timed out request should be retried.\n  // Once a timed out request is retried due to per try timeout, the router\n  // filter will ensure that it is not retried again even if the returned\n  // response headers would otherwise be retried according the specified\n  // :ref:`RetryPolicy <envoy_api_msg_config.route.v3.RetryPolicy>`.\n  // Defaults to false.\n  bool hedge_on_per_try_timeout = 3;\n}\n\n// [#next-free-field: 9]\nmessage RedirectAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RedirectAction\";\n\n  enum RedirectResponseCode {\n    // Moved Permanently HTTP Status Code - 301.\n    MOVED_PERMANENTLY = 0;\n\n    // Found HTTP Status Code - 302.\n    FOUND = 1;\n\n    // See Other HTTP Status Code - 303.\n    SEE_OTHER = 2;\n\n    // Temporary Redirect HTTP Status Code - 307.\n    TEMPORARY_REDIRECT = 3;\n\n    // Permanent Redirect HTTP Status Code - 308.\n    PERMANENT_REDIRECT = 4;\n  }\n\n  // When the scheme redirection take place, the following rules apply:\n  //  1. If the source URI scheme is `http` and the port is explicitly\n  //     set to `:80`, the port will be removed after the redirection\n  //  2. 
If the source URI scheme is `https` and the port is explicitly\n  //     set to `:443`, the port will be removed after the redirection\n  oneof scheme_rewrite_specifier {\n    // The scheme portion of the URL will be swapped with \"https\".\n    bool https_redirect = 4;\n\n    // The scheme portion of the URL will be swapped with this value.\n    string scheme_redirect = 7;\n  }\n\n  // The host portion of the URL will be swapped with this value.\n  string host_redirect = 1\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // The port value of the URL will be swapped with this value.\n  uint32 port_redirect = 8;\n\n  oneof path_rewrite_specifier {\n    // The path portion of the URL will be swapped with this value.\n    // Please note that query string in path_redirect will override the\n    // request's query string and will not be stripped.\n    //\n    // For example, let's say we have the following routes:\n    //\n    // - match: { path: \"/old-path-1\" }\n    //   redirect: { path_redirect: \"/new-path-1\" }\n    // - match: { path: \"/old-path-2\" }\n    //   redirect: { path_redirect: \"/new-path-2\", strip-query: \"true\" }\n    // - match: { path: \"/old-path-3\" }\n    //   redirect: { path_redirect: \"/new-path-3?foo=1\", strip_query: \"true\" }\n    //\n    // 1. if request uri is \"/old-path-1?bar=1\", users will be redirected to \"/new-path-1?bar=1\"\n    // 2. if request uri is \"/old-path-2?bar=1\", users will be redirected to \"/new-path-2\"\n    // 3. if request uri is \"/old-path-3?bar=1\", users will be redirected to \"/new-path-3?foo=1\"\n    string path_redirect = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during redirection, the matched prefix (or path)\n    // should be swapped with this value. This option allows redirect URLs be dynamically created\n    // based on the request.\n    //\n    // .. 
attention::\n    //\n    //   Pay attention to the use of trailing slashes as mentioned in\n    //   :ref:`RouteAction's prefix_rewrite <envoy_api_field_config.route.v3.RouteAction.prefix_rewrite>`.\n    string prefix_rewrite = 5\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // The HTTP status code to use in the redirect response. The default response\n  // code is MOVED_PERMANENTLY (301).\n  RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // Indicates that during redirection, the query portion of the URL will\n  // be removed. Default value is false.\n  bool strip_query = 6;\n}\n\nmessage DirectResponseAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.route.DirectResponseAction\";\n\n  // Specifies the HTTP response status to be returned.\n  uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}];\n\n  // Specifies the content of the response body. If this setting is omitted,\n  // no body is included in the generated response.\n  //\n  // .. note::\n  //\n  //   Headers can be specified using *response_headers_to_add* in the enclosing\n  //   :ref:`envoy_api_msg_config.route.v3.Route`, :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` or\n  //   :ref:`envoy_api_msg_config.route.v3.VirtualHost`.\n  core.v3.DataSource body = 2;\n}\n\nmessage Decorator {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.Decorator\";\n\n  // The operation name associated with the request matched to this route. If tracing is\n  // enabled, this information will be used as the span name reported for this request.\n  //\n  // .. 
note::\n  //\n  //   For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden\n  //   by the :ref:`x-envoy-decorator-operation\n  //   <config_http_filters_router_x-envoy-decorator-operation>` header.\n  string operation = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Whether the decorated details should be propagated to the other party. The default is true.\n  google.protobuf.BoolValue propagate = 2;\n}\n\nmessage Tracing {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.Tracing\";\n\n  // Target percentage of requests managed by this HTTP connection manager that will be force\n  // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n  // header is set. This field is a direct analog for the runtime variable\n  // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n  // <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent client_sampling = 1;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be randomly\n  // selected for trace generation, if not requested by the client or not forced. This field is\n  // a direct analog for the runtime variable 'tracing.random_sampling' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent random_sampling = 2;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be traced\n  // after all other sampling checks have been applied (client-directed, force tracing, random\n  // sampling). This field functions as an upper limit on the total configured sampling rate. For\n  // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n  // of client requests with the appropriate headers to be force traced. 
This field is a direct\n  // analog for the runtime variable 'tracing.global_enabled' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent overall_sampling = 3;\n\n  // A list of custom tags with unique tag name to create tags for the active span.\n  // It will take effect after merging with the :ref:`corresponding configuration\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.custom_tags>`\n  // configured in the HTTP connection manager. If two tags with the same name are configured\n  // each in the HTTP connection manager and the route level, the one configured here takes\n  // priority.\n  repeated type.tracing.v3.CustomTag custom_tags = 4;\n}\n\n// A virtual cluster is a way of specifying a regex matching rule against\n// certain important endpoints such that statistics are generated explicitly for\n// the matched requests. The reason this is useful is that when doing\n// prefix/path matching Envoy does not always know what the application\n// considers to be an endpoint. Thus, it’s impossible for Envoy to generically\n// emit per endpoint statistics. However, often systems have highly critical\n// endpoints that they wish to get “perfect” statistics on. Virtual cluster\n// statistics are perfect in the sense that they are emitted on the downstream\n// side such that they include network level failures.\n//\n// Documentation for :ref:`virtual cluster statistics <config_http_filters_router_vcluster_stats>`.\n//\n// .. note::\n//\n//    Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for\n//    every application endpoint. 
This is both not easily maintainable and as well the matching and\n//    statistics output are not free.\nmessage VirtualCluster {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.VirtualCluster\";\n\n  // Specifies a list of header matchers to use for matching requests. Each specified header must\n  // match. The pseudo-headers `:path` and `:method` can be used to match the request path and\n  // method, respectively.\n  repeated HeaderMatcher headers = 4;\n\n  // Specifies the name of the virtual cluster. The virtual cluster name as well\n  // as the virtual host name are used when emitting statistics. The statistics are emitted by the\n  // router filter and are documented :ref:`here <config_http_filters_router_stats>`.\n  string name = 2 [(validate.rules).string = {min_len: 1}];\n\n  string hidden_envoy_deprecated_pattern = 1 [\n    deprecated = true,\n    (validate.rules).string = {max_bytes: 1024},\n    (envoy.annotations.disallowed_by_default) = true\n  ];\n\n  core.v3.RequestMethod hidden_envoy_deprecated_method = 3\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// Global rate limiting :ref:`architecture overview <arch_overview_global_rate_limit>`.\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.RateLimit\";\n\n  // [#next-free-field: 8]\n  message Action {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.route.RateLimit.Action\";\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"source_cluster\", \"<local service cluster>\")\n    //\n    // <local service cluster> is derived from the :option:`--service-cluster` option.\n    message SourceCluster {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.SourceCluster\";\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"destination_cluster\", \"<routed target cluster>\")\n    //\n    // Once a request matches against a route table rule, a routed cluster is determined by one of\n    // the following :ref:`route table configuration <envoy_api_msg_config.route.v3.RouteConfiguration>`\n    // settings:\n    //\n    // * :ref:`cluster <envoy_api_field_config.route.v3.RouteAction.cluster>` indicates the upstream cluster\n    //   to route to.\n    // * :ref:`weighted_clusters <envoy_api_field_config.route.v3.RouteAction.weighted_clusters>`\n    //   chooses a cluster randomly from a set of clusters with attributed weight.\n    // * :ref:`cluster_header <envoy_api_field_config.route.v3.RouteAction.cluster_header>` indicates which\n    //   header in the request contains the target cluster.\n    message DestinationCluster {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.DestinationCluster\";\n    }\n\n    // The following descriptor entry is appended when a header contains a key that matches the\n    // *header_name*:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<header_value_queried_from_header>\")\n    message RequestHeaders {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.RequestHeaders\";\n\n      // The header name to be queried from the request headers. 
The header’s\n      // value is used to populate the value of the descriptor entry for the\n      // descriptor_key.\n      string header_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // The key to use in the descriptor entry.\n      string descriptor_key = 2 [(validate.rules).string = {min_len: 1}];\n\n      // If set to true, Envoy skips the descriptor while calling rate limiting service\n      // when header is not present in the request. By default it skips calling the\n      // rate limiting service if this header is not present in the request.\n      bool skip_if_absent = 3;\n    }\n\n    // The following descriptor entry is appended to the descriptor and is populated using the\n    // trusted address from :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"remote_address\", \"<trusted address from x-forwarded-for>\")\n    message RemoteAddress {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.RemoteAddress\";\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"generic_key\", \"<descriptor_value>\")\n    message GenericKey {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.GenericKey\";\n\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];\n\n      // An optional key to use in the descriptor entry. If not set it defaults\n      // to 'generic_key' as the descriptor key.\n      string descriptor_key = 2;\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"header_match\", \"<descriptor_value>\")\n    message HeaderValueMatch {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.route.RateLimit.Action.HeaderValueMatch\";\n\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];\n\n      // If set to true, the action will append a descriptor entry when the\n      // request matches the headers. If set to false, the action will append a\n      // descriptor entry when the request does not match the headers. The\n      // default value is true.\n      google.protobuf.BoolValue expect_match = 2;\n\n      // Specifies a set of headers that the rate limit action should match\n      // on. The action will check the request’s headers against all the\n      // specified headers in the config. A match will happen if all the\n      // headers in the config are present in the request with the same values\n      // (or based on presence if the value field is not in the config).\n      repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    // The following descriptor entry is appended when the dynamic metadata contains a key value:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<value_queried_from_metadata>\")\n    message DynamicMetaData {\n      // The key to use in the descriptor entry.\n      string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Metadata struct that defines the key and path to retrieve the string value. A match will\n      // only happen if the value in the dynamic metadata is of type string.\n      type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];\n\n      // An optional value to use if *metadata_key* is empty. 
If not set and\n      // no value is present under the metadata_key then no descriptor is generated.\n      string default_value = 3;\n    }\n\n    oneof action_specifier {\n      option (validate.required) = true;\n\n      // Rate limit on source cluster.\n      SourceCluster source_cluster = 1;\n\n      // Rate limit on destination cluster.\n      DestinationCluster destination_cluster = 2;\n\n      // Rate limit on request headers.\n      RequestHeaders request_headers = 3;\n\n      // Rate limit on remote address.\n      RemoteAddress remote_address = 4;\n\n      // Rate limit on a generic key.\n      GenericKey generic_key = 5;\n\n      // Rate limit on the existence of request headers.\n      HeaderValueMatch header_value_match = 6;\n\n      // Rate limit on dynamic metadata.\n      DynamicMetaData dynamic_metadata = 7;\n    }\n  }\n\n  message Override {\n    // Fetches the override from the dynamic metadata.\n    message DynamicMetadata {\n      // Metadata struct that defines the key and path to retrieve the struct value.\n      // The value must be a struct containing an integer \"requests_per_unit\" property\n      // and a \"unit\" property with a value parseable to :ref:`RateLimitUnit\n      // enum <envoy_api_enum_type.v3.RateLimitUnit>`\n      type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}];\n    }\n\n    oneof override_specifier {\n      option (validate.required) = true;\n\n      // Limit override from dynamic metadata.\n      DynamicMetadata dynamic_metadata = 1;\n    }\n  }\n\n  // Refers to the stage set in the filter. The rate limit configuration only\n  // applies to filters with the same stage number. The default stage number is\n  // 0.\n  //\n  // .. 
note::\n  //\n  //   The filter supports a range of 0 - 10 inclusively for stage numbers.\n  google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}];\n\n  // The key to be set in runtime to disable this rate limit configuration.\n  string disable_key = 2;\n\n  // A list of actions that are to be applied for this rate limit configuration.\n  // Order matters as the actions are processed sequentially and the descriptor\n  // is composed by appending descriptor entries in that sequence. If an action\n  // cannot append a descriptor entry, no descriptor is generated for the\n  // configuration. See :ref:`composing actions\n  // <config_http_filters_rate_limit_composing_actions>` for additional documentation.\n  repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}];\n\n  // An optional limit override to be appended to the descriptor produced by this\n  // rate limit configuration. If the override value is invalid or cannot be resolved\n  // from metadata, no override is provided. See :ref:`rate limit override\n  // <config_http_filters_rate_limit_rate_limit_override>` for more information.\n  Override limit = 4;\n}\n\n// .. attention::\n//\n//   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host*\n//   header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n//\n// .. attention::\n//\n//   To route on HTTP method, use the special HTTP/2 *:method* header. This works for both\n//   HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g.,\n//\n//   .. code-block:: json\n//\n//     {\n//       \"name\": \":method\",\n//       \"exact_match\": \"POST\"\n//     }\n//\n// .. attention::\n//   In the absence of any header match specifier, match will default to :ref:`present_match\n//   <envoy_api_field_config.route.v3.HeaderMatcher.present_match>`. 
i.e, a request that has the :ref:`name\n//   <envoy_api_field_config.route.v3.HeaderMatcher.name>` header will match, regardless of the header's\n//   value.\n//\n//  [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.]\n// [#next-free-field: 13]\nmessage HeaderMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.route.HeaderMatcher\";\n\n  reserved 2, 3;\n\n  // Specifies the name of the header in the request.\n  string name = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Specifies how the header match will be performed to route the request.\n  oneof header_match_specifier {\n    // If specified, header match will be performed based on the value of the header.\n    string exact_match = 4;\n\n    // If specified, this regex string is a regular expression rule which implies the entire request\n    // header value must match the regex. The rule will not match if only a subsequence of the\n    // request header value matches the regex.\n    type.matcher.v3.RegexMatcher safe_regex_match = 11;\n\n    // If specified, header match will be performed based on range.\n    // The rule will match if the request header value is within this range.\n    // The entire request header value must represent an integer in base 10 notation: consisting of\n    // an optional plus or minus sign followed by a sequence of digits. The rule will not match if\n    // the header value does not represent an integer. 
Match will fail for empty values, floating\n    // point numbers or if only a subsequence of the header value is an integer.\n    //\n    // Examples:\n    //\n    // * For range [-10,0), route will match for header value -1, but not for 0, \"somestring\", 10.9,\n    //   \"-1somestring\"\n    type.v3.Int64Range range_match = 6;\n\n    // If specified, header match will be performed based on whether the header is in the\n    // request.\n    bool present_match = 7;\n\n    // If specified, header match will be performed based on the prefix of the header value.\n    // Note: empty prefix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.\n    string prefix_match = 9 [(validate.rules).string = {min_len: 1}];\n\n    // If specified, header match will be performed based on the suffix of the header value.\n    // Note: empty suffix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.\n    string suffix_match = 10 [(validate.rules).string = {min_len: 1}];\n\n    // If specified, header match will be performed based on whether the header value contains\n    // the given value or not.\n    // Note: empty contains match is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*.\n    string contains_match = 12 [(validate.rules).string = {min_len: 1}];\n\n    string hidden_envoy_deprecated_regex_match = 5 [\n      deprecated = true,\n      (validate.rules).string = {max_bytes: 1024},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n  }\n\n  // If specified, the match result will be inverted before checking. 
Defaults to false.\n  //\n  // Examples:\n  //\n  // * The regex ``\\d{3}`` does not match the value *1234*, so it will match when inverted.\n  // * The range [-10,0) will match the value -1, so it will not match when inverted.\n  bool invert_match = 8;\n}\n\n// Query parameter matching treats the query string of a request's :path header\n// as an ampersand-separated list of keys and/or key=value elements.\n// [#next-free-field: 7]\nmessage QueryParameterMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.route.QueryParameterMatcher\";\n\n  // Specifies the name of a key that must be present in the requested\n  // *path*'s query string.\n  string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}];\n\n  oneof query_parameter_match_specifier {\n    // Specifies whether a query parameter value should match against a string.\n    type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}];\n\n    // Specifies whether a query parameter should be present.\n    bool present_match = 6;\n  }\n\n  string hidden_envoy_deprecated_value = 3\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n\n  google.protobuf.BoolValue hidden_envoy_deprecated_regex = 4\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// HTTP Internal Redirect :ref:`architecture overview <arch_overview_internal_redirects>`.\nmessage InternalRedirectPolicy {\n  // An internal redirect is not handled, unless the number of previous internal redirects that a\n  // downstream request has encountered is lower than this value.\n  // In the case where a downstream request is bounced among multiple routes by internal redirect,\n  // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy\n  // <envoy_api_field_config.route.v3.RouteAction.internal_redirect_policy>`\n  // will pass the redirect back to downstream.\n  //\n  // If not 
specified, at most one redirect will be followed.\n  google.protobuf.UInt32Value max_internal_redirects = 1;\n\n  // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified,\n  // only 302 will be treated as internal redirect.\n  // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored.\n  repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}];\n\n  // Specifies a list of predicates that are queried when an upstream response is deemed\n  // to trigger an internal redirect by all other criteria. Any predicate in the list can reject\n  // the redirect, causing the response to be proxied to downstream.\n  repeated core.v3.TypedExtensionConfig predicates = 3;\n\n  // Allow internal redirect to follow a target URI with a different scheme than the value of\n  // x-forwarded-proto. The default is false.\n  bool allow_cross_scheme_redirect = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/route/v3/scoped_route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v3\";\noption java_outer_classname = \"ScopedRouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP scoped routing configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// Specifies a routing scope, which associates a\n// :ref:`Key<envoy_api_msg_config.route.v3.ScopedRouteConfiguration.Key>` to a\n// :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` (identified by its resource name).\n//\n// The HTTP connection manager builds up a table consisting of these Key to\n// RouteConfiguration mappings, and looks up the RouteConfiguration to use per\n// request according to the algorithm specified in the\n// :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder>`\n// assigned to the HttpConnectionManager.\n//\n// For example, with the following configurations (in YAML):\n//\n// HttpConnectionManager config:\n//\n// .. code::\n//\n//   ...\n//   scoped_routes:\n//     name: foo-scoped-routes\n//     scope_key_builder:\n//       fragments:\n//         - header_value_extractor:\n//             name: X-Route-Selector\n//             element_separator: ,\n//             element:\n//               separator: =\n//               key: vip\n//\n// ScopedRouteConfiguration resources (specified statically via\n// :ref:`scoped_route_configurations_list<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_route_configurations_list>`\n// or obtained dynamically via SRDS):\n//\n// .. 
code::\n//\n//  (1)\n//   name: route-scope1\n//   route_configuration_name: route-config1\n//   key:\n//      fragments:\n//        - string_key: 172.10.10.20\n//\n//  (2)\n//   name: route-scope2\n//   route_configuration_name: route-config2\n//   key:\n//     fragments:\n//       - string_key: 172.20.20.30\n//\n// A request from a client such as:\n//\n// .. code::\n//\n//     GET / HTTP/1.1\n//     Host: foo.com\n//     X-Route-Selector: vip=172.10.10.20\n//\n// would result in the routing table defined by the `route-config1`\n// RouteConfiguration being assigned to the HTTP request/stream.\n//\nmessage ScopedRouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.ScopedRouteConfiguration\";\n\n  // Specifies a key which is matched against the output of the\n  // :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder>`\n  // specified in the HttpConnectionManager. The matching is done per HTTP\n  // request and is dependent on the order of the fragments contained in the\n  // Key.\n  message Key {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.ScopedRouteConfiguration.Key\";\n\n    message Fragment {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.api.v2.ScopedRouteConfiguration.Key.Fragment\";\n\n      oneof type {\n        option (validate.required) = true;\n\n        // A string to match against.\n        string string_key = 1;\n      }\n    }\n\n    // The ordered set of fragments to match against. 
The order must match the\n    // fragments in the corresponding\n    // :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder>`.\n    repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Whether the RouteConfiguration should be loaded on demand.\n  bool on_demand = 4;\n\n  // The name assigned to the routing scope.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v3.DiscoveryRequest` to an\n  // RDS server to fetch the :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` associated\n  // with this scope.\n  string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The key to match against.\n  Key key = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/route/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/metadata/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/route/v4alpha/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v4alpha\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP route configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// [#next-free-field: 11]\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.RouteConfiguration\";\n\n  // The name of the route configuration. For example, it might match\n  // :ref:`route_config_name\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.Rds.route_config_name>` in\n  // :ref:`envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.Rds`.\n  string name = 1;\n\n  // An array of virtual hosts that make up the route table.\n  repeated VirtualHost virtual_hosts = 2;\n\n  // An array of virtual hosts will be dynamically loaded via the VHDS API.\n  // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used\n  // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for\n  // on-demand discovery of virtual hosts. 
The contents of these two fields will be merged to\n  // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration\n  // taking precedence.\n  Vhds vhds = 9;\n\n  // Optionally specifies a list of HTTP headers that the connection manager\n  // will consider to be internal only. If they are found on external requests they will be cleaned\n  // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information.\n  repeated string internal_only_headers = 3 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each response that\n  // the connection manager encodes. Headers specified at this level are applied\n  // after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or\n  // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption response_headers_to_add = 4\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // that the connection manager encodes.\n  repeated string response_headers_to_remove = 5 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // routed by the HTTP connection manager. Headers specified at this level are\n  // applied after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or\n  // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. 
For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption request_headers_to_add = 6\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // routed by the HTTP connection manager.\n  repeated string request_headers_to_remove = 8 [\n    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}\n  ];\n\n  // By default, headers that should be added/removed are evaluated from most to least specific:\n  //\n  // * route level\n  // * virtual host level\n  // * connection manager level\n  //\n  // To allow setting overrides at the route or virtual host level, this order can be reversed\n  // by setting this option to true. Defaults to false.\n  //\n  // [#next-major-version: In the v3 API, this will default to true.]\n  bool most_specific_header_mutations_wins = 10;\n\n  // An optional boolean that specifies whether the clusters that the route\n  // table refers to will be validated by the cluster manager. If set to true\n  // and a route refers to a non-existent cluster, the route table will not\n  // load. If set to false and a route refers to a non-existent cluster, the\n  // route table will load and the router filter will return a 404 if the route\n  // is selected at runtime. This setting defaults to true if the route table\n  // is statically defined via the :ref:`route_config\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.route_config>`\n  // option. This setting default to false if the route table is loaded dynamically via the\n  // :ref:`rds\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.rds>`\n  // option. 
Users may wish to override the default behavior in certain cases (for example when\n  // using CDS with a static route table).\n  google.protobuf.BoolValue validate_clusters = 7;\n}\n\nmessage Vhds {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.Vhds\";\n\n  // Configuration source specifier for VHDS.\n  core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/route/v4alpha/route_components.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/proxy_protocol.proto\";\nimport \"envoy/type/matcher/v4alpha/regex.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/metadata/v3/metadata.proto\";\nimport \"envoy/type/tracing/v3/custom_tag.proto\";\nimport \"envoy/type/v3/percent.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v4alpha\";\noption java_outer_classname = \"RouteComponentsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP route components]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n// * HTTP :ref:`router filter <config_http_filters_router>`\n\n// The top level element in the routing configuration is a virtual host. Each virtual host has\n// a logical name as well as a set of domains that get routed to it based on the incoming request's\n// host header. This allows a single listener to service multiple top level domain path trees. 
Once\n// a virtual host is selected based on the domain, the routes are processed in order to see which\n// upstream cluster to route to or whether to perform a redirect.\n// [#next-free-field: 21]\nmessage VirtualHost {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.VirtualHost\";\n\n  enum TlsRequirementType {\n    // No TLS requirement for the virtual host.\n    NONE = 0;\n\n    // External requests must use TLS. If a request is external and it is not\n    // using TLS, a 301 redirect will be sent telling the client to use HTTPS.\n    EXTERNAL_ONLY = 1;\n\n    // All requests must use TLS. If a request is not using TLS, a 301 redirect\n    // will be sent telling the client to use HTTPS.\n    ALL = 2;\n  }\n\n  reserved 9, 12;\n\n  reserved \"per_filter_config\";\n\n  // The logical name of the virtual host. This is used when emitting certain\n  // statistics but is not relevant for routing.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A list of domains (host/authority header) that will be matched to this\n  // virtual host. Wildcard hosts are supported in the suffix or prefix form.\n  //\n  // Domain search order:\n  //  1. Exact domain names: ``www.foo.com``.\n  //  2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``.\n  //  3. Prefix domain wildcards: ``foo.*`` or ``foo-*``.\n  //  4. Special wildcard ``*`` matching any domain.\n  //\n  // .. note::\n  //\n  //   The wildcard will not match the empty string.\n  //   e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``.\n  //   The longest wildcards match first.\n  //   Only a single virtual host in the entire route configuration can match on ``*``. A domain\n  //   must be unique across all virtual hosts or the config will fail to load.\n  //\n  // Domains cannot contain control characters. 
This is validated by the well_known_regex HTTP_HEADER_VALUE.\n  repeated string domains = 2 [(validate.rules).repeated = {\n    min_items: 1\n    items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}}\n  }];\n\n  // The list of routes that will be matched, in order, for incoming requests.\n  // The first route that matches will be used.\n  repeated Route routes = 3;\n\n  // Specifies the type of TLS enforcement the virtual host expects. If this option is not\n  // specified, there is no TLS requirement for the virtual host.\n  TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // A list of virtual clusters defined for this virtual host. Virtual clusters\n  // are used for additional statistics gathering.\n  repeated VirtualCluster virtual_clusters = 5;\n\n  // Specifies a set of rate limit configurations that will be applied to the\n  // virtual host.\n  repeated RateLimit rate_limits = 6;\n\n  // Specifies a list of HTTP headers that should be added to each request\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. 
For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption request_headers_to_add = 7\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // handled by this virtual host.\n  repeated string request_headers_to_remove = 13 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Specifies a list of HTTP headers that should be added to each response\n  // handled by this virtual host. Headers specified at this level are applied\n  // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including\n  // details on header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // handled by this virtual host.\n  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Indicates that the virtual host has a CORS policy.\n  CorsPolicy cors = 8;\n\n  // The per_filter_config field can be used to provide virtual host-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. 
Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n  // for if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 15;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the upstream request. Setting this option will cause it to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the upstream\n  // will see the attempt count as perceived by the second Envoy. Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_extensions.filters.http.router.v4alpha.Router.suppress_envoy_headers>` flag.\n  //\n  // [#next-major-version: rename to include_attempt_count_in_request.]\n  bool include_request_attempt_count = 14;\n\n  // Decides whether the :ref:`x-envoy-attempt-count\n  // <config_http_filters_router_x-envoy-attempt-count>` header should be included\n  // in the downstream response. Setting this option will cause the router to override any existing header\n  // value, so in the case of two Envoys on the request path with this option enabled, the downstream\n  // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false.\n  // This header is unaffected by the\n  // :ref:`suppress_envoy_headers\n  // <envoy_api_field_extensions.filters.http.router.v4alpha.Router.suppress_envoy_headers>` flag.\n  bool include_attempt_count_in_response = 19;\n\n  // Indicates the retry policy for all routes in this virtual host. Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  RetryPolicy retry_policy = 16;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. 
Note that setting a route level entry\n  // will take precedence over this config and it'll be treated independently (e.g.: values are not\n  // inherited). :ref:`Retry policy <envoy_api_field_config.route.v4alpha.VirtualHost.retry_policy>` should not be\n  // set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 20;\n\n  // Indicates the hedge policy for all routes in this virtual host. Note that setting a\n  // route level entry will take precedence over this config and it'll be treated\n  // independently (e.g.: values are not inherited).\n  HedgePolicy hedge_policy = 17;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum\n  // value of this and the listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18;\n}\n\n// A filter-defined action type.\nmessage FilterAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.FilterAction\";\n\n  google.protobuf.Any action = 1;\n}\n\n// A route is both a specification of how to match a request as well as an indication of what to do\n// next (e.g., redirect, forward, rewrite, etc.).\n//\n// .. 
attention::\n//\n//   Envoy supports routing on HTTP method via :ref:`header matching\n//   <envoy_api_msg_config.route.v4alpha.HeaderMatcher>`.\n// [#next-free-field: 18]\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.Route\";\n\n  reserved 6, 8;\n\n  reserved \"per_filter_config\";\n\n  // Name for the route.\n  string name = 14;\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  oneof action {\n    option (validate.required) = true;\n\n    // Route request to some upstream cluster.\n    RouteAction route = 2;\n\n    // Return a redirect.\n    RedirectAction redirect = 3;\n\n    // Return an arbitrary HTTP response directly, without proxying.\n    DirectResponseAction direct_response = 7;\n\n    // [#not-implemented-hide:]\n    // If true, a filter will define the action (e.g., it could dynamically generate the\n    // RouteAction).\n    // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when\n    // implemented]\n    FilterAction filter_action = 17;\n  }\n\n  // The Metadata field can be used to provide additional information\n  // about the route. It can be used for configuration, stats, and logging.\n  // The metadata should go under the filter namespace that will need it.\n  // For instance, if the metadata is intended for the Router filter,\n  // the filter name should be specified as *envoy.filters.http.router*.\n  core.v4alpha.Metadata metadata = 4;\n\n  // Decorator for the matched route.\n  Decorator decorator = 5;\n\n  // The typed_per_filter_config field can be used to provide route-specific\n  // configurations for filters. The key should match the filter name, such as\n  // *envoy.filters.http.buffer* for the HTTP buffer filter. 
Use of this field is filter\n  // specific; see the :ref:`HTTP filter documentation <config_http_filters>` for\n  // if and how it is utilized.\n  map<string, google.protobuf.Any> typed_per_filter_config = 13;\n\n  // Specifies a set of headers that will be added to requests matching this\n  // route. Headers specified at this level are applied before headers from the\n  // enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and\n  // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on\n  // header value syntax, see the documentation on :ref:`custom request headers\n  // <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption request_headers_to_add = 9\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each request\n  // matching this route.\n  repeated string request_headers_to_remove = 12 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Specifies a set of headers that will be added to responses to requests\n  // matching this route. Headers specified at this level are applied before\n  // headers from the enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and\n  // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. 
For more information, including\n  // details on header value syntax, see the documentation on\n  // :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10\n      [(validate.rules).repeated = {max_items: 1000}];\n\n  // Specifies a list of HTTP headers that should be removed from each response\n  // to requests matching this route.\n  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {\n    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}\n  }];\n\n  // Presence of the object defines whether the connection manager's tracing configuration\n  // is overridden by this route specific instance.\n  Tracing tracing = 15;\n\n  // The maximum bytes which will be buffered for retries and shadowing.\n  // If set, the bytes actually buffered will be the minimum value of this and the\n  // listener per_connection_buffer_limit_bytes.\n  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16;\n}\n\n// Compared to the :ref:`cluster <envoy_api_field_config.route.v4alpha.RouteAction.cluster>` field that specifies a\n// single upstream cluster as the target of a request, the :ref:`weighted_clusters\n// <envoy_api_field_config.route.v4alpha.RouteAction.weighted_clusters>` option allows for specification of\n// multiple upstream clusters along with weights that indicate the percentage of\n// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the\n// weights.\nmessage WeightedCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.WeightedCluster\";\n\n  // [#next-free-field: 11]\n  message ClusterWeight {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.WeightedCluster.ClusterWeight\";\n\n    reserved 7, 8;\n\n    reserved \"per_filter_config\";\n\n    // Name of the upstream cluster. 
The cluster must exist in the\n    // :ref:`cluster manager configuration <config_cluster_manager>`.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // An integer between 0 and :ref:`total_weight\n    // <envoy_api_field_config.route.v4alpha.WeightedCluster.total_weight>`. When a request matches the route,\n    // the choice of an upstream cluster is determined by its weight. The sum of weights across all\n    // entries in the clusters array must add up to the total_weight, which defaults to 100.\n    google.protobuf.UInt32Value weight = 2;\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field will be considered for\n    // load balancing. Note that this will be merged with what's provided in\n    // :ref:`RouteAction.metadata_match <envoy_api_field_config.route.v4alpha.RouteAction.metadata_match>`, with\n    // values here taking precedence. The filter name should be specified as *envoy.lb*.\n    core.v4alpha.Metadata metadata_match = 3;\n\n    // Specifies a list of headers to be added to requests when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and\n    // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. 
For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.v4alpha.HeaderValueOption request_headers_to_add = 4\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of HTTP headers that should be removed from each request when\n    // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.\n    repeated string request_headers_to_remove = 9 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // Specifies a list of headers to be added to responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.\n    // Headers specified at this level are applied before headers from the enclosing\n    // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and\n    // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on\n    // header value syntax, see the documentation on :ref:`custom request headers\n    // <config_http_conn_man_headers_custom_request_headers>`.\n    repeated core.v4alpha.HeaderValueOption response_headers_to_add = 5\n        [(validate.rules).repeated = {max_items: 1000}];\n\n    // Specifies a list of headers to be removed from responses when this cluster is selected\n    // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.\n    repeated string response_headers_to_remove = 6 [(validate.rules).repeated = {\n      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}\n    }];\n\n    // The per_filter_config field can be used to provide weighted cluster-specific\n    // configurations for filters. 
The key should match the filter name, such as\n    // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter\n    // specific; see the :ref:`HTTP filter documentation <config_http_filters>`\n    // for if and how it is utilized.\n    map<string, google.protobuf.Any> typed_per_filter_config = 10;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Specifies the total weight across all clusters. The sum of all cluster weights must equal this\n  // value, which must be greater than 0. Defaults to 100.\n  google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies the runtime key prefix that should be used to construct the\n  // runtime keys associated with each cluster. When the *runtime_key_prefix* is\n  // specified, the router will look for weights associated with each upstream\n  // cluster under the key *runtime_key_prefix* + \".\" + *cluster[i].name* where\n  // *cluster[i]* denotes an entry in the clusters array field. If the runtime\n  // key for the cluster does not exist, the value specified in the\n  // configuration file will be used as the default weight. 
See the :ref:`runtime documentation\n  // <operations_runtime>` for how key names map to the underlying implementation.\n  string runtime_key_prefix = 2;\n}\n\n// [#next-free-field: 13]\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.RouteMatch\";\n\n  message GrpcRouteMatchOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions\";\n  }\n\n  message TlsContextMatchOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteMatch.TlsContextMatchOptions\";\n\n    // If specified, the route will match against whether or not a certificate is presented.\n    // If not specified, certificate presentation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue presented = 1;\n\n    // If specified, the route will match against whether or not a certificate is validated.\n    // If not specified, certificate validation status (true or false) will not be considered when route matching.\n    google.protobuf.BoolValue validated = 2;\n  }\n\n  // An extensible message for matching CONNECT requests.\n  message ConnectMatcher {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteMatch.ConnectMatcher\";\n  }\n\n  reserved 5, 3;\n\n  reserved \"regex\";\n\n  oneof path_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route is a prefix rule meaning that the prefix must\n    // match the beginning of the *:path* header.\n    string prefix = 1;\n\n    // If specified, the route is an exact path rule meaning that the path must\n    // exactly match the *:path* header once the query string is removed.\n    string path = 2;\n\n    // If specified, the route is a regular expression rule meaning that the\n    // regex must match the *:path* header once the query string 
is removed. The entire path\n    // (without the query string) must match the regex. The rule will not match if only a\n    // subsequence of the *:path* header matches the regex.\n    //\n    // [#next-major-version: In the v3 API we should redo how path specification works such\n    // that we utilize StringMatcher, and additionally have consistent options around whether we\n    // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive\n    // to deprecate the existing options. We should even consider whether we want to do away with\n    // path_specifier entirely and just rely on a set of header matchers which can already match\n    // on :path, etc. The issue with that is it is unclear how to generically deal with query string\n    // stripping. This needs more thought.]\n    type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];\n\n    // If this is used as the matcher, the matcher will only match CONNECT requests.\n    // Note that this will not match HTTP/2 upgrade-style CONNECT requests\n    // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style\n    // upgrades.\n    // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2,\n    // where Extended CONNECT requests may have a path, the path matchers will work if\n    // there is a path present.\n    // Note that CONNECT support is currently considered alpha in Envoy.\n    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.\n    ConnectMatcher connect_matcher = 12;\n  }\n\n  // Indicates that prefix/path matching should be case sensitive. The default\n  // is true.\n  google.protobuf.BoolValue case_sensitive = 4;\n\n  // Indicates that the route should additionally match on a runtime key. Every time the route\n  // is considered for a match, it must also fall under the percentage of matches indicated by\n  // this field. 
For some fraction N/D, a random number in the range [0,D) is selected. If the\n  // number is <= the value of the numerator N, or if the key is not present, the default\n  // value, the router continues to evaluate the remaining match criteria. A runtime_fraction\n  // route configuration can be used to roll out route changes in a gradual manner without full\n  // code/config deploys. Refer to the :ref:`traffic shifting\n  // <config_http_conn_man_route_table_traffic_splitting_shift>` docs for additional documentation.\n  //\n  // .. note::\n  //\n  //    Parsing this field is implemented such that the runtime key's data may be represented\n  //    as a FractionalPercent proto represented as JSON/YAML and may also be represented as an\n  //    integer with the assumption that the value is an integral percentage out of 100. For\n  //    instance, a runtime key lookup returning the value \"42\" would parse as a FractionalPercent\n  //    whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics.\n  core.v4alpha.RuntimeFractionalPercent runtime_fraction = 9;\n\n  // Specifies a set of headers that the route should match on. The router will\n  // check the request’s headers against all the specified headers in the route\n  // config. A match will happen if all the headers in the route are present in\n  // the request with the same values (or based on presence if the value field\n  // is not in the config).\n  repeated HeaderMatcher headers = 6;\n\n  // Specifies a set of URL query parameters on which the route should\n  // match. The router will check the query string from the *path* header\n  // against all the specified query parameters. If the number of specified\n  // query parameters is nonzero, they all must match the *path* header's\n  // query string for a match to occur.\n  repeated QueryParameterMatcher query_parameters = 7;\n\n  // If specified, only gRPC requests will be matched. 
The router will check\n  // that the content-type header has a application/grpc or one of the various\n  // application/grpc+ values.\n  GrpcRouteMatchOptions grpc = 8;\n\n  // If specified, the client tls context will be matched against the defined\n  // match options.\n  //\n  // [#next-major-version: unify with RBAC]\n  TlsContextMatchOptions tls_context = 11;\n}\n\n// [#next-free-field: 12]\nmessage CorsPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.CorsPolicy\";\n\n  reserved 1, 8, 7;\n\n  reserved \"allow_origin\", \"allow_origin_regex\", \"enabled\";\n\n  // Specifies string patterns that match allowed origins. An origin is allowed if any of the\n  // string matchers match.\n  repeated type.matcher.v4alpha.StringMatcher allow_origin_string_match = 11;\n\n  // Specifies the content for the *access-control-allow-methods* header.\n  string allow_methods = 2;\n\n  // Specifies the content for the *access-control-allow-headers* header.\n  string allow_headers = 3;\n\n  // Specifies the content for the *access-control-expose-headers* header.\n  string expose_headers = 4;\n\n  // Specifies the content for the *access-control-max-age* header.\n  string max_age = 5;\n\n  // Specifies whether the resource allows credentials.\n  google.protobuf.BoolValue allow_credentials = 6;\n\n  oneof enabled_specifier {\n    // Specifies the % of requests for which the CORS filter is enabled.\n    //\n    // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS\n    // filter will be enabled for 100% of the requests.\n    //\n    // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is\n    // specified, Envoy will lookup the runtime key to get the percentage of requests to filter.\n    core.v4alpha.RuntimeFractionalPercent filter_enabled = 9;\n  }\n\n  // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not\n 
 // enforced.\n  //\n  // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those\n  // fields have to explicitly disable the filter in order for this setting to take effect.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* to determine if it's valid but will not enforce any policies.\n  core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10;\n}\n\n// [#next-free-field: 37]\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.RouteAction\";\n\n  enum ClusterNotFoundResponseCode {\n    // HTTP status code - 503 Service Unavailable.\n    SERVICE_UNAVAILABLE = 0;\n\n    // HTTP status code - 404 Not Found.\n    NOT_FOUND = 1;\n  }\n\n  // Configures :ref:`internal redirect <arch_overview_internal_redirects>` behavior.\n  // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.]\n  enum InternalRedirectAction {\n    option deprecated = true;\n\n    PASS_THROUGH_INTERNAL_REDIRECT = 0;\n    HANDLE_INTERNAL_REDIRECT = 1;\n  }\n\n  // The router is capable of shadowing traffic from one cluster to another. The current\n  // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n  // respond before returning the response from the primary cluster. All normal statistics are\n  // collected for the shadow cluster making this feature useful for testing.\n  //\n  // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is\n  // useful for logging. For example, *cluster1* becomes *cluster1-shadow*.\n  //\n  // .. 
note::\n  //\n  //   Shadowing will not be triggered if the primary cluster does not exist.\n  message RequestMirrorPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteAction.RequestMirrorPolicy\";\n\n    reserved 2;\n\n    reserved \"runtime_key\";\n\n    // Specifies the cluster that requests will be mirrored to. The cluster must\n    // exist in the cluster manager configuration.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // If not specified, all requests to the target cluster will be mirrored.\n    //\n    // If specified, this field takes precedence over the `runtime_key` field and requests must also\n    // fall under the percentage of matches indicated by this field.\n    //\n    // For some fraction N/D, a random number in the range [0,D) is selected. If the\n    // number is <= the value of the numerator N, or if the key is not present, the default\n    // value, the request will be mirrored.\n    core.v4alpha.RuntimeFractionalPercent runtime_fraction = 3;\n\n    // Determines if the trace span should be sampled. Defaults to true.\n    google.protobuf.BoolValue trace_sampled = 4;\n  }\n\n  // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer\n  // <arch_overview_load_balancing_types>`.\n  // [#next-free-field: 7]\n  message HashPolicy {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteAction.HashPolicy\";\n\n    message Header {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.Header\";\n\n      // The name of the request header that will be used to obtain the hash\n      // key. 
If the request header is not present, no hash will be produced.\n      string header_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // If specified, the request header value will be rewritten and used\n      // to produce the hash key.\n      type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 2;\n    }\n\n    // Envoy supports two types of cookie affinity:\n    //\n    // 1. Passive. Envoy takes a cookie that's present in the cookies header and\n    //    hashes on its value.\n    //\n    // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL)\n    //    on the first request from the client in its response to the client,\n    //    based on the endpoint the request gets sent to. The client then\n    //    presents this on the next and all subsequent requests. The hash of\n    //    this is sufficient to ensure these requests get sent to the same\n    //    endpoint. The cookie is generated by hashing the source and\n    //    destination ports and addresses so that multiple independent HTTP2\n    //    streams on the same connection will independently receive the same\n    //    cookie, even if they arrive at the Envoy simultaneously.\n    message Cookie {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.Cookie\";\n\n      // The name of the cookie that will be used to obtain the hash key. If the\n      // cookie is not present and ttl below is not set, no hash will be\n      // produced.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n\n      // If specified, a cookie with the TTL will be generated if the cookie is\n      // not present. If the TTL is present and zero, the generated cookie will\n      // be a session cookie.\n      google.protobuf.Duration ttl = 2;\n\n      // The name of the path for the cookie. 
If no path is specified here, no path\n      // will be set for the cookie.\n      string path = 3;\n    }\n\n    message ConnectionProperties {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties\";\n\n      // Hash on source IP address.\n      bool source_ip = 1;\n    }\n\n    message QueryParameter {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter\";\n\n      // The name of the URL query parameter that will be used to obtain the hash\n      // key. If the parameter is not present, no hash will be produced. Query\n      // parameter names are case-sensitive.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n    }\n\n    message FilterState {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.HashPolicy.FilterState\";\n\n      // The name of the Object in the per-request filterState, which is an\n      // Envoy::Http::Hashable object. If there is no data associated with the key,\n      // or the stored object is not Envoy::Http::Hashable, no hash will be produced.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n\n    oneof policy_specifier {\n      option (validate.required) = true;\n\n      // Header hash policy.\n      Header header = 1;\n\n      // Cookie hash policy.\n      Cookie cookie = 2;\n\n      // Connection properties hash policy.\n      ConnectionProperties connection_properties = 3;\n\n      // Query parameter hash policy.\n      QueryParameter query_parameter = 5;\n\n      // Filter state hash policy.\n      FilterState filter_state = 6;\n    }\n\n    // The flag that short-circuits the hash computing. 
This field provides a\n    // 'fallback' style of configuration: \"if a terminal policy doesn't work,\n    // fallback to rest of the policy list\", it saves time when the terminal\n    // policy works.\n    //\n    // If true, and there is already a hash computed, ignore rest of the\n    // list of hash polices.\n    // For example, if the following hash methods are configured:\n    //\n    //  ========= ========\n    //  specifier terminal\n    //  ========= ========\n    //  Header A  true\n    //  Header B  false\n    //  Header C  false\n    //  ========= ========\n    //\n    // The generateHash process ends if policy \"header A\" generates a hash, as\n    // it's a terminal policy.\n    bool terminal = 4;\n  }\n\n  // Allows enabling and disabling upgrades on a per-route basis.\n  // This overrides any enabled/disabled upgrade filter chain specified in the\n  // HttpConnectionManager\n  // :ref:`upgrade_configs\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.upgrade_configs>`\n  // but does not affect any custom filter chain specified there.\n  message UpgradeConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteAction.UpgradeConfig\";\n\n    // Configuration for sending data upstream as a raw data payload. This is used for\n    // CONNECT requests, when forwarding CONNECT payload as raw TCP.\n    message ConnectConfig {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig\";\n\n      // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream.\n      core.v4alpha.ProxyProtocolConfig proxy_protocol_config = 1;\n    }\n\n    // The case-insensitive name of this upgrade, e.g. 
\"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type] will be proxied upstream.\n    string upgrade_type = 1\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Determines if upgrades are available on this route. Defaults to true.\n    google.protobuf.BoolValue enabled = 2;\n\n    // Configuration for sending data upstream as a raw data payload. This is used for\n    // CONNECT requests, when forwarding CONNECT payload as raw TCP.\n    // Note that CONNECT support is currently considered alpha in Envoy.\n    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.\n    ConnectConfig connect_config = 3;\n  }\n\n  message MaxStreamDuration {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RouteAction.MaxStreamDuration\";\n\n    // Specifies the maximum duration allowed for streams on the route. If not specified, the value\n    // from the :ref:`max_stream_duration\n    // <envoy_api_field_config.core.v4alpha.HttpProtocolOptions.max_stream_duration>` field in\n    // :ref:`HttpConnectionManager.common_http_protocol_options\n    // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.common_http_protocol_options>`\n    // is used. 
If this field is set explicitly to zero, any\n    // HttpConnectionManager max_stream_duration timeout will be disabled for\n    // this route.\n    google.protobuf.Duration max_stream_duration = 1;\n\n    // If present, and the request contains a `grpc-timeout header\n    // <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, use that value as the\n    // *max_stream_duration*, but limit the applied timeout to the maximum value specified here.\n    // If set to 0, the `grpc-timeout` header is used without modification.\n    google.protobuf.Duration grpc_timeout_header_max = 2;\n\n    // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by\n    // subtracting the provided duration from the header. This is useful for allowing Envoy to set\n    // its global timeout to be less than that of the deadline imposed by the calling client, which\n    // makes it more likely that Envoy will handle the timeout instead of having the call canceled\n    // by the client. If, after applying the offset, the resulting timeout is zero or negative,\n    // the stream will timeout immediately.\n    google.protobuf.Duration grpc_timeout_header_offset = 3;\n  }\n\n  reserved 12, 18, 19, 16, 22, 21, 10;\n\n  reserved \"request_mirror_policy\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // HTTP header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist, Envoy will\n    // return a 404 response.\n    //\n    // .. attention::\n    //\n    //   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1\n    //   *Host* header. 
Thus, if attempting to match on *Host*, match on *:authority* instead.\n    string cluster_header = 2\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster. See\n    // :ref:`traffic splitting <config_http_conn_man_route_table_traffic_splitting_split>`\n    // for additional documentation.\n    WeightedCluster weighted_clusters = 3;\n  }\n\n  // The HTTP status code to use when configured cluster is not found.\n  // The default response code is 503 Service Unavailable.\n  ClusterNotFoundResponseCode cluster_not_found_response_code = 20\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n  // in the upstream cluster with metadata matching what's set in this field will be considered\n  // for load balancing. If using :ref:`weighted_clusters\n  // <envoy_api_field_config.route.v4alpha.RouteAction.weighted_clusters>`, metadata will be merged, with values\n  // provided there taking precedence. The filter name should be specified as *envoy.lb*.\n  core.v4alpha.Metadata metadata_match = 4;\n\n  // Indicates that during forwarding, the matched prefix (or path) should be\n  // swapped with this value. This option allows application URLs to be rooted\n  // at a different path from those exposed at the reverse proxy layer. The router filter will\n  // place the original path before rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of *prefix_rewrite* or\n  // :ref:`regex_rewrite <envoy_api_field_config.route.v4alpha.RouteAction.regex_rewrite>`\n  // may be specified.\n  //\n  // .. 
attention::\n  //\n  //   Pay careful attention to the use of trailing slashes in the\n  //   :ref:`route's match <envoy_api_field_config.route.v4alpha.Route.match>` prefix value.\n  //   Stripping a prefix from a path requires multiple Routes to handle all cases. For example,\n  //   rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single\n  //   :ref:`Route <envoy_api_msg_config.route.v4alpha.Route>`, as shown by the below config entries:\n  //\n  //   .. code-block:: yaml\n  //\n  //     - match:\n  //         prefix: \"/prefix/\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //     - match:\n  //         prefix: \"/prefix\"\n  //       route:\n  //         prefix_rewrite: \"/\"\n  //\n  //   Having above entries in the config, requests to */prefix* will be stripped to */*, while\n  //   requests to */prefix/etc* will be stripped to */etc*.\n  string prefix_rewrite = 5\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Indicates that during forwarding, portions of the path that match the\n  // pattern should be rewritten, even allowing the substitution of capture\n  // groups from the pattern into the new path as specified by the rewrite\n  // substitution string. This is useful to allow application paths to be\n  // rewritten in a way that is aware of segments with variable content like\n  // identifiers. 
The router filter will place the original path as it was\n  // before the rewrite into the :ref:`x-envoy-original-path\n  // <config_http_filters_router_x-envoy-original-path>` header.\n  //\n  // Only one of :ref:`prefix_rewrite <envoy_api_field_config.route.v4alpha.RouteAction.prefix_rewrite>`\n  // or *regex_rewrite* may be specified.\n  //\n  // Examples using Google's `RE2 <https://github.com/google/re2>`_ engine:\n  //\n  // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution\n  //   string of ``\\2/instance/\\1`` would transform ``/service/foo/v1/api``\n  //   into ``/v1/api/instance/foo``.\n  //\n  // * The pattern ``one`` paired with a substitution string of ``two`` would\n  //   transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``.\n  //\n  // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of\n  //   ``\\1two\\2`` would replace only the first occurrence of ``one``,\n  //   transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``.\n  //\n  // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/``\n  //   would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to\n  //   ``/aaa/yyy/bbb``.\n  type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 32;\n\n  oneof host_rewrite_specifier {\n    // Indicates that during forwarding, the host header will be swapped with\n    // this value.\n    string host_rewrite_literal = 6\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the hostname of the upstream host chosen by the cluster manager. This\n    // option is applicable only when the destination cluster for a route is of\n    // type *strict_dns* or *logical_dns*. 
Setting this to true with other cluster\n    // types has no effect.\n    google.protobuf.BoolValue auto_host_rewrite = 7;\n\n    // Indicates that during forwarding, the host header will be swapped with the content of given\n    // downstream or :ref:`custom <config_http_conn_man_headers_custom_request_headers>` header.\n    // If header value is empty, host header is left intact.\n    //\n    // .. attention::\n    //\n    //   Pay attention to the potential security implications of using this option. Provided header\n    //   must come from trusted source.\n    string host_rewrite_header = 29\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // Indicates that during forwarding, the host header will be swapped with\n    // the result of the regex substitution executed on path value with query and fragment removed.\n    // This is useful for transitioning variable content between path segment and subdomain.\n    //\n    // For example with the following config:\n    //\n    //   .. code-block:: yaml\n    //\n    //     host_rewrite_path_regex:\n    //       pattern:\n    //         google_re2: {}\n    //         regex: \"^/(.+)/.+$\"\n    //       substitution: \\1\n    //\n    // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`.\n    type.matcher.v4alpha.RegexMatchAndSubstitute host_rewrite_path_regex = 35;\n  }\n\n  // Specifies the upstream timeout for the route. If not specified, the default is 15s. This\n  // spans between the point at which the entire downstream request (i.e. end-of-stream) has been\n  // processed and when the upstream response has been completely processed. A value of 0 will\n  // disable the route's timeout.\n  //\n  // .. note::\n  //\n  //   This timeout includes all retries. 
See also\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //   :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration timeout = 8;\n\n  // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout,\n  // although the connection manager wide :ref:`stream_idle_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.stream_idle_timeout>`\n  // will still apply. A value of 0 will completely disable the route's idle timeout, even if a\n  // connection manager stream idle timeout is configured.\n  //\n  // The idle timeout is distinct to :ref:`timeout\n  // <envoy_api_field_config.route.v4alpha.RouteAction.timeout>`, which provides an upper bound\n  // on the upstream response time; :ref:`idle_timeout\n  // <envoy_api_field_config.route.v4alpha.RouteAction.idle_timeout>` instead bounds the amount\n  // of time the request's stream may be idle.\n  //\n  // After header decoding, the idle timeout will apply on downstream and\n  // upstream request events. Each time an encode/decode event for headers or\n  // data is processed for the stream, the timer will be reset. If the timeout\n  // fires, the stream is terminated with a 408 Request Timeout error code if no\n  // upstream response header has been received, otherwise a stream reset\n  // occurs.\n  google.protobuf.Duration idle_timeout = 24;\n\n  // Indicates that the route has a retry policy. Note that if this is set,\n  // it'll take precedence over the virtual host level retry policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  RetryPolicy retry_policy = 9;\n\n  // [#not-implemented-hide:]\n  // Specifies the configuration for retry policy extension. 
Note that if this is set, it'll take\n  // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged,\n  // most internal one becomes the enforced policy). :ref:`Retry policy <envoy_api_field_config.route.v4alpha.VirtualHost.retry_policy>`\n  // should not be set if this field is used.\n  google.protobuf.Any retry_policy_typed_config = 33;\n\n  // Indicates that the route has request mirroring policies.\n  repeated RequestMirrorPolicy request_mirror_policies = 30;\n\n  // Optionally specifies the :ref:`routing priority <arch_overview_http_routing_priority>`.\n  core.v4alpha.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies a set of rate limit configurations that could be applied to the\n  // route.\n  repeated RateLimit rate_limits = 13;\n\n  // Specifies if the rate limit filter should include the virtual host rate\n  // limits. By default, if the route configured rate limits, the virtual host\n  // :ref:`rate_limits <envoy_api_field_config.route.v4alpha.VirtualHost.rate_limits>` are not applied to the\n  // request.\n  //\n  // This field is deprecated. Please use :ref:`vh_rate_limits <envoy_v3_api_field_extensions.filters.http.ratelimit.v3.RateLimitPerRoute.vh_rate_limits>`\n  google.protobuf.BoolValue hidden_envoy_deprecated_include_vh_rate_limits = 14 [deprecated = true];\n\n  // Specifies a list of hash policies to use for ring hash load balancing. Each\n  // hash policy is evaluated individually and the combined result is used to\n  // route the request. The method of combination is deterministic such that\n  // identical lists of hash policies will produce the same hash. Since a hash\n  // policy examines specific parts of a request, it can fail to produce a hash\n  // (i.e. if the hashed header is not present). If (and only if) all configured\n  // hash policies fail to generate a hash, no hash will be produced for\n  // the route. 
In this case, the behavior is the same as if no hash policies\n  // were specified (i.e. the ring hash load balancer will choose a random\n  // backend). If a hash policy has the \"terminal\" attribute set to true, and\n  // there is already a hash generated, the hash is returned immediately,\n  // ignoring the rest of the hash policy list.\n  repeated HashPolicy hash_policy = 15;\n\n  // Indicates that the route has a CORS policy.\n  CorsPolicy cors = 17;\n\n  // Deprecated by :ref:`grpc_timeout_header_max <envoy_api_field_config.route.v4alpha.RouteAction.MaxStreamDuration.grpc_timeout_header_max>`\n  // If present, and the request is a gRPC request, use the\n  // `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_,\n  // or its default value (infinity) instead of\n  // :ref:`timeout <envoy_api_field_config.route.v4alpha.RouteAction.timeout>`, but limit the applied timeout\n  // to the maximum value specified here. If configured as 0, the maximum allowed timeout for\n  // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used\n  // and gRPC requests time out like any other requests using\n  // :ref:`timeout <envoy_api_field_config.route.v4alpha.RouteAction.timeout>` or its default.\n  // This can be used to prevent unexpected upstream request timeouts due to potentially long\n  // time gaps between gRPC request and response in gRPC streaming mode.\n  //\n  // .. note::\n  //\n  //    If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes\n  //    precedence over `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, when\n  //    both are present. 
See also\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,\n  //    :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the\n  //    :ref:`retry overview <arch_overview_http_routing_retry>`.\n  google.protobuf.Duration hidden_envoy_deprecated_max_grpc_timeout = 23 [deprecated = true];\n\n  // Deprecated by :ref:`grpc_timeout_header_offset <envoy_api_field_config.route.v4alpha.RouteAction.MaxStreamDuration.grpc_timeout_header_offset>`.\n  // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting\n  // the provided duration from the header. This is useful in allowing Envoy to set its global\n  // timeout to be less than that of the deadline imposed by the calling client, which makes it more\n  // likely that Envoy will handle the timeout instead of having the call canceled by the client.\n  // The offset will only be applied if the provided grpc_timeout is greater than the offset. This\n  // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning\n  // infinity).\n  google.protobuf.Duration hidden_envoy_deprecated_grpc_timeout_offset = 28 [deprecated = true];\n\n  repeated UpgradeConfig upgrade_configs = 25;\n\n  // If present, Envoy will try to follow an upstream redirect response instead of proxying the\n  // response back to the downstream. 
An upstream redirect response is defined\n  // by :ref:`redirect_response_codes\n  // <envoy_api_field_config.route.v4alpha.InternalRedirectPolicy.redirect_response_codes>`.\n  InternalRedirectPolicy internal_redirect_policy = 34;\n\n  InternalRedirectAction hidden_envoy_deprecated_internal_redirect_action = 26 [deprecated = true];\n\n  // An internal redirect is handled, iff the number of previous internal redirects that a\n  // downstream request has encountered is lower than this value, and\n  // :ref:`internal_redirect_action <envoy_api_field_config.route.v4alpha.RouteAction.internal_redirect_action>`\n  // is set to :ref:`HANDLE_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_config.route.v4alpha.RouteAction.InternalRedirectAction.HANDLE_INTERNAL_REDIRECT>`\n  // In the case where a downstream request is bounced among multiple routes by internal redirect,\n  // the first route that hits this threshold, or has\n  // :ref:`internal_redirect_action <envoy_api_field_config.route.v4alpha.RouteAction.internal_redirect_action>`\n  // set to\n  // :ref:`PASS_THROUGH_INTERNAL_REDIRECT\n  // <envoy_api_enum_value_config.route.v4alpha.RouteAction.InternalRedirectAction.PASS_THROUGH_INTERNAL_REDIRECT>`\n  // will pass the redirect back to downstream.\n  //\n  // If not specified, at most one redirect will be followed.\n  google.protobuf.UInt32Value hidden_envoy_deprecated_max_internal_redirects = 31\n      [deprecated = true];\n\n  // Indicates that the route has a hedge policy. 
Note that if this is set,\n  // it'll take precedence over the virtual host level hedge policy entirely\n  // (e.g.: policies are not merged, most internal one becomes the enforced policy).\n  HedgePolicy hedge_policy = 27;\n\n  // Specifies the maximum stream duration for this route.\n  MaxStreamDuration max_stream_duration = 36;\n}\n\n// HTTP retry :ref:`architecture overview <arch_overview_http_routing_retry>`.\n// [#next-free-field: 12]\nmessage RetryPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.RetryPolicy\";\n\n  enum ResetHeaderFormat {\n    SECONDS = 0;\n    UNIX_TIMESTAMP = 1;\n  }\n\n  message RetryPriority {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.RetryPriority\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryHostPredicate {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.RetryHostPredicate\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  message RetryBackOff {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.RetryBackOff\";\n\n    // Specifies the base interval between retries. This parameter is required and must be greater\n    // than zero. Values less than 1 ms are rounded up to 1 ms.\n    // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's\n    // back-off algorithm.\n    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n\n    // Specifies the maximum interval between retries. 
This parameter is optional, but must be\n    // greater than or equal to the `base_interval` if set. The default is 10 times the\n    // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion\n    // of Envoy's back-off algorithm.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  message ResetHeader {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.ResetHeader\";\n\n    string name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // A retry back-off strategy that applies when the upstream server rate limits\n  // the request.\n  //\n  // Given this configuration:\n  //\n  // .. code-block:: yaml\n  //\n  //   rate_limited_retry_back_off:\n  //     reset_headers:\n  //     - name: Retry-After\n  //       format: SECONDS\n  //     - name: X-RateLimit-Reset\n  //       format: UNIX_TIMESTAMP\n  //     max_interval: \"300s\"\n  //\n  // The following algorithm will apply:\n  //\n  //  1. If the response contains the header ``Retry-After`` its value must be on\n  //     the form ``120`` (an integer that represents the number of seconds to\n  //     wait before retrying). If so, this value is used as the back-off interval.\n  //  2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its\n  //     value must be on the form ``1595320702`` (an integer that represents the\n  //     point in time at which to retry, as a Unix timestamp in seconds). If so,\n  //     the current time is subtracted from this value and the result is used as\n  //     the back-off interval.\n  //  3. 
Otherwise, Envoy will use the default\n  //     :ref:`exponential back-off <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_back_off>`\n  //     strategy.\n  //\n  // No matter which format is used, if the resulting back-off interval exceeds\n  // ``max_interval`` it is discarded and the next header in ``reset_headers``\n  // is tried. If a request timeout is configured for the route it will further\n  // limit how long the request will be allowed to run.\n  //\n  // To prevent many clients retrying at the same point in time jitter is added\n  // to the back-off interval, so the resulting interval is decided by taking:\n  // ``random(interval, interval * 1.5)``.\n  //\n  // .. attention::\n  //\n  //   Configuring ``rate_limited_retry_back_off`` will not by itself cause a request\n  //   to be retried. You will still need to configure the right retry policy to match\n  //   the responses from the upstream server.\n  message RateLimitedRetryBackOff {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff\";\n\n    // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``)\n    // to match against the response. Headers are tried in order, and matched case\n    // insensitive. The first header to be parsed successfully is used. If no headers\n    // match the default exponential back-off is used instead.\n    repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}];\n\n    // Specifies the maximum back off interval that Envoy will allow. If a reset\n    // header contains an interval longer than this then it will be discarded and\n    // the next header will be tried. Defaults to 300 seconds.\n    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];\n  }\n\n  // Specifies the conditions under which retry takes place. 
These are the same\n  // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and\n  // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`.\n  string retry_on = 1;\n\n  // Specifies the allowed number of retries. This parameter is optional and\n  // defaults to 1. These are the same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-max-retries`.\n  google.protobuf.UInt32Value max_retries = 2;\n\n  // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The\n  // same conditions documented for\n  // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.\n  //\n  // .. note::\n  //\n  //   If left unspecified, Envoy will use the global\n  //   :ref:`route timeout <envoy_api_field_config.route.v4alpha.RouteAction.timeout>` for the request.\n  //   Consequently, when using a :ref:`5xx <config_http_filters_router_x-envoy-retry-on>` based\n  //   retry policy, a request that times out will not be retried as the total timeout budget\n  //   would have been exhausted.\n  google.protobuf.Duration per_try_timeout = 3;\n\n  // Specifies an implementation of a RetryPriority which is used to determine the\n  // distribution of load across priorities used for retries. Refer to\n  // :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more details.\n  RetryPriority retry_priority = 4;\n\n  // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host\n  // for retries. If any of the predicates reject the host, host selection will be reattempted.\n  // Refer to :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more\n  // details.\n  repeated RetryHostPredicate retry_host_predicate = 5;\n\n  // The maximum number of times host selection will be reattempted before giving up, at which\n  // point the host that was last selected will be routed to. 
If unspecified, this will default to\n  // retrying once.\n  int64 host_selection_retry_max_attempts = 6;\n\n  // HTTP status codes that should trigger a retry in addition to those specified by retry_on.\n  repeated uint32 retriable_status_codes = 7;\n\n  // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the\n  // default base interval is 25 milliseconds or, if set, the current value of the\n  // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times\n  // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries`\n  // describes Envoy's back-off algorithm.\n  RetryBackOff retry_back_off = 8;\n\n  // Specifies parameters that control a retry back-off strategy that is used\n  // when the request is rate limited by the upstream server. The server may\n  // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to\n  // provide feedback to the client on how long to wait before retrying. If\n  // configured, this back-off strategy will be used instead of the\n  // default exponential back off strategy (configured using `retry_back_off`)\n  // whenever a response includes the matching headers.\n  RateLimitedRetryBackOff rate_limited_retry_back_off = 11;\n\n  // HTTP response headers that trigger a retry if present in the response. 
A retry will be\n  // triggered if any of the header matches match the upstream response headers.\n  // The field is only consulted if 'retriable-headers' retry policy is active.\n  repeated HeaderMatcher retriable_headers = 9;\n\n  // HTTP headers which must be present in the request for retries to be attempted.\n  repeated HeaderMatcher retriable_request_headers = 10;\n}\n\n// HTTP request hedging :ref:`architecture overview <arch_overview_http_routing_hedging>`.\nmessage HedgePolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.HedgePolicy\";\n\n  // Specifies the number of initial requests that should be sent upstream.\n  // Must be at least 1.\n  // Defaults to 1.\n  // [#not-implemented-hide:]\n  google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}];\n\n  // Specifies a probability that an additional upstream request should be sent\n  // on top of what is specified by initial_requests.\n  // Defaults to 0.\n  // [#not-implemented-hide:]\n  type.v3.FractionalPercent additional_request_chance = 2;\n\n  // Indicates that a hedged request should be sent when the per-try timeout\n  // is hit. 
This will only occur if the retry policy also indicates that a\n  // timed out request should be retried.\n  // Once a timed out request is retried due to per try timeout, the router\n  // filter will ensure that it is not retried again even if the returned\n  // response headers would otherwise be retried according the specified\n  // :ref:`RetryPolicy <envoy_api_msg_config.route.v4alpha.RetryPolicy>`.\n  // Defaults to false.\n  bool hedge_on_per_try_timeout = 3;\n}\n\n// [#next-free-field: 9]\nmessage RedirectAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.RedirectAction\";\n\n  enum RedirectResponseCode {\n    // Moved Permanently HTTP Status Code - 301.\n    MOVED_PERMANENTLY = 0;\n\n    // Found HTTP Status Code - 302.\n    FOUND = 1;\n\n    // See Other HTTP Status Code - 303.\n    SEE_OTHER = 2;\n\n    // Temporary Redirect HTTP Status Code - 307.\n    TEMPORARY_REDIRECT = 3;\n\n    // Permanent Redirect HTTP Status Code - 308.\n    PERMANENT_REDIRECT = 4;\n  }\n\n  // When the scheme redirection take place, the following rules apply:\n  //  1. If the source URI scheme is `http` and the port is explicitly\n  //     set to `:80`, the port will be removed after the redirection\n  //  2. 
If the source URI scheme is `https` and the port is explicitly\n  //     set to `:443`, the port will be removed after the redirection\n  oneof scheme_rewrite_specifier {\n    // The scheme portion of the URL will be swapped with \"https\".\n    bool https_redirect = 4;\n\n    // The scheme portion of the URL will be swapped with this value.\n    string scheme_redirect = 7;\n  }\n\n  // The host portion of the URL will be swapped with this value.\n  string host_redirect = 1\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // The port value of the URL will be swapped with this value.\n  uint32 port_redirect = 8;\n\n  oneof path_rewrite_specifier {\n    // The path portion of the URL will be swapped with this value.\n    // Please note that query string in path_redirect will override the\n    // request's query string and will not be stripped.\n    //\n    // For example, let's say we have the following routes:\n    //\n    // - match: { path: \"/old-path-1\" }\n    //   redirect: { path_redirect: \"/new-path-1\" }\n    // - match: { path: \"/old-path-2\" }\n    //   redirect: { path_redirect: \"/new-path-2\", strip-query: \"true\" }\n    // - match: { path: \"/old-path-3\" }\n    //   redirect: { path_redirect: \"/new-path-3?foo=1\", strip_query: \"true\" }\n    //\n    // 1. if request uri is \"/old-path-1?bar=1\", users will be redirected to \"/new-path-1?bar=1\"\n    // 2. if request uri is \"/old-path-2?bar=1\", users will be redirected to \"/new-path-2\"\n    // 3. if request uri is \"/old-path-3?bar=1\", users will be redirected to \"/new-path-3?foo=1\"\n    string path_redirect = 2\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n    // Indicates that during redirection, the matched prefix (or path)\n    // should be swapped with this value. This option allows redirect URLs be dynamically created\n    // based on the request.\n    //\n    // .. 
attention::\n    //\n    //   Pay attention to the use of trailing slashes as mentioned in\n    //   :ref:`RouteAction's prefix_rewrite <envoy_api_field_config.route.v4alpha.RouteAction.prefix_rewrite>`.\n    string prefix_rewrite = 5\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // The HTTP status code to use in the redirect response. The default response\n  // code is MOVED_PERMANENTLY (301).\n  RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // Indicates that during redirection, the query portion of the URL will\n  // be removed. Default value is false.\n  bool strip_query = 6;\n}\n\nmessage DirectResponseAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.DirectResponseAction\";\n\n  // Specifies the HTTP response status to be returned.\n  uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}];\n\n  // Specifies the content of the response body. If this setting is omitted,\n  // no body is included in the generated response.\n  //\n  // .. note::\n  //\n  //   Headers can be specified using *response_headers_to_add* in the enclosing\n  //   :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` or\n  //   :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`.\n  core.v4alpha.DataSource body = 2;\n}\n\nmessage Decorator {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.Decorator\";\n\n  // The operation name associated with the request matched to this route. If tracing is\n  // enabled, this information will be used as the span name reported for this request.\n  //\n  // .. 
note::\n  //\n  //   For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden\n  //   by the :ref:`x-envoy-decorator-operation\n  //   <config_http_filters_router_x-envoy-decorator-operation>` header.\n  string operation = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Whether the decorated details should be propagated to the other party. The default is true.\n  google.protobuf.BoolValue propagate = 2;\n}\n\nmessage Tracing {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.Tracing\";\n\n  // Target percentage of requests managed by this HTTP connection manager that will be force\n  // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n  // header is set. This field is a direct analog for the runtime variable\n  // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n  // <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent client_sampling = 1;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be randomly\n  // selected for trace generation, if not requested by the client or not forced. This field is\n  // a direct analog for the runtime variable 'tracing.random_sampling' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent random_sampling = 2;\n\n  // Target percentage of requests managed by this HTTP connection manager that will be traced\n  // after all other sampling checks have been applied (client-directed, force tracing, random\n  // sampling). This field functions as an upper limit on the total configured sampling rate. For\n  // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n  // of client requests with the appropriate headers to be force traced. 
This field is a direct\n  // analog for the runtime variable 'tracing.global_enabled' in the\n  // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n  // Default: 100%\n  type.v3.FractionalPercent overall_sampling = 3;\n\n  // A list of custom tags with unique tag name to create tags for the active span.\n  // It will take effect after merging with the :ref:`corresponding configuration\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.Tracing.custom_tags>`\n  // configured in the HTTP connection manager. If two tags with the same name are configured\n  // each in the HTTP connection manager and the route level, the one configured here takes\n  // priority.\n  repeated type.tracing.v3.CustomTag custom_tags = 4;\n}\n\n// A virtual cluster is a way of specifying a regex matching rule against\n// certain important endpoints such that statistics are generated explicitly for\n// the matched requests. The reason this is useful is that when doing\n// prefix/path matching Envoy does not always know what the application\n// considers to be an endpoint. Thus, it’s impossible for Envoy to generically\n// emit per endpoint statistics. However, often systems have highly critical\n// endpoints that they wish to get “perfect” statistics on. Virtual cluster\n// statistics are perfect in the sense that they are emitted on the downstream\n// side such that they include network level failures.\n//\n// Documentation for :ref:`virtual cluster statistics <config_http_filters_router_vcluster_stats>`.\n//\n// .. note::\n//\n//    Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for\n//    every application endpoint. 
This is both not easily maintainable and as well the matching and\n//    statistics output are not free.\nmessage VirtualCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.VirtualCluster\";\n\n  reserved 1, 3;\n\n  reserved \"pattern\", \"method\";\n\n  // Specifies a list of header matchers to use for matching requests. Each specified header must\n  // match. The pseudo-headers `:path` and `:method` can be used to match the request path and\n  // method, respectively.\n  repeated HeaderMatcher headers = 4;\n\n  // Specifies the name of the virtual cluster. The virtual cluster name as well\n  // as the virtual host name are used when emitting statistics. The statistics are emitted by the\n  // router filter and are documented :ref:`here <config_http_filters_router_stats>`.\n  string name = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Global rate limiting :ref:`architecture overview <arch_overview_global_rate_limit>`.\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.route.v3.RateLimit\";\n\n  // [#next-free-field: 8]\n  message Action {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RateLimit.Action\";\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"source_cluster\", \"<local service cluster>\")\n    //\n    // <local service cluster> is derived from the :option:`--service-cluster` option.\n    message SourceCluster {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.SourceCluster\";\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"destination_cluster\", \"<routed target cluster>\")\n    //\n    // Once a request matches against a route table rule, a routed cluster is determined by one of\n    // the following :ref:`route table configuration <envoy_api_msg_config.route.v4alpha.RouteConfiguration>`\n    // settings:\n    //\n    // * :ref:`cluster <envoy_api_field_config.route.v4alpha.RouteAction.cluster>` indicates the upstream cluster\n    //   to route to.\n    // * :ref:`weighted_clusters <envoy_api_field_config.route.v4alpha.RouteAction.weighted_clusters>`\n    //   chooses a cluster randomly from a set of clusters with attributed weight.\n    // * :ref:`cluster_header <envoy_api_field_config.route.v4alpha.RouteAction.cluster_header>` indicates which\n    //   header in the request contains the target cluster.\n    message DestinationCluster {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.DestinationCluster\";\n    }\n\n    // The following descriptor entry is appended when a header contains a key that matches the\n    // *header_name*:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<header_value_queried_from_header>\")\n    message RequestHeaders {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.RequestHeaders\";\n\n      // The header name to be queried from the request headers. 
The header’s\n      // value is used to populate the value of the descriptor entry for the\n      // descriptor_key.\n      string header_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // The key to use in the descriptor entry.\n      string descriptor_key = 2 [(validate.rules).string = {min_len: 1}];\n\n      // If set to true, Envoy skips the descriptor while calling rate limiting service\n      // when header is not present in the request. By default it skips calling the\n      // rate limiting service if this header is not present in the request.\n      bool skip_if_absent = 3;\n    }\n\n    // The following descriptor entry is appended to the descriptor and is populated using the\n    // trusted address from :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"remote_address\", \"<trusted address from x-forwarded-for>\")\n    message RemoteAddress {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.RemoteAddress\";\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"generic_key\", \"<descriptor_value>\")\n    message GenericKey {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.GenericKey\";\n\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];\n\n      // An optional key to use in the descriptor entry. If not set it defaults\n      // to 'generic_key' as the descriptor key.\n      string descriptor_key = 2;\n    }\n\n    // The following descriptor entry is appended to the descriptor:\n    //\n    // .. 
code-block:: cpp\n    //\n    //   (\"header_match\", \"<descriptor_value>\")\n    message HeaderValueMatch {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.HeaderValueMatch\";\n\n      // The value to use in the descriptor entry.\n      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];\n\n      // If set to true, the action will append a descriptor entry when the\n      // request matches the headers. If set to false, the action will append a\n      // descriptor entry when the request does not match the headers. The\n      // default value is true.\n      google.protobuf.BoolValue expect_match = 2;\n\n      // Specifies a set of headers that the rate limit action should match\n      // on. The action will check the request’s headers against all the\n      // specified headers in the config. A match will happen if all the\n      // headers in the config are present in the request with the same values\n      // (or based on presence if the value field is not in the config).\n      repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    // The following descriptor entry is appended when the dynamic metadata contains a key value:\n    //\n    // .. code-block:: cpp\n    //\n    //   (\"<descriptor_key>\", \"<value_queried_from_metadata>\")\n    message DynamicMetaData {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Action.DynamicMetaData\";\n\n      // The key to use in the descriptor entry.\n      string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Metadata struct that defines the key and path to retrieve the string value. 
A match will\n      // only happen if the value in the dynamic metadata is of type string.\n      type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];\n\n      // An optional value to use if *metadata_key* is empty. If not set and\n      // no value is present under the metadata_key then no descriptor is generated.\n      string default_value = 3;\n    }\n\n    oneof action_specifier {\n      option (validate.required) = true;\n\n      // Rate limit on source cluster.\n      SourceCluster source_cluster = 1;\n\n      // Rate limit on destination cluster.\n      DestinationCluster destination_cluster = 2;\n\n      // Rate limit on request headers.\n      RequestHeaders request_headers = 3;\n\n      // Rate limit on remote address.\n      RemoteAddress remote_address = 4;\n\n      // Rate limit on a generic key.\n      GenericKey generic_key = 5;\n\n      // Rate limit on the existence of request headers.\n      HeaderValueMatch header_value_match = 6;\n\n      // Rate limit on dynamic metadata.\n      DynamicMetaData dynamic_metadata = 7;\n    }\n  }\n\n  message Override {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.RateLimit.Override\";\n\n    // Fetches the override from the dynamic metadata.\n    message DynamicMetadata {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.RateLimit.Override.DynamicMetadata\";\n\n      // Metadata struct that defines the key and path to retrieve the struct value.\n      // The value must be a struct containing an integer \"requests_per_unit\" property\n      // and a \"unit\" property with a value parseable to :ref:`RateLimitUnit\n      // enum <envoy_api_enum_type.v3.RateLimitUnit>`\n      type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}];\n    }\n\n    oneof override_specifier {\n      option (validate.required) = true;\n\n      // Limit 
override from dynamic metadata.\n      DynamicMetadata dynamic_metadata = 1;\n    }\n  }\n\n  // Refers to the stage set in the filter. The rate limit configuration only\n  // applies to filters with the same stage number. The default stage number is\n  // 0.\n  //\n  // .. note::\n  //\n  //   The filter supports a range of 0 - 10 inclusively for stage numbers.\n  google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}];\n\n  // The key to be set in runtime to disable this rate limit configuration.\n  string disable_key = 2;\n\n  // A list of actions that are to be applied for this rate limit configuration.\n  // Order matters as the actions are processed sequentially and the descriptor\n  // is composed by appending descriptor entries in that sequence. If an action\n  // cannot append a descriptor entry, no descriptor is generated for the\n  // configuration. See :ref:`composing actions\n  // <config_http_filters_rate_limit_composing_actions>` for additional documentation.\n  repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}];\n\n  // An optional limit override to be appended to the descriptor produced by this\n  // rate limit configuration. If the override value is invalid or cannot be resolved\n  // from metadata, no override is provided. See :ref:`rate limit override\n  // <config_http_filters_rate_limit_rate_limit_override>` for more information.\n  Override limit = 4;\n}\n\n// .. attention::\n//\n//   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host*\n//   header. Thus, if attempting to match on *Host*, match on *:authority* instead.\n//\n// .. attention::\n//\n//   To route on HTTP method, use the special HTTP/2 *:method* header. This works for both\n//   HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g.,\n//\n//   .. code-block:: json\n//\n//     {\n//       \"name\": \":method\",\n//       \"exact_match\": \"POST\"\n//     }\n//\n// .. 
attention::\n//   In the absence of any header match specifier, match will default to :ref:`present_match\n//   <envoy_api_field_config.route.v4alpha.HeaderMatcher.present_match>`. i.e, a request that has the :ref:`name\n//   <envoy_api_field_config.route.v4alpha.HeaderMatcher.name>` header will match, regardless of the header's\n//   value.\n//\n//  [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.]\n// [#next-free-field: 13]\nmessage HeaderMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.HeaderMatcher\";\n\n  reserved 2, 3, 5;\n\n  reserved \"regex_match\";\n\n  // Specifies the name of the header in the request.\n  string name = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // Specifies how the header match will be performed to route the request.\n  oneof header_match_specifier {\n    // If specified, header match will be performed based on the value of the header.\n    string exact_match = 4;\n\n    // If specified, this regex string is a regular expression rule which implies the entire request\n    // header value must match the regex. The rule will not match if only a subsequence of the\n    // request header value matches the regex.\n    type.matcher.v4alpha.RegexMatcher safe_regex_match = 11;\n\n    // If specified, header match will be performed based on range.\n    // The rule will match if the request header value is within this range.\n    // The entire request header value must represent an integer in base 10 notation: consisting of\n    // an optional plus or minus sign followed by a sequence of digits. The rule will not match if\n    // the header value does not represent an integer. 
Match will fail for empty values, floating\n    // point numbers or if only a subsequence of the header value is an integer.\n    //\n    // Examples:\n    //\n    // * For range [-10,0), route will match for header value -1, but not for 0, \"somestring\", 10.9,\n    //   \"-1somestring\"\n    type.v3.Int64Range range_match = 6;\n\n    // If specified, header match will be performed based on whether the header is in the\n    // request.\n    bool present_match = 7;\n\n    // If specified, header match will be performed based on the prefix of the header value.\n    // Note: empty prefix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.\n    string prefix_match = 9 [(validate.rules).string = {min_len: 1}];\n\n    // If specified, header match will be performed based on the suffix of the header value.\n    // Note: empty suffix is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.\n    string suffix_match = 10 [(validate.rules).string = {min_len: 1}];\n\n    // If specified, header match will be performed based on whether the header value contains\n    // the given value or not.\n    // Note: empty contains match is not allowed, please use present_match instead.\n    //\n    // Examples:\n    //\n    // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*.\n    string contains_match = 12 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // If specified, the match result will be inverted before checking. 
Defaults to false.\n  //\n  // Examples:\n  //\n  // * The regex ``\\d{3}`` does not match the value *1234*, so it will match when inverted.\n  // * The range [-10,0) will match the value -1, so it will not match when inverted.\n  bool invert_match = 8;\n}\n\n// Query parameter matching treats the query string of a request's :path header\n// as an ampersand-separated list of keys and/or key=value elements.\n// [#next-free-field: 7]\nmessage QueryParameterMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.QueryParameterMatcher\";\n\n  reserved 3, 4;\n\n  reserved \"value\", \"regex\";\n\n  // Specifies the name of a key that must be present in the requested\n  // *path*'s query string.\n  string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}];\n\n  oneof query_parameter_match_specifier {\n    // Specifies whether a query parameter value should match against a string.\n    type.matcher.v4alpha.StringMatcher string_match = 5\n        [(validate.rules).message = {required: true}];\n\n    // Specifies whether a query parameter should be present.\n    bool present_match = 6;\n  }\n}\n\n// HTTP Internal Redirect :ref:`architecture overview <arch_overview_internal_redirects>`.\nmessage InternalRedirectPolicy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.InternalRedirectPolicy\";\n\n  // An internal redirect is not handled, unless the number of previous internal redirects that a\n  // downstream request has encountered is lower than this value.\n  // In the case where a downstream request is bounced among multiple routes by internal redirect,\n  // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy\n  // <envoy_api_field_config.route.v4alpha.RouteAction.internal_redirect_policy>`\n  // will pass the redirect back to downstream.\n  //\n  // If not specified, at most one redirect will be followed.\n  
google.protobuf.UInt32Value max_internal_redirects = 1;\n\n  // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified,\n  // only 302 will be treated as internal redirect.\n  // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored.\n  repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}];\n\n  // Specifies a list of predicates that are queried when an upstream response is deemed\n  // to trigger an internal redirect by all other criteria. Any predicate in the list can reject\n  // the redirect, causing the response to be proxied to downstream.\n  repeated core.v4alpha.TypedExtensionConfig predicates = 3;\n\n  // Allow internal redirect to follow a target URI with a different scheme than the value of\n  // x-forwarded-proto. The default is false.\n  bool allow_cross_scheme_redirect = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.route.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.route.v4alpha\";\noption java_outer_classname = \"ScopedRouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP scoped routing configuration]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// Specifies a routing scope, which associates a\n// :ref:`Key<envoy_api_msg_config.route.v4alpha.ScopedRouteConfiguration.Key>` to a\n// :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` (identified by its resource name).\n//\n// The HTTP connection manager builds up a table consisting of these Key to\n// RouteConfiguration mappings, and looks up the RouteConfiguration to use per\n// request according to the algorithm specified in the\n// :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scope_key_builder>`\n// assigned to the HttpConnectionManager.\n//\n// For example, with the following configurations (in YAML):\n//\n// HttpConnectionManager config:\n//\n// .. code::\n//\n//   ...\n//   scoped_routes:\n//     name: foo-scoped-routes\n//     scope_key_builder:\n//       fragments:\n//         - header_value_extractor:\n//             name: X-Route-Selector\n//             element_separator: ,\n//             element:\n//               separator: =\n//               key: vip\n//\n// ScopedRouteConfiguration resources (specified statically via\n// :ref:`scoped_route_configurations_list<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scoped_route_configurations_list>`\n// or obtained dynamically via SRDS):\n//\n// .. 
code::\n//\n//  (1)\n//   name: route-scope1\n//   route_configuration_name: route-config1\n//   key:\n//      fragments:\n//        - string_key: 172.10.10.20\n//\n//  (2)\n//   name: route-scope2\n//   route_configuration_name: route-config2\n//   key:\n//     fragments:\n//       - string_key: 172.20.20.30\n//\n// A request from a client such as:\n//\n// .. code::\n//\n//     GET / HTTP/1.1\n//     Host: foo.com\n//     X-Route-Selector: vip=172.10.10.20\n//\n// would result in the routing table defined by the `route-config1`\n// RouteConfiguration being assigned to the HTTP request/stream.\n//\nmessage ScopedRouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.route.v3.ScopedRouteConfiguration\";\n\n  // Specifies a key which is matched against the output of the\n  // :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scope_key_builder>`\n  // specified in the HttpConnectionManager. The matching is done per HTTP\n  // request and is dependent on the order of the fragments contained in the\n  // Key.\n  message Key {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.route.v3.ScopedRouteConfiguration.Key\";\n\n    message Fragment {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment\";\n\n      oneof type {\n        option (validate.required) = true;\n\n        // A string to match against.\n        string string_key = 1;\n      }\n    }\n\n    // The ordered set of fragments to match against. 
The order must match the\n    // fragments in the corresponding\n    // :ref:`scope_key_builder<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scope_key_builder>`.\n    repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Whether the RouteConfiguration should be loaded on demand.\n  bool on_demand = 4;\n\n  // The name assigned to the routing scope.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v4alpha.DiscoveryRequest` to an\n  // RDS server to fetch the :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` associated\n  // with this scope.\n  string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The key to match against.\n  Key key = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/matcher/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/tap/v3/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.tap.v3;\n\nimport \"envoy/config/common/matcher/v3/matcher.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.tap.v3\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common tap configuration]\n\n// Tap configuration.\nmessage TapConfig {\n  // [#comment:TODO(mattklein123): Rate limiting]\n\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.TapConfig\";\n\n  // The match configuration. If the configuration matches the data source being tapped, a tap will\n  // occur, with the result written to the configured output.\n  // Exactly one of :ref:`match <envoy_api_field_config.tap.v3.TapConfig.match>` and\n  // :ref:`match_config <envoy_api_field_config.tap.v3.TapConfig.match_config>` must be set. If both\n  // are set, the :ref:`match <envoy_api_field_config.tap.v3.TapConfig.match>` will be used.\n  MatchPredicate match_config = 1 [deprecated = true];\n\n  // The match configuration. If the configuration matches the data source being tapped, a tap will\n  // occur, with the result written to the configured output.\n  // Exactly one of :ref:`match <envoy_api_field_config.tap.v3.TapConfig.match>` and\n  // :ref:`match_config <envoy_api_field_config.tap.v3.TapConfig.match_config>` must be set. If both\n  // are set, the :ref:`match <envoy_api_field_config.tap.v3.TapConfig.match>` will be used.\n  common.matcher.v3.MatchPredicate match = 4;\n\n  // The tap output configuration. 
If a match configuration matches a data source being tapped,\n  // a tap will occur and the data will be written to the configured output.\n  OutputConfig output_config = 2 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\\connections for\n  // which the tap matching is enabled. When not enabled, the request\\connection will not be\n  // recorded.\n  //\n  // .. note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.v3.FractionalPercent.DenominatorType>`.\n  core.v3.RuntimeFractionalPercent tap_enabled = 3;\n}\n\n// Tap match configuration. This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 11]\nmessage MatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.MatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.tap.v2alpha.MatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. 
The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n\n    // HTTP request generic body match configuration.\n    HttpGenericBodyMatch http_request_generic_body_match = 9;\n\n    // HTTP response generic body match configuration.\n    HttpGenericBodyMatch http_response_generic_body_match = 10;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.HttpHeadersMatch\";\n\n  // HTTP headers to match.\n  repeated route.v3.HeaderMatcher headers = 1;\n}\n\n// HTTP generic body match configuration.\n// List of text strings and hex strings to be located in HTTP body.\n// All specified strings must be found in the HTTP body for positive match.\n// The search may be limited to specified number of bytes from the body start.\n//\n// .. attention::\n//\n//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.\n//   If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified\n//   to scan only part of the http body.\nmessage HttpGenericBodyMatch {\n  message GenericTextMatch {\n    oneof rule {\n      option (validate.required) = true;\n\n      // Text string to be located in HTTP body.\n      string string_match = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Sequence of bytes to be located in HTTP body.\n      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];\n    }\n  }\n\n  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).\n  uint32 bytes_limit = 1;\n\n  // List of patterns to match.\n  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// Tap output configuration.\nmessage OutputConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.OutputConfig\";\n\n  // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple\n  // sink types are supported this constraint will be relaxed.\n  repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}];\n\n  // For buffered tapping, the maximum amount of received body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_rx_bytes = 2;\n\n  // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. 
If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_tx_bytes = 3;\n\n  // Indicates whether taps produce a single buffered message per tap, or multiple streamed\n  // messages per tap in the emitted :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v3.TraceWrapper>` messages. Note that streamed tapping does not\n  // mean that no buffering takes place. Buffering may be required if data is processed before a\n  // match can be determined. See the HTTP tap filter :ref:`streaming\n  // <config_http_filters_tap_streaming>` documentation for more information.\n  bool streaming = 4;\n}\n\n// Tap output sink configuration.\nmessage OutputSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.OutputSink\";\n\n  // Output format. All output is in the form of one or more :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v3.TraceWrapper>` messages. This enumeration indicates\n  // how those messages are written. Note that not all sinks support all output formats. See\n  // individual sink documentation for more information.\n  enum Format {\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v3.Body>`\n    // data will be present in the :ref:`as_bytes\n    // <envoy_api_field_data.tap.v3.Body.as_bytes>` field. This means that body data will be\n    // base64 encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n    JSON_BODY_AS_BYTES = 0;\n\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v3.Body>`\n    // data will be present in the :ref:`as_string\n    // <envoy_api_field_data.tap.v3.Body.as_string>` field. This means that body data will be\n    // string encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_. 
This format type is\n    // useful when it is known that the body is human readable (e.g., JSON over HTTP) and the\n    // user wishes to view it directly without being forced to base64 decode the body.\n    JSON_BODY_AS_STRING = 1;\n\n    // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes\n    // multiple binary messages without any length information the data stream will not be\n    // useful. However, for certain sinks that are self-delimiting (e.g., one message per file)\n    // this output format makes consumption simpler.\n    PROTO_BINARY = 2;\n\n    // Messages are written as a sequence of tuples, where each tuple is the message length encoded\n    // as a `protobuf 32-bit varint\n    // <https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.coded_stream>`_\n    // followed by the binary message. The messages can be read back using the language specific\n    // protobuf coded stream implementation to obtain the message length and the message.\n    PROTO_BINARY_LENGTH_DELIMITED = 3;\n\n    // Text proto format.\n    PROTO_TEXT = 4;\n  }\n\n  // Sink output format.\n  Format format = 1 [(validate.rules).enum = {defined_only: true}];\n\n  oneof output_sink_type {\n    option (validate.required) = true;\n\n    // Tap output will be streamed out the :http:post:`/tap` admin endpoint.\n    //\n    // .. attention::\n    //\n    //   It is only allowed to specify the streaming admin output sink if the tap is being\n    //   configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has\n    //   been configured to receive tap configuration from some other source (e.g., static\n    //   file, XDS, etc.) configuring the streaming admin output type will fail.\n    StreamingAdminSink streaming_admin = 2;\n\n    // Tap output will be written to a file per tap sink.\n    FilePerTapSink file_per_tap = 3;\n\n    // [#not-implemented-hide:]\n    // GrpcService to stream data to. 
The format argument must be PROTO_BINARY.\n    // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented]\n    StreamingGrpcSink streaming_grpc = 4;\n  }\n}\n\n// Streaming admin sink configuration.\nmessage StreamingAdminSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.StreamingAdminSink\";\n}\n\n// The file per tap sink outputs a discrete file for every tapped stream.\nmessage FilePerTapSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.FilePerTapSink\";\n\n  // Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an\n  // identifier distinguishing the recorded trace for stream instances (the Envoy\n  // connection ID, HTTP stream ID, etc.).\n  string path_prefix = 1 [(validate.rules).string = {min_len: 1}];\n}\n\n// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC\n// server.\nmessage StreamingGrpcSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.StreamingGrpcSink\";\n\n  // Opaque identifier, that will be sent back to the streaming grpc server.\n  string tap_id = 1;\n\n  // The gRPC server that hosts the Tap Sink Service.\n  core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/matcher/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/config/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/tap/v4alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.tap.v4alpha;\n\nimport \"envoy/config/common/matcher/v4alpha/matcher.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.tap.v4alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common tap configuration]\n\n// Tap configuration.\nmessage TapConfig {\n  // [#comment:TODO(mattklein123): Rate limiting]\n\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.TapConfig\";\n\n  // The match configuration. If the configuration matches the data source being tapped, a tap will\n  // occur, with the result written to the configured output.\n  // Exactly one of :ref:`match <envoy_api_field_config.tap.v4alpha.TapConfig.match>` and\n  // :ref:`match_config <envoy_api_field_config.tap.v4alpha.TapConfig.match_config>` must be set. If both\n  // are set, the :ref:`match <envoy_api_field_config.tap.v4alpha.TapConfig.match>` will be used.\n  MatchPredicate hidden_envoy_deprecated_match_config = 1 [deprecated = true];\n\n  // The match configuration. If the configuration matches the data source being tapped, a tap will\n  // occur, with the result written to the configured output.\n  // Exactly one of :ref:`match <envoy_api_field_config.tap.v4alpha.TapConfig.match>` and\n  // :ref:`match_config <envoy_api_field_config.tap.v4alpha.TapConfig.match_config>` must be set. 
If both\n  // are set, the :ref:`match <envoy_api_field_config.tap.v4alpha.TapConfig.match>` will be used.\n  common.matcher.v4alpha.MatchPredicate match = 4;\n\n  // The tap output configuration. If a match configuration matches a data source being tapped,\n  // a tap will occur and the data will be written to the configured output.\n  OutputConfig output_config = 2 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\\connections for\n  // which the tap matching is enabled. When not enabled, the request\\connection will not be\n  // recorded.\n  //\n  // .. note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.v3.FractionalPercent.DenominatorType>`.\n  core.v4alpha.RuntimeFractionalPercent tap_enabled = 3;\n}\n\n// Tap match configuration. This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 11]\nmessage MatchPredicate {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.MatchPredicate\";\n\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.tap.v3.MatchPredicate.MatchSet\";\n\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. 
The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n\n    // HTTP request generic body match configuration.\n    HttpGenericBodyMatch http_request_generic_body_match = 9;\n\n    // HTTP response generic body match configuration.\n    HttpGenericBodyMatch http_response_generic_body_match = 10;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.tap.v3.HttpHeadersMatch\";\n\n  // HTTP headers to match.\n  repeated route.v4alpha.HeaderMatcher headers = 1;\n}\n\n// HTTP generic body match configuration.\n// List of text strings and hex strings to be located in HTTP body.\n// All specified strings must be found in the HTTP body for positive match.\n// The search may be limited to specified number of bytes from the body start.\n//\n// .. attention::\n//\n//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.\n//   If multiple patterns are specified, the process is repeated for each pattern. 
If location of a pattern is known, ``bytes_limit`` should be specified\n//   to scan only part of the http body.\nmessage HttpGenericBodyMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.tap.v3.HttpGenericBodyMatch\";\n\n  message GenericTextMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch\";\n\n    oneof rule {\n      option (validate.required) = true;\n\n      // Text string to be located in HTTP body.\n      string string_match = 1 [(validate.rules).string = {min_len: 1}];\n\n      // Sequence of bytes to be located in HTTP body.\n      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];\n    }\n  }\n\n  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).\n  uint32 bytes_limit = 1;\n\n  // List of patterns to match.\n  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// Tap output configuration.\nmessage OutputConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.OutputConfig\";\n\n  // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple\n  // sink types are supported this constraint will be relaxed.\n  repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}];\n\n  // For buffered tapping, the maximum amount of received body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_rx_bytes = 2;\n\n  // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. 
If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_tx_bytes = 3;\n\n  // Indicates whether taps produce a single buffered message per tap, or multiple streamed\n  // messages per tap in the emitted :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v3.TraceWrapper>` messages. Note that streamed tapping does not\n  // mean that no buffering takes place. Buffering may be required if data is processed before a\n  // match can be determined. See the HTTP tap filter :ref:`streaming\n  // <config_http_filters_tap_streaming>` documentation for more information.\n  bool streaming = 4;\n}\n\n// Tap output sink configuration.\nmessage OutputSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.OutputSink\";\n\n  // Output format. All output is in the form of one or more :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v3.TraceWrapper>` messages. This enumeration indicates\n  // how those messages are written. Note that not all sinks support all output formats. See\n  // individual sink documentation for more information.\n  enum Format {\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v3.Body>`\n    // data will be present in the :ref:`as_bytes\n    // <envoy_api_field_data.tap.v3.Body.as_bytes>` field. This means that body data will be\n    // base64 encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n    JSON_BODY_AS_BYTES = 0;\n\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v3.Body>`\n    // data will be present in the :ref:`as_string\n    // <envoy_api_field_data.tap.v3.Body.as_string>` field. This means that body data will be\n    // string encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_. 
This format type is\n    // useful when it is known that the body is human readable (e.g., JSON over HTTP) and the\n    // user wishes to view it directly without being forced to base64 decode the body.\n    JSON_BODY_AS_STRING = 1;\n\n    // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes\n    // multiple binary messages without any length information the data stream will not be\n    // useful. However, for certain sinks that are self-delimiting (e.g., one message per file)\n    // this output format makes consumption simpler.\n    PROTO_BINARY = 2;\n\n    // Messages are written as a sequence of tuples, where each tuple is the message length encoded\n    // as a `protobuf 32-bit varint\n    // <https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.coded_stream>`_\n    // followed by the binary message. The messages can be read back using the language specific\n    // protobuf coded stream implementation to obtain the message length and the message.\n    PROTO_BINARY_LENGTH_DELIMITED = 3;\n\n    // Text proto format.\n    PROTO_TEXT = 4;\n  }\n\n  // Sink output format.\n  Format format = 1 [(validate.rules).enum = {defined_only: true}];\n\n  oneof output_sink_type {\n    option (validate.required) = true;\n\n    // Tap output will be streamed out the :http:post:`/tap` admin endpoint.\n    //\n    // .. attention::\n    //\n    //   It is only allowed to specify the streaming admin output sink if the tap is being\n    //   configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has\n    //   been configured to receive tap configuration from some other source (e.g., static\n    //   file, XDS, etc.) configuring the streaming admin output type will fail.\n    StreamingAdminSink streaming_admin = 2;\n\n    // Tap output will be written to a file per tap sink.\n    FilePerTapSink file_per_tap = 3;\n\n    // [#not-implemented-hide:]\n    // GrpcService to stream data to. 
The format argument must be PROTO_BINARY.\n    // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented]\n    StreamingGrpcSink streaming_grpc = 4;\n  }\n}\n\n// Streaming admin sink configuration.\nmessage StreamingAdminSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.tap.v3.StreamingAdminSink\";\n}\n\n// The file per tap sink outputs a discrete file for every tapped stream.\nmessage FilePerTapSink {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.tap.v3.FilePerTapSink\";\n\n  // Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an\n  // identifier distinguishing the recorded trace for stream instances (the Envoy\n  // connection ID, HTTP stream ID, etc.).\n  string path_prefix = 1 [(validate.rules).string = {min_len: 1}];\n}\n\n// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC\n// server.\nmessage StreamingGrpcSink {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.tap.v3.StreamingGrpcSink\";\n\n  // Opaque identifier, that will be sent back to the streaming grpc server.\n  string tap_id = 1;\n\n  // The gRPC server that hosts the Tap Sink Service.\n  core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2/datadog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"DatadogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Datadog tracer]\n\n// Configuration for the Datadog tracer.\n// [#extension: envoy.tracers.datadog]\nmessage DatadogConfig {\n  // The cluster to use for submitting traces to the Datadog agent.\n  string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The name used for the service when traces are generated by envoy.\n  string service_name = 2 [(validate.rules).string = {min_bytes: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2/dynamic_ot.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"DynamicOtProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Dynamically loadable OpenTracing tracer]\n\n// DynamicOtConfig is used to dynamically load a tracer from a shared library\n// that implements the `OpenTracing dynamic loading API\n// <https://github.com/opentracing/opentracing-cpp>`_.\n// [#extension: envoy.tracers.dynamic_ot]\nmessage DynamicOtConfig {\n  // Dynamic library implementing the `OpenTracing API\n  // <https://github.com/opentracing/opentracing-cpp>`_.\n  string library = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The configuration to use when creating a tracer from the given dynamic\n  // library.\n  google.protobuf.Struct config = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2/http_tracer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"HttpTracerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tracing]\n// Tracing :ref:`architecture overview <arch_overview_tracing>`.\n\n// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy.\n//\n// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one\n// supported.\n//\n// .. attention::\n//\n//   Use of this message type has been deprecated in favor of direct use of\n//   :ref:`Tracing.Http <envoy_api_msg_config.trace.v2.Tracing.Http>`.\nmessage Tracing {\n  // Configuration for an HTTP tracer provider used by Envoy.\n  //\n  // The configuration is defined by the\n  // :ref:`HttpConnectionManager.Tracing <envoy_api_msg_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing>`\n  // :ref:`provider <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing.provider>`\n  // field.\n  message Http {\n    // The name of the HTTP trace driver to instantiate. The name must match a\n    // supported HTTP trace driver. 
Built-in trace drivers:\n    //\n    // - *envoy.tracers.lightstep*\n    // - *envoy.tracers.zipkin*\n    // - *envoy.tracers.dynamic_ot*\n    // - *envoy.tracers.datadog*\n    // - *envoy.tracers.opencensus*\n    // - *envoy.tracers.xray*\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // Trace driver specific configuration which depends on the driver being instantiated.\n    // See the trace drivers for examples:\n    //\n    // - :ref:`LightstepConfig <envoy_api_msg_config.trace.v2.LightstepConfig>`\n    // - :ref:`ZipkinConfig <envoy_api_msg_config.trace.v2.ZipkinConfig>`\n    // - :ref:`DynamicOtConfig <envoy_api_msg_config.trace.v2.DynamicOtConfig>`\n    // - :ref:`DatadogConfig <envoy_api_msg_config.trace.v2.DatadogConfig>`\n    // - :ref:`OpenCensusConfig <envoy_api_msg_config.trace.v2.OpenCensusConfig>`\n    // - :ref:`AWS X-Ray <envoy_api_msg_config.trace.v2alpha.XRayConfig>`\n    oneof config_type {\n      google.protobuf.Struct config = 2 [deprecated = true];\n\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Provides configuration for the HTTP tracer.\n  Http http = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2/lightstep.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"LightstepProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: LightStep tracer]\n\n// Configuration for the LightStep tracer.\n// [#extension: envoy.tracers.lightstep]\nmessage LightstepConfig {\n  // Available propagation modes\n  enum PropagationMode {\n    // Propagate trace context in the single header x-ot-span-context.\n    ENVOY = 0;\n\n    // Propagate trace context using LightStep's native format.\n    LIGHTSTEP = 1;\n\n    // Propagate trace context using the b3 format.\n    B3 = 2;\n\n    // Propagation trace context using the w3 trace-context standard.\n    TRACE_CONTEXT = 3;\n  }\n\n  // The cluster manager cluster that hosts the LightStep collectors.\n  string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // File containing the access token to the `LightStep\n  // <https://lightstep.com/>`_ API.\n  string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Propagation modes to use by LightStep's tracer.\n  repeated PropagationMode propagation_modes = 3\n      [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2/opencensus.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"opencensus/proto/trace/v1/trace_config.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"OpencensusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: OpenCensus tracer]\n\n// Configuration for the OpenCensus tracer.\n// [#next-free-field: 15]\n// [#extension: envoy.tracers.opencensus]\nmessage OpenCensusConfig {\n  enum TraceContext {\n    // No-op default, no trace context is utilized.\n    NONE = 0;\n\n    // W3C Trace-Context format \"traceparent:\" header.\n    TRACE_CONTEXT = 1;\n\n    // Binary \"grpc-trace-bin:\" header.\n    GRPC_TRACE_BIN = 2;\n\n    // \"X-Cloud-Trace-Context:\" header.\n    CLOUD_TRACE_CONTEXT = 3;\n\n    // X-B3-* headers.\n    B3 = 4;\n  }\n\n  reserved 7;\n\n  // Configures tracing, e.g. the sampler, max number of annotations, etc.\n  opencensus.proto.trace.v1.TraceConfig trace_config = 1;\n\n  // Enables the stdout exporter if set to true. This is intended for debugging\n  // purposes.\n  bool stdout_exporter_enabled = 2;\n\n  // Enables the Stackdriver exporter if set to true. The project_id must also\n  // be set.\n  bool stackdriver_exporter_enabled = 3;\n\n  // The Cloud project_id to use for Stackdriver tracing.\n  string stackdriver_project_id = 4;\n\n  // (optional) By default, the Stackdriver exporter will connect to production\n  // Stackdriver. If stackdriver_address is non-empty, it will instead connect\n  // to this address, which is in the gRPC format:\n  // https://github.com/grpc/grpc/blob/master/doc/naming.md\n  string stackdriver_address = 10;\n\n  // (optional) The gRPC server that hosts Stackdriver tracing service. Only\n  // Google gRPC is supported. 
If :ref:`target_uri <envoy_v3_api_field_config.core.v3.GrpcService.GoogleGrpc.target_uri>`\n  // is not provided, the default production Stackdriver address will be used.\n  api.v2.core.GrpcService stackdriver_grpc_service = 13;\n\n  // Enables the Zipkin exporter if set to true. The url and service name must\n  // also be set.\n  bool zipkin_exporter_enabled = 5;\n\n  // The URL to Zipkin, e.g. \"http://127.0.0.1:9411/api/v2/spans\"\n  string zipkin_url = 6;\n\n  // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or\n  // ocagent_grpc_service must also be set.\n  bool ocagent_exporter_enabled = 11;\n\n  // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC\n  // format: https://github.com/grpc/grpc/blob/master/doc/naming.md\n  // [#comment:TODO: deprecate this field]\n  string ocagent_address = 12;\n\n  // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported.\n  // This is only used if the ocagent_address is left empty.\n  api.v2.core.GrpcService ocagent_grpc_service = 14;\n\n  // List of incoming trace context headers we will accept. First one found\n  // wins.\n  repeated TraceContext incoming_trace_context = 8;\n\n  // List of outgoing trace context headers we will produce.\n  repeated TraceContext outgoing_trace_context = 9;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2/service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"envoy/api/v2/core/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"ServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Trace Service]\n\n// Configuration structure.\nmessage TraceServiceConfig {\n  // The upstream gRPC cluster that hosts the metrics service.\n  api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2/trace.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/config/trace/v2/datadog.proto\";\nimport public \"envoy/config/trace/v2/dynamic_ot.proto\";\nimport public \"envoy/config/trace/v2/http_tracer.proto\";\nimport public \"envoy/config/trace/v2/lightstep.proto\";\nimport public \"envoy/config/trace/v2/opencensus.proto\";\nimport public \"envoy/config/trace/v2/service.proto\";\nimport public \"envoy/config/trace/v2/zipkin.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"TraceProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2/zipkin.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2\";\noption java_outer_classname = \"ZipkinProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Zipkin tracer]\n\n// Configuration for the Zipkin tracer.\n// [#extension: envoy.tracers.zipkin]\n// [#next-free-field: 6]\nmessage ZipkinConfig {\n  // Available Zipkin collector endpoint versions.\n  enum CollectorEndpointVersion {\n    // Zipkin API v1, JSON over HTTP.\n    // [#comment: The default implementation of Zipkin client before this field is added was only v1\n    // and the way user configure this was by not explicitly specifying the version. Consequently,\n    // before this is added, the corresponding Zipkin collector expected to receive v1 payload.\n    // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when\n    // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field,\n    // since in Zipkin realm this v1 version is considered to be not preferable anymore.]\n    HTTP_JSON_V1 = 0 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // Zipkin API v2, JSON over HTTP.\n    HTTP_JSON = 1;\n\n    // Zipkin API v2, protobuf over HTTP.\n    HTTP_PROTO = 2;\n\n    // [#not-implemented-hide:]\n    GRPC = 3;\n  }\n\n  // The cluster manager cluster that hosts the Zipkin collectors. 
Note that the\n  // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster\n  // resources <envoy_api_field_config.bootstrap.v2.Bootstrap.StaticResources.clusters>`.\n  string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The API endpoint of the Zipkin service where the spans will be sent. When\n  // using a standard Zipkin installation, the API endpoint is typically\n  // /api/v1/spans, which is the default value.\n  string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}];\n\n  // Determines whether a 128bit trace id will be used when creating a new\n  // trace instance. The default value is false, which will result in a 64 bit trace id being used.\n  bool trace_id_128bit = 3;\n\n  // Determines whether client and server spans will share the same span context.\n  // The default value is true.\n  google.protobuf.BoolValue shared_span_context = 4;\n\n  // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be\n  // used.\n  CollectorEndpointVersion collector_endpoint_version = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v2alpha/xray.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v2alpha;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v2alpha\";\noption java_outer_classname = \"XrayProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: AWS X-Ray Tracer Configuration]\n// Configuration for AWS X-Ray tracer\n\nmessage XRayConfig {\n  // The UDP endpoint of the X-Ray Daemon where the spans will be sent.\n  // If this value is not set, the default value of 127.0.0.1:2000 will be used.\n  api.v2.core.SocketAddress daemon_endpoint = 1;\n\n  // The name of the X-Ray segment.\n  string segment_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The location of a local custom sampling rules JSON file.\n  // For an example of the sampling rules see:\n  // `X-Ray SDK documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-go-configuration.html#xray-sdk-go-configuration-sampling>`_\n  api.v2.core.DataSource sampling_rule_manifest = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/trace/v2:pkg\",\n        \"//envoy/config/trace/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/datadog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"DatadogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.tracers.datadog.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Datadog tracer]\n\n// Configuration for the Datadog tracer.\n// [#extension: envoy.tracers.datadog]\nmessage DatadogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.DatadogConfig\";\n\n  // The cluster to use for submitting traces to the Datadog agent.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The name used for the service when traces are generated by envoy.\n  string service_name = 2 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"DynamicOtProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.tracers.dynamic_ot.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamically loadable OpenTracing tracer]\n\n// DynamicOtConfig is used to dynamically load a tracer from a shared library\n// that implements the `OpenTracing dynamic loading API\n// <https://github.com/opentracing/opentracing-cpp>`_.\n// [#extension: envoy.tracers.dynamic_ot]\nmessage DynamicOtConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.DynamicOtConfig\";\n\n  // Dynamic library implementing the `OpenTracing API\n  // <https://github.com/opentracing/opentracing-cpp>`_.\n  string library = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The configuration to use when creating a tracer from the given dynamic\n  // library.\n  google.protobuf.Struct config = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/http_tracer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"HttpTracerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tracing]\n// Tracing :ref:`architecture overview <arch_overview_tracing>`.\n\n// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy.\n//\n// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one\n// supported.\n//\n// .. attention::\n//\n//   Use of this message type has been deprecated in favor of direct use of\n//   :ref:`Tracing.Http <envoy_api_msg_config.trace.v3.Tracing.Http>`.\nmessage Tracing {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v2.Tracing\";\n\n  // Configuration for an HTTP tracer provider used by Envoy.\n  //\n  // The configuration is defined by the\n  // :ref:`HttpConnectionManager.Tracing <envoy_api_msg_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing>`\n  // :ref:`provider <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider>`\n  // field.\n  message Http {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.trace.v2.Tracing.Http\";\n\n    // The name of the HTTP trace driver to instantiate. The name must match a\n    // supported HTTP trace driver. 
Built-in trace drivers:\n    //\n    // - *envoy.tracers.lightstep*\n    // - *envoy.tracers.zipkin*\n    // - *envoy.tracers.dynamic_ot*\n    // - *envoy.tracers.datadog*\n    // - *envoy.tracers.opencensus*\n    // - *envoy.tracers.xray*\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Trace driver specific configuration which depends on the driver being instantiated.\n    // See the trace drivers for examples:\n    //\n    // - :ref:`LightstepConfig <envoy_api_msg_config.trace.v3.LightstepConfig>`\n    // - :ref:`ZipkinConfig <envoy_api_msg_config.trace.v3.ZipkinConfig>`\n    // - :ref:`DynamicOtConfig <envoy_api_msg_config.trace.v3.DynamicOtConfig>`\n    // - :ref:`DatadogConfig <envoy_api_msg_config.trace.v3.DatadogConfig>`\n    // - :ref:`OpenCensusConfig <envoy_api_msg_config.trace.v3.OpenCensusConfig>`\n    // - :ref:`AWS X-Ray <envoy_api_msg_config.trace.v3.XRayConfig>`\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n\n      google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n    }\n  }\n\n  // Provides configuration for the HTTP tracer.\n  Http http = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/lightstep.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"LightstepProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.tracers.lightstep.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: LightStep tracer]\n\n// Configuration for the LightStep tracer.\n// [#extension: envoy.tracers.lightstep]\nmessage LightstepConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.LightstepConfig\";\n\n  // Available propagation modes\n  enum PropagationMode {\n    // Propagate trace context in the single header x-ot-span-context.\n    ENVOY = 0;\n\n    // Propagate trace context using LightStep's native format.\n    LIGHTSTEP = 1;\n\n    // Propagate trace context using the b3 format.\n    B3 = 2;\n\n    // Propagation trace context using the w3 trace-context standard.\n    TRACE_CONTEXT = 3;\n  }\n\n  // The cluster manager cluster that hosts the LightStep collectors.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // File containing the access token to the `LightStep\n  // <https://lightstep.com/>`_ API.\n  string access_token_file = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Propagation modes to use by LightStep's tracer.\n  repeated PropagationMode propagation_modes = 3\n      [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/opencensus.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"opencensus/proto/trace/v1/trace_config.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"OpencensusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.tracers.opencensus.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: OpenCensus tracer]\n\n// Configuration for the OpenCensus tracer.\n// [#next-free-field: 15]\n// [#extension: envoy.tracers.opencensus]\nmessage OpenCensusConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.OpenCensusConfig\";\n\n  enum TraceContext {\n    // No-op default, no trace context is utilized.\n    NONE = 0;\n\n    // W3C Trace-Context format \"traceparent:\" header.\n    TRACE_CONTEXT = 1;\n\n    // Binary \"grpc-trace-bin:\" header.\n    GRPC_TRACE_BIN = 2;\n\n    // \"X-Cloud-Trace-Context:\" header.\n    CLOUD_TRACE_CONTEXT = 3;\n\n    // X-B3-* headers.\n    B3 = 4;\n  }\n\n  reserved 7;\n\n  // Configures tracing, e.g. the sampler, max number of annotations, etc.\n  opencensus.proto.trace.v1.TraceConfig trace_config = 1;\n\n  // Enables the stdout exporter if set to true. This is intended for debugging\n  // purposes.\n  bool stdout_exporter_enabled = 2;\n\n  // Enables the Stackdriver exporter if set to true. The project_id must also\n  // be set.\n  bool stackdriver_exporter_enabled = 3;\n\n  // The Cloud project_id to use for Stackdriver tracing.\n  string stackdriver_project_id = 4;\n\n  // (optional) By default, the Stackdriver exporter will connect to production\n  // Stackdriver. 
If stackdriver_address is non-empty, it will instead connect\n  // to this address, which is in the gRPC format:\n  // https://github.com/grpc/grpc/blob/master/doc/naming.md\n  string stackdriver_address = 10;\n\n  // (optional) The gRPC server that hosts Stackdriver tracing service. Only\n  // Google gRPC is supported. If :ref:`target_uri <envoy_v3_api_field_config.core.v3.GrpcService.GoogleGrpc.target_uri>`\n  // is not provided, the default production Stackdriver address will be used.\n  core.v3.GrpcService stackdriver_grpc_service = 13;\n\n  // Enables the Zipkin exporter if set to true. The url and service name must\n  // also be set.\n  bool zipkin_exporter_enabled = 5;\n\n  // The URL to Zipkin, e.g. \"http://127.0.0.1:9411/api/v2/spans\"\n  string zipkin_url = 6;\n\n  // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or\n  // ocagent_grpc_service must also be set.\n  bool ocagent_exporter_enabled = 11;\n\n  // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC\n  // format: https://github.com/grpc/grpc/blob/master/doc/naming.md\n  // [#comment:TODO: deprecate this field]\n  string ocagent_address = 12;\n\n  // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported.\n  // This is only used if the ocagent_address is left empty.\n  core.v3.GrpcService ocagent_grpc_service = 14;\n\n  // List of incoming trace context headers we will accept. First one found\n  // wins.\n  repeated TraceContext incoming_trace_context = 8;\n\n  // List of outgoing trace context headers we will produce.\n  repeated TraceContext outgoing_trace_context = 9;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"ServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Trace Service]\n\n// Configuration structure.\nmessage TraceServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2.TraceServiceConfig\";\n\n  // The upstream gRPC cluster that hosts the metrics service.\n  core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/trace.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/config/trace/v3/datadog.proto\";\nimport public \"envoy/config/trace/v3/dynamic_ot.proto\";\nimport public \"envoy/config/trace/v3/http_tracer.proto\";\nimport public \"envoy/config/trace/v3/lightstep.proto\";\nimport public \"envoy/config/trace/v3/opencensus.proto\";\nimport public \"envoy/config/trace/v3/service.proto\";\nimport public \"envoy/config/trace/v3/zipkin.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"TraceProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/xray.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"XrayProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.tracers.xray.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: AWS X-Ray Tracer Configuration]\n// Configuration for AWS X-Ray tracer\n\nmessage XRayConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v2alpha.XRayConfig\";\n\n  message SegmentFields {\n    // The type of AWS resource, e.g. 
\"AWS::AppMesh::Proxy\".\n    string origin = 1;\n\n    // AWS resource metadata dictionary.\n    // See: `X-Ray Segment Document documentation <https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-aws>`__\n    google.protobuf.Struct aws = 2;\n  }\n\n  // The UDP endpoint of the X-Ray Daemon where the spans will be sent.\n  // If this value is not set, the default value of 127.0.0.1:2000 will be used.\n  core.v3.SocketAddress daemon_endpoint = 1;\n\n  // The name of the X-Ray segment.\n  string segment_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The location of a local custom sampling rules JSON file.\n  // For an example of the sampling rules see:\n  // `X-Ray SDK documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-go-configuration.html#xray-sdk-go-configuration-sampling>`_\n  core.v3.DataSource sampling_rule_manifest = 3;\n\n  // Optional custom fields to be added to each trace segment.\n  // see: `X-Ray Segment Document documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html>`__\n  SegmentFields segment_fields = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v3/zipkin.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v3\";\noption java_outer_classname = \"ZipkinProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.extensions.tracers.zipkin.v4alpha\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Zipkin tracer]\n\n// Configuration for the Zipkin tracer.\n// [#extension: envoy.tracers.zipkin]\n// [#next-free-field: 6]\nmessage ZipkinConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v2.ZipkinConfig\";\n\n  // Available Zipkin collector endpoint versions.\n  enum CollectorEndpointVersion {\n    // Zipkin API v1, JSON over HTTP.\n    // [#comment: The default implementation of Zipkin client before this field is added was only v1\n    // and the way user configure this was by not explicitly specifying the version. Consequently,\n    // before this is added, the corresponding Zipkin collector expected to receive v1 payload.\n    // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when\n    // user upgrading Envoy with this change. 
Furthermore, we also immediately deprecate this field,\n    // since in Zipkin realm this v1 version is considered to be not preferable anymore.]\n    hidden_envoy_deprecated_HTTP_JSON_V1 = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // Zipkin API v2, JSON over HTTP.\n    HTTP_JSON = 1;\n\n    // Zipkin API v2, protobuf over HTTP.\n    HTTP_PROTO = 2;\n\n    // [#not-implemented-hide:]\n    GRPC = 3;\n  }\n\n  // The cluster manager cluster that hosts the Zipkin collectors. Note that the\n  // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster\n  // resources <envoy_api_field_config.bootstrap.v3.Bootstrap.StaticResources.clusters>`.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The API endpoint of the Zipkin service where the spans will be sent. When\n  // using a standard Zipkin installation, the API endpoint is typically\n  // /api/v1/spans, which is the default value.\n  string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Determines whether a 128bit trace id will be used when creating a new\n  // trace instance. The default value is false, which will result in a 64 bit trace id being used.\n  bool trace_id_128bit = 3;\n\n  // Determines whether client and server spans will share the same span context.\n  // The default value is true.\n  google.protobuf.BoolValue shared_span_context = 4;\n\n  // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be\n  // used.\n  CollectorEndpointVersion collector_endpoint_version = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v4alpha/http_tracer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v4alpha;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v4alpha\";\noption java_outer_classname = \"HttpTracerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tracing]\n// Tracing :ref:`architecture overview <arch_overview_tracing>`.\n\n// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy.\n//\n// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one\n// supported.\n//\n// .. attention::\n//\n//   Use of this message type has been deprecated in favor of direct use of\n//   :ref:`Tracing.Http <envoy_api_msg_config.trace.v4alpha.Tracing.Http>`.\nmessage Tracing {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v3.Tracing\";\n\n  // Configuration for an HTTP tracer provider used by Envoy.\n  //\n  // The configuration is defined by the\n  // :ref:`HttpConnectionManager.Tracing <envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.Tracing>`\n  // :ref:`provider <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.Tracing.provider>`\n  // field.\n  message Http {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.trace.v3.Tracing.Http\";\n\n    reserved 2;\n\n    reserved \"config\";\n\n    // The name of the HTTP trace driver to instantiate. The name must match a\n    // supported HTTP trace driver. 
Built-in trace drivers:\n    //\n    // - *envoy.tracers.lightstep*\n    // - *envoy.tracers.zipkin*\n    // - *envoy.tracers.dynamic_ot*\n    // - *envoy.tracers.datadog*\n    // - *envoy.tracers.opencensus*\n    // - *envoy.tracers.xray*\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Trace driver specific configuration which depends on the driver being instantiated.\n    // See the trace drivers for examples:\n    //\n    // - :ref:`LightstepConfig <envoy_api_msg_extensions.tracers.lightstep.v4alpha.LightstepConfig>`\n    // - :ref:`ZipkinConfig <envoy_api_msg_extensions.tracers.zipkin.v4alpha.ZipkinConfig>`\n    // - :ref:`DynamicOtConfig <envoy_api_msg_extensions.tracers.dynamic_ot.v4alpha.DynamicOtConfig>`\n    // - :ref:`DatadogConfig <envoy_api_msg_extensions.tracers.datadog.v4alpha.DatadogConfig>`\n    // - :ref:`OpenCensusConfig <envoy_api_msg_extensions.tracers.opencensus.v4alpha.OpenCensusConfig>`\n    // - :ref:`AWS X-Ray <envoy_api_msg_extensions.tracers.xray.v4alpha.XRayConfig>`\n    oneof config_type {\n      google.protobuf.Any typed_config = 3;\n    }\n  }\n\n  // Provides configuration for the HTTP tracer.\n  Http http = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/trace/v4alpha/service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.trace.v4alpha;\n\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.trace.v4alpha\";\noption java_outer_classname = \"ServiceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Trace Service]\n\n// Configuration structure.\nmessage TraceServiceConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.TraceServiceConfig\";\n\n  // The upstream gRPC cluster that hosts the metrics service.\n  core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.transport_socket.alts.v2alpha;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.transport_socket.alts.v2alpha\";\noption java_outer_classname = \"AltsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.alts.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: ALTS]\n// [#extension: envoy.transport_sockets.alts]\n\n// Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy.\n// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/\nmessage Alts {\n  // The location of a handshaker service, this is usually 169.254.169.254:8080\n  // on GCE.\n  string handshaker_service = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The acceptable service accounts from peer, peers not in the list will be rejected in the\n  // handshake validation step. If empty, no validation will be performed.\n  repeated string peer_service_accounts = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.transport_socket.raw_buffer.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.transport_socket.raw_buffer.v2\";\noption java_outer_classname = \"RawBufferProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.raw_buffer.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Raw Buffer]\n// [#extension: envoy.transport_sockets.raw_buffer]\n\n// Configuration for raw buffer transport socket.\nmessage RawBuffer {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/config/common/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.config.transport_socket.tap.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/config/common/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.config.transport_socket.tap.v2alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package =\n    \"envoy.extensions.transport_sockets.tap.v3\";\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap]\n// [#extension: envoy.transport_sockets.tap]\n\n// Configuration for tap transport socket. This wraps another transport socket, providing the\n// ability to interpose and record in plain text any traffic that is surfaced to Envoy.\nmessage Tap {\n  // Common configuration for the tap transport socket.\n  common.tap.v2alpha.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n\n  // The underlying transport socket being wrapped.\n  api.v2.core.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/accesslog/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.accesslog.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.accesslog.v2\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC access logs]\n// Envoy access logs describe incoming interaction with Envoy over a fixed\n// period of time, and typically cover a single request/response exchange,\n// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP).\n// Access logs contain fields defined in protocol-specific protobuf messages.\n//\n// Except where explicitly declared otherwise, all fields describe\n// *downstream* interaction between Envoy and a connected client.\n// Fields describing *upstream* interaction will explicitly include ``upstream``\n// in their name.\n\nmessage TCPAccessLogEntry {\n  // Common properties shared by all Envoy access logs.\n  AccessLogCommon common_properties = 1;\n\n  // Properties of the TCP connection.\n  ConnectionProperties connection_properties = 2;\n}\n\nmessage HTTPAccessLogEntry {\n  // HTTP version\n  enum HTTPVersion {\n    PROTOCOL_UNSPECIFIED = 0;\n    HTTP10 = 1;\n    HTTP11 = 2;\n    HTTP2 = 3;\n    HTTP3 = 4;\n  }\n\n  // Common properties shared by all Envoy access logs.\n  AccessLogCommon common_properties = 1;\n\n  HTTPVersion protocol_version = 2;\n\n  // Description of the incoming HTTP request.\n  HTTPRequestProperties request = 3;\n\n  // Description of the outgoing HTTP response.\n  HTTPResponseProperties response = 4;\n}\n\n// Defines fields for a 
connection\nmessage ConnectionProperties {\n  // Number of bytes received from downstream.\n  uint64 received_bytes = 1;\n\n  // Number of bytes sent to downstream.\n  uint64 sent_bytes = 2;\n}\n\n// Defines fields that are shared by all Envoy access logs.\n// [#next-free-field: 22]\nmessage AccessLogCommon {\n  // [#not-implemented-hide:]\n  // This field indicates the rate at which this log entry was sampled.\n  // Valid range is (0.0, 1.0].\n  double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}];\n\n  // This field is the remote/origin address on which the request from the user was received.\n  // Note: This may not be the physical peer. E.g, if the remote address is inferred from for\n  // example the x-forwarder-for header, proxy protocol, etc.\n  api.v2.core.Address downstream_remote_address = 2;\n\n  // This field is the local/destination address on which the request from the user was received.\n  api.v2.core.Address downstream_local_address = 3;\n\n  // If the connection is secure,S this field will contain TLS properties.\n  TLSProperties tls_properties = 4;\n\n  // The time that Envoy started servicing this request. This is effectively the time that the first\n  // downstream byte is received.\n  google.protobuf.Timestamp start_time = 5;\n\n  // Interval between the first downstream byte received and the last\n  // downstream byte received (i.e. time it takes to receive a request).\n  google.protobuf.Duration time_to_last_rx_byte = 6;\n\n  // Interval between the first downstream byte received and the first upstream byte sent. There may\n  // by considerable delta between *time_to_last_rx_byte* and this value due to filters.\n  // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about\n  // not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_first_upstream_tx_byte = 7;\n\n  // Interval between the first downstream byte received and the last upstream byte sent. 
There may\n  // by considerable delta between *time_to_last_rx_byte* and this value due to filters.\n  // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about\n  // not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_last_upstream_tx_byte = 8;\n\n  // Interval between the first downstream byte received and the first upstream\n  // byte received (i.e. time it takes to start receiving a response).\n  google.protobuf.Duration time_to_first_upstream_rx_byte = 9;\n\n  // Interval between the first downstream byte received and the last upstream\n  // byte received (i.e. time it takes to receive a complete response).\n  google.protobuf.Duration time_to_last_upstream_rx_byte = 10;\n\n  // Interval between the first downstream byte received and the first downstream byte sent.\n  // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field\n  // due to filters. Additionally, the same caveats apply as documented in\n  // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_first_downstream_tx_byte = 11;\n\n  // Interval between the first downstream byte received and the last downstream byte sent.\n  // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta\n  // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate\n  // time. In the current implementation it does not include kernel socket buffer time. In the\n  // current implementation it also does not include send window buffering inside the HTTP/2 codec.\n  // In the future it is likely that work will be done to make this duration more accurate.\n  google.protobuf.Duration time_to_last_downstream_tx_byte = 12;\n\n  // The upstream remote/destination address that handles this exchange. 
This does not include\n  // retries.\n  api.v2.core.Address upstream_remote_address = 13;\n\n  // The upstream local/origin address that handles this exchange. This does not include retries.\n  api.v2.core.Address upstream_local_address = 14;\n\n  // The upstream cluster that *upstream_remote_address* belongs to.\n  string upstream_cluster = 15;\n\n  // Flags indicating occurrences during request/response processing.\n  ResponseFlags response_flags = 16;\n\n  // All metadata encountered during request processing, including endpoint\n  // selection.\n  //\n  // This can be used to associate IDs attached to the various configurations\n  // used to process this request with the access log entry. For example, a\n  // route created from a higher level forwarding rule with some ID can place\n  // that ID in this field and cross reference later. It can also be used to\n  // determine if a canary endpoint was used or not.\n  api.v2.core.Metadata metadata = 17;\n\n  // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the\n  // failure reason from the transport socket. The format of this field depends on the configured\n  // upstream transport socket. Common TLS failures are in\n  // :ref:`TLS trouble shooting <arch_overview_ssl_trouble_shooting>`.\n  string upstream_transport_failure_reason = 18;\n\n  // The name of the route\n  string route_name = 19;\n\n  // This field is the downstream direct remote address on which the request from the user was\n  // received. Note: This is always the physical peer, even if the remote address is inferred from\n  // for example the x-forwarder-for header, proxy protocol, etc.\n  api.v2.core.Address downstream_direct_remote_address = 20;\n\n  // Map of filter state in stream info that have been configured to be logged. 
If the filter\n  // state serialized to any message other than `google.protobuf.Any` it will be packed into\n  // `google.protobuf.Any`.\n  map<string, google.protobuf.Any> filter_state_objects = 21;\n}\n\n// Flags indicating occurrences during request/response processing.\n// [#next-free-field: 20]\nmessage ResponseFlags {\n  message Unauthorized {\n    // Reasons why the request was unauthorized\n    enum Reason {\n      REASON_UNSPECIFIED = 0;\n\n      // The request was denied by the external authorization service.\n      EXTERNAL_SERVICE = 1;\n    }\n\n    Reason reason = 1;\n  }\n\n  // Indicates local server healthcheck failed.\n  bool failed_local_healthcheck = 1;\n\n  // Indicates there was no healthy upstream.\n  bool no_healthy_upstream = 2;\n\n  // Indicates an there was an upstream request timeout.\n  bool upstream_request_timeout = 3;\n\n  // Indicates local codec level reset was sent on the stream.\n  bool local_reset = 4;\n\n  // Indicates remote codec level reset was received on the stream.\n  bool upstream_remote_reset = 5;\n\n  // Indicates there was a local reset by a connection pool due to an initial connection failure.\n  bool upstream_connection_failure = 6;\n\n  // Indicates the stream was reset due to an upstream connection termination.\n  bool upstream_connection_termination = 7;\n\n  // Indicates the stream was reset because of a resource overflow.\n  bool upstream_overflow = 8;\n\n  // Indicates no route was found for the request.\n  bool no_route_found = 9;\n\n  // Indicates that the request was delayed before proxying.\n  bool delay_injected = 10;\n\n  // Indicates that the request was aborted with an injected error code.\n  bool fault_injected = 11;\n\n  // Indicates that the request was rate-limited locally.\n  bool rate_limited = 12;\n\n  // Indicates if the request was deemed unauthorized and the reason for it.\n  Unauthorized unauthorized_details = 13;\n\n  // Indicates that the request was rejected because there was an error in 
rate limit service.\n  bool rate_limit_service_error = 14;\n\n  // Indicates the stream was reset due to a downstream connection termination.\n  bool downstream_connection_termination = 15;\n\n  // Indicates that the upstream retry limit was exceeded, resulting in a downstream error.\n  bool upstream_retry_limit_exceeded = 16;\n\n  // Indicates that the stream idle timeout was hit, resulting in a downstream 408.\n  bool stream_idle_timeout = 17;\n\n  // Indicates that the request was rejected because an envoy request header failed strict\n  // validation.\n  bool invalid_envoy_request_headers = 18;\n\n  // Indicates there was an HTTP protocol error on the downstream request.\n  bool downstream_protocol_error = 19;\n}\n\n// Properties of a negotiated TLS connection.\n// [#next-free-field: 7]\nmessage TLSProperties {\n  enum TLSVersion {\n    VERSION_UNSPECIFIED = 0;\n    TLSv1 = 1;\n    TLSv1_1 = 2;\n    TLSv1_2 = 3;\n    TLSv1_3 = 4;\n  }\n\n  message CertificateProperties {\n    message SubjectAltName {\n      oneof san {\n        string uri = 1;\n\n        // [#not-implemented-hide:]\n        string dns = 2;\n      }\n    }\n\n    // SANs present in the certificate.\n    repeated SubjectAltName subject_alt_name = 1;\n\n    // The subject field of the certificate.\n    string subject = 2;\n  }\n\n  // Version of TLS that was negotiated.\n  TLSVersion tls_version = 1;\n\n  // TLS cipher suite negotiated during handshake. The value is a\n  // four-digit hex code defined by the IANA TLS Cipher Suite Registry\n  // (e.g. 
``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``).\n  //\n  // Here it is expressed as an integer.\n  google.protobuf.UInt32Value tls_cipher_suite = 2;\n\n  // SNI hostname from handshake.\n  string tls_sni_hostname = 3;\n\n  // Properties of the local certificate used to negotiate TLS.\n  CertificateProperties local_certificate_properties = 4;\n\n  // Properties of the peer certificate used to negotiate TLS.\n  CertificateProperties peer_certificate_properties = 5;\n\n  // The TLS session ID.\n  string tls_session_id = 6;\n}\n\n// [#next-free-field: 14]\nmessage HTTPRequestProperties {\n  // The request method (RFC 7231/2616).\n  api.v2.core.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The scheme portion of the incoming request URI.\n  string scheme = 2;\n\n  // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value.\n  string authority = 3;\n\n  // The port of the incoming request URI\n  // (unused currently, as port is composed onto authority).\n  google.protobuf.UInt32Value port = 4;\n\n  // The path portion from the incoming request URI.\n  string path = 5;\n\n  // Value of the ``User-Agent`` request header.\n  string user_agent = 6;\n\n  // Value of the ``Referer`` request header.\n  string referer = 7;\n\n  // Value of the ``X-Forwarded-For`` request header.\n  string forwarded_for = 8;\n\n  // Value of the ``X-Request-Id`` request header\n  //\n  // This header is used by Envoy to uniquely identify a request.\n  // It will be generated for all external requests and internal requests that\n  // do not already have a request ID.\n  string request_id = 9;\n\n  // Value of the ``X-Envoy-Original-Path`` request header.\n  string original_path = 10;\n\n  // Size of the HTTP request headers in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. 
it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 request_headers_bytes = 11;\n\n  // Size of the HTTP request body in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 request_body_bytes = 12;\n\n  // Map of additional headers that have been configured to be logged.\n  map<string, string> request_headers = 13;\n}\n\n// [#next-free-field: 7]\nmessage HTTPResponseProperties {\n  // The HTTP response code returned by Envoy.\n  google.protobuf.UInt32Value response_code = 1;\n\n  // Size of the HTTP response headers in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 response_headers_bytes = 2;\n\n  // Size of the HTTP response body in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 response_body_bytes = 3;\n\n  // Map of additional headers configured to be logged.\n  map<string, string> response_headers = 4;\n\n  // Map of trailers configured to be logged.\n  map<string, string> response_trailers = 5;\n\n  // The HTTP response code details.\n  string response_code_details = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/accesslog/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/accesslog/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.accesslog.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.accesslog.v3\";\noption java_outer_classname = \"AccesslogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC access logs]\n// Envoy access logs describe incoming interaction with Envoy over a fixed\n// period of time, and typically cover a single request/response exchange,\n// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP).\n// Access logs contain fields defined in protocol-specific protobuf messages.\n//\n// Except where explicitly declared otherwise, all fields describe\n// *downstream* interaction between Envoy and a connected client.\n// Fields describing *upstream* interaction will explicitly include ``upstream``\n// in their name.\n\nmessage TCPAccessLogEntry {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.TCPAccessLogEntry\";\n\n  // Common properties shared by all Envoy access logs.\n  AccessLogCommon common_properties = 1;\n\n  // Properties of the TCP connection.\n  ConnectionProperties connection_properties = 2;\n}\n\nmessage HTTPAccessLogEntry {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.HTTPAccessLogEntry\";\n\n  // HTTP version\n  enum HTTPVersion {\n    PROTOCOL_UNSPECIFIED = 0;\n    HTTP10 = 1;\n    HTTP11 = 2;\n    HTTP2 = 3;\n    HTTP3 = 4;\n  }\n\n  // Common properties shared by all Envoy access logs.\n  
AccessLogCommon common_properties = 1;\n\n  HTTPVersion protocol_version = 2;\n\n  // Description of the incoming HTTP request.\n  HTTPRequestProperties request = 3;\n\n  // Description of the outgoing HTTP response.\n  HTTPResponseProperties response = 4;\n}\n\n// Defines fields for a connection\nmessage ConnectionProperties {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.ConnectionProperties\";\n\n  // Number of bytes received from downstream.\n  uint64 received_bytes = 1;\n\n  // Number of bytes sent to downstream.\n  uint64 sent_bytes = 2;\n}\n\n// Defines fields that are shared by all Envoy access logs.\n// [#next-free-field: 22]\nmessage AccessLogCommon {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.AccessLogCommon\";\n\n  // [#not-implemented-hide:]\n  // This field indicates the rate at which this log entry was sampled.\n  // Valid range is (0.0, 1.0].\n  double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}];\n\n  // This field is the remote/origin address on which the request from the user was received.\n  // Note: This may not be the physical peer. E.g, if the remote address is inferred from for\n  // example the x-forwarder-for header, proxy protocol, etc.\n  config.core.v3.Address downstream_remote_address = 2;\n\n  // This field is the local/destination address on which the request from the user was received.\n  config.core.v3.Address downstream_local_address = 3;\n\n  // If the connection is secure,S this field will contain TLS properties.\n  TLSProperties tls_properties = 4;\n\n  // The time that Envoy started servicing this request. This is effectively the time that the first\n  // downstream byte is received.\n  google.protobuf.Timestamp start_time = 5;\n\n  // Interval between the first downstream byte received and the last\n  // downstream byte received (i.e. 
time it takes to receive a request).\n  google.protobuf.Duration time_to_last_rx_byte = 6;\n\n  // Interval between the first downstream byte received and the first upstream byte sent. There may\n  // by considerable delta between *time_to_last_rx_byte* and this value due to filters.\n  // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about\n  // not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_first_upstream_tx_byte = 7;\n\n  // Interval between the first downstream byte received and the last upstream byte sent. There may\n  // by considerable delta between *time_to_last_rx_byte* and this value due to filters.\n  // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about\n  // not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_last_upstream_tx_byte = 8;\n\n  // Interval between the first downstream byte received and the first upstream\n  // byte received (i.e. time it takes to start receiving a response).\n  google.protobuf.Duration time_to_first_upstream_rx_byte = 9;\n\n  // Interval between the first downstream byte received and the last upstream\n  // byte received (i.e. time it takes to receive a complete response).\n  google.protobuf.Duration time_to_last_upstream_rx_byte = 10;\n\n  // Interval between the first downstream byte received and the first downstream byte sent.\n  // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field\n  // due to filters. Additionally, the same caveats apply as documented in\n  // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc.\n  google.protobuf.Duration time_to_first_downstream_tx_byte = 11;\n\n  // Interval between the first downstream byte received and the last downstream byte sent.\n  // Depending on protocol, buffering, windowing, filters, etc. 
there may be a considerable delta\n  // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate\n  // time. In the current implementation it does not include kernel socket buffer time. In the\n  // current implementation it also does not include send window buffering inside the HTTP/2 codec.\n  // In the future it is likely that work will be done to make this duration more accurate.\n  google.protobuf.Duration time_to_last_downstream_tx_byte = 12;\n\n  // The upstream remote/destination address that handles this exchange. This does not include\n  // retries.\n  config.core.v3.Address upstream_remote_address = 13;\n\n  // The upstream local/origin address that handles this exchange. This does not include retries.\n  config.core.v3.Address upstream_local_address = 14;\n\n  // The upstream cluster that *upstream_remote_address* belongs to.\n  string upstream_cluster = 15;\n\n  // Flags indicating occurrences during request/response processing.\n  ResponseFlags response_flags = 16;\n\n  // All metadata encountered during request processing, including endpoint\n  // selection.\n  //\n  // This can be used to associate IDs attached to the various configurations\n  // used to process this request with the access log entry. For example, a\n  // route created from a higher level forwarding rule with some ID can place\n  // that ID in this field and cross reference later. It can also be used to\n  // determine if a canary endpoint was used or not.\n  config.core.v3.Metadata metadata = 17;\n\n  // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the\n  // failure reason from the transport socket. The format of this field depends on the configured\n  // upstream transport socket. 
Common TLS failures are in\n  // :ref:`TLS trouble shooting <arch_overview_ssl_trouble_shooting>`.\n  string upstream_transport_failure_reason = 18;\n\n  // The name of the route\n  string route_name = 19;\n\n  // This field is the downstream direct remote address on which the request from the user was\n  // received. Note: This is always the physical peer, even if the remote address is inferred from\n  // for example the x-forwarder-for header, proxy protocol, etc.\n  config.core.v3.Address downstream_direct_remote_address = 20;\n\n  // Map of filter state in stream info that have been configured to be logged. If the filter\n  // state serialized to any message other than `google.protobuf.Any` it will be packed into\n  // `google.protobuf.Any`.\n  map<string, google.protobuf.Any> filter_state_objects = 21;\n}\n\n// Flags indicating occurrences during request/response processing.\n// [#next-free-field: 24]\nmessage ResponseFlags {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.ResponseFlags\";\n\n  message Unauthorized {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.accesslog.v2.ResponseFlags.Unauthorized\";\n\n    // Reasons why the request was unauthorized\n    enum Reason {\n      REASON_UNSPECIFIED = 0;\n\n      // The request was denied by the external authorization service.\n      EXTERNAL_SERVICE = 1;\n    }\n\n    Reason reason = 1;\n  }\n\n  // Indicates local server healthcheck failed.\n  bool failed_local_healthcheck = 1;\n\n  // Indicates there was no healthy upstream.\n  bool no_healthy_upstream = 2;\n\n  // Indicates an there was an upstream request timeout.\n  bool upstream_request_timeout = 3;\n\n  // Indicates local codec level reset was sent on the stream.\n  bool local_reset = 4;\n\n  // Indicates remote codec level reset was received on the stream.\n  bool upstream_remote_reset = 5;\n\n  // Indicates there was a local reset by a connection pool due to an 
initial connection failure.\n  bool upstream_connection_failure = 6;\n\n  // Indicates the stream was reset due to an upstream connection termination.\n  bool upstream_connection_termination = 7;\n\n  // Indicates the stream was reset because of a resource overflow.\n  bool upstream_overflow = 8;\n\n  // Indicates no route was found for the request.\n  bool no_route_found = 9;\n\n  // Indicates that the request was delayed before proxying.\n  bool delay_injected = 10;\n\n  // Indicates that the request was aborted with an injected error code.\n  bool fault_injected = 11;\n\n  // Indicates that the request was rate-limited locally.\n  bool rate_limited = 12;\n\n  // Indicates if the request was deemed unauthorized and the reason for it.\n  Unauthorized unauthorized_details = 13;\n\n  // Indicates that the request was rejected because there was an error in rate limit service.\n  bool rate_limit_service_error = 14;\n\n  // Indicates the stream was reset due to a downstream connection termination.\n  bool downstream_connection_termination = 15;\n\n  // Indicates that the upstream retry limit was exceeded, resulting in a downstream error.\n  bool upstream_retry_limit_exceeded = 16;\n\n  // Indicates that the stream idle timeout was hit, resulting in a downstream 408.\n  bool stream_idle_timeout = 17;\n\n  // Indicates that the request was rejected because an envoy request header failed strict\n  // validation.\n  bool invalid_envoy_request_headers = 18;\n\n  // Indicates there was an HTTP protocol error on the downstream request.\n  bool downstream_protocol_error = 19;\n\n  // Indicates there was a max stream duration reached on the upstream request.\n  bool upstream_max_stream_duration_reached = 20;\n\n  // Indicates the response was served from a cache filter.\n  bool response_from_cache_filter = 21;\n\n  // Indicates that a filter configuration is not available.\n  bool no_filter_config_found = 22;\n\n  // Indicates that request or connection exceeded the downstream 
connection duration.\n  bool duration_timeout = 23;\n}\n\n// Properties of a negotiated TLS connection.\n// [#next-free-field: 7]\nmessage TLSProperties {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.TLSProperties\";\n\n  enum TLSVersion {\n    VERSION_UNSPECIFIED = 0;\n    TLSv1 = 1;\n    TLSv1_1 = 2;\n    TLSv1_2 = 3;\n    TLSv1_3 = 4;\n  }\n\n  message CertificateProperties {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.accesslog.v2.TLSProperties.CertificateProperties\";\n\n    message SubjectAltName {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.data.accesslog.v2.TLSProperties.CertificateProperties.SubjectAltName\";\n\n      oneof san {\n        string uri = 1;\n\n        // [#not-implemented-hide:]\n        string dns = 2;\n      }\n    }\n\n    // SANs present in the certificate.\n    repeated SubjectAltName subject_alt_name = 1;\n\n    // The subject field of the certificate.\n    string subject = 2;\n  }\n\n  // Version of TLS that was negotiated.\n  TLSVersion tls_version = 1;\n\n  // TLS cipher suite negotiated during handshake. The value is a\n  // four-digit hex code defined by the IANA TLS Cipher Suite Registry\n  // (e.g. 
``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``).\n  //\n  // Here it is expressed as an integer.\n  google.protobuf.UInt32Value tls_cipher_suite = 2;\n\n  // SNI hostname from handshake.\n  string tls_sni_hostname = 3;\n\n  // Properties of the local certificate used to negotiate TLS.\n  CertificateProperties local_certificate_properties = 4;\n\n  // Properties of the peer certificate used to negotiate TLS.\n  CertificateProperties peer_certificate_properties = 5;\n\n  // The TLS session ID.\n  string tls_session_id = 6;\n}\n\n// [#next-free-field: 14]\nmessage HTTPRequestProperties {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.HTTPRequestProperties\";\n\n  // The request method (RFC 7231/2616).\n  config.core.v3.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The scheme portion of the incoming request URI.\n  string scheme = 2;\n\n  // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value.\n  string authority = 3;\n\n  // The port of the incoming request URI\n  // (unused currently, as port is composed onto authority).\n  google.protobuf.UInt32Value port = 4;\n\n  // The path portion from the incoming request URI.\n  string path = 5;\n\n  // Value of the ``User-Agent`` request header.\n  string user_agent = 6;\n\n  // Value of the ``Referer`` request header.\n  string referer = 7;\n\n  // Value of the ``X-Forwarded-For`` request header.\n  string forwarded_for = 8;\n\n  // Value of the ``X-Request-Id`` request header\n  //\n  // This header is used by Envoy to uniquely identify a request.\n  // It will be generated for all external requests and internal requests that\n  // do not already have a request ID.\n  string request_id = 9;\n\n  // Value of the ``X-Envoy-Original-Path`` request header.\n  string original_path = 10;\n\n  // Size of the HTTP request headers in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. 
it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 request_headers_bytes = 11;\n\n  // Size of the HTTP request body in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 request_body_bytes = 12;\n\n  // Map of additional headers that have been configured to be logged.\n  map<string, string> request_headers = 13;\n}\n\n// [#next-free-field: 7]\nmessage HTTPResponseProperties {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.accesslog.v2.HTTPResponseProperties\";\n\n  // The HTTP response code returned by Envoy.\n  google.protobuf.UInt32Value response_code = 1;\n\n  // Size of the HTTP response headers in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 response_headers_bytes = 2;\n\n  // Size of the HTTP response body in bytes.\n  //\n  // This value is captured from the OSI layer 7 perspective, i.e. it does not\n  // include overhead from framing or encoding at other networking layers.\n  uint64 response_body_bytes = 3;\n\n  // Map of additional headers configured to be logged.\n  map<string, string> response_headers = 4;\n\n  // Map of trailers configured to be logged.\n  map<string, string> response_trailers = 5;\n\n  // The HTTP response code details.\n  string response_code_details = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/cluster/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.cluster.v2alpha;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.cluster.v2alpha\";\noption java_outer_classname = \"OutlierDetectionEventProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.data.cluster.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Outlier detection logging events]\n// :ref:`Outlier detection logging <arch_overview_outlier_detection_logging>`.\n\n// Type of ejection that took place\nenum OutlierEjectionType {\n  // In case upstream host returns certain number of consecutive 5xx.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all type of errors are treated as HTTP 5xx errors.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  CONSECUTIVE_5XX = 0;\n\n  // In case upstream host returns certain number of consecutive gateway errors\n  CONSECUTIVE_GATEWAY_FAILURE = 1;\n\n  // Runs over aggregated success rate statistics from every host in cluster\n  // and selects hosts for which ratio of successful replies deviates from other hosts\n  // in the cluster.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors (externally and locally generated) are used to calculate success rate\n  // statistics. 
See :ref:`Cluster outlier detection <arch_overview_outlier_detection>`\n  // documentation for details.\n  SUCCESS_RATE = 2;\n\n  // Consecutive local origin failures: Connection failures, resets, timeouts, etc\n  // This type of ejection happens only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3;\n\n  // Runs over aggregated success rate statistics for local origin failures\n  // for all hosts in the cluster and selects hosts for which success rate deviates from other\n  // hosts in the cluster. This type of ejection happens only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`\n  // is set to *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  SUCCESS_RATE_LOCAL_ORIGIN = 4;\n\n  // Runs over aggregated success rate statistics from every host in cluster and selects hosts for\n  // which ratio of failed replies is above configured value.\n  FAILURE_PERCENTAGE = 5;\n\n  // Runs over aggregated success rate statistics for local origin failures from every host in\n  // cluster and selects hosts for which ratio of failed replies is above configured value.\n  FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6;\n}\n\n// Represents possible action applied to upstream host\nenum Action {\n  // In case host was excluded from service\n  EJECT = 0;\n\n  // In case host was brought back into service\n  UNEJECT = 1;\n}\n\n// [#next-free-field: 12]\nmessage OutlierDetectionEvent {\n  // In case of eject represents type of ejection that took place.\n  OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Timestamp for event.\n  google.protobuf.Timestamp 
timestamp = 2;\n\n  // The time in seconds since the last action (either an ejection or unejection) took place.\n  google.protobuf.UInt64Value secs_since_last_action = 3;\n\n  // The :ref:`cluster <envoy_api_msg_Cluster>` that owns the ejected host.\n  string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}];\n\n  // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``.\n  string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}];\n\n  // The action that took place.\n  Action action = 6 [(validate.rules).enum = {defined_only: true}];\n\n  // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to\n  // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and\n  // then re-added).\n  uint32 num_ejections = 7;\n\n  // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was\n  // ejected. ``false`` means the event was logged but the host was not actually ejected.\n  bool enforced = 8;\n\n  oneof event {\n    option (validate.required) = true;\n\n    OutlierEjectSuccessRate eject_success_rate_event = 9;\n\n    OutlierEjectConsecutive eject_consecutive_event = 10;\n\n    OutlierEjectFailurePercentage eject_failure_percentage_event = 11;\n  }\n}\n\nmessage OutlierEjectSuccessRate {\n  // Host’s success rate at the time of the ejection event on a 0-100 range.\n  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];\n\n  // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100\n  // range.\n  uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}];\n\n  // Success rate ejection threshold at the time of the ejection event.\n  uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}];\n}\n\nmessage OutlierEjectConsecutive {\n}\n\nmessage OutlierEjectFailurePercentage {\n  // Host's success rate at the time of the ejection event on 
a 0-100 range.\n  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/cluster/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/data/cluster/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.cluster.v3;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.cluster.v3\";\noption java_outer_classname = \"OutlierDetectionEventProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Outlier detection logging events]\n// :ref:`Outlier detection logging <arch_overview_outlier_detection_logging>`.\n\n// Type of ejection that took place\nenum OutlierEjectionType {\n  // In case upstream host returns certain number of consecutive 5xx.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all type of errors are treated as HTTP 5xx errors.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  // details.\n  CONSECUTIVE_5XX = 0;\n\n  // In case upstream host returns certain number of consecutive gateway errors\n  CONSECUTIVE_GATEWAY_FAILURE = 1;\n\n  // Runs over aggregated success rate statistics from every host in cluster\n  // and selects hosts for which ratio of successful replies deviates from other hosts\n  // in the cluster.\n  // If\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is *false*, all errors (externally and locally generated) are used to calculate success rate\n  // statistics. 
See :ref:`Cluster outlier detection <arch_overview_outlier_detection>`\n  // documentation for details.\n  SUCCESS_RATE = 2;\n\n  // Consecutive local origin failures: Connection failures, resets, timeouts, etc\n  // This type of ejection happens only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3;\n\n  // Runs over aggregated success rate statistics for local origin failures\n  // for all hosts in the cluster and selects hosts for which success rate deviates from other\n  // hosts in the cluster. This type of ejection happens only when\n  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>`\n  // is set to *true*.\n  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for\n  SUCCESS_RATE_LOCAL_ORIGIN = 4;\n\n  // Runs over aggregated success rate statistics from every host in cluster and selects hosts for\n  // which ratio of failed replies is above configured value.\n  FAILURE_PERCENTAGE = 5;\n\n  // Runs over aggregated success rate statistics for local origin failures from every host in\n  // cluster and selects hosts for which ratio of failed replies is above configured value.\n  FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6;\n}\n\n// Represents possible action applied to upstream host\nenum Action {\n  // In case host was excluded from service\n  EJECT = 0;\n\n  // In case host was brought back into service\n  UNEJECT = 1;\n}\n\n// [#next-free-field: 12]\nmessage OutlierDetectionEvent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.cluster.v2alpha.OutlierDetectionEvent\";\n\n  // In case of eject represents type of ejection that took 
place.\n  OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Timestamp for event.\n  google.protobuf.Timestamp timestamp = 2;\n\n  // The time in seconds since the last action (either an ejection or unejection) took place.\n  google.protobuf.UInt64Value secs_since_last_action = 3;\n\n  // The :ref:`cluster <envoy_api_msg_config.cluster.v3.Cluster>` that owns the ejected host.\n  string cluster_name = 4 [(validate.rules).string = {min_len: 1}];\n\n  // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``.\n  string upstream_url = 5 [(validate.rules).string = {min_len: 1}];\n\n  // The action that took place.\n  Action action = 6 [(validate.rules).enum = {defined_only: true}];\n\n  // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to\n  // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and\n  // then re-added).\n  uint32 num_ejections = 7;\n\n  // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was\n  // ejected. 
``false`` means the event was logged but the host was not actually ejected.\n  bool enforced = 8;\n\n  oneof event {\n    option (validate.required) = true;\n\n    OutlierEjectSuccessRate eject_success_rate_event = 9;\n\n    OutlierEjectConsecutive eject_consecutive_event = 10;\n\n    OutlierEjectFailurePercentage eject_failure_percentage_event = 11;\n  }\n}\n\nmessage OutlierEjectSuccessRate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.cluster.v2alpha.OutlierEjectSuccessRate\";\n\n  // Host’s success rate at the time of the ejection event on a 0-100 range.\n  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];\n\n  // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100\n  // range.\n  uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}];\n\n  // Success rate ejection threshold at the time of the ejection event.\n  uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}];\n}\n\nmessage OutlierEjectConsecutive {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.cluster.v2alpha.OutlierEjectConsecutive\";\n}\n\nmessage OutlierEjectFailurePercentage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.cluster.v2alpha.OutlierEjectFailurePercentage\";\n\n  // Host's success rate at the time of the ejection event on a 0-100 range.\n  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/core/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.core.v2alpha;\n\nimport \"envoy/api/v2/core/address.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.core.v2alpha\";\noption java_outer_classname = \"HealthCheckEventProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Health check logging events]\n// :ref:`Health check logging <arch_overview_health_check_logging>`.\n\nenum HealthCheckFailureType {\n  ACTIVE = 0;\n  PASSIVE = 1;\n  NETWORK = 2;\n}\n\nenum HealthCheckerType {\n  HTTP = 0;\n  TCP = 1;\n  GRPC = 2;\n  REDIS = 3;\n}\n\n// [#next-free-field: 10]\nmessage HealthCheckEvent {\n  HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  api.v2.core.Address host = 2;\n\n  string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}];\n\n  oneof event {\n    option (validate.required) = true;\n\n    // Host ejection.\n    HealthCheckEjectUnhealthy eject_unhealthy_event = 4;\n\n    // Host addition.\n    HealthCheckAddHealthy add_healthy_event = 5;\n\n    // Host failure.\n    HealthCheckFailure health_check_failure_event = 7;\n\n    // Healthy host became degraded.\n    DegradedHealthyHost degraded_healthy_host = 8;\n\n    // A degraded host returned to being healthy.\n    NoLongerDegradedHost no_longer_degraded_host = 9;\n  }\n\n  // Timestamp for event.\n  google.protobuf.Timestamp timestamp = 6;\n}\n\nmessage HealthCheckEjectUnhealthy {\n  // The type of failure that caused this ejection.\n  HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\nmessage HealthCheckAddHealthy {\n  // Whether this addition is the result of the first ever health check on a host, in which case\n  // the configured :ref:`healthy threshold 
<envoy_api_field_core.HealthCheck.healthy_threshold>`\n  // is bypassed and the host is immediately added.\n  bool first_check = 1;\n}\n\nmessage HealthCheckFailure {\n  // The type of failure that caused this event.\n  HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Whether this event is the result of the first ever health check on a host.\n  bool first_check = 2;\n}\n\nmessage DegradedHealthyHost {\n}\n\nmessage NoLongerDegradedHost {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/core/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/core/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/core/v3/health_check_event.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.core.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.core.v3\";\noption java_outer_classname = \"HealthCheckEventProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Health check logging events]\n// :ref:`Health check logging <arch_overview_health_check_logging>`.\n\nenum HealthCheckFailureType {\n  ACTIVE = 0;\n  PASSIVE = 1;\n  NETWORK = 2;\n}\n\nenum HealthCheckerType {\n  HTTP = 0;\n  TCP = 1;\n  GRPC = 2;\n  REDIS = 3;\n}\n\n// [#next-free-field: 10]\nmessage HealthCheckEvent {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.HealthCheckEvent\";\n\n  HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  config.core.v3.Address host = 2;\n\n  string cluster_name = 3 [(validate.rules).string = {min_len: 1}];\n\n  oneof event {\n    option (validate.required) = true;\n\n    // Host ejection.\n    HealthCheckEjectUnhealthy eject_unhealthy_event = 4;\n\n    // Host addition.\n    HealthCheckAddHealthy add_healthy_event = 5;\n\n    // Host failure.\n    HealthCheckFailure health_check_failure_event = 7;\n\n    // Healthy host became degraded.\n    DegradedHealthyHost degraded_healthy_host = 8;\n\n    // A degraded host returned to being healthy.\n    NoLongerDegradedHost no_longer_degraded_host = 9;\n  }\n\n  // Timestamp for event.\n  google.protobuf.Timestamp timestamp = 6;\n}\n\nmessage HealthCheckEjectUnhealthy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.HealthCheckEjectUnhealthy\";\n\n  // The type of failure that caused this ejection.\n  
HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];\n}\n\nmessage HealthCheckAddHealthy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.HealthCheckAddHealthy\";\n\n  // Whether this addition is the result of the first ever health check on a host, in which case\n  // the configured :ref:`healthy threshold <envoy_api_field_config.core.v3.HealthCheck.healthy_threshold>`\n  // is bypassed and the host is immediately added.\n  bool first_check = 1;\n}\n\nmessage HealthCheckFailure {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.HealthCheckFailure\";\n\n  // The type of failure that caused this event.\n  HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Whether this event is the result of the first ever health check on a host.\n  bool first_check = 2;\n}\n\nmessage DegradedHealthyHost {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.DegradedHealthyHost\";\n}\n\nmessage NoLongerDegradedHost {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.core.v2alpha.NoLongerDegradedHost\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/dns/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.dns.v2alpha;\n\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.dns.v2alpha\";\noption java_outer_classname = \"DnsTableProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: DNS Filter Table Data]\n// :ref:`DNS Filter config overview <config_udp_listener_filters_dns_filter>`.\n\n// This message contains the configuration for the DNS Filter if populated\n// from the control plane\nmessage DnsTable {\n  // This message contains a list of IP addresses returned for a query for a known name\n  message AddressList {\n    // This field contains a well formed IP address that is returned\n    // in the answer for a name query. The address field can be an\n    // IPv4 or IPv6 address. Address family detection is done automatically\n    // when Envoy parses the string. Since this field is repeated,\n    // Envoy will return one randomly chosen entry from this list in the\n    // DNS response. 
The random index will vary per query so that we prevent\n    // clients pinning on a single address for a configured domain\n    repeated string address = 1 [(validate.rules).repeated = {\n      min_items: 1\n      items {string {min_len: 3}}\n    }];\n  }\n\n  // This message type is extensible and can contain a list of addresses\n  // or dictate some other method for resolving the addresses for an\n  // endpoint\n  message DnsEndpoint {\n    oneof endpoint_config {\n      option (validate.required) = true;\n\n      AddressList address_list = 1;\n    }\n  }\n\n  message DnsVirtualDomain {\n    // The domain name for which Envoy will respond to query requests\n    string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The configuration containing the method to determine the address\n    // of this endpoint\n    DnsEndpoint endpoint = 2;\n\n    // Sets the TTL in dns answers from Envoy returned to the client\n    google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}];\n  }\n\n  // Control how many times envoy makes an attempt to forward a query to\n  // an external server\n  uint32 external_retry_count = 1;\n\n  // Fully qualified domain names for which Envoy will respond to queries\n  repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // This field serves to help Envoy determine whether it can authoritatively\n  // answer a query for a name matching a suffix in this list. If the query\n  // name does not match a suffix in this list, Envoy will forward\n  // the query to an upstream DNS server\n  repeated type.matcher.StringMatcher known_suffixes = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/dns/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/data/dns/v2alpha:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/dns/v3/dns_table.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.dns.v3;\n\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.dns.v3\";\noption java_outer_classname = \"DnsTableProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: DNS Filter Table Data]\n// :ref:`DNS Filter config overview <config_udp_listener_filters_dns_filter>`.\n\n// This message contains the configuration for the DNS Filter if populated\n// from the control plane\nmessage DnsTable {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.dns.v2alpha.DnsTable\";\n\n  // This message contains a list of IP addresses returned for a query for a known name\n  message AddressList {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v2alpha.DnsTable.AddressList\";\n\n    // This field contains a well formed IP address that is returned in the answer for a\n    // name query. The address field can be an IPv4 or IPv6 address. Address family\n    // detection is done automatically when Envoy parses the string. Since this field is\n    // repeated, Envoy will return as many entries from this list in the DNS response while\n    // keeping the response under 512 bytes\n    repeated string address = 1 [(validate.rules).repeated = {\n      min_items: 1\n      items {string {min_len: 3}}\n    }];\n  }\n\n  // Specify the service protocol using a numeric or string value\n  message DnsServiceProtocol {\n    oneof protocol_config {\n      option (validate.required) = true;\n\n      // Specify the protocol number for the service. Envoy will try to resolve the number to\n      // the protocol name. For example, 6 will resolve to \"tcp\". 
Refer to:\n      // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml\n      // for protocol names and numbers\n      uint32 number = 1 [(validate.rules).uint32 = {lt: 255}];\n\n      // Specify the protocol name for the service.\n      string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n    }\n  }\n\n  // Specify the target for a given DNS service\n  // [#next-free-field: 6]\n  message DnsServiceTarget {\n    // Specify the name of the endpoint for the Service. The name is a hostname or a cluster\n    oneof endpoint_type {\n      option (validate.required) = true;\n\n      // Use a resolvable hostname as the endpoint for a service.\n      string host_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n      // Use a cluster name as the endpoint for a service.\n      string cluster_name = 2\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n    }\n\n    // The priority of the service record target\n    uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}];\n\n    // The weight of the service record target\n    uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}];\n\n    // The port to which the service is bound. This value is optional if the target is a\n    // cluster. Setting port to zero in this case makes the filter use the port value\n    // from the cluster host\n    uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}];\n  }\n\n  // This message defines a service selection record returned for a service query in a domain\n  message DnsService {\n    // The name of the service without the protocol or domain name\n    string service_name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The service protocol. 
This can be specified as a string or the numeric value of the protocol\n    DnsServiceProtocol protocol = 2;\n\n    // The service entry time to live. This is independent from the DNS Answer record TTL\n    google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}];\n\n    // The list of targets hosting the service\n    repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Define a list of service records for a given service\n  message DnsServiceList {\n    repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  message DnsEndpoint {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v2alpha.DnsTable.DnsEndpoint\";\n\n    oneof endpoint_config {\n      option (validate.required) = true;\n\n      // Define a list of addresses to return for the specified endpoint\n      AddressList address_list = 1;\n\n      // Define a cluster whose addresses are returned for the specified endpoint\n      string cluster_name = 2;\n\n      // Define a DNS Service List for the specified endpoint\n      DnsServiceList service_list = 3;\n    }\n  }\n\n  message DnsVirtualDomain {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain\";\n\n    // A domain name for which Envoy will respond to query requests\n    string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The configuration containing the method to determine the address of this endpoint\n    DnsEndpoint endpoint = 2;\n\n    // Sets the TTL in DNS answers from Envoy returned to the client. 
The default TTL is 300s\n    google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}];\n  }\n\n  // Control how many times Envoy makes an attempt to forward a query to an external DNS server\n  uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}];\n\n  // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this\n  // list empty, Envoy will forward all queries to external resolvers\n  repeated DnsVirtualDomain virtual_domains = 2;\n\n  // This field serves to help Envoy determine whether it can authoritatively answer a query\n  // for a name matching a suffix in this list. If the query name does not match a suffix in\n  // this list, Envoy will forward the query to an upstream DNS server\n  repeated type.matcher.v3.StringMatcher known_suffixes = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/dns/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/data/dns/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.dns.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.dns.v4alpha\";\noption java_outer_classname = \"DnsTableProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: DNS Filter Table Data]\n// :ref:`DNS Filter config overview <config_udp_listener_filters_dns_filter>`.\n\n// This message contains the configuration for the DNS Filter if populated\n// from the control plane\nmessage DnsTable {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.dns.v3.DnsTable\";\n\n  // This message contains a list of IP addresses returned for a query for a known name\n  message AddressList {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.AddressList\";\n\n    // This field contains a well formed IP address that is returned in the answer for a\n    // name query. The address field can be an IPv4 or IPv6 address. Address family\n    // detection is done automatically when Envoy parses the string. 
Since this field is\n    // repeated, Envoy will return as many entries from this list in the DNS response while\n    // keeping the response under 512 bytes\n    repeated string address = 1 [(validate.rules).repeated = {\n      min_items: 1\n      items {string {min_len: 3}}\n    }];\n  }\n\n  // Specify the service protocol using a numeric or string value\n  message DnsServiceProtocol {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsServiceProtocol\";\n\n    oneof protocol_config {\n      option (validate.required) = true;\n\n      // Specify the protocol number for the service. Envoy will try to resolve the number to\n      // the protocol name. For example, 6 will resolve to \"tcp\". Refer to:\n      // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml\n      // for protocol names and numbers\n      uint32 number = 1 [(validate.rules).uint32 = {lt: 255}];\n\n      // Specify the protocol name for the service.\n      string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n    }\n  }\n\n  // Specify the target for a given DNS service\n  // [#next-free-field: 6]\n  message DnsServiceTarget {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsServiceTarget\";\n\n    // Specify the name of the endpoint for the Service. 
The name is a hostname or a cluster\n    oneof endpoint_type {\n      option (validate.required) = true;\n\n      // Use a resolvable hostname as the endpoint for a service.\n      string host_name = 1\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n      // Use a cluster name as the endpoint for a service.\n      string cluster_name = 2\n          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n    }\n\n    // The priority of the service record target\n    uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}];\n\n    // The weight of the service record target\n    uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}];\n\n    // The port to which the service is bound. This value is optional if the target is a\n    // cluster. Setting port to zero in this case makes the filter use the port value\n    // from the cluster host\n    uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}];\n  }\n\n  // This message defines a service selection record returned for a service query in a domain\n  message DnsService {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsService\";\n\n    // The name of the service without the protocol or domain name\n    string service_name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The service protocol. This can be specified as a string or the numeric value of the protocol\n    DnsServiceProtocol protocol = 2;\n\n    // The service entry time to live. 
This is independent from the DNS Answer record TTL\n    google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}];\n\n    // The list of targets hosting the service\n    repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Define a list of service records for a given service\n  message DnsServiceList {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsServiceList\";\n\n    repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  message DnsEndpoint {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsEndpoint\";\n\n    oneof endpoint_config {\n      option (validate.required) = true;\n\n      // Define a list of addresses to return for the specified endpoint\n      AddressList address_list = 1;\n\n      // Define a cluster whose addresses are returned for the specified endpoint\n      string cluster_name = 2;\n\n      // Define a DNS Service List for the specified endpoint\n      DnsServiceList service_list = 3;\n    }\n  }\n\n  message DnsVirtualDomain {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.dns.v3.DnsTable.DnsVirtualDomain\";\n\n    // A domain name for which Envoy will respond to query requests\n    string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];\n\n    // The configuration containing the method to determine the address of this endpoint\n    DnsEndpoint endpoint = 2;\n\n    // Sets the TTL in DNS answers from Envoy returned to the client. 
The default TTL is 300s\n    google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}];\n  }\n\n  // Control how many times Envoy makes an attempt to forward a query to an external DNS server\n  uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}];\n\n  // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this\n  // list empty, Envoy will forward all queries to external resolvers\n  repeated DnsVirtualDomain virtual_domains = 2;\n\n  // This field serves to help Envoy determine whether it can authoritatively answer a query\n  // for a name matching a suffix in this list. If the query name does not match a suffix in\n  // this list, Envoy will forward the query to an upstream DNS server\n  repeated type.matcher.v4alpha.StringMatcher known_suffixes = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v2alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v2alpha;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v2alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap common data]\n\n// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received\n// and transmitted data, etc.\nmessage Body {\n  oneof body_type {\n    // Body data as bytes. By default, tap body data will be present in this field, as the proto\n    // `bytes` type can contain any valid byte.\n    bytes as_bytes = 1;\n\n    // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING\n    // <envoy_api_enum_value_service.tap.v2alpha.OutputSink.Format.JSON_BODY_AS_STRING>` sink\n    // format type is selected. See the documentation for that option for why this is useful.\n    string as_string = 2;\n  }\n\n  // Specifies whether body data has been truncated to fit within the specified\n  // :ref:`max_buffered_rx_bytes\n  // <envoy_api_field_service.tap.v2alpha.OutputConfig.max_buffered_rx_bytes>` and\n  // :ref:`max_buffered_tx_bytes\n  // <envoy_api_field_service.tap.v2alpha.OutputConfig.max_buffered_tx_bytes>` settings.\n  bool truncated = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v2alpha/http.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/data/tap/v2alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v2alpha\";\noption java_outer_classname = \"HttpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP tap data]\n\n// A fully buffered HTTP trace message.\nmessage HttpBufferedTrace {\n  // HTTP message wrapper.\n  message Message {\n    // Message headers.\n    repeated api.v2.core.HeaderValue headers = 1;\n\n    // Message body.\n    Body body = 2;\n\n    // Message trailers.\n    repeated api.v2.core.HeaderValue trailers = 3;\n  }\n\n  // Request message.\n  Message request = 1;\n\n  // Response message.\n  Message response = 2;\n}\n\n// A streamed HTTP trace segment. Multiple segments make up a full trace.\n// [#next-free-field: 8]\nmessage HttpStreamedTraceSegment {\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness.\n  uint64 trace_id = 1;\n\n  oneof message_piece {\n    // Request headers.\n    api.v2.core.HeaderMap request_headers = 2;\n\n    // Request body chunk.\n    Body request_body_chunk = 3;\n\n    // Request trailers.\n    api.v2.core.HeaderMap request_trailers = 4;\n\n    // Response headers.\n    api.v2.core.HeaderMap response_headers = 5;\n\n    // Response body chunk.\n    Body response_body_chunk = 6;\n\n    // Response trailers.\n    api.v2.core.HeaderMap response_trailers = 7;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v2alpha/transport.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v2alpha;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/data/tap/v2alpha/common.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v2alpha\";\noption java_outer_classname = \"TransportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Transport tap data]\n// Trace format for the tap transport socket extension. This dumps plain text read/write\n// sequences on a socket.\n\n// Connection properties.\nmessage Connection {\n  // Local address.\n  api.v2.core.Address local_address = 2;\n\n  // Remote address.\n  api.v2.core.Address remote_address = 3;\n}\n\n// Event in a socket trace.\nmessage SocketEvent {\n  // Data read by Envoy from the transport socket.\n  message Read {\n    // TODO(htuch): Half-close for reads.\n\n    // Binary data read.\n    Body data = 1;\n  }\n\n  // Data written by Envoy to the transport socket.\n  message Write {\n    // Binary data written.\n    Body data = 1;\n\n    // Stream was half closed after this write.\n    bool end_stream = 2;\n  }\n\n  // The connection was closed.\n  message Closed {\n    // TODO(mattklein123): Close event type.\n  }\n\n  // Timestamp for event.\n  google.protobuf.Timestamp timestamp = 1;\n\n  // Read or write with content as bytes string.\n  oneof event_selector {\n    Read read = 2;\n\n    Write write = 3;\n\n    Closed closed = 4;\n  }\n}\n\n// Sequence of read/write events that constitute a buffered trace on a socket.\n// [#next-free-field: 6]\nmessage SocketBufferedTrace {\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness. 
Matches connection IDs used in Envoy logs.\n  uint64 trace_id = 1;\n\n  // Connection properties.\n  Connection connection = 2;\n\n  // Sequence of observed events.\n  repeated SocketEvent events = 3;\n\n  // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes\n  // <envoy_api_field_service.tap.v2alpha.OutputConfig.max_buffered_rx_bytes>` setting.\n  bool read_truncated = 4;\n\n  // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes\n  // <envoy_api_field_service.tap.v2alpha.OutputConfig.max_buffered_tx_bytes>` setting.\n  bool write_truncated = 5;\n}\n\n// A streamed socket trace segment. Multiple segments make up a full trace.\nmessage SocketStreamedTraceSegment {\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.\n  uint64 trace_id = 1;\n\n  oneof message_piece {\n    // Connection properties.\n    Connection connection = 2;\n\n    // Socket event.\n    SocketEvent event = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v2alpha;\n\nimport \"envoy/data/tap/v2alpha/http.proto\";\nimport \"envoy/data/tap/v2alpha/transport.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v2alpha\";\noption java_outer_classname = \"WrapperProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap data wrappers]\n\n// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for\n// sending traces over gRPC APIs or more easily persisting binary messages to files.\nmessage TraceWrapper {\n  oneof trace {\n    option (validate.required) = true;\n\n    // An HTTP buffered tap trace.\n    HttpBufferedTrace http_buffered_trace = 1;\n\n    // An HTTP streamed tap trace segment.\n    HttpStreamedTraceSegment http_streamed_trace_segment = 2;\n\n    // A socket buffered tap trace.\n    SocketBufferedTrace socket_buffered_trace = 3;\n\n    // A socket streamed tap trace segment.\n    SocketStreamedTraceSegment socket_streamed_trace_segment = 4;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v3/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v3\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap common data]\n\n// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received\n// and transmitted data, etc.\nmessage Body {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.tap.v2alpha.Body\";\n\n  oneof body_type {\n    // Body data as bytes. By default, tap body data will be present in this field, as the proto\n    // `bytes` type can contain any valid byte.\n    bytes as_bytes = 1;\n\n    // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING\n    // <envoy_api_enum_value_config.tap.v3.OutputSink.Format.JSON_BODY_AS_STRING>` sink\n    // format type is selected. See the documentation for that option for why this is useful.\n    string as_string = 2;\n  }\n\n  // Specifies whether body data has been truncated to fit within the specified\n  // :ref:`max_buffered_rx_bytes\n  // <envoy_api_field_config.tap.v3.OutputConfig.max_buffered_rx_bytes>` and\n  // :ref:`max_buffered_tx_bytes\n  // <envoy_api_field_config.tap.v3.OutputConfig.max_buffered_tx_bytes>` settings.\n  bool truncated = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v3/http.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/data/tap/v3/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v3\";\noption java_outer_classname = \"HttpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP tap data]\n\n// A fully buffered HTTP trace message.\nmessage HttpBufferedTrace {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.HttpBufferedTrace\";\n\n  // HTTP message wrapper.\n  message Message {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.tap.v2alpha.HttpBufferedTrace.Message\";\n\n    // Message headers.\n    repeated config.core.v3.HeaderValue headers = 1;\n\n    // Message body.\n    Body body = 2;\n\n    // Message trailers.\n    repeated config.core.v3.HeaderValue trailers = 3;\n  }\n\n  // Request message.\n  Message request = 1;\n\n  // Response message.\n  Message response = 2;\n}\n\n// A streamed HTTP trace segment. Multiple segments make up a full trace.\n// [#next-free-field: 8]\nmessage HttpStreamedTraceSegment {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.HttpStreamedTraceSegment\";\n\n  // Trace ID unique to the originating Envoy only. 
Trace IDs can repeat and should not be used\n  // for long term stable uniqueness.\n  uint64 trace_id = 1;\n\n  oneof message_piece {\n    // Request headers.\n    config.core.v3.HeaderMap request_headers = 2;\n\n    // Request body chunk.\n    Body request_body_chunk = 3;\n\n    // Request trailers.\n    config.core.v3.HeaderMap request_trailers = 4;\n\n    // Response headers.\n    config.core.v3.HeaderMap response_headers = 5;\n\n    // Response body chunk.\n    Body response_body_chunk = 6;\n\n    // Response trailers.\n    config.core.v3.HeaderMap response_trailers = 7;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v3/transport.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/data/tap/v3/common.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v3\";\noption java_outer_classname = \"TransportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Transport tap data]\n// Trace format for the tap transport socket extension. This dumps plain text read/write\n// sequences on a socket.\n\n// Connection properties.\nmessage Connection {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.tap.v2alpha.Connection\";\n\n  // Local address.\n  config.core.v3.Address local_address = 2;\n\n  // Remote address.\n  config.core.v3.Address remote_address = 3;\n}\n\n// Event in a socket trace.\nmessage SocketEvent {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.data.tap.v2alpha.SocketEvent\";\n\n  // Data read by Envoy from the transport socket.\n  message Read {\n    // TODO(htuch): Half-close for reads.\n\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.tap.v2alpha.SocketEvent.Read\";\n\n    // Binary data read.\n    Body data = 1;\n  }\n\n  // Data written by Envoy to the transport socket.\n  message Write {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.tap.v2alpha.SocketEvent.Write\";\n\n    // Binary data written.\n    Body data = 1;\n\n    // Stream was half closed after this write.\n    bool end_stream = 2;\n  }\n\n  // The connection was closed.\n  message Closed {\n    // TODO(mattklein123): Close event type.\n\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.data.tap.v2alpha.SocketEvent.Closed\";\n  }\n\n  // 
Timestamp for event.\n  google.protobuf.Timestamp timestamp = 1;\n\n  // Read or write with content as bytes string.\n  oneof event_selector {\n    Read read = 2;\n\n    Write write = 3;\n\n    Closed closed = 4;\n  }\n}\n\n// Sequence of read/write events that constitute a buffered trace on a socket.\n// [#next-free-field: 6]\nmessage SocketBufferedTrace {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.SocketBufferedTrace\";\n\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.\n  uint64 trace_id = 1;\n\n  // Connection properties.\n  Connection connection = 2;\n\n  // Sequence of observed events.\n  repeated SocketEvent events = 3;\n\n  // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes\n  // <envoy_api_field_config.tap.v3.OutputConfig.max_buffered_rx_bytes>` setting.\n  bool read_truncated = 4;\n\n  // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes\n  // <envoy_api_field_config.tap.v3.OutputConfig.max_buffered_tx_bytes>` setting.\n  bool write_truncated = 5;\n}\n\n// A streamed socket trace segment. Multiple segments make up a full trace.\nmessage SocketStreamedTraceSegment {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.SocketStreamedTraceSegment\";\n\n  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used\n  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.\n  uint64 trace_id = 1;\n\n  oneof message_piece {\n    // Connection properties.\n    Connection connection = 2;\n\n    // Socket event.\n    SocketEvent event = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/data/tap/v3/wrapper.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.data.tap.v3;\n\nimport \"envoy/data/tap/v3/http.proto\";\nimport \"envoy/data/tap/v3/transport.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.data.tap.v3\";\noption java_outer_classname = \"WrapperProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap data wrappers]\n\n// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for\n// sending traces over gRPC APIs or more easily persisting binary messages to files.\nmessage TraceWrapper {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.data.tap.v2alpha.TraceWrapper\";\n\n  oneof trace {\n    option (validate.required) = true;\n\n    // An HTTP buffered tap trace.\n    HttpBufferedTrace http_buffered_trace = 1;\n\n    // An HTTP streamed tap trace segment.\n    HttpStreamedTraceSegment http_streamed_trace_segment = 2;\n\n    // A socket buffered tap trace.\n    SocketBufferedTrace socket_buffered_trace = 3;\n\n    // A socket streamed tap trace segment.\n    SocketStreamedTraceSegment socket_streamed_trace_segment = 4;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v2:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.access_loggers.file.v3;\n\nimport \"envoy/config/core/v3/substitution_format_string.proto\";\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.access_loggers.file.v3\";\noption java_outer_classname = \"FileProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: File access log]\n// [#extension: envoy.access_loggers.file]\n\n// Custom configuration for an :ref:`AccessLog <envoy_api_msg_config.accesslog.v3.AccessLog>`\n// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file*\n// AccessLog.\n// [#next-free-field: 6]\nmessage FileAccessLog {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v2.FileAccessLog\";\n\n  // A path to a local file to which to write the access log entries.\n  string path = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof access_log_format {\n    // Access log :ref:`format string<config_access_log_format_strings>`.\n    // Envoy supports :ref:`custom access log formats <config_access_log_format>` as well as a\n    // :ref:`default format <config_access_log_default_format>`.\n    // This field is deprecated.\n    // Please use :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n    string format = 2 [deprecated = true];\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. 
All values\n    // are rendered as strings.\n    // This field is deprecated.\n    // Please use :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n    google.protobuf.Struct json_format = 3 [deprecated = true];\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. Values are\n    // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may\n    // be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). See the\n    // documentation for a specific command operator for details.\n    // This field is deprecated.\n    // Please use :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n    google.protobuf.Struct typed_json_format = 4 [deprecated = true];\n\n    // Configuration to form access log data and format.\n    // If not specified, use :ref:`default format <config_access_log_default_format>`.\n    config.core.v3.SubstitutionFormatString log_format = 5\n        [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/access_loggers/file/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.access_loggers.file.v4alpha;\n\nimport \"envoy/config/core/v4alpha/substitution_format_string.proto\";\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.access_loggers.file.v4alpha\";\noption java_outer_classname = \"FileProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: File access log]\n// [#extension: envoy.access_loggers.file]\n\n// Custom configuration for an :ref:`AccessLog <envoy_api_msg_config.accesslog.v4alpha.AccessLog>`\n// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file*\n// AccessLog.\n// [#next-free-field: 6]\nmessage FileAccessLog {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.access_loggers.file.v3.FileAccessLog\";\n\n  // A path to a local file to which to write the access log entries.\n  string path = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof access_log_format {\n    // Access log :ref:`format string<config_access_log_format_strings>`.\n    // Envoy supports :ref:`custom access log formats <config_access_log_format>` as well as a\n    // :ref:`default format <config_access_log_default_format>`.\n    // This field is deprecated.\n    // Please use :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n    string hidden_envoy_deprecated_format = 2 [deprecated = true];\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. 
All values\n    // are rendered as strings.\n    // This field is deprecated.\n    // Please use :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n    google.protobuf.Struct hidden_envoy_deprecated_json_format = 3 [deprecated = true];\n\n    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. Values are\n    // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may\n    // be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). See the\n    // documentation for a specific command operator for details.\n    // This field is deprecated.\n    // Please use :ref:`log_format <envoy_v3_api_field_extensions.access_loggers.file.v3.FileAccessLog.log_format>`.\n    google.protobuf.Struct hidden_envoy_deprecated_typed_json_format = 4 [deprecated = true];\n\n    // Configuration to form access log data and format.\n    // If not specified, use :ref:`default format <config_access_log_default_format>`.\n    config.core.v4alpha.SubstitutionFormatString log_format = 5\n        [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v2:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.access_loggers.grpc.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.access_loggers.grpc.v3\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Configuration for the built-in *envoy.access_loggers.http_grpc*\n// :ref:`AccessLog <envoy_api_msg_config.accesslog.v3.AccessLog>`. This configuration will\n// populate :ref:`StreamAccessLogsMessage.http_logs\n// <envoy_api_field_service.accesslog.v3.StreamAccessLogsMessage.http_logs>`.\n// [#extension: envoy.access_loggers.http_grpc]\nmessage HttpGrpcAccessLogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v2.HttpGrpcAccessLogConfig\";\n\n  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];\n\n  // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers\n  // <envoy_api_field_data.accesslog.v3.HTTPRequestProperties.request_headers>`.\n  repeated string additional_request_headers_to_log = 2;\n\n  // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers\n  // <envoy_api_field_data.accesslog.v3.HTTPResponseProperties.response_headers>`.\n  repeated string additional_response_headers_to_log = 3;\n\n  // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers\n  // <envoy_api_field_data.accesslog.v3.HTTPResponseProperties.response_trailers>`.\n  repeated string 
additional_response_trailers_to_log = 4;\n}\n\n// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will\n// populate *StreamAccessLogsMessage.tcp_logs*.\n// [#extension: envoy.access_loggers.tcp_grpc]\nmessage TcpGrpcAccessLogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v2.TcpGrpcAccessLogConfig\";\n\n  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];\n}\n\n// Common configuration for gRPC access logs.\n// [#next-free-field: 7]\nmessage CommonGrpcAccessLogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.accesslog.v2.CommonGrpcAccessLogConfig\";\n\n  // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier\n  // <envoy_api_msg_service.accesslog.v3.StreamAccessLogsMessage.Identifier>`. This allows the\n  // access log server to differentiate between different access logs coming from the same Envoy.\n  string log_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The gRPC service for the access log service.\n  config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n\n  // API version for access logs service transport protocol. This describes the access logs service\n  // gRPC endpoint and version of messages used on the wire.\n  config.core.v3.ApiVersion transport_api_version = 6\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time\n  // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to\n  // 1 second.\n  google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}];\n\n  // Soft size limit in bytes for access log entries buffer. 
Logger will buffer requests until\n  // this limit is hit, or every time flush interval is elapsed, whichever comes first. Setting it\n  // to zero effectively disables the batching. Defaults to 16384.\n  google.protobuf.UInt32Value buffer_size_bytes = 4;\n\n  // Additional filter state objects to log in :ref:`filter_state_objects\n  // <envoy_api_field_data.accesslog.v3.AccessLogCommon.filter_state_objects>`.\n  // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object.\n  repeated string filter_state_objects_to_log = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.access_loggers.wasm.v3;\n\nimport \"envoy/extensions/wasm/v3/wasm.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.access_loggers.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm access log]\n// [#extension: envoy.access_loggers.wasm]\n\n// Custom configuration for an :ref:`AccessLog <envoy_api_msg_config.accesslog.v3.AccessLog>`\n// that calls into a WASM VM. Configures the built-in *envoy.access_loggers.wasm*\n// AccessLog.\nmessage WasmAccessLog {\n  envoy.extensions.wasm.v3.PluginConfig config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/clusters/aggregate/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/cluster/aggregate/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.clusters.aggregate.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.clusters.aggregate.v3\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Aggregate cluster configuration]\n\n// Configuration for the aggregate cluster. See the :ref:`architecture overview\n// <arch_overview_aggregate_cluster>` for more information.\n// [#extension: envoy.clusters.aggregate]\nmessage ClusterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.aggregate.v2alpha.ClusterConfig\";\n\n  // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they\n  // appear in this list.\n  repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.clusters.dynamic_forward_proxy.v3;\n\nimport \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.clusters.dynamic_forward_proxy.v3\";\noption java_outer_classname = \"ClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamic forward proxy cluster configuration]\n\n// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#extension: envoy.clusters.dynamic_forward_proxy]\nmessage ClusterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig\";\n\n  // The DNS cache configuration that the cluster will attach to. Note this configuration must\n  // match that of associated :ref:`dynamic forward proxy HTTP filter configuration\n  // <envoy_api_field_extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n\n  // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options\n  // in the :ref:`cluster's upstream_http_protocol_options\n  // <envoy_api_field_config.cluster.v3.Cluster.upstream_http_protocol_options>`\n  bool allow_insecure_cluster_options = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/clusters/redis/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/cluster/redis:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.clusters.redis.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.clusters.redis.v3\";\noption java_outer_classname = \"RedisClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Redis Cluster Configuration]\n// This cluster adds support for `Redis Cluster <https://redis.io/topics/cluster-spec>`_, as part\n// of :ref:`Envoy's support for Redis Cluster <arch_overview_redis>`.\n//\n// Redis Cluster is an extension of Redis which supports sharding and high availability (where a\n// shard that loses its primary fails over to a replica, and designates it as the new primary).\n// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client\n// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the\n// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS\n// command <https://redis.io/commands/cluster-slots>`_. This result is then stored locally, and\n// updated at user-configured intervals.\n//\n// Additionally, if\n// :ref:`enable_redirection<envoy_api_field_extensions.filters.network.redis_proxy.v3.RedisProxy.ConnPoolSettings.enable_redirection>`\n// is true, then moved and ask redirection errors from upstream servers will trigger a topology\n// refresh when they exceed a user-configured error threshold.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     name: name\n//     connect_timeout: 0.25s\n//     dns_lookup_family: V4_ONLY\n//     hosts:\n//     - socket_address:\n//       address: foo.bar.com\n//       port_value: 22120\n//     cluster_type:\n//     name: envoy.clusters.redis\n//     typed_config:\n//       \"@type\": type.googleapis.com/google.protobuf.Struct\n//       value:\n//         cluster_refresh_rate: 30s\n//         cluster_refresh_timeout: 0.5s\n//         redirect_refresh_interval: 10s\n//         redirect_refresh_threshold: 10\n// [#extension: envoy.clusters.redis]\n\n// [#next-free-field: 7]\nmessage RedisClusterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.cluster.redis.RedisClusterConfig\";\n\n  // Interval between successive topology refresh requests. If not set, this defaults to 5s.\n  google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}];\n\n  // Timeout for topology refresh request. If not set, this defaults to 3s.\n  google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}];\n\n  // The minimum interval that must pass after triggering a topology refresh request before a new\n  // request can possibly be triggered again. Any errors received during one of these\n  // time intervals are ignored. If not set, this defaults to 5s.\n  google.protobuf.Duration redirect_refresh_interval = 3;\n\n  // The number of redirection errors that must be received before\n  // triggering a topology refresh request. 
If not set, this defaults to 5.\n  // If this is set to 0, topology refresh after redirect is disabled.\n  google.protobuf.UInt32Value redirect_refresh_threshold = 4;\n\n  // The number of failures that must be received before triggering a topology refresh request.\n  // If not set, this defaults to 0, which disables the topology refresh due to failure.\n  uint32 failure_refresh_threshold = 5;\n\n  // The number of hosts that must become degraded or unhealthy before triggering a topology\n  // refresh request.\n  // If not set, this defaults to 0, which disables the topology refresh due to degraded or\n  // unhealthy host.\n  uint32 host_degraded_refresh_threshold = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.common.dynamic_forward_proxy.v3;\n\nimport \"envoy/config/cluster/v3/cluster.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.common.dynamic_forward_proxy.v3\";\noption java_outer_classname = \"DnsCacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamic forward proxy common configuration]\n\n// Configuration of circuit breakers for resolver.\nmessage DnsCacheCircuitBreakers {\n  // The maximum number of pending requests that Envoy will allow to the\n  // resolver. If not specified, the default is 1024.\n  google.protobuf.UInt32Value max_pending_requests = 1;\n}\n\n// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#next-free-field: 9]\nmessage DnsCacheConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig\";\n\n  // The name of the cache. Multiple named caches allow independent dynamic forward proxy\n  // configurations to operate within a single Envoy process using different configurations. All\n  // configurations with the same name *must* otherwise have the same settings when referenced\n  // from different configuration components. Configuration will fail to load if this is not\n  // the case.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The DNS lookup family to use during resolution.\n  //\n  // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 \"happy eyeballs\" mode. 
The\n  // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and\n  // then configures a host to have a primary and fall back address. With this, we could very\n  // likely build a \"happy eyeballs\" connection pool which would race the primary / fall back\n  // address and return the one that wins. This same method could potentially also be used for\n  // QUIC to TCP fall back.]\n  config.cluster.v3.Cluster.DnsLookupFamily dns_lookup_family = 2\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s.\n  //\n  // .. note:\n  //\n  //  The returned DNS TTL is not currently used to alter the refresh rate. This feature will be\n  //  added in a future change.\n  //\n  // .. note:\n  //\n  // The refresh rate is rounded to the closest millisecond, and must be at least 1ms.\n  google.protobuf.Duration dns_refresh_rate = 3\n      [(validate.rules).duration = {gte {nanos: 1000000}}];\n\n  // The TTL for hosts that are unused. Hosts that have not been used in the configured time\n  // interval will be purged. If not specified defaults to 5m.\n  //\n  // .. note:\n  //\n  //   The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This\n  //   means that if the configured TTL is shorter than the refresh rate the host may not be removed\n  //   immediately.\n  //\n  //  .. note:\n  //\n  //   The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage.\n  google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}];\n\n  // The maximum number of hosts that the cache will hold. If not specified defaults to 1024.\n  //\n  // .. note:\n  //\n  //   The implementation is approximate and enforced independently on each worker thread, thus\n  //   it is possible for the maximum hosts in the cache to go slightly above the configured\n  //   value depending on timing. 
This is similar to how other circuit breakers work.\n  google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}];\n\n  // If the DNS failure refresh rate is specified,\n  // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is\n  // not specified, the failure refresh rate defaults to the dns_refresh_rate.\n  config.cluster.v3.Cluster.RefreshRate dns_failure_refresh_rate = 6;\n\n  // The config of circuit breakers for resolver. It provides a configurable threshold.\n  // If `envoy.reloadable_features.enable_dns_cache_circuit_breakers` is enabled,\n  // envoy will use dns cache circuit breakers with default settings even if this value is not set.\n  DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7;\n\n  // [#next-major-version: Reconcile DNS options in a single message.]\n  // Always use TCP queries instead of UDP queries for DNS lookups.\n  // Setting this value causes failure if the\n  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during\n  // server startup. Apple's API only uses UDP for DNS resolution.\n  bool use_tcp_for_dns_lookups = 8;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/ratelimit:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.common.ratelimit.v3;\n\nimport \"envoy/type/v3/ratelimit_unit.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.common.ratelimit.v3\";\noption java_outer_classname = \"RatelimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common rate limit components]\n\n// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to\n// determine the final rate limit key and overall allowed limit. Here are some examples of how\n// they might be used for the domain \"envoy\".\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"remote_address\": \"10.0.0.1\"]\n//\n// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The\n// configuration supplies a default limit for the *remote_address* key. If there is a desire to\n// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the\n// configuration.\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"path\": \"/foo/bar\"]\n//\n// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if\n// configured that way in the service).\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"false\"], [\"path\": \"/foo/bar\"], [\"remote_address\": \"10.0.0.1\"]\n//\n// What it does: Limits unauthenticated traffic to a specific path for a specific IP address.\n// Like (1) we can raise/block specific IP addresses if we want with an override configuration.\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"true\"], [\"client_id\": \"foo\"]\n//\n// What it does: Limits all traffic for an authenticated client \"foo\"\n//\n// .. code-block:: cpp\n//\n//   [\"authenticated\": \"true\"], [\"client_id\": \"foo\"], [\"path\": \"/foo/bar\"]\n//\n// What it does: Limits traffic to a specific path for an authenticated client \"foo\"\n//\n// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired.\n// This enables building complex application scenarios with a generic backend.\n//\n// Optionally the descriptor can contain a limit override under a \"limit\" key, that specifies\n// the number of requests per unit to use instead of the number configured in the\n// rate limiting service.\nmessage RateLimitDescriptor {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.ratelimit.RateLimitDescriptor\";\n\n  message Entry {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.ratelimit.RateLimitDescriptor.Entry\";\n\n    // Descriptor key.\n    string key = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Descriptor value.\n    string value = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Override rate limit to apply to this descriptor instead of the limit\n  // configured in the rate limit service. See :ref:`rate limit override\n  // <config_http_filters_rate_limit_rate_limit_override>` for more information.\n  message RateLimitOverride {\n    // The number of requests per unit of time.\n    uint32 requests_per_unit = 1;\n\n    // The unit of time.\n    type.v3.RateLimitUnit unit = 2 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  // Descriptor entries.\n  repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}];\n\n  // Optional rate limit override to supply to the ratelimit service.\n  RateLimitOverride limit = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/common/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/common/tap/v2alpha:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/common/tap/v3/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.common.tap.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/tap/v3/common.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.common.tap.v3\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common tap extension configuration]\n\n// Common configuration for all tap extensions.\nmessage CommonExtensionConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.tap.v2alpha.CommonExtensionConfig\";\n\n  // [#not-implemented-hide:]\n  message TapDSConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.common.tap.v2alpha.CommonExtensionConfig.TapDSConfig\";\n\n    // Configuration for the source of TapDS updates for this Cluster.\n    config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n    // Tap config to request from XDS server.\n    string name = 2 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n    // Resource locator for TAP. This is mutually exclusive to *name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator tap_resource_locator = 3\n        [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n  }\n\n  oneof config_type {\n    option (validate.required) = true;\n\n    // If specified, the tap filter will be configured via an admin handler.\n    AdminConfig admin_config = 1;\n\n    // If specified, the tap filter will be configured via a static configuration that cannot be\n    // changed.\n    config.tap.v3.TapConfig static_config = 2;\n\n    // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter.\n    TapDSConfig tapds_config = 3;\n  }\n}\n\n// Configuration for the admin handler. See :ref:`here <config_http_filters_tap_admin_handler>` for\n// more information.\nmessage AdminConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.common.tap.v2alpha.AdminConfig\";\n\n  // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is\n  // matched to the configured filter opaque ID to determine which filter to configure.\n  string config_id = 1 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/tap/v4alpha:pkg\",\n        \"//envoy/extensions/common/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.common.tap.v4alpha;\n\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/tap/v4alpha/common.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.common.tap.v4alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common tap extension configuration]\n\n// Common configuration for all tap extensions.\nmessage CommonExtensionConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.common.tap.v3.CommonExtensionConfig\";\n\n  // [#not-implemented-hide:]\n  message TapDSConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.common.tap.v3.CommonExtensionConfig.TapDSConfig\";\n\n    // Configuration for the source of TapDS updates for this Cluster.\n    config.core.v4alpha.ConfigSource config_source = 1\n        [(validate.rules).message = {required: true}];\n\n    oneof name_specifier {\n      // Tap config to request from XDS server.\n      string name = 2;\n\n      // Resource locator for TAP. This is mutually exclusive to *name*.\n      // [#not-implemented-hide:]\n      udpa.core.v1.ResourceLocator tap_resource_locator = 3;\n    }\n  }\n\n  oneof config_type {\n    option (validate.required) = true;\n\n    // If specified, the tap filter will be configured via an admin handler.\n    AdminConfig admin_config = 1;\n\n    // If specified, the tap filter will be configured via a static configuration that cannot be\n    // changed.\n    config.tap.v4alpha.TapConfig static_config = 2;\n\n    // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter.\n    TapDSConfig tapds_config = 3;\n  }\n}\n\n// Configuration for the admin handler. See :ref:`here <config_http_filters_tap_admin_handler>` for\n// more information.\nmessage AdminConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.common.tap.v3.AdminConfig\";\n\n  // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is\n  // matched to the configured filter opaque ID to determine which filter to configure.\n  string config_id = 1 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.compression.gzip.compressor.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.compression.gzip.compressor.v3\";\noption java_outer_classname = \"GzipProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Gzip Compressor]\n// [#extension: envoy.compression.gzip.compressor]\n\n// [#next-free-field: 6]\nmessage Gzip {\n  // All the values of this enumeration translate directly to zlib's compression strategies.\n  // For more information about each strategy, please refer to zlib manual.\n  enum CompressionStrategy {\n    DEFAULT_STRATEGY = 0;\n    FILTERED = 1;\n    HUFFMAN_ONLY = 2;\n    RLE = 3;\n    FIXED = 4;\n  }\n\n  enum CompressionLevel {\n    option allow_alias = true;\n\n    DEFAULT_COMPRESSION = 0;\n    BEST_SPEED = 1;\n    COMPRESSION_LEVEL_1 = 1;\n    COMPRESSION_LEVEL_2 = 2;\n    COMPRESSION_LEVEL_3 = 3;\n    COMPRESSION_LEVEL_4 = 4;\n    COMPRESSION_LEVEL_5 = 5;\n    COMPRESSION_LEVEL_6 = 6;\n    COMPRESSION_LEVEL_7 = 7;\n    COMPRESSION_LEVEL_8 = 8;\n    COMPRESSION_LEVEL_9 = 9;\n    BEST_COMPRESSION = 9;\n  }\n\n  // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values\n  // use more memory, but are faster and produce better compression results. The default value is 5.\n  google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}];\n\n  // A value used for selecting the zlib compression level. This setting will affect speed and\n  // amount of compression applied to the content. \"BEST_COMPRESSION\" provides higher compression\n  // at the cost of higher latency and is equal to \"COMPRESSION_LEVEL_9\". \"BEST_SPEED\" provides\n  // lower compression with minimum impact on response time, the same as \"COMPRESSION_LEVEL_1\".\n  // \"DEFAULT_COMPRESSION\" provides an optimal result between speed and compression. According\n  // to zlib's manual this level gives the same result as \"COMPRESSION_LEVEL_6\".\n  // This field will be set to \"DEFAULT_COMPRESSION\" if not specified.\n  CompressionLevel compression_level = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // A value used for selecting the zlib compression strategy which is directly related to the\n  // characteristics of the content. Most of the time \"DEFAULT_STRATEGY\" will be the best choice,\n  // which is also the default value for the parameter, though there are situations when\n  // changing this parameter might produce better results. For example, run-length encoding (RLE)\n  // is typically used when the content is known for having sequences which same data occurs many\n  // consecutive times. For more information about each strategy, please refer to zlib manual.\n  CompressionStrategy compression_strategy = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size.\n  // Larger window results in better compression at the expense of memory usage. The default is 12\n  // which will produce a 4096 bytes window. For more details about this parameter, please refer to\n  // zlib manual > deflateInit2.\n  google.protobuf.UInt32Value window_bits = 4 [(validate.rules).uint32 = {lte: 15 gte: 9}];\n\n  // Value for Zlib's next output buffer. If not set, defaults to 4096.\n  // See https://www.zlib.net/manual.html for more details. Also see\n  // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance.\n  google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.compression.gzip.decompressor.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.compression.gzip.decompressor.v3\";\noption java_outer_classname = \"GzipProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Gzip Decompressor]\n// [#extension: envoy.compression.gzip.decompressor]\n\nmessage Gzip {\n  // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size.\n  // The decompression window size needs to be equal or larger than the compression window size.\n  // The default is 12 to match the default in the\n  // :ref:`gzip compressor <envoy_api_field_extensions.compression.gzip.compressor.v3.Gzip.window_bits>`.\n  // For more details about this parameter, please refer to `zlib manual <https://www.zlib.net/manual.html>`_ > inflateInit2.\n  google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}];\n\n  // Value for zlib's decompressor output buffer. If not set, defaults to 4096.\n  // See https://www.zlib.net/manual.html for more details.\n  google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/common/fault/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/filter/fault/v2:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.common.fault.v3;\n\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.common.fault.v3\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common fault injection types]\n\n// Delay specification is used to inject latency into the\n// HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections.\n// [#next-free-field: 6]\nmessage FaultDelay {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.fault.v2.FaultDelay\";\n\n  enum FaultDelayType {\n    // Unused and deprecated.\n    FIXED = 0;\n  }\n\n  // Fault delays are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderDelay {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.fault.v2.FaultDelay.HeaderDelay\";\n  }\n\n  reserved 2;\n\n  oneof fault_delay_secifier {\n    option (validate.required) = true;\n\n    // Add a fixed delay before forwarding the operation upstream. See\n    // https://developers.google.com/protocol-buffers/docs/proto3#json for\n    // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified\n    // delay will be injected before a new request/operation. For TCP\n    // connections, the proxying of the connection upstream will be delayed\n    // for the specified period. This is required if type is FIXED.\n    google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}];\n\n    // Fault delays are controlled via an HTTP header (if applicable).\n    HeaderDelay header_delay = 5;\n  }\n\n  // The percentage of operations/connections/requests on which the delay will be injected.\n  type.v3.FractionalPercent percentage = 4;\n\n  FaultDelayType hidden_envoy_deprecated_type = 1\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// Describes a rate limit to be applied.\nmessage FaultRateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.fault.v2.FaultRateLimit\";\n\n  // Describes a fixed/constant rate limit.\n  message FixedLimit {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.fault.v2.FaultRateLimit.FixedLimit\";\n\n    // The limit supplied in KiB/s.\n    uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}];\n  }\n\n  // Rate limits are controlled via an HTTP header (if applicable). See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderLimit {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.fault.v2.FaultRateLimit.HeaderLimit\";\n  }\n\n  oneof limit_type {\n    option (validate.required) = true;\n\n    // A fixed rate limit.\n    FixedLimit fixed_limit = 1;\n\n    // Rate limits are controlled via an HTTP header (if applicable).\n    HeaderLimit header_limit = 3;\n  }\n\n  // The percentage of operations/connections/requests on which the rate limit will be injected.\n  type.v3.FractionalPercent percentage = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.adaptive_concurrency.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.adaptive_concurrency.v3\";\noption java_outer_classname = \"AdaptiveConcurrencyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Adaptive Concurrency]\n// Adaptive Concurrency Control :ref:`configuration overview\n// <config_http_filters_adaptive_concurrency>`.\n// [#extension: envoy.filters.http.adaptive_concurrency]\n\n// Configuration parameters for the gradient controller.\nmessage GradientControllerConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig\";\n\n  // Parameters controlling the periodic recalculation of the concurrency limit from sampled request\n  // latencies.\n  message ConcurrencyLimitCalculationParams {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig.\"\n        \"ConcurrencyLimitCalculationParams\";\n\n    // The allowed upper-bound on the calculated concurrency limit. Defaults to 1000.\n    google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}];\n\n    // The period of time samples are taken to recalculate the concurrency limit.\n    google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = {\n      required: true\n      gt {}\n    }];\n  }\n\n  // Parameters controlling the periodic minRTT recalculation.\n  // [#next-free-field: 6]\n  message MinimumRTTCalculationParams {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig.\"\n        \"MinimumRTTCalculationParams\";\n\n    // The time interval between recalculating the minimum request round-trip time. Has to be\n    // positive.\n    google.protobuf.Duration interval = 1 [(validate.rules).duration = {\n      required: true\n      gte {nanos: 1000000}\n    }];\n\n    // The number of requests to aggregate/sample during the minRTT recalculation window before\n    // updating. Defaults to 50.\n    google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}];\n\n    // Randomized time delta that will be introduced to the start of the minRTT calculation window.\n    // This is represented as a percentage of the interval duration. Defaults to 15%.\n    //\n    // Example: If the interval is 10s and the jitter is 15%, the next window will begin\n    // somewhere in the range (10s - 11.5s).\n    type.v3.Percent jitter = 3;\n\n    // The concurrency limit set while measuring the minRTT. Defaults to 3.\n    google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}];\n\n    // Amount added to the measured minRTT to add stability to the concurrency limit during natural\n    // variability in latency. This is expressed as a percentage of the measured value and can be\n    // adjusted to allow more or less tolerance to the sampled latency values.\n    //\n    // Defaults to 25%.\n    type.v3.Percent buffer = 5;\n  }\n\n  // The percentile to use when summarizing aggregated samples. Defaults to p50.\n  type.v3.Percent sample_aggregate_percentile = 1;\n\n  ConcurrencyLimitCalculationParams concurrency_limit_params = 2\n      [(validate.rules).message = {required: true}];\n\n  MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}];\n}\n\nmessage AdaptiveConcurrency {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency\";\n\n  oneof concurrency_controller_config {\n    option (validate.required) = true;\n\n    // Gradient concurrency control will be used.\n    GradientControllerConfig gradient_controller_config = 1\n        [(validate.rules).message = {required: true}];\n  }\n\n  // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the\n  // message is unspecified, the filter will be enabled.\n  config.core.v3.RuntimeFeatureFlag enabled = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.admission_control.v3alpha;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha\";\noption java_outer_classname = \"AdmissionControlProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Admission Control]\n// [#extension: envoy.filters.http.admission_control]\n\n// [#next-free-field: 6]\nmessage AdmissionControl {\n  // Default method of specifying what constitutes a successful request. All status codes that\n  // indicate a successful request must be explicitly specified if not relying on the default\n  // values.\n  message SuccessCriteria {\n    message HttpCriteria {\n      // Status code ranges that constitute a successful request. Configurable codes are in the\n      // range [100, 600).\n      repeated type.v3.Int32Range http_success_status = 1\n          [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    message GrpcCriteria {\n      // Status codes that constitute a successful request.\n      // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.\n      repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}];\n    }\n\n    // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful\n    // responses.\n    //\n    // .. note::\n    //\n    //    The default HTTP codes considered successful by the admission controller are done so due\n    //    to the unlikelihood that sending fewer requests would change their behavior (for example:\n    //    redirects, unauthorized access, or bad requests won't be alleviated by sending less\n    //    traffic).\n    HttpCriteria http_criteria = 1;\n\n    // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok,\n    // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated,\n    // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented.\n    //\n    // .. note::\n    //\n    //    The default gRPC codes that are considered successful by the admission controller are\n    //    chosen because of the unlikelihood that sending fewer requests will change the behavior.\n    GrpcCriteria grpc_criteria = 2;\n  }\n\n  // If set to false, the admission control filter will operate as a pass-through filter. If the\n  // message is unspecified, the filter will be enabled.\n  config.core.v3.RuntimeFeatureFlag enabled = 1;\n\n  // Defines how a request is considered a success/failure.\n  oneof evaluation_criteria {\n    option (validate.required) = true;\n\n    SuccessCriteria success_criteria = 2;\n  }\n\n  // The sliding time window over which the success rate is calculated. The window is rounded to the\n  // nearest second. Defaults to 30s.\n  google.protobuf.Duration sampling_window = 3;\n\n  // Rejection probability is defined by the formula::\n  //\n  //     max(0, (rq_count -  rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression)\n  //\n  // The aggression dictates how heavily the admission controller will throttle requests upon SR\n  // dropping at or below the threshold. A value of 1 will result in a linear increase in\n  // rejection probability as SR drops. Any values less than 1.0, will be set to 1.0. If the\n  // message is unspecified, the aggression is 1.0. See `the admission control documentation\n  // <https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/admission_control_filter.html>`_\n  // for a diagram illustrating this.\n  config.core.v3.RuntimeDouble aggression = 4;\n\n  // Dictates the success rate at which the rejection probability is non-zero. As success rate drops\n  // below this threshold, rejection probability will increase. Any success rate above the threshold\n  // results in a rejection probability of 0. Defaults to 95%.\n  config.core.v3.RuntimePercent sr_threshold = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/aws_lambda/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.aws_lambda.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.aws_lambda.v3\";\noption java_outer_classname = \"AwsLambdaProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: AWS Lambda]\n// AWS Lambda :ref:`configuration overview <config_http_filters_aws_lambda>`.\n// [#extension: envoy.filters.http.aws_lambda]\n\n// AWS Lambda filter config\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.aws_lambda.v2alpha.Config\";\n\n  enum InvocationMode {\n    // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In\n    // this mode the output of the Lambda function becomes the response of the HTTP request.\n    SYNCHRONOUS = 0;\n\n    // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be\n    // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the\n    // call which is translated to an HTTP 200 OK by the filter.\n    ASYNCHRONOUS = 1;\n  }\n\n  // The ARN of the AWS Lambda to invoke when the filter is engaged\n  // Must be in the following format:\n  // arn:<partition>:lambda:<region>:<account-number>:function:<function-name>\n  string arn = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Whether to transform the request (headers and body) to a JSON payload or pass it as is.\n  bool payload_passthrough = 2;\n\n  // Determines the way to invoke the Lambda function.\n  InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}];\n}\n\n// Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different\n// version of the same Lambda depending on the route.\nmessage PerRouteConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.aws_lambda.v2alpha.PerRouteConfig\";\n\n  Config invoke_config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/aws_request_signing/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.aws_request_signing.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.aws_request_signing.v3\";\noption java_outer_classname = \"AwsRequestSigningProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: AwsRequestSigning]\n// AwsRequestSigning :ref:`configuration overview <config_http_filters_aws_request_signing>`.\n// [#extension: envoy.filters.http.aws_request_signing]\n\n// Top level configuration for the AWS request signing filter.\nmessage AwsRequestSigning {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.aws_request_signing.v2alpha.AwsRequestSigning\";\n\n  // The `service namespace\n  // <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_\n  // of the HTTP endpoint.\n  //\n  // Example: s3\n  string service_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the HTTP\n  // endpoint.\n  //\n  // Example: us-west-2\n  string region = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Indicates that before signing headers, the host header will be swapped with\n  // this value. If not set or empty, the original host header value\n  // will be used and no rewrite will happen.\n  //\n  // Note: this rewrite affects both signing and host header forwarding. 
However, this\n  // option shouldn't be used with\n  // :ref:`HCM host rewrite <envoy_api_field_config.route.v3.RouteAction.host_rewrite_literal>` given that the\n  // value set here would be used for signing whereas the value set in the HCM would be used\n  // for host header forwarding which is not the desired outcome.\n  string host_rewrite = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/buffer/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/buffer/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.buffer.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.buffer.v3\";\noption java_outer_classname = \"BufferProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Buffer]\n// Buffer :ref:`configuration overview <config_http_filters_buffer>`.\n// [#extension: envoy.filters.http.buffer]\n\nmessage Buffer {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.buffer.v2.Buffer\";\n\n  reserved 2;\n\n  // The maximum request size that the filter will buffer before the connection\n  // manager will stop buffering and return a 413 response.\n  google.protobuf.UInt32Value max_request_bytes = 1\n      [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}];\n}\n\nmessage BufferPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.buffer.v2.BufferPerRoute\";\n\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the buffer filter for this particular vhost or route.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Override the global configuration of the filter with this new config.\n    Buffer buffer = 2 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/cache/v2alpha:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.cache.v3alpha;\n\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha\";\noption java_outer_classname = \"CacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP Cache Filter]\n// [#extension: envoy.filters.http.cache]\n\nmessage CacheConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.cache.v2alpha.CacheConfig\";\n\n  // [#not-implemented-hide:]\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  message KeyCreatorParams {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.cache.v2alpha.CacheConfig.KeyCreatorParams\";\n\n    // If true, exclude the URL scheme from the cache key. Set to true if your origins always\n    // produce the same response for http and https requests.\n    bool exclude_scheme = 1;\n\n    // If true, exclude the host from the cache key. Set to true if your origins' responses don't\n    // ever depend on host.\n    bool exclude_host = 2;\n\n    // If *query_parameters_included* is nonempty, only query parameters matched\n    // by one or more of its matchers are included in the cache key. 
Any other\n    // query params will not affect cache lookup.\n    repeated config.route.v3.QueryParameterMatcher query_parameters_included = 3;\n\n    // If *query_parameters_excluded* is nonempty, query parameters matched by one\n    // or more of its matchers are excluded from the cache key (even if also\n    // matched by *query_parameters_included*), and will not affect cache lookup.\n    repeated config.route.v3.QueryParameterMatcher query_parameters_excluded = 4;\n  }\n\n  // Config specific to the cache storage implementation.\n  google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];\n\n  // List of matching rules that defines allowed *Vary* headers.\n  //\n  // The *vary* response header holds a list of header names that affect the\n  // contents of a response, as described by\n  // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.\n  //\n  // During insertion, *allowed_vary_headers* acts as a allowlist: if a\n  // response's *vary* header mentions any header names that aren't matched by any rules in\n  // *allowed_vary_headers*, that response will not be cached.\n  //\n  // During lookup, *allowed_vary_headers* controls what request headers will be\n  // sent to the cache storage implementation.\n  repeated type.matcher.v3.StringMatcher allowed_vary_headers = 2;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement key customization>\n  //\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  KeyCreatorParams key_creator_params = 3;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement size limit>\n  //\n  // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache\n  // storage implementation may have its own limit beyond which it will reject insertions).\n  uint32 max_body_bytes = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/cache/v3alpha:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.cache.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.cache.v4alpha\";\noption java_outer_classname = \"CacheProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP Cache Filter]\n// [#extension: envoy.filters.http.cache]\n\nmessage CacheConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.cache.v3alpha.CacheConfig\";\n\n  // [#not-implemented-hide:]\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  message KeyCreatorParams {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.http.cache.v3alpha.CacheConfig.KeyCreatorParams\";\n\n    // If true, exclude the URL scheme from the cache key. Set to true if your origins always\n    // produce the same response for http and https requests.\n    bool exclude_scheme = 1;\n\n    // If true, exclude the host from the cache key. Set to true if your origins' responses don't\n    // ever depend on host.\n    bool exclude_host = 2;\n\n    // If *query_parameters_included* is nonempty, only query parameters matched\n    // by one or more of its matchers are included in the cache key. 
Any other\n    // query params will not affect cache lookup.\n    repeated config.route.v4alpha.QueryParameterMatcher query_parameters_included = 3;\n\n    // If *query_parameters_excluded* is nonempty, query parameters matched by one\n    // or more of its matchers are excluded from the cache key (even if also\n    // matched by *query_parameters_included*), and will not affect cache lookup.\n    repeated config.route.v4alpha.QueryParameterMatcher query_parameters_excluded = 4;\n  }\n\n  // Config specific to the cache storage implementation.\n  google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];\n\n  // List of matching rules that defines allowed *Vary* headers.\n  //\n  // The *vary* response header holds a list of header names that affect the\n  // contents of a response, as described by\n  // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.\n  //\n  // During insertion, *allowed_vary_headers* acts as a allowlist: if a\n  // response's *vary* header mentions any header names that aren't matched by any rules in\n  // *allowed_vary_headers*, that response will not be cached.\n  //\n  // During lookup, *allowed_vary_headers* controls what request headers will be\n  // sent to the cache storage implementation.\n  repeated type.matcher.v4alpha.StringMatcher allowed_vary_headers = 2;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement key customization>\n  //\n  // Modifies cache key creation by restricting which parts of the URL are included.\n  KeyCreatorParams key_creator_params = 3;\n\n  // [#not-implemented-hide:]\n  // <TODO(toddmgreer) implement size limit>\n  //\n  // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache\n  // storage implementation may have its own limit beyond which it will reject insertions).\n  uint32 max_body_bytes = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.cdn_loop.v3alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha\";\noption java_outer_classname = \"CdnLoopProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP CDN-Loop Filter]\n// [#extension: envoy.filters.http.cdn_loop]\n\n// CDN-Loop Header filter config. See the :ref:`configuration overview\n// <config_http_filters_cdn_loop>` for more information.\nmessage CdnLoopConfig {\n  // The CDN identifier to use for loop checks and to append to the\n  // CDN-Loop header.\n  //\n  // RFC 8586 calls this the cdn-id. The cdn-id can either be a\n  // pseudonym or hostname the CDN is in control of.\n  //\n  // cdn_id must not be empty.\n  string cdn_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The maximum allowed count of cdn_id in the downstream CDN-Loop\n  // request header.\n  //\n  // The default of 0 means a request can transit the CdnLoopFilter\n  // once. A value of 1 means that a request can transit the\n  // CdnLoopFilter twice and so on.\n  uint32 max_allowed_occurrences = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/compressor/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.compressor.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.compressor.v3\";\noption java_outer_classname = \"CompressorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Compressor]\n// Compressor :ref:`configuration overview <config_http_filters_compressor>`.\n// [#extension: envoy.filters.http.compressor]\n\n// [#next-free-field: 7]\nmessage Compressor {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.compressor.v2.Compressor\";\n\n  // Minimum response length, in bytes, which will trigger compression. The default value is 30.\n  google.protobuf.UInt32Value content_length = 1;\n\n  // Set of strings that allows specifying which mime-types yield compression; e.g.,\n  // application/json, text/html, etc. When this field is not defined, compression will be applied\n  // to the following mime-types: \"application/javascript\", \"application/json\",\n  // \"application/xhtml+xml\", \"image/svg+xml\", \"text/css\", \"text/html\", \"text/plain\", \"text/xml\"\n  // and their synonyms.\n  repeated string content_type = 2;\n\n  // If true, disables compression when the response contains an etag header. 
When it is false, the\n  // filter will preserve weak etags and remove the ones that require strong validation.\n  bool disable_on_etag_header = 3;\n\n  // If true, removes accept-encoding from the request headers before dispatching it to the upstream\n  // so that responses do not get compressed before reaching the filter.\n  // .. attention:\n  //\n  //    To avoid interfering with other compression filters in the same chain use this option in\n  //    the filter closest to the upstream.\n  bool remove_accept_encoding_header = 4;\n\n  // Runtime flag that controls whether the filter is enabled or not. If set to false, the\n  // filter will operate as a pass-through filter. If not specified, defaults to enabled.\n  config.core.v3.RuntimeFeatureFlag runtime_enabled = 5;\n\n  // A compressor library to use for compression. Currently only\n  // :ref:`envoy.compression.gzip.compressor<envoy_api_msg_extensions.compression.gzip.compressor.v3.Gzip>`\n  // is included in Envoy.\n  // This field is ignored if used in the context of the gzip http-filter, but is mandatory otherwise.\n  config.core.v3.TypedExtensionConfig compressor_library = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/cors/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/cors/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.cors.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.cors.v3\";\noption java_outer_classname = \"CorsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Cors]\n// CORS Filter :ref:`configuration overview <config_http_filters_cors>`.\n// [#extension: envoy.filters.http.cors]\n\n// Cors filter config.\nmessage Cors {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.cors.v2.Cors\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/csrf/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/csrf/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.csrf.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.csrf.v3\";\noption java_outer_classname = \"CsrfProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: CSRF]\n// Cross-Site Request Forgery :ref:`configuration overview <config_http_filters_csrf>`.\n// [#extension: envoy.filters.http.csrf]\n\n// CSRF filter config.\nmessage CsrfPolicy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.csrf.v2.CsrfPolicy\";\n\n  // Specifies the % of requests for which the CSRF filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // .. 
note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.v3.FractionalPercent.DenominatorType>`.\n  config.core.v3.RuntimeFractionalPercent filter_enabled = 1\n      [(validate.rules).message = {required: true}];\n\n  // Specifies that CSRF policies will be evaluated and tracked, but not enforced.\n  //\n  // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* and *Destination* to determine if it's valid, but will not\n  // enforce any policies.\n  config.core.v3.RuntimeFractionalPercent shadow_enabled = 2;\n\n  // Specifies additional source origins that will be allowed in addition to\n  // the destination origin.\n  //\n  // More information on how this can be configured via runtime can be found\n  // :ref:`here <csrf-configuration>`.\n  repeated type.matcher.v3.StringMatcher additional_origins = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/csrf/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.csrf.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.csrf.v4alpha\";\noption java_outer_classname = \"CsrfProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: CSRF]\n// Cross-Site Request Forgery :ref:`configuration overview <config_http_filters_csrf>`.\n// [#extension: envoy.filters.http.csrf]\n\n// CSRF filter config.\nmessage CsrfPolicy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.csrf.v3.CsrfPolicy\";\n\n  // Specifies the % of requests for which the CSRF filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // .. 
note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.v3.FractionalPercent.DenominatorType>`.\n  config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 1\n      [(validate.rules).message = {required: true}];\n\n  // Specifies that CSRF policies will be evaluated and tracked, but not enforced.\n  //\n  // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate\n  // and track the request's *Origin* and *Destination* to determine if it's valid, but will not\n  // enforce any policies.\n  config.core.v4alpha.RuntimeFractionalPercent shadow_enabled = 2;\n\n  // Specifies additional source origins that will be allowed in addition to\n  // the destination origin.\n  //\n  // More information on how this can be configured via runtime can be found\n  // :ref:`here <csrf-configuration>`.\n  repeated type.matcher.v4alpha.StringMatcher additional_origins = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.decompressor.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.decompressor.v3\";\noption java_outer_classname = \"DecompressorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Decompressor]\n// [#extension: envoy.filters.http.decompressor]\n\nmessage Decompressor {\n  // Common configuration for filter behavior on both the request and response direction.\n  message CommonDirectionConfig {\n    // Runtime flag that controls whether the filter is enabled for decompression or not. If set to false, the\n    // filter will operate as a pass-through filter. If the message is unspecified, the filter will be enabled.\n    config.core.v3.RuntimeFeatureFlag enabled = 1;\n  }\n\n  // Configuration for filter behavior on the request direction.\n  message RequestDirectionConfig {\n    CommonDirectionConfig common_config = 1;\n\n    // If set to true, and response decompression is enabled, the filter modifies the Accept-Encoding\n    // request header by appending the decompressor_library's encoding. Defaults to true.\n    google.protobuf.BoolValue advertise_accept_encoding = 2;\n  }\n\n  // Configuration for filter behavior on the response direction.\n  message ResponseDirectionConfig {\n    CommonDirectionConfig common_config = 1;\n  }\n\n  // A decompressor library to use for both request and response decompression. 
Currently only\n  // :ref:`envoy.compression.gzip.compressor<envoy_api_msg_extensions.compression.gzip.decompressor.v3.Gzip>`\n  // is included in Envoy.\n  config.core.v3.TypedExtensionConfig decompressor_library = 1\n      [(validate.rules).message = {required: true}];\n\n  // Configuration for request decompression. Decompression is enabled by default if left empty.\n  RequestDirectionConfig request_direction_config = 2;\n\n  // Configuration for response decompression. Decompression is enabled by default if left empty.\n  ResponseDirectionConfig response_direction_config = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg\",\n        \"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.dynamic_forward_proxy.v3;\n\nimport \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.dynamic_forward_proxy.v3\";\noption java_outer_classname = \"DynamicForwardProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamic forward proxy]\n\n// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview\n// <arch_overview_http_dynamic_forward_proxy>` for more information.\n// [#extension: envoy.filters.http.dynamic_forward_proxy]\nmessage FilterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig\";\n\n  // The DNS cache configuration that the filter will attach to. Note this configuration must\n  // match that of associated :ref:`dynamic forward proxy cluster configuration\n  // <envoy_api_field_extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// Per route Configuration for the dynamic forward proxy HTTP filter.\nmessage PerRouteConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.dynamic_forward_proxy.v2alpha.PerRouteConfig\";\n\n  oneof host_rewrite_specifier {\n    // Indicates that before DNS lookup, the host header will be swapped with\n    // this value. If not set or empty, the original host header value\n    // will be used and no rewrite will happen.\n    //\n    // Note: this rewrite affects both DNS lookup and host header forwarding. 
However, this\n    // option shouldn't be used with\n    // :ref:`HCM host rewrite <envoy_api_field_config.route.v3.RouteAction.host_rewrite_literal>` given that the\n    // value set here would be used for DNS lookups whereas the value set in the HCM would be used\n    // for host header forwarding which is not the desired outcome.\n    string host_rewrite_literal = 1;\n\n    // Indicates that before DNS lookup, the host header will be swapped with\n    // the value of this header. If not set or empty, the original host header\n    // value will be used and no rewrite will happen.\n    //\n    // Note: this rewrite affects both DNS lookup and host header forwarding. However, this\n    // option shouldn't be used with\n    // :ref:`HCM host rewrite header <envoy_api_field_config.route.v3.RouteAction.auto_host_rewrite>`\n    // given that the value set here would be used for DNS lookups whereas the value set in the HCM\n    // would be used for host header forwarding which is not the desired outcome.\n    string host_rewrite_header = 2;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/dynamo/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.dynamo.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.dynamo.v3\";\noption java_outer_classname = \"DynamoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dynamo]\n// Dynamo :ref:`configuration overview <config_http_filters_dynamo>`.\n// [#extension: envoy.filters.http.dynamo]\n\n// Dynamo filter config.\nmessage Dynamo {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.dynamo.v2.Dynamo\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/ext_authz/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.ext_authz.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\nimport \"envoy/config/core/v3/http_uri.proto\";\nimport \"envoy/type/matcher/v3/metadata.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/v3/http_status.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.ext_authz.v3\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: External Authorization]\n// External Authorization :ref:`configuration overview <config_http_filters_ext_authz>`.\n// [#extension: envoy.filters.http.ext_authz]\n\n// [#next-free-field: 15]\nmessage ExtAuthz {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.ExtAuthz\";\n\n  // External authorization service configuration.\n  oneof services {\n    // gRPC service configuration (default timeout: 200ms).\n    config.core.v3.GrpcService grpc_service = 1;\n\n    // HTTP service configuration (default timeout: 200ms).\n    HttpService http_service = 3;\n  }\n\n  // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and\n  // version of messages used on the wire.\n  config.core.v3.ApiVersion transport_api_version = 12\n      [(validate.rules).enum = {defined_only: true}];\n\n  //  Changes filter's behaviour on errors:\n  //\n  //  1. 
When set to true, the filter will *accept* client request even if the communication with\n  //  the authorization service has failed, or if the authorization service has returned a HTTP 5xx\n  //  error.\n  //\n  //  2. When set to false, ext-authz will *reject* client requests and return a *Forbidden*\n  //  response if the communication with the authorization service has failed, or if the\n  //  authorization service has returned a HTTP 5xx error.\n  //\n  // Note that errors can be *always* tracked in the :ref:`stats\n  // <config_http_filters_ext_authz_stats>`.\n  bool failure_mode_allow = 2;\n\n  // Enables filter to buffer the client request body and send it within the authorization request.\n  // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization\n  // request message indicating if the body data is partial.\n  BufferSettings with_request_body = 5;\n\n  // Clears route cache in order to allow the external authorization service to correctly affect\n  // routing decisions. Filter clears all cached routes when:\n  //\n  // 1. The field is set to *true*.\n  //\n  // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0.\n  //\n  // 3. At least one *authorization response header* is added to the client request, or is used for\n  // altering another client request header.\n  //\n  bool clear_route_cache = 6;\n\n  // Sets the HTTP status that is returned to the client when there is a network error between the\n  // filter and the authorization server. 
The default status is HTTP 403 Forbidden.\n  type.v3.HttpStatus status_on_error = 7;\n\n  // Specifies a list of metadata namespaces whose values, if present, will be passed to the\n  // ext_authz service as an opaque *protobuf::Struct*.\n  //\n  // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata\n  // <envoy_api_field_extensions.filters.http.jwt_authn.v3.JwtProvider.payload_in_metadata>` is set,\n  // then the following will pass the jwt payload to the authorization server.\n  //\n  // .. code-block:: yaml\n  //\n  //    metadata_context_namespaces:\n  //    - envoy.filters.http.jwt_authn\n  //\n  repeated string metadata_context_namespaces = 8;\n\n  // Specifies if the filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // If this field is not specified, the filter will be enabled for all requests.\n  config.core.v3.RuntimeFractionalPercent filter_enabled = 9;\n\n  // Specifies if the filter is enabled with metadata matcher.\n  // If this field is not specified, the filter will be enabled for all requests.\n  type.matcher.v3.MetadataMatcher filter_enabled_metadata = 14;\n\n  // Specifies whether to deny the requests, when the filter is disabled.\n  // If :ref:`runtime_key <envoy_api_field_config.core.v3.RuntimeFeatureFlag.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to determine whether to deny request for\n  // filter protected path at filter disabling. 
If filter is disabled in\n  // typed_per_filter_config for the path, requests will not be denied.\n  //\n  // If this field is not specified, all requests will be allowed when disabled.\n  config.core.v3.RuntimeFeatureFlag deny_at_disable = 11;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v3.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 10;\n\n  // Optional additional prefix to use when emitting statistics. This allows to distinguish\n  // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //   http_filters:\n  //     - name: envoy.filters.http.ext_authz\n  //       typed_config:\n  //         \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n  //         stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc.\n  //     - name: envoy.filters.http.ext_authz\n  //       typed_config:\n  //         \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n  //         stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc.\n  //\n  string stat_prefix = 13;\n\n  bool hidden_envoy_deprecated_use_alpha = 4\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// Configuration for buffering the request data.\nmessage BufferSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.BufferSettings\";\n\n  // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return\n  // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number\n  // set in this field. 
Note that this setting will have precedence over :ref:`failure_mode_allow\n  // <envoy_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.failure_mode_allow>`.\n  uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached.\n  // The authorization request will be dispatched and no 413 HTTP error will be returned by the\n  // filter.\n  bool allow_partial_message = 2;\n\n  // If true, the body sent to the external authorization service is set with raw bytes, it sets\n  // the :ref:`raw_body<envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.raw_body>`\n  // field of HTTP request attribute context. Otherwise, :ref:`\n  // body<envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` will be filled\n  // with UTF-8 string request body.\n  bool pack_as_bytes = 3;\n}\n\n// HttpService is used for raw HTTP communication between the filter and the authorization service.\n// When configured, the filter will parse the client request and use these attributes to call the\n// authorization server. Depending on the response, the filter may reject or accept the client\n// request. Note that in any of these events, metadata can be added, removed or overridden by the\n// filter:\n//\n// *On authorization request*, a list of allowed request headers may be supplied. See\n// :ref:`allowed_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v3.AuthorizationRequest.allowed_headers>`\n// for details. Additional headers metadata may be added to the authorization request. See\n// :ref:`headers_to_add\n// <envoy_api_field_extensions.filters.http.ext_authz.v3.AuthorizationRequest.headers_to_add>` for\n// details.\n//\n// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and\n// additional headers metadata may be added to the original client request. 
See\n// :ref:`allowed_upstream_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v3.AuthorizationResponse.allowed_upstream_headers>`\n// for details.\n//\n// On other authorization response statuses, the filter will not allow traffic. Additional headers\n// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v3.AuthorizationResponse.allowed_client_headers>`\n// for details.\n// [#next-free-field: 9]\nmessage HttpService {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.HttpService\";\n\n  reserved 3, 4, 5, 6;\n\n  // Sets the HTTP server URI which the authorization requests must be sent to.\n  config.core.v3.HttpUri server_uri = 1;\n\n  // Sets a prefix to the value of authorization request header *Path*.\n  string path_prefix = 2;\n\n  // Settings used for controlling authorization request metadata.\n  AuthorizationRequest authorization_request = 7;\n\n  // Settings used for controlling authorization response metadata.\n  AuthorizationResponse authorization_response = 8;\n}\n\nmessage AuthorizationRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.AuthorizationRequest\";\n\n  // Authorization request will include the client request headers that have a correspondent match\n  // in the :ref:`list <envoy_api_msg_type.matcher.v3.ListStringMatcher>`. Note that in addition to the\n  // user's supplied matchers:\n  //\n  // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list.\n  //\n  // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have\n  // a message body. 
However, the authorization request can include the buffered client request body\n  // (controlled by :ref:`with_request_body\n  // <envoy_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.with_request_body>` setting),\n  // consequently the value of *Content-Length* of the authorization request reflects the size of\n  // its payload.\n  //\n  type.matcher.v3.ListStringMatcher allowed_headers = 1;\n\n  // Sets a list of headers that will be included to the request to authorization service. Note that\n  // client request of the same key will be overridden.\n  repeated config.core.v3.HeaderValue headers_to_add = 2;\n}\n\nmessage AuthorizationResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.AuthorizationResponse\";\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v3.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the original client request.\n  // Note that coexistent headers will be overridden.\n  type.matcher.v3.ListStringMatcher allowed_upstream_headers = 1;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v3.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the original client request. Note\n  // that coexistent headers will be appended.\n  type.matcher.v3.ListStringMatcher allowed_upstream_headers_to_append = 3;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v3.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the client's response. Note\n  // that when this list is *not* set, all the authorization response headers, except *Authority\n  // (Host)* will be in the response to the client. 
When a header is included in this list, *Path*,\n  // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added.\n  type.matcher.v3.ListStringMatcher allowed_client_headers = 2;\n}\n\n// Extra settings on a per virtualhost/route/weighted-cluster level.\nmessage ExtAuthzPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.ExtAuthzPerRoute\";\n\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the ext auth filter for this particular vhost or route.\n    // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Check request settings for this route.\n    CheckSettings check_settings = 2 [(validate.rules).message = {required: true}];\n  }\n}\n\n// Extra settings for the check request.\nmessage CheckSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ext_authz.v2.CheckSettings\";\n\n  // Context extensions to set on the CheckRequest's\n  // :ref:`AttributeContext.context_extensions<envoy_api_field_service.auth.v3.AttributeContext.context_extensions>`\n  //\n  // You can use this to provide extra context for the external authorization server on specific\n  // virtual hosts/routes. For example, adding a context extension on the virtual host level can\n  // give the ext-authz server information on what virtual host is used without needing to parse the\n  // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged\n  // in order, and the result will be used.\n  //\n  // Merge semantics for this field are such that keys from more specific configs override.\n  //\n  // .. 
note::\n  //\n  //   These settings are only applied to a filter configured with a\n  //   :ref:`grpc_service<envoy_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.grpc_service>`.\n  map<string, string> context_extensions = 1;\n\n  // When set to true, disable the configured :ref:`with_request_body\n  // <envoy_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.with_request_body>` for a route.\n  bool disable_request_body_buffering = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/ext_authz/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.ext_authz.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\nimport \"envoy/config/core/v4alpha/http_uri.proto\";\nimport \"envoy/type/matcher/v4alpha/metadata.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/v3/http_status.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.ext_authz.v4alpha\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: External Authorization]\n// External Authorization :ref:`configuration overview <config_http_filters_ext_authz>`.\n// [#extension: envoy.filters.http.ext_authz]\n\n// [#next-free-field: 15]\nmessage ExtAuthz {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\";\n\n  reserved 4;\n\n  reserved \"use_alpha\";\n\n  // External authorization service configuration.\n  oneof services {\n    // gRPC service configuration (default timeout: 200ms).\n    config.core.v4alpha.GrpcService grpc_service = 1;\n\n    // HTTP service configuration (default timeout: 200ms).\n    HttpService http_service = 3;\n  }\n\n  // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and\n  // version of messages used on the wire.\n  config.core.v4alpha.ApiVersion transport_api_version = 12\n      [(validate.rules).enum = {defined_only: true}];\n\n  //  Changes filter's behaviour on errors:\n  //\n  //  1. 
When set to true, the filter will *accept* client request even if the communication with\n  //  the authorization service has failed, or if the authorization service has returned a HTTP 5xx\n  //  error.\n  //\n  //  2. When set to false, ext-authz will *reject* client requests and return a *Forbidden*\n  //  response if the communication with the authorization service has failed, or if the\n  //  authorization service has returned a HTTP 5xx error.\n  //\n  // Note that errors can be *always* tracked in the :ref:`stats\n  // <config_http_filters_ext_authz_stats>`.\n  bool failure_mode_allow = 2;\n\n  // Enables filter to buffer the client request body and send it within the authorization request.\n  // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization\n  // request message indicating if the body data is partial.\n  BufferSettings with_request_body = 5;\n\n  // Clears route cache in order to allow the external authorization service to correctly affect\n  // routing decisions. Filter clears all cached routes when:\n  //\n  // 1. The field is set to *true*.\n  //\n  // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0.\n  //\n  // 3. At least one *authorization response header* is added to the client request, or is used for\n  // altering another client request header.\n  //\n  bool clear_route_cache = 6;\n\n  // Sets the HTTP status that is returned to the client when there is a network error between the\n  // filter and the authorization server. 
The default status is HTTP 403 Forbidden.\n  type.v3.HttpStatus status_on_error = 7;\n\n  // Specifies a list of metadata namespaces whose values, if present, will be passed to the\n  // ext_authz service as an opaque *protobuf::Struct*.\n  //\n  // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata\n  // <envoy_api_field_extensions.filters.http.jwt_authn.v4alpha.JwtProvider.payload_in_metadata>` is set,\n  // then the following will pass the jwt payload to the authorization server.\n  //\n  // .. code-block:: yaml\n  //\n  //    metadata_context_namespaces:\n  //    - envoy.filters.http.jwt_authn\n  //\n  repeated string metadata_context_namespaces = 8;\n\n  // Specifies if the filter is enabled.\n  //\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFractionalPercent.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to get the percentage of requests to filter.\n  //\n  // If this field is not specified, the filter will be enabled for all requests.\n  config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9;\n\n  // Specifies if the filter is enabled with metadata matcher.\n  // If this field is not specified, the filter will be enabled for all requests.\n  type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 14;\n\n  // Specifies whether to deny the requests, when the filter is disabled.\n  // If :ref:`runtime_key <envoy_api_field_config.core.v4alpha.RuntimeFeatureFlag.runtime_key>` is specified,\n  // Envoy will lookup the runtime key to determine whether to deny request for\n  // filter protected path at filter disabling. 
If filter is disabled in\n  // typed_per_filter_config for the path, requests will not be denied.\n  //\n  // If this field is not specified, all requests will be allowed when disabled.\n  config.core.v4alpha.RuntimeFeatureFlag deny_at_disable = 11;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v4alpha.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 10;\n\n  // Optional additional prefix to use when emitting statistics. This allows to distinguish\n  // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //   http_filters:\n  //     - name: envoy.filters.http.ext_authz\n  //       typed_config:\n  //         \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n  //         stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc.\n  //     - name: envoy.filters.http.ext_authz\n  //       typed_config:\n  //         \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n  //         stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc.\n  //\n  string stat_prefix = 13;\n}\n\n// Configuration for buffering the request data.\nmessage BufferSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.BufferSettings\";\n\n  // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return\n  // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number\n  // set in this field. 
Note that this setting will have precedence over :ref:`failure_mode_allow\n  // <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.ExtAuthz.failure_mode_allow>`.\n  uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached.\n  // The authorization request will be dispatched and no 413 HTTP error will be returned by the\n  // filter.\n  bool allow_partial_message = 2;\n\n  // If true, the body sent to the external authorization service is set with raw bytes, it sets\n  // the :ref:`raw_body<envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.raw_body>`\n  // field of HTTP request attribute context. Otherwise, :ref:`\n  // body<envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` will be filled\n  // with UTF-8 string request body.\n  bool pack_as_bytes = 3;\n}\n\n// HttpService is used for raw HTTP communication between the filter and the authorization service.\n// When configured, the filter will parse the client request and use these attributes to call the\n// authorization server. Depending on the response, the filter may reject or accept the client\n// request. Note that in any of these events, metadata can be added, removed or overridden by the\n// filter:\n//\n// *On authorization request*, a list of allowed request headers may be supplied. See\n// :ref:`allowed_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.AuthorizationRequest.allowed_headers>`\n// for details. Additional headers metadata may be added to the authorization request. See\n// :ref:`headers_to_add\n// <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.AuthorizationRequest.headers_to_add>` for\n// details.\n//\n// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and\n// additional headers metadata may be added to the original client request. 
See\n// :ref:`allowed_upstream_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.AuthorizationResponse.allowed_upstream_headers>`\n// for details.\n//\n// On other authorization response statuses, the filter will not allow traffic. Additional headers\n// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers\n// <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.AuthorizationResponse.allowed_client_headers>`\n// for details.\n// [#next-free-field: 9]\nmessage HttpService {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.HttpService\";\n\n  reserved 3, 4, 5, 6;\n\n  // Sets the HTTP server URI which the authorization requests must be sent to.\n  config.core.v4alpha.HttpUri server_uri = 1;\n\n  // Sets a prefix to the value of authorization request header *Path*.\n  string path_prefix = 2;\n\n  // Settings used for controlling authorization request metadata.\n  AuthorizationRequest authorization_request = 7;\n\n  // Settings used for controlling authorization response metadata.\n  AuthorizationResponse authorization_response = 8;\n}\n\nmessage AuthorizationRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest\";\n\n  // Authorization request will include the client request headers that have a correspondent match\n  // in the :ref:`list <envoy_api_msg_type.matcher.v4alpha.ListStringMatcher>`. Note that in addition to the\n  // user's supplied matchers:\n  //\n  // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list.\n  //\n  // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have\n  // a message body. 
However, the authorization request can include the buffered client request body\n  // (controlled by :ref:`with_request_body\n  // <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.ExtAuthz.with_request_body>` setting),\n  // consequently the value of *Content-Length* of the authorization request reflects the size of\n  // its payload.\n  //\n  type.matcher.v4alpha.ListStringMatcher allowed_headers = 1;\n\n  // Sets a list of headers that will be included to the request to authorization service. Note that\n  // client request of the same key will be overridden.\n  repeated config.core.v4alpha.HeaderValue headers_to_add = 2;\n}\n\nmessage AuthorizationResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.AuthorizationResponse\";\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v4alpha.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the original client request.\n  // Note that coexistent headers will be overridden.\n  type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v4alpha.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the original client request. Note\n  // that coexistent headers will be appended.\n  type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers_to_append = 3;\n\n  // When this :ref:`list <envoy_api_msg_type.matcher.v4alpha.ListStringMatcher>` is set, authorization\n  // response headers that have a correspondent match will be added to the client's response. Note\n  // that when this list is *not* set, all the authorization response headers, except *Authority\n  // (Host)* will be in the response to the client. 
When a header is included in this list, *Path*,\n  // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added.\n  type.matcher.v4alpha.ListStringMatcher allowed_client_headers = 2;\n}\n\n// Extra settings on a per virtualhost/route/weighted-cluster level.\nmessage ExtAuthzPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\";\n\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the ext auth filter for this particular vhost or route.\n    // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // Check request settings for this route.\n    CheckSettings check_settings = 2 [(validate.rules).message = {required: true}];\n  }\n}\n\n// Extra settings for the check request.\nmessage CheckSettings {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.ext_authz.v3.CheckSettings\";\n\n  // Context extensions to set on the CheckRequest's\n  // :ref:`AttributeContext.context_extensions<envoy_api_field_service.auth.v4alpha.AttributeContext.context_extensions>`\n  //\n  // You can use this to provide extra context for the external authorization server on specific\n  // virtual hosts/routes. For example, adding a context extension on the virtual host level can\n  // give the ext-authz server information on what virtual host is used without needing to parse the\n  // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged\n  // in order, and the result will be used.\n  //\n  // Merge semantics for this field are such that keys from more specific configs override.\n  //\n  // .. 
note::\n  //\n  //   These settings are only applied to a filter configured with a\n  //   :ref:`grpc_service<envoy_api_field_extensions.filters.http.ext_authz.v4alpha.ExtAuthz.grpc_service>`.\n  map<string, string> context_extensions = 1;\n\n  // When set to true, disable the configured :ref:`with_request_body\n  // <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.ExtAuthz.with_request_body>` for a route.\n  bool disable_request_body_buffering = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/fault/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/fault/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/extensions/filters/common/fault/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.fault.v3;\n\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/extensions/filters/common/fault/v3/fault.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.fault.v3\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Fault Injection]\n// Fault Injection :ref:`configuration overview <config_http_filters_fault_injection>`.\n// [#extension: envoy.filters.http.fault]\n\n// [#next-free-field: 6]\nmessage FaultAbort {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.fault.v2.FaultAbort\";\n\n  // Fault aborts are controlled via an HTTP header (if applicable). 
See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderAbort {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.fault.v2.FaultAbort.HeaderAbort\";\n  }\n\n  reserved 1;\n\n  oneof error_type {\n    option (validate.required) = true;\n\n    // HTTP status code to use to abort the HTTP request.\n    uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n    // gRPC status code to use to abort the gRPC request.\n    uint32 grpc_status = 5;\n\n    // Fault aborts are controlled via an HTTP header (if applicable).\n    HeaderAbort header_abort = 4;\n  }\n\n  // The percentage of requests/operations/connections that will be aborted with the error code\n  // provided.\n  type.v3.FractionalPercent percentage = 3;\n}\n\n// [#next-free-field: 15]\nmessage HTTPFault {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.fault.v2.HTTPFault\";\n\n  // If specified, the filter will inject delays based on the values in the\n  // object.\n  common.fault.v3.FaultDelay delay = 1;\n\n  // If specified, the filter will abort requests based on the values in\n  // the object. At least *abort* or *delay* must be specified.\n  FaultAbort abort = 2;\n\n  // Specifies the name of the (destination) upstream cluster that the\n  // filter should match on. Fault injection will be restricted to requests\n  // bound to the specific upstream cluster.\n  string upstream_cluster = 3;\n\n  // Specifies a set of headers that the filter should match on. The fault\n  // injection filter can be applied selectively to requests that match a set of\n  // headers specified in the fault filter config. 
The chances of actual fault\n  // injection further depend on the value of the :ref:`percentage\n  // <envoy_api_field_extensions.filters.http.fault.v3.FaultAbort.percentage>` field.\n  // The filter will check the request's headers against all the specified\n  // headers in the filter config. A match will happen if all the headers in the\n  // config are present in the request with the same values (or based on\n  // presence if the *value* field is not in the config).\n  repeated config.route.v3.HeaderMatcher headers = 4;\n\n  // Faults are injected for the specified list of downstream hosts. If this\n  // setting is not set, faults are injected for all downstream nodes.\n  // Downstream node name is taken from :ref:`the HTTP\n  // x-envoy-downstream-service-node\n  // <config_http_conn_man_headers_downstream-service-node>` header and compared\n  // against downstream_nodes list.\n  repeated string downstream_nodes = 5;\n\n  // The maximum number of faults that can be active at a single time via the configured fault\n  // filter. Note that because this setting can be overridden at the route level, it's possible\n  // for the number of active faults to be greater than this value (if injected via a different\n  // route). If not specified, defaults to unlimited. This setting can be overridden via\n  // `runtime <config_http_filters_fault_injection_runtime>` and any faults that are not injected\n  // due to overflow will be indicated via the `faults_overflow\n  // <config_http_filters_fault_injection_stats>` stat.\n  //\n  // .. attention::\n  //   Like other :ref:`circuit breakers <arch_overview_circuit_break>` in Envoy, this is a fuzzy\n  //   limit. It's possible for the number of active faults to rise slightly above the configured\n  //   amount due to the implementation details.\n  google.protobuf.UInt32Value max_active_faults = 6;\n\n  // The response rate limit to be applied to the response body of the stream. 
When configured,\n  // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent\n  // <config_http_filters_fault_injection_runtime>` runtime key.\n  //\n  // .. attention::\n  //  This is a per-stream limit versus a connection level limit. This means that concurrent streams\n  //  will each get an independent limit.\n  common.fault.v3.FaultRateLimit response_rate_limit = 7;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_delay_percent\n  string delay_percent_runtime = 8;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.abort_percent\n  string abort_percent_runtime = 9;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_duration_ms\n  string delay_duration_runtime = 10;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.http_status\n  string abort_http_status_runtime = 11;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.max_active_faults\n  string max_active_faults_runtime = 12;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.rate_limit.response_percent\n  string response_rate_limit_percent_runtime = 13;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.grpc_status\n  string abort_grpc_status_runtime = 14;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/common/fault/v3:pkg\",\n        \"//envoy/extensions/filters/http/fault/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.fault.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/extensions/filters/common/fault/v3/fault.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.fault.v4alpha\";\noption java_outer_classname = \"FaultProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Fault Injection]\n// Fault Injection :ref:`configuration overview <config_http_filters_fault_injection>`.\n// [#extension: envoy.filters.http.fault]\n\n// [#next-free-field: 6]\nmessage FaultAbort {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.fault.v3.FaultAbort\";\n\n  // Fault aborts are controlled via an HTTP header (if applicable). 
See the\n  // :ref:`HTTP fault filter <config_http_filters_fault_injection_http_header>` documentation for\n  // more information.\n  message HeaderAbort {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort\";\n  }\n\n  reserved 1;\n\n  oneof error_type {\n    option (validate.required) = true;\n\n    // HTTP status code to use to abort the HTTP request.\n    uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n    // gRPC status code to use to abort the gRPC request.\n    uint32 grpc_status = 5;\n\n    // Fault aborts are controlled via an HTTP header (if applicable).\n    HeaderAbort header_abort = 4;\n  }\n\n  // The percentage of requests/operations/connections that will be aborted with the error code\n  // provided.\n  type.v3.FractionalPercent percentage = 3;\n}\n\n// [#next-free-field: 15]\nmessage HTTPFault {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.fault.v3.HTTPFault\";\n\n  // If specified, the filter will inject delays based on the values in the\n  // object.\n  common.fault.v3.FaultDelay delay = 1;\n\n  // If specified, the filter will abort requests based on the values in\n  // the object. At least *abort* or *delay* must be specified.\n  FaultAbort abort = 2;\n\n  // Specifies the name of the (destination) upstream cluster that the\n  // filter should match on. Fault injection will be restricted to requests\n  // bound to the specific upstream cluster.\n  string upstream_cluster = 3;\n\n  // Specifies a set of headers that the filter should match on. The fault\n  // injection filter can be applied selectively to requests that match a set of\n  // headers specified in the fault filter config. 
The chances of actual fault\n  // injection further depend on the value of the :ref:`percentage\n  // <envoy_api_field_extensions.filters.http.fault.v4alpha.FaultAbort.percentage>` field.\n  // The filter will check the request's headers against all the specified\n  // headers in the filter config. A match will happen if all the headers in the\n  // config are present in the request with the same values (or based on\n  // presence if the *value* field is not in the config).\n  repeated config.route.v4alpha.HeaderMatcher headers = 4;\n\n  // Faults are injected for the specified list of downstream hosts. If this\n  // setting is not set, faults are injected for all downstream nodes.\n  // Downstream node name is taken from :ref:`the HTTP\n  // x-envoy-downstream-service-node\n  // <config_http_conn_man_headers_downstream-service-node>` header and compared\n  // against downstream_nodes list.\n  repeated string downstream_nodes = 5;\n\n  // The maximum number of faults that can be active at a single time via the configured fault\n  // filter. Note that because this setting can be overridden at the route level, it's possible\n  // for the number of active faults to be greater than this value (if injected via a different\n  // route). If not specified, defaults to unlimited. This setting can be overridden via\n  // `runtime <config_http_filters_fault_injection_runtime>` and any faults that are not injected\n  // due to overflow will be indicated via the `faults_overflow\n  // <config_http_filters_fault_injection_stats>` stat.\n  //\n  // .. attention::\n  //   Like other :ref:`circuit breakers <arch_overview_circuit_break>` in Envoy, this is a fuzzy\n  //   limit. It's possible for the number of active faults to rise slightly above the configured\n  //   amount due to the implementation details.\n  google.protobuf.UInt32Value max_active_faults = 6;\n\n  // The response rate limit to be applied to the response body of the stream. 
When configured,\n  // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent\n  // <config_http_filters_fault_injection_runtime>` runtime key.\n  //\n  // .. attention::\n  //  This is a per-stream limit versus a connection level limit. This means that concurrent streams\n  //  will each get an independent limit.\n  common.fault.v3.FaultRateLimit response_rate_limit = 7;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_delay_percent\n  string delay_percent_runtime = 8;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.abort_percent\n  string abort_percent_runtime = 9;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.delay.fixed_duration_ms\n  string delay_duration_runtime = 10;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.http_status\n  string abort_http_status_runtime = 11;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.max_active_faults\n  string max_active_faults_runtime = 12;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.rate_limit.response_percent\n  string response_rate_limit_percent_runtime = 13;\n\n  // The runtime key to override the :ref:`default <config_http_filters_fault_injection_runtime>`\n  // runtime. The default is: fault.http.abort.grpc_status\n  string abort_grpc_status_runtime = 14;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/grpc_http1_bridge/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_http1_bridge.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_http1_bridge.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC HTTP/1.1 Bridge]\n// gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview <config_http_filters_grpc_bridge>`.\n// [#extension: envoy.filters.http.grpc_http1_bridge]\n\n// gRPC HTTP/1.1 Bridge filter config.\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_http1_bridge.v2.Config\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge]\n// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview\n// <config_http_filters_grpc_http1_reverse_bridge>`.\n// [#extension: envoy.filters.http.grpc_http1_reverse_bridge]\n\n// gRPC reverse bridge filter configuration\nmessage FilterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfig\";\n\n  // The content-type to pass to the upstream when the gRPC bridge filter is applied.\n  // The filter will also validate that the upstream responds with the same content type.\n  string content_type = 1 [(validate.rules).string = {min_len: 1}];\n\n  // If true, Envoy will assume that the upstream doesn't understand gRPC frames and\n  // strip the gRPC frame from the request, and add it back in to the response. 
This will\n  // hide the gRPC semantics from the upstream, allowing it to receive and respond with a\n  // simple binary encoded protobuf.\n  bool withhold_grpc_frames = 2;\n}\n\n// gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level.\nmessage FilterConfigPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfigPerRoute\";\n\n  // If true, disables gRPC reverse bridge filter for this particular vhost or route.\n  // If disabled is specified in multiple per-filter-configs, the most specific one will be used.\n  bool disabled = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/transcoder/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_json_transcoder.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_json_transcoder.v3\";\noption java_outer_classname = \"TranscoderProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC-JSON transcoder]\n// gRPC-JSON transcoder :ref:`configuration overview <config_http_filters_grpc_json_transcoder>`.\n// [#extension: envoy.filters.http.grpc_json_transcoder]\n\n// [#next-free-field: 10]\nmessage GrpcJsonTranscoder {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder\";\n\n  message PrintOptions {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder.PrintOptions\";\n\n    // Whether to add spaces, line breaks and indentation to make the JSON\n    // output easy to read. Defaults to false.\n    bool add_whitespace = 1;\n\n    // Whether to always print primitive fields. By default primitive\n    // fields with default values will be omitted in JSON output. For\n    // example, an int32 field set to 0 will be omitted. Setting this flag to\n    // true will override the default behavior and print primitive fields\n    // regardless of their values. Defaults to false.\n    bool always_print_primitive_fields = 2;\n\n    // Whether to always print enums as ints. By default they are rendered\n    // as strings. Defaults to false.\n    bool always_print_enums_as_ints = 3;\n\n    // Whether to preserve proto field names. By default protobuf will\n    // generate JSON field names using the ``json_name`` option, or lower camel case,\n    // in that order. 
Setting this flag will preserve the original field names. Defaults to false.\n    bool preserve_proto_field_names = 4;\n  }\n\n  oneof descriptor_set {\n    option (validate.required) = true;\n\n    // Supplies the filename of\n    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC\n    // services.\n    string proto_descriptor = 1;\n\n    // Supplies the binary content of\n    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC\n    // services.\n    bytes proto_descriptor_bin = 4;\n  }\n\n  // A list of strings that\n  // supplies the fully qualified service names (i.e. \"package_name.service_name\") that\n  // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``,\n  // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than\n  // the service names specified here, but they won't be translated.\n  repeated string services = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // Control options for response JSON. These options are passed directly to\n  // `JsonPrintOptions <https://developers.google.com/protocol-buffers/docs/reference/cpp/\n  // google.protobuf.util.json_util#JsonPrintOptions>`_.\n  PrintOptions print_options = 3;\n\n  // Whether to keep the incoming request route after the outgoing headers have been transformed to\n  // match the upstream gRPC service. Note: This means that routes for gRPC services that are\n  // not transcoded cannot be used in combination with *match_incoming_request_route*.\n  bool match_incoming_request_route = 5;\n\n  // A list of query parameters to be ignored for transcoding method mapping.\n  // By default, the transcoder filter will not transcode a request if there are any\n  // unknown/invalid query parameters.\n  //\n  // Example :\n  //\n  // .. 
code-block:: proto\n  //\n  //     service Bookstore {\n  //       rpc GetShelf(GetShelfRequest) returns (Shelf) {\n  //         option (google.api.http) = {\n  //           get: \"/shelves/{shelf}\"\n  //         };\n  //       }\n  //     }\n  //\n  //     message GetShelfRequest {\n  //       int64 shelf = 1;\n  //     }\n  //\n  //     message Shelf {}\n  //\n  // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf`` because variable\n  // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow\n  // the same request to be mapped to ``GetShelf``.\n  repeated string ignored_query_parameters = 6;\n\n  // Whether to route methods without the ``google.api.http`` option.\n  //\n  // Example :\n  //\n  // .. code-block:: proto\n  //\n  //     package bookstore;\n  //\n  //     service Bookstore {\n  //       rpc GetShelf(GetShelfRequest) returns (Shelf) {}\n  //     }\n  //\n  //     message GetShelfRequest {\n  //       int64 shelf = 1;\n  //     }\n  //\n  //     message Shelf {}\n  //\n  // The client could ``post`` a json body ``{\"shelf\": 1234}`` with the path of\n  // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``.\n  bool auto_mapping = 7;\n\n  // Whether to ignore query parameters that cannot be mapped to a corresponding\n  // protobuf field. Use this if you cannot control the query parameters and do\n  // not know them beforehand. 
Otherwise use ``ignored_query_parameters``.\n  // Defaults to false.\n  bool ignore_unknown_query_parameters = 8;\n\n  // Whether to convert gRPC status headers to JSON.\n  // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status``\n  // from the ``grpc-status-details-bin`` header and use it as JSON body.\n  // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and\n  // ``grpc-message`` headers.\n  // The error details types must be present in the ``proto_descriptor``.\n  //\n  // For example, if an upstream server replies with headers:\n  //\n  // .. code-block:: none\n  //\n  //     grpc-status: 5\n  //     grpc-status-details-bin:\n  //         CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ\n  //\n  // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message\n  // ``google.rpc.Status``. It will be transcoded into:\n  //\n  // .. code-block:: none\n  //\n  //     HTTP/1.1 404 Not Found\n  //     content-type: application/json\n  //\n  //     {\"code\":5,\"details\":[{\"@type\":\"type.googleapis.com/google.rpc.RequestInfo\",\"requestId\":\"r-1\"}]}\n  //\n  //  In order to transcode the message, the ``google.rpc.RequestInfo`` type from\n  //  the ``google/rpc/error_details.proto`` should be included in the configured\n  //  :ref:`proto descriptor set <config_grpc_json_generate_proto_descriptor_set>`.\n  bool convert_grpc_status = 9;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/grpc_stats/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_stats.v3;\n\nimport \"envoy/config/core/v3/grpc_method_list.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_stats.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC statistics] gRPC statistics filter\n// :ref:`configuration overview <config_http_filters_grpc_stats>`.\n// [#extension: envoy.filters.http.grpc_stats]\n\n// gRPC statistics filter configuration\nmessage FilterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_stats.v2alpha.FilterConfig\";\n\n  // If true, the filter maintains a filter state object with the request and response message\n  // counts.\n  bool emit_filter_state = 1;\n\n  oneof per_method_stat_specifier {\n    // If set, specifies an allowlist of service/methods that will have individual stats\n    // emitted for them. Any call that does not match the allowlist will be counted\n    // in a stat with no method specifier: `cluster.<name>.grpc.*`.\n    config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2;\n\n    // If set to true, emit stats for all service/method names.\n    //\n    // If set to false, emit stats for all service/message types to the same stats without including\n    // the service/method in the name, with prefix `cluster.<name>.grpc`. This can be useful if\n    // service/method granularity is not needed, or if each cluster only receives a single method.\n    //\n    // .. attention::\n    //   This option is only safe if all clients are trusted. 
If this option is enabled\n    //   with untrusted clients, the clients could cause unbounded growth in the number of stats in\n    //   Envoy, using unbounded memory and potentially slowing down stats pipelines.\n    //\n    // .. attention::\n    //   If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the\n    //   behavior will default to `stats_for_all_methods=true`. This default value is deprecated,\n    //   and in a future release, if neither field is set, it will default to\n    //   `stats_for_all_methods=false` in order to be safe by default. This behavior can be\n    //   controlled with runtime override\n    //   `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`.\n    google.protobuf.BoolValue stats_for_all_methods = 3;\n  }\n\n  // If true, the filter will gather a histogram for the request time of the upstream.\n  // It works with :ref:`stats_for_all_methods\n  // <envoy_api_field_extensions.filters.http.grpc_stats.v3.FilterConfig.stats_for_all_methods>`\n  // and :ref:`individual_method_stats_allowlist\n  // <envoy_api_field_extensions.filters.http.grpc_stats.v3.FilterConfig.individual_method_stats_allowlist>` the same way\n  // request_message_count and response_message_count works.\n  bool enable_upstream_stats = 4;\n}\n\n// gRPC statistics filter state object in protobuf form.\nmessage FilterObject {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_stats.v2alpha.FilterObject\";\n\n  // Count of request messages in the request stream.\n  uint64 request_message_count = 1;\n\n  // Count of response messages in the response stream.\n  uint64 response_message_count = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/grpc_web/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.grpc_web.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.grpc_web.v3\";\noption java_outer_classname = \"GrpcWebProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC Web]\n// gRPC Web :ref:`configuration overview <config_http_filters_grpc_web>`.\n// [#extension: envoy.filters.http.grpc_web]\n\n// gRPC Web filter config.\nmessage GrpcWeb {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.grpc_web.v2.GrpcWeb\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/gzip/v2:pkg\",\n        \"//envoy/extensions/filters/http/compressor/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.gzip.v3;\n\nimport \"envoy/extensions/filters/http/compressor/v3/compressor.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.gzip.v3\";\noption java_outer_classname = \"GzipProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Gzip]\n// Gzip :ref:`configuration overview <config_http_filters_gzip>`.\n// [#extension: envoy.filters.http.gzip]\n\n// [#next-free-field: 12]\nmessage Gzip {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.gzip.v2.Gzip\";\n\n  enum CompressionStrategy {\n    DEFAULT = 0;\n    FILTERED = 1;\n    HUFFMAN = 2;\n    RLE = 3;\n  }\n\n  message CompressionLevel {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.gzip.v2.Gzip.CompressionLevel\";\n\n    enum Enum {\n      DEFAULT = 0;\n      BEST = 1;\n      SPEED = 2;\n    }\n  }\n\n  // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values\n  // use more memory, but are faster and produce better compression results. The default value is 5.\n  google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}];\n\n  // A value used for selecting the zlib compression level. This setting will affect speed and\n  // amount of compression applied to the content. \"BEST\" provides higher compression at the cost of\n  // higher latency, \"SPEED\" provides lower compression with minimum impact on response time.\n  // \"DEFAULT\" provides an optimal result between speed and compression. 
This field will be set to\n  // \"DEFAULT\" if not specified.\n  CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // A value used for selecting the zlib compression strategy which is directly related to the\n  // characteristics of the content. Most of the time \"DEFAULT\" will be the best choice, though\n  // there are situations which changing this parameter might produce better results. For example,\n  // run-length encoding (RLE) is typically used when the content is known for having sequences\n  // which same data occurs many consecutive times. For more information about each strategy, please\n  // refer to zlib manual.\n  CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}];\n\n  // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size.\n  // Larger window results in better compression at the expense of memory usage. The default is 12\n  // which will produce a 4096 bytes window. For more details about this parameter, please refer to\n  // zlib manual > deflateInit2.\n  google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}];\n\n  // Set of configuration parameters common for all compression filters. If this field is set then\n  // the fields `content_length`, `content_type`, `disable_on_etag_header` and\n  // `remove_accept_encoding_header` are ignored.\n  compressor.v3.Compressor compressor = 10;\n\n  // Value for Zlib's next output buffer. If not set, defaults to 4096.\n  // See https://www.zlib.net/manual.html for more details. 
Also see\n  // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance.\n  google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}];\n\n  google.protobuf.UInt32Value hidden_envoy_deprecated_content_length = 2 [deprecated = true];\n\n  repeated string hidden_envoy_deprecated_content_type = 6 [deprecated = true];\n\n  bool hidden_envoy_deprecated_disable_on_etag_header = 7 [deprecated = true];\n\n  bool hidden_envoy_deprecated_remove_accept_encoding_header = 8 [deprecated = true];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/header_to_metadata/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.header_to_metadata.v3;\n\nimport \"envoy/type/matcher/v3/regex.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v3\";\noption java_outer_classname = \"HeaderToMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Header-To-Metadata Filter]\n//\n// The configuration for transforming headers into metadata. This is useful\n// for matching load balancer subsets, logging, etc.\n//\n// Header to Metadata :ref:`configuration overview <config_http_filters_header_to_metadata>`.\n// [#extension: envoy.filters.http.header_to_metadata]\n\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.header_to_metadata.v2.Config\";\n\n  enum ValueType {\n    STRING = 0;\n\n    NUMBER = 1;\n\n    // The value is a serialized `protobuf.Value\n    // <https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/struct.proto#L62>`_.\n    PROTOBUF_VALUE = 2;\n  }\n\n  // ValueEncode defines the encoding algorithm.\n  enum ValueEncode {\n    // The value is not encoded.\n    NONE = 0;\n\n    // The value is encoded in `Base64 <https://tools.ietf.org/html/rfc4648#section-4>`_.\n    // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the\n    // non-ASCII characters in the header.\n    BASE64 = 1;\n  }\n\n  // [#next-free-field: 7]\n  message KeyValuePair {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.header_to_metadata.v2.Config.KeyValuePair\";\n\n    // The namespace — if this is empty, the filter's namespace will be used.\n    string metadata_namespace = 
1;\n\n    // The key to use within the namespace.\n    string key = 2 [(validate.rules).string = {min_len: 1}];\n\n    // The value to pair with the given key.\n    //\n    // When used for a\n    // :ref:`on_header_present <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_present>`\n    // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added.\n    //\n    // When used for a :ref:`on_header_missing <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_missing>`\n    // case, a non-empty value must be provided otherwise no metadata is added.\n    string value = 3 [(udpa.annotations.field_migrate).oneof_promotion = \"value_type\"];\n\n    // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value\n    // is used as-is.\n    //\n    // This is only used for :ref:`on_header_present <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_present>`.\n    //\n    // Note: if the `value` field is non-empty this field should be empty.\n    type.matcher.v3.RegexMatchAndSubstitute regex_value_rewrite = 6\n        [(udpa.annotations.field_migrate).oneof_promotion = \"value_type\"];\n\n    // The value's type — defaults to string.\n    ValueType type = 4 [(validate.rules).enum = {defined_only: true}];\n\n    // How is the value encoded, default is NONE (not encoded).\n    // The value will be decoded accordingly before storing to metadata.\n    ValueEncode encode = 5;\n  }\n\n  // A Rule defines what metadata to apply when a header is present or missing.\n  // [#next-free-field: 6]\n  message Rule {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.header_to_metadata.v2.Config.Rule\";\n\n    // Specifies that a match will be performed on the value of a header or a cookie.\n    //\n    // The 
header to be extracted.\n    string header = 1 [\n      (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false},\n      (udpa.annotations.field_migrate).oneof_promotion = \"header_cookie_specifier\"\n    ];\n\n    // The cookie to be extracted.\n    string cookie = 5 [\n      (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false},\n      (udpa.annotations.field_migrate).oneof_promotion = \"header_cookie_specifier\"\n    ];\n\n    // If the header or cookie is present, apply this metadata KeyValuePair.\n    //\n    // If the value in the KeyValuePair is non-empty, it'll be used instead\n    // of the header or cookie value.\n    KeyValuePair on_header_present = 2 [(udpa.annotations.field_migrate).rename = \"on_present\"];\n\n    // If the header or cookie is not present, apply this metadata KeyValuePair.\n    //\n    // The value in the KeyValuePair must be set, since it'll be used in lieu\n    // of the missing header or cookie value.\n    KeyValuePair on_header_missing = 3 [(udpa.annotations.field_migrate).rename = \"on_missing\"];\n\n    // Whether or not to remove the header after a rule is applied.\n    //\n    // This prevents headers from leaking.\n    // This field is not supported in case of a cookie.\n    bool remove = 4;\n  }\n\n  // The list of rules to apply to requests.\n  repeated Rule request_rules = 1;\n\n  // The list of rules to apply to responses.\n  repeated Rule response_rules = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/filters/http/header_to_metadata/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.header_to_metadata.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/regex.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v4alpha\";\noption java_outer_classname = \"HeaderToMetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Header-To-Metadata Filter]\n//\n// The configuration for transforming headers into metadata. This is useful\n// for matching load balancer subsets, logging, etc.\n//\n// Header to Metadata :ref:`configuration overview <config_http_filters_header_to_metadata>`.\n// [#extension: envoy.filters.http.header_to_metadata]\n\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.header_to_metadata.v3.Config\";\n\n  enum ValueType {\n    STRING = 0;\n\n    NUMBER = 1;\n\n    // The value is a serialized `protobuf.Value\n    // <https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/struct.proto#L62>`_.\n    PROTOBUF_VALUE = 2;\n  }\n\n  // ValueEncode defines the encoding algorithm.\n  enum ValueEncode {\n    // The value is not encoded.\n    NONE = 0;\n\n    // The value is encoded in `Base64 <https://tools.ietf.org/html/rfc4648#section-4>`_.\n    // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the\n    // non-ASCII characters in the header.\n    BASE64 = 1;\n  }\n\n  // [#next-free-field: 7]\n  message KeyValuePair {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.http.header_to_metadata.v3.Config.KeyValuePair\";\n\n    // The namespace — if this is empty, the filter's namespace will be used.\n    string metadata_namespace 
= 1;\n\n    // The key to use within the namespace.\n    string key = 2 [(validate.rules).string = {min_len: 1}];\n\n    oneof value_type {\n      // The value to pair with the given key.\n      //\n      // When used for a\n      // :ref:`on_header_present <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_present>`\n      // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added.\n      //\n      // When used for a :ref:`on_header_missing <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_missing>`\n      // case, a non-empty value must be provided otherwise no metadata is added.\n      string value = 3;\n\n      // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value\n      // is used as-is.\n      //\n      // This is only used for :ref:`on_header_present <envoy_v3_api_field_extensions.filters.http.header_to_metadata.v3.Config.Rule.on_header_present>`.\n      //\n      // Note: if the `value` field is non-empty this field should be empty.\n      type.matcher.v4alpha.RegexMatchAndSubstitute regex_value_rewrite = 6;\n    }\n\n    // The value's type — defaults to string.\n    ValueType type = 4 [(validate.rules).enum = {defined_only: true}];\n\n    // How is the value encoded, default is NONE (not encoded).\n    // The value will be decoded accordingly before storing to metadata.\n    ValueEncode encode = 5;\n  }\n\n  // A Rule defines what metadata to apply when a header is present or missing.\n  // [#next-free-field: 6]\n  message Rule {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule\";\n\n    oneof header_cookie_specifier {\n      // Specifies that a match will be performed on the value of a header or a cookie.\n      //\n      // The header to be extracted.\n      
string header = 1\n          [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n      // The cookie to be extracted.\n      string cookie = 5\n          [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n    }\n\n    // If the header or cookie is present, apply this metadata KeyValuePair.\n    //\n    // If the value in the KeyValuePair is non-empty, it'll be used instead\n    // of the header or cookie value.\n    KeyValuePair on_present = 2;\n\n    // If the header or cookie is not present, apply this metadata KeyValuePair.\n    //\n    // The value in the KeyValuePair must be set, since it'll be used in lieu\n    // of the missing header or cookie value.\n    KeyValuePair on_missing = 3;\n\n    // Whether or not to remove the header after a rule is applied.\n    //\n    // This prevents headers from leaking.\n    // This field is not supported in case of a cookie.\n    bool remove = 4;\n  }\n\n  // The list of rules to apply to requests.\n  repeated Rule request_rules = 1;\n\n  // The list of rules to apply to responses.\n  repeated Rule response_rules = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/health_check/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.health_check.v3;\n\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.health_check.v3\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Health check]\n// Health check :ref:`configuration overview <config_http_filters_health_check>`.\n// [#extension: envoy.filters.http.health_check]\n\n// [#next-free-field: 6]\nmessage HealthCheck {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.health_check.v2.HealthCheck\";\n\n  reserved 2;\n\n  // Specifies whether the filter operates in pass through mode or not.\n  google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}];\n\n  // If operating in pass through mode, the amount of time in milliseconds\n  // that the filter should cache the upstream response.\n  google.protobuf.Duration cache_time = 3;\n\n  // If operating in non-pass-through mode, specifies a set of upstream cluster\n  // names and the minimum percentage of servers in each of those clusters that\n  // must be healthy or degraded in order for the filter to return a 200.\n  //\n  // .. note::\n  //\n  //    This value is interpreted as an integer by truncating, so 12.50% will be calculated\n  //    as if it were 12%.\n  map<string, type.v3.Percent> cluster_min_healthy_percentages = 4;\n\n  // Specifies a set of health check request headers to match on. 
The health check filter will\n  // check a request’s headers against all the specified headers. To specify the health check\n  // endpoint, set the ``:path`` header to match on.\n  repeated config.route.v3.HeaderMatcher headers = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/health_check/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.health_check.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.health_check.v4alpha\";\noption java_outer_classname = \"HealthCheckProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Health check]\n// Health check :ref:`configuration overview <config_http_filters_health_check>`.\n// [#extension: envoy.filters.http.health_check]\n\n// [#next-free-field: 6]\nmessage HealthCheck {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.health_check.v3.HealthCheck\";\n\n  reserved 2;\n\n  // Specifies whether the filter operates in pass through mode or not.\n  google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}];\n\n  // If operating in pass through mode, the amount of time in milliseconds\n  // that the filter should cache the upstream response.\n  google.protobuf.Duration cache_time = 3;\n\n  // If operating in non-pass-through mode, specifies a set of upstream cluster\n  // names and the minimum percentage of servers in each of those clusters that\n  // must be healthy or degraded in order for the filter to return a 200.\n  //\n  // .. note::\n  //\n  //    This value is interpreted as an integer by truncating, so 12.50% will be calculated\n  //    as if it were 12%.\n  map<string, type.v3.Percent> cluster_min_healthy_percentages = 4;\n\n  // Specifies a set of health check request headers to match on. 
The health check filter will\n  // check a request’s headers against all the specified headers. To specify the health check\n  // endpoint, set the ``:path`` header to match on.\n  repeated config.route.v4alpha.HeaderMatcher headers = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/ip_tagging/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.ip_tagging.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.ip_tagging.v3\";\noption java_outer_classname = \"IpTaggingProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: IP tagging]\n// IP tagging :ref:`configuration overview <config_http_filters_ip_tagging>`.\n// [#extension: envoy.filters.http.ip_tagging]\n\nmessage IPTagging {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.ip_tagging.v2.IPTagging\";\n\n  // The type of requests the filter should apply to. The supported types\n  // are internal, external or both. The\n  // :ref:`x-forwarded-for<config_http_conn_man_headers_x-forwarded-for_internal_origin>` header is\n  // used to determine if a request is internal and will result in\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>`\n  // being set. The filter defaults to both, and it will apply to all request types.\n  enum RequestType {\n    // Both external and internal requests will be tagged. This is the default value.\n    BOTH = 0;\n\n    // Only internal requests will be tagged.\n    INTERNAL = 1;\n\n    // Only external requests will be tagged.\n    EXTERNAL = 2;\n  }\n\n  // Supplies the IP tag name and the IP address subnets.\n  message IPTag {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.http.ip_tagging.v2.IPTagging.IPTag\";\n\n    // Specifies the IP tag name to apply.\n    string ip_tag_name = 1;\n\n    // A list of IP address subnets that will be tagged with\n    // ip_tag_name. 
Both IPv4 and IPv6 are supported.\n    repeated config.core.v3.CidrRange ip_list = 2;\n  }\n\n  // The type of request the filter should apply to.\n  RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system.\n  // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695]\n  // The set of IP tags for the filter.\n  repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/jwt_authn/v2alpha:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.jwt_authn.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/http_uri.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: JWT Authentication]\n// JWT Authentication :ref:`configuration overview <config_http_filters_jwt_authn>`.\n// [#extension: envoy.filters.http.jwt_authn]\n\n// Please see following for JWT authentication flow:\n//\n// * `JSON Web Token (JWT) <https://tools.ietf.org/html/rfc7519>`_\n// * `The OAuth 2.0 Authorization Framework <https://tools.ietf.org/html/rfc6749>`_\n// * `OpenID Connect <http://openid.net/connect>`_\n//\n// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies:\n//\n// * issuer: the principal that issues the JWT. It has to match the one from the token.\n// * allowed audiences: the ones in the token have to be listed here.\n// * how to fetch public key JWKS to verify the token signature.\n// * how to extract JWT token in the request.\n// * how to pass successfully verified token payload.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     issuer: https://example.com\n//     audiences:\n//     - bookstore_android.apps.googleusercontent.com\n//     - bookstore_web.apps.googleusercontent.com\n//     remote_jwks:\n//       http_uri:\n//         uri: https://example.com/.well-known/jwks.json\n//         cluster: example_jwks_cluster\n//       cache_duration:\n//         seconds: 300\n//\n// [#next-free-field: 10]\nmessage JwtProvider {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider\";\n\n  // Specify the `principal <https://tools.ietf.org/html/rfc7519#section-4.1.1>`_ that issued\n  // the JWT, usually a URL or an email address.\n  //\n  // Example: https://securetoken.google.com\n  // Example: 1234567-compute@developer.gserviceaccount.com\n  //\n  string issuer = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The list of JWT `audiences <https://tools.ietf.org/html/rfc7519#section-4.1.3>`_ are\n  // allowed to access. A JWT containing any of these audiences will be accepted. If not specified,\n  // will not check audiences in the token.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //     audiences:\n  //     - bookstore_android.apps.googleusercontent.com\n  //     - bookstore_web.apps.googleusercontent.com\n  //\n  repeated string audiences = 2;\n\n  // `JSON Web Key Set (JWKS) <https://tools.ietf.org/html/rfc7517#appendix-A>`_ is needed to\n  // validate signature of a JWT. This field specifies where to fetch JWKS.\n  oneof jwks_source_specifier {\n    option (validate.required) = true;\n\n    // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP\n    // URI and how the fetched JWKS should be cached.\n    //\n    // Example:\n    //\n    // .. 
code-block:: yaml\n    //\n    //    remote_jwks:\n    //      http_uri:\n    //        uri: https://www.googleapis.com/oauth2/v1/certs\n    //        cluster: jwt.www.googleapis.com|443\n    //      cache_duration:\n    //        seconds: 300\n    //\n    RemoteJwks remote_jwks = 3;\n\n    // JWKS is in local data source. It could be either in a local file or embedded in the\n    // inline_string.\n    //\n    // Example: local file\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      filename: /etc/envoy/jwks/jwks1.txt\n    //\n    // Example: inline_string\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      inline_string: ACADADADADA\n    //\n    config.core.v3.DataSource local_jwks = 4;\n  }\n\n  // If false, the JWT is removed in the request after a success verification. If true, the JWT is\n  // not removed in the request. Default value is false.\n  bool forward = 5;\n\n  // Two fields below define where to extract the JWT from an HTTP request.\n  //\n  // If no explicit location is specified, the following default locations are tried in order:\n  //\n  // 1. The Authorization header using the `Bearer schema\n  // <https://tools.ietf.org/html/rfc6750#section-2.1>`_. Example::\n  //\n  //    Authorization: Bearer <token>.\n  //\n  // 2. `access_token <https://tools.ietf.org/html/rfc6750#section-2.3>`_ query parameter.\n  //\n  // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations\n  // its provider specified or from the default locations.\n  //\n  // Specify the HTTP headers to extract JWT token. For examples, following config:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_headers:\n  //   - name: x-goog-iap-jwt-assertion\n  //\n  // can be used to extract token from header::\n  //\n  //   ``x-goog-iap-jwt-assertion: <JWT>``.\n  //\n  repeated JwtHeader from_headers = 6;\n\n  // JWT is sent in a query parameter. 
`jwt_params` represents the query parameter names.\n  //\n  // For example, if config is:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_params:\n  //   - jwt_token\n  //\n  // The JWT format in query parameter is::\n  //\n  //    /path?jwt_token=<JWT>\n  //\n  repeated string from_params = 7;\n\n  // This field specifies the header name to forward a successfully verified JWT payload to the\n  // backend. The forwarded data is::\n  //\n  //    base64url_encoded(jwt_payload_in_JSON)\n  //\n  // If it is not specified, the payload will not be forwarded.\n  string forward_payload_header = 8\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata\n  // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn**\n  // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields*\n  // and the value is the *protobuf::Struct* converted from JWT JSON payload.\n  //\n  // For example, if payload_in_metadata is *my_payload*:\n  //\n  // .. code-block:: yaml\n  //\n  //   envoy.filters.http.jwt_authn:\n  //     my_payload:\n  //       iss: https://example.com\n  //       sub: test@example.com\n  //       aud: https://example.com\n  //       exp: 1501281058\n  //\n  string payload_in_metadata = 9;\n}\n\n// This message specifies how to fetch JWKS from remote and how to cache it.\nmessage RemoteJwks {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.RemoteJwks\";\n\n  // The HTTP URI to fetch the JWKS. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //    http_uri:\n  //      uri: https://www.googleapis.com/oauth2/v1/certs\n  //      cluster: jwt.www.googleapis.com|443\n  //\n  config.core.v3.HttpUri http_uri = 1;\n\n  // Duration after which the cached JWKS should be expired. 
If not specified, default cache\n  // duration is 5 minutes.\n  google.protobuf.Duration cache_duration = 2;\n}\n\n// This message specifies a header location to extract JWT token.\nmessage JwtHeader {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtHeader\";\n\n  // The HTTP header name.\n  string name = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // The value prefix. The value format is \"value_prefix<token>\"\n  // For example, for \"Authorization: Bearer <token>\", value_prefix=\"Bearer \" with a space at the\n  // end.\n  string value_prefix = 2\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n}\n\n// Specify a required provider with audiences.\nmessage ProviderWithAudiences {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.ProviderWithAudiences\";\n\n  // Specify a required provider name.\n  string provider_name = 1;\n\n  // This field overrides the one specified in the JwtProvider.\n  repeated string audiences = 2;\n}\n\n// This message specifies a Jwt requirement. An empty message means JWT verification is not\n// required. Here are some config examples:\n//\n// .. 
code-block:: yaml\n//\n//  # Example 1: not required with an empty message\n//\n//  # Example 2: require A\n//  provider_name: provider-A\n//\n//  # Example 3: require A or B\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 4: require A and B\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 5: require A and (B or C)\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_any:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 6: require A or (B and C)\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_all:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 7: A is optional (if token from A is provided, it must be valid, but also allows\n//  missing token.)\n//  requires_any:\n//    requirements:\n//    - provider_name: provider-A\n//    - allow_missing: {}\n//\n//  # Example 8: A is optional and B is required.\n//  requires_all:\n//    requirements:\n//    - requires_any:\n//        requirements:\n//        - provider_name: provider-A\n//        - allow_missing: {}\n//    - provider_name: provider-B\n//\n// [#next-free-field: 7]\nmessage JwtRequirement {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirement\";\n\n  oneof requires_type {\n    // Specify a required provider name.\n    string provider_name = 1;\n\n    // Specify a required provider with audiences.\n    ProviderWithAudiences provider_and_audiences = 2;\n\n    // Specify list of JwtRequirement. 
Their results are OR-ed.\n    // If any one of them passes, the result is passed.\n    JwtRequirementOrList requires_any = 3;\n\n    // Specify list of JwtRequirement. Their results are AND-ed.\n    // All of them must pass, if one of them fails or missing, it fails.\n    JwtRequirementAndList requires_all = 4;\n\n    // The requirement is always satisfied even if JWT is missing or the JWT\n    // verification fails. A typical usage is: this filter is used to only verify\n    // JWTs and pass the verified JWT payloads to another filter, the other filter\n    // will make decision. In this mode, all JWT tokens will be verified.\n    google.protobuf.Empty allow_missing_or_failed = 5;\n\n    // The requirement is satisfied if JWT is missing, but failed if JWT is\n    // presented but invalid. Similar to allow_missing_or_failed, this is used\n    // to only verify JWTs and pass the verified payload to another filter. The\n    // different is this mode will reject requests with invalid tokens.\n    google.protobuf.Empty allow_missing = 6;\n  }\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are OR-ed; if any one of them passes, the result is passed\nmessage JwtRequirementOrList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementOrList\";\n\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails.\nmessage JwtRequirementAndList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementAndList\";\n\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a Jwt 
requirement for a specific Route condition.\n// Example 1:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /healthz\n//\n// In above example, \"requires\" field is empty for /healthz prefix match,\n// it means that requests matching the path prefix don't require JWT authentication.\n//\n// Example 2:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /\n//      requires: { provider_name: provider-A }\n//\n// In above example, all requests matched the path prefix require jwt authentication\n// from \"provider-A\".\nmessage RequirementRule {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.RequirementRule\";\n\n  // The route matching parameter. Only when the match is satisfied, the \"requires\" field will\n  // apply.\n  //\n  // For example: following match will match all requests.\n  //\n  // .. code-block:: yaml\n  //\n  //    match:\n  //      prefix: /\n  //\n  config.route.v3.RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Specify a Jwt Requirement. Please detail comment in message JwtRequirement.\n  JwtRequirement requires = 2;\n}\n\n// This message specifies Jwt requirements based on stream_info.filterState.\n// This FilterState should use `Router::StringAccessor` object to set a string value.\n// Other HTTP filters can use it to specify Jwt requirements dynamically.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//    name: jwt_selector\n//    requires:\n//      issuer_1:\n//        provider_name: issuer1\n//      issuer_2:\n//        provider_name: issuer2\n//\n// If a filter set \"jwt_selector\" with \"issuer_1\" to FilterState for a request,\n// jwt_authn filter will use JwtRequirement{\"provider_name\": \"issuer1\"} to verify.\nmessage FilterStateRule {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.FilterStateRule\";\n\n  // The filter state name to retrieve the `Router::StringAccessor` object.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A map of string keys to requirements. The string key is the string value\n  // in the FilterState with the name specified in the *name* field above.\n  map<string, JwtRequirement> requires = 3;\n}\n\n// This is the Envoy HTTP filter config for JWT authentication.\n//\n// For example:\n//\n// .. code-block:: yaml\n//\n//   providers:\n//      provider1:\n//        issuer: issuer1\n//        audiences:\n//        - audience1\n//        - audience2\n//        remote_jwks:\n//          http_uri:\n//            uri: https://example.com/.well-known/jwks.json\n//            cluster: example_jwks_cluster\n//      provider2:\n//        issuer: issuer2\n//        local_jwks:\n//          inline_string: jwks_string\n//\n//   rules:\n//      # Not jwt verification is required for /health path\n//      - match:\n//          prefix: /health\n//\n//      # Jwt verification for provider1 is required for path prefixed with \"prefix\"\n//      - match:\n//          prefix: /prefix\n//        requires:\n//          provider_name: provider1\n//\n//      # Jwt verification for either provider1 or provider2 is required for all other requests.\n//      - match:\n//          prefix: /\n//        requires:\n//          requires_any:\n//            requirements:\n//              - provider_name: provider1\n//              - provider_name: 
provider2\n//\nmessage JwtAuthentication {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.jwt_authn.v2alpha.JwtAuthentication\";\n\n  // Map of provider names to JwtProviders.\n  //\n  // .. code-block:: yaml\n  //\n  //   providers:\n  //     provider1:\n  //        issuer: issuer1\n  //        audiences:\n  //        - audience1\n  //        - audience2\n  //        remote_jwks:\n  //          http_uri:\n  //            uri: https://example.com/.well-known/jwks.json\n  //            cluster: example_jwks_cluster\n  //      provider2:\n  //        issuer: provider2\n  //        local_jwks:\n  //          inline_string: jwks_string\n  //\n  map<string, JwtProvider> providers = 1;\n\n  // Specifies requirements based on the route matches. The first matched requirement will be\n  // applied. If there are overlapped match conditions, please put the most specific match first.\n  //\n  // Examples\n  //\n  // .. code-block:: yaml\n  //\n  //   rules:\n  //     - match:\n  //         prefix: /healthz\n  //     - match:\n  //         prefix: /baz\n  //       requires:\n  //         provider_name: provider1\n  //     - match:\n  //         prefix: /foo\n  //       requires:\n  //         requires_any:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //     - match:\n  //         prefix: /bar\n  //       requires:\n  //         requires_all:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //\n  repeated RequirementRule rules = 2;\n\n  // This message specifies Jwt requirements based on stream_info.filterState.\n  // Other HTTP filters can use it to specify Jwt requirements dynamically.\n  // The *rules* field above is checked first, if it could not find any matches,\n  // check this one.\n  FilterStateRule filter_state_rules = 3;\n\n  // When set to true, bypass the `CORS 
preflight request\n  // <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight>`_ regardless of JWT\n  // requirements specified in the rules.\n  bool bypass_cors_preflight = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/jwt_authn/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.jwt_authn.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/http_uri.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/empty.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v4alpha\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: JWT Authentication]\n// JWT Authentication :ref:`configuration overview <config_http_filters_jwt_authn>`.\n// [#extension: envoy.filters.http.jwt_authn]\n\n// Please see following for JWT authentication flow:\n//\n// * `JSON Web Token (JWT) <https://tools.ietf.org/html/rfc7519>`_\n// * `The OAuth 2.0 Authorization Framework <https://tools.ietf.org/html/rfc6749>`_\n// * `OpenID Connect <http://openid.net/connect>`_\n//\n// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies:\n//\n// * issuer: the principal that issues the JWT. It has to match the one from the token.\n// * allowed audiences: the ones in the token have to be listed here.\n// * how to fetch public key JWKS to verify the token signature.\n// * how to extract JWT token in the request.\n// * how to pass successfully verified token payload.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//     issuer: https://example.com\n//     audiences:\n//     - bookstore_android.apps.googleusercontent.com\n//     - bookstore_web.apps.googleusercontent.com\n//     remote_jwks:\n//       http_uri:\n//         uri: https://example.com/.well-known/jwks.json\n//         cluster: example_jwks_cluster\n//       cache_duration:\n//         seconds: 300\n//\n// [#next-free-field: 10]\nmessage JwtProvider {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtProvider\";\n\n  // Specify the `principal <https://tools.ietf.org/html/rfc7519#section-4.1.1>`_ that issued\n  // the JWT, usually a URL or an email address.\n  //\n  // Example: https://securetoken.google.com\n  // Example: 1234567-compute@developer.gserviceaccount.com\n  //\n  string issuer = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The list of JWT `audiences <https://tools.ietf.org/html/rfc7519#section-4.1.3>`_ are\n  // allowed to access. A JWT containing any of these audiences will be accepted. If not specified,\n  // will not check audiences in the token.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //     audiences:\n  //     - bookstore_android.apps.googleusercontent.com\n  //     - bookstore_web.apps.googleusercontent.com\n  //\n  repeated string audiences = 2;\n\n  // `JSON Web Key Set (JWKS) <https://tools.ietf.org/html/rfc7517#appendix-A>`_ is needed to\n  // validate signature of a JWT. This field specifies where to fetch JWKS.\n  oneof jwks_source_specifier {\n    option (validate.required) = true;\n\n    // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP\n    // URI and how the fetched JWKS should be cached.\n    //\n    // Example:\n    //\n    // .. 
code-block:: yaml\n    //\n    //    remote_jwks:\n    //      http_uri:\n    //        uri: https://www.googleapis.com/oauth2/v1/certs\n    //        cluster: jwt.www.googleapis.com|443\n    //      cache_duration:\n    //        seconds: 300\n    //\n    RemoteJwks remote_jwks = 3;\n\n    // JWKS is in local data source. It could be either in a local file or embedded in the\n    // inline_string.\n    //\n    // Example: local file\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      filename: /etc/envoy/jwks/jwks1.txt\n    //\n    // Example: inline_string\n    //\n    // .. code-block:: yaml\n    //\n    //    local_jwks:\n    //      inline_string: ACADADADADA\n    //\n    config.core.v4alpha.DataSource local_jwks = 4;\n  }\n\n  // If false, the JWT is removed in the request after a success verification. If true, the JWT is\n  // not removed in the request. Default value is false.\n  bool forward = 5;\n\n  // Two fields below define where to extract the JWT from an HTTP request.\n  //\n  // If no explicit location is specified, the following default locations are tried in order:\n  //\n  // 1. The Authorization header using the `Bearer schema\n  // <https://tools.ietf.org/html/rfc6750#section-2.1>`_. Example::\n  //\n  //    Authorization: Bearer <token>.\n  //\n  // 2. `access_token <https://tools.ietf.org/html/rfc6750#section-2.3>`_ query parameter.\n  //\n  // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations\n  // its provider specified or from the default locations.\n  //\n  // Specify the HTTP headers to extract JWT token. For examples, following config:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_headers:\n  //   - name: x-goog-iap-jwt-assertion\n  //\n  // can be used to extract token from header::\n  //\n  //   ``x-goog-iap-jwt-assertion: <JWT>``.\n  //\n  repeated JwtHeader from_headers = 6;\n\n  // JWT is sent in a query parameter. 
`jwt_params` represents the query parameter names.\n  //\n  // For example, if config is:\n  //\n  // .. code-block:: yaml\n  //\n  //   from_params:\n  //   - jwt_token\n  //\n  // The JWT format in query parameter is::\n  //\n  //    /path?jwt_token=<JWT>\n  //\n  repeated string from_params = 7;\n\n  // This field specifies the header name to forward a successfully verified JWT payload to the\n  // backend. The forwarded data is::\n  //\n  //    base64url_encoded(jwt_payload_in_JSON)\n  //\n  // If it is not specified, the payload will not be forwarded.\n  string forward_payload_header = 8\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata\n  // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn**\n  // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields*\n  // and the value is the *protobuf::Struct* converted from JWT JSON payload.\n  //\n  // For example, if payload_in_metadata is *my_payload*:\n  //\n  // .. code-block:: yaml\n  //\n  //   envoy.filters.http.jwt_authn:\n  //     my_payload:\n  //       iss: https://example.com\n  //       sub: test@example.com\n  //       aud: https://example.com\n  //       exp: 1501281058\n  //\n  string payload_in_metadata = 9;\n}\n\n// This message specifies how to fetch JWKS from remote and how to cache it.\nmessage RemoteJwks {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.RemoteJwks\";\n\n  // The HTTP URI to fetch the JWKS. For example:\n  //\n  // .. code-block:: yaml\n  //\n  //    http_uri:\n  //      uri: https://www.googleapis.com/oauth2/v1/certs\n  //      cluster: jwt.www.googleapis.com|443\n  //\n  config.core.v4alpha.HttpUri http_uri = 1;\n\n  // Duration after which the cached JWKS should be expired. 
If not specified, default cache\n  // duration is 5 minutes.\n  google.protobuf.Duration cache_duration = 2;\n}\n\n// This message specifies a header location to extract JWT token.\nmessage JwtHeader {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtHeader\";\n\n  // The HTTP header name.\n  string name = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n  // The value prefix. The value format is \"value_prefix<token>\"\n  // For example, for \"Authorization: Bearer <token>\", value_prefix=\"Bearer \" with a space at the\n  // end.\n  string value_prefix = 2\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n}\n\n// Specify a required provider with audiences.\nmessage ProviderWithAudiences {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.ProviderWithAudiences\";\n\n  // Specify a required provider name.\n  string provider_name = 1;\n\n  // This field overrides the one specified in the JwtProvider.\n  repeated string audiences = 2;\n}\n\n// This message specifies a Jwt requirement. An empty message means JWT verification is not\n// required. Here are some config examples:\n//\n// .. 
code-block:: yaml\n//\n//  # Example 1: not required with an empty message\n//\n//  # Example 2: require A\n//  provider_name: provider-A\n//\n//  # Example 3: require A or B\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 4: require A and B\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - provider_name: provider-B\n//\n//  # Example 5: require A and (B or C)\n//  requires_all:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_any:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 6: require A or (B and C)\n//  requires_any:\n//    requirements:\n//      - provider_name: provider-A\n//      - requires_all:\n//        requirements:\n//          - provider_name: provider-B\n//          - provider_name: provider-C\n//\n//  # Example 7: A is optional (if token from A is provided, it must be valid, but also allows\n//  missing token.)\n//  requires_any:\n//    requirements:\n//    - provider_name: provider-A\n//    - allow_missing: {}\n//\n//  # Example 8: A is optional and B is required.\n//  requires_all:\n//    requirements:\n//    - requires_any:\n//        requirements:\n//        - provider_name: provider-A\n//        - allow_missing: {}\n//    - provider_name: provider-B\n//\n// [#next-free-field: 7]\nmessage JwtRequirement {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtRequirement\";\n\n  oneof requires_type {\n    // Specify a required provider name.\n    string provider_name = 1;\n\n    // Specify a required provider with audiences.\n    ProviderWithAudiences provider_and_audiences = 2;\n\n    // Specify list of JwtRequirement. 
Their results are OR-ed.\n    // If any one of them passes, the result is passed.\n    JwtRequirementOrList requires_any = 3;\n\n    // Specify list of JwtRequirement. Their results are AND-ed.\n    // All of them must pass, if one of them fails or missing, it fails.\n    JwtRequirementAndList requires_all = 4;\n\n    // The requirement is always satisfied even if JWT is missing or the JWT\n    // verification fails. A typical usage is: this filter is used to only verify\n    // JWTs and pass the verified JWT payloads to another filter, the other filter\n    // will make decision. In this mode, all JWT tokens will be verified.\n    google.protobuf.Empty allow_missing_or_failed = 5;\n\n    // The requirement is satisfied if JWT is missing, but failed if JWT is\n    // presented but invalid. Similar to allow_missing_or_failed, this is used\n    // to only verify JWTs and pass the verified payload to another filter. The\n    // different is this mode will reject requests with invalid tokens.\n    google.protobuf.Empty allow_missing = 6;\n  }\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are OR-ed; if any one of them passes, the result is passed\nmessage JwtRequirementOrList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementOrList\";\n\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a list of RequiredProvider.\n// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails.\nmessage JwtRequirementAndList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementAndList\";\n\n  // Specify a list of JwtRequirement.\n  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];\n}\n\n// This message specifies a Jwt 
requirement for a specific Route condition.\n// Example 1:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /healthz\n//\n// In above example, \"requires\" field is empty for /healthz prefix match,\n// it means that requests matching the path prefix don't require JWT authentication.\n//\n// Example 2:\n//\n// .. code-block:: yaml\n//\n//    - match:\n//        prefix: /\n//      requires: { provider_name: provider-A }\n//\n// In above example, all requests matched the path prefix require jwt authentication\n// from \"provider-A\".\nmessage RequirementRule {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.RequirementRule\";\n\n  // The route matching parameter. Only when the match is satisfied, the \"requires\" field will\n  // apply.\n  //\n  // For example: following match will match all requests.\n  //\n  // .. code-block:: yaml\n  //\n  //    match:\n  //      prefix: /\n  //\n  config.route.v4alpha.RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Specify a Jwt Requirement. Please detail comment in message JwtRequirement.\n  JwtRequirement requires = 2;\n}\n\n// This message specifies Jwt requirements based on stream_info.filterState.\n// This FilterState should use `Router::StringAccessor` object to set a string value.\n// Other HTTP filters can use it to specify Jwt requirements dynamically.\n//\n// Example:\n//\n// .. 
code-block:: yaml\n//\n//    name: jwt_selector\n//    requires:\n//      issuer_1:\n//        provider_name: issuer1\n//      issuer_2:\n//        provider_name: issuer2\n//\n// If a filter set \"jwt_selector\" with \"issuer_1\" to FilterState for a request,\n// jwt_authn filter will use JwtRequirement{\"provider_name\": \"issuer1\"} to verify.\nmessage FilterStateRule {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule\";\n\n  // The filter state name to retrieve the `Router::StringAccessor` object.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // A map of string keys to requirements. The string key is the string value\n  // in the FilterState with the name specified in the *name* field above.\n  map<string, JwtRequirement> requires = 3;\n}\n\n// This is the Envoy HTTP filter config for JWT authentication.\n//\n// For example:\n//\n// .. code-block:: yaml\n//\n//   providers:\n//      provider1:\n//        issuer: issuer1\n//        audiences:\n//        - audience1\n//        - audience2\n//        remote_jwks:\n//          http_uri:\n//            uri: https://example.com/.well-known/jwks.json\n//            cluster: example_jwks_cluster\n//      provider2:\n//        issuer: issuer2\n//        local_jwks:\n//          inline_string: jwks_string\n//\n//   rules:\n//      # Not jwt verification is required for /health path\n//      - match:\n//          prefix: /health\n//\n//      # Jwt verification for provider1 is required for path prefixed with \"prefix\"\n//      - match:\n//          prefix: /prefix\n//        requires:\n//          provider_name: provider1\n//\n//      # Jwt verification for either provider1 or provider2 is required for all other requests.\n//      - match:\n//          prefix: /\n//        requires:\n//          requires_any:\n//            requirements:\n//              - provider_name: provider1\n//              - provider_name: 
provider2\n//\nmessage JwtAuthentication {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication\";\n\n  // Map of provider names to JwtProviders.\n  //\n  // .. code-block:: yaml\n  //\n  //   providers:\n  //     provider1:\n  //        issuer: issuer1\n  //        audiences:\n  //        - audience1\n  //        - audience2\n  //        remote_jwks:\n  //          http_uri:\n  //            uri: https://example.com/.well-known/jwks.json\n  //            cluster: example_jwks_cluster\n  //      provider2:\n  //        issuer: provider2\n  //        local_jwks:\n  //          inline_string: jwks_string\n  //\n  map<string, JwtProvider> providers = 1;\n\n  // Specifies requirements based on the route matches. The first matched requirement will be\n  // applied. If there are overlapped match conditions, please put the most specific match first.\n  //\n  // Examples\n  //\n  // .. code-block:: yaml\n  //\n  //   rules:\n  //     - match:\n  //         prefix: /healthz\n  //     - match:\n  //         prefix: /baz\n  //       requires:\n  //         provider_name: provider1\n  //     - match:\n  //         prefix: /foo\n  //       requires:\n  //         requires_any:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //     - match:\n  //         prefix: /bar\n  //       requires:\n  //         requires_all:\n  //           requirements:\n  //             - provider_name: provider1\n  //             - provider_name: provider2\n  //\n  repeated RequirementRule rules = 2;\n\n  // This message specifies Jwt requirements based on stream_info.filterState.\n  // Other HTTP filters can use it to specify Jwt requirements dynamically.\n  // The *rules* field above is checked first, if it could not find any matches,\n  // check this one.\n  FilterStateRule filter_state_rules = 3;\n\n  // When set to true, bypass the `CORS 
preflight request\n  // <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight>`_ regardless of JWT\n  // requirements specified in the rules.\n  bool bypass_cors_preflight = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.local_ratelimit.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/http_status.proto\";\nimport \"envoy/type/v3/token_bucket.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.local_ratelimit.v3\";\noption java_outer_classname = \"LocalRateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Local Rate limit]\n// Local Rate limit :ref:`configuration overview <config_http_filters_local_rate_limit>`.\n// [#extension: envoy.filters.http.local_ratelimit]\n\n// [#next-free-field: 7]\nmessage LocalRateLimit {\n  // The human readable prefix to use when emitting stats.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // This field allows for a custom HTTP response status code to the downstream client when\n  // the request has been rate limited.\n  // Defaults to 429 (TooManyRequests).\n  //\n  // .. note::\n  //   If this is set to < 400, 429 will be used instead.\n  type.v3.HttpStatus status = 2;\n\n  // The token bucket configuration to use for rate limiting requests that are processed by this\n  // filter. Each request processed by the filter consumes a single token. If the token is available,\n  // the request will be allowed. If no tokens are available, the request will receive the configured\n  // rate limit status.\n  //\n  // .. note::\n  //   It's fine for the token bucket to be unset for the global configuration since the rate limit\n  //   can be applied at a the virtual host or route level. Thus, the token bucket must be set\n  //   for the per route configuration otherwise the config will be rejected.\n  //\n  // .. 
note::\n  //   When using per route configuration, the bucket becomes unique to that route.\n  //\n  // .. note::\n  //   In the current implementation the token bucket's :ref:`fill_interval\n  //   <envoy_api_field_type.v3.TokenBucket.fill_interval>` must be >= 50ms to avoid too aggressive\n  //   refills.\n  type.v3.TokenBucket token_bucket = 3;\n\n  // If set, this will enable -- but not necessarily enforce -- the rate limit for the given\n  // fraction of requests.\n  // Defaults to 0% of requests for safety.\n  config.core.v3.RuntimeFractionalPercent filter_enabled = 4;\n\n  // If set, this will enforce the rate limit decisions for the given fraction of requests.\n  //\n  // Note: this only applies to the fraction of enabled requests.\n  //\n  // Defaults to 0% of requests for safety.\n  config.core.v3.RuntimeFractionalPercent filter_enforced = 5;\n\n  // Specifies a list of HTTP headers that should be added to each response for requests that\n  // have been rate limited.\n  repeated config.core.v3.HeaderValueOption response_headers_to_add = 6\n      [(validate.rules).repeated = {max_items: 10}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/http/lua/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.lua.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.lua.v3\";\noption java_outer_classname = \"LuaProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Lua]\n// Lua :ref:`configuration overview <config_http_filters_lua>`.\n// [#extension: envoy.filters.http.lua]\n\nmessage Lua {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.lua.v2.Lua\";\n\n  // The Lua code that Envoy will execute. This can be a very small script that\n  // further loads code from disk if desired. Note that if JSON configuration is used, the code must\n  // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line\n  // strings so complex scripts can be easily expressed inline in the configuration.\n  string inline_code = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute\n  // <envoy_v3_api_msg_extensions.filters.http.lua.v3.LuaPerRoute>`. The Lua source codes can be\n  // loaded from inline string or local files.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //   source_codes:\n  //     hello.lua:\n  //       inline_string: |\n  //         function envoy_on_response(response_handle)\n  //           -- Do something.\n  //         end\n  //     world.lua:\n  //       filename: /etc/lua/world.lua\n  //\n  map<string, config.core.v3.DataSource> source_codes = 2;\n}\n\nmessage LuaPerRoute {\n  oneof override {\n    option (validate.required) = true;\n\n    // Disable the Lua filter for this particular vhost or route. 
If disabled is specified in\n    // multiple per-filter-configs, the most specific one will be used.\n    bool disabled = 1 [(validate.rules).bool = {const: true}];\n\n    // A name of a Lua source code stored in\n    // :ref:`Lua.source_codes <envoy_v3_api_field_extensions.filters.http.lua.v3.Lua.source_codes>`.\n    string name = 2 [(validate.rules).string = {min_len: 1}];\n\n    // A configured per-route Lua source code that can be served by RDS or provided inline.\n    config.core.v3.DataSource source_code = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.oauth2.v3alpha;\n\nimport \"envoy/config/core/v3/http_uri.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/secret.proto\";\nimport \"envoy/type/matcher/v3/path.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.oauth2.v3alpha\";\noption java_outer_classname = \"OauthProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: OAuth]\n// OAuth :ref:`configuration overview <config_http_filters_oauth>`.\n// [#extension: envoy.filters.http.oauth2]\n//\n\nmessage OAuth2Credentials {\n  // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server.\n  string client_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The secret used to retrieve the access token. 
This value will be URL encoded when sent to the OAuth server.\n  transport_sockets.tls.v3.SdsSecretConfig token_secret = 2\n      [(validate.rules).message = {required: true}];\n\n  // Configures how the secret token should be created.\n  oneof token_formation {\n    option (validate.required) = true;\n\n    // If present, the secret token will be a HMAC using the provided secret.\n    transport_sockets.tls.v3.SdsSecretConfig hmac_secret = 3\n        [(validate.rules).message = {required: true}];\n  }\n}\n\n// OAuth config\n//\n// [#next-free-field: 9]\nmessage OAuth2Config {\n  // Endpoint on the authorization server to retrieve the access token from.\n  config.core.v3.HttpUri token_endpoint = 1;\n\n  // The endpoint redirect to for authorization in response to unauthorized requests.\n  string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Credentials used for OAuth.\n  OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}];\n\n  // The redirect URI passed to the authorization endpoint. Supports header formatting\n  // tokens. 
For more information, including details on header value syntax, see the\n  // documentation on :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  //\n  // This URI should not contain any query parameters.\n  string redirect_uri = 4 [(validate.rules).string = {min_len: 1}];\n\n  // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server.\n  type.matcher.v3.PathMatcher redirect_path_matcher = 5\n      [(validate.rules).message = {required: true}];\n\n  // The path to sign a user out, clearing their credential cookies.\n  type.matcher.v3.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}];\n\n  // Forward the OAuth token as a Bearer to upstream web service.\n  bool forward_bearer_token = 7;\n\n  // Any request that matches any of the provided matchers will be passed through without OAuth validation.\n  repeated config.route.v3.HeaderMatcher pass_through_matcher = 8;\n}\n\n// Filter config.\nmessage OAuth2 {\n  // Leave this empty to disable OAuth2 for a specific route, using per filter config.\n  OAuth2Config config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/oauth2/v3alpha:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v4alpha:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.oauth2.v4alpha;\n\nimport \"envoy/config/core/v4alpha/http_uri.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/secret.proto\";\nimport \"envoy/type/matcher/v4alpha/path.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.oauth2.v4alpha\";\noption java_outer_classname = \"OauthProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: OAuth]\n// OAuth :ref:`configuration overview <config_http_filters_oauth>`.\n// [#extension: envoy.filters.http.oauth2]\n//\n\nmessage OAuth2Credentials {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Credentials\";\n\n  // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server.\n  string client_id = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The secret used to retrieve the access token. 
This value will be URL encoded when sent to the OAuth server.\n  transport_sockets.tls.v4alpha.SdsSecretConfig token_secret = 2\n      [(validate.rules).message = {required: true}];\n\n  // Configures how the secret token should be created.\n  oneof token_formation {\n    option (validate.required) = true;\n\n    // If present, the secret token will be a HMAC using the provided secret.\n    transport_sockets.tls.v4alpha.SdsSecretConfig hmac_secret = 3\n        [(validate.rules).message = {required: true}];\n  }\n}\n\n// OAuth config\n//\n// [#next-free-field: 9]\nmessage OAuth2Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Config\";\n\n  // Endpoint on the authorization server to retrieve the access token from.\n  config.core.v4alpha.HttpUri token_endpoint = 1;\n\n  // The endpoint redirect to for authorization in response to unauthorized requests.\n  string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Credentials used for OAuth.\n  OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}];\n\n  // The redirect URI passed to the authorization endpoint. Supports header formatting\n  // tokens. 
For more information, including details on header value syntax, see the\n  // documentation on :ref:`custom request headers <config_http_conn_man_headers_custom_request_headers>`.\n  //\n  // This URI should not contain any query parameters.\n  string redirect_uri = 4 [(validate.rules).string = {min_len: 1}];\n\n  // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server.\n  type.matcher.v4alpha.PathMatcher redirect_path_matcher = 5\n      [(validate.rules).message = {required: true}];\n\n  // The path to sign a user out, clearing their credential cookies.\n  type.matcher.v4alpha.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}];\n\n  // Forward the OAuth token as a Bearer to upstream web service.\n  bool forward_bearer_token = 7;\n\n  // Any request that matches any of the provided matchers will be passed through without OAuth validation.\n  repeated config.route.v4alpha.HeaderMatcher pass_through_matcher = 8;\n}\n\n// Filter config.\nmessage OAuth2 {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.oauth2.v3alpha.OAuth2\";\n\n  // Leave this empty to disable OAuth2 for a specific route, using per filter config.\n  OAuth2Config config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/on_demand/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.on_demand.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.on_demand.v3\";\noption java_outer_classname = \"OnDemandProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: OnDemand]\n// On demand :ref:`configuration overview <config_http_filters_on_demand>`.\n// [#extension: envoy.filters.http.on_demand]\n\nmessage OnDemand {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.on_demand.v2.OnDemand\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/original_src/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/original_src/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.original_src.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.original_src.v3\";\noption java_outer_classname = \"OriginalSrcProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Original Src Filter]\n// Use the Original source address on upstream connections.\n\n// The Original Src filter binds upstream connections to the original source address determined\n// for the request. This address could come from something like the Proxy Protocol filter, or it\n// could come from trusted http headers.\n// [#extension: envoy.filters.http.original_src]\nmessage OriginalSrc {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.original_src.v2alpha1.OriginalSrc\";\n\n  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to\n  // ensure that non-local addresses may be routed back through envoy when binding to the original\n  // source address. The option will not be applied if the mark is 0.\n  uint32 mark = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/rate_limit/v2:pkg\",\n        \"//envoy/config/ratelimit/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.ratelimit.v3;\n\nimport \"envoy/config/ratelimit/v3/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.ratelimit.v3\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_http_filters_rate_limit>`.\n// [#extension: envoy.filters.http.ratelimit]\n\n// [#next-free-field: 9]\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.rate_limit.v2.RateLimit\";\n\n  // Defines the version of the standard to use for X-RateLimit headers.\n  enum XRateLimitHeadersRFCVersion {\n    // X-RateLimit headers disabled.\n    OFF = 0;\n\n    // Use `draft RFC Version 03 <https://tools.ietf.org/id/draft-polli-ratelimit-headers-03.html>`_.\n    DRAFT_VERSION_03 = 1;\n  }\n\n  // The rate limit domain to use when calling the rate limit service.\n  string domain = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Specifies the rate limit configurations to be applied with the same\n  // stage number. If not set, the default stage number is 0.\n  //\n  // .. note::\n  //\n  //  The filter supports a range of 0 - 10 inclusively for stage numbers.\n  uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}];\n\n  // The type of requests the filter should apply to. The supported\n  // types are *internal*, *external* or *both*. A request is considered internal if\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>` is set to true. 
If\n  // :ref:`x-envoy-internal<config_http_conn_man_headers_x-envoy-internal>` is not set or false, a\n  // request is considered external. The filter defaults to *both*, and it will apply to all request\n  // types.\n  string request_type = 3\n      [(validate.rules).string = {in: \"internal\" in: \"external\" in: \"both\" in: \"\"}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 4;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 5;\n\n  // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead\n  // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The\n  // HTTP code will be 200 for a gRPC response.\n  bool rate_limited_as_resource_exhausted = 6;\n\n  // Configuration for an external rate limit service provider. If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 7\n      [(validate.rules).message = {required: true}];\n\n  // Defines the standard version to use for X-RateLimit headers emitted by the filter:\n  //\n  // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the\n  //   client in the current time-window followed by the description of the\n  //   quota policy. The values are returned by the rate limiting service in\n  //   :ref:`current_limit<envoy_v3_api_field_service.ratelimit.v3.RateLimitResponse.DescriptorStatus.current_limit>`\n  //   field. Example: `10, 10;w=1;name=\"per-ip\", 1000;w=3600`.\n  // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the\n  //   current time-window. 
The values are returned by the rate limiting service\n  //   in :ref:`limit_remaining<envoy_v3_api_field_service.ratelimit.v3.RateLimitResponse.DescriptorStatus.limit_remaining>`\n  //   field.\n  // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of\n  //   the current time-window. The values are returned by the rate limiting service\n  //   in :ref:`duration_until_reset<envoy_v3_api_field_service.ratelimit.v3.RateLimitResponse.DescriptorStatus.duration_until_reset>`\n  //   field.\n  //\n  // In case rate limiting policy specifies more than one time window, the values\n  // above represent the window that is closest to reaching its limit.\n  //\n  // For more information about the headers specification see selected version of\n  // the `draft RFC <https://tools.ietf.org/id/draft-polli-ratelimit-headers-03.html>`_.\n  //\n  // Disabled by default.\n  XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8\n      [(validate.rules).enum = {defined_only: true}];\n}\n\nmessage RateLimitPerRoute {\n  enum VhRateLimitsOptions {\n    // Use the virtual host rate limits unless the route has a rate limit policy.\n    OVERRIDE = 0;\n\n    // Use the virtual host rate limits even if the route has a rate limit policy.\n    INCLUDE = 1;\n\n    // Ignore the virtual host rate limits even if the route does not have a rate limit policy.\n    IGNORE = 2;\n  }\n\n  // Specifies if the rate limit filter should include the virtual host rate limits.\n  VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/rbac/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/rbac/v2:pkg\",\n        \"//envoy/config/rbac/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.rbac.v3;\n\nimport \"envoy/config/rbac/v3/rbac.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.rbac.v3\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_http_filters_rbac>`.\n// [#extension: envoy.filters.http.rbac]\n\n// RBAC filter config.\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.rbac.v2.RBAC\";\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v3.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter (i.e., returning a 403)\n  // but will emit stats and logs and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v3.RBAC shadow_rules = 2;\n}\n\nmessage RBACPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.rbac.v2.RBACPerRoute\";\n\n  reserved 1;\n\n  // Override the global configuration of the filter with this new config.\n  // If absent, the global RBAC policy will be disabled for this route.\n  RBAC rbac = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/rbac/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/rbac/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.rbac.v4alpha;\n\nimport \"envoy/config/rbac/v4alpha/rbac.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.rbac.v4alpha\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_http_filters_rbac>`.\n// [#extension: envoy.filters.http.rbac]\n\n// RBAC filter config.\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.rbac.v3.RBAC\";\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v4alpha.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter (i.e., returning a 403)\n  // but will emit stats and logs and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v4alpha.RBAC shadow_rules = 2;\n}\n\nmessage RBACPerRoute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.rbac.v3.RBACPerRoute\";\n\n  reserved 1;\n\n  // Override the global configuration of the filter with this new config.\n  // If absent, the global RBAC policy will be disabled for this route.\n  RBAC rbac = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/router/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/filter/http/router/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.router.v3;\n\nimport \"envoy/config/accesslog/v3/accesslog.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.router.v3\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Router]\n// Router :ref:`configuration overview <config_http_filters_router>`.\n// [#extension: envoy.filters.http.router]\n\n// [#next-free-field: 7]\nmessage Router {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.router.v2.Router\";\n\n  // Whether the router generates dynamic cluster statistics. Defaults to\n  // true. Can be disabled in high performance scenarios.\n  google.protobuf.BoolValue dynamic_stats = 1;\n\n  // Whether to start a child span for egress routed calls. This can be\n  // useful in scenarios where other filters (auth, ratelimit, etc.) make\n  // outbound calls and have child spans rooted at the same ingress\n  // parent. Defaults to false.\n  bool start_child_span = 2;\n\n  // Configuration for HTTP upstream logs emitted by the router. Upstream logs\n  // are configured in the same way as access logs, but each log entry represents\n  // an upstream request. Presuming retries are configured, multiple upstream\n  // requests may be made for each downstream (inbound) request.\n  repeated config.accesslog.v3.AccessLog upstream_log = 3;\n\n  // Do not add any additional *x-envoy-* headers to requests or responses. 
This\n  // only affects the :ref:`router filter generated *x-envoy-* headers\n  // <config_http_filters_router_headers_set>`, other Envoy filters and the HTTP\n  // connection manager may continue to set *x-envoy-* headers.\n  bool suppress_envoy_headers = 4;\n\n  // Specifies a list of HTTP headers to strictly validate. Envoy will reject a\n  // request and respond with HTTP status 400 if the request contains an invalid\n  // value for any of the headers listed in this field. Strict header checking\n  // is only supported for the following headers:\n  //\n  // Value must be a ','-delimited list (i.e. no spaces) of supported retry\n  // policy values:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`\n  // * :ref:`config_http_filters_router_x-envoy-retry-on`\n  //\n  // Value must be an integer:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-max-retries`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`\n  repeated string strict_check_headers = 5 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"x-envoy-upstream-rq-timeout-ms\"\n        in: \"x-envoy-upstream-rq-per-try-timeout-ms\"\n        in: \"x-envoy-max-retries\"\n        in: \"x-envoy-retry-grpc-on\"\n        in: \"x-envoy-retry-on\"\n      }\n    }\n  }];\n\n  // If not set, ingress Envoy will ignore\n  // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress\n  // Envoy, when deriving timeout for upstream cluster.\n  bool respect_expected_rq_timeout = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/router/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.router.v4alpha;\n\nimport \"envoy/config/accesslog/v4alpha/accesslog.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.router.v4alpha\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Router]\n// Router :ref:`configuration overview <config_http_filters_router>`.\n// [#extension: envoy.filters.http.router]\n\n// [#next-free-field: 7]\nmessage Router {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.router.v3.Router\";\n\n  // Whether the router generates dynamic cluster statistics. Defaults to\n  // true. Can be disabled in high performance scenarios.\n  google.protobuf.BoolValue dynamic_stats = 1;\n\n  // Whether to start a child span for egress routed calls. This can be\n  // useful in scenarios where other filters (auth, ratelimit, etc.) make\n  // outbound calls and have child spans rooted at the same ingress\n  // parent. Defaults to false.\n  bool start_child_span = 2;\n\n  // Configuration for HTTP upstream logs emitted by the router. Upstream logs\n  // are configured in the same way as access logs, but each log entry represents\n  // an upstream request. Presuming retries are configured, multiple upstream\n  // requests may be made for each downstream (inbound) request.\n  repeated config.accesslog.v4alpha.AccessLog upstream_log = 3;\n\n  // Do not add any additional *x-envoy-* headers to requests or responses. This\n  // only affects the :ref:`router filter generated *x-envoy-* headers\n  // <config_http_filters_router_headers_set>`, other Envoy filters and the HTTP\n  // connection manager may continue to set *x-envoy-* headers.\n  bool suppress_envoy_headers = 4;\n\n  // Specifies a list of HTTP headers to strictly validate. Envoy will reject a\n  // request and respond with HTTP status 400 if the request contains an invalid\n  // value for any of the headers listed in this field. Strict header checking\n  // is only supported for the following headers:\n  //\n  // Value must be a ','-delimited list (i.e. no spaces) of supported retry\n  // policy values:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`\n  // * :ref:`config_http_filters_router_x-envoy-retry-on`\n  //\n  // Value must be an integer:\n  //\n  // * :ref:`config_http_filters_router_x-envoy-max-retries`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`\n  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`\n  repeated string strict_check_headers = 5 [(validate.rules).repeated = {\n    items {\n      string {\n        in: \"x-envoy-upstream-rq-timeout-ms\"\n        in: \"x-envoy-upstream-rq-per-try-timeout-ms\"\n        in: \"x-envoy-max-retries\"\n        in: \"x-envoy-retry-grpc-on\"\n        in: \"x-envoy-retry-on\"\n      }\n    }\n  }];\n\n  // If not set, ingress Envoy will ignore\n  // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress\n  // Envoy, when deriving timeout for upstream cluster.\n  bool respect_expected_rq_timeout = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/squash/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/squash/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.squash.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.squash.v3\";\noption java_outer_classname = \"SquashProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Squash]\n// Squash :ref:`configuration overview <config_http_filters_squash>`.\n// [#extension: envoy.filters.http.squash]\n\n// [#next-free-field: 6]\nmessage Squash {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.squash.v2.Squash\";\n\n  // The name of the cluster that hosts the Squash server.\n  string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // When the filter requests the Squash server to create a DebugAttachment, it will use this\n  // structure as template for the body of the request. It can contain reference to environment\n  // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server\n  // with more information to find the process to attach the debugger to. For example, in a\n  // Istio/k8s environment, this will contain information on the pod:\n  //\n  // .. code-block:: json\n  //\n  //  {\n  //    \"spec\": {\n  //      \"attachment\": {\n  //        \"pod\": \"{{ POD_NAME }}\",\n  //        \"namespace\": \"{{ POD_NAMESPACE }}\"\n  //      },\n  //      \"match_request\": true\n  //    }\n  //  }\n  //\n  // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API)\n  google.protobuf.Struct attachment_template = 2;\n\n  // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second.\n  google.protobuf.Duration request_timeout = 3;\n\n  // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60\n  // seconds.\n  google.protobuf.Duration attachment_timeout = 4;\n\n  // Amount of time to poll for the status of the attachment object in the Squash server\n  // (to check if has been attached). Defaults to 1 second.\n  google.protobuf.Duration attachment_poll_period = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/http/tap/v2alpha:pkg\",\n        \"//envoy/extensions/common/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.tap.v3;\n\nimport \"envoy/extensions/common/tap/v3/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.tap.v3\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap]\n// Tap :ref:`configuration overview <config_http_filters_tap>`.\n// [#extension: envoy.filters.http.tap]\n\n// Top level configuration for the tap filter.\nmessage Tap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.http.tap.v2alpha.Tap\";\n\n  // Common configuration for the HTTP tap filter.\n  common.tap.v3.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/common/tap/v4alpha:pkg\",\n        \"//envoy/extensions/filters/http/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.tap.v4alpha;\n\nimport \"envoy/extensions/common/tap/v4alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.tap.v4alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tap]\n// Tap :ref:`configuration overview <config_http_filters_tap>`.\n// [#extension: envoy.filters.http.tap]\n\n// Top level configuration for the tap filter.\nmessage Tap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.http.tap.v3.Tap\";\n\n  // Common configuration for the HTTP tap filter.\n  common.tap.v4alpha.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.http.wasm.v3;\n\nimport \"envoy/extensions/wasm/v3/wasm.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.http.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm]\n// [#extension: envoy.filters.http.wasm]\n// Wasm :ref:`configuration overview <config_http_filters_wasm>`.\n\nmessage Wasm {\n  // General Plugin configuration.\n  envoy.extensions.wasm.v3.PluginConfig config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/http_inspector/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.http_inspector.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.http_inspector.v3\";\noption java_outer_classname = \"HttpInspectorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP Inspector Filter]\n// Detect whether the application protocol is HTTP.\n// [#extension: envoy.filters.listener.http_inspector]\n\nmessage HttpInspector {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.http_inspector.v2.HttpInspector\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/original_dst/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.original_dst.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.original_dst.v3\";\noption java_outer_classname = \"OriginalDstProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Original Dst Filter]\n// Use the Original destination address on downstream connections.\n// [#extension: envoy.filters.listener.original_dst]\n\nmessage OriginalDst {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.original_dst.v2.OriginalDst\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/original_src/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.original_src.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.original_src.v3\";\noption java_outer_classname = \"OriginalSrcProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Original Src Filter]\n// Use the Original source address on upstream connections.\n// [#extension: envoy.filters.listener.original_src]\n\n// The Original Src filter binds upstream connections to the original source address determined\n// for the connection. This address could come from something like the Proxy Protocol filter, or it\n// could come from trusted http headers.\nmessage OriginalSrc {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.original_src.v2alpha1.OriginalSrc\";\n\n  // Whether to bind the port to the one used in the original downstream connection.\n  // [#not-implemented-hide:]\n  bool bind_port = 1;\n\n  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to\n  // ensure that non-local addresses may be routed back through envoy when binding to the original\n  // source address. The option will not be applied if the mark is 0.\n  uint32 mark = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/proxy_protocol/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.proxy_protocol.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3\";\noption java_outer_classname = \"ProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Proxy Protocol Filter]\n// PROXY protocol listener filter.\n// [#extension: envoy.filters.listener.proxy_protocol]\n\nmessage ProxyProtocol {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.proxy_protocol.v2.ProxyProtocol\";\n\n  message KeyValuePair {\n    // The namespace — if this is empty, the filter's namespace will be used.\n    string metadata_namespace = 1;\n\n    // The key to use within the namespace.\n    string key = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // A Rule defines what metadata to apply when a header is present or missing.\n  message Rule {\n    // The type that triggers the rule - required\n    // TLV type is defined as uint8_t in proxy protocol. See `the spec\n    // <https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt>`_ for details.\n    uint32 tlv_type = 1 [(validate.rules).uint32 = {lt: 256}];\n\n    // If the TLV type is present, apply this metadata KeyValuePair.\n    KeyValuePair on_tlv_present = 2;\n  }\n\n  // The list of rules to apply to requests.\n  repeated Rule rules = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/listener/tls_inspector/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.listener.tls_inspector.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.listener.tls_inspector.v3\";\noption java_outer_classname = \"TlsInspectorProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: TLS Inspector Filter]\n// Allows detecting whether the transport appears to be TLS or plaintext.\n// [#extension: envoy.filters.listener.tls_inspector]\n\nmessage TlsInspector {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.listener.tls_inspector.v2.TlsInspector\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/client_ssl_auth/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.client_ssl_auth.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.client_ssl_auth.v3\";\noption java_outer_classname = \"ClientSslAuthProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Client TLS authentication]\n// Client TLS authentication\n// :ref:`configuration overview <config_network_filters_client_ssl_auth>`.\n// [#extension: envoy.filters.network.client_ssl_auth]\n\nmessage ClientSSLAuth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.client_ssl_auth.v2.ClientSSLAuth\";\n\n  // The :ref:`cluster manager <arch_overview_cluster_manager>` cluster that runs\n  // the authentication service. The filter will connect to the service every 60s to fetch the list\n  // of principals. The service must support the expected :ref:`REST API\n  // <config_network_filters_client_ssl_auth_rest_api>`.\n  string auth_api_cluster = 1\n      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_client_ssl_auth_stats>`.\n  string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Time in milliseconds between principal refreshes from the\n  // authentication service. Default is 60000 (60s). The actual fetch time\n  // will be this value plus a random jittered value between\n  // 0-refresh_delay_ms milliseconds.\n  google.protobuf.Duration refresh_delay = 3;\n\n  // An optional list of IP address and subnet masks that should be white\n  // listed for access by the filter. If no list is provided, there is no\n  // IP allowlist.\n  repeated config.core.v3.CidrRange ip_white_list = 4\n      [(udpa.annotations.field_migrate).rename = \"ip_allowlist\"];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/direct_response/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.direct_response.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.direct_response.v3\";\noption java_outer_classname = \"ConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Direct response]\n// Direct response :ref:`configuration overview <config_network_filters_direct_response>`.\n// [#extension: envoy.filters.network.direct_response]\n\nmessage Config {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.direct_response.v2.Config\";\n\n  // Response data as a data source.\n  config.core.v3.DataSource response = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/dubbo/router/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.router.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.router.v3\";\noption java_outer_classname = \"RouterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Router]\n// Dubbo router :ref:`configuration overview <config_dubbo_filters_router>`.\n\nmessage Router {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.dubbo.router.v2alpha1.Router\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.v3;\n\nimport \"envoy/extensions/filters/network/dubbo_proxy/v3/route.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3\";\noption java_outer_classname = \"DubboProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dubbo Proxy]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n// [#extension: envoy.filters.network.dubbo_proxy]\n\n// Dubbo Protocol types supported by Envoy.\nenum ProtocolType {\n  // the default protocol.\n  Dubbo = 0;\n}\n\n// Dubbo Serialization types supported by Envoy.\nenum SerializationType {\n  // the default serialization protocol.\n  Hessian2 = 0;\n}\n\n// [#next-free-field: 6]\nmessage DubboProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy\";\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Configure the protocol used.\n  ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Configure the serialization protocol used.\n  SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  repeated RouteConfiguration route_config = 4;\n\n  // A list of individual Dubbo filters that make up the filter chain for requests made to the\n  // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards\n  // compatibility, if no dubbo_filters are specified, a default Dubbo router filter\n  // (`envoy.filters.dubbo.router`) is used.\n  repeated DubboFilter dubbo_filters = 5;\n}\n\n// DubboFilter configures a Dubbo filter.\nmessage DubboFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter\";\n\n  // The name of the filter to instantiate. The name must match a supported\n  // filter.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any config = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.v3;\n\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Dubbo Proxy Route Configuration]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n\n// [#next-free-field: 6]\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteConfiguration\";\n\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The interface name of the service.\n  string interface = 2;\n\n  // Which group does the interface belong to.\n  string group = 3;\n\n  // The version number of the interface.\n  string version = 4;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 5;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteMatch\";\n\n  // Method level routing matching.\n  MethodMatch method = 1;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated config.route.v3.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteAction\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed.\n    string cluster = 1;\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    // Currently ClusterWeight only supports the name and weight fields.\n    config.route.v3.WeightedCluster weighted_clusters = 2;\n  }\n}\n\nmessage MethodMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch\";\n\n  // The parameter matching type.\n  message ParameterMatchSpecifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch.ParameterMatchSpecifier\";\n\n    oneof parameter_match_specifier {\n      // If specified, header match will be performed based on the value of the header.\n      string exact_match = 3;\n\n      // If specified, header match will be performed based on range.\n      // The rule will match if the request header value is within this range.\n      // The entire request header value must represent an integer in base 10 notation: consisting\n      // of an optional plus or minus sign followed by a sequence of digits. The rule will not match\n      // if the header value does not represent an integer. Match will fail for empty values,\n      // floating point numbers or if only a subsequence of the header value is an integer.\n      //\n      // Examples:\n      //\n      // * For range [-10,0), route will match for header value -1, but not for 0,\n      //   \"somestring\", 10.9, \"-1somestring\"\n      type.v3.Int64Range range_match = 4;\n    }\n  }\n\n  // The name of the method.\n  type.matcher.v3.StringMatcher name = 1;\n\n  // Method parameter definition.\n  // The key is the parameter index, starting from 0.\n  // The value is the parameter matching type.\n  map<uint32, ParameterMatchSpecifier> params_match = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/dubbo_proxy/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.v4alpha;\n\nimport \"envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha\";\noption java_outer_classname = \"DubboProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Dubbo Proxy]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n// [#extension: envoy.filters.network.dubbo_proxy]\n\n// Dubbo Protocol types supported by Envoy.\nenum ProtocolType {\n  // the default protocol.\n  Dubbo = 0;\n}\n\n// Dubbo Serialization types supported by Envoy.\nenum SerializationType {\n  // the default serialization protocol.\n  Hessian2 = 0;\n}\n\n// [#next-free-field: 6]\nmessage DubboProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\";\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Configure the protocol used.\n  ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Configure the serialization protocol used.\n  SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  repeated RouteConfiguration route_config = 4;\n\n  // A list of individual Dubbo filters that make up the filter chain for requests made to the\n  // Dubbo proxy. Order matters as the filters are processed sequentially. 
For backwards\n  // compatibility, if no dubbo_filters are specified, a default Dubbo router filter\n  // (`envoy.filters.dubbo.router`) is used.\n  repeated DubboFilter dubbo_filters = 5;\n}\n\n// DubboFilter configures a Dubbo filter.\nmessage DubboFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.DubboFilter\";\n\n  // The name of the filter to instantiate. The name must match a supported\n  // filter.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being\n  // instantiated. See the supported filters for further documentation.\n  google.protobuf.Any config = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.dubbo_proxy.v4alpha;\n\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/v3/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Dubbo Proxy Route Configuration]\n// Dubbo Proxy :ref:`configuration overview <config_network_filters_dubbo_proxy>`.\n\n// [#next-free-field: 6]\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.RouteConfiguration\";\n\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The interface name of the service.\n  string interface = 2;\n\n  // Which group does the interface belong to.\n  string group = 3;\n\n  // The version number of the interface.\n  string version = 4;\n\n  // The list of routes that will be matched, in order, against incoming requests. 
The first route\n  // that matches will be used.\n  repeated Route routes = 5;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.RouteMatch\";\n\n  // Method level routing matching.\n  MethodMatch method = 1;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated config.route.v4alpha.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.RouteAction\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates the upstream cluster to which the request should be routed.\n    string cluster = 1;\n\n    // Multiple upstream clusters can be specified for a given route. 
The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    // Currently ClusterWeight only supports the name and weight fields.\n    config.route.v4alpha.WeightedCluster weighted_clusters = 2;\n  }\n}\n\nmessage MethodMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch\";\n\n  // The parameter matching type.\n  message ParameterMatchSpecifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch.ParameterMatchSpecifier\";\n\n    oneof parameter_match_specifier {\n      // If specified, header match will be performed based on the value of the header.\n      string exact_match = 3;\n\n      // If specified, header match will be performed based on range.\n      // The rule will match if the request header value is within this range.\n      // The entire request header value must represent an integer in base 10 notation: consisting\n      // of an optional plus or minus sign followed by a sequence of digits. The rule will not match\n      // if the header value does not represent an integer. Match will fail for empty values,\n      // floating point numbers or if only a subsequence of the header value is an integer.\n      //\n      // Examples:\n      //\n      // * For range [-10,0), route will match for header value -1, but not for 0,\n      //   \"somestring\", 10.9, \"-1somestring\"\n      type.v3.Int64Range range_match = 4;\n    }\n  }\n\n  // The name of the method.\n  type.matcher.v4alpha.StringMatcher name = 1;\n\n  // Method parameter definition.\n  // The key is the parameter index, starting from 0.\n  // The value is the parameter matching type.\n  map<uint32, ParameterMatchSpecifier> params_match = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/echo/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/echo/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.echo.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.echo.v3\";\noption java_outer_classname = \"EchoProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Echo]\n// Echo :ref:`configuration overview <config_network_filters_echo>`.\n// [#extension: envoy.filters.network.echo]\n\nmessage Echo {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.echo.v2.Echo\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/ext_authz/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.ext_authz.v3;\n\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/grpc_service.proto\";\nimport \"envoy/type/matcher/v3/metadata.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.ext_authz.v3\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Network External Authorization ]\n// The network layer external authorization service configuration\n// :ref:`configuration overview <config_network_filters_ext_authz>`.\n// [#extension: envoy.filters.network.ext_authz]\n\n// External Authorization filter calls out to an external service over the\n// gRPC Authorization API defined by\n// :ref:`CheckRequest <envoy_api_msg_service.auth.v3.CheckRequest>`.\n// A failed check will cause this filter to close the TCP connection.\n// [#next-free-field: 7]\nmessage ExtAuthz {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.ext_authz.v2.ExtAuthz\";\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The external authorization gRPC service configuration.\n  // The default timeout is set to 200ms by this filter.\n  config.core.v3.GrpcService grpc_service = 2;\n\n  // The filter's behaviour in case the external authorization service does\n  // not respond back. 
When it is set to true, Envoy will also allow traffic in case of\n  // communication failure between authorization service and the proxy.\n  // Defaults to false.\n  bool failure_mode_allow = 3;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v3.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 4;\n\n  // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and\n  // version of Check{Request,Response} used on the wire.\n  config.core.v3.ApiVersion transport_api_version = 5\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies if the filter is enabled with metadata matcher.\n  // If this field is not specified, the filter will be enabled for all requests.\n  type.matcher.v3.MetadataMatcher filter_enabled_metadata = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/ext_authz/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.ext_authz.v4alpha;\n\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\nimport \"envoy/type/matcher/v4alpha/metadata.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.ext_authz.v4alpha\";\noption java_outer_classname = \"ExtAuthzProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Network External Authorization ]\n// The network layer external authorization service configuration\n// :ref:`configuration overview <config_network_filters_ext_authz>`.\n// [#extension: envoy.filters.network.ext_authz]\n\n// External Authorization filter calls out to an external service over the\n// gRPC Authorization API defined by\n// :ref:`CheckRequest <envoy_api_msg_service.auth.v4alpha.CheckRequest>`.\n// A failed check will cause this filter to close the TCP connection.\n// [#next-free-field: 7]\nmessage ExtAuthz {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.ext_authz.v3.ExtAuthz\";\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The external authorization gRPC service configuration.\n  // The default timeout is set to 200ms by this filter.\n  config.core.v4alpha.GrpcService grpc_service = 2;\n\n  // The filter's behaviour in case the external authorization service does\n  // not respond back. 
When it is set to true, Envoy will also allow traffic in case of\n  // communication failure between authorization service and the proxy.\n  // Defaults to false.\n  bool failure_mode_allow = 3;\n\n  // Specifies if the peer certificate is sent to the external service.\n  //\n  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the\n  // :ref:`certificate<envoy_api_field_service.auth.v4alpha.AttributeContext.Peer.certificate>`.\n  bool include_peer_certificate = 4;\n\n  // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and\n  // version of Check{Request,Response} used on the wire.\n  config.core.v4alpha.ApiVersion transport_api_version = 5\n      [(validate.rules).enum = {defined_only: true}];\n\n  // Specifies if the filter is enabled with metadata matcher.\n  // If this field is not specified, the filter will be enabled for all requests.\n  type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 6;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/http_connection_manager/v2:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.http_connection_manager.v3;\n\nimport \"envoy/config/accesslog/v3/accesslog.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/config/core/v3/protocol.proto\";\nimport \"envoy/config/core/v3/substitution_format_string.proto\";\nimport \"envoy/config/route/v3/route.proto\";\nimport \"envoy/config/route/v3/scoped_route.proto\";\nimport \"envoy/config/trace/v3/http_tracer.proto\";\nimport \"envoy/type/tracing/v3/custom_tag.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3\";\noption java_outer_classname = \"HttpConnectionManagerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP connection manager]\n// HTTP connection manager :ref:`configuration overview <config_http_conn_man>`.\n// [#extension: envoy.filters.network.http_connection_manager]\n\n// [#next-free-field: 41]\nmessage HttpConnectionManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\";\n\n  enum CodecType {\n    // For every new connection, the connection manager will determine which\n    // codec to use. 
This mode supports both ALPN for TLS listeners as well as\n    // protocol inference for plaintext listeners. If ALPN data is available, it\n    // is preferred, otherwise protocol inference is used. In almost all cases,\n    // this is the right option to choose for this setting.\n    AUTO = 0;\n\n    // The connection manager will assume that the client is speaking HTTP/1.1.\n    HTTP1 = 1;\n\n    // The connection manager will assume that the client is speaking HTTP/2\n    // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN.\n    // Prior knowledge is allowed).\n    HTTP2 = 2;\n\n    // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n    // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n    // to distinguish HTTP1 and HTTP2 traffic.\n    HTTP3 = 3;\n  }\n\n  enum ServerHeaderTransformation {\n    // Overwrite any Server header with the contents of server_name.\n    OVERWRITE = 0;\n\n    // If no Server header is present, append Server server_name\n    // If a Server header is present, pass it through.\n    APPEND_IF_ABSENT = 1;\n\n    // Pass through the value of the server header, and do not append a header\n    // if none is present.\n    PASS_THROUGH = 2;\n  }\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  enum ForwardClientCertDetails {\n    // Do not send the XFCC header to the next hop. 
This is the default value.\n    SANITIZE = 0;\n\n    // When the client connection is mTLS (Mutual TLS), forward the XFCC header\n    // in the request.\n    FORWARD_ONLY = 1;\n\n    // When the client connection is mTLS, append the client certificate\n    // information to the request’s XFCC header and forward it.\n    APPEND_FORWARD = 2;\n\n    // When the client connection is mTLS, reset the XFCC header with the client\n    // certificate information and send it to the next hop.\n    SANITIZE_SET = 3;\n\n    // Always forward the XFCC header in the request, regardless of whether the\n    // client connection is mTLS.\n    ALWAYS_FORWARD_ONLY = 4;\n  }\n\n  // [#next-free-field: 10]\n  message Tracing {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing\";\n\n    enum OperationName {\n      // The HTTP listener is used for ingress/incoming requests.\n      INGRESS = 0;\n\n      // The HTTP listener is used for egress/outgoing requests.\n      EGRESS = 1;\n    }\n\n    // Target percentage of requests managed by this HTTP connection manager that will be force\n    // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n    // header is set. This field is a direct analog for the runtime variable\n    // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n    // <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent client_sampling = 3;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be randomly\n    // selected for trace generation, if not requested by the client or not forced. 
This field is\n    // a direct analog for the runtime variable 'tracing.random_sampling' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent random_sampling = 4;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be traced\n    // after all other sampling checks have been applied (client-directed, force tracing, random\n    // sampling). This field functions as an upper limit on the total configured sampling rate. For\n    // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n    // of client requests with the appropriate headers to be force traced. This field is a direct\n    // analog for the runtime variable 'tracing.global_enabled' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent overall_sampling = 5;\n\n    // Whether to annotate spans with additional data. If true, spans will include logs for stream\n    // events.\n    bool verbose = 6;\n\n    // Maximum length of the request path to extract and include in the HttpUrl tag. Used to\n    // truncate lengthy request paths to meet the needs of a tracing backend.\n    // Default: 256\n    google.protobuf.UInt32Value max_path_tag_length = 7;\n\n    // A list of custom tags with unique tag name to create tags for the active span.\n    repeated type.tracing.v3.CustomTag custom_tags = 8;\n\n    // Configuration for an external tracing provider.\n    // If not specified, no tracing will be performed.\n    //\n    // .. attention::\n    //   Please be aware that *envoy.tracers.opencensus* provider can only be configured once\n    //   in Envoy lifetime.\n    //   Any attempts to reconfigure it or to use different configurations for different HCM filters\n    //   will be rejected.\n    //   Such a constraint is inherent to OpenCensus itself. 
It cannot be overcome without changes\n    //   on OpenCensus side.\n    config.trace.v3.Tracing.Http provider = 9;\n\n    OperationName hidden_envoy_deprecated_operation_name = 1 [\n      deprecated = true,\n      (validate.rules).enum = {defined_only: true},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n\n    repeated string hidden_envoy_deprecated_request_headers_for_tags = 2 [deprecated = true];\n  }\n\n  message InternalAddressConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.\"\n        \"InternalAddressConfig\";\n\n    // Whether unix socket addresses should be considered internal.\n    bool unix_sockets = 1;\n  }\n\n  // [#next-free-field: 7]\n  message SetCurrentClientCertDetails {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.\"\n        \"SetCurrentClientCertDetails\";\n\n    reserved 2;\n\n    // Whether to forward the subject of the client cert. Defaults to false.\n    google.protobuf.BoolValue subject = 1;\n\n    // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the\n    // XFCC header comma separated from other values with the value Cert=\"PEM\".\n    // Defaults to false.\n    bool cert = 3;\n\n    // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM\n    // format. This will appear in the XFCC header comma separated from other values with the value\n    // Chain=\"PEM\".\n    // Defaults to false.\n    bool chain = 6;\n\n    // Whether to forward the DNS type Subject Alternative Names of the client cert.\n    // Defaults to false.\n    bool dns = 4;\n\n    // Whether to forward the URI type Subject Alternative Name of the client cert. 
Defaults to\n    // false.\n    bool uri = 5;\n  }\n\n  // The configuration for HTTP upgrades.\n  // For each upgrade type desired, an UpgradeConfig must be added.\n  //\n  // .. warning::\n  //\n  //    The current implementation of upgrade headers does not handle\n  //    multi-valued upgrade headers. Support for multi-valued headers may be\n  //    added in the future if needed.\n  //\n  // .. warning::\n  //    The current implementation of upgrade headers does not work with HTTP/2\n  //    upstreams.\n  message UpgradeConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.\"\n        \"UpgradeConfig\";\n\n    // The case-insensitive name of this upgrade, e.g. \"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type]\n    // will be proxied upstream.\n    string upgrade_type = 1;\n\n    // If present, this represents the filter chain which will be created for\n    // this type of upgrade. If no filters are present, the filter chain for\n    // HTTP connections will be used for this upgrade type.\n    repeated HttpFilter filters = 2;\n\n    // Determines if upgrades are enabled or disabled by default. Defaults to true.\n    // This can be overridden on a per-route basis with :ref:`cluster\n    // <envoy_api_field_config.route.v3.RouteAction.upgrade_configs>` as documented in the\n    // :ref:`upgrade documentation <arch_overview_upgrades>`.\n    google.protobuf.BoolValue enabled = 3;\n  }\n\n  reserved 27;\n\n  // Supplies the type of codec that the connection manager should use.\n  CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics for the\n  // connection manager. 
See the :ref:`statistics documentation <config_http_conn_man_stats>` for\n  // more information.\n  string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The connection manager’s route table will be dynamically loaded via the RDS API.\n    Rds rds = 3;\n\n    // The route table for the connection manager is static and is specified in this property.\n    config.route.v3.RouteConfiguration route_config = 4;\n\n    // A route table will be dynamically assigned to each request based on request attributes\n    // (e.g., the value of a header). The \"routing scopes\" (i.e., route tables) and \"scope keys\" are\n    // specified in this message.\n    ScopedRoutes scoped_routes = 31;\n  }\n\n  // A list of individual HTTP filters that make up the filter chain for\n  // requests made to the connection manager. :ref:`Order matters <arch_overview_http_filters_ordering>`\n  // as the filters are processed sequentially as request events happen.\n  repeated HttpFilter http_filters = 5;\n\n  // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent`\n  // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked\n  // documentation for more information. Defaults to false.\n  google.protobuf.BoolValue add_user_agent = 6;\n\n  // Presence of the object defines whether the connection manager\n  // emits :ref:`tracing <arch_overview_tracing>` data to the :ref:`configured tracing provider\n  // <envoy_api_msg_config.trace.v3.Tracing>`.\n  Tracing tracing = 7;\n\n  // Additional settings for HTTP requests handled by the connection manager. 
These will be\n  // applicable to both HTTP1 and HTTP2 requests.\n  config.core.v3.HttpProtocolOptions common_http_protocol_options = 35\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // Additional HTTP/1 settings that are passed to the HTTP/1 codec.\n  config.core.v3.Http1ProtocolOptions http_protocol_options = 8;\n\n  // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec.\n  config.core.v3.Http2ProtocolOptions http2_protocol_options = 9\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // An optional override that the connection manager will write to the server\n  // header in responses. If not set, the default is *envoy*.\n  string server_name = 10\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Defines the action to be applied to the Server header on the response path.\n  // By default, Envoy will overwrite the header with the value specified in\n  // server_name.\n  ServerHeaderTransformation server_header_transformation = 34\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The maximum request headers size for incoming connections.\n  // If unconfigured, the default max request headers allowed is 60 KiB.\n  // Requests that exceed this limit will receive a 431 response.\n  // The max configurable limit is 96 KiB, based on current implementation\n  // constraints.\n  google.protobuf.UInt32Value max_request_headers_kb = 29\n      [(validate.rules).uint32 = {lte: 96 gt: 0}];\n\n  // The stream idle timeout for connections managed by the connection manager.\n  // If not specified, this defaults to 5 minutes. 
The default value was selected\n  // so as not to interfere with any smaller configured timeouts that may have\n  // existed in configurations prior to the introduction of this feature, while\n  // introducing robustness to TCP connections that terminate without a FIN.\n  //\n  // This idle timeout applies to new streams and is overridable by the\n  // :ref:`route-level idle_timeout\n  // <envoy_api_field_config.route.v3.RouteAction.idle_timeout>`. Even on a stream in\n  // which the override applies, prior to receipt of the initial request\n  // headers, the :ref:`stream_idle_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout>`\n  // applies. Each time an encode/decode event for headers or data is processed\n  // for the stream, the timer will be reset. If the timeout fires, the stream\n  // is terminated with a 408 Request Timeout error code if no upstream response\n  // header has been received, otherwise a stream reset occurs.\n  //\n  // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough\n  // window to write any remaining stream data once the entirety of stream data (local end stream is\n  // true) has been buffered pending available window. In other words, this timeout defends against\n  // a peer that does not release enough window to completely write the stream, even though all\n  // data has been proxied within available flow control windows. If the timeout is hit in this\n  // case, the :ref:`tx_flush_timeout <config_http_conn_man_stats_per_codec>` counter will be\n  // incremented. Note that :ref:`max_stream_duration\n  // <envoy_api_field_config.core.v3.HttpProtocolOptions.max_stream_duration>` does not apply to\n  // this corner case.\n  //\n  // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due\n  // to the granularity of events presented to the connection manager. 
For example, while receiving\n  // very large request headers, it may be the case that there is traffic regularly arriving on the\n  // wire while the connection manage is only able to observe the end-of-headers event, hence the\n  // stream may still idle timeout.\n  //\n  // A value of 0 will completely disable the connection manager stream idle\n  // timeout, although per-route idle timeout overrides will continue to apply.\n  google.protobuf.Duration stream_idle_timeout = 24\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The amount of time that Envoy will wait for the entire request to be received.\n  // The timer is activated when the request is initiated, and is disarmed when the last byte of the\n  // request is sent upstream (i.e. all decoding filters have processed the request), OR when the\n  // response is initiated. If not specified or set to 0, this timeout is disabled.\n  google.protobuf.Duration request_timeout = 28\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The time that Envoy will wait between sending an HTTP/2 “shutdown\n  // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame.\n  // This is used so that Envoy provides a grace period for new streams that\n  // race with the final GOAWAY frame. During this grace period, Envoy will\n  // continue to accept new streams. After the grace period, a final GOAWAY\n  // frame is sent and Envoy will start refusing new streams. Draining occurs\n  // both when a connection hits the idle timeout or during general server\n  // draining. 
The default grace period is 5000 milliseconds (5 seconds) if this\n  // option is not specified.\n  google.protobuf.Duration drain_timeout = 12;\n\n  // The delayed close timeout is for downstream connections managed by the HTTP connection manager.\n  // It is defined as a grace period after connection close processing has been locally initiated\n  // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy\n  // from the downstream connection) prior to Envoy closing the socket associated with that\n  // connection.\n  // NOTE: This timeout is enforced even when the socket associated with the downstream connection\n  // is pending a flush of the write buffer. However, any progress made writing data to the socket\n  // will restart the timer associated with this timeout. This means that the total grace period for\n  // a socket in this state will be\n  // <total_time_waiting_for_write_buffer_flushes>+<delayed_close_timeout>.\n  //\n  // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close\n  // sequence mitigates a race condition that exists when downstream clients do not drain/process\n  // data in a connection's receive buffer after a remote close has been detected via a socket\n  // write(). This race leads to such clients failing to process the response code sent by Envoy,\n  // which could result in erroneous downstream processing.\n  //\n  // If the timeout triggers, Envoy will close the connection's socket.\n  //\n  // The default timeout is 1000 ms if this option is not specified.\n  //\n  // .. NOTE::\n  //    To be useful in avoiding the race condition described above, this timeout must be set\n  //    to *at least* <max round trip time expected between clients and Envoy>+<100ms to account for\n  //    a reasonable \"worst\" case processing time for a full iteration of Envoy's event loop>.\n  //\n  // .. 
WARNING::\n  //    A value of 0 will completely disable delayed close processing. When disabled, the downstream\n  //    connection's socket will be closed immediately after the write flush is completed or will\n  //    never close if the write flush does not complete.\n  google.protobuf.Duration delayed_close_timeout = 26;\n\n  // Configuration for :ref:`HTTP access logs <arch_overview_access_logs>`\n  // emitted by the connection manager.\n  repeated config.accesslog.v3.AccessLog access_log = 13;\n\n  // If set to true, the connection manager will use the real remote address\n  // of the client connection when determining internal versus external origin and manipulating\n  // various headers. If set to false or absent, the connection manager will use the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for`,\n  // :ref:`config_http_conn_man_headers_x-envoy-internal`, and\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information.\n  google.protobuf.BoolValue use_remote_address = 14\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The number of additional ingress proxy hops from the right side of the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when\n  // determining the origin client's IP address. The default is zero if this option\n  // is not specified. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information.\n  uint32 xff_num_trusted_hops = 19;\n\n  // Configures what network addresses are considered internal for stats and header sanitation\n  // purposes. 
If unspecified, only RFC1918 IP addresses will be considered internal.\n  // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information about internal/external addresses.\n  InternalAddressConfig internal_address_config = 25;\n\n  // If set, Envoy will not append the remote address to the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in\n  // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager\n  // has mutated the request headers. While :ref:`use_remote_address\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address>`\n  // will also suppress XFF addition, it has consequences for logging and other\n  // Envoy uses of the remote address, so *skip_xff_append* should be used\n  // when only an elision of XFF addition is intended.\n  bool skip_xff_append = 21;\n\n  // Via header value to append to request and response headers. If this is\n  // empty, no via header will be appended.\n  string via = 22;\n\n  // Whether the connection manager will generate the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to\n  // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature\n  // is not desired it can be disabled.\n  google.protobuf.BoolValue generate_request_id = 15;\n\n  // Whether the connection manager will keep the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if passed for a request that is edge\n  // (Edge request is the request from external clients to front Envoy) and not reset it, which\n  // is the current Envoy behaviour. 
This defaults to false.\n  bool preserve_external_request_id = 32;\n\n  // If set, Envoy will always set :ref:`x-request-id <config_http_conn_man_headers_x-request-id>` header in response.\n  // If this is false or not set, the request ID is returned in responses only if tracing is forced using\n  // :ref:`x-envoy-force-trace <config_http_conn_man_headers_x-envoy-force-trace>` header.\n  bool always_set_request_id_in_response = 37;\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  ForwardClientCertDetails forward_client_cert_details = 16\n      [(validate.rules).enum = {defined_only: true}];\n\n  // This field is valid only when :ref:`forward_client_cert_details\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.forward_client_cert_details>`\n  // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in\n  // the client certificate to be forwarded. Note that in the\n  // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and\n  // *By* is always set when the client certificate presents the URI type Subject Alternative Name\n  // value.\n  SetCurrentClientCertDetails set_current_client_cert_details = 17;\n\n  // If proxy_100_continue is true, Envoy will proxy incoming \"Expect:\n  // 100-continue\" headers upstream, and forward \"100 Continue\" responses\n  // downstream. 
If this is false or not set, Envoy will instead strip the\n  // \"Expect: 100-continue\" header, and send a \"100 Continue\" response itself.\n  bool proxy_100_continue = 18;\n\n  // If\n  // :ref:`use_remote_address\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address>`\n  // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is\n  // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*.\n  // This is useful for testing compatibility of upstream services that parse the header value. For\n  // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses\n  // <https://tools.ietf.org/html/rfc4291#section-2.5.5.2>`_ for details. This will also affect the\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See\n  // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6\n  // <config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6>` for runtime\n  // control.\n  // [#not-implemented-hide:]\n  bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20;\n\n  repeated UpgradeConfig upgrade_configs = 23;\n\n  // Should paths be normalized according to RFC 3986 before any processing of\n  // requests by HTTP filters or routing? This affects the upstream *:path* header\n  // as well. For paths that fail this check, Envoy will respond with 400 to\n  // paths that are malformed. This defaults to false currently but will default\n  // true in the future. 
When not specified, this value may be overridden by the\n  // runtime variable\n  // :ref:`http_connection_manager.normalize_path<config_http_conn_man_runtime_normalize_path>`.\n  // See `Normalization and Comparison <https://tools.ietf.org/html/rfc3986#section-6>`_\n  // for details of normalization.\n  // Note that Envoy does not perform\n  // `case normalization <https://tools.ietf.org/html/rfc3986#section-6.2.2.1>`_\n  google.protobuf.BoolValue normalize_path = 30;\n\n  // Determines if adjacent slashes in the path are merged into one before any processing of\n  // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without\n  // setting this option, incoming requests with path `//dir///file` will not match against route\n  // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of\n  // `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool merge_slashes = 33;\n\n  // The configuration of the request ID extension. This includes operations such as\n  // generation, validation, and associated tracing operations.\n  //\n  // If not set, Envoy uses the default UUID-based behavior:\n  //\n  // 1. Request ID is propagated using *x-request-id* header.\n  //\n  // 2. Request ID is a universally unique identifier (UUID).\n  //\n  // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID.\n  RequestIDExtension request_id_extension = 36;\n\n  // The configuration to customize local reply returned by Envoy. It can customize status code,\n  // body text and response content type. If not specified, status code and text body are hard\n  // coded in Envoy, the response content type is plain text.\n  LocalReplyConfig local_reply_config = 38;\n\n  // Determines if the port part should be removed from host/authority header before any processing\n  // of request by HTTP filters or routing. 
The port would be removed only if it is equal to the :ref:`listener's<envoy_api_field_config.listener.v3.Listener.address>`\n  // local port and request method is not CONNECT. This affects the upstream host header as well.\n  // Without setting this option, incoming requests with host `example:443` will not match against\n  // route with :ref:`domains<envoy_api_field_config.route.v3.VirtualHost.domains>` match set to `example`. Defaults to `false`. Note that port removal is not part\n  // of `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool strip_matching_host_port = 39;\n\n  // Governs Envoy's behavior when receiving invalid HTTP from downstream.\n  // If this option is false (default), Envoy will err on the conservative side handling HTTP\n  // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request.\n  // If this option is set to true, Envoy will be more permissive, only resetting the invalid\n  // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire\n  // request is read for HTTP/1.1)\n  // In general this should be true for deployments receiving trusted traffic (L2 Envoys,\n  // company-internal mesh) and false when receiving untrusted traffic (edge deployments).\n  //\n  // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are\n  // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message>` or the new HTTP/2 option\n  // :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>`\n  // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging\n  // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.stream_error_on_invalid_http_messaging>`\n  
google.protobuf.BoolValue stream_error_on_invalid_http_message = 40;\n\n  google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// The configuration to customize local reply returned by Envoy.\nmessage LocalReplyConfig {\n  // Configuration of list of mappers which allows to filter and change local response.\n  // The mappers will be checked by the specified order until one is matched.\n  repeated ResponseMapper mappers = 1;\n\n  // The configuration to form response body from the :ref:`command operators <config_access_log_command_operators>`\n  // and to specify response content type as one of: plain/text or application/json.\n  //\n  // Example one: \"plain/text\" ``body_format``.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   text_format: \"%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\\n\"\n  //\n  // The following response body in \"plain/text\" format will be generated for a request with\n  // local reply body of \"upstream connection error\", response_code=503 and path=/foo.\n  //\n  // .. code-block:: text\n  //\n  //   upstream connect error:503:path=/foo\n  //\n  // Example two: \"application/json\" ``body_format``.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   json_format:\n  //     status: \"%RESPONSE_CODE%\"\n  //     message: \"%LOCAL_REPLY_BODY%\"\n  //     path: \"%REQ(:path)%\"\n  //\n  // The following response body in \"application/json\" format would be generated for a request with\n  // local reply body of \"upstream connection error\", response_code=503 and path=/foo.\n  //\n  // .. 
code-block:: json\n  //\n  //  {\n  //    \"status\": 503,\n  //    \"message\": \"upstream connection error\",\n  //    \"path\": \"/foo\"\n  //  }\n  //\n  config.core.v3.SubstitutionFormatString body_format = 2;\n}\n\n// The configuration to filter and change local response.\n// [#next-free-field: 6]\nmessage ResponseMapper {\n  // Filter to determine if this mapper should apply.\n  config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}];\n\n  // The new response status code if specified.\n  google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n  // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%`\n  // command operator in the `body_format`.\n  config.core.v3.DataSource body = 3;\n\n  // A per mapper `body_format` to override the :ref:`body_format <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.body_format>`.\n  // It will be used when this mapper is matched.\n  config.core.v3.SubstitutionFormatString body_format_override = 4;\n\n  // HTTP headers to add to a local reply. This allows the response mapper to append, to add\n  // or to override headers of any local reply before it is sent to a downstream client.\n  repeated config.core.v3.HeaderValueOption headers_to_add = 5\n      [(validate.rules).repeated = {max_items: 1000}];\n}\n\nmessage Rds {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.Rds\";\n\n  // Configuration source specifier for RDS.\n  config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n  // The name of the route configuration. This name will be passed to the RDS\n  // API. 
This allows an Envoy configuration with multiple HTTP listeners (and\n  // associated HTTP connection manager filters) to use different route\n  // configurations.\n  string route_config_name = 2\n      [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  // Resource locator for RDS. This is mutually exclusive to *route_config_name*.\n  // [#not-implemented-hide:]\n  udpa.core.v1.ResourceLocator rds_resource_locator = 3\n      [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n}\n\n// This message is used to work around the limitations with 'oneof' and repeated fields.\nmessage ScopedRouteConfigurationsList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.ScopedRouteConfigurationsList\";\n\n  repeated config.route.v3.ScopedRouteConfiguration scoped_route_configurations = 1\n      [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#next-free-field: 6]\nmessage ScopedRoutes {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes\";\n\n  // Specifies the mechanism for constructing \"scope keys\" based on HTTP request attributes. These\n  // keys are matched against a set of :ref:`Key<envoy_api_msg_config.route.v3.ScopedRouteConfiguration.Key>`\n  // objects assembled from :ref:`ScopedRouteConfiguration<envoy_api_msg_config.route.v3.ScopedRouteConfiguration>`\n  // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via\n  // :ref:`scoped_route_configurations_list<envoy_api_field_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_route_configurations_list>`.\n  //\n  // Upon receiving a request's headers, the Router will build a key using the algorithm specified\n  // by this message. 
This key will be used to look up the routing table (i.e., the\n  // :ref:`RouteConfiguration<envoy_api_msg_config.route.v3.RouteConfiguration>`) to use for the request.\n  message ScopeKeyBuilder {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder\";\n\n    // Specifies the mechanism for constructing key fragments which are composed into scope keys.\n    message FragmentBuilder {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder.\"\n          \"FragmentBuilder\";\n\n      // Specifies how the value of a header should be extracted.\n      // The following example maps the structure of a header to the fields in this message.\n      //\n      // .. code::\n      //\n      //              <0> <1>   <-- index\n      //    X-Header: a=b;c=d\n      //    |         || |\n      //    |         || \\----> <element_separator>\n      //    |         ||\n      //    |         |\\----> <element.separator>\n      //    |         |\n      //    |         \\----> <element.key>\n      //    |\n      //    \\----> <name>\n      //\n      //    Each 'a=b' key-value pair constitutes an 'element' of the header field.\n      message HeaderValueExtractor {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder.\"\n            \"FragmentBuilder.HeaderValueExtractor\";\n\n        // Specifies a header field's key value pair to match on.\n        message KvElement {\n          option (udpa.annotations.versioning).previous_message_type =\n              \"envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder.\"\n              \"FragmentBuilder.HeaderValueExtractor.KvElement\";\n\n          // The separator between key and value 
(e.g., '=' separates 'k=v;...').\n          // If an element is an empty string, the element is ignored.\n          // If an element contains no separator, the whole element is parsed as key and the\n          // fragment value is an empty string.\n          // If there are multiple values for a matched key, the first value is returned.\n          string separator = 1 [(validate.rules).string = {min_len: 1}];\n\n          // The key to match on.\n          string key = 2 [(validate.rules).string = {min_len: 1}];\n        }\n\n        // The name of the header field to extract the value from.\n        string name = 1 [(validate.rules).string = {min_len: 1}];\n\n        // The element separator (e.g., ';' separates 'a;b;c;d').\n        // Default: empty string. This causes the entirety of the header field to be extracted.\n        // If this field is set to an empty string and 'index' is used in the oneof below, 'index'\n        // must be set to 0.\n        string element_separator = 2;\n\n        oneof extract_type {\n          // Specifies the zero based index of the element to extract.\n          // Note Envoy concatenates multiple values of the same header key into a comma separated\n          // string, the splitting always happens after the concatenation.\n          uint32 index = 3;\n\n          // Specifies the key value pair to extract the value from.\n          KvElement element = 4;\n        }\n      }\n\n      oneof type {\n        option (validate.required) = true;\n\n        // Specifies how a header field's value should be extracted.\n        HeaderValueExtractor header_value_extractor = 1;\n      }\n    }\n\n    // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the\n    // fragments of a :ref:`ScopedRouteConfiguration<envoy_api_msg_config.route.v3.ScopedRouteConfiguration>`.\n    // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any 
key.\n    repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // The name assigned to the scoped routing configuration.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The algorithm to use for constructing a scope key for each request.\n  ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];\n\n  // Configuration source specifier for RDS.\n  // This config source is used to subscribe to RouteConfiguration resources specified in\n  // ScopedRouteConfiguration messages.\n  config.core.v3.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}];\n\n  oneof config_specifier {\n    option (validate.required) = true;\n\n    // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by\n    // matching a key constructed from the request's attributes according to the algorithm specified\n    // by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRouteConfigurationsList scoped_route_configurations_list = 4;\n\n    // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS\n    // API. 
A scope is assigned to a request by matching a key constructed from the request's\n    // attributes according to the algorithm specified by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRds scoped_rds = 5;\n  }\n}\n\nmessage ScopedRds {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.ScopedRds\";\n\n  // Configuration source specifier for scoped RDS.\n  config.core.v3.ConfigSource scoped_rds_config_source = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// [#next-free-field: 6]\nmessage HttpFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.HttpFilter\";\n\n  reserved 3;\n\n  // The name of the filter configuration. The name is used as a fallback to\n  // select an extension if the type of the configuration proto is not\n  // sufficient. It also serves as a resource name in ExtensionConfigDS.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof config_type {\n    // Filter specific configuration which depends on the filter being instantiated. 
See the supported\n    // filters for further documentation.\n    google.protobuf.Any typed_config = 4;\n\n    // Configuration source specifier for an extension configuration discovery service.\n    // In case of a failure and without the default configuration, the HTTP listener responds with code 500.\n    // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061).\n    config.core.v3.ExtensionConfigSource config_discovery = 5;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n}\n\nmessage RequestIDExtension {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.http_connection_manager.v2.RequestIDExtension\";\n\n  // Request ID extension specific configuration.\n  google.protobuf.Any typed_config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/accesslog/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/config/trace/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/http_connection_manager/v3:pkg\",\n        \"//envoy/type/tracing/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.http_connection_manager.v4alpha;\n\nimport \"envoy/config/accesslog/v4alpha/accesslog.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/config/core/v4alpha/protocol.proto\";\nimport \"envoy/config/core/v4alpha/substitution_format_string.proto\";\nimport \"envoy/config/route/v4alpha/route.proto\";\nimport \"envoy/config/route/v4alpha/scoped_route.proto\";\nimport \"envoy/config/trace/v4alpha/http_tracer.proto\";\nimport \"envoy/type/tracing/v3/custom_tag.proto\";\nimport \"envoy/type/v3/percent.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/security.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v4alpha\";\noption java_outer_classname = \"HttpConnectionManagerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: HTTP connection manager]\n// HTTP connection manager :ref:`configuration overview <config_http_conn_man>`.\n// [#extension: envoy.filters.network.http_connection_manager]\n\n// [#next-free-field: 41]\nmessage HttpConnectionManager {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\";\n\n  enum CodecType {\n    // For every new connection, the connection manager will determine which\n    // 
codec to use. This mode supports both ALPN for TLS listeners as well as\n    // protocol inference for plaintext listeners. If ALPN data is available, it\n    // is preferred, otherwise protocol inference is used. In almost all cases,\n    // this is the right option to choose for this setting.\n    AUTO = 0;\n\n    // The connection manager will assume that the client is speaking HTTP/1.1.\n    HTTP1 = 1;\n\n    // The connection manager will assume that the client is speaking HTTP/2\n    // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN.\n    // Prior knowledge is allowed).\n    HTTP2 = 2;\n\n    // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n    // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n    // to distinguish HTTP1 and HTTP2 traffic.\n    HTTP3 = 3;\n  }\n\n  enum ServerHeaderTransformation {\n    // Overwrite any Server header with the contents of server_name.\n    OVERWRITE = 0;\n\n    // If no Server header is present, append Server server_name\n    // If a Server header is present, pass it through.\n    APPEND_IF_ABSENT = 1;\n\n    // Pass through the value of the server header, and do not append a header\n    // if none is present.\n    PASS_THROUGH = 2;\n  }\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  enum ForwardClientCertDetails {\n    // Do not send the XFCC header to the next hop. 
This is the default value.\n    SANITIZE = 0;\n\n    // When the client connection is mTLS (Mutual TLS), forward the XFCC header\n    // in the request.\n    FORWARD_ONLY = 1;\n\n    // When the client connection is mTLS, append the client certificate\n    // information to the request’s XFCC header and forward it.\n    APPEND_FORWARD = 2;\n\n    // When the client connection is mTLS, reset the XFCC header with the client\n    // certificate information and send it to the next hop.\n    SANITIZE_SET = 3;\n\n    // Always forward the XFCC header in the request, regardless of whether the\n    // client connection is mTLS.\n    ALWAYS_FORWARD_ONLY = 4;\n  }\n\n  // [#next-free-field: 10]\n  message Tracing {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing\";\n\n    enum OperationName {\n      // The HTTP listener is used for ingress/incoming requests.\n      INGRESS = 0;\n\n      // The HTTP listener is used for egress/outgoing requests.\n      EGRESS = 1;\n    }\n\n    reserved 1, 2;\n\n    reserved \"operation_name\", \"request_headers_for_tags\";\n\n    // Target percentage of requests managed by this HTTP connection manager that will be force\n    // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`\n    // header is set. This field is a direct analog for the runtime variable\n    // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager\n    // <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent client_sampling = 3;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be randomly\n    // selected for trace generation, if not requested by the client or not forced. 
This field is\n    // a direct analog for the runtime variable 'tracing.random_sampling' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent random_sampling = 4;\n\n    // Target percentage of requests managed by this HTTP connection manager that will be traced\n    // after all other sampling checks have been applied (client-directed, force tracing, random\n    // sampling). This field functions as an upper limit on the total configured sampling rate. For\n    // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%\n    // of client requests with the appropriate headers to be force traced. This field is a direct\n    // analog for the runtime variable 'tracing.global_enabled' in the\n    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.\n    // Default: 100%\n    type.v3.Percent overall_sampling = 5;\n\n    // Whether to annotate spans with additional data. If true, spans will include logs for stream\n    // events.\n    bool verbose = 6;\n\n    // Maximum length of the request path to extract and include in the HttpUrl tag. Used to\n    // truncate lengthy request paths to meet the needs of a tracing backend.\n    // Default: 256\n    google.protobuf.UInt32Value max_path_tag_length = 7;\n\n    // A list of custom tags with unique tag name to create tags for the active span.\n    repeated type.tracing.v3.CustomTag custom_tags = 8;\n\n    // Configuration for an external tracing provider.\n    // If not specified, no tracing will be performed.\n    //\n    // .. attention::\n    //   Please be aware that *envoy.tracers.opencensus* provider can only be configured once\n    //   in Envoy lifetime.\n    //   Any attempts to reconfigure it or to use different configurations for different HCM filters\n    //   will be rejected.\n    //   Such a constraint is inherent to OpenCensus itself. 
It cannot be overcome without changes\n    //   on OpenCensus side.\n    config.trace.v4alpha.Tracing.Http provider = 9;\n  }\n\n  message InternalAddressConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.\"\n        \"InternalAddressConfig\";\n\n    // Whether unix socket addresses should be considered internal.\n    bool unix_sockets = 1;\n  }\n\n  // [#next-free-field: 7]\n  message SetCurrentClientCertDetails {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.\"\n        \"SetCurrentClientCertDetails\";\n\n    reserved 2;\n\n    // Whether to forward the subject of the client cert. Defaults to false.\n    google.protobuf.BoolValue subject = 1;\n\n    // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the\n    // XFCC header comma separated from other values with the value Cert=\"PEM\".\n    // Defaults to false.\n    bool cert = 3;\n\n    // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM\n    // format. This will appear in the XFCC header comma separated from other values with the value\n    // Chain=\"PEM\".\n    // Defaults to false.\n    bool chain = 6;\n\n    // Whether to forward the DNS type Subject Alternative Names of the client cert.\n    // Defaults to false.\n    bool dns = 4;\n\n    // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to\n    // false.\n    bool uri = 5;\n  }\n\n  // The configuration for HTTP upgrades.\n  // For each upgrade type desired, an UpgradeConfig must be added.\n  //\n  // .. warning::\n  //\n  //    The current implementation of upgrade headers does not handle\n  //    multi-valued upgrade headers. 
Support for multi-valued headers may be\n  //    added in the future if needed.\n  //\n  // .. warning::\n  //    The current implementation of upgrade headers does not work with HTTP/2\n  //    upstreams.\n  message UpgradeConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.\"\n        \"UpgradeConfig\";\n\n    // The case-insensitive name of this upgrade, e.g. \"websocket\".\n    // For each upgrade type present in upgrade_configs, requests with\n    // Upgrade: [upgrade_type]\n    // will be proxied upstream.\n    string upgrade_type = 1;\n\n    // If present, this represents the filter chain which will be created for\n    // this type of upgrade. If no filters are present, the filter chain for\n    // HTTP connections will be used for this upgrade type.\n    repeated HttpFilter filters = 2;\n\n    // Determines if upgrades are enabled or disabled by default. Defaults to true.\n    // This can be overridden on a per-route basis with :ref:`cluster\n    // <envoy_api_field_config.route.v4alpha.RouteAction.upgrade_configs>` as documented in the\n    // :ref:`upgrade documentation <arch_overview_upgrades>`.\n    google.protobuf.BoolValue enabled = 3;\n  }\n\n  reserved 27, 11;\n\n  reserved \"idle_timeout\";\n\n  // Supplies the type of codec that the connection manager should use.\n  CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics for the\n  // connection manager. 
See the :ref:`statistics documentation <config_http_conn_man_stats>` for\n  // more information.\n  string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The connection manager’s route table will be dynamically loaded via the RDS API.\n    Rds rds = 3;\n\n    // The route table for the connection manager is static and is specified in this property.\n    config.route.v4alpha.RouteConfiguration route_config = 4;\n\n    // A route table will be dynamically assigned to each request based on request attributes\n    // (e.g., the value of a header). The \"routing scopes\" (i.e., route tables) and \"scope keys\" are\n    // specified in this message.\n    ScopedRoutes scoped_routes = 31;\n  }\n\n  // A list of individual HTTP filters that make up the filter chain for\n  // requests made to the connection manager. :ref:`Order matters <arch_overview_http_filters_ordering>`\n  // as the filters are processed sequentially as request events happen.\n  repeated HttpFilter http_filters = 5;\n\n  // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent`\n  // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked\n  // documentation for more information. Defaults to false.\n  google.protobuf.BoolValue add_user_agent = 6;\n\n  // Presence of the object defines whether the connection manager\n  // emits :ref:`tracing <arch_overview_tracing>` data to the :ref:`configured tracing provider\n  // <envoy_api_msg_config.trace.v4alpha.Tracing>`.\n  Tracing tracing = 7;\n\n  // Additional settings for HTTP requests handled by the connection manager. 
These will be\n  // applicable to both HTTP1 and HTTP2 requests.\n  config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // Additional HTTP/1 settings that are passed to the HTTP/1 codec.\n  config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8;\n\n  // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec.\n  config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // An optional override that the connection manager will write to the server\n  // header in responses. If not set, the default is *envoy*.\n  string server_name = 10\n      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];\n\n  // Defines the action to be applied to the Server header on the response path.\n  // By default, Envoy will overwrite the header with the value specified in\n  // server_name.\n  ServerHeaderTransformation server_header_transformation = 34\n      [(validate.rules).enum = {defined_only: true}];\n\n  // The maximum request headers size for incoming connections.\n  // If unconfigured, the default max request headers allowed is 60 KiB.\n  // Requests that exceed this limit will receive a 431 response.\n  // The max configurable limit is 96 KiB, based on current implementation\n  // constraints.\n  google.protobuf.UInt32Value max_request_headers_kb = 29\n      [(validate.rules).uint32 = {lte: 96 gt: 0}];\n\n  // The stream idle timeout for connections managed by the connection manager.\n  // If not specified, this defaults to 5 minutes. 
The default value was selected\n  // so as not to interfere with any smaller configured timeouts that may have\n  // existed in configurations prior to the introduction of this feature, while\n  // introducing robustness to TCP connections that terminate without a FIN.\n  //\n  // This idle timeout applies to new streams and is overridable by the\n  // :ref:`route-level idle_timeout\n  // <envoy_api_field_config.route.v4alpha.RouteAction.idle_timeout>`. Even on a stream in\n  // which the override applies, prior to receipt of the initial request\n  // headers, the :ref:`stream_idle_timeout\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.stream_idle_timeout>`\n  // applies. Each time an encode/decode event for headers or data is processed\n  // for the stream, the timer will be reset. If the timeout fires, the stream\n  // is terminated with a 408 Request Timeout error code if no upstream response\n  // header has been received, otherwise a stream reset occurs.\n  //\n  // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough\n  // window to write any remaining stream data once the entirety of stream data (local end stream is\n  // true) has been buffered pending available window. In other words, this timeout defends against\n  // a peer that does not release enough window to completely write the stream, even though all\n  // data has been proxied within available flow control windows. If the timeout is hit in this\n  // case, the :ref:`tx_flush_timeout <config_http_conn_man_stats_per_codec>` counter will be\n  // incremented. Note that :ref:`max_stream_duration\n  // <envoy_api_field_config.core.v4alpha.HttpProtocolOptions.max_stream_duration>` does not apply to\n  // this corner case.\n  //\n  // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due\n  // to the granularity of events presented to the connection manager. 
For example, while receiving\n  // very large request headers, it may be the case that there is traffic regularly arriving on the\n  // wire while the connection manage is only able to observe the end-of-headers event, hence the\n  // stream may still idle timeout.\n  //\n  // A value of 0 will completely disable the connection manager stream idle\n  // timeout, although per-route idle timeout overrides will continue to apply.\n  google.protobuf.Duration stream_idle_timeout = 24\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The amount of time that Envoy will wait for the entire request to be received.\n  // The timer is activated when the request is initiated, and is disarmed when the last byte of the\n  // request is sent upstream (i.e. all decoding filters have processed the request), OR when the\n  // response is initiated. If not specified or set to 0, this timeout is disabled.\n  google.protobuf.Duration request_timeout = 28\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The time that Envoy will wait between sending an HTTP/2 “shutdown\n  // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame.\n  // This is used so that Envoy provides a grace period for new streams that\n  // race with the final GOAWAY frame. During this grace period, Envoy will\n  // continue to accept new streams. After the grace period, a final GOAWAY\n  // frame is sent and Envoy will start refusing new streams. Draining occurs\n  // both when a connection hits the idle timeout or during general server\n  // draining. 
The default grace period is 5000 milliseconds (5 seconds) if this\n  // option is not specified.\n  google.protobuf.Duration drain_timeout = 12;\n\n  // The delayed close timeout is for downstream connections managed by the HTTP connection manager.\n  // It is defined as a grace period after connection close processing has been locally initiated\n  // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy\n  // from the downstream connection) prior to Envoy closing the socket associated with that\n  // connection.\n  // NOTE: This timeout is enforced even when the socket associated with the downstream connection\n  // is pending a flush of the write buffer. However, any progress made writing data to the socket\n  // will restart the timer associated with this timeout. This means that the total grace period for\n  // a socket in this state will be\n  // <total_time_waiting_for_write_buffer_flushes>+<delayed_close_timeout>.\n  //\n  // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close\n  // sequence mitigates a race condition that exists when downstream clients do not drain/process\n  // data in a connection's receive buffer after a remote close has been detected via a socket\n  // write(). This race leads to such clients failing to process the response code sent by Envoy,\n  // which could result in erroneous downstream processing.\n  //\n  // If the timeout triggers, Envoy will close the connection's socket.\n  //\n  // The default timeout is 1000 ms if this option is not specified.\n  //\n  // .. NOTE::\n  //    To be useful in avoiding the race condition described above, this timeout must be set\n  //    to *at least* <max round trip time expected between clients and Envoy>+<100ms to account for\n  //    a reasonable \"worst\" case processing time for a full iteration of Envoy's event loop>.\n  //\n  // .. 
WARNING::\n  //    A value of 0 will completely disable delayed close processing. When disabled, the downstream\n  //    connection's socket will be closed immediately after the write flush is completed or will\n  //    never close if the write flush does not complete.\n  google.protobuf.Duration delayed_close_timeout = 26;\n\n  // Configuration for :ref:`HTTP access logs <arch_overview_access_logs>`\n  // emitted by the connection manager.\n  repeated config.accesslog.v4alpha.AccessLog access_log = 13;\n\n  // If set to true, the connection manager will use the real remote address\n  // of the client connection when determining internal versus external origin and manipulating\n  // various headers. If set to false or absent, the connection manager will use the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for`,\n  // :ref:`config_http_conn_man_headers_x-envoy-internal`, and\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information.\n  google.protobuf.BoolValue use_remote_address = 14\n      [(udpa.annotations.security).configure_for_untrusted_downstream = true];\n\n  // The number of additional ingress proxy hops from the right side of the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when\n  // determining the origin client's IP address. The default is zero if this option\n  // is not specified. See the documentation for\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information.\n  uint32 xff_num_trusted_hops = 19;\n\n  // Configures what network addresses are considered internal for stats and header sanitation\n  // purposes. 
If unspecified, only RFC1918 IP addresses will be considered internal.\n  // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more\n  // information about internal/external addresses.\n  InternalAddressConfig internal_address_config = 25;\n\n  // If set, Envoy will not append the remote address to the\n  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in\n  // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager\n  // has mutated the request headers. While :ref:`use_remote_address\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.use_remote_address>`\n  // will also suppress XFF addition, it has consequences for logging and other\n  // Envoy uses of the remote address, so *skip_xff_append* should be used\n  // when only an elision of XFF addition is intended.\n  bool skip_xff_append = 21;\n\n  // Via header value to append to request and response headers. If this is\n  // empty, no via header will be appended.\n  string via = 22;\n\n  // Whether the connection manager will generate the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to\n  // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature\n  // is not desired it can be disabled.\n  google.protobuf.BoolValue generate_request_id = 15;\n\n  // Whether the connection manager will keep the :ref:`x-request-id\n  // <config_http_conn_man_headers_x-request-id>` header if passed for a request that is edge\n  // (Edge request is the request from external clients to front Envoy) and not reset it, which\n  // is the current Envoy behaviour. 
This defaults to false.\n  bool preserve_external_request_id = 32;\n\n  // If set, Envoy will always set :ref:`x-request-id <config_http_conn_man_headers_x-request-id>` header in response.\n  // If this is false or not set, the request ID is returned in responses only if tracing is forced using\n  // :ref:`x-envoy-force-trace <config_http_conn_man_headers_x-envoy-force-trace>` header.\n  bool always_set_request_id_in_response = 37;\n\n  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP\n  // header.\n  ForwardClientCertDetails forward_client_cert_details = 16\n      [(validate.rules).enum = {defined_only: true}];\n\n  // This field is valid only when :ref:`forward_client_cert_details\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.forward_client_cert_details>`\n  // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in\n  // the client certificate to be forwarded. Note that in the\n  // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and\n  // *By* is always set when the client certificate presents the URI type Subject Alternative Name\n  // value.\n  SetCurrentClientCertDetails set_current_client_cert_details = 17;\n\n  // If proxy_100_continue is true, Envoy will proxy incoming \"Expect:\n  // 100-continue\" headers upstream, and forward \"100 Continue\" responses\n  // downstream. 
If this is false or not set, Envoy will instead strip the\n  // \"Expect: 100-continue\" header, and send a \"100 Continue\" response itself.\n  bool proxy_100_continue = 18;\n\n  // If\n  // :ref:`use_remote_address\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.use_remote_address>`\n  // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is\n  // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*.\n  // This is useful for testing compatibility of upstream services that parse the header value. For\n  // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses\n  // <https://tools.ietf.org/html/rfc4291#section-2.5.5.2>`_ for details. This will also affect the\n  // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See\n  // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6\n  // <config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6>` for runtime\n  // control.\n  // [#not-implemented-hide:]\n  bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20;\n\n  repeated UpgradeConfig upgrade_configs = 23;\n\n  // Should paths be normalized according to RFC 3986 before any processing of\n  // requests by HTTP filters or routing? This affects the upstream *:path* header\n  // as well. For paths that fail this check, Envoy will respond with 400 to\n  // paths that are malformed. This defaults to false currently but will default\n  // true in the future. 
When not specified, this value may be overridden by the\n  // runtime variable\n  // :ref:`http_connection_manager.normalize_path<config_http_conn_man_runtime_normalize_path>`.\n  // See `Normalization and Comparison <https://tools.ietf.org/html/rfc3986#section-6>`_\n  // for details of normalization.\n  // Note that Envoy does not perform\n  // `case normalization <https://tools.ietf.org/html/rfc3986#section-6.2.2.1>`_\n  google.protobuf.BoolValue normalize_path = 30;\n\n  // Determines if adjacent slashes in the path are merged into one before any processing of\n  // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without\n  // setting this option, incoming requests with path `//dir///file` will not match against route\n  // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of\n  // `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool merge_slashes = 33;\n\n  // The configuration of the request ID extension. This includes operations such as\n  // generation, validation, and associated tracing operations.\n  //\n  // If not set, Envoy uses the default UUID-based behavior:\n  //\n  // 1. Request ID is propagated using *x-request-id* header.\n  //\n  // 2. Request ID is a universally unique identifier (UUID).\n  //\n  // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID.\n  RequestIDExtension request_id_extension = 36;\n\n  // The configuration to customize local reply returned by Envoy. It can customize status code,\n  // body text and response content type. If not specified, status code and text body are hard\n  // coded in Envoy, the response content type is plain text.\n  LocalReplyConfig local_reply_config = 38;\n\n  // Determines if the port part should be removed from host/authority header before any processing\n  // of request by HTTP filters or routing. 
The port would be removed only if it is equal to the :ref:`listener's<envoy_api_field_config.listener.v4alpha.Listener.address>`\n  // local port and request method is not CONNECT. This affects the upstream host header as well.\n  // Without setting this option, incoming requests with host `example:443` will not match against\n  // route with :ref:`domains<envoy_api_field_config.route.v4alpha.VirtualHost.domains>` match set to `example`. Defaults to `false`. Note that port removal is not part\n  // of `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.\n  bool strip_matching_host_port = 39;\n\n  // Governs Envoy's behavior when receiving invalid HTTP from downstream.\n  // If this option is false (default), Envoy will err on the conservative side handling HTTP\n  // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request.\n  // If this option is set to true, Envoy will be more permissive, only resetting the invalid\n  // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire\n  // request is read for HTTP/1.1)\n  // In general this should be true for deployments receiving trusted traffic (L2 Envoys,\n  // company-internal mesh) and false when receiving untrusted traffic (edge deployments).\n  //\n  // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are\n  // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message>` or the new HTTP/2 option\n  // :ref:`override_stream_error_on_invalid_http_message\n  // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>`\n  // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging\n  // 
<envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.stream_error_on_invalid_http_messaging>`\n  google.protobuf.BoolValue stream_error_on_invalid_http_message = 40;\n}\n\n// The configuration to customize local reply returned by Envoy.\nmessage LocalReplyConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig\";\n\n  // Configuration of list of mappers which allows to filter and change local response.\n  // The mappers will be checked by the specified order until one is matched.\n  repeated ResponseMapper mappers = 1;\n\n  // The configuration to form response body from the :ref:`command operators <config_access_log_command_operators>`\n  // and to specify response content type as one of: plain/text or application/json.\n  //\n  // Example one: \"plain/text\" ``body_format``.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   text_format: \"%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\\n\"\n  //\n  // The following response body in \"plain/text\" format will be generated for a request with\n  // local reply body of \"upstream connection error\", response_code=503 and path=/foo.\n  //\n  // .. code-block:: text\n  //\n  //   upstream connect error:503:path=/foo\n  //\n  // Example two: \"application/json\" ``body_format``.\n  //\n  // .. validated-code-block:: yaml\n  //   :type-name: envoy.config.core.v3.SubstitutionFormatString\n  //\n  //   json_format:\n  //     status: \"%RESPONSE_CODE%\"\n  //     message: \"%LOCAL_REPLY_BODY%\"\n  //     path: \"%REQ(:path)%\"\n  //\n  // The following response body in \"application/json\" format would be generated for a request with\n  // local reply body of \"upstream connection error\", response_code=503 and path=/foo.\n  //\n  // .. 
code-block:: json\n  //\n  //  {\n  //    \"status\": 503,\n  //    \"message\": \"upstream connection error\",\n  //    \"path\": \"/foo\"\n  //  }\n  //\n  config.core.v4alpha.SubstitutionFormatString body_format = 2;\n}\n\n// The configuration to filter and change local response.\n// [#next-free-field: 6]\nmessage ResponseMapper {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper\";\n\n  // Filter to determine if this mapper should apply.\n  config.accesslog.v4alpha.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}];\n\n  // The new response status code if specified.\n  google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n\n  // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%`\n  // command operator in the `body_format`.\n  config.core.v4alpha.DataSource body = 3;\n\n  // A per mapper `body_format` to override the :ref:`body_format <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.body_format>`.\n  // It will be used when this mapper is matched.\n  config.core.v4alpha.SubstitutionFormatString body_format_override = 4;\n\n  // HTTP headers to add to a local reply. This allows the response mapper to append, to add\n  // or to override headers of any local reply before it is sent to a downstream client.\n  repeated config.core.v4alpha.HeaderValueOption headers_to_add = 5\n      [(validate.rules).repeated = {max_items: 1000}];\n}\n\nmessage Rds {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.Rds\";\n\n  // Configuration source specifier for RDS.\n  config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];\n\n  oneof name_specifier {\n    // The name of the route configuration. 
This name will be passed to the RDS\n    // API. This allows an Envoy configuration with multiple HTTP listeners (and\n    // associated HTTP connection manager filters) to use different route\n    // configurations.\n    string route_config_name = 2;\n\n    // Resource locator for RDS. This is mutually exclusive to *route_config_name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator rds_resource_locator = 3;\n  }\n}\n\n// This message is used to work around the limitations with 'oneof' and repeated fields.\nmessage ScopedRouteConfigurationsList {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList\";\n\n  repeated config.route.v4alpha.ScopedRouteConfiguration scoped_route_configurations = 1\n      [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#next-free-field: 6]\nmessage ScopedRoutes {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes\";\n\n  // Specifies the mechanism for constructing \"scope keys\" based on HTTP request attributes. These\n  // keys are matched against a set of :ref:`Key<envoy_api_msg_config.route.v4alpha.ScopedRouteConfiguration.Key>`\n  // objects assembled from :ref:`ScopedRouteConfiguration<envoy_api_msg_config.route.v4alpha.ScopedRouteConfiguration>`\n  // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via\n  // :ref:`scoped_route_configurations_list<envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.scoped_route_configurations_list>`.\n  //\n  // Upon receiving a request's headers, the Router will build a key using the algorithm specified\n  // by this message. 
This key will be used to look up the routing table (i.e., the\n  // :ref:`RouteConfiguration<envoy_api_msg_config.route.v4alpha.RouteConfiguration>`) to use for the request.\n  message ScopeKeyBuilder {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder\";\n\n    // Specifies the mechanism for constructing key fragments which are composed into scope keys.\n    message FragmentBuilder {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.\"\n          \"ScopeKeyBuilder.FragmentBuilder\";\n\n      // Specifies how the value of a header should be extracted.\n      // The following example maps the structure of a header to the fields in this message.\n      //\n      // .. code::\n      //\n      //              <0> <1>   <-- index\n      //    X-Header: a=b;c=d\n      //    |         || |\n      //    |         || \\----> <element_separator>\n      //    |         ||\n      //    |         |\\----> <element.separator>\n      //    |         |\n      //    |         \\----> <element.key>\n      //    |\n      //    \\----> <name>\n      //\n      //    Each 'a=b' key-value pair constitutes an 'element' of the header field.\n      message HeaderValueExtractor {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.\"\n            \"ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor\";\n\n        // Specifies a header field's key value pair to match on.\n        message KvElement {\n          option (udpa.annotations.versioning).previous_message_type =\n              \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.\"\n              \"ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement\";\n\n          // The separator 
between key and value (e.g., '=' separates 'k=v;...').\n          // If an element is an empty string, the element is ignored.\n          // If an element contains no separator, the whole element is parsed as key and the\n          // fragment value is an empty string.\n          // If there are multiple values for a matched key, the first value is returned.\n          string separator = 1 [(validate.rules).string = {min_len: 1}];\n\n          // The key to match on.\n          string key = 2 [(validate.rules).string = {min_len: 1}];\n        }\n\n        // The name of the header field to extract the value from.\n        string name = 1 [(validate.rules).string = {min_len: 1}];\n\n        // The element separator (e.g., ';' separates 'a;b;c;d').\n        // Default: empty string. This causes the entirety of the header field to be extracted.\n        // If this field is set to an empty string and 'index' is used in the oneof below, 'index'\n        // must be set to 0.\n        string element_separator = 2;\n\n        oneof extract_type {\n          // Specifies the zero based index of the element to extract.\n          // Note Envoy concatenates multiple values of the same header key into a comma separated\n          // string, the splitting always happens after the concatenation.\n          uint32 index = 3;\n\n          // Specifies the key value pair to extract the value from.\n          KvElement element = 4;\n        }\n      }\n\n      oneof type {\n        option (validate.required) = true;\n\n        // Specifies how a header field's value should be extracted.\n        HeaderValueExtractor header_value_extractor = 1;\n      }\n    }\n\n    // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the\n    // fragments of a :ref:`ScopedRouteConfiguration<envoy_api_msg_config.route.v4alpha.ScopedRouteConfiguration>`.\n    // A missing fragment during comparison will make the key invalid, i.e., the 
computed key doesn't match any key.\n    repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // The name assigned to the scoped routing configuration.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The algorithm to use for constructing a scope key for each request.\n  ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];\n\n  // Configuration source specifier for RDS.\n  // This config source is used to subscribe to RouteConfiguration resources specified in\n  // ScopedRouteConfiguration messages.\n  config.core.v4alpha.ConfigSource rds_config_source = 3\n      [(validate.rules).message = {required: true}];\n\n  oneof config_specifier {\n    option (validate.required) = true;\n\n    // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by\n    // matching a key constructed from the request's attributes according to the algorithm specified\n    // by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRouteConfigurationsList scoped_route_configurations_list = 4;\n\n    // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS\n    // API. 
A scope is assigned to a request by matching a key constructed from the request's\n    // attributes according to the algorithm specified by the\n    // :ref:`ScopeKeyBuilder<envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.ScopedRoutes.ScopeKeyBuilder>`\n    // in this message.\n    ScopedRds scoped_rds = 5;\n  }\n}\n\nmessage ScopedRds {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds\";\n\n  // Configuration source specifier for scoped RDS.\n  config.core.v4alpha.ConfigSource scoped_rds_config_source = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// [#next-free-field: 6]\nmessage HttpFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter\";\n\n  reserved 3, 2;\n\n  reserved \"config\";\n\n  // The name of the filter configuration. The name is used as a fallback to\n  // select an extension if the type of the configuration proto is not\n  // sufficient. It also serves as a resource name in ExtensionConfigDS.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof config_type {\n    // Filter specific configuration which depends on the filter being instantiated. 
See the supported\n    // filters for further documentation.\n    google.protobuf.Any typed_config = 4;\n\n    // Configuration source specifier for an extension configuration discovery service.\n    // In case of a failure and without the default configuration, the HTTP listener responds with code 500.\n    // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061).\n    config.core.v4alpha.ExtensionConfigSource config_discovery = 5;\n  }\n}\n\nmessage RequestIDExtension {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension\";\n\n  // Request ID extension specific configuration.\n  google.protobuf.Any typed_config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/kafka_broker/v2alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.kafka_broker.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.kafka_broker.v3\";\noption java_outer_classname = \"KafkaBrokerProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Kafka Broker]\n// Kafka Broker :ref:`configuration overview <config_network_filters_kafka_broker>`.\n// [#extension: envoy.filters.network.kafka_broker]\n\nmessage KafkaBroker {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker\";\n\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_kafka_broker_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/local_rate_limit/v2alpha:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.local_ratelimit.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/token_bucket.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.local_ratelimit.v3\";\noption java_outer_classname = \"LocalRateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Local rate limit]\n// Local rate limit :ref:`configuration overview <config_network_filters_local_rate_limit>`.\n// [#extension: envoy.filters.network.local_ratelimit]\n\nmessage LocalRateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.local_rate_limit.v2alpha.LocalRateLimit\";\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_local_rate_limit_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The token bucket configuration to use for rate limiting connections that are processed by the\n  // filter's filter chain. Each incoming connection processed by the filter consumes a single\n  // token. If the token is available, the connection will be allowed. If no tokens are available,\n  // the connection will be immediately closed.\n  //\n  // .. note::\n  //   In the current implementation each filter and filter chain has an independent rate limit.\n  //\n  // .. note::\n  //   In the current implementation the token bucket's :ref:`fill_interval\n  //   <envoy_api_field_type.v3.TokenBucket.fill_interval>` must be >= 50ms to avoid too aggressive\n  //   refills.\n  type.v3.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}];\n\n  // Runtime flag that controls whether the filter is enabled or not. 
If not specified, defaults\n  // to enabled.\n  config.core.v3.RuntimeFeatureFlag runtime_enabled = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/mongo_proxy/v2:pkg\",\n        \"//envoy/extensions/filters/common/fault/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.mongo_proxy.v3;\n\nimport \"envoy/extensions/filters/common/fault/v3/fault.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.mongo_proxy.v3\";\noption java_outer_classname = \"MongoProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Mongo proxy]\n// MongoDB :ref:`configuration overview <config_network_filters_mongo_proxy>`.\n// [#extension: envoy.filters.network.mongo_proxy]\n\nmessage MongoProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.mongo_proxy.v2.MongoProxy\";\n\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_mongo_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The optional path to use for writing Mongo access logs. If no access log\n  // path is specified, no access logs will be written. Note that access log is\n  // also gated by :ref:`runtime <config_network_filters_mongo_proxy_runtime>`.\n  string access_log = 2;\n\n  // Inject a fixed delay before proxying a Mongo operation. Delays are\n  // applied to the following MongoDB operations: Query, Insert, GetMore,\n  // and KillCursors. Once an active delay is in progress, all incoming\n  // data up until the timer event fires will be a part of the delay.\n  common.fault.v3.FaultDelay delay = 3;\n\n  // Flag to specify whether :ref:`dynamic metadata\n  // <config_network_filters_mongo_proxy_dynamic_metadata>` should be emitted. Defaults to false.\n  bool emit_dynamic_metadata = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.mysql_proxy.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.mysql_proxy.v3\";\noption java_outer_classname = \"MysqlProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: MySQL proxy]\n// MySQL Proxy :ref:`configuration overview <config_network_filters_mysql_proxy>`.\n// [#extension: envoy.filters.network.mysql_proxy]\n\nmessage MySQLProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy\";\n\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_mysql_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // [#not-implemented-hide:] The optional path to use for writing MySQL access logs.\n  // If the access log field is empty, access logs will not be written.\n  string access_log = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.postgres_proxy.v3alpha;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.postgres_proxy.v3alpha\";\noption java_outer_classname = \"PostgresProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Postgres proxy]\n// Postgres Proxy :ref:`configuration overview\n// <config_network_filters_postgres_proxy>`.\n// [#extension: envoy.filters.network.postgres_proxy]\n\nmessage PostgresProxy {\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_postgres_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Controls whether SQL statements received in Frontend Query messages\n  // are parsed. Parsing is required to produce Postgres proxy filter\n  // metadata. Defaults to true.\n  google.protobuf.BoolValue enable_sql_parsing = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/rate_limit/v2:pkg\",\n        \"//envoy/config/ratelimit/v3:pkg\",\n        \"//envoy/extensions/common/ratelimit/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.ratelimit.v3;\n\nimport \"envoy/config/ratelimit/v3/rls.proto\";\nimport \"envoy/extensions/common/ratelimit/v3/ratelimit.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.ratelimit.v3\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_network_filters_rate_limit>`.\n// [#extension: envoy.filters.network.ratelimit]\n\n// [#next-free-field: 7]\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.rate_limit.v2.RateLimit\";\n\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_rate_limit_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The rate limit domain to use in the rate limit service request.\n  string domain = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The rate limit descriptor list to use in the rate limit service request.\n  repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3\n      [(validate.rules).repeated = {min_items: 1}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 4;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 5;\n\n  // Configuration for an external rate limit service provider. 
This\n  // field is required; a configuration that omits it will fail validation.\n  config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 6\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rbac/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/rbac/v2:pkg\",\n        \"//envoy/config/rbac/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rbac.v3;\n\nimport \"envoy/config/rbac/v3/rbac.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rbac.v3\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_network_filters_rbac>`.\n// [#extension: envoy.filters.network.rbac]\n\n// RBAC network filter config.\n//\n// Header should not be used in rules/shadow_rules in RBAC network filter as\n// this information is only available in :ref:`RBAC http filter <config_http_filters_rbac>`.\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.rbac.v2.RBAC\";\n\n  enum EnforcementType {\n    // Apply RBAC policies when the first byte of data arrives on the connection.\n    ONE_TIME_ON_FIRST_BYTE = 0;\n\n    // Continuously apply RBAC policies as data arrives. Use this mode when\n    // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka,\n    // etc. 
when the protocol decoders emit dynamic metadata such as the\n    // resources being accessed and the operations on the resources.\n    CONTINUOUS = 1;\n  }\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v3.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter but will emit stats and logs\n  // and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v3.RBAC shadow_rules = 2;\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 3 [(validate.rules).string = {min_len: 1}];\n\n  // RBAC enforcement strategy. By default RBAC will be enforced only once\n  // when the first byte of data arrives from the downstream. When used in\n  // conjunction with filters that emit dynamic metadata after decoding\n  // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to\n  // CONTINUOUS to enforce RBAC policies on every message boundary.\n  EnforcementType enforcement_type = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/rbac/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/rbac/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rbac.v4alpha;\n\nimport \"envoy/config/rbac/v4alpha/rbac.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rbac.v4alpha\";\noption java_outer_classname = \"RbacProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: RBAC]\n// Role-Based Access Control :ref:`configuration overview <config_network_filters_rbac>`.\n// [#extension: envoy.filters.network.rbac]\n\n// RBAC network filter config.\n//\n// Header should not be used in rules/shadow_rules in RBAC network filter as\n// this information is only available in :ref:`RBAC http filter <config_http_filters_rbac>`.\nmessage RBAC {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rbac.v3.RBAC\";\n\n  enum EnforcementType {\n    // Apply RBAC policies when the first byte of data arrives on the connection.\n    ONE_TIME_ON_FIRST_BYTE = 0;\n\n    // Continuously apply RBAC policies as data arrives. Use this mode when\n    // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka,\n    // etc. 
when the protocol decoders emit dynamic metadata such as the\n    // resources being accessed and the operations on the resources.\n    CONTINUOUS = 1;\n  }\n\n  // Specify the RBAC rules to be applied globally.\n  // If absent, no enforcing RBAC policy will be applied.\n  config.rbac.v4alpha.RBAC rules = 1;\n\n  // Shadow rules are not enforced by the filter but will emit stats and logs\n  // and can be used for rule testing.\n  // If absent, no shadow RBAC policy will be applied.\n  config.rbac.v4alpha.RBAC shadow_rules = 2;\n\n  // The prefix to use when emitting statistics.\n  string stat_prefix = 3 [(validate.rules).string = {min_len: 1}];\n\n  // RBAC enforcement strategy. By default RBAC will be enforced only once\n  // when the first byte of data arrives from the downstream. When used in\n  // conjunction with filters that emit dynamic metadata after decoding\n  // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to\n  // CONTINUOUS to enforce RBAC policies on every message boundary.\n  EnforcementType enforcement_type = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/redis_proxy/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.redis_proxy.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.redis_proxy.v3\";\noption java_outer_classname = \"RedisProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Redis Proxy]\n// Redis Proxy :ref:`configuration overview <config_network_filters_redis_proxy>`.\n// [#extension: envoy.filters.network.redis_proxy]\n\n// [#next-free-field: 9]\nmessage RedisProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.redis_proxy.v2.RedisProxy\";\n\n  // Redis connection pool settings.\n  // [#next-free-field: 9]\n  message ConnPoolSettings {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings\";\n\n    // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently\n    // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data\n    // because replication is asynchronous and requires some delay. You need to ensure that your\n    // application can tolerate stale data.\n    enum ReadPolicy {\n      // Default mode. 
Read from the current primary node.\n      MASTER = 0 [(udpa.annotations.enum_value_migrate).rename = \"PRIMARY\"];\n\n      // Read from the primary, but if it is unavailable, read from replica nodes.\n      PREFER_MASTER = 1 [(udpa.annotations.enum_value_migrate).rename = \"PREFER_PRIMARY\"];\n\n      // Read from replica nodes. If multiple replica nodes are present within a shard, a random\n      // node is selected. Healthy nodes have precedent over unhealthy nodes.\n      REPLICA = 2;\n\n      // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not\n      // present or unhealthy), read from the primary.\n      PREFER_REPLICA = 3;\n\n      // Read from any node of the cluster. A random node is selected among the primary and\n      // replicas, healthy nodes have precedent over unhealthy nodes.\n      ANY = 4;\n    }\n\n    // Per-operation timeout in milliseconds. The timer starts when the first\n    // command of a pipeline is written to the backend connection. Each response received from Redis\n    // resets the timer since it signifies that the next command is being processed by the backend.\n    // The only exception to this behavior is when a connection to a backend is not yet established.\n    // In that case, the connect timeout on the cluster will govern the timeout until the connection\n    // is ready.\n    google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}];\n\n    // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be\n    // forwarded to the same upstream. The hash key used for determining the upstream in a\n    // consistent hash ring configuration will be computed from the hash tagged key instead of the\n    // whole key. 
The algorithm used to compute the hash tag is identical to the `redis-cluster\n    // implementation <https://redis.io/topics/cluster-spec#keys-hash-tags>`_.\n    //\n    // Examples:\n    //\n    // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream\n    // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream\n    bool enable_hashtagging = 2;\n\n    // Accept `moved and ask redirection\n    // <https://redis.io/topics/cluster-spec#redirection-and-resharding>`_ errors from upstream\n    // redis servers, and retry commands to the specified target server. The target server does not\n    // need to be known to the cluster manager. If the command cannot be redirected, then the\n    // original error is passed downstream unchanged. By default, this support is not enabled.\n    bool enable_redirection = 3;\n\n    // Maximum size of encoded request buffer before flush is triggered and encoded requests\n    // are sent upstream. If this is unset, the buffer flushes whenever it receives data\n    // and performs no batching.\n    // This feature makes it possible for multiple clients to send requests to Envoy and have\n    // them batched- for example if one is running several worker processes, each with its own\n    // Redis connection. There is no benefit to using this with a single downstream process.\n    // Recommended size (if enabled) is 1024 bytes.\n    uint32 max_buffer_size_before_flush = 4;\n\n    // The encoded request buffer is flushed N milliseconds after the first request has been\n    // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`.\n    // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise,\n    // the timer should be set according to the number of clients, overall request rate and\n    // desired maximum latency for a single command. 
For example, if there are many requests\n    // being batched together at a high rate, the buffer will likely be filled before the timer\n    // fires. Alternatively, if the request rate is lower the buffer will not be filled as often\n    // before the timer fires.\n    // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter\n    // defaults to 3ms.\n    google.protobuf.Duration buffer_flush_timeout = 5;\n\n    // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts\n    // can be created at any given time by any given worker thread (see `enable_redirection` for\n    // more details). If the host is unknown and a connection cannot be created due to enforcing\n    // this limit, then redirection will fail and the original redirection error will be passed\n    // downstream unchanged. This limit defaults to 100.\n    google.protobuf.UInt32Value max_upstream_unknown_connections = 6;\n\n    // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate\n    // count. These commands are measured in microseconds.\n    bool enable_command_stats = 8;\n\n    // Read policy. The default is to read from the primary.\n    ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}];\n  }\n\n  message PrefixRoutes {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes\";\n\n    message Route {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route\";\n\n      // The router is capable of shadowing traffic from one cluster to another. The current\n      // implementation is \"fire and forget,\" meaning Envoy will not wait for the shadow cluster to\n      // respond before returning the response from the primary cluster. 
All normal statistics are\n      // collected for the shadow cluster making this feature useful for testing.\n      message RequestMirrorPolicy {\n        option (udpa.annotations.versioning).previous_message_type =\n            \"envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route.\"\n            \"RequestMirrorPolicy\";\n\n        // Specifies the cluster that requests will be mirrored to. The cluster must\n        // exist in the cluster manager configuration.\n        string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n        // If not specified or the runtime key is not present, all requests to the target cluster\n        // will be mirrored.\n        //\n        // If specified, Envoy will lookup the runtime key to get the percentage of requests to the\n        // mirror.\n        config.core.v3.RuntimeFractionalPercent runtime_fraction = 2;\n\n        // Set this to TRUE to only mirror write commands, this is effectively replicating the\n        // writes in a \"fire and forget\" manner.\n        bool exclude_read_commands = 3;\n      }\n\n      // String prefix that must match the beginning of the keys. Envoy will always favor the\n      // longest match.\n      string prefix = 1 [(validate.rules).string = {max_bytes: 1000}];\n\n      // Indicates if the prefix needs to be removed from the key when forwarded.\n      bool remove_prefix = 2;\n\n      // Upstream cluster to forward the command to.\n      string cluster = 3 [(validate.rules).string = {min_len: 1}];\n\n      // Indicates that the route has a request mirroring policy.\n      repeated RequestMirrorPolicy request_mirror_policy = 4;\n    }\n\n    // List of prefix routes.\n    repeated Route routes = 1;\n\n    // Indicates that prefix matching should be case insensitive.\n    bool case_insensitive = 2;\n\n    // Optional catch-all route to forward commands that don't match any of the routes. 
The\n    // catch-all route becomes required when no routes are specified.\n    Route catch_all_route = 4;\n\n    string hidden_envoy_deprecated_catch_all_cluster = 3\n        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n  }\n\n  // RedisFault defines faults used for fault injection.\n  message RedisFault {\n    enum RedisFaultType {\n      // Delays requests. This is the base fault; other faults can have delays added.\n      DELAY = 0;\n\n      // Returns errors on requests.\n      ERROR = 1;\n    }\n\n    // Fault type.\n    RedisFaultType fault_type = 1 [(validate.rules).enum = {defined_only: true}];\n\n    // Percentage of requests fault applies to.\n    config.core.v3.RuntimeFractionalPercent fault_enabled = 2\n        [(validate.rules).message = {required: true}];\n\n    // Delay for all faults. If not set, defaults to zero\n    google.protobuf.Duration delay = 3;\n\n    // Commands fault is restricted to, if any. If not set, fault applies to all commands\n    // other than auth and ping (due to special handling of those commands in Envoy).\n    repeated string commands = 4;\n  }\n\n  // The prefix to use when emitting :ref:`statistics <config_network_filters_redis_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Network settings for the connection pool to the upstream clusters.\n  ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}];\n\n  // Indicates that latency stat should be computed in microseconds. By default it is computed in\n  // milliseconds. This does not apply to upstream command stats currently.\n  bool latency_in_micros = 4;\n\n  // List of **unique** prefixes used to separate keys from different workloads to different\n  // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all\n  // cluster can be used to forward commands when there is no match. 
Time complexity of the\n  // lookups are in O(min(longest key prefix, key length)).\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    prefix_routes:\n  //      routes:\n  //        - prefix: \"ab\"\n  //          cluster: \"cluster_a\"\n  //        - prefix: \"abc\"\n  //          cluster: \"cluster_b\"\n  //\n  // When using the above routes, the following prefixes would be sent to:\n  //\n  // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b.\n  // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a.\n  // * ``get z:users`` would return a NoUpstreamHost error. A :ref:`catch-all\n  //   route<envoy_api_field_extensions.filters.network.redis_proxy.v3.RedisProxy.PrefixRoutes.catch_all_route>`\n  //   would have retrieved the key from that cluster instead.\n  //\n  // See the :ref:`configuration section\n  // <arch_overview_redis_configuration>` of the architecture overview for recommendations on\n  // configuring the backing clusters.\n  PrefixRoutes prefix_routes = 5;\n\n  // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis\n  // AUTH command <https://redis.io/commands/auth>`_ with this password before enabling any other\n  // command. If an AUTH command's password matches this password, an \"OK\" response will be returned\n  // to the client. If the AUTH command password does not match this password, then an \"ERR invalid\n  // password\" error will be returned. If any other command is received before AUTH when this\n  // password is set, then a \"NOAUTH Authentication required.\" error response will be sent to the\n  // client. If an AUTH command is received when the password is not set, then an \"ERR Client sent\n  // AUTH, but no password is set\" error will be returned.\n  config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true];\n\n  // List of faults to inject. 
Faults currently come in two flavors:\n  // - Delay, which delays a request.\n  // - Error, which responds to a request with an error. Errors can also have delays attached.\n  //\n  // Example:\n  //\n  // .. code-block:: yaml\n  //\n  //    faults:\n  //    - fault_type: ERROR\n  //      fault_enabled:\n  //        default_value:\n  //          numerator: 10\n  //          denominator: HUNDRED\n  //        runtime_key: \"bogus_key\"\n  //        commands:\n  //        - GET\n  //      - fault_type: DELAY\n  //        fault_enabled:\n  //          default_value:\n  //            numerator: 10\n  //            denominator: HUNDRED\n  //          runtime_key: \"bogus_key\"\n  //        delay: 2s\n  //\n  // See the :ref:`fault injection section\n  // <config_network_filters_redis_proxy_fault_injection>` for more information on how to configure this.\n  repeated RedisFault faults = 8;\n\n  // If a username is provided an ACL style AUTH command will be required with a username and password.\n  // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis\n  // AUTH command <https://redis.io/commands/auth>`_ with this username and the *downstream_auth_password*\n  // before enabling any other command. If an AUTH command's username and password matches this username\n  // and the *downstream_auth_password* , an \"OK\" response will be returned to the client. If the AUTH\n  // command username or password does not match this username or the *downstream_auth_password*, then an\n  // \"WRONGPASS invalid username-password pair\" error will be returned. If any other command is received before AUTH when this\n  // password is set, then a \"NOAUTH Authentication required.\" error response will be sent to the\n  // client. 
If an AUTH command is received when the password is not set, then an \"ERR Client sent\n  // AUTH, but no ACL is set\" error will be returned.\n  config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true];\n\n  string hidden_envoy_deprecated_cluster = 2\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n}\n\n// RedisProtocolOptions specifies Redis upstream protocol options. This object is used in\n// :ref:`typed_extension_protocol_options<envoy_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.redis_proxy`.\nmessage RedisProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions\";\n\n  // Upstream server password as defined by the `requirepass` directive\n  // <https://redis.io/topics/config>`_ in the server's configuration file.\n  config.core.v3.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true];\n\n  // Upstream server username as defined by the `user` directive\n  // <https://redis.io/topics/acl>`_ in the server's configuration file.\n  config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rocketmq_proxy.v3;\n\nimport \"envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3\";\noption java_outer_classname = \"RocketmqProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: RocketMQ Proxy]\n// RocketMQ Proxy :ref:`configuration overview <config_network_filters_rocketmq_proxy>`.\n// [#extension: envoy.filters.network.rocketmq_proxy]\n\nmessage RocketmqProxy {\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The route table for the connection manager is specified in this property.\n  RouteConfiguration route_config = 2;\n\n  // The largest duration transient object expected to live, more than 10s is recommended.\n  google.protobuf.Duration transient_object_life_span = 3;\n\n  // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting\n  // facility without considering backward compatibility of exiting RocketMQ client SDK.\n  bool develop_mode = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rocketmq_proxy.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rocketmq Proxy Route Configuration]\n// Rocketmq Proxy :ref:`configuration overview <config_network_filters_rocketmq_proxy>`.\n\nmessage RouteConfiguration {\n  // The name of the route configuration.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  // The name of the topic.\n  type.matcher.v3.StringMatcher topic = 1 [(validate.rules).message = {required: true}];\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. 
A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated config.route.v3.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  // Indicates the upstream cluster to which the request should be routed.\n  string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer.\n  config.core.v3.Metadata metadata_match = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rocketmq_proxy.v4alpha;\n\nimport \"envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha\";\noption java_outer_classname = \"RocketmqProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: RocketMQ Proxy]\n// RocketMQ Proxy :ref:`configuration overview <config_network_filters_rocketmq_proxy>`.\n// [#extension: envoy.filters.network.rocketmq_proxy]\n\nmessage RocketmqProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\";\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The route table for the connection manager is specified in this property.\n  RouteConfiguration route_config = 2;\n\n  // The largest duration transient object expected to live, more than 10s is recommended.\n  google.protobuf.Duration transient_object_life_span = 3;\n\n  // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting\n  // facility without considering backward compatibility of exiting RocketMQ client SDK.\n  bool develop_mode = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.rocketmq_proxy.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Rocketmq Proxy Route Configuration]\n// Rocketmq Proxy :ref:`configuration overview <config_network_filters_rocketmq_proxy>`.\n\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.RouteConfiguration\";\n\n  // The name of the route configuration.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.RouteMatch\";\n\n  // The name of the topic.\n  type.matcher.v4alpha.StringMatcher topic = 1 [(validate.rules).message = {required: true}];\n\n  // Specifies a set of headers that the route should match on. 
The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config).\n  repeated config.route.v4alpha.HeaderMatcher headers = 2;\n}\n\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction\";\n\n  // Indicates the upstream cluster to which the request should be routed.\n  string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Optional endpoint metadata match criteria used by the subset load balancer.\n  config.core.v4alpha.Metadata metadata_match = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/sni_cluster/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.sni_cluster.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.sni_cluster.v3\";\noption java_outer_classname = \"SniClusterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: SNI Cluster Filter]\n// Set the upstream cluster name from the SNI field in the TLS connection.\n// [#extension: envoy.filters.network.sni_cluster]\n\nmessage SniCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.sni_cluster.v2.SniCluster\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/common/dynamic_forward_proxy/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha;\n\nimport \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha\";\noption java_outer_classname = \"SniDynamicForwardProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: SNI dynamic forward proxy]\n\n// Configuration for the SNI-based dynamic forward proxy filter. See the\n// :ref:`architecture overview <arch_overview_http_dynamic_forward_proxy>` for\n// more information. Note this filter must be configured along with\n// :ref:`TLS inspector listener filter <config_listener_filters_tls_inspector>`\n// to work.\n// [#extension: envoy.filters.network.sni_dynamic_forward_proxy]\nmessage FilterConfig {\n  // The DNS cache configuration that the filter will attach to. Note this\n  // configuration must match that of associated :ref:`dynamic forward proxy\n  // cluster configuration\n  // <envoy_api_field_extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig.dns_cache_config>`.\n  common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1\n      [(validate.rules).message = {required: true}];\n\n  oneof port_specifier {\n    // The port number to connect to the upstream.\n    uint32 port_value = 2 [(validate.rules).uint32 = {lte: 65535 gt: 0}];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/tcp_proxy/v2:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.tcp_proxy.v3;\n\nimport \"envoy/config/accesslog/v3/accesslog.proto\";\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/v3/hash_policy.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v3\";\noption java_outer_classname = \"TcpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: TCP Proxy]\n// TCP Proxy :ref:`configuration overview <config_network_filters_tcp_proxy>`.\n// [#extension: envoy.filters.network.tcp_proxy]\n\n// [#next-free-field: 14]\nmessage TcpProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy\";\n\n  // Allows for specification of multiple upstream clusters along with weights\n  // that indicate the percentage of traffic to be forwarded to each cluster.\n  // The router selects an upstream cluster based on these weights.\n  message WeightedCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster\";\n\n    message ClusterWeight {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight\";\n\n      // Name of the upstream cluster.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n\n      // When a request matches the route, the choice of an upstream cluster is\n      // determined by its weight. 
The sum of weights across all entries in the\n      // clusters array determines the total weight.\n      uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n      // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n      // in the upstream cluster with metadata matching what is set in this field will be considered\n      // for load balancing. Note that this will be merged with what's provided in\n      // :ref:`TcpProxy.metadata_match\n      // <envoy_api_field_extensions.filters.network.tcp_proxy.v3.TcpProxy.metadata_match>`, with values\n      // here taking precedence. The filter name should be specified as *envoy.lb*.\n      config.core.v3.Metadata metadata_match = 3;\n    }\n\n    // Specifies one or more upstream clusters associated with the route.\n    repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Configuration for tunneling TCP over other transports or application layers.\n  // Currently, only HTTP/2 is supported. 
When other options exist, HTTP/2 will\n  // remain the default.\n  message TunnelingConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.TunnelingConfig\";\n\n    // The hostname to send in the synthesized CONNECT headers to the upstream proxy.\n    string hostname = 1 [(validate.rules).string = {min_len: 1}];\n  }\n\n  message DeprecatedV1 {\n    option deprecated = true;\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1\";\n\n    // [#next-free-field: 6]\n    message TCPRoute {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1.TCPRoute\";\n\n      string cluster = 1 [(validate.rules).string = {min_bytes: 1}];\n\n      repeated config.core.v3.CidrRange destination_ip_list = 2;\n\n      string destination_ports = 3;\n\n      repeated config.core.v3.CidrRange source_ip_list = 4;\n\n      string source_ports = 5;\n    }\n\n    repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_tcp_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2;\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 10;\n  }\n\n  // Optional endpoint metadata match criteria. Only endpoints in the upstream\n  // cluster with metadata matching that set in metadata_match will be\n  // considered. 
The filter name should be specified as *envoy.lb*.\n  config.core.v3.Metadata metadata_match = 9;\n\n  // The idle timeout for connections managed by the TCP proxy filter. The idle timeout\n  // is defined as the period in which there are no bytes sent or received on either\n  // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set\n  // to 0s, the timeout will be disabled.\n  //\n  // .. warning::\n  //   Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 8;\n\n  // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy\n  // filter. The idle timeout is defined as the period in which there is no\n  // active traffic. If not set, there is no idle timeout. When the idle timeout\n  // is reached the connection will be closed. The distinction between\n  // downstream_idle_timeout/upstream_idle_timeout provides a means to set\n  // timeout based on the last byte sent on the downstream/upstream connection.\n  google.protobuf.Duration downstream_idle_timeout = 3;\n\n  // [#not-implemented-hide:]\n  google.protobuf.Duration upstream_idle_timeout = 4;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by this tcp_proxy.\n  repeated config.accesslog.v3.AccessLog access_log = 5;\n\n  // The maximum number of unsuccessful connection attempts that will be made before\n  // giving up. If the parameter is not specified, 1 connection attempt will be made.\n  google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based\n  // load balancing algorithms will select a host randomly. 
Currently the number of hash policies is\n  // limited to 1.\n  repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}];\n\n  // [#not-implemented-hide:] feature in progress\n  // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP\n  // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload\n  // will be proxied upstream as per usual.\n  TunnelingConfig tunneling_config = 12;\n\n  // The maximum duration of a connection. The duration is defined as the period since a connection\n  // was established. If not set, there is no max duration. When max_downstream_connection_duration\n  // is reached the connection will be closed. Duration must be at least 1ms.\n  google.protobuf.Duration max_downstream_connection_duration = 13\n      [(validate.rules).duration = {gte {nanos: 1000000}}];\n\n  DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 [deprecated = true];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/accesslog/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/tcp_proxy/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.tcp_proxy.v4alpha;\n\nimport \"envoy/config/accesslog/v4alpha/accesslog.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/v3/hash_policy.proto\";\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v4alpha\";\noption java_outer_classname = \"TcpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: TCP Proxy]\n// TCP Proxy :ref:`configuration overview <config_network_filters_tcp_proxy>`.\n// [#extension: envoy.filters.network.tcp_proxy]\n\n// [#next-free-field: 14]\nmessage TcpProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\";\n\n  // Allows for specification of multiple upstream clusters along with weights\n  // that indicate the percentage of traffic to be forwarded to each cluster.\n  // The router selects an upstream cluster based on these weights.\n  message WeightedCluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster\";\n\n    message ClusterWeight {\n      option (udpa.annotations.versioning).previous_message_type =\n          \"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight\";\n\n      // Name of the upstream cluster.\n      string name = 1 [(validate.rules).string = {min_len: 1}];\n\n      // When a request matches the route, the choice of an upstream cluster is\n      // determined by its weight. 
The sum of weights across all entries in the\n      // clusters array determines the total weight.\n      uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n      // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints\n      // in the upstream cluster with metadata matching what is set in this field will be considered\n      // for load balancing. Note that this will be merged with what's provided in\n      // :ref:`TcpProxy.metadata_match\n      // <envoy_api_field_extensions.filters.network.tcp_proxy.v4alpha.TcpProxy.metadata_match>`, with values\n      // here taking precedence. The filter name should be specified as *envoy.lb*.\n      config.core.v4alpha.Metadata metadata_match = 3;\n    }\n\n    // Specifies one or more upstream clusters associated with the route.\n    repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Configuration for tunneling TCP over other transports or application layers.\n  // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will\n  // remain the default.\n  message TunnelingConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig\";\n\n    // The hostname to send in the synthesized CONNECT headers to the upstream proxy.\n    string hostname = 1 [(validate.rules).string = {min_len: 1}];\n  }\n\n  reserved 6;\n\n  reserved \"deprecated_v1\";\n\n  // The prefix to use when emitting :ref:`statistics\n  // <config_network_filters_tcp_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2;\n\n    // Multiple upstream clusters can be specified for a given route. 
The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 10;\n  }\n\n  // Optional endpoint metadata match criteria. Only endpoints in the upstream\n  // cluster with metadata matching that set in metadata_match will be\n  // considered. The filter name should be specified as *envoy.lb*.\n  config.core.v4alpha.Metadata metadata_match = 9;\n\n  // The idle timeout for connections managed by the TCP proxy filter. The idle timeout\n  // is defined as the period in which there are no bytes sent or received on either\n  // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set\n  // to 0s, the timeout will be disabled.\n  //\n  // .. warning::\n  //   Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP\n  //   FIN packets, etc.\n  google.protobuf.Duration idle_timeout = 8;\n\n  // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy\n  // filter. The idle timeout is defined as the period in which there is no\n  // active traffic. If not set, there is no idle timeout. When the idle timeout\n  // is reached the connection will be closed. The distinction between\n  // downstream_idle_timeout/upstream_idle_timeout provides a means to set\n  // timeout based on the last byte sent on the downstream/upstream connection.\n  google.protobuf.Duration downstream_idle_timeout = 3;\n\n  // [#not-implemented-hide:]\n  google.protobuf.Duration upstream_idle_timeout = 4;\n\n  // Configuration for :ref:`access logs <arch_overview_access_logs>`\n  // emitted by this tcp_proxy.\n  repeated config.accesslog.v4alpha.AccessLog access_log = 5;\n\n  // The maximum number of unsuccessful connection attempts that will be made before\n  // giving up. 
If the parameter is not specified, 1 connection attempt will be made.\n  google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}];\n\n  // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based\n  // load balancing algorithms will select a host randomly. Currently the number of hash policies is\n  // limited to 1.\n  repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}];\n\n  // [#not-implemented-hide:] feature in progress\n  // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP\n  // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload\n  // will be proxied upstream as per usual.\n  TunnelingConfig tunneling_config = 12;\n\n  // The maximum duration of a connection. The duration is defined as the period since a connection\n  // was established. If not set, there is no max duration. When max_downstream_connection_duration\n  // is reached the connection will be closed. Duration must be at least 1ms.\n  google.protobuf.Duration max_downstream_connection_duration = 13\n      [(validate.rules).duration = {gte {nanos: 1000000}}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg\",\n        \"//envoy/config/ratelimit/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3;\n\nimport \"envoy/config/ratelimit/v3/rls.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3\";\noption java_outer_classname = \"RateLimitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate limit]\n// Rate limit :ref:`configuration overview <config_thrift_filters_rate_limit>`.\n// [#extension: envoy.filters.thrift.ratelimit]\n\n// [#next-free-field: 6]\nmessage RateLimit {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.thrift.rate_limit.v2alpha1.RateLimit\";\n\n  // The rate limit domain to use in the rate limit service request.\n  string domain = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Specifies the rate limit configuration stage. Each configured rate limit filter performs a\n  // rate limit check using descriptors configured in the\n  // :ref:`envoy_api_msg_extensions.filters.network.thrift_proxy.v3.RouteAction` for the request.\n  // Only those entries with a matching stage number are used for a given filter. If not set, the\n  // default stage number is 0.\n  //\n  // .. note::\n  //\n  //  The filter supports a range of 0 - 10 inclusively for stage numbers.\n  uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}];\n\n  // The timeout in milliseconds for the rate limit service RPC. If not\n  // set, this defaults to 20ms.\n  google.protobuf.Duration timeout = 3;\n\n  // The filter's behaviour in case the rate limiting service does\n  // not respond back. 
When it is set to true, Envoy will not allow traffic in case of\n  // communication failure between rate limiting service and the proxy.\n  // Defaults to false.\n  bool failure_mode_deny = 4;\n\n  // Configuration for an external rate limit service provider. If not\n  // specified, any calls to the rate limit service will immediately return\n  // success.\n  config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 5\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg\",\n        \"//envoy/config/route/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Thrift Proxy Route Configuration]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.RouteConfiguration\";\n\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.RouteMatch\";\n\n  oneof match_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route must exactly match the request method name. 
As a special case, an\n    // empty string matches any request method name.\n    string method_name = 1;\n\n    // If specified, the route must have the service name as the request method name prefix. As a\n    // special case, an empty string matches any service name. Only relevant when service\n    // multiplexing.\n    string service_name = 2;\n  }\n\n  // Inverts whatever matching is done in the :ref:`method_name\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v3.RouteMatch.method_name>` or\n  // :ref:`service_name\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v3.RouteMatch.service_name>` fields.\n  // Cannot be combined with wildcard matching as that would result in routes never being matched.\n  //\n  // .. note::\n  //\n  //   This does not invert matching done as part of the :ref:`headers field\n  //   <envoy_api_field_extensions.filters.network.thrift_proxy.v3.RouteMatch.headers>` field. To\n  //   invert header matching, see :ref:`invert_match\n  //   <envoy_api_field_config.route.v3.HeaderMatcher.invert_match>`.\n  bool invert = 3;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config). 
Note that this only applies for Thrift transports and/or\n  // protocols that support headers.\n  repeated config.route.v3.HeaderMatcher headers = 4;\n}\n\n// [#next-free-field: 7]\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates a single upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 2;\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // Thrift header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist Envoy will\n    // respond with an unknown method exception or an internal error exception,\n    // respectively.\n    string cluster_header = 6\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n  // the upstream cluster with metadata matching what is set in this field will be considered.\n  // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight.metadata_match>`,\n  // with values there taking precedence. Keys and values should be provided under the \"envoy.lb\"\n  // metadata key.\n  config.core.v3.Metadata metadata_match = 3;\n\n  // Specifies a set of rate limit configurations that could be applied to the route.\n  // N.B. 
Thrift service or method name matching can be achieved by specifying a RequestHeaders\n  // action with the header name \":method-name\".\n  repeated config.route.v3.RateLimit rate_limits = 4;\n\n  // Strip the service prefix from the method name, if there's a prefix. For\n  // example, the method call Service:method would end up being just method.\n  bool strip_service_name = 5;\n}\n\n// Allows for specification of multiple upstream clusters along with weights that indicate the\n// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster\n// based on these weights.\nmessage WeightedCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster\";\n\n  message ClusterWeight {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight\";\n\n    // Name of the upstream cluster.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // When a request matches the route, the choice of an upstream cluster is determined by its\n    // weight. The sum of weights across all entries in the clusters array determines the total\n    // weight.\n    google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field, combined with what's\n    // provided in :ref:`RouteAction's metadata_match\n    // <envoy_api_field_extensions.filters.network.thrift_proxy.v3.RouteAction.metadata_match>`,\n    // will be considered. Values here will take precedence. 
Keys and values should be provided\n    // under the \"envoy.lb\" metadata key.\n    config.core.v3.Metadata metadata_match = 3;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.v3;\n\nimport \"envoy/extensions/filters/network/thrift_proxy/v3/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3\";\noption java_outer_classname = \"ThriftProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Thrift Proxy]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n// [#extension: envoy.filters.network.thrift_proxy]\n\n// Thrift transport types supported by Envoy.\nenum TransportType {\n  // For downstream connections, the Thrift proxy will attempt to determine which transport to use.\n  // For upstream connections, the Thrift proxy will use same transport as the downstream\n  // connection.\n  AUTO_TRANSPORT = 0;\n\n  // The Thrift proxy will use the Thrift framed transport.\n  FRAMED = 1;\n\n  // The Thrift proxy will use the Thrift unframed transport.\n  UNFRAMED = 2;\n\n  // The Thrift proxy will assume the client is using the Thrift header transport.\n  HEADER = 3;\n}\n\n// Thrift Protocol types supported by Envoy.\nenum ProtocolType {\n  // For downstream connections, the Thrift proxy will attempt to determine which protocol to use.\n  // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol\n  // detection. 
For upstream connections, the Thrift proxy will use the same protocol as the\n  // downstream connection.\n  AUTO_PROTOCOL = 0;\n\n  // The Thrift proxy will use the Thrift binary protocol.\n  BINARY = 1;\n\n  // The Thrift proxy will use Thrift non-strict binary protocol.\n  LAX_BINARY = 2;\n\n  // The Thrift proxy will use the Thrift compact protocol.\n  COMPACT = 3;\n\n  // The Thrift proxy will use the Thrift \"Twitter\" protocol implemented by the finagle library.\n  TWITTER = 4;\n}\n\n// [#next-free-field: 6]\nmessage ThriftProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProxy\";\n\n  // Supplies the type of transport that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v3.TransportType.AUTO_TRANSPORT>`.\n  TransportType transport = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v3.ProtocolType.AUTO_PROTOCOL>`.\n  ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  RouteConfiguration route_config = 4;\n\n  // A list of individual Thrift filters that make up the filter chain for requests made to the\n  // Thrift proxy. Order matters as the filters are processed sequentially. 
For backwards\n  // compatibility, if no thrift_filters are specified, a default Thrift router filter\n  // (`envoy.filters.thrift.router`) is used.\n  repeated ThriftFilter thrift_filters = 5;\n}\n\n// ThriftFilter configures a Thrift filter.\nmessage ThriftFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftFilter\";\n\n  // The name of the filter to instantiate. The name must match a supported\n  // filter. The built-in filters are:\n  //\n  // [#comment:TODO(zuercher): Auto generate the following list]\n  // * :ref:`envoy.filters.thrift.router <config_thrift_filters_router>`\n  // * :ref:`envoy.filters.thrift.rate_limit <config_thrift_filters_rate_limit>`\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated. See the supported\n  // filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true];\n  }\n}\n\n// ThriftProtocolOptions specifies Thrift upstream protocol options. 
This object is used in\n// :ref:`typed_extension_protocol_options<envoy_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.thrift_proxy`.\nmessage ThriftProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProtocolOptions\";\n\n  // Supplies the type of transport that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v3.TransportType.AUTO_TRANSPORT>`,\n  // which is the default, causes the proxy to use the same transport as the downstream connection.\n  TransportType transport = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v3.ProtocolType.AUTO_PROTOCOL>`,\n  // which is the default, causes the proxy to use the same protocol as the downstream connection.\n  ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/route/v4alpha:pkg\",\n        \"//envoy/extensions/filters/network/thrift_proxy/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/route/v4alpha/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha\";\noption java_outer_classname = \"RouteProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Thrift Proxy Route Configuration]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n\nmessage RouteConfiguration {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.RouteConfiguration\";\n\n  // The name of the route configuration. Reserved for future use in asynchronous route discovery.\n  string name = 1;\n\n  // The list of routes that will be matched, in order, against incoming requests. The first route\n  // that matches will be used.\n  repeated Route routes = 2;\n}\n\nmessage Route {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.Route\";\n\n  // Route matching parameters.\n  RouteMatch match = 1 [(validate.rules).message = {required: true}];\n\n  // Route request to some upstream cluster.\n  RouteAction route = 2 [(validate.rules).message = {required: true}];\n}\n\nmessage RouteMatch {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.RouteMatch\";\n\n  oneof match_specifier {\n    option (validate.required) = true;\n\n    // If specified, the route must exactly match the request method name. 
As a special case, an\n    // empty string matches any request method name.\n    string method_name = 1;\n\n    // If specified, the route must have the service name as the request method name prefix. As a\n    // special case, an empty string matches any service name. Only relevant when service\n    // multiplexing.\n    string service_name = 2;\n  }\n\n  // Inverts whatever matching is done in the :ref:`method_name\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.RouteMatch.method_name>` or\n  // :ref:`service_name\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.RouteMatch.service_name>` fields.\n  // Cannot be combined with wildcard matching as that would result in routes never being matched.\n  //\n  // .. note::\n  //\n  //   This does not invert matching done as part of the :ref:`headers field\n  //   <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.RouteMatch.headers>` field. To\n  //   invert header matching, see :ref:`invert_match\n  //   <envoy_api_field_config.route.v4alpha.HeaderMatcher.invert_match>`.\n  bool invert = 3;\n\n  // Specifies a set of headers that the route should match on. The router will check the request’s\n  // headers against all the specified headers in the route config. A match will happen if all the\n  // headers in the route are present in the request with the same values (or based on presence if\n  // the value field is not in the config). 
Note that this only applies for Thrift transports and/or\n  // protocols that support headers.\n  repeated config.route.v4alpha.HeaderMatcher headers = 4;\n}\n\n// [#next-free-field: 7]\nmessage RouteAction {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.RouteAction\";\n\n  oneof cluster_specifier {\n    option (validate.required) = true;\n\n    // Indicates a single upstream cluster to which the request should be routed\n    // to.\n    string cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Multiple upstream clusters can be specified for a given route. The\n    // request is routed to one of the upstream clusters based on weights\n    // assigned to each cluster.\n    WeightedCluster weighted_clusters = 2;\n\n    // Envoy will determine the cluster to route to by reading the value of the\n    // Thrift header named by cluster_header from the request headers. If the\n    // header is not found or the referenced cluster does not exist Envoy will\n    // respond with an unknown method exception or an internal error exception,\n    // respectively.\n    string cluster_header = 6\n        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];\n  }\n\n  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n  // the upstream cluster with metadata matching what is set in this field will be considered.\n  // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match\n  // <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.WeightedCluster.ClusterWeight.metadata_match>`,\n  // with values there taking precedence. Keys and values should be provided under the \"envoy.lb\"\n  // metadata key.\n  config.core.v4alpha.Metadata metadata_match = 3;\n\n  // Specifies a set of rate limit configurations that could be applied to the route.\n  // N.B. 
Thrift service or method name matching can be achieved by specifying a RequestHeaders\n  // action with the header name \":method-name\".\n  repeated config.route.v4alpha.RateLimit rate_limits = 4;\n\n  // Strip the service prefix from the method name, if there's a prefix. For\n  // example, the method call Service:method would end up being just method.\n  bool strip_service_name = 5;\n}\n\n// Allows for specification of multiple upstream clusters along with weights that indicate the\n// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster\n// based on these weights.\nmessage WeightedCluster {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster\";\n\n  message ClusterWeight {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight\";\n\n    // Name of the upstream cluster.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // When a request matches the route, the choice of an upstream cluster is determined by its\n    // weight. The sum of weights across all entries in the clusters array determines the total\n    // weight.\n    google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}];\n\n    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in\n    // the upstream cluster with metadata matching what is set in this field, combined with what's\n    // provided in :ref:`RouteAction's metadata_match\n    // <envoy_api_field_extensions.filters.network.thrift_proxy.v4alpha.RouteAction.metadata_match>`,\n    // will be considered. Values here will take precedence. 
Keys and values should be provided\n    // under the \"envoy.lb\" metadata key.\n    config.core.v4alpha.Metadata metadata_match = 3;\n  }\n\n  // Specifies one or more upstream clusters associated with the route.\n  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.thrift_proxy.v4alpha;\n\nimport \"envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha\";\noption java_outer_classname = \"ThriftProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Thrift Proxy]\n// Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.\n// [#extension: envoy.filters.network.thrift_proxy]\n\n// Thrift transport types supported by Envoy.\nenum TransportType {\n  // For downstream connections, the Thrift proxy will attempt to determine which transport to use.\n  // For upstream connections, the Thrift proxy will use same transport as the downstream\n  // connection.\n  AUTO_TRANSPORT = 0;\n\n  // The Thrift proxy will use the Thrift framed transport.\n  FRAMED = 1;\n\n  // The Thrift proxy will use the Thrift unframed transport.\n  UNFRAMED = 2;\n\n  // The Thrift proxy will assume the client is using the Thrift header transport.\n  HEADER = 3;\n}\n\n// Thrift Protocol types supported by Envoy.\nenum ProtocolType {\n  // For downstream connections, the Thrift proxy will attempt to determine which protocol to use.\n  // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol\n  // detection. 
For upstream connections, the Thrift proxy will use the same protocol as the\n  // downstream connection.\n  AUTO_PROTOCOL = 0;\n\n  // The Thrift proxy will use the Thrift binary protocol.\n  BINARY = 1;\n\n  // The Thrift proxy will use Thrift non-strict binary protocol.\n  LAX_BINARY = 2;\n\n  // The Thrift proxy will use the Thrift compact protocol.\n  COMPACT = 3;\n\n  // The Thrift proxy will use the Thrift \"Twitter\" protocol implemented by the finagle library.\n  TWITTER = 4;\n}\n\n// [#next-free-field: 6]\nmessage ThriftProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\";\n\n  // Supplies the type of transport that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v4alpha.TransportType.AUTO_TRANSPORT>`.\n  TransportType transport = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use. Defaults to\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v4alpha.ProtocolType.AUTO_PROTOCOL>`.\n  ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}];\n\n  // The human readable prefix to use when emitting statistics.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The route table for the connection manager is static and is specified in this property.\n  RouteConfiguration route_config = 4;\n\n  // A list of individual Thrift filters that make up the filter chain for requests made to the\n  // Thrift proxy. Order matters as the filters are processed sequentially. 
For backwards\n  // compatibility, if no thrift_filters are specified, a default Thrift router filter\n  // (`envoy.filters.thrift.router`) is used.\n  repeated ThriftFilter thrift_filters = 5;\n}\n\n// ThriftFilter configures a Thrift filter.\nmessage ThriftFilter {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.ThriftFilter\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // The name of the filter to instantiate. The name must match a supported\n  // filter. The built-in filters are:\n  //\n  // [#comment:TODO(zuercher): Auto generate the following list]\n  // * :ref:`envoy.filters.thrift.router <config_thrift_filters_router>`\n  // * :ref:`envoy.filters.thrift.rate_limit <config_thrift_filters_rate_limit>`\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Filter specific configuration which depends on the filter being instantiated. See the supported\n  // filters for further documentation.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3;\n  }\n}\n\n// ThriftProtocolOptions specifies Thrift upstream protocol options. 
This object is used in\n// :ref:`typed_extension_protocol_options<envoy_api_field_config.cluster.v4alpha.Cluster.typed_extension_protocol_options>`,\n// keyed by the name `envoy.filters.network.thrift_proxy`.\nmessage ThriftProtocolOptions {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions\";\n\n  // Supplies the type of transport that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v4alpha.TransportType.AUTO_TRANSPORT>`,\n  // which is the default, causes the proxy to use the same transport as the downstream connection.\n  TransportType transport = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Supplies the type of protocol that the Thrift proxy should use for upstream connections.\n  // Selecting\n  // :ref:`AUTO_PROTOCOL<envoy_api_enum_value_extensions.filters.network.thrift_proxy.v4alpha.ProtocolType.AUTO_PROTOCOL>`,\n  // which is the default, causes the proxy to use the same protocol as the downstream connection.\n  ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.wasm.v3;\n\nimport \"envoy/extensions/wasm/v3/wasm.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm]\n// [#extension: envoy.filters.network.wasm]\n// Wasm :ref:`configuration overview <config_network_filters_wasm>`.\n\nmessage Wasm {\n  // General Plugin configuration.\n  envoy.extensions.wasm.v3.PluginConfig config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.network.zookeeper_proxy.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.network.zookeeper_proxy.v3\";\noption java_outer_classname = \"ZookeeperProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: ZooKeeper proxy]\n// ZooKeeper Proxy :ref:`configuration overview <config_network_filters_zookeeper_proxy>`.\n// [#extension: envoy.filters.network.zookeeper_proxy]\n\nmessage ZooKeeperProxy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.network.zookeeper_proxy.v1alpha1.ZooKeeperProxy\";\n\n  // The human readable prefix to use when emitting :ref:`statistics\n  // <config_network_filters_zookeeper_proxy_stats>`.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs.\n  // If the access log field is empty, access logs will not be written.\n  string access_log = 2;\n\n  // Messages — requests, responses and events — that are bigger than this value will\n  // be ignored. If it is not set, the default value is 1Mb.\n  //\n  // The value here should match the jute.maxbuffer property in your cluster configuration:\n  //\n  // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options\n  //\n  // if that is set. If it isn't, ZooKeeper's default is also 1Mb.\n  google.protobuf.UInt32Value max_packet_bytes = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/dns/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.udp.dns_filter.v3alpha;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/data/dns/v3/dns_table.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha\";\noption java_outer_classname = \"DnsFilterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: DNS Filter]\n// DNS Filter :ref:`configuration overview <config_udp_listener_filters_dns_filter>`.\n// [#extension: envoy.filters.udp_listener.dns_filter]\n\n// Configuration for the DNS filter.\nmessage DnsFilterConfig {\n  // This message contains the configuration for the DNS Filter operating\n  // in a server context. This message will contain the virtual hosts and\n  // associated addresses with which Envoy will respond to queries\n  message ServerContextConfig {\n    oneof config_source {\n      option (validate.required) = true;\n\n      // Load the configuration specified from the control plane\n      data.dns.v3.DnsTable inline_dns_table = 1;\n\n      // Seed the filter configuration from an external path. This source\n      // is a yaml formatted file that contains the DnsTable driving Envoy's\n      // responses to DNS queries\n      config.core.v3.DataSource external_dns_table = 2;\n    }\n  }\n\n  // This message contains the configuration for the DNS Filter operating\n  // in a client context. 
This message will contain the timeouts, retry,\n  // and forwarding configuration for Envoy to make DNS requests to other\n  // resolvers\n  message ClientContextConfig {\n    // Sets the maximum time we will wait for the upstream query to complete\n    // We allow 5s for the upstream resolution to complete, so the minimum\n    // value here is 1. Note that the total latency for a failed query is the\n    // number of retries multiplied by the resolver_timeout.\n    google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}];\n\n    // A list of DNS servers to which we can forward queries. If not\n    // specified, Envoy will use the ambient DNS resolvers in the\n    // system.\n    repeated config.core.v3.Address upstream_resolvers = 2;\n\n    // Controls how many outstanding external lookup contexts the filter tracks.\n    // The context structure allows the filter to respond to every query even if the external\n    // resolution times out or is otherwise unsuccessful\n    uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}];\n  }\n\n  // The stat prefix used when emitting DNS filter statistics\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Server context configuration contains the data that the filter uses to respond\n  // to DNS requests.\n  ServerContextConfig server_config = 2;\n\n  // Client context configuration controls Envoy's behavior when it must use external\n  // resolvers to answer a query. This object is optional and if omitted instructs\n  // the filter to resolve queries from the data in the server_config\n  ClientContextConfig client_config = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/data/dns/v4alpha:pkg\",\n        \"//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.udp.dns_filter.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/data/dns/v4alpha/dns_table.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v4alpha\";\noption java_outer_classname = \"DnsFilterProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: DNS Filter]\n// DNS Filter :ref:`configuration overview <config_udp_listener_filters_dns_filter>`.\n// [#extension: envoy.filters.udp_listener.dns_filter]\n\n// Configuration for the DNS filter.\nmessage DnsFilterConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig\";\n\n  // This message contains the configuration for the DNS Filter operating\n  // in a server context. This message will contain the virtual hosts and\n  // associated addresses with which Envoy will respond to queries\n  message ServerContextConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ServerContextConfig\";\n\n    oneof config_source {\n      option (validate.required) = true;\n\n      // Load the configuration specified from the control plane\n      data.dns.v4alpha.DnsTable inline_dns_table = 1;\n\n      // Seed the filter configuration from an external path. 
This source\n      // is a yaml formatted file that contains the DnsTable driving Envoy's\n      // responses to DNS queries\n      config.core.v4alpha.DataSource external_dns_table = 2;\n    }\n  }\n\n  // This message contains the configuration for the DNS Filter operating\n  // in a client context. This message will contain the timeouts, retry,\n  // and forwarding configuration for Envoy to make DNS requests to other\n  // resolvers\n  message ClientContextConfig {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ClientContextConfig\";\n\n    // Sets the maximum time we will wait for the upstream query to complete\n    // We allow 5s for the upstream resolution to complete, so the minimum\n    // value here is 1. Note that the total latency for a failed query is the\n    // number of retries multiplied by the resolver_timeout.\n    google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}];\n\n    // A list of DNS servers to which we can forward queries. If not\n    // specified, Envoy will use the ambient DNS resolvers in the\n    // system.\n    repeated config.core.v4alpha.Address upstream_resolvers = 2;\n\n    // Controls how many outstanding external lookup contexts the filter tracks.\n    // The context structure allows the filter to respond to every query even if the external\n    // resolution times out or is otherwise unsuccessful\n    uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}];\n  }\n\n  // The stat prefix used when emitting DNS filter statistics\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Server context configuration contains the data that the filter uses to respond\n  // to DNS requests.\n  ServerContextConfig server_config = 2;\n\n  // Client context configuration controls Envoy's behavior when it must use external\n  // resolvers to answer a query. 
This object is optional and if omitted instructs\n  // the filter to resolve queries from the data in the server_config\n  ClientContextConfig client_config = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/filter/udp/udp_proxy/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.filters.udp.udp_proxy.v3;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.filters.udp.udp_proxy.v3\";\noption java_outer_classname = \"UdpProxyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: UDP proxy]\n// UDP proxy :ref:`configuration overview <config_udp_listener_filters_udp_proxy>`.\n// [#extension: envoy.filters.udp_listener.udp_proxy]\n\n// Configuration for the UDP proxy filter.\n// [#next-free-field: 6]\nmessage UdpProxyConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig\";\n\n  // Specifies the UDP hash policy.\n  // The packets can be routed by hash policy.\n  message HashPolicy {\n    oneof policy_specifier {\n      option (validate.required) = true;\n\n      // The source IP will be used to compute the hash used by hash-based load balancing algorithms.\n      bool source_ip = 1 [(validate.rules).bool = {const: true}];\n    }\n  }\n\n  // The stat prefix used when emitting UDP proxy filter stats.\n  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];\n\n  oneof route_specifier {\n    option (validate.required) = true;\n\n    // The upstream cluster to connect to.\n    string cluster = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by\n  // the session. 
The default if not specified is 1 minute.\n  google.protobuf.Duration idle_timeout = 3;\n\n  // Use the remote downstream IP address as the sender IP address when sending packets to upstream hosts.\n  // This option requires Envoy to be run with the *CAP_NET_ADMIN* capability on Linux.\n  // And the IPv6 stack must be enabled on Linux kernel.\n  // This option does not preserve the remote downstream port.\n  // If this option is enabled, the IP address of sent datagrams will be changed to the remote downstream IP address.\n  // This means that Envoy will not receive packets that are sent by upstream hosts because the upstream hosts\n  // will send the packets with the remote downstream IP address as the destination. All packets will be routed\n  // to the remote downstream directly if there are route rules on the upstream host side.\n  // There are two options to return the packets back to the remote downstream.\n  // The first one is to use DSR (Direct Server Return).\n  // The other one is to configure routing rules on the upstream hosts to forward\n  // all packets back to Envoy and configure iptables rules on the host running Envoy to\n  // forward all packets from upstream hosts to the Envoy process so that Envoy can forward the packets to the downstream.\n  // If the platform does not support this option, Envoy will raise a configuration error.\n  bool use_original_src_ip = 4;\n\n  // Optional configuration for UDP proxy hash policies. If hash_policies is not set, the hash-based\n  // load balancing algorithms will select a host randomly. Currently the number of hash policies is\n  // limited to 1.\n  repeated HashPolicy hash_policies = 5 [(validate.rules).repeated = {max_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.internal_redirect.allow_listed_routes.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.internal_redirect.allow_listed_routes.v3\";\noption java_outer_classname = \"AllowListedRoutesConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Allow listed routes internal redirect predicate]\n\n// An internal redirect predicate that accepts only explicitly allowed target routes.\n// [#extension: envoy.internal_redirect_predicates.allow_listed_routes]\nmessage AllowListedRoutesConfig {\n  // The list of routes that's allowed as redirect target by this predicate,\n  // identified by the route's :ref:`name <envoy_api_field_config.route.v3.Route.route>`.\n  // Empty route names are not allowed.\n  repeated string allowed_route_names = 1\n      [(validate.rules).repeated = {items {string {min_len: 1}}}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.internal_redirect.previous_routes.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.internal_redirect.previous_routes.v3\";\noption java_outer_classname = \"PreviousRoutesConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Previous routes internal redirect predicate]\n\n// An internal redirect predicate that rejects redirect targets that are pointing\n// to a route that has been followed by a previous redirect from the current route.\n// [#extension: envoy.internal_redirect_predicates.previous_routes]\nmessage PreviousRoutesConfig {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.internal_redirect.safe_cross_scheme.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.internal_redirect.safe_cross_scheme.v3\";\noption java_outer_classname = \"SafeCrossSchemeConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: SafeCrossScheme internal redirect predicate]\n\n// An internal redirect predicate that checks the scheme between the\n// downstream url and the redirect target url and allows a) same scheme\n// redirect and b) safe cross scheme redirect, which means if the downstream\n// scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the\n// downstream scheme is HTTP, only HTTP redirect targets are allowed.\n// [#extension:\n// envoy.internal_redirect_predicates.safe_cross_scheme]\nmessage SafeCrossSchemeConfig {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.network.socket_interface.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.network.socket_interface.v3\";\noption java_outer_classname = \"DefaultSocketInterfaceProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Default Socket Interface configuration]\n\n// Configuration for default socket interface that relies on OS dependent syscall to create\n// sockets.\nmessage DefaultSocketInterface {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/retry/omit_host_metadata/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.retry.host.omit_host_metadata.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.retry.host.omit_host_metadata.v3\";\noption java_outer_classname = \"OmitHostMetadataConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Omit host metadata retry predicate]\n\n// A retry host predicate that can be used to reject a host based on\n// predefined metadata match criteria.\n// [#extension: envoy.retry_host_predicates.omit_host_metadata]\nmessage OmitHostMetadataConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.retry.omit_host_metadata.v2.OmitHostMetadataConfig\";\n\n  // Retry host predicate metadata match criteria. The hosts in\n  // the upstream cluster with matching metadata will be omitted while\n  // attempting a retry of a failed request. The metadata should be specified\n  // under the *envoy.lb* key.\n  config.core.v3.Metadata metadata_match = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/retry/previous_priorities:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.retry.priority.previous_priorities.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.retry.priority.previous_priorities.v3\";\noption java_outer_classname = \"PreviousPrioritiesConfigProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Previous priorities retry selector]\n\n// A retry host selector that attempts to spread retries between priorities, even if certain\n// priorities would not normally be attempted due to higher priorities being available.\n//\n// As priorities get excluded, load will be distributed amongst the remaining healthy priorities\n// based on the relative health of the priorities, matching how load is distributed during regular\n// host selection. For example, given priority healths of {100, 50, 50}, the original load will be\n// {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). 
If P0 is excluded, the load\n// changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the\n// remaining to spill over to P2.\n//\n// Each priority attempted will be excluded until there are no healthy priorities left, at which\n// point the list of attempted priorities will be reset, essentially starting from the beginning.\n// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the\n// following sequence of priorities would be selected (assuming update_frequency = 1):\n// Attempt 1: P0 (P0 is 100% healthy)\n// Attempt 2: P2 (P0 already attempted, P2 only healthy priority)\n// Attempt 3: P0 (no healthy priorities, reset)\n// Attempt 4: P2\n//\n// In the case of all upstream hosts being unhealthy, no adjustments will be made to the original\n// priority load, so behavior should be identical to not using this plugin.\n//\n// Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of\n// priorities), which might incur significant overhead for clusters with many priorities.\n// [#extension: envoy.retry_priorities.previous_priorities]\nmessage PreviousPrioritiesConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.retry.previous_priorities.PreviousPrioritiesConfig\";\n\n  // How often the priority load should be updated based on previously attempted priorities. Useful\n  // to allow each priorities to receive more than one request before being excluded or to reduce\n  // the number of times that the priority load has to be recomputed.\n  //\n  // For example, by setting this to 2, then the first two attempts (initial attempt and first\n  // retry) will use the unmodified priority load. 
The third and fourth attempt will use priority\n  // load which excludes the priorities routed to with the first two attempts, and the fifth and\n  // sixth attempt will use the priority load excluding the priorities used for the first four\n  // attempts.\n  //\n  // Must be greater than 0.\n  int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/wasm/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.stat_sinks.wasm.v3;\n\nimport \"envoy/extensions/wasm/v3/wasm.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.stat_sinks.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm]\n// Wasm :ref:`configuration overview <config_stat_sinks_wasm>`.\n// [#extension: envoy.stat_sinks.wasm]\n\nmessage Wasm {\n  // General Plugin configuration.\n  envoy.extensions.wasm.v3.PluginConfig config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/datadog.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.datadog.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.datadog.v4alpha\";\noption java_outer_classname = \"DatadogProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Datadog tracer]\n\n// Configuration for the Datadog tracer.\n// [#extension: envoy.tracers.datadog]\nmessage DatadogConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.DatadogConfig\";\n\n  // The cluster to use for submitting traces to the Datadog agent.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The name used for the service when traces are generated by envoy.\n  string service_name = 2 [(validate.rules).string = {min_len: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.dynamic_ot.v4alpha;\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.dynamic_ot.v4alpha\";\noption java_outer_classname = \"DynamicOtProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Dynamically loadable OpenTracing tracer]\n\n// DynamicOtConfig is used to dynamically load a tracer from a shared library\n// that implements the `OpenTracing dynamic loading API\n// <https://github.com/opentracing/opentracing-cpp>`_.\n// [#extension: envoy.tracers.dynamic_ot]\nmessage DynamicOtConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.DynamicOtConfig\";\n\n  // Dynamic library implementing the `OpenTracing API\n  // <https://github.com/opentracing/opentracing-cpp>`_.\n  string library = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The configuration to use when creating a tracer from the given dynamic\n  // library.\n  google.protobuf.Struct config = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.lightstep.v4alpha;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.lightstep.v4alpha\";\noption java_outer_classname = \"LightstepProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: LightStep tracer]\n\n// Configuration for the LightStep tracer.\n// [#extension: envoy.tracers.lightstep]\nmessage LightstepConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.LightstepConfig\";\n\n  // Available propagation modes\n  enum PropagationMode {\n    // Propagate trace context in the single header x-ot-span-context.\n    ENVOY = 0;\n\n    // Propagate trace context using LightStep's native format.\n    LIGHTSTEP = 1;\n\n    // Propagate trace context using the b3 format.\n    B3 = 2;\n\n    // Propagation trace context using the w3 trace-context standard.\n    TRACE_CONTEXT = 3;\n  }\n\n  // The cluster manager cluster that hosts the LightStep collectors.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // File containing the access token to the `LightStep\n  // <https://lightstep.com/>`_ API.\n  string access_token_file = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Propagation modes to use by LightStep's tracer.\n  repeated PropagationMode propagation_modes = 3\n      [(validate.rules).repeated = {items {enum {defined_only: true}}}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/opencensus/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/opencensus/v4alpha/opencensus.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.opencensus.v4alpha;\n\nimport \"envoy/config/core/v4alpha/grpc_service.proto\";\n\nimport \"opencensus/proto/trace/v1/trace_config.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.opencensus.v4alpha\";\noption java_outer_classname = \"OpencensusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: OpenCensus tracer]\n\n// Configuration for the OpenCensus tracer.\n// [#next-free-field: 15]\n// [#extension: envoy.tracers.opencensus]\nmessage OpenCensusConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.trace.v3.OpenCensusConfig\";\n\n  enum TraceContext {\n    // No-op default, no trace context is utilized.\n    NONE = 0;\n\n    // W3C Trace-Context format \"traceparent:\" header.\n    TRACE_CONTEXT = 1;\n\n    // Binary \"grpc-trace-bin:\" header.\n    GRPC_TRACE_BIN = 2;\n\n    // \"X-Cloud-Trace-Context:\" header.\n    CLOUD_TRACE_CONTEXT = 3;\n\n    // X-B3-* headers.\n    B3 = 4;\n  }\n\n  reserved 7;\n\n  // Configures tracing, e.g. the sampler, max number of annotations, etc.\n  .opencensus.proto.trace.v1.TraceConfig trace_config = 1;\n\n  // Enables the stdout exporter if set to true. This is intended for debugging\n  // purposes.\n  bool stdout_exporter_enabled = 2;\n\n  // Enables the Stackdriver exporter if set to true. The project_id must also\n  // be set.\n  bool stackdriver_exporter_enabled = 3;\n\n  // The Cloud project_id to use for Stackdriver tracing.\n  string stackdriver_project_id = 4;\n\n  // (optional) By default, the Stackdriver exporter will connect to production\n  // Stackdriver. 
If stackdriver_address is non-empty, it will instead connect\n  // to this address, which is in the gRPC format:\n  // https://github.com/grpc/grpc/blob/master/doc/naming.md\n  string stackdriver_address = 10;\n\n  // (optional) The gRPC server that hosts Stackdriver tracing service. Only\n  // Google gRPC is supported. If :ref:`target_uri <envoy_v3_api_field_config.core.v3.GrpcService.GoogleGrpc.target_uri>`\n  // is not provided, the default production Stackdriver address will be used.\n  config.core.v4alpha.GrpcService stackdriver_grpc_service = 13;\n\n  // Enables the Zipkin exporter if set to true. The url and service name must\n  // also be set.\n  bool zipkin_exporter_enabled = 5;\n\n  // The URL to Zipkin, e.g. \"http://127.0.0.1:9411/api/v2/spans\"\n  string zipkin_url = 6;\n\n  // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or\n  // ocagent_grpc_service must also be set.\n  bool ocagent_exporter_enabled = 11;\n\n  // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC\n  // format: https://github.com/grpc/grpc/blob/master/doc/naming.md\n  // [#comment:TODO: deprecate this field]\n  string ocagent_address = 12;\n\n  // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported.\n  // This is only used if the ocagent_address is left empty.\n  config.core.v4alpha.GrpcService ocagent_grpc_service = 14;\n\n  // List of incoming trace context headers we will accept. First one found\n  // wins.\n  repeated TraceContext incoming_trace_context = 8;\n\n  // List of outgoing trace context headers we will produce.\n  repeated TraceContext outgoing_trace_context = 9;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/xray/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/xray/v4alpha/xray.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.xray.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.xray.v4alpha\";\noption java_outer_classname = \"XrayProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: AWS X-Ray Tracer Configuration]\n// Configuration for AWS X-Ray tracer\n\nmessage XRayConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v3.XRayConfig\";\n\n  message SegmentFields {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.config.trace.v3.XRayConfig.SegmentFields\";\n\n    // The type of AWS resource, e.g. 
\"AWS::AppMesh::Proxy\".\n    string origin = 1;\n\n    // AWS resource metadata dictionary.\n    // See: `X-Ray Segment Document documentation <https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-aws>`__\n    google.protobuf.Struct aws = 2;\n  }\n\n  // The UDP endpoint of the X-Ray Daemon where the spans will be sent.\n  // If this value is not set, the default value of 127.0.0.1:2000 will be used.\n  config.core.v4alpha.SocketAddress daemon_endpoint = 1;\n\n  // The name of the X-Ray segment.\n  string segment_name = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The location of a local custom sampling rules JSON file.\n  // For an example of the sampling rules see:\n  // `X-Ray SDK documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-go-configuration.html#xray-sdk-go-configuration-sampling>`_\n  config.core.v4alpha.DataSource sampling_rule_manifest = 3;\n\n  // Optional custom fields to be added to each trace segment.\n  // see: `X-Ray Segment Document documentation\n  // <https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html>`__\n  SegmentFields segment_fields = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/config/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.tracers.zipkin.v4alpha;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.tracers.zipkin.v4alpha\";\noption java_outer_classname = \"ZipkinProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Zipkin tracer]\n\n// Configuration for the Zipkin tracer.\n// [#extension: envoy.tracers.zipkin]\n// [#next-free-field: 6]\nmessage ZipkinConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.config.trace.v3.ZipkinConfig\";\n\n  // Available Zipkin collector endpoint versions.\n  enum CollectorEndpointVersion {\n    // Zipkin API v1, JSON over HTTP.\n    // [#comment: The default implementation of Zipkin client before this field is added was only v1\n    // and the way user configure this was by not explicitly specifying the version. Consequently,\n    // before this is added, the corresponding Zipkin collector expected to receive v1 payload.\n    // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when\n    // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field,\n    // since in Zipkin realm this v1 version is considered to be not preferable anymore.]\n    hidden_envoy_deprecated_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0\n        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n\n    // Zipkin API v2, JSON over HTTP.\n    HTTP_JSON = 1;\n\n    // Zipkin API v2, protobuf over HTTP.\n    HTTP_PROTO = 2;\n\n    // [#not-implemented-hide:]\n    GRPC = 3;\n  }\n\n  // The cluster manager cluster that hosts the Zipkin collectors. 
Note that the\n  // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster\n  // resources <envoy_api_field_config.bootstrap.v4alpha.Bootstrap.StaticResources.clusters>`.\n  string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The API endpoint of the Zipkin service where the spans will be sent. When\n  // using a standard Zipkin installation, the API endpoint is typically\n  // /api/v1/spans, which is the default value.\n  string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Determines whether a 128bit trace id will be used when creating a new\n  // trace instance. The default value is false, which will result in a 64 bit trace id being used.\n  bool trace_id_128bit = 3;\n\n  // Determines whether client and server spans will share the same span context.\n  // The default value is true.\n  google.protobuf.BoolValue shared_span_context = 4;\n\n  // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be\n  // used.\n  CollectorEndpointVersion collector_endpoint_version = 5;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/transport_socket/alts/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.alts.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.alts.v3\";\noption java_outer_classname = \"AltsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: ALTS]\n// [#extension: envoy.transport_sockets.alts]\n\n// Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy.\n// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/\nmessage Alts {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.transport_socket.alts.v2alpha.Alts\";\n\n  // The location of a handshaker service, this is usually 169.254.169.254:8080\n  // on GCE.\n  string handshaker_service = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The acceptable service accounts from peer, peers not in the list will be rejected in the\n  // handshake validation step. If empty, no validation will be performed.\n  repeated string peer_service_accounts = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.proxy_protocol.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/proxy_protocol.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3\";\noption java_outer_classname = \"UpstreamProxyProtocolProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Upstream Proxy Protocol]\n// [#extension: envoy.transport_sockets.upstream_proxy_protocol]\n\n// Configuration for PROXY protocol socket\nmessage ProxyProtocolUpstreamTransport {\n  // The PROXY protocol settings\n  config.core.v3.ProxyProtocolConfig config = 1;\n\n  // The underlying transport socket being wrapped.\n  config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.quic.v3;\n\nimport \"envoy/extensions/transport_sockets/tls/v3/tls.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.quic.v3\";\noption java_outer_classname = \"QuicTransportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: quic transport]\n// [#extension: envoy.transport_sockets.quic]\n\n// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy.\nmessage QuicDownstreamTransport {\n  tls.v3.DownstreamTlsContext downstream_tls_context = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy.\nmessage QuicUpstreamTransport {\n  tls.v3.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/extensions/transport_sockets/quic/v3:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.quic.v4alpha;\n\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/tls.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.quic.v4alpha\";\noption java_outer_classname = \"QuicTransportProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: quic transport]\n// [#extension: envoy.transport_sockets.quic]\n\n// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy.\nmessage QuicDownstreamTransport {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport\";\n\n  tls.v4alpha.DownstreamTlsContext downstream_tls_context = 1\n      [(validate.rules).message = {required: true}];\n}\n\n// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy.\nmessage QuicUpstreamTransport {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport\";\n\n  tls.v4alpha.UpstreamTlsContext upstream_tls_context = 1\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/transport_socket/raw_buffer/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.raw_buffer.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.raw_buffer.v3\";\noption java_outer_classname = \"RawBufferProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Raw Buffer]\n// [#extension: envoy.transport_sockets.raw_buffer]\n\n// Configuration for raw buffer transport socket.\nmessage RawBuffer {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.transport_socket.raw_buffer.v2.RawBuffer\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/transport_socket/tap/v2alpha:pkg\",\n        \"//envoy/extensions/common/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tap.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/extensions/common/tap/v3/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tap.v3\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap]\n// [#extension: envoy.transport_sockets.tap]\n\n// Configuration for tap transport socket. This wraps another transport socket, providing the\n// ability to interpose and record in plain text any traffic that is surfaced to Envoy.\nmessage Tap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.config.transport_socket.tap.v2alpha.Tap\";\n\n  // Common configuration for the tap transport socket.\n  common.tap.v3.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n\n  // The underlying transport socket being wrapped.\n  config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/common/tap/v4alpha:pkg\",\n        \"//envoy/extensions/transport_sockets/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tap.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/extensions/common/tap/v4alpha/common.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tap.v4alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tap]\n// [#extension: envoy.transport_sockets.tap]\n\n// Configuration for tap transport socket. This wraps another transport socket, providing the\n// ability to interpose and record in plain text any traffic that is surfaced to Envoy.\nmessage Tap {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tap.v3.Tap\";\n\n  // Common configuration for the tap transport socket.\n  common.tap.v4alpha.CommonExtensionConfig common_config = 1\n      [(validate.rules).message = {required: true}];\n\n  // The underlying transport socket being wrapped.\n  config.core.v4alpha.TransportSocket transport_socket = 2\n      [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/api/v2/auth:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v3;\n\nimport \"udpa/annotations/status.proto\";\n\nimport public \"envoy/extensions/transport_sockets/tls/v3/common.proto\";\nimport public \"envoy/extensions/transport_sockets/tls/v3/secret.proto\";\nimport public \"envoy/extensions/transport_sockets/tls/v3/tls.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3\";\noption java_outer_classname = \"CertProto\";\noption java_multiple_files = true;\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common TLS configuration]\n\nmessage TlsParameters {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.TlsParameters\";\n\n  enum TlsProtocol {\n    // Envoy will choose the optimal TLS version.\n    TLS_AUTO = 0;\n\n    // TLS 1.0\n    TLSv1_0 = 1;\n\n    // TLS 1.1\n    TLSv1_1 = 2;\n\n    // TLS 1.2\n    TLSv1_2 = 3;\n\n    // TLS 1.3\n    TLSv1_3 = 4;\n  }\n\n  // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for\n  // servers.\n  TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for\n  // servers.\n  TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // If specified, the TLS listener will only support the specified `cipher list\n  // <https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#Cipher-suite-configuration>`_\n  // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not\n  // specified, the default list will be used.\n  //\n  // In non-FIPS builds, the default cipher list is:\n  //\n  // .. 
code-block:: none\n  //\n  //   [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]\n  //   [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default cipher list is:\n  //\n  // .. code-block:: none\n  //\n  //   ECDHE-ECDSA-AES128-GCM-SHA256\n  //   ECDHE-RSA-AES128-GCM-SHA256\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  repeated string cipher_suites = 3;\n\n  // If specified, the TLS connection will only support the specified ECDH\n  // curves. If not specified, the default curves will be used.\n  //\n  // In non-FIPS builds, the default curves are:\n  //\n  // .. code-block:: none\n  //\n  //   X25519\n  //   P-256\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default curve is:\n  //\n  // .. code-block:: none\n  //\n  //   P-256\n  repeated string ecdh_curves = 4;\n}\n\n// BoringSSL private key method configuration. The private key methods are used for external\n// (potentially asynchronous) signing and decryption operations. Some use cases for private key\n// methods would be TPM support and TLS acceleration.\nmessage PrivateKeyProvider {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.PrivateKeyProvider\";\n\n  // Private key method provider name. 
The name must match a\n  // supported private key method provider type.\n  string provider_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Private key method provider specific configuration.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true];\n\n    google.protobuf.Struct hidden_envoy_deprecated_config = 2\n        [deprecated = true, (udpa.annotations.sensitive) = true];\n  }\n}\n\n// [#next-free-field: 7]\nmessage TlsCertificate {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.TlsCertificate\";\n\n  // The TLS certificate chain.\n  config.core.v3.DataSource certificate_chain = 1;\n\n  // The TLS private key.\n  config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n  // BoringSSL private key method provider. This is an alternative to :ref:`private_key\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.TlsCertificate.private_key>` field. This can't be\n  // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.TlsCertificate.private_key>` and\n  // :ref:`private_key_provider\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.TlsCertificate.private_key_provider>` fields will result in an\n  // error.\n  PrivateKeyProvider private_key_provider = 6;\n\n  // The password to decrypt the TLS private key. If this field is not set, it is assumed that the\n  // TLS private key is not password encrypted.\n  config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true];\n\n  // The OCSP response to be stapled with this certificate during the handshake.\n  // The response must be DER-encoded and may only be  provided via ``filename`` or\n  // ``inline_bytes``. 
The response may pertain to only one certificate.\n  config.core.v3.DataSource ocsp_staple = 4;\n\n  // [#not-implemented-hide:]\n  repeated config.core.v3.DataSource signed_certificate_timestamp = 5;\n}\n\nmessage TlsSessionTicketKeys {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.TlsSessionTicketKeys\";\n\n  // Keys for encrypting and decrypting TLS session tickets. The\n  // first key in the array contains the key to encrypt all new sessions created by this context.\n  // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys\n  // by, for example, putting the new key first, and the previous key second.\n  //\n  // If :ref:`session_ticket_keys <envoy_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys>`\n  // is not specified, the TLS library will still support resuming sessions via tickets, but it will\n  // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts\n  // or on different hosts.\n  //\n  // Each key must contain exactly 80 bytes of cryptographically-secure random data. For\n  // example, the output of ``openssl rand 80``.\n  //\n  // .. attention::\n  //\n  //   Using this feature has serious security considerations and risks. Improper handling of keys\n  //   may result in loss of secrecy in connections, even if ciphers supporting perfect forward\n  //   secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some\n  //   discussion. 
To minimize the risk, you must:\n  //\n  //   * Keep the session ticket keys at least as secure as your TLS certificate private keys\n  //   * Rotate session ticket keys at least daily, and preferably hourly\n  //   * Always generate keys using a cryptographically-secure random data source\n  repeated config.core.v3.DataSource keys = 1\n      [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true];\n}\n\n// [#next-free-field: 11]\nmessage CertificateValidationContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.CertificateValidationContext\";\n\n  // Peer certificate verification mode.\n  enum TrustChainVerification {\n    // Perform default certificate verification (e.g., against CA / verification lists)\n    VERIFY_TRUST_CHAIN = 0;\n\n    // Connections where the certificate fails verification will be permitted.\n    // For HTTP connections, the result of certificate verification can be used in route matching. (\n    // see :ref:`validated <envoy_api_field_config.route.v3.RouteMatch.TlsContextMatchOptions.validated>` ).\n    ACCEPT_UNTRUSTED = 1;\n  }\n\n  reserved 5;\n\n  // TLS certificate data containing certificate authority certificates to use in verifying\n  // a presented peer certificate (e.g. server certificate for clusters or client certificate\n  // for listeners). If not specified and a peer certificate is presented it will not be\n  // verified. 
By default, a client certificate is optional, unless one of the additional\n  // options (:ref:`require_client_certificate\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.require_client_certificate>`,\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_spki>`,\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_hash>`, or\n  // :ref:`match_subject_alt_names\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.match_subject_alt_names>`) is also\n  // specified.\n  //\n  // It can optionally contain certificate revocation lists, in which case Envoy will verify\n  // that the presented peer certificate has not been revoked by one of the included CRLs. Note\n  // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be\n  // provided for all certificate authorities in that chain. Failure to do so will result in\n  // verification failure for both revoked and unrevoked certificates from that chain.\n  //\n  // See :ref:`the TLS overview <arch_overview_ssl_enabling_verification>` for a list of common\n  // system CA locations.\n  config.core.v3.DataSource trusted_ca = 1;\n\n  // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the\n  // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate\n  // matches one of the specified values.\n  //\n  // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -pubkey\n  //     | openssl pkey -pubin -outform DER\n  //     | openssl dgst -sha256 -binary\n  //     | openssl enc -base64\n  //   NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A=\n  //\n  // This is the format used in HTTP Public Key Pinning.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  //\n  // .. attention::\n  //\n  //   This option is preferred over :ref:`verify_certificate_hash\n  //   <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_hash>`,\n  //   because SPKI is tied to a private key, so it doesn't change when the certificate\n  //   is renewed using the same private key.\n  repeated string verify_certificate_spki = 3\n      [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}];\n\n  // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that\n  // the SHA-256 of the DER-encoded presented certificate matches one of the specified values.\n  //\n  // A hex-encoded SHA-256 of the certificate can be generated with the following command:\n  //\n  // .. code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d\" \" -f2\n  //   df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a\n  //\n  // A long hex-encoded and colon-separated SHA-256 (a.k.a. \"fingerprint\") of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d\"=\" -f2\n  //   DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A\n  //\n  // Both of those formats are acceptable.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  repeated string verify_certificate_hash = 2\n      [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}];\n\n  // An optional list of Subject Alternative name matchers. Envoy will verify that the\n  // Subject Alternative Name of the presented certificate matches one of the specified matches.\n  //\n  // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be\n  // configured with exact match type in the :ref:`string matcher <envoy_api_msg_type.matcher.v3.StringMatcher>`.\n  // For example if the certificate has \"\\*.example.com\" as DNS SAN entry, to allow only \"api.example.com\",\n  // it should be configured as shown below.\n  //\n  // .. code-block:: yaml\n  //\n  //  match_subject_alt_names:\n  //    exact: \"api.example.com\"\n  //\n  // .. 
attention::\n  //\n  //   Subject Alternative Names are easily spoofable and verifying only them is insecure,\n  //   therefore this option must be used together with :ref:`trusted_ca\n  //   <envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca>`.\n  repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9;\n\n  // [#not-implemented-hide:] Must present signed certificate time-stamp.\n  google.protobuf.BoolValue require_signed_certificate_timestamp = 6;\n\n  // An optional `certificate revocation list\n  // <https://en.wikipedia.org/wiki/Certificate_revocation_list>`_\n  // (in PEM format). If specified, Envoy will verify that the presented peer\n  // certificate has not been revoked by this CRL. If this DataSource contains\n  // multiple CRLs, all of them will be used. Note that if a CRL is provided\n  // for any certificate authority in a trust chain, a CRL must be provided\n  // for all certificate authorities in that chain. Failure to do so will\n  // result in verification failure for both revoked and unrevoked certificates\n  // from that chain.\n  config.core.v3.DataSource crl = 7;\n\n  // If specified, Envoy will not reject expired certificates.\n  bool allow_expired_certificate = 8;\n\n  // Certificate trust chain verification mode.\n  TrustChainVerification trust_chain_verification = 10\n      [(validate.rules).enum = {defined_only: true}];\n\n  repeated string hidden_envoy_deprecated_verify_subject_alt_name = 4 [deprecated = true];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/config_source.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/common.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3\";\noption java_outer_classname = \"SecretProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Secrets configuration]\n\nmessage GenericSecret {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.GenericSecret\";\n\n  // Secret of generic type and is available to filters.\n  config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true];\n}\n\nmessage SdsSecretConfig {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.SdsSecretConfig\";\n\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  // When both name and config are specified, then secret can be fetched and/or reloaded via\n  // SDS. When only name is specified, then secret will be loaded from static resources.\n  string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  // Resource locator for SDS. 
This is mutually exclusive to *name*.\n  // [#not-implemented-hide:]\n  udpa.core.v1.ResourceLocator sds_resource_locator = 3\n      [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  config.core.v3.ConfigSource sds_config = 2;\n}\n\n// [#next-free-field: 6]\nmessage Secret {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.Secret\";\n\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  string name = 1;\n\n  oneof type {\n    TlsCertificate tls_certificate = 2;\n\n    TlsSessionTicketKeys session_ticket_keys = 3;\n\n    CertificateValidationContext validation_context = 4;\n\n    GenericSecret generic_secret = 5;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v3;\n\nimport \"envoy/config/core/v3/extension.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/common.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v3/secret.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3\";\noption java_outer_classname = \"TlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: TLS transport socket]\n// [#extension: envoy.transport_sockets.tls]\n// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS.\n\nmessage UpstreamTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.UpstreamTlsContext\";\n\n  // Common TLS context settings.\n  //\n  // .. attention::\n  //\n  //   Server certificate verification is not enabled by default. Configure\n  //   :ref:`trusted_ca<envoy_api_field_extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca>` to enable\n  //   verification.\n  CommonTlsContext common_tls_context = 1;\n\n  // SNI string to use when creating TLS backend connections.\n  string sni = 2 [(validate.rules).string = {max_bytes: 255}];\n\n  // If true, server-initiated TLS renegotiation will be allowed.\n  //\n  // .. 
attention::\n  //\n  //   TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary.\n  bool allow_renegotiation = 3;\n\n  // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets\n  // for TLSv1.2 and older) to store for the purpose of session resumption.\n  //\n  // Defaults to 1, setting this to 0 disables session resumption.\n  google.protobuf.UInt32Value max_session_keys = 4;\n}\n\n// [#next-free-field: 9]\nmessage DownstreamTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.auth.DownstreamTlsContext\";\n\n  enum OcspStaplePolicy {\n    // OCSP responses are optional. If an OCSP response is absent\n    // or expired, the associated certificate will be used for\n    // connections without an OCSP staple.\n    LENIENT_STAPLING = 0;\n\n    // OCSP responses are optional. If an OCSP response is absent,\n    // the associated certificate will be used without an\n    // OCSP staple. If a response is provided but is expired,\n    // the associated certificate will not be used for\n    // subsequent connections. If no suitable certificate is found,\n    // the connection is rejected.\n    STRICT_STAPLING = 1;\n\n    // OCSP responses are required. Configuration will fail if\n    // a certificate is provided without an OCSP response. If a\n    // response expires, the associated certificate will not be\n    // used connections. 
If no suitable certificate is found, the\n    // connection is rejected.\n    MUST_STAPLE = 2;\n  }\n\n  // Common TLS context settings.\n  CommonTlsContext common_tls_context = 1;\n\n  // If specified, Envoy will reject connections without a valid client\n  // certificate.\n  google.protobuf.BoolValue require_client_certificate = 2;\n\n  // If specified, Envoy will reject connections without a valid and matching SNI.\n  // [#not-implemented-hide:]\n  google.protobuf.BoolValue require_sni = 3;\n\n  oneof session_ticket_keys_type {\n    // TLS session ticket key settings.\n    TlsSessionTicketKeys session_ticket_keys = 4;\n\n    // Config for fetching TLS session ticket keys via SDS API.\n    SdsSecretConfig session_ticket_keys_sds_secret_config = 5;\n\n    // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS\n    // server to not issue TLS session tickets for the purposes of stateless TLS session resumption.\n    // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using\n    // the keys specified through either :ref:`session_ticket_keys <envoy_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys>`\n    // or :ref:`session_ticket_keys_sds_secret_config <envoy_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys_sds_secret_config>`.\n    // If this config is set to false and no keys are explicitly configured, the TLS server will issue\n    // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the\n    // implication that sessions cannot be resumed across hot restarts or on different hosts.\n    bool disable_stateless_session_resumption = 7;\n  }\n\n  // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session\n  // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2)\n  // 
<https://tools.ietf.org/html/rfc5077#section-5.6>`\n  // only seconds could be specified (fractional seconds are going to be ignored).\n  google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = {\n    lt {seconds: 4294967296}\n    gte {}\n  }];\n\n  // Config for whether to use certificates if they do not have\n  // an accompanying OCSP response or if the response expires at runtime.\n  // Defaults to LENIENT_STAPLING\n  OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}];\n}\n\n// TLS context shared by both client and server TLS contexts.\n// [#next-free-field: 14]\nmessage CommonTlsContext {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.auth.CommonTlsContext\";\n\n  // Config for Certificate provider to get certificates. This provider should allow certificates to be\n  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n  message CertificateProvider {\n    // opaque name used to specify certificate instances or types. For example, \"ROOTCA\" to specify\n    // a root-certificate (validation context) or \"TLS\" to specify a new tls-certificate.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Provider specific config.\n    // Note: an implementation is expected to dedup multiple instances of the same config\n    // to maintain a single certificate-provider instance. 
The sharing can happen, for\n    // example, among multiple clusters or between the tls_certificate and validation_context\n    // certificate providers of a cluster.\n    // This config could be supplied inline or (in future) a named xDS resource.\n    oneof config {\n      option (validate.required) = true;\n\n      config.core.v3.TypedExtensionConfig typed_config = 2;\n    }\n  }\n\n  // Similar to CertificateProvider above, but allows the provider instances to be configured on\n  // the client side instead of being sent from the control plane.\n  message CertificateProviderInstance {\n    // Provider instance name. This name must be defined in the client's configuration (e.g., a\n    // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config\n    // field that would be sent in the CertificateProvider message if the config was sent by the\n    // control plane). If not present, defaults to \"default\".\n    //\n    // Instance names should generally be defined not in terms of the underlying provider\n    // implementation (e.g., \"file_watcher\") but rather in terms of the function of the\n    // certificates (e.g., \"foo_deployment_identity\").\n    string instance_name = 1;\n\n    // Opaque name used to specify certificate instances or types. For example, \"ROOTCA\" to specify\n    // a root-certificate (validation context) or \"example.com\" to specify a certificate for a\n    // particular domain. 
Not all provider instances will actually use this field, so the value\n    // defaults to the empty string.\n    string certificate_name = 2;\n  }\n\n  message CombinedCertificateValidationContext {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext\";\n\n    // How to validate peer certificates.\n    CertificateValidationContext default_validation_context = 1\n        [(validate.rules).message = {required: true}];\n\n    // Config for fetching validation context via SDS API. Note SDS API allows certificates to be\n    // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n    // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n    // or validation_context_certificate_provider_instance may be used.\n    SdsSecretConfig validation_context_sds_secret_config = 2 [\n      (validate.rules).message = {required: true},\n      (udpa.annotations.field_migrate).oneof_promotion = \"dynamic_validation_context\"\n    ];\n\n    // Certificate provider for fetching validation context.\n    // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n    // or validation_context_certificate_provider_instance may be used.\n    // [#not-implemented-hide:]\n    CertificateProvider validation_context_certificate_provider = 3\n        [(udpa.annotations.field_migrate).oneof_promotion = \"dynamic_validation_context\"];\n\n    // Certificate provider instance for fetching validation context.\n    // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n    // or validation_context_certificate_provider_instance may be used.\n    // [#not-implemented-hide:]\n    CertificateProviderInstance validation_context_certificate_provider_instance = 4\n        [(udpa.annotations.field_migrate).oneof_promotion = \"dynamic_validation_context\"];\n  }\n\n  
reserved 5;\n\n  // TLS protocol versions, cipher suites etc.\n  TlsParameters tls_params = 1;\n\n  // :ref:`Multiple TLS certificates <arch_overview_ssl_cert_select>` can be associated with the\n  // same context to allow both RSA and ECDSA certificates.\n  //\n  // Only a single TLS certificate is supported in client contexts. In server contexts, the first\n  // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is\n  // used for clients that support ECDSA.\n  repeated TlsCertificate tls_certificates = 2;\n\n  // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be\n  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n  repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6\n      [(validate.rules).repeated = {max_items: 1}];\n\n  // Certificate provider for fetching TLS certificates.\n  // [#not-implemented-hide:]\n  CertificateProvider tls_certificate_certificate_provider = 9;\n\n  // Certificate provider instance for fetching TLS certificates.\n  // [#not-implemented-hide:]\n  CertificateProviderInstance tls_certificate_certificate_provider_instance = 11;\n\n  oneof validation_context_type {\n    // How to validate peer certificates.\n    CertificateValidationContext validation_context = 3;\n\n    // Config for fetching validation context via SDS API. Note SDS API allows certificates to be\n    // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n    SdsSecretConfig validation_context_sds_secret_config = 7;\n\n    // Combined certificate validation context holds a default CertificateValidationContext\n    // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic\n    // and default CertificateValidationContext are merged into a new CertificateValidationContext\n    // for validation. 
This merge is done by Message::MergeFrom(), so dynamic\n    // CertificateValidationContext overwrites singular fields in default\n    // CertificateValidationContext, and concatenates repeated fields to default\n    // CertificateValidationContext, and logical OR is applied to boolean fields.\n    CombinedCertificateValidationContext combined_validation_context = 8;\n\n    // Certificate provider for fetching validation context.\n    // [#not-implemented-hide:]\n    CertificateProvider validation_context_certificate_provider = 10;\n\n    // Certificate provider instance for fetching validation context.\n    // [#not-implemented-hide:]\n    CertificateProviderInstance validation_context_certificate_provider_instance = 12;\n  }\n\n  // Supplies the list of ALPN protocols that the listener should expose. In\n  // practice this is likely to be set to one of two values (see the\n  // :ref:`codec_type\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.codec_type>`\n  // parameter in the HTTP connection manager for more information):\n  //\n  // * \"h2,http/1.1\" If the listener is going to support both HTTP/2 and HTTP/1.1.\n  // * \"http/1.1\" If the listener is only going to support HTTP/1.1.\n  //\n  // There is no default for this parameter. If empty, Envoy will not expose ALPN.\n  repeated string alpn_protocols = 4;\n\n  // Custom TLS handshaker. If empty, defaults to native TLS handshaking\n  // behavior.\n  config.core.v3.TypedExtensionConfig custom_handshaker = 13;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/extensions/transport_sockets/tls/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common TLS configuration]\n\nmessage TlsParameters {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.TlsParameters\";\n\n  enum TlsProtocol {\n    // Envoy will choose the optimal TLS version.\n    TLS_AUTO = 0;\n\n    // TLS 1.0\n    TLSv1_0 = 1;\n\n    // TLS 1.1\n    TLSv1_1 = 2;\n\n    // TLS 1.2\n    TLSv1_2 = 3;\n\n    // TLS 1.3\n    TLSv1_3 = 4;\n  }\n\n  // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for\n  // servers.\n  TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}];\n\n  // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for\n  // servers.\n  TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}];\n\n  // If specified, the TLS listener will only support the specified `cipher list\n  // <https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#Cipher-suite-configuration>`_\n  // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). 
If not\n  // specified, the default list will be used.\n  //\n  // In non-FIPS builds, the default cipher list is:\n  //\n  // .. code-block:: none\n  //\n  //   [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]\n  //   [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default cipher list is:\n  //\n  // .. code-block:: none\n  //\n  //   ECDHE-ECDSA-AES128-GCM-SHA256\n  //   ECDHE-RSA-AES128-GCM-SHA256\n  //   ECDHE-ECDSA-AES128-SHA\n  //   ECDHE-RSA-AES128-SHA\n  //   AES128-GCM-SHA256\n  //   AES128-SHA\n  //   ECDHE-ECDSA-AES256-GCM-SHA384\n  //   ECDHE-RSA-AES256-GCM-SHA384\n  //   ECDHE-ECDSA-AES256-SHA\n  //   ECDHE-RSA-AES256-SHA\n  //   AES256-GCM-SHA384\n  //   AES256-SHA\n  repeated string cipher_suites = 3;\n\n  // If specified, the TLS connection will only support the specified ECDH\n  // curves. If not specified, the default curves will be used.\n  //\n  // In non-FIPS builds, the default curves are:\n  //\n  // .. code-block:: none\n  //\n  //   X25519\n  //   P-256\n  //\n  // In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default curve is:\n  //\n  // .. code-block:: none\n  //\n  //   P-256\n  repeated string ecdh_curves = 4;\n}\n\n// BoringSSL private key method configuration. The private key methods are used for external\n// (potentially asynchronous) signing and decryption operations. 
Some use cases for private key\n// methods would be TPM support and TLS acceleration.\nmessage PrivateKeyProvider {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider\";\n\n  reserved 2;\n\n  reserved \"config\";\n\n  // Private key method provider name. The name must match a\n  // supported private key method provider type.\n  string provider_name = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Private key method provider specific configuration.\n  oneof config_type {\n    google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true];\n  }\n}\n\n// [#next-free-field: 7]\nmessage TlsCertificate {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.TlsCertificate\";\n\n  // The TLS certificate chain.\n  config.core.v4alpha.DataSource certificate_chain = 1;\n\n  // The TLS private key.\n  config.core.v4alpha.DataSource private_key = 2 [(udpa.annotations.sensitive) = true];\n\n  // BoringSSL private key method provider. This is an alternative to :ref:`private_key\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.TlsCertificate.private_key>` field. This can't be\n  // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.TlsCertificate.private_key>` and\n  // :ref:`private_key_provider\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.TlsCertificate.private_key_provider>` fields will result in an\n  // error.\n  PrivateKeyProvider private_key_provider = 6;\n\n  // The password to decrypt the TLS private key. 
If this field is not set, it is assumed that the\n  // TLS private key is not password encrypted.\n  config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true];\n\n  // The OCSP response to be stapled with this certificate during the handshake.\n  // The response must be DER-encoded and may only be provided via ``filename`` or\n  // ``inline_bytes``. The response may pertain to only one certificate.\n  config.core.v4alpha.DataSource ocsp_staple = 4;\n\n  // [#not-implemented-hide:]\n  repeated config.core.v4alpha.DataSource signed_certificate_timestamp = 5;\n}\n\nmessage TlsSessionTicketKeys {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys\";\n\n  // Keys for encrypting and decrypting TLS session tickets. The\n  // first key in the array contains the key to encrypt all new sessions created by this context.\n  // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys\n  // by, for example, putting the new key first, and the previous key second.\n  //\n  // If :ref:`session_ticket_keys <envoy_api_field_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext.session_ticket_keys>`\n  // is not specified, the TLS library will still support resuming sessions via tickets, but it will\n  // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts\n  // or on different hosts.\n  //\n  // Each key must contain exactly 80 bytes of cryptographically-secure random data. For\n  // example, the output of ``openssl rand 80``.\n  //\n  // .. attention::\n  //\n  //   Using this feature has serious security considerations and risks. Improper handling of keys\n  //   may result in loss of secrecy in connections, even if ciphers supporting perfect forward\n  //   secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some\n  //   discussion. 
To minimize the risk, you must:\n  //\n  //   * Keep the session ticket keys at least as secure as your TLS certificate private keys\n  //   * Rotate session ticket keys at least daily, and preferably hourly\n  //   * Always generate keys using a cryptographically-secure random data source\n  repeated config.core.v4alpha.DataSource keys = 1\n      [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true];\n}\n\n// [#next-free-field: 11]\nmessage CertificateValidationContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext\";\n\n  // Peer certificate verification mode.\n  enum TrustChainVerification {\n    // Perform default certificate verification (e.g., against CA / verification lists)\n    VERIFY_TRUST_CHAIN = 0;\n\n    // Connections where the certificate fails verification will be permitted.\n    // For HTTP connections, the result of certificate verification can be used in route matching. (\n    // see :ref:`validated <envoy_api_field_config.route.v4alpha.RouteMatch.TlsContextMatchOptions.validated>` ).\n    ACCEPT_UNTRUSTED = 1;\n  }\n\n  reserved 4, 5;\n\n  reserved \"verify_subject_alt_name\";\n\n  // TLS certificate data containing certificate authority certificates to use in verifying\n  // a presented peer certificate (e.g. server certificate for clusters or client certificate\n  // for listeners). If not specified and a peer certificate is presented it will not be\n  // verified. 
By default, a client certificate is optional, unless one of the additional\n  // options (:ref:`require_client_certificate\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext.require_client_certificate>`,\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_spki>`,\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_hash>`, or\n  // :ref:`match_subject_alt_names\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.match_subject_alt_names>`) is also\n  // specified.\n  //\n  // It can optionally contain certificate revocation lists, in which case Envoy will verify\n  // that the presented peer certificate has not been revoked by one of the included CRLs. Note\n  // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be\n  // provided for all certificate authorities in that chain. Failure to do so will result in\n  // verification failure for both revoked and unrevoked certificates from that chain.\n  //\n  // See :ref:`the TLS overview <arch_overview_ssl_enabling_verification>` for a list of common\n  // system CA locations.\n  config.core.v4alpha.DataSource trusted_ca = 1;\n\n  // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the\n  // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate\n  // matches one of the specified values.\n  //\n  // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -pubkey\n  //     | openssl pkey -pubin -outform DER\n  //     | openssl dgst -sha256 -binary\n  //     | openssl enc -base64\n  //   NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A=\n  //\n  // This is the format used in HTTP Public Key Pinning.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  //\n  // .. attention::\n  //\n  //   This option is preferred over :ref:`verify_certificate_hash\n  //   <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_hash>`,\n  //   because SPKI is tied to a private key, so it doesn't change when the certificate\n  //   is renewed using the same private key.\n  repeated string verify_certificate_spki = 3\n      [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}];\n\n  // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that\n  // the SHA-256 of the DER-encoded presented certificate matches one of the specified values.\n  //\n  // A hex-encoded SHA-256 of the certificate can be generated with the following command:\n  //\n  // .. code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d\" \" -f2\n  //   df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a\n  //\n  // A long hex-encoded and colon-separated SHA-256 (a.k.a. \"fingerprint\") of the certificate\n  // can be generated with the following command:\n  //\n  // .. 
code-block:: bash\n  //\n  //   $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d\"=\" -f2\n  //   DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A\n  //\n  // Both of those formats are acceptable.\n  //\n  // When both:\n  // :ref:`verify_certificate_hash\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_hash>` and\n  // :ref:`verify_certificate_spki\n  // <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.verify_certificate_spki>` are specified,\n  // a hash matching value from either of the lists will result in the certificate being accepted.\n  repeated string verify_certificate_hash = 2\n      [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}];\n\n  // An optional list of Subject Alternative name matchers. Envoy will verify that the\n  // Subject Alternative Name of the presented certificate matches one of the specified matches.\n  //\n  // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be\n  // configured with exact match type in the :ref:`string matcher <envoy_api_msg_type.matcher.v4alpha.StringMatcher>`.\n  // For example if the certificate has \"\\*.example.com\" as DNS SAN entry, to allow only \"api.example.com\",\n  // it should be configured as shown below.\n  //\n  // .. code-block:: yaml\n  //\n  //  match_subject_alt_names:\n  //    exact: \"api.example.com\"\n  //\n  // .. 
attention::\n  //\n  //   Subject Alternative Names are easily spoofable and verifying only them is insecure,\n  //   therefore this option must be used together with :ref:`trusted_ca\n  //   <envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.trusted_ca>`.\n  repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9;\n\n  // [#not-implemented-hide:] Must present signed certificate time-stamp.\n  google.protobuf.BoolValue require_signed_certificate_timestamp = 6;\n\n  // An optional `certificate revocation list\n  // <https://en.wikipedia.org/wiki/Certificate_revocation_list>`_\n  // (in PEM format). If specified, Envoy will verify that the presented peer\n  // certificate has not been revoked by this CRL. If this DataSource contains\n  // multiple CRLs, all of them will be used. Note that if a CRL is provided\n  // for any certificate authority in a trust chain, a CRL must be provided\n  // for all certificate authorities in that chain. Failure to do so will\n  // result in verification failure for both revoked and unrevoked certificates\n  // from that chain.\n  config.core.v4alpha.DataSource crl = 7;\n\n  // If specified, Envoy will not reject expired certificates.\n  bool allow_expired_certificate = 8;\n\n  // Certificate trust chain verification mode.\n  TrustChainVerification trust_chain_verification = 10\n      [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/config_source.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/common.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha\";\noption java_outer_classname = \"SecretProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Secrets configuration]\n\nmessage GenericSecret {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.GenericSecret\";\n\n  // Secret of generic type and is available to filters.\n  config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true];\n}\n\nmessage SdsSecretConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig\";\n\n  oneof name_specifier {\n    // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n    // When both name and config are specified, then secret can be fetched and/or reloaded via\n    // SDS. When only name is specified, then secret will be loaded from static resources.\n    string name = 1;\n\n    // Resource locator for SDS. 
This is mutually exclusive to *name*.\n    // [#not-implemented-hide:]\n    udpa.core.v1.ResourceLocator sds_resource_locator = 3;\n  }\n\n  config.core.v4alpha.ConfigSource sds_config = 2;\n}\n\n// [#next-free-field: 6]\nmessage Secret {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.Secret\";\n\n  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.\n  string name = 1;\n\n  oneof type {\n    TlsCertificate tls_certificate = 2;\n\n    TlsSessionTicketKeys session_ticket_keys = 3;\n\n    CertificateValidationContext validation_context = 4;\n\n    GenericSecret generic_secret = 5;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.transport_sockets.tls.v4alpha;\n\nimport \"envoy/config/core/v4alpha/extension.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/common.proto\";\nimport \"envoy/extensions/transport_sockets/tls/v4alpha/secret.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha\";\noption java_outer_classname = \"TlsProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: TLS transport socket]\n// [#extension: envoy.transport_sockets.tls]\n// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS.\n\nmessage UpstreamTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\";\n\n  // Common TLS context settings.\n  //\n  // .. attention::\n  //\n  //   Server certificate verification is not enabled by default. Configure\n  //   :ref:`trusted_ca<envoy_api_field_extensions.transport_sockets.tls.v4alpha.CertificateValidationContext.trusted_ca>` to enable\n  //   verification.\n  CommonTlsContext common_tls_context = 1;\n\n  // SNI string to use when creating TLS backend connections.\n  string sni = 2 [(validate.rules).string = {max_bytes: 255}];\n\n  // If true, server-initiated TLS renegotiation will be allowed.\n  //\n  // .. 
attention::\n  //\n  //   TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary.\n  bool allow_renegotiation = 3;\n\n  // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets\n  // for TLSv1.2 and older) to store for the purpose of session resumption.\n  //\n  // Defaults to 1, setting this to 0 disables session resumption.\n  google.protobuf.UInt32Value max_session_keys = 4;\n}\n\n// [#next-free-field: 9]\nmessage DownstreamTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\";\n\n  enum OcspStaplePolicy {\n    // OCSP responses are optional. If an OCSP response is absent\n    // or expired, the associated certificate will be used for\n    // connections without an OCSP staple.\n    LENIENT_STAPLING = 0;\n\n    // OCSP responses are optional. If an OCSP response is absent,\n    // the associated certificate will be used without an\n    // OCSP staple. If a response is provided but is expired,\n    // the associated certificate will not be used for\n    // subsequent connections. If no suitable certificate is found,\n    // the connection is rejected.\n    STRICT_STAPLING = 1;\n\n    // OCSP responses are required. Configuration will fail if\n    // a certificate is provided without an OCSP response. If a\n    // response expires, the associated certificate will not be\n    // used for connections. 
If no suitable certificate is found, the\n    // connection is rejected.\n    MUST_STAPLE = 2;\n  }\n\n  // Common TLS context settings.\n  CommonTlsContext common_tls_context = 1;\n\n  // If specified, Envoy will reject connections without a valid client\n  // certificate.\n  google.protobuf.BoolValue require_client_certificate = 2;\n\n  // If specified, Envoy will reject connections without a valid and matching SNI.\n  // [#not-implemented-hide:]\n  google.protobuf.BoolValue require_sni = 3;\n\n  oneof session_ticket_keys_type {\n    // TLS session ticket key settings.\n    TlsSessionTicketKeys session_ticket_keys = 4;\n\n    // Config for fetching TLS session ticket keys via SDS API.\n    SdsSecretConfig session_ticket_keys_sds_secret_config = 5;\n\n    // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS\n    // server to not issue TLS session tickets for the purposes of stateless TLS session resumption.\n    // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using\n    // the keys specified through either :ref:`session_ticket_keys <envoy_api_field_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext.session_ticket_keys>`\n    // or :ref:`session_ticket_keys_sds_secret_config <envoy_api_field_extensions.transport_sockets.tls.v4alpha.DownstreamTlsContext.session_ticket_keys_sds_secret_config>`.\n    // If this config is set to false and no keys are explicitly configured, the TLS server will issue\n    // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the\n    // implication that sessions cannot be resumed across hot restarts or on different hosts.\n    bool disable_stateless_session_resumption = 7;\n  }\n\n  // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session\n  // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2)\n  // 
<https://tools.ietf.org/html/rfc5077#section-5.6>`\n  // only seconds could be specified (fractional seconds are going to be ignored).\n  google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = {\n    lt {seconds: 4294967296}\n    gte {}\n  }];\n\n  // Config for whether to use certificates if they do not have\n  // an accompanying OCSP response or if the response expires at runtime.\n  // Defaults to LENIENT_STAPLING\n  OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}];\n}\n\n// TLS context shared by both client and server TLS contexts.\n// [#next-free-field: 14]\nmessage CommonTlsContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.extensions.transport_sockets.tls.v3.CommonTlsContext\";\n\n  // Config for Certificate provider to get certificates. This provider should allow certificates to be\n  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n  message CertificateProvider {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider\";\n\n    // opaque name used to specify certificate instances or types. For example, \"ROOTCA\" to specify\n    // a root-certificate (validation context) or \"TLS\" to specify a new tls-certificate.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // Provider specific config.\n    // Note: an implementation is expected to dedup multiple instances of the same config\n    // to maintain a single certificate-provider instance. 
The sharing can happen, for\n    // example, among multiple clusters or between the tls_certificate and validation_context\n    // certificate providers of a cluster.\n    // This config could be supplied inline or (in future) a named xDS resource.\n    oneof config {\n      option (validate.required) = true;\n\n      config.core.v4alpha.TypedExtensionConfig typed_config = 2;\n    }\n  }\n\n  // Similar to CertificateProvider above, but allows the provider instances to be configured on\n  // the client side instead of being sent from the control plane.\n  message CertificateProviderInstance {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance\";\n\n    // Provider instance name. This name must be defined in the client's configuration (e.g., a\n    // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config\n    // field that would be sent in the CertificateProvider message if the config was sent by the\n    // control plane). If not present, defaults to \"default\".\n    //\n    // Instance names should generally be defined not in terms of the underlying provider\n    // implementation (e.g., \"file_watcher\") but rather in terms of the function of the\n    // certificates (e.g., \"foo_deployment_identity\").\n    string instance_name = 1;\n\n    // Opaque name used to specify certificate instances or types. For example, \"ROOTCA\" to specify\n    // a root-certificate (validation context) or \"example.com\" to specify a certificate for a\n    // particular domain. 
Not all provider instances will actually use this field, so the value\n    // defaults to the empty string.\n    string certificate_name = 2;\n  }\n\n  message CombinedCertificateValidationContext {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.\"\n        \"CombinedCertificateValidationContext\";\n\n    // How to validate peer certificates.\n    CertificateValidationContext default_validation_context = 1\n        [(validate.rules).message = {required: true}];\n\n    oneof dynamic_validation_context {\n      // Config for fetching validation context via SDS API. Note SDS API allows certificates to be\n      // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n      // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n      // or validation_context_certificate_provider_instance may be used.\n      SdsSecretConfig validation_context_sds_secret_config = 2\n          [(validate.rules).message = {required: true}];\n\n      // Certificate provider for fetching validation context.\n      // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n      // or validation_context_certificate_provider_instance may be used.\n      // [#not-implemented-hide:]\n      CertificateProvider validation_context_certificate_provider = 3;\n\n      // Certificate provider instance for fetching validation context.\n      // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,\n      // or validation_context_certificate_provider_instance may be used.\n      // [#not-implemented-hide:]\n      CertificateProviderInstance validation_context_certificate_provider_instance = 4;\n    }\n  }\n\n  reserved 5;\n\n  // TLS protocol versions, cipher suites etc.\n  TlsParameters tls_params = 1;\n\n  // :ref:`Multiple TLS certificates <arch_overview_ssl_cert_select>` 
can be associated with the\n  // same context to allow both RSA and ECDSA certificates.\n  //\n  // Only a single TLS certificate is supported in client contexts. In server contexts, the first\n  // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is\n  // used for clients that support ECDSA.\n  repeated TlsCertificate tls_certificates = 2;\n\n  // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be\n  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n  repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6\n      [(validate.rules).repeated = {max_items: 1}];\n\n  // Certificate provider for fetching TLS certificates.\n  // [#not-implemented-hide:]\n  CertificateProvider tls_certificate_certificate_provider = 9;\n\n  // Certificate provider instance for fetching TLS certificates.\n  // [#not-implemented-hide:]\n  CertificateProviderInstance tls_certificate_certificate_provider_instance = 11;\n\n  oneof validation_context_type {\n    // How to validate peer certificates.\n    CertificateValidationContext validation_context = 3;\n\n    // Config for fetching validation context via SDS API. Note SDS API allows certificates to be\n    // fetched/refreshed over the network asynchronously with respect to the TLS handshake.\n    SdsSecretConfig validation_context_sds_secret_config = 7;\n\n    // Combined certificate validation context holds a default CertificateValidationContext\n    // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic\n    // and default CertificateValidationContext are merged into a new CertificateValidationContext\n    // for validation. 
This merge is done by Message::MergeFrom(), so dynamic\n    // CertificateValidationContext overwrites singular fields in default\n    // CertificateValidationContext, and concatenates repeated fields to default\n    // CertificateValidationContext, and logical OR is applied to boolean fields.\n    CombinedCertificateValidationContext combined_validation_context = 8;\n\n    // Certificate provider for fetching validation context.\n    // [#not-implemented-hide:]\n    CertificateProvider validation_context_certificate_provider = 10;\n\n    // Certificate provider instance for fetching validation context.\n    // [#not-implemented-hide:]\n    CertificateProviderInstance validation_context_certificate_provider_instance = 12;\n  }\n\n  // Supplies the list of ALPN protocols that the listener should expose. In\n  // practice this is likely to be set to one of two values (see the\n  // :ref:`codec_type\n  // <envoy_api_field_extensions.filters.network.http_connection_manager.v4alpha.HttpConnectionManager.codec_type>`\n  // parameter in the HTTP connection manager for more information):\n  //\n  // * \"h2,http/1.1\" If the listener is going to support both HTTP/2 and HTTP/1.1.\n  // * \"http/1.1\" If the listener is only going to support HTTP/1.1.\n  //\n  // There is no default for this parameter. If empty, Envoy will not expose ALPN.\n  repeated string alpn_protocols = 4;\n\n  // Custom TLS handshaker. If empty, defaults to native TLS handshaking\n  // behavior.\n  config.core.v4alpha.TypedExtensionConfig custom_handshaker = 13;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.upstreams.http.generic.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.upstreams.http.generic.v3\";\noption java_outer_classname = \"GenericConnectionPoolProtoOuterClass\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Generic Connection Pool]\n\n// A connection pool which forwards downstream HTTP as TCP or HTTP to upstream,\n// based on CONNECT configuration.\n// [#extension: envoy.upstreams.http.generic]\nmessage GenericConnectionPoolProto {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.upstreams.http.http.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.upstreams.http.http.v3\";\noption java_outer_classname = \"HttpConnectionPoolProtoOuterClass\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Http Connection Pool]\n\n// A connection pool which forwards downstream HTTP as HTTP to upstream.\n// [#extension: envoy.upstreams.http.http]\nmessage HttpConnectionPoolProto {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.upstreams.http.tcp.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3\";\noption java_outer_classname = \"TcpConnectionPoolProtoOuterClass\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tcp Connection Pool]\n\n// A connection pool which forwards downstream HTTP as TCP to upstream,\n// [#extension: envoy.upstreams.http.tcp]\nmessage TcpConnectionPoolProto {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/wasm/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.wasm.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.wasm.v3\";\noption java_outer_classname = \"WasmProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Wasm]\n// [#extension: envoy.bootstrap.wasm]\n\n// Configuration for a Wasm VM.\n// [#next-free-field: 7]\nmessage VmConfig {\n  // An ID which will be used along with a hash of the wasm code (or the name of the registered Null\n  // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same\n  // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can\n  // reduce memory utilization and make sharing of data easier which may have security implications.\n  // See ref: \"TODO: add ref\" for details.\n  string vm_id = 1;\n\n  // The Wasm runtime type (either \"v8\" or \"null\" for code compiled into Envoy).\n  string runtime = 2 [(validate.rules).string = {min_len: 1}];\n\n  // The Wasm code that Envoy will execute.\n  config.core.v3.AsyncDataSource code = 3;\n\n  // The Wasm configuration used in initialization of a new VM\n  // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before\n  // passing it to the plugin. 
`google.protobuf.BytesValue` and\n  // `google.protobuf.StringValue` are passed directly without the wrapper.\n  google.protobuf.Any configuration = 4;\n\n  // Allow the wasm file to include pre-compiled code on VMs which support it.\n  // Warning: this should only be enable for trusted sources as the precompiled code is not\n  // verified.\n  bool allow_precompiled = 5;\n\n  // If true and the code needs to be remotely fetched and it is not in the cache then NACK the configuration\n  // update and do a background fetch to fill the cache, otherwise fetch the code asynchronously and enter\n  // warming state.\n  bool nack_on_code_cache_miss = 6;\n}\n\n// Base Configuration for Wasm Plugins e.g. filters and services.\n// [#next-free-field: 6]\nmessage PluginConfig {\n  // A unique name for a filters/services in a VM for use in identifying the filter/service if\n  // multiple filters/services are handled by the same *vm_id* and *root_id* and for\n  // logging/debugging.\n  string name = 1;\n\n  // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts\n  // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all\n  // filters/services with a blank root_id with the same *vm_id* will share Context(s).\n  string root_id = 2;\n\n  // Configuration for finding or starting VM.\n  oneof vm {\n    VmConfig vm_config = 3;\n    // TODO: add referential VM configurations.\n  }\n\n  // Filter/service configuration used to configure or reconfigure a plugin\n  // (proxy_on_configuration).\n  // `google.protobuf.Struct` is serialized as JSON before\n  // passing it to the plugin. `google.protobuf.BytesValue` and\n  // `google.protobuf.StringValue` are passed directly without the wrapper.\n  google.protobuf.Any configuration = 4;\n\n  // If there is a fatal error on the VM (e.g. 
exception, abort(), on_start or on_configure return false),\n  // then all plugins associated with the VM will either fail closed (by default), e.g. by returning an HTTP 503 error,\n  // or fail open (if 'fail_open' is set to true) by bypassing the filter. Note: when on_start or on_configure return false\n  // during xDS updates the xDS configuration will be rejected and when on_start or on_configuration return false on initial\n  // startup the proxy will not start.\n  bool fail_open = 5;\n}\n\n// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService\n// <config_wasm_service>` This opaque configuration will be used to create a Wasm Service.\nmessage WasmService {\n  // General plugin configuration.\n  PluginConfig config = 1;\n\n  // If true, create a single VM rather than creating one VM per worker. Such a singleton can\n  // not be used with filters.\n  bool singleton = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/watchdog/abort_action/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.watchdog.abort_action.v3alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.watchdog.abort_action.v3alpha\";\noption java_outer_classname = \"AbortActionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Watchdog Action that sends a SIGABRT to kill the process.]\n// [#extension: envoy.watchdog.abort_action]\n\n// A GuardDogAction that will terminate the process by sending SIGABRT to the\n// stuck thread. This would allow easier access to the call stack of the stuck\n// thread since we would run signal handlers on that thread. This would be\n// more useful than the default watchdog kill behaviors since those PANIC\n// from the watchdog's thread.\n\n// This is currently only implemented for systems that support kill to send\n// signals.\nmessage AbortActionConfig {\n  // How long to wait for the thread to respond to the SIGABRT before killing the\n  // process from this action. This is a blocking action.\n  google.protobuf.Duration wait_duration = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.watchdog.profile_action.v3alpha;\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.extensions.watchdog.profile_action.v3alpha\";\noption java_outer_classname = \"ProfileActionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).work_in_progress = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Watchdog Action that does CPU profiling.]\n// [#extension: envoy.watchdog.profile_action]\n\n// Configuration for the profile watchdog action.\nmessage ProfileActionConfig {\n  // How long the profile should last. If not set defaults to 5 seconds.\n  google.protobuf.Duration profile_duration = 1;\n\n  // File path to the directory to output profiles.\n  string profile_path = 2 [(validate.rules).string = {min_len: 1}];\n\n  // Limits the max number of profiles that can be generated by this action\n  // over its lifetime to avoid filling the disk.\n  // If not set (i.e. it's 0), a default of 10 will be used.\n  uint64 max_profiles = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/README.md",
    "content": "Protocol buffer definitions for gRPC and REST services.\n\nVisibility should be constrained to none (default).\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/accesslog/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/data/accesslog/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/accesslog/v2/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.accesslog.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/data/accesslog/v2/accesslog.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.accesslog.v2\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Service for streaming access logs from Envoy to an access log server.\nservice AccessLogService {\n  // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different\n  // API for \"critical\" access logs in which Envoy will buffer access logs for some period of time\n  // until it gets an ACK so it could then retry. This API is designed for high throughput with the\n  // expectation that it might be lossy.\n  rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) {\n  }\n}\n\n// Empty response for the StreamAccessLogs API. Will never be sent. See below.\nmessage StreamAccessLogsResponse {\n}\n\n// Stream message for the StreamAccessLogs API. 
Envoy will open a stream to the server and stream\n// access logs without ever expecting a response.\nmessage StreamAccessLogsMessage {\n  message Identifier {\n    // The node sending the access log messages over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig\n    // <envoy_api_msg_config.accesslog.v2.CommonGrpcAccessLogConfig>`.\n    string log_name = 2 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Wrapper for batches of HTTP access log entries.\n  message HTTPAccessLogEntries {\n    repeated data.accesslog.v2.HTTPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Wrapper for batches of TCP access log entries.\n  message TCPAccessLogEntries {\n    repeated data.accesslog.v2.TCPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batches of log entries of a single type. Generally speaking, a given stream should only\n  // ever include one type of log entry.\n  oneof log_entries {\n    option (validate.required) = true;\n\n    HTTPAccessLogEntries http_logs = 2;\n\n    TCPAccessLogEntries tcp_logs = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/accesslog/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/accesslog/v3:pkg\",\n        \"//envoy/service/accesslog/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/accesslog/v3/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.accesslog.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/data/accesslog/v3/accesslog.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.accesslog.v3\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Service for streaming access logs from Envoy to an access log server.\nservice AccessLogService {\n  // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different\n  // API for \"critical\" access logs in which Envoy will buffer access logs for some period of time\n  // until it gets an ACK so it could then retry. This API is designed for high throughput with the\n  // expectation that it might be lossy.\n  rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) {\n  }\n}\n\n// Empty response for the StreamAccessLogs API. Will never be sent. See below.\nmessage StreamAccessLogsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.accesslog.v2.StreamAccessLogsResponse\";\n}\n\n// Stream message for the StreamAccessLogs API. 
Envoy will open a stream to the server and stream\n// access logs without ever expecting a response.\nmessage StreamAccessLogsMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.accesslog.v2.StreamAccessLogsMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v2.StreamAccessLogsMessage.Identifier\";\n\n    // The node sending the access log messages over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig\n    // <envoy_api_msg_extensions.access_loggers.grpc.v3.CommonGrpcAccessLogConfig>`.\n    string log_name = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Wrapper for batches of HTTP access log entries.\n  message HTTPAccessLogEntries {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v2.StreamAccessLogsMessage.HTTPAccessLogEntries\";\n\n    repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Wrapper for batches of TCP access log entries.\n  message TCPAccessLogEntries {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v2.StreamAccessLogsMessage.TCPAccessLogEntries\";\n\n    repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batches of log entries of a single type. 
Generally speaking, a given stream should only\n  // ever include one type of log entry.\n  oneof log_entries {\n    option (validate.required) = true;\n\n    HTTPAccessLogEntries http_logs = 2;\n\n    TCPAccessLogEntries tcp_logs = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/accesslog/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/data/accesslog/v3:pkg\",\n        \"//envoy/service/accesslog/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/accesslog/v4alpha/als.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.accesslog.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/data/accesslog/v3/accesslog.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.accesslog.v4alpha\";\noption java_outer_classname = \"AlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: gRPC Access Log Service (ALS)]\n\n// Service for streaming access logs from Envoy to an access log server.\nservice AccessLogService {\n  // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different\n  // API for \"critical\" access logs in which Envoy will buffer access logs for some period of time\n  // until it gets an ACK so it could then retry. This API is designed for high throughput with the\n  // expectation that it might be lossy.\n  rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) {\n  }\n}\n\n// Empty response for the StreamAccessLogs API. Will never be sent. See below.\nmessage StreamAccessLogsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.accesslog.v3.StreamAccessLogsResponse\";\n}\n\n// Stream message for the StreamAccessLogs API. 
Envoy will open a stream to the server and stream\n// access logs without ever expecting a response.\nmessage StreamAccessLogsMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.accesslog.v3.StreamAccessLogsMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v3.StreamAccessLogsMessage.Identifier\";\n\n    // The node sending the access log messages over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig\n    // <envoy_api_msg_extensions.access_loggers.grpc.v3.CommonGrpcAccessLogConfig>`.\n    string log_name = 2 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Wrapper for batches of HTTP access log entries.\n  message HTTPAccessLogEntries {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v3.StreamAccessLogsMessage.HTTPAccessLogEntries\";\n\n    repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Wrapper for batches of TCP access log entries.\n  message TCPAccessLogEntries {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.accesslog.v3.StreamAccessLogsMessage.TCPAccessLogEntries\";\n\n    repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1\n        [(validate.rules).repeated = {min_items: 1}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batches of log entries of a single type. 
Generally speaking, a given stream should only\n  // ever include one type of log entry.\n  oneof log_entries {\n    option (validate.required) = true;\n\n    HTTPAccessLogEntries http_logs = 2;\n\n    TCPAccessLogEntries tcp_logs = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v2/attribute_context.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v2;\n\nimport \"envoy/api/v2/core/address.proto\";\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v2\";\noption java_outer_classname = \"AttributeContextProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Attribute Context ]\n\n// See :ref:`network filter configuration overview <config_network_filters_ext_authz>`\n// and :ref:`HTTP filter configuration overview <config_http_filters_ext_authz>`.\n\n// An attribute is a piece of metadata that describes an activity on a network.\n// For example, the size of an HTTP request, or the status code of an HTTP response.\n//\n// Each attribute has a type and a name, which is logically defined as a proto message field\n// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes\n// supported by Envoy authorization system.\n// [#comment: The following items are left out of this proto\n// Request.Auth field for jwt tokens\n// Request.Api for api management\n// Origin peer that originated the request\n// Caching Protocol\n// request_context return values to inject back into the filter chain\n// peer.claims -- from X.509 extensions\n// Configuration\n// - field mask to send\n// - which return values from request_context are copied back\n// - which return values are copied into request_headers]\n// [#next-free-field: 12]\nmessage AttributeContext {\n  // This message defines attributes for a node that handles a network request.\n  // The node can be either a service or an application that sends, forwards,\n  // or receives the request. 
Service peers should fill in the `service`,\n  // `principal`, and `labels` as appropriate.\n  // [#next-free-field: 6]\n  message Peer {\n    // The address of the peer, this is typically the IP address.\n    // It can also be UDS path, or others.\n    api.v2.core.Address address = 1;\n\n    // The canonical service name of the peer.\n    // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster\n    // <config_http_conn_man_headers_downstream-service-cluster>`\n    // If a more trusted source of the service name is available through mTLS/secure naming, it\n    // should be used.\n    string service = 2;\n\n    // The labels associated with the peer.\n    // These could be pod labels for Kubernetes or tags for VMs.\n    // The source of the labels could be an X.509 certificate or other configuration.\n    map<string, string> labels = 3;\n\n    // The authenticated identity of this peer.\n    // For example, the identity associated with the workload such as a service account.\n    // If an X.509 certificate is used to assert the identity this field should be sourced from\n    // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.\n    // The primary identity should be the principal. 
The principal format is issuer specific.\n    //\n    // Example:\n    // *    SPIFFE format is `spiffe://trust-domain/path`\n    // *    Google account format is `https://accounts.google.com/{userid}`\n    string principal = 4;\n\n    // The X.509 certificate used to authenticate the identify of this peer.\n    // When present, the certificate contents are encoded in URL and PEM format.\n    string certificate = 5;\n  }\n\n  // Represents a network request, such as an HTTP request.\n  message Request {\n    // The timestamp when the proxy receives the first byte of the request.\n    google.protobuf.Timestamp time = 1;\n\n    // Represents an HTTP request or an HTTP-like request.\n    HttpRequest http = 2;\n  }\n\n  // This message defines attributes for an HTTP request.\n  // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.\n  // [#next-free-field: 12]\n  message HttpRequest {\n    // The unique ID for a request, which can be propagated to downstream\n    // systems. The ID should have low probability of collision\n    // within a single day for a specific service.\n    // For HTTP requests, it should be X-Request-ID or equivalent.\n    string id = 1;\n\n    // The HTTP request method, such as `GET`, `POST`.\n    string method = 2;\n\n    // The HTTP request headers. If multiple headers share the same key, they\n    // must be merged according to the HTTP spec. All header keys must be\n    // lower-cased, because HTTP header keys are case-insensitive.\n    map<string, string> headers = 3;\n\n    // The request target, as it appears in the first line of the HTTP request. This includes\n    // the URL path and query-string. No decoding is performed.\n    string path = 4;\n\n    // The HTTP request `Host` or 'Authority` header value.\n    string host = 5;\n\n    // The HTTP URL scheme, such as `http` and `https`.\n    string scheme = 6;\n\n    // This field is always empty, and exists for compatibility reasons. 
The HTTP URL query is\n    // included in `path` field.\n    string query = 7;\n\n    // This field is always empty, and exists for compatibility reasons. The URL fragment is\n    // not submitted as part of HTTP requests; it is unknowable.\n    string fragment = 8;\n\n    // The HTTP request size in bytes. If unknown, it must be -1.\n    int64 size = 9;\n\n    // The network protocol used with the request, such as \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2\".\n    //\n    // See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all\n    // possible values.\n    string protocol = 10;\n\n    // The HTTP request body.\n    string body = 11;\n  }\n\n  // The source of a network activity, such as starting a TCP connection.\n  // In a multi hop network activity, the source represents the sender of the\n  // last hop.\n  Peer source = 1;\n\n  // The destination of a network activity, such as accepting a TCP connection.\n  // In a multi hop network activity, the destination represents the receiver of\n  // the last hop.\n  Peer destination = 2;\n\n  // Represents a network request, such as an HTTP request.\n  Request request = 4;\n\n  // This is analogous to http_request.headers, however these contents will not be sent to the\n  // upstream server. Context_extensions provide an extension mechanism for sending additional\n  // information to the auth server without modifying the proto definition. It maps to the\n  // internal opaque context in the filter chain.\n  map<string, string> context_extensions = 10;\n\n  // Dynamic metadata associated with the request.\n  api.v2.core.Metadata metadata_context = 11;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v2/external_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/service/auth/v2/attribute_context.proto\";\nimport \"envoy/type/http_status.proto\";\n\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v2\";\noption java_outer_classname = \"ExternalAuthProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Authorization Service ]\n\n// The authorization service request messages used by external authorization :ref:`network filter\n// <config_network_filters_ext_authz>` and :ref:`HTTP filter <config_http_filters_ext_authz>`.\n\n// A generic interface for performing authorization check on incoming\n// requests to a networked service.\nservice Authorization {\n  // Performs authorization check based on the attributes associated with the\n  // incoming request, and returns status `OK` or not `OK`.\n  rpc Check(CheckRequest) returns (CheckResponse) {\n  }\n}\n\nmessage CheckRequest {\n  // The request attributes.\n  AttributeContext attributes = 1;\n}\n\n// HTTP attributes for a denied response.\nmessage DeniedHttpResponse {\n  // This field allows the authorization service to send a HTTP response status\n  // code to the downstream client other than 403 (Forbidden).\n  type.HttpStatus status = 1 [(validate.rules).message = {required: true}];\n\n  // This field allows the authorization service to send HTTP response headers\n  // to the downstream client. 
Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message.\n  repeated api.v2.core.HeaderValueOption headers = 2;\n\n  // This field allows the authorization service to send a response body data\n  // to the downstream client.\n  string body = 3;\n}\n\n// HTTP attributes for an ok response.\nmessage OkHttpResponse {\n  // HTTP entity headers in addition to the original request headers. This allows the authorization\n  // service to append, to add or to override headers from the original request before\n  // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message. By setting the `append` field to `true`,\n  // the filter will append the correspondent header value to the matched request header.\n  // By leaving `append` as false, the filter will either add a new header, or override an existing\n  // one if there is a match.\n  repeated api.v2.core.HeaderValueOption headers = 2;\n}\n\n// Intended for gRPC and Network Authorization servers `only`.\nmessage CheckResponse {\n  // Status `OK` allows the request. Any other status indicates the request should be denied.\n  google.rpc.Status status = 1;\n\n  // An message that contains HTTP response attributes. This message is\n  // used when the authorization service needs to send custom responses to the\n  // downstream client or, to modify/add request headers being dispatched to the upstream.\n  oneof http_response {\n    // Supplies http attributes for a denied response.\n    DeniedHttpResponse denied_response = 2;\n\n    // Supplies http attributes for an ok response.\n    OkHttpResponse ok_response = 3;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v2alpha/BUILD",
    "content": "load(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\n# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\napi_proto_package(\n    has_services = True,\n    deps = [\"//envoy/service/auth/v2:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v2alpha/external_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v2alpha;\n\noption java_multiple_files = true;\noption java_generic_services = true;\noption java_outer_classname = \"CertsProto\";\noption java_package = \"io.envoyproxy.envoy.service.auth.v2alpha\";\n\nimport \"envoy/service/auth/v2/external_auth.proto\";\n\n// [#protodoc-title: Authorization Service ]\n\n// The authorization service request messages used by external authorization :ref:`network filter\n// <config_network_filters_ext_authz>` and :ref:`HTTP filter <config_http_filters_ext_authz>`.\n\n// A generic interface for performing authorization check on incoming\n// requests to a networked service.\nservice Authorization {\n  // Performs authorization check based on the attributes associated with the\n  // incoming request, and returns status `OK` or not `OK`.\n  rpc Check(v2.CheckRequest) returns (v2.CheckResponse);\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/auth/v2:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v3/attribute_context.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v3;\n\nimport \"envoy/config/core/v3/address.proto\";\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v3\";\noption java_outer_classname = \"AttributeContextProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Attribute Context ]\n\n// See :ref:`network filter configuration overview <config_network_filters_ext_authz>`\n// and :ref:`HTTP filter configuration overview <config_http_filters_ext_authz>`.\n\n// An attribute is a piece of metadata that describes an activity on a network.\n// For example, the size of an HTTP request, or the status code of an HTTP response.\n//\n// Each attribute has a type and a name, which is logically defined as a proto message field\n// of the `AttributeContext`. 
The `AttributeContext` is a collection of individual attributes\n// supported by Envoy authorization system.\n// [#comment: The following items are left out of this proto\n// Request.Auth field for jwt tokens\n// Request.Api for api management\n// Origin peer that originated the request\n// Caching Protocol\n// request_context return values to inject back into the filter chain\n// peer.claims -- from X.509 extensions\n// Configuration\n// - field mask to send\n// - which return values from request_context are copied back\n// - which return values are copied into request_headers]\n// [#next-free-field: 12]\nmessage AttributeContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v2.AttributeContext\";\n\n  // This message defines attributes for a node that handles a network request.\n  // The node can be either a service or an application that sends, forwards,\n  // or receives the request. Service peers should fill in the `service`,\n  // `principal`, and `labels` as appropriate.\n  // [#next-free-field: 6]\n  message Peer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v2.AttributeContext.Peer\";\n\n    // The address of the peer, this is typically the IP address.\n    // It can also be UDS path, or others.\n    config.core.v3.Address address = 1;\n\n    // The canonical service name of the peer.\n    // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster\n    // <config_http_conn_man_headers_downstream-service-cluster>`\n    // If a more trusted source of the service name is available through mTLS/secure naming, it\n    // should be used.\n    string service = 2;\n\n    // The labels associated with the peer.\n    // These could be pod labels for Kubernetes or tags for VMs.\n    // The source of the labels could be an X.509 certificate or other configuration.\n    map<string, string> labels = 3;\n\n    // The authenticated identity of this peer.\n    
// For example, the identity associated with the workload such as a service account.\n    // If an X.509 certificate is used to assert the identity this field should be sourced from\n    // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.\n    // The primary identity should be the principal. The principal format is issuer specific.\n    //\n    // Example:\n    // *    SPIFFE format is `spiffe://trust-domain/path`\n    // *    Google account format is `https://accounts.google.com/{userid}`\n    string principal = 4;\n\n    // The X.509 certificate used to authenticate the identity of this peer.\n    // When present, the certificate contents are encoded in URL and PEM format.\n    string certificate = 5;\n  }\n\n  // Represents a network request, such as an HTTP request.\n  message Request {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v2.AttributeContext.Request\";\n\n    // The timestamp when the proxy receives the first byte of the request.\n    google.protobuf.Timestamp time = 1;\n\n    // Represents an HTTP request or an HTTP-like request.\n    HttpRequest http = 2;\n  }\n\n  // This message defines attributes for an HTTP request.\n  // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.\n  // [#next-free-field: 13]\n  message HttpRequest {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v2.AttributeContext.HttpRequest\";\n\n    // The unique ID for a request, which can be propagated to downstream\n    // systems. The ID should have low probability of collision\n    // within a single day for a specific service.\n    // For HTTP requests, it should be X-Request-ID or equivalent.\n    string id = 1;\n\n    // The HTTP request method, such as `GET`, `POST`.\n    string method = 2;\n\n    // The HTTP request headers. If multiple headers share the same key, they\n    // must be merged according to the HTTP spec. 
All header keys must be\n    // lower-cased, because HTTP header keys are case-insensitive.\n    map<string, string> headers = 3;\n\n    // The request target, as it appears in the first line of the HTTP request. This includes\n    // the URL path and query-string. No decoding is performed.\n    string path = 4;\n\n    // The HTTP request `Host` or `Authority` header value.\n    string host = 5;\n\n    // The HTTP URL scheme, such as `http` and `https`.\n    string scheme = 6;\n\n    // This field is always empty, and exists for compatibility reasons. The HTTP URL query is\n    // included in `path` field.\n    string query = 7;\n\n    // This field is always empty, and exists for compatibility reasons. The URL fragment is\n    // not submitted as part of HTTP requests; it is unknowable.\n    string fragment = 8;\n\n    // The HTTP request size in bytes. If unknown, it must be -1.\n    int64 size = 9;\n\n    // The network protocol used with the request, such as \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2\".\n    //\n    // See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all\n    // possible values.\n    string protocol = 10;\n\n    // The HTTP request body.\n    string body = 11;\n\n    // The HTTP request body in bytes. 
This is used instead of\n    // :ref:`body <envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` when\n    // :ref:`pack_as_bytes <envoy_api_field_extensions.filters.http.ext_authz.v3.BufferSettings.pack_as_bytes>`\n    // is set to true.\n    bytes raw_body = 12;\n  }\n\n  // The source of a network activity, such as starting a TCP connection.\n  // In a multi hop network activity, the source represents the sender of the\n  // last hop.\n  Peer source = 1;\n\n  // The destination of a network activity, such as accepting a TCP connection.\n  // In a multi hop network activity, the destination represents the receiver of\n  // the last hop.\n  Peer destination = 2;\n\n  // Represents a network request, such as an HTTP request.\n  Request request = 4;\n\n  // This is analogous to http_request.headers, however these contents will not be sent to the\n  // upstream server. Context_extensions provide an extension mechanism for sending additional\n  // information to the auth server without modifying the proto definition. It maps to the\n  // internal opaque context in the filter chain.\n  map<string, string> context_extensions = 10;\n\n  // Dynamic metadata associated with the request.\n  config.core.v3.Metadata metadata_context = 11;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v3/external_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/service/auth/v3/attribute_context.proto\";\nimport \"envoy/type/v3/http_status.proto\";\n\nimport \"google/protobuf/struct.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v3\";\noption java_outer_classname = \"ExternalAuthProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Authorization Service ]\n\n// The authorization service request messages used by external authorization :ref:`network filter\n// <config_network_filters_ext_authz>` and :ref:`HTTP filter <config_http_filters_ext_authz>`.\n\n// A generic interface for performing authorization check on incoming\n// requests to a networked service.\nservice Authorization {\n  // Performs authorization check based on the attributes associated with the\n  // incoming request, and returns status `OK` or not `OK`.\n  rpc Check(CheckRequest) returns (CheckResponse) {\n  }\n}\n\nmessage CheckRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.service.auth.v2.CheckRequest\";\n\n  // The request attributes.\n  AttributeContext attributes = 1;\n}\n\n// HTTP attributes for a denied response.\nmessage DeniedHttpResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v2.DeniedHttpResponse\";\n\n  // This field allows the authorization service to send a HTTP response status\n  // code to the downstream client other than 403 (Forbidden).\n  type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}];\n\n  // This field allows the authorization service to send HTTP response headers\n  // to the 
downstream client. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message.\n  repeated config.core.v3.HeaderValueOption headers = 2;\n\n  // This field allows the authorization service to send a response body data\n  // to the downstream client.\n  string body = 3;\n}\n\n// HTTP attributes for an OK response.\n// [#next-free-field: 6]\nmessage OkHttpResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v2.OkHttpResponse\";\n\n  // HTTP entity headers in addition to the original request headers. This allows the authorization\n  // service to append, to add or to override headers from the original request before\n  // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message. By setting the `append` field to `true`,\n  // the filter will append the correspondent header value to the matched request header.\n  // By leaving `append` as false, the filter will either add a new header, or override an existing\n  // one if there is a match.\n  repeated config.core.v3.HeaderValueOption headers = 2;\n\n  // HTTP entity headers to remove from the original request before dispatching\n  // it to the upstream. This allows the authorization service to act on auth\n  // related headers (like `Authorization`), process them, and consume them.\n  // Under this model, the upstream will either receive the request (if it's\n  // authorized) or not receive it (if it's not), but will not see headers\n  // containing authorization credentials.\n  //\n  // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as\n  // the header `Host`, may not be removed as that would make the request\n  // malformed. 
If mentioned in `headers_to_remove` these special headers will\n  // be ignored.\n  //\n  // When using the HTTP service this must instead be set by the HTTP\n  // authorization service as a comma separated list like so:\n  // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``.\n  repeated string headers_to_remove = 5;\n\n  // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata\n  // <envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>`. Until it is removed,\n  // setting this field overrides :ref:`CheckResponse.dynamic_metadata\n  // <envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>`.\n  google.protobuf.Struct dynamic_metadata = 3 [deprecated = true];\n}\n\n// Intended for gRPC and Network Authorization servers `only`.\nmessage CheckResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v2.CheckResponse\";\n\n  // Status `OK` allows the request. Any other status indicates the request should be denied.\n  google.rpc.Status status = 1;\n\n  // A message that contains HTTP response attributes. This message is\n  // used when the authorization service needs to send custom responses to the\n  // downstream client or, to modify/add request headers being dispatched to the upstream.\n  oneof http_response {\n    // Supplies http attributes for a denied response.\n    DeniedHttpResponse denied_response = 2;\n\n    // Supplies http attributes for an ok response.\n    OkHttpResponse ok_response = 3;\n  }\n\n  // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next\n  // filter. 
This metadata lives in a namespace specified by the canonical name of extension filter\n  // that requires it:\n  //\n  // - :ref:`envoy.filters.http.ext_authz <config_http_filters_ext_authz_dynamic_metadata>` for HTTP filter.\n  // - :ref:`envoy.filters.network.ext_authz <config_network_filters_ext_authz_dynamic_metadata>` for network filter.\n  google.protobuf.Struct dynamic_metadata = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/auth/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v4alpha/attribute_context.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v4alpha;\n\nimport \"envoy/config/core/v4alpha/address.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v4alpha\";\noption java_outer_classname = \"AttributeContextProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Attribute Context ]\n\n// See :ref:`network filter configuration overview <config_network_filters_ext_authz>`\n// and :ref:`HTTP filter configuration overview <config_http_filters_ext_authz>`.\n\n// An attribute is a piece of metadata that describes an activity on a network.\n// For example, the size of an HTTP request, or the status code of an HTTP response.\n//\n// Each attribute has a type and a name, which is logically defined as a proto message field\n// of the `AttributeContext`. 
The `AttributeContext` is a collection of individual attributes\n// supported by Envoy authorization system.\n// [#comment: The following items are left out of this proto\n// Request.Auth field for jwt tokens\n// Request.Api for api management\n// Origin peer that originated the request\n// Caching Protocol\n// request_context return values to inject back into the filter chain\n// peer.claims -- from X.509 extensions\n// Configuration\n// - field mask to send\n// - which return values from request_context are copied back\n// - which return values are copied into request_headers]\n// [#next-free-field: 12]\nmessage AttributeContext {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v3.AttributeContext\";\n\n  // This message defines attributes for a node that handles a network request.\n  // The node can be either a service or an application that sends, forwards,\n  // or receives the request. Service peers should fill in the `service`,\n  // `principal`, and `labels` as appropriate.\n  // [#next-free-field: 6]\n  message Peer {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v3.AttributeContext.Peer\";\n\n    // The address of the peer, this is typically the IP address.\n    // It can also be UDS path, or others.\n    config.core.v4alpha.Address address = 1;\n\n    // The canonical service name of the peer.\n    // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster\n    // <config_http_conn_man_headers_downstream-service-cluster>`\n    // If a more trusted source of the service name is available through mTLS/secure naming, it\n    // should be used.\n    string service = 2;\n\n    // The labels associated with the peer.\n    // These could be pod labels for Kubernetes or tags for VMs.\n    // The source of the labels could be an X.509 certificate or other configuration.\n    map<string, string> labels = 3;\n\n    // The authenticated identity of this 
peer.\n    // For example, the identity associated with the workload such as a service account.\n    // If an X.509 certificate is used to assert the identity this field should be sourced from\n    // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.\n    // The primary identity should be the principal. The principal format is issuer specific.\n    //\n    // Example:\n    // *    SPIFFE format is `spiffe://trust-domain/path`\n    // *    Google account format is `https://accounts.google.com/{userid}`\n    string principal = 4;\n\n    // The X.509 certificate used to authenticate the identity of this peer.\n    // When present, the certificate contents are encoded in URL and PEM format.\n    string certificate = 5;\n  }\n\n  // Represents a network request, such as an HTTP request.\n  message Request {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v3.AttributeContext.Request\";\n\n    // The timestamp when the proxy receives the first byte of the request.\n    google.protobuf.Timestamp time = 1;\n\n    // Represents an HTTP request or an HTTP-like request.\n    HttpRequest http = 2;\n  }\n\n  // This message defines attributes for an HTTP request.\n  // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.\n  // [#next-free-field: 13]\n  message HttpRequest {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.auth.v3.AttributeContext.HttpRequest\";\n\n    // The unique ID for a request, which can be propagated to downstream\n    // systems. The ID should have low probability of collision\n    // within a single day for a specific service.\n    // For HTTP requests, it should be X-Request-ID or equivalent.\n    string id = 1;\n\n    // The HTTP request method, such as `GET`, `POST`.\n    string method = 2;\n\n    // The HTTP request headers. 
If multiple headers share the same key, they\n    // must be merged according to the HTTP spec. All header keys must be\n    // lower-cased, because HTTP header keys are case-insensitive.\n    map<string, string> headers = 3;\n\n    // The request target, as it appears in the first line of the HTTP request. This includes\n    // the URL path and query-string. No decoding is performed.\n    string path = 4;\n\n    // The HTTP request `Host` or `Authority` header value.\n    string host = 5;\n\n    // The HTTP URL scheme, such as `http` and `https`.\n    string scheme = 6;\n\n    // This field is always empty, and exists for compatibility reasons. The HTTP URL query is\n    // included in `path` field.\n    string query = 7;\n\n    // This field is always empty, and exists for compatibility reasons. The URL fragment is\n    // not submitted as part of HTTP requests; it is unknowable.\n    string fragment = 8;\n\n    // The HTTP request size in bytes. If unknown, it must be -1.\n    int64 size = 9;\n\n    // The network protocol used with the request, such as \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2\".\n    //\n    // See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all\n    // possible values.\n    string protocol = 10;\n\n    // The HTTP request body.\n    string body = 11;\n\n    // The HTTP request body in bytes. 
This is used instead of\n    // :ref:`body <envoy_v3_api_field_service.auth.v3.AttributeContext.HttpRequest.body>` when\n    // :ref:`pack_as_bytes <envoy_api_field_extensions.filters.http.ext_authz.v4alpha.BufferSettings.pack_as_bytes>`\n    // is set to true.\n    bytes raw_body = 12;\n  }\n\n  // The source of a network activity, such as starting a TCP connection.\n  // In a multi hop network activity, the source represents the sender of the\n  // last hop.\n  Peer source = 1;\n\n  // The destination of a network activity, such as accepting a TCP connection.\n  // In a multi hop network activity, the destination represents the receiver of\n  // the last hop.\n  Peer destination = 2;\n\n  // Represents a network request, such as an HTTP request.\n  Request request = 4;\n\n  // This is analogous to http_request.headers, however these contents will not be sent to the\n  // upstream server. Context_extensions provide an extension mechanism for sending additional\n  // information to the auth server without modifying the proto definition. It maps to the\n  // internal opaque context in the filter chain.\n  map<string, string> context_extensions = 10;\n\n  // Dynamic metadata associated with the request.\n  config.core.v4alpha.Metadata metadata_context = 11;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/auth/v4alpha/external_auth.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.auth.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/service/auth/v4alpha/attribute_context.proto\";\nimport \"envoy/type/v3/http_status.proto\";\n\nimport \"google/protobuf/struct.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.auth.v4alpha\";\noption java_outer_classname = \"ExternalAuthProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Authorization Service ]\n\n// The authorization service request messages used by external authorization :ref:`network filter\n// <config_network_filters_ext_authz>` and :ref:`HTTP filter <config_http_filters_ext_authz>`.\n\n// A generic interface for performing authorization check on incoming\n// requests to a networked service.\nservice Authorization {\n  // Performs authorization check based on the attributes associated with the\n  // incoming request, and returns status `OK` or not `OK`.\n  rpc Check(CheckRequest) returns (CheckResponse) {\n  }\n}\n\nmessage CheckRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.service.auth.v3.CheckRequest\";\n\n  // The request attributes.\n  AttributeContext attributes = 1;\n}\n\n// HTTP attributes for a denied response.\nmessage DeniedHttpResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v3.DeniedHttpResponse\";\n\n  // This field allows the authorization service to send a HTTP response status\n  // code to the downstream client other than 403 (Forbidden).\n  type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}];\n\n  // This field allows the authorization service to 
send HTTP response headers\n  // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message.\n  repeated config.core.v4alpha.HeaderValueOption headers = 2;\n\n  // This field allows the authorization service to send a response body data\n  // to the downstream client.\n  string body = 3;\n}\n\n// HTTP attributes for an OK response.\n// [#next-free-field: 6]\nmessage OkHttpResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v3.OkHttpResponse\";\n\n  // HTTP entity headers in addition to the original request headers. This allows the authorization\n  // service to append, to add or to override headers from the original request before\n  // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to\n  // false when used in this message. By setting the `append` field to `true`,\n  // the filter will append the correspondent header value to the matched request header.\n  // By leaving `append` as false, the filter will either add a new header, or override an existing\n  // one if there is a match.\n  repeated config.core.v4alpha.HeaderValueOption headers = 2;\n\n  // HTTP entity headers to remove from the original request before dispatching\n  // it to the upstream. This allows the authorization service to act on auth\n  // related headers (like `Authorization`), process them, and consume them.\n  // Under this model, the upstream will either receive the request (if it's\n  // authorized) or not receive it (if it's not), but will not see headers\n  // containing authorization credentials.\n  //\n  // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as\n  // the header `Host`, may not be removed as that would make the request\n  // malformed. 
If mentioned in `headers_to_remove` these special headers will\n  // be ignored.\n  //\n  // When using the HTTP service this must instead be set by the HTTP\n  // authorization service as a comma separated list like so:\n  // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``.\n  repeated string headers_to_remove = 5;\n\n  // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata\n  // <envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>`. Until it is removed,\n  // setting this field overrides :ref:`CheckResponse.dynamic_metadata\n  // <envoy_v3_api_field_service.auth.v3.CheckResponse.dynamic_metadata>`.\n  google.protobuf.Struct hidden_envoy_deprecated_dynamic_metadata = 3 [deprecated = true];\n}\n\n// Intended for gRPC and Network Authorization servers `only`.\nmessage CheckResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.auth.v3.CheckResponse\";\n\n  // Status `OK` allows the request. Any other status indicates the request should be denied.\n  google.rpc.Status status = 1;\n\n  // A message that contains HTTP response attributes. This message is\n  // used when the authorization service needs to send custom responses to the\n  // downstream client or, to modify/add request headers being dispatched to the upstream.\n  oneof http_response {\n    // Supplies http attributes for a denied response.\n    DeniedHttpResponse denied_response = 2;\n\n    // Supplies http attributes for an ok response.\n    OkHttpResponse ok_response = 3;\n  }\n\n  // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next\n  // filter. 
This metadata lives in a namespace specified by the canonical name of extension filter\n  // that requires it:\n  //\n  // - :ref:`envoy.filters.http.ext_authz <config_http_filters_ext_authz_dynamic_metadata>` for HTTP filter.\n  // - :ref:`envoy.filters.network.ext_authz <config_network_filters_ext_authz_dynamic_metadata>` for network filter.\n  google.protobuf.Struct dynamic_metadata = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/cluster/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/cluster/v3/cds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.cluster.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.cluster.v3\";\noption java_outer_classname = \"CdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: CDS]\n\n// Return list of all clusters this proxy will load balance to.\nservice ClusterDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.cluster.v3.Cluster\";\n\n  rpc StreamClusters(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaClusters(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchClusters(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:clusters\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage CdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.CdsDummy\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v2/ads.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v2\";\noption java_outer_classname = \"AdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Aggregated Discovery Service (ADS)]\n\n// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes,\n// and listeners are retained in the package `envoy.api.v2` for backwards\n// compatibility with existing management servers. New development in discovery\n// services should proceed in the package `envoy.service.discovery.v2`.\n\n// See https://github.com/lyft/envoy-api#apis for a description of the role of\n// ADS and how it is intended to be used by a management server. ADS requests\n// have the same structure as their singleton xDS counterparts, but can\n// multiplex many resource types on a single stream. The type_url in the\n// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover\n// the multiplexed singleton APIs at the Envoy instance and management server.\nservice AggregatedDiscoveryService {\n  // This is a gRPC-only API.\n  rpc StreamAggregatedResources(stream api.v2.DiscoveryRequest)\n      returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc DeltaAggregatedResources(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage AdsDummy {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v2/hds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/health_check.proto\";\nimport \"envoy/api/v2/endpoint/endpoint_components.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v2\";\noption java_outer_classname = \"HdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.health.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Health Discovery Service (HDS)]\n\n// HDS is Health Discovery Service. It complements Envoy’s health checking\n// service by designating this Envoy to be a healthchecker for a subset of hosts\n// in the cluster. The status of these health checks will be reported to the\n// management server, where it can be aggregated etc and redistributed back to\n// Envoy through EDS.\nservice HealthDiscoveryService {\n  // 1. Envoy starts up and if its can_healthcheck option in the static\n  //    bootstrap config is enabled, sends HealthCheckRequest to the management\n  //    server. It supplies its capabilities (which protocol it can health check\n  //    with, what zone it resides in, etc.).\n  // 2. In response to (1), the management server designates this Envoy as a\n  //    healthchecker to health check a subset of all upstream hosts for a given\n  //    cluster (for example upstream Host 1 and Host 2). It streams\n  //    HealthCheckSpecifier messages with cluster related configuration for all\n  //    clusters this Envoy is designated to health check. Subsequent\n  //    HealthCheckSpecifier message will be sent on changes to:\n  //    a. Endpoints to health checks\n  //    b. 
Per cluster configuration change\n  // 3. Envoy creates a health probe based on the HealthCheck config and sends\n  //    it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck\n  //    configuration Envoy waits upon the arrival of the probe response and\n  //    looks at the content of the response to decide whether the endpoint is\n  //    healthy or not. If a response hasn't been received within the timeout\n  //    interval, the endpoint health status is considered TIMEOUT.\n  // 4. Envoy reports results back in an EndpointHealthResponse message.\n  //    Envoy streams responses as often as the interval configured by the\n  //    management server in HealthCheckSpecifier.\n  // 5. The management Server collects health statuses for all endpoints in the\n  //    cluster (for all clusters) and uses this information to construct\n  //    EndpointDiscoveryResponse messages.\n  // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load\n  //    balances traffic to them without additional health checking. It may\n  //    use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection\n  //    failed to a particular endpoint to account for health status propagation\n  //    delay between HDS and EDS).\n  // By default, can_healthcheck is true. If can_healthcheck is false, Cluster\n  // configuration may not contain HealthCheck message.\n  // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above\n  // invariant?\n  // TODO(htuch): Add @amb67's diagram.\n  rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse)\n      returns (stream HealthCheckSpecifier) {\n  }\n\n  // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of\n  // request/response. 
Should we add an identifier to the HealthCheckSpecifier\n  // to bind with the response?\n  rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {\n    option (google.api.http).post = \"/v2/discovery:health_check\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Defines supported protocols etc, so the management server can assign proper\n// endpoints to healthcheck.\nmessage Capability {\n  // Different Envoy instances may have different capabilities (e.g. Redis)\n  // and/or have ports enabled for different protocols.\n  enum Protocol {\n    HTTP = 0;\n    TCP = 1;\n    REDIS = 2;\n  }\n\n  repeated Protocol health_check_protocols = 1;\n}\n\nmessage HealthCheckRequest {\n  api.v2.core.Node node = 1;\n\n  Capability capability = 2;\n}\n\nmessage EndpointHealth {\n  api.v2.endpoint.Endpoint endpoint = 1;\n\n  api.v2.core.HealthStatus health_status = 2;\n}\n\nmessage EndpointHealthResponse {\n  repeated EndpointHealth endpoints_health = 1;\n}\n\nmessage HealthCheckRequestOrEndpointHealthResponse {\n  oneof request_type {\n    HealthCheckRequest health_check_request = 1;\n\n    EndpointHealthResponse endpoint_health_response = 2;\n  }\n}\n\nmessage LocalityEndpoints {\n  api.v2.core.Locality locality = 1;\n\n  repeated api.v2.endpoint.Endpoint endpoints = 2;\n}\n\n// The cluster name and locality is provided to Envoy for the endpoints that it\n// health checks to support statistics reporting, logging and debugging by the\n// Envoy instance (outside of HDS). For maximum usefulness, it should match the\n// same cluster structure as that provided by EDS.\nmessage ClusterHealthCheck {\n  string cluster_name = 1;\n\n  repeated api.v2.core.HealthCheck health_checks = 2;\n\n  repeated LocalityEndpoints locality_endpoints = 3;\n}\n\nmessage HealthCheckSpecifier {\n  repeated ClusterHealthCheck cluster_health_checks = 1;\n\n  // The default is 1 second.\n  google.protobuf.Duration interval = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v2/rtds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v2\";\noption java_outer_classname = \"RtdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.runtime.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Runtime Discovery Service (RTDS)]\n// RTDS :ref:`configuration overview <config_runtime_rtds>`\n\n// Discovery service for Runtime resources.\nservice RuntimeDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.service.discovery.v2.Runtime\";\n\n  rpc StreamRuntime(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc DeltaRuntime(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchRuntime(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:runtime\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage RtdsDummy {\n}\n\n// RTDS resource type. This describes a layer in the runtime virtual filesystem.\nmessage Runtime {\n  // Runtime resource name. This makes the Runtime a self-describing xDS\n  // resource.\n  string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  google.protobuf.Struct layer = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v2/sds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v2\";\noption java_outer_classname = \"SdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.secret.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Secret Discovery Service (SDS)]\n\nservice SecretDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.api.v2.auth.Secret\";\n\n  rpc DeltaSecrets(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n\n  rpc StreamSecrets(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc FetchSecrets(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:secrets\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage SdsDummy {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v3/ads.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v3\";\noption java_outer_classname = \"AdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Aggregated Discovery Service (ADS)]\n\n// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes,\n// and listeners are retained in the package `envoy.api.v2` for backwards\n// compatibility with existing management servers. New development in discovery\n// services should proceed in the package `envoy.service.discovery.v2`.\n\n// See https://github.com/lyft/envoy-api#apis for a description of the role of\n// ADS and how it is intended to be used by a management server. ADS requests\n// have the same structure as their singleton xDS counterparts, but can\n// multiplex many resource types on a single stream. The type_url in the\n// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover\n// the multiplexed singleton APIs at the Envoy instance and management server.\nservice AggregatedDiscoveryService {\n  // This is a gRPC-only API.\n  rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest)\n      returns (stream DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage AdsDummy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.AdsDummy\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v3/discovery.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\nimport \"udpa/core/v1/resource_name.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v3\";\noption java_outer_classname = \"DiscoveryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Common discovery API components]\n\n// A DiscoveryRequest requests a set of versioned resources of the same type for\n// a given Envoy node on some API.\n// [#next-free-field: 7]\nmessage DiscoveryRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.DiscoveryRequest\";\n\n  // The version_info provided in the request messages will be the version_info\n  // received with the most recent successfully processed response or empty on\n  // the first request. It is expected that no new request is sent after a\n  // response is received until the Envoy instance is ready to ACK/NACK the new\n  // configuration. ACK/NACK takes place by returning the new API config version\n  // as applied or the previous API config version respectively. Each type_url\n  // (see below) has an independent version associated with it.\n  string version_info = 1;\n\n  // The node making the request.\n  config.core.v3.Node node = 2;\n\n  // List of resources to subscribe to, e.g. list of cluster names or a route\n  // configuration name. If this is empty, all resources for the API are\n  // returned. LDS/CDS may have empty resource_names, which will cause all\n  // resources for the Envoy instance to be returned. 
The LDS and CDS responses\n  // will then imply a number of resources that need to be fetched via EDS/RDS,\n  // which will be explicitly enumerated in resource_names.\n  repeated string resource_names = 3;\n\n  // Type of the resource that is being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This is implicit\n  // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is\n  // required for ADS.\n  string type_url = 4;\n\n  // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above\n  // discussion on version_info and the DiscoveryResponse nonce comment. This\n  // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP,\n  // or 2) the client has not yet accepted an update in this xDS stream (unlike\n  // delta, where it is populated only for new explicit ACKs).\n  string response_nonce = 5;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_service.discovery.v3.DiscoveryResponse>`\n  // failed to update configuration. The *message* field in *error_details* provides the Envoy\n  // internal exception related to the failure. It is only intended for consumption during manual\n  // debugging, the string provided is not guaranteed to be stable across Envoy versions.\n  google.rpc.Status error_detail = 6;\n}\n\n// [#next-free-field: 7]\nmessage DiscoveryResponse {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.DiscoveryResponse\";\n\n  // The version of the response data.\n  string version_info = 1;\n\n  // The response resources. These resources are typed and depend on the API being called.\n  repeated google.protobuf.Any resources = 2;\n\n  // [#not-implemented-hide:]\n  // Canary is used to support two Envoy command line flags:\n  //\n  // * --terminate-on-canary-transition-failure. When set, Envoy is able to\n  //   terminate if it detects that configuration is stuck at canary. 
Consider\n  //   this example sequence of updates:\n  //   - Management server applies a canary config successfully.\n  //   - Management server rolls back to a production config.\n  //   - Envoy rejects the new production config.\n  //   Since there is no sensible way to continue receiving configuration\n  //   updates, Envoy will then terminate and apply production config from a\n  //   clean slate.\n  // * --dry-run-canary. When set, a canary response will never be applied, only\n  //   validated via a dry run.\n  bool canary = 3;\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty).\n  string type_url = 4;\n\n  // For gRPC based subscriptions, the nonce provides a way to explicitly ack a\n  // specific DiscoveryResponse in a following DiscoveryRequest. Additional\n  // messages may have been sent by Envoy to the management server for the\n  // previous version on the stream prior to this DiscoveryResponse, that were\n  // unprocessed at response send time. The nonce allows the management server\n  // to ignore any further DiscoveryRequests for the previous version until a\n  // DiscoveryRequest bearing the nonce. The nonce is optional and is not\n  // required for non-stream based xDS implementations.\n  string nonce = 5;\n\n  // [#not-implemented-hide:]\n  // The control plane instance that sent the response.\n  config.core.v3.ControlPlane control_plane = 6;\n}\n\n// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC\n// endpoint for Delta xDS.\n//\n// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full\n// snapshot of the tracked resources. 
Instead, DeltaDiscoveryResponses are a\n// diff to the state of a xDS client.\n// In Delta XDS there are per-resource versions, which allow tracking state at\n// the resource granularity.\n// An xDS Delta session is always in the context of a gRPC bidirectional\n// stream. This allows the xDS server to keep track of the state of xDS clients\n// connected to it.\n//\n// In Delta xDS the nonce field is required and used to pair\n// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK.\n// Optionally, a response message level system_version_info is present for\n// debugging purposes only.\n//\n// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest\n// can be either or both of: [1] informing the server of what resources the\n// client has gained/lost interest in (using resource_names_subscribe and\n// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from\n// the server (using response_nonce, with presence of error_detail making it a NACK).\n// Additionally, the first message (for a given type_url) of a reconnected gRPC stream\n// has a third role: informing the server of the resources (and their versions)\n// that the client already possesses, using the initial_resource_versions field.\n//\n// As with state-of-the-world, when multiple resource types are multiplexed (ADS),\n// all requests/acknowledgments/updates are logically walled off by type_url:\n// a Cluster ACK exists in a completely separate world from a prior Route NACK.\n// In particular, initial_resource_versions being sent at the \"start\" of every\n// gRPC stream actually entails a message for each type_url, each with its own\n// initial_resource_versions.\n// [#next-free-field: 10]\nmessage DeltaDiscoveryRequest {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.DeltaDiscoveryRequest\";\n\n  // The node making the request.\n  config.core.v3.Node node = 1;\n\n  // Type of the resource that is being requested, e.g.\n  // 
\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This does not need to be set if\n  // resources are only referenced via *udpa_resources_subscribe* and\n  // *udpa_resources_unsubscribe*.\n  string type_url = 2;\n\n  // DeltaDiscoveryRequests allow the client to add or remove individual\n  // resources to the set of tracked resources in the context of a stream.\n  // All resource names in the resource_names_subscribe list are added to the\n  // set of tracked resources and all resource names in the resource_names_unsubscribe\n  // list are removed from the set of tracked resources.\n  //\n  // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or\n  // resource_names_unsubscribe list simply means that no resources are to be\n  // added or removed to the resource list.\n  // *Like* state-of-the-world xDS, the server must send updates for all tracked\n  // resources, but can also send updates for resources the client has not subscribed to.\n  //\n  // NOTE: the server must respond with all resources listed in resource_names_subscribe,\n  // even if it believes the client has the most recent version of them. The reason:\n  // the client may have dropped them, but then regained interest before it had a chance\n  // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd.\n  //\n  // These two fields can be set in any DeltaDiscoveryRequest, including ACKs\n  // and initial_resource_versions.\n  //\n  // A list of Resource names to add to the list of tracked resources.\n  repeated string resource_names_subscribe = 3;\n\n  // As with *resource_names_subscribe* but used when subscribing to resources indicated\n  // by a *udpa.core.v1.ResourceLocator*. 
The directives in the resource locator\n  // are ignored and the context parameters are matched with\n  // *context_param_specifier* specific semantics.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8;\n\n  // A list of Resource names to remove from the list of tracked resources.\n  repeated string resource_names_unsubscribe = 4;\n\n  // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a\n  // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed\n  // resource locator provided in *udpa_resources_subscribe*.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9;\n\n  // Informs the server of the versions of the resources the xDS client knows of, to enable the\n  // client to continue the same logical xDS session even in the face of gRPC stream reconnection.\n  // It will not be populated: [1] in the very first stream of a session, since the client will\n  // not yet have any resources,  [2] in any message after the first in a stream (for a given\n  // type_url), since the server will already be correctly tracking the client's state.\n  // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.)\n  // The map's keys are names of xDS resources known to the xDS client.\n  // The map's values are opaque resource versions.\n  map<string, string> initial_resource_versions = 5;\n\n  // When the DeltaDiscoveryRequest is a ACK or NACK message in response\n  // to a previous DeltaDiscoveryResponse, the response_nonce must be the\n  // nonce in the DeltaDiscoveryResponse.\n  // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted.\n  string response_nonce = 6;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_service.discovery.v3.DiscoveryResponse>`\n  // failed to update configuration. 
The *message* field in *error_details*\n  // provides the Envoy internal exception related to the failure.\n  google.rpc.Status error_detail = 7;\n}\n\n// [#next-free-field: 8]\nmessage DeltaDiscoveryResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.api.v2.DeltaDiscoveryResponse\";\n\n  // The version of the response data (used for debugging).\n  string system_version_info = 1;\n\n  // The response resources. These are typed resources, whose types must match\n  // the type_url field.\n  repeated Resource resources = 2;\n\n  // field id 3 IS available!\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty.\n  // This does not need to be set if *udpa_removed_resources* is used instead of\n  // *removed_resources*.\n  string type_url = 4;\n\n  // Resource names of resources that have been deleted and to be removed from the xDS Client.\n  // Removed resources for missing resources can be ignored.\n  repeated string removed_resources = 6;\n\n  // As with *removed_resources* but used when a removed resource was named in\n  // its *Resource*s with a *udpa.core.v1.ResourceName*.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceName udpa_removed_resources = 7;\n\n  // The nonce provides a way for DeltaDiscoveryRequests to uniquely\n  // reference a DeltaDiscoveryResponse when (N)ACKing. 
The nonce is required.\n  string nonce = 5;\n}\n\n// [#next-free-field: 6]\nmessage Resource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.Resource\";\n\n  // The resource's name, to distinguish it from others of the same type of resource.\n  string name = 3 [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered.\n  udpa.core.v1.ResourceName udpa_resource_name = 5\n      [(udpa.annotations.field_migrate).oneof_promotion = \"name_specifier\"];\n\n  // The aliases are a list of other names that this resource can go by.\n  repeated string aliases = 4;\n\n  // The resource level version. It allows xDS to track the state of individual\n  // resources.\n  string version = 1;\n\n  // The resource being tracked.\n  google.protobuf.Any resource = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v4alpha/ads.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v4alpha;\n\nimport \"envoy/service/discovery/v4alpha/discovery.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v4alpha\";\noption java_outer_classname = \"AdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Aggregated Discovery Service (ADS)]\n\n// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes,\n// and listeners are retained in the package `envoy.api.v2` for backwards\n// compatibility with existing management servers. New development in discovery\n// services should proceed in the package `envoy.service.discovery.v2`.\n\n// See https://github.com/lyft/envoy-api#apis for a description of the role of\n// ADS and how it is intended to be used by a management server. ADS requests\n// have the same structure as their singleton xDS counterparts, but can\n// multiplex many resource types on a single stream. The type_url in the\n// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover\n// the multiplexed singleton APIs at the Envoy instance and management server.\nservice AggregatedDiscoveryService {\n  // This is a gRPC-only API.\n  rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) {\n  }\n\n  rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest)\n      returns (stream DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage AdsDummy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.AdsDummy\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/discovery/v4alpha/discovery.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.discovery.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/rpc/status.proto\";\n\nimport \"udpa/core/v1/resource_locator.proto\";\nimport \"udpa/core/v1/resource_name.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.discovery.v4alpha\";\noption java_outer_classname = \"DiscoveryProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Common discovery API components]\n\n// A DiscoveryRequest requests a set of versioned resources of the same type for\n// a given Envoy node on some API.\n// [#next-free-field: 7]\nmessage DiscoveryRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.DiscoveryRequest\";\n\n  // The version_info provided in the request messages will be the version_info\n  // received with the most recent successfully processed response or empty on\n  // the first request. It is expected that no new request is sent after a\n  // response is received until the Envoy instance is ready to ACK/NACK the new\n  // configuration. ACK/NACK takes place by returning the new API config version\n  // as applied or the previous API config version respectively. Each type_url\n  // (see below) has an independent version associated with it.\n  string version_info = 1;\n\n  // The node making the request.\n  config.core.v4alpha.Node node = 2;\n\n  // List of resources to subscribe to, e.g. list of cluster names or a route\n  // configuration name. If this is empty, all resources for the API are\n  // returned. LDS/CDS may have empty resource_names, which will cause all\n  // resources for the Envoy instance to be returned. 
The LDS and CDS responses\n  // will then imply a number of resources that need to be fetched via EDS/RDS,\n  // which will be explicitly enumerated in resource_names.\n  repeated string resource_names = 3;\n\n  // Type of the resource that is being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This is implicit\n  // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is\n  // required for ADS.\n  string type_url = 4;\n\n  // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above\n  // discussion on version_info and the DiscoveryResponse nonce comment. This\n  // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP,\n  // or 2) the client has not yet accepted an update in this xDS stream (unlike\n  // delta, where it is populated only for new explicit ACKs).\n  string response_nonce = 5;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_service.discovery.v4alpha.DiscoveryResponse>`\n  // failed to update configuration. The *message* field in *error_details* provides the Envoy\n  // internal exception related to the failure. It is only intended for consumption during manual\n  // debugging, the string provided is not guaranteed to be stable across Envoy versions.\n  google.rpc.Status error_detail = 6;\n}\n\n// [#next-free-field: 7]\nmessage DiscoveryResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.DiscoveryResponse\";\n\n  // The version of the response data.\n  string version_info = 1;\n\n  // The response resources. These resources are typed and depend on the API being called.\n  repeated google.protobuf.Any resources = 2;\n\n  // [#not-implemented-hide:]\n  // Canary is used to support two Envoy command line flags:\n  //\n  // * --terminate-on-canary-transition-failure. When set, Envoy is able to\n  //   terminate if it detects that configuration is stuck at canary. 
Consider\n  //   this example sequence of updates:\n  //   - Management server applies a canary config successfully.\n  //   - Management server rolls back to a production config.\n  //   - Envoy rejects the new production config.\n  //   Since there is no sensible way to continue receiving configuration\n  //   updates, Envoy will then terminate and apply production config from a\n  //   clean slate.\n  // * --dry-run-canary. When set, a canary response will never be applied, only\n  //   validated via a dry run.\n  bool canary = 3;\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty).\n  string type_url = 4;\n\n  // For gRPC based subscriptions, the nonce provides a way to explicitly ack a\n  // specific DiscoveryResponse in a following DiscoveryRequest. Additional\n  // messages may have been sent by Envoy to the management server for the\n  // previous version on the stream prior to this DiscoveryResponse, that were\n  // unprocessed at response send time. The nonce allows the management server\n  // to ignore any further DiscoveryRequests for the previous version until a\n  // DiscoveryRequest bearing the nonce. The nonce is optional and is not\n  // required for non-stream based xDS implementations.\n  string nonce = 5;\n\n  // [#not-implemented-hide:]\n  // The control plane instance that sent the response.\n  config.core.v4alpha.ControlPlane control_plane = 6;\n}\n\n// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC\n// endpoint for Delta xDS.\n//\n// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full\n// snapshot of the tracked resources. 
Instead, DeltaDiscoveryResponses are a\n// diff to the state of a xDS client.\n// In Delta XDS there are per-resource versions, which allow tracking state at\n// the resource granularity.\n// An xDS Delta session is always in the context of a gRPC bidirectional\n// stream. This allows the xDS server to keep track of the state of xDS clients\n// connected to it.\n//\n// In Delta xDS the nonce field is required and used to pair\n// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK.\n// Optionally, a response message level system_version_info is present for\n// debugging purposes only.\n//\n// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest\n// can be either or both of: [1] informing the server of what resources the\n// client has gained/lost interest in (using resource_names_subscribe and\n// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from\n// the server (using response_nonce, with presence of error_detail making it a NACK).\n// Additionally, the first message (for a given type_url) of a reconnected gRPC stream\n// has a third role: informing the server of the resources (and their versions)\n// that the client already possesses, using the initial_resource_versions field.\n//\n// As with state-of-the-world, when multiple resource types are multiplexed (ADS),\n// all requests/acknowledgments/updates are logically walled off by type_url:\n// a Cluster ACK exists in a completely separate world from a prior Route NACK.\n// In particular, initial_resource_versions being sent at the \"start\" of every\n// gRPC stream actually entails a message for each type_url, each with its own\n// initial_resource_versions.\n// [#next-free-field: 10]\nmessage DeltaDiscoveryRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.DeltaDiscoveryRequest\";\n\n  // The node making the request.\n  config.core.v4alpha.Node node = 1;\n\n  // Type of the resource that is 
being requested, e.g.\n  // \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\". This does not need to be set if\n  // resources are only referenced via *udpa_resources_subscribe* and\n  // *udpa_resources_unsubscribe*.\n  string type_url = 2;\n\n  // DeltaDiscoveryRequests allow the client to add or remove individual\n  // resources to the set of tracked resources in the context of a stream.\n  // All resource names in the resource_names_subscribe list are added to the\n  // set of tracked resources and all resource names in the resource_names_unsubscribe\n  // list are removed from the set of tracked resources.\n  //\n  // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or\n  // resource_names_unsubscribe list simply means that no resources are to be\n  // added or removed to the resource list.\n  // *Like* state-of-the-world xDS, the server must send updates for all tracked\n  // resources, but can also send updates for resources the client has not subscribed to.\n  //\n  // NOTE: the server must respond with all resources listed in resource_names_subscribe,\n  // even if it believes the client has the most recent version of them. The reason:\n  // the client may have dropped them, but then regained interest before it had a chance\n  // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd.\n  //\n  // These two fields can be set in any DeltaDiscoveryRequest, including ACKs\n  // and initial_resource_versions.\n  //\n  // A list of Resource names to add to the list of tracked resources.\n  repeated string resource_names_subscribe = 3;\n\n  // As with *resource_names_subscribe* but used when subscribing to resources indicated\n  // by a *udpa.core.v1.ResourceLocator*. 
The directives in the resource locator\n  // are ignored and the context parameters are matched with\n  // *context_param_specifier* specific semantics.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8;\n\n  // A list of Resource names to remove from the list of tracked resources.\n  repeated string resource_names_unsubscribe = 4;\n\n  // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a\n  // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed\n  // resource locator provided in *udpa_resources_subscribe*.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9;\n\n  // Informs the server of the versions of the resources the xDS client knows of, to enable the\n  // client to continue the same logical xDS session even in the face of gRPC stream reconnection.\n  // It will not be populated: [1] in the very first stream of a session, since the client will\n  // not yet have any resources,  [2] in any message after the first in a stream (for a given\n  // type_url), since the server will already be correctly tracking the client's state.\n  // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.)\n  // The map's keys are names of xDS resources known to the xDS client.\n  // The map's values are opaque resource versions.\n  map<string, string> initial_resource_versions = 5;\n\n  // When the DeltaDiscoveryRequest is a ACK or NACK message in response\n  // to a previous DeltaDiscoveryResponse, the response_nonce must be the\n  // nonce in the DeltaDiscoveryResponse.\n  // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted.\n  string response_nonce = 6;\n\n  // This is populated when the previous :ref:`DiscoveryResponse <envoy_api_msg_service.discovery.v4alpha.DiscoveryResponse>`\n  // failed to update configuration. 
The *message* field in *error_details*\n  // provides the Envoy internal exception related to the failure.\n  google.rpc.Status error_detail = 7;\n}\n\n// [#next-free-field: 8]\nmessage DeltaDiscoveryResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.DeltaDiscoveryResponse\";\n\n  // The version of the response data (used for debugging).\n  string system_version_info = 1;\n\n  // The response resources. These are typed resources, whose types must match\n  // the type_url field.\n  repeated Resource resources = 2;\n\n  // field id 3 IS available!\n\n  // Type URL for resources. Identifies the xDS API when muxing over ADS.\n  // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty.\n  // This does not need to be set if *udpa_removed_resources* is used instead of\n  // *removed_resources*.\n  string type_url = 4;\n\n  // Resource names of resources that have been deleted and to be removed from the xDS Client.\n  // Removed resources for missing resources can be ignored.\n  repeated string removed_resources = 6;\n\n  // As with *removed_resources* but used when a removed resource was named in\n  // its *Resource*s with a *udpa.core.v1.ResourceName*.\n  // [#not-implemented-hide:]\n  repeated udpa.core.v1.ResourceName udpa_removed_resources = 7;\n\n  // The nonce provides a way for DeltaDiscoveryRequests to uniquely\n  // reference a DeltaDiscoveryResponse when (N)ACKing. 
The nonce is required.\n  string nonce = 5;\n}\n\n// [#next-free-field: 6]\nmessage Resource {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v3.Resource\";\n\n  oneof name_specifier {\n    // The resource's name, to distinguish it from others of the same type of resource.\n    string name = 3;\n\n    // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered.\n    udpa.core.v1.ResourceName udpa_resource_name = 5;\n  }\n\n  // The aliases are a list of other names that this resource can go by.\n  repeated string aliases = 4;\n\n  // The resource level version. It allows xDS to track the state of individual\n  // resources.\n  string version = 1;\n\n  // The resource being tracked.\n  google.protobuf.Any resource = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/endpoint/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/endpoint/v3/eds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.endpoint.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.endpoint.v3\";\noption java_outer_classname = \"EdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: EDS]\n// Endpoint discovery :ref:`architecture overview <arch_overview_service_discovery_types_eds>`\n\nservice EndpointDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.endpoint.v3.ClusterLoadAssignment\";\n\n  // The resource_names field in DiscoveryRequest specifies a list of clusters\n  // to subscribe to updates for.\n  rpc StreamEndpoints(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaEndpoints(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchEndpoints(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:endpoints\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage EdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.EdsDummy\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.event_reporting.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.event_reporting.v2alpha\";\noption java_outer_classname = \"EventReportingServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.service.event_reporting.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: gRPC Event Reporting Service]\n\n// [#not-implemented-hide:]\n// Service for streaming different types of events from Envoy to a server. The examples of\n// such events may be health check or outlier detection events.\nservice EventReportingService {\n  // Envoy will connect and send StreamEventsRequest messages forever.\n  // The management server may send StreamEventsResponse to configure event stream. See below.\n  // This API is designed for high throughput with the expectation that it might be lossy.\n  rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) {\n  }\n}\n\n// [#not-implemented-hide:]\n// An events envoy sends to the management server.\nmessage StreamEventsRequest {\n  message Identifier {\n    // The node sending the event messages over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batch of events. When the stream is already active, it will be the events occurred\n  // since the last message had been sent. 
If the server receives unknown event type, it should\n  // silently ignore it.\n  //\n  // The following events are supported:\n  //\n  // * :ref:`HealthCheckEvent <envoy_api_msg_data.core.v2alpha.HealthCheckEvent>`\n  // * :ref:`OutlierDetectionEvent <envoy_api_msg_data.cluster.v2alpha.OutlierDetectionEvent>`\n  repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#not-implemented-hide:]\n// The management server may send envoy a StreamEventsResponse to tell which events the server\n// is interested in. In future, with aggregated event reporting service, this message will\n// contain, for example, clusters the envoy should send events for, or event types the server\n// wants to process.\nmessage StreamEventsResponse {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/event_reporting/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/event_reporting/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.event_reporting.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.event_reporting.v3\";\noption java_outer_classname = \"EventReportingServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: gRPC Event Reporting Service]\n\n// [#not-implemented-hide:]\n// Service for streaming different types of events from Envoy to a server. The examples of\n// such events may be health check or outlier detection events.\nservice EventReportingService {\n  // Envoy will connect and send StreamEventsRequest messages forever.\n  // The management server may send StreamEventsResponse to configure event stream. See below.\n  // This API is designed for high throughput with the expectation that it might be lossy.\n  rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) {\n  }\n}\n\n// [#not-implemented-hide:]\n// An events envoy sends to the management server.\nmessage StreamEventsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.event_reporting.v2alpha.StreamEventsRequest\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.event_reporting.v2alpha.StreamEventsRequest.Identifier\";\n\n    // The node sending the event messages over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. 
This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batch of events. When the stream is already active, it will be the events occurred\n  // since the last message had been sent. If the server receives unknown event type, it should\n  // silently ignore it.\n  //\n  // The following events are supported:\n  //\n  // * :ref:`HealthCheckEvent <envoy_api_msg_data.core.v3.HealthCheckEvent>`\n  // * :ref:`OutlierDetectionEvent <envoy_api_msg_data.cluster.v3.OutlierDetectionEvent>`\n  repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#not-implemented-hide:]\n// The management server may send envoy a StreamEventsResponse to tell which events the server\n// is interested in. In future, with aggregated event reporting service, this message will\n// contain, for example, clusters the envoy should send events for, or event types the server\n// wants to process.\nmessage StreamEventsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.event_reporting.v2alpha.StreamEventsResponse\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/event_reporting/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/event_reporting/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/event_reporting/v4alpha/event_reporting_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.event_reporting.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.event_reporting.v4alpha\";\noption java_outer_classname = \"EventReportingServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: gRPC Event Reporting Service]\n\n// [#not-implemented-hide:]\n// Service for streaming different types of events from Envoy to a server. The examples of\n// such events may be health check or outlier detection events.\nservice EventReportingService {\n  // Envoy will connect and send StreamEventsRequest messages forever.\n  // The management server may send StreamEventsResponse to configure event stream. See below.\n  // This API is designed for high throughput with the expectation that it might be lossy.\n  rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) {\n  }\n}\n\n// [#not-implemented-hide:]\n// An events envoy sends to the management server.\nmessage StreamEventsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.event_reporting.v3.StreamEventsRequest\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.event_reporting.v3.StreamEventsRequest.Identifier\";\n\n    // The node sending the event messages over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data that will only be sent in the first message on the stream. 
This is effectively\n  // structured metadata and is a performance optimization.\n  Identifier identifier = 1;\n\n  // Batch of events. When the stream is already active, it will be the events occurred\n  // since the last message had been sent. If the server receives unknown event type, it should\n  // silently ignore it.\n  //\n  // The following events are supported:\n  //\n  // * :ref:`HealthCheckEvent <envoy_api_msg_data.core.v3.HealthCheckEvent>`\n  // * :ref:`OutlierDetectionEvent <envoy_api_msg_data.cluster.v3.OutlierDetectionEvent>`\n  repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// [#not-implemented-hide:]\n// The management server may send envoy a StreamEventsResponse to tell which events the server\n// is interested in. In future, with aggregated event reporting service, this message will\n// contain, for example, clusters the envoy should send events for, or event types the server\n// wants to process.\nmessage StreamEventsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.event_reporting.v3.StreamEventsResponse\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/extension/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/extension/v3/config_discovery.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.extension.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.extension.v3\";\noption java_outer_classname = \"ConfigDiscoveryProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Extension Config Discovery Service (ECDS)]\n\n// Return extension configurations.\nservice ExtensionConfigDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.core.v3.TypedExtensionConfig\";\n\n  rpc StreamExtensionConfigs(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaExtensionConfigs(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchExtensionConfigs(discovery.v3.DiscoveryRequest)\n      returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:extension_configs\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue\n// with importing services: https://github.com/google/protobuf/issues/4221 and\n// protoxform to upgrade the file.\nmessage EcdsDummy {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/health/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/cluster/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/health/v3/hds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.health.v3;\n\nimport \"envoy/config/cluster/v3/cluster.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/health_check.proto\";\nimport \"envoy/config/endpoint/v3/endpoint_components.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.health.v3\";\noption java_outer_classname = \"HdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Health Discovery Service (HDS)]\n\n// HDS is Health Discovery Service. It complements Envoy’s health checking\n// service by designating this Envoy to be a healthchecker for a subset of hosts\n// in the cluster. The status of these health checks will be reported to the\n// management server, where it can be aggregated etc and redistributed back to\n// Envoy through EDS.\nservice HealthDiscoveryService {\n  // 1. Envoy starts up and if its can_healthcheck option in the static\n  //    bootstrap config is enabled, sends HealthCheckRequest to the management\n  //    server. It supplies its capabilities (which protocol it can health check\n  //    with, what zone it resides in, etc.).\n  // 2. In response to (1), the management server designates this Envoy as a\n  //    healthchecker to health check a subset of all upstream hosts for a given\n  //    cluster (for example upstream Host 1 and Host 2). It streams\n  //    HealthCheckSpecifier messages with cluster related configuration for all\n  //    clusters this Envoy is designated to health check. Subsequent\n  //    HealthCheckSpecifier message will be sent on changes to:\n  //    a. Endpoints to health checks\n  //    b. 
Per cluster configuration change\n  // 3. Envoy creates a health probe based on the HealthCheck config and sends\n  //    it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck\n  //    configuration Envoy waits upon the arrival of the probe response and\n  //    looks at the content of the response to decide whether the endpoint is\n  //    healthy or not. If a response hasn't been received within the timeout\n  //    interval, the endpoint health status is considered TIMEOUT.\n  // 4. Envoy reports results back in an EndpointHealthResponse message.\n  //    Envoy streams responses as often as the interval configured by the\n  //    management server in HealthCheckSpecifier.\n  // 5. The management Server collects health statuses for all endpoints in the\n  //    cluster (for all clusters) and uses this information to construct\n  //    EndpointDiscoveryResponse messages.\n  // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load\n  //    balances traffic to them without additional health checking. It may\n  //    use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection\n  //    failed to a particular endpoint to account for health status propagation\n  //    delay between HDS and EDS).\n  // By default, can_healthcheck is true. If can_healthcheck is false, Cluster\n  // configuration may not contain HealthCheck message.\n  // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above\n  // invariant?\n  // TODO(htuch): Add @amb67's diagram.\n  rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse)\n      returns (stream HealthCheckSpecifier) {\n  }\n\n  // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of\n  // request/response. 
Should we add an identifier to the HealthCheckSpecifier\n  // to bind with the response?\n  rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {\n    option (google.api.http).post = \"/v3/discovery:health_check\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Defines supported protocols etc, so the management server can assign proper\n// endpoints to healthcheck.\nmessage Capability {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.Capability\";\n\n  // Different Envoy instances may have different capabilities (e.g. Redis)\n  // and/or have ports enabled for different protocols.\n  enum Protocol {\n    HTTP = 0;\n    TCP = 1;\n    REDIS = 2;\n  }\n\n  repeated Protocol health_check_protocols = 1;\n}\n\nmessage HealthCheckRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.HealthCheckRequest\";\n\n  config.core.v3.Node node = 1;\n\n  Capability capability = 2;\n}\n\nmessage EndpointHealth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.EndpointHealth\";\n\n  config.endpoint.v3.Endpoint endpoint = 1;\n\n  config.core.v3.HealthStatus health_status = 2;\n}\n\n// Group endpoint health by locality under each cluster.\nmessage LocalityEndpointsHealth {\n  config.core.v3.Locality locality = 1;\n\n  repeated EndpointHealth endpoints_health = 2;\n}\n\n// The health status of endpoints in a cluster. 
The cluster name and locality\n// should match the corresponding fields in ClusterHealthCheck message.\nmessage ClusterEndpointsHealth {\n  string cluster_name = 1;\n\n  repeated LocalityEndpointsHealth locality_endpoints_health = 2;\n}\n\nmessage EndpointHealthResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.EndpointHealthResponse\";\n\n  // Deprecated - Flat list of endpoint health information.\n  repeated EndpointHealth endpoints_health = 1 [deprecated = true];\n\n  // Organize Endpoint health information by cluster.\n  repeated ClusterEndpointsHealth cluster_endpoints_health = 2;\n}\n\nmessage HealthCheckRequestOrEndpointHealthResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.HealthCheckRequestOrEndpointHealthResponse\";\n\n  oneof request_type {\n    HealthCheckRequest health_check_request = 1;\n\n    EndpointHealthResponse endpoint_health_response = 2;\n  }\n}\n\nmessage LocalityEndpoints {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.LocalityEndpoints\";\n\n  config.core.v3.Locality locality = 1;\n\n  repeated config.endpoint.v3.Endpoint endpoints = 2;\n}\n\n// The cluster name and locality is provided to Envoy for the endpoints that it\n// health checks to support statistics reporting, logging and debugging by the\n// Envoy instance (outside of HDS). 
For maximum usefulness, it should match the\n// same cluster structure as that provided by EDS.\nmessage ClusterHealthCheck {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.ClusterHealthCheck\";\n\n  string cluster_name = 1;\n\n  repeated config.core.v3.HealthCheck health_checks = 2;\n\n  repeated LocalityEndpoints locality_endpoints = 3;\n\n  // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria <envoy_api_field_config.core.v3.HealthCheck.transport_socket_match_criteria>`\n  // on connection when health checking. For more details, see\n  // :ref:`config.cluster.v3.Cluster.transport_socket_matches <envoy_api_field_config.cluster.v3.Cluster.transport_socket_matches>`.\n  repeated config.cluster.v3.Cluster.TransportSocketMatch transport_socket_matches = 4;\n}\n\nmessage HealthCheckSpecifier {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.HealthCheckSpecifier\";\n\n  repeated ClusterHealthCheck cluster_health_checks = 1;\n\n  // The default is 1 second.\n  google.protobuf.Duration interval = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/health/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/cluster/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/service/health/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/health/v4alpha/hds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.health.v4alpha;\n\nimport \"envoy/config/cluster/v4alpha/cluster.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/core/v4alpha/health_check.proto\";\nimport \"envoy/config/endpoint/v3/endpoint_components.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.health.v4alpha\";\noption java_outer_classname = \"HdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Health Discovery Service (HDS)]\n\n// HDS is Health Discovery Service. It compliments Envoy’s health checking\n// service by designating this Envoy to be a healthchecker for a subset of hosts\n// in the cluster. The status of these health checks will be reported to the\n// management server, where it can be aggregated etc and redistributed back to\n// Envoy through EDS.\nservice HealthDiscoveryService {\n  // 1. Envoy starts up and if its can_healthcheck option in the static\n  //    bootstrap config is enabled, sends HealthCheckRequest to the management\n  //    server. It supplies its capabilities (which protocol it can health check\n  //    with, what zone it resides in, etc.).\n  // 2. In response to (1), the management server designates this Envoy as a\n  //    healthchecker to health check a subset of all upstream hosts for a given\n  //    cluster (for example upstream Host 1 and Host 2). It streams\n  //    HealthCheckSpecifier messages with cluster related configuration for all\n  //    clusters this Envoy is designated to health check. Subsequent\n  //    HealthCheckSpecifier message will be sent on changes to:\n  //    a. Endpoints to health checks\n  //    b. 
Per cluster configuration change\n  // 3. Envoy creates a health probe based on the HealthCheck config and sends\n  //    it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck\n  //    configuration Envoy waits upon the arrival of the probe response and\n  //    looks at the content of the response to decide whether the endpoint is\n  //    healthy or not. If a response hasn't been received within the timeout\n  //    interval, the endpoint health status is considered TIMEOUT.\n  // 4. Envoy reports results back in an EndpointHealthResponse message.\n  //    Envoy streams responses as often as the interval configured by the\n  //    management server in HealthCheckSpecifier.\n  // 5. The management Server collects health statuses for all endpoints in the\n  //    cluster (for all clusters) and uses this information to construct\n  //    EndpointDiscoveryResponse messages.\n  // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load\n  //    balances traffic to them without additional health checking. It may\n  //    use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection\n  //    failed to a particular endpoint to account for health status propagation\n  //    delay between HDS and EDS).\n  // By default, can_healthcheck is true. If can_healthcheck is false, Cluster\n  // configuration may not contain HealthCheck message.\n  // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above\n  // invariant?\n  // TODO(htuch): Add @amb67's diagram.\n  rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse)\n      returns (stream HealthCheckSpecifier) {\n  }\n\n  // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of\n  // request/response. 
Should we add an identifier to the HealthCheckSpecifier\n  // to bind with the response?\n  rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {\n    option (google.api.http).post = \"/v3/discovery:health_check\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Defines supported protocols etc, so the management server can assign proper\n// endpoints to healthcheck.\nmessage Capability {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.service.health.v3.Capability\";\n\n  // Different Envoy instances may have different capabilities (e.g. Redis)\n  // and/or have ports enabled for different protocols.\n  enum Protocol {\n    HTTP = 0;\n    TCP = 1;\n    REDIS = 2;\n  }\n\n  repeated Protocol health_check_protocols = 1;\n}\n\nmessage HealthCheckRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.HealthCheckRequest\";\n\n  config.core.v4alpha.Node node = 1;\n\n  Capability capability = 2;\n}\n\nmessage EndpointHealth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.EndpointHealth\";\n\n  config.endpoint.v3.Endpoint endpoint = 1;\n\n  config.core.v4alpha.HealthStatus health_status = 2;\n}\n\n// Group endpoint health by locality under each cluster.\nmessage LocalityEndpointsHealth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.LocalityEndpointsHealth\";\n\n  config.core.v4alpha.Locality locality = 1;\n\n  repeated EndpointHealth endpoints_health = 2;\n}\n\n// The health status of endpoints in a cluster. 
The cluster name and locality\n// should match the corresponding fields in ClusterHealthCheck message.\nmessage ClusterEndpointsHealth {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.ClusterEndpointsHealth\";\n\n  string cluster_name = 1;\n\n  repeated LocalityEndpointsHealth locality_endpoints_health = 2;\n}\n\nmessage EndpointHealthResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.EndpointHealthResponse\";\n\n  // Deprecated - Flat list of endpoint health information.\n  repeated EndpointHealth hidden_envoy_deprecated_endpoints_health = 1 [deprecated = true];\n\n  // Organize Endpoint health information by cluster.\n  repeated ClusterEndpointsHealth cluster_endpoints_health = 2;\n}\n\nmessage HealthCheckRequestOrEndpointHealthResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.HealthCheckRequestOrEndpointHealthResponse\";\n\n  oneof request_type {\n    HealthCheckRequest health_check_request = 1;\n\n    EndpointHealthResponse endpoint_health_response = 2;\n  }\n}\n\nmessage LocalityEndpoints {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.LocalityEndpoints\";\n\n  config.core.v4alpha.Locality locality = 1;\n\n  repeated config.endpoint.v3.Endpoint endpoints = 2;\n}\n\n// The cluster name and locality is provided to Envoy for the endpoints that it\n// health checks to support statistics reporting, logging and debugging by the\n// Envoy instance (outside of HDS). 
For maximum usefulness, it should match the\n// same cluster structure as that provided by EDS.\nmessage ClusterHealthCheck {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.ClusterHealthCheck\";\n\n  string cluster_name = 1;\n\n  repeated config.core.v4alpha.HealthCheck health_checks = 2;\n\n  repeated LocalityEndpoints locality_endpoints = 3;\n\n  // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria <envoy_api_field_config.core.v4alpha.HealthCheck.transport_socket_match_criteria>`\n  // on connection when health checking. For more details, see\n  // :ref:`config.cluster.v3.Cluster.transport_socket_matches <envoy_api_field_config.cluster.v4alpha.Cluster.transport_socket_matches>`.\n  repeated config.cluster.v4alpha.Cluster.TransportSocketMatch transport_socket_matches = 4;\n}\n\nmessage HealthCheckSpecifier {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.health.v3.HealthCheckSpecifier\";\n\n  repeated ClusterHealthCheck cluster_health_checks = 1;\n\n  // The default is 1 second.\n  google.protobuf.Duration interval = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/listener/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/listener/v3/lds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.listener.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.listener.v3\";\noption java_outer_classname = \"LdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Listener]\n// Listener :ref:`configuration overview <config_listeners>`\n\n// The Envoy instance initiates an RPC at startup to discover a list of\n// listeners. Updates are delivered via streaming from the LDS server and\n// consist of a complete update of all listeners. Existing connections will be\n// allowed to drain from listeners that are no longer present.\nservice ListenerDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.listener.v3.Listener\";\n\n  rpc DeltaListeners(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc StreamListeners(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc FetchListeners(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:listeners\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage LdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.LdsDummy\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/load_stats/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/endpoint:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/load_stats/v2/lrs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.load_stats.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/endpoint/load_report.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.load_stats.v2\";\noption java_outer_classname = \"LrsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Load reporting service]\n\nservice LoadReportingService {\n  // Advanced API to allow for multi-dimensional load balancing by remote\n  // server. For receiving LB assignments, the steps are:\n  // 1, The management server is configured with per cluster/zone/load metric\n  //    capacity configuration. The capacity configuration definition is\n  //    outside of the scope of this document.\n  // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters\n  //    to balance.\n  //\n  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a\n  // management server:\n  // 1. Once a connection establishes, the management server publishes a\n  //    LoadStatsResponse for all clusters it is interested in learning load\n  //    stats about.\n  // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts\n  //    based on per-zone weights and/or per-instance weights (if specified)\n  //    based on intra-zone LbPolicy. This information comes from the above\n  //    {Stream,Fetch}Endpoints.\n  // 3. When upstream hosts reply, they optionally add header <define header\n  //    name> with ASCII representation of EndpointLoadMetricStats.\n  // 4. Envoy aggregates load reports over the period of time given to it in\n  //    LoadStatsResponse.load_reporting_interval. 
This includes aggregation\n  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as\n  //    well as load metrics from upstream hosts.\n  // 5. When the timer of load_reporting_interval expires, Envoy sends new\n  //    LoadStatsRequest filled with load reports for each cluster.\n  // 6. The management server uses the load reports from all reported Envoys\n  //    from around the world, computes global assignment and prepares traffic\n  //    assignment destined for each zone Envoys are located in. Goto 2.\n  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {\n  }\n}\n\n// A load report Envoy sends to the management server.\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\nmessage LoadStatsRequest {\n  // Node identifier for Envoy instance.\n  api.v2.core.Node node = 1;\n\n  // A list of load stats to report.\n  repeated api.v2.endpoint.ClusterStats cluster_stats = 2;\n}\n\n// The management server sends envoy a LoadStatsResponse with all clusters it\n// is interested in learning load stats about.\n// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.\nmessage LoadStatsResponse {\n  // Clusters to report stats for.\n  // Not populated if *send_all_clusters* is true.\n  repeated string clusters = 1;\n\n  // If true, the client should send all clusters it knows about.\n  // Only clients that advertise the \"envoy.lrs.supports_send_all_clusters\" capability in their\n  // :ref:`client_features<envoy_api_field_core.Node.client_features>` field will honor this field.\n  bool send_all_clusters = 4;\n\n  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:\n  // 1. There may be some delay from when the timer fires until stats sampling occurs.\n  // 2. 
For clusters that were already feature in the previous *LoadStatsResponse*, any traffic\n  //    that is observed in between the corresponding previous *LoadStatsRequest* and this\n  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period\n  //    of inobservability that might otherwise exists between the messages. New clusters are not\n  //    subject to this consideration.\n  google.protobuf.Duration load_reporting_interval = 2;\n\n  // Set to *true* if the management server supports endpoint granularity\n  // report.\n  bool report_endpoint_granularity = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/load_stats/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/service/load_stats/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/load_stats/v3/lrs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.load_stats.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/endpoint/v3/load_report.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.load_stats.v3\";\noption java_outer_classname = \"LrsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Load Reporting service (LRS)]\n\n// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional\n// stream with a management server. Upon connecting, the management server can send a\n// :ref:`LoadStatsResponse <envoy_api_msg_service.load_stats.v3.LoadStatsResponse>` to a node it is\n// interested in getting the load reports for. Envoy in this node will start sending\n// :ref:`LoadStatsRequest <envoy_api_msg_service.load_stats.v3.LoadStatsRequest>`. This is done periodically\n// based on the :ref:`load reporting interval <envoy_api_field_service.load_stats.v3.LoadStatsResponse.load_reporting_interval>`\n// For details, take a look at the :ref:`Load Reporting Service sandbox example <install_sandboxes_load_reporting_service>`.\n\nservice LoadReportingService {\n  // Advanced API to allow for multi-dimensional load balancing by remote\n  // server. For receiving LB assignments, the steps are:\n  // 1, The management server is configured with per cluster/zone/load metric\n  //    capacity configuration. The capacity configuration definition is\n  //    outside of the scope of this document.\n  // 2. 
Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters\n  //    to balance.\n  //\n  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a\n  // management server:\n  // 1. Once a connection establishes, the management server publishes a\n  //    LoadStatsResponse for all clusters it is interested in learning load\n  //    stats about.\n  // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts\n  //    based on per-zone weights and/or per-instance weights (if specified)\n  //    based on intra-zone LbPolicy. This information comes from the above\n  //    {Stream,Fetch}Endpoints.\n  // 3. When upstream hosts reply, they optionally add header <define header\n  //    name> with ASCII representation of EndpointLoadMetricStats.\n  // 4. Envoy aggregates load reports over the period of time given to it in\n  //    LoadStatsResponse.load_reporting_interval. This includes aggregation\n  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as\n  //    well as load metrics from upstream hosts.\n  // 5. When the timer of load_reporting_interval expires, Envoy sends new\n  //    LoadStatsRequest filled with load reports for each cluster.\n  // 6. The management server uses the load reports from all reported Envoys\n  //    from around the world, computes global assignment and prepares traffic\n  //    assignment destined for each zone Envoys are located in. 
Goto 2.\n  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {\n  }\n}\n\n// A load report Envoy sends to the management server.\nmessage LoadStatsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.load_stats.v2.LoadStatsRequest\";\n\n  // Node identifier for Envoy instance.\n  config.core.v3.Node node = 1;\n\n  // A list of load stats to report.\n  repeated config.endpoint.v3.ClusterStats cluster_stats = 2;\n}\n\n// The management server sends envoy a LoadStatsResponse with all clusters it\n// is interested in learning load stats about.\nmessage LoadStatsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.load_stats.v2.LoadStatsResponse\";\n\n  // Clusters to report stats for.\n  // Not populated if *send_all_clusters* is true.\n  repeated string clusters = 1;\n\n  // If true, the client should send all clusters it knows about.\n  // Only clients that advertise the \"envoy.lrs.supports_send_all_clusters\" capability in their\n  // :ref:`client_features<envoy_api_field_config.core.v3.Node.client_features>` field will honor this field.\n  bool send_all_clusters = 4;\n\n  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:\n  //\n  // 1. There may be some delay from when the timer fires until stats sampling occurs.\n  // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic\n  //    that is observed in between the corresponding previous *LoadStatsRequest* and this\n  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period\n  //    of inobservability that might otherwise exists between the messages. 
New clusters are not\n  //    subject to this consideration.\n  google.protobuf.Duration load_reporting_interval = 2;\n\n  // Set to *true* if the management server supports endpoint granularity\n  // report.\n  bool report_endpoint_granularity = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/load_stats/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/config/endpoint/v3:pkg\",\n        \"//envoy/service/load_stats/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/load_stats/v4alpha/lrs.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.load_stats.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/config/endpoint/v3/load_report.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.load_stats.v4alpha\";\noption java_outer_classname = \"LrsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Load Reporting service (LRS)]\n\n// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional\n// stream with a management server. Upon connecting, the management server can send a\n// :ref:`LoadStatsResponse <envoy_api_msg_service.load_stats.v4alpha.LoadStatsResponse>` to a node it is\n// interested in getting the load reports for. Envoy in this node will start sending\n// :ref:`LoadStatsRequest <envoy_api_msg_service.load_stats.v4alpha.LoadStatsRequest>`. This is done periodically\n// based on the :ref:`load reporting interval <envoy_api_field_service.load_stats.v4alpha.LoadStatsResponse.load_reporting_interval>`\n// For details, take a look at the :ref:`Load Reporting Service sandbox example <install_sandboxes_load_reporting_service>`.\n\nservice LoadReportingService {\n  // Advanced API to allow for multi-dimensional load balancing by remote\n  // server. For receiving LB assignments, the steps are:\n  // 1, The management server is configured with per cluster/zone/load metric\n  //    capacity configuration. The capacity configuration definition is\n  //    outside of the scope of this document.\n  // 2. 
Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters\n  //    to balance.\n  //\n  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a\n  // management server:\n  // 1. Once a connection establishes, the management server publishes a\n  //    LoadStatsResponse for all clusters it is interested in learning load\n  //    stats about.\n  // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts\n  //    based on per-zone weights and/or per-instance weights (if specified)\n  //    based on intra-zone LbPolicy. This information comes from the above\n  //    {Stream,Fetch}Endpoints.\n  // 3. When upstream hosts reply, they optionally add header <define header\n  //    name> with ASCII representation of EndpointLoadMetricStats.\n  // 4. Envoy aggregates load reports over the period of time given to it in\n  //    LoadStatsResponse.load_reporting_interval. This includes aggregation\n  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as\n  //    well as load metrics from upstream hosts.\n  // 5. When the timer of load_reporting_interval expires, Envoy sends new\n  //    LoadStatsRequest filled with load reports for each cluster.\n  // 6. The management server uses the load reports from all reported Envoys\n  //    from around the world, computes global assignment and prepares traffic\n  //    assignment destined for each zone Envoys are located in. 
Goto 2.\n  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {\n  }\n}\n\n// A load report Envoy sends to the management server.\nmessage LoadStatsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.load_stats.v3.LoadStatsRequest\";\n\n  // Node identifier for Envoy instance.\n  config.core.v4alpha.Node node = 1;\n\n  // A list of load stats to report.\n  repeated config.endpoint.v3.ClusterStats cluster_stats = 2;\n}\n\n// The management server sends envoy a LoadStatsResponse with all clusters it\n// is interested in learning load stats about.\nmessage LoadStatsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.load_stats.v3.LoadStatsResponse\";\n\n  // Clusters to report stats for.\n  // Not populated if *send_all_clusters* is true.\n  repeated string clusters = 1;\n\n  // If true, the client should send all clusters it knows about.\n  // Only clients that advertise the \"envoy.lrs.supports_send_all_clusters\" capability in their\n  // :ref:`client_features<envoy_api_field_config.core.v4alpha.Node.client_features>` field will honor this field.\n  bool send_all_clusters = 4;\n\n  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:\n  //\n  // 1. There may be some delay from when the timer fires until stats sampling occurs.\n  // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic\n  //    that is observed in between the corresponding previous *LoadStatsRequest* and this\n  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period\n  //    of inobservability that might otherwise exists between the messages. 
New clusters are not\n  //    subject to this consideration.\n  google.protobuf.Duration load_reporting_interval = 2;\n\n  // Set to *true* if the management server supports endpoint granularity\n  // report.\n  bool report_endpoint_granularity = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/metrics/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@prometheus_metrics_model//:client_model\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.metrics.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"metrics.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.metrics.v2\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metrics service]\n\n// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric\n// data model as a standard to represent metrics information.\nservice MetricsService {\n  // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure.\n  rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) {\n  }\n}\n\nmessage StreamMetricsResponse {\n}\n\nmessage StreamMetricsMessage {\n  message Identifier {\n    // The node sending metrics over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // A list of metric entries\n  repeated io.prometheus.client.MetricFamily envoy_metrics = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/metrics/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/metrics/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@prometheus_metrics_model//:client_model\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.metrics.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"metrics.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.metrics.v3\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metrics service]\n\n// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric\n// data model as a standard to represent metrics information.\nservice MetricsService {\n  // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure.\n  rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) {\n  }\n}\n\nmessage StreamMetricsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.metrics.v2.StreamMetricsResponse\";\n}\n\nmessage StreamMetricsMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.metrics.v2.StreamMetricsMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.metrics.v2.StreamMetricsMessage.Identifier\";\n\n    // The node sending metrics over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // A list of metric entries\n  repeated io.prometheus.client.MetricFamily envoy_metrics = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/metrics/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/metrics/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@prometheus_metrics_model//:client_model\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/metrics/v4alpha/metrics_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.metrics.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"metrics.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.metrics.v4alpha\";\noption java_outer_classname = \"MetricsServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Metrics service]\n\n// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric\n// data model as a standard to represent metrics information.\nservice MetricsService {\n  // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure.\n  rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) {\n  }\n}\n\nmessage StreamMetricsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.metrics.v3.StreamMetricsResponse\";\n}\n\nmessage StreamMetricsMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.metrics.v3.StreamMetricsMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.metrics.v3.StreamMetricsMessage.Identifier\";\n\n    // The node sending metrics over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // A list of metric entries\n  repeated io.prometheus.client.MetricFamily envoy_metrics = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/ratelimit/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/ratelimit:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/ratelimit/v2/rls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.ratelimit.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/ratelimit/ratelimit.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.ratelimit.v2\";\noption java_outer_classname = \"RlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Rate Limit Service (RLS)]\n\nservice RateLimitService {\n  // Determine whether rate limiting should take place.\n  rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) {\n  }\n}\n\n// Main message for a rate limit request. The rate limit service is designed to be fully generic\n// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded\n// configuration will parse the request and find the most specific limit to apply. In addition,\n// a RateLimitRequest can contain multiple \"descriptors\" to limit on. When multiple descriptors\n// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any\n// of them are over limit. This enables more complex application level rate limiting scenarios\n// if desired.\nmessage RateLimitRequest {\n  // All rate limit requests must specify a domain. This enables the configuration to be per\n  // application without fear of overlap. E.g., \"envoy\".\n  string domain = 1;\n\n  // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is\n  // processed by the service (see below). If any of the descriptors are over limit, the entire\n  // request is considered to be over limit.\n  repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 2;\n\n  // Rate limit requests can optionally specify the number of hits a request adds to the matched\n  // limit. 
If the value is not set in the message, a request increases the matched limit by 1.\n  uint32 hits_addend = 3;\n}\n\n// A response from a ShouldRateLimit call.\nmessage RateLimitResponse {\n  enum Code {\n    // The response code is not known.\n    UNKNOWN = 0;\n\n    // The response code to notify that the number of requests are under limit.\n    OK = 1;\n\n    // The response code to notify that the number of requests are over limit.\n    OVER_LIMIT = 2;\n  }\n\n  // Defines an actual rate limit in terms of requests per unit of time and the unit itself.\n  message RateLimit {\n    enum Unit {\n      // The time unit is not known.\n      UNKNOWN = 0;\n\n      // The time unit representing a second.\n      SECOND = 1;\n\n      // The time unit representing a minute.\n      MINUTE = 2;\n\n      // The time unit representing an hour.\n      HOUR = 3;\n\n      // The time unit representing a day.\n      DAY = 4;\n    }\n\n    // A name or description of this limit.\n    string name = 3;\n\n    // The number of requests per unit of time.\n    uint32 requests_per_unit = 1;\n\n    // The unit of time.\n    Unit unit = 2;\n  }\n\n  message DescriptorStatus {\n    // The response code for an individual descriptor.\n    Code code = 1;\n\n    // The current limit as configured by the server. Useful for debugging, etc.\n    RateLimit current_limit = 2;\n\n    // The limit remaining in the current time unit.\n    uint32 limit_remaining = 3;\n  }\n\n  // The overall response code which takes into account all of the descriptors that were passed\n  // in the RateLimitRequest message.\n  Code overall_code = 1;\n\n  // A list of DescriptorStatus messages which matches the length of the descriptor list passed\n  // in the RateLimitRequest. 
This can be used by the caller to determine which individual\n  // descriptors failed and/or what the currently configured limits are for all of them.\n  repeated DescriptorStatus statuses = 2;\n\n  // A list of headers to add to the response\n  repeated api.v2.core.HeaderValue headers = 3\n      [(udpa.annotations.field_migrate).rename = \"response_headers_to_add\"];\n\n  // A list of headers to add to the request when forwarded\n  repeated api.v2.core.HeaderValue request_headers_to_add = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/ratelimit/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/extensions/common/ratelimit/v3:pkg\",\n        \"//envoy/service/ratelimit/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/ratelimit/v3/rls.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.ratelimit.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/extensions/common/ratelimit/v3/ratelimit.proto\";\n\nimport \"google/protobuf/duration.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.ratelimit.v3\";\noption java_outer_classname = \"RlsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Rate Limit Service (RLS)]\n\nservice RateLimitService {\n  // Determine whether rate limiting should take place.\n  rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) {\n  }\n}\n\n// Main message for a rate limit request. The rate limit service is designed to be fully generic\n// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded\n// configuration will parse the request and find the most specific limit to apply. In addition,\n// a RateLimitRequest can contain multiple \"descriptors\" to limit on. When multiple descriptors\n// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any\n// of them are over limit. This enables more complex application level rate limiting scenarios\n// if desired.\nmessage RateLimitRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.ratelimit.v2.RateLimitRequest\";\n\n  // All rate limit requests must specify a domain. This enables the configuration to be per\n  // application without fear of overlap. E.g., \"envoy\".\n  string domain = 1;\n\n  // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is\n  // processed by the service (see below). 
If any of the descriptors are over limit, the entire\n  // request is considered to be over limit.\n  repeated envoy.extensions.common.ratelimit.v3.RateLimitDescriptor descriptors = 2;\n\n  // Rate limit requests can optionally specify the number of hits a request adds to the matched\n  // limit. If the value is not set in the message, a request increases the matched limit by 1.\n  uint32 hits_addend = 3;\n}\n\n// A response from a ShouldRateLimit call.\nmessage RateLimitResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.ratelimit.v2.RateLimitResponse\";\n\n  enum Code {\n    // The response code is not known.\n    UNKNOWN = 0;\n\n    // The response code to notify that the number of requests are under limit.\n    OK = 1;\n\n    // The response code to notify that the number of requests are over limit.\n    OVER_LIMIT = 2;\n  }\n\n  // Defines an actual rate limit in terms of requests per unit of time and the unit itself.\n  message RateLimit {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.ratelimit.v2.RateLimitResponse.RateLimit\";\n\n    // Identifies the unit of of time for rate limit.\n    // [#comment: replace by envoy/type/v3/ratelimit_unit.proto in v4]\n    enum Unit {\n      // The time unit is not known.\n      UNKNOWN = 0;\n\n      // The time unit representing a second.\n      SECOND = 1;\n\n      // The time unit representing a minute.\n      MINUTE = 2;\n\n      // The time unit representing an hour.\n      HOUR = 3;\n\n      // The time unit representing a day.\n      DAY = 4;\n    }\n\n    // A name or description of this limit.\n    string name = 3;\n\n    // The number of requests per unit of time.\n    uint32 requests_per_unit = 1;\n\n    // The unit of time.\n    Unit unit = 2;\n  }\n\n  message DescriptorStatus {\n    option (udpa.annotations.versioning).previous_message_type =\n        
\"envoy.service.ratelimit.v2.RateLimitResponse.DescriptorStatus\";\n\n    // The response code for an individual descriptor.\n    Code code = 1;\n\n    // The current limit as configured by the server. Useful for debugging, etc.\n    RateLimit current_limit = 2;\n\n    // The limit remaining in the current time unit.\n    uint32 limit_remaining = 3;\n\n    // Duration until reset of the current limit window.\n    google.protobuf.Duration duration_until_reset = 4;\n  }\n\n  // The overall response code which takes into account all of the descriptors that were passed\n  // in the RateLimitRequest message.\n  Code overall_code = 1;\n\n  // A list of DescriptorStatus messages which matches the length of the descriptor list passed\n  // in the RateLimitRequest. This can be used by the caller to determine which individual\n  // descriptors failed and/or what the currently configured limits are for all of them.\n  repeated DescriptorStatus statuses = 2;\n\n  // A list of headers to add to the response\n  repeated config.core.v3.HeaderValue response_headers_to_add = 3;\n\n  // A list of headers to add to the request when forwarded\n  repeated config.core.v3.HeaderValue request_headers_to_add = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/route/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/api/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/route/v3/rds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.route.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.route.v3\";\noption java_outer_classname = \"RdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: RDS]\n\n// The resource_names field in DiscoveryRequest specifies a route configuration.\n// This allows an Envoy configuration with multiple HTTP listeners (and\n// associated HTTP connection manager filters) to use different route\n// configurations. Each listener will bind its HTTP connection manager filter to\n// a route table via this identifier.\nservice RouteDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.route.v3.RouteConfiguration\";\n\n  rpc StreamRoutes(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaRoutes(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:routes\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for\n// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered\n// during the processing of an HTTP request if a route for the request cannot be resolved. 
The\n// :ref:`resource_names_subscribe <envoy_api_field_service.discovery.v3.DeltaDiscoveryRequest.resource_names_subscribe>`\n// field contains a list of virtual host names or aliases to track. The contents of an alias would\n// be the contents of a *host* or *authority* header used to make an http request. An xDS server\n// will match an alias to a virtual host based on the content of :ref:`domains'\n// <envoy_api_field_config.route.v3.VirtualHost.domains>` field. The *resource_names_unsubscribe* field\n// contains a list of virtual host names that have been :ref:`unsubscribed\n// <xds_protocol_unsubscribe>` from the routing table associated with the RouteConfiguration.\nservice VirtualHostDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.route.v3.VirtualHost\";\n\n  rpc DeltaVirtualHosts(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage RdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.RdsDummy\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/route/v3/srds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.route.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.route.v3\";\noption java_outer_classname = \"SrdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: SRDS]\n// * Routing :ref:`architecture overview <arch_overview_http_routing>`\n\n// The Scoped Routes Discovery Service (SRDS) API distributes\n// :ref:`ScopedRouteConfiguration<envoy_api_msg.ScopedRouteConfiguration>`\n// resources. Each ScopedRouteConfiguration resource represents a \"routing\n// scope\" containing a mapping that allows the HTTP connection manager to\n// dynamically assign a routing table (specified via a\n// :ref:`RouteConfiguration<envoy_api_msg_config.route.v3.RouteConfiguration>` message) to each\n// HTTP request.\nservice ScopedRoutesDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.config.route.v3.ScopedRouteConfiguration\";\n\n  rpc StreamScopedRoutes(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaScopedRoutes(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchScopedRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:scoped-routes\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.\nmessage SrdsDummy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.api.v2.SrdsDummy\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/runtime/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/runtime/v3/rtds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.runtime.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.runtime.v3\";\noption java_outer_classname = \"RtdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Runtime Discovery Service (RTDS)]\n// RTDS :ref:`configuration overview <config_runtime_rtds>`\n\n// Discovery service for Runtime resources.\nservice RuntimeDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.service.runtime.v3.Runtime\";\n\n  rpc StreamRuntime(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc DeltaRuntime(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchRuntime(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:runtime\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage RtdsDummy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.RtdsDummy\";\n}\n\n// RTDS resource type. This describes a layer in the runtime virtual filesystem.\nmessage Runtime {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.service.discovery.v2.Runtime\";\n\n  // Runtime resource name. 
This makes the Runtime a self-describing xDS\n  // resource.\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n\n  google.protobuf.Struct layer = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/secret/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/service/discovery/v2:pkg\",\n        \"//envoy/service/discovery/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/secret/v3/sds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.secret.v3;\n\nimport \"envoy/service/discovery/v3/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.secret.v3\";\noption java_outer_classname = \"SdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Secret Discovery Service (SDS)]\n\nservice SecretDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.extensions.transport_sockets.tls.v3.Secret\";\n\n  rpc DeltaSecrets(stream discovery.v3.DeltaDiscoveryRequest)\n      returns (stream discovery.v3.DeltaDiscoveryResponse) {\n  }\n\n  rpc StreamSecrets(stream discovery.v3.DiscoveryRequest)\n      returns (stream discovery.v3.DiscoveryResponse) {\n  }\n\n  rpc FetchSecrets(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:secrets\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing\n// services: https://github.com/google/protobuf/issues/4221\nmessage SdsDummy {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.discovery.v2.SdsDummy\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/status/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/admin/v2alpha:pkg\",\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/status/v2/csds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.status.v2;\n\nimport \"envoy/admin/v2alpha/config_dump.proto\";\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/type/matcher/node.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.status.v2\";\noption java_outer_classname = \"CsdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Client Status Discovery Service (CSDS)]\n\n// CSDS is Client Status Discovery Service. It can be used to get the status of\n// an xDS-compliant client from the management server's point of view. In the\n// future, it can potentially be used as an interface to get the current\n// state directly from the client.\nservice ClientStatusDiscoveryService {\n  rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) {\n  }\n\n  rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) {\n    option (google.api.http).post = \"/v2/discovery:client_status\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Status of a config.\nenum ConfigStatus {\n  // Status info is not available/unknown.\n  UNKNOWN = 0;\n\n  // Management server has sent the config to client and received ACK.\n  SYNCED = 1;\n\n  // Config is not sent.\n  NOT_SENT = 2;\n\n  // Management server has sent the config to client but hasn’t received\n  // ACK/NACK.\n  STALE = 3;\n\n  // Management server has sent the config to client but received NACK.\n  ERROR = 4;\n}\n\n// Request for client status of clients identified by a list of NodeMatchers.\nmessage ClientStatusRequest {\n  // Management server can use these match criteria to identify clients.\n  // The match follows OR semantics.\n  repeated type.matcher.NodeMatcher node_matchers = 
1;\n}\n\n// Detailed config (per xDS) with status.\n// [#next-free-field: 6]\nmessage PerXdsConfig {\n  ConfigStatus status = 1;\n\n  oneof per_xds_config {\n    admin.v2alpha.ListenersConfigDump listener_config = 2;\n\n    admin.v2alpha.ClustersConfigDump cluster_config = 3;\n\n    admin.v2alpha.RoutesConfigDump route_config = 4;\n\n    admin.v2alpha.ScopedRoutesConfigDump scoped_route_config = 5;\n  }\n}\n\n// All xds configs for a particular client.\nmessage ClientConfig {\n  // Node for a particular client.\n  api.v2.core.Node node = 1;\n\n  repeated PerXdsConfig xds_config = 2;\n}\n\nmessage ClientStatusResponse {\n  // Client configs for the clients specified in the ClientStatusRequest.\n  repeated ClientConfig config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/status/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/admin/v3:pkg\",\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/status/v2:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/status/v3/csds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.status.v3;\n\nimport \"envoy/admin/v3/config_dump.proto\";\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/type/matcher/v3/node.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.status.v3\";\noption java_outer_classname = \"CsdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Client Status Discovery Service (CSDS)]\n\n// CSDS is Client Status Discovery Service. It can be used to get the status of\n// an xDS-compliant client from the management server's point of view. It can\n// also be used to get the current xDS states directly from the client.\nservice ClientStatusDiscoveryService {\n  rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) {\n  }\n\n  rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) {\n    option (google.api.http).post = \"/v3/discovery:client_status\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Status of a config from a management server view.\nenum ConfigStatus {\n  // Status info is not available/unknown.\n  UNKNOWN = 0;\n\n  // Management server has sent the config to client and received ACK.\n  SYNCED = 1;\n\n  // Config is not sent.\n  NOT_SENT = 2;\n\n  // Management server has sent the config to client but hasn’t received\n  // ACK/NACK.\n  STALE = 3;\n\n  // Management server has sent the config to client but received NACK. 
The\n  // attached config dump will be the latest config (the rejected one), since\n  // it is the persisted version in the management server.\n  ERROR = 4;\n}\n\n// Config status from a client-side view.\nenum ClientConfigStatus {\n  // Config status is not available/unknown.\n  CLIENT_UNKNOWN = 0;\n\n  // Client requested the config but hasn't received any config from management\n  // server yet.\n  CLIENT_REQUESTED = 1;\n\n  // Client received the config and replied with ACK.\n  CLIENT_ACKED = 2;\n\n  // Client received the config and replied with NACK. Notably, the attached\n  // config dump is not the NACKed version, but the most recent accepted one. If\n  // no config is accepted yet, the attached config dump will be empty.\n  CLIENT_NACKED = 3;\n}\n\n// Request for client status of clients identified by a list of NodeMatchers.\nmessage ClientStatusRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v2.ClientStatusRequest\";\n\n  // Management server can use these match criteria to identify clients.\n  // The match follows OR semantics.\n  repeated type.matcher.v3.NodeMatcher node_matchers = 1;\n\n  // The node making the csds request.\n  config.core.v3.Node node = 2;\n}\n\n// Detailed config (per xDS) with status.\n// [#next-free-field: 8]\nmessage PerXdsConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v2.PerXdsConfig\";\n\n  // Config status generated by management servers. Will not be present if the\n  // CSDS server is an xDS client.\n  ConfigStatus status = 1 [(udpa.annotations.field_migrate).oneof_promotion = \"status_config\"];\n\n  // Client config status is populated by xDS clients. Will not be present if\n  // the CSDS server is an xDS server. 
No matter what the client config status\n  // is, xDS clients should always dump the most recent accepted xDS config.\n  ClientConfigStatus client_status = 7\n      [(udpa.annotations.field_migrate).oneof_promotion = \"status_config\"];\n\n  oneof per_xds_config {\n    admin.v3.ListenersConfigDump listener_config = 2;\n\n    admin.v3.ClustersConfigDump cluster_config = 3;\n\n    admin.v3.RoutesConfigDump route_config = 4;\n\n    admin.v3.ScopedRoutesConfigDump scoped_route_config = 5;\n\n    admin.v3.EndpointsConfigDump endpoint_config = 6;\n  }\n}\n\n// All xds configs for a particular client.\nmessage ClientConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v2.ClientConfig\";\n\n  // Node for a particular client.\n  config.core.v3.Node node = 1;\n\n  repeated PerXdsConfig xds_config = 2;\n}\n\nmessage ClientStatusResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v2.ClientStatusResponse\";\n\n  // Client configs for the clients specified in the ClientStatusRequest.\n  repeated ClientConfig config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/status/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/admin/v4alpha:pkg\",\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/status/v3:pkg\",\n        \"//envoy/type/matcher/v4alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/status/v4alpha/csds.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.status.v4alpha;\n\nimport \"envoy/admin/v4alpha/config_dump.proto\";\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/type/matcher/v4alpha/node.proto\";\n\nimport \"google/api/annotations.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.status.v4alpha\";\noption java_outer_classname = \"CsdsProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Client Status Discovery Service (CSDS)]\n\n// CSDS is Client Status Discovery Service. It can be used to get the status of\n// an xDS-compliant client from the management server's point of view. It can\n// also be used to get the current xDS states directly from the client.\nservice ClientStatusDiscoveryService {\n  rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) {\n  }\n\n  rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) {\n    option (google.api.http).post = \"/v3/discovery:client_status\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\n// Status of a config from a management server view.\nenum ConfigStatus {\n  // Status info is not available/unknown.\n  UNKNOWN = 0;\n\n  // Management server has sent the config to client and received ACK.\n  SYNCED = 1;\n\n  // Config is not sent.\n  NOT_SENT = 2;\n\n  // Management server has sent the config to client but hasn’t received\n  // ACK/NACK.\n  STALE = 3;\n\n  // Management server has sent the config to client but received NACK. 
The\n  // attached config dump will be the latest config (the rejected one), since\n  // it is the persisted version in the management server.\n  ERROR = 4;\n}\n\n// Config status from a client-side view.\nenum ClientConfigStatus {\n  // Config status is not available/unknown.\n  CLIENT_UNKNOWN = 0;\n\n  // Client requested the config but hasn't received any config from management\n  // server yet.\n  CLIENT_REQUESTED = 1;\n\n  // Client received the config and replied with ACK.\n  CLIENT_ACKED = 2;\n\n  // Client received the config and replied with NACK. Notably, the attached\n  // config dump is not the NACKed version, but the most recent accepted one. If\n  // no config is accepted yet, the attached config dump will be empty.\n  CLIENT_NACKED = 3;\n}\n\n// Request for client status of clients identified by a list of NodeMatchers.\nmessage ClientStatusRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v3.ClientStatusRequest\";\n\n  // Management server can use these match criteria to identify clients.\n  // The match follows OR semantics.\n  repeated type.matcher.v4alpha.NodeMatcher node_matchers = 1;\n\n  // The node making the csds request.\n  config.core.v4alpha.Node node = 2;\n}\n\n// Detailed config (per xDS) with status.\n// [#next-free-field: 8]\nmessage PerXdsConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v3.PerXdsConfig\";\n\n  oneof status_config {\n    // Config status generated by management servers. Will not be present if the\n    // CSDS server is an xDS client.\n    ConfigStatus status = 1;\n\n    // Client config status is populated by xDS clients. Will not be present if\n    // the CSDS server is an xDS server. 
No matter what the client config status\n    // is, xDS clients should always dump the most recent accepted xDS config.\n    ClientConfigStatus client_status = 7;\n  }\n\n  oneof per_xds_config {\n    admin.v4alpha.ListenersConfigDump listener_config = 2;\n\n    admin.v4alpha.ClustersConfigDump cluster_config = 3;\n\n    admin.v4alpha.RoutesConfigDump route_config = 4;\n\n    admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5;\n\n    admin.v4alpha.EndpointsConfigDump endpoint_config = 6;\n  }\n}\n\n// All xds configs for a particular client.\nmessage ClientConfig {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v3.ClientConfig\";\n\n  // Node for a particular client.\n  config.core.v4alpha.Node node = 1;\n\n  repeated PerXdsConfig xds_config = 2;\n}\n\nmessage ClientStatusResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.status.v3.ClientStatusResponse\";\n\n  // Client configs for the clients specified in the ClientStatusRequest.\n  repeated ClientConfig config = 1;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/tap/v2alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"//envoy/api/v2/route:pkg\",\n        \"//envoy/data/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/tap/v2alpha/common.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.tap.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/api/v2/core/grpc_service.proto\";\nimport \"envoy/api/v2/route/route_components.proto\";\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.tap.v2alpha\";\noption java_outer_classname = \"CommonProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.config.tap.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Common tap configuration]\n\n// Tap configuration.\nmessage TapConfig {\n  // [#comment:TODO(mattklein123): Rate limiting]\n\n  // The match configuration. If the configuration matches the data source being tapped, a tap will\n  // occur, with the result written to the configured output.\n  MatchPredicate match_config = 1 [(validate.rules).message = {required: true}];\n\n  // The tap output configuration. If a match configuration matches a data source being tapped,\n  // a tap will occur and the data will be written to the configured output.\n  OutputConfig output_config = 2 [(validate.rules).message = {required: true}];\n\n  // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\\connections for\n  // which the tap matching is enabled. When not enabled, the request\\connection will not be\n  // recorded.\n  //\n  // .. note::\n  //\n  //   This field defaults to 100/:ref:`HUNDRED\n  //   <envoy_api_enum_type.FractionalPercent.DenominatorType>`.\n  api.v2.core.RuntimeFractionalPercent tap_enabled = 3;\n}\n\n// Tap match configuration. 
This is a recursive structure which allows complex nested match\n// configurations to be built using various logical operators.\n// [#next-free-field: 9]\nmessage MatchPredicate {\n  // A set of match configurations used for logical operations.\n  message MatchSet {\n    // The list of rules that make up the set.\n    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];\n  }\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // A set that describes a logical OR. If any member of the set matches, the match configuration\n    // matches.\n    MatchSet or_match = 1;\n\n    // A set that describes a logical AND. If all members of the set match, the match configuration\n    // matches.\n    MatchSet and_match = 2;\n\n    // A negation match. The match configuration will match if the negated match condition matches.\n    MatchPredicate not_match = 3;\n\n    // The match configuration will always match.\n    bool any_match = 4 [(validate.rules).bool = {const: true}];\n\n    // HTTP request headers match configuration.\n    HttpHeadersMatch http_request_headers_match = 5;\n\n    // HTTP request trailers match configuration.\n    HttpHeadersMatch http_request_trailers_match = 6;\n\n    // HTTP response headers match configuration.\n    HttpHeadersMatch http_response_headers_match = 7;\n\n    // HTTP response trailers match configuration.\n    HttpHeadersMatch http_response_trailers_match = 8;\n  }\n}\n\n// HTTP headers match configuration.\nmessage HttpHeadersMatch {\n  // HTTP headers to match.\n  repeated api.v2.route.HeaderMatcher headers = 1;\n}\n\n// Tap output configuration.\nmessage OutputConfig {\n  // Output sinks for tap data. Currently a single sink is allowed in the list. 
Once multiple\n  // sink types are supported this constraint will be relaxed.\n  repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}];\n\n  // For buffered tapping, the maximum amount of received body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v2alpha.Body.truncated>` field will be set. If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_rx_bytes = 2;\n\n  // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to\n  // truncation. If truncation occurs, the :ref:`truncated\n  // <envoy_api_field_data.tap.v2alpha.Body.truncated>` field will be set. If not specified, the\n  // default is 1KiB.\n  google.protobuf.UInt32Value max_buffered_tx_bytes = 3;\n\n  // Indicates whether taps produce a single buffered message per tap, or multiple streamed\n  // messages per tap in the emitted :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v2alpha.TraceWrapper>` messages. Note that streamed tapping does not\n  // mean that no buffering takes place. Buffering may be required if data is processed before a\n  // match can be determined. See the HTTP tap filter :ref:`streaming\n  // <config_http_filters_tap_streaming>` documentation for more information.\n  bool streaming = 4;\n}\n\n// Tap output sink configuration.\nmessage OutputSink {\n  // Output format. All output is in the form of one or more :ref:`TraceWrapper\n  // <envoy_api_msg_data.tap.v2alpha.TraceWrapper>` messages. This enumeration indicates\n  // how those messages are written. Note that not all sinks support all output formats. See\n  // individual sink documentation for more information.\n  enum Format {\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v2alpha.Body>`\n    // data will be present in the :ref:`as_bytes\n    // <envoy_api_field_data.tap.v2alpha.Body.as_bytes>` field. 
This means that body data will be\n    // base64 encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_.\n    JSON_BODY_AS_BYTES = 0;\n\n    // Each message will be written as JSON. Any :ref:`body <envoy_api_msg_data.tap.v2alpha.Body>`\n    // data will be present in the :ref:`as_string\n    // <envoy_api_field_data.tap.v2alpha.Body.as_string>` field. This means that body data will be\n    // string encoded as per the `proto3 JSON mappings\n    // <https://developers.google.com/protocol-buffers/docs/proto3#json>`_. This format type is\n    // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the\n    // user wishes to view it directly without being forced to base64 decode the body.\n    JSON_BODY_AS_STRING = 1;\n\n    // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes\n    // multiple binary messages without any length information the data stream will not be\n    // useful. However, for certain sinks that are self-delimiting (e.g., one message per file)\n    // this output format makes consumption simpler.\n    PROTO_BINARY = 2;\n\n    // Messages are written as a sequence tuples, where each tuple is the message length encoded\n    // as a `protobuf 32-bit varint\n    // <https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.coded_stream>`_\n    // followed by the binary message. The messages can be read back using the language specific\n    // protobuf coded stream implementation to obtain the message length and the message.\n    PROTO_BINARY_LENGTH_DELIMITED = 3;\n\n    // Text proto format.\n    PROTO_TEXT = 4;\n  }\n\n  // Sink output format.\n  Format format = 1 [(validate.rules).enum = {defined_only: true}];\n\n  oneof output_sink_type {\n    option (validate.required) = true;\n\n    // Tap output will be streamed out the :http:post:`/tap` admin endpoint.\n    //\n    // .. 
attention::\n    //\n    //   It is only allowed to specify the streaming admin output sink if the tap is being\n    //   configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has\n    //   been configured to receive tap configuration from some other source (e.g., static\n    //   file, XDS, etc.) configuring the streaming admin output type will fail.\n    StreamingAdminSink streaming_admin = 2;\n\n    // Tap output will be written to a file per tap sink.\n    FilePerTapSink file_per_tap = 3;\n\n    // [#not-implemented-hide:]\n    // GrpcService to stream data to. The format argument must be PROTO_BINARY.\n    StreamingGrpcSink streaming_grpc = 4;\n  }\n}\n\n// Streaming admin sink configuration.\nmessage StreamingAdminSink {\n}\n\n// The file per tap sink outputs a discrete file for every tapped stream.\nmessage FilePerTapSink {\n  // Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an\n  // identifier distinguishing the recorded trace for stream instances (the Envoy\n  // connection ID, HTTP stream ID, etc.).\n  string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}];\n}\n\n// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC\n// server.\nmessage StreamingGrpcSink {\n  // Opaque identifier, that will be sent back to the streaming grpc server.\n  string tap_id = 1;\n\n  // The gRPC server that hosts the Tap Sink Service.\n  api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/tap/v2alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.tap.v2alpha;\n\nimport \"envoy/api/v2/core/base.proto\";\nimport \"envoy/data/tap/v2alpha/wrapper.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.tap.v2alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Tap Sink Service]\n\n// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call\n// StreamTaps to deliver captured taps to the server\nservice TapSinkService {\n  // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect.\n  rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server\n// and stream taps without ever expecting a response.\nmessage StreamTapsRequest {\n  message Identifier {\n    // The node sending taps over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The opaque identifier that was set in the :ref:`output config\n    // <envoy_api_field_service.tap.v2alpha.StreamingGrpcSink.tap_id>`.\n    string tap_id = 2;\n  }\n\n  // Identifier data effectively is a structured metadata. As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // The trace id. this can be used to merge together a streaming trace. 
Note that the trace_id\n  // is not guaranteed to be spatially or temporally unique.\n  uint64 trace_id = 2;\n\n  // The trace data.\n  data.tap.v2alpha.TraceWrapper trace = 3;\n}\n\n// [#not-implemented-hide:]\nmessage StreamTapsResponse {\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/tap/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/data/tap/v3:pkg\",\n        \"//envoy/service/tap/v2alpha:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/tap/v3/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.tap.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/data/tap/v3/wrapper.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.tap.v3\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Tap Sink Service]\n\n// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call\n// StreamTaps to deliver captured taps to the server\nservice TapSinkService {\n  // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect.\n  rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server\n// and stream taps without ever expecting a response.\nmessage StreamTapsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.StreamTapsRequest\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.tap.v2alpha.StreamTapsRequest.Identifier\";\n\n    // The node sending taps over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The opaque identifier that was set in the :ref:`output config\n    // <envoy_api_field_config.tap.v3.StreamingGrpcSink.tap_id>`.\n    string tap_id = 2;\n  }\n\n  // Identifier data effectively is a structured metadata. 
As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // The trace id. this can be used to merge together a streaming trace. Note that the trace_id\n  // is not guaranteed to be spatially or temporally unique.\n  uint64 trace_id = 2;\n\n  // The trace data.\n  data.tap.v3.TraceWrapper trace = 3;\n}\n\n// [#not-implemented-hide:]\nmessage StreamTapsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v2alpha.StreamTapsResponse\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/tap/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/data/tap/v3:pkg\",\n        \"//envoy/service/tap/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/tap/v4alpha/tap.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.tap.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\nimport \"envoy/data/tap/v3/wrapper.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.tap.v4alpha\";\noption java_outer_classname = \"TapProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Tap Sink Service]\n\n// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call\n// StreamTaps to deliver captured taps to the server\nservice TapSinkService {\n  // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any\n  // response to be sent as nothing would be done in the case of failure. The server should\n  // disconnect if it expects Envoy to reconnect.\n  rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) {\n  }\n}\n\n// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server\n// and stream taps without ever expecting a response.\nmessage StreamTapsRequest {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v3.StreamTapsRequest\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.tap.v3.StreamTapsRequest.Identifier\";\n\n    // The node sending taps over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n\n    // The opaque identifier that was set in the :ref:`output config\n    // <envoy_api_field_config.tap.v4alpha.StreamingGrpcSink.tap_id>`.\n    string tap_id = 2;\n  }\n\n  // Identifier data effectively is a structured metadata. 
As a performance optimization this will\n  // only be sent in the first message on the stream.\n  Identifier identifier = 1;\n\n  // The trace id. this can be used to merge together a streaming trace. Note that the trace_id\n  // is not guaranteed to be spatially or temporally unique.\n  uint64 trace_id = 2;\n\n  // The trace data.\n  data.tap.v3.TraceWrapper trace = 3;\n}\n\n// [#not-implemented-hide:]\nmessage StreamTapsResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.tap.v3.StreamTapsResponse\";\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/trace/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/api/v2/core:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/trace/v2/trace_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.trace.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"opencensus/proto/trace/v1/trace.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.trace.v2\";\noption java_outer_classname = \"TraceServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Trace service]\n\n// Service for streaming traces to server that consumes the trace data. It\n// uses OpenCensus data model as a standard to represent trace information.\nservice TraceService {\n  // Envoy will connect and send StreamTracesMessage messages forever. It does\n  // not expect any response to be sent as nothing would be done in the case\n  // of failure.\n  rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) {\n  }\n}\n\nmessage StreamTracesResponse {\n}\n\nmessage StreamTracesMessage {\n  message Identifier {\n    // The node sending the access log messages over the stream.\n    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata.\n  // As a performance optimization this will only be sent in the first message\n  // on the stream.\n  Identifier identifier = 1;\n\n  // A list of Span entries\n  repeated opencensus.proto.trace.v1.Span spans = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/trace/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v3:pkg\",\n        \"//envoy/service/trace/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/trace/v3/trace_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.trace.v3;\n\nimport \"envoy/config/core/v3/base.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"opencensus/proto/trace/v1/trace.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.trace.v3\";\noption java_outer_classname = \"TraceServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Trace service]\n\n// Service for streaming traces to server that consumes the trace data. It\n// uses OpenCensus data model as a standard to represent trace information.\nservice TraceService {\n  // Envoy will connect and send StreamTracesMessage messages forever. It does\n  // not expect any response to be sent as nothing would be done in the case\n  // of failure.\n  rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) {\n  }\n}\n\nmessage StreamTracesResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.trace.v2.StreamTracesResponse\";\n}\n\nmessage StreamTracesMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.trace.v2.StreamTracesMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.trace.v2.StreamTracesMessage.Identifier\";\n\n    // The node sending the access log messages over the stream.\n    config.core.v3.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata.\n  // As a performance optimization this will only be sent in the first message\n  // on the stream.\n  Identifier identifier = 1;\n\n  // A list of Span entries\n  repeated opencensus.proto.trace.v1.Span spans = 
2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/trace/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    has_services = True,\n    deps = [\n        \"//envoy/config/core/v4alpha:pkg\",\n        \"//envoy/service/trace/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@opencensus_proto//opencensus/proto/trace/v1:trace_proto\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/service/trace/v4alpha/trace_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.service.trace.v4alpha;\n\nimport \"envoy/config/core/v4alpha/base.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"opencensus/proto/trace/v1/trace.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.service.trace.v4alpha\";\noption java_outer_classname = \"TraceServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Trace service]\n\n// Service for streaming traces to server that consumes the trace data. It\n// uses OpenCensus data model as a standard to represent trace information.\nservice TraceService {\n  // Envoy will connect and send StreamTracesMessage messages forever. It does\n  // not expect any response to be sent as nothing would be done in the case\n  // of failure.\n  rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) {\n  }\n}\n\nmessage StreamTracesResponse {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.trace.v3.StreamTracesResponse\";\n}\n\nmessage StreamTracesMessage {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.service.trace.v3.StreamTracesMessage\";\n\n  message Identifier {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.service.trace.v3.StreamTracesMessage.Identifier\";\n\n    // The node sending the access log messages over the stream.\n    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // Identifier data effectively is a structured metadata.\n  // As a performance optimization this will only be sent in the first message\n  // on the stream.\n  Identifier identifier = 1;\n\n  // A list of Span entries\n  repeated 
opencensus.proto.trace.v1.Span spans = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/hash_policy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"HashPolicyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Hash Policy]\n\n// Specifies the hash policy\nmessage HashPolicy {\n  // The source IP will be used to compute the hash used by hash-based load balancing\n  // algorithms.\n  message SourceIp {\n  }\n\n  oneof policy_specifier {\n    option (validate.required) = true;\n\n    SourceIp source_ip = 1;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/http.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"HttpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP]\n\nenum CodecClientType {\n  HTTP1 = 0;\n\n  HTTP2 = 1;\n\n  // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n  // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n  // to distinguish HTTP1 and HTTP2 traffic.\n  HTTP3 = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/http_status.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"HttpStatusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: HTTP status codes]\n\n// HTTP response codes supported in Envoy.\n// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml\nenum StatusCode {\n  // Empty - This code not part of the HTTP status code specification, but it is needed for proto\n  // `enum` type.\n  Empty = 0;\n\n  Continue = 100;\n\n  OK = 200;\n\n  Created = 201;\n\n  Accepted = 202;\n\n  NonAuthoritativeInformation = 203;\n\n  NoContent = 204;\n\n  ResetContent = 205;\n\n  PartialContent = 206;\n\n  MultiStatus = 207;\n\n  AlreadyReported = 208;\n\n  IMUsed = 226;\n\n  MultipleChoices = 300;\n\n  MovedPermanently = 301;\n\n  Found = 302;\n\n  SeeOther = 303;\n\n  NotModified = 304;\n\n  UseProxy = 305;\n\n  TemporaryRedirect = 307;\n\n  PermanentRedirect = 308;\n\n  BadRequest = 400;\n\n  Unauthorized = 401;\n\n  PaymentRequired = 402;\n\n  Forbidden = 403;\n\n  NotFound = 404;\n\n  MethodNotAllowed = 405;\n\n  NotAcceptable = 406;\n\n  ProxyAuthenticationRequired = 407;\n\n  RequestTimeout = 408;\n\n  Conflict = 409;\n\n  Gone = 410;\n\n  LengthRequired = 411;\n\n  PreconditionFailed = 412;\n\n  PayloadTooLarge = 413;\n\n  URITooLong = 414;\n\n  UnsupportedMediaType = 415;\n\n  RangeNotSatisfiable = 416;\n\n  ExpectationFailed = 417;\n\n  MisdirectedRequest = 421;\n\n  UnprocessableEntity = 422;\n\n  Locked = 423;\n\n  FailedDependency = 424;\n\n  UpgradeRequired = 426;\n\n  PreconditionRequired = 428;\n\n  TooManyRequests = 429;\n\n  RequestHeaderFieldsTooLarge = 431;\n\n  InternalServerError = 500;\n\n  NotImplemented = 501;\n\n  BadGateway = 502;\n\n  ServiceUnavailable = 503;\n\n  
GatewayTimeout = 504;\n\n  HTTPVersionNotSupported = 505;\n\n  VariantAlsoNegotiates = 506;\n\n  InsufficientStorage = 507;\n\n  LoopDetected = 508;\n\n  NotExtended = 510;\n\n  NetworkAuthenticationRequired = 511;\n}\n\n// HTTP status.\nmessage HttpStatus {\n  // Supplies HTTP response code.\n  StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metadata matcher]\n\n// MetadataMatcher provides a general interface to check if a given value is matched in\n// :ref:`Metadata <envoy_api_msg_core.Metadata>`. It uses `filter` and `path` to retrieve the value\n// from the Metadata and then check if it's matched to the specified value.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.filters.http.rbac:\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following MetadataMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to\n// enforce access control based on dynamic metadata in a request. See :ref:`Permission\n// <envoy_api_msg_config.rbac.v2.Permission>` and :ref:`Principal\n// <envoy_api_msg_config.rbac.v2.Principal>`.\n\n// [#next-major-version: MetadataMatcher should use StructMatcher]\nmessage MetadataMatcher {\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that\n  // if the segment key refers to a list, it has to be the last segment in a path.\n  message PathSegment {\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The filter name to retrieve the Struct from the Metadata.\n  string filter = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The MetadataMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/node.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/string.proto\";\nimport \"envoy/type/matcher/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"NodeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Node matcher]\n\n// Specifies the way to match a Node.\n// The match follows AND semantics.\nmessage NodeMatcher {\n  // Specifies match criteria on the node id.\n  StringMatcher node_id = 1;\n\n  // Specifies match criteria on the node metadata.\n  repeated StructMatcher node_metadatas = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/number.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"NumberProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Number matcher]\n\n// Specifies the way to match a double value.\nmessage DoubleMatcher {\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, the input double value must be in the range specified here.\n    // Note: The range is using half-open interval semantics [start, end).\n    DoubleRange range = 1;\n\n    // If specified, the input double value must be equal to the value specified here.\n    double exact = 2;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/path.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"PathProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Path matcher]\n\n// Specifies the way to match a path on HTTP request.\nmessage PathMatcher {\n  oneof rule {\n    option (validate.required) = true;\n\n    // The `path` must match the URL path portion of the :path header. The query and fragment\n    // string (if present) are removed in the URL path portion.\n    // For example, the path */data* will match the *:path* header */data#fragment?param=value*.\n    StringMatcher path = 1 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/regex.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"RegexProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Regex matcher]\n\n// A regex matcher designed for safety when used with untrusted input.\nmessage RegexMatcher {\n  // Google's `RE2 <https://github.com/google/re2>`_ regex engine. The regex string must adhere to\n  // the documented `syntax <https://github.com/google/re2/wiki/Syntax>`_. The engine is designed\n  // to complete execution in linear time as well as limit the amount of memory used.\n  //\n  // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level`\n  // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or\n  // complexity that a compiled regex can have before an exception is thrown or a warning is\n  // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and\n  // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning).\n  //\n  // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`,\n  // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented\n  // each time the program size exceeds the warn level threshold.\n  message GoogleRE2 {\n    // This field controls the RE2 \"program size\" which is a rough estimate of how complex a\n    // compiled regex is to evaluate. A regex that has a program size greater than the configured\n    // value will fail to compile. In this case, the configured max program size can be increased\n    // or the regex can be simplified. 
If not specified, the default is 100.\n    //\n    // This field is deprecated; regexp validation should be performed on the management server\n    // instead of being done by each individual client.\n    google.protobuf.UInt32Value max_program_size = 1 [deprecated = true];\n  }\n\n  oneof engine_type {\n    option (validate.required) = true;\n\n    // Google's RE2 regex engine.\n    GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // The regex match string. The string must be supported by the configured engine.\n  string regex = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Describes how to match a string and then produce a new string using a regular\n// expression and a substitution string.\nmessage RegexMatchAndSubstitute {\n  // The regular expression used to find portions of a string (hereafter called\n  // the \"subject string\") that should be replaced. When a new string is\n  // produced during the substitution operation, the new string is initially\n  // the same as the subject string, but then all matches in the subject string\n  // are replaced by the substitution string. If replacing all matches isn't\n  // desired, regular expression anchors can be used to ensure a single match,\n  // so as to replace just one occurrence of a pattern. Capture groups can be\n  // used in the pattern to extract portions of the subject string, and then\n  // referenced in the substitution string.\n  RegexMatcher pattern = 1;\n\n  // The string that should be substituted into matching portions of the\n  // subject string during a substitution operation to produce a new string.\n  // Capture groups in the pattern can be referenced in the substitution\n  // string. Note, however, that the syntax for referring to capture groups is\n  // defined by the chosen regular expression engine. 
Google's `RE2\n  // <https://github.com/google/re2>`_ regular expression engine uses a\n  // backslash followed by the capture group number to denote a numbered\n  // capture group. E.g., ``\\1`` refers to capture group 1, and ``\\2`` refers\n  // to capture group 2.\n  string substitution = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/regex.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"StringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: String matcher]\n\n// Specifies the way to match a string.\n// [#next-free-field: 7]\nmessage StringMatcher {\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // The input string must match exactly the string specified here.\n    //\n    // Examples:\n    //\n    // * *abc* only matches the value *abc*.\n    string exact = 1;\n\n    // The input string must have the prefix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *abc.xyz*\n    string prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must have the suffix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *xyz.abc*\n    string suffix = 3 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must match the regular expression specified here.\n    // The regex grammar is defined `here\n    // <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.\n    //\n    // Examples:\n    //\n    // * The regex ``\\d{3}`` matches the value *123*\n    // * The regex ``\\d{3}`` does not match the value *1234*\n    // * The regex ``\\d{3}`` does not match the value *123.456*\n    //\n    // .. 
attention::\n    //   This field has been deprecated in favor of `safe_regex` as it is not safe for use with\n    //   untrusted input in all cases.\n    string regex = 4 [\n      deprecated = true,\n      (validate.rules).string = {max_bytes: 1024},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n\n    // The input string must match the regular expression specified here.\n    RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];\n  }\n\n  // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no\n  // effect for the safe_regex match.\n  // For example, the matcher *data* will match both input string *Data* and *data* if set to true.\n  bool ignore_case = 6;\n}\n\n// Specifies a list of ways to match a string.\nmessage ListStringMatcher {\n  repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/struct.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"StructProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Struct matcher]\n\n// StructMatcher provides a general interface to check if a given value is matched in\n// google.protobuf.Struct. It uses `path` to retrieve the value\n// from the struct and then check if it's matched to the specified value.\n//\n// For example, for the following Struct:\n//\n// .. code-block:: yaml\n//\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following StructMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of StructMatcher is to match metadata in envoy.v*.core.Node.\nmessage StructMatcher {\n  // Specifies the segment in a path to retrieve value from Struct.\n  message PathSegment {\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The StructMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type/matcher:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v3/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metadata matcher]\n\n// MetadataMatcher provides a general interface to check if a given value is matched in\n// :ref:`Metadata <envoy_api_msg_config.core.v3.Metadata>`. It uses `filter` and `path` to retrieve the value\n// from the Metadata and then check if it's matched to the specified value.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.filters.http.rbac:\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following MetadataMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to\n// enforce access control based on dynamic metadata in a request. See :ref:`Permission\n// <envoy_api_msg_config.rbac.v3.Permission>` and :ref:`Principal\n// <envoy_api_msg_config.rbac.v3.Principal>`.\n\n// [#next-major-version: MetadataMatcher should use StructMatcher]\nmessage MetadataMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.MetadataMatcher\";\n\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that\n  // if the segment key refers to a list, it has to be the last segment in a path.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.MetadataMatcher.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The filter name to retrieve the Struct from the Metadata.\n  string filter = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The MetadataMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v3/node.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/string.proto\";\nimport \"envoy/type/matcher/v3/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"NodeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Node matcher]\n\n// Specifies the way to match a Node.\n// The match follows AND semantics.\nmessage NodeMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.NodeMatcher\";\n\n  // Specifies match criteria on the node id.\n  StringMatcher node_id = 1;\n\n  // Specifies match criteria on the node metadata.\n  repeated StructMatcher node_metadatas = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v3/number.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/v3/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"NumberProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Number matcher]\n\n// Specifies the way to match a double value.\nmessage DoubleMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.DoubleMatcher\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, the input double value must be in the range specified here.\n    // Note: The range is using half-open interval semantics [start, end).\n    type.v3.DoubleRange range = 1;\n\n    // If specified, the input double value must be equal to the value specified here.\n    double exact = 2;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v3/path.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"PathProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Path matcher]\n\n// Specifies the way to match a path on HTTP request.\nmessage PathMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.PathMatcher\";\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // The `path` must match the URL path portion of the :path header. The query and fragment\n    // string (if present) are removed in the URL path portion.\n    // For example, the path */data* will match the *:path* header */data#fragment?param=value*.\n    StringMatcher path = 1 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v3/regex.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"RegexProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Regex matcher]\n\n// A regex matcher designed for safety when used with untrusted input.\nmessage RegexMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.RegexMatcher\";\n\n  // Google's `RE2 <https://github.com/google/re2>`_ regex engine. The regex string must adhere to\n  // the documented `syntax <https://github.com/google/re2/wiki/Syntax>`_. The engine is designed\n  // to complete execution in linear time as well as limit the amount of memory used.\n  //\n  // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level`\n  // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or\n  // complexity that a compiled regex can have before an exception is thrown or a warning is\n  // logged, respectively. 
`re2.max_program_size.error_level` defaults to 100, and\n  // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning).\n  //\n  // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`,\n  // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented\n  // each time the program size exceeds the warn level threshold.\n  message GoogleRE2 {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.RegexMatcher.GoogleRE2\";\n\n    // This field controls the RE2 \"program size\" which is a rough estimate of how complex a\n    // compiled regex is to evaluate. A regex that has a program size greater than the configured\n    // value will fail to compile. In this case, the configured max program size can be increased\n    // or the regex can be simplified. If not specified, the default is 100.\n    //\n    // This field is deprecated; regexp validation should be performed on the management server\n    // instead of being done by each individual client.\n    google.protobuf.UInt32Value max_program_size = 1 [deprecated = true];\n  }\n\n  oneof engine_type {\n    option (validate.required) = true;\n\n    // Google's RE2 regex engine.\n    GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // The regex match string. The string must be supported by the configured engine.\n  string regex = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Describes how to match a string and then produce a new string using a regular\n// expression and a substitution string.\nmessage RegexMatchAndSubstitute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.RegexMatchAndSubstitute\";\n\n  // The regular expression used to find portions of a string (hereafter called\n  // the \"subject string\") that should be replaced. 
When a new string is\n  // produced during the substitution operation, the new string is initially\n  // the same as the subject string, but then all matches in the subject string\n  // are replaced by the substitution string. If replacing all matches isn't\n  // desired, regular expression anchors can be used to ensure a single match,\n  // so as to replace just one occurrence of a pattern. Capture groups can be\n  // used in the pattern to extract portions of the subject string, and then\n  // referenced in the substitution string.\n  RegexMatcher pattern = 1 [(validate.rules).message = {required: true}];\n\n  // The string that should be substituted into matching portions of the\n  // subject string during a substitution operation to produce a new string.\n  // Capture groups in the pattern can be referenced in the substitution\n  // string. Note, however, that the syntax for referring to capture groups is\n  // defined by the chosen regular expression engine. Google's `RE2\n  // <https://github.com/google/re2>`_ regular expression engine uses a\n  // backslash followed by the capture group number to denote a numbered\n  // capture group. E.g., ``\\1`` refers to capture group 1, and ``\\2`` refers\n  // to capture group 2.\n  string substitution = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v3/string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/regex.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"StringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: String matcher]\n\n// Specifies the way to match a string.\n// [#next-free-field: 8]\nmessage StringMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.StringMatcher\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // The input string must match exactly the string specified here.\n    //\n    // Examples:\n    //\n    // * *abc* only matches the value *abc*.\n    string exact = 1;\n\n    // The input string must have the prefix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *abc.xyz*\n    string prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must have the suffix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *xyz.abc*\n    string suffix = 3 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must match the regular expression specified here.\n    RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];\n\n    // The input string must have the substring specified here.\n    // Note: empty contains match is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *xyz.abc.def*\n    string contains = 7 [(validate.rules).string = {min_len: 
1}];\n\n    string hidden_envoy_deprecated_regex = 4 [\n      deprecated = true,\n      (validate.rules).string = {max_bytes: 1024},\n      (envoy.annotations.disallowed_by_default) = true\n    ];\n  }\n\n  // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no\n  // effect for the safe_regex match.\n  // For example, the matcher *data* will match both input string *Data* and *data* if set to true.\n  bool ignore_case = 6;\n}\n\n// Specifies a list of ways to match a string.\nmessage ListStringMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.ListStringMatcher\";\n\n  repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v3/struct.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"StructProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Struct matcher]\n\n// StructMatcher provides a general interface to check if a given value is matched in\n// google.protobuf.Struct. It uses `path` to retrieve the value\n// from the struct and then check if it's matched to the specified value.\n//\n// For example, for the following Struct:\n//\n// .. code-block:: yaml\n//\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following StructMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of StructMatcher is to match metadata in envoy.v*.core.Node.\nmessage StructMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.StructMatcher\";\n\n  // Specifies the segment in a path to retrieve value from Struct.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.StructMatcher.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The StructMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v3/value.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v3;\n\nimport \"envoy/type/matcher/v3/number.proto\";\nimport \"envoy/type/matcher/v3/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v3\";\noption java_outer_classname = \"ValueProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Value matcher]\n\n// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported.\n// StructValue is not supported and is always not matched.\n// [#next-free-field: 7]\nmessage ValueMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.ValueMatcher\";\n\n  // NullMatch is an empty message to specify a null value.\n  message NullMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.ValueMatcher.NullMatch\";\n  }\n\n  // Specifies how to match a value.\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, a match occurs if and only if the target value is a NullValue.\n    NullMatch null_match = 1;\n\n    // If specified, a match occurs if and only if the target value is a double value and is\n    // matched to this field.\n    DoubleMatcher double_match = 2;\n\n    // If specified, a match occurs if and only if the target value is a string value and is\n    // matched to this field.\n    StringMatcher string_match = 3;\n\n    // If specified, a match occurs if and only if the target value is a bool value and is equal\n    // to this field.\n    bool bool_match = 4;\n\n    // If specified, value match will be performed based on whether the path is referring to a\n    // valid primitive value in the metadata. 
If the path is referring to a non-primitive value,\n    // the result is always not matched.\n    bool present_match = 5;\n\n    // If specified, a match occurs if and only if the target value is a list value and\n    // is matched to this field.\n    ListMatcher list_match = 6;\n  }\n}\n\n// Specifies the way to match a list value.\nmessage ListMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.ListMatcher\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, at least one of the values in the list must match the value specified.\n    ValueMatcher one_of = 1;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v4alpha/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/annotations:pkg\",\n        \"//envoy/type/matcher/v3:pkg\",\n        \"//envoy/type/v3:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Metadata matcher]\n\n// MetadataMatcher provides a general interface to check if a given value is matched in\n// :ref:`Metadata <envoy_api_msg_config.core.v4alpha.Metadata>`. It uses `filter` and `path` to retrieve the value\n// from the Metadata and then check if it's matched to the specified value.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.filters.http.rbac:\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following MetadataMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    filter: envoy.filters.http.rbac\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to\n// enforce access control based on dynamic metadata in a request. See :ref:`Permission\n// <envoy_api_msg_config.rbac.v4alpha.Permission>` and :ref:`Principal\n// <envoy_api_msg_config.rbac.v4alpha.Principal>`.\n\n// [#next-major-version: MetadataMatcher should use StructMatcher]\nmessage MetadataMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.MetadataMatcher\";\n\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that\n  // if the segment key refers to a list, it has to be the last segment in a path.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.v3.MetadataMatcher.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The filter name to retrieve the Struct from the Metadata.\n  string filter = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The MetadataMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v4alpha/node.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/string.proto\";\nimport \"envoy/type/matcher/v4alpha/struct.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"NodeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Node matcher]\n\n// Specifies the way to match a Node.\n// The match follows AND semantics.\nmessage NodeMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.NodeMatcher\";\n\n  // Specifies match criteria on the node id.\n  StringMatcher node_id = 1;\n\n  // Specifies match criteria on the node metadata.\n  repeated StructMatcher node_metadatas = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v4alpha/number.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/v3/range.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"NumberProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Number matcher]\n\n// Specifies the way to match a double value.\nmessage DoubleMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.DoubleMatcher\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, the input double value must be in the range specified here.\n    // Note: The range is using half-open interval semantics [start, end).\n    v3.DoubleRange range = 1;\n\n    // If specified, the input double value must be equal to the value specified here.\n    double exact = 2;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v4alpha/path.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"PathProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Path matcher]\n\n// Specifies the way to match a path on HTTP request.\nmessage PathMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.PathMatcher\";\n\n  oneof rule {\n    option (validate.required) = true;\n\n    // The `path` must match the URL path portion of the :path header. The query and fragment\n    // string (if present) are removed in the URL path portion.\n    // For example, the path */data* will match the *:path* header */data#fragment?param=value*.\n    StringMatcher path = 1 [(validate.rules).message = {required: true}];\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"RegexProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Regex matcher]\n\n// A regex matcher designed for safety when used with untrusted input.\nmessage RegexMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.RegexMatcher\";\n\n  // Google's `RE2 <https://github.com/google/re2>`_ regex engine. The regex string must adhere to\n  // the documented `syntax <https://github.com/google/re2/wiki/Syntax>`_. The engine is designed\n  // to complete execution in linear time as well as limit the amount of memory used.\n  //\n  // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level`\n  // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or\n  // complexity that a compiled regex can have before an exception is thrown or a warning is\n  // logged, respectively. 
`re2.max_program_size.error_level` defaults to 100, and\n  // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning).\n  //\n  // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`,\n  // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented\n  // each time the program size exceeds the warn level threshold.\n  message GoogleRE2 {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.v3.RegexMatcher.GoogleRE2\";\n\n    // This field controls the RE2 \"program size\" which is a rough estimate of how complex a\n    // compiled regex is to evaluate. A regex that has a program size greater than the configured\n    // value will fail to compile. In this case, the configured max program size can be increased\n    // or the regex can be simplified. If not specified, the default is 100.\n    //\n    // This field is deprecated; regexp validation should be performed on the management server\n    // instead of being done by each individual client.\n    google.protobuf.UInt32Value hidden_envoy_deprecated_max_program_size = 1 [deprecated = true];\n  }\n\n  oneof engine_type {\n    option (validate.required) = true;\n\n    // Google's RE2 regex engine.\n    GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}];\n  }\n\n  // The regex match string. The string must be supported by the configured engine.\n  string regex = 2 [(validate.rules).string = {min_len: 1}];\n}\n\n// Describes how to match a string and then produce a new string using a regular\n// expression and a substitution string.\nmessage RegexMatchAndSubstitute {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.RegexMatchAndSubstitute\";\n\n  // The regular expression used to find portions of a string (hereafter called\n  // the \"subject string\") that should be replaced. 
When a new string is\n  // produced during the substitution operation, the new string is initially\n  // the same as the subject string, but then all matches in the subject string\n  // are replaced by the substitution string. If replacing all matches isn't\n  // desired, regular expression anchors can be used to ensure a single match,\n  // so as to replace just one occurrence of a pattern. Capture groups can be\n  // used in the pattern to extract portions of the subject string, and then\n  // referenced in the substitution string.\n  RegexMatcher pattern = 1 [(validate.rules).message = {required: true}];\n\n  // The string that should be substituted into matching portions of the\n  // subject string during a substitution operation to produce a new string.\n  // Capture groups in the pattern can be referenced in the substitution\n  // string. Note, however, that the syntax for referring to capture groups is\n  // defined by the chosen regular expression engine. Google's `RE2\n  // <https://github.com/google/re2>`_ regular expression engine uses a\n  // backslash followed by the capture group number to denote a numbered\n  // capture group. E.g., ``\\1`` refers to capture group 1, and ``\\2`` refers\n  // to capture group 2.\n  string substitution = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v4alpha/string.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/regex.proto\";\n\nimport \"envoy/annotations/deprecation.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"StringProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: String matcher]\n\n// Specifies the way to match a string.\n// [#next-free-field: 8]\nmessage StringMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.StringMatcher\";\n\n  reserved 4;\n\n  reserved \"regex\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // The input string must match exactly the string specified here.\n    //\n    // Examples:\n    //\n    // * *abc* only matches the value *abc*.\n    string exact = 1;\n\n    // The input string must have the prefix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *abc.xyz*\n    string prefix = 2 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must have the suffix specified here.\n    // Note: empty prefix is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches the value *xyz.abc*\n    string suffix = 3 [(validate.rules).string = {min_len: 1}];\n\n    // The input string must match the regular expression specified here.\n    RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];\n\n    // The input string must have the substring specified here.\n    // Note: empty contains match is not allowed, please use regex instead.\n    //\n    // Examples:\n    //\n    // * *abc* matches 
the value *xyz.abc.def*\n    string contains = 7 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no\n  // effect for the safe_regex match.\n  // For example, the matcher *data* will match both input string *Data* and *data* if set to true.\n  bool ignore_case = 6;\n}\n\n// Specifies a list of ways to match a string.\nmessage ListStringMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.ListStringMatcher\";\n\n  repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/value.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"StructProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Struct matcher]\n\n// StructMatcher provides a general interface to check if a given value is matched in\n// google.protobuf.Struct. It uses `path` to retrieve the value\n// from the struct and then check if it's matched to the specified value.\n//\n// For example, for the following Struct:\n//\n// .. code-block:: yaml\n//\n//        fields:\n//          a:\n//            struct_value:\n//              fields:\n//                b:\n//                  struct_value:\n//                    fields:\n//                      c:\n//                        string_value: pro\n//                t:\n//                  list_value:\n//                    values:\n//                      - string_value: m\n//                      - string_value: n\n//\n// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value \"pro\"\n// from the Metadata which is matched to the specified prefix match.\n//\n// .. code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: b\n//    - key: c\n//    value:\n//      string_match:\n//        prefix: pr\n//\n// The following StructMatcher is matched as the code will match one of the string values in the\n// list at the path [a, t].\n//\n// .. 
code-block:: yaml\n//\n//    path:\n//    - key: a\n//    - key: t\n//    value:\n//      list_match:\n//        one_of:\n//          string_match:\n//            exact: m\n//\n// An example use of StructMatcher is to match metadata in envoy.v*.core.Node.\nmessage StructMatcher {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.matcher.v3.StructMatcher\";\n\n  // Specifies the segment in a path to retrieve value from Struct.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.v3.StructMatcher.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The path to retrieve the Value from the Struct.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n\n  // The StructMatcher is matched if the value retrieved by path is matched to this value.\n  ValueMatcher value = 3 [(validate.rules).message = {required: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/v4alpha/value.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher.v4alpha;\n\nimport \"envoy/type/matcher/v4alpha/number.proto\";\nimport \"envoy/type/matcher/v4alpha/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher.v4alpha\";\noption java_outer_classname = \"ValueProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// [#protodoc-title: Value matcher]\n\n// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported.\n// StructValue is not supported and is always not matched.\n// [#next-free-field: 7]\nmessage ValueMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.ValueMatcher\";\n\n  // NullMatch is an empty message to specify a null value.\n  message NullMatch {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.matcher.v3.ValueMatcher.NullMatch\";\n  }\n\n  // Specifies how to match a value.\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, a match occurs if and only if the target value is a NullValue.\n    NullMatch null_match = 1;\n\n    // If specified, a match occurs if and only if the target value is a double value and is\n    // matched to this field.\n    DoubleMatcher double_match = 2;\n\n    // If specified, a match occurs if and only if the target value is a string value and is\n    // matched to this field.\n    StringMatcher string_match = 3;\n\n    // If specified, a match occurs if and only if the target value is a bool value and is equal\n    // to this field.\n    bool bool_match = 4;\n\n    // If specified, value match will be performed based on whether the path is referring to a\n    // valid primitive value in the metadata. 
If the path is referring to a non-primitive value,\n    // the result is always not matched.\n    bool present_match = 5;\n\n    // If specified, a match occurs if and only if the target value is a list value and\n    // is matched to this field.\n    ListMatcher list_match = 6;\n  }\n}\n\n// Specifies the way to match a list value.\nmessage ListMatcher {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.matcher.v3.ListMatcher\";\n\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, at least one of the values in the list must match the value specified.\n    ValueMatcher one_of = 1;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/matcher/value.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.matcher;\n\nimport \"envoy/type/matcher/number.proto\";\nimport \"envoy/type/matcher/string.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.matcher\";\noption java_outer_classname = \"ValueProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Value matcher]\n\n// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported.\n// StructValue is not supported and is always not matched.\n// [#next-free-field: 7]\nmessage ValueMatcher {\n  // NullMatch is an empty message to specify a null value.\n  message NullMatch {\n  }\n\n  // Specifies how to match a value.\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, a match occurs if and only if the target value is a NullValue.\n    NullMatch null_match = 1;\n\n    // If specified, a match occurs if and only if the target value is a double value and is\n    // matched to this field.\n    DoubleMatcher double_match = 2;\n\n    // If specified, a match occurs if and only if the target value is a string value and is\n    // matched to this field.\n    StringMatcher string_match = 3;\n\n    // If specified, a match occurs if and only if the target value is a bool value and is equal\n    // to this field.\n    bool bool_match = 4;\n\n    // If specified, value match will be performed based on whether the path is referring to a\n    // valid primitive value in the metadata. 
If the path is referring to a non-primitive value,\n    // the result is always not matched.\n    bool present_match = 5;\n\n    // If specified, a match occurs if and only if the target value is a list value and\n    // is matched to this field.\n    ListMatcher list_match = 6;\n  }\n}\n\n// Specifies the way to match a list value.\nmessage ListMatcher {\n  oneof match_pattern {\n    option (validate.required) = true;\n\n    // If specified, at least one of the values in the list must match the value specified.\n    ValueMatcher one_of = 1;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/metadata/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/metadata/v2/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.metadata.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.metadata.v2\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.type.metadata.v3\";\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Metadata]\n\n// MetadataKey provides a general interface using `key` and `path` to retrieve value from\n// :ref:`Metadata <envoy_api_msg_core.Metadata>`.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.xxx:\n//        prop:\n//          foo: bar\n//          xyz:\n//            hello: envoy\n//\n// The following MetadataKey will retrieve a string value \"bar\" from the Metadata.\n//\n// .. code-block:: yaml\n//\n//    key: envoy.xxx\n//    path:\n//    - key: prop\n//    - key: foo\n//\nmessage MetadataKey {\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Currently it is only supported to specify the key, i.e. field name, as one segment of a path.\n  message PathSegment {\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_bytes: 1}];\n    }\n  }\n\n  // The key name of Metadata to retrieve the Struct from the metadata.\n  // Typically, it represents a builtin subsystem or custom extension.\n  string key = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // The path to retrieve the Value from the Struct. It can be a prefix or a full path,\n  // e.g. 
``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example,\n  // which depends on the particular scenario.\n  //\n  // Note: Due to that only the key type segment is supported, the path can not specify a list\n  // unless the list is the last segment.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// Describes what kind of metadata.\nmessage MetadataKind {\n  // Represents dynamic metadata associated with the request.\n  message Request {\n  }\n\n  // Represents metadata from :ref:`the route<envoy_api_field_route.Route.metadata>`.\n  message Route {\n  }\n\n  // Represents metadata from :ref:`the upstream cluster<envoy_api_field_Cluster.metadata>`.\n  message Cluster {\n  }\n\n  // Represents metadata from :ref:`the upstream\n  // host<envoy_api_field_endpoint.LbEndpoint.metadata>`.\n  message Host {\n  }\n\n  oneof kind {\n    option (validate.required) = true;\n\n    // Request kind of metadata.\n    Request request = 1;\n\n    // Route kind of metadata.\n    Route route = 2;\n\n    // Cluster kind of metadata.\n    Cluster cluster = 3;\n\n    // Host kind of metadata.\n    Host host = 4;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/metadata/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type/metadata/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/metadata/v3/metadata.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.metadata.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.metadata.v3\";\noption java_outer_classname = \"MetadataProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Metadata]\n\n// MetadataKey provides a general interface using `key` and `path` to retrieve value from\n// :ref:`Metadata <envoy_api_msg_config.core.v3.Metadata>`.\n//\n// For example, for the following Metadata:\n//\n// .. code-block:: yaml\n//\n//    filter_metadata:\n//      envoy.xxx:\n//        prop:\n//          foo: bar\n//          xyz:\n//            hello: envoy\n//\n// The following MetadataKey will retrieve a string value \"bar\" from the Metadata.\n//\n// .. code-block:: yaml\n//\n//    key: envoy.xxx\n//    path:\n//    - key: prop\n//    - key: foo\n//\nmessage MetadataKey {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.metadata.v2.MetadataKey\";\n\n  // Specifies the segment in a path to retrieve value from Metadata.\n  // Currently it is only supported to specify the key, i.e. field name, as one segment of a path.\n  message PathSegment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKey.PathSegment\";\n\n    oneof segment {\n      option (validate.required) = true;\n\n      // If specified, use the key to retrieve the value in a Struct.\n      string key = 1 [(validate.rules).string = {min_len: 1}];\n    }\n  }\n\n  // The key name of Metadata to retrieve the Struct from the metadata.\n  // Typically, it represents a builtin subsystem or custom extension.\n  string key = 1 [(validate.rules).string = {min_len: 1}];\n\n  // The path to retrieve the Value from the Struct. It can be a prefix or a full path,\n  // e.g. 
``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example,\n  // which depends on the particular scenario.\n  //\n  // Note: Due to that only the key type segment is supported, the path can not specify a list\n  // unless the list is the last segment.\n  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];\n}\n\n// Describes what kind of metadata.\nmessage MetadataKind {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.type.metadata.v2.MetadataKind\";\n\n  // Represents dynamic metadata associated with the request.\n  message Request {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKind.Request\";\n  }\n\n  // Represents metadata from :ref:`the route<envoy_api_field_config.route.v3.Route.metadata>`.\n  message Route {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKind.Route\";\n  }\n\n  // Represents metadata from :ref:`the upstream cluster<envoy_api_field_config.cluster.v3.Cluster.metadata>`.\n  message Cluster {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKind.Cluster\";\n  }\n\n  // Represents metadata from :ref:`the upstream\n  // host<envoy_api_field_config.endpoint.v3.LbEndpoint.metadata>`.\n  message Host {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.metadata.v2.MetadataKind.Host\";\n  }\n\n  oneof kind {\n    option (validate.required) = true;\n\n    // Request kind of metadata.\n    Request request = 1;\n\n    // Route kind of metadata.\n    Route route = 2;\n\n    // Cluster kind of metadata.\n    Cluster cluster = 3;\n\n    // Host kind of metadata.\n    Host host = 4;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/percent.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"PercentProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Percent]\n\n// Identifies a percentage, in the range [0.0, 100.0].\nmessage Percent {\n  double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}];\n}\n\n// A fractional percentage is used in cases in which for performance reasons performing floating\n// point to integer conversions during randomness calculations is undesirable. The message includes\n// both a numerator and denominator that together determine the final fractional value.\n//\n// * **Example**: 1/100 = 1%.\n// * **Example**: 3/10000 = 0.03%.\nmessage FractionalPercent {\n  // Fraction percentages support several fixed denominator values.\n  enum DenominatorType {\n    // 100.\n    //\n    // **Example**: 1/100 = 1%.\n    HUNDRED = 0;\n\n    // 10,000.\n    //\n    // **Example**: 1/10000 = 0.01%.\n    TEN_THOUSAND = 1;\n\n    // 1,000,000.\n    //\n    // **Example**: 1/1000000 = 0.0001%.\n    MILLION = 2;\n  }\n\n  // Specifies the numerator. Defaults to 0.\n  uint32 numerator = 1;\n\n  // Specifies the denominator. If the denominator specified is less than the numerator, the final\n  // fractional percentage is capped at 1 (100%).\n  DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/range.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"RangeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Range]\n\n// Specifies the int64 start and end of the range using half-open interval semantics [start,\n// end).\nmessage Int64Range {\n  // start of the range (inclusive)\n  int64 start = 1;\n\n  // end of the range (exclusive)\n  int64 end = 2;\n}\n\n// Specifies the int32 start and end of the range using half-open interval semantics [start,\n// end).\nmessage Int32Range {\n  // start of the range (inclusive)\n  int32 start = 1;\n\n  // end of the range (exclusive)\n  int32 end = 2;\n}\n\n// Specifies the double start and end of the range using half-open interval semantics [start,\n// end).\nmessage DoubleRange {\n  // start of the range (inclusive)\n  double start = 1;\n\n  // end of the range (exclusive)\n  double end = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/semantic_version.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"SemanticVersionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Semantic Version]\n\n// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate\n// expected behaviors and APIs, the patch version field is used only\n// for security fixes and can be generally ignored.\nmessage SemanticVersion {\n  uint32 major_number = 1;\n\n  uint32 minor_number = 2;\n\n  uint32 patch = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/token_bucket.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type\";\noption java_outer_classname = \"TokenBucketProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Token bucket]\n\n// Configures a token bucket, typically used for rate limiting.\nmessage TokenBucket {\n  // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket\n  // initially contains.\n  uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // The number of tokens added to the bucket during each fill interval. If not specified, defaults\n  // to a single token.\n  google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}];\n\n  // The fill interval that tokens are added to the bucket. During each fill interval\n  // `tokens_per_fill` are added to the bucket. The bucket will never contain more than\n  // `max_tokens` tokens.\n  google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/tracing/v2/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type/metadata/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.tracing.v2;\n\nimport \"envoy/type/metadata/v2/metadata.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.tracing.v2\";\noption java_outer_classname = \"CustomTagProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\n// [#protodoc-title: Custom Tag]\n\n// Describes custom tags for the active span.\n// [#next-free-field: 6]\nmessage CustomTag {\n  // Literal type custom tag with static value for the tag value.\n  message Literal {\n    // Static literal value to populate the tag value.\n    string value = 1 [(validate.rules).string = {min_bytes: 1}];\n  }\n\n  // Environment type custom tag with environment name and default value.\n  message Environment {\n    // Environment variable name to obtain the value to populate the tag value.\n    string name = 1 [(validate.rules).string = {min_bytes: 1}];\n\n    // When the environment variable is not found,\n    // the tag value will be populated with this default value if specified,\n    // otherwise no tag will be populated.\n    string default_value = 2;\n  }\n\n  // Header type custom tag with header name and default value.\n  message Header {\n    // Header name to obtain the value to populate the tag value.\n    string name = 1\n        [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];\n\n    // When the header does not exist,\n    // the tag value will be populated with this default value if specified,\n    // otherwise no tag will be populated.\n    string default_value = 2;\n  }\n\n  // Metadata type custom tag using\n  // :ref:`MetadataKey <envoy_api_msg_type.metadata.v2.MetadataKey>` to retrieve the protobuf value\n  // from :ref:`Metadata <envoy_api_msg_core.Metadata>`, and populate the tag value with\n  // `the canonical JSON 
<https://developers.google.com/protocol-buffers/docs/proto3#json>`_\n  // representation of it.\n  message Metadata {\n    // Specify what kind of metadata to obtain tag value from.\n    metadata.v2.MetadataKind kind = 1;\n\n    // Metadata key to define the path to retrieve the tag value.\n    metadata.v2.MetadataKey metadata_key = 2;\n\n    // When no valid metadata is found,\n    // the tag value would be populated with this default value if specified,\n    // otherwise no tag would be populated.\n    string default_value = 3;\n  }\n\n  // Used to populate the tag name.\n  string tag = 1 [(validate.rules).string = {min_bytes: 1}];\n\n  // Used to specify what kind of custom tag.\n  oneof type {\n    option (validate.required) = true;\n\n    // A literal custom tag.\n    Literal literal = 2;\n\n    // An environment custom tag.\n    Environment environment = 3;\n\n    // A request header custom tag.\n    Header request_header = 4;\n\n    // A custom tag to obtain tag value from the metadata.\n    Metadata metadata = 5;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/tracing/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type/metadata/v3:pkg\",\n        \"//envoy/type/tracing/v2:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.tracing.v3;\n\nimport \"envoy/type/metadata/v3/metadata.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.tracing.v3\";\noption java_outer_classname = \"CustomTagProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Custom Tag]\n\n// Describes custom tags for the active span.\n// [#next-free-field: 6]\nmessage CustomTag {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.tracing.v2.CustomTag\";\n\n  // Literal type custom tag with static value for the tag value.\n  message Literal {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.tracing.v2.CustomTag.Literal\";\n\n    // Static literal value to populate the tag value.\n    string value = 1 [(validate.rules).string = {min_len: 1}];\n  }\n\n  // Environment type custom tag with environment name and default value.\n  message Environment {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.tracing.v2.CustomTag.Environment\";\n\n    // Environment variable name to obtain the value to populate the tag value.\n    string name = 1 [(validate.rules).string = {min_len: 1}];\n\n    // When the environment variable is not found,\n    // the tag value will be populated with this default value if specified,\n    // otherwise no tag will be populated.\n    string default_value = 2;\n  }\n\n  // Header type custom tag with header name and default value.\n  message Header {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.tracing.v2.CustomTag.Header\";\n\n    // Header name to obtain the value to populate the tag value.\n    string name = 1\n        [(validate.rules).string = {min_len: 1 well_known_regex: 
HTTP_HEADER_NAME strict: false}];\n\n    // When the header does not exist,\n    // the tag value will be populated with this default value if specified,\n    // otherwise no tag will be populated.\n    string default_value = 2;\n  }\n\n  // Metadata type custom tag using\n  // :ref:`MetadataKey <envoy_api_msg_type.metadata.v3.MetadataKey>` to retrieve the protobuf value\n  // from :ref:`Metadata <envoy_api_msg_config.core.v3.Metadata>`, and populate the tag value with\n  // `the canonical JSON <https://developers.google.com/protocol-buffers/docs/proto3#json>`_\n  // representation of it.\n  message Metadata {\n    option (udpa.annotations.versioning).previous_message_type =\n        \"envoy.type.tracing.v2.CustomTag.Metadata\";\n\n    // Specify what kind of metadata to obtain tag value from.\n    metadata.v3.MetadataKind kind = 1;\n\n    // Metadata key to define the path to retrieve the tag value.\n    metadata.v3.MetadataKey metadata_key = 2;\n\n    // When no valid metadata is found,\n    // the tag value would be populated with this default value if specified,\n    // otherwise no tag would be populated.\n    string default_value = 3;\n  }\n\n  // Used to populate the tag name.\n  string tag = 1 [(validate.rules).string = {min_len: 1}];\n\n  // Used to specify what kind of custom tag.\n  oneof type {\n    option (validate.required) = true;\n\n    // A literal custom tag.\n    Literal literal = 2;\n\n    // An environment custom tag.\n    Environment environment = 3;\n\n    // A request header custom tag.\n    Header request_header = 4;\n\n    // A custom tag to obtain tag value from the metadata.\n    Metadata metadata = 5;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/v3/BUILD",
    "content": "# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package(\n    deps = [\n        \"//envoy/type:pkg\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n    ],\n)\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/v3/hash_policy.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"HashPolicyProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Hash Policy]\n\n// Specifies the hash policy\nmessage HashPolicy {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.HashPolicy\";\n\n  // The source IP will be used to compute the hash used by hash-based load balancing\n  // algorithms.\n  message SourceIp {\n    option (udpa.annotations.versioning).previous_message_type = \"envoy.type.HashPolicy.SourceIp\";\n  }\n\n  oneof policy_specifier {\n    option (validate.required) = true;\n\n    SourceIp source_ip = 1;\n  }\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/v3/http.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"HttpProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP]\n\nenum CodecClientType {\n  HTTP1 = 0;\n\n  HTTP2 = 1;\n\n  // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with\n  // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient\n  // to distinguish HTTP1 and HTTP2 traffic.\n  HTTP3 = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/v3/http_status.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"HttpStatusProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: HTTP status codes]\n\n// HTTP response codes supported in Envoy.\n// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml\nenum StatusCode {\n  // Empty - This code not part of the HTTP status code specification, but it is needed for proto\n  // `enum` type.\n  Empty = 0;\n\n  Continue = 100;\n\n  OK = 200;\n\n  Created = 201;\n\n  Accepted = 202;\n\n  NonAuthoritativeInformation = 203;\n\n  NoContent = 204;\n\n  ResetContent = 205;\n\n  PartialContent = 206;\n\n  MultiStatus = 207;\n\n  AlreadyReported = 208;\n\n  IMUsed = 226;\n\n  MultipleChoices = 300;\n\n  MovedPermanently = 301;\n\n  Found = 302;\n\n  SeeOther = 303;\n\n  NotModified = 304;\n\n  UseProxy = 305;\n\n  TemporaryRedirect = 307;\n\n  PermanentRedirect = 308;\n\n  BadRequest = 400;\n\n  Unauthorized = 401;\n\n  PaymentRequired = 402;\n\n  Forbidden = 403;\n\n  NotFound = 404;\n\n  MethodNotAllowed = 405;\n\n  NotAcceptable = 406;\n\n  ProxyAuthenticationRequired = 407;\n\n  RequestTimeout = 408;\n\n  Conflict = 409;\n\n  Gone = 410;\n\n  LengthRequired = 411;\n\n  PreconditionFailed = 412;\n\n  PayloadTooLarge = 413;\n\n  URITooLong = 414;\n\n  UnsupportedMediaType = 415;\n\n  RangeNotSatisfiable = 416;\n\n  ExpectationFailed = 417;\n\n  MisdirectedRequest = 421;\n\n  UnprocessableEntity = 422;\n\n  Locked = 423;\n\n  FailedDependency = 424;\n\n  UpgradeRequired = 426;\n\n  PreconditionRequired = 428;\n\n  TooManyRequests = 429;\n\n  RequestHeaderFieldsTooLarge = 431;\n\n  InternalServerError = 500;\n\n  NotImplemented = 501;\n\n  
BadGateway = 502;\n\n  ServiceUnavailable = 503;\n\n  GatewayTimeout = 504;\n\n  HTTPVersionNotSupported = 505;\n\n  VariantAlsoNegotiates = 506;\n\n  InsufficientStorage = 507;\n\n  LoopDetected = 508;\n\n  NotExtended = 510;\n\n  NetworkAuthenticationRequired = 511;\n}\n\n// HTTP status.\nmessage HttpStatus {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.HttpStatus\";\n\n  // Supplies HTTP response code.\n  StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/v3/percent.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"PercentProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Percent]\n\n// Identifies a percentage, in the range [0.0, 100.0].\nmessage Percent {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.Percent\";\n\n  double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}];\n}\n\n// A fractional percentage is used in cases in which for performance reasons performing floating\n// point to integer conversions during randomness calculations is undesirable. The message includes\n// both a numerator and denominator that together determine the final fractional value.\n//\n// * **Example**: 1/100 = 1%.\n// * **Example**: 3/10000 = 0.03%.\nmessage FractionalPercent {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.FractionalPercent\";\n\n  // Fraction percentages support several fixed denominator values.\n  enum DenominatorType {\n    // 100.\n    //\n    // **Example**: 1/100 = 1%.\n    HUNDRED = 0;\n\n    // 10,000.\n    //\n    // **Example**: 1/10000 = 0.01%.\n    TEN_THOUSAND = 1;\n\n    // 1,000,000.\n    //\n    // **Example**: 1/1000000 = 0.0001%.\n    MILLION = 2;\n  }\n\n  // Specifies the numerator. Defaults to 0.\n  uint32 numerator = 1;\n\n  // Specifies the denominator. If the denominator specified is less than the numerator, the final\n  // fractional percentage is capped at 1 (100%).\n  DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}];\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/v3/range.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"RangeProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Range]\n\n// Specifies the int64 start and end of the range using half-open interval semantics [start,\n// end).\nmessage Int64Range {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.Int64Range\";\n\n  // start of the range (inclusive)\n  int64 start = 1;\n\n  // end of the range (exclusive)\n  int64 end = 2;\n}\n\n// Specifies the int32 start and end of the range using half-open interval semantics [start,\n// end).\nmessage Int32Range {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.Int32Range\";\n\n  // start of the range (inclusive)\n  int32 start = 1;\n\n  // end of the range (exclusive)\n  int32 end = 2;\n}\n\n// Specifies the double start and end of the range using half-open interval semantics [start,\n// end).\nmessage DoubleRange {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.DoubleRange\";\n\n  // start of the range (inclusive)\n  double start = 1;\n\n  // end of the range (exclusive)\n  double end = 2;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/v3/ratelimit_unit.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"RatelimitUnitProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Ratelimit Time Unit]\n\n// Identifies the unit of of time for rate limit.\nenum RateLimitUnit {\n  // The time unit is not known.\n  UNKNOWN = 0;\n\n  // The time unit representing a second.\n  SECOND = 1;\n\n  // The time unit representing a minute.\n  MINUTE = 2;\n\n  // The time unit representing an hour.\n  HOUR = 3;\n\n  // The time unit representing a day.\n  DAY = 4;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/v3/semantic_version.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"SemanticVersionProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Semantic Version]\n\n// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate\n// expected behaviors and APIs, the patch version field is used only\n// for security fixes and can be generally ignored.\nmessage SemanticVersion {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.SemanticVersion\";\n\n  uint32 major_number = 1;\n\n  uint32 minor_number = 2;\n\n  uint32 patch = 3;\n}\n"
  },
  {
    "path": "generated_api_shadow/envoy/type/v3/token_bucket.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.type.v3;\n\nimport \"google/protobuf/duration.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\nimport \"validate/validate.proto\";\n\noption java_package = \"io.envoyproxy.envoy.type.v3\";\noption java_outer_classname = \"TokenBucketProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// [#protodoc-title: Token bucket]\n\n// Configures a token bucket, typically used for rate limiting.\nmessage TokenBucket {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.type.TokenBucket\";\n\n  // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket\n  // initially contains.\n  uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}];\n\n  // The number of tokens added to the bucket during each fill interval. If not specified, defaults\n  // to a single token.\n  google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}];\n\n  // The fill interval that tokens are added to the bucket. During each fill interval\n  // `tokens_per_fill` are added to the bucket. The bucket will never contain more than\n  // `max_tokens` tokens.\n  google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = {\n    required: true\n    gt {}\n  }];\n}\n"
  },
  {
    "path": "include/envoy/access_log/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"access_log_interface\",\n    hdrs = [\"access_log.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/common/protobuf\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/access_log/access_log.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace AccessLog {\n\nclass AccessLogFile {\npublic:\n  virtual ~AccessLogFile() = default;\n\n  /**\n   * Write data to the file.\n   */\n  virtual void write(absl::string_view) PURE;\n\n  /**\n   * Reopen the file.\n   */\n  virtual void reopen() PURE;\n\n  /**\n   * Synchronously flush all pending data to disk.\n   */\n  virtual void flush() PURE;\n};\n\nusing AccessLogFileSharedPtr = std::shared_ptr<AccessLogFile>;\n\nclass AccessLogManager {\npublic:\n  virtual ~AccessLogManager() = default;\n\n  /**\n   * Reopen all of the access log files.\n   */\n  virtual void reopen() PURE;\n\n  /**\n   * Create a new access log file managed by the access log manager.\n   * @param file_name specifies the file to create/open.\n   * @return the opened file.\n   */\n  virtual AccessLogFileSharedPtr createAccessLog(const std::string& file_name) PURE;\n};\n\nusing AccessLogManagerPtr = std::unique_ptr<AccessLogManager>;\n\n/**\n * Interface for access log filters.\n */\nclass Filter {\npublic:\n  virtual ~Filter() = default;\n\n  /**\n   * Evaluate whether an access log should be written based on request and response data.\n   * @return TRUE if the log should be written.\n   */\n  virtual bool evaluate(const StreamInfo::StreamInfo& info,\n                        const Http::RequestHeaderMap& request_headers,\n                        const Http::ResponseHeaderMap& response_headers,\n                        const Http::ResponseTrailerMap& response_trailers) const PURE;\n};\n\nusing FilterPtr = std::unique_ptr<Filter>;\n\n/**\n * Abstract access logger for requests and connections.\n */\nclass Instance {\npublic:\n  virtual ~Instance() = default;\n\n  /**\n   * Log a completed request.\n   * @param request_headers supplies the 
incoming request headers after filtering.\n   * @param response_headers supplies response headers.\n   * @param response_trailers supplies response trailers.\n   * @param stream_info supplies additional information about the request not\n   * contained in the request headers.\n   */\n  virtual void log(const Http::RequestHeaderMap* request_headers,\n                   const Http::ResponseHeaderMap* response_headers,\n                   const Http::ResponseTrailerMap* response_trailers,\n                   const StreamInfo::StreamInfo& stream_info) PURE;\n};\n\nusing InstanceSharedPtr = std::shared_ptr<Instance>;\n\n} // namespace AccessLog\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/api/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"api_interface\",\n    hdrs = [\"api.h\"],\n    deps = [\n        \"//include/envoy/common:random_generator_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//include/envoy/server:process_context_interface\",\n        \"//include/envoy/thread:thread_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"io_error_interface\",\n    hdrs = [\"io_error.h\"],\n)\n\nenvoy_cc_library(\n    name = \"os_sys_calls_interface\",\n    hdrs = [\n        \"os_sys_calls.h\",\n        \"os_sys_calls_common.h\",\n        \"os_sys_calls_hot_restart.h\",\n        \"os_sys_calls_linux.h\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/api/api.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/server/process_context.h\"\n#include \"envoy/stats/store.h\"\n#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\nnamespace Api {\n\n/**\n * \"Public\" API that different components use to interact with the various system abstractions.\n */\nclass Api {\npublic:\n  virtual ~Api() = default;\n\n  /**\n   * Allocate a dispatcher.\n   * @param name the identity name for a dispatcher, e.g. \"worker_2\" or \"main_thread\".\n   *             This name will appear in per-handler/worker statistics, such as\n   *             \"server.worker_2.watchdog_miss\".\n   * @return Event::DispatcherPtr which is owned by the caller.\n   */\n  virtual Event::DispatcherPtr allocateDispatcher(const std::string& name) PURE;\n\n  /**\n   * Allocate a dispatcher.\n   * @param name the identity name for a dispatcher, e.g. 
\"worker_2\" or \"main_thread\".\n   *             This name will appear in per-handler/worker statistics, such as\n   *             \"server.worker_2.watchdog_miss\".\n   * @param watermark_factory the watermark factory, ownership is transferred to the dispatcher.\n   * @return Event::DispatcherPtr which is owned by the caller.\n   */\n  virtual Event::DispatcherPtr\n  allocateDispatcher(const std::string& name, Buffer::WatermarkFactoryPtr&& watermark_factory) PURE;\n\n  /**\n   * @return a reference to the ThreadFactory\n   */\n  virtual Thread::ThreadFactory& threadFactory() PURE;\n\n  /**\n   * @return a reference to the Filesystem::Instance\n   */\n  virtual Filesystem::Instance& fileSystem() PURE;\n\n  /**\n   * @return a reference to the TimeSource\n   */\n  virtual TimeSource& timeSource() PURE;\n\n  /**\n   * @return a constant reference to the root Stats::Scope\n   */\n  virtual const Stats::Scope& rootScope() PURE;\n\n  /**\n   * @return a reference to the RandomGenerator.\n   */\n  virtual Random::RandomGenerator& randomGenerator() PURE;\n\n  /**\n   * @return an optional reference to the ProcessContext\n   */\n  virtual ProcessContextOptRef processContext() PURE;\n};\n\nusing ApiPtr = std::unique_ptr<Api>;\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/api/io_error.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Api {\n\n/**\n * Base class for any I/O error.\n */\nclass IoError {\npublic:\n  enum class IoErrorCode {\n    // No data available right now, try again later.\n    Again,\n    // Not supported.\n    NoSupport,\n    // Address family not supported.\n    AddressFamilyNoSupport,\n    // During non-blocking connect, the connection cannot be completed immediately.\n    InProgress,\n    // Permission denied.\n    Permission,\n    // Message too big to send.\n    MessageTooBig,\n    // Kernel interrupt.\n    Interrupt,\n    // Requested a nonexistent interface or a non-local source address.\n    AddressNotAvailable,\n    // Bad file descriptor.\n    BadFd,\n    // Other error codes cannot be mapped to any one above in getErrorCode().\n    UnknownError\n  };\n  virtual ~IoError() = default;\n\n  virtual IoErrorCode getErrorCode() const PURE;\n  virtual std::string getErrorDetails() const PURE;\n};\n\nusing IoErrorDeleterType = void (*)(IoError*);\nusing IoErrorPtr = std::unique_ptr<IoError, IoErrorDeleterType>;\n\n/**\n * Basic type for return result which has a return code and error code defined\n * according to different implementations.\n * If the call succeeds, ok() should return true and |rc_| is valid. Otherwise |err_|\n * can be passed into IoError::getErrorCode() to extract the error. 
In this\n * case, |rc_| is invalid.\n */\ntemplate <typename ReturnValue> struct IoCallResult {\n  IoCallResult(ReturnValue rc, IoErrorPtr err) : rc_(rc), err_(std::move(err)) {}\n\n  IoCallResult(IoCallResult<ReturnValue>&& result) noexcept\n      : rc_(result.rc_), err_(std::move(result.err_)) {}\n\n  virtual ~IoCallResult() = default;\n\n  IoCallResult& operator=(IoCallResult&& result) noexcept {\n    rc_ = result.rc_;\n    err_ = std::move(result.err_);\n    return *this;\n  }\n\n  /**\n   * @return true if the call succeeds.\n   */\n  bool ok() const { return err_ == nullptr; }\n\n  // TODO(danzh): rename it to be more meaningful, i.e. return_value_.\n  ReturnValue rc_;\n  IoErrorPtr err_;\n};\n\nusing IoCallBoolResult = IoCallResult<bool>;\nusing IoCallSizeResult = IoCallResult<ssize_t>;\nusing IoCallUint64Result = IoCallResult<uint64_t>;\n\ninline Api::IoCallUint64Result ioCallUint64ResultNoError() {\n  return IoCallUint64Result(0, IoErrorPtr(nullptr, [](IoError*) {}));\n}\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/api/os_sys_calls.h",
    "content": "#pragma once\n\n#include <sys/stat.h>\n\n#include <memory>\n#include <string>\n\n#include \"envoy/api/os_sys_calls_common.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nclass OsSysCalls {\npublic:\n  virtual ~OsSysCalls() = default;\n\n  /**\n   * @see bind (man 2 bind)\n   */\n  virtual SysCallIntResult bind(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) PURE;\n\n  /**\n   * @see chmod (man 2 chmod)\n   */\n  virtual SysCallIntResult chmod(const std::string& path, mode_t mode) PURE;\n\n  /**\n   * @see ioctl (man 2 ioctl)\n   */\n  virtual SysCallIntResult ioctl(os_fd_t sockfd, unsigned long int request, void* argp) PURE;\n\n  /**\n   * @see writev (man 2 writev)\n   */\n  virtual SysCallSizeResult writev(os_fd_t fd, const iovec* iov, int num_iov) PURE;\n\n  /**\n   * @see readv (man 2 readv)\n   */\n  virtual SysCallSizeResult readv(os_fd_t fd, const iovec* iov, int num_iov) PURE;\n\n  /**\n   * @see recv (man 2 recv)\n   */\n  virtual SysCallSizeResult recv(os_fd_t socket, void* buffer, size_t length, int flags) PURE;\n\n  /**\n   * @see recvmsg (man 2 recvmsg)\n   */\n  virtual SysCallSizeResult recvmsg(os_fd_t sockfd, msghdr* msg, int flags) PURE;\n\n  /**\n   * @see recvmmsg (man 2 recvmmsg)\n   */\n  virtual SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen,\n                                    int flags, struct timespec* timeout) PURE;\n\n  /**\n   * return true if the OS supports recvmmsg() and sendmmsg().\n   */\n  virtual bool supportsMmsg() const PURE;\n\n  /**\n   * return true if the OS supports UDP GRO.\n   */\n  virtual bool supportsUdpGro() const PURE;\n\n  /**\n   * return true if the OS supports UDP GSO\n   */\n  virtual bool supportsUdpGso() const PURE;\n\n  /**\n   * return true if the OS support both IP_TRANSPARENT and IPV6_TRANSPARENT options\n   */\n  virtual bool supportsIpTransparent() const PURE;\n\n  /**\n   * 
Release all resources allocated for fd.\n   * @return zero on success, -1 returned otherwise.\n   */\n  virtual SysCallIntResult close(os_fd_t fd) PURE;\n\n  /**\n   * @see man 2 ftruncate\n   */\n  virtual SysCallIntResult ftruncate(int fd, off_t length) PURE;\n\n  /**\n   * @see man 2 mmap\n   */\n  virtual SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd,\n                                off_t offset) PURE;\n\n  /**\n   * @see man 2 stat\n   */\n  virtual SysCallIntResult stat(const char* pathname, struct stat* buf) PURE;\n\n  /**\n   * @see man 2 setsockopt\n   */\n  virtual SysCallIntResult setsockopt(os_fd_t sockfd, int level, int optname, const void* optval,\n                                      socklen_t optlen) PURE;\n\n  /**\n   * @see man 2 getsockopt\n   */\n  virtual SysCallIntResult getsockopt(os_fd_t sockfd, int level, int optname, void* optval,\n                                      socklen_t* optlen) PURE;\n\n  /**\n   * @see man 2 socket\n   */\n  virtual SysCallSocketResult socket(int domain, int type, int protocol) PURE;\n\n  /**\n   * @see man 2 sendmsg\n   */\n  virtual SysCallSizeResult sendmsg(os_fd_t sockfd, const msghdr* message, int flags) PURE;\n\n  /**\n   * @see man 2 getsockname\n   */\n  virtual SysCallIntResult getsockname(os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) PURE;\n\n  /**\n   * @see man 2 gethostname\n   */\n  virtual SysCallIntResult gethostname(char* name, size_t length) PURE;\n\n  /**\n   * @see man 2 getpeername\n   */\n  virtual SysCallIntResult getpeername(os_fd_t sockfd, sockaddr* name, socklen_t* namelen) PURE;\n\n  /**\n   * Toggle the blocking state bit using fcntl\n   */\n  virtual SysCallIntResult setsocketblocking(os_fd_t sockfd, bool blocking) PURE;\n\n  /**\n   * @see man 2 connect\n   */\n  virtual SysCallIntResult connect(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) PURE;\n\n  /**\n   * @see man 2 shutdown\n   */\n  virtual SysCallIntResult shutdown(os_fd_t 
sockfd, int how) PURE;\n\n  /**\n   * @see man 2 socketpair\n   */\n  virtual SysCallIntResult socketpair(int domain, int type, int protocol, os_fd_t sv[2]) PURE;\n\n  /**\n   * @see man 2 listen\n   */\n  virtual SysCallIntResult listen(os_fd_t sockfd, int backlog) PURE;\n\n  /**\n   * @see man 2 write\n   */\n  virtual SysCallSizeResult write(os_fd_t socket, const void* buffer, size_t length) PURE;\n\n  /**\n   * @see man 2 accept. The fds returned are configured to be non-blocking.\n   */\n  virtual SysCallSocketResult accept(os_fd_t socket, sockaddr* addr, socklen_t* addrlen) PURE;\n};\n\nusing OsSysCallsPtr = std::unique_ptr<OsSysCalls>;\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/api/os_sys_calls_common.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n\nnamespace Envoy {\nnamespace Api {\n/**\n * SysCallResult holds the rc and errno values resulting from a system call.\n */\ntemplate <typename T> struct SysCallResult {\n\n  /**\n   * The return code from the system call.\n   */\n  T rc_;\n\n  /**\n   * The errno value as captured after the system call.\n   */\n  int errno_;\n};\n\nusing SysCallIntResult = SysCallResult<int>;\nusing SysCallSizeResult = SysCallResult<ssize_t>;\nusing SysCallPtrResult = SysCallResult<void*>;\nusing SysCallStringResult = SysCallResult<std::string>;\nusing SysCallBoolResult = SysCallResult<bool>;\nusing SysCallSocketResult = SysCallResult<os_fd_t>;\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/api/os_sys_calls_hot_restart.h",
    "content": "#pragma once\n\n#ifndef WIN32\n#include <sys/mman.h> // for mode_t\n\n#endif\n\n#include \"envoy/api/os_sys_calls_common.h\"\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nclass HotRestartOsSysCalls {\npublic:\n  virtual ~HotRestartOsSysCalls() = default;\n\n  /**\n   * @see shm_open (man 3 shm_open)\n   */\n  virtual SysCallIntResult shmOpen(const char* name, int oflag, mode_t mode) PURE;\n\n  /**\n   * @see shm_unlink (man 3 shm_unlink)\n   */\n  virtual SysCallIntResult shmUnlink(const char* name) PURE;\n};\n\nusing HotRestartOsSysCallsPtr = std::unique_ptr<HotRestartOsSysCalls>;\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/api/os_sys_calls_linux.h",
    "content": "#pragma once\n\n#if !defined(__linux__)\n#error \"Linux platform file is part of non-Linux build.\"\n#endif\n\n#include <sched.h>\n\n#include \"envoy/api/os_sys_calls_common.h\"\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nclass LinuxOsSysCalls {\npublic:\n  virtual ~LinuxOsSysCalls() = default;\n\n  /**\n   * @see sched_getaffinity (man 2 sched_getaffinity)\n   */\n  virtual SysCallIntResult sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t* mask) PURE;\n};\n\nusing LinuxOsSysCallsPtr = std::unique_ptr<LinuxOsSysCalls>;\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/buffer/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"buffer_interface\",\n    hdrs = [\"buffer.h\"],\n    external_deps = [\n        \"abseil_inlined_vector\",\n    ],\n    deps = [\n        \"//include/envoy/api:os_sys_calls_interface\",\n        \"//source/common/common:byte_order_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/buffer/buffer.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/api/os_sys_calls.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/byte_order.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n#include \"absl/types/span.h\"\n\nnamespace Envoy {\nnamespace Buffer {\n\n/**\n * A raw memory data slice including location and length.\n */\nstruct RawSlice {\n  void* mem_ = nullptr;\n  size_t len_ = 0;\n\n  bool operator==(const RawSlice& rhs) const { return mem_ == rhs.mem_ && len_ == rhs.len_; }\n};\n\nusing RawSliceVector = absl::InlinedVector<RawSlice, 16>;\n\n/**\n * A wrapper class to facilitate passing in externally owned data to a buffer via addBufferFragment.\n * When the buffer no longer needs the data passed in through a fragment, it calls done() on it.\n */\nclass BufferFragment {\npublic:\n  virtual ~BufferFragment() = default;\n  /**\n   * @return const void* a pointer to the referenced data.\n   */\n  virtual const void* data() const PURE;\n\n  /**\n   * @return size_t the size of the referenced data.\n   */\n  virtual size_t size() const PURE;\n\n  /**\n   * Called by a buffer when the referenced data is no longer needed.\n   */\n  virtual void done() PURE;\n};\n\n/**\n * A class to facilitate extracting buffer slices from a buffer instance.\n */\nclass SliceData {\npublic:\n  virtual ~SliceData() = default;\n\n  /**\n   * @return a mutable view of the slice data.\n   */\n  virtual absl::Span<uint8_t> getMutableData() PURE;\n};\n\nusing SliceDataPtr = std::unique_ptr<SliceData>;\n\n/**\n * A basic buffer abstraction.\n */\nclass Instance {\npublic:\n  virtual ~Instance() = default;\n\n  /**\n   * Register function to call when the last byte in the last slice of this\n   * buffer has 
fully drained. Note that slices may be transferred to\n   * downstream buffers, drain trackers are transferred along with the bytes\n   * they track so the function is called only after the last byte is drained\n   * from all buffers.\n   */\n  virtual void addDrainTracker(std::function<void()> drain_tracker) PURE;\n\n  /**\n   * Copy data into the buffer (deprecated, use absl::string_view variant\n   * instead).\n   * TODO(htuch): Cleanup deprecated call sites.\n   * @param data supplies the data address.\n   * @param size supplies the data size.\n   */\n  virtual void add(const void* data, uint64_t size) PURE;\n\n  /**\n   * Add externally owned data into the buffer. No copying is done. fragment is not owned. When\n   * the fragment->data() is no longer needed, fragment->done() is called.\n   * @param fragment the externally owned data to add to the buffer.\n   */\n  virtual void addBufferFragment(BufferFragment& fragment) PURE;\n\n  /**\n   * Copy a string into the buffer.\n   * @param data supplies the string to copy.\n   */\n  virtual void add(absl::string_view data) PURE;\n\n  /**\n   * Copy another buffer into this buffer.\n   * @param data supplies the buffer to copy.\n   */\n  virtual void add(const Instance& data) PURE;\n\n  /**\n   * Prepend a string_view to the buffer.\n   * @param data supplies the string_view to copy.\n   */\n  virtual void prepend(absl::string_view data) PURE;\n\n  /**\n   * Prepend data from another buffer to this buffer.\n   * The supplied buffer is drained after this operation.\n   * @param data supplies the buffer to copy.\n   */\n  virtual void prepend(Instance& data) PURE;\n\n  /**\n   * Commit a set of slices originally obtained from reserve(). The number of slices should match\n   * the number obtained from reserve(). The size of each slice can also be altered. 
Commit must\n   * occur once following a reserve() without any mutating operations in between other than to the\n   * iovecs len_ fields.\n   * @param iovecs supplies the array of slices to commit.\n   * @param num_iovecs supplies the size of the slices array.\n   */\n  virtual void commit(RawSlice* iovecs, uint64_t num_iovecs) PURE;\n\n  /**\n   * Copy out a section of the buffer.\n   * @param start supplies the buffer index to start copying from.\n   * @param size supplies the size of the output buffer.\n   * @param data supplies the output buffer to fill.\n   */\n  virtual void copyOut(size_t start, uint64_t size, void* data) const PURE;\n\n  /**\n   * Drain data from the buffer.\n   * @param size supplies the length of data to drain.\n   */\n  virtual void drain(uint64_t size) PURE;\n\n  /**\n   * Fetch the raw buffer slices.\n   * @param max_slices supplies an optional limit on the number of slices to fetch, for performance.\n   * @return RawSliceVector with non-empty slices in the buffer.\n   */\n  virtual RawSliceVector\n  getRawSlices(absl::optional<uint64_t> max_slices = absl::nullopt) const PURE;\n\n  /**\n   * Transfer ownership of the front slice to the caller. Must only be called if the\n   * buffer is not empty otherwise the implementation will have undefined behavior.\n   * If the underlying slice is immutable then the implementation must create and return\n   * a mutable slice that has a copy of the immutable data.\n   * @return pointer to SliceData object that wraps the front slice\n   */\n  virtual SliceDataPtr extractMutableFrontSlice() PURE;\n\n  /**\n   * @return uint64_t the total length of the buffer (not necessarily contiguous in memory).\n   */\n  virtual uint64_t length() const PURE;\n\n  /**\n   * @return a pointer to the first byte of data that has been linearized out to size bytes.\n   */\n  virtual void* linearize(uint32_t size) PURE;\n\n  /**\n   * Move a buffer into this buffer. 
As little copying is done as possible.\n   * @param rhs supplies the buffer to move.\n   */\n  virtual void move(Instance& rhs) PURE;\n\n  /**\n   * Move a portion of a buffer into this buffer. As little copying is done as possible.\n   * @param rhs supplies the buffer to move.\n   * @param length supplies the amount of data to move.\n   */\n  virtual void move(Instance& rhs, uint64_t length) PURE;\n\n  /**\n   * Reserve space in the buffer.\n   * @param length supplies the amount of space to reserve.\n   * @param iovecs supplies the slices to fill with reserved memory.\n   * @param num_iovecs supplies the size of the slices array.\n   * @return the number of iovecs used to reserve the space.\n   */\n  virtual uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) PURE;\n\n  /**\n   * Search for an occurrence of data within the buffer.\n   * @param data supplies the data to search for.\n   * @param size supplies the length of the data to search for.\n   * @param start supplies the starting index to search from.\n   * @param length limits the search to specified number of bytes starting from start index.\n   * When length value is zero, entire length of data from starting index to the end is searched.\n   * @return the index where the match starts or -1 if there is no match.\n   */\n  virtual ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const PURE;\n\n  /**\n   * Search for an occurrence of data within entire buffer.\n   * @param data supplies the data to search for.\n   * @param size supplies the length of the data to search for.\n   * @param start supplies the starting index to search from.\n   * @return the index where the match starts or -1 if there is no match.\n   */\n  ssize_t search(const void* data, uint64_t size, size_t start) const {\n    return search(data, size, start, 0);\n  }\n\n  /**\n   * Search for an occurrence of data at the start of a buffer.\n   * @param data supplies the data to search for.\n   
* @return true if this buffer starts with data, false otherwise.\n   */\n  virtual bool startsWith(absl::string_view data) const PURE;\n\n  /**\n   * Constructs a flattened string from a buffer.\n   * @return the flattened string.\n   */\n  virtual std::string toString() const PURE;\n\n  /**\n   * Copy an integer out of the buffer.\n   * @param start supplies the buffer index to start copying from.\n   * @param Size how many bytes to read out of the buffer.\n   * @param Endianness specifies the byte order to use when decoding the integer.\n   * @details Size parameter: Some protocols have integer fields whose size in bytes won't match the\n   * size in bytes of C++'s integer types. Take a 3-byte integer field for example, which we want to\n   * represent as a 32-bit (4 bytes) integer. One option to deal with that situation is to read 4\n   * bytes from the buffer and ignore 1. There are a few problems with that solution, though.\n   *   * The first problem is buffer underflow: there may not be more than Size bytes available\n   * (say, last field in the payload), so that's an edge case to take into consideration.\n   *   * The second problem is draining the buffer after reading. With the above solution we cannot\n   *     read and discard in one go. We'd need to peek 4 bytes, ignore 1 and then drain 3. That not\n   *     only looks hacky since the sizes don't match, but also produces less terse code and\n   * requires the caller to propagate that logic to all call sites. Things complicate even further\n   * when endianness is taken into consideration: should the most or least-significant bytes be\n   * padded? 
Dealing with this situation requires a high level of care and attention to detail.\n   * Properly calculating which bytes to discard and how to displace the data is not only error\n   * prone, but also shifts to the caller a burden that could be solved in a much more generic,\n   * transparent and well tested manner.\n   *   * The last problem in the list is sign extension, which should be properly handled when\n   * reading signed types with negative values. To make matters easier, the optional Size parameter\n   * can be specified in those situations where there's a need to read less bytes than a C++'s\n   * integer size in bytes. For the most common case when one needs to read exactly as many bytes as\n   * the size of C++'s integer, this parameter can simply be omitted and it will be automatically\n   * deduced from the size of the type T\n   */\n  template <typename T, ByteOrder Endianness = ByteOrder::Host, size_t Size = sizeof(T)>\n  T peekInt(uint64_t start = 0) const {\n    static_assert(Size <= sizeof(T), \"requested size is bigger than integer being read\");\n\n    if (length() < start + Size) {\n      ExceptionUtil::throwEnvoyException(\"buffer underflow\");\n    }\n\n    constexpr const auto displacement = Endianness == ByteOrder::BigEndian ? sizeof(T) - Size : 0;\n\n    auto result = static_cast<T>(0);\n    constexpr const auto all_bits_enabled = static_cast<T>(~static_cast<T>(0));\n\n    int8_t* bytes = reinterpret_cast<int8_t*>(std::addressof(result));\n    copyOut(start, Size, &bytes[displacement]);\n\n    constexpr const auto most_significant_read_byte =\n        Endianness == ByteOrder::BigEndian ? displacement : Size - 1;\n\n    // If Size == sizeof(T), we need to make sure we don't generate an invalid left shift\n    // (e.g. int32 << 32), even though we know that that branch of the conditional will.\n    // not be taken. 
Size % sizeof(T) gives us the correct left shift when Size < sizeof(T),\n    // and generates a left shift of 0 bits when Size == sizeof(T)\n    const auto sign_extension_bits =\n        std::is_signed<T>::value && Size < sizeof(T) && bytes[most_significant_read_byte] < 0\n            ? static_cast<T>(static_cast<typename std::make_unsigned<T>::type>(all_bits_enabled)\n                             << ((Size % sizeof(T)) * CHAR_BIT))\n            : static_cast<T>(0);\n\n    return fromEndianness<Endianness>(static_cast<T>(result)) | sign_extension_bits;\n  }\n\n  /**\n   * Copy a little endian integer out of the buffer.\n   * @param start supplies the buffer index to start copying from.\n   * @param Size how many bytes to read out of the buffer.\n   */\n  template <typename T, size_t Size = sizeof(T)> T peekLEInt(uint64_t start = 0) const {\n    return peekInt<T, ByteOrder::LittleEndian, Size>(start);\n  }\n\n  /**\n   * Copy a big endian integer out of the buffer.\n   * @param start supplies the buffer index to start copying from.\n   * @param Size how many bytes to read out of the buffer.\n   */\n  template <typename T, size_t Size = sizeof(T)> T peekBEInt(uint64_t start = 0) const {\n    return peekInt<T, ByteOrder::BigEndian, Size>(start);\n  }\n\n  /**\n   * Copy an integer out of the buffer and drain the read data.\n   * @param Size how many bytes to read out of the buffer.\n   * @param Endianness specifies the byte order to use when decoding the integer.\n   */\n  template <typename T, ByteOrder Endianness = ByteOrder::Host, size_t Size = sizeof(T)>\n  T drainInt() {\n    const auto result = peekInt<T, Endianness, Size>();\n    drain(Size);\n    return result;\n  }\n\n  /**\n   * Copy a little endian integer out of the buffer and drain the read data.\n   * @param Size how many bytes to read out of the buffer.\n   */\n  template <typename T, size_t Size = sizeof(T)> T drainLEInt() {\n    return drainInt<T, ByteOrder::LittleEndian, Size>();\n  }\n\n  /**\n   * 
Copy a big endian integer out of the buffer and drain the read data.\n   * @param Size how many bytes to read out of the buffer.\n   */\n  template <typename T, size_t Size = sizeof(T)> T drainBEInt() {\n    return drainInt<T, ByteOrder::BigEndian, Size>();\n  }\n\n  /**\n   * Copy a byte into the buffer.\n   * @param value supplies the byte to copy into the buffer.\n   */\n  void writeByte(uint8_t value) { add(std::addressof(value), 1); }\n\n  /**\n   * Copy value as a byte into the buffer.\n   * @param value supplies the byte to copy into the buffer.\n   */\n  template <typename T> void writeByte(T value) { writeByte(static_cast<uint8_t>(value)); }\n\n  /**\n   * Copy an integer into the buffer.\n   * @param value supplies the integer to copy into the buffer.\n   * @param Size how many bytes to write from the requested integer.\n   * @param Endianness specifies the byte order to use when encoding the integer.\n   */\n  template <ByteOrder Endianness = ByteOrder::Host, typename T, size_t Size = sizeof(T)>\n  void writeInt(T value) {\n    static_assert(Size <= sizeof(T), \"requested size is bigger than integer being written\");\n\n    const auto data = toEndianness<Endianness>(value);\n    constexpr const auto displacement = Endianness == ByteOrder::BigEndian ? 
sizeof(T) - Size : 0;\n    add(reinterpret_cast<const char*>(std::addressof(data)) + displacement, Size);\n  }\n\n  /**\n   * Copy an integer into the buffer in little endian byte order.\n   * @param value supplies the integer to copy into the buffer.\n   * @param Size how many bytes to write from the requested integer.\n   */\n  template <typename T, size_t Size = sizeof(T)> void writeLEInt(T value) {\n    writeInt<ByteOrder::LittleEndian, T, Size>(value);\n  }\n\n  /**\n   * Copy an integer into the buffer in big endian byte order.\n   * @param value supplies the integer to copy into the buffer.\n   * @param Size how many bytes to write from the requested integer.\n   */\n  template <typename T, size_t Size = sizeof(T)> void writeBEInt(T value) {\n    writeInt<ByteOrder::BigEndian, T, Size>(value);\n  }\n};\n\nusing InstancePtr = std::unique_ptr<Instance>;\n\n/**\n * A factory for creating buffers which call callbacks when reaching high and low watermarks.\n */\nclass WatermarkFactory {\npublic:\n  virtual ~WatermarkFactory() = default;\n\n  /**\n   * Creates and returns a unique pointer to a new buffer.\n   * @param below_low_watermark supplies a function to call if the buffer goes under a configured\n   *   low watermark.\n   * @param above_high_watermark supplies a function to call if the buffer goes over a configured\n   *   high watermark.\n   * @return a newly created InstancePtr.\n   */\n  virtual InstancePtr create(std::function<void()> below_low_watermark,\n                             std::function<void()> above_high_watermark,\n                             std::function<void()> above_overflow_watermark) PURE;\n};\n\nusing WatermarkFactoryPtr = std::unique_ptr<WatermarkFactory>;\n\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_basic_cc_library\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_basic_cc_library(\n    name = \"base_includes\",\n    hdrs = [\n        \"exception.h\",\n        \"platform.h\",\n        \"pure.h\",\n    ],\n    external_deps = [\"abseil_optional\"],\n    include_prefix = \"envoy/common\",\n)\n\nenvoy_cc_library(\n    name = \"conn_pool_interface\",\n    hdrs = [\"conn_pool.h\"],\n    deps = [\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/upstream:upstream_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"mutex_tracer\",\n    hdrs = [\"mutex_tracer.h\"],\n)\n\nenvoy_cc_library(\n    name = \"random_generator_interface\",\n    hdrs = [\"random_generator.h\"],\n)\n\nenvoy_cc_library(\n    name = \"resource_interface\",\n    hdrs = [\"resource.h\"],\n)\n\nenvoy_cc_library(\n    name = \"time_interface\",\n    hdrs = [\"time.h\"],\n)\n\nenvoy_cc_library(\n    name = \"matchers_interface\",\n    hdrs = [\"matchers.h\"],\n)\n\nenvoy_cc_library(\n    name = \"regex_interface\",\n    hdrs = [\"regex.h\"],\n    deps = [\n        \":matchers_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"token_bucket_interface\",\n    hdrs = [\"token_bucket.h\"],\n    deps = [\n        \":time_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"interval_set_interface\",\n    hdrs = [\"interval_set.h\"],\n)\n\nenvoy_cc_library(\n    name = \"callback\",\n    hdrs = [\"callback.h\"],\n)\n\nenvoy_cc_library(\n    name = \"backoff_strategy_interface\",\n    hdrs = [\"backoff_strategy.h\"],\n)\n\nenvoy_cc_library(\n    name = \"scope_tracker_interface\",\n    hdrs = [\"scope_tracker.h\"],\n)\n"
  },
  {
    "path": "include/envoy/common/backoff_strategy.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\n/**\n * Generic interface for all backoff strategy implementations.\n */\nclass BackOffStrategy {\npublic:\n  virtual ~BackOffStrategy() = default;\n\n  /**\n   * @return the next backoff interval in milli seconds.\n   */\n  virtual uint64_t nextBackOffMs() PURE;\n\n  /**\n   * Resets the intervals so that the back off intervals can start again.\n   */\n  virtual void reset() PURE;\n};\n\nusing BackOffStrategyPtr = std::unique_ptr<BackOffStrategy>;\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/callback.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Common {\n\n/**\n * Handle for a callback that can be removed. Destruction of the handle does NOT remove the\n * callback.\n */\nclass CallbackHandle {\npublic:\n  virtual ~CallbackHandle() = default;\n\n  /**\n   * Remove the callback. After this routine returns the callback will no longer be called.\n   */\n  virtual void remove() PURE;\n};\n\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/conn_pool.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace ConnectionPool {\n\n/**\n * Controls the behavior of a canceled stream.\n */\nenum class CancelPolicy {\n  // By default, canceled streams allow a pending connection to complete and become\n  // available for a future stream.\n  Default,\n  // When a stream is canceled, closes a pending connection if there will still be sufficient\n  // connections to serve pending streams. CloseExcess is largely useful for callers that never\n  // re-use connections (e.g. by closing rather than releasing connections). Using CloseExcess in\n  // this situation guarantees that no idle connections will be held open by the conn pool awaiting\n  // a connection stream.\n  CloseExcess,\n};\n\n/**\n * Handle that allows a pending connection or stream to be canceled before it is completed.\n */\nclass Cancellable {\npublic:\n  virtual ~Cancellable() = default;\n\n  /**\n   * Cancel the pending connection or stream.\n   * @param cancel_policy a CancelPolicy that controls the behavior of this cancellation.\n   */\n  virtual void cancel(CancelPolicy cancel_policy) PURE;\n};\n\n/**\n * An instance of a generic connection pool.\n */\nclass Instance {\npublic:\n  virtual ~Instance() = default;\n\n  /**\n   * Called when a connection pool has been drained of pending streams, busy connections, and\n   * ready connections.\n   */\n  using DrainedCb = std::function<void()>;\n\n  /**\n   * Register a callback that gets called when the connection pool is fully drained. No actual\n   * draining is done. The owner of the connection pool is responsible for not creating any\n   * new streams.\n   */\n  virtual void addDrainedCallback(DrainedCb cb) PURE;\n\n  /**\n   * Actively drain all existing connection pool connections. 
This method can be used in cases\n   * where the connection pool is not being destroyed, but the caller wishes to make sure that\n   * all new streams take place on a new connection. For example, when a health check failure\n   * occurs.\n   */\n  virtual void drainConnections() PURE;\n\n  /**\n   * @return Upstream::HostDescriptionConstSharedPtr the host for which connections are pooled.\n   */\n  virtual Upstream::HostDescriptionConstSharedPtr host() const PURE;\n\n  /**\n   * Prefetches an upstream connection, if existing connections do not meet both current and\n   * anticipated load.\n   *\n   * @return true if a connection was prefetched, false otherwise.\n   */\n  virtual bool maybePrefetch(float prefetch_ratio) PURE;\n};\n\nenum class PoolFailureReason {\n  // A resource overflowed and policy prevented a new connection from being created.\n  Overflow,\n  // A local connection failure took place while creating a new connection.\n  LocalConnectionFailure,\n  // A remote connection failure took place while creating a new connection.\n  RemoteConnectionFailure,\n  // A timeout occurred while creating a new connection.\n  Timeout,\n};\n\n} // namespace ConnectionPool\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/crypto/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"crypto_interface\",\n    hdrs = [\"crypto.h\"],\n)\n"
  },
  {
    "path": "include/envoy/common/crypto/crypto.h",
    "content": "#pragma once\n\n#include <memory>\n\nnamespace Envoy {\nnamespace Common {\nnamespace Crypto {\n\nclass CryptoObject {\npublic:\n  virtual ~CryptoObject() = default;\n};\n\nusing CryptoObjectPtr = std::unique_ptr<CryptoObject>;\n\nnamespace Access {\n\ntemplate <class T> T* getTyped(CryptoObject& crypto) { return dynamic_cast<T*>(&crypto); }\n\n} // namespace Access\n\n} // namespace Crypto\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/exception.h",
    "content": "#pragma once\n\n#include <stdexcept>\n#include <string>\n\nnamespace Envoy {\n/**\n * Base class for all envoy exceptions.\n */\nclass EnvoyException : public std::runtime_error {\npublic:\n  EnvoyException(const std::string& message) : std::runtime_error(message) {}\n};\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/interval_set.h",
    "content": "#pragma once\n\n#include <utility>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\n\n/**\n * Maintains sets of numeric intervals. As new intervals are added, existing ones in the\n * set are combined so that no overlapping intervals remain in the representation.\n *\n * Value can be any type that is comparable with <, ==, and >.\n */\ntemplate <typename Value> class IntervalSet {\npublic:\n  virtual ~IntervalSet() = default;\n\n  using Interval = std::pair<Value, Value>;\n\n  /**\n   * Inserts a new interval into the set, merging any overlaps. The intervals are in\n   * the form [left_inclusive, right_exclusive). E.g. an interval [3, 5) includes the\n   * numbers 3 and 4, but not 5.\n   * @param left_inclusive Value the left-bound, inclusive.\n   * @param right_exclusive Value the right-bound, which is exclusive.\n   */\n  virtual void insert(Value left_inclusive, Value right_exclusive) PURE;\n\n  /**\n   * @return std::vector<Interval> the interval-set as a vector.\n   */\n  virtual std::vector<Interval> toVector() const PURE;\n\n  /**\n   * Clears the contents of the interval set.\n   */\n  virtual void clear() PURE;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/matchers.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Matchers {\n\n/**\n * Generic string matching interface.\n */\nclass StringMatcher {\npublic:\n  virtual ~StringMatcher() = default;\n\n  /**\n   * Return whether a passed string value matches.\n   */\n  virtual bool match(const absl::string_view value) const PURE;\n};\n\nusing StringMatcherPtr = std::unique_ptr<const StringMatcher>;\n\n} // namespace Matchers\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/mutex_tracer.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\n/**\n * Generic interface for all MutexTracer implementations. Any MutexTracer should initialize itself\n * with absl::RegisterMutexTracer() at initialization, record statistics, and deliver those\n * statistics in a thread-safe manner.\n */\nclass MutexTracer {\npublic:\n  virtual ~MutexTracer() = default;\n\n  /**\n   * @return resets the captured statistics.\n   */\n  virtual void reset() PURE;\n\n  /**\n   * @return the number of experienced mutex contentions.\n   */\n  virtual int64_t numContentions() const PURE;\n\n  /**\n   * @return the length of the ongoing wait cycle. Note that the wait cycles are not\n   * guaranteed to correspond to core clock frequency, as per absl::base_internal::CycleClock.\n   */\n  virtual int64_t currentWaitCycles() const PURE;\n\n  /**\n   * @return the cumulative length of all experienced wait cycles. See the above note on wait cycles\n   * v. core clock frequency.\n   */\n  virtual int64_t lifetimeWaitCycles() const PURE;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/platform.h",
    "content": "#pragma once\n// NOLINT(namespace-envoy)\n\n// This common \"platform.h\" header exists to simplify the most common references\n// to non-ANSI C/C++ headers, required on Windows, Posix, Linux, BSD etc,\n// and to provide substitute definitions when absolutely required.\n//\n// The goal is to eventually not require this file of envoy header declarations,\n// but limit the use of these architecture-specific types and declarations\n// to the corresponding .cc implementation files.\n#include \"absl/strings/string_view.h\"\n\n#ifdef _MSC_VER\n\n#include <windows.h>\n#include <winsock2.h>\n\n// These must follow afterwards\n#include <mswsock.h>\n#include <ws2tcpip.h>\n\n// This is introduced in Windows SDK 10.0.17063.0 which is required\n// to build Envoy on Windows (we will reevaluate whether earlier builds\n// of Windows can be detected and PipeInstance marked unsupported at runtime.)\n#include <afunix.h>\n\n// <windows.h> defines some frequently used symbols, so we need to undef these\n// interfering symbols.\n#undef DELETE\n#undef ERROR\n#undef GetMessage\n#undef interface\n#undef TRUE\n#undef IGNORE\n\n#include <io.h>\n#include <stdint.h>\n#include <time.h>\n\n#define htole16(x) (x)\n#define htole32(x) (x)\n#define htole64(x) (x)\n#define le16toh(x) (x)\n#define le32toh(x) (x)\n#define le64toh(x) (x)\n\n#define htobe16(x) htons((x))\n#define htobe32(x) htonl((x))\n#define htobe64(x) htonll((x))\n#define be16toh(x) ntohs((x))\n#define be32toh(x) ntohl((x))\n#define be64toh(x) ntohll((x))\n\n#define PACKED_STRUCT(definition, ...)                                                             
\\\n  __pragma(pack(push, 1)) definition, ##__VA_ARGS__;                                               \\\n  __pragma(pack(pop))\n\ntypedef ptrdiff_t ssize_t;\n\n// This is needed so the OsSysCalls interface compiles on Windows,\n// shmOpen takes mode_t as an argument.\ntypedef uint32_t mode_t;\n\ntypedef SOCKET os_fd_t;\ntypedef HANDLE filesystem_os_id_t; // NOLINT(modernize-use-using)\n\ntypedef unsigned int sa_family_t;\n\n// Posix structure for scatter/gather I/O, not present on Windows.\nstruct iovec {\n  void* iov_base;\n  size_t iov_len;\n};\n\n// Posix structure for describing messages sent by 'sendmsg` and received by\n// 'recvmsg'\nstruct msghdr {\n  void* msg_name;\n  socklen_t msg_namelen;\n  iovec* msg_iov;\n  size_t msg_iovlen;\n  void* msg_control;\n  size_t msg_controllen;\n  int msg_flags;\n};\n\n// Windows cmsghdr elements are just slightly different than Posix,\n// they are defined with the cmsg_len as a 32 bit uint, not size_t\n// We express our sendmsg and recvmsg in terms of Posix structures\n// and semantics, but won't adjust the cmsghdr elements.\n// The default Windows macros dereference the Windows style msghdr\n// member Control, which is an indirection that doesn't exist on posix.\n\n#undef CMSG_FIRSTHDR\n#define CMSG_FIRSTHDR(msg)                                                                         \\\n  (((msg)->msg_controllen >= sizeof(WSACMSGHDR)) ? (LPWSACMSGHDR)(msg)->msg_control                \\\n                                                 : (LPWSACMSGHDR)NULL)\n\n#undef CMSG_NXTHDR\n#define CMSG_NXTHDR(msg, cmsg)                                                                     \\\n  (((cmsg) == NULL)                                                                                \\\n       ? 
CMSG_FIRSTHDR(msg)                                                                        \\\n       : ((((PUCHAR)(cmsg) + WSA_CMSGHDR_ALIGN((cmsg)->cmsg_len) + sizeof(WSACMSGHDR)) >           \\\n           (PUCHAR)((msg)->msg_control) + (msg)->msg_controllen)                                   \\\n              ? (LPWSACMSGHDR)NULL                                                                 \\\n              : (LPWSACMSGHDR)((PUCHAR)(cmsg) + WSA_CMSGHDR_ALIGN((cmsg)->cmsg_len))))\n\n#ifdef CMSG_DATA\n#undef CMSG_DATA\n#endif\n#define CMSG_DATA(msg) WSA_CMSG_DATA(msg)\n\n// Following Cygwin's porting example (may not be comprehensive)\n#define SO_REUSEPORT SO_REUSEADDR\n\n// Solve for rfc2292 (need to address rfc3542?)\n#ifndef IPV6_RECVPKTINFO\n#define IPV6_RECVPKTINFO IPV6_PKTINFO\n#endif\n\n#define INVALID_HANDLE INVALID_HANDLE_VALUE\n#define SOCKET_VALID(sock) ((sock) != INVALID_SOCKET)\n#define SOCKET_INVALID(sock) ((sock) == INVALID_SOCKET)\n#define SOCKET_FAILURE(rc) ((rc) == SOCKET_ERROR)\n#define SET_SOCKET_INVALID(sock) (sock) = INVALID_SOCKET\n\n// arguments to shutdown\n#define ENVOY_SHUT_RD SD_RECEIVE\n#define ENVOY_SHUT_WR SD_SEND\n#define ENVOY_SHUT_RDWR SD_BOTH\n\n// winsock2 functions return distinct set of error codes, disjoint from POSIX errors (that are\n// also available on Windows and set by POSIX function invocations). 
Here we map winsock2 error\n// codes with platform agnostic macros that correspond to the same or roughly similar errors on\n// POSIX systems for use in cross-platform socket error handling.\n#define SOCKET_ERROR_AGAIN WSAEWOULDBLOCK\n#define SOCKET_ERROR_NOT_SUP WSAEOPNOTSUPP\n#define SOCKET_ERROR_AF_NO_SUP WSAEAFNOSUPPORT\n#define SOCKET_ERROR_IN_PROGRESS WSAEINPROGRESS\n// winsock2 does not differentiate between PERM and ACCESS violations\n#define SOCKET_ERROR_PERM WSAEACCES\n#define SOCKET_ERROR_ACCESS WSAEACCES\n#define SOCKET_ERROR_MSG_SIZE WSAEMSGSIZE\n#define SOCKET_ERROR_INTR WSAEINTR\n#define SOCKET_ERROR_ADDR_NOT_AVAIL WSAEADDRNOTAVAIL\n#define SOCKET_ERROR_INVAL WSAEINVAL\n#define SOCKET_ERROR_ADDR_IN_USE WSAEADDRINUSE\n\n#define HANDLE_ERROR_PERM ERROR_ACCESS_DENIED\n#define HANDLE_ERROR_INVALID ERROR_INVALID_HANDLE\n\nnamespace Platform {\nconstexpr absl::string_view null_device_path{\"NUL\"};\n}\n#else // POSIX\n\n#include <arpa/inet.h>\n#include <fcntl.h>\n#include <ifaddrs.h>\n#include <netdb.h>\n#include <netinet/in.h>\n#include <netinet/tcp.h>\n#include <netinet/udp.h> // for UDP_GRO\n#include <sys/ioctl.h>\n#include <sys/mman.h> // for mode_t\n#include <sys/socket.h>\n#include <sys/stat.h>\n#include <sys/uio.h> // for iovec\n#include <sys/un.h>\n#include <sys/wait.h>\n#include <unistd.h>\n\n#ifdef __APPLE__\n#include <libkern/OSByteOrder.h>\n#define htole16(x) OSSwapHostToLittleInt16((x))\n#define htole32(x) OSSwapHostToLittleInt32((x))\n#define htole64(x) OSSwapHostToLittleInt64((x))\n#define le16toh(x) OSSwapLittleToHostInt16((x))\n#define le32toh(x) OSSwapLittleToHostInt32((x))\n#define le64toh(x) OSSwapLittleToHostInt64((x))\n#define htobe16(x) OSSwapHostToBigInt16((x))\n#define htobe32(x) OSSwapHostToBigInt32((x))\n#define htobe64(x) OSSwapHostToBigInt64((x))\n#define be16toh(x) OSSwapBigToHostInt16((x))\n#define be32toh(x) OSSwapBigToHostInt32((x))\n#define be64toh(x) OSSwapBigToHostInt64((x))\n#else\n#include <endian.h>\n#endif\n\n#if 
defined(__linux__)\n#include <linux/netfilter_ipv4.h>\n#endif\n\n#define PACKED_STRUCT(definition, ...) definition, ##__VA_ARGS__ __attribute__((packed))\n\n#ifndef IP6T_SO_ORIGINAL_DST\n// From linux/netfilter_ipv6/ip6_tables.h\n#define IP6T_SO_ORIGINAL_DST 80\n#endif\n\n#ifndef SOL_UDP\n#define SOL_UDP 17\n#endif\n\n#ifndef UDP_GRO\n#define UDP_GRO 104\n#endif\n\n#ifndef UDP_SEGMENT\n#define UDP_SEGMENT 103\n#endif\n\ntypedef int os_fd_t;\ntypedef int filesystem_os_id_t; // NOLINT(modernize-use-using)\n\n#define INVALID_HANDLE -1\n#define INVALID_SOCKET -1\n#define SOCKET_VALID(sock) ((sock) >= 0)\n#define SOCKET_INVALID(sock) ((sock) == -1)\n#define SOCKET_FAILURE(rc) ((rc) == -1)\n#define SET_SOCKET_INVALID(sock) (sock) = -1\n\n// arguments to shutdown\n#define ENVOY_SHUT_RD SHUT_RD\n#define ENVOY_SHUT_WR SHUT_WR\n#define ENVOY_SHUT_RDWR SHUT_RDWR\n\n// Mapping POSIX socket errors to common error names\n#define SOCKET_ERROR_AGAIN EAGAIN\n#define SOCKET_ERROR_NOT_SUP ENOTSUP\n#define SOCKET_ERROR_AF_NO_SUP EAFNOSUPPORT\n#define SOCKET_ERROR_IN_PROGRESS EINPROGRESS\n#define SOCKET_ERROR_PERM EPERM\n#define SOCKET_ERROR_ACCESS EACCES\n#define SOCKET_ERROR_MSG_SIZE EMSGSIZE\n#define SOCKET_ERROR_INTR EINTR\n#define SOCKET_ERROR_ADDR_NOT_AVAIL EADDRNOTAVAIL\n#define SOCKET_ERROR_INVAL EINVAL\n#define SOCKET_ERROR_ADDR_IN_USE EADDRINUSE\n\n// Mapping POSIX file errors to common error names\n#define HANDLE_ERROR_PERM EACCES\n#define HANDLE_ERROR_INVALID EBADF\n\nnamespace Platform {\nconstexpr absl::string_view null_device_path{\"/dev/null\"};\n}\n#endif\n\n// Note: chromium disabled recvmmsg regardless of ndk version. However, the only Android target\n// currently actively using Envoy is Envoy Mobile, where recvmmsg is not actively disabled. 
In fact,\n// defining mmsghdr here caused a conflicting definition with the ndk's definition of the struct\n// (https://github.com/lyft/envoy-mobile/pull/772/checks?check_run_id=534152886#step:4:64).\n// Therefore, we decided to remove the Android check introduced here in\n// https://github.com/envoyproxy/envoy/pull/10120. If someone out there encounters problems with\n// this please bring up in Envoy's slack channel #envoy-udp-quic-dev.\n#if defined(__linux__) || defined(__EMSCRIPTEN__)\n#define ENVOY_MMSG_MORE 1\n#else\n#define ENVOY_MMSG_MORE 0\n#define MSG_WAITFORONE 0x10000 // recvmmsg(): block until 1+ packets avail.\n// Posix structure for describing messages sent by 'sendmmsg` and received by\n// 'recvmmsg'\nstruct mmsghdr {\n  struct msghdr msg_hdr;\n  unsigned int msg_len;\n};\n#endif\n\n#define SUPPORTS_GETIFADDRS\n#ifdef WIN32\n#undef SUPPORTS_GETIFADDRS\n#endif\n\n// https://android.googlesource.com/platform/prebuilts/ndk/+/dev/platform/sysroot/usr/include/ifaddrs.h\n#ifdef __ANDROID_API__\n#if __ANDROID_API__ < 24\n#undef SUPPORTS_GETIFADDRS\n#endif // __ANDROID_API__ < 24\n#endif // ifdef __ANDROID_API__\n\n// https://android.googlesource.com/platform/bionic/+/master/docs/status.md\n// ``pthread_getname_np`` is introduced in API 26\n#define SUPPORTS_PTHREAD_NAMING 0\n#if defined(__ANDROID_API__)\n#if __ANDROID_API__ >= 26\n#undef SUPPORTS_PTHREAD_NAMING\n#define SUPPORTS_PTHREAD_NAMING 1\n#endif // __ANDROID_API__ >= 26\n#elif defined(__linux__)\n#undef SUPPORTS_PTHREAD_NAMING\n#define SUPPORTS_PTHREAD_NAMING 1\n#endif // defined(__ANDROID_API__)\n\n#if defined(__linux__)\n// On Linux, default listen backlog size to net.core.somaxconn which is runtime configurable\n#define ENVOY_TCP_BACKLOG_SIZE -1\n#else\n// On non-Linux platforms use 128 which is libevent listener default\n#define ENVOY_TCP_BACKLOG_SIZE 128\n#endif\n"
  },
  {
    "path": "include/envoy/common/pure.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n/**\n * Friendly name for a pure virtual routine.\n */\n#define PURE = 0\n"
  },
  {
    "path": "include/envoy/common/random_generator.h",
    "content": "#pragma once\n\n#include <limits>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Random {\n\n/**\n * Random number generator. Implementations should be thread safe.\n */\nclass RandomGenerator {\npublic:\n  virtual ~RandomGenerator() = default;\n\n  using result_type = uint64_t; // NOLINT(readability-identifier-naming)\n\n  /**\n   * @return uint64_t a new random number.\n   */\n  virtual result_type random() PURE;\n\n  /*\n   * @return the smallest value that `operator()` may return. The value is\n   * strictly less than `max()`.\n   */\n  constexpr static result_type min() noexcept { return std::numeric_limits<result_type>::min(); };\n\n  /*\n   * @return the largest value that `operator()` may return. The value is\n   * strictly greater than `min()`.\n   */\n  constexpr static result_type max() noexcept { return std::numeric_limits<result_type>::max(); };\n\n  /*\n   * @return a value in the closed interval `[min(), max()]`. Has amortized\n   * constant complexity.\n   */\n  result_type operator()() { return result_type(random()); };\n\n  /**\n   * @return std::string containing uuid4 of 36 char length.\n   * for example, 7c25513b-0466-4558-a64c-12c6704f37ed\n   */\n  virtual std::string uuid() PURE;\n\n  /**\n   * @return a random boolean value, with probability `p` equaling true.\n   */\n  bool bernoulli(float p) {\n    if (p <= 0) {\n      return false;\n    } else if (p >= 1) {\n      return true;\n    }\n    return random() < static_cast<result_type>(p * static_cast<float>(max()));\n  }\n};\n\nusing RandomGeneratorPtr = std::unique_ptr<RandomGenerator>;\n\n} // namespace Random\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/regex.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/matchers.h\"\n\nnamespace Envoy {\nnamespace Regex {\n\n/**\n * A compiled regex expression matcher which uses an abstract regex engine.\n */\nclass CompiledMatcher : public Matchers::StringMatcher {\npublic:\n  /**\n   * Replaces all non-overlapping occurrences of the pattern in \"value\" with\n   * \"substitution\". The \"substitution\" string can make references to\n   * capture groups in the pattern, using the syntax specific to that\n   * regular expression engine.\n   */\n  virtual std::string replaceAll(absl::string_view value,\n                                 absl::string_view substitution) const PURE;\n};\n\nusing CompiledMatcherPtr = std::unique_ptr<const CompiledMatcher>;\n\n} // namespace Regex\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/resource.h",
    "content": "#include <cstdint>\n\n#include \"envoy/common/pure.h\"\n\n#include \"absl/types/optional.h\"\n\n#pragma once\n\nnamespace Envoy {\n\n/**\n * A handle for use by any resource managers.\n */\nclass ResourceLimit {\npublic:\n  virtual ~ResourceLimit() = default;\n\n  /**\n   * @return true if the resource can be created.\n   */\n  virtual bool canCreate() PURE;\n\n  /**\n   * Increment the resource count.\n   */\n  virtual void inc() PURE;\n\n  /**\n   * Decrement the resource count.\n   */\n  virtual void dec() PURE;\n\n  /**\n   * Decrement the resource count by a specific amount.\n   */\n  virtual void decBy(uint64_t amount) PURE;\n\n  /**\n   * @return the current maximum allowed number of this resource.\n   */\n  virtual uint64_t max() PURE;\n\n  /**\n   * @return the current resource count.\n   */\n  virtual uint64_t count() const PURE;\n};\n\nusing ResourceLimitOptRef = absl::optional<std::reference_wrapper<ResourceLimit>>;\n\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/scope_tracker.h",
    "content": "#pragma once\n\n#include <ostream>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\n\n/*\n * A class for tracking the scope of work.\n * Currently this is only used for best-effort tracking the L7 stream doing\n * work if a fatal error occurs.\n */\nclass ScopeTrackedObject {\npublic:\n  virtual ~ScopeTrackedObject() = default;\n\n  /**\n   * Dump debug state of the object in question to the provided ostream\n   *\n   * This is called on Envoy fatal errors, so should do minimal memory allocation.\n   *\n   * @param os the ostream to output to.\n   * @param indent_level how far to indent, for pretty-printed classes and subclasses.\n   */\n  virtual void dumpState(std::ostream& os, int indent_level = 0) const PURE;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/time.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\n\n/**\n * Less typing for common system time and steady time type.\n *\n * SystemTime should be used when getting a time to present to the user, e.g. for logging.\n * MonotonicTime should be used when tracking time for computing an interval.\n */\nusing Seconds = std::chrono::seconds;\nusing SystemTime = std::chrono::time_point<std::chrono::system_clock>;\nusing MonotonicTime = std::chrono::time_point<std::chrono::steady_clock>;\n\n/**\n * Captures a system-time source, capable of computing both monotonically increasing\n * and real time.\n */\nclass TimeSource {\npublic:\n  virtual ~TimeSource() = default;\n\n  /**\n   * @return the current system time; not guaranteed to be monotonically increasing.\n   */\n  virtual SystemTime systemTime() PURE;\n  /**\n   * @return the current monotonic time.\n   */\n  virtual MonotonicTime monotonicTime() PURE;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/common/token_bucket.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n\nnamespace Envoy {\n\n/**\n * This class defines an interface for the token bucket algorithm.\n *\n * https://en.wikipedia.org/wiki/Token_bucket\n */\nclass TokenBucket {\npublic:\n  virtual ~TokenBucket() = default;\n\n  /**\n   * @param tokens supplies the number of tokens to be consumed.\n   * @param allow_partial supplies whether the token bucket will allow consumption of less tokens\n   *                      than asked for. If allow_partial is true, the bucket contains 3 tokens,\n   *                      and the caller asks for 5, the bucket will return 3 tokens and now be\n   *                      empty.\n   * @return the number of tokens actually consumed.\n   */\n  virtual uint64_t consume(uint64_t tokens, bool allow_partial) PURE;\n\n  /**\n   * @return returns the approximate time until a next token is available. Currently it\n   * returns the upper bound on the amount of time until a next token is available.\n   */\n  virtual std::chrono::milliseconds nextTokenAvailable() PURE;\n\n  /**\n   * Reset the bucket with a specific number of tokens. Refill will begin again from the time that\n   * this routine is called.\n   */\n  virtual void reset(uint64_t num_tokens) PURE;\n};\n\nusing TokenBucketPtr = std::unique_ptr<TokenBucket>;\n\n}; // namespace Envoy\n"
  },
  {
    "path": "include/envoy/compression/compressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"compressor_interface\",\n    hdrs = [\"compressor.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"compressor_factory_interface\",\n    hdrs = [\"factory.h\"],\n    deps = [\n        \":compressor_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"compressor_config_interface\",\n    hdrs = [\"config.h\"],\n    deps = [\n        \":compressor_factory_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/compression/compressor/compressor.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Compressor {\n\n/**\n * Compressor state whether to flush the compressor or to finish the compression stream.\n */\nenum class State { Flush, Finish };\n\n/**\n * Allows compressing data.\n */\nclass Compressor {\npublic:\n  virtual ~Compressor() = default;\n\n  /**\n   * Compresses data buffer.\n   * @param buffer supplies the reference to data to be compressed. The content of the buffer will\n   *        be replaced inline with the compressed data.\n   * @param state supplies the compressor state.\n   */\n  virtual void compress(Buffer::Instance& buffer, State state) PURE;\n};\n\nusing CompressorPtr = std::unique_ptr<Compressor>;\n\n} // namespace Compressor\n} // namespace Compression\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/compression/compressor/config.h",
    "content": "#pragma once\n\n#include \"envoy/compression/compressor/factory.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/server/filter_config.h\"\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Compressor {\n\nclass NamedCompressorLibraryConfigFactory : public Config::TypedFactory {\npublic:\n  ~NamedCompressorLibraryConfigFactory() override = default;\n\n  virtual CompressorFactoryPtr\n  createCompressorFactoryFromProto(const Protobuf::Message& config,\n                                   Server::Configuration::FactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.compression.compressor\"; }\n};\n\n} // namespace Compressor\n} // namespace Compression\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/compression/compressor/factory.h",
    "content": "#pragma once\n\n#include \"envoy/compression/compressor/compressor.h\"\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Compressor {\n\nclass CompressorFactory {\npublic:\n  virtual ~CompressorFactory() = default;\n\n  virtual CompressorPtr createCompressor() PURE;\n  virtual const std::string& statsPrefix() const PURE;\n  virtual const std::string& contentEncoding() const PURE;\n};\n\nusing CompressorFactoryPtr = std::unique_ptr<CompressorFactory>;\n\n} // namespace Compressor\n} // namespace Compression\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/compression/decompressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"decompressor_config_interface\",\n    hdrs = [\"config.h\"],\n    deps = [\n        \":decompressor_factory_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"decompressor_factory_interface\",\n    hdrs = [\"factory.h\"],\n    deps = [\n        \":decompressor_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"decompressor_interface\",\n    hdrs = [\"decompressor.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/compression/decompressor/config.h",
    "content": "#pragma once\n\n#include \"envoy/compression/decompressor/factory.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/server/filter_config.h\"\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Decompressor {\n\nclass NamedDecompressorLibraryConfigFactory : public Config::TypedFactory {\npublic:\n  ~NamedDecompressorLibraryConfigFactory() override = default;\n\n  virtual DecompressorFactoryPtr\n  createDecompressorFactoryFromProto(const Protobuf::Message& config,\n                                     Server::Configuration::FactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.compression.decompressor\"; }\n};\n\n} // namespace Decompressor\n} // namespace Compression\n} // namespace Envoy"
  },
  {
    "path": "include/envoy/compression/decompressor/decompressor.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Decompressor {\n\n/**\n * Allows decompressing data.\n */\nclass Decompressor {\npublic:\n  virtual ~Decompressor() = default;\n\n  /**\n   * Decompresses data from one buffer into another buffer.\n   * @param input_buffer supplies the buffer with compressed data.\n   * @param output_buffer supplies the buffer to output decompressed data.\n   */\n  virtual void decompress(const Buffer::Instance& input_buffer,\n                          Buffer::Instance& output_buffer) PURE;\n};\n\nusing DecompressorPtr = std::unique_ptr<Decompressor>;\n\n} // namespace Decompressor\n} // namespace Compression\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/compression/decompressor/factory.h",
    "content": "#pragma once\n\n#include \"envoy/compression/decompressor/decompressor.h\"\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Decompressor {\n\nclass DecompressorFactory {\npublic:\n  virtual ~DecompressorFactory() = default;\n\n  virtual DecompressorPtr createDecompressor(const std::string& stats_prefix) PURE;\n  virtual const std::string& statsPrefix() const PURE;\n  // TODO(junr03): this method assumes that decompressors are used on http messages.\n  // A more generic method might be `hint()` which gives the user of the decompressor a hint about\n  // the type of decompression that it can perform.\n  virtual const std::string& contentEncoding() const PURE;\n};\n\nusing DecompressorFactoryPtr = std::unique_ptr<DecompressorFactory>;\n\n} // namespace Decompressor\n} // namespace Compression\n} // namespace Envoy"
  },
  {
    "path": "include/envoy/config/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"config_provider_interface\",\n    hdrs = [\"config_provider.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"config_provider_manager_interface\",\n    hdrs = [\"config_provider_manager.h\"],\n    deps = [\n        \":config_provider_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"extension_config_provider_interface\",\n    hdrs = [\"extension_config_provider.h\"],\n    deps = [\"//source/common/protobuf\"],\n)\n\nenvoy_cc_library(\n    name = \"grpc_mux_interface\",\n    hdrs = [\"grpc_mux.h\"],\n    deps = [\n        \":subscription_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"subscription_factory_interface\",\n    hdrs = [\"subscription_factory.h\"],\n    deps = [\n        \":subscription_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"subscription_interface\",\n    hdrs = [\"subscription.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/config:api_type_oracle_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"typed_config_interface\",\n    hdrs = [\"typed_config.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"typed_metadata_interface\",\n    hdrs = [\"typed_metadata.h\"],\n    deps = [\n        \":typed_config_interface\",\n        \"//source/common/protobuf\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/config/config_provider.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/time.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * A provider for configuration obtained statically (via static resources in the bootstrap config),\n * inline with a higher level resource or dynamically via xDS APIs.\n *\n * The ConfigProvider is an abstraction layer which higher level components such as the\n * HttpConnectionManager, Listener, etc can leverage to interface with Envoy's configuration\n * mechanisms. Implementations of this interface build upon lower level abstractions such as\n * Envoy::Config::Subscription and Envoy::Config::SubscriptionCallbacks.\n *\n * The interface exposed below allows xDS providers to share the underlying config protos and\n * resulting config implementations (i.e., the ConfigProvider::Config); this enables linear memory\n * scaling based on the size of the configuration set, regardless of the number of threads/workers.\n *\n * Use config() to obtain a shared_ptr to the implementation of the config, and configProtoInfo() to\n * obtain a reference to the underlying config proto and version (applicable only to dynamic config\n * providers).\n */\nclass ConfigProvider {\npublic:\n  /**\n   * The \"implementation\" of the configuration.\n   * Use config() to obtain a typed object that corresponds to the specific configuration\n   * represented by this abstract type.\n   */\n  class Config {\n  public:\n    virtual ~Config() = default;\n  };\n  using ConfigConstSharedPtr = std::shared_ptr<const Config>;\n\n  /**\n   * The type of API represented by a ConfigProvider.\n   */\n  enum class ApiType {\n    /**\n     * A \"Full\" API delivers a complete configuration as part of each resource (top level\n     * config proto); i.e., each resource contains the whole representation of the config intent. 
An\n     * example of this type of API is RDS.\n     */\n    Full,\n    /**\n     * A \"Delta\" API delivers a subset of the config intent as part of each resource (top level\n     * config proto). Examples of this type of API are CDS, LDS and SRDS.\n     */\n    Delta\n  };\n\n  /**\n   * Stores the config proto as well as the associated version.\n   */\n  template <typename P> struct ConfigProtoInfo {\n    const P& config_proto_;\n\n    // Only populated by dynamic config providers.\n    std::string version_;\n  };\n\n  using ConfigProtoVector = std::vector<const Protobuf::Message*>;\n  /**\n   * Stores the config protos associated with a \"Delta\" API.\n   */\n  template <typename P> struct ConfigProtoInfoVector {\n    const std::vector<const P*> config_protos_;\n\n    // Only populated by dynamic config providers.\n    std::string version_;\n  };\n\n  virtual ~ConfigProvider() = default;\n\n  /**\n   * The type of API.\n   */\n  virtual ApiType apiType() const PURE;\n\n  /**\n   * Returns a ConfigProtoInfo associated with a ApiType::Full provider.\n   * @return absl::optional<ConfigProtoInfo<P>> an optional ConfigProtoInfo; the value is set when a\n   * config is available.\n   */\n  template <typename P> absl::optional<ConfigProtoInfo<P>> configProtoInfo() const {\n    static_assert(std::is_base_of<Protobuf::Message, P>::value,\n                  \"Proto type must derive from Protobuf::Message\");\n\n    const auto* config_proto = dynamic_cast<const P*>(getConfigProto());\n    if (config_proto == nullptr) {\n      return absl::nullopt;\n    }\n    return ConfigProtoInfo<P>{*config_proto, getConfigVersion()};\n  }\n\n  /**\n   * Returns a ConfigProtoInfoVector associated with a ApiType::Delta provider.\n   * @return absl::optional<ConfigProtoInfoVector> an optional ConfigProtoInfoVector; the value is\n   * set when a config is available.\n   */\n  template <typename P> absl::optional<ConfigProtoInfoVector<P>> configProtoInfoVector() const {\n    
static_assert(std::is_base_of<Protobuf::Message, P>::value,\n                  \"Proto type must derive from Protobuf::Message\");\n\n    const ConfigProtoVector config_protos = getConfigProtos();\n    if (config_protos.empty()) {\n      return absl::nullopt;\n    }\n    std::vector<const P*> ret_protos;\n    ret_protos.reserve(config_protos.size());\n    for (const auto* elem : config_protos) {\n      ret_protos.push_back(static_cast<const P*>(elem));\n    }\n    return ConfigProtoInfoVector<P>{std::move(ret_protos), getConfigVersion()};\n  }\n\n  /**\n   * Returns the Config corresponding to the provider.\n   * @return std::shared_ptr<const C> a shared pointer to the Config.\n   */\n  template <typename C> std::shared_ptr<const C> config() const {\n    static_assert(std::is_base_of<Config, C>::value,\n                  \"Config type must derive from ConfigProvider::Config\");\n\n    return std::dynamic_pointer_cast<const C>(getConfig());\n  }\n\n  /**\n   * Returns the timestamp associated with the last update to the Config.\n   * @return SystemTime the timestamp corresponding to the last config update.\n   */\n  virtual SystemTime lastUpdated() const PURE;\n\nprotected:\n  /**\n   * Returns the config proto associated with the provider.\n   * @return Protobuf::Message* the config proto corresponding to the Config instantiated by the\n   *         provider.\n   */\n  virtual const Protobuf::Message* getConfigProto() const { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  /**\n   * Returns the config protos associated with the provider.\n   * @return const ConfigProtoVector the config protos corresponding to the Config instantiated by\n   *         the provider.\n   */\n  virtual ConfigProtoVector getConfigProtos() const { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  /**\n   * Returns the config version associated with the provider.\n   * @return std::string the config version.\n   */\n  virtual std::string getConfigVersion() const { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  
/**\n   * Returns the config implementation associated with the provider.\n   * @return ConfigConstSharedPtr the config as the base type.\n   */\n  virtual ConfigConstSharedPtr getConfig() const PURE;\n};\n\nusing ConfigProviderPtr = std::unique_ptr<ConfigProvider>;\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/config/config_provider_manager.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/config_provider.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * A ConfigProvider manager which instantiates static and dynamic (xDS) providers.\n *\n * ConfigProvider objects are owned by the caller of the\n * createXdsConfigProvider()/createStaticConfigProvider() functions. The ConfigProviderManager holds\n * raw pointers to those objects.\n *\n * Configuration implementations returned by ConfigProvider::config() are immutable, which allows\n * them to share the underlying objects such as config protos and subscriptions (for dynamic\n * providers) without synchronization related performance penalties. This enables linear memory\n * growth based on the size of the configuration set, regardless of the number of threads/objects\n * that must hold a reference/pointer to them.\n */\nclass ConfigProviderManager {\npublic:\n  class OptionalArg {\n  public:\n    virtual ~OptionalArg() = default;\n  };\n\n  class NullOptionalArg : public OptionalArg {\n  public:\n    NullOptionalArg() = default;\n    ~NullOptionalArg() override = default;\n  };\n\n  virtual ~ConfigProviderManager() = default;\n\n  /**\n   * Returns a dynamic ConfigProvider which receives configuration via an xDS API.\n   * A shared ownership model is used, such that the underlying subscription, config proto\n   * and Config are shared amongst all providers relying on the same config source.\n   * @param config_source_proto supplies the proto containing the xDS API configuration.\n   * @param factory_context is the context to use for the provider.\n   * @param init_manager is the Init::Manager to use for the provider.\n   * @param stat_prefix supplies the prefix to use for statistics.\n   * @param optarg supplies an optional argument with data specific to the concrete class.\n   * @return ConfigProviderPtr a newly allocated dynamic config 
provider which shares underlying\n   *                           data structures with other dynamic providers configured with the same\n   *                           API source.\n   */\n  virtual ConfigProviderPtr\n  createXdsConfigProvider(const Protobuf::Message& config_source_proto,\n                          Server::Configuration::ServerFactoryContext& factory_context,\n                          Init::Manager& init_manager, const std::string& stat_prefix,\n                          const OptionalArg& optarg) PURE;\n\n  /**\n   * Returns a ConfigProvider associated with a statically specified configuration.\n   * @param config_proto supplies the configuration proto.\n   * @param factory_context is the context to use for the provider.\n   * @param optarg supplies an optional argument with data specific to the concrete class.\n   * @return ConfigProviderPtr a newly allocated static config provider.\n   */\n  virtual ConfigProviderPtr\n  createStaticConfigProvider(const Protobuf::Message& config_proto,\n                             Server::Configuration::ServerFactoryContext& factory_context,\n                             const OptionalArg& optarg) {\n    UNREFERENCED_PARAMETER(config_proto);\n    UNREFERENCED_PARAMETER(factory_context);\n    UNREFERENCED_PARAMETER(optarg);\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  /**\n   * Returns a ConfigProvider associated with a statically specified configuration. 
This is intended\n   * to be used when a set of configuration protos is required to build the full configuration.\n   * @param config_protos supplies a vector of configuration protos.\n   * @param factory_context is the context to use for the provider.\n   * @param optarg supplies an optional argument with data specific to the concrete class.\n   * @return ConfigProviderPtr a newly allocated static config provider.\n   */\n  virtual ConfigProviderPtr\n  createStaticConfigProvider(ProtobufTypes::ConstMessagePtrVector&& config_protos,\n                             Server::Configuration::ServerFactoryContext& factory_context,\n                             const OptionalArg& optarg) {\n    UNREFERENCED_PARAMETER(config_protos);\n    UNREFERENCED_PARAMETER(factory_context);\n    UNREFERENCED_PARAMETER(optarg);\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/config/extension_config_provider.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nusing ConfigAppliedCb = std::function<void()>;\n\n/**\n * A provider for extension configurations obtained either statically or via\n * the extension configuration discovery service. Dynamically updated extension\n * configurations may share subscriptions across extension config providers.\n */\ntemplate <class Factory, class FactoryCallback> class ExtensionConfigProvider {\npublic:\n  virtual ~ExtensionConfigProvider() = default;\n\n  /**\n   * Get the extension configuration resource name.\n   **/\n  virtual const std::string& name() PURE;\n\n  /**\n   * @return FactoryCallback an extension factory callback. Note that if the\n   * provider has not yet performed an initial configuration load and no\n   * default is provided, an empty optional will be returned. The factory\n   * callback is the latest version of the extension configuration, and should\n   * generally apply only to new requests and connections.\n   */\n  virtual absl::optional<FactoryCallback> config() PURE;\n\n  /**\n   * Validate that the configuration is applicable in the context of the provider. 
If an exception\n   * is thrown by any of the config providers for an update, the extension configuration update is\n   * rejected.\n   * @param proto_config is the candidate configuration update.\n   * @param factory used to instantiate an extension config.\n   */\n  virtual void validateConfig(const ProtobufWkt::Any& proto_config, Factory& factory) PURE;\n\n  /**\n   * Update the provider with a new configuration.\n   * @param config is an extension factory callback to replace the existing configuration.\n   * @param version_info is the version of the new extension configuration.\n   * @param cb the continuation callback for a completed configuration application.\n   */\n  virtual void onConfigUpdate(FactoryCallback config, const std::string& version_info,\n                              ConfigAppliedCb cb) PURE;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/config/grpc_mux.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nusing ScopedResume = std::unique_ptr<Cleanup>;\n/**\n * All control plane related stats. @see stats_macros.h\n */\n#define ALL_CONTROL_PLANE_STATS(COUNTER, GAUGE, TEXT_READOUT)                                      \\\n  COUNTER(rate_limit_enforced)                                                                     \\\n  GAUGE(connected_state, NeverImport)                                                              \\\n  GAUGE(pending_requests, Accumulate)                                                              \\\n  TEXT_READOUT(identifier)\n\n/**\n * Struct definition for all control plane stats. @see stats_macros.h\n */\nstruct ControlPlaneStats {\n  ALL_CONTROL_PLANE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT,\n                          GENERATE_TEXT_READOUT_STRUCT)\n};\n\n/**\n * Handle on a muxed gRPC subscription. The subscription is canceled on destruction.\n */\nclass GrpcMuxWatch {\npublic:\n  virtual ~GrpcMuxWatch() = default;\n\n  /**\n   * Updates the set of resources that the watch is interested in.\n   * @param resources set of resource names to watch for\n   */\n  virtual void update(const std::set<std::string>& resources) PURE;\n};\n\nusing GrpcMuxWatchPtr = std::unique_ptr<GrpcMuxWatch>;\n\n/**\n * Manage one or more gRPC subscriptions on a single stream to management server. This can be used\n * for a single xDS API, e.g. EDS, or to combined multiple xDS APIs for ADS.\n */\nclass GrpcMux {\npublic:\n  virtual ~GrpcMux() = default;\n\n  /**\n   * Initiate stream with management server.\n   */\n  virtual void start() PURE;\n\n  /**\n   * Pause discovery requests for a given API type. 
This is useful when we're processing an update\n   * for LDS or CDS and don't want a flood of updates for RDS or EDS respectively. Discovery\n   * requests may later be resumed with resume().\n   * @param type_url type URL corresponding to xDS API, e.g.\n   * type.googleapis.com/envoy.api.v2.Cluster.\n   *\n   * @return a ScopedResume object, which when destructed, resumes the paused discovery requests.\n   * A discovery request will be sent if one would have been sent during the pause.\n   */\n  ABSL_MUST_USE_RESULT virtual ScopedResume pause(const std::string& type_url) PURE;\n\n  /**\n   * Pause discovery requests for given API types. This is useful when we're processing an update\n   * for LDS or CDS and don't want a flood of updates for RDS or EDS respectively. Discovery\n   * requests may later be resumed with resume().\n   * @param type_urls type URLs corresponding to xDS API, e.g.\n   * type.googleapis.com/envoy.api.v2.Cluster.\n   *\n   * @return a ScopedResume object, which when destructed, resumes the paused discovery requests.\n   * A discovery request will be sent if one would have been sent during the pause.\n   */\n  ABSL_MUST_USE_RESULT virtual ScopedResume pause(const std::vector<std::string> type_urls) PURE;\n\n  /**\n   * Start a configuration subscription asynchronously for some API type and resources.\n   * @param type_url type URL corresponding to xDS API, e.g.\n   * type.googleapis.com/envoy.api.v2.Cluster.\n   * @param resources set of resource names to watch for. If this is empty, then all\n   *                  resources for type_url will result in callbacks.\n   * @param callbacks the callbacks to be notified of configuration updates. These must be valid\n   *                  until GrpcMuxWatch is destroyed.\n   * @param resource_decoder how incoming opaque resource objects are to be decoded.\n   * @param use_namespace_matching if namespace watch should be created. 
This is used for creating\n   * watches on collections of resources; individual members of a collection are identified by the\n   * namespace in resource name.\n   * @return GrpcMuxWatchPtr a handle to cancel the subscription with. E.g. when a cluster goes\n   * away, its EDS updates should be cancelled by destroying the GrpcMuxWatchPtr.\n   */\n  virtual GrpcMuxWatchPtr addWatch(const std::string& type_url,\n                                   const std::set<std::string>& resources,\n                                   SubscriptionCallbacks& callbacks,\n                                   OpaqueResourceDecoder& resource_decoder,\n                                   const bool use_namespace_matching) PURE;\n\n  virtual void requestOnDemandUpdate(const std::string& type_url,\n                                     const std::set<std::string>& for_update) PURE;\n\n  using TypeUrlMap = absl::flat_hash_map<std::string, std::string>;\n  static TypeUrlMap& typeUrlMap() { MUTABLE_CONSTRUCT_ON_FIRST_USE(TypeUrlMap, {}); }\n};\n\nusing GrpcMuxPtr = std::unique_ptr<GrpcMux>;\nusing GrpcMuxSharedPtr = std::shared_ptr<GrpcMux>;\n\ntemplate <class ResponseProto> using ResponseProtoPtr = std::unique_ptr<ResponseProto>;\n/**\n * A grouping of callbacks that a GrpcMux should provide to its GrpcStream.\n */\ntemplate <class ResponseProto> class GrpcStreamCallbacks {\npublic:\n  virtual ~GrpcStreamCallbacks() = default;\n\n  /**\n   * For the GrpcStream to prompt the context to take appropriate action in response to the\n   * gRPC stream having been successfully established.\n   */\n  virtual void onStreamEstablished() PURE;\n\n  /**\n   * For the GrpcStream to prompt the context to take appropriate action in response to\n   * failure to establish the gRPC stream.\n   */\n  virtual void onEstablishmentFailure() PURE;\n\n  /**\n   * For the GrpcStream to pass received protos to the context.\n   */\n  virtual void onDiscoveryResponse(ResponseProtoPtr<ResponseProto>&& message,\n              
                     ControlPlaneStats& control_plane_stats) PURE;\n\n  /**\n   * For the GrpcStream to call when its rate limiting logic allows more requests to be sent.\n   */\n  virtual void onWriteable() PURE;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/config/subscription.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * Reason that a config update is failed.\n */\nenum class ConfigUpdateFailureReason {\n  // A connection failure took place and the update could not be fetched.\n  ConnectionFailure,\n  // Config fetch timed out.\n  FetchTimedout,\n  // Update rejected because there is a problem in applying the update.\n  UpdateRejected\n};\n\n/**\n * A wrapper for xDS resources that have been deserialized from the wire.\n */\nclass DecodedResource {\npublic:\n  virtual ~DecodedResource() = default;\n\n  /**\n   * @return const std::string& resource name.\n   */\n  virtual const std::string& name() const PURE;\n\n  /**\n   * @return const std::vector<std::string& resource aliases.\n   */\n  virtual const std::vector<std::string>& aliases() const PURE;\n\n  /**\n   * @return const std::string& resource version.\n   */\n  virtual const std::string& version() const PURE;\n\n  /**\n   * @return const Protobuf::Message& resource message reference. If hasResource() is false, this\n   *         will be the empty message.\n   */\n  virtual const Protobuf::Message& resource() const PURE;\n\n  /**\n   * @return bool does the xDS discovery response have a set resource payload?\n   */\n  virtual bool hasResource() const PURE;\n};\n\nusing DecodedResourcePtr = std::unique_ptr<DecodedResource>;\nusing DecodedResourceRef = std::reference_wrapper<DecodedResource>;\n\nclass OpaqueResourceDecoder {\npublic:\n  virtual ~OpaqueResourceDecoder() = default;\n\n  /**\n   * @param resource some opaque resource (ProtobufWkt::Any).\n   * @return ProtobufTypes::MessagePtr decoded protobuf message in the opaque resource, e.g. 
the\n   *         RouteConfiguration for an Any containing envoy.config.route.v3.RouteConfiguration.\n   */\n  virtual ProtobufTypes::MessagePtr decodeResource(const ProtobufWkt::Any& resource) PURE;\n\n  /**\n   * @param resource some opaque resource (Protobuf::Message).\n   * @return std::String the resource name in a Protobuf::Message returned by decodeResource(), e.g.\n   *         the route config name for a envoy.config.route.v3.RouteConfiguration message.\n   */\n  virtual std::string resourceName(const Protobuf::Message& resource) PURE;\n};\n\n/**\n * Subscription to DecodedResources.\n */\nclass SubscriptionCallbacks {\npublic:\n  virtual ~SubscriptionCallbacks() = default;\n\n  /**\n   * Called when a state-of-the-world configuration update is received. (State-of-the-world is\n   * everything other than delta gRPC - filesystem, HTTP, non-delta gRPC).\n   * @param resources vector of fetched resources corresponding to the configuration update.\n   * @param version_info supplies the version information as supplied by the xDS discovery response.\n   * @throw EnvoyException with reason if the configuration is rejected. Otherwise the configuration\n   *        is accepted. Accepted configurations have their version_info reflected in subsequent\n   *        requests.\n   */\n  virtual void onConfigUpdate(const std::vector<DecodedResourceRef>& resources,\n                              const std::string& version_info) PURE;\n\n  /**\n   * Called when a delta configuration update is received.\n   * @param added_resources resources newly added since the previous fetch.\n   * @param removed_resources names of resources that this fetch instructed to be removed.\n   * @param system_version_info aggregate response data \"version\", for debugging.\n   * @throw EnvoyException with reason if the config changes are rejected. Otherwise the changes\n   *        are accepted. 
Accepted changes have their version_info reflected in subsequent requests.\n   */\n  virtual void onConfigUpdate(const std::vector<DecodedResourceRef>& added_resources,\n                              const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                              const std::string& system_version_info) PURE;\n\n  /**\n   * Called when either the Subscription is unable to fetch a config update or when onConfigUpdate\n   * invokes an exception.\n   * @param reason supplies the update failure reason.\n   * @param e supplies any exception data on why the fetch failed. May be nullptr.\n   */\n  virtual void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) PURE;\n};\n\n/**\n * Invoked when raw config received from xDS wire.\n */\nclass UntypedConfigUpdateCallbacks {\npublic:\n  virtual ~UntypedConfigUpdateCallbacks() = default;\n\n  /**\n   * Called when a state-of-the-world configuration update is received. (State-of-the-world is\n   * everything other than delta gRPC - filesystem, HTTP, non-delta gRPC).\n   * @param resources vector of fetched resources corresponding to the configuration update.\n   * @param version_info supplies the version information as supplied by the xDS discovery response.\n   * @throw EnvoyException with reason if the configuration is rejected. Otherwise the configuration\n   *        is accepted. 
Accepted configurations have their version_info reflected in subsequent\n   *        requests.\n   */\n  virtual void onConfigUpdate(const Protobuf::RepeatedPtrField<ProtobufWkt::Any>& resources,\n                              const std::string& version_info) PURE;\n\n  /**\n   * Called when a delta configuration update is received.\n   * @param added_resources resources newly added since the previous fetch.\n   * @param removed_resources names of resources that this fetch instructed to be removed.\n   * @param system_version_info aggregate response data \"version\", for debugging.\n   * @throw EnvoyException with reason if the config changes are rejected. Otherwise the changes\n   * @param use_namespace_matching if the resources should be matched on their namespaces, rather\n   * than unique names. This is used when a collection of resources (e.g. virtual hosts in VHDS) is\n   * being updated. Accepted changes have their version_info reflected in subsequent\n   * requests.\n   */\n  virtual void onConfigUpdate(\n      const Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>& added_resources,\n      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n      const std::string& system_version_info) PURE;\n\n  /**\n   * Called when either the Subscription is unable to fetch a config update or when onConfigUpdate\n   * invokes an exception.\n   * @param reason supplies the update failure reason.\n   * @param e supplies any exception data on why the fetch failed. May be nullptr.\n   */\n  virtual void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) PURE;\n};\n\n/**\n * Common abstraction for subscribing to versioned config updates. This may be implemented via bidi\n * gRPC streams, periodic/long polling REST or inotify filesystem updates.\n */\nclass Subscription {\npublic:\n  virtual ~Subscription() = default;\n\n  /**\n   * Start a configuration subscription asynchronously. 
This should be called once and will continue\n   * to fetch throughout the lifetime of the Subscription object.\n   * @param resources set of resource names to fetch.\n   * @param use_namespace_matching if the subscription is for a collection of resources. In such a\n   * case a namespace watch will be created.\n   */\n  virtual void start(const std::set<std::string>& resource_names,\n                     const bool use_namespace_matching = false) PURE;\n\n  /**\n   * Update the resources to fetch.\n   * @param resources vector of resource names to fetch. It's a (not unordered_)set so that it can\n   * be passed to std::set_difference, which must be given sorted collections.\n   */\n  virtual void updateResourceInterest(const std::set<std::string>& update_to_these_names) PURE;\n\n  /**\n   * Creates a discovery request for resources.\n   * @param add_these_names resource ids for inclusion in the discovery request.\n   */\n  virtual void requestOnDemandUpdate(const std::set<std::string>& add_these_names) PURE;\n};\n\nusing SubscriptionPtr = std::unique_ptr<Subscription>;\n\n/**\n * Per subscription stats. 
@see stats_macros.h\n */\n#define ALL_SUBSCRIPTION_STATS(COUNTER, GAUGE, TEXT_READOUT)                                       \\\n  COUNTER(init_fetch_timeout)                                                                      \\\n  COUNTER(update_attempt)                                                                          \\\n  COUNTER(update_failure)                                                                          \\\n  COUNTER(update_rejected)                                                                         \\\n  COUNTER(update_success)                                                                          \\\n  GAUGE(update_time, NeverImport)                                                                  \\\n  GAUGE(version, NeverImport)                                                                      \\\n  TEXT_READOUT(version_text)\n\n/**\n * Struct definition for per subscription stats. @see stats_macros.h\n */\nstruct SubscriptionStats {\n  ALL_SUBSCRIPTION_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT,\n                         GENERATE_TEXT_READOUT_STRUCT)\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/config/subscription_factory.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/stats/scope.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nclass SubscriptionFactory {\npublic:\n  virtual ~SubscriptionFactory() = default;\n\n  /**\n   * Subscription factory interface.\n   *\n   * @param config envoy::config::core::v3::ConfigSource to construct from.\n   * @param type_url type URL for the resource being subscribed to.\n   * @param scope stats scope for any stats tracked by the subscription.\n   * @param callbacks the callbacks needed by all Subscription objects, to deliver config updates.\n   *                  The callbacks must not result in the deletion of the Subscription object.\n   * @param resource_decoder how incoming opaque resource objects are to be decoded.\n   *\n   * @return SubscriptionPtr subscription object corresponding for config and type_url.\n   */\n  virtual SubscriptionPtr\n  subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config,\n                               absl::string_view type_url, Stats::Scope& scope,\n                               SubscriptionCallbacks& callbacks,\n                               OpaqueResourceDecoder& resource_decoder) PURE;\n\n  /**\n   * Collection subscription factory interface for UDPA URLs.\n   *\n   * @param collection_locator collection resource locator.\n   * @param config envoy::config::core::v3::ConfigSource for authority resolution.\n   * @param type_url type URL for the resources inside the collection.\n   * @param scope stats scope for any stats tracked by the subscription.\n   * @param callbacks the callbacks needed by all [Collection]Subscription objects, to deliver\n   *                  config updates. 
The callbacks must not result in the deletion of the\n   *                  CollectionSubscription object.\n   * @param resource_decoder how incoming opaque resource objects are to be decoded.\n   *\n   * @return SubscriptionPtr subscription object corresponding for collection_locator.\n   */\n  virtual SubscriptionPtr\n  collectionSubscriptionFromUrl(const udpa::core::v1::ResourceLocator& collection_locator,\n                                const envoy::config::core::v3::ConfigSource& config,\n                                absl::string_view type_url, Stats::Scope& scope,\n                                SubscriptionCallbacks& callbacks,\n                                OpaqueResourceDecoder& resource_decoder) PURE;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/config/typed_config.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * Base class for an extension factory.\n */\nclass UntypedFactory {\npublic:\n  virtual ~UntypedFactory() = default;\n\n  /**\n   * Name of the factory, a reversed DNS name is encouraged to avoid cross-org conflict.\n   * It's used as key in the metadata map, as well as key in the factory registry.\n   */\n  virtual std::string name() const PURE;\n\n  /**\n   * @return std::string the identifying category name for objects\n   * created by this factory. Used for automatic registration with\n   * FactoryCategoryRegistry.\n   */\n  virtual std::string category() const PURE;\n\n  /**\n   * @return configuration proto full name, or empty for untyped factories.\n   */\n  virtual std::string configType() { return \"\"; }\n};\n\n/**\n * Base class for an extension factory configured by a typed proto message.\n */\nclass TypedFactory : public UntypedFactory {\npublic:\n  ~TypedFactory() override = default;\n\n  /**\n   * @return ProtobufTypes::MessagePtr create empty config proto message for v2. The config, which\n   * arrives in an opaque google.protobuf.Struct message, will be converted to JSON and then parsed\n   * into this empty proto.\n   */\n  virtual ProtobufTypes::MessagePtr createEmptyConfigProto() PURE;\n\n  std::string configType() override {\n    auto ptr = createEmptyConfigProto();\n    ASSERT(ptr != nullptr);\n    return ptr->GetDescriptor()->full_name();\n  }\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/config/typed_metadata.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/typed_config.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * TypedMetadata interface.\n */\nclass TypedMetadata {\npublic:\n  class Object {\n  public:\n    virtual ~Object() = default;\n  };\n\n  virtual ~TypedMetadata() = default;\n\n  /**\n   * @return a T instance by key. If the conversion is not able to complete, or\n   * if the data is not in the store, returns a nullptr.\n   */\n  template <typename T> const T* get(const std::string& key) const {\n    static_assert(std::is_base_of<Object, T>::value,\n                  \"Data type must be subclass of TypedMetadata::Object\");\n    const Object* p = getData(key);\n    if (p != nullptr) {\n      return dynamic_cast<const T*>(p);\n    }\n    return nullptr;\n  }\n\nprotected:\n  /**\n   * Returns data associated with given 'key'.\n   * If there is no data associated with this key, a nullptr is returned.\n   * @param key the key (usually a reversed DNS) associated with the typed metadata.\n   * @return A TypedMetadata::Object pointer, nullptr if no data is associated with the key.\n   */\n  virtual const Object* getData(const std::string& key) const PURE;\n};\n\n/**\n * Typed metadata should implement this factory and register via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass TypedMetadataFactory : public UntypedFactory {\npublic:\n  ~TypedMetadataFactory() override = default;\n\n  /**\n   * Convert the google.protobuf.Struct into an instance of TypedMetadata::Object.\n   * It should throw an EnvoyException in case the conversion can't be completed.\n   * @param data config data stored as a protobuf struct.\n   * @return a derived class object pointer of TypedMetadata.\n   * @throw EnvoyException if the parsing can't be done.\n   */\n  virtual std::unique_ptr<const TypedMetadata::Object>\n  
parse(const ProtobufWkt::Struct& data) const PURE;\n\n  std::string category() const override { return \"envoy.typed_metadata\"; }\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/event/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"deferred_deletable\",\n    hdrs = [\"deferred_deletable.h\"],\n)\n\nenvoy_cc_library(\n    name = \"dispatcher_interface\",\n    hdrs = [\"dispatcher.h\"],\n    deps = [\n        \":deferred_deletable\",\n        \":file_event_interface\",\n        \":schedulable_cb_interface\",\n        \":signal_interface\",\n        \"//include/envoy/common:scope_tracker_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/filesystem:watcher_interface\",\n        \"//include/envoy/network:connection_handler_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//include/envoy/network:listener_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/thread:thread_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"file_event_interface\",\n    hdrs = [\"file_event.h\"],\n)\n\nenvoy_cc_library(\n    name = \"range_timer_interface\",\n    hdrs = [\"range_timer.h\"],\n    deps = [\n        \":timer_interface\",\n        \"//include/envoy/common:time_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"schedulable_cb_interface\",\n    hdrs = [\"schedulable_cb.h\"],\n)\n\nenvoy_cc_library(\n    name = \"signal_interface\",\n    hdrs = [\"signal.h\"],\n)\n\nenvoy_cc_library(\n    name = \"timer_interface\",\n    hdrs = [\"timer.h\"],\n    deps = [\n        \":schedulable_cb_interface\",\n        \"//include/envoy/common:time_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/event/deferred_deletable.h",
    "content": "#pragma once\n\n#include <memory>\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * If an object derives from this class, it can be passed to the dispatcher who guarantees to delete\n * it in a future event loop cycle. This allows clear ownership with unique_ptr while not having\n * to worry about stack unwind issues during event processing.\n */\nclass DeferredDeletable {\npublic:\n  virtual ~DeferredDeletable() = default;\n};\n\nusing DeferredDeletablePtr = std::unique_ptr<DeferredDeletable>;\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/event/dispatcher.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/scope_tracker.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/file_event.h\"\n#include \"envoy/event/schedulable_cb.h\"\n#include \"envoy/event/signal.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/filesystem/watcher.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/connection_handler.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/network/listener.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stream_info/stream_info.h\"\n#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * All dispatcher stats. @see stats_macros.h\n */\n#define ALL_DISPATCHER_STATS(HISTOGRAM)                                                            \\\n  HISTOGRAM(loop_duration_us, Microseconds)                                                        \\\n  HISTOGRAM(poll_delay_us, Microseconds)\n\n/**\n * Struct definition for all dispatcher stats. 
@see stats_macros.h\n */\nstruct DispatcherStats {\n  ALL_DISPATCHER_STATS(GENERATE_HISTOGRAM_STRUCT)\n};\n\nusing DispatcherStatsPtr = std::unique_ptr<DispatcherStats>;\n\n/**\n * Callback invoked when a dispatcher post() runs.\n */\nusing PostCb = std::function<void()>;\n\nusing PostCbSharedPtr = std::shared_ptr<PostCb>;\n\n/**\n * Abstract event dispatching loop.\n */\nclass Dispatcher {\npublic:\n  virtual ~Dispatcher() = default;\n\n  /**\n   * Returns the name that identifies this dispatcher, such as \"worker_2\" or \"main_thread\".\n   * @return const std::string& the name that identifies this dispatcher.\n   */\n  virtual const std::string& name() PURE;\n\n  /**\n   * Returns a time-source to use with this dispatcher.\n   */\n  virtual TimeSource& timeSource() PURE;\n\n  /**\n   * Initializes stats for this dispatcher. Note that this can't generally be done at construction\n   * time, since the main and worker thread dispatchers are constructed before\n   * ThreadLocalStoreImpl::initializeThreading.\n   * @param scope the scope to contain the new per-dispatcher stats created here.\n   * @param prefix the stats prefix to identify this dispatcher. If empty, the dispatcher will be\n   *               identified by its name.\n   */\n  virtual void initializeStats(Stats::Scope& scope,\n                               const absl::optional<std::string>& prefix = absl::nullopt) PURE;\n\n  /**\n   * Clears any items in the deferred deletion queue.\n   */\n  virtual void clearDeferredDeleteList() PURE;\n\n  /**\n   * Wraps an already-accepted socket in an instance of Envoy's server Network::Connection.\n   * @param socket supplies an open file descriptor and connection metadata to use for the\n   *        connection. 
Takes ownership of the socket.\n   * @param transport_socket supplies a transport socket to be used by the connection.\n   * @param stream_info info object for the server connection\n   * @return Network::ConnectionPtr a server connection that is owned by the caller.\n   */\n  virtual Network::ConnectionPtr\n  createServerConnection(Network::ConnectionSocketPtr&& socket,\n                         Network::TransportSocketPtr&& transport_socket,\n                         StreamInfo::StreamInfo& stream_info) PURE;\n\n  /**\n   * Creates an instance of Envoy's Network::ClientConnection. Does NOT initiate the connection;\n   * the caller must then call connect() on the returned Network::ClientConnection.\n   * @param address supplies the address to connect to.\n   * @param source_address supplies an address to bind to or nullptr if no bind is necessary.\n   * @param transport_socket supplies a transport socket to be used by the connection.\n   * @param options the socket options to be set on the underlying socket before anything is sent\n   *        on the socket.\n   * @return Network::ClientConnectionPtr a client connection that is owned by the caller.\n   */\n  virtual Network::ClientConnectionPtr\n  createClientConnection(Network::Address::InstanceConstSharedPtr address,\n                         Network::Address::InstanceConstSharedPtr source_address,\n                         Network::TransportSocketPtr&& transport_socket,\n                         const Network::ConnectionSocket::OptionsSharedPtr& options) PURE;\n\n  /**\n   * Creates an async DNS resolver. The resolver should only be used on the thread that runs this\n   * dispatcher.\n   * @param resolvers supplies the addresses of DNS resolvers that this resolver should use. 
If left\n   * empty, it will not use any specific resolvers, but use defaults (/etc/resolv.conf)\n   * @param use_tcp_for_dns_lookups if set to true, tcp will be used to perform dns lookups.\n   * Otherwise, udp is used.\n   * @return Network::DnsResolverSharedPtr that is owned by the caller.\n   */\n  virtual Network::DnsResolverSharedPtr\n  createDnsResolver(const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,\n                    bool use_tcp_for_dns_lookups) PURE;\n\n  /**\n   * Creates a file event that will signal when a file is readable or writable. On UNIX systems this\n   * can be used for any file like interface (files, sockets, etc.).\n   * @param fd supplies the fd to watch.\n   * @param cb supplies the callback to fire when the file is ready.\n   * @param trigger specifies whether to edge or level trigger.\n   * @param events supplies a logical OR of FileReadyType events that the file event should\n   *               initially listen on.\n   */\n  virtual FileEventPtr createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger,\n                                       uint32_t events) PURE;\n\n  /**\n   * @return Filesystem::WatcherPtr a filesystem watcher owned by the caller.\n   */\n  virtual Filesystem::WatcherPtr createFilesystemWatcher() PURE;\n\n  /**\n   * Creates a listener on a specific port.\n   * @param socket supplies the socket to listen on.\n   * @param cb supplies the callbacks to invoke for listener events.\n   * @param bind_to_port controls whether the listener binds to a transport port or not.\n   * @param backlog_size controls listener pending connections backlog\n   * @return Network::ListenerPtr a new listener that is owned by the caller.\n   */\n  virtual Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket,\n                                              Network::TcpListenerCallbacks& cb, bool bind_to_port,\n                                              uint32_t backlog_size) PURE;\n\n  
/**\n   * Creates a logical udp listener on a specific port.\n   * @param socket supplies the socket to listen on.\n   * @param cb supplies the udp listener callbacks to invoke for listener events.\n   * @return Network::ListenerPtr a new listener that is owned by the caller.\n   */\n  virtual Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket,\n                                                    Network::UdpListenerCallbacks& cb) PURE;\n  /**\n   * Allocates a timer. @see Timer for docs on how to use the timer.\n   * @param cb supplies the callback to invoke when the timer fires.\n   */\n  virtual Event::TimerPtr createTimer(TimerCb cb) PURE;\n\n  /**\n   * Allocates a schedulable callback. @see SchedulableCallback for docs on how to use the wrapped\n   * callback.\n   * @param cb supplies the callback to invoke when the SchedulableCallback is triggered on the\n   * event loop.\n   */\n  virtual Event::SchedulableCallbackPtr createSchedulableCallback(std::function<void()> cb) PURE;\n\n  /**\n   * Submits an item for deferred delete. @see DeferredDeletable.\n   */\n  virtual void deferredDelete(DeferredDeletablePtr&& to_delete) PURE;\n\n  /**\n   * Exits the event loop.\n   */\n  virtual void exit() PURE;\n\n  /**\n   * Listens for a signal event. Only a single dispatcher in the process can listen for signals.\n   * If more than one dispatcher calls this routine in the process the behavior is undefined.\n   *\n   * @param signal_num supplies the signal to listen on.\n   * @param cb supplies the callback to invoke when the signal fires.\n   * @return SignalEventPtr a signal event that is owned by the caller.\n   */\n  virtual SignalEventPtr listenForSignal(int signal_num, SignalCb cb) PURE;\n\n  /**\n   * Posts a functor to the dispatcher. This is safe cross thread. 
The functor runs in the context\n   * of the dispatcher event loop which may be on a different thread than the caller.\n   */\n  virtual void post(PostCb callback) PURE;\n\n  /**\n   * Runs the event loop. This will not return until exit() is called either from within a callback\n   * or from a different thread.\n   * @param type specifies whether to run in blocking mode (run() will not return until exit() is\n   *              called) or non-blocking mode where only active events will be executed and then\n   *              run() will return.\n   */\n  enum class RunType {\n    Block,       // Runs the event-loop until there are no pending events.\n    NonBlock,    // Checks for any pending events to activate, executes them,\n                 // then exits. Exits immediately if there are no pending or\n                 // active events.\n    RunUntilExit // Runs the event-loop until loopExit() is called, blocking\n                 // until there are pending or active events.\n  };\n  virtual void run(RunType type) PURE;\n\n  /**\n   * Returns a factory which connections may use for watermark buffer creation.\n   * @return the watermark buffer factory for this dispatcher.\n   */\n  virtual Buffer::WatermarkFactory& getWatermarkFactory() PURE;\n\n  /**\n   * Sets a tracked object, which is currently operating in this Dispatcher.\n   * This should be cleared with another call to setTrackedObject() when the object is done doing\n   * work. Calling setTrackedObject(nullptr) results in no object being tracked.\n   *\n   * This is optimized for performance, to avoid allocation where we do scoped object tracking.\n   *\n   * @return The previously tracked object or nullptr if there was none.\n   */\n  virtual const ScopeTrackedObject* setTrackedObject(const ScopeTrackedObject* object) PURE;\n\n  /**\n   * Validates that an operation is thread-safe with respect to this dispatcher; i.e. 
that the\n   * current thread of execution is on the same thread upon which the dispatcher loop is running.\n   */\n  virtual bool isThreadSafe() const PURE;\n\n  /**\n   * Returns a recently cached MonotonicTime value.\n   */\n  virtual MonotonicTime approximateMonotonicTime() const PURE;\n\n  /**\n   * Updates approximate monotonic time to current value.\n   */\n  virtual void updateApproximateMonotonicTime() PURE;\n};\n\nusing DispatcherPtr = std::unique_ptr<Dispatcher>;\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/event/file_event.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nstruct FileReadyType {\n  // File is ready for reading.\n  static const uint32_t Read = 0x1;\n  // File is ready for writing.\n  static const uint32_t Write = 0x2;\n  // File has been remote closed.\n  static const uint32_t Closed = 0x4;\n};\n\nenum class FileTriggerType { Level, Edge };\n\nstatic constexpr FileTriggerType PlatformDefaultTriggerType\n#ifdef WIN32\n    // Libevent only supports Level trigger on Windows.\n    {FileTriggerType::Level};\n#else\n    {FileTriggerType::Edge};\n#endif\n\n/**\n * Callback invoked when a FileEvent is ready for reading or writing.\n */\nusing FileReadyCb = std::function<void(uint32_t events)>;\n\n/**\n * Wrapper for file based (read/write) event notifications.\n */\nclass FileEvent {\npublic:\n  virtual ~FileEvent() = default;\n\n  /**\n   * Activate the file event explicitly for a set of events. Should be a logical OR of FileReadyType\n   * events. This method \"injects\" the event (and fires callbacks) regardless of whether the event\n   * is actually ready on the underlying file.\n   */\n  virtual void activate(uint32_t events) PURE;\n\n  /**\n   * Enable the file event explicitly for a set of events. Should be a logical OR of FileReadyType\n   * events. As opposed to activate(), this routine causes the file event to listen for the\n   * registered events and fire callbacks when they are active.\n   */\n  virtual void setEnabled(uint32_t events) PURE;\n};\n\nusing FileEventPtr = std::unique_ptr<FileEvent>;\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/event/range_timer.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <functional>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/timer.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * An abstract event timer that can be scheduled for a timeout within a range. The actual timeout\n * used is left up to individual implementations.\n */\nclass RangeTimer {\npublic:\n  virtual ~RangeTimer() = default;\n\n  /**\n   * Disable a pending timeout without destroying the underlying timer.\n   */\n  virtual void disableTimer() PURE;\n\n  /**\n   * Enable a pending timeout within the given range. If a timeout is already pending, it will be\n   * reset to the new timeout.\n   *\n   * @param min_ms supplies the minimum duration of the alarm in milliseconds.\n   * @param max_ms supplies the maximum duration of the alarm in milliseconds.\n   * @param object supplies an optional scope for the duration of the alarm.\n   */\n  virtual void enableTimer(std::chrono::milliseconds min_ms, std::chrono::milliseconds max_ms,\n                           const ScopeTrackedObject* object = nullptr) PURE;\n\n  /**\n   * Return whether the timer is currently armed.\n   */\n  virtual bool enabled() PURE;\n};\n\nusing RangeTimerPtr = std::unique_ptr<RangeTimer>;\n\n} // namespace Event\n} // namespace Envoy"
  },
  {
    "path": "include/envoy/event/schedulable_cb.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * Callback wrapper that allows direct scheduling of callbacks in the event loop.\n */\nclass SchedulableCallback {\npublic:\n  virtual ~SchedulableCallback() = default;\n\n  /**\n   * Schedule the callback so it runs in the current iteration of the event loop after all events\n   * scheduled in the current event loop have had a chance to execute.\n   */\n  virtual void scheduleCallbackCurrentIteration() PURE;\n\n  /**\n   * Schedule the callback so it runs in the next iteration of the event loop. There are no\n   * ordering guarantees for callbacks scheduled for the next iteration, not even among\n   * next-iteration callbacks.\n   */\n  virtual void scheduleCallbackNextIteration() PURE;\n\n  /**\n   * Cancel pending execution of the callback.\n   */\n  virtual void cancel() PURE;\n\n  /**\n   * Return true whether the SchedulableCallback is scheduled for execution.\n   */\n  virtual bool enabled() PURE;\n};\n\nusing SchedulableCallbackPtr = std::unique_ptr<SchedulableCallback>;\n\n/**\n * SchedulableCallback factory.\n */\nclass CallbackScheduler {\npublic:\n  virtual ~CallbackScheduler() = default;\n\n  /**\n   * Create a schedulable callback.\n   */\n  virtual SchedulableCallbackPtr createSchedulableCallback(const std::function<void()>& cb) PURE;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/event/signal.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * Callback invoked when a signal event fires.\n */\nusing SignalCb = std::function<void()>;\n\n/**\n * An abstract signal event. Free the event to stop listening on the signal.\n */\nclass SignalEvent {\npublic:\n  virtual ~SignalEvent() = default;\n};\n\nusing SignalEventPtr = std::unique_ptr<SignalEvent>;\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/event/timer.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <functional>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/schedulable_cb.h\"\n\nnamespace Envoy {\n\nclass ScopeTrackedObject;\n\nnamespace Event {\n\nclass Dispatcher;\n\n/**\n * Callback invoked when a timer event fires.\n */\nusing TimerCb = std::function<void()>;\n\n/**\n * An abstract timer event. Free the timer to unregister any pending timeouts. Must be freed before\n * the dispatcher is torn down.\n */\nclass Timer {\npublic:\n  virtual ~Timer() = default;\n\n  /**\n   * Disable a pending timeout without destroying the underlying timer.\n   */\n  virtual void disableTimer() PURE;\n\n  /**\n   * Enable a pending timeout. If a timeout is already pending, it will be reset to the new timeout.\n   *\n   * @param ms supplies the duration of the alarm in milliseconds.\n   * @param object supplies an optional scope for the duration of the alarm.\n   */\n  virtual void enableTimer(std::chrono::milliseconds ms,\n                           const ScopeTrackedObject* object = nullptr) PURE;\n\n  /**\n   * Enable a pending high resolution timeout. 
If a timeout is already pending, it will be reset to\n   * the new timeout.\n   *\n   * @param us supplies the duration of the alarm in microseconds.\n   * @param object supplies an optional scope for the duration of the alarm.\n   */\n  virtual void enableHRTimer(std::chrono::microseconds us,\n                             const ScopeTrackedObject* object = nullptr) PURE;\n  /**\n   * Return whether the timer is currently armed.\n   */\n  virtual bool enabled() PURE;\n};\n\nusing TimerPtr = std::unique_ptr<Timer>;\n\nclass Scheduler {\npublic:\n  virtual ~Scheduler() = default;\n\n  /**\n   * Creates a timer.\n   */\n  virtual TimerPtr createTimer(const TimerCb& cb, Dispatcher& dispatcher) PURE;\n};\n\nusing SchedulerPtr = std::unique_ptr<Scheduler>;\n\n/**\n * Interface providing a mechanism to measure time and set timers that run callbacks\n * when the timer fires.\n */\nclass TimeSystem : public TimeSource {\npublic:\n  ~TimeSystem() override = default;\n\n  using Duration = MonotonicTime::duration;\n  using Nanoseconds = std::chrono::nanoseconds;\n  using Microseconds = std::chrono::microseconds;\n  using Milliseconds = std::chrono::milliseconds;\n  using Seconds = std::chrono::seconds;\n\n  /**\n   * Creates a timer factory. This indirection enables thread-local timer-queue management,\n   * so servers can have a separate timer-factory in each thread.\n   */\n  virtual SchedulerPtr createScheduler(Scheduler& base_scheduler,\n                                       CallbackScheduler& cb_scheduler) PURE;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/filesystem/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"filesystem_interface\",\n    hdrs = [\"filesystem.h\"],\n    deps = [\n        \"//include/envoy/api:io_error_interface\",\n        \"//include/envoy/api:os_sys_calls_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"watcher_interface\",\n    hdrs = [\"watcher.h\"],\n)\n"
  },
  {
    "path": "include/envoy/filesystem/filesystem.h",
    "content": "#pragma once\n\n#include <bitset>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/api/io_error.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nusing FlagSet = std::bitset<4>;\n\n/**\n * Abstraction for a basic file on disk.\n */\nclass File {\npublic:\n  virtual ~File() = default;\n\n  enum Operation {\n    Read,\n    Write,\n    Create,\n    Append,\n  };\n\n  /**\n   * Open the file with Flag\n   * The file will be closed when this object is destructed\n   *\n   * @return bool whether the open succeeded\n   */\n  virtual Api::IoCallBoolResult open(FlagSet flags) PURE;\n\n  /**\n   * Write the buffer to the file. The file must be explicitly opened before writing.\n   *\n   * @return ssize_t number of bytes written, or -1 for failure\n   */\n  virtual Api::IoCallSizeResult write(absl::string_view buffer) PURE;\n\n  /**\n   * Close the file.\n   *\n   * @return bool whether the close succeeded\n   */\n  virtual Api::IoCallBoolResult close() PURE;\n\n  /**\n   * @return bool is the file open\n   */\n  virtual bool isOpen() const PURE;\n\n  /**\n   * @return string the file path\n   */\n  virtual std::string path() const PURE;\n};\n\nusing FilePtr = std::unique_ptr<File>;\n\n/**\n * Contains the result of splitting the file name and its parent directory from\n * a given file path.\n */\nstruct PathSplitResult {\n  absl::string_view directory_;\n  absl::string_view file_;\n};\n\n/**\n * Abstraction for some basic filesystem operations\n */\nclass Instance {\npublic:\n  virtual ~Instance() = default;\n\n  /**\n   *  @param path The path of the File\n   *  @return a FilePtr. 
The file is not opened.\n   */\n  virtual FilePtr createFile(const std::string& path) PURE;\n\n  /**\n   * @return bool whether a file exists on disk and can be opened for read.\n   */\n  virtual bool fileExists(const std::string& path) PURE;\n\n  /**\n   * @return bool whether a directory exists on disk and can be opened for read.\n   */\n  virtual bool directoryExists(const std::string& path) PURE;\n\n  /**\n   * @return ssize_t the size in bytes of the specified file, or -1 if the file size\n   *                 cannot be determined for any reason, including without limitation\n   *                 the non-existence of the file.\n   */\n  virtual ssize_t fileSize(const std::string& path) PURE;\n\n  /**\n   * @return full file content as a string.\n   * @throw EnvoyException if the file cannot be read.\n   * Be aware, this is not most highly performing file reading method.\n   */\n  virtual std::string fileReadToEnd(const std::string& path) PURE;\n\n  /**\n   * @path file path to split\n   * @return PathSplitResult containing the parent directory of the input path and the file name\n   * @note will throw an exception if path does not contain any path separator character\n   */\n  virtual PathSplitResult splitPathFromFilename(absl::string_view path) PURE;\n\n  /**\n   * Determine if the path is on a list of paths Envoy will refuse to access. This\n   * is a basic sanity check for users, denying some clearly bad paths. Paths\n   * may still be problematic (e.g. 
indirectly leading to /dev/mem) even if this\n   * returns false, it is up to the user to validate that supplied paths are\n   * valid.\n   * @param path some filesystem path.\n   * @return is the path on the deny list?\n   */\n  virtual bool illegalPath(const std::string& path) PURE;\n};\n\nusing InstancePtr = std::unique_ptr<Instance>;\n\nenum class FileType { Regular, Directory, Other };\n\nstruct DirectoryEntry {\n  // name_ is the name of the file in the directory, not including the directory path itself\n  // For example, if we have directory a/b containing file c, name_ will be c\n  std::string name_;\n\n  // Note that if the file represented by name_ is a symlink, type_ will be the file type of the\n  // target. For example, if name_ is a symlink to a directory, its file type will be Directory.\n  FileType type_;\n\n  bool operator==(const DirectoryEntry& rhs) const {\n    return name_ == rhs.name_ && type_ == rhs.type_;\n  }\n};\n\nclass DirectoryIteratorImpl;\nclass DirectoryIterator {\npublic:\n  DirectoryIterator() : entry_({\"\", FileType::Other}) {}\n  virtual ~DirectoryIterator() = default;\n\n  const DirectoryEntry& operator*() const { return entry_; }\n\n  bool operator!=(const DirectoryIterator& rhs) const { return !(entry_ == *rhs); }\n\n  virtual DirectoryIteratorImpl& operator++() PURE;\n\nprotected:\n  DirectoryEntry entry_;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/filesystem/watcher.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\n/**\n * Abstraction for a file watcher.\n */\nclass Watcher {\npublic:\n  using OnChangedCb = std::function<void(uint32_t events)>;\n\n  struct Events {\n    static const uint32_t MovedTo = 0x1;\n    static const uint32_t Modified = 0x2;\n  };\n\n  virtual ~Watcher() = default;\n\n  /**\n   * Add a file watch.\n   * @param path supplies the path to watch.\n   *        If path is a file, callback is called on events for the given file.\n   *        If path is a directory (ends with \"/\"), callback is called on events\n   *        for the given directory.\n   * @param events supplies the events to watch.\n   * @param cb supplies the callback to invoke when a change occurs.\n   */\n  virtual void addWatch(absl::string_view path, uint32_t events, OnChangedCb cb) PURE;\n};\n\nusing WatcherPtr = std::unique_ptr<Watcher>;\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/filter/http/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"filter_config_provider_interface\",\n    hdrs = [\"filter_config_provider.h\"],\n    deps = [\n        \"//include/envoy/config:extension_config_provider_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/filter/http/filter_config_provider.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/extension_config_provider.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Filter {\nnamespace Http {\n\nusing FilterConfigProvider =\n    Envoy::Config::ExtensionConfigProvider<Server::Configuration::NamedHttpFilterConfigFactory,\n                                           Envoy::Http::FilterFactoryCb>;\nusing FilterConfigProviderPtr = std::unique_ptr<FilterConfigProvider>;\n\n/**\n * The FilterConfigProviderManager exposes the ability to get an FilterConfigProvider\n * for both static and dynamic filter config providers.\n */\nclass FilterConfigProviderManager {\npublic:\n  virtual ~FilterConfigProviderManager() = default;\n\n  /**\n   * Get an FilterConfigProviderPtr for a filter config. The config providers may share\n   * the underlying subscriptions to the filter config discovery service.\n   * @param config_source supplies the configuration source for the filter configs.\n   * @param filter_config_name the filter config resource name.\n   * @param require_type_urls enforces that the typed filter config must have a certain type URL.\n   * @param factory_context is the context to use for the filter config provider.\n   * @param stat_prefix supplies the stat_prefix to use for the provider stats.\n   * @param apply_without_warming initializes immediately with the default config and starts the\n   * subscription.\n   */\n  virtual FilterConfigProviderPtr createDynamicFilterConfigProvider(\n      const envoy::config::core::v3::ConfigSource& config_source,\n      const std::string& filter_config_name, const std::set<std::string>& require_type_urls,\n      Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix,\n      bool apply_without_warming) PURE;\n\n  /**\n   * Get an 
FilterConfigProviderPtr for a statically inlined filter config.\n   * @param config is a fully resolved filter instantiation factory.\n   * @param filter_config_name is the name of the filter configuration resource.\n   */\n  virtual FilterConfigProviderPtr\n  createStaticFilterConfigProvider(const Envoy::Http::FilterFactoryCb& config,\n                                   const std::string& filter_config_name) PURE;\n};\n\n} // namespace Http\n} // namespace Filter\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/formatter/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"substitution_formatter_interface\",\n    hdrs = [\"substitution_formatter.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/formatter/substitution_formatter.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\nnamespace Envoy {\nnamespace Formatter {\n\n/**\n * Interface for substitution formatter.\n * Formatters provide a complete substitution output line for the given headers/trailers/stream.\n */\nclass Formatter {\npublic:\n  virtual ~Formatter() = default;\n\n  /**\n   * Return a formatted substitution line.\n   * @param request_headers supplies the request headers.\n   * @param response_headers supplies the response headers.\n   * @param response_trailers supplies the response trailers.\n   * @param stream_info supplies the stream info.\n   * @param local_reply_body supplies the local reply body.\n   * @return std::string string containing the complete formatted substitution line.\n   */\n  virtual std::string format(const Http::RequestHeaderMap& request_headers,\n                             const Http::ResponseHeaderMap& response_headers,\n                             const Http::ResponseTrailerMap& response_trailers,\n                             const StreamInfo::StreamInfo& stream_info,\n                             absl::string_view local_reply_body) const PURE;\n};\n\nusing FormatterPtr = std::unique_ptr<Formatter>;\n\n/**\n * Interface for substitution provider.\n * FormatterProviders extract information from the given headers/trailers/stream.\n */\nclass FormatterProvider {\npublic:\n  virtual ~FormatterProvider() = default;\n\n  /**\n   * Extract a value from the provided headers/trailers/stream.\n   * @param request_headers supplies the request headers.\n   * @param response_headers supplies the response headers.\n   * @param response_trailers supplies the response trailers.\n   * @param stream_info supplies the stream info.\n   * @param local_reply_body supplies the local reply body.\n   * @return absl::optional<std::string> optional string containing a single 
value extracted from\n   * the given headers/trailers/stream.\n   */\n  virtual absl::optional<std::string> format(const Http::RequestHeaderMap& request_headers,\n                                             const Http::ResponseHeaderMap& response_headers,\n                                             const Http::ResponseTrailerMap& response_trailers,\n                                             const StreamInfo::StreamInfo& stream_info,\n                                             absl::string_view local_reply_body) const PURE;\n  /**\n   * Extract a value from the provided headers/trailers/stream, preserving the value's type.\n   * @param request_headers supplies the request headers.\n   * @param response_headers supplies the response headers.\n   * @param response_trailers supplies the response trailers.\n   * @param stream_info supplies the stream info.\n   * @param local_reply_body supplies the local reply body.\n   * @return ProtobufWkt::Value containing a single value extracted from the given\n   *         headers/trailers/stream.\n   */\n  virtual ProtobufWkt::Value formatValue(const Http::RequestHeaderMap& request_headers,\n                                         const Http::ResponseHeaderMap& response_headers,\n                                         const Http::ResponseTrailerMap& response_trailers,\n                                         const StreamInfo::StreamInfo& stream_info,\n                                         absl::string_view local_reply_body) const PURE;\n};\n\nusing FormatterProviderPtr = std::unique_ptr<FormatterProvider>;\n\n} // namespace Formatter\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/grpc/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"async_client_interface\",\n    hdrs = [\"async_client.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":status\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/http:async_client_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"async_client_manager_interface\",\n    hdrs = [\"async_client_manager.h\"],\n    deps = [\n        \":async_client_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"context_interface\",\n    hdrs = [\"context.h\"],\n    deps = [\n        \"//include/envoy/http:context_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"google_grpc_creds_interface\",\n    hdrs = [\"google_grpc_creds.h\"],\n    external_deps = [\n        \"grpc\",\n    ],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"status\",\n    hdrs = [\"status.h\"],\n)\n"
  },
  {
    "path": "include/envoy/grpc/async_client.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\n/**\n * An in-flight gRPC unary RPC.\n */\nclass AsyncRequest {\npublic:\n  virtual ~AsyncRequest() = default;\n\n  /**\n   * Signals that the request should be cancelled. No further callbacks will be invoked.\n   */\n  virtual void cancel() PURE;\n};\n\n/**\n * An in-flight gRPC stream.\n */\nclass RawAsyncStream {\npublic:\n  virtual ~RawAsyncStream() = default;\n\n  /**\n   * Send request message to the stream.\n   * @param request serialized message.\n   * @param end_stream close the stream locally. No further methods may be invoked on the stream\n   *                   object, but callbacks may still be received until the stream is closed\n   *                   remotely.\n   */\n  virtual void sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) PURE;\n\n  /**\n   * Close the stream locally and send an empty DATA frame to the remote. No further methods may be\n   * invoked on the stream object, but callbacks may still be received until the stream is closed\n   * remotely.\n   */\n  virtual void closeStream() PURE;\n\n  /**\n   * Close the stream locally and remotely (as needed). 
No further methods may be invoked on the\n   * stream object and no further callbacks will be invoked.\n   */\n  virtual void resetStream() PURE;\n\n  /***\n   * @returns if the stream has enough buffered outbound data to be over the configured buffer\n   * limits\n   */\n  virtual bool isAboveWriteBufferHighWatermark() const PURE;\n};\n\nclass RawAsyncRequestCallbacks {\npublic:\n  virtual ~RawAsyncRequestCallbacks() = default;\n\n  /**\n   * Called when populating the headers to send with initial metadata.\n   * @param metadata initial metadata reference.\n   */\n  virtual void onCreateInitialMetadata(Http::RequestHeaderMap& metadata) PURE;\n\n  /**\n   * Called when the async gRPC request succeeds. No further callbacks will be invoked.\n   * @param response the gRPC response bytes.\n   * @param span a tracing span to fill with extra tags.\n   */\n  virtual void onSuccessRaw(Buffer::InstancePtr&& response, Tracing::Span& span) PURE;\n\n  /**\n   * Called when the async gRPC request fails. No further callbacks will be invoked.\n   * @param status the gRPC status.\n   * @param message the gRPC status message or empty string if not present.\n   * @param span a tracing span to fill with extra tags.\n   */\n  virtual void onFailure(Status::GrpcStatus status, const std::string& message,\n                         Tracing::Span& span) PURE;\n};\n\n/**\n * Notifies caller of async gRPC stream status.\n * Note the gRPC stream is full-duplex, even if the local to remote stream has been ended by\n * AsyncStream.close(), AsyncStreamCallbacks can continue to receive events until the remote\n * to local stream is closed (onRemoteClose), and vice versa. 
Once the stream is closed remotely, no\n * further callbacks will be invoked.\n */\nclass RawAsyncStreamCallbacks {\npublic:\n  virtual ~RawAsyncStreamCallbacks() = default;\n\n  /**\n   * Called when populating the headers to send with initial metadata.\n   * @param metadata initial metadata reference.\n   */\n  virtual void onCreateInitialMetadata(Http::RequestHeaderMap& metadata) PURE;\n\n  /**\n   * Called when initial metadata is received. This will be called with empty metadata on a\n   * trailers-only response, followed by onReceiveTrailingMetadata() with the trailing metadata.\n   * @param metadata initial metadata reference.\n   */\n  virtual void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata) PURE;\n\n  /**\n   * Called when an async gRPC message is received.\n   * @param response the gRPC message.\n   * @return bool which is true if the message well formed and false otherwise which will cause\n              the stream to shutdown with an INTERNAL error.\n   */\n  virtual bool onReceiveMessageRaw(Buffer::InstancePtr&& response) PURE;\n\n  /**\n   * Called when trailing metadata is received. This will also be called on non-Ok grpc-status\n   * stream termination.\n   * @param metadata trailing metadata reference.\n   */\n  virtual void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) PURE;\n\n  /**\n   * Called when the remote closes or an error occurs on the gRPC stream. The stream is\n   * considered remotely closed after this invocation and no further callbacks will be\n   * invoked. In addition, no further stream operations are permitted.\n   * @param status the gRPC status.\n   * @param message the gRPC status message or empty string if not present.\n   */\n  virtual void onRemoteClose(Status::GrpcStatus status, const std::string& message) PURE;\n};\n\n/**\n * Supports sending gRPC requests and receiving responses asynchronously. 
This can be used to\n * implement either plain gRPC or streaming gRPC calls.\n */\nclass RawAsyncClient {\npublic:\n  virtual ~RawAsyncClient() = default;\n\n  /**\n   * Start a gRPC unary RPC asynchronously.\n   * @param service_full_name full name of the service (i.e. service_method.service()->full_name()).\n   * @param method_name name of the method (i.e. service_method.name()).\n   * @param request serialized message.\n   * @param callbacks the callbacks to be notified of RPC status.\n   * @param parent_span the current parent tracing context.\n   * @param options the data struct to control the request sending.\n   * @return a request handle or nullptr if no request could be started. NOTE: In this case\n   *         onFailure() has already been called inline. The client owns the request and the\n   *         handle should just be used to cancel.\n   */\n  virtual AsyncRequest* sendRaw(absl::string_view service_full_name, absl::string_view method_name,\n                                Buffer::InstancePtr&& request, RawAsyncRequestCallbacks& callbacks,\n                                Tracing::Span& parent_span,\n                                const Http::AsyncClient::RequestOptions& options) PURE;\n\n  /**\n   * Start a gRPC stream asynchronously.\n   * TODO(mattklein123): Determine if tracing should be added to streaming requests.\n   * @param service_full_name full name of the service (i.e. service_method.service()->full_name()).\n   * @param method_name name of the method (i.e. service_method.name()).\n   * @param callbacks the callbacks to be notified of stream status.\n   * @param options the data struct to control the stream.\n   * @return a stream handle or nullptr if no stream could be started. NOTE: In this case\n   *         onRemoteClose() has already been called inline. The client owns the stream and\n   *         the handle can be used to send more messages or finish the stream. 
It is expected that\n   *         closeStream() is invoked by the caller to notify the client that the stream resources\n   *         may be reclaimed.\n   */\n  virtual RawAsyncStream* startRaw(absl::string_view service_full_name,\n                                   absl::string_view method_name,\n                                   RawAsyncStreamCallbacks& callbacks,\n                                   const Http::AsyncClient::StreamOptions& options) PURE;\n};\n\nusing RawAsyncClientPtr = std::unique_ptr<RawAsyncClient>;\nusing RawAsyncClientSharedPtr = std::shared_ptr<RawAsyncClient>;\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/grpc/async_client_manager.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/stats/scope.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\n// Per-service factory for Grpc::RawAsyncClients. This factory is thread aware and will instantiate\n// with thread local state. Clients will use ThreadLocal::Instance::dispatcher() for event handling.\nclass AsyncClientFactory {\npublic:\n  virtual ~AsyncClientFactory() = default;\n\n  /**\n   * Create a gRPC::RawAsyncClient.\n   * @return RawAsyncClientPtr async client.\n   */\n  virtual RawAsyncClientPtr create() PURE;\n};\n\nusing AsyncClientFactoryPtr = std::unique_ptr<AsyncClientFactory>;\n\n// Singleton gRPC client manager. Grpc::AsyncClientManager can be used to create per-service\n// Grpc::AsyncClientFactory instances. All manufactured Grpc::AsyncClients must\n// be destroyed before the AsyncClientManager can be safely destructed.\nclass AsyncClientManager {\npublic:\n  virtual ~AsyncClientManager() = default;\n\n  /**\n   * Create a Grpc::AsyncClients factory for a service. Validation of the service is performed and\n   * will raise an exception on failure.\n   * @param grpc_service envoy::config::core::v3::GrpcService configuration.\n   * @param scope stats scope.\n   * @param skip_cluster_check if set to true skips checks for cluster presence and being statically\n   * configured.\n   * @return AsyncClientFactoryPtr factory for grpc_service.\n   * @throws EnvoyException when grpc_service validation fails.\n   */\n  virtual AsyncClientFactoryPtr\n  factoryForGrpcService(const envoy::config::core::v3::GrpcService& grpc_service,\n                        Stats::Scope& scope, bool skip_cluster_check) PURE;\n};\n\nusing AsyncClientManagerPtr = std::unique_ptr<AsyncClientManager>;\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/grpc/context.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nstruct StatNames;\n\n/**\n * Captures grpc-related structures with cardinality of one per server.\n */\nclass Context {\npublic:\n  virtual ~Context() = default;\n\n  enum class Protocol { Grpc, GrpcWeb };\n\n  struct RequestStatNames;\n\n  /**\n   * Parses out request grpc service-name and method from the path, returning a\n   * populated RequestStatNames if successful. See the implementation\n   * (source/common/grpc/common.h) for the definition of RequestStatNames. It is\n   * hidden in the implementation since it references StatName, which is defined\n   * only in the stats implementation.\n   *\n   * @param path the request path.\n   * @return the request names, expressed as StatName.\n   */\n  virtual absl::optional<RequestStatNames>\n  resolveDynamicServiceAndMethod(const Http::HeaderEntry* path) PURE;\n\n  /**\n   * Charge a success/failure stat to a cluster/service/method.\n   * @param cluster supplies the target cluster.\n   * @param protocol supplies the downstream protocol in use.\n   * @param request_names supplies the request names.\n   * @param grpc_status supplies the gRPC status.\n   */\n  virtual void chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol,\n                          const absl::optional<RequestStatNames>& request_names,\n                          const Http::HeaderEntry* grpc_status) PURE;\n\n  /**\n   * Charge a success/failure stat to a cluster/service/method.\n   * @param cluster supplies the target cluster.\n   * @param protocol supplies the downstream protocol in use.\n   * @param request_names supplies the request names.\n   * @param success supplies whether the call succeeded.\n   */\n  virtual void chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol,\n                          const absl::optional<RequestStatNames>& 
request_names, bool success) PURE;\n\n  /**\n   * Charge a success/failure stat to a cluster/service/method.\n   * @param cluster supplies the target cluster.\n   * @param request_names supplies the request names.\n   * @param success supplies whether the call succeeded.\n   */\n  virtual void chargeStat(const Upstream::ClusterInfo& cluster,\n                          const absl::optional<RequestStatNames>& request_names, bool success) PURE;\n\n  /**\n   * Charge a request message stat to a cluster/service/method.\n   * @param cluster supplies the target cluster.\n   * @param request_names supplies the request names.\n   * @param amount supplies the number of the request messages.\n   */\n  virtual void chargeRequestMessageStat(const Upstream::ClusterInfo& cluster,\n                                        const absl::optional<RequestStatNames>& request_names,\n                                        uint64_t amount) PURE;\n\n  /**\n   * Charge a response message stat to a cluster/service/method.\n   * @param cluster supplies the target cluster.\n   * @param request_names supplies the request names.\n   * @param amount supplies the number of the response messages.\n   */\n  virtual void chargeResponseMessageStat(const Upstream::ClusterInfo& cluster,\n                                         const absl::optional<RequestStatNames>& request_names,\n                                         uint64_t amount) PURE;\n\n  /**\n   * Charge upstream stat to a cluster/service/method.\n   * @param cluster supplies the target cluster.\n   * @param request_names supplies the request names.\n   * @param duration supplies the duration of the upstream request.\n   */\n  virtual void chargeUpstreamStat(const Upstream::ClusterInfo& cluster,\n                                  const absl::optional<RequestStatNames>& request_names,\n                                  std::chrono::milliseconds duration) PURE;\n\n  /**\n   * @return a struct containing StatNames for gRPC stat tokens.\n   
*/\n  virtual StatNames& statNames() PURE;\n};\n\nusing ContextPtr = std::unique_ptr<Context>;\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/grpc/google_grpc_creds.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/typed_config.h\"\n\n#include \"grpcpp/grpcpp.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\n/**\n * Interface for all Google gRPC credentials factories.\n */\nclass GoogleGrpcCredentialsFactory : public Config::UntypedFactory {\npublic:\n  ~GoogleGrpcCredentialsFactory() override = default;\n\n  /**\n   * Get a ChannelCredentials to be used for authentication of a gRPC channel.\n   *\n   * GoogleGrpcCredentialsFactory should always return a ChannelCredentials. To use CallCredentials,\n   * the ChannelCredentials can be created by using a combination of CompositeChannelCredentials and\n   * CompositeCallCredentials to combine multiple credentials.\n   *\n   * @param grpc_service_config contains configuration options\n   * @param api reference to the Api object\n   * @return std::shared_ptr<grpc::ChannelCredentials> to be used to authenticate a Google gRPC\n   * channel.\n   */\n  virtual std::shared_ptr<grpc::ChannelCredentials>\n  getChannelCredentials(const envoy::config::core::v3::GrpcService& grpc_service_config,\n                        Api::Api& api) PURE;\n\n  std::string category() const override { return \"envoy.grpc_credentials\"; }\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/grpc/status.h",
    "content": "#pragma once\n\n#include <cstdint>\n\nnamespace Envoy {\nnamespace Grpc {\n\nclass Status {\npublic:\n  using GrpcStatus = int64_t;\n\n  enum WellKnownGrpcStatus {\n    // The RPC completed successfully.\n    Ok = 0,\n    // The RPC was canceled.\n    Canceled = 1,\n    // Some unknown error occurred.\n    Unknown = 2,\n    // An argument to the RPC was invalid.\n    InvalidArgument = 3,\n    // The deadline for the RPC expired before the RPC completed.\n    DeadlineExceeded = 4,\n    // Some resource for the RPC was not found.\n    NotFound = 5,\n    // A resource the RPC attempted to create already exists.\n    AlreadyExists = 6,\n    // Permission was denied for the RPC.\n    PermissionDenied = 7,\n    // Some resource is exhausted, resulting in RPC failure.\n    ResourceExhausted = 8,\n    // Some precondition for the RPC failed.\n    FailedPrecondition = 9,\n    // The RPC was aborted.\n    Aborted = 10,\n    // Some operation was requested outside of a legal range.\n    OutOfRange = 11,\n    // The RPC requested was not implemented.\n    Unimplemented = 12,\n    // Some internal error occurred.\n    Internal = 13,\n    // The RPC endpoint is current unavailable.\n    Unavailable = 14,\n    // There was some data loss resulting in RPC failure.\n    DataLoss = 15,\n    // The RPC does not have required credentials for the RPC to succeed.\n    Unauthenticated = 16,\n\n    // Maximum value of valid status codes.\n    MaximumKnown = Unauthenticated,\n\n    // This is a non-GRPC error code, indicating the status code in gRPC headers\n    // was invalid.\n    InvalidCode = -1,\n  };\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"api_listener_interface\",\n    hdrs = [\"api_listener.h\"],\n    deps = [\":codec_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"async_client_interface\",\n    hdrs = [\"async_client.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":message_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"codec_interface\",\n    hdrs = [\"codec.h\"],\n    deps = [\n        \":header_map_interface\",\n        \":metadata_interface\",\n        \":protocol_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/grpc:status\",\n        \"//include/envoy/network:address_interface\",\n        \"//source/common/http:status_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"codes_interface\",\n    hdrs = [\"codes.h\"],\n    deps = [\"//include/envoy/stats:stats_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"conn_pool_interface\",\n    hdrs = [\"conn_pool.h\"],\n    deps = [\n        \":codec_interface\",\n        \"//include/envoy/common:conn_pool_interface\",\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/upstream:upstream_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"context_interface\",\n    hdrs = [\"context.h\"],\n    deps = [\n        \":codes_interface\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_interface\",\n    hdrs = [\"filter.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":codec_interface\",\n        \":header_map_interface\",\n   
     \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/common:scope_tracker_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/grpc:status\",\n        \"//include/envoy/router:router_interface\",\n        \"//include/envoy/ssl:connection_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hash_policy_interface\",\n    hdrs = [\"hash_policy.h\"],\n    deps = [\n        \":header_map_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"header_map_interface\",\n    hdrs = [\"header_map.h\"],\n    external_deps = [\n        \"abseil_inlined_vector\",\n    ],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hash_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"message_interface\",\n    hdrs = [\"message.h\"],\n    deps = [\n        \":header_map_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"protocol_interface\",\n    hdrs = [\"protocol.h\"],\n)\n\nenvoy_cc_library(\n    name = \"query_params_interface\",\n    hdrs = [\"query_params.h\"],\n)\n\nenvoy_cc_library(\n    name = \"metadata_interface\",\n    hdrs = [\"metadata_interface.h\"],\n    external_deps = [\"abseil_node_hash_map\"],\n)\n\nenvoy_cc_library(\n    name = \"request_id_extension_interface\",\n    hdrs = [\"request_id_extension.h\"],\n    deps = [\n        \":header_map_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/http/api_listener.h",
    "content": "#pragma once\n\n#include \"envoy/http/codec.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * ApiListener that allows consumers to interact with HTTP streams via API calls.\n */\n// TODO(junr03): this is a replica of the functions in ServerConnectionCallbacks. It would be nice\n// to not duplicate this interface layout.\nclass ApiListener {\npublic:\n  virtual ~ApiListener() = default;\n\n  /**\n   * Invoked when a new request stream is initiated by the remote.\n   * @param response_encoder supplies the encoder to use for creating the response. The request and\n   *                         response are backed by the same Stream object.\n   * @param is_internally_created indicates if this stream was originated by a\n   *   client, or was created by Envoy, by example as part of an internal redirect.\n   * @return RequestDecoder& supplies the decoder callbacks to fire into for stream\n   *   decoding events.\n   */\n  virtual RequestDecoder& newStream(ResponseEncoder& response_encoder,\n                                    bool is_internally_created = false) PURE;\n};\n\nusing ApiListenerPtr = std::unique_ptr<ApiListener>;\nusing ApiListenerOptRef = absl::optional<std::reference_wrapper<ApiListener>>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/async_client.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/message.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Supports sending an HTTP request message and receiving a response asynchronously.\n */\nclass AsyncClient {\npublic:\n  /**\n   * An in-flight HTTP request.\n   */\n  class Request {\n  public:\n    virtual ~Request() = default;\n\n    /**\n     * Signals that the request should be cancelled.\n     */\n    virtual void cancel() PURE;\n  };\n\n  /**\n   * Async Client failure reasons.\n   */\n  enum class FailureReason {\n    // The stream has been reset.\n    Reset\n  };\n\n  /**\n   * Notifies caller of async HTTP request status.\n   *\n   * To support a use case where a caller makes multiple requests in parallel,\n   * individual callback methods provide request context corresponding to that response.\n   */\n  class Callbacks {\n  public:\n    virtual ~Callbacks() = default;\n\n    /**\n     * Called when the async HTTP request succeeds.\n     * @param request  request handle.\n     *                 NOTE: request handle is passed for correlation purposes only, e.g.\n     *                 for client code to be able to exclude that handle from a list of\n     *                 requests in progress.\n     * @param response the HTTP response\n     */\n    virtual void onSuccess(const Request& request, ResponseMessagePtr&& response) PURE;\n\n    /**\n     * Called when the async HTTP request fails.\n     * @param request request handle.\n     *                NOTE: request handle is passed for correlation purposes only, e.g.\n     *                for client code to be able to exclude that handle from a list of\n     *                requests in progress.\n     * @param reason  failure reason\n   
  */\n    virtual void onFailure(const Request& request, FailureReason reason) PURE;\n\n    /**\n     * Called before finalizing upstream span when the request is complete or reset.\n     * @param span a tracing span to fill with extra tags.\n     * @param response_headers the response headers.\n     */\n    virtual void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span& span,\n                                              const Http::ResponseHeaderMap* response_headers) PURE;\n  };\n\n  /**\n   * Notifies caller of async HTTP stream status.\n   * Note the HTTP stream is full-duplex, even if the local to remote stream has been ended\n   * by Stream.sendHeaders/sendData with end_stream=true or sendTrailers,\n   * StreamCallbacks can continue to receive events until the remote to local stream is closed,\n   * and vice versa.\n   */\n  class StreamCallbacks {\n  public:\n    virtual ~StreamCallbacks() = default;\n\n    /**\n     * Called when all headers get received on the async HTTP stream.\n     * @param headers the headers received\n     * @param end_stream whether the response is header only\n     */\n    virtual void onHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) PURE;\n\n    /**\n     * Called when a data frame get received on the async HTTP stream.\n     * This can be invoked multiple times if the data get streamed.\n     * @param data the data received\n     * @param end_stream whether the data is the last data frame\n     */\n    virtual void onData(Buffer::Instance& data, bool end_stream) PURE;\n\n    /**\n     * Called when all trailers get received on the async HTTP stream.\n     * @param trailers the trailers received.\n     */\n    virtual void onTrailers(ResponseTrailerMapPtr&& trailers) PURE;\n\n    /**\n     * Called when both the local and remote have gracefully closed the stream.\n     * Useful for asymmetric cases where end_stream may not be bidirectionally observable.\n     * Note this is NOT called on stream reset.\n     */\n    
virtual void onComplete() PURE;\n\n    /**\n     * Called when the async HTTP stream is reset.\n     */\n    virtual void onReset() PURE;\n  };\n\n  /**\n   * An in-flight HTTP stream.\n   */\n  class Stream {\n  public:\n    virtual ~Stream() = default;\n\n    /***\n     * Send headers to the stream. This method cannot be invoked more than once and\n     * need to be called before sendData.\n     * @param headers supplies the headers to send.\n     * @param end_stream supplies whether this is a header only request.\n     */\n    virtual void sendHeaders(RequestHeaderMap& headers, bool end_stream) PURE;\n\n    /***\n     * Send data to the stream. This method can be invoked multiple times if it get streamed.\n     * To end the stream without data, call this method with empty buffer.\n     * @param data supplies the data to send.\n     * @param end_stream supplies whether this is the last data.\n     */\n    virtual void sendData(Buffer::Instance& data, bool end_stream) PURE;\n\n    /***\n     * Send trailers. 
This method cannot be invoked more than once, and implicitly ends the stream.\n     * @param trailers supplies the trailers to send.\n     */\n    virtual void sendTrailers(RequestTrailerMap& trailers) PURE;\n\n    /***\n     * Reset the stream.\n     */\n    virtual void reset() PURE;\n\n    /***\n     * @returns if the stream has enough buffered outbound data to be over the configured buffer\n     * limits\n     */\n    virtual bool isAboveWriteBufferHighWatermark() const PURE;\n  };\n\n  virtual ~AsyncClient() = default;\n\n  /**\n   * A structure to hold the options for AsyncStream object.\n   */\n  struct StreamOptions {\n    StreamOptions& setTimeout(const absl::optional<std::chrono::milliseconds>& v) {\n      timeout = v;\n      return *this;\n    }\n    StreamOptions& setTimeout(const std::chrono::milliseconds& v) {\n      timeout = v;\n      return *this;\n    }\n    StreamOptions& setBufferBodyForRetry(bool v) {\n      buffer_body_for_retry = v;\n      return *this;\n    }\n    StreamOptions& setSendXff(bool v) {\n      send_xff = v;\n      return *this;\n    }\n    StreamOptions& setHashPolicy(\n        const Protobuf::RepeatedPtrField<envoy::config::route::v3::RouteAction::HashPolicy>& v) {\n      hash_policy = v;\n      return *this;\n    }\n\n    // For gmock test\n    bool operator==(const StreamOptions& src) const {\n      return timeout == src.timeout && buffer_body_for_retry == src.buffer_body_for_retry &&\n             send_xff == src.send_xff;\n    }\n\n    // The timeout supplies the stream timeout, measured since when the frame with\n    // end_stream flag is sent until when the first frame is received.\n    absl::optional<std::chrono::milliseconds> timeout;\n\n    // The buffer_body_for_retry specifies whether the streamed body will be buffered so that\n    // it can be retried. In general, this should be set to false for a true stream. 
However,\n    // streaming is also used in certain cases such as gRPC unary calls, where retry can\n    // still be useful.\n    bool buffer_body_for_retry{false};\n\n    // If true, x-forwarded-for header will be added.\n    bool send_xff{true};\n\n    // Provides the hash policy for hashing load balancing strategies.\n    Protobuf::RepeatedPtrField<envoy::config::route::v3::RouteAction::HashPolicy> hash_policy;\n  };\n\n  /**\n   * A structure to hold the options for AsyncRequest object.\n   */\n  struct RequestOptions : public StreamOptions {\n    RequestOptions& setTimeout(const absl::optional<std::chrono::milliseconds>& v) {\n      StreamOptions::setTimeout(v);\n      return *this;\n    }\n    RequestOptions& setTimeout(const std::chrono::milliseconds& v) {\n      StreamOptions::setTimeout(v);\n      return *this;\n    }\n    RequestOptions& setBufferBodyForRetry(bool v) {\n      StreamOptions::setBufferBodyForRetry(v);\n      return *this;\n    }\n    RequestOptions& setSendXff(bool v) {\n      StreamOptions::setSendXff(v);\n      return *this;\n    }\n    RequestOptions& setHashPolicy(\n        const Protobuf::RepeatedPtrField<envoy::config::route::v3::RouteAction::HashPolicy>& v) {\n      StreamOptions::setHashPolicy(v);\n      return *this;\n    }\n    RequestOptions& setParentSpan(Tracing::Span& parent_span) {\n      parent_span_ = &parent_span;\n      return *this;\n    }\n    RequestOptions& setChildSpanName(const std::string& child_span_name) {\n      child_span_name_ = child_span_name;\n      return *this;\n    }\n    RequestOptions& setSampled(bool sampled) {\n      sampled_ = sampled;\n      return *this;\n    }\n\n    // For gmock test\n    bool operator==(const RequestOptions& src) const {\n      return StreamOptions::operator==(src) && parent_span_ == src.parent_span_ &&\n             child_span_name_ == src.child_span_name_ && sampled_ == src.sampled_;\n    }\n\n    // The parent span that child spans are created under to trace egress 
requests/responses.\n    // If not set, requests will not be traced.\n    Tracing::Span* parent_span_{nullptr};\n    // The name to give to the child span that represents the async http request.\n    // If left empty and parent_span_ is set, then the default name will have the cluster name.\n    // Only used if parent_span_ is set.\n    std::string child_span_name_{\"\"};\n    // Sampling decision for the tracing span. The span is sampled by default.\n    bool sampled_{true};\n  };\n\n  /**\n   * Send an HTTP request asynchronously\n   * @param request the request to send.\n   * @param callbacks the callbacks to be notified of request status.\n   * @param options the data struct to control the request sending.\n   * @return a request handle or nullptr if no request could be created. NOTE: In this case\n   *         onFailure() has already been called inline. The client owns the request and the\n   *         handle should just be used to cancel.\n   */\n\n  virtual Request* send(RequestMessagePtr&& request, Callbacks& callbacks,\n                        const RequestOptions& options) PURE;\n\n  /**\n   * Start an HTTP stream asynchronously.\n   * @param callbacks the callbacks to be notified of stream status.\n   * @param options the data struct to control the stream.\n   * @return a stream handle or nullptr if no stream could be started. NOTE: In this case\n   *         onReset() has already been called inline. The client owns the stream and\n   *         the handle can be used to send more messages or close the stream.\n   */\n  virtual Stream* start(StreamCallbacks& callbacks, const StreamOptions& options) PURE;\n\n  /**\n   * @return Event::Dispatcher& the dispatcher backing this client.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n};\n\nusing AsyncClientPtr = std::unique_ptr<AsyncClient>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/codec.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <limits>\n#include <memory>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/metadata_interface.h\"\n#include \"envoy/http/protocol.h\"\n#include \"envoy/network/address.h\"\n\n#include \"common/http/status.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nnamespace Http1 {\nstruct CodecStats;\n}\n\nnamespace Http2 {\nstruct CodecStats;\n}\n\n// Legacy default value of 60K is safely under both codec default limits.\nstatic const uint32_t DEFAULT_MAX_REQUEST_HEADERS_KB = 60;\n// Default maximum number of headers.\nstatic const uint32_t DEFAULT_MAX_HEADERS_COUNT = 100;\n\nconst char MaxRequestHeadersCountOverrideKey[] =\n    \"envoy.reloadable_features.max_request_headers_count\";\nconst char MaxResponseHeadersCountOverrideKey[] =\n    \"envoy.reloadable_features.max_response_headers_count\";\n\nclass Stream;\n\n/**\n * Error codes used to convey the reason for a GOAWAY.\n */\nenum class GoAwayErrorCode {\n  NoError,\n  Other,\n};\n\n/**\n * Stream encoder options specific to HTTP/1.\n */\nclass Http1StreamEncoderOptions {\npublic:\n  virtual ~Http1StreamEncoderOptions() = default;\n\n  /**\n   * Force disable chunk encoding, even if there is no known content length. This effectively forces\n   * HTTP/1.0 behavior in which the connection will need to be closed to indicate end of stream.\n   */\n  virtual void disableChunkEncoding() PURE;\n};\n\nusing Http1StreamEncoderOptionsOptRef =\n    absl::optional<std::reference_wrapper<Http1StreamEncoderOptions>>;\n\n/**\n * Encodes an HTTP stream. 
This interface contains methods common to both the request and response\n * path.\n * TODO(mattklein123): Consider removing the StreamEncoder interface entirely and just duplicating\n * the methods in both the request/response path for simplicity.\n */\nclass StreamEncoder {\npublic:\n  virtual ~StreamEncoder() = default;\n\n  /**\n   * Encode a data frame.\n   * @param data supplies the data to encode. The data may be moved by the encoder.\n   * @param end_stream supplies whether this is the last data frame.\n   */\n  virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * @return Stream& the backing stream.\n   */\n  virtual Stream& getStream() PURE;\n\n  /**\n   * Encode metadata.\n   * @param metadata_map_vector is the vector of metadata maps to encode.\n   */\n  virtual void encodeMetadata(const MetadataMapVector& metadata_map_vector) PURE;\n\n  /**\n   * Return the HTTP/1 stream encoder options if applicable. If the stream is not HTTP/1 returns\n   * absl::nullopt.\n   */\n  virtual Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() PURE;\n};\n\n/**\n * Stream encoder used for sending a request (client to server). Virtual inheritance is required\n * due to a parallel implementation split between the shared base class and the derived class.\n */\nclass RequestEncoder : public virtual StreamEncoder {\npublic:\n  /**\n   * Encode headers, optionally indicating end of stream.\n   * @param headers supplies the header map to encode.\n   * @param end_stream supplies whether this is a header only request.\n   */\n  virtual void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) PURE;\n\n  /**\n   * Encode trailers. This implicitly ends the stream.\n   * @param trailers supplies the trailers to encode.\n   */\n  virtual void encodeTrailers(const RequestTrailerMap& trailers) PURE;\n};\n\n/**\n * Stream encoder used for sending a response (server to client). 
Virtual inheritance is required\n * due to a parallel implementation split between the shared base class and the derived class.\n */\nclass ResponseEncoder : public virtual StreamEncoder {\npublic:\n  /**\n   * Encode 100-Continue headers.\n   * @param headers supplies the 100-Continue header map to encode.\n   */\n  virtual void encode100ContinueHeaders(const ResponseHeaderMap& headers) PURE;\n\n  /**\n   * Encode headers, optionally indicating end of stream. Response headers must\n   * have a valid :status set.\n   * @param headers supplies the header map to encode.\n   * @param end_stream supplies whether this is a header only response.\n   */\n  virtual void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) PURE;\n\n  /**\n   * Encode trailers. This implicitly ends the stream.\n   * @param trailers supplies the trailers to encode.\n   */\n  virtual void encodeTrailers(const ResponseTrailerMap& trailers) PURE;\n\n  /**\n   * Indicates whether invalid HTTP messaging should be handled with a stream error or a connection\n   * error.\n   */\n  virtual bool streamErrorOnInvalidHttpMessage() const PURE;\n};\n\n/**\n * Decodes an HTTP stream. These are callbacks fired into a sink. 
This interface contains methods\n * common to both the request and response path.\n * TODO(mattklein123): Consider removing the StreamDecoder interface entirely and just duplicating\n * the methods in both the request/response path for simplicity.\n */\nclass StreamDecoder {\npublic:\n  virtual ~StreamDecoder() = default;\n\n  /**\n   * Called with a decoded data frame.\n   * @param data supplies the decoded data.\n   * @param end_stream supplies whether this is the last data frame.\n   */\n  virtual void decodeData(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Called with decoded METADATA.\n   * @param decoded METADATA.\n   */\n  virtual void decodeMetadata(MetadataMapPtr&& metadata_map) PURE;\n};\n\n/**\n * Stream decoder used for receiving a request (client to server). Virtual inheritance is required\n * due to a parallel implementation split between the shared base class and the derived class.\n */\nclass RequestDecoder : public virtual StreamDecoder {\npublic:\n  /**\n   * Called with decoded headers, optionally indicating end of stream.\n   * @param headers supplies the decoded headers map.\n   * @param end_stream supplies whether this is a header only request.\n   */\n  virtual void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) PURE;\n\n  /**\n   * Called with a decoded trailers frame. 
This implicitly ends the stream.\n   * @param trailers supplies the decoded trailers.\n   */\n  virtual void decodeTrailers(RequestTrailerMapPtr&& trailers) PURE;\n\n  /**\n   * Called if the codec needs to send a protocol error.\n   * @param is_grpc_request indicates if the request is a gRPC request\n   * @param code supplies the HTTP error code to send.\n   * @param body supplies an optional body to send with the local reply.\n   * @param modify_headers supplies a way to edit headers before they are sent downstream.\n   * @param grpc_status an optional gRPC status for gRPC requests\n   * @param details details about the source of the error, for debug purposes\n   */\n  virtual void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body,\n                              const std::function<void(ResponseHeaderMap& headers)>& modify_headers,\n                              const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                              absl::string_view details) PURE;\n};\n\n/**\n * Stream decoder used for receiving a response (server to client). Virtual inheritance is required\n * due to a parallel implementation split between the shared base class and the derived class.\n */\nclass ResponseDecoder : public virtual StreamDecoder {\npublic:\n  /**\n   * Called with decoded 100-Continue headers.\n   * @param headers supplies the decoded 100-Continue headers map.\n   */\n  virtual void decode100ContinueHeaders(ResponseHeaderMapPtr&& headers) PURE;\n\n  /**\n   * Called with decoded headers, optionally indicating end of stream.\n   * @param headers supplies the decoded headers map.\n   * @param end_stream supplies whether this is a header only response.\n   */\n  virtual void decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) PURE;\n\n  /**\n   * Called with a decoded trailers frame. 
This implicitly ends the stream.\n   * @param trailers supplies the decoded trailers.\n   */\n  virtual void decodeTrailers(ResponseTrailerMapPtr&& trailers) PURE;\n};\n\n/**\n * Stream reset reasons.\n */\nenum class StreamResetReason {\n  // If a local codec level reset was sent on the stream.\n  LocalReset,\n  // If a local codec level refused stream reset was sent on the stream (allowing for retry).\n  LocalRefusedStreamReset,\n  // If a remote codec level reset was received on the stream.\n  RemoteReset,\n  // If a remote codec level refused stream reset was received on the stream (allowing for retry).\n  RemoteRefusedStreamReset,\n  // If the stream was locally reset by a connection pool due to an initial connection failure.\n  ConnectionFailure,\n  // If the stream was locally reset due to connection termination.\n  ConnectionTermination,\n  // The stream was reset because of a resource overflow.\n  Overflow\n};\n\n/**\n * Callbacks that fire against a stream.\n */\nclass StreamCallbacks {\npublic:\n  virtual ~StreamCallbacks() = default;\n\n  /**\n   * Fires when a stream has been remote reset.\n   * @param reason supplies the reset reason.\n   * @param transport_failure_reason supplies underlying transport failure reason.\n   */\n  virtual void onResetStream(StreamResetReason reason,\n                             absl::string_view transport_failure_reason) PURE;\n\n  /**\n   * Fires when a stream, or the connection the stream is sending to, goes over its high watermark.\n   */\n  virtual void onAboveWriteBufferHighWatermark() PURE;\n\n  /**\n   * Fires when a stream, or the connection the stream is sending to, goes from over its high\n   * watermark to under its low watermark.\n   */\n  virtual void onBelowWriteBufferLowWatermark() PURE;\n};\n\n/**\n * An HTTP stream (request, response, and push).\n */\nclass Stream {\npublic:\n  virtual ~Stream() = default;\n\n  /**\n   * Add stream callbacks.\n   * @param callbacks supplies the callbacks to fire on 
stream events.\n   */\n  virtual void addCallbacks(StreamCallbacks& callbacks) PURE;\n\n  /**\n   * Remove stream callbacks.\n   * @param callbacks supplies the callbacks to remove.\n   */\n  virtual void removeCallbacks(StreamCallbacks& callbacks) PURE;\n\n  /**\n   * Reset the stream. No events will fire beyond this point.\n   * @param reason supplies the reset reason.\n   */\n  virtual void resetStream(StreamResetReason reason) PURE;\n\n  /**\n   * Enable/disable further data from this stream.\n   * Cessation of data may not be immediate. For example, for HTTP/2 this may stop further flow\n   * control window updates which will result in the peer eventually stopping sending data.\n   * @param disable informs if reads should be disabled (true) or re-enabled (false).\n   *\n   * Note that this function reference counts calls. For example\n   * readDisable(true);  // Disables data\n   * readDisable(true);  // Notes the stream is blocked by two sources\n   * readDisable(false);  // Notes the stream is blocked by one source\n   * readDisable(false);  // Marks the stream as unblocked, so resumes reading.\n   */\n  virtual void readDisable(bool disable) PURE;\n\n  /**\n   * Return the number of bytes this stream is allowed to buffer, or 0 if there is no limit\n   * configured.\n   * @return uint32_t the stream's configured buffer limits.\n   */\n  virtual uint32_t bufferLimit() PURE;\n\n  /**\n   * @return string_view optionally return the reason behind codec level errors.\n   *\n   * This information is communicated via direct accessor rather than passed with the\n   * CodecProtocolException so that the error can be associated only with the problematic stream and\n   * not associated with every stream on the connection.\n   */\n  virtual absl::string_view responseDetails() { return \"\"; }\n\n  /**\n   * @return const Address::InstanceConstSharedPtr& the local address of the connection associated\n   * with the stream.\n   */\n  virtual const 
Network::Address::InstanceConstSharedPtr& connectionLocalAddress() PURE;\n\n  /**\n   * Set the flush timeout for the stream. At the codec level this is used to bound the amount of\n   * time the codec will wait to flush body data pending open stream window. It does *not* count\n   * small window updates as satisfying the idle timeout as this is a potential DoS vector.\n   */\n  virtual void setFlushTimeout(std::chrono::milliseconds timeout) PURE;\n};\n\n/**\n * Connection level callbacks.\n */\nclass ConnectionCallbacks {\npublic:\n  virtual ~ConnectionCallbacks() = default;\n\n  /**\n   * Fires when the remote indicates \"go away.\" No new streams should be created.\n   */\n  virtual void onGoAway(GoAwayErrorCode error_code) PURE;\n};\n\n/**\n * HTTP/1.* Codec settings\n */\nstruct Http1Settings {\n  // Enable codec to parse absolute URIs. This enables forward/explicit proxy support for non TLS\n  // traffic\n  bool allow_absolute_url_{false};\n  // Allow HTTP/1.0 from downstream.\n  bool accept_http_10_{false};\n  // Set a default host if no Host: header is present for HTTP/1.0 requests.`\n  std::string default_host_for_http_10_;\n  // Encode trailers in Http. By default the HTTP/1 codec drops proxied trailers.\n  // Note that this only happens when Envoy is chunk encoding which occurs when:\n  //  - The request is HTTP/1.1\n  //  - Is neither a HEAD only request nor a HTTP Upgrade\n  //  - Not a HEAD request\n  bool enable_trailers_{false};\n  // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding`\n  // headers set. 
By default such messages are rejected, but if option is enabled - Envoy will\n  // remove Content-Length header and process message.\n  bool allow_chunked_length_{false};\n\n  enum class HeaderKeyFormat {\n    // By default no formatting is performed, presenting all headers in lowercase (as Envoy\n    // internals normalize everything to lowercase.)\n    Default,\n    // Performs proper casing of header keys: the first and all alpha characters following a\n    // non-alphanumeric character is capitalized.\n    ProperCase,\n  };\n\n  // How header keys should be formatted when serializing HTTP/1.1 headers.\n  HeaderKeyFormat header_key_format_{HeaderKeyFormat::Default};\n\n  // Behaviour on invalid HTTP messaging:\n  // - if true, the HTTP/1.1 connection is left open (where possible)\n  // - if false, the HTTP/1.1 connection is terminated\n  bool stream_error_on_invalid_http_message_{false};\n};\n\n/**\n * A connection (client or server) that owns multiple streams.\n */\nclass Connection {\npublic:\n  virtual ~Connection() = default;\n\n  /**\n   * Dispatch incoming connection data.\n   * @param data supplies the data to dispatch. The codec will drain as many bytes as it processes.\n   * @return Status indicating the status of the codec. Holds any errors encountered while\n   * processing the incoming data.\n   */\n  virtual Status dispatch(Buffer::Instance& data) PURE;\n\n  /**\n   * Indicate \"go away\" to the remote. No new streams can be created beyond this point.\n   */\n  virtual void goAway() PURE;\n\n  /**\n   * @return the protocol backing the connection. This can change if for example an HTTP/1.1\n   *         connection gets an HTTP/1.0 request on it.\n   */\n  virtual Protocol protocol() PURE;\n\n  /**\n   * Indicate a \"shutdown notice\" to the remote. 
This is a hint that the remote should not send\n   * any new streams, but if streams do arrive that will not be reset.\n   */\n  virtual void shutdownNotice() PURE;\n\n  /**\n   * @return bool whether the codec has data that it wants to write but cannot due to protocol\n   *              reasons (e.g, needing window updates).\n   */\n  virtual bool wantsToWrite() PURE;\n\n  /**\n   * Called when the underlying Network::Connection goes over its high watermark.\n   */\n  virtual void onUnderlyingConnectionAboveWriteBufferHighWatermark() PURE;\n\n  /**\n   * Called when the underlying Network::Connection goes from over its high watermark to under its\n   * low watermark.\n   */\n  virtual void onUnderlyingConnectionBelowWriteBufferLowWatermark() PURE;\n};\n\n/**\n * Callbacks for downstream connection watermark limits.\n */\nclass DownstreamWatermarkCallbacks {\npublic:\n  virtual ~DownstreamWatermarkCallbacks() = default;\n\n  /**\n   * Called when the downstream connection or stream goes over its high watermark. Note that this\n   * may be called separately for both the stream going over and the connection going over. It\n   * is the responsibility of the DownstreamWatermarkCallbacks implementation to handle unwinding\n   * multiple high and low watermark calls.\n   */\n  virtual void onAboveWriteBufferHighWatermark() PURE;\n\n  /**\n   * Called when the downstream connection or stream goes from over its high watermark to under its\n   * low watermark. 
As with onAboveWriteBufferHighWatermark above, this may be called independently\n   * when both the stream and the connection go under the low watermark limit, and the callee must\n   * ensure that the flow of data does not resume until all callers which were above their high\n   * watermarks have gone below.\n   */\n  virtual void onBelowWriteBufferLowWatermark() PURE;\n};\n\n/**\n * Callbacks for server connections.\n */\nclass ServerConnectionCallbacks : public virtual ConnectionCallbacks {\npublic:\n  /**\n   * Invoked when a new request stream is initiated by the remote.\n   * @param response_encoder supplies the encoder to use for creating the response. The request and\n   *                         response are backed by the same Stream object.\n   * @param is_internally_created indicates if this stream was originated by a\n   *   client, or was created by Envoy, by example as part of an internal redirect.\n   * @return RequestDecoder& supplies the decoder callbacks to fire into for stream decoding\n   *   events.\n   */\n  virtual RequestDecoder& newStream(ResponseEncoder& response_encoder,\n                                    bool is_internally_created = false) PURE;\n};\n\n/**\n * A server side HTTP connection.\n */\nclass ServerConnection : public virtual Connection {};\nusing ServerConnectionPtr = std::unique_ptr<ServerConnection>;\n\n/**\n * A client side HTTP connection.\n */\nclass ClientConnection : public virtual Connection {\npublic:\n  /**\n   * Create a new outgoing request stream.\n   * @param response_decoder supplies the decoder callbacks to fire response events into.\n   * @return RequestEncoder& supplies the encoder to write the request into.\n   */\n  virtual RequestEncoder& newStream(ResponseDecoder& response_decoder) PURE;\n};\n\nusing ClientConnectionPtr = std::unique_ptr<ClientConnection>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/codes.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/symbol_table.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * HTTP response codes.\n * http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml\n */\nenum class Code {\n  // clang-format off\n  Continue                      = 100,\n  SwitchingProtocols            = 101,\n\n  OK                            = 200,\n  Created                       = 201,\n  Accepted                      = 202,\n  NonAuthoritativeInformation   = 203,\n  NoContent                     = 204,\n  ResetContent                  = 205,\n  PartialContent                = 206,\n  MultiStatus                   = 207,\n  AlreadyReported               = 208,\n  IMUsed                        = 226,\n\n  MultipleChoices               = 300,\n  MovedPermanently              = 301,\n  Found                         = 302,\n  SeeOther                      = 303,\n  NotModified                   = 304,\n  UseProxy                      = 305,\n  TemporaryRedirect             = 307,\n  PermanentRedirect             = 308,\n\n  BadRequest                    = 400,\n  Unauthorized                  = 401,\n  PaymentRequired               = 402,\n  Forbidden                     = 403,\n  NotFound                      = 404,\n  MethodNotAllowed              = 405,\n  NotAcceptable                 = 406,\n  ProxyAuthenticationRequired   = 407,\n  RequestTimeout                = 408,\n  Conflict                      = 409,\n  Gone                          = 410,\n  LengthRequired                = 411,\n  PreconditionFailed            = 412,\n  PayloadTooLarge               = 413,\n  URITooLong                    = 414,\n  UnsupportedMediaType          = 415,\n  RangeNotSatisfiable           = 416,\n  ExpectationFailed             = 417,\n  MisdirectedRequest            = 421,\n  UnprocessableEntity           = 422,\n  Locked                        = 423,\n  FailedDependency            
  = 424,\n  UpgradeRequired               = 426,\n  PreconditionRequired          = 428,\n  TooManyRequests               = 429,\n  RequestHeaderFieldsTooLarge   = 431,\n\n  InternalServerError           = 500,\n  NotImplemented                = 501,\n  BadGateway                    = 502,\n  ServiceUnavailable            = 503,\n  GatewayTimeout                = 504,\n  HTTPVersionNotSupported       = 505,\n  VariantAlsoNegotiates         = 506,\n  InsufficientStorage           = 507,\n  LoopDetected                  = 508,\n  NotExtended                   = 510,\n  NetworkAuthenticationRequired = 511\n  // clang-format on\n};\n\n/**\n * Manages updating of statistics for HTTP Status Codes. Sets up string-tokens\n * for fast combining of tokens based on scope, status-code buckets (2xx,\n * 4xx...), and exact status code.\n */\nclass CodeStats {\npublic:\n  virtual ~CodeStats() = default;\n\n  struct ResponseStatInfo;\n  struct ResponseTimingInfo;\n\n  /**\n   * Charge a simple response stat to an upstream.\n   */\n  virtual void chargeBasicResponseStat(Stats::Scope& scope, Stats::StatName prefix,\n                                       Code response_code) const PURE;\n\n  /**\n   * Charge a response stat to both agg counters (*xx) as well as code specific counters. This\n   * routine also looks for the x-envoy-upstream-canary header and if it is set, also charges\n   * canary stats.\n   */\n  virtual void chargeResponseStat(const ResponseStatInfo& info) const PURE;\n\n  /**\n   * Charge a response timing to the various dynamic stat postfixes.\n   */\n  virtual void chargeResponseTiming(const ResponseTimingInfo& info) const PURE;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/conn_pool.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n\n#include \"envoy/common/conn_pool.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace ConnectionPool {\n\nusing PoolFailureReason = ::Envoy::ConnectionPool::PoolFailureReason;\nusing Cancellable = ::Envoy::ConnectionPool::Cancellable;\n\n/**\n * Pool callbacks invoked in the context of a newStream() call, either synchronously or\n * asynchronously.\n */\nclass Callbacks {\npublic:\n  virtual ~Callbacks() = default;\n\n  /**\n   * Called when a pool error occurred and no connection could be acquired for making the request.\n   * @param reason supplies the failure reason.\n   * @param transport_failure_reason supplies the details of the transport failure reason.\n   * @param host supplies the description of the host that caused the failure. This may be nullptr\n   *             if no host was involved in the failure (for example overflow).\n   */\n  virtual void onPoolFailure(PoolFailureReason reason, absl::string_view transport_failure_reason,\n                             Upstream::HostDescriptionConstSharedPtr host) PURE;\n\n  /**\n   * Called when a connection is available to process a request/response.\n   * @param encoder supplies the request encoder to use.\n   * @param host supplies the description of the host that will carry the request. 
For logical\n   *             connection pools the description may be different each time this is called.\n   * @param info supplies the stream info object associated with the upstream connection.\n   */\n  virtual void onPoolReady(RequestEncoder& encoder, Upstream::HostDescriptionConstSharedPtr host,\n                           const StreamInfo::StreamInfo& info) PURE;\n};\n\n/**\n * An instance of a generic connection pool.\n */\nclass Instance : public Envoy::ConnectionPool::Instance, public Event::DeferredDeletable {\npublic:\n  ~Instance() override = default;\n\n  /**\n   * @return Http::Protocol Reports the protocol in use by this connection pool.\n   */\n  virtual Http::Protocol protocol() const PURE;\n\n  /**\n   * Determines whether the connection pool is actively processing any requests.\n   * @return true if the connection pool has any pending requests or any active requests.\n   */\n  virtual bool hasActiveConnections() const PURE;\n\n  /**\n   * Create a new stream on the pool.\n   * @param response_decoder supplies the decoder events to fire when the response is\n   *                         available.\n   * @param cb supplies the callbacks to invoke when the connection is ready or has failed. The\n   *           callbacks may be invoked immediately within the context of this call if there is a\n   *           ready connection or an immediate failure. In this case, the routine returns nullptr.\n   * @return Cancellable* If no connection is ready, the callback is not invoked, and a handle\n   *                      is returned that can be used to cancel the request. Otherwise, one of the\n   *                      callbacks is called and the routine returns nullptr. 
NOTE: Once a callback\n   *                      is called, the handle is no longer valid and any further cancellation\n   *                      should be done by resetting the stream.\n   * @warning Do not call cancel() from the callbacks, as the request is implicitly canceled when\n   *          the callbacks are called.\n   */\n  virtual Cancellable* newStream(Http::ResponseDecoder& response_decoder,\n                                 Callbacks& callbacks) PURE;\n};\n\nusing InstancePtr = std::unique_ptr<Instance>;\n\n} // namespace ConnectionPool\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/context.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/http/codes.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nstruct UserAgentContext;\n\n/**\n * Captures http-related structures with cardinality of one per server.\n */\nclass Context {\npublic:\n  virtual ~Context() = default;\n\n  /**\n   * Get the default tracing configuration, i.e. one from the bootstrap config.\n   *\n   * Once deprecation window for the tracer provider configuration in the bootstrap config is over,\n   * this method will no longer be necessary.\n   *\n   * @return Tracing.\n   */\n  virtual const envoy::config::trace::v3::Tracing& defaultTracingConfig() PURE;\n\n  virtual CodeStats& codeStats() PURE;\n  virtual const UserAgentContext& userAgentContext() const PURE;\n};\n\nusing ContextPtr = std::unique_ptr<Context>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/filter.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/scope_tracker.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/ssl/connection.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Return codes for encode/decode headers filter invocations. The connection manager bases further\n * filter invocations on the return code of the previous filter.\n */\nenum class FilterHeadersStatus {\n  // Continue filter chain iteration.\n  Continue,\n  // Do not iterate to any of the remaining filters in the chain. Returning\n  // FilterDataStatus::Continue from decodeData()/encodeData() or calling\n  // continueDecoding()/continueEncoding() MUST be called if continued filter iteration is desired.\n  StopIteration,\n  // Continue headers iteration to remaining filters, but ignore any subsequent data or trailers.\n  // This results in creating a header only request/response.\n  // This status MUST NOT be returned by decodeHeaders() when end_stream is set to true.\n  ContinueAndEndStream,\n  // Continue headers iteration to remaining filters, but delay ending the stream. This status MUST\n  // NOT be returned when end_stream is already set to false.\n  //\n  // Used when a filter wants to add a body to a headers-only request/response, but this body is not\n  // readily available. 
Delaying end_stream allows the filter to add the body once it's available\n  // without stopping headers iteration.\n  //\n  // The filter is responsible to continue the stream by providing a body through calling\n  // injectDecodedDataToFilterChain()/injectEncodedDataToFilterChain(), possibly multiple times\n  // if the body needs to be divided into several chunks. The filter may need to handle\n  // watermark events when injecting a body, see:\n  // https://github.com/envoyproxy/envoy/blob/master/source/docs/flow_control.md.\n  //\n  // The last call to inject data MUST have end_stream set to true to conclude the stream.\n  // If the filter cannot provide a body the stream should be reset.\n  //\n  // Adding a body through calling addDecodedData()/addEncodedData() then\n  // continueDecoding()/continueEncoding() is currently NOT supported and causes an assert failure.\n  //\n  // Adding trailers in this scenario is currently NOT supported.\n  //\n  // The filter MUST NOT attempt to continue the stream without providing a body using\n  // continueDecoding()/continueEncoding().\n  //\n  // TODO(yosrym93): Support adding a body in this case by calling addDecodedData()/addEncodedData()\n  // then continueDecoding()/continueEncoding(). To support this a new FilterManager::IterationState\n  // needs to be added and set when a filter returns this status in\n  // FilterManager::decodeHeaders/FilterManager::encodeHeaders()\n  // Currently, when a filter returns this, the IterationState is Continue. 
This causes ASSERTs in\n  // FilterManager::commonContinue() to fail when continueDecoding()/continueEncoding() is called;\n  // due to trying to continue iteration when the IterationState is already Continue.\n  // In this case, a different ASSERT will be needed to make sure the filter does not try to\n  // continue without adding a body first.\n  //\n  // TODO(yosrym93): Support adding trailers in this case by implementing new functions to inject\n  // trailers, similar to the inject data functions.\n  ContinueAndDontEndStream,\n  // Do not iterate for headers as well as data and trailers for the current filter and the filters\n  // following, and buffer body data for later dispatching. ContinueDecoding() MUST\n  // be called if continued filter iteration is desired.\n  //\n  // Used when a filter wants to stop iteration on data and trailers while waiting for headers'\n  // iteration to resume.\n  //\n  // If buffering the request causes buffered data to exceed the configured buffer limit, a 413 will\n  // be sent to the user. On the response path exceeding buffer limits will result in a 500.\n  //\n  // TODO(soya3129): stop metadata parsing when StopAllIterationAndBuffer is set.\n  StopAllIterationAndBuffer,\n  // Do not iterate for headers as well as data and trailers for the current filter and the filters\n  // following, and buffer body data for later dispatching. continueDecoding() MUST\n  // be called if continued filter iteration is desired.\n  //\n  // Used when a filter wants to stop iteration on data and trailers while waiting for headers'\n  // iteration to resume.\n  //\n  // This will cause the flow of incoming data to cease until continueDecoding() function is called.\n  //\n  // TODO(soya3129): stop metadata parsing when StopAllIterationAndWatermark is set.\n  StopAllIterationAndWatermark,\n};\n\n/**\n * Return codes for encode/decode data filter invocations. 
The connection manager bases further\n * filter invocations on the return code of the previous filter.\n */\nenum class FilterDataStatus {\n  // Continue filter chain iteration. If headers have not yet been sent to the next filter, they\n  // will be sent first via decodeHeaders()/encodeHeaders(). If data has previously been buffered,\n  // the data in this callback will be added to the buffer before the entirety is sent to the next\n  // filter.\n  Continue,\n  // Do not iterate to any of the remaining filters in the chain, and buffer body data for later\n  // dispatching. Returning FilterDataStatus::Continue from decodeData()/encodeData() or calling\n  // continueDecoding()/continueEncoding() MUST be called if continued filter iteration is desired.\n  //\n  // This should be called by filters which must parse a larger block of the incoming data before\n  // continuing processing and so can not push back on streaming data via watermarks.\n  //\n  // If buffering the request causes buffered data to exceed the configured buffer limit, a 413 will\n  // be sent to the user. On the response path exceeding buffer limits will result in a 500.\n  StopIterationAndBuffer,\n  // Do not iterate to any of the remaining filters in the chain, and buffer body data for later\n  // dispatching. Returning FilterDataStatus::Continue from decodeData()/encodeData() or calling\n  // continueDecoding()/continueEncoding() MUST be called if continued filter iteration is desired.\n  //\n  // This will cause the flow of incoming data to cease until one of the continue.*() functions is\n  // called.\n  //\n  // This should be returned by filters which can nominally stream data but have a transient back-up\n  // such as the configured delay of the fault filter, or if the router filter is still fetching an\n  // upstream connection.\n  StopIterationAndWatermark,\n  // Do not iterate to any of the remaining filters in the chain, but do not buffer any of the\n  // body data for later dispatching. 
Returning FilterDataStatus::Continue from\n  // decodeData()/encodeData() or calling continueDecoding()/continueEncoding() MUST be called if\n  // continued filter iteration is desired.\n  StopIterationNoBuffer\n};\n\n/**\n * Return codes for encode/decode trailers filter invocations. The connection manager bases further\n * filter invocations on the return code of the previous filter.\n */\nenum class FilterTrailersStatus {\n  // Continue filter chain iteration.\n  Continue,\n  // Do not iterate to any of the remaining filters in the chain. Calling\n  // continueDecoding()/continueEncoding() MUST be called if continued filter iteration is desired.\n  StopIteration\n};\n\n/**\n * Return codes for encode metadata filter invocations. Metadata currently can not stop filter\n * iteration.\n */\nenum class FilterMetadataStatus {\n  // Continue filter chain iteration.\n  Continue,\n};\n\n/**\n * The stream filter callbacks are passed to all filters to use for writing response data and\n * interacting with the underlying stream in general.\n */\nclass StreamFilterCallbacks {\npublic:\n  virtual ~StreamFilterCallbacks() = default;\n\n  /**\n   * @return const Network::Connection* the originating connection, or nullptr if there is none.\n   */\n  virtual const Network::Connection* connection() PURE;\n\n  /**\n   * @return Event::Dispatcher& the thread local dispatcher for allocating timers, etc.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * Reset the underlying stream.\n   */\n  virtual void resetStream() PURE;\n\n  /**\n   * Returns the route for the current request. The assumption is that the implementation can do\n   * caching where applicable to avoid multiple lookups. 
If a filter has modified the headers in\n   * a way that affects routing, clearRouteCache() must be called to clear the cache.\n   *\n   * NOTE: In the future we want to split route() into 2 methods, one that just\n   * returns current route and another that actually resolve the route.\n   */\n  virtual Router::RouteConstSharedPtr route() PURE;\n\n  /**\n   * Invokes callback with a matched route, callback can choose to accept this route by returning\n   * Router::RouteMatchStatus::Accept or continue route match from last matched route by returning\n   * Router::RouteMatchStatus::Continue, if there are more routes available.\n   *\n   * Returns route accepted by the callback or nullptr if no match found or none of route is\n   * accepted by the callback.\n   *\n   * NOTE: clearRouteCache() must be called before invoking this method otherwise cached route will\n   * be returned directly to the caller and the callback will not be invoked.\n   *\n   * Currently a route callback's decision is overridden by clearRouteCache() / route() call in the\n   * subsequent filters. We may want to persist callbacks so they always participate in later route\n   * resolution or make it an independent entity like filters that gets called on route resolution.\n   */\n  virtual Router::RouteConstSharedPtr route(const Router::RouteCallback& cb) PURE;\n\n  /**\n   * Returns the clusterInfo for the cached route.\n   * This method is to avoid multiple look ups in the filter chain, it also provides a consistent\n   * view of clusterInfo after a route is picked/repicked.\n   * NOTE: Cached clusterInfo and route will be updated the same time.\n   */\n  virtual Upstream::ClusterInfoConstSharedPtr clusterInfo() PURE;\n\n  /**\n   * Clears the route cache for the current request. 
This must be called when a filter has modified\n   * the headers in a way that would affect routing.\n   */\n  virtual void clearRouteCache() PURE;\n\n  /**\n   * @return uint64_t the ID of the originating stream for logging purposes.\n   */\n  virtual uint64_t streamId() const PURE;\n\n  /**\n   * @return streamInfo for logging purposes. Individual filter may add specific information to be\n   * put into the access log.\n   */\n  virtual StreamInfo::StreamInfo& streamInfo() PURE;\n\n  /**\n   * @return span context used for tracing purposes. Individual filters may add or modify\n   *              information in the span context.\n   */\n  virtual Tracing::Span& activeSpan() PURE;\n\n  /**\n   * @return tracing configuration.\n   */\n  virtual const Tracing::Config& tracingConfig() PURE;\n\n  /**\n   * @return the ScopeTrackedObject for this stream.\n   */\n  virtual const ScopeTrackedObject& scope() PURE;\n};\n\n/**\n * RouteConfigUpdatedCallback is used to notify an OnDemandRouteUpdate filter about completion of a\n * RouteConfig update. The filter (and the associated ActiveStream) where the original on-demand\n * request was originated can be destroyed before a response to an on-demand update request is\n * received and updates are propagated. 
To handle this:\n *\n * OnDemandRouteUpdate filter instance holds a RouteConfigUpdatedCallbackSharedPtr to a callback.\n * Envoy::Router::RdsRouteConfigProviderImpl holds a weak pointer to the RouteConfigUpdatedCallback\n * above in an Envoy::Router::UpdateOnDemandCallback struct\n *\n * In RdsRouteConfigProviderImpl::onConfigUpdate(), before invoking the callback, a check is made to\n * verify if the callback is still available.\n */\nusing RouteConfigUpdatedCallback = std::function<void(bool)>;\nusing RouteConfigUpdatedCallbackSharedPtr = std::shared_ptr<RouteConfigUpdatedCallback>;\n\n/**\n * Stream decoder filter callbacks add additional callbacks that allow a decoding filter to restart\n * decoding if they decide to hold data (e.g. for buffering or rate limiting).\n */\nclass StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks {\npublic:\n  /**\n   * Continue iterating through the filter chain with buffered headers and body data. This routine\n   * can only be called if the filter has previously returned StopIteration from decodeHeaders()\n   * AND one of StopIterationAndBuffer, StopIterationAndWatermark, or StopIterationNoBuffer\n   * from each previous call to decodeData().\n   *\n   * The connection manager will dispatch headers and any buffered body data to the\n   * next filter in the chain. Further note that if the request is not complete, this filter will\n   * still receive decodeData() calls and must return an appropriate status code depending on what\n   * the filter needs to do.\n   */\n  virtual void continueDecoding() PURE;\n\n  /**\n   * @return const Buffer::Instance* the currently buffered data as buffered by this filter or\n   *         previous ones in the filter chain. May be nullptr if nothing has been buffered yet.\n   */\n  virtual const Buffer::Instance* decodingBuffer() PURE;\n\n  /**\n   * Allows modifying the decoding buffer. 
May only be called before any data has been continued\n   * past the calling filter.\n   */\n  virtual void modifyDecodingBuffer(std::function<void(Buffer::Instance&)> callback) PURE;\n\n  /**\n   * Add buffered body data. This method is used in advanced cases where returning\n   * StopIterationAndBuffer from decodeData() is not sufficient.\n   *\n   * 1) If a headers only request needs to be turned into a request with a body, this method can\n   * be called to add body in the decodeHeaders() callback. Subsequent filters will receive\n   * decodeHeaders(..., false) followed by decodeData(..., true). This works both in the direct\n   * iteration as well as the continuation case.\n   *\n   * 2) If a filter is going to look at all buffered data from within a data callback with end\n   * stream set, this method can be called to immediately buffer the data. This avoids having\n   * to deal with the existing buffered data and the data from the current callback.\n   *\n   * 3) If additional buffered body data needs to be added by a filter before continuation of data\n   * to further filters (outside of callback context).\n   *\n   * 4) If additional data needs to be added in the decodeTrailers() callback, this method can be\n   * called in the context of the callback. All further filters will receive decodeData(..., false)\n   * followed by decodeTrailers(). 
However if the iteration is stopped, the added data will\n   * buffered, so that the further filters will not receive decodeData() before decodeHeaders().\n   *\n   * It is an error to call this method in any other case.\n   *\n   * See also injectDecodedDataToFilterChain() for a different way of passing data to further\n   * filters and also how the two methods are different.\n   *\n   * @param data Buffer::Instance supplies the data to be decoded.\n   * @param streaming_filter boolean supplies if this filter streams data or buffers the full body.\n   */\n  virtual void addDecodedData(Buffer::Instance& data, bool streaming_filter) PURE;\n\n  /**\n   * Decode data directly to subsequent filters in the filter chain. This method is used in\n   * advanced cases in which a filter needs full control over how subsequent filters view data,\n   * and does not want to make use of HTTP connection manager buffering. Using this method allows\n   * a filter to buffer data (or not) and then periodically inject data to subsequent filters,\n   * indicating end_stream at an appropriate time. This can be used to implement rate limiting,\n   * periodic data emission, etc.\n   *\n   * This method should only be called outside of callback context. I.e., do not call this method\n   * from within a filter's decodeData() call.\n   *\n   * When using this callback, filters should generally only return\n   * FilterDataStatus::StopIterationNoBuffer from their decodeData() call, since use of this method\n   * indicates that a filter does not wish to participate in standard HTTP connection manager\n   * buffering and continuation and will perform any necessary buffering and continuation on its\n   * own.\n   *\n   * This callback is different from addDecodedData() in that the specified data and end_stream\n   * status will be propagated directly to further filters in the filter chain. 
This is different\n   * from addDecodedData() where data is added to the HTTP connection manager's buffered data with\n   * the assumption that standard HTTP connection manager buffering and continuation are being used.\n   */\n  virtual void injectDecodedDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Adds decoded trailers. May only be called in decodeData when end_stream is set to true.\n   * If called in any other context, an assertion will be triggered.\n   *\n   * When called in decodeData, the trailers map will be initialized to an empty map and returned by\n   * reference. Calling this function more than once is invalid.\n   *\n   * @return a reference to the newly created trailers map.\n   */\n  virtual RequestTrailerMap& addDecodedTrailers() PURE;\n\n  /**\n   * Attempts to create a locally generated response using the provided response_code and body_text\n   * parameters. If the request was a gRPC request the local reply will be encoded as a gRPC\n   * response with a 200 HTTP response code and grpc-status and grpc-message headers mapped from the\n   * provided parameters.\n   *\n   * If a response has already started (e.g. 
if the router calls sendSendLocalReply after encoding\n   * headers) this will either ship the reply directly to the downstream codec, or reset the stream.\n   *\n   * @param response_code supplies the HTTP response code.\n   * @param body_text supplies the optional body text which is sent using the text/plain content\n   *                  type, or encoded in the grpc-message header.\n   * @param modify_headers supplies an optional callback function that can modify the\n   *                       response headers.\n   * @param grpc_status the gRPC status code to override the httpToGrpcStatus mapping with.\n   * @param details a string detailing why this local reply was sent.\n   */\n  virtual void sendLocalReply(Code response_code, absl::string_view body_text,\n                              std::function<void(ResponseHeaderMap& headers)> modify_headers,\n                              const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                              absl::string_view details) PURE;\n\n  /**\n   * Adds decoded metadata. This function can only be called in\n   * StreamDecoderFilter::decodeHeaders/Data/Trailers(). 
Do not call in\n   * StreamDecoderFilter::decodeMetadata().\n   *\n   * @return a reference to metadata map vector, where new metadata map can be added.\n   */\n  virtual MetadataMapVector& addDecodedMetadata() PURE;\n\n  /**\n   * Called with 100-Continue headers to be encoded.\n   *\n   * This is not folded into encodeHeaders because most Envoy users and filters will not be proxying\n   * 100-continue and with it split out, can ignore the complexity of multiple encodeHeaders calls.\n   *\n   * This must not be invoked more than once per request.\n   *\n   * @param headers supplies the headers to be encoded.\n   */\n  virtual void encode100ContinueHeaders(ResponseHeaderMapPtr&& headers) PURE;\n\n  /**\n   * Called with headers to be encoded, optionally indicating end of stream.\n   *\n   * The connection manager inspects certain pseudo headers that are not actually sent downstream.\n   * - See source/common/http/headers.h\n   *\n   * The only 1xx that may be provided to encodeHeaders() is a 101 upgrade, which will be the final\n   * encodeHeaders() for a response.\n   *\n   * @param headers supplies the headers to be encoded.\n   * @param end_stream supplies whether this is a header only request/response.\n   * @param details supplies the details of why this response was sent.\n   */\n  virtual void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream,\n                             absl::string_view details) PURE;\n\n  /**\n   * Called with data to be encoded, optionally indicating end of stream.\n   * @param data supplies the data to be encoded.\n   * @param end_stream supplies whether this is the last data frame.\n   */\n  virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Called with trailers to be encoded. 
This implicitly ends the stream.\n   * @param trailers supplies the trailers to encode.\n   */\n  virtual void encodeTrailers(ResponseTrailerMapPtr&& trailers) PURE;\n\n  /**\n   * Called with metadata to be encoded.\n   *\n   * @param metadata_map supplies the unique_ptr of the metadata to be encoded.\n   */\n  virtual void encodeMetadata(MetadataMapPtr&& metadata_map) PURE;\n\n  /**\n   * Called when the buffer for a decoder filter or any buffers the filter sends data to go over\n   * their high watermark.\n   *\n   * In the case of a filter such as the router filter, which spills into multiple buffers (codec,\n   * connection etc.) this may be called multiple times. Any such filter is responsible for calling\n   * the low watermark callbacks an equal number of times as the respective buffers are drained.\n   */\n  virtual void onDecoderFilterAboveWriteBufferHighWatermark() PURE;\n\n  /**\n   * Called when a decoder filter or any buffers the filter sends data to go from over its high\n   * watermark to under its low watermark.\n   */\n  virtual void onDecoderFilterBelowWriteBufferLowWatermark() PURE;\n\n  /**\n   * This routine can be called by a filter to subscribe to watermark events on the downstream\n   * stream and downstream connection.\n   *\n   * Immediately after subscribing, the filter will get a high watermark callback for each\n   * outstanding backed up buffer.\n   */\n  virtual void addDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks& callbacks) PURE;\n\n  /**\n   * This routine can be called by a filter to stop subscribing to watermark events on the\n   * downstream stream and downstream connection.\n   *\n   * It is not safe to call this from under the stack of a DownstreamWatermarkCallbacks callback.\n   */\n  virtual void removeDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks& callbacks) PURE;\n\n  /**\n   * This routine may be called to change the buffer limit for decoder filters.\n   *\n   * @param limit supplies the desired 
buffer limit.\n   */\n  virtual void setDecoderBufferLimit(uint32_t limit) PURE;\n\n  /**\n   * This routine returns the current buffer limit for decoder filters. Filters should abide by\n   * this limit or change it via setDecoderBufferLimit.\n   * A buffer limit of 0 bytes indicates no limits are applied.\n   *\n   * @return the buffer limit the filter should apply.\n   */\n  virtual uint32_t decoderBufferLimit() PURE;\n\n  // Takes a stream, and acts as if the headers are newly arrived.\n  // On success, this will result in a creating a new filter chain and likely upstream request\n  // associated with the original downstream stream.\n  // On failure, if the preconditions outlined below are not met, the caller is\n  // responsible for handling or terminating the original stream.\n  //\n  // This is currently limited to\n  //   - streams which are completely read\n  //   - streams which do not have a request body.\n  //\n  // Note that HttpConnectionManager sanitization will *not* be performed on the\n  // recreated stream, as it is assumed that sanitization has already been done.\n  virtual bool recreateStream() PURE;\n\n  /**\n   * Adds socket options to be applied to any connections used for upstream requests. Note that\n   * unique values for the options will likely lead to many connection pools being created. 
The\n   * added options are appended to any previously added.\n   *\n   * @param options The options to be added.\n   */\n  virtual void addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr& options) PURE;\n\n  /**\n   * @return The socket options to be applied to the upstream request.\n   */\n  virtual Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const PURE;\n\n  /**\n   * Schedules a request for a RouteConfiguration update from the management server.\n   * @param route_config_updated_cb callback to be called when the configuration update has been\n   * propagated to the worker thread.\n   */\n  virtual void\n  requestRouteConfigUpdate(RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) PURE;\n};\n\n/**\n * Common base class for both decoder and encoder filters. Functions here are related to the\n * lifecycle of a filter. Currently the life cycle is as follows:\n * - All filters receive onStreamComplete()\n * - All log handlers receive log()\n * - All filters receive onDestroy()\n *\n * This means:\n * - onStreamComplete can be used to make state changes that are intended to appear in the access\n * logs (like streamInfo().dynamicMetadata() or streamInfo().filterState()).\n * - onDestroy is used to cleanup all pending filter resources like pending http requests and\n * timers.\n */\nclass StreamFilterBase {\npublic:\n  virtual ~StreamFilterBase() = default;\n\n  /**\n   * This routine is called before the access log handlers' log() is called. Filters can use this\n   * callback to enrich the data passed in to the log handlers.\n   */\n  virtual void onStreamComplete() {}\n\n  /**\n   * This routine is called prior to a filter being destroyed. This may happen after normal stream\n   * finish (both downstream and upstream) or due to reset. Every filter is responsible for making\n   * sure that any async events are cleaned up in the context of this routine. This includes timers,\n   * network calls, etc. 
The reason there is an onDestroy() method vs. doing this type of cleanup\n   * in the destructor is due to the deferred deletion model that Envoy uses to avoid stack unwind\n   * complications. Filters must not invoke either encoder or decoder filter callbacks after having\n   * onDestroy() invoked. Filters that cross-register as access log handlers receive log() before\n   * onDestroy().\n   */\n  virtual void onDestroy() PURE;\n};\n\n/**\n * Stream decoder filter interface.\n */\nclass StreamDecoderFilter : public StreamFilterBase {\npublic:\n  /**\n   * Called with decoded headers, optionally indicating end of stream.\n   * @param headers supplies the decoded headers map.\n   * @param end_stream supplies whether this is a header only request/response.\n   * @return FilterHeadersStatus determines how filter chain iteration proceeds.\n   */\n  virtual FilterHeadersStatus decodeHeaders(RequestHeaderMap& headers, bool end_stream) PURE;\n\n  /**\n   * Called with a decoded data frame.\n   * @param data supplies the decoded data.\n   * @param end_stream supplies whether this is the last data frame.\n   * @return FilterDataStatus determines how filter chain iteration proceeds.\n   */\n  virtual FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Called with decoded trailers, implicitly ending the stream.\n   * @param trailers supplies the decoded trailers.\n   */\n  virtual FilterTrailersStatus decodeTrailers(RequestTrailerMap& trailers) PURE;\n\n  /**\n   * Called with decoded metadata. Add new metadata to metadata_map directly. Do not call\n   * StreamDecoderFilterCallbacks::addDecodedMetadata() to add new metadata.\n   *\n   * Note: decodeMetadata() currently cannot stop the filter iteration, and always returns Continue.\n   * That means metadata will go through the complete filter chain at once, even if the other frame\n   * types return StopIteration. 
If metadata should not pass through all filters at once, users\n   * should consider using StopAllIterationAndBuffer or StopAllIterationAndWatermark in\n   * decodeHeaders() to prevent metadata passing to the following filters.\n   *\n   * @param metadata_map supplies the decoded metadata.\n   */\n  virtual FilterMetadataStatus decodeMetadata(MetadataMap& /* metadata_map */) {\n    return Http::FilterMetadataStatus::Continue;\n  }\n\n  /**\n   * Called by the filter manager once to initialize the filter decoder callbacks that the\n   * filter should use. Callbacks will not be invoked by the filter after onDestroy() is called.\n   */\n  virtual void setDecoderFilterCallbacks(StreamDecoderFilterCallbacks& callbacks) PURE;\n\n  /**\n   * Called at the end of the stream, when all data has been decoded.\n   */\n  virtual void decodeComplete() {}\n};\n\nusing StreamDecoderFilterSharedPtr = std::shared_ptr<StreamDecoderFilter>;\n\n/**\n * Stream encoder filter callbacks add additional callbacks that allow a encoding filter to restart\n * encoding if they decide to hold data (e.g. for buffering or rate limiting).\n */\nclass StreamEncoderFilterCallbacks : public virtual StreamFilterCallbacks {\npublic:\n  /**\n   * Continue iterating through the filter chain with buffered headers and body data. This routine\n   * can only be called if the filter has previously returned StopIteration from encodeHeaders() AND\n   * one of StopIterationAndBuffer, StopIterationAndWatermark, or StopIterationNoBuffer\n   * from each previous call to encodeData().\n   *\n   * The connection manager will dispatch headers and any buffered body data to the next filter in\n   * the chain. 
Further note that if the response is not complete, this filter will still receive\n   * encodeData() calls and must return an appropriate status code depending on what the filter\n   * needs to do.\n   */\n  virtual void continueEncoding() PURE;\n\n  /**\n   * @return const Buffer::Instance* the currently buffered data as buffered by this filter or\n   *         previous ones in the filter chain. May be nullptr if nothing has been buffered yet.\n   */\n  virtual const Buffer::Instance* encodingBuffer() PURE;\n\n  /**\n   * Allows modifying the encoding buffer. May only be called before any data has been continued\n   * past the calling filter.\n   */\n  virtual void modifyEncodingBuffer(std::function<void(Buffer::Instance&)> callback) PURE;\n\n  /**\n   * Add buffered body data. This method is used in advanced cases where returning\n   * StopIterationAndBuffer from encodeData() is not sufficient.\n   *\n   * 1) If a headers only response needs to be turned into a response with a body, this method can\n   * be called to add body in the encodeHeaders() callback. Subsequent filters will receive\n   * encodeHeaders(..., false) followed by encodeData(..., true). This works both in the direct\n   * iteration as well as the continuation case.\n   *\n   * 2) If a filter is going to look at all buffered data from within a data callback with end\n   * stream set, this method can be called to immediately buffer the data. This avoids having\n   * to deal with the existing buffered data and the data from the current callback.\n   *\n   * 3) If additional buffered body data needs to be added by a filter before continuation of data\n   * to further filters (outside of callback context).\n   *\n   * 4) If additional data needs to be added in the encodeTrailers() callback, this method can be\n   * called in the context of the callback. All further filters will receive encodeData(..., false)\n   * followed by encodeTrailers(). 
However if the iteration is stopped, the added data will\n   * buffered, so that the further filters will not receive encodeData() before encodeHeaders().\n   *\n   * It is an error to call this method in any other case.\n   *\n   * See also injectEncodedDataToFilterChain() for a different way of passing data to further\n   * filters and also how the two methods are different.\n   *\n   * @param data Buffer::Instance supplies the data to be encoded.\n   * @param streaming_filter boolean supplies if this filter streams data or buffers the full body.\n   */\n  virtual void addEncodedData(Buffer::Instance& data, bool streaming_filter) PURE;\n\n  /**\n   * Encode data directly to subsequent filters in the filter chain. This method is used in\n   * advanced cases in which a filter needs full control over how subsequent filters view data,\n   * and does not want to make use of HTTP connection manager buffering. Using this method allows\n   * a filter to buffer data (or not) and then periodically inject data to subsequent filters,\n   * indicating end_stream at an appropriate time. This can be used to implement rate limiting,\n   * periodic data emission, etc.\n   *\n   * This method should only be called outside of callback context. I.e., do not call this method\n   * from within a filter's encodeData() call.\n   *\n   * When using this callback, filters should generally only return\n   * FilterDataStatus::StopIterationNoBuffer from their encodeData() call, since use of this method\n   * indicates that a filter does not wish to participate in standard HTTP connection manager\n   * buffering and continuation and will perform any necessary buffering and continuation on its\n   * own.\n   *\n   * This callback is different from addEncodedData() in that the specified data and end_stream\n   * status will be propagated directly to further filters in the filter chain. 
This is different\n   * from addEncodedData() where data is added to the HTTP connection manager's buffered data with\n   * the assumption that standard HTTP connection manager buffering and continuation are being used.\n   */\n  virtual void injectEncodedDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Adds encoded trailers. May only be called in encodeData when end_stream is set to true.\n   * If called in any other context, an assertion will be triggered.\n   *\n   * When called in encodeData, the trailers map will be initialized to an empty map and returned by\n   * reference. Calling this function more than once is invalid.\n   *\n   * @return a reference to the newly created trailers map.\n   */\n  virtual ResponseTrailerMap& addEncodedTrailers() PURE;\n\n  /**\n   * Attempts to create a locally generated response using the provided response_code and body_text\n   * parameters. If the request was a gRPC request the local reply will be encoded as a gRPC\n   * response with a 200 HTTP response code and grpc-status and grpc-message headers mapped from the\n   * provided parameters.\n   *\n   * If a response has already started (e.g. 
if the router calls sendSendLocalReply after encoding\n   * headers) this will either ship the reply directly to the downstream codec, or reset the stream.\n   *\n   * @param response_code supplies the HTTP response code.\n   * @param body_text supplies the optional body text which is sent using the text/plain content\n   *                  type, or encoded in the grpc-message header.\n   * @param modify_headers supplies an optional callback function that can modify the\n   *                       response headers.\n   * @param grpc_status the gRPC status code to override the httpToGrpcStatus mapping with.\n   * @param details a string detailing why this local reply was sent.\n   */\n  virtual void sendLocalReply(Code response_code, absl::string_view body_text,\n                              std::function<void(ResponseHeaderMap& headers)> modify_headers,\n                              const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                              absl::string_view details) PURE;\n  /**\n   * Adds new metadata to be encoded.\n   *\n   * @param metadata_map supplies the unique_ptr of the metadata to be encoded.\n   */\n  virtual void addEncodedMetadata(MetadataMapPtr&& metadata_map) PURE;\n\n  /**\n   * Called when an encoder filter goes over its high watermark.\n   */\n  virtual void onEncoderFilterAboveWriteBufferHighWatermark() PURE;\n\n  /**\n   * Called when a encoder filter goes from over its high watermark to under its low watermark.\n   */\n  virtual void onEncoderFilterBelowWriteBufferLowWatermark() PURE;\n\n  /**\n   * This routine may be called to change the buffer limit for encoder filters.\n   *\n   * @param limit supplies the desired buffer limit.\n   */\n  virtual void setEncoderBufferLimit(uint32_t limit) PURE;\n\n  /**\n   * This routine returns the current buffer limit for encoder filters. 
Filters should abide by\n   * this limit or change it via setEncoderBufferLimit.\n   * A buffer limit of 0 bytes indicates no limits are applied.\n   *\n   * @return the buffer limit the filter should apply.\n   */\n  virtual uint32_t encoderBufferLimit() PURE;\n\n  /**\n   * Return the HTTP/1 stream encoder options if applicable. If the stream is not HTTP/1 returns\n   * absl::nullopt.\n   */\n  virtual Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() PURE;\n};\n\n/**\n * Stream encoder filter interface.\n */\nclass StreamEncoderFilter : public StreamFilterBase {\npublic:\n  /**\n   * Called with 100-continue headers.\n   *\n   * This is not folded into encodeHeaders because most Envoy users and filters\n   * will not be proxying 100-continue and with it split out, can ignore the\n   * complexity of multiple encodeHeaders calls.\n   *\n   * This will only be invoked once per request.\n   *\n   * @param headers supplies the 100-continue response headers to be encoded.\n   * @return FilterHeadersStatus determines how filter chain iteration proceeds.\n   *\n   */\n  virtual FilterHeadersStatus encode100ContinueHeaders(ResponseHeaderMap& headers) PURE;\n\n  /**\n   * Called with headers to be encoded, optionally indicating end of stream.\n   *\n   * The only 1xx that may be provided to encodeHeaders() is a 101 upgrade, which will be the final\n   * encodeHeaders() for a response.\n   *\n   * @param headers supplies the headers to be encoded.\n   * @param end_stream supplies whether this is a header only request/response.\n   * @return FilterHeadersStatus determines how filter chain iteration proceeds.\n   */\n  virtual FilterHeadersStatus encodeHeaders(ResponseHeaderMap& headers, bool end_stream) PURE;\n\n  /**\n   * Called with data to be encoded, optionally indicating end of stream.\n   * @param data supplies the data to be encoded.\n   * @param end_stream supplies whether this is the last data frame.\n   * @return FilterDataStatus determines how filter 
chain iteration proceeds.\n   */\n  virtual FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Called with trailers to be encoded, implicitly ending the stream.\n   * @param trailers supplies the trailers to be encoded.\n   */\n  virtual FilterTrailersStatus encodeTrailers(ResponseTrailerMap& trailers) PURE;\n\n  /**\n   * Called with metadata to be encoded. New metadata should be added directly to metadata_map. DO\n   * NOT call StreamDecoderFilterCallbacks::encodeMetadata() interface to add new metadata.\n   *\n   * @param metadata_map supplies the metadata to be encoded.\n   * @return FilterMetadataStatus, which currently is always FilterMetadataStatus::Continue;\n   */\n  virtual FilterMetadataStatus encodeMetadata(MetadataMap& metadata_map) PURE;\n\n  /**\n   * Called by the filter manager once to initialize the filter callbacks that the filter should\n   * use. Callbacks will not be invoked by the filter after onDestroy() is called.\n   */\n  virtual void setEncoderFilterCallbacks(StreamEncoderFilterCallbacks& callbacks) PURE;\n\n  /**\n   * Called at the end of the stream, when all data has been encoded.\n   */\n  virtual void encodeComplete() {}\n};\n\nusing StreamEncoderFilterSharedPtr = std::shared_ptr<StreamEncoderFilter>;\n\n/**\n * A filter that handles both encoding and decoding.\n */\nclass StreamFilter : public virtual StreamDecoderFilter, public virtual StreamEncoderFilter {};\n\nusing StreamFilterSharedPtr = std::shared_ptr<StreamFilter>;\n\n/**\n * These callbacks are provided by the connection manager to the factory so that the factory can\n * build the filter chain in an application specific way.\n */\nclass FilterChainFactoryCallbacks {\npublic:\n  virtual ~FilterChainFactoryCallbacks() = default;\n\n  /**\n   * Add a decoder filter that is used when reading stream data.\n   * @param filter supplies the filter to add.\n   */\n  virtual void addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr filter) 
PURE;\n\n  /**\n   * Add an encoder filter that is used when writing stream data.\n   * @param filter supplies the filter to add.\n   */\n  virtual void addStreamEncoderFilter(Http::StreamEncoderFilterSharedPtr filter) PURE;\n\n  /**\n   * Add a decoder/encoder filter that is used both when reading and writing stream data.\n   * @param filter supplies the filter to add.\n   */\n  virtual void addStreamFilter(Http::StreamFilterSharedPtr filter) PURE;\n\n  /**\n   * Add an access log handler that is called when the stream is destroyed.\n   * @param handler supplies the handler to add.\n   */\n  virtual void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) PURE;\n};\n\n/**\n * This function is used to wrap the creation of an HTTP filter chain for new streams as they\n * come in. Filter factories create the function at configuration initialization time, and then\n * they are used at runtime.\n * @param callbacks supplies the callbacks for the stream to install filters to. Typically the\n * function will install a single filter, but it's technically possibly to install more than one\n * if desired.\n */\nusing FilterFactoryCb = std::function<void(FilterChainFactoryCallbacks& callbacks)>;\n\n/**\n * A FilterChainFactory is used by a connection manager to create an HTTP level filter chain when a\n * new stream is created on the connection (either locally or remotely). Typically it would be\n * implemented by a configuration engine that would install a set of filters that are able to\n * process an application scenario on top of a stream.\n */\nclass FilterChainFactory {\npublic:\n  virtual ~FilterChainFactory() = default;\n\n  /**\n   * Called when a new HTTP stream is created on the connection.\n   * @param callbacks supplies the \"sink\" that is used for actually creating the filter chain. 
@see\n   *                  FilterChainFactoryCallbacks.\n   */\n  virtual void createFilterChain(FilterChainFactoryCallbacks& callbacks) PURE;\n\n  /**\n   * Called when a new upgrade stream is created on the connection.\n   * @param upgrade supplies the upgrade header from downstream\n   * @param per_route_upgrade_map supplies the upgrade map, if any, for this route.\n   * @param callbacks supplies the \"sink\" that is used for actually creating the filter chain. @see\n   *                  FilterChainFactoryCallbacks.\n   * @return true if upgrades of this type are allowed and the filter chain has been created.\n   *    returns false if this upgrade type is not configured, and no filter chain is created.\n   */\n  using UpgradeMap = std::map<std::string, bool>;\n  virtual bool createUpgradeFilterChain(absl::string_view upgrade,\n                                        const UpgradeMap* per_route_upgrade_map,\n                                        FilterChainFactoryCallbacks& callbacks) PURE;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/hash_policy.h",
    "content": "#pragma once\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass Hashable {\npublic:\n  virtual absl::optional<uint64_t> hash() const PURE;\n  virtual ~Hashable() = default;\n};\n\n/**\n * Request hash policy. I.e., if using a hashing load balancer, how a request should be hashed onto\n * an upstream host.\n */\nclass HashPolicy {\npublic:\n  virtual ~HashPolicy() = default;\n\n  /**\n   * A callback used for requesting that a cookie be set with the given lifetime.\n   * @param key the name of the cookie to be set\n   * @param path the path of the cookie, or the empty string if no path should be set.\n   * @param ttl the lifetime of the cookie\n   * @return std::string the opaque value of the cookie that will be set\n   */\n  using AddCookieCallback = std::function<std::string(\n      const std::string& key, const std::string& path, std::chrono::seconds ttl)>;\n\n  /**\n   * @param downstream_address is the address of the connected client host, or nullptr if the\n   * request is initiated from within this host\n   * @param headers stores the HTTP headers for the stream\n   * @param add_cookie is called to add a set-cookie header on the reply sent to the downstream\n   * host\n   * @return absl::optional<uint64_t> an optional hash value to route on. A hash value might not be\n   * returned if for example the specified HTTP header does not exist.\n   */\n  virtual absl::optional<uint64_t>\n  generateHash(const Network::Address::Instance* downstream_address,\n               const RequestHeaderMap& headers, AddCookieCallback add_cookie,\n               const StreamInfo::FilterStateSharedPtr filter_state) const PURE;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/header_map.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <cstdint>\n#include <cstring>\n#include <iostream>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/hash.h\"\n#include \"common/common/macros.h\"\n\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n// Used by ASSERTs to validate internal consistency. E.g. valid HTTP header keys/values should\n// never contain embedded NULLs.\nstatic inline bool validHeaderString(absl::string_view s) {\n  // If you modify this list of illegal embedded characters you will probably\n  // want to change header_map_fuzz_impl_test at the same time.\n  for (const char c : s) {\n    switch (c) {\n    case '\\0':\n      FALLTHRU;\n    case '\\r':\n      FALLTHRU;\n    case '\\n':\n      return false;\n    default:\n      continue;\n    }\n  }\n  return true;\n}\n\n/**\n * Wrapper for a lower case string used in header operations to generally avoid needless case\n * insensitive compares.\n */\nclass LowerCaseString {\npublic:\n  LowerCaseString(LowerCaseString&& rhs) noexcept : string_(std::move(rhs.string_)) {\n    ASSERT(valid());\n  }\n  LowerCaseString(const LowerCaseString& rhs) : string_(rhs.string_) { ASSERT(valid()); }\n  explicit LowerCaseString(const std::string& new_string) : string_(new_string) {\n    ASSERT(valid());\n    lower();\n  }\n\n  const std::string& get() const { return string_; }\n  bool operator==(const LowerCaseString& rhs) const { return string_ == rhs.string_; }\n  bool operator!=(const LowerCaseString& rhs) const { return string_ != rhs.string_; }\n  bool operator<(const LowerCaseString& rhs) const { return string_.compare(rhs.string_) < 0; }\n\nprivate:\n  void lower() {\n    std::transform(string_.begin(), string_.end(), string_.begin(), absl::ascii_tolower);\n  }\n  bool valid() const { return 
validHeaderString(string_); }\n\n  std::string string_;\n};\n\n/**\n * Convenient type for a vector of lower case string and string pair.\n */\nusing LowerCaseStrPairVector =\n    std::vector<std::pair<const Http::LowerCaseString, const std::string>>;\n\n/**\n * Convenient type for an inline vector that will be used by HeaderString.\n */\nusing InlineHeaderVector = absl::InlinedVector<char, 128>;\n\n/**\n * Convenient type for the underlying type of HeaderString that allows a variant\n * between string_view and the InlinedVector.\n */\nusing VariantHeader = absl::variant<absl::string_view, InlineHeaderVector>;\n\n/**\n * This is a string implementation for use in header processing. It is heavily optimized for\n * performance. It supports 2 different types of storage and can switch between them:\n * 1) A reference.\n * 2) An InlinedVector (an optimized interned string for small strings, but allows heap\n * allocation if needed).\n */\nclass HeaderString {\npublic:\n  /**\n   * Default constructor. Sets up for inline storage.\n   */\n  HeaderString();\n\n  /**\n   * Constructor for a string reference.\n   * @param ref_value MUST point to data that will live beyond the lifetime of any request/response\n   *        using the string (since a codec may optimize for zero copy).\n   */\n  explicit HeaderString(const LowerCaseString& ref_value);\n\n  /**\n   * Constructor for a string reference.\n   * @param ref_value MUST point to data that will live beyond the lifetime of any request/response\n   *        using the string (since a codec may optimize for zero copy).\n   */\n  explicit HeaderString(absl::string_view ref_value);\n\n  HeaderString(HeaderString&& move_value) noexcept;\n  ~HeaderString() = default;\n\n  /**\n   * Append data to an existing string. 
If the string is a reference string the reference data is\n   * not copied.\n   */\n  void append(const char* data, uint32_t size);\n\n  /**\n   * Transforms the inlined vector data using the given UnaryOperation (conforms\n   * to std::transform).\n   * @param unary_op the operations to be performed on each of the elements.\n   */\n  template <typename UnaryOperation> void inlineTransform(UnaryOperation&& unary_op) {\n    ASSERT(type() == Type::Inline);\n    std::transform(absl::get<InlineHeaderVector>(buffer_).begin(),\n                   absl::get<InlineHeaderVector>(buffer_).end(),\n                   absl::get<InlineHeaderVector>(buffer_).begin(), unary_op);\n  }\n\n  /**\n   * Trim trailing whitespaces from the HeaderString. Only supported by the \"Inline\" HeaderString\n   * representation.\n   */\n  void rtrim();\n\n  /**\n   * Get an absl::string_view. It will NOT be NUL terminated!\n   *\n   * @return an absl::string_view.\n   */\n  absl::string_view getStringView() const;\n\n  /**\n   * Return the string to a default state. Reference strings are not touched. Both inline/dynamic\n   * strings are reset to zero size.\n   */\n  void clear();\n\n  /**\n   * @return whether the string is empty or not.\n   */\n  bool empty() const { return size() == 0; }\n\n  // Looking for find? Use getStringView().find()\n\n  /**\n   * Set the value of the string by copying data into it. This overwrites any existing string.\n   */\n  void setCopy(const char* data, uint32_t size);\n\n  /**\n   * Set the value of the string by copying data into it. This overwrites any existing string.\n   */\n  void setCopy(absl::string_view view);\n\n  /**\n   * Set the value of the string to an integer. 
This overwrites any existing string.\n   */\n  void setInteger(uint64_t value);\n\n  /**\n   * Set the value of the string to a string reference.\n   * @param ref_value MUST point to data that will live beyond the lifetime of any request/response\n   *        using the string (since a codec may optimize for zero copy).\n   */\n  void setReference(absl::string_view ref_value);\n\n  /**\n   * @return whether the string is a reference or an InlinedVector.\n   */\n  bool isReference() const { return type() == Type::Reference; }\n\n  /**\n   * @return the size of the string, not including the null terminator.\n   */\n  uint32_t size() const;\n\n  bool operator==(const char* rhs) const {\n    return getStringView() == absl::NullSafeStringView(rhs);\n  }\n  bool operator==(absl::string_view rhs) const { return getStringView() == rhs; }\n  bool operator!=(const char* rhs) const {\n    return getStringView() != absl::NullSafeStringView(rhs);\n  }\n  bool operator!=(absl::string_view rhs) const { return getStringView() != rhs; }\n\nprivate:\n  enum class Type { Reference, Inline };\n\n  VariantHeader buffer_;\n\n  bool valid() const;\n\n  /**\n   * @return the type of backing storage for the string.\n   */\n  Type type() const;\n};\n\n/**\n * Encapsulates an individual header entry (including both key and value).\n */\nclass HeaderEntry {\npublic:\n  virtual ~HeaderEntry() = default;\n\n  /**\n   * @return the header key.\n   */\n  virtual const HeaderString& key() const PURE;\n\n  /**\n   * Set the header value by copying data into it.\n   */\n  virtual void value(absl::string_view value) PURE;\n\n  /**\n   * Set the header value by copying an integer into it.\n   */\n  virtual void value(uint64_t value) PURE;\n\n  /**\n   * Set the header value by copying the value in another header entry.\n   */\n  virtual void value(const HeaderEntry& header) PURE;\n\n  /**\n   * @return the header value.\n   */\n  virtual const HeaderString& value() const PURE;\n\n  /**\n   * @return 
the header value.\n   */\n  virtual HeaderString& value() PURE;\n\nprivate:\n  void value(const char*); // Do not allow auto conversion to std::string\n};\n\n/**\n * The following defines all default request headers that Envoy allows direct access to inside of\n * the header map. In practice, these are all headers used during normal Envoy request flow\n * processing. This allows O(1) access to these headers without even a hash lookup.\n */\n#define INLINE_REQ_HEADERS(HEADER_FUNC)                                                            \\\n  HEADER_FUNC(ClientTraceId)                                                                       \\\n  HEADER_FUNC(EnvoyDownstreamServiceCluster)                                                       \\\n  HEADER_FUNC(EnvoyDownstreamServiceNode)                                                          \\\n  HEADER_FUNC(EnvoyExpectedRequestTimeoutMs)                                                       \\\n  HEADER_FUNC(EnvoyExternalAddress)                                                                \\\n  HEADER_FUNC(EnvoyForceTrace)                                                                     \\\n  HEADER_FUNC(EnvoyHedgeOnPerTryTimeout)                                                           \\\n  HEADER_FUNC(EnvoyInternalRequest)                                                                \\\n  HEADER_FUNC(EnvoyIpTags)                                                                         \\\n  HEADER_FUNC(EnvoyMaxRetries)                                                                     \\\n  HEADER_FUNC(EnvoyRetryOn)                                                                        \\\n  HEADER_FUNC(EnvoyRetryGrpcOn)                                                                    \\\n  HEADER_FUNC(EnvoyRetriableStatusCodes)                                                           \\\n  HEADER_FUNC(EnvoyRetriableHeaderNames)                                                           \\\n  
HEADER_FUNC(EnvoyOriginalPath)                                                                   \\\n  HEADER_FUNC(EnvoyOriginalUrl)                                                                    \\\n  HEADER_FUNC(EnvoyUpstreamAltStatName)                                                            \\\n  HEADER_FUNC(EnvoyUpstreamRequestPerTryTimeoutMs)                                                 \\\n  HEADER_FUNC(EnvoyUpstreamRequestTimeoutAltResponse)                                              \\\n  HEADER_FUNC(EnvoyUpstreamRequestTimeoutMs)                                                       \\\n  HEADER_FUNC(Expect)                                                                              \\\n  HEADER_FUNC(ForwardedClientCert)                                                                 \\\n  HEADER_FUNC(ForwardedFor)                                                                        \\\n  HEADER_FUNC(ForwardedProto)                                                                      \\\n  HEADER_FUNC(GrpcTimeout)                                                                         \\\n  HEADER_FUNC(Host)                                                                                \\\n  HEADER_FUNC(Method)                                                                              \\\n  HEADER_FUNC(Path)                                                                                \\\n  HEADER_FUNC(Protocol)                                                                            \\\n  HEADER_FUNC(Scheme)                                                                              \\\n  HEADER_FUNC(TE)                                                                                  \\\n  HEADER_FUNC(UserAgent)\n\n/**\n * Default O(1) response headers.\n */\n#define INLINE_RESP_HEADERS(HEADER_FUNC)                                                           \\\n  HEADER_FUNC(Date)                                                        
                        \\\n  HEADER_FUNC(EnvoyDegraded)                                                                       \\\n  HEADER_FUNC(EnvoyImmediateHealthCheckFail)                                                       \\\n  HEADER_FUNC(EnvoyRateLimited)                                                                    \\\n  HEADER_FUNC(EnvoyUpstreamCanary)                                                                 \\\n  HEADER_FUNC(EnvoyUpstreamHealthCheckedCluster)                                                   \\\n  HEADER_FUNC(EnvoyUpstreamServiceTime)                                                            \\\n  HEADER_FUNC(Location)                                                                            \\\n  HEADER_FUNC(Server)                                                                              \\\n  HEADER_FUNC(Status)\n\n/**\n * Default O(1) request and response headers.\n */\n#define INLINE_REQ_RESP_HEADERS(HEADER_FUNC)                                                       \\\n  HEADER_FUNC(Connection)                                                                          \\\n  HEADER_FUNC(ContentLength)                                                                       \\\n  HEADER_FUNC(ContentType)                                                                         \\\n  HEADER_FUNC(EnvoyAttemptCount)                                                                   \\\n  HEADER_FUNC(EnvoyDecoratorOperation)                                                             \\\n  HEADER_FUNC(KeepAlive)                                                                           \\\n  HEADER_FUNC(ProxyConnection)                                                                     \\\n  HEADER_FUNC(RequestId)                                                                           \\\n  HEADER_FUNC(TransferEncoding)                                                                    \\\n  HEADER_FUNC(Upgrade)              
                                                               \\\n  HEADER_FUNC(Via)\n\n/**\n * Default O(1) response headers and trailers.\n */\n#define INLINE_RESP_HEADERS_TRAILERS(HEADER_FUNC)                                                  \\\n  HEADER_FUNC(GrpcMessage)                                                                         \\\n  HEADER_FUNC(GrpcStatus)\n\n/**\n * The following functions are defined for each inline header above.\n\n * E.g., for path we have:\n * Path() -> returns the header entry if it exists or nullptr.\n * appendPath(path, \"/\") -> appends the string path with delimiter \"/\" to the header value.\n * setReferencePath(PATH) -> sets header value to reference string PATH.\n * setPath(path_string) -> sets the header value to the string path_string by copying the data.\n * removePath() -> removes the header if it exists.\n *\n * For inline headers that use integers, we have:\n * setContentLength(5) -> sets the header value to the integer 5.\n *\n * TODO(asraa): Remove the integer set for inline headers that do not take integer values.\n */\n#define DEFINE_INLINE_HEADER(name)                                                                 \\\n  virtual const HeaderEntry* name() const PURE;                                                    \\\n  virtual void append##name(absl::string_view data, absl::string_view delimiter) PURE;             \\\n  virtual void setReference##name(absl::string_view value) PURE;                                   \\\n  virtual void set##name(absl::string_view value) PURE;                                            \\\n  virtual void set##name(uint64_t value) PURE;                                                     \\\n  virtual size_t remove##name() PURE;                                                              \\\n  virtual absl::string_view get##name##Value() const PURE;\n\n/**\n * Wraps a set of HTTP headers.\n */\nclass HeaderMap {\npublic:\n  virtual ~HeaderMap() = default;\n\n  /**\n   * 
For testing. This is an exact match comparison (order matters).\n   */\n  virtual bool operator==(const HeaderMap& rhs) const PURE;\n  virtual bool operator!=(const HeaderMap& rhs) const PURE;\n\n  /**\n   * Add a header via full move. This is the expected high performance paths for codecs populating\n   * a map when receiving.\n   * @param key supplies the header key.\n   * @param value supplies the header value.\n   */\n  virtual void addViaMove(HeaderString&& key, HeaderString&& value) PURE;\n\n  /**\n   * Add a reference header to the map. Both key and value MUST point to data that will live beyond\n   * the lifetime of any request/response using the string (since a codec may optimize for zero\n   * copy). The key will not be copied and a best effort will be made not to\n   * copy the value (but this may happen when comma concatenating, see below).\n   *\n   * Calling addReference multiple times for the same header will result in:\n   * - Comma concatenation for predefined inline headers.\n   * - Multiple headers being present in the HeaderMap for other headers.\n   *\n   * @param key specifies the name of the header to add; it WILL NOT be copied.\n   * @param value specifies the value of the header to add; it WILL NOT be copied.\n   */\n  virtual void addReference(const LowerCaseString& key, absl::string_view value) PURE;\n\n  /**\n   * Add a header with a reference key to the map. The key MUST point to data that will live beyond\n   * the lifetime of any request/response using the string (since a codec may optimize for zero\n   * copy). 
The value will be copied.\n   *\n   * Calling addReference multiple times for the same header will result in:\n   * - Comma concatenation for predefined inline headers.\n   * - Multiple headers being present in the HeaderMap for other headers.\n   *\n   * @param key specifies the name of the header to add; it WILL NOT be copied.\n   * @param value specifies the value of the header to add; it WILL be copied.\n   */\n  virtual void addReferenceKey(const LowerCaseString& key, uint64_t value) PURE;\n\n  /**\n   * Add a header with a reference key to the map. The key MUST point to point to data that will\n   * live beyond the lifetime of any request/response using the string (since a codec may optimize\n   * for zero copy). The value will be copied.\n   *\n   * Calling addReference multiple times for the same header will result in:\n   * - Comma concatenation for predefined inline headers.\n   * - Multiple headers being present in the HeaderMap for other headers.\n   *\n   * @param key specifies the name of the header to add; it WILL NOT be copied.\n   * @param value specifies the value of the header to add; it WILL be copied.\n   */\n  virtual void addReferenceKey(const LowerCaseString& key, absl::string_view value) PURE;\n\n  /**\n   * Add a header by copying both the header key and the value.\n   *\n   * Calling addCopy multiple times for the same header will result in:\n   * - Comma concatenation for predefined inline headers.\n   * - Multiple headers being present in the HeaderMap for other headers.\n   *\n   * @param key specifies the name of the header to add; it WILL be copied.\n   * @param value specifies the value of the header to add; it WILL be copied.\n   */\n  virtual void addCopy(const LowerCaseString& key, uint64_t value) PURE;\n\n  /**\n   * Add a header by copying both the header key and the value.\n   *\n   * Calling addCopy multiple times for the same header will result in:\n   * - Comma concatenation for predefined inline headers.\n   * - Multiple 
headers being present in the HeaderMap for other headers.\n   *\n   * @param key specifies the name of the header to add; it WILL be copied.\n   * @param value specifies the value of the header to add; it WILL be copied.\n   */\n  virtual void addCopy(const LowerCaseString& key, absl::string_view value) PURE;\n\n  /**\n   * Appends data to header. If header already has a value, the string \",\" is added between the\n   * existing value and data.\n   *\n   * @param key specifies the name of the header to append; it WILL be copied.\n   * @param value specifies the value of the header to add; it WILL be copied.\n   *\n   * Caution: This iterates over the HeaderMap to find the header to append. This will modify only\n   * the first occurrence of the header.\n   * TODO(asraa): Investigate whether necessary to append to all headers with the key.\n   */\n  virtual void appendCopy(const LowerCaseString& key, absl::string_view value) PURE;\n\n  /**\n   * Set a reference header in the map. Both key and value MUST point to data that will live beyond\n   * the lifetime of any request/response using the string (since a codec may optimize for zero\n   * copy). Nothing will be copied.\n   *\n   * Calling setReference multiple times for the same header will result in only the last header\n   * being present in the HeaderMap.\n   *\n   * @param key specifies the name of the header to set; it WILL NOT be copied.\n   * @param value specifies the value of the header to set; it WILL NOT be copied.\n   */\n  virtual void setReference(const LowerCaseString& key, absl::string_view value) PURE;\n\n  /**\n   * Set a header with a reference key in the map. The key MUST point to point to data that will\n   * live beyond the lifetime of any request/response using the string (since a codec may optimize\n   * for zero copy). 
The value will be copied.\n   *\n   * Calling setReferenceKey multiple times for the same header will result in only the last header\n   * being present in the HeaderMap.\n   *\n   * @param key specifies the name of the header to set; it WILL NOT be copied.\n   * @param value specifies the value of the header to set; it WILL be copied.\n   */\n  virtual void setReferenceKey(const LowerCaseString& key, absl::string_view value) PURE;\n\n  /**\n   * Replaces a header value by copying the value. Copies the key if the key does not exist.\n   *\n   * Calling setCopy multiple times for the same header will result in only the last header\n   * being present in the HeaderMap.\n   *\n   * @param key specifies the name of the header to set; it WILL be copied.\n   * @param value specifies the value of the header to set; it WILL be copied.\n   *\n   * Caution: This iterates over the HeaderMap to find the header to set. This will modify only the\n   * first occurrence of the header.\n   * TODO(asraa): Investigate whether necessary to set all headers with the key.\n   */\n  virtual void setCopy(const LowerCaseString& key, absl::string_view value) PURE;\n\n  /**\n   * @return uint64_t the size of the header map in bytes. This is the sum of the header keys and\n   * values and does not account for data structure overhead.\n   */\n  virtual uint64_t byteSize() const PURE;\n\n  /**\n   * Get a header by key.\n   * @param key supplies the header key.\n   * @return the header entry if it exists otherwise nullptr.\n   */\n  virtual const HeaderEntry* get(const LowerCaseString& key) const PURE;\n\n  /**\n   * This is a wrapper for the return result from getAll(). 
It avoids a copy when translating from\n   * non-const HeaderEntry to const HeaderEntry and only provides const access to the result.\n   */\n  using NonConstGetResult = absl::InlinedVector<HeaderEntry*, 1>;\n  class GetResult {\n  public:\n    GetResult() = default;\n    explicit GetResult(NonConstGetResult&& result) : result_(std::move(result)) {}\n    void operator=(GetResult&& rhs) noexcept { result_ = std::move(rhs.result_); }\n\n    bool empty() const { return result_.empty(); }\n    size_t size() const { return result_.size(); }\n    const HeaderEntry* operator[](size_t i) const { return result_[i]; }\n\n  private:\n    NonConstGetResult result_;\n  };\n\n  /**\n   * Get a header by key.\n   * @param key supplies the header key.\n   * @return all header entries matching the key.\n   */\n  virtual GetResult getAll(const LowerCaseString& key) const PURE;\n\n  // aliases to make iterate() and iterateReverse() callbacks easier to read\n  enum class Iterate { Continue, Break };\n\n  /**\n   * Callback when calling iterate() over a const header map.\n   * @param header supplies the header entry.\n   * @return Iterate::Continue to continue iteration, or Iterate::Break to stop;\n   */\n  using ConstIterateCb = std::function<Iterate(const HeaderEntry&)>;\n\n  /**\n   * Iterate over a constant header map.\n   * @param cb supplies the iteration callback.\n   */\n  virtual void iterate(ConstIterateCb cb) const PURE;\n\n  /**\n   * Iterate over a constant header map in reverse order.\n   * @param cb supplies the iteration callback.\n   */\n  virtual void iterateReverse(ConstIterateCb cb) const PURE;\n\n  /**\n   * Clears the headers in the map.\n   */\n  virtual void clear() PURE;\n\n  /**\n   * Remove all instances of a header by key.\n   * @param key supplies the header key to remove.\n   * @return the number of headers removed.\n   */\n  virtual size_t remove(const LowerCaseString& key) PURE;\n\n  /**\n   * Remove all instances of headers where the header matches the 
predicate.\n   * @param predicate supplies the predicate to match headers against.\n   * @return the number of headers removed.\n   */\n  using HeaderMatchPredicate = std::function<bool(const HeaderEntry&)>;\n  virtual size_t removeIf(const HeaderMatchPredicate& predicate) PURE;\n\n  /**\n   * Remove all instances of headers where the key begins with the supplied prefix.\n   * @param prefix supplies the prefix to match header keys against.\n   * @return the number of headers removed.\n   */\n  virtual size_t removePrefix(const LowerCaseString& prefix) PURE;\n\n  /**\n   * @return the number of headers in the map.\n   */\n  virtual size_t size() const PURE;\n\n  /**\n   * @return true if the map is empty, false otherwise.\n   */\n  virtual bool empty() const PURE;\n\n  /**\n   * Dump the header map to the ostream specified\n   *\n   * @param os the stream to dump state to\n   * @param indent_level the depth, for pretty-printing.\n   *\n   * This function is called on Envoy fatal errors so should avoid memory allocation where possible.\n   */\n  virtual void dumpState(std::ostream& os, int indent_level = 0) const PURE;\n\n  /**\n   * Allow easy pretty-printing of the key/value pairs in HeaderMap\n   * @param os supplies the ostream to print to.\n   * @param headers the headers to print.\n   */\n  friend std::ostream& operator<<(std::ostream& os, const HeaderMap& headers) {\n    headers.dumpState(os);\n    return os;\n  }\n};\n\nusing HeaderMapPtr = std::unique_ptr<HeaderMap>;\n\n/**\n * Registry for custom headers. Headers can be registered multiple times in independent\n * compilation units and will still point to the same slot. Headers are registered independently\n * for each concrete header map type and do not overlap. 
Handles are strongly typed and do not\n * allow mixing.\n */\nclass CustomInlineHeaderRegistry {\npublic:\n  enum class Type { RequestHeaders, RequestTrailers, ResponseHeaders, ResponseTrailers };\n  using RegistrationMap = std::map<LowerCaseString, size_t>;\n\n  // A \"phantom\" type is used here to force the compiler to verify that handles are not mixed\n  // between concrete header map types.\n  template <Type type> struct Handle {\n    Handle(RegistrationMap::const_iterator it) : it_(it) {}\n    bool operator==(const Handle& rhs) const { return it_ == rhs.it_; }\n\n    RegistrationMap::const_iterator it_;\n  };\n\n  /**\n   * Register an inline header and return a handle for use in inline header calls. Must be called\n   * prior to finalize().\n   */\n  template <Type type>\n  static Handle<type> registerInlineHeader(const LowerCaseString& header_name) {\n    static size_t inline_header_index = 0;\n\n    ASSERT(!mutableFinalized<type>());\n    auto& map = mutableRegistrationMap<type>();\n    auto entry = map.find(header_name);\n    if (entry == map.end()) {\n      map[header_name] = inline_header_index++;\n    }\n    return Handle<type>(map.find(header_name));\n  }\n\n  /**\n   * Fetch the handle for a registered inline header. May only be called after finalized().\n   */\n  template <Type type>\n  static absl::optional<Handle<type>> getInlineHeader(const LowerCaseString& header_name) {\n    ASSERT(mutableFinalized<type>());\n    auto& map = mutableRegistrationMap<type>();\n    auto entry = map.find(header_name);\n    if (entry != map.end()) {\n      return Handle<type>(entry);\n    }\n    return absl::nullopt;\n  }\n\n  /**\n   * Fetch all registered headers. May only be called after finalized().\n   */\n  template <Type type> static const RegistrationMap& headers() {\n    ASSERT(mutableFinalized<type>());\n    return mutableRegistrationMap<type>();\n  }\n\n  /**\n   * Finalize the custom header registrations. 
No further changes are allowed after this point.\n   * This guaranteed that all header maps created by the process have the same variable size and\n   * custom registrations.\n   */\n  template <Type type> static void finalize() {\n    ASSERT(!mutableFinalized<type>());\n    mutableFinalized<type>() = true;\n  }\n\nprivate:\n  template <Type type> static RegistrationMap& mutableRegistrationMap() {\n    MUTABLE_CONSTRUCT_ON_FIRST_USE(RegistrationMap);\n  }\n  template <Type type> static bool& mutableFinalized() { MUTABLE_CONSTRUCT_ON_FIRST_USE(bool); }\n};\n\n/**\n * Static initializer to register a custom header in a compilation unit. This can be used by\n * extensions to register custom headers.\n */\ntemplate <CustomInlineHeaderRegistry::Type type> class RegisterCustomInlineHeader {\npublic:\n  RegisterCustomInlineHeader(const LowerCaseString& header)\n      : handle_(CustomInlineHeaderRegistry::registerInlineHeader<type>(header)) {}\n\n  typename CustomInlineHeaderRegistry::Handle<type> handle() { return handle_; }\n\nprivate:\n  const typename CustomInlineHeaderRegistry::Handle<type> handle_;\n};\n\n/**\n * The following functions allow O(1) access for custom inline headers.\n */\ntemplate <CustomInlineHeaderRegistry::Type type> class CustomInlineHeaderBase {\npublic:\n  virtual ~CustomInlineHeaderBase() = default;\n\n  static constexpr CustomInlineHeaderRegistry::Type header_map_type = type;\n  using Handle = CustomInlineHeaderRegistry::Handle<header_map_type>;\n\n  virtual const HeaderEntry* getInline(Handle handle) const PURE;\n  virtual void appendInline(Handle handle, absl::string_view data,\n                            absl::string_view delimiter) PURE;\n  virtual void setReferenceInline(Handle, absl::string_view value) PURE;\n  virtual void setInline(Handle, absl::string_view value) PURE;\n  virtual void setInline(Handle, uint64_t value) PURE;\n  virtual size_t removeInline(Handle handle) PURE;\n  absl::string_view getInlineValue(Handle handle) const {\n 
   const auto header = getInline(handle);\n    if (header != nullptr) {\n      return header->value().getStringView();\n    }\n    return {};\n  }\n};\n\n/**\n * Typed derived classes for all header map types.\n */\n\n// Base class for both request and response headers.\nclass RequestOrResponseHeaderMap : public HeaderMap {\npublic:\n  INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER)\n};\n\n// Request headers.\nclass RequestHeaderMap\n    : public RequestOrResponseHeaderMap,\n      public CustomInlineHeaderBase<CustomInlineHeaderRegistry::Type::RequestHeaders> {\npublic:\n  INLINE_REQ_HEADERS(DEFINE_INLINE_HEADER)\n};\nusing RequestHeaderMapPtr = std::unique_ptr<RequestHeaderMap>;\nusing RequestHeaderMapOptRef = absl::optional<std::reference_wrapper<RequestHeaderMap>>;\n\n// Request trailers.\nclass RequestTrailerMap\n    : public HeaderMap,\n      public CustomInlineHeaderBase<CustomInlineHeaderRegistry::Type::RequestTrailers> {};\nusing RequestTrailerMapPtr = std::unique_ptr<RequestTrailerMap>;\nusing RequestTrailerMapOptRef = absl::optional<std::reference_wrapper<RequestTrailerMap>>;\n\n// Base class for both response headers and trailers.\nclass ResponseHeaderOrTrailerMap {\npublic:\n  virtual ~ResponseHeaderOrTrailerMap() = default;\n\n  INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER)\n};\n\n// Response headers.\nclass ResponseHeaderMap\n    : public RequestOrResponseHeaderMap,\n      public ResponseHeaderOrTrailerMap,\n      public CustomInlineHeaderBase<CustomInlineHeaderRegistry::Type::ResponseHeaders> {\npublic:\n  INLINE_RESP_HEADERS(DEFINE_INLINE_HEADER)\n};\nusing ResponseHeaderMapPtr = std::unique_ptr<ResponseHeaderMap>;\nusing ResponseHeaderMapOptRef = absl::optional<std::reference_wrapper<ResponseHeaderMap>>;\n\n// Response trailers.\nclass ResponseTrailerMap\n    : public ResponseHeaderOrTrailerMap,\n      public HeaderMap,\n      public CustomInlineHeaderBase<CustomInlineHeaderRegistry::Type::ResponseTrailers> {};\nusing ResponseTrailerMapPtr = 
std::unique_ptr<ResponseTrailerMap>;\nusing ResponseTrailerMapOptRef = absl::optional<std::reference_wrapper<ResponseTrailerMap>>;\n\n/**\n * Convenient container type for storing Http::LowerCaseString and std::string key/value pairs.\n */\nusing HeaderVector = std::vector<std::pair<LowerCaseString, std::string>>;\n\n/**\n * An interface to be implemented by header matchers.\n */\nclass HeaderMatcher {\npublic:\n  virtual ~HeaderMatcher() = default;\n\n  /**\n   * Check whether header matcher matches any headers in a given HeaderMap.\n   */\n  virtual bool matchesHeaders(const HeaderMap& headers) const PURE;\n};\n\nusing HeaderMatcherSharedPtr = std::shared_ptr<HeaderMatcher>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/message.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/header_map.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Wraps an HTTP message including its headers, body, and any trailers.\n */\ntemplate <class HeaderType, class TrailerType> class Message {\npublic:\n  virtual ~Message() = default;\n\n  /**\n   * @return HeaderType& the message headers.\n   */\n  virtual HeaderType& headers() PURE;\n\n  /**\n   * @return Buffer::Instance the message body, if any. Callers are free to modify the body.\n   */\n  virtual Buffer::Instance& body() PURE;\n\n  /**\n   * @return TrailerType* the message trailers, if any.\n   */\n  virtual TrailerType* trailers() PURE;\n\n  /**\n   * Set the trailers.\n   * @param trailers supplies the new trailers.\n   */\n  virtual void trailers(std::unique_ptr<TrailerType>&& trailers) PURE;\n\n  /**\n   * @return std::string the message body as a std::string.\n   */\n  virtual std::string bodyAsString() const PURE;\n};\n\nusing RequestMessage = Message<RequestHeaderMap, RequestTrailerMap>;\nusing RequestMessagePtr = std::unique_ptr<RequestMessage>;\nusing ResponseMessage = Message<ResponseHeaderMap, ResponseTrailerMap>;\nusing ResponseMessagePtr = std::unique_ptr<ResponseMessage>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/metadata_interface.h",
    "content": "#pragma once\n\n#include <functional>\n#include <iostream>\n#include <memory>\n#include <vector>\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Please refer to #2394 for more info about Envoy METADATA.\n * Envoy metadata docs can be found at source/docs/h2_metadata.md.\n */\nconstexpr uint8_t METADATA_FRAME_TYPE = 0x4d;\nconstexpr uint8_t END_METADATA_FLAG = 0x4;\n\n// NGHTTP2_MAX_PAYLOADLEN in nghttp2.\n// TODO(soya3129): Respect max_frame_size after nghttp2 #1250 is resolved.\nconstexpr uint64_t METADATA_MAX_PAYLOAD_SIZE = 16384;\n\nusing UnorderedStringMap = absl::node_hash_map<std::string, std::string>;\n\nclass MetadataMap : public UnorderedStringMap {\npublic:\n  using UnorderedStringMap::UnorderedStringMap;\n\n  friend std::ostream& operator<<(std::ostream& out, const MetadataMap& metadata_map) {\n    out << \"metadata map:\";\n    for (const auto& metadata : metadata_map) {\n      out << \"\\nkey: \" << metadata.first << \", value: \" << metadata.second << std::endl;\n    }\n    return out;\n  }\n};\n\nusing MetadataMapPtr = std::unique_ptr<MetadataMap>;\n\nusing VectorMetadataMapPtr = std::vector<MetadataMapPtr>;\n\nclass MetadataMapVector : public VectorMetadataMapPtr {\npublic:\n  using VectorMetadataMapPtr::VectorMetadataMapPtr;\n\n  friend std::ostream& operator<<(std::ostream& out, const MetadataMapVector& metadata_map_vector) {\n    out << \"metadata_map_vector:\\n\";\n    for (const auto& metadata_map : metadata_map_vector) {\n      out << *metadata_map;\n    }\n    return out;\n  }\n};\n\nusing MetadataCallback = std::function<void(MetadataMapPtr&&)>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/protocol.h",
    "content": "#pragma once\n\n#include <cstddef>\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Possible HTTP connection/request protocols. The parallel NumProtocols constant allows defining\n * fixed arrays for each protocol, but does not pollute the enum.\n */\nenum class Protocol { Http10, Http11, Http2, Http3 };\nconst size_t NumProtocols = 4;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/query_params.h",
    "content": "#pragma once\n\n#include <map>\n#include <string>\n\nnamespace Envoy {\nnamespace Http {\nnamespace Utility {\n\n// TODO(jmarantz): this should probably be a proper class, with methods to serialize\n// using proper formatting. Perhaps similar to\n// https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/http/query_params.h\n\nusing QueryParams = std::map<std::string, std::string>;\n\n} // namespace Utility\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/http/request_id_extension.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/http/header_map.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nenum class TraceStatus { NoTrace, Sampled, Client, Forced };\n\n/**\n * Abstract request id utilities for getting/setting the request IDs and tracing status of requests\n */\nclass RequestIDExtension {\npublic:\n  virtual ~RequestIDExtension() = default;\n\n  /**\n   * Directly set a request ID into the provided request headers. Override any previous request ID\n   * if any.\n   * @param request_headers supplies the incoming request headers for setting a request ID.\n   * @param force specifies if a new request ID should be forcefully set if one is already present.\n   */\n  virtual void set(Http::RequestHeaderMap& request_headers, bool force) PURE;\n\n  /**\n   * Preserve request ID in response headers if any is set in the request headers.\n   * @param response_headers supplies the downstream response headers for setting the request ID.\n   * @param request_headers supplies the incoming request headers for retrieving the request ID.\n   */\n  virtual void setInResponse(Http::ResponseHeaderMap& response_headers,\n                             const Http::RequestHeaderMap& request_headers) PURE;\n\n  /**\n   * Perform a mod operation across the request id within a request and store the result in the\n   * provided output variable. 
This is used to perform sampling and validate the request ID.\n   * @param request_headers supplies the incoming request headers for retrieving the request ID.\n   * @param out reference to a variable where we store the result of the mod operation.\n   * @param mod integer to mod the request ID by.\n   * @return true if request ID is valid and out is populated by the result.\n   */\n  virtual bool modBy(const Http::RequestHeaderMap& request_headers, uint64_t& out,\n                     uint64_t mod) PURE;\n\n  /**\n   * Get the current tracing status of a request given its headers.\n   * @param request_headers supplies the incoming request headers for retrieving the request ID.\n   * @return trace status of the request based on the given headers.\n   */\n  virtual TraceStatus getTraceStatus(const Http::RequestHeaderMap& request_headers) PURE;\n\n  /**\n   * Set the tracing status of a request.\n   * @param request_headers supplies the incoming request headers for setting the trace status.\n   * @param status the trace status that should be set for this request.\n   */\n  virtual void setTraceStatus(Http::RequestHeaderMap& request_headers, TraceStatus status) PURE;\n};\n\nusing RequestIDExtensionSharedPtr = std::shared_ptr<RequestIDExtension>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/init/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"watcher_interface\",\n    hdrs = [\"watcher.h\"],\n)\n\nenvoy_cc_library(\n    name = \"target_interface\",\n    hdrs = [\"target.h\"],\n    deps = [\n        \":watcher_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"manager_interface\",\n    hdrs = [\"manager.h\"],\n    deps = [\n        \":target_interface\",\n        \":watcher_interface\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/init/manager.h",
    "content": "#pragma once\n\n#include \"envoy/admin/v3/init_dump.pb.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/init/target.h\"\n#include \"envoy/init/watcher.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Init {\n\n/**\n * Init::Manager coordinates initialization of one or more \"targets.\" A typical flow would be:\n *\n *   - One or more initialization targets are registered with a manager using `add`.\n *   - The manager is told to `initialize` all its targets, given a Watcher to notify when all\n *     registered targets are initialized.\n *   - Each target will initialize, either immediately or asynchronously, and will signal\n *     `ready` to the manager when initialized.\n *   - When all targets are initialized, the manager signals `ready` to the watcher it was given\n *     previously.\n *\n * Since there are several entities involved in this flow -- the owner of the manager, the targets\n * registered with the manager, and the manager itself -- it may be difficult or impossible in some\n * cases to guarantee that their lifetimes line up correctly to avoid use-after-free errors. The\n * interface design here in Init allows implementations to avoid the issue:\n *\n *   - A Target can only be initialized via a TargetHandle, which acts as a weak reference.\n *     Attempting to initialize a destroyed Target via its handle has no ill effects.\n *   - Likewise, a Watcher can only be notified that initialization was complete via a\n *     WatcherHandle, which acts as a weak reference as well.\n *\n * See target.h and watcher.h, as well as implementation in source/common/init for details.\n */\nstruct Manager {\n  virtual ~Manager() = default;\n\n  /**\n   * The manager's state, used e.g. 
for reporting in the admin server.\n   */\n  enum class State {\n    /**\n     * Targets have not been initialized.\n     */\n    Uninitialized,\n    /**\n     * Targets are currently being initialized.\n     */\n    Initializing,\n    /**\n     * All targets have been initialized.\n     */\n    Initialized\n  };\n\n  /**\n   * @return the current state of the manager.\n   */\n  virtual State state() const PURE;\n\n  /**\n   * Register an initialization target. If the manager's current state is uninitialized, the target\n   * will be saved for invocation later, when `initialize` is called. If the current state is\n   * initializing, the target will be invoked immediately. It is an error to register a target with\n   * a manager that is already in initialized state.\n   * @param target the target to be invoked when initialization begins.\n   */\n  virtual void add(const Target& target) PURE;\n\n  /**\n   * Start initialization of all previously registered targets, and notify the given Watcher when\n   * initialization is complete. It is an error to call initialize on a manager that is already in\n   * initializing or initialized state. If the manager contains no targets, initialization completes\n   * immediately.\n   * @param watcher the watcher to notify when initialization is complete.\n   */\n  virtual void initialize(const Watcher& watcher) PURE;\n\n  /**\n   * @return the unready targets of the manager.\n   */\n  virtual const absl::flat_hash_map<std::string, uint32_t>& unreadyTargets() const PURE;\n\n  /**\n   * Add unready targets information into the config dump.\n   */\n  virtual void dumpUnreadyTargets(envoy::admin::v3::UnreadyTargetsDumps& dumps) PURE;\n};\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/init/target.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/init/watcher.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Init {\n\n/**\n * A TargetHandle functions as a weak reference to a Target. It is how an implementation of\n * Init::Manager would safely tell a target to `initialize` with no guarantees about the\n * target's lifetime. Typical usage (outside of Init::ManagerImpl) does not require touching\n * TargetHandles at all.\n */\nstruct TargetHandle {\n  virtual ~TargetHandle() = default;\n\n  /**\n   * Tell the target to begin initialization, if it is still available.\n   * @param watcher A Watcher for the target to notify when it has initialized.\n   * @return true if the target received this call, false if the target was already destroyed.\n   */\n  virtual bool initialize(const Watcher& watcher) const PURE;\n\n  /**\n   * @return a human-readable target name, for logging / debugging / tracking target names.\n   * The target name has to be unique.\n   */\n  virtual absl::string_view name() const PURE;\n};\nusing TargetHandlePtr = std::unique_ptr<TargetHandle>;\n\n/**\n * An initialization Target is an entity that can be registered with a Manager for initialization.\n * It can only be invoked through a TargetHandle.\n */\nstruct Target {\n  virtual ~Target() = default;\n\n  /**\n   * @return a human-readable target name, for logging / debugging.\n   */\n  virtual absl::string_view name() const PURE;\n\n  /**\n   * Create a new handle that can initialize this target.\n   * @param name a human readable handle name, for logging / debugging.\n   * @return a new handle that can initialize this target.\n   */\n  virtual TargetHandlePtr createHandle(absl::string_view name) const PURE;\n};\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/init/watcher.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Init {\n\n/**\n * A WatcherHandle functions as a weak reference to a Watcher. It is how an implementation of\n * Init::Target would safely notify a Manager that it has initialized, and likewise it's how\n * an implementation of Init::Manager would safely tell its client that all registered targets\n * have initialized, with no guarantees about the lifetimes of the manager or client. Typical usage\n * (outside of Init::TargetImpl and ManagerImpl) does not require touching WatcherHandles at\n * all.\n */\nstruct WatcherHandle {\n  virtual ~WatcherHandle() = default;\n\n  /**\n   * Tell the watcher that initialization has completed, if it is still available.\n   * @return true if the watcher received this call, false if the watcher was already destroyed.\n   */\n  virtual bool ready() const PURE;\n};\nusing WatcherHandlePtr = std::unique_ptr<WatcherHandle>;\n\n/**\n * A Watcher is an entity that listens for notifications that either an initialization target or\n * all targets registered with a manager have initialized. It can only be invoked through a\n * WatcherHandle.\n */\nstruct Watcher {\n  virtual ~Watcher() = default;\n\n  /**\n   * @return a human-readable target name, for logging / debugging.\n   */\n  virtual absl::string_view name() const PURE;\n\n  /**\n   * Create a new handle that can notify this watcher.\n   * @param name a human readable handle name, for logging / debugging.\n   * @return a new handle that can notify this watcher.\n   */\n  virtual WatcherHandlePtr createHandle(absl::string_view name) const PURE;\n};\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/json/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"json_object_interface\",\n    hdrs = [\"json_object.h\"],\n)\n"
  },
  {
    "path": "include/envoy/json/json_object.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Json {\nclass Object;\n\nusing ObjectSharedPtr = std::shared_ptr<Object>;\n\n// @return false if immediate exit from iteration required.\nusing ObjectCallback = std::function<bool(const std::string&, const Object&)>;\n\n/**\n * Exception thrown when a JSON error occurs.\n */\nclass Exception : public EnvoyException {\npublic:\n  Exception(const std::string& message) : EnvoyException(message) {}\n};\n\n/**\n * Wraps an individual JSON node.\n */\nclass Object {\npublic:\n  virtual ~Object() = default;\n\n  /**\n   * Convert a generic object into an array of objects. This is useful for dealing\n   * with arrays of arrays.\n   * @return std::vector<ObjectSharedPtr> the converted object.\n   */\n  virtual std::vector<ObjectSharedPtr> asObjectArray() const PURE;\n\n  /**\n   * Get a boolean value by name.\n   * @param name supplies the key name.\n   * @return bool the value.\n   */\n  virtual bool getBoolean(const std::string& name) const PURE;\n\n  /**\n   * Get a boolean value by name.\n   * @param name supplies the key name.\n   * @param default_value supplies the value to return if the name does not exist.\n   * @return bool the value.\n   */\n  virtual bool getBoolean(const std::string& name, bool default_value) const PURE;\n\n  /**\n   * Get an integer value by name.\n   * @param name supplies the key name.\n   * @return int64_t the value.\n   */\n  virtual int64_t getInteger(const std::string& name) const PURE;\n\n  /**\n   * Get an integer value by name or return a default if name does not exist.\n   * @param name supplies the key name.\n   * @param default_value supplies the value to return if name does not exist.\n   * @return int64_t the value.\n   */\n  virtual int64_t getInteger(const std::string& name, int64_t 
default_value) const PURE;\n\n  /**\n   * Get a sub-object by name.\n   * @param name supplies the key name.\n   * @param allow_empty supplies whether to return an empty object if the key does not\n   * exist.\n   * @return ObjectSharedPtr the sub-object.\n   */\n  virtual ObjectSharedPtr getObject(const std::string& name, bool allow_empty = false) const PURE;\n\n  /**\n   * Determine if an object is null.\n   * @return bool is the object null?\n   */\n  virtual bool isNull() const PURE;\n\n  /**\n   * Determine if an object has type Object.\n   * @return bool is the object an Object?\n   */\n  virtual bool isObject() const PURE;\n\n  /**\n   * Determine if an object has type Array.\n   * @return bool is the object an Array?\n   */\n  virtual bool isArray() const PURE;\n\n  /**\n   * Get an array by name.\n   * @param name supplies the key name.\n   * @param allow_empty specifies whether to return an empty array if the key does not exist.\n   * @return std::vector<ObjectSharedPtr> the array of JSON objects.\n   */\n  virtual std::vector<ObjectSharedPtr> getObjectArray(const std::string& name,\n                                                      bool allow_empty = false) const PURE;\n\n  /**\n   * Get a string value by name.\n   * @param name supplies the key name.\n   * @return std::string the value.\n   */\n  virtual std::string getString(const std::string& name) const PURE;\n\n  /**\n   * Get a string value by name or return a default if name does not exist.\n   * @param name supplies the key name.\n   * @param default_value supplies the value to return if name does not exist.\n   * @return std::string the value.\n   */\n  virtual std::string getString(const std::string& name,\n                                const std::string& default_value) const PURE;\n\n  /**\n   * Get a string array by name.\n   * @param name supplies the key name.\n   * @param allow_empty specifies whether to return an empty array if the key does not exist.\n   * @return 
std::vector<std::string> the array of strings.\n   */\n  virtual std::vector<std::string> getStringArray(const std::string& name,\n                                                  bool allow_empty = false) const PURE;\n\n  /**\n   * Get a double value by name.\n   * @param name supplies the key name.\n   * @return double the value.\n   */\n  virtual double getDouble(const std::string& name) const PURE;\n\n  /**\n   * Get a double value by name.\n   * @param name supplies the key name.\n   * @param default_value supplies the value to return if name does not exist.\n   * @return double the value.\n   */\n  virtual double getDouble(const std::string& name, double default_value) const PURE;\n\n  /**\n   * @return a hash of the JSON object.\n   * Per RFC 7159:\n   *    An object is an unordered collection of zero or more name/value\n   *    pairs, where a name is a string and a value is a string, number,\n   *    boolean, null, object, or array.\n   * Objects with fields in different orders are equivalent and produce the same hash.\n   * It does not consider white space that was originally in the parsed JSON.\n   */\n  virtual uint64_t hash() const PURE;\n\n  /**\n   * Iterate over key-value pairs in an Object and call callback on each pair.\n   */\n  virtual void iterate(const ObjectCallback& callback) const PURE;\n\n  /**\n   * @return TRUE if the Object contains the key.\n   * @param name supplies the key name to lookup.\n   */\n  virtual bool hasObject(const std::string& name) const PURE;\n\n  /**\n   * Validates JSON object against passed in schema.\n   * @param schema supplies the schema in string format. 
A Json::Exception will be thrown if\n   *        the JSON object doesn't conform to the supplied schema or the schema itself is not\n   *        valid.\n   */\n  virtual void validateSchema(const std::string& schema) const PURE;\n\n  /**\n   * @return the value of the object as a string (where the object is a string).\n   */\n  virtual std::string asString() const PURE;\n\n  /**\n   * @return the value of the object as a boolean (where the object is a boolean).\n   */\n  virtual bool asBoolean() const PURE;\n\n  /**\n   * @return the value of the object as a double (where the object is a double).\n   */\n  virtual double asDouble() const PURE;\n\n  /**\n   * @return the value of the object as an integer (where the object is an integer).\n   */\n  virtual int64_t asInteger() const PURE;\n\n  /**\n   * @return the JSON string representation of the object.\n   */\n  virtual std::string asJsonString() const PURE;\n\n  /**\n   * @return true if the JSON object is empty;\n   */\n  virtual bool empty() const PURE;\n};\n\n} // namespace Json\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/local_info/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"local_info_interface\",\n    hdrs = [\"local_info.h\"],\n    deps = [\n        \"//include/envoy/network:address_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/local_info/local_info.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/address.h\"\n\nnamespace Envoy {\nnamespace LocalInfo {\n\n/**\n * Information about the local environment.\n */\nclass LocalInfo {\npublic:\n  virtual ~LocalInfo() = default;\n\n  /**\n   * @return the local (non-loopback) address of the server.\n   */\n  virtual Network::Address::InstanceConstSharedPtr address() const PURE;\n\n  /**\n   * @return the human readable zone name. E.g., \"us-east-1a\".\n   */\n  virtual const std::string& zoneName() const PURE;\n\n  /**\n   * @return the human readable cluster name. E.g., \"eta\".\n   */\n  virtual const std::string& clusterName() const PURE;\n\n  /**\n   * @return the human readable individual node name. E.g., \"i-123456\".\n   */\n  virtual const std::string& nodeName() const PURE;\n\n  /**\n   * @return the full node identity presented to management servers.\n   */\n  virtual const envoy::config::core::v3::Node& node() const PURE;\n};\n\nusing LocalInfoPtr = std::unique_ptr<LocalInfo>;\n\n} // namespace LocalInfo\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"address_interface\",\n    hdrs = [\"address.h\"],\n    deps = [\n        \"//include/envoy/api:os_sys_calls_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"connection_interface\",\n    hdrs = [\"connection.h\"],\n    external_deps = [\"abseil_int128\"],\n    deps = [\n        \":address_interface\",\n        \":filter_interface\",\n        \":listen_socket_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/ssl:connection_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"connection_handler_interface\",\n    hdrs = [\"connection_handler.h\"],\n    deps = [\n        \":listen_socket_interface\",\n        \":listener_interface\",\n        \"//include/envoy/ssl:context_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"udp_packet_writer_handler_interface\",\n    hdrs = [\"udp_packet_writer_handler.h\"],\n    deps = [\n        \":address_interface\",\n        \":io_handle_interface\",\n        \":socket_interface\",\n        \"//include/envoy/api:io_error_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dns_interface\",\n    hdrs = [\"dns.h\"],\n    deps = [\"//include/envoy/network:address_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"drain_decision_interface\",\n    hdrs = [\"drain_decision.h\"],\n)\n\nenvoy_cc_library(\n    name = \"exception_interface\",\n    hdrs = [\"exception.h\"],\n)\n\nenvoy_cc_library(\n    name = \"filter_interface\",\n    hdrs = [\"filter.h\"],\n    deps = [\n        
\":listen_socket_interface\",\n        \":transport_socket_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/upstream:host_description_interface\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hash_policy_interface\",\n    hdrs = [\"hash_policy.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":address_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"io_handle_interface\",\n    hdrs = [\"io_handle.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":address_interface\",\n        \"//include/envoy/api:io_error_interface\",\n        \"//include/envoy/api:os_sys_calls_interface\",\n        \"//include/envoy/event:file_event_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"socket_interface\",\n    hdrs = [\"socket.h\"],\n    deps = [\n        \":address_interface\",\n        \":io_handle_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"socket_interface_interface\",\n    hdrs = [\"socket_interface.h\"],\n    deps = [\n        \":socket_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"listen_socket_interface\",\n    hdrs = [\"listen_socket.h\"],\n    deps = [\n        \":io_handle_interface\",\n        \":socket_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"transport_socket_interface\",\n    hdrs = [\"transport_socket.h\"],\n    deps = [\n        \":io_handle_interface\",\n        \":post_io_action_interface\",\n        \":proxy_protocol_options_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/ssl:connection_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"post_io_action_interface\",\n    hdrs = 
[\"post_io_action.h\"],\n    deps = [],\n)\n\nenvoy_cc_library(\n    name = \"connection_balancer_interface\",\n    hdrs = [\"connection_balancer.h\"],\n    deps = [\n        \":listen_socket_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"listener_interface\",\n    hdrs = [\"listener.h\"],\n    deps = [\n        \":connection_balancer_interface\",\n        \":connection_interface\",\n        \":listen_socket_interface\",\n        \":udp_packet_writer_handler_interface\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/common:resource_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"resolver_interface\",\n    hdrs = [\"resolver.h\"],\n    deps = [\n        \":address_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"udp_packet_writer_config_interface\",\n    hdrs = [\"udp_packet_writer_config.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/network:udp_packet_writer_handler_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"proxy_protocol_options_lib\",\n    hdrs = [\"proxy_protocol.h\"],\n    deps = [\n        \":address_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/network/address.h",
    "content": "#pragma once\n\n#include <sys/types.h>\n\n#include <array>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/api/os_sys_calls.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"absl/numeric/int128.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/* Forward declaration */\nclass SocketInterface;\n\nnamespace Address {\n\n/**\n * Interface for an Ipv4 address.\n */\nclass Ipv4 {\npublic:\n  virtual ~Ipv4() = default;\n\n  /**\n   * @return the 32-bit IPv4 address in network byte order.\n   */\n  virtual uint32_t address() const PURE;\n};\n\n/**\n * Interface for an Ipv6 address.\n */\nclass Ipv6 {\npublic:\n  virtual ~Ipv6() = default;\n\n  /**\n   * @return the absl::uint128 IPv6 address in network byte order.\n   */\n  virtual absl::uint128 address() const PURE;\n\n  /**\n   * @return true if address is Ipv6 and Ipv4 compatibility is disabled, false otherwise\n   */\n  virtual bool v6only() const PURE;\n};\n\nenum class IpVersion { v4, v6 }; // NOLINT(readability-identifier-naming)\n\n/**\n * Interface for a generic IP address.\n */\nclass Ip {\npublic:\n  virtual ~Ip() = default;\n\n  /**\n   * @return the address as a string. 
E.g., \"1.2.3.4\" for an IPv4 address.\n   */\n  virtual const std::string& addressAsString() const PURE;\n\n  /**\n   * @return whether this address is wild card, i.e., '0.0.0.0'.\n   */\n  virtual bool isAnyAddress() const PURE;\n\n  /**\n   * @return whether this address is a valid unicast address, i.e., not a wild card, broadcast, or\n   * multicast address.\n   */\n  virtual bool isUnicastAddress() const PURE;\n\n  /**\n   * @return Ipv4 address data IFF version() == IpVersion::v4, otherwise nullptr.\n   */\n  virtual const Ipv4* ipv4() const PURE;\n\n  /**\n   * @return Ipv6 address data IFF version() == IpVersion::v6, otherwise nullptr.\n   */\n  virtual const Ipv6* ipv6() const PURE;\n\n  /**\n   * @return the port associated with the address. Port may be zero if not specified, not\n   * determinable before socket creation, or not applicable.\n   * The port is in host byte order.\n   */\n  virtual uint32_t port() const PURE;\n\n  /**\n   * @return the version of IP address.\n   */\n  virtual IpVersion version() const PURE;\n};\n\n/**\n * Interface for a generic Pipe address.\n */\nclass Pipe {\npublic:\n  virtual ~Pipe() = default;\n  /**\n   * @return abstract namespace flag.\n   */\n  virtual bool abstractNamespace() const PURE;\n\n  /**\n   * @return pipe mode.\n   */\n  virtual mode_t mode() const PURE;\n};\n\n/**\n * Interface for a generic internal address.\n */\nclass EnvoyInternalAddress {\npublic:\n  virtual ~EnvoyInternalAddress() = default;\n\n  /**\n   * @return The unique id of the internal address. 
If the address represents the destination\n   * internal listener, the address id is that listener name.\n   */\n  virtual const std::string& addressId() const PURE;\n};\n\nenum class Type { Ip, Pipe, EnvoyInternal };\n\n/**\n * Interface for all network addresses.\n */\nclass Instance {\npublic:\n  virtual ~Instance() = default;\n\n  virtual bool operator==(const Instance& rhs) const PURE;\n  bool operator!=(const Instance& rhs) const { return !operator==(rhs); }\n\n  /**\n   * @return a human readable string for the address that represents the\n   * physical/resolved address. (This will not necessarily include port\n   * information, if applicable, since that may not be resolved until bind()).\n   *\n   * This string will be compatible with the following example formats:\n   * For IPv4 addresses: \"1.2.3.4:80\"\n   * For IPv6 addresses: \"[1234:5678::9]:443\"\n   * For pipe addresses: \"/foo\"\n   */\n  virtual const std::string& asString() const PURE;\n\n  /**\n   * @return Similar to asString but returns a string view.\n   */\n  virtual absl::string_view asStringView() const PURE;\n\n  /**\n   * @return a human readable string for the address that represents the\n   * logical/unresolved name.\n   *\n   * This string has a source-dependent format and should preserve the original\n   * name for Address::Instances resolved by a Network::Address::Resolver.\n   */\n  virtual const std::string& logicalName() const PURE;\n\n  /**\n   * @return the IP address information IFF type() == Type::Ip, otherwise nullptr.\n   */\n  virtual const Ip* ip() const PURE;\n\n  /**\n   * @return the pipe address information IFF type() == Type::Pipe, otherwise nullptr.\n   */\n  virtual const Pipe* pipe() const PURE;\n\n  /**\n   * @return the envoy internal address information IFF type() ==\n   * Type::EnvoyInternal, otherwise nullptr.\n   */\n  virtual const EnvoyInternalAddress* envoyInternalAddress() const PURE;\n\n  /**\n   * @return the underlying structure wherein the address is 
stored. Return nullptr if the address\n   * type is internal address.\n   */\n  virtual const sockaddr* sockAddr() const PURE;\n\n  /**\n   * @return length of the address container.\n   */\n  virtual socklen_t sockAddrLen() const PURE;\n\n  /**\n   * @return the type of address.\n   */\n  virtual Type type() const PURE;\n\n  /**\n   * @return SocketInterface to be used with the address.\n   */\n  virtual const Network::SocketInterface& socketInterface() const PURE;\n};\n\nusing InstanceConstSharedPtr = std::shared_ptr<const Instance>;\n\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/connection.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/ssl/connection.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\nnamespace Envoy {\nnamespace Event {\nclass Dispatcher;\n}\n\nnamespace Network {\n\n/**\n * Events that occur on a connection.\n */\nenum class ConnectionEvent {\n  RemoteClose,\n  LocalClose,\n  Connected,\n};\n\n/**\n * Connections have both a read and write buffer.\n */\nenum class ConnectionBufferType { Read, Write };\n\n/**\n * Network level callbacks that happen on a connection.\n */\nclass ConnectionCallbacks {\npublic:\n  virtual ~ConnectionCallbacks() = default;\n\n  /**\n   * Callback for connection events.\n   * @param events supplies the ConnectionEvent that occurred.\n   */\n  virtual void onEvent(ConnectionEvent event) PURE;\n\n  /**\n   * Called when the write buffer for a connection goes over its high watermark.\n   */\n  virtual void onAboveWriteBufferHighWatermark() PURE;\n\n  /**\n   * Called when the write buffer for a connection goes from over its high\n   * watermark to under its low watermark.\n   */\n  virtual void onBelowWriteBufferLowWatermark() PURE;\n};\n\n/**\n * Type of connection close to perform.\n */\nenum class ConnectionCloseType {\n  FlushWrite, // Flush pending write data before raising ConnectionEvent::LocalClose\n  NoFlush,    // Do not flush any pending data and immediately raise ConnectionEvent::LocalClose\n  FlushWriteAndDelay // Flush pending write data and delay raising a ConnectionEvent::LocalClose\n                     // until the delayed_close_timeout expires\n};\n\n/**\n * An abstract raw connection. 
Free the connection or call close() to disconnect.\n */\nclass Connection : public Event::DeferredDeletable, public FilterManager {\npublic:\n  enum class State { Open, Closing, Closed };\n\n  /**\n   * Callback function for when bytes have been sent by a connection.\n   * @param bytes_sent supplies the number of bytes written to the connection.\n   */\n  using BytesSentCb = std::function<void(uint64_t bytes_sent)>;\n\n  struct ConnectionStats {\n    Stats::Counter& read_total_;\n    Stats::Gauge& read_current_;\n    Stats::Counter& write_total_;\n    Stats::Gauge& write_current_;\n    // Counter* as this is an optional counter. Bind errors will not be tracked if this is nullptr.\n    Stats::Counter* bind_errors_;\n    // Optional counter. Delayed close timeouts will not be tracked if this is nullptr.\n    Stats::Counter* delayed_close_timeouts_;\n  };\n\n  ~Connection() override = default;\n\n  /**\n   * Register callbacks that fire when connection events occur.\n   */\n  virtual void addConnectionCallbacks(ConnectionCallbacks& cb) PURE;\n\n  /**\n   * Register for callback every time bytes are written to the underlying TransportSocket.\n   */\n  virtual void addBytesSentCallback(BytesSentCb cb) PURE;\n\n  /**\n   * Enable half-close semantics on this connection. Reading a remote half-close\n   * will not fully close the connection. This is off by default.\n   * @param enabled Whether to set half-close semantics as enabled or disabled.\n   */\n  virtual void enableHalfClose(bool enabled) PURE;\n\n  /**\n   * Close the connection.\n   */\n  virtual void close(ConnectionCloseType type) PURE;\n\n  /**\n   * @return Event::Dispatcher& the dispatcher backing this connection.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * @return uint64_t the unique local ID of this connection.\n   */\n  virtual uint64_t id() const PURE;\n\n  /**\n   * @param vector of bytes to which the connection should append hash key data. 
Any data already in\n   * the key vector must not be modified.\n   */\n  virtual void hashKey(std::vector<uint8_t>& hash) const PURE;\n\n  /**\n   * @return std::string the next protocol to use as selected by network level negotiation. (E.g.,\n   *         ALPN). If network level negotiation is not supported by the connection or no protocol\n   *         has been negotiated the empty string is returned.\n   */\n  virtual std::string nextProtocol() const PURE;\n\n  /**\n   * Enable/Disable TCP NO_DELAY on the connection.\n   */\n  virtual void noDelay(bool enable) PURE;\n\n  /**\n   * Disable socket reads on the connection, applying external back pressure. When reads are\n   * enabled again if there is data still in the input buffer it will be re-dispatched through\n   * the filter chain.\n   * @param disable supplies TRUE if reads should be disabled, FALSE if they should be enabled.\n   *\n   * Note that this function reference counts calls. For example\n   * readDisable(true);  // Disables data\n   * readDisable(true);  // Notes the connection is blocked by two sources\n   * readDisable(false);  // Notes the connection is blocked by one source\n   * readDisable(false);  // Marks the connection as unblocked, so resumes reading.\n   */\n  virtual void readDisable(bool disable) PURE;\n\n  /**\n   * Set if Envoy should detect TCP connection close when readDisable(true) is called.\n   * By default, this is true on newly created connections.\n   *\n   * @param should_detect supplies if disconnects should be detected when the connection has been\n   * read disabled\n   */\n  virtual void detectEarlyCloseWhenReadDisabled(bool should_detect) PURE;\n\n  /**\n   * @return bool whether reading is enabled on the connection.\n   */\n  virtual bool readEnabled() const PURE;\n\n  /**\n   * @return The address of the remote client. 
Note that this method will never return nullptr.\n   */\n  virtual const Network::Address::InstanceConstSharedPtr& remoteAddress() const PURE;\n\n  /**\n   * @return The address of the remote directly connected peer. Note that this method\n   * will never return nullptr. This address is not affected or modified by PROXY protocol\n   * or any other listener filter.\n   */\n  virtual const Network::Address::InstanceConstSharedPtr& directRemoteAddress() const PURE;\n\n  /**\n   * Credentials of the peer of a socket as decided by SO_PEERCRED.\n   */\n  struct UnixDomainSocketPeerCredentials {\n    /**\n     * The process id of the peer.\n     */\n    int32_t pid;\n    /**\n     * The user id of the peer.\n     */\n    uint32_t uid;\n    /**\n     * The group id of the peer.\n     */\n    uint32_t gid;\n  };\n\n  /**\n   * @return The unix socket peer credentials of the remote client. Note that this is only\n   * supported for unix socket connections.\n   */\n  virtual absl::optional<UnixDomainSocketPeerCredentials> unixSocketPeerCredentials() const PURE;\n\n  /**\n   * @return the local address of the connection. For client connections, this is the origin\n   * address. For server connections, this is the local destination address. For server connections\n   * it can be different from the proxy address if the downstream connection has been redirected or\n   * the proxy is operating in transparent mode. Note that this method will never return nullptr.\n   */\n  virtual const Network::Address::InstanceConstSharedPtr& localAddress() const PURE;\n\n  /**\n   * Set the stats to update for various connection state changes. 
Note that for performance reasons\n   * these stats are eventually consistent and may not always accurately represent the connection\n   * state at any given point in time.\n   */\n  virtual void setConnectionStats(const ConnectionStats& stats) PURE;\n\n  /**\n   * @return the const SSL connection data if this is an SSL connection, or nullptr if it is not.\n   */\n  // TODO(snowp): Remove this in favor of StreamInfo::downstreamSslConnection.\n  virtual Ssl::ConnectionInfoConstSharedPtr ssl() const PURE;\n\n  /**\n   * @return requested server name (e.g. SNI in TLS), if any.\n   */\n  virtual absl::string_view requestedServerName() const PURE;\n\n  /**\n   * @return State the current state of the connection.\n   */\n  virtual State state() const PURE;\n\n  /**\n   * Write data to the connection. Will iterate through downstream filters with the buffer if any\n   * are installed.\n   * @param data Supplies the data to write to the connection.\n   * @param end_stream If true, this indicates that this is the last write to the connection. If\n   *        end_stream is true, the connection is half-closed. This may only be set to true if\n   *        enableHalfClose(true) has been set on this connection.\n   */\n  virtual void write(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Set a soft limit on the size of buffers for the connection.\n   * For the read buffer, this limits the bytes read prior to flushing to further stages in the\n   * processing pipeline.\n   * For the write buffer, it sets watermarks. When enough data is buffered it triggers a call to\n   * onAboveWriteBufferHighWatermark, which allows subscribers to enforce flow control by disabling\n   * reads on the socket funneling data to the write buffer. 
When enough data is drained from the\n   * write buffer, onBelowWriteBufferLowWatermark is called which similarly allows subscribers\n   * resuming reading.\n   */\n  virtual void setBufferLimits(uint32_t limit) PURE;\n\n  /**\n   * Get the value set with setBufferLimits.\n   */\n  virtual uint32_t bufferLimit() const PURE;\n\n  /**\n   * @return boolean telling if the connection's local address has been restored to an original\n   *         destination address, rather than the address the connection was accepted at.\n   */\n  virtual bool localAddressRestored() const PURE;\n\n  /**\n   * @return boolean telling if the connection is currently above the high watermark.\n   */\n  virtual bool aboveHighWatermark() const PURE;\n\n  /**\n   * Get the socket options set on this connection.\n   */\n  virtual const ConnectionSocket::OptionsSharedPtr& socketOptions() const PURE;\n\n  /**\n   * The StreamInfo object associated with this connection. This is typically\n   * used for logging purposes. Individual filters may add specific information\n   * via the FilterState object within the StreamInfo object. The StreamInfo\n   * object in this context is one per connection i.e. 
different than the one in\n   * the http ConnectionManager implementation which is one per request.\n   *\n   * @return StreamInfo object associated with this connection.\n   */\n  virtual StreamInfo::StreamInfo& streamInfo() PURE;\n  virtual const StreamInfo::StreamInfo& streamInfo() const PURE;\n\n  /**\n   * Set the timeout for delayed connection close()s.\n   * This can only be called prior to issuing a close() on the connection.\n   * @param timeout The timeout value in milliseconds\n   */\n  virtual void setDelayedCloseTimeout(std::chrono::milliseconds timeout) PURE;\n\n  /**\n   * @return absl::string_view the failure reason of the underlying transport socket, if no failure\n   *         occurred an empty string is returned.\n   */\n  virtual absl::string_view transportFailureReason() const PURE;\n\n  /**\n   *  @return absl::optional<std::chrono::milliseconds> An optional of the most recent round-trip\n   *  time of the connection. If the platform does not support this, then an empty optional is\n   *  returned.\n   */\n  virtual absl::optional<std::chrono::milliseconds> lastRoundTripTime() const PURE;\n};\n\nusing ConnectionPtr = std::unique_ptr<Connection>;\n\n/**\n * Connections capable of outbound connects.\n */\nclass ClientConnection : public virtual Connection {\npublic:\n  /**\n   * Connect to a remote host. Errors or connection events are reported via the event callback\n   * registered via addConnectionCallbacks().\n   */\n  virtual void connect() PURE;\n};\n\nusing ClientConnectionPtr = std::unique_ptr<ClientConnection>;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/connection_balancer.h",
    "content": "#pragma once\n\n#include \"envoy/network/listen_socket.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * A connection handler that is balanced. Typically implemented by individual listeners depending\n * on their balancing configuration.\n */\nclass BalancedConnectionHandler {\npublic:\n  virtual ~BalancedConnectionHandler() = default;\n\n  /**\n   * @return the number of active connections within the handler.\n   */\n  virtual uint64_t numConnections() const PURE;\n\n  /**\n   * Increment the number of connections within the handler. This must be called by a connection\n   * balancer implementation prior to a connection being picked via pickTargetHandler(). This makes\n   * sure that connection counts are accurate during connection transfer (i.e., that the target\n   * balancer accounts for the incoming connection). This is done by the balancer vs. the\n   * connection handler to account for different locking needs inside the balancer.\n   */\n  virtual void incNumConnections() PURE;\n\n  /**\n   * Post a connected socket to this connection handler. This is used for cross-thread connection\n   * transfer during the balancing process.\n   */\n  virtual void post(Network::ConnectionSocketPtr&& socket) PURE;\n};\n\n/**\n * An implementation of a connection balancer. 
This abstracts the underlying policy (e.g., exact,\n * fuzzy, etc.).\n */\nclass ConnectionBalancer {\npublic:\n  virtual ~ConnectionBalancer() = default;\n\n  /**\n   * Register a new handler with the balancer that is available for balancing.\n   */\n  virtual void registerHandler(BalancedConnectionHandler& handler) PURE;\n\n  /**\n   * Unregister a handler with the balancer that is no longer available for balancing.\n   */\n  virtual void unregisterHandler(BalancedConnectionHandler& handler) PURE;\n\n  /**\n   * Pick a target handler to send a connection to.\n   * @param current_handler supplies the currently executing connection handler.\n   * @return current_handler if the connection should stay bound to the current handler, or a\n   *         different handler if the connection should be rebalanced.\n   *\n   * NOTE: It is the responsibility of the balancer to call incNumConnections() on the returned\n   *       balancer. See the comments above for more explanation.\n   */\n  virtual BalancedConnectionHandler&\n  pickTargetHandler(BalancedConnectionHandler& current_handler) PURE;\n};\n\nusing ConnectionBalancerSharedPtr = std::shared_ptr<ConnectionBalancer>;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/connection_handler.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/network/listener.h\"\n#include \"envoy/ssl/context.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Abstract connection handler.\n */\nclass ConnectionHandler {\npublic:\n  virtual ~ConnectionHandler() = default;\n\n  /**\n   * @return uint64_t the number of active connections owned by the handler.\n   */\n  virtual uint64_t numConnections() const PURE;\n\n  /**\n   * Increment the return value of numConnections() by one.\n   * TODO(mattklein123): re-visit the connection accounting interface. Make TCP\n   * listener to do accounting through these interfaces instead of directly\n   * access the counter.\n   */\n  virtual void incNumConnections() PURE;\n\n  /**\n   * Decrement the return value of numConnections() by one.\n   */\n  virtual void decNumConnections() PURE;\n\n  /**\n   * Adds a listener to the handler, optionally replacing the existing listener.\n   * @param overridden_listener tag of the existing listener. nullopt if no previous listener.\n   * @param config listener configuration options.\n   */\n  virtual void addListener(absl::optional<uint64_t> overridden_listener,\n                           ListenerConfig& config) PURE;\n\n  /**\n   * Remove listeners using the listener tag as a key. All connections owned by the removed\n   * listeners will be closed.\n   * @param listener_tag supplies the tag passed to addListener().\n   */\n  virtual void removeListeners(uint64_t listener_tag) PURE;\n\n  /**\n   * Get the ``UdpListenerCallbacks`` associated with ``listener_tag``. 
This will be\n   * absl::nullopt for non-UDP listeners and for ``listener_tag`` values that have already been\n   * removed.\n   */\n  virtual UdpListenerCallbacksOptRef getUdpListenerCallbacks(uint64_t listener_tag) PURE;\n\n  /**\n   * Remove the filter chains and the connections in the listener. All connections owned\n   * by the filter chains will be closed. Once all the connections are destroyed(connections\n   * could be deferred deleted!), invoke the completion.\n   * @param listener_tag supplies the tag passed to addListener().\n   * @param filter_chains supplies the filter chains to be removed.\n   */\n  virtual void removeFilterChains(uint64_t listener_tag,\n                                  const std::list<const FilterChain*>& filter_chains,\n                                  std::function<void()> completion) PURE;\n\n  /**\n   * Stop listeners using the listener tag as a key. This will not close any connections and is used\n   * for draining.\n   * @param listener_tag supplies the tag passed to addListener().\n   */\n  virtual void stopListeners(uint64_t listener_tag) PURE;\n\n  /**\n   * Stop all listeners. This will not close any connections and is used for draining.\n   */\n  virtual void stopListeners() PURE;\n\n  /**\n   * Disable all listeners. This will not close any connections and is used to temporarily\n   * stop accepting connections on all listeners.\n   */\n  virtual void disableListeners() PURE;\n\n  /**\n   * Enable all listeners. 
This is used to re-enable accepting connections on all listeners\n   * after they have been temporarily disabled.\n   */\n  virtual void enableListeners() PURE;\n\n  /**\n   * @return the stat prefix used for per-handler stats.\n   */\n  virtual const std::string& statPrefix() const PURE;\n\n  /**\n   * Used by ConnectionHandler to manage listeners.\n   */\n  class ActiveListener {\n  public:\n    virtual ~ActiveListener() = default;\n\n    /**\n     * @return the tag value as configured.\n     */\n    virtual uint64_t listenerTag() PURE;\n\n    /**\n     * @return the actual Listener object.\n     */\n    virtual Listener* listener() PURE;\n\n    /**\n     * Temporarily stop listening according to implementation's own definition.\n     */\n    virtual void pauseListening() PURE;\n\n    /**\n     * Resume listening according to implementation's own definition.\n     */\n    virtual void resumeListening() PURE;\n\n    /**\n     * Stop listening according to implementation's own definition.\n     */\n    virtual void shutdownListener() PURE;\n  };\n\n  using ActiveListenerPtr = std::unique_ptr<ActiveListener>;\n\n  /**\n   * Used by ConnectionHandler to manage UDP listeners.\n   */\n  class ActiveUdpListener : public virtual ActiveListener, public Network::UdpListenerCallbacks {\n  public:\n    ~ActiveUdpListener() override = default;\n\n    /**\n     * Returns the worker index that ``data`` should be delivered to. 
The return value must be in\n     * the range [0, concurrency).\n     */\n    virtual uint32_t destination(const Network::UdpRecvData& data) const PURE;\n  };\n\n  using ActiveUdpListenerPtr = std::unique_ptr<ActiveUdpListener>;\n};\n\nusing ConnectionHandlerPtr = std::unique_ptr<ConnectionHandler>;\n\n/**\n * A registered factory interface to create different kinds of ActiveUdpListener.\n */\nclass ActiveUdpListenerFactory {\npublic:\n  virtual ~ActiveUdpListenerFactory() = default;\n\n  /**\n   * Creates an ActiveUdpListener object and a corresponding UdpListener\n   * according to given config.\n   * @param worker_index The index of the worker this listener is being created on.\n   * @param parent is the owner of the created ActiveListener objects.\n   * @param dispatcher is used to create actual UDP listener.\n   * @param config provides information needed to create ActiveUdpListener and\n   * UdpListener objects.\n   * @return the ActiveUdpListener created.\n   */\n  virtual ConnectionHandler::ActiveUdpListenerPtr\n  createActiveUdpListener(uint32_t worker_index, ConnectionHandler& parent,\n                          Event::Dispatcher& dispatcher, Network::ListenerConfig& config) PURE;\n\n  /**\n   * @return true if the UDP passing through listener doesn't form stateful connections.\n   */\n  virtual bool isTransportConnectionless() const PURE;\n};\n\nusing ActiveUdpListenerFactoryPtr = std::unique_ptr<ActiveUdpListenerFactory>;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/dns.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/network/address.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * An active async DNS query.\n */\nclass ActiveDnsQuery {\npublic:\n  virtual ~ActiveDnsQuery() = default;\n\n  /**\n   * Cancel an outstanding DNS request.\n   */\n  virtual void cancel() PURE;\n};\n\n/**\n * DNS response.\n */\nstruct DnsResponse {\n  DnsResponse(const Address::InstanceConstSharedPtr& address, const std::chrono::seconds ttl)\n      : address_(address), ttl_(ttl) {}\n\n  const Address::InstanceConstSharedPtr address_;\n  const std::chrono::seconds ttl_;\n};\n\nenum class DnsLookupFamily { V4Only, V6Only, Auto };\n\n/**\n * An asynchronous DNS resolver.\n */\nclass DnsResolver {\npublic:\n  virtual ~DnsResolver() = default;\n\n  /**\n   * Final status for a DNS resolution.\n   */\n  enum class ResolutionStatus { Success, Failure };\n\n  /**\n   * Called when a resolution attempt is complete.\n   * @param status supplies the final status of the resolution.\n   * @param response supplies the list of resolved IP addresses and TTLs.\n   */\n  using ResolveCb = std::function<void(ResolutionStatus status, std::list<DnsResponse>&& response)>;\n\n  /**\n   * Initiate an async DNS resolution.\n   * @param dns_name supplies the DNS name to lookup.\n   * @param dns_lookup_family the DNS IP version lookup policy.\n   * @param callback supplies the callback to invoke when the resolution is complete.\n   * @return if non-null, a handle that can be used to cancel the resolution.\n   *         This is only valid until the invocation of callback or ~DnsResolver().\n   */\n  virtual ActiveDnsQuery* resolve(const std::string& dns_name, DnsLookupFamily dns_lookup_family,\n                                  ResolveCb callback) PURE;\n};\n\nusing DnsResolverSharedPtr = std::shared_ptr<DnsResolver>;\n\n} // namespace 
Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/drain_decision.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass DrainDecision {\npublic:\n  virtual ~DrainDecision() = default;\n\n  /**\n   * @return TRUE if a connection should be drained and closed. It is up to individual network\n   *         filters to determine when this should be called for the least impact possible.\n   */\n  virtual bool drainClose() const PURE;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/exception.h",
    "content": "#pragma once\n\n#include \"envoy/common/exception.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Thrown when there is a runtime error creating/binding a listener.\n */\nclass CreateListenerException : public EnvoyException {\npublic:\n  CreateListenerException(const std::string& what) : EnvoyException(what) {}\n};\n\n/**\n * Thrown when there is a runtime error binding a socket.\n */\nclass SocketBindException : public CreateListenerException {\npublic:\n  SocketBindException(const std::string& what, int error_number)\n      : CreateListenerException(what), error_number_(error_number) {}\n\n  // This can't be called errno because otherwise the standard errno macro expansion replaces it.\n  int errorNumber() const { return error_number_; }\n\nprivate:\n  const int error_number_;\n};\n\n} // namespace Network\n} // namespace Envoy"
  },
  {
    "path": "include/envoy/network/filter.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/upstream/host_description.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\n\nnamespace Event {\nclass Dispatcher;\n}\n\nnamespace Network {\n\nclass Connection;\nclass ConnectionSocket;\nclass UdpListener;\nstruct UdpRecvData;\n\n/**\n * Status codes returned by filters that can cause future filters to not get iterated to.\n */\nenum class FilterStatus {\n  // Continue to further filters.\n  Continue,\n  // Stop executing further filters.\n  StopIteration\n};\n\n/**\n * Callbacks used by individual filter instances to communicate with the filter manager.\n */\nclass NetworkFilterCallbacks {\npublic:\n  virtual ~NetworkFilterCallbacks() = default;\n\n  /**\n   * @return the connection that owns this filter.\n   */\n  virtual Connection& connection() PURE;\n};\n\n/**\n * Callbacks used by individual write filter instances to communicate with the filter manager.\n */\nclass WriteFilterCallbacks : public virtual NetworkFilterCallbacks {\npublic:\n  ~WriteFilterCallbacks() override = default;\n\n  /**\n   * Pass data directly to subsequent filters in the filter chain. 
This method is used in\n   * advanced cases in which a filter needs full control over how subsequent filters view data.\n   * Using this method allows a filter to buffer data (or not) and then periodically inject data\n   * to subsequent filters, indicating end_stream at an appropriate time.\n   * This can be used to implement rate limiting, periodic data emission, etc.\n   *\n   * When using this callback, filters should generally move passed in buffer and return\n   * FilterStatus::StopIteration from their onWrite() call, since use of this method\n   * indicates that a filter does not wish to participate in a standard write flow\n   * and will perform any necessary buffering and continuation on its own.\n   *\n   * @param data supplies the write data to be propagated directly to further filters in the filter\n   *             chain.\n   * @param end_stream supplies the end_stream status to be propagated directly to further filters\n   *                   in the filter chain.\n   */\n  virtual void injectWriteDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE;\n};\n\n/**\n * A write path binary connection filter.\n */\nclass WriteFilter {\npublic:\n  virtual ~WriteFilter() = default;\n\n  /**\n   * Called when data is to be written on the connection.\n   * @param data supplies the buffer to be written which may be modified.\n   * @param end_stream supplies whether this is the last byte to write on the connection.\n   * @return status used by the filter manager to manage further filter iteration.\n   */\n  virtual FilterStatus onWrite(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Initializes the write filter callbacks used to interact with the filter manager. It will be\n   * called by the filter manager a single time when the filter is first registered. 
Thus, any\n   * construction that requires the backing connection should take place in the context of this\n   * function.\n   *\n   * IMPORTANT: No outbound networking or complex processing should be done in this function.\n   *            That should be done in the context of ReadFilter::onNewConnection() if needed.\n   *\n   * @param callbacks supplies the callbacks.\n   */\n  virtual void initializeWriteFilterCallbacks(WriteFilterCallbacks&) {}\n};\n\nusing WriteFilterSharedPtr = std::shared_ptr<WriteFilter>;\n\n/**\n * Callbacks used by individual read filter instances to communicate with the filter manager.\n */\nclass ReadFilterCallbacks : public virtual NetworkFilterCallbacks {\npublic:\n  ~ReadFilterCallbacks() override = default;\n\n  /**\n   * If a read filter stopped filter iteration, continueReading() can be called to continue the\n   * filter chain. The next filter will be called with all currently available data in the read\n   * buffer (it will also have onNewConnection() called on it if it was not previously called).\n   */\n  virtual void continueReading() PURE;\n\n  /**\n   * Pass data directly to subsequent filters in the filter chain. This method is used in\n   * advanced cases in which a filter needs full control over how subsequent filters view data,\n   * and does not want to make use of connection-level buffering. Using this method allows\n   * a filter to buffer data (or not) and then periodically inject data to subsequent filters,\n   * indicating end_stream at an appropriate time. 
This can be used to implement rate limiting,\n   * periodic data emission, etc.\n   *\n   * When using this callback, filters should generally move passed in buffer and return\n   * FilterStatus::StopIteration from their onData() call, since use of this method\n   * indicates that a filter does not wish to participate in standard connection-level\n   * buffering and continuation and will perform any necessary buffering and continuation on its\n   * own.\n   *\n   * This callback is different from continueReading() in that the specified data and end_stream\n   * status will be propagated verbatim to further filters in the filter chain\n   * (while continueReading() propagates connection-level read buffer and end_stream status).\n   *\n   * @param data supplies the read data to be propagated directly to further filters in the filter\n   *             chain.\n   * @param end_stream supplies the end_stream status to be propagated directly to further filters\n   *                   in the filter chain.\n   */\n  virtual void injectReadDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Return the currently selected upstream host, if any. This can be used for communication\n   * between multiple network level filters, for example the TCP proxy filter communicating its\n   * selection to another filter for logging.\n   */\n  virtual Upstream::HostDescriptionConstSharedPtr upstreamHost() PURE;\n\n  /**\n   * Set the currently selected upstream host for the connection.\n   */\n  virtual void upstreamHost(Upstream::HostDescriptionConstSharedPtr host) PURE;\n};\n\n/**\n * A read path binary connection filter.\n */\nclass ReadFilter {\npublic:\n  virtual ~ReadFilter() = default;\n\n  /**\n   * Called when data is read on the connection.\n   * @param data supplies the read data which may be modified.\n   * @param end_stream supplies whether this is the last byte on the connection. 
This will only\n   *        be set if the connection has half-close semantics enabled.\n   * @return status used by the filter manager to manage further filter iteration.\n   */\n  virtual FilterStatus onData(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Called when a connection is first established. Filters should do one time long term processing\n   * that needs to be done when a connection is established. Filter chain iteration can be stopped\n   * if needed.\n   * @return status used by the filter manager to manage further filter iteration.\n   */\n  virtual FilterStatus onNewConnection() PURE;\n\n  /**\n   * Initializes the read filter callbacks used to interact with the filter manager. It will be\n   * called by the filter manager a single time when the filter is first registered. Thus, any\n   * construction that requires the backing connection should take place in the context of this\n   * function.\n   *\n   * IMPORTANT: No outbound networking or complex processing should be done in this function.\n   *            That should be done in the context of onNewConnection() if needed.\n   *\n   * @param callbacks supplies the callbacks.\n   */\n  virtual void initializeReadFilterCallbacks(ReadFilterCallbacks& callbacks) PURE;\n};\n\nusing ReadFilterSharedPtr = std::shared_ptr<ReadFilter>;\n\n/**\n * A combination read and write filter. This allows a single filter instance to cover\n * both the read and write paths.\n */\nclass Filter : public WriteFilter, public ReadFilter {};\nusing FilterSharedPtr = std::shared_ptr<Filter>;\n\n/**\n * Interface for adding individual network filters to a manager.\n */\nclass FilterManager {\npublic:\n  virtual ~FilterManager() = default;\n\n  /**\n   * Add a write filter to the connection. Filters are invoked in LIFO order (the last added\n   * filter is called first).\n   */\n  virtual void addWriteFilter(WriteFilterSharedPtr filter) PURE;\n\n  /**\n   * Add a combination filter to the connection. 
Equivalent to calling both addWriteFilter()\n   * and addReadFilter() with the same filter instance.\n   */\n  virtual void addFilter(FilterSharedPtr filter) PURE;\n\n  /**\n   * Add a read filter to the connection. Filters are invoked in FIFO order (the filter added\n   * first is called first).\n   */\n  virtual void addReadFilter(ReadFilterSharedPtr filter) PURE;\n\n  /**\n   * Initialize all of the installed read filters. This effectively calls onNewConnection() on\n   * each of them.\n   * @return true if read filters were initialized successfully, otherwise false.\n   */\n  virtual bool initializeReadFilters() PURE;\n};\n\n/**\n * This function is used to wrap the creation of a network filter chain for new connections as\n * they come in. Filter factories create the lambda at configuration initialization time, and then\n * they are used at runtime.\n * @param filter_manager supplies the filter manager for the connection to install filters\n * to. Typically the function will install a single filter, but it's technically possibly to\n * install more than one if desired.\n */\nusing FilterFactoryCb = std::function<void(FilterManager& filter_manager)>;\n\n/**\n * Callbacks used by individual listener filter instances to communicate with the listener filter\n * manager.\n */\nclass ListenerFilterCallbacks {\npublic:\n  virtual ~ListenerFilterCallbacks() = default;\n\n  /**\n   * @return ConnectionSocket the socket the filter is operating on.\n   */\n  virtual ConnectionSocket& socket() PURE;\n\n  /**\n   * @return the Dispatcher for issuing events.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * If a filter stopped filter iteration by returning FilterStatus::StopIteration,\n   * the filter should call continueFilterChain(true) when complete to continue the filter chain,\n   * or continueFilterChain(false) if the filter execution failed and the connection must be\n   * closed.\n   * @param success boolean telling whether the filter execution 
was successful or not.\n   */\n  virtual void continueFilterChain(bool success) PURE;\n\n  /**\n   * @param name the namespace used in the metadata in reverse DNS format, for example:\n   * envoy.test.my_filter.\n   * @param value the struct to set on the namespace. A merge will be performed with new values for\n   * the same key overriding existing.\n   */\n  virtual void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) PURE;\n\n  /**\n   * @return const envoy::config::core::v3::Metadata& the dynamic metadata associated with this\n   * connection.\n   */\n  virtual envoy::config::core::v3::Metadata& dynamicMetadata() PURE;\n  virtual const envoy::config::core::v3::Metadata& dynamicMetadata() const PURE;\n};\n\n/**\n *  Interface for a listener filter matching with incoming traffic.\n */\nclass ListenerFilterMatcher {\npublic:\n  virtual ~ListenerFilterMatcher() = default;\n  virtual bool matches(Network::ListenerFilterCallbacks& cb) const PURE;\n};\nusing ListenerFilterMatcherPtr = std::unique_ptr<ListenerFilterMatcher>;\nusing ListenerFilterMatcherSharedPtr = std::shared_ptr<ListenerFilterMatcher>;\n\n/**\n * Listener Filter\n */\nclass ListenerFilter {\npublic:\n  virtual ~ListenerFilter() = default;\n\n  /**\n   * Called when a new connection is accepted, but before a Connection is created.\n   * Filter chain iteration can be stopped if needed.\n   * @param cb the callbacks the filter instance can use to communicate with the filter chain.\n   * @return status used by the filter manager to manage further filter iteration.\n   */\n  virtual FilterStatus onAccept(ListenerFilterCallbacks& cb) PURE;\n};\n\nusing ListenerFilterPtr = std::unique_ptr<ListenerFilter>;\n\n/**\n * Interface for filter callbacks and adding listener filters to a manager.\n */\nclass ListenerFilterManager {\npublic:\n  virtual ~ListenerFilterManager() = default;\n\n  /**\n   * Add a filter to the listener. 
Filters are invoked in FIFO order (the filter added\n   * first is called first).\n   * @param listener_filter_matcher supplies the matcher to decide when filter is enabled.\n   * @param filter supplies the filter being added.\n   */\n  virtual void addAcceptFilter(const ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n                               ListenerFilterPtr&& filter) PURE;\n};\n\n/**\n * This function is used to wrap the creation of a listener filter chain for new sockets as they are\n * created. Filter factories create the lambda at configuration initialization time, and then they\n * are used at runtime.\n * @param filter_manager supplies the filter manager for the listener to install filters to.\n * Typically the function will install a single filter, but it's technically possibly to install\n * more than one if desired.\n */\nusing ListenerFilterFactoryCb = std::function<void(ListenerFilterManager& filter_manager)>;\n\n/**\n * Interface representing a single filter chain.\n */\nclass FilterChain {\npublic:\n  virtual ~FilterChain() = default;\n\n  /**\n   * @return const TransportSocketFactory& a transport socket factory to be used by the new\n   * connection.\n   */\n  virtual const TransportSocketFactory& transportSocketFactory() const PURE;\n\n  /**\n   * const std::vector<FilterFactoryCb>& a list of filters to be used by the new connection.\n   */\n  virtual const std::vector<FilterFactoryCb>& networkFilterFactories() const PURE;\n};\n\nusing FilterChainSharedPtr = std::shared_ptr<FilterChain>;\n\n/**\n * A filter chain that can be drained.\n */\nclass DrainableFilterChain : public FilterChain {\npublic:\n  virtual void startDraining() PURE;\n};\n\nusing DrainableFilterChainSharedPtr = std::shared_ptr<DrainableFilterChain>;\n\n/**\n * Interface for searching through configured filter chains.\n */\nclass FilterChainManager {\npublic:\n  virtual ~FilterChainManager() = default;\n\n  /**\n   * Find filter chain that's matching metadata from 
the new connection.\n   * @param socket supplies connection metadata that's going to be used for the filter chain lookup.\n   * @return const FilterChain* filter chain to be used by the new connection,\n   *         nullptr if no matching filter chain was found.\n   */\n  virtual const FilterChain* findFilterChain(const ConnectionSocket& socket) const PURE;\n};\n\n/**\n * Callbacks used by individual UDP listener read filter instances to communicate with the filter\n * manager.\n */\nclass UdpReadFilterCallbacks {\npublic:\n  virtual ~UdpReadFilterCallbacks() = default;\n\n  /**\n   * @return the udp listener that owns this read filter.\n   */\n  virtual UdpListener& udpListener() PURE;\n};\n\n/**\n * UDP Listener Read Filter\n */\nclass UdpListenerReadFilter {\npublic:\n  virtual ~UdpListenerReadFilter() = default;\n\n  /**\n   * Called when a new data packet is received on a UDP listener.\n   * @param data supplies the read data which may be modified.\n   */\n  virtual void onData(UdpRecvData& data) PURE;\n\n  /**\n   * Called when there is an error event in the receive data path.\n   *\n   * @param error_code supplies the received error on the listener.\n   */\n  virtual void onReceiveError(Api::IoError::IoErrorCode error_code) PURE;\n\nprotected:\n  /**\n   * @param callbacks supplies the read filter callbacks used to interact with the filter manager.\n   */\n  UdpListenerReadFilter(UdpReadFilterCallbacks& callbacks) : read_callbacks_(&callbacks) {}\n\n  UdpReadFilterCallbacks* read_callbacks_{};\n};\n\nusing UdpListenerReadFilterPtr = std::unique_ptr<UdpListenerReadFilter>;\n\n/**\n * Interface for adding UDP listener filters to a manager.\n */\nclass UdpListenerFilterManager {\npublic:\n  virtual ~UdpListenerFilterManager() = default;\n\n  /**\n   * Add a read filter to the udp listener. 
Filters are invoked in FIFO order (the\n   * filter added first is called first).\n   * @param filter supplies the filter being added.\n   */\n  virtual void addReadFilter(UdpListenerReadFilterPtr&& filter) PURE;\n};\n\nusing UdpListenerFilterFactoryCb = std::function<void(\n    UdpListenerFilterManager& udp_listener_filter_manager, UdpReadFilterCallbacks& callbacks)>;\n\n/**\n * Creates a chain of network filters for a new connection.\n */\nclass FilterChainFactory {\npublic:\n  virtual ~FilterChainFactory() = default;\n\n  /**\n   * Called to create the network filter chain.\n   * @param connection supplies the connection to create the chain on.\n   * @param filter_factories supplies a list of filter factories to create the chain from.\n   * @return true if filter chain was created successfully. Otherwise\n   *   false, e.g. filter chain is empty.\n   */\n  virtual bool createNetworkFilterChain(Connection& connection,\n                                        const std::vector<FilterFactoryCb>& filter_factories) PURE;\n\n  /**\n   * Called to create the listener filter chain.\n   * @param listener supplies the listener to create the chain on.\n   * @return true if filter chain was created successfully. Otherwise false.\n   */\n  virtual bool createListenerFilterChain(ListenerFilterManager& listener) PURE;\n\n  /**\n   * Called to create a Udp Listener Filter Chain object\n   *\n   * @param udp_listener supplies the listener to create the chain on.\n   * @param callbacks supplies the callbacks needed to create a filter.\n   */\n  virtual void createUdpListenerFilterChain(UdpListenerFilterManager& udp_listener,\n                                            UdpReadFilterCallbacks& callbacks) PURE;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/hash_policy.h",
    "content": "#pragma once\n\n#include \"envoy/network/address.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Network {\n/**\n * Hash policy for transport layer protocol.\n */\nclass HashPolicy {\npublic:\n  virtual ~HashPolicy() = default;\n\n  /**\n   * @param downstream_address is the address of the connected client.\n   * @param upstream_address is the address of the connected server.\n   * @return absl::optional<uint64_t> an optional hash value to route on. A hash value might not be\n   * returned if for example the downstream address is nullptr.\n   */\n  virtual absl::optional<uint64_t>\n  generateHash(const Network::Address::Instance* downstream_address,\n               const Network::Address::Instance* upstream_address) const PURE;\n};\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/io_handle.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n\n#include \"envoy/api/io_error.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/file_event.h\"\n#include \"envoy/network/address.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Buffer {\nstruct RawSlice;\nclass Instance;\n} // namespace Buffer\n\nnamespace Event {\nclass Dispatcher;\n} // namespace Event\n\nusing RawSliceArrays = absl::FixedArray<absl::FixedArray<Buffer::RawSlice>>;\n\nnamespace Network {\n\n/**\n * IoHandle: an abstract interface for all I/O operations\n */\nclass IoHandle {\npublic:\n  virtual ~IoHandle() = default;\n\n  /**\n   * NOTE: Must NOT be used for new use cases!\n   *\n   * This is most probably not the function you are looking for. IoHandle has wrappers for most of\n   * the POSIX socket api functions so there should be no need to interact with the internal fd by\n   * means of syscalls. Moreover, depending on the IoHandle implementation, the fd might not be an\n   * underlying OS file descriptor. If any api function is missing, a wrapper for it should be added\n   * to the IoHandle interface.\n   *\n   * Return data associated with IoHandle. 
It is not necessarily a file descriptor.\n   */\n  virtual os_fd_t fdDoNotUse() const PURE;\n\n  /**\n   * Clean up IoHandle resources\n   */\n  virtual Api::IoCallUint64Result close() PURE;\n\n  /**\n   * Return true if close() hasn't been called.\n   */\n  virtual bool isOpen() const PURE;\n\n  /**\n   * Read data into given slices.\n   * @param max_length supplies the maximum length to read.\n   * @param slices points to the output location.\n   * @param num_slice indicates the number of slices |slices| contains.\n   * @return a Api::IoCallUint64Result with err_ = an Api::IoError instance or\n   * err_ = nullptr and rc_ = the bytes read for success.\n   */\n  virtual Api::IoCallUint64Result readv(uint64_t max_length, Buffer::RawSlice* slices,\n                                        uint64_t num_slice) PURE;\n\n  /**\n   * Read from a io handle directly into buffer.\n   * @param buffer supplies the buffer to read into.\n   * @param max_length supplies the maximum length to read.\n   * @return a IoCallUint64Result with err_ = nullptr and rc_ = the number of bytes\n   * read if successful, or err_ = some IoError for failure. If call failed, rc_ shouldn't be used.\n   */\n  virtual Api::IoCallUint64Result read(Buffer::Instance& buffer, uint64_t max_length) PURE;\n\n  /**\n   * Write the data in slices out.\n   * @param slices points to the location of data to be written.\n   * @param num_slice indicates number of slices |slices| contains.\n   * @return a Api::IoCallUint64Result with err_ = an Api::IoError instance or\n   * err_ = nullptr and rc_ = the bytes written for success.\n   */\n  virtual Api::IoCallUint64Result writev(const Buffer::RawSlice* slices, uint64_t num_slice) PURE;\n\n  /**\n   * Write the buffer out to a file descriptor.\n   * @param buffer supplies the buffer to write to.\n   * @return a IoCallUint64Result with err_ = nullptr and rc_ = the number of bytes\n   * written if successful, or err_ = some IoError for failure. 
If call failed, rc_ shouldn't be\n   * used.\n   */\n  virtual Api::IoCallUint64Result write(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Send a message to the address.\n   * @param slices points to the location of data to be sent.\n   * @param num_slice indicates number of slices |slices| contains.\n   * @param self_ip is the source address whose port should be ignored. Nullptr\n   * if caller wants kernel to select source address.\n   * @param peer_address is the destination address.\n   * @return a Api::IoCallUint64Result with err_ = an Api::IoError instance or\n   * err_ = nullptr and rc_ = the bytes written for success.\n   */\n  virtual Api::IoCallUint64Result sendmsg(const Buffer::RawSlice* slices, uint64_t num_slice,\n                                          int flags, const Address::Ip* self_ip,\n                                          const Address::Instance& peer_address) PURE;\n\n  struct RecvMsgPerPacketInfo {\n    // The destination address from transport header.\n    Address::InstanceConstSharedPtr local_address_;\n    // The source address from transport header.\n    Address::InstanceConstSharedPtr peer_address_;\n    // The payload length of this packet.\n    unsigned int msg_len_{0};\n    // The gso_size, if specified in the transport header\n    unsigned int gso_size_{0};\n  };\n\n  /**\n   * The output parameter type for recvmsg and recvmmsg.\n   */\n  struct RecvMsgOutput {\n    /*\n     * @param num_packets_per_call is the max number of packets allowed per\n     * recvmmsg call. For recvmsg call, any value larger than 0 is allowed, but\n     * only one packet will be returned.\n     * @param dropped_packets points to a variable to store how many packets are\n     * dropped so far. 
If nullptr, recvmsg() won't try to get this information\n     * from transport header.\n     */\n    RecvMsgOutput(size_t num_packets_per_call, uint32_t* dropped_packets)\n        : dropped_packets_(dropped_packets), msg_(num_packets_per_call) {}\n\n    // If not nullptr, its value is the total number of packets dropped. recvmsg() will update it\n    // when more packets are dropped.\n    uint32_t* dropped_packets_;\n\n    // Packet headers for each received packet. It's populated according to packet receive order.\n    // Only the first entry is used to return per packet information by recvmsg.\n    absl::FixedArray<RecvMsgPerPacketInfo> msg_;\n  };\n\n  /**\n   * Receive a message into given slices, output overflow, source/destination\n   * addresses via passed-in parameters upon success.\n   * @param slices points to the location of receiving buffer.\n   * @param num_slice indicates number of slices |slices| contains.\n   * @param self_port the port this handle is assigned to. This is used to populate\n   * local_address because local port can't be retrieved from control message.\n   * @param output modified upon each call to return fields requested in it.\n   * @return a Api::IoCallUint64Result with err_ = an Api::IoError instance or\n   * err_ = nullptr and rc_ = the bytes received for success.\n   */\n  virtual Api::IoCallUint64Result recvmsg(Buffer::RawSlice* slices, const uint64_t num_slice,\n                                          uint32_t self_port, RecvMsgOutput& output) PURE;\n\n  /**\n   * If the platform supports, receive multiple messages into given slices, output overflow,\n   * source/destination addresses per message via passed-in parameters upon success.\n   * @param slices are the receive buffers for the messages. 
Each message\n   * received are stored in an individual entry of |slices|.\n   * @param self_port is the same as the one in recvmsg().\n   * @param output is modified upon each call and each message received.\n   */\n  virtual Api::IoCallUint64Result recvmmsg(RawSliceArrays& slices, uint32_t self_port,\n                                           RecvMsgOutput& output) PURE;\n\n  /**\n   * Read data into given buffer for connected handles\n   * @param buffer buffer to read the data into\n   * @param length buffer length\n   * @param flags flags to pass to the underlying recv function (see man 2 recv)\n   */\n  virtual Api::IoCallUint64Result recv(void* buffer, size_t length, int flags) PURE;\n\n  /**\n   * return true if the platform supports recvmmsg() and sendmmsg().\n   */\n  virtual bool supportsMmsg() const PURE;\n\n  /**\n   * return true if the platform supports udp_gro\n   */\n  virtual bool supportsUdpGro() const PURE;\n\n  /**\n   * Bind to address. The handle should have been created with a call to socket()\n   * @param address address to bind to.\n   * @param addrlen address length\n   * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call\n   *   is successful, errno_ shouldn't be used.\n   */\n  virtual Api::SysCallIntResult bind(Address::InstanceConstSharedPtr address) PURE;\n\n  /**\n   * Listen on bound handle.\n   * @param backlog maximum number of pending connections for listener\n   * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. 
If the call\n   *   is successful, errno_ shouldn't be used.\n   */\n  virtual Api::SysCallIntResult listen(int backlog) PURE;\n\n  /**\n   * Accept on listening handle\n   * @param addr remote address to be returned\n   * @param addrlen remote address length\n   * @param flags flags to be applied to accepted session\n   * @return accepted IoHandlePtr\n   */\n  virtual std::unique_ptr<IoHandle> accept(struct sockaddr* addr, socklen_t* addrlen) PURE;\n\n  /**\n   * Connect to address. The handle should have been created with a call to socket()\n   * on this object.\n   * @param address remote address to connect to.\n   * @param addrlen remote address length\n   * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call\n   *   is successful, errno_ shouldn't be used.\n   */\n  virtual Api::SysCallIntResult connect(Address::InstanceConstSharedPtr address) PURE;\n\n  /**\n   * Set option (see man 2 setsockopt)\n   */\n  virtual Api::SysCallIntResult setOption(int level, int optname, const void* optval,\n                                          socklen_t optlen) PURE;\n\n  /**\n   * Get option (see man 2 getsockopt)\n   */\n  virtual Api::SysCallIntResult getOption(int level, int optname, void* optval,\n                                          socklen_t* optlen) PURE;\n\n  /**\n   * Toggle blocking behavior\n   * @param blocking flag to set/unset blocking state\n   * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. 
If the call\n   * is successful, errno_ shouldn't be used.\n   */\n  virtual Api::SysCallIntResult setBlocking(bool blocking) PURE;\n\n  /**\n   * @return the domain used by underlying socket (see man 2 socket)\n   */\n  virtual absl::optional<int> domain() PURE;\n\n  /**\n   * Get local address (ip:port pair)\n   * @return local address as @ref Address::InstanceConstSharedPtr\n   */\n  virtual Address::InstanceConstSharedPtr localAddress() PURE;\n\n  /**\n   * Get peer's address (ip:port pair)\n   * @return peer's address as @ref Address::InstanceConstSharedPtr\n   */\n  virtual Address::InstanceConstSharedPtr peerAddress() PURE;\n\n  /**\n   * Creates a file event that will signal when the io handle is readable, writable or closed.\n   * @param dispatcher dispatcher to be used to allocate the file event.\n   * @param cb supplies the callback to fire when the handle is ready.\n   * @param trigger specifies whether to edge or level trigger.\n   * @param events supplies a logical OR of @ref Event::FileReadyType events that the file event\n   *               should initially listen on.\n   * @return @ref Event::FileEventPtr\n   */\n  virtual Event::FileEventPtr createFileEvent(Event::Dispatcher& dispatcher, Event::FileReadyCb cb,\n                                              Event::FileTriggerType trigger, uint32_t events) PURE;\n\n  /**\n   * Shut down part of a full-duplex connection (see man 2 shutdown)\n   */\n  virtual Api::SysCallIntResult shutdown(int how) PURE;\n\n  /**\n   *  @return absl::optional<std::chrono::milliseconds> An optional of the most recent round-trip\n   *  time of the connection. If the platform does not support this, then an empty optional is\n   *  returned.\n   */\n  virtual absl::optional<std::chrono::milliseconds> lastRoundTripTime() PURE;\n};\n\nusing IoHandlePtr = std::unique_ptr<IoHandle>;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/listen_socket.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n#include <tuple>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/io_handle.h\"\n#include \"envoy/network/socket.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * A socket passed to a connection. For server connections this represents the accepted socket, and\n * for client connections this represents the socket being connected to a remote address.\n *\n * TODO(jrajahalme): Hide internals (e.g., fd) from listener filters by providing callbacks filters\n * may need (set/getsockopt(), peek(), recv(), etc.)\n */\nclass ConnectionSocket : public virtual Socket {\npublic:\n  ~ConnectionSocket() override = default;\n\n  /**\n   * @return the remote address of the socket.\n   */\n  virtual const Address::InstanceConstSharedPtr& remoteAddress() const PURE;\n\n  /**\n   * @return the direct remote address of the socket. This is the address of the directly\n   *         connected peer, and cannot be modified by listener filters.\n   */\n  virtual const Address::InstanceConstSharedPtr& directRemoteAddress() const PURE;\n\n  /**\n   * Restores the local address of the socket. On accepted sockets the local address defaults to the\n   * one at which the connection was received at, which is the same as the listener's address, if\n   * the listener is bound to a specific address. Call this to restore the address to a value\n   * different from the one the socket was initially accepted at. This should only be called when\n   * restoring the original destination address of a connection redirected by iptables REDIRECT. 
The\n   * caller is responsible for making sure the new address is actually different.\n   *\n   * @param local_address the new local address.\n   */\n  virtual void restoreLocalAddress(const Address::InstanceConstSharedPtr& local_address) PURE;\n\n  /**\n   * Set the remote address of the socket.\n   */\n  virtual void setRemoteAddress(const Address::InstanceConstSharedPtr& remote_address) PURE;\n\n  /**\n   * @return true if the local address has been restored to a value that is different from the\n   *         address the socket was initially accepted at.\n   */\n  virtual bool localAddressRestored() const PURE;\n\n  /**\n   * Set detected transport protocol (e.g. RAW_BUFFER, TLS).\n   */\n  virtual void setDetectedTransportProtocol(absl::string_view protocol) PURE;\n\n  /**\n   * @return detected transport protocol (e.g. RAW_BUFFER, TLS), if any.\n   */\n  virtual absl::string_view detectedTransportProtocol() const PURE;\n\n  /**\n   * Set requested application protocol(s) (e.g. ALPN in TLS).\n   */\n  virtual void\n  setRequestedApplicationProtocols(const std::vector<absl::string_view>& protocol) PURE;\n\n  /**\n   * @return requested application protocol(s) (e.g. ALPN in TLS), if any.\n   */\n  virtual const std::vector<std::string>& requestedApplicationProtocols() const PURE;\n\n  /**\n   * Set requested server name (e.g. SNI in TLS).\n   */\n  virtual void setRequestedServerName(absl::string_view server_name) PURE;\n\n  /**\n   * @return requested server name (e.g. SNI in TLS), if any.\n   */\n  virtual absl::string_view requestedServerName() const PURE;\n\n  /**\n   *  @return absl::optional<std::chrono::milliseconds> An optional of the most recent round-trip\n   *  time of the connection. 
If the platform does not support this, then an empty optional is\n   *  returned.\n   */\n  virtual absl::optional<std::chrono::milliseconds> lastRoundTripTime() PURE;\n};\n\nusing ConnectionSocketPtr = std::unique_ptr<ConnectionSocket>;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/listener.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/api/io_error.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/resource.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/connection_balancer.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/network/udp_packet_writer_handler.h\"\n#include \"envoy/stats/scope.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass ActiveUdpListenerFactory;\nclass UdpListenerWorkerRouter;\n\nusing UdpListenerWorkerRouterOptRef =\n    absl::optional<std::reference_wrapper<UdpListenerWorkerRouter>>;\n\n/**\n * ListenSocketFactory is a member of ListenConfig to provide listen socket.\n * Listeners created from the same ListenConfig instance have listening sockets\n * provided by the same ListenSocketFactory instance.\n */\nclass ListenSocketFactory {\npublic:\n  virtual ~ListenSocketFactory() = default;\n\n  /**\n   * Called during actual listener creation.\n   * @return the socket to be used for a certain listener, which might be shared\n   * with other listeners of the same config on other worker threads.\n   */\n  virtual SocketSharedPtr getListenSocket() PURE;\n\n  /**\n   * @return the type of the socket getListenSocket() returns.\n   */\n  virtual Socket::Type socketType() const PURE;\n\n  /**\n   * @return the listening address of the socket getListenSocket() returns. 
Before getListenSocket()\n   * is called, the return value might has 0 as port number if the config doesn't specify it.\n   */\n  virtual const Address::InstanceConstSharedPtr& localAddress() const PURE;\n\n  /**\n   * @return the socket shared by worker threads if any; otherwise return null.\n   */\n  virtual SocketOptRef sharedSocket() const PURE;\n};\n\nusing ListenSocketFactorySharedPtr = std::shared_ptr<ListenSocketFactory>;\n\n/**\n * A configuration for an individual listener.\n */\nclass ListenerConfig {\npublic:\n  virtual ~ListenerConfig() = default;\n\n  /**\n   * @return FilterChainManager& the factory for adding and searching through configured\n   *         filter chains.\n   */\n  virtual FilterChainManager& filterChainManager() PURE;\n\n  /**\n   * @return FilterChainFactory& the factory for setting up the filter chain on a new\n   *         connection.\n   */\n  virtual FilterChainFactory& filterChainFactory() PURE;\n\n  /**\n   * @return ListenSocketFactory& the factory to create listen socket.\n   */\n  virtual ListenSocketFactory& listenSocketFactory() PURE;\n\n  /**\n   * @return bool specifies whether the listener should actually listen on the port.\n   *         A listener that doesn't listen on a port can only receive connections\n   *         redirected from other listeners.\n   */\n  virtual bool bindToPort() PURE;\n\n  /**\n   * @return bool if a connection should be handed off to another Listener after the original\n   *         destination address has been restored. 'true' when 'use_original_dst' flag in listener\n   *         configuration is set, false otherwise. 
Note that this flag is deprecated and will be\n   *         removed from the v2 API.\n   */\n  virtual bool handOffRestoredDestinationConnections() const PURE;\n\n  /**\n   * @return uint32_t providing a soft limit on size of the listener's new connection read and\n   * write buffers.\n   */\n  virtual uint32_t perConnectionBufferLimitBytes() const PURE;\n\n  /**\n   * @return std::chrono::milliseconds the time to wait for all listener filters to complete\n   *         operation. If the timeout is reached, the accepted socket is closed without a\n   *         connection being created unless continueOnListenerFiltersTimeout() returns true.\n   *         0 specifies a disabled timeout.\n   */\n  virtual std::chrono::milliseconds listenerFiltersTimeout() const PURE;\n\n  /**\n   * @return bool whether the listener should try to create a connection when listener filters\n   *         time out.\n   */\n  virtual bool continueOnListenerFiltersTimeout() const PURE;\n\n  /**\n   * @return Stats::Scope& the stats scope to use for all listener specific stats.\n   */\n  virtual Stats::Scope& listenerScope() PURE;\n\n  /**\n   * @return uint64_t the tag the listener should use for connection handler tracking.\n   */\n  virtual uint64_t listenerTag() const PURE;\n\n  /**\n   * @return const std::string& the listener's name.\n   */\n  virtual const std::string& name() const PURE;\n\n  /**\n   * @return factory pointer if listening on UDP socket, otherwise return\n   * nullptr.\n   */\n  virtual ActiveUdpListenerFactory* udpListenerFactory() PURE;\n\n  /**\n   * @return factory if writing on UDP socket, otherwise return\n   * nullopt.\n   */\n  virtual UdpPacketWriterFactoryOptRef udpPacketWriterFactory() PURE;\n\n  /**\n   * @return the ``UdpListenerWorkerRouter`` for this listener. 
This will\n   * be non-empty iff this is a UDP listener.\n   */\n  virtual UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() PURE;\n\n  /**\n   * @return traffic direction of the listener.\n   */\n  virtual envoy::config::core::v3::TrafficDirection direction() const PURE;\n\n  /**\n   * @return the connection balancer for this listener. All listeners have a connection balancer,\n   *         though the implementation may be a NOP balancer.\n   */\n  virtual ConnectionBalancer& connectionBalancer() PURE;\n\n  /**\n   * Open connection resources for this listener.\n   */\n  virtual ResourceLimit& openConnections() PURE;\n\n  /**\n   * @return std::vector<AccessLog::InstanceSharedPtr> access logs emitted by the listener.\n   */\n  virtual const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() const PURE;\n\n  /**\n   * @return pending connection backlog for TCP listeners.\n   */\n  virtual uint32_t tcpBacklogSize() const PURE;\n\n  /**\n   * @return init manager of the listener.\n   */\n  virtual Init::Manager& initManager() PURE;\n};\n\n/**\n * Callbacks invoked by a listener.\n */\nclass TcpListenerCallbacks {\npublic:\n  virtual ~TcpListenerCallbacks() = default;\n\n  /**\n   * Called when a new connection is accepted.\n   * @param socket supplies the socket that is moved into the callee.\n   */\n  virtual void onAccept(ConnectionSocketPtr&& socket) PURE;\n\n  /**\n   * Called when a new connection is rejected.\n   */\n  virtual void onReject() PURE;\n};\n\n/**\n * Utility struct that encapsulates the information from a udp socket's recvmmsg call.\n */\nstruct UdpRecvData {\n  struct LocalPeerAddresses {\n    bool operator==(const LocalPeerAddresses& rhs) const {\n      // TODO(mattklein123): Implement a hash directly on Address that does not use strings.\n      return local_->asStringView() == rhs.local_->asStringView() &&\n             peer_->asStringView() == rhs.peer_->asStringView();\n    }\n\n    template <typename H> friend H AbslHashValue(H h, 
const LocalPeerAddresses& addresses) {\n      // TODO(mattklein123): Implement a hash directly on Address that does not use strings.\n      return H::combine(std::move(h), addresses.local_->asStringView(),\n                        addresses.peer_->asStringView());\n    }\n\n    Address::InstanceConstSharedPtr local_;\n    Address::InstanceConstSharedPtr peer_;\n  };\n\n  LocalPeerAddresses addresses_;\n  Buffer::InstancePtr buffer_;\n  MonotonicTime receive_time_;\n};\n\n/**\n * Encapsulates the information needed to send a udp packet to a target\n */\nstruct UdpSendData {\n  const Address::Ip* local_ip_;\n  const Address::Instance& peer_address_;\n\n  // The buffer is a reference so that it can be reused by the sender to send different\n  // messages\n  Buffer::Instance& buffer_;\n};\n\n/**\n * UDP listener callbacks.\n */\nclass UdpListenerCallbacks {\npublic:\n  virtual ~UdpListenerCallbacks() = default;\n\n  /**\n   * Called whenever data is received by the underlying udp socket.\n   * TODO(danzh2010): Consider returning a value to indicate if more work is to\n   * be done in the next event loop due to a limit on how much processing is\n   * allowed in each event loop.\n   *\n   * @param data UdpRecvData from the underlying socket.\n   */\n  virtual void onData(UdpRecvData&& data) PURE;\n\n  /**\n   * Called when the underlying socket is ready for read, before onData() is\n   * called. 
Called only once per event loop, even if followed by multiple\n   * onData() calls.\n   *\n   */\n  virtual void onReadReady() PURE;\n\n  /**\n   * Called when the underlying socket is ready for write.\n   *\n   * @param socket Underlying server socket for the listener.\n   *\n   * TODO(conqerAtapple): Maybe we need a UdpWriter here instead of Socket.\n   */\n  virtual void onWriteReady(const Socket& socket) PURE;\n\n  /**\n   * Called when there is an error event in the receive data path.\n   * The send side error is a return type on the send method.\n   *\n   * @param error_code supplies the received error on the listener.\n   */\n  virtual void onReceiveError(Api::IoError::IoErrorCode error_code) PURE;\n\n  /**\n   * Returns the pointer to the udp_packet_writer associated with the\n   * UdpListenerCallback\n   */\n  virtual UdpPacketWriter& udpPacketWriter() PURE;\n\n  /**\n   * Returns the index of this worker, in the range of [0, concurrency).\n   */\n  virtual uint32_t workerIndex() const PURE;\n\n  /**\n   * Called whenever data is received on the underlying udp socket, on\n   * the destination worker for the datagram according to ``destination()``.\n   */\n  virtual void onDataWorker(Network::UdpRecvData&& data) PURE;\n\n  /**\n   * Posts ``data`` to be delivered on this worker.\n   */\n  virtual void post(Network::UdpRecvData&& data) PURE;\n};\n\nusing UdpListenerCallbacksOptRef = absl::optional<std::reference_wrapper<UdpListenerCallbacks>>;\n\n/**\n * An abstract socket listener. 
Free the listener to stop listening on the socket.\n */\nclass Listener {\npublic:\n  virtual ~Listener() = default;\n\n  /**\n   * Temporarily disable accepting new connections.\n   */\n  virtual void disable() PURE;\n\n  /**\n   * Enable accepting new connections.\n   */\n  virtual void enable() PURE;\n};\n\nusing ListenerPtr = std::unique_ptr<Listener>;\n\n/**\n * A UDP listener interface.\n */\nclass UdpListener : public virtual Listener {\npublic:\n  ~UdpListener() override = default;\n\n  /**\n   * @return Event::Dispatcher& the dispatcher backing this listener.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * @return the local address of the socket.\n   */\n  virtual const Network::Address::InstanceConstSharedPtr& localAddress() const PURE;\n\n  /**\n   * Send data through the underlying udp socket. If the send buffer of the socket FD is full, an\n   * error code is returned.\n   * TODO(sumukhs): We do not currently handle max MTU size of the datagram. Determine if we could\n   * expose the path MTU information to the caller.\n   *\n   * @param data Supplies the data to send to a target using udp.\n   * @return the error code of the underlying send api. On successfully sending 'n' bytes, the\n   * underlying buffers in the data  are drained by 'n' bytes. 
The remaining can be retried by the\n   * sender.\n   */\n  virtual Api::IoCallUint64Result send(const UdpSendData& data) PURE;\n\n  /**\n   * Flushes out remaining buffered data since last call of send().\n   * This is a no-op if the implementation doesn't buffer data while sending.\n   *\n   * @return the error code of the underlying flush api.\n   */\n  virtual Api::IoCallUint64Result flush() PURE;\n\n  /**\n   * Make this listener readable at the beginning of the next event loop.\n   *\n   * @note: it may become readable during the current loop if feature\n   * ``envoy.reloadable_features.activate_fds_next_event_loop`` is disabled.\n   */\n  virtual void activateRead() PURE;\n};\n\nusing UdpListenerPtr = std::unique_ptr<UdpListener>;\n\n/**\n * Handles delivering datagrams to the correct worker.\n */\nclass UdpListenerWorkerRouter {\npublic:\n  virtual ~UdpListenerWorkerRouter() = default;\n\n  /**\n   * Registers a worker's callbacks for this listener. This worker must accept\n   * packets until it calls ``unregisterWorker``.\n   */\n  virtual void registerWorkerForListener(UdpListenerCallbacks& listener) PURE;\n\n  /**\n   * Unregisters a worker's callbacks for this listener.\n   */\n  virtual void unregisterWorkerForListener(UdpListenerCallbacks& listener) PURE;\n\n  /**\n   * Deliver ``data`` to the correct worker by calling ``onDataWorker()``\n   * or ``post()`` on one of the registered workers.\n   */\n  virtual void deliver(uint32_t dest_worker_index, UdpRecvData&& data) PURE;\n};\n\nusing UdpListenerWorkerRouterPtr = std::unique_ptr<UdpListenerWorkerRouter>;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/post_io_action.h",
    "content": "#pragma once\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Action that should occur on a connection after I/O.\n */\nenum class PostIoAction {\n  // Close the connection.\n  Close,\n  // Keep the connection open.\n  KeepOpen\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/proxy_protocol.h",
    "content": "#pragma once\n\n#include \"envoy/network/address.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nstruct ProxyProtocolData {\n  const Network::Address::InstanceConstSharedPtr src_addr_;\n  const Network::Address::InstanceConstSharedPtr dst_addr_;\n};\n\n} // namespace Network\n} // namespace Envoy"
  },
  {
    "path": "include/envoy/network/resolver.h",
    "content": "#pragma once\n\n#include <sys/types.h>\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/network/address.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\n\n/**\n * Interface for all network address resolvers.\n */\nclass Resolver : public Config::UntypedFactory {\npublic:\n  ~Resolver() override = default;\n\n  /**\n   * Resolve a custom address string and port to an Address::Instance.\n   * @param socket_address supplies the socket address to resolve.\n   * @return InstanceConstSharedPtr appropriate Address::Instance.\n   */\n  virtual InstanceConstSharedPtr\n  resolve(const envoy::config::core::v3::SocketAddress& socket_address) PURE;\n\n  std::string category() const override { return \"envoy.resolvers\"; }\n};\n\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/socket.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/io_handle.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n// SocketOptionName is an optional value that captures the setsockopt(2)\n// arguments. The idea here is that if a socket option is not supported\n// on a platform, we can make this the empty value, which allows us to\n// avoid #ifdef proliferation.\nstruct SocketOptionName {\n  SocketOptionName() = default;\n  SocketOptionName(const SocketOptionName&) = default;\n  SocketOptionName(int level, int option, const std::string& name)\n      : value_(std::make_tuple(level, option, name)) {}\n\n  int level() const { return std::get<0>(value_.value()); }\n  int option() const { return std::get<1>(value_.value()); }\n  const std::string& name() const { return std::get<2>(value_.value()); }\n\n  bool hasValue() const { return value_.has_value(); }\n  bool operator==(const SocketOptionName& rhs) const { return value_ == rhs.value_; }\n\nprivate:\n  absl::optional<std::tuple<int, int, std::string>> value_;\n};\n\n// ENVOY_MAKE_SOCKET_OPTION_NAME is a helper macro to generate a\n// SocketOptionName with a descriptive string name.\n#define ENVOY_MAKE_SOCKET_OPTION_NAME(level, option)                                               \\\n  Network::SocketOptionName(level, option, #level \"/\" #option)\n\n/**\n * Base class for Sockets\n */\nclass Socket {\npublic:\n  virtual ~Socket() = default;\n\n  /**\n   * Type of sockets supported. 
See man 2 socket for more details\n   */\n  enum class Type { Stream, Datagram };\n\n  /**\n   * @return the local address of the socket.\n   */\n  virtual const Address::InstanceConstSharedPtr& localAddress() const PURE;\n\n  /**\n   * Set the local address of the socket. On accepted sockets the local address defaults to the\n   * one at which the connection was received at, which is the same as the listener's address, if\n   * the listener is bound to a specific address.\n   *\n   * @param local_address the new local address.\n   */\n  virtual void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) PURE;\n\n  /**\n   * @return IoHandle for the underlying connection\n   */\n  virtual IoHandle& ioHandle() PURE;\n\n  /**\n   * @return const IoHandle for the underlying connection\n   */\n  virtual const IoHandle& ioHandle() const PURE;\n\n  /**\n   * @return the type (stream or datagram) of the socket.\n   */\n  virtual Socket::Type socketType() const PURE;\n\n  /**\n   * @return the type (IP or pipe) of addresses used by the socket (subset of socket domain)\n   */\n  virtual Address::Type addressType() const PURE;\n\n  /**\n   * @return the IP version used by the socket if address type is IP, absl::nullopt otherwise\n   */\n  virtual absl::optional<Address::IpVersion> ipVersion() const PURE;\n\n  /**\n   * Close the underlying socket.\n   */\n  virtual void close() PURE;\n\n  /**\n   * Return true if close() hasn't been called.\n   */\n  virtual bool isOpen() const PURE;\n\n  /**\n   * Bind a socket to this address. The socket should have been created with a call to socket()\n   * @param address address to bind the socket to.\n   * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. 
If the call\n   *   is successful, errno_ shouldn't be used.\n   */\n  virtual Api::SysCallIntResult bind(const Address::InstanceConstSharedPtr address) PURE;\n\n  /**\n   * Listen on bound socket.\n   * @param backlog maximum number of pending connections for listener\n   * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call\n   *   is successful, errno_ shouldn't be used.\n   */\n  virtual Api::SysCallIntResult listen(int backlog) PURE;\n\n  /**\n   * Connect a socket to this address. The socket should have been created with a call to socket()\n   * on this object.\n   * @param address remote address to connect to.\n   * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call\n   *   is successful, errno_ shouldn't be used.\n   */\n  virtual Api::SysCallIntResult connect(const Address::InstanceConstSharedPtr address) PURE;\n\n  /**\n   * Propagates option to underlying socket (@see man 2 setsockopt)\n   */\n  virtual Api::SysCallIntResult setSocketOption(int level, int optname, const void* optval,\n                                                socklen_t optlen) PURE;\n\n  /**\n   * Retrieves option from underlying socket (@see man 2 getsockopt)\n   */\n  virtual Api::SysCallIntResult getSocketOption(int level, int optname, void* optval,\n                                                socklen_t* optlen) const PURE;\n\n  /**\n   * Toggle socket blocking state\n   */\n  virtual Api::SysCallIntResult setBlockingForTest(bool blocking) PURE;\n\n  /**\n   * Visitor class for setting socket options.\n   */\n  class Option {\n  public:\n    virtual ~Option() = default;\n\n    /**\n     * @param socket the socket on which to apply options.\n     * @param state the current state of the socket. 
Significant for options that can only be\n     *        set for some particular state of the socket.\n     * @return true if succeeded, false otherwise.\n     */\n    virtual bool setOption(Socket& socket,\n                           envoy::config::core::v3::SocketOption::SocketState state) const PURE;\n\n    /**\n     * @param vector of bytes to which the option should append hash key data that will be used\n     *        to separate connections based on the option. Any data already in the key vector must\n     *        not be modified.\n     */\n    virtual void hashKey(std::vector<uint8_t>& key) const PURE;\n\n    /**\n     * Contains details about what this option applies to a socket.\n     */\n    struct Details {\n      SocketOptionName name_;\n      std::string value_; ///< Binary string representation of an option's value.\n\n      bool operator==(const Details& other) const {\n        return name_ == other.name_ && value_ == other.value_;\n      }\n    };\n\n    /**\n     * @param socket The socket for which we want to know the options that would be applied.\n     * @param state The state at which we would apply the options.\n     * @return What we would apply to the socket at the provided state. 
Empty if we'd apply nothing.\n     */\n    virtual absl::optional<Details>\n    getOptionDetails(const Socket& socket,\n                     envoy::config::core::v3::SocketOption::SocketState state) const PURE;\n  };\n\n  using OptionConstSharedPtr = std::shared_ptr<const Option>;\n  using Options = std::vector<OptionConstSharedPtr>;\n  using OptionsSharedPtr = std::shared_ptr<Options>;\n\n  static OptionsSharedPtr& appendOptions(OptionsSharedPtr& to, const OptionsSharedPtr& from) {\n    to->insert(to->end(), from->begin(), from->end());\n    return to;\n  }\n\n  static bool applyOptions(const OptionsSharedPtr& options, Socket& socket,\n                           envoy::config::core::v3::SocketOption::SocketState state) {\n    if (options == nullptr) {\n      return true;\n    }\n    for (const auto& option : *options) {\n      if (!option->setOption(socket, state)) {\n        return false;\n      }\n    }\n    return true;\n  }\n\n  /**\n   * Add a socket option visitor for later retrieval with options().\n   */\n  virtual void addOption(const OptionConstSharedPtr&) PURE;\n\n  /**\n   * Add socket option visitors for later retrieval with options().\n   */\n  virtual void addOptions(const OptionsSharedPtr&) PURE;\n\n  /**\n   * @return the socket options stored earlier with addOption() and addOptions() calls, if any.\n   */\n  virtual const OptionsSharedPtr& options() const PURE;\n};\n\nusing SocketPtr = std::unique_ptr<Socket>;\nusing SocketSharedPtr = std::shared_ptr<Socket>;\nusing SocketOptRef = absl::optional<std::reference_wrapper<Socket>>;\n\n} // namespace Network\n} // namespace Envoy"
  },
  {
    "path": "include/envoy/network/socket_interface.h",
    "content": "#pragma once\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/network/socket.h\"\n\nnamespace Envoy {\nnamespace Network {\nclass SocketInterface {\npublic:\n  virtual ~SocketInterface() = default;\n\n  /**\n   * Low level api to create a socket in the underlying host stack. Does not create a\n   * @ref Network::SocketImpl\n   * @param type type of socket requested\n   * @param addr_type type of address used with the socket\n   * @param version IP version if address type is IP\n   * @param socket_v6only if the socket is ipv6 version only\n   * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor\n   */\n  virtual IoHandlePtr socket(Socket::Type type, Address::Type addr_type, Address::IpVersion version,\n                             bool socket_v6only) const PURE;\n\n  /**\n   * Low level api to create a socket in the underlying host stack. Does not create an\n   * @ref Network::SocketImpl\n   * @param socket_type type of socket requested\n   * @param addr address that is gleaned for address type and version if needed\n   * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor\n   */\n  virtual IoHandlePtr socket(Socket::Type socket_type,\n                             const Address::InstanceConstSharedPtr addr) const PURE;\n\n  /**\n   * Wrap socket file descriptor in IoHandle\n   * @param fd socket file descriptor to be wrapped\n   * @return @ref Network::IoHandlePtr that wraps the socket file descriptor\n   */\n  virtual IoHandlePtr socket(os_fd_t fd) PURE;\n\n  /**\n   * Returns true if the given family is supported on this machine.\n   * @param domain the IP family.\n   */\n  virtual bool ipFamilySupported(int domain) PURE;\n};\n\nusing SocketInterfacePtr = std::unique_ptr<SocketInterface>;\n\n/**\n * Create IoHandle for given address.\n * @param type type of socket to be requested\n * @param addr address that is gleaned for address type, version 
and socket interface name\n * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor\n */\nstatic inline IoHandlePtr ioHandleForAddr(Socket::Type type,\n                                          const Address::InstanceConstSharedPtr addr) {\n  return addr->socketInterface().socket(type, addr);\n}\n\n} // namespace Network\n} // namespace Envoy"
  },
  {
    "path": "include/envoy/network/transport_socket.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/network/io_handle.h\"\n#include \"envoy/network/post_io_action.h\"\n#include \"envoy/network/proxy_protocol.h\"\n#include \"envoy/ssl/connection.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass Connection;\nenum class ConnectionEvent;\n\n/**\n * Result of each I/O event.\n */\nstruct IoResult {\n  PostIoAction action_;\n\n  /**\n   * Number of bytes processed by the I/O event.\n   */\n  uint64_t bytes_processed_;\n\n  /**\n   * True if an end-of-stream was read from a connection. This\n   * can only be true for read operations.\n   */\n  bool end_stream_read_;\n};\n\n/**\n * Callbacks used by transport socket instances to communicate with connection.\n */\nclass TransportSocketCallbacks {\npublic:\n  virtual ~TransportSocketCallbacks() = default;\n\n  /**\n   * @return reference to the IoHandle associated with the connection.\n   */\n  virtual IoHandle& ioHandle() PURE;\n\n  /**\n   * @return const reference to the IoHandle associated with the connection.\n   */\n  virtual const IoHandle& ioHandle() const PURE;\n\n  /**\n   * @return Network::Connection& the connection interface.\n   */\n  virtual Network::Connection& connection() PURE;\n\n  /**\n   * @return bool whether the read buffer should be drained. This is used to enforce yielding for\n   *         configured read limits.\n   */\n  virtual bool shouldDrainReadBuffer() PURE;\n\n  /**\n   * Mark read buffer ready to read in the event loop. This is used when yielding following\n   * shouldDrainReadBuffer().\n   */\n  virtual void setReadBufferReady() PURE;\n\n  /**\n   * Raise a connection event to the connection. This can be used by a secure socket (e.g. 
TLS)\n   * to raise a connected event when handshake is done.\n   * @param event supplies the connection event\n   */\n  virtual void raiseEvent(ConnectionEvent event) PURE;\n\n  /**\n   * If the callbacks' write buffer is not empty, try to drain the buffer.\n   * As of 2/20, used by Google.\n   */\n  virtual void flushWriteBuffer() PURE;\n};\n\n/**\n * A transport socket that does actual read / write. It can also do some transformations on\n * the data (e.g. TLS).\n */\nclass TransportSocket {\npublic:\n  virtual ~TransportSocket() = default;\n\n  /**\n   * Called by connection once to initialize the transport socket callbacks that the transport\n   * socket should use.\n   * @param callbacks supplies the callbacks instance.\n   */\n  virtual void setTransportSocketCallbacks(TransportSocketCallbacks& callbacks) PURE;\n\n  /**\n   * @return std::string the protocol to use as selected by network level negotiation. (E.g., ALPN).\n   *         If network level negotiation is not supported by the connection or no protocol\n   *         has been negotiated the empty string is returned.\n   */\n  virtual std::string protocol() const PURE;\n\n  /**\n   * @return std::string the last failure reason occurred on the transport socket. 
If no failure\n   *         has been occurred the empty string is returned.\n   */\n  virtual absl::string_view failureReason() const PURE;\n\n  /**\n   * @return bool whether the socket can be flushed and closed.\n   */\n  virtual bool canFlushClose() PURE;\n\n  /**\n   * Closes the transport socket.\n   * @param event supplies the connection event that is closing the socket.\n   */\n  virtual void closeSocket(Network::ConnectionEvent event) PURE;\n\n  /**\n   * @param buffer supplies the buffer to read to.\n   * @return IoResult the result of the read action.\n   */\n  virtual IoResult doRead(Buffer::Instance& buffer) PURE;\n\n  /**\n   * @param buffer supplies the buffer to write from\n   * @param end_stream supplies whether this is the end of the stream. If true and all\n   *        data in buffer is written, the connection will be half-closed.\n   * @return IoResult the result of the write action.\n   */\n  virtual IoResult doWrite(Buffer::Instance& buffer, bool end_stream) PURE;\n\n  /**\n   * Called when underlying transport is established.\n   */\n  virtual void onConnected() PURE;\n\n  /**\n   * @return the const SSL connection data if this is an SSL connection, or nullptr if it is not.\n   */\n  virtual Ssl::ConnectionInfoConstSharedPtr ssl() const PURE;\n};\n\nusing TransportSocketPtr = std::unique_ptr<TransportSocket>;\n\n/**\n * Options for creating transport sockets.\n */\nclass TransportSocketOptions {\npublic:\n  virtual ~TransportSocketOptions() = default;\n\n  /**\n   * @return the const optional server name to set in the transport socket, for example SNI for\n   *         SSL, regardless of the upstream cluster configuration. 
Filters that influence\n   *         upstream connection selection, such as tcp_proxy, should take this option into account\n   *         and should pass it through to the connection pool to ensure the correct endpoints are\n   *         selected and the upstream connection is set up accordingly.\n   */\n  virtual const absl::optional<std::string>& serverNameOverride() const PURE;\n\n  /**\n   * @return the optional overridden SAN names to verify, if the transport socket supports SAN\n   *         verification.\n   */\n  virtual const std::vector<std::string>& verifySubjectAltNameListOverride() const PURE;\n\n  /**\n   * The application protocols to use when negotiating an upstream connection. When an application\n   * protocol override is provided, it will *always* be used.\n   * @return the optional overridden application protocols.\n   */\n  virtual const std::vector<std::string>& applicationProtocolListOverride() const PURE;\n\n  /**\n   * The application protocol to use when negotiating an upstream connection and no other\n   * application protocol has been configured. Both\n   * TransportSocketOptions::applicationProtocolListOverride and application protocols configured\n   * in the CommonTlsContext on the Cluster will take precedence.\n   *\n   * Note that this option is intended for intermediate code (e.g. the HTTP connection pools) to\n   * specify a default ALPN when no specific values are specified elsewhere. 
As such, providing a\n   * value here might not make sense prior to load balancing.\n   * @return the optional fallback for application protocols, for when they are not specified in the\n   *         TLS configuration.\n   */\n  virtual const absl::optional<std::string>& applicationProtocolFallback() const PURE;\n\n  /**\n   * @return optional PROXY protocol address information.\n   */\n  virtual absl::optional<Network::ProxyProtocolData> proxyProtocolOptions() const PURE;\n\n  /**\n   * @param vector of bytes to which the option should append hash key data that will be used\n   *        to separate connections based on the option. Any data already in the key vector must\n   *        not be modified.\n   */\n  virtual void hashKey(std::vector<uint8_t>& key) const PURE;\n};\n\n// TODO(mattklein123): Rename to TransportSocketOptionsConstSharedPtr in a dedicated follow up.\nusing TransportSocketOptionsSharedPtr = std::shared_ptr<const TransportSocketOptions>;\n\n/**\n * A factory for creating transport socket. It will be associated to filter chains and clusters.\n */\nclass TransportSocketFactory {\npublic:\n  virtual ~TransportSocketFactory() = default;\n\n  /**\n   * @return bool whether the transport socket implements secure transport.\n   */\n  virtual bool implementsSecureTransport() const PURE;\n\n  /**\n   * @param options for creating the transport socket\n   * @return Network::TransportSocketPtr a transport socket to be passed to connection.\n   */\n  virtual TransportSocketPtr\n  createTransportSocket(TransportSocketOptionsSharedPtr options) const PURE;\n};\n\nusing TransportSocketFactoryPtr = std::unique_ptr<TransportSocketFactory>;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/udp_packet_writer_config.h",
    "content": "#pragma once\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/network/udp_packet_writer_handler.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass UdpPacketWriterConfigFactory : public Config::TypedFactory {\npublic:\n  ~UdpPacketWriterConfigFactory() override = default;\n\n  /**\n   * Create an UdpPacketWriterFactory object according to given message.\n   * @param message specifies Udp Packet Writer options in a protobuf.\n   */\n  virtual Network::UdpPacketWriterFactoryPtr\n  createUdpPacketWriterFactory(const Protobuf::Message& message) PURE;\n\n  std::string category() const override { return \"envoy.udp_packet_writers\"; }\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/network/udp_packet_writer_handler.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/api/io_error.h\"\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/socket.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Max v6 packet size, excluding IP and UDP headers.\n */\nconstexpr uint64_t UdpMaxOutgoingPacketSize = 1452;\n\n/**\n * UdpPacketWriterBuffer bundles a buffer and a function that\n * releases it.\n */\nstruct UdpPacketWriterBuffer {\n  UdpPacketWriterBuffer() = default;\n  UdpPacketWriterBuffer(uint8_t* buffer, size_t length,\n                        std::function<void(const char*)> release_buffer)\n      : buffer_(buffer), length_(length), release_buffer_(std::move(release_buffer)) {}\n\n  uint8_t* buffer_ = nullptr;\n  size_t length_ = 0;\n  std::function<void(const char*)> release_buffer_;\n};\n\nclass UdpPacketWriter {\npublic:\n  virtual ~UdpPacketWriter() = default;\n\n  /**\n   * @brief Sends a packet via given UDP socket with specific source address.\n   *\n   * @param buffer points to the buffer containing the packet\n   * @param local_ip is the source address to be used to send. 
If it is null,\n   * picks up the default network interface ip address.\n   * @param peer_address is the destination address to send to.\n   * @return result with number of bytes written, and write status\n   */\n  virtual Api::IoCallUint64Result writePacket(const Buffer::Instance& buffer,\n                                              const Address::Ip* local_ip,\n                                              const Address::Instance& peer_address) PURE;\n\n  /**\n   * @returns true if the network socket is not writable.\n   */\n  virtual bool isWriteBlocked() const PURE;\n\n  /**\n   * @brief mark the socket as writable when the socket is unblocked.\n   */\n  virtual void setWritable() PURE;\n\n  /**\n   * @brief Get the maximum size of the packet which can be written using this\n   * writer for the supplied peer address.\n   *\n   * @param peer_address  is the destination address to send to.\n   * @return the max packet size\n   */\n  virtual uint64_t getMaxPacketSize(const Address::Instance& peer_address) const PURE;\n\n  /**\n   * @return true if Batch Mode\n   * @return false if PassThroughMode\n   */\n  virtual bool isBatchMode() const PURE;\n\n  /**\n   * @brief Get pointer to the next write location in internal buffer,\n   * it should be called iff the caller does not call writePacket\n   * for the returned buffer. 
The caller is expected to call writePacket\n   * with the buffer returned from this function to save a memcpy.\n   *\n   * @param local_ip is the source address to be used to send.\n   * @param peer_address is the destination address to send to.\n   * @return { char* to the next write location,\n   *           func to release buffer }\n   */\n  virtual UdpPacketWriterBuffer getNextWriteLocation(const Address::Ip* local_ip,\n                                                     const Address::Instance& peer_address) PURE;\n\n  /**\n   * @brief Batch Mode: Try to send all buffered packets\n   *        PassThrough Mode: NULL operation\n   *\n   * @return Api::IoCallUint64Result\n   */\n  virtual Api::IoCallUint64Result flush() PURE;\n};\n\nusing UdpPacketWriterPtr = std::unique_ptr<UdpPacketWriter>;\n\nclass UdpPacketWriterFactory {\npublic:\n  virtual ~UdpPacketWriterFactory() = default;\n\n  /**\n   * Creates an UdpPacketWriter object for the given Udp Socket\n   * @param socket UDP socket used to send packets.\n   * @return the UdpPacketWriter created.\n   */\n  virtual UdpPacketWriterPtr createUdpPacketWriter(Network::IoHandle& io_handle,\n                                                   Stats::Scope& scope) PURE;\n};\n\nusing UdpPacketWriterFactoryPtr = std::unique_ptr<UdpPacketWriterFactory>;\nusing UdpPacketWriterFactoryOptRef = absl::optional<std::reference_wrapper<UdpPacketWriterFactory>>;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/protobuf/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"message_validator_interface\",\n    hdrs = [\"message_validator.h\"],\n    deps = [\n        \"//source/common/protobuf\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/protobuf/message_validator.h",
    "content": "#pragma once\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace ProtobufMessage {\n\n/**\n * Exception class for reporting validation errors due to the presence of unknown\n * fields in a protobuf.\n */\nclass UnknownProtoFieldException : public EnvoyException {\npublic:\n  UnknownProtoFieldException(const std::string& message) : EnvoyException(message) {}\n};\n\n/**\n * Exception class for reporting validation errors due to the presence of deprecated\n * fields in a protobuf.\n */\nclass DeprecatedProtoFieldException : public EnvoyException {\npublic:\n  DeprecatedProtoFieldException(const std::string& message) : EnvoyException(message) {}\n};\n\n/**\n * Visitor interface for a Protobuf::Message. The methods of ValidationVisitor are invoked to\n * perform validation based on events encountered during or after the parsing of proto binary\n * or JSON/YAML.\n */\nclass ValidationVisitor {\npublic:\n  virtual ~ValidationVisitor() = default;\n\n  /**\n   * Invoked when an unknown field is encountered.\n   * @param description human readable description of the field.\n   */\n  virtual void onUnknownField(absl::string_view description) PURE;\n\n  /**\n   * If true, skip this validation visitor in the interest of speed when\n   * possible.\n   **/\n  virtual bool skipValidation() PURE;\n\n  /**\n   * Invoked when deprecated field is encountered.\n   * @param description human readable description of the field.\n   * @param soft_deprecation is set to true, visitor would log a warning message, otherwise would\n   * throw an exception.\n   */\n  virtual void onDeprecatedField(absl::string_view description, bool soft_deprecation) PURE;\n};\n\nclass ValidationContext {\npublic:\n  virtual ~ValidationContext() = default;\n\n  /**\n   * @return ValidationVisitor& the validation visitor for static configuration.\n   */\n  
virtual ValidationVisitor& staticValidationVisitor() PURE;\n\n  /**\n   * @return ValidationVisitor& the validation visitor for dynamic configuration.\n   */\n  virtual ValidationVisitor& dynamicValidationVisitor() PURE;\n};\n\n} // namespace ProtobufMessage\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"ratelimit_interface\",\n    hdrs = [\"ratelimit.h\"],\n    deps = [\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/ratelimit/ratelimit.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/type/v3/ratelimit_unit.pb.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace RateLimit {\n\n/**\n * An optional dynamic override for the rate limit. See ratelimit.proto\n */\nstruct RateLimitOverride {\n  uint32_t requests_per_unit_;\n  envoy::type::v3::RateLimitUnit unit_;\n};\n\n/**\n * A single rate limit request descriptor entry. See ratelimit.proto.\n */\nstruct DescriptorEntry {\n  std::string key_;\n  std::string value_;\n};\n\n/**\n * A single rate limit request descriptor. See ratelimit.proto.\n */\nstruct Descriptor {\n  std::vector<DescriptorEntry> entries_;\n  absl::optional<RateLimitOverride> limit_ = absl::nullopt;\n};\n\n} // namespace RateLimit\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/registry/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"registry\",\n    hdrs = [\"registry.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/config:api_type_oracle_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/registry/registry.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <functional>\n#include <map>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/api_type_oracle.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/common/utility.h\"\n\n#include \"absl/base/attributes.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Registry {\n\ntemplate <class Base> class FactoryRegistry;\ntemplate <class T, class Base> class RegisterFactory;\n\n/**\n * FactoryRegistryProxy is a proxy object that provides access to the\n * static methods of a strongly-typed factory registry.\n */\nclass FactoryRegistryProxy {\npublic:\n  virtual ~FactoryRegistryProxy() = default;\n  virtual std::vector<absl::string_view> registeredNames() const PURE;\n  // Return all registered factory names, including disabled factories.\n  virtual std::vector<absl::string_view> allRegisteredNames() const PURE;\n  virtual absl::optional<envoy::config::core::v3::BuildVersion>\n  getFactoryVersion(absl::string_view name) const PURE;\n  virtual bool disableFactory(absl::string_view) PURE;\n  virtual bool isFactoryDisabled(absl::string_view) const PURE;\n};\n\ntemplate <class Base> class FactoryRegistryProxyImpl : public FactoryRegistryProxy {\npublic:\n  using FactoryRegistry = Envoy::Registry::FactoryRegistry<Base>;\n\n  std::vector<absl::string_view> registeredNames() const override {\n    return FactoryRegistry::registeredNames();\n  }\n\n  std::vector<absl::string_view> allRegisteredNames() const override {\n    return FactoryRegistry::registeredNames(true);\n  }\n\n  absl::optional<envoy::config::core::v3::BuildVersion>\n  
getFactoryVersion(absl::string_view name) const override {\n    return FactoryRegistry::getFactoryVersion(name);\n  }\n\n  bool disableFactory(absl::string_view name) override {\n    return FactoryRegistry::disableFactory(name);\n  }\n\n  bool isFactoryDisabled(absl::string_view name) const override {\n    return FactoryRegistry::isFactoryDisabled(name);\n  }\n};\n\n/**\n * BaseFactoryCategoryRegistry holds the static factory map for\n * FactoryCategoryRegistry, ensuring that friends of that class\n * cannot get non-const access to it.\n */\nclass BaseFactoryCategoryRegistry {\nprotected:\n  using MapType = absl::flat_hash_map<std::string, FactoryRegistryProxy*>;\n\n  static MapType& factories() {\n    static auto* factories = new MapType();\n    return *factories;\n  }\n};\n\n/**\n * FactoryCategoryRegistry registers factory registries by their\n * declared category. The category is exposed by a static category()\n * method on the factory base type.\n *\n * Only RegisterFactory instances are able to register factory registries.\n */\nclass FactoryCategoryRegistry : public BaseFactoryCategoryRegistry {\npublic:\n  /**\n   * @return a read-only reference to the map of registered factory\n   * registries.\n   */\n  static const MapType& registeredFactories() { return factories(); }\n\n  /**\n   * @return whether the given category name is already registered.\n   */\n  static bool isRegistered(absl::string_view category) {\n    return factories().find(category) != factories().end();\n  }\n\n  static bool disableFactory(absl::string_view category, absl::string_view name) {\n    auto registry = factories().find(category);\n\n    if (registry != factories().end()) {\n      return registry->second->disableFactory(name);\n    }\n\n    return false;\n  }\n\nprivate:\n  // Allow RegisterFactory and the test helper InjectFactoryCategory to register a category, but\n  // no-one else. 
This enforces correct use of the registration machinery.\n  template <class T, class Base> friend class RegisterFactory;\n  template <class Base> friend class InjectFactoryCategory;\n\n  static void registerCategory(const std::string& category, FactoryRegistryProxy* factory_names) {\n    auto result = factories().emplace(std::make_pair(category, factory_names));\n    RELEASE_ASSERT(result.second == true,\n                   fmt::format(\"Double registration for category: '{}'\", category));\n  }\n\n  static void deregisterCategoryForTest(const std::string& category) {\n    factories().erase(category);\n    RELEASE_ASSERT(factories().find(category) == factories().end(),\n                   fmt::format(\"Deregistration for category '{}' failed\", category));\n  }\n};\n\n// Forward declaration of test class for friend declaration below.\ntemplate <typename T> class InjectFactory;\n\n/**\n * General registry for implementation factories. The registry is templated by the Base class that a\n * set of factories conforms to.\n *\n * Classes are found by name, so a single name cannot be registered twice for the same Base class.\n * Factories are registered by reference and this reference is expected to be valid through the life\n * of the program. 
Factories cannot be deregistered.\n * Factories should generally be registered by statically instantiating the RegisterFactory class.\n *\n * Note: This class is not thread safe, so registration should only occur in a single threaded\n * environment, which is guaranteed by the static instantiation mentioned above.\n *\n * Example lookup: BaseFactoryType *factory =\n * FactoryRegistry<BaseFactoryType>::getFactory(\"example_factory_name\");\n */\ntemplate <class Base> class FactoryRegistry : public Logger::Loggable<Logger::Id::config> {\npublic:\n  /**\n   * Return a sorted vector of registered factory names.\n   */\n  static std::vector<absl::string_view> registeredNames(bool include_disabled = false) {\n    std::vector<absl::string_view> ret;\n\n    ret.reserve(factories().size());\n\n    for (const auto& [factory_name, factory] : factories()) {\n      if (factory || include_disabled) {\n        ret.push_back(factory_name);\n      }\n    }\n\n    std::sort(ret.begin(), ret.end());\n\n    return ret;\n  }\n\n  /**\n   * Gets the current map of factory implementations.\n   */\n  static absl::flat_hash_map<std::string, Base*>& factories() {\n    static auto* factories = new absl::flat_hash_map<std::string, Base*>;\n    return *factories;\n  }\n\n  /**\n   * Gets the current map of vendor specific factory versions.\n   */\n  static absl::flat_hash_map<std::string, envoy::config::core::v3::BuildVersion>&\n  versionedFactories() {\n    using VersionedFactoryMap =\n        absl::flat_hash_map<std::string, envoy::config::core::v3::BuildVersion>;\n    MUTABLE_CONSTRUCT_ON_FIRST_USE(VersionedFactoryMap);\n  }\n\n  static absl::flat_hash_map<std::string, std::string>& deprecatedFactoryNames() {\n    static auto* deprecated_factory_names = new absl::flat_hash_map<std::string, std::string>;\n    return *deprecated_factory_names;\n  }\n\n  /**\n   * Lazily constructs a mapping from the configuration message type to a factory,\n   * including the deprecated configuration message 
types.\n   * Must be invoked after factory registration is completed.\n   */\n  static absl::flat_hash_map<std::string, Base*>& factoriesByType() {\n    static absl::flat_hash_map<std::string, Base*>* factories_by_type =\n        buildFactoriesByType().release();\n\n    return *factories_by_type;\n  }\n\n  /**\n   * instead_value are used when passed name was deprecated.\n   */\n  static void registerFactory(Base& factory, absl::string_view name,\n                              absl::string_view instead_value = \"\") {\n    auto result = factories().emplace(std::make_pair(name, &factory));\n    if (!result.second) {\n      ExceptionUtil::throwEnvoyException(\n          fmt::format(\"Double registration for name: '{}'\", factory.name()));\n    }\n\n    if (!instead_value.empty()) {\n      deprecatedFactoryNames().emplace(std::make_pair(name, instead_value));\n    }\n  }\n\n  /**\n   * version is used for registering vendor specific factories that are versioned\n   * independently of Envoy.\n   */\n  static void registerFactory(Base& factory, absl::string_view name,\n                              const envoy::config::core::v3::BuildVersion& version,\n                              absl::string_view instead_value = \"\") {\n    auto result = factories().emplace(std::make_pair(name, &factory));\n    if (!result.second) {\n      ExceptionUtil::throwEnvoyException(\n          fmt::format(\"Double registration for name: '{}'\", factory.name()));\n    }\n    versionedFactories().emplace(std::make_pair(name, version));\n    if (!instead_value.empty()) {\n      deprecatedFactoryNames().emplace(std::make_pair(name, instead_value));\n    }\n  }\n\n  /**\n   * Permanently disables the named factory by setting the corresponding\n   * factory pointer to null. 
If the factory is registered under multiple\n   * (deprecated) names, all the possible names are disabled.\n   */\n  static bool disableFactory(absl::string_view name) {\n    const auto disable = [](absl::string_view name) -> bool {\n      auto it = factories().find(name);\n      if (it != factories().end()) {\n        it->second = nullptr;\n        return true;\n      }\n      return false;\n    };\n\n    // First, find the canonical name for this factory.\n    absl::string_view canonicalName = canonicalFactoryName(name);\n\n    // Next, disable the factory by all its deprecated names.\n    for (const auto& [deprecated_name, mapped_canonical_name] : deprecatedFactoryNames()) {\n      if (mapped_canonical_name == canonicalName) {\n        disable(deprecated_name);\n      }\n    }\n\n    // Finally, disable the factory by its canonical name.\n    return disable(canonicalName);\n  }\n\n  /**\n   * Gets a factory by name. If the name isn't found in the registry, returns nullptr.\n   */\n  static Base* getFactory(absl::string_view name) {\n    auto it = factories().find(name);\n    if (it == factories().end()) {\n      return nullptr;\n    }\n\n    if (!checkDeprecated(name)) {\n      return nullptr;\n    }\n    return it->second;\n  }\n\n  static Base* getFactoryByType(absl::string_view type) {\n    auto it = factoriesByType().find(type);\n    if (it == factoriesByType().end()) {\n      return nullptr;\n    }\n    return it->second;\n  }\n\n  /**\n   * @return the canonical name of the factory. If the given name is a\n   * deprecated factory name, the canonical name is returned instead.\n   */\n  static absl::string_view canonicalFactoryName(absl::string_view name) {\n    const auto it = deprecatedFactoryNames().find(name);\n    return (it == deprecatedFactoryNames().end()) ? 
name : it->second;\n  }\n\n  static bool checkDeprecated(absl::string_view name) {\n    auto it = deprecatedFactoryNames().find(name);\n    const bool deprecated = it != deprecatedFactoryNames().end();\n    if (deprecated) {\n      return Extensions::Common::Utility::ExtensionNameUtil::allowDeprecatedExtensionName(\n          \"\", it->first, it->second);\n    }\n\n    return true;\n  }\n\n  /**\n   * @return true if the named factory was disabled.\n   */\n  static bool isFactoryDisabled(absl::string_view name) {\n    auto it = factories().find(name);\n    ASSERT(it != factories().end());\n    return it->second == nullptr;\n  }\n\n  /**\n   * @return vendor specific version of a factory.\n   */\n  static absl::optional<envoy::config::core::v3::BuildVersion>\n  getFactoryVersion(absl::string_view name) {\n    auto it = versionedFactories().find(name);\n    if (it == versionedFactories().end()) {\n      return absl::nullopt;\n    }\n    return it->second;\n  }\n\nprivate:\n  // Allow factory injection only in tests.\n  friend class InjectFactory<Base>;\n\n  static std::unique_ptr<absl::flat_hash_map<std::string, Base*>> buildFactoriesByType() {\n    auto mapping = std::make_unique<absl::flat_hash_map<std::string, Base*>>();\n\n    for (const auto& [factory_name, factory] : factories()) {\n      if (factory == nullptr) {\n        continue;\n      }\n\n      // Skip untyped factories.\n      std::string config_type = factory->configType();\n      if (config_type.empty()) {\n        continue;\n      }\n\n      // Register config types in the mapping and traverse the deprecated message type chain.\n      while (true) {\n        auto it = mapping->find(config_type);\n        if (it != mapping->end() && it->second != factory) {\n          // Mark double-registered types with a nullptr.\n          // See issue https://github.com/envoyproxy/envoy/issues/9643.\n          ENVOY_LOG(warn, \"Double registration for type: '{}' by '{}' and '{}'\", config_type,\n                    
factory->name(), it->second ? it->second->name() : \"\");\n          it->second = nullptr;\n        } else {\n          mapping->emplace(std::make_pair(config_type, factory));\n        }\n\n        const Protobuf::Descriptor* previous =\n            Config::ApiTypeOracle::getEarlierVersionDescriptor(config_type);\n        if (previous == nullptr) {\n          break;\n        }\n        config_type = previous->full_name();\n      }\n    }\n\n    return mapping;\n  }\n\n  // Rebuild the factories-by-type map based on the current factories.\n  static void rebuildFactoriesByTypeForTest() {\n    auto& mapping = factoriesByType();\n    auto updated_mapping = buildFactoriesByType();\n\n    // Copy the updated mapping over the old one.\n    mapping = *updated_mapping;\n  }\n\n  /**\n   * Replaces a factory by name. This method should only be used for testing purposes.\n   * @param factory is the factory to inject.\n   * @param deprecated_names install the given deprecated names for this factory.\n   * @return std::function<void()> a function that will restore the previously registered factories\n   *         (by name or type).\n   */\n  static std::function<void()>\n  replaceFactoryForTest(Base& factory,\n                        std::initializer_list<absl::string_view> deprecated_names = {}) {\n    using DeprecatedNamesVector = std::vector<std::pair<std::string, std::string>>;\n\n    // If an existing factory is registered with this name, track it for later restoration.\n    Base* prev_by_name = nullptr;\n    auto it = factories().find(factory.name());\n    if (it != factories().end()) {\n      prev_by_name = it->second;\n      factories().erase(it);\n\n      ENVOY_LOG(\n          info, \"Factory '{}' (type '{}') displaced-by-name with test factory '{}' (type '{}')\",\n          prev_by_name->name(), prev_by_name->configType(), factory.name(), factory.configType());\n    } else {\n      ENVOY_LOG(info, \"Factory '{}' (type '{}') registered for tests\", factory.name(),\n    
            factory.configType());\n    }\n\n    factories().emplace(factory.name(), &factory);\n    RELEASE_ASSERT(getFactory(factory.name()) == &factory,\n                   \"test factory by-name registration failed\");\n\n    DeprecatedNamesVector prev_deprecated_names;\n    if (deprecated_names.size() > 0) {\n      for (auto deprecated_name : deprecated_names) {\n        auto it = deprecatedFactoryNames().find(deprecated_name);\n        if (it != deprecatedFactoryNames().end()) {\n          prev_deprecated_names.emplace_back(std::make_pair(it->first, it->second));\n          deprecatedFactoryNames().erase(it);\n\n          ENVOY_LOG(\n              info,\n              \"Deprecated name '{}' (mapped to '{}') displaced with test factory '{}' (type '{}')\",\n              it->first, it->second, factory.name(), factory.configType());\n        } else {\n          // Name not previously mapped, remember to remove it.\n          prev_deprecated_names.emplace_back(std::make_pair(deprecated_name, \"\"));\n\n          ENVOY_LOG(info, \"Deprecated name '{}' (mapped to '{}')\", deprecated_name, factory.name());\n        }\n\n        // Register the replacement factory with a deprecated name.\n        factories().emplace(deprecated_name, &factory);\n        RELEASE_ASSERT(getFactory(deprecated_name) == &factory,\n                       \"test factory registration by deprecated name failed\");\n\n        // Register the replacement factory's deprecated name.\n        deprecatedFactoryNames().emplace(std::make_pair(deprecated_name, factory.name()));\n      }\n    }\n\n    rebuildFactoriesByTypeForTest();\n\n    return [replacement = &factory, prev_by_name, prev_deprecated_names]() {\n      // Unregister the replacement factory by name.\n      factories().erase(replacement->name());\n\n      ENVOY_LOG(info, \"Removed test factory '{}' (type '{}')\", replacement->name(),\n                replacement->configType());\n\n      if (prev_by_name) {\n        // Restore any factory 
displaced by name, but only register the type if it's non-empty.\n        factories().emplace(prev_by_name->name(), prev_by_name);\n\n        ENVOY_LOG(info, \"Restored factory '{}' (type '{}'), formerly displaced-by-name\",\n                  prev_by_name->name(), prev_by_name->configType());\n      }\n\n      for (auto [prev_deprecated_name, mapped_canonical_name] : prev_deprecated_names) {\n        deprecatedFactoryNames().erase(prev_deprecated_name);\n\n        ENVOY_LOG(info, \"Removed deprecated name '{}'\", prev_deprecated_name);\n\n        if (!mapped_canonical_name.empty()) {\n          deprecatedFactoryNames().emplace(\n              std::make_pair(prev_deprecated_name, mapped_canonical_name));\n\n          auto* deprecated_factory = getFactory(mapped_canonical_name);\n          RELEASE_ASSERT(deprecated_factory != nullptr,\n                         \"failed to restore deprecated factory name\");\n          factories().emplace(mapped_canonical_name, deprecated_factory);\n\n          ENVOY_LOG(info, \"Restored deprecated name '{}' (mapped to '{}'\", prev_deprecated_name,\n                    mapped_canonical_name);\n        }\n      }\n\n      rebuildFactoriesByTypeForTest();\n    };\n  }\n};\n\n/**\n * Factory registration template. Enables users to register a particular implementation factory with\n * the FactoryRegistry by instantiating this templated class with the specific factory class and the\n * general Base class to which that factory conforms.\n *\n * Because factories are generally registered once and live for the length of the program, the\n * standard use of this class is static instantiation within a linked implementation's translation\n * unit. 
For an example of a typical use case, @see NamedNetworkFilterConfigFactory.\n *\n * Example registration: REGISTER_FACTORY(SpecificFactory, BaseFactory);\n *                       REGISTER_FACTORY(SpecificFactory, BaseFactory){\"deprecated_name\"};\n */\ntemplate <class T, class Base> class RegisterFactory {\npublic:\n  /**\n   * Constructor that registers an instance of the factory with the FactoryRegistry.\n   */\n  RegisterFactory() {\n    ASSERT(!instance_.name().empty());\n    FactoryRegistry<Base>::registerFactory(instance_, instance_.name());\n\n    // Also register this factory with its category.\n    //\n    // Each time a factory registers, the registry will attempt to\n    // register its category here. This means that we have to ignore\n    // multiple attempts to register the same category and can't detect\n    // duplicate categories.\n    if (!FactoryCategoryRegistry::isRegistered(instance_.category())) {\n      FactoryCategoryRegistry::registerCategory(instance_.category(),\n                                                new FactoryRegistryProxyImpl<Base>());\n    }\n  }\n\n  /**\n   * Constructor that registers an instance of the factory with the FactoryRegistry along with\n   * deprecated names.\n   */\n  explicit RegisterFactory(std::initializer_list<absl::string_view> deprecated_names) {\n    if (!instance_.name().empty()) {\n      FactoryRegistry<Base>::registerFactory(instance_, instance_.name());\n    } else {\n      ASSERT(deprecated_names.size() != 0,\n             \"Attempted to register a factory without a name or deprecated name\");\n    }\n\n    for (auto deprecated_name : deprecated_names) {\n      ASSERT(!deprecated_name.empty());\n      FactoryRegistry<Base>::registerFactory(instance_, deprecated_name, instance_.name());\n    }\n\n    if (!FactoryCategoryRegistry::isRegistered(instance_.category())) {\n      FactoryCategoryRegistry::registerCategory(instance_.category(),\n                                                new 
FactoryRegistryProxyImpl<Base>());\n    }\n  }\n\n  /**\n   * Constructor that registers an instance of the factory with the FactoryRegistry along with\n   * vendor specific version.\n   */\n  RegisterFactory(uint32_t major, uint32_t minor, uint32_t patch,\n                  const std::map<std::string, std::string>& version_metadata)\n      : RegisterFactory(major, minor, patch, version_metadata, {}) {}\n\n  /**\n   * Constructor that registers an instance of the factory with the FactoryRegistry along with\n   * vendor specific version and deprecated names.\n   */\n  RegisterFactory(uint32_t major, uint32_t minor, uint32_t patch,\n                  const std::map<std::string, std::string>& version_metadata,\n                  std::initializer_list<absl::string_view> deprecated_names) {\n    auto version = makeBuildVersion(major, minor, patch, version_metadata);\n    if (instance_.name().empty()) {\n      ASSERT(deprecated_names.size() != 0);\n    } else {\n      FactoryRegistry<Base>::registerFactory(instance_, instance_.name(), version);\n    }\n\n    for (auto deprecated_name : deprecated_names) {\n      ASSERT(!deprecated_name.empty());\n      FactoryRegistry<Base>::registerFactory(instance_, deprecated_name, version, instance_.name());\n    }\n\n    if (!FactoryCategoryRegistry::isRegistered(instance_.category())) {\n      FactoryCategoryRegistry::registerCategory(instance_.category(),\n                                                new FactoryRegistryProxyImpl<Base>());\n    }\n  }\n\nprivate:\n  static envoy::config::core::v3::BuildVersion\n  makeBuildVersion(uint32_t major, uint32_t minor, uint32_t patch,\n                   const std::map<std::string, std::string>& metadata) {\n    envoy::config::core::v3::BuildVersion version;\n    version.mutable_version()->set_major_number(major);\n    version.mutable_version()->set_minor_number(minor);\n    version.mutable_version()->set_patch(patch);\n    *version.mutable_metadata() = 
MessageUtil::keyValueStruct(metadata);\n    return version;\n  }\n\n  T instance_{};\n};\n\n/**\n * RegisterInternalFactory is a special case for registering factories\n * that are considered internal implementation details that should\n * not be exposed to operators via the factory categories.\n *\n * There is no corresponding REGISTER_INTERNAL_FACTORY because\n * this should be used sparingly and only in special cases.\n */\ntemplate <class T, class Base> class RegisterInternalFactory {\npublic:\n  RegisterInternalFactory() {\n    ASSERT(!instance_.name().empty());\n    FactoryRegistry<Base>::registerFactory(instance_, instance_.name());\n  }\n\nprivate:\n  T instance_{};\n};\n\n/**\n * Macro used for static registration.\n */\n#define REGISTER_FACTORY(FACTORY, BASE)                                                            \\\n  ABSL_ATTRIBUTE_UNUSED void forceRegister##FACTORY() {}                                           \\\n  static Envoy::Registry::RegisterFactory</* NOLINT(fuchsia-statically-constructed-objects) */     \\\n                                          FACTORY, BASE>                                           \\\n      FACTORY##_registered\n\n#define FACTORY_VERSION(major, minor, patch, ...) major, minor, patch, __VA_ARGS__\n\n/**\n * Macro used for static registration declaration.\n * Calling forceRegister...(); can be used to force the static factory initializer to run in a\n * setting in which Envoy is bundled as a static archive. In this case, the static initializer is\n * not run until a function in the compilation unit is invoked. The force function can be invoked\n * from a static library wrapper.\n */\n#define DECLARE_FACTORY(FACTORY) ABSL_ATTRIBUTE_UNUSED void forceRegister##FACTORY()\n\n} // namespace Registry\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/router/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"rds_interface\",\n    hdrs = [\"rds.h\"],\n    deps = [\n        \":router_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"route_config_provider_manager_interface\",\n    hdrs = [\"route_config_provider_manager.h\"],\n    deps = [\n        \":rds_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/json:json_object_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"route_config_update_info_interface\",\n    hdrs = [\"route_config_update_receiver.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":rds_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_interface\",\n    hdrs = [\"router.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":internal_redirect_interface\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/common:conn_pool_interface\",\n        
\"//include/envoy/common:matchers_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/config:typed_metadata_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//include/envoy/http:hash_policy_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//include/envoy/upstream:resource_manager_interface\",\n        \"//include/envoy/upstream:retry_interface\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"scopes_interface\",\n    hdrs = [\"scopes.h\"],\n    deps = [\n        \":router_interface\",\n        \"//include/envoy/config:config_provider_interface\",\n        \"//include/envoy/http:header_map_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_ratelimit_interface\",\n    hdrs = [\"router_ratelimit.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/ratelimit:ratelimit_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"shadow_writer_interface\",\n    hdrs = [\"shadow_writer.h\"],\n    deps = [\n        \"//include/envoy/http:async_client_interface\",\n        \"//include/envoy/http:message_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"string_accessor_interface\",\n    hdrs = [\"string_accessor.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        
\"//include/envoy/stream_info:filter_state_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"internal_redirect_interface\",\n    hdrs = [\"internal_redirect.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/router/internal_redirect.h",
    "content": "#pragma once\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Used to decide if an internal redirect is allowed to be followed based on the target route.\n * Subclassing Logger::Loggable so that implementations can log details.\n */\nclass InternalRedirectPredicate : Logger::Loggable<Logger::Id::router> {\npublic:\n  virtual ~InternalRedirectPredicate() = default;\n\n  /**\n   * A FilterState is provided so that predicate implementation can use it to preserve state across\n   * internal redirects.\n   * @param filter_state supplies the filter state associated with the current request so that the\n   *        predicates can use it to persist states across filter chains.\n   * @param target_route_name indicates the route that an internal redirect is targeting.\n   * @param downstream_is_https indicates the downstream request is using https.\n   * @param target_is_https indicates the internal redirect target url has https in the url.\n   * @return whether the route specified by target_route_name is allowed to be followed. 
Any\n   *         predicate returning false will prevent the redirect from being followed, causing the\n   *         response to be proxied downstream.\n   */\n  virtual bool acceptTargetRoute(StreamInfo::FilterState& filter_state,\n                                 absl::string_view target_route_name, bool downstream_is_https,\n                                 bool target_is_https) PURE;\n\n  /**\n   * @return the name of the current predicate.\n   */\n  virtual absl::string_view name() const PURE;\n};\n\nusing InternalRedirectPredicateSharedPtr = std::shared_ptr<InternalRedirectPredicate>;\n\n/**\n * Factory for InternalRedirectPredicate.\n */\nclass InternalRedirectPredicateFactory : public Config::TypedFactory {\npublic:\n  ~InternalRedirectPredicateFactory() override = default;\n\n  /**\n   * @param config contains the proto stored in TypedExtensionConfig.typed_config for the predicate.\n   * @param current_route_name stores the route name of the route where the predicate is installed.\n   * @return an InternalRedirectPredicate. The given current_route_name is useful for predicates\n   *         that need to create per-route FilterState.\n   */\n  virtual InternalRedirectPredicateSharedPtr\n  createInternalRedirectPredicate(const Protobuf::Message& config,\n                                  absl::string_view current_route_name) PURE;\n\n  std::string category() const override { return \"envoy.internal_redirect_predicates\"; }\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/router/rds.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/router/router.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * A provider for constant route configurations.\n */\nclass RouteConfigProvider {\npublic:\n  struct ConfigInfo {\n    // A reference to the currently loaded route configuration. Do not hold this reference beyond\n    // the caller of configInfo()'s scope.\n    const envoy::config::route::v3::RouteConfiguration& config_;\n\n    // The discovery version that supplied this route. This will be set to \"\" in the case of\n    // static clusters.\n    std::string version_;\n  };\n\n  virtual ~RouteConfigProvider() = default;\n\n  /**\n   * @return Router::ConfigConstSharedPtr a route configuration for use during a single request. The\n   * returned config may be different on a subsequent call, so a new config should be acquired for\n   * each request flow.\n   */\n  virtual Router::ConfigConstSharedPtr config() PURE;\n\n  /**\n   * @return the configuration information for the currently loaded route configuration. Note that\n   * if the provider has not yet performed an initial configuration load, no information will be\n   * returned.\n   */\n  virtual absl::optional<ConfigInfo> configInfo() const PURE;\n\n  /**\n   * @return the last time this RouteConfigProvider was updated. 
Used for config dumps.\n   */\n  virtual SystemTime lastUpdated() const PURE;\n\n  /**\n   * Callback used to notify RouteConfigProvider about configuration changes.\n   */\n  virtual void onConfigUpdate() PURE;\n\n  /**\n   * Validate if the route configuration can be applied to the context of the route config provider.\n   */\n  virtual void\n  validateConfig(const envoy::config::route::v3::RouteConfiguration& config) const PURE;\n\n  /**\n   * Callback used to request an update to the route configuration from the management server.\n   * @param for_domain supplies the domain name that virtual hosts must match on\n   * @param thread_local_dispatcher thread-local dispatcher\n   * @param route_config_updated_cb callback to be called when the configuration update has been\n   * propagated to worker threads\n   */\n  virtual void requestVirtualHostsUpdate(\n      const std::string& for_domain, Event::Dispatcher& thread_local_dispatcher,\n      std::weak_ptr<Http::RouteConfigUpdatedCallback> route_config_updated_cb) PURE;\n};\n\nusing RouteConfigProviderPtr = std::unique_ptr<RouteConfigProvider>;\nusing RouteConfigProviderSharedPtr = std::shared_ptr<RouteConfigProvider>;\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/router/route_config_provider_manager.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/json/json_object.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/router/rds.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * The RouteConfigProviderManager exposes the ability to get a RouteConfigProvider. This interface\n * is exposed to the Server's FactoryContext in order to allow HttpConnectionManagers to get\n * RouteConfigProviders.\n */\nclass RouteConfigProviderManager {\npublic:\n  virtual ~RouteConfigProviderManager() = default;\n\n  /**\n   * Get a RouteConfigProviderPtr for a route from RDS. Ownership of the RouteConfigProvider is the\n   * HttpConnectionManagers who calls this function. The RouteConfigProviderManager holds raw\n   * pointers to the RouteConfigProviders. Clean up of the pointers happen from the destructor of\n   * the RouteConfigProvider. 
This method creates a RouteConfigProvider which may share the\n   * underlying RDS subscription with the same (route_config_name, cluster).\n   * @param rds supplies the proto configuration of an RDS-configured RouteConfigProvider.\n   * @param factory_context is the context to use for the route config provider.\n   * @param stat_prefix supplies the stat_prefix to use for the provider stats.\n   * @param init_manager the Init::Manager used to coordinate initialization of the underlying RDS\n   * subscription.\n   */\n  virtual RouteConfigProviderSharedPtr createRdsRouteConfigProvider(\n      const envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n      Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix,\n      Init::Manager& init_manager) PURE;\n\n  /**\n   * Get a RouteConfigSharedPtr for a statically defined route. Ownership is as described for\n   * getRdsRouteConfigProvider above. This method always creates a new RouteConfigProvider.\n   * @param route_config supplies the RouteConfiguration for this route\n   * @param factory_context is the context to use for the route config provider.\n   * @param validator is the message validator for route config.\n   */\n  virtual RouteConfigProviderPtr\n  createStaticRouteConfigProvider(const envoy::config::route::v3::RouteConfiguration& route_config,\n                                  Server::Configuration::ServerFactoryContext& factory_context,\n                                  ProtobufMessage::ValidationVisitor& validator) PURE;\n};\n\nusing RouteConfigProviderManagerPtr = std::unique_ptr<RouteConfigProviderManager>;\nusing RouteConfigProviderManagerSharedPtr = std::shared_ptr<RouteConfigProviderManager>;\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/router/route_config_update_receiver.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/router/rds.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * A primitive that keeps track of updates to a RouteConfiguration.\n */\nclass RouteConfigUpdateReceiver {\npublic:\n  virtual ~RouteConfigUpdateReceiver() = default;\n\n  /**\n   * Called on updates via RDS.\n   * @param rc supplies the RouteConfiguration.\n   * @param version_info supplies RouteConfiguration version.\n   * @return bool whether RouteConfiguration has been updated.\n   */\n  virtual bool onRdsUpdate(const envoy::config::route::v3::RouteConfiguration& rc,\n                           const std::string& version_info) PURE;\n\n  using VirtualHostRefVector =\n      std::vector<std::reference_wrapper<const envoy::config::route::v3::VirtualHost>>;\n\n  /**\n   * Called on updates via VHDS.\n   * @param added_vhosts supplies VirtualHosts that have been added.\n   * @param added_resource_ids set of resources IDs (names + aliases) added.\n   * @param removed_resources supplies names of VirtualHosts that have been removed.\n   * @param version_info supplies RouteConfiguration version.\n   * @return bool whether RouteConfiguration has been updated.\n   */\n  virtual bool onVhdsUpdate(const VirtualHostRefVector& added_vhosts,\n                            const std::set<std::string>& added_resource_ids,\n                            const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                            const std::string& version_info) PURE;\n\n  /**\n   * @return std::string& the name of RouteConfiguration.\n   */\n  virtual const std::string& routeConfigName() const PURE;\n\n  /**\n   * @return std::string& the version of RouteConfiguration.\n   */\n  virtual 
const std::string& configVersion() const PURE;\n\n  /**\n   * @return bool whether VHDS configuration has been changed in the last RDS update.\n   */\n  // TODO(dmitri-d): Consider splitting RouteConfigUpdateReceiver into a RouteConfig state and a\n  // last update state. The latter could be passed to callbacks as a parameter, which would make the\n  // intent and the lifecycle of the \"last update state\" less muddled.\n  virtual bool vhdsConfigurationChanged() const PURE;\n\n  /**\n   * @return uint64_t the hash value of RouteConfiguration.\n   */\n  virtual uint64_t configHash() const PURE;\n\n  /**\n   * @return absl::optional<RouteConfigProvider::ConfigInfo> containing an instance of\n   * RouteConfigProvider::ConfigInfo if RouteConfiguration has been updated at least once. Otherwise\n   * returns an empty absl::optional<RouteConfigProvider::ConfigInfo>.\n   */\n  virtual absl::optional<RouteConfigProvider::ConfigInfo> configInfo() const PURE;\n\n  /**\n   * @return envoy::config::route::v3::RouteConfiguration& current RouteConfiguration.\n   */\n  virtual const envoy::config::route::v3::RouteConfiguration& routeConfiguration() PURE;\n\n  /**\n   * @return SystemTime the time of the last update.\n   */\n  virtual SystemTime lastUpdated() const PURE;\n\n  /**\n   * @return the union of all resource names and aliases (if any) received with the last VHDS\n   * update.\n   */\n  virtual const std::set<std::string>& resourceIdsInLastVhdsUpdate() PURE;\n};\n\nusing RouteConfigUpdatePtr = std::unique_ptr<RouteConfigUpdateReceiver>;\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/router/router.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <map>\n#include <memory>\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/conn_pool.h\"\n#include \"envoy/common/matchers.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/conn_pool.h\"\n#include \"envoy/http/hash_policy.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/router/internal_redirect.h\"\n#include \"envoy/tcp/conn_pool.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n#include \"envoy/upstream/resource_manager.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\n\nnamespace Upstream {\nclass ClusterManager;\nclass LoadBalancerContext;\n} // namespace Upstream\n\nnamespace Router {\n\n/**\n * Functionality common among routing primitives, such as DirectResponseEntry and RouteEntry.\n */\nclass ResponseEntry {\npublic:\n  virtual ~ResponseEntry() = default;\n\n  /**\n   * Do potentially destructive header transforms on response headers prior to forwarding. For\n   * example, adding or removing headers. 
This should only be called ONCE immediately after\n   * obtaining the initial response headers.\n   * @param headers supplies the response headers, which may be modified during this call.\n   * @param stream_info holds additional information about the request.\n   */\n  virtual void finalizeResponseHeaders(Http::ResponseHeaderMap& headers,\n                                       const StreamInfo::StreamInfo& stream_info) const PURE;\n};\n\n/**\n * A routing primitive that specifies a direct (non-proxied) HTTP response.\n */\nclass DirectResponseEntry : public ResponseEntry {\npublic:\n  ~DirectResponseEntry() override = default;\n\n  /**\n   * Returns the HTTP status code to return.\n   * @return Http::Code the response Code.\n   */\n  virtual Http::Code responseCode() const PURE;\n\n  /**\n   * Returns the redirect path based on the request headers.\n   * @param headers supplies the request headers.\n   * @return std::string the redirect URL if this DirectResponseEntry is a redirect,\n   *         or an empty string otherwise.\n   */\n  virtual std::string newPath(const Http::RequestHeaderMap& headers) const PURE;\n\n  /**\n   * Returns the response body to send with direct responses.\n   * @return std::string& the response body specified in the route configuration,\n   *         or an empty string if no response body is specified.\n   */\n  virtual const std::string& responseBody() const PURE;\n\n  /**\n   * Do potentially destructive header transforms on Path header prior to redirection. For\n   * example prefix rewriting for redirects etc. 
This should only be called ONCE\n   * immediately prior to redirecting.\n   * @param headers supplies the request headers, which may be modified during this call.\n   * @param insert_envoy_original_path insert x-envoy-original-path header?\n   */\n  virtual void rewritePathHeader(Http::RequestHeaderMap& headers,\n                                 bool insert_envoy_original_path) const PURE;\n\n  /**\n   * @return std::string& the name of the route.\n   */\n  virtual const std::string& routeName() const PURE;\n};\n\n/**\n * CorsPolicy for Route and VirtualHost.\n */\nclass CorsPolicy {\npublic:\n  virtual ~CorsPolicy() = default;\n\n  /**\n   * @return std::vector<StringMatcherPtr>& access-control-allow-origin matchers.\n   */\n  virtual const std::vector<Matchers::StringMatcherPtr>& allowOrigins() const PURE;\n\n  /**\n   * @return std::string access-control-allow-methods value.\n   */\n  virtual const std::string& allowMethods() const PURE;\n\n  /**\n   * @return std::string access-control-allow-headers value.\n   */\n  virtual const std::string& allowHeaders() const PURE;\n\n  /**\n   * @return std::string access-control-expose-headers value.\n   */\n  virtual const std::string& exposeHeaders() const PURE;\n\n  /**\n   * @return std::string access-control-max-age value.\n   */\n  virtual const std::string& maxAge() const PURE;\n\n  /**\n   * @return const absl::optional<bool>& Whether access-control-allow-credentials should be true.\n   */\n  virtual const absl::optional<bool>& allowCredentials() const PURE;\n\n  /**\n   * @return bool Whether CORS is enabled for the route or virtual host.\n   */\n  virtual bool enabled() const PURE;\n\n  /**\n   * @return bool Whether CORS policies are evaluated when filter is off.\n   */\n  virtual bool shadowEnabled() const PURE;\n};\n\n/**\n * An interface to be implemented by rate limited reset header parsers.\n */\nclass ResetHeaderParser {\npublic:\n  virtual ~ResetHeaderParser() = default;\n\n  /**\n   * Iterate over the 
headers, choose the first one that matches by name, and try to parse its\n   * value.\n   */\n  virtual absl::optional<std::chrono::milliseconds>\n  parseInterval(TimeSource& time_source, const Http::HeaderMap& headers) const PURE;\n};\n\nusing ResetHeaderParserSharedPtr = std::shared_ptr<ResetHeaderParser>;\n\n/**\n * Route level retry policy.\n */\nclass RetryPolicy {\npublic:\n  // clang-format off\n  static const uint32_t RETRY_ON_5XX                     = 0x1;\n  static const uint32_t RETRY_ON_GATEWAY_ERROR           = 0x2;\n  static const uint32_t RETRY_ON_CONNECT_FAILURE         = 0x4;\n  static const uint32_t RETRY_ON_RETRIABLE_4XX           = 0x8;\n  static const uint32_t RETRY_ON_REFUSED_STREAM          = 0x10;\n  static const uint32_t RETRY_ON_GRPC_CANCELLED          = 0x20;\n  static const uint32_t RETRY_ON_GRPC_DEADLINE_EXCEEDED  = 0x40;\n  static const uint32_t RETRY_ON_GRPC_RESOURCE_EXHAUSTED = 0x80;\n  static const uint32_t RETRY_ON_GRPC_UNAVAILABLE        = 0x100;\n  static const uint32_t RETRY_ON_GRPC_INTERNAL           = 0x200;\n  static const uint32_t RETRY_ON_RETRIABLE_STATUS_CODES  = 0x400;\n  static const uint32_t RETRY_ON_RESET                   = 0x800;\n  static const uint32_t RETRY_ON_RETRIABLE_HEADERS       = 0x1000;\n  static const uint32_t RETRY_ON_ENVOY_RATE_LIMITED      = 0x2000;\n  // clang-format on\n\n  virtual ~RetryPolicy() = default;\n\n  /**\n   * @return std::chrono::milliseconds timeout per retry attempt.\n   */\n  virtual std::chrono::milliseconds perTryTimeout() const PURE;\n\n  /**\n   * @return uint32_t the number of retries to allow against the route.\n   */\n  virtual uint32_t numRetries() const PURE;\n\n  /**\n   * @return uint32_t a local OR of RETRY_ON values above.\n   */\n  virtual uint32_t retryOn() const PURE;\n\n  /**\n   * Initializes a new set of RetryHostPredicates to be used when retrying with this retry policy.\n   * @return list of RetryHostPredicates to use\n   */\n  virtual 
std::vector<Upstream::RetryHostPredicateSharedPtr> retryHostPredicates() const PURE;\n\n  /**\n   * Initializes a RetryPriority to be used when retrying with this retry policy.\n   * @return the RetryPriority to use when determining priority load for retries, or nullptr\n   * if none should be used.\n   */\n  virtual Upstream::RetryPrioritySharedPtr retryPriority() const PURE;\n\n  /**\n   * Number of times host selection should be reattempted when selecting a host\n   * for a retry attempt.\n   */\n  virtual uint32_t hostSelectionMaxAttempts() const PURE;\n\n  /**\n   * List of status codes that should trigger a retry when the retriable-status-codes retry\n   * policy is enabled.\n   */\n  virtual const std::vector<uint32_t>& retriableStatusCodes() const PURE;\n\n  /**\n   * @return std::vector<Http::HeaderMatcherSharedPtr>& list of response header matchers that\n   * will be checked when the 'retriable-headers' retry policy is enabled.\n   */\n  virtual const std::vector<Http::HeaderMatcherSharedPtr>& retriableHeaders() const PURE;\n\n  /**\n   * @return std::vector<Http::HeaderMatcherSharedPtr>& list of request header\n   * matchers that will be checked before enabling retries.\n   */\n  virtual const std::vector<Http::HeaderMatcherSharedPtr>& retriableRequestHeaders() const PURE;\n\n  /**\n   * @return absl::optional<std::chrono::milliseconds> base retry interval\n   */\n  virtual absl::optional<std::chrono::milliseconds> baseInterval() const PURE;\n\n  /**\n   * @return absl::optional<std::chrono::milliseconds> maximum retry interval\n   */\n  virtual absl::optional<std::chrono::milliseconds> maxInterval() const PURE;\n\n  /**\n   * @return std::vector<Http::ResetHeaderParserSharedPtr>& list of reset header\n   * parsers that will be used to extract a retry back-off interval from response headers.\n   */\n  virtual const std::vector<ResetHeaderParserSharedPtr>& resetHeaders() const PURE;\n\n  /**\n   * @return std::chrono::milliseconds upper limit placed on a 
retry\n   * back-off interval parsed from response headers.\n   */\n  virtual std::chrono::milliseconds resetMaxInterval() const PURE;\n};\n\n/**\n * RetryStatus whether request should be retried or not.\n */\nenum class RetryStatus { No, NoOverflow, NoRetryLimitExceeded, Yes };\n\n/**\n * InternalRedirectPolicy from the route configuration.\n */\nclass InternalRedirectPolicy {\npublic:\n  virtual ~InternalRedirectPolicy() = default;\n\n  /**\n   * @return whether internal redirect is enabled on this route.\n   */\n  virtual bool enabled() const PURE;\n\n  /**\n   * @param response_code the response code from the upstream.\n   * @return whether the given response_code should trigger an internal redirect on this route.\n   */\n  virtual bool shouldRedirectForResponseCode(const Http::Code& response_code) const PURE;\n\n  /**\n   * Creates the target route predicates. This should really be called only once for each upstream\n   * redirect response. Creating the predicates lazily to avoid wasting CPU cycles on non-redirect\n   * responses, which should be the most common case.\n   * @return a vector of newly constructed InternalRedirectPredicate instances.\n   */\n  virtual std::vector<InternalRedirectPredicateSharedPtr> predicates() const PURE;\n\n  /**\n   * @return the maximum number of allowed internal redirects on this route.\n   */\n  virtual uint32_t maxInternalRedirects() const PURE;\n\n  /**\n   * @return if it is allowed to follow the redirect with a different scheme in\n   *         the target URI than the downstream request.\n   */\n  virtual bool isCrossSchemeRedirectAllowed() const PURE;\n};\n\n/**\n * Wraps retry state for an active routed request.\n */\nclass RetryState {\npublic:\n  using DoRetryCallback = std::function<void()>;\n\n  virtual ~RetryState() = default;\n\n  /**\n   * @return true if a policy is in place for the active request that allows retries.\n   */\n  virtual bool enabled() PURE;\n\n  /**\n   * Attempts to parse any matching rate 
limited reset headers (RFC 7231), either in the form of an\n   * interval directly, or in the form of a unix timestamp relative to the current system time.\n   * @return the interval if parsing was successful.\n   */\n  virtual absl::optional<std::chrono::milliseconds>\n  parseResetInterval(const Http::ResponseHeaderMap& response_headers) const PURE;\n\n  /**\n   * Determine whether a request should be retried based on the response headers.\n   * @param response_headers supplies the response headers.\n   * @param callback supplies the callback that will be invoked when the retry should take place.\n   *                 This is used to add timed backoff, etc. The callback will never be called\n   *                 inline.\n   * @return RetryStatus if a retry should take place. @param callback will be called at some point\n   *         in the future. Otherwise a retry should not take place and the callback will never be\n   *         called. Calling code should proceed with error handling.\n   */\n  virtual RetryStatus shouldRetryHeaders(const Http::ResponseHeaderMap& response_headers,\n                                         DoRetryCallback callback) PURE;\n\n  /**\n   * Determines whether given response headers would be retried by the retry policy, assuming\n   * sufficient retry budget and circuit breaker headroom. 
This is useful in cases where\n   * the information about whether a response is \"good\" or not is useful, but a retry should\n   * not be attempted for other reasons.\n   * @param response_headers supplies the response headers.\n   * @return bool true if a retry would be warranted based on the retry policy.\n   */\n  virtual bool wouldRetryFromHeaders(const Http::ResponseHeaderMap& response_headers) PURE;\n\n  /**\n   * Determine whether a request should be retried after a reset based on the reason for the reset.\n   * @param reset_reason supplies the reset reason.\n   * @param callback supplies the callback that will be invoked when the retry should take place.\n   *                 This is used to add timed backoff, etc. The callback will never be called\n   *                 inline.\n   * @return RetryStatus if a retry should take place. @param callback will be called at some point\n   *         in the future. Otherwise a retry should not take place and the callback will never be\n   *         called. Calling code should proceed with error handling.\n   */\n  virtual RetryStatus shouldRetryReset(const Http::StreamResetReason reset_reason,\n                                       DoRetryCallback callback) PURE;\n\n  /**\n   * Determine whether a \"hedged\" retry should be sent after the per try\n   * timeout expires. This means the original request is not canceled, but a\n   * new one is sent to hedge against the original request taking even longer.\n   * @param callback supplies the callback that will be invoked when the retry should take place.\n   *                 This is used to add timed backoff, etc. The callback will never be called\n   *                 inline.\n   * @return RetryStatus if a retry should take place. @param callback will be called at some point\n   *         in the future. Otherwise a retry should not take place and the callback will never be\n   *         called. 
Calling code should proceed with error handling.\n   */\n  virtual RetryStatus shouldHedgeRetryPerTryTimeout(DoRetryCallback callback) PURE;\n\n  /**\n   * Called when a host was attempted but the request failed and is eligible for another retry.\n   * Should be used to update whatever internal state depends on previously attempted hosts.\n   * @param host the previously attempted host.\n   */\n  virtual void onHostAttempted(Upstream::HostDescriptionConstSharedPtr host) PURE;\n\n  /**\n   * Determine whether host selection should be reattempted. Applies to host selection during\n   * retries, and is used to provide configurable host selection for retries.\n   * @param host the host under consideration\n   * @return whether host selection should be reattempted\n   */\n  virtual bool shouldSelectAnotherHost(const Upstream::Host& host) PURE;\n\n  /**\n   * Returns a reference to the PriorityLoad that should be used for the next retry.\n   * @param priority_set current priority set.\n   * @param original_priority_load original priority load.\n   * @param priority_mapping_func see @Upstream::RetryPriority::PriorityMappingFunc.\n   * @return HealthyAndDegradedLoad that should be used to select a priority for the next retry.\n   */\n  virtual const Upstream::HealthyAndDegradedLoad& priorityLoadForRetry(\n      const Upstream::PrioritySet& priority_set,\n      const Upstream::HealthyAndDegradedLoad& original_priority_load,\n      const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) PURE;\n  /**\n   * return how many times host selection should be reattempted during host selection.\n   */\n  virtual uint32_t hostSelectionMaxAttempts() const PURE;\n};\n\nusing RetryStatePtr = std::unique_ptr<RetryState>;\n\n/**\n * Per route policy for request shadowing.\n */\nclass ShadowPolicy {\npublic:\n  virtual ~ShadowPolicy() = default;\n\n  /**\n   * @return the name of the cluster that a matching request should be shadowed to. 
Returns empty\n   *         string if no shadowing should take place.\n   */\n  virtual const std::string& cluster() const PURE;\n\n  /**\n   * @return the runtime key that will be used to determine whether an individual request should\n   *         be shadowed. The lack of a key means that all requests will be shadowed. If a key is\n   *         present it will be used to drive random selection in the range 0-10000 for 0.01%\n   *         increments.\n   */\n  virtual const std::string& runtimeKey() const PURE;\n\n  /**\n   * @return the default fraction of traffic the should be shadowed, if the runtime key is not\n   *         present.\n   */\n  virtual const envoy::type::v3::FractionalPercent& defaultValue() const PURE;\n\n  /**\n   * @return true if the trace span should be sampled.\n   */\n  virtual bool traceSampled() const PURE;\n};\n\nusing ShadowPolicyPtr = std::unique_ptr<ShadowPolicy>;\n\n/**\n * All virtual cluster stats. @see stats_macro.h\n */\n#define ALL_VIRTUAL_CLUSTER_STATS(COUNTER)                                                         \\\n  COUNTER(upstream_rq_retry)                                                                       \\\n  COUNTER(upstream_rq_retry_limit_exceeded)                                                        \\\n  COUNTER(upstream_rq_retry_overflow)                                                              \\\n  COUNTER(upstream_rq_retry_success)                                                               \\\n  COUNTER(upstream_rq_timeout)                                                                     \\\n  COUNTER(upstream_rq_total)\n\n/**\n * Struct definition for all virtual cluster stats. 
@see stats_macro.h\n */\nstruct VirtualClusterStats {\n  ALL_VIRTUAL_CLUSTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Virtual cluster definition (allows splitting a virtual host into virtual clusters orthogonal to\n * routes for stat tracking and priority purposes).\n */\nclass VirtualCluster {\npublic:\n  virtual ~VirtualCluster() = default;\n\n  /**\n   * @return the stat-name of the virtual cluster.\n   */\n  virtual Stats::StatName statName() const PURE;\n\n  /**\n   * @return VirtualClusterStats& strongly named stats for this virtual cluster.\n   */\n  virtual VirtualClusterStats& stats() const PURE;\n\n  static VirtualClusterStats generateStats(Stats::Scope& scope) {\n    return {ALL_VIRTUAL_CLUSTER_STATS(POOL_COUNTER(scope))};\n  }\n};\n\nclass RateLimitPolicy;\nclass Config;\n\n/**\n * All route specific config returned by the method at\n *   NamedHttpFilterConfigFactory::createRouteSpecificFilterConfig\n * should be derived from this class.\n */\nclass RouteSpecificFilterConfig {\npublic:\n  virtual ~RouteSpecificFilterConfig() = default;\n};\nusing RouteSpecificFilterConfigConstSharedPtr = std::shared_ptr<const RouteSpecificFilterConfig>;\n\n/**\n * Virtual host definition.\n */\nclass VirtualHost {\npublic:\n  virtual ~VirtualHost() = default;\n\n  /**\n   * @return const CorsPolicy* the CORS policy for this virtual host.\n   */\n  virtual const CorsPolicy* corsPolicy() const PURE;\n\n  /**\n   * @return the stat-name of the virtual host.\n   */\n  virtual Stats::StatName statName() const PURE;\n\n  /**\n   * @return const RateLimitPolicy& the rate limit policy for the virtual host.\n   */\n  virtual const RateLimitPolicy& rateLimitPolicy() const PURE;\n\n  /**\n   * @return const Config& the RouteConfiguration that owns this virtual host.\n   */\n  virtual const Config& routeConfig() const PURE;\n\n  /**\n   * @return const RouteSpecificFilterConfig* the per-filter config pre-processed object for\n   *  the given filter name. 
If there is not per-filter config, or the filter factory returns\n   *  nullptr, nullptr is returned.\n   */\n  virtual const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const PURE;\n\n  /**\n   * This is a helper on top of perFilterConfig() that casts the return object to the specified\n   * type.\n   */\n  template <class Derived> const Derived* perFilterConfigTyped(const std::string& name) const {\n    return dynamic_cast<const Derived*>(perFilterConfig(name));\n  }\n\n  /**\n   * @return bool whether to include the request count header in upstream requests.\n   */\n  virtual bool includeAttemptCountInRequest() const PURE;\n\n  /**\n   * @return bool whether to include the request count header in the downstream response.\n   */\n  virtual bool includeAttemptCountInResponse() const PURE;\n\n  /**\n   * @return uint32_t any route cap on bytes which should be buffered for shadowing or retries.\n   *         This is an upper bound so does not necessarily reflect the bytes which will be buffered\n   *         as other limits may apply.\n   *         If a per route limit exists, it takes precedence over this configuration.\n   *         Unlike some other buffer limits, 0 here indicates buffering should not be performed\n   *         rather than no limit applies.\n   */\n  virtual uint32_t retryShadowBufferLimit() const PURE;\n};\n\n/**\n * Route level hedging policy.\n */\nclass HedgePolicy {\npublic:\n  virtual ~HedgePolicy() = default;\n\n  /**\n   * @return number of upstream requests that should be sent initially.\n   */\n  virtual uint32_t initialRequests() const PURE;\n\n  /**\n   * @return percent chance that an additional upstream request should be sent\n   * on top of the value from initialRequests().\n   */\n  virtual const envoy::type::v3::FractionalPercent& additionalRequestChance() const PURE;\n\n  /**\n   * @return bool indicating whether request hedging should occur when a request\n   * is retried due to a per try timeout. 
The alternative is the original request\n   * will be canceled immediately.\n   */\n  virtual bool hedgeOnPerTryTimeout() const PURE;\n};\n\nclass MetadataMatchCriterion {\npublic:\n  virtual ~MetadataMatchCriterion() = default;\n\n  /*\n   * @return const std::string& the name of the metadata key\n   */\n  virtual const std::string& name() const PURE;\n\n  /*\n   * @return const Envoy::HashedValue& the value for the metadata key\n   */\n  virtual const HashedValue& value() const PURE;\n};\n\nusing MetadataMatchCriterionConstSharedPtr = std::shared_ptr<const MetadataMatchCriterion>;\n\nclass MetadataMatchCriteria;\nusing MetadataMatchCriteriaConstPtr = std::unique_ptr<const MetadataMatchCriteria>;\n\nclass MetadataMatchCriteria {\npublic:\n  virtual ~MetadataMatchCriteria() = default;\n\n  /*\n   * @return std::vector<MetadataMatchCriterionConstSharedPtr>& a vector of\n   * metadata to be matched against upstream endpoints when load\n   * balancing, sorted lexically by name.\n   */\n  virtual const std::vector<MetadataMatchCriterionConstSharedPtr>&\n  metadataMatchCriteria() const PURE;\n\n  /**\n   * Creates a new MetadataMatchCriteria, merging existing\n   * metadata criteria with the provided criteria. The result criteria is the\n   * combination of both sets of criteria, with those from the metadata_matches\n   * ProtobufWkt::Struct taking precedence.\n   * @param metadata_matches supplies the new criteria.\n   * @return MetadataMatchCriteriaConstPtr the result criteria.\n   */\n  virtual MetadataMatchCriteriaConstPtr\n  mergeMatchCriteria(const ProtobufWkt::Struct& metadata_matches) const PURE;\n\n  /**\n   * Creates a new MetadataMatchCriteria with criteria vector reduced to given names\n   * @param names names of metadata keys to preserve\n   * @return MetadataMatchCriteriaConstPtr the result criteria. 
Returns nullptr if the result\n   * criteria are empty.\n   */\n  virtual MetadataMatchCriteriaConstPtr\n  filterMatchCriteria(const std::set<std::string>& names) const PURE;\n};\n\n/**\n * Criterion that a route entry uses for matching TLS connection context.\n */\nclass TlsContextMatchCriteria {\npublic:\n  virtual ~TlsContextMatchCriteria() = default;\n\n  /**\n   * @return bool indicating whether the client presented credentials.\n   */\n  virtual const absl::optional<bool>& presented() const PURE;\n\n  /**\n   * @return bool indicating whether the client credentials successfully validated against the TLS\n   * context validation context.\n   */\n  virtual const absl::optional<bool>& validated() const PURE;\n};\n\nusing TlsContextMatchCriteriaConstPtr = std::unique_ptr<const TlsContextMatchCriteria>;\n\n/**\n * Type of path matching that a route entry uses.\n */\nenum class PathMatchType {\n  None,\n  Prefix,\n  Exact,\n  Regex,\n};\n\n/**\n * Criterion that a route entry uses for matching a particular path.\n */\nclass PathMatchCriterion {\npublic:\n  virtual ~PathMatchCriterion() = default;\n\n  /**\n   * @return PathMatchType type of path match.\n   */\n  virtual PathMatchType matchType() const PURE;\n\n  /**\n   * @return const std::string& the string with which to compare paths.\n   */\n  virtual const std::string& matcher() const PURE;\n};\n\n/**\n * Base class for all route typed metadata factories.\n */\nclass HttpRouteTypedMetadataFactory : public Envoy::Config::TypedMetadataFactory {};\n\n/**\n * An individual resolved route entry.\n */\nclass RouteEntry : public ResponseEntry {\npublic:\n  ~RouteEntry() override = default;\n\n  /**\n   * @return const std::string& the upstream cluster that owns the route.\n   */\n  virtual const std::string& clusterName() const PURE;\n\n  /**\n   * Returns the HTTP status code to use when configured cluster is not found.\n   * @return Http::Code to use when configured cluster is not found.\n   */\n  virtual 
Http::Code clusterNotFoundResponseCode() const PURE;\n\n  /**\n   * @return const CorsPolicy* the CORS policy for this virtual host.\n   */\n  virtual const CorsPolicy* corsPolicy() const PURE;\n\n  /**\n   * Do potentially destructive header transforms on request headers prior to forwarding. For\n   * example URL prefix rewriting, adding headers, etc. This should only be called ONCE\n   * immediately prior to forwarding. It is done this way vs. copying for performance reasons.\n   * @param headers supplies the request headers, which may be modified during this call.\n   * @param stream_info holds additional information about the request.\n   * @param insert_envoy_original_path insert x-envoy-original-path header if path rewritten?\n   */\n  virtual void finalizeRequestHeaders(Http::RequestHeaderMap& headers,\n                                      const StreamInfo::StreamInfo& stream_info,\n                                      bool insert_envoy_original_path) const PURE;\n\n  /**\n   * @return const HashPolicy* the optional hash policy for the route.\n   */\n  virtual const Http::HashPolicy* hashPolicy() const PURE;\n\n  /**\n   * @return const HedgePolicy& the hedge policy for the route. All routes have a hedge policy even\n   *         if it is empty and does not allow for hedged requests.\n   */\n  virtual const HedgePolicy& hedgePolicy() const PURE;\n\n  /**\n   * @return the priority of the route.\n   */\n  virtual Upstream::ResourcePriority priority() const PURE;\n\n  /**\n   * @return const RateLimitPolicy& the rate limit policy for the route.\n   */\n  virtual const RateLimitPolicy& rateLimitPolicy() const PURE;\n\n  /**\n   * @return const RetryPolicy& the retry policy for the route. All routes have a retry policy even\n   *         if it is empty and does not allow retries.\n   */\n  virtual const RetryPolicy& retryPolicy() const PURE;\n\n  /**\n   * @return const InternalRedirectPolicy& the internal redirect policy for the route. 
All routes\n   *         have an internal redirect policy even if it is not enabled, which means redirects are\n   *         simply proxied as normal responses.\n   */\n  virtual const InternalRedirectPolicy& internalRedirectPolicy() const PURE;\n\n  /**\n   * @return uint32_t any route cap on bytes which should be buffered for shadowing or retries.\n   *         This is an upper bound so does not necessarily reflect the bytes which will be buffered\n   *         as other limits may apply.\n   *         Unlike some other buffer limits, 0 here indicates buffering should not be performed\n   *         rather than no limit applies.\n   */\n  virtual uint32_t retryShadowBufferLimit() const PURE;\n\n  /**\n   * @return const std::vector<ShadowPolicy>& the shadow policies for the route. The vector is empty\n   *         if no shadowing takes place.\n   */\n  virtual const std::vector<ShadowPolicyPtr>& shadowPolicies() const PURE;\n\n  /**\n   * @return std::chrono::milliseconds the route's timeout.\n   */\n  virtual std::chrono::milliseconds timeout() const PURE;\n\n  /**\n   * @return optional<std::chrono::milliseconds> the route's idle timeout. 
Zero indicates a\n   *         disabled idle timeout, while nullopt indicates deference to the global timeout.\n   */\n  virtual absl::optional<std::chrono::milliseconds> idleTimeout() const PURE;\n\n  /**\n   * @return optional<std::chrono::milliseconds> the route's maximum stream duration.\n   */\n  virtual absl::optional<std::chrono::milliseconds> maxStreamDuration() const PURE;\n\n  /**\n   * @return optional<std::chrono::milliseconds> the max grpc-timeout this route will allow.\n   */\n  virtual absl::optional<std::chrono::milliseconds> grpcTimeoutHeaderMax() const PURE;\n\n  /**\n   * @return optional<std::chrono::milliseconds> the delta between grpc-timeout and enforced grpc\n   *         timeout.\n   */\n  virtual absl::optional<std::chrono::milliseconds> grpcTimeoutHeaderOffset() const PURE;\n\n  /**\n   * @return absl::optional<std::chrono::milliseconds> the maximum allowed timeout value derived\n   * from 'grpc-timeout' header of a gRPC request. Non-present value disables use of 'grpc-timeout'\n   * header, while 0 represents infinity.\n   */\n  virtual absl::optional<std::chrono::milliseconds> maxGrpcTimeout() const PURE;\n\n  /**\n   * @return absl::optional<std::chrono::milliseconds> the timeout offset to apply to the timeout\n   * provided by the 'grpc-timeout' header of a gRPC request. 
This value will be positive and should\n   * be subtracted from the value provided by the header.\n   */\n  virtual absl::optional<std::chrono::milliseconds> grpcTimeoutOffset() const PURE;\n\n  /**\n   * Determine whether a specific request path belongs to a virtual cluster for use in stats, etc.\n   * @param headers supplies the request headers.\n   * @return the virtual cluster or nullptr if there is no match.\n   */\n  virtual const VirtualCluster* virtualCluster(const Http::HeaderMap& headers) const PURE;\n\n  /**\n   * @return const VirtualHost& the virtual host that owns the route.\n   */\n  virtual const VirtualHost& virtualHost() const PURE;\n\n  /**\n   * @return bool true if the :authority header should be overwritten with the upstream hostname.\n   */\n  virtual bool autoHostRewrite() const PURE;\n\n  /**\n   * @return MetadataMatchCriteria* the metadata that a subset load balancer should match when\n   * selecting an upstream host\n   */\n  virtual const MetadataMatchCriteria* metadataMatchCriteria() const PURE;\n\n  /**\n   * @return const std::multimap<std::string, std::string> the opaque configuration associated\n   *         with the route\n   */\n  virtual const std::multimap<std::string, std::string>& opaqueConfig() const PURE;\n\n  /**\n   * @return bool true if the virtual host rate limits should be included.\n   */\n  virtual bool includeVirtualHostRateLimits() const PURE;\n\n  /**\n   * @return const Envoy::Config::TypedMetadata& return the typed metadata provided in the config\n   * for this route.\n   */\n  virtual const Envoy::Config::TypedMetadata& typedMetadata() const PURE;\n\n  /**\n   * @return const envoy::config::core::v3::Metadata& return the metadata provided in the config for\n   * this route.\n   */\n  virtual const envoy::config::core::v3::Metadata& metadata() const PURE;\n\n  /**\n   * @return TlsContextMatchCriteria* the tls context match criterion for this route. 
If there is no\n   * tls context match criteria, nullptr is returned.\n   */\n  virtual const TlsContextMatchCriteria* tlsContextMatchCriteria() const PURE;\n\n  /**\n   * @return const PathMatchCriterion& the match criterion for this route.\n   */\n  virtual const PathMatchCriterion& pathMatchCriterion() const PURE;\n\n  /**\n   * @return const RouteSpecificFilterConfig* the per-filter config pre-processed object for\n   *  the given filter name. If there is not per-filter config, or the filter factory returns\n   *  nullptr, nullptr is returned.\n   */\n  virtual const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const PURE;\n\n  /**\n   * This is a helper on top of perFilterConfig() that casts the return object to the specified\n   * type.\n   */\n  template <class Derived> const Derived* perFilterConfigTyped(const std::string& name) const {\n    return dynamic_cast<const Derived*>(perFilterConfig(name));\n  };\n\n  /**\n   * This is a helper to get the route's per-filter config if it exists, otherwise the virtual\n   * host's. Or nullptr if none of them exist.\n   */\n  template <class Derived>\n  const Derived* mostSpecificPerFilterConfigTyped(const std::string& name) const {\n    const Derived* config = perFilterConfigTyped<Derived>(name);\n    return config ? 
config : virtualHost().perFilterConfigTyped<Derived>(name);\n  }\n\n  /**\n   * True if the virtual host this RouteEntry belongs to is configured to include the attempt\n   * count header.\n   * @return bool whether x-envoy-attempt-count should be included on the upstream request.\n   */\n  virtual bool includeAttemptCountInRequest() const PURE;\n\n  /**\n   * True if the virtual host this RouteEntry belongs to is configured to include the attempt\n   * count header.\n   * @return bool whether x-envoy-attempt-count should be included on the downstream response.\n   */\n  virtual bool includeAttemptCountInResponse() const PURE;\n\n  using UpgradeMap = std::map<std::string, bool>;\n  /**\n   * @return a map of route-specific upgrades to their enabled/disabled status.\n   */\n  virtual const UpgradeMap& upgradeMap() const PURE;\n\n  using ConnectConfig = envoy::config::route::v3::RouteAction::UpgradeConfig::ConnectConfig;\n  /**\n   * If present, informs how to handle proxying CONNECT requests on this route.\n   */\n  virtual const absl::optional<ConnectConfig>& connectConfig() const PURE;\n\n  /**\n   * @return std::string& the name of the route.\n   */\n  virtual const std::string& routeName() const PURE;\n};\n\n/**\n * An interface representing the Decorator.\n */\nclass Decorator {\npublic:\n  virtual ~Decorator() = default;\n\n  /**\n   * This method decorates the supplied span.\n   * @param Tracing::Span& the span.\n   */\n  virtual void apply(Tracing::Span& span) const PURE;\n\n  /**\n   * This method returns the operation name.\n   * @return the operation name\n   */\n  virtual const std::string& getOperation() const PURE;\n\n  /**\n   * This method returns whether the decorator information\n   * should be propagated to other services.\n   * @return whether to propagate\n   */\n  virtual bool propagate() const PURE;\n};\n\nusing DecoratorConstPtr = std::unique_ptr<const Decorator>;\n\n/**\n * An interface representing the Tracing for the route configuration.\n 
*/\nclass RouteTracing {\npublic:\n  virtual ~RouteTracing() = default;\n\n  /**\n   * This method returns the client sampling percentage.\n   * @return the client sampling percentage\n   */\n  virtual const envoy::type::v3::FractionalPercent& getClientSampling() const PURE;\n\n  /**\n   * This method returns the random sampling percentage.\n   * @return the random sampling percentage\n   */\n  virtual const envoy::type::v3::FractionalPercent& getRandomSampling() const PURE;\n\n  /**\n   * This method returns the overall sampling percentage.\n   * @return the overall sampling percentage\n   */\n  virtual const envoy::type::v3::FractionalPercent& getOverallSampling() const PURE;\n\n  /**\n   * This method returns the route level tracing custom tags.\n   * @return the tracing custom tags.\n   */\n  virtual const Tracing::CustomTagMap& getCustomTags() const PURE;\n};\n\nusing RouteTracingConstPtr = std::unique_ptr<const RouteTracing>;\n\n/**\n * An interface that holds a DirectResponseEntry or RouteEntry for a request.\n */\nclass Route {\npublic:\n  virtual ~Route() = default;\n\n  /**\n   * @return the direct response entry or nullptr if there is no direct response for the request.\n   */\n  virtual const DirectResponseEntry* directResponseEntry() const PURE;\n\n  /**\n   * @return the route entry or nullptr if there is no matching route for the request.\n   */\n  virtual const RouteEntry* routeEntry() const PURE;\n\n  /**\n   * @return the decorator or nullptr if not defined for the request.\n   */\n  virtual const Decorator* decorator() const PURE;\n\n  /**\n   * @return the tracing config or nullptr if not defined for the request.\n   */\n  virtual const RouteTracing* tracingConfig() const PURE;\n\n  /**\n   * @return const RouteSpecificFilterConfig* the per-filter config pre-processed object for\n   *  the given filter name. 
If there is not per-filter config, or the filter factory returns\n   *  nullptr, nullptr is returned.\n   */\n  virtual const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const PURE;\n\n  /**\n   * This is a helper on top of perFilterConfig() that casts the return object to the specified\n   * type.\n   */\n  template <class Derived> const Derived* perFilterConfigTyped(const std::string& name) const {\n    return dynamic_cast<const Derived*>(perFilterConfig(name));\n  }\n};\n\nusing RouteConstSharedPtr = std::shared_ptr<const Route>;\n\n/**\n * RouteCallback, returns one of these enums to the route matcher to indicate\n * if the matched route has been accepted or it wants the route matching to\n * continue.\n */\nenum class RouteMatchStatus {\n  // Continue matching route\n  Continue,\n  // Accept matched route\n  Accept\n};\n\n/**\n * RouteCallback is passed this enum to indicate if more routes are available for evaluation.\n */\nenum class RouteEvalStatus {\n  // Has more routes that can be evaluated for match.\n  HasMoreRoutes,\n  // All routes have been evaluated for match.\n  NoMoreRoutes\n};\n\n/**\n * RouteCallback can be used to override routing decision made by the Route::Config::route,\n * this callback is passed the RouteConstSharedPtr, when a matching route is found, and\n * RouteEvalStatus indicating whether there are more routes available for evaluation.\n *\n * RouteCallback will be called back only when at least one matching route is found, if no matching\n * routes are found RouteCallback will not be invoked. 
RouteCallback can return one of the\n * RouteMatchStatus enum to indicate if the match has been accepted or should the route match\n * evaluation continue.\n *\n * Returning RouteMatchStatus::Continue, when no more routes available for evaluation will result in\n * no further callbacks and no route is deemed to be accepted and nullptr is returned to the caller\n * of Route::Config::route.\n */\nusing RouteCallback = std::function<RouteMatchStatus(RouteConstSharedPtr, RouteEvalStatus)>;\n\n/**\n * The router configuration.\n */\nclass Config {\npublic:\n  virtual ~Config() = default;\n\n  /**\n   * Based on the incoming HTTP request headers, determine the target route (containing either a\n   * route entry or a direct response entry) for the request.\n   * @param headers supplies the request headers.\n   * @param random_value supplies the random seed to use if a runtime choice is required. This\n   *        allows stable choices between calls if desired.\n   * @return the route or nullptr if there is no matching route for the request.\n   */\n  virtual RouteConstSharedPtr route(const Http::RequestHeaderMap& headers,\n                                    const StreamInfo::StreamInfo& stream_info,\n                                    uint64_t random_value) const PURE;\n\n  /**\n   * Based on the incoming HTTP request headers, determine the target route (containing either a\n   * route entry or a direct response entry) for the request.\n   *\n   * Invokes callback with matched route, callback can choose to accept the route by returning\n   * RouteStatus::Stop or continue route match from last matched route by returning\n   * RouteMatchStatus::Continue, when more routes are available.\n   *\n   * @param cb supplies callback to be invoked upon route match.\n   * @param headers supplies the request headers.\n   * @param random_value supplies the random seed to use if a runtime choice is required. 
This\n   *        allows stable choices between calls if desired.\n   * @return the route accepted by the callback or nullptr if no match found or none of route is\n   * accepted by the callback.\n   */\n  virtual RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers,\n                                    const StreamInfo::StreamInfo& stream_info,\n                                    uint64_t random_value) const PURE;\n\n  /**\n   * Return a list of headers that will be cleaned from any requests that are not from an internal\n   * (RFC1918) source.\n   */\n  virtual const std::list<Http::LowerCaseString>& internalOnlyHeaders() const PURE;\n\n  /**\n   * @return const std::string the RouteConfiguration name.\n   */\n  virtual const std::string& name() const PURE;\n\n  /**\n   * @return whether router configuration uses VHDS.\n   */\n  virtual bool usesVhds() const PURE;\n\n  /**\n   * @return bool whether most specific header mutations should take precedence. 
The default\n   * evaluation order is route level, then virtual host level and finally global connection\n   * manager level.\n   */\n  virtual bool mostSpecificHeaderMutationsWins() const PURE;\n};\n\nusing ConfigConstSharedPtr = std::shared_ptr<const Config>;\n\nclass GenericConnectionPoolCallbacks;\nclass GenericUpstream;\n\n/**\n * An API for wrapping either an HTTP or a TCP connection pool.\n *\n * The GenericConnPool exists to create a GenericUpstream handle via a call to\n * newStream resulting in an eventual call to onPoolReady\n */\nclass GenericConnPool {\npublic:\n  virtual ~GenericConnPool() = default;\n\n  /**\n   * Called to create a new HTTP stream or TCP connection for \"CONNECT streams\".\n   *\n   * The implementation of the GenericConnPool will either call\n   * GenericConnectionPoolCallbacks::onPoolReady\n   * when a stream is available or GenericConnectionPoolCallbacks::onPoolFailure\n   * if stream creation fails.\n   *\n   * The caller is responsible for calling cancelAnyPendingStream() if stream\n   * creation is no longer desired. 
newStream may only be called once per\n   * GenericConnPool.\n   *\n   * @param callbacks callbacks to communicate stream failure or creation on.\n   */\n  virtual void newStream(GenericConnectionPoolCallbacks* callbacks) PURE;\n  /**\n   * Called to cancel any pending newStream request,\n   */\n  virtual bool cancelAnyPendingStream() PURE;\n  /**\n   * @return optionally returns the protocol for the connection pool.\n   */\n  virtual absl::optional<Http::Protocol> protocol() const PURE;\n  /**\n   * @return optionally returns the host for the connection pool.\n   */\n  virtual Upstream::HostDescriptionConstSharedPtr host() const PURE;\n};\n\n/**\n * An API for the interactions the upstream stream needs to have with the downstream stream\n * and/or router components\n */\nclass UpstreamToDownstream : public Http::ResponseDecoder, public Http::StreamCallbacks {\npublic:\n  /**\n   * @return return the routeEntry for the downstream stream.\n   */\n  virtual const RouteEntry& routeEntry() const PURE;\n  /**\n   * @return return the connection for the downstream stream.\n   */\n  virtual const Network::Connection& connection() const PURE;\n};\n\n/**\n * An API for wrapping callbacks from either an HTTP or a TCP connection pool.\n *\n * Just like the connection pool callbacks, the GenericConnectionPoolCallbacks\n * will either call onPoolReady when a GenericUpstream is ready, or\n * onPoolFailure if a connection/stream can not be established.\n */\nclass GenericConnectionPoolCallbacks {\npublic:\n  virtual ~GenericConnectionPoolCallbacks() = default;\n\n  /**\n   * Called to indicate a failure for GenericConnPool::newStream to establish a stream.\n   *\n   * @param reason supplies the failure reason.\n   * @param transport_failure_reason supplies the details of the transport failure reason.\n   * @param host supplies the description of the host that caused the failure. 
This may be nullptr\n   *             if no host was involved in the failure (for example overflow).\n   */\n  virtual void onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                             absl::string_view transport_failure_reason,\n                             Upstream::HostDescriptionConstSharedPtr host) PURE;\n  /**\n   * Called when GenericConnPool::newStream has established a new stream.\n   *\n   * @param upstream supplies the generic upstream for the stream.\n   * @param host supplies the description of the host that will carry the request. For logical\n   *             connection pools the description may be different each time this is called.\n   * @param upstream_local_address supplies the local address of the upstream connection.\n   * @param info supplies the stream info object associated with the upstream connection.\n   */\n  virtual void onPoolReady(std::unique_ptr<GenericUpstream>&& upstream,\n                           Upstream::HostDescriptionConstSharedPtr host,\n                           const Network::Address::InstanceConstSharedPtr& upstream_local_address,\n                           const StreamInfo::StreamInfo& info) PURE;\n\n  // @return the UpstreamToDownstream interface for this stream.\n  //\n  // This is the interface for all interactions the upstream stream needs to have with the\n  // downstream stream. It is in the GenericConnectionPoolCallbacks as the GenericConnectionPool\n  // creates the GenericUpstream, and the GenericUpstream will need this interface.\n  virtual UpstreamToDownstream& upstreamToDownstream() PURE;\n};\n\n/**\n * An API for sending information to either a TCP or HTTP upstream.\n *\n * It is similar logically to RequestEncoder, only without the getStream interface.\n */\nclass GenericUpstream {\npublic:\n  virtual ~GenericUpstream() = default;\n  /**\n   * Encode a data frame.\n   * @param data supplies the data to encode. 
The data may be moved by the encoder.\n   * @param end_stream supplies whether this is the last data frame.\n   */\n  virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE;\n  /**\n   * Encode metadata.\n   * @param metadata_map_vector is the vector of metadata maps to encode.\n   */\n  virtual void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) PURE;\n  /**\n   * Encode headers, optionally indicating end of stream.\n   * @param headers supplies the header map to encode.\n   * @param end_stream supplies whether this is a header only request.\n   */\n  virtual void encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) PURE;\n  /**\n   * Encode trailers. This implicitly ends the stream.\n   * @param trailers supplies the trailers to encode.\n   */\n  virtual void encodeTrailers(const Http::RequestTrailerMap& trailers) PURE;\n  /**\n   * Enable/disable further data from this stream.\n   */\n  virtual void readDisable(bool disable) PURE;\n  /**\n   * Reset the stream. No events will fire beyond this point.\n   * @param reason supplies the reset reason.\n   */\n  virtual void resetStream() PURE;\n};\n\nusing GenericConnPoolPtr = std::unique_ptr<GenericConnPool>;\n\n/*\n * A factory for creating generic connection pools.\n */\nclass GenericConnPoolFactory : public Envoy::Config::TypedFactory {\npublic:\n  ~GenericConnPoolFactory() override = default;\n\n  /*\n   * @param options for creating the transport socket\n   * @return may be null\n   */\n  virtual GenericConnPoolPtr\n  createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect,\n                        const RouteEntry& route_entry,\n                        absl::optional<Http::Protocol> downstream_protocol,\n                        Upstream::LoadBalancerContext* ctx) const PURE;\n};\n\nusing GenericConnPoolFactoryPtr = std::unique_ptr<GenericConnPoolFactory>;\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/router/router_ratelimit.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/ratelimit/ratelimit.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Base interface for generic rate limit override action.\n */\nclass RateLimitOverrideAction {\npublic:\n  virtual ~RateLimitOverrideAction() = default;\n\n  /**\n   * Potentially populate the descriptors 'limit' property with a RateLimitOverride instance\n   * @param descriptor supplies the descriptor to optionally fill.\n   * @param metadata supplies the dynamic metadata for the request.\n   * @return true if RateLimitOverride was set in the descriptor.\n   */\n  virtual bool populateOverride(RateLimit::Descriptor& descriptor,\n                                const envoy::config::core::v3::Metadata* metadata) const PURE;\n};\n\nusing RateLimitOverrideActionPtr = std::unique_ptr<RateLimitOverrideAction>;\n\n/**\n * Base interface for generic rate limit action.\n */\nclass RateLimitAction {\npublic:\n  virtual ~RateLimitAction() = default;\n\n  /**\n   * Potentially append a descriptor entry to the end of descriptor.\n   * @param route supplies the target route for the request.\n   * @param descriptor supplies the descriptor to optionally fill.\n   * @param local_service_cluster supplies the name of the local service cluster.\n   * @param headers supplies the header for the request.\n   * @param remote_address supplies the trusted downstream address for the connection.\n   * @param dynamic_metadata supplies the dynamic metadata for the request\n   * @return true if the RateLimitAction populated the descriptor.\n   */\n  virtual bool\n  populateDescriptor(const RouteEntry& route, RateLimit::Descriptor& descriptor,\n                     const std::string& local_service_cluster, const Http::HeaderMap& headers,\n                     const 
Network::Address::Instance& remote_address,\n                     const envoy::config::core::v3::Metadata* dynamic_metadata) const PURE;\n};\n\nusing RateLimitActionPtr = std::unique_ptr<RateLimitAction>;\n\n/**\n * Rate limit configuration.\n */\nclass RateLimitPolicyEntry {\npublic:\n  virtual ~RateLimitPolicyEntry() = default;\n\n  /**\n   * @return the stage value that the configuration is applicable to.\n   */\n  virtual uint64_t stage() const PURE;\n\n  /**\n   * @return runtime key to be set to disable the configuration.\n   */\n  virtual const std::string& disableKey() const PURE;\n\n  /**\n   * Potentially populate the descriptor array with new descriptors to query.\n   * @param route supplies the target route for the request.\n   * @param descriptors supplies the descriptor array to optionally fill.\n   * @param local_service_cluster supplies the name of the local service cluster.\n   * @param headers supplies the header for the request.\n   * @param remote_address supplies the trusted downstream address for the connection.\n   * @param dynamic_metadata supplies the dynamic metadata for the request.\n   */\n  virtual void\n  populateDescriptors(const RouteEntry& route, std::vector<RateLimit::Descriptor>& descriptors,\n                      const std::string& local_service_cluster, const Http::HeaderMap& headers,\n                      const Network::Address::Instance& remote_address,\n                      const envoy::config::core::v3::Metadata* dynamic_metadata) const PURE;\n};\n\n/**\n * Rate limiting policy.\n */\nclass RateLimitPolicy {\npublic:\n  virtual ~RateLimitPolicy() = default;\n\n  /**\n   * @return true if there is no rate limit policy for all stage settings.\n   */\n  virtual bool empty() const PURE;\n\n  /**\n   * @param stage the value for finding applicable rate limit configurations.\n   * @return set of RateLimitPolicyEntry that are applicable for a stage.\n   */\n  virtual const std::vector<std::reference_wrapper<const 
RateLimitPolicyEntry>>&\n  getApplicableRateLimit(uint64_t stage) const PURE;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/router/scopes.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/config_provider.h\"\n#include \"envoy/router/router.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Scope key fragment base class.\n */\nclass ScopeKeyFragmentBase {\npublic:\n  bool operator!=(const ScopeKeyFragmentBase& other) const { return !(*this == other); }\n\n  bool operator==(const ScopeKeyFragmentBase& other) const {\n    if (typeid(*this) == typeid(other)) {\n      return hash() == other.hash();\n    }\n    return false;\n  }\n  virtual ~ScopeKeyFragmentBase() = default;\n\n  // Hash of the fragment.\n  virtual uint64_t hash() const PURE;\n};\n\n/**\n *  Scope Key is composed of non-null fragments.\n **/\nclass ScopeKey {\npublic:\n  ScopeKey() = default;\n  ScopeKey(ScopeKey&& other) = default;\n\n  // Scopekey is not copy-assignable and copy-constructible as it contains unique_ptr inside itself.\n  ScopeKey(const ScopeKey&) = delete;\n  ScopeKey operator=(const ScopeKey&) = delete;\n\n  // Caller should guarantee the fragment is not nullptr.\n  void addFragment(std::unique_ptr<ScopeKeyFragmentBase>&& fragment) {\n    ASSERT(fragment != nullptr, \"null fragment not allowed in ScopeKey.\");\n    updateHash(*fragment);\n    fragments_.emplace_back(std::move(fragment));\n  }\n\n  uint64_t hash() const { return hash_; }\n  bool operator!=(const ScopeKey& other) const;\n  bool operator==(const ScopeKey& other) const;\n\nprivate:\n  // Update the key's hash with the new fragment hash.\n  void updateHash(const ScopeKeyFragmentBase& fragment) {\n    std::stringbuf buffer;\n    buffer.sputn(reinterpret_cast<const char*>(&hash_), sizeof(hash_));\n    const auto& fragment_hash = fragment.hash();\n    buffer.sputn(reinterpret_cast<const char*>(&fragment_hash), sizeof(fragment_hash));\n    hash_ = HashUtil::xxHash64(buffer.str());\n  }\n\n  uint64_t hash_{0};\n  std::vector<std::unique_ptr<ScopeKeyFragmentBase>> fragments_;\n};\n\nusing ScopeKeyPtr = std::unique_ptr<ScopeKey>;\n\n// 
String fragment.\nclass StringKeyFragment : public ScopeKeyFragmentBase {\npublic:\n  explicit StringKeyFragment(absl::string_view value)\n      : value_(value), hash_(HashUtil::xxHash64(value_)) {}\n\n  uint64_t hash() const override { return hash_; }\n\nprivate:\n  const std::string value_;\n  const uint64_t hash_;\n};\n\n/**\n * The scoped routing configuration.\n */\nclass ScopedConfig : public Envoy::Config::ConfigProvider::Config {\npublic:\n  ~ScopedConfig() override = default;\n\n  /**\n   * Based on the incoming HTTP request headers, returns the configuration to use for selecting a\n   * target route.\n   * @param headers the request headers to match the scoped routing configuration against.\n   * @return ConfigConstSharedPtr the router's Config matching the request headers.\n   */\n  virtual ConfigConstSharedPtr getRouteConfig(const Http::HeaderMap& headers) const PURE;\n\n  /**\n   * Based on the incoming HTTP request headers, returns the hash value of its scope key.\n   * @param headers the request headers to match the scoped routing configuration against.\n   * @return unique_ptr of the scope key computed from header.\n   */\n  virtual ScopeKeyPtr computeScopeKey(const Http::HeaderMap&) const { return {}; }\n};\n\nusing ScopedConfigConstSharedPtr = std::shared_ptr<const ScopedConfig>;\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/router/shadow_writer.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/message.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Interface used to shadow complete requests to an alternate upstream cluster in a \"fire and\n * forget\" fashion. Right now this interface takes a fully buffered request and cannot be used for\n * streaming. This is sufficient for current use cases.\n */\nclass ShadowWriter {\npublic:\n  virtual ~ShadowWriter() = default;\n\n  /**\n   * Shadow a request.\n   * @param cluster supplies the cluster name to shadow to.\n   * @param message supplies the complete request to shadow.\n   * @param timeout supplies the shadowed request timeout.\n   */\n  virtual void shadow(const std::string& cluster, Http::RequestMessagePtr&& request,\n                      const Http::AsyncClient::RequestOptions& options) PURE;\n};\n\nusing ShadowWriterPtr = std::unique_ptr<ShadowWriter>;\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/router/string_accessor.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Contains a string in a form which is usable with FilterState and\n * allows lazy evaluation if needed. All values meant to be accessible to the\n * custom request/response header mechanism must use this type.\n */\nclass StringAccessor : public StreamInfo::FilterState::Object {\npublic:\n  /**\n   * @return the string the accessor represents.\n   */\n  virtual absl::string_view asString() const PURE;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/runtime/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"runtime_interface\",\n    hdrs = [\"runtime.h\"],\n    external_deps = [\n        \"abseil_node_hash_map\",\n        \"abseil_optional\",\n    ],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/singleton:threadsafe_singleton\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/runtime/runtime.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <limits>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stats/store.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/singleton/threadsafe_singleton.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\n\nnamespace Upstream {\nclass ClusterManager;\n}\n\nnamespace Runtime {\n\n/**\n * A snapshot of runtime data.\n */\nclass Snapshot : public ThreadLocal::ThreadLocalObject {\npublic:\n  struct Entry {\n    std::string raw_string_value_;\n    absl::optional<uint64_t> uint_value_;\n    absl::optional<double> double_value_;\n    absl::optional<envoy::type::v3::FractionalPercent> fractional_percent_value_;\n    absl::optional<bool> bool_value_;\n  };\n\n  using EntryMap = absl::flat_hash_map<std::string, Entry>;\n\n  /**\n   * A provider of runtime values. One or more of these compose the snapshot's source of values,\n   * where successive layers override the previous ones.\n   */\n  class OverrideLayer {\n  public:\n    virtual ~OverrideLayer() = default;\n\n    /**\n     * @return const absl::flat_hash_map<std::string, Entry>& the values in this layer.\n     */\n    virtual const EntryMap& values() const PURE;\n\n    /**\n     * @return const std::string& a user-friendly alias for this layer, e.g. 
\"admin\" or \"disk\".\n     */\n    virtual const std::string& name() const PURE;\n  };\n\n  using OverrideLayerConstPtr = std::unique_ptr<const OverrideLayer>;\n\n  /**\n   * Returns true if a deprecated feature is allowed.\n   *\n   * Fundamentally, deprecated features are boolean values.\n   * They are allowed by default or with explicit configuration to \"true\" via runtime configuration.\n   * They can be disallowed either by inclusion in the hard-coded disallowed_features[] list, or by\n   * configuration of \"false\" in runtime config.\n   * @param key supplies the key to lookup.\n   * @param default_value supplies the default value that will be used if either the key\n   *        does not exist or it is not a boolean.\n   */\n  virtual bool deprecatedFeatureEnabled(absl::string_view key, bool default_enabled) const PURE;\n\n  // Returns true if a runtime feature is enabled.\n  //\n  // Runtime features are used to easily allow switching between old and new code paths for high\n  // risk changes. The intent is for the old code path to be short lived - the old code path is\n  // deprecated as the feature is defaulted true, and removed with the following Envoy release.\n  virtual bool runtimeFeatureEnabled(absl::string_view key) const PURE;\n\n  /**\n   * Test if a feature is enabled using the built in random generator. This is done by generating\n   * a random number in the range 0-99 and seeing if this number is < the value stored in the\n   * runtime key, or the default_value if the runtime key is invalid.\n   * NOTE: In the current implementation, this routine may return different results each time it is\n   *       called because a new random number is used each time. 
Callers should understand this\n   *       behavior and not assume that subsequent calls using the same snapshot will be consistent.\n   * @param key supplies the feature key to lookup.\n   * @param default_value supplies the default value that will be used if either the feature key\n   *        does not exist or it is not an integer.\n   * @return true if the feature is enabled.\n   */\n  virtual bool featureEnabled(absl::string_view key, uint64_t default_value) const PURE;\n\n  /**\n   * Test if a feature is enabled using a supplied stable random value. This variant is used if\n   * the caller wants a stable result over multiple calls.\n   * @param key supplies the feature key to lookup.\n   * @param default_value supplies the default value that will be used if either the feature key\n   *        does not exist or it is not an integer.\n   * @param random_value supplies the stable random value to use for determining whether the feature\n   *        is enabled.\n   * @return true if the feature is enabled.\n   */\n  virtual bool featureEnabled(absl::string_view key, uint64_t default_value,\n                              uint64_t random_value) const PURE;\n\n  /**\n   * Test if a feature is enabled using a supplied stable random value and total number of buckets\n   * for sampling.\n   * This variant is used if the caller wants a stable result over multiple calls\n   * and have more granularity for samples.\n   * @param key supplies the feature key to lookup.\n   * @param default_value supplies the default value that will be used if either the feature key\n   *        does not exist or it is not an integer.\n   * @param random_value supplies the stable random value to use for determining whether the feature\n   *        is enabled.\n   * @param num_buckets control max number of buckets for sampling. 
Sampled value will be in a range\n   *        of [0, num_buckets).\n   * @return true if the feature is enabled.\n   */\n  virtual bool featureEnabled(absl::string_view key, uint64_t default_value, uint64_t random_value,\n                              uint64_t num_buckets) const PURE;\n\n  /**\n   * Test if a feature is enabled using the built in random generator. This is done by generating a\n   * random number between 0 and the fractional percent denominator and seeing if this number is <\n   * the numerator value stored in the runtime key. The default_value's numerator/denominator is\n   * used if the runtime key is invalid.\n   *\n   * If the runtime value for the provided runtime key is provided as an integer, it is assumed that\n   * the value is the numerator and the denominator is 100.\n   *\n   * NOTE: In the current implementation, this routine may return different results each time it is\n   *       called because a new random number is used each time. Callers should understand this\n   *       behavior and not assume that subsequent calls using the same snapshot will be consistent.\n   * @param key supplies the feature key to lookup.\n   * @param default_value supplies the default value that will be used if either the feature key\n   *        does not exist or it is not a fractional percent.\n   * @return true if the feature is enabled.\n   */\n  virtual bool featureEnabled(absl::string_view key,\n                              const envoy::type::v3::FractionalPercent& default_value) const PURE;\n\n  /**\n   * Test if a feature is enabled using a supplied stable random value. 
This variant is used if\n   * the caller wants a stable result over multiple calls.\n   *\n   * If the runtime value for the provided runtime key is provided as an integer, it is assumed that\n   * the value is the numerator and the denominator is 100.\n   *\n   * @param key supplies the feature key to lookup.\n   * @param default_value supplies the default value that will be used if either the feature key\n   *        does not exist or it is not a fractional percent.\n   * @param random_value supplies the stable random value to use for determining whether the feature\n   *        is enabled.\n   * @return true if the feature is enabled.\n   */\n  virtual bool featureEnabled(absl::string_view key,\n                              const envoy::type::v3::FractionalPercent& default_value,\n                              uint64_t random_value) const PURE;\n\n  using ConstStringOptRef = absl::optional<std::reference_wrapper<const std::string>>;\n  /**\n   * Fetch raw runtime data based on key.\n   * @param key supplies the key to fetch.\n   * @return absl::nullopt if the key does not exist or reference to the value std::string.\n   */\n  virtual ConstStringOptRef get(absl::string_view key) const PURE;\n\n  /**\n   * Fetch an integer runtime key. 
Runtime keys larger than ~2^53 may not be accurately converted\n   * into integers and will return default_value.\n   * @param key supplies the key to fetch.\n   * @param default_value supplies the value to return if the key does not exist or it does not\n   *        contain an integer.\n   * @return uint64_t the runtime value or the default value.\n   */\n  virtual uint64_t getInteger(absl::string_view key, uint64_t default_value) const PURE;\n\n  /**\n   * Fetch a double runtime key.\n   * @param key supplies the key to fetch.\n   * @param default_value supplies the value to return if the key does not exist or it does not\n   *        contain a double.\n   * @return double the runtime value or the default value.\n   */\n  virtual double getDouble(absl::string_view key, double default_value) const PURE;\n\n  /**\n   * Fetch a boolean runtime key.\n   * @param key supplies the key to fetch.\n   * @param default_value supplies the value to return if the key does not exist or it does not\n   *        contain a boolean.\n   * @return bool the runtime value or the default value.\n   */\n  virtual bool getBoolean(absl::string_view key, bool default_value) const PURE;\n\n  /**\n   * Fetch the OverrideLayers that provide values in this snapshot. Layers are ordered from bottom\n   * to top; for instance, the second layer's entries override the first layer's entries, and so on.\n   * Any layer can add a key in addition to overriding keys in layers below. 
The layer vector is\n   * safe only for the lifetime of the Snapshot.\n   * @return const std::vector<OverrideLayerConstPtr>& the raw map of loaded values.\n   */\n  virtual const std::vector<OverrideLayerConstPtr>& getLayers() const PURE;\n};\n\nusing SnapshotConstSharedPtr = std::shared_ptr<const Snapshot>;\n\n/**\n * Loads runtime snapshots from storage (local disk, etc.).\n */\nclass Loader {\npublic:\n  virtual ~Loader() = default;\n\n  using ReadyCallback = std::function<void()>;\n\n  /**\n   * Post-construction initialization. Runtime will be generally available after\n   * the constructor is finished, with the exception of dynamic RTDS layers,\n   * which require ClusterManager.\n   * @param cm cluster manager reference.\n   */\n  virtual void initialize(Upstream::ClusterManager& cm) PURE;\n\n  /**\n   * @return const Snapshot& the current snapshot. This reference is safe to use for the duration of\n   *         the calling routine, but may be overwritten on a future event loop cycle so should be\n   *         fetched again when needed. This may only be called from worker threads.\n   */\n  virtual const Snapshot& snapshot() PURE;\n\n  /**\n   * @return shared_ptr<const Snapshot> the current snapshot. This function may safely be called\n   *         from non-worker threads.\n   */\n  virtual SnapshotConstSharedPtr threadsafeSnapshot() PURE;\n\n  /**\n   * Merge the given map of key-value pairs into the runtime's state. To remove a previous merge for\n   * a key, use an empty string as the value.\n   * @param values the values to merge\n   */\n  virtual void mergeValues(const absl::node_hash_map<std::string, std::string>& values) PURE;\n\n  /**\n   * Initiate all RTDS subscriptions. 
The `on_done` callback is invoked when all RTDS requests\n   * have either received and applied their responses or timed out.\n   */\n  virtual void startRtdsSubscriptions(ReadyCallback on_done) PURE;\n\n  /**\n   * @return Stats::Scope& the root scope.\n   */\n  virtual Stats::Scope& getRootScope() PURE;\n\n  /**\n   * Updates deprecated feature use stats.\n   */\n  virtual void countDeprecatedFeatureUse() const PURE;\n};\n\nusing LoaderPtr = std::unique_ptr<Loader>;\n\n// To make the runtime generally accessible, we make use of the dreaded\n// singleton class. For Envoy, the runtime will be created and cleaned up by the\n// Server::InstanceImpl initialize() and destructor, respectively.\n//\n// This makes it possible for call sites to easily make use of runtime values to\n// determine if a given feature is on or off, as well as various deprecated configuration\n// protos being enabled or disabled by default.\nusing LoaderSingleton = InjectableSingleton<Loader>;\nusing ScopedLoaderSingleton = ScopedInjectableLoader<Loader>;\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/secret/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"secret_callbacks_interface\",\n    hdrs = [\"secret_callbacks.h\"],\n)\n\nenvoy_cc_library(\n    name = \"secret_provider_interface\",\n    hdrs = [\"secret_provider.h\"],\n    deps = [\n        \":secret_callbacks_interface\",\n        \"//include/envoy/common:callback\",\n        \"//include/envoy/ssl:certificate_validation_context_config_interface\",\n        \"//include/envoy/ssl:tls_certificate_config_interface\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"secret_manager_interface\",\n    hdrs = [\"secret_manager.h\"],\n    deps = [\n        \":secret_provider_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/secret/secret_callbacks.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Secret {\n\n/**\n * Callbacks invoked by a dynamic secret provider.\n */\nclass SecretCallbacks {\npublic:\n  virtual ~SecretCallbacks() = default;\n\n  virtual void onAddOrUpdateSecret() PURE;\n};\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/secret/secret_manager.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/secret/secret_provider.h\"\n\nnamespace Envoy {\n\nnamespace Server {\nnamespace Configuration {\nclass TransportSocketFactoryContext;\n} // namespace Configuration\n} // namespace Server\n\nnamespace Secret {\n\n/**\n * A manager for static and dynamic secrets.\n */\nclass SecretManager {\npublic:\n  virtual ~SecretManager() = default;\n\n  /**\n   * @param add a static secret from envoy::extensions::transport_sockets::tls::v3::Secret.\n   * @throw an EnvoyException if the secret is invalid or not supported, or there is duplicate.\n   */\n  virtual void\n  addStaticSecret(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) PURE;\n\n  /**\n   * @param name a name of the static TlsCertificateConfigProvider.\n   * @return the TlsCertificateConfigProviderSharedPtr. Returns nullptr if the static secret is not\n   * found.\n   */\n  virtual TlsCertificateConfigProviderSharedPtr\n  findStaticTlsCertificateProvider(const std::string& name) const PURE;\n\n  /**\n   * @param name a name of the static CertificateValidationContextConfigProviderSharedPtr.\n   * @return the CertificateValidationContextConfigProviderSharedPtr. Returns nullptr\n   * if the static certificate validation context is not found.\n   */\n  virtual CertificateValidationContextConfigProviderSharedPtr\n  findStaticCertificateValidationContextProvider(const std::string& name) const PURE;\n\n  /**\n   * @param name a name of the static TlsSessionTicketKeysConfigProviderSharedPtr.\n   * @return the TlsSessionTicketKeysConfigProviderSharedPtr. 
Returns nullptr\n   * if the static tls session ticket keys are not found.\n   */\n  virtual TlsSessionTicketKeysConfigProviderSharedPtr\n  findStaticTlsSessionTicketKeysContextProvider(const std::string& name) const PURE;\n\n  /**\n   * @param name a name of the static GenericSecretConfigProvider.\n   * @return the GenericSecretConfigProviderSharedPtr. Returns nullptr if the static secret is not\n   * found.\n   */\n  virtual GenericSecretConfigProviderSharedPtr\n  findStaticGenericSecretProvider(const std::string& name) const PURE;\n\n  /**\n   * @param tls_certificate the protobuf config of the TLS certificate.\n   * @return a TlsCertificateConfigProviderSharedPtr created from tls_certificate.\n   */\n  virtual TlsCertificateConfigProviderSharedPtr createInlineTlsCertificateProvider(\n      const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& tls_certificate) PURE;\n\n  /**\n   * @param certificate_validation_context the protobuf config of the certificate validation\n   * context.\n   * @return a CertificateValidationContextConfigProviderSharedPtr created from\n   * certificate_validation_context.\n   */\n  virtual CertificateValidationContextConfigProviderSharedPtr\n  createInlineCertificateValidationContextProvider(\n      const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n          certificate_validation_context) PURE;\n\n  /**\n   * @param tls_certificate the protobuf config of the TLS session ticket keys.\n   * @return a TlsSessionTicketKeysConfigProviderSharedPtr created from session_ticket_keys.\n   */\n  virtual TlsSessionTicketKeysConfigProviderSharedPtr createInlineTlsSessionTicketKeysProvider(\n      const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys& tls_certificate)\n      PURE;\n\n  /**\n   * @param generic_secret the protobuf config of the generic secret.\n   * @return a GenericSecretConfigProviderSharedPtr created from tls_certificate.\n   */\n  virtual 
GenericSecretConfigProviderSharedPtr createInlineGenericSecretProvider(\n      const envoy::extensions::transport_sockets::tls::v3::GenericSecret& generic_secret) PURE;\n\n  /**\n   * Finds and returns a dynamic secret provider associated to SDS config. Create\n   * a new one if such provider does not exist.\n   *\n   * @param config_source a protobuf message object containing a SDS config source.\n   * @param config_name a name that uniquely refers to the SDS config source.\n   * @param secret_provider_context context that provides components for creating and initializing\n   * secret provider.\n   * @return TlsCertificateConfigProviderSharedPtr the dynamic TLS secret provider.\n   */\n  virtual TlsCertificateConfigProviderSharedPtr findOrCreateTlsCertificateProvider(\n      const envoy::config::core::v3::ConfigSource& config_source, const std::string& config_name,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context) PURE;\n\n  /**\n   * Finds and returns a dynamic secret provider associated to SDS config. Create\n   * a new one if such provider does not exist.\n   *\n   * @param config_source a protobuf message object containing a SDS config source.\n   * @param config_name a name that uniquely refers to the SDS config source.\n   * @param secret_provider_context context that provides components for creating and initializing\n   * secret provider.\n   * @return CertificateValidationContextConfigProviderSharedPtr the dynamic certificate validation\n   * context secret provider.\n   */\n  virtual CertificateValidationContextConfigProviderSharedPtr\n  findOrCreateCertificateValidationContextProvider(\n      const envoy::config::core::v3::ConfigSource& config_source, const std::string& config_name,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context) PURE;\n\n  /**\n   * Finds and returns a dynamic secret provider associated to SDS config. 
Create\n   * a new one if such provider does not exist.\n   *\n   * @param config_source a protobuf message object containing a SDS config source.\n   * @param config_name a name that uniquely refers to the SDS config source.\n   * @param secret_provider_context context that provides components for creating and initializing\n   * secret provider.\n   * @return TlsSessionTicketKeysConfigProviderSharedPtr the dynamic tls session ticket keys secret\n   * provider.\n   */\n  virtual TlsSessionTicketKeysConfigProviderSharedPtr\n  findOrCreateTlsSessionTicketKeysContextProvider(\n      const envoy::config::core::v3::ConfigSource& config_source, const std::string& config_name,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context) PURE;\n\n  /**\n   * Finds and returns a dynamic secret provider associated to SDS config. Create a new one if such\n   * provider does not exist.\n   *\n   * @param config_source a protobuf message object containing a SDS config source.\n   * @param config_name a name that uniquely refers to the SDS config source.\n   * @param secret_provider_context context that provides components for creating and initializing\n   * secret provider.\n   * @return GenericSecretConfigProviderSharedPtr the dynamic generic secret provider.\n   */\n  virtual GenericSecretConfigProviderSharedPtr findOrCreateGenericSecretProvider(\n      const envoy::config::core::v3::ConfigSource& config_source, const std::string& config_name,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context) PURE;\n};\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/secret/secret_provider.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/common/callback.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/ssl/certificate_validation_context_config.h\"\n#include \"envoy/ssl/tls_certificate_config.h\"\n\nnamespace Envoy {\nnamespace Secret {\n\n/**\n * A secret provider for each kind of secret.\n */\ntemplate <class SecretType> class SecretProvider {\npublic:\n  virtual ~SecretProvider() = default;\n\n  /**\n   * @return the secret. Returns nullptr if the secret is not ready.\n   */\n  virtual const SecretType* secret() const PURE;\n\n  /**\n   * Add secret validation callback into secret provider.\n   * It is safe to call this method by main thread and callback is safe to be invoked\n   * on main thread.\n   * @param callback callback that is executed by secret provider.\n   * @return CallbackHandle the handle which can remove that validation callback.\n   */\n  virtual Common::CallbackHandle*\n  addValidationCallback(std::function<void(const SecretType&)> callback) PURE;\n\n  /**\n   * Add secret update callback into secret provider.\n   * It is safe to call this method by main thread and callback is safe to be invoked\n   * on main thread.\n   * @param callback callback that is executed by secret provider.\n   * @return CallbackHandle the handle which can remove that update callback.\n   */\n  virtual Common::CallbackHandle* addUpdateCallback(std::function<void()> callback) PURE;\n};\n\nusing TlsCertificatePtr =\n    std::unique_ptr<envoy::extensions::transport_sockets::tls::v3::TlsCertificate>;\nusing CertificateValidationContextPtr =\n    std::unique_ptr<envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext>;\nusing TlsSessionTicketKeysPtr =\n    std::unique_ptr<envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys>;\nusing GenericSecretPtr =\n    
std::unique_ptr<envoy::extensions::transport_sockets::tls::v3::GenericSecret>;\n\nusing TlsCertificateConfigProvider =\n    SecretProvider<envoy::extensions::transport_sockets::tls::v3::TlsCertificate>;\nusing TlsCertificateConfigProviderSharedPtr = std::shared_ptr<TlsCertificateConfigProvider>;\n\nusing CertificateValidationContextConfigProvider =\n    SecretProvider<envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext>;\nusing CertificateValidationContextConfigProviderSharedPtr =\n    std::shared_ptr<CertificateValidationContextConfigProvider>;\n\nusing TlsSessionTicketKeysConfigProvider =\n    SecretProvider<envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys>;\nusing TlsSessionTicketKeysConfigProviderSharedPtr =\n    std::shared_ptr<TlsSessionTicketKeysConfigProvider>;\n\nusing GenericSecretConfigProvider =\n    SecretProvider<envoy::extensions::transport_sockets::tls::v3::GenericSecret>;\nusing GenericSecretConfigProviderSharedPtr = std::shared_ptr<GenericSecretConfigProvider>;\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"access_log_config_interface\",\n    hdrs = [\"access_log_config.h\"],\n    deps = [\n        \":filter_config_interface\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"admin_interface\",\n    hdrs = [\"admin.h\"],\n    deps = [\n        \":config_tracker_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/http:query_params_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"api_listener_interface\",\n    hdrs = [\"api_listener.h\"],\n    deps = [\"//include/envoy/http:api_listener_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"configuration_interface\",\n    hdrs = [\"configuration.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/http:context_interface\",\n        \"//include/envoy/ratelimit:ratelimit_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"config_tracker_interface\",\n    hdrs = [\"config_tracker.h\"],\n    deps = [\n        \"//source/common/common:non_copyable\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"drain_manager_interface\",\n    hdrs = [\"drain_manager.h\"],\n    deps = [\"//include/envoy/network:drain_decision_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"guarddog_interface\",\n    hdrs = [\"guarddog.h\"],\n    deps = [\n        \"//include/envoy/server:watchdog_interface\",\n        
\"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/thread:thread_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"guarddog_config_interface\",\n    hdrs = [\"guarddog_config.h\"],\n    deps = [\n        \":guarddog_interface\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/protobuf:message_validator_interface\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"health_checker_config_interface\",\n    hdrs = [\"health_checker_config.h\"],\n    deps = [\n        \"//include/envoy/common:random_generator_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/upstream:health_checker_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hot_restart_interface\",\n    hdrs = [\"hot_restart.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/thread:thread_interface\",\n        \"//source/server:hot_restart_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"instance_interface\",\n    hdrs = [\"instance.h\"],\n    deps = [\n        \":admin_interface\",\n        \":drain_manager_interface\",\n        \":hot_restart_interface\",\n        \":lifecycle_notifier_interface\",\n        \":listener_manager_interface\",\n        \":options_interface\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/common:mutex_tracer\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/http:context_interface\",\n        \"//include/envoy/http:query_params_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/secret:secret_manager_interface\",\n  
      \"//include/envoy/server:overload_manager_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"options_interface\",\n    hdrs = [\"options.h\"],\n    deps = [\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"worker_interface\",\n    hdrs = [\"worker.h\"],\n    deps = [\n        \":overload_manager_interface\",\n        \"//include/envoy/server:guarddog_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"watchdog_interface\",\n    hdrs = [\"watchdog.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/thread:thread_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"factory_context_interface\",\n    hdrs = [\"factory_context.h\"],\n    deps = [\n        \":admin_interface\",\n        \":drain_manager_interface\",\n        \":lifecycle_notifier_interface\",\n        \":process_context_interface\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/grpc:context_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:context_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        
\"//include/envoy/network:drain_decision_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:overload_manager_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_config_interface\",\n    hdrs = [\"filter_config.h\"],\n    deps = [\n        \":drain_manager_interface\",\n        \":factory_context_interface\",\n        \":lifecycle_notifier_interface\",\n        \":process_context_interface\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/server:overload_manager_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"lifecycle_notifier_interface\",\n    hdrs = [\"lifecycle_notifier.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"listener_manager_interface\",\n    hdrs = [\"listener_manager.h\"],\n    deps = [\n        \":api_listener_interface\",\n   
     \":drain_manager_interface\",\n        \":filter_config_interface\",\n        \":guarddog_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//include/envoy/ssl:context_interface\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"process_context_interface\",\n    hdrs = [\"process_context.h\"],\n)\n\nenvoy_cc_library(\n    name = \"transport_socket_config_interface\",\n    hdrs = [\"transport_socket_config.h\"],\n    deps = [\n        \":factory_context_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/secret:secret_manager_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"resource_monitor_interface\",\n    hdrs = [\"resource_monitor.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"request_id_extension_config_interface\",\n    hdrs = [\"request_id_extension_config.h\"],\n    deps = [\n        
\":filter_config_interface\",\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"resource_monitor_config_interface\",\n    hdrs = [\"resource_monitor_config.h\"],\n    deps = [\n        \":resource_monitor_interface\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/protobuf:message_validator_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"overload_manager_interface\",\n    hdrs = [\"overload_manager.h\"],\n    deps = [\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tracer_config_interface\",\n    hdrs = [\"tracer_config.h\"],\n    deps = [\n        \":filter_config_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"active_udp_listener_config_interface\",\n    hdrs = [\"active_udp_listener_config.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/network:connection_handler_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"bootstrap_extension_config_interface\",\n    hdrs = [\"bootstrap_extension_config.h\"],\n    deps = [\n        \":factory_context_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/server/access_log_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\n/**\n * Implemented for each AccessLog::Instance and registered via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass AccessLogInstanceFactory : public Config::TypedFactory {\npublic:\n  ~AccessLogInstanceFactory() override = default;\n\n  /**\n   * Create a particular AccessLog::Instance implementation from a config proto. If the\n   * implementation is unable to produce a factory with the provided parameters, it should throw an\n   * EnvoyException. The returned pointer should never be nullptr.\n   * @param config the custom configuration for this access log type.\n   * @param filter filter to determine whether a particular request should be logged. If no filter\n   * was specified in the configuration, argument will be nullptr.\n   * @param context general filter context through which persistent resources can be accessed.\n   */\n  virtual AccessLog::InstanceSharedPtr createAccessLogInstance(const Protobuf::Message& config,\n                                                               AccessLog::FilterPtr&& filter,\n                                                               FactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.access_loggers\"; }\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/active_udp_listener_config.h",
    "content": "#pragma once\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/network/connection_handler.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Interface to create udp listener according to\n * envoy::config::listener::v3::UdpListenerConfig.udp_listener_name.\n */\nclass ActiveUdpListenerConfigFactory : public Config::UntypedFactory {\npublic:\n  ~ActiveUdpListenerConfigFactory() override = default;\n\n  virtual ProtobufTypes::MessagePtr createEmptyConfigProto() PURE;\n\n  /**\n   * Create an ActiveUdpListenerFactory object according to given message.\n   * @param message specifies QUIC protocol options in a protobuf.\n   * @param concurrency is the number of listeners instances to be created.\n   */\n  virtual Network::ActiveUdpListenerFactoryPtr\n  createActiveUdpListenerFactory(const Protobuf::Message& message, uint32_t concurrency) PURE;\n\n  std::string category() const override { return \"envoy.udp_listeners\"; }\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/admin.h",
    "content": "#pragma once\n\n#include <functional>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/query_params.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/server/config_tracker.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass AdminStream {\npublic:\n  virtual ~AdminStream() = default;\n\n  /**\n   * @param end_stream set to false for streaming response. Default is true, which will\n   * end the response when the initial handler completes.\n   */\n  virtual void setEndStreamOnComplete(bool end_stream) PURE;\n\n  /**\n   * @param cb callback to be added to the list of callbacks invoked by onDestroy() when stream\n   * is closed.\n   */\n  virtual void addOnDestroyCallback(std::function<void()> cb) PURE;\n\n  /**\n   * @return Http::StreamDecoderFilterCallbacks& to be used by the handler to get HTTP request data\n   * for streaming.\n   */\n  virtual Http::StreamDecoderFilterCallbacks& getDecoderFilterCallbacks() const PURE;\n\n  /**\n   * @return const Buffer::Instance* the fully buffered admin request if applicable.\n   */\n  virtual const Buffer::Instance* getRequestBody() const PURE;\n\n  /**\n   * @return Http::HeaderMap& to be used by handler to parse header information sent with the\n   * request.\n   */\n  virtual const Http::RequestHeaderMap& getRequestHeaders() const PURE;\n\n  /**\n   * Return the HTTP/1 stream encoder options if applicable. If the stream is not HTTP/1 returns\n   * absl::nullopt.\n   */\n  virtual Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() PURE;\n};\n\n/**\n * This macro is used to add handlers to the Admin HTTP Endpoint. It builds\n * a callback that executes X when the specified admin handler is hit. 
This macro can be\n * used to add static handlers as in source/server/admin/admin.cc and also dynamic handlers as\n * done in the RouteConfigProviderManagerImpl constructor in source/common/router/rds_impl.cc.\n */\n#define MAKE_ADMIN_HANDLER(X)                                                                      \\\n  [this](absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers,              \\\n         Buffer::Instance& data, Server::AdminStream& admin_stream) -> Http::Code {                \\\n    return X(path_and_query, response_headers, data, admin_stream);                                \\\n  }\n\n/**\n * Global admin HTTP endpoint for the server.\n */\nclass Admin {\npublic:\n  virtual ~Admin() = default;\n\n  /**\n   * Callback for admin URL handlers.\n   * @param path_and_query supplies the path and query of the request URL.\n   * @param response_headers enables setting of http headers (e.g., content-type, cache-control) in\n   * the handler.\n   * @param response supplies the buffer to fill in with the response body.\n   * @param admin_stream supplies the filter which invoked the handler, enables the handler to use\n   * its data.\n   * @return Http::Code the response code.\n   */\n  using HandlerCb = std::function<Http::Code(\n      absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers,\n      Buffer::Instance& response, AdminStream& admin_stream)>;\n\n  /**\n   * Add an admin handler.\n   * @param prefix supplies the URL prefix to handle.\n   * @param help_text supplies the help text for the handler.\n   * @param callback supplies the callback to invoke when the prefix matches.\n   * @param removable if true allows the handler to be removed via removeHandler.\n   * @param mutates_server_state indicates whether callback will mutate server state.\n   * @return bool true if the handler was added, false if it was not added.\n   */\n  virtual bool addHandler(const std::string& prefix, const std::string& 
help_text,\n                          HandlerCb callback, bool removable, bool mutates_server_state) PURE;\n\n  /**\n   * Remove an admin handler if it is removable.\n   * @param prefix supplies the URL prefix of the handler to delete.\n   * @return bool true if the handler was removed, false if it was not removed.\n   */\n  virtual bool removeHandler(const std::string& prefix) PURE;\n\n  /**\n   * Obtain socket the admin endpoint is bound to.\n   * @return Network::Socket& socket reference.\n   */\n  virtual const Network::Socket& socket() PURE;\n\n  /**\n   * @return ConfigTracker& tracker for /config_dump endpoint.\n   */\n  virtual ConfigTracker& getConfigTracker() PURE;\n\n  /**\n   * Expose this Admin console as an HTTP server.\n   * @param access_log_path file path to write the HTTP request log to.\n   * @param address_out_path file path to write the listening socket's address to.\n   * @param address network address to bind and listen on.\n   * @param socket_options socket options to apply to the listening socket.\n   * @param listener_scope stats scope for the listener being started.\n   */\n  virtual void startHttpListener(const std::string& access_log_path,\n                                 const std::string& address_out_path,\n                                 Network::Address::InstanceConstSharedPtr address,\n                                 const Network::Socket::OptionsSharedPtr& socket_options,\n                                 Stats::ScopePtr&& listener_scope) PURE;\n\n  /**\n   * Executes an admin request with the specified query params. 
Note: this must\n   * be called from Envoy's main thread.\n   *\n   * @param path_and_query the path and query of the admin URL.\n   * @param method the HTTP method (POST or GET).\n   * @param response_headers populated the response headers from executing the request,\n   *     most notably content-type.\n   * @param body populated with the response-body from the admin request.\n   * @return Http::Code The HTTP response code from the admin request.\n   */\n  virtual Http::Code request(absl::string_view path_and_query, absl::string_view method,\n                             Http::ResponseHeaderMap& response_headers, std::string& body) PURE;\n\n  /**\n   * Add this Admin's listener to the provided handler, if the listener exists.\n   * Throws an exception if the listener does not exist.\n   * @param handler the handler that will receive this Admin's listener.\n   */\n  virtual void addListenerToHandler(Network::ConnectionHandler* handler) PURE;\n\n  /**\n   * @return the number of worker threads to run in the server.\n   */\n  virtual uint32_t concurrency() const PURE;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/api_listener.h",
    "content": "#pragma once\n\n#include \"envoy/http/api_listener.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Listener that allows consumer to interact with Envoy via a designated API.\n */\nclass ApiListener {\npublic:\n  enum class Type { HttpApiListener };\n\n  virtual ~ApiListener() = default;\n\n  /**\n   * An ApiListener is uniquely identified by its name.\n   *\n   * @return the name of the ApiListener.\n   */\n  virtual absl::string_view name() const PURE;\n\n  /**\n   * Shutdown the ApiListener. This is an interrupt, not a drain. In other words, calling this\n   * function results in termination of all active streams vs. draining where no new streams are\n   * allowed, but already existing streams are allowed to finish.\n   */\n  virtual void shutdown() PURE;\n\n  /**\n   * @return the Type of the ApiListener.\n   */\n  virtual Type type() const PURE;\n\n  /**\n   * @return valid ref IFF type() == Type::HttpApiListener, otherwise nullopt.\n   */\n  virtual Http::ApiListenerOptRef http() PURE;\n};\n\nusing ApiListenerPtr = std::unique_ptr<ApiListener>;\nusing ApiListenerOptRef = absl::optional<std::reference_wrapper<ApiListener>>;\n\n} // namespace Server\n} // namespace Envoy"
  },
  {
    "path": "include/envoy/server/bootstrap_extension_config.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/server/factory_context.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Parent class for bootstrap extensions.\n */\nclass BootstrapExtension {\npublic:\n  virtual ~BootstrapExtension() = default;\n};\n\nusing BootstrapExtensionPtr = std::unique_ptr<BootstrapExtension>;\n\nnamespace Configuration {\n\n/**\n * Implemented for each bootstrap extension and registered via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass BootstrapExtensionFactory : public Config::TypedFactory {\npublic:\n  ~BootstrapExtensionFactory() override = default;\n\n  /**\n   * Create a particular bootstrap extension implementation from a config proto. If the\n   * implementation is unable to produce a factory with the provided parameters, it should throw an\n   * EnvoyException. The returned pointer should never be nullptr.\n   * @param config the custom configuration for this bootstrap extension type.\n   * @param context general filter context through which persistent resources can be accessed.\n   */\n  virtual BootstrapExtensionPtr createBootstrapExtension(const Protobuf::Message& config,\n                                                         ServerFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.bootstrap\"; }\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/config_tracker.h",
    "content": "#pragma once\n\n#include <functional>\n#include <map>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/non_copyable.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * ConfigTracker is used by the `/config_dump` admin endpoint to manage storage of config-providing\n * callbacks with weak ownership semantics. Callbacks added to ConfigTracker only live as long as\n * the returned EntryOwner object (or ConfigTracker itself, if shorter). Keys should be descriptors\n * of the configs provided by the corresponding callback. They must be unique.\n * ConfigTracker is *not* threadsafe.\n */\nclass ConfigTracker {\npublic:\n  using Cb = std::function<ProtobufTypes::MessagePtr()>;\n  using CbsMap = std::map<std::string, Cb>;\n\n  /**\n   * EntryOwner supplies RAII semantics for entries in the map.\n   * The entry is not removed until the EntryOwner or the ConfigTracker itself is destroyed,\n   * whichever happens first. When you add() an entry, you must hold onto the returned\n   * owner object for as long as you want the entry to stay in the map.\n   */\n  class EntryOwner {\n  public:\n    virtual ~EntryOwner() = default;\n\n  protected:\n    EntryOwner() = default; // A sly way to make this class \"abstract.\"\n  };\n  using EntryOwnerPtr = std::unique_ptr<EntryOwner>;\n\n  virtual ~ConfigTracker() = default;\n\n  /**\n   * @return const CbsMap& The map of string keys to tracked callbacks.\n   */\n  virtual const CbsMap& getCallbacksMap() const PURE;\n\n  /**\n   * Add a new callback to the map under the given key\n   * @param key the map key for the new callback.\n   * @param cb the callback to add. *must not* return nullptr.\n   * @return EntryOwnerPtr the new entry's owner object. nullptr if the key is already present.\n   */\n  virtual EntryOwnerPtr add(const std::string& key, Cb cb) PURE;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/configuration.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/stats/sink.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\n/*\n * Watchdog configuration.\n */\nclass Watchdog {\npublic:\n  virtual ~Watchdog() = default;\n\n  /**\n   * @return std::chrono::milliseconds the time interval after which we count a nonresponsive thread\n   *         event as a \"miss\" statistic.\n   */\n  virtual std::chrono::milliseconds missTimeout() const PURE;\n\n  /**\n   * @return std::chrono::milliseconds the time interval after which we count a nonresponsive thread\n   *         event as a \"mega miss\" statistic.\n   */\n  virtual std::chrono::milliseconds megaMissTimeout() const PURE;\n\n  /**\n   * @return std::chrono::milliseconds the time interval after which we kill the process due to a\n   *         single nonresponsive thread.\n   */\n  virtual std::chrono::milliseconds killTimeout() const PURE;\n\n  /**\n   * @return std::chrono::milliseconds the time interval after which we kill the process due to\n   *         multiple nonresponsive threads.\n   */\n  virtual std::chrono::milliseconds multiKillTimeout() const PURE;\n\n  /**\n   * @return double the percentage of threads that need to meet the MultiKillTimeout before we\n   *         kill the process. 
This is used in the calculation below\n   *         Max(2, ceil(registered_threads * Fraction(MultiKillThreshold)))\n   *         which computes the number of threads that need to be nonresponsive\n   *         for at least MultiKillTimeout before we kill the process.\n   */\n  virtual double multiKillThreshold() const PURE;\n\n  /**\n   * @return Protobuf::RepeatedPtrField<envoy::config::bootstrap::v3::Watchdog::WatchdogAction>\n   *         the WatchDog Actions that trigger on WatchDog Events.\n   */\n  virtual Protobuf::RepeatedPtrField<envoy::config::bootstrap::v3::Watchdog::WatchdogAction>\n  actions() const PURE;\n};\n\n/**\n * The main server configuration.\n */\nclass Main {\npublic:\n  virtual ~Main() = default;\n\n  /**\n   * @return Upstream::ClusterManager* singleton for use by the entire server.\n   *         This will be nullptr if the cluster manager has not initialized yet.\n   */\n  virtual Upstream::ClusterManager* clusterManager() PURE;\n\n  /**\n   * @return std::list<Stats::SinkPtr>& the list of stats sinks initialized from the configuration.\n   */\n  virtual std::list<Stats::SinkPtr>& statsSinks() PURE;\n\n  /**\n   * @return std::chrono::milliseconds the time interval between flushing to configured stat sinks.\n   *         The server latches counters.\n   */\n  virtual std::chrono::milliseconds statsFlushInterval() const PURE;\n\n  /**\n   * @return const Watchdog& the configuration of the main thread watchdog.\n   */\n  virtual const Watchdog& mainThreadWatchdogConfig() const PURE;\n\n  /**\n   * @return const Watchdog& the configuration of the worker watchdog.\n   */\n  virtual const Watchdog& workerWatchdogConfig() const PURE;\n};\n\n/**\n * Admin configuration.\n */\nclass Admin {\npublic:\n  virtual ~Admin() = default;\n\n  /**\n   * @return const std::string& the admin access log path.\n   */\n  virtual const std::string& accessLogPath() const PURE;\n\n  /**\n   * @return const std::string& profiler output path.\n   */\n  virtual 
const std::string& profilePath() const PURE;\n\n  /**\n   * @return Network::Address::InstanceConstSharedPtr the server address.\n   */\n  virtual Network::Address::InstanceConstSharedPtr address() PURE;\n\n  /**\n   * @return Network::Socket::OptionsSharedPtr the list of listener socket options.\n   */\n  virtual Network::Socket::OptionsSharedPtr socketOptions() PURE;\n};\n\n/**\n * Initial configuration values that are needed before the main configuration load.\n */\nclass Initial {\npublic:\n  virtual ~Initial() = default;\n\n  /**\n   * @return Admin& the admin config.\n   */\n  virtual Admin& admin() PURE;\n\n  /**\n   * @return absl::optional<std::string> the path to look for flag files.\n   */\n  virtual absl::optional<std::string> flagsPath() const PURE;\n\n  /**\n   * @return const envoy::config::bootstrap::v3::LayeredRuntime& runtime\n   *         configuration.\n   */\n  virtual const envoy::config::bootstrap::v3::LayeredRuntime& runtime() PURE;\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/drain_manager.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n\n#include \"envoy/network/drain_decision.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Handles connection draining. This concept is used globally during hot restart / server draining\n * as well as on individual listeners when they are being dynamically removed.\n */\nclass DrainManager : public Network::DrainDecision {\npublic:\n  /**\n   * Invoked to begin the drain procedure. (Making drain close operations more likely).\n   * @param drain_complete_cb will be invoked once the drain sequence is finished. The parameter is\n   * optional and can be an unassigned function.\n   */\n  virtual void startDrainSequence(std::function<void()> drain_complete_cb) PURE;\n\n  /**\n   * @return whether the drain sequence has started.\n   */\n  virtual bool draining() const PURE;\n\n  /**\n   * Invoked in the newly launched primary process to begin the parent shutdown sequence. At the end\n   * of the sequence the previous primary process will be terminated.\n   */\n  virtual void startParentShutdownSequence() PURE;\n};\n\nusing DrainManagerPtr = std::unique_ptr<DrainManager>;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/factory_context.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <functional>\n#include <memory>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/grpc/context.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/context.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/network/drain_decision.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/lifecycle_notifier.h\"\n#include \"envoy/server/overload_manager.h\"\n#include \"envoy/server/process_context.h\"\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\n/**\n * Common interface for downstream and upstream network filters.\n */\nclass CommonFactoryContext {\npublic:\n  virtual ~CommonFactoryContext() = default;\n\n  /**\n   * @return Upstream::ClusterManager& singleton for use by the entire server.\n   */\n  virtual Upstream::ClusterManager& clusterManager() PURE;\n\n  /**\n   * @return Event::Dispatcher& the main thread's dispatcher. 
This dispatcher should be used\n   *         for all singleton processing.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * @return information about the local environment the server is running in.\n   */\n  virtual const LocalInfo::LocalInfo& localInfo() const PURE;\n\n  /**\n   * @return ProtobufMessage::ValidationContext& validation visitor for xDS and static configuration\n   *         messages.\n   */\n  virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE;\n\n  /**\n   * @return Runtime::Loader& the singleton runtime loader for the server.\n   */\n  virtual Envoy::Runtime::Loader& runtime() PURE;\n\n  /**\n   * @return Stats::Scope& the filter's stats scope.\n   */\n  virtual Stats::Scope& scope() PURE;\n\n  /**\n   * @return Singleton::Manager& the server-wide singleton manager.\n   */\n  virtual Singleton::Manager& singletonManager() PURE;\n\n  /**\n   * @return ThreadLocal::SlotAllocator& the thread local storage engine for the server. This is\n   *         used to allow runtime lockless updates to configuration, etc. across multiple threads.\n   */\n  virtual ThreadLocal::SlotAllocator& threadLocal() PURE;\n\n  /**\n   * @return Server::Admin& the server's global admin HTTP endpoint.\n   */\n  virtual Server::Admin& admin() PURE;\n\n  /**\n   * @return TimeSource& a reference to the time source.\n   */\n  virtual TimeSource& timeSource() PURE;\n\n  /**\n   * @return Api::Api& a reference to the api object.\n   */\n  virtual Api::Api& api() PURE;\n};\n\n/**\n * ServerFactoryContext is a specialization of common interface for downstream and upstream network\n * filters. The implementation guarantees the lifetime is no shorter than server. 
It could be used\n * across listeners.\n */\nclass ServerFactoryContext : public virtual CommonFactoryContext {\npublic:\n  ~ServerFactoryContext() override = default;\n\n  /**\n   * @return the server-wide grpc context.\n   */\n  virtual Grpc::Context& grpcContext() PURE;\n\n  /**\n   * @return DrainManager& the server-wide drain manager.\n   */\n  virtual Envoy::Server::DrainManager& drainManager() PURE;\n\n  /**\n   * @return the server's init manager. This can be used for extensions that need to initialize\n   *         after cluster manager init but before the server starts listening. All extensions\n   *         should register themselves during configuration load. initialize() will be called on\n   *         each registered target after cluster manager init but before the server starts\n   *         listening. Once all targets have initialized and invoked their callbacks, the server\n   *         will start listening.\n   */\n  virtual Init::Manager& initManager() PURE;\n\n  /**\n   * @return ServerLifecycleNotifier& the lifecycle notifier for the server.\n   */\n  virtual ServerLifecycleNotifier& lifecycleNotifier() PURE;\n\n  /**\n   * @return std::chrono::milliseconds the flush interval of stats sinks.\n   */\n  virtual std::chrono::milliseconds statsFlushInterval() const PURE;\n};\n\n/**\n * Context passed to network and HTTP filters to access server resources.\n * TODO(mattklein123): When we lock down visibility of the rest of the code, filters should only\n * access the rest of the server via interfaces exposed here.\n */\nclass FactoryContext : public virtual CommonFactoryContext {\npublic:\n  ~FactoryContext() override = default;\n\n  /**\n   * @return ServerFactoryContext which lifetime is no shorter than the server.\n   */\n  virtual ServerFactoryContext& getServerFactoryContext() const PURE;\n\n  /**\n   * @return TransportSocketFactoryContext which lifetime is no shorter than the server.\n   */\n  virtual TransportSocketFactoryContext& 
getTransportSocketFactoryContext() const PURE;\n\n  /**\n   * @return AccessLogManager for use by the entire server.\n   */\n  virtual AccessLog::AccessLogManager& accessLogManager() PURE;\n\n  /**\n   * @return envoy::config::core::v3::TrafficDirection the direction of the traffic relative to\n   * the local proxy.\n   */\n  virtual envoy::config::core::v3::TrafficDirection direction() const PURE;\n\n  /**\n   * @return const Network::DrainDecision& a drain decision that filters can use to determine if\n   *         they should be doing graceful closes on connections when possible.\n   */\n  virtual const Network::DrainDecision& drainDecision() PURE;\n\n  /**\n   * @return whether external healthchecks are currently failed or not.\n   */\n  virtual bool healthCheckFailed() PURE;\n\n  /**\n   * @return the server's init manager. This can be used for extensions that need to initialize\n   *         after cluster manager init but before the server starts listening. All extensions\n   *         should register themselves during configuration load. initialize() will be called on\n   *         each registered target after cluster manager init but before the server starts\n   *         listening. 
Once all targets have initialized and invoked their callbacks, the server\n   *         will start listening.\n   */\n  virtual Init::Manager& initManager() PURE;\n\n  /**\n   * @return ServerLifecycleNotifier& the lifecycle notifier for the server.\n   */\n  virtual ServerLifecycleNotifier& lifecycleNotifier() PURE;\n\n  /**\n   * @return Stats::Scope& the listener's stats scope.\n   */\n  virtual Stats::Scope& listenerScope() PURE;\n\n  /**\n   * @return const envoy::config::core::v3::Metadata& the config metadata associated with this\n   * listener.\n   */\n  virtual const envoy::config::core::v3::Metadata& listenerMetadata() const PURE;\n\n  /**\n   * @return OverloadManager& the overload manager for the server.\n   */\n  virtual OverloadManager& overloadManager() PURE;\n\n  /**\n   * @return Http::Context& a reference to the http context.\n   */\n  virtual Http::Context& httpContext() PURE;\n\n  /**\n   * @return Grpc::Context& a reference to the grpc context.\n   */\n  virtual Grpc::Context& grpcContext() PURE;\n\n  /**\n   * @return ProcessContextOptRef an optional reference to the\n   * process context. Will be unset when running in validation mode.\n   */\n  virtual ProcessContextOptRef processContext() PURE;\n\n  /**\n   * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration\n   *         messages.\n   */\n  virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE;\n};\n\n/**\n * An implementation of FactoryContext. The life time is no shorter than the created filter chains.\n * The life time is no longer than the owning listener. 
It should be used to create\n * NetworkFilterChain.\n */\nclass FilterChainFactoryContext : public virtual FactoryContext {\npublic:\n  /**\n   * Set the flag that all attached filter chains will be destroyed.\n   */\n  virtual void startDraining() PURE;\n};\n\nusing FilterChainFactoryContextPtr = std::unique_ptr<FilterChainFactoryContext>;\n\n/**\n * An implementation of FactoryContext. The life time should cover the lifetime of the filter chains\n * and connections. It can be used to create ListenerFilterChain.\n */\nclass ListenerFactoryContext : public virtual FactoryContext {\npublic:\n  /**\n   * Give access to the listener configuration\n   */\n  virtual const Network::ListenerConfig& listenerConfig() const PURE;\n};\n\n/**\n * FactoryContext for ProtocolOptionsFactory.\n */\nusing ProtocolOptionsFactoryContext = Server::Configuration::TransportSocketFactoryContext;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/filter_config.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/factory_context.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\n/**\n * Common interface for listener filters and UDP listener filters\n */\nclass ListenerFilterConfigFactoryBase : public Config::TypedFactory {\npublic:\n  ~ListenerFilterConfigFactoryBase() override = default;\n};\n\n/**\n * Implemented by each listener filter and registered via Registry::registerFactory()\n * or the convenience class RegisterFactory.\n */\nclass NamedListenerFilterConfigFactory : public ListenerFilterConfigFactoryBase {\npublic:\n  ~NamedListenerFilterConfigFactory() override = default;\n\n  /**\n   * Create a particular listener filter factory implementation. If the implementation is unable to\n   * produce a factory with the provided parameters, it should throw an EnvoyException in the case\n   * of general error or a Json::Exception if the json configuration is erroneous. 
The returned\n   * callback should always be initialized.\n   * @param config supplies the general protobuf configuration for the filter.\n   * @param listener_filter_matcher supplies the matcher to decide when filter is enabled.\n   * @param context supplies the filter's context.\n   * @return Network::ListenerFilterFactoryCb the factory creation function.\n   */\n  virtual Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto(\n      const Protobuf::Message& config,\n      const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n      ListenerFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.filters.listener\"; }\n};\n\n/**\n * Implemented by each UDP listener filter and registered via Registry::registerFactory()\n * or the convenience class RegisterFactory.\n */\nclass NamedUdpListenerFilterConfigFactory : public ListenerFilterConfigFactoryBase {\npublic:\n  ~NamedUdpListenerFilterConfigFactory() override = default;\n\n  /**\n   * Create a particular UDP listener filter factory implementation. 
If the implementation is unable\n   * to produce a factory with the provided parameters, it should throw an EnvoyException.\n   * The returned callback should always be initialized.\n   * @param config supplies the general protobuf configuration for the filter\n   * @param context supplies the filter's context.\n   * @return Network::UdpListenerFilterFactoryCb the factory creation function.\n   */\n  virtual Network::UdpListenerFilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& config,\n                               ListenerFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.filters.udp_listener\"; }\n};\n\n/**\n * Implemented by filter factories that require more options to process the protocol used by the\n * upstream cluster.\n */\nclass ProtocolOptionsFactory : public Config::TypedFactory {\npublic:\n  ~ProtocolOptionsFactory() override = default;\n\n  /**\n   * Create a particular filter's protocol specific options implementation. 
If the factory\n   * implementation is unable to produce a factory with the provided parameters, it should throw an\n   * EnvoyException.\n   * @param config supplies the protobuf configuration for the filter\n   * @param validation_visitor message validation visitor instance.\n   * @return Upstream::ProtocolOptionsConfigConstSharedPtr the protocol options\n   */\n  virtual Upstream::ProtocolOptionsConfigConstSharedPtr\n  createProtocolOptionsConfig(const Protobuf::Message& config,\n                              ProtocolOptionsFactoryContext& factory_context) {\n    UNREFERENCED_PARAMETER(config);\n    UNREFERENCED_PARAMETER(factory_context);\n    return nullptr;\n  }\n\n  /**\n   * @return ProtobufTypes::MessagePtr a newly created empty protocol specific options message or\n   *         nullptr if protocol specific options are not available.\n   */\n  virtual ProtobufTypes::MessagePtr createEmptyProtocolOptionsProto() { return nullptr; }\n};\n\n/**\n * Implemented by each network filter and registered via Registry::registerFactory()\n * or the convenience class RegisterFactory.\n */\nclass NamedNetworkFilterConfigFactory : public ProtocolOptionsFactory {\npublic:\n  ~NamedNetworkFilterConfigFactory() override = default;\n\n  /**\n   * Create a particular network filter factory implementation. If the implementation is unable to\n   * produce a factory with the provided parameters, it should throw an EnvoyException. 
The returned\n   * callback should always be initialized.\n   * @param config supplies the general json configuration for the filter\n   * @param filter_chain_factory_context supplies the filter's context.\n   * @return Network::FilterFactoryCb the factory creation function.\n   */\n  virtual Network::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& config,\n                               FactoryContext& filter_chain_factory_context) PURE;\n\n  std::string category() const override { return \"envoy.filters.network\"; }\n\n  /**\n   * @return bool true if this filter must be the last filter in a filter chain, false otherwise.\n   */\n  virtual bool isTerminalFilter() { return false; }\n};\n\n/**\n * Implemented by each upstream cluster network filter and registered via\n * Registry::registerFactory() or the convenience class RegisterFactory.\n */\nclass NamedUpstreamNetworkFilterConfigFactory : public ProtocolOptionsFactory {\npublic:\n  ~NamedUpstreamNetworkFilterConfigFactory() override = default;\n\n  /**\n   * Create a particular upstream network filter factory implementation. If the implementation is\n   * unable to produce a factory with the provided parameters, it should throw an EnvoyException in\n   * the case of general error. The returned callback should always be initialized.\n   */\n  virtual Network::FilterFactoryCb createFilterFactoryFromProto(const Protobuf::Message& config,\n                                                                CommonFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.filters.upstream_network\"; }\n};\n\n/**\n * Implemented by each HTTP filter and registered via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass NamedHttpFilterConfigFactory : public ProtocolOptionsFactory {\npublic:\n  ~NamedHttpFilterConfigFactory() override = default;\n\n  /**\n   * Create a particular http filter factory implementation. 
If the implementation is unable to\n   * produce a factory with the provided parameters, it should throw an EnvoyException. The returned\n   * callback should always be initialized.\n   * @param config supplies the general Protobuf message to be marshaled into a filter-specific\n   * configuration.\n   * @param stat_prefix prefix for stat logging\n   * @param context supplies the filter's context.\n   * @return Http::FilterFactoryCb the factory creation function.\n   */\n  virtual Http::FilterFactoryCb createFilterFactoryFromProto(const Protobuf::Message& config,\n                                                             const std::string& stat_prefix,\n                                                             FactoryContext& context) PURE;\n\n  /**\n   * @return ProtobufTypes::MessagePtr create an empty virtual host, route, or weighted\n   *         cluster-local config proto message for v2. The filter config, which arrives in an\n   *         opaque message, will be parsed into this empty proto. By default, this method\n   *         returns the same value as createEmptyConfigProto, and can be optionally overridden\n   *         in implementations.\n   */\n  virtual ProtobufTypes::MessagePtr createEmptyRouteConfigProto() {\n    return createEmptyConfigProto();\n  }\n\n  /**\n   * @return RouteSpecificFilterConfigConstSharedPtr allow the filter to pre-process per route\n   * config. 
Returned object will be stored in the loaded route configuration.\n   */\n  virtual Router::RouteSpecificFilterConfigConstSharedPtr\n  createRouteSpecificFilterConfig(const Protobuf::Message&, ServerFactoryContext&,\n                                  ProtobufMessage::ValidationVisitor&) {\n    return nullptr;\n  }\n\n  std::string category() const override { return \"envoy.filters.http\"; }\n\n  /**\n   * @return bool true if this filter must be the last filter in a filter chain, false otherwise.\n   */\n  virtual bool isTerminalFilter() { return false; }\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/guarddog.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/server/watchdog.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * The GuardDog runs a background thread which scans a number of shared WatchDog\n * objects periodically to verify that they have been recently touched. If some\n * of the watched items have not responded the GuardDog will take action ranging\n * from stats counter increments to killing the entire process (if a deadlock is\n * suspected).\n *\n * The lifespan of the GuardDog thread is tied to the lifespan of this object.\n */\nclass GuardDog {\npublic:\n  virtual ~GuardDog() = default;\n\n  /**\n   * Get a WatchDog object pointer to a new WatchDog.\n   *\n   * After this method returns the WatchDog object must be touched periodically\n   * to avoid triggering the GuardDog. If no longer needed use the\n   * stopWatching() method to remove it from the list of watched objects.\n   *\n   * @param thread_id a Thread::ThreadId containing the system thread id\n   * @param thread_name supplies the name of the thread which is used for per-thread miss stats.\n   */\n  virtual WatchDogSharedPtr createWatchDog(Thread::ThreadId thread_id,\n                                           const std::string& thread_name) PURE;\n\n  /**\n   * Tell the GuardDog to forget about this WatchDog.\n   * After calling this method it is no longer necessary to touch the WatchDog\n   * object.\n   *\n   * @param wd A WatchDogSharedPtr obtained from createWatchDog.\n   */\n  virtual void stopWatching(WatchDogSharedPtr wd) PURE;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/guarddog_config.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/protobuf/message_validator.h\"\n#include \"envoy/server/guarddog.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nstruct GuardDogActionFactoryContext {\n  Api::Api& api_;\n  Event::Dispatcher& dispatcher_; // not owned (this is the guard dog's dispatcher)\n  Stats::Scope& stats_;           // not owned (this is the server's stats scope)\n  absl::string_view guarddog_name_;\n};\n\nclass GuardDogAction {\npublic:\n  virtual ~GuardDogAction() = default;\n  /**\n   * Callback function for when the GuardDog observes an event.\n   * @param event the event the GuardDog observes.\n   * @param thread_last_checkin_pairs pair of the relevant thread to the event, and the\n   *  last check in time of those threads with their watchdog.\n   * @param now the current time.\n   */\n  virtual void\n  run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event,\n      const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs,\n      MonotonicTime now) PURE;\n};\n\nusing GuardDogActionPtr = std::unique_ptr<GuardDogAction>;\n\n/**\n * Implemented by each custom GuardDogAction and registered via Registry::registerFactory()\n * or the convenience class RegisterFactory.\n */\nclass GuardDogActionFactory : public Config::TypedFactory {\npublic:\n  ~GuardDogActionFactory() override = default;\n\n  /**\n   * Creates a particular GuardDog Action factory implementation.\n   *\n   * @param config supplies the configuration for the action.\n   * @param context supplies the GuardDog Action's context.\n   * @return GuardDogActionPtr the GuardDogAction object.\n   */\n  virtual 
GuardDogActionPtr createGuardDogActionFromProto(\n      const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config,\n      GuardDogActionFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.guarddog_actions\"; }\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/health_checker_config.h",
    "content": "#pragma once\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/upstream/health_checker.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nclass HealthCheckerFactoryContext {\npublic:\n  virtual ~HealthCheckerFactoryContext() = default;\n\n  /**\n   * @return Upstream::Cluster& the owning cluster.\n   */\n  virtual Upstream::Cluster& cluster() PURE;\n\n  /**\n   * @return Runtime::Loader& the singleton runtime loader for the server.\n   */\n  virtual Envoy::Runtime::Loader& runtime() PURE;\n\n  /**\n   * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used\n   *         for all singleton processing.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /*\n   * @return Upstream::HealthCheckEventLoggerPtr the health check event logger for the\n   * created health checkers. This function may not be idempotent.\n   */\n  virtual Upstream::HealthCheckEventLoggerPtr eventLogger() PURE;\n\n  /**\n   * @return ProtobufMessage::ValidationVisitor& validation visitor for health checker configuration\n   *         messages.\n   */\n  virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE;\n\n  /**\n   * @return Api::Api& the API used by the server.\n   */\n  virtual Api::Api& api() PURE;\n};\n\n/**\n * Implemented by each custom health checker and registered via Registry::registerFactory()\n * or the convenience class RegisterFactory.\n */\nclass CustomHealthCheckerFactory : public Config::TypedFactory {\npublic:\n  ~CustomHealthCheckerFactory() override = default;\n\n  /**\n   * Creates a particular custom health checker factory implementation.\n   *\n   * @param config supplies the configuration as a full envoy::config::core::v3::HealthCheck\n   * config. 
The implementation of this method can get the specific configuration for a custom\n   * health check from custom_health_check().config().\n   * @param context supplies the custom health checker's context.\n   * @return HealthCheckerSharedPtr the pointer of a health checker instance.\n   */\n  virtual Upstream::HealthCheckerSharedPtr\n  createCustomHealthChecker(const envoy::config::core::v3::HealthCheck& config,\n                            HealthCheckerFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.health_checkers\"; }\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/hot_restart.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/stats/allocator.h\"\n#include \"envoy/stats/store.h\"\n#include \"envoy/thread/thread.h\"\n\n#include \"source/server/hot_restart.pb.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass Instance;\n\n/**\n * Abstracts functionality required to \"hot\" (live) restart the server including code and\n * configuration. Right now this interface assumes a UNIX like socket interface for fd passing\n * but it could be relatively easily swapped with something else if necessary.\n */\nclass HotRestart {\npublic:\n  struct ServerStatsFromParent {\n    uint64_t parent_memory_allocated_ = 0;\n    uint64_t parent_connections_ = 0;\n  };\n\n  virtual ~HotRestart() = default;\n\n  /**\n   * Shutdown listeners in the parent process if applicable. Listeners will begin draining to\n   * clear out old connections.\n   */\n  virtual void drainParentListeners() PURE;\n\n  /**\n   * Retrieve a listening socket on the specified address from the parent process. The socket will\n   * be duplicated across process boundaries.\n   * @param address supplies the address of the socket to duplicate, e.g. tcp://127.0.0.1:5000.\n   * @return int the fd or -1 if there is no bound listen port in the parent.\n   */\n  virtual int duplicateParentListenSocket(const std::string& address) PURE;\n\n  /**\n   * Initialize the parent logic of our restarter. Meant to be called after initialization of a\n   * new child has begun. The hot restart implementation needs to be created early to deal with\n   * shared memory, logging, etc. so late initialization of needed interfaces is done here.\n   */\n  virtual void initialize(Event::Dispatcher& dispatcher, Server::Instance& server) PURE;\n\n  /**\n   * Shutdown admin processing in the parent process if applicable. 
This allows admin processing\n   * to start up in the new process.\n   * @param original_start_time will be filled with information from our parent, if retrieved.\n   */\n  virtual void sendParentAdminShutdownRequest(time_t& original_start_time) PURE;\n\n  /**\n   * Tell our parent process to gracefully terminate itself.\n   */\n  virtual void sendParentTerminateRequest() PURE;\n\n  /**\n   * Retrieve stats from our parent process and merges them into stats_store, taking into account\n   * the stats values we've already seen transferred.\n   * Skips all of the above and returns 0s if there is not currently a parent.\n   * @param stats_store the store whose stats will be updated.\n   * @param stats_proto the stats values we are updating with.\n   * @return special values relating to the \"server\" stats scope, whose\n   *         merging has to be handled by Server::InstanceImpl.\n   */\n  virtual ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot& stats_store) PURE;\n\n  /**\n   * Shutdown the half of our hot restarter that acts as a parent.\n   */\n  virtual void shutdown() PURE;\n\n  /**\n   * Return the base id used to generate a domain socket name.\n   */\n  virtual uint32_t baseId() PURE;\n\n  /**\n   * Return the hot restart compatibility version so that operations code can decide whether to\n   * perform a full or hot restart.\n   */\n  virtual std::string version() PURE;\n\n  /**\n   * @return Thread::BasicLockable& a lock for logging.\n   */\n  virtual Thread::BasicLockable& logLock() PURE;\n\n  /**\n   * @return Thread::BasicLockable& a lock for access logs.\n   */\n  virtual Thread::BasicLockable& accessLogLock() PURE;\n};\n\n/**\n * HotRestartDomainSocketInUseException is thrown during HotRestart construction only when the\n * underlying domain socket is in use.\n */\nclass HotRestartDomainSocketInUseException : public EnvoyException {\npublic:\n  HotRestartDomainSocketInUseException(const std::string& what) : EnvoyException(what) {}\n};\n\n} 
// namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/instance.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/api/api.h\"\n#include \"envoy/common/mutex_tracer.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/grpc/context.h\"\n#include \"envoy/http/context.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/hot_restart.h\"\n#include \"envoy/server/lifecycle_notifier.h\"\n#include \"envoy/server/listener_manager.h\"\n#include \"envoy/server/options.h\"\n#include \"envoy/server/overload_manager.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * An instance of the running server.\n */\nclass Instance {\npublic:\n  virtual ~Instance() = default;\n\n  /**\n   * @return Admin& the global HTTP admin endpoint for the server.\n   */\n  virtual Admin& admin() PURE;\n\n  /**\n   * @return Api::Api& the API used by the server.\n   */\n  virtual Api::Api& api() PURE;\n\n  /**\n   * @return Upstream::ClusterManager& singleton for use by the entire server.\n   */\n  virtual Upstream::ClusterManager& clusterManager() PURE;\n\n  /**\n   * @return Ssl::ContextManager& singleton for use by the entire server.\n   */\n  virtual Ssl::ContextManager& sslContextManager() PURE;\n\n  /**\n   * @return Event::Dispatcher& the main thread's dispatcher. 
This dispatcher should be used\n   *         for all singleton processing.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * @return Network::DnsResolverSharedPtr the singleton DNS resolver for the server.\n   */\n  virtual Network::DnsResolverSharedPtr dnsResolver() PURE;\n\n  /**\n   * Close the server's listening sockets and begin draining the listeners.\n   */\n  virtual void drainListeners() PURE;\n\n  /**\n   * @return DrainManager& singleton for use by the entire server.\n   */\n  virtual DrainManager& drainManager() PURE;\n\n  /**\n   * @return AccessLogManager for use by the entire server.\n   */\n  virtual AccessLog::AccessLogManager& accessLogManager() PURE;\n\n  /**\n   * Toggle whether the server fails or passes external healthchecks.\n   */\n  virtual void failHealthcheck(bool fail) PURE;\n\n  /**\n   * @return whether external healthchecks are currently failed or not.\n   */\n  virtual bool healthCheckFailed() PURE;\n\n  /**\n   * @return the server's hot restarter.\n   */\n  virtual HotRestart& hotRestart() PURE;\n\n  /**\n   * @return the server's init manager. This can be used for extensions that need to initialize\n   *         after cluster manager init but before the server starts listening. All extensions\n   *         should register themselves during configuration load. initialize() will be called on\n   *         each registered target after cluster manager init but before the server starts\n   *         listening. Once all targets have initialized and invoked their callbacks, the server\n   *         will start listening.\n   */\n  virtual Init::Manager& initManager() PURE;\n\n  /**\n   * @return the server's listener manager.\n   */\n  virtual ListenerManager& listenerManager() PURE;\n\n  /**\n   * @return the server's global mutex tracer, if it was instantiated. 
Nullptr otherwise.\n   */\n  virtual Envoy::MutexTracer* mutexTracer() PURE;\n\n  /**\n   * @return the server's overload manager.\n   */\n  virtual OverloadManager& overloadManager() PURE;\n\n  /**\n   * @return the server's secret manager\n   */\n  virtual Secret::SecretManager& secretManager() PURE;\n\n  /**\n   * @return the server's CLI options.\n   */\n  virtual const Options& options() PURE;\n\n  /**\n   * @return Runtime::Loader& the singleton runtime loader for the server.\n   */\n  virtual Runtime::Loader& runtime() PURE;\n\n  /**\n   * @return ServerLifecycleNotifier& the singleton lifecycle notifier for the server.\n   */\n  virtual ServerLifecycleNotifier& lifecycleNotifier() PURE;\n\n  /**\n   * Shutdown the server gracefully.\n   */\n  virtual void shutdown() PURE;\n\n  /**\n   * @return whether the shutdown method has been called.\n   */\n  virtual bool isShutdown() PURE;\n\n  /**\n   * Shutdown the server's admin processing. This includes the admin API, stat flushing, etc.\n   */\n  virtual void shutdownAdmin() PURE;\n\n  /**\n   * @return Singleton::Manager& the server-wide singleton manager.\n   */\n  virtual Singleton::Manager& singletonManager() PURE;\n\n  /**\n   * @return the time that the server started during the current hot restart epoch.\n   */\n  virtual time_t startTimeCurrentEpoch() PURE;\n\n  /**\n   * @return the time that the server started the first hot restart epoch.\n   */\n  virtual time_t startTimeFirstEpoch() PURE;\n\n  /**\n   * @return the server-wide stats store.\n   */\n  virtual Stats::Store& stats() PURE;\n\n  /**\n   * @return the server-wide grpc context.\n   */\n  virtual Grpc::Context& grpcContext() PURE;\n\n  /**\n   * @return the server-wide http context.\n   */\n  virtual Http::Context& httpContext() PURE;\n\n  /**\n   * @return the server-wide process context.\n   */\n  virtual ProcessContextOptRef processContext() PURE;\n\n  /**\n   * @return ThreadLocal::Instance& the thread local storage engine for the server. 
This is used to\n   *         allow runtime lockless updates to configuration, etc. across multiple threads.\n   */\n  virtual ThreadLocal::Instance& threadLocal() PURE;\n\n  /**\n   * @return information about the local environment the server is running in.\n   */\n  virtual const LocalInfo::LocalInfo& localInfo() const PURE;\n\n  /**\n   * @return the time source used for the server.\n   */\n  virtual TimeSource& timeSource() PURE;\n\n  /**\n   * @return the flush interval of stats sinks.\n   */\n  virtual std::chrono::milliseconds statsFlushInterval() const PURE;\n\n  /**\n   * Flush the stats sinks outside of a flushing interval.\n   * Note: stats flushing may not be synchronous.\n   * Therefore, this function may return prior to flushing taking place.\n   */\n  virtual void flushStats() PURE;\n\n  /**\n   * @return ProtobufMessage::ValidationContext& validation context for configuration\n   *         messages.\n   */\n  virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE;\n\n  /**\n   * @return Configuration::ServerFactoryContext& factory context for filters.\n   */\n  virtual Configuration::ServerFactoryContext& serverFactoryContext() PURE;\n\n  /**\n   * @return Configuration::TransportSocketFactoryContext& factory context for transport sockets.\n   */\n  virtual Configuration::TransportSocketFactoryContext& transportSocketFactoryContext() PURE;\n\n  /**\n   * Set the default server-wide tracer provider configuration that will be used as a fallback\n   * if an \"envoy.filters.network.http_connection_manager\" filter that has tracing enabled doesn't\n   * define a tracer provider in-place.\n   *\n   * Once deprecation window for the tracer provider configuration in the bootstrap config is over,\n   * this method will no longer be necessary.\n   */\n  virtual void\n  setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) PURE;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/lifecycle_notifier.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/dispatcher.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ServerLifecycleNotifier {\npublic:\n  virtual ~ServerLifecycleNotifier() = default;\n\n  /**\n   * Stages of the envoy server instance lifecycle.\n   */\n  enum class Stage {\n    /**\n     * The server instance main thread is about to enter the dispatcher loop.\n     */\n    Startup,\n\n    /**\n     * The server instance init manager has finished initialization.\n     */\n    PostInit,\n\n    /**\n     * The server instance is being shutdown and the dispatcher is about to exit.\n     * This provides listeners a last chance to run a callback on the main dispatcher.\n     * Note: the server will wait for callbacks that registered to take a completion\n     * before exiting the dispatcher loop.\n     * Note: callbacks that registered with a completion will only be notified for this\n     * stage if the server did not prematurely shutdown before fully starting up (specifically\n     * if the server shutdown before worker threads were started).\n     */\n    ShutdownExit\n  };\n\n  // A handle to a callback registration. Deleting this handle will unregister the callback.\n  class Handle {\n  public:\n    virtual ~Handle() = default;\n  };\n  using HandlePtr = std::unique_ptr<Handle>;\n\n  /**\n   * Callback invoked when the server reaches a certain lifecycle stage.\n   *\n   * Instances of the second type which take an Event::PostCb parameter must post\n   * that callback to the main dispatcher when they have finished processing of\n   * the new lifecycle state. 
This is useful when the main dispatcher needs to\n   * wait for registered callbacks to finish their work before continuing, e.g.,\n   * during server shutdown.\n   */\n  using StageCallback = std::function<void()>;\n  using StageCallbackWithCompletion = std::function<void(Event::PostCb)>;\n\n  /**\n   * Register a callback function that will be invoked on the main thread when\n   * the specified stage is reached.\n   *\n   * The second version which takes a completion back is currently only supported\n   * for the ShutdownExit stage.\n   */\n  virtual HandlePtr registerCallback(Stage stage, StageCallback callback) PURE;\n  virtual HandlePtr registerCallback(Stage stage, StageCallbackWithCompletion callback) PURE;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/listener_manager.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/network/listener.h\"\n#include \"envoy/server/api_listener.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/guarddog.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Interface for an LDS API provider.\n */\nclass LdsApi {\npublic:\n  virtual ~LdsApi() = default;\n\n  /**\n   * @return std::string the last received version by the xDS API for LDS.\n   */\n  virtual std::string versionInfo() const PURE;\n};\n\nusing LdsApiPtr = std::unique_ptr<LdsApi>;\n\nstruct ListenSocketCreationParams {\n  ListenSocketCreationParams(bool bind_to_port, bool duplicate_parent_socket = true)\n      : bind_to_port(bind_to_port), duplicate_parent_socket(duplicate_parent_socket) {}\n\n  // For testing.\n  bool operator==(const ListenSocketCreationParams& rhs) const;\n  bool operator!=(const ListenSocketCreationParams& rhs) const;\n\n  // whether to actually bind the socket.\n  bool bind_to_port;\n  // whether to duplicate socket from hot restart parent.\n  bool duplicate_parent_socket;\n};\n\n/**\n * Factory for creating listener components.\n */\nclass ListenerComponentFactory {\npublic:\n  virtual ~ListenerComponentFactory() = default;\n\n  /**\n   * @return an LDS API provider.\n   * @param lds_config supplies the management server configuration.\n   * @param lds_resources_locator udpa::core::v1::ResourceLocator for listener collection.\n   */\n  virtual LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config,\n                                 const udpa::core::v1::ResourceLocator* 
lds_resources_locator) PURE;\n\n  /**\n   * Creates a socket.\n   * @param address supplies the socket's address.\n   * @param socket_type the type of socket (stream or datagram) to create.\n   * @param options to be set on the created socket just before calling 'bind()'.\n   * @param params used to control how a socket being created.\n   * @return Network::SocketSharedPtr an initialized and potentially bound socket.\n   */\n  virtual Network::SocketSharedPtr\n  createListenSocket(Network::Address::InstanceConstSharedPtr address,\n                     Network::Socket::Type socket_type,\n                     const Network::Socket::OptionsSharedPtr& options,\n                     const ListenSocketCreationParams& params) PURE;\n\n  /**\n   * Creates a list of filter factories.\n   * @param filters supplies the proto configuration.\n   * @param context supplies the factory creation context.\n   * @return std::vector<Network::FilterFactoryCb> the list of filter factories.\n   */\n  virtual std::vector<Network::FilterFactoryCb> createNetworkFilterFactoryList(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>& filters,\n      Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context) PURE;\n\n  /**\n   * Creates a list of listener filter factories.\n   * @param filters supplies the JSON configuration.\n   * @param context supplies the factory creation context.\n   * @return std::vector<Network::ListenerFilterFactoryCb> the list of filter factories.\n   */\n  virtual std::vector<Network::ListenerFilterFactoryCb> createListenerFilterFactoryList(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n      Configuration::ListenerFactoryContext& context) PURE;\n\n  /**\n   * Creates a list of UDP listener filter factories.\n   * @param filters supplies the configuration.\n   * @param context supplies the factory creation context.\n   * @return 
std::vector<Network::UdpListenerFilterFactoryCb> the list of filter factories.\n   */\n  virtual std::vector<Network::UdpListenerFilterFactoryCb> createUdpListenerFilterFactoryList(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n      Configuration::ListenerFactoryContext& context) PURE;\n\n  /**\n   * @return DrainManagerPtr a new drain manager.\n   * @param drain_type supplies the type of draining to do for the owning listener.\n   */\n  virtual DrainManagerPtr\n  createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) PURE;\n\n  /**\n   * @return uint64_t a listener tag usable for connection handler tracking.\n   */\n  virtual uint64_t nextListenerTag() PURE;\n};\n\n/**\n * A manager for all listeners and all threaded connection handling workers.\n */\nclass ListenerManager {\npublic:\n  // Indicates listeners to stop.\n  enum class StopListenersType {\n    // Listeners in the inbound direction are only stopped.\n    InboundOnly,\n    // All listeners are stopped.\n    All,\n  };\n\n  // The types of listeners to be returned from listeners(ListenerState).\n  // An enum instead of enum class so the underlying type is an int and bitwise operations can be\n  // used without casting.\n  enum ListenerState : uint8_t {\n    ACTIVE = 1 << 0,\n    WARMING = 1 << 1,\n    DRAINING = 1 << 2,\n    ALL = ACTIVE | WARMING | DRAINING\n  };\n\n  virtual ~ListenerManager() = default;\n\n  /**\n   * Add or update a listener. Listeners are referenced by a unique name. If no name is provided,\n   * the manager will allocate a UUID. Listeners that expect to be dynamically updated should\n   * provide a unique name. The manager will search by name to find the existing listener that\n   * should be updated. The new listener must have the same configured address. The old listener\n   * will be gracefully drained once the new listener is ready to take traffic (e.g. 
when RDS has\n   * been initialized).\n   * @param config supplies the configuration proto.\n   * @param version_info supplies the xDS version of the listener.\n   * @param modifiable supplies whether the added listener can be updated or removed. If the\n   *        listener is not modifiable, future calls to this function or removeListener() on behalf\n   *        of this listener will return false.\n   * @return TRUE if a listener was added or FALSE if the listener was not updated because it is\n   *         a duplicate of the existing listener. This routine will throw an EnvoyException if\n   *         there is a fundamental error preventing the listener from being added or updated.\n   */\n  virtual bool addOrUpdateListener(const envoy::config::listener::v3::Listener& config,\n                                   const std::string& version_info, bool modifiable) PURE;\n\n  /**\n   * Instruct the listener manager to create an LDS API provider. This is a separate operation\n   * during server initialization because the listener manager is created prior to several core\n   * pieces of the server existing.\n   * @param lds_config supplies the management server configuration.\n   * @param lds_resources_locator udpa::core::v1::ResourceLocator for listener collection.\n   */\n  virtual void createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config,\n                            const udpa::core::v1::ResourceLocator* lds_resources_locator) PURE;\n\n  /**\n   * @param state the type of listener to be returned (defaults to ACTIVE), states can be OR'd\n   * together to return multiple different types\n   * @return std::vector<std::reference_wrapper<Network::ListenerConfig>> a list of currently known\n   * listeners in the requested state. Note that this routine returns references to the existing\n   * listeners. 
The references are only valid in the context of the current call stack and should\n   * not be stored.\n   */\n  virtual std::vector<std::reference_wrapper<Network::ListenerConfig>>\n  listeners(ListenerState state = ListenerState::ACTIVE) PURE;\n\n  /**\n   * @return uint64_t the total number of connections owned by all listeners across all workers.\n   */\n  virtual uint64_t numConnections() const PURE;\n\n  /**\n   * Remove a listener by name.\n   * @param name supplies the listener name to remove.\n   * @return TRUE if the listener was found and removed. Note that when this routine returns TRUE,\n   * the listener has not necessarily been actually deleted right away. The listener will be\n   * drained and fully removed at some later time.\n   */\n  virtual bool removeListener(const std::string& name) PURE;\n\n  /**\n   * Start all workers accepting new connections on all added listeners.\n   * @param guard_dog supplies the guard dog to use for thread watching.\n   */\n  virtual void startWorkers(GuardDog& guard_dog) PURE;\n\n  /**\n   * Stop all listeners from accepting new connections without actually removing any of them. This\n   * is used for server draining and /drain_listeners admin endpoint. This method directly stops the\n   * listeners on workers. Once a listener is stopped, any listener modifications are not allowed.\n   * @param stop_listeners_type indicates listeners to stop.\n   */\n  virtual void stopListeners(StopListenersType stop_listeners_type) PURE;\n\n  /**\n   * Stop all threaded workers from running. When this routine returns all worker threads will\n   * have exited.\n   */\n  virtual void stopWorkers() PURE;\n\n  /*\n   * Warn the listener manager of an impending update. 
This allows the listener to clear per-update\n   * state.\n   */\n  virtual void beginListenerUpdate() PURE;\n\n  /*\n   * Inform the listener manager that the update has completed, and informs the listener of any\n   * errors handled by the reload source.\n   */\n  using FailureStates = std::vector<std::unique_ptr<envoy::admin::v3::UpdateFailureState>>;\n  virtual void endListenerUpdate(FailureStates&& failure_states) PURE;\n\n  // TODO(junr03): once ApiListeners support warming and draining, this function should return a\n  // weak_ptr to its caller. This would allow the caller to verify if the\n  // ApiListener is available to receive API calls on it.\n  /**\n   * @return the server's API Listener if it exists, nullopt if it does not.\n   */\n  virtual ApiListenerOptRef apiListener() PURE;\n\n  /*\n   * @return TRUE if the worker has started or FALSE if not.\n   */\n  virtual bool isWorkerStarted() PURE;\n};\n\n// overload operator| to allow ListenerManager::listeners(ListenerState) to be called using a\n// combination of flags, such as listeners(ListenerState::WARMING|ListenerState::ACTIVE)\nconstexpr ListenerManager::ListenerState operator|(const ListenerManager::ListenerState lhs,\n                                                   const ListenerManager::ListenerState rhs) {\n  return static_cast<ListenerManager::ListenerState>(static_cast<uint8_t>(lhs) |\n                                                     static_cast<uint8_t>(rhs));\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/options.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/admin/v3/server_info.pb.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/network/address.h\"\n\n#include \"absl/types/optional.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Whether to run Envoy in serving mode, or in config validation mode at one of two levels (in which\n * case we'll verify the configuration file is valid, print any errors, and exit without serving.)\n */\nenum class Mode {\n  /**\n   * Default mode: Regular Envoy serving process. Configs are validated in the normal course of\n   * initialization, but if all is well we proceed to serve traffic.\n   */\n  Serve,\n\n  /**\n   * Validate as much as possible without opening network connections upstream or downstream.\n   */\n  Validate,\n\n  /**\n   * Completely load and initialize the config, and then exit without running the listener loop.\n   */\n  InitOnly,\n\n  // TODO(rlazarus): Add a fourth option for \"light validation\": Mock out access to the filesystem.\n  // Perform no validation of files referenced in the config, such as runtime configs, SSL certs,\n  // etc. Validation will pass even if those files are malformed or don't exist, allowing the config\n  // to be validated in a non-prod environment.\n};\n\n/**\n * During the drain sequence, different components ask the DrainManager\n * whether to drain via drainClose(). 
This enum dictates the behaviour of\n * drainClose() calls.\n */\nenum class DrainStrategy {\n  /**\n   * The probability of drainClose() returning true increases from 0 to 100%\n   * over the duration of the drain period.\n   */\n  Gradual,\n\n  /**\n   * drainClose() will return true as soon as the drain sequence is initiated.\n   */\n  Immediate,\n};\n\nusing CommandLineOptionsPtr = std::unique_ptr<envoy::admin::v3::CommandLineOptions>;\n\n/**\n * General options for the server.\n */\nclass Options {\npublic:\n  virtual ~Options() = default;\n\n  /**\n   * @return uint64_t the base ID for the server. This is required for system-wide things like\n   *         shared memory, domain sockets, etc. that are used during hot restart. Setting the\n   *         base ID to a different value will allow the server to run multiple times on the same\n   *         host if desired.\n   */\n  virtual uint64_t baseId() const PURE;\n\n  /**\n   * @return bool choose an unused base ID dynamically. The chosen base id can be written to a\n   *         a file using the baseIdPath option.\n   */\n  virtual bool useDynamicBaseId() const PURE;\n\n  /**\n   * @return const std::string& the dynamic base id output file.\n   */\n  virtual const std::string& baseIdPath() const PURE;\n\n  /**\n   * @return the number of worker threads to run in the server.\n   */\n  virtual uint32_t concurrency() const PURE;\n\n  /**\n   * @return the duration of the drain period in seconds.\n   */\n  virtual std::chrono::seconds drainTime() const PURE;\n\n  /**\n   * @return the strategy that defines behaviour of DrainManager::drainClose();\n   */\n  virtual DrainStrategy drainStrategy() const PURE;\n\n  /**\n   * @return the delay before shutting down the parent envoy in a hot restart,\n   *         generally longer than drainTime().\n   */\n  virtual std::chrono::seconds parentShutdownTime() const PURE;\n\n  /**\n   * @return const std::string& the path to the configuration file.\n   */\n  virtual const 
std::string& configPath() const PURE;\n\n  /**\n   * @return const std::string& an inline YAML bootstrap config that merges\n   *                            into the config loaded in configPath().\n   */\n  virtual const std::string& configYaml() const PURE;\n\n  /**\n   * @return const envoy::config::bootstrap::v2::Bootstrap& a bootstrap proto object\n   * that merges into the config last, after configYaml and configPath.\n   */\n  virtual const envoy::config::bootstrap::v3::Bootstrap& configProto() const PURE;\n\n  /**\n   * @return const absl::optional<uint32_t>& the bootstrap version to use, if specified.\n   */\n  virtual const absl::optional<uint32_t>& bootstrapVersion() const PURE;\n\n  /**\n   * @return bool allow unknown fields in the static configuration?\n   */\n  virtual bool allowUnknownStaticFields() const PURE;\n\n  /**\n   * @return bool allow unknown fields in the dynamic configuration?\n   */\n  virtual bool rejectUnknownDynamicFields() const PURE;\n\n  /**\n   * @return bool ignore unknown fields in the dynamic configuration?\n   **/\n  virtual bool ignoreUnknownDynamicFields() const PURE;\n\n  /**\n   * @return const std::string& the admin address output file.\n   */\n  virtual const std::string& adminAddressPath() const PURE;\n\n  /**\n   * @return Network::Address::IpVersion the local address IP version.\n   */\n  virtual Network::Address::IpVersion localAddressIpVersion() const PURE;\n\n  /**\n   * @return spdlog::level::level_enum the default log level for the server.\n   */\n  virtual spdlog::level::level_enum logLevel() const PURE;\n\n  /**\n   * @return const std::vector<std::pair<std::string, spdlog::level::level_enum>>& pair of\n   * component,log level for all configured components.\n   */\n  virtual const std::vector<std::pair<std::string, spdlog::level::level_enum>>&\n  componentLogLevels() const PURE;\n\n  /**\n   * @return const std::string& the log format string.\n   */\n  virtual const std::string& logFormat() const PURE;\n\n  
/**\n   * @return const bool indicating whether to escape c-style escape sequences in logs.\n   */\n  virtual bool logFormatEscaped() const PURE;\n\n  /**\n   * @return const bool logger mode: whether to use Fancy Logger.\n   */\n  virtual bool enableFineGrainLogging() const PURE;\n\n  /**\n   * @return const std::string& the log file path.\n   */\n  virtual const std::string& logPath() const PURE;\n\n  /**\n   * @return the restart epoch. 0 indicates the first server start, 1 the second, and so on.\n   */\n  virtual uint64_t restartEpoch() const PURE;\n\n  /**\n   * @return whether to verify the configuration file is valid, print any errors, and exit\n   *         without serving.\n   */\n  virtual Mode mode() const PURE;\n\n  /**\n   * @return std::chrono::milliseconds the duration in msec between log flushes.\n   */\n  virtual std::chrono::milliseconds fileFlushIntervalMsec() const PURE;\n\n  /**\n   * @return const std::string& the server's cluster.\n   */\n  virtual const std::string& serviceClusterName() const PURE;\n\n  /**\n   * @return const std::string& the server's node identification.\n   */\n  virtual const std::string& serviceNodeName() const PURE;\n\n  /**\n   * @return const std::string& the server's zone.\n   */\n  virtual const std::string& serviceZone() const PURE;\n\n  /**\n   * @return bool indicating whether the hot restart functionality has been disabled via cli flags.\n   */\n  virtual bool hotRestartDisabled() const PURE;\n\n  /**\n   * @return bool indicating whether system signal listeners are enabled.\n   */\n  virtual bool signalHandlingEnabled() const PURE;\n\n  /**\n   * @return bool indicating whether mutex tracing functionality has been enabled.\n   */\n  virtual bool mutexTracingEnabled() const PURE;\n\n  /**\n   * @return whether to use the fake symbol table implementation.\n   */\n  virtual bool fakeSymbolTableEnabled() const PURE;\n\n  /**\n   * @return bool indicating whether cpuset size should determine the number of worker 
threads.\n   */\n  virtual bool cpusetThreadsEnabled() const PURE;\n\n  /**\n   * @return the names of extensions to disable.\n   */\n  virtual const std::vector<std::string>& disabledExtensions() const PURE;\n\n  /**\n   * Converts the Options in to CommandLineOptions proto message defined in server_info.proto.\n   * @return CommandLineOptionsPtr the protobuf representation of the options.\n   */\n  virtual CommandLineOptionsPtr toCommandLineOptions() const PURE;\n\n  /**\n   * @return the path of socket file.\n   */\n  virtual const std::string& socketPath() const PURE;\n\n  /**\n   * @return the mode of socket file.\n   */\n  virtual mode_t socketMode() const PURE;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/overload_manager.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Tracks the state of an overload action. The state is a number between 0 and 1 that represents the\n * level of saturation. The values are categorized in two groups:\n * - Saturated (value = 1): indicates that an overload action is active because at least one of its\n *   triggers has reached saturation.\n * - Scaling (0 <= value < 1): indicates that an overload action is not saturated.\n */\nclass OverloadActionState {\npublic:\n  static constexpr OverloadActionState inactive() { return OverloadActionState(0); }\n\n  static constexpr OverloadActionState saturated() { return OverloadActionState(1.0); }\n\n  explicit constexpr OverloadActionState(float value)\n      : action_value_(std::min(1.0f, std::max(0.0f, value))) {}\n\n  float value() const { return action_value_; }\n  bool isSaturated() const { return action_value_ == 1; }\n\nprivate:\n  float action_value_;\n};\n\n/**\n * Callback invoked when an overload action changes state.\n */\nusing OverloadActionCb = std::function<void(OverloadActionState)>;\n\n/**\n * Thread-local copy of the state of each configured overload action.\n */\nclass ThreadLocalOverloadState : public ThreadLocal::ThreadLocalObject {\npublic:\n  // Get a thread-local reference to the value for the given action key.\n  virtual const OverloadActionState& getState(const std::string& action) PURE;\n};\n\n/**\n * Well-known overload action names.\n */\nclass OverloadActionNameValues {\npublic:\n  // Overload action to stop accepting new HTTP requests.\n  const std::string StopAcceptingRequests = \"envoy.overload_actions.stop_accepting_requests\";\n\n  // Overload action to disable http keepalive (for HTTP1.x).\n  const std::string DisableHttpKeepAlive = 
\"envoy.overload_actions.disable_http_keepalive\";\n\n  // Overload action to stop accepting new connections.\n  const std::string StopAcceptingConnections = \"envoy.overload_actions.stop_accepting_connections\";\n\n  // Overload action to try to shrink the heap by releasing free memory.\n  const std::string ShrinkHeap = \"envoy.overload_actions.shrink_heap\";\n};\n\nusing OverloadActionNames = ConstSingleton<OverloadActionNameValues>;\n\n/**\n * The OverloadManager protects the Envoy instance from being overwhelmed by client\n * requests. It monitors a set of resources and notifies registered listeners if\n * configured thresholds for those resources have been exceeded.\n */\nclass OverloadManager {\npublic:\n  virtual ~OverloadManager() = default;\n\n  /**\n   * Start a recurring timer to monitor resources and notify listeners when overload actions\n   * change state.\n   */\n  virtual void start() PURE;\n\n  /**\n   * Register a callback to be invoked when the specified overload action changes state\n   * (i.e., becomes activated or inactivated). Must be called before the start method is called.\n   * @param action const std::string& the name of the overload action to register for\n   * @param dispatcher Event::Dispatcher& the dispatcher on which callbacks will be posted\n   * @param callback OverloadActionCb the callback to post when the overload action\n   *        changes state\n   * @returns true if action was registered and false if no such action has been configured\n   */\n  virtual bool registerForAction(const std::string& action, Event::Dispatcher& dispatcher,\n                                 OverloadActionCb callback) PURE;\n\n  /**\n   * Get the thread-local overload action states. Lookups in this object can be used as\n   * an alternative to registering a callback for overload action state changes.\n   */\n  virtual ThreadLocalOverloadState& getThreadLocalOverloadState() PURE;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/process_context.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\n\n/**\n * Represents some other part of the process.\n */\nclass ProcessObject {\npublic:\n  virtual ~ProcessObject() = default;\n};\n\nusing ProcessObjectOptRef = absl::optional<std::reference_wrapper<ProcessObject>>;\n\n/**\n * Context passed to filters to access resources from non-Envoy parts of the\n * process.\n */\nclass ProcessContext {\npublic:\n  virtual ~ProcessContext() = default;\n\n  /**\n   * @return the ProcessObject for this context.\n   */\n  virtual ProcessObject& get() const PURE;\n};\n\nusing ProcessContextOptRef = absl::optional<std::reference_wrapper<ProcessContext>>;\n\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/request_id_extension_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\n/**\n * Implemented for each RequestIDExtension and registered via Registry::registerFactory\n * or the convenience class RegisterFactory.\n */\nclass RequestIDExtensionFactory : public Envoy::Config::TypedFactory {\npublic:\n  ~RequestIDExtensionFactory() override = default;\n\n  /**\n   * Create a Request ID Extension instance from the provided config proto.\n   * @param config the custom configuration for this request id extension type.\n   * @param context general filter context through which persistent resources can be accessed.\n   */\n  virtual Http::RequestIDExtensionSharedPtr createExtensionInstance(const Protobuf::Message& config,\n                                                                    FactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.request_id_extension\"; }\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/resource_monitor.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n// Struct for reporting usage for a particular resource.\nstruct ResourceUsage {\n  bool operator==(const ResourceUsage& rhs) const {\n    return resource_pressure_ == rhs.resource_pressure_;\n  }\n\n  // Fraction of (resource usage)/(resource limit).\n  double resource_pressure_;\n};\n\nclass ResourceMonitor {\npublic:\n  virtual ~ResourceMonitor() = default;\n\n  /**\n   * Notifies caller of updated resource usage.\n   */\n  class Callbacks {\n  public:\n    virtual ~Callbacks() = default;\n\n    /**\n     * Called when the request for updated resource usage succeeds.\n     * @param usage the updated resource usage\n     */\n    virtual void onSuccess(const ResourceUsage& usage) PURE;\n\n    /**\n     * Called when the request for updated resource usage fails.\n     * @param error the exception caught when trying to get updated resource usage\n     */\n    virtual void onFailure(const EnvoyException& error) PURE;\n  };\n\n  /**\n   * Recalculate resource usage.\n   * This must be non-blocking so if RPCs need to be made they should be\n   * done asynchronously and invoke the callback when finished.\n   */\n  virtual void updateResourceUsage(Callbacks& callbacks) PURE;\n};\n\nusing ResourceMonitorPtr = std::unique_ptr<ResourceMonitor>;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/resource_monitor_config.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/protobuf/message_validator.h\"\n#include \"envoy/server/resource_monitor.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nclass ResourceMonitorFactoryContext {\npublic:\n  virtual ~ResourceMonitorFactoryContext() = default;\n\n  /**\n   * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used\n   *         for all singleton processing.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * @return reference to the Api object\n   */\n  virtual Api::Api& api() PURE;\n\n  /**\n   * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration\n   *         messages.\n   */\n  virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE;\n};\n\n/**\n * Implemented by each resource monitor and registered via Registry::registerFactory()\n * or the convenience class RegistryFactory.\n */\nclass ResourceMonitorFactory : public Config::TypedFactory {\npublic:\n  ~ResourceMonitorFactory() override = default;\n\n  /**\n   * Create a particular resource monitor implementation.\n   * @param config const ProtoBuf::Message& supplies the config for the resource monitor\n   *        implementation.\n   * @param context ResourceMonitorFactoryContext& supplies the resource monitor's context.\n   * @return ResourceMonitorPtr the resource monitor instance. 
Should not be nullptr.\n   * @throw EnvoyException if the implementation is unable to produce an instance with\n   *        the provided parameters.\n   */\n  virtual ResourceMonitorPtr createResourceMonitor(const Protobuf::Message& config,\n                                                   ResourceMonitorFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.resource_monitors\"; }\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/tracer_config.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\n/**\n * Context passed to HTTP tracers to access server resources.\n */\nclass TracerFactoryContext {\npublic:\n  virtual ~TracerFactoryContext() = default;\n\n  /**\n   * @return ServerFactoryContext which lifetime is no shorter than the server.\n   */\n  virtual ServerFactoryContext& serverFactoryContext() PURE;\n\n  /**\n   * @return ProtobufMessage::ValidationVisitor& validation visitor for tracer configuration\n   *         messages.\n   */\n  virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE;\n};\n\nusing TracerFactoryContextPtr = std::unique_ptr<TracerFactoryContext>;\n\n/**\n * Implemented by each Tracer and registered via Registry::registerFactory() or the convenience\n * class RegisterFactory.\n */\nclass TracerFactory : public Config::TypedFactory {\npublic:\n  ~TracerFactory() override = default;\n\n  /**\n   * Create a particular HttpTracer implementation. If the implementation is unable to produce an\n   * HttpTracer with the provided parameters, it should throw an EnvoyException in the case of\n   * general error or a Json::Exception if the json configuration is erroneous. 
The returned\n   * pointer should always be valid.\n   *\n   * NOTE: Due to the corner case of OpenCensus, who can only support a single tracing\n   *       configuration per entire process, the returned HttpTracer instance is not guaranteed\n   *       to be unique.\n   *       That is why the return type has been changed to std::shared_ptr<> instead of a more\n   *       idiomatic std::unique_ptr<>.\n   *\n   * @param config supplies the proto configuration for the HttpTracer\n   * @param context supplies the factory context\n   */\n  virtual Tracing::HttpTracerSharedPtr createHttpTracer(const Protobuf::Message& config,\n                                                        TracerFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.tracers\"; }\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/transport_socket_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/server/factory_context.h\"\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\n/**\n * Context passed to transport socket factory to access server resources.\n */\nclass TransportSocketFactoryContext {\npublic:\n  virtual ~TransportSocketFactoryContext() = default;\n\n  /**\n   * @return Server::Admin& the server's admin interface.\n   */\n  virtual Server::Admin& admin() PURE;\n\n  /**\n   * @return Ssl::ContextManager& the SSL context manager.\n   */\n  virtual Ssl::ContextManager& sslContextManager() PURE;\n\n  /**\n   * @return Stats::Scope& the transport socket's stats scope.\n   */\n  virtual Stats::Scope& scope() PURE;\n\n  /**\n   * Return the instance of secret manager.\n   */\n  virtual Secret::SecretManager& secretManager() PURE;\n\n  /**\n   * @return the instance of ClusterManager.\n   */\n  virtual Upstream::ClusterManager& clusterManager() PURE;\n\n  /**\n   * @return information about the local environment the server is running in.\n   */\n  virtual const LocalInfo::LocalInfo& localInfo() const PURE;\n\n  /**\n   * @return Event::Dispatcher& the main thread's dispatcher.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * @return the server-wide stats store.\n   */\n  virtual Stats::Store& stats() PURE;\n\n  /**\n   * @return a reference to the instance of an init manager.\n   */\n  virtual 
Init::Manager& initManager() PURE;\n\n  /**\n   * @return the server's singleton manager.\n   */\n  virtual Singleton::Manager& singletonManager() PURE;\n\n  /**\n   * @return the server's TLS slot allocator.\n   */\n  virtual ThreadLocal::SlotAllocator& threadLocal() PURE;\n\n  /**\n   * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration\n   *         messages.\n   */\n  virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE;\n\n  /**\n   * @return reference to the Api object\n   */\n  virtual Api::Api& api() PURE;\n};\n\nclass TransportSocketConfigFactory : public Config::TypedFactory {\npublic:\n  ~TransportSocketConfigFactory() override = default;\n};\n\n/**\n * Implemented by each transport socket used for upstream connections. Registered via class\n * RegisterFactory.\n */\nclass UpstreamTransportSocketConfigFactory : public virtual TransportSocketConfigFactory {\npublic:\n  /**\n   * Create a particular transport socket factory implementation.\n   * @param config const Protobuf::Message& supplies the config message for the transport socket\n   *        implementation.\n   * @param context TransportSocketFactoryContext& supplies the transport socket's context.\n   * @return Network::TransportSocketFactoryPtr the transport socket factory instance. The returned\n   *         TransportSocketFactoryPtr should not be nullptr.\n   *\n   * @throw EnvoyException if the implementation is unable to produce a factory with the provided\n   *        parameters.\n   */\n  virtual Network::TransportSocketFactoryPtr\n  createTransportSocketFactory(const Protobuf::Message& config,\n                               TransportSocketFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.transport_sockets.upstream\"; }\n};\n\n/**\n * Implemented by each transport socket used for downstream connections. 
Registered via class\n * RegisterFactory.\n */\nclass DownstreamTransportSocketConfigFactory : public virtual TransportSocketConfigFactory {\npublic:\n  /**\n   * Create a particular downstream transport socket factory implementation.\n   * @param server_names const std::vector<std::string>& the names of the server. This parameter is\n   *        currently used by SNI implementation to know the expected server names.\n   * @param config const Protobuf::Message& supplies the config message for the transport socket\n   *        implementation.\n   * @param context TransportSocketFactoryContext& supplies the transport socket's context.\n   * @return Network::TransportSocketFactoryPtr the transport socket factory instance. The returned\n   *         TransportSocketFactoryPtr should not be nullptr.\n   *\n   * @throw EnvoyException if the implementation is unable to produce a factory with the provided\n   *        parameters.\n   */\n  virtual Network::TransportSocketFactoryPtr\n  createTransportSocketFactory(const Protobuf::Message& config,\n                               TransportSocketFactoryContext& context,\n                               const std::vector<std::string>& server_names) PURE;\n\n  std::string category() const override { return \"envoy.transport_sockets.downstream\"; }\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/watchdog.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * WatchDog objects are an individual thread's interface with the deadlock\n * GuardDog. A shared pointer to a WatchDog is obtained from the GuardDog at\n * thread startup. After this point the \"touch\" method must be called\n * periodically to avoid triggering the deadlock detector.\n */\nclass WatchDog {\npublic:\n  virtual ~WatchDog() = default;\n\n  /**\n   * Start a recurring touch timer in the dispatcher passed as argument.\n   *\n   * This will automatically call the touch() method at the interval specified\n   * during construction.\n   *\n   * The timer object is stored within the WatchDog object. It will go away if\n   * the object goes out of scope and stop the timer.\n   */\n  virtual void startWatchdog(Event::Dispatcher& dispatcher) PURE;\n\n  /**\n   * Manually indicate that you are still alive by calling this.\n   *\n   * This can be used if this is later used on a thread where there is no dispatcher.\n   */\n  virtual void touch() PURE;\n  virtual Thread::ThreadId threadId() const PURE;\n  virtual MonotonicTime lastTouchTime() const PURE;\n};\n\nusing WatchDogSharedPtr = std::shared_ptr<WatchDog>;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/server/worker.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/server/guarddog.h\"\n#include \"envoy/server/overload_manager.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Interface for a threaded connection handling worker. All routines are thread safe.\n */\nclass Worker {\npublic:\n  virtual ~Worker() = default;\n\n  /**\n   * Completion called when a listener has been added on a worker and is listening for new\n   * connections.\n   * @param success supplies whether the addition was successful or not. FALSE can be returned\n   *                when there is a race condition between bind() and listen().\n   */\n  using AddListenerCompletion = std::function<void(bool success)>;\n\n  /**\n   * Add a listener to the worker and replace the previous listener if any. If the previous listener\n   * doesn't exist, the behavior should be equivalent to add a new listener.\n   * @param overridden_listener The previous listener tag to be replaced. nullopt if it's a new\n   * listener.\n   * @param listener supplies the listener to add.\n   * @param completion supplies the completion to call when the listener has been added (or not) on\n   *                   the worker.\n   */\n  virtual void addListener(absl::optional<uint64_t> overridden_listener,\n                           Network::ListenerConfig& listener,\n                           AddListenerCompletion completion) PURE;\n\n  /**\n   * @return uint64_t the number of connections across all listeners that the worker owns.\n   */\n  virtual uint64_t numConnections() const PURE;\n\n  /**\n   * Start the worker thread.\n   * @param guard_dog supplies the guard dog to use for thread watching.\n   */\n  virtual void start(GuardDog& guard_dog) PURE;\n\n  /**\n   * Initialize stats for this worker's dispatcher, if available. 
The worker will output\n   * thread-specific stats under the given scope.\n   * @param scope the scope to contain the new per-dispatcher stats created here.\n   */\n  virtual void initializeStats(Stats::Scope& scope) PURE;\n\n  /**\n   * Stop the worker thread.\n   */\n  virtual void stop() PURE;\n\n  /**\n   * Remove a listener from the worker.\n   * @param listener supplies the listener to remove.\n   * @param completion supplies the completion to be called when the listener has been removed.\n   *        This completion is called on the worker thread. No locking is performed by the worker.\n   */\n  virtual void removeListener(Network::ListenerConfig& listener,\n                              std::function<void()> completion) PURE;\n  /**\n   * Remove the stale filter chains of the given listener but leave the listener running.\n   * @param listener_tag supplies the tag passed to addListener().\n   * @param filter_chains supplies the filter chains to be removed.\n   * @param completion supplies the completion to be called when the listener removed all the\n   * untracked connections. This completion is called on the worker thread. No locking is performed\n   * by the worker.\n   */\n  virtual void removeFilterChains(uint64_t listener_tag,\n                                  const std::list<const Network::FilterChain*>& filter_chains,\n                                  std::function<void()> completion) PURE;\n\n  /**\n   * Stop a listener from accepting new connections. This is used for server draining.\n   * @param listener supplies the listener to stop.\n   * @param completion supplies the completion to be called when the listener has stopped\n   * accepting new connections. This completion is called on the worker thread. 
No locking is\n   * performed by the worker.\n   */\n  virtual void stopListener(Network::ListenerConfig& listener,\n                            std::function<void()> completion) PURE;\n};\n\nusing WorkerPtr = std::unique_ptr<Worker>;\n\n/**\n * Factory for creating workers.\n */\nclass WorkerFactory {\npublic:\n  virtual ~WorkerFactory() = default;\n\n  /**\n   * @param index supplies the index of the worker, in the range of [0, concurrency).\n   * @param overload_manager supplies the server's overload manager.\n   * @param worker_name supplies the name of the worker, used for per-worker stats.\n   * @return WorkerPtr a new worker.\n   */\n  virtual WorkerPtr createWorker(uint32_t index, OverloadManager& overload_manager,\n                                 const std::string& worker_name) PURE;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/singleton/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"instance_interface\",\n    hdrs = [\"instance.h\"],\n)\n\nenvoy_cc_library(\n    name = \"manager_interface\",\n    hdrs = [\"manager.h\"],\n    deps = [\n        \":instance_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/registry\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/singleton/instance.h",
    "content": "#pragma once\n\n#include <memory>\n\nnamespace Envoy {\nnamespace Singleton {\n\n/**\n * All singletons must derive from this type.\n */\nclass Instance {\npublic:\n  virtual ~Instance() = default;\n};\n\nusing InstanceSharedPtr = std::shared_ptr<Instance>;\n\n} // namespace Singleton\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/singleton/manager.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/singleton/instance.h\"\n\nnamespace Envoy {\nnamespace Singleton {\n\n/**\n * An abstract registration for a singleton entry.\n */\nclass Registration : public Config::UntypedFactory {\npublic:\n  ~Registration() override = default;\n  std::string category() const override { return \"envoy.singleton\"; }\n};\n\n/**\n * A concrete implementation of a singleton registration. All singletons are referenced by name\n * and must be statically registered ahead of time. This can be done like so:\n *\n * static constexpr char foo_singleton_name[] = \"foo_singleton\";\n * static Registry::RegisterFactory<Singleton::RegistrationImpl<foo_singleton_name>,\n *                                  Singleton::Registration>\n *     date_provider_singleton_registered_;\n *\n * Once this is done, the singleton can be get/set via the manager. See the Manager interface\n * for more information.\n */\ntemplate <const char* name_param> class RegistrationImpl : public Registration {\npublic:\n  std::string name() const override { return name_param; }\n};\n\n/**\n * Macro used to statically register singletons managed by the singleton manager\n * defined in envoy/singleton/manager.h. 
After the NAME has been registered use the\n * SINGLETON_MANAGER_REGISTERED_NAME macro to access the name registered with the\n * singleton manager.\n */\n#define SINGLETON_MANAGER_REGISTRATION(NAME)                                                       \\\n  static constexpr char NAME##_singleton_name[] = #NAME \"_singleton\";                              \\\n  static Envoy::Registry::RegisterInternalFactory<                                                 \\\n      Envoy::Singleton::RegistrationImpl<NAME##_singleton_name>, Envoy::Singleton::Registration>   \\\n      NAME##_singleton_registered_;\n\n#define SINGLETON_MANAGER_REGISTERED_NAME(NAME) NAME##_singleton_name\n\n/**\n * Callback function used to create a singleton.\n */\nusing SingletonFactoryCb = std::function<InstanceSharedPtr()>;\n\n/**\n * A manager for all server-side singletons.\n */\nclass Manager {\npublic:\n  virtual ~Manager() = default;\n\n  /**\n   * This is a helper on top of get() that casts the object stored to the specified type. Since the\n   * manager only stores pointers to the base interface, dynamic_cast provides some level of\n   * protection via RTTI.\n   */\n  template <class T> std::shared_ptr<T> getTyped(const std::string& name, SingletonFactoryCb cb) {\n    return std::dynamic_pointer_cast<T>(get(name, cb));\n  }\n\n  /**\n   * Get a singleton and create it if it does not exist.\n   * @param name supplies the singleton name. Must be registered via RegistrationImpl.\n   * @param singleton supplies the singleton creation callback. This will only be called if the\n   *        singleton does not already exist. NOTE: The manager only stores a weak pointer. This\n   *        allows a singleton to be cleaned up if it is not needed any more. 
All code that uses\n   *        singletons must store the shared_ptr for as long as the singleton is needed.\n   * @return InstancePtr the singleton.\n   */\n  virtual InstanceSharedPtr get(const std::string& name, SingletonFactoryCb) PURE;\n};\n\nusing ManagerPtr = std::unique_ptr<Manager>;\n\n} // namespace Singleton\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"connection_interface\",\n    hdrs = [\"connection.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":ssl_socket_state\",\n        \"//include/envoy/common:time_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"context_interface\",\n    hdrs = [\"context.h\"],\n    deps = [\"@envoy_api//envoy/admin/v3:pkg_cc_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"context_config_interface\",\n    hdrs = [\"context_config.h\"],\n    deps = [\n        \":certificate_validation_context_config_interface\",\n        \":handshaker_interface\",\n        \":tls_certificate_config_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"context_manager_interface\",\n    hdrs = [\"context_manager.h\"],\n    deps = [\n        \":context_config_interface\",\n        \":context_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/stats:stats_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tls_certificate_config_interface\",\n    hdrs = [\"tls_certificate_config.h\"],\n    deps = [\n        \"//include/envoy/ssl/private_key:private_key_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"certificate_validation_context_config_interface\",\n    hdrs = [\"certificate_validation_context_config.h\"],\n    deps = [\n        \"//source/common/common:matchers_lib\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"ssl_socket_extended_info_interface\",\n    hdrs = [\"ssl_socket_extended_info.h\"],\n    deps = [\n    ],\n)\n\nenvoy_cc_library(\n    name = \"ssl_socket_state\",\n    hdrs = 
[\"ssl_socket_state.h\"],\n    deps = [],\n)\n\nenvoy_cc_library(\n    name = \"handshaker_interface\",\n    hdrs = [\"handshaker.h\"],\n    external_deps = [\"ssl\"],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:post_io_action_interface\",\n        \"//include/envoy/protobuf:message_validator_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/ssl/certificate_validation_context_config.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nclass CertificateValidationContextConfig {\npublic:\n  virtual ~CertificateValidationContextConfig() = default;\n\n  /**\n   * @return The CA certificate to use for peer validation.\n   */\n  virtual const std::string& caCert() const PURE;\n\n  /**\n   * @return Path of the CA certificate to use for peer validation or \"<inline>\"\n   * if the CA certificate was inlined.\n   */\n  virtual const std::string& caCertPath() const PURE;\n\n  /**\n   * @return The CRL to check if a cert is revoked.\n   */\n  virtual const std::string& certificateRevocationList() const PURE;\n\n  /**\n   * @return Path of the certificate revocation list, or \"<inline>\" if the CRL\n   * was inlined.\n   */\n  virtual const std::string& certificateRevocationListPath() const PURE;\n\n  /**\n   * @return The subject alt names to be verified, if enabled.\n   */\n  virtual const std::vector<std::string>& verifySubjectAltNameList() const PURE;\n\n  /**\n   * @return The subject alt name matchers to be verified, if enabled.\n   */\n  virtual const std::vector<envoy::type::matcher::v3::StringMatcher>&\n  subjectAltNameMatchers() const PURE;\n\n  /**\n   * @return A list of a hex-encoded SHA-256 certificate hashes to be verified.\n   */\n  virtual const std::vector<std::string>& verifyCertificateHashList() const PURE;\n\n  /**\n   * @return A list of a hex-encoded SHA-256 SPKI hashes to be verified.\n   */\n  virtual const std::vector<std::string>& verifyCertificateSpkiList() const PURE;\n\n  /**\n   * @return whether to ignore expired certificates (both too new and too old).\n   */\n  virtual bool allowExpiredCertificate() const PURE;\n\n  /**\n   * @return client certificate validation configuration.\n   */\n  virtual 
envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext::\n      TrustChainVerification\n      trustChainVerification() const PURE;\n};\n\nusing CertificateValidationContextConfigPtr = std::unique_ptr<CertificateValidationContextConfig>;\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/connection.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n#include \"absl/types/span.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\n/**\n * Base connection interface for all SSL connections.\n */\nclass ConnectionInfo {\npublic:\n  virtual ~ConnectionInfo() = default;\n\n  /**\n   * @return bool whether the peer certificate is presented.\n   **/\n  virtual bool peerCertificatePresented() const PURE;\n\n  /**\n   * @return bool whether the peer certificate was validated.\n   **/\n  virtual bool peerCertificateValidated() const PURE;\n\n  /**\n   * @return absl::Span<const std::string>the URIs in the SAN field of the local certificate.\n   *         Returns {} if there is no local certificate, or no SAN field, or no URI.\n   **/\n  virtual absl::Span<const std::string> uriSanLocalCertificate() const PURE;\n\n  /**\n   * @return std::string the subject field of the local certificate in RFC 2253 format. Returns \"\"\n   *         if there is no local certificate, or no subject.\n   **/\n  virtual const std::string& subjectLocalCertificate() const PURE;\n\n  /**\n   * @return std::string the SHA256 digest of the peer certificate. Returns \"\" if there is no peer\n   *         certificate which can happen in TLS (non mTLS) connections.\n   */\n  virtual const std::string& sha256PeerCertificateDigest() const PURE;\n\n  /**\n   * @return std::string the SHA1 digest of the peer certificate. Returns \"\" if there is no peer\n   *         certificate which can happen in TLS (non mTLS) connections.\n   */\n  virtual const std::string& sha1PeerCertificateDigest() const PURE;\n\n  /**\n   * @return std::string the serial number field of the peer certificate. 
Returns \"\" if\n   *         there is no peer certificate, or no serial number.\n   **/\n  virtual const std::string& serialNumberPeerCertificate() const PURE;\n\n  /**\n   * @return std::string the issuer field of the peer certificate in RFC 2253 format. Returns \"\" if\n   *         there is no peer certificate, or no issuer.\n   **/\n  virtual const std::string& issuerPeerCertificate() const PURE;\n\n  /**\n   * @return std::string the subject field of the peer certificate in RFC 2253 format. Returns \"\" if\n   *         there is no peer certificate, or no subject.\n   **/\n  virtual const std::string& subjectPeerCertificate() const PURE;\n\n  /**\n   * @return absl::Span<const std::string> the URIs in the SAN field of the peer certificate.\n   *         Returns {} if there is no peer certificate, or no SAN field, or no URI.\n   **/\n  virtual absl::Span<const std::string> uriSanPeerCertificate() const PURE;\n\n  /**\n   * @return std::string the URL-encoded PEM-encoded representation of the peer certificate. Returns\n   *         \"\" if there is no peer certificate or encoding fails.\n   **/\n  virtual const std::string& urlEncodedPemEncodedPeerCertificate() const PURE;\n\n  /**\n   * @return std::string the URL-encoded PEM-encoded representation of the full peer certificate\n   *         chain including the leaf certificate. Returns \"\" if there is no peer certificate or\n   *         encoding fails.\n   **/\n  virtual const std::string& urlEncodedPemEncodedPeerCertificateChain() const PURE;\n\n  /**\n   * @return absl::Span<const std::string> the DNS entries in the SAN field of the peer certificate.\n   *         Returns {} if there is no peer certificate, or no SAN field, or no DNS.\n   **/\n  virtual absl::Span<const std::string> dnsSansPeerCertificate() const PURE;\n\n  /**\n   * @return absl::Span<const std::string> the DNS entries in the SAN field of the local\n   *certificate. 
Returns {} if there is no local certificate, or no SAN field, or no DNS.\n   **/\n  virtual absl::Span<const std::string> dnsSansLocalCertificate() const PURE;\n\n  /**\n   * @return absl::optional<SystemTime> the time that the peer certificate was issued and should be\n   *         considered valid from. Returns empty absl::optional if there is no peer certificate.\n   **/\n  virtual absl::optional<SystemTime> validFromPeerCertificate() const PURE;\n\n  /**\n   * @return absl::optional<SystemTime> the time that the peer certificate expires and should not be\n   *         considered valid after. Returns empty absl::optional if there is no peer certificate.\n   **/\n  virtual absl::optional<SystemTime> expirationPeerCertificate() const PURE;\n\n  /**\n   * @return std::string the hex-encoded TLS session ID as defined in rfc5246.\n   **/\n  virtual const std::string& sessionId() const PURE;\n\n  /**\n   * @return uint16_t the standard ID for the ciphers used in the established TLS connection.\n   *         Returns 0xffff if there is no current negotiated ciphersuite.\n   **/\n  virtual uint16_t ciphersuiteId() const PURE;\n\n  /**\n   * @return std::string the OpenSSL name for the set of ciphers used in the established TLS\n   *         connection. Returns \"\" if there is no current negotiated ciphersuite.\n   **/\n  virtual std::string ciphersuiteString() const PURE;\n\n  /**\n   * @return std::string the TLS version (e.g., TLSv1.2, TLSv1.3) used in the established TLS\n   *         connection.\n   **/\n  virtual const std::string& tlsVersion() const PURE;\n};\n\nusing ConnectionInfoConstSharedPtr = std::shared_ptr<const ConnectionInfo>;\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/context.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/admin/v3/certs.pb.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nusing CertificateDetailsPtr = std::unique_ptr<envoy::admin::v3::CertificateDetails>;\n\n/**\n * SSL Context is used as a template for SSL connection configuration.\n */\nclass Context {\npublic:\n  virtual ~Context() = default;\n\n  /**\n   * @return the number of days in this context until the next certificate will expire\n   */\n  virtual size_t daysUntilFirstCertExpires() const PURE;\n\n  /**\n   * @return certificate details conforming to proto admin.v2alpha.certs.\n   */\n  virtual CertificateDetailsPtr getCaCertInformation() const PURE;\n\n  /**\n   * @return certificate details conforming to proto admin.v2alpha.certs.\n   */\n  virtual std::vector<CertificateDetailsPtr> getCertChainInformation() const PURE;\n\n  /**\n   * @return the number of seconds in this context until the next OCSP response will\n   * expire, or `absl::nullopt` if no OCSP responses exist.\n   */\n  virtual absl::optional<uint64_t> secondsUntilFirstOcspResponseExpires() const PURE;\n};\nusing ContextSharedPtr = std::shared_ptr<Context>;\n\nclass ClientContext : public virtual Context {};\nusing ClientContextSharedPtr = std::shared_ptr<ClientContext>;\n\nclass ServerContext : public virtual Context {};\nusing ServerContextSharedPtr = std::shared_ptr<ServerContext>;\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/context_config.h",
    "content": "#pragma once\n\n#include <array>\n#include <chrono>\n#include <functional>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/ssl/certificate_validation_context_config.h\"\n#include \"envoy/ssl/handshaker.h\"\n#include \"envoy/ssl/tls_certificate_config.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\n/**\n * Supplies the configuration for an SSL context.\n */\nclass ContextConfig {\npublic:\n  virtual ~ContextConfig() = default;\n\n  /**\n   * The list of supported protocols exposed via ALPN. Client connections will send these\n   * protocols to the server. Server connections will use these protocols to select the next\n   * protocol if the client supports ALPN.\n   */\n  virtual const std::string& alpnProtocols() const PURE;\n\n  /**\n   * The ':' delimited list of supported cipher suites\n   */\n  virtual const std::string& cipherSuites() const PURE;\n\n  /**\n   * The ':' delimited list of supported ECDH curves.\n   */\n  virtual const std::string& ecdhCurves() const PURE;\n\n  /**\n   * @return std::vector<std::reference_wrapper<const TlsCertificateConfig>> TLS\n   * certificate configs.\n   */\n  virtual std::vector<std::reference_wrapper<const TlsCertificateConfig>>\n  tlsCertificates() const PURE;\n\n  /**\n   * @return CertificateValidationContextConfig the certificate validation context config.\n   */\n  virtual const CertificateValidationContextConfig* certificateValidationContext() const PURE;\n\n  /**\n   * @return The minimum TLS protocol version to negotiate.\n   */\n  virtual unsigned minProtocolVersion() const PURE;\n\n  /**\n   * @return The maximum TLS protocol version to negotiate.\n   */\n  virtual unsigned maxProtocolVersion() const PURE;\n\n  /**\n   * @return true if the ContextConfig is able to provide secrets to create SSL context,\n   * and false if dynamic secrets are expected but are not downloaded from SDS server yet.\n   */\n  virtual bool 
isReady() const PURE;\n\n  /**\n   * Add secret callback into context config. When dynamic secrets are in use and new secrets\n   * are downloaded from SDS server, this callback is invoked to update SSL context.\n   * @param callback callback that is executed by context config.\n   */\n  virtual void setSecretUpdateCallback(std::function<void()> callback) PURE;\n\n  /**\n   * @return a callback which can be used to create Handshaker instances.\n   */\n  virtual HandshakerFactoryCb createHandshaker() const PURE;\n\n  /**\n   * @return the set of capabilities for handshaker instances created by this context.\n   */\n  virtual HandshakerCapabilities capabilities() const PURE;\n};\n\nclass ClientContextConfig : public virtual ContextConfig {\npublic:\n  /**\n   * @return The server name indication if it's set and ssl enabled\n   * Otherwise, \"\"\n   */\n  virtual const std::string& serverNameIndication() const PURE;\n\n  /**\n   * @return true if server-initiated TLS renegotiation will be allowed.\n   */\n  virtual bool allowRenegotiation() const PURE;\n\n  /**\n   * @return The maximum number of session keys to store.\n   */\n  virtual size_t maxSessionKeys() const PURE;\n\n  /**\n   * @return const std::string& with the signature algorithms for the context.\n   *         This is a :-delimited list of algorithms, see\n   *         https://tools.ietf.org/id/draft-ietf-tls-tls13-21.html#rfc.section.4.2.3\n   *         for names.\n   */\n  virtual const std::string& signingAlgorithmsForTest() const PURE;\n};\n\nusing ClientContextConfigPtr = std::unique_ptr<ClientContextConfig>;\n\nclass ServerContextConfig : public virtual ContextConfig {\npublic:\n  struct SessionTicketKey {\n    std::array<uint8_t, 16> name_;         // 16 == SSL_TICKET_KEY_NAME_LEN\n    std::array<uint8_t, 32> hmac_key_;     // 32 == SHA256_DIGEST_LENGTH\n    std::array<uint8_t, 256 / 8> aes_key_; // AES256 key size, in bytes\n  };\n\n  enum class OcspStaplePolicy {\n    LenientStapling,\n    
StrictStapling,\n    MustStaple,\n  };\n\n  /**\n   * @return True if client certificate is required, false otherwise.\n   */\n  virtual bool requireClientCertificate() const PURE;\n\n  /**\n   * @return OcspStaplePolicy The rule for determining whether to staple OCSP\n   * responses on new connections.\n   */\n  virtual OcspStaplePolicy ocspStaplePolicy() const PURE;\n\n  /**\n   * @return The keys to use for encrypting and decrypting session tickets.\n   * The first element is used for encrypting new tickets, and all elements\n   * are candidates for decrypting received tickets.\n   */\n  virtual const std::vector<SessionTicketKey>& sessionTicketKeys() const PURE;\n\n  /**\n   * @return timeout in seconds for the session.\n   * Session timeout is used to specify lifetime hint of tls tickets.\n   */\n  virtual absl::optional<std::chrono::seconds> sessionTimeout() const PURE;\n\n  /**\n   * @return True if stateless TLS session resumption is disabled, false otherwise.\n   */\n  virtual bool disableStatelessSessionResumption() const PURE;\n};\n\nusing ServerContextConfigPtr = std::unique_ptr<ServerContextConfig>;\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/context_manager.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/ssl/context.h\"\n#include \"envoy/ssl/context_config.h\"\n#include \"envoy/ssl/private_key/private_key.h\"\n#include \"envoy/stats/scope.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\n/**\n * Manages all of the SSL contexts in the process\n */\nclass ContextManager {\npublic:\n  virtual ~ContextManager() = default;\n\n  /**\n   * Builds a ClientContext from a ClientContextConfig.\n   */\n  virtual ClientContextSharedPtr createSslClientContext(Stats::Scope& scope,\n                                                        const ClientContextConfig& config) PURE;\n\n  /**\n   * Builds a ServerContext from a ServerContextConfig.\n   */\n  virtual ServerContextSharedPtr\n  createSslServerContext(Stats::Scope& scope, const ServerContextConfig& config,\n                         const std::vector<std::string>& server_names) PURE;\n\n  /**\n   * @return the number of days until the next certificate being managed will expire.\n   */\n  virtual size_t daysUntilFirstCertExpires() const PURE;\n\n  /**\n   * Iterate through all currently allocated contexts.\n   */\n  virtual void iterateContexts(std::function<void(const Context&)> callback) PURE;\n\n  /**\n   * Access the private key operations manager, which is part of SSL\n   * context manager.\n   */\n  virtual PrivateKeyMethodManager& privateKeyMethodManager() PURE;\n\n  /**\n   * @return the number of seconds until the next OCSP response being managed will\n   * expire, or `absl::nullopt` if no OCSP responses exist.\n   */\n  virtual absl::optional<uint64_t> secondsUntilFirstOcspResponseExpires() const PURE;\n};\n\nusing ContextManagerPtr = std::unique_ptr<ContextManager>;\n\nclass ContextManagerFactory : public Config::UntypedFactory {\npublic:\n  ~ContextManagerFactory() override = default;\n  virtual ContextManagerPtr createContextManager(TimeSource& time_source) PURE;\n\n  // 
There could be only one factory thus the name is static.\n  std::string name() const override { return \"ssl_context_manager\"; }\n  std::string category() const override { return \"envoy.ssl_context_manager\"; }\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/handshaker.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/post_io_action.h\"\n#include \"envoy/protobuf/message_validator.h\"\n\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nclass HandshakeCallbacks {\npublic:\n  virtual ~HandshakeCallbacks() = default;\n\n  /**\n   * @return the connection.\n   */\n  virtual Network::Connection& connection() const PURE;\n\n  /**\n   * A callback which will be executed at most once upon successful completion\n   * of a handshake.\n   */\n  virtual void onSuccess(SSL* ssl) PURE;\n\n  /**\n   * A callback which will be executed at most once upon handshake failure.\n   */\n  virtual void onFailure() PURE;\n\n  /**\n   * Returns a pointer to the transportSocketCallbacks struct, or nullptr if\n   * unset.\n   */\n  virtual Network::TransportSocketCallbacks* transportSocketCallbacks() PURE;\n};\n\n/**\n * Base interface for performing TLS handshakes.\n */\nclass Handshaker {\npublic:\n  virtual ~Handshaker() = default;\n\n  /**\n   * Performs a TLS handshake and returns an action indicating\n   * whether the callsite should close the connection or keep it open.\n   */\n  virtual Network::PostIoAction doHandshake() PURE;\n};\n\nusing HandshakerSharedPtr = std::shared_ptr<Handshaker>;\nusing HandshakerFactoryCb =\n    std::function<HandshakerSharedPtr(bssl::UniquePtr<SSL>, int, HandshakeCallbacks*)>;\n\nclass HandshakerFactoryContext {\npublic:\n  virtual ~HandshakerFactoryContext() = default;\n\n  /**\n   * @return reference to the Api object\n   */\n  virtual Api::Api& api() PURE;\n\n  /**\n   * The list of supported protocols exposed via ALPN, from ContextConfig.\n   */\n  virtual absl::string_view alpnProtocols() const PURE;\n};\n\nstruct HandshakerCapabilities {\n  // Whether or not a handshaker implementation provides certificates itself.\n  bool provides_certificates = false;\n\n  // Whether or not a 
handshaker implementation verifies certificates itself.\n  bool verifies_peer_certificates = false;\n\n  // Whether or not a handshaker implementation handles session resumption\n  // itself.\n  bool handles_session_resumption = false;\n\n  // Whether or not a handshaker implementation provides its own list of ciphers\n  // and curves.\n  bool provides_ciphers_and_curves = false;\n\n  // Whether or not a handshaker implementation handles ALPN selection.\n  bool handles_alpn_selection = false;\n\n  // Should return true if this handshaker is FIPS-compliant.\n  // Envoy will fail to compile if this returns true and `--define=boringssl=fips`.\n  bool is_fips_compliant = true;\n};\n\nclass HandshakerFactory : public Config::TypedFactory {\npublic:\n  /**\n   * @returns a callback to create a Handshaker. Accepts the |config| and\n   * |validation_visitor| for early validation. This virtual base doesn't\n   * perform MessageUtil::downcastAndValidate, but an implementation should.\n   */\n  virtual HandshakerFactoryCb\n  createHandshakerCb(const Protobuf::Message& message,\n                     HandshakerFactoryContext& handshaker_factory_context,\n                     ProtobufMessage::ValidationVisitor& validation_visitor) PURE;\n\n  std::string category() const override { return \"envoy.tls_handshakers\"; }\n\n  /**\n   * Implementations should return a struct with their capabilities. See\n   * HandshakerCapabilities above. For any capability a Handshaker\n   * implementation explicitly declares, Envoy will not also configure that SSL\n   * capability.\n   */\n  virtual HandshakerCapabilities capabilities() const PURE;\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/private_key/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"private_key_interface\",\n    hdrs = [\"private_key.h\"],\n    external_deps = [\"ssl\"],\n    deps = [\n        \":private_key_callbacks_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"private_key_config_interface\",\n    hdrs = [\"private_key_config.h\"],\n    deps = [\n        \":private_key_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/registry\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"private_key_callbacks_interface\",\n    hdrs = [\"private_key_callbacks.h\"],\n    external_deps = [\"ssl\"],\n)\n"
  },
  {
    "path": "include/envoy/ssl/private_key/private_key.h",
    "content": "#pragma once\n\n#include <functional>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/ssl/private_key/private_key_callbacks.h\"\n\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n// Prevent a dependency loop with the forward declaration.\nclass TransportSocketFactoryContext;\n} // namespace Configuration\n} // namespace Server\n\nnamespace Ssl {\n\n#ifdef OPENSSL_IS_BORINGSSL\nusing BoringSslPrivateKeyMethodSharedPtr = std::shared_ptr<SSL_PRIVATE_KEY_METHOD>;\n#endif\n\nclass PrivateKeyMethodProvider {\npublic:\n  virtual ~PrivateKeyMethodProvider() = default;\n\n  /**\n   * Register an SSL connection to private key operations by the provider.\n   * @param ssl a SSL connection object.\n   * @param cb a callbacks object, whose \"complete\" method will be invoked\n   * when the asynchronous processing is complete.\n   * @param dispatcher supplies the owning thread's dispatcher.\n   */\n  virtual void registerPrivateKeyMethod(SSL* ssl, PrivateKeyConnectionCallbacks& cb,\n                                        Event::Dispatcher& dispatcher) PURE;\n\n  /**\n   * Unregister an SSL connection from private key operations by the provider.\n   * @param ssl a SSL connection object.\n   * @throw EnvoyException if registration fails.\n   */\n  virtual void unregisterPrivateKeyMethod(SSL* ssl) PURE;\n\n  /**\n   * Check whether the private key method satisfies FIPS requirements.\n   * @return true if FIPS key requirements are satisfied, false if not.\n   */\n  virtual bool checkFips() PURE;\n\n#ifdef OPENSSL_IS_BORINGSSL\n  /**\n   * Get the private key methods from the provider.\n   * @return the private key methods associated with this provider and\n   * configuration.\n   */\n  virtual BoringSslPrivateKeyMethodSharedPtr getBoringSslPrivateKeyMethod() PURE;\n#endif\n};\n\nusing 
PrivateKeyMethodProviderSharedPtr = std::shared_ptr<PrivateKeyMethodProvider>;\n\n/**\n * A manager for finding correct user-provided functions for handling BoringSSL private key\n * operations.\n */\nclass PrivateKeyMethodManager {\npublic:\n  virtual ~PrivateKeyMethodManager() = default;\n\n  /**\n   * Finds and returns a private key operations provider for BoringSSL.\n   *\n   * @param config a protobuf message object containing a PrivateKeyProvider message.\n   * @param factory_context context that provides components for creating and\n   * initializing connections using asynchronous private key operations.\n   * @return PrivateKeyMethodProvider the private key operations provider, or nullptr if\n   * no provider can be used with the context configuration.\n   */\n  virtual PrivateKeyMethodProviderSharedPtr createPrivateKeyMethodProvider(\n      const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& config,\n      Envoy::Server::Configuration::TransportSocketFactoryContext& factory_context) PURE;\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/private_key/private_key_callbacks.h",
    "content": "#pragma once\n\n#include <functional>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nclass PrivateKeyConnectionCallbacks {\npublic:\n  virtual ~PrivateKeyConnectionCallbacks() = default;\n\n  /**\n   * Callback function which is called when the asynchronous private key\n   * operation has been completed (with either success or failure). The\n   * provider will communicate the success status when SSL_do_handshake()\n   * is called the next time.\n   */\n  virtual void onPrivateKeyMethodComplete() PURE;\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/private_key/private_key_config.h",
    "content": "#pragma once\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/ssl/private_key/private_key.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\n// Base class which the private key operation provider implementations can register.\n\nclass PrivateKeyMethodProviderInstanceFactory : public Config::UntypedFactory {\npublic:\n  ~PrivateKeyMethodProviderInstanceFactory() override = default;\n\n  /**\n   * Create a particular PrivateKeyMethodProvider implementation. If the implementation is\n   * unable to produce a PrivateKeyMethodProvider with the provided parameters, it should throw\n   * an EnvoyException. The returned pointer should always be valid.\n   * @param config supplies the custom proto configuration for the PrivateKeyMethodProvider\n   * @param context supplies the factory context\n   */\n  virtual PrivateKeyMethodProviderSharedPtr createPrivateKeyMethodProviderInstance(\n      const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& config,\n      Server::Configuration::TransportSocketFactoryContext& factory_context) PURE;\n\n  std::string category() const override { return \"envoy.tls.key_providers\"; };\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/ssl_socket_extended_info.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nenum class ClientValidationStatus { NotValidated, NoClientCertificate, Validated, Failed };\n\nclass SslExtendedSocketInfo {\npublic:\n  virtual ~SslExtendedSocketInfo() = default;\n\n  /**\n   * Set the peer certificate validation status.\n   **/\n  virtual void setCertificateValidationStatus(ClientValidationStatus validated) PURE;\n\n  /**\n   * @return ClientValidationStatus The peer certificate validation status.\n   **/\n  virtual ClientValidationStatus certificateValidationStatus() const PURE;\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/ssl_socket_state.h",
    "content": "#pragma once\n\nnamespace Envoy {\nnamespace Ssl {\n\nenum class SocketState { PreHandshake, HandshakeInProgress, HandshakeComplete, ShutdownSent };\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/ssl/tls_certificate_config.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/ssl/private_key/private_key.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nclass TlsCertificateConfig {\npublic:\n  virtual ~TlsCertificateConfig() = default;\n\n  /**\n   * @return a string of certificate chain.\n   */\n  virtual const std::string& certificateChain() const PURE;\n\n  /**\n   * @return path of the certificate chain used to identify the local side or \"<inline>\" if the\n   * certificate chain was inlined.\n   */\n  virtual const std::string& certificateChainPath() const PURE;\n\n  /**\n   * @return a string of private key.\n   */\n  virtual const std::string& privateKey() const PURE;\n\n  /**\n   * @return path of the private key used to identify the local side or \"<inline>\" if the private\n   * key was inlined.\n   */\n  virtual const std::string& privateKeyPath() const PURE;\n\n  /**\n   * @return private key method provider.\n   */\n  virtual Envoy::Ssl::PrivateKeyMethodProviderSharedPtr privateKeyMethod() const PURE;\n\n  /**\n   * @return a string of password.\n   */\n  virtual const std::string& password() const PURE;\n\n  /**\n   * @return path of the password file to be used to decrypt the private key or \"<inline>\" if the\n   * password was inlined.\n   */\n  virtual const std::string& passwordPath() const PURE;\n\n  /**\n   * @return a byte vector of ocsp response.\n   */\n  virtual const std::vector<uint8_t>& ocspStaple() const PURE;\n\n  /**\n   * @return path of the ocsp response file for this certificate or \"<inline>\" if the\n   * ocsp response was inlined.\n   */\n  virtual const std::string& ocspStaplePath() const PURE;\n};\n\nusing TlsCertificateConfigPtr = std::unique_ptr<TlsCertificateConfig>;\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"refcount_ptr_interface\",\n    hdrs = [\"refcount_ptr.h\"],\n    deps = [\"//source/common/common:assert_lib\"],\n)\n\n# TODO(jmarantz): atomize the build rules to match the include files.\nenvoy_cc_library(\n    name = \"stats_interface\",\n    hdrs = [\n        \"allocator.h\",\n        \"histogram.h\",\n        \"scope.h\",\n        \"sink.h\",\n        \"stats.h\",\n        \"stats_matcher.h\",\n        \"store.h\",\n        \"tag.h\",\n        \"tag_extractor.h\",\n        \"tag_producer.h\",\n    ],\n    external_deps = [\"abseil_inlined_vector\"],\n    deps = [\n        \":refcount_ptr_interface\",\n        \":symbol_table_interface\",\n        \"//include/envoy/common:interval_set_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"primitive_stats_interface\",\n    hdrs = [\n        \"primitive_stats.h\",\n    ],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:non_copyable\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"symbol_table_interface\",\n    hdrs = [\"symbol_table.h\"],\n    external_deps = [\"abseil_inlined_vector\"],\n    deps = [\n        \"//source/common/common:hash_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"timespan_interface\",\n    hdrs = [\"timespan.h\"],\n)\n\nenvoy_cc_library(\n    name = \"stats_macros\",\n    hdrs = [\"stats_macros.h\"],\n    deps = [\":stats_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"primitive_stats_macros\",\n    hdrs = [\"primitive_stats_macros.h\"],\n    deps = [\":primitive_stats_interface\"],\n)\n"
  },
  {
    "path": "include/envoy/stats/allocator.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/symbol_table.h\"\n#include \"envoy/stats/tag.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Abstract interface for allocating statistics. Implementations can\n * be created utilizing a single fixed-size block suitable for\n * shared-memory, or in the heap, allowing for pointers and sharing of\n * substrings, with an opportunity for reduced memory consumption.\n */\nclass Allocator {\npublic:\n  virtual ~Allocator() = default;\n\n  /**\n   * @param name the full name of the stat.\n   * @param tag_extracted_name the name of the stat with tag-values stripped out.\n   * @param tags the tag values.\n   * @return CounterSharedPtr a counter.\n   */\n  virtual CounterSharedPtr makeCounter(StatName name, StatName tag_extracted_name,\n                                       const StatNameTagVector& stat_name_tags) PURE;\n\n  /**\n   * @param name the full name of the stat.\n   * @param tag_extracted_name the name of the stat with tag-values stripped out.\n   * @param stat_name_tags the tag values.\n   * @return GaugeSharedPtr a gauge.\n   */\n  virtual GaugeSharedPtr makeGauge(StatName name, StatName tag_extracted_name,\n                                   const StatNameTagVector& stat_name_tags,\n                                   Gauge::ImportMode import_mode) PURE;\n\n  /**\n   * @param name the full name of the stat.\n   * @param tag_extracted_name the name of the stat with tag-values stripped out.\n   * @param tags the tag values.\n   * @return TextReadoutSharedPtr a text readout.\n   */\n  virtual TextReadoutSharedPtr makeTextReadout(StatName name, StatName tag_extracted_name,\n                                               const StatNameTagVector& stat_name_tags) 
PURE;\n  virtual const SymbolTable& constSymbolTable() const PURE;\n  virtual SymbolTable& symbolTable() PURE;\n\n  // TODO(jmarantz): create a parallel mechanism to instantiate histograms. At\n  // the moment, histograms don't fit the same pattern of counters and gauges\n  // as they are not actually created in the context of a stats allocator.\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/histogram.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stats/refcount_ptr.h\"\n#include \"envoy/stats/stats.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nusing ConstSupportedBuckets = const std::vector<double>;\n\nclass HistogramSettings {\npublic:\n  virtual ~HistogramSettings() = default;\n\n  /**\n   * For formats like Prometheus where the entire histogram is published (but not\n   * like statsd where each value to include in the histogram is emitted separately),\n   * get the limits for each histogram bucket.\n   * @return The buckets for the histogram. Each value is an upper bound of a bucket.\n   */\n  virtual ConstSupportedBuckets& buckets(absl::string_view stat_name) const PURE;\n};\n\nusing HistogramSettingsConstPtr = std::unique_ptr<const HistogramSettings>;\n\n/**\n * Holds the computed statistics for a histogram.\n */\nclass HistogramStatistics {\npublic:\n  virtual ~HistogramStatistics() = default;\n\n  /**\n   * Returns quantile summary representation of the histogram.\n   */\n  virtual std::string quantileSummary() const PURE;\n\n  /**\n   * Returns bucket summary representation of the histogram.\n   */\n  virtual std::string bucketSummary() const PURE;\n\n  /**\n   * Returns supported quantiles.\n   */\n  virtual const std::vector<double>& supportedQuantiles() const PURE;\n\n  /**\n   * Returns computed quantile values during the period.\n   */\n  virtual const std::vector<double>& computedQuantiles() const PURE;\n\n  /**\n   * Returns supported buckets. Each value is the upper bound of the bucket\n   * with 0 as the implicit lower bound. For timers, these bucket thresholds\n   * are in milliseconds but the thresholds are applicable to all types of data.\n   */\n  virtual ConstSupportedBuckets& supportedBuckets() const PURE;\n\n  /**\n   * Returns computed bucket values during the period. 
The vector contains an approximation\n   * of samples below each quantile bucket defined in supportedBuckets(). This vector is\n   * guaranteed to be the same length as supportedBuckets().\n   */\n  virtual const std::vector<uint64_t>& computedBuckets() const PURE;\n\n  /**\n   * Returns number of values during the period. This number may be an approximation\n   * of the number of samples in the histogram, it is not guaranteed that this will be\n   * 100% the number of samples observed.\n   */\n  virtual uint64_t sampleCount() const PURE;\n\n  /**\n   * Returns sum of all values during the period.\n   */\n  virtual double sampleSum() const PURE;\n};\n\n/**\n * A histogram that records values one at a time.\n * Note: Histograms now incorporate what used to be timers because the only\n * difference between the two stat types was the units being represented.\n */\nclass Histogram : public Metric {\npublic:\n  /**\n   * Histogram values represent scalar quantity like time, length, mass,\n   * distance, or in general anything which has only magnitude and no other\n   * characteristics. These are often accompanied by a unit of measurement.\n   * This enum defines units for commonly measured quantities. Base units\n   * are preferred unless they are not granular enough to be useful as an\n   * integer.\n   */\n  enum class Unit {\n    Null,        // The histogram has been rejected, i.e. it's a null histogram and is not recording\n                 // anything.\n    Unspecified, // Measured quantity does not require a unit, e.g. 
\"items\".\n    Bytes,\n    Microseconds,\n    Milliseconds,\n  };\n\n  ~Histogram() override = default;\n\n  /**\n   * @return the unit of measurement for values recorded by the histogram.\n   */\n  virtual Unit unit() const PURE;\n\n  /**\n   * Records an unsigned value in the unit specified during the construction.\n   */\n  virtual void recordValue(uint64_t value) PURE;\n};\n\nusing HistogramSharedPtr = RefcountPtr<Histogram>;\n\n/**\n * A histogram that is stored in main thread and provides summary view of the histogram.\n */\nclass ParentHistogram : public Histogram {\npublic:\n  ~ParentHistogram() override = default;\n\n  /**\n   * This method is called during the main stats flush process for each of the histograms and used\n   * to merge the histogram values.\n   */\n  virtual void merge() PURE;\n\n  /**\n   * Returns the interval histogram summary statistics for the flush interval.\n   */\n  virtual const HistogramStatistics& intervalStatistics() const PURE;\n\n  /**\n   * Returns the cumulative histogram summary statistics.\n   */\n  virtual const HistogramStatistics& cumulativeStatistics() const PURE;\n\n  /**\n   * Returns the quantile summary representation.\n   */\n  virtual const std::string quantileSummary() const PURE;\n\n  /**\n   * Returns the bucket summary representation.\n   */\n  virtual const std::string bucketSummary() const PURE;\n};\n\nusing ParentHistogramSharedPtr = RefcountPtr<ParentHistogram>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/primitive_stats.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/common/assert.h\"\n#include \"common/common/non_copyable.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Primitive, low-memory-overhead counter with incrementing and latching capabilities. Each\n * increment is added both to a global counter as well as periodic counter. Calling latch()\n * returns the periodic counter and clears it.\n */\nclass PrimitiveCounter : NonCopyable {\npublic:\n  PrimitiveCounter() = default;\n\n  uint64_t value() const { return value_; }\n\n  void add(uint64_t amount) {\n    value_ += amount;\n    pending_increment_ += amount;\n  }\n  void inc() { add(1); }\n  void reset() { value_ = 0; }\n  uint64_t latch() { return pending_increment_.exchange(0); }\n\nprivate:\n  std::atomic<uint64_t> value_{0};\n  std::atomic<uint64_t> pending_increment_{0};\n};\n\nusing PrimitiveCounterReference = std::reference_wrapper<const PrimitiveCounter>;\n\n/**\n * Primitive, low-memory-overhead gauge with increment and decrement capabilities.\n */\nclass PrimitiveGauge : NonCopyable {\npublic:\n  PrimitiveGauge() = default;\n\n  uint64_t value() const { return value_; }\n\n  void add(uint64_t amount) { value_ += amount; }\n  void dec() { sub(1); }\n  void inc() { add(1); }\n  void set(uint64_t value) { value_ = value; }\n  void sub(uint64_t amount) {\n    ASSERT(value_ >= amount);\n    value_ -= amount;\n  }\n\nprivate:\n  std::atomic<uint64_t> value_{0};\n};\n\nusing PrimitiveGaugeReference = std::reference_wrapper<const PrimitiveGauge>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/primitive_stats_macros.h",
    "content": "#pragma once\n\n#include \"envoy/stats/primitive_stats.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\n/**\n * These are helper macros for allocating \"fixed\" stats throughout the code base in a way that\n * is also easy to mock and test. The general flow looks like this:\n *\n * Define a block of stats like this:\n *   #define MY_COOL_STATS(COUNTER, GAUGE)     \\\n *     COUNTER(counter1)                       \\\n *     GAUGE(gauge1)                           \\\n *     ...\n *\n * By convention, starting with #7083, we sort the lines of this macro block, so\n * all the counters are grouped together, then all the gauges, etc. We do not\n * use clang-format-on/off etc. \"./tools/code_format/check_format.py fix\" will take care of\n * lining up the backslashes.\n *\n * Now actually put these stats somewhere, usually as a member of a struct:\n *   struct MyCoolStats {\n *     MY_COOL_STATS(GENERATE_PRIMITIVE_COUNTER_STRUCT, GENERATE_PRIMITIVE_GAUGE_STRUCT);\n *\n *     // Optional: Provide access to counters as a map.\n *     std::vector<std::pair<absl::string_view, PrimitiveCounterReference>> counters() const {\n *       return {MY_COOL_STATS(PRIMITIVE_COUNTER_NAME_AND_REFERENCE, IGNORE_PRIMITIVE_GAUGE)};\n *     }\n *\n *     // Optional: Provide access to gauges as a map.\n *     std::vector<std::pair<absl::string_view, PrimitiveGaugeReference>> gauges() const {\n *       return {MY_COOL_STATS(IGNORE_PRIMITIVE_COUNTER, PRIMITIVE_GAUGE_NAME_AND_REFERENCE)};\n *     }\n *   };\n *\n * Finally, when you want to actually instantiate the above struct you do:\n *   MyCoolStats stats;\n */\n\n// Fully-qualified for use in external callsites.\n#define GENERATE_PRIMITIVE_COUNTER_STRUCT(NAME) Envoy::Stats::PrimitiveCounter NAME##_;\n#define GENERATE_PRIMITIVE_GAUGE_STRUCT(NAME) Envoy::Stats::PrimitiveGauge NAME##_;\n\n// Name and counter/gauge reference pair used to construct map of counters/gauges.\n#define 
PRIMITIVE_COUNTER_NAME_AND_REFERENCE(X) {absl::string_view(#X), std::ref(X##_)},\n#define PRIMITIVE_GAUGE_NAME_AND_REFERENCE(X) {absl::string_view(#X), std::ref(X##_)},\n\n// Ignore a counter or gauge.\n#define IGNORE_PRIMITIVE_COUNTER(X)\n#define IGNORE_PRIMITIVE_GAUGE(X)\n\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/refcount_ptr.h",
    "content": "#pragma once\n\n#include <atomic>\n\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n// Implements a reference-counted pointer to a class, so that its external usage\n// model is identical to std::shared_ptr, but the reference count itself is held\n// in the class. The class is expected to implement three methods:\n//    void incRefCount()\n//    bool decRefCount()  -- returns true if the reference count goes to zero.\n//    uint32_t use_count()\n// It may implement them by delegating to RefcountHelper (see below), or by\n// inheriting from RefcountInterface (see below).\n//\n// TODO(jmarantz): replace this with an absl or std implementation when\n// available. See https://github.com/abseil/abseil-cpp/issues/344, issued June\n// 26, 2019, and http://wg21.link/p0406, issued 2017. Note that the turnaround\n// time for getting an absl API added is measurable in months, and for a std\n// API in years.\ntemplate <class T> class RefcountPtr {\npublic:\n  RefcountPtr() : ptr_(nullptr) {}\n\n  // Constructing a reference-counted object from a pointer; this is safe to\n  // do when the reference-count is held in the object. For example, this code\n  // crashes:\n  //   {\n  //     std::shared_ptr<std::string> a = std::make_shared<std::string>(\"x\");\n  //     std::shared_ptr<std::string> b(a.get());\n  //   }\n  // whereas the analogous code for RefcountPtr works fine.\n  RefcountPtr(T* ptr) : ptr_(ptr) {\n    if (ptr_ != nullptr) {\n      ptr_->incRefCount();\n    }\n  }\n\n  RefcountPtr(const RefcountPtr& src) : RefcountPtr(src.get()) {}\n\n  // Constructor for up-casting reference-counted pointers. This doesn't change\n  // the underlying object; it just upcasts the DerivedClass* in src.ptr_ to a\n  // BaseClass* for assignment to this->ptr_. 
For usage of this to compile,\n  // DerivedClass* must be assignable to BaseClass* without explicit casts.\n  template <class DerivedClass>\n  RefcountPtr(const RefcountPtr<DerivedClass>& src) : RefcountPtr(src.get()) {}\n\n  // Move-construction is used by absl::flat_hash_map during resizes.\n  RefcountPtr(RefcountPtr&& src) noexcept : ptr_(src.ptr_) { src.ptr_ = nullptr; }\n\n  RefcountPtr& operator=(const RefcountPtr& src) {\n    if (&src != this && src.ptr_ != ptr_) {\n      resetInternal();\n      ptr_ = src.get();\n      if (ptr_ != nullptr) {\n        ptr_->incRefCount();\n      }\n    }\n    return *this;\n  }\n\n  // Move-assignment is used during std::vector resizes.\n  RefcountPtr& operator=(RefcountPtr&& src) noexcept {\n    if (&src != this && src.ptr_ != ptr_) {\n      resetInternal();\n      ptr_ = src.ptr_;\n      src.ptr_ = nullptr;\n    }\n    return *this;\n  }\n\n  ~RefcountPtr() { resetInternal(); }\n\n  // Implements required subset of the shared_ptr API;\n  // see https://en.cppreference.com/w/cpp/memory/shared_ptr for details.\n  T* operator->() const { return ptr_; }\n  T* get() const { return ptr_; }\n  T& operator*() const { return *ptr_; }\n  operator bool() const { return ptr_ != nullptr; }\n  bool operator==(const T* ptr) const { return ptr_ == ptr; }\n  bool operator!=(const T* ptr) const { return ptr_ != ptr; }\n  bool operator==(const RefcountPtr& a) const { return ptr_ == a.ptr_; }\n  bool operator!=(const RefcountPtr& a) const { return ptr_ != a.ptr_; }\n  uint32_t use_count() const { return ptr_->use_count(); }\n  void reset() {\n    resetInternal();\n    ptr_ = nullptr;\n  }\n\nprivate:\n  // Like reset() but does not bother to clear ptr_, as it is about to be\n  // overwritten or destroyed.\n  void resetInternal() {\n    if (ptr_ != nullptr && ptr_->decRefCount()) {\n      delete ptr_;\n    }\n  }\n\n  T* ptr_;\n};\n\ntemplate <class T> static bool operator==(std::nullptr_t, const RefcountPtr<T>& a) {\n  return a == 
nullptr;\n}\ntemplate <class T> static bool operator!=(std::nullptr_t, const RefcountPtr<T>& a) {\n  return a != nullptr;\n}\n\n// Helper interface for classes to derive from, enabling implementation of the\n// three methods as part of derived classes. It is not necessary to inherit from\n// this interface to wrap a class in RefcountPtr; instead the class can just\n// implement the same method.\nclass RefcountInterface {\npublic:\n  virtual ~RefcountInterface() = default;\n\n  /**\n   * Increments the reference count.\n   */\n  virtual void incRefCount() PURE;\n\n  /**\n   * Decrements the reference count.\n   * @return true if the reference count has gone to zero, so the object should be freed.\n   */\n  virtual bool decRefCount() PURE;\n\n  /**\n   * @return the number of references to the object.\n   */\n  virtual uint32_t use_count() const PURE;\n};\n\n// Delegation helper for RefcountPtr. This can be instantiated in a class, but\n// explicit delegation will be needed for each of the three methods.\nstruct RefcountHelper {\n  // Implements the RefcountInterface API.\n  void incRefCount() {\n    // Note: The ++ref_count_ here and --ref_count_ below have implicit memory\n    // orderings of sequentially consistent. Relaxed on addition and\n    // acquire/release on subtraction is the typical use for reference\n    // counting. On x86, the difference in instruction count is minimal, but the\n    // savings are greater on other platforms.\n    //\n    // https://www.boost.org/doc/libs/1_69_0/doc/html/atomic/usage_examples.html#boost_atomic.usage_examples.example_reference_counters\n    ++ref_count_;\n  }\n  bool decRefCount() {\n    ASSERT(ref_count_ >= 1);\n    return --ref_count_ == 0;\n  }\n  uint32_t use_count() const { return ref_count_; }\n\n  std::atomic<uint32_t> ref_count_{0};\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/scope.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/symbol_table.h\"\n#include \"envoy/stats/tag.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass Counter;\nclass Gauge;\nclass Histogram;\nclass NullGaugeImpl;\nclass Scope;\nclass TextReadout;\n\nusing CounterOptConstRef = absl::optional<std::reference_wrapper<const Counter>>;\nusing GaugeOptConstRef = absl::optional<std::reference_wrapper<const Gauge>>;\nusing HistogramOptConstRef = absl::optional<std::reference_wrapper<const Histogram>>;\nusing TextReadoutOptConstRef = absl::optional<std::reference_wrapper<const TextReadout>>;\nusing ScopePtr = std::unique_ptr<Scope>;\nusing ScopeSharedPtr = std::shared_ptr<Scope>;\n\ntemplate <class StatType> using IterateFn = std::function<bool(const RefcountPtr<StatType>&)>;\n\n/**\n * A named scope for stats. Scopes are a grouping of stats that can be acted on as a unit if needed\n * (for example to free/delete all of them).\n */\nclass Scope {\npublic:\n  virtual ~Scope() = default;\n\n  /**\n   * Allocate a new scope. NOTE: The implementation should correctly handle overlapping scopes\n   * that point to the same reference counted backing stats. This allows a new scope to be\n   * gracefully swapped in while an old scope with the same name is being destroyed.\n   * @param name supplies the scope's namespace prefix.\n   */\n  virtual ScopePtr createScope(const std::string& name) PURE;\n\n  /**\n   * Deliver an individual histogram value to all registered sinks.\n   */\n  virtual void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) PURE;\n\n  /**\n   * Creates a Counter from the stat name. 
Tag extraction will be performed on the name.\n   * @param name The name of the stat, obtained from the SymbolTable.\n   * @return a counter within the scope's namespace.\n   */\n  Counter& counterFromStatName(const StatName& name) {\n    return counterFromStatNameWithTags(name, absl::nullopt);\n  }\n  /**\n   * Creates a Counter from the stat name and tags. If tags are not provided, tag extraction\n   * will be performed on the name.\n   * @param name The name of the stat, obtained from the SymbolTable.\n   * @param tags optionally specified tags.\n   * @return a counter within the scope's namespace.\n   */\n  virtual Counter& counterFromStatNameWithTags(const StatName& name,\n                                               StatNameTagVectorOptConstRef tags) PURE;\n\n  /**\n   * TODO(#6667): this variant is deprecated: use counterFromStatName.\n   * @param name The name, expressed as a string.\n   * @return a counter within the scope's namespace.\n   */\n  virtual Counter& counterFromString(const std::string& name) PURE;\n\n  /**\n   * Creates a Gauge from the stat name. Tag extraction will be performed on the name.\n   * @param name The name of the stat, obtained from the SymbolTable.\n   * @param import_mode Whether hot-restart should accumulate this value.\n   * @return a gauge within the scope's namespace.\n   */\n  Gauge& gaugeFromStatName(const StatName& name, Gauge::ImportMode import_mode) {\n    return gaugeFromStatNameWithTags(name, absl::nullopt, import_mode);\n  }\n\n  /**\n   * Creates a Gauge from the stat name and tags. 
If tags are not provided, tag extraction\n   * will be performed on the name.\n   * @param name The name of the stat, obtained from the SymbolTable.\n   * @param tags optionally specified tags.\n   * @param import_mode Whether hot-restart should accumulate this value.\n   * @return a gauge within the scope's namespace.\n   */\n  virtual Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                           Gauge::ImportMode import_mode) PURE;\n\n  /**\n   * TODO(#6667): this variant is deprecated: use gaugeFromStatName.\n   * @param name The name, expressed as a string.\n   * @param import_mode Whether hot-restart should accumulate this value.\n   * @return a gauge within the scope's namespace.\n   */\n  virtual Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) PURE;\n\n  /**\n   * @return a null gauge within the scope's namespace.\n   */\n  virtual NullGaugeImpl& nullGauge(const std::string& name) PURE;\n\n  /**\n   * Creates a Histogram from the stat name. Tag extraction will be performed on the name.\n   * @param name The name of the stat, obtained from the SymbolTable.\n   * @param unit The unit of measurement.\n   * @return a histogram within the scope's namespace with a particular value type.\n   */\n  Histogram& histogramFromStatName(const StatName& name, Histogram::Unit unit) {\n    return histogramFromStatNameWithTags(name, absl::nullopt, unit);\n  }\n\n  /**\n   * Creates a Histogram from the stat name and tags. 
If tags are not provided, tag extraction\n   * will be performed on the name.\n   * @param name The name of the stat, obtained from the SymbolTable.\n   * @param tags optionally specified tags.\n   * @param unit The unit of measurement.\n   * @return a histogram within the scope's namespace with a particular value type.\n   */\n  virtual Histogram& histogramFromStatNameWithTags(const StatName& name,\n                                                   StatNameTagVectorOptConstRef tags,\n                                                   Histogram::Unit unit) PURE;\n\n  /**\n   * TODO(#6667): this variant is deprecated: use histogramFromStatName.\n   * @param name The name, expressed as a string.\n   * @param unit The unit of measurement.\n   * @return a histogram within the scope's namespace with a particular value type.\n   */\n  virtual Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) PURE;\n\n  /**\n   * Creates a TextReadout from the stat name. Tag extraction will be performed on the name.\n   * @param name The name of the stat, obtained from the SymbolTable.\n   * @return a text readout within the scope's namespace.\n   */\n  TextReadout& textReadoutFromStatName(const StatName& name) {\n    return textReadoutFromStatNameWithTags(name, absl::nullopt);\n  }\n\n  /**\n   * Creates a TextReadout from the stat name and tags. 
If tags are not provided, tag extraction\n   * will be performed on the name.\n   * @param name The name of the stat, obtained from the SymbolTable.\n   * @param tags optionally specified tags.\n   * @return a text readout within the scope's namespace.\n   */\n  virtual TextReadout& textReadoutFromStatNameWithTags(const StatName& name,\n                                                       StatNameTagVectorOptConstRef tags) PURE;\n\n  /**\n   * TODO(#6667): this variant is deprecated: use textReadoutFromStatName.\n   * @param name The name, expressed as a string.\n   * @return a text readout within the scope's namespace.\n   */\n  virtual TextReadout& textReadoutFromString(const std::string& name) PURE;\n\n  /**\n   * @param The name of the stat, obtained from the SymbolTable.\n   * @return a reference to a counter within the scope's namespace, if it exists.\n   */\n  virtual CounterOptConstRef findCounter(StatName name) const PURE;\n\n  /**\n   * @param The name of the stat, obtained from the SymbolTable.\n   * @return a reference to a gauge within the scope's namespace, if it exists.\n   */\n  virtual GaugeOptConstRef findGauge(StatName name) const PURE;\n\n  /**\n   * @param The name of the stat, obtained from the SymbolTable.\n   * @return a reference to a histogram within the scope's namespace, if it\n   * exists.\n   */\n  virtual HistogramOptConstRef findHistogram(StatName name) const PURE;\n\n  /**\n   * @param The name of the stat, obtained from the SymbolTable.\n   * @return a reference to a text readout within the scope's namespace, if it exists.\n   */\n  virtual TextReadoutOptConstRef findTextReadout(StatName name) const PURE;\n\n  /**\n   * @return a reference to the symbol table.\n   */\n  virtual const SymbolTable& constSymbolTable() const PURE;\n  virtual SymbolTable& symbolTable() PURE;\n\n  /**\n   * Calls 'fn' for every counter. 
Note that in the case of overlapping scopes,\n   * the implementation may call fn more than one time for each counter. Iteration\n   * stops if `fn` returns false;\n   *\n   * @param fn Function to be run for every counter, or until fn return false.\n   * @return false if fn(counter) return false during iteration, true if every counter was hit.\n   */\n  virtual bool iterate(const IterateFn<Counter>& fn) const PURE;\n\n  /**\n   * Calls 'fn' for every gauge. Note that in the case of overlapping scopes,\n   * the implementation may call fn more than one time for each gauge. Iteration\n   * stops if `fn` returns false;\n   *\n   * @param fn Function to be run for every gauge, or until fn return false.\n   * @return false if fn(gauge) return false during iteration, true if every gauge was hit.\n   */\n  virtual bool iterate(const IterateFn<Gauge>& fn) const PURE;\n\n  /**\n   * Calls 'fn' for every histogram. Note that in the case of overlapping\n   * scopes, the implementation may call fn more than one time for each\n   * histogram. Iteration stops if `fn` returns false;\n   *\n   * @param fn Function to be run for every histogram, or until fn return false.\n   * @return false if fn(histogram) return false during iteration, true if every histogram was hit.\n   */\n  virtual bool iterate(const IterateFn<Histogram>& fn) const PURE;\n\n  /**\n   * Calls 'fn' for every text readout. Note that in the case of overlapping\n   * scopes, the implementation may call fn more than one time for each\n   * text readout. Iteration stops if `fn` returns false;\n   *\n   * @param fn Function to be run for every text readout, or until fn return false.\n   * @return false if fn(text_readout) return false during iteration, true if every text readout\n   *         was hit.\n   */\n  virtual bool iterate(const IterateFn<TextReadout>& fn) const PURE;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/sink.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/stats.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass Histogram;\n\nclass MetricSnapshot {\npublic:\n  struct CounterSnapshot {\n    uint64_t delta_;\n    std::reference_wrapper<const Counter> counter_;\n  };\n\n  virtual ~MetricSnapshot() = default;\n\n  /**\n   * @return a snapshot of all counters with pre-latched deltas.\n   */\n  virtual const std::vector<CounterSnapshot>& counters() PURE;\n\n  /**\n   * @return a snapshot of all gauges.\n   */\n  virtual const std::vector<std::reference_wrapper<const Gauge>>& gauges() PURE;\n\n  /**\n   * @return a snapshot of all histograms.\n   */\n  virtual const std::vector<std::reference_wrapper<const ParentHistogram>>& histograms() PURE;\n\n  /**\n   * @return a snapshot of all text readouts.\n   */\n  virtual const std::vector<std::reference_wrapper<const TextReadout>>& textReadouts() PURE;\n};\n\n/**\n * A sink for stats. Each sink is responsible for writing stats to a backing store.\n */\nclass Sink {\npublic:\n  virtual ~Sink() = default;\n\n  /**\n   * Periodic metric flush to the sink.\n   * @param snapshot interface through which the sink can access all metrics being flushed.\n   */\n  virtual void flush(MetricSnapshot& snapshot) PURE;\n\n  /**\n   * Flush a single histogram sample. Note: this call is called synchronously as a part of recording\n   * the metric, so implementations must be thread-safe.\n   * @param histogram the histogram that this sample applies to.\n   * @param value the value of the sample.\n   */\n  virtual void onHistogramComplete(const Histogram& histogram, uint64_t value) PURE;\n};\n\nusing SinkPtr = std::unique_ptr<Sink>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/stats.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stats/refcount_ptr.h\"\n#include \"envoy/stats/symbol_table.h\"\n#include \"envoy/stats/tag.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * General interface for all stats objects.\n */\nclass Metric : public RefcountInterface {\npublic:\n  ~Metric() override = default;\n  /**\n   * Returns the full name of the Metric. This is intended for most uses, such\n   * as streaming out the name to a stats sink or admin request, or comparing\n   * against it in a test. Independent of the evolution of the data\n   * representation for the name, this method will be available. For storing the\n   * name as a map key, however, nameCStr() is a better choice, albeit one that\n   * might change in the future to return a symbolized representation of the\n   * elaborated string.\n   */\n  virtual std::string name() const PURE;\n\n  /**\n   * Returns the full name of the Metric as an encoded array of symbols.\n   */\n  virtual StatName statName() const PURE;\n\n  /**\n   * Returns a vector of configurable tags to identify this Metric.\n   */\n  virtual TagVector tags() const PURE;\n\n  /**\n   * See a more detailed description in tagExtractedStatName(), which is the\n   * preferred API to use when feasible. This API needs to compose the\n   * std::string on the fly, and return it by value.\n   *\n   * @return The stat name with all tag values extracted, as a std::string.\n   */\n  virtual std::string tagExtractedName() const PURE;\n\n  /**\n   * Returns the name of the Metric with the portions designated as tags removed\n   * as a string. For example, The stat name \"vhost.foo.vcluster.bar.c1\" would\n   * have \"foo\" extracted as the value of tag \"vhost\" and \"bar\" extracted as the\n   * value of tag \"vcluster\". 
Thus the tagExtractedName is simply\n   * \"vhost.vcluster.c1\".\n   *\n   * @return the name of the Metric with the portions designated as tags\n   *     removed.\n   */\n  virtual StatName tagExtractedStatName() const PURE;\n\n  // Function to be called from iterateTagStatNames passing name and value as StatNames.\n  using TagStatNameIterFn = std::function<bool(StatName, StatName)>;\n\n  /**\n   * Iterates over all tags, calling a functor for each name/value pair. The\n   * functor can return 'true' to continue or 'false' to stop the\n   * iteration.\n   *\n   * @param fn The functor to call for StatName pair.\n   */\n  virtual void iterateTagStatNames(const TagStatNameIterFn& fn) const PURE;\n\n  // Function to be called from iterateTags passing name and value as const Tag&.\n  using TagIterFn = std::function<bool(const Tag&)>;\n\n  /**\n   * Indicates whether this metric has been updated since the server was started.\n   */\n  virtual bool used() const PURE;\n\n  /**\n   * Flags:\n   * Used: used by all stats types to figure out whether they have been used.\n   * Logic...: used by gauges to cache how they should be combined with a parent's value.\n   */\n  struct Flags {\n    static const uint8_t Used = 0x01;\n    static const uint8_t LogicAccumulate = 0x02;\n    static const uint8_t NeverImport = 0x04;\n  };\n  virtual SymbolTable& symbolTable() PURE;\n  virtual const SymbolTable& constSymbolTable() const PURE;\n};\n\n/**\n * An always incrementing counter with latching capability. Each increment is added both to a\n * global counter as well as periodic counter. 
Calling latch() returns the periodic counter and\n * clears it.\n */\nclass Counter : public Metric {\npublic:\n  ~Counter() override = default;\n\n  virtual void add(uint64_t amount) PURE;\n  virtual void inc() PURE;\n  virtual uint64_t latch() PURE;\n  virtual void reset() PURE;\n  virtual uint64_t value() const PURE;\n};\n\nusing CounterSharedPtr = RefcountPtr<Counter>;\n\n/**\n * A gauge that can both increment and decrement.\n */\nclass Gauge : public Metric {\npublic:\n  enum class ImportMode {\n    Uninitialized, // Gauge was discovered during hot-restart transfer.\n    NeverImport,   // On hot-restart, each process starts with gauge at 0.\n    Accumulate,    // Transfers gauge state on hot-restart.\n  };\n\n  ~Gauge() override = default;\n\n  virtual void add(uint64_t amount) PURE;\n  virtual void dec() PURE;\n  virtual void inc() PURE;\n  virtual void set(uint64_t value) PURE;\n  virtual void sub(uint64_t amount) PURE;\n  virtual uint64_t value() const PURE;\n\n  /**\n   * Sets a value from a hot-restart parent. This parent contribution must be\n   * kept distinct from the child value, so that when we erase the value it\n   * is not commingled with the child value, which may have been set() directly.\n   *\n   * @param parent_value the value from the hot-restart parent.\n   */\n  virtual void setParentValue(uint64_t parent_value) PURE;\n\n  /**\n   * @return the import mode, dictating behavior of the gauge across hot restarts.\n   */\n  virtual ImportMode importMode() const PURE;\n\n  /**\n   * Gauges can be created with ImportMode::Uninitialized during hot-restart\n   * merges, if they haven't yet been instantiated by the child process. When\n   * they finally get instantiated, mergeImportMode should be called to\n   * initialize the gauge's import mode. 
It is only valid to call\n   * mergeImportMode when the current mode is ImportMode::Uninitialized.\n   *\n   * @param import_mode the new import mode.\n   */\n  virtual void mergeImportMode(ImportMode import_mode) PURE;\n};\n\nusing GaugeSharedPtr = RefcountPtr<Gauge>;\n\n/**\n * A string, possibly non-ASCII.\n */\nclass TextReadout : public virtual Metric {\npublic:\n  // Text readout type is used internally to disambiguate isolated store\n  // constructors. In the future we can extend it to specify text encoding or\n  // some such.\n  enum class Type {\n    Default, // No particular meaning.\n  };\n\n  ~TextReadout() override = default;\n\n  /**\n   * Sets the value of this TextReadout by moving the input |value| to minimize\n   * buffer copies under the lock.\n   */\n  virtual void set(absl::string_view value) PURE;\n  /**\n   * @return the copy of this TextReadout value.\n   */\n  virtual std::string value() const PURE;\n};\n\nusing TextReadoutSharedPtr = RefcountPtr<TextReadout>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/stats_macros.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\n/**\n * These are helper macros for allocating \"fixed\" stats throughout the code base in a way that\n * is also easy to mock and test. The general flow looks like this:\n *\n * Define a block of stats like this:\n *   #define MY_COOL_STATS(COUNTER, GAUGE, HISTOGRAM)     \\\n *     COUNTER(counter1)                                  \\\n *     GAUGE(gauge1, mode)                                \\\n *     HISTOGRAM(histogram1, unit)\n *     ...\n *\n * By convention, starting with #7083, we sort the lines of this macro block, so\n * all the counters are grouped together, then all the gauges, etc. We do not\n * use clang-format-on/off etc. \"./tools/code_format/check_format.py fix\" will take care of\n * lining up the backslashes.\n *\n * Now actually put these stats somewhere, usually as a member of a struct:\n *   struct MyCoolStats {\n *     MY_COOL_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n *   };\n *\n * Finally, when you want to actually instantiate the above struct using a Stats::Pool, you do:\n *   MyCoolStats stats{\n *     MY_COOL_STATS(POOL_COUNTER(...), POOL_GAUGE(...), POOL_HISTOGRAM(...))};\n */\n\n// Fully-qualified for use in external callsites.\n#define GENERATE_COUNTER_STRUCT(NAME) Envoy::Stats::Counter& NAME##_;\n#define GENERATE_GAUGE_STRUCT(NAME, MODE) Envoy::Stats::Gauge& NAME##_;\n#define GENERATE_HISTOGRAM_STRUCT(NAME, UNIT) Envoy::Stats::Histogram& NAME##_;\n#define GENERATE_TEXT_READOUT_STRUCT(NAME) Envoy::Stats::TextReadout& NAME##_;\n\n#define FINISH_STAT_DECL_(X) #X)),\n#define FINISH_STAT_DECL_MODE_(X, MODE) #X), Envoy::Stats::Gauge::ImportMode::MODE),\n#define FINISH_STAT_DECL_UNIT_(X, UNIT) #X), Envoy::Stats::Histogram::Unit::UNIT),\n\nstatic inline std::string 
statPrefixJoin(absl::string_view prefix, absl::string_view token) {\n  if (prefix.empty()) {\n    return std::string(token);\n  } else if (absl::EndsWith(prefix, \".\")) {\n    // TODO(jmarantz): eliminate this case -- remove all the trailing dots from prefixes.\n    return absl::StrCat(prefix, token);\n  }\n  return absl::StrCat(prefix, \".\", token);\n}\n\n#define POOL_COUNTER_PREFIX(POOL, PREFIX) (POOL).counterFromString(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_\n#define POOL_GAUGE_PREFIX(POOL, PREFIX) (POOL).gaugeFromString(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_MODE_\n#define POOL_HISTOGRAM_PREFIX(POOL, PREFIX) (POOL).histogramFromString(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_UNIT_\n#define POOL_TEXT_READOUT_PREFIX(POOL, PREFIX) (POOL).textReadoutFromString(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_\n\n#define POOL_COUNTER(POOL) POOL_COUNTER_PREFIX(POOL, \"\")\n#define POOL_GAUGE(POOL) POOL_GAUGE_PREFIX(POOL, \"\")\n#define POOL_HISTOGRAM(POOL) POOL_HISTOGRAM_PREFIX(POOL, \"\")\n#define POOL_TEXT_READOUT(POOL) POOL_TEXT_READOUT_PREFIX(POOL, \"\")\n\n#define NULL_STAT_DECL_(X) std::string(#X)),\n#define NULL_STAT_DECL_IGNORE_MODE_(X, MODE) std::string(#X)),\n\n#define NULL_POOL_GAUGE(POOL) (POOL).nullGauge(NULL_STAT_DECL_IGNORE_MODE_\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/stats_matcher.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass StatsMatcher {\npublic:\n  virtual ~StatsMatcher() = default;\n\n  /**\n   * Take a metric name and report whether or not it should be instantiated.\n   * @param the name of a Stats::Metric.\n   * @return bool true if that stat should not be instantiated.\n   */\n  virtual bool rejects(const std::string& name) const PURE;\n\n  /**\n   * Helps determine whether the matcher needs to be called. This can be used\n   * to short-circuit elaboration of stats names.\n   *\n   * @return bool whether StatsMatcher can be statically determined to accept\n   *              all stats. It's possible to construct a matcher where\n   *              acceptsAll() returns false, but rejects() is always false.\n   */\n  virtual bool acceptsAll() const PURE;\n\n  /**\n   * Helps determine whether the matcher needs to be called. This can be used\n   * to short-circuit elaboration of stats names.\n   *\n   * @return bool whether StatsMatcher can be statically determined to reject\n   *              all stats. It's possible to construct a matcher where\n   *              rejectsAll() returns false, but rejects() is always true.\n   */\n  virtual bool rejectsAll() const PURE;\n};\n\nusing StatsMatcherPtr = std::unique_ptr<const StatsMatcher>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/store.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_matcher.h\"\n#include \"envoy/stats/tag_producer.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nclass Dispatcher;\n}\n\nnamespace ThreadLocal {\nclass Instance;\n}\n\nnamespace Stats {\n\nclass Sink;\n\n/**\n * A store for all known counters, gauges, and timers.\n */\nclass Store : public Scope {\npublic:\n  /**\n   * @return a list of all known counters.\n   */\n  virtual std::vector<CounterSharedPtr> counters() const PURE;\n\n  /**\n   * @return a list of all known gauges.\n   */\n  virtual std::vector<GaugeSharedPtr> gauges() const PURE;\n\n  /**\n   * @return a list of all known text readouts.\n   */\n  virtual std::vector<TextReadoutSharedPtr> textReadouts() const PURE;\n\n  /**\n   * @return a list of all known histograms.\n   */\n  virtual std::vector<ParentHistogramSharedPtr> histograms() const PURE;\n};\n\nusing StorePtr = std::unique_ptr<Store>;\n\n/**\n * Callback invoked when a store's mergeHistogram() runs.\n */\nusing PostMergeCb = std::function<void()>;\n\n/**\n * The root of the stat store.\n */\nclass StoreRoot : public Store {\npublic:\n  /**\n   * Add a sink that is used for stat flushing.\n   */\n  virtual void addSink(Sink& sink) PURE;\n\n  /**\n   * Set the given tag producer to control tags.\n   */\n  virtual void setTagProducer(TagProducerPtr&& tag_producer) PURE;\n\n  /**\n   * Attach a StatsMatcher to this StoreRoot to prevent the initialization of stats according to\n   * some ruleset.\n   * @param stats_matcher a StatsMatcher to attach to this StoreRoot.\n   */\n  virtual void setStatsMatcher(StatsMatcherPtr&& stats_matcher) PURE;\n\n  /**\n   * Attach a HistogramSettings to this StoreRoot to generate histogram configurations\n   * according to some ruleset.\n   */\n  virtual void 
setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) PURE;\n\n  /**\n   * Initialize the store for threading. This will be called once after all worker threads have\n   * been initialized. At this point the store can initialize itself for multi-threaded operation.\n   */\n  virtual void initializeThreading(Event::Dispatcher& main_thread_dispatcher,\n                                   ThreadLocal::Instance& tls) PURE;\n\n  /**\n   * Shutdown threading support in the store. This is called once when the server is about to shut\n   * down.\n   */\n  virtual void shutdownThreading() PURE;\n\n  /**\n   * Called during the flush process to merge all the thread local histograms. The passed in\n   * callback will be called on the main thread, but it will happen after the method returns\n   * which means that the actual flush process will happen on the main thread after this method\n   * returns. It is expected that only one merge runs at any time and concurrent calls to this\n   * method would be asserted.\n   */\n  virtual void mergeHistograms(PostMergeCb merge_complete_cb) PURE;\n};\n\nusing StoreRootPtr = std::unique_ptr<StoreRoot>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/symbol_table.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n#include <utility>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Runtime representation of an encoded stat name. This is predeclared only in\n * the interface without abstract methods, because (a) the underlying class\n * representation is common to both implementations of SymbolTable, and (b)\n * we do not want or need the overhead of a vptr per StatName. The common\n * declaration for StatName is in source/common/stats/symbol_table_impl.h\n */\nclass StatName;\nusing StatNameVec = absl::InlinedVector<StatName, 8>;\n\nclass StatNameList;\nclass StatNameSet;\n\nusing StatNameSetPtr = std::unique_ptr<StatNameSet>;\n\n/**\n * Holds a range of indexes indicating which parts of a stat-name are\n * dynamic. This is used to transfer stats from hot-restart parent to child,\n * retaining the same name structure.\n */\nusing DynamicSpan = std::pair<uint32_t, uint32_t>;\nusing DynamicSpans = std::vector<DynamicSpan>;\n\n/**\n * SymbolTable manages a namespace optimized for stat names, exploiting their\n * typical composition from \".\"-separated tokens, with a significant overlap\n * between the tokens. The interface is designed to balance optimal storage\n * at scale with hiding details from users. We seek to provide the most abstract\n * interface possible that avoids adding per-stat overhead or taking locks in\n * the hot path.\n */\nclass SymbolTable {\npublic:\n  /**\n   * Efficient byte-encoded storage of an array of tokens. The most common\n   * tokens are typically < 127, and are represented directly. tokens >= 128\n   * spill into the next byte, allowing for tokens of arbitrary numeric value to\n   * be stored. As long as the most common tokens are low-valued, the\n   * representation is space-efficient. This scheme is similar to UTF-8. 
The\n   * token ordering is dependent on the order in which stat-names are encoded\n   * into the SymbolTable, which will not be optimal, but in practice appears\n   * to be pretty good.\n   *\n   * This is exposed in the interface for the benefit of join(), which is\n   * used in the hot-path to append two stat-names into a temp without taking\n   * locks. This is used then in thread-local cache lookup, so that once warm,\n   * no locks are taken when looking up stats.\n   */\n  using Storage = uint8_t[];\n  using StoragePtr = std::unique_ptr<Storage>;\n\n  virtual ~SymbolTable() = default;\n\n  /**\n   * @return uint64_t the number of symbols in the symbol table.\n   */\n  virtual uint64_t numSymbols() const PURE;\n\n  /**\n   * Decodes a vector of symbols back into its period-delimited stat name. If\n   * decoding fails on any part of the symbol_vec, we release_assert and crash,\n   * since this should never happen, and we don't want to continue running\n   * with a corrupt stats set.\n   *\n   * @param stat_name the stat name.\n   * @return std::string stringified stat_name.\n   */\n  virtual std::string toString(const StatName& stat_name) const PURE;\n\n  /**\n   * Determines whether one StatName lexically precedes another. Note that\n   * the lexical order may not exactly match the lexical order of the\n   * elaborated strings. For example, stat-name of \"-.-\" would lexically\n   * sort after \"---\" but when encoded as a StatName would come lexically\n   * earlier. 
In practice this is unlikely to matter as those are not\n   * reasonable names for Envoy stats.\n   *\n   * Note that this operation has to be performed with the context of the\n   * SymbolTable so that the individual Symbol objects can be converted\n   * into strings for lexical comparison.\n   *\n   * @param a the first stat name\n   * @param b the second stat name\n   * @return bool true if a lexically precedes b.\n   */\n  virtual bool lessThan(const StatName& a, const StatName& b) const PURE;\n\n  /**\n   * Joins two or more StatNames. For example if we have StatNames for {\"a.b\",\n   * \"c.d\", \"e.f\"} then the joined stat-name matches \"a.b.c.d.e.f\". The\n   * advantage of using this representation is that it avoids having to\n   * decode/encode into the elaborated form, and does not require locking the\n   * SymbolTable.\n   *\n   * Note that this method does not bump reference counts on the referenced\n   * Symbols in the SymbolTable, so it's only valid as long for the lifetime of\n   * the joined StatNames.\n   *\n   * This is intended for use doing cached name lookups of scoped stats, where\n   * the scope prefix and the names to combine it with are already in StatName\n   * form. Using this class, they can be combined without accessing the\n   * SymbolTable or, in particular, taking its lock.\n   *\n   * @param stat_names the names to join.\n   * @return Storage allocated for the joined name.\n   */\n  virtual StoragePtr join(const StatNameVec& stat_names) const PURE;\n\n  /**\n   * Populates a StatNameList from a list of encodings. 
This is not done at\n   * construction time to enable StatNameList to be instantiated directly in\n   * a class that doesn't have a live SymbolTable when it is constructed.\n   *\n   * @param names A pointer to the first name in an array, allocated by the caller.\n   * @param num_names The number of names.\n   * @param symbol_table The symbol table in which to encode the names.\n   */\n  virtual void populateList(const StatName* names, uint32_t num_names, StatNameList& list) PURE;\n\n#ifndef ENVOY_CONFIG_COVERAGE\n  virtual void debugPrint() const PURE;\n#endif\n\n  /**\n   * Calls the provided function with a string-view representation of the\n   * elaborated name. This is useful during the interim period when we\n   * are using FakeSymbolTableImpl, to avoid an extra allocation. Once\n   * we migrate to using SymbolTableImpl, this interface will no longer\n   * be helpful and can be removed. The reason it's useful now is that\n   * it makes up, in part, for some extra runtime overhead that is spent\n   * on the SymbolTable abstraction and API, without getting full benefit\n   * from the improved representation.\n   *\n   * TODO(#6307): Remove this when the transition from FakeSymbolTableImpl to\n   * SymbolTableImpl is complete.\n   *\n   * @param stat_name The stat name.\n   * @param fn The function to call with the elaborated stat name as a string_view.\n   */\n  virtual void callWithStringView(StatName stat_name,\n                                  const std::function<void(absl::string_view)>& fn) const PURE;\n\n  using RecentLookupsFn = std::function<void(absl::string_view, uint64_t)>;\n\n  /**\n   * Calls the provided function with the name of the most recently looked-up\n   * symbols, including lookups on any StatNameSets, and with a count of\n   * the recent lookups on that symbol.\n   *\n   * @param iter the function to call for every recent item.\n   */\n  virtual uint64_t getRecentLookups(const RecentLookupsFn& iter) const PURE;\n\n  /**\n   * Clears the 
recent-lookups structures.\n   */\n  virtual void clearRecentLookups() PURE;\n\n  /**\n   * Sets the recent-lookup capacity.\n   */\n  virtual void setRecentLookupCapacity(uint64_t capacity) PURE;\n\n  /**\n   * @return The configured recent-lookup tracking capacity.\n   */\n  virtual uint64_t recentLookupCapacity() const PURE;\n\n  /**\n   * Creates a StatNameSet.\n   *\n   * @param name the name of the set.\n   * @return the set.\n   */\n  virtual StatNameSetPtr makeSet(absl::string_view name) PURE;\n\n  /**\n   * Identifies the dynamic components of a stat_name into an array of integer\n   * pairs, indicating the begin/end of spans of tokens in the stat-name that\n   * are created from StatNameDynamicStore or StatNameDynamicPool.\n   *\n   * This can be used to reconstruct the same exact StatNames in\n   * StatNames::mergeStats(), to enable stat continuity across hot-restart.\n   *\n   * @param stat_name the input stat name.\n   * @return the array of pairs indicating the bounds.\n   */\n  virtual DynamicSpans getDynamicSpans(StatName stat_name) const PURE;\n\nprivate:\n  friend struct HeapStatData;\n  friend class StatNameDynamicStorage;\n  friend class StatNameStorage;\n  friend class StatNameList;\n  friend class StatNameSet;\n\n  // The following methods are private, but are called by friend classes\n  // StatNameStorage and StatNameList, which must be friendly with SymbolTable\n  // in order to manage the reference-counted symbols they own.\n\n  /**\n   * Since SymbolTable does manual reference counting, a client of SymbolTable\n   * must manually call free(symbol_vec) when it is freeing the backing store\n   * for a StatName. This way, the symbol table will grow and shrink\n   * dynamically, instead of being write-only.\n   *\n   * @param stat_name the stat name.\n   */\n  virtual void free(const StatName& stat_name) PURE;\n\n  /**\n   * StatName backing-store can be managed by callers in a variety of ways\n   * to minimize overhead. 
But any persistent reference to a StatName needs\n   * to hold onto its own reference-counts for all symbols. This method\n   * helps callers ensure the symbol-storage is maintained for the lifetime\n   * of a reference.\n   *\n   * @param stat_name the stat name.\n   */\n  virtual void incRefCount(const StatName& stat_name) PURE;\n\n  /**\n   * Encodes 'name' into the symbol table. Bumps reference counts for referenced\n   * symbols. The caller must manage the storage, and is responsible for calling\n   * SymbolTable::free() to release the reference counts.\n   *\n   * @param name The name to encode.\n   * @return The encoded name, transferring ownership to the caller.\n   *\n   */\n  virtual StoragePtr encode(absl::string_view name) PURE;\n\n  virtual StoragePtr makeDynamicStorage(absl::string_view name) PURE;\n};\n\nusing SymbolTablePtr = std::unique_ptr<SymbolTable>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/tag.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/stats/symbol_table.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * General representation of a tag.\n */\nstruct Tag {\n  std::string name_;\n  std::string value_;\n\n  bool operator==(const Tag& other) const {\n    return other.name_ == name_ && other.value_ == value_;\n  };\n};\n\nusing TagVector = std::vector<Tag>;\n\nusing StatNameTag = std::pair<StatName, StatName>;\nusing StatNameTagVector = std::vector<StatNameTag>;\nusing StatNameTagVectorOptConstRef =\n    absl::optional<std::reference_wrapper<const StatNameTagVector>>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/tag_extractor.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/interval_set.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/stats/tag.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Class to extract tags from the stat names.\n */\nclass TagExtractor {\npublic:\n  virtual ~TagExtractor() = default;\n\n  /**\n   * Identifier for the tag extracted by this object.\n   */\n  virtual std::string name() const PURE;\n\n  /**\n   * Finds tags for stat_name and adds them to the tags vector. If the tag is not\n   * represented in the name, the tags vector will remain unmodified. Also finds the\n   * character indexes for the tags in stat_name and adds them to remove_characters (an\n   * in/out arg). Returns true if a tag-match was found. The characters removed from the\n   * name may be different from the values put into the tag vector for readability\n   * purposes. Note: The extraction process is expected to be run iteratively, aggregating\n   * the character intervals to be removed from the name after all the tag extractions are\n   * complete. This approach simplifies the tag searching process because without mutations,\n   * the tag extraction will be order independent, apart from the order of the tag array.\n   * @param stat_name name from which the tag will be extracted if found to exist.\n   * @param tags list of tags updated with the tag name and value if found in the name.\n   * @param remove_characters set of intervals of character-indices to be removed from name.\n   * @return bool indicates whether a tag was found in the name.\n   */\n  virtual bool extractTag(absl::string_view stat_name, TagVector& tags,\n                          IntervalSet<size_t>& remove_characters) const PURE;\n\n  /**\n   * Finds a prefix string associated with the matching criteria owned by the\n   * extractor. 
This is used to reduce the number of extractors required for\n   * processing each stat, by pulling the first \".\"-separated token on the tag.\n   *\n   * If a prefix cannot be extracted, an empty string_view is returned, and the\n   * matcher must be applied on all inputs.\n   *\n   * The storage for the prefix is owned by the TagExtractor.\n   *\n   * @return absl::string_view the prefix, or an empty string_view if none was found.\n   */\n  virtual absl::string_view prefixToken() const PURE;\n};\n\nusing TagExtractorPtr = std::unique_ptr<const TagExtractor>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/tag_producer.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stats/tag.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass TagProducer {\npublic:\n  virtual ~TagProducer() = default;\n\n  /**\n   * Take a metric name and a vector then add proper tags into the vector and\n   * return an extracted metric name. The tags array will be populated with\n   * name/value pairs extracted from the full metric name, using the regular\n   * expressions in source/common/config/well_known_names.cc. For example, the\n   * stat name \"vhost.foo.vcluster.bar.c1\" would have \"foo\" extracted as the\n   * value of tag \"vhost\" and \"bar\" extracted as the value of tag\n   * \"vcluster\", so this will populate tags with {\"vhost\", \"foo\"} and\n   * {\"vcluster\", \"bar\"}, and return \"vhost.vcluster.c1\".\n   *\n   * @param metric_name std::string a name of Stats::Metric (Counter, Gauge, Histogram).\n   * @param tags TagVector a set of Stats::Tag.\n   */\n  virtual std::string produceTags(absl::string_view metric_name, TagVector& tags) const PURE;\n};\n\nusing TagProducerPtr = std::unique_ptr<const TagProducer>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stats/timespan.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * An abstraction of timespan which can be completed.\n */\nclass CompletableTimespan {\npublic:\n  virtual ~CompletableTimespan() = default;\n\n  /**\n   * Time elapsed since the creation of the timespan.\n   */\n  virtual std::chrono::milliseconds elapsed() const PURE;\n\n  /**\n   * Complete the timespan.\n   */\n  virtual void complete() PURE;\n};\n\nusing Timespan = CompletableTimespan;\nusing TimespanPtr = std::unique_ptr<Timespan>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stream_info/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"stream_info_interface\",\n    hdrs = [\"stream_info.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":filter_state_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/http:protocol_interface\",\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//include/envoy/ssl:connection_interface\",\n        \"//include/envoy/upstream:host_description_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/singleton:const_singleton\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_state_interface\",\n    hdrs = [\"filter_state.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"uint32_accessor_interface\",\n    hdrs = [\"uint32_accessor.h\"],\n    deps = [\n        \":filter_state_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/stream_info/filter_state.h",
    "content": "#pragma once\n\n#include <memory>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\n\nclass FilterState;\n\nusing FilterStateSharedPtr = std::shared_ptr<FilterState>;\n\n/**\n * FilterState represents dynamically generated information regarding a stream (TCP or HTTP level)\n * or a connection by various filters in Envoy. FilterState can be write-once or write-many.\n */\nclass FilterState {\npublic:\n  enum class StateType { ReadOnly, Mutable };\n  // Objects stored in the FilterState may have different life span. Life span is what controls\n  // how long an object stored in FilterState lives. Implementation of this interface actually\n  // stores objects in a (reverse) tree manner - multiple FilterStateImpl with shorter life span may\n  // share the same FilterStateImpl as parent, which may recursively share parent with other\n  // FilterStateImpl at the same life span. This interface is supposed to be accessed at the leaf\n  // level (FilterChain) for objects with any desired longer life span.\n  //\n  // - FilterChain has the shortest life span, which is as long as the filter chain lives.\n  //\n  // - Request is longer than FilterChain. When internal redirect is enabled, one\n  //   downstream request may create multiple filter chains. Request allows an object to\n  //   survive across filter chains for bookkeeping needs. 
This is not used for the upstream case.\n  //\n  // - Connection makes an object survive the entire duration of a connection.\n  //   Any stream within this connection can see the same object.\n  //\n  // Note that order matters in this enum because it's assumed that life span grows as enum number\n  // grows.\n  enum LifeSpan { FilterChain, Request, Connection, TopSpan = Connection };\n\n  class Object {\n  public:\n    virtual ~Object() = default;\n\n    /**\n     * @return Protobuf::MessagePtr an unique pointer to the proto serialization of the filter\n     * state. If returned message type is ProtobufWkt::Any it will be directly used in protobuf\n     * logging. nullptr if the filter state cannot be serialized or serialization is not supported.\n     */\n    virtual ProtobufTypes::MessagePtr serializeAsProto() const { return nullptr; }\n\n    /**\n     * @return absl::optional<std::string> a optional string to the serialization of the filter\n     * state. No value if the filter state cannot be serialized or serialization is not supported.\n     * This method can be used to get an unstructured serialization result.\n     */\n    virtual absl::optional<std::string> serializeAsString() const { return absl::nullopt; }\n  };\n\n  virtual ~FilterState() = default;\n\n  /**\n   * @param data_name the name of the data being set.\n   * @param data an owning pointer to the data to be stored.\n   * @param state_type indicates whether the object is mutable or not.\n   * @param life_span indicates the life span of the object: bound to the filter chain, a\n   * request, or a connection.\n   *\n   * Note that it is an error to call setData() twice with the same\n   * data_name, if the existing object is immutable. 
Similarly, it is an\n   * error to call setData() with same data_name but different state_types\n   * (mutable and readOnly, or readOnly and mutable) or different life_span.\n   * This is to enforce a single authoritative source for each piece of\n   * data stored in FilterState.\n   */\n  virtual void setData(absl::string_view data_name, std::shared_ptr<Object> data,\n                       StateType state_type, LifeSpan life_span = LifeSpan::FilterChain) PURE;\n\n  /**\n   * @param data_name the name of the data being looked up (mutable/readonly).\n   * @return a const reference to the stored data.\n   * An exception will be thrown if the data does not exist. This function\n   * will fail if the data stored under |data_name| cannot be dynamically\n   * cast to the type specified.\n   */\n  template <typename T> const T& getDataReadOnly(absl::string_view data_name) const {\n    const T* result = dynamic_cast<const T*>(getDataReadOnlyGeneric(data_name));\n    if (!result) {\n      ExceptionUtil::throwEnvoyException(\n          fmt::format(\"Data stored under {} cannot be coerced to specified type\", data_name));\n    }\n    return *result;\n  }\n\n  /**\n   * @param data_name the name of the data being looked up (mutable/readonly).\n   * @return a const reference to the stored data.\n   * An exception will be thrown if the data does not exist.\n   */\n  virtual const Object* getDataReadOnlyGeneric(absl::string_view data_name) const PURE;\n\n  /**\n   * @param data_name the name of the data being looked up (mutable only).\n   * @return a non-const reference to the stored data if and only if the\n   * underlying data is mutable.\n   * An exception will be thrown if the data does not exist or if it is\n   * immutable. 
This function will fail if the data stored under\n   * |data_name| cannot be dynamically cast to the type specified.\n   */\n  template <typename T> T& getDataMutable(absl::string_view data_name) {\n    T* result = dynamic_cast<T*>(getDataMutableGeneric(data_name));\n    if (!result) {\n      ExceptionUtil::throwEnvoyException(\n          fmt::format(\"Data stored under {} cannot be coerced to specified type\", data_name));\n    }\n    return *result;\n  }\n\n  /**\n   * @param data_name the name of the data being probed.\n   * @return Whether data of the type and name specified exists in the\n   * data store.\n   */\n  template <typename T> bool hasData(absl::string_view data_name) const {\n    return (hasDataWithName(data_name) &&\n            (dynamic_cast<const T*>(getDataReadOnlyGeneric(data_name)) != nullptr));\n  }\n\n  /**\n   * @param data_name the name of the data being probed.\n   * @return Whether data of any type and the name specified exists in the\n   * data store.\n   */\n  virtual bool hasDataWithName(absl::string_view data_name) const PURE;\n\n  /**\n   * @param life_span the LifeSpan above which data existence is checked.\n   * @return whether data of any type exist with LifeSpan greater than life_span.\n   */\n  virtual bool hasDataAtOrAboveLifeSpan(LifeSpan life_span) const PURE;\n\n  /**\n   * @return the LifeSpan of objects stored by this instance. Objects with\n   * LifeSpan longer than this are handled recursively.\n   */\n  virtual LifeSpan lifeSpan() const PURE;\n\n  /**\n   * @return the pointer of the parent FilterState that has longer life span. nullptr means this is\n   * either the top LifeSpan or the parent is not yet created.\n   */\n  virtual FilterStateSharedPtr parent() const PURE;\n\nprotected:\n  virtual Object* getDataMutableGeneric(absl::string_view data_name) PURE;\n};\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stream_info/stream_info.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/protocol.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/ssl/connection.h\"\n#include \"envoy/stream_info/filter_state.h\"\n#include \"envoy/upstream/host_description.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\n\nnamespace Router {\nclass RouteEntry;\n} // namespace Router\n\nnamespace Upstream {\nclass ClusterInfo;\nusing ClusterInfoConstSharedPtr = std::shared_ptr<const ClusterInfo>;\n} // namespace Upstream\n\nnamespace StreamInfo {\n\nenum ResponseFlag {\n  // Local server healthcheck failed.\n  FailedLocalHealthCheck = 0x1,\n  // No healthy upstream.\n  NoHealthyUpstream = 0x2,\n  // Request timeout on upstream.\n  UpstreamRequestTimeout = 0x4,\n  // Local codec level reset was sent on the stream.\n  LocalReset = 0x8,\n  // Remote codec level reset was received on the stream.\n  UpstreamRemoteReset = 0x10,\n  // Local reset by a connection pool due to an initial connection failure.\n  UpstreamConnectionFailure = 0x20,\n  // If the stream was locally reset due to connection termination.\n  UpstreamConnectionTermination = 0x40,\n  // The stream was reset because of a resource overflow.\n  UpstreamOverflow = 0x80,\n  // No route found for a given request.\n  NoRouteFound = 0x100,\n  // Request was delayed before proxying.\n  DelayInjected = 0x200,\n  // Abort with error code was injected.\n  FaultInjected = 0x400,\n  // Request was ratelimited locally by rate limit filter.\n  RateLimited = 0x800,\n  // Request was unauthorized by external authorization service.\n  UnauthorizedExternalService = 0x1000,\n  // Unable to call Ratelimit 
service.\n  RateLimitServiceError = 0x2000,\n  // If the stream was reset due to a downstream connection termination.\n  DownstreamConnectionTermination = 0x4000,\n  // Exceeded upstream retry limit.\n  UpstreamRetryLimitExceeded = 0x8000,\n  // Request hit the stream idle timeout, triggering a 408.\n  StreamIdleTimeout = 0x10000,\n  // Request specified x-envoy-* header values that failed strict header checks.\n  InvalidEnvoyRequestHeaders = 0x20000,\n  // Downstream request had an HTTP protocol error\n  DownstreamProtocolError = 0x40000,\n  // Upstream request reached to user defined max stream duration.\n  UpstreamMaxStreamDurationReached = 0x80000,\n  // True if the response was served from an Envoy cache filter.\n  ResponseFromCacheFilter = 0x100000,\n  // Filter config was not received within the permitted warming deadline.\n  NoFilterConfigFound = 0x200000,\n  // Request or connection exceeded the downstream connection duration.\n  DurationTimeout = 0x400000,\n  // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST FLAG.\n  LastFlag = DurationTimeout\n};\n\n/**\n * Constants for the response code details field of StreamInfo for details sent\n * by core (non-extension) code.\n *\n * These provide details about the stream state such as whether the\n * response is from the upstream or from envoy (in case of a local reply).\n * Custom extensions can define additional values provided they are appropriately\n * scoped to avoid collisions.\n */\nstruct ResponseCodeDetailValues {\n  // Response code was set by the upstream.\n  const std::string ViaUpstream = \"via_upstream\";\n  // Envoy is doing non-streaming proxying, and the request payload exceeded\n  // configured limits.\n  const std::string RequestPayloadTooLarge = \"request_payload_too_large\";\n  // Envoy is doing non-streaming proxying, and the response payload exceeded\n  // configured limits.\n  const std::string ResponsePayloadTooLarge = \"response_payload_too_large\";\n  // Envoy is doing streaming 
proxying, but too much data arrived while waiting\n  // to attempt a retry.\n  const std::string RequestPayloadExceededRetryBufferLimit =\n      \"request_payload_exceeded_retry_buffer_limit\";\n  // The per-stream keepalive timeout was exceeded.\n  const std::string StreamIdleTimeout = \"stream_idle_timeout\";\n  // The per-stream max duration timeout was exceeded.\n  const std::string MaxDurationTimeout = \"max_duration_timeout\";\n  // The per-stream total request timeout was exceeded.\n  const std::string RequestOverallTimeout = \"request_overall_timeout\";\n  // The request was rejected due to the Overload Manager reaching configured resource limits.\n  const std::string Overload = \"overload\";\n  // The HTTP/1.0 or HTTP/0.9 request was rejected due to HTTP/1.0 support not being configured.\n  const std::string LowVersion = \"low_version\";\n  // The request was rejected due to a missing Host: or :authority field.\n  const std::string MissingHost = \"missing_host_header\";\n  // The request was rejected due to x-envoy-* headers failing strict header validation.\n  const std::string InvalidEnvoyRequestHeaders = \"request_headers_failed_strict_check\";\n  // The request was rejected due to a missing Path or :path header field.\n  const std::string MissingPath = \"missing_path_rejected\";\n  // The request was rejected due to using an absolute path on a route not supporting them.\n  const std::string AbsolutePath = \"absolute_path_rejected\";\n  // The request was rejected because path normalization was configured on and failed, probably due\n  // to an invalid path.\n  const std::string PathNormalizationFailed = \"path_normalization_failed\";\n  // The request was rejected because it attempted an unsupported upgrade.\n  const std::string UpgradeFailed = \"upgrade_failed\";\n\n  // 
The request was rejected by the HCM because there was no route configuration found.\n  const std::string RouteConfigurationNotFound = \"route_configuration_not_found\";\n  // The request was rejected by the router filter because there was no route found.\n  const std::string RouteNotFound = \"route_not_found\";\n  // A direct response was generated by the router filter.\n  const std::string DirectResponse = \"direct_response\";\n  // The request was rejected by the router filter because there was no cluster found for the\n  // selected route.\n  const std::string ClusterNotFound = \"cluster_not_found\";\n  // The request was rejected by the router filter because the cluster was in maintenance mode.\n  const std::string MaintenanceMode = \"maintenance_mode\";\n  // The request was rejected by the router filter because there was no healthy upstream found.\n  const std::string NoHealthyUpstream = \"no_healthy_upstream\";\n  // The upstream response timed out.\n  const std::string UpstreamTimeout = \"upstream_response_timeout\";\n  // The final upstream try timed out.\n  const std::string UpstreamPerTryTimeout = \"upstream_per_try_timeout\";\n  // The request was destroyed because of user defined max stream duration.\n  const std::string UpstreamMaxStreamDurationReached = \"upstream_max_stream_duration_reached\";\n  // The upstream connection was reset before a response was started. This\n  // will generally be accompanied by details about why the reset occurred.\n  const std::string EarlyUpstreamReset = \"upstream_reset_before_response_started\";\n  // The upstream connection was reset after a response was started. 
This\n  // will generally be accompanied by details about why the reset occurred but\n  // indicates that original \"success\" headers may have been sent downstream\n  // despite the subsequent failure.\n  const std::string LateUpstreamReset = \"upstream_reset_after_response_started\";\n  // The request was rejected due to no matching filter chain.\n  const std::string FilterChainNotFound = \"filter_chain_not_found\";\n  // The client disconnected unexpectedly.\n  const std::string DownstreamRemoteDisconnect = \"downstream_remote_disconnect\";\n  // The client connection was locally closed for an unspecified reason.\n  const std::string DownstreamLocalDisconnect = \"downstream_local_disconnect\";\n  // The max connection duration was exceeded.\n  const std::string DurationTimeout = \"duration_timeout\";\n  // The response was generated by the admin filter.\n  const std::string AdminFilterResponse = \"admin_filter_response\";\n  // The original stream was replaced with an internal redirect.\n  const std::string InternalRedirect = \"internal_redirect\";\n  // Changes or additions to details should be reflected in\n  // docs/root/configuration/http/http_conn_man/response_code_details.rst\n};\n\nusing ResponseCodeDetails = ConstSingleton<ResponseCodeDetailValues>;\n\nstruct UpstreamTiming {\n  /**\n   * Sets the time when the first byte of the request was sent upstream.\n   */\n  void onFirstUpstreamTxByteSent(TimeSource& time_source) {\n    ASSERT(!first_upstream_tx_byte_sent_);\n    first_upstream_tx_byte_sent_ = time_source.monotonicTime();\n  }\n\n  /**\n   * Sets the time when the last byte of the request was sent upstream.\n   */\n  void onLastUpstreamTxByteSent(TimeSource& time_source) {\n    ASSERT(!last_upstream_tx_byte_sent_);\n    last_upstream_tx_byte_sent_ = time_source.monotonicTime();\n  }\n\n  /**\n   * Sets the time when the first byte of the response is received from upstream.\n   */\n  void onFirstUpstreamRxByteReceived(TimeSource& 
time_source) {\n    ASSERT(!first_upstream_rx_byte_received_);\n    first_upstream_rx_byte_received_ = time_source.monotonicTime();\n  }\n\n  /**\n   * Sets the time when the last byte of the response is received from upstream.\n   */\n  void onLastUpstreamRxByteReceived(TimeSource& time_source) {\n    ASSERT(!last_upstream_rx_byte_received_);\n    last_upstream_rx_byte_received_ = time_source.monotonicTime();\n  }\n\n  absl::optional<MonotonicTime> first_upstream_tx_byte_sent_;\n  absl::optional<MonotonicTime> last_upstream_tx_byte_sent_;\n  absl::optional<MonotonicTime> first_upstream_rx_byte_received_;\n  absl::optional<MonotonicTime> last_upstream_rx_byte_received_;\n};\n\n/**\n * Additional information about a completed request for logging.\n */\nclass StreamInfo {\npublic:\n  virtual ~StreamInfo() = default;\n\n  /**\n   * @param response_flag the response flag. Each filter can set independent response flags. The\n   * flags are accumulated.\n   */\n  virtual void setResponseFlag(ResponseFlag response_flag) PURE;\n\n  /**\n   * @param rc_details the response code details string to set for this request.\n   * See ResponseCodeDetailValues above for well-known constants.\n   */\n  virtual void setResponseCodeDetails(absl::string_view rc_details) PURE;\n\n  /**\n   * @param connection_termination_details the termination details string to set for this\n   * connection.\n   */\n  virtual void\n  setConnectionTerminationDetails(absl::string_view connection_termination_details) PURE;\n\n  /**\n   * @param response_flags the response_flags to intersect with.\n   * @return true if the intersection of the response_flags argument and the currently set response\n   * flags is non-empty.\n   */\n  virtual bool intersectResponseFlags(uint64_t response_flags) const PURE;\n\n  /**\n   * @param host the selected upstream host for the request.\n   */\n  virtual void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) PURE;\n\n  /**\n   * @param std::string name denotes 
the name of the route.\n   */\n  virtual void setRouteName(absl::string_view name) PURE;\n\n  /**\n   * @return std::string& the name of the route.\n   */\n  virtual const std::string& getRouteName() const PURE;\n  /**\n   * @param bytes_received denotes number of bytes to add to total received bytes.\n   */\n  virtual void addBytesReceived(uint64_t bytes_received) PURE;\n\n  /**\n   * @return the number of body bytes received in the request.\n   */\n  virtual uint64_t bytesReceived() const PURE;\n\n  /**\n   * @return the protocol of the request.\n   */\n  virtual absl::optional<Http::Protocol> protocol() const PURE;\n\n  /**\n   * @param protocol the request's protocol.\n   */\n  virtual void protocol(Http::Protocol protocol) PURE;\n\n  /**\n   * @return the response code.\n   */\n  virtual absl::optional<uint32_t> responseCode() const PURE;\n\n  /**\n   * @return the response code details.\n   */\n  virtual const absl::optional<std::string>& responseCodeDetails() const PURE;\n\n  /**\n   * @return the termination details of the connection.\n   */\n  virtual const absl::optional<std::string>& connectionTerminationDetails() const PURE;\n\n  /**\n   * @return the time that the first byte of the request was received.\n   */\n  virtual SystemTime startTime() const PURE;\n\n  /**\n   * @return the monotonic time that the first byte of the request was received. Duration\n   * calculations should be made relative to this value.\n   */\n  virtual MonotonicTime startTimeMonotonic() const PURE;\n\n  /**\n   * @return the duration between the last byte of the request was received and the start of the\n   * request.\n   */\n  virtual absl::optional<std::chrono::nanoseconds> lastDownstreamRxByteReceived() const PURE;\n\n  /**\n   * Sets the time when the last byte of the request was received.\n   */\n  virtual void onLastDownstreamRxByteReceived() PURE;\n\n  /**\n   * Sets the upstream timing information for this stream. 
This is useful for\n   * when multiple upstream requests are issued and we want to save timing\n   * information for the one that \"wins\".\n   */\n  virtual void setUpstreamTiming(const UpstreamTiming& upstream_timing) PURE;\n\n  /**\n   * @return the duration between the first byte of the request was sent upstream and the start of\n   * the request. There may be a considerable delta between lastDownstreamByteReceived and this\n   * value due to filters.\n   */\n  virtual absl::optional<std::chrono::nanoseconds> firstUpstreamTxByteSent() const PURE;\n\n  /**\n   * @return the duration between the last byte of the request was sent upstream and the start of\n   * the request.\n   */\n  virtual absl::optional<std::chrono::nanoseconds> lastUpstreamTxByteSent() const PURE;\n\n  /**\n   * @return the duration between the first byte of the response is received from upstream and the\n   * start of the request.\n   */\n  virtual absl::optional<std::chrono::nanoseconds> firstUpstreamRxByteReceived() const PURE;\n\n  /**\n   * @return the duration between the last byte of the response is received from upstream and the\n   * start of the request.\n   */\n  virtual absl::optional<std::chrono::nanoseconds> lastUpstreamRxByteReceived() const PURE;\n  /**\n   * @return the duration between the first byte of the response is sent downstream and the start of\n   * the request. 
There may be a considerable delta between lastUpstreamByteReceived and this value\n   * due to filters.\n   */\n  virtual absl::optional<std::chrono::nanoseconds> firstDownstreamTxByteSent() const PURE;\n\n  /**\n   * Sets the time when the first byte of the response is sent downstream.\n   */\n  virtual void onFirstDownstreamTxByteSent() PURE;\n\n  /**\n   * @return the duration between the last byte of the response is sent downstream and the start of\n   * the request.\n   */\n  virtual absl::optional<std::chrono::nanoseconds> lastDownstreamTxByteSent() const PURE;\n\n  /**\n   * Sets the time when the last byte of the response is sent downstream.\n   */\n  virtual void onLastDownstreamTxByteSent() PURE;\n\n  /**\n   * @return the total duration of the request (i.e., when the request's ActiveStream is destroyed)\n   * and may be longer than lastDownstreamTxByteSent.\n   */\n  virtual absl::optional<std::chrono::nanoseconds> requestComplete() const PURE;\n\n  /**\n   * Sets the end time for the request. 
This method is called once the request has been fully\n   * completed (i.e., when the request's ActiveStream is destroyed).\n   */\n  virtual void onRequestComplete() PURE;\n\n  /**\n   * @param bytes_sent denotes the number of bytes to add to total sent bytes.\n   */\n  virtual void addBytesSent(uint64_t bytes_sent) PURE;\n\n  /**\n   * @return the number of body bytes sent in the response.\n   */\n  virtual uint64_t bytesSent() const PURE;\n\n  /**\n   * @return whether response flag is set or not.\n   */\n  virtual bool hasResponseFlag(ResponseFlag response_flag) const PURE;\n\n  /**\n   * @return whether any response flag is set or not.\n   */\n  virtual bool hasAnyResponseFlag() const PURE;\n\n  /**\n   * @return response flags encoded as an integer.\n   */\n  virtual uint64_t responseFlags() const PURE;\n\n  /**\n   * @return upstream host description.\n   */\n  virtual Upstream::HostDescriptionConstSharedPtr upstreamHost() const PURE;\n\n  /**\n   * @param upstream_local_address sets the local address of the upstream connection. Note that it\n   * can be different than the local address of the downstream connection.\n   */\n  virtual void setUpstreamLocalAddress(\n      const Network::Address::InstanceConstSharedPtr& upstream_local_address) PURE;\n\n  /**\n   * @return the upstream local address.\n   */\n  virtual const Network::Address::InstanceConstSharedPtr& upstreamLocalAddress() const PURE;\n\n  /**\n   * @return whether the request is a health check request or not.\n   */\n  virtual bool healthCheck() const PURE;\n\n  /**\n   * @param is_health_check whether the request is a health check request or not.\n   */\n  virtual void healthCheck(bool is_health_check) PURE;\n\n  /**\n   * @param downstream_local_address sets the local address of the downstream connection. 
Note that\n   * it can be different than the local address of the upstream connection.\n   */\n  virtual void setDownstreamLocalAddress(\n      const Network::Address::InstanceConstSharedPtr& downstream_local_address) PURE;\n\n  /**\n   * @return the downstream local address. Note that this will never be nullptr.\n   */\n  virtual const Network::Address::InstanceConstSharedPtr& downstreamLocalAddress() const PURE;\n\n  /**\n   * @param downstream_direct_remote_address sets the direct physical address of downstream\n   * connection.\n   */\n  virtual void setDownstreamDirectRemoteAddress(\n      const Network::Address::InstanceConstSharedPtr& downstream_direct_remote_address) PURE;\n\n  /**\n   * @return the downstream directly connected address. This will never be nullptr. This is\n   * equivalent to the address of the physical connection.\n   */\n  virtual const Network::Address::InstanceConstSharedPtr&\n  downstreamDirectRemoteAddress() const PURE;\n\n  /**\n   * @param downstream_remote_address sets the remote address of downstream connection.\n   */\n  virtual void setDownstreamRemoteAddress(\n      const Network::Address::InstanceConstSharedPtr& downstream_remote_address) PURE;\n\n  /**\n   * @return the downstream remote address. Note that this will never be nullptr. This may be\n   * equivalent to downstreamDirectRemoteAddress, unless the remote address is inferred from a\n   * proxy proto, x-forwarded-for, etc.\n   */\n  virtual const Network::Address::InstanceConstSharedPtr& downstreamRemoteAddress() const PURE;\n\n  /**\n   * @param connection_info sets the downstream ssl connection.\n   */\n  virtual void\n  setDownstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& ssl_connection_info) PURE;\n\n  /**\n   * @return the downstream SSL connection. 
This will be nullptr if the downstream\n   * connection does not use SSL.\n   */\n  virtual Ssl::ConnectionInfoConstSharedPtr downstreamSslConnection() const PURE;\n\n  /**\n   * @param connection_info sets the upstream ssl connection.\n   */\n  virtual void\n  setUpstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& ssl_connection_info) PURE;\n\n  /**\n   * @return the upstream SSL connection. This will be nullptr if the upstream\n   * connection does not use SSL.\n   */\n  virtual Ssl::ConnectionInfoConstSharedPtr upstreamSslConnection() const PURE;\n\n  /**\n   * @return const Router::RouteEntry* Get the route entry selected for this request. Note: this\n   * will be nullptr if no route was selected.\n   */\n  virtual const Router::RouteEntry* routeEntry() const PURE;\n\n  /**\n   * @return const envoy::config::core::v3::Metadata& the dynamic metadata associated with this\n   * request\n   */\n  virtual envoy::config::core::v3::Metadata& dynamicMetadata() PURE;\n  virtual const envoy::config::core::v3::Metadata& dynamicMetadata() const PURE;\n\n  /**\n   * @param name the namespace used in the metadata in reverse DNS format, for example:\n   * envoy.test.my_filter.\n   * @param value the struct to set on the namespace. A merge will be performed with new values for\n   * the same key overriding existing.\n   */\n  virtual void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) PURE;\n\n  /**\n   * Object on which filters can share data on a per-request basis. For singleton data objects, only\n   * one filter can produce a named data object. List data objects can be updated by multiple\n   * filters (append only). 
Both object types can be consumed by multiple filters.\n   * @return the filter state associated with this request.\n   */\n  virtual const FilterStateSharedPtr& filterState() PURE;\n  virtual const FilterState& filterState() const PURE;\n\n  /**\n   * Filter State object to be shared between upstream and downstream filters.\n   * @param pointer to upstream connections filter state.\n   * @return pointer to filter state to be used by upstream connections.\n   */\n  virtual const FilterStateSharedPtr& upstreamFilterState() const PURE;\n  virtual void setUpstreamFilterState(const FilterStateSharedPtr& filter_state) PURE;\n\n  /**\n   * @param SNI value requested.\n   */\n  virtual void setRequestedServerName(const absl::string_view requested_server_name) PURE;\n\n  /**\n   * @return SNI value for downstream host.\n   */\n  virtual const std::string& requestedServerName() const PURE;\n\n  /**\n   * @param failure_reason the upstream transport failure reason.\n   */\n  virtual void setUpstreamTransportFailureReason(absl::string_view failure_reason) PURE;\n\n  /**\n   * @return const std::string& the upstream transport failure reason, e.g. 
certificate validation\n   *         failed.\n   */\n  virtual const std::string& upstreamTransportFailureReason() const PURE;\n\n  /**\n   * @param headers request headers.\n   */\n  virtual void setRequestHeaders(const Http::RequestHeaderMap& headers) PURE;\n\n  /**\n   * @return request headers.\n   */\n  virtual const Http::RequestHeaderMap* getRequestHeaders() const PURE;\n\n  /**\n   * @param Upstream Connection's ClusterInfo.\n   */\n  virtual void\n  setUpstreamClusterInfo(const Upstream::ClusterInfoConstSharedPtr& upstream_cluster_info) PURE;\n\n  /**\n   * @return Upstream Connection's ClusterInfo.\n   * This returns an optional to differentiate between unset(absl::nullopt),\n   * no route or cluster does not exist(nullptr), and set to a valid cluster(not nullptr).\n   */\n  virtual absl::optional<Upstream::ClusterInfoConstSharedPtr> upstreamClusterInfo() const PURE;\n\n  /**\n   * @param utils The requestID utils implementation this stream uses\n   */\n  virtual void setRequestIDExtension(Http::RequestIDExtensionSharedPtr utils) PURE;\n\n  /**\n   * @return A shared pointer to the request ID utils for this stream\n   */\n  virtual Http::RequestIDExtensionSharedPtr getRequestIDExtension() const PURE;\n\n  /**\n   * @return Connection ID of the downstream connection, or unset if not available.\n   **/\n  virtual absl::optional<uint64_t> connectionID() const PURE;\n\n  /**\n   * @param id Connection ID of the downstream connection.\n   **/\n  virtual void setConnectionID(uint64_t id) PURE;\n};\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/stream_info/uint32_accessor.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\n\n/**\n * A FilterState object that tracks a single uint32_t value.\n */\nclass UInt32Accessor : public FilterState::Object {\npublic:\n  /**\n   * Increments the tracked value by 1.\n   */\n  virtual void increment() PURE;\n\n  /**\n   * @return the tracked value.\n   */\n  virtual uint32_t value() const PURE;\n};\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/tcp/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"conn_pool_interface\",\n    hdrs = [\"conn_pool.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/common:conn_pool_interface\",\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/upstream:upstream_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/tcp/conn_pool.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/conn_pool.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace Tcp {\nnamespace ConnectionPool {\n\n/*\n * UpstreamCallbacks for connection pool upstream connection callbacks and data. Note that\n * onEvent(Connected) is never triggered since the event always occurs before a ConnectionPool\n * caller is assigned a connection.\n */\nclass UpstreamCallbacks : public Network::ConnectionCallbacks {\npublic:\n  ~UpstreamCallbacks() override = default;\n\n  /*\n   * Invoked when data is delivered from the upstream connection while the connection is owned by a\n   * ConnectionPool::Instance caller.\n   * @param data supplies data from the upstream\n   * @param end_stream whether the data is the last data frame\n   */\n  virtual void onUpstreamData(Buffer::Instance& data, bool end_stream) PURE;\n};\n\n/**\n * ConnectionState is a base class for connection state maintained across requests. For example, a\n * protocol may maintain a connection-specific request sequence number or negotiate options that\n * affect the behavior of requests for the duration of the connection. A ConnectionState subclass\n * is assigned to the ConnectionData to track this state when the connection is returned to the\n * pool so that the state is available when the connection is re-used for a subsequent request.\n * The ConnectionState assigned to a connection is automatically destroyed when the connection is\n * closed.\n */\nclass ConnectionState {\npublic:\n  virtual ~ConnectionState() = default;\n};\n\nusing ConnectionStatePtr = std::unique_ptr<ConnectionState>;\n\n/*\n * ConnectionData wraps a ClientConnection allocated to a caller. 
Open ClientConnections are\n * released back to the pool for re-use when their containing ConnectionData is destroyed.\n */\nclass ConnectionData {\npublic:\n  virtual ~ConnectionData() = default;\n\n  /**\n   * @return the ClientConnection for the connection.\n   */\n  virtual Network::ClientConnection& connection() PURE;\n\n  /**\n   * Sets the ConnectionState for this connection. Any existing ConnectionState is destroyed.\n   * @param ConnectionStatePtr&& new ConnectionState for this connection.\n   */\n  virtual void setConnectionState(ConnectionStatePtr&& state) PURE;\n\n  /**\n   * @return T* the current ConnectionState or nullptr if no state is set or if the state's type\n   *            is not T.\n   */\n  template <class T> T* connectionStateTyped() { return dynamic_cast<T*>(connectionState()); }\n\n  /**\n   * Sets the ConnectionPool::UpstreamCallbacks for the connection. If no callback is attached,\n   * data from the upstream will cause the connection to be closed. Callbacks cease when the\n   * connection is released.\n   * @param callback the UpstreamCallbacks to invoke for upstream data\n   */\n  virtual void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callback) PURE;\n\nprotected:\n  /**\n   * @return ConnectionState* pointer to the current ConnectionState or nullptr if not set\n   */\n  virtual ConnectionState* connectionState() PURE;\n};\n\nusing ConnectionDataPtr = std::unique_ptr<ConnectionData>;\nusing PoolFailureReason = ::Envoy::ConnectionPool::PoolFailureReason;\nusing Cancellable = ::Envoy::ConnectionPool::Cancellable;\nusing CancelPolicy = ::Envoy::ConnectionPool::CancelPolicy;\n\n/**\n * Pool callbacks invoked in the context of a newConnection() call, either synchronously or\n * asynchronously.\n */\nclass Callbacks {\npublic:\n  virtual ~Callbacks() = default;\n\n  /**\n   * Called when a pool error occurred and no connection could be acquired for making the request.\n   * @param reason supplies the failure reason.\n   * 
@param host supplies the description of the host that caused the failure. This may be nullptr\n   *             if no host was involved in the failure (for example overflow).\n   */\n  virtual void onPoolFailure(PoolFailureReason reason,\n                             Upstream::HostDescriptionConstSharedPtr host) PURE;\n\n  /**\n   * Called when a connection is available to process a request/response. Connections may be\n   * released back to the pool for re-use by resetting the ConnectionDataPtr. If the connection is\n   * no longer viable for reuse (e.g. due to some kind of protocol error), the underlying\n   * ClientConnection should be closed to prevent its reuse.\n   *\n   * @param conn supplies the connection data to use.\n   * @param host supplies the description of the host that will carry the request. For logical\n   *             connection pools the description may be different each time this is called.\n   */\n  virtual void onPoolReady(ConnectionDataPtr&& conn,\n                           Upstream::HostDescriptionConstSharedPtr host) PURE;\n};\n\n/**\n * An instance of a generic connection pool.\n */\nclass Instance : public Envoy::ConnectionPool::Instance, public Event::DeferredDeletable {\npublic:\n  /**\n   * Immediately close all existing connection pool connections. This method can be used in cases\n   * where the connection pool is not being destroyed, but the caller wishes to terminate all\n   * existing connections. For example, when a health check failure occurs.\n   */\n  virtual void closeConnections() PURE;\n\n  /**\n   * Create a new connection on the pool.\n   * @param cb supplies the callbacks to invoke when the connection is ready or has failed. The\n   *           callbacks may be invoked immediately within the context of this call if there is a\n   *           ready connection or an immediate failure. 
In this case, the routine returns nullptr.\n   * @return Cancellable* If no connection is ready, the callback is not invoked, and a handle\n   *                      is returned that can be used to cancel the request. Otherwise, one of the\n   *                      callbacks is called and the routine returns nullptr. NOTE: Once a callback\n   *                      is called, the handle is no longer valid and any further cancellation\n   *                      should be done by resetting the connection.\n   */\n  virtual Cancellable* newConnection(Callbacks& callbacks) PURE;\n};\n\nusing InstancePtr = std::unique_ptr<Instance>;\n\n} // namespace ConnectionPool\n} // namespace Tcp\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/thread/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"thread_interface\",\n    hdrs = [\"thread.h\"],\n    deps = [\"//source/common/common:thread_annotations\"],\n)\n"
  },
  {
    "path": "include/envoy/thread/thread.h",
    "content": "#pragma once\n\n#include <functional>\n#include <limits>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/thread_annotations.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\n/**\n * An id for a thread.\n */\nclass ThreadId {\npublic:\n  ThreadId() : id_(std::numeric_limits<int64_t>::min()) {}\n  explicit ThreadId(int64_t id) : id_(id) {}\n\n  int64_t getId() const { return id_; }\n  std::string debugString() const { return std::to_string(id_); }\n  bool isEmpty() const { return *this == ThreadId(); }\n  friend bool operator==(ThreadId lhs, ThreadId rhs) { return lhs.id_ == rhs.id_; }\n  friend bool operator!=(ThreadId lhs, ThreadId rhs) { return lhs.id_ != rhs.id_; }\n  template <typename H> friend H AbslHashValue(H h, ThreadId id) {\n    return H::combine(std::move(h), id.id_);\n  }\n\nprivate:\n  int64_t id_;\n};\n\nclass Thread {\npublic:\n  virtual ~Thread() = default;\n\n  /**\n   * @return the name of the thread.\n   */\n  virtual std::string name() const PURE;\n\n  /**\n   * Blocks until the thread exits.\n   */\n  virtual void join() PURE;\n};\n\nusing ThreadPtr = std::unique_ptr<Thread>;\n\n// Options specified during thread creation.\nstruct Options {\n  std::string name_; // A name supplied for the thread. 
On Linux this is limited to 15 chars.\n};\n\nusing OptionsOptConstRef = const absl::optional<Options>&;\n\n/**\n * Interface providing a mechanism for creating threads.\n */\nclass ThreadFactory {\npublic:\n  virtual ~ThreadFactory() = default;\n\n  /**\n   * Creates a thread, immediately starting the thread_routine.\n   *\n   * @param thread_routine supplies the function to invoke in the thread.\n   * @param options supplies options specified on thread creation.\n   */\n  virtual ThreadPtr createThread(std::function<void()> thread_routine,\n                                 OptionsOptConstRef options = absl::nullopt) PURE;\n\n  /**\n   * Return the current system thread ID\n   */\n  virtual ThreadId currentThreadId() PURE;\n};\n\nusing ThreadFactoryPtr = std::unique_ptr<ThreadFactory>;\n\n/**\n * Like the C++11 \"basic lockable concept\" but a pure virtual interface vs. a template, and\n * with thread annotations.\n */\nclass ABSL_LOCKABLE BasicLockable {\npublic:\n  virtual ~BasicLockable() = default;\n\n  virtual void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() PURE;\n  virtual bool tryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) PURE;\n  virtual void unlock() ABSL_UNLOCK_FUNCTION() PURE;\n};\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/thread_local/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"thread_local_interface\",\n    hdrs = [\"thread_local.h\"],\n    deps = [\"//include/envoy/event:dispatcher_interface\"],\n)\n"
  },
  {
    "path": "include/envoy/thread_local/thread_local.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/dispatcher.h\"\n\nnamespace Envoy {\nnamespace ThreadLocal {\n\n/**\n * All objects that are stored via the ThreadLocal interface must derive from this type.\n */\nclass ThreadLocalObject {\npublic:\n  virtual ~ThreadLocalObject() = default;\n\n  /**\n   * Return the object casted to a concrete type. See getTyped() below for comments on the casts.\n   */\n  template <class T> T& asType() {\n    ASSERT(dynamic_cast<T*>(this) != nullptr);\n    return *static_cast<T*>(this);\n  }\n};\n\nusing ThreadLocalObjectSharedPtr = std::shared_ptr<ThreadLocalObject>;\n\n/**\n * An individual allocated TLS slot. When the slot is destroyed the stored thread local will\n * be freed on each thread.\n */\nclass Slot {\npublic:\n  virtual ~Slot() = default;\n\n  /**\n   * Returns if there is thread local data for this thread.\n   *\n   * This should return true for Envoy worker threads and false for threads which do not have thread\n   * local storage allocated.\n   *\n   * @return true if registerThread has been called for this thread, false otherwise.\n   */\n  virtual bool currentThreadRegistered() PURE;\n\n  /**\n   * @return ThreadLocalObjectSharedPtr a thread local object stored in the slot.\n   */\n  virtual ThreadLocalObjectSharedPtr get() PURE;\n\n  /**\n   * This is a helper on top of get() that casts the object stored in the slot to the specified\n   * type. 
Since the slot only stores pointers to the base interface, the static_cast operates\n   * in production for performance, and the dynamic_cast validates correctness in tests and debug\n   * builds.\n   */\n  template <class T> T& getTyped() {\n    ASSERT(std::dynamic_pointer_cast<T>(get()) != nullptr);\n    return *static_cast<T*>(get().get());\n  }\n\n  /**\n   * Set thread local data on all threads previously registered via registerThread().\n   * @param initializeCb supplies the functor that will be called *on each thread*. The functor\n   *                     returns the thread local object which is then stored. The storage is via\n   *                     a shared_ptr. Thus, this is a flexible mechanism that can be used to share\n   *                     the same data across all threads or to share different data on each thread.\n   *\n   * NOTE: The initialize callback is not supposed to capture the Slot, or its owner. As the owner\n   * may be destructed in main thread before the update_cb gets called in a worker thread.\n   */\n  using InitializeCb = std::function<ThreadLocalObjectSharedPtr(Event::Dispatcher& dispatcher)>;\n  virtual void set(InitializeCb cb) PURE;\n\n  /**\n   * UpdateCb takes the current stored data, and returns an updated/new version data.\n   * TLS will run the callback and replace the stored data with the returned value *in each thread*.\n   *\n   * NOTE: The update callback is not supposed to capture the Slot, or its owner. 
As the owner may\n   * be destructed in main thread before the update_cb gets called in a worker thread.\n   **/\n  using UpdateCb = std::function<ThreadLocalObjectSharedPtr(ThreadLocalObjectSharedPtr)>;\n  virtual void runOnAllThreads(const UpdateCb& update_cb) PURE;\n  virtual void runOnAllThreads(const UpdateCb& update_cb, Event::PostCb complete_cb) PURE;\n};\n\nusing SlotPtr = std::unique_ptr<Slot>;\n\n/**\n * Interface used to allocate thread local slots.\n */\nclass SlotAllocator {\npublic:\n  virtual ~SlotAllocator() = default;\n\n  /**\n   * @return SlotPtr a dedicated slot for use in further calls to get(), set(), etc.\n   */\n  virtual SlotPtr allocateSlot() PURE;\n};\n\n/**\n * Interface for getting and setting thread local data as well as registering a thread\n */\nclass Instance : public SlotAllocator {\npublic:\n  /**\n   * A thread (via its dispatcher) must be registered before set() is called on any allocated slots\n   * to receive thread local data updates.\n   * @param dispatcher supplies the thread's dispatcher.\n   * @param main_thread supplies whether this is the main program thread or not. (The only\n   *                    difference is that callbacks fire immediately on the main thread when posted\n   *                    from the main thread).\n   */\n  virtual void registerThread(Event::Dispatcher& dispatcher, bool main_thread) PURE;\n\n  /**\n   * This should be called by the main thread before any worker threads start to exit. This will\n   * block TLS removal during slot destruction, given that worker threads are about to call\n   * shutdownThread(). This avoids having to implement de-registration of threads.\n   */\n  virtual void shutdownGlobalThreading() PURE;\n\n  /**\n   * The owning thread is about to exit. This will free all thread local variables. 
It must be\n   * called on the thread that is shutting down.\n   */\n  virtual void shutdownThread() PURE;\n\n  /**\n   * @return Event::Dispatcher& the thread local dispatcher.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n};\n\n} // namespace ThreadLocal\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/tracing/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"http_tracer_interface\",\n    hdrs = [\"http_tracer.h\"],\n    deps = [\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/http:header_map_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http_tracer_manager_interface\",\n    hdrs = [\"http_tracer_manager.h\"],\n    deps = [\n        \":http_tracer_interface\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/tracing/http_tracer.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/http/header_map.h\"\n\nnamespace Envoy {\nnamespace Tracing {\n\nclass Span;\nusing SpanPtr = std::unique_ptr<Span>;\n\nconstexpr uint32_t DefaultMaxPathTagLength = 256;\n\nenum class OperationName { Ingress, Egress };\n\n/**\n * The reasons why trace sampling may or may not be performed.\n */\nenum class Reason {\n  // Not sampled based on supplied request id.\n  NotTraceableRequestId,\n  // Not sampled due to being a health check.\n  HealthCheck,\n  // Sampling enabled.\n  Sampling,\n  // Sampling forced by the service.\n  ServiceForced,\n  // Sampling forced by the client.\n  ClientForced,\n};\n\n/**\n * The decision regarding whether traces should be sampled, and the reason for it.\n */\nstruct Decision {\n  Reason reason;\n  bool traced;\n};\n\n/**\n * The context for the custom tag to obtain the tag value.\n */\nstruct CustomTagContext {\n  const Http::RequestHeaderMap* request_headers;\n  const StreamInfo::StreamInfo& stream_info;\n};\n\n/**\n * Tracing custom tag, with tag name and how it would be applied to the span.\n */\nclass CustomTag {\npublic:\n  virtual ~CustomTag() = default;\n\n  /**\n   * @return the tag name view.\n   */\n  virtual absl::string_view tag() const PURE;\n\n  /**\n   * The way how to apply the custom tag to the span,\n   * generally obtain the tag value from the context and attached it to the span.\n   * @param span the active span.\n   * @param ctx the custom tag context.\n   */\n  virtual void apply(Span& span, const CustomTagContext& ctx) const PURE;\n};\n\nusing CustomTagConstSharedPtr = std::shared_ptr<const CustomTag>;\nusing CustomTagMap = absl::flat_hash_map<std::string, CustomTagConstSharedPtr>;\n\n/**\n * Tracing configuration, it carries additional data needed to populate the span.\n */\nclass Config {\npublic:\n  virtual ~Config() = 
default;\n\n  /**\n   * @return operation name for tracing, e.g., ingress.\n   */\n  virtual OperationName operationName() const PURE;\n\n  /**\n   * @return custom tags to be attached to the active span.\n   */\n  virtual const CustomTagMap* customTags() const PURE;\n\n  /**\n   * @return true if spans should be annotated with more detailed information.\n   */\n  virtual bool verbose() const PURE;\n\n  /**\n   * @return the maximum length allowed for paths in the extracted HttpUrl tag.\n   */\n  virtual uint32_t maxPathTagLength() const PURE;\n};\n\n/**\n * Basic abstraction for span.\n */\nclass Span {\npublic:\n  virtual ~Span() = default;\n\n  /**\n   * Set the operation name.\n   * @param operation the operation name\n   */\n  virtual void setOperation(absl::string_view operation) PURE;\n\n  /**\n   * Attach metadata to a Span, to be handled in an implementation-dependent fashion.\n   * @param name the name of the tag\n   * @param value the value to associate with the tag\n   */\n  virtual void setTag(absl::string_view name, absl::string_view value) PURE;\n\n  /**\n   * Record an event associated with a span, to be handled in an implementation-dependent fashion.\n   * @param timestamp the time of the event.\n   * @param event the name of the event.\n   */\n  virtual void log(SystemTime timestamp, const std::string& event) PURE;\n\n  /**\n   * Capture the final duration for this Span and carry out any work necessary to complete it.\n   * Once this method is called, the Span may be safely discarded.\n   */\n  virtual void finishSpan() PURE;\n\n  /**\n   * Mutate the provided headers with the context necessary to propagate this\n   * (implementation-specific) trace.\n   * @param request_headers the headers to which propagation context will be added\n   */\n  virtual void injectContext(Http::RequestHeaderMap& request_headers) PURE;\n\n  /**\n   * Create and start a child Span, with this Span as its parent in the trace.\n   * @param config the tracing 
configuration\n   * @param name operation name captured by the spawned child\n   * @param start_time initial start time for the operation captured by the child\n   */\n  virtual SpanPtr spawnChild(const Config& config, const std::string& name,\n                             SystemTime start_time) PURE;\n\n  /**\n   * This method overrides any previous sampling decision associated with the trace instance.\n   * If the sampled parameter is false, this span and any subsequent child spans\n   * are not reported to the tracing system.\n   * @param sampled whether the span and any subsequent child spans should be sampled\n   */\n  virtual void setSampled(bool sampled) PURE;\n\n  /**\n   * Retrieve a key's value from the span's baggage.\n   * This baggage data could've been set by this span or any parent spans.\n   * @param key baggage key\n   * @return the baggage's value for the given input key\n   */\n  virtual std::string getBaggage(absl::string_view key) PURE;\n\n  /**\n   * Set a key/value pair in the current span's baggage.\n   * All subsequent child spans will have access to this baggage.\n   * @param key baggage key\n   * @param key baggage value\n   */\n  virtual void setBaggage(absl::string_view key, absl::string_view value) PURE;\n};\n\n/**\n * Tracing driver is responsible for span creation.\n */\nclass Driver {\npublic:\n  virtual ~Driver() = default;\n\n  /**\n   * Start driver specific span.\n   */\n  virtual SpanPtr startSpan(const Config& config, Http::RequestHeaderMap& request_headers,\n                            const std::string& operation_name, SystemTime start_time,\n                            const Tracing::Decision tracing_decision) PURE;\n};\n\nusing DriverPtr = std::unique_ptr<Driver>;\n\n/**\n * HttpTracer is responsible for handling traces and delegate actions to the\n * corresponding drivers.\n */\nclass HttpTracer {\npublic:\n  virtual ~HttpTracer() = default;\n\n  virtual SpanPtr startSpan(const Config& config, Http::RequestHeaderMap& 
request_headers,\n                            const StreamInfo::StreamInfo& stream_info,\n                            const Tracing::Decision tracing_decision) PURE;\n};\n\nusing HttpTracerSharedPtr = std::shared_ptr<HttpTracer>;\n\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/tracing/http_tracer_manager.h",
    "content": "#pragma once\n\n#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\nnamespace Envoy {\nnamespace Tracing {\n\n/**\n * An HttpTracer manager which ensures existence of at most one\n * HttpTracer instance for a given configuration.\n */\nclass HttpTracerManager {\npublic:\n  virtual ~HttpTracerManager() = default;\n\n  /**\n   * Get an existing HttpTracer or create a new one for a given configuration.\n   * @param config supplies the configuration for the tracing provider.\n   * @return HttpTracerSharedPtr.\n   */\n  virtual HttpTracerSharedPtr\n  getOrCreateHttpTracer(const envoy::config::trace::v3::Tracing_Http* config) PURE;\n};\n\nusing HttpTracerManagerSharedPtr = std::shared_ptr<HttpTracerManager>;\n\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/udp/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"hash_policy_interface\",\n    hdrs = [\"hash_policy.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/network:address_interface\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/udp/hash_policy.h",
    "content": "#pragma once\n\n#include \"envoy/network/address.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Udp {\n/**\n * Hash policy for UDP transport layer protocol.\n */\nclass HashPolicy {\npublic:\n  virtual ~HashPolicy() = default;\n\n  /**\n   * @param downstream_address is the address of the peer client.\n   * @return absl::optional<uint64_t> an optional hash value to route on. A hash value might not be\n   * returned if for example the downstream address has a unix domain socket type.\n   */\n  virtual absl::optional<uint64_t>\n  generateHash(const Network::Address::Instance& downstream_address) const PURE;\n};\n} // namespace Udp\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"cluster_manager_interface\",\n    hdrs = [\"cluster_manager.h\"],\n    external_deps = [\n        \"abseil_node_hash_map\",\n    ],\n    deps = [\n        \":health_checker_interface\",\n        \":load_balancer_interface\",\n        \":thread_local_cluster_interface\",\n        \":upstream_interface\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/common:random_generator_interface\",\n        \"//include/envoy/config:grpc_mux_interface\",\n        \"//include/envoy/config:subscription_factory_interface\",\n        \"//include/envoy/grpc:async_client_manager_interface\",\n        \"//include/envoy/http:async_client_interface\",\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/secret:secret_manager_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"health_checker_interface\",\n    hdrs = [\"health_checker.h\"],\n    deps = [\n        \":upstream_interface\",\n        \"@envoy_api//envoy/data/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"health_check_host_monitor_interface\",\n    hdrs = [\"health_check_host_monitor.h\"],\n)\n\nenvoy_cc_library(\n    name = \"host_description_interface\",\n    hdrs = [\"host_description.h\"],\n   
 deps = [\n        \":health_check_host_monitor_interface\",\n        \":outlier_detection_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/stats:primitive_stats_macros\",\n        \"//include/envoy/stats:stats_macros\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"load_balancer_interface\",\n    hdrs = [\"load_balancer.h\"],\n    deps = [\n        \":upstream_interface\",\n        \"//include/envoy/router:router_interface\",\n        \"//include/envoy/upstream:types_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"load_balancer_type_interface\",\n    hdrs = [\"load_balancer_type.h\"],\n    deps = [\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"locality_lib\",\n    hdrs = [\"locality.h\"],\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"outlier_detection_interface\",\n    hdrs = [\"outlier_detection.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"@envoy_api//envoy/data/cluster/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"retry_interface\",\n    hdrs = [\"retry.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/upstream:types_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"resource_manager_interface\",\n    hdrs = [\"resource_manager.h\"],\n    deps = [\"//include/envoy/common:resource_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"thread_local_cluster_interface\",\n    hdrs = [\"thread_local_cluster.h\"],\n    deps = [\n        
\":load_balancer_interface\",\n        \":upstream_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"types_interface\",\n    hdrs = [\"types.h\"],\n    deps = [\n        \"//source/common/common:phantom\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"upstream_interface\",\n    hdrs = [\"upstream.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":health_check_host_monitor_interface\",\n        \":load_balancer_type_interface\",\n        \":locality_lib\",\n        \":resource_manager_interface\",\n        \"//include/envoy/common:callback\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/config:typed_metadata_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/ssl:context_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/upstream:types_interface\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"cluster_factory_interface\",\n    hdrs = [\"cluster_factory.h\"],\n    deps = [\n        \":cluster_manager_interface\",\n        \":health_check_host_monitor_interface\",\n        \":load_balancer_type_interface\",\n        \":locality_lib\",\n        \":resource_manager_interface\",\n        \":upstream_interface\",\n        \"//include/envoy/common:callback\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/config:typed_metadata_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        
\"//include/envoy/ssl:context_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "include/envoy/upstream/cluster_factory.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/ssl/context.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/store.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/outlier_detection.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Context passed to cluster factory to access envoy resources. Cluster factory should only access\n * the rest of the server through this context object.\n */\nclass ClusterFactoryContext {\npublic:\n  virtual ~ClusterFactoryContext() = default;\n\n  /**\n   * @return bool flag indicating whether the cluster is added via api.\n   */\n  virtual bool addedViaApi() PURE;\n\n  /**\n   * @return Server::Admin& the server's admin interface.\n   */\n  virtual Server::Admin& admin() PURE;\n\n  /**\n   * @return Api::Api& a reference to the api object.\n   */\n  virtual Api::Api& api() PURE;\n\n  /**\n   * @return Upstream::ClusterManager& singleton for use by the entire server.\n   */\n  virtual ClusterManager& clusterManager() PURE;\n\n  /**\n   * @return Event::Dispatcher& the main thread's dispatcher. 
This dispatcher should be used\n   *         for all singleton processing.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * @return Network::DnsResolverSharedPtr the dns resolver for the server.\n   */\n  virtual Network::DnsResolverSharedPtr dnsResolver() PURE;\n\n  /**\n   * @return information about the local environment the server is running in.\n   */\n  virtual const LocalInfo::LocalInfo& localInfo() PURE;\n\n  /**\n   * @return AccessLogManager for use by the entire server.\n   */\n  virtual AccessLog::AccessLogManager& logManager() PURE;\n\n  /**\n   * @return Runtime::Loader& the singleton runtime loader for the server.\n   */\n  virtual Runtime::Loader& runtime() PURE;\n\n  /**\n   * @return Singleton::Manager& the server-wide singleton manager.\n   */\n  virtual Singleton::Manager& singletonManager() PURE;\n\n  /**\n   * @return Ssl::ContextManager& the SSL context manager.\n   */\n  virtual Ssl::ContextManager& sslContextManager() PURE;\n\n  /**\n   * @return the server-wide stats store.\n   */\n  virtual Stats::Store& stats() PURE;\n\n  /**\n   * @return the server's TLS slot allocator.\n   */\n  virtual ThreadLocal::SlotAllocator& tls() PURE;\n\n  /**\n   * @return Outlier::EventLoggerSharedPtr sink for outlier detection event logs.\n   */\n  virtual Outlier::EventLoggerSharedPtr outlierEventLogger() PURE;\n\n  /**\n   * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration\n   *         messages.\n   */\n  virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE;\n};\n\n/**\n * Implemented by cluster and registered via Registry::registerFactory() or the convenience class\n * RegisterFactory.\n */\nclass ClusterFactory : public Config::UntypedFactory {\npublic:\n  ~ClusterFactory() override = default;\n\n  /**\n   * Create a new instance of cluster. 
If the implementation is unable to produce a cluster instance\n   * with the provided parameters, it should throw an EnvoyException in the case of general error.\n   * @param cluster supplies the general protobuf configuration for the cluster.\n   * @param context supplies the cluster's context.\n   * @return a pair containing the cluster instance as well as an optional thread aware load\n   *         balancer if this cluster has an integrated load balancer.\n   */\n  virtual std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>\n  create(const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.clusters\"; }\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/cluster_manager.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/grpc_mux.h\"\n#include \"envoy/config/subscription_factory.h\"\n#include \"envoy/grpc/async_client_manager.h\"\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/conn_pool.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/store.h\"\n#include \"envoy/tcp/conn_pool.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/health_checker.h\"\n#include \"envoy/upstream/load_balancer.h\"\n#include \"envoy/upstream/thread_local_cluster.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * ClusterUpdateCallbacks provide a way to exposes Cluster lifecycle events in the\n * ClusterManager.\n */\nclass ClusterUpdateCallbacks {\npublic:\n  virtual ~ClusterUpdateCallbacks() = default;\n\n  /**\n   * onClusterAddOrUpdate is called when a new cluster is added or an existing cluster\n   * is updated in the ClusterManager.\n   * @param cluster is the ThreadLocalCluster that represents the updated\n   * cluster.\n   */\n  virtual void onClusterAddOrUpdate(ThreadLocalCluster& cluster) PURE;\n\n  /**\n   * onClusterRemoval is called when a cluster is removed; the argument is the cluster name.\n   * @param cluster_name is the name of the removed 
cluster.\n   */\n  virtual void onClusterRemoval(const std::string& cluster_name) PURE;\n};\n\n/**\n * ClusterUpdateCallbacksHandle is a RAII wrapper for a ClusterUpdateCallbacks. Deleting\n * the ClusterUpdateCallbacksHandle will remove the callbacks from ClusterManager in O(1).\n */\nclass ClusterUpdateCallbacksHandle {\npublic:\n  virtual ~ClusterUpdateCallbacksHandle() = default;\n};\n\nusing ClusterUpdateCallbacksHandlePtr = std::unique_ptr<ClusterUpdateCallbacksHandle>;\n\nclass ClusterManagerFactory;\n\n/**\n * Manages connection pools and load balancing for upstream clusters. The cluster manager is\n * persistent and shared among multiple ongoing requests/connections.\n * Cluster manager is initialized in two phases. In the first phase which begins at the construction\n * all primary clusters (i.e. with endpoint assignments provisioned statically in bootstrap,\n * discovered through DNS or file based CDS) are initialized. This phase may complete synchronously\n * with cluster manager construction iff all clusters are STATIC and without health checks\n * configured. At the completion of the first phase cluster manager invokes callback set through the\n * `setPrimaryClustersInitializedCb` method.\n * After the first phase has completed the server instance initializes services (i.e. RTDS) needed\n * to successfully deploy the rest of dynamic configuration.\n * In the second phase all secondary clusters (with endpoint assignments provisioned by xDS servers)\n * are initialized and then the rest of the configuration provisioned through xDS.\n */\nclass ClusterManager {\npublic:\n  using PrimaryClustersReadyCallback = std::function<void()>;\n  using InitializationCompleteCallback = std::function<void()>;\n\n  virtual ~ClusterManager() = default;\n\n  /**\n   * Add or update a cluster via API. 
The semantics of this API are:\n   * 1) The hash of the config is used to determine if an already existing cluster has changed.\n   *    Nothing is done if the hash matches the previously running configuration.\n   * 2) Statically defined clusters (those present when Envoy starts) can not be updated via API.\n   *\n   * @param cluster supplies the cluster configuration.\n   * @param version_info supplies the xDS version of the cluster.\n   * @return true if the action results in an add/update of a cluster.\n   */\n  virtual bool addOrUpdateCluster(const envoy::config::cluster::v3::Cluster& cluster,\n                                  const std::string& version_info) PURE;\n\n  /**\n   * Set a callback that will be invoked when all primary clusters have been initialized.\n   */\n  virtual void setPrimaryClustersInitializedCb(PrimaryClustersReadyCallback callback) PURE;\n\n  /**\n   * Set a callback that will be invoked when all owned clusters have been initialized.\n   */\n  virtual void setInitializedCb(InitializationCompleteCallback callback) PURE;\n\n  /**\n   * Start initialization of secondary clusters and then dynamically configured clusters.\n   * The \"initialized callback\" set in the method above is invoked when secondary and\n   * dynamically provisioned clusters have finished initializing.\n   */\n  virtual void\n  initializeSecondaryClusters(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) PURE;\n\n  using ClusterInfoMap = absl::node_hash_map<std::string, std::reference_wrapper<const Cluster>>;\n\n  /**\n   * @return ClusterInfoMap all current clusters. These are the primary (not thread local)\n   * clusters which should only be used for stats/admin.\n   */\n  virtual ClusterInfoMap clusters() PURE;\n\n  using ClusterSet = absl::flat_hash_set<std::string>;\n\n  /**\n   * @return const ClusterSet& providing the cluster names that are eligible as\n   *         xDS API config sources. These must be static (i.e. 
in the\n   *         bootstrap) and non-EDS.\n   */\n  virtual const ClusterSet& primaryClusters() PURE;\n\n  /**\n   * @return ThreadLocalCluster* the thread local cluster with the given name or nullptr if it\n   * does not exist. This is thread safe.\n   *\n   * NOTE: The pointer returned by this function is ONLY safe to use in the context of the owning\n   * call (or if the caller knows that the cluster is fully static and will never be deleted). In\n   * the case of dynamic clusters, subsequent event loop iterations may invalidate this pointer.\n   * If information about the cluster needs to be kept, use the ThreadLocalCluster::info() method to\n   * obtain cluster information that is safe to store.\n   */\n  virtual ThreadLocalCluster* get(absl::string_view cluster) PURE;\n\n  /**\n   * Allocate a load balanced HTTP connection pool for a cluster. This is *per-thread* so that\n   * callers do not need to worry about per thread synchronization. The load balancing policy that\n   * is used is the one defined on the cluster when it was created.\n   *\n   * Can return nullptr if there is no host available in the cluster or if the cluster does not\n   * exist.\n   *\n   * To resolve the protocol to use, we provide the downstream protocol (if one exists).\n   */\n  virtual Http::ConnectionPool::Instance*\n  httpConnPoolForCluster(const std::string& cluster, ResourcePriority priority,\n                         absl::optional<Http::Protocol> downstream_protocol,\n                         LoadBalancerContext* context) PURE;\n\n  /**\n   * Allocate a load balanced TCP connection pool for a cluster. This is *per-thread* so that\n   * callers do not need to worry about per thread synchronization. 
The load balancing policy that\n   * is used is the one defined on the cluster when it was created.\n   *\n   * Can return nullptr if there is no host available in the cluster or if the cluster does not\n   * exist.\n   */\n  virtual Tcp::ConnectionPool::Instance* tcpConnPoolForCluster(const std::string& cluster,\n                                                               ResourcePriority priority,\n                                                               LoadBalancerContext* context) PURE;\n\n  /**\n   * Allocate a load balanced TCP connection for a cluster. The created connection is already\n   * bound to the correct *per-thread* dispatcher, so no further synchronization is needed. The\n   * load balancing policy that is used is the one defined on the cluster when it was created.\n   *\n   * Returns both a connection and the host that backs the connection. Both can be nullptr if there\n   * is no host available in the cluster.\n   */\n  virtual Host::CreateConnectionData tcpConnForCluster(const std::string& cluster,\n                                                       LoadBalancerContext* context) PURE;\n\n  /**\n   * Returns a client that can be used to make async HTTP calls against the given cluster. The\n   * client may be backed by a connection pool or by a multiplexed connection. The cluster manager\n   * owns the client.\n   */\n  virtual Http::AsyncClient& httpAsyncClientForCluster(const std::string& cluster) PURE;\n\n  /**\n   * Remove a cluster via API. Only clusters added via addOrUpdateCluster() can\n   * be removed in this manner. 
Statically defined clusters present when Envoy starts cannot be\n   * removed.\n   *\n   * @return true if the action results in the removal of a cluster.\n   */\n  virtual bool removeCluster(const std::string& cluster) PURE;\n\n  /**\n   * Shutdown the cluster manager prior to destroying connection pools and other thread local data.\n   */\n  virtual void shutdown() PURE;\n\n  /**\n   * @return const envoy::config::core::v3::BindConfig& cluster manager wide bind configuration for\n   * new upstream connections.\n   */\n  virtual const envoy::config::core::v3::BindConfig& bindConfig() const PURE;\n\n  /**\n   * Returns a shared_ptr to the singleton xDS-over-gRPC provider for upstream control plane muxing\n   * of xDS. This is treated somewhat as a special case in ClusterManager, since it does not relate\n   * logically to the management of clusters but instead is required early in ClusterManager/server\n   * initialization and in various sites that need ClusterManager for xDS API interfacing.\n   *\n   * @return GrpcMux& ADS API provider reference.\n   */\n  virtual Config::GrpcMuxSharedPtr adsMux() PURE;\n\n  /**\n   * @return Grpc::AsyncClientManager& the cluster manager's gRPC client manager.\n   */\n  virtual Grpc::AsyncClientManager& grpcAsyncClientManager() PURE;\n\n  /**\n   * Return the local cluster name, if it was configured.\n   *\n   * @return absl::optional<std::string> the local cluster name, or empty if no local cluster was\n   * configured.\n   */\n  virtual const absl::optional<std::string>& localClusterName() const PURE;\n\n  /**\n   * This method allows registering callbacks for cluster lifecycle events in the ClusterManager.\n   * The callbacks will be registered in a thread local slot and the callbacks will be executed\n   * on the thread that registered them.\n   * To be executed on all threads, callbacks need to be registered on all threads.\n   *\n   * @param callbacks are the ClusterUpdateCallbacks to add or remove to the cluster 
manager.\n   * @return ClusterUpdateCallbacksHandlePtr a RAII that needs to be deleted to\n   * unregister the callback.\n   */\n  virtual ClusterUpdateCallbacksHandlePtr\n  addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& callbacks) PURE;\n\n  /**\n   * Return the factory to use for creating cluster manager related objects.\n   */\n  virtual ClusterManagerFactory& clusterManagerFactory() PURE;\n\n  /**\n   * Obtain the subscription factory for the cluster manager. Since subscriptions may have an\n   * upstream component, the factory is a facet of the cluster manager.\n   *\n   * @return Config::SubscriptionFactory& the subscription factory.\n   */\n  virtual Config::SubscriptionFactory& subscriptionFactory() PURE;\n};\n\nusing ClusterManagerPtr = std::unique_ptr<ClusterManager>;\n\n/**\n * Abstract interface for a CDS API provider.\n */\nclass CdsApi {\npublic:\n  virtual ~CdsApi() = default;\n\n  /**\n   * Start the first fetch of CDS data.\n   */\n  virtual void initialize() PURE;\n\n  /**\n   * Set a callback that will be called when the CDS API has done an initial load from the remote\n   * server. If the initial load fails, the callback will also be called.\n   */\n  virtual void setInitializedCb(std::function<void()> callback) PURE;\n\n  /**\n   * @return std::string last accepted version from fetch.\n   */\n  virtual const std::string versionInfo() const PURE;\n};\n\nusing CdsApiPtr = std::unique_ptr<CdsApi>;\n\n/**\n * Factory for objects needed during cluster manager operation.\n */\nclass ClusterManagerFactory {\npublic:\n  virtual ~ClusterManagerFactory() = default;\n\n  /**\n   * Allocate a cluster manager from configuration proto.\n   */\n  virtual ClusterManagerPtr\n  clusterManagerFromProto(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) PURE;\n\n  /**\n   * Allocate an HTTP connection pool for the host. 
Pools are separated by 'priority',\n   * 'protocol', and 'options->hashKey()', if any.\n   */\n  virtual Http::ConnectionPool::InstancePtr\n  allocateConnPool(Event::Dispatcher& dispatcher, HostConstSharedPtr host,\n                   ResourcePriority priority, Http::Protocol protocol,\n                   const Network::ConnectionSocket::OptionsSharedPtr& options,\n                   const Network::TransportSocketOptionsSharedPtr& transport_socket_options) PURE;\n\n  /**\n   * Allocate a TCP connection pool for the host. Pools are separated by 'priority' and\n   * 'options->hashKey()', if any.\n   */\n  virtual Tcp::ConnectionPool::InstancePtr\n  allocateTcpConnPool(Event::Dispatcher& dispatcher, HostConstSharedPtr host,\n                      ResourcePriority priority,\n                      const Network::ConnectionSocket::OptionsSharedPtr& options,\n                      Network::TransportSocketOptionsSharedPtr transport_socket_options) PURE;\n\n  /**\n   * Allocate a cluster from configuration proto.\n   */\n  virtual std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>\n  clusterFromProto(const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm,\n                   Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) PURE;\n\n  /**\n   * Create a CDS API provider from configuration proto.\n   */\n  virtual CdsApiPtr createCds(const envoy::config::core::v3::ConfigSource& cds_config,\n                              ClusterManager& cm) PURE;\n\n  /**\n   * Returns the secret manager.\n   */\n  virtual Secret::SecretManager& secretManager() PURE;\n};\n\n/**\n * Factory for creating ClusterInfo\n */\nclass ClusterInfoFactory {\npublic:\n  virtual ~ClusterInfoFactory() = default;\n\n  /**\n   * Parameters for createClusterInfo().\n   */\n  struct CreateClusterInfoParams {\n    Server::Admin& admin_;\n    Runtime::Loader& runtime_;\n    const envoy::config::cluster::v3::Cluster& cluster_;\n    const 
envoy::config::core::v3::BindConfig& bind_config_;\n    Stats::Store& stats_;\n    Ssl::ContextManager& ssl_context_manager_;\n    const bool added_via_api_;\n    ClusterManager& cm_;\n    const LocalInfo::LocalInfo& local_info_;\n    Event::Dispatcher& dispatcher_;\n    Singleton::Manager& singleton_manager_;\n    ThreadLocal::SlotAllocator& tls_;\n    ProtobufMessage::ValidationVisitor& validation_visitor_;\n    Api::Api& api_;\n  };\n\n  /**\n   * This method returns a Upstream::ClusterInfoConstSharedPtr given construction parameters.\n   */\n  virtual Upstream::ClusterInfoConstSharedPtr\n  createClusterInfo(const CreateClusterInfoParams& params) PURE;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/health_check_host_monitor.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * A monitor for \"passive\" health check events that might happen on every thread. For example, if a\n * special HTTP header is received, the data plane may decide to fast fail a host to avoid waiting\n * for the full Health Check interval to elapse before determining the host is active health check\n * failed.\n */\nclass HealthCheckHostMonitor {\npublic:\n  virtual ~HealthCheckHostMonitor() = default;\n\n  /**\n   * Mark the host as unhealthy. Note that this may not be immediate as events may need to be\n   * propagated between multiple threads.\n   */\n  virtual void setUnhealthy() PURE;\n};\n\nusing HealthCheckHostMonitorPtr = std::unique_ptr<HealthCheckHostMonitor>;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/health_checker.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nenum class HealthState { Unhealthy, Healthy };\n\nenum class HealthTransition {\n  /**\n   * Used when the health state of a host hasn't changed.\n   */\n  Unchanged,\n  /**\n   * Used when the health state of a host has changed.\n   */\n  Changed,\n  /**\n   * Used when the health check result differs from the health state of a host, but a change to the\n   * latter is delayed due to healthy/unhealthy threshold settings.\n   */\n  ChangePending\n};\n\n/**\n * Wraps active health checking of an upstream cluster.\n */\nclass HealthChecker {\npublic:\n  virtual ~HealthChecker() = default;\n\n  /**\n   * Called when a host has been health checked.\n   * @param host supplies the host that was just health checked.\n   * @param changed_state supplies whether the health check resulted in a host moving from healthy\n   *                       to not healthy or vice versa.\n   */\n  using HostStatusCb =\n      std::function<void(const HostSharedPtr& host, HealthTransition changed_state)>;\n\n  /**\n   * Install a callback that will be invoked every time a health check round is completed for\n   * a host. 
The host's health check state may not have changed.\n   * @param callback supplies the callback to invoke.\n   */\n  virtual void addHostCheckCompleteCb(HostStatusCb callback) PURE;\n\n  /**\n   * Start cyclic health checking based on the provided settings and the type of health checker.\n   */\n  virtual void start() PURE;\n};\n\nusing HealthCheckerSharedPtr = std::shared_ptr<HealthChecker>;\n\nstd::ostream& operator<<(std::ostream& out, HealthState state);\nstd::ostream& operator<<(std::ostream& out, HealthTransition changed_state);\n\n/**\n * Sink for health check event logs.\n */\nclass HealthCheckEventLogger {\npublic:\n  virtual ~HealthCheckEventLogger() = default;\n\n  /**\n   * Log an unhealthy host ejection event.\n   * @param health_checker_type supplies the type of health checker that generated the event.\n   * @param host supplies the host that generated the event.\n   * @param failure_type supplies the type of health check failure.\n   */\n  virtual void logEjectUnhealthy(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                                 const HostDescriptionConstSharedPtr& host,\n                                 envoy::data::core::v3::HealthCheckFailureType failure_type) PURE;\n\n  /**\n   * Log an unhealthy host event.\n   * @param health_checker_type supplies the type of health checker that generated the event.\n   * @param host supplies the host that generated the event.\n   * @param failure_type supplies the type of health check failure.\n   * @param first_check whether this is a failure on the first health check for this host.\n   */\n  virtual void logUnhealthy(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                            const HostDescriptionConstSharedPtr& host,\n                            envoy::data::core::v3::HealthCheckFailureType failure_type,\n                            bool first_check) PURE;\n\n  /**\n   * Log a healthy host addition event.\n   * @param health_checker_type 
supplies the type of health checker that generated the event.\n   * @param host supplies the host that generated the event.\n   * @param first_check whether this is a fast path success on the first health check for this host.\n   */\n  virtual void logAddHealthy(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                             const HostDescriptionConstSharedPtr& host, bool first_check) PURE;\n\n  /**\n   * Log a degraded healthy host event.\n   * @param health_checker_type supplies the type of health checker that generated the event.\n   * @param host supplies the host that generated the event.\n   */\n  virtual void logDegraded(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                           const HostDescriptionConstSharedPtr& host) PURE;\n  /**\n   * Log an event indicating a host is no longer degraded.\n   * @param health_checker_type supplies the type of health checker that generated the event.\n   * @param host supplies the host that generated the event.\n   */\n  virtual void logNoLongerDegraded(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                                   const HostDescriptionConstSharedPtr& host) PURE;\n};\n\nusing HealthCheckEventLoggerPtr = std::unique_ptr<HealthCheckEventLogger>;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/host_description.h",
    "content": "#pragma once\n\n#include <map>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/stats/primitive_stats_macros.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/upstream/health_check_host_monitor.h\"\n#include \"envoy/upstream/outlier_detection.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nusing MetadataConstSharedPtr = std::shared_ptr<const envoy::config::core::v3::Metadata>;\n\n/**\n * All per host stats. @see stats_macros.h\n *\n * {rq_success, rq_error} have specific semantics driven by the needs of EDS load reporting. See\n * envoy.api.v2.endpoint.UpstreamLocalityStats for the definitions of success/error. These are\n * latched by LoadStatsReporter, independent of the normal stats sink flushing.\n */\n#define ALL_HOST_STATS(COUNTER, GAUGE)                                                             \\\n  COUNTER(cx_connect_fail)                                                                         \\\n  COUNTER(cx_total)                                                                                \\\n  COUNTER(rq_error)                                                                                \\\n  COUNTER(rq_success)                                                                              \\\n  COUNTER(rq_timeout)                                                                              \\\n  COUNTER(rq_total)                                                                                \\\n  GAUGE(cx_active)                                                                                 \\\n  GAUGE(rq_active)\n\n/**\n * All per host stats defined. 
@see stats_macros.h\n */\nstruct HostStats {\n  ALL_HOST_STATS(GENERATE_PRIMITIVE_COUNTER_STRUCT, GENERATE_PRIMITIVE_GAUGE_STRUCT);\n\n  // Provide access to name,counter pairs.\n  std::vector<std::pair<absl::string_view, Stats::PrimitiveCounterReference>> counters() const {\n    return {ALL_HOST_STATS(PRIMITIVE_COUNTER_NAME_AND_REFERENCE, IGNORE_PRIMITIVE_GAUGE)};\n  }\n\n  // Provide access to name,gauge pairs.\n  std::vector<std::pair<absl::string_view, Stats::PrimitiveGaugeReference>> gauges() const {\n    return {ALL_HOST_STATS(IGNORE_PRIMITIVE_COUNTER, PRIMITIVE_GAUGE_NAME_AND_REFERENCE)};\n  }\n};\n\nclass ClusterInfo;\n\n/**\n * A description of an upstream host.\n */\nclass HostDescription {\npublic:\n  virtual ~HostDescription() = default;\n\n  /**\n   * @return whether the host is a canary.\n   */\n  virtual bool canary() const PURE;\n\n  /**\n   * Update the canary status of the host.\n   */\n  virtual void canary(bool is_canary) PURE;\n\n  /**\n   * @return the metadata associated with this host\n   */\n  virtual MetadataConstSharedPtr metadata() const PURE;\n\n  /**\n   * Set the current metadata.\n   */\n  virtual void metadata(MetadataConstSharedPtr new_metadata) PURE;\n\n  /**\n   * @return the cluster the host is a member of.\n   */\n  virtual const ClusterInfo& cluster() const PURE;\n\n  /**\n   * @return the host's outlier detection monitor.\n   */\n  virtual Outlier::DetectorHostMonitor& outlierDetector() const PURE;\n\n  /**\n   * @return the host's health checker monitor.\n   */\n  virtual HealthCheckHostMonitor& healthChecker() const PURE;\n\n  /**\n   * @return The hostname used as the host header for health checking.\n   */\n  virtual const std::string& hostnameForHealthChecks() const PURE;\n\n  /**\n   * @return the hostname associated with the host if any.\n   * Empty string \"\" indicates that hostname is not a DNS name.\n   */\n  virtual const std::string& hostname() const PURE;\n\n  /**\n   * @return the transport socket factory 
responsible for this host.\n   */\n  virtual Network::TransportSocketFactory& transportSocketFactory() const PURE;\n\n  /**\n   * @return the address used to connect to the host.\n   */\n  virtual Network::Address::InstanceConstSharedPtr address() const PURE;\n\n  /**\n   * @return host specific stats.\n   */\n  virtual HostStats& stats() const PURE;\n\n  /**\n   * @return the locality of the host (deployment specific). This will be the default instance if\n   *         unknown.\n   */\n  virtual const envoy::config::core::v3::Locality& locality() const PURE;\n\n  /**\n   * @return the human readable name of the host's locality zone as a StatName.\n   */\n  virtual Stats::StatName localityZoneStatName() const PURE;\n\n  /**\n   * @return the address used to health check the host.\n   */\n  virtual Network::Address::InstanceConstSharedPtr healthCheckAddress() const PURE;\n\n  /**\n   * @return the priority of the host.\n   */\n  virtual uint32_t priority() const PURE;\n\n  /**\n   * Set the current priority.\n   */\n  virtual void priority(uint32_t) PURE;\n};\n\nusing HostDescriptionConstSharedPtr = std::shared_ptr<const HostDescription>;\n\n#define ALL_TRANSPORT_SOCKET_MATCH_STATS(COUNTER) COUNTER(total_match_count)\n\n/**\n * The stats for transport socket match.\n */\nstruct TransportSocketMatchStats {\n  ALL_TRANSPORT_SOCKET_MATCH_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Library to determine what transport socket configuration to use for a given host.\n */\nclass TransportSocketMatcher {\npublic:\n  struct MatchData {\n    MatchData(Network::TransportSocketFactory& factory, TransportSocketMatchStats& stats,\n              std::string name)\n        : factory_(factory), stats_(stats), name_(std::move(name)) {}\n    Network::TransportSocketFactory& factory_;\n    TransportSocketMatchStats& stats_;\n    std::string name_;\n  };\n  virtual ~TransportSocketMatcher() = default;\n\n  /**\n   * Resolve the transport socket configuration for a particular host.\n   * 
@param metadata the metadata of the given host.\n   * @return the match information of the transport socket selected.\n   */\n  virtual MatchData resolve(const envoy::config::core::v3::Metadata* metadata) const PURE;\n};\n\nusing TransportSocketMatcherPtr = std::unique_ptr<TransportSocketMatcher>;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/load_balancer.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/upstream/types.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Context information passed to a load balancer to use when choosing a host. Not all load\n * balancers make use of all context information.\n */\nclass LoadBalancerContext {\npublic:\n  virtual ~LoadBalancerContext() = default;\n\n  /**\n   * Compute and return an optional hash key to use during load balancing. This\n   * method may modify internal state so it should only be called once per\n   * routing attempt.\n   * @return absl::optional<uint64_t> the optional hash key to use.\n   */\n  virtual absl::optional<uint64_t> computeHashKey() PURE;\n\n  /**\n   * @return Router::MetadataMatchCriteria* metadata for use in selecting a subset of hosts\n   *         during load balancing.\n   */\n  virtual const Router::MetadataMatchCriteria* metadataMatchCriteria() PURE;\n\n  /**\n   * @return const Network::Connection* the incoming connection or nullptr to use during load\n   * balancing.\n   */\n  virtual const Network::Connection* downstreamConnection() const PURE;\n\n  /**\n   * @return const Http::HeaderMap* the incoming headers or nullptr to use during load\n   * balancing.\n   */\n  virtual const Http::RequestHeaderMap* downstreamHeaders() const PURE;\n\n  /**\n   * Called to retrieve a reference to the priority load data that should be used when selecting a\n   * priority. 
Implementations may return the provided original reference to make no changes, or\n   * return a reference to alternative PriorityLoad held internally.\n   *\n   * @param priority_state current priority state of the cluster being being load balanced.\n   * @param original_priority_load the cached priority load for the cluster being load balanced.\n   * @param priority_mapping_func see @Upstream::RetryPriority::PriorityMappingFunc.\n   * @return a reference to the priority load data that should be used to select a priority.\n   *\n   */\n  virtual const HealthyAndDegradedLoad& determinePriorityLoad(\n      const PrioritySet& priority_set, const HealthyAndDegradedLoad& original_priority_load,\n      const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) PURE;\n\n  /**\n   * Called to determine whether we should reperform host selection. The load balancer\n   * will retry host selection until either this function returns true or hostSelectionRetryCount is\n   * reached.\n   */\n  virtual bool shouldSelectAnotherHost(const Host& host) PURE;\n\n  /**\n   * Called to determine how many times host selection should be retried until the filter is\n   * ignored.\n   */\n  virtual uint32_t hostSelectionRetryCount() const PURE;\n\n  /**\n   * Returns the set of socket options which should be applied on upstream connections\n   */\n  virtual Network::Socket::OptionsSharedPtr upstreamSocketOptions() const PURE;\n\n  /**\n   * Returns the transport socket options which should be applied on upstream connections\n   */\n  virtual Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const PURE;\n};\n\n/**\n * Abstract load balancing interface.\n */\nclass LoadBalancer {\npublic:\n  virtual ~LoadBalancer() = default;\n\n  /**\n   * Ask the load balancer for the next host to use depending on the underlying LB algorithm.\n   * @param context supplies the load balancer context. 
Not all load balancers make use of all\n   *        context information. Load balancers should be written to assume that context information\n   *        is missing and use sensible defaults.\n   */\n  virtual HostConstSharedPtr chooseHost(LoadBalancerContext* context) PURE;\n\n  /**\n   * Returns a best effort prediction of the next host to be picked, or nullptr if not predictable.\n   * Advances with subsequent calls, so while the first call will return the next host to be picked,\n   * a subsequent call will return the second host to be picked.\n   * @param context supplies the context which is used in host selection.\n   */\n  virtual HostConstSharedPtr peekAnotherHost(LoadBalancerContext* context) PURE;\n};\n\nusing LoadBalancerPtr = std::unique_ptr<LoadBalancer>;\n\n/**\n * Factory for load balancers.\n */\nclass LoadBalancerFactory {\npublic:\n  virtual ~LoadBalancerFactory() = default;\n\n  /**\n   * @return LoadBalancerPtr a new load balancer.\n   */\n  virtual LoadBalancerPtr create() PURE;\n};\n\nusing LoadBalancerFactorySharedPtr = std::shared_ptr<LoadBalancerFactory>;\n\n/**\n * A thread aware load balancer is a load balancer that is global to all workers on behalf of a\n * cluster. These load balancers are harder to write so not every load balancer has to be one.\n * If a load balancer is a thread aware load balancer, the following semantics are used:\n * 1) A single instance is created on the main thread.\n * 2) The shared factory is passed to all workers.\n * 3) Every time there is a host set update on the main thread, all workers will create a new\n *    worker local load balancer via the factory.\n *\n * The above semantics mean that any global state in the factory must be protected by appropriate\n * locks. Additionally, the factory *must not* refer back to the owning thread aware load\n * balancer. If a cluster is removed via CDS, the thread aware load balancer can be destroyed\n * before cluster destruction reaches each worker. 
See the ring hash load balancer for one\n * example of how this pattern is used in practice. The common expected pattern is that the\n * factory will be consuming shared immutable state from the main thread.\n *\n * TODO(mattklein123): The reason that locking is used in the above threading model vs. pure TLS\n * has to do with the lack of a TLS function that does the following:\n * 1) Create a per-worker data structure on the main thread. E.g., allocate 4 objects for 4\n *    workers.\n * 2) Then fan those objects out to each worker.\n * With the existence of a function like that, the callback locking from the worker to the main\n * thread could be removed. We can look at this in a follow up. The reality though is that the\n * locking is currently only used to protect some small bits of data on host set update and will\n * never be contended.\n */\nclass ThreadAwareLoadBalancer {\npublic:\n  virtual ~ThreadAwareLoadBalancer() = default;\n\n  /**\n   * @return LoadBalancerFactorySharedPtr the shared factory to use for creating new worker local\n   * load balancers.\n   */\n  virtual LoadBalancerFactorySharedPtr factory() PURE;\n\n  /**\n   * When a thread aware load balancer is constructed, it should return nullptr for any created\n   * load balancer chooseHost() calls. Once initialize is called, the load balancer should\n   * instantiate any needed structures and prepare for further updates. The cluster manager\n   * will do this at the appropriate time.\n   */\n  virtual void initialize() PURE;\n};\n\nusing ThreadAwareLoadBalancerPtr = std::unique_ptr<ThreadAwareLoadBalancer>;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/load_balancer_type.h",
    "content": "#pragma once\n\n#include <set>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Type of load balancing to perform.\n */\nenum class LoadBalancerType {\n  RoundRobin,\n  LeastRequest,\n  Random,\n  RingHash,\n  OriginalDst,\n  Maglev,\n  ClusterProvided\n};\n\n/**\n * Subset selector configuration\n */\nclass SubsetSelector {\npublic:\n  virtual ~SubsetSelector() = default;\n\n  /**\n   * @return keys defined for this selector\n   */\n  virtual const std::set<std::string>& selectorKeys() const PURE;\n\n  /**\n   * @return fallback policy defined for this selector, or NOT_DEFINED\n   */\n  virtual envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::\n      LbSubsetSelectorFallbackPolicy\n      fallbackPolicy() const PURE;\n\n  /**\n   * @return fallback keys subset defined for this selector, or empty set\n   */\n  virtual const std::set<std::string>& fallbackKeysSubset() const PURE;\n\n  virtual bool singleHostPerSubset() const PURE;\n};\n\nusing SubsetSelectorPtr = std::shared_ptr<SubsetSelector>;\n\n/**\n * Load Balancer subset configuration.\n */\nclass LoadBalancerSubsetInfo {\npublic:\n  virtual ~LoadBalancerSubsetInfo() = default;\n\n  /**\n   * @return bool true if load balancer subsets are configured.\n   */\n  virtual bool isEnabled() const PURE;\n\n  /**\n   * @return LbSubsetFallbackPolicy the fallback policy used when\n   * route metadata does not match any subset.\n   */\n  virtual envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetFallbackPolicy\n  fallbackPolicy() const PURE;\n\n  /**\n   * @return ProtobufWkt::Struct the struct describing the metadata for a\n   *         host to be included in the default subset.\n   */\n  virtual const ProtobufWkt::Struct& defaultSubset() const PURE;\n\n  /*\n   * @return const 
std::vector<std::set<std::string>>& a vector of\n   * sorted keys used to define load balancer subsets.\n   */\n  virtual const std::vector<SubsetSelectorPtr>& subsetSelectors() const PURE;\n\n  /*\n   * @return bool whether routing to subsets should take locality weights into account.\n   */\n  virtual bool localityWeightAware() const PURE;\n\n  /*\n   * @return bool whether the locality weights should be scaled to compensate for the\n   * fraction of hosts removed from the original host set.\n   */\n  virtual bool scaleLocalityWeight() const PURE;\n\n  /*\n   * @return bool whether to attempt to select a host from the entire cluster if host\n   * selection from the fallback subset fails.\n   */\n  virtual bool panicModeAny() const PURE;\n\n  /*\n   * @return bool whether matching metadata should attempt to match against any of the\n   * elements in a list value defined in endpoint metadata.\n   */\n  virtual bool listAsAny() const PURE;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/locality.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n// We use a tuple representation for hashing/equality/comparison, since this\n// ensures we are not subject to proto nuances like unknown fields (e.g. from\n// original type information annotations).\nusing LocalityTuple = std::tuple<const std::string&, const std::string&, const std::string&>;\n\nstruct LocalityHash {\n  size_t operator()(const envoy::config::core::v3::Locality& locality) const {\n    return absl::Hash<LocalityTuple>()({locality.region(), locality.zone(), locality.sub_zone()});\n  }\n};\n\nstruct LocalityEqualTo {\n  bool operator()(const envoy::config::core::v3::Locality& lhs,\n                  const envoy::config::core::v3::Locality& rhs) const {\n    const LocalityTuple lhs_tuple = LocalityTuple(lhs.region(), lhs.zone(), lhs.sub_zone());\n    const LocalityTuple rhs_tuple = LocalityTuple(rhs.region(), rhs.zone(), rhs.sub_zone());\n    return lhs_tuple == rhs_tuple;\n  }\n};\n\nstruct LocalityLess {\n  bool operator()(const envoy::config::core::v3::Locality& lhs,\n                  const envoy::config::core::v3::Locality& rhs) const {\n    const LocalityTuple lhs_tuple = LocalityTuple(lhs.region(), lhs.zone(), lhs.sub_zone());\n    const LocalityTuple rhs_tuple = LocalityTuple(rhs.region(), rhs.zone(), rhs.sub_zone());\n    return lhs_tuple < rhs_tuple;\n  }\n};\n\n// For tests etc. where this is convenient.\nstatic inline envoy::config::core::v3::Locality\nLocality(const std::string& region, const std::string& zone, const std::string sub_zone) {\n  envoy::config::core::v3::Locality locality;\n  locality.set_region(region);\n  locality.set_zone(zone);\n  locality.set_sub_zone(sub_zone);\n  return locality;\n}\n\n} // namespace Upstream\n} // namespace Envoy\n\n// Something heinous this way comes. 
Required to allow == for LocalityWeightsMap.h in eds.h.\nnamespace envoy {\nnamespace config {\nnamespace core {\nnamespace v3 {\n\ninline bool operator==(const envoy::config::core::v3::Locality& x,\n                       const envoy::config::core::v3::Locality& y) {\n  return Envoy::Upstream::LocalityEqualTo()(x, y);\n}\n\n} // namespace v3\n} // namespace core\n} // namespace config\n} // namespace envoy\n"
  },
  {
    "path": "include/envoy/upstream/outlier_detection.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/data/cluster/v2alpha/outlier_detection_event.pb.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass Host;\nusing HostSharedPtr = std::shared_ptr<Host>;\n\nclass HostDescription;\nusing HostDescriptionConstSharedPtr = std::shared_ptr<const HostDescription>;\n\nnamespace Outlier {\n\n/**\n * Non-HTTP result of requests/operations.\n */\nenum class Result {\n  // Local origin errors detected by Envoy.\n  LocalOriginTimeout,             // Timed out while connecting or executing a request.\n  LocalOriginConnectFailed,       // Remote host rejected the connection.\n  LocalOriginConnectSuccess,      // Successfully established a connection to upstream host.\n                                  // Use this code when there is another protocol on top of\n                                  // transport protocol. For example HTTP runs on top of tcp.\n                                  // The same for redis. It first establishes TCP and then runs\n                                  // a transaction.\n  LocalOriginConnectSuccessFinal, // Successfully established a connection to upstream host\n                                  // Use this code when there is no other protocol on top of the\n                                  // protocol used by a filter. For example tcp_proxy filter\n                                  // serves only tcp level. There is no other protocol on top of\n                                  // tcp which the tcp_proxy filter is aware of.\n\n  // The entries below only make sense when Envoy understands requests/responses for the\n  // protocol being proxied. 
They do not make sense for TcpProxy, for example.\n  // External origin errors.\n  ExtOriginRequestFailed, // The server indicated it cannot process a request\n  ExtOriginRequestSuccess // Request was completed successfully.\n};\n\n/**\n * Monitor for per host data. Proxy filters should send pertinent data when available.\n */\nclass DetectorHostMonitor {\npublic:\n  // Types of Success Rate monitors.\n  enum class SuccessRateMonitorType { ExternalOrigin, LocalOrigin };\n\n  virtual ~DetectorHostMonitor() = default;\n\n  /**\n   * @return the number of times this host has been ejected.\n   */\n  virtual uint32_t numEjections() PURE;\n\n  /**\n   * Add an HTTP response code for a host.\n   */\n  virtual void putHttpResponseCode(uint64_t code) PURE;\n\n  /**\n   * Add a non-HTTP result for a host.\n   * Some non-HTTP codes like TIMEOUT may require special mapping to HTTP code\n   * and such code may be passed as optional parameter.\n   */\n  virtual void putResult(Result result, absl::optional<uint64_t> code) PURE;\n\n  /**\n   * Wrapper around putResult with 2 params when mapping to HTTP code is not\n   * required.\n   */\n  void putResult(Result result) { putResult(result, absl::nullopt); }\n\n  /**\n   * Add a response time for a host (in this case response time is generic and might be used for\n   * different operations including HTTP, Mongo, Redis, etc.).\n   */\n  virtual void putResponseTime(std::chrono::milliseconds time) PURE;\n\n  /**\n   * Get the time of last ejection.\n   * @return the last time this host was ejected, if the host has been ejected previously.\n   */\n  virtual const absl::optional<MonotonicTime>& lastEjectionTime() PURE;\n\n  /**\n   * Get the time of last unejection.\n   * @return the last time this host was unejected, if the host has been unejected previously.\n   */\n  virtual const absl::optional<MonotonicTime>& lastUnejectionTime() PURE;\n\n  /**\n   * @return the success rate of the host in the last calculated interval, in the range 
0-100.\n   *         -1 means that the host did not have enough request volume to calculate success rate\n   *         or the cluster did not have enough hosts to run through success rate outlier ejection.\n   * @param type specifies for which Success Rate Monitor the success rate value should be returned.\n   *         If the outlier detector is configured not to split external and local origin errors,\n   *         ExternalOrigin type returns success rate for all types of errors: external and local\n   * origin and LocalOrigin type returns -1. If the outlier detector is configured to split external\n   * and local origin errors, ExternalOrigin type returns success rate for external origin errors\n   * and LocalOrigin type returns success rate for local origin errors.\n   */\n  virtual double successRate(SuccessRateMonitorType type) const PURE;\n};\n\nusing DetectorHostMonitorPtr = std::unique_ptr<DetectorHostMonitor>;\n\n/**\n * Interface for an outlier detection engine. Uses per host data to determine which hosts in a\n * cluster are outliers and should be ejected.\n */\nclass Detector {\npublic:\n  virtual ~Detector() = default;\n\n  /**\n   * Outlier detection change state callback.\n   */\n  using ChangeStateCb = std::function<void(const HostSharedPtr& host)>;\n\n  /**\n   * Add a changed state callback to the detector. 
The callback will be called whenever any host\n   * changes state (either ejected or brought back in) due to outlier status.\n   */\n  virtual void addChangedStateCb(ChangeStateCb cb) PURE;\n\n  /**\n   * Returns the average success rate of the hosts in the Detector for the last aggregation\n   * interval.\n   * @return the average success rate, or -1 if there were not enough hosts with enough request\n   *         volume to proceed with success rate based outlier ejection.\n   * @param type - see DetectorHostMonitor::successRate.\n   */\n  virtual double successRateAverage(DetectorHostMonitor::SuccessRateMonitorType) const PURE;\n\n  /**\n   * Returns the success rate threshold used in the last interval. The threshold is used to eject\n   * hosts based on their success rate.\n   * @return the threshold, or -1 if there were not enough hosts with enough request volume to\n   *         proceed with success rate based outlier ejection.\n   */\n  virtual double\n      successRateEjectionThreshold(DetectorHostMonitor::SuccessRateMonitorType) const PURE;\n};\n\nusing DetectorSharedPtr = std::shared_ptr<Detector>;\n\n/**\n * Sink for outlier detection event logs.\n */\nclass EventLogger {\npublic:\n  virtual ~EventLogger() = default;\n\n  /**\n   * Log an ejection event.\n   * @param host supplies the host that generated the event.\n   * @param detector supplies the detector that is doing the ejection.\n   * @param type supplies the type of the event.\n   * @param enforced is true if the ejection took place; false, if only logging took place.\n   */\n  virtual void logEject(const HostDescriptionConstSharedPtr& host, Detector& detector,\n                        envoy::data::cluster::v2alpha::OutlierEjectionType type,\n                        bool enforced) PURE;\n\n  /**\n   * Log an unejection event.\n   * @param host supplies the host that generated the event.\n   */\n  virtual void logUneject(const HostDescriptionConstSharedPtr& host) PURE;\n};\n\nusing 
EventLoggerSharedPtr = std::shared_ptr<EventLogger>;\n\n} // namespace Outlier\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/resource_manager.h",
    "content": "#pragma once\n\n#include <cstddef>\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/resource.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Resource priority classes. The parallel NumResourcePriorities constant allows defining fixed\n * arrays for each priority, but does not pollute the enum.\n */\nenum class ResourcePriority { Default, High };\nconst size_t NumResourcePriorities = 2;\n\n/**\n * RAII wrapper that increments a resource on construction and decrements it on destruction.\n */\nclass ResourceAutoIncDec {\npublic:\n  ResourceAutoIncDec(ResourceLimit& resource) : resource_(resource) { resource_.inc(); }\n  ~ResourceAutoIncDec() { resource_.dec(); }\n\nprivate:\n  ResourceLimit& resource_;\n};\n\nusing ResourceAutoIncDecPtr = std::unique_ptr<ResourceAutoIncDec>;\n\n/**\n * Global resource manager that loosely synchronizes maximum connections, pending requests, etc.\n * NOTE: Currently this is used on a per cluster basis. 
In the future we may consider also chaining\n *       this with a global resource manager.\n */\nclass ResourceManager {\npublic:\n  virtual ~ResourceManager() = default;\n\n  /**\n   * @return ResourceLimit& active TCP connections and UDP sessions.\n   */\n  virtual ResourceLimit& connections() PURE;\n\n  /**\n   * @return ResourceLimit& active pending requests (requests that have not yet been attached to a\n   *         connection pool connection).\n   */\n  virtual ResourceLimit& pendingRequests() PURE;\n\n  /**\n   * @return ResourceLimit& active requests (requests that are currently bound to a connection pool\n   *         connection and are awaiting response).\n   */\n  virtual ResourceLimit& requests() PURE;\n\n  /**\n   * @return ResourceLimit& active retries.\n   */\n  virtual ResourceLimit& retries() PURE;\n\n  /**\n   * @return ResourceLimit& active connection pools.\n   */\n  virtual ResourceLimit& connectionPools() PURE;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/retry.h",
    "content": "#pragma once\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/upstream/types.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Used to optionally modify the PriorityLoad when selecting a priority for\n * a retry attempt.\n *\n * Each RetryPriority will live throughout the lifetime of a request and updated\n * with attempted hosts through onHostAttempted.\n */\nclass RetryPriority {\npublic:\n  virtual ~RetryPriority() = default;\n\n  /**\n   * Function that maps a HostDescription to it's effective priority level in a cluster.\n   * For most cluster types, the mapping is simply `return host.priority()`, but some\n   * cluster types require more complex mapping.\n   * @return either the effective priority, or absl::nullopt if the mapping cannot be determined,\n   *         which can happen if the host has been removed from the configurations since it was\n   *         used.\n   */\n  using PriorityMappingFunc =\n      std::function<absl::optional<uint32_t>(const Upstream::HostDescription&)>;\n\n  static absl::optional<uint32_t> defaultPriorityMapping(const Upstream::HostDescription& host) {\n    return host.priority();\n  }\n\n  /**\n   * Determines what PriorityLoad to use.\n   *\n   * @param priority_set current priority set of cluster.\n   * @param original_priority_load the unmodified HealthAndDegradedLoad.\n   * @param priority_mapping_func a callback to get the priority of a host that has\n   *        been attempted. This function may only be called on hosts that were\n   *        passed to calls to `onHostAttempted()` on this object.\n   * @return HealthAndDegradedLoad load that should be used for the next retry. Return\n   * original_priority_load if the original load should be used. 
a pointer to original_priority,\n   * original_degraded_priority if no changes should be made.\n   */\n  virtual const HealthyAndDegradedLoad&\n  determinePriorityLoad(const PrioritySet& priority_set,\n                        const HealthyAndDegradedLoad& original_priority_load,\n                        const PriorityMappingFunc& priority_mapping_func) PURE;\n\n  /**\n   * Called after a host has been attempted but before host selection for the next attempt has\n   * begun.\n   *\n   * @param attempted_host the host that was previously attempted.\n   */\n  virtual void onHostAttempted(HostDescriptionConstSharedPtr attempted_host) PURE;\n};\n\nusing RetryPrioritySharedPtr = std::shared_ptr<RetryPriority>;\n\n/**\n * Used to decide whether a selected host should be rejected during retries. Host selection will be\n * reattempted until either the host predicate accepts the host or a configured max number of\n * attempts is reached.\n *\n * Each RetryHostPredicate will live throughout the lifetime of a request and updated\n * with attempted hosts through onHostAttempted.\n */\nclass RetryHostPredicate {\npublic:\n  virtual ~RetryHostPredicate() = default;\n\n  /**\n   * Determines whether a host should be rejected during host selection.\n   *\n   * @param candidate_host the host to either reject or accept.\n   * @return whether the host should be rejected and host selection reattempted.\n   */\n  virtual bool shouldSelectAnotherHost(const Host& candidate_host) PURE;\n\n  /**\n   * Called after a host has been attempted but before host selection for the next attempt has\n   * begun.\n   *\n   * @param attempted_host the host that was previously attempted.\n   */\n  virtual void onHostAttempted(HostDescriptionConstSharedPtr attempted_host) PURE;\n};\n\nusing RetryHostPredicateSharedPtr = std::shared_ptr<RetryHostPredicate>;\n\n/**\n * Factory for RetryPriority.\n */\nclass RetryPriorityFactory : public Config::TypedFactory {\npublic:\n  ~RetryPriorityFactory() override = 
default;\n\n  virtual RetryPrioritySharedPtr\n  createRetryPriority(const Protobuf::Message& config,\n                      ProtobufMessage::ValidationVisitor& validation_visitor,\n                      uint32_t retry_count) PURE;\n\n  std::string category() const override { return \"envoy.retry_priorities\"; }\n};\n\n/**\n * Factory for RetryHostPredicate.\n */\nclass RetryHostPredicateFactory : public Config::TypedFactory {\npublic:\n  ~RetryHostPredicateFactory() override = default;\n\n  virtual RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message& config,\n                                                          uint32_t retry_count) PURE;\n\n  std::string category() const override { return \"envoy.retry_host_predicates\"; }\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/thread_local_cluster.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/upstream/load_balancer.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * A thread local cluster instance that can be used for direct load balancing and host set\n * interactions. In general, an instance of ThreadLocalCluster can only be safely used in the\n * direct call context after it is retrieved from the cluster manager. See ClusterManager::get()\n * for more information.\n */\nclass ThreadLocalCluster {\npublic:\n  virtual ~ThreadLocalCluster() = default;\n\n  /**\n   * @return const PrioritySet& the backing priority set.\n   */\n  virtual const PrioritySet& prioritySet() PURE;\n\n  /**\n   * @return ClusterInfoConstSharedPtr the info for this cluster. The info is safe to store beyond\n   * the lifetime of the ThreadLocalCluster instance itself.\n   */\n  virtual ClusterInfoConstSharedPtr info() PURE;\n\n  /**\n   * @return LoadBalancer& the backing load balancer.\n   */\n  virtual LoadBalancer& loadBalancer() PURE;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/types.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <vector>\n\n#include \"common/common/phantom.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n// Phantom type indicating that the type is related to load.\nstruct Load {};\n\n// Mapping from a priority to how much of the total traffic load should be directed to this\n// priority. For example, {50, 30, 20} means that 50% of traffic should go to P0, 30% to P1\n// and 20% to P2.\n//\n// This should either sum to 100 or consist of all zeros.\nusing PriorityLoad = Phantom<std::vector<uint32_t>, Load>;\n\n// PriorityLoad specific to degraded hosts.\nstruct DegradedLoad : PriorityLoad {\n  using PriorityLoad::PriorityLoad;\n};\n\n// PriorityLoad specific to healthy hosts.\nstruct HealthyLoad : PriorityLoad {\n  using PriorityLoad::PriorityLoad;\n};\n\nstruct HealthyAndDegradedLoad {\n  HealthyLoad healthy_priority_load_;\n  DegradedLoad degraded_priority_load_;\n};\n\n// Phantom type indicating that the type is related to host availability.\nstruct Availability {};\n\n// Mapping from a priority how available the given priority is, e.g., the ratio of healthy host to\n// total hosts.\nusing PriorityAvailability = Phantom<std::vector<uint32_t>, Availability>;\n\n// Availability specific to degraded hosts.\nstruct DegradedAvailability : PriorityAvailability {\n  using PriorityAvailability::PriorityAvailability;\n};\n\n// Availability specific to healthy hosts.\nstruct HealthyAvailability : PriorityAvailability {\n  using PriorityAvailability::PriorityAvailability;\n};\n\n// Phantom type indicating that the type is related to healthy hosts.\nstruct Healthy {};\n// Phantom type indicating that the type is related to degraded hosts.\nstruct Degraded {};\n// Phantom type indicating that the type is related to excluded hosts.\nstruct Excluded {};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "include/envoy/upstream/upstream.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/callback.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/ssl/context.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/upstream/health_check_host_monitor.h\"\n#include \"envoy/upstream/load_balancer_type.h\"\n#include \"envoy/upstream/locality.h\"\n#include \"envoy/upstream/outlier_detection.h\"\n#include \"envoy/upstream/resource_manager.h\"\n#include \"envoy/upstream/types.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * An upstream host.\n */\nclass Host : virtual public HostDescription {\npublic:\n  struct CreateConnectionData {\n    Network::ClientConnectionPtr connection_;\n    HostDescriptionConstSharedPtr host_description_;\n  };\n\n  // We use an X-macro here to make it easier to verify that all the enum values are accounted for.\n  // clang-format off\n#define HEALTH_FLAG_ENUM_VALUES(m)                                               \\\n  /* The host is currently failing active health checks. */                      \\\n  m(FAILED_ACTIVE_HC, 0x1)                                                       \\\n  /* The host is currently considered an outlier and has been ejected. */        \\\n  m(FAILED_OUTLIER_CHECK, 0x02)                                                  \\\n  /* The host is currently marked as unhealthy by EDS. 
*/                        \\\n  m(FAILED_EDS_HEALTH, 0x04)                                                     \\\n  /* The host is currently marked as degraded through active health checking. */ \\\n  m(DEGRADED_ACTIVE_HC, 0x08)                                                    \\\n  /* The host is currently marked as degraded by EDS. */                         \\\n  m(DEGRADED_EDS_HEALTH, 0x10)                                                   \\\n  /* The host is pending removal from discovery but is stabilized due to */      \\\n  /* active HC. */                                                               \\\n  m(PENDING_DYNAMIC_REMOVAL, 0x20)                                               \\\n  /* The host is pending its initial active health check. */                     \\\n  m(PENDING_ACTIVE_HC, 0x40)\n  // clang-format on\n\n#define DECLARE_ENUM(name, value) name = value,\n\n  enum class HealthFlag { HEALTH_FLAG_ENUM_VALUES(DECLARE_ENUM) };\n\n#undef DECLARE_ENUM\n\n  enum class ActiveHealthFailureType {\n    // The failure type is unknown, all hosts' failure types are initialized as UNKNOWN\n    UNKNOWN,\n    // The host is actively responding it's unhealthy\n    UNHEALTHY,\n    // The host is timing out\n    TIMEOUT,\n  };\n\n  /**\n   * @return host specific counters.\n   */\n  virtual std::vector<std::pair<absl::string_view, Stats::PrimitiveCounterReference>>\n  counters() const PURE;\n\n  /**\n   * Create a connection for this host.\n   * @param dispatcher supplies the owning dispatcher.\n   * @param options supplies the socket options that will be set on the new connection.\n   * @param transport_socket_options supplies the transport options that will be set on the new\n   * connection.\n   * @return the connection data which includes the raw network connection as well as the *real*\n   *         host that backs it. The reason why a 2nd host is returned is that some hosts are\n   *         logical and wrap multiple real network destinations. 
In this case, a different host\n   *         will be returned along with the connection vs. the host the method was called on.\n   *         If it matters, callers should not assume that the returned host will be the same.\n   */\n  virtual CreateConnectionData\n  createConnection(Event::Dispatcher& dispatcher,\n                   const Network::ConnectionSocket::OptionsSharedPtr& options,\n                   Network::TransportSocketOptionsSharedPtr transport_socket_options) const PURE;\n\n  /**\n   * Create a health check connection for this host.\n   * @param dispatcher supplies the owning dispatcher.\n   * @param transport_socket_options supplies the transport options that will be set on the new\n   * connection.\n   * @return the connection data.\n   */\n  virtual CreateConnectionData\n  createHealthCheckConnection(Event::Dispatcher& dispatcher,\n                              Network::TransportSocketOptionsSharedPtr transport_socket_options,\n                              const envoy::config::core::v3::Metadata* metadata) const PURE;\n\n  /**\n   * @return host specific gauges.\n   */\n  virtual std::vector<std::pair<absl::string_view, Stats::PrimitiveGaugeReference>>\n  gauges() const PURE;\n\n  /**\n   * Atomically clear a health flag for a host. Flags are specified in HealthFlags.\n   */\n  virtual void healthFlagClear(HealthFlag flag) PURE;\n\n  /**\n   * Atomically get whether a health flag is set for a host. Flags are specified in HealthFlags.\n   */\n  virtual bool healthFlagGet(HealthFlag flag) const PURE;\n\n  /**\n   * Atomically set a health flag for a host. Flags are specified in HealthFlags.\n   */\n  virtual void healthFlagSet(HealthFlag flag) PURE;\n\n  enum class Health {\n    /**\n     * Host is unhealthy and is not able to serve traffic. A host may be marked as unhealthy either\n     * through EDS or through active health checking.\n     */\n    Unhealthy,\n    /**\n     * Host is healthy, but degraded. 
It is able to serve traffic, but hosts that aren't degraded\n     * should be preferred. A host may be marked as degraded either through EDS or through active\n     * health checking.\n     */\n    Degraded,\n    /**\n     * Host is healthy and is able to serve traffic.\n     */\n    Healthy,\n  };\n\n  /**\n   * @return the health of the host.\n   */\n  virtual Health health() const PURE;\n\n  /**\n   * Returns the host's ActiveHealthFailureType. Types are specified in ActiveHealthFailureType.\n   */\n  virtual ActiveHealthFailureType getActiveHealthFailureType() const PURE;\n\n  /**\n   * Set the most recent health failure type for a host. Types are specified in\n   * ActiveHealthFailureType.\n   */\n  virtual void setActiveHealthFailureType(ActiveHealthFailureType flag) PURE;\n\n  /**\n   * Set the host's health checker monitor. Monitors are assumed to be thread safe, however\n   * a new monitor must be installed before the host is used across threads. Thus,\n   * this routine should only be called on the main thread before the host is used across threads.\n   */\n  virtual void setHealthChecker(HealthCheckHostMonitorPtr&& health_checker) PURE;\n\n  /**\n   * Set the host's outlier detector monitor. Outlier detector monitors are assumed to be thread\n   * safe, however a new outlier detector monitor must be installed before the host is used across\n   * threads. 
Thus, this routine should only be called on the main thread before the host is used\n   * across threads.\n   */\n  virtual void setOutlierDetector(Outlier::DetectorHostMonitorPtr&& outlier_detector) PURE;\n\n  /**\n   * @return the current load balancing weight of the host, in the range 1-128 (see\n   * envoy.api.v2.endpoint.Endpoint.load_balancing_weight).\n   */\n  virtual uint32_t weight() const PURE;\n\n  /**\n   * Set the current load balancing weight of the host, in the range 1-128 (see\n   * envoy.api.v2.endpoint.Endpoint.load_balancing_weight).\n   */\n  virtual void weight(uint32_t new_weight) PURE;\n\n  /**\n   * @return the current boolean value of host being in use.\n   */\n  virtual bool used() const PURE;\n\n  /**\n   * @param new_used supplies the new value of host being in use to be stored.\n   */\n  virtual void used(bool new_used) PURE;\n};\n\nusing HostConstSharedPtr = std::shared_ptr<const Host>;\n\nusing HostVector = std::vector<HostSharedPtr>;\nusing HealthyHostVector = Phantom<HostVector, Healthy>;\nusing DegradedHostVector = Phantom<HostVector, Degraded>;\nusing ExcludedHostVector = Phantom<HostVector, Excluded>;\nusing HostMap = absl::node_hash_map<std::string, Upstream::HostSharedPtr>;\nusing HostVectorSharedPtr = std::shared_ptr<HostVector>;\nusing HostVectorConstSharedPtr = std::shared_ptr<const HostVector>;\n\nusing HealthyHostVectorConstSharedPtr = std::shared_ptr<const HealthyHostVector>;\nusing DegradedHostVectorConstSharedPtr = std::shared_ptr<const DegradedHostVector>;\nusing ExcludedHostVectorConstSharedPtr = std::shared_ptr<const ExcludedHostVector>;\n\nusing HostListPtr = std::unique_ptr<HostVector>;\nusing LocalityWeightsMap =\n    absl::node_hash_map<envoy::config::core::v3::Locality, uint32_t, LocalityHash, LocalityEqualTo>;\nusing PriorityState = std::vector<std::pair<HostListPtr, LocalityWeightsMap>>;\n\n/**\n * Bucket hosts by locality.\n */\nclass HostsPerLocality {\npublic:\n  virtual ~HostsPerLocality() = default;\n\n  
/**\n   * @return bool is local locality one of the locality buckets? If so, the\n   *         local locality will be the first in the get() vector.\n   */\n  virtual bool hasLocalLocality() const PURE;\n\n  /**\n   * @return const std::vector<HostVector>& list of hosts organized per\n   *         locality. The local locality is the first entry if\n   *         hasLocalLocality() is true.\n   */\n  virtual const std::vector<HostVector>& get() const PURE;\n\n  /**\n   * Clone object with multiple filter predicates. Returns a vector of clones, each with host that\n   * match the provided predicates.\n   * @param predicates vector of predicates on Host entries.\n   * @return vector of HostsPerLocalityConstSharedPtr clones of the HostsPerLocality that match\n   *         hosts according to predicates.\n   */\n  virtual std::vector<std::shared_ptr<const HostsPerLocality>>\n  filter(const std::vector<std::function<bool(const Host&)>>& predicates) const PURE;\n\n  /**\n   * Clone object.\n   * @return HostsPerLocalityConstSharedPtr clone of the HostsPerLocality.\n   */\n  std::shared_ptr<const HostsPerLocality> clone() const {\n    return filter({[](const Host&) { return true; }})[0];\n  }\n};\n\nusing HostsPerLocalitySharedPtr = std::shared_ptr<HostsPerLocality>;\nusing HostsPerLocalityConstSharedPtr = std::shared_ptr<const HostsPerLocality>;\n\n// Weight for each locality index in HostsPerLocality.\nusing LocalityWeights = std::vector<uint32_t>;\nusing LocalityWeightsSharedPtr = std::shared_ptr<LocalityWeights>;\nusing LocalityWeightsConstSharedPtr = std::shared_ptr<const LocalityWeights>;\n\n/**\n * Base host set interface. 
This contains all of the endpoints for a given LocalityLbEndpoints\n * priority level.\n */\n// TODO(snowp): Remove the const ref accessors in favor of the shared_ptr ones.\nclass HostSet {\npublic:\n  virtual ~HostSet() = default;\n\n  /**\n   * @return all hosts that make up the set at the current time.\n   */\n  virtual const HostVector& hosts() const PURE;\n\n  /**\n   * @return a shared ptr to the vector returned by hosts().\n   */\n  virtual HostVectorConstSharedPtr hostsPtr() const PURE;\n\n  /**\n   * @return all healthy hosts contained in the set at the current time. NOTE: This set is\n   *         eventually consistent. There is a time window where a host in this set may become\n   *         unhealthy and calling healthy() on it will return false. Code should be written to\n   *         deal with this case if it matters.\n   */\n  virtual const HostVector& healthyHosts() const PURE;\n\n  /**\n   * @return a shared ptr to the vector returned by healthyHosts().\n   */\n  virtual HealthyHostVectorConstSharedPtr healthyHostsPtr() const PURE;\n\n  /**\n   * @return all degraded hosts contained in the set at the current time. NOTE: This set is\n   *         eventually consistent. There is a time window where a host in this set may become\n   *         undegraded and calling degraded() on it will return false. Code should be written to\n   *         deal with this case if it matters.\n   */\n  virtual const HostVector& degradedHosts() const PURE;\n\n  /**\n   * @return a shared ptr to the vector returned by degradedHosts().\n   */\n  virtual DegradedHostVectorConstSharedPtr degradedHostsPtr() const PURE;\n\n  /*\n   * @return all excluded hosts contained in the set at the current time. 
Excluded hosts should be\n   * ignored when computing load balancing weights, but may overlap with hosts in hosts().\n   */\n  virtual const HostVector& excludedHosts() const PURE;\n\n  /**\n   * @return a shared ptr to the vector returned by excludedHosts().\n   */\n  virtual ExcludedHostVectorConstSharedPtr excludedHostsPtr() const PURE;\n\n  /**\n   * @return hosts per locality.\n   */\n  virtual const HostsPerLocality& hostsPerLocality() const PURE;\n\n  /**\n   * @return a shared ptr to the HostsPerLocality returned by hostsPerLocality().\n   */\n  virtual HostsPerLocalityConstSharedPtr hostsPerLocalityPtr() const PURE;\n\n  /**\n   * @return same as hostsPerLocality but only contains healthy hosts.\n   */\n  virtual const HostsPerLocality& healthyHostsPerLocality() const PURE;\n\n  /**\n   * @return a shared ptr to the HostsPerLocality returned by healthyHostsPerLocality().\n   */\n  virtual HostsPerLocalityConstSharedPtr healthyHostsPerLocalityPtr() const PURE;\n\n  /**\n   * @return same as hostsPerLocality but only contains degraded hosts.\n   */\n  virtual const HostsPerLocality& degradedHostsPerLocality() const PURE;\n\n  /**\n   * @return a shared ptr to the HostsPerLocality returned by degradedHostsPerLocality().\n   */\n  virtual HostsPerLocalityConstSharedPtr degradedHostsPerLocalityPtr() const PURE;\n\n  /**\n   * @return same as hostsPerLocality but only contains excluded hosts.\n   */\n  virtual const HostsPerLocality& excludedHostsPerLocality() const PURE;\n\n  /**\n   * @return a shared ptr to the HostsPerLocality returned by excludedHostsPerLocality().\n   */\n  virtual HostsPerLocalityConstSharedPtr excludedHostsPerLocalityPtr() const PURE;\n\n  /**\n   * @return weights for each locality in the host set.\n   */\n  virtual LocalityWeightsConstSharedPtr localityWeights() const PURE;\n\n  /**\n   * @return next locality index to route to if performing locality weighted balancing\n   * against healthy hosts.\n   */\n  virtual 
absl::optional<uint32_t> chooseHealthyLocality() PURE;\n\n  /**\n   * @return next locality index to route to if performing locality weighted balancing\n   * against degraded hosts.\n   */\n  virtual absl::optional<uint32_t> chooseDegradedLocality() PURE;\n\n  /**\n   * @return uint32_t the priority of this host set.\n   */\n  virtual uint32_t priority() const PURE;\n\n  /**\n   * @return uint32_t the overprovisioning factor of this host set.\n   */\n  virtual uint32_t overprovisioningFactor() const PURE;\n};\n\nusing HostSetPtr = std::unique_ptr<HostSet>;\n\n/**\n * This class contains all of the HostSets for a given cluster grouped by priority, for\n * ease of load balancing.\n */\nclass PrioritySet {\npublic:\n  using MemberUpdateCb =\n      std::function<void(const HostVector& hosts_added, const HostVector& hosts_removed)>;\n\n  using PriorityUpdateCb = std::function<void(uint32_t priority, const HostVector& hosts_added,\n                                              const HostVector& hosts_removed)>;\n\n  virtual ~PrioritySet() = default;\n\n  /**\n   * Install a callback that will be invoked when any of the HostSets in the PrioritySet changes.\n   * hosts_added and hosts_removed will only be populated when a host is added or completely removed\n   * from the PrioritySet.\n   * This includes when a new HostSet is created.\n   *\n   * @param callback supplies the callback to invoke.\n   * @return Common::CallbackHandle* a handle which can be used to unregister the callback.\n   */\n  virtual Common::CallbackHandle* addMemberUpdateCb(MemberUpdateCb callback) const PURE;\n\n  /**\n   * Install a callback that will be invoked when a host set changes. Triggers when any change\n   * happens to the hosts within the host set. 
If hosts are added/removed from the host set, the\n   * added/removed hosts will be passed to the callback.\n   *\n   * @param callback supplies the callback to invoke.\n   * @return Common::CallbackHandle* a handle which can be used to unregister the callback.\n   */\n  virtual Common::CallbackHandle* addPriorityUpdateCb(PriorityUpdateCb callback) const PURE;\n\n  /**\n   * @return const std::vector<HostSetPtr>& the host sets, ordered by priority.\n   */\n  virtual const std::vector<HostSetPtr>& hostSetsPerPriority() const PURE;\n\n  /**\n   * Parameter class for updateHosts.\n   */\n  struct UpdateHostsParams {\n    HostVectorConstSharedPtr hosts;\n    HealthyHostVectorConstSharedPtr healthy_hosts;\n    DegradedHostVectorConstSharedPtr degraded_hosts;\n    ExcludedHostVectorConstSharedPtr excluded_hosts;\n    HostsPerLocalityConstSharedPtr hosts_per_locality;\n    HostsPerLocalityConstSharedPtr healthy_hosts_per_locality;\n    HostsPerLocalityConstSharedPtr degraded_hosts_per_locality;\n    HostsPerLocalityConstSharedPtr excluded_hosts_per_locality;\n  };\n\n  /**\n   * Updates the hosts in a given host set.\n   *\n   * @param priority the priority of the host set to update.\n   * @param update_hosts_param supplies the list of hosts and hosts per locality.\n   * @param locality_weights supplies a map from locality to associated weight.\n   * @param hosts_added supplies the hosts added since the last update.\n   * @param hosts_removed supplies the hosts removed since the last update.\n   * @param overprovisioning_factor if presents, overwrites the current overprovisioning_factor.\n   */\n  virtual void updateHosts(uint32_t priority, UpdateHostsParams&& update_host_params,\n                           LocalityWeightsConstSharedPtr locality_weights,\n                           const HostVector& hosts_added, const HostVector& hosts_removed,\n                           absl::optional<uint32_t> overprovisioning_factor) PURE;\n\n  /**\n   * Callback provided during batch 
updates that can be used to update hosts.\n   */\n  class HostUpdateCb {\n  public:\n    virtual ~HostUpdateCb() = default;\n    /**\n     * Updates the hosts in a given host set.\n     *\n     * @param priority the priority of the host set to update.\n     * @param update_hosts_param supplies the list of hosts and hosts per locality.\n     * @param locality_weights supplies a map from locality to associated weight.\n     * @param hosts_added supplies the hosts added since the last update.\n     * @param hosts_removed supplies the hosts removed since the last update.\n     * @param overprovisioning_factor if presents, overwrites the current overprovisioning_factor.\n     */\n    virtual void updateHosts(uint32_t priority, UpdateHostsParams&& update_host_params,\n                             LocalityWeightsConstSharedPtr locality_weights,\n                             const HostVector& hosts_added, const HostVector& hosts_removed,\n                             absl::optional<uint32_t> overprovisioning_factor) PURE;\n  };\n\n  /**\n   * Callback that provides the mechanism for performing batch host updates for a PrioritySet.\n   */\n  class BatchUpdateCb {\n  public:\n    virtual ~BatchUpdateCb() = default;\n\n    /**\n     * Performs a batch host update. Implementors should use the provided callback to update hosts\n     * in the PrioritySet.\n     */\n    virtual void batchUpdate(HostUpdateCb& host_update_cb) PURE;\n  };\n\n  /**\n   * Allows updating hosts for multiple priorities at once, deferring the MemberUpdateCb from\n   * triggering until all priorities have been updated. The resulting callback will take into\n   * account hosts moved from one priority to another.\n   *\n   * @param callback callback to use to add hosts.\n   */\n  virtual void batchHostUpdate(BatchUpdateCb& callback) PURE;\n};\n\n/**\n * All cluster stats. 
@see stats_macros.h\n */\n#define ALL_CLUSTER_STATS(COUNTER, GAUGE, HISTOGRAM)                                               \\\n  COUNTER(assignment_stale)                                                                        \\\n  COUNTER(assignment_timeout_received)                                                             \\\n  COUNTER(bind_errors)                                                                             \\\n  COUNTER(lb_healthy_panic)                                                                        \\\n  COUNTER(lb_local_cluster_not_ok)                                                                 \\\n  COUNTER(lb_recalculate_zone_structures)                                                          \\\n  COUNTER(lb_subsets_created)                                                                      \\\n  COUNTER(lb_subsets_fallback)                                                                     \\\n  COUNTER(lb_subsets_fallback_panic)                                                               \\\n  COUNTER(lb_subsets_removed)                                                                      \\\n  COUNTER(lb_subsets_selected)                                                                     \\\n  COUNTER(lb_zone_cluster_too_small)                                                               \\\n  COUNTER(lb_zone_no_capacity_left)                                                                \\\n  COUNTER(lb_zone_number_differs)                                                                  \\\n  COUNTER(lb_zone_routing_all_directly)                                                            \\\n  COUNTER(lb_zone_routing_cross_zone)                                                              \\\n  COUNTER(lb_zone_routing_sampled)                                                                 \\\n  COUNTER(membership_change)                                                                       \\\n  
COUNTER(original_dst_host_invalid)                                                               \\\n  COUNTER(retry_or_shadow_abandoned)                                                               \\\n  COUNTER(update_attempt)                                                                          \\\n  COUNTER(update_empty)                                                                            \\\n  COUNTER(update_failure)                                                                          \\\n  COUNTER(update_no_rebuild)                                                                       \\\n  COUNTER(update_success)                                                                          \\\n  COUNTER(upstream_cx_close_notify)                                                                \\\n  COUNTER(upstream_cx_connect_attempts_exceeded)                                                   \\\n  COUNTER(upstream_cx_connect_fail)                                                                \\\n  COUNTER(upstream_cx_connect_timeout)                                                             \\\n  COUNTER(upstream_cx_destroy)                                                                     \\\n  COUNTER(upstream_cx_destroy_local)                                                               \\\n  COUNTER(upstream_cx_destroy_local_with_active_rq)                                                \\\n  COUNTER(upstream_cx_destroy_remote)                                                              \\\n  COUNTER(upstream_cx_destroy_remote_with_active_rq)                                               \\\n  COUNTER(upstream_cx_destroy_with_active_rq)                                                      \\\n  COUNTER(upstream_cx_http1_total)                                                                 \\\n  COUNTER(upstream_cx_http2_total)                                                                 \\\n  COUNTER(upstream_cx_idle_timeout)          
                                                      \\\n  COUNTER(upstream_cx_max_requests)                                                                \\\n  COUNTER(upstream_cx_none_healthy)                                                                \\\n  COUNTER(upstream_cx_overflow)                                                                    \\\n  COUNTER(upstream_cx_pool_overflow)                                                               \\\n  COUNTER(upstream_cx_protocol_error)                                                              \\\n  COUNTER(upstream_cx_rx_bytes_total)                                                              \\\n  COUNTER(upstream_cx_total)                                                                       \\\n  COUNTER(upstream_cx_tx_bytes_total)                                                              \\\n  COUNTER(upstream_flow_control_backed_up_total)                                                   \\\n  COUNTER(upstream_flow_control_drained_total)                                                     \\\n  COUNTER(upstream_flow_control_paused_reading_total)                                              \\\n  COUNTER(upstream_flow_control_resumed_reading_total)                                             \\\n  COUNTER(upstream_internal_redirect_failed_total)                                                 \\\n  COUNTER(upstream_internal_redirect_succeeded_total)                                              \\\n  COUNTER(upstream_rq_cancelled)                                                                   \\\n  COUNTER(upstream_rq_completed)                                                                   \\\n  COUNTER(upstream_rq_maintenance_mode)                                                            \\\n  COUNTER(upstream_rq_max_duration_reached)                                                        \\\n  COUNTER(upstream_rq_pending_failure_eject)                                            
           \\\n  COUNTER(upstream_rq_pending_overflow)                                                            \\\n  COUNTER(upstream_rq_pending_total)                                                               \\\n  COUNTER(upstream_rq_per_try_timeout)                                                             \\\n  COUNTER(upstream_rq_retry)                                                                       \\\n  COUNTER(upstream_rq_retry_backoff_exponential)                                                   \\\n  COUNTER(upstream_rq_retry_backoff_ratelimited)                                                   \\\n  COUNTER(upstream_rq_retry_limit_exceeded)                                                        \\\n  COUNTER(upstream_rq_retry_overflow)                                                              \\\n  COUNTER(upstream_rq_retry_success)                                                               \\\n  COUNTER(upstream_rq_rx_reset)                                                                    \\\n  COUNTER(upstream_rq_timeout)                                                                     \\\n  COUNTER(upstream_rq_total)                                                                       \\\n  COUNTER(upstream_rq_tx_reset)                                                                    \\\n  GAUGE(lb_subsets_active, Accumulate)                                                             \\\n  GAUGE(max_host_weight, NeverImport)                                                              \\\n  GAUGE(membership_degraded, NeverImport)                                                          \\\n  GAUGE(membership_excluded, NeverImport)                                                          \\\n  GAUGE(membership_healthy, NeverImport)                                                           \\\n  GAUGE(membership_total, NeverImport)                                                             \\\n  GAUGE(upstream_cx_active, 
Accumulate)                                                            \\\n  GAUGE(upstream_cx_rx_bytes_buffered, Accumulate)                                                 \\\n  GAUGE(upstream_cx_tx_bytes_buffered, Accumulate)                                                 \\\n  GAUGE(upstream_rq_active, Accumulate)                                                            \\\n  GAUGE(upstream_rq_pending_active, Accumulate)                                                    \\\n  GAUGE(version, NeverImport)                                                                      \\\n  HISTOGRAM(upstream_cx_connect_ms, Milliseconds)                                                  \\\n  HISTOGRAM(upstream_cx_length_ms, Milliseconds)\n\n/**\n * All cluster load report stats. These are only use for EDS load reporting and not sent to the\n * stats sink. See envoy.api.v2.endpoint.ClusterStats for the definition of upstream_rq_dropped.\n * These are latched by LoadStatsReporter, independent of the normal stats sink flushing.\n */\n#define ALL_CLUSTER_LOAD_REPORT_STATS(COUNTER) COUNTER(upstream_rq_dropped)\n\n/**\n * Cluster circuit breakers stats. 
Open circuit breaker stats and remaining resource stats\n * can be handled differently by passing in different macros.\n */\n#define ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(OPEN_GAUGE, REMAINING_GAUGE)                            \\\n  OPEN_GAUGE(cx_open, Accumulate)                                                                  \\\n  OPEN_GAUGE(cx_pool_open, Accumulate)                                                             \\\n  OPEN_GAUGE(rq_open, Accumulate)                                                                  \\\n  OPEN_GAUGE(rq_pending_open, Accumulate)                                                          \\\n  OPEN_GAUGE(rq_retry_open, Accumulate)                                                            \\\n  REMAINING_GAUGE(remaining_cx, Accumulate)                                                        \\\n  REMAINING_GAUGE(remaining_cx_pools, Accumulate)                                                  \\\n  REMAINING_GAUGE(remaining_pending, Accumulate)                                                   \\\n  REMAINING_GAUGE(remaining_retries, Accumulate)                                                   \\\n  REMAINING_GAUGE(remaining_rq, Accumulate)\n\n/**\n * All stats tracking request/response headers and body sizes. Not used by default.\n */\n#define ALL_CLUSTER_REQUEST_RESPONSE_SIZE_STATS(HISTOGRAM)                                         \\\n  HISTOGRAM(upstream_rq_headers_size, Bytes)                                                       \\\n  HISTOGRAM(upstream_rq_body_size, Bytes)                                                          \\\n  HISTOGRAM(upstream_rs_headers_size, Bytes)                                                       \\\n  HISTOGRAM(upstream_rs_body_size, Bytes)\n\n/**\n * All stats around timeout budgets. 
Not used by default.\n */\n#define ALL_CLUSTER_TIMEOUT_BUDGET_STATS(HISTOGRAM)                                                \\\n  HISTOGRAM(upstream_rq_timeout_budget_percent_used, Unspecified)                                  \\\n  HISTOGRAM(upstream_rq_timeout_budget_per_try_percent_used, Unspecified)\n\n/**\n * Struct definition for all cluster stats. @see stats_macros.h\n */\nstruct ClusterStats {\n  ALL_CLUSTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\n/**\n * Struct definition for all cluster load report stats. @see stats_macros.h\n */\nstruct ClusterLoadReportStats {\n  ALL_CLUSTER_LOAD_REPORT_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Struct definition for cluster circuit breakers stats. @see stats_macros.h\n */\nstruct ClusterCircuitBreakersStats {\n  ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(GENERATE_GAUGE_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Struct definition for cluster timeout budget stats. @see stats_macros.h\n */\nstruct ClusterRequestResponseSizeStats {\n  ALL_CLUSTER_REQUEST_RESPONSE_SIZE_STATS(GENERATE_HISTOGRAM_STRUCT)\n};\n\nusing ClusterRequestResponseSizeStatsPtr = std::unique_ptr<ClusterRequestResponseSizeStats>;\nusing ClusterRequestResponseSizeStatsOptRef =\n    absl::optional<std::reference_wrapper<ClusterRequestResponseSizeStats>>;\n\n/**\n * Struct definition for cluster timeout budget stats. 
@see stats_macros.h\n */\nstruct ClusterTimeoutBudgetStats {\n  ALL_CLUSTER_TIMEOUT_BUDGET_STATS(GENERATE_HISTOGRAM_STRUCT)\n};\n\nusing ClusterTimeoutBudgetStatsPtr = std::unique_ptr<ClusterTimeoutBudgetStats>;\nusing ClusterTimeoutBudgetStatsOptRef =\n    absl::optional<std::reference_wrapper<ClusterTimeoutBudgetStats>>;\n\n/**\n * All extension protocol specific options returned by the method at\n *   NamedNetworkFilterConfigFactory::createProtocolOptions\n * must be derived from this class.\n */\nclass ProtocolOptionsConfig {\npublic:\n  virtual ~ProtocolOptionsConfig() = default;\n};\nusing ProtocolOptionsConfigConstSharedPtr = std::shared_ptr<const ProtocolOptionsConfig>;\n\n/**\n *  Base class for all cluster typed metadata factory.\n */\nclass ClusterTypedMetadataFactory : public Envoy::Config::TypedMetadataFactory {};\n\n/**\n * Information about a given upstream cluster.\n */\nclass ClusterInfo {\npublic:\n  struct Features {\n    // Whether the upstream supports HTTP2. This is used when creating connection pools.\n    static const uint64_t HTTP2 = 0x1;\n    // Use the downstream protocol (HTTP1.1, HTTP2) for upstream connections as well, if available.\n    // This is used when creating connection pools.\n    static const uint64_t USE_DOWNSTREAM_PROTOCOL = 0x2;\n    // Whether connections should be immediately closed upon health failure.\n    static const uint64_t CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE = 0x4;\n  };\n\n  virtual ~ClusterInfo() = default;\n\n  /**\n   * @return bool whether the cluster was added via API (if false the cluster was present in the\n   *         initial configuration and cannot be removed or updated).\n   */\n  virtual bool addedViaApi() const PURE;\n\n  /**\n   * @return the connect timeout for upstream hosts that belong to this cluster.\n   */\n  virtual std::chrono::milliseconds connectTimeout() const PURE;\n\n  /**\n   * @return the idle timeout for upstream connection pool connections.\n   */\n  virtual const 
absl::optional<std::chrono::milliseconds> idleTimeout() const PURE;\n\n  /**\n   * @return how many streams should be anticipated per each current stream.\n   */\n  virtual float perUpstreamPrefetchRatio() const PURE;\n\n  /**\n   * @return how many streams should be anticipated per each current stream.\n   */\n  virtual float peekaheadRatio() const PURE;\n\n  /**\n   * @return soft limit on size of the cluster's connections read and write buffers.\n   */\n  virtual uint32_t perConnectionBufferLimitBytes() const PURE;\n\n  /**\n   * @return uint64_t features supported by the cluster. @see Features.\n   */\n  virtual uint64_t features() const PURE;\n\n  /**\n   * @return const Http::Http1Settings& for HTTP/1.1 connections created on behalf of this cluster.\n   *         @see Http::Http1Settings.\n   */\n  virtual const Http::Http1Settings& http1Settings() const PURE;\n\n  /**\n   * @return const envoy::config::core::v3::Http2ProtocolOptions& for HTTP/2 connections\n   * created on behalf of this cluster.\n   *         @see envoy::config::core::v3::Http2ProtocolOptions.\n   */\n  virtual const envoy::config::core::v3::Http2ProtocolOptions& http2Options() const PURE;\n\n  /**\n   * @return const envoy::config::core::v3::HttpProtocolOptions for all of HTTP versions.\n   */\n  virtual const envoy::config::core::v3::HttpProtocolOptions&\n  commonHttpProtocolOptions() const PURE;\n\n  /**\n   * @param name std::string containing the well-known name of the extension for which protocol\n   *        options are desired\n   * @return std::shared_ptr<const Derived> where Derived is a subclass of ProtocolOptionsConfig\n   *         and contains extension-specific protocol options for upstream connections.\n   */\n  template <class Derived>\n  const std::shared_ptr<const Derived>\n  extensionProtocolOptionsTyped(const std::string& name) const {\n    return std::dynamic_pointer_cast<const Derived>(extensionProtocolOptions(name));\n  }\n\n  /**\n   * @return const 
envoy::config::cluster::v3::Cluster::CommonLbConfig& the common configuration for\n   * all load balancers for this cluster.\n   */\n  virtual const envoy::config::cluster::v3::Cluster::CommonLbConfig& lbConfig() const PURE;\n\n  /**\n   * @return the type of load balancing that the cluster should use.\n   */\n  virtual LoadBalancerType lbType() const PURE;\n\n  /**\n   * @return the service discovery type to use for resolving the cluster.\n   */\n  virtual envoy::config::cluster::v3::Cluster::DiscoveryType type() const PURE;\n\n  /**\n   * @return the type of cluster, only used for custom discovery types.\n   */\n  virtual const absl::optional<envoy::config::cluster::v3::Cluster::CustomClusterType>&\n  clusterType() const PURE;\n\n  /**\n   * @return configuration for least request load balancing, only used if LB type is least request.\n   */\n  virtual const absl::optional<envoy::config::cluster::v3::Cluster::LeastRequestLbConfig>&\n  lbLeastRequestConfig() const PURE;\n\n  /**\n   * @return configuration for ring hash load balancing, only used if type is set to ring_hash_lb.\n   */\n  virtual const absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig>&\n  lbRingHashConfig() const PURE;\n\n  /**\n   * @return configuration for maglev load balancing, only used if type is set to maglev_lb.\n   */\n  virtual const absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig>&\n  lbMaglevConfig() const PURE;\n\n  /**\n   * @return const absl::optional<envoy::config::cluster::v3::Cluster::OriginalDstLbConfig>& the\n   * configuration for the Original Destination load balancing policy, only used if type is set to\n   *         ORIGINAL_DST_LB.\n   */\n  virtual const absl::optional<envoy::config::cluster::v3::Cluster::OriginalDstLbConfig>&\n  lbOriginalDstConfig() const PURE;\n\n  /**\n   * @return const absl::optional<envoy::config::core::v3::TypedExtensionConfig>& the configuration\n   *         for the upstream, if a custom upstream is 
configured.\n   */\n  virtual const absl::optional<envoy::config::core::v3::TypedExtensionConfig>&\n  upstreamConfig() const PURE;\n\n  /**\n   * @return Whether the cluster is currently in maintenance mode and should not be routed to.\n   *         Different filters may handle this situation in different ways. The implementation\n   *         of this routine is typically based on randomness and may not return the same answer\n   *         on each call.\n   */\n  virtual bool maintenanceMode() const PURE;\n\n  /**\n   * @return uint64_t the maximum number of outbound requests that a connection pool will make on\n   *         each upstream connection. This can be used to increase spread if the backends cannot\n   *         tolerate imbalance. 0 indicates no maximum.\n   */\n  virtual uint64_t maxRequestsPerConnection() const PURE;\n\n  /**\n   * @return uint32_t the maximum number of response headers. The default value is 100. Results in a\n   * reset if the number of headers exceeds this value.\n   */\n  virtual uint32_t maxResponseHeadersCount() const PURE;\n\n  /**\n   * @return the human readable name of the cluster.\n   */\n  virtual const std::string& name() const PURE;\n\n  /**\n   * @return ResourceManager& the resource manager to use by proxy agents for this cluster (at\n   *         a particular priority).\n   */\n  virtual ResourceManager& resourceManager(ResourcePriority priority) const PURE;\n\n  /**\n   * @return TransportSocketMatcher& the transport socket matcher associated\n   * factory.\n   */\n  virtual TransportSocketMatcher& transportSocketMatcher() const PURE;\n\n  /**\n   * @return ClusterStats& strongly named stats for this cluster.\n   */\n  virtual ClusterStats& stats() const PURE;\n\n  /**\n   * @return the stats scope that contains all cluster stats. 
This can be used to produce dynamic\n   *         stats that will be freed when the cluster is removed.\n   */\n  virtual Stats::Scope& statsScope() const PURE;\n\n  /**\n   * @return ClusterLoadReportStats& strongly named load report stats for this cluster.\n   */\n  virtual ClusterLoadReportStats& loadReportStats() const PURE;\n\n  /**\n   * @return absl::optional<std::reference_wrapper<ClusterRequestResponseSizeStats>> stats to track\n   * headers/body sizes of request/response for this cluster.\n   */\n  virtual ClusterRequestResponseSizeStatsOptRef requestResponseSizeStats() const PURE;\n\n  /**\n   * @return absl::optional<std::reference_wrapper<ClusterTimeoutBudgetStats>> stats on timeout\n   * budgets for this cluster.\n   */\n  virtual ClusterTimeoutBudgetStatsOptRef timeoutBudgetStats() const PURE;\n\n  /**\n   * Returns an optional source address for upstream connections to bind to.\n   *\n   * @return a source address to bind to or nullptr if no bind need occur.\n   */\n  virtual const Network::Address::InstanceConstSharedPtr& sourceAddress() const PURE;\n\n  /**\n   * @return the configuration for load balancer subsets.\n   */\n  virtual const LoadBalancerSubsetInfo& lbSubsetInfo() const PURE;\n\n  /**\n   * @return const envoy::config::core::v3::Metadata& the configuration metadata for this cluster.\n   */\n  virtual const envoy::config::core::v3::Metadata& metadata() const PURE;\n\n  /**\n   * @return const Envoy::Config::TypedMetadata&& the typed metadata for this cluster.\n   */\n  virtual const Envoy::Config::TypedMetadata& typedMetadata() const PURE;\n\n  /**\n   *\n   * @return const Network::ConnectionSocket::OptionsSharedPtr& socket options for all\n   *         connections for this cluster.\n   */\n  virtual const Network::ConnectionSocket::OptionsSharedPtr& clusterSocketOptions() const PURE;\n\n  /**\n   * @return whether to skip waiting for health checking before draining connections\n   *         after a host is removed from service 
discovery.\n   */\n  virtual bool drainConnectionsOnHostRemoval() const PURE;\n\n  /**\n   *  @return whether to create a new connection pool for each downstream connection routed to\n   *          the cluster\n   */\n  virtual bool connectionPoolPerDownstreamConnection() const PURE;\n\n  /**\n   * @return true if this cluster is configured to ignore hosts for the purpose of load balancing\n   * computations until they have been health checked for the first time.\n   */\n  virtual bool warmHosts() const PURE;\n\n  /**\n   * @return eds cluster service_name of the cluster.\n   */\n  virtual absl::optional<std::string> edsServiceName() const PURE;\n\n  /**\n   * Create network filters on a new upstream connection.\n   */\n  virtual void createNetworkFilterChain(Network::Connection& connection) const PURE;\n\n  /**\n   * Calculate upstream protocol based on features.\n   */\n  virtual Http::Protocol\n  upstreamHttpProtocol(absl::optional<Http::Protocol> downstream_protocol) const PURE;\n\n  /**\n   * @return http protocol options for upstream connection\n   */\n  virtual const absl::optional<envoy::config::core::v3::UpstreamHttpProtocolOptions>&\n  upstreamHttpProtocolOptions() const PURE;\n\n  /**\n   * @return the Http1 Codec Stats.\n   */\n  virtual Http::Http1::CodecStats& http1CodecStats() const PURE;\n\n  /**\n   * @return the Http2 Codec Stats.\n   */\n  virtual Http::Http2::CodecStats& http2CodecStats() const PURE;\n\nprotected:\n  /**\n   * Invoked by extensionProtocolOptionsTyped.\n   * @param name std::string containing the well-known name of the extension for which protocol\n   *        options are desired\n   * @return ProtocolOptionsConfigConstSharedPtr with extension-specific protocol options for\n   *         upstream connections.\n   */\n  virtual ProtocolOptionsConfigConstSharedPtr\n  extensionProtocolOptions(const std::string& name) const PURE;\n};\n\nusing ClusterInfoConstSharedPtr = std::shared_ptr<const ClusterInfo>;\n\nclass 
HealthChecker;\n\n/**\n * An upstream cluster (group of hosts). This class is the \"primary\" singleton cluster used amongst\n * all forwarding threads/workers. Individual HostSets are used on the workers themselves.\n */\nclass Cluster {\npublic:\n  virtual ~Cluster() = default;\n\n  enum class InitializePhase { Primary, Secondary };\n\n  /**\n   * @return a pointer to the cluster's health checker. If a health checker has not been installed,\n   *         returns nullptr.\n   */\n  virtual HealthChecker* healthChecker() PURE;\n\n  /**\n   * @return the information about this upstream cluster.\n   */\n  virtual ClusterInfoConstSharedPtr info() const PURE;\n\n  /**\n   * @return a pointer to the cluster's outlier detector. If an outlier detector has not been\n   *         installed, returns nullptr.\n   */\n  virtual Outlier::Detector* outlierDetector() PURE;\n  virtual const Outlier::Detector* outlierDetector() const PURE;\n\n  /**\n   * Initialize the cluster. This will be called either immediately at creation or after all primary\n   * clusters have been initialized (determined via initializePhase()).\n   * @param callback supplies a callback that will be invoked after the cluster has undergone first\n   *        time initialization. E.g., for a dynamic DNS cluster the initialize callback will be\n   *        called when initial DNS resolution is complete.\n   */\n  virtual void initialize(std::function<void()> callback) PURE;\n\n  /**\n   * @return the phase in which the cluster is initialized at boot. This mechanism is used such that\n   *         clusters that depend on other clusters can correctly initialize. 
(E.g., an EDS cluster\n   *         that depends on resolution of the EDS server itself).\n   */\n  virtual InitializePhase initializePhase() const PURE;\n\n  /**\n   * @return the PrioritySet for the cluster.\n   */\n  virtual PrioritySet& prioritySet() PURE;\n\n  /**\n   * @return the const PrioritySet for the cluster.\n   */\n  virtual const PrioritySet& prioritySet() const PURE;\n};\n\nusing ClusterSharedPtr = std::shared_ptr<Cluster>;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "repokitteh.star",
    "content": "pin(\"github.com/repokitteh/modules\", \"4ee2ed0c3622aad7fcddc04cb5dc866e44a541e6\")\n\nuse(\"github.com/repokitteh/modules/assign.star\")\nuse(\"github.com/repokitteh/modules/review.star\")\nuse(\"github.com/repokitteh/modules/wait.star\")\nuse(\"github.com/repokitteh/modules/circleci.star\", secret_token=get_secret('circle_token'))\nuse(\"github.com/envoyproxy/envoy/ci/repokitteh/modules/azure_pipelines.star\", secret_token=get_secret('azp_token'))\nuse(\n  \"github.com/envoyproxy/envoy/ci/repokitteh/modules/ownerscheck.star\",\n  paths=[\n    {\n      \"owner\": \"envoyproxy/api-shepherds!\",\n      \"path\":\n      \"(api/envoy[\\w/]*/(v1alpha\\d?|v1|v2alpha\\d?|v2))|(api/envoy/type/(matcher/)?\\w+.proto)\",\n      \"label\": \"v2-freeze\",\n      \"allow_global_approval\": False,\n      \"github_status_label\": \"v2 freeze violations\",\n    },\n    {\n      \"owner\": \"envoyproxy/api-shepherds!\",\n      \"path\": \"api/envoy/\",\n      \"label\": \"api\",\n      \"github_status_label\": \"any API change\",\n    },\n    {\n      \"owner\": \"envoyproxy/api-watchers\",\n      \"path\": \"api/envoy/\",\n    },\n    {\n      \"owner\": \"envoyproxy/dependency-watchers\",\n      \"path\":\n      \"(bazel/repository_locations\\.bzl)|(api/bazel/repository_locations\\.bzl)|(.*/requirements\\.txt)\",\n    },\n  ],\n)\n\nalias('retest-circle', 'retry-circle')\nalias('retest', 'retry-azp')\n\ndef _backport():\n  github.issue_label('backport/review')\n\nhandlers.command(name='backport', func=_backport)\n"
  },
  {
    "path": "restarter/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nexports_files([\n    \"hot-restarter.py\",\n])\n"
  },
  {
    "path": "restarter/hot-restarter.py",
    "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport signal\nimport sys\nimport time\n\n# The number of seconds to wait for children to gracefully exit after\n# propagating SIGTERM before force killing children.\n# NOTE: If using a shutdown mechanism such as runit's `force-stop` which sends\n# a KILL after a specified timeout period, it's important to ensure that this\n# constant is smaller than the KILL timeout\nTERM_WAIT_SECONDS = 30\n\nrestart_epoch = 0\npid_list = []\n\n\ndef term_all_children():\n  \"\"\" Iterate through all known child processes, send a TERM signal to each of\n  them, and then wait up to TERM_WAIT_SECONDS for them to exit gracefully,\n  exiting early if all children go away. If one or more children have not\n  exited after TERM_WAIT_SECONDS, they will be forcibly killed \"\"\"\n\n  # First uninstall the SIGCHLD handler so that we don't get called again.\n  signal.signal(signal.SIGCHLD, signal.SIG_DFL)\n\n  global pid_list\n  for pid in pid_list:\n    print(\"sending TERM to PID={}\".format(pid))\n    try:\n      os.kill(pid, signal.SIGTERM)\n    except OSError:\n      print(\"error sending TERM to PID={} continuing\".format(pid))\n\n  all_exited = False\n\n  # wait for TERM_WAIT_SECONDS seconds for children to exit cleanly\n  retries = 0\n  while not all_exited and retries < TERM_WAIT_SECONDS:\n    for pid in list(pid_list):\n      ret_pid, exit_status = os.waitpid(pid, os.WNOHANG)\n      if ret_pid == 0 and exit_status == 0:\n        # the child is still running\n        continue\n\n      pid_list.remove(pid)\n\n    if len(pid_list) == 0:\n      all_exited = True\n    else:\n      retries += 1\n      time.sleep(1)\n\n  if all_exited:\n    print(\"all children exited cleanly\")\n  else:\n    for pid in pid_list:\n      print(\"child PID={} did not exit cleanly, killing\".format(pid))\n    force_kill_all_children()\n    sys.exit(1)  # error status because a child did not exit cleanly\n\n\ndef 
force_kill_all_children():\n  \"\"\" Iterate through all known child processes and force kill them. Typically\n  term_all_children() should be attempted first to give child processes an\n  opportunity to clean up state before exiting \"\"\"\n\n  global pid_list\n  for pid in pid_list:\n    print(\"force killing PID={}\".format(pid))\n    try:\n      os.kill(pid, signal.SIGKILL)\n    except OSError:\n      print(\"error force killing PID={} continuing\".format(pid))\n\n  pid_list = []\n\n\ndef shutdown():\n  \"\"\" Attempt to gracefully shutdown all child Envoy processes and then exit.\n  See term_all_children() for further discussion. \"\"\"\n  term_all_children()\n  sys.exit(0)\n\n\ndef sigterm_handler(signum, frame):\n  \"\"\" Handler for SIGTERM. \"\"\"\n  print(\"got SIGTERM\")\n  shutdown()\n\n\ndef sigint_handler(signum, frame):\n  \"\"\" Handler for SIGINT (ctrl-c). The same as the SIGTERM handler. \"\"\"\n  print(\"got SIGINT\")\n  shutdown()\n\n\ndef sighup_handler(signum, frame):\n  \"\"\" Handler for SIGHUP. This signal is used to cause the restarter to fork and exec a new\n      child. \"\"\"\n\n  print(\"got SIGHUP\")\n  fork_and_exec()\n\n\ndef sigusr1_handler(signum, frame):\n  \"\"\" Handler for SIGUSR1. Propagate SIGUSR1 to all of the child processes \"\"\"\n\n  global pid_list\n  for pid in pid_list:\n    print(\"sending SIGUSR1 to PID={}\".format(pid))\n    try:\n      os.kill(pid, signal.SIGUSR1)\n    except OSError:\n      print(\"error in SIGUSR1 to PID={} continuing\".format(pid))\n\n\ndef sigchld_handler(signum, frame):\n  \"\"\" Handler for SIGCHLD. Iterates through all of our known child processes and figures out whether\n      the signal/exit was expected or not. 
Python doesn't have any of the native signal handlers\n      ability to get the child process info directly from the signal handler so we need to iterate\n      through all child processes and see what happened.\"\"\"\n\n  print(\"got SIGCHLD\")\n\n  kill_all_and_exit = False\n  global pid_list\n  pid_list_copy = list(pid_list)\n  for pid in pid_list_copy:\n    ret_pid, exit_status = os.waitpid(pid, os.WNOHANG)\n    if ret_pid == 0 and exit_status == 0:\n      # This child is still running.\n      continue\n\n    pid_list.remove(pid)\n\n    # Now we see how the child exited.\n    if os.WIFEXITED(exit_status):\n      exit_code = os.WEXITSTATUS(exit_status)\n      print(\"PID={} exited with code={}\".format(ret_pid, exit_code))\n      if exit_code == 0:\n        # Normal exit. We assume this was on purpose.\n        pass\n      else:\n        # Something bad happened. We need to tear everything down so that whoever started the\n        # restarter can know about this situation and restart the whole thing.\n        kill_all_and_exit = True\n    elif os.WIFSIGNALED(exit_status):\n      print(\"PID={} was killed with signal={}\".format(ret_pid, os.WTERMSIG(exit_status)))\n      kill_all_and_exit = True\n    else:\n      kill_all_and_exit = True\n\n  if kill_all_and_exit:\n    print(\"Due to abnormal exit, force killing all child processes and exiting\")\n\n    # First uninstall the SIGCHLD handler so that we don't get called again.\n    signal.signal(signal.SIGCHLD, signal.SIG_DFL)\n\n    force_kill_all_children()\n\n  # Our last child died, so we have no purpose. Exit.\n  if not pid_list:\n    print(\"exiting due to lack of child processes\")\n    sys.exit(1 if kill_all_and_exit else 0)\n\n\ndef fork_and_exec():\n  \"\"\" This routine forks and execs a new child process and keeps track of its PID. Before we fork,\n      set the current restart epoch in an env variable that processes can read if they care. 
\"\"\"\n\n  global restart_epoch\n  os.environ['RESTART_EPOCH'] = str(restart_epoch)\n  print(\"forking and execing new child process at epoch {}\".format(restart_epoch))\n  restart_epoch += 1\n\n  child_pid = os.fork()\n  if child_pid == 0:\n    # Child process\n    os.execl(sys.argv[1], sys.argv[1])\n  else:\n    # Parent process\n    print(\"forked new child process with PID={}\".format(child_pid))\n    pid_list.append(child_pid)\n\n\ndef main():\n  \"\"\" Script main. This script is designed so that a process watcher like runit or monit can watch\n      this process and take corrective action if it ever goes away. \"\"\"\n\n  print(\"starting hot-restarter with target: {}\".format(sys.argv[1]))\n\n  signal.signal(signal.SIGTERM, sigterm_handler)\n  signal.signal(signal.SIGINT, sigint_handler)\n  signal.signal(signal.SIGHUP, sighup_handler)\n  signal.signal(signal.SIGCHLD, sigchld_handler)\n  signal.signal(signal.SIGUSR1, sigusr1_handler)\n\n  # Start the first child process and then go into an endless loop since everything else happens via\n  # signals.\n  fork_and_exec()\n  while True:\n    time.sleep(60)\n\n\nif __name__ == '__main__':\n  main()\n"
  },
  {
    "path": "security/email-templates.md",
    "content": "# Envoy Security Process Email Templates\n\nThis is a collection of email templates to handle various situations the security team encounters.\n\n## Upcoming security release to envoy-security-announce@googlegroups.com\n\n```\nSubject: Upcoming security release of Envoy $VERSION\nTo: envoy-security-announce@googlegroups.com\nCc: envoy-announce@googlegroups.com, envoy-security@googlegroups.com, envoy-maintainers@googlegroups.com\n\nHello Envoy Community,\n\nThe Envoy security team would like to announce the forthcoming release of Envoy\n$VERSION.\n\nThis release will be made available on the $ORDINALDAY of $MONTH $YEAR at\n$PDTHOUR PDT ($GMTHOUR GMT). This release will fix $NUMDEFECTS security\ndefect(s). The highest rated security defect is considered $SEVERITY severity.\n\nNo further details or patches will be made available in advance of the release.\n\nThanks,\n$PERSON (on behalf of the Envoy security team and maintainers)\n```\n\n## Upcoming security release to cncf-envoy-distributors-announce@lists.cncf.io\n\n```\nSubject: [CONFIDENTIAL] Upcoming security release of Envoy $VERSION\nTo: cncf-envoy-distributors-announce@lists.cncf.io\nCc: envoy-security@googlegroups.com\n\nHello Envoy Distributors,\n\nThe Envoy security team would like to provide advanced notice to the Envoy\nPrivate Distributors List of some details on the pending Envoy $VERSION\nsecurity release, following the process described at\nhttps://github.com/envoyproxy/envoy/blob/master/SECURITY.md.\n\nThis release will be made available on the $ORDINALDAY of $MONTH $YEAR at\n$PDTHOUR PDT ($GMTHOUR GMT). This release will fix $NUMDEFECTS security\ndefect(s). 
The highest rated security defect is considered $SEVERITY severity.\n\nBelow we provide details of these vulnerabilities under our embargo policy\n(https://github.com/envoyproxy/envoy/blob/master/SECURITY.md#embargo-policy).\nThis information should be treated as confidential until public release by the\nEnvoy maintainers on the Envoy GitHub.\n\nWe will address the following CVE(s):\n\n* CVE-YEAR-ABCDEF (CVSS score $CVSS, $SEVERITY): $CVESUMMARY\n  - Link to the appropriate section of the CVE writeup document with gh-cve-template.md content.\n...\n\nWe intend to make candidates release patches available under embargo on the\n$ORDINALDAY of $MONTH $YEAR, which you may use for testing and preparing your\ndistributions.\n\nPlease direct further communication amongst private distributors to this list\nor to envoy-security@googlegroups.com for direct communication with the Envoy\nsecurity team.\n\nThanks,\n$PERSON (on behalf of the Envoy security team and maintainers)\n```\n\n## Candidate release patches to cncf-envoy-distributors-announce@lists.cncf.io\n\n```\nSubject: [CONFIDENTIAL] Further details on security release of Envoy $VERSION\nTo: cncf-envoy-distributors-announce@lists.cncf.io\nCc: envoy-security@googlegroups.com\n\nHello Envoy Distributors,\n\nPlease find attached candidate patches for CVE-YEAR-ABCDEF. You may use the\nattached patches for testing and preparing your distributions. The patches can\nbe applied with \"git am\".\n\nPatches starting with \"$VERSION\" should be applied against the $OLDVERSION release.\n\nPatches starting with \"master-\" should be applied against commit $COMMIT.\n\nAs a reminder, these patches are under embargo until $ORDINALDAY of $MONTH $YEAR\nat $PDTHOUR PDT ($GMTHOUR GMT). The information below should be treated as\nconfidential and shared only on a need-to-know basis. 
The rules outlined in our\nembargo policy\n(https://github.com/envoyproxy/envoy/blob/master/SECURITY.md#embargo-policy)\nstill apply, and it is extremely important that any communication related to\nthese CVEs is not forwarded further.\n\nNo fixes should be made publicly available, either in binary or source form,\nbefore the aforementioned disclosure date.\n\nWe would appreciate any feedback on these patches. Please direct further\ncommunication amongst private distributors to this list or to\nenvoy-security@googlegroups.com for direct communication with the Envoy\nsecurity team.\n\nThanks,\n$PERSON (on behalf of the Envoy security team and maintainers)\n```\n\n## Security Fix Announcement\n\n```\nSubject: Security release of Envoy $VERSION is now available\nTo: envoy-security-announce@googlegroups.com\nCc: envoy-announce@googlegroups.com, envoy-security@googlegroups.com, envoy-maintainers@googlegroups.com\n\nHello Envoy Community,\n\nThe Envoy security team would like to announce the availability of Envoy $VERSION.\nThis addresses the following CVE(s):\n\n* CVE-YEAR-ABCDEF (CVSS score $CVSS): $CVESUMMARY\n...\n\nUpgrading to $VERSION is encouraged to fix these issues.\n\nGitHub tag: https://github.com/envoyproxy/envoy/releases/tag/v$VERSION\nDocker images: https://hub.docker.com/r/envoyproxy/envoy/tags\nRelease notes: https://www.envoyproxy.io/docs/envoy/v$VERSION/version_history/current.rst\nDocs: https://www.envoyproxy.io/docs/envoy/v$VERSION/\n\n**Am I vulnerable?**\n\nRun `envoy --version` and if it indicates a base version of $OLDVERSION or\nolder you are running a vulnerable version.\n\n<!-- Provide details on features, extensions, configuration that make it likely that a system is\nvulnerable in practice. -->\n\n**How do I mitigate the vulnerability?**\n\n<!--\n[This is an optional section. 
Remove if there are no mitigations.]\n-->\n\nAvoid the use of feature XYZ in Envoy configuration.\n\n**How do I upgrade?**\n\nUpdate to $VERSION via your Envoy distribution or rebuild from the Envoy GitHub\nsource at the $VERSION tag or HEAD @ master.\n\n**Vulnerability Details**\n\n<!--\n[For each CVE]\n-->\n\n***CVE-YEAR-ABCDEF***\n\n$CVESUMMARY\n\nThis issue is filed as $CVE. We have rated it as [$CVSSSTRING]($CVSSURL)\n($CVSS, $SEVERITY) [See the GitHub issue for more details]($GITHUBISSUEURL)\n\n**Thank you**\n\nThank you to $REPORTER, $DEVELOPERS, and the $RELEASEMANAGERS for the\ncoordination in making this release.\n\nThanks,\n\n$PERSON (on behalf of the Envoy security team and maintainers)\n```\n\n## Security Fix of Main Branch Announcement\n\n```\nSubject: Security fix of Envoy main branch (that includes $GITSHORTCOMMITHASH) is now available\nTo: envoy-security-announce@googlegroups.com\nCc: envoy-announce@googlegroups.com, envoy-security@googlegroups.com, envoy-maintainers@googlegroups.com\n\nHello Envoy Community,\n\nThe Envoy security team would like to announce the availability of the fix for security defect(s)\nintroduced in the main branch by [$GITSHORTCOMMITHASH]($GITHUBCOMMITURL) commit. The defect(s)\ncaused by the [$GITSHORTCOMMITHASH]($GITHUBCOMMITURL) were not part of any Envoy stable releases.\n\n$DEFECTSSUMMARY\n\n<!-- Provide details on features, extensions, configuration that make it likely that a system is\nvulnerable in practice. -->\n\nThe CVSS score for this is [$CVSSSTRING]($CVSSURL).\n\nIncluding the [$FIXGITSHORTCOMMITHASH]($FIXGITHUBCOMMITURL) commit is encouraged to fix this issue.\n\n**Security fix timeline**\n\n1. The defect(s) introduced in [$GITSHORTCOMMITHASH]($GITHUBCOMMITURL) were landed in the main\n   branch on $ORDINALDAY of $MONTH $YEAR at $PDTHOUR PDT ($GMTHOUR GMT).\n2. 
The fix [$FIXGITSHORTCOMMITHASH]($FIXGITHUBCOMMITURL) was merged into the main branch on\n   $ORDINALDAY of $MONTH $YEAR at $PDTHOUR PDT ($GMTHOUR GMT).\n\n**Thank you**\n\nThank you to $REPORTER, $DEVELOPERS, and the $RELEASEMANAGERS for the coordination in making this\nrelease.\n\nThanks,\n\n$PERSON (on behalf of the Envoy security team and maintainers)\n```\n"
  },
  {
    "path": "security/gh-cve-template.md",
    "content": ">This template is for public disclosure of CVE details on Envoy's GitHub. It should be filed\nwith the public release of a security patch version, and will be linked to in the announcement sent\nto envoy-security-announce@googlegroups.com. The title of this issue should be the CVE identifier\nand it should have the `security` label applied.\n\n# CVE-YEAR-ABCDEF\n\n## Brief description\n\n>Brief description used when filing CVE.\n\n## CVSS\n\n>[$CVSSSTRING]($CVSSURL)($CVSSSCORE, $SEVERITY)\n\n## Affected version(s)\n\n>Envoy x.y.z and before.\n\n## Affected component(s)\n\n>List affected internal components and features.\n\n## Attack vector(s)\n\n>How would an attacker use this?\n\n## Discover(s)/Credits\n\n>Individual and optional organization.\n\n## Example exploit or proof-of-concept\n\n>If there is proof-of-concept or example, provide a concrete example.\n\n## Details\n\n>Deep dive into the defect. This should be detailed enough to maintain a record for posterity while\nbeing clear and concise.\n\n## Mitigations\n\n>Are there configuration or CLI options that can be used to mitigate?\n\n## Detection\n\n>How can exploitation of this bug be detected in existing and future Envoy versions? E.g. access logs.\n\n## References\n\n* CVE: $CVEURL\n>Any other public information.\n"
  },
  {
    "path": "security/postmortem-template.md",
    "content": "> Slimmed down template from: Betsy Beyer, Chris Jones, Jennifer Petoff, and Niall Richard\n> Murphy. [“Site Reliability\n> Engineering.”](https://landing.google.com/sre/book/chapters/postmortem.html),\n> modified from\n> https://raw.githubusercontent.com/dastergon/postmortem-templates/master/templates/postmortem-template-srebook.md.\n\n> Follow the SRE link for examples of how to populate.\n\n> A PR should be opened with  postmortem placed in security/postmortems/cve-year-abcdef.md. If there\n> are multiple CVEs in the postmortem, populate each alias with the string \"See cve-year-abcdef.md\".\n\n# Security postmortem for CVE-YEAR-ABCDEF, CVE-YEAR-ABCDEG\n\n## Incident date(s)\n\n> YYYY-MM-DD (as a date range if over a period of time)\n\n## Authors\n\n> @foo, @bar, ...\n\n## Status\n\n> Draft | Final\n\n## Summary\n\n> A few sentence summary.\n\n## CVE issue(s)\n\n> https://github.com/envoyproxy/envoy/issues/${CVE_ISSUED_ID}\n\n## Root Causes\n\n> What defect in Envoy led to the CVEs? How did this defect arise?\n\n## Resolution\n\n> How was the security release process followed? How were the fix patches\n> structured and authored?\n\n## Detection\n\n> How was this discovered? Reported by XYZ, found by fuzzing? Private or public\n> disclosure?\n\n## Action Items\n\n> Create action item issues and include in their body \"Action item for\n> CVE-YEAR-ABCDEF\". Modify the search string below to include in the PR:\n\nhttps://github.com/envoyproxy/envoy/issues?utf8=%E2%9C%93&q=is%3Aissue+%22Action+item+for+CVE-YEAR-ABCDEF%22\n\n## Lessons Learned\n\n### What went well\n\n### What went wrong\n\n### Where we got lucky\n\n## Timeline\n\nAll times US/Pacific\n\nYYYY-MM-DD\n* HH:MM Cake was made available\n* HH:MM People ate the cake\n\nYYYY-MM-DD\n* HH:MM More cake was available\n* HH:MM People ate more cake\n\n## Supporting information\n"
  },
  {
    "path": "security/postmortems/cve-2019-15225.md",
    "content": "# Security postmortem for CVE-2019-15225, CVE-2019-15226\n\n## Incident date(s)\n\n2019-07-25 - 2019-10-10\n\n## Authors\n\n@asraa\n\n## Status\n\nFinal\n\n## Summary\n\nAfter an Envoy user publicly reported a crash in Envoy about regular expression matching in route\nresolution (https://github.com/envoyproxy/envoy/issues/7728), the Envoy security team found that\nissue could be leveraged for a DoS attack and would go through the public security release\nprocess. The fix landed in master with a public PR (https://github.com/envoyproxy/envoy/pull/7878)\nand was targeted to be included in a 1.11.2 security release.\n\nCVE-2019-15226 was detected via fuzzers just after the 1.11.1 security release. With the fix of\nCVE-2019-15225 in progress, the Envoy security team decided to lump the two fixes into a 1.11.2\nsecurity release. This was the first time in which the Envoy security release included a publicly\ndisclosed vulnerability with a fix that was merged into master. The security release included a\nbackported patch of the fix as well as the patches for CVE-2019-15226.\n\n## CVE issue(s)\n\n* https://github.com/envoyproxy/envoy/issues/8519\n* https://github.com/envoyproxy/envoy/issues/8520\n\n## Root Causes\n\nCVE-2019-15225 was caused by the use of a recursive algorithm for matching regular\nexpressions. Envoy’s HTTP router can be configured with regular expressions for routing incoming\nHTTP requests that matched header values. Envoy used the libstdc++ `std::regex` implementation for\nthese regular expressions. As a result, an HTTP request with sufficiently large header values may\nconsume large amounts of stack memory and cause abnormal process termination. Regular expressions\nwith the `*` or `+` quantifiers are particularly vulnerable and may cause abnormal process\ntermination. 
This appeared when matching header values of 16Kb or more.\n\nCVE-2019-15226 resulted from excessive iteration of the `HeaderMap` from a time-consuming header\nsize validation that occurred for each header added. Both codec libraries http_parser and nghttp2\nhave internal limits for the maximum request header size. Envoy’s HTTP/2 codec originally checked\nagainst a hard-coded max header size of 63K, which was just under the default max headers length in\nnghttp2. The check occurred every time a header was added, resulting in O(n^2) performance. Work on\nmaking this limit configurable (https://github.com/envoyproxy/envoy/issues/5626) also introduced the\nissue in Envoy’s HTTP/1 codec, where the check was added per header field mimicking the same\nproblematic pattern as the original HTTP/2 codec.\n\n## Resolution\n\nTo resolve the memory consumption caused by excessive memory consumption from regex matching, Envoy\n1.11.2 deprecates the use of `std::regex` in user facing paths. A new safe regex matcher introduces\nan explicitly configurable regex engine. Currently, the regex engine is limited to Google’s RE2\nregex engine that implements a safe subset of the std::regex language features. The existing regex\nengine is in a deprecation period to allow users to switch to safe regex engines.\n\nGoogle’s RE2 regex engine is designed to complete execution in linear time\n(https://github.com/google/re2/wiki/WhyRE2) and limit the amount of memory used. Envoy 1.11.2 also\nincludes an option to configure a “program size” when using Google RE2, a rough estimate of how\ncomplex a compiled regex is to evaluate. A regex that has a program size greater than this value\nwill fail to compile.\n\nCVE-2019-15226 was first noticed via fuzzers when a timeout was reported by\n`h1_capture_direct_fuzz_test`: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16325 on\n08-09-2019. 
Once a reproducer was made in an Envoy deployment to confirm the issue, and some\nprofiling work was done by the Envoy security team, we moved to a private fix process targeting the\n1.11.2 release along with CVE-2019-15225. Other calls to `byteSize()` and iterations over\n`HeaderMap` were also analyzed for potential DoS vulnerabilities and performance issues.\n\nThe fix re-implemented the `HeaderMapImpl::byteSize()` method to have O(1) performance by returning\na `cached_byte_size_` member to `HeaderMapImpl` that was updated as header entries are added, rather\nthan iterate over the `HeaderMap` to calculate the byte size. To resolve excessive iterations over\nthe `HeaderMap` that can appear in access logging with many header formatters and many headers, the\nfix also included configurable limits for the maximum number of headers.\n\nThe following patches were produced:\n* https://github.com/envoyproxy/envoy/commit/afc39bea36fd436e54262f150c009e8d72db5014\n* https://github.com/envoyproxy/envoy/commit/5c122a35ebd7d3f7678b0f1c9846c1e282bba079\n\nA 1.11.2 security release was announced on 09-18-2019. An e-mail was sent to the Envoy private\ndistributor list sharing the details of CVE-2019-15226. A week later, the candidate fix patches for\nCVE-2019-15226 were shared with distributors on 2019-09-24. This provided two weeks for distributors\nto test and prepare their software for the security release date, as per the guidelines set in place\nafter security release 1.9.1.\n\n## Detection\n\nCVE-2019-15225 was reported by Seikun Kambashi in a public GitHub Issue describing a crash caused by\na request with a very large URI for routes configured with a regex matcher:\nhttps://github.com/envoyproxy/envoy/issues/7728.\n\nEnvoy’s `route_fuzz_test`, which fuzzes route resolution and header finalization, ideally should\nhave caught this crash. 
The test takes a `RouteConfiguration` and a set of headers as inputs, and\nroutes a request with the input headers with the `RouteConfiguration` given. It should have been\nfairly easy for the fuzzers to produce a wildcard matcher and a long header string. However, the\nfuzz test itself had a logical error that resulted in ignoring input path headers and setting them\nto a default value of “/”. As a result, the fuzz test would never have tested a large URI and an OOM\nor crash would never have been detected. The fuzz test was fixed in\nhttps://github.com/envoyproxy/envoy/pull/8653, and a reproducer for the CVE was added.\n\nThe underlying issue behind CVE-2019-15226 was first noticed via fuzzers when a timeout was reported\nby `h1_capture_direct_fuzz_test`: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16325. Some\nprofiling work revealed that `HeaderMapImpl::byteSize()`, which is O(n) in the number of headers, is\ncalled for every single header in both HTTP/1.1 and HTTP/2 codecs. Although Envoy’s stateless HTTP/2\nheader fuzzers (`request_header_fuzz_test` and `response_header_fuzz_test`) perform 10x more\nexecutions per second than this fuzzer, these tested one header frame per testcase and used\nnghttp2’s default max header frame size (16 KB). 
Because of this, the frame size was too small to\namplify the effect of the O(n^2) process enough to produce a timeout.\n\n## Action Items\n\n* https://github.com/envoyproxy/envoy/issues/8567\n\n* https://github.com/envoyproxy/envoy/issues/8875\n\n* https://github.com/envoyproxy/envoy/issues/8898\n\n* https://github.com/envoyproxy/envoy/issues/8901\n\n## Lessons Learned\n\n### What went well\n\n* CVE-2019-15226 was detected quickly after the fuzzer reported the timeout.\n\n* The fixes for CVE-2019-15226 were straightforward and localized.\n\n* The security release occurred on time and followed the guidelines established in\n  https://github.com/envoyproxy/envoy/blob/master/SECURITY.md\n\n### What went wrong\n\n* It took nearly a week to set up a branch for fix patches. This was due to some confusion over\n  whether to use the new GitHub Security advisories, which didn’t support the required permission\n  model and CI integrations. In the process, the envoy-setec branch was temporarily made readable to\n  all Envoy contributors.\n\n* While resolving the above permission issue, we hit an issue with Github permissions on envoyproxy:\n  people could no longer assign issues to members in the Envoy repository. This was fixed with some\n  restructuring of GitHub team’s to support the limited GitHub IAM model.\n\n* It was possible to push to envoy-setec branches by fix team, e.g. the 1.11.2 could be directly\n  pushed to (master as well). We need branch protection to ensure that CI gates merges; this will\n  provide confidence that the staged release branches are likely to work on the main Envoy\n  repository.\n\n* We had manual patch sets the day before release, but no envoy-setec branches reflecting them\n  passing end-to-end. We should not consider a release ready to go until it passes a full CI pass.\n\n* It wasn’t possible to get a full CI pass due to docs/image/etc push issues. 
We should have a set\n  of presubmits that provide a simple yes/no in the GH UX.\n\n* Our route resolution fuzzer would not have picked up the regex vulnerability due to a logical\n  error in the fuzzer.\n\n* Our more efficient request and response fuzzers would not have picked up this vulnerability\n  earlier. They only fuzz a single HEADER frame, and the maximum frame size for HTTP/2 is by default\n  16 KB.\n\n* From a distributor: “We didn't realize about safe_regex until the note this morning. So we're\n  patching ... to switch to safe_regex -- would it be possible in future notes to distributors to\n  note if usage changes are required?”\n\n* We coupled the CVE-2019-15225 and CVE-2019-15226 releases. This made sense initially, due to\n  release overhead, but as the release date for the header map fixes was extended, it meant that a\n  somewhat known vulnerability was fixed on master but not on any released version of Envoy.\n\n\n### Where we got lucky\n\n* Release branches (master and v1.11.2) had only minor CI failures\n  (bazel.compile_time_options) despite no complete pass of either assembled branch on private fix\n  branch\n\n## Timeline\n\nAll times US/Pacific\n\n2019-07-25:\n* [CVE-2019-15225] https://github.com/envoyproxy/envoy/issues/7728 was opened reporting crashes from\n  route regex matches with very long request URIs\n\n2019-08-09:\n* [CVE-2019-15226] ClusterFuzz reports https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16325\n  under embargo.\n\n2019-08-13:\n* [CVE-2019-15226] Email thread to envoy-security@googlegroups.com regarding the HeaderMap\n  DoS. Analysis began to determine similar O(n^2) performance in code that uses HeaderMap.\n\n2019-08-19:\n* CVE ID Request for CVE-2019-15225 and CVE-2019-15226\n\n2019-08-20:\n* Branch permissions / CI permissions for envoy-setec branch set up.\n\n2019-08-21:\n* [CVE-2019-15226] Draft fix PRs for CVE-2019-15226 were shared on private Envoy security\n  repository. 
Reviews and further development occurred over the next three weeks.\n\n2019-08-23:\n* [CVE-2019-15225] Fix for CVE-2019-15225 is opened at https://github.com/envoyproxy/envoy/pull/7878\n\n2019-09-18:\n* [CVE-2019-15226] CVE summary details shared with cncf-envoy-distributors-announce@lists.cncf.io.\n\n2019-09-19:\n* [CVE-2019-15226] Vulnerability exists in all Envoy distributions for HTTP/2 traffic, CVE updated\n\n2019-09-24:\n* [CVE-2019-15226] Candidate fix patches were shared with\n  cncf-envoy-distributors-announce@lists.cncf.io.\n\n2019-10-07:\n* [CVE-2019-15226] Patch sets assembled based on previous reviewed work as manual patches.\n\n2019-10-08:\n* [CVE-2019-15226] Some last minute patch fixup to have staged branches pass on CI. Patches\n  scheduled for public release.\n* 11:20 AM v1.11.2 pushed\n* [CVE-2019-15226] CVE updated on publication\n\n2019-10-10:\n* [CVE-2019-15226] Filed follow-up GitHub issue https://github.com/envoyproxy/envoy/issues/8567\n"
  },
  {
    "path": "security/postmortems/cve-2019-15226.md",
    "content": "See [cve-2019-15225.md](cve-2019-15225.md)\n"
  },
  {
    "path": "security/postmortems/cve-2019-9900.md",
    "content": "# Security postmortem for CVE-2019-9900, CVE-2019-9901\n\n## Incident date(s)\n\n2019-02-18 - 2019-04-05\n\n## Authors\n\n@htuch\n\n## Status\n\nFinal\n\n## Summary\n\nTwo independent vulnerabilities related to a mismatch between the information used for request\nmatching and routing were discovered in February/March 2019, leading to the potential ability for an\nattacker to bypass access control checks and route table intent. Since these issues had similar\nattack vectors and were discovered within the same embargo window, the issues were grouped and\nresolved (mostly) privately by Envoy and Istio fix teams, resulting in the Envoy 1.9.1 security\nrelease issued on 2019-04-05.\n\nThis was the first time in which the Envoy security release process was followed and provided a\nlearning opportunity to refine the process, originally borrowed from the Kubernetes project, to\nEnvoy's requirements. While the issues were upgraded from medium to high criticality during the fix\nprocess, they were limited in impact to a subset of users and specific configuration patterns. This\npostmortem captures the issues encountered during the fix process and provides actionable next\nsteps.\n\n## CVE issue(s)\n\n* https://github.com/envoyproxy/envoy/issues/6434\n* https://github.com/envoyproxy/envoy/issues/6435\n\n## Root Causes\n\nCVE-2019-9900 resulted from Envoy assuming that its codec libraries (http-parser, nghttp2) followed\nRFC 7230 and would reject any header value with an embedded NUL character. Unfortunately,\nhttp-parser did not do this due to an optimization in header value processing\n(https://github.com/nodejs/http-parser/issues/468, https://github.com/nodejs/http-parser/pull/469).\nIn addition, Envoy viewed header strings with a mixture of `c_str()` and `string_view`, allowing the\npossibility of inconsistent views between checks and resulting action. 
A combination of a buggy\nexternal dependency and problematic use of C string views led to this vulnerability.\n\nCVE-2019-9901 resulted from two distinct views of the role of a proxy in path handling. On the one\nhand, Envoy was considered a data forwarding engine for HTTP requests that did not need to perform\npath normalization, with this concern left to client and backend. However, at the same time, Envoy\nwas being used in applications where it intermediated on requests for access control purposes (e.g.\nRBAC, `ext_authz`) and performed path matching against policy. Especially in the presence of a\nbackend that itself normalizes, this access control role required that path normalization be applied\nin the proxy.\n\n## Resolution\n\nCVE-2019-9900 was reported by Envoy maintainer @htuch on 2019-03-10 to\nenvoy-security@googlegroups.com. After some discussion, it was agreed that this warranted invoking\nthe security release process. The issue was mitigated in the Envoy private security repository by\n@htuch and the Envoy security fix team. A single patch\n(https://github.com/envoyproxy/envoy/commit/b155af75fad7861e941b5939dc001abf581c9203) was required\nto workaround the http-parser behavior. In addition, both tests and fuzzers were\ncreated to validate the behavior when NULs were introduced anywhere in an HTTP/1 or HTTP/2 request\n(https://github.com/envoyproxy/envoy/commit/1e61a3f95f2c4d9ac1e54feae8693cee7906e2eb). Manual code\ninspection was also performed in nghttp2 to verify the absence of vulnerability. While doing so, a\nnon-security related bug was discovered (https://github.com/nghttp2/nghttp2/issues/1331).\n\nShortly after discovery of CVE-2019-9900, the http-parser issue was reported to the Node.js security\nworking group at security@nodejs.org, since http-parser lives under the umbrella of the Node.js\nproject. The full vulnerability was described and the Envoy security team proposed working with\nNode.js PST. 
As there was no reply, we proceeded independently. Unfortunately, it appears that the\nNode.js security WG never received the e-mail, due to the reliance of Node.js on HackerOne to gate\nincoming issues and a problematic e-mail forwarding chain\n(https://github.com/nodejs/security-wg/issues/454#issuecomment-481919759). We have since filed a\nHackerOne issue with the original report e-mail.\n\nCVE-2019-9901 was privately disclosed to the Istio security team by an external researcher on\n2019-02-18 and accidentally publicly disclosed in part in\nhttps://github.com/envoyproxy/envoy/issues/6008 on 2019-03-13. Once the severity of this was\nrealized via offline discussion between the Envoy security team and the PR authors, we moved to a\nprivate fix process in conjunction with CVE-2019-9900, targeting the 1.9.1 release. The Google Istio\nsecurity and networking teams led the efforts to fix this vulnerability in Envoy's private security\nrepository. The workaround implementation of path normalization borrowed from Chromium's URL\nlibrary, adapted and minified for the Envoy context. The following patches were\nproduced:\n* https://github.com/envoyproxy/envoy/commit/c22cfd2c483fc26534382a0b6835f45264bb137a\n* https://github.com/envoyproxy/envoy/commit/7ed6d2187df94c4cb96f7dccb8643bf764af2ccb\n\nIn both cases, the Envoy security team considered the issues of medium criticality (CVSS 6.5)\ninitially, since it was thought that the attack complexity was high, requiring special circumstances\nto apply. As we continued discussion with Istio and Google teams, it became apparent that the\nexploits were trivial to automate and we upgraded to high criticality (CVSS 8.3), due to the lower\nattack complexity.\n\nA 1.9.1 security release was initially targeted for 2019-04-02 and announced on 2019-03-22. 
An\ne-mail was sent to the Envoy private distributor list sharing CVE details.\n\nAfter private discussions with a distributor on 2019-03-28, who expressed concern over the very\nshort (3 working day) distance between fix patch availability and release, the Envoy security team\ndecided to delay the 1.9.1 release until 2019-04-05. This provided 1 week for distributors to\nprepare their software for the security release date.\n\nFix patches were shared with the private distributor list late on 2019-03-28.\n\nDuring the fix process, two distributors reached out to us to request the ability to stage in\npublicly accessible locations binary images with the fixes applied. While technically this would\nviolate embargo, we decided to allow this due to a lack of a clear alternative; Envoy's sidecar\nuse cases and reliance on Docker for distribution, where images are generally staged on public hubs,\ndid not lend itself to opaque rollout.\n\n## Detection\n\nThe underlying issue behind CVE-2019-9900 was first noticed via fuzzers when an explicit `ASSERT`\ncheck for embedded NULs was added in #6170. The following issue was tripped by\n`h1_capture_fuzz_test`:\nhttps://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13613. Some experiments with `netcat`, `tcpdump`\nand an Envoy binary demonstrated that it was viable to at least bypass header suffix matches via\nthis mechanism.\n\nCVE-2019-9901 was reported by an external researcher (Erlend Oftedal) to the Istio security team.\n\n## Action Items\n\n* https://github.com/envoyproxy/envoy/issues?utf8=%E2%9C%93&q=is%3Aissue+%22Action+item+for+CVE-2019-9900%22+\n* https://github.com/envoyproxy/envoy/issues?utf8=%E2%9C%93&q=is%3Aissue+%22Action+item+for+CVE-2019-9901%22+\n\n## Lessons Learned\n\n### What went well\n\n* Fix patches were available within 2 weeks of vulnerability disclosure. The\n  changes were localized and relatively clean.\n\n### What went wrong\n\n* The Envoy private distributor list was initially almost empty. 
We sent out an\n  e-mail to remind distributors to sign up on 2019-03-14 and the list is now O(10).\n\n* The security impact of https://github.com/envoyproxy/envoy/issues/6008\n  was not caught by Envoy until this was brought to our attention ~20 days after\n  the issue was first pushed. Ideally such issues should be routed to\n  envoy-security@googlegroups.com first in the future and Envoy\n  reviewers/maintainers should keep an eye out for inadvertent security\n  disclosures through public channels. In addition, an earlier issue\n  https://github.com/envoyproxy/envoy/issues/2956 was opened a year previous, but was not tagged as\n  being security sensitive.\n\n* Applicants for the private distributor list were turned down based on\n  membership criteria that was adopted from k8s. This is now being revisited in\n  https://github.com/envoyproxy/envoy/issues/6586.\n\n* Distributors were only provided 3 days from candidate fix patch availability\n  until public release at first. While this was extended to 1 week, even this\n  might be too little. This is now being codified in\n  https://github.com/envoyproxy/envoy/issues/6587.\n\n* The Chromium URL library was forked, minified and adapted to Envoy. This was\n  expedient but not a maintainable long term solution, see\n  https://github.com/envoyproxy/envoy/issues/6588.\n\n* Only coarse grained control over path normalization was provided, since this\n  was expedient and mitigated the vulnerability. 
We should provide finer grained\n  controls, see https://github.com/envoyproxy/envoy/issues/6589.\n\n* Our report to the Node.js security working group was lost due to\n  https://github.com/nodejs/security-wg/issues/454#issuecomment-481919759.\n  We should avoid this happening Envoy-side, see https://github.com/envoyproxy/envoy/issues/6590.\n  More generally, we should err on the side of reaching out over more channels\n  in the future, since it's unclear how effective any given disclosure channel\n  is.\n\n* The security release day (2019-04-05) was Friday PDT. We should pick a\n  globally friendly day-of-week, e.g. Tue-Thu, for security releases.\n\n* Nginx already had a CVE for path normalization\n  (https://www.rapid7.com/db/vulnerabilities/nginx-cve-2009-3898) similar to\n  CVE-2019-9901, but we did not know this until after the fact. We should audit\n  CVEs for similar class software, see\n  https://github.com/envoyproxy/envoy/issues/6592.\n\n* A distributor reached out to the security team for permission to perform\n  silent binary rollouts as discussed above. While in principle our relaxation\n  of the embargo policy applied to all distributors, an e-mail was not sent to\n  the list. This resulted in confusion when a second distributor observed this\n  rollout. We should ensure going forward that any policy relaxation during CVE\n  handling is clearly communicated across the board.\n\n* Public, albeit silent, staging of Docker images before the public security\n  release date was a necessary pragmatic tradeoff. We need to refine the\n  security release process to deal with this explicitly, see\n  https://github.com/envoyproxy/envoy/issues/6593.\n\n* The security release forced `envoy-dev:latest` back to the 1.9.1 release\n  branch. This should be fixed, see\n  https://github.com/envoyproxy/envoy/issues/6595.\n\n* There was a window of ~50 minutes between the release tagging of the Envoy\n  1.9.1 branch and availability of Docker images. 
Ideally we shrink this to\n  allow users to upgrade faster. See\n  https://github.com/envoyproxy/envoy/issues/6596.\n\n* The CVE-2019-9901 fix required either control plane or runtime changes. This\n  orchestration was not well suited to all deployment environments, so some\n  distributions, e.g. Istio, applied additional patches to enable at compile\n  time. Ideally we support control plane, runtime and CLI or compile-time fix\n  opt-in abilities.\n\n### Where we got lucky\n\n* The defects were not critical (by CVSS scoring and intuition) and (mostly)\n  privately disclosed. This provided an opportunity to exercise and refine the\n  Envoy security release process.\n\n* Huffman and HPACK in general frustrates HTTP/2 testing and fuzzing for security\n  properties. We had no effective fuzzing or testing for this previously as a\n  result, we were lucky that the scope of CVE-2019-9900 was limited to HTTP/1.1.\n\n* CVE-2019-9900 was only discovered as a result of additional `ASSERT`s added to\n  verify a property that Envoy developers were certain held. Fuzzing alone had\n  not previously caught this.\n\n* Distributors were able to execute their own security releases within the 1\n  week provided from patch availability. Anecdotally, this involved effort\n  beyond that which we should expect normally to manage an Envoy fix.\n\n* No known instances reported of pre-release embargo breakage due to silent\n  public staging of Docker images.\n\n## Timeline\n\nAll times US/Pacific\n\n2019-02-18:\n* [CVE-2019-9901] Path normalization issue was reported to Istio security team at vulnerabilities@discuss.istio.io.\n\n2019-02-19:\n* [CVE-2019-9901] https://github.com/envoyproxy/envoy/issues/6008 was opened. This was not the first Envoy report of\n  missing path normalization (see https://github.com/envoyproxy/envoy/issues/2956). 
Neither issue\n  mentioned the security basis and Envoy reviewers speculated on the potential for path traversal\n  attacks.\n\n2019-03-08:\n* [CVE-2019-9900] oss-fuzz reports https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13613 under embargo.\n\n2019-03-10:\n* [CVE-2019-9900] E-mail thread on envoy-security@googlegroups.com regarding the\n  potential effects of this bug. While it was unclear whether there would be\n  impact beyond some narrow circumstances, agreement was reached to start the\n  security release process. Analysis began to determine the extent of impact on\n  HTTP/2 and Envoy's code base was audited.\n\n2019-03-11:\n* [CVE-2019-9901] https://github.com/envoyproxy/envoy/pull/6258 was opened to address\n  https://github.com/envoyproxy/envoy/issues/6008.\n\n2019-03-13:\n* [CVE-2019-9901] https://github.com/envoyproxy/envoy/pull/6258 was closed after offline discussions\n  between Envoy security team and the author, once the Envoy security team became aware of the\n  potential severity in the Istio setup (in particular with RBAC and Mixer in play).\n\n2019-03-14:\n* [CVE-2019-9900] Findings were presented to envoy-security@. A fix plan was\n  agreed upon and a candidate fix PR was shared with the team by e-mail. At this\n  point, no private fix repository existed.\n* [CVE-2019-9901] The Istio fix leads initiated private work on a fix patch.\n  Since it was likely that this would land within the 1.9.1 release\n  window for CVE-2019-9900, CVE-2019-9901 was also scheduled for the release.\n* [Announcement](https://groups.google.com/forum/#!topic/envoy-announce/dEOLqAiaSUI) sent to remind\n  distributors to join cncf-envoy-distributors-announce@lists.cncf.io.\n\n2019-03-20:\n* CVEs were requested from MITRE for both issues.\n* Draft fix PRs for CVE-2019-9900 and CVE-2019-9901 were shared on private Envoy\n  security repository. 
Reviews and further development occurred over the\n  following week.\n\n2019-03-22:\n* 11:20 1.9.1 security release for the two vulnerabilities was\n  [announced](https://groups.google.com/d/msg/envoy-announce/6fwGB2TxB74/dKeURAdfAgAJ).\n* 11:24 CVE summary details shared with cncf-envoy-distributors-announce@lists.cncf.io.\n\n2019-03-28:\n* Envoy security team met with a distributor to discuss their concerns over the lack of time between\n  patch availability and the release date. We agreed that three days was insufficient and agreed to\n  extend to a week.\n* 13:53 A delay of the 1.9.1 release until 2019-04-05 was\n  [announced](https://groups.google.com/d/msg/envoy-announce/6fwGB2TxB74/Pe3PPFbPBAAJ).\n* 20:07 Candidate fix patches for both CVE shared with\n  cncf-envoy-distributors-announce@lists.cncf.io.\n\n2019-03-29:\n* Envoy security team was contacted by a distributor regarding the permissibility of silently\n  staging binary images in public locations in advance of the security release due to a lack of\n  viable alternatives. The Envoy security team agreed that there was no better alternative and\n  provided an exemption.\n\n2019-04-02:\n* 08:15 The increase of severity from medium to high was\n  [announced](https://groups.google.com/d/msg/envoy-announce/6fwGB2TxB74/qiDEgclFBgAJ).\n  This followed several days of offline discussion between Istio and Envoy teams\n  on Istio's independent assessment of the issues as high severity, and a better\n  awareness of how to score. What was missing was the intuition that a\n  vulnerability can be high severity even if it only affects a rather limited\n  number of users.\n\n2019-04-04:\n* 15:41 The Envoy master branch was frozen to prepare for the security release. PRs were rebased\n  against master and prepared for the release push.\n* 18:33 Envoy security team was contacted by a distributor who had noticed public visibility of\n  binary images with the fix patch by other vendors. 
After discussion, we agreed on a general\n  exemption for these CVEs to the embargo policy for binary images with some constraints.\n* 19:18 cncf-envoy-distributors-announce@lists.cncf.io was e-mailed to clarify position on staging\n  of binary images on public sites prior to the release date. A narrow set of circumstances under\n  which this was permissible were outlined.\n\n2019-04-05:\n* 10:00 - 10:05 The [v1.9.1](https://github.com/envoyproxy/envoy/tree/v1.9.1) release branch was\n  pushed and the 1.9.1 release was tagged. This started the Docker build process for the release.\n  The same PRs were pushed to master.\n* 10:05 The Envoy 1.9.1 security release was\n  [announced](https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!topic/envoy-announce/VoHfnDqZiAM).\n* 10:57 The v1.9.1 image was available at https://hub.docker.com/r/envoyproxy/envoy/tags.\n\n## Supporting information\n"
  },
  {
    "path": "security/postmortems/cve-2019-9901.md",
    "content": "See [cve-2019-9900.md](cve-2019-9900.md)\n"
  },
  {
    "path": "source/common/access_log/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"access_log_lib\",\n    srcs = [\"access_log_impl.cc\"],\n    hdrs = [\"access_log_impl.h\"],\n    external_deps = [\n        \"abseil_hash\",\n    ],\n    deps = [\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:access_log_config_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"access_log_manager_lib\",\n    srcs = [\"access_log_manager_impl.cc\"],\n    hdrs = [\"access_log_manager_impl.h\"],\n    deps = [\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/api:api_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:thread_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/access_log/access_log_impl.cc",
    "content": "#include \"common/access_log/access_log_impl.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/config/accesslog/v3/accesslog.pb.validate.h\"\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/metadata.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stream_info/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace AccessLog {\n\nComparisonFilter::ComparisonFilter(const envoy::config::accesslog::v3::ComparisonFilter& config,\n                                   Runtime::Loader& runtime)\n    : config_(config), runtime_(runtime) {}\n\nbool ComparisonFilter::compareAgainstValue(uint64_t lhs) const {\n  uint64_t value = config_.value().default_value();\n\n  if (!config_.value().runtime_key().empty()) {\n    value = runtime_.snapshot().getInteger(config_.value().runtime_key(), value);\n  }\n\n  switch (config_.op()) {\n  case envoy::config::accesslog::v3::ComparisonFilter::GE:\n    return lhs >= value;\n  case envoy::config::accesslog::v3::ComparisonFilter::EQ:\n    return lhs == value;\n  case envoy::config::accesslog::v3::ComparisonFilter::LE:\n    return lhs <= value;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nFilterPtr FilterFactory::fromProto(const envoy::config::accesslog::v3::AccessLogFilter& config,\n                                   Runtime::Loader& runtime, Random::RandomGenerator& random,\n                                   
ProtobufMessage::ValidationVisitor& validation_visitor) {\n  switch (config.filter_specifier_case()) {\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kStatusCodeFilter:\n    return FilterPtr{new StatusCodeFilter(config.status_code_filter(), runtime)};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kDurationFilter:\n    return FilterPtr{new DurationFilter(config.duration_filter(), runtime)};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kNotHealthCheckFilter:\n    return FilterPtr{new NotHealthCheckFilter()};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kTraceableFilter:\n    return FilterPtr{new TraceableRequestFilter()};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kRuntimeFilter:\n    return FilterPtr{new RuntimeFilter(config.runtime_filter(), runtime, random)};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kAndFilter:\n    return FilterPtr{new AndFilter(config.and_filter(), runtime, random, validation_visitor)};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kOrFilter:\n    return FilterPtr{new OrFilter(config.or_filter(), runtime, random, validation_visitor)};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kHeaderFilter:\n    return FilterPtr{new HeaderFilter(config.header_filter())};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kResponseFlagFilter:\n    MessageUtil::validate(config, validation_visitor);\n    return FilterPtr{new ResponseFlagFilter(config.response_flag_filter())};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kGrpcStatusFilter:\n    MessageUtil::validate(config, validation_visitor);\n    return FilterPtr{new GrpcStatusFilter(config.grpc_status_filter())};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kMetadataFilter:\n    
return FilterPtr{new MetadataFilter(config.metadata_filter())};\n  case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kExtensionFilter:\n    MessageUtil::validate(config, validation_visitor);\n    {\n      auto& factory =\n          Config::Utility::getAndCheckFactory<ExtensionFilterFactory>(config.extension_filter());\n      return factory.createFilter(config.extension_filter(), runtime, random);\n    }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nbool TraceableRequestFilter::evaluate(const StreamInfo::StreamInfo& info,\n                                      const Http::RequestHeaderMap& request_headers,\n                                      const Http::ResponseHeaderMap&,\n                                      const Http::ResponseTrailerMap&) const {\n  Tracing::Decision decision = Tracing::HttpTracerUtility::isTracing(info, request_headers);\n\n  return decision.traced && decision.reason == Tracing::Reason::ServiceForced;\n}\n\nbool StatusCodeFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&,\n                                const Http::ResponseHeaderMap&,\n                                const Http::ResponseTrailerMap&) const {\n  if (!info.responseCode()) {\n    return compareAgainstValue(0ULL);\n  }\n\n  return compareAgainstValue(info.responseCode().value());\n}\n\nbool DurationFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&,\n                              const Http::ResponseHeaderMap&,\n                              const Http::ResponseTrailerMap&) const {\n  absl::optional<std::chrono::nanoseconds> final = info.requestComplete();\n  ASSERT(final);\n\n  return compareAgainstValue(\n      std::chrono::duration_cast<std::chrono::milliseconds>(final.value()).count());\n}\n\nRuntimeFilter::RuntimeFilter(const envoy::config::accesslog::v3::RuntimeFilter& config,\n                             Runtime::Loader& runtime, Random::RandomGenerator& random)\n    : 
runtime_(runtime), random_(random), runtime_key_(config.runtime_key()),\n      percent_(config.percent_sampled()),\n      use_independent_randomness_(config.use_independent_randomness()) {}\n\nbool RuntimeFilter::evaluate(const StreamInfo::StreamInfo& stream_info,\n                             const Http::RequestHeaderMap& request_headers,\n                             const Http::ResponseHeaderMap&,\n                             const Http::ResponseTrailerMap&) const {\n  auto rid_extension = stream_info.getRequestIDExtension();\n  uint64_t random_value;\n  if (use_independent_randomness_ ||\n      !rid_extension->modBy(\n          request_headers, random_value,\n          ProtobufPercentHelper::fractionalPercentDenominatorToInt(percent_.denominator()))) {\n    random_value = random_.random();\n  }\n\n  return runtime_.snapshot().featureEnabled(\n      runtime_key_, percent_.numerator(), random_value,\n      ProtobufPercentHelper::fractionalPercentDenominatorToInt(percent_.denominator()));\n}\n\nOperatorFilter::OperatorFilter(\n    const Protobuf::RepeatedPtrField<envoy::config::accesslog::v3::AccessLogFilter>& configs,\n    Runtime::Loader& runtime, Random::RandomGenerator& random,\n    ProtobufMessage::ValidationVisitor& validation_visitor) {\n  for (const auto& config : configs) {\n    filters_.emplace_back(FilterFactory::fromProto(config, runtime, random, validation_visitor));\n  }\n}\n\nOrFilter::OrFilter(const envoy::config::accesslog::v3::OrFilter& config, Runtime::Loader& runtime,\n                   Random::RandomGenerator& random,\n                   ProtobufMessage::ValidationVisitor& validation_visitor)\n    : OperatorFilter(config.filters(), runtime, random, validation_visitor) {}\n\nAndFilter::AndFilter(const envoy::config::accesslog::v3::AndFilter& config,\n                     Runtime::Loader& runtime, Random::RandomGenerator& random,\n                     ProtobufMessage::ValidationVisitor& validation_visitor)\n    : 
OperatorFilter(config.filters(), runtime, random, validation_visitor) {}\n\nbool OrFilter::evaluate(const StreamInfo::StreamInfo& info,\n                        const Http::RequestHeaderMap& request_headers,\n                        const Http::ResponseHeaderMap& response_headers,\n                        const Http::ResponseTrailerMap& response_trailers) const {\n  bool result = false;\n  for (auto& filter : filters_) {\n    result |= filter->evaluate(info, request_headers, response_headers, response_trailers);\n\n    if (result) {\n      break;\n    }\n  }\n\n  return result;\n}\n\nbool AndFilter::evaluate(const StreamInfo::StreamInfo& info,\n                         const Http::RequestHeaderMap& request_headers,\n                         const Http::ResponseHeaderMap& response_headers,\n                         const Http::ResponseTrailerMap& response_trailers) const {\n  bool result = true;\n  for (auto& filter : filters_) {\n    result &= filter->evaluate(info, request_headers, response_headers, response_trailers);\n\n    if (!result) {\n      break;\n    }\n  }\n\n  return result;\n}\n\nbool NotHealthCheckFilter::evaluate(const StreamInfo::StreamInfo& info,\n                                    const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                    const Http::ResponseTrailerMap&) const {\n  return !info.healthCheck();\n}\n\nHeaderFilter::HeaderFilter(const envoy::config::accesslog::v3::HeaderFilter& config)\n    : header_data_(std::make_unique<Http::HeaderUtility::HeaderData>(config.header())) {}\n\nbool HeaderFilter::evaluate(const StreamInfo::StreamInfo&,\n                            const Http::RequestHeaderMap& request_headers,\n                            const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) const {\n  return Http::HeaderUtility::matchHeaders(request_headers, *header_data_);\n}\n\nResponseFlagFilter::ResponseFlagFilter(\n    const envoy::config::accesslog::v3::ResponseFlagFilter& 
config) {\n  for (int i = 0; i < config.flags_size(); i++) {\n    absl::optional<StreamInfo::ResponseFlag> response_flag =\n        StreamInfo::ResponseFlagUtils::toResponseFlag(config.flags(i));\n    // The config has been validated. Therefore, every flag in the config will have a mapping.\n    ASSERT(response_flag.has_value());\n    configured_flags_ |= response_flag.value();\n  }\n}\n\nbool ResponseFlagFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&,\n                                  const Http::ResponseHeaderMap&,\n                                  const Http::ResponseTrailerMap&) const {\n  if (configured_flags_ != 0) {\n    return info.intersectResponseFlags(configured_flags_);\n  }\n  return info.hasAnyResponseFlag();\n}\n\nGrpcStatusFilter::GrpcStatusFilter(const envoy::config::accesslog::v3::GrpcStatusFilter& config) {\n  for (int i = 0; i < config.statuses_size(); i++) {\n    statuses_.insert(protoToGrpcStatus(config.statuses(i)));\n  }\n\n  exclude_ = config.exclude();\n}\n\nbool GrpcStatusFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&,\n                                const Http::ResponseHeaderMap& response_headers,\n                                const Http::ResponseTrailerMap& response_trailers) const {\n\n  Grpc::Status::GrpcStatus status = Grpc::Status::WellKnownGrpcStatus::Unknown;\n  const auto& optional_status =\n      Grpc::Common::getGrpcStatus(response_trailers, response_headers, info);\n  if (optional_status.has_value()) {\n    status = optional_status.value();\n  }\n\n  const bool found = statuses_.find(status) != statuses_.end();\n  return exclude_ ? 
!found : found;\n}\n\nGrpc::Status::GrpcStatus GrpcStatusFilter::protoToGrpcStatus(\n    envoy::config::accesslog::v3::GrpcStatusFilter::Status status) const {\n  return static_cast<Grpc::Status::GrpcStatus>(status);\n}\n\nMetadataFilter::MetadataFilter(const envoy::config::accesslog::v3::MetadataFilter& filter_config)\n    : default_match_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(filter_config, match_if_key_not_found, true)),\n      filter_(filter_config.matcher().filter()) {\n\n  if (filter_config.has_matcher()) {\n    auto& matcher_config = filter_config.matcher();\n\n    for (const auto& seg : matcher_config.path()) {\n      path_.push_back(seg.key());\n    }\n\n    // Matches if the value equals the configured 'MetadataMatcher' value.\n    const auto& val = matcher_config.value();\n    value_matcher_ = Matchers::ValueMatcher::create(val);\n  }\n\n  // Matches if the value is present in dynamic metadata\n  auto present_val = envoy::type::matcher::v3::ValueMatcher();\n  present_val.set_present_match(true);\n  present_matcher_ = Matchers::ValueMatcher::create(present_val);\n}\n\nbool MetadataFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&,\n                              const Http::ResponseHeaderMap&,\n                              const Http::ResponseTrailerMap&) const {\n  const auto& value =\n      Envoy::Config::Metadata::metadataValue(&info.dynamicMetadata(), filter_, path_);\n  // If the key corresponds to a set value in dynamic metadata, return true if the value matches the\n  // the configured 'MetadataMatcher' value and false otherwise\n  if (present_matcher_->match(value)) {\n    return value_matcher_ && value_matcher_->match(value);\n  }\n\n  // If the key does not correspond to a set value in dynamic metadata, return true if\n  // 'match_if_key_not_found' is set to true and false otherwise\n  return default_match_;\n}\n\nInstanceSharedPtr AccessLogFactory::fromProto(const envoy::config::accesslog::v3::AccessLog& config,\n     
                                         Server::Configuration::FactoryContext& context) {\n  FilterPtr filter;\n  if (config.has_filter()) {\n    filter = FilterFactory::fromProto(config.filter(), context.runtime(),\n                                      context.api().randomGenerator(),\n                                      context.messageValidationVisitor());\n  }\n\n  auto& factory =\n      Config::Utility::getAndCheckFactory<Server::Configuration::AccessLogInstanceFactory>(config);\n  ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(\n      config, context.messageValidationVisitor(), factory);\n\n  return factory.createAccessLogInstance(*message, std::move(filter), context);\n}\n\n} // namespace AccessLog\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/access_log/access_log_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/access_log_config.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/grpc/status.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/hash/hash.h\"\n\nnamespace Envoy {\nnamespace AccessLog {\n\n/**\n * Access log filter factory that reads from proto.\n */\nclass FilterFactory {\npublic:\n  /**\n   * Read a filter definition from proto and instantiate a concrete filter class.\n   */\n  static FilterPtr fromProto(const envoy::config::accesslog::v3::AccessLogFilter& config,\n                             Runtime::Loader& runtime, Random::RandomGenerator& random,\n                             ProtobufMessage::ValidationVisitor& validation_visitor);\n};\n\n/**\n * Base implementation of an access log filter that performs comparisons.\n */\nclass ComparisonFilter : public Filter {\nprotected:\n  ComparisonFilter(const envoy::config::accesslog::v3::ComparisonFilter& config,\n                   Runtime::Loader& runtime);\n\n  bool compareAgainstValue(uint64_t lhs) const;\n\n  envoy::config::accesslog::v3::ComparisonFilter config_;\n  Runtime::Loader& runtime_;\n};\n\n/**\n * Filter on response status code.\n */\nclass StatusCodeFilter : public ComparisonFilter {\npublic:\n  StatusCodeFilter(const envoy::config::accesslog::v3::StatusCodeFilter& config,\n                   Runtime::Loader& runtime)\n      : ComparisonFilter(config.comparison(), runtime) {}\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n          
      const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n};\n\n/**\n * Filter on total request/response duration.\n */\nclass DurationFilter : public ComparisonFilter {\npublic:\n  DurationFilter(const envoy::config::accesslog::v3::DurationFilter& config,\n                 Runtime::Loader& runtime)\n      : ComparisonFilter(config.comparison(), runtime) {}\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n};\n\n/**\n * Base operator filter, compose other filters with operation\n */\nclass OperatorFilter : public Filter {\npublic:\n  OperatorFilter(\n      const Protobuf::RepeatedPtrField<envoy::config::accesslog::v3::AccessLogFilter>& configs,\n      Runtime::Loader& runtime, Random::RandomGenerator& random,\n      ProtobufMessage::ValidationVisitor& validation_visitor);\n\nprotected:\n  std::vector<FilterPtr> filters_;\n};\n\n/**\n * *And* operator filter, apply logical *and* operation to all of the sub filters.\n */\nclass AndFilter : public OperatorFilter {\npublic:\n  AndFilter(const envoy::config::accesslog::v3::AndFilter& config, Runtime::Loader& runtime,\n            Random::RandomGenerator& random,\n            ProtobufMessage::ValidationVisitor& validation_visitor);\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n};\n\n/**\n * *Or* operator filter, apply logical *or* operation to all of the sub filters.\n */\nclass OrFilter : public OperatorFilter {\npublic:\n  OrFilter(const envoy::config::accesslog::v3::OrFilter& config, 
Runtime::Loader& runtime,\n           Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor);\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n};\n\n/**\n * Filter out health check requests.\n */\nclass NotHealthCheckFilter : public Filter {\npublic:\n  NotHealthCheckFilter() = default;\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n};\n\n/**\n * Filter traceable requests.\n */\nclass TraceableRequestFilter : public Filter {\npublic:\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n};\n\n/**\n * Filter that uses a runtime feature key to check if the log should be written.\n */\nclass RuntimeFilter : public Filter {\npublic:\n  RuntimeFilter(const envoy::config::accesslog::v3::RuntimeFilter& config, Runtime::Loader& runtime,\n                Random::RandomGenerator& random);\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n\nprivate:\n  Runtime::Loader& runtime_;\n  Random::RandomGenerator& random_;\n  const std::string runtime_key_;\n  const envoy::type::v3::FractionalPercent percent_;\n  const bool 
use_independent_randomness_;\n};\n\n/**\n * Filter based on headers.\n */\nclass HeaderFilter : public Filter {\npublic:\n  HeaderFilter(const envoy::config::accesslog::v3::HeaderFilter& config);\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n\nprivate:\n  const Http::HeaderUtility::HeaderDataPtr header_data_;\n};\n\n/**\n * Filter requests that had a response with an Envoy response flag set.\n */\nclass ResponseFlagFilter : public Filter {\npublic:\n  ResponseFlagFilter(const envoy::config::accesslog::v3::ResponseFlagFilter& config);\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n\nprivate:\n  uint64_t configured_flags_{};\n};\n\n/**\n * Filters requests that have a response with a gRPC status. 
Because the gRPC protocol does not\n * guarantee a gRPC status code, if a gRPC status code is not available, then the filter will infer\n * the gRPC status code from an HTTP status code if available.\n */\nclass GrpcStatusFilter : public Filter {\npublic:\n  using GrpcStatusHashSet =\n      absl::node_hash_set<Grpc::Status::GrpcStatus, absl::Hash<Grpc::Status::GrpcStatus>>;\n\n  GrpcStatusFilter(const envoy::config::accesslog::v3::GrpcStatusFilter& config);\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n\nprivate:\n  GrpcStatusHashSet statuses_;\n  bool exclude_;\n\n  /**\n   * Converts a Protobuf representation of a gRPC status into the equivalent code version of a gRPC\n   * status.\n   */\n  Grpc::Status::GrpcStatus\n  protoToGrpcStatus(envoy::config::accesslog::v3::GrpcStatusFilter::Status status) const;\n};\n\n/**\n * Filters requests based on dynamic metadata\n */\nclass MetadataFilter : public Filter {\npublic:\n  MetadataFilter(const envoy::config::accesslog::v3::MetadataFilter& filter_config);\n\n  bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n                const Http::ResponseHeaderMap& response_headers,\n                const Http::ResponseTrailerMap& response_trailers) const override;\n\nprivate:\n  Matchers::ValueMatcherConstSharedPtr present_matcher_;\n  Matchers::ValueMatcherConstSharedPtr value_matcher_;\n\n  std::vector<std::string> path_;\n\n  const bool default_match_;\n  const std::string filter_;\n};\n\n/**\n * Extension filter factory that reads from ExtensionFilter proto.\n */\nclass ExtensionFilterFactory : public Config::TypedFactory {\npublic:\n  ~ExtensionFilterFactory() override = default;\n\n  /**\n   * Create a particular extension filter implementation 
from a config proto. If the\n   * implementation is unable to produce a filter with the provided parameters, it should throw an\n   * EnvoyException. The returned pointer should never be nullptr.\n   * @param config supplies the custom configuration for this filter type.\n   * @param runtime supplies the runtime loader.\n   * @param random supplies the random generator.\n   * @return an instance of extension filter implementation from a config proto.\n   */\n  virtual FilterPtr createFilter(const envoy::config::accesslog::v3::ExtensionFilter& config,\n                                 Runtime::Loader& runtime, Random::RandomGenerator& random) PURE;\n\n  std::string category() const override { return \"envoy.access_logger.extension_filters\"; }\n};\n\n/**\n * Access log factory that reads the configuration from proto.\n */\nclass AccessLogFactory {\npublic:\n  /**\n   * Read a filter definition from proto and instantiate an Instance.\n   */\n  static InstanceSharedPtr fromProto(const envoy::config::accesslog::v3::AccessLog& config,\n                                     Server::Configuration::FactoryContext& context);\n};\n\n} // namespace AccessLog\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/access_log/access_log_manager_impl.cc",
    "content": "#include \"common/access_log/access_log_manager_impl.h\"\n\n#include <string>\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/lock_guard.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace AccessLog {\n\nAccessLogManagerImpl::~AccessLogManagerImpl() {\n  for (auto& [log_key, log_file_ptr] : access_logs_) {\n    ENVOY_LOG(debug, \"destroying access logger {}\", log_key);\n    log_file_ptr.reset();\n  }\n  ENVOY_LOG(debug, \"destroyed access loggers\");\n}\n\nvoid AccessLogManagerImpl::reopen() {\n  for (auto& iter : access_logs_) {\n    iter.second->reopen();\n  }\n}\n\nAccessLogFileSharedPtr AccessLogManagerImpl::createAccessLog(const std::string& file_name) {\n  if (access_logs_.count(file_name)) {\n    return access_logs_[file_name];\n  }\n\n  access_logs_[file_name] = std::make_shared<AccessLogFileImpl>(\n      api_.fileSystem().createFile(file_name), dispatcher_, lock_, file_stats_,\n      file_flush_interval_msec_, api_.threadFactory());\n  return access_logs_[file_name];\n}\n\nAccessLogFileImpl::AccessLogFileImpl(Filesystem::FilePtr&& file, Event::Dispatcher& dispatcher,\n                                     Thread::BasicLockable& lock, AccessLogFileStats& stats,\n                                     std::chrono::milliseconds flush_interval_msec,\n                                     Thread::ThreadFactory& thread_factory)\n    : file_(std::move(file)), file_lock_(lock),\n      flush_timer_(dispatcher.createTimer([this]() -> void {\n        stats_.flushed_by_timer_.inc();\n        flush_event_.notifyOne();\n        flush_timer_->enableTimer(flush_interval_msec_);\n      })),\n      thread_factory_(thread_factory), flush_interval_msec_(flush_interval_msec), stats_(stats) {\n  open();\n}\n\nFilesystem::FlagSet AccessLogFileImpl::defaultFlags() {\n  static constexpr Filesystem::FlagSet default_flags{1 << Filesystem::File::Operation::Write |\n                            
                         1 << Filesystem::File::Operation::Create |\n                                                     1 << Filesystem::File::Operation::Append};\n\n  return default_flags;\n}\n\nvoid AccessLogFileImpl::open() {\n  const Api::IoCallBoolResult result = file_->open(defaultFlags());\n  if (!result.rc_) {\n    throw EnvoyException(\n        fmt::format(\"unable to open file '{}': {}\", file_->path(), result.err_->getErrorDetails()));\n  }\n}\n\nvoid AccessLogFileImpl::reopen() { reopen_file_ = true; }\n\nAccessLogFileImpl::~AccessLogFileImpl() {\n  {\n    Thread::LockGuard lock(write_lock_);\n    flush_thread_exit_ = true;\n    flush_event_.notifyOne();\n  }\n\n  if (flush_thread_ != nullptr) {\n    flush_thread_->join();\n  }\n\n  // Flush any remaining data. If file was not opened for some reason, skip flushing part.\n  if (file_->isOpen()) {\n    if (flush_buffer_.length() > 0) {\n      doWrite(flush_buffer_);\n    }\n\n    const Api::IoCallBoolResult result = file_->close();\n    ASSERT(result.rc_, fmt::format(\"unable to close file '{}': {}\", file_->path(),\n                                   result.err_->getErrorDetails()));\n  }\n}\n\nvoid AccessLogFileImpl::doWrite(Buffer::Instance& buffer) {\n  Buffer::RawSliceVector slices = buffer.getRawSlices();\n\n  // We must do the actual writes to disk under lock, so that we don't intermix chunks from\n  // different AccessLogFileImpl pointing to the same underlying file. This can happen either via\n  // hot restart or if calling code opens the same underlying file into a different\n  // AccessLogFileImpl in the same process.\n  // TODO PERF: Currently, we use a single cross process lock to serialize all disk writes. This\n  //            will never block network workers, but does mean that only a single flush thread can\n  //            actually flush to disk. 
In the future it would be nice if we did away with the cross\n  //            process lock or had multiple locks.\n  {\n    Thread::LockGuard lock(file_lock_);\n    for (const Buffer::RawSlice& slice : slices) {\n      absl::string_view data(static_cast<char*>(slice.mem_), slice.len_);\n      const Api::IoCallSizeResult result = file_->write(data);\n      if (result.ok() && result.rc_ == static_cast<ssize_t>(slice.len_)) {\n        stats_.write_completed_.inc();\n      } else {\n        // Probably disk full.\n        stats_.write_failed_.inc();\n      }\n    }\n  }\n\n  stats_.write_total_buffered_.sub(buffer.length());\n  buffer.drain(buffer.length());\n}\n\nvoid AccessLogFileImpl::flushThreadFunc() {\n\n  while (true) {\n    std::unique_lock<Thread::BasicLockable> flush_lock;\n\n    {\n      Thread::LockGuard write_lock(write_lock_);\n\n      // flush_event_ can be woken up either by large enough flush_buffer or by timer.\n      // In case it was timer, flush_buffer_ can be empty.\n      while (flush_buffer_.length() == 0 && !flush_thread_exit_ && !reopen_file_) {\n        // CondVar::wait() does not throw, so it's safe to pass the mutex rather than the guard.\n        flush_event_.wait(write_lock_);\n      }\n\n      if (flush_thread_exit_) {\n        return;\n      }\n\n      flush_lock = std::unique_lock<Thread::BasicLockable>(flush_lock_);\n      about_to_write_buffer_.move(flush_buffer_);\n      ASSERT(flush_buffer_.length() == 0);\n    }\n\n    // if we failed to open file before, then simply ignore\n    if (file_->isOpen()) {\n      try {\n        if (reopen_file_) {\n          reopen_file_ = false;\n          const Api::IoCallBoolResult result = file_->close();\n          ASSERT(result.rc_, fmt::format(\"unable to close file '{}': {}\", file_->path(),\n                                         result.err_->getErrorDetails()));\n          open();\n        }\n\n        doWrite(about_to_write_buffer_);\n      } catch (const EnvoyException&) {\n        
stats_.reopen_failed_.inc();\n      }\n    }\n  }\n}\n\nvoid AccessLogFileImpl::flush() {\n  std::unique_lock<Thread::BasicLockable> flush_buffer_lock;\n\n  {\n    Thread::LockGuard write_lock(write_lock_);\n\n    // flush_lock_ must be held while checking this or else it is\n    // possible that flushThreadFunc() has already moved data from\n    // flush_buffer_ to about_to_write_buffer_, has unlocked write_lock_,\n    // but has not yet completed doWrite(). This would allow flush() to\n    // return before the pending data has actually been written to disk.\n    flush_buffer_lock = std::unique_lock<Thread::BasicLockable>(flush_lock_);\n\n    if (flush_buffer_.length() == 0) {\n      return;\n    }\n\n    about_to_write_buffer_.move(flush_buffer_);\n    ASSERT(flush_buffer_.length() == 0);\n  }\n\n  doWrite(about_to_write_buffer_);\n}\n\nvoid AccessLogFileImpl::write(absl::string_view data) {\n  Thread::LockGuard lock(write_lock_);\n\n  if (flush_thread_ == nullptr) {\n    createFlushStructures();\n  }\n\n  stats_.write_buffered_.inc();\n  stats_.write_total_buffered_.add(data.length());\n  flush_buffer_.add(data.data(), data.size());\n  if (flush_buffer_.length() > MIN_FLUSH_SIZE) {\n    flush_event_.notifyOne();\n  }\n}\n\nvoid AccessLogFileImpl::createFlushStructures() {\n  flush_thread_ = thread_factory_.createThread([this]() -> void { flushThreadFunc(); },\n                                               Thread::Options{\"AccessLogFlush\"});\n  flush_timer_->enableTimer(flush_interval_msec_);\n}\n\n} // namespace AccessLog\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/access_log/access_log_manager_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/api/api.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stats/store.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\n\n#define ACCESS_LOG_FILE_STATS(COUNTER, GAUGE)                                                      \\\n  COUNTER(flushed_by_timer)                                                                        \\\n  COUNTER(reopen_failed)                                                                           \\\n  COUNTER(write_buffered)                                                                          \\\n  COUNTER(write_completed)                                                                         \\\n  COUNTER(write_failed)                                                                            \\\n  GAUGE(write_total_buffered, Accumulate)\n\nstruct AccessLogFileStats {\n  ACCESS_LOG_FILE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\nnamespace AccessLog {\n\nclass AccessLogManagerImpl : public AccessLogManager, Logger::Loggable<Logger::Id::main> {\npublic:\n  AccessLogManagerImpl(std::chrono::milliseconds file_flush_interval_msec, Api::Api& api,\n                       Event::Dispatcher& dispatcher, Thread::BasicLockable& lock,\n                       Stats::Store& stats_store)\n      : file_flush_interval_msec_(file_flush_interval_msec), api_(api), dispatcher_(dispatcher),\n        lock_(lock), file_stats_{\n                         ACCESS_LOG_FILE_STATS(POOL_COUNTER_PREFIX(stats_store, \"filesystem.\"),\n                                               POOL_GAUGE_PREFIX(stats_store, \"filesystem.\"))} {}\n  ~AccessLogManagerImpl() override;\n\n  // 
AccessLog::AccessLogManager\n  void reopen() override;\n  AccessLogFileSharedPtr createAccessLog(const std::string& file_name) override;\n\nprivate:\n  const std::chrono::milliseconds file_flush_interval_msec_;\n  Api::Api& api_;\n  Event::Dispatcher& dispatcher_;\n  Thread::BasicLockable& lock_;\n  AccessLogFileStats file_stats_;\n  absl::node_hash_map<std::string, AccessLogFileSharedPtr> access_logs_;\n};\n\n/**\n * This is a file implementation geared for writing out access logs. It turn out that in certain\n * cases even if a standard file is opened with O_NONBLOCK, the kernel can still block when writing.\n * This implementation uses a flush thread per file, with the idea there aren't that many\n * files. If this turns out to be a good implementation we can potentially have a single flush\n * thread that flushes all files, but we will start with this.\n */\nclass AccessLogFileImpl : public AccessLogFile {\npublic:\n  AccessLogFileImpl(Filesystem::FilePtr&& file, Event::Dispatcher& dispatcher,\n                    Thread::BasicLockable& lock, AccessLogFileStats& stats,\n                    std::chrono::milliseconds flush_interval_msec,\n                    Thread::ThreadFactory& thread_factory);\n  ~AccessLogFileImpl() override;\n\n  // AccessLog::AccessLogFile\n  void write(absl::string_view data) override;\n\n  /**\n   * Reopen file asynchronously.\n   * This only sets reopen flag, actual reopen operation is delayed.\n   * Reopen happens before the next write operation.\n   */\n  void reopen() override;\n  void flush() override;\n\nprivate:\n  void doWrite(Buffer::Instance& buffer);\n  void flushThreadFunc();\n  void open();\n  void createFlushStructures();\n\n  // return default flags set which used by open\n  static Filesystem::FlagSet defaultFlags();\n\n  // Minimum size before the flush thread will be told to flush.\n  static const uint64_t MIN_FLUSH_SIZE = 1024 * 64;\n\n  Filesystem::FilePtr file_;\n\n  // These locks are always acquired in the following 
order if multiple locks are held:\n  //    1) write_lock_\n  //    2) flush_lock_\n  //    3) file_lock_\n  Thread::BasicLockable& file_lock_;      // This lock is used only by the flush thread when writing\n                                          // to disk. This is used to make sure that file blocks do\n                                          // not get interleaved by multiple processes writing to\n                                          // the same file during hot-restart.\n  Thread::MutexBasicLockable flush_lock_; // This lock is used to prevent simultaneous flushes from\n                                          // the flush thread and a synchronous flush. This protects\n                                          // concurrent access to the about_to_write_buffer_, fd_,\n                                          // and all other data used during flushing and file\n                                          // re-opening.\n  Thread::MutexBasicLockable\n      write_lock_; // The lock is used when filling the flush buffer. It allows\n                   // multiple threads to write to the same file at relatively\n                   // high performance. It is always local to the process.\n  Thread::ThreadPtr flush_thread_;\n  Thread::CondVar flush_event_;\n  std::atomic<bool> flush_thread_exit_{};\n  std::atomic<bool> reopen_file_{};\n  Buffer::OwnedImpl\n      flush_buffer_ ABSL_GUARDED_BY(write_lock_); // This buffer is used by multiple threads. It\n                                                  // gets filled and then flushed either when max\n                                                  // size is reached or when a timer fires.\n  // TODO(jmarantz): this should be ABSL_GUARDED_BY(flush_lock_) but the analysis cannot poke\n  // through the std::make_unique assignment. 
I do not believe it's possible to annotate this\n  // properly now due to limitations in the clang thread annotation analysis.\n  Buffer::OwnedImpl about_to_write_buffer_; // This buffer is used only by the flush thread. Data\n                                            // is moved from flush_buffer_ under lock, and then\n                                            // the lock is released so that flush_buffer_ can\n                                            // continue to fill. This buffer is then used for the\n                                            // final write to disk.\n  Event::TimerPtr flush_timer_;\n  Thread::ThreadFactory& thread_factory_;\n  const std::chrono::milliseconds flush_interval_msec_; // Time interval buffer gets flushed no\n                                                        // matter if it reached the MIN_FLUSH_SIZE\n                                                        // or not.\n  AccessLogFileStats& stats_;\n};\n\n} // namespace AccessLog\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n    \"envoy_select_hot_restart\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"api_lib\",\n    srcs = [\"api_impl.cc\"],\n    hdrs = [\"api_impl.h\"],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:socket_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"os_sys_calls_lib\",\n    srcs = select({\n        \"//bazel:linux\": [\n            \"posix/os_sys_calls_impl.cc\",\n            \"posix/os_sys_calls_impl_linux.cc\",\n        ],\n        \"//bazel:windows_x86_64\": [\"win32/os_sys_calls_impl.cc\"],\n        \"//conditions:default\": [\"posix/os_sys_calls_impl.cc\"],\n    }) + envoy_select_hot_restart([\"posix/os_sys_calls_impl_hot_restart.cc\"]),\n    hdrs = select({\n        \"//bazel:linux\": [\n            \"posix/os_sys_calls_impl.h\",\n            \"posix/os_sys_calls_impl_linux.h\",\n        ],\n        \"//bazel:windows_x86_64\": [\"win32/os_sys_calls_impl.h\"],\n        \"//conditions:default\": [\"posix/os_sys_calls_impl.h\"],\n    }) + envoy_select_hot_restart([\"posix/os_sys_calls_impl_hot_restart.h\"]),\n    strip_include_prefix = select({\n        \"//bazel:windows_x86_64\": \"win32\",\n        \"//conditions:default\": \"posix\",\n    }),\n    deps = [\n        \"//include/envoy/api:os_sys_calls_interface\",\n        \"//source/common/singleton:threadsafe_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/common/api/api_impl.cc",
    "content": "#include \"common/api/api_impl.h\"\n\n#include <chrono>\n#include <string>\n\n#include \"common/common/thread.h\"\n#include \"common/event/dispatcher_impl.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nImpl::Impl(Thread::ThreadFactory& thread_factory, Stats::Store& store,\n           Event::TimeSystem& time_system, Filesystem::Instance& file_system,\n           Random::RandomGenerator& random_generator, const ProcessContextOptRef& process_context)\n    : thread_factory_(thread_factory), store_(store), time_system_(time_system),\n      file_system_(file_system), random_generator_(random_generator),\n      process_context_(process_context) {}\n\nEvent::DispatcherPtr Impl::allocateDispatcher(const std::string& name) {\n  return std::make_unique<Event::DispatcherImpl>(name, *this, time_system_);\n}\n\nEvent::DispatcherPtr Impl::allocateDispatcher(const std::string& name,\n                                              Buffer::WatermarkFactoryPtr&& factory) {\n  return std::make_unique<Event::DispatcherImpl>(name, std::move(factory), *this, time_system_);\n}\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/api_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/network/socket.h\"\n#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\nnamespace Api {\n\n/**\n * Implementation of Api::Api\n */\nclass Impl : public Api {\npublic:\n  Impl(Thread::ThreadFactory& thread_factory, Stats::Store& store, Event::TimeSystem& time_system,\n       Filesystem::Instance& file_system, Random::RandomGenerator& random_generator,\n       const ProcessContextOptRef& process_context = absl::nullopt);\n\n  // Api::Api\n  Event::DispatcherPtr allocateDispatcher(const std::string& name) override;\n  Event::DispatcherPtr allocateDispatcher(const std::string& name,\n                                          Buffer::WatermarkFactoryPtr&& watermark_factory) override;\n  Thread::ThreadFactory& threadFactory() override { return thread_factory_; }\n  Filesystem::Instance& fileSystem() override { return file_system_; }\n  TimeSource& timeSource() override { return time_system_; }\n  const Stats::Scope& rootScope() override { return store_; }\n  Random::RandomGenerator& randomGenerator() override { return random_generator_; }\n  ProcessContextOptRef processContext() override { return process_context_; }\n\nprivate:\n  Thread::ThreadFactory& thread_factory_;\n  Stats::Store& store_;\n  Event::TimeSystem& time_system_;\n  Filesystem::Instance& file_system_;\n  Random::RandomGenerator& random_generator_;\n  ProcessContextOptRef process_context_;\n};\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/posix/os_sys_calls_impl.cc",
    "content": "#include <fcntl.h>\n#include <sys/stat.h>\n#include <unistd.h>\n\n#include <cerrno>\n#include <string>\n\n#include \"common/api/os_sys_calls_impl.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nSysCallIntResult OsSysCallsImpl::bind(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) {\n  const int rc = ::bind(sockfd, addr, addrlen);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::chmod(const std::string& path, mode_t mode) {\n  const int rc = ::chmod(path.c_str(), mode);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::ioctl(os_fd_t sockfd, unsigned long int request, void* argp) {\n  const int rc = ::ioctl(sockfd, request, argp);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::close(os_fd_t fd) {\n  const int rc = ::close(fd);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallSizeResult OsSysCallsImpl::writev(os_fd_t fd, const iovec* iov, int num_iov) {\n  const ssize_t rc = ::writev(fd, iov, num_iov);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallSizeResult OsSysCallsImpl::readv(os_fd_t fd, const iovec* iov, int num_iov) {\n  const ssize_t rc = ::readv(fd, iov, num_iov);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallSizeResult OsSysCallsImpl::recv(os_fd_t socket, void* buffer, size_t length, int flags) {\n  const ssize_t rc = ::recv(socket, buffer, length, flags);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallSizeResult OsSysCallsImpl::recvmsg(os_fd_t sockfd, msghdr* msg, int flags) {\n  const ssize_t rc = ::recvmsg(sockfd, msg, flags);\n  return {rc, rc != -1 ? 
0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen,\n                                          int flags, struct timespec* timeout) {\n#if ENVOY_MMSG_MORE\n  const int rc = ::recvmmsg(sockfd, msgvec, vlen, flags, timeout);\n  return {rc, errno};\n#else\n  UNREFERENCED_PARAMETER(sockfd);\n  UNREFERENCED_PARAMETER(msgvec);\n  UNREFERENCED_PARAMETER(vlen);\n  UNREFERENCED_PARAMETER(flags);\n  UNREFERENCED_PARAMETER(timeout);\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n#endif\n}\n\nbool OsSysCallsImpl::supportsMmsg() const {\n#if ENVOY_MMSG_MORE\n  return true;\n#else\n  return false;\n#endif\n}\n\nbool OsSysCallsImpl::supportsUdpGro() const {\n#if !defined(__linux__)\n  return false;\n#else\n  static const bool is_supported = [] {\n    int fd = ::socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, IPPROTO_UDP);\n    if (fd < 0) {\n      return false;\n    }\n    int val = 1;\n    bool result = (0 == ::setsockopt(fd, IPPROTO_UDP, UDP_GRO, &val, sizeof(val)));\n    ::close(fd);\n    return result;\n  }();\n  return is_supported;\n#endif\n}\n\nbool OsSysCallsImpl::supportsUdpGso() const {\n#if !defined(__linux__)\n  return false;\n#else\n  static const bool is_supported = [] {\n    int fd = ::socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, IPPROTO_UDP);\n    if (fd < 0) {\n      return false;\n    }\n    int optval;\n    socklen_t optlen = sizeof(optval);\n    bool result = (0 <= ::getsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &optval, &optlen));\n    ::close(fd);\n    return result;\n  }();\n  return is_supported;\n#endif\n}\n\nbool OsSysCallsImpl::supportsIpTransparent() const {\n#if !defined(__linux__) || !defined(IPV6_TRANSPARENT)\n  return false;\n#else\n  // The linux kernel supports IP_TRANSPARENT by following patch(starting from v2.6.28) :\n  // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/net/ipv4/ip_sockglue.c?id=f5715aea4564f233767ea1d944b2637a5fd7cd2e\n  //\n  // The linux kernel supports 
IPV6_TRANSPARENT by following patch(starting from v2.6.37) :\n  // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/net/ipv6/ipv6_sockglue.c?id=6c46862280c5f55eda7750391bc65cd7e08c7535\n  //\n  // So, almost recent linux kernel supports both IP_TRANSPARENT and IPV6_TRANSPARENT options.\n  //\n  // And these socket options need CAP_NET_ADMIN capability to be applied.\n  // The CAP_NET_ADMIN capability should be applied by root user before call this function.\n  static const bool is_supported = [] {\n    // Check ipv4 case\n    int fd = ::socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, IPPROTO_UDP);\n    if (fd < 0) {\n      return false;\n    }\n    int val = 1;\n    bool result = (0 == ::setsockopt(fd, IPPROTO_IP, IP_TRANSPARENT, &val, sizeof(val)));\n    ::close(fd);\n    if (!result) {\n      return false;\n    }\n    // Check ipv6 case\n    fd = ::socket(AF_INET6, SOCK_DGRAM | SOCK_NONBLOCK, IPPROTO_UDP);\n    if (fd < 0) {\n      return false;\n    }\n    val = 1;\n    result = (0 == ::setsockopt(fd, IPPROTO_IPV6, IPV6_TRANSPARENT, &val, sizeof(val)));\n    ::close(fd);\n    return result;\n  }();\n  return is_supported;\n#endif\n}\n\nSysCallIntResult OsSysCallsImpl::ftruncate(int fd, off_t length) {\n  const int rc = ::ftruncate(fd, length);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallPtrResult OsSysCallsImpl::mmap(void* addr, size_t length, int prot, int flags, int fd,\n                                      off_t offset) {\n  void* rc = ::mmap(addr, length, prot, flags, fd, offset);\n  return {rc, rc != MAP_FAILED ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::stat(const char* pathname, struct stat* buf) {\n  const int rc = ::stat(pathname, buf);\n  return {rc, rc != -1 ? 
0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::setsockopt(os_fd_t sockfd, int level, int optname,\n                                            const void* optval, socklen_t optlen) {\n  const int rc = ::setsockopt(sockfd, level, optname, optval, optlen);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::getsockopt(os_fd_t sockfd, int level, int optname, void* optval,\n                                            socklen_t* optlen) {\n  const int rc = ::getsockopt(sockfd, level, optname, optval, optlen);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallSocketResult OsSysCallsImpl::socket(int domain, int type, int protocol) {\n  const os_fd_t rc = ::socket(domain, type, protocol);\n  return {rc, SOCKET_VALID(rc) ? 0 : errno};\n}\n\nSysCallSizeResult OsSysCallsImpl::sendmsg(os_fd_t fd, const msghdr* message, int flags) {\n  const int rc = ::sendmsg(fd, message, flags);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::getsockname(os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) {\n  const int rc = ::getsockname(sockfd, addr, addrlen);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::gethostname(char* name, size_t length) {\n  const int rc = ::gethostname(name, length);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::getpeername(os_fd_t sockfd, sockaddr* name, socklen_t* namelen) {\n  const int rc = ::getpeername(sockfd, name, namelen);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::setsocketblocking(os_fd_t sockfd, bool blocking) {\n  const int flags = ::fcntl(sockfd, F_GETFL, 0);\n  int rc;\n  if (flags == -1) {\n    return {-1, errno};\n  }\n  if (blocking) {\n    rc = ::fcntl(sockfd, F_SETFL, flags & ~O_NONBLOCK);\n  } else {\n    rc = ::fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);\n  }\n  return {rc, rc != -1 ? 
0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::connect(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) {\n  const int rc = ::connect(sockfd, addr, addrlen);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::shutdown(os_fd_t sockfd, int how) {\n  const int rc = ::shutdown(sockfd, how);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::socketpair(int domain, int type, int protocol, os_fd_t sv[2]) {\n  const int rc = ::socketpair(domain, type, protocol, sv);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::listen(os_fd_t sockfd, int backlog) {\n  const int rc = ::listen(sockfd, backlog);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallSizeResult OsSysCallsImpl::write(os_fd_t sockfd, const void* buffer, size_t length) {\n  const ssize_t rc = ::write(sockfd, buffer, length);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallSocketResult OsSysCallsImpl::accept(os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) {\n  os_fd_t rc;\n\n#if defined(__linux__)\n  rc = ::accept4(sockfd, addr, addrlen, SOCK_NONBLOCK);\n  // If failed with EINVAL try without flags\n  if (rc >= 0 || errno != EINVAL) {\n    return {rc, rc != -1 ? 0 : errno};\n  }\n#endif\n\n  rc = ::accept(sockfd, addr, addrlen);\n  if (rc >= 0) {\n    setsocketblocking(rc, false);\n  }\n\n  return {rc, rc != -1 ? 0 : errno};\n}\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/posix/os_sys_calls_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/api/os_sys_calls.h\"\n\n#include \"common/singleton/threadsafe_singleton.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nclass OsSysCallsImpl : public OsSysCalls {\npublic:\n  // Api::OsSysCalls\n  SysCallIntResult bind(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) override;\n  SysCallIntResult chmod(const std::string& path, mode_t mode) override;\n  SysCallIntResult ioctl(os_fd_t sockfd, unsigned long int request, void* argp) override;\n  SysCallSizeResult writev(os_fd_t fd, const iovec* iov, int num_iov) override;\n  SysCallSizeResult readv(os_fd_t fd, const iovec* iov, int num_iov) override;\n  SysCallSizeResult recv(os_fd_t socket, void* buffer, size_t length, int flags) override;\n  SysCallSizeResult recvmsg(os_fd_t sockfd, msghdr* msg, int flags) override;\n  SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, int flags,\n                            struct timespec* timeout) override;\n  bool supportsMmsg() const override;\n  bool supportsUdpGro() const override;\n  bool supportsUdpGso() const override;\n  bool supportsIpTransparent() const override;\n  SysCallIntResult close(os_fd_t fd) override;\n  SysCallIntResult ftruncate(int fd, off_t length) override;\n  SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd,\n                        off_t offset) override;\n  SysCallIntResult stat(const char* pathname, struct stat* buf) override;\n  SysCallIntResult setsockopt(os_fd_t sockfd, int level, int optname, const void* optval,\n                              socklen_t optlen) override;\n  SysCallIntResult getsockopt(os_fd_t sockfd, int level, int optname, void* optval,\n                              socklen_t* optlen) override;\n  SysCallSocketResult socket(int domain, int type, int protocol) override;\n  SysCallSizeResult sendmsg(os_fd_t fd, const msghdr* message, int flags) override;\n  SysCallIntResult getsockname(os_fd_t 
sockfd, sockaddr* addr, socklen_t* addrlen) override;\n  SysCallIntResult gethostname(char* name, size_t length) override;\n  SysCallIntResult getpeername(os_fd_t sockfd, sockaddr* name, socklen_t* namelen) override;\n  SysCallIntResult setsocketblocking(os_fd_t sockfd, bool blocking) override;\n  SysCallIntResult connect(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) override;\n  SysCallIntResult shutdown(os_fd_t sockfd, int how) override;\n  SysCallIntResult socketpair(int domain, int type, int protocol, os_fd_t sv[2]) override;\n  SysCallIntResult listen(os_fd_t sockfd, int backlog) override;\n  SysCallSizeResult write(os_fd_t socket, const void* buffer, size_t length) override;\n  SysCallSocketResult accept(os_fd_t socket, sockaddr* addr, socklen_t* addrlen) override;\n};\n\nusing OsSysCallsSingleton = ThreadSafeSingleton<OsSysCallsImpl>;\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/posix/os_sys_calls_impl_hot_restart.cc",
    "content": "#include <cerrno>\n\n#include \"common/api/os_sys_calls_impl_hot_restart.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nSysCallIntResult HotRestartOsSysCallsImpl::shmOpen(const char* name, int oflag, mode_t mode) {\n  const int rc = ::shm_open(name, oflag, mode);\n  return {rc, errno};\n}\n\nSysCallIntResult HotRestartOsSysCallsImpl::shmUnlink(const char* name) {\n  const int rc = ::shm_unlink(name);\n  return {rc, errno};\n}\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/posix/os_sys_calls_impl_hot_restart.h",
    "content": "#pragma once\n\n#include \"envoy/api/os_sys_calls_hot_restart.h\"\n\n#include \"common/singleton/threadsafe_singleton.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nclass HotRestartOsSysCallsImpl : public HotRestartOsSysCalls {\npublic:\n  // Api::HotRestartOsSysCalls\n  SysCallIntResult shmOpen(const char* name, int oflag, mode_t mode) override;\n  SysCallIntResult shmUnlink(const char* name) override;\n};\n\nusing HotRestartOsSysCallsSingleton = ThreadSafeSingleton<HotRestartOsSysCallsImpl>;\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/posix/os_sys_calls_impl_linux.cc",
    "content": "#if !defined(__linux__)\n#error \"Linux platform file is part of non-Linux build.\"\n#endif\n\n#include <sched.h>\n\n#include <cerrno>\n\n#include \"common/api/os_sys_calls_impl_linux.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nSysCallIntResult LinuxOsSysCallsImpl::sched_getaffinity(pid_t pid, size_t cpusetsize,\n                                                        cpu_set_t* mask) {\n  const int rc = ::sched_getaffinity(pid, cpusetsize, mask);\n  return {rc, errno};\n}\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/posix/os_sys_calls_impl_linux.h",
    "content": "#pragma once\n\n#if !defined(__linux__)\n#error \"Linux platform file is part of non-Linux build.\"\n#endif\n\n#include \"envoy/api/os_sys_calls_linux.h\"\n\n#include \"common/singleton/threadsafe_singleton.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nclass LinuxOsSysCallsImpl : public LinuxOsSysCalls {\npublic:\n  // Api::LinuxOsSysCalls\n  SysCallIntResult sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t* mask) override;\n};\n\nusing LinuxOsSysCallsSingleton = ThreadSafeSingleton<LinuxOsSysCallsImpl>;\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/win32/os_sys_calls_impl.cc",
    "content": "#include <errno.h>\n#include <fcntl.h>\n#include <io.h>\n#include <sys/stat.h>\n\n#include <cstdint>\n#include <string>\n\n#include \"common/api/os_sys_calls_impl.h\"\n\n#define DWORD_MAX UINT32_MAX\n\nnamespace Envoy {\nnamespace Api {\nnamespace {\n\nusing WSAMSGPtr = std::unique_ptr<WSAMSG>;\n\nstruct wsamsgResult {\n  WSAMSGPtr wsamsg_;\n  std::vector<WSABUF> buff_data_;\n};\n\nstd::vector<WSABUF> iovecToWSABUF(const iovec* vec, int in_vec) {\n\n  DWORD num_vec = 0;\n  for (int i = 0; i < in_vec; i++) {\n    size_t cur_len = vec[i].iov_len;\n    num_vec++;\n    while (cur_len > DWORD_MAX) {\n      num_vec++;\n      cur_len -= DWORD_MAX;\n    }\n  }\n\n  std::vector<WSABUF> buff(num_vec);\n  auto it = buff.begin();\n\n  std::vector<iovec> vecs(vec, vec + in_vec);\n  for (const auto& vec : vecs) {\n    auto chunk = (CHAR*)vec.iov_base;\n    size_t chunk_len = vec.iov_len;\n    // There is the case that the chunk does not fit into a single WSABUF buffer\n    // this is the case because sizeof(size_t) > sizeof(DWORD).\n    // In this case we split the chunk into multiple WSABUF buffers\n    auto remaining_data = chunk_len;\n    do {\n      (*it).buf = chunk;\n      (*it).len = (remaining_data > DWORD_MAX) ? 
DWORD_MAX : static_cast<ULONG>(chunk_len);\n      remaining_data -= (*it).len;\n      chunk += (*it).len;\n      it++;\n    } while (remaining_data > 0);\n  }\n  return buff;\n}\n\nLPFN_WSARECVMSG getFnPtrWSARecvMsg() {\n  LPFN_WSARECVMSG recvmsg_fn_ptr = NULL;\n  GUID recvmsg_guid = WSAID_WSARECVMSG;\n  SOCKET sock = INVALID_SOCKET;\n  DWORD bytes_received = 0;\n\n  sock = socket(AF_INET6, SOCK_DGRAM, 0);\n\n  RELEASE_ASSERT(\n      WSAIoctl(sock, SIO_GET_EXTENSION_FUNCTION_POINTER, &recvmsg_guid, sizeof(recvmsg_guid),\n               &recvmsg_fn_ptr, sizeof(recvmsg_fn_ptr), &bytes_received, NULL,\n               NULL) != SOCKET_ERROR,\n      \"WSAIoctl SIO_GET_EXTENSION_FUNCTION_POINTER for WSARecvMsg failed, not implemented?\");\n\n  closesocket(sock);\n\n  return recvmsg_fn_ptr;\n}\n\nwsamsgResult msghdrToWSAMSG(const msghdr* msg) {\n  WSAMSGPtr wsa_msg(new WSAMSG);\n\n  wsa_msg->name = reinterpret_cast<SOCKADDR*>(msg->msg_name);\n  wsa_msg->namelen = msg->msg_namelen;\n  auto buffer = iovecToWSABUF(msg->msg_iov, msg->msg_iovlen);\n  wsa_msg->lpBuffers = buffer.data();\n  wsa_msg->dwBufferCount = buffer.size();\n\n  WSABUF control;\n  control.buf = reinterpret_cast<CHAR*>(msg->msg_control);\n  control.len = msg->msg_controllen;\n  wsa_msg->Control = control;\n  wsa_msg->dwFlags = msg->msg_flags;\n\n  return wsamsgResult{std::move(wsa_msg), std::move(buffer)};\n}\n\n} // namespace\n\nSysCallIntResult OsSysCallsImpl::bind(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) {\n  const int rc = ::bind(sockfd, addr, addrlen);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallIntResult OsSysCallsImpl::chmod(const std::string& path, mode_t mode) {\n  const int rc = ::_chmod(path.c_str(), mode);\n  return {rc, rc != -1 ? 0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::ioctl(os_fd_t sockfd, unsigned long int request, void* argp) {\n  const int rc = ::ioctlsocket(sockfd, request, static_cast<u_long*>(argp));\n  return {rc, rc != -1 ? 
0 : ::WSAGetLastError()};\n}\n\nSysCallIntResult OsSysCallsImpl::close(os_fd_t fd) {\n  const int rc = ::closesocket(fd);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallSizeResult OsSysCallsImpl::writev(os_fd_t fd, const iovec* iov, int num_iov) {\n  DWORD bytes_sent;\n  auto buffer = iovecToWSABUF(iov, num_iov);\n\n  const int rc = ::WSASend(fd, buffer.data(), buffer.size(), &bytes_sent, 0, nullptr, nullptr);\n  if (SOCKET_FAILURE(rc)) {\n    return {-1, ::WSAGetLastError()};\n  }\n  return {bytes_sent, 0};\n}\n\nSysCallSizeResult OsSysCallsImpl::readv(os_fd_t fd, const iovec* iov, int num_iov) {\n  DWORD bytes_received;\n  DWORD flags = 0;\n  auto buffer = iovecToWSABUF(iov, num_iov);\n\n  const int rc =\n      ::WSARecv(fd, buffer.data(), buffer.size(), &bytes_received, &flags, nullptr, nullptr);\n  if (SOCKET_FAILURE(rc)) {\n    return {-1, ::WSAGetLastError()};\n  }\n  return {bytes_received, 0};\n}\n\nSysCallSizeResult OsSysCallsImpl::recv(os_fd_t socket, void* buffer, size_t length, int flags) {\n  const ssize_t rc = ::recv(socket, static_cast<char*>(buffer), length, flags);\n  return {rc, rc != -1 ? 
0 : ::WSAGetLastError()};\n}\n\nSysCallSizeResult OsSysCallsImpl::recvmsg(os_fd_t sockfd, msghdr* msg, int flags) {\n  DWORD bytes_received;\n  LPFN_WSARECVMSG recvmsg_fn_ptr = getFnPtrWSARecvMsg();\n  wsamsgResult wsamsg = msghdrToWSAMSG(msg);\n  // Windows supports only a single flag on input to WSARecvMsg\n  wsamsg.wsamsg_->dwFlags = flags & MSG_PEEK;\n  const int rc = recvmsg_fn_ptr(sockfd, wsamsg.wsamsg_.get(), &bytes_received, nullptr, nullptr);\n  if (rc == SOCKET_ERROR) {\n    return {-1, ::WSAGetLastError()};\n  }\n  msg->msg_namelen = wsamsg.wsamsg_->namelen;\n  msg->msg_flags = wsamsg.wsamsg_->dwFlags;\n  msg->msg_controllen = wsamsg.wsamsg_->Control.len;\n  return {bytes_received, 0};\n}\n\nSysCallIntResult OsSysCallsImpl::recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen,\n                                          int flags, struct timespec* timeout) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\nbool OsSysCallsImpl::supportsMmsg() const {\n  // Windows doesn't support it.\n  return false;\n}\n\nbool OsSysCallsImpl::supportsUdpGro() const {\n  // Windows doesn't support it.\n  return false;\n}\n\nbool OsSysCallsImpl::supportsUdpGso() const {\n  // Windows doesn't support it.\n  return false;\n}\n\nbool OsSysCallsImpl::supportsIpTransparent() const {\n  // Windows doesn't support it.\n  return false;\n}\n\nSysCallIntResult OsSysCallsImpl::ftruncate(int fd, off_t length) {\n  const int rc = ::_chsize_s(fd, length);\n  return {rc, rc == 0 ? 0 : errno};\n}\n\nSysCallPtrResult OsSysCallsImpl::mmap(void* addr, size_t length, int prot, int flags, int fd,\n                                      off_t offset) {\n  PANIC(\"mmap not implemented on Windows\");\n}\n\nSysCallIntResult OsSysCallsImpl::stat(const char* pathname, struct stat* buf) {\n  const int rc = ::stat(pathname, buf);\n  return {rc, rc != -1 ? 
0 : errno};\n}\n\nSysCallIntResult OsSysCallsImpl::setsockopt(os_fd_t sockfd, int level, int optname,\n                                            const void* optval, socklen_t optlen) {\n  const int rc = ::setsockopt(sockfd, level, optname, static_cast<const char*>(optval), optlen);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallIntResult OsSysCallsImpl::getsockopt(os_fd_t sockfd, int level, int optname, void* optval,\n                                            socklen_t* optlen) {\n  const int rc = ::getsockopt(sockfd, level, optname, static_cast<char*>(optval), optlen);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallSocketResult OsSysCallsImpl::socket(int domain, int type, int protocol) {\n  const os_fd_t rc = ::socket(domain, type, protocol);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallSizeResult OsSysCallsImpl::sendmsg(os_fd_t sockfd, const msghdr* msg, int flags) {\n  DWORD bytes_received;\n  // if overlapped and/or completion routines are supported adjust the arguments accordingly\n  wsamsgResult wsamsg = msghdrToWSAMSG(msg);\n  const int rc =\n      ::WSASendMsg(sockfd, wsamsg.wsamsg_.get(), flags, &bytes_received, nullptr, nullptr);\n  if (rc == SOCKET_ERROR) {\n    return {-1, ::WSAGetLastError()};\n  }\n  return {bytes_received, 0};\n}\n\nSysCallIntResult OsSysCallsImpl::getsockname(os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) {\n  const int rc = ::getsockname(sockfd, addr, addrlen);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallIntResult OsSysCallsImpl::gethostname(char* name, size_t length) {\n  const int rc = ::gethostname(name, length);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallIntResult OsSysCallsImpl::getpeername(os_fd_t sockfd, sockaddr* name, socklen_t* namelen) {\n  const int rc = ::getpeername(sockfd, name, namelen);\n  return {rc, rc != -1 ? 
0 : ::WSAGetLastError()};\n}\n\nSysCallIntResult OsSysCallsImpl::setsocketblocking(os_fd_t sockfd, bool blocking) {\n  u_long io_mode = blocking ? 0 : 1;\n  const int rc = ::ioctlsocket(sockfd, FIONBIO, &io_mode);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallIntResult OsSysCallsImpl::connect(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) {\n  const int rc = ::connect(sockfd, addr, addrlen);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallIntResult OsSysCallsImpl::shutdown(os_fd_t sockfd, int how) {\n  const int rc = ::shutdown(sockfd, how);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallIntResult OsSysCallsImpl::socketpair(int domain, int type, int protocol, os_fd_t sv[2]) {\n  if (sv == nullptr) {\n    return {SOCKET_ERROR, SOCKET_ERROR_INVAL};\n  }\n\n  sv[0] = sv[1] = INVALID_SOCKET;\n\n  SysCallSocketResult socket_result = socket(domain, type, protocol);\n  if (SOCKET_INVALID(socket_result.rc_)) {\n    return {SOCKET_ERROR, socket_result.errno_};\n  }\n\n  os_fd_t listener = socket_result.rc_;\n\n  typedef union {\n    struct sockaddr_storage sa;\n    struct sockaddr_in in;\n    struct sockaddr_in6 in6;\n  } sa_union;\n  sa_union a = {};\n  socklen_t sa_size = sizeof(a);\n\n  a.sa.ss_family = domain;\n  if (domain == AF_INET) {\n    a.in.sin_addr.s_addr = ::htonl(INADDR_LOOPBACK);\n    a.in.sin_port = 0;\n  } else if (domain == AF_INET6) {\n    a.in6.sin6_addr = in6addr_loopback;\n    a.in6.sin6_port = 0;\n  } else {\n    return {SOCKET_ERROR, SOCKET_ERROR_INVAL};\n  }\n\n  auto onErr = [this, listener, sv]() -> void {\n    ::closesocket(listener);\n    ::closesocket(sv[0]);\n    ::closesocket(sv[1]);\n    sv[0] = INVALID_SOCKET;\n    sv[1] = INVALID_SOCKET;\n  };\n\n  SysCallIntResult int_result = bind(listener, reinterpret_cast<sockaddr*>(&a), sa_size);\n  if (int_result.rc_ == SOCKET_ERROR) {\n    onErr();\n    return int_result;\n  }\n\n  int_result = listen(listener, 1);\n  if (int_result.rc_ 
== SOCKET_ERROR) {\n    onErr();\n    return int_result;\n  }\n\n  socket_result = socket(domain, type, protocol);\n  if (SOCKET_INVALID(socket_result.rc_)) {\n    onErr();\n    return {SOCKET_ERROR, socket_result.errno_};\n  }\n  sv[0] = socket_result.rc_;\n\n  a = {};\n  int_result = getsockname(listener, reinterpret_cast<sockaddr*>(&a), &sa_size);\n  if (int_result.rc_ == SOCKET_ERROR) {\n    onErr();\n    return int_result;\n  }\n\n  int_result = connect(sv[0], reinterpret_cast<sockaddr*>(&a), sa_size);\n  if (int_result.rc_ == SOCKET_ERROR) {\n    onErr();\n    return int_result;\n  }\n\n  socket_result.rc_ = ::accept(listener, nullptr, nullptr);\n  if (SOCKET_INVALID(socket_result.rc_)) {\n    socket_result.errno_ = ::WSAGetLastError();\n    onErr();\n    return {SOCKET_ERROR, socket_result.errno_};\n  }\n  sv[1] = socket_result.rc_;\n\n  ::closesocket(listener);\n  return {0, 0};\n}\n\nSysCallIntResult OsSysCallsImpl::listen(os_fd_t sockfd, int backlog) {\n  const int rc = ::listen(sockfd, backlog);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallSizeResult OsSysCallsImpl::write(os_fd_t sockfd, const void* buffer, size_t length) {\n  const ssize_t rc = ::send(sockfd, static_cast<const char*>(buffer), length, 0);\n  return {rc, rc != -1 ? 0 : ::WSAGetLastError()};\n}\n\nSysCallSocketResult OsSysCallsImpl::accept(os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) {\n  const os_fd_t rc = ::accept(sockfd, addr, addrlen);\n  if (SOCKET_INVALID(rc)) {\n    return {rc, ::WSAGetLastError()};\n  }\n\n  setsocketblocking(rc, false);\n  return {rc, 0};\n}\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/api/win32/os_sys_calls_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/api/os_sys_calls.h\"\n\n#include \"common/singleton/threadsafe_singleton.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nclass OsSysCallsImpl : public OsSysCalls {\npublic:\n  // Api::OsSysCalls\n  SysCallIntResult bind(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) override;\n  SysCallIntResult chmod(const std::string& path, mode_t mode) override;\n  SysCallIntResult ioctl(os_fd_t sockfd, unsigned long int request, void* argp) override;\n  SysCallSizeResult writev(os_fd_t fd, const iovec* iov, int num_iov) override;\n  SysCallSizeResult readv(os_fd_t fd, const iovec* iov, int num_iov) override;\n  SysCallSizeResult recv(os_fd_t socket, void* buffer, size_t length, int flags) override;\n  SysCallSizeResult recvmsg(os_fd_t sockfd, msghdr* msg, int flags) override;\n  SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, int flags,\n                            struct timespec* timeout) override;\n  bool supportsMmsg() const override;\n  bool supportsUdpGro() const override;\n  bool supportsUdpGso() const override;\n  bool supportsIpTransparent() const override;\n  SysCallIntResult close(os_fd_t fd) override;\n  SysCallIntResult ftruncate(int fd, off_t length) override;\n  SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd,\n                        off_t offset) override;\n  SysCallIntResult stat(const char* pathname, struct stat* buf) override;\n  SysCallIntResult setsockopt(os_fd_t sockfd, int level, int optname, const void* optval,\n                              socklen_t optlen) override;\n  SysCallIntResult getsockopt(os_fd_t sockfd, int level, int optname, void* optval,\n                              socklen_t* optlen) override;\n  SysCallSocketResult socket(int domain, int type, int protocol) override;\n  SysCallSizeResult sendmsg(os_fd_t fd, const msghdr* message, int flags) override;\n  SysCallIntResult getsockname(os_fd_t 
sockfd, sockaddr* addr, socklen_t* addrlen) override;\n  SysCallIntResult gethostname(char* name, size_t length) override;\n\n  SysCallIntResult getpeername(os_fd_t sockfd, sockaddr* name, socklen_t* namelen) override;\n  SysCallIntResult setsocketblocking(os_fd_t sockfd, bool blocking) override;\n  SysCallIntResult connect(os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) override;\n  SysCallIntResult shutdown(os_fd_t sockfd, int how) override;\n  SysCallIntResult socketpair(int domain, int type, int protocol, os_fd_t sv[2]) override;\n  SysCallIntResult listen(os_fd_t sockfd, int backlog) override;\n  SysCallSizeResult write(os_fd_t socket, const void* buffer, size_t length) override;\n  SysCallSocketResult accept(os_fd_t socket, sockaddr* addr, socklen_t* addrlen) override;\n};\n\nusing OsSysCallsSingleton = ThreadSafeSingleton<OsSysCallsImpl>;\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/buffer/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"watermark_buffer_lib\",\n    srcs = [\"watermark_buffer.cc\"],\n    hdrs = [\"watermark_buffer.h\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"buffer_lib\",\n    srcs = [\"buffer_impl.cc\"],\n    hdrs = [\"buffer_impl.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/common/common:non_copyable\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/event:libevent_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"zero_copy_input_stream_lib\",\n    srcs = [\"zero_copy_input_stream_impl.cc\"],\n    hdrs = [\"zero_copy_input_stream_impl.h\"],\n    deps = [\n        \":buffer_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n"
  },
  {
    "path": "source/common/buffer/buffer_impl.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"common/common/assert.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"event2/buffer.h\"\n\nnamespace Envoy {\nnamespace Buffer {\nnamespace {\n// This size has been determined to be optimal from running the\n// //test/integration:http_benchmark benchmark tests.\n// TODO(yanavlasov): This may not be optimal for all hardware configurations or traffic patterns and\n// may need to be configurable in the future.\nconstexpr uint64_t CopyThreshold = 512;\n} // namespace\n\nvoid OwnedImpl::addImpl(const void* data, uint64_t size) {\n  const char* src = static_cast<const char*>(data);\n  bool new_slice_needed = slices_.empty();\n  while (size != 0) {\n    if (new_slice_needed) {\n      slices_.emplace_back(OwnedSlice::create(size));\n    }\n    uint64_t copy_size = slices_.back()->append(src, size);\n    src += copy_size;\n    size -= copy_size;\n    length_ += copy_size;\n    new_slice_needed = true;\n  }\n}\n\nvoid OwnedImpl::addDrainTracker(std::function<void()> drain_tracker) {\n  ASSERT(!slices_.empty());\n  slices_.back()->addDrainTracker(std::move(drain_tracker));\n}\n\nvoid OwnedImpl::add(const void* data, uint64_t size) { addImpl(data, size); }\n\nvoid OwnedImpl::addBufferFragment(BufferFragment& fragment) {\n  length_ += fragment.size();\n  slices_.emplace_back(std::make_unique<UnownedSlice>(fragment));\n}\n\nvoid OwnedImpl::add(absl::string_view data) { add(data.data(), data.size()); }\n\nvoid OwnedImpl::add(const Instance& data) {\n  ASSERT(&data != this);\n  for (const RawSlice& slice : data.getRawSlices()) {\n    add(slice.mem_, slice.len_);\n  }\n}\n\nvoid OwnedImpl::prepend(absl::string_view data) {\n  uint64_t size = data.size();\n  bool new_slice_needed = slices_.empty();\n  while (size != 0) {\n    if (new_slice_needed) {\n      slices_.emplace_front(OwnedSlice::create(size));\n    }\n    uint64_t copy_size = 
slices_.front()->prepend(data.data(), size);\n    size -= copy_size;\n    length_ += copy_size;\n    new_slice_needed = true;\n  }\n}\n\nvoid OwnedImpl::prepend(Instance& data) {\n  ASSERT(&data != this);\n  OwnedImpl& other = static_cast<OwnedImpl&>(data);\n  while (!other.slices_.empty()) {\n    uint64_t slice_size = other.slices_.back()->dataSize();\n    length_ += slice_size;\n    slices_.emplace_front(std::move(other.slices_.back()));\n    other.slices_.pop_back();\n    other.length_ -= slice_size;\n  }\n  other.postProcess();\n}\n\nvoid OwnedImpl::commit(RawSlice* iovecs, uint64_t num_iovecs) {\n  if (num_iovecs == 0) {\n    return;\n  }\n  // Find the slices in the buffer that correspond to the iovecs:\n  // First, scan backward from the end of the buffer to find the last slice containing\n  // any content. Reservations are made from the end of the buffer, and out-of-order commits\n  // aren't supported, so any slices before this point cannot match the iovecs being committed.\n  ssize_t slice_index = static_cast<ssize_t>(slices_.size()) - 1;\n  while (slice_index >= 0 && slices_[slice_index]->dataSize() == 0) {\n    slice_index--;\n  }\n  if (slice_index < 0) {\n    // There was no slice containing any data, so rewind the iterator at the first slice.\n    slice_index = 0;\n    if (!slices_[0]) {\n      return;\n    }\n  }\n\n  // Next, scan forward and attempt to match the slices against iovecs.\n  uint64_t num_slices_committed = 0;\n  while (num_slices_committed < num_iovecs) {\n    if (slices_[slice_index]->commit(iovecs[num_slices_committed])) {\n      length_ += iovecs[num_slices_committed].len_;\n      num_slices_committed++;\n    }\n    slice_index++;\n    if (slice_index == static_cast<ssize_t>(slices_.size())) {\n      break;\n    }\n  }\n\n  // In case an extra slice was reserved, remove empty slices from the end of the buffer.\n  while (!slices_.empty() && slices_.back()->dataSize() == 0) {\n    slices_.pop_back();\n  }\n\n  
ASSERT(num_slices_committed > 0);\n}\n\nvoid OwnedImpl::copyOut(size_t start, uint64_t size, void* data) const {\n  uint64_t bytes_to_skip = start;\n  uint8_t* dest = static_cast<uint8_t*>(data);\n  for (const auto& slice : slices_) {\n    if (size == 0) {\n      break;\n    }\n    uint64_t data_size = slice->dataSize();\n    if (data_size <= bytes_to_skip) {\n      // The offset where the caller wants to start copying is after the end of this slice,\n      // so just skip over this slice completely.\n      bytes_to_skip -= data_size;\n      continue;\n    }\n    uint64_t copy_size = std::min(size, data_size - bytes_to_skip);\n    memcpy(dest, slice->data() + bytes_to_skip, copy_size);\n    size -= copy_size;\n    dest += copy_size;\n    // Now that we've started copying, there are no bytes left to skip over. If there\n    // is any more data to be copied, the next iteration can start copying from the very\n    // beginning of the next slice.\n    bytes_to_skip = 0;\n  }\n  ASSERT(size == 0);\n}\n\nvoid OwnedImpl::drain(uint64_t size) { drainImpl(size); }\n\nvoid OwnedImpl::drainImpl(uint64_t size) {\n  while (size != 0) {\n    if (slices_.empty()) {\n      break;\n    }\n    uint64_t slice_size = slices_.front()->dataSize();\n    if (slice_size <= size) {\n      slices_.pop_front();\n      length_ -= slice_size;\n      size -= slice_size;\n    } else {\n      slices_.front()->drain(size);\n      length_ -= size;\n      size = 0;\n    }\n  }\n  // Make sure to drain any zero byte fragments that might have been added as\n  // sentinels for flushed data.\n  while (!slices_.empty() && slices_.front()->dataSize() == 0) {\n    slices_.pop_front();\n  }\n}\n\nRawSliceVector OwnedImpl::getRawSlices(absl::optional<uint64_t> max_slices) const {\n  uint64_t max_out = slices_.size();\n  if (max_slices.has_value()) {\n    max_out = std::min(max_out, max_slices.value());\n  }\n\n  RawSliceVector raw_slices;\n  raw_slices.reserve(max_out);\n  for (const auto& slice : slices_) 
{\n    if (raw_slices.size() >= max_out) {\n      break;\n    }\n\n    if (slice->dataSize() == 0) {\n      continue;\n    }\n\n    // Temporary cast to fix 32-bit Envoy mobile builds, where sizeof(uint64_t) != sizeof(size_t).\n    // dataSize represents the size of a buffer so size_t should always be large enough to hold its\n    // size regardless of architecture. Buffer slices should in practice be relatively small, but\n    // there is currently no max size validation.\n    // TODO(antoniovicente) Set realistic limits on the max size of BufferSlice and consider use of\n    // size_t instead of uint64_t in the Slice interface.\n    raw_slices.emplace_back(RawSlice{slice->data(), static_cast<size_t>(slice->dataSize())});\n  }\n  return raw_slices;\n}\n\nSliceDataPtr OwnedImpl::extractMutableFrontSlice() {\n  RELEASE_ASSERT(length_ > 0, \"Extract called on empty buffer\");\n  // Remove zero byte fragments from the front of the queue to ensure\n  // that the extracted slice has data.\n  while (!slices_.empty() && slices_.front()->dataSize() == 0) {\n    slices_.pop_front();\n  }\n  ASSERT(!slices_.empty());\n  ASSERT(slices_.front());\n  auto slice = std::move(slices_.front());\n  auto size = slice->dataSize();\n  length_ -= size;\n  slices_.pop_front();\n  if (!slice->isMutable()) {\n    // Create a mutable copy of the immutable slice data.\n    auto mutable_slice = OwnedSlice::create(size);\n    auto copy_size = mutable_slice->append(slice->data(), size);\n    ASSERT(copy_size == size);\n    // Drain trackers for the immutable slice will be called as part of the slice destructor.\n    return mutable_slice;\n  } else {\n    // Make sure drain trackers are called before ownership of the slice is transferred from\n    // the buffer to the caller.\n    slice->callAndClearDrainTrackers();\n    return slice;\n  }\n}\n\nuint64_t OwnedImpl::length() const {\n#ifndef NDEBUG\n  // When running in debug mode, verify that the precomputed length matches the sum\n  // of the 
lengths of the slices.\n  uint64_t length = 0;\n  for (const auto& slice : slices_) {\n    length += slice->dataSize();\n  }\n  ASSERT(length == length_);\n#endif\n\n  return length_;\n}\n\nvoid* OwnedImpl::linearize(uint32_t size) {\n  RELEASE_ASSERT(size <= length(), \"Linearize size exceeds buffer size\");\n  if (slices_.empty()) {\n    return nullptr;\n  }\n  if (slices_[0]->dataSize() < size) {\n    auto new_slice = OwnedSlice::create(size);\n    Slice::Reservation reservation = new_slice->reserve(size);\n    ASSERT(reservation.mem_ != nullptr);\n    ASSERT(reservation.len_ == size);\n    copyOut(0, size, reservation.mem_);\n    new_slice->commit(reservation);\n\n    // Replace the first 'size' bytes in the buffer with the new slice. Since new_slice re-adds the\n    // drained bytes, avoid use of the overridable 'drain' method to avoid incorrectly checking if\n    // we dipped below low-watermark.\n    drainImpl(size);\n    slices_.emplace_front(std::move(new_slice));\n    length_ += size;\n  }\n  return slices_.front()->data();\n}\n\nvoid OwnedImpl::coalesceOrAddSlice(SlicePtr&& other_slice) {\n  const uint64_t slice_size = other_slice->dataSize();\n  // The `other_slice` content can be coalesced into the existing slice IFF:\n  // 1. The `other_slice` can be coalesced. Objects of type UnownedSlice can not be coalesced. See\n  //    comment in the UnownedSlice class definition;\n  // 2. There are existing slices;\n  // 3. The `other_slice` content length is under the CopyThreshold;\n  // 4. There is enough unused space in the existing slice to accommodate the `other_slice` content.\n  if (other_slice->canCoalesce() && !slices_.empty() && slice_size < CopyThreshold &&\n      slices_.back()->reservableSize() >= slice_size) {\n    // Copy content of the `other_slice`. 
The `move` methods which call this method effectively\n    // drain the source buffer.\n    addImpl(other_slice->data(), slice_size);\n    other_slice->transferDrainTrackersTo(*slices_.back());\n  } else {\n    // Take ownership of the slice.\n    slices_.emplace_back(std::move(other_slice));\n    length_ += slice_size;\n  }\n}\n\nvoid OwnedImpl::move(Instance& rhs) {\n  ASSERT(&rhs != this);\n  // We do the static cast here because in practice we only have one buffer implementation right\n  // now and this is safe. This is a reasonable compromise in a high performance path where we\n  // want to maintain an abstraction.\n  OwnedImpl& other = static_cast<OwnedImpl&>(rhs);\n  while (!other.slices_.empty()) {\n    const uint64_t slice_size = other.slices_.front()->dataSize();\n    coalesceOrAddSlice(std::move(other.slices_.front()));\n    other.length_ -= slice_size;\n    other.slices_.pop_front();\n  }\n  other.postProcess();\n}\n\nvoid OwnedImpl::move(Instance& rhs, uint64_t length) {\n  ASSERT(&rhs != this);\n  // See move() above for why we do the static cast.\n  OwnedImpl& other = static_cast<OwnedImpl&>(rhs);\n  while (length != 0 && !other.slices_.empty()) {\n    const uint64_t slice_size = other.slices_.front()->dataSize();\n    const uint64_t copy_size = std::min(slice_size, length);\n    if (copy_size == 0) {\n      other.slices_.pop_front();\n    } else if (copy_size < slice_size) {\n      // TODO(brian-pane) add reference-counting to allow slices to share their storage\n      // and eliminate the copy for this partial-slice case?\n      add(other.slices_.front()->data(), copy_size);\n      other.slices_.front()->drain(copy_size);\n      other.length_ -= copy_size;\n    } else {\n      coalesceOrAddSlice(std::move(other.slices_.front()));\n      other.slices_.pop_front();\n      other.length_ -= slice_size;\n    }\n    length -= copy_size;\n  }\n  other.postProcess();\n}\n\nuint64_t OwnedImpl::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) 
{\n  if (num_iovecs == 0 || length == 0) {\n    return 0;\n  }\n  // Check whether there are any empty slices with reservable space at the end of the buffer.\n  size_t first_reservable_slice = slices_.size();\n  while (first_reservable_slice > 0) {\n    if (slices_[first_reservable_slice - 1]->reservableSize() == 0) {\n      break;\n    }\n    first_reservable_slice--;\n    if (slices_[first_reservable_slice]->dataSize() != 0) {\n      // There is some content in this slice, so anything in front of it is non-reservable.\n      break;\n    }\n  }\n\n  // Having found the sequence of reservable slices at the back of the buffer, reserve\n  // as much space as possible from each one.\n  uint64_t num_slices_used = 0;\n  uint64_t bytes_remaining = length;\n  size_t slice_index = first_reservable_slice;\n  while (slice_index < slices_.size() && bytes_remaining != 0 && num_slices_used < num_iovecs) {\n    auto& slice = slices_[slice_index];\n    const uint64_t reservation_size = std::min(slice->reservableSize(), bytes_remaining);\n    if (num_slices_used + 1 == num_iovecs && reservation_size < bytes_remaining) {\n      // There is only one iovec left, and this next slice does not have enough space to\n      // complete the reservation. 
Stop iterating, with last one iovec still unpopulated,\n      // so the code following this loop can allocate a new slice to hold the rest of the\n      // reservation.\n      break;\n    }\n    iovecs[num_slices_used] = slice->reserve(reservation_size);\n    bytes_remaining -= iovecs[num_slices_used].len_;\n    num_slices_used++;\n    slice_index++;\n  }\n\n  // If needed, allocate one more slice at the end to provide the remainder of the reservation.\n  if (bytes_remaining != 0) {\n    slices_.emplace_back(OwnedSlice::create(bytes_remaining));\n    iovecs[num_slices_used] = slices_.back()->reserve(bytes_remaining);\n    bytes_remaining -= iovecs[num_slices_used].len_;\n    num_slices_used++;\n  }\n\n  ASSERT(num_slices_used <= num_iovecs);\n  ASSERT(bytes_remaining == 0);\n  return num_slices_used;\n}\n\nssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start, size_t length) const {\n  // This implementation uses the same search algorithm as evbuffer_search(), a naive\n  // scan that requires O(M*N) comparisons in the worst case.\n  // TODO(brian-pane): replace this with a more efficient search if it shows up\n  // prominently in CPU profiling.\n  if (size == 0) {\n    return (start <= length_) ? 
start : -1;\n  }\n\n  // length equal to zero means that entire buffer must be searched.\n  // Adjust the length to buffer length taking the staring index into account.\n  size_t left_to_search = length;\n  if (0 == length) {\n    left_to_search = length_ - start;\n  }\n  ssize_t offset = 0;\n  const uint8_t* needle = static_cast<const uint8_t*>(data);\n  for (size_t slice_index = 0; slice_index < slices_.size() && (left_to_search > 0);\n       slice_index++) {\n    const auto& slice = slices_[slice_index];\n    uint64_t slice_size = slice->dataSize();\n    if (slice_size <= start) {\n      start -= slice_size;\n      offset += slice_size;\n      continue;\n    }\n    const uint8_t* slice_start = slice->data();\n    const uint8_t* haystack = slice_start;\n    const uint8_t* haystack_end = haystack + slice_size;\n    haystack += start;\n    while (haystack < haystack_end) {\n      const size_t slice_search_limit =\n          std::min(static_cast<size_t>(haystack_end - haystack), left_to_search);\n      // Search within this slice for the first byte of the needle.\n      const uint8_t* first_byte_match =\n          static_cast<const uint8_t*>(memchr(haystack, needle[0], slice_search_limit));\n      if (first_byte_match == nullptr) {\n        left_to_search -= slice_search_limit;\n        break;\n      }\n      // After finding a match for the first byte of the needle, check whether the following\n      // bytes in the buffer match the remainder of the needle. 
Note that the match can span\n      // two or more slices.\n      left_to_search -= static_cast<size_t>(first_byte_match - haystack + 1);\n      // Save the current number of bytes left to search.\n      // If the pattern is not found, the search will resume from the next byte\n      // and left_to_search value must be restored.\n      const size_t saved_left_to_search = left_to_search;\n      size_t i = 1;\n      size_t match_index = slice_index;\n      const uint8_t* match_next = first_byte_match + 1;\n      const uint8_t* match_end = haystack_end;\n      while ((i < size) && (0 < left_to_search)) {\n        if (match_next >= match_end) {\n          // We've hit the end of this slice, so continue checking against the next slice.\n          match_index++;\n          if (match_index == slices_.size()) {\n            // We've hit the end of the entire buffer.\n            break;\n          }\n          const auto& match_slice = slices_[match_index];\n          match_next = match_slice->data();\n          match_end = match_next + match_slice->dataSize();\n          continue;\n        }\n        left_to_search--;\n        if (*match_next++ != needle[i]) {\n          break;\n        }\n        i++;\n      }\n      if (i == size) {\n        // Successful match of the entire needle.\n        return offset + (first_byte_match - slice_start);\n      }\n      // If this wasn't a successful match, start scanning again at the next byte.\n      haystack = first_byte_match + 1;\n      left_to_search = saved_left_to_search;\n    }\n    start = 0;\n    offset += slice_size;\n  }\n  return -1;\n}\n\nbool OwnedImpl::startsWith(absl::string_view data) const {\n  if (length() < data.length()) {\n    // Buffer is too short to contain data.\n    return false;\n  }\n\n  if (data.length() == 0) {\n    return true;\n  }\n\n  const uint8_t* prefix = reinterpret_cast<const uint8_t*>(data.data());\n  size_t size = data.length();\n  for (const auto& slice : slices_) {\n    uint64_t slice_size 
= slice->dataSize();\n    const uint8_t* slice_start = slice->data();\n\n    if (slice_size >= size) {\n      // The remaining size bytes of data are in this slice.\n      return memcmp(prefix, slice_start, size) == 0;\n    }\n\n    // Slice is smaller than data, see if the prefix matches.\n    if (memcmp(prefix, slice_start, slice_size) != 0) {\n      return false;\n    }\n\n    // Prefix matched. Continue looking at the next slice.\n    prefix += slice_size;\n    size -= slice_size;\n  }\n\n  // Less data in slices than length() reported.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nOwnedImpl::OwnedImpl() = default;\n\nOwnedImpl::OwnedImpl(absl::string_view data) : OwnedImpl() { add(data); }\n\nOwnedImpl::OwnedImpl(const Instance& data) : OwnedImpl() { add(data); }\n\nOwnedImpl::OwnedImpl(const void* data, uint64_t size) : OwnedImpl() { add(data, size); }\n\nstd::string OwnedImpl::toString() const {\n  std::string output;\n  output.reserve(length());\n  for (const RawSlice& slice : getRawSlices()) {\n    output.append(static_cast<const char*>(slice.mem_), slice.len_);\n  }\n\n  return output;\n}\n\nvoid OwnedImpl::postProcess() {}\n\nvoid OwnedImpl::appendSliceForTest(const void* data, uint64_t size) {\n  slices_.emplace_back(OwnedSlice::create(data, size));\n  length_ += size;\n}\n\nvoid OwnedImpl::appendSliceForTest(absl::string_view data) {\n  appendSliceForTest(data.data(), data.size());\n}\n\nstd::vector<OwnedSlice::SliceRepresentation> OwnedImpl::describeSlicesForTest() const {\n  std::vector<OwnedSlice::SliceRepresentation> slices;\n  for (const auto& slice : slices_) {\n    slices.push_back(slice->describeSliceForTest());\n  }\n  return slices;\n}\n\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/buffer/buffer_impl.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <cstdint>\n#include <deque>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/non_copyable.h\"\n#include \"common/common/utility.h\"\n#include \"common/event/libevent.h\"\n\nnamespace Envoy {\nnamespace Buffer {\n\n/**\n * A Slice manages a contiguous block of bytes.\n * The block is arranged like this:\n *                   |<- dataSize() ->|<- reservableSize() ->|\n * +-----------------+----------------+----------------------+\n * | Drained         | Data           | Reservable           |\n * | Unused space    | Usable content | New content can be   |\n * | that formerly   |                | added here with      |\n * | was in the Data |                | reserve()/commit()   |\n * | section         |                |                      |\n * +-----------------+----------------+----------------------+\n *                   ^\n *                   |\n *                   data()\n */\nclass Slice : public SliceData {\npublic:\n  using Reservation = RawSlice;\n\n  ~Slice() override { callAndClearDrainTrackers(); }\n\n  // SliceData\n  absl::Span<uint8_t> getMutableData() override {\n    RELEASE_ASSERT(isMutable(), \"Not allowed to call getMutableData if slice is immutable\");\n    return {base_ + data_, static_cast<absl::Span<uint8_t>::size_type>(reservable_ - data_)};\n  }\n\n  /**\n   * @return true if the data in the slice is mutable\n   */\n  virtual bool isMutable() const { return false; }\n\n  /**\n   * @return a pointer to the start of the usable content.\n   */\n  const uint8_t* data() const { return base_ + data_; }\n\n  /**\n   * @return a pointer to the start of the usable content.\n   */\n  uint8_t* data() { return base_ + data_; }\n\n  /**\n   * @return the size in bytes of the usable content.\n   */\n  uint64_t dataSize() const { return reservable_ - data_; }\n\n  /**\n   * Remove the first `size` bytes of usable 
content. Runs in O(1) time.\n   * @param size number of bytes to remove. If greater than data_size(), the result is undefined.\n   */\n  void drain(uint64_t size) {\n    ASSERT(data_ + size <= reservable_);\n    data_ += size;\n    if (data_ == reservable_) {\n      // All the data in the slice has been drained. Reset the offsets so all\n      // the data can be reused.\n      data_ = 0;\n      reservable_ = 0;\n    }\n  }\n\n  /**\n   * @return the number of bytes available to be reserve()d.\n   * @note Read-only implementations of Slice should return zero from this method.\n   */\n  uint64_t reservableSize() const {\n    ASSERT(capacity_ >= reservable_);\n    return capacity_ - reservable_;\n  }\n\n  /**\n   * Reserve `size` bytes that the caller can populate with content. The caller SHOULD then\n   * call commit() to add the newly populated content from the Reserved section to the Data\n   * section.\n   * @note If there is already an outstanding reservation (i.e., a reservation obtained\n   *       from reserve() that has not been released by calling commit()), this method will\n   *       return a new reservation that replaces it.\n   * @param size the number of bytes to reserve. The Slice implementation MAY reserve\n   *        fewer bytes than requested (for example, if it doesn't have enough room in the\n   *        Reservable section to fulfill the whole request).\n   * @return a tuple containing the address of the start of resulting reservation and the\n   *         reservation size in bytes. 
If the address is null, the reservation failed.\n   * @note Read-only implementations of Slice should return {nullptr, 0} from this method.\n   */\n  Reservation reserve(uint64_t size) {\n    if (size == 0) {\n      return {nullptr, 0};\n    }\n    // Verify the semantics that drain() enforces: if the slice is empty, either because\n    // no data has been added or because all the added data has been drained, the data\n    // section is at the very start of the slice.\n    ASSERT(!(dataSize() == 0 && data_ > 0));\n    uint64_t available_size = capacity_ - reservable_;\n    if (available_size == 0) {\n      return {nullptr, 0};\n    }\n    uint64_t reservation_size = std::min(size, available_size);\n    void* reservation = &(base_[reservable_]);\n    return {reservation, static_cast<size_t>(reservation_size)};\n  }\n\n  /**\n   * Commit a Reservation that was previously obtained from a call to reserve().\n   * The Reservation's size is added to the Data section.\n   * @param reservation a reservation obtained from a previous call to reserve().\n   *        If the reservation is not from this Slice, commit() will return false.\n   *        If the caller is committing fewer bytes than provided by reserve(), it\n   *        should change the len_ field of the reservation before calling commit().\n   *        For example, if a caller reserve()s 4KB to do a nonblocking socket read,\n   *        and the read only returns two bytes, the caller should set\n   *        reservation.len_ = 2 and then call `commit(reservation)`.\n   * @return whether the Reservation was successfully committed to the Slice.\n   */\n  bool commit(const Reservation& reservation) {\n    if (static_cast<const uint8_t*>(reservation.mem_) != base_ + reservable_ ||\n        reservable_ + reservation.len_ > capacity_ || reservable_ >= capacity_) {\n      // The reservation is not from this OwnedSlice.\n      return false;\n    }\n    reservable_ += reservation.len_;\n    return true;\n  }\n\n  /**\n   * 
Copy as much of the supplied data as possible to the end of the slice.\n   * @param data start of the data to copy.\n   * @param size number of bytes to copy.\n   * @return number of bytes copied (may be a smaller than size, may even be zero).\n   */\n  uint64_t append(const void* data, uint64_t size) {\n    uint64_t copy_size = std::min(size, reservableSize());\n    if (copy_size == 0) {\n      return 0;\n    }\n    uint8_t* dest = base_ + reservable_;\n    reservable_ += copy_size;\n    // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)\n    memcpy(dest, data, copy_size);\n    return copy_size;\n  }\n\n  /**\n   * Copy as much of the supplied data as possible to the front of the slice.\n   * If only part of the data will fit in the slice, the bytes from the _end_ are\n   * copied.\n   * @param data start of the data to copy.\n   * @param size number of bytes to copy.\n   * @return number of bytes copied (may be a smaller than size, may even be zero).\n   */\n  uint64_t prepend(const void* data, uint64_t size) {\n    const uint8_t* src = static_cast<const uint8_t*>(data);\n    uint64_t copy_size;\n    if (dataSize() == 0) {\n      // There is nothing in the slice, so put the data at the very end in case the caller\n      // later tries to prepend anything else in front of it.\n      copy_size = std::min(size, reservableSize());\n      reservable_ = capacity_;\n      data_ = capacity_ - copy_size;\n    } else {\n      if (data_ == 0) {\n        // There is content in the slice, and no space in front of it to write anything.\n        return 0;\n      }\n      // Write into the space in front of the slice's current content.\n      copy_size = std::min(size, data_);\n      data_ -= copy_size;\n    }\n    memcpy(base_ + data_, src + size - copy_size, copy_size);\n    return copy_size;\n  }\n\n  /**\n   * @return true if content in this Slice can be coalesced into another Slice.\n   */\n  virtual bool canCoalesce() const { return true; }\n\n  /**\n   * Describe the 
in-memory representation of the slice. For use\n   * in tests that want to make assertions about the specific arrangement of\n   * bytes in a slice.\n   */\n  struct SliceRepresentation {\n    uint64_t data;\n    uint64_t reservable;\n    uint64_t capacity;\n  };\n  SliceRepresentation describeSliceForTest() const {\n    return SliceRepresentation{dataSize(), reservableSize(), capacity_};\n  }\n\n  /**\n   * Move all drain trackers from the current slice to the destination slice.\n   */\n  void transferDrainTrackersTo(Slice& destination) {\n    destination.drain_trackers_.splice(destination.drain_trackers_.end(), drain_trackers_);\n    ASSERT(drain_trackers_.empty());\n  }\n\n  /**\n   * Add a drain tracker to the slice.\n   */\n  void addDrainTracker(std::function<void()> drain_tracker) {\n    drain_trackers_.emplace_back(std::move(drain_tracker));\n  }\n\n  /**\n   * Call all drain trackers associated with the slice, then clear\n   * the drain tracker list.\n   */\n  void callAndClearDrainTrackers() {\n    for (const auto& drain_tracker : drain_trackers_) {\n      drain_tracker();\n    }\n    drain_trackers_.clear();\n  }\n\nprotected:\n  Slice(uint64_t data, uint64_t reservable, uint64_t capacity)\n      : data_(data), reservable_(reservable), capacity_(capacity) {}\n\n  /** Start of the slice - subclasses must set this */\n  uint8_t* base_{nullptr};\n\n  /** Offset in bytes from the start of the slice to the start of the Data section */\n  uint64_t data_;\n\n  /** Offset in bytes from the start of the slice to the start of the Reservable section */\n  uint64_t reservable_;\n\n  /** Total number of bytes in the slice */\n  uint64_t capacity_;\n\n  std::list<std::function<void()>> drain_trackers_;\n};\n\nusing SlicePtr = std::unique_ptr<Slice>;\n\n// OwnedSlice can not be derived from as it has variable sized array as member.\nclass OwnedSlice final : public Slice, public InlineStorage {\npublic:\n  /**\n   * Create an empty OwnedSlice.\n   * @param capacity 
number of bytes of space the slice should have.\n   * @return an OwnedSlice with at least the specified capacity.\n   */\n  static SlicePtr create(uint64_t capacity) {\n    uint64_t slice_capacity = sliceSize(capacity);\n    return SlicePtr(new (slice_capacity) OwnedSlice(slice_capacity));\n  }\n\n  /**\n   * Create an OwnedSlice and initialize it with a copy of the supplied content.\n   * @param data the content to copy into the slice.\n   * @param size length of the content.\n   * @return an OwnedSlice containing a copy of the content, which may (dependent on\n   *         the internal implementation) have a nonzero amount of reservable space at the end.\n   */\n  static SlicePtr create(const void* data, uint64_t size) {\n    uint64_t slice_capacity = sliceSize(size);\n    std::unique_ptr<OwnedSlice> slice(new (slice_capacity) OwnedSlice(slice_capacity));\n    memcpy(slice->base_, data, size);\n    slice->reservable_ = size;\n    return slice;\n  }\n\nprivate:\n  OwnedSlice(uint64_t size) : Slice(0, 0, size) { base_ = storage_; }\n\n  bool isMutable() const override { return true; }\n\n  /**\n   * Compute a slice size big enough to hold a specified amount of data.\n   * @param data_size the minimum amount of data the slice must be able to store, in bytes.\n   * @return a recommended slice size, in bytes.\n   */\n  static uint64_t sliceSize(uint64_t data_size) {\n    static constexpr uint64_t PageSize = 4096;\n    const uint64_t num_pages = (sizeof(OwnedSlice) + data_size + PageSize - 1) / PageSize;\n    return num_pages * PageSize - sizeof(OwnedSlice);\n  }\n\n  uint8_t storage_[];\n};\n\n/**\n * Queue of SlicePtr that supports efficient read and write access to both\n * the front and the back of the queue.\n * @note This class has similar properties to std::deque<T>. 
The reason for using\n *       a custom deque implementation is that benchmark testing during development\n *       revealed that std::deque was too slow to reach performance parity with the\n *       prior evbuffer-based buffer implementation.\n */\nclass SliceDeque {\npublic:\n  SliceDeque() : ring_(inline_ring_), capacity_(InlineRingCapacity) {}\n\n  SliceDeque(SliceDeque&& rhs) noexcept {\n    // This custom move constructor is needed so that ring_ will be updated properly.\n    std::move(rhs.inline_ring_, rhs.inline_ring_ + InlineRingCapacity, inline_ring_);\n    external_ring_ = std::move(rhs.external_ring_);\n    ring_ = (external_ring_ != nullptr) ? external_ring_.get() : inline_ring_;\n    start_ = rhs.start_;\n    size_ = rhs.size_;\n    capacity_ = rhs.capacity_;\n  }\n\n  SliceDeque& operator=(SliceDeque&& rhs) noexcept {\n    // This custom assignment move operator is needed so that ring_ will be updated properly.\n    std::move(rhs.inline_ring_, rhs.inline_ring_ + InlineRingCapacity, inline_ring_);\n    external_ring_ = std::move(rhs.external_ring_);\n    ring_ = (external_ring_ != nullptr) ? external_ring_.get() : inline_ring_;\n    start_ = rhs.start_;\n    size_ = rhs.size_;\n    capacity_ = rhs.capacity_;\n    return *this;\n  }\n\n  void emplace_back(SlicePtr&& slice) {\n    growRing();\n    size_t index = internalIndex(size_);\n    ring_[index] = std::move(slice);\n    size_++;\n  }\n\n  void emplace_front(SlicePtr&& slice) {\n    growRing();\n    start_ = (start_ == 0) ? 
capacity_ - 1 : start_ - 1;\n    ring_[start_] = std::move(slice);\n    size_++;\n  }\n\n  bool empty() const { return size() == 0; }\n  size_t size() const { return size_; }\n\n  SlicePtr& front() { return ring_[start_]; }\n  const SlicePtr& front() const { return ring_[start_]; }\n  SlicePtr& back() { return ring_[internalIndex(size_ - 1)]; }\n  const SlicePtr& back() const { return ring_[internalIndex(size_ - 1)]; }\n\n  SlicePtr& operator[](size_t i) { return ring_[internalIndex(i)]; }\n  const SlicePtr& operator[](size_t i) const { return ring_[internalIndex(i)]; }\n\n  void pop_front() {\n    if (size() == 0) {\n      return;\n    }\n    front() = SlicePtr();\n    size_--;\n    start_++;\n    if (start_ == capacity_) {\n      start_ = 0;\n    }\n  }\n\n  void pop_back() {\n    if (size() == 0) {\n      return;\n    }\n    back() = SlicePtr();\n    size_--;\n  }\n\n  /**\n   * Forward const iterator for SliceDeque.\n   * @note this implementation currently supports the minimum functionality needed to support\n   *       the `for (const auto& slice : slice_deque)` idiom.\n   */\n  class ConstIterator {\n  public:\n    const SlicePtr& operator*() { return deque_[index_]; }\n\n    ConstIterator operator++() {\n      index_++;\n      return *this;\n    }\n\n    bool operator!=(const ConstIterator& rhs) const {\n      return &deque_ != &rhs.deque_ || index_ != rhs.index_;\n    }\n\n    friend class SliceDeque;\n\n  private:\n    ConstIterator(const SliceDeque& deque, size_t index) : deque_(deque), index_(index) {}\n    const SliceDeque& deque_;\n    size_t index_;\n  };\n\n  ConstIterator begin() const noexcept { return {*this, 0}; }\n\n  ConstIterator end() const noexcept { return {*this, size_}; }\n\nprivate:\n  constexpr static size_t InlineRingCapacity = 8;\n\n  size_t internalIndex(size_t index) const {\n    size_t internal_index = start_ + index;\n    if (internal_index >= capacity_) {\n      internal_index -= capacity_;\n      ASSERT(internal_index < 
capacity_);\n    }\n    return internal_index;\n  }\n\n  void growRing() {\n    if (size_ < capacity_) {\n      return;\n    }\n    const size_t new_capacity = capacity_ * 2;\n    auto new_ring = std::make_unique<SlicePtr[]>(new_capacity);\n    for (size_t i = 0; i < new_capacity; i++) {\n      ASSERT(new_ring[i] == nullptr);\n    }\n    size_t src = start_;\n    size_t dst = 0;\n    for (size_t i = 0; i < size_; i++) {\n      new_ring[dst++] = std::move(ring_[src++]);\n      if (src == capacity_) {\n        src = 0;\n      }\n    }\n    for (size_t i = 0; i < capacity_; i++) {\n      ASSERT(ring_[i].get() == nullptr);\n    }\n    external_ring_.swap(new_ring);\n    ring_ = external_ring_.get();\n    start_ = 0;\n    capacity_ = new_capacity;\n  }\n\n  SlicePtr inline_ring_[InlineRingCapacity];\n  std::unique_ptr<SlicePtr[]> external_ring_;\n  SlicePtr* ring_; // points to start of either inline or external ring.\n  size_t start_{0};\n  size_t size_{0};\n  size_t capacity_;\n};\n\nclass UnownedSlice : public Slice {\npublic:\n  UnownedSlice(BufferFragment& fragment)\n      : Slice(0, fragment.size(), fragment.size()), fragment_(fragment) {\n    base_ = static_cast<uint8_t*>(const_cast<void*>(fragment.data()));\n  }\n\n  ~UnownedSlice() override { fragment_.done(); }\n\n  /**\n   * BufferFragment objects encapsulated by UnownedSlice are used to track when response content\n   * is written into transport connection. 
As a result these slices can not be coalesced when moved\n   * between buffers.\n   */\n  bool canCoalesce() const override { return false; }\n\nprivate:\n  BufferFragment& fragment_;\n};\n\n/**\n * An implementation of BufferFragment where a releasor callback is called when the data is\n * no longer needed.\n */\nclass BufferFragmentImpl : NonCopyable, public BufferFragment {\npublic:\n  /**\n   * Creates a new wrapper around the externally owned <data> of size <size>.\n   * The caller must ensure <data> is valid until releasor() is called, or for the lifetime of the\n   * fragment. releasor() is called with <data>, <size> and <this> to allow caller to delete\n   * the fragment object.\n   * @param data external data to reference\n   * @param size size of data\n   * @param releasor a callback function to be called when data is no longer needed.\n   */\n  BufferFragmentImpl(\n      const void* data, size_t size,\n      const std::function<void(const void*, size_t, const BufferFragmentImpl*)>& releasor)\n      : data_(data), size_(size), releasor_(releasor) {}\n\n  // Buffer::BufferFragment\n  const void* data() const override { return data_; }\n  size_t size() const override { return size_; }\n  void done() override {\n    if (releasor_) {\n      releasor_(data_, size_, this);\n    }\n  }\n\nprivate:\n  const void* const data_;\n  const size_t size_;\n  const std::function<void(const void*, size_t, const BufferFragmentImpl*)> releasor_;\n};\n\nclass LibEventInstance : public Instance {\npublic:\n  // Called after accessing the memory in buffer() directly to allow any post-processing.\n  virtual void postProcess() PURE;\n};\n\n/**\n * Wrapper for uint64_t that asserts upon integer overflow and underflow.\n */\nclass OverflowDetectingUInt64 {\npublic:\n  operator uint64_t() const { return value_; }\n\n  OverflowDetectingUInt64& operator+=(uint64_t size) {\n    uint64_t new_value = value_ + size;\n    RELEASE_ASSERT(new_value >= value_, \"64-bit unsigned integer 
overflowed\");\n    value_ = new_value;\n    return *this;\n  }\n\n  OverflowDetectingUInt64& operator-=(uint64_t size) {\n    RELEASE_ASSERT(value_ >= size, \"unsigned integer underflowed\");\n    value_ -= size;\n    return *this;\n  }\n\nprivate:\n  uint64_t value_{0};\n};\n\n/**\n * Wraps an allocated and owned buffer.\n *\n * Note that due to the internals of move(), OwnedImpl is not\n * compatible with non-OwnedImpl buffers.\n */\nclass OwnedImpl : public LibEventInstance {\npublic:\n  OwnedImpl();\n  OwnedImpl(absl::string_view data);\n  OwnedImpl(const Instance& data);\n  OwnedImpl(const void* data, uint64_t size);\n\n  // Buffer::Instance\n  void addDrainTracker(std::function<void()> drain_tracker) override;\n  void add(const void* data, uint64_t size) override;\n  void addBufferFragment(BufferFragment& fragment) override;\n  void add(absl::string_view data) override;\n  void add(const Instance& data) override;\n  void prepend(absl::string_view data) override;\n  void prepend(Instance& data) override;\n  void commit(RawSlice* iovecs, uint64_t num_iovecs) override;\n  void copyOut(size_t start, uint64_t size, void* data) const override;\n  void drain(uint64_t size) override;\n  RawSliceVector getRawSlices(absl::optional<uint64_t> max_slices = absl::nullopt) const override;\n  SliceDataPtr extractMutableFrontSlice() override;\n  uint64_t length() const override;\n  void* linearize(uint32_t size) override;\n  void move(Instance& rhs) override;\n  void move(Instance& rhs, uint64_t length) override;\n  uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override;\n  ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override;\n  bool startsWith(absl::string_view data) const override;\n  std::string toString() const override;\n\n  // LibEventInstance\n  void postProcess() override;\n\n  /**\n   * Create a new slice at the end of the buffer, and copy the supplied content into it.\n   * @param data start of the 
content to copy.\n   * @param size length of the content to copy.\n   */\n  virtual void appendSliceForTest(const void* data, uint64_t size);\n\n  /**\n   * Create a new slice at the end of the buffer, and copy the supplied string into it.\n   * @param data the string to append to the buffer.\n   */\n  virtual void appendSliceForTest(absl::string_view data);\n\n  /**\n   * Describe the in-memory representation of the slices in the buffer. For use\n   * in tests that want to make assertions about the specific arrangement of\n   * bytes in the buffer.\n   */\n  std::vector<OwnedSlice::SliceRepresentation> describeSlicesForTest() const;\n\nprivate:\n  /**\n   * @param rhs another buffer\n   * @return whether the rhs buffer is also an instance of OwnedImpl (or a subclass) that\n   *         uses the same internal implementation as this buffer.\n   */\n  bool isSameBufferImpl(const Instance& rhs) const;\n\n  void addImpl(const void* data, uint64_t size);\n  void drainImpl(uint64_t size);\n\n  /**\n   * Moves contents of the `other_slice` by either taking its ownership or coalescing it\n   * into an existing slice.\n   * NOTE: the caller is responsible for draining the buffer that contains the `other_slice`.\n   */\n  void coalesceOrAddSlice(SlicePtr&& other_slice);\n\n  /** Ring buffer of slices. */\n  SliceDeque slices_;\n\n  /** Sum of the dataSize of all slices. */\n  OverflowDetectingUInt64 length_;\n};\n\nusing BufferFragmentPtr = std::unique_ptr<BufferFragment>;\n\n/**\n * An implementation of BufferFragment where a releasor callback is called when the data is\n * no longer needed. Copies data into internal buffer.\n */\nclass OwnedBufferFragmentImpl final : public BufferFragment, public InlineStorage {\npublic:\n  using Releasor = std::function<void(const OwnedBufferFragmentImpl*)>;\n\n  /**\n   * Copies the data into internal buffer. 
The releasor is called when the data has been\n   * fully drained or the buffer that contains this fragment is destroyed.\n   * @param data external data to reference\n   * @param releasor a callback function to be called when data is no longer needed.\n   */\n\n  static BufferFragmentPtr create(absl::string_view data, const Releasor& releasor) {\n    return BufferFragmentPtr(new (sizeof(OwnedBufferFragmentImpl) + data.size())\n                                 OwnedBufferFragmentImpl(data, releasor));\n  }\n\n  // Buffer::BufferFragment\n  const void* data() const override { return data_; }\n  size_t size() const override { return size_; }\n  void done() override { releasor_(this); }\n\nprivate:\n  OwnedBufferFragmentImpl(absl::string_view data, const Releasor& releasor)\n      : releasor_(releasor), size_(data.size()) {\n    ASSERT(releasor != nullptr);\n    memcpy(data_, data.data(), data.size());\n  }\n\n  const Releasor releasor_;\n  const size_t size_;\n  uint8_t data_[];\n};\n\nusing OwnedBufferFragmentImplPtr = std::unique_ptr<OwnedBufferFragmentImpl>;\n\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/buffer/watermark_buffer.cc",
    "content": "#include \"common/buffer/watermark_buffer.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/runtime/runtime_features.h\"\n\nnamespace Envoy {\nnamespace Buffer {\n\nvoid WatermarkBuffer::add(const void* data, uint64_t size) {\n  OwnedImpl::add(data, size);\n  checkHighAndOverflowWatermarks();\n}\n\nvoid WatermarkBuffer::add(absl::string_view data) {\n  OwnedImpl::add(data);\n  checkHighAndOverflowWatermarks();\n}\n\nvoid WatermarkBuffer::add(const Instance& data) {\n  OwnedImpl::add(data);\n  checkHighAndOverflowWatermarks();\n}\n\nvoid WatermarkBuffer::prepend(absl::string_view data) {\n  OwnedImpl::prepend(data);\n  checkHighAndOverflowWatermarks();\n}\n\nvoid WatermarkBuffer::prepend(Instance& data) {\n  OwnedImpl::prepend(data);\n  checkHighAndOverflowWatermarks();\n}\n\nvoid WatermarkBuffer::commit(RawSlice* iovecs, uint64_t num_iovecs) {\n  OwnedImpl::commit(iovecs, num_iovecs);\n  checkHighAndOverflowWatermarks();\n}\n\nvoid WatermarkBuffer::drain(uint64_t size) {\n  OwnedImpl::drain(size);\n  checkLowWatermark();\n}\n\nvoid WatermarkBuffer::move(Instance& rhs) {\n  OwnedImpl::move(rhs);\n  checkHighAndOverflowWatermarks();\n}\n\nvoid WatermarkBuffer::move(Instance& rhs, uint64_t length) {\n  OwnedImpl::move(rhs, length);\n  checkHighAndOverflowWatermarks();\n}\n\nSliceDataPtr WatermarkBuffer::extractMutableFrontSlice() {\n  auto result = OwnedImpl::extractMutableFrontSlice();\n  checkLowWatermark();\n  return result;\n}\n\nuint64_t WatermarkBuffer::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) {\n  uint64_t bytes_reserved = OwnedImpl::reserve(length, iovecs, num_iovecs);\n  checkHighAndOverflowWatermarks();\n  return bytes_reserved;\n}\n\nvoid WatermarkBuffer::appendSliceForTest(const void* data, uint64_t size) {\n  OwnedImpl::appendSliceForTest(data, size);\n  checkHighAndOverflowWatermarks();\n}\n\nvoid WatermarkBuffer::appendSliceForTest(absl::string_view data) {\n  appendSliceForTest(data.data(), 
data.size());\n}\n\nvoid WatermarkBuffer::setWatermarks(uint32_t low_watermark, uint32_t high_watermark) {\n  ASSERT(low_watermark < high_watermark || (high_watermark == 0 && low_watermark == 0));\n  uint32_t overflow_watermark_multiplier =\n      Runtime::getInteger(\"envoy.buffer.overflow_multiplier\", 0);\n  if (overflow_watermark_multiplier > 0 &&\n      (static_cast<uint64_t>(overflow_watermark_multiplier) * high_watermark) >\n          std::numeric_limits<uint32_t>::max()) {\n    ENVOY_LOG_MISC(debug, \"Error setting overflow threshold: envoy.buffer.overflow_multiplier * \"\n                          \"high_watermark is overflowing. Disabling overflow watermark.\");\n    overflow_watermark_multiplier = 0;\n  }\n  low_watermark_ = low_watermark;\n  high_watermark_ = high_watermark;\n  overflow_watermark_ = overflow_watermark_multiplier * high_watermark;\n  checkHighAndOverflowWatermarks();\n  checkLowWatermark();\n}\n\nvoid WatermarkBuffer::checkLowWatermark() {\n  if (!above_high_watermark_called_ ||\n      (high_watermark_ != 0 && OwnedImpl::length() > low_watermark_)) {\n    return;\n  }\n\n  above_high_watermark_called_ = false;\n  below_low_watermark_();\n}\n\nvoid WatermarkBuffer::checkHighAndOverflowWatermarks() {\n  if (high_watermark_ == 0 || OwnedImpl::length() <= high_watermark_) {\n    return;\n  }\n\n  if (!above_high_watermark_called_) {\n    above_high_watermark_called_ = true;\n    above_high_watermark_();\n  }\n\n  // Check if overflow watermark is enabled, wasn't previously triggered,\n  // and the buffer size is above the threshold\n  if (overflow_watermark_ != 0 && !above_overflow_watermark_called_ &&\n      OwnedImpl::length() > overflow_watermark_) {\n    above_overflow_watermark_called_ = true;\n    above_overflow_watermark_();\n  }\n}\n\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/buffer/watermark_buffer.h",
    "content": "#pragma once\n\n#include <functional>\n#include <string>\n\n#include \"common/buffer/buffer_impl.h\"\n\nnamespace Envoy {\nnamespace Buffer {\n\n// A subclass of OwnedImpl which does watermark validation.\n// Each time the buffer is resized (written to or drained), the watermarks are checked. As the\n// buffer size transitions from under the low watermark to above the high watermark, the\n// above_high_watermark function is called one time. It will not be called again until the buffer\n// is drained below the low watermark, at which point the below_low_watermark function is called.\n// If the buffer size is above the overflow watermark, above_overflow_watermark is called.\n// It is only called on the first time the buffer overflows.\nclass WatermarkBuffer : public OwnedImpl {\npublic:\n  WatermarkBuffer(std::function<void()> below_low_watermark,\n                  std::function<void()> above_high_watermark,\n                  std::function<void()> above_overflow_watermark)\n      : below_low_watermark_(below_low_watermark), above_high_watermark_(above_high_watermark),\n        above_overflow_watermark_(above_overflow_watermark) {}\n\n  // Override all functions from Instance which can result in changing the size\n  // of the underlying buffer.\n  void add(const void* data, uint64_t size) override;\n  void add(absl::string_view data) override;\n  void add(const Instance& data) override;\n  void prepend(absl::string_view data) override;\n  void prepend(Instance& data) override;\n  void commit(RawSlice* iovecs, uint64_t num_iovecs) override;\n  void drain(uint64_t size) override;\n  void move(Instance& rhs) override;\n  void move(Instance& rhs, uint64_t length) override;\n  SliceDataPtr extractMutableFrontSlice() override;\n  uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override;\n  void postProcess() override { checkLowWatermark(); }\n  void appendSliceForTest(const void* data, uint64_t size) override;\n  void 
appendSliceForTest(absl::string_view data) override;\n\n  void setWatermarks(uint32_t watermark) { setWatermarks(watermark / 2, watermark); }\n  void setWatermarks(uint32_t low_watermark, uint32_t high_watermark);\n  uint32_t highWatermark() const { return high_watermark_; }\n  // Returns true if the high watermark callbacks have been called more recently\n  // than the low watermark callbacks.\n  bool highWatermarkTriggered() const { return above_high_watermark_called_; }\n\nprivate:\n  void checkHighAndOverflowWatermarks();\n  void checkLowWatermark();\n\n  std::function<void()> below_low_watermark_;\n  std::function<void()> above_high_watermark_;\n  std::function<void()> above_overflow_watermark_;\n\n  // Used for enforcing buffer limits (off by default). If these are set to non-zero by a call to\n  // setWatermarks() the watermark callbacks will be called as described above.\n  uint32_t high_watermark_{0};\n  uint32_t low_watermark_{0};\n  uint32_t overflow_watermark_{0};\n  // Tracks the latest state of watermark callbacks.\n  // True between the time above_high_watermark_ has been called until below_low_watermark_ has\n  // been called.\n  bool above_high_watermark_called_{false};\n  // Set to true when above_overflow_watermark_ is called (and isn't cleared).\n  bool above_overflow_watermark_called_{false};\n};\n\nusing WatermarkBufferPtr = std::unique_ptr<WatermarkBuffer>;\n\nclass WatermarkBufferFactory : public WatermarkFactory {\npublic:\n  // Buffer::WatermarkFactory\n  InstancePtr create(std::function<void()> below_low_watermark,\n                     std::function<void()> above_high_watermark,\n                     std::function<void()> above_overflow_watermark) override {\n    return std::make_unique<WatermarkBuffer>(below_low_watermark, above_high_watermark,\n                                             above_overflow_watermark);\n  }\n};\n\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/buffer/zero_copy_input_stream_impl.cc",
    "content": "#include \"common/buffer/zero_copy_input_stream_impl.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Buffer {\n\nZeroCopyInputStreamImpl::ZeroCopyInputStreamImpl() : buffer_(new Buffer::OwnedImpl) {}\n\nZeroCopyInputStreamImpl::ZeroCopyInputStreamImpl(Buffer::InstancePtr&& buffer)\n    : buffer_(std::move(buffer)) {\n  finish();\n}\n\nvoid ZeroCopyInputStreamImpl::move(Buffer::Instance& instance) {\n  ASSERT(!finished_);\n\n  buffer_->move(instance);\n}\n\nvoid ZeroCopyInputStreamImpl::drainLastSlice() {\n  if (position_ != 0) {\n    buffer_->drain(position_);\n    position_ = 0;\n  }\n}\n\nbool ZeroCopyInputStreamImpl::Next(const void** data, int* size) {\n  drainLastSlice();\n\n  Buffer::RawSliceVector slices = buffer_->getRawSlices(1);\n\n  if (!slices.empty() && slices[0].len_ > 0) {\n    auto& slice = slices[0];\n    *data = slice.mem_;\n    *size = slice.len_;\n    position_ = slice.len_;\n    byte_count_ += slice.len_;\n    return true;\n  }\n\n  if (!finished_) {\n    *data = nullptr;\n    *size = 0;\n    return true;\n  }\n  return false;\n}\n\nbool ZeroCopyInputStreamImpl::Skip(int count) {\n  ASSERT(count >= 0);\n  drainLastSlice();\n\n  // Could not skip more than buffer length.\n  if (static_cast<uint64_t>(count) > buffer_->length()) {\n    return false;\n  }\n\n  buffer_->drain(count);\n  byte_count_ += count;\n  return true;\n}\n\nvoid ZeroCopyInputStreamImpl::BackUp(int count) {\n  ASSERT(count >= 0);\n  ASSERT(uint64_t(count) <= position_);\n\n  // Preconditions for BackUp:\n  // - The last method called must have been Next().\n  // - count must be less than or equal to the size of the last buffer returned by Next().\n  // Due to preconditions above, it is safe to just adjust position_ and byte_count_ here, and\n  // drain in Next().\n  position_ -= count;\n  byte_count_ -= count;\n}\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/buffer/zero_copy_input_stream_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\n\nnamespace Buffer {\n\nclass ZeroCopyInputStreamImpl : public virtual Protobuf::io::ZeroCopyInputStream {\npublic:\n  // Create input stream with one buffer, and finish immediately\n  ZeroCopyInputStreamImpl(Buffer::InstancePtr&& buffer);\n\n  // Create input stream with empty buffer\n  ZeroCopyInputStreamImpl();\n\n  // Add a buffer to input stream, will consume all buffer from parameter\n  // if the stream is not finished\n  void move(Buffer::Instance& instance);\n\n  // Mark the stream is finished\n  void finish() { finished_ = true; }\n\n  // Protobuf::io::ZeroCopyInputStream\n  // See\n  // https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.zero_copy_stream#ZeroCopyInputStream\n  // for each method details.\n\n  // Note Next() will return true with no data until more data is available if the stream is not\n  // finished. It is the caller's responsibility to finish the stream or wrap with\n  // LimitingInputStream before passing to protobuf code to avoid a spin loop.\n  bool Next(const void** data, int* size) override;\n  void BackUp(int count) override;\n  bool Skip(int count) override;\n  ProtobufTypes::Int64 ByteCount() const override { return byte_count_; }\n\nprotected:\n  // The last slice is kept to support limited BackUp() calls.\n  // This function will drain it.\n  void drainLastSlice();\n\n  Buffer::InstancePtr buffer_;\n  uint64_t position_{0};\n  bool finished_{false};\n\nprivate:\n  uint64_t byte_count_{0};\n};\n\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/chromium_url/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"chromium_url\",\n    srcs = [\n        \"url_canon.cc\",\n        \"url_canon_internal.cc\",\n        \"url_canon_path.cc\",\n        \"url_canon_stdstring.cc\",\n    ],\n    hdrs = [\n        \"envoy_shim.h\",\n        \"url_canon.h\",\n        \"url_canon_internal.h\",\n        \"url_canon_stdstring.h\",\n        \"url_parse.h\",\n        \"url_parse_internal.h\",\n    ],\n    deps = [\"//source/common/common:assert_lib\"],\n)\n"
  },
  {
    "path": "source/common/chromium_url/LICENSE",
    "content": "// Copyright 2015 The Chromium Authors. All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//    * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//    * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//    * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "source/common/chromium_url/README.md",
    "content": "This is a manually minified variant of\nhttps://chromium.googlesource.com/chromium/src.git/+archive/74.0.3729.15/url.tar.gz,\nproviding just the parts needed for `url::CanonicalizePath()`. This is intended\nto support a security release fix for CVE-2019-9901. Long term we need this to\nbe moved to absl or QUICHE for upgrades and long-term support.\n\nSome specific transforms of interest:\n* The namespace `url` was changed to `chromium_url`.\n* `url_parse.h` is minified to just `Component` and flattened back into the URL\n  directory. It does not contain any non-Chromium authored code any longer and\n  so does not have a separate LICENSE.\n* `envoy_shim.h` adapts various macros to the Envoy context.\n* Anything not reachable from `url::CanonicalizePath()` has been dropped.\n* Header include paths have changed as needed.\n* BUILD was manually written.\n* Various clang-tidy and format fixes.\n"
  },
  {
    "path": "source/common/chromium_url/envoy_shim.h",
    "content": "#pragma once\n\n#include \"common/common/assert.h\"\n\n// This is a minimal Envoy adaptation layer for the Chromium URL library.\n// NOLINT(namespace-envoy)\n\n#define DISALLOW_COPY_AND_ASSIGN(TypeName)                                                         \\\n  TypeName(const TypeName&) = delete;                                                              \\\n  TypeName& operator=(const TypeName&) = delete\n\n#define EXPORT_TEMPLATE_DECLARE(x)\n#define EXPORT_TEMPLATE_DEFINE(x)\n#define COMPONENT_EXPORT(x)\n\n#define DCHECK(x) ASSERT(x)\n#define NOTREACHED() NOT_REACHED_GCOVR_EXCL_LINE\n"
  },
  {
    "path": "source/common/chromium_url/url_canon.cc",
    "content": "// Envoy snapshot of Chromium URL path normalization, see README.md.\n// NOLINT(namespace-envoy)\n\n// Copyright 2017 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n#include \"common/chromium_url/url_canon.h\"\n\n#include \"common/chromium_url/envoy_shim.h\"\n\nnamespace chromium_url {\n\ntemplate class EXPORT_TEMPLATE_DEFINE(COMPONENT_EXPORT(URL)) CanonOutputT<char>;\n\n} // namespace chromium_url\n"
  },
  {
    "path": "source/common/chromium_url/url_canon.h",
    "content": "// Envoy snapshot of Chromium URL path normalization, see README.md.\n// NOLINT(namespace-envoy)\n\n// Copyright 2013 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n#ifndef URL_URL_CANON_H_\n#define URL_URL_CANON_H_\n\n#include <stdlib.h>\n#include <string.h>\n\n#include \"common/chromium_url/envoy_shim.h\"\n#include \"common/chromium_url/url_parse.h\"\n\nnamespace chromium_url {\n\n// Canonicalizer output -------------------------------------------------------\n\n// Base class for the canonicalizer output, this maintains a buffer and\n// supports simple resizing and append operations on it.\n//\n// It is VERY IMPORTANT that no virtual function calls be made on the common\n// code path. We only have two virtual function calls, the destructor and a\n// resize function that is called when the existing buffer is not big enough.\n// The derived class is then in charge of setting up our buffer which we will\n// manage.\ntemplate <typename T> class CanonOutputT {\npublic:\n  CanonOutputT() : buffer_(NULL), buffer_len_(0), cur_len_(0) {}\n  virtual ~CanonOutputT() = default;\n\n  // Implemented to resize the buffer. This function should update the buffer\n  // pointer to point to the new buffer, and any old data up to |cur_len_| in\n  // the buffer must be copied over.\n  //\n  // The new size |sz| must be larger than buffer_len_.\n  virtual void Resize(int sz) = 0;\n\n  // Accessor for returning a character at a given position. The input offset\n  // must be in the valid range.\n  inline T at(int offset) const { return buffer_[offset]; }\n\n  // Sets the character at the given position. 
The given position MUST be less\n  // than the length().\n  inline void set(int offset, T ch) { buffer_[offset] = ch; }\n\n  // Returns the number of characters currently in the buffer.\n  inline int length() const { return cur_len_; }\n\n  // Returns the current capacity of the buffer. The length() is the number of\n  // characters that have been declared to be written, but the capacity() is\n  // the number that can be written without reallocation. If the caller must\n  // write many characters at once, it can make sure there is enough capacity,\n  // write the data, then use set_size() to declare the new length().\n  int capacity() const { return buffer_len_; }\n\n  // Called by the user of this class to get the output. The output will NOT\n  // be NULL-terminated. Call length() to get the\n  // length.\n  const T* data() const { return buffer_; }\n  T* data() { return buffer_; }\n\n  // Shortens the URL to the new length. Used for \"backing up\" when processing\n  // relative paths. This can also be used if an external function writes a lot\n  // of data to the buffer (when using the \"Raw\" version below) beyond the end,\n  // to declare the new length.\n  //\n  // This MUST NOT be used to expand the size of the buffer beyond capacity().\n  void set_length(int new_len) { cur_len_ = new_len; }\n\n  // This is the most performance critical function, since it is called for\n  // every character.\n  void push_back(T ch) {\n    // In VC2005, putting this common case first speeds up execution\n    // dramatically because this branch is predicted as taken.\n    if (cur_len_ < buffer_len_) {\n      buffer_[cur_len_] = ch;\n      cur_len_++;\n      return;\n    }\n\n    // Grow the buffer to hold at least one more item. 
Hopefully we won't have\n    // to do this very often.\n    if (!Grow(1))\n      return;\n\n    // Actually do the insertion.\n    buffer_[cur_len_] = ch;\n    cur_len_++;\n  }\n\n  // Appends the given string to the output.\n  void Append(const T* str, int str_len) {\n    if (cur_len_ + str_len > buffer_len_) {\n      if (!Grow(cur_len_ + str_len - buffer_len_))\n        return;\n    }\n    for (int i = 0; i < str_len; i++)\n      buffer_[cur_len_ + i] = str[i];\n    cur_len_ += str_len;\n  }\n\n  void ReserveSizeIfNeeded(int estimated_size) {\n    // Reserve a bit extra to account for escaped chars.\n    if (estimated_size > buffer_len_)\n      Resize(estimated_size + 8);\n  }\n\nprotected:\n  // Grows the given buffer so that it can fit at least |min_additional|\n  // characters. Returns true if the buffer could be resized, false on OOM.\n  bool Grow(int min_additional) {\n    static const int kMinBufferLen = 16;\n    int new_len = (buffer_len_ == 0) ? kMinBufferLen : buffer_len_;\n    do {\n      if (new_len >= (1 << 30)) // Prevent overflow below.\n        return false;\n      new_len *= 2;\n    } while (new_len < buffer_len_ + min_additional);\n    Resize(new_len);\n    return true;\n  }\n\n  T* buffer_;\n  int buffer_len_;\n\n  // Used characters in the buffer.\n  int cur_len_;\n};\n\n// Simple implementation of the CanonOutput using new[]. This class\n// also supports a static buffer so if it is allocated on the stack, most\n// URLs can be canonicalized with no heap allocations.\ntemplate <typename T, int fixed_capacity = 1024> class RawCanonOutputT : public CanonOutputT<T> {\npublic:\n  RawCanonOutputT() : CanonOutputT<T>() {\n    this->buffer_ = fixed_buffer_;\n    this->buffer_len_ = fixed_capacity;\n  }\n  ~RawCanonOutputT() override {\n    if (this->buffer_ != fixed_buffer_)\n      delete[] this->buffer_;\n  }\n\n  void Resize(int sz) override {\n    T* new_buf = new T[sz];\n    memcpy(new_buf, this->buffer_, sizeof(T) * (this->cur_len_ < sz ? 
this->cur_len_ : sz));\n    if (this->buffer_ != fixed_buffer_)\n      delete[] this->buffer_;\n    this->buffer_ = new_buf;\n    this->buffer_len_ = sz;\n  }\n\nprotected:\n  T fixed_buffer_[fixed_capacity];\n};\n\n// Explicitly instantiate commonly used instantiations.\nextern template class EXPORT_TEMPLATE_DECLARE(COMPONENT_EXPORT(URL)) CanonOutputT<char>;\n\n// Normally, all canonicalization output is in narrow characters. We support\n// the templates so it can also be used internally if a wide buffer is\n// required.\nusing CanonOutput = CanonOutputT<char>;\n\ntemplate <int fixed_capacity>\nclass RawCanonOutput : public RawCanonOutputT<char, fixed_capacity> {};\n\n// Path. If the input does not begin in a slash (including if the input is\n// empty), we'll prepend a slash to the path to make it canonical.\n//\n// The 8-bit version assumes UTF-8 encoding, but does not verify the validity\n// of the UTF-8 (i.e., you can have invalid UTF-8 sequences, invalid\n// characters, etc.). Normally, URLs will come in as UTF-16, so this isn't\n// an issue. Somebody giving us an 8-bit path is responsible for generating\n// the path that the server expects (we'll escape high-bit characters), so\n// if something is invalid, it's their problem.\nCOMPONENT_EXPORT(URL)\nbool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output,\n                      Component* out_path);\n\n} // namespace chromium_url\n\n#endif // URL_URL_CANON_H_\n"
  },
  {
    "path": "source/common/chromium_url/url_canon_internal.cc",
    "content": "// Envoy snapshot of Chromium URL path normalization, see README.md.\n// NOLINT(namespace-envoy)\n\n// Copyright 2013 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n#include \"common/chromium_url/url_canon_internal.h\"\n\nnamespace chromium_url {\n\n// See the header file for this array's declaration.\nconst unsigned char kSharedCharTypeTable[0x100] = {\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0, // 0x00 - 0x0f\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,                                           // 0x10 - 0x1f\n    0,                                           // 0x20  ' ' (escape spaces in queries)\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x21  !\n    0,                                           // 0x22  \"\n    0,                          // 0x23  #  (invalid in query since it marks the ref)\n    CHAR_QUERY | CHAR_USERINFO, // 0x24  $\n    CHAR_QUERY | CHAR_USERINFO, // 0x25  %\n    CHAR_QUERY | CHAR_USERINFO, // 0x26  &\n    0,                          // 0x27  '  (Try to prevent XSS.)\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,             // 0x28  (\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,             // 0x29  )\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,             // 0x2a  *\n    CHAR_QUERY | CHAR_USERINFO,                              // 0x2b  +\n    CHAR_QUERY | CHAR_USERINFO,                              // 0x2c  ,\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,             // 0x2d  -\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x2e  .\n    CHAR_QUERY,                                              // 0x2f  /\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT |\n        CHAR_COMPONENT, // 0x30 
 0\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT |\n        CHAR_COMPONENT, // 0x31  1\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT |\n        CHAR_COMPONENT, // 0x32  2\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT |\n        CHAR_COMPONENT, // 0x33  3\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT |\n        CHAR_COMPONENT, // 0x34  4\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT |\n        CHAR_COMPONENT, // 0x35  5\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT |\n        CHAR_COMPONENT, // 0x36  6\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT |\n        CHAR_COMPONENT,                                                            // 0x37  7\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x38  8\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x39  9\n    CHAR_QUERY,                                                                    // 0x3a  :\n    CHAR_QUERY,                                                                    // 0x3b  ;\n    0,          // 0x3c  <  (Try to prevent certain types of XSS.)\n    CHAR_QUERY, // 0x3d  =\n    0,          // 0x3e  >  (Try to prevent certain types of XSS.)\n    CHAR_QUERY, // 0x3f  ?\n    CHAR_QUERY, // 0x40  @\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x41  A\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x42  B\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x43  C\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x44  D\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x45  E\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x46  F\n    CHAR_QUERY | 
CHAR_USERINFO | CHAR_COMPONENT,                        // 0x47  G\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x48  H\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x49  I\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x4a  J\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x4b  K\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x4c  L\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x4d  M\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x4e  N\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x4f  O\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x50  P\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x51  Q\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x52  R\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x53  S\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x54  T\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x55  U\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x56  V\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x57  W\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT,            // 0x58  X\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x59  Y\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x5a  Z\n    CHAR_QUERY,                                                         // 0x5b  [\n    CHAR_QUERY,                                                         // 0x5c  '\\'\n    CHAR_QUERY,                                                         // 0x5d  ]\n    CHAR_QUERY,                                                         // 0x5e  
^\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x5f  _\n    CHAR_QUERY,                                                         // 0x60  `\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x61  a\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x62  b\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x63  c\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x64  d\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x65  e\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x66  f\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x67  g\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x68  h\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x69  i\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x6a  j\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x6b  k\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x6c  l\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x6d  m\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x6e  n\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x6f  o\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x70  p\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x71  q\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x72  r\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x73  s\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x74  t\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x75  u\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                 
       // 0x76  v\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x77  w\n    CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT,            // 0x78  x\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x79  y\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x7a  z\n    CHAR_QUERY,                                                         // 0x7b  {\n    CHAR_QUERY,                                                         // 0x7c  |\n    CHAR_QUERY,                                                         // 0x7d  }\n    CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT,                        // 0x7e  ~\n    0,                                                                  // 0x7f\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0, // 0x80 - 0x8f\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0, // 0x90 - 0x9f\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0, // 0xa0 - 0xaf\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0, // 0xb0 - 0xbf\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0, // 0xc0 - 0xcf\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0, // 0xd0 - 0xdf\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0, // 0xe0 - 0xef\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0,\n    0, // 0xf0 - 0xff\n};\n\nconst char kHexCharLookup[0x10] = {\n    '0', '1', '2', '3', '4', '5', '6', 
'7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F',\n};\n\nconst char kCharToHexLookup[8] = {\n    0,        // 0x00 - 0x1f\n    '0',      // 0x20 - 0x3f: digits 0 - 9 are 0x30 - 0x39\n    'A' - 10, // 0x40 - 0x5f: letters A - F are 0x41 - 0x46\n    'a' - 10, // 0x60 - 0x7f: letters a - f are 0x61 - 0x66\n    0,        // 0x80 - 0x9F\n    0,        // 0xA0 - 0xBF\n    0,        // 0xC0 - 0xDF\n    0,        // 0xE0 - 0xFF\n};\n\n} // namespace chromium_url\n"
  },
  {
    "path": "source/common/chromium_url/url_canon_internal.h",
    "content": "// Envoy snapshot of Chromium URL path normalization, see README.md.\n// NOLINT(namespace-envoy)\n\n// Copyright 2013 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n#ifndef URL_URL_CANON_INTERNAL_H_\n#define URL_URL_CANON_INTERNAL_H_\n\n// This file is intended to be included in another C++ file where the character\n// types are defined. This allows us to write mostly generic code, but not have\n// template bloat because everything is inlined when anybody calls any of our\n// functions.\n\n#include <stddef.h>\n#include <stdlib.h>\n\n#include \"common/chromium_url/envoy_shim.h\"\n#include \"common/chromium_url/url_canon.h\"\n\nnamespace chromium_url {\n\n// Character type handling -----------------------------------------------------\n\n// Bits that identify different character types. These types identify different\n// bits that are set for each 8-bit character in the kSharedCharTypeTable.\nenum SharedCharTypes {\n  // Characters that do not require escaping in queries. Characters that do\n  // not have this flag will be escaped; see url_canon_query.cc\n  CHAR_QUERY = 1,\n\n  // Valid in the username/password field.\n  CHAR_USERINFO = 2,\n\n  // Valid in a IPv4 address (digits plus dot and 'x' for hex).\n  CHAR_IPV4 = 4,\n\n  // Valid in an ASCII-representation of a hex digit (as in %-escaped).\n  CHAR_HEX = 8,\n\n  // Valid in an ASCII-representation of a decimal digit.\n  CHAR_DEC = 16,\n\n  // Valid in an ASCII-representation of an octal digit.\n  CHAR_OCT = 32,\n\n  // Characters that do not require escaping in encodeURIComponent. 
Characters\n  // that do not have this flag will be escaped; see url_util.cc.\n  CHAR_COMPONENT = 64,\n};\n\n// This table contains the flags in SharedCharTypes for each 8-bit character.\n// Some canonicalization functions have their own specialized lookup table.\n// For those with simple requirements, we have collected the flags in one\n// place so there are fewer lookup tables to load into the CPU cache.\n//\n// Using an unsigned char type has a small but measurable performance benefit\n// over using a 32-bit number.\nextern const unsigned char kSharedCharTypeTable[0x100];\n\n// More readable wrappers around the character type lookup table.\ninline bool IsCharOfType(unsigned char c, SharedCharTypes type) {\n  return !!(kSharedCharTypeTable[c] & type);\n}\ninline bool IsQueryChar(unsigned char c) { return IsCharOfType(c, CHAR_QUERY); }\ninline bool IsIPv4Char(unsigned char c) { return IsCharOfType(c, CHAR_IPV4); }\ninline bool IsHexChar(unsigned char c) { return IsCharOfType(c, CHAR_HEX); }\ninline bool IsComponentChar(unsigned char c) { return IsCharOfType(c, CHAR_COMPONENT); }\n\n// Maps the hex numerical values 0x0 to 0xf to the corresponding ASCII digit\n// that will be used to represent it.\nCOMPONENT_EXPORT(URL) extern const char kHexCharLookup[0x10];\n\n// This lookup table allows fast conversion between ASCII hex letters and their\n// corresponding numerical value. The 8-bit range is divided up into 8\n// regions of 0x20 characters each. Each of the three character types (numbers,\n// uppercase, lowercase) falls into different regions of this range. The table\n// contains the amount to subtract from characters in that range to get at\n// the corresponding numerical value.\n//\n// See HexDigitToValue for the lookup.\nextern const char kCharToHexLookup[8];\n\n// Assumes the input is a valid hex digit! 
Call IsHexChar before using this.\ninline unsigned char HexCharToValue(unsigned char c) { return c - kCharToHexLookup[c / 0x20]; }\n\n// Indicates if the given character is a dot or dot equivalent, returning the\n// number of characters taken by it. This will be one for a literal dot, 3 for\n// an escaped dot. If the character is not a dot, this will return 0.\ntemplate <typename CHAR> inline int IsDot(const CHAR* spec, int offset, int end) {\n  if (spec[offset] == '.') {\n    return 1;\n  } else if (spec[offset] == '%' && offset + 3 <= end && spec[offset + 1] == '2' &&\n             (spec[offset + 2] == 'e' || spec[offset + 2] == 'E')) {\n    // Found \"%2e\"\n    return 3;\n  }\n  return 0;\n}\n\n// Write a single character, escaped, to the output. This always escapes: it\n// does no checking that thee character requires escaping.\n// Escaping makes sense only 8 bit chars, so code works in all cases of\n// input parameters (8/16bit).\ntemplate <typename UINCHAR, typename OUTCHAR>\ninline void AppendEscapedChar(UINCHAR ch, CanonOutputT<OUTCHAR>* output) {\n  output->push_back('%');\n  output->push_back(kHexCharLookup[(ch >> 4) & 0xf]);\n  output->push_back(kHexCharLookup[ch & 0xf]);\n}\n\n// UTF-8 functions ------------------------------------------------------------\n\n// Generic To-UTF-8 converter. This will call the given append method for each\n// character that should be appended, with the given output method. 
Wrappers\n// are provided below for escaped and non-escaped versions of this.\n//\n// The char_value must have already been checked that it's a valid Unicode\n// character.\ntemplate <class Output, void Appender(unsigned char, Output*)>\ninline void DoAppendUTF8(unsigned char_value, Output* output) {\n  if (char_value <= 0x7f) {\n    Appender(static_cast<unsigned char>(char_value), output);\n  } else if (char_value <= 0x7ff) {\n    // 110xxxxx 10xxxxxx\n    Appender(static_cast<unsigned char>(0xC0 | (char_value >> 6)), output);\n    Appender(static_cast<unsigned char>(0x80 | (char_value & 0x3f)), output);\n  } else if (char_value <= 0xffff) {\n    // 1110xxxx 10xxxxxx 10xxxxxx\n    Appender(static_cast<unsigned char>(0xe0 | (char_value >> 12)), output);\n    Appender(static_cast<unsigned char>(0x80 | ((char_value >> 6) & 0x3f)), output);\n    Appender(static_cast<unsigned char>(0x80 | (char_value & 0x3f)), output);\n  } else if (char_value <= 0x10FFFF) { // Max Unicode code point.\n    // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx\n    Appender(static_cast<unsigned char>(0xf0 | (char_value >> 18)), output);\n    Appender(static_cast<unsigned char>(0x80 | ((char_value >> 12) & 0x3f)), output);\n    Appender(static_cast<unsigned char>(0x80 | ((char_value >> 6) & 0x3f)), output);\n    Appender(static_cast<unsigned char>(0x80 | (char_value & 0x3f)), output);\n  } else {\n    // Invalid UTF-8 character (>20 bits).\n    NOTREACHED();\n  }\n}\n\n// Helper used by AppendUTF8Value below. We use an unsigned parameter so there\n// are no funny sign problems with the input, but then have to convert it to\n// a regular char for appending.\ninline void AppendCharToOutput(unsigned char ch, CanonOutput* output) {\n  output->push_back(static_cast<char>(ch));\n}\n\n// Writes the given character to the output as UTF-8. 
This does NO checking\n// of the validity of the Unicode characters; the caller should ensure that\n// the value it is appending is valid to append.\ninline void AppendUTF8Value(unsigned char_value, CanonOutput* output) {\n  DoAppendUTF8<CanonOutput, AppendCharToOutput>(char_value, output);\n}\n\n// Writes the given character to the output as UTF-8, escaping ALL\n// characters (even when they are ASCII). This does NO checking of the\n// validity of the Unicode characters; the caller should ensure that the value\n// it is appending is valid to append.\ninline void AppendUTF8EscapedValue(unsigned char_value, CanonOutput* output) {\n  DoAppendUTF8<CanonOutput, AppendEscapedChar>(char_value, output);\n}\n\n// Given a '%' character at |*begin| in the string |spec|, this will decode\n// the escaped value and put it into |*unescaped_value| on success (returns\n// true). On failure, this will return false, and will not write into\n// |*unescaped_value|.\n//\n// |*begin| will be updated to point to the last character of the escape\n// sequence so that when called with the index of a for loop, the next time\n// through it will point to the next character to be considered. 
On failure,\n// |*begin| will be unchanged.\ninline bool Is8BitChar(char /*c*/) {\n  return true; // this case is specialized to avoid a warning\n}\n\ntemplate <typename CHAR>\ninline bool DecodeEscaped(const CHAR* spec, int* begin, int end, unsigned char* unescaped_value) {\n  if (*begin + 3 > end || !Is8BitChar(spec[*begin + 1]) || !Is8BitChar(spec[*begin + 2])) {\n    // Invalid escape sequence because there's not enough room, or the\n    // digits are not ASCII.\n    return false;\n  }\n\n  unsigned char first = static_cast<unsigned char>(spec[*begin + 1]);\n  unsigned char second = static_cast<unsigned char>(spec[*begin + 2]);\n  if (!IsHexChar(first) || !IsHexChar(second)) {\n    // Invalid hex digits, fail.\n    return false;\n  }\n\n  // Valid escape sequence.\n  *unescaped_value = (HexCharToValue(first) << 4) + HexCharToValue(second);\n  *begin += 2;\n  return true;\n}\n\n} // namespace chromium_url\n\n#endif // URL_URL_CANON_INTERNAL_H_\n"
  },
  {
    "path": "source/common/chromium_url/url_canon_path.cc",
    "content": "// Envoy snapshot of Chromium URL path normalization, see README.md.\n// NOLINT(namespace-envoy)\n\n// Copyright 2013 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n#include <limits.h>\n\n#include \"common/chromium_url/url_canon.h\"\n#include \"common/chromium_url/url_canon_internal.h\"\n#include \"common/chromium_url/url_parse_internal.h\"\n\nnamespace chromium_url {\n\nnamespace {\n\nenum CharacterFlags {\n  // Pass through unchanged, whether escaped or unescaped. This doesn't\n  // actually set anything so you can't OR it to check, it's just to make the\n  // table below more clear when neither ESCAPE or UNESCAPE is set.\n  PASS = 0,\n\n  // This character requires special handling in DoPartialPath. Doing this test\n  // first allows us to filter out the common cases of regular characters that\n  // can be directly copied.\n  SPECIAL = 1,\n\n  // This character must be escaped in the canonical output. Note that all\n  // escaped chars also have the \"special\" bit set so that the code that looks\n  // for this is triggered. Not valid with PASS or ESCAPE\n  ESCAPE_BIT = 2,\n  ESCAPE = ESCAPE_BIT | SPECIAL,\n\n  // This character must be unescaped in canonical output. Not valid with\n  // ESCAPE or PASS. We DON'T set the SPECIAL flag since if we encounter these\n  // characters unescaped, they should just be copied.\n  UNESCAPE = 4,\n\n  // This character is disallowed in URLs. Note that the \"special\" bit is also\n  // set to trigger handling.\n  INVALID_BIT = 8,\n  INVALID = INVALID_BIT | SPECIAL,\n};\n\n// This table contains one of the above flag values. Note some flags are more\n// than one bits because they also turn on the \"special\" flag. 
Special is the\n// only flag that may be combined with others.\n//\n// This table is designed to match exactly what IE does with the characters.\n//\n// Dot is even more special, and the escaped version is handled specially by\n// IsDot. Therefore, we don't need the \"escape\" flag, and even the \"unescape\"\n// bit is never handled (we just need the \"special\") bit.\nconst unsigned char kPathCharLookup[0x100] = {\n    //   NULL     control chars...\n    INVALID, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    //   control chars...\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    //   ' '      !        \"        #        $        %        &        '        (        )        *\n    //   +        ,        -        . /\n    ESCAPE, PASS, ESCAPE, ESCAPE, PASS, ESCAPE, PASS, PASS, PASS, PASS, PASS, PASS, PASS, UNESCAPE,\n    SPECIAL, PASS,\n    //   0        1        2        3        4        5        6        7        8        9        :\n    //   ;        <        =        >        ?\n    UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE,\n    UNESCAPE, PASS, PASS, ESCAPE, PASS, ESCAPE, ESCAPE,\n    //   @        A        B        C        D        E        F        G        H        I        J\n    //   K        L        M        N        O\n    PASS, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE,\n    UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE,\n    //   P        Q        R        S        T        U        V        W        X        Y        Z\n    //   [        \\        ]        ^        _\n    UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE,\n    UNESCAPE, UNESCAPE, PASS, ESCAPE, PASS, ESCAPE, UNESCAPE,\n    //   `        a        b        c        d  
      e        f        g        h        i        j\n    //   k        l        m        n        o\n    ESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE,\n    UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE,\n    //   p        q        r        s        t        u        v        w        x        y        z\n    //   {        |        }        ~        <NBSP>\n    UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE,\n    UNESCAPE, UNESCAPE, ESCAPE, ESCAPE, ESCAPE, UNESCAPE, ESCAPE,\n    //   ...all the high-bit characters are escaped\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE,\n    ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE};\n\nenum DotDisposition {\n  // The given dot is just part of a filename and is not special.\n  NOT_A_DIRECTORY,\n\n  // The given dot is the current directory.\n  DIRECTORY_CUR,\n\n  // The given dot is the first of a double dot that should take us up one.\n  
DIRECTORY_UP\n};\n\n// When the path resolver finds a dot, this function is called with the\n// character following that dot to see what it is. The return value\n// indicates what type this dot is (see above). This code handles the case\n// where the dot is at the end of the input.\n//\n// |*consumed_len| will contain the number of characters in the input that\n// express what we found.\n//\n// If the input is \"../foo\", |after_dot| = 1, |end| = 6, and\n// at the end, |*consumed_len| = 2 for the \"./\" this function consumed. The\n// original dot length should be handled by the caller.\ntemplate <typename CHAR>\nDotDisposition ClassifyAfterDot(const CHAR* spec, int after_dot, int end, int* consumed_len) {\n  if (after_dot == end) {\n    // Single dot at the end.\n    *consumed_len = 0;\n    return DIRECTORY_CUR;\n  }\n  if (IsURLSlash(spec[after_dot])) {\n    // Single dot followed by a slash.\n    *consumed_len = 1; // Consume the slash\n    return DIRECTORY_CUR;\n  }\n\n  int second_dot_len = IsDot(spec, after_dot, end);\n  if (second_dot_len) {\n    int after_second_dot = after_dot + second_dot_len;\n    if (after_second_dot == end) {\n      // Double dot at the end.\n      *consumed_len = second_dot_len;\n      return DIRECTORY_UP;\n    }\n    if (IsURLSlash(spec[after_second_dot])) {\n      // Double dot followed by a slash.\n      *consumed_len = second_dot_len + 1;\n      return DIRECTORY_UP;\n    }\n  }\n\n  // The dots are followed by something else, not a directory.\n  *consumed_len = 0;\n  return NOT_A_DIRECTORY;\n}\n\n// Rewinds the output to the previous slash. It is assumed that the output\n// ends with a slash and this doesn't count (we call this when we are\n// appending directory paths, so the previous path component has and ending\n// slash).\n//\n// This will stop at the first slash (assumed to be at position\n// |path_begin_in_output| and not go any higher than that. 
Some web pages\n// do \"..\" too many times, so we need to handle that brokenness.\n//\n// It searches for a literal slash rather than including a backslash as well\n// because it is run only on the canonical output.\n//\n// The output is guaranteed to end in a slash when this function completes.\nvoid BackUpToPreviousSlash(int path_begin_in_output, CanonOutput* output) {\n  DCHECK(output->length() > 0);\n\n  int i = output->length() - 1;\n  DCHECK(output->at(i) == '/');\n  if (i == path_begin_in_output)\n    return; // We're at the first slash, nothing to do.\n\n  // Now back up (skipping the trailing slash) until we find another slash.\n  i--;\n  while (output->at(i) != '/' && i > path_begin_in_output)\n    i--;\n\n  // Now shrink the output to just include that last slash we found.\n  output->set_length(i + 1);\n}\n\n// Looks for problematic nested escape sequences and escapes the output as\n// needed to ensure they can't be misinterpreted.\n//\n// Our concern is that in input escape sequence that's invalid because it\n// contains nested escape sequences might look valid once those are unescaped.\n// For example, \"%%300\" is not a valid escape sequence, but after unescaping the\n// inner \"%30\" this becomes \"%00\" which is valid. Leaving this in the output\n// string can result in callers re-canonicalizing the string and unescaping this\n// sequence, thus resulting in something fundamentally different than the\n// original input here. This can cause a variety of problems.\n//\n// This function is called after we've just unescaped a sequence that's within\n// two output characters of a previous '%' that we know didn't begin a valid\n// escape sequence in the input string. 
We look for whether the output is going\n// to turn into a valid escape sequence, and if so, convert the initial '%' into\n// an escaped \"%25\" so the output can't be misinterpreted.\n//\n// |spec| is the input string we're canonicalizing.\n// |next_input_index| is the index of the next unprocessed character in |spec|.\n// |input_len| is the length of |spec|.\n// |last_invalid_percent_index| is the index in |output| of a previously-seen\n// '%' character. The caller knows this '%' character isn't followed by a valid\n// escape sequence in the input string.\n// |output| is the canonicalized output thus far. The caller guarantees this\n// ends with a '%' followed by one or two characters, and the '%' is the one\n// pointed to by |last_invalid_percent_index|. The last character in the string\n// was just unescaped.\ntemplate <typename CHAR>\nvoid CheckForNestedEscapes(const CHAR* spec, int next_input_index, int input_len,\n                           int last_invalid_percent_index, CanonOutput* output) {\n  const int length = output->length();\n  const char last_unescaped_char = output->at(length - 1);\n\n  // If |output| currently looks like \"%c\", we need to try appending the next\n  // input character to see if this will result in a problematic escape\n  // sequence. Note that this won't trigger on the first nested escape of a\n  // two-escape sequence like \"%%30%30\" -- we'll allow the conversion to\n  // \"%0%30\" -- but the second nested escape will be caught by this function\n  // when it's called again in that case.\n  const bool append_next_char = last_invalid_percent_index == length - 2;\n  if (append_next_char) {\n    // If the input doesn't contain a 7-bit character next, this case won't be a\n    // problem.\n    if ((next_input_index == input_len) || (spec[next_input_index] >= 0x80))\n      return;\n    output->push_back(static_cast<char>(spec[next_input_index]));\n  }\n\n  // Now output ends like \"%cc\". 
Try to unescape this.\n  int begin = last_invalid_percent_index;\n  unsigned char temp;\n  if (DecodeEscaped(output->data(), &begin, output->length(), &temp)) {\n    // New escape sequence found. Overwrite the characters following the '%'\n    // with \"25\", and push_back() the one or two characters that were following\n    // the '%' when we were called.\n    if (!append_next_char)\n      output->push_back(output->at(last_invalid_percent_index + 1));\n    output->set(last_invalid_percent_index + 1, '2');\n    output->set(last_invalid_percent_index + 2, '5');\n    output->push_back(last_unescaped_char);\n  } else if (append_next_char) {\n    // Not a valid escape sequence, but we still need to undo appending the next\n    // source character so the caller can process it normally.\n    output->set_length(length);\n  }\n}\n\n// Appends the given path to the output. It assumes that if the input path\n// starts with a slash, it should be copied to the output. If no path has\n// already been appended to the output (the case when not resolving\n// relative URLs), the path should begin with a slash.\n//\n// If there are already path components (this mode is used when appending\n// relative paths for resolving), it assumes that the output already has\n// a trailing slash and that if the input begins with a slash, it should be\n// copied to the output.\n//\n// We do not collapse multiple slashes in a row to a single slash. 
It seems\n// no web browsers do this, and we don't want incompatibilities, even though\n// it would be correct for most systems.\ntemplate <typename CHAR, typename UCHAR>\nbool DoPartialPath(const CHAR* spec, const Component& path, int path_begin_in_output,\n                   CanonOutput* output) {\n  int end = path.end();\n\n  // We use this variable to minimize the amount of work done when unescaping --\n  // we'll only call CheckForNestedEscapes() when this points at one of the last\n  // couple of characters in |output|.\n  int last_invalid_percent_index = INT_MIN;\n\n  bool success = true;\n  for (int i = path.begin; i < end; i++) {\n    UCHAR uch = static_cast<UCHAR>(spec[i]);\n    // Chromium UTF8 logic is unneeded, as the missing templated result\n    // refers only to char const* (single-byte) characters at this time.\n    // This only trips up MSVC, since linux gcc seems to optimize it away.\n    // Indention is to avoid gratuitous diffs to origin source\n    {\n      unsigned char out_ch = static_cast<unsigned char>(uch);\n      unsigned char flags = kPathCharLookup[out_ch];\n      if (flags & SPECIAL) {\n        // Needs special handling of some sort.\n        int dotlen;\n        if ((dotlen = IsDot(spec, i, end)) > 0) {\n          // See if this dot was preceded by a slash in the output. We\n          // assume that when canonicalizing paths, they will always\n          // start with a slash and not a dot, so we don't have to\n          // bounds check the output.\n          //\n          // Note that we check this in the case of dots so we don't have to\n          // special case slashes. 
Since slashes are much more common than\n          // dots, this actually increases performance measurably (though\n          // slightly).\n          DCHECK(output->length() > path_begin_in_output);\n          if (output->length() > path_begin_in_output && output->at(output->length() - 1) == '/') {\n            // Slash followed by a dot, check to see if this is means relative\n            int consumed_len;\n            switch (ClassifyAfterDot<CHAR>(spec, i + dotlen, end, &consumed_len)) {\n            case NOT_A_DIRECTORY:\n              // Copy the dot to the output, it means nothing special.\n              output->push_back('.');\n              i += dotlen - 1;\n              break;\n            case DIRECTORY_CUR: // Current directory, just skip the input.\n              i += dotlen + consumed_len - 1;\n              break;\n            case DIRECTORY_UP:\n              BackUpToPreviousSlash(path_begin_in_output, output);\n              i += dotlen + consumed_len - 1;\n              break;\n            }\n          } else {\n            // This dot is not preceded by a slash, it is just part of some\n            // file name.\n            output->push_back('.');\n            i += dotlen - 1;\n          }\n\n        } else if (out_ch == '\\\\') {\n          // Convert backslashes to forward slashes\n          output->push_back('/');\n\n        } else if (out_ch == '%') {\n          // Handle escape sequences.\n          unsigned char unescaped_value;\n          if (DecodeEscaped(spec, &i, end, &unescaped_value)) {\n            // Valid escape sequence, see if we keep, reject, or unescape it.\n            // Note that at this point DecodeEscape() will have advanced |i| to\n            // the last character of the escape sequence.\n            char unescaped_flags = kPathCharLookup[unescaped_value];\n\n            if (unescaped_flags & UNESCAPE) {\n              // This escaped value shouldn't be escaped. 
Try to copy it.\n              output->push_back(unescaped_value);\n              // If we just unescaped a value within 2 output characters of the\n              // '%' from a previously-detected invalid escape sequence, we\n              // might have an input string with problematic nested escape\n              // sequences; detect and fix them.\n              if (last_invalid_percent_index >= (output->length() - 3)) {\n                CheckForNestedEscapes(spec, i + 1, end, last_invalid_percent_index, output);\n              }\n            } else {\n              // Either this is an invalid escaped character, or it's a valid\n              // escaped character we should keep escaped. In the first case we\n              // should just copy it exactly and remember the error. In the\n              // second we also copy exactly in case the server is sensitive to\n              // changing the case of any hex letters.\n              output->push_back('%');\n              output->push_back(static_cast<char>(spec[i - 1]));\n              output->push_back(static_cast<char>(spec[i]));\n              if (unescaped_flags & INVALID_BIT)\n                success = false;\n            }\n          } else {\n            // Invalid escape sequence. IE7+ rejects any URLs with such\n            // sequences, while other browsers pass them through unchanged. We\n            // use the permissive behavior.\n            // TODO(brettw): Consider testing IE's strict behavior, which would\n            // allow removing the code to handle nested escapes above.\n            last_invalid_percent_index = output->length();\n            output->push_back('%');\n          }\n\n        } else if (flags & INVALID_BIT) {\n          // For NULLs, etc. 
fail.\n          AppendEscapedChar(out_ch, output);\n          success = false;\n\n        } else if (flags & ESCAPE_BIT) {\n          // This character should be escaped.\n          AppendEscapedChar(out_ch, output);\n        }\n      } else {\n        // Nothing special about this character, just append it.\n        output->push_back(out_ch);\n      }\n    }\n  }\n  return success;\n}\n\ntemplate <typename CHAR, typename UCHAR>\nbool DoPath(const CHAR* spec, const Component& path, CanonOutput* output, Component* out_path) {\n  bool success = true;\n  out_path->begin = output->length();\n  if (path.len > 0) {\n    // Write out an initial slash if the input has none. If we just parse a URL\n    // and then canonicalize it, it will of course have a slash already. This\n    // check is for the replacement and relative URL resolving cases of file\n    // URLs.\n    if (!IsURLSlash(spec[path.begin]))\n      output->push_back('/');\n\n    success = DoPartialPath<CHAR, UCHAR>(spec, path, out_path->begin, output);\n  } else {\n    // No input, canonical path is a slash.\n    output->push_back('/');\n  }\n  out_path->len = output->length() - out_path->begin;\n  return success;\n}\n\n} // namespace\n\nbool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output,\n                      Component* out_path) {\n  return DoPath<char, unsigned char>(spec, path, output, out_path);\n}\n\n} // namespace chromium_url\n"
  },
  {
    "path": "source/common/chromium_url/url_canon_stdstring.cc",
    "content": "// Envoy snapshot of Chromium URL path normalization, see README.md.\n// NOLINT(namespace-envoy)\n\n// Copyright 2013 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n#include \"common/chromium_url/url_canon_stdstring.h\"\n\nnamespace chromium_url {\n\nStdStringCanonOutput::StdStringCanonOutput(std::string* str) : CanonOutput(), str_(str) {\n  cur_len_ = static_cast<int>(str_->size()); // Append to existing data.\n  buffer_ = str_->empty() ? NULL : &(*str_)[0];\n  buffer_len_ = static_cast<int>(str_->size());\n}\n\nStdStringCanonOutput::~StdStringCanonOutput() {\n  // Nothing to do, we don't own the string.\n}\n\nvoid StdStringCanonOutput::Complete() {\n  str_->resize(cur_len_);\n  buffer_len_ = cur_len_;\n}\n\nvoid StdStringCanonOutput::Resize(int sz) {\n  str_->resize(sz);\n  buffer_ = str_->empty() ? NULL : &(*str_)[0];\n  buffer_len_ = sz;\n}\n\n} // namespace chromium_url\n"
  },
  {
    "path": "source/common/chromium_url/url_canon_stdstring.h",
    "content": "// Envoy snapshot of Chromium URL path normalization, see README.md.\n// NOLINT(namespace-envoy)\n\n// Copyright 2013 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n#ifndef URL_URL_CANON_STDSTRING_H_\n#define URL_URL_CANON_STDSTRING_H_\n\n// This header file defines a canonicalizer output method class for STL\n// strings. Because the canonicalizer tries not to be dependent on the STL,\n// we have segregated it here.\n\n#include <string>\n\n#include \"common/chromium_url/envoy_shim.h\"\n#include \"common/chromium_url/url_canon.h\"\n\n#define DISALLOW_COPY_AND_ASSIGN(TypeName)                                                         \\\n  TypeName(const TypeName&) = delete;                                                              \\\n  TypeName& operator=(const TypeName&) = delete\n\nnamespace chromium_url {\n\n// Write into a std::string given in the constructor. This object does not own\n// the string itself, and the user must ensure that the string stays alive\n// throughout the lifetime of this object.\n//\n// The given string will be appended to; any existing data in the string will\n// be preserved.\n//\n// Note that when canonicalization is complete, the string will likely have\n// unused space at the end because we make the string very big to start out\n// with (by |initial_size|). 
This ends up being important because resize\n// operations are slow, and because the base class needs to write directly\n// into the buffer.\n//\n// Therefore, the user should call Complete() before using the string that\n// this class wrote into.\nclass COMPONENT_EXPORT(URL) StdStringCanonOutput : public CanonOutput {\npublic:\n  StdStringCanonOutput(std::string* str);\n  ~StdStringCanonOutput() override;\n\n  // Must be called after writing has completed but before the string is used.\n  void Complete();\n\n  void Resize(int sz) override;\n\nprotected:\n  std::string* str_;\n  DISALLOW_COPY_AND_ASSIGN(StdStringCanonOutput);\n};\n\n} // namespace chromium_url\n\n#endif // URL_URL_CANON_STDSTRING_H_\n"
  },
  {
    "path": "source/common/chromium_url/url_parse.h",
    "content": "// Envoy snapshot of Chromium URL path normalization, see README.md.\n// NOLINT(namespace-envoy)\n\n// Copyright 2013 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n#ifndef URL_PARSE_H_\n#define URL_PARSE_H_\n\nnamespace chromium_url {\n\n// Component ------------------------------------------------------------------\n\n// Represents a substring for URL parsing.\nstruct Component {\n  Component() : begin(0), len(-1) {}\n\n  // Normal constructor: takes an offset and a length.\n  Component(int b, int l) : begin(b), len(l) {}\n\n  int end() const { return begin + len; }\n\n  // Returns true if this component is valid, meaning the length is given. Even\n  // valid components may be empty to record the fact that they exist.\n  bool is_valid() const { return (len != -1); }\n\n  // Returns true if the given component is specified on false, the component\n  // is either empty or invalid.\n  bool is_nonempty() const { return (len > 0); }\n\n  void reset() {\n    begin = 0;\n    len = -1;\n  }\n\n  bool operator==(const Component& other) const { return begin == other.begin && len == other.len; }\n\n  int begin; // Byte offset in the string of this component.\n  int len;   // Will be -1 if the component is unspecified.\n};\n\n// Helper that returns a component created with the given begin and ending\n// points. The ending point is non-inclusive.\ninline Component MakeRange(int begin, int end) { return Component(begin, end - begin); }\n\n} // namespace chromium_url\n\n#endif // URL_PARSE_H_\n"
  },
  {
    "path": "source/common/chromium_url/url_parse_internal.h",
    "content": "// Envoy snapshot of Chromium URL path normalization, see README.md.\n// NOLINT(namespace-envoy)\n\n// Copyright 2013 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n#ifndef URL_URL_PARSE_INTERNAL_H_\n#define URL_URL_PARSE_INTERNAL_H_\n\nnamespace chromium_url {\n\n// We treat slashes and backslashes the same for IE compatibility.\ninline bool IsURLSlash(char ch) { return ch == '/' || ch == '\\\\'; }\n\n} // namespace chromium_url\n\n#endif // URL_URL_PARSE_INTERNAL_H_\n"
  },
  {
    "path": "source/common/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_basic_cc_library\",\n    \"envoy_cc_library\",\n    \"envoy_cc_platform_dep\",\n    \"envoy_cc_posix_library\",\n    \"envoy_cc_win32_library\",\n    \"envoy_include_prefix\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"assert_lib\",\n    srcs = [\"assert.cc\"],\n    hdrs = [\"assert.h\"],\n    external_deps = [\n        \"abseil_base\",\n        \"abseil_synchronization\",\n    ],\n    deps = [\":minimal_logger_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"debug_recursion_checker_lib\",\n    hdrs = [\"debug_recursion_checker.h\"],\n    deps = [\":assert_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"backoff_lib\",\n    srcs = [\"backoff_strategy.cc\"],\n    hdrs = [\"backoff_strategy.h\"],\n    deps = [\n        \":assert_lib\",\n        \"//include/envoy/common:backoff_strategy_interface\",\n        \"//include/envoy/common:random_generator_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"base64_lib\",\n    srcs = [\"base64.cc\"],\n    hdrs = [\"base64.h\"],\n    deps = [\n        \":assert_lib\",\n        \":empty_string\",\n        \"//include/envoy/buffer:buffer_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"byte_order_lib\",\n    hdrs = [\"byte_order.h\"],\n)\n\nenvoy_cc_library(\n    name = \"c_smart_ptr_lib\",\n    hdrs = [\"c_smart_ptr.h\"],\n)\n\nenvoy_cc_library(\n    name = \"cleanup_lib\",\n    hdrs = [\"cleanup.h\"],\n    deps = [\n        \":assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"compiler_requirements_lib\",\n    hdrs = [\"compiler_requirements.h\"],\n)\n\nenvoy_cc_library(\n    name = \"documentation_url_lib\",\n    hdrs = [\"documentation_url.h\"],\n)\n\nenvoy_cc_library(\n    name = \"empty_string\",\n    hdrs = [\"empty_string.h\"],\n)\n\nenvoy_cc_library(\n    name = \"enum_to_int\",\n    hdrs = [\"enum_to_int.h\"],\n)\n\n# fmt_lib is automatically a 
dependency of all envoy_cc_library definitions.\nenvoy_basic_cc_library(\n    name = \"fmt_lib\",\n    hdrs = [\"fmt.h\"],\n    external_deps = [\n        \"abseil_strings\",\n        \"fmtlib\",\n    ],\n    include_prefix = envoy_include_prefix(package_name()),\n    deps = [\"//include/envoy/common:base_includes\"],\n)\n\nenvoy_cc_library(\n    name = \"hash_lib\",\n    srcs = [\"hash.cc\"],\n    hdrs = [\"hash.h\"],\n    external_deps = [\"xxhash\"],\n)\n\nenvoy_cc_library(\n    name = \"hex_lib\",\n    srcs = [\"hex.cc\"],\n    hdrs = [\"hex.h\"],\n    deps = [\":utility_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"linked_object\",\n    hdrs = [\"linked_object.h\"],\n    deps = [\":assert_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"mem_block_builder_lib\",\n    hdrs = [\"mem_block_builder.h\"],\n    deps = [\":assert_lib\"],\n)\n\n# Contains macros and helpers for dumpState utilities\nenvoy_cc_library(\n    name = \"dump_state_utils\",\n    hdrs = [\"dump_state_utils.h\"],\n)\n\n# Contains minimal code for logging to stderr.\nenvoy_cc_library(\n    name = \"minimal_logger_lib\",\n    srcs = [\n        \"fancy_logger.cc\",\n        \"logger.cc\",\n    ],\n    hdrs = [\n        \"fancy_logger.h\",\n        \"logger.h\",\n    ],\n    external_deps = [\"abseil_synchronization\"],\n    deps = [\n        \":base_logger_lib\",\n        \":lock_guard_lib\",\n        \":macros\",\n        \":non_copyable\",\n    ] + select({\n        \"//bazel:android_logger\": [\"logger_impl_lib_android\"],\n        \"//conditions:default\": [\"logger_impl_lib_standard\"],\n    }),\n)\n\nenvoy_cc_library(\n    name = \"base_logger_lib\",\n    srcs = [\"base_logger.cc\"],\n    hdrs = [\"base_logger.h\"],\n)\n\nenvoy_cc_library(\n    name = \"logger_impl_lib_standard\",\n    hdrs = [\"standard/logger_impl.h\"],\n    strip_include_prefix = \"standard\",\n    deps = [\":base_logger_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"logger_impl_lib_android\",\n    srcs = select({\n        
\"//bazel:android_logger\": [\"android/logger_impl.cc\"],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"//bazel:android_logger\": [\"android/logger_impl.h\"],\n        \"//conditions:default\": [],\n    }),\n    strip_include_prefix = \"android\",\n    deps = [\":base_logger_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"mutex_tracer_lib\",\n    srcs = [\"mutex_tracer_impl.cc\"],\n    hdrs = [\"mutex_tracer_impl.h\"],\n    external_deps = [\"abseil_synchronization\"],\n    deps = [\n        \":assert_lib\",\n        \"//include/envoy/common:mutex_tracer\",\n    ],\n)\n\n# All non-essential logger delegates should go here to reduce dependencies that\n# minimal_logger_lib maintains.\nenvoy_cc_library(\n    name = \"logger_lib\",\n    srcs = [\"logger_delegates.cc\"],\n    hdrs = [\"logger_delegates.h\"],\n    deps = [\n        \":dump_state_utils\",\n        \":macros\",\n        \":minimal_logger_lib\",\n        \"//include/envoy/access_log:access_log_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"basic_resource_lib\",\n    hdrs = [\"basic_resource_impl.h\"],\n    deps = [\n        \"//include/envoy/common:resource_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"macros\",\n    hdrs = [\"macros.h\"],\n)\n\nenvoy_cc_library(\n    name = \"matchers_lib\",\n    srcs = [\"matchers.cc\"],\n    hdrs = [\"matchers.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/common:matchers_interface\",\n        \"//source/common/common:regex_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/http:path_utility_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"random_generator_lib\",\n    srcs = [\n        
\"random_generator.cc\",\n    ],\n    hdrs = [\n        \"random_generator.h\",\n    ],\n    external_deps = [\"ssl\"],\n    deps = [\n        \":assert_lib\",\n        \"//include/envoy/common:random_generator_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"regex_lib\",\n    srcs = [\"regex.cc\"],\n    hdrs = [\"regex.h\"],\n    deps = [\n        \":assert_lib\",\n        \"//include/envoy/common:regex_interface\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"@com_googlesource_code_re2//:re2\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"non_copyable\",\n    hdrs = [\"non_copyable.h\"],\n)\n\nenvoy_cc_library(\n    name = \"phantom\",\n    hdrs = [\"phantom.h\"],\n)\n\nenvoy_cc_library(\n    name = \"scope_tracker\",\n    hdrs = [\"scope_tracker.h\"],\n    deps = [\n        \"//include/envoy/common:scope_tracker_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stl_helpers\",\n    hdrs = [\"stl_helpers.h\"],\n)\n\nenvoy_cc_library(\n    name = \"thread_annotations\",\n    hdrs = [\"thread_annotations.h\"],\n    external_deps = [\"abseil_base\"],\n)\n\nenvoy_cc_library(\n    name = \"thread_synchronizer_lib\",\n    srcs = [\"thread_synchronizer.cc\"],\n    hdrs = [\"thread_synchronizer.h\"],\n    external_deps = [\"abseil_synchronization\"],\n    deps = [\n        \":assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"thread_lib\",\n    hdrs = [\"thread.h\"],\n    external_deps = [\"abseil_synchronization\"],\n    deps = envoy_cc_platform_dep(\"thread_impl_lib\") + [\n        \":non_copyable\",\n    ],\n)\n\nenvoy_cc_posix_library(\n    name = \"thread_impl_lib\",\n    srcs = [\"posix/thread_impl.cc\"],\n    hdrs = [\"posix/thread_impl.h\"],\n    strip_include_prefix = \"posix\",\n    deps = [\n        \":assert_lib\",\n        
\"//include/envoy/thread:thread_interface\",\n    ],\n)\n\nenvoy_cc_win32_library(\n    name = \"thread_impl_lib\",\n    srcs = [\"win32/thread_impl.cc\"],\n    hdrs = [\"win32/thread_impl.h\"],\n    strip_include_prefix = \"win32\",\n    deps = [\n        \":assert_lib\",\n        \"//include/envoy/thread:thread_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"lock_guard_lib\",\n    hdrs = [\"lock_guard.h\"],\n    deps = [\n        \":thread_annotations\",\n        \"//include/envoy/thread:thread_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    external_deps = [\"abseil_node_hash_map\"],\n    deps = [\n        \":assert_lib\",\n        \":hash_lib\",\n        \":non_copyable\",\n        \"//include/envoy/common:interval_set_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"callback_impl_lib\",\n    hdrs = [\"callback_impl.h\"],\n    deps = [\n        \":assert_lib\",\n        \"//include/envoy/common:callback\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"perf_annotation_lib\",\n    srcs = [\"perf_annotation.cc\"],\n    hdrs = [\"perf_annotation.h\"],\n    deps = [\n        \":assert_lib\",\n        \":thread_annotations\",\n        \":thread_lib\",\n        \":utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"scalar_to_byte_vector_lib\",\n    hdrs = [\"scalar_to_byte_vector.h\"],\n)\n\nenvoy_cc_library(\n    name = \"token_bucket_impl_lib\",\n    srcs = [\"token_bucket_impl.cc\"],\n    hdrs = [\"token_bucket_impl.h\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/common:token_bucket_interface\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"statusor_lib\",\n    hdrs = [\"statusor.h\"],\n    deps = [\n        
\"//third_party/statusor:statusor_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/common/android/logger_impl.cc",
    "content": "#include \"common/common/logger_impl.h\"\n\n#include \"spdlog/sinks/android_sink.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\nAndroidLogger::AndroidLogger(const std::string& name)\n    : Logger(std::make_shared<spdlog::logger>(\n          name, std::make_shared<spdlog::sinks::android_sink<std::mutex>>())) {}\n\n} // namespace Logger\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/android/logger_impl.h",
    "content": "#pragma once\n\n#include \"common/common/base_logger.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\n#define GENERATE_LOGGER(X) AndroidLogger(#X),\n\n/**\n * Logger that uses spdlog::sinks::android_sink.\n */\nclass AndroidLogger : public Logger {\nprivate:\n  AndroidLogger(const std::string& name);\n\n  friend class Registry;\n};\n\n} // namespace Logger\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/assert.cc",
    "content": "#include \"common/common/assert.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Assert {\n\nclass ActionRegistrationImpl : public ActionRegistration {\npublic:\n  ActionRegistrationImpl(std::function<void()> action) {\n    ASSERT(debug_assertion_failure_record_action_ == nullptr);\n    debug_assertion_failure_record_action_ = action;\n  }\n\n  ~ActionRegistrationImpl() override {\n    ASSERT(debug_assertion_failure_record_action_ != nullptr);\n    debug_assertion_failure_record_action_ = nullptr;\n  }\n\n  static void invokeAction() {\n    if (debug_assertion_failure_record_action_ != nullptr) {\n      debug_assertion_failure_record_action_();\n    }\n  }\n\nprivate:\n  // This implementation currently only handles one action being set at a time. This is currently\n  // sufficient. If multiple actions are ever needed, the actions should be chained when\n  // additional actions are registered.\n  static std::function<void()> debug_assertion_failure_record_action_;\n};\n\n// This class implements the logic for triggering ENVOY_BUG logs and actions. Logging and actions\n// will be triggered with exponential back-off per file and line bug.\nclass EnvoyBugRegistrationImpl : public ActionRegistration {\npublic:\n  EnvoyBugRegistrationImpl(std::function<void()> action) {\n    ASSERT(envoy_bug_failure_record_action_ == nullptr,\n           \"An ENVOY_BUG action was already set. Currently only a single action is supported.\");\n    envoy_bug_failure_record_action_ = action;\n    counters_.clear();\n  }\n\n  ~EnvoyBugRegistrationImpl() override {\n    ASSERT(envoy_bug_failure_record_action_ != nullptr);\n    envoy_bug_failure_record_action_ = nullptr;\n  }\n\n  // This method is invoked when an ENVOY_BUG condition fails. 
It increments a per file and line\n  // counter for every ENVOY_BUG hit in a mutex guarded map.\n  // The implementation may also be a inline static counter per-file and line. There is no benchmark\n  // to show that the performance of this mutex is any worse than atomic counters. Acquiring and\n  // releasing a mutex is cheaper than a cache miss, but the mutex here is contended for every\n  // ENVOY_BUG failure rather than per individual bug. Logging ENVOY_BUGs is not a performance\n  // critical path, and mutex contention would indicate that there is a serious failure.\n  // Currently, this choice reduces code size and has the advantage that behavior is easier to\n  // understand and debug, and test behavior is predictable.\n  static bool shouldLogAndInvoke(absl::string_view bug_name) {\n    // Increment counter, inserting first if counter does not exist.\n    uint64_t counter_value = 0;\n    {\n      absl::MutexLock lock(&mutex_);\n      counter_value = ++counters_[bug_name];\n    }\n\n    // Check if counter is power of two by its bitwise representation.\n    return (counter_value & (counter_value - 1)) == 0;\n  }\n\n  static void invokeAction() {\n    if (envoy_bug_failure_record_action_ != nullptr) {\n      envoy_bug_failure_record_action_();\n    }\n  }\n\nprivate:\n  // This implementation currently only handles one action being set at a time. This is currently\n  // sufficient. 
If multiple actions are ever needed, the actions should be chained when\n  // additional actions are registered.\n  static std::function<void()> envoy_bug_failure_record_action_;\n\n  using EnvoyBugMap = absl::flat_hash_map<std::string, uint64_t>;\n  static absl::Mutex mutex_;\n  static EnvoyBugMap counters_ GUARDED_BY(mutex_);\n};\n\nstd::function<void()> ActionRegistrationImpl::debug_assertion_failure_record_action_;\nstd::function<void()> EnvoyBugRegistrationImpl::envoy_bug_failure_record_action_;\nEnvoyBugRegistrationImpl::EnvoyBugMap EnvoyBugRegistrationImpl::counters_;\nabsl::Mutex EnvoyBugRegistrationImpl::mutex_;\n\nActionRegistrationPtr setDebugAssertionFailureRecordAction(const std::function<void()>& action) {\n  return std::make_unique<ActionRegistrationImpl>(action);\n}\n\nActionRegistrationPtr setEnvoyBugFailureRecordAction(const std::function<void()>& action) {\n  return std::make_unique<EnvoyBugRegistrationImpl>(action);\n}\n\nvoid invokeDebugAssertionFailureRecordActionForAssertMacroUseOnly() {\n  ActionRegistrationImpl::invokeAction();\n}\n\nvoid invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly() {\n  EnvoyBugRegistrationImpl::invokeAction();\n}\n\nbool shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(absl::string_view bug_name) {\n  return EnvoyBugRegistrationImpl::shouldLogAndInvoke(bug_name);\n}\n\n} // namespace Assert\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/assert.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Assert {\n\nclass ActionRegistration {\npublic:\n  virtual ~ActionRegistration() = default;\n};\nusing ActionRegistrationPtr = std::unique_ptr<ActionRegistration>;\n\n/**\n * Sets an action to be invoked when a debug assertion failure is detected\n * in a release build. This action will be invoked each time an assertion\n * failure is detected.\n *\n * This function is not thread-safe; concurrent calls to set the action are not allowed.\n *\n * The action may be invoked concurrently if two ASSERTS in different threads fail at the\n * same time, so the action must be thread-safe.\n *\n * This has no effect in debug builds (assertion failure aborts the process)\n * or in release builds without ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE defined (assertion\n * tests are compiled out).\n *\n * @param action The action to take when an assertion fails.\n * @return A registration object. The registration is removed when the object is destructed.\n */\nActionRegistrationPtr setDebugAssertionFailureRecordAction(const std::function<void()>& action);\n\n/**\n * Sets an action to be invoked when an ENVOY_BUG failure is detected in a release build. This\n * action will be invoked each time an ENVOY_BUG failure is detected.\n *\n * This function is not thread-safe; concurrent calls to set the action are not allowed.\n *\n * The action may be invoked concurrently if two ENVOY_BUGs in different threads fail at the\n * same time, so the action must be thread-safe.\n *\n * This has no effect in debug builds (envoy bug failure aborts the process).\n *\n * @param action The action to take when an envoy bug fails.\n * @return A registration object. 
The registration is removed when the object is destructed.\n */\nActionRegistrationPtr setEnvoyBugFailureRecordAction(const std::function<void()>& action);\n\n/**\n * Invokes the action set by setDebugAssertionFailureRecordAction, or does nothing if\n * no action has been set.\n *\n * This should only be called by ASSERT macros in this file.\n */\nvoid invokeDebugAssertionFailureRecordActionForAssertMacroUseOnly();\n\n/**\n * Invokes the action set by setEnvoyBugFailureRecordAction, or does nothing if\n * no action has been set.\n *\n * This should only be called by ENVOY_BUG macros in this file.\n */\nvoid invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly();\n\n/**\n * Increments power of two counter for EnvoyBugRegistrationImpl.\n *\n * This should only be called by ENVOY_BUG macros in this file.\n */\nbool shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(absl::string_view bug_name);\n\n// CONDITION_STR is needed to prevent macros in condition from being expected, which obfuscates\n// the logged failure, e.g., \"EAGAIN\" vs \"11\".\n#define _ASSERT_IMPL(CONDITION, CONDITION_STR, ACTION, DETAILS)                                    \\\n  do {                                                                                             \\\n    if (!(CONDITION)) {                                                                            \\\n      const std::string& details = (DETAILS);                                                      \\\n      ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::assert), critical,    \\\n                          \"assert failure: {}.{}{}\", CONDITION_STR,                                \\\n                          details.empty() ? 
\"\" : \" Details: \", details);                           \\\n      ACTION;                                                                                      \\\n    }                                                                                              \\\n  } while (false)\n\n// This non-implementation ensures that its argument is a valid expression that can be statically\n// casted to a bool, but the expression is never evaluated and will be compiled away.\n#define _NULL_ASSERT_IMPL(X, ...)                                                                  \\\n  do {                                                                                             \\\n    constexpr bool __assert_dummy_variable = false && static_cast<bool>(X);                        \\\n    (void)__assert_dummy_variable;                                                                 \\\n  } while (false)\n\n/**\n * assert macro that uses our builtin logging which gives us thread ID and can log to various\n * sinks.\n *\n * The old style release assert was of the form RELEASE_ASSERT(foo == bar);\n * where it would log stack traces and the failed conditional and crash if the\n * condition is not met. The are many legacy RELEASE_ASSERTS in Envoy which\n * were converted to RELEASE_ASSERT(foo == bar, \"\");\n *\n * The new style of release assert is of the form\n * RELEASE_ASSERT(foo == bar, \"reason foo should actually be bar\");\n * new uses of RELEASE_ASSERT should supply a verbose explanation of what went wrong.\n */\n#define RELEASE_ASSERT(X, DETAILS) _ASSERT_IMPL(X, #X, abort(), DETAILS)\n\n/**\n * Assert macro intended for security guarantees. 
It has the same functionality\n * as RELEASE_ASSERT, but is intended for memory bounds-checking.\n */\n#define SECURITY_ASSERT(X, DETAILS) _ASSERT_IMPL(X, #X, abort(), DETAILS)\n\n#if !defined(NDEBUG) || defined(ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE)\n\n#if !defined(NDEBUG) // If this is a debug build.\n#define ASSERT_ACTION abort()\n#else // If this is not a debug build, but ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE is defined.\n#define ASSERT_ACTION Envoy::Assert::invokeDebugAssertionFailureRecordActionForAssertMacroUseOnly()\n#endif // !defined(NDEBUG)\n\n#define _ASSERT_ORIGINAL(X) _ASSERT_IMPL(X, #X, ASSERT_ACTION, \"\")\n#define _ASSERT_VERBOSE(X, Y) _ASSERT_IMPL(X, #X, ASSERT_ACTION, Y)\n#define _ASSERT_SELECTOR(_1, _2, ASSERT_MACRO, ...) ASSERT_MACRO\n\n// This is a workaround for fact that MSVC expands __VA_ARGS__ after passing them into a macro,\n// rather than before passing them into a macro. Without this, _ASSERT_SELECTOR does not work\n// correctly when compiled with MSVC\n#define EXPAND(X) X\n\n#if !defined(ENVOY_DISABLE_KNOWN_ISSUE_ASSERTS)\n/**\n * Assert wrapper for an as-yet unidentified issue. Even with ASSERTs compiled in, it may be\n * excluded, by defining ENVOY_DISABLE_KNOWN_ISSUE_ASSERTS. It represents a condition that\n * should always pass but that sometimes fails for an unknown reason. 
The macro allows it to\n * be temporarily compiled out while the failure is triaged and investigated.\n */\n#define KNOWN_ISSUE_ASSERT(X, DETAILS) _ASSERT_IMPL(X, #X, abort(), DETAILS)\n#else\n// This non-implementation ensures that its argument is a valid expression that can be statically\n// casted to a bool, but the expression is never evaluated and will be compiled away.\n#define KNOWN_ISSUE_ASSERT _NULL_ASSERT_IMPL\n#endif // defined(ENVOY_DISABLE_KNOWN_ISSUE_ASSERTS)\n\n// If ASSERT is called with one argument, the ASSERT_SELECTOR will return\n// _ASSERT_ORIGINAL and this will call _ASSERT_ORIGINAL(__VA_ARGS__).\n// If ASSERT is called with two arguments, ASSERT_SELECTOR will return\n// _ASSERT_VERBOSE, and this will call _ASSERT_VERBOSE,(__VA_ARGS__)\n#define ASSERT(...)                                                                                \\\n  EXPAND(_ASSERT_SELECTOR(__VA_ARGS__, _ASSERT_VERBOSE, _ASSERT_ORIGINAL)(__VA_ARGS__))\n#else\n#define ASSERT _NULL_ASSERT_IMPL\n#define KNOWN_ISSUE_ASSERT _NULL_ASSERT_IMPL\n#endif // !defined(NDEBUG) || defined(ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE)\n\n/**\n * Indicate a panic situation and exit.\n */\n#define PANIC(X)                                                                                   \\\n  do {                                                                                             \\\n    ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::assert), critical,      \\\n                        \"panic: {}\", X);                                                           \\\n    abort();                                                                                       \\\n  } while (false)\n\n#if !defined(NDEBUG)\n#define ENVOY_BUG_ACTION abort()\n#else\n#define ENVOY_BUG_ACTION Envoy::Assert::invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly()\n#endif\n\n// These macros are needed to stringify __LINE__ correctly.\n#define STRINGIFY(X) #X\n#define TOSTRING(X) 
STRINGIFY(X)\n\n// CONDITION_STR is needed to prevent macros in condition from being expected, which obfuscates\n// the logged failure, e.g., \"EAGAIN\" vs \"11\".\n// ENVOY_BUG logging and actions are invoked only on power-of-two instances per log line.\n#define _ENVOY_BUG_IMPL(CONDITION, CONDITION_STR, ACTION, DETAILS)                                 \\\n  do {                                                                                             \\\n    if (!(CONDITION) && Envoy::Assert::shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(          \\\n                            __FILE__ \":\" TOSTRING(__LINE__))) {                                    \\\n      const std::string& details = (DETAILS);                                                      \\\n      ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::envoy_bug), error,    \\\n                          \"envoy bug failure: {}.{}{}\", CONDITION_STR,                             \\\n                          details.empty() ? \"\" : \" Details: \", details);                           \\\n      ACTION;                                                                                      \\\n    }                                                                                              \\\n  } while (false)\n\n#define _ENVOY_BUG_VERBOSE(X, Y) _ENVOY_BUG_IMPL(X, #X, ENVOY_BUG_ACTION, Y)\n\n// This macro is needed to help to remove: \"warning C4003: not enough arguments for function-like\n// macro invocation '<identifier>'\" when expanding __VA_ARGS__. In our setup, MSVC treats this\n// warning as an error. A sample code to reproduce the case: https://godbolt.org/z/M4zZNG.\n#define PASS_ON(...) __VA_ARGS__\n\n/**\n * Indicate a failure condition that should never be met in normal circumstances. In contrast\n * with ASSERT, an ENVOY_BUG is compiled in release mode. 
If a failure condition is met in release\n * mode, it is logged and a stat is incremented with exponential back-off per ENVOY_BUG. In debug\n * mode, it will crash if the condition is not met. ENVOY_BUG must be called with two arguments for\n * verbose logging.\n */\n#define ENVOY_BUG(...) PASS_ON(PASS_ON(_ENVOY_BUG_VERBOSE)(__VA_ARGS__))\n\n// NOT_IMPLEMENTED_GCOVR_EXCL_LINE is for overridden functions that are expressly not implemented.\n// The macro name includes \"GCOVR_EXCL_LINE\" to exclude the macro's usage from code coverage\n// reports.\n#define NOT_IMPLEMENTED_GCOVR_EXCL_LINE PANIC(\"not implemented\")\n\n// NOT_REACHED_GCOVR_EXCL_LINE is for spots the compiler insists on having a return, but where we\n// know that it shouldn't be possible to arrive there, assuming no horrendous bugs. For example,\n// after a switch (some_enum) with all enum values included in the cases. The macro name includes\n// \"GCOVR_EXCL_LINE\" to exclude the macro's usage from code coverage reports.\n#define NOT_REACHED_GCOVR_EXCL_LINE PANIC(\"not reached\")\n} // namespace Assert\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/backoff_strategy.cc",
    "content": "#include \"common/common/backoff_strategy.h\"\n\nnamespace Envoy {\n\nJitteredExponentialBackOffStrategy::JitteredExponentialBackOffStrategy(\n    uint64_t base_interval, uint64_t max_interval, Random::RandomGenerator& random)\n    : base_interval_(base_interval), max_interval_(max_interval), next_interval_(base_interval),\n      random_(random) {\n  ASSERT(base_interval_ > 0);\n  ASSERT(base_interval_ <= max_interval_);\n}\n\nuint64_t JitteredExponentialBackOffStrategy::nextBackOffMs() {\n  const uint64_t backoff = next_interval_;\n  ASSERT(backoff > 0);\n  // Set next_interval_ to max_interval_ if doubling the interval would exceed the max or overflow.\n  if (next_interval_ < max_interval_ / 2) {\n    next_interval_ *= 2;\n  } else {\n    next_interval_ = max_interval_;\n  }\n  return std::min(random_.random() % backoff, max_interval_);\n}\n\nvoid JitteredExponentialBackOffStrategy::reset() { next_interval_ = base_interval_; }\n\nJitteredLowerBoundBackOffStrategy::JitteredLowerBoundBackOffStrategy(\n    uint64_t min_interval, Random::RandomGenerator& random)\n    : min_interval_(min_interval), random_(random) {\n  ASSERT(min_interval_ > 1);\n}\n\nuint64_t JitteredLowerBoundBackOffStrategy::nextBackOffMs() {\n  // random(min_interval_, 1.5 * min_interval_)\n  return (random_.random() % (min_interval_ >> 1)) + min_interval_;\n}\n\nFixedBackOffStrategy::FixedBackOffStrategy(uint64_t interval_ms) : interval_ms_(interval_ms) {\n  ASSERT(interval_ms_ > 0);\n}\n\nuint64_t FixedBackOffStrategy::nextBackOffMs() { return interval_ms_; }\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/backoff_strategy.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/common/backoff_strategy.h\"\n#include \"envoy/common/random_generator.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\n\n/**\n * Implementation of BackOffStrategy that uses a fully jittered exponential backoff algorithm.\n */\nclass JitteredExponentialBackOffStrategy : public BackOffStrategy {\n\npublic:\n  /**\n   * Constructs fully jittered backoff strategy.\n   * @param base_interval the base interval to be used for next backoff computation. It should be\n   * greater than zero and less than or equal to max_interval.\n   * @param max_interval the cap on the next backoff value.\n   * @param random the random generator.\n   */\n  JitteredExponentialBackOffStrategy(uint64_t base_interval, uint64_t max_interval,\n                                     Random::RandomGenerator& random);\n\n  // BackOffStrategy methods\n  uint64_t nextBackOffMs() override;\n  void reset() override;\n\nprivate:\n  const uint64_t base_interval_;\n  const uint64_t max_interval_{};\n  uint64_t next_interval_;\n  Random::RandomGenerator& random_;\n};\n\n/**\n * Implementation of BackOffStrategy that returns random values in the range\n * [min_interval, 1.5 * min_interval).\n */\nclass JitteredLowerBoundBackOffStrategy : public BackOffStrategy {\npublic:\n  /**\n   * Constructs fully jittered backoff strategy.\n   * @param min_interval the lower bound on the next backoff value. 
It must be greater than one.\n   * @param random the random generator.\n   */\n  JitteredLowerBoundBackOffStrategy(uint64_t min_interval, Random::RandomGenerator& random);\n\n  // BackOffStrategy methods\n  uint64_t nextBackOffMs() override;\n  void reset() override {}\n\nprivate:\n  const uint64_t min_interval_;\n  Random::RandomGenerator& random_;\n};\n\n/**\n * Implementation of BackOffStrategy that uses a fixed backoff.\n */\nclass FixedBackOffStrategy : public BackOffStrategy {\n\npublic:\n  /**\n   * Constructs fixed backoff strategy.\n   * @param interval_ms the fixed backoff duration. It should be greater than zero.\n   */\n  FixedBackOffStrategy(uint64_t interval_ms);\n\n  // BackOffStrategy methods.\n  uint64_t nextBackOffMs() override;\n  void reset() override {}\n\nprivate:\n  const uint64_t interval_ms_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/base64.cc",
    "content": "#include \"common/common/base64.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace {\n\n// clang-format off\nconstexpr char CHAR_TABLE[] =\n    \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\";\n\n// Conversion table is taken from\n// https://opensource.apple.com/source/QuickTimeStreamingServer/QuickTimeStreamingServer-452/CommonUtilitiesLib/base64.c\nconstexpr unsigned char REVERSE_LOOKUP_TABLE[256] = {\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63,\n    52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 64, 64, 64, 64, 0,  1,  2,  3,  4,  5,  6,\n    7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64,\n    64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,\n    49, 50, 51, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64};\n\n// The base64url tables are copied from above and modified based on table in\n// https://tools.ietf.org/html/rfc4648#section-5\nconstexpr char URL_CHAR_TABLE[] =\n    \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_\";\n\nconstexpr unsigned char URL_REVERSE_LOOKUP_TABLE[256] = {\n    64, 64, 64, 64, 
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64,\n    52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 64, 64, 64, 64, 0,  1,  2,  3,  4,  5,  6,\n    7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 63,\n    64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,\n    49, 50, 51, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,\n    64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64};\n// clang-format on\n\ninline bool decodeBase(const uint8_t cur_char, uint64_t pos, std::string& ret,\n                       const unsigned char* const reverse_lookup_table) {\n  const unsigned char c = reverse_lookup_table[static_cast<uint32_t>(cur_char)];\n  if (c == 64) {\n    // Invalid character\n    return false;\n  }\n\n  switch (pos % 4) {\n  case 0:\n    ret.push_back(c << 2);\n    break;\n  case 1:\n    ret.back() |= c >> 4;\n    ret.push_back(c << 4);\n    break;\n  case 2:\n    ret.back() |= c >> 2;\n    ret.push_back(c << 6);\n    break;\n  case 3:\n    ret.back() |= c;\n    break;\n  }\n  return true;\n}\n\ninline bool decodeLast(const uint8_t cur_char, uint64_t pos, std::string& ret,\n                       const unsigned char* const reverse_lookup_table) {\n  const unsigned char c = reverse_lookup_table[static_cast<uint32_t>(cur_char)];\n  if (c == 64) {\n    // Invalid character\n    return false;\n  }\n\n  
switch (pos % 4) {\n  case 0:\n    return false;\n  case 1:\n    ret.back() |= c >> 4;\n    return (c & 0b1111) == 0;\n  case 2:\n    ret.back() |= c >> 2;\n    return (c & 0b11) == 0;\n  case 3:\n    ret.back() |= c;\n    break;\n  }\n  return true;\n}\n\ninline void encodeBase(const uint8_t cur_char, uint64_t pos, uint8_t& next_c, std::string& ret,\n                       const char* const char_table) {\n  switch (pos % 3) {\n  case 0:\n    ret.push_back(char_table[cur_char >> 2]);\n    next_c = (cur_char & 0x03) << 4;\n    break;\n  case 1:\n    ret.push_back(char_table[next_c | (cur_char >> 4)]);\n    next_c = (cur_char & 0x0f) << 2;\n    break;\n  case 2:\n    ret.push_back(char_table[next_c | (cur_char >> 6)]);\n    ret.push_back(char_table[cur_char & 0x3f]);\n    next_c = 0;\n    break;\n  }\n}\n\ninline void encodeLast(uint64_t pos, uint8_t last_char, std::string& ret,\n                       const char* const char_table, bool add_padding) {\n  switch (pos % 3) {\n  case 1:\n    ret.push_back(char_table[last_char]);\n    if (add_padding) {\n      ret.push_back('=');\n      ret.push_back('=');\n    }\n    break;\n  case 2:\n    ret.push_back(char_table[last_char]);\n    if (add_padding) {\n      ret.push_back('=');\n    }\n    break;\n  default:\n    break;\n  }\n}\n\n} // namespace\n\nstd::string Base64::decode(const std::string& input) {\n  if (input.length() % 4) {\n    return EMPTY_STRING;\n  }\n  return decodeWithoutPadding(input);\n}\n\nstd::string Base64::decodeWithoutPadding(absl::string_view input) {\n  if (input.empty()) {\n    return EMPTY_STRING;\n  }\n\n  // At most last two chars can be '='.\n  size_t n = input.length();\n  if (input[n - 1] == '=') {\n    n--;\n    if (n > 0 && input[n - 1] == '=') {\n      n--;\n    }\n  }\n  // Last position before \"valid\" padding character.\n  uint64_t last = n - 1;\n  // Determine output length.\n  size_t max_length = (n + 3) / 4 * 3;\n  if (n % 4 == 3) {\n    max_length -= 1;\n  }\n  if (n % 4 == 2) {\n  
  max_length -= 2;\n  }\n\n  std::string ret;\n  ret.reserve(max_length);\n  for (uint64_t i = 0; i < last; ++i) {\n    if (!decodeBase(input[i], i, ret, REVERSE_LOOKUP_TABLE)) {\n      return EMPTY_STRING;\n    }\n  }\n\n  if (!decodeLast(input[last], last, ret, REVERSE_LOOKUP_TABLE)) {\n    return EMPTY_STRING;\n  }\n\n  ASSERT(ret.size() == max_length);\n  return ret;\n}\n\nstd::string Base64::encode(const Buffer::Instance& buffer, uint64_t length) {\n  uint64_t output_length = (std::min(length, buffer.length()) + 2) / 3 * 4;\n  std::string ret;\n  ret.reserve(output_length);\n\n  uint64_t j = 0;\n  uint8_t next_c = 0;\n  for (const Buffer::RawSlice& slice : buffer.getRawSlices()) {\n    const uint8_t* slice_mem = static_cast<const uint8_t*>(slice.mem_);\n\n    for (uint64_t i = 0; i < slice.len_ && j < length; ++i, ++j) {\n      encodeBase(slice_mem[i], j, next_c, ret, CHAR_TABLE);\n    }\n\n    if (j == length) {\n      break;\n    }\n  }\n\n  encodeLast(j, next_c, ret, CHAR_TABLE, true);\n\n  return ret;\n}\n\nstd::string Base64::encode(const char* input, uint64_t length) {\n  return encode(input, length, true);\n}\n\nstd::string Base64::encode(const char* input, uint64_t length, bool add_padding) {\n  uint64_t output_length = (length + 2) / 3 * 4;\n  std::string ret;\n  ret.reserve(output_length);\n\n  uint64_t pos = 0;\n  uint8_t next_c = 0;\n\n  for (uint64_t i = 0; i < length; ++i) {\n    encodeBase(input[i], pos++, next_c, ret, CHAR_TABLE);\n  }\n\n  encodeLast(pos, next_c, ret, CHAR_TABLE, add_padding);\n\n  return ret;\n}\n\nstd::string Base64Url::decode(const std::string& input) {\n  if (input.empty()) {\n    return EMPTY_STRING;\n  }\n\n  std::string ret;\n  ret.reserve(input.length() / 4 * 3 + 3);\n\n  uint64_t last = input.length() - 1;\n  for (uint64_t i = 0; i < last; ++i) {\n    if (!decodeBase(input[i], i, ret, URL_REVERSE_LOOKUP_TABLE)) {\n      return EMPTY_STRING;\n    }\n  }\n\n  if (!decodeLast(input[last], last, ret, 
URL_REVERSE_LOOKUP_TABLE)) {\n    return EMPTY_STRING;\n  }\n\n  return ret;\n}\n\nstd::string Base64Url::encode(const char* input, uint64_t length) {\n  uint64_t output_length = (length + 2) / 3 * 4;\n  std::string ret;\n  ret.reserve(output_length);\n\n  uint64_t pos = 0;\n  uint8_t next_c = 0;\n\n  for (uint64_t i = 0; i < length; ++i) {\n    encodeBase(input[i], pos++, next_c, ret, URL_CHAR_TABLE);\n  }\n\n  encodeLast(pos, next_c, ret, URL_CHAR_TABLE, false);\n\n  return ret;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/base64.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\n\n/**\n * A utility class to support base64 encoding, which is defined in RFC4648 Section 4.\n * See https://tools.ietf.org/html/rfc4648#section-4\n */\nclass Base64 {\npublic:\n  /**\n   * Base64 encode an input buffer.\n   * @param buffer supplies the buffer to encode.\n   * @param length supplies the length to encode which may be <= the buffer length.\n   */\n  static std::string encode(const Buffer::Instance& buffer, uint64_t length);\n\n  /**\n   * Base64 encode an input char buffer with a given length.\n   * @param input char array to encode.\n   * @param length of the input array.\n   */\n  static std::string encode(const char* input, uint64_t length);\n\n  /**\n   * Base64 encode an input char buffer with a given length.\n   * @param input char array to encode.\n   * @param length of the input array.\n   * @param whether add padding at the end of the output.\n   */\n  static std::string encode(const char* input, uint64_t length, bool add_padding);\n\n  /**\n   * Base64 decode an input string. Padding is required.\n   * @param input supplies the input to decode.\n   *\n   * Note, decoded string may contain '\\0' at any position, it should be treated as a sequence of\n   * bytes.\n   */\n  static std::string decode(const std::string& input);\n\n  /**\n   * Base64 decode an input string. 
Padding is not required.\n   * @param input supplies the input to decode.\n   *\n   * Note, decoded string may contain '\\0' at any position, it should be treated as a sequence of\n   * bytes.\n   */\n  static std::string decodeWithoutPadding(absl::string_view input);\n};\n\n/**\n * A utility class to support base64url encoding, which is defined in RFC4648 Section 5.\n * See https://tools.ietf.org/html/rfc4648#section-5\n */\nclass Base64Url {\npublic:\n  /**\n   * Base64url encode an input char buffer with a given length.\n   * @param input char array to encode.\n   * @param length of the input array.\n   */\n  static std::string encode(const char* input, uint64_t length);\n\n  /**\n   * Base64url decode an input string. Padding must not be included in the input.\n   * @param input supplies the input to decode.\n   *\n   * Note, decoded string may contain '\\0' at any position, it should be treated as a sequence of\n   * bytes.\n   */\n  static std::string decode(const std::string& input);\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/base_logger.cc",
    "content": "#include \"common/common/base_logger.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\nconst char* Logger::DEFAULT_LOG_FORMAT = \"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v\";\n\nLogger::Logger(std::shared_ptr<spdlog::logger> logger) : logger_(logger) {\n  logger_->set_pattern(DEFAULT_LOG_FORMAT);\n  logger_->set_level(spdlog::level::trace);\n\n  // Ensure that critical errors, especially ASSERT/PANIC, get flushed\n  logger_->flush_on(spdlog::level::critical);\n}\n\n} // namespace Logger\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/base_logger.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\n/**\n * Logger wrapper for a spdlog logger.\n */\nclass Logger {\npublic:\n  /* This is simple mapping between Logger severity levels and spdlog severity levels.\n   * The only reason for this mapping is to go around the fact that spdlog defines level as err\n   * but the method to log at err level is called LOGGER.error not LOGGER.err. All other level are\n   * fine spdlog::info corresponds to LOGGER.info method.\n   */\n  using Levels = enum {\n    trace = spdlog::level::trace,       // NOLINT(readability-identifier-naming)\n    debug = spdlog::level::debug,       // NOLINT(readability-identifier-naming)\n    info = spdlog::level::info,         // NOLINT(readability-identifier-naming)\n    warn = spdlog::level::warn,         // NOLINT(readability-identifier-naming)\n    error = spdlog::level::err,         // NOLINT(readability-identifier-naming)\n    critical = spdlog::level::critical, // NOLINT(readability-identifier-naming)\n    off = spdlog::level::off            // NOLINT(readability-identifier-naming)\n  };\n\n  spdlog::string_view_t levelString() const {\n    return spdlog::level::level_string_views[logger_->level()];\n  }\n  std::string name() const { return logger_->name(); }\n  void setLevel(spdlog::level::level_enum level) { logger_->set_level(level); }\n  spdlog::level::level_enum level() const { return logger_->level(); }\n\n  static const char* DEFAULT_LOG_FORMAT;\n\nprotected:\n  Logger(std::shared_ptr<spdlog::logger> logger);\n\nprivate:\n  std::shared_ptr<spdlog::logger> logger_; // Use shared_ptr here to allow static construction\n                                           // of vector in Registry::allLoggers().\n  // TODO(junr03): expand Logger's public API to delete this friendship.\n  friend class Registry;\n};\n\n} // namespace Logger\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/basic_resource_impl.h",
    "content": "#pragma once\n\n#include <limits>\n\n#include \"envoy/common/resource.h\"\n#include \"envoy/runtime/runtime.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\n\n/**\n * A handle to track some limited resource.\n *\n * NOTE:\n * This implementation makes some assumptions which favor simplicity over correctness. Though\n * atomics are used, it is possible for resources to temporarily go above the supplied maximums.\n * This should not effect overall behavior.\n */\nclass BasicResourceLimitImpl : public ResourceLimit {\npublic:\n  BasicResourceLimitImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key)\n      : max_(max), runtime_(&runtime), runtime_key_(runtime_key) {}\n  BasicResourceLimitImpl(uint64_t max) : max_(max) {}\n  BasicResourceLimitImpl() : max_(std::numeric_limits<uint64_t>::max()) {}\n\n  bool canCreate() override { return current_.load() < max(); }\n\n  void inc() override { ++current_; }\n\n  void dec() override { decBy(1); }\n\n  void decBy(uint64_t amount) override {\n    ASSERT(current_ >= amount);\n    current_ -= amount;\n  }\n\n  uint64_t max() override {\n    return (runtime_ != nullptr && runtime_key_.has_value())\n               ? runtime_->snapshot().getInteger(runtime_key_.value(), max_)\n               : max_;\n  }\n\n  uint64_t count() const override { return current_.load(); }\n\n  void setMax(uint64_t new_max) { max_ = new_max; }\n  void resetMax() { max_ = std::numeric_limits<uint64_t>::max(); }\n\nprotected:\n  std::atomic<uint64_t> current_{};\n\nprivate:\n  uint64_t max_;\n  Runtime::Loader* runtime_{nullptr};\n  const absl::optional<std::string> runtime_key_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/byte_order.h",
    "content": "#pragma once\n\n#include <cstddef>\n#include <cstdint>\n\n#include \"envoy/common/platform.h\"\n\n// NOLINT(namespace-envoy)\n\nenum class ByteOrder { Host, LittleEndian, BigEndian };\n\ntemplate <ByteOrder, typename Integral, size_t = sizeof(Integral)> struct EndiannessConverter;\n\n// convenience function that converts an integer from host byte-order to a specified endianness\ntemplate <ByteOrder Endianness, typename T> inline T toEndianness(T value) {\n  return EndiannessConverter<Endianness, T>::to(value);\n}\n\n// convenience function that converts an integer from a specified endianness to host byte-order\ntemplate <ByteOrder Endianness, typename T> inline T fromEndianness(T value) {\n  return EndiannessConverter<Endianness, T>::from(value);\n}\n\n// Implementation details below\n\n// implementation details of EndiannessConverter for 8-bit host endianness integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::Host, T, sizeof(uint8_t)> {\n  static_assert(sizeof(T) == sizeof(uint8_t), \"incorrect type width\");\n\n  static T to(T value) { return value; }\n\n  static T from(T value) { return value; }\n};\n\n// implementation details of EndiannessConverter for 16-bit host endianness integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::Host, T, sizeof(uint16_t)> {\n  static_assert(sizeof(T) == sizeof(uint16_t), \"incorrect type width\");\n\n  static T to(T value) { return value; }\n\n  static T from(T value) { return value; }\n};\n\n// implementation details of EndiannessConverter for 32-bit host endianness integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::Host, T, sizeof(uint32_t)> {\n  static_assert(sizeof(T) == sizeof(uint32_t), \"incorrect type width\");\n\n  static T to(T value) { return value; }\n\n  static T from(T value) { return value; }\n};\n\n// implementation details of EndiannessConverter for 64-bit host endianness integers\ntemplate <typename T> struct 
EndiannessConverter<ByteOrder::Host, T, sizeof(uint64_t)> {\n  static_assert(sizeof(T) == sizeof(uint64_t), \"incorrect type width\");\n\n  static T to(T value) { return value; }\n\n  static T from(T value) { return value; }\n};\n\n// implementation details of EndiannessConverter for 8-bit little endian integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::LittleEndian, T, sizeof(uint8_t)> {\n  static_assert(sizeof(T) == sizeof(uint8_t), \"incorrect type width\");\n\n  static T to(T value) { return value; }\n\n  static T from(T value) { return value; }\n};\n\n// implementation details of EndiannessConverter for 16-bit little endian integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::LittleEndian, T, sizeof(uint16_t)> {\n  static_assert(sizeof(T) == sizeof(uint16_t), \"incorrect type width\");\n\n  static T to(T value) { return static_cast<T>(htole16(static_cast<uint16_t>(value))); }\n\n  static T from(T value) { return static_cast<T>(le16toh(static_cast<uint16_t>(value))); }\n};\n\n// implementation details of EndiannessConverter for 32-bit little endian integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::LittleEndian, T, sizeof(uint32_t)> {\n  static_assert(sizeof(T) == sizeof(uint32_t), \"incorrect type width\");\n\n  static T to(T value) { return static_cast<T>(htole32(static_cast<uint32_t>(value))); }\n\n  static T from(T value) { return static_cast<T>(le32toh(static_cast<uint32_t>(value))); }\n};\n\n// implementation details of EndiannessConverter for 64-bit little endian integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::LittleEndian, T, sizeof(uint64_t)> {\n  static_assert(sizeof(T) == sizeof(uint64_t), \"incorrect type width\");\n\n  static T to(T value) { return static_cast<T>(htole64(static_cast<uint64_t>(value))); }\n\n  static T from(T value) { return static_cast<T>(le64toh(static_cast<uint64_t>(value))); }\n};\n\n// implementation details of EndiannessConverter for 8-bit big endian 
integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::BigEndian, T, sizeof(uint8_t)> {\n  static_assert(sizeof(T) == sizeof(uint8_t), \"incorrect type width\");\n\n  static T to(T value) { return value; }\n\n  static T from(T value) { return value; }\n};\n\n// implementation details of EndiannessConverter for 16-bit big endian integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::BigEndian, T, sizeof(uint16_t)> {\n  static_assert(sizeof(T) == sizeof(uint16_t), \"incorrect type width\");\n\n  static T to(T value) { return static_cast<T>(htobe16(static_cast<uint16_t>(value))); }\n\n  static T from(T value) { return static_cast<T>(be16toh(static_cast<uint16_t>(value))); }\n};\n\n// implementation details of EndiannessConverter for 32-bit big endian integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::BigEndian, T, sizeof(uint32_t)> {\n  static_assert(sizeof(T) == sizeof(uint32_t), \"incorrect type width\");\n\n  static T to(T value) { return static_cast<T>(htobe32(static_cast<uint32_t>(value))); }\n\n  static T from(T value) { return static_cast<T>(be32toh(static_cast<uint32_t>(value))); }\n};\n\n// implementation details of EndiannessConverter for 64-bit big endian integers\ntemplate <typename T> struct EndiannessConverter<ByteOrder::BigEndian, T, sizeof(uint64_t)> {\n  static_assert(sizeof(T) == sizeof(uint64_t), \"incorrect type width\");\n\n  static T to(T value) { return static_cast<T>(htobe64(static_cast<uint64_t>(value))); }\n\n  static T from(T value) { return static_cast<T>(be64toh(static_cast<uint64_t>(value))); }\n};\n"
  },
  {
    "path": "source/common/common/c_smart_ptr.h",
    "content": "#pragma once\n\n#include <memory>\n\nnamespace Envoy {\n/**\n * This is a helper that wraps C style API objects that need to be deleted with a smart pointer.\n */\ntemplate <class T, void (*deleter)(T*)> class CSmartPtr : public std::unique_ptr<T, void (*)(T*)> {\npublic:\n  CSmartPtr() : std::unique_ptr<T, void (*)(T*)>(nullptr, deleter) {}\n  CSmartPtr(T* object) : std::unique_ptr<T, void (*)(T*)>(object, deleter) {}\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/callback_impl.h",
    "content": "#pragma once\n\n#include <functional>\n#include <list>\n\n#include \"envoy/common/callback.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Common {\n\n/**\n * Utility class for managing callbacks.\n */\ntemplate <typename... CallbackArgs> class CallbackManager {\npublic:\n  using Callback = std::function<void(CallbackArgs...)>;\n\n  /**\n   * Add a callback.\n   * @param callback supplies the callback to add.\n   * @return CallbackHandle* a handle that can be used to remove the callback.\n   */\n  CallbackHandle* add(Callback callback) {\n    callbacks_.emplace_back(*this, callback);\n    // get the list iterator of added callback handle, which will be used to remove itself from\n    // callbacks_ list.\n    callbacks_.back().it_ = (--callbacks_.end());\n    return &callbacks_.back();\n  }\n\n  /**\n   * Run all registered callbacks.\n   * NOTE: This code is currently safe if a callback deletes ITSELF from within a callback. It is\n   *       not safe if a callback deletes other callbacks. If that is required the code will need\n   *       to change (specifically, it will crash if the next callback in the list is deleted).\n   * @param args supplies the callback arguments.\n   */\n  void runCallbacks(CallbackArgs... 
args) {\n    for (auto it = callbacks_.cbegin(); it != callbacks_.cend();) {\n      auto current = it++;\n      current->cb_(args...);\n    }\n  }\n\nprivate:\n  struct CallbackHolder : public CallbackHandle {\n    CallbackHolder(CallbackManager& parent, Callback cb) : parent_(parent), cb_(cb) {}\n\n    // CallbackHandle\n    void remove() override { parent_.remove(it_); }\n\n    CallbackManager& parent_;\n    Callback cb_;\n\n    // the iterator of this callback holder inside callbacks_ list\n    // upon removal, use this iterator to delete callback holder in O(1)\n    typename std::list<CallbackHolder>::iterator it_;\n  };\n\n  /**\n   * Remove a member update callback added via add().\n   * @param handle supplies the callback handle to remove.\n   */\n  void remove(typename std::list<CallbackHolder>::iterator& it) { callbacks_.erase(it); }\n\n  std::list<CallbackHolder> callbacks_;\n};\n\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/cleanup.h",
    "content": "#pragma once\n\n#include <functional>\n#include <list>\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\n\n// RAII cleanup via functor.\nclass Cleanup {\npublic:\n  Cleanup(std::function<void()> f) : f_(std::move(f)), cancelled_(false) {}\n  ~Cleanup() { f_(); }\n\n  void cancel() {\n    cancelled_ = true;\n    f_ = []() {};\n  }\n\n  bool cancelled() { return cancelled_; }\n\nprivate:\n  std::function<void()> f_;\n  bool cancelled_;\n};\n\n// RAII helper class to add an element to an std::list on construction and erase\n// it on destruction, unless the cancel method has been called.\ntemplate <class T> class RaiiListElement {\npublic:\n  RaiiListElement(std::list<T>& container, T element) : container_(container), cancelled_(false) {\n    it_ = container.emplace(container.begin(), element);\n  }\n  virtual ~RaiiListElement() {\n    if (!cancelled_) {\n      erase();\n    }\n  }\n\n  // Cancel deletion of the element on destruction. This should be called if the iterator has\n  // been invalidated, e.g., if the list has been cleared or the element removed some other way.\n  void cancel() { cancelled_ = true; }\n\n  // Delete the element now, instead of at destruction.\n  void erase() {\n    ASSERT(!cancelled_);\n    container_.erase(it_);\n    cancelled_ = true;\n  }\n\nprivate:\n  std::list<T>& container_;\n  typename std::list<T>::iterator it_;\n  bool cancelled_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/compiler_requirements.h",
    "content": "#pragma once\n\nnamespace Envoy {\n\n#if __cplusplus < 201402L\n#error \"Your compiler does not support C++14. GCC 5+, Clang, or MSVC 2017+ is required.\"\n#endif\n\n// See:\n//   https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html\n//   https://bugzilla.redhat.com/show_bug.cgi?id=1546704\n#if defined(_GLIBCXX_USE_CXX11_ABI) && _GLIBCXX_USE_CXX11_ABI != 1 &&                              \\\n    !defined(ENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR)\n#error \"Your toolchain has set _GLIBCXX_USE_CXX11_ABI to a value that uses a std::string \"         \\\n  \"implementation that is not thread-safe. This may cause rare and difficult-to-debug errors \"     \\\n  \"if std::string is passed between threads in any way. If you accept this risk, you may define \"  \\\n  \"ENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1 in your build.\"\n#endif\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/debug_recursion_checker.h",
    "content": "#pragma once\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Common {\n/**\n * A helper class to assert that a call is not recursive.\n */\nclass DebugRecursionChecker {\npublic:\n  void enter() {\n    ASSERT(!entered_, \"A resource should only be entered once\");\n#if !defined(NDEBUG)\n    entered_ = true;\n#endif // !defined(NDEBUG)\n  }\n\n  void exit() {\n#if !defined(NDEBUG)\n    entered_ = false;\n#endif // !defined(NDEBUG)\n  }\n\nprivate:\n  bool entered_ = false;\n};\n\nclass AutoDebugRecursionChecker {\npublic:\n  explicit AutoDebugRecursionChecker(DebugRecursionChecker& checker) : checker_(checker) {\n    checker.enter();\n  }\n\n  ~AutoDebugRecursionChecker() { checker_.exit(); }\n\nprivate:\n  DebugRecursionChecker& checker_;\n};\n\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/documentation_url.h",
    "content": "namespace Envoy {\n\n// TODO(ggreenway): replace 'latest' with the current version, pulled from the VERSION file at\n// the root of the repo.\n#define ENVOY_DOC_URL_ROOT \"https://www.envoyproxy.io/docs/envoy/latest\"\n\n#define ENVOY_DOC_URL_VERSION_HISTORY ENVOY_DOC_URL_ROOT \"/version_history/version_history\"\n\n#define ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED                                                  \\\n  ENVOY_DOC_URL_ROOT                                                                               \\\n  \"/configuration/operations/runtime#using-runtime-overrides-for-deprecated-features\"\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/dump_state_utils.h",
    "content": "#pragma once\n\n#include <sstream>\n\nnamespace Envoy {\n\n// A collection of macros for pretty printing objects on fatal error.\n// These are fairly ugly in an attempt to maximize the conditions where fatal error logging occurs,\n// i.e. under the Envoy signal handler if encountering a crash due to OOM, where allocating more\n// memory would likely lead to the crash handler itself causing a subsequent OOM.\n\n#define DUMP_MEMBER(member) \", \" #member \": \" << (member)\n\n#define DUMP_OPTIONAL_MEMBER(member)                                                               \\\n  \", \" #member \": \" << ((member).has_value() ? absl::StrCat((member).value()) : \"null\")\n\n// Macro assumes local member variables\n// os (ostream)\n// indent_level (int)\n#define DUMP_DETAILS(member)                                                                       \\\n  do {                                                                                             \\\n    os << spaces << #member \": \";                                                                  \\\n    if ((member) != nullptr) {                                                                     \\\n      os << \"\\n\";                                                                                  \\\n      (member)->dumpState(os, indent_level + 1);                                                   \\\n    } else {                                                                                       \\\n      os << spaces << \"null\\n\";                                                                    \\\n    }                                                                                              \\\n  } while (false)\n\n// Macro assumes local member variables\n// os (ostream)\n// indent_level (int)\n#define DUMP_OPT_REF_DETAILS(member)                                                               \\\n  do {                                                                              
               \\\n    os << spaces << #member \": \";                                                                  \\\n    if ((member).has_value()) {                                                                    \\\n      os << \"\\n\";                                                                                  \\\n      (member)->get().dumpState(os, indent_level + 1);                                             \\\n    } else {                                                                                       \\\n      os << spaces << \"empty\\n\";                                                                   \\\n    }                                                                                              \\\n  } while (false)\n\n// Return the const char* equivalent of string(level*2, ' '), without dealing\n// with string creation overhead. Cap arbitrarily at 6 as we're (hopefully)\n// not going to have nested objects deeper than that.\ninline const char* spacesForLevel(int level) {\n  switch (level) {\n  case 0:\n    return \"\";\n  case 1:\n    return \"  \";\n  case 2:\n    return \"    \";\n  case 3:\n    return \"      \";\n  case 4:\n    return \"        \";\n  case 5:\n    return \"          \";\n  default:\n    return \"            \";\n  }\n  return \"\";\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/empty_string.h",
    "content": "#pragma once\n\n#include <string>\n\nnamespace Envoy {\nstatic const std::string EMPTY_STRING;\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/enum_to_int.h",
    "content": "#pragma once\n\n#include <cstdint>\n\nnamespace Envoy {\n/**\n * Convert an int based enum to an int.\n */\ntemplate <typename T> constexpr uint32_t enumToInt(T val) { return static_cast<uint32_t>(val); }\n\n/**\n * Convert an int based enum to a signed int.\n */\ntemplate <typename T> constexpr int32_t enumToSignedInt(T val) { return static_cast<int32_t>(val); }\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/fancy_logger.cc",
    "content": "#include \"common/common/fancy_logger.h\"\n\n#include <atomic>\n#include <memory>\n\n#include \"common/common/logger.h\"\n\nusing spdlog::level::level_enum;\n\nnamespace Envoy {\n\n/**\n * Implements a lock from BasicLockable, to avoid dependency problem of thread.h.\n */\nclass FancyBasicLockable : public Thread::BasicLockable {\npublic:\n  // BasicLockable\n  void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() override { mutex_.Lock(); }\n  bool tryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) override { return mutex_.TryLock(); }\n  void unlock() ABSL_UNLOCK_FUNCTION() override { mutex_.Unlock(); }\n\nprivate:\n  absl::Mutex mutex_;\n};\n\nSpdLoggerSharedPtr FancyContext::getFancyLogEntry(std::string key)\n    ABSL_LOCKS_EXCLUDED(fancy_log_lock_) {\n  absl::ReaderMutexLock l(&fancy_log_lock_);\n  auto it = fancy_log_map_->find(key);\n  if (it != fancy_log_map_->end()) {\n    return it->second;\n  }\n  return nullptr;\n}\n\nvoid FancyContext::initFancyLogger(std::string key, std::atomic<spdlog::logger*>& logger)\n    ABSL_LOCKS_EXCLUDED(fancy_log_lock_) {\n  absl::WriterMutexLock l(&fancy_log_lock_);\n  auto it = fancy_log_map_->find(key);\n  spdlog::logger* target;\n  if (it == fancy_log_map_->end()) {\n    target = createLogger(key);\n  } else {\n    target = it->second.get();\n  }\n  logger.store(target);\n}\n\nbool FancyContext::setFancyLogger(std::string key, level_enum log_level)\n    ABSL_LOCKS_EXCLUDED(fancy_log_lock_) {\n  absl::ReaderMutexLock l(&fancy_log_lock_);\n  auto it = fancy_log_map_->find(key);\n  if (it != fancy_log_map_->end()) {\n    it->second->set_level(log_level);\n    return true;\n  }\n  return false;\n}\n\nvoid FancyContext::setDefaultFancyLevelFormat(spdlog::level::level_enum level, std::string format)\n    ABSL_LOCKS_EXCLUDED(fancy_log_lock_) {\n  if (level == Logger::Context::getFancyDefaultLevel() &&\n      format == Logger::Context::getFancyLogFormat()) {\n    return;\n  }\n  absl::ReaderMutexLock l(&fancy_log_lock_);\n  for 
(const auto& it : *fancy_log_map_) {\n    if (it.second->level() == Logger::Context::getFancyDefaultLevel()) {\n      // if logger is default level now\n      it.second->set_level(level);\n    }\n    it.second->set_pattern(format);\n  }\n}\n\nstd::string FancyContext::listFancyLoggers() ABSL_LOCKS_EXCLUDED(fancy_log_lock_) {\n  std::string info = \"\";\n  absl::ReaderMutexLock l(&fancy_log_lock_);\n  for (const auto& it : *fancy_log_map_) {\n    info += fmt::format(\"   {}: {}\\n\", it.first, static_cast<int>(it.second->level()));\n  }\n  return info;\n}\n\nvoid FancyContext::setAllFancyLoggers(spdlog::level::level_enum level)\n    ABSL_LOCKS_EXCLUDED(fancy_log_lock_) {\n  absl::ReaderMutexLock l(&fancy_log_lock_);\n  for (const auto& it : *fancy_log_map_) {\n    it.second->set_level(level);\n  }\n}\n\nFancyLogLevelMap FancyContext::getAllFancyLogLevelsForTest() ABSL_LOCKS_EXCLUDED(fancy_log_lock_) {\n  FancyLogLevelMap log_levels;\n  absl::ReaderMutexLock l(&fancy_log_lock_);\n  for (const auto& it : *fancy_log_map_) {\n    log_levels[it.first] = it.second->level();\n  }\n  return log_levels;\n}\n\nvoid FancyContext::initSink() {\n  spdlog::sink_ptr sink = Logger::Registry::getSink();\n  Logger::DelegatingLogSinkSharedPtr sp = std::static_pointer_cast<Logger::DelegatingLogSink>(sink);\n  if (!sp->hasLock()) {\n    static FancyBasicLockable tlock;\n    sp->setLock(tlock);\n    sp->setShouldEscape(false);\n  }\n}\n\nspdlog::logger* FancyContext::createLogger(std::string key, int level)\n    ABSL_EXCLUSIVE_LOCKS_REQUIRED(fancy_log_lock_) {\n  SpdLoggerSharedPtr new_logger =\n      std::make_shared<spdlog::logger>(key, Logger::Registry::getSink());\n  if (!Logger::Registry::getSink()->hasLock()) { // occurs in benchmark test\n    initSink();\n  }\n  level_enum lv = Logger::Context::getFancyDefaultLevel();\n  if (level > -1) {\n    lv = static_cast<level_enum>(level);\n  }\n  new_logger->set_level(lv);\n  new_logger->set_pattern(Logger::Context::getFancyLogFormat());\n 
 new_logger->flush_on(level_enum::critical);\n  fancy_log_map_->insert(std::make_pair(key, new_logger));\n  return new_logger.get();\n}\n\nFancyContext& getFancyContext() { MUTABLE_CONSTRUCT_ON_FIRST_USE(FancyContext); }\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/fancy_logger.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/common/macros.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\n\nusing SpdLoggerSharedPtr = std::shared_ptr<spdlog::logger>;\nusing FancyMap = absl::flat_hash_map<std::string, SpdLoggerSharedPtr>;\nusing FancyMapPtr = std::shared_ptr<FancyMap>;\nusing FancyLogLevelMap = absl::flat_hash_map<std::string, spdlog::level::level_enum>;\n\n/**\n * Stores the lock and functions used by Fancy Logger's macro so that we don't need to declare\n * them globally. Functions are provided to initialize a logger, set log level, flush a logger.\n */\nclass FancyContext {\npublic:\n  /**\n   * Gets a logger from map given a key (e.g. file name).\n   */\n  SpdLoggerSharedPtr getFancyLogEntry(std::string key) ABSL_LOCKS_EXCLUDED(fancy_log_lock_);\n\n  /**\n   * Initializes Fancy Logger and register it in global map if not done.\n   */\n  void initFancyLogger(std::string key, std::atomic<spdlog::logger*>& logger)\n      ABSL_LOCKS_EXCLUDED(fancy_log_lock_);\n\n  /**\n   * Sets log level. If not found, return false.\n   */\n  bool setFancyLogger(std::string key, spdlog::level::level_enum log_level)\n      ABSL_LOCKS_EXCLUDED(fancy_log_lock_);\n\n  /**\n   * Sets the default logger level and format when updating context. 
It should only be used in\n   * Context, otherwise the fancy_default_level will possibly be inconsistent with the actual\n   * logger level.\n   */\n  void setDefaultFancyLevelFormat(spdlog::level::level_enum level, std::string format)\n      ABSL_LOCKS_EXCLUDED(fancy_log_lock_);\n\n  /**\n   * Lists keys and levels of all loggers in a string for admin page usage.\n   */\n  std::string listFancyLoggers() ABSL_LOCKS_EXCLUDED(fancy_log_lock_);\n\n  /**\n   * Sets the levels of all loggers.\n   */\n  void setAllFancyLoggers(spdlog::level::level_enum level) ABSL_LOCKS_EXCLUDED(fancy_log_lock_);\n\n  /**\n   * Obtain a map from logger key to log level. Useful for testing, e.g. in macros such as\n   * EXPECT_LOG_CONTAINS_ALL_OF_HELPER.\n   */\n  FancyLogLevelMap getAllFancyLogLevelsForTest() ABSL_LOCKS_EXCLUDED(fancy_log_lock_);\n\nprivate:\n  /**\n   * Initializes sink for the initialization of loggers, needed only in benchmark test.\n   */\n  void initSink();\n\n  /**\n   * Creates a logger given key and log level, and add it to map.\n   * Key is the log component name, e.g. file name now.\n   */\n  spdlog::logger* createLogger(std::string key, int level = -1)\n      ABSL_EXCLUSIVE_LOCKS_REQUIRED(fancy_log_lock_);\n\n  /**\n   * Lock for the following map (not for the corresponding loggers).\n   */\n  absl::Mutex fancy_log_lock_;\n\n  /**\n   * Map that stores <key, logger> pairs, key can be the file name.\n   */\n  FancyMapPtr fancy_log_map_ ABSL_GUARDED_BY(fancy_log_lock_) = std::make_shared<FancyMap>();\n};\n\nFancyContext& getFancyContext();\n\n#define FANCY_KEY std::string(__FILE__)\n\n/**\n * Macro for fancy logger.\n * Uses a global map to store logger and take use of thread-safe spdlog::logger.\n * The local pointer is used to avoid another load() when logging. Here we use\n * spdlog::logger* as atomic<shared_ptr> is a C++20 feature.\n */\n#define FANCY_LOG(LEVEL, ...)                                                                      
\\\n  do {                                                                                             \\\n    static std::atomic<spdlog::logger*> flogger{0};                                                \\\n    spdlog::logger* local_flogger = flogger.load(std::memory_order_relaxed);                       \\\n    if (!local_flogger) {                                                                          \\\n      ::Envoy::getFancyContext().initFancyLogger(FANCY_KEY, flogger);                              \\\n      local_flogger = flogger.load(std::memory_order_relaxed);                                     \\\n    }                                                                                              \\\n    if (ENVOY_LOG_COMP_LEVEL(*local_flogger, LEVEL)) {                                             \\\n      local_flogger->log(spdlog::source_loc{__FILE__, __LINE__, __func__},                         \\\n                         ENVOY_SPDLOG_LEVEL(LEVEL), __VA_ARGS__);                                  \\\n    }                                                                                              \\\n  } while (0)\n\n/**\n * Convenient macro for connection log.\n */\n#define FANCY_CONN_LOG(LEVEL, FORMAT, CONNECTION, ...)                                             \\\n  FANCY_LOG(LEVEL, \"[C{}] \" FORMAT, (CONNECTION).id(), ##__VA_ARGS__)\n\n/**\n * Convenient macro for stream log.\n */\n#define FANCY_STREAM_LOG(LEVEL, FORMAT, STREAM, ...)                                               \\\n  FANCY_LOG(LEVEL, \"[C{}][S{}] \" FORMAT, (STREAM).connection() ? 
(STREAM).connection()->id() : 0,  \\\n            (STREAM).streamId(), ##__VA_ARGS__)\n\n/**\n * Convenient macro for log flush.\n */\n#define FANCY_FLUSH_LOG()                                                                          \\\n  do {                                                                                             \\\n    SpdLoggerSharedPtr p = ::Envoy::getFancyContext().getFancyLogEntry(FANCY_KEY);                 \\\n    if (p) {                                                                                       \\\n      p->flush();                                                                                  \\\n    }                                                                                              \\\n  } while (0)\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/fmt.h",
    "content": "#pragma once\n\n#include \"envoy/common/platform.h\" // Avert format.h including windows.h\n\n#include \"absl/strings/string_view.h\"\n#include \"fmt/format.h\"\n#include \"fmt/ostream.h\"\n\n// NOLINT(namespace-envoy)\n\nnamespace fmt {\n\n// Provide an implementation of formatter for fmt::format that allows absl::string_view to be\n// formatted with the same format specifiers available to std::string.\n// TODO(zuercher): Once absl::string_view is replaced with the std type, this can be removed\n// as fmtlib handles string_view natively.\n// NOLINTNEXTLINE(readability-identifier-naming)\ntemplate <> struct formatter<absl::string_view> : formatter<string_view> {\n  auto format(absl::string_view absl_string_view, fmt::format_context& ctx) -> decltype(ctx.out()) {\n    string_view fmt_string_view(absl_string_view.data(), absl_string_view.size());\n    return formatter<string_view>::format(fmt_string_view, ctx);\n  }\n};\n\n} // namespace fmt\n"
  },
  {
    "path": "source/common/common/hash.cc",
    "content": "#include \"common/common/hash.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\n\n// Computes a 64-bit murmur hash 2, only works with 64-bit platforms. Revisit if support for 32-bit\n// platforms are needed.\n// from\n// (https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=libstdc%2b%2b-v3/libsupc%2b%2b/hash_bytes.cc)\nuint64_t MurmurHash::murmurHash2(absl::string_view key, uint64_t seed) {\n  static const uint64_t mul = 0xc6a4a7935bd1e995UL;\n  const char* const buf = static_cast<const char*>(key.data());\n  uint64_t len = key.size();\n\n  // Remove the bytes not divisible by the sizeof(uint64_t). This\n  // allows the main loop to process the data as 64-bit integers.\n  const int len_aligned = len & ~0x7;\n  const char* const end = buf + len_aligned;\n  uint64_t hash = seed ^ (len * mul);\n  for (const char* p = buf; p != end; p += 8) {\n    const uint64_t data = shiftMix(unalignedLoad(p) * mul) * mul;\n    hash ^= data;\n    hash *= mul;\n  }\n\n  if ((len & 0x7) != 0) {\n    const uint64_t data = loadBytes(end, len & 0x7);\n    hash ^= data;\n    hash *= mul;\n  }\n  hash = shiftMix(hash) * mul;\n  hash = shiftMix(hash);\n  return hash;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/hash.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xxhash.h\"\n\nnamespace Envoy {\n\nclass HashUtil {\npublic:\n  /**\n   * Return 64-bit hash from the xxHash algorithm.\n   * @param input supplies the string view to hash.\n   * @param seed supplies the hash seed which defaults to 0.\n   * See https://github.com/Cyan4973/xxHash for details.\n   */\n  static uint64_t xxHash64(absl::string_view input, uint64_t seed = 0) {\n    return XXH64(input.data(), input.size(), seed);\n  }\n\n  /**\n   * TODO(gsagula): extend xxHash to handle case-insensitive.\n   *\n   * Return 64-bit hash representation of string ignoring case.\n   * See djb2 (http://www.cse.yorku.ca/~oz/hash.html) for more details.\n   * @param input supplies the string view.\n   * @return 64-bit hash representation of the supplied string view.\n   */\n  static uint64_t djb2CaseInsensitiveHash(absl::string_view input) {\n    uint64_t hash = 5381;\n    for (unsigned char c : input) {\n      hash += ((hash << 5) + hash) + absl::ascii_tolower(c);\n    };\n    return hash;\n  }\n};\n\n/**\n * From\n * (https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=libstdc%2b%2b-v3/libsupc%2b%2b/hash_bytes.cc).\n * Which is based on (https://sites.google.com/site/murmurhash/).\n */\nclass MurmurHash {\npublic:\n  static const uint64_t STD_HASH_SEED = 0xc70f6907UL;\n  /**\n   * Return 64-bit hash from murmur hash2 as is implemented in std::hash<string>.\n   * @param key supplies the string view\n   * @param seed the seed to use for the hash\n   * @return 64-bit hash representation of the supplied string view\n   */\n  static uint64_t murmurHash2(absl::string_view key, uint64_t seed = STD_HASH_SEED);\n\nprivate:\n  static inline uint64_t unalignedLoad(const char* p) {\n    uint64_t result;\n    memcpy(&result, p, sizeof(result));\n    return result;\n 
 }\n\n  // Loads n bytes, where 1 <= n < 8.\n  static inline uint64_t loadBytes(const char* p, int n) {\n    uint64_t result = 0;\n    --n;\n    do {\n      result = (result << 8) + static_cast<unsigned char>(p[n]);\n    } while (--n >= 0);\n    return result;\n  }\n\n  static inline uint64_t shiftMix(uint64_t v) { return v ^ (v >> 47); }\n};\n\nusing SharedString = std::shared_ptr<std::string>;\n\nstruct HeterogeneousStringHash {\n  // Specifying is_transparent indicates to the library infrastructure that\n  // type-conversions should not be applied when calling find(), but instead\n  // pass the actual types of the contained and searched-for objects directly to\n  // these functors. See\n  // https://en.cppreference.com/w/cpp/utility/functional/less_void for an\n  // official reference, and https://abseil.io/tips/144 for a description of\n  // using it in the context of absl.\n  using is_transparent = void; // NOLINT(readability-identifier-naming)\n\n  size_t operator()(absl::string_view a) const { return HashUtil::xxHash64(a); }\n  size_t operator()(const SharedString& a) const { return HashUtil::xxHash64(*a); }\n};\n\nstruct HeterogeneousStringEqual {\n  // See description for HeterogeneousStringHash::is_transparent.\n  using is_transparent = void; // NOLINT(readability-identifier-naming)\n\n  size_t operator()(absl::string_view a, absl::string_view b) const { return a == b; }\n  size_t operator()(const SharedString& a, const SharedString& b) const { return *a == *b; }\n  size_t operator()(absl::string_view a, const SharedString& b) const { return a == *b; }\n  size_t operator()(const SharedString& a, absl::string_view b) const { return *a == b; }\n};\n\n// We use heterogeneous hash/equal functors to do a find() without constructing\n// a shared_string, which would entail making a full copy of the stat name.\nusing SharedStringSet =\n    absl::flat_hash_set<SharedString, HeterogeneousStringHash, HeterogeneousStringEqual>;\n\n// A special heterogeneous 
comparator is not needed for maps of strings; absl\n// hashes allow for looking up a string-container with a string-view by default.\ntemplate <class Value> using StringMap = absl::flat_hash_map<std::string, Value>;\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/hex.cc",
    "content": "#include \"common/common/hex.h\"\n\n#include <array>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nstd::string Hex::encode(const uint8_t* data, size_t length) {\n  static const char* const digits = \"0123456789abcdef\";\n\n  std::string ret;\n  ret.reserve(length * 2);\n\n  for (size_t i = 0; i < length; i++) {\n    uint8_t d = data[i];\n    ret.push_back(digits[d >> 4]);\n    ret.push_back(digits[d & 0xf]);\n  }\n\n  return ret;\n}\n\nstd::vector<uint8_t> Hex::decode(const std::string& hex_string) {\n  if (hex_string.empty() || hex_string.size() % 2 != 0) {\n    return {};\n  }\n\n  std::vector<uint8_t> segment;\n  for (size_t i = 0; i < hex_string.size(); i += 2) {\n    std::string hex_byte = hex_string.substr(i, 2);\n    uint64_t out;\n    if (!StringUtil::atoull(hex_byte.c_str(), out, 16)) {\n      return {};\n    }\n\n    segment.push_back(out);\n  }\n\n  return segment;\n}\n\nstd::string Hex::uint64ToHex(uint64_t value) {\n  std::array<uint8_t, 8> data;\n\n  // This is explicitly done for performance reasons\n  data[7] = (value & 0x00000000000000FF);\n  data[6] = (value & 0x000000000000FF00) >> 8;\n  data[5] = (value & 0x0000000000FF0000) >> 16;\n  data[4] = (value & 0x00000000FF000000) >> 24;\n  data[3] = (value & 0x000000FF00000000) >> 32;\n  data[2] = (value & 0x0000FF0000000000) >> 40;\n  data[1] = (value & 0x00FF000000000000) >> 48;\n  data[0] = (value & 0xFF00000000000000) >> 56;\n\n  return encode(data.data(), data.size());\n}\n\nstd::string Hex::uint32ToHex(uint32_t value) {\n  std::array<uint8_t, 4> data;\n\n  // This is explicitly done for performance reasons\n  // using std::stringstream with std::hex is ~3 orders of magnitude slower\n  data[3] = (value & 0x000000FF);\n  data[2] = (value & 0x0000FF00) >> 8;\n  data[1] = (value & 0x00FF0000) >> 16;\n  data[0] = (value & 0xFF000000) >> 
24;\n\n  return encode(data.data(), data.size());\n}\n\nstd::string Hex::uint16ToHex(uint16_t value) {\n  std::array<uint8_t, 2> data;\n\n  // This is explicitly done for performance reasons\n  // using std::stringstream with std::hex is ~3 orders of magnitude slower.\n  data[1] = (value & 0x00FF);\n  data[0] = (value & 0xFF00) >> 8;\n\n  return encode(data.data(), data.size());\n}\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/hex.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\nnamespace Envoy {\n/**\n * Hex encoder/decoder. Produces lowercase hex digits. Can consume either lowercase or uppercase\n * digits.\n */\nclass Hex final {\npublic:\n  /**\n   * Generates a hex dump of the given data\n   * @param data the binary data to convert\n   * @return the hex encoded string representing data\n   */\n  static std::string encode(const std::vector<uint8_t>& data) {\n    return encode(data.data(), data.size());\n  }\n\n  /**\n   * Generates a hex dump of the given data\n   * @param data the binary data to convert\n   * @param length the length of the data\n   * @return the hex encoded string representing data\n   */\n  static std::string encode(const uint8_t* data, size_t length);\n\n  /**\n   * Converts a hex dump to binary data\n   * @param input the hex dump to decode\n   * @return binary data or empty vector in case of invalid input\n   */\n  static std::vector<uint8_t> decode(const std::string& input);\n\n  /**\n   * Converts the given 64-bit unsigned integer into a hexadecimal string.\n   * The result is always a string of 16 characters left padded with zeroes.\n   * @param value The unsigned integer to be converted.\n   * @return value as hexadecimal string\n   */\n  static std::string uint64ToHex(uint64_t value);\n\n  /**\n   * Converts the given 32-bit unsigned integer into a hexadecimal string.\n   * The result is always a string of 8 characters left padded with zeroes.\n   * @param value The unsigned integer to be converted.\n   * @return value as hexadecimal string\n   */\n  static std::string uint32ToHex(uint32_t value);\n\n  /**\n   * Converts the given 16-bit unsigned integer into a hexadecimal string.\n   * The result is always a string of 4 characters left padded with zeroes.\n   * @param value The unsigned integer to be converted.\n   * @return value as hexadecimal string\n   */\n  static std::string uint16ToHex(uint16_t value);\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/linked_object.h",
    "content": "#pragma once\n\n#include <list>\n#include <memory>\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\n\n/**\n * Helper methods for placing LinkedObject into a list.\n */\nnamespace LinkedList {\n\n/**\n * Move an item into a linked list at the front.\n * @param item supplies the item to move in.\n * @param list supplies the list to move the item into.\n */\ntemplate <typename T, typename U>\nvoid moveIntoList(std::unique_ptr<T>&& item, std::list<std::unique_ptr<U>>& list) {\n  ASSERT(!item->inserted_);\n  item->inserted_ = true;\n  auto position = list.emplace(list.begin(), std::move(item));\n  (*position)->entry_ = position;\n}\n\n/**\n * Move an item into a linked list at the back.\n * @param item supplies the item to move in.\n * @param list supplies the list to move the item into.\n */\ntemplate <typename T, typename U>\nvoid moveIntoListBack(std::unique_ptr<T>&& item, std::list<std::unique_ptr<U>>& list) {\n  ASSERT(!item->inserted_);\n  item->inserted_ = true;\n  auto position = list.emplace(list.end(), std::move(item));\n  (*position)->entry_ = position;\n}\n\n} // namespace LinkedList\n\n/**\n * Mixin class that allows an object contained in a unique pointer to be easily linked and unlinked\n * from lists.\n */\ntemplate <class T> class LinkedObject {\npublic:\n  using ListType = std::list<std::unique_ptr<T>>;\n\n  /**\n   * @return the list iterator for the object.\n   */\n  typename ListType::iterator entry() {\n    ASSERT(inserted_);\n    return entry_;\n  }\n\n  /**\n   * @return whether the object is currently inserted into a list.\n   */\n  bool inserted() { return inserted_; }\n\n  /**\n   * Move a linked item from src list to dst list.\n   * @param src supplies the list that the item is currently in.\n   * @param dst supplies the destination list for the item.\n   */\n  void moveBetweenLists(ListType& src, ListType& dst) {\n    ASSERT(inserted_);\n    ASSERT(std::find(src.begin(), src.end(), *entry_) != src.end());\n\n    
dst.splice(dst.begin(), src, entry_);\n  }\n\n  /**\n   * Remove this item from a list.\n   * @param list supplies the list to remove from. This item should be in this list.\n   */\n  std::unique_ptr<T> removeFromList(ListType& list) {\n    ASSERT(inserted_);\n    ASSERT(std::find(list.begin(), list.end(), *entry_) != list.end());\n\n    std::unique_ptr<T> removed = std::move(*entry_);\n    list.erase(entry_);\n    inserted_ = false;\n    return removed;\n  }\n\nprotected:\n  LinkedObject() = default;\n\nprivate:\n  template <typename U, typename V>\n  friend void LinkedList::moveIntoList(std::unique_ptr<U>&&, std::list<std::unique_ptr<V>>&);\n  template <typename U, typename V>\n  friend void LinkedList::moveIntoListBack(std::unique_ptr<U>&&, std::list<std::unique_ptr<V>>&);\n\n  typename ListType::iterator entry_;\n  bool inserted_{false}; // iterators do not have any \"invalid\" value so we need this boolean for\n                         // sanity checking.\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/lock_guard.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n\n#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\n/**\n * A lock guard that deals with an optional lock.\n */\nclass ABSL_SCOPED_LOCKABLE OptionalLockGuard {\npublic:\n  /**\n   * Establishes a scoped mutex-lock. If non-null, the mutex is locked upon construction.\n   *\n   * @param lock the mutex.\n   */\n  OptionalLockGuard(BasicLockable* lock) ABSL_EXCLUSIVE_LOCK_FUNCTION(lock) : lock_(lock) {\n    if (lock_ != nullptr) {\n      lock_->lock();\n    }\n  }\n\n  /**\n   * Destruction of the OptionalLockGuard unlocks the lock, if it is non-null.\n   */\n  ~OptionalLockGuard() ABSL_UNLOCK_FUNCTION() {\n    if (lock_ != nullptr) {\n      lock_->unlock();\n    }\n  }\n\nprivate:\n  BasicLockable* const lock_;\n};\n\n// At the moment, TryLockGuard is very hard to annotate correctly, I\n// believe due to limitations in clang. At the moment there are no\n// ABSL_GUARDED_BY variables for any tryLocks in the codebase, so it's\n// easiest just to leave it out. In a future clang release it's\n// possible we can enable this. See also the commented-out block\n// in ThreadTest.TestTryLockGuard in test/common/common/thread_test.cc.\n#define DISABLE_TRYLOCKGUARD_ANNOTATION(annotation)\n\n/**\n * Like LockGuard, but uses a tryLock() on construction rather than a lock(). This\n * class lacks thread annotations, as clang currently does appear to be able to handle\n * conditional thread annotations. So the ones we'd like are commented out.\n */\nclass ABSL_SCOPED_LOCKABLE TryLockGuard {\npublic:\n  /**\n   * Establishes a scoped mutex-lock; the a mutex lock is attempted via tryLock, so\n   * an expected outcome is that the lock may fail. 
isLocked() must be called to\n   * determine whether he lock was actually acquired.\n   *\n   * @param lock the mutex.\n   */\n  TryLockGuard(BasicLockable& lock) : lock_(lock) {}\n\n  /**\n   * Destruction of the TryLockGuard unlocks the lock, if it was locked.\n   */\n  ~TryLockGuard() DISABLE_TRYLOCKGUARD_ANNOTATION(ABSL_UNLOCK_FUNCTION()) {\n    if (is_locked_) {\n      lock_.unlock();\n    }\n  }\n\n  /**\n   * @return bool whether the lock was successfully acquired.\n   */\n  bool tryLock() DISABLE_TRYLOCKGUARD_ANNOTATION(ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true)) {\n    is_locked_ = lock_.tryLock();\n    return is_locked_;\n  }\n\nprivate:\n  BasicLockable& lock_;\n  bool is_locked_{false};\n};\n\n/**\n * Implements a LockGuard that is identical to absl::ReleasableMutexLock, but takes a\n * BasicLockable& to allow usages to be agnostic to cross-process mutexes vs. single-process\n * mutexes.\n *\n * Note: this implementation holds the mutex for the lifetime of the LockGuard, simplifying\n * implementation (no conditionals) and readability at call-sites. In some cases, an early\n * release is needed, in which case, a ReleasableLockGuard can be used.\n */\nclass ABSL_SCOPED_LOCKABLE LockGuard {\npublic:\n  /**\n   * Establishes a scoped mutex-lock; the mutex is locked upon construction.\n   *\n   * @param lock the mutex.\n   */\n  explicit LockGuard(BasicLockable& lock) ABSL_EXCLUSIVE_LOCK_FUNCTION(lock) : lock_(lock) {\n    lock_.lock();\n  }\n\n  /**\n   * Destruction of the LockGuard unlocks the lock.\n   */\n  ~LockGuard() ABSL_UNLOCK_FUNCTION() { lock_.unlock(); }\n\nprivate:\n  BasicLockable& lock_;\n};\n\n/**\n * Implements a LockGuard that is identical to absl::ReleasableMutexLock, but takes a\n * BasicLockable& to allow usages to be agnostic to cross-process mutexes vs. 
single-process\n * mutexes.\n */\nclass ABSL_SCOPED_LOCKABLE ReleasableLockGuard {\npublic:\n  /**\n   * Establishes a scoped mutex-lock; the mutex is locked upon construction.\n   *\n   * @param lock the mutex.\n   */\n  explicit ReleasableLockGuard(BasicLockable& lock) ABSL_EXCLUSIVE_LOCK_FUNCTION(lock)\n      : lock_(&lock) {\n    lock_->lock();\n  }\n\n  /**\n   * Destruction of the LockGuard unlocks the lock, if it has not already been explicitly released.\n   */\n  ~ReleasableLockGuard() ABSL_UNLOCK_FUNCTION() { release(); }\n\n  /**\n   * Unlocks the mutex. This enables call-sites to release the mutex prior to the Lock going out of\n   * scope. This is called release() for consistency with absl::ReleasableMutexLock.\n   */\n  void release() ABSL_UNLOCK_FUNCTION() {\n    if (lock_ != nullptr) {\n      lock_->unlock();\n      lock_ = nullptr;\n    }\n  }\n\nprivate:\n  BasicLockable* lock_; // Set to nullptr on unlock, to prevent double-unlocking.\n};\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/logger.cc",
    "content": "#include \"common/common/logger.h\"\n\n#include <cassert> // use direct system-assert to avoid cyclic dependency.\n#include <cstdint>\n#include <iostream>\n#include <string>\n#include <vector>\n\n#include \"common/common/lock_guard.h\"\n\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/str_replace.h\"\n#include \"absl/strings/strip.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\nStandardLogger::StandardLogger(const std::string& name)\n    : Logger(std::make_shared<spdlog::logger>(name, Registry::getSink())) {}\n\nSinkDelegate::SinkDelegate(DelegatingLogSinkSharedPtr log_sink) : log_sink_(log_sink) {}\n\nSinkDelegate::~SinkDelegate() {\n  // The previous delegate should have never been set or should have been reset by now via\n  // restoreDelegate();\n  assert(previous_delegate_ == nullptr);\n}\n\nvoid SinkDelegate::setDelegate() {\n  // There should be no previous delegate before this call.\n  assert(previous_delegate_ == nullptr);\n  previous_delegate_ = log_sink_->delegate();\n  log_sink_->setDelegate(this);\n}\n\nvoid SinkDelegate::restoreDelegate() {\n  // Ensures stacked allocation of delegates.\n  assert(log_sink_->delegate() == this);\n  log_sink_->setDelegate(previous_delegate_);\n  previous_delegate_ = nullptr;\n}\n\nStderrSinkDelegate::StderrSinkDelegate(DelegatingLogSinkSharedPtr log_sink)\n    : SinkDelegate(log_sink) {\n  setDelegate();\n}\n\nStderrSinkDelegate::~StderrSinkDelegate() { restoreDelegate(); }\n\nvoid StderrSinkDelegate::log(absl::string_view msg) {\n  Thread::OptionalLockGuard guard(lock_);\n  std::cerr << msg;\n}\n\nvoid StderrSinkDelegate::flush() {\n  Thread::OptionalLockGuard guard(lock_);\n  std::cerr << std::flush;\n}\n\nvoid DelegatingLogSink::set_formatter(std::unique_ptr<spdlog::formatter> formatter) {\n  absl::MutexLock lock(&format_mutex_);\n  formatter_ = std::move(formatter);\n}\n\nvoid DelegatingLogSink::log(const 
spdlog::details::log_msg& msg) {\n  absl::ReleasableMutexLock lock(&format_mutex_);\n  absl::string_view msg_view = absl::string_view(msg.payload.data(), msg.payload.size());\n\n  // This memory buffer must exist in the scope of the entire function,\n  // otherwise the string_view will refer to memory that is already free.\n  spdlog::memory_buf_t formatted;\n  if (formatter_) {\n    formatter_->format(msg, formatted);\n    msg_view = absl::string_view(formatted.data(), formatted.size());\n  }\n  lock.Release();\n\n  // Hold the sink mutex while performing the actual logging. This prevents the sink from being\n  // swapped during an individual log event.\n  // TODO(mattklein123): In production this lock will never be contended. In practice, thread\n  // protection is really only needed in tests. It would be nice to figure out a test-only\n  // mechanism for this that does not require extra locking that we don't explicitly need in the\n  // prod code.\n  absl::ReaderMutexLock sink_lock(&sink_mutex_);\n  if (should_escape_) {\n    sink_->log(escapeLogLine(msg_view));\n  } else {\n    sink_->log(msg_view);\n  }\n}\n\nstd::string DelegatingLogSink::escapeLogLine(absl::string_view msg_view) {\n  // Split the actual message from the trailing whitespace.\n  auto eol_it = std::find_if_not(msg_view.rbegin(), msg_view.rend(), absl::ascii_isspace);\n  absl::string_view msg_leading = msg_view.substr(0, msg_view.rend() - eol_it);\n  absl::string_view msg_trailing_whitespace =\n      msg_view.substr(msg_view.rend() - eol_it, eol_it - msg_view.rbegin());\n\n  // Escape the message, but keep the whitespace unescaped.\n  return absl::StrCat(absl::CEscape(msg_leading), msg_trailing_whitespace);\n}\n\nDelegatingLogSinkSharedPtr DelegatingLogSink::init() {\n  DelegatingLogSinkSharedPtr delegating_sink(new DelegatingLogSink);\n  delegating_sink->stderr_sink_ = std::make_unique<StderrSinkDelegate>(delegating_sink);\n  return delegating_sink;\n}\n\nstatic Context* current_context = 
nullptr;\n\nContext::Context(spdlog::level::level_enum log_level, const std::string& log_format,\n                 Thread::BasicLockable& lock, bool should_escape, bool enable_fine_grain_logging)\n    : log_level_(log_level), log_format_(log_format), lock_(lock), should_escape_(should_escape),\n      enable_fine_grain_logging_(enable_fine_grain_logging), save_context_(current_context) {\n  current_context = this;\n  activate();\n}\n\nContext::~Context() {\n  current_context = save_context_;\n  if (current_context != nullptr) {\n    current_context->activate();\n  } else {\n    Registry::getSink()->clearLock();\n  }\n}\n\nvoid Context::activate() {\n  Registry::getSink()->setLock(lock_);\n  Registry::getSink()->setShouldEscape(should_escape_);\n  Registry::setLogLevel(log_level_);\n  Registry::setLogFormat(log_format_);\n\n  // sets level and format for Fancy Logger\n  fancy_default_level_ = log_level_;\n  fancy_log_format_ = log_format_;\n  if (enable_fine_grain_logging_) {\n    // loggers with default level before are set to log_level_ as new default\n    getFancyContext().setDefaultFancyLevelFormat(log_level_, log_format_);\n    if (log_format_ == Logger::Logger::DEFAULT_LOG_FORMAT) {\n      fancy_log_format_ = absl::StrReplaceAll(log_format_, {{\"[%n]\", \"\"}});\n    }\n  }\n}\n\nbool Context::useFancyLogger() {\n  if (current_context) {\n    return current_context->enable_fine_grain_logging_;\n  }\n  return false;\n}\n\nvoid Context::enableFancyLogger() {\n  current_context->enable_fine_grain_logging_ = true;\n  if (current_context) {\n    getFancyContext().setDefaultFancyLevelFormat(current_context->log_level_,\n                                                 current_context->log_format_);\n    current_context->fancy_default_level_ = current_context->log_level_;\n    current_context->fancy_log_format_ = current_context->log_format_;\n    if (current_context->log_format_ == Logger::Logger::DEFAULT_LOG_FORMAT) {\n      current_context->fancy_log_format_ =\n    
      absl::StrReplaceAll(current_context->log_format_, {{\"[%n]\", \"\"}});\n    }\n  }\n}\n\nvoid Context::disableFancyLogger() {\n  if (current_context) {\n    current_context->enable_fine_grain_logging_ = false;\n  }\n}\n\nstd::string Context::getFancyLogFormat() {\n  if (!current_context) { // Context is not instantiated in benchmark test\n    return \"[%Y-%m-%d %T.%e][%t][%l] %v\";\n  }\n  return current_context->fancy_log_format_;\n}\n\nspdlog::level::level_enum Context::getFancyDefaultLevel() {\n  if (!current_context) {\n    return spdlog::level::info;\n  }\n  return current_context->fancy_default_level_;\n}\n\nstd::vector<Logger>& Registry::allLoggers() {\n  static std::vector<Logger>* all_loggers =\n      new std::vector<Logger>({ALL_LOGGER_IDS(GENERATE_LOGGER)});\n  return *all_loggers;\n}\n\nspdlog::logger& Registry::getLog(Id id) { return *allLoggers()[static_cast<int>(id)].logger_; }\n\nvoid Registry::setLogLevel(spdlog::level::level_enum log_level) {\n  for (Logger& logger : allLoggers()) {\n    logger.logger_->set_level(static_cast<spdlog::level::level_enum>(log_level));\n  }\n}\n\nvoid Registry::setLogFormat(const std::string& log_format) {\n  for (Logger& logger : allLoggers()) {\n    logger.logger_->set_pattern(log_format);\n  }\n}\n\nLogger* Registry::logger(const std::string& log_name) {\n  Logger* logger_to_return = nullptr;\n  for (Logger& logger : loggers()) {\n    if (logger.name() == log_name) {\n      logger_to_return = &logger;\n      break;\n    }\n  }\n  return logger_to_return;\n}\n\n} // namespace Logger\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/logger.h",
    "content": "#pragma once\n\n#include <bitset>\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/thread/thread.h\"\n\n#include \"common/common/base_logger.h\"\n#include \"common/common/fancy_logger.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/logger_impl.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/non_copyable.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"fmt/ostream.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\n// TODO: find out a way for extensions to register new logger IDs\n#define ALL_LOGGER_IDS(FUNCTION)                                                                   \\\n  FUNCTION(admin)                                                                                  \\\n  FUNCTION(aws)                                                                                    \\\n  FUNCTION(assert)                                                                                 \\\n  FUNCTION(backtrace)                                                                              \\\n  FUNCTION(cache_filter)                                                                           \\\n  FUNCTION(client)                                                                                 \\\n  FUNCTION(config)                                                                                 \\\n  FUNCTION(connection)                                                                             \\\n  FUNCTION(conn_handler)                                                                           \\\n  FUNCTION(decompression)                                                                          \\\n  FUNCTION(dubbo)                                                                                  \\\n  FUNCTION(envoy_bug)                
                                                              \\\n  FUNCTION(ext_authz)                                                                              \\\n  FUNCTION(rocketmq)                                                                               \\\n  FUNCTION(file)                                                                                   \\\n  FUNCTION(filter)                                                                                 \\\n  FUNCTION(forward_proxy)                                                                          \\\n  FUNCTION(grpc)                                                                                   \\\n  FUNCTION(hc)                                                                                     \\\n  FUNCTION(health_checker)                                                                         \\\n  FUNCTION(http)                                                                                   \\\n  FUNCTION(http2)                                                                                  \\\n  FUNCTION(hystrix)                                                                                \\\n  FUNCTION(init)                                                                                   \\\n  FUNCTION(io)                                                                                     \\\n  FUNCTION(jwt)                                                                                    \\\n  FUNCTION(kafka)                                                                                  \\\n  FUNCTION(lua)                                                                                    \\\n  FUNCTION(main)                                                                                   \\\n  FUNCTION(misc)                                                                                   \\\n  FUNCTION(mongo)                                                               
                   \\\n  FUNCTION(quic)                                                                                   \\\n  FUNCTION(quic_stream)                                                                            \\\n  FUNCTION(pool)                                                                                   \\\n  FUNCTION(rbac)                                                                                   \\\n  FUNCTION(redis)                                                                                  \\\n  FUNCTION(router)                                                                                 \\\n  FUNCTION(runtime)                                                                                \\\n  FUNCTION(stats)                                                                                  \\\n  FUNCTION(secret)                                                                                 \\\n  FUNCTION(tap)                                                                                    \\\n  FUNCTION(testing)                                                                                \\\n  FUNCTION(thrift)                                                                                 \\\n  FUNCTION(tracing)                                                                                \\\n  FUNCTION(upstream)                                                                               \\\n  FUNCTION(udp)                                                                                    \\\n  FUNCTION(wasm)\n\n// clang-format off\nenum class Id {\n  ALL_LOGGER_IDS(GENERATE_ENUM)\n};\n// clang-format on\n\n/**\n * Logger that uses the DelegatingLogSink.\n */\nclass StandardLogger : public Logger {\nprivate:\n  StandardLogger(const std::string& name);\n\n  friend class Registry;\n};\n\nclass DelegatingLogSink;\nusing DelegatingLogSinkSharedPtr = std::shared_ptr<DelegatingLogSink>;\n\n/**\n * Captures a logging sink 
that can be delegated to for a bounded amount of time.\n * On destruction, logging is reverted to its previous state. SinkDelegates must\n * be allocated/freed as a stack.\n */\nclass SinkDelegate : NonCopyable {\npublic:\n  explicit SinkDelegate(DelegatingLogSinkSharedPtr log_sink);\n  virtual ~SinkDelegate();\n\n  virtual void log(absl::string_view msg) PURE;\n  virtual void flush() PURE;\n\nprotected:\n  // Swap the current log sink delegate for this one. This should be called by the derived class\n  // constructor immediately before returning. This is required to match restoreDelegate(),\n  // otherwise it's possible for the previous delegate to get set in the base class constructor,\n  // the derived class constructor throws, and cleanup becomes broken.\n  void setDelegate();\n\n  // Swap the current log sink (this) for the previous one. This should be called by the derived\n  // class destructor in the body. This is critical as otherwise it's possible for a log message\n  // to get routed to a partially destructed sink.\n  void restoreDelegate();\n\n  SinkDelegate* previousDelegate() { return previous_delegate_; }\n\nprivate:\n  SinkDelegate* previous_delegate_{nullptr};\n  DelegatingLogSinkSharedPtr log_sink_;\n};\n\n/**\n * SinkDelegate that writes log messages to stderr.\n */\nclass StderrSinkDelegate : public SinkDelegate {\npublic:\n  explicit StderrSinkDelegate(DelegatingLogSinkSharedPtr log_sink);\n  ~StderrSinkDelegate() override;\n\n  // SinkDelegate\n  void log(absl::string_view msg) override;\n  void flush() override;\n\n  bool hasLock() const { return lock_ != nullptr; }\n  void setLock(Thread::BasicLockable& lock) { lock_ = &lock; }\n  void clearLock() { lock_ = nullptr; }\n\nprivate:\n  Thread::BasicLockable* lock_{};\n};\n\n/**\n * Stacks logging sinks, so you can temporarily override the logging mechanism, restoring\n * the previous state when the DelegatingSink is destructed.\n */\nclass DelegatingLogSink : public spdlog::sinks::sink 
{\npublic:\n  void setLock(Thread::BasicLockable& lock) { stderr_sink_->setLock(lock); }\n  void clearLock() { stderr_sink_->clearLock(); }\n\n  // spdlog::sinks::sink\n  void log(const spdlog::details::log_msg& msg) override;\n  void flush() override {\n    absl::ReaderMutexLock lock(&sink_mutex_);\n    sink_->flush();\n  }\n  void set_pattern(const std::string& pattern) override {\n    set_formatter(spdlog::details::make_unique<spdlog::pattern_formatter>(pattern));\n  }\n  void set_formatter(std::unique_ptr<spdlog::formatter> formatter) override;\n  void setShouldEscape(bool should_escape) { should_escape_ = should_escape; }\n\n  /**\n   * @return bool whether a lock has been established.\n   */\n  bool hasLock() const { return stderr_sink_->hasLock(); }\n\n  /**\n   * Constructs a new DelegatingLogSink, sets up the default sink to stderr,\n   * and returns a shared_ptr to it.\n   *\n   * A shared_ptr is required for sinks used\n   * in spdlog::logger; it would not otherwise be required in Envoy. 
This method\n   * must own the construction process because StderrSinkDelegate needs access to\n   * the DelegatingLogSinkSharedPtr, not just the DelegatingLogSink*, and that is only\n   * available after construction.\n   */\n  static DelegatingLogSinkSharedPtr init();\n\n  /**\n   * Give a log line with trailing whitespace, this will escape all c-style\n   * escape sequences except for the trailing whitespace.\n   * This allows logging escaped messages, but preserves end-of-line characters.\n   *\n   * @param source the log line with trailing whitespace\n   * @return a string with all c-style escape sequences escaped, except trailing whitespace\n   */\n  static std::string escapeLogLine(absl::string_view source);\n\nprivate:\n  friend class SinkDelegate;\n\n  DelegatingLogSink() = default;\n\n  void setDelegate(SinkDelegate* sink) {\n    absl::WriterMutexLock lock(&sink_mutex_);\n    sink_ = sink;\n  }\n  SinkDelegate* delegate() {\n    absl::ReaderMutexLock lock(&sink_mutex_);\n    return sink_;\n  }\n\n  SinkDelegate* sink_ ABSL_GUARDED_BY(sink_mutex_){nullptr};\n  absl::Mutex sink_mutex_;\n  std::unique_ptr<StderrSinkDelegate> stderr_sink_; // Builtin sink to use as a last resort.\n  std::unique_ptr<spdlog::formatter> formatter_ ABSL_GUARDED_BY(format_mutex_);\n  absl::Mutex format_mutex_;\n  bool should_escape_{false};\n};\n\nenum class LoggerMode { Envoy, Fancy };\n\n/**\n * Defines a scope for the logging system with the specified lock and log level.\n * This is equivalent to setLogLevel, setLogFormat, and setLock, which can be\n * called individually as well, e.g. to set the log level without changing the\n * lock or format.\n *\n * Contexts can be nested. When a nested context is destroyed, the previous\n * context is restored. 
When all contexts are destroyed, the lock is cleared,\n * and logging will remain unlocked, the same state it is in prior to\n * instantiating a Context.\n *\n * Settings for Fancy Logger, a file level logger without explicit implementation of\n * Envoy::Logger:Loggable, are integrated here, as they should be updated when\n * context switch occurs.\n */\nclass Context {\npublic:\n  Context(spdlog::level::level_enum log_level, const std::string& log_format,\n          Thread::BasicLockable& lock, bool should_escape, bool enable_fine_grain_logging = false);\n  ~Context();\n\n  /**\n   * Same as before, with boolean returned to use in log macros.\n   */\n  static bool useFancyLogger();\n\n  static void enableFancyLogger();\n  static void disableFancyLogger();\n\n  static std::string getFancyLogFormat();\n  static spdlog::level::level_enum getFancyDefaultLevel();\n\nprivate:\n  void activate();\n\n  const spdlog::level::level_enum log_level_;\n  const std::string log_format_;\n  Thread::BasicLockable& lock_;\n  bool should_escape_;\n  bool enable_fine_grain_logging_;\n  Context* const save_context_;\n\n  std::string fancy_log_format_ = \"[%Y-%m-%d %T.%e][%t][%l][%n] %v\";\n  spdlog::level::level_enum fancy_default_level_ = spdlog::level::info;\n};\n\n/**\n * A registry of all named loggers in envoy. 
Usable for adjusting levels of each logger\n * individually.\n */\nclass Registry {\npublic:\n  /**\n   * @param id supplies the fixed ID of the logger to create.\n   * @return spdlog::logger& a logger with system specified sinks for a given ID.\n   */\n  static spdlog::logger& getLog(Id id);\n\n  /**\n   * @return the singleton sink to use for all loggers.\n   */\n  static DelegatingLogSinkSharedPtr getSink() {\n    static DelegatingLogSinkSharedPtr sink = DelegatingLogSink::init();\n    return sink;\n  }\n\n  /**\n   * Sets the minimum log severity required to print messages.\n   * Messages below this loglevel will be suppressed.\n   */\n  static void setLogLevel(spdlog::level::level_enum log_level);\n\n  /**\n   * Sets the log format.\n   */\n  static void setLogFormat(const std::string& log_format);\n\n  /**\n   * @return std::vector<Logger>& the installed loggers.\n   */\n  static std::vector<Logger>& loggers() { return allLoggers(); }\n\n  /**\n   * @Return bool whether the registry has been initialized.\n   */\n  static bool initialized() { return getSink()->hasLock(); }\n\n  static Logger* logger(const std::string& log_name);\n\nprivate:\n  /*\n   * @return std::vector<Logger>& return the installed loggers.\n   */\n  static std::vector<Logger>& allLoggers();\n};\n\n/**\n * Mixin class that allows any class to perform logging with a logger of a particular ID.\n */\ntemplate <Id id> class Loggable {\nprotected:\n  /**\n   * Do not use this directly, use macros defined below.\n   * @return spdlog::logger& the static log instance to use for class local logging.\n   */\n  static spdlog::logger& __log_do_not_use_read_comment() {\n    static spdlog::logger& instance = Registry::getLog(id);\n    return instance;\n  }\n};\n\n} // namespace Logger\n\n/**\n * Base logging macros. 
It is expected that users will use the convenience macros below rather than\n * invoke these directly.\n */\n\n#define ENVOY_SPDLOG_LEVEL(LEVEL)                                                                  \\\n  (static_cast<spdlog::level::level_enum>(Envoy::Logger::Logger::LEVEL))\n\n#define ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL) (ENVOY_SPDLOG_LEVEL(LEVEL) >= (LOGGER).level())\n\n// Compare levels before invoking logger. This is an optimization to avoid\n// executing expressions computing log contents when they would be suppressed.\n// The same filtering will also occur in spdlog::logger.\n#define ENVOY_LOG_COMP_AND_LOG(LOGGER, LEVEL, ...)                                                 \\\n  do {                                                                                             \\\n    if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) {                                                     \\\n      LOGGER.log(::spdlog::source_loc{__FILE__, __LINE__, __func__}, ENVOY_SPDLOG_LEVEL(LEVEL),    \\\n                 __VA_ARGS__);                                                                     \\\n    }                                                                                              \\\n  } while (0)\n\n#define ENVOY_LOG_CHECK_LEVEL(LEVEL) ENVOY_LOG_COMP_LEVEL(ENVOY_LOGGER(), LEVEL)\n\n/**\n * Convenience macro to log to a user-specified logger. When fancy logging is used, the specific\n * logger is ignored and instead the file-specific logger is used.\n */\n#define ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ...)                                                    
\\\n  do {                                                                                             \\\n    if (Envoy::Logger::Context::useFancyLogger()) {                                                \\\n      FANCY_LOG(LEVEL, ##__VA_ARGS__);                                                             \\\n    } else {                                                                                       \\\n      ENVOY_LOG_COMP_AND_LOG(LOGGER, LEVEL, ##__VA_ARGS__);                                        \\\n    }                                                                                              \\\n  } while (0)\n\n/**\n * Convenience macro to get logger.\n */\n#define ENVOY_LOGGER() __log_do_not_use_read_comment()\n\n/**\n * Convenience macro to log to the misc logger, which allows for logging without of direct access to\n * a logger.\n */\n#define GET_MISC_LOGGER() ::Envoy::Logger::Registry::getLog(::Envoy::Logger::Id::misc)\n#define ENVOY_LOG_MISC(LEVEL, ...) ENVOY_LOG_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, ##__VA_ARGS__)\n\n/**\n * Convenience macros for logging with connection ID.\n */\n#define ENVOY_CONN_LOG_TO_LOGGER(LOGGER, LEVEL, FORMAT, CONNECTION, ...)                           \\\n  ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, \"[C{}] \" FORMAT, (CONNECTION).id(), ##__VA_ARGS__)\n\n/**\n * Convenience macros for logging with a stream ID and a connection ID.\n */\n#define ENVOY_STREAM_LOG_TO_LOGGER(LOGGER, LEVEL, FORMAT, STREAM, ...)                             \\\n  ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, \"[C{}][S{}] \" FORMAT,                                         \\\n                      (STREAM).connection() ? (STREAM).connection()->id() : 0,                     \\\n                      (STREAM).streamId(), ##__VA_ARGS__)\n\n// TODO(danielhochman): macros(s)/function(s) for logging structures that support iteration.\n\n/**\n * Command line options for log macros: use Fancy Logger or not.\n */\n#define ENVOY_LOG(LEVEL, ...) 
ENVOY_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, ##__VA_ARGS__)\n\n#define ENVOY_LOG_FIRST_N_TO_LOGGER(LOGGER, LEVEL, N, ...)                                         \\\n  do {                                                                                             \\\n    if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) {                                                     \\\n      static auto* countdown = new std::atomic<uint64_t>();                                        \\\n      if (countdown->fetch_add(1) < N) {                                                           \\\n        ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ##__VA_ARGS__);                                         \\\n      }                                                                                            \\\n    }                                                                                              \\\n  } while (0)\n\n#define ENVOY_LOG_FIRST_N(LEVEL, N, ...)                                                           \\\n  ENVOY_LOG_FIRST_N_TO_LOGGER(ENVOY_LOGGER(), LEVEL, N, ##__VA_ARGS__)\n\n#define ENVOY_LOG_FIRST_N_MISC(LEVEL, N, ...)                                                      \\\n  ENVOY_LOG_FIRST_N_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, N, ##__VA_ARGS__)\n\n#define ENVOY_LOG_ONCE_TO_LOGGER(LOGGER, LEVEL, ...)                                               \\\n  ENVOY_LOG_FIRST_N_TO_LOGGER(LOGGER, LEVEL, 1, ##__VA_ARGS__)\n\n#define ENVOY_LOG_ONCE(LEVEL, ...) ENVOY_LOG_ONCE_TO_LOGGER(ENVOY_LOGGER(), LEVEL, ##__VA_ARGS__)\n\n#define ENVOY_LOG_ONCE_MISC(LEVEL, ...)                                                            \\\n  ENVOY_LOG_ONCE_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, ##__VA_ARGS__)\n\n#define ENVOY_LOG_EVERY_NTH_TO_LOGGER(LOGGER, LEVEL, N, ...)                                       
\\\n  do {                                                                                             \\\n    if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) {                                                     \\\n      static auto* count = new std::atomic<uint64_t>();                                            \\\n      if ((count->fetch_add(1) % N) == 0) {                                                        \\\n        ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ##__VA_ARGS__);                                         \\\n      }                                                                                            \\\n    }                                                                                              \\\n  } while (0)\n\n#define ENVOY_LOG_EVERY_NTH(LEVEL, N, ...)                                                         \\\n  ENVOY_LOG_EVERY_NTH_TO_LOGGER(ENVOY_LOGGER(), LEVEL, N, ##__VA_ARGS__)\n\n#define ENVOY_LOG_EVERY_NTH_MISC(LEVEL, N, ...)                                                    \\\n  ENVOY_LOG_EVERY_NTH_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, N, ##__VA_ARGS__)\n\n#define ENVOY_LOG_EVERY_POW_2_TO_LOGGER(LOGGER, LEVEL, ...)                                        \\\n  do {                                                                                             \\\n    if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) {                                                     \\\n      static auto* count = new std::atomic<uint64_t>();                                            \\\n      if (std::bitset<64>(1 /* for the first hit*/ + count->fetch_add(1)).count() == 1) {          \\\n        ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ##__VA_ARGS__);                                         \\\n      }                                                                                            \\\n    }                                                                                              \\\n  } while (0)\n\n#define ENVOY_LOG_EVERY_POW_2(LEVEL, ...)               
                                           \\\n  ENVOY_LOG_EVERY_POW_2_TO_LOGGER(ENVOY_LOGGER(), LEVEL, ##__VA_ARGS__)\n\n#define ENVOY_LOG_EVERY_POW_2_MISC(LEVEL, ...)                                                     \\\n  ENVOY_LOG_EVERY_POW_2_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, ##__VA_ARGS__)\n\n// This is to get us to pass the format check. We reference a real-world time source here.\n// We'd have to introduce a singleton for a time source here, and consensus was that avoiding\n// that is preferable.\nusing t_logclock = std::chrono::steady_clock; // NOLINT\n\n#define ENVOY_LOG_PERIODIC_TO_LOGGER(LOGGER, LEVEL, CHRONO_DURATION, ...)                          \\\n  do {                                                                                             \\\n    if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) {                                                     \\\n      static auto* last_hit = new std::atomic<int64_t>();                                          \\\n      auto last = last_hit->load();                                                                \\\n      const auto now = t_logclock::now().time_since_epoch().count();                               \\\n      if ((now - last) >                                                                           \\\n              std::chrono::duration_cast<std::chrono::nanoseconds>(CHRONO_DURATION).count() &&     \\\n          last_hit->compare_exchange_strong(last, now)) {                                          \\\n        ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ##__VA_ARGS__);                                         \\\n      }                                                                                            \\\n    }                                                                                              \\\n  } while (0)\n\n#define ENVOY_LOG_PERIODIC(LEVEL, CHRONO_DURATION, ...)                                            
\\\n  ENVOY_LOG_PERIODIC_TO_LOGGER(ENVOY_LOGGER(), LEVEL, CHRONO_DURATION, ##__VA_ARGS__)\n\n#define ENVOY_LOG_PERIODIC_MISC(LEVEL, CHRONO_DURATION, ...)                                       \\\n  ENVOY_LOG_PERIODIC_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, CHRONO_DURATION, ##__VA_ARGS__)\n\n#define ENVOY_FLUSH_LOG()                                                                          \\\n  do {                                                                                             \\\n    if (Envoy::Logger::Context::useFancyLogger()) {                                                \\\n      FANCY_FLUSH_LOG();                                                                           \\\n    } else {                                                                                       \\\n      ENVOY_LOGGER().flush();                                                                      \\\n    }                                                                                              \\\n  } while (0)\n\n#define ENVOY_CONN_LOG(LEVEL, FORMAT, CONNECTION, ...)                                             \\\n  do {                                                                                             \\\n    if (Envoy::Logger::Context::useFancyLogger()) {                                                \\\n      FANCY_CONN_LOG(LEVEL, FORMAT, CONNECTION, ##__VA_ARGS__);                                    \\\n    } else {                                                                                       \\\n      ENVOY_CONN_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, FORMAT, CONNECTION, ##__VA_ARGS__);          \\\n    }                                                                                              \\\n  } while (0)\n\n#define ENVOY_STREAM_LOG(LEVEL, FORMAT, STREAM, ...)                                               
\\\n  do {                                                                                             \\\n    if (Envoy::Logger::Context::useFancyLogger()) {                                                \\\n      FANCY_STREAM_LOG(LEVEL, FORMAT, STREAM, ##__VA_ARGS__);                                      \\\n    } else {                                                                                       \\\n      ENVOY_STREAM_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, FORMAT, STREAM, ##__VA_ARGS__);            \\\n    }                                                                                              \\\n  } while (0)\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/logger_delegates.cc",
    "content": "#include \"common/common/logger_delegates.h\"\n\n#include <cassert> // use direct system-assert to avoid cyclic dependency.\n#include <cstdint>\n#include <iostream>\n#include <string>\n\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\nFileSinkDelegate::FileSinkDelegate(const std::string& log_path,\n                                   AccessLog::AccessLogManager& log_manager,\n                                   DelegatingLogSinkSharedPtr log_sink)\n    : SinkDelegate(log_sink), log_file_(log_manager.createAccessLog(log_path)) {\n  setDelegate();\n}\n\nFileSinkDelegate::~FileSinkDelegate() { restoreDelegate(); }\n\nvoid FileSinkDelegate::log(absl::string_view msg) {\n  // Log files have internal locking to ensure serial, non-interleaved\n  // writes, so no additional locking needed here.\n  log_file_->write(msg);\n}\n\nvoid FileSinkDelegate::flush() {\n  // Log files have internal locking to ensure serial, non-interleaved\n  // writes, so no additional locking needed here.\n  log_file_->flush();\n}\n\n} // namespace Logger\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/logger_delegates.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/macros.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\n/**\n * SinkDelegate that writes log messages to a file.\n */\nclass FileSinkDelegate : public SinkDelegate {\npublic:\n  FileSinkDelegate(const std::string& log_path, AccessLog::AccessLogManager& log_manager,\n                   DelegatingLogSinkSharedPtr log_sink);\n  ~FileSinkDelegate() override;\n\n  // SinkDelegate\n  void log(absl::string_view msg) override;\n  void flush() override;\n\nprivate:\n  AccessLog::AccessLogFileSharedPtr log_file_;\n};\n\n} // namespace Logger\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/macros.h",
    "content": "#pragma once\n\nnamespace Envoy {\n\n/**\n * @return the size of a C array.\n */\n#define ARRAY_SIZE(X) (sizeof(X) / sizeof(X[0]))\n\n/**\n * @return the length of a static string literal, e.g. STATIC_STRLEN(\"foo\") == 3.\n */\n#define STATIC_STRLEN(X) (sizeof(X) - 1)\n\n/**\n * Helper macros from enum to string macros.\n */\n#define GENERATE_ENUM(X) X,\n#define GENERATE_STRING(X) #X,\n\n/**\n * Stop the compiler from complaining about an unreferenced parameter.\n */\n#ifndef WIN32\n#define UNREFERENCED_PARAMETER(X) ((void)(X))\n#endif\n\n/**\n * Construct On First Use idiom.\n * See https://isocpp.org/wiki/faq/ctors#static-init-order-on-first-use.\n */\n#define CONSTRUCT_ON_FIRST_USE(type, ...)                                                          \\\n  do {                                                                                             \\\n    static const type* objectptr = new type{__VA_ARGS__};                                          \\\n    return *objectptr;                                                                             \\\n  } while (0)\n\n#define MUTABLE_CONSTRUCT_ON_FIRST_USE(type, ...)                                                  
\\\n  do {                                                                                             \\\n    static type* objectptr = new type{__VA_ARGS__};                                                \\\n    return *objectptr;                                                                             \\\n  } while (0)\n\n/**\n * Have a generic fall-through for different versions of C++\n */\n#if __cplusplus >= 201703L // C++17 and above\n#define FALLTHRU [[fallthrough]]\n#elif __cplusplus >= 201402L && __clang_major__ >= 5 // C++14 clang-5\n#define FALLTHRU [[fallthrough]]\n#elif __cplusplus >= 201103L && __GNUC__ >= 7 // C++11 gcc 7\n#define FALLTHRU [[gnu::fallthrough]]\n#else // C++11 on gcc 6, and all other cases\n#define FALLTHRU\n#endif\n\n#if (defined(__GNUC__) && !defined(__clang__))\n#define GCC_COMPILER\n#endif\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/matchers.cc",
    "content": "#include \"common/common/matchers.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/type/matcher/v3/metadata.pb.h\"\n#include \"envoy/type/matcher/v3/number.pb.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n#include \"envoy/type/matcher/v3/value.pb.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/common/regex.h\"\n#include \"common/config/metadata.h\"\n#include \"common/http/path_utility.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Matchers {\n\nValueMatcherConstSharedPtr ValueMatcher::create(const envoy::type::matcher::v3::ValueMatcher& v) {\n  switch (v.match_pattern_case()) {\n  case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kNullMatch:\n    return std::make_shared<const NullMatcher>();\n  case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kDoubleMatch:\n    return std::make_shared<const DoubleMatcher>(v.double_match());\n  case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kStringMatch:\n    return std::make_shared<const StringMatcherImpl>(v.string_match());\n  case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kBoolMatch:\n    return std::make_shared<const BoolMatcher>(v.bool_match());\n  case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kPresentMatch:\n    return std::make_shared<const PresentMatcher>(v.present_match());\n  case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kListMatch:\n    return std::make_shared<const ListMatcher>(v.list_match());\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nbool NullMatcher::match(const ProtobufWkt::Value& value) const {\n  return value.kind_case() == ProtobufWkt::Value::kNullValue;\n}\n\nbool BoolMatcher::match(const ProtobufWkt::Value& value) const {\n  return value.kind_case() == ProtobufWkt::Value::kBoolValue && matcher_ == value.bool_value();\n}\n\nbool PresentMatcher::match(const ProtobufWkt::Value& value) const {\n  return matcher_ && 
value.kind_case() != ProtobufWkt::Value::KIND_NOT_SET;\n}\n\nbool DoubleMatcher::match(const ProtobufWkt::Value& value) const {\n  if (value.kind_case() != ProtobufWkt::Value::kNumberValue) {\n    return false;\n  }\n\n  const double v = value.number_value();\n  switch (matcher_.match_pattern_case()) {\n  case envoy::type::matcher::v3::DoubleMatcher::MatchPatternCase::kRange:\n    return matcher_.range().start() <= v && v < matcher_.range().end();\n  case envoy::type::matcher::v3::DoubleMatcher::MatchPatternCase::kExact:\n    return matcher_.exact() == v;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  };\n}\n\nStringMatcherImpl::StringMatcherImpl(const envoy::type::matcher::v3::StringMatcher& matcher)\n    : matcher_(matcher) {\n  if (matcher.match_pattern_case() ==\n      envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex) {\n    if (matcher.ignore_case()) {\n      throw EnvoyException(\"ignore_case has no effect for regex.\");\n    }\n    regex_ =\n        Regex::Utility::parseStdRegexAsCompiledMatcher(matcher_.hidden_envoy_deprecated_regex());\n  } else if (matcher.match_pattern_case() ==\n             envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSafeRegex) {\n    if (matcher.ignore_case()) {\n      throw EnvoyException(\"ignore_case has no effect for safe_regex.\");\n    }\n    regex_ = Regex::Utility::parseRegex(matcher_.safe_regex());\n  } else if (matcher.match_pattern_case() ==\n             envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kContains) {\n    if (matcher_.ignore_case()) {\n      // Cache the lowercase conversion of the Contains matcher for future use\n      lowercase_contains_match_ = absl::AsciiStrToLower(matcher_.contains());\n    }\n  }\n}\n\nbool StringMatcherImpl::match(const ProtobufWkt::Value& value) const {\n  if (value.kind_case() != ProtobufWkt::Value::kStringValue) {\n    return false;\n  }\n\n  return match(value.string_value());\n}\n\nbool 
StringMatcherImpl::match(const absl::string_view value) const {\n  switch (matcher_.match_pattern_case()) {\n  case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact:\n    return matcher_.ignore_case() ? absl::EqualsIgnoreCase(value, matcher_.exact())\n                                  : value == matcher_.exact();\n  case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kPrefix:\n    return matcher_.ignore_case() ? absl::StartsWithIgnoreCase(value, matcher_.prefix())\n                                  : absl::StartsWith(value, matcher_.prefix());\n  case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSuffix:\n    return matcher_.ignore_case() ? absl::EndsWithIgnoreCase(value, matcher_.suffix())\n                                  : absl::EndsWith(value, matcher_.suffix());\n  case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kContains:\n    return matcher_.ignore_case()\n               ? absl::StrContains(absl::AsciiStrToLower(value), lowercase_contains_match_)\n               : absl::StrContains(value, matcher_.contains());\n  case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex:\n    FALLTHRU;\n  case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSafeRegex:\n    return regex_->match(value);\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nListMatcher::ListMatcher(const envoy::type::matcher::v3::ListMatcher& matcher) : matcher_(matcher) {\n  ASSERT(matcher_.match_pattern_case() ==\n         envoy::type::matcher::v3::ListMatcher::MatchPatternCase::kOneOf);\n\n  oneof_value_matcher_ = ValueMatcher::create(matcher_.one_of());\n}\n\nbool ListMatcher::match(const ProtobufWkt::Value& value) const {\n  if (value.kind_case() != ProtobufWkt::Value::kListValue) {\n    return false;\n  }\n\n  if (oneof_value_matcher_) {\n    for (const auto& lv : value.list_value().values()) {\n      if (oneof_value_matcher_->match(lv)) {\n        return true;\n      }\n    }\n  
}\n  return false;\n}\n\nMetadataMatcher::MetadataMatcher(const envoy::type::matcher::v3::MetadataMatcher& matcher)\n    : matcher_(matcher) {\n  for (const auto& seg : matcher.path()) {\n    path_.push_back(seg.key());\n  }\n  const auto& v = matcher_.value();\n  value_matcher_ = ValueMatcher::create(v);\n}\n\nPathMatcherConstSharedPtr PathMatcher::createExact(const std::string& exact, bool ignore_case) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_exact(exact);\n  matcher.set_ignore_case(ignore_case);\n  return std::make_shared<const PathMatcher>(matcher);\n}\n\nPathMatcherConstSharedPtr PathMatcher::createPrefix(const std::string& prefix, bool ignore_case) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_prefix(prefix);\n  matcher.set_ignore_case(ignore_case);\n  return std::make_shared<const PathMatcher>(matcher);\n}\n\nbool MetadataMatcher::match(const envoy::config::core::v3::Metadata& metadata) const {\n  const auto& value = Envoy::Config::Metadata::metadataValue(&metadata, matcher_.filter(), path_);\n  return value_matcher_ && value_matcher_->match(value);\n}\n\nbool PathMatcher::match(const absl::string_view path) const {\n  return matcher_.match(Http::PathUtil::removeQueryAndFragment(path));\n}\n\n} // namespace Matchers\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/matchers.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/common/matchers.h\"\n#include \"envoy/common/regex.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/type/matcher/v3/metadata.pb.h\"\n#include \"envoy/type/matcher/v3/number.pb.h\"\n#include \"envoy/type/matcher/v3/path.pb.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n#include \"envoy/type/matcher/v3/value.pb.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Matchers {\n\nclass ValueMatcher;\nusing ValueMatcherConstSharedPtr = std::shared_ptr<const ValueMatcher>;\n\nclass PathMatcher;\nusing PathMatcherConstSharedPtr = std::shared_ptr<const PathMatcher>;\n\nclass ValueMatcher {\npublic:\n  virtual ~ValueMatcher() = default;\n\n  /**\n   * Check whether the value is matched to the matcher.\n   */\n  virtual bool match(const ProtobufWkt::Value& value) const PURE;\n\n  /**\n   * Create the matcher object.\n   */\n  static ValueMatcherConstSharedPtr create(const envoy::type::matcher::v3::ValueMatcher& value);\n};\n\nclass NullMatcher : public ValueMatcher {\npublic:\n  /**\n   * Check whether the value is NULL.\n   */\n  bool match(const ProtobufWkt::Value& value) const override;\n};\n\nclass BoolMatcher : public ValueMatcher {\npublic:\n  BoolMatcher(bool matcher) : matcher_(matcher) {}\n\n  bool match(const ProtobufWkt::Value& value) const override;\n\nprivate:\n  const bool matcher_;\n};\n\nclass PresentMatcher : public ValueMatcher {\npublic:\n  PresentMatcher(bool matcher) : matcher_(matcher) {}\n\n  bool match(const ProtobufWkt::Value& value) const override;\n\nprivate:\n  const bool matcher_;\n};\n\nclass DoubleMatcher : public ValueMatcher {\npublic:\n  DoubleMatcher(const envoy::type::matcher::v3::DoubleMatcher& matcher) : matcher_(matcher) {}\n\n  bool match(const ProtobufWkt::Value& value) const override;\n\nprivate:\n  const envoy::type::matcher::v3::DoubleMatcher matcher_;\n};\n\nclass 
StringMatcherImpl : public ValueMatcher, public StringMatcher {\npublic:\n  explicit StringMatcherImpl(const envoy::type::matcher::v3::StringMatcher& matcher);\n\n  bool match(const absl::string_view value) const override;\n  bool match(const ProtobufWkt::Value& value) const override;\n\n  const envoy::type::matcher::v3::StringMatcher& matcher() const { return matcher_; }\n\nprivate:\n  const envoy::type::matcher::v3::StringMatcher matcher_;\n  Regex::CompiledMatcherPtr regex_;\n  std::string lowercase_contains_match_;\n};\n\nclass ListMatcher : public ValueMatcher {\npublic:\n  ListMatcher(const envoy::type::matcher::v3::ListMatcher& matcher);\n\n  bool match(const ProtobufWkt::Value& value) const override;\n\nprivate:\n  const envoy::type::matcher::v3::ListMatcher matcher_;\n\n  ValueMatcherConstSharedPtr oneof_value_matcher_;\n};\n\nclass MetadataMatcher {\npublic:\n  MetadataMatcher(const envoy::type::matcher::v3::MetadataMatcher& matcher);\n\n  /**\n   * Check whether the metadata is matched to the matcher.\n   * @param metadata the metadata to check.\n   * @return true if it's matched otherwise false.\n   */\n  bool match(const envoy::config::core::v3::Metadata& metadata) const;\n\nprivate:\n  const envoy::type::matcher::v3::MetadataMatcher matcher_;\n  std::vector<std::string> path_;\n\n  ValueMatcherConstSharedPtr value_matcher_;\n};\n\nclass PathMatcher : public StringMatcher {\npublic:\n  PathMatcher(const envoy::type::matcher::v3::PathMatcher& path) : matcher_(path.path()) {}\n  PathMatcher(const envoy::type::matcher::v3::StringMatcher& matcher) : matcher_(matcher) {}\n\n  static PathMatcherConstSharedPtr createExact(const std::string& exact, bool ignore_case);\n  static PathMatcherConstSharedPtr createPrefix(const std::string& prefix, bool ignore_case);\n\n  bool match(const absl::string_view path) const override;\n\nprivate:\n  const StringMatcherImpl matcher_;\n};\n\n} // namespace Matchers\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/mem_block_builder.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"common/common/assert.h\"\n\n#include \"absl/types/span.h\"\n\nnamespace Envoy {\n\n// Manages a block of raw memory for objects of type T. T is generally expected\n// to be a POD, where it makes sense to memcpy over it. This class carries extra\n// member variables for tracking size, and a write-pointer to support safe\n// appends.\n//\n// MemBlockBuilder is used to safely write blocks of data into a memory\n// buffer. Due to two extra member variables, it is not optimal for storing in\n// data structures. The intended usage model is to release the raw assembled\n// memory block from the MemBlockBuilder for efficient storage.\n//\n// The goal for this class is to provide a usage model to replace direct usage\n// of memcpy with a pattern that is easy to validate for correctness by\n// inspection, asserts, and fuzzing, but when compiled for optimization is\n// roughly as efficient as raw memcpy.\ntemplate <class T> class MemBlockBuilder {\npublic:\n  // Constructs a MemBlockBuilder allowing for 'capacity' instances of T.\n  explicit MemBlockBuilder(uint64_t capacity)\n      : data_(std::make_unique<T[]>(capacity)), write_span_(data_.get(), capacity) {}\n  MemBlockBuilder() = default;\n\n  /**\n   * Allocates (or reallocates) memory for the MemBlockBuilder to make it the\n   * specified capacity. This does not have resize semantics; when setCapacity()\n   * is called any previous contents are erased.\n   *\n   * @param capacity The number of memory elements to allocate.\n   */\n  void setCapacity(uint64_t capacity) {\n    setCapacityHelper(capacity, std::make_unique<T[]>(capacity));\n  }\n\n  /**\n   * @return the capacity.\n   */\n  uint64_t capacity() const { return write_span_.size() + write_span_.data() - data_.get(); }\n\n  /**\n   * Appends a single object of type T, moving an internal write-pointer\n   * forward. 
Asserts that there is room to write the object when compiled\n   * for debug.\n   *\n   * @param object the object to append.\n   */\n  void appendOne(T object) {\n    SECURITY_ASSERT(write_span_.size() >= 1, \"insufficient capacity\");\n    *write_span_.data() = object;\n    write_span_.remove_prefix(1);\n  }\n\n  /**\n   * Appends raw data specified as a span, moving an internal write-pointer\n   * forward. Asserts that there is room to write the block. It is the caller's\n   * responsibility to ensure that the input data is valid.\n   *\n   * @param data The span of objects to insert.\n   */\n  void appendData(absl::Span<const T> data) {\n    uint64_t size = data.size();\n    SECURITY_ASSERT(write_span_.size() >= size, \"insufficient capacity\");\n    if (size == 0) {\n      return;\n    }\n    memcpy(write_span_.data(), data.data(), size * sizeof(T));\n    write_span_.remove_prefix(size);\n  }\n\n  /**\n   * Appends the contents of another memory block to this one.\n   *\n   * @param src the block to append.\n   */\n  void appendBlock(const MemBlockBuilder& src) { appendData(src.span()); }\n\n  /**\n   * @return the number of elements remaining in the MemBlockBuilder.\n   */\n  uint64_t capacityRemaining() const { return write_span_.size(); }\n\n  /**\n   * Empties the contents of this.\n   */\n  void reset() { setCapacityHelper(0, std::unique_ptr<T[]>(nullptr)); }\n\n  /**\n   * Returns the underlying storage as a unique pointer, clearing this.\n   *\n   * @return the transferred storage.\n   */\n  std::unique_ptr<T[]> release() {\n    write_span_ = absl::MakeSpan(static_cast<T*>(nullptr), 0);\n    return std::move(data_);\n  }\n\n  /**\n   * @return the populated data as an absl::Span.\n   */\n  absl::Span<T> span() const { return absl::MakeSpan(data_.get(), write_span_.data()); }\n\n  /**\n   * @return The number of elements the have been added to the builder.\n   */\n  uint64_t size() const { return write_span_.data() - data_.get(); }\n\nprivate:\n  void 
setCapacityHelper(uint64_t capacity, std::unique_ptr<T[]> data) {\n    data_ = std::move(data);\n    write_span_ = absl::MakeSpan(data_.get(), capacity);\n  }\n\n  std::unique_ptr<T[]> data_;\n  absl::Span<T> write_span_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/mutex_tracer_impl.cc",
    "content": "#include \"common/common/mutex_tracer_impl.h\"\n\n#include <iostream>\n#include <memory>\n\n#include \"common/common/assert.h\"\n\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\n\nMutexTracerImpl* MutexTracerImpl::singleton_ = nullptr;\n\nMutexTracerImpl& MutexTracerImpl::getOrCreateTracer() {\n  if (singleton_ == nullptr) {\n    singleton_ = new MutexTracerImpl;\n    // There's no easy way to unregister a hook. Luckily, this hook is innocuous enough that it\n    // seems safe to leave it registered during testing, even though this technically breaks\n    // hermeticity.\n    absl::RegisterMutexTracer(&Envoy::MutexTracerImpl::contentionHook);\n  }\n  return *singleton_;\n}\n\nvoid MutexTracerImpl::contentionHook(const char* msg, const void* obj, int64_t wait_cycles) {\n  ASSERT(singleton_ != nullptr);\n  singleton_->recordContention(msg, obj, wait_cycles);\n}\n\nvoid MutexTracerImpl::reset() {\n  num_contentions_.store(0, order_);\n  current_wait_cycles_.store(0, order_);\n  lifetime_wait_cycles_.store(0, order_);\n}\n\ninline void MutexTracerImpl::recordContention(const char*, const void*, int64_t wait_cycles) {\n  num_contentions_.fetch_add(1, order_);\n  current_wait_cycles_.store(wait_cycles, order_);\n  lifetime_wait_cycles_.fetch_add(wait_cycles, order_);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/mutex_tracer_impl.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <cstddef>\n#include <cstdint>\n\n#include \"envoy/common/mutex_tracer.h\"\n\n#include \"common/common/thread_annotations.h\"\n\nnamespace Envoy {\n\n// Encapsulates a contention hook which is registered from getOrCreateTracer() which records\n// statistics about that contention.\n//\n// MutexTracer should be accessed via getOrCreateTracer(), which ensures that the global singleton\n// MutexTracer object is always being called. This is necessary because of the type signature which\n// absl::RegisterMutexTracer() expects.\n//\n// *NB*: getOrCreateTracer() is not thread-safe, and should be called once at startup, after which\n// the internal contention hook is thread-safe. This is possible by utilizing memory_order_relaxed\n// atomic writes.\nclass MutexTracerImpl final : public MutexTracer {\npublic:\n  static MutexTracerImpl& getOrCreateTracer();\n\n  // Resets the recorded statistics.\n  void reset() override;\n\n  int64_t numContentions() const override { return num_contentions_.load(order_); }\n  int64_t currentWaitCycles() const override { return current_wait_cycles_.load(order_); }\n  int64_t lifetimeWaitCycles() const override { return lifetime_wait_cycles_.load(order_); }\n\nprivate:\n  friend class MutexTracerTest;\n\n  // Hook called by absl mutex system once registered from getOrCreateTracer().\n  static void contentionHook(const char* msg, const void* obj, int64_t wait_cycles);\n\n  // Utility function for contentionHook.\n  inline void recordContention(const char*, const void*, int64_t wait_cycles);\n\n  // Keeping singleton_ as a static class member avoids the barrier-lookup for the tracer object on\n  // every contention.\n  static MutexTracerImpl* singleton_;\n\n  // Number of mutex contention occurrences since last reset.\n  std::atomic<int64_t> num_contentions_{0};\n  // Length of the current contention wait cycle.\n  std::atomic<int64_t> current_wait_cycles_{0};\n  // Total sum of all wait 
cycles.\n  std::atomic<int64_t> lifetime_wait_cycles_{0};\n  // TODO(ambuc): Build running averages here?\n\n  // We utilize std::memory_order_relaxed for all operations for the least possible contention.\n  static constexpr std::memory_order order_{std::memory_order_relaxed};\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/non_copyable.h",
    "content": "#pragma once\n\nnamespace Envoy {\n/**\n * Mixin class that makes derived classes not copyable and not moveable. Like boost::noncopyable\n * without boost.\n */\nclass NonCopyable {\nprotected:\n  NonCopyable() = default;\n\n  // Non-moveable.\n  NonCopyable(NonCopyable&&) noexcept = delete;\n  NonCopyable& operator=(NonCopyable&&) noexcept = delete;\n\n  // Non-copyable.\n  NonCopyable(const NonCopyable&) = delete;\n  NonCopyable& operator=(const NonCopyable&) = delete;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/perf_annotation.cc",
    "content": "#ifndef ENVOY_PERF_ANNOTATION\n#define ENVOY_PERF_ANNOTATION\n#endif\n\n#include \"common/common/perf_annotation.h\"\n\n#include <chrono>\n#include <iostream>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\n\nPerfOperation::PerfOperation()\n    : context_(PerfAnnotationContext::getOrCreate()), start_time_(context_->currentTime()) {}\n\nvoid PerfOperation::record(absl::string_view category, absl::string_view description) {\n  const MonotonicTime end_time = context_->currentTime();\n  const std::chrono::nanoseconds duration =\n      std::chrono::duration_cast<std::chrono::nanoseconds>(end_time - start_time_);\n  context_->record(duration, category, description);\n}\n\n// The ctor is explicitly declared private to encourage clients to use getOrCreate(), at\n// least for now. Given that it's declared it must be instantiated. 
It's not inlined\n// because the constructor is non-trivial due to the contained unordered_map.\nPerfAnnotationContext::PerfAnnotationContext() = default;\n\nvoid PerfAnnotationContext::record(std::chrono::nanoseconds duration, absl::string_view category,\n                                   absl::string_view description) {\n  CategoryDescription key = {std::string(category), std::string(description)};\n  {\n#if PERF_THREAD_SAFE\n    Thread::LockGuard lock(mutex_);\n#endif\n    DurationStats& stats = duration_stats_map_[key];\n    stats.stddev_.update(static_cast<double>(duration.count()));\n    if ((stats.stddev_.count() == 1) || (duration < stats.min_)) {\n      stats.min_ = duration;\n    }\n    stats.max_ = std::max(stats.max_, duration);\n    stats.total_ += duration;\n  }\n}\n\n// TODO(jmarantz): Consider hooking up perf information-dump into admin console, if\n// we find a performance problem we want to annotate with a live server.\nvoid PerfAnnotationContext::dump() { std::cout << toString() << std::endl; }\n\nstd::string PerfAnnotationContext::toString() {\n  PerfAnnotationContext* context = getOrCreate();\n  std::string out;\n#if PERF_THREAD_SAFE\n  Thread::LockGuard lock(context->mutex_);\n#endif\n\n  // The map is from category/description -> [duration, time]. Reverse-sort by duration.\n  std::vector<const DurationStatsMap::value_type*> sorted_values;\n  sorted_values.reserve(context->duration_stats_map_.size());\n  for (const auto& iter : context->duration_stats_map_) {\n    sorted_values.push_back(&iter);\n  }\n  std::sort(\n      sorted_values.begin(), sorted_values.end(),\n      [](const DurationStatsMap::value_type* a, const DurationStatsMap::value_type* b) -> bool {\n        const DurationStats& a_stats = a->second;\n        const DurationStats& b_stats = b->second;\n        return a_stats.total_ > b_stats.total_;\n      });\n\n  // Organize the report so it lines up in columns. 
Note that the widest duration comes first,\n  // though that may not be descending order of calls or per_call time, so we need two passes\n  // to compute column widths. First collect the column headers and their widths.\n  //\n  // TODO(jmarantz): Add a mechanism for dumping to HTML for viewing results in web browser.\n  static const char* headers[] = {\"Duration(us)\", \"# Calls\", \"Mean(ns)\", \"StdDev(ns)\",\n                                  \"Min(ns)\",      \"Max(ns)\", \"Category\", \"Description\"};\n  constexpr int num_columns = ARRAY_SIZE(headers);\n  size_t widths[num_columns];\n  std::vector<std::string> columns[num_columns];\n  for (size_t i = 0; i < num_columns; ++i) {\n    std::string column(headers[i]);\n    widths[i] = column.size();\n    columns[i].emplace_back(column);\n  }\n\n  // Compute all the column strings and their max widths.\n  for (const auto& p : sorted_values) {\n    const DurationStats& stats = p->second;\n    const auto microseconds_string = [](std::chrono::nanoseconds ns) -> std::string {\n      return std::to_string(std::chrono::duration_cast<std::chrono::microseconds>(ns).count());\n    };\n    const auto nanoseconds_string = [](std::chrono::nanoseconds ns) -> std::string {\n      return std::to_string(std::chrono::duration_cast<std::chrono::nanoseconds>(ns).count());\n    };\n    columns[0].push_back(microseconds_string(stats.total_));\n    const uint64_t count = stats.stddev_.count();\n    columns[1].push_back(std::to_string(count));\n    columns[2].push_back(\n        (count == 0)\n            ? 
\"NaN\"\n            : std::to_string(\n                  std::chrono::duration_cast<std::chrono::nanoseconds>(stats.total_).count() /\n                  count));\n    columns[3].push_back(absl::StrCat(\"\", stats.stddev_.computeStandardDeviation()));\n    columns[4].push_back(nanoseconds_string(stats.min_));\n    columns[5].push_back(nanoseconds_string(stats.max_));\n    const CategoryDescription& category_description = p->first;\n    columns[6].push_back(category_description.category);\n    columns[7].push_back(category_description.description);\n    for (size_t i = 0; i < num_columns; ++i) {\n      widths[i] = std::max(widths[i], columns[i].back().size());\n    }\n  }\n\n  // Create format-strings to right justify each column, e.g. {:>14} for a column of width 14.\n  std::vector<std::string> formats;\n  for (size_t i = 0; i < num_columns; ++i) {\n    // left-justify category & description, but right-justify the numeric columns.\n    const absl::string_view justify = (i < num_columns - 2) ? \">\" : \"<\";\n    formats.push_back(absl::StrCat(\"{:\", justify, widths[i], \"}\"));\n  }\n\n  // Write out the table.\n  for (size_t row = 0; row < columns[0].size(); ++row) {\n    for (size_t i = 0; i < num_columns; ++i) {\n      const std::string& str = columns[i][row];\n      absl::StrAppend(&out, fmt::format(formats[i], str), (i != (num_columns - 1) ? \"  \" : \"\\n\"));\n    }\n  }\n  return out;\n}\n\nvoid PerfAnnotationContext::clear() {\n  PerfAnnotationContext* context = getOrCreate();\n#if PERF_THREAD_SAFE\n  Thread::LockGuard lock(context->mutex_);\n#endif\n  context->duration_stats_map_.clear();\n}\n\nPerfAnnotationContext* PerfAnnotationContext::getOrCreate() {\n  static auto* context = new PerfAnnotationContext();\n  return context;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/perf_annotation.h",
    "content": "#pragma once\n\n#ifdef ENVOY_PERF_ANNOTATION\n\n#include <chrono>\n#include <cstdint>\n\n#include \"common/common/thread.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n\n// Performance Annotation system, enabled with\n//   bazel --define=perf_annotation=enabled ...\n// or, in individual .cc files:\n//   #define ENVOY_PERF_ANNOTATION\n// In the absence of such directives, the support classes are built and tested.\n// However, the macros for instrumenting code for performance analysis will expand\n// to nothing.\n//\n// See also: https://github.com/LLNL/Caliper -- it may be worth integrating with\n// that for added functionality, particularly around loops.\n//\n// See also, for a much more comprehensive study in performance annotation:\n// https://labs.vmware.com/vmtj/methodology-for-performance-analysis-of-vmware-vsphere-under-tier-1-applications\n// https://dl.acm.org/citation.cfm?id=1899945&dl=ACM&coll=DL\n\n/**\n * Initiates a performance operation, storing its state in perf_var. A perf_var\n * can then be reported multiple times.\n */\n#define PERF_OPERATION(perf_var) Envoy::PerfOperation perf_var\n\n/**\n * Records performance data initiated with PERF_OPERATION. The category and description\n * are joined with in the library, but only if perf is enabled. This way, any concatenation\n * overhead is skipped when perf-annotation is disabled.\n */\n#define PERF_RECORD(perf, category, description)                                                   \\\n  do {                                                                                             \\\n    perf.record(category, description);                                                            \\\n  } while (false)\n\n/**\n * Dumps recorded performance data to stdout. 
Expands to nothing if not enabled.\n */\n#define PERF_DUMP() Envoy::PerfAnnotationContext::dump()\n\n/**\n * Returns the aggregated performance data as a formatted multi-line string, showing a\n * formatted table of values. Returns \"\" if perf-annotation is disabled.\n */\n#define PERF_TO_STRING() Envoy::PerfAnnotationContext::toString()\n\n/**\n * Clears all performance data.\n */\n#define PERF_CLEAR() Envoy::PerfAnnotationContext::clear()\n\n/**\n * Controls whether performances collection and reporting is thread safe. For now,\n * leaving this enabled for predictability across multiple applications, on the assumption\n * that an uncontended mutex lock has vanishingly small cost. In the future we may try\n * to make this system thread-unsafe if mutex contention disturbs the metrics.\n */\n#define PERF_THREAD_SAFE true\n\nnamespace Envoy {\n\n/**\n * Defines a context for collecting performance data. Note that this class is\n * fully declared and defined even if ENVOY_PERF_AUTOMATION is off. 
We depend on\n * the macros to disable performance collection for production.\n */\nclass PerfAnnotationContext {\npublic:\n  /**\n   * Records time consumed by a category and description, which are shown as separate\n   * columns in the generated output table.\n   *\n   * @param duration the duration.\n   * @param category the name of a category for the recording.\n   * @param description the name of description for the recording.\n   */\n  void record(std::chrono::nanoseconds duration, absl::string_view category,\n              absl::string_view description);\n\n  /** @return MonotonicTime the current time */\n  MonotonicTime currentTime() { return time_source_.monotonicTime(); }\n\n  /**\n   * Renders the aggregated statistics as a string.\n   * @return std::string the performance data as a formatted string.\n   */\n  static std::string toString();\n\n  /**\n   * Dumps aggregated statistics (if any) to stdout.\n   */\n  static void dump();\n\n  /**\n   * Thread-safe lazy-initialization of a PerfAnnotationContext on first use.\n   * @return PerfAnnotationContext* the context.\n   */\n  static PerfAnnotationContext* getOrCreate();\n\n  /**\n   * Clears out all aggregated statistics.\n   */\n  static void clear();\n\nprivate:\n  /**\n   * PerfAnnotationContext construction should be done via getOrCreate().\n   */\n  PerfAnnotationContext();\n\n  struct CategoryDescription {\n    std::string category;\n    std::string description;\n\n    bool operator==(const CategoryDescription& other) const {\n      return category == other.category && description == other.description;\n    }\n  };\n\n  struct DurationStats {\n    std::chrono::nanoseconds total_{0};\n    std::chrono::nanoseconds min_{0};\n    std::chrono::nanoseconds max_{0};\n    WelfordStandardDeviation stddev_;\n  };\n\n  struct Hash {\n    size_t operator()(const CategoryDescription& a) const {\n      return std::hash<std::string>()(a.category) + 13 * std::hash<std::string>()(a.description);\n    }\n  };\n\n  
using DurationStatsMap = absl::node_hash_map<CategoryDescription, DurationStats, Hash>;\n\n  // Maps {category, description} to DurationStats.\n#if PERF_THREAD_SAFE\n  DurationStatsMap duration_stats_map_ ABSL_GUARDED_BY(mutex_);\n  Thread::MutexBasicLockable mutex_;\n#else\n  DurationStatsMap duration_stats_map_;\n#endif\n  RealTimeSource time_source_;\n};\n\n/**\n * Represents an operation for reporting timing to the perf system. Usage:\n *\n * f() {\n *   PerfOperation perf_op;\n *   computeIntensiveWork();\n *   perf_op.record(\"category\", \"description\");\n * }\n */\nclass PerfOperation {\npublic:\n  PerfOperation();\n\n  /**\n   * Report an event relative to the operation in progress. Note report can be called\n   * multiple times on a single PerfOperation, with distinct category/description combinations.\n   * @param category the name of a category for the recording.\n   * @param description the name of description for the recording.\n   */\n  void record(absl::string_view category, absl::string_view description);\n\nprivate:\n  PerfAnnotationContext* context_;\n  MonotonicTime start_time_;\n};\n\n} // namespace Envoy\n\n#else\n\n// Macros that expand to nothing when performance collection is disabled. These are contrived to\n// work syntactically as a C++ statement (e.g. if (foo) PERF_RECORD(...) 
else PERF_RECORD(...)).\n\n#define PERF_OPERATION(perf_var)                                                                   \\\n  do {                                                                                             \\\n  } while (false)\n#define PERF_RECORD(perf, category, description)                                                   \\\n  do {                                                                                             \\\n  } while (false)\n#define PERF_DUMP()                                                                                \\\n  do {                                                                                             \\\n  } while (false)\n#define PERF_TO_STRING() \"\"\n#define PERF_CLEAR()                                                                               \\\n  do {                                                                                             \\\n  } while (false)\n\n#endif\n"
  },
  {
    "path": "source/common/common/phantom.h",
    "content": "#pragma once\n\n#include <utility>\n\nnamespace Envoy {\n\n// A phantom type allows wrapping a common type with additional type information in order to allow\n// additional compile time safety when passing it around.\ntemplate <class InnerT, class TagT> struct Phantom {\n  Phantom() = default;\n  explicit Phantom(const InnerT& t) : val_(t) {}\n  explicit Phantom(InnerT&& t) : val_(std::move(t)) {}\n\n  InnerT& get() { return val_; }\n  const InnerT& get() const { return val_; };\n\n  bool operator==(const Phantom<InnerT, TagT>& other) const { return val_ == other.val_; }\n\nprivate:\n  InnerT val_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/posix/thread_impl.cc",
    "content": "#include \"common/common/assert.h\"\n#include \"common/common/thread_impl.h\"\n\n#include \"absl/strings/str_cat.h\"\n\n#if defined(__linux__)\n#include <sys/syscall.h>\n#endif\n\nnamespace Envoy {\nnamespace Thread {\n\nnamespace {\n\nint64_t getCurrentThreadId() {\n#ifdef __linux__\n  return static_cast<int64_t>(syscall(SYS_gettid));\n#elif defined(__APPLE__)\n  uint64_t tid;\n  pthread_threadid_np(nullptr, &tid);\n  return tid;\n#else\n#error \"Enable and test pthread id retrieval code for you arch in pthread/thread_impl.cc\"\n#endif\n}\n\n} // namespace\n\n// See https://www.man7.org/linux/man-pages/man3/pthread_setname_np.3.html.\n// The maximum thread name is 16 bytes including the terminating nul byte,\n// so we need to truncate the string_view to 15 bytes.\n#define PTHREAD_MAX_THREADNAME_LEN_INCLUDING_NULL_BYTE 16\n\n/**\n * Wrapper for a pthread thread. We don't use std::thread because it eats exceptions and leads to\n * unusable stack traces.\n */\nclass ThreadImplPosix : public Thread {\npublic:\n  ThreadImplPosix(std::function<void()> thread_routine, OptionsOptConstRef options)\n      : thread_routine_(std::move(thread_routine)) {\n    if (options) {\n      name_ = options->name_.substr(0, PTHREAD_MAX_THREADNAME_LEN_INCLUDING_NULL_BYTE - 1);\n    }\n    RELEASE_ASSERT(Logger::Registry::initialized(), \"\");\n    const int rc = pthread_create(\n        &thread_handle_, nullptr,\n        [](void* arg) -> void* {\n          static_cast<ThreadImplPosix*>(arg)->thread_routine_();\n          return nullptr;\n        },\n        this);\n    RELEASE_ASSERT(rc == 0, \"\");\n\n#if SUPPORTS_PTHREAD_NAMING\n    // If the name was not specified, get it from the OS. 
If the name was\n    // specified, write it into the thread, and assert that the OS sees it the\n    // same way.\n    if (name_.empty()) {\n      getNameFromOS(name_);\n    } else {\n      const int set_name_rc = pthread_setname_np(thread_handle_, name_.c_str());\n      if (set_name_rc != 0) {\n        ENVOY_LOG_MISC(trace, \"Error {} setting name `{}'\", set_name_rc, name_);\n      } else {\n        // When compiling in debug mode, read back the thread-name from the OS,\n        // and verify it's what we asked for. This ensures the truncation is as\n        // expected, and that the OS will actually retain all the bytes of the\n        // name we expect.\n        //\n        // Note that the system-call to read the thread name may fail in case\n        // the thread exits after the call to set the name above, and before the\n        // call to get the name, so we can only do the assert if that call\n        // succeeded.\n        std::string check_name;\n        ASSERT(!getNameFromOS(check_name) || check_name == name_,\n               absl::StrCat(\"configured name=\", name_, \" os name=\", check_name));\n      }\n    }\n#endif\n  }\n\n  ~ThreadImplPosix() override { ASSERT(joined_); }\n\n  std::string name() const override { return name_; }\n\n  // Thread::Thread\n  void join() override {\n    ASSERT(!joined_);\n    joined_ = true;\n    const int rc = pthread_join(thread_handle_, nullptr);\n    RELEASE_ASSERT(rc == 0, \"\");\n  }\n\nprivate:\n#if SUPPORTS_PTHREAD_NAMING\n  // Attempts to get the name from the operating system, returning true and\n  // updating 'name' if successful. 
Note that during normal operation this\n  // may fail, if the thread exits prior to the system call.\n  bool getNameFromOS(std::string& name) {\n    // Verify that the name got written into the thread as expected.\n    char buf[PTHREAD_MAX_THREADNAME_LEN_INCLUDING_NULL_BYTE];\n    const int get_name_rc = pthread_getname_np(thread_handle_, buf, sizeof(buf));\n    if (get_name_rc != 0) {\n      ENVOY_LOG_MISC(trace, \"Error {} getting name\", get_name_rc);\n      return false;\n    }\n    name = buf;\n    return true;\n  }\n#endif\n\n  std::function<void()> thread_routine_;\n  pthread_t thread_handle_;\n  std::string name_;\n  bool joined_{false};\n};\n\nThreadPtr ThreadFactoryImplPosix::createThread(std::function<void()> thread_routine,\n                                               OptionsOptConstRef options) {\n  return std::make_unique<ThreadImplPosix>(thread_routine, options);\n}\n\nThreadId ThreadFactoryImplPosix::currentThreadId() { return ThreadId(getCurrentThreadId()); }\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/posix/thread_impl.h",
    "content": "#pragma once\n\n#include <pthread.h>\n\n#include <functional>\n\n#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\n/**\n * Implementation of ThreadFactory\n */\nclass ThreadFactoryImplPosix : public ThreadFactory {\npublic:\n  // Thread::ThreadFactory\n  ThreadPtr createThread(std::function<void()> thread_routine, OptionsOptConstRef options) override;\n  ThreadId currentThreadId() override;\n};\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/random_generator.cc",
    "content": "#include \"common/common/random_generator.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"openssl/rand.h\"\n\nnamespace Envoy {\nnamespace Random {\n\nconst size_t RandomGeneratorImpl::UUID_LENGTH = 36;\n\nuint64_t RandomGeneratorImpl::random() {\n  // Prefetch 256 * sizeof(uint64_t) bytes of randomness. buffered_idx is initialized to 256,\n  // i.e. out-of-range value, so the buffer will be filled with randomness on the first call\n  // to this function.\n  //\n  // There is a diminishing return when increasing the prefetch size, as illustrated below in\n  // a test that generates 1,000,000,000 uint64_t numbers (results on Intel Xeon E5-1650v3).\n  //\n  // //test/common/runtime:runtime_impl_test - Random.DISABLED_benchmarkRandom\n  //\n  //  prefetch  |  time  | improvement\n  // (uint64_t) |  (ms)  | (% vs prev)\n  // ---------------------------------\n  //         32 | 25,931 |\n  //         64 | 15,124 | 42% faster\n  //        128 |  9,653 | 36% faster\n  //        256 |  6,930 | 28% faster  <-- used right now\n  //        512 |  5,571 | 20% faster\n  //       1024 |  4,888 | 12% faster\n  //       2048 |  4,594 |  6% faster\n  //       4096 |  4,424 |  4% faster\n  //       8192 |  4,386 |  1% faster\n\n  const size_t prefetch = 256;\n  static thread_local uint64_t buffered[prefetch];\n  static thread_local size_t buffered_idx = prefetch;\n\n  if (buffered_idx >= prefetch) {\n    int rc = RAND_bytes(reinterpret_cast<uint8_t*>(buffered), sizeof(buffered));\n    ASSERT(rc == 1);\n    buffered_idx = 0;\n  }\n\n  // Consume uint64_t from the buffer.\n  return buffered[buffered_idx++];\n}\n\nstd::string RandomGeneratorImpl::uuid() {\n  // Prefetch 2048 bytes of randomness. buffered_idx is initialized to sizeof(buffered),\n  // i.e. 
out-of-range value, so the buffer will be filled with randomness on the first\n  // call to this function.\n  //\n  // There is a diminishing return when increasing the prefetch size, as illustrated below\n  // in a test that generates 100,000,000 UUIDs (results on Intel Xeon E5-1650v3).\n  //\n  // //test/common/runtime:uuid_util_test - UUIDUtilsTest.DISABLED_benchmark\n  //\n  //   prefetch |  time  | improvement\n  //   (bytes)  |  (ms)  | (% vs prev)\n  // ---------------------------------\n  //        128 | 16,353 |\n  //        256 | 11,827 | 28% faster\n  //        512 |  9,676 | 18% faster\n  //       1024 |  8,594 | 11% faster\n  //       2048 |  8,097 |  6% faster  <-- used right now\n  //       4096 |  7,790 |  4% faster\n  //       8192 |  7,737 |  1% faster\n\n  static thread_local uint8_t buffered[2048];\n  static thread_local size_t buffered_idx = sizeof(buffered);\n\n  if (buffered_idx + 16 > sizeof(buffered)) {\n    int rc = RAND_bytes(buffered, sizeof(buffered));\n    ASSERT(rc == 1);\n    buffered_idx = 0;\n  }\n\n  // Consume 16 bytes from the buffer.\n  ASSERT(buffered_idx + 16 <= sizeof(buffered));\n  uint8_t* rand = &buffered[buffered_idx];\n  buffered_idx += 16;\n\n  // Create UUID from Truly Random or Pseudo-Random Numbers.\n  // See: https://tools.ietf.org/html/rfc4122#section-4.4\n  rand[6] = (rand[6] & 0x0f) | 0x40; // UUID version 4 (random)\n  rand[8] = (rand[8] & 0x3f) | 0x80; // UUID variant 1 (RFC4122)\n\n  // Convert UUID to a string representation, e.g. 
a121e9e1-feae-4136-9e0e-6fac343d56c9.\n  static const char* const hex = \"0123456789abcdef\";\n  char uuid[UUID_LENGTH];\n\n  for (uint8_t i = 0; i < 4; i++) {\n    const uint8_t d = rand[i];\n    uuid[2 * i] = hex[d >> 4];\n    uuid[2 * i + 1] = hex[d & 0x0f];\n  }\n\n  uuid[8] = '-';\n\n  for (uint8_t i = 4; i < 6; i++) {\n    const uint8_t d = rand[i];\n    uuid[2 * i + 1] = hex[d >> 4];\n    uuid[2 * i + 2] = hex[d & 0x0f];\n  }\n\n  uuid[13] = '-';\n\n  for (uint8_t i = 6; i < 8; i++) {\n    const uint8_t d = rand[i];\n    uuid[2 * i + 2] = hex[d >> 4];\n    uuid[2 * i + 3] = hex[d & 0x0f];\n  }\n\n  uuid[18] = '-';\n\n  for (uint8_t i = 8; i < 10; i++) {\n    const uint8_t d = rand[i];\n    uuid[2 * i + 3] = hex[d >> 4];\n    uuid[2 * i + 4] = hex[d & 0x0f];\n  }\n\n  uuid[23] = '-';\n\n  for (uint8_t i = 10; i < 16; i++) {\n    const uint8_t d = rand[i];\n    uuid[2 * i + 4] = hex[d >> 4];\n    uuid[2 * i + 5] = hex[d & 0x0f];\n  }\n\n  return std::string(uuid, UUID_LENGTH);\n}\n\n} // namespace Random\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/random_generator.h",
    "content": "#pragma once\n\n#include \"envoy/common/random_generator.h\"\n\nnamespace Envoy {\nnamespace Random {\n/**\n * Implementation of RandomGenerator that uses per-thread RANLUX generators seeded with current\n * time.\n */\nclass RandomGeneratorImpl : public RandomGenerator {\npublic:\n  // Random::RandomGenerator\n  uint64_t random() override;\n  std::string uuid() override;\n\n  static const size_t UUID_LENGTH;\n};\n\n} // namespace Random\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/regex.cc",
    "content": "#include \"common/common/regex.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/type/matcher/v3/regex.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"re2/re2.h\"\n\nnamespace Envoy {\nnamespace Regex {\nnamespace {\n\nclass CompiledStdMatcher : public CompiledMatcher {\npublic:\n  CompiledStdMatcher(std::regex&& regex) : regex_(std::move(regex)) {}\n\n  // CompiledMatcher\n  bool match(absl::string_view value) const override {\n    try {\n      return std::regex_match(value.begin(), value.end(), regex_);\n    } catch (const std::regex_error& e) {\n      return false;\n    }\n  }\n\n  // CompiledMatcher\n  std::string replaceAll(absl::string_view value, absl::string_view substitution) const override {\n    try {\n      return std::regex_replace(std::string(value), regex_, std::string(substitution));\n    } catch (const std::regex_error& e) {\n      return std::string(value);\n    }\n  }\n\nprivate:\n  const std::regex regex_;\n};\n\nclass CompiledGoogleReMatcher : public CompiledMatcher {\npublic:\n  CompiledGoogleReMatcher(const envoy::type::matcher::v3::RegexMatcher& config)\n      : regex_(config.regex(), re2::RE2::Quiet) {\n    if (!regex_.ok()) {\n      throw EnvoyException(regex_.error());\n    }\n\n    const uint32_t regex_program_size = static_cast<uint32_t>(regex_.ProgramSize());\n\n    // Check if the deprecated field max_program_size is set first, and follow the old logic if so.\n    if (config.google_re2().has_max_program_size()) {\n      const uint32_t max_program_size =\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.google_re2(), max_program_size, 100);\n      if (regex_program_size > max_program_size) {\n        throw EnvoyException(fmt::format(\"regex '{}' RE2 program size of {} > max program size of \"\n                                         \"{}. 
Increase configured max program size if necessary.\",\n                                         config.regex(), regex_program_size, max_program_size));\n      }\n      return;\n    }\n\n    Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting();\n    if (runtime) {\n      Stats::Scope& root_scope = runtime->getRootScope();\n\n      // TODO(perf): It would be more efficient to create the stats (program size histogram, warning\n      // counter) on startup and not with each regex match.\n      Stats::StatNameManagedStorage program_size_stat_name(\"re2.program_size\",\n                                                           root_scope.symbolTable());\n      Stats::Histogram& program_size_stat = root_scope.histogramFromStatName(\n          program_size_stat_name.statName(), Stats::Histogram::Unit::Unspecified);\n      program_size_stat.recordValue(regex_program_size);\n\n      Stats::StatNameManagedStorage warn_count_stat_name(\"re2.exceeded_warn_level\",\n                                                         root_scope.symbolTable());\n      Stats::Counter& warn_count = root_scope.counterFromStatName(warn_count_stat_name.statName());\n\n      const uint32_t max_program_size_error_level =\n          runtime->snapshot().getInteger(\"re2.max_program_size.error_level\", 100);\n      if (regex_program_size > max_program_size_error_level) {\n        throw EnvoyException(fmt::format(\"regex '{}' RE2 program size of {} > max program size of \"\n                                         \"{} set for the error level threshold. 
Increase \"\n                                         \"configured max program size if necessary.\",\n                                         config.regex(), regex_program_size,\n                                         max_program_size_error_level));\n      }\n\n      const uint32_t max_program_size_warn_level =\n          runtime->snapshot().getInteger(\"re2.max_program_size.warn_level\", UINT32_MAX);\n      if (regex_program_size > max_program_size_warn_level) {\n        warn_count.inc();\n        ENVOY_LOG_MISC(\n            warn,\n            \"regex '{}' RE2 program size of {} > max program size of {} set for the warn \"\n            \"level threshold. Increase configured max program size if necessary.\",\n            config.regex(), regex_program_size, max_program_size_warn_level);\n      }\n    }\n  }\n\n  // CompiledMatcher\n  bool match(absl::string_view value) const override {\n    return re2::RE2::FullMatch(re2::StringPiece(value.data(), value.size()), regex_);\n  }\n\n  // CompiledMatcher\n  std::string replaceAll(absl::string_view value, absl::string_view substitution) const override {\n    std::string result = std::string(value);\n    re2::RE2::GlobalReplace(&result, regex_,\n                            re2::StringPiece(substitution.data(), substitution.size()));\n    return result;\n  }\n\nprivate:\n  const re2::RE2 regex_;\n};\n\n} // namespace\n\nCompiledMatcherPtr Utility::parseRegex(const envoy::type::matcher::v3::RegexMatcher& matcher) {\n  // Google Re is the only currently supported engine.\n  ASSERT(matcher.has_google_re2());\n  return std::make_unique<CompiledGoogleReMatcher>(matcher);\n}\n\nCompiledMatcherPtr Utility::parseStdRegexAsCompiledMatcher(const std::string& regex,\n                                                           std::regex::flag_type flags) {\n  return std::make_unique<CompiledStdMatcher>(parseStdRegex(regex, flags));\n}\n\nstd::regex Utility::parseStdRegex(const std::string& regex, std::regex::flag_type flags) {\n  
// TODO(zuercher): In the future, PGV (https://github.com/envoyproxy/protoc-gen-validate)\n  // annotations may allow us to remove this in favor of direct validation of regular\n  // expressions.\n  try {\n    return std::regex(regex, flags);\n  } catch (const std::regex_error& e) {\n    throw EnvoyException(fmt::format(\"Invalid regex '{}': {}\", regex, e.what()));\n  }\n}\n\n} // namespace Regex\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/regex.h",
    "content": "#pragma once\n\n#include <memory>\n#include <regex>\n\n#include \"envoy/common/regex.h\"\n#include \"envoy/type/matcher/v3/regex.pb.h\"\n\nnamespace Envoy {\nnamespace Regex {\n\n/**\n * Utilities for constructing regular expressions.\n */\nclass Utility {\npublic:\n  /**\n   * Constructs a std::regex, converting any std::regex_error exception into an EnvoyException.\n   * @param regex std::string containing the regular expression to parse.\n   * @param flags std::regex::flag_type containing parser flags. Defaults to std::regex::optimize.\n   * @return std::regex constructed from regex and flags.\n   * @throw EnvoyException if the regex string is invalid.\n   */\n  static std::regex parseStdRegex(const std::string& regex,\n                                  std::regex::flag_type flags = std::regex::optimize);\n\n  /**\n   * Construct an std::regex compiled regex matcher.\n   *\n   * TODO(mattklein123): In general this is only currently used in deprecated code paths and can be\n   * removed once all of those code paths are removed.\n   */\n  static CompiledMatcherPtr\n  parseStdRegexAsCompiledMatcher(const std::string& regex,\n                                 std::regex::flag_type flags = std::regex::optimize);\n\n  /**\n   * Construct a compiled regex matcher from a match config.\n   */\n  static CompiledMatcherPtr parseRegex(const envoy::type::matcher::v3::RegexMatcher& matcher);\n};\n\n} // namespace Regex\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/scalar_to_byte_vector.h",
    "content": "#pragma once\n\n#include <cinttypes>\n#include <vector>\n\nnamespace Envoy {\ntemplate <typename T> void pushScalarToByteVector(T val, std::vector<uint8_t>& bytes) {\n  uint8_t* byte_ptr = reinterpret_cast<uint8_t*>(&val);\n  for (uint32_t byte_index = 0; byte_index < sizeof val; byte_index++) {\n    bytes.push_back(*byte_ptr++);\n  }\n}\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/scope_tracker.h",
    "content": "#pragma once\n\n#include \"envoy/common/scope_tracker.h\"\n#include \"envoy/event/dispatcher.h\"\n\nnamespace Envoy {\n\n// A small class for tracking the scope of the object which is currently having\n// work done in this thread.\n//\n// When created, it sets the tracked object in the dispatcher, and when destroyed it points the\n// dispatcher at the previously tracked object.\nclass ScopeTrackerScopeState {\npublic:\n  ScopeTrackerScopeState(const ScopeTrackedObject* object, Event::Dispatcher& dispatcher)\n      : dispatcher_(dispatcher) {\n    latched_object_ = dispatcher_.setTrackedObject(object);\n  }\n\n  ~ScopeTrackerScopeState() { dispatcher_.setTrackedObject(latched_object_); }\n\nprivate:\n  const ScopeTrackedObject* latched_object_;\n  Event::Dispatcher& dispatcher_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/standard/logger_impl.h",
    "content": "#pragma once\n\n#include \"common/common/base_logger.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\n#define GENERATE_LOGGER(X) StandardLogger(#X),\n\n} // namespace Logger\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/statusor.h",
    "content": "#pragma once\n\n#include \"third_party/statusor/statusor.h\"\n\n/**\n * Facility for returning either a valid value or an error in a form of Envoy::Status.\n *\n * IMPORTANT: StatusOr default constructor must not be used as it does not fit into any\n * Envoy's use cases. The error extracting functions in the common/common/status.h will\n * RELEASE_ASSERT on default initialized StatusOr.\n *\n * To return an error StatusOr object an error creating function from common/common/status.h must be\n * used.\n * TODO(yanavlasov): add clang-tidy or lint check to enforce this.\n *\n * Usage example:\n *\n *  Envoy::StatusOr<int> Foo() {\n *    ...\n *    if (codec_error) {\n *      return CodecProtocolError(\"Invalid protocol\");\n *    }\n *    return 123456;\n *  }\n *\n *  void Bar() {\n *    auto status_or = Foo();\n *    if (status_or.ok()) {\n *      int result = status_or.value();\n *      ...\n *    } else {\n *      ASSERT(IsCodecProtocolError(status_or.status()));\n *      ENVOY_LOG(debug, \"Codec error encountered: {}\", status_or.status().message());\n *    }\n *  }\n */\n\nnamespace Envoy {\n\nusing absl::StatusOr; // NOLINT(misc-unused-using-decls)\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/stl_helpers.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <functional>\n#include <iostream>\n#include <vector>\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\n/**\n * See if a reference exists within a container of std::reference_wrappers.\n */\ntemplate <class Container, class T> bool containsReference(const Container& c, const T& t) {\n  return std::find_if(c.begin(), c.end(), [&](std::reference_wrapper<T> e) -> bool {\n           return &e.get() == &t;\n         }) != c.end();\n}\n} // namespace Envoy\n\n// NOLINT(namespace-envoy)\n// Overload functions in std library.\nnamespace std {\n// Overload std::operator<< to output a vector.\ntemplate <class T> std::ostream& operator<<(std::ostream& out, const std::vector<T>& v) {\n  out << \"vector { \" << absl::StrJoin(v, \", \", absl::StreamFormatter()) << \" }\";\n  return out;\n}\n\n} // namespace std\n"
  },
  {
    "path": "source/common/common/thread.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <cstring>\n#include <functional>\n#include <memory>\n\n#include \"envoy/thread/thread.h\"\n\n#include \"common/common/non_copyable.h\"\n\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\n/**\n * Implementation of BasicLockable\n */\nclass MutexBasicLockable : public BasicLockable {\npublic:\n  // BasicLockable\n  void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() override { mutex_.Lock(); }\n  bool tryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) override { return mutex_.TryLock(); }\n  void unlock() ABSL_UNLOCK_FUNCTION() override { mutex_.Unlock(); }\n\nprivate:\n  friend class CondVar;\n  absl::Mutex mutex_;\n};\n\n/**\n * Implementation of condvar, based on MutexLockable. This interface is a hybrid\n * between std::condition_variable and absl::CondVar.\n */\nclass CondVar {\npublic:\n  enum class WaitStatus {\n    Timeout,\n    NoTimeout, // Success or Spurious\n  };\n\n  /**\n   * Note that it is not necessary to be holding an associated mutex to call\n   * notifyOne or notifyAll. See the discussion in\n   *     http://en.cppreference.com/w/cpp/thread/condition_variable_any/notify_one\n   * for more details.\n   */\n  void notifyOne() noexcept { condvar_.Signal(); }\n  void notifyAll() noexcept { condvar_.SignalAll(); };\n\n  /**\n   * wait() and waitFor do not throw, and never will, as they are based on\n   * absl::CondVar, so it's safe to pass the a mutex to wait() directly, even if\n   * it's also managed by a LockGuard. 
See definition of CondVar in\n   * source/source/thread.h for an alternate implementation, which does not work\n   * with thread annotation.\n   */\n  void wait(MutexBasicLockable& mutex) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) {\n    condvar_.Wait(&mutex.mutex_);\n  }\n\n  /**\n   * @return WaitStatus whether the condition timed out or not.\n   */\n  template <class Rep, class Period>\n  WaitStatus waitFor(MutexBasicLockable& mutex,\n                     std::chrono::duration<Rep, Period> duration) noexcept\n      ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) {\n    return condvar_.WaitWithTimeout(&mutex.mutex_, absl::FromChrono(duration))\n               ? WaitStatus::Timeout\n               : WaitStatus::NoTimeout;\n  }\n\nprivate:\n  // Note: alternate implementation of this class based on std::condition_variable_any\n  // https://gist.github.com/jmarantz/d22b836cee3ca203cc368553eda81ce5\n  // does not currently work well with thread-annotation.\n  absl::CondVar condvar_;\n};\n\nenum class AtomicPtrAllocMode { DoNotDelete, DeleteOnDestruct };\n\n// Manages an array of atomic pointers to T, providing a relatively\n// contention-free mechanism to lazily get a T* at an index, where the caller\n// provides a mechanism to instantiate a T* under lock, if one has not already\n// been stored at that index.\n//\n// alloc_mode controls whether allocated T* entries should be deleted on\n// destruction of the array. 
This should be set to AtomicPtrAllocMode::DoNotDelete\n// if the T* returned from MakeObject are managed by the caller.\ntemplate <class T, uint32_t size, AtomicPtrAllocMode alloc_mode>\nclass AtomicPtrArray : NonCopyable {\npublic:\n  AtomicPtrArray() {\n    for (std::atomic<T*>& atomic_ref : data_) {\n      atomic_ref = nullptr;\n    }\n  }\n\n  ~AtomicPtrArray() {\n    if (alloc_mode == AtomicPtrAllocMode::DeleteOnDestruct) {\n      for (std::atomic<T*>& atomic_ref : data_) {\n        T* ptr = atomic_ref.load();\n        if (ptr != nullptr) {\n          delete ptr;\n        }\n      }\n    }\n  }\n\n  // User-defined function for allocating an object. This will be called\n  // under a lock controlled by this class, so MakeObject will not race\n  // against itself. MakeObject is allowed to return nullptr, in which\n  // case the next call to get() will call MakeObject again.\n  using MakeObject = std::function<T*()>;\n\n  /*\n   * Returns an already existing T* at index, or calls make_object to\n   * instantiate and save the T* under lock.\n   *\n   * @param index the Index to look up.\n   * @param make_object function to call under lock to make a T*.\n   * @return The new or already-existing T*, possibly nullptr if make_object returns nullptr.\n   */\n  T* get(uint32_t index, const MakeObject& make_object) {\n    std::atomic<T*>& atomic_ref = data_[index];\n\n    // First, use an atomic load to see if the object has already been allocated.\n    if (atomic_ref.load() == nullptr) {\n      absl::MutexLock lock(&mutex_);\n\n      // If that fails, check again under lock as two threads might have raced\n      // to create the object.\n      if (atomic_ref.load() == nullptr) {\n        atomic_ref = make_object();\n      }\n    }\n    return atomic_ref.load();\n  }\n\nprivate:\n  std::atomic<T*> data_[size];\n  absl::Mutex mutex_;\n};\n\n// Manages a pointer to T, providing a relatively contention-free mechanism to\n// lazily create a T*, where the caller provides a 
mechanism to instantiate a\n// T* under lock, if one has not already been stored.\n//\n// alloc_mode controls whether allocated T* objects should be deleted on\n// destruction of the AtomicObject. This should be set to\n// AtomicPtrAllocMode::DoNotDelete if the T* returned from MakeObject are managed\n// by the caller.\ntemplate <class T, AtomicPtrAllocMode alloc_mode>\nclass AtomicPtr : private AtomicPtrArray<T, 1, alloc_mode> {\npublic:\n  using BaseClass = AtomicPtrArray<T, 1, alloc_mode>;\n  using typename BaseClass::MakeObject;\n\n  /*\n   * Returns an already existing T*, or calls make_object to instantiate and\n   * save the T* under lock.\n   *\n   * @param make_object function to call under lock to make a T*.\n   * @return The new or already-existing T*, possibly nullptr if make_object returns nullptr.\n   */\n  T* get(const MakeObject& make_object) { return BaseClass::get(0, make_object); }\n};\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/thread_annotations.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n#include \"absl/base/thread_annotations.h\"\n"
  },
  {
    "path": "source/common/common/thread_synchronizer.cc",
    "content": "#include \"common/common/thread_synchronizer.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\nvoid ThreadSynchronizer::enable() {\n  ASSERT(data_ == nullptr);\n  data_ = std::make_unique<SynchronizerData>();\n}\n\nThreadSynchronizer::SynchronizerEntry&\nThreadSynchronizer::getOrCreateEntry(absl::string_view event_name) {\n  absl::MutexLock lock(&data_->mutex_);\n  auto& existing_entry = data_->entries_[event_name];\n  if (existing_entry == nullptr) {\n    ENVOY_LOG(debug, \"thread synchronzier: creating entry: {}\", event_name);\n    existing_entry = std::make_unique<SynchronizerEntry>();\n  }\n  return *existing_entry;\n}\n\nvoid ThreadSynchronizer::waitOnWorker(absl::string_view event_name) {\n  SynchronizerEntry& entry = getOrCreateEntry(event_name);\n  absl::MutexLock lock(&entry.mutex_);\n  ENVOY_LOG(debug, \"thread synchronizer: waiting on next {}\", event_name);\n  ASSERT(!entry.wait_on_);\n  entry.wait_on_ = true;\n}\n\nvoid ThreadSynchronizer::syncPointWorker(absl::string_view event_name) {\n  SynchronizerEntry& entry = getOrCreateEntry(event_name);\n  absl::MutexLock lock(&entry.mutex_);\n\n  // See if we are ignoring waits. If so, just return.\n  if (!entry.wait_on_) {\n    ENVOY_LOG(debug, \"thread synchronizer: sync point {}: ignoring\", event_name);\n    return;\n  }\n  entry.wait_on_ = false;\n\n  // See if we are already signaled. 
If so, just clear signaled and return.\n  if (entry.signaled_) {\n    ENVOY_LOG(debug, \"thread synchronizer: sync point {}: already signaled\", event_name);\n    entry.signaled_ = false;\n    return;\n  }\n\n  // Now signal any barrier waiters.\n  entry.at_barrier_ = true;\n\n  // Now wait to be signaled.\n  ENVOY_LOG(debug, \"thread synchronizer: blocking on sync point {}\", event_name);\n  entry.mutex_.Await(absl::Condition(&entry.signaled_));\n  ENVOY_LOG(debug, \"thread synchronizer: done blocking for sync point {}\", event_name);\n\n  // Clear the barrier and signaled before unlocking and returning.\n  ASSERT(entry.at_barrier_);\n  entry.at_barrier_ = false;\n  ASSERT(entry.signaled_);\n  entry.signaled_ = false;\n}\n\nvoid ThreadSynchronizer::barrierOnWorker(absl::string_view event_name) {\n  SynchronizerEntry& entry = getOrCreateEntry(event_name);\n  absl::MutexLock lock(&entry.mutex_);\n  ENVOY_LOG(debug, \"thread synchronizer: barrier on {}\", event_name);\n  entry.mutex_.Await(absl::Condition(&entry.at_barrier_));\n  ENVOY_LOG(debug, \"thread synchronizer: barrier complete {}\", event_name);\n}\n\nvoid ThreadSynchronizer::signalWorker(absl::string_view event_name) {\n  SynchronizerEntry& entry = getOrCreateEntry(event_name);\n  absl::MutexLock lock(&entry.mutex_);\n  ASSERT(!entry.signaled_);\n  ENVOY_LOG(debug, \"thread synchronizer: signaling {}\", event_name);\n  entry.signaled_ = true;\n}\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/thread_synchronizer.h",
    "content": "#pragma once\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\n/**\n * This class allows for forcing hard to test thread permutations. It is loosely modeled after:\n * https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/thread/thread_synchronizer.h\n *\n * The idea is that there is almost no cost if the synchronizer is not enabled by test code as\n * there is a single inline pointer check.\n */\nclass ThreadSynchronizer : Logger::Loggable<Logger::Id::misc> {\npublic:\n  /**\n   * Enable the synchronizer. This should be called once per test by test code.\n   */\n  void enable();\n\n  /**\n   * This is the only API that should generally be called from production code. It introduces\n   * a \"sync point\" that test code can then use to force blocking, thread barriers, etc. Even\n   * when the synchronizer is enabled(), the syncPoint() will do nothing unless it has been\n   * registered to block via waitOn().\n   */\n  void syncPoint(absl::string_view event_name) {\n    if (data_ != nullptr) {\n      syncPointWorker(event_name);\n    }\n  }\n\n  /**\n   * The next time the sync point registered with event_name is invoked via syncPoint(), the calling\n   * code will block until signaled. Note that this is a one-shot operation and the sync point's\n   * wait status will be cleared.\n   */\n  void waitOn(absl::string_view event_name) {\n    ASSERT(data_ != nullptr, \"call enable() from test code before calling this method\");\n    waitOnWorker(event_name);\n  }\n\n  /**\n   * This call will block until the next time the sync point registered with event_name is invoked.\n   * The event_name must have been previously registered for blocking via waitOn(). 
The typical\n   * test pattern is to have a thread arrive at a sync point, block, and then release a test\n   * thread which continues test execution, eventually calling signal() to release the other thread.\n   */\n  void barrierOn(absl::string_view event_name) {\n    ASSERT(data_ != nullptr, \"call enable() from test code before calling this method\");\n    barrierOnWorker(event_name);\n  }\n\n  /**\n   * Signal an event such that a thread that is blocked within syncPoint() will now proceed.\n   */\n  void signal(absl::string_view event_name) {\n    ASSERT(data_ != nullptr, \"call enable() from test code before calling this method\");\n    signalWorker(event_name);\n  }\n\nprivate:\n  struct SynchronizerEntry {\n    ~SynchronizerEntry() {\n      // Make sure we don't have any pending signals which would indicate a bad test.\n      ASSERT(!signaled_);\n    }\n\n    absl::Mutex mutex_;\n    bool wait_on_ ABSL_GUARDED_BY(mutex_){};\n    bool signaled_ ABSL_GUARDED_BY(mutex_){};\n    bool at_barrier_ ABSL_GUARDED_BY(mutex_){};\n  };\n\n  struct SynchronizerData {\n    absl::Mutex mutex_;\n    absl::flat_hash_map<std::string, std::unique_ptr<SynchronizerEntry>>\n        entries_ ABSL_GUARDED_BY(mutex_);\n  };\n\n  SynchronizerEntry& getOrCreateEntry(absl::string_view event_name);\n  void syncPointWorker(absl::string_view event_name);\n  void waitOnWorker(absl::string_view event_name);\n  void barrierOnWorker(absl::string_view event_name);\n  void signalWorker(absl::string_view event_name);\n\n  std::unique_ptr<SynchronizerData> data_;\n};\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/token_bucket_impl.cc",
    "content": "#include \"common/common/token_bucket_impl.h\"\n\n#include <chrono>\n\nnamespace Envoy {\n\nTokenBucketImpl::TokenBucketImpl(uint64_t max_tokens, TimeSource& time_source, double fill_rate)\n    : max_tokens_(max_tokens), fill_rate_(std::abs(fill_rate)), tokens_(max_tokens),\n      last_fill_(time_source.monotonicTime()), time_source_(time_source) {}\n\nuint64_t TokenBucketImpl::consume(uint64_t tokens, bool allow_partial) {\n  if (tokens_ < max_tokens_) {\n    const auto time_now = time_source_.monotonicTime();\n    tokens_ = std::min((std::chrono::duration<double>(time_now - last_fill_).count() * fill_rate_) +\n                           tokens_,\n                       max_tokens_);\n    last_fill_ = time_now;\n  }\n\n  if (allow_partial) {\n    tokens = std::min(tokens, static_cast<uint64_t>(std::floor(tokens_)));\n  }\n\n  if (tokens_ < tokens) {\n    return 0;\n  }\n\n  tokens_ -= tokens;\n  return tokens;\n}\n\nstd::chrono::milliseconds TokenBucketImpl::nextTokenAvailable() {\n  // If there are tokens available, return immediately.\n  if (tokens_ >= 1) {\n    return std::chrono::milliseconds(0);\n  }\n  // TODO(ramaraochavali): implement a more precise way that works for very low rate limits.\n  return std::chrono::milliseconds(static_cast<uint64_t>(std::ceil((1 / fill_rate_) * 1000)));\n}\n\nvoid TokenBucketImpl::reset(uint64_t num_tokens) {\n  ASSERT(num_tokens <= max_tokens_);\n  tokens_ = num_tokens;\n  last_fill_ = time_source_.monotonicTime();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/token_bucket_impl.h",
    "content": "#pragma once\n\n#include \"envoy/common/time.h\"\n#include \"envoy/common/token_bucket.h\"\n\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\n\n/**\n * A class that implements token bucket interface (not thread-safe).\n */\nclass TokenBucketImpl : public TokenBucket {\npublic:\n  /**\n   * @param max_tokens supplies the maximum number of tokens in the bucket.\n   * @param time_source supplies the time source.\n   * @param fill_rate supplies the number of tokens that will return to the bucket on each second.\n   * The default is 1.\n   */\n  explicit TokenBucketImpl(uint64_t max_tokens, TimeSource& time_source, double fill_rate = 1);\n\n  // TokenBucket\n  uint64_t consume(uint64_t tokens, bool allow_partial) override;\n  std::chrono::milliseconds nextTokenAvailable() override;\n  void reset(uint64_t num_tokens) override;\n\nprivate:\n  const double max_tokens_;\n  const double fill_rate_;\n  double tokens_;\n  MonotonicTime last_fill_;\n  TimeSource& time_source_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/utility.cc",
    "content": "#include \"common/common/utility.h\"\n\n#include <array>\n#include <chrono>\n#include <cmath>\n#include <cstdint>\n#include <iterator>\n#include <regex>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/hash.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/time/time.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\n\nnamespace {\n\nclass SpecifierConstantValues {\npublic:\n  // This captures three groups: subsecond-specifier, subsecond-specifier width and\n  // second-specifier.\n  const std::regex PATTERN{\"(%([1-9])?f)|(%s)\", std::regex::optimize};\n};\n\nusing SpecifierConstants = ConstSingleton<SpecifierConstantValues>;\nusing UnsignedMilliseconds = std::chrono::duration<uint64_t, std::milli>;\n\n} // namespace\n\nconst std::string errorDetails(int error_code) {\n#ifndef WIN32\n  // clang-format off\n  return strerror(error_code);\n  // clang-format on\n#else\n  // Windows error codes do not correspond to POSIX errno values\n  // Use FormatMessage, strip trailing newline, and return \"Unknown error\" on failure (as on POSIX).\n  // Failures will usually be due to the error message not being found.\n  char* buffer = NULL;\n  DWORD msg_size = FormatMessage(\n      FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_ALLOCATE_BUFFER,\n      NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR)&buffer, 0, NULL);\n  if (msg_size == 0) {\n    return \"Unknown error\";\n  }\n  if (msg_size > 1 && buffer[msg_size - 2] == '\\r' && buffer[msg_size - 1] == '\\n') {\n    msg_size -= 2;\n  }\n  std::string error_details(buffer, msg_size);\n  ASSERT(LocalFree(buffer) == NULL);\n  return 
error_details;\n#endif\n}\n\nstd::string DateFormatter::fromTime(const SystemTime& time) const {\n  struct CachedTime {\n    // The string length of a number of seconds since the Epoch. E.g. for \"1528270093\", the length\n    // is 10.\n    size_t seconds_length;\n\n    // A container object to hold a absl::FormatTime string, its timestamp (in seconds) and a list\n    // of position offsets for each specifier found in a format string.\n    struct Formatted {\n      // The resulted string after format string is passed to absl::FormatTime at a given point in\n      // time.\n      std::string str;\n\n      // A timestamp (in seconds) when this object is created.\n      std::chrono::seconds epoch_time_seconds;\n\n      // List of offsets for each specifier found in a format string. This is needed to compensate\n      // the position of each recorded specifier due to the possible size change of the previous\n      // segment (after being formatted).\n      SpecifierOffsets specifier_offsets;\n    };\n    // A map is used to keep different formatted format strings at a given second.\n    absl::node_hash_map<std::string, const Formatted> formatted;\n  };\n  static thread_local CachedTime cached_time;\n\n  const std::chrono::nanoseconds epoch_time_ns =\n      std::chrono::duration_cast<std::chrono::nanoseconds>(time.time_since_epoch());\n\n  const std::chrono::seconds epoch_time_seconds =\n      std::chrono::duration_cast<std::chrono::seconds>(epoch_time_ns);\n\n  const auto& item = cached_time.formatted.find(raw_format_string_);\n  if (item == cached_time.formatted.end() ||\n      item->second.epoch_time_seconds != epoch_time_seconds) {\n    // Remove all the expired cached items.\n    for (auto it = cached_time.formatted.cbegin(); it != cached_time.formatted.cend();) {\n      if (it->second.epoch_time_seconds != epoch_time_seconds) {\n        auto next_it = std::next(it);\n        cached_time.formatted.erase(it);\n        it = next_it;\n      } else {\n        ++it;\n  
    }\n    }\n\n    const time_t current_time = std::chrono::system_clock::to_time_t(time);\n\n    // Build a new formatted format string at current time.\n    CachedTime::Formatted formatted;\n    const std::string seconds_str = fmt::format_int(epoch_time_seconds.count()).str();\n    formatted.str =\n        fromTimeAndPrepareSpecifierOffsets(current_time, formatted.specifier_offsets, seconds_str);\n    cached_time.seconds_length = seconds_str.size();\n\n    // Stamp the formatted string using the current epoch time in seconds, and then cache it in.\n    formatted.epoch_time_seconds = epoch_time_seconds;\n    cached_time.formatted.emplace(std::make_pair(raw_format_string_, formatted));\n  }\n\n  const auto& formatted = cached_time.formatted.at(raw_format_string_);\n  ASSERT(specifiers_.size() == formatted.specifier_offsets.size());\n\n  // Copy the current cached formatted format string, then replace its subseconds part (when it has\n  // non-zero width) by correcting its position using prepared subseconds offsets.\n  std::string formatted_str = formatted.str;\n  std::string nanoseconds = fmt::format_int(epoch_time_ns.count()).str();\n  // Special case handling for beginning of time, we should never need to do this outside of\n  // tests or a time machine.\n  if (nanoseconds.size() < 10) {\n    nanoseconds = std::string(10 - nanoseconds.size(), '0') + nanoseconds;\n  }\n\n  for (size_t i = 0; i < specifiers_.size(); ++i) {\n    const auto& specifier = specifiers_.at(i);\n\n    // When specifier.width_ is zero, skip the replacement. 
This is the last segment or it has no\n    // specifier.\n    if (specifier.width_ > 0 && !specifier.second_) {\n      ASSERT(specifier.position_ + formatted.specifier_offsets.at(i) < formatted_str.size());\n      formatted_str.replace(specifier.position_ + formatted.specifier_offsets.at(i),\n                            specifier.width_,\n                            nanoseconds.substr(cached_time.seconds_length, specifier.width_));\n    }\n  }\n\n  ASSERT(formatted_str.size() == formatted.str.size());\n  return formatted_str;\n}\n\nvoid DateFormatter::parse(const std::string& format_string) {\n  std::string suffix = format_string;\n  std::smatch matched;\n  // \"step\" is the last specifier's position + the last specifier's width. It's not the current\n  // position in \"format_string\" because the length has changed. It is actually the index which\n  // points to the end of the last specifier in formatted string (generated in the future).\n  size_t step = 0;\n  while (regex_search(suffix, matched, SpecifierConstants::get().PATTERN)) {\n    // The std::smatch matched for (%([1-9])?f)|(%s): [all, subsecond-specifier, subsecond-specifier\n    // width, second-specifier].\n    const std::string& width_specifier = matched[2];\n    const std::string& second_specifier = matched[3];\n\n    // In the template string to be used in runtime substitution, the width is the number of\n    // characters to be replaced.\n    const size_t width = width_specifier.empty() ? 9 : width_specifier.at(0) - '0';\n\n    ASSERT(!suffix.empty());\n    // This records matched position, the width of current subsecond pattern, and also the string\n    // segment before the matched position. These values will be used later at data path.\n    specifiers_.emplace_back(\n        second_specifier.empty()\n            ? 
Specifier(step + matched.position(), width, suffix.substr(0, matched.position()))\n            : Specifier(step + matched.position(), suffix.substr(0, matched.position())));\n    step = specifiers_.back().position_ + specifiers_.back().width_;\n    suffix = matched.suffix();\n  }\n\n  // To capture the segment after the last specifier pattern of a format string by creating a zero\n  // width specifier. E.g. %3f-this-is-the-last-%s-segment-%Y-until-this.\n  if (!suffix.empty()) {\n    Specifier specifier(step, 0, suffix);\n    specifiers_.emplace_back(specifier);\n  }\n}\n\nstd::string\nDateFormatter::fromTimeAndPrepareSpecifierOffsets(time_t time, SpecifierOffsets& specifier_offsets,\n                                                  const std::string& seconds_str) const {\n  std::string formatted_time;\n\n  int32_t previous = 0;\n  specifier_offsets.reserve(specifiers_.size());\n  for (const auto& specifier : specifiers_) {\n    std::string current_format =\n        absl::FormatTime(specifier.segment_, absl::FromTimeT(time), absl::UTCTimeZone());\n    absl::StrAppend(&formatted_time, current_format,\n                    specifier.second_ ? seconds_str : std::string(specifier.width_, '?'));\n\n    // This computes and saves offset of each specifier's pattern to correct its position after the\n    // previous string segment is formatted. An offset can be a negative value.\n    //\n    // If the current specifier is a second specifier (%s), it needs to be corrected by 2.\n    const int32_t offset =\n        (current_format.length() + (specifier.second_ ? 
(seconds_str.size() - 2) : 0)) -\n        specifier.segment_.size();\n    specifier_offsets.emplace_back(previous + offset);\n    previous += offset;\n  }\n\n  return formatted_time;\n}\n\nstd::string DateFormatter::now(TimeSource& time_source) {\n  return fromTime(time_source.systemTime());\n}\n\nConstMemoryStreamBuffer::ConstMemoryStreamBuffer(const char* data, size_t size) {\n  // std::streambuf won't modify `data`, but the interface still requires a char* for convenience,\n  // so we need to const_cast.\n  char* ptr = const_cast<char*>(data);\n\n  this->setg(ptr, ptr, ptr + size);\n}\n\nInputConstMemoryStream::InputConstMemoryStream(const char* data, size_t size)\n    : ConstMemoryStreamBuffer{data, size}, std::istream{static_cast<std::streambuf*>(this)} {}\n\nbool DateUtil::timePointValid(SystemTime time_point) {\n  return std::chrono::duration_cast<std::chrono::milliseconds>(time_point.time_since_epoch())\n             .count() != 0;\n}\n\nbool DateUtil::timePointValid(MonotonicTime time_point) {\n  return std::chrono::duration_cast<std::chrono::milliseconds>(time_point.time_since_epoch())\n             .count() != 0;\n}\n\nuint64_t DateUtil::nowToMilliseconds(TimeSource& time_source) {\n  const SystemTime& now = time_source.systemTime();\n  return std::chrono::time_point_cast<UnsignedMilliseconds>(now).time_since_epoch().count();\n}\n\nconst char StringUtil::WhitespaceChars[] = \" \\t\\f\\v\\n\\r\";\n\nconst char* StringUtil::strtoull(const char* str, uint64_t& out, int base) {\n  if (strlen(str) == 0) {\n    return nullptr;\n  }\n\n  char* end_ptr;\n  errno = 0;\n  out = std::strtoull(str, &end_ptr, base);\n  if (end_ptr == str || (out == ULLONG_MAX && errno == ERANGE)) {\n    return nullptr;\n  } else {\n    return end_ptr;\n  }\n}\n\nbool StringUtil::atoull(const char* str, uint64_t& out, int base) {\n  const char* end_ptr = StringUtil::strtoull(str, out, base);\n  if (end_ptr == nullptr || *end_ptr != '\\0') {\n    return false;\n  } else {\n    return 
true;\n  }\n}\n\nabsl::string_view StringUtil::ltrim(absl::string_view source) {\n  const absl::string_view::size_type pos = source.find_first_not_of(WhitespaceChars);\n  if (pos != absl::string_view::npos) {\n    source.remove_prefix(pos);\n  } else {\n    source.remove_prefix(source.size());\n  }\n  return source;\n}\n\nabsl::string_view StringUtil::rtrim(absl::string_view source) {\n  const absl::string_view::size_type pos = source.find_last_not_of(WhitespaceChars);\n  if (pos != absl::string_view::npos) {\n    source.remove_suffix(source.size() - pos - 1);\n  } else {\n    source.remove_suffix(source.size());\n  }\n  return source;\n}\n\nabsl::string_view StringUtil::trim(absl::string_view source) { return ltrim(rtrim(source)); }\n\nabsl::string_view StringUtil::removeTrailingCharacters(absl::string_view source, char ch) {\n  const absl::string_view::size_type pos = source.find_last_not_of(ch);\n  if (pos != absl::string_view::npos) {\n    source.remove_suffix(source.size() - pos - 1);\n  } else {\n    source.remove_suffix(source.size());\n  }\n  return source;\n}\n\nbool StringUtil::findToken(absl::string_view source, absl::string_view delimiters,\n                           absl::string_view key_token, bool trim_whitespace) {\n  const auto tokens = splitToken(source, delimiters, trim_whitespace);\n  if (trim_whitespace) {\n    for (const auto& token : tokens) {\n      if (key_token == trim(token)) {\n        return true;\n      }\n    }\n    return false;\n  }\n\n  return std::find(tokens.begin(), tokens.end(), key_token) != tokens.end();\n}\n\nbool StringUtil::caseFindToken(absl::string_view source, absl::string_view delimiters,\n                               absl::string_view key_token, bool trim_whitespace) {\n  const auto tokens = splitToken(source, delimiters, trim_whitespace);\n  std::function<bool(absl::string_view)> predicate;\n\n  if (trim_whitespace) {\n    predicate = [&](absl::string_view token) {\n      return absl::EqualsIgnoreCase(key_token, 
trim(token));\n    };\n  } else {\n    predicate = [&](absl::string_view token) { return absl::EqualsIgnoreCase(key_token, token); };\n  }\n\n  return std::find_if(tokens.begin(), tokens.end(), predicate) != tokens.end();\n}\n\nabsl::string_view StringUtil::cropRight(absl::string_view source, absl::string_view delimiter) {\n  const absl::string_view::size_type pos = source.find(delimiter);\n  if (pos != absl::string_view::npos) {\n    source.remove_suffix(source.size() - pos);\n  }\n  return source;\n}\n\nabsl::string_view StringUtil::cropLeft(absl::string_view source, absl::string_view delimiter) {\n  const absl::string_view::size_type pos = source.find(delimiter);\n  if (pos != absl::string_view::npos) {\n    source.remove_prefix(pos + delimiter.size());\n  }\n  return source;\n}\n\nstd::vector<absl::string_view> StringUtil::splitToken(absl::string_view source,\n                                                      absl::string_view delimiters,\n                                                      bool keep_empty_string,\n                                                      bool trim_whitespace) {\n  std::vector<absl::string_view> result;\n  if (keep_empty_string) {\n    result = absl::StrSplit(source, absl::ByAnyChar(delimiters));\n  } else {\n    if (trim_whitespace) {\n      result = absl::StrSplit(source, absl::ByAnyChar(delimiters), absl::SkipWhitespace());\n    } else {\n      result = absl::StrSplit(source, absl::ByAnyChar(delimiters), absl::SkipEmpty());\n    }\n  }\n\n  if (trim_whitespace) {\n    for_each(result.begin(), result.end(), [](auto& v) { v = trim(v); });\n  }\n  return result;\n}\n\nstd::string StringUtil::removeTokens(absl::string_view source, absl::string_view delimiters,\n                                     const CaseUnorderedSet& tokens_to_remove,\n                                     absl::string_view joiner) {\n  auto values = Envoy::StringUtil::splitToken(source, delimiters, false, true);\n  auto end = std::remove_if(values.begin(), 
values.end(),\n                            [&](absl::string_view t) { return tokens_to_remove.count(t) != 0; });\n  return absl::StrJoin(values.begin(), end, joiner);\n}\n\nuint32_t StringUtil::itoa(char* out, size_t buffer_size, uint64_t i) {\n  // The maximum size required for an unsigned 64-bit integer is 21 chars (including null).\n  if (buffer_size < 21) {\n    throw std::invalid_argument(\"itoa buffer too small\");\n  }\n\n  char* current = out;\n  do {\n    *current++ = \"0123456789\"[i % 10];\n    i /= 10;\n  } while (i > 0);\n\n  for (uint64_t i = 0, j = current - out - 1; i < j; i++, j--) {\n    char c = out[i];\n    out[i] = out[j];\n    out[j] = c;\n  }\n\n  *current = 0;\n  return static_cast<uint32_t>(current - out);\n}\n\nsize_t StringUtil::strlcpy(char* dst, const char* src, size_t size) {\n  strncpy(dst, src, size - 1);\n  dst[size - 1] = '\\0';\n  return strlen(src);\n}\n\nstd::string StringUtil::subspan(absl::string_view source, size_t start, size_t end) {\n  return std::string(source.data() + start, end - start);\n}\n\nstd::string StringUtil::escape(const std::string& source) {\n  std::string ret;\n\n  // Prevent unnecessary allocation by allocating 2x original size.\n  ret.reserve(source.length() * 2);\n  for (char c : source) {\n    switch (c) {\n    case '\\r':\n      ret += \"\\\\r\";\n      break;\n    case '\\n':\n      ret += \"\\\\n\";\n      break;\n    case '\\t':\n      ret += \"\\\\t\";\n      break;\n    case '\"':\n      ret += \"\\\\\\\"\";\n      break;\n    default:\n      ret += c;\n      break;\n    }\n  }\n\n  return ret;\n}\n\nconst std::string& getDefaultDateFormat() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"%Y-%m-%dT%H:%M:%E3SZ\");\n}\n\nstd::string AccessLogDateTimeFormatter::fromTime(const SystemTime& system_time) {\n  struct CachedTime {\n    std::chrono::seconds epoch_time_seconds;\n    std::string formatted_time;\n  };\n  static thread_local CachedTime cached_time;\n\n  const std::chrono::milliseconds epoch_time_ms 
=\n      std::chrono::duration_cast<std::chrono::milliseconds>(system_time.time_since_epoch());\n\n  const std::chrono::seconds epoch_time_seconds =\n      std::chrono::duration_cast<std::chrono::seconds>(epoch_time_ms);\n\n  if (cached_time.formatted_time.empty() || cached_time.epoch_time_seconds != epoch_time_seconds) {\n    cached_time.formatted_time = absl::FormatTime(\n        getDefaultDateFormat(), absl::FromChrono(system_time), absl::UTCTimeZone());\n    cached_time.epoch_time_seconds = epoch_time_seconds;\n  } else {\n    // Overwrite the digits in the \".000Z\" at the end of the string with the\n    // millisecond count from the input time.\n    ASSERT(cached_time.formatted_time.length() == 24);\n    size_t offset = cached_time.formatted_time.length() - 4;\n    uint32_t msec = epoch_time_ms.count() % 1000;\n    cached_time.formatted_time[offset++] = ('0' + (msec / 100));\n    msec %= 100;\n    cached_time.formatted_time[offset++] = ('0' + (msec / 10));\n    msec %= 10;\n    cached_time.formatted_time[offset++] = ('0' + msec);\n  }\n\n  return cached_time.formatted_time;\n}\n\nconst std::string& StringUtil::nonEmptyStringOrDefault(const std::string& s,\n                                                       const std::string& default_value) {\n  return s.empty() ? 
default_value : s;\n}\n\nstd::string StringUtil::toUpper(absl::string_view s) {\n  std::string upper_s;\n  upper_s.reserve(s.size());\n  std::transform(s.cbegin(), s.cend(), std::back_inserter(upper_s), absl::ascii_toupper);\n  return upper_s;\n}\n\nbool StringUtil::CaseInsensitiveCompare::operator()(absl::string_view lhs,\n                                                    absl::string_view rhs) const {\n  return absl::EqualsIgnoreCase(lhs, rhs);\n}\n\nuint64_t StringUtil::CaseInsensitiveHash::operator()(absl::string_view key) const {\n  return HashUtil::djb2CaseInsensitiveHash(key);\n}\n\nstd::string StringUtil::removeCharacters(const absl::string_view& str,\n                                         const IntervalSet<size_t>& remove_characters) {\n  std::string ret;\n  size_t pos = 0;\n  const auto intervals = remove_characters.toVector();\n  std::vector<absl::string_view> pieces;\n  pieces.reserve(intervals.size());\n  for (const auto& [left_bound, right_bound] : intervals) {\n    if (left_bound != pos) {\n      ASSERT(right_bound <= str.size());\n      pieces.push_back(str.substr(pos, left_bound - pos));\n    }\n    pos = right_bound;\n  }\n  if (pos != str.size()) {\n    pieces.push_back(str.substr(pos));\n  }\n  return absl::StrJoin(pieces, \"\");\n}\n\nbool Primes::isPrime(uint32_t x) {\n  if (x < 4) {\n    return true; // eliminates special-casing 2.\n  } else if ((x & 1) == 0) {\n    return false; // eliminates even numbers >2.\n  }\n\n  uint32_t limit = sqrt(x);\n  for (uint32_t factor = 3; factor <= limit; factor += 2) {\n    if ((x % factor) == 0) {\n      return false;\n    }\n  }\n  return true;\n}\n\nuint32_t Primes::findPrimeLargerThan(uint32_t x) {\n  x += (x % 2) + 1;\n  while (!isPrime(x)) {\n    x += 2;\n  }\n  return x;\n}\n\n// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm\nvoid WelfordStandardDeviation::update(double new_value) {\n  ++count_;\n  const double delta = new_value - mean_;\n  mean_ += delta / 
count_;\n  const double delta2 = new_value - mean_;\n  m2_ += delta * delta2;\n}\n\ndouble WelfordStandardDeviation::computeVariance() const {\n  if (count_ < 2) {\n    return std::nan(\"\");\n  }\n  return m2_ / (count_ - 1);\n}\n\ndouble WelfordStandardDeviation::computeStandardDeviation() const {\n  const double variance = computeVariance();\n  // It seems very difficult for variance to go negative, but from the calculation in update()\n  // above, I can't quite convince myself it's impossible, so put in a guard to be sure.\n  return (std::isnan(variance) || variance < 0) ? std::nan(\"\") : sqrt(variance);\n}\n\nInlineString::InlineString(const char* str, size_t size) : size_(size) {\n  RELEASE_ASSERT(size <= 0xffffffff, \"size must fit in 32 bits\");\n  memcpy(data_, str, size);\n}\n\nvoid ExceptionUtil::throwEnvoyException(const std::string& message) {\n  throw EnvoyException(message);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/utility.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <set>\n#include <sstream>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/interval_set.h\"\n#include \"envoy/common/time.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/hash.h\"\n#include \"common/common/non_copyable.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\n\n/**\n * Retrieve string description of error code\n * @param int error code\n * @return const std::string error detail description\n */\nconst std::string errorDetails(int error_code);\n\n/**\n * Utility class for formatting dates given an absl::FormatTime style format string.\n */\nclass DateFormatter {\npublic:\n  DateFormatter(const std::string& format_string) : raw_format_string_(format_string) {\n    parse(format_string);\n  }\n\n  /**\n   * @return std::string representing the GMT/UTC time based on the input time.\n   */\n  std::string fromTime(const SystemTime& time) const;\n\n  /**\n   * @param time_source time keeping source.\n   * @return std::string representing the GMT/UTC time of a TimeSource based on the format string.\n   */\n  std::string now(TimeSource& time_source);\n\n  /**\n   * @return std::string the format string used.\n   */\n  const std::string& formatString() const { return raw_format_string_; }\n\nprivate:\n  void parse(const std::string& format_string);\n\n  using SpecifierOffsets = std::vector<int32_t>;\n  std::string fromTimeAndPrepareSpecifierOffsets(time_t time, SpecifierOffsets& specifier_offsets,\n                                                 const std::string& seconds_str) const;\n\n  // A container to hold a specifiers (%f, %Nf, %s) found in a format string.\n  struct Specifier {\n    // To build a subsecond-specifier.\n    Specifier(const size_t position, const size_t width, const std::string& segment)\n        : position_(position), width_(width), segment_(segment), second_(false) {}\n\n    // To build a second-specifier (%s), 
the number of characters to be replaced is always 2.\n    Specifier(const size_t position, const std::string& segment)\n        : position_(position), width_(2), segment_(segment), second_(true) {}\n\n    // The position/index of a specifier in a format string.\n    const size_t position_;\n\n    // The width of a specifier, e.g. given %3f, the width is 3. If %f is set as the\n    // specifier, the width value should be 9 (the number of nanosecond digits).\n    const size_t width_;\n\n    // The string before the current specifier's position and after the previous found specifier. A\n    // segment may include absl::FormatTime accepted specifiers. E.g. given\n    // \"%3f-this-i%s-a-segment-%4f\", the current specifier is \"%4f\" and the segment is\n    // \"-this-i%s-a-segment-\".\n    const std::string segment_;\n\n    // As an indication that this specifier is a %s (expect to be replaced by seconds since the\n    // epoch).\n    const bool second_;\n  };\n\n  // This holds all specifiers found in a given format string.\n  std::vector<Specifier> specifiers_;\n\n  // This is the format string as supplied in configuration, e.g. \"foo %3f bar\".\n  const std::string raw_format_string_;\n};\n\n/**\n * Utility class for access log date/time format with milliseconds support.\n */\nclass AccessLogDateTimeFormatter {\npublic:\n  static std::string fromTime(const SystemTime& time);\n};\n\n/**\n * Real-world time implementation of TimeSource.\n */\nclass RealTimeSource : public TimeSource {\npublic:\n  // TimeSource\n  SystemTime systemTime() override { return std::chrono::system_clock::now(); }\n  MonotonicTime monotonicTime() override { return std::chrono::steady_clock::now(); }\n};\n\n/**\n * Class used for creating non-copying std::istream's. 
See InputConstMemoryStream below.\n */\nclass ConstMemoryStreamBuffer : public std::streambuf {\npublic:\n  ConstMemoryStreamBuffer(const char* data, size_t size);\n};\n\n/**\n * std::istream class similar to std::istringstream, except that it provides a view into a region of\n * constant memory. It can be more efficient than std::istringstream because it doesn't copy the\n * provided string.\n *\n * See https://stackoverflow.com/a/13059195/4447365.\n */\nclass InputConstMemoryStream : public virtual ConstMemoryStreamBuffer, public std::istream {\npublic:\n  InputConstMemoryStream(const char* data, size_t size);\n};\n\n/**\n * Utility class for date/time helpers.\n */\nclass DateUtil {\npublic:\n  /**\n   * @return whether a time_point contains a valid, not default constructed time.\n   */\n  static bool timePointValid(SystemTime time_point);\n\n  /**\n   * @return whether a time_point contains a valid, not default constructed time.\n   */\n  static bool timePointValid(MonotonicTime time_point);\n\n  /**\n   * @param time_source time keeping source.\n   * @return uint64_t the number of milliseconds since the epoch.\n   */\n  static uint64_t nowToMilliseconds(TimeSource& time_source);\n};\n\n/**\n * Utility routines for working with strings.\n */\nclass StringUtil {\npublic:\n  /**\n   * Callable struct that returns the result of string comparison ignoring case.\n   * @param lhs supplies the first string view.\n   * @param rhs supplies the second string view.\n   * @return true if strings are semantically the same and false otherwise.\n   */\n  struct CaseInsensitiveCompare {\n    // Enable heterogeneous lookup (https://abseil.io/tips/144)\n    using is_transparent = void; // NOLINT(readability-identifier-naming)\n    bool operator()(absl::string_view lhs, absl::string_view rhs) const;\n  };\n\n  /**\n   * Callable struct that returns the hash representation of a case-insensitive string_view input.\n   * @param key supplies the string view.\n   * @return uint64_t 
hash representation of the supplied string view.\n   */\n  struct CaseInsensitiveHash {\n    // Enable heterogeneous lookup (https://abseil.io/tips/144)\n    using is_transparent = void; // NOLINT(readability-identifier-naming)\n    uint64_t operator()(absl::string_view key) const;\n  };\n\n  /**\n   * Definition of unordered set of case-insensitive std::string.\n   */\n  using CaseUnorderedSet =\n      absl::flat_hash_set<std::string, CaseInsensitiveHash, CaseInsensitiveCompare>;\n\n  static const char WhitespaceChars[];\n\n  /**\n   * Convert a string to an unsigned long, checking for error.\n   * @return pointer to the remainder of 'str' if successful, nullptr otherwise.\n   */\n  static const char* strtoull(const char* str, uint64_t& out, int base = 10);\n\n  /**\n   * Convert a string to an unsigned long, checking for error.\n   *\n   * Consider absl::SimpleAtoi instead if using base 10.\n   *\n   * @param return true if successful, false otherwise.\n   */\n  static bool atoull(const char* str, uint64_t& out, int base = 10);\n\n  /**\n   * Convert an unsigned integer to a base 10 string as fast as possible.\n   * @param out supplies the string to fill.\n   * @param out_len supplies the length of the output buffer. 
Must be >= MIN_ITOA_OUT_LEN.\n   * @param i supplies the number to convert.\n   * @return the size of the string, not including the null termination.\n   */\n  static constexpr size_t MIN_ITOA_OUT_LEN = 21;\n  static uint32_t itoa(char* out, size_t out_len, uint64_t i);\n\n  /**\n   * Trim leading whitespace from a string view.\n   * @param source supplies the string view to be trimmed.\n   * @return trimmed string view.\n   */\n  static absl::string_view ltrim(absl::string_view source);\n\n  /**\n   * Trim trailing whitespaces from a string view.\n   * @param source supplies the string view to be trimmed.\n   * @return trimmed string view.\n   */\n  static absl::string_view rtrim(absl::string_view source);\n\n  /**\n   * Trim leading and trailing whitespaces from a string view.\n   * @param source supplies the string view to be trimmed.\n   * @return trimmed string view.\n   */\n  static absl::string_view trim(absl::string_view source);\n\n  /**\n   * Removes any specific trailing characters from the end of a string_view.\n   *\n   * @param source the string_view.\n   * @param ch the character to strip from the end of the string_view.\n   * @return a view of the string with the end characters removed.\n   */\n  static absl::string_view removeTrailingCharacters(absl::string_view source, char ch);\n\n  /**\n   * Look up for an exactly token in a delimiter-separated string view.\n   * @param source supplies the delimiter-separated string view.\n   * @param multi-delimiter supplies chars used to split the delimiter-separated string view.\n   * @param token supplies the lookup string view.\n   * @param trim_whitespace remove leading and trailing whitespaces from each of the split\n   * string views; default = true.\n   * @return true if found and false otherwise.\n   *\n   * E.g.,\n   *\n   * findToken(\"A=5; b\", \"=;\", \"5\")   . true\n   * findToken(\"A=5; b\", \"=;\", \"A=5\") . false\n   * findToken(\"A=5; b\", \"=;\", \"A\")   . 
true\n   * findToken(\"A=5; b\", \"=;\", \"b\")   . true\n   * findToken(\"A=5\", \".\", \"A=5\")     . true\n   */\n  static bool findToken(absl::string_view source, absl::string_view delimiters,\n                        absl::string_view token, bool trim_whitespace = true);\n\n  /**\n   * Look up for a token in a delimiter-separated string view ignoring case\n   * sensitivity.\n   * @param source supplies the delimiter-separated string view.\n   * @param multi-delimiter supplies chars used to split the delimiter-separated string view.\n   * @param token supplies the lookup string view.\n   * @param trim_whitespace remove leading and trailing whitespaces from each of the split\n   * string views; default = true.\n   * @return true if found a string that is semantically the same and false otherwise.\n   *\n   * E.g.,\n   *\n   * findToken(\"hello; world\", \";\", \"HELLO\")   . true\n   */\n  static bool caseFindToken(absl::string_view source, absl::string_view delimiters,\n                            absl::string_view key_token, bool trim_whitespace = true);\n\n  /**\n   * Crop characters from a string view starting at the first character of the matched\n   * delimiter string view until the end of the source string view.\n   * @param source supplies the string view to be processed.\n   * @param delimiter supplies the string view that delimits the starting point for deletion.\n   * @return sub-string of the string view if any.\n   *\n   * E.g.,\n   *\n   * cropRight(\"foo ; ; ; ; ; ; \", \";\") == \"foo \"\n   */\n  static absl::string_view cropRight(absl::string_view source, absl::string_view delimiters);\n\n  /**\n   * Crop characters from a string view starting at the first character of the matched\n   * delimiter string view until the beginning of the source string view.\n   * @param source supplies the string view to be processed.\n   * @param delimiter supplies the string view that delimits the starting point for deletion.\n   * @return sub-string of the 
string view if any.\n   *\n   * E.g.,\n   *\n   * cropLeft(\"foo ; ; ; ; ; \", \";\") == \" ; ; ; ; \"\n   */\n  static absl::string_view cropLeft(absl::string_view source, absl::string_view delimiters);\n\n  /**\n   * Split a delimiter-separated string view.\n   * @param source supplies the delimiter-separated string view.\n   * @param multi-delimiter supplies chars used to split the delimiter-separated string view.\n   * @param keep_empty_string result contains empty strings if the string starts or ends with\n   * 'split', or if instances of 'split' are adjacent; default = false.\n   * @param trim_whitespace remove leading and trailing whitespaces from each of the split\n   * string views; default = false.\n   * @return vector containing views of the split strings\n   */\n  static std::vector<absl::string_view> splitToken(absl::string_view source,\n                                                   absl::string_view delimiters,\n                                                   bool keep_empty_string = false,\n                                                   bool trim_whitespace = false);\n\n  /**\n   * Remove tokens from a delimiter-separated string view. The tokens are trimmed before\n   * they are compared ignoring case with the elements of 'tokens_to_remove'. 
The output is\n   * built from the trimmed tokens preserving case.\n   * @param source supplies the delimiter-separated string view.\n   * @param multi-delimiters supplies chars used to split the delimiter-separated string view.\n   * @param tokens_to_remove supplies a set of tokens which should not appear in the result.\n   * @param joiner contains a string used between tokens in the result.\n   * @return string of the remaining joined tokens.\n   */\n  static std::string removeTokens(absl::string_view source, absl::string_view delimiters,\n                                  const CaseUnorderedSet& tokens_to_remove,\n                                  absl::string_view joiner);\n\n  /**\n   * Size-bounded string copying and concatenation\n   */\n  static size_t strlcpy(char* dst, const char* src, size_t size);\n\n  /**\n   * Version of substr() that operates on a start and end index instead of a start index and a\n   * length.\n   * @return string substring starting at start, and ending right before end.\n   */\n  static std::string subspan(absl::string_view source, size_t start, size_t end);\n\n  /**\n   * Escape strings for logging purposes. 
Returns a copy of the string with\n   * \\n, \\r, \\t, and \" (double quote) escaped.\n   * @param source supplies the string to escape.\n   * @return escaped string.\n   */\n  static std::string escape(const std::string& source);\n\n  /**\n   * Provide a default value for a string if empty.\n   * @param s string.\n   * @param default_value replacement for s if empty.\n   * @return s is !s.empty() otherwise default_value.\n   */\n  static const std::string& nonEmptyStringOrDefault(const std::string& s,\n                                                    const std::string& default_value);\n\n  /**\n   * Convert a string to upper case.\n   * @param s string.\n   * @return std::string s converted to upper case.\n   */\n  static std::string toUpper(absl::string_view s);\n\n  /**\n   * Removes all the character indices from str contained in the interval-set.\n   * @param str the string containing the characters to be removed.\n   * @param remove_characters the set of character-intervals.\n   * @return std::string the string with the desired characters removed.\n   */\n  static std::string removeCharacters(const absl::string_view& str,\n                                      const IntervalSet<size_t>& remove_characters);\n};\n\n/**\n * Utilities for finding primes.\n */\nclass Primes {\npublic:\n  /**\n   * Determines whether x is prime.\n   */\n  static bool isPrime(uint32_t x);\n\n  /**\n   * Finds the next prime number larger than x.\n   */\n  static uint32_t findPrimeLargerThan(uint32_t x);\n};\n\n/**\n * Utilities for working with weighted clusters.\n */\nclass WeightedClusterUtil {\npublic:\n  /*\n   * Returns a WeightedClusterEntry from the given weighted clusters based on\n   * the total cluster weight and a random value.\n   * @param weighted_clusters a vector of WeightedClusterEntry instances.\n   * @param total_cluster_weight the total weight of all clusters.\n   * @param random_value the random value.\n   * @param ignore_overflow whether to ignore cluster 
weight overflows.\n   * @return a WeightedClusterEntry.\n   */\n  template <typename WeightedClusterEntry>\n  static const WeightedClusterEntry&\n  pickCluster(const std::vector<WeightedClusterEntry>& weighted_clusters,\n              const uint64_t total_cluster_weight, const uint64_t random_value,\n              const bool ignore_overflow) {\n    uint64_t selected_value = random_value % total_cluster_weight;\n    uint64_t begin = 0;\n    uint64_t end = 0;\n\n    // Find the right cluster to route to based on the interval in which\n    // the selected value falls. The intervals are determined as\n    // [0, cluster1_weight), [cluster1_weight, cluster1_weight+cluster2_weight),..\n    for (const WeightedClusterEntry& cluster : weighted_clusters) {\n      end = begin + cluster->clusterWeight();\n      if (!ignore_overflow) {\n        // end > total_cluster_weight: This case can only occur with Runtimes,\n        // when the user specifies invalid weights such that\n        // sum(weights) > total_cluster_weight.\n        ASSERT(end <= total_cluster_weight);\n      }\n\n      if (selected_value >= begin && selected_value < end) {\n        return cluster;\n      }\n      begin = end;\n    }\n\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n};\n\n/**\n * Maintains sets of numeric intervals. As new intervals are added, existing ones in the\n * set are combined so that no overlapping intervals remain in the representation.\n *\n * Value can be any type that is comparable with <, ==, and >.\n */\ntemplate <typename Value> class IntervalSetImpl : public IntervalSet<Value> {\npublic:\n  // Interval is a pair of Values.\n  using Interval = typename IntervalSet<Value>::Interval;\n\n  void insert(Value left, Value right) override {\n    if (left == right) {\n      return;\n    }\n    ASSERT(left < right);\n\n    // There 3 cases where we'll decide the [left, right) is disjoint with the\n    // current contents, and just need to insert. 
But we'll structure the code\n    // to search for where existing interval(s) needs to be merged, and fall back\n    // to the disjoint insertion case.\n    if (!intervals_.empty()) {\n      const auto left_pos = intervals_.lower_bound(Interval(left, left));\n      if (left_pos != intervals_.end() && (right >= left_pos->first)) {\n        // upper_bound is exclusive, and we want to be inclusive.\n        auto right_pos = intervals_.upper_bound(Interval(right, right));\n        if (right_pos != intervals_.begin()) {\n          --right_pos;\n          if (right_pos->second >= left) {\n            // Both bounds overlap, with one or more existing intervals.\n            left = std::min(left_pos->first, left);\n            right = std::max(right_pos->second, right);\n            ++right_pos; // erase is non-inclusive on upper bound.\n            intervals_.erase(left_pos, right_pos);\n          }\n        }\n      }\n    }\n    intervals_.insert(Interval(left, right));\n  }\n\n  std::vector<Interval> toVector() const override {\n    return std::vector<Interval>(intervals_.begin(), intervals_.end());\n  }\n\n  void clear() override { intervals_.clear(); }\n\nprivate:\n  struct Compare {\n    bool operator()(const Interval& a, const Interval& b) const { return a.second < b.first; }\n  };\n  std::set<Interval, Compare> intervals_; // Intervals do not overlap or abut.\n};\n\n/**\n * Hashing functor for use with enum class types.\n * This is needed for GCC 5.X; newer versions of GCC, as well as clang7, provide native hashing\n * specializations.\n */\nstruct EnumClassHash {\n  template <typename T> std::size_t operator()(T t) const {\n    return std::hash<std::size_t>()(static_cast<std::size_t>(t));\n  }\n};\n\n/**\n * Computes running standard-deviation using Welford's algorithm:\n * https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm\n */\nclass WelfordStandardDeviation {\npublic:\n  /**\n   * Accumulates a new value into the standard 
deviation.\n   * @param new_value the new value\n   */\n  void update(double new_value);\n\n  /**\n   * @return double the computed mean value.\n   */\n  double mean() const { return mean_; }\n\n  /**\n   * @return uint64_t the number of times update() was called\n   */\n  uint64_t count() const { return count_; }\n\n  /**\n   * @return double the standard deviation.\n   */\n  double computeStandardDeviation() const;\n\nprivate:\n  double computeVariance() const;\n\n  uint64_t count_{0};\n  double mean_{0};\n  double m2_{0};\n};\n\ntemplate <class Value> struct TrieEntry {\n  Value value_{};\n  std::array<std::unique_ptr<TrieEntry>, 256> entries_;\n};\n\n/**\n * A trie used for faster lookup with lookup time at most equal to the size of the key.\n */\ntemplate <class Value> struct TrieLookupTable {\n\n  /**\n   * Adds an entry to the Trie at the given Key.\n   * @param key the key used to add the entry.\n   * @param value the value to be associated with the key.\n   * @param overwrite_existing will overwrite the value when the value for a given key already\n   * exists.\n   * @return false when a value already exists for the given key.\n   */\n  bool add(absl::string_view key, Value value, bool overwrite_existing = true) {\n    TrieEntry<Value>* current = &root_;\n    for (uint8_t c : key) {\n      if (!current->entries_[c]) {\n        current->entries_[c] = std::make_unique<TrieEntry<Value>>();\n      }\n      current = current->entries_[c].get();\n    }\n    if (current->value_ && !overwrite_existing) {\n      return false;\n    }\n    current->value_ = value;\n    return true;\n  }\n\n  /**\n   * Finds the entry associated with the key.\n   * @param key the key used to find.\n   * @return the value associated with the key.\n   */\n  Value find(absl::string_view key) const {\n    const TrieEntry<Value>* current = &root_;\n    for (uint8_t c : key) {\n      current = current->entries_[c].get();\n      if (current == nullptr) {\n        return nullptr;\n      }\n   
 }\n    return current->value_;\n  }\n\n  /**\n   * Finds the entry associated with the longest prefix. Complexity is O(min(longest key prefix, key\n   * length))\n   * @param key the key used to find.\n   * @return the value matching the longest prefix based on the key.\n   */\n  Value findLongestPrefix(const char* key) const {\n    const TrieEntry<Value>* current = &root_;\n    const TrieEntry<Value>* result = nullptr;\n    while (uint8_t c = *key) {\n      if (current->value_) {\n        result = current;\n      }\n\n      // https://github.com/facebook/mcrouter/blob/master/mcrouter/lib/fbi/cpp/Trie-inl.h#L126-L143\n      current = current->entries_[c].get();\n      if (current == nullptr) {\n        return result ? result->value_ : nullptr;\n      }\n\n      key++;\n    }\n    return current ? current->value_ : result->value_;\n  }\n\n  TrieEntry<Value> root_;\n};\n\n/**\n * A global utility class to take care of all the exception throwing behaviors in header files.\n * Its functions simply forward the throwing into .cc file.\n */\nclass ExceptionUtil {\npublic:\n  [[noreturn]] static void throwEnvoyException(const std::string& message);\n};\n\n// Mix-in class for allocating classes with variable-sized inlined storage.\n//\n// Use this class by inheriting from it, ensuring that:\n//  - The variable sized array is declared as VarType[] as the last\n//    member variable of the class.\n//  - YourType accurately describes the type that will be stored there,\n//    to enable the compiler to perform correct alignment. No casting\n//    should be needed.\n//  - The class constructor is private, because you need to allocate the\n//    class with the placement new operator exposed in the protected section below.\n//    Constructing the class directly will not provide space for the\n//    variable-size data.\n//  - You expose a public factory method that returns a placement-new, e.g.\n//      static YourClass* alloc(size_t num_elements, constructor_args...) 
{\n//        new (num_elements * sizeof(VarType)) YourClass(constructor_args...);\n//      }\n//\n// See InlineString below for an example usage.\n//\n//\n// Perf note: The alignment will be correct and safe without further\n// consideration as long as there are no casts. But for micro-optimization,\n// consider this case:\n//   struct MyStruct : public InlineStorage { uint64_t a_; uint16_t b_; uint8_t data_[]; };\n// When compiled with a typical compiler on a 64-bit machine:\n//   sizeof(MyStruct) == 16, because the compiler will round up from 10 for uint64_t alignment.\n// So:\n//   calling new (6) MyStruct() causes an allocation of 16+6=22, rounded up to 24 bytes.\n// But data_ doesn't need 8-byte alignment, so it will wind up adjacent to the uint16_t.\n//   ((char*) my_struct.data) - ((char*) &my_struct) == 10\n// If we had instead declared data_[6], then the whole allocation would have fit in 16 bytes.\n// Instead:\n//   - the starting address of data will not be 8-byte aligned. This is not required\n//     by the C++ standard for a uint8_t, but may be suboptimal on some processors.\n//   - the 6 bytes of data will be at byte offsets 10 to 15, and bytes 16 to 23 will be\n//     unused. This may be surprising to some users, and suboptimal in resource usage.\n// One possible tweak is to declare data_ as a uint64_t[], or to use an `alignas`\n// declaration. 
As always, micro-optimizations should be informed by\n// microbenchmarks, showing the benefit.\nclass InlineStorage : public NonCopyable {\npublic:\n  // Custom delete operator to keep C++14 from using the global operator delete(void*, size_t),\n  // which would result in the compiler error:\n  // \"exception cleanup for this placement new selects non-placement operator delete\"\n  static void operator delete(void* address) { ::operator delete(address); }\n\nprotected:\n  /**\n   * @param object_size the size of the base object; supplied automatically by the compiler.\n   * @param data_size the amount of variable-size storage to be added, in bytes.\n   * @return a variable-size object based on data_size_bytes.\n   */\n  static void* operator new(size_t object_size, size_t data_size_bytes) {\n    return ::operator new(object_size + data_size_bytes);\n  }\n};\n\nclass InlineString;\nusing InlineStringPtr = std::unique_ptr<InlineString>;\n\n// Represents immutable string data, keeping the storage inline with the\n// object. These cannot be copied or held by value; they must be created\n// as unique pointers.\n//\n// Note: this is not yet proven better (smaller or faster) than std::string for\n// all applications, but memory-size improvements have been measured for one\n// application (Stats::SymbolTableImpl). 
This is presented here to serve as an\n// example of how to use InlineStorage.\nclass InlineString : public InlineStorage {\npublic:\n  /**\n   * @param str the string_view for which to create an InlineString\n   * @return a unique_ptr to the InlineString containing the bytes of str.\n   */\n  static InlineStringPtr create(absl::string_view str) {\n    return InlineStringPtr(new (str.size()) InlineString(str.data(), str.size()));\n  }\n\n  /**\n   * @return a std::string copy of the InlineString.\n   */\n  std::string toString() const { return std::string(data_, size_); }\n\n  /**\n   * @return a string_view into the InlineString.\n   */\n  absl::string_view toStringView() const { return {data_, size_}; }\n\nprivate:\n  // Constructor is declared private so that no one constructs one without the\n  // proper size allocation, to accommodate the variable-size buffer.\n  InlineString(const char* str, size_t size);\n\n  uint32_t size_;\n  char data_[];\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/win32/thread_impl.cc",
    "content": "#include <process.h>\n\n#include \"common/common/assert.h\"\n#include \"common/common/thread_impl.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\nThreadImplWin32::ThreadImplWin32(std::function<void()> thread_routine, OptionsOptConstRef options)\n    : thread_routine_(thread_routine) {\n  if (options) {\n    name_ = options->name_;\n    // TODO(jmarantz): set the thread name for task manager, etc, or pull the\n    // auto-generated name from the OS if options is not present.\n  }\n\n  RELEASE_ASSERT(Logger::Registry::initialized(), \"\");\n  thread_handle_ = reinterpret_cast<HANDLE>(::_beginthreadex(\n      nullptr, 0,\n      [](void* arg) -> unsigned int {\n        static_cast<ThreadImplWin32*>(arg)->thread_routine_();\n        return 0;\n      },\n      this, 0, nullptr));\n  RELEASE_ASSERT(thread_handle_ != 0, \"\");\n}\n\nThreadImplWin32::~ThreadImplWin32() { ::CloseHandle(thread_handle_); }\n\nvoid ThreadImplWin32::join() {\n  const DWORD rc = ::WaitForSingleObject(thread_handle_, INFINITE);\n  RELEASE_ASSERT(rc == WAIT_OBJECT_0, \"\");\n}\n\nThreadPtr ThreadFactoryImplWin32::createThread(std::function<void()> thread_routine,\n                                               OptionsOptConstRef options) {\n  return std::make_unique<ThreadImplWin32>(thread_routine, options);\n}\n\nThreadId ThreadFactoryImplWin32::currentThreadId() {\n  // TODO(mhoran): test this in windows please.\n  return ThreadId(static_cast<int64_t>(::GetCurrentThreadId()));\n}\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/common/win32/thread_impl.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\n/**\n * Wrapper for a win32 thread. We don't use std::thread because it eats exceptions and leads to\n * unusable stack traces.\n */\nclass ThreadImplWin32 : public Thread {\npublic:\n  ThreadImplWin32(std::function<void()> thread_routine, OptionsOptConstRef options);\n  ~ThreadImplWin32();\n\n  // Thread::Thread\n  void join() override;\n  std::string name() const override { return name_; }\n\n  // Needed for WatcherImpl for the QueueUserAPC callback context\n  HANDLE handle() const { return thread_handle_; }\n\nprivate:\n  std::function<void()> thread_routine_;\n  HANDLE thread_handle_;\n  std::string name_;\n};\n\n/**\n * Implementation of ThreadFactory\n */\nclass ThreadFactoryImplWin32 : public ThreadFactory {\npublic:\n  // Thread::ThreadFactory\n  ThreadPtr createThread(std::function<void()> thread_routine, OptionsOptConstRef options) override;\n  ThreadId currentThreadId() override;\n};\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"api_type_oracle_lib\",\n    srcs = [\"api_type_oracle.cc\"],\n    hdrs = [\"api_type_oracle.h\"],\n    deps = [\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:type_util_lib\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"api_version_lib\",\n    hdrs = [\"api_version.h\"],\n)\n\nenvoy_cc_library(\n    name = \"config_provider_lib\",\n    srcs = [\"config_provider_impl.cc\"],\n    hdrs = [\"config_provider_impl.h\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/config:config_provider_interface\",\n        \"//include/envoy/config:config_provider_manager_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:config_tracker_interface\",\n        \"//include/envoy/singleton:instance_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/init:manager_lib\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/init:watcher_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"datasource_lib\",\n    srcs = [\"datasource.cc\"],\n    hdrs = [\"datasource.h\"],\n    deps = [\n        \":remote_data_fetcher_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:backoff_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"decoded_resource_lib\",\n    hdrs = [\"decoded_resource_impl.h\"],\n    deps = [\n        \"//include/envoy/config:subscription_interface\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"delta_subscription_state_lib\",\n    srcs = [\"delta_subscription_state.cc\"],\n    hdrs = [\"delta_subscription_state.h\"],\n    deps = [\n        \":api_version_lib\",\n        \":pausable_ack_queue_lib\",\n        \":utility_lib\",\n        \":watch_map_lib\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:backoff_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:token_bucket_impl_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filesystem_subscription_lib\",\n    srcs = [\"filesystem_subscription_impl.cc\"],\n    hdrs = [\"filesystem_subscription_impl.h\"],\n    deps = [\n        \":decoded_resource_lib\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"grpc_stream_lib\",\n    hdrs = 
[\"grpc_stream.h\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/config:grpc_mux_interface\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:backoff_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:token_bucket_impl_lib\",\n        \"//source/common/grpc:async_client_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"grpc_mux_lib\",\n    srcs = [\"grpc_mux_impl.cc\"],\n    hdrs = [\"grpc_mux_impl.h\"],\n    deps = [\n        \":api_version_lib\",\n        \":decoded_resource_lib\",\n        \":grpc_stream_lib\",\n        \":utility_lib\",\n        \"//include/envoy/config:grpc_mux_interface\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/memory:utils_lib\",\n        \"//source/common/protobuf\",\n        \"@com_google_absl//absl/container:btree\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"grpc_subscription_lib\",\n    srcs = [\"grpc_subscription_impl.cc\"],\n    hdrs = [\"grpc_subscription_impl.h\"],\n    deps = [\n        \":grpc_mux_lib\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/grpc:async_client_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"new_grpc_mux_lib\",\n    srcs = [\"new_grpc_mux_impl.cc\"],\n    hdrs = [\"new_grpc_mux_impl.h\"],\n    deps = [\n        
\":delta_subscription_state_lib\",\n        \":grpc_stream_lib\",\n        \":pausable_ack_queue_lib\",\n        \":version_converter_lib\",\n        \":watch_map_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//source/common/memory:utils_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http_subscription_lib\",\n    srcs = [\"http_subscription_impl.cc\"],\n    hdrs = [\"http_subscription_impl.h\"],\n    external_deps = [\n        \"http_api_protos\",\n    ],\n    deps = [\n        \":api_version_lib\",\n        \":decoded_resource_lib\",\n        \":version_converter_lib\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:rest_api_fetcher_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"metadata_lib\",\n    srcs = [\"metadata.cc\"],\n    hdrs = [\"metadata.h\"],\n    deps = [\n        \"//include/envoy/config:typed_metadata_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/shared_pool:shared_pool_lib\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/metadata/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"opaque_resource_decoder_lib\",\n    hdrs = [\"opaque_resource_decoder_impl.h\"],\n    deps = [\n        \"//include/envoy/config:subscription_interface\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"pausable_ack_queue_lib\",\n    srcs = [\"pausable_ack_queue.cc\"],\n    hdrs = [\"pausable_ack_queue.h\"],\n    deps = [\n        \":update_ack_lib\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"protobuf_link_hacks\",\n    hdrs = [\"protobuf_link_hacks.h\"],\n    deps = [\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/extension/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/ratelimit/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/runtime/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/secret/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"remote_data_fetcher_lib\",\n    srcs = [\"remote_data_fetcher.cc\"],\n    hdrs = [\"remote_data_fetcher.h\"],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/crypto:utility_lib\",\n        \"//source/common/http:utility_lib\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"resource_name_lib\",\n    hdrs = [\"resource_name.h\"],\n    deps = [\n        \":api_type_oracle_lib\",\n        \"//source/common/common:assert_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"runtime_utility_lib\",\n    srcs = [\"runtime_utility.cc\"],\n    hdrs = [\"runtime_utility.h\"],\n    deps = [\"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"subscription_factory_lib\",\n    srcs = [\"subscription_factory_impl.cc\"],\n    hdrs = [\"subscription_factory_impl.h\"],\n    deps = [\n        \":filesystem_subscription_lib\",\n        \":grpc_subscription_lib\",\n        \":http_subscription_lib\",\n        \":new_grpc_mux_lib\",\n        \":type_to_endpoint_lib\",\n        \":udpa_resource_lib\",\n        \":utility_lib\",\n        \"//include/envoy/config:subscription_factory_interface\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"type_to_endpoint_lib\",\n    srcs = [\"type_to_endpoint.cc\"],\n    hdrs = [\"type_to_endpoint.h\"],\n    deps = [\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/annotations:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"udpa_context_params_lib\",\n    srcs = [\"udpa_context_params.cc\"],\n    hdrs = [\"udpa_context_params.h\"],\n    deps = [\n        \"//source/common/common:macros\",\n        \"//source/common/protobuf:utility_lib\",\n        
\"@com_github_cncf_udpa//udpa/core/v1:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"udpa_resource_lib\",\n    srcs = [\"udpa_resource.cc\"],\n    hdrs = [\"udpa_resource.h\"],\n    deps = [\n        \"//source/common/http:utility_lib\",\n        \"@com_github_cncf_udpa//udpa/core/v1:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"update_ack_lib\",\n    hdrs = [\"update_ack.h\"],\n    deps = [\"@com_google_googleapis//google/rpc:status_cc_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    deps = [\n        \":api_type_oracle_lib\",\n        \":version_converter_lib\",\n        \"//include/envoy/config:grpc_mux_interface\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:backoff_lib\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/singleton:const_singleton\",\n        \"//source/common/stats:histogram_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/stats:stats_matcher_lib\",\n        \"//source/common/stats:tag_producer_lib\",\n        \"@com_github_cncf_udpa//udpa/type/v1:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"version_converter_lib\",\n    srcs = [\"version_converter.cc\"],\n    hdrs = [\"version_converter.h\"],\n    deps = [\n        \":api_type_oracle_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:visitor_lib\",\n        \"//source/common/protobuf:well_known_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"watch_map_lib\",\n    srcs = [\"watch_map.cc\"],\n    hdrs = [\"watch_map.h\"],\n    deps = [\n        \":decoded_resource_lib\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"subscription_base_interface\",\n    hdrs = [\"subscription_base.h\"],\n    deps = [\n        \":opaque_resource_decoder_lib\",\n        \":resource_name_lib\",\n        \"//include/envoy/config:subscription_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    srcs = [\"well_known_names.cc\"],\n    hdrs = [\"well_known_names.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/common/config/README.md",
    "content": "# xDS\n\nxDS stands for [fill in the blank] Discovery Service. It provides dynamic config discovery/updates.\n\ntldr: xDS can use the filesystem, REST, or gRPC. gRPC xDS comes in four flavors.\nHowever, Envoy code uses all of that via the same Subscription interface.\nIf you are an Envoy developer with your hands on a valid Subscription object,\nyou can mostly forget the filesystem/REST/gRPC distinction, and you can\nespecially forget about the gRPC flavors. All of that is specified in the\nbootstrap config, which is read and put into action by ClusterManagerImpl.\n\nNote that there can be multiple active gRPC subscriptions for a single resource\ntype. This concept is called \"resource watches\". If one EDS subscription\nsubscribes to X and Y, and another subscribes to Y and Z, the underlying\nsubscription logic will maintain a subscription to the union: X Y and Z. Updates\nto X will be delivered to the first object, Y to both, Z to the second. This\nlogic is implemented by WatchMap.\n\n### If you are working on Envoy's gRPC xDS client logic itself, read on.\n\nWhen using gRPC, xDS has two pairs of options: aggregated/non-aggregated, and\ndelta/state-of-the-world updates. All four combinations of these are usable.\n\n\"Aggregated\" means that EDS, CDS, etc resources are all carried by the same gRPC stream.\nFor Envoy's implementation of xDS client logic, there is effectively no difference\nbetween aggregated xDS and non-aggregated: they both use the same request/response protos. 
The\nnon-aggregated case is handled by running the aggregated logic, and just happening to only have 1\nxDS subscription type to \"aggregate\", i.e., NewGrpcMuxImpl only has one\nDeltaSubscriptionState entry in its map.\n\nHowever, to the config server, there is a huge difference: when using ADS (caused\nby the user providing an ads_config in the bootstrap config), the gRPC client sets\nits method string to {Delta,Stream}AggregatedResources, as opposed to {Delta,Stream}Clusters,\n{Delta,Stream}Routes, etc. So, despite using the same request/response protos,\nand having identical client code, they're actually different gRPC services.\n\nDelta vs state-of-the-world is a question of wire format: the protos in question are named\n[Delta]Discovery{Request,Response}. That is what the GrpcMux interface is useful for: its\nNewGrpcMuxImpl (TODO may be renamed) implementation works with DeltaDiscovery{Request,Response} and has\ndelta-specific logic; its GrpcMuxImpl implementation (TODO will be merged into NewGrpcMuxImpl) works with Discovery{Request,Response}\nand has SotW-specific logic. Both the delta and SotW Subscription implementations (TODO will be merged) hold a shared_ptr<GrpcMux>.\nThe shared_ptr allows for both non- and aggregated: if non-aggregated, you'll be the only holder of that shared_ptr.\n\n![xDS_code_diagram](xDS_code_diagram.png)\n\nNote that the orange flow does not necessarily have to happen in response to the blue flow; there can be spontaneous updates. ACKs are not shown in this diagram; they are also carried by the [Delta]DiscoveryRequest protos.\nWhat does GrpcXdsContext even do in this diagram? Just own things and pass through function calls? Answer: it sequences the requests and ACKs that the various type_urls send.\n"
  },
  {
    "path": "source/common/config/api_type_oracle.cc",
    "content": "#include \"common/config/api_type_oracle.h\"\n\n#include \"udpa/annotations/versioning.pb.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nconst Protobuf::Descriptor*\nApiTypeOracle::getEarlierVersionDescriptor(const std::string& message_type) {\n  const auto previous_message_string = getEarlierVersionMessageTypeName(message_type);\n  if (previous_message_string != absl::nullopt) {\n    const Protobuf::Descriptor* earlier_desc =\n        Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(\n            previous_message_string.value());\n    return earlier_desc;\n  } else {\n    return nullptr;\n  }\n}\n\nconst absl::optional<std::string>\nApiTypeOracle::getEarlierVersionMessageTypeName(const std::string& message_type) {\n  // Determine if there is an earlier API version for message_type.\n  const Protobuf::Descriptor* desc =\n      Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(std::string{message_type});\n  if (desc == nullptr) {\n    return absl::nullopt;\n  }\n  if (desc->options().HasExtension(udpa::annotations::versioning)) {\n    return desc->options().GetExtension(udpa::annotations::versioning).previous_message_type();\n  }\n  return absl::nullopt;\n}\n\nconst absl::optional<std::string> ApiTypeOracle::getEarlierTypeUrl(const std::string& type_url) {\n  const std::string type{TypeUtil::typeUrlToDescriptorFullName(type_url)};\n  absl::optional<std::string> old_type = ApiTypeOracle::getEarlierVersionMessageTypeName(type);\n  if (old_type.has_value()) {\n    return TypeUtil::descriptorFullNameToTypeUrl(old_type.value());\n  }\n  return {};\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/api_type_oracle.h",
    "content": "#pragma once\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/type_util.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nclass ApiTypeOracle {\npublic:\n  /**\n   * Based on a given message, determine if there exists an earlier version of\n   * this message. If so, return the descriptor for the earlier\n   * message, to support upgrading via VersionConverter::upgrade().\n   *\n   * @param message_type protobuf message type\n   * @return const Protobuf::Descriptor* descriptor for earlier message version\n   *         corresponding to message, if any, otherwise nullptr.\n   */\n  static const Protobuf::Descriptor* getEarlierVersionDescriptor(const std::string& message_type);\n\n  static const absl::optional<std::string>\n  getEarlierVersionMessageTypeName(const std::string& message_type);\n\n  static const absl::optional<std::string> getEarlierTypeUrl(const std::string& type_url);\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/api_version.h",
    "content": "#pragma once\n\n// Use this to force a specific version of a given config proto, preventing API\n// boosting from modifying it. E.g. API_NO_BOOST(envoy::api::v2::Cluster).\n#define API_NO_BOOST(x) x\n\nnamespace Envoy {}\n"
  },
  {
    "path": "source/common/config/config_provider_impl.cc",
    "content": "#include \"common/config/config_provider_impl.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nImmutableConfigProviderBase::ImmutableConfigProviderBase(\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ConfigProviderManagerImplBase& config_provider_manager,\n    ConfigProviderInstanceType instance_type, ApiType api_type)\n    : last_updated_(factory_context.timeSource().systemTime()),\n      config_provider_manager_(config_provider_manager), instance_type_(instance_type),\n      api_type_(api_type) {\n  ASSERT(instance_type_ == ConfigProviderInstanceType::Static ||\n         instance_type_ == ConfigProviderInstanceType::Inline);\n  config_provider_manager_.bindImmutableConfigProvider(this);\n}\n\nImmutableConfigProviderBase::~ImmutableConfigProviderBase() {\n  config_provider_manager_.unbindImmutableConfigProvider(this);\n}\n\nConfigSubscriptionCommonBase::~ConfigSubscriptionCommonBase() {\n  local_init_target_.ready();\n  config_provider_manager_.unbindSubscription(manager_identifier_);\n}\n\nvoid ConfigSubscriptionCommonBase::applyConfigUpdate(const ConfigUpdateCb& update_fn) {\n  tls_->runOnAllThreads([update_fn](ThreadLocal::ThreadLocalObjectSharedPtr previous)\n                            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    auto prev_thread_local_config = std::dynamic_pointer_cast<ThreadLocalConfig>(previous);\n    prev_thread_local_config->config_ = update_fn(prev_thread_local_config->config_);\n    return previous;\n  });\n}\n\nbool ConfigSubscriptionInstance::checkAndApplyConfigUpdate(const Protobuf::Message& config_proto,\n                                                           const std::string& config_name,\n                                                           const std::string& version_info) {\n  const uint64_t new_hash = MessageUtil::hash(config_proto);\n  if (config_info_) {\n    ASSERT(config_info_.value().last_config_hash_.has_value());\n    if 
(config_info_.value().last_config_hash_.value() == new_hash) {\n      return false;\n    }\n  }\n\n  config_info_ = {new_hash, version_info};\n  ENVOY_LOG(debug, \"{}: loading new configuration: config_name={} hash={}\", name_, config_name,\n            new_hash);\n  ConfigProvider::ConfigConstSharedPtr new_config_impl = onConfigProtoUpdate(config_proto);\n  applyConfigUpdate([new_config_impl](ConfigProvider::ConfigConstSharedPtr)\n                        -> ConfigProvider::ConfigConstSharedPtr { return new_config_impl; });\n  return true;\n}\n\nConfigProviderManagerImplBase::ConfigProviderManagerImplBase(Server::Admin& admin,\n                                                             const std::string& config_name) {\n  config_tracker_entry_ =\n      admin.getConfigTracker().add(config_name, [this] { return dumpConfigs(); });\n  // ConfigTracker keys must be unique. We are asserting that no one has stolen the key\n  // from us, since the returned entry will be nullptr if the key already exists.\n  RELEASE_ASSERT(config_tracker_entry_, \"\");\n}\n\nconst ConfigProviderManagerImplBase::ConfigProviderSet&\nConfigProviderManagerImplBase::immutableConfigProviders(ConfigProviderInstanceType type) const {\n  static ConfigProviderSet empty_set;\n  ConfigProviderMap::const_iterator it;\n  if ((it = immutable_config_providers_map_.find(type)) == immutable_config_providers_map_.end()) {\n    return empty_set;\n  }\n\n  return *it->second;\n}\n\nvoid ConfigProviderManagerImplBase::bindImmutableConfigProvider(\n    ImmutableConfigProviderBase* provider) {\n  ConfigProviderMap::iterator it;\n  if ((it = immutable_config_providers_map_.find(provider->instanceType())) ==\n      immutable_config_providers_map_.end()) {\n    immutable_config_providers_map_.insert(std::make_pair(\n        provider->instanceType(),\n        std::make_unique<ConfigProviderSet>(std::initializer_list<ConfigProvider*>({provider}))));\n  } else {\n    it->second->insert(provider);\n  }\n}\n\nvoid 
ConfigProviderManagerImplBase::unbindImmutableConfigProvider(\n    ImmutableConfigProviderBase* provider) {\n  auto it = immutable_config_providers_map_.find(provider->instanceType());\n  ASSERT(it != immutable_config_providers_map_.end());\n  it->second->erase(provider);\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/config_provider_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/config_provider.h\"\n#include \"envoy/config/config_provider_manager.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/config_tracker.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/init/target_impl.h\"\n#include \"common/init/watcher_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n// This file provides a set of base classes, (ImmutableConfigProviderBase,\n// MutableConfigProviderCommonBase, ConfigProviderManagerImplBase, ConfigSubscriptionCommonBase,\n// ConfigSubscriptionInstance, DeltaConfigSubscriptionInstance), conforming to the\n// ConfigProvider/ConfigProviderManager interfaces, which in tandem provide a framework for\n// implementing statically defined (i.e., immutable) and dynamic (mutable via subscriptions)\n// configuration for Envoy.\n//\n// The mutability property applies to the ConfigProvider itself and _not_ the underlying config\n// proto, which is always immutable. MutableConfigProviderCommonBase objects receive config proto\n// updates via xDS subscriptions, resulting in new ConfigProvider::Config objects being instantiated\n// with the corresponding change in behavior corresponding to updated config. ConfigProvider::Config\n// objects must be latched/associated with the appropriate objects in the connection and request\n// processing pipeline, such that configuration stays consistent for the lifetime of the connection\n// and/or stream/request (if required by the configuration being processed).\n//\n// Dynamic configuration is distributed via xDS APIs (see\n// https://github.com/envoyproxy/data-plane-api/blob/master/xds_protocol.rst). 
The framework exposed\n// by these classes simplifies creation of client xDS implementations following a shared ownership\n// model, where according to the config source specification, a config subscription, config protos\n// received over the subscription and the subsequent config \"implementation\" (i.e., data structures\n// and associated business logic) are shared across ConfigProvider objects and Envoy worker threads.\n//\n// This approach enables linear memory scalability based primarily on the size of the configuration\n// set.\n//\n// A blueprint to follow for implementing mutable or immutable config providers is as follows:\n//\n// For both:\n//   1) Create a class derived from ConfigProviderManagerImplBase and implement the required\n//   interface.\n//      When implementing createXdsConfigProvider(), it is expected that getSubscription<T>() will\n//      be called to fetch either an existing ConfigSubscriptionCommonBase if the config\n//      source configuration matches, or a newly instantiated subscription otherwise.\n//\n// For immutable providers:\n//   1) Create a class derived from ImmutableConfigProviderBase and implement the required\n//   interface.\n//\n// For mutable (xDS) providers:\n//   1) According to the API type, create a class derived from MutableConfigProviderCommonBase and\n//   implement the required interface.\n//   2) According to the API type, create a class derived from\n//   ConfigSubscriptionInstance or DeltaConfigSubscriptionInstance; this is the entity responsible\n//   for owning and managing the Envoy::Config::Subscription<ConfigProto> that provides the\n//   underlying config subscription, and the Config implementation shared by associated providers.\n//     a) For a ConfigProvider::ApiType::Full subscription instance (i.e., a\n//     ConfigSubscriptionInstance child):\n//     - When subscription callbacks (onConfigUpdate, onConfigUpdateFailed) are issued by the\n//     underlying subscription, the corresponding 
ConfigSubscriptionInstance functions\n//     must be called as well.\n//     - On a successful config update, checkAndApplyConfigUpdate() should be called to instantiate\n//     the new config implementation and propagate it to the shared config providers and all worker\n//     threads.\n//       - On a successful return from checkAndApplyConfigUpdate(), the config proto must be latched\n//       into this class and returned via the getConfigProto() override.\n//    b) For a ConfigProvider::ApiType::Delta subscription instance (i.e., a\n//    DeltaConfigSubscriptionInstance child):\n//    - When subscription callbacks (onConfigUpdate, onConfigUpdateFailed) are issued by the\n//    underlying subscription, the corresponding ConfigSubscriptionInstance functions must be called\n//    as well.\n//    - On a successful config update, applyConfigUpdate() should be called to propagate the\n//    config updates to all bound config providers and worker threads.\n\nclass ConfigProviderManagerImplBase;\n\n/**\n * Specifies the type of config associated with a ConfigProvider.\n */\nenum class ConfigProviderInstanceType {\n  // Configuration defined as a static resource in the bootstrap config.\n  Static,\n  // Configuration defined inline in a resource that may be specified statically or obtained via\n  // xDS.\n  Inline,\n  // Configuration obtained from an xDS subscription.\n  Xds\n};\n\n/**\n * ConfigProvider implementation for immutable configuration.\n *\n * TODO(AndresGuedez): support sharing of config protos and config impls, as is\n * done with the MutableConfigProviderCommonBase.\n *\n * This class can not be instantiated directly; instead, it provides the foundation for\n * immutable config provider implementations which derive from it.\n */\nclass ImmutableConfigProviderBase : public ConfigProvider {\npublic:\n  ~ImmutableConfigProviderBase() override;\n\n  // Envoy::Config::ConfigProvider\n  SystemTime lastUpdated() const override { return last_updated_; }\n  
ApiType apiType() const override { return api_type_; }\n\n  ConfigProviderInstanceType instanceType() const { return instance_type_; }\n\nprotected:\n  ImmutableConfigProviderBase(Server::Configuration::ServerFactoryContext& factory_context,\n                              ConfigProviderManagerImplBase& config_provider_manager,\n                              ConfigProviderInstanceType instance_type, ApiType api_type);\n\nprivate:\n  SystemTime last_updated_;\n  ConfigProviderManagerImplBase& config_provider_manager_;\n  ConfigProviderInstanceType instance_type_;\n  ApiType api_type_;\n};\n\nclass MutableConfigProviderCommonBase;\n\n/**\n * Provides common DS API subscription functionality required by the ConfigProvider::ApiType.\n *\n * This class can not be instantiated directly; instead, it provides the foundation for\n * config subscription implementations which derive from it.\n *\n * A subscription is intended to be co-owned by config providers with the same config source, it's\n * designed to be created/destructed on admin thread only.\n *\n */\nclass ConfigSubscriptionCommonBase : protected Logger::Loggable<Logger::Id::config> {\npublic:\n  // Callback for updating a Config implementation held in each worker thread, the callback is\n  // called in applyConfigUpdate() with the current version Config, and is expected to return the\n  // new version Config.\n  using ConfigUpdateCb =\n      std::function<ConfigProvider::ConfigConstSharedPtr(ConfigProvider::ConfigConstSharedPtr)>;\n\n  struct LastConfigInfo {\n    absl::optional<uint64_t> last_config_hash_;\n    std::string last_config_version_;\n  };\n\n  virtual ~ConfigSubscriptionCommonBase();\n\n  /**\n   * Starts the subscription corresponding to a config source.\n   * A derived class must own the configuration proto specific Envoy::Config::Subscription to be\n   * started.\n   */\n  virtual void start() PURE;\n\n  const SystemTime& lastUpdated() const { return last_updated_; }\n\n  const 
absl::optional<LastConfigInfo>& configInfo() const { return config_info_; }\n\n  ConfigProvider::ConfigConstSharedPtr getConfig() const {\n    return tls_->getTyped<ThreadLocalConfig>().config_;\n  }\n\n  /**\n   * Must be called by derived classes when the onConfigUpdate() callback associated with the\n   * underlying subscription is issued.\n   */\n  void onConfigUpdate() {\n    setLastUpdated();\n    local_init_target_.ready();\n  }\n\n  /**\n   * Must be called by derived classes when the onConfigUpdateFailed() callback associated with the\n   * underlying subscription is issued.\n   */\n  void onConfigUpdateFailed() {\n    setLastUpdated();\n    local_init_target_.ready();\n  }\n\nprotected:\n  struct ThreadLocalConfig : public ThreadLocal::ThreadLocalObject {\n    explicit ThreadLocalConfig(ConfigProvider::ConfigConstSharedPtr initial_config)\n        : config_(std::move(initial_config)) {}\n\n    ConfigProvider::ConfigConstSharedPtr config_;\n  };\n\n  ConfigSubscriptionCommonBase(const std::string& name, const uint64_t manager_identifier,\n                               ConfigProviderManagerImplBase& config_provider_manager,\n                               Server::Configuration::ServerFactoryContext& factory_context)\n      : name_(name), tls_(factory_context.threadLocal().allocateSlot()),\n        local_init_target_(\n            fmt::format(\"ConfigSubscriptionCommonBase local init target '{}'\", name_),\n            [this]() { start(); }),\n        parent_init_target_(fmt::format(\"ConfigSubscriptionCommonBase init target '{}'\", name_),\n                            [this]() { local_init_manager_.initialize(local_init_watcher_); }),\n        local_init_watcher_(fmt::format(\"ConfigSubscriptionCommonBase local watcher '{}'\", name_),\n                            [this]() { parent_init_target_.ready(); }),\n        local_init_manager_(\n            fmt::format(\"ConfigSubscriptionCommonBase local init manager '{}'\", name_)),\n        
manager_identifier_(manager_identifier), config_provider_manager_(config_provider_manager),\n        time_source_(factory_context.timeSource()),\n        last_updated_(factory_context.timeSource().systemTime()) {\n    Envoy::Config::Utility::checkLocalInfo(name, factory_context.localInfo());\n    local_init_manager_.add(local_init_target_);\n  }\n\n  /**\n   * Propagates a config update to worker threads.\n   *\n   * @param update_fn the callback to run on each thread, it takes the previous version Config and\n   * returns a updated/new version Config.\n   */\n  void applyConfigUpdate(const ConfigUpdateCb& update_fn);\n\n  void setLastUpdated() { last_updated_ = time_source_.systemTime(); }\n  Init::Manager& localInitManager() { return local_init_manager_; }\n  void setLastConfigInfo(absl::optional<LastConfigInfo>&& config_info) {\n    config_info_ = std::move(config_info);\n  }\n\n  const std::string name_;\n  absl::optional<LastConfigInfo> config_info_;\n  // This slot holds a Config implementation in each thread, which is intended to be shared between\n  // config providers from the same config source.\n  ThreadLocal::SlotPtr tls_;\n\nprivate:\n  // Local init target which signals first RPC interaction with management server.\n  Init::TargetImpl local_init_target_;\n  // Target added to factory context's initManager.\n  Init::TargetImpl parent_init_target_;\n  // Watcher that marks parent_init_target_ ready when the local init manager is ready.\n  Init::WatcherImpl local_init_watcher_;\n  // Local manager that tracks the subscription initialization, it is also used for sub-resource\n  // initialization if the sub-resource is not initialized.\n  Init::ManagerImpl local_init_manager_;\n\n  const uint64_t manager_identifier_;\n  ConfigProviderManagerImplBase& config_provider_manager_;\n  TimeSource& time_source_;\n  SystemTime last_updated_;\n\n  // ConfigSubscriptionCommonBase, MutableConfigProviderCommonBase and\n  // ConfigProviderManagerImplBase are tightly 
coupled with the current shared ownership model; use\n  // friend classes to explicitly denote the binding between them.\n  //\n  // TODO(AndresGuedez): Investigate whether a shared ownership model avoiding the <shared_ptr>s and\n  // instead centralizing lifetime management in the ConfigProviderManagerImplBase with explicit\n  // reference counting would be more maintainable.\n  friend class ConfigProviderManagerImplBase;\n};\n\nusing ConfigSubscriptionCommonBaseSharedPtr = std::shared_ptr<ConfigSubscriptionCommonBase>;\n\n/**\n * Provides common subscription functionality required by ConfigProvider::ApiType::Full DS APIs.\n * A single Config instance is shared across all providers and all workers associated with this\n * subscription.\n */\nclass ConfigSubscriptionInstance : public ConfigSubscriptionCommonBase {\npublic:\n  ConfigSubscriptionInstance(const std::string& name, const uint64_t manager_identifier,\n                             ConfigProviderManagerImplBase& config_provider_manager,\n                             Server::Configuration::ServerFactoryContext& factory_context)\n      : ConfigSubscriptionCommonBase(name, manager_identifier, config_provider_manager,\n                                     factory_context) {}\n\n  /**\n   * Must be called by the derived class' constructor.\n   * @param initial_config supplies an initial Envoy::Config::ConfigProvider::Config associated\n   * with the underlying subscription, shared across all providers and workers.\n   */\n  void initialize(const ConfigProvider::ConfigConstSharedPtr& initial_config) {\n    tls_->set([initial_config](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n      return std::make_shared<ThreadLocalConfig>(initial_config);\n    });\n  }\n\n  /**\n   * Determines whether a configuration proto is a new update, and if so, propagates it to all\n   * config providers associated with this subscription.\n   * @param config_proto supplies the newly received config proto.\n   * 
@param config_name supplies the name associated with the config.\n   * @param version_info supplies the version associated with the config.\n   * @return bool false when the config proto has no delta from the previous config, true\n   * otherwise.\n   */\n  bool checkAndApplyConfigUpdate(const Protobuf::Message& config_proto,\n                                 const std::string& config_name, const std::string& version_info);\n\nprotected:\n  /**\n   * Called when a new config proto is received via an xDS subscription.\n   * On successful validation of the config, must return a shared_ptr to a ConfigProvider::Config\n   * implementation that will be propagated to all mutable config providers sharing the\n   * subscription.\n   * Note that this function is called _once_ across all shared config providers per xDS\n   * subscription config update.\n   * @param config_proto supplies the configuration proto.\n   * @return ConfigConstSharedPtr the ConfigProvider::Config to share with other providers.\n   */\n  virtual ConfigProvider::ConfigConstSharedPtr\n  onConfigProtoUpdate(const Protobuf::Message& config_proto) PURE;\n};\n\n/**\n * Provides common subscription functionality required by ConfigProvider::ApiType::Delta DS APIs.\n */\nclass DeltaConfigSubscriptionInstance : public ConfigSubscriptionCommonBase {\nprotected:\n  using ConfigSubscriptionCommonBase::ConfigSubscriptionCommonBase;\n\n  /**\n   * Must be called by the derived class' constructor.\n   * @param init_cb supplies an initial Envoy::Config::ConfigProvider::Config associated with the\n   * underlying subscription for each worker thread.\n   */\n  void initialize(const std::function<ConfigProvider::ConfigConstSharedPtr()>& init_cb) {\n    tls_->set([init_cb](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n      return std::make_shared<ThreadLocalConfig>(init_cb());\n    });\n  }\n};\n\n/**\n * Provides generic functionality required by the ConfigProvider::ApiType specific dynamic config\n 
* providers.\n *\n * This class can not be instantiated directly; instead, it provides the foundation for\n * dynamic config provider implementations which derive from it.\n */\nclass MutableConfigProviderCommonBase : public ConfigProvider {\npublic:\n  // Envoy::Config::ConfigProvider\n  SystemTime lastUpdated() const override { return subscription_->lastUpdated(); }\n  ApiType apiType() const override { return api_type_; }\n\nprotected:\n  MutableConfigProviderCommonBase(ConfigSubscriptionCommonBaseSharedPtr&& subscription,\n                                  ApiType api_type)\n      : subscription_(subscription), api_type_(api_type) {}\n\n  // Envoy::Config::ConfigProvider\n  ConfigConstSharedPtr getConfig() const override { return subscription_->getConfig(); }\n\n  ConfigSubscriptionCommonBaseSharedPtr subscription_;\n\nprivate:\n  ApiType api_type_;\n};\n\n/**\n * Provides generic functionality required by all config provider managers, such as managing\n * shared lifetime of subscriptions and dynamic config providers, along with determining which\n * subscriptions should be associated with newly instantiated providers.\n *\n * The implementation of this class is not thread safe. Note that ImmutableConfigProviderBase\n * and ConfigSubscriptionCommonBase call the corresponding {bind,unbind}* functions exposed\n * by this class.\n *\n * All config processing is done on the main thread, so instantiation of *ConfigProvider* objects\n * via createStaticConfigProvider() and createXdsConfigProvider() is naturally thread safe. 
Care\n * must be taken with regards to destruction of these objects, since it must also happen on the\n * main thread _prior_ to destruction of the ConfigProviderManagerImplBase object from which they\n * were created.\n *\n * This class can not be instantiated directly; instead, it provides the foundation for\n * dynamic config provider implementations which derive from it.\n */\nclass ConfigProviderManagerImplBase : public ConfigProviderManager, public Singleton::Instance {\npublic:\n  /**\n   * This is invoked by the /config_dump admin handler.\n   * @return ProtobufTypes::MessagePtr the config dump proto corresponding to the associated\n   *                                   config providers.\n   */\n  virtual ProtobufTypes::MessagePtr dumpConfigs() const PURE;\n\nprotected:\n  // Ordered set for deterministic config dump output.\n  using ConfigProviderSet = std::set<ConfigProvider*>;\n  using ConfigProviderMap = absl::node_hash_map<ConfigProviderInstanceType,\n                                                std::unique_ptr<ConfigProviderSet>, EnumClassHash>;\n  using ConfigSubscriptionMap =\n      absl::node_hash_map<uint64_t, std::weak_ptr<ConfigSubscriptionCommonBase>>;\n\n  ConfigProviderManagerImplBase(Server::Admin& admin, const std::string& config_name);\n\n  const ConfigSubscriptionMap& configSubscriptions() const { return config_subscriptions_; }\n\n  /**\n   * Returns the set of bound ImmutableConfigProviderBase-derived providers of a given type.\n   * @param type supplies the type of config providers to return.\n   * @return const ConfigProviderSet* the set of config providers corresponding to the type.\n   */\n  const ConfigProviderSet& immutableConfigProviders(ConfigProviderInstanceType type) const;\n\n  /**\n   * Returns the subscription associated with the config_source_proto; if none exists, a new one\n   * is allocated according to the subscription_factory_fn.\n   * @param config_source_proto supplies the proto specifying the config 
subscription parameters.\n   * @param init_manager supplies the init manager.\n   * @param subscription_factory_fn supplies a function to be called when a new subscription needs\n   *                                to be allocated.\n   * @return std::shared_ptr<T> an existing (if a match is found) or newly allocated subscription.\n   */\n  template <typename T>\n  std::shared_ptr<T>\n  getSubscription(const Protobuf::Message& config_source_proto, Init::Manager& init_manager,\n                  const std::function<ConfigSubscriptionCommonBaseSharedPtr(\n                      const uint64_t, ConfigProviderManagerImplBase&)>& subscription_factory_fn) {\n    static_assert(std::is_base_of<ConfigSubscriptionCommonBase, T>::value,\n                  \"T must be a subclass of ConfigSubscriptionCommonBase\");\n\n    ConfigSubscriptionCommonBaseSharedPtr subscription;\n    const uint64_t manager_identifier = MessageUtil::hash(config_source_proto);\n\n    auto it = config_subscriptions_.find(manager_identifier);\n    if (it == config_subscriptions_.end()) {\n      // std::make_shared does not work for classes with private constructors. There are ways\n      // around it. 
However, since this is not a performance critical path we err on the side\n      // of simplicity.\n      subscription = subscription_factory_fn(manager_identifier, *this);\n      init_manager.add(subscription->parent_init_target_);\n\n      bindSubscription(manager_identifier, subscription);\n    } else {\n      // Because the ConfigProviderManagerImplBase's weak_ptrs only get cleaned up\n      // in the ConfigSubscriptionCommonBase destructor, and the single threaded nature\n      // of this code, locking the weak_ptr will not fail.\n      subscription = it->second.lock();\n    }\n    ASSERT(subscription);\n\n    return std::static_pointer_cast<T>(subscription);\n  }\n\nprivate:\n  void bindSubscription(const uint64_t manager_identifier,\n                        ConfigSubscriptionCommonBaseSharedPtr& subscription) {\n    config_subscriptions_.insert({manager_identifier, subscription});\n  }\n\n  void unbindSubscription(const uint64_t manager_identifier) {\n    config_subscriptions_.erase(manager_identifier);\n  }\n\n  void bindImmutableConfigProvider(ImmutableConfigProviderBase* provider);\n  void unbindImmutableConfigProvider(ImmutableConfigProviderBase* provider);\n\n  // TODO(jsedgwick) These two members are prime candidates for the owned-entry list/map\n  // as in ConfigTracker. I.e. the ProviderImpls would have an EntryOwner for these lists\n  // Then the lifetime management stuff is centralized and opaque.\n  ConfigSubscriptionMap config_subscriptions_;\n  ConfigProviderMap immutable_config_providers_map_;\n\n  Server::ConfigTracker::EntryOwnerPtr config_tracker_entry_;\n\n  // See comment for friend classes in the ConfigSubscriptionCommonBase for more details on\n  // the use of friends.\n  friend class ConfigSubscriptionCommonBase;\n  friend class ImmutableConfigProviderBase;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/datasource.cc",
    "content": "#include \"common/config/datasource.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"fmt/format.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace DataSource {\n\n// Parameters of the jittered backoff strategy.\nstatic constexpr uint32_t RetryInitialDelayMilliseconds = 1000;\nstatic constexpr uint32_t RetryMaxDelayMilliseconds = 10 * 1000;\nstatic constexpr uint32_t RetryCount = 1;\n\nstd::string read(const envoy::config::core::v3::DataSource& source, bool allow_empty,\n                 Api::Api& api) {\n  switch (source.specifier_case()) {\n  case envoy::config::core::v3::DataSource::SpecifierCase::kFilename:\n    return api.fileSystem().fileReadToEnd(source.filename());\n  case envoy::config::core::v3::DataSource::SpecifierCase::kInlineBytes:\n    return source.inline_bytes();\n  case envoy::config::core::v3::DataSource::SpecifierCase::kInlineString:\n    return source.inline_string();\n  default:\n    if (!allow_empty) {\n      throw EnvoyException(\n          fmt::format(\"Unexpected DataSource::specifier_case(): {}\", source.specifier_case()));\n    }\n    return \"\";\n  }\n}\n\nabsl::optional<std::string> getPath(const envoy::config::core::v3::DataSource& source) {\n  return source.specifier_case() == envoy::config::core::v3::DataSource::SpecifierCase::kFilename\n             ? 
absl::make_optional(source.filename())\n             : absl::nullopt;\n}\n\nRemoteAsyncDataProvider::RemoteAsyncDataProvider(\n    Upstream::ClusterManager& cm, Init::Manager& manager,\n    const envoy::config::core::v3::RemoteDataSource& source, Event::Dispatcher& dispatcher,\n    Random::RandomGenerator& random, bool allow_empty, AsyncDataSourceCb&& callback)\n    : allow_empty_(allow_empty), callback_(std::move(callback)),\n      fetcher_(std::make_unique<Config::DataFetcher::RemoteDataFetcher>(cm, source.http_uri(),\n                                                                        source.sha256(), *this)),\n      init_target_(\"RemoteAsyncDataProvider\", [this]() { start(); }),\n      retries_remaining_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(source.retry_policy(), num_retries, RetryCount)) {\n\n  uint64_t base_interval_ms = RetryInitialDelayMilliseconds;\n  uint64_t max_interval_ms = RetryMaxDelayMilliseconds;\n  if (source.has_retry_policy()) {\n    if (source.retry_policy().has_retry_back_off()) {\n      base_interval_ms =\n          PROTOBUF_GET_MS_REQUIRED(source.retry_policy().retry_back_off(), base_interval);\n\n      max_interval_ms = PROTOBUF_GET_MS_OR_DEFAULT(source.retry_policy().retry_back_off(),\n                                                   max_interval, base_interval_ms * 10);\n\n      if (max_interval_ms < base_interval_ms) {\n        throw EnvoyException(\"max_interval must be greater than or equal to the base_interval\");\n      }\n    }\n  }\n\n  backoff_strategy_ = std::make_unique<JitteredExponentialBackOffStrategy>(base_interval_ms,\n                                                                           max_interval_ms, random);\n  retry_timer_ = dispatcher.createTimer([this]() -> void { start(); });\n\n  manager.add(init_target_);\n}\n\n} // namespace DataSource\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/datasource.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/backoff_strategy.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/config/remote_data_fetcher.h\"\n#include \"common/init/target_impl.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace DataSource {\n\n/**\n * Read contents of the DataSource.\n * @param source data source.\n * @param allow_empty return an empty string if no DataSource case is specified.\n * @param api reference to the Api object\n * @return std::string with DataSource contents.\n * @throw EnvoyException if no DataSource case is specified and !allow_empty.\n */\nstd::string read(const envoy::config::core::v3::DataSource& source, bool allow_empty,\n                 Api::Api& api);\n\n/**\n * @param source data source.\n * @return absl::optional<std::string> path to DataSource if a filename, otherwise absl::nullopt.\n */\nabsl::optional<std::string> getPath(const envoy::config::core::v3::DataSource& source);\n\n/**\n * Callback for async data source.\n */\nusing AsyncDataSourceCb = std::function<void(const std::string&)>;\n\nclass LocalAsyncDataProvider {\npublic:\n  LocalAsyncDataProvider(Init::Manager& manager, const envoy::config::core::v3::DataSource& source,\n                         bool allow_empty, Api::Api& api, AsyncDataSourceCb&& callback)\n      : init_target_(\"LocalAsyncDataProvider\", [this, &source, allow_empty, &api, callback]() {\n          callback(DataSource::read(source, allow_empty, api));\n          init_target_.ready();\n        }) {\n    manager.add(init_target_);\n  }\n\n  ~LocalAsyncDataProvider() { init_target_.ready(); }\n\nprivate:\n  Init::TargetImpl 
init_target_;\n};\n\nusing LocalAsyncDataProviderPtr = std::unique_ptr<LocalAsyncDataProvider>;\n\nclass RemoteAsyncDataProvider : public Event::DeferredDeletable,\n                                public Config::DataFetcher::RemoteDataFetcherCallback,\n                                public Logger::Loggable<Logger::Id::config> {\npublic:\n  RemoteAsyncDataProvider(Upstream::ClusterManager& cm, Init::Manager& manager,\n                          const envoy::config::core::v3::RemoteDataSource& source,\n                          Event::Dispatcher& dispatcher, Random::RandomGenerator& random,\n                          bool allow_empty, AsyncDataSourceCb&& callback);\n\n  ~RemoteAsyncDataProvider() override {\n    init_target_.ready();\n    if (retry_timer_) {\n      retry_timer_->disableTimer();\n    }\n  }\n\n  // Config::DataFetcher::RemoteDataFetcherCallback\n  void onSuccess(const std::string& data) override {\n    callback_(data);\n    init_target_.ready();\n  }\n\n  // Config::DataFetcher::RemoteDataFetcherCallback\n  void onFailure(Config::DataFetcher::FailureReason failure) override {\n    ENVOY_LOG(debug, \"Failed to fetch remote data, failure reason: {}\", enumToInt(failure));\n    if (retries_remaining_-- == 0) {\n      ENVOY_LOG(warn, \"Retry limit exceeded for fetching data from remote data source.\");\n      if (allow_empty_) {\n        callback_(EMPTY_STRING);\n      }\n      // We need to allow server startup to continue.\n      init_target_.ready();\n      return;\n    }\n\n    const auto retry_ms = std::chrono::milliseconds(backoff_strategy_->nextBackOffMs());\n    ENVOY_LOG(debug, \"Remote data provider will retry in {} ms.\", retry_ms.count());\n    retry_timer_->enableTimer(retry_ms);\n  }\n\nprivate:\n  void start() { fetcher_->fetch(); }\n\n  bool allow_empty_;\n  AsyncDataSourceCb callback_;\n  const Config::DataFetcher::RemoteDataFetcherPtr fetcher_;\n  Init::TargetImpl init_target_;\n\n  Event::TimerPtr retry_timer_;\n  BackOffStrategyPtr 
backoff_strategy_;\n  uint32_t retries_remaining_;\n};\n\nusing RemoteAsyncDataProviderPtr = std::unique_ptr<RemoteAsyncDataProvider>;\n\n} // namespace DataSource\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/decoded_resource_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/subscription.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"udpa/core/v1/collection_entry.pb.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nnamespace {\n\nstd::vector<std::string>\nrepeatedPtrFieldToVector(const Protobuf::RepeatedPtrField<std::string>& xs) {\n  std::vector<std::string> ys;\n  std::copy(xs.begin(), xs.end(), std::back_inserter(ys));\n  return ys;\n}\n\n} // namespace\n\nclass DecodedResourceImpl : public DecodedResource {\npublic:\n  DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder, const ProtobufWkt::Any& resource,\n                      const std::string& version)\n      : DecodedResourceImpl(resource_decoder, {}, Protobuf::RepeatedPtrField<std::string>(),\n                            resource, true, version) {}\n  DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder,\n                      const envoy::service::discovery::v3::Resource& resource)\n      : DecodedResourceImpl(resource_decoder, resource.name(), resource.aliases(),\n                            resource.resource(), resource.has_resource(), resource.version()) {}\n  DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder,\n                      const udpa::core::v1::CollectionEntry::InlineEntry& inline_entry)\n      : DecodedResourceImpl(resource_decoder, inline_entry.name(),\n                            Protobuf::RepeatedPtrField<std::string>(), inline_entry.resource(),\n                            true, inline_entry.version()) {}\n  DecodedResourceImpl(ProtobufTypes::MessagePtr resource, const std::string& name,\n                      const std::vector<std::string>& aliases, const std::string& version)\n      : resource_(std::move(resource)), has_resource_(true), name_(name), aliases_(aliases),\n        version_(version) {}\n\n  // Config::DecodedResource\n  const std::string& name() const override { return name_; }\n  const std::vector<std::string>& aliases() const override { return aliases_; 
}\n  const std::string& version() const override { return version_; };\n  const Protobuf::Message& resource() const override { return *resource_; };\n  bool hasResource() const override { return has_resource_; }\n\nprivate:\n  DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder, absl::optional<std::string> name,\n                      const Protobuf::RepeatedPtrField<std::string>& aliases,\n                      const ProtobufWkt::Any& resource, bool has_resource,\n                      const std::string& version)\n      : resource_(resource_decoder.decodeResource(resource)), has_resource_(has_resource),\n        name_(name ? *name : resource_decoder.resourceName(*resource_)),\n        aliases_(repeatedPtrFieldToVector(aliases)), version_(version) {}\n\n  const ProtobufTypes::MessagePtr resource_;\n  const bool has_resource_;\n  const std::string name_;\n  const std::vector<std::string> aliases_;\n  const std::string version_;\n};\n\nusing DecodedResourceImplPtr = std::unique_ptr<DecodedResourceImpl>;\n\nstruct DecodedResourcesWrapper {\n  DecodedResourcesWrapper() = default;\n  DecodedResourcesWrapper(OpaqueResourceDecoder& resource_decoder,\n                          const Protobuf::RepeatedPtrField<ProtobufWkt::Any>& resources,\n                          const std::string& version) {\n    for (const auto& resource : resources) {\n      pushBack(std::make_unique<DecodedResourceImpl>(resource_decoder, resource, version));\n    }\n  }\n\n  void pushBack(Config::DecodedResourcePtr&& resource) {\n    owned_resources_.push_back(std::move(resource));\n    refvec_.emplace_back(*owned_resources_.back());\n  }\n\n  std::vector<Config::DecodedResourcePtr> owned_resources_;\n  std::vector<Config::DecodedResourceRef> refvec_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/delta_subscription_state.cc",
    "content": "#include \"common/config/delta_subscription_state.h\"\n\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/hash.h\"\n#include \"common/config/utility.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nDeltaSubscriptionState::DeltaSubscriptionState(std::string type_url,\n                                               UntypedConfigUpdateCallbacks& watch_map,\n                                               const LocalInfo::LocalInfo& local_info)\n    : type_url_(std::move(type_url)), watch_map_(watch_map), local_info_(local_info) {}\n\nvoid DeltaSubscriptionState::updateSubscriptionInterest(const std::set<std::string>& cur_added,\n                                                        const std::set<std::string>& cur_removed) {\n  for (const auto& a : cur_added) {\n    setResourceWaitingForServer(a);\n    // If interest in a resource is removed-then-added (all before a discovery request\n    // can be sent), we must treat it as a \"new\" addition: our user may have forgotten its\n    // copy of the resource after instructing us to remove it, and need to be reminded of it.\n    names_removed_.erase(a);\n    names_added_.insert(a);\n  }\n  for (const auto& r : cur_removed) {\n    setLostInterestInResource(r);\n    // Ideally, when interest in a resource is added-then-removed in between requests,\n    // we would avoid putting a superfluous \"unsubscribe [resource that was never subscribed]\"\n    // in the request. 
However, the removed-then-added case *does* need to go in the request,\n    // and due to how we accomplish that, it's difficult to distinguish remove-add-remove from\n    // add-remove (because \"remove-add\" has to be treated as equivalent to just \"add\").\n    names_added_.erase(r);\n    names_removed_.insert(r);\n  }\n}\n\n// Not having sent any requests yet counts as an \"update pending\" since you're supposed to resend\n// the entirety of your interest at the start of a stream, even if nothing has changed.\nbool DeltaSubscriptionState::subscriptionUpdatePending() const {\n  return !names_added_.empty() || !names_removed_.empty() ||\n         !any_request_sent_yet_in_current_stream_;\n}\n\nUpdateAck DeltaSubscriptionState::handleResponse(\n    const envoy::service::discovery::v3::DeltaDiscoveryResponse& message) {\n  // We *always* copy the response's nonce into the next request, even if we're going to make that\n  // request a NACK by setting error_detail.\n  UpdateAck ack(message.nonce(), type_url_);\n  try {\n    handleGoodResponse(message);\n  } catch (const EnvoyException& e) {\n    handleBadResponse(e, ack);\n  }\n  return ack;\n}\n\nvoid DeltaSubscriptionState::handleGoodResponse(\n    const envoy::service::discovery::v3::DeltaDiscoveryResponse& message) {\n  absl::flat_hash_set<std::string> names_added_removed;\n  for (const auto& resource : message.resources()) {\n    if (!names_added_removed.insert(resource.name()).second) {\n      throw EnvoyException(\n          fmt::format(\"duplicate name {} found among added/updated resources\", resource.name()));\n    }\n    // DeltaDiscoveryResponses for unresolved aliases don't contain an actual resource\n    if (!resource.has_resource() && resource.aliases_size() > 0) {\n      continue;\n    }\n    if (message.type_url() != resource.resource().type_url()) {\n      throw EnvoyException(fmt::format(\"type URL {} embedded in an individual Any does not match \"\n                                       \"the 
message-wide type URL {} in DeltaDiscoveryResponse {}\",\n                                       resource.resource().type_url(), message.type_url(),\n                                       message.DebugString()));\n    }\n  }\n  for (const auto& name : message.removed_resources()) {\n    if (!names_added_removed.insert(name).second) {\n      throw EnvoyException(\n          fmt::format(\"duplicate name {} found in the union of added+removed resources\", name));\n    }\n  }\n  watch_map_.onConfigUpdate(message.resources(), message.removed_resources(),\n                            message.system_version_info());\n  for (const auto& resource : message.resources()) {\n    setResourceVersion(resource.name(), resource.version());\n  }\n  // If a resource is gone, there is no longer a meaningful version for it that makes sense to\n  // provide to the server upon stream reconnect: either it will continue to not exist, in which\n  // case saying nothing is fine, or the server will bring back something new, which we should\n  // receive regardless (which is the logic that not specifying a version will get you).\n  //\n  // So, leave the version map entry present but blank. 
It will be left out of\n  // initial_resource_versions messages, but will remind us to explicitly tell the server \"I'm\n  // cancelling my subscription\" when we lose interest.\n  for (const auto& resource_name : message.removed_resources()) {\n    if (resource_names_.find(resource_name) != resource_names_.end()) {\n      setResourceWaitingForServer(resource_name);\n    }\n  }\n  ENVOY_LOG(debug, \"Delta config for {} accepted with {} resources added, {} removed\", type_url_,\n            message.resources().size(), message.removed_resources().size());\n}\n\nvoid DeltaSubscriptionState::handleBadResponse(const EnvoyException& e, UpdateAck& ack) {\n  // Note that error_detail being set is what indicates that a DeltaDiscoveryRequest is a NACK.\n  ack.error_detail_.set_code(Grpc::Status::WellKnownGrpcStatus::Internal);\n  ack.error_detail_.set_message(Config::Utility::truncateGrpcStatusMessage(e.what()));\n  ENVOY_LOG(warn, \"delta config for {} rejected: {}\", type_url_, e.what());\n  watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e);\n}\n\nvoid DeltaSubscriptionState::handleEstablishmentFailure() {\n  watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure,\n                                  nullptr);\n}\n\nenvoy::service::discovery::v3::DeltaDiscoveryRequest\nDeltaSubscriptionState::getNextRequestAckless() {\n  envoy::service::discovery::v3::DeltaDiscoveryRequest request;\n  if (!any_request_sent_yet_in_current_stream_) {\n    any_request_sent_yet_in_current_stream_ = true;\n    // initial_resource_versions \"must be populated for first request in a stream\".\n    // Also, since this might be a new server, we must explicitly state *all* of our subscription\n    // interest.\n    for (auto const& [resource_name, resource_version] : resource_versions_) {\n      // Populate initial_resource_versions with the resource versions we currently have.\n      // Resources we are interested in, 
but are still waiting to get any version of from the\n      // server, do not belong in initial_resource_versions. (But do belong in new subscriptions!)\n      if (!resource_version.waitingForServer()) {\n        (*request.mutable_initial_resource_versions())[resource_name] = resource_version.version();\n      }\n      // As mentioned above, fill resource_names_subscribe with everything, including names we\n      // have yet to receive any resource for.\n      names_added_.insert(resource_name);\n    }\n    names_removed_.clear();\n  }\n  std::copy(names_added_.begin(), names_added_.end(),\n            Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_subscribe()));\n  std::copy(names_removed_.begin(), names_removed_.end(),\n            Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_unsubscribe()));\n  names_added_.clear();\n  names_removed_.clear();\n\n  request.set_type_url(type_url_);\n  request.mutable_node()->MergeFrom(local_info_.node());\n  return request;\n}\n\nenvoy::service::discovery::v3::DeltaDiscoveryRequest\nDeltaSubscriptionState::getNextRequestWithAck(const UpdateAck& ack) {\n  envoy::service::discovery::v3::DeltaDiscoveryRequest request = getNextRequestAckless();\n  request.set_response_nonce(ack.nonce_);\n  if (ack.error_detail_.code() != Grpc::Status::WellKnownGrpcStatus::Ok) {\n    // Don't needlessly make the field present-but-empty if status is ok.\n    request.mutable_error_detail()->CopyFrom(ack.error_detail_);\n  }\n  return request;\n}\n\nvoid DeltaSubscriptionState::setResourceVersion(const std::string& resource_name,\n                                                const std::string& resource_version) {\n  resource_versions_[resource_name] = ResourceVersion(resource_version);\n  resource_names_.insert(resource_name);\n}\n\nvoid DeltaSubscriptionState::setResourceWaitingForServer(const std::string& resource_name) {\n  resource_versions_[resource_name] = ResourceVersion();\n  
resource_names_.insert(resource_name);\n}\n\nvoid DeltaSubscriptionState::setLostInterestInResource(const std::string& resource_name) {\n  resource_versions_.erase(resource_name);\n  resource_names_.erase(resource_name);\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/delta_subscription_state.h",
    "content": "#pragma once\n\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/pausable_ack_queue.h\"\n#include \"common/config/watch_map.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n// Tracks the xDS protocol state of an individual ongoing delta xDS session, i.e. a single type_url.\n// There can be multiple DeltaSubscriptionStates active. They will always all be\n// blissfully unaware of each other's existence, even when their messages are\n// being multiplexed together by ADS.\nclass DeltaSubscriptionState : public Logger::Loggable<Logger::Id::config> {\npublic:\n  DeltaSubscriptionState(std::string type_url, UntypedConfigUpdateCallbacks& watch_map,\n                         const LocalInfo::LocalInfo& local_info);\n\n  // Update which resources we're interested in subscribing to.\n  void updateSubscriptionInterest(const std::set<std::string>& cur_added,\n                                  const std::set<std::string>& cur_removed);\n  void addAliasesToResolve(const std::set<std::string>& aliases);\n\n  // Whether there was a change in our subscription interest we have yet to inform the server of.\n  bool subscriptionUpdatePending() const;\n\n  void markStreamFresh() { any_request_sent_yet_in_current_stream_ = false; }\n\n  UpdateAck handleResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& message);\n\n  void handleEstablishmentFailure();\n\n  // Returns the next gRPC request proto to be sent off to the server, based on this object's\n  // understanding of the current protocol state, and new resources that Envoy wants to request.\n  
envoy::service::discovery::v3::DeltaDiscoveryRequest getNextRequestAckless();\n  // The WithAck version first calls the Ack-less version, then adds in the passed-in ack.\n  envoy::service::discovery::v3::DeltaDiscoveryRequest getNextRequestWithAck(const UpdateAck& ack);\n\n  DeltaSubscriptionState(const DeltaSubscriptionState&) = delete;\n  DeltaSubscriptionState& operator=(const DeltaSubscriptionState&) = delete;\n\nprivate:\n  void handleGoodResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& message);\n  void handleBadResponse(const EnvoyException& e, UpdateAck& ack);\n\n  class ResourceVersion {\n  public:\n    explicit ResourceVersion(absl::string_view version) : version_(version) {}\n    // Builds a ResourceVersion in the waitingForServer state.\n    ResourceVersion() = default;\n\n    // If true, we currently have no version of this resource - we are waiting for the server to\n    // provide us with one.\n    bool waitingForServer() const { return version_ == absl::nullopt; }\n    // Must not be called if waitingForServer() == true.\n    std::string version() const {\n      ASSERT(version_.has_value());\n      return version_.value_or(\"\");\n    }\n\n  private:\n    absl::optional<std::string> version_;\n  };\n\n  // Use these helpers to ensure resource_versions_ and resource_names_ get updated together.\n  void setResourceVersion(const std::string& resource_name, const std::string& resource_version);\n  void setResourceWaitingForServer(const std::string& resource_name);\n  void setLostInterestInResource(const std::string& resource_name);\n  void populateDiscoveryRequest(envoy::service::discovery::v3::DeltaDiscoveryResponse& request);\n\n  // A map from resource name to per-resource version. The keys of this map are exactly the resource\n  // names we are currently interested in. 
Those in the waitingForServer state currently don't have\n  // any version for that resource: we need to inform the server if we lose interest in them, but we\n  // also need to *not* include them in the initial_resource_versions map upon a reconnect.\n  absl::node_hash_map<std::string, ResourceVersion> resource_versions_;\n  // The keys of resource_versions_. Only tracked separately because std::map does not provide an\n  // iterator into just its keys, e.g. for use in std::set_difference.\n  std::set<std::string> resource_names_;\n\n  const std::string type_url_;\n  UntypedConfigUpdateCallbacks& watch_map_;\n  const LocalInfo::LocalInfo& local_info_;\n  std::chrono::milliseconds init_fetch_timeout_;\n\n  bool any_request_sent_yet_in_current_stream_{};\n\n  // Tracks changes in our subscription interest since the previous DeltaDiscoveryRequest we sent.\n  // TODO: Can't use absl::flat_hash_set due to ordering issues in gTest expectation matching.\n  // Feel free to change to an unordered container once we figure out how to make it work.\n  std::set<std::string> names_added_;\n  std::set<std::string> names_removed_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/filesystem_subscription_impl.cc",
    "content": "#include \"common/config/filesystem_subscription_impl.h\"\n\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/decoded_resource_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nFilesystemSubscriptionImpl::FilesystemSubscriptionImpl(\n    Event::Dispatcher& dispatcher, absl::string_view path, SubscriptionCallbacks& callbacks,\n    OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats,\n    ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api)\n    : path_(path), watcher_(dispatcher.createFilesystemWatcher()), callbacks_(callbacks),\n      resource_decoder_(resource_decoder), stats_(stats), api_(api),\n      validation_visitor_(validation_visitor) {\n  watcher_->addWatch(path_, Filesystem::Watcher::Events::MovedTo, [this](uint32_t) {\n    if (started_) {\n      refresh();\n    }\n  });\n}\n\n// Config::Subscription\nvoid FilesystemSubscriptionImpl::start(const std::set<std::string>&, const bool) {\n  started_ = true;\n  // Attempt to read in case there is a file there already.\n  refresh();\n}\n\nvoid FilesystemSubscriptionImpl::updateResourceInterest(const std::set<std::string>&) {\n  // Bump stats for consistent behavior with other xDS.\n  stats_.update_attempt_.inc();\n}\n\nvoid FilesystemSubscriptionImpl::configRejected(const EnvoyException& e,\n                                                const std::string& message) {\n  ENVOY_LOG(warn, \"Filesystem config update rejected: {}\", e.what());\n  ENVOY_LOG(debug, \"Failed configuration:\\n{}\", message);\n  stats_.update_rejected_.inc();\n  callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e);\n}\n\nstd::string FilesystemSubscriptionImpl::refreshInternal(ProtobufTypes::MessagePtr* config_update) {\n  
auto owned_message = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n  auto& message = *owned_message;\n  MessageUtil::loadFromFile(path_, message, validation_visitor_, api_);\n  *config_update = std::move(owned_message);\n  const auto decoded_resources =\n      DecodedResourcesWrapper(resource_decoder_, message.resources(), message.version_info());\n  callbacks_.onConfigUpdate(decoded_resources.refvec_, message.version_info());\n  return message.version_info();\n}\n\nvoid FilesystemSubscriptionImpl::refresh() {\n  ENVOY_LOG(debug, \"Filesystem config refresh for {}\", path_);\n  stats_.update_attempt_.inc();\n  ProtobufTypes::MessagePtr config_update;\n  try {\n    const std::string version = refreshInternal(&config_update);\n    stats_.update_time_.set(DateUtil::nowToMilliseconds(api_.timeSource()));\n    stats_.version_.set(HashUtil::xxHash64(version));\n    stats_.version_text_.set(version);\n    stats_.update_success_.inc();\n    ENVOY_LOG(debug, \"Filesystem config update accepted for {}: {}\", path_,\n              config_update->DebugString());\n  } catch (const ProtobufMessage::UnknownProtoFieldException& e) {\n    configRejected(e, config_update == nullptr ? \"\" : config_update->DebugString());\n  } catch (const EnvoyException& e) {\n    if (config_update != nullptr) {\n      configRejected(e, config_update->DebugString());\n    } else {\n      ENVOY_LOG(warn, \"Filesystem config update failure: {}\", e.what());\n      stats_.update_failure_.inc();\n      // This could happen due to filesystem issues or a bad configuration (e.g. 
proto validation).\n      // Since the latter is more likely, for now we will treat it as rejection.\n      callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e);\n    }\n  }\n}\n\nFilesystemCollectionSubscriptionImpl::FilesystemCollectionSubscriptionImpl(\n    Event::Dispatcher& dispatcher, absl::string_view path, SubscriptionCallbacks& callbacks,\n    OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats,\n    ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api)\n    : FilesystemSubscriptionImpl(dispatcher, path, callbacks, resource_decoder, stats,\n                                 validation_visitor, api) {}\n\nstd::string\nFilesystemCollectionSubscriptionImpl::refreshInternal(ProtobufTypes::MessagePtr* config_update) {\n  auto owned_resource_message = std::make_unique<envoy::service::discovery::v3::Resource>();\n  auto& resource_message = *owned_resource_message;\n  MessageUtil::loadFromFile(path_, resource_message, validation_visitor_, api_);\n  // Dynamically load the collection message.\n  const std::string collection_type =\n      std::string(TypeUtil::typeUrlToDescriptorFullName(resource_message.resource().type_url()));\n  const Protobuf::Descriptor* collection_descriptor =\n      Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(collection_type);\n  if (collection_descriptor == nullptr) {\n    throw EnvoyException(fmt::format(\"Unknown collection type {}\", collection_type));\n  }\n  Protobuf::DynamicMessageFactory dmf;\n  ProtobufTypes::MessagePtr collection_message;\n  collection_message.reset(dmf.GetPrototype(collection_descriptor)->New());\n  MessageUtil::unpackTo(resource_message.resource(), *collection_message);\n  const auto* collection_entries_field_descriptor = collection_descriptor->field(0);\n  // Verify collection message type structure.\n  if (collection_entries_field_descriptor == nullptr ||\n      collection_entries_field_descriptor->type() != 
Protobuf::FieldDescriptor::TYPE_MESSAGE ||\n      collection_entries_field_descriptor->message_type()->full_name() !=\n          \"udpa.core.v1.CollectionEntry\" ||\n      !collection_entries_field_descriptor->is_repeated()) {\n    throw EnvoyException(fmt::format(\"Invalid structure for collection type {} in {}\",\n                                     collection_type, resource_message.DebugString()));\n  }\n  const auto* reflection = collection_message->GetReflection();\n  const uint32_t num_entries =\n      reflection->FieldSize(*collection_message, collection_entries_field_descriptor);\n  DecodedResourcesWrapper decoded_resources;\n  for (uint32_t i = 0; i < num_entries; ++i) {\n    udpa::core::v1::CollectionEntry collection_entry;\n    collection_entry.MergeFrom(reflection->GetRepeatedMessage(\n        *collection_message, collection_entries_field_descriptor, i));\n    // TODO(htuch): implement indirect collection entries.\n    if (collection_entry.has_inline_entry()) {\n      decoded_resources.pushBack(std::make_unique<DecodedResourceImpl>(\n          resource_decoder_, collection_entry.inline_entry()));\n    }\n  }\n  *config_update = std::move(owned_resource_message);\n  callbacks_.onConfigUpdate(decoded_resources.refvec_, resource_message.version());\n  return resource_message.version();\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/filesystem_subscription_impl.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/protobuf/message_validator.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * Filesystem inotify implementation of the API Subscription interface. This allows the API to be\n * consumed on filesystem changes to files containing the JSON canonical representation of\n * lists of xDS resources.\n */\nclass FilesystemSubscriptionImpl : public Config::Subscription,\n                                   protected Logger::Loggable<Logger::Id::config> {\npublic:\n  FilesystemSubscriptionImpl(Event::Dispatcher& dispatcher, absl::string_view path,\n                             SubscriptionCallbacks& callbacks,\n                             OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats,\n                             ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api);\n\n  // Config::Subscription\n  // We report all discovered resources in the watched file, so the resource names arguments are\n  // unused, and updateResourceInterest is a no-op (other than updating a stat).\n  void start(const std::set<std::string>&, const bool use_namespace_matching = false) override;\n  void updateResourceInterest(const std::set<std::string>&) override;\n  void requestOnDemandUpdate(const std::set<std::string>&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\nprotected:\n  virtual std::string refreshInternal(ProtobufTypes::MessagePtr* config_update);\n  void refresh();\n  void configRejected(const EnvoyException& e, const std::string& message);\n\n  bool started_{};\n  const std::string path_;\n  std::unique_ptr<Filesystem::Watcher> watcher_;\n  SubscriptionCallbacks& callbacks_;\n  OpaqueResourceDecoder& resource_decoder_;\n  SubscriptionStats stats_;\n  Api::Api& api_;\n  
ProtobufMessage::ValidationVisitor& validation_visitor_;\n};\n\n// Currently a FilesystemSubscriptionImpl subclass, but this will need to change when we support\n// non-inline collection resources.\nclass FilesystemCollectionSubscriptionImpl : public FilesystemSubscriptionImpl {\npublic:\n  FilesystemCollectionSubscriptionImpl(Event::Dispatcher& dispatcher, absl::string_view path,\n                                       SubscriptionCallbacks& callbacks,\n                                       OpaqueResourceDecoder& resource_decoder,\n                                       SubscriptionStats stats,\n                                       ProtobufMessage::ValidationVisitor& validation_visitor,\n                                       Api::Api& api);\n\n  std::string refreshInternal(ProtobufTypes::MessagePtr* config_update) override;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/grpc_mux_impl.cc",
    "content": "#include \"common/config/grpc_mux_impl.h\"\n\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/decoded_resource_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/memory/utils.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/container/btree_map.h\"\n#include \"absl/container/node_hash_set.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nGrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info,\n                         Grpc::RawAsyncClientPtr async_client, Event::Dispatcher& dispatcher,\n                         const Protobuf::MethodDescriptor& service_method,\n                         envoy::config::core::v3::ApiVersion transport_api_version,\n                         Random::RandomGenerator& random, Stats::Scope& scope,\n                         const RateLimitSettings& rate_limit_settings, bool skip_subsequent_node)\n    : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope,\n                   rate_limit_settings),\n      local_info_(local_info), skip_subsequent_node_(skip_subsequent_node),\n      first_stream_request_(true), transport_api_version_(transport_api_version),\n      enable_type_url_downgrade_and_upgrade_(Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade\")) {\n  Config::Utility::checkLocalInfo(\"ads\", local_info);\n}\n\nvoid GrpcMuxImpl::start() { grpc_stream_.establishNewStream(); }\n\nvoid GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) {\n  ApiState& api_state = api_state_[type_url];\n  auto& request = api_state.request_;\n  request.mutable_resource_names()->Clear();\n\n  // Maintain a set to avoid dupes.\n  absl::node_hash_set<std::string> resources;\n  for (const auto* watch : api_state.watches_) {\n    for (const std::string& resource : watch->resources_) {\n      if (resources.count(resource) == 0) {\n 
       resources.emplace(resource);\n        request.add_resource_names(resource);\n      }\n    }\n  }\n\n  if (skip_subsequent_node_ && !first_stream_request_) {\n    request.clear_node();\n  }\n  VersionConverter::prepareMessageForGrpcWire(request, transport_api_version_);\n  ENVOY_LOG(trace, \"Sending DiscoveryRequest for {}: {}\", type_url, request.DebugString());\n  grpc_stream_.sendMessage(request);\n  first_stream_request_ = false;\n\n  // clear error_detail after the request is sent if it exists.\n  if (api_state_[type_url].request_.has_error_detail()) {\n    api_state_[type_url].request_.clear_error_detail();\n  }\n}\n\nGrpcMuxWatchPtr GrpcMuxImpl::addWatch(const std::string& type_url,\n                                      const std::set<std::string>& resources,\n                                      SubscriptionCallbacks& callbacks,\n                                      OpaqueResourceDecoder& resource_decoder, const bool) {\n  auto watch =\n      std::make_unique<GrpcMuxWatchImpl>(resources, callbacks, resource_decoder, type_url, *this);\n  ENVOY_LOG(debug, \"gRPC mux addWatch for \" + type_url);\n\n  // Lazily kick off the requests based on first subscription. 
This has the\n  // convenient side-effect that we order messages on the channel based on\n  // Envoy's internal dependency ordering.\n  // TODO(gsagula): move TokenBucketImpl params to a config.\n  if (!api_state_[type_url].subscribed_) {\n    api_state_[type_url].request_.set_type_url(type_url);\n    api_state_[type_url].request_.mutable_node()->MergeFrom(local_info_.node());\n    api_state_[type_url].subscribed_ = true;\n    subscriptions_.emplace_back(type_url);\n    if (enable_type_url_downgrade_and_upgrade_) {\n      registerVersionedTypeUrl(type_url);\n    }\n  }\n\n  // This will send an updated request on each subscription.\n  // TODO(htuch): For RDS/EDS, this will generate a new DiscoveryRequest on each resource we added.\n  // Consider in the future adding some kind of collation/batching during CDS/LDS updates so that we\n  // only send a single RDS/EDS update after the CDS/LDS update.\n  queueDiscoveryRequest(type_url);\n\n  return watch;\n}\n\nScopedResume GrpcMuxImpl::pause(const std::string& type_url) {\n  return pause(std::vector<std::string>{type_url});\n}\n\nScopedResume GrpcMuxImpl::pause(const std::vector<std::string> type_urls) {\n  for (const auto& type_url : type_urls) {\n    ApiState& api_state = api_state_[type_url];\n    ENVOY_LOG(debug, \"Pausing discovery requests for {} (previous count {})\", type_url,\n              api_state.pauses_);\n    ++api_state.pauses_;\n  }\n  return std::make_unique<Cleanup>([this, type_urls]() {\n    for (const auto& type_url : type_urls) {\n      ApiState& api_state = api_state_[type_url];\n      ENVOY_LOG(debug, \"Resuming discovery requests for {} (previous count {})\", type_url,\n                api_state.pauses_);\n      ASSERT(api_state.paused());\n\n      if (--api_state.pauses_ == 0 && api_state.pending_ && api_state.subscribed_) {\n        queueDiscoveryRequest(type_url);\n        api_state.pending_ = false;\n      }\n    }\n  });\n}\n\nvoid GrpcMuxImpl::registerVersionedTypeUrl(const std::string& 
type_url) {\n  TypeUrlMap& type_url_map = typeUrlMap();\n  if (type_url_map.find(type_url) != type_url_map.end()) {\n    return;\n  }\n  // If type_url is v3, earlier_type_url will contain v2 type url.\n  const absl::optional<std::string> earlier_type_url = ApiTypeOracle::getEarlierTypeUrl(type_url);\n  // Register v2 to v3 and v3 to v2 type_url mapping in the hash map.\n  if (earlier_type_url.has_value()) {\n    type_url_map[earlier_type_url.value()] = type_url;\n    type_url_map[type_url] = earlier_type_url.value();\n  }\n}\n\nvoid GrpcMuxImpl::onDiscoveryResponse(\n    std::unique_ptr<envoy::service::discovery::v3::DiscoveryResponse>&& message,\n    ControlPlaneStats& control_plane_stats) {\n  std::string type_url = message->type_url();\n  ENVOY_LOG(debug, \"Received gRPC message for {} at version {}\", type_url, message->version_info());\n  if (message->has_control_plane()) {\n    control_plane_stats.identifier_.set(message->control_plane().identifier());\n  }\n  // If this type url is not watched(no subscriber or no watcher), try another version of type url.\n  if (enable_type_url_downgrade_and_upgrade_ && api_state_.count(type_url) == 0) {\n    registerVersionedTypeUrl(type_url);\n    TypeUrlMap& type_url_map = typeUrlMap();\n    if (type_url_map.find(type_url) != type_url_map.end()) {\n      type_url = type_url_map[type_url];\n    }\n  }\n  if (api_state_.count(type_url) == 0) {\n    // TODO(yuval-k): This should never happen. consider dropping the stream as this is a\n    // protocol violation\n    ENVOY_LOG(warn, \"Ignoring the message for type URL {} as it has no current subscribers.\",\n              type_url);\n    return;\n  }\n  if (api_state_[type_url].watches_.empty()) {\n    // update the nonce as we are processing this response.\n    api_state_[type_url].request_.set_response_nonce(message->nonce());\n    if (message->resources().empty()) {\n      // No watches and no resources. 
This can happen when envoy unregisters from a\n      // resource that's removed from the server as well. For example, a deleted cluster\n      // triggers un-watching the ClusterLoadAssignment watch, and at the same time the\n      // xDS server sends an empty list of ClusterLoadAssignment resources. we'll accept\n      // this update. no need to send a discovery request, as we don't watch for anything.\n      api_state_[type_url].request_.set_version_info(message->version_info());\n    } else {\n      // No watches and we have resources - this should not happen. send a NACK (by not\n      // updating the version).\n      ENVOY_LOG(warn, \"Ignoring unwatched type URL {}\", type_url);\n      queueDiscoveryRequest(type_url);\n    }\n    return;\n  }\n  ScopedResume same_type_resume;\n  // We pause updates of the same type. This is necessary for SotW and GrpcMuxImpl, since unlike\n  // delta and NewGRpcMuxImpl, independent watch additions/removals trigger updates regardless of\n  // the delta state. The proper fix for this is to converge these implementations,\n  // see https://github.com/envoyproxy/envoy/issues/11477.\n  same_type_resume = pause(type_url);\n  try {\n    // To avoid O(n^2) explosion (e.g. when we have 1000s of EDS watches), we\n    // build a map here from resource name to resource and then walk watches_.\n    // We have to walk all watches (and need an efficient map as a result) to\n    // ensure we deliver empty config updates when a resource is dropped. 
We make the map ordered\n    // for test determinism.\n    std::vector<DecodedResourceImplPtr> resources;\n    absl::btree_map<std::string, DecodedResourceRef> resource_ref_map;\n    std::vector<DecodedResourceRef> all_resource_refs;\n    OpaqueResourceDecoder& resource_decoder =\n        api_state_[type_url].watches_.front()->resource_decoder_;\n    for (const auto& resource : message->resources()) {\n      if (message->type_url() != resource.type_url()) {\n        throw EnvoyException(\n            fmt::format(\"{} does not match the message-wide type URL {} in DiscoveryResponse {}\",\n                        resource.type_url(), message->type_url(), message->DebugString()));\n      }\n      resources.emplace_back(\n          new DecodedResourceImpl(resource_decoder, resource, message->version_info()));\n      all_resource_refs.emplace_back(*resources.back());\n      resource_ref_map.emplace(resources.back()->name(), *resources.back());\n    }\n    for (auto watch : api_state_[type_url].watches_) {\n      // onConfigUpdate should be called in all cases for single watch xDS (Cluster and\n      // Listener) even if the message does not have resources so that update_empty stat\n      // is properly incremented and state-of-the-world semantics are maintained.\n      if (watch->resources_.empty()) {\n        watch->callbacks_.onConfigUpdate(all_resource_refs, message->version_info());\n        continue;\n      }\n      std::vector<DecodedResourceRef> found_resources;\n      for (const auto& watched_resource_name : watch->resources_) {\n        auto it = resource_ref_map.find(watched_resource_name);\n        if (it != resource_ref_map.end()) {\n          found_resources.emplace_back(it->second);\n        }\n      }\n      // onConfigUpdate should be called only on watches(clusters/routes) that have\n      // updates in the message for EDS/RDS.\n      if (!found_resources.empty()) {\n        watch->callbacks_.onConfigUpdate(found_resources, message->version_info());\n   
   }\n    }\n    // TODO(mattklein123): In the future if we start tracking per-resource versions, we\n    // would do that tracking here.\n    api_state_[type_url].request_.set_version_info(message->version_info());\n    Memory::Utils::tryShrinkHeap();\n  } catch (const EnvoyException& e) {\n    for (auto watch : api_state_[type_url].watches_) {\n      watch->callbacks_.onConfigUpdateFailed(\n          Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e);\n    }\n    ::google::rpc::Status* error_detail = api_state_[type_url].request_.mutable_error_detail();\n    error_detail->set_code(Grpc::Status::WellKnownGrpcStatus::Internal);\n    error_detail->set_message(Config::Utility::truncateGrpcStatusMessage(e.what()));\n  }\n  api_state_[type_url].request_.set_response_nonce(message->nonce());\n  ASSERT(api_state_[type_url].paused());\n  queueDiscoveryRequest(type_url);\n}\n\nvoid GrpcMuxImpl::onWriteable() { drainRequests(); }\n\nvoid GrpcMuxImpl::onStreamEstablished() {\n  first_stream_request_ = true;\n  grpc_stream_.maybeUpdateQueueSizeStat(0);\n  request_queue_ = std::make_unique<std::queue<std::string>>();\n  for (const auto& type_url : subscriptions_) {\n    queueDiscoveryRequest(type_url);\n  }\n}\n\nvoid GrpcMuxImpl::onEstablishmentFailure() {\n  for (const auto& api_state : api_state_) {\n    for (auto watch : api_state.second.watches_) {\n      watch->callbacks_.onConfigUpdateFailed(\n          Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, nullptr);\n    }\n  }\n}\n\nvoid GrpcMuxImpl::queueDiscoveryRequest(const std::string& queue_item) {\n  if (!grpc_stream_.grpcStreamAvailable()) {\n    ENVOY_LOG(debug, \"No stream available to queueDiscoveryRequest for {}\", queue_item);\n    return; // Drop this request; the reconnect will enqueue a new one.\n  }\n  ApiState& api_state = api_state_[queue_item];\n  if (api_state.paused()) {\n    ENVOY_LOG(trace, \"API {} paused during queueDiscoveryRequest(), setting pending.\", queue_item);\n    
api_state.pending_ = true;\n    return; // Drop this request; the unpause will enqueue a new one.\n  }\n  request_queue_->push(queue_item);\n  drainRequests();\n}\n\nvoid GrpcMuxImpl::drainRequests() {\n  while (!request_queue_->empty() && grpc_stream_.checkRateLimitAllowsDrain()) {\n    // Process the request, if rate limiting is not enabled at all or if it is under rate limit.\n    sendDiscoveryRequest(request_queue_->front());\n    request_queue_->pop();\n  }\n  grpc_stream_.maybeUpdateQueueSizeStat(request_queue_->size());\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/grpc_mux_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <queue>\n\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/config/grpc_mux.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/grpc_stream.h\"\n#include \"common/config/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Config {\n/**\n * ADS API implementation that fetches via gRPC.\n */\nclass GrpcMuxImpl : public GrpcMux,\n                    public GrpcStreamCallbacks<envoy::service::discovery::v3::DiscoveryResponse>,\n                    public Logger::Loggable<Logger::Id::config> {\npublic:\n  GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::RawAsyncClientPtr async_client,\n              Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method,\n              envoy::config::core::v3::ApiVersion transport_api_version,\n              Random::RandomGenerator& random, Stats::Scope& scope,\n              const RateLimitSettings& rate_limit_settings, bool skip_subsequent_node);\n  ~GrpcMuxImpl() override = default;\n\n  void start() override;\n\n  // GrpcMux\n  ScopedResume pause(const std::string& type_url) override;\n  ScopedResume pause(const std::vector<std::string> type_urls) override;\n\n  GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set<std::string>& resources,\n                           SubscriptionCallbacks& callbacks,\n                           OpaqueResourceDecoder& resource_decoder,\n                  
         const bool use_namespace_matching = false) override;\n\n  void requestOnDemandUpdate(const std::string&, const std::set<std::string>&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  void handleDiscoveryResponse(\n      std::unique_ptr<envoy::service::discovery::v3::DiscoveryResponse>&& message);\n\n  // Config::GrpcStreamCallbacks\n  void onStreamEstablished() override;\n  void onEstablishmentFailure() override;\n  void registerVersionedTypeUrl(const std::string& type_url);\n  void\n  onDiscoveryResponse(std::unique_ptr<envoy::service::discovery::v3::DiscoveryResponse>&& message,\n                      ControlPlaneStats& control_plane_stats) override;\n  void onWriteable() override;\n\n  GrpcStream<envoy::service::discovery::v3::DiscoveryRequest,\n             envoy::service::discovery::v3::DiscoveryResponse>&\n  grpcStreamForTest() {\n    return grpc_stream_;\n  }\n\nprivate:\n  void drainRequests();\n  void setRetryTimer();\n  void sendDiscoveryRequest(const std::string& type_url);\n\n  struct GrpcMuxWatchImpl : public GrpcMuxWatch {\n    GrpcMuxWatchImpl(const std::set<std::string>& resources, SubscriptionCallbacks& callbacks,\n                     OpaqueResourceDecoder& resource_decoder, const std::string& type_url,\n                     GrpcMuxImpl& parent)\n        : resources_(resources), callbacks_(callbacks), resource_decoder_(resource_decoder),\n          type_url_(type_url), parent_(parent), watches_(parent.api_state_[type_url].watches_) {\n      watches_.emplace(watches_.begin(), this);\n    }\n\n    ~GrpcMuxWatchImpl() override {\n      watches_.remove(this);\n      if (!resources_.empty()) {\n        parent_.queueDiscoveryRequest(type_url_);\n      }\n    }\n\n    void update(const std::set<std::string>& resources) override {\n      watches_.remove(this);\n      if (!resources_.empty()) {\n        parent_.queueDiscoveryRequest(type_url_);\n      }\n      resources_ = resources;\n      // move this watch to the beginning of the 
list\n      watches_.emplace(watches_.begin(), this);\n      parent_.queueDiscoveryRequest(type_url_);\n    }\n\n    std::set<std::string> resources_;\n    SubscriptionCallbacks& callbacks_;\n    OpaqueResourceDecoder& resource_decoder_;\n    const std::string type_url_;\n    GrpcMuxImpl& parent_;\n\n  private:\n    std::list<GrpcMuxWatchImpl*>& watches_;\n  };\n\n  // Per muxed API state.\n  struct ApiState {\n    bool paused() const { return pauses_ > 0; }\n\n    // Watches on the returned resources for the API;\n    std::list<GrpcMuxWatchImpl*> watches_;\n    // Current DiscoveryRequest for API.\n    envoy::service::discovery::v3::DiscoveryRequest request_;\n    // Count of unresumed pause() invocations.\n    uint32_t pauses_{};\n    // Was a DiscoveryRequest elided during a pause?\n    bool pending_{};\n    // Has this API been tracked in subscriptions_?\n    bool subscribed_{};\n  };\n\n  // Request queue management logic.\n  void queueDiscoveryRequest(const std::string& queue_item);\n\n  GrpcStream<envoy::service::discovery::v3::DiscoveryRequest,\n             envoy::service::discovery::v3::DiscoveryResponse>\n      grpc_stream_;\n  const LocalInfo::LocalInfo& local_info_;\n  const bool skip_subsequent_node_;\n  bool first_stream_request_;\n  absl::node_hash_map<std::string, ApiState> api_state_;\n  // Envoy's dependency ordering.\n  std::list<std::string> subscriptions_;\n\n  // A queue to store requests while rate limited. 
Note that when requests cannot be sent due to the\n  // gRPC stream being down, this queue does not store them; rather, they are simply dropped.\n  // This string is a type URL.\n  std::unique_ptr<std::queue<std::string>> request_queue_;\n  const envoy::config::core::v3::ApiVersion transport_api_version_;\n  bool enable_type_url_downgrade_and_upgrade_;\n};\n\nusing GrpcMuxImplPtr = std::unique_ptr<GrpcMuxImpl>;\nusing GrpcMuxImplSharedPtr = std::shared_ptr<GrpcMuxImpl>;\n\nclass NullGrpcMuxImpl : public GrpcMux,\n                        GrpcStreamCallbacks<envoy::service::discovery::v3::DiscoveryResponse> {\npublic:\n  void start() override {}\n  ScopedResume pause(const std::string&) override {\n    return std::make_unique<Cleanup>([] {});\n  }\n  ScopedResume pause(const std::vector<std::string>) override {\n    return std::make_unique<Cleanup>([] {});\n  }\n\n  GrpcMuxWatchPtr addWatch(const std::string&, const std::set<std::string>&, SubscriptionCallbacks&,\n                           OpaqueResourceDecoder&, const bool) override {\n    ExceptionUtil::throwEnvoyException(\"ADS must be configured to support an ADS config source\");\n  }\n\n  void requestOnDemandUpdate(const std::string&, const std::set<std::string>&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  void onWriteable() override {}\n  void onStreamEstablished() override {}\n  void onEstablishmentFailure() override {}\n  void onDiscoveryResponse(std::unique_ptr<envoy::service::discovery::v3::DiscoveryResponse>&&,\n                           ControlPlaneStats&) override {}\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/grpc_stream.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/grpc_mux.h\"\n#include \"envoy/grpc/async_client.h\"\n\n#include \"common/common/backoff_strategy.h\"\n#include \"common/common/token_bucket_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/grpc/typed_async_client.h\"\n\nnamespace Envoy {\nnamespace Config {\n\ntemplate <class ResponseProto> using ResponseProtoPtr = std::unique_ptr<ResponseProto>;\n\n// Oversees communication for gRPC xDS implementations (parent to both regular xDS and delta\n// xDS variants). Reestablishes the gRPC channel when necessary, and provides rate limiting of\n// requests.\ntemplate <class RequestProto, class ResponseProto>\nclass GrpcStream : public Grpc::AsyncStreamCallbacks<ResponseProto>,\n                   public Logger::Loggable<Logger::Id::config> {\npublic:\n  GrpcStream(GrpcStreamCallbacks<ResponseProto>* callbacks, Grpc::RawAsyncClientPtr async_client,\n             const Protobuf::MethodDescriptor& service_method, Random::RandomGenerator& random,\n             Event::Dispatcher& dispatcher, Stats::Scope& scope,\n             const RateLimitSettings& rate_limit_settings)\n      : callbacks_(callbacks), async_client_(std::move(async_client)),\n        service_method_(service_method),\n        control_plane_stats_(Utility::generateControlPlaneStats(scope)), random_(random),\n        time_source_(dispatcher.timeSource()),\n        rate_limiting_enabled_(rate_limit_settings.enabled_) {\n    retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); });\n    if (rate_limiting_enabled_) {\n      // Default Bucket contains 100 tokens maximum and refills at 10 tokens/sec.\n      limit_request_ = std::make_unique<TokenBucketImpl>(\n          rate_limit_settings.max_tokens_, time_source_, rate_limit_settings.fill_rate_);\n      drain_request_timer_ = dispatcher.createTimer([this]() {\n        if (stream_ != 
nullptr) {\n          callbacks_->onWriteable();\n        }\n      });\n    }\n\n    // TODO(htuch): Make this configurable.\n    static constexpr uint32_t RetryInitialDelayMs = 500;\n    static constexpr uint32_t RetryMaxDelayMs = 30000; // Do not cross more than 30s\n    backoff_strategy_ = std::make_unique<JitteredExponentialBackOffStrategy>(\n        RetryInitialDelayMs, RetryMaxDelayMs, random_);\n  }\n\n  void establishNewStream() {\n    ENVOY_LOG(debug, \"Establishing new gRPC bidi stream for {}\", service_method_.DebugString());\n    if (stream_ != nullptr) {\n      ENVOY_LOG(warn, \"gRPC bidi stream for {} already exists!\", service_method_.DebugString());\n      return;\n    }\n    stream_ = async_client_->start(service_method_, *this, Http::AsyncClient::StreamOptions());\n    if (stream_ == nullptr) {\n      ENVOY_LOG(warn, \"Unable to establish new stream\");\n      callbacks_->onEstablishmentFailure();\n      setRetryTimer();\n      return;\n    }\n    control_plane_stats_.connected_state_.set(1);\n    callbacks_->onStreamEstablished();\n  }\n\n  bool grpcStreamAvailable() const { return stream_ != nullptr; }\n\n  void sendMessage(const RequestProto& request) { stream_->sendMessage(request, false); }\n\n  // Grpc::AsyncStreamCallbacks\n  void onCreateInitialMetadata(Http::RequestHeaderMap& metadata) override {\n    UNREFERENCED_PARAMETER(metadata);\n  }\n\n  void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata) override {\n    UNREFERENCED_PARAMETER(metadata);\n  }\n\n  void onReceiveMessage(ResponseProtoPtr<ResponseProto>&& message) override {\n    // Reset here so that it starts with fresh backoff interval on next disconnect.\n    backoff_strategy_->reset();\n    // Sometimes during hot restarts this stat's value becomes inconsistent and will continue to\n    // have 0 until it is reconnected. 
Setting here ensures that it is consistent with the state of\n    // management server connection.\n    control_plane_stats_.connected_state_.set(1);\n    callbacks_->onDiscoveryResponse(std::move(message), control_plane_stats_);\n  }\n\n  void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) override {\n    UNREFERENCED_PARAMETER(metadata);\n  }\n\n  void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override {\n    ENVOY_LOG(warn, \"{} gRPC config stream closed: {}, {}\", service_method_.name(), status,\n              message);\n    stream_ = nullptr;\n    control_plane_stats_.connected_state_.set(0);\n    callbacks_->onEstablishmentFailure();\n    setRetryTimer();\n  }\n\n  void maybeUpdateQueueSizeStat(uint64_t size) {\n    // Although request_queue_.push() happens elsewhere, the only time the queue is non-transiently\n    // non-empty is when it remains non-empty after a drain attempt. (The push() doesn't matter\n    // because we always attempt this drain immediately after the push). Basically, a change in\n    // queue length is not \"meaningful\" until it has persisted until here. We need the\n    // if(>0 || used) to keep this stat from being wrongly marked interesting by a pointless set(0)\n    // and needlessly taking up space. 
The first time we set(123), used becomes true, and so we will\n    // subsequently always do the set (including set(0)).\n    if (size > 0 || control_plane_stats_.pending_requests_.used()) {\n      control_plane_stats_.pending_requests_.set(size);\n    }\n  }\n\n  bool checkRateLimitAllowsDrain() {\n    if (!rate_limiting_enabled_ || limit_request_->consume(1, false)) {\n      return true;\n    }\n    ASSERT(drain_request_timer_ != nullptr);\n    control_plane_stats_.rate_limit_enforced_.inc();\n    // Enable the drain request timer.\n    if (!drain_request_timer_->enabled()) {\n      drain_request_timer_->enableTimer(limit_request_->nextTokenAvailable());\n    }\n    return false;\n  }\n\nprivate:\n  void setRetryTimer() {\n    retry_timer_->enableTimer(std::chrono::milliseconds(backoff_strategy_->nextBackOffMs()));\n  }\n\n  GrpcStreamCallbacks<ResponseProto>* const callbacks_;\n\n  Grpc::AsyncClient<RequestProto, ResponseProto> async_client_;\n  Grpc::AsyncStream<RequestProto> stream_{};\n  const Protobuf::MethodDescriptor& service_method_;\n  ControlPlaneStats control_plane_stats_;\n\n  // Reestablishes the gRPC channel when necessary, with some backoff politeness.\n  Event::TimerPtr retry_timer_;\n  Random::RandomGenerator& random_;\n  TimeSource& time_source_;\n  BackOffStrategyPtr backoff_strategy_;\n\n  // Prevents the Envoy from making too many requests.\n  TokenBucketPtr limit_request_;\n  const bool rate_limiting_enabled_;\n  Event::TimerPtr drain_request_timer_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/grpc_subscription_impl.cc",
    "content": "#include \"common/config/grpc_subscription_impl.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nGrpcSubscriptionImpl::GrpcSubscriptionImpl(\n    GrpcMuxSharedPtr grpc_mux, SubscriptionCallbacks& callbacks,\n    OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, absl::string_view type_url,\n    Event::Dispatcher& dispatcher, std::chrono::milliseconds init_fetch_timeout, bool is_aggregated)\n    : grpc_mux_(grpc_mux), callbacks_(callbacks), resource_decoder_(resource_decoder),\n      stats_(stats), type_url_(type_url), dispatcher_(dispatcher),\n      init_fetch_timeout_(init_fetch_timeout), is_aggregated_(is_aggregated) {}\n\n// Config::Subscription\nvoid GrpcSubscriptionImpl::start(const std::set<std::string>& resources,\n                                 const bool use_namespace_matching) {\n  if (init_fetch_timeout_.count() > 0) {\n    init_fetch_timeout_timer_ = dispatcher_.createTimer([this]() -> void {\n      callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout,\n                                      nullptr);\n    });\n    init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_);\n  }\n\n  watch_ =\n      grpc_mux_->addWatch(type_url_, resources, *this, resource_decoder_, use_namespace_matching);\n\n  // The attempt stat here is maintained for the purposes of having consistency between ADS and\n  // gRPC/filesystem/REST Subscriptions. 
Since ADS is push based and muxed, the notion of an\n  // \"attempt\" for a given xDS API combined by ADS is not really that meaningful.\n  stats_.update_attempt_.inc();\n\n  // ADS initial request batching relies on the users of the GrpcMux *not* calling start on it,\n  // whereas non-ADS xDS users must call it themselves.\n  if (!is_aggregated_) {\n    grpc_mux_->start();\n  }\n}\n\nvoid GrpcSubscriptionImpl::updateResourceInterest(\n    const std::set<std::string>& update_to_these_names) {\n  watch_->update(update_to_these_names);\n  stats_.update_attempt_.inc();\n}\n\nvoid GrpcSubscriptionImpl::requestOnDemandUpdate(const std::set<std::string>& for_update) {\n  grpc_mux_->requestOnDemandUpdate(type_url_, for_update);\n  stats_.update_attempt_.inc();\n}\n\n// Config::SubscriptionCallbacks\nvoid GrpcSubscriptionImpl::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                                          const std::string& version_info) {\n  disableInitFetchTimeoutTimer();\n  // TODO(mattklein123): In the future if we start tracking per-resource versions, we need to\n  // supply those versions to onConfigUpdate() along with the xDS response (\"system\")\n  // version_info. 
This way, both types of versions can be tracked and exposed for debugging by\n  // the configuration update targets.\n  callbacks_.onConfigUpdate(resources, version_info);\n  stats_.update_success_.inc();\n  stats_.update_attempt_.inc();\n  stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource()));\n  stats_.version_.set(HashUtil::xxHash64(version_info));\n  stats_.version_text_.set(version_info);\n  ENVOY_LOG(debug, \"gRPC config for {} accepted with {} resources with version {}\", type_url_,\n            resources.size(), version_info);\n}\n\nvoid GrpcSubscriptionImpl::onConfigUpdate(\n    const std::vector<Config::DecodedResourceRef>& added_resources,\n    const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n    const std::string& system_version_info) {\n  disableInitFetchTimeoutTimer();\n  stats_.update_attempt_.inc();\n  callbacks_.onConfigUpdate(added_resources, removed_resources, system_version_info);\n  stats_.update_success_.inc();\n  stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource()));\n  stats_.version_.set(HashUtil::xxHash64(system_version_info));\n  stats_.version_text_.set(system_version_info);\n}\n\nvoid GrpcSubscriptionImpl::onConfigUpdateFailed(ConfigUpdateFailureReason reason,\n                                                const EnvoyException* e) {\n  switch (reason) {\n  case Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure:\n    stats_.update_failure_.inc();\n    ENVOY_LOG(debug, \"gRPC update for {} failed\", type_url_);\n    break;\n  case Envoy::Config::ConfigUpdateFailureReason::FetchTimedout:\n    stats_.init_fetch_timeout_.inc();\n    disableInitFetchTimeoutTimer();\n    ENVOY_LOG(warn, \"gRPC config: initial fetch timed out for {}\", type_url_);\n    callbacks_.onConfigUpdateFailed(reason, e);\n    break;\n  case Envoy::Config::ConfigUpdateFailureReason::UpdateRejected:\n    // We expect Envoy exception to be thrown when update is rejected.\n    ASSERT(e != 
nullptr);\n    disableInitFetchTimeoutTimer();\n    stats_.update_rejected_.inc();\n    ENVOY_LOG(warn, \"gRPC config for {} rejected: {}\", type_url_, e->what());\n    callbacks_.onConfigUpdateFailed(reason, e);\n    break;\n  }\n\n  stats_.update_attempt_.inc();\n}\n\nScopedResume GrpcSubscriptionImpl::pause() { return grpc_mux_->pause(type_url_); }\n\nvoid GrpcSubscriptionImpl::disableInitFetchTimeoutTimer() {\n  if (init_fetch_timeout_timer_) {\n    init_fetch_timeout_timer_->disableTimer();\n    init_fetch_timeout_timer_.reset();\n  }\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/grpc_subscription_impl.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/grpc_mux.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/event/dispatcher.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * Adapter from typed Subscription to untyped GrpcMux. Also handles per-xDS API stats/logging.\n */\nclass GrpcSubscriptionImpl : public Subscription,\n                             SubscriptionCallbacks,\n                             Logger::Loggable<Logger::Id::config> {\npublic:\n  GrpcSubscriptionImpl(GrpcMuxSharedPtr grpc_mux, SubscriptionCallbacks& callbacks,\n                       OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats,\n                       absl::string_view type_url, Event::Dispatcher& dispatcher,\n                       std::chrono::milliseconds init_fetch_timeout, bool is_aggregated);\n\n  // Config::Subscription\n  void start(const std::set<std::string>& resource_names,\n             const bool use_namespace_matching = false) override;\n  void updateResourceInterest(const std::set<std::string>& update_to_these_names) override;\n  void requestOnDemandUpdate(const std::set<std::string>& add_these_names) override;\n  // Config::SubscriptionCallbacks (all pass through to callbacks_!)\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                      const std::string& system_version_info) override;\n  void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) override;\n\n  GrpcMuxSharedPtr grpcMux() { return grpc_mux_; }\n\n  ScopedResume pause();\n\nprivate:\n  void disableInitFetchTimeoutTimer();\n\n  GrpcMuxSharedPtr grpc_mux_;\n  SubscriptionCallbacks& callbacks_;\n  
OpaqueResourceDecoder& resource_decoder_;\n  SubscriptionStats stats_;\n  const std::string type_url_;\n  GrpcMuxWatchPtr watch_;\n  Event::Dispatcher& dispatcher_;\n  // NOTE: if another subscription of the same type_url has already been started, this value will be\n  // ignored in favor of the other subscription's.\n  std::chrono::milliseconds init_fetch_timeout_;\n  Event::TimerPtr init_fetch_timeout_timer_;\n  const bool is_aggregated_;\n};\n\nusing GrpcSubscriptionImplPtr = std::unique_ptr<GrpcSubscriptionImpl>;\nusing GrpcSubscriptionImplSharedPtr = std::shared_ptr<GrpcSubscriptionImpl>;\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/http_subscription_impl.cc",
    "content": "#include \"common/config/http_subscription_impl.h\"\n\n#include <memory>\n\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/decoded_resource_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/http/headers.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"google/api/annotations.pb.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nHttpSubscriptionImpl::HttpSubscriptionImpl(\n    const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm,\n    const std::string& remote_cluster_name, Event::Dispatcher& dispatcher,\n    Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval,\n    std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method,\n    absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version,\n    SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder,\n    SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout,\n    ProtobufMessage::ValidationVisitor& validation_visitor)\n    : Http::RestApiFetcher(cm, remote_cluster_name, dispatcher, random, refresh_interval,\n                           request_timeout),\n      callbacks_(callbacks), resource_decoder_(resource_decoder), stats_(stats),\n      dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout),\n      validation_visitor_(validation_visitor), transport_api_version_(transport_api_version) {\n  request_.mutable_node()->CopyFrom(local_info.node());\n  request_.set_type_url(std::string(type_url));\n  ASSERT(service_method.options().HasExtension(google::api::http));\n  const auto& http_rule = service_method.options().GetExtension(google::api::http);\n  path_ = 
http_rule.post();\n  ASSERT(http_rule.body() == \"*\");\n}\n\n// Config::Subscription\nvoid HttpSubscriptionImpl::start(const std::set<std::string>& resource_names, const bool) {\n  if (init_fetch_timeout_.count() > 0) {\n    init_fetch_timeout_timer_ = dispatcher_.createTimer([this]() -> void {\n      handleFailure(Config::ConfigUpdateFailureReason::FetchTimedout, nullptr);\n    });\n    init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_);\n  }\n\n  Protobuf::RepeatedPtrField<std::string> resources_vector(resource_names.begin(),\n                                                           resource_names.end());\n  request_.mutable_resource_names()->Swap(&resources_vector);\n  initialize();\n}\n\nvoid HttpSubscriptionImpl::updateResourceInterest(\n    const std::set<std::string>& update_to_these_names) {\n  Protobuf::RepeatedPtrField<std::string> resources_vector(update_to_these_names.begin(),\n                                                           update_to_these_names.end());\n  request_.mutable_resource_names()->Swap(&resources_vector);\n}\n\n// Http::RestApiFetcher\nvoid HttpSubscriptionImpl::createRequest(Http::RequestMessage& request) {\n  ENVOY_LOG(debug, \"Sending REST request for {}\", path_);\n  stats_.update_attempt_.inc();\n  request.headers().setReferenceMethod(Http::Headers::get().MethodValues.Post);\n  request.headers().setPath(path_);\n  request.body().add(VersionConverter::getJsonStringFromMessage(request_, transport_api_version_));\n  request.headers().setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  request.headers().setContentLength(request.body().length());\n}\n\nvoid HttpSubscriptionImpl::parseResponse(const Http::ResponseMessage& response) {\n  disableInitFetchTimeoutTimer();\n  envoy::service::discovery::v3::DiscoveryResponse message;\n  try {\n    MessageUtil::loadFromJson(response.bodyAsString(), message, validation_visitor_);\n  } catch (const EnvoyException& e) {\n    
handleFailure(Config::ConfigUpdateFailureReason::UpdateRejected, &e);\n    return;\n  }\n  try {\n    const auto decoded_resources =\n        DecodedResourcesWrapper(resource_decoder_, message.resources(), message.version_info());\n    callbacks_.onConfigUpdate(decoded_resources.refvec_, message.version_info());\n    request_.set_version_info(message.version_info());\n    stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource()));\n    stats_.version_.set(HashUtil::xxHash64(request_.version_info()));\n    stats_.version_text_.set(request_.version_info());\n    stats_.update_success_.inc();\n  } catch (const EnvoyException& e) {\n    handleFailure(Config::ConfigUpdateFailureReason::UpdateRejected, &e);\n  }\n}\n\nvoid HttpSubscriptionImpl::onFetchComplete() {}\n\nvoid HttpSubscriptionImpl::onFetchFailure(Config::ConfigUpdateFailureReason reason,\n                                          const EnvoyException* e) {\n  handleFailure(reason, e);\n}\n\nvoid HttpSubscriptionImpl::handleFailure(Config::ConfigUpdateFailureReason reason,\n                                         const EnvoyException* e) {\n\n  switch (reason) {\n  case Config::ConfigUpdateFailureReason::ConnectionFailure:\n    ENVOY_LOG(warn, \"REST update for {} failed\", path_);\n    stats_.update_failure_.inc();\n    break;\n  case Config::ConfigUpdateFailureReason::FetchTimedout:\n    ENVOY_LOG(warn, \"REST config: initial fetch timeout for {}\", path_);\n    stats_.init_fetch_timeout_.inc();\n    disableInitFetchTimeoutTimer();\n    break;\n  case Config::ConfigUpdateFailureReason::UpdateRejected:\n    ASSERT(e != nullptr);\n    ENVOY_LOG(warn, \"REST config for {} rejected: {}\", path_, e->what());\n    stats_.update_rejected_.inc();\n    disableInitFetchTimeoutTimer();\n    break;\n  }\n\n  if (reason == Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure) {\n    // New requests will be sent again.\n    // If init_fetch_timeout is non-zero, server will continue startup 
after it timeout\n    return;\n  }\n\n  callbacks_.onConfigUpdateFailed(reason, e);\n}\n\nvoid HttpSubscriptionImpl::disableInitFetchTimeoutTimer() {\n  if (init_fetch_timeout_timer_) {\n    init_fetch_timeout_timer_->disableTimer();\n    init_fetch_timeout_timer_.reset();\n  }\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/http_subscription_impl.h",
    "content": "#pragma once\n\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/http/rest_api_fetcher.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * REST implementation of the API Subscription interface. This fetches the API via periodic polling\n * with jitter (based on RestApiFetcher). The REST requests are POSTs of the JSON canonical\n * representation of the DiscoveryRequest proto and the responses are in the form of the JSON\n * canonical representation of DiscoveryResponse. This implementation is responsible for translating\n * between the proto serializable objects in the Subscription API and the REST JSON representation.\n */\nclass HttpSubscriptionImpl : public Http::RestApiFetcher,\n                             public Config::Subscription,\n                             Logger::Loggable<Logger::Id::config> {\npublic:\n  HttpSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm,\n                       const std::string& remote_cluster_name, Event::Dispatcher& dispatcher,\n                       Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval,\n                       std::chrono::milliseconds request_timeout,\n                       const Protobuf::MethodDescriptor& service_method, absl::string_view type_url,\n                       envoy::config::core::v3::ApiVersion transport_api_version,\n                       SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder,\n                       SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout,\n                       ProtobufMessage::ValidationVisitor& validation_visitor);\n\n  // Config::Subscription\n  void start(const std::set<std::string>& resource_names,\n        
     const bool use_namespace_matching = false) override;\n  void updateResourceInterest(const std::set<std::string>& update_to_these_names) override;\n  void requestOnDemandUpdate(const std::set<std::string>&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  // Http::RestApiFetcher\n  void createRequest(Http::RequestMessage& request) override;\n  void parseResponse(const Http::ResponseMessage& response) override;\n  void onFetchComplete() override;\n  void onFetchFailure(Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override;\n\nprivate:\n  void handleFailure(Config::ConfigUpdateFailureReason reason, const EnvoyException* e);\n  void disableInitFetchTimeoutTimer();\n\n  std::string path_;\n  Protobuf::RepeatedPtrField<std::string> resources_;\n  envoy::service::discovery::v3::DiscoveryRequest request_;\n  Config::SubscriptionCallbacks& callbacks_;\n  Config::OpaqueResourceDecoder& resource_decoder_;\n  SubscriptionStats stats_;\n  Event::Dispatcher& dispatcher_;\n  std::chrono::milliseconds init_fetch_timeout_;\n  Event::TimerPtr init_fetch_timeout_timer_;\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n  const envoy::config::core::v3::ApiVersion transport_api_version_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/metadata.cc",
    "content": "#include \"common/config/metadata.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/type/metadata/v3/metadata.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nSINGLETON_MANAGER_REGISTRATION(const_metadata_shared_pool);\n\nMetadataKey::MetadataKey(const envoy::type::metadata::v3::MetadataKey& metadata_key)\n    : key_(metadata_key.key()) {\n  for (const auto& seg : metadata_key.path()) {\n    path_.push_back(seg.key());\n  }\n}\n\nconst ProtobufWkt::Value& Metadata::metadataValue(const envoy::config::core::v3::Metadata* metadata,\n                                                  const MetadataKey& metadata_key) {\n  return metadataValue(metadata, metadata_key.key_, metadata_key.path_);\n}\n\nconst ProtobufWkt::Value& Metadata::metadataValue(const envoy::config::core::v3::Metadata* metadata,\n                                                  const std::string& filter,\n                                                  const std::vector<std::string>& path) {\n  if (!metadata) {\n    return ProtobufWkt::Value::default_instance();\n  }\n  const auto filter_it = metadata->filter_metadata().find(filter);\n  if (filter_it == metadata->filter_metadata().end()) {\n    return ProtobufWkt::Value::default_instance();\n  }\n  const ProtobufWkt::Struct* data_struct = &(filter_it->second);\n  const ProtobufWkt::Value* val = nullptr;\n  // go through path to select sub entries\n  for (const auto& p : path) {\n    if (nullptr == data_struct) { // sub entry not found\n      return ProtobufWkt::Value::default_instance();\n    }\n    const auto entry_it = data_struct->fields().find(p);\n    if (entry_it == data_struct->fields().end()) {\n      return ProtobufWkt::Value::default_instance();\n    }\n    val = &(entry_it->second);\n    if (val->has_struct_value()) {\n      data_struct = &(val->struct_value());\n    } else {\n      data_struct = nullptr;\n    }\n  }\n  if (nullptr == val) {\n    return 
ProtobufWkt::Value::default_instance();\n  }\n  return *val;\n}\n\nconst ProtobufWkt::Value& Metadata::metadataValue(const envoy::config::core::v3::Metadata* metadata,\n                                                  const std::string& filter,\n                                                  const std::string& key) {\n  const std::vector<std::string> path{key};\n  return metadataValue(metadata, filter, path);\n}\n\nProtobufWkt::Value& Metadata::mutableMetadataValue(envoy::config::core::v3::Metadata& metadata,\n                                                   const std::string& filter,\n                                                   const std::string& key) {\n  return (*(*metadata.mutable_filter_metadata())[filter].mutable_fields())[key];\n}\n\nbool Metadata::metadataLabelMatch(const LabelSet& label_set,\n                                  const envoy::config::core::v3::Metadata* host_metadata,\n                                  const std::string& filter_key, bool list_as_any) {\n  if (!host_metadata) {\n    return label_set.empty();\n  }\n  const auto filter_it = host_metadata->filter_metadata().find(filter_key);\n  if (filter_it == host_metadata->filter_metadata().end()) {\n    return label_set.empty();\n  }\n  const ProtobufWkt::Struct& data_struct = filter_it->second;\n  const auto& fields = data_struct.fields();\n  for (const auto& kv : label_set) {\n    const auto entry_it = fields.find(kv.first);\n    if (entry_it == fields.end()) {\n      return false;\n    }\n\n    if (list_as_any && entry_it->second.kind_case() == ProtobufWkt::Value::kListValue) {\n      bool any_match = false;\n      for (const auto& v : entry_it->second.list_value().values()) {\n        if (ValueUtil::equal(v, kv.second)) {\n          any_match = true;\n          break;\n        }\n      }\n      if (!any_match) {\n        return false;\n      }\n    } else if (!ValueUtil::equal(entry_it->second, kv.second)) {\n      return false;\n    }\n  }\n  return 
true;\n}\n\nConstMetadataSharedPoolSharedPtr\nMetadata::getConstMetadataSharedPool(Singleton::Manager& manager, Event::Dispatcher& dispatcher) {\n  return manager\n      .getTyped<SharedPool::ObjectSharedPool<const envoy::config::core::v3::Metadata, MessageUtil>>(\n          SINGLETON_MANAGER_REGISTERED_NAME(const_metadata_shared_pool), [&dispatcher] {\n            return std::make_shared<\n                SharedPool::ObjectSharedPool<const envoy::config::core::v3::Metadata, MessageUtil>>(\n                dispatcher);\n          });\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/metadata.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/type/metadata/v3/metadata.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/shared_pool/shared_pool.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nusing ConstMetadataSharedPoolSharedPtr = std::shared_ptr<\n    SharedPool::ObjectSharedPool<const envoy::config::core::v3::Metadata, MessageUtil>>;\n\n/**\n * MetadataKey presents the key name and path to retrieve value from metadata.\n */\nstruct MetadataKey {\n  std::string key_;\n  std::vector<std::string> path_;\n\n  MetadataKey(const envoy::type::metadata::v3::MetadataKey& metadata_key);\n};\n\n/**\n * Config metadata helpers.\n */\nclass Metadata {\npublic:\n  /**\n   * Lookup value of a key for a given filter in Metadata.\n   * @param metadata reference.\n   * @param filter name.\n   * @param key for filter metadata.\n   * @return const ProtobufWkt::Value& value if found, empty if not found.\n   */\n  static const ProtobufWkt::Value& metadataValue(const envoy::config::core::v3::Metadata* metadata,\n                                                 const std::string& filter, const std::string& key);\n  /**\n   * Lookup value by a multi-key path for a given filter in Metadata. 
If path is empty\n   * will return the empty struct.\n   * @param metadata reference.\n   * @param filter name.\n   * @param path multi-key path.\n   * @return const ProtobufWkt::Value& value if found, empty if not found.\n   */\n  static const ProtobufWkt::Value& metadataValue(const envoy::config::core::v3::Metadata* metadata,\n                                                 const std::string& filter,\n                                                 const std::vector<std::string>& path);\n  /**\n   * Lookup the value by a metadata key from a Metadata.\n   * @param metadata reference.\n   * @param metadata_key with key name and path to retrieve the value.\n   * @return const ProtobufWkt::Value& value if found, empty if not found.\n   */\n  static const ProtobufWkt::Value& metadataValue(const envoy::config::core::v3::Metadata* metadata,\n                                                 const MetadataKey& metadata_key);\n\n  /**\n   * Obtain mutable reference to metadata value for a given filter and key.\n   * @param metadata reference.\n   * @param filter name.\n   * @param key for filter metadata.\n   * @return ProtobufWkt::Value&. 
A Value message is created if not found.\n   */\n  static ProtobufWkt::Value& mutableMetadataValue(envoy::config::core::v3::Metadata& metadata,\n                                                  const std::string& filter,\n                                                  const std::string& key);\n\n  using LabelSet = std::vector<std::pair<std::string, ProtobufWkt::Value>>;\n\n  /**\n   * Returns whether a set of the labels match a particular host's metadata.\n   * @param label_set the target label key/value pair set.\n   * @param host_metadata a given host's metadata.\n   * @param filter_key identifies the entry in the metadata entry for the match.\n   * @param list_as_any if the metadata value entry is a list, and any one of\n   * the element equals to the input label_set, it's considered as match.\n   */\n  static bool metadataLabelMatch(const LabelSet& label_set,\n                                 const envoy::config::core::v3::Metadata* host_metadata,\n                                 const std::string& filter_key, bool list_as_any);\n  /**\n   * Returns an ObjectSharedPool to store const Metadata\n   * @param manager used to create singleton\n   * @param dispatcher the dispatcher object reference to the thread that created the\n   * ObjectSharedPool\n   */\n  static ConstMetadataSharedPoolSharedPtr getConstMetadataSharedPool(Singleton::Manager& manager,\n                                                                     Event::Dispatcher& dispatcher);\n};\n\ntemplate <typename factoryClass> class TypedMetadataImpl : public TypedMetadata {\npublic:\n  static_assert(std::is_base_of<Config::TypedMetadataFactory, factoryClass>::value,\n                \"Factory type must be inherited from Envoy::Config::TypedMetadataFactory.\");\n  TypedMetadataImpl(const envoy::config::core::v3::Metadata& metadata) { populateFrom(metadata); }\n\n  const TypedMetadata::Object* getData(const std::string& key) const override {\n    const auto& it = data_.find(key);\n    return it 
== data_.end() ? nullptr : it->second.get();\n  }\n\nprotected:\n  /* Attempt to run each of the registered factories for TypedMetadata, to\n   * populate the data_ map.\n   */\n  void populateFrom(const envoy::config::core::v3::Metadata& metadata) {\n    auto& data_by_key = metadata.filter_metadata();\n    for (const auto& [factory_name, factory] :\n         Registry::FactoryRegistry<factoryClass>::factories()) {\n      const auto& meta_iter = data_by_key.find(factory_name);\n      if (meta_iter != data_by_key.end()) {\n        data_[factory->name()] = factory->parse(meta_iter->second);\n      }\n    }\n  }\n\n  absl::node_hash_map<std::string, std::unique_ptr<const TypedMetadata::Object>> data_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/new_grpc_mux_impl.cc",
    "content": "#include \"common/config/new_grpc_mux_impl.h\"\n\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/backoff_strategy.h\"\n#include \"common/common/token_bucket_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/memory/utils.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nNewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client,\n                               Event::Dispatcher& dispatcher,\n                               const Protobuf::MethodDescriptor& service_method,\n                               envoy::config::core::v3::ApiVersion transport_api_version,\n                               Random::RandomGenerator& random, Stats::Scope& scope,\n                               const RateLimitSettings& rate_limit_settings,\n                               const LocalInfo::LocalInfo& local_info)\n    : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope,\n                   rate_limit_settings),\n      local_info_(local_info), transport_api_version_(transport_api_version),\n      enable_type_url_downgrade_and_upgrade_(Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade\")) {}\n\nScopedResume NewGrpcMuxImpl::pause(const std::string& type_url) {\n  return pause(std::vector<std::string>{type_url});\n}\n\nScopedResume NewGrpcMuxImpl::pause(const std::vector<std::string> type_urls) {\n  for (const auto& type_url : type_urls) {\n    pausable_ack_queue_.pause(type_url);\n  }\n\n  return std::make_unique<Cleanup>([this, type_urls]() {\n    for (const auto& type_url : type_urls) {\n      pausable_ack_queue_.resume(type_url);\n      if (!pausable_ack_queue_.paused(type_url)) {\n        trySendDiscoveryRequests();\n      }\n    }\n  });\n}\n\nvoid 
NewGrpcMuxImpl::registerVersionedTypeUrl(const std::string& type_url) {\n\n  TypeUrlMap& type_url_map = typeUrlMap();\n  if (type_url_map.find(type_url) != type_url_map.end()) {\n    return;\n  }\n  // If type_url is v3, earlier_type_url will contain v2 type url.\n  absl::optional<std::string> earlier_type_url = ApiTypeOracle::getEarlierTypeUrl(type_url);\n  // Register v2 to v3 and v3 to v2 type_url mapping in the hash map.\n  if (earlier_type_url.has_value()) {\n    type_url_map[earlier_type_url.value()] = type_url;\n    type_url_map[type_url] = earlier_type_url.value();\n  }\n}\n\nvoid NewGrpcMuxImpl::onDiscoveryResponse(\n    std::unique_ptr<envoy::service::discovery::v3::DeltaDiscoveryResponse>&& message,\n    ControlPlaneStats&) {\n  ENVOY_LOG(debug, \"Received DeltaDiscoveryResponse for {} at version {}\", message->type_url(),\n            message->system_version_info());\n  auto sub = subscriptions_.find(message->type_url());\n  // If this type url is not watched, try another version type url.\n  if (enable_type_url_downgrade_and_upgrade_ && sub == subscriptions_.end()) {\n    const std::string& type_url = message->type_url();\n    registerVersionedTypeUrl(type_url);\n    TypeUrlMap& type_url_map = typeUrlMap();\n    if (type_url_map.find(type_url) != type_url_map.end()) {\n      sub = subscriptions_.find(type_url_map[type_url]);\n    }\n  }\n  if (sub == subscriptions_.end()) {\n    ENVOY_LOG(warn,\n              \"Dropping received DeltaDiscoveryResponse (with version {}) for non-existent \"\n              \"subscription {}.\",\n              message->system_version_info(), message->type_url());\n    return;\n  }\n\n  kickOffAck(sub->second->sub_state_.handleResponse(*message));\n  Memory::Utils::tryShrinkHeap();\n}\n\nvoid NewGrpcMuxImpl::onStreamEstablished() {\n  for (auto& [type_url, subscription] : subscriptions_) {\n    subscription->sub_state_.markStreamFresh();\n  }\n  trySendDiscoveryRequests();\n}\n\nvoid NewGrpcMuxImpl::onEstablishmentFailure() 
{\n  // If this happens while Envoy is still initializing, the onConfigUpdateFailed() we ultimately\n  // call on CDS will cause LDS to start up, which adds to subscriptions_ here. So, to avoid a\n  // crash, the iteration needs to dance around a little: collect pointers to all\n  // SubscriptionStates, call on all those pointers we haven't yet called on, repeat if there are\n  // now more SubscriptionStates.\n  absl::flat_hash_map<std::string, DeltaSubscriptionState*> all_subscribed;\n  absl::flat_hash_map<std::string, DeltaSubscriptionState*> already_called;\n  do {\n    for (auto& [type_url, subscription] : subscriptions_) {\n      all_subscribed[type_url] = &subscription->sub_state_;\n    }\n    for (auto& sub : all_subscribed) {\n      if (already_called.insert(sub).second) { // insert succeeded ==> not already called\n        sub.second->handleEstablishmentFailure();\n      }\n    }\n  } while (all_subscribed.size() != subscriptions_.size());\n}\n\nvoid NewGrpcMuxImpl::onWriteable() { trySendDiscoveryRequests(); }\n\nvoid NewGrpcMuxImpl::kickOffAck(UpdateAck ack) {\n  pausable_ack_queue_.push(std::move(ack));\n  trySendDiscoveryRequests();\n}\n\n// TODO(fredlas) to be removed from the GrpcMux interface very soon.\nvoid NewGrpcMuxImpl::start() { grpc_stream_.establishNewStream(); }\n\nGrpcMuxWatchPtr NewGrpcMuxImpl::addWatch(const std::string& type_url,\n                                         const std::set<std::string>& resources,\n                                         SubscriptionCallbacks& callbacks,\n                                         OpaqueResourceDecoder& resource_decoder,\n                                         const bool use_namespace_matching) {\n  auto entry = subscriptions_.find(type_url);\n  if (entry == subscriptions_.end()) {\n    // We don't yet have a subscription for type_url! 
Make one!\n    if (enable_type_url_downgrade_and_upgrade_) {\n      registerVersionedTypeUrl(type_url);\n    }\n    addSubscription(type_url, use_namespace_matching);\n    return addWatch(type_url, resources, callbacks, resource_decoder, use_namespace_matching);\n  }\n\n  Watch* watch = entry->second->watch_map_.addWatch(callbacks, resource_decoder);\n  // updateWatch() queues a discovery request if any of 'resources' are not yet subscribed.\n  updateWatch(type_url, watch, resources, use_namespace_matching);\n  return std::make_unique<WatchImpl>(type_url, watch, *this);\n}\n\n// Updates the list of resource names watched by the given watch. If an added name is new across\n// the whole subscription, or if a removed name has no other watch interested in it, then the\n// subscription will enqueue and attempt to send an appropriate discovery request.\nvoid NewGrpcMuxImpl::updateWatch(const std::string& type_url, Watch* watch,\n                                 const std::set<std::string>& resources,\n                                 bool creating_namespace_watch) {\n  ASSERT(watch != nullptr);\n  auto sub = subscriptions_.find(type_url);\n  RELEASE_ASSERT(sub != subscriptions_.end(),\n                 fmt::format(\"Watch of {} has no subscription to update.\", type_url));\n  auto added_removed = sub->second->watch_map_.updateWatchInterest(watch, resources);\n  if (creating_namespace_watch) {\n    // This is to prevent sending out of requests that contain prefixes instead of resource names\n    sub->second->sub_state_.updateSubscriptionInterest({}, {});\n  } else {\n    sub->second->sub_state_.updateSubscriptionInterest(added_removed.added_,\n                                                       added_removed.removed_);\n  }\n  // Tell the server about our change in interest, if any.\n  if (sub->second->sub_state_.subscriptionUpdatePending()) {\n    trySendDiscoveryRequests();\n  }\n}\n\nvoid NewGrpcMuxImpl::requestOnDemandUpdate(const std::string& type_url,\n           
                                const std::set<std::string>& for_update) {\n  auto sub = subscriptions_.find(type_url);\n  RELEASE_ASSERT(sub != subscriptions_.end(),\n                 fmt::format(\"Watch of {} has no subscription to update.\", type_url));\n  sub->second->sub_state_.updateSubscriptionInterest(for_update, {});\n  // Tell the server about our change in interest, if any.\n  if (sub->second->sub_state_.subscriptionUpdatePending()) {\n    trySendDiscoveryRequests();\n  }\n}\n\nvoid NewGrpcMuxImpl::removeWatch(const std::string& type_url, Watch* watch) {\n  updateWatch(type_url, watch, {});\n  auto entry = subscriptions_.find(type_url);\n  ASSERT(entry != subscriptions_.end(),\n         fmt::format(\"removeWatch() called for non-existent subscription {}.\", type_url));\n  entry->second->watch_map_.removeWatch(watch);\n}\n\nvoid NewGrpcMuxImpl::addSubscription(const std::string& type_url,\n                                     const bool use_namespace_matching) {\n  subscriptions_.emplace(\n      type_url, std::make_unique<SubscriptionStuff>(type_url, local_info_, use_namespace_matching));\n  subscription_ordering_.emplace_back(type_url);\n}\n\nvoid NewGrpcMuxImpl::trySendDiscoveryRequests() {\n  while (true) {\n    // Do any of our subscriptions even want to send a request?\n    absl::optional<std::string> maybe_request_type = whoWantsToSendDiscoveryRequest();\n    if (!maybe_request_type.has_value()) {\n      break;\n    }\n    // If so, which one (by type_url)?\n    std::string next_request_type_url = maybe_request_type.value();\n    // If we don't have a subscription object for this request's type_url, drop the request.\n    auto sub = subscriptions_.find(next_request_type_url);\n    RELEASE_ASSERT(sub != subscriptions_.end(),\n                   fmt::format(\"Tried to send discovery request for non-existent subscription {}.\",\n                               next_request_type_url));\n\n    // Try again later if paused/rate limited/stream down.\n    if 
(!canSendDiscoveryRequest(next_request_type_url)) {\n      break;\n    }\n    envoy::service::discovery::v3::DeltaDiscoveryRequest request;\n    // Get our subscription state to generate the appropriate DeltaDiscoveryRequest, and send.\n    if (!pausable_ack_queue_.empty()) {\n      // Because ACKs take precedence over plain requests, if there is anything in the queue, it's\n      // safe to assume it's of the type_url that we're wanting to send.\n      request = sub->second->sub_state_.getNextRequestWithAck(pausable_ack_queue_.popFront());\n    } else {\n      request = sub->second->sub_state_.getNextRequestAckless();\n    }\n    VersionConverter::prepareMessageForGrpcWire(request, transport_api_version_);\n    grpc_stream_.sendMessage(request);\n  }\n  grpc_stream_.maybeUpdateQueueSizeStat(pausable_ack_queue_.size());\n}\n\n// Checks whether external conditions allow sending a DeltaDiscoveryRequest. (Does not check\n// whether we *want* to send a DeltaDiscoveryRequest).\nbool NewGrpcMuxImpl::canSendDiscoveryRequest(const std::string& type_url) {\n  RELEASE_ASSERT(\n      !pausable_ack_queue_.paused(type_url),\n      fmt::format(\"canSendDiscoveryRequest() called on paused type_url {}. Pausedness is \"\n                  \"supposed to be filtered out by whoWantsToSendDiscoveryRequest(). \",\n                  type_url));\n\n  if (!grpc_stream_.grpcStreamAvailable()) {\n    ENVOY_LOG(trace, \"No stream available to send a discovery request for {}.\", type_url);\n    return false;\n  } else if (!grpc_stream_.checkRateLimitAllowsDrain()) {\n    ENVOY_LOG(trace, \"{} discovery request hit rate limit; will try later.\", type_url);\n    return false;\n  }\n  return true;\n}\n\n// Checks whether we have something to say in a DeltaDiscoveryRequest, which can be an ACK and/or\n// a subscription update. 
(Does not check whether we *can* send that DeltaDiscoveryRequest).\n// Returns the type_url we should send the DeltaDiscoveryRequest for (if any).\n// First, prioritizes ACKs over non-ACK subscription interest updates.\n// Then, prioritizes non-ACK updates in the order the various types\n// of subscriptions were activated.\nabsl::optional<std::string> NewGrpcMuxImpl::whoWantsToSendDiscoveryRequest() {\n  // All ACKs are sent before plain updates. trySendDiscoveryRequests() relies on this. So, choose\n  // type_url from pausable_ack_queue_ if possible, before looking at pending updates.\n  if (!pausable_ack_queue_.empty()) {\n    return pausable_ack_queue_.front().type_url_;\n  }\n  // If we're looking to send multiple non-ACK requests, send them in the order that their\n  // subscriptions were initiated.\n  for (const auto& sub_type : subscription_ordering_) {\n    auto sub = subscriptions_.find(sub_type);\n    if (sub != subscriptions_.end() && sub->second->sub_state_.subscriptionUpdatePending() &&\n        !pausable_ack_queue_.paused(sub_type)) {\n      return sub->first;\n    }\n  }\n  return absl::nullopt;\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/new_grpc_mux_impl.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/common/token_bucket.h\"\n#include \"envoy/config/grpc_mux.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/delta_subscription_state.h\"\n#include \"common/config/grpc_stream.h\"\n#include \"common/config/pausable_ack_queue.h\"\n#include \"common/config/watch_map.h\"\n#include \"common/grpc/common.h\"\n#include \"common/runtime/runtime_features.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n// Manages subscriptions to one or more type of resource. The logical protocol\n// state of those subscription(s) is handled by DeltaSubscriptionState.\n// This class owns the GrpcStream used to talk to the server, maintains queuing\n// logic to properly order the subscription(s)' various messages, and allows\n// starting/stopping/pausing of the subscriptions.\nclass NewGrpcMuxImpl\n    : public GrpcMux,\n      public GrpcStreamCallbacks<envoy::service::discovery::v3::DeltaDiscoveryResponse>,\n      Logger::Loggable<Logger::Id::config> {\npublic:\n  NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher,\n                 const Protobuf::MethodDescriptor& service_method,\n                 envoy::config::core::v3::ApiVersion transport_api_version,\n                 Random::RandomGenerator& random, Stats::Scope& scope,\n                 const RateLimitSettings& rate_limit_settings,\n                 const LocalInfo::LocalInfo& local_info);\n\n  GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set<std::string>& resources,\n                           SubscriptionCallbacks& callbacks,\n                           OpaqueResourceDecoder& resource_decoder,\n                           const bool use_namespace_matching = false) 
override;\n\n  void requestOnDemandUpdate(const std::string& type_url,\n                             const std::set<std::string>& for_update) override;\n\n  ScopedResume pause(const std::string& type_url) override;\n  ScopedResume pause(const std::vector<std::string> type_urls) override;\n\n  void registerVersionedTypeUrl(const std::string& type_url);\n\n  void onDiscoveryResponse(\n      std::unique_ptr<envoy::service::discovery::v3::DeltaDiscoveryResponse>&& message,\n      ControlPlaneStats& control_plane_stats) override;\n\n  void onStreamEstablished() override;\n\n  void onEstablishmentFailure() override;\n\n  void onWriteable() override;\n\n  void kickOffAck(UpdateAck ack);\n\n  // TODO(fredlas) remove this from the GrpcMux interface.\n  void start() override;\n\n  struct SubscriptionStuff {\n    SubscriptionStuff(const std::string& type_url, const LocalInfo::LocalInfo& local_info,\n                      const bool use_namespace_matching)\n        : watch_map_(use_namespace_matching), sub_state_(type_url, watch_map_, local_info) {}\n\n    WatchMap watch_map_;\n    DeltaSubscriptionState sub_state_;\n\n    SubscriptionStuff(const SubscriptionStuff&) = delete;\n    SubscriptionStuff& operator=(const SubscriptionStuff&) = delete;\n  };\n\n  using SubscriptionStuffPtr = std::unique_ptr<SubscriptionStuff>;\n\n  // for use in tests only\n  const absl::flat_hash_map<std::string, SubscriptionStuffPtr>& subscriptions() {\n    return subscriptions_;\n  }\n\nprivate:\n  class WatchImpl : public GrpcMuxWatch {\n  public:\n    WatchImpl(const std::string& type_url, Watch* watch, NewGrpcMuxImpl& parent)\n        : type_url_(type_url), watch_(watch), parent_(parent) {}\n\n    ~WatchImpl() override { remove(); }\n\n    void remove() {\n      if (watch_) {\n        parent_.removeWatch(type_url_, watch_);\n        watch_ = nullptr;\n      }\n    }\n\n    void update(const std::set<std::string>& resources) override {\n      parent_.updateWatch(type_url_, watch_, resources);\n   
 }\n\n  private:\n    const std::string type_url_;\n    Watch* watch_;\n    NewGrpcMuxImpl& parent_;\n  };\n\n  void removeWatch(const std::string& type_url, Watch* watch);\n\n  // Updates the list of resource names watched by the given watch. If an added name is new across\n  // the whole subscription, or if a removed name has no other watch interested in it, then the\n  // subscription will enqueue and attempt to send an appropriate discovery request.\n  void updateWatch(const std::string& type_url, Watch* watch,\n                   const std::set<std::string>& resources,\n                   const bool creating_namespace_watch = false);\n\n  void addSubscription(const std::string& type_url, const bool use_namespace_matching);\n\n  void trySendDiscoveryRequests();\n\n  // Checks whether external conditions allow sending a DeltaDiscoveryRequest. (Does not check\n  // whether we *want* to send a DeltaDiscoveryRequest).\n  bool canSendDiscoveryRequest(const std::string& type_url);\n\n  // Checks whether we have something to say in a DeltaDiscoveryRequest, which can be an ACK and/or\n  // a subscription update. (Does not check whether we *can* send that DeltaDiscoveryRequest).\n  // Returns the type_url we should send the DeltaDiscoveryRequest for (if any).\n  // First, prioritizes ACKs over non-ACK subscription interest updates.\n  // Then, prioritizes non-ACK updates in the order the various types\n  // of subscriptions were activated.\n  absl::optional<std::string> whoWantsToSendDiscoveryRequest();\n\n  // Resource (N)ACKs we're waiting to send, stored in the order that they should be sent in. All\n  // of our different resource types' ACKs are mixed together in this queue. See class for\n  // description of how it interacts with pause() and resume().\n  PausableAckQueue pausable_ack_queue_;\n\n  // Map key is type_url.\n  absl::flat_hash_map<std::string, SubscriptionStuffPtr> subscriptions_;\n\n  // Determines the order of initial discovery requests. 
(Assumes that subscriptions are added in\n  // the order of Envoy's dependency ordering).\n  std::list<std::string> subscription_ordering_;\n\n  GrpcStream<envoy::service::discovery::v3::DeltaDiscoveryRequest,\n             envoy::service::discovery::v3::DeltaDiscoveryResponse>\n      grpc_stream_;\n\n  const LocalInfo::LocalInfo& local_info_;\n\n  const envoy::config::core::v3::ApiVersion transport_api_version_;\n\n  const bool enable_type_url_downgrade_and_upgrade_;\n};\n\nusing NewGrpcMuxImplPtr = std::unique_ptr<NewGrpcMuxImpl>;\nusing NewGrpcMuxImplSharedPtr = std::shared_ptr<NewGrpcMuxImpl>;\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/opaque_resource_decoder_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/subscription.h\"\n\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Config {\n\ntemplate <typename Current> class OpaqueResourceDecoderImpl : public Config::OpaqueResourceDecoder {\npublic:\n  OpaqueResourceDecoderImpl(ProtobufMessage::ValidationVisitor& validation_visitor,\n                            absl::string_view name_field)\n      : validation_visitor_(validation_visitor), name_field_(name_field) {}\n\n  // Config::OpaqueResourceDecoder\n  ProtobufTypes::MessagePtr decodeResource(const ProtobufWkt::Any& resource) override {\n    auto typed_message = std::make_unique<Current>();\n    // If the Any is a synthetic empty message (e.g. because the resource field was not set in\n    // Resource, this might be empty, so we shouldn't decode.\n    if (!resource.type_url().empty()) {\n      MessageUtil::anyConvertAndValidate<Current>(resource, *typed_message, validation_visitor_);\n    }\n    return typed_message;\n  }\n\n  std::string resourceName(const Protobuf::Message& resource) override {\n    return MessageUtil::getStringField(resource, name_field_);\n  }\n\nprivate:\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n  const std::string name_field_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/pausable_ack_queue.cc",
    "content": "#include \"common/config/pausable_ack_queue.h\"\n\n#include <list>\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nvoid PausableAckQueue::push(UpdateAck x) { storage_.push_back(std::move(x)); }\n\nsize_t PausableAckQueue::size() const { return storage_.size(); }\n\nbool PausableAckQueue::empty() {\n  for (const auto& entry : storage_) {\n    if (pauses_[entry.type_url_] == 0) {\n      return false;\n    }\n  }\n  return true;\n}\n\nconst UpdateAck& PausableAckQueue::front() {\n  for (const auto& entry : storage_) {\n    if (pauses_[entry.type_url_] == 0) {\n      return entry;\n    }\n  }\n  RELEASE_ASSERT(false, \"front() on an empty queue is undefined behavior!\");\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nUpdateAck PausableAckQueue::popFront() {\n  for (auto it = storage_.begin(); it != storage_.end(); ++it) {\n    if (pauses_[it->type_url_] == 0) {\n      UpdateAck ret = *it;\n      storage_.erase(it);\n      return ret;\n    }\n  }\n  RELEASE_ASSERT(false, \"popFront() on an empty queue is undefined behavior!\");\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid PausableAckQueue::pause(const std::string& type_url) {\n  // It's ok to pause a subscription that doesn't exist yet.\n  auto& pause_entry = pauses_[type_url];\n  ++pause_entry;\n}\n\nvoid PausableAckQueue::resume(const std::string& type_url) {\n  auto& pause_entry = pauses_[type_url];\n  ASSERT(pause_entry > 0);\n  --pause_entry;\n}\n\nbool PausableAckQueue::paused(const std::string& type_url) const {\n  auto entry = pauses_.find(type_url);\n  if (entry == pauses_.end()) {\n    return false;\n  }\n  return entry->second > 0;\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/pausable_ack_queue.h",
    "content": "#pragma once\n\n#include <list>\n\n#include \"common/config/update_ack.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n// There is a head-of-line blocking issue resulting from the intersection of 1) ADS's need for\n// subscription request ordering and 2) the ability to \"pause\" one of the resource types within ADS.\n// We need a queue that understands ADS's resource type pausing. Specifically, we need front()/pop()\n// to choose the first element whose type_url isn't paused.\nclass PausableAckQueue {\npublic:\n  void push(UpdateAck x);\n  size_t size() const;\n  bool empty();\n  const UpdateAck& front();\n  UpdateAck popFront();\n  void pause(const std::string& type_url);\n  void resume(const std::string& type_url);\n  bool paused(const std::string& type_url) const;\n\nprivate:\n  // It's ok for non-existent subs to be paused/resumed. The cleanest way to support that is to give\n  // the pause state its own map. (Map key is type_url.)\n  absl::flat_hash_map<std::string, uint32_t> pauses_;\n  std::list<UpdateAck> storage_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/protobuf_link_hacks.h",
    "content": "#pragma once\n\n#include \"envoy/api/v2/cds.pb.h\"\n#include \"envoy/api/v2/eds.pb.h\"\n#include \"envoy/api/v2/lds.pb.h\"\n#include \"envoy/api/v2/rds.pb.h\"\n#include \"envoy/api/v2/srds.pb.h\"\n#include \"envoy/config/bootstrap/v2/bootstrap.pb.h\"\n#include \"envoy/service/cluster/v3/cds.pb.h\"\n#include \"envoy/service/discovery/v2/ads.pb.h\"\n#include \"envoy/service/discovery/v2/hds.pb.h\"\n#include \"envoy/service/discovery/v2/rtds.pb.h\"\n#include \"envoy/service/discovery/v2/sds.pb.h\"\n#include \"envoy/service/discovery/v3/ads.pb.h\"\n#include \"envoy/service/endpoint/v3/eds.pb.h\"\n#include \"envoy/service/extension/v3/config_discovery.pb.h\"\n#include \"envoy/service/listener/v3/lds.pb.h\"\n#include \"envoy/service/ratelimit/v2/rls.pb.h\"\n#include \"envoy/service/ratelimit/v3/rls.pb.h\"\n#include \"envoy/service/route/v3/rds.pb.h\"\n#include \"envoy/service/route/v3/srds.pb.h\"\n#include \"envoy/service/runtime/v3/rtds.pb.h\"\n#include \"envoy/service/secret/v3/sds.pb.h\"\n\n// API_NO_BOOST_FILE\n\nnamespace Envoy {\n\n// Hack to force linking of the service: https://github.com/google/protobuf/issues/4221.\n// This file should be included ONLY if this hack is required.\nconst envoy::service::discovery::v2::AdsDummy _ads_dummy_v2;\nconst envoy::service::ratelimit::v2::RateLimitRequest _rls_dummy_v2;\nconst envoy::service::discovery::v2::SdsDummy _sds_dummy_v2;\nconst envoy::service::discovery::v2::RtdsDummy _tds_dummy_v2;\nconst envoy::api::v2::LdsDummy _lds_dummy_v2;\nconst envoy::api::v2::RdsDummy _rds_dummy_v2;\nconst envoy::api::v2::CdsDummy _cds_dummy_v2;\nconst envoy::api::v2::EdsDummy _eds_dummy_v2;\nconst envoy::api::v2::SrdsDummy _srds_dummy_v2;\n\nconst envoy::service::discovery::v3::AdsDummy _ads_dummy_v3;\nconst envoy::service::ratelimit::v3::RateLimitRequest _rls_dummy_v3;\nconst envoy::service::secret::v3::SdsDummy _sds_dummy_v3;\nconst envoy::service::runtime::v3::RtdsDummy _tds_dummy_v3;\nconst 
envoy::service::listener::v3::LdsDummy _lds_dummy_v3;\nconst envoy::service::route::v3::RdsDummy _rds_dummy_v3;\nconst envoy::service::cluster::v3::CdsDummy _cds_dummy_v3;\nconst envoy::service::endpoint::v3::EdsDummy _eds_dummy_v3;\nconst envoy::service::route::v3::SrdsDummy _srds_dummy_v3;\n\n// With the v2 -> v3 migration there is another, related linking issue.\n// Symbols for v2 protos which headers are not included in any file in the codebase are being\n// dropped by the linker in some circumstances. For example, in the Envoy Mobile iOS build system.\n// Even though all v2 packages are included as a dependency in their corresponding v3 package, and\n// `always_link` is set for all proto bazel targets.\n// Further proof of this can be seen by way of counter example with the envoy.api.v2.Cluster type,\n// which is checked for by proto_descriptors.cc. This type **is** getting linked because its headers\n// is still included in cds_api_impl.cc. On the other side because the v2 hds header is not included\n// anywhere the v2 service type is getting dropped, and thus the descriptor is not present in the\n// descriptor pool.\n// https://github.com/envoyproxy/envoy/issues/9639\nconst envoy::config::bootstrap::v2::Bootstrap _bootstrap_dummy_v2;\nconst envoy::service::discovery::v2::Capability _hds_dummy_v2;\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/remote_data_fetcher.cc",
    "content": "#include \"common/config/remote_data_fetcher.h\"\n\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/hex.h\"\n#include \"common/crypto/utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace DataFetcher {\n\nRemoteDataFetcher::RemoteDataFetcher(Upstream::ClusterManager& cm,\n                                     const envoy::config::core::v3::HttpUri& uri,\n                                     const std::string& content_hash,\n                                     RemoteDataFetcherCallback& callback)\n    : cm_(cm), uri_(uri), content_hash_(content_hash), callback_(callback) {}\n\nRemoteDataFetcher::~RemoteDataFetcher() { cancel(); }\n\nvoid RemoteDataFetcher::cancel() {\n  if (request_) {\n    request_->cancel();\n    ENVOY_LOG(debug, \"fetch remote data [uri = {}]: canceled\", uri_.uri());\n  }\n\n  request_ = nullptr;\n}\n\nvoid RemoteDataFetcher::fetch() {\n  Http::RequestMessagePtr message = Http::Utility::prepareHeaders(uri_);\n  message->headers().setReferenceMethod(Http::Headers::get().MethodValues.Get);\n  ENVOY_LOG(debug, \"fetch remote data from [uri = {}]: start\", uri_.uri());\n  request_ = cm_.httpAsyncClientForCluster(uri_.cluster())\n                 .send(std::move(message), *this,\n                       Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(\n                           DurationUtil::durationToMilliseconds(uri_.timeout()))));\n}\n\nvoid RemoteDataFetcher::onSuccess(const Http::AsyncClient::Request&,\n                                  Http::ResponseMessagePtr&& response) {\n  const uint64_t status_code = Http::Utility::getResponseStatus(response->headers());\n  if (status_code == enumToInt(Http::Code::OK)) {\n    ENVOY_LOG(debug, \"fetch remote data [uri = {}]: success\", uri_.uri());\n    if (response->body().length() > 0) {\n      auto& crypto_util = 
Envoy::Common::Crypto::UtilitySingleton::get();\n      const auto content_hash = Hex::encode(crypto_util.getSha256Digest(response->body()));\n\n      if (content_hash_ != content_hash) {\n        ENVOY_LOG(debug, \"fetch remote data [uri = {}]: data is invalid\", uri_.uri());\n        callback_.onFailure(FailureReason::InvalidData);\n      } else {\n        callback_.onSuccess(response->bodyAsString());\n      }\n    } else {\n      ENVOY_LOG(debug, \"fetch remote data [uri = {}]: body is empty\", uri_.uri());\n      callback_.onFailure(FailureReason::Network);\n    }\n  } else {\n    ENVOY_LOG(debug, \"fetch remote data [uri = {}]: response status code {}\", uri_.uri(),\n              status_code);\n    callback_.onFailure(FailureReason::Network);\n  }\n\n  request_ = nullptr;\n}\n\nvoid RemoteDataFetcher::onFailure(const Http::AsyncClient::Request&,\n                                  Http::AsyncClient::FailureReason reason) {\n  ENVOY_LOG(debug, \"fetch remote data [uri = {}]: network error {}\", uri_.uri(), enumToInt(reason));\n  request_ = nullptr;\n  callback_.onFailure(FailureReason::Network);\n}\n\n} // namespace DataFetcher\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/remote_data_fetcher.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace DataFetcher {\n\n/**\n * Failure reason.\n */\nenum class FailureReason {\n  /* A network error occurred causing remote data retrieval failure. */\n  Network,\n  /* A failure occurred when trying to verify remote data using sha256. */\n  InvalidData,\n};\n\n/**\n * Callback used by remote data fetcher.\n */\nclass RemoteDataFetcherCallback {\npublic:\n  virtual ~RemoteDataFetcherCallback() = default;\n\n  /**\n   * This function will be called when data is fetched successfully from remote.\n   * @param data remote data\n   */\n  virtual void onSuccess(const std::string& data) PURE;\n\n  /**\n   * This function is called when error happens during fetching data.\n   * @param reason failure reason.\n   */\n  virtual void onFailure(FailureReason reason) PURE;\n};\n\n/**\n * Remote data fetcher.\n */\nclass RemoteDataFetcher : public Logger::Loggable<Logger::Id::config>,\n                          public Http::AsyncClient::Callbacks {\npublic:\n  RemoteDataFetcher(Upstream::ClusterManager& cm, const envoy::config::core::v3::HttpUri& uri,\n                    const std::string& content_hash, RemoteDataFetcherCallback& callback);\n\n  ~RemoteDataFetcher() override;\n\n  // Http::AsyncClient::Callbacks\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override;\n  void onFailure(const Http::AsyncClient::Request&,\n                 Http::AsyncClient::FailureReason reason) override;\n  void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&,\n                                    const Http::ResponseHeaderMap*) override {}\n\n  /**\n   * Fetch data from remote.\n   * @param uri remote URI\n   * @param content_hash for verifying data integrity\n   * @param callback callback when fetch is done.\n   */\n  void fetch();\n\n  /**\n   
* Cancel the fetch.\n   */\n  void cancel();\n\nprivate:\n  Upstream::ClusterManager& cm_;\n  const envoy::config::core::v3::HttpUri uri_;\n  const std::string content_hash_;\n  RemoteDataFetcherCallback& callback_;\n\n  Http::AsyncClient::Request* request_{};\n};\n\nusing RemoteDataFetcherPtr = std::unique_ptr<RemoteDataFetcher>;\n\n} // namespace DataFetcher\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/resource_name.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/config/api_type_oracle.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * Get resource name from api type and version.\n */\ntemplate <typename Current>\nstd::string getResourceName(envoy::config::core::v3::ApiVersion resource_api_version) {\n  switch (resource_api_version) {\n  case envoy::config::core::v3::ApiVersion::AUTO:\n  case envoy::config::core::v3::ApiVersion::V2:\n    return ApiTypeOracle::getEarlierVersionMessageTypeName(Current().GetDescriptor()->full_name())\n        .value();\n  case envoy::config::core::v3::ApiVersion::V3:\n    return Current().GetDescriptor()->full_name();\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n/**\n * Get type url from api type and version.\n */\ntemplate <typename Current>\nstd::string getTypeUrl(envoy::config::core::v3::ApiVersion resource_api_version) {\n  return \"type.googleapis.com/\" + getResourceName<Current>(resource_api_version);\n}\n\n/**\n * get all version resource names.\n */\ntemplate <typename Current> std::vector<std::string> getAllVersionResourceNames() {\n  return std::vector<std::string>{\n      Current().GetDescriptor()->full_name(),\n      ApiTypeOracle::getEarlierVersionMessageTypeName(Current().GetDescriptor()->full_name())\n          .value()};\n}\n\n/**\n * get all version type urls.\n */\ntemplate <typename Current> std::vector<std::string> getAllVersionTypeUrls() {\n  auto resource_names = getAllVersionResourceNames<Current>();\n  for (auto&& resource_name : resource_names) {\n    resource_name = \"type.googleapis.com/\" + resource_name;\n  }\n  return resource_names;\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/runtime_utility.cc",
    "content": "#include \"common/config/runtime_utility.h\"\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nvoid translateRuntime(const envoy::config::bootstrap::v3::Runtime& runtime_config,\n                      envoy::config::bootstrap::v3::LayeredRuntime& layered_runtime_config) {\n  {\n    auto* layer = layered_runtime_config.add_layers();\n    layer->set_name(\"base\");\n    layer->mutable_static_layer()->MergeFrom(runtime_config.base());\n  }\n  if (!runtime_config.symlink_root().empty()) {\n    {\n      auto* layer = layered_runtime_config.add_layers();\n      layer->set_name(\"root\");\n      layer->mutable_disk_layer()->set_symlink_root(runtime_config.symlink_root());\n      layer->mutable_disk_layer()->set_subdirectory(runtime_config.subdirectory());\n    }\n    if (!runtime_config.override_subdirectory().empty()) {\n      auto* layer = layered_runtime_config.add_layers();\n      layer->set_name(\"override\");\n      layer->mutable_disk_layer()->set_symlink_root(runtime_config.symlink_root());\n      layer->mutable_disk_layer()->set_subdirectory(runtime_config.override_subdirectory());\n      layer->mutable_disk_layer()->set_append_service_cluster(true);\n    }\n  }\n  {\n    auto* layer = layered_runtime_config.add_layers();\n    layer->set_name(\"admin\");\n    layer->mutable_admin_layer();\n  }\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/runtime_utility.h",
    "content": "#pragma once\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n// Translate from old fixed runtime to new layered runtime configuration.\nvoid translateRuntime(const envoy::config::bootstrap::v3::Runtime& runtime_config,\n                      envoy::config::bootstrap::v3::LayeredRuntime& layered_runtime_config);\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/subscription_base.h",
    "content": "#pragma once\n\n#include \"envoy/config/subscription.h\"\n\n#include \"common/config/opaque_resource_decoder_impl.h\"\n#include \"common/config/resource_name.h\"\n\nnamespace Envoy {\nnamespace Config {\n\ntemplate <typename Current> struct SubscriptionBase : public Config::SubscriptionCallbacks {\npublic:\n  SubscriptionBase(const envoy::config::core::v3::ApiVersion api_version,\n                   ProtobufMessage::ValidationVisitor& validation_visitor,\n                   absl::string_view name_field)\n      : resource_decoder_(validation_visitor, name_field), api_version_(api_version) {}\n\n  std::string getResourceName() const {\n    return Envoy::Config::getResourceName<Current>(api_version_);\n  }\n\nprotected:\n  Config::OpaqueResourceDecoderImpl<Current> resource_decoder_;\n\nprivate:\n  const envoy::config::core::v3::ApiVersion api_version_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/subscription_factory_impl.cc",
    "content": "#include \"common/config/subscription_factory_impl.h\"\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n\n#include \"common/config/filesystem_subscription_impl.h\"\n#include \"common/config/grpc_mux_impl.h\"\n#include \"common/config/grpc_subscription_impl.h\"\n#include \"common/config/http_subscription_impl.h\"\n#include \"common/config/new_grpc_mux_impl.h\"\n#include \"common/config/type_to_endpoint.h\"\n#include \"common/config/udpa_resource.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nSubscriptionFactoryImpl::SubscriptionFactoryImpl(\n    const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher,\n    Upstream::ClusterManager& cm, ProtobufMessage::ValidationVisitor& validation_visitor,\n    Api::Api& api, Runtime::Loader& runtime)\n    : local_info_(local_info), dispatcher_(dispatcher), cm_(cm),\n      validation_visitor_(validation_visitor), api_(api), runtime_(runtime) {}\n\nSubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource(\n    const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url,\n    Stats::Scope& scope, SubscriptionCallbacks& callbacks,\n    OpaqueResourceDecoder& resource_decoder) {\n  Config::Utility::checkLocalInfo(type_url, local_info_);\n  std::unique_ptr<Subscription> result;\n  SubscriptionStats stats = Utility::generateStats(scope);\n  auto& runtime_snapshot = runtime_.snapshot();\n\n  const auto transport_api_version = config.api_config_source().transport_api_version();\n  if (transport_api_version == envoy::config::core::v3::ApiVersion::V2 &&\n      runtime_snapshot.runtimeFeatureEnabled(\n          \"envoy.reloadable_features.enable_deprecated_v2_api_warning\")) {\n    runtime_.countDeprecatedFeatureUse();\n    ENVOY_LOG(warn,\n              \"xDS of version v2 has been deprecated and will be removed in subsequent versions\");\n  }\n\n  switch 
(config.config_source_specifier_case()) {\n  case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath: {\n    Utility::checkFilesystemSubscriptionBackingPath(config.path(), api_);\n    return std::make_unique<Config::FilesystemSubscriptionImpl>(\n        dispatcher_, config.path(), callbacks, resource_decoder, stats, validation_visitor_, api_);\n  }\n  case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kApiConfigSource: {\n    const envoy::config::core::v3::ApiConfigSource& api_config_source = config.api_config_source();\n    Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.primaryClusters(),\n                                                            api_config_source);\n\n    switch (api_config_source.api_type()) {\n    case envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY:\n      throw EnvoyException(\n          \"REST_LEGACY no longer a supported ApiConfigSource. \"\n          \"Please specify an explicit supported api_type in the following config:\\n\" +\n          config.DebugString());\n    case envoy::config::core::v3::ApiConfigSource::REST:\n      return std::make_unique<HttpSubscriptionImpl>(\n          local_info_, cm_, api_config_source.cluster_names()[0], dispatcher_,\n          api_.randomGenerator(), Utility::apiConfigSourceRefreshDelay(api_config_source),\n          Utility::apiConfigSourceRequestTimeout(api_config_source),\n          restMethod(type_url, api_config_source.transport_api_version()), type_url,\n          api_config_source.transport_api_version(), callbacks, resource_decoder, stats,\n          Utility::configSourceInitialFetchTimeout(config), validation_visitor_);\n    case envoy::config::core::v3::ApiConfigSource::GRPC:\n      return std::make_unique<GrpcSubscriptionImpl>(\n          std::make_shared<Config::GrpcMuxImpl>(\n              local_info_,\n              Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(),\n           
                                          api_config_source, scope, true)\n                  ->create(),\n              dispatcher_, sotwGrpcMethod(type_url, api_config_source.transport_api_version()),\n              api_config_source.transport_api_version(), api_.randomGenerator(), scope,\n              Utility::parseRateLimitSettings(api_config_source),\n              api_config_source.set_node_on_first_message_only()),\n          callbacks, resource_decoder, stats, type_url, dispatcher_,\n          Utility::configSourceInitialFetchTimeout(config),\n          /*is_aggregated*/ false);\n    case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: {\n      return std::make_unique<GrpcSubscriptionImpl>(\n          std::make_shared<Config::NewGrpcMuxImpl>(\n              Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(),\n                                                             api_config_source, scope, true)\n                  ->create(),\n              dispatcher_, deltaGrpcMethod(type_url, api_config_source.transport_api_version()),\n              api_config_source.transport_api_version(), api_.randomGenerator(), scope,\n              Utility::parseRateLimitSettings(api_config_source), local_info_),\n          callbacks, resource_decoder, stats, type_url, dispatcher_,\n          Utility::configSourceInitialFetchTimeout(config), false);\n    }\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n  case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kAds: {\n    return std::make_unique<GrpcSubscriptionImpl>(\n        cm_.adsMux(), callbacks, resource_decoder, stats, type_url, dispatcher_,\n        Utility::configSourceInitialFetchTimeout(config), true);\n  }\n  default:\n    throw EnvoyException(\n        \"Missing config source specifier in envoy::config::core::v3::ConfigSource\");\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nSubscriptionPtr SubscriptionFactoryImpl::collectionSubscriptionFromUrl(\n    const 
udpa::core::v1::ResourceLocator& collection_locator,\n    const envoy::config::core::v3::ConfigSource& /*config*/, absl::string_view /*type_url*/,\n    Stats::Scope& scope, SubscriptionCallbacks& callbacks,\n    OpaqueResourceDecoder& resource_decoder) {\n  std::unique_ptr<Subscription> result;\n  SubscriptionStats stats = Utility::generateStats(scope);\n\n  switch (collection_locator.scheme()) {\n  case udpa::core::v1::ResourceLocator::FILE: {\n    const std::string path =\n        Http::Utility::localPathFromFilePath(absl::StrJoin(collection_locator.id(), \"/\"));\n    Utility::checkFilesystemSubscriptionBackingPath(path, api_);\n    return std::make_unique<Config::FilesystemCollectionSubscriptionImpl>(\n        dispatcher_, path, callbacks, resource_decoder, stats, validation_visitor_, api_);\n  }\n  default:\n    throw EnvoyException(fmt::format(\"Unsupported collection resource locator: {}\",\n                                     UdpaResourceIdentifier::encodeUrl(collection_locator)));\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/subscription_factory_impl.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/config/subscription_factory.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nclass SubscriptionFactoryImpl : public SubscriptionFactory, Logger::Loggable<Logger::Id::config> {\npublic:\n  SubscriptionFactoryImpl(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher,\n                          Upstream::ClusterManager& cm,\n                          ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api,\n                          Runtime::Loader& runtime);\n\n  // Config::SubscriptionFactory\n  SubscriptionPtr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config,\n                                               absl::string_view type_url, Stats::Scope& scope,\n                                               SubscriptionCallbacks& callbacks,\n                                               OpaqueResourceDecoder& resource_decoder) override;\n  SubscriptionPtr\n  collectionSubscriptionFromUrl(const udpa::core::v1::ResourceLocator& collection_locator,\n                                const envoy::config::core::v3::ConfigSource& config,\n                                absl::string_view type_url, Stats::Scope& scope,\n                                SubscriptionCallbacks& callbacks,\n                                OpaqueResourceDecoder& resource_decoder) override;\n\nprivate:\n  const LocalInfo::LocalInfo& local_info_;\n  Event::Dispatcher& dispatcher_;\n  Upstream::ClusterManager& cm_;\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n  Api::Api& api_;\n  Runtime::Loader& runtime_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/type_to_endpoint.cc",
    "content": "#include \"common/config/type_to_endpoint.h\"\n\n#include \"envoy/annotations/resource.pb.h\"\n\n#include \"common/grpc/common.h\"\n\n// API_NO_BOOST_FILE\n\n#define SERVICE_VERSION_INFO(v2, v3)                                                               \\\n  createServiceVersionInfoMap(v2, {v2, v3}), createServiceVersionInfoMap(v3, {v2, v3})\n\nnamespace Envoy {\nnamespace Config {\n\nnamespace {\n\n// A service's name, e.g. \"envoy.api.v2.RouteDiscoveryService\",\n// \"envoy.service.route.v3.RouteDiscoveryService\".\nusing ServiceName = std::string;\n\nstruct ServiceVersionInfo {\n  // This hold a name for each transport_api_version, for example for\n  // \"envoy.api.v2.RouteDiscoveryService\":\n  // {\n  //    \"V2\": \"envoy.api.v2.RouteDiscoveryService\",\n  //    \"V3\": \"envoy.service.route.v3.RouteDiscoveryService\"\n  // }\n  absl::flat_hash_map<envoy::config::core::v3::ApiVersion, ServiceName> names_;\n};\n\n// A ServiceVersionInfoMap holds a service's transport_api_version and possible names for each\n// available transport_api_version. 
For examples:\n//\n// Given \"envoy.api.v2.RouteDiscoveryService\" as the service name:\n// {\n//   \"envoy.api.v2.RouteDiscoveryService\": {\n//     \"names_\": {\n//       \"V2\": \"envoy.api.v2.RouteDiscoveryService\",\n//       \"V3\": \"envoy.service.route.v3.RouteDiscoveryService\"\n//     }\n//   }\n// }\n//\n// And for \"envoy.service.route.v3.RouteDiscoveryService\":\n// {\n//   \"envoy.service.route.v3.RouteDiscoveryService\":\n//     \"names_\": {\n//       \"V2\": \"envoy.api.v2.RouteDiscoveryService\",\n//       \"V3\": \"envoy.service.route.v3.RouteDiscoveryService\"\n//     }\n//   }\n// }\nusing ServiceVersionInfoMap = absl::flat_hash_map<ServiceName, ServiceVersionInfo>;\n\n// This creates a ServiceVersionInfoMap, with service name (For example:\n// \"envoy.api.v2.RouteDiscoveryService\") as the key.\nServiceVersionInfoMap\ncreateServiceVersionInfoMap(absl::string_view service_name,\n                            const std::array<std::string, 2>& versioned_service_names) {\n  const auto key = static_cast<ServiceName>(service_name);\n  return ServiceVersionInfoMap{{\n      // ServiceName as the key.\n      key,\n\n      // ServiceVersionInfo as the value.\n      ServiceVersionInfo{{\n          {envoy::config::core::v3::ApiVersion::V2, versioned_service_names[0]},\n          {envoy::config::core::v3::ApiVersion::V3, versioned_service_names[1]},\n      }},\n  }};\n}\n\n// A resource type URL. 
For example: \"type.googleapis.com/envoy.api.v2.RouteConfiguration\".\nusing TypeUrl = std::string;\n\nTypeUrl getResourceTypeUrl(absl::string_view service_name) {\n  const auto* service_desc = Protobuf::DescriptorPool::generated_pool()->FindServiceByName(\n      static_cast<ServiceName>(service_name));\n  ASSERT(service_desc != nullptr, fmt::format(\"{} missing\", service_name));\n  ASSERT(service_desc->options().HasExtension(envoy::annotations::resource));\n\n  return Grpc::Common::typeUrl(\n      service_desc->options().GetExtension(envoy::annotations::resource).type());\n}\n\n// A method name, e.g. \"envoy.api.v2.RouteDiscoveryService.StreamRoutes\".\nusing MethodName = std::string;\n\nstruct VersionedDiscoveryType {\n  // A map of transport_api_version to discovery service RPC method fully qualified names. e.g.\n  // {\n  //   \"V2\": \"envoy.api.v2.RouteDiscoveryService.StreamRoutes\",\n  //   \"V3\": \"envoy.service.route.v3.RouteDiscoveryService.StreamRoutes\"\n  // }\n  absl::flat_hash_map<envoy::config::core::v3::ApiVersion, MethodName> methods_;\n};\n\n// This holds versioned discovery types.\nstruct VersionedService {\n  VersionedDiscoveryType sotw_grpc_;\n  VersionedDiscoveryType delta_grpc_;\n  VersionedDiscoveryType rest_;\n};\n\nusing TypeUrlToVersionedServiceMap = absl::flat_hash_map<TypeUrl, VersionedService>;\n\n// buildTypeUrlToServiceMap() builds a reverse map from a resource type URLs to a versioned service\n// (by transport_api_version).\n//\n// The way we build it is by firstly constructing a list of ServiceVersionInfoMap:\n// [\n//   {\n//     \"envoy.api.v2.RouteDiscoveryService\": {\n//       \"names_\": {\n//         \"V2\": \"envoy.api.v2.RouteDiscoveryService\",\n//         \"V3\": \"envoy.service.route.v3.RouteDiscoveryService\"\n//       }\n//     }\n//   },\n//   {\n//     \"envoy.service.route.v3.RouteDiscoveryService\": {\n//       \"names_\": {\n//         \"V2\": \"envoy.api.v2.RouteDiscoveryService\",\n//         \"V3\": 
\"envoy.service.route.v3.RouteDiscoveryService\"\n//       }\n//     }\n//   }\n//  ...\n// ]\n//\n// Then we convert it into the following map, with the inferred resource type URL as the key:\n//\n// {\n//   \"type.googleapis.com/envoy.api.v2.RouteConfiguration\": {\n//     \"sotw_grpc_\": {\n//       \"methods_\": {\n//         \"V2\": \"envoy.api.v2.RouteDiscoveryService.StreamRoutes\",\n//         \"V3\": \"envoy.service.route.v3.RouteDiscoveryService.StreamRoutes\"\n//       }\n//     },\n//     ...\n//   },\n//   \"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\": {\n//     \"sotw_grpc_\": {\n//       \"methods_\": {\n//         \"V2\": \"envoy.api.v2.RouteDiscoveryService.StreamRoutes\",\n//         \"V3\": \"envoy.service.route.v3.RouteDiscoveryService.StreamRoutes\"\n//       }\n//     },\n//     ...\n//   }\n// }\n//\nTypeUrlToVersionedServiceMap* buildTypeUrlToServiceMap() {\n  auto* type_url_to_versioned_service_map = new TypeUrlToVersionedServiceMap();\n\n  // This happens once in the lifetime of Envoy. We build a reverse map from resource type URL to\n  // service methods (versioned by transport_api_version). 
We explicitly enumerate all services,\n  // since DescriptorPool doesn't support iterating over all descriptors, due its lazy load design,\n  // see https://www.mail-archive.com/protobuf@googlegroups.com/msg04540.html.\n  for (const ServiceVersionInfoMap& registered : {\n           SERVICE_VERSION_INFO(\"envoy.api.v2.RouteDiscoveryService\",\n                                \"envoy.service.route.v3.RouteDiscoveryService\"),\n           SERVICE_VERSION_INFO(\"envoy.api.v2.ScopedRoutesDiscoveryService\",\n                                \"envoy.service.route.v3.ScopedRoutesDiscoveryService\"),\n           SERVICE_VERSION_INFO(\"envoy.api.v2.ScopedRoutesDiscoveryService\",\n                                \"envoy.service.route.v3.ScopedRoutesDiscoveryService\"),\n           SERVICE_VERSION_INFO(\"envoy.api.v2.VirtualHostDiscoveryService\",\n                                \"envoy.service.route.v3.VirtualHostDiscoveryService\"),\n           SERVICE_VERSION_INFO(\"envoy.service.discovery.v2.SecretDiscoveryService\",\n                                \"envoy.service.secret.v3.SecretDiscoveryService\"),\n           SERVICE_VERSION_INFO(\"envoy.api.v2.ClusterDiscoveryService\",\n                                \"envoy.service.cluster.v3.ClusterDiscoveryService\"),\n           SERVICE_VERSION_INFO(\"envoy.api.v2.EndpointDiscoveryService\",\n                                \"envoy.service.endpoint.v3.EndpointDiscoveryService\"),\n           SERVICE_VERSION_INFO(\"envoy.api.v2.ListenerDiscoveryService\",\n                                \"envoy.service.listener.v3.ListenerDiscoveryService\"),\n           SERVICE_VERSION_INFO(\"envoy.service.discovery.v2.RuntimeDiscoveryService\",\n                                \"envoy.service.runtime.v3.RuntimeDiscoveryService\"),\n           ServiceVersionInfoMap{{\n               \"envoy.service.extension.v3.ExtensionConfigDiscoveryService\",\n               ServiceVersionInfo{{\n                   
{envoy::config::core::v3::ApiVersion::V3,\n                    \"envoy.service.extension.v3.ExtensionConfigDiscoveryService\"},\n               }},\n           }},\n       }) {\n    for (const auto& [registered_service_name, registered_service_info] : registered) {\n      const TypeUrl resource_type_url = getResourceTypeUrl(registered_service_name);\n      VersionedService& service = (*type_url_to_versioned_service_map)[resource_type_url];\n\n      for (const auto& [transport_api_version, service_name] : registered_service_info.names_) {\n        const auto* service_desc =\n            Protobuf::DescriptorPool::generated_pool()->FindServiceByName(service_name);\n        ASSERT(service_desc != nullptr, fmt::format(\"{} missing\", service_name));\n        ASSERT(service_desc->options().HasExtension(envoy::annotations::resource));\n\n        // We populate the service methods that are known below, but it's possible that some\n        // services don't implement all, e.g. VHDS doesn't support SotW or REST.\n        for (int method_index = 0; method_index < service_desc->method_count(); ++method_index) {\n          const auto& method_desc = *service_desc->method(method_index);\n          if (absl::StartsWith(method_desc.name(), \"Stream\")) {\n            service.sotw_grpc_.methods_[transport_api_version] = method_desc.full_name();\n          } else if (absl::StartsWith(method_desc.name(), \"Delta\")) {\n            service.delta_grpc_.methods_[transport_api_version] = method_desc.full_name();\n          } else if (absl::StartsWith(method_desc.name(), \"Fetch\")) {\n            service.rest_.methods_[transport_api_version] = method_desc.full_name();\n          } else {\n            ASSERT(false, \"Unknown xDS service method\");\n          }\n        }\n      }\n    }\n  }\n  return type_url_to_versioned_service_map;\n}\n\nTypeUrlToVersionedServiceMap& typeUrlToVersionedServiceMap() {\n  static TypeUrlToVersionedServiceMap* type_url_to_versioned_service_map =\n      
buildTypeUrlToServiceMap();\n  return *type_url_to_versioned_service_map;\n}\n\nenvoy::config::core::v3::ApiVersion\neffectiveTransportApiVersion(envoy::config::core::v3::ApiVersion transport_api_version) {\n  // By default (when the transport_api_version is \"AUTO\"), the effective transport_api_version is\n  // envoy::config::core::v3::ApiVersion::V2.\n  if (transport_api_version == envoy::config::core::v3::ApiVersion::AUTO) {\n    return envoy::config::core::v3::ApiVersion::V2;\n  }\n  return transport_api_version;\n}\n\n} // namespace\n\nconst Protobuf::MethodDescriptor&\ndeltaGrpcMethod(absl::string_view type_url,\n                envoy::config::core::v3::ApiVersion transport_api_version) {\n  const auto it = typeUrlToVersionedServiceMap().find(static_cast<TypeUrl>(type_url));\n  ASSERT(it != typeUrlToVersionedServiceMap().cend());\n  return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n      it->second.delta_grpc_.methods_[effectiveTransportApiVersion(transport_api_version)]);\n}\n\nconst Protobuf::MethodDescriptor&\nsotwGrpcMethod(absl::string_view type_url,\n               envoy::config::core::v3::ApiVersion transport_api_version) {\n  const auto it = typeUrlToVersionedServiceMap().find(static_cast<TypeUrl>(type_url));\n  ASSERT(it != typeUrlToVersionedServiceMap().cend());\n  return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n      it->second.sotw_grpc_.methods_[effectiveTransportApiVersion(transport_api_version)]);\n}\n\nconst Protobuf::MethodDescriptor&\nrestMethod(absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version) {\n  const auto it = typeUrlToVersionedServiceMap().find(static_cast<TypeUrl>(type_url));\n  ASSERT(it != typeUrlToVersionedServiceMap().cend());\n  return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n      it->second.rest_.methods_[effectiveTransportApiVersion(transport_api_version)]);\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/type_to_endpoint.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n// Translates an xDS resource type_url to the name of the delta gRPC service that carries it.\nconst Protobuf::MethodDescriptor&\ndeltaGrpcMethod(absl::string_view resource_type_url,\n                envoy::config::core::v3::ApiVersion transport_api_version);\n// Translates an xDS resource type_url to the name of the state-of-the-world gRPC service that\n// carries it.\nconst Protobuf::MethodDescriptor&\nsotwGrpcMethod(absl::string_view resource_type_url,\n               envoy::config::core::v3::ApiVersion transport_api_version);\n// Translates an xDS resource type_url to the name of the REST service that carries it.\nconst Protobuf::MethodDescriptor&\nrestMethod(absl::string_view resource_type_url,\n           envoy::config::core::v3::ApiVersion transport_api_version);\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/udpa_context_params.cc",
    "content": "#include \"common/config/udpa_context_params.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nnamespace {\n\nusing RenderContextParamCb = std::function<std::string(const envoy::config::core::v3::Node& node)>;\nusing NodeContextRenderers = absl::flat_hash_map<std::string, RenderContextParamCb>;\n\nRenderContextParamCb directStringFieldRenderer(const std::string& field) {\n  return [field](const envoy::config::core::v3::Node& node) -> std::string {\n    return MessageUtil::getStringField(node, field);\n  };\n}\n\nRenderContextParamCb localityStringFieldRenderer(const std::string& field) {\n  return [field](const envoy::config::core::v3::Node& node) -> std::string {\n    return MessageUtil::getStringField(node.locality(), field);\n  };\n}\n\nstd::string buildSemanticVersionRenderer(const envoy::config::core::v3::Node& node) {\n  const auto& semver = node.user_agent_build_version().version();\n  return fmt::format(\"{}.{}.{}\", semver.major_number(), semver.minor_number(), semver.patch());\n}\n\nconst NodeContextRenderers& nodeParamCbs() {\n  CONSTRUCT_ON_FIRST_USE(NodeContextRenderers, {\"id\", directStringFieldRenderer(\"id\")},\n                         {\"cluster\", directStringFieldRenderer(\"cluster\")},\n                         {\"user_agent_name\", directStringFieldRenderer(\"user_agent_name\")},\n                         {\"user_agent_version\", directStringFieldRenderer(\"user_agent_version\")},\n                         {\"locality.region\", localityStringFieldRenderer(\"region\")},\n                         {\"locality.zone\", localityStringFieldRenderer(\"zone\")},\n                         {\"locality.sub_zone\", localityStringFieldRenderer(\"sub_zone\")},\n                         {\"user_agent_build_version.version\", buildSemanticVersionRenderer});\n}\n\nvoid mergeMetadataJson(Protobuf::Map<std::string, std::string>& params,\n                       const 
ProtobufWkt::Struct& metadata, const std::string& prefix) {\n  for (const auto& it : metadata.fields()) {\n    params[prefix + it.first] = MessageUtil::getJsonStringFromMessage(it.second);\n  }\n}\n\n} // namespace\n\nudpa::core::v1::ContextParams UdpaContextParams::encode(\n    const envoy::config::core::v3::Node& node, const std::vector<std::string>& node_context_params,\n    const udpa::core::v1::ContextParams& resource_context_params,\n    const std::vector<std::string>& client_features,\n    const absl::flat_hash_map<std::string, std::string>& extra_resource_params) {\n  udpa::core::v1::ContextParams context_params;\n  auto& mutable_params = *context_params.mutable_params();\n  // 1. Establish base layer of per-node context parameters.\n  for (const std::string& ncp : node_context_params) {\n    // First attempt field accessors known ahead of time, if that fails we consider the cases of\n    // metadata, either directly in the Node message, or nested in the user_agent_build_version.\n    if (nodeParamCbs().count(ncp) > 0) {\n      mutable_params[\"udpa.node.\" + ncp] = nodeParamCbs().at(ncp)(node);\n    } else if (ncp == \"metadata\") {\n      mergeMetadataJson(mutable_params, node.metadata(), \"udpa.node.metadata.\");\n    } else if (ncp == \"user_agent_build_version.metadata\") {\n      mergeMetadataJson(mutable_params, node.user_agent_build_version().metadata(),\n                        \"udpa.node.user_agent_build_version.metadata.\");\n    }\n  }\n\n  // 2. Overlay with context parameters from resource name.\n  for (const auto& it : resource_context_params.params()) {\n    mutable_params[it.first] = it.second;\n  }\n\n  // 3. Overlay with per-resource type context parameters.\n  for (const std::string& cf : client_features) {\n    mutable_params[\"udpa.client_feature.\" + cf] = \"true\";\n  }\n\n  // 4. 
Overlay with per-resource well-known attributes.\n  for (const auto& it : extra_resource_params) {\n    mutable_params[\"udpa.resource.\" + it.first] = it.second;\n  }\n\n  return context_params;\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/udpa_context_params.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"udpa/core/v1/context_params.pb.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n// Utilities for working with context parameters.\nclass UdpaContextParams {\npublic:\n  /**\n   * Encode context parameters by following the xDS transport precedence algorithm and applying\n   * parameter prefixes.\n   * @param node reference to the local Node information.\n   * @param node_context_params a list of node fields to include in context parameters.\n   * @param resource_context_params context parameters from resource locator.\n   * @param client_features client feature capabilities.\n   * @param extra_resource_param per-resource type well known attributes.\n   * @return udpa::core::v1::ContextParams encoded context parameters.\n   */\n  static udpa::core::v1::ContextParams\n  encode(const envoy::config::core::v3::Node& node,\n         const std::vector<std::string>& node_context_params,\n         const udpa::core::v1::ContextParams& resource_context_params,\n         const std::vector<std::string>& client_features,\n         const absl::flat_hash_map<std::string, std::string>& extra_resource_params);\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/udpa_resource.cc",
    "content": "#include \"common/config/udpa_resource.h\"\n\n#include <algorithm>\n\n#include \"common/common/fmt.h\"\n#include \"common/http/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n\n// TODO(htuch): This file has a bunch of ad hoc URI encoding/decoding based on Envoy's HTTP util\n// functions. Once https://github.com/envoyproxy/envoy/issues/6588 lands, we can replace with GURL.\n\nnamespace Envoy {\nnamespace Config {\n\nusing PercentEncoding = Http::Utility::PercentEncoding;\n\nnamespace {\n\n// We need to percent-encode authority, id, path and query params. Resource types should not have\n// reserved characters.\n\nstd::string encodeAuthority(const std::string& authority) {\n  return PercentEncoding::encode(authority, \"%/?#\");\n}\n\nstd::string encodeIdPath(const Protobuf::RepeatedPtrField<std::string>& id) {\n  std::vector<std::string> path_components;\n  for (const auto& id_component : id) {\n    path_components.emplace_back(PercentEncoding::encode(id_component, \"%:/?#[]\"));\n  }\n  const std::string path = absl::StrJoin(path_components, \"/\");\n  return path.empty() ? \"\" : absl::StrCat(\"/\", path);\n}\n\nstd::string encodeContextParams(const udpa::core::v1::ContextParams& context_params,\n                                bool sort_context_params) {\n  std::vector<std::string> query_param_components;\n  for (const auto& context_param : context_params.params()) {\n    query_param_components.emplace_back(\n        absl::StrCat(PercentEncoding::encode(context_param.first, \"%#[]&=\"), \"=\",\n                     PercentEncoding::encode(context_param.second, \"%#[]&=\")));\n  }\n  if (sort_context_params) {\n    std::sort(query_param_components.begin(), query_param_components.end());\n  }\n  return query_param_components.empty() ? 
\"\" : \"?\" + absl::StrJoin(query_param_components, \"&\");\n}\n\nstd::string encodeDirectives(\n    const Protobuf::RepeatedPtrField<udpa::core::v1::ResourceLocator::Directive>& directives) {\n  std::vector<std::string> fragment_components;\n  const std::string DirectiveEscapeChars = \"%#[],\";\n  for (const auto& directive : directives) {\n    switch (directive.directive_case()) {\n    case udpa::core::v1::ResourceLocator::Directive::DirectiveCase::kAlt:\n      fragment_components.emplace_back(absl::StrCat(\n          \"alt=\", PercentEncoding::encode(UdpaResourceIdentifier::encodeUrl(directive.alt()),\n                                          DirectiveEscapeChars)));\n      break;\n    case udpa::core::v1::ResourceLocator::Directive::DirectiveCase::kEntry:\n      fragment_components.emplace_back(\n          absl::StrCat(\"entry=\", PercentEncoding::encode(directive.entry(), DirectiveEscapeChars)));\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n  return fragment_components.empty() ? 
\"\" : \"#\" + absl::StrJoin(fragment_components, \",\");\n}\n\n} // namespace\n\nstd::string UdpaResourceIdentifier::encodeUrn(const udpa::core::v1::ResourceName& resource_name,\n                                              const EncodeOptions& options) {\n  const std::string authority = encodeAuthority(resource_name.authority());\n  const std::string id_path = encodeIdPath(resource_name.id());\n  const std::string query_params =\n      encodeContextParams(resource_name.context(), options.sort_context_params_);\n  return absl::StrCat(\"udpa://\", authority, \"/\", resource_name.resource_type(), id_path,\n                      query_params);\n}\n\nstd::string\nUdpaResourceIdentifier::encodeUrl(const udpa::core::v1::ResourceLocator& resource_locator,\n                                  const EncodeOptions& options) {\n  const std::string id_path = encodeIdPath(resource_locator.id());\n  const std::string fragment = encodeDirectives(resource_locator.directives());\n  std::string scheme = \"udpa:\";\n  switch (resource_locator.scheme()) {\n  case udpa::core::v1::ResourceLocator::HTTP:\n    scheme = \"http:\";\n    FALLTHRU;\n  case udpa::core::v1::ResourceLocator::UDPA: {\n    const std::string authority = encodeAuthority(resource_locator.authority());\n    const std::string query_params =\n        encodeContextParams(resource_locator.exact_context(), options.sort_context_params_);\n    return absl::StrCat(scheme, \"//\", authority, \"/\", resource_locator.resource_type(), id_path,\n                        query_params, fragment);\n  }\n  case udpa::core::v1::ResourceLocator::FILE: {\n    return absl::StrCat(\"file://\", id_path, fragment);\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nnamespace {\n\nvoid decodePath(absl::string_view path, std::string* resource_type,\n                Protobuf::RepeatedPtrField<std::string>& id) {\n  // This is guaranteed by Http::Utility::extractHostPathFromUrn.\n  ASSERT(absl::StartsWith(path, \"/\"));\n  const 
std::vector<absl::string_view> path_components = absl::StrSplit(path.substr(1), '/');\n  auto id_it = path_components.cbegin();\n  if (resource_type != nullptr) {\n    *resource_type = std::string(path_components[0]);\n    if (resource_type->empty()) {\n      throw UdpaResourceIdentifier::DecodeException(\n          fmt::format(\"Resource type missing from {}\", path));\n    }\n    id_it = std::next(id_it);\n  }\n  for (; id_it != path_components.cend(); id_it++) {\n    *id.Add() = PercentEncoding::decode(*id_it);\n  }\n}\n\nvoid decodeQueryParams(absl::string_view query_params,\n                       udpa::core::v1::ContextParams& context_params) {\n  Http::Utility::QueryParams query_params_components =\n      Http::Utility::parseQueryString(query_params);\n  for (const auto& it : query_params_components) {\n    (*context_params.mutable_params())[PercentEncoding::decode(it.first)] =\n        PercentEncoding::decode(it.second);\n  }\n}\n\nvoid decodeFragment(\n    absl::string_view fragment,\n    Protobuf::RepeatedPtrField<udpa::core::v1::ResourceLocator::Directive>& directives) {\n  const std::vector<absl::string_view> fragment_components = absl::StrSplit(fragment, ',');\n  for (const absl::string_view& fragment_component : fragment_components) {\n    if (absl::StartsWith(fragment_component, \"alt=\")) {\n      directives.Add()->mutable_alt()->MergeFrom(\n          UdpaResourceIdentifier::decodeUrl(PercentEncoding::decode(fragment_component.substr(4))));\n    } else if (absl::StartsWith(fragment_component, \"entry=\")) {\n      directives.Add()->set_entry(PercentEncoding::decode(fragment_component.substr(6)));\n    } else {\n      throw UdpaResourceIdentifier::DecodeException(\n          fmt::format(\"Unknown fragment component {}\", fragment_component));\n      ;\n    }\n  }\n}\n\n} // namespace\n\nudpa::core::v1::ResourceName UdpaResourceIdentifier::decodeUrn(absl::string_view resource_urn) {\n  if (!absl::StartsWith(resource_urn, \"udpa:\")) {\n    throw 
UdpaResourceIdentifier::DecodeException(\n        fmt::format(\"{} does not have an udpa: scheme\", resource_urn));\n  }\n  absl::string_view host, path;\n  Http::Utility::extractHostPathFromUri(resource_urn, host, path);\n  udpa::core::v1::ResourceName decoded_resource_name;\n  decoded_resource_name.set_authority(PercentEncoding::decode(host));\n  const size_t query_params_start = path.find('?');\n  if (query_params_start != absl::string_view::npos) {\n    decodeQueryParams(path.substr(query_params_start), *decoded_resource_name.mutable_context());\n    path = path.substr(0, query_params_start);\n  }\n  decodePath(path, decoded_resource_name.mutable_resource_type(),\n             *decoded_resource_name.mutable_id());\n  return decoded_resource_name;\n}\n\nudpa::core::v1::ResourceLocator UdpaResourceIdentifier::decodeUrl(absl::string_view resource_url) {\n  absl::string_view host, path;\n  Http::Utility::extractHostPathFromUri(resource_url, host, path);\n  udpa::core::v1::ResourceLocator decoded_resource_locator;\n  const size_t fragment_start = path.find('#');\n  if (fragment_start != absl::string_view::npos) {\n    decodeFragment(path.substr(fragment_start + 1), *decoded_resource_locator.mutable_directives());\n    path = path.substr(0, fragment_start);\n  }\n  if (absl::StartsWith(resource_url, \"udpa:\")) {\n    decoded_resource_locator.set_scheme(udpa::core::v1::ResourceLocator::UDPA);\n  } else if (absl::StartsWith(resource_url, \"http:\")) {\n    decoded_resource_locator.set_scheme(udpa::core::v1::ResourceLocator::HTTP);\n  } else if (absl::StartsWith(resource_url, \"file:\")) {\n    decoded_resource_locator.set_scheme(udpa::core::v1::ResourceLocator::FILE);\n    // File URLs only have a path and fragment.\n    decodePath(path, nullptr, *decoded_resource_locator.mutable_id());\n    return decoded_resource_locator;\n  } else {\n    throw UdpaResourceIdentifier::DecodeException(\n        fmt::format(\"{} does not have a udpa:, http: or file: scheme\", 
resource_url));\n  }\n  decoded_resource_locator.set_authority(PercentEncoding::decode(host));\n  const size_t query_params_start = path.find('?');\n  if (query_params_start != absl::string_view::npos) {\n    decodeQueryParams(path.substr(query_params_start),\n                      *decoded_resource_locator.mutable_exact_context());\n    path = path.substr(0, query_params_start);\n  }\n  decodePath(path, decoded_resource_locator.mutable_resource_type(),\n             *decoded_resource_locator.mutable_id());\n  return decoded_resource_locator;\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/udpa_resource.h",
    "content": "#pragma once\n\n#include \"envoy/common/exception.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"udpa/core/v1/resource_locator.pb.h\"\n#include \"udpa/core/v1/resource_name.pb.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n// Utilities for URI encoding/decoding of udpa::core::v1::Resource{Name,Locator}.\nclass UdpaResourceIdentifier {\npublic:\n  // Options for encoded URIs.\n  struct EncodeOptions {\n    // Should the context params be sorted by key? This provides deterministic encoding.\n    bool sort_context_params_{};\n  };\n\n  /**\n   * Encode a udpa::core::v1::ResourceName message as a udpa:// URN string.\n   *\n   * @param resource_name resource name message.\n   * @param options encoding options.\n   * @return std::string udpa:// URN for resource_name.\n   */\n  static std::string encodeUrn(const udpa::core::v1::ResourceName& resource_name,\n                               const EncodeOptions& options);\n  static std::string encodeUrn(const udpa::core::v1::ResourceName& resource_name) {\n    return encodeUrn(resource_name, {});\n  }\n\n  /**\n   * Encode a udpa::core::v1::ResourceLocator message as a udpa:// URL string.\n   *\n   * @param resource_name resource name message.\n   * @param options encoding options.\n   * @return std::string udpa:// URL for resource_name.\n   */\n  static std::string encodeUrl(const udpa::core::v1::ResourceLocator& resource_locator,\n                               const EncodeOptions& options);\n  static std::string encodeUrl(const udpa::core::v1::ResourceLocator& resource_locator) {\n    return encodeUrl(resource_locator, {});\n  }\n\n  // Thrown when an exception occurs during URI decoding.\n  class DecodeException : public EnvoyException {\n  public:\n    DecodeException(const std::string& what) : EnvoyException(what) {}\n  };\n\n  /**\n   * Decode a udpa:// URN string to a udpa::core::v1::ResourceName.\n   *\n   * @param resource_urn udpa:// resource URN.\n   * @return 
udpa::core::v1::ResourceName resource name message for resource_urn.\n   * @throws DecodeException when parsing fails.\n   */\n  static udpa::core::v1::ResourceName decodeUrn(absl::string_view resource_urn);\n\n  /**\n   * Decode a udpa:// URL string to a udpa::core::v1::ResourceLocator.\n   *\n   * @param resource_url udpa:// resource URL.\n   * @return udpa::core::v1::ResourceLocator resource name message for resource_url.\n   * @throws DecodeException when parsing fails.\n   */\n  static udpa::core::v1::ResourceLocator decodeUrl(absl::string_view resource_url);\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/update_ack.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"absl/strings/string_view.h\"\n#include \"google/rpc/status.pb.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nstruct UpdateAck {\n  UpdateAck(absl::string_view nonce, absl::string_view type_url)\n      : nonce_(nonce), type_url_(type_url) {}\n  const std::string nonce_;\n  const std::string type_url_;\n  ::google::rpc::Status error_detail_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/utility.cc",
    "content": "#include \"common/config/utility.h\"\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/hex.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/api_type_oracle.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/histogram_impl.h\"\n#include \"common/stats/stats_matcher_impl.h\"\n#include \"common/stats/tag_producer_impl.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nstd::string Utility::truncateGrpcStatusMessage(absl::string_view error_message) {\n  // GRPC sends error message via trailers, which by default has a 8KB size limit(see\n  // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests). Truncates the\n  // error message if it's too long.\n  constexpr uint32_t kProtobufErrMsgLen = 4096;\n  return fmt::format(\"{}{}\", error_message.substr(0, kProtobufErrMsgLen),\n                     error_message.length() > kProtobufErrMsgLen ? 
\"...(truncated)\" : \"\");\n}\n\nvoid Utility::translateApiConfigSource(\n    const std::string& cluster, uint32_t refresh_delay_ms, const std::string& api_type,\n    envoy::config::core::v3::ApiConfigSource& api_config_source) {\n  // TODO(junr03): document the option to chose an api type once we have created\n  // stronger constraints around v2.\n  if (api_type == ApiType::get().Grpc) {\n    api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n    envoy::config::core::v3::GrpcService* grpc_service = api_config_source.add_grpc_services();\n    grpc_service->mutable_envoy_grpc()->set_cluster_name(cluster);\n  } else {\n    if (api_type == ApiType::get().UnsupportedRestLegacy) {\n      api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::\n                                         hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY);\n    } else if (api_type == ApiType::get().Rest) {\n      api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n    }\n    api_config_source.add_cluster_names(cluster);\n  }\n\n  api_config_source.mutable_refresh_delay()->CopyFrom(\n      Protobuf::util::TimeUtil::MillisecondsToDuration(refresh_delay_ms));\n}\n\nvoid Utility::checkCluster(absl::string_view error_prefix, absl::string_view cluster_name,\n                           Upstream::ClusterManager& cm, bool allow_added_via_api) {\n  Upstream::ThreadLocalCluster* cluster = cm.get(cluster_name);\n  if (cluster == nullptr) {\n    throw EnvoyException(fmt::format(\"{}: unknown cluster '{}'\", error_prefix, cluster_name));\n  }\n\n  if (!allow_added_via_api && cluster->info()->addedViaApi()) {\n    throw EnvoyException(fmt::format(\n        \"{}: invalid cluster '{}': currently only static (non-CDS) clusters are supported\",\n        error_prefix, cluster_name));\n  }\n}\n\nvoid Utility::checkClusterAndLocalInfo(absl::string_view error_prefix,\n                                       absl::string_view cluster_name, 
Upstream::ClusterManager& cm,\n                                       const LocalInfo::LocalInfo& local_info) {\n  checkCluster(error_prefix, cluster_name, cm);\n  checkLocalInfo(error_prefix, local_info);\n}\n\nvoid Utility::checkLocalInfo(absl::string_view error_prefix,\n                             const LocalInfo::LocalInfo& local_info) {\n  if (local_info.clusterName().empty() || local_info.nodeName().empty()) {\n    throw EnvoyException(\n        fmt::format(\"{}: node 'id' and 'cluster' are required. Set it either in 'node' config or \"\n                    \"via --service-node and --service-cluster options.\",\n                    error_prefix, local_info.node().DebugString()));\n  }\n}\n\nvoid Utility::checkFilesystemSubscriptionBackingPath(const std::string& path, Api::Api& api) {\n  // TODO(junr03): the file might be deleted between this check and the\n  // watch addition.\n  if (!api.fileSystem().fileExists(path)) {\n    throw EnvoyException(fmt::format(\n        \"envoy::api::v2::Path must refer to an existing path in the system: '{}' does not exist\",\n        path));\n  }\n}\n\nvoid Utility::checkApiConfigSourceNames(\n    const envoy::config::core::v3::ApiConfigSource& api_config_source) {\n  const bool is_grpc =\n      (api_config_source.api_type() == envoy::config::core::v3::ApiConfigSource::GRPC ||\n       api_config_source.api_type() == envoy::config::core::v3::ApiConfigSource::DELTA_GRPC);\n\n  if (api_config_source.cluster_names().empty() && api_config_source.grpc_services().empty()) {\n    throw EnvoyException(\n        fmt::format(\"API configs must have either a gRPC service or a cluster name defined: {}\",\n                    api_config_source.DebugString()));\n  }\n\n  if (is_grpc) {\n    if (!api_config_source.cluster_names().empty()) {\n      throw EnvoyException(\n          fmt::format(\"{}::(DELTA_)GRPC must not have a cluster name specified: {}\",\n                      api_config_source.GetTypeName(), 
api_config_source.DebugString()));\n    }\n    if (api_config_source.grpc_services().size() > 1) {\n      throw EnvoyException(\n          fmt::format(\"{}::(DELTA_)GRPC must have a single gRPC service specified: {}\",\n                      api_config_source.GetTypeName(), api_config_source.DebugString()));\n    }\n  } else {\n    if (!api_config_source.grpc_services().empty()) {\n      throw EnvoyException(\n          fmt::format(\"{}, if not a gRPC type, must not have a gRPC service specified: {}\",\n                      api_config_source.GetTypeName(), api_config_source.DebugString()));\n    }\n    if (api_config_source.cluster_names().size() != 1) {\n      throw EnvoyException(fmt::format(\"{} must have a singleton cluster name specified: {}\",\n                                       api_config_source.GetTypeName(),\n                                       api_config_source.DebugString()));\n    }\n  }\n}\n\nvoid Utility::validateClusterName(const Upstream::ClusterManager::ClusterSet& primary_clusters,\n                                  const std::string& cluster_name,\n                                  const std::string& config_source) {\n  const auto& it = primary_clusters.find(cluster_name);\n  if (it == primary_clusters.end()) {\n    throw EnvoyException(fmt::format(\"{} must have a statically defined non-EDS cluster: '{}' does \"\n                                     \"not exist, was added via api, or is an EDS cluster\",\n                                     config_source, cluster_name));\n  }\n}\n\nvoid Utility::checkApiConfigSourceSubscriptionBackingCluster(\n    const Upstream::ClusterManager::ClusterSet& primary_clusters,\n    const envoy::config::core::v3::ApiConfigSource& api_config_source) {\n  Utility::checkApiConfigSourceNames(api_config_source);\n\n  const bool is_grpc =\n      (api_config_source.api_type() == envoy::config::core::v3::ApiConfigSource::GRPC);\n\n  if (!api_config_source.cluster_names().empty()) {\n    // All API configs of type 
REST and UNSUPPORTED_REST_LEGACY should have cluster names.\n    // Additionally, some gRPC API configs might have a cluster name set instead\n    // of an envoy gRPC.\n    Utility::validateClusterName(primary_clusters, api_config_source.cluster_names()[0],\n                                 api_config_source.GetTypeName());\n  } else if (is_grpc) {\n    // Some ApiConfigSources of type GRPC won't have a cluster name, such as if\n    // they've been configured with google_grpc.\n    if (api_config_source.grpc_services()[0].has_envoy_grpc()) {\n      // If an Envoy gRPC exists, we take its cluster name.\n      Utility::validateClusterName(primary_clusters,\n                                   api_config_source.grpc_services()[0].envoy_grpc().cluster_name(),\n                                   api_config_source.GetTypeName());\n    }\n  }\n  // Otherwise, there is no cluster name to validate.\n}\n\nstd::chrono::milliseconds Utility::apiConfigSourceRefreshDelay(\n    const envoy::config::core::v3::ApiConfigSource& api_config_source) {\n  if (!api_config_source.has_refresh_delay()) {\n    throw EnvoyException(\"refresh_delay is required for REST API configuration sources\");\n  }\n\n  return std::chrono::milliseconds(\n      DurationUtil::durationToMilliseconds(api_config_source.refresh_delay()));\n}\n\nstd::chrono::milliseconds Utility::apiConfigSourceRequestTimeout(\n    const envoy::config::core::v3::ApiConfigSource& api_config_source) {\n  return std::chrono::milliseconds(\n      PROTOBUF_GET_MS_OR_DEFAULT(api_config_source, request_timeout, 1000));\n}\n\nstd::chrono::milliseconds Utility::configSourceInitialFetchTimeout(\n    const envoy::config::core::v3::ConfigSource& config_source) {\n  return std::chrono::milliseconds(\n      PROTOBUF_GET_MS_OR_DEFAULT(config_source, initial_fetch_timeout, 15000));\n}\n\nRateLimitSettings\nUtility::parseRateLimitSettings(const envoy::config::core::v3::ApiConfigSource& api_config_source) {\n  RateLimitSettings 
rate_limit_settings;\n  if (api_config_source.has_rate_limit_settings()) {\n    rate_limit_settings.enabled_ = true;\n    rate_limit_settings.max_tokens_ =\n        PROTOBUF_GET_WRAPPED_OR_DEFAULT(api_config_source.rate_limit_settings(), max_tokens,\n                                        Envoy::Config::RateLimitSettings::DefaultMaxTokens);\n    rate_limit_settings.fill_rate_ =\n        PROTOBUF_GET_WRAPPED_OR_DEFAULT(api_config_source.rate_limit_settings(), fill_rate,\n                                        Envoy::Config::RateLimitSettings::DefaultFillRate);\n  }\n  return rate_limit_settings;\n}\n\nStats::TagProducerPtr\nUtility::createTagProducer(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n  return std::make_unique<Stats::TagProducerImpl>(bootstrap.stats_config());\n}\n\nStats::StatsMatcherPtr\nUtility::createStatsMatcher(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n  return std::make_unique<Stats::StatsMatcherImpl>(bootstrap.stats_config());\n}\n\nStats::HistogramSettingsConstPtr\nUtility::createHistogramSettings(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n  return std::make_unique<Stats::HistogramSettingsImpl>(bootstrap.stats_config());\n}\n\nGrpc::AsyncClientFactoryPtr Utility::factoryForGrpcApiConfigSource(\n    Grpc::AsyncClientManager& async_client_manager,\n    const envoy::config::core::v3::ApiConfigSource& api_config_source, Stats::Scope& scope,\n    bool skip_cluster_check) {\n  Utility::checkApiConfigSourceNames(api_config_source);\n\n  if (api_config_source.api_type() != envoy::config::core::v3::ApiConfigSource::GRPC &&\n      api_config_source.api_type() != envoy::config::core::v3::ApiConfigSource::DELTA_GRPC) {\n    throw EnvoyException(fmt::format(\"{} type must be gRPC: {}\", api_config_source.GetTypeName(),\n                                     api_config_source.DebugString()));\n  }\n\n  envoy::config::core::v3::GrpcService grpc_service;\n  
grpc_service.MergeFrom(api_config_source.grpc_services(0));\n\n  return async_client_manager.factoryForGrpcService(grpc_service, scope, skip_cluster_check);\n}\n\nenvoy::config::endpoint::v3::ClusterLoadAssignment Utility::translateClusterHosts(\n    const Protobuf::RepeatedPtrField<envoy::config::core::v3::Address>& hosts) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment;\n  envoy::config::endpoint::v3::LocalityLbEndpoints* locality_lb_endpoints =\n      load_assignment.add_endpoints();\n  // Since this LocalityLbEndpoints is built from hosts list, set the default weight to 1.\n  locality_lb_endpoints->mutable_load_balancing_weight()->set_value(1);\n  for (const envoy::config::core::v3::Address& host : hosts) {\n    envoy::config::endpoint::v3::LbEndpoint* lb_endpoint =\n        locality_lb_endpoints->add_lb_endpoints();\n    lb_endpoint->mutable_endpoint()->mutable_address()->MergeFrom(host);\n    lb_endpoint->mutable_load_balancing_weight()->set_value(1);\n  }\n  return load_assignment;\n}\n\nvoid Utility::translateOpaqueConfig(const ProtobufWkt::Any& typed_config,\n                                    const ProtobufWkt::Struct& config,\n                                    ProtobufMessage::ValidationVisitor& validation_visitor,\n                                    Protobuf::Message& out_proto) {\n  static const std::string struct_type =\n      ProtobufWkt::Struct::default_instance().GetDescriptor()->full_name();\n  static const std::string typed_struct_type =\n      udpa::type::v1::TypedStruct::default_instance().GetDescriptor()->full_name();\n\n  if (!typed_config.value().empty()) {\n    // Unpack methods will only use the fully qualified type name after the last '/'.\n    // https://github.com/protocolbuffers/protobuf/blob/3.6.x/src/google/protobuf/any.proto#L87\n    absl::string_view type = TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url());\n\n    if (type == typed_struct_type) {\n      udpa::type::v1::TypedStruct 
typed_struct;\n      MessageUtil::unpackTo(typed_config, typed_struct);\n      // if out_proto is expecting Struct, return directly\n      if (out_proto.GetDescriptor()->full_name() == struct_type) {\n        out_proto.CopyFrom(typed_struct.value());\n      } else {\n        // The typed struct might match out_proto, or some earlier version, let\n        // MessageUtil::jsonConvert sort this out.\n        MessageUtil::jsonConvert(typed_struct.value(), validation_visitor, out_proto);\n      }\n    } // out_proto is expecting Struct, unpack directly\n    else if (type != struct_type || out_proto.GetDescriptor()->full_name() == struct_type) {\n      MessageUtil::unpackTo(typed_config, out_proto);\n    } else {\n      ProtobufWkt::Struct struct_config;\n      MessageUtil::unpackTo(typed_config, struct_config);\n      MessageUtil::jsonConvert(struct_config, validation_visitor, out_proto);\n    }\n  }\n\n  if (!config.fields().empty()) {\n    MessageUtil::jsonConvert(config, validation_visitor, out_proto);\n  }\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/utility.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/grpc_mux.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_matcher.h\"\n#include \"envoy/stats/tag_producer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/backoff_strategy.h\"\n#include \"common/common/hash.h\"\n#include \"common/common/hex.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"udpa/type/v1/typed_struct.pb.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * Constant Api Type Values, used by envoy::config::core::v3::ApiConfigSource.\n */\nclass ApiTypeValues {\npublic:\n  const std::string UnsupportedRestLegacy{\"REST_LEGACY\"};\n  const std::string Rest{\"REST\"};\n  const std::string Grpc{\"GRPC\"};\n};\n\n/**\n * RateLimitSettings for discovery requests.\n */\nstruct RateLimitSettings {\n  // Default Max Tokens.\n  static const uint32_t DefaultMaxTokens = 100;\n  // Default Fill Rate.\n  static constexpr double DefaultFillRate = 10;\n\n  uint32_t max_tokens_{DefaultMaxTokens};\n  double fill_rate_{DefaultFillRate};\n  bool enabled_{false};\n};\n\nusing ApiType = ConstSingleton<ApiTypeValues>;\n\n/**\n * General config API utilities.\n */\nclass Utility {\npublic:\n  /**\n   * Legacy APIs uses JSON and do 
not have an explicit version.\n   * @param input the input to hash.\n   * @return std::pair<std::string, uint64_t> the string is the hash converted into\n   *         a hex string, pre-pended by a user friendly prefix. The uint64_t is the\n   *         raw hash.\n   */\n  static std::pair<std::string, uint64_t> computeHashedVersion(const std::string& input) {\n    uint64_t hash = HashUtil::xxHash64(input);\n    return std::make_pair(\"hash_\" + Hex::uint64ToHex(hash), hash);\n  }\n\n  /**\n   * Extract refresh_delay as a std::chrono::milliseconds from\n   * envoy::config::core::v3::ApiConfigSource.\n   */\n  static std::chrono::milliseconds\n  apiConfigSourceRefreshDelay(const envoy::config::core::v3::ApiConfigSource& api_config_source);\n\n  /**\n   * Extract request_timeout as a std::chrono::milliseconds from\n   * envoy::config::core::v3::ApiConfigSource. If request_timeout isn't set in the config source, a\n   * default value of 1s will be returned.\n   */\n  static std::chrono::milliseconds\n  apiConfigSourceRequestTimeout(const envoy::config::core::v3::ApiConfigSource& api_config_source);\n\n  /**\n   * Extract initial_fetch_timeout as a std::chrono::milliseconds from\n   * envoy::config::core::v3::ApiConfigSource. 
If request_timeout isn't set in the config source, a\n   * default value of 0s will be returned.\n   */\n  static std::chrono::milliseconds\n  configSourceInitialFetchTimeout(const envoy::config::core::v3::ConfigSource& config_source);\n\n  /**\n   * Populate an envoy::config::core::v3::ApiConfigSource.\n   * @param cluster supplies the cluster name for the ApiConfigSource.\n   * @param refresh_delay_ms supplies the refresh delay for the ApiConfigSource in ms.\n   * @param api_type supplies the type of subscription to use for the ApiConfigSource.\n   * @param api_config_source a reference to the envoy::config::core::v3::ApiConfigSource object to\n   * populate.\n   */\n  static void translateApiConfigSource(const std::string& cluster, uint32_t refresh_delay_ms,\n                                       const std::string& api_type,\n                                       envoy::config::core::v3::ApiConfigSource& api_config_source);\n\n  /**\n   * Check cluster info for API config sanity. Throws on error.\n   * @param error_prefix supplies the prefix to use in error messages.\n   * @param cluster_name supplies the cluster name to check.\n   * @param cm supplies the cluster manager.\n   * @param allow_added_via_api indicates whether a cluster is allowed to be added via api\n   *                            rather than be a static resource from the bootstrap config.\n   */\n  static void checkCluster(absl::string_view error_prefix, absl::string_view cluster_name,\n                           Upstream::ClusterManager& cm, bool allow_added_via_api = false);\n\n  /**\n   * Check cluster/local info for API config sanity. 
Throws on error.\n   * @param error_prefix supplies the prefix to use in error messages.\n   * @param cluster_name supplies the cluster name to check.\n   * @param cm supplies the cluster manager.\n   * @param local_info supplies the local info.\n   */\n  static void checkClusterAndLocalInfo(absl::string_view error_prefix,\n                                       absl::string_view cluster_name, Upstream::ClusterManager& cm,\n                                       const LocalInfo::LocalInfo& local_info);\n\n  /**\n   * Check local info for API config sanity. Throws on error.\n   * @param error_prefix supplies the prefix to use in error messages.\n   * @param local_info supplies the local info.\n   */\n  static void checkLocalInfo(absl::string_view error_prefix,\n                             const LocalInfo::LocalInfo& local_info);\n\n  /**\n   * Check the existence of a path for a filesystem subscription. Throws on error.\n   * @param path the path to validate.\n   * @param api reference to the Api object\n   */\n  static void checkFilesystemSubscriptionBackingPath(const std::string& path, Api::Api& api);\n\n  /**\n   * Check the grpc_services and cluster_names for API config sanity. Throws on error.\n   * @param api_config_source the config source to validate.\n   * @throws EnvoyException when an API config has the wrong number of gRPC\n   * services or cluster names, depending on expectations set by its API type.\n   */\n  static void\n  checkApiConfigSourceNames(const envoy::config::core::v3::ApiConfigSource& api_config_source);\n\n  /**\n   * Check the validity of a cluster backing an api config source. 
Throws on error.\n   * @param primary_clusters the API config source eligible clusters.\n   * @param cluster_name the cluster name to validate.\n   * @param config_source the config source typed name.\n   * @throws EnvoyException when an API config doesn't have a statically defined non-EDS cluster.\n   */\n  static void validateClusterName(const Upstream::ClusterManager::ClusterSet& primary_clusters,\n                                  const std::string& cluster_name,\n                                  const std::string& config_source);\n\n  /**\n   * Potentially calls Utility::validateClusterName, if a cluster name can be found.\n   * @param primary_clusters the API config source eligible clusters.\n   * @param api_config_source the config source to validate.\n   * @throws EnvoyException when an API config doesn't have a statically defined non-EDS cluster.\n   */\n  static void checkApiConfigSourceSubscriptionBackingCluster(\n      const Upstream::ClusterManager::ClusterSet& primary_clusters,\n      const envoy::config::core::v3::ApiConfigSource& api_config_source);\n\n  /**\n   * Parses RateLimit configuration from envoy::config::core::v3::ApiConfigSource to\n   * RateLimitSettings.\n   * @param api_config_source ApiConfigSource.\n   * @return RateLimitSettings.\n   */\n  static RateLimitSettings\n  parseRateLimitSettings(const envoy::config::core::v3::ApiConfigSource& api_config_source);\n\n  /**\n   * Generate a ControlPlaneStats object from stats scope.\n   * @param scope for stats.\n   * @return ControlPlaneStats for scope.\n   */\n  static ControlPlaneStats generateControlPlaneStats(Stats::Scope& scope) {\n    const std::string control_plane_prefix = \"control_plane.\";\n    return {ALL_CONTROL_PLANE_STATS(POOL_COUNTER_PREFIX(scope, control_plane_prefix),\n                                    POOL_GAUGE_PREFIX(scope, control_plane_prefix),\n                                    POOL_TEXT_READOUT_PREFIX(scope, control_plane_prefix))};\n  }\n\n  /**\n   * Generate 
a SubscriptionStats object from stats scope.\n   * @param scope for stats.\n   * @return SubscriptionStats for scope.\n   */\n  static SubscriptionStats generateStats(Stats::Scope& scope) {\n    return {\n        ALL_SUBSCRIPTION_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_TEXT_READOUT(scope))};\n  }\n\n  /**\n   * Get a Factory from the registry with a particular name (and templated type) with error checking\n   * to ensure the name and factory are valid.\n   * @param name string identifier for the particular implementation. Note: this is a proto string\n   * because it is assumed that this value will be pulled directly from the configuration proto.\n   */\n  template <class Factory> static Factory& getAndCheckFactoryByName(const std::string& name) {\n    if (name.empty()) {\n      ExceptionUtil::throwEnvoyException(\"Provided name for static registration lookup was empty.\");\n    }\n\n    Factory* factory = Registry::FactoryRegistry<Factory>::getFactory(name);\n\n    if (factory == nullptr) {\n      ExceptionUtil::throwEnvoyException(\n          fmt::format(\"Didn't find a registered implementation for name: '{}'\", name));\n    }\n\n    return *factory;\n  }\n\n  /**\n   * Get a Factory from the registry with error checking to ensure the name and the factory are\n   * valid.\n   * @param message proto that contains fields 'name' and 'typed_config'.\n   */\n  template <class Factory, class ProtoMessage>\n  static Factory& getAndCheckFactory(const ProtoMessage& message) {\n    Factory* factory = Utility::getFactoryByType<Factory>(message.typed_config());\n    if (factory != nullptr) {\n      return *factory;\n    }\n\n    return Utility::getAndCheckFactoryByName<Factory>(message.name());\n  }\n\n  /**\n   * Get type URL from a typed config.\n   * @param typed_config for the extension config.\n   */\n  static std::string getFactoryType(const ProtobufWkt::Any& typed_config) {\n    static const std::string& typed_struct_type =\n        
udpa::type::v1::TypedStruct::default_instance().GetDescriptor()->full_name();\n    // Unpack methods will only use the fully qualified type name after the last '/'.\n    // https://github.com/protocolbuffers/protobuf/blob/3.6.x/src/google/protobuf/any.proto#L87\n    auto type = std::string(TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url()));\n    if (type == typed_struct_type) {\n      udpa::type::v1::TypedStruct typed_struct;\n      MessageUtil::unpackTo(typed_config, typed_struct);\n      // Not handling nested structs or typed structs in typed structs\n      return std::string(TypeUtil::typeUrlToDescriptorFullName(typed_struct.type_url()));\n    }\n    return type;\n  }\n\n  /**\n   * Get a Factory from the registry by type URL.\n   * @param typed_config for the extension config.\n   */\n  template <class Factory> static Factory* getFactoryByType(const ProtobufWkt::Any& typed_config) {\n    if (typed_config.type_url().empty()) {\n      return nullptr;\n    }\n    return Registry::FactoryRegistry<Factory>::getFactoryByType(getFactoryType(typed_config));\n  }\n\n  /**\n   * Translate a nested config into a proto message provided by the implementation factory.\n   * @param enclosing_message proto that contains a field 'config'. 
Note: the enclosing proto is\n   * provided because for statically registered implementations, a custom config is generally\n   * optional, which means the conversion must be done conditionally.\n   * @param validation_visitor message validation visitor instance.\n   * @param factory implementation factory with the method 'createEmptyConfigProto' to produce a\n   * proto to be filled with the translated configuration.\n   */\n  template <class ProtoMessage, class Factory>\n  static ProtobufTypes::MessagePtr\n  translateToFactoryConfig(const ProtoMessage& enclosing_message,\n                           ProtobufMessage::ValidationVisitor& validation_visitor,\n                           Factory& factory) {\n    ProtobufTypes::MessagePtr config = factory.createEmptyConfigProto();\n\n    // Fail in an obvious way if a plugin does not return a proto.\n    RELEASE_ASSERT(config != nullptr, \"\");\n\n    // Check that the config type is not google.protobuf.Empty\n    RELEASE_ASSERT(config->GetDescriptor()->full_name() != \"google.protobuf.Empty\", \"\");\n\n    translateOpaqueConfig(enclosing_message.typed_config(),\n                          enclosing_message.hidden_envoy_deprecated_config(), validation_visitor,\n                          *config);\n    return config;\n  }\n\n  /**\n   * Translate the typed any field into a proto message provided by the implementation factory.\n   * @param typed_config typed configuration.\n   * @param validation_visitor message validation visitor instance.\n   * @param factory implementation factory with the method 'createEmptyConfigProto' to produce a\n   * proto to be filled with the translated configuration.\n   */\n  template <class Factory>\n  static ProtobufTypes::MessagePtr\n  translateAnyToFactoryConfig(const ProtobufWkt::Any& typed_config,\n                              ProtobufMessage::ValidationVisitor& validation_visitor,\n                              Factory& factory) {\n    ProtobufTypes::MessagePtr config = 
factory.createEmptyConfigProto();\n\n    // Fail in an obvious way if a plugin does not return a proto.\n    RELEASE_ASSERT(config != nullptr, \"\");\n\n    // Check that the config type is not google.protobuf.Empty\n    RELEASE_ASSERT(config->GetDescriptor()->full_name() != \"google.protobuf.Empty\", \"\");\n\n    translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), validation_visitor, *config);\n    return config;\n  }\n\n  /**\n   * Truncates the message to a length less than default GRPC trailers size limit (by default 8KiB).\n   */\n  static std::string truncateGrpcStatusMessage(absl::string_view error_message);\n\n  /**\n   * Create TagProducer instance. Check all tag names for conflicts to avoid\n   * unexpected tag name overwriting.\n   * @param bootstrap bootstrap proto.\n   * @throws EnvoyException when the conflict of tag names is found.\n   */\n  static Stats::TagProducerPtr\n  createTagProducer(const envoy::config::bootstrap::v3::Bootstrap& bootstrap);\n\n  /**\n   * Create StatsMatcher instance.\n   */\n  static Stats::StatsMatcherPtr\n  createStatsMatcher(const envoy::config::bootstrap::v3::Bootstrap& bootstrap);\n\n  /**\n   * Create HistogramSettings instance.\n   */\n  static Stats::HistogramSettingsConstPtr\n  createHistogramSettings(const envoy::config::bootstrap::v3::Bootstrap& bootstrap);\n\n  /**\n   * Obtain gRPC async client factory from a envoy::config::core::v3::ApiConfigSource.\n   * @param async_client_manager gRPC async client manager.\n   * @param api_config_source envoy::config::core::v3::ApiConfigSource. 
Must have config type GRPC.\n   * @param skip_cluster_check whether to skip cluster validation.\n   * @return Grpc::AsyncClientFactoryPtr gRPC async client factory.\n   */\n  static Grpc::AsyncClientFactoryPtr\n  factoryForGrpcApiConfigSource(Grpc::AsyncClientManager& async_client_manager,\n                                const envoy::config::core::v3::ApiConfigSource& api_config_source,\n                                Stats::Scope& scope, bool skip_cluster_check);\n\n  /**\n   * Translate a set of cluster's hosts into a load assignment configuration.\n   * @param hosts cluster's list of hosts.\n   * @return envoy::config::endpoint::v3::ClusterLoadAssignment a load assignment configuration.\n   */\n  static envoy::config::endpoint::v3::ClusterLoadAssignment\n  translateClusterHosts(const Protobuf::RepeatedPtrField<envoy::config::core::v3::Address>& hosts);\n\n  /**\n   * Translate opaque config from google.protobuf.Any or google.protobuf.Struct to defined proto\n   * message.\n   * @param typed_config opaque config packed in google.protobuf.Any\n   * @param config the deprecated google.protobuf.Struct config, empty struct if doesn't exist.\n   * @param validation_visitor message validation visitor instance.\n   * @param out_proto the proto message instantiated by extensions\n   */\n  static void translateOpaqueConfig(const ProtobufWkt::Any& typed_config,\n                                    const ProtobufWkt::Struct& config,\n                                    ProtobufMessage::ValidationVisitor& validation_visitor,\n                                    Protobuf::Message& out_proto);\n\n  /**\n   * Verify that any filter designed to be terminal is configured to be terminal, and vice versa.\n   * @param name the name of the filter.\n   * @param filter_type the type of filter.\n   * @param filter_chain_type the type of filter chain.\n   * @param is_terminal_filter true if the filter is designed to be terminal.\n   * @param last_filter_in_current_config true if the 
filter is last in the configuration.\n   * @throws EnvoyException if there is a mismatch between design and configuration.\n   */\n  static void validateTerminalFilters(const std::string& name, const std::string& filter_type,\n                                      const char* filter_chain_type, bool is_terminal_filter,\n                                      bool last_filter_in_current_config) {\n    if (is_terminal_filter && !last_filter_in_current_config) {\n      ExceptionUtil::throwEnvoyException(\n          fmt::format(\"Error: terminal filter named {} of type {} must be the \"\n                      \"last filter in a {} filter chain.\",\n                      name, filter_type, filter_chain_type));\n    } else if (!is_terminal_filter && last_filter_in_current_config) {\n      ExceptionUtil::throwEnvoyException(fmt::format(\n          \"Error: non-terminal filter named {} of type {} is the last filter in a {} filter chain.\",\n          name, filter_type, filter_chain_type));\n    }\n  }\n\n  /**\n   * Prepares the DNS failure refresh backoff strategy given the cluster configuration.\n   * @param config the config that contains dns refresh information.\n   * @param dns_refresh_rate_ms the default DNS refresh rate.\n   * @param random the random generator.\n   * @return BackOffStrategyPtr for scheduling refreshes.\n   */\n  template <typename T>\n  static BackOffStrategyPtr prepareDnsRefreshStrategy(const T& config, uint64_t dns_refresh_rate_ms,\n                                                      Random::RandomGenerator& random) {\n    if (config.has_dns_failure_refresh_rate()) {\n      uint64_t base_interval_ms =\n          PROTOBUF_GET_MS_REQUIRED(config.dns_failure_refresh_rate(), base_interval);\n      uint64_t max_interval_ms = PROTOBUF_GET_MS_OR_DEFAULT(config.dns_failure_refresh_rate(),\n                                                            max_interval, base_interval_ms * 10);\n      if (max_interval_ms < base_interval_ms) {\n        
ExceptionUtil::throwEnvoyException(\n            \"dns_failure_refresh_rate must have max_interval greater than \"\n            \"or equal to the base_interval\");\n      }\n      return std::make_unique<JitteredExponentialBackOffStrategy>(base_interval_ms, max_interval_ms,\n                                                                  random);\n    }\n    return std::make_unique<FixedBackOffStrategy>(dns_refresh_rate_ms);\n  }\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/version_converter.cc",
    "content": "#include \"common/config/version_converter.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n#include \"common/config/api_type_oracle.h\"\n#include \"common/protobuf/visitor.h\"\n#include \"common/protobuf/well_known.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nnamespace {\n\nclass ProtoVisitor {\npublic:\n  virtual ~ProtoVisitor() = default;\n\n  // Invoked when a field is visited, with the message, field descriptor and\n  // context. Returns a new context for use when traversing the sub-message in a\n  // field.\n  virtual const void* onField(Protobuf::Message&, const Protobuf::FieldDescriptor&,\n                              const void* ctxt) {\n    return ctxt;\n  }\n\n  // Invoked when a message is visited, with the message and a context.\n  virtual void onMessage(Protobuf::Message&, const void*){};\n};\n\n// Reinterpret a Protobuf message as another Protobuf message by converting to wire format and back.\n// This only works for messages that can be effectively duck typed this way, e.g. with a subtype\n// relationship modulo field name.\nvoid wireCast(const Protobuf::Message& src, Protobuf::Message& dst) {\n  // This should generally succeed, but if there are malformed UTF-8 strings in a message,\n  // this can fail.\n  if (!dst.ParseFromString(src.SerializeAsString())) {\n    throw EnvoyException(\"Unable to deserialize during wireCast()\");\n  }\n}\n\n// Create a new dynamic message based on some message wire cast to the target\n// descriptor. 
If the descriptor is null, a copy is performed.\nDynamicMessagePtr createForDescriptorWithCast(const Protobuf::Message& message,\n                                              const Protobuf::Descriptor* desc) {\n  auto dynamic_message = std::make_unique<DynamicMessage>();\n  if (desc != nullptr) {\n    dynamic_message->msg_.reset(dynamic_message->dynamic_msg_factory_.GetPrototype(desc)->New());\n    wireCast(message, *dynamic_message->msg_);\n    return dynamic_message;\n  }\n  // Unnecessary copy, since the existing message is being treated as\n  // \"dynamic\". However, we want to transfer an owned object, so this is the\n  // best we can do.\n  dynamic_message->msg_.reset(message.New());\n  dynamic_message->msg_->MergeFrom(message);\n  return dynamic_message;\n}\n\n} // namespace\n\nvoid VersionConverter::upgrade(const Protobuf::Message& prev_message,\n                               Protobuf::Message& next_message) {\n  wireCast(prev_message, next_message);\n  // Track original type to support recoverOriginal().\n  annotateWithOriginalType(*prev_message.GetDescriptor(), next_message);\n}\n\n// This needs to be recursive, since sub-messages are consumed and stored\n// internally, we later want to recover their original types.\nvoid VersionConverter::annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor,\n                                                Protobuf::Message& upgraded_message) {\n  class TypeAnnotatingProtoVisitor : public ProtobufMessage::ProtoVisitor {\n  public:\n    void onMessage(Protobuf::Message& message, const void* ctxt) override {\n      const Protobuf::Descriptor* descriptor = message.GetDescriptor();\n      const Protobuf::Reflection* reflection = message.GetReflection();\n      const Protobuf::Descriptor& prev_descriptor = *static_cast<const Protobuf::Descriptor*>(ctxt);\n      // If they are the same type, there's no possibility of any different type\n      // further down, so we're done.\n      if (descriptor->full_name() 
== prev_descriptor.full_name()) {\n        return;\n      }\n      auto* unknown_field_set = reflection->MutableUnknownFields(&message);\n      unknown_field_set->AddLengthDelimited(ProtobufWellKnown::OriginalTypeFieldNumber,\n                                            prev_descriptor.full_name());\n    }\n\n    const void* onField(Protobuf::Message&, const Protobuf::FieldDescriptor& field,\n                        const void* ctxt) override {\n      const Protobuf::Descriptor& prev_descriptor = *static_cast<const Protobuf::Descriptor*>(ctxt);\n      // TODO(htuch): This is a terrible hack, there should be no per-resource\n      // business logic in this file. The reason this is required is that\n      // endpoints, when captured in configuration such as inlined hosts in\n      // Clusters for config dump purposes, can potentially contribute a\n      // significant amount to memory consumption. stats_integration_test\n      // complains as a result if we increase any memory due to type annotations.\n      // In theory, we should be able to just clean up these annotations in\n      // ClusterManagerImpl with type erasure, but protobuf doesn't free up memory\n      // as expected, we probably need some arena level trick to address this.\n      if (prev_descriptor.full_name() == \"envoy.api.v2.Cluster\" &&\n          (field.name() == \"hidden_envoy_deprecated_hosts\" || field.name() == \"load_assignment\")) {\n        // This will cause the sub-message visit to abort early.\n        return field.message_type();\n      }\n      const Protobuf::FieldDescriptor* prev_field =\n          prev_descriptor.FindFieldByNumber(field.number());\n      return prev_field != nullptr ? 
prev_field->message_type() : nullptr;\n    }\n  };\n  TypeAnnotatingProtoVisitor proto_visitor;\n  ProtobufMessage::traverseMutableMessage(proto_visitor, upgraded_message, &prev_descriptor);\n}\n\nvoid VersionConverter::eraseOriginalTypeInformation(Protobuf::Message& message) {\n  class TypeErasingProtoVisitor : public ProtobufMessage::ProtoVisitor {\n  public:\n    void onMessage(Protobuf::Message& message, const void*) override {\n      const Protobuf::Reflection* reflection = message.GetReflection();\n      auto* unknown_field_set = reflection->MutableUnknownFields(&message);\n      unknown_field_set->DeleteByNumber(ProtobufWellKnown::OriginalTypeFieldNumber);\n    }\n  };\n  TypeErasingProtoVisitor proto_visitor;\n  ProtobufMessage::traverseMutableMessage(proto_visitor, message, nullptr);\n}\n\nDynamicMessagePtr VersionConverter::recoverOriginal(const Protobuf::Message& upgraded_message) {\n  const Protobuf::Reflection* reflection = upgraded_message.GetReflection();\n  const auto& unknown_field_set = reflection->GetUnknownFields(upgraded_message);\n  for (int i = 0; i < unknown_field_set.field_count(); ++i) {\n    const auto& unknown_field = unknown_field_set.field(i);\n    if (unknown_field.number() == ProtobufWellKnown::OriginalTypeFieldNumber) {\n      ASSERT(unknown_field.type() == Protobuf::UnknownField::TYPE_LENGTH_DELIMITED);\n      const std::string& original_type = unknown_field.length_delimited();\n      const Protobuf::Descriptor* original_descriptor =\n          Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(original_type);\n      auto result = createForDescriptorWithCast(upgraded_message, original_descriptor);\n      // We should clear out the OriginalTypeFieldNumber in the recovered message.\n      eraseOriginalTypeInformation(*result->msg_);\n      return result;\n    }\n  }\n  return createForDescriptorWithCast(upgraded_message, nullptr);\n}\n\nDynamicMessagePtr VersionConverter::downgrade(const Protobuf::Message& message) {\n 
 const Protobuf::Descriptor* prev_desc =\n      ApiTypeOracle::getEarlierVersionDescriptor(message.GetDescriptor()->full_name());\n  return createForDescriptorWithCast(message, prev_desc);\n}\n\nstd::string\nVersionConverter::getJsonStringFromMessage(const Protobuf::Message& message,\n                                           envoy::config::core::v3::ApiVersion api_version) {\n  DynamicMessagePtr dynamic_message;\n  switch (api_version) {\n  case envoy::config::core::v3::ApiVersion::AUTO:\n    FALLTHRU;\n  case envoy::config::core::v3::ApiVersion::V2: {\n    // TODO(htuch): this works as long as there are no new fields in the v3+\n    // DiscoveryRequest. When they are added, we need to do a full v2 conversion\n    // and also discard unknown fields. Tracked at\n    // https://github.com/envoyproxy/envoy/issues/9619.\n    dynamic_message = downgrade(message);\n    break;\n  }\n  case envoy::config::core::v3::ApiVersion::V3: {\n    // We need to scrub the hidden fields.\n    dynamic_message = std::make_unique<DynamicMessage>();\n    dynamic_message->msg_.reset(message.New());\n    dynamic_message->msg_->MergeFrom(message);\n    VersionUtil::scrubHiddenEnvoyDeprecated(*dynamic_message->msg_);\n    break;\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  eraseOriginalTypeInformation(*dynamic_message->msg_);\n  std::string json;\n  Protobuf::util::JsonPrintOptions json_options;\n  json_options.preserve_proto_field_names = true;\n  const auto status =\n      Protobuf::util::MessageToJsonString(*dynamic_message->msg_, &json, json_options);\n  // This should always succeed unless something crash-worthy such as out-of-memory.\n  RELEASE_ASSERT(status.ok(), \"\");\n  return json;\n}\n\nvoid VersionConverter::prepareMessageForGrpcWire(Protobuf::Message& message,\n                                                 envoy::config::core::v3::ApiVersion api_version) {\n  // TODO(htuch): this works as long as there are no new fields in the v3+\n  // DiscoveryRequest. 
When they are added, we need to do a full v2 conversion\n  // and also discard unknown fields. Tracked at\n  // https://github.com/envoyproxy/envoy/issues/9619.\n  if (api_version == envoy::config::core::v3::ApiVersion::V3) {\n    VersionUtil::scrubHiddenEnvoyDeprecated(message);\n  }\n  eraseOriginalTypeInformation(message);\n}\n\nvoid VersionUtil::scrubHiddenEnvoyDeprecated(Protobuf::Message& message) {\n  class HiddenFieldScrubbingProtoVisitor : public ProtobufMessage::ProtoVisitor {\n  public:\n    const void* onField(Protobuf::Message& message, const Protobuf::FieldDescriptor& field,\n                        const void*) override {\n      const Protobuf::Reflection* reflection = message.GetReflection();\n      if (absl::StartsWith(field.name(), DeprecatedFieldShadowPrefix)) {\n        reflection->ClearField(&message, &field);\n      }\n      return nullptr;\n    }\n  };\n  HiddenFieldScrubbingProtoVisitor proto_visitor;\n  ProtobufMessage::traverseMutableMessage(proto_visitor, message, nullptr);\n}\n\nconst char VersionUtil::DeprecatedFieldShadowPrefix[] = \"hidden_envoy_deprecated_\";\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/version_converter.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n// Convenience macro for downgrading a message and obtaining a reference.\n#define API_DOWNGRADE(msg) (*Envoy::Config::VersionConverter::downgrade(msg)->msg_)\n\n// Convenience macro for recovering original message and obtaining a reference.\n#define API_RECOVER_ORIGINAL(msg) (*Envoy::Config::VersionConverter::recoverOriginal(msg)->msg_)\n\nnamespace Envoy {\nnamespace Config {\n\n// An instance of a dynamic message from a DynamicMessageFactory.\nstruct DynamicMessage {\n  // The dynamic message factory must outlive the message.\n  Protobuf::DynamicMessageFactory dynamic_msg_factory_;\n\n  // Dynamic message.\n  ProtobufTypes::MessagePtr msg_;\n};\n\nusing DynamicMessagePtr = std::unique_ptr<DynamicMessage>;\n\nclass VersionConverter {\npublic:\n  /**\n   * Upgrade a message from an earlier to later version of the Envoy API. This\n   * performs a simple wire-level reinterpretation of the fields. As a result of\n   * shadow protos, earlier deprecated fields such as foo are materialized as\n   * hidden_envoy_deprecated_foo.\n   *\n   * This should be used when you have wire input (e.g. bootstrap, xDS, some\n   * opaque config) that might be at any supported version and want to upgrade\n   * to Envoy's internal latest API usage.\n   *\n   * @param prev_message previous version message input.\n   * @param next_message next version message to generate.\n   *\n   * @throw EnvoyException if a Protobuf (de)serialization error occurs.\n   */\n  static void upgrade(const Protobuf::Message& prev_message, Protobuf::Message& next_message);\n\n  /**\n   * Downgrade a message to the previous version. If no previous version exists,\n   * the given message is copied in the return value. 
This is not super\n   * efficient, most uses are expected to be tests and performance agnostic\n   * code.\n   *\n   * This is used primarily in tests, to allow tests to internally use the\n   * latest supported API but ensure that earlier versions are used on the wire.\n   *\n   * @param message message input.\n   * @return DynamicMessagePtr with the downgraded message (and associated\n   *         factory state).\n   *\n   * @throw EnvoyException if a Protobuf (de)serialization error occurs.\n   */\n  static DynamicMessagePtr downgrade(const Protobuf::Message& message);\n\n  /**\n   * Obtain JSON wire representation for an Envoy internal API message at v3\n   * based on a given transport API version. This will downgrade() to an earlier\n   * version or scrub the shadow deprecated fields in the existing one.\n   *\n   * This is typically used when Envoy is generating a JSON wire message from\n   * some internally generated message, e.g. DiscoveryRequest, and we want to\n   * ensure it matches a specific API version. For example, a v3\n   * DiscoveryRequest must have any deprecated v2 fields removed (they only\n   * exist because of shadowing) and a v2 DiscoveryRequest needs to have type\n   * envoy.api.v2.DiscoveryRequest to ensure JSON representations have the\n   * correct field names (after renames/deprecations are reversed).\n   *\n   * @param message message input.\n   * @param api_version target API version.\n   * @return std::string JSON representation.\n   */\n  static std::string getJsonStringFromMessage(const Protobuf::Message& message,\n                                              envoy::config::core::v3::ApiVersion api_version);\n\n  /**\n   * Modify a v3 message to make it suitable for sending as a gRPC message. 
This\n   * requires that a v3 message has hidden_envoy_deprecated_* fields removed,\n   * and that for all versions that original type information is removed.\n   *\n   * @param message message to modify.\n   * @param api_version target API version.\n   */\n  static void prepareMessageForGrpcWire(Protobuf::Message& message,\n                                        envoy::config::core::v3::ApiVersion api_version);\n\n  /**\n   * Annotate an upgraded message with original message type information.\n   *\n   * @param prev_descriptor descriptor for original type.\n   * @param upgraded_message upgraded message.\n   */\n  static void annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor,\n                                       Protobuf::Message& upgraded_message);\n\n  /**\n   * For a message that may have been upgraded, recover the original message.\n   * This is useful for config dump, debug output etc.\n   *\n   * @param upgraded_message upgraded message input.\n   *\n   * @return DynamicMessagePtr original message (as a dynamic message).\n   *\n   * @throw EnvoyException if a Protobuf (de)serialization error occurs.\n   */\n  static DynamicMessagePtr recoverOriginal(const Protobuf::Message& upgraded_message);\n\n  /**\n   * Remove original type information, when it's not needed, e.g. in tests.\n   *\n   * @param message upgraded message to scrub.\n   */\n  static void eraseOriginalTypeInformation(Protobuf::Message& message);\n};\n\nclass VersionUtil {\npublic:\n  // Some helpers for working with earlier message version deprecated fields.\n  static void scrubHiddenEnvoyDeprecated(Protobuf::Message& message);\n\n  // A prefix that is added to deprecated fields names upon shadowing.\n  static const char DeprecatedFieldShadowPrefix[];\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/watch_map.cc",
    "content": "#include \"common/config/watch_map.h\"\n\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/config/decoded_resource_impl.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nnamespace {\n// Returns the namespace part (if there's any) in the resource name.\nstd::string namespaceFromName(const std::string& resource_name) {\n  const auto pos = resource_name.find_last_of('/');\n  // we are not interested in the \"/\" character in the namespace\n  return pos == std::string::npos ? \"\" : resource_name.substr(0, pos);\n}\n} // namespace\n\nWatch* WatchMap::addWatch(SubscriptionCallbacks& callbacks,\n                          OpaqueResourceDecoder& resource_decoder) {\n  auto watch = std::make_unique<Watch>(callbacks, resource_decoder);\n  Watch* watch_ptr = watch.get();\n  wildcard_watches_.insert(watch_ptr);\n  watches_.insert(std::move(watch));\n  return watch_ptr;\n}\n\nvoid WatchMap::removeWatch(Watch* watch) {\n  if (deferred_removed_during_update_ != nullptr) {\n    deferred_removed_during_update_->insert(watch);\n  } else {\n    wildcard_watches_.erase(watch); // may or may not be in there, but we want it gone.\n    watches_.erase(watch);\n  }\n}\n\nvoid WatchMap::removeDeferredWatches() {\n  for (auto& watch : *deferred_removed_during_update_) {\n    wildcard_watches_.erase(watch); // may or may not be in there, but we want it gone.\n    watches_.erase(watch);\n  }\n  deferred_removed_during_update_ = nullptr;\n}\n\nAddedRemoved WatchMap::updateWatchInterest(Watch* watch,\n                                           const std::set<std::string>& update_to_these_names) {\n  if (update_to_these_names.empty()) {\n    wildcard_watches_.insert(watch);\n  } else {\n    wildcard_watches_.erase(watch);\n  }\n\n  std::vector<std::string> newly_added_to_watch;\n  std::set_difference(update_to_these_names.begin(), update_to_these_names.end(),\n                      watch->resource_names_.begin(), 
watch->resource_names_.end(),\n                      std::inserter(newly_added_to_watch, newly_added_to_watch.begin()));\n\n  std::vector<std::string> newly_removed_from_watch;\n  std::set_difference(watch->resource_names_.begin(), watch->resource_names_.end(),\n                      update_to_these_names.begin(), update_to_these_names.end(),\n                      std::inserter(newly_removed_from_watch, newly_removed_from_watch.begin()));\n\n  watch->resource_names_ = update_to_these_names;\n\n  return AddedRemoved(findAdditions(newly_added_to_watch, watch),\n                      findRemovals(newly_removed_from_watch, watch));\n}\n\nabsl::flat_hash_set<Watch*> WatchMap::watchesInterestedIn(const std::string& resource_name) {\n  absl::flat_hash_set<Watch*> ret;\n  if (!use_namespace_matching_) {\n    ret = wildcard_watches_;\n  }\n\n  const auto prefix = namespaceFromName(resource_name);\n  const auto resource_key = use_namespace_matching_ && !prefix.empty() ? prefix : resource_name;\n  const auto watches_interested = watch_interest_.find(resource_key);\n  if (watches_interested != watch_interest_.end()) {\n    for (const auto& watch : watches_interested->second) {\n      ret.insert(watch);\n    }\n  }\n  return ret;\n}\n\nvoid WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField<ProtobufWkt::Any>& resources,\n                              const std::string& version_info) {\n  if (watches_.empty()) {\n    return;\n  }\n\n  // Track any removals triggered by earlier watch updates.\n  ASSERT(deferred_removed_during_update_ == nullptr);\n  deferred_removed_during_update_ = std::make_unique<absl::flat_hash_set<Watch*>>();\n  Cleanup cleanup([this] { removeDeferredWatches(); });\n  // Build a map from watches, to the set of updated resources that each watch cares about. 
Each\n  // entry in the map is then a nice little bundle that can be fed directly into the individual\n  // onConfigUpdate()s.\n  std::vector<DecodedResourceImplPtr> decoded_resources;\n  absl::flat_hash_map<Watch*, std::vector<DecodedResourceRef>> per_watch_updates;\n  for (const auto& r : resources) {\n    decoded_resources.emplace_back(\n        new DecodedResourceImpl((*watches_.begin())->resource_decoder_, r, version_info));\n    const absl::flat_hash_set<Watch*>& interested_in_r =\n        watchesInterestedIn(decoded_resources.back()->name());\n    for (const auto& interested_watch : interested_in_r) {\n      per_watch_updates[interested_watch].emplace_back(*decoded_resources.back());\n    }\n  }\n\n  const bool map_is_single_wildcard = (watches_.size() == 1 && wildcard_watches_.size() == 1);\n  // We just bundled up the updates into nice per-watch packages. Now, deliver them.\n  for (auto& watch : watches_) {\n    if (deferred_removed_during_update_->count(watch.get()) > 0) {\n      continue;\n    }\n    const auto this_watch_updates = per_watch_updates.find(watch);\n    if (this_watch_updates == per_watch_updates.end()) {\n      // This update included no resources this watch cares about.\n      // 1) If there is only a single, wildcard watch (i.e. 
Cluster or Listener), always call\n      //    its onConfigUpdate even if just a no-op, to properly maintain state-of-the-world\n      //    semantics and the update_empty stat.\n      // 2) If this watch previously had some resources, it means this update is removing all\n      //    of this watch's resources, so the watch must be informed with an onConfigUpdate.\n      // 3) Otherwise, we can skip onConfigUpdate for this watch.\n      if (map_is_single_wildcard || !watch->state_of_the_world_empty_) {\n        watch->state_of_the_world_empty_ = true;\n        watch->callbacks_.onConfigUpdate({}, version_info);\n      }\n    } else {\n      watch->state_of_the_world_empty_ = false;\n      watch->callbacks_.onConfigUpdate(this_watch_updates->second, version_info);\n    }\n  }\n}\n\nvoid WatchMap::onConfigUpdate(\n    const Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>& added_resources,\n    const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n    const std::string& system_version_info) {\n  // Track any removals triggered by earlier watch updates.\n  ASSERT(deferred_removed_during_update_ == nullptr);\n  deferred_removed_during_update_ = std::make_unique<absl::flat_hash_set<Watch*>>();\n  Cleanup cleanup([this] { removeDeferredWatches(); });\n  // Build a pair of maps: from watches, to the set of resources {added,removed} that each watch\n  // cares about. Each entry in the map-pair is then a nice little bundle that can be fed directly\n  // into the individual onConfigUpdate()s.\n  std::vector<DecodedResourceImplPtr> decoded_resources;\n  absl::flat_hash_map<Watch*, std::vector<DecodedResourceRef>> per_watch_added;\n  for (const auto& r : added_resources) {\n    const absl::flat_hash_set<Watch*>& interested_in_r = watchesInterestedIn(r.name());\n    // If there are no watches, then we don't need to decode. 
If there are watches, they should all\n    // be for the same resource type, so we can just use the callbacks of the first watch to decode.\n    if (interested_in_r.empty()) {\n      continue;\n    }\n    decoded_resources.emplace_back(\n        new DecodedResourceImpl((*interested_in_r.begin())->resource_decoder_, r));\n    for (const auto& interested_watch : interested_in_r) {\n      per_watch_added[interested_watch].emplace_back(*decoded_resources.back());\n    }\n  }\n  absl::flat_hash_map<Watch*, Protobuf::RepeatedPtrField<std::string>> per_watch_removed;\n  for (const auto& r : removed_resources) {\n    const absl::flat_hash_set<Watch*>& interested_in_r = watchesInterestedIn(r);\n    for (const auto& interested_watch : interested_in_r) {\n      *per_watch_removed[interested_watch].Add() = r;\n    }\n  }\n\n  // We just bundled up the updates into nice per-watch packages. Now, deliver them.\n  for (const auto& [cur_watch, resource_to_add] : per_watch_added) {\n    if (deferred_removed_during_update_->count(cur_watch) > 0) {\n      continue;\n    }\n    const auto removed = per_watch_removed.find(cur_watch);\n    if (removed == per_watch_removed.end()) {\n      // additions only, no removals\n      cur_watch->callbacks_.onConfigUpdate(resource_to_add, {}, system_version_info);\n    } else {\n      // both additions and removals\n      cur_watch->callbacks_.onConfigUpdate(resource_to_add, removed->second, system_version_info);\n      // Drop the removals now, so the final removals-only pass won't use them.\n      per_watch_removed.erase(removed);\n    }\n  }\n  // Any removals-only updates will not have been picked up in the per_watch_added loop.\n  for (auto& [cur_watch, resource_to_remove] : per_watch_removed) {\n    if (deferred_removed_during_update_->count(cur_watch) > 0) {\n      continue;\n    }\n    cur_watch->callbacks_.onConfigUpdate({}, resource_to_remove, system_version_info);\n  }\n  // notify empty update\n  if (added_resources.empty() && 
removed_resources.empty()) {\n    for (auto& cur_watch : wildcard_watches_) {\n      cur_watch->callbacks_.onConfigUpdate({}, {}, system_version_info);\n    }\n  }\n}\n\nvoid WatchMap::onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) {\n  for (auto& watch : watches_) {\n    watch->callbacks_.onConfigUpdateFailed(reason, e);\n  }\n}\n\nstd::set<std::string> WatchMap::findAdditions(const std::vector<std::string>& newly_added_to_watch,\n                                              Watch* watch) {\n  std::set<std::string> newly_added_to_subscription;\n  for (const auto& name : newly_added_to_watch) {\n    auto entry = watch_interest_.find(name);\n    if (entry == watch_interest_.end()) {\n      newly_added_to_subscription.insert(name);\n      watch_interest_[name] = {watch};\n    } else {\n      // Add this watch to the already-existing set at watch_interest_[name]\n      entry->second.insert(watch);\n    }\n  }\n  return newly_added_to_subscription;\n}\n\nstd::set<std::string>\nWatchMap::findRemovals(const std::vector<std::string>& newly_removed_from_watch, Watch* watch) {\n  std::set<std::string> newly_removed_from_subscription;\n  for (const auto& name : newly_removed_from_watch) {\n    auto entry = watch_interest_.find(name);\n    RELEASE_ASSERT(\n        entry != watch_interest_.end(),\n        fmt::format(\"WatchMap: tried to remove a watch from untracked resource {}\", name));\n\n    entry->second.erase(watch);\n    if (entry->second.empty()) {\n      watch_interest_.erase(entry);\n      newly_removed_from_subscription.insert(name);\n    }\n  }\n  return newly_removed_from_subscription;\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/watch_map.h",
    "content": "#pragma once\n\n#include <set>\n#include <string>\n#include <utility>\n\n#include \"envoy/config/subscription.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nstruct AddedRemoved {\n  AddedRemoved(std::set<std::string>&& added, std::set<std::string>&& removed)\n      : added_(std::move(added)), removed_(std::move(removed)) {}\n  std::set<std::string> added_;\n  std::set<std::string> removed_;\n};\n\nstruct Watch {\n  Watch(SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder)\n      : callbacks_(callbacks), resource_decoder_(resource_decoder) {}\n  SubscriptionCallbacks& callbacks_;\n  OpaqueResourceDecoder& resource_decoder_;\n  std::set<std::string> resource_names_; // must be sorted set, for set_difference.\n  // Needed only for state-of-the-world.\n  // Whether the most recent update contained any resources this watch cares about.\n  // If true, a new update that also contains no resources can skip this watch.\n  bool state_of_the_world_empty_{true};\n};\n\n// NOTE: Users are responsible for eventually calling removeWatch() on the Watch* returned\n//       by addWatch(). We don't expect there to be new users of this class beyond\n//       NewGrpcMuxImpl and DeltaSubscriptionImpl (TODO(fredlas) to be renamed).\n//\n// Manages \"watches\" of xDS resources. Several xDS callers might ask for a subscription to the same\n// resource name \"X\". The xDS machinery must return to each their very own subscription to X.\n// The xDS machinery's \"watch\" concept accomplishes that, while avoiding parallel redundant xDS\n// requests for X. 
Each of those subscriptions is viewed as a \"watch\" on X, while behind the scenes\n// there is just a single real subscription to that resource name.\n//\n// This class maintains the watches<-->subscription mapping: it\n// 1) delivers updates to all interested watches, and\n// 2) tracks which resource names should be {added to,removed from} the subscription when the\n//    {first,last} watch on a resource name is {added,removed}.\n//\n// #1 is accomplished by WatchMap's implementation of the SubscriptionCallbacks interface.\n// This interface allows the xDS client to just throw each xDS update message it receives directly\n// into WatchMap::onConfigUpdate, rather than having to track the various watches' callbacks.\n//\n// The information for #2 is returned by updateWatchInterest(); the caller should use it to\n// update the subscription accordingly.\n//\n// A WatchMap is assumed to be dedicated to a single type_url type of resource (EDS, CDS, etc).\nclass WatchMap : public UntypedConfigUpdateCallbacks, public Logger::Loggable<Logger::Id::config> {\npublic:\n  WatchMap(const bool use_namespace_matching) : use_namespace_matching_(use_namespace_matching) {}\n\n  // Adds 'callbacks' to the WatchMap, with every possible resource being watched.\n  // (Use updateWatchInterest() to narrow it down to some specific names).\n  // Returns the newly added watch, to be used with updateWatchInterest and removeWatch.\n  Watch* addWatch(SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder);\n\n  // Updates the set of resource names that the given watch should watch.\n  // Returns any resource name additions/removals that are unique across all watches. 
That is:\n  // 1) if 'resources' contains X and no other watch cares about X, X will be in added_.\n  // 2) if 'resources' does not contain Y, and this watch was the only one that cared about Y,\n  //    Y will be in removed_.\n  AddedRemoved updateWatchInterest(Watch* watch,\n                                   const std::set<std::string>& update_to_these_names);\n\n  // Expects that the watch to be removed has already had all of its resource names removed via\n  // updateWatchInterest().\n  void removeWatch(Watch* watch);\n\n  // UntypedConfigUpdateCallbacks.\n  void onConfigUpdate(const Protobuf::RepeatedPtrField<ProtobufWkt::Any>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(\n      const Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>& added_resources,\n      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n      const std::string& system_version_info) override;\n  void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) override;\n\n  WatchMap(const WatchMap&) = delete;\n  WatchMap& operator=(const WatchMap&) = delete;\n\nprivate:\n  void removeDeferredWatches();\n\n  // Given a list of names that are new to an individual watch, returns those names that are in fact\n  // new to the entire subscription.\n  std::set<std::string> findAdditions(const std::vector<std::string>& newly_added_to_watch,\n                                      Watch* watch);\n\n  // Given a list of names that an individual watch no longer cares about, returns those names that\n  // in fact the entire subscription no longer cares about.\n  std::set<std::string> findRemovals(const std::vector<std::string>& newly_removed_from_watch,\n                                     Watch* watch);\n\n  // Returns the union of watch_interest_[resource_name] and wildcard_watches_.\n  absl::flat_hash_set<Watch*> watchesInterestedIn(const std::string& resource_name);\n\n  
absl::flat_hash_set<std::unique_ptr<Watch>> watches_;\n\n  // Watches whose interest set is currently empty, which is interpreted as \"everything\".\n  absl::flat_hash_set<Watch*> wildcard_watches_;\n\n  // Watches that have been removed inside the call stack of the WatchMap's onConfigUpdate(). This\n  // can happen when a watch's onConfigUpdate() results in another watch being removed via\n  // removeWatch().\n  std::unique_ptr<absl::flat_hash_set<Watch*>> deferred_removed_during_update_;\n\n  // Maps a resource name to the set of watches interested in that resource. Has two purposes:\n  // 1) Acts as a reference count; no watches care anymore ==> the resource can be removed.\n  // 2) Enables efficient lookup of all interested watches when a resource has been updated.\n  absl::flat_hash_map<std::string, absl::flat_hash_set<Watch*>> watch_interest_;\n\n  const bool use_namespace_matching_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/well_known_names.cc",
    "content": "#include \"common/config/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nTagNameValues::TagNameValues() {\n  // Note: the default regexes are defined below in the order that they will typically be matched\n  // (see the TagExtractor class definition for an explanation of the iterative matching process).\n  // This ordering is roughly from most specific to least specific. Despite the fact that these\n  // regexes are defined with a particular ordering in mind, users can customize the ordering of the\n  // processing of the default tag extraction regexes and include custom tags with regexes via the\n  // bootstrap configuration. Because of this flexibility, these regexes are designed to not\n  // interfere with one another no matter the ordering. They are tested in forward and reverse\n  // ordering to ensure they will be safe in most ordering configurations.\n\n  // To give a more user-friendly explanation of the intended behavior of each regex, each is\n  // preceded by a comment with a simplified notation to explain what the regex is designed to\n  // match:\n  // - The text that the regex is intended to capture will be enclosed in ().\n  // - Other default tags that are expected to exist in the name (and may or may not have been\n  // removed before this regex has been applied) are enclosed in [].\n  // - Stand-ins for a variable segment of the name (including inside capture groups) will be\n  // enclosed in <>.\n  // - Typical * notation will be used to denote an arbitrary set of characters.\n\n  // *_rq(_<response_code>)\n  addRegex(RESPONSE_CODE, \"_rq(_(\\\\d{3}))$\", \"_rq_\");\n\n  // *_rq_(<response_code_class>)xx\n  addRegex(RESPONSE_CODE_CLASS, \"_rq_(\\\\d)xx$\", \"_rq_\");\n\n  // http.[<stat_prefix>.]dynamodb.table.[<table_name>.]capacity.[<operation_name>.](__partition_id=<last_seven_characters_from_partition_id>)\n  addRegex(DYNAMO_PARTITION_ID,\n           
\"^http(?=\\\\.).*?\\\\.dynamodb\\\\.table(?=\\\\.).*?\\\\.\"\n           \"capacity(?=\\\\.).*?(\\\\.__partition_id=(\\\\w{7}))$\",\n           \".dynamodb.table.\");\n\n  // http.[<stat_prefix>.]dynamodb.operation.(<operation_name>.)<base_stat> or\n  // http.[<stat_prefix>.]dynamodb.table.[<table_name>.]capacity.(<operation_name>.)[<partition_id>]\n  addRegex(DYNAMO_OPERATION,\n           \"^http(?=\\\\.).*?\\\\.dynamodb.(?:operation|table(?=\"\n           \"\\\\.).*?\\\\.capacity)(\\\\.(.*?))(?:\\\\.|$)\",\n           \".dynamodb.\");\n\n  // mongo.[<stat_prefix>.]collection.[<collection>.]callsite.(<callsite>.)query.<base_stat>\n  addRegex(MONGO_CALLSITE,\n           R\"(^mongo(?=\\.).*?\\.collection(?=\\.).*?\\.callsite\\.((.*?)\\.).*?query.\\w+?$)\",\n           \".collection.\");\n\n  // http.[<stat_prefix>.]dynamodb.table.(<table_name>.) or\n  // http.[<stat_prefix>.]dynamodb.error.(<table_name>.)*\n  addRegex(DYNAMO_TABLE, R\"(^http(?=\\.).*?\\.dynamodb.(?:table|error)\\.((.*?)\\.))\", \".dynamodb.\");\n\n  // mongo.[<stat_prefix>.]collection.(<collection>.)query.<base_stat>\n  addRegex(MONGO_COLLECTION, R\"(^mongo(?=\\.).*?\\.collection\\.((.*?)\\.).*?query.\\w+?$)\",\n           \".collection.\");\n\n  // mongo.[<stat_prefix>.]cmd.(<cmd>.)<base_stat>\n  addRegex(MONGO_CMD, R\"(^mongo(?=\\.).*?\\.cmd\\.((.*?)\\.)\\w+?$)\", \".cmd.\");\n\n  // cluster.[<route_target_cluster>.]grpc.[<grpc_service>.](<grpc_method>.)<base_stat>\n  addRegex(GRPC_BRIDGE_METHOD, R\"(^cluster(?=\\.).*?\\.grpc(?=\\.).*\\.((.*?)\\.)\\w+?$)\", \".grpc.\");\n\n  // http.[<stat_prefix>.]user_agent.(<user_agent>.)<base_stat>\n  addRegex(HTTP_USER_AGENT, R\"(^http(?=\\.).*?\\.user_agent\\.((.*?)\\.)\\w+?$)\", \".user_agent.\");\n\n  // vhost.[<virtual host name>.]vcluster.(<virtual_cluster_name>.)<base_stat>\n  addRegex(VIRTUAL_CLUSTER, R\"(^vhost(?=\\.).*?\\.vcluster\\.((.*?)\\.)\\w+?$)\", \".vcluster.\");\n\n  // http.[<stat_prefix>.]fault.(<downstream_cluster>.)<base_stat>\n  
addRegex(FAULT_DOWNSTREAM_CLUSTER, R\"(^http(?=\\.).*?\\.fault\\.((.*?)\\.)\\w+?$)\", \".fault.\");\n\n  // listener.[<address>.]ssl.cipher.(<cipher>)\n  addRegex(SSL_CIPHER, R\"(^listener(?=\\.).*?\\.ssl\\.cipher(\\.(.*?))$)\");\n\n  // cluster.[<cluster_name>.]ssl.ciphers.(<cipher>)\n  addRegex(SSL_CIPHER_SUITE, R\"(^cluster(?=\\.).*?\\.ssl\\.ciphers(\\.(.*?))$)\", \".ssl.ciphers.\");\n\n  // cluster.[<route_target_cluster>.]grpc.(<grpc_service>.)*\n  addRegex(GRPC_BRIDGE_SERVICE, R\"(^cluster(?=\\.).*?\\.grpc\\.((.*?)\\.))\", \".grpc.\");\n\n  // tcp.(<stat_prefix>.)<base_stat>\n  addRegex(TCP_PREFIX, R\"(^tcp\\.((.*?)\\.)\\w+?$)\");\n\n  // udp.(<stat_prefix>.)<base_stat>\n  addRegex(UDP_PREFIX, R\"(^udp\\.((.*?)\\.)\\w+?$)\");\n\n  // auth.clientssl.(<stat_prefix>.)<base_stat>\n  addRegex(CLIENTSSL_PREFIX, R\"(^auth\\.clientssl\\.((.*?)\\.)\\w+?$)\");\n\n  // ratelimit.(<stat_prefix>.)<base_stat>\n  addRegex(RATELIMIT_PREFIX, R\"(^ratelimit\\.((.*?)\\.)\\w+?$)\");\n\n  // cluster.(<cluster_name>.)*\n  addRegex(CLUSTER_NAME, \"^cluster\\\\.((.*?)\\\\.)\");\n\n  // listener.[<address>.]http.(<stat_prefix>.)*\n  addRegex(HTTP_CONN_MANAGER_PREFIX, R\"(^listener(?=\\.).*?\\.http\\.((.*?)\\.))\", \".http.\");\n\n  // http.(<stat_prefix>.)*\n  addRegex(HTTP_CONN_MANAGER_PREFIX, \"^http\\\\.((.*?)\\\\.)\");\n\n  // listener.(<address>.)*\n  addRegex(LISTENER_ADDRESS,\n           R\"(^listener\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.))\");\n\n  // vhost.(<virtual host name>.)*\n  addRegex(VIRTUAL_HOST, \"^vhost\\\\.((.*?)\\\\.)\");\n\n  // mongo.(<stat_prefix>.)*\n  addRegex(MONGO_PREFIX, \"^mongo\\\\.((.*?)\\\\.)\");\n\n  // http.[<stat_prefix>.]rds.(<route_config_name>.)<base_stat>\n  addRegex(RDS_ROUTE_CONFIG, R\"(^http(?=\\.).*?\\.rds\\.((.*?)\\.)\\w+?$)\", \".rds.\");\n\n  // listener_manager.(worker_<id>.)*\n  addRegex(WORKER_ID, R\"(^listener_manager\\.((worker_\\d+)\\.))\", \"listener_manager.worker_\");\n}\n\nvoid 
TagNameValues::addRegex(const std::string& name, const std::string& regex,\n                             const std::string& substr) {\n  descriptor_vec_.emplace_back(Descriptor(name, regex, substr));\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/config/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * Well-known address resolver names.\n */\nclass AddressResolverNameValues {\npublic:\n  // Basic IP resolver\n  const std::string IP = \"envoy.ip\";\n};\n\nusing AddressResolverNames = ConstSingleton<AddressResolverNameValues>;\n\n/**\n * Well-known metadata filter namespaces.\n */\nclass MetadataFilterValues {\npublic:\n  // Filter namespace for built-in load balancer.\n  const std::string ENVOY_LB = \"envoy.lb\";\n  // Filter namespace for built-in transport socket match in cluster.\n  const std::string ENVOY_TRANSPORT_SOCKET_MATCH = \"envoy.transport_socket_match\";\n};\n\nusing MetadataFilters = ConstSingleton<MetadataFilterValues>;\n\n/**\n * Keys for MetadataFilterValues::ENVOY_LB metadata.\n */\nclass MetadataEnvoyLbKeyValues {\npublic:\n  // Key in envoy.lb filter namespace for endpoint canary bool value.\n  const std::string CANARY = \"canary\";\n};\n\nusing MetadataEnvoyLbKeys = ConstSingleton<MetadataEnvoyLbKeyValues>;\n\n/**\n * Well known tags values and a mapping from these names to the regexes they\n * represent. Note: when names are added to the list, they also must be added to\n * the regex map by adding an entry in the getRegexMapping function.\n */\nclass TagNameValues {\npublic:\n  TagNameValues();\n\n  /**\n   * Represents a tag extraction. This structure may be extended to\n   * allow for an faster pattern-matching engine to be used as an\n   * alternative to regexes, on an individual tag basis. 
Some of the\n   * tags, such as \"_rq_(\\\\d)xx$\", will probably stay as regexes.\n   */\n  struct Descriptor {\n    Descriptor(const std::string& name, const std::string& regex, const std::string& substr = \"\")\n        : name_(name), regex_(regex), substr_(substr) {}\n    const std::string name_;\n    const std::string regex_;\n    const std::string substr_;\n  };\n\n  // Cluster name tag\n  const std::string CLUSTER_NAME = \"envoy.cluster_name\";\n  // Listener port tag\n  const std::string LISTENER_ADDRESS = \"envoy.listener_address\";\n  // Stats prefix for HttpConnectionManager\n  const std::string HTTP_CONN_MANAGER_PREFIX = \"envoy.http_conn_manager_prefix\";\n  // User agent for a connection\n  const std::string HTTP_USER_AGENT = \"envoy.http_user_agent\";\n  // SSL cipher for a connection\n  const std::string SSL_CIPHER = \"envoy.ssl_cipher\";\n  // SSL cipher suite\n  const std::string SSL_CIPHER_SUITE = \"cipher_suite\";\n  // Stats prefix for the Client SSL Auth network filter\n  const std::string CLIENTSSL_PREFIX = \"envoy.clientssl_prefix\";\n  // Stats prefix for the Mongo Proxy network filter\n  const std::string MONGO_PREFIX = \"envoy.mongo_prefix\";\n  // Request command for the Mongo Proxy network filter\n  const std::string MONGO_CMD = \"envoy.mongo_cmd\";\n  // Request collection for the Mongo Proxy network filter\n  const std::string MONGO_COLLECTION = \"envoy.mongo_collection\";\n  // Request callsite for the Mongo Proxy network filter\n  const std::string MONGO_CALLSITE = \"envoy.mongo_callsite\";\n  // Stats prefix for the Ratelimit network filter\n  const std::string RATELIMIT_PREFIX = \"envoy.ratelimit_prefix\";\n  // Stats prefix for the TCP Proxy network filter\n  const std::string TCP_PREFIX = \"envoy.tcp_prefix\";\n  // Stats prefix for the UDP Proxy network filter\n  const std::string UDP_PREFIX = \"envoy.udp_prefix\";\n  // Downstream cluster for the Fault http filter\n  const std::string FAULT_DOWNSTREAM_CLUSTER = 
\"envoy.fault_downstream_cluster\";\n  // Operation name for the Dynamo http filter\n  const std::string DYNAMO_OPERATION = \"envoy.dynamo_operation\";\n  // Table name for the Dynamo http filter\n  const std::string DYNAMO_TABLE = \"envoy.dynamo_table\";\n  // Partition ID for the Dynamo http filter\n  const std::string DYNAMO_PARTITION_ID = \"envoy.dynamo_partition_id\";\n  // Request service name GRPC Bridge http filter\n  const std::string GRPC_BRIDGE_SERVICE = \"envoy.grpc_bridge_service\";\n  // Request method name for the GRPC Bridge http filter\n  const std::string GRPC_BRIDGE_METHOD = \"envoy.grpc_bridge_method\";\n  // Request virtual host given by the Router http filter\n  const std::string VIRTUAL_HOST = \"envoy.virtual_host\";\n  // Request virtual cluster given by the Router http filter\n  const std::string VIRTUAL_CLUSTER = \"envoy.virtual_cluster\";\n  // Request response code\n  const std::string RESPONSE_CODE = \"envoy.response_code\";\n  // Request response code class\n  const std::string RESPONSE_CODE_CLASS = \"envoy.response_code_class\";\n  // Route config name for RDS updates\n  const std::string RDS_ROUTE_CONFIG = \"envoy.rds_route_config\";\n  // Listener manager worker id\n  const std::string WORKER_ID = \"envoy.worker_id\";\n\n  // Mapping from the names above to their respective regex strings.\n  const std::vector<std::pair<std::string, std::string>> name_regex_pairs_;\n\n  // Returns the list of descriptors.\n  const std::vector<Descriptor>& descriptorVec() const { return descriptor_vec_; }\n\nprivate:\n  void addRegex(const std::string& name, const std::string& regex, const std::string& substr = \"\");\n\n  // Collection of tag descriptors.\n  std::vector<Descriptor> descriptor_vec_;\n};\n\nusing TagNames = ConstSingleton<TagNameValues>;\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/conn_pool/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"conn_pool_base_lib\",\n    srcs = [\"conn_pool_base.cc\"],\n    hdrs = [\"conn_pool_base.h\"],\n    deps = [\n        \"//include/envoy/stats:timespan_interface\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/common/upstream:upstream_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/conn_pool/conn_pool_base.cc",
    "content": "#include \"common/conn_pool/conn_pool_base.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/stats/timespan_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace ConnectionPool {\n\nConnPoolImplBase::ConnPoolImplBase(\n    Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n    Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options,\n    const Network::TransportSocketOptionsSharedPtr& transport_socket_options)\n    : host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options),\n      transport_socket_options_(transport_socket_options) {}\n\nConnPoolImplBase::~ConnPoolImplBase() {\n  ASSERT(ready_clients_.empty());\n  ASSERT(busy_clients_.empty());\n  ASSERT(connecting_clients_.empty());\n}\n\nvoid ConnPoolImplBase::destructAllConnections() {\n  for (auto* list : {&ready_clients_, &busy_clients_, &connecting_clients_}) {\n    while (!list->empty()) {\n      list->front()->close();\n    }\n  }\n\n  // Make sure all clients are destroyed before we are destroyed.\n  dispatcher_.clearDeferredDeleteList();\n}\n\nbool ConnPoolImplBase::shouldCreateNewConnection(float global_prefetch_ratio) const {\n  // If the host is not healthy, don't make it do extra work, especially as\n  // upstream selection logic may result in bypassing this upstream entirely.\n  // If an Envoy user wants prefetching for degraded upstreams this could be\n  // added later via extending the prefetch config.\n  if (host_->health() != Upstream::Host::Health::Healthy) {\n    return pending_streams_.size() > connecting_stream_capacity_;\n  }\n\n  // If global prefetching is on, and this connection is within the global\n  // prefetch limit, prefetch.\n  // We may eventually want to track prefetch_attempts to allow more prefetching for\n  // heavily weighted 
upstreams or sticky picks.\n  if (global_prefetch_ratio > 1.0 &&\n      ((pending_streams_.size() + 1 + num_active_streams_) * global_prefetch_ratio >\n       (connecting_stream_capacity_ + num_active_streams_))) {\n    return true;\n  }\n\n  // The number of streams we want to be provisioned for is the number of\n  // pending and active streams times the prefetch ratio.\n  // The number of streams we are (theoretically) provisioned for is the\n  // connecting stream capacity plus the number of active streams.\n  //\n  // If prefetch ratio is not set, it defaults to 1, and this simplifies to the\n  // legacy value of pending_streams_.size() > connecting_stream_capacity_\n  return (pending_streams_.size() + num_active_streams_) * perUpstreamPrefetchRatio() >\n         (connecting_stream_capacity_ + num_active_streams_);\n}\n\nfloat ConnPoolImplBase::perUpstreamPrefetchRatio() const {\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.allow_prefetch\")) {\n    return host_->cluster().perUpstreamPrefetchRatio();\n  } else {\n    return 1.0;\n  }\n}\n\nvoid ConnPoolImplBase::tryCreateNewConnections() {\n  // Somewhat arbitrarily cap the number of connections prefetched due to new\n  // incoming connections. The prefetch ratio is capped at 3, so in steady\n  // state, no more than 3 connections should be prefetched. 
If hosts go\n  // unhealthy, and connections are not immediately prefetched, it could be that\n  // many connections are desired when the host becomes healthy again, but\n  // overwhelming it with connections is not desirable.\n  for (int i = 0; i < 3; ++i) {\n    if (!tryCreateNewConnection()) {\n      return;\n    }\n  }\n}\n\nbool ConnPoolImplBase::tryCreateNewConnection(float global_prefetch_ratio) {\n  // There are already enough CONNECTING connections for the number of queued streams.\n  if (!shouldCreateNewConnection(global_prefetch_ratio)) {\n    return false;\n  }\n\n  const bool can_create_connection =\n      host_->cluster().resourceManager(priority_).connections().canCreate();\n  if (!can_create_connection) {\n    host_->cluster().stats().upstream_cx_overflow_.inc();\n  }\n  // If we are at the connection circuit-breaker limit due to other upstreams having\n  // too many open connections, and this upstream has no connections, always create one, to\n  // prevent pending streams being queued to this upstream with no way to be processed.\n  if (can_create_connection ||\n      (ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty())) {\n    ENVOY_LOG(debug, \"creating a new connection\");\n    ActiveClientPtr client = instantiateActiveClient();\n    ASSERT(client->state_ == ActiveClient::State::CONNECTING);\n    ASSERT(std::numeric_limits<uint64_t>::max() - connecting_stream_capacity_ >=\n           client->effectiveConcurrentStreamLimit());\n    ASSERT(client->real_host_description_);\n    connecting_stream_capacity_ += client->effectiveConcurrentStreamLimit();\n    LinkedList::moveIntoList(std::move(client), owningList(client->state_));\n  }\n  return can_create_connection;\n}\n\nvoid ConnPoolImplBase::attachStreamToClient(Envoy::ConnectionPool::ActiveClient& client,\n                                            AttachContext& context) {\n  ASSERT(client.state_ == Envoy::ConnectionPool::ActiveClient::State::READY);\n\n  if 
(!host_->cluster().resourceManager(priority_).requests().canCreate()) {\n    ENVOY_LOG(debug, \"max streams overflow\");\n    onPoolFailure(client.real_host_description_, absl::string_view(),\n                  ConnectionPool::PoolFailureReason::Overflow, context);\n    host_->cluster().stats().upstream_rq_pending_overflow_.inc();\n  } else {\n    ENVOY_CONN_LOG(debug, \"creating stream\", client);\n\n    client.remaining_streams_--;\n    if (client.remaining_streams_ == 0) {\n      ENVOY_CONN_LOG(debug, \"maximum streams per connection, DRAINING\", client);\n      host_->cluster().stats().upstream_cx_max_requests_.inc();\n      transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::DRAINING);\n    } else if (client.numActiveStreams() + 1 >= client.concurrent_stream_limit_) {\n      // As soon as the new stream is created, the client will be maxed out.\n      transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY);\n    }\n\n    num_active_streams_++;\n    host_->stats().rq_total_.inc();\n    host_->stats().rq_active_.inc();\n    host_->cluster().stats().upstream_rq_total_.inc();\n    host_->cluster().stats().upstream_rq_active_.inc();\n    host_->cluster().resourceManager(priority_).requests().inc();\n\n    onPoolReady(client, context);\n  }\n}\n\nvoid ConnPoolImplBase::onStreamClosed(Envoy::ConnectionPool::ActiveClient& client,\n                                      bool delay_attaching_stream) {\n  ENVOY_CONN_LOG(debug, \"destroying stream: {} remaining\", client, client.numActiveStreams());\n  ASSERT(num_active_streams_ > 0);\n  num_active_streams_--;\n  host_->stats().rq_active_.dec();\n  host_->cluster().stats().upstream_rq_active_.dec();\n  host_->cluster().resourceManager(priority_).requests().dec();\n  if (client.state_ == ActiveClient::State::DRAINING && client.numActiveStreams() == 0) {\n    // Close out the draining client if we no longer have active streams.\n    client.close();\n  } else if 
(client.state_ == ActiveClient::State::BUSY) {\n    // A stream was just ended, so we should be below the limit now.\n    ASSERT(client.numActiveStreams() < client.concurrent_stream_limit_);\n\n    transitionActiveClientState(client, ActiveClient::State::READY);\n    if (!delay_attaching_stream) {\n      onUpstreamReady();\n    }\n  }\n}\n\nConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) {\n  if (!ready_clients_.empty()) {\n    ActiveClient& client = *ready_clients_.front();\n    ENVOY_CONN_LOG(debug, \"using existing connection\", client);\n    attachStreamToClient(client, context);\n    // Even if there's a ready client, we may want to prefetch a new connection\n    // to handle the next incoming stream.\n    tryCreateNewConnections();\n    return nullptr;\n  }\n\n  if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) {\n    ConnectionPool::Cancellable* pending = newPendingStream(context);\n    // This must come after newPendingStream() because this function uses the\n    // length of pending_streams_ to determine if a new connection is needed.\n    tryCreateNewConnections();\n\n    return pending;\n  } else {\n    ENVOY_LOG(debug, \"max pending streams overflow\");\n    onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow,\n                  context);\n    host_->cluster().stats().upstream_rq_pending_overflow_.inc();\n    return nullptr;\n  }\n}\n\nbool ConnPoolImplBase::maybePrefetch(float global_prefetch_ratio) {\n  return tryCreateNewConnection(global_prefetch_ratio);\n}\n\nvoid ConnPoolImplBase::onUpstreamReady() {\n  while (!pending_streams_.empty() && !ready_clients_.empty()) {\n    ActiveClientPtr& client = ready_clients_.front();\n    ENVOY_CONN_LOG(debug, \"attaching to next stream\", *client);\n    // Pending streams are pushed onto the front, so pull from the back.\n    attachStreamToClient(*client, pending_streams_.back()->context());\n    
pending_streams_.pop_back();\n  }\n}\n\nstd::list<ActiveClientPtr>& ConnPoolImplBase::owningList(ActiveClient::State state) {\n  switch (state) {\n  case ActiveClient::State::CONNECTING:\n    return connecting_clients_;\n  case ActiveClient::State::READY:\n    return ready_clients_;\n  case ActiveClient::State::BUSY:\n    return busy_clients_;\n  case ActiveClient::State::DRAINING:\n    return busy_clients_;\n  case ActiveClient::State::CLOSED:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid ConnPoolImplBase::transitionActiveClientState(ActiveClient& client,\n                                                   ActiveClient::State new_state) {\n  auto& old_list = owningList(client.state_);\n  auto& new_list = owningList(new_state);\n  client.state_ = new_state;\n\n  // old_list and new_list can be equal when transitioning from BUSY to DRAINING.\n  //\n  // The documentation for list.splice() (which is what moveBetweenLists() calls) is\n  // unclear whether it is allowed for src and dst to be the same, so check here\n  // since it is a no-op anyways.\n  if (&old_list != &new_list) {\n    client.moveBetweenLists(old_list, new_list);\n  }\n}\n\nvoid ConnPoolImplBase::addDrainedCallbackImpl(Instance::DrainedCb cb) {\n  drained_callbacks_.push_back(cb);\n  checkForDrained();\n}\n\nvoid ConnPoolImplBase::closeIdleConnections() {\n  // Create a separate list of elements to close to avoid mutate-while-iterating problems.\n  std::list<ActiveClient*> to_close;\n\n  for (auto& client : ready_clients_) {\n    if (client->numActiveStreams() == 0) {\n      to_close.push_back(client.get());\n    }\n  }\n\n  if (pending_streams_.empty()) {\n    for (auto& client : connecting_clients_) {\n      to_close.push_back(client.get());\n    }\n  }\n\n  for (auto& entry : to_close) {\n    entry->close();\n  }\n}\n\nvoid ConnPoolImplBase::drainConnectionsImpl() {\n  closeIdleConnections();\n\n  // closeIdleConnections() closes all connections in ready_clients_ 
with no active streams,\n  // so all remaining entries in ready_clients_ are serving streams. Move them and all entries\n  // in busy_clients_ to draining.\n  while (!ready_clients_.empty()) {\n    transitionActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING);\n  }\n\n  // Changing busy_clients_ to DRAINING does not move them between lists,\n  // so use a for-loop since the list is not mutated.\n  ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_);\n  for (auto& busy_client : busy_clients_) {\n    transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING);\n  }\n}\n\nvoid ConnPoolImplBase::checkForDrained() {\n  if (drained_callbacks_.empty()) {\n    return;\n  }\n\n  closeIdleConnections();\n\n  if (pending_streams_.empty() && ready_clients_.empty() && busy_clients_.empty() &&\n      connecting_clients_.empty()) {\n    ENVOY_LOG(debug, \"invoking drained callbacks\");\n    for (const Instance::DrainedCb& cb : drained_callbacks_) {\n      cb();\n    }\n  }\n}\n\nvoid ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view failure_reason,\n                                         Network::ConnectionEvent event) {\n  if (client.state_ == ActiveClient::State::CONNECTING) {\n    ASSERT(connecting_stream_capacity_ >= client.effectiveConcurrentStreamLimit());\n    connecting_stream_capacity_ -= client.effectiveConcurrentStreamLimit();\n  }\n\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    // The client died.\n    ENVOY_CONN_LOG(debug, \"client disconnected, failure reason: {}\", client, failure_reason);\n\n    Envoy::Upstream::reportUpstreamCxDestroy(host_, event);\n    const bool incomplete_stream = client.closingWithIncompleteStream();\n    if (incomplete_stream) {\n      Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event);\n    }\n\n    if (client.state_ == ActiveClient::State::CONNECTING) {\n      
host_->cluster().stats().upstream_cx_connect_fail_.inc();\n      host_->stats().cx_connect_fail_.inc();\n\n      ConnectionPool::PoolFailureReason reason;\n      if (client.timed_out_) {\n        reason = ConnectionPool::PoolFailureReason::Timeout;\n      } else if (event == Network::ConnectionEvent::RemoteClose) {\n        reason = ConnectionPool::PoolFailureReason::RemoteConnectionFailure;\n      } else {\n        reason = ConnectionPool::PoolFailureReason::LocalConnectionFailure;\n      }\n\n      // Raw connect failures should never happen under normal circumstances. If we have an upstream\n      // that is behaving badly, streams can get stuck here in the pending state. If we see a\n      // connect failure, we purge all pending streams so that calling code can determine what to\n      // do with the stream.\n      // NOTE: We move the existing pending streams to a temporary list. This is done so that\n      //       if retry logic submits a new stream to the pool, we don't fail it inline.\n      purgePendingStreams(client.real_host_description_, failure_reason, reason);\n      // See if we should prefetch another connection based on active connections.\n      tryCreateNewConnections();\n    }\n\n    // We need to release our resourceManager() resources before checking below for\n    // whether we can create a new connection. 
Normally this would happen when\n    // client's destructor runs, but this object needs to be deferredDelete'd(), so\n    // this forces part of its cleanup to happen now.\n    client.releaseResources();\n\n    dispatcher_.deferredDelete(client.removeFromList(owningList(client.state_)));\n    if (incomplete_stream) {\n      checkForDrained();\n    }\n\n    client.state_ = ActiveClient::State::CLOSED;\n\n    // If we have pending streams and we just lost a connection we should make a new one.\n    if (!pending_streams_.empty()) {\n      tryCreateNewConnections();\n    }\n  } else if (event == Network::ConnectionEvent::Connected) {\n    client.conn_connect_ms_->complete();\n    client.conn_connect_ms_.reset();\n\n    ASSERT(client.state_ == ActiveClient::State::CONNECTING);\n    transitionActiveClientState(client, ActiveClient::State::READY);\n\n    onUpstreamReady();\n    checkForDrained();\n  }\n\n  if (client.connect_timer_) {\n    client.connect_timer_->disableTimer();\n    client.connect_timer_.reset();\n  }\n}\n\nPendingStream::PendingStream(ConnPoolImplBase& parent) : parent_(parent) {\n  parent_.host()->cluster().stats().upstream_rq_pending_total_.inc();\n  parent_.host()->cluster().stats().upstream_rq_pending_active_.inc();\n  parent_.host()->cluster().resourceManager(parent_.priority()).pendingRequests().inc();\n}\n\nPendingStream::~PendingStream() {\n  parent_.host()->cluster().stats().upstream_rq_pending_active_.dec();\n  parent_.host()->cluster().resourceManager(parent_.priority()).pendingRequests().dec();\n}\n\nvoid PendingStream::cancel(Envoy::ConnectionPool::CancelPolicy policy) {\n  parent_.onPendingStreamCancel(*this, policy);\n}\n\nvoid ConnPoolImplBase::purgePendingStreams(\n    const Upstream::HostDescriptionConstSharedPtr& host_description,\n    absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason) {\n  // NOTE: We move the existing pending streams to a temporary list. 
This is done so that\n  //       if retry logic submits a new stream to the pool, we don't fail it inline.\n  pending_streams_to_purge_ = std::move(pending_streams_);\n  while (!pending_streams_to_purge_.empty()) {\n    PendingStreamPtr stream =\n        pending_streams_to_purge_.front()->removeFromList(pending_streams_to_purge_);\n    host_->cluster().stats().upstream_rq_pending_failure_eject_.inc();\n    onPoolFailure(host_description, failure_reason, reason, stream->context());\n  }\n}\n\nbool ConnPoolImplBase::connectingConnectionIsExcess() const {\n  ASSERT(connecting_stream_capacity_ >=\n         connecting_clients_.front()->effectiveConcurrentStreamLimit());\n  // If perUpstreamPrefetchRatio is one, this simplifies to checking if there would still be\n  // sufficient connecting stream capacity to serve all pending streams if the most recent client\n  // were removed from the picture.\n  //\n  // If prefetch ratio is set, it also factors in the anticipated load based on both queued streams\n  // and active streams, and makes sure the connecting capacity would still be sufficient to serve\n  // that even with the most recent client removed.\n  return (pending_streams_.size() + num_active_streams_) * perUpstreamPrefetchRatio() <=\n         (connecting_stream_capacity_ -\n          connecting_clients_.front()->effectiveConcurrentStreamLimit() + num_active_streams_);\n}\n\nvoid ConnPoolImplBase::onPendingStreamCancel(PendingStream& stream,\n                                             Envoy::ConnectionPool::CancelPolicy policy) {\n  ENVOY_LOG(debug, \"cancelling pending stream\");\n  if (!pending_streams_to_purge_.empty()) {\n    // If pending_streams_to_purge_ is not empty, it means that we are called from\n    // with-in a onPoolFailure callback invoked in purgePendingStreams (i.e. purgePendingStreams\n    // is down in the call stack). 
Remove this stream from the list as it is cancelled,\n    // and there is no need to call its onPoolFailure callback.\n    stream.removeFromList(pending_streams_to_purge_);\n  } else {\n    stream.removeFromList(pending_streams_);\n  }\n  if (policy == Envoy::ConnectionPool::CancelPolicy::CloseExcess && !connecting_clients_.empty() &&\n      connectingConnectionIsExcess()) {\n    auto& client = *connecting_clients_.front();\n    transitionActiveClientState(client, ActiveClient::State::DRAINING);\n    client.close();\n  }\n\n  host_->cluster().stats().upstream_rq_cancelled_.inc();\n  checkForDrained();\n}\n\nnamespace {\n// Translate zero to UINT64_MAX so that the zero/unlimited case doesn't\n// have to be handled specially.\nuint64_t translateZeroToUnlimited(uint64_t limit) {\n  return (limit != 0) ? limit : std::numeric_limits<uint64_t>::max();\n}\n} // namespace\n\nActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_stream_limit,\n                           uint64_t concurrent_stream_limit)\n    : parent_(parent), remaining_streams_(translateZeroToUnlimited(lifetime_stream_limit)),\n      concurrent_stream_limit_(translateZeroToUnlimited(concurrent_stream_limit)),\n      connect_timer_(parent_.dispatcher().createTimer([this]() -> void { onConnectTimeout(); })) {\n  conn_connect_ms_ = std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n      parent_.host()->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher().timeSource());\n  conn_length_ = std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n      parent_.host()->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher().timeSource());\n  connect_timer_->enableTimer(parent_.host()->cluster().connectTimeout());\n  parent_.host()->stats().cx_total_.inc();\n  parent_.host()->stats().cx_active_.inc();\n  parent_.host()->cluster().stats().upstream_cx_total_.inc();\n  parent_.host()->cluster().stats().upstream_cx_active_.inc();\n  
parent_.host()->cluster().resourceManager(parent_.priority()).connections().inc();\n}\n\nActiveClient::~ActiveClient() { releaseResources(); }\n\nvoid ActiveClient::onEvent(Network::ConnectionEvent event) {\n  parent_.onConnectionEvent(*this, \"\", event);\n}\n\nvoid ActiveClient::releaseResources() {\n  if (!resources_released_) {\n    resources_released_ = true;\n\n    conn_length_->complete();\n\n    parent_.host()->cluster().stats().upstream_cx_active_.dec();\n    parent_.host()->stats().cx_active_.dec();\n    parent_.host()->cluster().resourceManager(parent_.priority()).connections().dec();\n  }\n}\n\nvoid ActiveClient::onConnectTimeout() {\n  ENVOY_CONN_LOG(debug, \"connect timeout\", *this);\n  parent_.host()->cluster().stats().upstream_cx_connect_timeout_.inc();\n  timed_out_ = true;\n  close();\n}\n\n} // namespace ConnectionPool\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/conn_pool/conn_pool_base.h",
    "content": "#pragma once\n\n#include \"envoy/common/conn_pool.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/common/linked_object.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace ConnectionPool {\n\nclass ConnPoolImplBase;\n\n// A placeholder struct for whatever data a given connection pool needs to\n// successfully attach and upstream connection to a downstream connection.\nstruct AttachContext {\n  // Add a virtual destructor to allow for the dynamic_cast ASSERT in typedContext.\n  virtual ~AttachContext() = default;\n};\n\n// ActiveClient provides a base class for connection pool clients that handles connection timings\n// as well as managing the connection timeout.\nclass ActiveClient : public LinkedObject<ActiveClient>,\n                     public Network::ConnectionCallbacks,\n                     public Event::DeferredDeletable,\n                     protected Logger::Loggable<Logger::Id::pool> {\npublic:\n  ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_stream_limit,\n               uint64_t concurrent_stream_limit);\n  ~ActiveClient() override;\n\n  void releaseResources();\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n  // Called if the connection does not complete within the cluster's connectTimeout()\n  void onConnectTimeout();\n\n  // Returns the concurrent stream limit, accounting for if the total stream limit\n  // is less than the concurrent stream limit.\n  uint64_t effectiveConcurrentStreamLimit() const {\n    return std::min(remaining_streams_, concurrent_stream_limit_);\n  }\n\n  // Closes the underlying connection.\n  virtual void close() PURE;\n  // Returns the ID of the underlying connection.\n  virtual uint64_t id() const PURE;\n  // Returns true 
if this closed with an incomplete stream, for stats tracking/ purposes.\n  virtual bool closingWithIncompleteStream() const PURE;\n  // Returns the number of active streams on this connection.\n  virtual size_t numActiveStreams() const PURE;\n\n  enum class State {\n    CONNECTING, // Connection is not yet established.\n    READY,      // Additional streams may be immediately dispatched to this connection.\n    BUSY,       // Connection is at its concurrent stream limit.\n    DRAINING,   // No more streams can be dispatched to this connection, and it will be closed\n    // when all streams complete.\n    CLOSED // Connection is closed and object is queued for destruction.\n  };\n\n  ConnPoolImplBase& parent_;\n  uint64_t remaining_streams_;\n  const uint64_t concurrent_stream_limit_;\n  State state_{State::CONNECTING};\n  Upstream::HostDescriptionConstSharedPtr real_host_description_;\n  Stats::TimespanPtr conn_connect_ms_;\n  Stats::TimespanPtr conn_length_;\n  Event::TimerPtr connect_timer_;\n  bool resources_released_{false};\n  bool timed_out_{false};\n};\n\n// PendingStream is the base class tracking streams for which a connection has been created but not\n// yet established.\nclass PendingStream : public LinkedObject<PendingStream>, public ConnectionPool::Cancellable {\npublic:\n  PendingStream(ConnPoolImplBase& parent);\n  ~PendingStream() override;\n\n  // ConnectionPool::Cancellable\n  void cancel(Envoy::ConnectionPool::CancelPolicy policy) override;\n\n  // The context here returns a pointer to whatever context is provided with newStream(),\n  // which will be passed back to the parent in onPoolReady or onPoolFailure.\n  virtual AttachContext& context() PURE;\n\n  ConnPoolImplBase& parent_;\n};\n\nusing PendingStreamPtr = std::unique_ptr<PendingStream>;\n\nusing ActiveClientPtr = std::unique_ptr<ActiveClient>;\n\n// Base class that handles stream queueing logic shared between connection pool implementations.\nclass ConnPoolImplBase : protected 
Logger::Loggable<Logger::Id::pool> {\npublic:\n  ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n                   Event::Dispatcher& dispatcher,\n                   const Network::ConnectionSocket::OptionsSharedPtr& options,\n                   const Network::TransportSocketOptionsSharedPtr& transport_socket_options);\n  virtual ~ConnPoolImplBase();\n\n  // A helper function to get the specific context type from the base class context.\n  template <class T> T& typedContext(AttachContext& context) {\n    ASSERT(dynamic_cast<T*>(&context) != nullptr);\n    return *static_cast<T*>(&context);\n  }\n\n  void addDrainedCallbackImpl(Instance::DrainedCb cb);\n  void drainConnectionsImpl();\n\n  // Closes and destroys all connections. This must be called in the destructor of\n  // derived classes because the derived ActiveClient will downcast parent_ to a more\n  // specific type of ConnPoolImplBase, but if the more specific part is already destructed\n  // (due to bottom-up destructor ordering in c++) that access will be invalid.\n  void destructAllConnections();\n\n  // Returns a new instance of ActiveClient.\n  virtual ActiveClientPtr instantiateActiveClient() PURE;\n\n  // Gets a pointer to the list that currently owns this client.\n  std::list<ActiveClientPtr>& owningList(ActiveClient::State state);\n\n  // Removes the PendingStream from the list of streams. Called when the PendingStream is\n  // cancelled, e.g. 
when the stream is reset before a connection has been established.\n  void onPendingStreamCancel(PendingStream& stream, Envoy::ConnectionPool::CancelPolicy policy);\n\n  // Fails all pending streams, calling onPoolFailure on the associated callbacks.\n  void purgePendingStreams(const Upstream::HostDescriptionConstSharedPtr& host_description,\n                           absl::string_view failure_reason,\n                           ConnectionPool::PoolFailureReason pool_failure_reason);\n\n  // Closes any idle connections.\n  void closeIdleConnections();\n\n  // Changes the state_ of an ActiveClient and moves to the appropriate list.\n  void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state);\n\n  void onConnectionEvent(ActiveClient& client, absl::string_view failure_reason,\n                         Network::ConnectionEvent event);\n  void checkForDrained();\n  void onUpstreamReady();\n  ConnectionPool::Cancellable* newStream(AttachContext& context);\n  // Called if this pool is likely to be picked soon, to determine if it's worth\n  // prefetching a connection.\n  bool maybePrefetch(float global_prefetch_ratio);\n\n  virtual ConnectionPool::Cancellable* newPendingStream(AttachContext& context) PURE;\n\n  void attachStreamToClient(Envoy::ConnectionPool::ActiveClient& client, AttachContext& context);\n\n  virtual void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description,\n                             absl::string_view failure_reason,\n                             ConnectionPool::PoolFailureReason pool_failure_reason,\n                             AttachContext& context) PURE;\n  virtual void onPoolReady(ActiveClient& client, AttachContext& context) PURE;\n  // Called by derived classes any time a stream is completed or destroyed for any reason.\n  void onStreamClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_stream);\n\n  const Upstream::HostConstSharedPtr& host() const { return host_; }\n 
 Event::Dispatcher& dispatcher() { return dispatcher_; }\n  Upstream::ResourcePriority priority() const { return priority_; }\n  const Network::ConnectionSocket::OptionsSharedPtr& socketOptions() { return socket_options_; }\n  const Network::TransportSocketOptionsSharedPtr& transportSocketOptions() {\n    return transport_socket_options_;\n  }\n\nprotected:\n  // Creates up to 3 connections, based on the prefetch ratio.\n  void tryCreateNewConnections();\n\n  // Creates a new connection if there is sufficient demand, it is allowed by resourceManager, or\n  // to avoid starving this pool.\n  // Demand is determined either by perUpstreamPrefetchRatio() or global_prefetch_ratio\n  // if this is called by maybePrefetch()\n  bool tryCreateNewConnection(float global_prefetch_ratio = 0);\n\n  // A helper function which determines if a canceled pending connection should\n  // be closed as excess or not.\n  bool connectingConnectionIsExcess() const;\n\n  // A helper function which determines if a new incoming stream should trigger\n  // connection prefetch.\n  bool shouldCreateNewConnection(float global_prefetch_ratio) const;\n\n  float perUpstreamPrefetchRatio() const;\n\n  const Upstream::HostConstSharedPtr host_;\n  const Upstream::ResourcePriority priority_;\n\n  Event::Dispatcher& dispatcher_;\n  const Network::ConnectionSocket::OptionsSharedPtr socket_options_;\n  const Network::TransportSocketOptionsSharedPtr transport_socket_options_;\n\n  std::list<Instance::DrainedCb> drained_callbacks_;\n  std::list<PendingStreamPtr> pending_streams_;\n\n  // When calling purgePendingStreams, this list will be used to hold the streams we are about\n  // to purge. 
We need this if one cancelled streams cancels a different pending stream\n  std::list<PendingStreamPtr> pending_streams_to_purge_;\n\n  // Clients that are ready to handle additional streams.\n  // All entries are in state READY.\n  std::list<ActiveClientPtr> ready_clients_;\n\n  // Clients that are not ready to handle additional streams due to being BUSY or DRAINING.\n  std::list<ActiveClientPtr> busy_clients_;\n\n  // Clients that are not ready to handle additional streams because they are CONNECTING.\n  std::list<ActiveClientPtr> connecting_clients_;\n\n  // The number of streams currently attached to clients.\n  uint64_t num_active_streams_{0};\n\n  // The number of streams that can be immediately dispatched\n  // if all CONNECTING connections become connected.\n  uint64_t connecting_stream_capacity_{0};\n};\n\n} // namespace ConnectionPool\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/crypto/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    hdrs = [\n        \"utility.h\",\n    ],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/common/crypto:crypto_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/singleton:threadsafe_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/common/crypto/utility.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/crypto/crypto.h\"\n\n#include \"common/singleton/threadsafe_singleton.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Common {\nnamespace Crypto {\n\nstruct VerificationOutput {\n  /**\n   * Verification result. If result_ is true, error_message_ is empty.\n   */\n  bool result_;\n\n  /**\n   * Error message when verification failed.\n   * TODO(crazyxy): switch to absl::StatusOr when available\n   */\n  std::string error_message_;\n};\n\nclass Utility {\npublic:\n  virtual ~Utility() = default;\n\n  /**\n   * Computes the SHA-256 digest of a buffer.\n   * @param buffer the buffer.\n   * @return a vector of bytes for the computed digest.\n   */\n  virtual std::vector<uint8_t> getSha256Digest(const Buffer::Instance& buffer) PURE;\n\n  /**\n   * Computes the SHA-256 HMAC for a given key and message.\n   * @param key the HMAC function key.\n   * @param message message data for the HMAC function.\n   * @return a vector of bytes for the computed HMAC.\n   */\n  virtual std::vector<uint8_t> getSha256Hmac(const std::vector<uint8_t>& key,\n                                             absl::string_view message) PURE;\n\n  /**\n   * Verify cryptographic signatures.\n   * @param hash hash function(including SHA1, SHA224, SHA256, SHA384, SHA512)\n   * @param key pointer to EVP_PKEY public key\n   * @param signature signature\n   * @param text clear text\n   * @return If the result_ is true, the error_message_ is empty; otherwise,\n   * the error_message_ stores the error message\n   */\n  virtual const VerificationOutput verifySignature(absl::string_view hash, CryptoObject& key,\n                                                   const std::vector<uint8_t>& signature,\n                                                   const std::vector<uint8_t>& text) PURE;\n\n  /**\n   * Import public key.\n   * @param key key 
string\n   * @return pointer to EVP_PKEY public key\n   */\n  virtual CryptoObjectPtr importPublicKey(const std::vector<uint8_t>& key) PURE;\n};\n\nusing UtilitySingleton = InjectableSingleton<Utility>;\nusing ScopedUtilitySingleton = ScopedInjectableLoader<Utility>;\n\n} // namespace Crypto\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"dispatcher_lib\",\n    srcs = [\n        \"dispatcher_impl.cc\",\n        \"file_event_impl.cc\",\n        \"signal_impl.cc\",\n    ],\n    hdrs = [\n        \"signal_impl.h\",\n    ],\n    deps = [\n        \":dispatcher_includes\",\n        \":libevent_scheduler_lib\",\n        \":real_time_system_lib\",\n        \"//include/envoy/common:scope_tracker_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/event:signal_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//include/envoy/network:listener_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/filesystem:watcher_lib\",\n        \"//source/common/network:dns_lib\",\n        \"//source/common/network:connection_lib\",\n        \"//source/common/network:listener_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n    ] + select({\n        \"//bazel:apple\": [\"//source/common/network:apple_dns_lib\"],\n        \"//conditions:default\": [],\n    }),\n)\n\nenvoy_cc_library(\n    name = \"event_impl_base_lib\",\n    srcs = [\"event_impl_base.cc\"],\n    hdrs = [\"event_impl_base.h\"],\n    external_deps = [\n        \"event\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"real_time_system_lib\",\n    srcs = [\"real_time_system.cc\"],\n    hdrs = [\"real_time_system.h\"],\n    deps = [\n        \":event_impl_base_lib\",\n        \":timer_lib\",\n        \"//include/envoy/event:timer_interface\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/event:dispatcher_includes\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dispatcher_includes\",\n    hdrs = [\n        \"dispatcher_impl.h\",\n        
\"event_impl_base.h\",\n        \"file_event_impl.h\",\n        \"schedulable_cb_impl.h\",\n    ],\n    deps = [\n        \":libevent_lib\",\n        \":libevent_scheduler_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:file_event_interface\",\n        \"//include/envoy/network:connection_handler_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/signal:fatal_error_handler_lib\",\n    ] + select({\n        \"//bazel:disable_signal_trace\": [],\n        \"//conditions:default\": [\n            \"//source/common/signal:sigaction_lib\",\n        ],\n    }),\n)\n\nenvoy_cc_library(\n    name = \"libevent_lib\",\n    srcs = [\"libevent.cc\"],\n    hdrs = [\"libevent.h\"],\n    external_deps = [\n        \"event\",\n    ],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"libevent_scheduler_lib\",\n    srcs = [\"libevent_scheduler.cc\"],\n    hdrs = [\"libevent_scheduler.h\"],\n    external_deps = [\"event\"],\n    deps = [\n        \":libevent_lib\",\n        \":schedulable_cb_lib\",\n        \":timer_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"schedulable_cb_lib\",\n    srcs = [\"schedulable_cb_impl.cc\"],\n    hdrs = [\"schedulable_cb_impl.h\"],\n    external_deps = [\"event\"],\n    deps = [\n        \":event_impl_base_lib\",\n        \":libevent_lib\",\n        \"//include/envoy/event:schedulable_cb_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"timer_lib\",\n    srcs = [\"timer_impl.cc\"],\n    hdrs = [\"timer_impl.h\"],\n    external_deps = 
[\"event\"],\n    deps = [\n        \":event_impl_base_lib\",\n        \":libevent_lib\",\n        \"//include/envoy/event:timer_interface\",\n        \"//source/common/common:scope_tracker\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"deferred_task\",\n    hdrs = [\"deferred_task.h\"],\n    deps = [\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/event:dispatcher_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"scaled_range_timer_manager\",\n    srcs = [\"scaled_range_timer_manager.cc\"],\n    hdrs = [\"scaled_range_timer_manager.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:range_timer_interface\",\n        \"//source/common/common:scope_tracker\",\n    ],\n)\n"
  },
  {
    "path": "source/common/event/deferred_task.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/event/dispatcher.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * A util to schedule a task to run in a future event loop cycle. One of the use cases is to run the\n * task after the previously DeferredDeletable objects are destroyed.\n */\nclass DeferredTaskUtil {\nprivate:\n  class DeferredTask : public DeferredDeletable {\n  public:\n    DeferredTask(std::function<void()>&& task) : task_(std::move(task)) {}\n    ~DeferredTask() override { task_(); }\n\n  private:\n    std::function<void()> task_;\n  };\n\npublic:\n  /**\n   * Submits an item for run deferred delete.\n   */\n  static void deferredRun(Dispatcher& dispatcher, std::function<void()>&& func) {\n    dispatcher.deferredDelete(std::make_unique<DeferredTask>(std::move(func)));\n  }\n};\n\n} // namespace Event\n} // namespace Envoy"
  },
  {
    "path": "source/common/event/dispatcher_impl.cc",
    "content": "#include \"common/event/dispatcher_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <string>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/network/listener.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n#include \"common/event/file_event_impl.h\"\n#include \"common/event/libevent_scheduler.h\"\n#include \"common/event/signal_impl.h\"\n#include \"common/event/timer_impl.h\"\n#include \"common/filesystem/watcher_impl.h\"\n#include \"common/network/connection_impl.h\"\n#include \"common/network/dns_impl.h\"\n#include \"common/network/tcp_listener_impl.h\"\n#include \"common/network/udp_listener_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"event2/event.h\"\n\n#ifdef ENVOY_HANDLE_SIGNALS\n#include \"common/signal/signal_action.h\"\n#endif\n\n#ifdef __APPLE__\n#include \"common/network/apple_dns_impl.h\"\n#endif\n\nnamespace Envoy {\nnamespace Event {\n\nDispatcherImpl::DispatcherImpl(const std::string& name, Api::Api& api,\n                               Event::TimeSystem& time_system)\n    : DispatcherImpl(name, std::make_unique<Buffer::WatermarkBufferFactory>(), api, time_system) {}\n\nDispatcherImpl::DispatcherImpl(const std::string& name, Buffer::WatermarkFactoryPtr&& factory,\n                               Api::Api& api, Event::TimeSystem& time_system)\n    : name_(name), api_(api), buffer_factory_(std::move(factory)),\n      scheduler_(time_system.createScheduler(base_scheduler_, base_scheduler_)),\n      deferred_delete_cb_(base_scheduler_.createSchedulableCallback(\n          [this]() -> void { clearDeferredDeleteList(); })),\n      post_cb_(base_scheduler_.createSchedulableCallback([this]() -> void { runPostCallbacks(); })),\n      current_to_delete_(&to_delete_1_) {\n  ASSERT(!name_.empty());\n  
FatalErrorHandler::registerFatalErrorHandler(*this);\n  updateApproximateMonotonicTimeInternal();\n  base_scheduler_.registerOnPrepareCallback(\n      std::bind(&DispatcherImpl::updateApproximateMonotonicTime, this));\n}\n\nDispatcherImpl::~DispatcherImpl() { FatalErrorHandler::removeFatalErrorHandler(*this); }\n\nvoid DispatcherImpl::initializeStats(Stats::Scope& scope,\n                                     const absl::optional<std::string>& prefix) {\n  const std::string effective_prefix = prefix.has_value() ? *prefix : absl::StrCat(name_, \".\");\n  // This needs to be run in the dispatcher's thread, so that we have a thread id to log.\n  post([this, &scope, effective_prefix] {\n    stats_prefix_ = effective_prefix + \"dispatcher\";\n    stats_ = std::make_unique<DispatcherStats>(\n        DispatcherStats{ALL_DISPATCHER_STATS(POOL_HISTOGRAM_PREFIX(scope, stats_prefix_ + \".\"))});\n    base_scheduler_.initializeStats(stats_.get());\n    ENVOY_LOG(debug, \"running {} on thread {}\", stats_prefix_, run_tid_.debugString());\n  });\n}\n\nvoid DispatcherImpl::clearDeferredDeleteList() {\n  ASSERT(isThreadSafe());\n  std::vector<DeferredDeletablePtr>* to_delete = current_to_delete_;\n\n  size_t num_to_delete = to_delete->size();\n  if (deferred_deleting_ || !num_to_delete) {\n    return;\n  }\n\n  ENVOY_LOG(trace, \"clearing deferred deletion list (size={})\", num_to_delete);\n\n  // Swap the current deletion vector so that if we do deferred delete while we are deleting, we\n  // use the other vector. We will get another callback to delete that vector.\n  if (current_to_delete_ == &to_delete_1_) {\n    current_to_delete_ = &to_delete_2_;\n  } else {\n    current_to_delete_ = &to_delete_1_;\n  }\n\n  deferred_deleting_ = true;\n\n  // Calling clear() on the vector does not specify which order destructors run in. We want to\n  // destroy in FIFO order so just do it manually. 
This required 2 passes over the vector which is\n  // not optimal but can be cleaned up later if needed.\n  for (size_t i = 0; i < num_to_delete; i++) {\n    (*to_delete)[i].reset();\n  }\n\n  to_delete->clear();\n  deferred_deleting_ = false;\n}\n\nNetwork::ConnectionPtr\nDispatcherImpl::createServerConnection(Network::ConnectionSocketPtr&& socket,\n                                       Network::TransportSocketPtr&& transport_socket,\n                                       StreamInfo::StreamInfo& stream_info) {\n  ASSERT(isThreadSafe());\n  return std::make_unique<Network::ConnectionImpl>(*this, std::move(socket),\n                                                   std::move(transport_socket), stream_info, true);\n}\n\nNetwork::ClientConnectionPtr\nDispatcherImpl::createClientConnection(Network::Address::InstanceConstSharedPtr address,\n                                       Network::Address::InstanceConstSharedPtr source_address,\n                                       Network::TransportSocketPtr&& transport_socket,\n                                       const Network::ConnectionSocket::OptionsSharedPtr& options) {\n  ASSERT(isThreadSafe());\n  return std::make_unique<Network::ClientConnectionImpl>(*this, address, source_address,\n                                                         std::move(transport_socket), options);\n}\n\nNetwork::DnsResolverSharedPtr DispatcherImpl::createDnsResolver(\n    const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,\n    const bool use_tcp_for_dns_lookups) {\n  ASSERT(isThreadSafe());\n#ifdef __APPLE__\n  static bool use_apple_api_for_dns_lookups =\n      Runtime::runtimeFeatureEnabled(\"envoy.restart_features.use_apple_api_for_dns_lookups\");\n  if (use_apple_api_for_dns_lookups) {\n    RELEASE_ASSERT(\n        resolvers.empty(),\n        \"defining custom resolvers is not possible when using Apple APIs for DNS resolution. \"\n        \"Apple's API only allows overriding DNS resolvers via system settings. 
Delete resolvers \"\n        \"config or disable the envoy.restart_features.use_apple_api_for_dns_lookups runtime \"\n        \"feature.\");\n    RELEASE_ASSERT(!use_tcp_for_dns_lookups,\n                   \"using TCP for DNS lookups is not possible when using Apple APIs for DNS \"\n                   \"resolution. Apple' API only uses UDP for DNS resolution. Use UDP or disable \"\n                   \"the envoy.restart_features.use_apple_api_for_dns_lookups runtime feature.\");\n    return Network::DnsResolverSharedPtr{new Network::AppleDnsResolverImpl(*this)};\n  }\n#endif\n  return Network::DnsResolverSharedPtr{\n      new Network::DnsResolverImpl(*this, resolvers, use_tcp_for_dns_lookups)};\n}\n\nFileEventPtr DispatcherImpl::createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger,\n                                             uint32_t events) {\n  ASSERT(isThreadSafe());\n  return FileEventPtr{new FileEventImpl(*this, fd, cb, trigger, events)};\n}\n\nFilesystem::WatcherPtr DispatcherImpl::createFilesystemWatcher() {\n  ASSERT(isThreadSafe());\n  return Filesystem::WatcherPtr{new Filesystem::WatcherImpl(*this, api_)};\n}\n\nNetwork::ListenerPtr DispatcherImpl::createListener(Network::SocketSharedPtr&& socket,\n                                                    Network::TcpListenerCallbacks& cb,\n                                                    bool bind_to_port, uint32_t backlog_size) {\n  ASSERT(isThreadSafe());\n  return std::make_unique<Network::TcpListenerImpl>(*this, std::move(socket), cb, bind_to_port,\n                                                    backlog_size);\n}\n\nNetwork::UdpListenerPtr DispatcherImpl::createUdpListener(Network::SocketSharedPtr socket,\n                                                          Network::UdpListenerCallbacks& cb) {\n  ASSERT(isThreadSafe());\n  return std::make_unique<Network::UdpListenerImpl>(*this, std::move(socket), cb, timeSource());\n}\n\nTimerPtr DispatcherImpl::createTimer(TimerCb cb) {\n  
ASSERT(isThreadSafe());\n  return createTimerInternal(cb);\n}\n\nEvent::SchedulableCallbackPtr DispatcherImpl::createSchedulableCallback(std::function<void()> cb) {\n  ASSERT(isThreadSafe());\n  return base_scheduler_.createSchedulableCallback(cb);\n}\n\nTimerPtr DispatcherImpl::createTimerInternal(TimerCb cb) {\n  return scheduler_->createTimer(cb, *this);\n}\n\nvoid DispatcherImpl::deferredDelete(DeferredDeletablePtr&& to_delete) {\n  ASSERT(isThreadSafe());\n  current_to_delete_->emplace_back(std::move(to_delete));\n  ENVOY_LOG(trace, \"item added to deferred deletion list (size={})\", current_to_delete_->size());\n  if (current_to_delete_->size() == 1) {\n    deferred_delete_cb_->scheduleCallbackCurrentIteration();\n  }\n}\n\nvoid DispatcherImpl::exit() { base_scheduler_.loopExit(); }\n\nSignalEventPtr DispatcherImpl::listenForSignal(int signal_num, SignalCb cb) {\n  ASSERT(isThreadSafe());\n  return SignalEventPtr{new SignalEventImpl(*this, signal_num, cb)};\n}\n\nvoid DispatcherImpl::post(std::function<void()> callback) {\n  bool do_post;\n  {\n    Thread::LockGuard lock(post_lock_);\n    do_post = post_callbacks_.empty();\n    post_callbacks_.push_back(callback);\n  }\n\n  if (do_post) {\n    post_cb_->scheduleCallbackCurrentIteration();\n  }\n}\n\nvoid DispatcherImpl::run(RunType type) {\n  run_tid_ = api_.threadFactory().currentThreadId();\n\n  // Flush all post callbacks before we run the event loop. We do this because there are post\n  // callbacks that have to get run before the initial event loop starts running. libevent does\n  // not guarantee that events are run in any particular order. 
So even if we post() and call\n  // event_base_once() before some other event, the other event might get called first.\n  runPostCallbacks();\n  base_scheduler_.run(type);\n}\n\nMonotonicTime DispatcherImpl::approximateMonotonicTime() const {\n  return approximate_monotonic_time_;\n}\n\nvoid DispatcherImpl::updateApproximateMonotonicTime() { updateApproximateMonotonicTimeInternal(); }\n\nvoid DispatcherImpl::updateApproximateMonotonicTimeInternal() {\n  approximate_monotonic_time_ = api_.timeSource().monotonicTime();\n}\n\nvoid DispatcherImpl::runPostCallbacks() {\n  while (true) {\n    // It is important that this declaration is inside the body of the loop so that the callback is\n    // destructed while post_lock_ is not held. If callback is declared outside the loop and reused\n    // for each iteration, the previous iteration's callback is destructed when callback is\n    // re-assigned, which happens while holding the lock. This can lead to a deadlock (via\n    // recursive mutex acquisition) if destroying the callback runs a destructor, which through some\n    // callstack calls post() on this dispatcher.\n    std::function<void()> callback;\n    {\n      Thread::LockGuard lock(post_lock_);\n      if (post_callbacks_.empty()) {\n        return;\n      }\n      callback = post_callbacks_.front();\n      post_callbacks_.pop_front();\n    }\n    callback();\n  }\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/dispatcher_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/scope_tracker.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/connection_handler.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/event/libevent.h\"\n#include \"common/event/libevent_scheduler.h\"\n#include \"common/signal/fatal_error_handler.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * libevent implementation of Event::Dispatcher.\n */\nclass DispatcherImpl : Logger::Loggable<Logger::Id::main>,\n                       public Dispatcher,\n                       public FatalErrorHandlerInterface {\npublic:\n  DispatcherImpl(const std::string& name, Api::Api& api, Event::TimeSystem& time_system);\n  DispatcherImpl(const std::string& name, Buffer::WatermarkFactoryPtr&& factory, Api::Api& api,\n                 Event::TimeSystem& time_system);\n  ~DispatcherImpl() override;\n\n  /**\n   * @return event_base& the libevent base.\n   */\n  event_base& base() { return base_scheduler_.base(); }\n\n  // Event::Dispatcher\n  const std::string& name() override { return name_; }\n  TimeSource& timeSource() override { return api_.timeSource(); }\n  void initializeStats(Stats::Scope& scope, const absl::optional<std::string>& prefix) override;\n  void clearDeferredDeleteList() override;\n  Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket,\n                                                Network::TransportSocketPtr&& transport_socket,\n                                                StreamInfo::StreamInfo& stream_info) override;\n  Network::ClientConnectionPtr\n  createClientConnection(Network::Address::InstanceConstSharedPtr address,\n                         
Network::Address::InstanceConstSharedPtr source_address,\n                         Network::TransportSocketPtr&& transport_socket,\n                         const Network::ConnectionSocket::OptionsSharedPtr& options) override;\n  Network::DnsResolverSharedPtr\n  createDnsResolver(const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,\n                    const bool use_tcp_for_dns_lookups) override;\n  FileEventPtr createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger,\n                               uint32_t events) override;\n  Filesystem::WatcherPtr createFilesystemWatcher() override;\n  Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket,\n                                      Network::TcpListenerCallbacks& cb, bool bind_to_port,\n                                      uint32_t backlog_size) override;\n  Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket,\n                                            Network::UdpListenerCallbacks& cb) override;\n  TimerPtr createTimer(TimerCb cb) override;\n  Event::SchedulableCallbackPtr createSchedulableCallback(std::function<void()> cb) override;\n  void deferredDelete(DeferredDeletablePtr&& to_delete) override;\n  void exit() override;\n  SignalEventPtr listenForSignal(int signal_num, SignalCb cb) override;\n  void post(std::function<void()> callback) override;\n  void run(RunType type) override;\n  Buffer::WatermarkFactory& getWatermarkFactory() override { return *buffer_factory_; }\n  const ScopeTrackedObject* setTrackedObject(const ScopeTrackedObject* object) override {\n    const ScopeTrackedObject* return_object = current_object_;\n    current_object_ = object;\n    return return_object;\n  }\n  MonotonicTime approximateMonotonicTime() const override;\n  void updateApproximateMonotonicTime() override;\n\n  // FatalErrorInterface\n  void onFatalError(std::ostream& os) const override {\n    // Dump the state of the tracked object if it is in the 
current thread. This generally results\n    // in dumping the active state only for the thread which caused the fatal error.\n    if (isThreadSafe()) {\n      if (current_object_) {\n        current_object_->dumpState(os);\n      }\n    }\n  }\n\nprivate:\n  TimerPtr createTimerInternal(TimerCb cb);\n  void updateApproximateMonotonicTimeInternal();\n  void runPostCallbacks();\n\n  // Validate that an operation is thread safe, i.e. it's invoked on the same thread that the\n  // dispatcher run loop is executing on. We allow run_tid_ to be empty for tests where we don't\n  // invoke run().\n  bool isThreadSafe() const override {\n    return run_tid_.isEmpty() || run_tid_ == api_.threadFactory().currentThreadId();\n  }\n\n  const std::string name_;\n  Api::Api& api_;\n  std::string stats_prefix_;\n  DispatcherStatsPtr stats_;\n  Thread::ThreadId run_tid_;\n  Buffer::WatermarkFactoryPtr buffer_factory_;\n  LibeventScheduler base_scheduler_;\n  SchedulerPtr scheduler_;\n  SchedulableCallbackPtr deferred_delete_cb_;\n  SchedulableCallbackPtr post_cb_;\n  std::vector<DeferredDeletablePtr> to_delete_1_;\n  std::vector<DeferredDeletablePtr> to_delete_2_;\n  std::vector<DeferredDeletablePtr>* current_to_delete_;\n  Thread::MutexBasicLockable post_lock_;\n  std::list<std::function<void()>> post_callbacks_ ABSL_GUARDED_BY(post_lock_);\n  const ScopeTrackedObject* current_object_{};\n  bool deferred_deleting_{};\n  MonotonicTime approximate_monotonic_time_;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/event_impl_base.cc",
    "content": "#include \"common/event/event_impl_base.h\"\n\n#include \"event2/event.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nImplBase::~ImplBase() {\n  // Derived classes are assumed to have already assigned the raw event in the constructor.\n  event_del(&raw_event_);\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/event_impl_base.h",
    "content": "#pragma once\n\n#include \"event2/event_struct.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * Base class for libevent event implementations. The event struct is embedded inside of this class\n * and derived classes are expected to assign it inside of the constructor.\n */\nclass ImplBase {\nprotected:\n  ~ImplBase();\n\n  event raw_event_;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/file_event_impl.cc",
    "content": "#include \"common/event/file_event_impl.h\"\n\n#include <cstdint>\n\n#include \"common/common/assert.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"event2/event.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nFileEventImpl::FileEventImpl(DispatcherImpl& dispatcher, os_fd_t fd, FileReadyCb cb,\n                             FileTriggerType trigger, uint32_t events)\n    : cb_(cb), fd_(fd), trigger_(trigger),\n      activate_fd_events_next_event_loop_(\n          // Only read the runtime feature if the runtime loader singleton has already been created.\n          // Attempts to access runtime features too early in the initialization sequence triggers\n          // some spurious, scary-looking logs about not being able to read runtime feature config\n          // from the singleton. These warnings are caused by creation of filesystem watchers as\n          // part of the process of loading the runtime configuration from disk.\n          Runtime::LoaderSingleton::getExisting()\n              ? 
Runtime::runtimeFeatureEnabled(\n                    \"envoy.reloadable_features.activate_fds_next_event_loop\")\n              : true) {\n  // Treat the lack of a valid fd (which in practice should only happen if we run out of FDs) as\n  // an OOM condition and just crash.\n  RELEASE_ASSERT(SOCKET_VALID(fd), \"\");\n#ifdef WIN32\n  RELEASE_ASSERT(trigger_ == FileTriggerType::Level,\n                 \"libevent does not support edge triggers on Windows\");\n#endif\n  assignEvents(events, &dispatcher.base());\n  event_add(&raw_event_, nullptr);\n  if (activate_fd_events_next_event_loop_) {\n    activation_cb_ = dispatcher.createSchedulableCallback([this]() {\n      ASSERT(injected_activation_events_ != 0);\n      mergeInjectedEventsAndRunCb(0);\n    });\n  }\n}\n\nvoid FileEventImpl::activate(uint32_t events) {\n  // events is not empty.\n  ASSERT(events != 0);\n  // Only supported event types are set.\n  ASSERT((events & (FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed)) == events);\n\n  if (!activate_fd_events_next_event_loop_) {\n    // Legacy implementation\n    int libevent_events = 0;\n    if (events & FileReadyType::Read) {\n      libevent_events |= EV_READ;\n    }\n\n    if (events & FileReadyType::Write) {\n      libevent_events |= EV_WRITE;\n    }\n\n    if (events & FileReadyType::Closed) {\n      libevent_events |= EV_CLOSED;\n    }\n\n    ASSERT(libevent_events);\n    event_active(&raw_event_, libevent_events, 0);\n    return;\n  }\n\n  // Schedule the activation callback so it runs as part of the next loop iteration if it is not\n  // already scheduled.\n  if (injected_activation_events_ == 0) {\n    ASSERT(!activation_cb_->enabled());\n    activation_cb_->scheduleCallbackNextIteration();\n  }\n  ASSERT(activation_cb_->enabled());\n\n  // Merge new events with pending injected events.\n  injected_activation_events_ |= events;\n}\n\nvoid FileEventImpl::assignEvents(uint32_t events, event_base* base) {\n  ASSERT(base != nullptr);\n  
event_assign(\n      &raw_event_, base, fd_,\n      EV_PERSIST | (trigger_ == FileTriggerType::Level ? 0 : EV_ET) |\n          (events & FileReadyType::Read ? EV_READ : 0) |\n          (events & FileReadyType::Write ? EV_WRITE : 0) |\n          (events & FileReadyType::Closed ? EV_CLOSED : 0),\n      [](evutil_socket_t, short what, void* arg) -> void {\n        auto* event = static_cast<FileEventImpl*>(arg);\n        uint32_t events = 0;\n        if (what & EV_READ) {\n          events |= FileReadyType::Read;\n        }\n\n        if (what & EV_WRITE) {\n          events |= FileReadyType::Write;\n        }\n\n        if (what & EV_CLOSED) {\n          events |= FileReadyType::Closed;\n        }\n\n        ASSERT(events != 0);\n        event->mergeInjectedEventsAndRunCb(events);\n      },\n      this);\n}\n\nvoid FileEventImpl::setEnabled(uint32_t events) {\n  if (activate_fd_events_next_event_loop_ && injected_activation_events_ != 0) {\n    // Clear pending events on updates to the fd event mask to avoid delivering events that are no\n    // longer relevant. Updating the event mask will reset the fd edge trigger state so the proxy\n    // will be able to determine the fd read/write state without need for the injected activation\n    // events.\n    injected_activation_events_ = 0;\n    activation_cb_->cancel();\n  }\n\n  auto* base = event_get_base(&raw_event_);\n  event_del(&raw_event_);\n  assignEvents(events, base);\n  event_add(&raw_event_, nullptr);\n}\n\nvoid FileEventImpl::mergeInjectedEventsAndRunCb(uint32_t events) {\n  if (activate_fd_events_next_event_loop_ && injected_activation_events_ != 0) {\n    events |= injected_activation_events_;\n    injected_activation_events_ = 0;\n    activation_cb_->cancel();\n  }\n  cb_(events);\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/file_event_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n\n#include \"envoy/event/file_event.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/event/event_impl_base.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * Implementation of FileEvent for libevent that uses persistent events and\n * assumes the user will read/write until EAGAIN is returned from the file.\n */\nclass FileEventImpl : public FileEvent, ImplBase {\npublic:\n  FileEventImpl(DispatcherImpl& dispatcher, os_fd_t fd, FileReadyCb cb, FileTriggerType trigger,\n                uint32_t events);\n\n  // Event::FileEvent\n  void activate(uint32_t events) override;\n  void setEnabled(uint32_t events) override;\n\nprivate:\n  void assignEvents(uint32_t events, event_base* base);\n  void mergeInjectedEventsAndRunCb(uint32_t events);\n\n  FileReadyCb cb_;\n  os_fd_t fd_;\n  FileTriggerType trigger_;\n\n  // Injected FileReadyType events that were scheduled by recent calls to activate() and are pending\n  // delivery.\n  uint32_t injected_activation_events_{};\n  // Used to schedule delayed event activation. Armed iff injected_activation_events_ != 0.\n  SchedulableCallbackPtr activation_cb_;\n  // Latched \"envoy.reloadable_features.activate_fds_next_event_loop\" runtime feature. If true, fd\n  // events scheduled via activate are evaluated in the next iteration of the event loop after\n  // polling and activating new fd events.\n  const bool activate_fd_events_next_event_loop_;\n};\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/libevent.cc",
    "content": "#include \"common/event/libevent.h\"\n\n#include <csignal>\n\n#include \"common/common/assert.h\"\n\n#include \"event2/thread.h\"\n\nnamespace Envoy {\nnamespace Event {\nnamespace Libevent {\n\nbool Global::initialized_ = false;\n\nvoid Global::initialize() {\n#ifdef WIN32\n  evthread_use_windows_threads();\n#else\n  evthread_use_pthreads();\n\n  // Ignore SIGPIPE and allow errors to propagate through error codes.\n  signal(SIGPIPE, SIG_IGN);\n#endif\n  initialized_ = true;\n}\n\n} // namespace Libevent\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/libevent.h",
    "content": "#pragma once\n\n#include \"common/common/c_smart_ptr.h\"\n\nstruct event_base;\nextern \"C\" {\nvoid event_base_free(event_base*);\n}\n\nstruct evconnlistener;\nextern \"C\" {\nvoid evconnlistener_free(evconnlistener*);\n}\n\nnamespace Envoy {\nnamespace Event {\nnamespace Libevent {\n\n/**\n * Global functionality specific to libevent.\n */\nclass Global {\npublic:\n  static bool initialized() { return initialized_; }\n\n  /**\n   * Initialize the library globally.\n   */\n  static void initialize();\n\nprivate:\n  // True if initialized() has been called.\n  static bool initialized_;\n};\n\nusing BasePtr = CSmartPtr<event_base, event_base_free>;\n\n} // namespace Libevent\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/libevent_scheduler.cc",
    "content": "#include \"common/event/libevent_scheduler.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/event/schedulable_cb_impl.h\"\n#include \"common/event/timer_impl.h\"\n\n#include \"event2/util.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nnamespace {\nvoid recordTimeval(Stats::Histogram& histogram, const timeval& tv) {\n  histogram.recordValue(tv.tv_sec * 1000000 + tv.tv_usec);\n}\n} // namespace\n\nLibeventScheduler::LibeventScheduler() {\n#ifdef WIN32\n  event_config* event_config = event_config_new();\n  RELEASE_ASSERT(event_config != nullptr,\n                 \"Failed to initialize libevent event_base: event_config_new\");\n  // Request wepoll backend by avoiding win32 backend.\n  int error = event_config_avoid_method(event_config, \"win32\");\n  RELEASE_ASSERT(error == 0, \"Failed to initialize libevent event_base: event_config_avoid_method\");\n  event_base* event_base = event_base_new_with_config(event_config);\n  event_config_free(event_config);\n#else\n  event_base* event_base = event_base_new();\n#endif\n  RELEASE_ASSERT(event_base != nullptr, \"Failed to initialize libevent event_base\");\n  libevent_ = Libevent::BasePtr(event_base);\n\n  // The dispatcher won't work as expected if libevent hasn't been configured to use threads.\n  RELEASE_ASSERT(Libevent::Global::initialized(), \"\");\n}\n\nTimerPtr LibeventScheduler::createTimer(const TimerCb& cb, Dispatcher& dispatcher) {\n  return std::make_unique<TimerImpl>(libevent_, cb, dispatcher);\n};\n\nSchedulableCallbackPtr\nLibeventScheduler::createSchedulableCallback(const std::function<void()>& cb) {\n  return std::make_unique<SchedulableCallbackImpl>(libevent_, cb);\n};\n\nvoid LibeventScheduler::run(Dispatcher::RunType mode) {\n  int flag = 0;\n  switch (mode) {\n  case Dispatcher::RunType::NonBlock:\n    flag = LibeventScheduler::flagsBasedOnEventType();\n  case Dispatcher::RunType::Block:\n    // The default flags have 'block' behavior. 
See\n    // http://www.wangafu.net/~nickm/libevent-book/Ref3_eventloop.html\n    break;\n  case Dispatcher::RunType::RunUntilExit:\n    flag = EVLOOP_NO_EXIT_ON_EMPTY;\n    break;\n  }\n  event_base_loop(libevent_.get(), flag);\n}\n\nvoid LibeventScheduler::loopExit() { event_base_loopexit(libevent_.get(), nullptr); }\n\nvoid LibeventScheduler::registerOnPrepareCallback(OnPrepareCallback&& callback) {\n  ASSERT(callback);\n  ASSERT(!callback_);\n\n  callback_ = std::move(callback);\n  evwatch_prepare_new(libevent_.get(), &onPrepareForCallback, this);\n}\n\nvoid LibeventScheduler::initializeStats(DispatcherStats* stats) {\n  stats_ = stats;\n  // These are thread safe.\n  evwatch_prepare_new(libevent_.get(), &onPrepareForStats, this);\n  evwatch_check_new(libevent_.get(), &onCheckForStats, this);\n}\n\nvoid LibeventScheduler::onPrepareForCallback(evwatch*, const evwatch_prepare_cb_info*, void* arg) {\n  // `self` is `this`, passed in from evwatch_prepare_new.\n  auto self = static_cast<LibeventScheduler*>(arg);\n  self->callback_();\n}\n\nvoid LibeventScheduler::onPrepareForStats(evwatch*, const evwatch_prepare_cb_info* info,\n                                          void* arg) {\n  // `self` is `this`, passed in from evwatch_prepare_new.\n  auto self = static_cast<LibeventScheduler*>(arg);\n\n  // Record poll timeout and prepare time for this iteration of the event loop. The timeout is the\n  // expected polling duration, whereas the actual polling duration will be the difference measured\n  // between the prepare time and the check time immediately after polling. 
These are compared in\n  // onCheckForStats to compute the poll_delay stat.\n  self->timeout_set_ = evwatch_prepare_get_timeout(info, &self->timeout_);\n  evutil_gettimeofday(&self->prepare_time_, nullptr);\n\n  // If we have a check time available from a previous iteration of the event loop (that is, all but\n  // the first), compute the loop_duration stat.\n  if (self->check_time_.tv_sec != 0) {\n    timeval delta;\n    evutil_timersub(&self->prepare_time_, &self->check_time_, &delta);\n    recordTimeval(self->stats_->loop_duration_us_, delta);\n  }\n}\n\nvoid LibeventScheduler::onCheckForStats(evwatch*, const evwatch_check_cb_info*, void* arg) {\n  // `self` is `this`, passed in from evwatch_check_new.\n  auto self = static_cast<LibeventScheduler*>(arg);\n\n  // Record check time for this iteration of the event loop. Use this together with prepare time\n  // from above to compute the actual polling duration, and store it for the next iteration of the\n  // event loop to compute the loop duration.\n  evutil_gettimeofday(&self->check_time_, nullptr);\n  if (self->timeout_set_) {\n    timeval delta, delay;\n    evutil_timersub(&self->check_time_, &self->prepare_time_, &delta);\n    evutil_timersub(&delta, &self->timeout_, &delay);\n\n    // Delay can be negative, meaning polling completed early. This happens in normal operation,\n    // either because I/O was ready before we hit the timeout, or just because the kernel was\n    // feeling saucy. Disregard negative delays in stats, since they don't indicate anything\n    // particularly useful.\n    if (delay.tv_sec >= 0) {\n      recordTimeval(self->stats_->poll_delay_us_, delay);\n    }\n  }\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/libevent_scheduler.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/schedulable_cb.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"common/event/libevent.h\"\n\n#include \"event2/event.h\"\n#include \"event2/watch.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n// Implements Scheduler based on libevent.\n//\n// Here is a rough summary of operations that libevent performs in each event loop iteration, in\n// order. Note that the invocation order for \"same-iteration\" operations that execute as a group\n// can be surprising and invocation order of expired timers is non-deterministic.\n// Whenever possible, it is preferable to avoid making event invocation ordering assumptions.\n//\n// 1. Calculate the poll timeout by comparing the current time to the deadline of the closest\n// timer (the one at head of the priority queue).\n// 2. Run registered \"prepare\" callbacks.\n// 3. Poll for fd events using the closest timer as timeout, add active fds to the work list.\n// 4. Run registered \"check\" callbacks.\n// 5. Check timer deadlines against current time and move expired timers from the timer priority\n// queue to the work list. Expired timers are moved to the work list in a non-deterministic order.\n// 6. Execute items in the work list until the list is empty. Note that additional work\n// items could be added to the work list during execution of this step, more details below.\n// 7. Goto 1 if the loop termination condition has not been reached\n//\n// The following \"same-iteration\" work items are added directly to the work list when they are\n// scheduled so they execute in the current iteration of the event loop. Note that there are no\n// ordering guarantees when mixing the mechanisms below. 
Specifically, it is unsafe to assume that\n// calling post followed by deferredDelete will result in the post callback being invoked before the\n// deferredDelete; deferredDelete will run first if there is a pending deferredDeletion at the time\n// the post callback is scheduled because deferredDelete invocation is grouped.\n// - Event::Dispatcher::post(cb). Post callbacks are invoked as a group.\n// - Event::Dispatcher::deferredDelete(object) and Event::DeferredTaskUtil::deferredRun(...).\n// The same mechanism implements both of these operations, so they are invoked as a group.\n// - Event::SchedulableCallback::scheduleCallbackCurrentIteration(). Each of these callbacks is\n// scheduled and invoked independently.\n// - Event::FileEvent::activate() if \"envoy.reloadable_features.activate_fds_next_event_loop\"\n// runtime feature is disabled.\n// - Event::Timer::enableTimer(0) if \"envoy.reloadable_features.activate_timers_next_event_loop\"\n// runtime feature is disabled.\n//\n// Event::FileEvent::activate and Event::SchedulableCallback::scheduleCallbackNextIteration are\n// implemented as libevent timers with a deadline of 0. Both of these actions are moved to the work\n// list while checking for expired timers during step 5.\n//\n// Events execute in the following order, derived from the order in which items were added to the\n// work list:\n// 0. Events added via event_active prior to the start of the event loop (in tests)\n// 1. Fd events\n// 2. Timers, FileEvent::activate and SchedulableCallback::scheduleCallbackNextIteration\n// 3. 
\"Same-iteration\" work items described above, including Event::Dispatcher::post callbacks\nclass LibeventScheduler : public Scheduler, public CallbackScheduler {\npublic:\n  using OnPrepareCallback = std::function<void()>;\n  LibeventScheduler();\n\n  // Scheduler\n  TimerPtr createTimer(const TimerCb& cb, Dispatcher& dispatcher) override;\n  SchedulableCallbackPtr createSchedulableCallback(const std::function<void()>& cb) override;\n\n  /**\n   * Runs the event loop.\n   *\n   * @param mode The mode in which to run the event loop.\n   */\n  void run(Dispatcher::RunType mode);\n\n  /**\n   * Exits the libevent loop.\n   */\n  void loopExit();\n\n  /**\n   * TODO(jmarantz): consider strengthening this abstraction and instead of\n   * exposing the libevent base pointer, provide API abstractions for the calls\n   * into it. Among other benefits this might make it more tractable to someday\n   * consider an alternative to libevent if the need arises.\n   *\n   * @return the underlying libevent structure.\n   */\n  event_base& base() { return *libevent_; }\n\n  /**\n   * Register callback to be called in the event loop prior to polling for\n   * events. Must not be called more than once. 
|callback| must not be null.\n   * |callback| cannot be unregistered, therefore it has to be valid throughout\n   * the lifetime of |this|.\n   */\n  void registerOnPrepareCallback(OnPrepareCallback&& callback);\n\n  /**\n   * Start writing stats once thread-local storage is ready to receive them (see\n   * ThreadLocalStoreImpl::initializeThreading).\n   */\n  void initializeStats(DispatcherStats* stats);\n\nprivate:\n  static void onPrepareForCallback(evwatch*, const evwatch_prepare_cb_info* info, void* arg);\n  static void onPrepareForStats(evwatch*, const evwatch_prepare_cb_info* info, void* arg);\n  static void onCheckForStats(evwatch*, const evwatch_check_cb_info*, void* arg);\n\n  static constexpr int flagsBasedOnEventType() {\n    if constexpr (Event::PlatformDefaultTriggerType == FileTriggerType::Level) {\n      // On Windows, EVLOOP_NONBLOCK will cause the libevent event_base_loop to run forever.\n      // This is because libevent only supports level triggering on Windows, and so the write\n      // event callbacks will trigger every time through the loop. Adding EVLOOP_ONCE ensures the\n      // loop will run at most once\n      return EVLOOP_NONBLOCK | EVLOOP_ONCE;\n    }\n    return EVLOOP_NONBLOCK;\n  }\n\n  Libevent::BasePtr libevent_;\n  DispatcherStats* stats_{}; // stats owned by the containing DispatcherImpl\n  bool timeout_set_{};       // whether there is a poll timeout in the current event loop iteration\n  timeval timeout_{};        // the poll timeout for the current event loop iteration, if available\n  timeval prepare_time_{};   // timestamp immediately before polling\n  timeval check_time_{};     // timestamp immediately after polling\n  OnPrepareCallback callback_; // callback to be called from onPrepareForCallback()\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/real_time_system.cc",
    "content": "#include \"common/event/real_time_system.h\"\n\n#include <chrono>\n\n#include \"common/common/assert.h\"\n#include \"common/event/timer_impl.h\"\n\nnamespace Envoy {\nnamespace Event {\nnamespace {\n\nclass RealScheduler : public Scheduler {\npublic:\n  RealScheduler(Scheduler& base_scheduler) : base_scheduler_(base_scheduler) {}\n  TimerPtr createTimer(const TimerCb& cb, Dispatcher& d) override {\n    return base_scheduler_.createTimer(cb, d);\n  };\n\nprivate:\n  Scheduler& base_scheduler_;\n};\n\n} // namespace\n\nSchedulerPtr RealTimeSystem::createScheduler(Scheduler& base_scheduler, CallbackScheduler&) {\n  return std::make_unique<RealScheduler>(base_scheduler);\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/real_time_system.h",
    "content": "#pragma once\n\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * Real-world time implementation of TimeSystem.\n */\nclass RealTimeSystem : public TimeSystem {\npublic:\n  // TimeSystem\n  SchedulerPtr createScheduler(Scheduler&, CallbackScheduler&) override;\n\n  // TimeSource\n  SystemTime systemTime() override { return time_source_.systemTime(); }\n  MonotonicTime monotonicTime() override { return time_source_.monotonicTime(); }\n\nprivate:\n  RealTimeSource time_source_;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/scaled_range_timer_manager.cc",
    "content": "#include \"common/event/scaled_range_timer_manager.h\"\n\n#include <chrono>\n#include <cmath>\n#include <memory>\n\n#include \"envoy/event/range_timer.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/scope_tracker.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * Implementation of RangeTimer that can be scaled by the backing manager object.\n *\n * Instances of this class exist in one of 3 states:\n *  - inactive: not enabled\n *  - waiting-for-min: enabled, min timeout not elapsed\n *  - scaling-max: enabled, min timeout elapsed, max timeout not elapsed\n *\n * The allowed state transitions are:\n *  - inactive -> waiting-for-min\n *  - waiting-for-min -> scaling-max | inactive\n *  - scaling-max -> inactive\n *\n * Some methods combine multiple state transitions; enableTimer(0, max) on a\n * timer in the scaling-max state will logically execute the transition sequence\n * [scaling-max -> inactive -> waiting-for-min -> scaling-max] in a single\n * method call. 
The waiting-for-min transitions are elided for efficiency.\n */\nclass ScaledRangeTimerManager::RangeTimerImpl final : public RangeTimer {\npublic:\n  RangeTimerImpl(TimerCb callback, ScaledRangeTimerManager& manager)\n      : manager_(manager), callback_(std::move(callback)),\n        min_duration_timer_(manager.dispatcher_.createTimer([this] { onMinTimerComplete(); })) {}\n\n  ~RangeTimerImpl() override { disableTimer(); }\n\n  void disableTimer() override {\n    struct Dispatch {\n      Dispatch(RangeTimerImpl& timer) : timer_(timer) {}\n      RangeTimerImpl& timer_;\n      void operator()(const Inactive&) {}\n      void operator()(const WaitingForMin&) { timer_.min_duration_timer_->disableTimer(); }\n      void operator()(ScalingMax& active) { timer_.manager_.removeTimer(active.handle_); }\n    };\n    absl::visit(Dispatch(*this), state_);\n    state_.emplace<Inactive>();\n    scope_ = nullptr;\n  }\n\n  void enableTimer(const std::chrono::milliseconds min_ms, const std::chrono::milliseconds max_ms,\n                   const ScopeTrackedObject* scope) override {\n    disableTimer();\n    scope_ = scope;\n    ENVOY_LOG_MISC(trace, \"enableTimer called on {} for ({}ms, {}ms)\", static_cast<void*>(this),\n                   min_ms.count(), max_ms.count());\n    if (min_ms <= std::chrono::milliseconds::zero()) {\n      // If the min duration is zero (or negative), skip the waiting-for-min state and go\n      // straight to the scaling-max state.\n      auto handle = manager_.activateTimer(max_ms, *this);\n      state_.emplace<ScalingMax>(handle);\n    } else {\n      state_.emplace<WaitingForMin>(max_ms - min_ms);\n      min_duration_timer_->enableTimer(min_ms);\n    }\n  }\n\n  bool enabled() override { return !absl::holds_alternative<Inactive>(state_); }\n\n  void trigger() {\n    ASSERT(manager_.dispatcher_.isThreadSafe());\n    ASSERT(!absl::holds_alternative<Inactive>(state_));\n    ENVOY_LOG_MISC(trace, \"RangeTimerImpl triggered: {}\", 
static_cast<void*>(this));\n    state_.emplace<Inactive>();\n    if (scope_ == nullptr) {\n      callback_();\n    } else {\n      ScopeTrackerScopeState scope(scope_, manager_.dispatcher_);\n      scope_ = nullptr;\n      callback_();\n    }\n  }\n\nprivate:\n  struct Inactive {};\n\n  struct WaitingForMin {\n    WaitingForMin(std::chrono::milliseconds scalable_duration)\n        : scalable_duration_(scalable_duration) {}\n\n    // The amount of time between this enabled timer's max and min, which should\n    // be scaled by the current scale factor.\n    const std::chrono::milliseconds scalable_duration_;\n  };\n\n  struct ScalingMax {\n    ScalingMax(ScaledRangeTimerManager::ScalingTimerHandle handle) : handle_(handle) {}\n\n    // A handle that can be used to disable the timer.\n    ScaledRangeTimerManager::ScalingTimerHandle handle_;\n  };\n\n  /**\n   * This is called when the min timer expires, on the dispatcher for the manager. It registers with\n   * the manager so the duration can be scaled, unless the duration is zero in which case it just\n   * triggers the callback right away.\n   */\n  void onMinTimerComplete() {\n    ASSERT(manager_.dispatcher_.isThreadSafe());\n    ENVOY_LOG_MISC(info, \"min timer complete for {}\", static_cast<void*>(this));\n    ASSERT(absl::holds_alternative<WaitingForMin>(state_));\n    const WaitingForMin& waiting = absl::get<WaitingForMin>(state_);\n\n    // If the scalable portion of the duration is negative, there is no remaining time to\n    // scale, so trigger the callback immediately instead of entering a scaling queue.\n    if (waiting.scalable_duration_ < std::chrono::milliseconds::zero()) {\n      trigger();\n    } else {\n      state_.emplace<ScalingMax>(manager_.activateTimer(waiting.scalable_duration_, *this));\n    }\n  }\n\n  ScaledRangeTimerManager& manager_;\n  const TimerCb callback_;\n  const TimerPtr min_duration_timer_;\n\n  absl::variant<Inactive, WaitingForMin, ScalingMax> state_;\n  const ScopeTrackedObject* scope_;\n};\n\nScaledRangeTimerManager::ScaledRangeTimerManager(Dispatcher& dispatcher)\n    : dispatcher_(dispatcher), scale_factor_(1.0) 
{}\n\nScaledRangeTimerManager::~ScaledRangeTimerManager() {\n  // Scaled timers created by the manager shouldn't outlive it. This is\n  // necessary but not sufficient to guarantee that.\n  ASSERT(queues_.empty());\n}\n\nRangeTimerPtr ScaledRangeTimerManager::createTimer(TimerCb callback) {\n  return std::make_unique<RangeTimerImpl>(callback, *this);\n}\n\nvoid ScaledRangeTimerManager::setScaleFactor(double scale_factor) {\n  const MonotonicTime now = dispatcher_.approximateMonotonicTime();\n  scale_factor_ = DurationScaleFactor(scale_factor);\n  for (auto& queue : queues_) {\n    resetQueueTimer(*queue, now);\n  }\n}\n\nScaledRangeTimerManager::Queue::Item::Item(RangeTimerImpl& timer, MonotonicTime active_time)\n    : timer_(timer), active_time_(active_time) {}\n\nScaledRangeTimerManager::Queue::Queue(std::chrono::milliseconds duration,\n                                      ScaledRangeTimerManager& manager, Dispatcher& dispatcher)\n    : duration_(duration),\n      timer_(dispatcher.createTimer([this, &manager] { manager.onQueueTimerFired(*this); })) {}\n\nScaledRangeTimerManager::ScalingTimerHandle::ScalingTimerHandle(Queue& queue,\n                                                                Queue::Iterator iterator)\n    : queue_(queue), iterator_(iterator) {}\n\nScaledRangeTimerManager::DurationScaleFactor::DurationScaleFactor(double value)\n    : value_(std::max(0.0, std::min(value, 1.0))) {}\n\nMonotonicTime ScaledRangeTimerManager::computeTriggerTime(const Queue::Item& item,\n                                                          std::chrono::milliseconds duration,\n                                                          DurationScaleFactor scale_factor) {\n  return item.active_time_ +\n         std::chrono::duration_cast<MonotonicTime::duration>(duration * scale_factor.value());\n}\n\nScaledRangeTimerManager::ScalingTimerHandle\nScaledRangeTimerManager::activateTimer(std::chrono::milliseconds duration,\n                                       
RangeTimerImpl& range_timer) {\n  // Ensure this is being called on the same dispatcher.\n  ASSERT(dispatcher_.isThreadSafe());\n\n  // Find the matching queue for the (max - min) duration of the range timer; if there isn't one,\n  // create it.\n  auto it = queues_.find(duration);\n  if (it == queues_.end()) {\n    auto queue = std::make_unique<Queue>(duration, *this, dispatcher_);\n    it = queues_.emplace(std::move(queue)).first;\n  }\n  Queue& queue = **it;\n\n  // Put the timer at the back of the queue. Since the timer has the same maximum duration as all\n  // the other timers in the queue, and since the activation times are monotonic, the queue stays in\n  // sorted order.\n  queue.range_timers_.emplace_back(range_timer, dispatcher_.approximateMonotonicTime());\n  if (queue.range_timers_.size() == 1) {\n    resetQueueTimer(queue, dispatcher_.approximateMonotonicTime());\n  }\n\n  return ScalingTimerHandle(queue, --queue.range_timers_.end());\n}\n\nvoid ScaledRangeTimerManager::removeTimer(ScalingTimerHandle handle) {\n  // Ensure this is being called on the same dispatcher.\n  ASSERT(dispatcher_.isThreadSafe());\n\n  const bool was_front = handle.queue_.range_timers_.begin() == handle.iterator_;\n  handle.queue_.range_timers_.erase(handle.iterator_);\n  // Don't keep around empty queues\n  if (handle.queue_.range_timers_.empty()) {\n    queues_.erase(handle.queue_);\n    return;\n  }\n\n  // The queue's timer tracks the expiration time of the first range timer, so it only needs\n  // adjusting if the first timer is the one that was removed.\n  if (was_front) {\n    resetQueueTimer(handle.queue_, dispatcher_.approximateMonotonicTime());\n  }\n}\n\nvoid ScaledRangeTimerManager::resetQueueTimer(Queue& queue, MonotonicTime now) {\n  ASSERT(!queue.range_timers_.empty());\n  const MonotonicTime trigger_time =\n      computeTriggerTime(queue.range_timers_.front(), queue.duration_, scale_factor_);\n  if (trigger_time < now) {\n    
queue.timer_->enableTimer(std::chrono::milliseconds::zero());\n  } else {\n    queue.timer_->enableTimer(\n        std::chrono::duration_cast<std::chrono::milliseconds>(trigger_time - now));\n  }\n}\n\nvoid ScaledRangeTimerManager::onQueueTimerFired(Queue& queue) {\n  auto& timers = queue.range_timers_;\n  ASSERT(!timers.empty());\n  const MonotonicTime now = dispatcher_.approximateMonotonicTime();\n\n  // Pop and trigger timers until the one at the front isn't supposed to have expired yet (given the\n  // current scale factor).\n  while (!timers.empty() &&\n         computeTriggerTime(timers.front(), queue.duration_, scale_factor_) <= now) {\n    auto item = std::move(queue.range_timers_.front());\n    queue.range_timers_.pop_front();\n    item.timer_.trigger();\n  }\n\n  if (queue.range_timers_.empty()) {\n    // Maintain the invariant that queues are never empty.\n    queues_.erase(queue);\n  } else {\n    resetQueueTimer(queue, now);\n  }\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/scaled_range_timer_manager.h",
    "content": "#include <chrono>\n#include <stack>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/range_timer.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * Class for creating RangeTimer objects that can be adjusted towards either the minimum or maximum\n * of their range by the owner of the manager object. Users of this class can call createTimer() to\n * receive a new RangeTimer object that they can then enable or disable at will (but only on the\n * same dispatcher), and setScaleFactor() to change the scaling factor. The current scale factor is\n * applied to all timers, including those that are created later.\n *\n * Internally, the manager uses a set of queues to track timers. When an enabled timer reaches its\n * min duration, it adds a tracker object to the queue corresponding to the duration (max - min).\n * Each queue tracks timers of only a single duration, and uses a real Timer object to schedule the\n * expiration of the first timer in the queue. The expectation is that the number of (max - min)\n * values used to enable timers is small, so the number of queues is tightly bounded. The\n * queue-based implementation depends on that expectation for efficient operation.\n */\nclass ScaledRangeTimerManager {\npublic:\n  explicit ScaledRangeTimerManager(Dispatcher& dispatcher);\n  ~ScaledRangeTimerManager();\n\n  /**\n   * Creates a new range timer backed by the manager. The returned timer will be subject to the\n   * current and future scale factor values set on the manager. All returned timers must be deleted\n   * before the manager.\n   */\n  RangeTimerPtr createTimer(TimerCb callback);\n\n  /**\n   * Sets the scale factor for all timers created through this manager. The value should be between\n   * 0 and 1, inclusive. The scale factor affects the amount of time timers spend in their target\n   * range. 
The RangeTimers returned by createTimer will fire after (min + (max - min) *\n   * scale_factor). This means that a scale factor of 0 causes timers to fire immediately at the min\n   * duration, a factor of 0.5 causes firing halfway between min and max, and a factor of 1 causes\n   * firing at max.\n   */\n  void setScaleFactor(double scale_factor);\n\nprivate:\n  class RangeTimerImpl;\n\n  // A queue object that maintains a list of timers with the same (max - min) values.\n  struct Queue {\n    struct Item {\n      Item(RangeTimerImpl& timer, MonotonicTime active_time);\n      // The timer owned by the caller being kept in the queue.\n      RangeTimerImpl& timer_;\n      // The time at which the timer became active (when its min duration expired).\n      MonotonicTime active_time_;\n    };\n\n    // Typedef for convenience.\n    using Iterator = std::list<Item>::iterator;\n\n    Queue(std::chrono::milliseconds duration, ScaledRangeTimerManager& manager,\n          Dispatcher& dispatcher);\n\n    // The (max - min) value for all timers in range_timers_.\n    const std::chrono::milliseconds duration_;\n\n    // The list of active timers in this queue. This is implemented as a\n    // std::list so that the iterators held in ScalingTimerHandle instances are\n    // not invalidated by removal or insertion of other timers. The timers in\n    // the list are in sorted order by active_time_ because they are only\n    // inserted at the end of the list, and the time is monotonically increasing.\n    std::list<Item> range_timers_;\n\n    // A real Timer that tracks the expiration time of the first timer in the queue. 
This gets\n    // adjusted\n    //   1) at queue creation time\n    //   2) on expiration\n    //   3) when the scale factor changes\n    const TimerPtr timer_;\n  };\n\n  /**\n   * An object passed back to RangeTimerImpl that can be used to remove it from its queue.\n   */\n  struct ScalingTimerHandle {\n    ScalingTimerHandle(Queue& queue, Queue::Iterator iterator);\n    Queue& queue_;\n    Queue::Iterator iterator_;\n  };\n\n  // A simple wrapper around a float that ensures value() is sane (in the range [0, 1]).\n  class DurationScaleFactor {\n  public:\n    DurationScaleFactor(double value);\n    double value() const { return value_; }\n\n  private:\n    double value_;\n  };\n\n  struct Hash {\n    // Magic declaration to allow heterogeneous lookup.\n    using is_transparent = void; // NOLINT(readability-identifier-naming)\n\n    size_t operator()(const std::chrono::milliseconds duration) const {\n      return hash_(duration.count());\n    }\n    size_t operator()(const Queue& queue) const { return (*this)(queue.duration_); }\n    size_t operator()(const std::unique_ptr<Queue>& queue) const { return (*this)(*queue); }\n    std::hash<std::chrono::milliseconds::rep> hash_;\n  };\n\n  struct Eq {\n    // Magic declaration to allow heterogeneous lookup.\n    using is_transparent = void; // NOLINT(readability-identifier-naming)\n\n    bool operator()(const std::unique_ptr<Queue>& lhs, std::chrono::milliseconds rhs) const {\n      return lhs->duration_ == rhs;\n    }\n    bool operator()(const std::unique_ptr<Queue>& lhs, const Queue& rhs) const {\n      return (*this)(lhs, rhs.duration_);\n    }\n    bool operator()(const std::unique_ptr<Queue>& lhs, const std::unique_ptr<Queue>& rhs) const {\n      return (*this)(lhs, *rhs);\n    }\n  };\n\n  static MonotonicTime computeTriggerTime(const Queue::Item& item,\n                                          std::chrono::milliseconds duration,\n                                          DurationScaleFactor scale_factor);\n\n  
ScalingTimerHandle activateTimer(std::chrono::milliseconds duration, RangeTimerImpl& timer);\n\n  void removeTimer(ScalingTimerHandle handle);\n\n  void resetQueueTimer(Queue& queue, MonotonicTime now);\n\n  void onQueueTimerFired(Queue& queue);\n\n  Dispatcher& dispatcher_;\n  DurationScaleFactor scale_factor_;\n  absl::flat_hash_set<std::unique_ptr<Queue>, Hash, Eq> queues_;\n};\n\n} // namespace Event\n} // namespace Envoy"
  },
  {
    "path": "source/common/event/schedulable_cb_impl.cc",
    "content": "#include \"common/event/schedulable_cb_impl.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"event2/event.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nSchedulableCallbackImpl::SchedulableCallbackImpl(Libevent::BasePtr& libevent,\n                                                 std::function<void()> cb)\n    : cb_(cb) {\n  ASSERT(cb_);\n  evtimer_assign(\n      &raw_event_, libevent.get(),\n      [](evutil_socket_t, short, void* arg) -> void {\n        SchedulableCallbackImpl* cb = static_cast<SchedulableCallbackImpl*>(arg);\n        cb->cb_();\n      },\n      this);\n}\n\nvoid SchedulableCallbackImpl::scheduleCallbackCurrentIteration() {\n  if (enabled()) {\n    return;\n  }\n  // event_active directly adds the event to the end of the work queue so it executes in the current\n  // iteration of the event loop.\n  event_active(&raw_event_, EV_TIMEOUT, 0);\n}\n\nvoid SchedulableCallbackImpl::scheduleCallbackNextIteration() {\n  if (enabled()) {\n    return;\n  }\n  // libevent computes the list of timers to move to the work list after polling for fd events, but\n  // iteration through the work list starts. Zero delay timers added while iterating through the\n  // work list execute on the next iteration of the event loop.\n  const timeval zero_tv{};\n  event_add(&raw_event_, &zero_tv);\n}\n\nvoid SchedulableCallbackImpl::cancel() { event_del(&raw_event_); }\n\nbool SchedulableCallbackImpl::enabled() { return 0 != evtimer_pending(&raw_event_, nullptr); }\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/schedulable_cb_impl.h",
    "content": "#pragma once\n\n#include \"envoy/event/schedulable_cb.h\"\n\n#include \"common/event/event_impl_base.h\"\n#include \"common/event/libevent.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nclass DispatcherImpl;\n\n/**\n * libevent implementation of SchedulableCallback.\n */\nclass SchedulableCallbackImpl : public SchedulableCallback, ImplBase {\npublic:\n  SchedulableCallbackImpl(Libevent::BasePtr& libevent, std::function<void()> cb);\n\n  // SchedulableCallback implementation.\n  void scheduleCallbackCurrentIteration() override;\n  void scheduleCallbackNextIteration() override;\n  void cancel() override;\n  bool enabled() override;\n\nprivate:\n  std::function<void()> cb_;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/signal_impl.cc",
    "content": "#include \"common/event/signal_impl.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n\n#include \"event2/event.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nSignalEventImpl::SignalEventImpl(DispatcherImpl& dispatcher, int signal_num, SignalCb cb)\n    : cb_(cb) {\n  evsignal_assign(\n      &raw_event_, &dispatcher.base(), signal_num,\n      [](evutil_socket_t, short, void* arg) -> void { static_cast<SignalEventImpl*>(arg)->cb_(); },\n      this);\n  evsignal_add(&raw_event_, nullptr);\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/signal_impl.h",
    "content": "#pragma once\n\n#include \"envoy/event/signal.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/event/event_impl_base.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * libevent implementation of Event::SignalEvent.\n */\nclass SignalEventImpl : public SignalEvent, ImplBase {\npublic:\n  SignalEventImpl(DispatcherImpl& dispatcher, int signal_num, SignalCb cb);\n\nprivate:\n  SignalCb cb_;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/timer_impl.cc",
    "content": "#include \"common/event/timer_impl.h\"\n\n#include <chrono>\n\n#include \"common/common/assert.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"event2/event.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nTimerImpl::TimerImpl(Libevent::BasePtr& libevent, TimerCb cb, Dispatcher& dispatcher)\n    : cb_(cb), dispatcher_(dispatcher),\n      activate_timers_next_event_loop_(\n          // Only read the runtime feature if the runtime loader singleton has already been created.\n          // Accessing runtime features too early in the initialization sequence triggers logging\n          // and the logging code itself depends on the use of timers. Attempts to log while\n          // initializing the logging subsystem will result in a crash.\n          Runtime::LoaderSingleton::getExisting()\n              ? Runtime::runtimeFeatureEnabled(\n                    \"envoy.reloadable_features.activate_timers_next_event_loop\")\n              : true) {\n  ASSERT(cb_);\n  evtimer_assign(\n      &raw_event_, libevent.get(),\n      [](evutil_socket_t, short, void* arg) -> void {\n        TimerImpl* timer = static_cast<TimerImpl*>(arg);\n        if (timer->object_ == nullptr) {\n          timer->cb_();\n          return;\n        }\n        ScopeTrackerScopeState scope(timer->object_, timer->dispatcher_);\n        timer->object_ = nullptr;\n        timer->cb_();\n      },\n      this);\n}\n\nvoid TimerImpl::disableTimer() { event_del(&raw_event_); }\n\nvoid TimerImpl::enableTimer(const std::chrono::milliseconds d, const ScopeTrackedObject* object) {\n  timeval tv;\n  TimerUtils::durationToTimeval(d, tv);\n  internalEnableTimer(tv, object);\n}\n\nvoid TimerImpl::enableHRTimer(const std::chrono::microseconds d,\n                              const ScopeTrackedObject* object = nullptr) {\n  timeval tv;\n  TimerUtils::durationToTimeval(d, tv);\n  internalEnableTimer(tv, object);\n}\n\nvoid TimerImpl::internalEnableTimer(const timeval& tv, const 
ScopeTrackedObject* object) {\n  object_ = object;\n\n  if (!activate_timers_next_event_loop_ && tv.tv_sec == 0 && tv.tv_usec == 0) {\n    event_active(&raw_event_, EV_TIMEOUT, 0);\n  } else {\n    event_add(&raw_event_, &tv);\n  }\n}\n\nbool TimerImpl::enabled() { return 0 != evtimer_pending(&raw_event_, nullptr); }\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/event/timer_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/scope_tracker.h\"\n#include \"common/common/utility.h\"\n#include \"common/event/event_impl_base.h\"\n#include \"common/event/libevent.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * Utility helper functions for Timer implementation.\n */\nclass TimerUtils {\npublic:\n  /**\n   * Intended for consumption by enable(HR)Timer, this method is templated method to avoid implicit\n   * duration conversions for its input arguments. This lets us have an opportunity to check bounds\n   * before doing any conversions. When the passed in duration exceeds INT32_MAX max seconds, the\n   * output will be clipped to yield INT32_MAX seconds and 0 microseconds for the\n   * output argument. We clip to INT32_MAX to guard against overflowing the timeval structure.\n   * Throws an EnvoyException on negative duration input.\n   * @tparam Duration std::chrono duration type, e.g. seconds, milliseconds, ...\n   * @param d duration value\n   * @param tv output parameter that will be updated\n   */\n  template <typename Duration> static void durationToTimeval(const Duration& d, timeval& tv) {\n    if (d.count() < 0) {\n      ExceptionUtil::throwEnvoyException(\n          fmt::format(\"Negative duration passed to durationToTimeval(): {}\", d.count()));\n    };\n    constexpr int64_t clip_to = INT32_MAX; // 136.102208 years\n    auto secs = std::chrono::duration_cast<std::chrono::seconds>(d);\n    if (secs.count() > clip_to) {\n      tv.tv_sec = clip_to;\n      tv.tv_usec = 0;\n      return;\n    }\n\n    auto usecs = std::chrono::duration_cast<std::chrono::microseconds>(d - secs);\n    tv.tv_sec = secs.count();\n    tv.tv_usec = usecs.count();\n  }\n};\n\n/**\n * libevent implementation of Timer.\n */\nclass TimerImpl : public Timer, ImplBase {\npublic:\n  TimerImpl(Libevent::BasePtr& libevent, TimerCb cb, Event::Dispatcher& dispatcher);\n\n  // Timer\n  void disableTimer() 
override;\n\n  void enableTimer(std::chrono::milliseconds d, const ScopeTrackedObject* scope) override;\n  void enableHRTimer(std::chrono::microseconds us, const ScopeTrackedObject* object) override;\n\n  bool enabled() override;\n\nprivate:\n  void internalEnableTimer(const timeval& tv, const ScopeTrackedObject* scope);\n  TimerCb cb_;\n  Dispatcher& dispatcher_;\n  // This has to be atomic for alarms which are handled out of thread, for\n  // example if the DispatcherImpl::post is called by two threads, they race to\n  // both set this to null.\n  std::atomic<const ScopeTrackedObject*> object_{};\n\n  // Latched \"envoy.reloadable_features.activate_timers_next_event_loop\" runtime feature. If true,\n  // timers scheduled with a 0 time delta are evaluated in the next iteration of the event loop\n  // after polling and activating new fd events.\n  const bool activate_timers_next_event_loop_;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_cc_platform_dep\",\n    \"envoy_cc_posix_library\",\n    \"envoy_cc_win32_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"directory_lib\",\n    hdrs = [\"directory.h\"],\n    deps = envoy_cc_platform_dep(\"directory_iterator_impl_lib\"),\n)\n\nenvoy_cc_win32_library(\n    name = \"directory_iterator_impl_lib\",\n    srcs = [\"win32/directory_iterator_impl.cc\"],\n    hdrs = [\"win32/directory_iterator_impl.h\"],\n    strip_include_prefix = \"win32\",\n    deps = [\n        \"//include/envoy/filesystem:filesystem_interface\",\n    ],\n)\n\nenvoy_cc_posix_library(\n    name = \"directory_iterator_impl_lib\",\n    srcs = [\"posix/directory_iterator_impl.cc\"],\n    hdrs = [\"posix/directory_iterator_impl.h\"],\n    strip_include_prefix = \"posix\",\n    deps = [\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filesystem_lib\",\n    deps = envoy_cc_platform_dep(\"filesystem_impl_lib\"),\n)\n\nenvoy_cc_posix_library(\n    name = \"filesystem_impl_lib\",\n    srcs = [\"posix/filesystem_impl.cc\"],\n    hdrs = [\"posix/filesystem_impl.h\"],\n    strip_include_prefix = \"posix\",\n    deps = [\n        \":file_shared_lib\",\n    ],\n)\n\nenvoy_cc_win32_library(\n    name = \"filesystem_impl_lib\",\n    srcs = [\"win32/filesystem_impl.cc\"],\n    hdrs = [\"win32/filesystem_impl.h\"],\n    strip_include_prefix = \"win32\",\n    deps = [\n        \":file_shared_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"file_shared_lib\",\n    srcs = [\"file_shared_impl.cc\"],\n    hdrs = [\"file_shared_impl.h\"],\n    deps = [\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//source/common/common:assert_lib\",\n        
\"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"watcher_lib\",\n    srcs = select({\n        \"//bazel:apple\": [\n            \"kqueue/watcher_impl.cc\",\n        ],\n        \"//bazel:windows_x86_64\": [\n            \"win32/watcher_impl.cc\",\n        ],\n        \"//conditions:default\": [\n            \"inotify/watcher_impl.cc\",\n        ],\n    }),\n    hdrs = select({\n        \"//bazel:apple\": [\n            \"kqueue/watcher_impl.h\",\n        ],\n        \"//bazel:windows_x86_64\": [\n            \"win32/watcher_impl.h\",\n        ],\n        \"//conditions:default\": [\n            \"inotify/watcher_impl.h\",\n        ],\n    }),\n    external_deps = [\n        \"event\",\n    ],\n    strip_include_prefix = select({\n        \"//bazel:apple\": \"kqueue\",\n        \"//bazel:windows_x86_64\": \"win32\",\n        \"//conditions:default\": \"inotify\",\n    }),\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n    ] + select({\n        \"//bazel:windows_x86_64\": [\n            \"//source/common/api:os_sys_calls_lib\",\n            \"//source/common/common:thread_lib\",\n        ],\n        \"//conditions:default\": [],\n    }),\n)\n"
  },
  {
    "path": "source/common/filesystem/directory.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/filesystem/filesystem.h\"\n\n#include \"common/filesystem/directory_iterator_impl.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\n// This class does not do any validation of input data. Do not use with untrusted inputs.\nclass Directory {\npublic:\n  Directory(const std::string& directory_path) : directory_path_(directory_path) {}\n\n  DirectoryIteratorImpl begin() { return DirectoryIteratorImpl(directory_path_); }\n\n  DirectoryIteratorImpl end() { return DirectoryIteratorImpl(); }\n\nprivate:\n  const std::string directory_path_;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/file_shared_impl.cc",
    "content": "#include \"common/filesystem/file_shared_impl.h\"\n\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nApi::IoError::IoErrorCode IoFileError::getErrorCode() const {\n  switch (errno_) {\n  case HANDLE_ERROR_PERM:\n    return IoErrorCode::Permission;\n  case HANDLE_ERROR_INVALID:\n    return IoErrorCode::BadFd;\n  default:\n    ENVOY_LOG_MISC(debug, \"Unknown error code {} details {}\", errno_, getErrorDetails());\n    return IoErrorCode::UnknownError;\n  }\n}\n\nstd::string IoFileError::getErrorDetails() const { return errorDetails(errno_); }\n\nbool FileSharedImpl::isOpen() const { return fd_ != INVALID_HANDLE; };\n\nstd::string FileSharedImpl::path() const { return path_; };\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/file_shared_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/filesystem/filesystem.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nclass IoFileError : public Api::IoError {\npublic:\n  explicit IoFileError(int sys_errno) : errno_(sys_errno) {}\n\n  ~IoFileError() override = default;\n\n  Api::IoError::IoErrorCode getErrorCode() const override;\n  std::string getErrorDetails() const override;\n\nprivate:\n  const int errno_;\n};\n\nusing IoFileErrorPtr = std::unique_ptr<IoFileError, Api::IoErrorDeleterType>;\n\ntemplate <typename T> Api::IoCallResult<T> resultFailure(T result, int sys_errno) {\n  return {result, IoFileErrorPtr(new IoFileError(sys_errno), [](Api::IoError* err) {\n            ASSERT(err != nullptr);\n            delete err;\n          })};\n}\n\ntemplate <typename T> Api::IoCallResult<T> resultSuccess(T result) {\n  return {result, IoFileErrorPtr(nullptr, [](Api::IoError*) { NOT_REACHED_GCOVR_EXCL_LINE; })};\n}\n\nclass FileSharedImpl : public File {\npublic:\n  FileSharedImpl(std::string path) : fd_(INVALID_HANDLE), path_(std::move(path)) {}\n\n  ~FileSharedImpl() override = default;\n\n  bool isOpen() const override;\n  std::string path() const override;\n\nprotected:\n  filesystem_os_id_t fd_;\n  const std::string path_;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/inotify/watcher_impl.cc",
    "content": "#include <sys/inotify.h>\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/file_event.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/filesystem/watcher_impl.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nWatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher, Api::Api& api)\n    : api_(api), inotify_fd_(inotify_init1(IN_NONBLOCK)),\n      inotify_event_(dispatcher.createFileEvent(\n          inotify_fd_,\n          [this](uint32_t events) -> void {\n            ASSERT(events == Event::FileReadyType::Read);\n            onInotifyEvent();\n          },\n          Event::FileTriggerType::Edge, Event::FileReadyType::Read)) {\n  RELEASE_ASSERT(inotify_fd_ >= 0,\n                 \"Consider increasing value of user.max_inotify_watches via sysctl\");\n}\n\nWatcherImpl::~WatcherImpl() { close(inotify_fd_); }\n\nvoid WatcherImpl::addWatch(absl::string_view path, uint32_t events, OnChangedCb callback) {\n  // Because of general inotify pain, we always watch the directory that the file lives in,\n  // and then synthetically raise per file events.\n  const PathSplitResult result = api_.fileSystem().splitPathFromFilename(path);\n\n  const uint32_t watch_mask = IN_MODIFY | IN_MOVED_TO;\n  int watch_fd = inotify_add_watch(inotify_fd_, std::string(result.directory_).c_str(), watch_mask);\n  if (watch_fd == -1) {\n    throw EnvoyException(\n        fmt::format(\"unable to add filesystem watch for file {}: {}\", path, errorDetails(errno)));\n  }\n\n  ENVOY_LOG(debug, \"added watch for directory: '{}' file: '{}' fd: {}\", result.directory_,\n            result.file_, watch_fd);\n  callback_map_[watch_fd].watches_.push_back({std::string(result.file_), events, callback});\n}\n\nvoid WatcherImpl::onInotifyEvent() {\n  while (true) {\n    uint8_t 
buffer[sizeof(inotify_event) + NAME_MAX + 1];\n    ssize_t rc = read(inotify_fd_, &buffer, sizeof(buffer));\n    if (rc == -1 && errno == EAGAIN) {\n      return;\n    }\n    RELEASE_ASSERT(rc >= 0, \"\");\n\n    const size_t event_count = rc;\n    size_t index = 0;\n    while (index < event_count) {\n      auto* file_event = reinterpret_cast<inotify_event*>(&buffer[index]);\n      ASSERT(callback_map_.count(file_event->wd) == 1);\n\n      std::string file;\n      if (file_event->len > 0) {\n        file.assign(file_event->name);\n      }\n\n      ENVOY_LOG(debug, \"notification: fd: {} mask: {:x} file: {}\", file_event->wd, file_event->mask,\n                file);\n\n      uint32_t events = 0;\n      if (file_event->mask & IN_MODIFY) {\n        events |= Events::Modified;\n      }\n      if (file_event->mask & IN_MOVED_TO) {\n        events |= Events::MovedTo;\n      }\n\n      for (FileWatch& watch : callback_map_[file_event->wd].watches_) {\n        if (watch.events_ & events) {\n          if (watch.file_ == file) {\n            ENVOY_LOG(debug, \"matched callback: file: {}\", file);\n            watch.cb_(events);\n          } else if (watch.file_.empty()) {\n            ENVOY_LOG(debug, \"matched callback: directory: {}\", file);\n            watch.cb_(events);\n          }\n        }\n      }\n\n      index += sizeof(inotify_event) + file_event->len;\n    }\n  }\n}\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/inotify/watcher_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/filesystem/watcher.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\n/**\n * Implementation of Watcher that uses inotify. inotify is an awful API. In order to make this work\n * in a somewhat sane way we always watch the directory that owns the thing being watched, and then\n * filter for events that are relevant to the thing being watched.\n */\nclass WatcherImpl : public Watcher, Logger::Loggable<Logger::Id::file> {\npublic:\n  WatcherImpl(Event::Dispatcher& dispatcher, Api::Api& api);\n  ~WatcherImpl() override;\n\n  // Filesystem::Watcher\n  void addWatch(absl::string_view path, uint32_t events, OnChangedCb cb) override;\n\nprivate:\n  struct FileWatch {\n    std::string file_;\n    uint32_t events_;\n    OnChangedCb cb_;\n  };\n\n  struct DirectoryWatch {\n    std::list<FileWatch> watches_;\n  };\n\n  void onInotifyEvent();\n\n  Api::Api& api_;\n  int inotify_fd_;\n  Event::FileEventPtr inotify_event_;\n  absl::node_hash_map<int, DirectoryWatch> callback_map_;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/kqueue/watcher_impl.cc",
    "content": "#include <sys/event.h>\n#include <sys/fcntl.h>\n#include <sys/types.h>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/file_event.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/filesystem/watcher_impl.h\"\n\n#include \"event2/event.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nWatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher, Api::Api& api)\n    : api_(api), queue_(kqueue()), kqueue_event_(dispatcher.createFileEvent(\n                                       queue_,\n                                       [this](uint32_t events) -> void {\n                                         if (events & Event::FileReadyType::Read) {\n                                           onKqueueEvent();\n                                         }\n                                       },\n                                       Event::FileTriggerType::Edge, Event::FileReadyType::Read)) {}\n\nWatcherImpl::~WatcherImpl() {\n  close(queue_);\n  watches_.clear();\n}\n\nvoid WatcherImpl::addWatch(absl::string_view path, uint32_t events, Watcher::OnChangedCb cb) {\n  FileWatchPtr watch = addWatch(path, events, cb, false);\n  if (watch == nullptr) {\n    throw EnvoyException(absl::StrCat(\"invalid watch path \", path));\n  }\n}\n\nWatcherImpl::FileWatchPtr WatcherImpl::addWatch(absl::string_view path, uint32_t events,\n                                                Watcher::OnChangedCb cb, bool path_must_exist) {\n  bool watching_dir = false;\n  std::string pathname(path);\n  int watch_fd = open(pathname.c_str(), O_SYMLINK);\n  if (watch_fd == -1) {\n    if (path_must_exist) {\n      return nullptr;\n    }\n\n    watch_fd =\n        open(std::string(api_.fileSystem().splitPathFromFilename(path).directory_).c_str(), 0);\n    if (watch_fd == -1) {\n      return nullptr;\n    }\n\n    watching_dir = true;\n  }\n\n  FileWatchPtr 
watch(new FileWatch());\n  watch->fd_ = watch_fd;\n  watch->file_ = pathname;\n  watch->events_ = events;\n  watch->callback_ = cb;\n  watch->watching_dir_ = watching_dir;\n\n  u_int flags = NOTE_DELETE | NOTE_RENAME | NOTE_WRITE;\n\n  struct kevent event;\n  EV_SET(&event, watch_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, flags, 0,\n         reinterpret_cast<void*>(watch_fd));\n\n  if (kevent(queue_, &event, 1, nullptr, 0, nullptr) == -1 || event.flags & EV_ERROR) {\n    throw EnvoyException(\n        fmt::format(\"unable to add filesystem watch for file {}: {}\", path, errorDetails(errno)));\n  }\n\n  ENVOY_LOG(debug, \"added watch for file: '{}' fd: {}\", path, watch_fd);\n\n  watches_[watch_fd] = watch;\n\n  return watch;\n}\n\nvoid WatcherImpl::removeWatch(FileWatchPtr& watch) {\n  // Removing the map entry closes the fd, which will automatically\n  // unregister the kqueue event.\n  int fd = watch->fd_;\n  watches_.erase(fd);\n}\n\nvoid WatcherImpl::onKqueueEvent() {\n  struct kevent event = {};\n  timespec nullts = {0, 0};\n\n  while (true) {\n    uint32_t events = 0;\n    int nevents = kevent(queue_, nullptr, 0, &event, 1, &nullts);\n    if (nevents < 1 || event.udata == nullptr) {\n      return;\n    }\n\n    int watch_fd = reinterpret_cast<std::intptr_t>(event.udata);\n\n    FileWatchPtr file = watches_[watch_fd];\n    ASSERT(file != nullptr);\n    ASSERT(watch_fd == file->fd_);\n\n    auto pathname = api_.fileSystem().splitPathFromFilename(file->file_);\n\n    if (file->watching_dir_) {\n      if (event.fflags & NOTE_DELETE) {\n        // directory was deleted\n        removeWatch(file);\n        return;\n      }\n\n      if (event.fflags & NOTE_WRITE) {\n        // directory was written -- check if the file we're actually watching appeared\n        FileWatchPtr new_file = addWatch(file->file_, file->events_, file->callback_, true);\n        if (new_file != nullptr) {\n          removeWatch(file);\n          file = new_file;\n\n          events |= 
Events::MovedTo;\n        }\n      }\n    } else if (pathname.file_.empty()) {\n      if (event.fflags & NOTE_WRITE) {\n        events |= Events::MovedTo;\n      }\n    } else {\n      // kqueue doesn't seem to work well with NOTE_RENAME and O_SYMLINK, so instead if we\n      // get a NOTE_DELETE on the symlink we check if there is another file with the same\n      // name we assume a NOTE_RENAME and re-attach another event to the new file.\n      if (event.fflags & NOTE_DELETE) {\n        removeWatch(file);\n\n        FileWatchPtr new_file = addWatch(file->file_, file->events_, file->callback_, true);\n        if (new_file == nullptr) {\n          return;\n        }\n\n        event.fflags |= NOTE_RENAME;\n        file = new_file;\n      }\n\n      if (event.fflags & NOTE_RENAME) {\n        events |= Events::MovedTo;\n      }\n      if (event.fflags & NOTE_WRITE) {\n        events |= Events::Modified;\n      }\n    }\n\n    ENVOY_LOG(debug, \"notification: fd: {} flags: {:x} file: {}\", file->fd_, event.fflags,\n              file->file_);\n\n    if (events & file->events_) {\n      ENVOY_LOG(debug, \"matched callback: file: {}\", file->file_);\n      file->callback_(events);\n    }\n  }\n}\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/kqueue/watcher_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/filesystem/watcher.h\"\n\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\n/**\n * Implementation of Watcher that uses kqueue. If the file being watched doesn't exist, we watch\n * the directory, and then try to add a file watch each time there's a write event to the\n * directory.\n */\nclass WatcherImpl : public Watcher, Logger::Loggable<Logger::Id::file> {\npublic:\n  WatcherImpl(Event::Dispatcher& dispatcher, Api::Api& api);\n  ~WatcherImpl();\n\n  // Filesystem::Watcher\n  void addWatch(absl::string_view path, uint32_t events, OnChangedCb cb) override;\n\nprivate:\n  struct FileWatch : LinkedObject<FileWatch> {\n    ~FileWatch() { close(fd_); }\n\n    int fd_;\n    uint32_t events_;\n    std::string file_;\n    OnChangedCb callback_;\n    bool watching_dir_;\n  };\n\n  using FileWatchPtr = std::shared_ptr<FileWatch>;\n\n  void onKqueueEvent();\n  FileWatchPtr addWatch(absl::string_view path, uint32_t events, Watcher::OnChangedCb cb,\n                        bool pathMustExist);\n  void removeWatch(FileWatchPtr& watch);\n\n  Api::Api& api_;\n  int queue_;\n  absl::node_hash_map<int, FileWatchPtr> watches_;\n  Event::FileEventPtr kqueue_event_;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/posix/directory_iterator_impl.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/filesystem/directory_iterator_impl.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nDirectoryIteratorImpl::DirectoryIteratorImpl(const std::string& directory_path)\n    : directory_path_(directory_path), os_sys_calls_(Api::OsSysCallsSingleton::get()) {\n  openDirectory();\n  nextEntry();\n}\n\nDirectoryIteratorImpl::~DirectoryIteratorImpl() {\n  if (dir_ != nullptr) {\n    ::closedir(dir_);\n  }\n}\n\nDirectoryIteratorImpl& DirectoryIteratorImpl::operator++() {\n  nextEntry();\n  return *this;\n}\n\nvoid DirectoryIteratorImpl::openDirectory() {\n  DIR* temp_dir = ::opendir(directory_path_.c_str());\n  dir_ = temp_dir;\n  if (!dir_) {\n    throw EnvoyException(\n        fmt::format(\"unable to open directory {}: {}\", directory_path_, errorDetails(errno)));\n  }\n}\n\nvoid DirectoryIteratorImpl::nextEntry() {\n  errno = 0;\n  dirent* entry = ::readdir(dir_);\n  if (entry == nullptr && errno != 0) {\n    throw EnvoyException(\n        fmt::format(\"unable to iterate directory {}: {}\", directory_path_, errorDetails(errno)));\n  }\n\n  if (entry == nullptr) {\n    entry_ = {\"\", FileType::Other};\n  } else {\n    const std::string current_path(entry->d_name);\n    const std::string full_path(directory_path_ + \"/\" + current_path);\n    entry_ = {current_path, fileType(full_path)};\n  }\n}\n\nFileType DirectoryIteratorImpl::fileType(const std::string& full_path) const {\n  struct stat stat_buf;\n\n  const Api::SysCallIntResult result = os_sys_calls_.stat(full_path.c_str(), &stat_buf);\n  if (result.rc_ != 0) {\n    if (errno == ENOENT) {\n      // Special case. 
This directory entity is likely to be a symlink,\n      // but the reference is broken as the target could not be stat()'ed.\n      // If we confirm this with an lstat, treat this file entity as\n      // a regular file, which may be unlink()'ed.\n      if (::lstat(full_path.c_str(), &stat_buf) == 0 && S_ISLNK(stat_buf.st_mode)) {\n        return FileType::Regular;\n      }\n    }\n    throw EnvoyException(fmt::format(\"unable to stat file: '{}' ({})\", full_path, errno));\n  }\n\n  if (S_ISDIR(stat_buf.st_mode)) {\n    return FileType::Directory;\n  } else if (S_ISREG(stat_buf.st_mode)) {\n    return FileType::Regular;\n  }\n\n  return FileType::Other;\n}\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/posix/directory_iterator_impl.h",
    "content": "#pragma once\n\n#include <dirent.h>\n\n#include \"envoy/filesystem/filesystem.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nclass DirectoryIteratorImpl : public DirectoryIterator {\npublic:\n  DirectoryIteratorImpl(const std::string& directory_path);\n  DirectoryIteratorImpl() : directory_path_(\"\"), os_sys_calls_(Api::OsSysCallsSingleton::get()) {}\n\n  ~DirectoryIteratorImpl() override;\n\n  DirectoryIteratorImpl& operator++() override;\n\n  // We don't want this iterator to be copied. If the copy gets destructed,\n  // then it will close its copy of the DIR* pointer, which will cause the\n  // original's to be invalid. While we could implement a deep copy constructor to\n  // work around this, it is not needed the moment.\n  DirectoryIteratorImpl(const DirectoryIteratorImpl&) = delete;\n  DirectoryIteratorImpl(DirectoryIteratorImpl&&) = default;\n\nprivate:\n  void nextEntry();\n  void openDirectory();\n  FileType fileType(const std::string& name) const;\n\n  std::string directory_path_;\n  DIR* dir_{nullptr};\n  Api::OsSysCallsImpl& os_sys_calls_;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/posix/filesystem_impl.cc",
    "content": "#include <dirent.h>\n#include <fcntl.h>\n#include <sys/stat.h>\n#include <unistd.h>\n\n#include <cstdlib>\n#include <fstream>\n#include <iostream>\n#include <memory>\n#include <sstream>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/filesystem/filesystem_impl.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nFileImplPosix::~FileImplPosix() {\n  if (isOpen()) {\n    const Api::IoCallBoolResult result = close();\n    ASSERT(result.rc_);\n  }\n}\n\nApi::IoCallBoolResult FileImplPosix::open(FlagSet in) {\n  if (isOpen()) {\n    return resultSuccess(true);\n  }\n\n  const auto flags_and_mode = translateFlag(in);\n  fd_ = ::open(path_.c_str(), flags_and_mode.flags_, flags_and_mode.mode_);\n  return fd_ != -1 ? resultSuccess(true) : resultFailure(false, errno);\n}\n\nApi::IoCallSizeResult FileImplPosix::write(absl::string_view buffer) {\n  const ssize_t rc = ::write(fd_, buffer.data(), buffer.size());\n  return rc != -1 ? resultSuccess(rc) : resultFailure(rc, errno);\n};\n\nApi::IoCallBoolResult FileImplPosix::close() {\n  ASSERT(isOpen());\n  int rc = ::close(fd_);\n  fd_ = -1;\n  return (rc != -1) ? 
resultSuccess(true) : resultFailure(false, errno);\n}\n\nFileImplPosix::FlagsAndMode FileImplPosix::translateFlag(FlagSet in) {\n  int out = 0;\n  mode_t mode = 0;\n  if (in.test(File::Operation::Create)) {\n    out |= O_CREAT;\n    mode |= S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;\n  }\n\n  if (in.test(File::Operation::Append)) {\n    out |= O_APPEND;\n  }\n\n  if (in.test(File::Operation::Read) && in.test(File::Operation::Write)) {\n    out |= O_RDWR;\n  } else if (in.test(File::Operation::Read)) {\n    out |= O_RDONLY;\n  } else if (in.test(File::Operation::Write)) {\n    out |= O_WRONLY;\n  }\n\n  return {out, mode};\n}\n\nFilePtr InstanceImplPosix::createFile(const std::string& path) {\n  return std::make_unique<FileImplPosix>(path);\n}\n\nbool InstanceImplPosix::fileExists(const std::string& path) {\n  std::ifstream input_file(path);\n  return input_file.is_open();\n}\n\nbool InstanceImplPosix::directoryExists(const std::string& path) {\n  DIR* const dir = ::opendir(path.c_str());\n  const bool dir_exists = nullptr != dir;\n  if (dir_exists) {\n    ::closedir(dir);\n  }\n\n  return dir_exists;\n}\n\nssize_t InstanceImplPosix::fileSize(const std::string& path) {\n  struct stat info;\n  if (::stat(path.c_str(), &info) != 0) {\n    return -1;\n  }\n  return info.st_size;\n}\n\nstd::string InstanceImplPosix::fileReadToEnd(const std::string& path) {\n  if (illegalPath(path)) {\n    throw EnvoyException(absl::StrCat(\"Invalid path: \", path));\n  }\n\n  std::ifstream file(path);\n  if (file.fail()) {\n    throw EnvoyException(absl::StrCat(\"unable to read file: \", path));\n  }\n\n  std::stringstream file_string;\n  file_string << file.rdbuf();\n\n  return file_string.str();\n}\n\nPathSplitResult InstanceImplPosix::splitPathFromFilename(absl::string_view path) {\n  size_t last_slash = path.rfind('/');\n  if (last_slash == std::string::npos) {\n    throw EnvoyException(fmt::format(\"invalid file path {}\", path));\n  }\n  absl::string_view name = path.substr(last_slash 
+ 1);\n  // truncate all trailing slashes, except root slash\n  if (last_slash == 0) {\n    ++last_slash;\n  }\n  return {path.substr(0, last_slash), name};\n}\n\nbool InstanceImplPosix::illegalPath(const std::string& path) {\n  // Special case, allow /dev/fd/* access here so that config can be passed in a\n  // file descriptor from a bootstrap script via exec. The reason we do this\n  // _before_ canonicalizing the path is that different unix flavors implement\n  // /dev/fd/* differently, for example on linux they are symlinks to /dev/pts/*\n  // which are symlinks to /proc/self/fds/. On BSD (and darwin) they are not\n  // symlinks at all. To avoid lots of platform, specifics, we allowlist\n  // /dev/fd/* _before_ resolving the canonical path.\n  if (absl::StartsWith(path, \"/dev/fd/\")) {\n    return false;\n  }\n\n  const Api::SysCallStringResult canonical_path = canonicalPath(path);\n  if (canonical_path.rc_.empty()) {\n    ENVOY_LOG_MISC(debug, \"Unable to determine canonical path for {}: {}\", path,\n                   errorDetails(canonical_path.errno_));\n    return true;\n  }\n\n  // Platform specific path sanity; we provide a convenience to avoid Envoy\n  // instances poking in bad places. 
We may have to consider conditioning on\n  // platform in the future, growing these or relaxing some constraints (e.g.\n  // there are valid reasons to go via /proc for file paths).\n  // TODO(htuch): Optimize this as a hash lookup if we grow any further.\n  if (absl::StartsWith(canonical_path.rc_, \"/dev\") ||\n      absl::StartsWith(canonical_path.rc_, \"/sys\") ||\n      absl::StartsWith(canonical_path.rc_, \"/proc\")) {\n    return true;\n  }\n  return false;\n}\n\nApi::SysCallStringResult InstanceImplPosix::canonicalPath(const std::string& path) {\n  char* resolved_path = ::realpath(path.c_str(), nullptr);\n  if (resolved_path == nullptr) {\n    return {std::string(), errno};\n  }\n  std::string resolved_path_string{resolved_path};\n  ::free(resolved_path);\n  return {resolved_path_string, 0};\n}\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/posix/filesystem_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/api/os_sys_calls.h\"\n\n#include \"common/filesystem/file_shared_impl.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nclass FileImplPosix : public FileSharedImpl {\npublic:\n  FileImplPosix(const std::string& path) : FileSharedImpl(path) {}\n  ~FileImplPosix() override;\n\nprotected:\n  struct FlagsAndMode {\n    int flags_ = 0;\n    mode_t mode_ = 0;\n  };\n\n  Api::IoCallBoolResult open(FlagSet flag) override;\n  Api::IoCallSizeResult write(absl::string_view buffer) override;\n  Api::IoCallBoolResult close() override;\n\nprivate:\n  FlagsAndMode translateFlag(FlagSet in);\n  friend class FileSystemImplTest;\n};\n\nclass InstanceImplPosix : public Instance {\npublic:\n  // Filesystem::Instance\n  FilePtr createFile(const std::string& path) override;\n  bool fileExists(const std::string& path) override;\n  bool directoryExists(const std::string& path) override;\n  ssize_t fileSize(const std::string& path) override;\n  std::string fileReadToEnd(const std::string& path) override;\n  PathSplitResult splitPathFromFilename(absl::string_view path) override;\n  bool illegalPath(const std::string& path) override;\n\nprivate:\n  Api::SysCallStringResult canonicalPath(const std::string& path);\n  friend class FileSystemImplTest;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/win32/directory_iterator_impl.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/filesystem/directory_iterator_impl.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nDirectoryIteratorImpl::DirectoryIteratorImpl(const std::string& directory_path)\n    : DirectoryIterator(), find_handle_(INVALID_HANDLE_VALUE) {\n  WIN32_FIND_DATA find_data;\n  const std::string glob = directory_path + \"\\\\*\";\n  find_handle_ = ::FindFirstFile(glob.c_str(), &find_data);\n  if (find_handle_ == INVALID_HANDLE_VALUE) {\n    throw EnvoyException(\n        fmt::format(\"unable to open directory {}: {}\", directory_path, ::GetLastError()));\n  }\n\n  entry_ = {std::string(find_data.cFileName), fileType(find_data)};\n}\n\nDirectoryIteratorImpl::~DirectoryIteratorImpl() {\n  if (find_handle_ != INVALID_HANDLE_VALUE) {\n    ::FindClose(find_handle_);\n  }\n}\n\nDirectoryIteratorImpl& DirectoryIteratorImpl::operator++() {\n  WIN32_FIND_DATA find_data;\n  const BOOL ret = ::FindNextFile(find_handle_, &find_data);\n  const DWORD err = ::GetLastError();\n  if (ret == 0 && err != ERROR_NO_MORE_FILES) {\n    throw EnvoyException(fmt::format(\"unable to iterate directory: {}\", err));\n  }\n\n  if (ret == 0) {\n    entry_ = {\"\", FileType::Other};\n  } else {\n    entry_ = {std::string(find_data.cFileName), fileType(find_data)};\n  }\n\n  return *this;\n}\n\nFileType DirectoryIteratorImpl::fileType(const WIN32_FIND_DATA& find_data) const {\n  if ((find_data.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) &&\n      !(find_data.dwReserved0 & IO_REPARSE_TAG_SYMLINK)) {\n    // The file is reparse point and not a symlink, so it can't be\n    // a regular file or a directory\n    return FileType::Other;\n  }\n\n  if (find_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {\n    return FileType::Directory;\n  }\n\n  return FileType::Regular;\n}\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/win32/directory_iterator_impl.h",
    "content": "#pragma once\n\n#include \"envoy/filesystem/filesystem.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nclass DirectoryIteratorImpl : public DirectoryIterator {\npublic:\n  DirectoryIteratorImpl(const std::string& directory_path);\n  DirectoryIteratorImpl() : DirectoryIterator(), find_handle_(INVALID_HANDLE_VALUE) {}\n  ~DirectoryIteratorImpl();\n\n  DirectoryIteratorImpl& operator++() override;\n\n  // We don't want this iterator to be copied. If the copy gets destructed,\n  // then it will close its copy of the directory HANDLE, which will cause the\n  // original's to be invalid. While we could implement a deep copy constructor to\n  // work around this, it is not needed the moment.\n  DirectoryIteratorImpl(const DirectoryIteratorImpl&) = delete;\n  DirectoryIteratorImpl(DirectoryIteratorImpl&&) = default;\n  DirectoryIteratorImpl& operator=(DirectoryIteratorImpl&&) = default;\n\nprivate:\n  FileType fileType(const WIN32_FIND_DATA& find_data) const;\n\n  HANDLE find_handle_;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/win32/filesystem_impl.cc",
    "content": "#include <fcntl.h>\n\n#include <fstream>\n#include <iostream>\n#include <sstream>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/filesystem/filesystem_impl.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nFileImplWin32::~FileImplWin32() {\n  if (isOpen()) {\n    const Api::IoCallBoolResult result = close();\n    ASSERT(result.rc_);\n  }\n}\n\nApi::IoCallBoolResult FileImplWin32::open(FlagSet in) {\n  if (isOpen()) {\n    return resultSuccess(true);\n  }\n\n  auto flags = translateFlag(in);\n  fd_ = CreateFileA(path_.c_str(), flags.access_, FILE_SHARE_READ | FILE_SHARE_WRITE, 0,\n                    flags.creation_, 0, NULL);\n  if (fd_ == INVALID_HANDLE) {\n    return resultFailure(false, ::GetLastError());\n  }\n  return resultSuccess(true);\n}\n\nApi::IoCallSizeResult FileImplWin32::write(absl::string_view buffer) {\n  DWORD bytes_written;\n  BOOL result = WriteFile(fd_, buffer.data(), buffer.length(), &bytes_written, NULL);\n  if (result == 0) {\n    return resultFailure<ssize_t>(-1, ::GetLastError());\n  }\n  return resultSuccess<ssize_t>(bytes_written);\n};\n\nApi::IoCallBoolResult FileImplWin32::close() {\n  ASSERT(isOpen());\n\n  BOOL result = CloseHandle(fd_);\n  fd_ = INVALID_HANDLE;\n  if (result == 0) {\n    return resultFailure(false, ::GetLastError());\n  }\n  return resultSuccess(true);\n}\n\nFileImplWin32::FlagsAndMode FileImplWin32::translateFlag(FlagSet in) {\n  DWORD access = 0;\n  DWORD creation = OPEN_EXISTING;\n\n  if (in.test(File::Operation::Create)) {\n    creation = OPEN_ALWAYS;\n  }\n\n  if (in.test(File::Operation::Write)) {\n    access = GENERIC_WRITE;\n  }\n\n  // Order of tests matter here. 
There reason for that\n  // is that `FILE_APPEND_DATA` should not be used together\n  // with `GENERIC_WRITE`. If both of them are used the file\n  // is not opened in append mode.\n  if (in.test(File::Operation::Append)) {\n    access = FILE_APPEND_DATA;\n  }\n\n  if (in.test(File::Operation::Read)) {\n    access |= GENERIC_READ;\n  }\n\n  return {access, creation};\n}\n\nFilePtr InstanceImplWin32::createFile(const std::string& path) {\n  return std::make_unique<FileImplWin32>(path);\n}\n\nbool InstanceImplWin32::fileExists(const std::string& path) {\n  const DWORD attributes = ::GetFileAttributes(path.c_str());\n  return attributes != INVALID_FILE_ATTRIBUTES;\n}\n\nbool InstanceImplWin32::directoryExists(const std::string& path) {\n  const DWORD attributes = ::GetFileAttributes(path.c_str());\n  if (attributes == INVALID_FILE_ATTRIBUTES) {\n    return false;\n  }\n  return attributes & FILE_ATTRIBUTE_DIRECTORY;\n}\n\nssize_t InstanceImplWin32::fileSize(const std::string& path) {\n  auto fd = CreateFileA(path.c_str(), GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, 0, NULL);\n  if (fd == INVALID_HANDLE) {\n    return -1;\n  }\n  ssize_t result = 0;\n  LARGE_INTEGER lFileSize;\n  BOOL bGetSize = GetFileSizeEx(fd, &lFileSize);\n  CloseHandle(fd);\n  if (!bGetSize) {\n    return -1;\n  }\n  result += lFileSize.QuadPart;\n  return result;\n}\n\nstd::string InstanceImplWin32::fileReadToEnd(const std::string& path) {\n  if (illegalPath(path)) {\n    throw EnvoyException(absl::StrCat(\"Invalid path: \", path));\n  }\n\n  std::ios::sync_with_stdio(false);\n\n  // On Windows, we need to explicitly set the file mode as binary. 
Otherwise,\n  // 0x1a will be treated as EOF\n  std::ifstream file(path, std::ios_base::binary);\n  if (file.fail()) {\n    auto last_error = ::GetLastError();\n    if (last_error == ERROR_FILE_NOT_FOUND) {\n      throw EnvoyException(absl::StrCat(\"Invalid path: \", path));\n    }\n\n    throw EnvoyException(absl::StrCat(\"unable to read file: \", path));\n  }\n\n  std::stringstream file_string;\n  file_string << file.rdbuf();\n\n  return file_string.str();\n}\n\nPathSplitResult InstanceImplWin32::splitPathFromFilename(absl::string_view path) {\n  size_t last_slash = path.find_last_of(\":/\\\\\");\n  if (last_slash == std::string::npos) {\n    throw EnvoyException(fmt::format(\"invalid file path {}\", path));\n  }\n  absl::string_view name = path.substr(last_slash + 1);\n  // Truncate all trailing slashes, but retain the entire\n  // single '/', 'd:' drive, and 'd:\\' drive root paths\n  if (last_slash == 0 || path[last_slash] == ':' || path[last_slash - 1] == ':') {\n    ++last_slash;\n  }\n  return {path.substr(0, last_slash), name};\n}\n\n// clang-format off\n//\n// Filename warnings and caveats are documented at;\n// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file\n// Originally prepared by wrowe@rowe-clan.net for the Apache APR project, see;\n// http://svn.apache.org/viewvc/apr/apr/trunk/file_io/win32/filesys.c?view=log&pathrev=62242\n//\n// Note special delimiter cases for path prefixes;\n//     \"D:\\\" for local drive volumes\n//     \"\\server\\share\\\" for network volumes\n//     \"\\\\?\\\" to pass path directly to the underlying driver\n//          (invalidates the '/' separator and bypasses \".\", \"..\" handling)\n//     \"\\\\?\\D:\\\" for local drive volumes\n//     \"\\\\?\\UNC\\server\\share\\\" for network volumes (literal \"UNC\")\n//     \"\\\\.\\\" for device namespace (e.g. volume names, character devices)\n// File path components must not end in whitespace or '.' 
(except literal \".\" and \"..\")\n// Allow ':' for drive letter only (attempt to name alternate file stream)\n// Allow '/', '\\\\' as path delimiters only\n// Valid file name character excluding delimiters;\n\nstatic const char filename_char_table[] = {\n    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n //    !  \"  #  $  %  &  '  (  )  *  +  ,  -  .  /  0  1  2  3  4  5  6  7  8  9  :  ;  <  =  >  ?\n    1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0,\n // @  A  B  C  D  E  F  G  H  I  J  K  L  M  N  O  P  Q  R  S  T  U  V  W  X  Y  Z  [  \\  ]  ^  _\n    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,\n // `  a  b  c  d  e  f  g  h  i  j  k  l  m  n  o  p  q  r  s  t  u  v  w  x  y  z  {  |  }  ~\n    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0,\n // High bit codes are accepted (subject to code page translation rules)\n    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1\n};\n\n// The \"COM#\" and \"LPT#\" names below have boolean flag requiring a [1-9] suffix.\n// This list can be avoided by observing dwFileAttributes & FILE_ATTRIBUTE_DEVICE\n// within WIN32_FILE_ATTRIBUTE_DATA or WIN32_FIND_DATA results.\nabsl::node_hash_map<std::string, bool> pathelt_table = {\n    {\"CON\", false}, {\"NUL\", false}, {\"AUX\", false}, {\"PRN\", false}, {\"COM\", true}, {\"LPT\", true}\n};\n\n// clang-format on\n\nbool InstanceImplWin32::illegalPath(const std::string& path) {\n  std::string pathbuffer = path;\n  absl::string_view pathname 
= pathbuffer;\n\n  // Examine and skip common leading path patterns of \\\\?\\ and\n  // reject paths with any other leading \\\\.\\ device or an\n  // unrecognized \\\\*\\ prefix\n  if ((pathname.size() >= 4) && (pathname[0] == '/' || pathname[0] == '\\\\') &&\n      (pathname[1] == '/' || pathname[1] == '\\\\') && (pathname[3] == '/' || pathname[3] == '\\\\')) {\n    if (pathname[2] == '?') {\n      pathname = pathname.substr(4);\n    } else {\n      return true;\n    }\n  }\n  // Examine and accept D: drive prefix (last opportunity to\n  // accept a colon in the file path) and skip the D: component\n  // This may result in a relative-to working directory or absolute path on D:\n  if (pathname.size() >= 2 && std::isalpha(pathname[0]) && pathname[1] == ':') {\n    pathname = pathname.substr(2);\n  }\n  std::string ucase_prefix(\"   \");\n  std::vector<std::string> pathelts = absl::StrSplit(pathname, absl::ByAnyChar(\"/\\\\\"));\n  for (const std::string& elt : pathelts) {\n    // Accept element of empty, \".\", \"..\" as special cases,\n    if (elt.size() == 0 ||\n        (elt[0] == '.' && (elt.size() == 1 || (elt[1] == '.' 
&& (elt.size() == 2))))) {\n      continue;\n    }\n    // Upper-case path segment prefix to compare to character device names\n    if (elt.size() >= 3) {\n      int i;\n      for (i = 0; i < 3; ++i) {\n        ucase_prefix[i] = ::toupper(elt[i]);\n      }\n      auto found_elt = pathelt_table.find(ucase_prefix);\n\n      if (found_elt != pathelt_table.end()) {\n        // If a non-zero digit is significant, but not present, treat as not-found\n        if (!found_elt->second || (elt.size() >= 4 && ::isdigit(elt[i]) && elt[i++] != '0')) {\n          if (elt.size() == i) {\n            return true;\n          }\n          // The literal device name is invalid for both an exact match,\n          // and also when followed by (whitespace plus) any .ext suffix\n          for (auto ch = elt.begin() + i; ch != elt.end(); ++ch) {\n            if (*ch == '.') {\n              return true;\n            }\n            if (*ch != ' ') {\n              break;\n            }\n          }\n        }\n      }\n    }\n\n    for (const char& ch : elt) {\n      if (!(filename_char_table[ch] & 1)) {\n        return true;\n      }\n    }\n    const char& lastch = elt[elt.size() - 1];\n    if (lastch == ' ' || lastch == '.') {\n      return true;\n    }\n  }\n\n  return false;\n}\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/win32/filesystem_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"common/filesystem/file_shared_impl.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nclass FileImplWin32 : public FileSharedImpl {\npublic:\n  FileImplWin32(const std::string& path) : FileSharedImpl(path) {}\n  ~FileImplWin32();\n\nprotected:\n  Api::IoCallBoolResult open(FlagSet flag) override;\n  Api::IoCallSizeResult write(absl::string_view buffer) override;\n  Api::IoCallBoolResult close() override;\n\nprivate:\n  struct FlagsAndMode {\n    DWORD access_ = 0;\n    DWORD creation_ = 0;\n  };\n\n  FlagsAndMode translateFlag(FlagSet in);\n  friend class FileSystemImplTest;\n};\n\nclass InstanceImplWin32 : public Instance {\npublic:\n  // Filesystem::Instance\n  FilePtr createFile(const std::string& path) override;\n  bool fileExists(const std::string& path) override;\n  bool directoryExists(const std::string& path) override;\n  ssize_t fileSize(const std::string& path) override;\n  std::string fileReadToEnd(const std::string& path) override;\n  PathSplitResult splitPathFromFilename(absl::string_view path) override;\n  bool illegalPath(const std::string& path) override;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/win32/watcher_impl.cc",
    "content": "#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/thread_impl.h\"\n#include \"common/filesystem/watcher_impl.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nWatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher, Api::Api& api)\n    : api_(api), os_sys_calls_(Api::OsSysCallsSingleton::get()) {\n  os_fd_t socks[2];\n  Api::SysCallIntResult result = os_sys_calls_.socketpair(AF_INET, SOCK_STREAM, IPPROTO_TCP, socks);\n  ASSERT(result.rc_ == 0);\n\n  event_read_ = socks[0];\n  event_write_ = socks[1];\n  result = os_sys_calls_.setsocketblocking(event_read_, false);\n  ASSERT(result.rc_ == 0);\n  result = os_sys_calls_.setsocketblocking(event_write_, false);\n  ASSERT(result.rc_ == 0);\n\n  directory_event_ = dispatcher.createFileEvent(\n      event_read_,\n      [this](uint32_t events) -> void {\n        ASSERT(events == Event::FileReadyType::Read);\n        onDirectoryEvent();\n      },\n      Event::FileTriggerType::Level, Event::FileReadyType::Read);\n\n  thread_exit_event_ = ::CreateEvent(nullptr, false, false, nullptr);\n  ASSERT(thread_exit_event_ != NULL);\n  keep_watching_ = true;\n\n  // See comments in WorkerImpl::start for the naming convention.\n  Thread::Options options{absl::StrCat(\"wat:\", dispatcher.name())};\n  watch_thread_ = thread_factory_.createThread([this]() -> void { watchLoop(); }, options);\n}\n\nWatcherImpl::~WatcherImpl() {\n  const BOOL rc = ::SetEvent(thread_exit_event_);\n  ASSERT(rc);\n\n  watch_thread_->join();\n\n  for (auto& entry : callback_map_) {\n    ::CloseHandle(entry.second->dir_handle_);\n    ::CloseHandle(entry.second->overlapped_.hEvent);\n  }\n  ::CloseHandle(thread_exit_event_);\n  ::closesocket(event_read_);\n  ::closesocket(event_write_);\n}\n\nvoid WatcherImpl::addWatch(absl::string_view path, uint32_t events, OnChangedCb cb) {\n  if (path == Platform::null_device_path) {\n    return;\n  }\n\n  const 
PathSplitResult result = api_.fileSystem().splitPathFromFilename(path);\n  // ReadDirectoryChangesW only has a Unicode version, so we need\n  // to use wide strings here\n  const std::wstring directory = wstring_converter_.from_bytes(std::string(result.directory_));\n  const std::wstring file = wstring_converter_.from_bytes(std::string(result.file_));\n\n  const HANDLE dir_handle = CreateFileW(\n      directory.c_str(), GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,\n      nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OVERLAPPED, NULL);\n  if (dir_handle == INVALID_HANDLE_VALUE) {\n    throw EnvoyException(\n        fmt::format(\"unable to open directory {}: {}\", result.directory_, GetLastError()));\n  }\n  std::string fii_key(sizeof(FILE_ID_INFO), '\\0');\n  RELEASE_ASSERT(\n      GetFileInformationByHandleEx(dir_handle, FileIdInfo, &fii_key[0], sizeof(FILE_ID_INFO)),\n      fmt::format(\"unable to identify directory {}: {}\", result.directory_, GetLastError()));\n  if (callback_map_.find(fii_key) != callback_map_.end()) {\n    CloseHandle(dir_handle);\n  } else {\n    callback_map_[fii_key] = std::make_unique<DirectoryWatch>();\n    callback_map_[fii_key]->dir_handle_ = dir_handle;\n    callback_map_[fii_key]->buffer_.resize(16384);\n    callback_map_[fii_key]->watcher_ = this;\n\n    // According to Microsoft docs, \"the hEvent member of the OVERLAPPED structure is not used by\n    // the system, so you can use it yourself\". We will use it for synchronization of the completion\n    // routines\n    HANDLE event_handle = ::CreateEvent(nullptr, false, false, nullptr);\n    RELEASE_ASSERT(event_handle, fmt::format(\"CreateEvent failed: {}\", GetLastError()));\n\n    callback_map_[fii_key]->overlapped_.hEvent = event_handle;\n    dir_watch_complete_events_.push_back(event_handle);\n\n    // send the first ReadDirectoryChangesW request to our watch thread. 
This ensures that all of\n    // the io completion routines will run in that thread\n    DWORD rc = ::QueueUserAPC(&issueFirstRead,\n                              static_cast<Thread::ThreadImplWin32*>(watch_thread_.get())->handle(),\n                              reinterpret_cast<ULONG_PTR>(callback_map_[fii_key].get()));\n    RELEASE_ASSERT(rc, fmt::format(\"QueueUserAPC failed: {}\", GetLastError()));\n\n    // wait for issueFirstRead to confirm that it has issued a call to ReadDirectoryChangesW\n    rc = ::WaitForSingleObject(event_handle, INFINITE);\n    RELEASE_ASSERT(rc == WAIT_OBJECT_0,\n                   fmt::format(\"WaitForSingleObject failed: {}\", GetLastError()));\n\n    ENVOY_LOG(debug, \"created watch for directory: '{}' handle: {}\", result.directory_, dir_handle);\n  }\n\n  callback_map_[fii_key]->watches_.push_back({file, events, cb});\n  ENVOY_LOG(debug, \"added watch for file '{}' in directory '{}'\", result.file_, result.directory_);\n}\n\nvoid WatcherImpl::onDirectoryEvent() {\n  while (true) {\n    char data = 0;\n    const int rc = ::recv(event_read_, &data, sizeof(data), 0);\n    const int err = ::WSAGetLastError();\n    if (rc == SOCKET_ERROR && err == WSAEWOULDBLOCK) {\n      return;\n    }\n    RELEASE_ASSERT(rc != SOCKET_ERROR, fmt::format(\"recv errored: {}\", err));\n\n    if (data == 0) {\n      // no callbacks to run; this is just a notification that a DirectoryWatch exited\n      return;\n    }\n\n    CbClosure callback;\n    bool exists = active_callbacks_.try_pop(callback);\n    RELEASE_ASSERT(exists, \"expected callback, found none\");\n    ENVOY_LOG(debug, \"executing callback\");\n    callback();\n  }\n}\n\nvoid WatcherImpl::issueFirstRead(ULONG_PTR param) {\n  DirectoryWatch* dir_watch = reinterpret_cast<DirectoryWatch*>(param);\n  // Since the first member in each DirectoryWatch is an OVERLAPPED, we can pass\n  // a pointer to DirectoryWatch as the OVERLAPPED for ReadDirectoryChangesW. 
Then, the\n  // completion routine can use its OVERLAPPED* parameter to access the DirectoryWatch see:\n  // https://docs.microsoft.com/en-us/windows/desktop/ipc/named-pipe-server-using-completion-routines\n  ReadDirectoryChangesW(dir_watch->dir_handle_, &(dir_watch->buffer_[0]),\n                        dir_watch->buffer_.capacity(), false,\n                        FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE, nullptr,\n                        reinterpret_cast<LPOVERLAPPED>(param), &directoryChangeCompletion);\n\n  const BOOL rc = ::SetEvent(dir_watch->overlapped_.hEvent);\n  ASSERT(rc);\n}\n\nvoid WatcherImpl::endDirectoryWatch(os_fd_t sock, HANDLE event_handle) {\n  const BOOL rc = ::SetEvent(event_handle);\n  ASSERT(rc);\n  // let libevent know that a ReadDirectoryChangesW call returned\n  const char data = 0;\n  const int bytes_written = ::send(sock, &data, sizeof(data), 0);\n  RELEASE_ASSERT(bytes_written == sizeof(data),\n                 fmt::format(\"failed to write 1 byte: {}\", ::WSAGetLastError()));\n}\n\nvoid WatcherImpl::directoryChangeCompletion(DWORD err, DWORD num_bytes, LPOVERLAPPED overlapped) {\n  DirectoryWatch* dir_watch = reinterpret_cast<DirectoryWatch*>(overlapped);\n  WatcherImpl* watcher = dir_watch->watcher_;\n  PFILE_NOTIFY_INFORMATION fni = reinterpret_cast<PFILE_NOTIFY_INFORMATION>(&dir_watch->buffer_[0]);\n\n  if (err == ERROR_OPERATION_ABORTED) {\n    ENVOY_LOG(debug, \"ReadDirectoryChangesW aborted, exiting\");\n    endDirectoryWatch(watcher->event_write_, dir_watch->overlapped_.hEvent);\n    return;\n  } else if (err != 0) {\n    ENVOY_LOG(error, \"ReadDirectoryChangesW errored: {}, exiting\", err);\n    endDirectoryWatch(watcher->event_write_, dir_watch->overlapped_.hEvent);\n    return;\n  } else if (num_bytes < sizeof(_FILE_NOTIFY_INFORMATION)) {\n    ENVOY_LOG(error, \"ReadDirectoryChangesW returned {} bytes, expected {}, exiting\", num_bytes,\n              sizeof(_FILE_NOTIFY_INFORMATION));\n    
endDirectoryWatch(watcher->event_write_, dir_watch->overlapped_.hEvent);\n    return;\n  }\n\n  DWORD next_entry = 0;\n  do {\n    fni = reinterpret_cast<PFILE_NOTIFY_INFORMATION>(reinterpret_cast<char*>(fni) + next_entry);\n    // the length of the file name is given in bytes, not wide characters\n    std::wstring file(fni->FileName, fni->FileNameLength / 2);\n    ENVOY_LOG(debug, \"notification: handle: {} action: {:x} file: {}\", dir_watch->dir_handle_,\n              fni->Action, watcher->wstring_converter_.to_bytes(file));\n\n    uint32_t events = 0;\n    if (fni->Action == FILE_ACTION_RENAMED_NEW_NAME) {\n      events |= Events::MovedTo;\n    }\n    if (fni->Action == FILE_ACTION_MODIFIED) {\n      events |= Events::Modified;\n    }\n\n    for (FileWatch& watch : dir_watch->watches_) {\n      if (watch.file_ == file && (watch.events_ & events)) {\n        ENVOY_LOG(debug, \"matched callback: file: {}\", watcher->wstring_converter_.to_bytes(file));\n        const auto cb = watch.cb_;\n        const auto cb_closure = [cb, events]() -> void { cb(events); };\n        watcher->active_callbacks_.push(cb_closure);\n        // write a byte to the other end of the socket that libevent is watching\n        // this tells the libevent callback to pull this callback off the active_callbacks_\n        // queue. 
We do this so that the callbacks are executed in the main libevent loop,\n        // not in this completion routine\n        const char data = 1;\n        const int bytes_written = ::send(watcher->event_write_, &data, sizeof(data), 0);\n        RELEASE_ASSERT(bytes_written == sizeof(data),\n                       fmt::format(\"failed to write 1 byte: {}\", ::WSAGetLastError()));\n      }\n    }\n\n    next_entry = fni->NextEntryOffset;\n  } while (next_entry != 0);\n\n  if (!watcher->keep_watching_.load()) {\n    ENVOY_LOG(debug, \"ending watch on directory: handle: {}\", dir_watch->dir_handle_);\n    endDirectoryWatch(watcher->event_write_, dir_watch->overlapped_.hEvent);\n    return;\n  }\n\n  ReadDirectoryChangesW(dir_watch->dir_handle_, &(dir_watch->buffer_[0]),\n                        dir_watch->buffer_.capacity(), false,\n                        FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE, nullptr,\n                        overlapped, directoryChangeCompletion);\n}\n\nvoid WatcherImpl::watchLoop() {\n  while (keep_watching_.load()) {\n    DWORD wait = WaitForSingleObjectEx(thread_exit_event_, INFINITE, true);\n    switch (wait) {\n    case WAIT_OBJECT_0:\n      // object is getting destroyed, exit the loop\n      keep_watching_.store(false);\n      break;\n    case WAIT_IO_COMPLETION:\n      // an IO completion routine finished, nothing to do\n      break;\n    default:\n      ENVOY_LOG(error, \"WaitForSingleObjectEx: {}, GetLastError: {}, exiting\", wait,\n                GetLastError());\n      keep_watching_.store(false);\n    }\n  }\n\n  for (auto& entry : callback_map_) {\n    ::CancelIoEx(entry.second->dir_handle_, nullptr);\n  }\n\n  const int num_directories = dir_watch_complete_events_.size();\n  if (num_directories > 0) {\n    while (true) {\n      DWORD wait = ::WaitForMultipleObjectsEx(num_directories, &dir_watch_complete_events_[0], true,\n                                              INFINITE, true);\n\n      if (WAIT_OBJECT_0 
<= wait && wait < (WAIT_OBJECT_0 + num_directories)) {\n        // we have no pending IO remaining\n        return;\n      } else if (wait == WAIT_IO_COMPLETION) {\n        // an io completion routine finished, keep waiting\n        continue;\n      } else {\n        ENVOY_LOG(error, \"WaitForMultipleObjectsEx: {}, GetLastError: {}, exiting\", wait,\n                  GetLastError());\n        return;\n      }\n    }\n  }\n}\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filesystem/win32/watcher_impl.h",
    "content": "#pragma once\n\n#include <concurrent_queue.h>\n\n#include <codecvt>\n#include <cstdint>\n#include <list>\n#include <locale>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/filesystem/watcher.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread_impl.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nclass WatcherImpl : public Watcher, Logger::Loggable<Logger::Id::file> {\npublic:\n  WatcherImpl(Event::Dispatcher& dispatcher, Api::Api& api);\n  ~WatcherImpl();\n\n  // Filesystem::Watcher\n  void addWatch(absl::string_view path, uint32_t events, OnChangedCb cb) override;\n\nprivate:\n  static void issueFirstRead(ULONG_PTR param);\n  static void directoryChangeCompletion(DWORD err, DWORD num_bytes, LPOVERLAPPED overlapped);\n  static void endDirectoryWatch(os_fd_t sock, HANDLE hEvent);\n  void watchLoop();\n  void onDirectoryEvent();\n\n  struct FileWatch {\n    // store the wide character string for ReadDirectoryChangesW\n    std::wstring file_;\n    uint32_t events_;\n    OnChangedCb cb_;\n  };\n\n  typedef std::function<void(void)> CbClosure;\n\n  struct DirectoryWatch {\n    OVERLAPPED overlapped_;\n    std::list<FileWatch> watches_;\n    HANDLE dir_handle_;\n    std::vector<uint8_t> buffer_;\n    WatcherImpl* watcher_;\n  };\n\n  typedef std::unique_ptr<DirectoryWatch> DirectoryWatchPtr;\n\n  Api::Api& api_;\n  absl::node_hash_map<std::string, DirectoryWatchPtr> callback_map_;\n  Event::FileEventPtr directory_event_;\n  os_fd_t event_write_;\n  os_fd_t event_read_;\n  Thread::ThreadPtr watch_thread_;\n  Thread::ThreadFactoryImplWin32 thread_factory_;\n  HANDLE thread_exit_event_;\n  std::vector<HANDLE> dir_watch_complete_events_;\n  std::atomic<bool> keep_watching_;\n  concurrency::concurrent_queue<CbClosure> active_callbacks_;\n  
Api::OsSysCallsImpl& os_sys_calls_;\n  std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> wstring_converter_;\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filter/http/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"filter_config_discovery_lib\",\n    srcs = [\"filter_config_discovery_impl.cc\"],\n    hdrs = [\"filter_config_discovery_impl.h\"],\n    deps = [\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/filter/http:filter_config_provider_interface\",\n        \"//include/envoy/singleton:instance_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/config:subscription_base_interface\",\n        \"//source/common/config:subscription_factory_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/init:manager_lib\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/init:watcher_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/filter/http/filter_config_discovery_impl.cc",
    "content": "#include \"common/filter/http/filter_config_discovery_impl.h\"\n\n#include \"envoy/config/core/v3/extension.pb.validate.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Filter {\nnamespace Http {\n\nDynamicFilterConfigProviderImpl::DynamicFilterConfigProviderImpl(\n    FilterConfigSubscriptionSharedPtr&& subscription,\n    const std::set<std::string>& require_type_urls,\n    Server::Configuration::FactoryContext& factory_context)\n    : subscription_(std::move(subscription)), require_type_urls_(require_type_urls),\n      tls_(factory_context.threadLocal().allocateSlot()),\n      init_target_(\"DynamicFilterConfigProviderImpl\", [this]() {\n        subscription_->start();\n        // This init target is used to activate the subscription but not wait\n        // for a response. It is used whenever a default config is provided to be\n        // used while waiting for a response.\n        init_target_.ready();\n      }) {\n  subscription_->filter_config_providers_.insert(this);\n  tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<ThreadLocalConfig>();\n  });\n}\n\nDynamicFilterConfigProviderImpl::~DynamicFilterConfigProviderImpl() {\n  subscription_->filter_config_providers_.erase(this);\n}\n\nconst std::string& DynamicFilterConfigProviderImpl::name() { return subscription_->name(); }\n\nabsl::optional<Envoy::Http::FilterFactoryCb> DynamicFilterConfigProviderImpl::config() {\n  return tls_->getTyped<ThreadLocalConfig>().config_;\n}\n\nvoid DynamicFilterConfigProviderImpl::validateConfig(\n    const ProtobufWkt::Any& proto_config, Server::Configuration::NamedHttpFilterConfigFactory&) {\n  auto type_url = Config::Utility::getFactoryType(proto_config);\n  if (require_type_urls_.count(type_url) == 0) {\n    throw 
EnvoyException(fmt::format(\"Error: filter config has type URL {} but expect {}.\",\n                                     type_url, absl::StrJoin(require_type_urls_, \", \")));\n  }\n}\n\nvoid DynamicFilterConfigProviderImpl::onConfigUpdate(Envoy::Http::FilterFactoryCb config,\n                                                     const std::string&,\n                                                     Config::ConfigAppliedCb cb) {\n  tls_->runOnAllThreads(\n      [config, cb](ThreadLocal::ThreadLocalObjectSharedPtr previous)\n          -> ThreadLocal::ThreadLocalObjectSharedPtr {\n        auto prev_config = std::dynamic_pointer_cast<ThreadLocalConfig>(previous);\n        prev_config->config_ = config;\n        if (cb) {\n          cb();\n        }\n        return previous;\n      },\n      [this, config]() {\n        // This happens after all workers have discarded the previous config so it can be safely\n        // deleted on the main thread by an update with the new config.\n        this->current_config_ = config;\n      });\n}\n\nFilterConfigSubscription::FilterConfigSubscription(\n    const envoy::config::core::v3::ConfigSource& config_source,\n    const std::string& filter_config_name, Server::Configuration::FactoryContext& factory_context,\n    const std::string& stat_prefix, FilterConfigProviderManagerImpl& filter_config_provider_manager,\n    const std::string& subscription_id)\n    : Config::SubscriptionBase<envoy::config::core::v3::TypedExtensionConfig>(\n          envoy::config::core::v3::ApiVersion::V3,\n          factory_context.messageValidationContext().dynamicValidationVisitor(), \"name\"),\n      filter_config_name_(filter_config_name), factory_context_(factory_context),\n      validator_(factory_context.messageValidationContext().dynamicValidationVisitor()),\n      init_target_(fmt::format(\"FilterConfigSubscription init {}\", filter_config_name_),\n                   [this]() { start(); }),\n      
scope_(factory_context.scope().createScope(stat_prefix + \"extension_config_discovery.\" +\n                                                 filter_config_name_ + \".\")),\n      stat_prefix_(stat_prefix),\n      stats_({ALL_EXTENSION_CONFIG_DISCOVERY_STATS(POOL_COUNTER(*scope_))}),\n      filter_config_provider_manager_(filter_config_provider_manager),\n      subscription_id_(subscription_id) {\n  const auto resource_name = getResourceName();\n  subscription_ =\n      factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource(\n          config_source, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_);\n}\n\nvoid FilterConfigSubscription::start() {\n  if (!started_) {\n    started_ = true;\n    subscription_->start({filter_config_name_});\n  }\n}\n\nvoid FilterConfigSubscription::onConfigUpdate(\n    const std::vector<Config::DecodedResourceRef>& resources, const std::string& version_info) {\n  // Make sure to make progress in case the control plane is temporarily inconsistent.\n  init_target_.ready();\n\n  if (resources.size() != 1) {\n    throw EnvoyException(fmt::format(\n        \"Unexpected number of resources in ExtensionConfigDS response: {}\", resources.size()));\n  }\n  const auto& filter_config = dynamic_cast<const envoy::config::core::v3::TypedExtensionConfig&>(\n      resources[0].get().resource());\n  if (filter_config.name() != filter_config_name_) {\n    throw EnvoyException(fmt::format(\"Unexpected resource name in ExtensionConfigDS response: {}\",\n                                     filter_config.name()));\n  }\n  // Skip update if hash matches\n  const uint64_t new_hash = MessageUtil::hash(filter_config.typed_config());\n  if (new_hash == last_config_hash_) {\n    return;\n  }\n  auto& factory =\n      Config::Utility::getAndCheckFactory<Server::Configuration::NamedHttpFilterConfigFactory>(\n          filter_config);\n  // Ensure that the filter config is valid in the filter chain context once the 
proto is processed.\n  // Validation happens before updating to prevent a partial update application. It might be\n  // possible that the providers have distinct type URL constraints.\n  for (auto* provider : filter_config_providers_) {\n    provider->validateConfig(filter_config.typed_config(), factory);\n  }\n  ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig(\n      filter_config.typed_config(), validator_, factory);\n  Envoy::Http::FilterFactoryCb factory_callback =\n      factory.createFilterFactoryFromProto(*message, stat_prefix_, factory_context_);\n  ENVOY_LOG(debug, \"Updating filter config {}\", filter_config_name_);\n  const auto pending_update = std::make_shared<std::atomic<uint64_t>>(\n      (factory_context_.admin().concurrency() + 1) * filter_config_providers_.size());\n  for (auto* provider : filter_config_providers_) {\n    provider->onConfigUpdate(factory_callback, version_info, [this, pending_update]() {\n      if (--(*pending_update) == 0) {\n        stats_.config_reload_.inc();\n      }\n    });\n  }\n  last_config_hash_ = new_hash;\n}\n\nvoid FilterConfigSubscription::onConfigUpdate(\n    const std::vector<Config::DecodedResourceRef>& added_resources,\n    const Protobuf::RepeatedPtrField<std::string>& removed_resources, const std::string&) {\n  if (!removed_resources.empty()) {\n    ENVOY_LOG(error,\n              \"Server sent a delta ExtensionConfigDS update attempting to remove a resource (name: \"\n              \"{}). 
Ignoring.\",\n              removed_resources[0]);\n  }\n  if (!added_resources.empty()) {\n    onConfigUpdate(added_resources, added_resources[0].get().version());\n  }\n}\n\nvoid FilterConfigSubscription::onConfigUpdateFailed(Config::ConfigUpdateFailureReason reason,\n                                                    const EnvoyException*) {\n  ENVOY_LOG(debug, \"Updating filter config {} failed due to {}\", filter_config_name_, reason);\n  stats_.config_fail_.inc();\n  // Make sure to make progress in case the control plane is temporarily failing.\n  init_target_.ready();\n}\n\nFilterConfigSubscription::~FilterConfigSubscription() {\n  // If we get destroyed during initialization, make sure we signal that we \"initialized\".\n  init_target_.ready();\n  // Remove the subscription from the provider manager.\n  filter_config_provider_manager_.subscriptions_.erase(subscription_id_);\n}\n\nstd::shared_ptr<FilterConfigSubscription> FilterConfigProviderManagerImpl::getSubscription(\n    const envoy::config::core::v3::ConfigSource& config_source, const std::string& name,\n    Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix) {\n  // FilterConfigSubscriptions are unique based on their config source and filter config name\n  // combination.\n  // TODO(https://github.com/envoyproxy/envoy/issues/11967) Hash collision can cause subscription\n  // aliasing.\n  const std::string subscription_id = absl::StrCat(MessageUtil::hash(config_source), \".\", name);\n  auto it = subscriptions_.find(subscription_id);\n  if (it == subscriptions_.end()) {\n    auto subscription = std::make_shared<FilterConfigSubscription>(\n        config_source, name, factory_context, stat_prefix, *this, subscription_id);\n    subscriptions_.insert({subscription_id, std::weak_ptr<FilterConfigSubscription>(subscription)});\n    return subscription;\n  } else {\n    auto existing = it->second.lock();\n    ASSERT(existing != nullptr,\n           absl::StrCat(\"Cannot 
find subscribed filter config resource \", name));\n    return existing;\n  }\n}\n\nFilterConfigProviderPtr FilterConfigProviderManagerImpl::createDynamicFilterConfigProvider(\n    const envoy::config::core::v3::ConfigSource& config_source,\n    const std::string& filter_config_name, const std::set<std::string>& require_type_urls,\n    Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix,\n    bool apply_without_warming) {\n  auto subscription =\n      getSubscription(config_source, filter_config_name, factory_context, stat_prefix);\n  // For warming, wait until the subscription receives the first response to indicate readiness.\n  // Otherwise, mark ready immediately and start the subscription on initialization. A default\n  // config is expected in the latter case.\n  if (!apply_without_warming) {\n    factory_context.initManager().add(subscription->initTarget());\n  }\n  auto provider = std::make_unique<DynamicFilterConfigProviderImpl>(\n      std::move(subscription), require_type_urls, factory_context);\n  // Ensure the subscription starts if it has not already.\n  if (apply_without_warming) {\n    factory_context.initManager().add(provider->init_target_);\n  }\n  return provider;\n}\n\n} // namespace Http\n} // namespace Filter\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/filter/http/filter_config_discovery_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/extension.pb.h\"\n#include \"envoy/config/core/v3/extension.pb.validate.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/filter/http/filter_config_provider.h\"\n#include \"envoy/protobuf/message_validator.h\"\n#include \"envoy/server/factory_context.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/config/subscription_base.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/init/target_impl.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace Filter {\nnamespace Http {\n\nclass FilterConfigProviderManagerImpl;\nclass FilterConfigSubscription;\n\nusing FilterConfigSubscriptionSharedPtr = std::shared_ptr<FilterConfigSubscription>;\n\n/**\n * Implementation of a filter config provider using discovery subscriptions.\n **/\nclass DynamicFilterConfigProviderImpl : public FilterConfigProvider {\npublic:\n  DynamicFilterConfigProviderImpl(FilterConfigSubscriptionSharedPtr&& subscription,\n                                  const std::set<std::string>& require_type_urls,\n                                  Server::Configuration::FactoryContext& factory_context);\n  ~DynamicFilterConfigProviderImpl() override;\n\n  // Config::ExtensionConfigProvider\n  const std::string& name() override;\n  absl::optional<Envoy::Http::FilterFactoryCb> config() override;\n  void validateConfig(const ProtobufWkt::Any& proto_config,\n                      Server::Configuration::NamedHttpFilterConfigFactory&) override;\n  void onConfigUpdate(Envoy::Http::FilterFactoryCb config, const std::string&,\n                      Config::ConfigAppliedCb cb) override;\n\nprivate:\n  struct ThreadLocalConfig : public ThreadLocal::ThreadLocalObject {\n    ThreadLocalConfig() : config_{absl::nullopt} {}\n    absl::optional<Envoy::Http::FilterFactoryCb> 
config_{};\n  };\n\n  FilterConfigSubscriptionSharedPtr subscription_;\n  const std::set<std::string> require_type_urls_;\n  // Currently applied configuration to ensure that the main thread deletes the last reference to\n  // it.\n  absl::optional<Envoy::Http::FilterFactoryCb> current_config_{absl::nullopt};\n  ThreadLocal::SlotPtr tls_;\n\n  // Local initialization target to ensure that the subscription starts in\n  // case no warming is requested by any other filter config provider.\n  Init::TargetImpl init_target_;\n\n  friend class FilterConfigProviderManagerImpl;\n};\n\n/**\n * All extension config discovery stats. @see stats_macros.h\n */\n#define ALL_EXTENSION_CONFIG_DISCOVERY_STATS(COUNTER)                                              \\\n  COUNTER(config_reload)                                                                           \\\n  COUNTER(config_fail)\n\n/**\n * Struct definition for all extension config discovery stats. @see stats_macros.h\n */\nstruct ExtensionConfigDiscoveryStats {\n  ALL_EXTENSION_CONFIG_DISCOVERY_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * A class that fetches the filter configuration dynamically using the filter config discovery API.\n * Subscriptions are shared between the filter config providers. 
The filter config providers are\n * notified when a new config is accepted.\n */\nclass FilterConfigSubscription\n    : Config::SubscriptionBase<envoy::config::core::v3::TypedExtensionConfig>,\n      Logger::Loggable<Logger::Id::filter> {\npublic:\n  FilterConfigSubscription(const envoy::config::core::v3::ConfigSource& config_source,\n                           const std::string& filter_config_name,\n                           Server::Configuration::FactoryContext& factory_context,\n                           const std::string& stat_prefix,\n                           FilterConfigProviderManagerImpl& filter_config_provider_manager,\n                           const std::string& subscription_id);\n\n  ~FilterConfigSubscription() override;\n\n  const Init::SharedTargetImpl& initTarget() { return init_target_; }\n  const std::string& name() { return filter_config_name_; }\n\nprivate:\n  void start();\n\n  // Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                      const std::string&) override;\n  void onConfigUpdateFailed(Config::ConfigUpdateFailureReason reason,\n                            const EnvoyException*) override;\n\n  const std::string filter_config_name_;\n  uint64_t last_config_hash_{0ul};\n  Server::Configuration::FactoryContext& factory_context_;\n  ProtobufMessage::ValidationVisitor& validator_;\n\n  Init::SharedTargetImpl init_target_;\n  bool started_{false};\n\n  Stats::ScopePtr scope_;\n  const std::string stat_prefix_;\n  ExtensionConfigDiscoveryStats stats_;\n\n  // FilterConfigProviderManagerImpl maintains active subscriptions in a map.\n  FilterConfigProviderManagerImpl& filter_config_provider_manager_;\n  const std::string 
subscription_id_;\n  absl::flat_hash_set<DynamicFilterConfigProviderImpl*> filter_config_providers_;\n  friend class DynamicFilterConfigProviderImpl;\n\n  // This must be the last since its destructor may call out to stats to report\n  // on draining requests.\n  std::unique_ptr<Config::Subscription> subscription_;\n};\n\n/**\n * Provider implementation of a static filter config.\n **/\nclass StaticFilterConfigProviderImpl : public FilterConfigProvider {\npublic:\n  StaticFilterConfigProviderImpl(const Envoy::Http::FilterFactoryCb& config,\n                                 const std::string filter_config_name)\n      : config_(config), filter_config_name_(filter_config_name) {}\n\n  // Config::ExtensionConfigProvider\n  const std::string& name() override { return filter_config_name_; }\n  absl::optional<Envoy::Http::FilterFactoryCb> config() override { return config_; }\n  void validateConfig(const ProtobufWkt::Any&,\n                      Server::Configuration::NamedHttpFilterConfigFactory&) override {\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  void onConfigUpdate(Envoy::Http::FilterFactoryCb, const std::string&,\n                      Config::ConfigAppliedCb) override {\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\nprivate:\n  Envoy::Http::FilterFactoryCb config_;\n  const std::string filter_config_name_;\n};\n\n/**\n * An implementation of FilterConfigProviderManager.\n */\nclass FilterConfigProviderManagerImpl : public FilterConfigProviderManager,\n                                        public Singleton::Instance {\npublic:\n  FilterConfigProviderPtr createDynamicFilterConfigProvider(\n      const envoy::config::core::v3::ConfigSource& config_source,\n      const std::string& filter_config_name, const std::set<std::string>& require_type_urls,\n      Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix,\n      bool apply_without_warming) override;\n\n  FilterConfigProviderPtr\n  createStaticFilterConfigProvider(const 
Envoy::Http::FilterFactoryCb& config,\n                                   const std::string& filter_config_name) override {\n    return std::make_unique<StaticFilterConfigProviderImpl>(config, filter_config_name);\n  }\n\nprivate:\n  std::shared_ptr<FilterConfigSubscription>\n  getSubscription(const envoy::config::core::v3::ConfigSource& config_source,\n                  const std::string& name, Server::Configuration::FactoryContext& factory_context,\n                  const std::string& stat_prefix);\n  absl::flat_hash_map<std::string, std::weak_ptr<FilterConfigSubscription>> subscriptions_;\n  friend class FilterConfigSubscription;\n};\n\n} // namespace Http\n} // namespace Filter\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/formatter/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"substitution_formatter_lib\",\n    srcs = [\"substitution_formatter.cc\"],\n    hdrs = [\"substitution_formatter.h\"],\n    external_deps = [\"abseil_str_format\"],\n    deps = [\n        \"//include/envoy/formatter:substitution_formatter_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//source/common/stream_info:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"substitution_format_string_lib\",\n    srcs = [\"substitution_format_string.cc\"],\n    hdrs = [\"substitution_format_string.h\"],\n    deps = [\n        \":substitution_formatter_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/formatter/substitution_format_string.cc",
    "content": "#include \"common/formatter/substitution_format_string.h\"\n\n#include \"common/formatter/substitution_formatter.h\"\n\nnamespace Envoy {\nnamespace Formatter {\n\nFormatterPtr\nSubstitutionFormatStringUtils::createJsonFormatter(const ProtobufWkt::Struct& struct_format,\n                                                   bool preserve_types, bool omit_empty_values) {\n  return std::make_unique<JsonFormatterImpl>(struct_format, preserve_types, omit_empty_values);\n}\n\nFormatterPtr SubstitutionFormatStringUtils::fromProtoConfig(\n    const envoy::config::core::v3::SubstitutionFormatString& config) {\n  switch (config.format_case()) {\n  case envoy::config::core::v3::SubstitutionFormatString::FormatCase::kTextFormat:\n    return std::make_unique<FormatterImpl>(config.text_format(), config.omit_empty_values());\n  case envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat: {\n    return createJsonFormatter(config.json_format(), true, config.omit_empty_values());\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  return nullptr;\n}\n\n} // namespace Formatter\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/formatter/substitution_format_string.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/core/v3/substitution_format_string.pb.h\"\n#include \"envoy/formatter/substitution_formatter.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Formatter {\n\n/**\n * Utilities for using envoy::config::core::v3::SubstitutionFormatString\n */\nclass SubstitutionFormatStringUtils {\npublic:\n  /**\n   * Generate a formatter object from config SubstitutionFormatString.\n   */\n  static FormatterPtr\n  fromProtoConfig(const envoy::config::core::v3::SubstitutionFormatString& config);\n\n  /**\n   * Generate a Json formatter object from proto::Struct config\n   */\n  static FormatterPtr createJsonFormatter(const ProtobufWkt::Struct& struct_format,\n                                          bool preserve_types, bool omit_empty_values);\n};\n\n} // namespace Formatter\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/formatter/substitution_formatter.cc",
    "content": "#include \"common/formatter/substitution_formatter.h\"\n\n#include <climits>\n#include <cstdint>\n#include <regex>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/metadata.h\"\n#include \"common/grpc/common.h\"\n#include \"common/grpc/status.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stream_info/utility.h\"\n\n#include \"absl/strings/str_split.h\"\n#include \"fmt/format.h\"\n\nusing Envoy::Config::Metadata;\n\nnamespace Envoy {\nnamespace Formatter {\n\nstatic const std::string DefaultUnspecifiedValueString = \"-\";\n\nnamespace {\n\nconst ProtobufWkt::Value& unspecifiedValue() { return ValueUtil::nullValue(); }\n\nvoid truncate(std::string& str, absl::optional<uint32_t> max_length) {\n  if (!max_length) {\n    return;\n  }\n\n  str = str.substr(0, max_length.value());\n}\n\n// Matches newline pattern in a StartTimeFormatter format string.\nconst std::regex& getStartTimeNewlinePattern() {\n  CONSTRUCT_ON_FIRST_USE(std::regex, \"%[-_0^#]*[1-9]*(E|O)?n\");\n}\nconst std::regex& getNewlinePattern() { CONSTRUCT_ON_FIRST_USE(std::regex, \"\\n\"); }\n\ntemplate <class... Ts> struct JsonFormatMapVisitor : Ts... { using Ts::operator()...; };\ntemplate <class... Ts> JsonFormatMapVisitor(Ts...) 
-> JsonFormatMapVisitor<Ts...>;\n\n} // namespace\n\nconst std::string SubstitutionFormatUtils::DEFAULT_FORMAT =\n    \"[%START_TIME%] \\\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\" \"\n    \"%RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% \"\n    \"%RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"\n    \"\\\"%REQ(X-FORWARDED-FOR)%\\\" \\\"%REQ(USER-AGENT)%\\\" \\\"%REQ(X-REQUEST-ID)%\\\" \"\n    \"\\\"%REQ(:AUTHORITY)%\\\" \\\"%UPSTREAM_HOST%\\\"\\n\";\n\nFormatterPtr SubstitutionFormatUtils::defaultSubstitutionFormatter() {\n  return FormatterPtr{new FormatterImpl(DEFAULT_FORMAT, false)};\n}\n\nconst absl::optional<std::reference_wrapper<const std::string>>\nSubstitutionFormatUtils::protocolToString(const absl::optional<Http::Protocol>& protocol) {\n  if (protocol) {\n    return Http::Utility::getProtocolString(protocol.value());\n  }\n  return absl::nullopt;\n}\n\nconst std::string&\nSubstitutionFormatUtils::protocolToStringOrDefault(const absl::optional<Http::Protocol>& protocol) {\n  if (protocol) {\n    return Http::Utility::getProtocolString(protocol.value());\n  }\n  return DefaultUnspecifiedValueString;\n}\n\nconst absl::optional<std::string> SubstitutionFormatUtils::getHostname() {\n#ifdef HOST_NAME_MAX\n  const size_t len = HOST_NAME_MAX;\n#else\n  // This is notably the case in OSX.\n  const size_t len = 255;\n#endif\n  char name[len];\n  Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get();\n  const Api::SysCallIntResult result = os_sys_calls.gethostname(name, len);\n\n  absl::optional<std::string> hostname;\n  if (result.rc_ == 0) {\n    hostname = name;\n  }\n\n  return hostname;\n}\n\nconst std::string SubstitutionFormatUtils::getHostnameOrDefault() {\n  absl::optional<std::string> hostname = getHostname();\n  if (hostname.has_value()) {\n    return hostname.value();\n  }\n  return DefaultUnspecifiedValueString;\n}\n\nFormatterImpl::FormatterImpl(const std::string& format, bool omit_empty_values)\n   
 : empty_value_string_(omit_empty_values ? EMPTY_STRING : DefaultUnspecifiedValueString) {\n  providers_ = SubstitutionFormatParser::parse(format);\n}\n\nstd::string FormatterImpl::format(const Http::RequestHeaderMap& request_headers,\n                                  const Http::ResponseHeaderMap& response_headers,\n                                  const Http::ResponseTrailerMap& response_trailers,\n                                  const StreamInfo::StreamInfo& stream_info,\n                                  absl::string_view local_reply_body) const {\n  std::string log_line;\n  log_line.reserve(256);\n\n  for (const FormatterProviderPtr& provider : providers_) {\n    const auto bit = provider->format(request_headers, response_headers, response_trailers,\n                                      stream_info, local_reply_body);\n    log_line += bit.value_or(empty_value_string_);\n  }\n\n  return log_line;\n}\n\nstd::string JsonFormatterImpl::format(const Http::RequestHeaderMap& request_headers,\n                                      const Http::ResponseHeaderMap& response_headers,\n                                      const Http::ResponseTrailerMap& response_trailers,\n                                      const StreamInfo::StreamInfo& stream_info,\n                                      absl::string_view local_reply_body) const {\n  const auto output_struct =\n      toStruct(request_headers, response_headers, response_trailers, stream_info, local_reply_body);\n\n  const std::string log_line = MessageUtil::getJsonStringFromMessage(output_struct, false, true);\n  return absl::StrCat(log_line, \"\\n\");\n}\n\nJsonFormatterImpl::JsonFormatMapWrapper\nJsonFormatterImpl::toFormatMap(const ProtobufWkt::Struct& json_format) const {\n  auto output = std::make_unique<JsonFormatMap>();\n  for (const auto& pair : json_format.fields()) {\n    switch (pair.second.kind_case()) {\n    case ProtobufWkt::Value::kStringValue:\n      output->emplace(pair.first, 
SubstitutionFormatParser::parse(pair.second.string_value()));\n      break;\n    case ProtobufWkt::Value::kStructValue:\n      output->emplace(pair.first, toFormatMap(pair.second.struct_value()));\n      break;\n    default:\n      throw EnvoyException(\n          \"Only string values or nested structs are supported in the JSON access log format.\");\n    }\n  }\n  return {std::move(output)};\n};\n\nProtobufWkt::Struct JsonFormatterImpl::toStruct(const Http::RequestHeaderMap& request_headers,\n                                                const Http::ResponseHeaderMap& response_headers,\n                                                const Http::ResponseTrailerMap& response_trailers,\n                                                const StreamInfo::StreamInfo& stream_info,\n                                                absl::string_view local_reply_body) const {\n  const std::string& empty_value =\n      omit_empty_values_ ? EMPTY_STRING : DefaultUnspecifiedValueString;\n  const std::function<ProtobufWkt::Value(const std::vector<FormatterProviderPtr>&)>\n      providers_callback = [&](const std::vector<FormatterProviderPtr>& providers) {\n        ASSERT(!providers.empty());\n        if (providers.size() == 1) {\n          const auto& provider = providers.front();\n          if (preserve_types_) {\n            return provider->formatValue(request_headers, response_headers, response_trailers,\n                                         stream_info, local_reply_body);\n          }\n\n          if (omit_empty_values_) {\n            return ValueUtil::optionalStringValue(\n                provider->format(request_headers, response_headers, response_trailers, stream_info,\n                                 local_reply_body));\n          }\n\n          const auto str = provider->format(request_headers, response_headers, response_trailers,\n                                            stream_info, local_reply_body);\n          return 
ValueUtil::stringValue(str.value_or(DefaultUnspecifiedValueString));\n        }\n        // Multiple providers forces string output.\n        std::string str;\n        for (const auto& provider : providers) {\n          const auto bit = provider->format(request_headers, response_headers, response_trailers,\n                                            stream_info, local_reply_body);\n          str += bit.value_or(empty_value);\n        }\n        return ValueUtil::stringValue(str);\n      };\n  const std::function<ProtobufWkt::Value(const JsonFormatterImpl::JsonFormatMapWrapper&)>\n      json_format_map_callback = [&](const JsonFormatterImpl::JsonFormatMapWrapper& format) {\n        ProtobufWkt::Struct output;\n        auto* fields = output.mutable_fields();\n        JsonFormatMapVisitor visitor{json_format_map_callback, providers_callback};\n        for (const auto& pair : *format.value_) {\n          ProtobufWkt::Value value = absl::visit(visitor, pair.second);\n          if (omit_empty_values_ && value.kind_case() == ProtobufWkt::Value::kNullValue) {\n            continue;\n          }\n          (*fields)[pair.first] = value;\n        }\n        return ValueUtil::structValue(output);\n      };\n  return json_format_map_callback(json_output_format_).struct_value();\n}\n\nvoid SubstitutionFormatParser::parseCommandHeader(const std::string& token, const size_t start,\n                                                  std::string& main_header,\n                                                  std::string& alternative_header,\n                                                  absl::optional<size_t>& max_length) {\n  std::vector<std::string> subs;\n  parseCommand(token, start, \"?\", main_header, subs, max_length);\n  if (subs.size() > 1) {\n    throw EnvoyException(\n        // Header format rules support only one alternative header.\n        // docs/root/configuration/access_log.rst#format-rules\n        absl::StrCat(\"More than 1 alternative header specified in 
token: \", token));\n  }\n  if (subs.size() == 1) {\n    alternative_header = subs.front();\n  } else {\n    alternative_header = \"\";\n  }\n  // The main and alternative header should not contain invalid characters {NUL, LR, CF}.\n  if (std::regex_search(main_header, getNewlinePattern()) ||\n      std::regex_search(alternative_header, getNewlinePattern())) {\n    throw EnvoyException(\"Invalid header configuration. Format string contains newline.\");\n  }\n}\n\nvoid SubstitutionFormatParser::parseCommand(const std::string& token, const size_t start,\n                                            const std::string& separator, std::string& main,\n                                            std::vector<std::string>& sub_items,\n                                            absl::optional<size_t>& max_length) {\n  // TODO(dnoe): Convert this to use string_view throughout.\n  const size_t end_request = token.find(')', start);\n  sub_items.clear();\n  if (end_request != token.length() - 1) {\n    // Closing bracket is not found.\n    if (end_request == std::string::npos) {\n      throw EnvoyException(absl::StrCat(\"Closing bracket is missing in token: \", token));\n    }\n\n    // Closing bracket should be either last one or followed by ':' to denote limitation.\n    if (token[end_request + 1] != ':') {\n      throw EnvoyException(absl::StrCat(\"Incorrect position of ')' in token: \", token));\n    }\n\n    const auto length_str = absl::string_view(token).substr(end_request + 2);\n    uint64_t length_value;\n\n    if (!absl::SimpleAtoi(length_str, &length_value)) {\n      throw EnvoyException(absl::StrCat(\"Length must be an integer, given: \", length_str));\n    }\n\n    max_length = length_value;\n  }\n\n  const std::string name_data = token.substr(start, end_request - start);\n  if (!separator.empty()) {\n    const std::vector<std::string> keys = absl::StrSplit(name_data, separator);\n    if (!keys.empty()) {\n      // The main value is the first key\n      main = 
keys.at(0);\n      if (keys.size() > 1) {\n        // Sub items contain additional keys\n        sub_items.insert(sub_items.end(), keys.begin() + 1, keys.end());\n      }\n    }\n  } else {\n    main = name_data;\n  }\n}\n\n// TODO(derekargueta): #2967 - Rewrite SubstitutionFormatter with parser library & formal grammar\nstd::vector<FormatterProviderPtr> SubstitutionFormatParser::parse(const std::string& format) {\n  std::string current_token;\n  std::vector<FormatterProviderPtr> formatters;\n  static constexpr absl::string_view DYNAMIC_META_TOKEN{\"DYNAMIC_METADATA(\"};\n  static constexpr absl::string_view FILTER_STATE_TOKEN{\"FILTER_STATE(\"};\n  const std::regex command_w_args_regex(R\"EOF(^%([A-Z]|_)+(\\([^\\)]*\\))?(:[0-9]+)?(%))EOF\");\n\n  static constexpr absl::string_view PLAIN_SERIALIZATION{\"PLAIN\"};\n  static constexpr absl::string_view TYPED_SERIALIZATION{\"TYPED\"};\n\n  for (size_t pos = 0; pos < format.length(); ++pos) {\n    if (format[pos] == '%') {\n      if (!current_token.empty()) {\n        formatters.emplace_back(FormatterProviderPtr{new PlainStringFormatter(current_token)});\n        current_token = \"\";\n      }\n\n      std::smatch m;\n      const std::string search_space = format.substr(pos);\n      if (!std::regex_search(search_space, m, command_w_args_regex)) {\n        throw EnvoyException(\n            fmt::format(\"Incorrect configuration: {}. 
Couldn't find valid command at position {}\",\n                        format, pos));\n      }\n\n      const std::string match = m.str(0);\n      const std::string token = match.substr(1, match.length() - 2);\n      pos += 1;\n      const int command_end_position = pos + token.length();\n\n      if (absl::StartsWith(token, \"REQ(\")) {\n        std::string main_header, alternative_header;\n        absl::optional<size_t> max_length;\n\n        parseCommandHeader(token, ReqParamStart, main_header, alternative_header, max_length);\n\n        formatters.emplace_back(FormatterProviderPtr{\n            new RequestHeaderFormatter(main_header, alternative_header, max_length)});\n      } else if (absl::StartsWith(token, \"RESP(\")) {\n        std::string main_header, alternative_header;\n        absl::optional<size_t> max_length;\n\n        parseCommandHeader(token, RespParamStart, main_header, alternative_header, max_length);\n\n        formatters.emplace_back(FormatterProviderPtr{\n            new ResponseHeaderFormatter(main_header, alternative_header, max_length)});\n      } else if (absl::StartsWith(token, \"TRAILER(\")) {\n        std::string main_header, alternative_header;\n        absl::optional<size_t> max_length;\n\n        parseCommandHeader(token, TrailParamStart, main_header, alternative_header, max_length);\n\n        formatters.emplace_back(FormatterProviderPtr{\n            new ResponseTrailerFormatter(main_header, alternative_header, max_length)});\n      } else if (absl::StartsWith(token, \"LOCAL_REPLY_BODY\")) {\n        formatters.emplace_back(std::make_unique<LocalReplyBodyFormatter>());\n      } else if (absl::StartsWith(token, DYNAMIC_META_TOKEN)) {\n        std::string filter_namespace;\n        absl::optional<size_t> max_length;\n        std::vector<std::string> path;\n        const size_t start = DYNAMIC_META_TOKEN.size();\n\n        parseCommand(token, start, \":\", filter_namespace, path, max_length);\n        formatters.emplace_back(\n         
   FormatterProviderPtr{new DynamicMetadataFormatter(filter_namespace, path, max_length)});\n      } else if (absl::StartsWith(token, FILTER_STATE_TOKEN)) {\n        std::string key;\n        absl::optional<size_t> max_length;\n        std::vector<std::string> path;\n        const size_t start = FILTER_STATE_TOKEN.size();\n\n        parseCommand(token, start, \":\", key, path, max_length);\n        if (key.empty()) {\n          throw EnvoyException(\"Invalid filter state configuration, key cannot be empty.\");\n        }\n\n        const absl::string_view serialize_type =\n            !path.empty() ? path[path.size() - 1] : TYPED_SERIALIZATION;\n\n        if (serialize_type != PLAIN_SERIALIZATION && serialize_type != TYPED_SERIALIZATION) {\n          throw EnvoyException(\"Invalid filter state serialize type, only support PLAIN/TYPED.\");\n        }\n        const bool serialize_as_string = serialize_type == PLAIN_SERIALIZATION;\n\n        formatters.push_back(\n            std::make_unique<FilterStateFormatter>(key, max_length, serialize_as_string));\n      } else if (absl::StartsWith(token, \"START_TIME\")) {\n        const size_t parameters_length = pos + StartTimeParamStart + 1;\n        const size_t parameters_end = command_end_position - parameters_length;\n\n        const std::string args = token[StartTimeParamStart - 1] == '('\n                                     ? token.substr(StartTimeParamStart, parameters_end)\n                                     : \"\";\n        // Validate the input specifier here. The formatted string may be destined for a header, and\n        // should not contain invalid characters {NUL, LR, CF}.\n        if (std::regex_search(args, getStartTimeNewlinePattern())) {\n          throw EnvoyException(\"Invalid header configuration. 
Format string contains newline.\");\n        }\n        formatters.emplace_back(FormatterProviderPtr{new StartTimeFormatter(args)});\n      } else if (absl::StartsWith(token, \"GRPC_STATUS\")) {\n        formatters.emplace_back(FormatterProviderPtr{\n            new GrpcStatusFormatter(\"grpc-status\", \"\", absl::optional<size_t>())});\n      } else {\n        formatters.emplace_back(FormatterProviderPtr{new StreamInfoFormatter(token)});\n      }\n      pos = command_end_position;\n    } else {\n      current_token += format[pos];\n    }\n  }\n\n  if (!current_token.empty()) {\n    formatters.emplace_back(FormatterProviderPtr{new PlainStringFormatter(current_token)});\n  }\n\n  return formatters;\n}\n\n// StreamInfo std::string field extractor.\nclass StreamInfoStringFieldExtractor : public StreamInfoFormatter::FieldExtractor {\npublic:\n  using FieldExtractor = std::function<absl::optional<std::string>(const StreamInfo::StreamInfo&)>;\n\n  StreamInfoStringFieldExtractor(FieldExtractor f) : field_extractor_(f) {}\n\n  // StreamInfoFormatter::FieldExtractor\n  absl::optional<std::string> extract(const StreamInfo::StreamInfo& stream_info) const override {\n    return field_extractor_(stream_info);\n  }\n  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {\n    return ValueUtil::optionalStringValue(field_extractor_(stream_info));\n  }\n\nprivate:\n  FieldExtractor field_extractor_;\n};\n\n// StreamInfo std::chrono_nanoseconds field extractor.\nclass StreamInfoDurationFieldExtractor : public StreamInfoFormatter::FieldExtractor {\npublic:\n  using FieldExtractor =\n      std::function<absl::optional<std::chrono::nanoseconds>(const StreamInfo::StreamInfo&)>;\n\n  StreamInfoDurationFieldExtractor(FieldExtractor f) : field_extractor_(f) {}\n\n  // StreamInfoFormatter::FieldExtractor\n  absl::optional<std::string> extract(const StreamInfo::StreamInfo& stream_info) const override {\n    const auto millis = 
extractMillis(stream_info);\n    if (!millis) {\n      return absl::nullopt;\n    }\n\n    return fmt::format_int(millis.value()).str();\n  }\n  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {\n    const auto millis = extractMillis(stream_info);\n    if (!millis) {\n      return unspecifiedValue();\n    }\n\n    return ValueUtil::numberValue(millis.value());\n  }\n\nprivate:\n  absl::optional<int64_t> extractMillis(const StreamInfo::StreamInfo& stream_info) const {\n    const auto time = field_extractor_(stream_info);\n    if (time) {\n      return std::chrono::duration_cast<std::chrono::milliseconds>(time.value()).count();\n    }\n    return absl::nullopt;\n  }\n\n  FieldExtractor field_extractor_;\n};\n\n// StreamInfo uint64_t field extractor.\nclass StreamInfoUInt64FieldExtractor : public StreamInfoFormatter::FieldExtractor {\npublic:\n  using FieldExtractor = std::function<uint64_t(const StreamInfo::StreamInfo&)>;\n\n  StreamInfoUInt64FieldExtractor(FieldExtractor f) : field_extractor_(f) {}\n\n  // StreamInfoFormatter::FieldExtractor\n  absl::optional<std::string> extract(const StreamInfo::StreamInfo& stream_info) const override {\n    return fmt::format_int(field_extractor_(stream_info)).str();\n  }\n  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {\n    return ValueUtil::numberValue(field_extractor_(stream_info));\n  }\n\nprivate:\n  FieldExtractor field_extractor_;\n};\n\n// StreamInfo Envoy::Network::Address::InstanceConstSharedPtr field extractor.\nclass StreamInfoAddressFieldExtractor : public StreamInfoFormatter::FieldExtractor {\npublic:\n  using FieldExtractor =\n      std::function<Network::Address::InstanceConstSharedPtr(const StreamInfo::StreamInfo&)>;\n\n  static std::unique_ptr<StreamInfoAddressFieldExtractor> withPort(FieldExtractor f) {\n    return std::make_unique<StreamInfoAddressFieldExtractor>(\n        f, 
StreamInfoFormatter::StreamInfoAddressFieldExtractionType::WithPort);\n  }\n\n  static std::unique_ptr<StreamInfoAddressFieldExtractor> withoutPort(FieldExtractor f) {\n    return std::make_unique<StreamInfoAddressFieldExtractor>(\n        f, StreamInfoFormatter::StreamInfoAddressFieldExtractionType::WithoutPort);\n  }\n\n  static std::unique_ptr<StreamInfoAddressFieldExtractor> justPort(FieldExtractor f) {\n    return std::make_unique<StreamInfoAddressFieldExtractor>(\n        f, StreamInfoFormatter::StreamInfoAddressFieldExtractionType::JustPort);\n  }\n\n  StreamInfoAddressFieldExtractor(\n      FieldExtractor f, StreamInfoFormatter::StreamInfoAddressFieldExtractionType extraction_type)\n      : field_extractor_(f), extraction_type_(extraction_type) {}\n\n  // StreamInfoFormatter::FieldExtractor\n  absl::optional<std::string> extract(const StreamInfo::StreamInfo& stream_info) const override {\n    Network::Address::InstanceConstSharedPtr address = field_extractor_(stream_info);\n    if (!address) {\n      return absl::nullopt;\n    }\n\n    return toString(*address);\n  }\n  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {\n    Network::Address::InstanceConstSharedPtr address = field_extractor_(stream_info);\n    if (!address) {\n      return unspecifiedValue();\n    }\n\n    return ValueUtil::stringValue(toString(*address));\n  }\n\nprivate:\n  std::string toString(const Network::Address::Instance& address) const {\n    switch (extraction_type_) {\n    case StreamInfoFormatter::StreamInfoAddressFieldExtractionType::WithoutPort:\n      return StreamInfo::Utility::formatDownstreamAddressNoPort(address);\n    case StreamInfoFormatter::StreamInfoAddressFieldExtractionType::JustPort:\n      return StreamInfo::Utility::formatDownstreamAddressJustPort(address);\n    case StreamInfoFormatter::StreamInfoAddressFieldExtractionType::WithPort:\n    default:\n      return address.asString();\n    }\n  }\n\n  FieldExtractor 
field_extractor_;\n  const StreamInfoFormatter::StreamInfoAddressFieldExtractionType extraction_type_;\n};\n\n// Ssl::ConnectionInfo std::string field extractor.\nclass StreamInfoSslConnectionInfoFieldExtractor : public StreamInfoFormatter::FieldExtractor {\npublic:\n  using FieldExtractor =\n      std::function<absl::optional<std::string>(const Ssl::ConnectionInfo& connection_info)>;\n\n  StreamInfoSslConnectionInfoFieldExtractor(FieldExtractor f) : field_extractor_(f) {}\n\n  absl::optional<std::string> extract(const StreamInfo::StreamInfo& stream_info) const override {\n    if (stream_info.downstreamSslConnection() == nullptr) {\n      return absl::nullopt;\n    }\n\n    const auto value = field_extractor_(*stream_info.downstreamSslConnection());\n    if (value && value->empty()) {\n      return absl::nullopt;\n    }\n\n    return value;\n  }\n\n  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {\n    if (stream_info.downstreamSslConnection() == nullptr) {\n      return unspecifiedValue();\n    }\n\n    const auto value = field_extractor_(*stream_info.downstreamSslConnection());\n    if (value && value->empty()) {\n      return unspecifiedValue();\n    }\n\n    return ValueUtil::optionalStringValue(value);\n  }\n\nprivate:\n  FieldExtractor field_extractor_;\n};\n\nStreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) {\n  if (field_name == \"REQUEST_DURATION\") {\n    field_extractor_ = std::make_unique<StreamInfoDurationFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.lastDownstreamRxByteReceived();\n        });\n  } else if (field_name == \"RESPONSE_DURATION\") {\n    field_extractor_ = std::make_unique<StreamInfoDurationFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.firstUpstreamRxByteReceived();\n        });\n  } else if (field_name == \"RESPONSE_TX_DURATION\") {\n    field_extractor_ = 
std::make_unique<StreamInfoDurationFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          auto downstream = stream_info.lastDownstreamTxByteSent();\n          auto upstream = stream_info.firstUpstreamRxByteReceived();\n\n          absl::optional<std::chrono::nanoseconds> result;\n          if (downstream && upstream) {\n            result = downstream.value() - upstream.value();\n          }\n\n          return result;\n        });\n  } else if (field_name == \"BYTES_RECEIVED\") {\n    field_extractor_ = std::make_unique<StreamInfoUInt64FieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) { return stream_info.bytesReceived(); });\n  } else if (field_name == \"PROTOCOL\") {\n    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          return SubstitutionFormatUtils::protocolToString(stream_info.protocol());\n        });\n  } else if (field_name == \"RESPONSE_CODE\") {\n    field_extractor_ = std::make_unique<StreamInfoUInt64FieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.responseCode().value_or(0);\n        });\n  } else if (field_name == \"RESPONSE_CODE_DETAILS\") {\n    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.responseCodeDetails();\n        });\n  } else if (field_name == \"CONNECTION_TERMINATION_DETAILS\") {\n    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.connectionTerminationDetails();\n        });\n  } else if (field_name == \"BYTES_SENT\") {\n    field_extractor_ = std::make_unique<StreamInfoUInt64FieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) { return stream_info.bytesSent(); });\n  } else if (field_name == \"DURATION\") {\n    
field_extractor_ = std::make_unique<StreamInfoDurationFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) { return stream_info.requestComplete(); });\n  } else if (field_name == \"RESPONSE_FLAGS\") {\n    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          return StreamInfo::ResponseFlagUtils::toShortString(stream_info);\n        });\n  } else if (field_name == \"UPSTREAM_HOST\") {\n    field_extractor_ =\n        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.upstreamHost() ? stream_info.upstreamHost()->address() : nullptr;\n        });\n  } else if (field_name == \"UPSTREAM_CLUSTER\") {\n    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          std::string upstream_cluster_name;\n          if (nullptr != stream_info.upstreamHost()) {\n            upstream_cluster_name = stream_info.upstreamHost()->cluster().name();\n          }\n\n          return upstream_cluster_name.empty()\n                     ? 
absl::nullopt\n                     : absl::make_optional<std::string>(upstream_cluster_name);\n        });\n  } else if (field_name == \"UPSTREAM_LOCAL_ADDRESS\") {\n    field_extractor_ =\n        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.upstreamLocalAddress();\n        });\n  } else if (field_name == \"DOWNSTREAM_LOCAL_ADDRESS\") {\n    field_extractor_ =\n        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.downstreamLocalAddress();\n        });\n  } else if (field_name == \"DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT\") {\n    field_extractor_ = StreamInfoAddressFieldExtractor::withoutPort(\n        [](const Envoy::StreamInfo::StreamInfo& stream_info) {\n          return stream_info.downstreamLocalAddress();\n        });\n  } else if (field_name == \"DOWNSTREAM_LOCAL_PORT\") {\n    field_extractor_ = StreamInfoAddressFieldExtractor::justPort(\n        [](const Envoy::StreamInfo::StreamInfo& stream_info) {\n          return stream_info.downstreamLocalAddress();\n        });\n  } else if (field_name == \"DOWNSTREAM_REMOTE_ADDRESS\") {\n    field_extractor_ =\n        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.downstreamRemoteAddress();\n        });\n  } else if (field_name == \"DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT\") {\n    field_extractor_ =\n        StreamInfoAddressFieldExtractor::withoutPort([](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.downstreamRemoteAddress();\n        });\n  } else if (field_name == \"DOWNSTREAM_DIRECT_REMOTE_ADDRESS\") {\n    field_extractor_ =\n        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.downstreamDirectRemoteAddress();\n        });\n  } else if (field_name == 
\"DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT\") {\n    field_extractor_ =\n        StreamInfoAddressFieldExtractor::withoutPort([](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.downstreamDirectRemoteAddress();\n        });\n  } else if (field_name == \"CONNECTION_ID\") {\n    field_extractor_ = std::make_unique<StreamInfoUInt64FieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          return stream_info.connectionID().value_or(0);\n        });\n  } else if (field_name == \"REQUESTED_SERVER_NAME\") {\n    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          absl::optional<std::string> result;\n          if (!stream_info.requestedServerName().empty()) {\n            result = stream_info.requestedServerName();\n          }\n          return result;\n        });\n  } else if (field_name == \"ROUTE_NAME\") {\n    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          absl::optional<std::string> result;\n          std::string route_name = stream_info.getRouteName();\n          if (!route_name.empty()) {\n            result = route_name;\n          }\n          return result;\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_URI_SAN\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          return absl::StrJoin(connection_info.uriSanPeerCertificate(), \",\");\n        });\n  } else if (field_name == \"DOWNSTREAM_LOCAL_URI_SAN\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          return absl::StrJoin(connection_info.uriSanLocalCertificate(), \",\");\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_SUBJECT\") {\n    field_extractor_ = 
std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.subjectPeerCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_LOCAL_SUBJECT\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.subjectLocalCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_TLS_SESSION_ID\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) { return connection_info.sessionId(); });\n  } else if (field_name == \"DOWNSTREAM_TLS_CIPHER\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.ciphersuiteString();\n        });\n  } else if (field_name == \"DOWNSTREAM_TLS_VERSION\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) { return connection_info.tlsVersion(); });\n  } else if (field_name == \"DOWNSTREAM_PEER_FINGERPRINT_256\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.sha256PeerCertificateDigest();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_FINGERPRINT_1\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.sha1PeerCertificateDigest();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_SERIAL\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n   
       return connection_info.serialNumberPeerCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_ISSUER\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.issuerPeerCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_CERT\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.urlEncodedPemEncodedPeerCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_CERT_V_START\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          absl::optional<SystemTime> time = connection_info.validFromPeerCertificate();\n          absl::optional<std::string> result;\n          if (time.has_value()) {\n            result = AccessLogDateTimeFormatter::fromTime(time.value());\n          }\n          return result;\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_CERT_V_END\") {\n    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(\n        [](const Ssl::ConnectionInfo& connection_info) {\n          absl::optional<SystemTime> time = connection_info.expirationPeerCertificate();\n          absl::optional<std::string> result;\n          if (time.has_value()) {\n            result = AccessLogDateTimeFormatter::fromTime(time.value());\n          }\n          return result;\n        });\n  } else if (field_name == \"UPSTREAM_TRANSPORT_FAILURE_REASON\") {\n    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(\n        [](const StreamInfo::StreamInfo& stream_info) {\n          absl::optional<std::string> result;\n          if (!stream_info.upstreamTransportFailureReason().empty()) {\n            result = 
stream_info.upstreamTransportFailureReason();\n          }\n          return result;\n        });\n  } else if (field_name == \"HOSTNAME\") {\n    absl::optional<std::string> hostname = SubstitutionFormatUtils::getHostname();\n    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(\n        [hostname](const StreamInfo::StreamInfo&) { return hostname; });\n  } else {\n    throw EnvoyException(fmt::format(\"Not supported field in StreamInfo: {}\", field_name));\n  }\n}\n\nabsl::optional<std::string> StreamInfoFormatter::format(const Http::RequestHeaderMap&,\n                                                        const Http::ResponseHeaderMap&,\n                                                        const Http::ResponseTrailerMap&,\n                                                        const StreamInfo::StreamInfo& stream_info,\n                                                        absl::string_view) const {\n  return field_extractor_->extract(stream_info);\n}\n\nProtobufWkt::Value StreamInfoFormatter::formatValue(const Http::RequestHeaderMap&,\n                                                    const Http::ResponseHeaderMap&,\n                                                    const Http::ResponseTrailerMap&,\n                                                    const StreamInfo::StreamInfo& stream_info,\n                                                    absl::string_view) const {\n  return field_extractor_->extractValue(stream_info);\n}\n\nPlainStringFormatter::PlainStringFormatter(const std::string& str) { str_.set_string_value(str); }\n\nabsl::optional<std::string> PlainStringFormatter::format(const Http::RequestHeaderMap&,\n                                                         const Http::ResponseHeaderMap&,\n                                                         const Http::ResponseTrailerMap&,\n                                                         const StreamInfo::StreamInfo&,\n                                                  
       absl::string_view) const {\n  return str_.string_value();\n}\n\nProtobufWkt::Value PlainStringFormatter::formatValue(const Http::RequestHeaderMap&,\n                                                     const Http::ResponseHeaderMap&,\n                                                     const Http::ResponseTrailerMap&,\n                                                     const StreamInfo::StreamInfo&,\n                                                     absl::string_view) const {\n  return str_;\n}\n\nabsl::optional<std::string>\nLocalReplyBodyFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                absl::string_view local_reply_body) const {\n  return std::string(local_reply_body);\n}\n\nProtobufWkt::Value LocalReplyBodyFormatter::formatValue(const Http::RequestHeaderMap&,\n                                                        const Http::ResponseHeaderMap&,\n                                                        const Http::ResponseTrailerMap&,\n                                                        const StreamInfo::StreamInfo&,\n                                                        absl::string_view local_reply_body) const {\n  return ValueUtil::stringValue(std::string(local_reply_body));\n}\n\nHeaderFormatter::HeaderFormatter(const std::string& main_header,\n                                 const std::string& alternative_header,\n                                 absl::optional<size_t> max_length)\n    : main_header_(main_header), alternative_header_(alternative_header), max_length_(max_length) {}\n\nconst Http::HeaderEntry* HeaderFormatter::findHeader(const Http::HeaderMap& headers) const {\n  const Http::HeaderEntry* header = headers.get(main_header_);\n\n  if (!header && !alternative_header_.get().empty()) {\n    return headers.get(alternative_header_);\n  }\n\n  return 
header;\n}\n\nabsl::optional<std::string> HeaderFormatter::format(const Http::HeaderMap& headers) const {\n  const Http::HeaderEntry* header = findHeader(headers);\n  if (!header) {\n    return absl::nullopt;\n  }\n\n  std::string val = std::string(header->value().getStringView());\n  truncate(val, max_length_);\n  return val;\n}\n\nProtobufWkt::Value HeaderFormatter::formatValue(const Http::HeaderMap& headers) const {\n  const Http::HeaderEntry* header = findHeader(headers);\n  if (!header) {\n    return unspecifiedValue();\n  }\n\n  std::string val = std::string(header->value().getStringView());\n  truncate(val, max_length_);\n  return ValueUtil::stringValue(val);\n}\n\nResponseHeaderFormatter::ResponseHeaderFormatter(const std::string& main_header,\n                                                 const std::string& alternative_header,\n                                                 absl::optional<size_t> max_length)\n    : HeaderFormatter(main_header, alternative_header, max_length) {}\n\nabsl::optional<std::string> ResponseHeaderFormatter::format(\n    const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers,\n    const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, absl::string_view) const {\n  return HeaderFormatter::format(response_headers);\n}\n\nProtobufWkt::Value ResponseHeaderFormatter::formatValue(\n    const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers,\n    const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, absl::string_view) const {\n  return HeaderFormatter::formatValue(response_headers);\n}\n\nRequestHeaderFormatter::RequestHeaderFormatter(const std::string& main_header,\n                                               const std::string& alternative_header,\n                                               absl::optional<size_t> max_length)\n    : HeaderFormatter(main_header, alternative_header, max_length) 
{}\n\nabsl::optional<std::string>\nRequestHeaderFormatter::format(const Http::RequestHeaderMap& request_headers,\n                               const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&,\n                               const StreamInfo::StreamInfo&, absl::string_view) const {\n  return HeaderFormatter::format(request_headers);\n}\n\nProtobufWkt::Value\nRequestHeaderFormatter::formatValue(const Http::RequestHeaderMap& request_headers,\n                                    const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&,\n                                    const StreamInfo::StreamInfo&, absl::string_view) const {\n  return HeaderFormatter::formatValue(request_headers);\n}\n\nResponseTrailerFormatter::ResponseTrailerFormatter(const std::string& main_header,\n                                                   const std::string& alternative_header,\n                                                   absl::optional<size_t> max_length)\n    : HeaderFormatter(main_header, alternative_header, max_length) {}\n\nabsl::optional<std::string>\nResponseTrailerFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap& response_trailers,\n                                 const StreamInfo::StreamInfo&, absl::string_view) const {\n  return HeaderFormatter::format(response_trailers);\n}\n\nProtobufWkt::Value\nResponseTrailerFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                      const Http::ResponseTrailerMap& response_trailers,\n                                      const StreamInfo::StreamInfo&, absl::string_view) const {\n  return HeaderFormatter::formatValue(response_trailers);\n}\n\nGrpcStatusFormatter::GrpcStatusFormatter(const std::string& main_header,\n                                         const std::string& alternative_header,\n                                         
absl::optional<size_t> max_length)\n    : HeaderFormatter(main_header, alternative_header, max_length) {}\n\nabsl::optional<std::string>\nGrpcStatusFormatter::format(const Http::RequestHeaderMap&,\n                            const Http::ResponseHeaderMap& response_headers,\n                            const Http::ResponseTrailerMap& response_trailers,\n                            const StreamInfo::StreamInfo& info, absl::string_view) const {\n  const auto grpc_status =\n      Grpc::Common::getGrpcStatus(response_trailers, response_headers, info, true);\n  if (!grpc_status.has_value()) {\n    return absl::nullopt;\n  }\n  const auto grpc_status_message = Grpc::Utility::grpcStatusToString(grpc_status.value());\n  if (grpc_status_message == EMPTY_STRING || grpc_status_message == \"InvalidCode\") {\n    return std::to_string(grpc_status.value());\n  }\n  return grpc_status_message;\n}\n\nProtobufWkt::Value\nGrpcStatusFormatter::formatValue(const Http::RequestHeaderMap&,\n                                 const Http::ResponseHeaderMap& response_headers,\n                                 const Http::ResponseTrailerMap& response_trailers,\n                                 const StreamInfo::StreamInfo& info, absl::string_view) const {\n  const auto grpc_status =\n      Grpc::Common::getGrpcStatus(response_trailers, response_headers, info, true);\n  if (!grpc_status.has_value()) {\n    return unspecifiedValue();\n  }\n  const auto grpc_status_message = Grpc::Utility::grpcStatusToString(grpc_status.value());\n  if (grpc_status_message == EMPTY_STRING || grpc_status_message == \"InvalidCode\") {\n    return ValueUtil::stringValue(std::to_string(grpc_status.value()));\n  }\n  return ValueUtil::stringValue(grpc_status_message);\n}\n\nMetadataFormatter::MetadataFormatter(const std::string& filter_namespace,\n                                     const std::vector<std::string>& path,\n                                     absl::optional<size_t> max_length)\n    : 
filter_namespace_(filter_namespace), path_(path), max_length_(max_length) {}\n\nabsl::optional<std::string>\nMetadataFormatter::formatMetadata(const envoy::config::core::v3::Metadata& metadata) const {\n  ProtobufWkt::Value value = formatMetadataValue(metadata);\n  if (value.kind_case() == ProtobufWkt::Value::kNullValue) {\n    return absl::nullopt;\n  }\n\n  std::string json = MessageUtil::getJsonStringFromMessage(value, false, true);\n  truncate(json, max_length_);\n  return json;\n}\n\nProtobufWkt::Value\nMetadataFormatter::formatMetadataValue(const envoy::config::core::v3::Metadata& metadata) const {\n  if (path_.empty()) {\n    const auto filter_it = metadata.filter_metadata().find(filter_namespace_);\n    if (filter_it == metadata.filter_metadata().end()) {\n      return unspecifiedValue();\n    }\n    ProtobufWkt::Value output;\n    output.mutable_struct_value()->CopyFrom(filter_it->second);\n    return output;\n  }\n\n  const ProtobufWkt::Value& val = Metadata::metadataValue(&metadata, filter_namespace_, path_);\n  if (val.kind_case() == ProtobufWkt::Value::KindCase::KIND_NOT_SET) {\n    return unspecifiedValue();\n  }\n\n  return val;\n}\n\n// TODO(glicht): Consider adding support for route/listener/cluster metadata as suggested by @htuch.\n// See: https://github.com/envoyproxy/envoy/issues/3006\nDynamicMetadataFormatter::DynamicMetadataFormatter(const std::string& filter_namespace,\n                                                   const std::vector<std::string>& path,\n                                                   absl::optional<size_t> max_length)\n    : MetadataFormatter(filter_namespace, path, max_length) {}\n\nabsl::optional<std::string> DynamicMetadataFormatter::format(\n    const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&,\n    const StreamInfo::StreamInfo& stream_info, absl::string_view) const {\n  return MetadataFormatter::formatMetadata(stream_info.dynamicMetadata());\n}\n\nProtobufWkt::Value 
DynamicMetadataFormatter::formatValue(const Http::RequestHeaderMap&,\n                                                         const Http::ResponseHeaderMap&,\n                                                         const Http::ResponseTrailerMap&,\n                                                         const StreamInfo::StreamInfo& stream_info,\n                                                         absl::string_view) const {\n  return MetadataFormatter::formatMetadataValue(stream_info.dynamicMetadata());\n}\n\nFilterStateFormatter::FilterStateFormatter(const std::string& key,\n                                           absl::optional<size_t> max_length,\n                                           bool serialize_as_string)\n    : key_(key), max_length_(max_length), serialize_as_string_(serialize_as_string) {}\n\nconst Envoy::StreamInfo::FilterState::Object*\nFilterStateFormatter::filterState(const StreamInfo::StreamInfo& stream_info) const {\n  const StreamInfo::FilterState& filter_state = stream_info.filterState();\n  if (!filter_state.hasDataWithName(key_)) {\n    return nullptr;\n  }\n  return &filter_state.getDataReadOnly<StreamInfo::FilterState::Object>(key_);\n}\n\nabsl::optional<std::string> FilterStateFormatter::format(const Http::RequestHeaderMap&,\n                                                         const Http::ResponseHeaderMap&,\n                                                         const Http::ResponseTrailerMap&,\n                                                         const StreamInfo::StreamInfo& stream_info,\n                                                         absl::string_view) const {\n  const Envoy::StreamInfo::FilterState::Object* state = filterState(stream_info);\n  if (!state) {\n    return absl::nullopt;\n  }\n\n  if (serialize_as_string_) {\n    absl::optional<std::string> plain_value = state->serializeAsString();\n    if (plain_value.has_value()) {\n      truncate(plain_value.value(), max_length_);\n      return 
plain_value.value();\n    }\n    return absl::nullopt;\n  }\n\n  ProtobufTypes::MessagePtr proto = state->serializeAsProto();\n  if (proto == nullptr) {\n    return absl::nullopt;\n  }\n\n  std::string value;\n  const auto status = Protobuf::util::MessageToJsonString(*proto, &value);\n  if (!status.ok()) {\n    // If the message contains an unknown Any (from WASM or Lua), MessageToJsonString will fail.\n    // TODO(lizan): add support of unknown Any.\n    return absl::nullopt;\n  }\n\n  truncate(value, max_length_);\n  return value;\n}\n\nProtobufWkt::Value FilterStateFormatter::formatValue(const Http::RequestHeaderMap&,\n                                                     const Http::ResponseHeaderMap&,\n                                                     const Http::ResponseTrailerMap&,\n                                                     const StreamInfo::StreamInfo& stream_info,\n                                                     absl::string_view) const {\n  const Envoy::StreamInfo::FilterState::Object* state = filterState(stream_info);\n  if (!state) {\n    return unspecifiedValue();\n  }\n\n  if (serialize_as_string_) {\n    absl::optional<std::string> plain_value = state->serializeAsString();\n    if (plain_value.has_value()) {\n      truncate(plain_value.value(), max_length_);\n      return ValueUtil::stringValue(plain_value.value());\n    }\n    return unspecifiedValue();\n  }\n\n  ProtobufTypes::MessagePtr proto = state->serializeAsProto();\n  if (!proto) {\n    return unspecifiedValue();\n  }\n\n  ProtobufWkt::Value val;\n  try {\n    MessageUtil::jsonConvertValue(*proto, val);\n  } catch (EnvoyException& ex) {\n    return unspecifiedValue();\n  }\n  return val;\n}\n\nStartTimeFormatter::StartTimeFormatter(const std::string& format) : date_formatter_(format) {}\n\nabsl::optional<std::string> StartTimeFormatter::format(const Http::RequestHeaderMap&,\n                                                       const Http::ResponseHeaderMap&,\n             
                                          const Http::ResponseTrailerMap&,\n                                                       const StreamInfo::StreamInfo& stream_info,\n                                                       absl::string_view) const {\n  if (date_formatter_.formatString().empty()) {\n    return AccessLogDateTimeFormatter::fromTime(stream_info.startTime());\n  }\n  return date_formatter_.fromTime(stream_info.startTime());\n}\n\nProtobufWkt::Value StartTimeFormatter::formatValue(\n    const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers,\n    const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info,\n    absl::string_view local_reply_body) const {\n  return ValueUtil::optionalStringValue(\n      format(request_headers, response_headers, response_trailers, stream_info, local_reply_body));\n}\n\n} // namespace Formatter\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/formatter/substitution_formatter.h",
    "content": "#pragma once\n\n#include <functional>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/formatter/substitution_formatter.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"common/common/utility.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Formatter {\n\n/**\n * Access log format parser.\n */\nclass SubstitutionFormatParser {\npublic:\n  static std::vector<FormatterProviderPtr> parse(const std::string& format);\n\nprivate:\n  /**\n   * Parse a header format rule of the form: %REQ(X?Y):Z% .\n   * Will populate a main_header and an optional alternative header if specified.\n   * See doc:\n   * docs/root/configuration/access_log.rst#format-rules\n   */\n  static void parseCommandHeader(const std::string& token, const size_t start,\n                                 std::string& main_header, std::string& alternative_header,\n                                 absl::optional<size_t>& max_length);\n\n  /**\n   * General parse command utility. Will parse token from start position. Token is expected to end\n   * with ')'. An optional \":max_length\" may be specified after the closing ')' char. Token may\n   * contain multiple values separated by \"separator\" string. First value will be populated in\n   * \"main\" and any additional sub values will be set in the vector \"subitems\". 
For example token\n   * of: \"com.test.my_filter:test_object:inner_key):100\" with separator of \":\" will set the\n   * following:\n   * - main: com.test.my_filter\n   * - subitems: {test_object, inner_key}\n   * - max_length: 100\n   *\n   * @param token the token to parse\n   * @param start the index to start parsing from\n   * @param separator separator between values\n   * @param main the first value\n   * @param sub_items any additional values\n   * @param max_length optional max_length will be populated if specified\n   *\n   * TODO(glicht) Rewrite with a parser library. See:\n   * https://github.com/envoyproxy/envoy/issues/2967\n   */\n  static void parseCommand(const std::string& token, const size_t start,\n                           const std::string& separator, std::string& main,\n                           std::vector<std::string>& sub_items, absl::optional<size_t>& max_length);\n\n  // the indexes of where the parameters for each directive is expected to begin\n  static const size_t ReqParamStart{sizeof(\"REQ(\") - 1};\n  static const size_t RespParamStart{sizeof(\"RESP(\") - 1};\n  static const size_t TrailParamStart{sizeof(\"TRAILER(\") - 1};\n  static const size_t StartTimeParamStart{sizeof(\"START_TIME(\") - 1};\n};\n\n/**\n * Util class for access log format.\n */\nclass SubstitutionFormatUtils {\npublic:\n  static FormatterPtr defaultSubstitutionFormatter();\n  // Optional references are not supported, but this method has large performance\n  // impact, so using reference_wrapper.\n  static const absl::optional<std::reference_wrapper<const std::string>>\n  protocolToString(const absl::optional<Http::Protocol>& protocol);\n  static const std::string&\n  protocolToStringOrDefault(const absl::optional<Http::Protocol>& protocol);\n  static const absl::optional<std::string> getHostname();\n  static const std::string getHostnameOrDefault();\n\nprivate:\n  SubstitutionFormatUtils();\n\n  static const std::string DEFAULT_FORMAT;\n};\n\n/**\n * Composite 
formatter implementation.\n */\nclass FormatterImpl : public Formatter {\npublic:\n  FormatterImpl(const std::string& format, bool omit_empty_values = false);\n\n  // Formatter::format\n  std::string format(const Http::RequestHeaderMap& request_headers,\n                     const Http::ResponseHeaderMap& response_headers,\n                     const Http::ResponseTrailerMap& response_trailers,\n                     const StreamInfo::StreamInfo& stream_info,\n                     absl::string_view local_reply_body) const override;\n\nprivate:\n  const std::string& empty_value_string_;\n  std::vector<FormatterProviderPtr> providers_;\n};\n\nclass JsonFormatterImpl : public Formatter {\npublic:\n  JsonFormatterImpl(const ProtobufWkt::Struct& format_mapping, bool preserve_types,\n                    bool omit_empty_values)\n      : omit_empty_values_(omit_empty_values), preserve_types_(preserve_types),\n        json_output_format_(toFormatMap(format_mapping)) {}\n\n  // Formatter::format\n  std::string format(const Http::RequestHeaderMap& request_headers,\n                     const Http::ResponseHeaderMap& response_headers,\n                     const Http::ResponseTrailerMap& response_trailers,\n                     const StreamInfo::StreamInfo& stream_info,\n                     absl::string_view local_reply_body) const override;\n\nprivate:\n  struct JsonFormatMapWrapper;\n  using JsonFormatMapValue =\n      absl::variant<const std::vector<FormatterProviderPtr>, const JsonFormatMapWrapper>;\n  // Although not required for JSON, it is nice to have the order of properties\n  // preserved between the format and the log entry, thus std::map.\n  using JsonFormatMap = std::map<std::string, JsonFormatMapValue>;\n  using JsonFormatMapPtr = std::unique_ptr<JsonFormatMap>;\n  struct JsonFormatMapWrapper {\n    JsonFormatMapPtr value_;\n  };\n\n  bool omit_empty_values_;\n  bool preserve_types_;\n  const JsonFormatMapWrapper json_output_format_;\n\n  ProtobufWkt::Struct 
toStruct(const Http::RequestHeaderMap& request_headers,\n                               const Http::ResponseHeaderMap& response_headers,\n                               const Http::ResponseTrailerMap& response_trailers,\n                               const StreamInfo::StreamInfo& stream_info,\n                               absl::string_view local_reply_body) const;\n  JsonFormatMapWrapper toFormatMap(const ProtobufWkt::Struct& json_format) const;\n};\n\n/**\n * FormatterProvider for string literals. It ignores headers and stream info and returns string by\n * which it was initialized.\n */\nclass PlainStringFormatter : public FormatterProvider {\npublic:\n  PlainStringFormatter(const std::string& str);\n\n  // FormatterProvider\n  absl::optional<std::string> format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                     const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                     absl::string_view) const override;\n  ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view) const override;\n\nprivate:\n  ProtobufWkt::Value str_;\n};\n\n/**\n * FormatterProvider for local_reply_body. 
It returns the string from `local_reply_body` argument.\n */\nclass LocalReplyBodyFormatter : public FormatterProvider {\npublic:\n  LocalReplyBodyFormatter() = default;\n\n  // Formatter::format\n  absl::optional<std::string> format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                     const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                     absl::string_view local_reply_body) const override;\n  ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view local_reply_body) const override;\n};\n\nclass HeaderFormatter {\npublic:\n  HeaderFormatter(const std::string& main_header, const std::string& alternative_header,\n                  absl::optional<size_t> max_length);\n\nprotected:\n  absl::optional<std::string> format(const Http::HeaderMap& headers) const;\n  ProtobufWkt::Value formatValue(const Http::HeaderMap& headers) const;\n\nprivate:\n  const Http::HeaderEntry* findHeader(const Http::HeaderMap& headers) const;\n\n  Http::LowerCaseString main_header_;\n  Http::LowerCaseString alternative_header_;\n  absl::optional<size_t> max_length_;\n};\n\n/**\n * FormatterProvider for request headers.\n */\nclass RequestHeaderFormatter : public FormatterProvider, HeaderFormatter {\npublic:\n  RequestHeaderFormatter(const std::string& main_header, const std::string& alternative_header,\n                         absl::optional<size_t> max_length);\n\n  // FormatterProvider\n  absl::optional<std::string> format(const Http::RequestHeaderMap& request_headers,\n                                     const Http::ResponseHeaderMap&,\n                                     const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                     absl::string_view) const override;\n  
ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view) const override;\n};\n\n/**\n * FormatterProvider for response headers.\n */\nclass ResponseHeaderFormatter : public FormatterProvider, HeaderFormatter {\npublic:\n  ResponseHeaderFormatter(const std::string& main_header, const std::string& alternative_header,\n                          absl::optional<size_t> max_length);\n\n  // FormatterProvider\n  absl::optional<std::string> format(const Http::RequestHeaderMap&,\n                                     const Http::ResponseHeaderMap& response_headers,\n                                     const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                     absl::string_view) const override;\n  ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view) const override;\n};\n\n/**\n * FormatterProvider for response trailers.\n */\nclass ResponseTrailerFormatter : public FormatterProvider, HeaderFormatter {\npublic:\n  ResponseTrailerFormatter(const std::string& main_header, const std::string& alternative_header,\n                           absl::optional<size_t> max_length);\n\n  // FormatterProvider\n  absl::optional<std::string> format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                     const Http::ResponseTrailerMap& response_trailers,\n                                     const StreamInfo::StreamInfo&,\n                                     absl::string_view) const override;\n  ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const 
Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view) const override;\n};\n\n/**\n * FormatterProvider for grpc-status\n */\nclass GrpcStatusFormatter : public FormatterProvider, HeaderFormatter {\npublic:\n  GrpcStatusFormatter(const std::string& main_header, const std::string& alternative_header,\n                      absl::optional<size_t> max_length);\n\n  // FormatterProvider\n  absl::optional<std::string> format(const Http::RequestHeaderMap&,\n                                     const Http::ResponseHeaderMap& response_headers,\n                                     const Http::ResponseTrailerMap& response_trailers,\n                                     const StreamInfo::StreamInfo&,\n                                     absl::string_view) const override;\n  ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view) const override;\n};\n\n/**\n * FormatterProvider based on StreamInfo fields.\n */\nclass StreamInfoFormatter : public FormatterProvider {\npublic:\n  StreamInfoFormatter(const std::string& field_name);\n\n  // FormatterProvider\n  absl::optional<std::string> format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                     const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                     absl::string_view) const override;\n  ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view) const override;\n\n  class FieldExtractor {\n  public:\n    virtual ~FieldExtractor() = default;\n\n    virtual absl::optional<std::string> extract(const 
StreamInfo::StreamInfo&) const PURE;\n    virtual ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo&) const PURE;\n  };\n  using FieldExtractorPtr = std::unique_ptr<FieldExtractor>;\n\n  enum class StreamInfoAddressFieldExtractionType { WithPort, WithoutPort, JustPort };\n\nprivate:\n  FieldExtractorPtr field_extractor_;\n};\n\n/**\n * Base formatter for formatting Metadata objects\n */\nclass MetadataFormatter {\npublic:\n  MetadataFormatter(const std::string& filter_namespace, const std::vector<std::string>& path,\n                    absl::optional<size_t> max_length);\n\nprotected:\n  absl::optional<std::string>\n  formatMetadata(const envoy::config::core::v3::Metadata& metadata) const;\n  ProtobufWkt::Value formatMetadataValue(const envoy::config::core::v3::Metadata& metadata) const;\n\nprivate:\n  std::string filter_namespace_;\n  std::vector<std::string> path_;\n  absl::optional<size_t> max_length_;\n};\n\n/**\n * FormatterProvider for DynamicMetadata from StreamInfo.\n */\nclass DynamicMetadataFormatter : public FormatterProvider, MetadataFormatter {\npublic:\n  DynamicMetadataFormatter(const std::string& filter_namespace,\n                           const std::vector<std::string>& path, absl::optional<size_t> max_length);\n\n  // FormatterProvider\n  absl::optional<std::string> format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                     const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                     absl::string_view) const override;\n  ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view) const override;\n};\n\n/**\n * FormatterProvider for FilterState from StreamInfo.\n */\nclass FilterStateFormatter : public FormatterProvider {\npublic:\n  FilterStateFormatter(const 
std::string& key, absl::optional<size_t> max_length,\n                       bool serialize_as_string);\n\n  // FormatterProvider\n  absl::optional<std::string> format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                     const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                     absl::string_view) const override;\n  ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view) const override;\n\nprivate:\n  const Envoy::StreamInfo::FilterState::Object*\n  filterState(const StreamInfo::StreamInfo& stream_info) const;\n\n  std::string key_;\n  absl::optional<size_t> max_length_;\n\n  bool serialize_as_string_;\n};\n\n/**\n * FormatterProvider for request start time from StreamInfo.\n */\nclass StartTimeFormatter : public FormatterProvider {\npublic:\n  StartTimeFormatter(const std::string& format);\n\n  // FormatterProvider\n  absl::optional<std::string> format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                     const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                     absl::string_view) const override;\n  ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                                 const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&,\n                                 absl::string_view) const override;\n\nprivate:\n  const Envoy::DateFormatter date_formatter_;\n};\n\n} // namespace Formatter\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_google_grpc_external_deps\",\n    \"envoy_package\",\n    \"envoy_select_google_grpc\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"typed_async_client_lib\",\n    srcs = [\"typed_async_client.cc\"],\n    hdrs = [\"typed_async_client.h\"],\n    deps = [\n        \":codec_lib\",\n        \":context_lib\",\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/http:async_client_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"async_client_lib\",\n    srcs = [\"async_client_impl.cc\"],\n    hdrs = [\"async_client_impl.h\"],\n    deps = [\n        \":codec_lib\",\n        \":context_lib\",\n        \":typed_async_client_lib\",\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/http:async_client_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"async_client_manager_lib\",\n    srcs = [\"async_client_manager_impl.cc\"],\n    hdrs = [\"async_client_manager_impl.h\"],\n    deps = [\n        \":async_client_lib\",\n        \":context_lib\",\n        \"//include/envoy/grpc:async_client_manager_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n    ] + envoy_select_google_grpc([\":google_async_client_lib\"]),\n)\n\nenvoy_cc_library(\n    name = \"codec_lib\",\n    srcs = [\"codec.cc\"],\n    hdrs = [\"codec.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        
\"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"status_lib\",\n    srcs = [\"status.cc\"],\n    hdrs = [\"status.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/grpc:status\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"common_lib\",\n    srcs = [\"common.cc\"],\n    hdrs = [\"common.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/http:message_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:base64_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/grpc:status_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"context_lib\",\n    srcs = [\"context_impl.cc\"],\n    hdrs = [\"context_impl.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":common_lib\",\n        \":stat_names_lib\",\n        \"//include/envoy/grpc:context_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        
\"//source/common/stats:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"google_grpc_utils_lib\",\n    srcs = [\"google_grpc_utils.cc\"],\n    hdrs = [\"google_grpc_utils.h\"],\n    external_deps = [\n        \"abseil_optional\",\n        \"grpc\",\n    ],\n    deps = [\n        \":google_grpc_creds_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:macros\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/grpc:status_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stat_names_lib\",\n    srcs = [\"stat_names.cc\"],\n    hdrs = [\"stat_names.h\"],\n    deps = [\n        \"//include/envoy/grpc:status\",\n        \"//source/common/stats:symbol_table_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"google_async_client_lib\",\n    srcs = [\"google_async_client_impl.cc\"],\n    hdrs = [\"google_async_client_impl.h\"],\n    external_deps = [\n        \"abseil_synchronization\",\n        \"grpc\",\n    ],\n    deps = [\n        \":context_lib\",\n        \":google_grpc_context_lib\",\n        \":google_grpc_creds_lib\",\n        \":google_grpc_utils_lib\",\n        \":stat_names_lib\",\n        \":typed_async_client_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/grpc:google_grpc_creds_interface\",\n        \"//include/envoy/thread:thread_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/common:base64_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:thread_annotations\",\n        
\"//source/common/tracing:http_tracer_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"google_grpc_context_lib\",\n    srcs = [\"google_grpc_context.cc\"],\n    hdrs = [\"google_grpc_context.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:lock_guard_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/common:thread_lib\",\n    ] + envoy_google_grpc_external_deps(),\n)\n\nenvoy_cc_library(\n    name = \"google_grpc_creds_lib\",\n    srcs = [\"google_grpc_creds_impl.cc\"],\n    hdrs = [\"google_grpc_creds_impl.h\"],\n    external_deps = [\"grpc\"],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/grpc:google_grpc_creds_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/config:datasource_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/grpc/async_client_impl.cc",
    "content": "#include \"common/grpc/async_client_impl.h\"\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/utility.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nAsyncClientImpl::AsyncClientImpl(Upstream::ClusterManager& cm,\n                                 const envoy::config::core::v3::GrpcService& config,\n                                 TimeSource& time_source)\n    : cm_(cm), remote_cluster_name_(config.envoy_grpc().cluster_name()),\n      host_name_(config.envoy_grpc().authority()), initial_metadata_(config.initial_metadata()),\n      time_source_(time_source) {}\n\nAsyncClientImpl::~AsyncClientImpl() {\n  while (!active_streams_.empty()) {\n    active_streams_.front()->resetStream();\n  }\n}\n\nAsyncRequest* AsyncClientImpl::sendRaw(absl::string_view service_full_name,\n                                       absl::string_view method_name, Buffer::InstancePtr&& request,\n                                       RawAsyncRequestCallbacks& callbacks,\n                                       Tracing::Span& parent_span,\n                                       const Http::AsyncClient::RequestOptions& options) {\n  auto* const async_request = new AsyncRequestImpl(\n      *this, service_full_name, method_name, std::move(request), callbacks, parent_span, options);\n  AsyncStreamImplPtr grpc_stream{async_request};\n\n  grpc_stream->initialize(true);\n  if (grpc_stream->hasResetStream()) {\n    return nullptr;\n  }\n\n  LinkedList::moveIntoList(std::move(grpc_stream), active_streams_);\n  return async_request;\n}\n\nRawAsyncStream* AsyncClientImpl::startRaw(absl::string_view service_full_name,\n                                          absl::string_view method_name,\n                                          
RawAsyncStreamCallbacks& callbacks,\n                                          const Http::AsyncClient::StreamOptions& options) {\n  auto grpc_stream =\n      std::make_unique<AsyncStreamImpl>(*this, service_full_name, method_name, callbacks, options);\n\n  grpc_stream->initialize(false);\n  if (grpc_stream->hasResetStream()) {\n    return nullptr;\n  }\n\n  LinkedList::moveIntoList(std::move(grpc_stream), active_streams_);\n  return active_streams_.front().get();\n}\n\nAsyncStreamImpl::AsyncStreamImpl(AsyncClientImpl& parent, absl::string_view service_full_name,\n                                 absl::string_view method_name, RawAsyncStreamCallbacks& callbacks,\n                                 const Http::AsyncClient::StreamOptions& options)\n    : parent_(parent), service_full_name_(service_full_name), method_name_(method_name),\n      callbacks_(callbacks), options_(options) {}\n\nvoid AsyncStreamImpl::initialize(bool buffer_body_for_retry) {\n  if (parent_.cm_.get(parent_.remote_cluster_name_) == nullptr) {\n    callbacks_.onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, \"Cluster not available\");\n    http_reset_ = true;\n    return;\n  }\n\n  auto& http_async_client = parent_.cm_.httpAsyncClientForCluster(parent_.remote_cluster_name_);\n  dispatcher_ = &http_async_client.dispatcher();\n  stream_ = http_async_client.start(*this, options_.setBufferBodyForRetry(buffer_body_for_retry));\n\n  if (stream_ == nullptr) {\n    callbacks_.onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, EMPTY_STRING);\n    http_reset_ = true;\n    return;\n  }\n\n  // TODO(htuch): match Google gRPC base64 encoding behavior for *-bin headers, see\n  // https://github.com/envoyproxy/envoy/pull/2444#discussion_r163914459.\n  headers_message_ = Common::prepareHeaders(\n      parent_.host_name_.empty() ? 
parent_.remote_cluster_name_ : parent_.host_name_,\n      service_full_name_, method_name_, options_.timeout);\n  // Fill service-wide initial metadata.\n  for (const auto& header_value : parent_.initial_metadata_) {\n    headers_message_->headers().addCopy(Http::LowerCaseString(header_value.key()),\n                                        header_value.value());\n  }\n  callbacks_.onCreateInitialMetadata(headers_message_->headers());\n  stream_->sendHeaders(headers_message_->headers(), false);\n}\n\n// TODO(htuch): match Google gRPC base64 encoding behavior for *-bin headers, see\n// https://github.com/envoyproxy/envoy/pull/2444#discussion_r163914459.\nvoid AsyncStreamImpl::onHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) {\n  const auto http_response_status = Http::Utility::getResponseStatus(*headers);\n  const auto grpc_status = Common::getGrpcStatus(*headers);\n  callbacks_.onReceiveInitialMetadata(end_stream ? Http::ResponseHeaderMapImpl::create()\n                                                 : std::move(headers));\n  if (http_response_status != enumToInt(Http::Code::OK)) {\n    // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md requires that\n    // grpc-status be used if available.\n    if (end_stream && grpc_status) {\n      // Due to headers/trailers type differences we need to copy here. This is an uncommon case but\n      // we can potentially optimize in the future.\n\n      // TODO(mattklein123): clang-tidy is showing a use after move when passing to\n      // onReceiveInitialMetadata() above. 
This looks like an actual bug that I will fix in a\n      // follow up.\n      onTrailers(Http::createHeaderMap<Http::ResponseTrailerMapImpl>(*headers));\n      return;\n    }\n    // Technically this should be\n    // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md\n    // as given by Grpc::Utility::httpToGrpcStatus(), but the Google gRPC client treats\n    // this as WellKnownGrpcStatus::Canceled.\n    streamError(Status::WellKnownGrpcStatus::Canceled);\n    return;\n  }\n  if (end_stream) {\n    // Due to headers/trailers type differences we need to copy here. This is an uncommon case but\n    // we can potentially optimize in the future.\n    onTrailers(Http::createHeaderMap<Http::ResponseTrailerMapImpl>(*headers));\n  }\n}\n\nvoid AsyncStreamImpl::onData(Buffer::Instance& data, bool end_stream) {\n  decoded_frames_.clear();\n  if (!decoder_.decode(data, decoded_frames_)) {\n    streamError(Status::WellKnownGrpcStatus::Internal);\n    return;\n  }\n\n  for (auto& frame : decoded_frames_) {\n    if (frame.length_ > 0 && frame.flags_ != GRPC_FH_DEFAULT) {\n      streamError(Status::WellKnownGrpcStatus::Internal);\n      return;\n    }\n    if (!callbacks_.onReceiveMessageRaw(frame.data_ ? 
std::move(frame.data_)\n                                                    : std::make_unique<Buffer::OwnedImpl>())) {\n      streamError(Status::WellKnownGrpcStatus::Internal);\n      return;\n    }\n  }\n\n  if (end_stream) {\n    streamError(Status::WellKnownGrpcStatus::Unknown);\n  }\n}\n\n// TODO(htuch): match Google gRPC base64 encoding behavior for *-bin headers, see\n// https://github.com/envoyproxy/envoy/pull/2444#discussion_r163914459.\nvoid AsyncStreamImpl::onTrailers(Http::ResponseTrailerMapPtr&& trailers) {\n  auto grpc_status = Common::getGrpcStatus(*trailers);\n  const std::string grpc_message = Common::getGrpcMessage(*trailers);\n  callbacks_.onReceiveTrailingMetadata(std::move(trailers));\n  if (!grpc_status) {\n    grpc_status = Status::WellKnownGrpcStatus::Unknown;\n  }\n  callbacks_.onRemoteClose(grpc_status.value(), grpc_message);\n  cleanup();\n}\n\nvoid AsyncStreamImpl::streamError(Status::GrpcStatus grpc_status, const std::string& message) {\n  callbacks_.onReceiveTrailingMetadata(Http::ResponseTrailerMapImpl::create());\n  callbacks_.onRemoteClose(grpc_status, message);\n  resetStream();\n}\n\nvoid AsyncStreamImpl::onComplete() {\n  // No-op since stream completion is handled within other callbacks.\n}\n\nvoid AsyncStreamImpl::onReset() {\n  if (http_reset_) {\n    return;\n  }\n\n  http_reset_ = true;\n  streamError(Status::WellKnownGrpcStatus::Internal);\n}\n\nvoid AsyncStreamImpl::sendMessage(const Protobuf::Message& request, bool end_stream) {\n  stream_->sendData(*Common::serializeToGrpcFrame(request), end_stream);\n}\n\nvoid AsyncStreamImpl::sendMessageRaw(Buffer::InstancePtr&& buffer, bool end_stream) {\n  Common::prependGrpcFrameHeader(*buffer);\n  stream_->sendData(*buffer, end_stream);\n}\n\nvoid AsyncStreamImpl::closeStream() {\n  Buffer::OwnedImpl empty_buffer;\n  stream_->sendData(empty_buffer, true);\n}\n\nvoid AsyncStreamImpl::resetStream() { cleanup(); }\n\nvoid AsyncStreamImpl::cleanup() {\n  if (!http_reset_) {\n    
http_reset_ = true;\n    stream_->reset();\n  }\n\n  // This will destroy us, but only do so if we are actually in a list. This does not happen in\n  // the immediate failure case.\n  if (LinkedObject<AsyncStreamImpl>::inserted()) {\n    dispatcher_->deferredDelete(\n        LinkedObject<AsyncStreamImpl>::removeFromList(parent_.active_streams_));\n  }\n}\n\nAsyncRequestImpl::AsyncRequestImpl(AsyncClientImpl& parent, absl::string_view service_full_name,\n                                   absl::string_view method_name, Buffer::InstancePtr&& request,\n                                   RawAsyncRequestCallbacks& callbacks, Tracing::Span& parent_span,\n                                   const Http::AsyncClient::RequestOptions& options)\n    : AsyncStreamImpl(parent, service_full_name, method_name, *this, options),\n      request_(std::move(request)), callbacks_(callbacks) {\n\n  current_span_ = parent_span.spawnChild(Tracing::EgressConfig::get(),\n                                         \"async \" + parent.remote_cluster_name_ + \" egress\",\n                                         parent.time_source_.systemTime());\n  current_span_->setTag(Tracing::Tags::get().UpstreamCluster, parent.remote_cluster_name_);\n  current_span_->setTag(Tracing::Tags::get().Component, Tracing::Tags::get().Proxy);\n}\n\nvoid AsyncRequestImpl::initialize(bool buffer_body_for_retry) {\n  AsyncStreamImpl::initialize(buffer_body_for_retry);\n  if (this->hasResetStream()) {\n    return;\n  }\n  this->sendMessageRaw(std::move(request_), true);\n}\n\nvoid AsyncRequestImpl::cancel() {\n  current_span_->setTag(Tracing::Tags::get().Status, Tracing::Tags::get().Canceled);\n  current_span_->finishSpan();\n  this->resetStream();\n}\n\nvoid AsyncRequestImpl::onCreateInitialMetadata(Http::RequestHeaderMap& metadata) {\n  current_span_->injectContext(metadata);\n  callbacks_.onCreateInitialMetadata(metadata);\n}\n\nvoid AsyncRequestImpl::onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&&) {}\n\nbool 
AsyncRequestImpl::onReceiveMessageRaw(Buffer::InstancePtr&& response) {\n  response_ = std::move(response);\n  return true;\n}\n\nvoid AsyncRequestImpl::onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&&) {}\n\nvoid AsyncRequestImpl::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) {\n  current_span_->setTag(Tracing::Tags::get().GrpcStatusCode, std::to_string(status));\n\n  if (status != Grpc::Status::WellKnownGrpcStatus::Ok) {\n    current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True);\n    callbacks_.onFailure(status, message, *current_span_);\n  } else if (response_ == nullptr) {\n    current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True);\n    callbacks_.onFailure(Status::Internal, EMPTY_STRING, *current_span_);\n  } else {\n    callbacks_.onSuccessRaw(std::move(response_), *current_span_);\n  }\n\n  current_span_->finishSpan();\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/async_client_impl.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n\n#include \"common/common/linked_object.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/typed_async_client.h\"\n#include \"common/http/async_client_impl.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nclass AsyncRequestImpl;\n\nclass AsyncStreamImpl;\nusing AsyncStreamImplPtr = std::unique_ptr<AsyncStreamImpl>;\n\nclass AsyncClientImpl final : public RawAsyncClient {\npublic:\n  AsyncClientImpl(Upstream::ClusterManager& cm, const envoy::config::core::v3::GrpcService& config,\n                  TimeSource& time_source);\n  ~AsyncClientImpl() override;\n\n  // Grpc::AsyncClient\n  AsyncRequest* sendRaw(absl::string_view service_full_name, absl::string_view method_name,\n                        Buffer::InstancePtr&& request, RawAsyncRequestCallbacks& callbacks,\n                        Tracing::Span& parent_span,\n                        const Http::AsyncClient::RequestOptions& options) override;\n  RawAsyncStream* startRaw(absl::string_view service_full_name, absl::string_view method_name,\n                           RawAsyncStreamCallbacks& callbacks,\n                           const Http::AsyncClient::StreamOptions& options) override;\n\nprivate:\n  Upstream::ClusterManager& cm_;\n  const std::string remote_cluster_name_;\n  // The host header value in the http transport.\n  const std::string host_name_;\n  const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValue> initial_metadata_;\n  std::list<AsyncStreamImplPtr> active_streams_;\n  TimeSource& time_source_;\n\n  friend class AsyncRequestImpl;\n  friend class AsyncStreamImpl;\n};\n\nclass AsyncStreamImpl : public RawAsyncStream,\n                        Http::AsyncClient::StreamCallbacks,\n                        public Event::DeferredDeletable,\n                        public 
LinkedObject<AsyncStreamImpl> {\npublic:\n  AsyncStreamImpl(AsyncClientImpl& parent, absl::string_view service_full_name,\n                  absl::string_view method_name, RawAsyncStreamCallbacks& callbacks,\n                  const Http::AsyncClient::StreamOptions& options);\n\n  virtual void initialize(bool buffer_body_for_retry);\n\n  void sendMessage(const Protobuf::Message& request, bool end_stream);\n\n  // Http::AsyncClient::StreamCallbacks\n  void onHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override;\n  void onData(Buffer::Instance& data, bool end_stream) override;\n  void onTrailers(Http::ResponseTrailerMapPtr&& trailers) override;\n  void onComplete() override;\n  void onReset() override;\n\n  // Grpc::AsyncStream\n  void sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) override;\n  void closeStream() override;\n  void resetStream() override;\n  bool isAboveWriteBufferHighWatermark() const override {\n    return stream_ && stream_->isAboveWriteBufferHighWatermark();\n  }\n\n  bool hasResetStream() const { return http_reset_; }\n\nprivate:\n  void streamError(Status::GrpcStatus grpc_status, const std::string& message);\n  void streamError(Status::GrpcStatus grpc_status) { streamError(grpc_status, EMPTY_STRING); }\n\n  void cleanup();\n  void trailerResponse(absl::optional<Status::GrpcStatus> grpc_status,\n                       const std::string& grpc_message);\n\n  Event::Dispatcher* dispatcher_{};\n  Http::RequestMessagePtr headers_message_;\n  AsyncClientImpl& parent_;\n  std::string service_full_name_;\n  std::string method_name_;\n  RawAsyncStreamCallbacks& callbacks_;\n  Http::AsyncClient::StreamOptions options_;\n  bool http_reset_{};\n  Http::AsyncClient::Stream* stream_{};\n  Decoder decoder_;\n  // This is a member to avoid reallocation on every onData().\n  std::vector<Frame> decoded_frames_;\n\n  friend class AsyncClientImpl;\n};\n\nclass AsyncRequestImpl : public AsyncRequest, public AsyncStreamImpl, 
RawAsyncStreamCallbacks {\npublic:\n  AsyncRequestImpl(AsyncClientImpl& parent, absl::string_view service_full_name,\n                   absl::string_view method_name, Buffer::InstancePtr&& request,\n                   RawAsyncRequestCallbacks& callbacks, Tracing::Span& parent_span,\n                   const Http::AsyncClient::RequestOptions& options);\n\n  void initialize(bool buffer_body_for_retry) override;\n\n  // Grpc::AsyncRequest\n  void cancel() override;\n\nprivate:\n  // Grpc::AsyncStreamCallbacks\n  void onCreateInitialMetadata(Http::RequestHeaderMap& metadata) override;\n  void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&&) override;\n  bool onReceiveMessageRaw(Buffer::InstancePtr&& response) override;\n  void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&&) override;\n  void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override;\n\n  Buffer::InstancePtr request_;\n  RawAsyncRequestCallbacks& callbacks_;\n  Tracing::SpanPtr current_span_;\n  Buffer::InstancePtr response_;\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/async_client_manager_impl.cc",
    "content": "#include \"common/grpc/async_client_manager_impl.h\"\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/grpc/async_client_impl.h\"\n\n#ifdef ENVOY_GOOGLE_GRPC\n#include \"common/grpc/google_async_client_impl.h\"\n#endif\n\nnamespace Envoy {\nnamespace Grpc {\n\nAsyncClientFactoryImpl::AsyncClientFactoryImpl(Upstream::ClusterManager& cm,\n                                               const envoy::config::core::v3::GrpcService& config,\n                                               bool skip_cluster_check, TimeSource& time_source)\n    : cm_(cm), config_(config), time_source_(time_source) {\n  if (skip_cluster_check) {\n    return;\n  }\n\n  const std::string& cluster_name = config.envoy_grpc().cluster_name();\n  auto clusters = cm_.clusters();\n  const auto& it = clusters.find(cluster_name);\n  if (it == clusters.end()) {\n    throw EnvoyException(fmt::format(\"Unknown gRPC client cluster '{}'\", cluster_name));\n  }\n  if (it->second.get().info()->addedViaApi()) {\n    throw EnvoyException(fmt::format(\"gRPC client cluster '{}' is not static\", cluster_name));\n  }\n}\n\nAsyncClientManagerImpl::AsyncClientManagerImpl(Upstream::ClusterManager& cm,\n                                               ThreadLocal::Instance& tls, TimeSource& time_source,\n                                               Api::Api& api, const StatNames& stat_names)\n    : cm_(cm), tls_(tls), time_source_(time_source), api_(api), stat_names_(stat_names) {\n#ifdef ENVOY_GOOGLE_GRPC\n  google_tls_slot_ = tls.allocateSlot();\n  google_tls_slot_->set(\n      [&api](Event::Dispatcher&) { return std::make_shared<GoogleAsyncClientThreadLocal>(api); });\n#else\n  UNREFERENCED_PARAMETER(api_);\n#endif\n}\n\nRawAsyncClientPtr AsyncClientFactoryImpl::create() {\n  return std::make_unique<AsyncClientImpl>(cm_, config_, time_source_);\n}\n\nGoogleAsyncClientFactoryImpl::GoogleAsyncClientFactoryImpl(\n    ThreadLocal::Instance& tls, 
ThreadLocal::Slot* google_tls_slot, Stats::Scope& scope,\n    const envoy::config::core::v3::GrpcService& config, Api::Api& api, const StatNames& stat_names)\n    : tls_(tls), google_tls_slot_(google_tls_slot),\n      scope_(scope.createScope(fmt::format(\"grpc.{}.\", config.google_grpc().stat_prefix()))),\n      config_(config), api_(api), stat_names_(stat_names) {\n\n#ifndef ENVOY_GOOGLE_GRPC\n  UNREFERENCED_PARAMETER(tls_);\n  UNREFERENCED_PARAMETER(google_tls_slot_);\n  UNREFERENCED_PARAMETER(scope_);\n  UNREFERENCED_PARAMETER(config_);\n  UNREFERENCED_PARAMETER(api_);\n  UNREFERENCED_PARAMETER(stat_names_);\n  throw EnvoyException(\"Google C++ gRPC client is not linked\");\n#else\n  ASSERT(google_tls_slot_ != nullptr);\n#endif\n}\n\nRawAsyncClientPtr GoogleAsyncClientFactoryImpl::create() {\n#ifdef ENVOY_GOOGLE_GRPC\n  GoogleGenericStubFactory stub_factory;\n  return std::make_unique<GoogleAsyncClientImpl>(\n      tls_.dispatcher(), google_tls_slot_->getTyped<GoogleAsyncClientThreadLocal>(), stub_factory,\n      scope_, config_, api_, stat_names_);\n#else\n  return nullptr;\n#endif\n}\n\nAsyncClientFactoryPtr\nAsyncClientManagerImpl::factoryForGrpcService(const envoy::config::core::v3::GrpcService& config,\n                                              Stats::Scope& scope, bool skip_cluster_check) {\n  switch (config.target_specifier_case()) {\n  case envoy::config::core::v3::GrpcService::TargetSpecifierCase::kEnvoyGrpc:\n    return std::make_unique<AsyncClientFactoryImpl>(cm_, config, skip_cluster_check, time_source_);\n  case envoy::config::core::v3::GrpcService::TargetSpecifierCase::kGoogleGrpc:\n    return std::make_unique<GoogleAsyncClientFactoryImpl>(tls_, google_tls_slot_.get(), scope,\n                                                          config, api_, stat_names_);\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  return nullptr;\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/async_client_manager_impl.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/grpc/async_client_manager.h\"\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/grpc/stat_names.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nclass AsyncClientFactoryImpl : public AsyncClientFactory {\npublic:\n  AsyncClientFactoryImpl(Upstream::ClusterManager& cm,\n                         const envoy::config::core::v3::GrpcService& config,\n                         bool skip_cluster_check, TimeSource& time_source);\n\n  RawAsyncClientPtr create() override;\n\nprivate:\n  Upstream::ClusterManager& cm_;\n  const envoy::config::core::v3::GrpcService config_;\n  TimeSource& time_source_;\n};\n\nclass GoogleAsyncClientFactoryImpl : public AsyncClientFactory {\npublic:\n  GoogleAsyncClientFactoryImpl(ThreadLocal::Instance& tls, ThreadLocal::Slot* google_tls_slot,\n                               Stats::Scope& scope,\n                               const envoy::config::core::v3::GrpcService& config, Api::Api& api,\n                               const StatNames& stat_names);\n\n  RawAsyncClientPtr create() override;\n\nprivate:\n  ThreadLocal::Instance& tls_;\n  ThreadLocal::Slot* google_tls_slot_;\n  Stats::ScopeSharedPtr scope_;\n  const envoy::config::core::v3::GrpcService config_;\n  Api::Api& api_;\n  const StatNames& stat_names_;\n};\n\nclass AsyncClientManagerImpl : public AsyncClientManager {\npublic:\n  AsyncClientManagerImpl(Upstream::ClusterManager& cm, ThreadLocal::Instance& tls,\n                         TimeSource& time_source, Api::Api& api, const StatNames& stat_names);\n\n  // Grpc::AsyncClientManager\n  AsyncClientFactoryPtr factoryForGrpcService(const envoy::config::core::v3::GrpcService& config,\n                                              Stats::Scope& scope,\n                      
                        bool skip_cluster_check) override;\n\nprivate:\n  Upstream::ClusterManager& cm_;\n  ThreadLocal::Instance& tls_;\n  ThreadLocal::SlotPtr google_tls_slot_;\n  TimeSource& time_source_;\n  Api::Api& api_;\n  const StatNames& stat_names_;\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/codec.cc",
    "content": "#include \"common/grpc/codec.h\"\n\n#include <array>\n#include <cstdint>\n#include <memory>\n#include <vector>\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nEncoder::Encoder() = default;\n\nvoid Encoder::newFrame(uint8_t flags, uint64_t length, std::array<uint8_t, 5>& output) {\n  output[0] = flags;\n  output[1] = static_cast<uint8_t>(length >> 24);\n  output[2] = static_cast<uint8_t>(length >> 16);\n  output[3] = static_cast<uint8_t>(length >> 8);\n  output[4] = static_cast<uint8_t>(length);\n}\n\nvoid Encoder::prependFrameHeader(uint8_t flags, Buffer::Instance& buffer) {\n  // Compute the size of the payload and construct the length prefix.\n  std::array<uint8_t, Grpc::GRPC_FRAME_HEADER_SIZE> frame;\n  Grpc::Encoder().newFrame(flags, buffer.length(), frame);\n  Buffer::OwnedImpl frame_buffer(frame.data(), frame.size());\n  buffer.prepend(frame_buffer);\n}\n\nbool Decoder::decode(Buffer::Instance& input, std::vector<Frame>& output) {\n  decoding_error_ = false;\n  output_ = &output;\n  inspect(input);\n  output_ = nullptr;\n  if (decoding_error_) {\n    return false;\n  }\n  input.drain(input.length());\n  return true;\n}\n\nbool Decoder::frameStart(uint8_t flags) {\n  // Unsupported flags.\n  if (flags & ~GRPC_FH_COMPRESSED) {\n    decoding_error_ = true;\n    return false;\n  }\n  frame_.flags_ = flags;\n  return true;\n}\n\nvoid Decoder::frameDataStart() {\n  frame_.length_ = length_;\n  frame_.data_ = std::make_unique<Buffer::OwnedImpl>();\n}\n\nvoid Decoder::frameData(uint8_t* mem, uint64_t length) { frame_.data_->add(mem, length); }\n\nvoid Decoder::frameDataEnd() {\n  output_->push_back(std::move(frame_));\n  frame_.flags_ = 0;\n  frame_.length_ = 0;\n  frame_.data_ = nullptr;\n}\n\nuint64_t FrameInspector::inspect(const Buffer::Instance& data) {\n  uint64_t delta = 0;\n  for (const Buffer::RawSlice& slice : data.getRawSlices()) {\n    uint8_t* mem = 
reinterpret_cast<uint8_t*>(slice.mem_);\n    for (uint64_t j = 0; j < slice.len_;) {\n      uint8_t c = *mem;\n      switch (state_) {\n      case State::FhFlag:\n        if (!frameStart(c)) {\n          return delta;\n        }\n        count_ += 1;\n        delta += 1;\n        state_ = State::FhLen0;\n        mem++;\n        j++;\n        break;\n      case State::FhLen0:\n        length_ = static_cast<uint32_t>(c) << 24;\n        state_ = State::FhLen1;\n        mem++;\n        j++;\n        break;\n      case State::FhLen1:\n        length_ |= static_cast<uint32_t>(c) << 16;\n        state_ = State::FhLen2;\n        mem++;\n        j++;\n        break;\n      case State::FhLen2:\n        length_ |= static_cast<uint32_t>(c) << 8;\n        state_ = State::FhLen3;\n        mem++;\n        j++;\n        break;\n      case State::FhLen3:\n        length_ |= static_cast<uint32_t>(c);\n        frameDataStart();\n        if (length_ == 0) {\n          frameDataEnd();\n          state_ = State::FhFlag;\n        } else {\n          state_ = State::Data;\n        }\n        mem++;\n        j++;\n        break;\n      case State::Data:\n        uint64_t remain_in_buffer = slice.len_ - j;\n        if (remain_in_buffer <= length_) {\n          frameData(mem, remain_in_buffer);\n          mem += remain_in_buffer;\n          j += remain_in_buffer;\n          length_ -= remain_in_buffer;\n        } else {\n          frameData(mem, length_);\n          mem += length_;\n          j += length_;\n          length_ = 0;\n        }\n        if (length_ == 0) {\n          frameDataEnd();\n          state_ = State::FhFlag;\n        }\n        break;\n      }\n    }\n  }\n  return delta;\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/codec.h",
    "content": "#pragma once\n\n#include <array>\n#include <cstdint>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n// Last bit for an expanded message without compression.\nconst uint8_t GRPC_FH_DEFAULT = 0b0u;\n// Last bit for a compressed message.\nconst uint8_t GRPC_FH_COMPRESSED = 0b1u;\n\nconstexpr uint64_t GRPC_FRAME_HEADER_SIZE = sizeof(uint8_t) + sizeof(uint32_t);\n\nenum class CompressionAlgorithm { None, Gzip };\n\nstruct Frame {\n  uint8_t flags_;\n  uint32_t length_;\n  Buffer::InstancePtr data_;\n};\n\nclass Encoder {\npublic:\n  Encoder();\n\n  // Creates a new GRPC data frame with the given flags and length.\n  // @param flags supplies the GRPC data frame flags.\n  // @param length supplies the GRPC data frame length.\n  // @param output the buffer to store the encoded data. Its size must be 5.\n  void newFrame(uint8_t flags, uint64_t length, std::array<uint8_t, 5>& output);\n\n  // Prepend the gRPC frame into the buffer.\n  // @param flags supplies the GRPC data frame flags.\n  // @param buffer the buffer with the message payload.\n  void prependFrameHeader(uint8_t flags, Buffer::Instance& buffer);\n};\n\n// Wire format (http://www.grpc.io/docs/guides/wire.html) of GRPC data frame\n// header:\n//\n// -----------------------------------------------------------------------\n// |R|R|R|R|R|R|R|R|C|      L     |      L     |      L     |      L     |\n// -----------------------------------------------------------------------\n//    Flag (1 byte)                Message Length (4 bytes)\n//\n// A fixed header consists of five bytes.\n// The first byte is the Flag. The last one \"C\" bit indicates if the message\n// is compressed or not (0 is uncompressed, 1 is compressed). 
The other seven\n// \"R\" bits are reserved for future use.\n// The next four \"L\" bytes represent the message length in BigEndian format.\nenum class State {\n  // Waiting for decoding the flags (1 byte) of the GRPC data frame.\n  FhFlag,\n  // Waiting for decoding the 1st byte of the length (4 bytes in total) of the\n  // GRPC data frame.\n  FhLen0,\n  // Waiting for decoding the 2nd byte of the length (4 bytes in total) of the\n  // GRPC data frame.\n  FhLen1,\n  // Waiting for decoding the 3rd byte of the length (4 bytes in total) of the\n  // GRPC data frame.\n  FhLen2,\n  // Waiting for decoding the 4th byte of the length (4 bytes in total) of the\n  // GRPC data frame.\n  FhLen3,\n  // Waiting for decoding the data.\n  Data,\n};\n\nclass FrameInspector {\npublic:\n  // Inspects the given buffer with GRPC data frame and updates the frame count.\n  // Invokes visitor callbacks for each frame in the following sequence:\n  //   \"frameStart frameDataStart frameData* frameDataEnd\"\n  // If frameStart returns false, then the inspector aborts.\n  // Returns the increase in the frame count.\n  uint64_t inspect(const Buffer::Instance& input);\n\n  // Returns the current frame count, corresponding to the request/response\n  // message count. Counter is incremented on a frame start.\n  uint64_t frameCount() const { return count_; }\n\n  // Returns the current state in the frame parsing.\n  State state() const { return state_; }\n\n  virtual ~FrameInspector() = default;\n\nprotected:\n  virtual bool frameStart(uint8_t) { return true; }\n  virtual void frameDataStart() {}\n  virtual void frameData(uint8_t*, uint64_t) {}\n  virtual void frameDataEnd() {}\n\n  State state_{State::FhFlag};\n  uint32_t length_{0};\n  uint64_t count_{0};\n};\n\nclass Decoder : public FrameInspector {\npublic:\n  // Decodes the given buffer with GRPC data frame. Drains the input buffer when\n  // decoding succeeded (returns true). 
If the input is not sufficient to make a\n  // complete GRPC data frame, it will be buffered in the decoder. If a decoding\n  // error happened, the input buffer remains unchanged.\n  // @param input supplies the binary octets wrapped in a GRPC data frame.\n  // @param output supplies the buffer to store the decoded data.\n  // @return bool whether the decoding succeeded or not.\n  bool decode(Buffer::Instance& input, std::vector<Frame>& output);\n\n  // Determine the length of the current frame being decoded. This is useful when supplying a\n  // partial frame to decode() and wanting to know how many more bytes need to be read to complete\n  // the frame.\n  uint32_t length() const { return frame_.length_; }\n\n  // Indicates whether it has buffered any partial data.\n  bool hasBufferedData() const { return state_ != State::FhFlag; }\n\nprotected:\n  bool frameStart(uint8_t) override;\n  void frameDataStart() override;\n  void frameData(uint8_t*, uint64_t) override;\n  void frameDataEnd() override;\n\nprivate:\n  Frame frame_;\n  std::vector<Frame>* output_{nullptr};\n  bool decoding_error_{false};\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/common.cc",
    "content": "#include \"common/grpc/common.h\"\n\n#include <atomic>\n#include <cstdint>\n#include <cstring>\n#include <string>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/base64.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nbool Common::hasGrpcContentType(const Http::RequestOrResponseHeaderMap& headers) {\n  const absl::string_view content_type = headers.getContentTypeValue();\n  // Content type is gRPC if it is exactly \"application/grpc\" or starts with\n  // \"application/grpc+\". 
Specifically, something like application/grpc-web is not gRPC.\n  return absl::StartsWith(content_type, Http::Headers::get().ContentTypeValues.Grpc) &&\n         (content_type.size() == Http::Headers::get().ContentTypeValues.Grpc.size() ||\n          content_type[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+');\n}\n\nbool Common::isGrpcRequestHeaders(const Http::RequestHeaderMap& headers) {\n  if (!headers.Path()) {\n    return false;\n  }\n  return hasGrpcContentType(headers);\n}\n\nbool Common::isGrpcResponseHeaders(const Http::ResponseHeaderMap& headers, bool end_stream) {\n  if (end_stream) {\n    // Trailers-only response, only grpc-status is required.\n    return headers.GrpcStatus() != nullptr;\n  }\n  if (Http::Utility::getResponseStatus(headers) != enumToInt(Http::Code::OK)) {\n    return false;\n  }\n  return hasGrpcContentType(headers);\n}\n\nabsl::optional<Status::GrpcStatus>\nCommon::getGrpcStatus(const Http::ResponseHeaderOrTrailerMap& trailers, bool allow_user_defined) {\n  const absl::string_view grpc_status_header = trailers.getGrpcStatusValue();\n  uint64_t grpc_status_code;\n\n  if (grpc_status_header.empty()) {\n    return absl::nullopt;\n  }\n  if (!absl::SimpleAtoi(grpc_status_header, &grpc_status_code) ||\n      (grpc_status_code > Status::WellKnownGrpcStatus::MaximumKnown && !allow_user_defined)) {\n    return {Status::WellKnownGrpcStatus::InvalidCode};\n  }\n  return {static_cast<Status::GrpcStatus>(grpc_status_code)};\n}\n\nabsl::optional<Status::GrpcStatus> Common::getGrpcStatus(const Http::ResponseTrailerMap& trailers,\n                                                         const Http::ResponseHeaderMap& headers,\n                                                         const StreamInfo::StreamInfo& info,\n                                                         bool allow_user_defined) {\n  // The gRPC specification does not guarantee a gRPC status code will be returned from a gRPC\n  // request. 
When it is returned, it will be in the response trailers. With that said, Envoy will\n  // treat a trailers-only response as a headers-only response, so we have to check the following\n  // in order:\n  //   1. trailers gRPC status, if it exists.\n  //   2. headers gRPC status, if it exists.\n  //   3. Inferred from info HTTP status, if it exists.\n  const std::array<absl::optional<Grpc::Status::GrpcStatus>, 3> optional_statuses = {{\n      {Grpc::Common::getGrpcStatus(trailers, allow_user_defined)},\n      {Grpc::Common::getGrpcStatus(headers, allow_user_defined)},\n      {info.responseCode() ? absl::optional<Grpc::Status::GrpcStatus>(\n                                 Grpc::Utility::httpToGrpcStatus(info.responseCode().value()))\n                           : absl::nullopt},\n  }};\n\n  for (const auto& optional_status : optional_statuses) {\n    if (optional_status.has_value()) {\n      return optional_status;\n    }\n  }\n\n  return absl::nullopt;\n}\n\nstd::string Common::getGrpcMessage(const Http::ResponseHeaderOrTrailerMap& trailers) {\n  const auto entry = trailers.GrpcMessage();\n  return entry ? 
std::string(entry->value().getStringView()) : EMPTY_STRING;\n}\n\nabsl::optional<google::rpc::Status>\nCommon::getGrpcStatusDetailsBin(const Http::HeaderMap& trailers) {\n  const Http::HeaderEntry* details_header = trailers.get(Http::Headers::get().GrpcStatusDetailsBin);\n  if (!details_header) {\n    return absl::nullopt;\n  }\n\n  // Some implementations use non-padded base64 encoding for grpc-status-details-bin.\n  auto decoded_value = Base64::decodeWithoutPadding(details_header->value().getStringView());\n  if (decoded_value.empty()) {\n    return absl::nullopt;\n  }\n\n  google::rpc::Status status;\n  if (!status.ParseFromString(decoded_value)) {\n    return absl::nullopt;\n  }\n\n  return {std::move(status)};\n}\n\nBuffer::InstancePtr Common::serializeToGrpcFrame(const Protobuf::Message& message) {\n  // http://www.grpc.io/docs/guides/wire.html\n  // Reserve enough space for the entire message and the 5 byte header.\n  // NB: we do not use prependGrpcFrameHeader because that would add another BufferFragment and this\n  // (using a single BufferFragment) is more efficient.\n  Buffer::InstancePtr body(new Buffer::OwnedImpl());\n  const uint32_t size = message.ByteSize();\n  const uint32_t alloc_size = size + 5;\n  Buffer::RawSlice iovec;\n  body->reserve(alloc_size, &iovec, 1);\n  ASSERT(iovec.len_ >= alloc_size);\n  iovec.len_ = alloc_size;\n  uint8_t* current = reinterpret_cast<uint8_t*>(iovec.mem_);\n  *current++ = 0; // flags\n  const uint32_t nsize = htonl(size);\n  std::memcpy(current, reinterpret_cast<const void*>(&nsize), sizeof(uint32_t));\n  current += sizeof(uint32_t);\n  Protobuf::io::ArrayOutputStream stream(current, size, -1);\n  Protobuf::io::CodedOutputStream codec_stream(&stream);\n  message.SerializeWithCachedSizes(&codec_stream);\n  body->commit(&iovec, 1);\n  return body;\n}\n\nBuffer::InstancePtr Common::serializeMessage(const Protobuf::Message& message) {\n  auto body = std::make_unique<Buffer::OwnedImpl>();\n  const uint32_t size = 
message.ByteSize();\n  Buffer::RawSlice iovec;\n  body->reserve(size, &iovec, 1);\n  ASSERT(iovec.len_ >= size);\n  iovec.len_ = size;\n  uint8_t* current = reinterpret_cast<uint8_t*>(iovec.mem_);\n  Protobuf::io::ArrayOutputStream stream(current, size, -1);\n  Protobuf::io::CodedOutputStream codec_stream(&stream);\n  message.SerializeWithCachedSizes(&codec_stream);\n  body->commit(&iovec, 1);\n  return body;\n}\n\nabsl::optional<std::chrono::milliseconds>\nCommon::getGrpcTimeout(const Http::RequestHeaderMap& request_headers) {\n  const Http::HeaderEntry* header_grpc_timeout_entry = request_headers.GrpcTimeout();\n  std::chrono::milliseconds timeout;\n  if (header_grpc_timeout_entry) {\n    uint64_t grpc_timeout;\n    // TODO(dnoe): Migrate to pure string_view (#6580)\n    std::string grpc_timeout_string(header_grpc_timeout_entry->value().getStringView());\n    const char* unit = StringUtil::strtoull(grpc_timeout_string.c_str(), grpc_timeout);\n    if (unit != nullptr && *unit != '\\0') {\n      switch (*unit) {\n      case 'H':\n        return std::chrono::hours(grpc_timeout);\n      case 'M':\n        return std::chrono::minutes(grpc_timeout);\n      case 'S':\n        return std::chrono::seconds(grpc_timeout);\n      case 'm':\n        return std::chrono::milliseconds(grpc_timeout);\n        break;\n      case 'u':\n        timeout = std::chrono::duration_cast<std::chrono::milliseconds>(\n            std::chrono::microseconds(grpc_timeout));\n        if (timeout < std::chrono::microseconds(grpc_timeout)) {\n          timeout++;\n        }\n        return timeout;\n      case 'n':\n        timeout = std::chrono::duration_cast<std::chrono::milliseconds>(\n            std::chrono::nanoseconds(grpc_timeout));\n        if (timeout < std::chrono::nanoseconds(grpc_timeout)) {\n          timeout++;\n        }\n        return timeout;\n      }\n    }\n  }\n  return absl::nullopt;\n}\n\nvoid Common::toGrpcTimeout(const std::chrono::milliseconds& timeout,\n                 
          Http::RequestHeaderMap& headers) {\n  uint64_t time = timeout.count();\n  static const char units[] = \"mSMH\";\n  const char* unit = units; // start with milliseconds\n  static constexpr size_t MAX_GRPC_TIMEOUT_VALUE = 99999999;\n  if (time > MAX_GRPC_TIMEOUT_VALUE) {\n    time /= 1000; // Convert from milliseconds to seconds\n    unit++;\n  }\n  while (time > MAX_GRPC_TIMEOUT_VALUE) {\n    if (*unit == 'H') {\n      time = MAX_GRPC_TIMEOUT_VALUE; // No bigger unit available, clip to max 8 digit hours.\n    } else {\n      time /= 60; // Convert from seconds to minutes to hours\n      unit++;\n    }\n  }\n  headers.setGrpcTimeout(absl::StrCat(time, absl::string_view(unit, 1)));\n}\n\nHttp::RequestMessagePtr\nCommon::prepareHeaders(const std::string& host_name, const std::string& service_full_name,\n                       const std::string& method_name,\n                       const absl::optional<std::chrono::milliseconds>& timeout) {\n  Http::RequestMessagePtr message(new Http::RequestMessageImpl());\n  message->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post);\n  message->headers().setPath(absl::StrCat(\"/\", service_full_name, \"/\", method_name));\n  message->headers().setHost(host_name);\n  // According to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md TE should appear\n  // before Timeout and ContentType.\n  message->headers().setReferenceTE(Http::Headers::get().TEValues.Trailers);\n  if (timeout) {\n    toGrpcTimeout(timeout.value(), message->headers());\n  }\n  message->headers().setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc);\n\n  return message;\n}\n\nvoid Common::checkForHeaderOnlyError(Http::ResponseMessage& http_response) {\n  // First check for grpc-status in headers. 
If it is here, we have an error.\n  absl::optional<Status::GrpcStatus> grpc_status_code =\n      Common::getGrpcStatus(http_response.headers());\n  if (!grpc_status_code) {\n    return;\n  }\n\n  if (grpc_status_code.value() == Status::WellKnownGrpcStatus::InvalidCode) {\n    throw Exception(absl::optional<uint64_t>(), \"bad grpc-status header\");\n  }\n\n  throw Exception(grpc_status_code.value(), Common::getGrpcMessage(http_response.headers()));\n}\n\nvoid Common::validateResponse(Http::ResponseMessage& http_response) {\n  if (Http::Utility::getResponseStatus(http_response.headers()) != enumToInt(Http::Code::OK)) {\n    throw Exception(absl::optional<uint64_t>(), \"non-200 response code\");\n  }\n\n  checkForHeaderOnlyError(http_response);\n\n  // Check for existence of trailers.\n  if (!http_response.trailers()) {\n    throw Exception(absl::optional<uint64_t>(), \"no response trailers\");\n  }\n\n  absl::optional<Status::GrpcStatus> grpc_status_code =\n      Common::getGrpcStatus(*http_response.trailers());\n  if (!grpc_status_code || grpc_status_code.value() < 0) {\n    throw Exception(absl::optional<uint64_t>(), \"bad grpc-status trailer\");\n  }\n\n  if (grpc_status_code.value() != 0) {\n    throw Exception(grpc_status_code.value(), Common::getGrpcMessage(*http_response.trailers()));\n  }\n}\n\nconst std::string& Common::typeUrlPrefix() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"type.googleapis.com\");\n}\n\nstd::string Common::typeUrl(const std::string& qualified_name) {\n  return typeUrlPrefix() + \"/\" + qualified_name;\n}\n\nvoid Common::prependGrpcFrameHeader(Buffer::Instance& buffer) {\n  std::array<char, 5> header;\n  header[0] = 0; // flags\n  const uint32_t nsize = htonl(buffer.length());\n  std::memcpy(&header[1], reinterpret_cast<const void*>(&nsize), sizeof(uint32_t));\n  buffer.prepend(absl::string_view(&header[0], 5));\n}\n\nbool Common::parseBufferInstance(Buffer::InstancePtr&& buffer, Protobuf::Message& proto) {\n  
Buffer::ZeroCopyInputStreamImpl stream(std::move(buffer));\n  return proto.ParseFromZeroCopyStream(&stream);\n}\n\nabsl::optional<Common::RequestNames>\nCommon::resolveServiceAndMethod(const Http::HeaderEntry* path) {\n  absl::optional<RequestNames> request_names;\n  if (path == nullptr) {\n    return request_names;\n  }\n  absl::string_view str = path->value().getStringView();\n  str = str.substr(0, str.find('?'));\n  const auto parts = StringUtil::splitToken(str, \"/\");\n  if (parts.size() != 2) {\n    return request_names;\n  }\n  request_names = RequestNames{parts[0], parts[1]};\n  return request_names;\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/common.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/message.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/grpc/status.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/types/optional.h\"\n#include \"google/rpc/status.pb.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nclass Exception : public EnvoyException {\npublic:\n  Exception(const absl::optional<uint64_t>& grpc_status, const std::string& message)\n      : EnvoyException(message), grpc_status_(grpc_status) {}\n\n  const absl::optional<uint64_t> grpc_status_;\n};\n\nclass Common {\npublic:\n  /**\n   * @param headers the headers to parse.\n   * @return bool indicating whether content-type is gRPC.\n   */\n  static bool hasGrpcContentType(const Http::RequestOrResponseHeaderMap& headers);\n\n  /**\n   * @param headers the headers to parse.\n   * @return bool indicating whether the header is a gRPC request header.\n   * Currently headers are considered gRPC request headers if they have the gRPC\n   * content type, and have a path header.\n   */\n  static bool isGrpcRequestHeaders(const Http::RequestHeaderMap& headers);\n\n  /**\n   * @param headers the headers to parse.\n   * @param bool indicating whether the header is at end_stream.\n   * @return bool indicating whether the header is a gRPC response header\n   */\n  static bool isGrpcResponseHeaders(const Http::ResponseHeaderMap& headers, bool end_stream);\n\n  /**\n   * Returns the GrpcStatus code from a given set of trailers, if present.\n   * @param trailers the trailers to parse.\n   * @param allow_user_status whether allow user defined grpc status.\n   *        if this value is false, custom grpc status is regarded as invalid status\n   * @return absl::optional<Status::GrpcStatus> the parsed status code or 
InvalidCode if no valid\n   * status is found.\n   */\n  static absl::optional<Status::GrpcStatus>\n  getGrpcStatus(const Http::ResponseHeaderOrTrailerMap& trailers, bool allow_user_defined = false);\n\n  /**\n   * Returns the GrpcStatus code from the set of trailers, headers, and StreamInfo, if present.\n   * @param trailers the trailers to parse for a status code\n   * @param headers the headers to parse if no status code was found in the trailers\n   * @param info the StreamInfo to check for HTTP response code if no code was found in the trailers\n   * or headers\n   * @return absl::optional<Status::GrpcStatus> the parsed status code or absl::nullopt if no status\n   * is found\n   */\n  static absl::optional<Status::GrpcStatus> getGrpcStatus(const Http::ResponseTrailerMap& trailers,\n                                                          const Http::ResponseHeaderMap& headers,\n                                                          const StreamInfo::StreamInfo& info,\n                                                          bool allow_user_defined = false);\n\n  /**\n   * Returns the grpc-message from a given set of trailers, if present.\n   * @param trailers the trailers to parse.\n   * @return std::string the gRPC status message or empty string if grpc-message is not present in\n   *         trailers.\n   */\n  static std::string getGrpcMessage(const Http::ResponseHeaderOrTrailerMap& trailers);\n\n  /**\n   * Returns the decoded google.rpc.Status message from a given set of trailers, if present.\n   * @param trailers the trailers to parse.\n   * @return std::unique_ptr<google::rpc::Status> the gRPC status message or empty pointer if no\n   *         grpc-status-details-bin trailer found or it was invalid.\n   */\n  static absl::optional<google::rpc::Status>\n  getGrpcStatusDetailsBin(const Http::HeaderMap& trailers);\n\n  /**\n   * Parse gRPC header 'grpc-timeout' value to a duration in milliseconds.\n   * @param request_headers the header map from 
which to extract the value of 'grpc-timeout' header.\n   *        If this header is missing the timeout corresponds to infinity. The header is encoded in\n   *        maximum of 8 decimal digits and a char for the unit.\n   * @return absl::optional<std::chrono::milliseconds> the duration in milliseconds. absl::nullopt\n   *         is returned if 'grpc-timeout' is missing or malformed.\n   */\n  static absl::optional<std::chrono::milliseconds>\n  getGrpcTimeout(const Http::RequestHeaderMap& request_headers);\n\n  /**\n   * Encode 'timeout' into 'grpc-timeout' format in the grpc-timeout header.\n   * @param timeout the duration in std::chrono::milliseconds.\n   * @param headers the HeaderMap in which the grpc-timeout header will be set with the timeout in\n   * 'grpc-timeout' format, up to 8 decimal digits and a letter indicating the unit.\n   */\n  static void toGrpcTimeout(const std::chrono::milliseconds& timeout,\n                            Http::RequestHeaderMap& headers);\n\n  /**\n   * Serialize protobuf message with gRPC frame header.\n   */\n  static Buffer::InstancePtr serializeToGrpcFrame(const Protobuf::Message& message);\n\n  /**\n   * Serialize protobuf message. 
Without grpc header.\n   */\n  static Buffer::InstancePtr serializeMessage(const Protobuf::Message& message);\n\n  /**\n   * Prepare headers for protobuf service.\n   */\n  static Http::RequestMessagePtr\n  prepareHeaders(const std::string& upstream_cluster, const std::string& service_full_name,\n                 const std::string& method_name,\n                 const absl::optional<std::chrono::milliseconds>& timeout);\n\n  /**\n   * Basic validation of gRPC response, @throws Grpc::Exception in case of non successful response.\n   */\n  static void validateResponse(Http::ResponseMessage& http_response);\n\n  /**\n   * @return const std::string& type URL prefix.\n   */\n  static const std::string& typeUrlPrefix();\n\n  /**\n   * Prefix type URL to a qualified name.\n   * @param qualified_name packagename.messagename.\n   * @return qualified_name prefixed with typeUrlPrefix + \"/\".\n   */\n  static std::string typeUrl(const std::string& qualified_name);\n\n  /**\n   * Prepend a gRPC frame header to a Buffer::Instance containing a single gRPC frame.\n   * @param buffer containing the frame data which will be modified.\n   */\n  static void prependGrpcFrameHeader(Buffer::Instance& buffer);\n\n  /**\n   * Parse a Buffer::Instance into a Protobuf::Message.\n   * @param buffer containing the data to be parsed.\n   * @param proto the parsed proto.\n   * @return bool true if the parse was successful.\n   */\n  static bool parseBufferInstance(Buffer::InstancePtr&& buffer, Protobuf::Message& proto);\n\n  struct RequestNames {\n    absl::string_view service_;\n    absl::string_view method_;\n  };\n\n  /**\n   * Resolve the gRPC service and method from the HTTP2 :path header.\n   * @param path supplies the :path header.\n   * @return if both gRPC serve and method have been resolved successfully returns\n   *   a populated RequestNames, otherwise returns an empty optional.\n   * @note The return value is only valid as long as `path` is still valid and unmodified.\n   */\n  
static absl::optional<RequestNames> resolveServiceAndMethod(const Http::HeaderEntry* path);\n\nprivate:\n  static void checkForHeaderOnlyError(Http::ResponseMessage& http_response);\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/context_impl.cc",
    "content": "#include \"common/grpc/context_impl.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"common/grpc/common.h\"\n#include \"common/stats/utility.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nContextImpl::ContextImpl(Stats::SymbolTable& symbol_table)\n    : stat_name_pool_(symbol_table), grpc_(stat_name_pool_.add(\"grpc\")),\n      grpc_web_(stat_name_pool_.add(\"grpc-web\")), success_(stat_name_pool_.add(\"success\")),\n      failure_(stat_name_pool_.add(\"failure\")), total_(stat_name_pool_.add(\"total\")),\n      zero_(stat_name_pool_.add(\"0\")),\n      request_message_count_(stat_name_pool_.add(\"request_message_count\")),\n      response_message_count_(stat_name_pool_.add(\"response_message_count\")),\n      upstream_rq_time_(stat_name_pool_.add(\"upstream_rq_time\")), stat_names_(symbol_table) {}\n\n// Gets the stat prefix and underlying storage, depending on whether request_names is empty\nStats::ElementVec ContextImpl::statElements(Protocol protocol,\n                                            const absl::optional<RequestStatNames>& request_names,\n                                            Stats::Element suffix) {\n  const Stats::StatName protocolName = protocolStatName(protocol);\n  if (request_names) {\n    return Stats::ElementVec{protocolName, request_names->service_, request_names->method_, suffix};\n  }\n  return Stats::ElementVec{protocolName, suffix};\n}\n\nvoid ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol,\n                             const absl::optional<RequestStatNames>& request_names,\n                             const Http::HeaderEntry* grpc_status) {\n  if (!grpc_status) {\n    return;\n  }\n\n  absl::string_view status_str = grpc_status->value().getStringView();\n  auto iter = stat_names_.status_names_.find(status_str);\n  Stats::ElementVec elements =\n      statElements(protocol, request_names,\n                   (iter != stat_names_.status_names_.end()) ? 
Stats::Element(iter->second)\n                                                             : Stats::DynamicName(status_str));\n  Stats::Utility::counterFromElements(cluster.statsScope(), elements).inc();\n  chargeStat(cluster, protocol, request_names, (status_str == \"0\"));\n}\n\nvoid ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol,\n                             const absl::optional<RequestStatNames>& request_names, bool success) {\n  Stats::ElementVec elements = statElements(protocol, request_names, successStatName(success));\n  Stats::Utility::counterFromElements(cluster.statsScope(), elements).inc();\n  elements.back() = total_;\n  Stats::Utility::counterFromElements(cluster.statsScope(), elements).inc();\n}\n\nvoid ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster,\n                             const absl::optional<RequestStatNames>& request_names, bool success) {\n  chargeStat(cluster, Protocol::Grpc, request_names, success);\n}\n\nvoid ContextImpl::chargeRequestMessageStat(const Upstream::ClusterInfo& cluster,\n                                           const absl::optional<RequestStatNames>& request_names,\n                                           uint64_t amount) {\n  Stats::ElementVec elements = statElements(Protocol::Grpc, request_names, request_message_count_);\n  Stats::Utility::counterFromElements(cluster.statsScope(), elements).add(amount);\n}\n\nvoid ContextImpl::chargeResponseMessageStat(const Upstream::ClusterInfo& cluster,\n                                            const absl::optional<RequestStatNames>& request_names,\n                                            uint64_t amount) {\n  Stats::ElementVec elements = statElements(Protocol::Grpc, request_names, response_message_count_);\n  Stats::Utility::counterFromElements(cluster.statsScope(), elements).add(amount);\n}\n\nvoid ContextImpl::chargeUpstreamStat(const Upstream::ClusterInfo& cluster,\n                                     const 
absl::optional<RequestStatNames>& request_names,\n                                     std::chrono::milliseconds duration) {\n  Stats::ElementVec elements = statElements(Protocol::Grpc, request_names, upstream_rq_time_);\n  Stats::Utility::histogramFromElements(cluster.statsScope(), elements,\n                                        Stats::Histogram::Unit::Milliseconds)\n      .recordValue(duration.count());\n}\n\nabsl::optional<ContextImpl::RequestStatNames>\nContextImpl::resolveDynamicServiceAndMethod(const Http::HeaderEntry* path) {\n  absl::optional<Common::RequestNames> request_names = Common::resolveServiceAndMethod(path);\n  if (!request_names) {\n    return {};\n  }\n\n  Stats::Element service = Stats::DynamicName(request_names->service_);\n  Stats::Element method = Stats::DynamicName(request_names->method_);\n  return RequestStatNames{service, method};\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/context_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/grpc/context.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/grpc/stat_names.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/utility.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nstruct Context::RequestStatNames {\n  Stats::Element service_; // supplies the service name.\n  Stats::Element method_;  // supplies the method name.\n};\n\nclass ContextImpl : public Context {\npublic:\n  explicit ContextImpl(Stats::SymbolTable& symbol_table);\n\n  // Context\n  void chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol,\n                  const absl::optional<RequestStatNames>& request_names,\n                  const Http::HeaderEntry* grpc_status) override;\n  void chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol,\n                  const absl::optional<RequestStatNames>& request_names, bool success) override;\n  void chargeStat(const Upstream::ClusterInfo& cluster,\n                  const absl::optional<RequestStatNames>& request_names, bool success) override;\n  void chargeRequestMessageStat(const Upstream::ClusterInfo& cluster,\n                                const absl::optional<RequestStatNames>& request_names,\n                                uint64_t amount) override;\n  void chargeResponseMessageStat(const Upstream::ClusterInfo& cluster,\n                                 const absl::optional<RequestStatNames>& request_names,\n                                 uint64_t amount) override;\n  void chargeUpstreamStat(const Upstream::ClusterInfo& cluster,\n                          const absl::optional<RequestStatNames>& request_names,\n                          std::chrono::milliseconds duration) override;\n\n  /**\n   * Resolve the gRPC service and method from the HTTP2 :path header.\n   * @param path supplies the :path header.\n   
* @return if both gRPC serve and method have been resolved successfully returns\n   *   a populated RequestStatNames, otherwise returns an empty optional.\n   */\n  absl::optional<RequestStatNames>\n  resolveDynamicServiceAndMethod(const Http::HeaderEntry* path) override;\n\n  Stats::StatName successStatName(bool success) const { return success ? success_ : failure_; }\n  Stats::StatName protocolStatName(Protocol protocol) const {\n    return protocol == Context::Protocol::Grpc ? grpc_ : grpc_web_;\n  }\n\n  StatNames& statNames() override { return stat_names_; }\n\nprivate:\n  // Creates an array of stat-name elements, comprising the protocol, optional\n  // service and method, and a suffix.\n  Stats::ElementVec statElements(Protocol protocol,\n                                 const absl::optional<RequestStatNames>& request_names,\n                                 Stats::Element suffix);\n\n  Stats::StatNamePool stat_name_pool_;\n  const Stats::StatName grpc_;\n  const Stats::StatName grpc_web_;\n  const Stats::StatName success_;\n  const Stats::StatName failure_;\n  const Stats::StatName total_;\n  const Stats::StatName zero_;\n  const Stats::StatName request_message_count_;\n  const Stats::StatName response_message_count_;\n  const Stats::StatName upstream_rq_time_;\n\n  StatNames stat_names_;\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/google_async_client_impl.cc",
    "content": "#include \"common/grpc/google_async_client_impl.h\"\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/config/datasource.h\"\n#include \"common/grpc/common.h\"\n#include \"common/grpc/google_grpc_creds_impl.h\"\n#include \"common/grpc/google_grpc_utils.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"grpcpp/support/proto_buffer_reader.h\"\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\nstatic constexpr int DefaultBufferLimitBytes = 1024 * 1024;\n}\n\nGoogleAsyncClientThreadLocal::GoogleAsyncClientThreadLocal(Api::Api& api)\n    : completion_thread_(api.threadFactory().createThread([this] { completionThread(); },\n                                                          Thread::Options{\"GrpcGoogClient\"})) {}\n\nGoogleAsyncClientThreadLocal::~GoogleAsyncClientThreadLocal() {\n  // Force streams to shutdown and invoke TryCancel() to start the drain of\n  // pending op. 
If we don't do this, Shutdown() below can jam on pending ops.\n  // This is also required to satisfy the contract that once Shutdown is called,\n  // streams no longer queue any additional tags.\n  for (auto it = streams_.begin(); it != streams_.end();) {\n    // resetStream() may result in immediate unregisterStream() and erase(),\n    // which would invalidate the iterator for the current element, so make sure\n    // we point to the next one first.\n    (*it++)->resetStream();\n  }\n  cq_.Shutdown();\n  ENVOY_LOG(debug, \"Joining completionThread\");\n  completion_thread_->join();\n  ENVOY_LOG(debug, \"Joined completionThread\");\n  // Ensure that we have cleaned up all orphan streams, now that CQ is gone.\n  while (!streams_.empty()) {\n    (*streams_.begin())->onCompletedOps();\n  }\n}\n\nvoid GoogleAsyncClientThreadLocal::completionThread() {\n  ENVOY_LOG(debug, \"completionThread running\");\n  void* tag;\n  bool ok;\n  while (cq_.Next(&tag, &ok)) {\n    const auto& google_async_tag = *reinterpret_cast<GoogleAsyncTag*>(tag);\n    const GoogleAsyncTag::Operation op = google_async_tag.op_;\n    GoogleAsyncStreamImpl& stream = google_async_tag.stream_;\n    ENVOY_LOG(trace, \"completionThread CQ event {} {}\", op, ok);\n    Thread::LockGuard lock(stream.completed_ops_lock_);\n\n    // It's an invariant that there must only be one pending post for arbitrary\n    // length completed_ops_, otherwise we can race in stream destruction, where\n    // we process multiple events in onCompletedOps() but have only partially\n    // consumed the posts on the dispatcher.\n    // TODO(htuch): This may result in unbounded processing on the silo thread\n    // in onCompletedOps() in extreme cases, when we emplace_back() in\n    // completionThread() at a high rate, consider bounding the length of such\n    // sequences if this behavior becomes an issue.\n    if (stream.completed_ops_.empty()) {\n      stream.dispatcher_.post([&stream] { stream.onCompletedOps(); });\n    }\n   
 stream.completed_ops_.emplace_back(op, ok);\n  }\n  ENVOY_LOG(debug, \"completionThread exiting\");\n}\n\nGoogleAsyncClientImpl::GoogleAsyncClientImpl(Event::Dispatcher& dispatcher,\n                                             GoogleAsyncClientThreadLocal& tls,\n                                             GoogleStubFactory& stub_factory,\n                                             Stats::ScopeSharedPtr scope,\n                                             const envoy::config::core::v3::GrpcService& config,\n                                             Api::Api& api, const StatNames& stat_names)\n    : dispatcher_(dispatcher), tls_(tls), stat_prefix_(config.google_grpc().stat_prefix()),\n      initial_metadata_(config.initial_metadata()), scope_(scope),\n      per_stream_buffer_limit_bytes_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config.google_grpc(), per_stream_buffer_limit_bytes, DefaultBufferLimitBytes)) {\n  // We rebuild the channel each time we construct the channel. It appears that the gRPC library is\n  // smart enough to do connection pooling and reuse with identical channel args, so this should\n  // have comparable overhead to what we are doing in Grpc::AsyncClientImpl, i.e. 
no expensive\n  // new connection implied.\n  std::shared_ptr<grpc::Channel> channel = GoogleGrpcUtils::createChannel(config, api);\n  stub_ = stub_factory.createStub(channel);\n  scope_->counterFromStatName(stat_names.google_grpc_client_creation_).inc();\n  // Initialize client stats.\n  // TODO(jmarantz): Capture these names in async_client_manager_impl.cc and\n  // pass in a struct of StatName objects so we don't have to take locks here.\n  stats_.streams_total_ = &scope_->counterFromStatName(stat_names.streams_total_);\n  for (uint32_t i = 0; i <= Status::WellKnownGrpcStatus::MaximumKnown; ++i) {\n    stats_.streams_closed_[i] = &scope_->counterFromStatName(stat_names.streams_closed_[i]);\n  }\n}\n\nGoogleAsyncClientImpl::~GoogleAsyncClientImpl() {\n  ENVOY_LOG(debug, \"Client teardown, resetting streams\");\n  while (!active_streams_.empty()) {\n    active_streams_.front()->resetStream();\n  }\n}\n\nAsyncRequest* GoogleAsyncClientImpl::sendRaw(absl::string_view service_full_name,\n                                             absl::string_view method_name,\n                                             Buffer::InstancePtr&& request,\n                                             RawAsyncRequestCallbacks& callbacks,\n                                             Tracing::Span& parent_span,\n                                             const Http::AsyncClient::RequestOptions& options) {\n  auto* const async_request = new GoogleAsyncRequestImpl(\n      *this, service_full_name, method_name, std::move(request), callbacks, parent_span, options);\n  GoogleAsyncStreamImplPtr grpc_stream{async_request};\n\n  grpc_stream->initialize(true);\n  if (grpc_stream->callFailed()) {\n    return nullptr;\n  }\n\n  LinkedList::moveIntoList(std::move(grpc_stream), active_streams_);\n  return async_request;\n}\n\nRawAsyncStream* GoogleAsyncClientImpl::startRaw(absl::string_view service_full_name,\n                                                absl::string_view method_name,\n         
                                       RawAsyncStreamCallbacks& callbacks,\n                                                const Http::AsyncClient::StreamOptions& options) {\n  auto grpc_stream = std::make_unique<GoogleAsyncStreamImpl>(*this, service_full_name, method_name,\n                                                             callbacks, options);\n\n  grpc_stream->initialize(false);\n  if (grpc_stream->callFailed()) {\n    return nullptr;\n  }\n\n  LinkedList::moveIntoList(std::move(grpc_stream), active_streams_);\n  return active_streams_.front().get();\n}\n\nGoogleAsyncStreamImpl::GoogleAsyncStreamImpl(GoogleAsyncClientImpl& parent,\n                                             absl::string_view service_full_name,\n                                             absl::string_view method_name,\n                                             RawAsyncStreamCallbacks& callbacks,\n                                             const Http::AsyncClient::StreamOptions& options)\n    : parent_(parent), tls_(parent_.tls_), dispatcher_(parent_.dispatcher_), stub_(parent_.stub_),\n      service_full_name_(service_full_name), method_name_(method_name), callbacks_(callbacks),\n      options_(options) {}\n\nGoogleAsyncStreamImpl::~GoogleAsyncStreamImpl() {\n  ENVOY_LOG(debug, \"GoogleAsyncStreamImpl destruct\");\n}\n\nGoogleAsyncStreamImpl::PendingMessage::PendingMessage(Buffer::InstancePtr request, bool end_stream)\n    : buf_(GoogleGrpcUtils::makeByteBuffer(std::move(request))), end_stream_(end_stream) {}\n\n// TODO(htuch): figure out how to propagate \"this request should be buffered for\n// retry\" bit to Google gRPC library.\nvoid GoogleAsyncStreamImpl::initialize(bool /*buffer_body_for_retry*/) {\n  parent_.stats_.streams_total_->inc();\n  gpr_timespec abs_deadline =\n      options_.timeout\n          ? 
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),\n                         gpr_time_from_millis(options_.timeout.value().count(), GPR_TIMESPAN))\n          : gpr_inf_future(GPR_CLOCK_REALTIME);\n  ctxt_.set_deadline(abs_deadline);\n  // Fill service-wide initial metadata.\n  for (const auto& header_value : parent_.initial_metadata_) {\n    ctxt_.AddMetadata(header_value.key(), header_value.value());\n  }\n  // Due to the different HTTP header implementations, we effectively double\n  // copy headers here.\n  auto initial_metadata = Http::RequestHeaderMapImpl::create();\n  callbacks_.onCreateInitialMetadata(*initial_metadata);\n  initial_metadata->iterate([this](const Http::HeaderEntry& header) {\n    ctxt_.AddMetadata(std::string(header.key().getStringView()),\n                      std::string(header.value().getStringView()));\n    return Http::HeaderMap::Iterate::Continue;\n  });\n  // Invoke stub call.\n  rw_ = parent_.stub_->PrepareCall(&ctxt_, \"/\" + service_full_name_ + \"/\" + method_name_,\n                                   &parent_.tls_.completionQueue());\n  if (rw_ == nullptr) {\n    notifyRemoteClose(Status::WellKnownGrpcStatus::Unavailable, nullptr, EMPTY_STRING);\n    call_failed_ = true;\n    return;\n  }\n  parent_.tls_.registerStream(this);\n  rw_->StartCall(&init_tag_);\n  ++inflight_tags_;\n}\n\nvoid GoogleAsyncStreamImpl::notifyRemoteClose(Status::GrpcStatus grpc_status,\n                                              Http::ResponseTrailerMapPtr trailing_metadata,\n                                              const std::string& message) {\n  if (grpc_status > Status::WellKnownGrpcStatus::MaximumKnown || grpc_status < 0) {\n    ENVOY_LOG(error, \"notifyRemoteClose invalid gRPC status code {}\", grpc_status);\n    // Set the grpc_status as InvalidCode but increment the Unknown stream to avoid out-of-range\n    // crash..\n    grpc_status = Status::WellKnownGrpcStatus::InvalidCode;\n    
parent_.stats_.streams_closed_[Status::WellKnownGrpcStatus::Unknown]->inc();\n  } else {\n    parent_.stats_.streams_closed_[grpc_status]->inc();\n  }\n  ENVOY_LOG(debug, \"notifyRemoteClose {} {}\", grpc_status, message);\n  callbacks_.onReceiveTrailingMetadata(trailing_metadata ? std::move(trailing_metadata)\n                                                         : Http::ResponseTrailerMapImpl::create());\n  callbacks_.onRemoteClose(grpc_status, message);\n}\n\nvoid GoogleAsyncStreamImpl::sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) {\n  write_pending_queue_.emplace(std::move(request), end_stream);\n  ENVOY_LOG(trace, \"Queued message to write ({} bytes)\",\n            write_pending_queue_.back().buf_.value().Length());\n  bytes_in_write_pending_queue_ += write_pending_queue_.back().buf_.value().Length();\n  writeQueued();\n}\n\nvoid GoogleAsyncStreamImpl::closeStream() {\n  // Empty EOS write queued.\n  write_pending_queue_.emplace();\n  writeQueued();\n}\n\nvoid GoogleAsyncStreamImpl::resetStream() {\n  ENVOY_LOG(debug, \"resetStream\");\n  cleanup();\n}\n\nvoid GoogleAsyncStreamImpl::writeQueued() {\n  if (!call_initialized_ || finish_pending_ || write_pending_ || write_pending_queue_.empty() ||\n      draining_cq_) {\n    return;\n  }\n  write_pending_ = true;\n  const PendingMessage& msg = write_pending_queue_.front();\n\n  if (!msg.buf_) {\n    ASSERT(msg.end_stream_);\n    rw_->WritesDone(&write_last_tag_);\n    ++inflight_tags_;\n  } else if (msg.end_stream_) {\n    grpc::WriteOptions write_options;\n    rw_->WriteLast(msg.buf_.value(), write_options, &write_last_tag_);\n    ++inflight_tags_;\n  } else {\n    rw_->Write(msg.buf_.value(), &write_tag_);\n    ++inflight_tags_;\n  }\n  ENVOY_LOG(trace, \"Write op dispatched\");\n}\n\nvoid GoogleAsyncStreamImpl::onCompletedOps() {\n  Thread::LockGuard lock(completed_ops_lock_);\n  while (!completed_ops_.empty()) {\n    GoogleAsyncTag::Operation op;\n    bool ok;\n    std::tie(op, ok) = 
completed_ops_.front();\n    completed_ops_.pop_front();\n    handleOpCompletion(op, ok);\n  }\n}\n\nvoid GoogleAsyncStreamImpl::handleOpCompletion(GoogleAsyncTag::Operation op, bool ok) {\n  ENVOY_LOG(trace, \"handleOpCompletion op={} ok={} inflight={}\", op, ok, inflight_tags_);\n  ASSERT(inflight_tags_ > 0);\n  --inflight_tags_;\n  if (draining_cq_) {\n    if (inflight_tags_ == 0) {\n      deferredDelete();\n    }\n    // Ignore op completions while draining CQ.\n    return;\n  }\n  // Consider failure cases first.\n  if (!ok) {\n    // Early fails can be just treated as Internal.\n    if (op == GoogleAsyncTag::Operation::Init ||\n        op == GoogleAsyncTag::Operation::ReadInitialMetadata) {\n      notifyRemoteClose(Status::WellKnownGrpcStatus::Internal, nullptr, EMPTY_STRING);\n      resetStream();\n      return;\n    }\n    // Remote server has closed, we can pick up some meaningful status.\n    // TODO(htuch): We're assuming here that a failed Write/WriteLast operation will result in\n    // stream termination, and pick up on the failed Read here. 
Confirm that this assumption is\n    // valid.\n    if (op == GoogleAsyncTag::Operation::Read) {\n      finish_pending_ = true;\n      rw_->Finish(&status_, &finish_tag_);\n      ++inflight_tags_;\n    }\n    return;\n  }\n  switch (op) {\n  case GoogleAsyncTag::Operation::Init: {\n    ASSERT(ok);\n    ASSERT(!call_initialized_);\n    call_initialized_ = true;\n    rw_->ReadInitialMetadata(&read_initial_metadata_tag_);\n    ++inflight_tags_;\n    writeQueued();\n    break;\n  }\n  case GoogleAsyncTag::Operation::ReadInitialMetadata: {\n    ASSERT(ok);\n    ASSERT(call_initialized_);\n    rw_->Read(&read_buf_, &read_tag_);\n    ++inflight_tags_;\n    Http::ResponseHeaderMapPtr initial_metadata = Http::ResponseHeaderMapImpl::create();\n    metadataTranslate(ctxt_.GetServerInitialMetadata(), *initial_metadata);\n    callbacks_.onReceiveInitialMetadata(std::move(initial_metadata));\n    break;\n  }\n  case GoogleAsyncTag::Operation::Write: {\n    ASSERT(ok);\n    write_pending_ = false;\n    bytes_in_write_pending_queue_ -= write_pending_queue_.front().buf_.value().Length();\n    write_pending_queue_.pop();\n    writeQueued();\n    break;\n  }\n  case GoogleAsyncTag::Operation::WriteLast: {\n    ASSERT(ok);\n    write_pending_ = false;\n    break;\n  }\n  case GoogleAsyncTag::Operation::Read: {\n    ASSERT(ok);\n    auto buffer = GoogleGrpcUtils::makeBufferInstance(read_buf_);\n    if (!buffer || !callbacks_.onReceiveMessageRaw(std::move(buffer))) {\n      // This is basically streamError in Grpc::AsyncClientImpl.\n      notifyRemoteClose(Status::WellKnownGrpcStatus::Internal, nullptr, EMPTY_STRING);\n      resetStream();\n      break;\n    }\n    rw_->Read(&read_buf_, &read_tag_);\n    ++inflight_tags_;\n    break;\n  }\n  case GoogleAsyncTag::Operation::Finish: {\n    ASSERT(finish_pending_);\n    ENVOY_LOG(debug, \"Finish with grpc-status code {}\", status_.error_code());\n    Http::ResponseTrailerMapPtr trailing_metadata = Http::ResponseTrailerMapImpl::create();\n  
  metadataTranslate(ctxt_.GetServerTrailingMetadata(), *trailing_metadata);\n    notifyRemoteClose(static_cast<Status::GrpcStatus>(status_.error_code()),\n                      std::move(trailing_metadata), status_.error_message());\n    cleanup();\n    break;\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid GoogleAsyncStreamImpl::metadataTranslate(\n    const std::multimap<grpc::string_ref, grpc::string_ref>& grpc_metadata,\n    Http::HeaderMap& header_map) {\n  // More painful copying, this time due to the mismatch in header\n  // representation data structures in Envoy and Google gRPC.\n  for (const auto& it : grpc_metadata) {\n    auto key = Http::LowerCaseString(std::string(it.first.data(), it.first.size()));\n    if (absl::EndsWith(key.get(), \"-bin\")) {\n      auto value = Base64::encode(it.second.data(), it.second.size());\n      header_map.addCopy(key, value);\n      continue;\n    }\n    header_map.addCopy(key, std::string(it.second.data(), it.second.size()));\n  }\n}\n\nvoid GoogleAsyncStreamImpl::deferredDelete() {\n  ENVOY_LOG(debug, \"Deferred delete\");\n  tls_.unregisterStream(this);\n  // We only get here following cleanup(), which has performed a\n  // remoteFromList(), resulting in self-ownership of the object's memory.\n  // Hence, it is safe here to create a unique_ptr to this and transfer\n  // ownership to dispatcher_.deferredDelete(). 
After this call, no further\n  // methods may be invoked on this object.\n  dispatcher_.deferredDelete(GoogleAsyncStreamImplPtr(this));\n}\n\nvoid GoogleAsyncStreamImpl::cleanup() {\n  ENVOY_LOG(debug, \"Stream cleanup with {} in-flight tags\", inflight_tags_);\n  // We can get here if the client has already issued resetStream() and, while\n  // this is in progress, the destructor runs.\n  if (draining_cq_) {\n    ENVOY_LOG(debug, \"Cleanup already in progress\");\n    return;\n  }\n  draining_cq_ = true;\n  ctxt_.TryCancel();\n  if (LinkedObject<GoogleAsyncStreamImpl>::inserted()) {\n    // We take ownership of our own memory at this point.\n    LinkedObject<GoogleAsyncStreamImpl>::removeFromList(parent_.active_streams_).release();\n    if (inflight_tags_ == 0) {\n      deferredDelete();\n    }\n  }\n}\n\nGoogleAsyncRequestImpl::GoogleAsyncRequestImpl(\n    GoogleAsyncClientImpl& parent, absl::string_view service_full_name,\n    absl::string_view method_name, Buffer::InstancePtr request, RawAsyncRequestCallbacks& callbacks,\n    Tracing::Span& parent_span, const Http::AsyncClient::RequestOptions& options)\n    : GoogleAsyncStreamImpl(parent, service_full_name, method_name, *this, options),\n      request_(std::move(request)), callbacks_(callbacks) {\n  current_span_ = parent_span.spawnChild(Tracing::EgressConfig::get(),\n                                         \"async \" + parent.stat_prefix_ + \" egress\",\n                                         parent.timeSource().systemTime());\n  current_span_->setTag(Tracing::Tags::get().UpstreamCluster, parent.stat_prefix_);\n  current_span_->setTag(Tracing::Tags::get().Component, Tracing::Tags::get().Proxy);\n}\n\nvoid GoogleAsyncRequestImpl::initialize(bool buffer_body_for_retry) {\n  GoogleAsyncStreamImpl::initialize(buffer_body_for_retry);\n  if (callFailed()) {\n    return;\n  }\n  sendMessageRaw(std::move(request_), true);\n}\n\nvoid GoogleAsyncRequestImpl::cancel() {\n  
current_span_->setTag(Tracing::Tags::get().Status, Tracing::Tags::get().Canceled);\n  current_span_->finishSpan();\n  resetStream();\n}\n\nvoid GoogleAsyncRequestImpl::onCreateInitialMetadata(Http::RequestHeaderMap& metadata) {\n  current_span_->injectContext(metadata);\n  callbacks_.onCreateInitialMetadata(metadata);\n}\n\nvoid GoogleAsyncRequestImpl::onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&&) {}\n\nbool GoogleAsyncRequestImpl::onReceiveMessageRaw(Buffer::InstancePtr&& response) {\n  response_ = std::move(response);\n  return true;\n}\n\nvoid GoogleAsyncRequestImpl::onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&&) {}\n\nvoid GoogleAsyncRequestImpl::onRemoteClose(Grpc::Status::GrpcStatus status,\n                                           const std::string& message) {\n  current_span_->setTag(Tracing::Tags::get().GrpcStatusCode, std::to_string(status));\n\n  if (status != Grpc::Status::WellKnownGrpcStatus::Ok) {\n    current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True);\n    callbacks_.onFailure(status, message, *current_span_);\n  } else if (response_ == nullptr) {\n    current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True);\n    callbacks_.onFailure(Status::Internal, EMPTY_STRING, *current_span_);\n  } else {\n    callbacks_.onSuccessRaw(std::move(response_), *current_span_);\n  }\n\n  current_span_->finishSpan();\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/google_async_client_impl.h",
    "content": "#pragma once\n\n#include <memory>\n#include <queue>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread/thread.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/common/linked_object.h\"\n#include \"common/common/thread.h\"\n#include \"common/common/thread_annotations.h\"\n#include \"common/grpc/google_grpc_context.h\"\n#include \"common/grpc/stat_names.h\"\n#include \"common/grpc/typed_async_client.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"grpcpp/generic/generic_stub.h\"\n#include \"grpcpp/grpcpp.h\"\n#include \"grpcpp/support/proto_buffer_writer.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nclass GoogleAsyncStreamImpl;\n\nusing GoogleAsyncStreamImplPtr = std::unique_ptr<GoogleAsyncStreamImpl>;\n\nclass GoogleAsyncRequestImpl;\n\nstruct GoogleAsyncTag {\n  // Operation defines tags that are handed to the gRPC AsyncReaderWriter for use in completion\n  // notification for their namesake operations. Read* and Write* operations may be outstanding\n  // simultaneously, but there will be no more than one operation of each type in-flight for a given\n  // stream. Init and Finish will both be issued exclusively when no other operations are in-flight\n  // for a stream. 
See\n  // https://github.com/grpc/grpc/blob/master/include/grpc%2B%2B/impl/codegen/async_stream.h for\n  // further insight into the semantics of the different gRPC client operations.\n  enum Operation {\n    // Initial stub call issued, waiting for initialization to complete.\n    Init = 0,\n    // Waiting for initial meta-data from server following Init completion.\n    ReadInitialMetadata,\n    // Waiting for response protobuf from server following ReadInitialMetadata completion.\n    Read,\n    // Waiting for write of request protobuf to server to complete.\n    Write,\n    // Waiting for write of request protobuf (EOS) __OR__ an EOS WritesDone to server to complete.\n    WriteLast,\n    // Waiting for final status. This must only be issued once all Read* and Write* operations have\n    // completed.\n    Finish,\n  };\n\n  GoogleAsyncTag(GoogleAsyncStreamImpl& stream, Operation op) : stream_(stream), op_(op) {}\n\n  GoogleAsyncStreamImpl& stream_;\n  const Operation op_;\n};\n\nclass GoogleAsyncClientThreadLocal : public ThreadLocal::ThreadLocalObject,\n                                     Logger::Loggable<Logger::Id::grpc> {\npublic:\n  GoogleAsyncClientThreadLocal(Api::Api& api);\n  ~GoogleAsyncClientThreadLocal() override;\n\n  grpc::CompletionQueue& completionQueue() { return cq_; }\n\n  void registerStream(GoogleAsyncStreamImpl* stream) {\n    ASSERT(streams_.find(stream) == streams_.end());\n    streams_.insert(stream);\n  }\n\n  void unregisterStream(GoogleAsyncStreamImpl* stream) {\n    auto it = streams_.find(stream);\n    ASSERT(it != streams_.end());\n    streams_.erase(it);\n  }\n\nprivate:\n  void completionThread();\n\n  // There is blanket google-grpc initialization in MainCommonBase, but that\n  // doesn't cover unit tests. However, putting blanket coverage in ProcessWide\n  // causes background threaded memory allocation in all unit tests making it\n  // hard to measure memory. 
Thus we also initialize grpc using our idempotent\n  // wrapper-class in classes that need it. See\n  // https://github.com/envoyproxy/envoy/issues/8282 for details.\n  GoogleGrpcContext google_grpc_context_;\n\n  // The CompletionQueue for in-flight operations. This must precede completion_thread_ to ensure it\n  // is constructed before the thread runs.\n  grpc::CompletionQueue cq_;\n  // The threading model for the Google gRPC C++ library is not directly compatible with Envoy's\n  // siloed model. We resolve this by issuing non-blocking asynchronous\n  // operations on the GoogleAsyncClientImpl silo thread, and then synchronously\n  // blocking on a completion queue, cq_, on a distinct thread. When cq_ events\n  // are delivered, we cross-post to the silo dispatcher to continue the\n  // operation.\n  //\n  // We have an independent completion thread for each TLS silo (i.e. one per worker and\n  // also one for the main thread).\n  Thread::ThreadPtr completion_thread_;\n  // Track all streams that are currently using this CQ, so we can notify them\n  // on shutdown.\n  absl::node_hash_set<GoogleAsyncStreamImpl*> streams_;\n};\n\nusing GoogleAsyncClientThreadLocalPtr = std::unique_ptr<GoogleAsyncClientThreadLocal>;\n\n// Google gRPC client stats. 
TODO(htuch): consider how a wider set of stats collected by the\n// library, such as the census related ones, can be externalized as needed.\nstruct GoogleAsyncClientStats {\n  // .streams_total\n  Stats::Counter* streams_total_;\n  // .streams_closed_<gRPC status code>\n  std::array<Stats::Counter*, Status::WellKnownGrpcStatus::MaximumKnown + 1> streams_closed_;\n};\n\n// Interface to allow the gRPC stub to be mocked out by tests.\nclass GoogleStub {\npublic:\n  virtual ~GoogleStub() = default;\n\n  // See grpc::PrepareCall().\n  virtual std::unique_ptr<grpc::GenericClientAsyncReaderWriter>\n  PrepareCall(grpc::ClientContext* context, const grpc::string& method,\n              grpc::CompletionQueue* cq) PURE;\n};\n\nusing GoogleStubSharedPtr = std::shared_ptr<GoogleStub>;\n\nclass GoogleGenericStub : public GoogleStub {\npublic:\n  GoogleGenericStub(std::shared_ptr<grpc::Channel> channel) : stub_(channel) {}\n\n  std::unique_ptr<grpc::GenericClientAsyncReaderWriter>\n  PrepareCall(grpc::ClientContext* context, const grpc::string& method,\n              grpc::CompletionQueue* cq) override {\n    return stub_.PrepareCall(context, method, cq);\n  }\n\nprivate:\n  grpc::GenericStub stub_;\n};\n\n// Interface to allow the gRPC stub creation to be mocked out by tests.\nclass GoogleStubFactory {\npublic:\n  virtual ~GoogleStubFactory() = default;\n\n  // Create a stub from a given channel.\n  virtual GoogleStubSharedPtr createStub(std::shared_ptr<grpc::Channel> channel) PURE;\n};\n\nclass GoogleGenericStubFactory : public GoogleStubFactory {\npublic:\n  GoogleStubSharedPtr createStub(std::shared_ptr<grpc::Channel> channel) override {\n    return std::make_shared<GoogleGenericStub>(channel);\n  }\n};\n\n// Google gRPC C++ client library implementation of Grpc::AsyncClient.\nclass GoogleAsyncClientImpl final : public RawAsyncClient, Logger::Loggable<Logger::Id::grpc> {\npublic:\n  GoogleAsyncClientImpl(Event::Dispatcher& dispatcher, GoogleAsyncClientThreadLocal& tls,\n     
                   GoogleStubFactory& stub_factory, Stats::ScopeSharedPtr scope,\n                        const envoy::config::core::v3::GrpcService& config, Api::Api& api,\n                        const StatNames& stat_names);\n  ~GoogleAsyncClientImpl() override;\n\n  // Grpc::AsyncClient\n  AsyncRequest* sendRaw(absl::string_view service_full_name, absl::string_view method_name,\n                        Buffer::InstancePtr&& request, RawAsyncRequestCallbacks& callbacks,\n                        Tracing::Span& parent_span,\n                        const Http::AsyncClient::RequestOptions& options) override;\n  RawAsyncStream* startRaw(absl::string_view service_full_name, absl::string_view method_name,\n                           RawAsyncStreamCallbacks& callbacks,\n                           const Http::AsyncClient::StreamOptions& options) override;\n\n  TimeSource& timeSource() { return dispatcher_.timeSource(); }\n  uint64_t perStreamBufferLimitBytes() const { return per_stream_buffer_limit_bytes_; }\n\nprivate:\n  Event::Dispatcher& dispatcher_;\n  GoogleAsyncClientThreadLocal& tls_;\n  // This is shared with child streams, so that they can cleanup independent of\n  // the client if it gets destructed. 
The streams need to wait for their tags\n  // to drain from the CQ.\n  GoogleStubSharedPtr stub_;\n  std::list<GoogleAsyncStreamImplPtr> active_streams_;\n  const std::string stat_prefix_;\n  const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValue> initial_metadata_;\n  Stats::ScopeSharedPtr scope_;\n  GoogleAsyncClientStats stats_;\n  uint64_t per_stream_buffer_limit_bytes_;\n\n  friend class GoogleAsyncClientThreadLocal;\n  friend class GoogleAsyncRequestImpl;\n  friend class GoogleAsyncStreamImpl;\n};\n\nclass GoogleAsyncStreamImpl : public RawAsyncStream,\n                              public Event::DeferredDeletable,\n                              Logger::Loggable<Logger::Id::grpc>,\n                              public LinkedObject<GoogleAsyncStreamImpl> {\npublic:\n  GoogleAsyncStreamImpl(GoogleAsyncClientImpl& parent, absl::string_view service_full_name,\n                        absl::string_view method_name, RawAsyncStreamCallbacks& callbacks,\n                        const Http::AsyncClient::StreamOptions& options);\n  ~GoogleAsyncStreamImpl() override;\n\n  virtual void initialize(bool buffer_body_for_retry);\n\n  // Grpc::RawAsyncStream\n  void sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) override;\n  void closeStream() override;\n  void resetStream() override;\n  // While the Google-gRPC code doesn't use Envoy watermark buffers, the logical\n  // analog is to make sure that the aren't too many bytes in the pending write\n  // queue.\n  bool isAboveWriteBufferHighWatermark() const override {\n    return bytes_in_write_pending_queue_ > parent_.perStreamBufferLimitBytes();\n  }\n\nprotected:\n  bool callFailed() const { return call_failed_; }\n\nprivate:\n  // Process queued events in completed_ops_ with handleOpCompletion() on\n  // GoogleAsyncClient silo thread.\n  void onCompletedOps();\n  // Handle Operation completion on GoogleAsyncClient silo thread. 
This is posted by\n  // GoogleAsyncClientThreadLocal::completionThread() when a message is received on cq_.\n  void handleOpCompletion(GoogleAsyncTag::Operation op, bool ok);\n  // Convert from Google gRPC client std::multimap metadata to Envoy Http::HeaderMap.\n  void metadataTranslate(const std::multimap<grpc::string_ref, grpc::string_ref>& grpc_metadata,\n                         Http::HeaderMap& header_map);\n  // Write the first PendingMessage in the write queue if non-empty.\n  void writeQueued();\n  // Deliver notification and update stats when the connection closes.\n  void notifyRemoteClose(Status::GrpcStatus grpc_status,\n                         Http::ResponseTrailerMapPtr trailing_metadata, const std::string& message);\n  // Schedule stream for deferred deletion.\n  void deferredDelete();\n  // Cleanup and schedule stream for deferred deletion if no inflight\n  // completions.\n  void cleanup();\n\n  // Pending serialized message on write queue. Only one Operation::Write is in-flight at any\n  // point-in-time, so we queue pending writes here.\n  struct PendingMessage {\n    PendingMessage(Buffer::InstancePtr request, bool end_stream);\n    // End-of-stream with no additional message.\n    PendingMessage() = default;\n\n    const absl::optional<grpc::ByteBuffer> buf_{};\n    const bool end_stream_{true};\n  };\n\n  GoogleAsyncTag init_tag_{*this, GoogleAsyncTag::Operation::Init};\n  GoogleAsyncTag read_initial_metadata_tag_{*this, GoogleAsyncTag::Operation::ReadInitialMetadata};\n  GoogleAsyncTag read_tag_{*this, GoogleAsyncTag::Operation::Read};\n  GoogleAsyncTag write_tag_{*this, GoogleAsyncTag::Operation::Write};\n  GoogleAsyncTag write_last_tag_{*this, GoogleAsyncTag::Operation::WriteLast};\n  GoogleAsyncTag finish_tag_{*this, GoogleAsyncTag::Operation::Finish};\n\n  GoogleAsyncClientImpl& parent_;\n  GoogleAsyncClientThreadLocal& tls_;\n  // Latch our own version of this reference, so that completionThread() doesn't\n  // try and access via 
parent_, which might not exist in teardown. We assume\n  // that the dispatcher lives longer than completionThread() life, which should\n  // hold for the expected server object lifetimes.\n  Event::Dispatcher& dispatcher_;\n  // We hold a ref count on the stub_ to allow the stream to wait for its tags\n  // to drain from the CQ on cleanup.\n  GoogleStubSharedPtr stub_;\n  std::string service_full_name_;\n  std::string method_name_;\n  RawAsyncStreamCallbacks& callbacks_;\n  const Http::AsyncClient::StreamOptions& options_;\n  grpc::ClientContext ctxt_;\n  std::unique_ptr<grpc::GenericClientAsyncReaderWriter> rw_;\n  std::queue<PendingMessage> write_pending_queue_;\n  uint64_t bytes_in_write_pending_queue_{};\n  grpc::ByteBuffer read_buf_;\n  grpc::Status status_;\n  // Has Operation::Init completed?\n  bool call_initialized_{};\n  // Did the stub Call fail? If this is true, no Operation::Init completion will ever occur.\n  bool call_failed_{};\n  // Is there an Operation::Write[Last] in-flight?\n  bool write_pending_{};\n  // Is an Operation::Finish in-flight?\n  bool finish_pending_{};\n  // Have we entered CQ draining state? If so, we're just waiting for all our\n  // ops on the CQ to drain away before freeing the stream.\n  bool draining_cq_{};\n  // Count of the tags in-flight. 
This must hit zero before the stream can be\n  // freed.\n  uint32_t inflight_tags_{};\n  // Queue of completed (op, ok) passed from completionThread() to\n  // handleOpCompletion().\n  std::deque<std::pair<GoogleAsyncTag::Operation, bool>>\n      completed_ops_ ABSL_GUARDED_BY(completed_ops_lock_);\n  Thread::MutexBasicLockable completed_ops_lock_;\n\n  friend class GoogleAsyncClientImpl;\n  friend class GoogleAsyncClientThreadLocal;\n};\n\nclass GoogleAsyncRequestImpl : public AsyncRequest,\n                               public GoogleAsyncStreamImpl,\n                               RawAsyncStreamCallbacks {\npublic:\n  GoogleAsyncRequestImpl(GoogleAsyncClientImpl& parent, absl::string_view service_full_name,\n                         absl::string_view method_name, Buffer::InstancePtr request,\n                         RawAsyncRequestCallbacks& callbacks, Tracing::Span& parent_span,\n                         const Http::AsyncClient::RequestOptions& options);\n\n  void initialize(bool buffer_body_for_retry) override;\n\n  // Grpc::AsyncRequest\n  void cancel() override;\n\nprivate:\n  // Grpc::RawAsyncStreamCallbacks\n  void onCreateInitialMetadata(Http::RequestHeaderMap& metadata) override;\n  void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&&) override;\n  bool onReceiveMessageRaw(Buffer::InstancePtr&& response) override;\n  void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&&) override;\n  void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override;\n\n  Buffer::InstancePtr request_;\n  RawAsyncRequestCallbacks& callbacks_;\n  Tracing::SpanPtr current_span_;\n  Buffer::InstancePtr response_;\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/google_grpc_context.cc",
    "content": "#include \"common/grpc/google_grpc_context.h\"\n\n#include <atomic>\n\n#include \"common/common/assert.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/thread.h\"\n\n#ifdef ENVOY_GOOGLE_GRPC\n#include \"grpcpp/grpcpp.h\"\n#endif\n\nnamespace Envoy {\nnamespace Grpc {\n\nGoogleGrpcContext::GoogleGrpcContext() : instance_tracker_(instanceTracker()) {\n#ifdef ENVOY_GOOGLE_GRPC\n  Thread::LockGuard lock(instance_tracker_.mutex_);\n  if (++instance_tracker_.live_instances_ == 1) {\n    grpc_init();\n  }\n#endif\n}\n\nGoogleGrpcContext::~GoogleGrpcContext() {\n#ifdef ENVOY_GOOGLE_GRPC\n  // Per https://github.com/grpc/grpc/issues/20303 it is OK to call\n  // grpc_shutdown_blocking() as long as no one can concurrently call\n  // grpc_init(). We use check_format.py to ensure that this file contains the\n  // only callers to grpc_init(), and the mutex to then make that guarantee\n  // across users of this class.\n  Thread::LockGuard lock(instance_tracker_.mutex_);\n  ASSERT(instance_tracker_.live_instances_ > 0);\n  if (--instance_tracker_.live_instances_ == 0) {\n    grpc_shutdown_blocking(); // Waiting for quiescence avoids non-determinism in tests.\n  }\n#endif\n}\n\nGoogleGrpcContext::InstanceTracker& GoogleGrpcContext::instanceTracker() {\n  MUTABLE_CONSTRUCT_ON_FIRST_USE(InstanceTracker);\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/google_grpc_context.h",
    "content": "#pragma once\n\n#include \"common/common/thread.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\n// Captures global grpc initialization and shutdown. Note that grpc\n// initialization starts several threads, so it is a little annoying to run them\n// alongside unrelated tests, particularly if they are trying to track memory\n// usage, or you are exploiting otherwise consistent run-to-run pointer values\n// during debug.\n//\n// Instantiating this class makes it easy to ensure classes that depend on grpc\n// libraries get them initialized.\nclass GoogleGrpcContext {\npublic:\n  GoogleGrpcContext();\n  ~GoogleGrpcContext();\n\nprivate:\n  struct InstanceTracker {\n    Thread::MutexBasicLockable mutex_;\n    uint64_t live_instances_ ABSL_GUARDED_BY(mutex_) = 0;\n  };\n\n  static InstanceTracker& instanceTracker();\n\n  InstanceTracker& instance_tracker_;\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/google_grpc_creds_impl.cc",
    "content": "#include \"common/grpc/google_grpc_creds_impl.h\"\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/grpc/google_grpc_creds.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/datasource.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nstd::shared_ptr<grpc::ChannelCredentials> CredsUtility::getChannelCredentials(\n    const envoy::config::core::v3::GrpcService::GoogleGrpc& google_grpc, Api::Api& api) {\n  if (google_grpc.has_channel_credentials()) {\n    switch (google_grpc.channel_credentials().credential_specifier_case()) {\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::ChannelCredentials::\n        CredentialSpecifierCase::kSslCredentials: {\n      const auto& ssl_credentials = google_grpc.channel_credentials().ssl_credentials();\n      const grpc::SslCredentialsOptions ssl_credentials_options = {\n          Config::DataSource::read(ssl_credentials.root_certs(), true, api),\n          Config::DataSource::read(ssl_credentials.private_key(), true, api),\n          Config::DataSource::read(ssl_credentials.cert_chain(), true, api),\n      };\n      return grpc::SslCredentials(ssl_credentials_options);\n    }\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::ChannelCredentials::\n        CredentialSpecifierCase::kLocalCredentials: {\n      return grpc::experimental::LocalCredentials(UDS);\n    }\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::ChannelCredentials::\n        CredentialSpecifierCase::kGoogleDefault: {\n      return grpc::GoogleDefaultCredentials();\n    }\n    default:\n      return nullptr;\n    }\n  }\n  return nullptr;\n}\n\nstd::shared_ptr<grpc::ChannelCredentials> CredsUtility::defaultSslChannelCredentials(\n    const envoy::config::core::v3::GrpcService& grpc_service_config, Api::Api& api) {\n  auto creds = getChannelCredentials(grpc_service_config.google_grpc(), api);\n  if (creds != nullptr) {\n    return creds;\n  }\n  return 
grpc::SslCredentials({});\n}\n\nstd::vector<std::shared_ptr<grpc::CallCredentials>>\nCredsUtility::callCredentials(const envoy::config::core::v3::GrpcService::GoogleGrpc& google_grpc) {\n  std::vector<std::shared_ptr<grpc::CallCredentials>> creds;\n  for (const auto& credential : google_grpc.call_credentials()) {\n    std::shared_ptr<grpc::CallCredentials> new_call_creds;\n    switch (credential.credential_specifier_case()) {\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::CallCredentials::\n        CredentialSpecifierCase::kAccessToken: {\n      new_call_creds = grpc::AccessTokenCredentials(credential.access_token());\n      break;\n    }\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::CallCredentials::\n        CredentialSpecifierCase::kGoogleComputeEngine: {\n      new_call_creds = grpc::GoogleComputeEngineCredentials();\n      break;\n    }\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::CallCredentials::\n        CredentialSpecifierCase::kGoogleRefreshToken: {\n      new_call_creds = grpc::GoogleRefreshTokenCredentials(credential.google_refresh_token());\n      break;\n    }\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::CallCredentials::\n        CredentialSpecifierCase::kServiceAccountJwtAccess: {\n      new_call_creds = grpc::ServiceAccountJWTAccessCredentials(\n          credential.service_account_jwt_access().json_key(),\n          credential.service_account_jwt_access().token_lifetime_seconds());\n      break;\n    }\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::CallCredentials::\n        CredentialSpecifierCase::kGoogleIam: {\n      new_call_creds = grpc::GoogleIAMCredentials(credential.google_iam().authorization_token(),\n                                                  credential.google_iam().authority_selector());\n      break;\n    }\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::CallCredentials::\n        CredentialSpecifierCase::kStsService: {\n      
grpc::experimental::StsCredentialsOptions options = {\n          credential.sts_service().token_exchange_service_uri(),\n          credential.sts_service().resource(),\n          credential.sts_service().audience(),\n          credential.sts_service().scope(),\n          credential.sts_service().requested_token_type(),\n          credential.sts_service().subject_token_path(),\n          credential.sts_service().subject_token_type(),\n          credential.sts_service().actor_token_path(),\n          credential.sts_service().actor_token_type(),\n      };\n      new_call_creds = grpc::experimental::StsCredentials(options);\n      break;\n    }\n    default:\n      // We don't handle plugin credentials here, callers can do so instead if they want.\n      continue;\n    }\n    // Any of the above creds creation can fail, if they do they return nullptr\n    // and we ignore them.\n    if (new_call_creds != nullptr) {\n      creds.emplace_back(new_call_creds);\n    }\n  }\n  return creds;\n}\n\nstd::shared_ptr<grpc::ChannelCredentials> CredsUtility::defaultChannelCredentials(\n    const envoy::config::core::v3::GrpcService& grpc_service_config, Api::Api& api) {\n  std::shared_ptr<grpc::ChannelCredentials> channel_creds =\n      getChannelCredentials(grpc_service_config.google_grpc(), api);\n  if (channel_creds == nullptr) {\n    channel_creds = grpc::InsecureChannelCredentials();\n  }\n  auto call_creds_vec = callCredentials(grpc_service_config.google_grpc());\n  if (call_creds_vec.empty()) {\n    return channel_creds;\n  }\n  std::shared_ptr<grpc::CallCredentials> call_creds = call_creds_vec[0];\n  for (uint32_t i = 1; i < call_creds_vec.size(); ++i) {\n    call_creds = grpc::CompositeCallCredentials(call_creds, call_creds_vec[i]);\n  }\n  return grpc::CompositeChannelCredentials(channel_creds, call_creds);\n}\n\n/**\n * Default implementation of Google Grpc Credentials Factory\n * Uses ssl creds if available, or defaults to insecure channel.\n *\n * This is not the same 
as google_default credentials. This is the default implementation that is\n * loaded if no other implementation is configured.\n */\nclass DefaultGoogleGrpcCredentialsFactory : public GoogleGrpcCredentialsFactory {\n\npublic:\n  std::shared_ptr<grpc::ChannelCredentials>\n  getChannelCredentials(const envoy::config::core::v3::GrpcService& grpc_service_config,\n                        Api::Api& api) override {\n    return CredsUtility::defaultChannelCredentials(grpc_service_config, api);\n  }\n\n  std::string name() const override { return \"envoy.grpc_credentials.default\"; }\n};\n\n/**\n * Static registration for the default Google gRPC credentials factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(DefaultGoogleGrpcCredentialsFactory, GoogleGrpcCredentialsFactory);\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/google_grpc_creds_impl.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"grpcpp/grpcpp.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\ngrpc::SslCredentialsOptions buildSslOptionsFromConfig(\n    const envoy::config::core::v3::GrpcService::GoogleGrpc::SslCredentials& ssl_config);\n\nstd::shared_ptr<grpc::ChannelCredentials>\ngetGoogleGrpcChannelCredentials(const envoy::config::core::v3::GrpcService& grpc_service,\n                                Api::Api& api);\n\nclass CredsUtility {\npublic:\n  /**\n   * Translation from envoy::config::core::v3::GrpcService::GoogleGrpc to grpc::ChannelCredentials\n   * for channel credentials.\n   * @param google_grpc Google gRPC config.\n   * @param api reference to the Api object\n   * @return std::shared_ptr<grpc::ChannelCredentials> channel credentials. A nullptr\n   *         will be returned in the absence of any configured credentials.\n   */\n  static std::shared_ptr<grpc::ChannelCredentials>\n  getChannelCredentials(const envoy::config::core::v3::GrpcService::GoogleGrpc& google_grpc,\n                        Api::Api& api);\n\n  /**\n   * Static translation from envoy::config::core::v3::GrpcService::GoogleGrpc to a vector of\n   * grpc::CallCredentials. Any plugin based call credentials will be elided.\n   * @param grpc_service Google gRPC config.\n   * @return std::vector<std::shared_ptr<grpc::CallCredentials>> call credentials.\n   */\n  static std::vector<std::shared_ptr<grpc::CallCredentials>>\n  callCredentials(const envoy::config::core::v3::GrpcService::GoogleGrpc& google_grpc);\n\n  /**\n   * Default translation from envoy::config::core::v3::GrpcService::GoogleGrpc to\n   * grpc::ChannelCredentials for SSL channel credentials.\n   * @param grpc_service_config gRPC service config.\n   * @param api reference to the Api object\n   * @return std::shared_ptr<grpc::ChannelCredentials> SSL channel credentials. 
Empty SSL\n   *         credentials will be set in the absence of any configured SSL in grpc_service_config,\n   *         forcing the channel to SSL.\n   */\n  static std::shared_ptr<grpc::ChannelCredentials>\n  defaultSslChannelCredentials(const envoy::config::core::v3::GrpcService& grpc_service_config,\n                               Api::Api& api);\n\n  /**\n   * Default static translation from envoy::config::core::v3::GrpcService::GoogleGrpc to\n   * grpc::ChannelCredentials for all non-plugin based channel and call credentials.\n   * @param grpc_service_config gRPC service config.\n   * @param api reference to the Api object\n   * @return std::shared_ptr<grpc::ChannelCredentials> composite channel and call credentials.\n   *         will be set in the absence of any configured SSL in grpc_service_config, forcing the\n   *         channel to SSL.\n   */\n  static std::shared_ptr<grpc::ChannelCredentials>\n  defaultChannelCredentials(const envoy::config::core::v3::GrpcService& grpc_service_config,\n                            Api::Api& api);\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/google_grpc_utils.cc",
    "content": "#include \"common/grpc/google_grpc_utils.h\"\n\n#include <atomic>\n#include <cstdint>\n#include <cstring>\n#include <string>\n\n#include \"envoy/grpc/google_grpc_creds.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nnamespace {\n\nstd::shared_ptr<grpc::ChannelCredentials>\ngetGoogleGrpcChannelCredentials(const envoy::config::core::v3::GrpcService& grpc_service,\n                                Api::Api& api) {\n  GoogleGrpcCredentialsFactory* credentials_factory = nullptr;\n  const std::string& google_grpc_credentials_factory_name =\n      grpc_service.google_grpc().credentials_factory_name();\n  if (google_grpc_credentials_factory_name.empty()) {\n    credentials_factory = Registry::FactoryRegistry<GoogleGrpcCredentialsFactory>::getFactory(\n        \"envoy.grpc_credentials.default\");\n  } else {\n    credentials_factory = Registry::FactoryRegistry<GoogleGrpcCredentialsFactory>::getFactory(\n        google_grpc_credentials_factory_name);\n  }\n  if (credentials_factory == nullptr) {\n    throw EnvoyException(absl::StrCat(\"Unknown google grpc credentials factory: \",\n                                      google_grpc_credentials_factory_name));\n  }\n  return credentials_factory->getChannelCredentials(grpc_service, api);\n}\n\n} // namespace\n\nstruct BufferInstanceContainer {\n  BufferInstanceContainer(int ref_count, Buffer::InstancePtr&& buffer)\n      : ref_count_(ref_count), buffer_(std::move(buffer)) {}\n  std::atomic<uint32_t> ref_count_; // In case gPRC dereferences in a different threads.\n  Buffer::InstancePtr buffer_;\n\n  static void 
derefBufferInstanceContainer(void* container_ptr) {\n    auto container = static_cast<BufferInstanceContainer*>(container_ptr);\n    container->ref_count_--;\n    // This is safe because the ref_count_ is never incremented.\n    if (container->ref_count_ <= 0) {\n      delete container;\n    }\n  }\n};\n\ngrpc::ByteBuffer GoogleGrpcUtils::makeByteBuffer(Buffer::InstancePtr&& buffer_instance) {\n  if (!buffer_instance) {\n    return {};\n  }\n  Buffer::RawSliceVector raw_slices = buffer_instance->getRawSlices();\n  if (raw_slices.empty()) {\n    return {};\n  }\n\n  auto* container =\n      new BufferInstanceContainer{static_cast<int>(raw_slices.size()), std::move(buffer_instance)};\n  std::vector<grpc::Slice> slices;\n  slices.reserve(raw_slices.size());\n  for (Buffer::RawSlice& raw_slice : raw_slices) {\n    slices.emplace_back(raw_slice.mem_, raw_slice.len_,\n                        &BufferInstanceContainer::derefBufferInstanceContainer, container);\n  }\n  return {&slices[0], slices.size()};\n}\n\nclass GrpcSliceBufferFragmentImpl : public Buffer::BufferFragment {\npublic:\n  explicit GrpcSliceBufferFragmentImpl(grpc::Slice&& slice) : slice_(std::move(slice)) {}\n\n  // Buffer::BufferFragment\n  const void* data() const override { return slice_.begin(); }\n  size_t size() const override { return slice_.size(); }\n  void done() override { delete this; }\n\nprivate:\n  const grpc::Slice slice_;\n};\n\nBuffer::InstancePtr GoogleGrpcUtils::makeBufferInstance(const grpc::ByteBuffer& byte_buffer) {\n  auto buffer = std::make_unique<Buffer::OwnedImpl>();\n  if (byte_buffer.Length() == 0) {\n    return buffer;\n  }\n  // NB: ByteBuffer::Dump moves the data out of the ByteBuffer so we need to ensure that the\n  // lifetime of the Slice(s) exceeds our Buffer::Instance.\n  std::vector<grpc::Slice> slices;\n  if (!byte_buffer.Dump(&slices).ok()) {\n    return nullptr;\n  }\n\n  for (auto& slice : slices) {\n    buffer->addBufferFragment(*new 
GrpcSliceBufferFragmentImpl(std::move(slice)));\n  }\n  return buffer;\n}\n\ngrpc::ChannelArguments\nGoogleGrpcUtils::channelArgsFromConfig(const envoy::config::core::v3::GrpcService& config) {\n  grpc::ChannelArguments args;\n  for (const auto& channel_arg : config.google_grpc().channel_args().args()) {\n    switch (channel_arg.second.value_specifier_case()) {\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::ChannelArgs::Value::kStringValue: {\n      args.SetString(channel_arg.first, channel_arg.second.string_value());\n      break;\n    }\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::ChannelArgs::Value::kIntValue: {\n      args.SetInt(channel_arg.first, channel_arg.second.int_value());\n      break;\n    }\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n  return args;\n}\n\nstd::shared_ptr<grpc::Channel>\nGoogleGrpcUtils::createChannel(const envoy::config::core::v3::GrpcService& config, Api::Api& api) {\n  std::shared_ptr<grpc::ChannelCredentials> creds = getGoogleGrpcChannelCredentials(config, api);\n  const grpc::ChannelArguments args = channelArgsFromConfig(config);\n  return CreateCustomChannel(config.google_grpc().target_uri(), creds, args);\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/google_grpc_utils.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"grpcpp/grpcpp.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nclass GoogleGrpcUtils {\npublic:\n  /**\n   * Build grpc::ByteBuffer which aliases the data in a Buffer::InstancePtr.\n   * @param buffer source data container.\n   * @return byteBuffer target container aliased to the data in Buffer::Instance and owning the\n   * Buffer::Instance.\n   */\n  static grpc::ByteBuffer makeByteBuffer(Buffer::InstancePtr&& buffer);\n\n  /**\n   * Build Buffer::Instance which aliases the data in a grpc::ByteBuffer.\n   * @param buffer source data container.\n   * @return a Buffer::InstancePtr aliased to the data in the provided grpc::ByteBuffer and\n   * owning the corresponding grpc::Slice(s) or nullptr if the grpc::ByteBuffer is bad.\n   */\n  static Buffer::InstancePtr makeBufferInstance(const grpc::ByteBuffer& buffer);\n\n  /**\n   * Build grpc::ChannelArguments from gRPC service config.\n   * @param config Google gRPC config.\n   * @return grpc::ChannelArguments corresponding to config.\n   */\n  static grpc::ChannelArguments\n  channelArgsFromConfig(const envoy::config::core::v3::GrpcService& config);\n\n  /**\n   * Build gRPC channel based on the given GrpcService configuration.\n   * @param config Google gRPC config.\n   * @param api reference to the Api object\n   * @return static std::shared_ptr<grpc::Channel> a gRPC channel.\n   */\n  static std::shared_ptr<grpc::Channel>\n  createChannel(const envoy::config::core::v3::GrpcService& config, Api::Api& api);\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/stat_names.cc",
    "content": "#include \"common/grpc/stat_names.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nStatNames::StatNames(Stats::SymbolTable& symbol_table)\n    : pool_(symbol_table), streams_total_(pool_.add(\"streams_total\")),\n      google_grpc_client_creation_(pool_.add(\"google_grpc_client_creation\")) {\n  for (uint32_t i = 0; i <= Status::WellKnownGrpcStatus::MaximumKnown; ++i) {\n    std::string status_str = absl::StrCat(i);\n    streams_closed_[i] = pool_.add(absl::StrCat(\"streams_closed_\", status_str));\n    status_names_[status_str] = pool_.add(status_str);\n  }\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/stat_names.h",
    "content": "#pragma once\n\n#include \"envoy/grpc/status.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\n/**\n * Captures symbolized representation for tokens used in grpc stats. These are\n * broken out so they can be allocated early and used across all gRPC-related\n * filters.\n */\nstruct StatNames {\n  explicit StatNames(Stats::SymbolTable& symbol_table);\n\n  Stats::StatNamePool pool_;\n  Stats::StatName streams_total_;\n  std::array<Stats::StatName, Status::WellKnownGrpcStatus::MaximumKnown + 1> streams_closed_;\n  absl::flat_hash_map<std::string, Stats::StatName> status_names_;\n  // Stat name tracking the creation of the Google grpc client.\n  Stats::StatName google_grpc_client_creation_;\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/status.cc",
    "content": "#include \"common/grpc/status.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nStatus::GrpcStatus Utility::httpToGrpcStatus(uint64_t http_response_status) {\n  // From\n  // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md.\n  switch (http_response_status) {\n  case 400:\n    return Status::WellKnownGrpcStatus::Internal;\n  case 401:\n    return Status::WellKnownGrpcStatus::Unauthenticated;\n  case 403:\n    return Status::WellKnownGrpcStatus::PermissionDenied;\n  case 404:\n    return Status::WellKnownGrpcStatus::Unimplemented;\n  case 429:\n  case 502:\n  case 503:\n  case 504:\n    return Status::WellKnownGrpcStatus::Unavailable;\n  default:\n    return Status::WellKnownGrpcStatus::Unknown;\n  }\n}\n\nuint64_t Utility::grpcToHttpStatus(Status::GrpcStatus grpc_status) {\n  // From https://cloud.google.com/apis/design/errors#handling_errors.\n  switch (grpc_status) {\n  case Status::WellKnownGrpcStatus::Ok:\n    return 200;\n  case Status::WellKnownGrpcStatus::Canceled:\n    // Client closed request.\n    return 499;\n  case Status::WellKnownGrpcStatus::Unknown:\n    // Internal server error.\n    return 500;\n  case Status::WellKnownGrpcStatus::InvalidArgument:\n    // Bad request.\n    return 400;\n  case Status::WellKnownGrpcStatus::DeadlineExceeded:\n    // Gateway Time-out.\n    return 504;\n  case Status::WellKnownGrpcStatus::NotFound:\n    // Not found.\n    return 404;\n  case Status::WellKnownGrpcStatus::AlreadyExists:\n    // Conflict.\n    return 409;\n  case Status::WellKnownGrpcStatus::PermissionDenied:\n    // Forbidden.\n    return 403;\n  case Status::WellKnownGrpcStatus::ResourceExhausted:\n    //  Too many requests.\n    return 429;\n  case Status::WellKnownGrpcStatus::FailedPrecondition:\n    // Bad request.\n    return 400;\n  case Status::WellKnownGrpcStatus::Aborted:\n    // Conflict.\n    return 409;\n  case Status::WellKnownGrpcStatus::OutOfRange:\n    // Bad request.\n    return 400;\n  case 
Status::WellKnownGrpcStatus::Unimplemented:\n    // Not implemented.\n    return 501;\n  case Status::WellKnownGrpcStatus::Internal:\n    // Internal server error.\n    return 500;\n  case Status::WellKnownGrpcStatus::Unavailable:\n    // Service unavailable.\n    return 503;\n  case Status::WellKnownGrpcStatus::DataLoss:\n    // Internal server error.\n    return 500;\n  case Status::WellKnownGrpcStatus::Unauthenticated:\n    // Unauthorized.\n    return 401;\n  case Status::WellKnownGrpcStatus::InvalidCode:\n  default:\n    // Internal server error.\n    return 500;\n  }\n}\n\nstd::string Utility::grpcStatusToString(Status::GrpcStatus grpc_status) {\n  switch (grpc_status) {\n  case Status::WellKnownGrpcStatus::Ok:\n    return \"OK\";\n  case Status::WellKnownGrpcStatus::Canceled:\n    return \"Canceled\";\n  case Status::WellKnownGrpcStatus::Unknown:\n    return \"Unknown\";\n  case Status::WellKnownGrpcStatus::InvalidArgument:\n    return \"InvalidArgument\";\n  case Status::WellKnownGrpcStatus::DeadlineExceeded:\n    return \"DeadlineExceeded\";\n  case Status::WellKnownGrpcStatus::NotFound:\n    return \"NotFound\";\n  case Status::WellKnownGrpcStatus::AlreadyExists:\n    return \"AlreadyExists\";\n  case Status::WellKnownGrpcStatus::PermissionDenied:\n    return \"PermissionDenied\";\n  case Status::WellKnownGrpcStatus::ResourceExhausted:\n    return \"ResourceExhausted\";\n  case Status::WellKnownGrpcStatus::FailedPrecondition:\n    return \"FailedPrecondition\";\n  case Status::WellKnownGrpcStatus::Aborted:\n    return \"Aborted\";\n  case Status::WellKnownGrpcStatus::OutOfRange:\n    return \"OutOfRange\";\n  case Status::WellKnownGrpcStatus::Unimplemented:\n    return \"Unimplemented\";\n  case Status::WellKnownGrpcStatus::Internal:\n    return \"Internal\";\n  case Status::WellKnownGrpcStatus::Unavailable:\n    return \"Unavailable\";\n  case Status::WellKnownGrpcStatus::DataLoss:\n    return \"DataLoss\";\n  case 
Status::WellKnownGrpcStatus::Unauthenticated:\n    return \"Unauthenticated\";\n  case Status::WellKnownGrpcStatus::InvalidCode:\n  default:\n    return \"InvalidCode\";\n  }\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/status.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/grpc/status.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\n/**\n * Grpc::Status utilities.\n */\nclass Utility {\npublic:\n  /**\n   * Returns the gRPC status code from a given HTTP response status code. Ordinarily, it is expected\n   * that a 200 response is provided, but gRPC defines a mapping for intermediaries that are not\n   * gRPC aware, see https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md.\n   * @param http_response_status HTTP status code.\n   * @return Status::GrpcStatus corresponding gRPC status code.\n   */\n  static Status::GrpcStatus httpToGrpcStatus(uint64_t http_response_status);\n\n  /**\n   * @param grpc_status gRPC status from grpc-status header.\n   * @return uint64_t the canonical HTTP status code corresponding to a gRPC status code.\n   */\n  static uint64_t grpcToHttpStatus(Status::GrpcStatus grpc_status);\n\n  /**\n   * @param grpc_status gRPC status from grpc-status header.\n   * @return gRPC status string converted from grpc-status.\n   */\n  static std::string grpcStatusToString(Status::GrpcStatus grpc_status);\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/typed_async_client.cc",
    "content": "#include \"common/grpc/typed_async_client.h\"\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/utility.h\"\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace Internal {\n\nvoid sendMessageUntyped(RawAsyncStream* stream, const Protobuf::Message& request, bool end_stream) {\n  stream->sendMessageRaw(Common::serializeMessage(request), end_stream);\n}\n\nProtobufTypes::MessagePtr parseMessageUntyped(ProtobufTypes::MessagePtr&& message,\n                                              Buffer::InstancePtr&& response) {\n  // TODO(htuch): Need to add support for compressed responses as well here.\n  if (response->length() > 0) {\n    Buffer::ZeroCopyInputStreamImpl stream(std::move(response));\n    if (!message->ParseFromZeroCopyStream(&stream)) {\n      return nullptr;\n    }\n  }\n  return std::move(message);\n}\n\nRawAsyncStream* startUntyped(RawAsyncClient* client,\n                             const Protobuf::MethodDescriptor& service_method,\n                             RawAsyncStreamCallbacks& callbacks,\n                             const Http::AsyncClient::StreamOptions& options) {\n  return client->startRaw(service_method.service()->full_name(), service_method.name(), callbacks,\n                          options);\n}\n\nAsyncRequest* sendUntyped(RawAsyncClient* client, const Protobuf::MethodDescriptor& service_method,\n                          const Protobuf::Message& request, RawAsyncRequestCallbacks& callbacks,\n                          Tracing::Span& parent_span,\n                          const Http::AsyncClient::RequestOptions& options) {\n  return client->sendRaw(service_method.service()->full_name(), service_method.name(),\n                         Common::serializeMessage(request), callbacks, parent_span, options);\n}\n\n} // namespace Internal\n} // 
namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/grpc/typed_async_client.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n\n#include \"envoy/grpc/async_client.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/config/version_converter.h\"\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace Internal {\n\n/**\n * Forward declarations for helper functions.\n */\nvoid sendMessageUntyped(RawAsyncStream* stream, const Protobuf::Message& request, bool end_stream);\nProtobufTypes::MessagePtr parseMessageUntyped(ProtobufTypes::MessagePtr&& message,\n                                              Buffer::InstancePtr&& response);\nRawAsyncStream* startUntyped(RawAsyncClient* client,\n                             const Protobuf::MethodDescriptor& service_method,\n                             RawAsyncStreamCallbacks& callbacks,\n                             const Http::AsyncClient::StreamOptions& options);\nAsyncRequest* sendUntyped(RawAsyncClient* client, const Protobuf::MethodDescriptor& service_method,\n                          const Protobuf::Message& request, RawAsyncRequestCallbacks& callbacks,\n                          Tracing::Span& parent_span,\n                          const Http::AsyncClient::RequestOptions& options);\n\n} // namespace Internal\n\n/**\n * Convenience wrapper for an AsyncStream* providing typed protobuf support.\n */\ntemplate <typename Request> class AsyncStream /* : public RawAsyncStream */ {\npublic:\n  AsyncStream() = default;\n  AsyncStream(RawAsyncStream* stream) : stream_(stream) {}\n  AsyncStream(const AsyncStream& other) = default;\n  void sendMessage(const Protobuf::Message& request, bool end_stream) {\n    Internal::sendMessageUntyped(stream_, std::move(request), end_stream);\n  }\n  void sendMessage(const Protobuf::Message& request,\n                   envoy::config::core::v3::ApiVersion transport_api_version, bool end_stream) {\n    Config::VersionConverter::prepareMessageForGrpcWire(const_cast<Protobuf::Message&>(request),\n                                                 
       transport_api_version);\n    Internal::sendMessageUntyped(stream_, std::move(request), end_stream);\n  }\n  void closeStream() { stream_->closeStream(); }\n  void resetStream() { stream_->resetStream(); }\n  bool isAboveWriteBufferHighWatermark() const {\n    return stream_->isAboveWriteBufferHighWatermark();\n  }\n  AsyncStream* operator->() { return this; }\n  AsyncStream<Request> operator=(RawAsyncStream* stream) {\n    stream_ = stream;\n    return *this;\n  }\n  bool operator==(RawAsyncStream* stream) const { return stream_ == stream; }\n  bool operator!=(RawAsyncStream* stream) const { return stream_ != stream; }\n\nprivate:\n  RawAsyncStream* stream_{};\n};\n\ntemplate <typename Response> using ResponsePtr = std::unique_ptr<Response>;\n\n/**\n * Convenience subclasses for AsyncRequestCallbacks.\n */\ntemplate <typename Response> class AsyncRequestCallbacks : public RawAsyncRequestCallbacks {\npublic:\n  ~AsyncRequestCallbacks() override = default;\n  virtual void onSuccess(ResponsePtr<Response>&& response, Tracing::Span& span) PURE;\n\nprivate:\n  void onSuccessRaw(Buffer::InstancePtr&& response, Tracing::Span& span) override {\n    auto message = ResponsePtr<Response>(dynamic_cast<Response*>(\n        Internal::parseMessageUntyped(std::make_unique<Response>(), std::move(response))\n            .release()));\n    if (!message) {\n      onFailure(Status::WellKnownGrpcStatus::Internal, \"\", span);\n      return;\n    }\n    onSuccess(std::move(message), span);\n  }\n};\n\n/**\n * Versioned methods wrapper.\n */\nclass VersionedMethods {\npublic:\n  VersionedMethods(const std::string& v3, const std::string& v2, const std::string& v2_alpha = \"\")\n      : v3_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v3)),\n        v2_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v2)),\n        v2_alpha_(v2_alpha.empty()\n                      ? 
nullptr\n                      : Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v2_alpha)) {}\n\n  /**\n   * Given a version, return the method descriptor for a specific version.\n   *\n   * @param api_version target API version.\n   * @param use_alpha if this is an alpha version of an API method.\n   *\n   * @return Protobuf::MethodDescriptor& of a method for a specific version.\n   */\n  const Protobuf::MethodDescriptor&\n  getMethodDescriptorForVersion(envoy::config::core::v3::ApiVersion api_version,\n                                bool use_alpha = false) const {\n    switch (api_version) {\n    case envoy::config::core::v3::ApiVersion::AUTO:\n      FALLTHRU;\n    case envoy::config::core::v3::ApiVersion::V2: {\n      const auto* descriptor = use_alpha ? v2_alpha_ : v2_;\n      ASSERT(descriptor != nullptr);\n      return *descriptor;\n    }\n\n    case envoy::config::core::v3::ApiVersion::V3: {\n      const auto* descriptor = v3_;\n      ASSERT(descriptor != nullptr);\n      return *descriptor;\n    }\n\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\nprivate:\n  const Protobuf::MethodDescriptor* v3_{nullptr};\n  const Protobuf::MethodDescriptor* v2_{nullptr};\n  const Protobuf::MethodDescriptor* v2_alpha_{nullptr};\n};\n\n/**\n * Convenience subclasses for AsyncStreamCallbacks.\n */\ntemplate <typename Response> class AsyncStreamCallbacks : public RawAsyncStreamCallbacks {\npublic:\n  ~AsyncStreamCallbacks() override = default;\n  virtual void onReceiveMessage(ResponsePtr<Response>&& message) PURE;\n\nprivate:\n  bool onReceiveMessageRaw(Buffer::InstancePtr&& response) override {\n    auto message = ResponsePtr<Response>(dynamic_cast<Response*>(\n        Internal::parseMessageUntyped(std::make_unique<Response>(), std::move(response))\n            .release()));\n    if (!message) {\n      return false;\n    }\n    onReceiveMessage(std::move(message));\n    return true;\n  }\n};\n\ntemplate <typename Request, typename Response> 
class AsyncClient /* : public RawAsyncClient )*/ {\npublic:\n  AsyncClient() = default;\n  AsyncClient(RawAsyncClientPtr&& client) : client_(std::move(client)) {}\n  AsyncClient(RawAsyncClientSharedPtr client) : client_(client) {}\n  virtual ~AsyncClient() = default;\n\n  virtual AsyncRequest* send(const Protobuf::MethodDescriptor& service_method,\n                             const Protobuf::Message& request,\n                             AsyncRequestCallbacks<Response>& callbacks, Tracing::Span& parent_span,\n                             const Http::AsyncClient::RequestOptions& options) {\n    return Internal::sendUntyped(client_.get(), service_method, request, callbacks, parent_span,\n                                 options);\n  }\n  virtual AsyncRequest* send(const Protobuf::MethodDescriptor& service_method,\n                             const Protobuf::Message& request,\n                             AsyncRequestCallbacks<Response>& callbacks, Tracing::Span& parent_span,\n                             const Http::AsyncClient::RequestOptions& options,\n                             envoy::config::core::v3::ApiVersion transport_api_version) {\n    Config::VersionConverter::prepareMessageForGrpcWire(const_cast<Protobuf::Message&>(request),\n                                                        transport_api_version);\n    return Internal::sendUntyped(client_.get(), service_method, request, callbacks, parent_span,\n                                 options);\n  }\n\n  virtual AsyncStream<Request> start(const Protobuf::MethodDescriptor& service_method,\n                                     AsyncStreamCallbacks<Response>& callbacks,\n                                     const Http::AsyncClient::StreamOptions& options) {\n    return AsyncStream<Request>(\n        Internal::startUntyped(client_.get(), service_method, callbacks, options));\n  }\n\n  AsyncClient* operator->() { return this; }\n  void operator=(RawAsyncClientPtr&& client) { client_ = std::move(client); 
}\n  void reset() { client_.reset(); }\n\nprivate:\n  RawAsyncClientSharedPtr client_{};\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/html/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n)\n"
  },
  {
    "path": "source/common/html/utility.cc",
    "content": "#include \"common/html/utility.h\"\n\n#include <string>\n\n#include \"absl/strings/str_replace.h\"\n\nnamespace Envoy {\nnamespace Html {\n\nstd::string Utility::sanitize(const std::string& text) {\n  return absl::StrReplaceAll(\n      text, {{\"&\", \"&amp;\"}, {\"<\", \"&lt;\"}, {\">\", \"&gt;\"}, {\"\\\"\", \"&quot;\"}, {\"'\", \"&#39;\"}});\n}\n\n} // namespace Html\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/html/utility.h",
    "content": "#pragma once\n\n#include <string>\n\nnamespace Envoy {\nnamespace Html {\n\n/**\n * General HTML utilities.\n */\nclass Utility {\npublic:\n  /**\n   * Sanitizes arbitrary text so it can be included in HTML.\n   * @param text arbitrary text to be escaped for safe inclusion in HTML.\n   */\n  static std::string sanitize(const std::string& text);\n};\n\n} // namespace Html\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"async_client_lib\",\n    srcs = [\"async_client_impl.cc\"],\n    hdrs = [\"async_client_impl.h\"],\n    deps = [\n        \"//include/envoy/config:typed_metadata_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/http:async_client_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:context_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/http:message_interface\",\n        \"//include/envoy/router:router_interface\",\n        \"//include/envoy/router:router_ratelimit_interface\",\n        \"//include/envoy/router:shadow_writer_interface\",\n        \"//include/envoy/ssl:connection_interface\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/router:router_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"async_client_utility_lib\",\n    srcs = [\"async_client_utility.cc\"],\n    hdrs = [\"async_client_utility.h\"],\n    deps = [\n        \"//include/envoy/http:async_client_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"codec_client_lib\",\n    srcs = [\"codec_client.cc\"],\n    hdrs = [\"codec_client.h\"],\n    deps = [\n        \":codec_wrappers_lib\",\n        \":exception_lib\",\n        \":status_lib\",\n        \":utility_lib\",\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/http:codec_interface\",\n        
\"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/http/http1:codec_legacy_lib\",\n        \"//source/common/http/http1:codec_lib\",\n        \"//source/common/http/http2:codec_legacy_lib\",\n        \"//source/common/http/http2:codec_lib\",\n        \"//source/common/http/http3:quic_codec_factory_lib\",\n        \"//source/common/http/http3:well_known_names\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n        \"//source/common/runtime:runtime_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"codec_helper_lib\",\n    hdrs = [\"codec_helper.h\"],\n    deps = [\n        \"//include/envoy/http:codec_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"default_server_string_lib\",\n    hdrs = [\"default_server_string.h\"],\n    deps = [\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"codec_wrappers_lib\",\n    hdrs = [\"codec_wrappers.h\"],\n    deps = [\"//include/envoy/http:codec_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"codes_lib\",\n    srcs = [\"codes.cc\"],\n    hdrs = [\"codes.h\"],\n    deps = [\n        \":headers_lib\",\n        \":utility_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = 
\"context_lib\",\n    srcs = [\"context_impl.cc\"],\n    hdrs = [\"context_impl.h\"],\n    deps = [\n        \":codes_lib\",\n        \":user_agent_lib\",\n        \"//include/envoy/http:context_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_pool_base_lib\",\n    srcs = [\"conn_pool_base.cc\"],\n    hdrs = [\"conn_pool_base.h\"],\n    deps = [\n        \":codec_client_lib\",\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/conn_pool:conn_pool_base_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/common/upstream:upstream_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_manager_config_interface\",\n    hdrs = [\"conn_manager_config.h\"],\n    deps = [\n        \":date_provider_lib\",\n        \"//include/envoy/config:config_provider_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//include/envoy/router:rds_interface\",\n        \"//source/common/local_reply:local_reply_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_manager_lib\",\n    srcs = [\n        \"filter_manager.cc\",\n    ],\n    hdrs = [\n        \"filter_manager.h\",\n    ],\n    deps = [\n        \":headers_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/buffer:watermark_buffer_lib\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:scope_tracker\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/local_reply:local_reply_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    
name = \"conn_manager_lib\",\n    srcs = [\n        \"conn_manager_impl.cc\",\n        \"conn_manager_utility.cc\",\n    ],\n    hdrs = [\n        \"conn_manager_impl.h\",\n        \"conn_manager_utility.h\",\n    ],\n    deps = [\n        \":codes_lib\",\n        \":conn_manager_config_interface\",\n        \":exception_lib\",\n        \":filter_manager_lib\",\n        \":header_map_lib\",\n        \":header_utility_lib\",\n        \":headers_lib\",\n        \":path_utility_lib\",\n        \":status_lib\",\n        \":user_agent_lib\",\n        \":utility_lib\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/common:scope_tracker_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/http:api_listener_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:context_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:drain_decision_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/router:rds_interface\",\n        \"//include/envoy/router:scopes_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:overload_manager_interface\",\n        \"//include/envoy/ssl:connection_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        
\"//source/common/common:assert_lib\",\n        \"//source/common/common:dump_state_utils\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:regex_lib\",\n        \"//source/common/common:scope_tracker\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/http/http1:codec_legacy_lib\",\n        \"//source/common/http/http1:codec_lib\",\n        \"//source/common/http/http2:codec_legacy_lib\",\n        \"//source/common/http/http2:codec_lib\",\n        \"//source/common/http/http3:quic_codec_factory_lib\",\n        \"//source/common/http/http3:well_known_names\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/router:config_lib\",\n        \"//source/common/router:scoped_rds_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"date_provider_lib\",\n    srcs = [\"date_provider_impl.cc\"],\n    hdrs = [\n        \"date_provider.h\",\n        \"date_provider_impl.h\",\n    ],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/singleton:instance_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"exception_lib\",\n    hdrs = [\"exception.h\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:header_map_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = 
\"hash_policy_lib\",\n    srcs = [\"hash_policy.cc\"],\n    hdrs = [\"hash_policy.h\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/http:hash_policy_interface\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"header_list_view_lib\",\n    srcs = [\"header_list_view.cc\"],\n    hdrs = [\"header_list_view.h\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"header_map_lib\",\n    srcs = [\"header_map_impl.cc\"],\n    hdrs = [\"header_map_impl.h\"],\n    deps = [\n        \":headers_lib\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:dump_state_utils\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:non_copyable\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"headers_lib\",\n    hdrs = [\"headers.h\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/singleton:const_singleton\",\n        \"//source/common/singleton:threadsafe_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"message_lib\",\n    hdrs = [\"message_impl.h\"],\n    deps = [\n        \":header_map_lib\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/http:message_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:non_copyable\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"rest_api_fetcher_lib\",\n    srcs = [\"rest_api_fetcher.cc\"],\n    hdrs = [\"rest_api_fetcher.h\"],\n    deps = [\n        \":message_lib\",\n        \":utility_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        
\"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/config:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"user_agent_lib\",\n    srcs = [\"user_agent.cc\"],\n    hdrs = [\"user_agent.h\"],\n    deps = [\n        \":headers_lib\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/common/stats:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    external_deps = [\n        \"abseil_node_hash_set\",\n        \"abseil_optional\",\n        \"http_parser\",\n        \"nghttp2\",\n    ],\n    deps = [\n        \":exception_lib\",\n        \":header_map_lib\",\n        \":headers_lib\",\n        \":message_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/http:query_params_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/grpc:status_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"header_utility_lib\",\n    srcs = [\"header_utility.cc\"],\n    hdrs = 
[\"header_utility.h\"],\n    external_deps = [\n        \"nghttp2\",\n    ],\n    deps = [\n        \":header_map_lib\",\n        \":utility_lib\",\n        \"//include/envoy/common:regex_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/common:regex_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"path_utility_lib\",\n    srcs = [\"path_utility.cc\"],\n    hdrs = [\"path_utility.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/chromium_url\",\n        \"//source/common/common:logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"request_id_extension_lib\",\n    srcs = [\n        \"request_id_extension_impl.cc\",\n        \"request_id_extension_uuid_impl.cc\",\n    ],\n    hdrs = [\n        \"request_id_extension_impl.h\",\n        \"request_id_extension_uuid_impl.h\",\n    ],\n    deps = [\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//include/envoy/server:request_id_extension_config_interface\",\n        \"//source/common/common:random_generator_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"status_lib\",\n    srcs = [\"status.cc\"],\n    hdrs = [\"status.h\"],\n    external_deps = [\n        \"abseil_status\",\n    ],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/http/async_client_impl.cc",
    "content": "#include \"common/http/async_client_impl.h\"\n\n#include <chrono>\n#include <map>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/grpc/common.h\"\n#include \"common/http/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nconst std::vector<std::reference_wrapper<const Router::RateLimitPolicyEntry>>\n    AsyncStreamImpl::NullRateLimitPolicy::rate_limit_policy_entry_;\nconst AsyncStreamImpl::NullHedgePolicy AsyncStreamImpl::RouteEntryImpl::hedge_policy_;\nconst AsyncStreamImpl::NullRateLimitPolicy AsyncStreamImpl::RouteEntryImpl::rate_limit_policy_;\nconst AsyncStreamImpl::NullRetryPolicy AsyncStreamImpl::RouteEntryImpl::retry_policy_;\nconst Router::InternalRedirectPolicyImpl AsyncStreamImpl::RouteEntryImpl::internal_redirect_policy_;\nconst std::vector<Router::ShadowPolicyPtr> AsyncStreamImpl::RouteEntryImpl::shadow_policies_;\nconst AsyncStreamImpl::NullVirtualHost AsyncStreamImpl::RouteEntryImpl::virtual_host_;\nconst AsyncStreamImpl::NullRateLimitPolicy AsyncStreamImpl::NullVirtualHost::rate_limit_policy_;\nconst AsyncStreamImpl::NullConfig AsyncStreamImpl::NullVirtualHost::route_configuration_;\nconst std::multimap<std::string, std::string> AsyncStreamImpl::RouteEntryImpl::opaque_config_;\nconst envoy::config::core::v3::Metadata AsyncStreamImpl::RouteEntryImpl::metadata_;\nconst Config::TypedMetadataImpl<Envoy::Config::TypedMetadataFactory>\n    AsyncStreamImpl::RouteEntryImpl::typed_metadata_({});\nconst AsyncStreamImpl::NullPathMatchCriterion\n    AsyncStreamImpl::RouteEntryImpl::path_match_criterion_;\nconst absl::optional<envoy::config::route::v3::RouteAction::UpgradeConfig::ConnectConfig>\n    AsyncStreamImpl::RouteEntryImpl::connect_config_nullopt_;\nconst std::list<LowerCaseString> AsyncStreamImpl::NullConfig::internal_only_headers_;\n\nAsyncClientImpl::AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr 
cluster,\n                                 Stats::Store& stats_store, Event::Dispatcher& dispatcher,\n                                 const LocalInfo::LocalInfo& local_info,\n                                 Upstream::ClusterManager& cm, Runtime::Loader& runtime,\n                                 Random::RandomGenerator& random,\n                                 Router::ShadowWriterPtr&& shadow_writer,\n                                 Http::Context& http_context)\n    : cluster_(cluster), config_(\"http.async-client.\", local_info, stats_store, cm, runtime, random,\n                                 std::move(shadow_writer), true, false, false, false, {},\n                                 dispatcher.timeSource(), http_context),\n      dispatcher_(dispatcher) {}\n\nAsyncClientImpl::~AsyncClientImpl() {\n  while (!active_streams_.empty()) {\n    active_streams_.front()->reset();\n  }\n}\n\nAsyncClient::Request* AsyncClientImpl::send(RequestMessagePtr&& request,\n                                            AsyncClient::Callbacks& callbacks,\n                                            const AsyncClient::RequestOptions& options) {\n  AsyncRequestImpl* async_request =\n      new AsyncRequestImpl(std::move(request), *this, callbacks, options);\n  async_request->initialize();\n  std::unique_ptr<AsyncStreamImpl> new_request{async_request};\n\n  // The request may get immediately failed. 
If so, we will return nullptr.\n  if (!new_request->remote_closed_) {\n    LinkedList::moveIntoList(std::move(new_request), active_streams_);\n    return async_request;\n  } else {\n    new_request->cleanup();\n    return nullptr;\n  }\n}\n\nAsyncClient::Stream* AsyncClientImpl::start(AsyncClient::StreamCallbacks& callbacks,\n                                            const AsyncClient::StreamOptions& options) {\n  std::unique_ptr<AsyncStreamImpl> new_stream{new AsyncStreamImpl(*this, callbacks, options)};\n  LinkedList::moveIntoList(std::move(new_stream), active_streams_);\n  return active_streams_.front().get();\n}\n\nAsyncStreamImpl::AsyncStreamImpl(AsyncClientImpl& parent, AsyncClient::StreamCallbacks& callbacks,\n                                 const AsyncClient::StreamOptions& options)\n    : parent_(parent), stream_callbacks_(callbacks), stream_id_(parent.config_.random_.random()),\n      router_(parent.config_), stream_info_(Protocol::Http11, parent.dispatcher().timeSource()),\n      tracing_config_(Tracing::EgressConfig::get()),\n      route_(std::make_shared<RouteImpl>(parent_.cluster_->name(), options.timeout,\n                                         options.hash_policy)),\n      send_xff_(options.send_xff) {\n  if (options.buffer_body_for_retry) {\n    buffered_body_ = std::make_unique<Buffer::OwnedImpl>();\n  }\n\n  router_.setDecoderFilterCallbacks(*this);\n  // TODO(mattklein123): Correctly set protocol in stream info when we support access logging.\n}\n\nvoid AsyncStreamImpl::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream,\n                                    absl::string_view) {\n  ENVOY_LOG(debug, \"async http request response headers (end_stream={}):\\n{}\", end_stream,\n            *headers);\n  ASSERT(!remote_closed_);\n  encoded_response_headers_ = true;\n  stream_callbacks_.onHeaders(std::move(headers), end_stream);\n  closeRemote(end_stream);\n  // At present, the router cleans up stream state as soon as the remote is 
closed, making a\n  // half-open local stream unsupported and dangerous. Ensure we close locally to trigger completion\n  // and keep things consistent. Another option would be to issue a stream reset here if local isn't\n  // yet closed, triggering cleanup along a more standardized path. However, this would require\n  // additional logic to handle the response completion and subsequent reset, and run the risk of\n  // being interpreted as a failure, when in fact no error has necessarily occurred. Gracefully\n  // closing seems most in-line with behavior elsewhere in Envoy for now.\n  closeLocal(end_stream);\n}\n\nvoid AsyncStreamImpl::encodeData(Buffer::Instance& data, bool end_stream) {\n  ENVOY_LOG(trace, \"async http request response data (length={} end_stream={})\", data.length(),\n            end_stream);\n  ASSERT(!remote_closed_);\n  stream_callbacks_.onData(data, end_stream);\n  closeRemote(end_stream);\n  // Ensure we close locally on receiving a complete response; see comment in encodeHeaders for\n  // rationale.\n  closeLocal(end_stream);\n}\n\nvoid AsyncStreamImpl::encodeTrailers(ResponseTrailerMapPtr&& trailers) {\n  ENVOY_LOG(debug, \"async http request response trailers:\\n{}\", *trailers);\n  ASSERT(!remote_closed_);\n  stream_callbacks_.onTrailers(std::move(trailers));\n  closeRemote(true);\n  // Ensure we close locally on receiving a complete response; see comment in encodeHeaders for\n  // rationale.\n  closeLocal(true);\n}\n\nvoid AsyncStreamImpl::sendHeaders(RequestHeaderMap& headers, bool end_stream) {\n  if (Http::Headers::get().MethodValues.Head == headers.getMethodValue()) {\n    is_head_request_ = true;\n  }\n\n  is_grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers);\n  headers.setReferenceEnvoyInternalRequest(Headers::get().EnvoyInternalRequestValues.True);\n  if (send_xff_) {\n    Utility::appendXff(headers, *parent_.config_.local_info_.address());\n  }\n  router_.decodeHeaders(headers, end_stream);\n  
closeLocal(end_stream);\n}\n\nvoid AsyncStreamImpl::sendData(Buffer::Instance& data, bool end_stream) {\n  // Map send calls after local closure to no-ops. The send call could have been queued prior to\n  // remote reset or closure, and/or closure could have occurred synchronously in response to a\n  // previous send. In these cases the router will have already cleaned up stream state. This\n  // parallels handling in the main Http::ConnectionManagerImpl as well.\n  if (local_closed_) {\n    return;\n  }\n\n  // TODO(mattklein123): We trust callers currently to not do anything insane here if they set up\n  // buffering on an async client call. We should potentially think about limiting the size of\n  // buffering that we allow here.\n  if (buffered_body_ != nullptr) {\n    buffered_body_->add(data);\n  }\n\n  router_.decodeData(data, end_stream);\n  closeLocal(end_stream);\n}\n\nvoid AsyncStreamImpl::sendTrailers(RequestTrailerMap& trailers) {\n  // See explanation in sendData.\n  if (local_closed_) {\n    return;\n  }\n\n  router_.decodeTrailers(trailers);\n  closeLocal(true);\n}\n\nvoid AsyncStreamImpl::closeLocal(bool end_stream) {\n  // This guard ensures that we don't attempt to clean up a stream or fire a completion callback\n  // for a stream that has already been closed. 
Both send* calls and resets can result in stream\n  // closure, and this state may be updated synchronously during stream interaction and callbacks.\n  // Additionally AsyncRequestImpl maintains behavior wherein its onComplete callback will fire\n  // immediately upon receiving a complete response, regardless of whether it has finished sending\n  // a request.\n  // Previous logic treated post-closure entry here as more-or-less benign (providing later-stage\n  // guards against redundant cleanup), but to surface consistent stream state via callbacks,\n  // it's necessary to be more rigorous.\n  // TODO(goaway): Consider deeper cleanup of assumptions here.\n  if (local_closed_) {\n    return;\n  }\n\n  local_closed_ = end_stream;\n  if (complete()) {\n    stream_callbacks_.onComplete();\n    cleanup();\n  }\n}\n\nvoid AsyncStreamImpl::closeRemote(bool end_stream) {\n  // This guard ensures that we don't attempt to clean up a stream or fire a completion callback for\n  // a stream that has already been closed. This function is called synchronously after callbacks\n  // have executed, and it's possible for callbacks to, for instance, directly reset a stream or\n  // close the remote manually. The test case ResetInOnHeaders covers this case specifically.\n  // Previous logic treated post-closure entry here as more-or-less benign (providing later-stage\n  // guards against redundant cleanup), but to surface consistent stream state via callbacks, it's\n  // necessary to be more rigorous.\n  // TODO(goaway): Consider deeper cleanup of assumptions here.\n  if (remote_closed_) {\n    return;\n  }\n\n  remote_closed_ = end_stream;\n  if (complete()) {\n    stream_callbacks_.onComplete();\n    cleanup();\n  }\n}\n\nvoid AsyncStreamImpl::reset() {\n  router_.onDestroy();\n  resetStream();\n}\n\nvoid AsyncStreamImpl::cleanup() {\n  local_closed_ = remote_closed_ = true;\n  // This will destroy us, but only do so if we are actually in a list. 
This does not happen in\n  // the immediate failure case.\n  if (inserted()) {\n    dispatcher().deferredDelete(removeFromList(parent_.active_streams_));\n  }\n}\n\nvoid AsyncStreamImpl::resetStream() {\n  stream_callbacks_.onReset();\n  cleanup();\n}\n\nAsyncRequestImpl::AsyncRequestImpl(RequestMessagePtr&& request, AsyncClientImpl& parent,\n                                   AsyncClient::Callbacks& callbacks,\n                                   const AsyncClient::RequestOptions& options)\n    : AsyncStreamImpl(parent, *this, options), request_(std::move(request)), callbacks_(callbacks) {\n  if (nullptr != options.parent_span_) {\n    const std::string child_span_name =\n        options.child_span_name_.empty()\n            ? absl::StrCat(\"async \", parent.cluster_->name(), \" egress\")\n            : options.child_span_name_;\n    child_span_ = options.parent_span_->spawnChild(Tracing::EgressConfig::get(), child_span_name,\n                                                   parent.dispatcher().timeSource().systemTime());\n  } else {\n    child_span_ = std::make_unique<Tracing::NullSpan>();\n  }\n  child_span_->setSampled(options.sampled_);\n}\n\nvoid AsyncRequestImpl::initialize() {\n  child_span_->injectContext(request_->headers());\n  sendHeaders(request_->headers(), request_->body().length() == 0);\n  if (request_->body().length() != 0) {\n    // It's possible this will be a no-op due to a local response synchronously generated in\n    // sendHeaders; guards handle this within AsyncStreamImpl.\n    sendData(request_->body(), true);\n  }\n  // TODO(mattklein123): Support request trailers.\n}\n\nvoid AsyncRequestImpl::onComplete() {\n  callbacks_.onBeforeFinalizeUpstreamSpan(*child_span_, &response_->headers());\n\n  Tracing::HttpTracerUtility::finalizeUpstreamSpan(*child_span_, &response_->headers(),\n                                                   response_->trailers(), streamInfo(),\n                                                   
Tracing::EgressConfig::get());\n\n  callbacks_.onSuccess(*this, std::move(response_));\n}\n\nvoid AsyncRequestImpl::onHeaders(ResponseHeaderMapPtr&& headers, bool) {\n  const uint64_t response_code = Http::Utility::getResponseStatus(*headers);\n  streamInfo().response_code_ = response_code;\n  response_ = std::make_unique<ResponseMessageImpl>(std::move(headers));\n}\n\nvoid AsyncRequestImpl::onData(Buffer::Instance& data, bool) {\n  streamInfo().addBytesReceived(data.length());\n  response_->body().move(data);\n}\n\nvoid AsyncRequestImpl::onTrailers(ResponseTrailerMapPtr&& trailers) {\n  response_->trailers(std::move(trailers));\n}\n\nvoid AsyncRequestImpl::onReset() {\n  if (!cancelled_) {\n    // Set \"error reason\" tag related to reset. The tagging for \"error true\" is done inside the\n    // Tracing::HttpTracerUtility::finalizeUpstreamSpan.\n    child_span_->setTag(Tracing::Tags::get().ErrorReason, \"Reset\");\n  }\n\n  callbacks_.onBeforeFinalizeUpstreamSpan(*child_span_,\n                                          remoteClosed() ? &response_->headers() : nullptr);\n\n  // Finalize the span based on whether we received a response or not.\n  Tracing::HttpTracerUtility::finalizeUpstreamSpan(\n      *child_span_, remoteClosed() ? &response_->headers() : nullptr,\n      remoteClosed() ? response_->trailers() : nullptr, streamInfo(), Tracing::EgressConfig::get());\n\n  if (!cancelled_) {\n    // In this case we don't have a valid response so we do need to raise a failure.\n    callbacks_.onFailure(*this, AsyncClient::FailureReason::Reset);\n  }\n}\n\nvoid AsyncRequestImpl::cancel() {\n  cancelled_ = true;\n\n  // Add tags about the cancellation.\n  child_span_->setTag(Tracing::Tags::get().Canceled, Tracing::Tags::get().True);\n\n  reset();\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/async_client_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <map>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/common/scope_tracker.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/context.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/message.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/router/router_ratelimit.h\"\n#include \"envoy/router/shadow_writer.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/ssl/connection.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n#include \"envoy/upstream/load_balancer.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/router/router.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass AsyncStreamImpl;\nclass AsyncRequestImpl;\n\nclass AsyncClientImpl final : public AsyncClient {\npublic:\n  AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr cluster, Stats::Store& stats_store,\n                  Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info,\n                  Upstream::ClusterManager& cm, Runtime::Loader& runtime,\n                  Random::RandomGenerator& random, Router::ShadowWriterPtr&& shadow_writer,\n                  Http::Context& http_context);\n  ~AsyncClientImpl() override;\n\n  // Http::AsyncClient\n  Request* send(RequestMessagePtr&& request, Callbacks& callbacks,\n                const 
AsyncClient::RequestOptions& options) override;\n  Stream* start(StreamCallbacks& callbacks, const AsyncClient::StreamOptions& options) override;\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n\nprivate:\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n  Router::FilterConfig config_;\n  Event::Dispatcher& dispatcher_;\n  std::list<std::unique_ptr<AsyncStreamImpl>> active_streams_;\n\n  friend class AsyncStreamImpl;\n  friend class AsyncRequestImpl;\n};\n\n/**\n * Implementation of AsyncRequest. This implementation is capable of sending HTTP requests to a\n * ConnectionPool asynchronously.\n */\nclass AsyncStreamImpl : public AsyncClient::Stream,\n                        public StreamDecoderFilterCallbacks,\n                        public Event::DeferredDeletable,\n                        Logger::Loggable<Logger::Id::http>,\n                        public LinkedObject<AsyncStreamImpl>,\n                        public ScopeTrackedObject {\npublic:\n  AsyncStreamImpl(AsyncClientImpl& parent, AsyncClient::StreamCallbacks& callbacks,\n                  const AsyncClient::StreamOptions& options);\n\n  // Http::StreamDecoderFilterCallbacks\n  void requestRouteConfigUpdate(Http::RouteConfigUpdatedCallbackSharedPtr) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  // Http::AsyncClient::Stream\n  void sendHeaders(RequestHeaderMap& headers, bool end_stream) override;\n  void sendData(Buffer::Instance& data, bool end_stream) override;\n  void sendTrailers(RequestTrailerMap& trailers) override;\n  void reset() override;\n  bool isAboveWriteBufferHighWatermark() const override { return high_watermark_calls_ > 0; }\n\nprotected:\n  bool remoteClosed() { return remote_closed_; }\n  void closeLocal(bool end_stream);\n  StreamInfo::StreamInfoImpl& streamInfo() override { return stream_info_; }\n\n  AsyncClientImpl& parent_;\n\nprivate:\n  struct NullHedgePolicy : public Router::HedgePolicy {\n    // Router::HedgePolicy\n    uint32_t initialRequests() 
const override { return 1; }\n    const envoy::type::v3::FractionalPercent& additionalRequestChance() const override {\n      return additional_request_chance_;\n    }\n    bool hedgeOnPerTryTimeout() const override { return false; }\n\n    const envoy::type::v3::FractionalPercent additional_request_chance_;\n  };\n\n  struct NullRateLimitPolicy : public Router::RateLimitPolicy {\n    // Router::RateLimitPolicy\n    const std::vector<std::reference_wrapper<const Router::RateLimitPolicyEntry>>&\n    getApplicableRateLimit(uint64_t) const override {\n      return rate_limit_policy_entry_;\n    }\n    bool empty() const override { return true; }\n\n    static const std::vector<std::reference_wrapper<const Router::RateLimitPolicyEntry>>\n        rate_limit_policy_entry_;\n  };\n\n  struct NullRetryPolicy : public Router::RetryPolicy {\n    // Router::RetryPolicy\n    std::chrono::milliseconds perTryTimeout() const override {\n      return std::chrono::milliseconds(0);\n    }\n    std::vector<Upstream::RetryHostPredicateSharedPtr> retryHostPredicates() const override {\n      return {};\n    }\n    Upstream::RetryPrioritySharedPtr retryPriority() const override { return {}; }\n\n    uint32_t hostSelectionMaxAttempts() const override { return 1; }\n    uint32_t numRetries() const override { return 1; }\n    uint32_t retryOn() const override { return 0; }\n    const std::vector<uint32_t>& retriableStatusCodes() const override {\n      return retriable_status_codes_;\n    }\n    const std::vector<Http::HeaderMatcherSharedPtr>& retriableHeaders() const override {\n      return retriable_headers_;\n    }\n    const std::vector<Http::HeaderMatcherSharedPtr>& retriableRequestHeaders() const override {\n      return retriable_request_headers_;\n    }\n    absl::optional<std::chrono::milliseconds> baseInterval() const override {\n      return absl::nullopt;\n    }\n    absl::optional<std::chrono::milliseconds> maxInterval() const override { return absl::nullopt; }\n    const 
std::vector<Router::ResetHeaderParserSharedPtr>& resetHeaders() const override {\n      return reset_headers_;\n    }\n    std::chrono::milliseconds resetMaxInterval() const override {\n      return std::chrono::milliseconds(300000);\n    }\n\n    const std::vector<uint32_t> retriable_status_codes_{};\n    const std::vector<Http::HeaderMatcherSharedPtr> retriable_headers_{};\n    const std::vector<Http::HeaderMatcherSharedPtr> retriable_request_headers_{};\n    const std::vector<Router::ResetHeaderParserSharedPtr> reset_headers_{};\n  };\n\n  struct NullConfig : public Router::Config {\n    Router::RouteConstSharedPtr route(const Http::RequestHeaderMap&, const StreamInfo::StreamInfo&,\n                                      uint64_t) const override {\n      return nullptr;\n    }\n\n    Router::RouteConstSharedPtr route(const Router::RouteCallback&, const Http::RequestHeaderMap&,\n                                      const StreamInfo::StreamInfo&, uint64_t) const override {\n      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    }\n\n    const std::list<LowerCaseString>& internalOnlyHeaders() const override {\n      return internal_only_headers_;\n    }\n\n    const std::string& name() const override { return EMPTY_STRING; }\n    bool usesVhds() const override { return false; }\n    bool mostSpecificHeaderMutationsWins() const override { return false; }\n\n    static const std::list<LowerCaseString> internal_only_headers_;\n  };\n\n  struct NullVirtualHost : public Router::VirtualHost {\n    // Router::VirtualHost\n    Stats::StatName statName() const override { return {}; }\n    const Router::RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; }\n    const Router::CorsPolicy* corsPolicy() const override { return nullptr; }\n    const Router::Config& routeConfig() const override { return route_configuration_; }\n    const Router::RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override {\n      return nullptr;\n    }\n    bool 
includeAttemptCountInRequest() const override { return false; }\n    bool includeAttemptCountInResponse() const override { return false; }\n    uint32_t retryShadowBufferLimit() const override {\n      return std::numeric_limits<uint32_t>::max();\n    }\n    static const NullRateLimitPolicy rate_limit_policy_;\n    static const NullConfig route_configuration_;\n  };\n\n  struct NullPathMatchCriterion : public Router::PathMatchCriterion {\n    Router::PathMatchType matchType() const override { return Router::PathMatchType::None; }\n    const std::string& matcher() const override { return EMPTY_STRING; }\n  };\n\n  struct RouteEntryImpl : public Router::RouteEntry {\n    RouteEntryImpl(\n        const std::string& cluster_name, const absl::optional<std::chrono::milliseconds>& timeout,\n        const Protobuf::RepeatedPtrField<envoy::config::route::v3::RouteAction::HashPolicy>&\n            hash_policy)\n        : cluster_name_(cluster_name), timeout_(timeout) {\n      if (!hash_policy.empty()) {\n        hash_policy_ = std::make_unique<HashPolicyImpl>(hash_policy);\n      }\n    }\n\n    // Router::RouteEntry\n    const std::string& clusterName() const override { return cluster_name_; }\n    Http::Code clusterNotFoundResponseCode() const override {\n      return Http::Code::InternalServerError;\n    }\n    const Router::CorsPolicy* corsPolicy() const override { return nullptr; }\n    void finalizeRequestHeaders(Http::RequestHeaderMap&, const StreamInfo::StreamInfo&,\n                                bool) const override {}\n    void finalizeResponseHeaders(Http::ResponseHeaderMap&,\n                                 const StreamInfo::StreamInfo&) const override {}\n    const HashPolicy* hashPolicy() const override { return hash_policy_.get(); }\n    const Router::HedgePolicy& hedgePolicy() const override { return hedge_policy_; }\n    const Router::MetadataMatchCriteria* metadataMatchCriteria() const override { return nullptr; }\n    Upstream::ResourcePriority 
priority() const override {\n      return Upstream::ResourcePriority::Default;\n    }\n    const Router::RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; }\n    const Router::RetryPolicy& retryPolicy() const override { return retry_policy_; }\n    const Router::InternalRedirectPolicy& internalRedirectPolicy() const override {\n      return internal_redirect_policy_;\n    }\n    uint32_t retryShadowBufferLimit() const override {\n      return std::numeric_limits<uint32_t>::max();\n    }\n    const std::vector<Router::ShadowPolicyPtr>& shadowPolicies() const override {\n      return shadow_policies_;\n    }\n    std::chrono::milliseconds timeout() const override {\n      if (timeout_) {\n        return timeout_.value();\n      } else {\n        return std::chrono::milliseconds(0);\n      }\n    }\n    absl::optional<std::chrono::milliseconds> idleTimeout() const override { return absl::nullopt; }\n    absl::optional<std::chrono::milliseconds> maxStreamDuration() const override {\n      return absl::nullopt;\n    }\n    absl::optional<std::chrono::milliseconds> grpcTimeoutHeaderMax() const override {\n      return absl::nullopt;\n    }\n    absl::optional<std::chrono::milliseconds> grpcTimeoutHeaderOffset() const override {\n      return absl::nullopt;\n    }\n    absl::optional<std::chrono::milliseconds> maxGrpcTimeout() const override {\n      return absl::nullopt;\n    }\n    absl::optional<std::chrono::milliseconds> grpcTimeoutOffset() const override {\n      return absl::nullopt;\n    }\n    const Router::VirtualCluster* virtualCluster(const Http::HeaderMap&) const override {\n      return nullptr;\n    }\n    const Router::TlsContextMatchCriteria* tlsContextMatchCriteria() const override {\n      return nullptr;\n    }\n    const std::multimap<std::string, std::string>& opaqueConfig() const override {\n      return opaque_config_;\n    }\n    const Router::VirtualHost& virtualHost() const override { return virtual_host_; }\n    bool 
autoHostRewrite() const override { return false; }\n    bool includeVirtualHostRateLimits() const override { return true; }\n    const envoy::config::core::v3::Metadata& metadata() const override { return metadata_; }\n    const Config::TypedMetadata& typedMetadata() const override { return typed_metadata_; }\n    const Router::PathMatchCriterion& pathMatchCriterion() const override {\n      return path_match_criterion_;\n    }\n\n    const Router::RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override {\n      return nullptr;\n    }\n    const absl::optional<ConnectConfig>& connectConfig() const override {\n      return connect_config_nullopt_;\n    }\n\n    bool includeAttemptCountInRequest() const override { return false; }\n    bool includeAttemptCountInResponse() const override { return false; }\n    const Router::RouteEntry::UpgradeMap& upgradeMap() const override { return upgrade_map_; }\n    const std::string& routeName() const override { return route_name_; }\n    std::unique_ptr<const HashPolicyImpl> hash_policy_;\n    static const NullHedgePolicy hedge_policy_;\n    static const NullRateLimitPolicy rate_limit_policy_;\n    static const NullRetryPolicy retry_policy_;\n    static const Router::InternalRedirectPolicyImpl internal_redirect_policy_;\n    static const std::vector<Router::ShadowPolicyPtr> shadow_policies_;\n    static const NullVirtualHost virtual_host_;\n    static const std::multimap<std::string, std::string> opaque_config_;\n    static const envoy::config::core::v3::Metadata metadata_;\n    // Async client doesn't require metadata.\n    static const Config::TypedMetadataImpl<Config::TypedMetadataFactory> typed_metadata_;\n    static const NullPathMatchCriterion path_match_criterion_;\n\n    Router::RouteEntry::UpgradeMap upgrade_map_;\n    const std::string& cluster_name_;\n    absl::optional<std::chrono::milliseconds> timeout_;\n    static const absl::optional<ConnectConfig> connect_config_nullopt_;\n    const 
std::string route_name_;\n  };\n\n  struct RouteImpl : public Router::Route {\n    RouteImpl(const std::string& cluster_name,\n              const absl::optional<std::chrono::milliseconds>& timeout,\n              const Protobuf::RepeatedPtrField<envoy::config::route::v3::RouteAction::HashPolicy>&\n                  hash_policy)\n        : route_entry_(cluster_name, timeout, hash_policy) {}\n\n    // Router::Route\n    const Router::DirectResponseEntry* directResponseEntry() const override { return nullptr; }\n    const Router::RouteEntry* routeEntry() const override { return &route_entry_; }\n    const Router::Decorator* decorator() const override { return nullptr; }\n    const Router::RouteTracing* tracingConfig() const override { return nullptr; }\n    const Router::RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override {\n      return nullptr;\n    }\n\n    RouteEntryImpl route_entry_;\n  };\n\n  void cleanup();\n  void closeRemote(bool end_stream);\n  bool complete() { return local_closed_ && remote_closed_; }\n\n  // Http::StreamDecoderFilterCallbacks\n  const Network::Connection* connection() override { return nullptr; }\n  Event::Dispatcher& dispatcher() override { return parent_.dispatcher_; }\n  void resetStream() override;\n  Router::RouteConstSharedPtr route() override { return route_; }\n  Router::RouteConstSharedPtr route(const Router::RouteCallback&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  Upstream::ClusterInfoConstSharedPtr clusterInfo() override { return parent_.cluster_; }\n  void clearRouteCache() override {}\n  uint64_t streamId() const override { return stream_id_; }\n  Tracing::Span& activeSpan() override { return active_span_; }\n  const Tracing::Config& tracingConfig() override { return tracing_config_; }\n  void continueDecoding() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  RequestTrailerMap& addDecodedTrailers() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  void 
addDecodedData(Buffer::Instance&, bool) override {\n    // This should only be called if the user has set up buffering. The request is already fully\n    // buffered. Note that this is only called via the async client's internal use of the router\n    // filter which uses this function for buffering.\n    ASSERT(buffered_body_ != nullptr);\n  }\n  MetadataMapVector& addDecodedMetadata() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  void injectDecodedDataToFilterChain(Buffer::Instance&, bool) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  const Buffer::Instance* decodingBuffer() override { return buffered_body_.get(); }\n  void modifyDecodingBuffer(std::function<void(Buffer::Instance&)>) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  void sendLocalReply(Code code, absl::string_view body,\n                      std::function<void(ResponseHeaderMap& headers)> modify_headers,\n                      const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                      absl::string_view details) override {\n    if (encoded_response_headers_) {\n      resetStream();\n      return;\n    }\n    Utility::sendLocalReply(\n        remote_closed_,\n        Utility::EncodeFunctions{nullptr, nullptr,\n                                 [this, modify_headers, &details](ResponseHeaderMapPtr&& headers,\n                                                                  bool end_stream) -> void {\n                                   if (modify_headers != nullptr) {\n                                     modify_headers(*headers);\n                                   }\n                                   encodeHeaders(std::move(headers), end_stream, details);\n                                 },\n                                 [this](Buffer::Instance& data, bool end_stream) -> void {\n                                   encodeData(data, end_stream);\n                                 }},\n        Utility::LocalReplyData{is_grpc_request_, code, body, 
grpc_status, is_head_request_});\n  }\n  // The async client won't pause if sending an Expect: 100-Continue so simply\n  // swallows any incoming encode100Continue.\n  void encode100ContinueHeaders(ResponseHeaderMapPtr&&) override {}\n  void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream,\n                     absl::string_view details) override;\n  void encodeData(Buffer::Instance& data, bool end_stream) override;\n  void encodeTrailers(ResponseTrailerMapPtr&& trailers) override;\n  void encodeMetadata(MetadataMapPtr&&) override {}\n  void onDecoderFilterAboveWriteBufferHighWatermark() override { ++high_watermark_calls_; }\n  void onDecoderFilterBelowWriteBufferLowWatermark() override {\n    ASSERT(high_watermark_calls_ != 0);\n    --high_watermark_calls_;\n  }\n  void addDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks&) override {}\n  void removeDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks&) override {}\n  void setDecoderBufferLimit(uint32_t) override {}\n  uint32_t decoderBufferLimit() override { return 0; }\n  bool recreateStream() override { return false; }\n  const ScopeTrackedObject& scope() override { return *this; }\n  void addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr&) override {}\n  Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const override { return {}; }\n\n  // ScopeTrackedObject\n  void dumpState(std::ostream& os, int indent_level) const override {\n    const char* spaces = spacesForLevel(indent_level);\n    os << spaces << \"AsyncClient \" << this << DUMP_MEMBER(stream_id_) << \"\\n\";\n    DUMP_DETAILS(&stream_info_);\n  }\n\n  AsyncClient::StreamCallbacks& stream_callbacks_;\n  const uint64_t stream_id_;\n  Router::ProdFilter router_;\n  StreamInfo::StreamInfoImpl stream_info_;\n  Tracing::NullSpan active_span_;\n  const Tracing::Config& tracing_config_;\n  std::shared_ptr<RouteImpl> route_;\n  uint32_t high_watermark_calls_{};\n  bool local_closed_{};\n  bool 
remote_closed_{};\n  Buffer::InstancePtr buffered_body_;\n  bool encoded_response_headers_{};\n  bool is_grpc_request_{};\n  bool is_head_request_{false};\n  bool send_xff_{true};\n\n  friend class AsyncClientImpl;\n  friend class AsyncClientImplUnitTest;\n};\n\nclass AsyncRequestImpl final : public AsyncClient::Request,\n                               AsyncStreamImpl,\n                               AsyncClient::StreamCallbacks {\npublic:\n  AsyncRequestImpl(RequestMessagePtr&& request, AsyncClientImpl& parent,\n                   AsyncClient::Callbacks& callbacks, const AsyncClient::RequestOptions& options);\n\n  // AsyncClient::Request\n  void cancel() override;\n\nprivate:\n  void initialize();\n\n  // AsyncClient::StreamCallbacks\n  void onHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override;\n  void onData(Buffer::Instance& data, bool end_stream) override;\n  void onTrailers(ResponseTrailerMapPtr&& trailers) override;\n  void onComplete() override;\n  void onReset() override;\n\n  // Http::StreamDecoderFilterCallbacks\n  void addDecodedData(Buffer::Instance&, bool) override {\n    // The request is already fully buffered. Note that this is only called via the async client's\n    // internal use of the router filter which uses this function for buffering.\n  }\n  const Buffer::Instance* decodingBuffer() override { return &request_->body(); }\n  void modifyDecodingBuffer(std::function<void(Buffer::Instance&)>) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  RequestMessagePtr request_;\n  AsyncClient::Callbacks& callbacks_;\n  std::unique_ptr<ResponseMessageImpl> response_;\n  bool cancelled_{};\n  Tracing::SpanPtr child_span_;\n\n  friend class AsyncClientImpl;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/async_client_utility.cc",
    "content": "#include \"common/http/async_client_utility.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nAsyncClientRequestTracker::~AsyncClientRequestTracker() {\n  for (auto* active_request : active_requests_) {\n    active_request->cancel();\n  }\n}\n\nvoid AsyncClientRequestTracker::add(AsyncClient::Request& request) {\n  ASSERT(active_requests_.find(&request) == active_requests_.end(), \"request is already tracked.\");\n  active_requests_.insert(&request);\n}\n\nvoid AsyncClientRequestTracker::remove(const AsyncClient::Request& request) {\n  // Notice that use of \"const_cast\" here is motivated by keeping API convenient for client code.\n  // In the context where remove() will be typically called, request.cancel() is no longer\n  // desirable and therefore get prevented by means of \"const\" modifier.\n  auto it = active_requests_.find(const_cast<AsyncClient::Request*>(&request));\n  // Support a use case where request callbacks might get called prior to a request handle\n  // is returned from AsyncClient::send().\n  if (it != active_requests_.end()) {\n    active_requests_.erase(it);\n  }\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/async_client_utility.h",
    "content": "#pragma once\n\n#include \"envoy/http/async_client.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Keeps track of active async HTTP requests to be able to cancel them on destruction.\n */\nclass AsyncClientRequestTracker {\npublic:\n  /**\n   * Cancels all known active async HTTP requests.\n   */\n  ~AsyncClientRequestTracker();\n  /**\n   * Includes a given async HTTP request into a set of known active requests.\n   * @param request request handle\n   */\n  void add(AsyncClient::Request& request);\n  /**\n   * Excludes a given async HTTP request from a set of known active requests.\n   *\n   * NOTE: Asymmetry between signatures of add() and remove() is caused by the difference\n   *       between contexts in which these methods will be used.\n   *       add() will be called right after AsyncClient::send() when request.cancel() is\n   *       perfectly valid and desirable.\n   *       However, remove() will be called in the context of\n   *       AsyncClient::Callbacks::[onSuccess | onFailure] where request.cancel() is no longer\n   *       expected and therefore get prevented by means of \"const\" modifier.\n   *\n   * @param request request handle\n   */\n  void remove(const AsyncClient::Request& request);\n\nprivate:\n  // Track active async HTTP requests to be able to cancel them on destruction.\n  absl::flat_hash_set<AsyncClient::Request*> active_requests_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/codec_client.cc",
    "content": "#include \"common/http/codec_client.h\"\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/http/codec.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/http1/codec_impl.h\"\n#include \"common/http/http1/codec_impl_legacy.h\"\n#include \"common/http/http2/codec_impl.h\"\n#include \"common/http/http2/codec_impl_legacy.h\"\n#include \"common/http/http3/quic_codec_factory.h\"\n#include \"common/http/http3/well_known_names.h\"\n#include \"common/http/status.h\"\n#include \"common/http/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/runtime/runtime_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nCodecClient::CodecClient(Type type, Network::ClientConnectionPtr&& connection,\n                         Upstream::HostDescriptionConstSharedPtr host,\n                         Event::Dispatcher& dispatcher)\n    : type_(type), host_(host), connection_(std::move(connection)),\n      idle_timeout_(host_->cluster().idleTimeout()) {\n  if (type_ != Type::HTTP3) {\n    // Make sure upstream connections process data and then the FIN, rather than processing\n    // TCP disconnects immediately. (see https://github.com/envoyproxy/envoy/issues/1679 for\n    // details)\n    connection_->detectEarlyCloseWhenReadDisabled(false);\n  }\n  connection_->addConnectionCallbacks(*this);\n  connection_->addReadFilter(Network::ReadFilterSharedPtr{new CodecReadFilter(*this)});\n\n  ENVOY_CONN_LOG(debug, \"connecting\", *connection_);\n  connection_->connect();\n\n  if (idle_timeout_) {\n    idle_timer_ = dispatcher.createTimer([this]() -> void { onIdleTimeout(); });\n    enableIdleTimer();\n  }\n\n  // We just universally set no delay on connections. 
Theoretically we might at some point want\n  // to make this configurable.\n  connection_->noDelay(true);\n}\n\nCodecClient::~CodecClient() = default;\n\nvoid CodecClient::close() { connection_->close(Network::ConnectionCloseType::NoFlush); }\n\nvoid CodecClient::deleteRequest(ActiveRequest& request) {\n  connection_->dispatcher().deferredDelete(request.removeFromList(active_requests_));\n  if (codec_client_callbacks_) {\n    codec_client_callbacks_->onStreamDestroy();\n  }\n  if (numActiveRequests() == 0) {\n    enableIdleTimer();\n  }\n}\n\nRequestEncoder& CodecClient::newStream(ResponseDecoder& response_decoder) {\n  ActiveRequestPtr request(new ActiveRequest(*this, response_decoder));\n  request->encoder_ = &codec_->newStream(*request);\n  request->encoder_->getStream().addCallbacks(*request);\n  LinkedList::moveIntoList(std::move(request), active_requests_);\n  disableIdleTimer();\n  return *active_requests_.front()->encoder_;\n}\n\nvoid CodecClient::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::Connected) {\n    ENVOY_CONN_LOG(debug, \"connected\", *connection_);\n    connection_->streamInfo().setDownstreamSslConnection(connection_->ssl());\n    connected_ = true;\n  }\n\n  if (event == Network::ConnectionEvent::RemoteClose) {\n    remote_closed_ = true;\n  }\n\n  // HTTP/1 can signal end of response by disconnecting. We need to handle that case.\n  if (type_ == Type::HTTP1 && event == Network::ConnectionEvent::RemoteClose &&\n      !active_requests_.empty()) {\n    Buffer::OwnedImpl empty;\n    onData(empty);\n  }\n\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    ENVOY_CONN_LOG(debug, \"disconnect. 
resetting {} pending requests\", *connection_,\n                   active_requests_.size());\n    disableIdleTimer();\n    idle_timer_.reset();\n    while (!active_requests_.empty()) {\n      // Fake resetting all active streams so that reset() callbacks get invoked.\n      active_requests_.front()->encoder_->getStream().resetStream(\n          connected_ ? StreamResetReason::ConnectionTermination\n                     : StreamResetReason::ConnectionFailure);\n    }\n  }\n}\n\nvoid CodecClient::responseDecodeComplete(ActiveRequest& request) {\n  ENVOY_CONN_LOG(debug, \"response complete\", *connection_);\n  deleteRequest(request);\n\n  // HTTP/2 can send us a reset after a complete response if the request was not complete. Users\n  // of CodecClient will deal with the premature response case and we should not handle any\n  // further reset notification.\n  request.encoder_->getStream().removeCallbacks(request);\n}\n\nvoid CodecClient::onReset(ActiveRequest& request, StreamResetReason reason) {\n  ENVOY_CONN_LOG(debug, \"request reset\", *connection_);\n  if (codec_client_callbacks_) {\n    codec_client_callbacks_->onStreamReset(reason);\n  }\n\n  deleteRequest(request);\n}\n\nvoid CodecClient::onData(Buffer::Instance& data) {\n  bool protocol_error = false;\n  const Status status = codec_->dispatch(data);\n\n  if (isCodecProtocolError(status)) {\n    ENVOY_CONN_LOG(debug, \"protocol error: {}\", *connection_, status.message());\n    close();\n    protocol_error = true;\n  } else if (isPrematureResponseError(status)) {\n    ENVOY_CONN_LOG(debug, \"premature response\", *connection_);\n    close();\n\n    // Don't count 408 responses where we have no active requests as protocol errors\n    if (!active_requests_.empty() || getPrematureResponseHttpCode(status) != Code::RequestTimeout) {\n      protocol_error = true;\n    }\n  }\n\n  if (protocol_error) {\n    host_->cluster().stats().upstream_cx_protocol_error_.inc();\n  }\n}\n\nCodecClientProd::CodecClientProd(Type 
type, Network::ClientConnectionPtr&& connection,\n                                 Upstream::HostDescriptionConstSharedPtr host,\n                                 Event::Dispatcher& dispatcher,\n                                 Random::RandomGenerator& random_generator)\n    : CodecClient(type, std::move(connection), host, dispatcher) {\n\n  switch (type) {\n  case Type::HTTP1: {\n    if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")) {\n      codec_ = std::make_unique<Http1::ClientConnectionImpl>(\n          *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(),\n          host->cluster().maxResponseHeadersCount());\n    } else {\n      codec_ = std::make_unique<Legacy::Http1::ClientConnectionImpl>(\n          *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(),\n          host->cluster().maxResponseHeadersCount());\n    }\n    break;\n  }\n  case Type::HTTP2: {\n    if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")) {\n      codec_ = std::make_unique<Http2::ClientConnectionImpl>(\n          *connection_, *this, host->cluster().http2CodecStats(), random_generator,\n          host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB,\n          host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get());\n    } else {\n      codec_ = std::make_unique<Http2::ClientConnectionImpl>(\n          *connection_, *this, host->cluster().http2CodecStats(), random_generator,\n          host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB,\n          host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get());\n    }\n    break;\n  }\n  case Type::HTTP3: {\n    codec_ = std::unique_ptr<ClientConnection>(\n        Config::Utility::getAndCheckFactoryByName<Http::QuicHttpClientConnectionFactory>(\n            Http::QuicCodecNames::get().Quiche)\n            
.createQuicClientConnection(*connection_, *this));\n    break;\n  }\n  }\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/codec_client.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <memory>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/codec_wrappers.h\"\n#include \"common/network/filter_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Callbacks specific to a codec client.\n */\nclass CodecClientCallbacks {\npublic:\n  virtual ~CodecClientCallbacks() = default;\n\n  /**\n   * Called every time an owned stream is destroyed, whether complete or not.\n   */\n  virtual void onStreamDestroy() PURE;\n\n  /**\n   * Called when a stream is reset by the client.\n   * @param reason supplies the reset reason.\n   */\n  virtual void onStreamReset(StreamResetReason reason) PURE;\n};\n\n/**\n * This is an HTTP client that multiple stream management and underlying connection management\n * across multiple HTTP codec types.\n */\nclass CodecClient : Logger::Loggable<Logger::Id::client>,\n                    public Http::ConnectionCallbacks,\n                    public Network::ConnectionCallbacks,\n                    public Event::DeferredDeletable {\npublic:\n  /**\n   * Type of HTTP codec to use.\n   */\n  enum class Type { HTTP1, HTTP2, HTTP3 };\n\n  ~CodecClient() override;\n\n  /**\n   * Add a connection callback to the underlying network connection.\n   */\n  void addConnectionCallbacks(Network::ConnectionCallbacks& cb) {\n    connection_->addConnectionCallbacks(cb);\n  }\n\n  /**\n   * Close the underlying network connection. 
This is immediate and will not attempt to flush any\n   * pending write data.\n   */\n  void close();\n\n  /**\n   * Send a codec level go away indication to the peer.\n   */\n  void goAway() { codec_->goAway(); }\n\n  /**\n   * @return the underlying connection ID.\n   */\n  uint64_t id() const { return connection_->id(); }\n\n  /**\n   * @return the underlying codec protocol.\n   */\n  Protocol protocol() { return codec_->protocol(); }\n\n  /**\n   * @return the underlying connection error.\n   */\n  absl::string_view connectionFailureReason() { return connection_->transportFailureReason(); }\n\n  /**\n   * @return size_t the number of outstanding requests that have not completed or been reset.\n   */\n  size_t numActiveRequests() { return active_requests_.size(); }\n\n  /**\n   * Create a new stream. Note: The CodecClient will NOT buffer multiple requests for HTTP1\n   * connections. Thus, calling newStream() before the previous request has been fully encoded\n   * is an error. Pipelining is supported however.\n   * @param response_decoder supplies the decoder to use for response callbacks.\n   * @return StreamEncoder& the encoder to use for encoding the request.\n   */\n  RequestEncoder& newStream(ResponseDecoder& response_decoder);\n\n  void setConnectionStats(const Network::Connection::ConnectionStats& stats) {\n    connection_->setConnectionStats(stats);\n  }\n\n  void setCodecClientCallbacks(CodecClientCallbacks& callbacks) {\n    codec_client_callbacks_ = &callbacks;\n  }\n\n  void setCodecConnectionCallbacks(Http::ConnectionCallbacks& callbacks) {\n    codec_callbacks_ = &callbacks;\n  }\n\n  bool remoteClosed() const { return remote_closed_; }\n\n  Type type() const { return type_; }\n\n  const StreamInfo::StreamInfo& streamInfo() { return connection_->streamInfo(); }\n\nprotected:\n  /**\n   * Create a codec client and connect to a remote host/port.\n   * @param type supplies the codec type.\n   * @param connection supplies the connection to communicate 
on.\n   * @param host supplies the owning host.\n   */\n  CodecClient(Type type, Network::ClientConnectionPtr&& connection,\n              Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher);\n\n  // Http::ConnectionCallbacks\n  void onGoAway(GoAwayErrorCode error_code) override {\n    if (codec_callbacks_) {\n      codec_callbacks_->onGoAway(error_code);\n    }\n  }\n\n  void onIdleTimeout() {\n    host_->cluster().stats().upstream_cx_idle_timeout_.inc();\n    close();\n  }\n\n  void disableIdleTimer() {\n    if (idle_timer_ != nullptr) {\n      idle_timer_->disableTimer();\n    }\n  }\n\n  void enableIdleTimer() {\n    if (idle_timer_ != nullptr) {\n      idle_timer_->enableTimer(idle_timeout_.value());\n    }\n  }\n\n  const Type type_;\n  // The order of host_, connection_, and codec_ matter as during destruction each can refer to\n  // the previous, at least in tests.\n  Upstream::HostDescriptionConstSharedPtr host_;\n  Network::ClientConnectionPtr connection_;\n  ClientConnectionPtr codec_;\n  Event::TimerPtr idle_timer_;\n  const absl::optional<std::chrono::milliseconds> idle_timeout_;\n\nprivate:\n  /**\n   * Wrapper read filter to drive incoming connection data into the codec. We could potentially\n   * support other filters in the future.\n   */\n  struct CodecReadFilter : public Network::ReadFilterBaseImpl {\n    CodecReadFilter(CodecClient& parent) : parent_(parent) {}\n\n    // Network::ReadFilter\n    Network::FilterStatus onData(Buffer::Instance& data, bool) override {\n      parent_.onData(data);\n      return Network::FilterStatus::StopIteration;\n    }\n\n    CodecClient& parent_;\n  };\n\n  struct ActiveRequest;\n\n  /**\n   * Wrapper for an outstanding request. 
Designed for handling stream multiplexing.\n   */\n  struct ActiveRequest : LinkedObject<ActiveRequest>,\n                         public Event::DeferredDeletable,\n                         public StreamCallbacks,\n                         public ResponseDecoderWrapper {\n    ActiveRequest(CodecClient& parent, ResponseDecoder& inner)\n        : ResponseDecoderWrapper(inner), parent_(parent) {}\n\n    // StreamCallbacks\n    void onResetStream(StreamResetReason reason, absl::string_view) override {\n      parent_.onReset(*this, reason);\n    }\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    // StreamDecoderWrapper\n    void onPreDecodeComplete() override { parent_.responseDecodeComplete(*this); }\n    void onDecodeComplete() override {}\n\n    RequestEncoder* encoder_{};\n    CodecClient& parent_;\n  };\n\n  using ActiveRequestPtr = std::unique_ptr<ActiveRequest>;\n\n  /**\n   * Called when a response finishes decoding. 
This is called *before* forwarding on to the\n   * wrapped decoder.\n   */\n  void responseDecodeComplete(ActiveRequest& request);\n\n  void deleteRequest(ActiveRequest& request);\n  void onReset(ActiveRequest& request, StreamResetReason reason);\n  void onData(Buffer::Instance& data);\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  // Pass watermark events from the connection on to the codec which will pass it to the underlying\n  // streams.\n  void onAboveWriteBufferHighWatermark() override {\n    codec_->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n  }\n  void onBelowWriteBufferLowWatermark() override {\n    codec_->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n  }\n\n  std::list<ActiveRequestPtr> active_requests_;\n  Http::ConnectionCallbacks* codec_callbacks_{};\n  CodecClientCallbacks* codec_client_callbacks_{};\n  bool connected_{};\n  bool remote_closed_{};\n};\n\nusing CodecClientPtr = std::unique_ptr<CodecClient>;\n\n/**\n * Production implementation that installs a real codec.\n */\nclass CodecClientProd : public CodecClient {\npublic:\n  CodecClientProd(Type type, Network::ClientConnectionPtr&& connection,\n                  Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher,\n                  Random::RandomGenerator& random_generator);\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/codec_helper.h",
    "content": "#pragma once\n\n#include \"envoy/http/codec.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/container/inlined_vector.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass StreamCallbackHelper {\npublic:\n  void runLowWatermarkCallbacks() {\n    if (reset_callbacks_started_ || local_end_stream_) {\n      return;\n    }\n    ASSERT(high_watermark_callbacks_ > 0);\n    --high_watermark_callbacks_;\n    for (StreamCallbacks* callbacks : callbacks_) {\n      if (callbacks) {\n        callbacks->onBelowWriteBufferLowWatermark();\n      }\n    }\n  }\n\n  void runHighWatermarkCallbacks() {\n    if (reset_callbacks_started_ || local_end_stream_) {\n      return;\n    }\n    ++high_watermark_callbacks_;\n    for (StreamCallbacks* callbacks : callbacks_) {\n      if (callbacks) {\n        callbacks->onAboveWriteBufferHighWatermark();\n      }\n    }\n  }\n\n  void runResetCallbacks(StreamResetReason reason) {\n    // Reset callbacks are a special case, and the only StreamCallbacks allowed\n    // to run after local_end_stream_.\n    if (reset_callbacks_started_) {\n      return;\n    }\n\n    reset_callbacks_started_ = true;\n    for (StreamCallbacks* callbacks : callbacks_) {\n      if (callbacks) {\n        callbacks->onResetStream(reason, absl::string_view());\n      }\n    }\n  }\n\n  bool local_end_stream_{};\n\nprotected:\n  void addCallbacksHelper(StreamCallbacks& callbacks) {\n    ASSERT(!reset_callbacks_started_ && !local_end_stream_);\n    callbacks_.push_back(&callbacks);\n    for (uint32_t i = 0; i < high_watermark_callbacks_; ++i) {\n      callbacks.onAboveWriteBufferHighWatermark();\n    }\n  }\n\n  void removeCallbacksHelper(StreamCallbacks& callbacks) {\n    // For performance reasons we just clear the callback and do not resize the vector.\n    // Reset callbacks scale with the number of filters per request and do not get added and\n    // removed multiple times.\n    // The vector may not be safely resized without making sure the 
run.*Callbacks() helper\n    // functions above still handle removeCallbacksHelper() calls mid-loop.\n    for (auto& callback : callbacks_) {\n      if (callback == &callbacks) {\n        callback = nullptr;\n        return;\n      }\n    }\n  }\n\nprivate:\n  absl::InlinedVector<StreamCallbacks*, 8> callbacks_;\n  bool reset_callbacks_started_{};\n  uint32_t high_watermark_callbacks_{};\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/codec_wrappers.h",
    "content": "#pragma once\n\n#include \"envoy/http/codec.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Wrapper for ResponseDecoder that just forwards to an \"inner\" decoder.\n */\nclass ResponseDecoderWrapper : public ResponseDecoder {\npublic:\n  // ResponseDecoder\n  void decode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override {\n    inner_.decode100ContinueHeaders(std::move(headers));\n  }\n\n  void decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override {\n    if (end_stream) {\n      onPreDecodeComplete();\n    }\n\n    inner_.decodeHeaders(std::move(headers), end_stream);\n\n    if (end_stream) {\n      onDecodeComplete();\n    }\n  }\n\n  void decodeData(Buffer::Instance& data, bool end_stream) override {\n    if (end_stream) {\n      onPreDecodeComplete();\n    }\n\n    inner_.decodeData(data, end_stream);\n\n    if (end_stream) {\n      onDecodeComplete();\n    }\n  }\n\n  void decodeTrailers(ResponseTrailerMapPtr&& trailers) override {\n    onPreDecodeComplete();\n    inner_.decodeTrailers(std::move(trailers));\n    onDecodeComplete();\n  }\n\n  void decodeMetadata(MetadataMapPtr&& metadata_map) override {\n    inner_.decodeMetadata(std::move(metadata_map));\n  }\n\nprotected:\n  ResponseDecoderWrapper(ResponseDecoder& inner) : inner_(inner) {}\n\n  /**\n   * Consumers of the wrapper generally want to know when a decode is complete. 
This is called\n   * at that time and is implemented by derived classes.\n   */\n  virtual void onPreDecodeComplete() PURE;\n  virtual void onDecodeComplete() PURE;\n\n  ResponseDecoder& inner_;\n};\n\n/**\n * Wrapper for RequestEncoder that just forwards to an \"inner\" encoder.\n */\nclass RequestEncoderWrapper : public RequestEncoder {\npublic:\n  // RequestEncoder\n  void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override {\n    inner_.encodeHeaders(headers, end_stream);\n    if (end_stream) {\n      onEncodeComplete();\n    }\n  }\n\n  void encodeData(Buffer::Instance& data, bool end_stream) override {\n    inner_.encodeData(data, end_stream);\n    if (end_stream) {\n      onEncodeComplete();\n    }\n  }\n\n  void encodeTrailers(const RequestTrailerMap& trailers) override {\n    inner_.encodeTrailers(trailers);\n    onEncodeComplete();\n  }\n\n  void encodeMetadata(const MetadataMapVector& metadata_map_vector) override {\n    inner_.encodeMetadata(metadata_map_vector);\n  }\n\n  Stream& getStream() override { return inner_.getStream(); }\n\n  Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override {\n    return inner_.http1StreamEncoderOptions();\n  }\n\nprotected:\n  RequestEncoderWrapper(RequestEncoder& inner) : inner_(inner) {}\n\n  /**\n   * Consumers of the wrapper generally want to know when an encode is complete. This is called at\n   * that time and is implemented by derived classes.\n   */\n  virtual void onEncodeComplete() PURE;\n\n  RequestEncoder& inner_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/codes.cc",
    "content": "#include \"common/http/codes.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nCodeStatsImpl::CodeStatsImpl(Stats::SymbolTable& symbol_table)\n    : stat_name_pool_(symbol_table), symbol_table_(symbol_table),\n      canary_(stat_name_pool_.add(\"canary\")), external_(stat_name_pool_.add(\"external\")),\n      internal_(stat_name_pool_.add(\"internal\")),\n      upstream_rq_1xx_(stat_name_pool_.add(\"upstream_rq_1xx\")),\n      upstream_rq_2xx_(stat_name_pool_.add(\"upstream_rq_2xx\")),\n      upstream_rq_3xx_(stat_name_pool_.add(\"upstream_rq_3xx\")),\n      upstream_rq_4xx_(stat_name_pool_.add(\"upstream_rq_4xx\")),\n      upstream_rq_5xx_(stat_name_pool_.add(\"upstream_rq_5xx\")),\n      upstream_rq_unknown_(stat_name_pool_.add(\"upstream_rq_unknown\")), // Covers invalid http\n                                                                        // response codes e.g. 
600.\n      upstream_rq_completed_(stat_name_pool_.add(\"upstream_rq_completed\")),\n      upstream_rq_time_(stat_name_pool_.add(\"upstream_rq_time\")),\n      vcluster_(stat_name_pool_.add(\"vcluster\")), vhost_(stat_name_pool_.add(\"vhost\")),\n      zone_(stat_name_pool_.add(\"zone\")) {\n\n  // Pre-allocate response codes 200, 404, and 503, as those seem quite likely.\n  // We don't pre-allocate all the HTTP codes because the first 127 allocations\n  // are likely to be encoded in one byte, and we would rather spend those on\n  // common components of stat-names that appear frequently.\n  upstreamRqStatName(Code::OK);\n  upstreamRqStatName(Code::NotFound);\n  upstreamRqStatName(Code::ServiceUnavailable);\n}\n\nvoid CodeStatsImpl::incCounter(Stats::Scope& scope, const Stats::StatNameVec& names) const {\n  const Stats::SymbolTable::StoragePtr stat_name_storage = symbol_table_.join(names);\n  scope.counterFromStatName(Stats::StatName(stat_name_storage.get())).inc();\n}\n\nvoid CodeStatsImpl::incCounter(Stats::Scope& scope, Stats::StatName a, Stats::StatName b) const {\n  const Stats::SymbolTable::StoragePtr stat_name_storage = symbol_table_.join({a, b});\n  scope.counterFromStatName(Stats::StatName(stat_name_storage.get())).inc();\n}\n\nvoid CodeStatsImpl::recordHistogram(Stats::Scope& scope, const Stats::StatNameVec& names,\n                                    Stats::Histogram::Unit unit, uint64_t count) const {\n  const Stats::SymbolTable::StoragePtr stat_name_storage = symbol_table_.join(names);\n  scope.histogramFromStatName(Stats::StatName(stat_name_storage.get()), unit).recordValue(count);\n}\n\nvoid CodeStatsImpl::chargeBasicResponseStat(Stats::Scope& scope, Stats::StatName prefix,\n                                            Code response_code) const {\n  ASSERT(&symbol_table_ == &scope.symbolTable());\n\n  // Build a dynamic stat for the response code and increment it.\n  incCounter(scope, prefix, upstream_rq_completed_);\n  const Stats::StatName rq_group 
= upstreamRqGroup(response_code);\n  if (!rq_group.empty()) {\n    incCounter(scope, prefix, rq_group);\n  }\n  incCounter(scope, prefix, upstreamRqStatName(response_code));\n}\n\nvoid CodeStatsImpl::chargeResponseStat(const ResponseStatInfo& info) const {\n  const Code code = static_cast<Code>(info.response_status_code_);\n\n  ASSERT(&info.cluster_scope_.symbolTable() == &symbol_table_);\n  chargeBasicResponseStat(info.cluster_scope_, info.prefix_, code);\n\n  const Stats::StatName rq_group = upstreamRqGroup(code);\n  const Stats::StatName rq_code = upstreamRqStatName(code);\n\n  // If the response is from a canary, also create canary stats.\n  if (info.upstream_canary_) {\n    writeCategory(info, rq_group, rq_code, canary_);\n  }\n\n  // Split stats into external vs. internal.\n  if (info.internal_request_) {\n    writeCategory(info, rq_group, rq_code, internal_);\n  } else {\n    writeCategory(info, rq_group, rq_code, external_);\n  }\n\n  // Handle request virtual cluster.\n  if (!info.request_vcluster_name_.empty()) {\n    incCounter(info.global_scope_, {vhost_, info.request_vhost_name_, vcluster_,\n                                    info.request_vcluster_name_, upstream_rq_completed_});\n    incCounter(info.global_scope_, {vhost_, info.request_vhost_name_, vcluster_,\n                                    info.request_vcluster_name_, rq_group});\n    incCounter(info.global_scope_,\n               {vhost_, info.request_vhost_name_, vcluster_, info.request_vcluster_name_, rq_code});\n  }\n\n  // Handle per zone stats.\n  if (!info.from_zone_.empty() && !info.to_zone_.empty()) {\n    incCounter(info.cluster_scope_,\n               {info.prefix_, zone_, info.from_zone_, info.to_zone_, upstream_rq_completed_});\n    incCounter(info.cluster_scope_,\n               {info.prefix_, zone_, info.from_zone_, info.to_zone_, rq_group});\n    incCounter(info.cluster_scope_, {info.prefix_, zone_, info.from_zone_, info.to_zone_, rq_code});\n  }\n}\n\nvoid 
CodeStatsImpl::writeCategory(const ResponseStatInfo& info, Stats::StatName rq_group,\n                                  Stats::StatName rq_code, Stats::StatName category) const {\n  incCounter(info.cluster_scope_, {info.prefix_, category, upstream_rq_completed_});\n  if (!rq_group.empty()) {\n    incCounter(info.cluster_scope_, {info.prefix_, category, rq_group});\n  }\n  incCounter(info.cluster_scope_, {info.prefix_, category, rq_code});\n}\n\nvoid CodeStatsImpl::chargeResponseTiming(const ResponseTimingInfo& info) const {\n  const uint64_t count = info.response_time_.count();\n  recordHistogram(info.cluster_scope_, {info.prefix_, upstream_rq_time_},\n                  Stats::Histogram::Unit::Milliseconds, count);\n  if (info.upstream_canary_) {\n    recordHistogram(info.cluster_scope_, {info.prefix_, canary_, upstream_rq_time_},\n                    Stats::Histogram::Unit::Milliseconds, count);\n  }\n\n  if (info.internal_request_) {\n    recordHistogram(info.cluster_scope_, {info.prefix_, internal_, upstream_rq_time_},\n                    Stats::Histogram::Unit::Milliseconds, count);\n  } else {\n    recordHistogram(info.cluster_scope_, {info.prefix_, external_, upstream_rq_time_},\n                    Stats::Histogram::Unit::Milliseconds, count);\n  }\n\n  if (!info.request_vcluster_name_.empty()) {\n    recordHistogram(info.global_scope_,\n                    {vhost_, info.request_vhost_name_, vcluster_, info.request_vcluster_name_,\n                     upstream_rq_time_},\n                    Stats::Histogram::Unit::Milliseconds, count);\n  }\n\n  // Handle per zone stats.\n  if (!info.from_zone_.empty() && !info.to_zone_.empty()) {\n    recordHistogram(info.cluster_scope_,\n                    {info.prefix_, zone_, info.from_zone_, info.to_zone_, upstream_rq_time_},\n                    Stats::Histogram::Unit::Milliseconds, count);\n  }\n}\n\nStats::StatName CodeStatsImpl::upstreamRqGroup(Code response_code) const {\n  switch (enumToInt(response_code) / 
100) {\n  case 1:\n    return upstream_rq_1xx_;\n  case 2:\n    return upstream_rq_2xx_;\n  case 3:\n    return upstream_rq_3xx_;\n  case 4:\n    return upstream_rq_4xx_;\n  case 5:\n    return upstream_rq_5xx_;\n  }\n  return empty_; // Unknown codes do not go into a group.\n}\n\nStats::StatName CodeStatsImpl::upstreamRqStatName(Code response_code) const {\n  // Take a lock only if we've never seen this response-code before.\n  const uint32_t rc_index = static_cast<uint32_t>(response_code) - HttpCodeOffset;\n  if (rc_index >= NumHttpCodes) {\n    return upstream_rq_unknown_;\n  }\n  return Stats::StatName(rc_stat_names_.get(rc_index, [this, response_code]() -> const uint8_t* {\n    return stat_name_pool_.addReturningStorage(\n        absl::StrCat(\"upstream_rq_\", enumToInt(response_code)));\n  }));\n}\n\nstd::string CodeUtility::groupStringForResponseCode(Code response_code) {\n  // Note: this is only used in the unit test and in dynamo_filter.cc, which\n  // needs the same sort of symbolization treatment we are doing here.\n  if (CodeUtility::is1xx(enumToInt(response_code))) {\n    return \"1xx\";\n  } else if (CodeUtility::is2xx(enumToInt(response_code))) {\n    return \"2xx\";\n  } else if (CodeUtility::is3xx(enumToInt(response_code))) {\n    return \"3xx\";\n  } else if (CodeUtility::is4xx(enumToInt(response_code))) {\n    return \"4xx\";\n  } else if (CodeUtility::is5xx(enumToInt(response_code))) {\n    return \"5xx\";\n  } else {\n    return \"\";\n  }\n}\n\nconst char* CodeUtility::toString(Code code) {\n  // clang-format off\n  switch (code) {\n  // 1xx\n  case Code::Continue:                      return \"Continue\";\n  case Code::SwitchingProtocols:            return \"Switching Protocols\";\n\n  // 2xx\n  case Code::OK:                            return \"OK\";\n  case Code::Created:                       return \"Created\";\n  case Code::Accepted:                      return \"Accepted\";\n  case Code::NonAuthoritativeInformation:   return 
\"Non-Authoritative Information\";\n  case Code::NoContent:                     return \"No Content\";\n  case Code::ResetContent:                  return \"Reset Content\";\n  case Code::PartialContent:                return \"Partial Content\";\n  case Code::MultiStatus:                   return \"Multi-Status\";\n  case Code::AlreadyReported:               return \"Already Reported\";\n  case Code::IMUsed:                        return \"IM Used\";\n\n  // 3xx\n  case Code::MultipleChoices:               return \"Multiple Choices\";\n  case Code::MovedPermanently:              return \"Moved Permanently\";\n  case Code::Found:                         return \"Found\";\n  case Code::SeeOther:                      return \"See Other\";\n  case Code::NotModified:                   return \"Not Modified\";\n  case Code::UseProxy:                      return \"Use Proxy\";\n  case Code::TemporaryRedirect:             return \"Temporary Redirect\";\n  case Code::PermanentRedirect:             return \"Permanent Redirect\";\n\n  // 4xx\n  case Code::BadRequest:                    return \"Bad Request\";\n  case Code::Unauthorized:                  return \"Unauthorized\";\n  case Code::PaymentRequired:               return \"Payment Required\";\n  case Code::Forbidden:                     return \"Forbidden\";\n  case Code::NotFound:                      return \"Not Found\";\n  case Code::MethodNotAllowed:              return \"Method Not Allowed\";\n  case Code::NotAcceptable:                 return \"Not Acceptable\";\n  case Code::ProxyAuthenticationRequired:   return \"Proxy Authentication Required\";\n  case Code::RequestTimeout:                return \"Request Timeout\";\n  case Code::Conflict:                      return \"Conflict\";\n  case Code::Gone:                          return \"Gone\";\n  case Code::LengthRequired:                return \"Length Required\";\n  case Code::PreconditionFailed:            return \"Precondition Failed\";\n  case 
Code::PayloadTooLarge:               return \"Payload Too Large\";\n  case Code::URITooLong:                    return \"URI Too Long\";\n  case Code::UnsupportedMediaType:          return \"Unsupported Media Type\";\n  case Code::RangeNotSatisfiable:           return \"Range Not Satisfiable\";\n  case Code::ExpectationFailed:             return \"Expectation Failed\";\n  case Code::MisdirectedRequest:            return \"Misdirected Request\";\n  case Code::UnprocessableEntity:           return \"Unprocessable Entity\";\n  case Code::Locked:                        return \"Locked\";\n  case Code::FailedDependency:              return \"Failed Dependency\";\n  case Code::UpgradeRequired:               return \"Upgrade Required\";\n  case Code::PreconditionRequired:          return \"Precondition Required\";\n  case Code::TooManyRequests:               return \"Too Many Requests\";\n  case Code::RequestHeaderFieldsTooLarge:   return \"Request Header Fields Too Large\";\n\n  // 5xx\n  case Code::InternalServerError:           return \"Internal Server Error\";\n  case Code::NotImplemented:                return \"Not Implemented\";\n  case Code::BadGateway:                    return \"Bad Gateway\";\n  case Code::ServiceUnavailable:            return \"Service Unavailable\";\n  case Code::GatewayTimeout:                return \"Gateway Timeout\";\n  case Code::HTTPVersionNotSupported:       return \"HTTP Version Not Supported\";\n  case Code::VariantAlsoNegotiates:         return \"Variant Also Negotiates\";\n  case Code::InsufficientStorage:           return \"Insufficient Storage\";\n  case Code::LoopDetected:                  return \"Loop Detected\";\n  case Code::NotExtended:                   return \"Not Extended\";\n  case Code::NetworkAuthenticationRequired: return \"Network Authentication Required\";\n  }\n  // clang-format on\n\n  return \"Unknown\";\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/codes.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nstruct CodeStats::ResponseStatInfo {\n  Stats::Scope& global_scope_;\n  Stats::Scope& cluster_scope_;\n  Stats::StatName prefix_;\n  uint64_t response_status_code_;\n  bool internal_request_;\n  Stats::StatName request_vhost_name_;\n  Stats::StatName request_vcluster_name_;\n  Stats::StatName from_zone_;\n  Stats::StatName to_zone_;\n  bool upstream_canary_;\n};\n\nstruct CodeStats::ResponseTimingInfo {\n  Stats::Scope& global_scope_;\n  Stats::Scope& cluster_scope_;\n  Stats::StatName prefix_;\n  std::chrono::milliseconds response_time_;\n  bool upstream_canary_;\n  bool internal_request_;\n  Stats::StatName request_vhost_name_;\n  Stats::StatName request_vcluster_name_;\n  Stats::StatName from_zone_;\n  Stats::StatName to_zone_;\n};\n\nclass CodeStatsImpl : public CodeStats {\npublic:\n  explicit CodeStatsImpl(Stats::SymbolTable& symbol_table);\n\n  // CodeStats\n  void chargeBasicResponseStat(Stats::Scope& scope, Stats::StatName prefix,\n                               Code response_code) const override;\n  void chargeResponseStat(const ResponseStatInfo& info) const override;\n  void chargeResponseTiming(const ResponseTimingInfo& info) const override;\n\nprivate:\n  friend class CodeStatsTest;\n\n  void writeCategory(const ResponseStatInfo& info, Stats::StatName rq_group,\n                     Stats::StatName rq_code, Stats::StatName category) const;\n  void incCounter(Stats::Scope& scope, const Stats::StatNameVec& names) const;\n  void incCounter(Stats::Scope& scope, Stats::StatName a, Stats::StatName b) const;\n  void recordHistogram(Stats::Scope& scope, const Stats::StatNameVec& names,\n                       Stats::Histogram::Unit unit, 
uint64_t count) const;\n\n  Stats::StatName upstreamRqGroup(Code response_code) const;\n  Stats::StatName upstreamRqStatName(Code response_code) const;\n\n  mutable Stats::StatNamePool stat_name_pool_;\n  Stats::SymbolTable& symbol_table_;\n\n  const Stats::StatName canary_;\n  const Stats::StatName empty_; // Used for the group-name for invalid http codes.\n  const Stats::StatName external_;\n  const Stats::StatName internal_;\n  const Stats::StatName upstream_;\n  const Stats::StatName upstream_rq_1xx_;\n  const Stats::StatName upstream_rq_2xx_;\n  const Stats::StatName upstream_rq_3xx_;\n  const Stats::StatName upstream_rq_4xx_;\n  const Stats::StatName upstream_rq_5xx_;\n  const Stats::StatName upstream_rq_unknown_;\n  const Stats::StatName upstream_rq_completed_;\n  const Stats::StatName upstream_rq_time_;\n  const Stats::StatName vcluster_;\n  const Stats::StatName vhost_;\n  const Stats::StatName zone_;\n\n  // Use an array of atomic pointers to hold StatNameStorage objects for\n  // every conceivable HTTP response code. In the hot-path we'll reference\n  // these with a null-check, and if we need to allocate a symbol for a\n  // new code, we'll take a mutex to avoid duplicate allocations and\n  // subsequent leaks. This is similar in principle to a ReaderMutexLock,\n  // but should be faster, as ReaderMutexLocks appear to be too expensive for\n  // fine-grained controls. Another option would be to use a lock per\n  // stat-name, which might have similar performance to atomics with default\n  // barrier policy.\n  //\n  // We don't allocate these all up front during construction because\n  // SymbolTable greedily encodes the first 128 names it discovers in one\n  // byte. We don't want those high-value single-byte codes to go to fully\n  // enumerating the 4 prefixes combined with HTTP codes that are seldom used,\n  // so we allocate these on demand.\n  //\n  // There can be multiple symbol tables in a server. 
The one passed into the\n  // Codes constructor should be the same as the one passed to\n  // Stats::ThreadLocalStore. Note that additional symbol tables can be created\n  // from IsolatedStoreImpl's default constructor.\n  //\n  // The Codes object is global to the server.\n\n  static constexpr uint32_t NumHttpCodes = 500;\n  static constexpr uint32_t HttpCodeOffset = 100; // code 100 is at index 0.\n  mutable Thread::AtomicPtrArray<const uint8_t, NumHttpCodes,\n                                 Thread::AtomicPtrAllocMode::DoNotDelete>\n      rc_stat_names_;\n};\n\n/**\n * General utility routines for HTTP codes.\n */\nclass CodeUtility {\npublic:\n  /**\n   * Convert an HTTP response code to a descriptive string.\n   * @param code supplies the code to convert.\n   * @return const char* the string.\n   */\n  static const char* toString(Code code);\n\n  static bool is1xx(uint64_t code) { return code >= 100 && code < 200; }\n  static bool is2xx(uint64_t code) { return code >= 200 && code < 300; }\n  static bool is3xx(uint64_t code) { return code >= 300 && code < 400; }\n  static bool is4xx(uint64_t code) { return code >= 400 && code < 500; }\n  static bool is5xx(uint64_t code) { return code >= 500 && code < 600; }\n\n  static bool isGatewayError(uint64_t code) { return code >= 502 && code < 505; }\n\n  static std::string groupStringForResponseCode(Code response_code);\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/conn_manager_config.h",
    "content": "#pragma once\n\n#include \"envoy/config/config_provider.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/router/rds.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/http/date_provider.h\"\n#include \"common/local_reply/local_reply.h\"\n#include \"common/network/utility.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * All stats for the connection manager. @see stats_macros.h\n */\n#define ALL_HTTP_CONN_MAN_STATS(COUNTER, GAUGE, HISTOGRAM)                                         \\\n  COUNTER(downstream_cx_delayed_close_timeout)                                                     \\\n  COUNTER(downstream_cx_destroy)                                                                   \\\n  COUNTER(downstream_cx_destroy_active_rq)                                                         \\\n  COUNTER(downstream_cx_destroy_local)                                                             \\\n  COUNTER(downstream_cx_destroy_local_active_rq)                                                   \\\n  COUNTER(downstream_cx_destroy_remote)                                                            \\\n  COUNTER(downstream_cx_destroy_remote_active_rq)                                                  \\\n  COUNTER(downstream_cx_drain_close)                                                               \\\n  COUNTER(downstream_cx_http1_total)                                                               \\\n  COUNTER(downstream_cx_http2_total)                                                               \\\n  COUNTER(downstream_cx_http3_total)                                                               \\\n  COUNTER(downstream_cx_idle_timeout)                         
                                     \\\n  COUNTER(downstream_cx_max_duration_reached)                                                      \\\n  COUNTER(downstream_cx_overload_disable_keepalive)                                                \\\n  COUNTER(downstream_cx_protocol_error)                                                            \\\n  COUNTER(downstream_cx_rx_bytes_total)                                                            \\\n  COUNTER(downstream_cx_ssl_total)                                                                 \\\n  COUNTER(downstream_cx_total)                                                                     \\\n  COUNTER(downstream_cx_tx_bytes_total)                                                            \\\n  COUNTER(downstream_cx_upgrades_total)                                                            \\\n  COUNTER(downstream_flow_control_paused_reading_total)                                            \\\n  COUNTER(downstream_flow_control_resumed_reading_total)                                           \\\n  COUNTER(downstream_rq_1xx)                                                                       \\\n  COUNTER(downstream_rq_2xx)                                                                       \\\n  COUNTER(downstream_rq_3xx)                                                                       \\\n  COUNTER(downstream_rq_4xx)                                                                       \\\n  COUNTER(downstream_rq_5xx)                                                                       \\\n  COUNTER(downstream_rq_completed)                                                                 \\\n  COUNTER(downstream_rq_http1_total)                                                               \\\n  COUNTER(downstream_rq_http2_total)                                                               \\\n  COUNTER(downstream_rq_http3_total)                                                               \\\n  
COUNTER(downstream_rq_idle_timeout)                                                              \\\n  COUNTER(downstream_rq_non_relative_path)                                                         \\\n  COUNTER(downstream_rq_overload_close)                                                            \\\n  COUNTER(downstream_rq_response_before_rq_complete)                                               \\\n  COUNTER(downstream_rq_rx_reset)                                                                  \\\n  COUNTER(downstream_rq_timeout)                                                                   \\\n  COUNTER(downstream_rq_too_large)                                                                 \\\n  COUNTER(downstream_rq_total)                                                                     \\\n  COUNTER(downstream_rq_tx_reset)                                                                  \\\n  COUNTER(downstream_rq_max_duration_reached)                                                      \\\n  COUNTER(downstream_rq_ws_on_non_ws_route)                                                        \\\n  COUNTER(rs_too_large)                                                                            \\\n  GAUGE(downstream_cx_active, Accumulate)                                                          \\\n  GAUGE(downstream_cx_http1_active, Accumulate)                                                    \\\n  GAUGE(downstream_cx_http2_active, Accumulate)                                                    \\\n  GAUGE(downstream_cx_http3_active, Accumulate)                                                    \\\n  GAUGE(downstream_cx_rx_bytes_buffered, Accumulate)                                               \\\n  GAUGE(downstream_cx_ssl_active, Accumulate)                                                      \\\n  GAUGE(downstream_cx_tx_bytes_buffered, Accumulate)                                               \\\n  GAUGE(downstream_cx_upgrades_active, 
Accumulate)                                                 \\\n  GAUGE(downstream_rq_active, Accumulate)                                                          \\\n  HISTOGRAM(downstream_cx_length_ms, Milliseconds)                                                 \\\n  HISTOGRAM(downstream_rq_time, Milliseconds)\n\n/**\n * Wrapper struct for connection manager stats. @see stats_macros.h\n */\nstruct ConnectionManagerNamedStats {\n  ALL_HTTP_CONN_MAN_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\nstruct ConnectionManagerStats {\n  ConnectionManagerStats(ConnectionManagerNamedStats&& named_stats, const std::string& prefix,\n                         Stats::Scope& scope)\n      : named_(std::move(named_stats)), prefix_(prefix),\n        prefix_stat_name_storage_(prefix, scope.symbolTable()), scope_(scope) {}\n\n  Stats::StatName prefixStatName() const { return prefix_stat_name_storage_.statName(); }\n\n  ConnectionManagerNamedStats named_;\n  std::string prefix_;\n  Stats::StatNameManagedStorage prefix_stat_name_storage_;\n  Stats::Scope& scope_;\n};\n\n/**\n * Connection manager tracing specific stats. @see stats_macros.h\n */\n#define CONN_MAN_TRACING_STATS(COUNTER)                                                            \\\n  COUNTER(random_sampling)                                                                         \\\n  COUNTER(service_forced)                                                                          \\\n  COUNTER(client_enabled)                                                                          \\\n  COUNTER(not_traceable)                                                                           \\\n  COUNTER(health_check)\n\n/**\n * Wrapper struct for connection manager tracing stats. 
@see stats_macros.h\n */\nstruct ConnectionManagerTracingStats {\n  CONN_MAN_TRACING_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration for tracing which is set on the connection manager level.\n * Http Tracing can be enabled/disabled on a per connection manager basis.\n * Here we specify some specific for connection manager settings.\n */\nstruct TracingConnectionManagerConfig {\n  Tracing::OperationName operation_name_;\n  Tracing::CustomTagMap custom_tags_;\n  envoy::type::v3::FractionalPercent client_sampling_;\n  envoy::type::v3::FractionalPercent random_sampling_;\n  envoy::type::v3::FractionalPercent overall_sampling_;\n  bool verbose_;\n  uint32_t max_path_tag_length_;\n};\n\nusing TracingConnectionManagerConfigPtr = std::unique_ptr<TracingConnectionManagerConfig>;\n\n/**\n * Connection manager per listener stats. @see stats_macros.h\n */\n#define CONN_MAN_LISTENER_STATS(COUNTER)                                                           \\\n  COUNTER(downstream_rq_1xx)                                                                       \\\n  COUNTER(downstream_rq_2xx)                                                                       \\\n  COUNTER(downstream_rq_3xx)                                                                       \\\n  COUNTER(downstream_rq_4xx)                                                                       \\\n  COUNTER(downstream_rq_5xx)                                                                       \\\n  COUNTER(downstream_rq_completed)\n\n/**\n * Wrapper struct for connection manager listener stats. 
@see stats_macros.h\n */\nstruct ConnectionManagerListenerStats {\n  CONN_MAN_LISTENER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration for how to forward client certs.\n */\nenum class ForwardClientCertType {\n  ForwardOnly,\n  AppendForward,\n  SanitizeSet,\n  Sanitize,\n  AlwaysForwardOnly\n};\n\n/**\n * Configuration for the fields of the client cert, used for populating the current client cert\n * information to the next hop.\n */\nenum class ClientCertDetailsType { Cert, Chain, Subject, URI, DNS };\n\n/**\n * Configuration for what addresses should be considered internal beyond the defaults.\n */\nclass InternalAddressConfig {\npublic:\n  virtual ~InternalAddressConfig() = default;\n  virtual bool isInternalAddress(const Network::Address::Instance& address) const PURE;\n};\n\n/**\n * Determines if an address is internal based on whether it is an RFC1918 ip address.\n */\nclass DefaultInternalAddressConfig : public Http::InternalAddressConfig {\npublic:\n  bool isInternalAddress(const Network::Address::Instance& address) const override {\n    return Network::Utility::isInternalAddress(address);\n  }\n};\n\n/**\n * Abstract configuration for the connection manager.\n */\nclass ConnectionManagerConfig {\npublic:\n  using HttpConnectionManagerProto =\n      envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager;\n\n  virtual ~ConnectionManagerConfig() = default;\n\n  /**\n   * @return RequestIDExtensionSharedPtr The request id utilities instance to use\n   */\n  virtual RequestIDExtensionSharedPtr requestIDExtension() PURE;\n\n  /**\n   *  @return const std::list<AccessLog::InstanceSharedPtr>& the access logs to write to.\n   */\n  virtual const std::list<AccessLog::InstanceSharedPtr>& accessLogs() PURE;\n\n  /**\n   * Called to create a codec for the connection manager. This function will be called when the\n   * first byte of application data is received. 
This is done to support handling of ALPN, protocol\n   * detection, etc.\n   * @param connection supplies the owning connection.\n   * @param data supplies the currently available read data.\n   * @param callbacks supplies the callbacks to install into the codec.\n   * @return a codec or nullptr if no codec can be created.\n   */\n  virtual ServerConnectionPtr createCodec(Network::Connection& connection,\n                                          const Buffer::Instance& data,\n                                          ServerConnectionCallbacks& callbacks) PURE;\n\n  /**\n   * @return DateProvider& the date provider to use for\n   */\n  virtual DateProvider& dateProvider() PURE;\n\n  /**\n   * @return the time in milliseconds the connection manager will wait between issuing a \"shutdown\n   *         notice\" to the time it will issue a full GOAWAY and not accept any new streams.\n   */\n  virtual std::chrono::milliseconds drainTimeout() const PURE;\n\n  /**\n   * @return FilterChainFactory& the HTTP level filter factory to build the connection's filter\n   *         chain.\n   */\n  virtual FilterChainFactory& filterFactory() PURE;\n\n  /**\n   * @return whether the connection manager will generate a fresh x-request-id if the request does\n   *         not have one.\n   */\n  virtual bool generateRequestId() const PURE;\n\n  /**\n   * @return whether the x-request-id should not be reset on edge entry inside mesh\n   */\n  virtual bool preserveExternalRequestId() const PURE;\n\n  /**\n   * @return whether the x-request-id should always be set in the response.\n   */\n  virtual bool alwaysSetRequestIdInResponse() const PURE;\n\n  /**\n   * @return optional idle timeout for incoming connection manager connections.\n   */\n  virtual absl::optional<std::chrono::milliseconds> idleTimeout() const PURE;\n\n  /**\n   * @return if the connection manager does routing base on router config, e.g. 
a Server::Admin impl\n   * has no route config.\n   */\n  virtual bool isRoutable() const PURE;\n\n  /**\n   * @return optional maximum connection duration timeout for manager connections.\n   */\n  virtual absl::optional<std::chrono::milliseconds> maxConnectionDuration() const PURE;\n\n  /**\n   * @return maximum request headers size the connection manager will accept.\n   */\n  virtual uint32_t maxRequestHeadersKb() const PURE;\n\n  /**\n   * @return maximum number of request headers the codecs will accept.\n   */\n  virtual uint32_t maxRequestHeadersCount() const PURE;\n\n  /**\n   * @return per-stream idle timeout for incoming connection manager connections. Zero indicates a\n   *         disabled idle timeout.\n   */\n  virtual std::chrono::milliseconds streamIdleTimeout() const PURE;\n\n  /**\n   * @return request timeout for incoming connection manager connections. Zero indicates\n   *         a disabled request timeout.\n   */\n  virtual std::chrono::milliseconds requestTimeout() const PURE;\n\n  /**\n   * @return delayed close timeout for downstream HTTP connections. Zero indicates a disabled\n   *         timeout. See http_connection_manager.proto for a detailed description of this timeout.\n   */\n  virtual std::chrono::milliseconds delayedCloseTimeout() const PURE;\n\n  /**\n   * @return maximum duration time to keep alive stream\n   */\n  virtual absl::optional<std::chrono::milliseconds> maxStreamDuration() const PURE;\n\n  /**\n   * @return Router::RouteConfigProvider* the configuration provider used to acquire a route\n   *         config for each request flow. Pointer ownership is _not_ transferred to the caller of\n   *         this function. This will return nullptr when scoped routing is enabled.\n   */\n  virtual Router::RouteConfigProvider* routeConfigProvider() PURE;\n\n  /**\n   * @return Config::ConfigProvider* the configuration provider used to acquire scoped routing\n   * configuration for each request flow. 
Pointer ownership is _not_ transferred to the caller of\n   * this function. This will return nullptr when scoped routing is not enabled.\n   */\n  virtual Config::ConfigProvider* scopedRouteConfigProvider() PURE;\n\n  /**\n   * @return const std::string& the server name to write into responses.\n   */\n  virtual const std::string& serverName() const PURE;\n\n  /**\n   * @return ServerHeaderTransformation the transformation to apply to Server response headers.\n   */\n  virtual HttpConnectionManagerProto::ServerHeaderTransformation\n  serverHeaderTransformation() const PURE;\n\n  /**\n   * @return ConnectionManagerStats& the stats to write to.\n   */\n  virtual ConnectionManagerStats& stats() PURE;\n\n  /**\n   * @return ConnectionManagerTracingStats& the stats to write to.\n   */\n  virtual ConnectionManagerTracingStats& tracingStats() PURE;\n\n  /**\n   * @return bool whether to use the remote address for populating XFF, determining internal request\n   *         status, etc. or to assume that XFF will already be populated with the remote address.\n   */\n  virtual bool useRemoteAddress() const PURE;\n\n  /**\n   * @return InternalAddressConfig configuration for user defined internal addresses.\n   */\n  virtual const InternalAddressConfig& internalAddressConfig() const PURE;\n\n  /**\n   * @return uint32_t the number of trusted proxy hops in front of this Envoy instance, for\n   *         the purposes of XFF processing.\n   */\n  virtual uint32_t xffNumTrustedHops() const PURE;\n\n  /**\n   * @return bool don't append the remote address to XFF? 
This overrides the behavior of\n   *              useRemoteAddress() and may be used when XFF should not be modified but we still\n   *              want to avoid trusting incoming XFF in remote IP determination.\n   */\n  virtual bool skipXffAppend() const PURE;\n\n  /**\n   * @return const absl::optional<std::string>& value of via header to add to requests and response\n   *                                            headers if set.\n   */\n  virtual const std::string& via() const PURE;\n\n  /**\n   * @return ForwardClientCertType the configuration of how to forward the client cert information.\n   */\n  virtual ForwardClientCertType forwardClientCert() const PURE;\n\n  /**\n   * @return vector of ClientCertDetailsType the configuration of the current client cert's details\n   * to be forwarded.\n   */\n  virtual const std::vector<ClientCertDetailsType>& setCurrentClientCertDetails() const PURE;\n\n  /**\n   * @return local address.\n   * Gives richer information in case of internal requests.\n   */\n  virtual const Network::Address::Instance& localAddress() PURE;\n\n  /**\n   * @return custom user agent for internal requests for better debugging. Must be configured to\n   *         be enabled. User agent will only overwritten if it doesn't already exist. 
If enabled,\n   *         the same user agent will be written to the x-envoy-downstream-service-cluster header.\n   */\n  virtual const absl::optional<std::string>& userAgent() PURE;\n\n  /**\n   *  @return HttpTracerSharedPtr HttpTracer to use.\n   */\n  virtual Tracing::HttpTracerSharedPtr tracer() PURE;\n\n  /**\n   * @return tracing config.\n   */\n  virtual const TracingConnectionManagerConfig* tracingConfig() PURE;\n\n  /**\n   * @return ConnectionManagerListenerStats& the stats to write to.\n   */\n  virtual ConnectionManagerListenerStats& listenerStats() PURE;\n\n  /**\n   * @return bool supplies if the HttpConnectionManager should proxy the Expect: 100-Continue\n   */\n  virtual bool proxy100Continue() const PURE;\n\n  /**\n   * @return bool supplies if the HttpConnectionManager should handle invalid HTTP with a stream\n   * error or connection error.\n   */\n  virtual bool streamErrorOnInvalidHttpMessaging() const PURE;\n\n  /**\n   * @return supplies the http1 settings.\n   */\n  virtual const Http::Http1Settings& http1Settings() const PURE;\n\n  /**\n   * @return if the HttpConnectionManager should normalize url following RFC3986\n   */\n  virtual bool shouldNormalizePath() const PURE;\n\n  /**\n   * @return if the HttpConnectionManager should merge two or more adjacent slashes in the path into\n   * one.\n   */\n  virtual bool shouldMergeSlashes() const PURE;\n\n  /**\n   * @return if the HttpConnectionManager should remove the port from host/authority header\n   */\n  virtual bool shouldStripMatchingPort() const PURE;\n\n  /**\n   * @return the action HttpConnectionManager should take when receiving client request\n   * headers containing underscore characters.\n   */\n  virtual envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n  headersWithUnderscoresAction() const PURE;\n\n  /**\n   * @return LocalReply configuration which supplies mapping for local reply generated by Envoy.\n   */\n  virtual const LocalReply::LocalReply& 
localReply() const PURE;\n};\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/conn_manager_impl.cc",
    "content": "#include \"common/http/conn_manager_impl.h\"\n\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/drain_decision.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/ssl/connection.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stream_info/filter_state.h\"\n#include \"envoy/stream_info/stream_info.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/scope_tracker.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/conn_manager_utility.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/http1/codec_impl.h\"\n#include \"common/http/http2/codec_impl.h\"\n#include \"common/http/path_utility.h\"\n#include \"common/http/status.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n#include \"common/router/config_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/stats/timespan_impl.h\"\n\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nConnectionManagerStats ConnectionManagerImpl::generateStats(const std::string& prefix,\n                                                      
      Stats::Scope& scope) {\n  return ConnectionManagerStats(\n      {ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER_PREFIX(scope, prefix), POOL_GAUGE_PREFIX(scope, prefix),\n                               POOL_HISTOGRAM_PREFIX(scope, prefix))},\n      prefix, scope);\n}\n\nConnectionManagerTracingStats ConnectionManagerImpl::generateTracingStats(const std::string& prefix,\n                                                                          Stats::Scope& scope) {\n  return {CONN_MAN_TRACING_STATS(POOL_COUNTER_PREFIX(scope, prefix + \"tracing.\"))};\n}\n\nConnectionManagerListenerStats\nConnectionManagerImpl::generateListenerStats(const std::string& prefix, Stats::Scope& scope) {\n  return {CONN_MAN_LISTENER_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n}\n\nConnectionManagerImpl::ConnectionManagerImpl(ConnectionManagerConfig& config,\n                                             const Network::DrainDecision& drain_close,\n                                             Random::RandomGenerator& random_generator,\n                                             Http::Context& http_context, Runtime::Loader& runtime,\n                                             const LocalInfo::LocalInfo& local_info,\n                                             Upstream::ClusterManager& cluster_manager,\n                                             Server::OverloadManager& overload_manager,\n                                             TimeSource& time_source)\n    : config_(config), stats_(config_.stats()),\n      conn_length_(new Stats::HistogramCompletableTimespanImpl(\n          stats_.named_.downstream_cx_length_ms_, time_source)),\n      drain_close_(drain_close), user_agent_(http_context.userAgentContext()),\n      random_generator_(random_generator), http_context_(http_context), runtime_(runtime),\n      local_info_(local_info), cluster_manager_(cluster_manager),\n      listener_stats_(config_.listenerStats()),\n      
overload_stop_accepting_requests_ref_(overload_manager.getThreadLocalOverloadState().getState(\n          Server::OverloadActionNames::get().StopAcceptingRequests)),\n      overload_disable_keepalive_ref_(overload_manager.getThreadLocalOverloadState().getState(\n          Server::OverloadActionNames::get().DisableHttpKeepAlive)),\n      time_source_(time_source) {}\n\nconst ResponseHeaderMap& ConnectionManagerImpl::continueHeader() {\n  static const auto headers = createHeaderMap<ResponseHeaderMapImpl>(\n      {{Http::Headers::get().Status, std::to_string(enumToInt(Code::Continue))}});\n  return *headers;\n}\n\nvoid ConnectionManagerImpl::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {\n  read_callbacks_ = &callbacks;\n  stats_.named_.downstream_cx_total_.inc();\n  stats_.named_.downstream_cx_active_.inc();\n  if (read_callbacks_->connection().ssl()) {\n    stats_.named_.downstream_cx_ssl_total_.inc();\n    stats_.named_.downstream_cx_ssl_active_.inc();\n  }\n\n  read_callbacks_->connection().addConnectionCallbacks(*this);\n\n  if (config_.idleTimeout()) {\n    connection_idle_timer_ = read_callbacks_->connection().dispatcher().createTimer(\n        [this]() -> void { onIdleTimeout(); });\n    connection_idle_timer_->enableTimer(config_.idleTimeout().value());\n  }\n\n  if (config_.maxConnectionDuration()) {\n    connection_duration_timer_ = read_callbacks_->connection().dispatcher().createTimer(\n        [this]() -> void { onConnectionDurationTimeout(); });\n    connection_duration_timer_->enableTimer(config_.maxConnectionDuration().value());\n  }\n\n  read_callbacks_->connection().setDelayedCloseTimeout(config_.delayedCloseTimeout());\n\n  read_callbacks_->connection().setConnectionStats(\n      {stats_.named_.downstream_cx_rx_bytes_total_, stats_.named_.downstream_cx_rx_bytes_buffered_,\n       stats_.named_.downstream_cx_tx_bytes_total_, stats_.named_.downstream_cx_tx_bytes_buffered_,\n       nullptr, 
&stats_.named_.downstream_cx_delayed_close_timeout_});\n}\n\nConnectionManagerImpl::~ConnectionManagerImpl() {\n  stats_.named_.downstream_cx_destroy_.inc();\n\n  stats_.named_.downstream_cx_active_.dec();\n  if (read_callbacks_->connection().ssl()) {\n    stats_.named_.downstream_cx_ssl_active_.dec();\n  }\n\n  if (codec_) {\n    if (codec_->protocol() == Protocol::Http2) {\n      stats_.named_.downstream_cx_http2_active_.dec();\n    } else if (codec_->protocol() == Protocol::Http3) {\n      stats_.named_.downstream_cx_http3_active_.dec();\n    } else {\n      stats_.named_.downstream_cx_http1_active_.dec();\n    }\n  }\n\n  conn_length_->complete();\n  user_agent_.completeConnectionLength(*conn_length_);\n}\n\nvoid ConnectionManagerImpl::checkForDeferredClose() {\n  if (drain_state_ == DrainState::Closing && streams_.empty() && !codec_->wantsToWrite()) {\n    doConnectionClose(Network::ConnectionCloseType::FlushWriteAndDelay, absl::nullopt,\n                      StreamInfo::ResponseCodeDetails::get().DownstreamLocalDisconnect);\n  }\n}\n\nvoid ConnectionManagerImpl::doEndStream(ActiveStream& stream) {\n  // The order of what happens in this routine is important and a little complicated. We first see\n  // if the stream needs to be reset. If it needs to be, this will end up invoking reset callbacks\n  // and then moving the stream to the deferred destruction list. If the stream has not been reset,\n  // we move it to the deferred deletion list here. Then, we potentially close the connection. This\n  // must be done after deleting the stream since the stream refers to the connection and must be\n  // deleted first.\n  bool reset_stream = false;\n  // If the response encoder is still associated with the stream, reset the stream. 
The exception\n  // here is when Envoy \"ends\" the stream by calling recreateStream at which point recreateStream\n  // explicitly nulls out response_encoder to avoid the downstream being notified of the\n  // Envoy-internal stream instance being ended.\n  if (stream.response_encoder_ != nullptr &&\n      (!stream.filter_manager_.remoteComplete() || !stream.state_.codec_saw_local_complete_)) {\n    // Indicate local is complete at this point so that if we reset during a continuation, we don't\n    // raise further data or trailers.\n    ENVOY_STREAM_LOG(debug, \"doEndStream() resetting stream\", stream);\n    // TODO(snowp): This call might not be necessary, try to clean up + remove setter function.\n    stream.filter_manager_.setLocalComplete();\n    stream.state_.codec_saw_local_complete_ = true;\n    stream.response_encoder_->getStream().resetStream(StreamResetReason::LocalReset);\n    reset_stream = true;\n  }\n\n  if (!reset_stream) {\n    doDeferredStreamDestroy(stream);\n  }\n\n  if (reset_stream && codec_->protocol() < Protocol::Http2) {\n    drain_state_ = DrainState::Closing;\n  }\n\n  checkForDeferredClose();\n}\n\nvoid ConnectionManagerImpl::doDeferredStreamDestroy(ActiveStream& stream) {\n  if (stream.max_stream_duration_timer_) {\n    stream.max_stream_duration_timer_->disableTimer();\n    stream.max_stream_duration_timer_ = nullptr;\n  }\n  if (stream.stream_idle_timer_ != nullptr) {\n    stream.stream_idle_timer_->disableTimer();\n    stream.stream_idle_timer_ = nullptr;\n  }\n  stream.filter_manager_.disarmRequestTimeout();\n\n  stream.completeRequest();\n  stream.filter_manager_.onStreamComplete();\n  stream.filter_manager_.log();\n\n  stream.filter_manager_.destroyFilters();\n\n  read_callbacks_->connection().dispatcher().deferredDelete(stream.removeFromList(streams_));\n\n  if (connection_idle_timer_ && streams_.empty()) {\n    connection_idle_timer_->enableTimer(config_.idleTimeout().value());\n  }\n}\n\nRequestDecoder& 
ConnectionManagerImpl::newStream(ResponseEncoder& response_encoder,\n                                                 bool is_internally_created) {\n  if (connection_idle_timer_) {\n    connection_idle_timer_->disableTimer();\n  }\n\n  ENVOY_CONN_LOG(debug, \"new stream\", read_callbacks_->connection());\n  ActiveStreamPtr new_stream(new ActiveStream(*this, response_encoder.getStream().bufferLimit()));\n  new_stream->state_.is_internally_created_ = is_internally_created;\n  new_stream->response_encoder_ = &response_encoder;\n  new_stream->response_encoder_->getStream().addCallbacks(*new_stream);\n  new_stream->response_encoder_->getStream().setFlushTimeout(new_stream->idle_timeout_ms_);\n  // If the network connection is backed up, the stream should be made aware of it on creation.\n  // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacksHelper.\n  ASSERT(read_callbacks_->connection().aboveHighWatermark() == false ||\n         new_stream->filter_manager_.aboveHighWatermark());\n  LinkedList::moveIntoList(std::move(new_stream), streams_);\n  return **streams_.begin();\n}\n\nvoid ConnectionManagerImpl::handleCodecError(absl::string_view error) {\n  ENVOY_CONN_LOG(debug, \"dispatch error: {}\", read_callbacks_->connection(), error);\n  read_callbacks_->connection().streamInfo().setResponseFlag(\n      StreamInfo::ResponseFlag::DownstreamProtocolError);\n\n  // HTTP/1.1 codec has already sent a 400 response if possible. 
HTTP/2 codec has already sent\n  // GOAWAY.\n  doConnectionClose(Network::ConnectionCloseType::FlushWriteAndDelay,\n                    StreamInfo::ResponseFlag::DownstreamProtocolError,\n                    absl::StrCat(\"codec error: \", error));\n}\n\nvoid ConnectionManagerImpl::createCodec(Buffer::Instance& data) {\n  ASSERT(!codec_);\n  codec_ = config_.createCodec(read_callbacks_->connection(), data, *this);\n\n  switch (codec_->protocol()) {\n  case Protocol::Http3:\n    stats_.named_.downstream_cx_http3_total_.inc();\n    stats_.named_.downstream_cx_http3_active_.inc();\n    break;\n  case Protocol::Http2:\n    stats_.named_.downstream_cx_http2_total_.inc();\n    stats_.named_.downstream_cx_http2_active_.inc();\n    break;\n  case Protocol::Http11:\n  case Protocol::Http10:\n    stats_.named_.downstream_cx_http1_total_.inc();\n    stats_.named_.downstream_cx_http1_active_.inc();\n    break;\n  }\n}\n\nNetwork::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool) {\n  if (!codec_) {\n    // Http3 codec should have been instantiated by now.\n    createCodec(data);\n  }\n\n  bool redispatch;\n  do {\n    redispatch = false;\n\n    const Status status = codec_->dispatch(data);\n\n    if (isBufferFloodError(status) || isInboundFramesWithEmptyPayloadError(status)) {\n      handleCodecError(status.message());\n      return Network::FilterStatus::StopIteration;\n    } else if (isCodecProtocolError(status)) {\n      stats_.named_.downstream_cx_protocol_error_.inc();\n      handleCodecError(status.message());\n      return Network::FilterStatus::StopIteration;\n    }\n    ASSERT(status.ok());\n\n    // Processing incoming data may release outbound data so check for closure here as well.\n    checkForDeferredClose();\n\n    // The HTTP/1 codec will pause dispatch after a single message is complete. We want to\n    // either redispatch if there are no streams and we have more data. 
If we have a single\n    // complete non-WebSocket stream but have not responded yet we will pause socket reads\n    // to apply back pressure.\n    if (codec_->protocol() < Protocol::Http2) {\n      if (read_callbacks_->connection().state() == Network::Connection::State::Open &&\n          data.length() > 0 && streams_.empty()) {\n        redispatch = true;\n      }\n    }\n  } while (redispatch);\n\n  if (!read_callbacks_->connection().streamInfo().protocol()) {\n    read_callbacks_->connection().streamInfo().protocol(codec_->protocol());\n  }\n\n  return Network::FilterStatus::StopIteration;\n}\n\nNetwork::FilterStatus ConnectionManagerImpl::onNewConnection() {\n  if (!read_callbacks_->connection().streamInfo().protocol()) {\n    // For Non-QUIC traffic, continue passing data to filters.\n    return Network::FilterStatus::Continue;\n  }\n  // Only QUIC connection's stream_info_ specifies protocol.\n  Buffer::OwnedImpl dummy;\n  createCodec(dummy);\n  ASSERT(codec_->protocol() == Protocol::Http3);\n  // Stop iterating through each filters for QUIC. Currently a QUIC connection\n  // only supports one filter, HCM, and bypasses the onData() interface. Because\n  // QUICHE already handles de-multiplexing.\n  return Network::FilterStatus::StopIteration;\n}\n\nvoid ConnectionManagerImpl::resetAllStreams(absl::optional<StreamInfo::ResponseFlag> response_flag,\n                                            absl::string_view details) {\n  while (!streams_.empty()) {\n    // Mimic a downstream reset in this case. We must also remove callbacks here. 
Though we are\n    // about to close the connection and will disable further reads, it is possible that flushing\n    // data out can cause stream callbacks to fire (e.g., low watermark callbacks).\n    //\n    // TODO(mattklein123): I tried to actually reset through the codec here, but ran into issues\n    // with nghttp2 state and being unhappy about sending reset frames after the connection had\n    // been terminated via GOAWAY. It might be possible to do something better here inside the h2\n    // codec but there are no easy answers and this seems simpler.\n    auto& stream = *streams_.front();\n    stream.response_encoder_->getStream().removeCallbacks(stream);\n    if (!stream.response_encoder_->getStream().responseDetails().empty()) {\n      stream.filter_manager_.streamInfo().setResponseCodeDetails(\n          stream.response_encoder_->getStream().responseDetails());\n    } else if (!details.empty()) {\n      stream.filter_manager_.streamInfo().setResponseCodeDetails(details);\n    }\n    if (response_flag.has_value()) {\n      stream.filter_manager_.streamInfo().setResponseFlag(response_flag.value());\n    }\n    stream.onResetStream(StreamResetReason::ConnectionTermination, absl::string_view());\n  }\n}\n\nvoid ConnectionManagerImpl::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::LocalClose) {\n    stats_.named_.downstream_cx_destroy_local_.inc();\n  }\n\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    if (event == Network::ConnectionEvent::RemoteClose) {\n      remote_close_ = true;\n      stats_.named_.downstream_cx_destroy_remote_.inc();\n    }\n    absl::string_view details =\n        event == Network::ConnectionEvent::RemoteClose\n            ? 
StreamInfo::ResponseCodeDetails::get().DownstreamRemoteDisconnect\n            : StreamInfo::ResponseCodeDetails::get().DownstreamLocalDisconnect;\n    // TODO(mattklein123): It is technically possible that something outside of the filter causes\n    // a local connection close, so we still guard against that here. A better solution would be to\n    // have some type of \"pre-close\" callback that we could hook for cleanup that would get called\n    // regardless of where local close is invoked from.\n    // NOTE: that this will cause doConnectionClose() to get called twice in the common local close\n    // cases, but the method protects against that.\n    // NOTE: In the case where a local close comes from outside the filter, this will cause any\n    // stream closures to increment remote close stats. We should do better here in the future,\n    // via the pre-close callback mentioned above.\n    doConnectionClose(absl::nullopt, absl::nullopt, details);\n  }\n}\n\nvoid ConnectionManagerImpl::doConnectionClose(\n    absl::optional<Network::ConnectionCloseType> close_type,\n    absl::optional<StreamInfo::ResponseFlag> response_flag, absl::string_view details) {\n  if (connection_idle_timer_) {\n    connection_idle_timer_->disableTimer();\n    connection_idle_timer_.reset();\n  }\n\n  if (connection_duration_timer_) {\n    connection_duration_timer_->disableTimer();\n    connection_duration_timer_.reset();\n  }\n\n  if (drain_timer_) {\n    drain_timer_->disableTimer();\n    drain_timer_.reset();\n  }\n\n  if (!streams_.empty()) {\n    const Network::ConnectionEvent event = close_type.has_value()\n                                               ? 
Network::ConnectionEvent::LocalClose\n                                               : Network::ConnectionEvent::RemoteClose;\n    if (event == Network::ConnectionEvent::LocalClose) {\n      stats_.named_.downstream_cx_destroy_local_active_rq_.inc();\n    }\n    if (event == Network::ConnectionEvent::RemoteClose) {\n      stats_.named_.downstream_cx_destroy_remote_active_rq_.inc();\n    }\n\n    stats_.named_.downstream_cx_destroy_active_rq_.inc();\n    user_agent_.onConnectionDestroy(event, true);\n    // Note that resetAllStreams() does not actually write anything to the wire. It just resets\n    // all upstream streams and their filter stacks. Thus, there are no issues around recursive\n    // entry.\n    resetAllStreams(response_flag, details);\n  }\n\n  if (close_type.has_value()) {\n    read_callbacks_->connection().close(close_type.value());\n  }\n}\n\nvoid ConnectionManagerImpl::onGoAway(GoAwayErrorCode) {\n  // Currently we do nothing with remote go away frames. In the future we can decide to no longer\n  // push resources if applicable.\n}\n\nvoid ConnectionManagerImpl::onIdleTimeout() {\n  ENVOY_CONN_LOG(debug, \"idle timeout\", read_callbacks_->connection());\n  stats_.named_.downstream_cx_idle_timeout_.inc();\n  if (!codec_) {\n    // No need to delay close after flushing since an idle timeout has already fired. 
Attempt to\n    // write out buffered data one last time and issue a local close if successful.\n    doConnectionClose(Network::ConnectionCloseType::FlushWrite, absl::nullopt, \"\");\n  } else if (drain_state_ == DrainState::NotDraining) {\n    startDrainSequence();\n  }\n}\n\n// TODO(#13142): Add DurationTimeout response flag for HCM.\nvoid ConnectionManagerImpl::onConnectionDurationTimeout() {\n  ENVOY_CONN_LOG(debug, \"max connection duration reached\", read_callbacks_->connection());\n  stats_.named_.downstream_cx_max_duration_reached_.inc();\n  if (!codec_) {\n    // Attempt to write out buffered data one last time and issue a local close if successful.\n    doConnectionClose(Network::ConnectionCloseType::FlushWrite, absl::nullopt,\n                      StreamInfo::ResponseCodeDetails::get().DurationTimeout);\n  } else if (drain_state_ == DrainState::NotDraining) {\n    startDrainSequence();\n  }\n}\n\nvoid ConnectionManagerImpl::onDrainTimeout() {\n  ASSERT(drain_state_ != DrainState::NotDraining);\n  codec_->goAway();\n  drain_state_ = DrainState::Closing;\n  checkForDeferredClose();\n}\n\nvoid ConnectionManagerImpl::chargeTracingStats(const Tracing::Reason& tracing_reason,\n                                               ConnectionManagerTracingStats& tracing_stats) {\n  switch (tracing_reason) {\n  case Tracing::Reason::ClientForced:\n    tracing_stats.client_enabled_.inc();\n    break;\n  case Tracing::Reason::NotTraceableRequestId:\n    tracing_stats.not_traceable_.inc();\n    break;\n  case Tracing::Reason::Sampling:\n    tracing_stats.random_sampling_.inc();\n    break;\n  case Tracing::Reason::ServiceForced:\n    tracing_stats.service_forced_.inc();\n    break;\n  default:\n    throw std::invalid_argument(\n        absl::StrCat(\"invalid tracing reason, value: \", static_cast<int32_t>(tracing_reason)));\n  }\n}\n\n// TODO(chaoqin-li1123): Make on demand vhds and on demand srds works at the same time.\nvoid 
ConnectionManagerImpl::RdsRouteConfigUpdateRequester::requestRouteConfigUpdate(\n    Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) {\n  absl::optional<Router::ConfigConstSharedPtr> route_config = parent_.routeConfig();\n  Event::Dispatcher& thread_local_dispatcher =\n      parent_.connection_manager_.read_callbacks_->connection().dispatcher();\n  if (route_config.has_value() && route_config.value()->usesVhds()) {\n    ASSERT(!parent_.request_headers_->Host()->value().empty());\n    const auto& host_header = absl::AsciiStrToLower(parent_.request_headers_->getHostValue());\n    requestVhdsUpdate(host_header, thread_local_dispatcher, std::move(route_config_updated_cb));\n    return;\n  } else if (parent_.snapped_scoped_routes_config_ != nullptr) {\n    Router::ScopeKeyPtr scope_key =\n        parent_.snapped_scoped_routes_config_->computeScopeKey(*parent_.request_headers_);\n    // If scope_key is not null, the scope exists but RouteConfiguration is not initialized.\n    if (scope_key != nullptr) {\n      requestSrdsUpdate(std::move(scope_key), thread_local_dispatcher,\n                        std::move(route_config_updated_cb));\n      return;\n    }\n  }\n  // Continue the filter chain if no on demand update is requested.\n  (*route_config_updated_cb)(false);\n}\n\nvoid ConnectionManagerImpl::RdsRouteConfigUpdateRequester::requestVhdsUpdate(\n    const std::string& host_header, Event::Dispatcher& thread_local_dispatcher,\n    Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) {\n  route_config_provider_->requestVirtualHostsUpdate(host_header, thread_local_dispatcher,\n                                                    std::move(route_config_updated_cb));\n}\n\nvoid ConnectionManagerImpl::RdsRouteConfigUpdateRequester::requestSrdsUpdate(\n    Router::ScopeKeyPtr scope_key, Event::Dispatcher& thread_local_dispatcher,\n    Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) {\n  // Since inline 
scope_route_config_provider is not fully implemented and never used,\n  // dynamic cast in constructor always succeed and the pointer should not be null here.\n  ASSERT(scoped_route_config_provider_ != nullptr);\n  Http::RouteConfigUpdatedCallback scoped_route_config_updated_cb =\n      Http::RouteConfigUpdatedCallback(\n          [this, weak_route_config_updated_cb = std::weak_ptr<Http::RouteConfigUpdatedCallback>(\n                     route_config_updated_cb)](bool scope_exist) {\n            // If the callback can be locked, this ActiveStream is still alive.\n            if (auto cb = weak_route_config_updated_cb.lock()) {\n              // Refresh the route before continue the filter chain.\n              if (scope_exist) {\n                parent_.refreshCachedRoute();\n              }\n              (*cb)(scope_exist && parent_.hasCachedRoute());\n            }\n          });\n  scoped_route_config_provider_->onDemandRdsUpdate(std::move(scope_key), thread_local_dispatcher,\n                                                   std::move(scoped_route_config_updated_cb));\n}\n\nConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connection_manager,\n                                                  uint32_t buffer_limit)\n    : connection_manager_(connection_manager),\n      stream_id_(connection_manager.random_generator_.random()),\n      filter_manager_(*this, connection_manager_.read_callbacks_->connection().dispatcher(),\n                      connection_manager_.read_callbacks_->connection(), stream_id_,\n                      connection_manager_.config_.proxy100Continue(), buffer_limit,\n                      connection_manager_.config_.filterFactory(),\n                      connection_manager_.config_.localReply(),\n                      connection_manager_.codec_->protocol(), connection_manager_.timeSource(),\n                      connection_manager_.read_callbacks_->connection().streamInfo().filterState(),\n                      
StreamInfo::FilterState::LifeSpan::Connection),\n      request_response_timespan_(new Stats::HistogramCompletableTimespanImpl(\n          connection_manager_.stats_.named_.downstream_rq_time_,\n          connection_manager_.timeSource())) {\n  ASSERT(!connection_manager.config_.isRoutable() ||\n             ((connection_manager.config_.routeConfigProvider() == nullptr &&\n               connection_manager.config_.scopedRouteConfigProvider() != nullptr) ||\n              (connection_manager.config_.routeConfigProvider() != nullptr &&\n               connection_manager.config_.scopedRouteConfigProvider() == nullptr)),\n         \"Either routeConfigProvider or scopedRouteConfigProvider should be set in \"\n         \"ConnectionManagerImpl.\");\n  for (const AccessLog::InstanceSharedPtr& access_log : connection_manager_.config_.accessLogs()) {\n    filter_manager_.addAccessLogHandler(access_log);\n  }\n\n  filter_manager_.streamInfo().setRequestIDExtension(\n      connection_manager.config_.requestIDExtension());\n\n  if (connection_manager_.config_.isRoutable() &&\n      connection_manager.config_.routeConfigProvider() != nullptr) {\n    route_config_update_requester_ =\n        std::make_unique<ConnectionManagerImpl::RdsRouteConfigUpdateRequester>(\n            connection_manager.config_.routeConfigProvider(), *this);\n  } else if (connection_manager_.config_.isRoutable() &&\n             connection_manager.config_.scopedRouteConfigProvider() != nullptr) {\n    route_config_update_requester_ =\n        std::make_unique<ConnectionManagerImpl::RdsRouteConfigUpdateRequester>(\n            connection_manager.config_.scopedRouteConfigProvider(), *this);\n  }\n  ScopeTrackerScopeState scope(this,\n                               connection_manager_.read_callbacks_->connection().dispatcher());\n\n  connection_manager_.stats_.named_.downstream_rq_total_.inc();\n  connection_manager_.stats_.named_.downstream_rq_active_.inc();\n  if (connection_manager_.codec_->protocol() == 
Protocol::Http2) {\n    connection_manager_.stats_.named_.downstream_rq_http2_total_.inc();\n  } else if (connection_manager_.codec_->protocol() == Protocol::Http3) {\n    connection_manager_.stats_.named_.downstream_rq_http3_total_.inc();\n  } else {\n    connection_manager_.stats_.named_.downstream_rq_http1_total_.inc();\n  }\n  filter_manager_.streamInfo().setDownstreamLocalAddress(\n      connection_manager_.read_callbacks_->connection().localAddress());\n  filter_manager_.streamInfo().setDownstreamDirectRemoteAddress(\n      connection_manager_.read_callbacks_->connection().directRemoteAddress());\n  // Initially, the downstream remote address is the source address of the\n  // downstream connection. That can change later in the request's lifecycle,\n  // based on XFF processing, but setting the downstream remote address here\n  // prevents surprises for logging code in edge cases.\n  filter_manager_.streamInfo().setDownstreamRemoteAddress(\n      connection_manager_.read_callbacks_->connection().remoteAddress());\n\n  filter_manager_.streamInfo().setDownstreamSslConnection(\n      connection_manager_.read_callbacks_->connection().ssl());\n\n  filter_manager_.streamInfo().setConnectionID(\n      connection_manager_.read_callbacks_->connection().id());\n\n  if (connection_manager_.config_.streamIdleTimeout().count()) {\n    idle_timeout_ms_ = connection_manager_.config_.streamIdleTimeout();\n    stream_idle_timer_ = connection_manager_.read_callbacks_->connection().dispatcher().createTimer(\n        [this]() -> void { onIdleTimeout(); });\n    resetIdleTimer();\n  }\n\n  if (connection_manager_.config_.requestTimeout().count()) {\n    std::chrono::milliseconds request_timeout_ms_ = connection_manager_.config_.requestTimeout();\n    request_timer_ = connection_manager.read_callbacks_->connection().dispatcher().createTimer(\n        [this]() -> void { onRequestTimeout(); });\n    request_timer_->enableTimer(request_timeout_ms_, this);\n  }\n\n  const auto 
max_stream_duration = connection_manager_.config_.maxStreamDuration();\n  if (max_stream_duration.has_value() && max_stream_duration.value().count()) {\n    max_stream_duration_timer_ =\n        connection_manager.read_callbacks_->connection().dispatcher().createTimer(\n            [this]() -> void { onStreamMaxDurationReached(); });\n    max_stream_duration_timer_->enableTimer(connection_manager_.config_.maxStreamDuration().value(),\n                                            this);\n  }\n\n  filter_manager_.streamInfo().setRequestedServerName(\n      connection_manager_.read_callbacks_->connection().requestedServerName());\n}\n\nvoid ConnectionManagerImpl::ActiveStream::completeRequest() {\n  filter_manager_.streamInfo().onRequestComplete();\n  Upstream::HostDescriptionConstSharedPtr upstream_host =\n      connection_manager_.read_callbacks_->upstreamHost();\n\n  if (upstream_host != nullptr) {\n    Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats =\n        upstream_host->cluster().requestResponseSizeStats();\n    if (req_resp_stats.has_value()) {\n      req_resp_stats->get().upstream_rq_body_size_.recordValue(\n          filter_manager_.streamInfo().bytesReceived());\n      req_resp_stats->get().upstream_rs_body_size_.recordValue(\n          filter_manager_.streamInfo().bytesSent());\n    }\n  }\n\n  if (connection_manager_.remote_close_) {\n    filter_manager_.streamInfo().setResponseCodeDetails(\n        StreamInfo::ResponseCodeDetails::get().DownstreamRemoteDisconnect);\n    filter_manager_.streamInfo().setResponseFlag(\n        StreamInfo::ResponseFlag::DownstreamConnectionTermination);\n  }\n  // TODO(danzh) bring HTTP/3 to parity here.\n  if (connection_manager_.codec_->protocol() != Protocol::Http3) {\n    ASSERT(filter_manager_.streamInfo().responseCodeDetails().has_value());\n  }\n  connection_manager_.stats_.named_.downstream_rq_active_.dec();\n  if (filter_manager_.streamInfo().healthCheck()) {\n    
connection_manager_.config_.tracingStats().health_check_.inc();\n  }\n\n  if (active_span_) {\n    Tracing::HttpTracerUtility::finalizeDownstreamSpan(\n        *active_span_, request_headers_.get(), response_headers_.get(), response_trailers_.get(),\n        filter_manager_.streamInfo(), *this);\n  }\n  if (state_.successful_upgrade_) {\n    connection_manager_.stats_.named_.downstream_cx_upgrades_active_.dec();\n  }\n}\n\nvoid ConnectionManagerImpl::ActiveStream::resetIdleTimer() {\n  if (stream_idle_timer_ != nullptr) {\n    // TODO(htuch): If this shows up in performance profiles, optimize by only\n    // updating a timestamp here and doing periodic checks for idle timeouts\n    // instead, or reducing the accuracy of timers.\n    stream_idle_timer_->enableTimer(idle_timeout_ms_);\n  }\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onIdleTimeout() {\n  connection_manager_.stats_.named_.downstream_rq_idle_timeout_.inc();\n  // If headers have not been sent to the user, send a 408.\n  if (responseHeaders().has_value() &&\n      !Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.allow_response_for_timeout\")) {\n    // TODO(htuch): We could send trailers here with an x-envoy timeout header\n    // or gRPC status code, and/or set H2 RST_STREAM error.\n    filter_manager_.streamInfo().setResponseCodeDetails(\n        StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout);\n    connection_manager_.doEndStream(*this);\n  } else {\n    // TODO(mattklein) this may result in multiple flags. 
This Ok?\n    filter_manager_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout);\n    sendLocalReply(request_headers_ != nullptr &&\n                       Grpc::Common::isGrpcRequestHeaders(*request_headers_),\n                   Http::Code::RequestTimeout, \"stream timeout\", nullptr, absl::nullopt,\n                   StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout);\n  }\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onRequestTimeout() {\n  connection_manager_.stats_.named_.downstream_rq_timeout_.inc();\n  sendLocalReply(request_headers_ != nullptr &&\n                     Grpc::Common::isGrpcRequestHeaders(*request_headers_),\n                 Http::Code::RequestTimeout, \"request timeout\", nullptr, absl::nullopt,\n                 StreamInfo::ResponseCodeDetails::get().RequestOverallTimeout);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() {\n  ENVOY_STREAM_LOG(debug, \"Stream max duration time reached\", *this);\n  connection_manager_.stats_.named_.downstream_rq_max_duration_reached_.inc();\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.allow_response_for_timeout\")) {\n    sendLocalReply(request_headers_ != nullptr &&\n                       Grpc::Common::isGrpcRequestHeaders(*request_headers_),\n                   Http::Code::RequestTimeout, \"downstream duration timeout\", nullptr,\n                   Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded,\n                   StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout);\n  } else {\n    filter_manager_.streamInfo().setResponseCodeDetails(\n        StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout);\n    connection_manager_.doEndStream(*this);\n  }\n}\n\nvoid ConnectionManagerImpl::ActiveStream::chargeStats(const ResponseHeaderMap& headers) {\n  uint64_t response_code = Utility::getResponseStatus(headers);\n  filter_manager_.streamInfo().response_code_ = response_code;\n\n  if 
(filter_manager_.streamInfo().health_check_request_) {\n    return;\n  }\n\n  Upstream::HostDescriptionConstSharedPtr upstream_host =\n      connection_manager_.read_callbacks_->upstreamHost();\n\n  if (upstream_host != nullptr) {\n    Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats =\n        upstream_host->cluster().requestResponseSizeStats();\n    if (req_resp_stats.has_value()) {\n      req_resp_stats->get().upstream_rs_headers_size_.recordValue(headers.byteSize());\n    }\n  }\n\n  connection_manager_.stats_.named_.downstream_rq_completed_.inc();\n  connection_manager_.listener_stats_.downstream_rq_completed_.inc();\n  if (CodeUtility::is1xx(response_code)) {\n    connection_manager_.stats_.named_.downstream_rq_1xx_.inc();\n    connection_manager_.listener_stats_.downstream_rq_1xx_.inc();\n  } else if (CodeUtility::is2xx(response_code)) {\n    connection_manager_.stats_.named_.downstream_rq_2xx_.inc();\n    connection_manager_.listener_stats_.downstream_rq_2xx_.inc();\n  } else if (CodeUtility::is3xx(response_code)) {\n    connection_manager_.stats_.named_.downstream_rq_3xx_.inc();\n    connection_manager_.listener_stats_.downstream_rq_3xx_.inc();\n  } else if (CodeUtility::is4xx(response_code)) {\n    connection_manager_.stats_.named_.downstream_rq_4xx_.inc();\n    connection_manager_.listener_stats_.downstream_rq_4xx_.inc();\n  } else if (CodeUtility::is5xx(response_code)) {\n    connection_manager_.stats_.named_.downstream_rq_5xx_.inc();\n    connection_manager_.listener_stats_.downstream_rq_5xx_.inc();\n  }\n}\n\nconst Network::Connection* ConnectionManagerImpl::ActiveStream::connection() {\n  return &connection_manager_.read_callbacks_->connection();\n}\n\nuint32_t ConnectionManagerImpl::ActiveStream::localPort() {\n  auto ip = connection()->localAddress()->ip();\n  if (ip == nullptr) {\n    return 0;\n  }\n  return ip->port();\n}\n\n// Ordering in this function is complicated, but important.\n//\n// We want to do minimal work before 
selecting route and creating a filter\n// chain to maximize the number of requests which get custom filter behavior,\n// e.g. registering access logging.\n//\n// This must be balanced by doing sanity checking for invalid requests (one\n// can't route select properly without full headers), checking state required to\n// serve error responses (connection close, head requests, etc), and\n// modifications which may themselves affect route selection.\nvoid ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& headers,\n                                                        bool end_stream) {\n  ScopeTrackerScopeState scope(this,\n                               connection_manager_.read_callbacks_->connection().dispatcher());\n  request_headers_ = std::move(headers);\n  filter_manager_.requestHeadersInitialized();\n\n  Upstream::HostDescriptionConstSharedPtr upstream_host =\n      connection_manager_.read_callbacks_->upstreamHost();\n\n  if (upstream_host != nullptr) {\n    Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats =\n        upstream_host->cluster().requestResponseSizeStats();\n    if (req_resp_stats.has_value()) {\n      req_resp_stats->get().upstream_rq_headers_size_.recordValue(request_headers_->byteSize());\n    }\n  }\n\n  // Both saw_connection_close_ and is_head_request_ affect local replies: set\n  // them as early as possible.\n  const Protocol protocol = connection_manager_.codec_->protocol();\n  const bool fixed_connection_close =\n      Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.fixed_connection_close\");\n  if (fixed_connection_close) {\n    state_.saw_connection_close_ =\n        HeaderUtility::shouldCloseConnection(protocol, *request_headers_);\n  }\n  if (HeaderUtility::isConnect(*request_headers_) && !request_headers_->Path() &&\n      !Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.stop_faking_paths\")) {\n    request_headers_->setPath(\"/\");\n  }\n\n  // We need to snap 
snapped_route_config_ here as it's used in mutateRequestHeaders later.\n  if (connection_manager_.config_.isRoutable()) {\n    if (connection_manager_.config_.routeConfigProvider() != nullptr) {\n      snapped_route_config_ = connection_manager_.config_.routeConfigProvider()->config();\n    } else if (connection_manager_.config_.scopedRouteConfigProvider() != nullptr) {\n      snapped_scoped_routes_config_ =\n          connection_manager_.config_.scopedRouteConfigProvider()->config<Router::ScopedConfig>();\n      snapScopedRouteConfig();\n    }\n  } else {\n    snapped_route_config_ = connection_manager_.config_.routeConfigProvider()->config();\n  }\n\n  ENVOY_STREAM_LOG(debug, \"request headers complete (end_stream={}):\\n{}\", *this, end_stream,\n                   *request_headers_);\n\n  // We end the decode here only if the request is header only. If we convert the request to a\n  // header only, the stream will be marked as done once a subsequent decodeData/decodeTrailers is\n  // called with end_stream=true.\n  filter_manager_.maybeEndDecode(end_stream);\n\n  // Drop new requests when overloaded as soon as we have decoded the headers.\n  if (connection_manager_.random_generator_.bernoulli(\n          connection_manager_.overload_stop_accepting_requests_ref_.value())) {\n    // In this one special case, do not create the filter chain. 
If there is a risk of memory\n    // overload it is more important to avoid unnecessary allocation than to create the filters.\n    filter_manager_.skipFilterChainCreation();\n    connection_manager_.stats_.named_.downstream_rq_overload_close_.inc();\n    sendLocalReply(Grpc::Common::isGrpcRequestHeaders(*request_headers_),\n                   Http::Code::ServiceUnavailable, \"envoy overloaded\", nullptr, absl::nullopt,\n                   StreamInfo::ResponseCodeDetails::get().Overload);\n    return;\n  }\n\n  if (!connection_manager_.config_.proxy100Continue() && request_headers_->Expect() &&\n      request_headers_->Expect()->value() == Headers::get().ExpectValues._100Continue.c_str()) {\n    // Note in the case Envoy is handling 100-Continue complexity, it skips the filter chain\n    // and sends the 100-Continue directly to the encoder.\n    chargeStats(continueHeader());\n    response_encoder_->encode100ContinueHeaders(continueHeader());\n    // Remove the Expect header so it won't be handled again upstream.\n    request_headers_->removeExpect();\n  }\n\n  connection_manager_.user_agent_.initializeFromHeaders(*request_headers_,\n                                                        connection_manager_.stats_.prefixStatName(),\n                                                        connection_manager_.stats_.scope_);\n\n  // Make sure we are getting a codec version we support.\n  if (protocol == Protocol::Http10) {\n    // Assume this is HTTP/1.0. 
This is fine for HTTP/0.9 but this code will also affect any\n    // requests with non-standard version numbers (0.9, 1.3), basically anything which is not\n    // HTTP/1.1.\n    //\n    // The protocol may have shifted in the HTTP/1.0 case so reset it.\n    filter_manager_.streamInfo().protocol(protocol);\n    if (!connection_manager_.config_.http1Settings().accept_http_10_) {\n      // Send \"Upgrade Required\" if HTTP/1.0 support is not explicitly configured on.\n      sendLocalReply(false, Code::UpgradeRequired, \"\", nullptr, absl::nullopt,\n                     StreamInfo::ResponseCodeDetails::get().LowVersion);\n      return;\n    } else if (!fixed_connection_close) {\n      // HTTP/1.0 defaults to single-use connections. Make sure the connection\n      // will be closed unless Keep-Alive is present.\n      state_.saw_connection_close_ = true;\n      if (absl::EqualsIgnoreCase(request_headers_->getConnectionValue(),\n                                 Http::Headers::get().ConnectionValues.KeepAlive)) {\n        state_.saw_connection_close_ = false;\n      }\n    }\n    if (!request_headers_->Host() &&\n        !connection_manager_.config_.http1Settings().default_host_for_http_10_.empty()) {\n      // Add a default host if configured to do so.\n      request_headers_->setHost(\n          connection_manager_.config_.http1Settings().default_host_for_http_10_);\n    }\n  }\n\n  if (!request_headers_->Host()) {\n    // Require host header. 
For HTTP/1.1 Host has already been translated to :authority.\n    sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, \"\",\n                   nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().MissingHost);\n    return;\n  }\n\n  // Verify header sanity checks which should have been performed by the codec.\n  ASSERT(HeaderUtility::requestHeadersValid(*request_headers_).has_value() == false);\n\n  // Check for the existence of the :path header for non-CONNECT requests, or present-but-empty\n  // :path header for CONNECT requests. We expect the codec to have broken the path into pieces if\n  // applicable. NOTE: Currently the HTTP/1.1 codec only does this when the allow_absolute_url flag\n  // is enabled on the HCM.\n  if ((!HeaderUtility::isConnect(*request_headers_) || request_headers_->Path()) &&\n      request_headers_->getPathValue().empty()) {\n    sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, \"\", nullptr,\n                   absl::nullopt, StreamInfo::ResponseCodeDetails::get().MissingPath);\n    return;\n  }\n\n  // Currently we only support relative paths at the application layer.\n  if (!request_headers_->getPathValue().empty() && request_headers_->getPathValue()[0] != '/') {\n    connection_manager_.stats_.named_.downstream_rq_non_relative_path_.inc();\n    sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, \"\", nullptr,\n                   absl::nullopt, StreamInfo::ResponseCodeDetails::get().AbsolutePath);\n    return;\n  }\n\n  // Path sanitization should happen before any path access other than the above sanity check.\n  if (!ConnectionManagerUtility::maybeNormalizePath(*request_headers_,\n                                                    connection_manager_.config_)) {\n    sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, \"\",\n                   nullptr, absl::nullopt,\n                   
StreamInfo::ResponseCodeDetails::get().PathNormalizationFailed);\n    return;\n  }\n\n  ConnectionManagerUtility::maybeNormalizeHost(*request_headers_, connection_manager_.config_,\n                                               localPort());\n\n  if (!fixed_connection_close && protocol == Protocol::Http11 &&\n      absl::EqualsIgnoreCase(request_headers_->getConnectionValue(),\n                             Http::Headers::get().ConnectionValues.Close)) {\n    state_.saw_connection_close_ = true;\n  }\n  // Note: Proxy-Connection is not a standard header, but is supported here\n  // since it is supported by http-parser the underlying parser for http\n  // requests.\n  if (!fixed_connection_close && protocol < Protocol::Http2 && !state_.saw_connection_close_ &&\n      absl::EqualsIgnoreCase(request_headers_->getProxyConnectionValue(),\n                             Http::Headers::get().ConnectionValues.Close)) {\n    state_.saw_connection_close_ = true;\n  }\n\n  if (!state_.is_internally_created_) { // Only sanitize headers on first pass.\n    // Modify the downstream remote address depending on configuration and headers.\n    filter_manager_.streamInfo().setDownstreamRemoteAddress(\n        ConnectionManagerUtility::mutateRequestHeaders(\n            *request_headers_, connection_manager_.read_callbacks_->connection(),\n            connection_manager_.config_, *snapped_route_config_, connection_manager_.local_info_));\n  }\n  ASSERT(filter_manager_.streamInfo().downstreamRemoteAddress() != nullptr);\n\n  ASSERT(!cached_route_);\n  refreshCachedRoute();\n\n  if (!state_.is_internally_created_) { // Only mutate tracing headers on first pass.\n    ConnectionManagerUtility::mutateTracingRequestHeader(\n        *request_headers_, connection_manager_.runtime_, connection_manager_.config_,\n        cached_route_.value().get());\n  }\n\n  filter_manager_.streamInfo().setRequestHeaders(*request_headers_);\n\n  const bool upgrade_rejected = filter_manager_.createFilterChain() 
== false;\n\n  // TODO if there are no filters when starting a filter iteration, the connection manager\n  // should return 404. The current returns no response if there is no router filter.\n  if (hasCachedRoute()) {\n    // Do not allow upgrades if the route does not support it.\n    if (upgrade_rejected) {\n      // While downstream servers should not send upgrade payload without the upgrade being\n      // accepted, err on the side of caution and refuse to process any further requests on this\n      // connection, to avoid a class of HTTP/1.1 smuggling bugs where Upgrade or CONNECT payload\n      // contains a smuggled HTTP request.\n      state_.saw_connection_close_ = true;\n      connection_manager_.stats_.named_.downstream_rq_ws_on_non_ws_route_.inc();\n      sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::Forbidden, \"\",\n                     nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().UpgradeFailed);\n      return;\n    }\n    // Allow non websocket requests to go through websocket enabled routes.\n  }\n\n  if (hasCachedRoute()) {\n    const Router::RouteEntry* route_entry = cached_route_.value()->routeEntry();\n    if (route_entry != nullptr && route_entry->idleTimeout()) {\n      // TODO(mattklein123): Technically if the cached route changes, we should also see if the\n      // route idle timeout has changed and update the value.\n      idle_timeout_ms_ = route_entry->idleTimeout().value();\n      response_encoder_->getStream().setFlushTimeout(idle_timeout_ms_);\n      if (idle_timeout_ms_.count()) {\n        // If we have a route-level idle timeout but no global stream idle timeout, create a timer.\n        if (stream_idle_timer_ == nullptr) {\n          stream_idle_timer_ =\n              connection_manager_.read_callbacks_->connection().dispatcher().createTimer(\n                  [this]() -> void { onIdleTimeout(); });\n        }\n      } else if (stream_idle_timer_ != nullptr) {\n        // If we had a 
global stream idle timeout but the route-level idle timeout is set to zero\n        // (to override), we disable the idle timer.\n        stream_idle_timer_->disableTimer();\n        stream_idle_timer_ = nullptr;\n      }\n    }\n  }\n\n  // Check if tracing is enabled at all.\n  if (connection_manager_.config_.tracingConfig()) {\n    traceRequest();\n  }\n\n  filter_manager_.decodeHeaders(*request_headers_, end_stream);\n\n  // Reset it here for both global and overridden cases.\n  resetIdleTimer();\n}\n\nvoid ConnectionManagerImpl::ActiveStream::traceRequest() {\n  Tracing::Decision tracing_decision =\n      Tracing::HttpTracerUtility::isTracing(filter_manager_.streamInfo(), *request_headers_);\n  ConnectionManagerImpl::chargeTracingStats(tracing_decision.reason,\n                                            connection_manager_.config_.tracingStats());\n\n  active_span_ = connection_manager_.tracer().startSpan(\n      *this, *request_headers_, filter_manager_.streamInfo(), tracing_decision);\n\n  if (!active_span_) {\n    return;\n  }\n\n  // TODO: Need to investigate the following code based on the cached route, as may\n  // be broken in the case a filter changes the route.\n\n  // If a decorator has been defined, apply it to the active span.\n  if (hasCachedRoute() && cached_route_.value()->decorator()) {\n    const Router::Decorator* decorator = cached_route_.value()->decorator();\n\n    decorator->apply(*active_span_);\n\n    state_.decorated_propagate_ = decorator->propagate();\n\n    // Cache decorated operation.\n    if (!decorator->getOperation().empty()) {\n      decorated_operation_ = &decorator->getOperation();\n    }\n  }\n\n  if (connection_manager_.config_.tracingConfig()->operation_name_ ==\n      Tracing::OperationName::Egress) {\n    // For egress (outbound) requests, pass the decorator's operation name (if defined and\n    // propagation enabled) as a request header to enable the receiving service to use it in its\n    // server span.\n    if 
(decorated_operation_ && state_.decorated_propagate_) {\n      request_headers_->setEnvoyDecoratorOperation(*decorated_operation_);\n    }\n  } else {\n    const HeaderEntry* req_operation_override = request_headers_->EnvoyDecoratorOperation();\n\n    // For ingress (inbound) requests, if a decorator operation name has been provided, it\n    // should be used to override the active span's operation.\n    if (req_operation_override) {\n      if (!req_operation_override->value().empty()) {\n        active_span_->setOperation(req_operation_override->value().getStringView());\n\n        // Clear the decorated operation so won't be used in the response header, as\n        // it has been overridden by the inbound decorator operation request header.\n        decorated_operation_ = nullptr;\n      }\n      // Remove header so not propagated to service\n      request_headers_->removeEnvoyDecoratorOperation();\n    }\n  }\n}\n\nvoid ConnectionManagerImpl::ActiveStream::decodeData(Buffer::Instance& data, bool end_stream) {\n  ScopeTrackerScopeState scope(this,\n                               connection_manager_.read_callbacks_->connection().dispatcher());\n  filter_manager_.maybeEndDecode(end_stream);\n  filter_manager_.streamInfo().addBytesReceived(data.length());\n\n  filter_manager_.decodeData(data, end_stream);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::decodeTrailers(RequestTrailerMapPtr&& trailers) {\n  ScopeTrackerScopeState scope(this,\n                               connection_manager_.read_callbacks_->connection().dispatcher());\n  resetIdleTimer();\n\n  ASSERT(!request_trailers_);\n  request_trailers_ = std::move(trailers);\n  filter_manager_.maybeEndDecode(true);\n  filter_manager_.decodeTrailers(*request_trailers_);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::decodeMetadata(MetadataMapPtr&& metadata_map) {\n  resetIdleTimer();\n  // After going through filters, the ownership of metadata_map will be passed to terminal filter.\n  // The terminal filter may 
encode metadata_map to the next hop immediately or store metadata_map\n  // and encode later when connection pool is ready.\n  filter_manager_.decodeMetadata(*metadata_map);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::disarmRequestTimeout() {\n  if (request_timer_) {\n    request_timer_->disableTimer();\n  }\n}\n\nvoid ConnectionManagerImpl::startDrainSequence() {\n  ASSERT(drain_state_ == DrainState::NotDraining);\n  drain_state_ = DrainState::Draining;\n  codec_->shutdownNotice();\n  drain_timer_ = read_callbacks_->connection().dispatcher().createTimer(\n      [this]() -> void { onDrainTimeout(); });\n  drain_timer_->enableTimer(config_.drainTimeout());\n}\n\nvoid ConnectionManagerImpl::ActiveStream::snapScopedRouteConfig() {\n  // NOTE: if a RDS subscription hasn't got a RouteConfiguration back, a Router::NullConfigImpl is\n  // returned, in that case we let it pass.\n  snapped_route_config_ = snapped_scoped_routes_config_->getRouteConfig(*request_headers_);\n  if (snapped_route_config_ == nullptr) {\n    ENVOY_STREAM_LOG(trace, \"can't find SRDS scope.\", *this);\n    // TODO(stevenzzzz): Consider to pass an error message to router filter, so that it can\n    // send back 404 with some more details.\n    snapped_route_config_ = std::make_shared<Router::NullConfigImpl>();\n  }\n}\n\nvoid ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { refreshCachedRoute(nullptr); }\n\nvoid ConnectionManagerImpl::ActiveStream::refreshDurationTimeout() {\n  if (!filter_manager_.streamInfo().route_entry_ || !request_headers_) {\n    return;\n  }\n  auto& route = filter_manager_.streamInfo().route_entry_;\n\n  auto grpc_timeout = Grpc::Common::getGrpcTimeout(*request_headers_);\n  std::chrono::milliseconds timeout;\n  bool disable_timer = false;\n\n  if (!grpc_timeout || !route->grpcTimeoutHeaderMax()) {\n    // Either there is no grpc-timeout header or special timeouts for it are not\n    // configured. 
Use stream duration.\n    if (route->maxStreamDuration()) {\n      timeout = route->maxStreamDuration().value();\n      if (timeout == std::chrono::milliseconds(0)) {\n        // Explicitly configured 0 means no timeout.\n        disable_timer = true;\n      }\n    } else {\n      // Fall back to HCM config. If no HCM duration limit exists, disable\n      // timers set by any prior route configuration.\n      const auto max_stream_duration = connection_manager_.config_.maxStreamDuration();\n      if (max_stream_duration.has_value() && max_stream_duration.value().count()) {\n        timeout = max_stream_duration.value();\n      } else {\n        disable_timer = true;\n      }\n    }\n  } else {\n    // Start with the timeout equal to the gRPC timeout header.\n    timeout = grpc_timeout.value();\n    // If there's a valid cap, apply it.\n    if (timeout > route->grpcTimeoutHeaderMax().value() &&\n        route->grpcTimeoutHeaderMax().value() != std::chrono::milliseconds(0)) {\n      timeout = route->grpcTimeoutHeaderMax().value();\n    }\n\n    // Apply the configured offset.\n    if (timeout != std::chrono::milliseconds(0) && route->grpcTimeoutHeaderOffset()) {\n      const auto offset = route->grpcTimeoutHeaderOffset().value();\n      if (offset < timeout) {\n        timeout -= offset;\n      } else {\n        timeout = std::chrono::milliseconds(0);\n      }\n    }\n  }\n\n  // Disable any existing timer if configured to do so.\n  if (disable_timer) {\n    if (max_stream_duration_timer_) {\n      max_stream_duration_timer_->disableTimer();\n    }\n    return;\n  }\n\n  // See how long this stream has been alive, and adjust the timeout\n  // accordingly.\n  std::chrono::duration time_used = std::chrono::duration_cast<std::chrono::milliseconds>(\n      connection_manager_.timeSource().monotonicTime() -\n      filter_manager_.streamInfo().startTimeMonotonic());\n  if (timeout > time_used) {\n    timeout -= time_used;\n  } else {\n    timeout = 
std::chrono::milliseconds(0);\n  }\n\n  // Finally create (if necessary) and enable the timer.\n  if (!max_stream_duration_timer_) {\n    max_stream_duration_timer_ =\n        connection_manager_.read_callbacks_->connection().dispatcher().createTimer(\n            [this]() -> void { onStreamMaxDurationReached(); });\n  }\n  max_stream_duration_timer_->enableTimer(timeout);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::refreshCachedRoute(const Router::RouteCallback& cb) {\n  Router::RouteConstSharedPtr route;\n  if (request_headers_ != nullptr) {\n    if (connection_manager_.config_.isRoutable() &&\n        connection_manager_.config_.scopedRouteConfigProvider() != nullptr) {\n      // NOTE: re-select scope as well in case the scope key header has been changed by a filter.\n      snapScopedRouteConfig();\n    }\n    if (snapped_route_config_ != nullptr) {\n      route = snapped_route_config_->route(cb, *request_headers_, filter_manager_.streamInfo(),\n                                           stream_id_);\n    }\n  }\n  filter_manager_.streamInfo().route_entry_ = route ? route->routeEntry() : nullptr;\n  cached_route_ = std::move(route);\n  if (nullptr == filter_manager_.streamInfo().route_entry_) {\n    cached_cluster_info_ = nullptr;\n  } else {\n    Upstream::ThreadLocalCluster* local_cluster = connection_manager_.cluster_manager_.get(\n        filter_manager_.streamInfo().route_entry_->clusterName());\n    cached_cluster_info_ = (nullptr == local_cluster) ? 
nullptr : local_cluster->info();\n  }\n\n  filter_manager_.streamInfo().setUpstreamClusterInfo(cached_cluster_info_.value());\n  refreshCachedTracingCustomTags();\n  refreshDurationTimeout();\n}\n\nvoid ConnectionManagerImpl::ActiveStream::refreshCachedTracingCustomTags() {\n  if (!connection_manager_.config_.tracingConfig()) {\n    return;\n  }\n  const Tracing::CustomTagMap& conn_manager_tags =\n      connection_manager_.config_.tracingConfig()->custom_tags_;\n  const Tracing::CustomTagMap* route_tags = nullptr;\n  if (hasCachedRoute() && cached_route_.value()->tracingConfig()) {\n    route_tags = &cached_route_.value()->tracingConfig()->getCustomTags();\n  }\n  const bool configured_in_conn = !conn_manager_tags.empty();\n  const bool configured_in_route = route_tags && !route_tags->empty();\n  if (!configured_in_conn && !configured_in_route) {\n    return;\n  }\n  Tracing::CustomTagMap& custom_tag_map = getOrMakeTracingCustomTagMap();\n  if (configured_in_route) {\n    custom_tag_map.insert(route_tags->begin(), route_tags->end());\n  }\n  if (configured_in_conn) {\n    custom_tag_map.insert(conn_manager_tags.begin(), conn_manager_tags.end());\n  }\n}\n\n// TODO(chaoqin-li1123): Make on demand vhds and on demand srds works at the same time.\nvoid ConnectionManagerImpl::ActiveStream::requestRouteConfigUpdate(\n    Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) {\n  route_config_update_requester_->requestRouteConfigUpdate(route_config_updated_cb);\n}\n\nabsl::optional<Router::ConfigConstSharedPtr> ConnectionManagerImpl::ActiveStream::routeConfig() {\n  if (connection_manager_.config_.routeConfigProvider() != nullptr) {\n    return absl::optional<Router::ConfigConstSharedPtr>(\n        connection_manager_.config_.routeConfigProvider()->config());\n  }\n  return {};\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onLocalReply(Code code) {\n  // The BadRequest error code indicates there has been a messaging error.\n  if 
(Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.hcm_stream_error_on_invalid_message\") &&\n      code == Http::Code::BadRequest && connection_manager_.codec_->protocol() < Protocol::Http2 &&\n      !response_encoder_->streamErrorOnInvalidHttpMessage()) {\n    state_.saw_connection_close_ = true;\n  }\n}\n\nvoid ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders(\n    ResponseHeaderMap& response_headers) {\n  // Strip the T-E headers etc. Defer other header additions as well as drain-close logic to the\n  // continuation headers.\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, request_headers_.get(),\n                                                  connection_manager_.config_, EMPTY_STRING);\n\n  // Count both the 1xx and follow-up response code in stats.\n  chargeStats(response_headers);\n\n  ENVOY_STREAM_LOG(debug, \"encoding 100 continue headers via codec:\\n{}\", *this, response_headers);\n\n  // Now actually encode via the codec.\n  response_encoder_->encode100ContinueHeaders(response_headers);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::encodeHeaders(ResponseHeaderMap& headers,\n                                                        bool end_stream) {\n  // Base headers.\n\n  // By default, always preserve the upstream date response header if present. 
If we choose to\n  // overwrite the upstream date unconditionally (a previous behavior), only do so if the response\n  // is not from cache\n  const bool should_preserve_upstream_date =\n      Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.preserve_upstream_date\") ||\n      filter_manager_.streamInfo().hasResponseFlag(\n          StreamInfo::ResponseFlag::ResponseFromCacheFilter);\n  if (!should_preserve_upstream_date || !headers.Date()) {\n    connection_manager_.config_.dateProvider().setDateHeader(headers);\n  }\n\n  // Following setReference() is safe because serverName() is constant for the life of the\n  // listener.\n  const auto transformation = connection_manager_.config_.serverHeaderTransformation();\n  if (transformation == ConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE ||\n      (transformation == ConnectionManagerConfig::HttpConnectionManagerProto::APPEND_IF_ABSENT &&\n       headers.Server() == nullptr)) {\n    headers.setReferenceServer(connection_manager_.config_.serverName());\n  }\n  ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(),\n                                                  connection_manager_.config_,\n                                                  connection_manager_.config_.via());\n\n  bool drain_connection_due_to_overload = false;\n  if (connection_manager_.drain_state_ == DrainState::NotDraining &&\n      connection_manager_.random_generator_.bernoulli(\n          connection_manager_.overload_disable_keepalive_ref_.value())) {\n    ENVOY_STREAM_LOG(debug, \"disabling keepalive due to envoy overload\", *this);\n    if (connection_manager_.codec_->protocol() < Protocol::Http2 ||\n        Runtime::runtimeFeatureEnabled(\n            \"envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2\")) {\n      drain_connection_due_to_overload = true;\n    }\n    connection_manager_.stats_.named_.downstream_cx_overload_disable_keepalive_.inc();\n  }\n\n  // See if 
we want to drain/close the connection. Send the go away frame prior to encoding the\n  // header block.\n  if (connection_manager_.drain_state_ == DrainState::NotDraining &&\n      (connection_manager_.drain_close_.drainClose() || drain_connection_due_to_overload)) {\n\n    // This doesn't really do anything for HTTP/1.1 other then give the connection another boost\n    // of time to race with incoming requests. For HTTP/2 connections, send a GOAWAY frame to\n    // prevent any new streams.\n    connection_manager_.startDrainSequence();\n    connection_manager_.stats_.named_.downstream_cx_drain_close_.inc();\n    ENVOY_STREAM_LOG(debug, \"drain closing connection\", *this);\n  }\n\n  if (connection_manager_.codec_->protocol() == Protocol::Http10) {\n    // As HTTP/1.0 and below can not do chunked encoding, if there is no content\n    // length the response will be framed by connection close.\n    if (!headers.ContentLength()) {\n      state_.saw_connection_close_ = true;\n    }\n    // If the request came with a keep-alive and no other factor resulted in a\n    // connection close header, send an explicit keep-alive header.\n    if (!state_.saw_connection_close_) {\n      headers.setConnection(Headers::get().ConnectionValues.KeepAlive);\n    }\n  }\n\n  if (connection_manager_.drain_state_ == DrainState::NotDraining && state_.saw_connection_close_) {\n    ENVOY_STREAM_LOG(debug, \"closing connection due to connection close header\", *this);\n    connection_manager_.drain_state_ = DrainState::Closing;\n  }\n\n  // If we are destroying a stream before remote is complete and the connection does not support\n  // multiplexing, we should disconnect since we don't want to wait around for the request to\n  // finish.\n  if (!filter_manager_.remoteComplete()) {\n    if (connection_manager_.codec_->protocol() < Protocol::Http2) {\n      connection_manager_.drain_state_ = DrainState::Closing;\n    }\n\n    
connection_manager_.stats_.named_.downstream_rq_response_before_rq_complete_.inc();\n  }\n\n  if (connection_manager_.drain_state_ != DrainState::NotDraining &&\n      connection_manager_.codec_->protocol() < Protocol::Http2) {\n    // If the connection manager is draining send \"Connection: Close\" on HTTP/1.1 connections.\n    // Do not do this for H2 (which drains via GOAWAY) or Upgrade or CONNECT (as the\n    // payload is no longer HTTP/1.1)\n    if (!Utility::isUpgrade(headers) &&\n        !HeaderUtility::isConnectResponse(request_headers_.get(), *responseHeaders())) {\n      headers.setReferenceConnection(Headers::get().ConnectionValues.Close);\n    }\n  }\n\n  if (connection_manager_.config_.tracingConfig()) {\n    if (connection_manager_.config_.tracingConfig()->operation_name_ ==\n        Tracing::OperationName::Ingress) {\n      // For ingress (inbound) responses, if the request headers do not include a\n      // decorator operation (override), and the decorated operation should be\n      // propagated, then pass the decorator's operation name (if defined)\n      // as a response header to enable the client service to use it in its client span.\n      if (decorated_operation_ && state_.decorated_propagate_) {\n        headers.setEnvoyDecoratorOperation(*decorated_operation_);\n      }\n    } else if (connection_manager_.config_.tracingConfig()->operation_name_ ==\n               Tracing::OperationName::Egress) {\n      const HeaderEntry* resp_operation_override = headers.EnvoyDecoratorOperation();\n\n      // For Egress (outbound) response, if a decorator operation name has been provided, it\n      // should be used to override the active span's operation.\n      if (resp_operation_override) {\n        if (!resp_operation_override->value().empty() && active_span_) {\n          active_span_->setOperation(resp_operation_override->value().getStringView());\n        }\n        // Remove header so not propagated to service.\n        
headers.removeEnvoyDecoratorOperation();\n      }\n    }\n  }\n\n  chargeStats(headers);\n\n  ENVOY_STREAM_LOG(debug, \"encoding headers via codec (end_stream={}):\\n{}\", *this, end_stream,\n                   headers);\n\n  // Now actually encode via the codec.\n  filter_manager_.streamInfo().onFirstDownstreamTxByteSent();\n  response_encoder_->encodeHeaders(headers, end_stream);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::encodeData(Buffer::Instance& data, bool end_stream) {\n  ENVOY_STREAM_LOG(trace, \"encoding data via codec (size={} end_stream={})\", *this, data.length(),\n                   end_stream);\n\n  filter_manager_.streamInfo().addBytesSent(data.length());\n  response_encoder_->encodeData(data, end_stream);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::encodeTrailers(ResponseTrailerMap& trailers) {\n  ENVOY_STREAM_LOG(debug, \"encoding trailers via codec:\\n{}\", *this, trailers);\n\n  response_encoder_->encodeTrailers(trailers);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::encodeMetadata(MetadataMapVector& metadata) {\n  ENVOY_STREAM_LOG(debug, \"encoding metadata via codec:\\n{}\", *this, metadata);\n  response_encoder_->encodeMetadata(metadata);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onDecoderFilterBelowWriteBufferLowWatermark() {\n  ENVOY_STREAM_LOG(debug, \"Read-enabling downstream stream due to filter callbacks.\", *this);\n  // If the state is destroyed, the codec's stream is already torn down. 
On\n  // teardown the codec will unwind any remaining read disable calls.\n  if (!filter_manager_.destroyed()) {\n    response_encoder_->getStream().readDisable(false);\n  }\n  connection_manager_.stats_.named_.downstream_flow_control_resumed_reading_total_.inc();\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onDecoderFilterAboveWriteBufferHighWatermark() {\n  ENVOY_STREAM_LOG(debug, \"Read-disabling downstream stream due to filter callbacks.\", *this);\n  response_encoder_->getStream().readDisable(true);\n  connection_manager_.stats_.named_.downstream_flow_control_paused_reading_total_.inc();\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason reset_reason,\n                                                        absl::string_view) {\n  // NOTE: This function gets called in all of the following cases:\n  //       1) We TX an app level reset\n  //       2) The codec TX a codec level reset\n  //       3) The codec RX a reset\n  //       If we need to differentiate we need to do it inside the codec. Can start with this.\n  ENVOY_STREAM_LOG(debug, \"stream reset\", *this);\n  connection_manager_.stats_.named_.downstream_rq_rx_reset_.inc();\n\n  // If the codec sets its responseDetails() for a reason other than peer reset, set a\n  // DownstreamProtocolError. 
Either way, propagate details.\n  const absl::string_view encoder_details = response_encoder_->getStream().responseDetails();\n  if (!encoder_details.empty() && reset_reason == StreamResetReason::LocalReset) {\n    filter_manager_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError);\n  }\n  if (!encoder_details.empty()) {\n    filter_manager_.streamInfo().setResponseCodeDetails(encoder_details);\n  }\n\n  connection_manager_.doDeferredStreamDestroy(*this);\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onAboveWriteBufferHighWatermark() {\n  ENVOY_STREAM_LOG(debug, \"Disabling upstream stream due to downstream stream watermark.\", *this);\n  filter_manager_.callHighWatermarkCallbacks();\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onBelowWriteBufferLowWatermark() {\n  ENVOY_STREAM_LOG(debug, \"Enabling upstream stream due to downstream stream watermark.\", *this);\n  filter_manager_.callLowWatermarkCallbacks();\n}\n\nTracing::OperationName ConnectionManagerImpl::ActiveStream::operationName() const {\n  return connection_manager_.config_.tracingConfig()->operation_name_;\n}\n\nconst Tracing::CustomTagMap* ConnectionManagerImpl::ActiveStream::customTags() const {\n  return tracing_custom_tags_.get();\n}\n\nbool ConnectionManagerImpl::ActiveStream::verbose() const {\n  return connection_manager_.config_.tracingConfig()->verbose_;\n}\n\nuint32_t ConnectionManagerImpl::ActiveStream::maxPathTagLength() const {\n  return connection_manager_.config_.tracingConfig()->max_path_tag_length_;\n}\n\nconst Router::RouteEntry::UpgradeMap* ConnectionManagerImpl::ActiveStream::upgradeMap() {\n  // We must check if the 'cached_route_' optional is populated since this function can be called\n  // early via sendLocalReply(), before the cached route is populated.\n  if (hasCachedRoute() && cached_route_.value()->routeEntry()) {\n    return &cached_route_.value()->routeEntry()->upgradeMap();\n  }\n\n  return nullptr;\n}\n\nTracing::Span& 
ConnectionManagerImpl::ActiveStream::activeSpan() {\n  if (active_span_) {\n    return *active_span_;\n  } else {\n    return Tracing::NullSpan::instance();\n  }\n}\n\nTracing::Config& ConnectionManagerImpl::ActiveStream::tracingConfig() { return *this; }\n\nconst ScopeTrackedObject& ConnectionManagerImpl::ActiveStream::scope() { return *this; }\n\nUpstream::ClusterInfoConstSharedPtr ConnectionManagerImpl::ActiveStream::clusterInfo() {\n  // NOTE: Refreshing route caches clusterInfo as well.\n  if (!cached_route_.has_value()) {\n    refreshCachedRoute();\n  }\n\n  return cached_cluster_info_.value();\n}\n\nRouter::RouteConstSharedPtr\nConnectionManagerImpl::ActiveStream::route(const Router::RouteCallback& cb) {\n  if (cached_route_.has_value()) {\n    return cached_route_.value();\n  }\n  refreshCachedRoute(cb);\n  return cached_route_.value();\n}\n\nvoid ConnectionManagerImpl::ActiveStream::clearRouteCache() {\n  cached_route_ = absl::optional<Router::RouteConstSharedPtr>();\n  cached_cluster_info_ = absl::optional<Upstream::ClusterInfoConstSharedPtr>();\n  if (tracing_custom_tags_) {\n    tracing_custom_tags_->clear();\n  }\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onRequestDataTooLarge() {\n  connection_manager_.stats_.named_.downstream_rq_too_large_.inc();\n}\n\nvoid ConnectionManagerImpl::ActiveStream::recreateStream(\n    StreamInfo::FilterStateSharedPtr filter_state) {\n  // n.b. we do not currently change the codecs to point at the new stream\n  // decoder because the decoder callbacks are complete. 
It would be good to\n  // null out that pointer but should not be necessary.\n  ResponseEncoder* response_encoder = response_encoder_;\n  response_encoder_ = nullptr;\n\n  response_encoder->getStream().removeCallbacks(*this);\n  // This functionally deletes the stream (via deferred delete) so do not\n  // reference anything beyond this point.\n  connection_manager_.doEndStream(*this);\n\n  RequestDecoder& new_stream = connection_manager_.newStream(*response_encoder, true);\n  // We don't need to copy over the old parent FilterState from the old StreamInfo if it did not\n  // store any objects with a LifeSpan at or above DownstreamRequest. This is to avoid unnecessary\n  // heap allocation.\n  // TODO(snowp): In the case where connection level filter state has been set on the connection\n  // FilterState that we inherit, we'll end up copying this every time even though we could get\n  // away with just resetting it to the HCM filter_state_.\n  if (filter_state->hasDataAtOrAboveLifeSpan(StreamInfo::FilterState::LifeSpan::Request)) {\n    (*connection_manager_.streams_.begin())->filter_manager_.streamInfo().filter_state_ =\n        std::make_shared<StreamInfo::FilterStateImpl>(\n            filter_state->parent(), StreamInfo::FilterState::LifeSpan::FilterChain);\n  }\n\n  new_stream.decodeHeaders(std::move(request_headers_), true);\n}\n\nHttp1StreamEncoderOptionsOptRef ConnectionManagerImpl::ActiveStream::http1StreamEncoderOptions() {\n  return response_encoder_->http1StreamEncoderOptions();\n}\n\nvoid ConnectionManagerImpl::ActiveStream::onResponseDataTooLarge() {\n  connection_manager_.stats_.named_.rs_too_large_.inc();\n}\n\nvoid ConnectionManagerImpl::ActiveStream::resetStream() {\n  connection_manager_.stats_.named_.downstream_rq_tx_reset_.inc();\n  connection_manager_.doEndStream(*this);\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/conn_manager_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <optional>\n#include <string>\n#include <vector>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/common/scope_tracker.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/http/api_listener.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/context.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/drain_decision.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/router/rds.h\"\n#include \"envoy/router/scopes.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/overload_manager.h\"\n#include \"envoy/ssl/connection.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stream_info/filter_state.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/dump_state_utils.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/conn_manager_config.h\"\n#include \"common/http/filter_manager.h\"\n#include \"common/http/user_agent.h\"\n#include \"common/http/utility.h\"\n#include \"common/local_reply/local_reply.h\"\n#include \"common/router/scoped_rds.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Implementation of both ConnectionManager and ServerConnectionCallbacks. 
This is a\n * Network::Filter that can be installed on a connection that will perform HTTP protocol agnostic\n * handling of a connection and all requests/pushes that occur on a connection.\n */\nclass ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>,\n                              public Network::ReadFilter,\n                              public ServerConnectionCallbacks,\n                              public Network::ConnectionCallbacks,\n                              public Http::ApiListener {\npublic:\n  ConnectionManagerImpl(ConnectionManagerConfig& config, const Network::DrainDecision& drain_close,\n                        Random::RandomGenerator& random_generator, Http::Context& http_context,\n                        Runtime::Loader& runtime, const LocalInfo::LocalInfo& local_info,\n                        Upstream::ClusterManager& cluster_manager,\n                        Server::OverloadManager& overload_manager, TimeSource& time_system);\n  ~ConnectionManagerImpl() override;\n\n  static ConnectionManagerStats generateStats(const std::string& prefix, Stats::Scope& scope);\n  static ConnectionManagerTracingStats generateTracingStats(const std::string& prefix,\n                                                            Stats::Scope& scope);\n  static void chargeTracingStats(const Tracing::Reason& tracing_reason,\n                                 ConnectionManagerTracingStats& tracing_stats);\n  static ConnectionManagerListenerStats generateListenerStats(const std::string& prefix,\n                                                              Stats::Scope& scope);\n  static const ResponseHeaderMap& continueHeader();\n\n  // Currently the ConnectionManager creates a codec lazily when either:\n  //   a) onConnection for H3.\n  //   b) onData for H1 and H2.\n  // With the introduction of ApiListeners, neither event occurs. 
This function allows consumer code\n  // to manually create a codec.\n  // TODO(junr03): consider passing a synthetic codec instead of creating once. The codec in the\n  // ApiListener case is solely used to determine the protocol version.\n  void createCodec(Buffer::Instance& data);\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override;\n\n  // Http::ConnectionCallbacks\n  void onGoAway(GoAwayErrorCode error_code) override;\n\n  // Http::ServerConnectionCallbacks\n  RequestDecoder& newStream(ResponseEncoder& response_encoder,\n                            bool is_internally_created = false) override;\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  // Pass connection watermark events on to all the streams associated with that connection.\n  void onAboveWriteBufferHighWatermark() override {\n    codec_->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n  }\n  void onBelowWriteBufferLowWatermark() override {\n    codec_->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n  }\n\n  TimeSource& timeSource() { return time_source_; }\n\nprivate:\n  struct ActiveStream;\n\n  class RdsRouteConfigUpdateRequester {\n  public:\n    RdsRouteConfigUpdateRequester(Router::RouteConfigProvider* route_config_provider,\n                                  ActiveStream& parent)\n        : route_config_provider_(route_config_provider), parent_(parent) {}\n\n    RdsRouteConfigUpdateRequester(Config::ConfigProvider* scoped_route_config_provider,\n                                  ActiveStream& parent)\n        // Expect the dynamic cast to succeed because only ScopedRdsConfigProvider is fully\n        // implemented. 
Inline provider will be cast to nullptr here but it is not full implemented\n        // and can't not be used at this point. Should change this implementation if we have a\n        // functional inline scope route provider in the future.\n        : scoped_route_config_provider_(\n              dynamic_cast<Router::ScopedRdsConfigProvider*>(scoped_route_config_provider)),\n          parent_(parent) {}\n\n    void\n    requestRouteConfigUpdate(Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb);\n    void requestVhdsUpdate(const std::string& host_header,\n                           Event::Dispatcher& thread_local_dispatcher,\n                           Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb);\n    void requestSrdsUpdate(Router::ScopeKeyPtr scope_key,\n                           Event::Dispatcher& thread_local_dispatcher,\n                           Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb);\n\n  private:\n    Router::RouteConfigProvider* route_config_provider_;\n    Router::ScopedRdsConfigProvider* scoped_route_config_provider_;\n    ActiveStream& parent_;\n  };\n\n  /**\n   * Wraps a single active stream on the connection. 
These are either full request/response pairs\n   * or pushes.\n   */\n  struct ActiveStream : LinkedObject<ActiveStream>,\n                        public Event::DeferredDeletable,\n                        public StreamCallbacks,\n                        public RequestDecoder,\n                        public Tracing::Config,\n                        public ScopeTrackedObject,\n                        public FilterManagerCallbacks {\n    ActiveStream(ConnectionManagerImpl& connection_manager, uint32_t buffer_limit);\n    void completeRequest();\n\n    void chargeStats(const ResponseHeaderMap& headers);\n    const Network::Connection* connection();\n    void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body,\n                        const std::function<void(ResponseHeaderMap& headers)>& modify_headers,\n                        const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                        absl::string_view details) override {\n      return filter_manager_.sendLocalReply(is_grpc_request, code, body, modify_headers,\n                                            grpc_status, details);\n    }\n    uint64_t streamId() { return stream_id_; }\n\n    // This is a helper function for encodeHeaders and responseDataTooLarge which allows for\n    // shared code for the two headers encoding paths. It does header munging, updates timing\n    // stats, and sends the headers to the encoder.\n    void encodeHeadersInternal(ResponseHeaderMap& headers, bool end_stream);\n    // This is a helper function for encodeData and responseDataTooLarge which allows for shared\n    // code for the two data encoding paths. 
It does stats updates and tracks potential end of\n    // stream.\n    void encodeDataInternal(Buffer::Instance& data, bool end_stream);\n\n    // Http::StreamCallbacks\n    void onResetStream(StreamResetReason reason,\n                       absl::string_view transport_failure_reason) override;\n    void onAboveWriteBufferHighWatermark() override;\n    void onBelowWriteBufferLowWatermark() override;\n\n    // Http::StreamDecoder\n    void decodeData(Buffer::Instance& data, bool end_stream) override;\n    void decodeMetadata(MetadataMapPtr&&) override;\n\n    // Http::RequestDecoder\n    void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override;\n    void decodeTrailers(RequestTrailerMapPtr&& trailers) override;\n\n    // Tracing::TracingConfig\n    Tracing::OperationName operationName() const override;\n    const Tracing::CustomTagMap* customTags() const override;\n    bool verbose() const override;\n    uint32_t maxPathTagLength() const override;\n\n    // ScopeTrackedObject\n    void dumpState(std::ostream& os, int indent_level = 0) const override {\n      const char* spaces = spacesForLevel(indent_level);\n      os << spaces << \"ActiveStream \" << this << DUMP_MEMBER(stream_id_);\n\n      DUMP_DETAILS(&filter_manager_);\n    }\n\n    // FilterManagerCallbacks\n    void encodeHeaders(ResponseHeaderMap& response_headers, bool end_stream) override;\n    void encode100ContinueHeaders(ResponseHeaderMap& response_headers) override;\n    void encodeData(Buffer::Instance& data, bool end_stream) override;\n    void encodeTrailers(ResponseTrailerMap& trailers) override;\n    void encodeMetadata(MetadataMapVector& metadata) override;\n    void setRequestTrailers(Http::RequestTrailerMapPtr&& request_trailers) override {\n      ASSERT(!request_trailers_);\n      request_trailers_ = std::move(request_trailers);\n    }\n    void setContinueHeaders(Http::ResponseHeaderMapPtr&& continue_headers) override {\n      ASSERT(!continue_headers_);\n      
continue_headers_ = std::move(continue_headers);\n    }\n    void setResponseHeaders(Http::ResponseHeaderMapPtr&& response_headers) override {\n      // We'll overwrite the headers in the case where we fail the stream after upstream headers\n      // have begun filter processing but before they have been sent downstream.\n      response_headers_ = std::move(response_headers);\n    }\n    void setResponseTrailers(Http::ResponseTrailerMapPtr&& response_trailers) override {\n      response_trailers_ = std::move(response_trailers);\n    }\n\n    // TODO(snowp): Create shared OptRef/OptConstRef helpers\n    Http::RequestHeaderMapOptRef requestHeaders() override {\n      return request_headers_ ? absl::make_optional(std::ref(*request_headers_)) : absl::nullopt;\n    }\n    Http::RequestTrailerMapOptRef requestTrailers() override {\n      return request_trailers_ ? absl::make_optional(std::ref(*request_trailers_)) : absl::nullopt;\n    }\n    Http::ResponseHeaderMapOptRef continueHeaders() override {\n      return continue_headers_ ? absl::make_optional(std::ref(*continue_headers_)) : absl::nullopt;\n    }\n    Http::ResponseHeaderMapOptRef responseHeaders() override {\n      return response_headers_ ? absl::make_optional(std::ref(*response_headers_)) : absl::nullopt;\n    }\n    Http::ResponseTrailerMapOptRef responseTrailers() override {\n      return response_trailers_ ? 
absl::make_optional(std::ref(*response_trailers_))\n                                : absl::nullopt;\n    }\n\n    void endStream() override {\n      ASSERT(!state_.codec_saw_local_complete_);\n      state_.codec_saw_local_complete_ = true;\n      filter_manager_.streamInfo().onLastDownstreamTxByteSent();\n      request_response_timespan_->complete();\n      connection_manager_.doEndStream(*this);\n    }\n    void onDecoderFilterBelowWriteBufferLowWatermark() override;\n    void onDecoderFilterAboveWriteBufferHighWatermark() override;\n    void upgradeFilterChainCreated() override {\n      connection_manager_.stats_.named_.downstream_cx_upgrades_total_.inc();\n      connection_manager_.stats_.named_.downstream_cx_upgrades_active_.inc();\n      state_.successful_upgrade_ = true;\n    }\n    void disarmRequestTimeout() override;\n    void resetIdleTimer() override;\n    void recreateStream(StreamInfo::FilterStateSharedPtr filter_state) override;\n    void resetStream() override;\n    const Router::RouteEntry::UpgradeMap* upgradeMap() override;\n    Upstream::ClusterInfoConstSharedPtr clusterInfo() override;\n    Router::RouteConstSharedPtr route(const Router::RouteCallback& cb) override;\n    void clearRouteCache() override;\n    absl::optional<Router::ConfigConstSharedPtr> routeConfig() override;\n    Tracing::Span& activeSpan() override;\n    void onResponseDataTooLarge() override;\n    void onRequestDataTooLarge() override;\n    Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override;\n    void onLocalReply(Code code) override;\n    Tracing::Config& tracingConfig() override;\n    const ScopeTrackedObject& scope() override;\n\n    void traceRequest();\n\n    // Updates the snapped_route_config_ (by reselecting scoped route configuration), if a scope is\n    // not found, snapped_route_config_ is set to Router::NullConfigImpl.\n    void snapScopedRouteConfig();\n\n    void refreshCachedRoute();\n    void refreshCachedRoute(const Router::RouteCallback& 
cb);\n    void requestRouteConfigUpdate(\n        Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) override;\n\n    void refreshCachedTracingCustomTags();\n    void refreshDurationTimeout();\n\n    // All state for the stream. Put here for readability.\n    struct State {\n      State()\n          : codec_saw_local_complete_(false), saw_connection_close_(false),\n            successful_upgrade_(false), is_internally_created_(false), decorated_propagate_(true) {}\n\n      bool codec_saw_local_complete_ : 1; // This indicates that local is complete as written all\n                                          // the way through to the codec.\n      bool saw_connection_close_ : 1;\n      bool successful_upgrade_ : 1;\n\n      // True if this stream is internally created. Currently only used for\n      // internal redirects or other streams created via recreateStream().\n      bool is_internally_created_ : 1;\n\n      bool decorated_propagate_ : 1;\n    };\n\n    // Per-stream idle timeout callback.\n    void onIdleTimeout();\n    // Per-stream request timeout callback.\n    void onRequestTimeout();\n    // Per-stream alive duration reached.\n    void onStreamMaxDurationReached();\n    bool hasCachedRoute() { return cached_route_.has_value() && cached_route_.value(); }\n\n    // Return local port of the connection.\n    uint32_t localPort();\n\n    friend std::ostream& operator<<(std::ostream& os, const ActiveStream& s) {\n      s.dumpState(os);\n      return os;\n    }\n\n    Tracing::CustomTagMap& getOrMakeTracingCustomTagMap() {\n      if (tracing_custom_tags_ == nullptr) {\n        tracing_custom_tags_ = std::make_unique<Tracing::CustomTagMap>();\n      }\n      return *tracing_custom_tags_;\n    }\n\n    ConnectionManagerImpl& connection_manager_;\n    // TODO(snowp): It might make sense to move this to the FilterManager to avoid storing it in\n    // both locations, then refer to the FM when doing stream logs.\n    const uint64_t stream_id_;\n\n    
RequestHeaderMapPtr request_headers_;\n    RequestTrailerMapPtr request_trailers_;\n\n    ResponseHeaderMapPtr continue_headers_;\n    ResponseHeaderMapPtr response_headers_;\n    ResponseTrailerMapPtr response_trailers_;\n\n    // Note: The FM must outlive the above headers, as they are possibly accessed during filter\n    // destruction.\n    FilterManager filter_manager_;\n\n    Router::ConfigConstSharedPtr snapped_route_config_;\n    Router::ScopedConfigConstSharedPtr snapped_scoped_routes_config_;\n    Tracing::SpanPtr active_span_;\n    ResponseEncoder* response_encoder_{};\n    Stats::TimespanPtr request_response_timespan_;\n    // Per-stream idle timeout.\n    Event::TimerPtr stream_idle_timer_;\n    // Per-stream request timeout.\n    Event::TimerPtr request_timer_;\n    // Per-stream alive duration.\n    Event::TimerPtr max_stream_duration_timer_;\n    std::chrono::milliseconds idle_timeout_ms_{};\n    State state_;\n    absl::optional<Router::RouteConstSharedPtr> cached_route_;\n    absl::optional<Upstream::ClusterInfoConstSharedPtr> cached_cluster_info_;\n    const std::string* decorated_operation_{nullptr};\n    std::unique_ptr<RdsRouteConfigUpdateRequester> route_config_update_requester_;\n    std::unique_ptr<Tracing::CustomTagMap> tracing_custom_tags_{nullptr};\n\n    friend FilterManager;\n  };\n\n  using ActiveStreamPtr = std::unique_ptr<ActiveStream>;\n\n  /**\n   * Check to see if the connection can be closed after gracefully waiting to send pending codec\n   * data.\n   */\n  void checkForDeferredClose();\n\n  /**\n   * Do a delayed destruction of a stream to allow for stack unwind. 
Also calls onDestroy() for\n   * each filter.\n   */\n  void doDeferredStreamDestroy(ActiveStream& stream);\n\n  /**\n   * Process a stream that is ending due to upstream response or reset.\n   */\n  void doEndStream(ActiveStream& stream);\n\n  void resetAllStreams(absl::optional<StreamInfo::ResponseFlag> response_flag,\n                       absl::string_view details);\n  void onIdleTimeout();\n  void onConnectionDurationTimeout();\n  void onDrainTimeout();\n  void startDrainSequence();\n  Tracing::HttpTracer& tracer() { return *config_.tracer(); }\n  void handleCodecError(absl::string_view error);\n  void doConnectionClose(absl::optional<Network::ConnectionCloseType> close_type,\n                         absl::optional<StreamInfo::ResponseFlag> response_flag,\n                         absl::string_view details);\n\n  enum class DrainState { NotDraining, Draining, Closing };\n\n  ConnectionManagerConfig& config_;\n  ConnectionManagerStats& stats_; // We store a reference here to avoid an extra stats() call on\n                                  // the config in the hot path.\n  ServerConnectionPtr codec_;\n  std::list<ActiveStreamPtr> streams_;\n  Stats::TimespanPtr conn_length_;\n  const Network::DrainDecision& drain_close_;\n  DrainState drain_state_{DrainState::NotDraining};\n  UserAgent user_agent_;\n  // An idle timer for the connection. This is only armed when there are no streams on the\n  // connection. When there are active streams it is disarmed in favor of each stream's\n  // stream_idle_timer_.\n  Event::TimerPtr connection_idle_timer_;\n  // A connection duration timer. 
Armed during handling new connection if enabled in config.\n  Event::TimerPtr connection_duration_timer_;\n  Event::TimerPtr drain_timer_;\n  Random::RandomGenerator& random_generator_;\n  Http::Context& http_context_;\n  Runtime::Loader& runtime_;\n  const LocalInfo::LocalInfo& local_info_;\n  Upstream::ClusterManager& cluster_manager_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  ConnectionManagerListenerStats& listener_stats_;\n  // References into the overload manager thread local state map. Using these lets us avoid a\n  // map lookup in the hot path of processing each request.\n  const Server::OverloadActionState& overload_stop_accepting_requests_ref_;\n  const Server::OverloadActionState& overload_disable_keepalive_ref_;\n  TimeSource& time_source_;\n  bool remote_close_{};\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/conn_manager_utility.cc",
    "content": "#include \"common/http/conn_manager_utility.h\"\n\n#include <atomic>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/http1/codec_impl.h\"\n#include \"common/http/http1/codec_impl_legacy.h\"\n#include \"common/http/http2/codec_impl.h\"\n#include \"common/http/http2/codec_impl_legacy.h\"\n#include \"common/http/path_utility.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nstd::string ConnectionManagerUtility::determineNextProtocol(Network::Connection& connection,\n                                                            const Buffer::Instance& data) {\n  if (!connection.nextProtocol().empty()) {\n    return connection.nextProtocol();\n  }\n\n  // See if the data we have so far shows the HTTP/2 prefix. We ignore the case where someone sends\n  // us the first few bytes of the HTTP/2 prefix since in all public cases we use SSL/ALPN. 
For\n  // internal cases this should practically never happen.\n  if (data.startsWith(Http2::CLIENT_MAGIC_PREFIX)) {\n    return Utility::AlpnNames::get().Http2;\n  }\n\n  return \"\";\n}\n\nServerConnectionPtr ConnectionManagerUtility::autoCreateCodec(\n    Network::Connection& connection, const Buffer::Instance& data,\n    ServerConnectionCallbacks& callbacks, Stats::Scope& scope, Random::RandomGenerator& random,\n    Http1::CodecStats::AtomicPtr& http1_codec_stats,\n    Http2::CodecStats::AtomicPtr& http2_codec_stats, const Http1Settings& http1_settings,\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n    uint32_t max_request_headers_kb, uint32_t max_request_headers_count,\n    envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n        headers_with_underscores_action) {\n  if (determineNextProtocol(connection, data) == Utility::AlpnNames::get().Http2) {\n    Http2::CodecStats& stats = Http2::CodecStats::atomicGet(http2_codec_stats, scope);\n    if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")) {\n      return std::make_unique<Http2::ServerConnectionImpl>(\n          connection, callbacks, stats, random, http2_options, max_request_headers_kb,\n          max_request_headers_count, headers_with_underscores_action);\n    } else {\n      return std::make_unique<Legacy::Http2::ServerConnectionImpl>(\n          connection, callbacks, stats, random, http2_options, max_request_headers_kb,\n          max_request_headers_count, headers_with_underscores_action);\n    }\n  } else {\n    Http1::CodecStats& stats = Http1::CodecStats::atomicGet(http1_codec_stats, scope);\n    if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")) {\n      return std::make_unique<Http1::ServerConnectionImpl>(\n          connection, stats, callbacks, http1_settings, max_request_headers_kb,\n          max_request_headers_count, headers_with_underscores_action);\n    } else {\n      
return std::make_unique<Legacy::Http1::ServerConnectionImpl>(\n          connection, stats, callbacks, http1_settings, max_request_headers_kb,\n          max_request_headers_count, headers_with_underscores_action);\n    }\n  }\n}\n\nNetwork::Address::InstanceConstSharedPtr ConnectionManagerUtility::mutateRequestHeaders(\n    RequestHeaderMap& request_headers, Network::Connection& connection,\n    ConnectionManagerConfig& config, const Router::Config& route_config,\n    const LocalInfo::LocalInfo& local_info) {\n  // If this is a Upgrade request, do not remove the Connection and Upgrade headers,\n  // as we forward them verbatim to the upstream hosts.\n  if (Utility::isUpgrade(request_headers)) {\n    // The current WebSocket implementation re-uses the HTTP1 codec to send upgrade headers to\n    // the upstream host. This adds the \"transfer-encoding: chunked\" request header if the stream\n    // has not ended and content-length does not exist. In HTTP1.1, if transfer-encoding and\n    // content-length both do not exist this means there is no request body. After transfer-encoding\n    // is stripped here, the upstream request becomes invalid. We can fix it by explicitly adding a\n    // \"content-length: 0\" request header here.\n    const bool no_body = (!request_headers.TransferEncoding() && !request_headers.ContentLength());\n    if (no_body) {\n      request_headers.setContentLength(uint64_t(0));\n    }\n  } else {\n    request_headers.removeConnection();\n    request_headers.removeUpgrade();\n  }\n\n  // Clean proxy headers.\n  request_headers.removeEnvoyInternalRequest();\n  request_headers.removeKeepAlive();\n  request_headers.removeProxyConnection();\n  request_headers.removeTransferEncoding();\n\n  // If we are \"using remote address\" this means that we create/append to XFF with our immediate\n  // peer. 
Cases where we don't \"use remote address\" include trusted double proxy where we expect\n  // our peer to have already properly set XFF, etc.\n  Network::Address::InstanceConstSharedPtr final_remote_address;\n  bool single_xff_address;\n  const uint32_t xff_num_trusted_hops = config.xffNumTrustedHops();\n\n  if (config.useRemoteAddress()) {\n    single_xff_address = request_headers.ForwardedFor() == nullptr;\n    // If there are any trusted proxies in front of this Envoy instance (as indicated by\n    // the xff_num_trusted_hops configuration option), get the trusted client address\n    // from the XFF before we append to XFF.\n    if (xff_num_trusted_hops > 0) {\n      final_remote_address =\n          Utility::getLastAddressFromXFF(request_headers, xff_num_trusted_hops - 1).address_;\n    }\n    // If there aren't any trusted proxies in front of this Envoy instance, or there\n    // are but they didn't populate XFF properly, the trusted client address is the\n    // source address of the immediate downstream's connection to us.\n    if (final_remote_address == nullptr) {\n      final_remote_address = connection.remoteAddress();\n    }\n    if (!config.skipXffAppend()) {\n      if (Network::Utility::isLoopbackAddress(*connection.remoteAddress())) {\n        Utility::appendXff(request_headers, config.localAddress());\n      } else {\n        Utility::appendXff(request_headers, *connection.remoteAddress());\n      }\n    }\n    // If the prior hop is not a trusted proxy, overwrite any x-forwarded-proto value it set as\n    // untrusted. Alternately if no x-forwarded-proto header exists, add one.\n    if (xff_num_trusted_hops == 0 || request_headers.ForwardedProto() == nullptr) {\n      request_headers.setReferenceForwardedProto(\n          connection.ssl() ? 
Headers::get().SchemeValues.Https : Headers::get().SchemeValues.Http);\n    }\n  } else {\n    // If we are not using remote address, attempt to pull a valid IPv4 or IPv6 address out of XFF.\n    // If we find one, it will be used as the downstream address for logging. It may or may not be\n    // used for determining internal/external status (see below).\n    auto ret = Utility::getLastAddressFromXFF(request_headers, xff_num_trusted_hops);\n    final_remote_address = ret.address_;\n    single_xff_address = ret.single_address_;\n  }\n\n  // If the x-forwarded-proto header is not set, set it here, since Envoy uses it for determining\n  // scheme and communicating it upstream.\n  if (!request_headers.ForwardedProto()) {\n    request_headers.setReferenceForwardedProto(connection.ssl() ? Headers::get().SchemeValues.Https\n                                                                : Headers::get().SchemeValues.Http);\n  }\n\n  // At this point we can determine whether this is an internal or external request. The\n  // determination of internal status uses the following:\n  // 1) After remote address/XFF appending, the XFF header must contain a *single* address.\n  // 2) The single address must be an internal address.\n  // 3) If configured to not use remote address, but no XFF header is available, even if the real\n  //    remote is internal, the request is considered external.\n  // HUGE WARNING: The way we do this is not optimal but is how it worked \"from the beginning\" so\n  //               we can't change it at this point. 
In the future we will likely need to add\n  //               additional inference modes and make this mode legacy.\n  const bool internal_request =\n      single_xff_address && final_remote_address != nullptr &&\n      config.internalAddressConfig().isInternalAddress(*final_remote_address);\n\n  // After determining internal request status, if there is no final remote address, due to no XFF,\n  // busted XFF, etc., use the direct connection remote address for logging.\n  if (final_remote_address == nullptr) {\n    final_remote_address = connection.remoteAddress();\n  }\n\n  // Edge request is the request from external clients to front Envoy.\n  // Request from front Envoy to the internal service will be treated as not edge request.\n  const bool edge_request = !internal_request && config.useRemoteAddress();\n\n  // If internal request, set header and do other internal only modifications.\n  if (internal_request) {\n    request_headers.setReferenceEnvoyInternalRequest(\n        Headers::get().EnvoyInternalRequestValues.True);\n  } else {\n    if (edge_request) {\n      request_headers.removeEnvoyDecoratorOperation();\n      request_headers.removeEnvoyDownstreamServiceCluster();\n      request_headers.removeEnvoyDownstreamServiceNode();\n    }\n\n    request_headers.removeEnvoyRetriableStatusCodes();\n    request_headers.removeEnvoyRetriableHeaderNames();\n    request_headers.removeEnvoyRetryOn();\n    request_headers.removeEnvoyRetryGrpcOn();\n    request_headers.removeEnvoyMaxRetries();\n    request_headers.removeEnvoyUpstreamAltStatName();\n    request_headers.removeEnvoyUpstreamRequestTimeoutMs();\n    request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs();\n    request_headers.removeEnvoyUpstreamRequestTimeoutAltResponse();\n    request_headers.removeEnvoyExpectedRequestTimeoutMs();\n    request_headers.removeEnvoyForceTrace();\n    request_headers.removeEnvoyIpTags();\n    request_headers.removeEnvoyOriginalUrl();\n    
request_headers.removeEnvoyHedgeOnPerTryTimeout();\n\n    for (const LowerCaseString& header : route_config.internalOnlyHeaders()) {\n      request_headers.remove(header);\n    }\n  }\n\n  if (config.userAgent()) {\n    request_headers.setEnvoyDownstreamServiceCluster(config.userAgent().value());\n    const HeaderEntry* user_agent_header = request_headers.UserAgent();\n    if (!user_agent_header || user_agent_header->value().empty()) {\n      // Following setReference() is safe because user agent is constant for the life of the\n      // listener.\n      request_headers.setReferenceUserAgent(config.userAgent().value());\n    }\n\n    // TODO(htuch): should this be under the config.userAgent() condition or in the outer scope?\n    if (!local_info.nodeName().empty()) {\n      // Following setReference() is safe because local info is constant for the life of the server.\n      request_headers.setReferenceEnvoyDownstreamServiceNode(local_info.nodeName());\n    }\n  }\n\n  if (!config.via().empty()) {\n    Utility::appendVia(request_headers, config.via());\n  }\n\n  // If we are an external request, AND we are \"using remote address\" (see above), we set\n  // x-envoy-external-address since this is our first ingress point into the trusted network.\n  if (edge_request && final_remote_address->type() == Network::Address::Type::Ip) {\n    request_headers.setEnvoyExternalAddress(final_remote_address->ip()->addressAsString());\n  }\n\n  // Generate x-request-id for all edge requests, or if there is none.\n  if (config.generateRequestId()) {\n    auto rid_extension = config.requestIDExtension();\n    // Unconditionally set a request ID if we are allowed to override it from\n    // the edge. 
Otherwise just ensure it is set.\n    const bool force_set = !config.preserveExternalRequestId() && edge_request;\n    rid_extension->set(request_headers, force_set);\n  }\n\n  mutateXfccRequestHeader(request_headers, connection, config);\n\n  return final_remote_address;\n}\n\nvoid ConnectionManagerUtility::mutateTracingRequestHeader(RequestHeaderMap& request_headers,\n                                                          Runtime::Loader& runtime,\n                                                          ConnectionManagerConfig& config,\n                                                          const Router::Route* route) {\n  if (!config.tracingConfig()) {\n    return;\n  }\n\n  auto rid_extension = config.requestIDExtension();\n  uint64_t result;\n  // Skip if request-id is corrupted, or non-existent\n  if (!rid_extension->modBy(request_headers, result, 10000)) {\n    return;\n  }\n\n  const envoy::type::v3::FractionalPercent* client_sampling =\n      &config.tracingConfig()->client_sampling_;\n  const envoy::type::v3::FractionalPercent* random_sampling =\n      &config.tracingConfig()->random_sampling_;\n  const envoy::type::v3::FractionalPercent* overall_sampling =\n      &config.tracingConfig()->overall_sampling_;\n\n  if (route && route->tracingConfig()) {\n    client_sampling = &route->tracingConfig()->getClientSampling();\n    random_sampling = &route->tracingConfig()->getRandomSampling();\n    overall_sampling = &route->tracingConfig()->getOverallSampling();\n  }\n\n  // Do not apply tracing transformations if we are currently tracing.\n  if (TraceStatus::NoTrace == rid_extension->getTraceStatus(request_headers)) {\n    if (request_headers.ClientTraceId() &&\n        runtime.snapshot().featureEnabled(\"tracing.client_enabled\", *client_sampling)) {\n      rid_extension->setTraceStatus(request_headers, TraceStatus::Client);\n    } else if (request_headers.EnvoyForceTrace()) {\n      rid_extension->setTraceStatus(request_headers, 
TraceStatus::Forced);\n    } else if (runtime.snapshot().featureEnabled(\"tracing.random_sampling\", *random_sampling,\n                                                 result)) {\n      rid_extension->setTraceStatus(request_headers, TraceStatus::Sampled);\n    }\n  }\n\n  if (!runtime.snapshot().featureEnabled(\"tracing.global_enabled\", *overall_sampling, result)) {\n    rid_extension->setTraceStatus(request_headers, TraceStatus::NoTrace);\n  }\n}\n\nvoid ConnectionManagerUtility::mutateXfccRequestHeader(RequestHeaderMap& request_headers,\n                                                       Network::Connection& connection,\n                                                       ConnectionManagerConfig& config) {\n  // When AlwaysForwardOnly is set, always forward the XFCC header without modification.\n  if (config.forwardClientCert() == ForwardClientCertType::AlwaysForwardOnly) {\n    return;\n  }\n  // When Sanitize is set, or the connection is not mutual TLS, remove the XFCC header.\n  if (config.forwardClientCert() == ForwardClientCertType::Sanitize ||\n      !(connection.ssl() && connection.ssl()->peerCertificatePresented())) {\n    request_headers.removeForwardedClientCert();\n    return;\n  }\n\n  // When ForwardOnly is set, always forward the XFCC header without modification.\n  if (config.forwardClientCert() == ForwardClientCertType::ForwardOnly) {\n    return;\n  }\n\n  // TODO(myidpt): Handle the special characters in By and URI fields.\n  // TODO: Optimize client_cert_details based on perf analysis (direct string appending may be more\n  // preferable).\n  std::vector<std::string> client_cert_details;\n  // When AppendForward or SanitizeSet is set, the client certificate information should be set into\n  // the XFCC header.\n  if (config.forwardClientCert() == ForwardClientCertType::AppendForward ||\n      config.forwardClientCert() == ForwardClientCertType::SanitizeSet) {\n    const auto uri_sans_local_cert = 
connection.ssl()->uriSanLocalCertificate();\n    if (!uri_sans_local_cert.empty()) {\n      client_cert_details.push_back(absl::StrCat(\"By=\", uri_sans_local_cert[0]));\n    }\n    const std::string cert_digest = connection.ssl()->sha256PeerCertificateDigest();\n    if (!cert_digest.empty()) {\n      client_cert_details.push_back(absl::StrCat(\"Hash=\", cert_digest));\n    }\n    for (const auto& detail : config.setCurrentClientCertDetails()) {\n      switch (detail) {\n      case ClientCertDetailsType::Cert: {\n        const std::string peer_cert = connection.ssl()->urlEncodedPemEncodedPeerCertificate();\n        if (!peer_cert.empty()) {\n          client_cert_details.push_back(absl::StrCat(\"Cert=\\\"\", peer_cert, \"\\\"\"));\n        }\n        break;\n      }\n      case ClientCertDetailsType::Chain: {\n        const std::string peer_chain = connection.ssl()->urlEncodedPemEncodedPeerCertificateChain();\n        if (!peer_chain.empty()) {\n          client_cert_details.push_back(absl::StrCat(\"Chain=\\\"\", peer_chain, \"\\\"\"));\n        }\n        break;\n      }\n      case ClientCertDetailsType::Subject:\n        // The \"Subject\" key still exists even if the subject is empty.\n        client_cert_details.push_back(\n            absl::StrCat(\"Subject=\\\"\", connection.ssl()->subjectPeerCertificate(), \"\\\"\"));\n        break;\n      case ClientCertDetailsType::URI: {\n        // The \"URI\" key still exists even if the URI is empty.\n        const auto sans = connection.ssl()->uriSanPeerCertificate();\n        const auto& uri_san = sans.empty() ? 
\"\" : sans[0];\n        client_cert_details.push_back(absl::StrCat(\"URI=\", uri_san));\n        break;\n      }\n      case ClientCertDetailsType::DNS: {\n        auto dns_sans = connection.ssl()->dnsSansPeerCertificate();\n        if (!dns_sans.empty()) {\n          for (const std::string& dns : dns_sans) {\n            client_cert_details.push_back(absl::StrCat(\"DNS=\", dns));\n          }\n        }\n        break;\n      }\n      }\n    }\n  }\n\n  const std::string client_cert_details_str = absl::StrJoin(client_cert_details, \";\");\n  if (config.forwardClientCert() == ForwardClientCertType::AppendForward) {\n    request_headers.appendForwardedClientCert(client_cert_details_str, \",\");\n  } else if (config.forwardClientCert() == ForwardClientCertType::SanitizeSet) {\n    request_headers.setForwardedClientCert(client_cert_details_str);\n  } else {\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid ConnectionManagerUtility::mutateResponseHeaders(ResponseHeaderMap& response_headers,\n                                                     const RequestHeaderMap* request_headers,\n                                                     ConnectionManagerConfig& config,\n                                                     const std::string& via) {\n  if (request_headers != nullptr && Utility::isUpgrade(*request_headers) &&\n      Utility::isUpgrade(response_headers)) {\n    // As in mutateRequestHeaders, Upgrade responses have special handling.\n    //\n    // Unlike mutateRequestHeaders there is no explicit protocol check. If Envoy is proxying an\n    // upgrade response it has already passed the protocol checks.\n    const bool no_body =\n        (!response_headers.TransferEncoding() && !response_headers.ContentLength());\n\n    const bool is_1xx = CodeUtility::is1xx(Utility::getResponseStatus(response_headers));\n\n    // We are explicitly forbidden from setting content-length for 1xx responses\n    // (RFC7230, Section 3.3.2). 
We ignore 204 because this is an upgrade.\n    if (no_body && !is_1xx) {\n      response_headers.setContentLength(uint64_t(0));\n    }\n  } else {\n    response_headers.removeConnection();\n    if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.fix_upgrade_response\")) {\n      response_headers.removeUpgrade();\n    }\n  }\n\n  response_headers.removeTransferEncoding();\n\n  if (request_headers != nullptr &&\n      (config.alwaysSetRequestIdInResponse() || request_headers->EnvoyForceTrace())) {\n    config.requestIDExtension()->setInResponse(response_headers, *request_headers);\n  }\n  response_headers.removeKeepAlive();\n  response_headers.removeProxyConnection();\n\n  if (!via.empty()) {\n    Utility::appendVia(response_headers, via);\n  }\n}\n\nbool ConnectionManagerUtility::maybeNormalizePath(RequestHeaderMap& request_headers,\n                                                  const ConnectionManagerConfig& config) {\n  if (!request_headers.Path()) {\n    return true; // It's as valid as it is going to get.\n  }\n  bool is_valid_path = true;\n  if (config.shouldNormalizePath()) {\n    is_valid_path = PathUtil::canonicalPath(request_headers);\n  }\n  // Merge slashes after path normalization to catch potential edge cases with percent encoding.\n  if (is_valid_path && config.shouldMergeSlashes()) {\n    PathUtil::mergeSlashes(request_headers);\n  }\n  return is_valid_path;\n}\n\nvoid ConnectionManagerUtility::maybeNormalizeHost(RequestHeaderMap& request_headers,\n                                                  const ConnectionManagerConfig& config,\n                                                  uint32_t port) {\n  if (config.shouldStripMatchingPort()) {\n    HeaderUtility::stripPortFromHost(request_headers, port);\n  }\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/conn_manager_utility.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <cstdint>\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/http/conn_manager_impl.h\"\n#include \"common/http/http1/codec_stats.h\"\n#include \"common/http/http2/codec_stats.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Connection manager utilities split out for ease of testing.\n */\nclass ConnectionManagerUtility {\npublic:\n  /**\n   * Determine the next protocol to used based both on ALPN as well as protocol inspection.\n   * @param connection supplies the connection to determine a protocol for.\n   * @param data supplies the currently available read data on the connection.\n   */\n  static std::string determineNextProtocol(Network::Connection& connection,\n                                           const Buffer::Instance& data);\n\n  /**\n   * Create an HTTP codec given the connection and the beginning of the incoming data.\n   * @param connection supplies the connection.\n   * @param data supplies the initial data supplied by the client.\n   * @param callbacks supplies the codec callbacks.\n   * @param scope supplies the stats scope for codec stats.\n   * @param http1_settings supplies the HTTP/1 settings to use if HTTP/1 is chosen.\n   * @param http2_settings supplies the HTTP/2 settings to use if HTTP/2 is chosen.\n   */\n  static ServerConnectionPtr\n  autoCreateCodec(Network::Connection& connection, const Buffer::Instance& data,\n                  ServerConnectionCallbacks& callbacks, Stats::Scope& scope,\n                  Random::RandomGenerator& random, Http1::CodecStats::AtomicPtr& http1_codec_stats,\n                  Http2::CodecStats::AtomicPtr& http2_codec_stats,\n                  const Http1Settings& http1_settings,\n                  const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                  uint32_t max_request_headers_kb, uint32_t max_request_headers_count,\n                  
envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n                      headers_with_underscores_action);\n\n  /**\n   * Mutates request headers in various ways. This functionality is broken out because of its\n   * complexity for ease of testing. See the method itself for detailed comments on what\n   * mutations are performed.\n   *\n   * Note this function may be called twice on the response path if there are\n   * 100-Continue headers.\n   *\n   * @return the final trusted remote address. This depends on various settings and the\n   *         existence of the x-forwarded-for header. Again see the method for more details.\n   */\n  static Network::Address::InstanceConstSharedPtr\n  mutateRequestHeaders(RequestHeaderMap& request_headers, Network::Connection& connection,\n                       ConnectionManagerConfig& config, const Router::Config& route_config,\n                       const LocalInfo::LocalInfo& local_info);\n\n  static void mutateResponseHeaders(ResponseHeaderMap& response_headers,\n                                    const RequestHeaderMap* request_headers,\n                                    ConnectionManagerConfig& config, const std::string& via);\n\n  // Sanitize the path in the header map if the path exists and it is forced by config.\n  // Side affect: the string view of Path header is invalidated.\n  // Return false if error happens during the sanitization.\n  // Returns true if there is no path.\n  static bool maybeNormalizePath(RequestHeaderMap& request_headers,\n                                 const ConnectionManagerConfig& config);\n\n  static void maybeNormalizeHost(RequestHeaderMap& request_headers,\n                                 const ConnectionManagerConfig& config, uint32_t port);\n\n  /**\n   * Mutate request headers if request needs to be traced.\n   */\n  static void mutateTracingRequestHeader(RequestHeaderMap& request_headers,\n                                         Runtime::Loader& runtime, 
ConnectionManagerConfig& config,\n                                         const Router::Route* route);\n\nprivate:\n  static void mutateXfccRequestHeader(RequestHeaderMap& request_headers,\n                                      Network::Connection& connection,\n                                      ConnectionManagerConfig& config);\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/conn_pool_base.cc",
    "content": "#include \"common/http/conn_pool_base.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/stats/timespan_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nNetwork::TransportSocketOptionsSharedPtr\nwrapTransportSocketOptions(Network::TransportSocketOptionsSharedPtr transport_socket_options,\n                           Protocol protocol) {\n  if (!Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.http_default_alpn\")) {\n    return transport_socket_options;\n  }\n\n  // If configured to do so, we override the ALPN to use for the upstream connection to match the\n  // selected protocol.\n  std::string alpn;\n  switch (protocol) {\n  case Http::Protocol::Http10:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  case Http::Protocol::Http11:\n    alpn = Http::Utility::AlpnNames::get().Http11;\n    break;\n  case Http::Protocol::Http2:\n    alpn = Http::Utility::AlpnNames::get().Http2;\n    break;\n  case Http::Protocol::Http3:\n    // TODO(snowp): Add once HTTP/3 upstream support is added.\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    break;\n  }\n\n  if (transport_socket_options) {\n    return std::make_shared<Network::AlpnDecoratingTransportSocketOptions>(\n        std::move(alpn), transport_socket_options);\n  } else {\n    return std::make_shared<Network::TransportSocketOptionsImpl>(\n        \"\", std::vector<std::string>{}, std::vector<std::string>{}, std::move(alpn));\n  }\n}\n\nHttpConnPoolImplBase::HttpConnPoolImplBase(\n    Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n    Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options,\n    const Network::TransportSocketOptionsSharedPtr& transport_socket_options,\n    Http::Protocol protocol)\n    : Envoy::ConnectionPool::ConnPoolImplBase(\n          
host, priority, dispatcher, options,\n          wrapTransportSocketOptions(transport_socket_options, protocol)) {}\n\nConnectionPool::Cancellable*\nHttpConnPoolImplBase::newStream(Http::ResponseDecoder& response_decoder,\n                                Http::ConnectionPool::Callbacks& callbacks) {\n  HttpAttachContext context({&response_decoder, &callbacks});\n  return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context);\n}\n\nbool HttpConnPoolImplBase::hasActiveConnections() const {\n  return (!pending_streams_.empty() || (num_active_streams_ > 0));\n}\n\nConnectionPool::Cancellable*\nHttpConnPoolImplBase::newPendingStream(Envoy::ConnectionPool::AttachContext& context) {\n  Http::ResponseDecoder& decoder = *typedContext<HttpAttachContext>(context).decoder_;\n  Http::ConnectionPool::Callbacks& callbacks = *typedContext<HttpAttachContext>(context).callbacks_;\n  ENVOY_LOG(debug, \"queueing stream due to no available connections\");\n  Envoy::ConnectionPool::PendingStreamPtr pending_stream(\n      new HttpPendingStream(*this, decoder, callbacks));\n  LinkedList::moveIntoList(std::move(pending_stream), pending_streams_);\n  return pending_streams_.front().get();\n}\n\nvoid HttpConnPoolImplBase::onPoolReady(Envoy::ConnectionPool::ActiveClient& client,\n                                       Envoy::ConnectionPool::AttachContext& context) {\n  ActiveClient* http_client = static_cast<ActiveClient*>(&client);\n  auto& http_context = typedContext<HttpAttachContext>(context);\n  Http::ResponseDecoder& response_decoder = *http_context.decoder_;\n  Http::ConnectionPool::Callbacks& callbacks = *http_context.callbacks_;\n  Http::RequestEncoder& new_encoder = http_client->newStreamEncoder(response_decoder);\n  callbacks.onPoolReady(new_encoder, client.real_host_description_,\n                        http_client->codec_client_->streamInfo());\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/conn_pool_base.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/conn_pool.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/common/linked_object.h\"\n#include \"common/conn_pool/conn_pool_base.h\"\n#include \"common/http/codec_client.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nstruct HttpAttachContext : public Envoy::ConnectionPool::AttachContext {\n  HttpAttachContext(Http::ResponseDecoder* d, Http::ConnectionPool::Callbacks* c)\n      : decoder_(d), callbacks_(c) {}\n  Http::ResponseDecoder* decoder_;\n  Http::ConnectionPool::Callbacks* callbacks_;\n};\n\n// An implementation of Envoy::ConnectionPool::PendingStream for HTTP/1.1 and HTTP/2\nclass HttpPendingStream : public Envoy::ConnectionPool::PendingStream {\npublic:\n  // OnPoolSuccess for HTTP requires both the decoder and callbacks. OnPoolFailure\n  // requires only the callbacks, but passes both for consistency.\n  HttpPendingStream(Envoy::ConnectionPool::ConnPoolImplBase& parent, Http::ResponseDecoder& decoder,\n                    Http::ConnectionPool::Callbacks& callbacks)\n      : Envoy::ConnectionPool::PendingStream(parent), context_(&decoder, &callbacks) {}\n\n  Envoy::ConnectionPool::AttachContext& context() override { return context_; }\n  HttpAttachContext context_;\n};\n\n// An implementation of Envoy::ConnectionPool::ConnPoolImplBase for shared code\n// between HTTP/1.1 and HTTP/2\nclass HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase,\n                             public Http::ConnectionPool::Instance {\npublic:\n  HttpConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n                       Event::Dispatcher& dispatcher,\n                       const Network::ConnectionSocket::OptionsSharedPtr& options,\n                       const Network::TransportSocketOptionsSharedPtr& transport_socket_options,\n          
             Http::Protocol protocol);\n\n  // ConnectionPool::Instance\n  void addDrainedCallback(DrainedCb cb) override { addDrainedCallbackImpl(cb); }\n  void drainConnections() override { drainConnectionsImpl(); }\n  Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }\n  ConnectionPool::Cancellable* newStream(Http::ResponseDecoder& response_decoder,\n                                         Http::ConnectionPool::Callbacks& callbacks) override;\n  bool maybePrefetch(float ratio) override {\n    return Envoy::ConnectionPool::ConnPoolImplBase::maybePrefetch(ratio);\n  }\n  bool hasActiveConnections() const override;\n\n  // Creates a new PendingStream and enqueues it into the queue.\n  ConnectionPool::Cancellable*\n  newPendingStream(Envoy::ConnectionPool::AttachContext& context) override;\n  void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description,\n                     absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason,\n                     Envoy::ConnectionPool::AttachContext& context) override {\n    auto* callbacks = typedContext<HttpAttachContext>(context).callbacks_;\n    callbacks->onPoolFailure(reason, failure_reason, host_description);\n  }\n  void onPoolReady(Envoy::ConnectionPool::ActiveClient& client,\n                   Envoy::ConnectionPool::AttachContext& context) override;\n\n  virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE;\n};\n\n// An implementation of Envoy::ConnectionPool::ActiveClient for HTTP/1.1 and HTTP/2\nclass ActiveClient : public Envoy::ConnectionPool::ActiveClient {\npublic:\n  ActiveClient(HttpConnPoolImplBase& parent, uint64_t lifetime_stream_limit,\n               uint64_t concurrent_stream_limit)\n      : Envoy::ConnectionPool::ActiveClient(parent, lifetime_stream_limit,\n                                            concurrent_stream_limit) {\n    Upstream::Host::CreateConnectionData data = 
parent_.host()->createConnection(\n        parent_.dispatcher(), parent_.socketOptions(), parent_.transportSocketOptions());\n    real_host_description_ = data.host_description_;\n    codec_client_ = parent.createCodecClient(data);\n    codec_client_->addConnectionCallbacks(*this);\n    codec_client_->setConnectionStats(\n        {parent_.host()->cluster().stats().upstream_cx_rx_bytes_total_,\n         parent_.host()->cluster().stats().upstream_cx_rx_bytes_buffered_,\n         parent_.host()->cluster().stats().upstream_cx_tx_bytes_total_,\n         parent_.host()->cluster().stats().upstream_cx_tx_bytes_buffered_,\n         &parent_.host()->cluster().stats().bind_errors_, nullptr});\n  }\n  void close() override { codec_client_->close(); }\n  virtual Http::RequestEncoder& newStreamEncoder(Http::ResponseDecoder& response_decoder) PURE;\n  void onEvent(Network::ConnectionEvent event) override {\n    parent_.onConnectionEvent(*this, codec_client_->connectionFailureReason(), event);\n  }\n  size_t numActiveStreams() const override { return codec_client_->numActiveRequests(); }\n  uint64_t id() const override { return codec_client_->id(); }\n\n  Http::CodecClientPtr codec_client_;\n};\n\n} // namespace Http\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/context_impl.cc",
    "content": "#include \"common/http/context_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nContextImpl::ContextImpl(Stats::SymbolTable& symbol_table)\n    : code_stats_(symbol_table), user_agent_context_(symbol_table) {}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/context_impl.h",
    "content": "#pragma once\n\n#include \"envoy/http/context.h\"\n\n#include \"common/http/codes.h\"\n#include \"common/http/user_agent.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Captures http-related structures with cardinality of one per server.\n */\nclass ContextImpl : public Context {\npublic:\n  explicit ContextImpl(Stats::SymbolTable& symbol_table);\n  ~ContextImpl() override = default;\n\n  const envoy::config::trace::v3::Tracing& defaultTracingConfig() override {\n    return default_tracing_config_;\n  }\n\n  CodeStats& codeStats() override { return code_stats_; }\n\n  void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) {\n    default_tracing_config_ = tracing_config;\n  }\n\n  const UserAgentContext& userAgentContext() const override { return user_agent_context_; }\n\nprivate:\n  CodeStatsImpl code_stats_;\n  UserAgentContext user_agent_context_;\n  envoy::config::trace::v3::Tracing default_tracing_config_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/date_provider.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/http/header_map.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Fills headers with a date header.\n */\nclass DateProvider {\npublic:\n  virtual ~DateProvider() = default;\n\n  /**\n   * Set the Date header potentially using a cached value.\n   * @param headers supplies the headers to fill.\n   */\n  virtual void setDateHeader(ResponseHeaderMap& headers) PURE;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/date_provider_impl.cc",
    "content": "#include \"common/http/date_provider_impl.h\"\n\n#include <chrono>\n#include <string>\n\nnamespace Envoy {\nnamespace Http {\n\nDateFormatter DateProviderImplBase::date_formatter_(\"%a, %d %b %Y %H:%M:%S GMT\");\n\nTlsCachingDateProviderImpl::TlsCachingDateProviderImpl(Event::Dispatcher& dispatcher,\n                                                       ThreadLocal::SlotAllocator& tls)\n    : DateProviderImplBase(dispatcher.timeSource()), tls_(tls.allocateSlot()),\n      refresh_timer_(dispatcher.createTimer([this]() -> void { onRefreshDate(); })) {\n\n  onRefreshDate();\n}\n\nvoid TlsCachingDateProviderImpl::onRefreshDate() {\n  std::string new_date_string = date_formatter_.now(time_source_);\n  tls_->set([new_date_string](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<ThreadLocalCachedDate>(new_date_string);\n  });\n\n  refresh_timer_->enableTimer(std::chrono::milliseconds(500));\n}\n\nvoid TlsCachingDateProviderImpl::setDateHeader(ResponseHeaderMap& headers) {\n  headers.setDate(tls_->getTyped<ThreadLocalCachedDate>().date_string_);\n}\n\nvoid SlowDateProviderImpl::setDateHeader(ResponseHeaderMap& headers) {\n  headers.setDate(date_formatter_.now(time_source_));\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/date_provider_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/utility.h\"\n\n#include \"date_provider.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Base for all providers.\n */\nclass DateProviderImplBase : public DateProvider {\npublic:\n  explicit DateProviderImplBase(TimeSource& time_source) : time_source_(time_source) {}\n\nprotected:\n  static DateFormatter date_formatter_;\n  TimeSource& time_source_;\n};\n\n/**\n * A caching thread local provider. This implementation updates the date string every 500ms and\n * caches on each thread.\n */\nclass TlsCachingDateProviderImpl : public DateProviderImplBase, public Singleton::Instance {\npublic:\n  TlsCachingDateProviderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls);\n\n  // Http::DateProvider\n  void setDateHeader(ResponseHeaderMap& headers) override;\n\nprivate:\n  struct ThreadLocalCachedDate : public ThreadLocal::ThreadLocalObject {\n    ThreadLocalCachedDate(const std::string& date_string) : date_string_(date_string) {}\n\n    const std::string date_string_;\n  };\n\n  void onRefreshDate();\n\n  ThreadLocal::SlotPtr tls_;\n  Event::TimerPtr refresh_timer_;\n};\n\n/**\n * A basic provider that just creates the date string every time.\n */\nclass SlowDateProviderImpl : public DateProviderImplBase {\n  using DateProviderImplBase::DateProviderImplBase;\n\npublic:\n  // Http::DateProvider\n  void setDateHeader(ResponseHeaderMap& headers) override;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/default_server_string.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/common/macros.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass DefaultServerString {\npublic:\n  /**\n   * @return the default HTTP server header string.\n   */\n  static const std::string& get() { CONSTRUCT_ON_FIRST_USE(std::string, \"envoy\"); }\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/exception.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Indicates a non-recoverable protocol error that should result in connection termination.\n */\nclass CodecProtocolException : public EnvoyException {\npublic:\n  CodecProtocolException(const std::string& message) : EnvoyException(message) {}\n};\n\n/**\n * Raised when outbound frame queue flood is detected.\n */\nclass FrameFloodException : public CodecProtocolException {\npublic:\n  FrameFloodException(const std::string& message) : CodecProtocolException(message) {}\n};\n\n/**\n * Raised when a response is received on a connection that did not send a request. In practice\n * this can only happen on HTTP/1.1 connections.\n */\nclass PrematureResponseException : public EnvoyException {\npublic:\n  PrematureResponseException(Http::Code response_code)\n      : EnvoyException(\"\"), response_code_(response_code) {}\n\n  Http::Code responseCode() { return response_code_; }\n\nprivate:\n  const Http::Code response_code_;\n};\n\n/**\n * Indicates a client (local) side error which should not happen.\n */\nclass CodecClientException : public EnvoyException {\npublic:\n  CodecClientException(const std::string& message) : EnvoyException(message) {}\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/filter_manager.cc",
    "content": "#include \"common/http/filter_manager.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/scope_tracker.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nnamespace {\n\ntemplate <class T> using FilterList = std::list<std::unique_ptr<T>>;\n\n// Shared helper for recording the latest filter used.\ntemplate <class T>\nvoid recordLatestDataFilter(const typename FilterList<T>::iterator current_filter,\n                            T*& latest_filter, const FilterList<T>& filters) {\n  // If this is the first time we're calling onData, just record the current filter.\n  if (latest_filter == nullptr) {\n    latest_filter = current_filter->get();\n    return;\n  }\n\n  // We want to keep this pointing at the latest filter in the filter list that has received the\n  // onData callback. To do so, we compare the current latest with the *previous* filter. If they\n  // match, then we must be processing a new filter for the first time. 
We omit this check if we're\n  // the first filter, since the above check handles that case.\n  //\n  // We compare against the previous filter to avoid multiple filter iterations from resetting the\n  // pointer: If we just set latest to current, then the first onData filter iteration would\n  // correctly iterate over the filters and set latest, but on subsequent onData iterations\n  // we'd start from the beginning again, potentially allowing filter N to modify the buffer even\n  // though filter M > N was the filter that inserted data into the buffer.\n  if (current_filter != filters.begin() && latest_filter == std::prev(current_filter)->get()) {\n    latest_filter = current_filter->get();\n  }\n}\n\n} // namespace\n\nvoid ActiveStreamFilterBase::commonContinue() {\n  // TODO(mattklein123): Raise an error if this is called during a callback.\n  if (!canContinue()) {\n    ENVOY_STREAM_LOG(trace, \"cannot continue filter chain: filter={}\", *this,\n                     static_cast<const void*>(this));\n    return;\n  }\n\n  ENVOY_STREAM_LOG(trace, \"continuing filter chain: filter={}\", *this,\n                   static_cast<const void*>(this));\n  ASSERT(!canIterate(),\n         \"Attempting to continue iteration while the IterationState is already Continue\");\n  // If iteration has stopped for all frame types, set iterate_from_current_filter_ to true so the\n  // filter iteration starts with the current filter instead of the next one.\n  if (stoppedAll()) {\n    iterate_from_current_filter_ = true;\n  }\n  allowIteration();\n\n  // Only resume with do100ContinueHeaders() if we've actually seen a 100-Continue.\n  if (has100Continueheaders()) {\n    continue_headers_continued_ = true;\n    do100ContinueHeaders();\n    // If the response headers have not yet come in, don't continue on with\n    // headers and body. 
doHeaders expects request headers to exist.\n    if (!parent_.filter_manager_callbacks_.responseHeaders()) {\n      return;\n    }\n  }\n\n  // Make sure that we handle the zero byte data frame case. We make no effort to optimize this\n  // case in terms of merging it into a header only request/response. This could be done in the\n  // future.\n  if (!headers_continued_) {\n    headers_continued_ = true;\n    doHeaders(complete() && !bufferedData() && !hasTrailers());\n  }\n\n  doMetadata();\n\n  if (bufferedData()) {\n    doData(complete() && !hasTrailers());\n  }\n\n  if (hasTrailers()) {\n    doTrailers();\n  }\n\n  iterate_from_current_filter_ = false;\n}\n\nbool ActiveStreamFilterBase::commonHandleAfter100ContinueHeadersCallback(\n    FilterHeadersStatus status) {\n  ASSERT(parent_.state_.has_continue_headers_);\n  ASSERT(!continue_headers_continued_);\n  ASSERT(canIterate());\n\n  if (status == FilterHeadersStatus::StopIteration) {\n    iteration_state_ = IterationState::StopSingleIteration;\n    return false;\n  } else {\n    ASSERT(status == FilterHeadersStatus::Continue);\n    continue_headers_continued_ = true;\n    return true;\n  }\n}\n\nbool ActiveStreamFilterBase::commonHandleAfterHeadersCallback(FilterHeadersStatus status,\n                                                              bool& end_stream,\n                                                              bool& headers_only) {\n  ASSERT(!headers_continued_);\n  ASSERT(canIterate());\n\n  switch (status) {\n  case FilterHeadersStatus::StopIteration:\n    iteration_state_ = IterationState::StopSingleIteration;\n    break;\n  case FilterHeadersStatus::StopAllIterationAndBuffer:\n    iteration_state_ = IterationState::StopAllBuffer;\n    break;\n  case FilterHeadersStatus::StopAllIterationAndWatermark:\n    iteration_state_ = IterationState::StopAllWatermark;\n    break;\n  case FilterHeadersStatus::ContinueAndEndStream:\n    // Set headers_only to true so we know to end early if necessary,\n    
// but continue filter iteration so we actually write the headers/run the cleanup code.\n    headers_only = true;\n    ENVOY_STREAM_LOG(debug, \"converting to headers only\", parent_);\n    break;\n  case FilterHeadersStatus::ContinueAndDontEndStream:\n    headers_only = false;\n    end_stream = false;\n    headers_continued_ = true;\n    ENVOY_STREAM_LOG(debug, \"converting to headers and body (body not available yet)\", parent_);\n    break;\n  case FilterHeadersStatus::Continue:\n    headers_continued_ = true;\n    break;\n  }\n\n  handleMetadataAfterHeadersCallback();\n\n  if (stoppedAll() || status == FilterHeadersStatus::StopIteration) {\n    return false;\n  } else {\n    return true;\n  }\n}\n\nvoid ActiveStreamFilterBase::commonHandleBufferData(Buffer::Instance& provided_data) {\n\n  // The way we do buffering is a little complicated which is why we have this common function\n  // which is used for both encoding and decoding. When data first comes into our filter pipeline,\n  // we send it through. Any filter can choose to stop iteration and buffer or not. If we then\n  // continue iteration in the future, we use the buffered data. A future filter can stop and\n  // buffer again. 
In this case, since we are already operating on buffered data, we don't\n  // rebuffer, because we assume the filter has modified the buffer as it wishes in place.\n  if (bufferedData().get() != &provided_data) {\n    if (!bufferedData()) {\n      bufferedData() = createBuffer();\n    }\n    bufferedData()->move(provided_data);\n  }\n}\n\nbool ActiveStreamFilterBase::commonHandleAfterDataCallback(FilterDataStatus status,\n                                                           Buffer::Instance& provided_data,\n                                                           bool& buffer_was_streaming) {\n\n  if (status == FilterDataStatus::Continue) {\n    if (iteration_state_ == IterationState::StopSingleIteration) {\n      commonHandleBufferData(provided_data);\n      commonContinue();\n      return false;\n    } else {\n      ASSERT(headers_continued_);\n    }\n  } else {\n    iteration_state_ = IterationState::StopSingleIteration;\n    if (status == FilterDataStatus::StopIterationAndBuffer ||\n        status == FilterDataStatus::StopIterationAndWatermark) {\n      buffer_was_streaming = status == FilterDataStatus::StopIterationAndWatermark;\n      commonHandleBufferData(provided_data);\n    } else if (complete() && !hasTrailers() && !bufferedData()) {\n      // If this filter is doing StopIterationNoBuffer and this stream is terminated with a zero\n      // byte data frame, we need to create an empty buffer to make sure that when commonContinue\n      // is called, the pipeline resumes with an empty data frame with end_stream = true\n      ASSERT(end_stream_);\n      bufferedData() = createBuffer();\n    }\n\n    return false;\n  }\n\n  return true;\n}\n\nbool ActiveStreamFilterBase::commonHandleAfterTrailersCallback(FilterTrailersStatus status) {\n\n  if (status == FilterTrailersStatus::Continue) {\n    if (iteration_state_ == IterationState::StopSingleIteration) {\n      commonContinue();\n      return false;\n    } else {\n      ASSERT(headers_continued_);\n    
}\n  } else {\n    return false;\n  }\n\n  return true;\n}\n\nconst Network::Connection* ActiveStreamFilterBase::connection() { return parent_.connection(); }\n\nEvent::Dispatcher& ActiveStreamFilterBase::dispatcher() { return parent_.dispatcher_; }\n\nStreamInfo::StreamInfo& ActiveStreamFilterBase::streamInfo() { return parent_.stream_info_; }\n\nTracing::Span& ActiveStreamFilterBase::activeSpan() {\n  return parent_.filter_manager_callbacks_.activeSpan();\n}\n\nconst ScopeTrackedObject& ActiveStreamFilterBase::scope() {\n  return parent_.filter_manager_callbacks_.scope();\n}\n\nTracing::Config& ActiveStreamFilterBase::tracingConfig() {\n  return parent_.filter_manager_callbacks_.tracingConfig();\n}\n\nUpstream::ClusterInfoConstSharedPtr ActiveStreamFilterBase::clusterInfo() {\n  return parent_.filter_manager_callbacks_.clusterInfo();\n}\n\nRouter::RouteConstSharedPtr ActiveStreamFilterBase::route() { return route(nullptr); }\n\nRouter::RouteConstSharedPtr ActiveStreamFilterBase::route(const Router::RouteCallback& cb) {\n  return parent_.filter_manager_callbacks_.route(cb);\n}\n\nvoid ActiveStreamFilterBase::clearRouteCache() {\n  parent_.filter_manager_callbacks_.clearRouteCache();\n}\n\nbool ActiveStreamDecoderFilter::canContinue() {\n  // It is possible for the connection manager to respond directly to a request even while\n  // a filter is trying to continue. If a response has already happened, we should not\n  // continue to further filters. 
A concrete example of this is a filter buffering data, the\n  // last data frame comes in and the filter continues, but the final buffering takes the stream\n  // over the high watermark such that a 413 is returned.\n  return !parent_.state_.local_complete_;\n}\n\nBuffer::WatermarkBufferPtr ActiveStreamDecoderFilter::createBuffer() {\n  auto buffer = std::make_unique<Buffer::WatermarkBuffer>(\n      [this]() -> void { this->requestDataDrained(); },\n      [this]() -> void { this->requestDataTooLarge(); },\n      []() -> void { /* TODO(adisuissa): Handle overflow watermark */ });\n  buffer->setWatermarks(parent_.buffer_limit_);\n  return buffer;\n}\n\nBuffer::WatermarkBufferPtr& ActiveStreamDecoderFilter::bufferedData() {\n  return parent_.buffered_request_data_;\n}\n\nbool ActiveStreamDecoderFilter::complete() { return parent_.state_.remote_complete_; }\n\nvoid ActiveStreamDecoderFilter::doHeaders(bool end_stream) {\n  parent_.decodeHeaders(this, *parent_.filter_manager_callbacks_.requestHeaders(), end_stream);\n}\n\nvoid ActiveStreamDecoderFilter::doData(bool end_stream) {\n  parent_.decodeData(this, *parent_.buffered_request_data_, end_stream,\n                     FilterManager::FilterIterationStartState::CanStartFromCurrent);\n}\n\nvoid ActiveStreamDecoderFilter::doTrailers() {\n  parent_.decodeTrailers(this, *parent_.filter_manager_callbacks_.requestTrailers());\n}\nbool ActiveStreamDecoderFilter::hasTrailers() {\n  return parent_.filter_manager_callbacks_.requestTrailers().has_value();\n}\n\nvoid ActiveStreamDecoderFilter::drainSavedRequestMetadata() {\n  ASSERT(saved_request_metadata_ != nullptr);\n  for (auto& metadata_map : *getSavedRequestMetadata()) {\n    parent_.decodeMetadata(this, *metadata_map);\n  }\n  getSavedRequestMetadata()->clear();\n}\n\nvoid ActiveStreamDecoderFilter::handleMetadataAfterHeadersCallback() {\n  // If we drain accumulated metadata, the iteration must start with the current filter.\n  const bool saved_state = 
iterate_from_current_filter_;\n  iterate_from_current_filter_ = true;\n  // If decodeHeaders() returns StopAllIteration, we should skip draining metadata, and wait\n  // for doMetadata() to drain the metadata after iteration continues.\n  if (!stoppedAll() && saved_request_metadata_ != nullptr && !getSavedRequestMetadata()->empty()) {\n    drainSavedRequestMetadata();\n  }\n  // Restores the original value of iterate_from_current_filter_.\n  iterate_from_current_filter_ = saved_state;\n}\n\nRequestTrailerMap& ActiveStreamDecoderFilter::addDecodedTrailers() {\n  return parent_.addDecodedTrailers();\n}\n\nvoid ActiveStreamDecoderFilter::addDecodedData(Buffer::Instance& data, bool streaming) {\n  parent_.addDecodedData(*this, data, streaming);\n}\n\nMetadataMapVector& ActiveStreamDecoderFilter::addDecodedMetadata() {\n  return parent_.addDecodedMetadata();\n}\n\nvoid ActiveStreamDecoderFilter::injectDecodedDataToFilterChain(Buffer::Instance& data,\n                                                               bool end_stream) {\n  parent_.decodeData(this, data, end_stream,\n                     FilterManager::FilterIterationStartState::CanStartFromCurrent);\n}\n\nvoid ActiveStreamDecoderFilter::continueDecoding() { commonContinue(); }\nconst Buffer::Instance* ActiveStreamDecoderFilter::decodingBuffer() {\n  return parent_.buffered_request_data_.get();\n}\n\nvoid ActiveStreamDecoderFilter::modifyDecodingBuffer(\n    std::function<void(Buffer::Instance&)> callback) {\n  ASSERT(parent_.state_.latest_data_decoding_filter_ == this);\n  callback(*parent_.buffered_request_data_.get());\n}\n\nvoid ActiveStreamDecoderFilter::sendLocalReply(\n    Code code, absl::string_view body,\n    std::function<void(ResponseHeaderMap& headers)> modify_headers,\n    const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) {\n  parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, grpc_status, details);\n}\n\nvoid 
ActiveStreamDecoderFilter::encode100ContinueHeaders(ResponseHeaderMapPtr&& headers) {\n  // If Envoy is not configured to proxy 100-Continue responses, swallow the 100 Continue\n  // here. This avoids the potential situation where Envoy strips Expect: 100-Continue and sends a\n  // 100-Continue, then proxies a duplicate 100 Continue from upstream.\n  if (parent_.proxy_100_continue_) {\n    parent_.filter_manager_callbacks_.setContinueHeaders(std::move(headers));\n    parent_.encode100ContinueHeaders(nullptr, *parent_.filter_manager_callbacks_.continueHeaders());\n  }\n}\n\nvoid ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream,\n                                              absl::string_view details) {\n  parent_.stream_info_.setResponseCodeDetails(details);\n  parent_.filter_manager_callbacks_.setResponseHeaders(std::move(headers));\n  parent_.encodeHeaders(nullptr, *parent_.filter_manager_callbacks_.responseHeaders(), end_stream);\n}\n\nvoid ActiveStreamDecoderFilter::encodeData(Buffer::Instance& data, bool end_stream) {\n  parent_.encodeData(nullptr, data, end_stream,\n                     FilterManager::FilterIterationStartState::CanStartFromCurrent);\n}\n\nvoid ActiveStreamDecoderFilter::encodeTrailers(ResponseTrailerMapPtr&& trailers) {\n  parent_.filter_manager_callbacks_.setResponseTrailers(std::move(trailers));\n  parent_.encodeTrailers(nullptr, *parent_.filter_manager_callbacks_.responseTrailers());\n}\n\nvoid ActiveStreamDecoderFilter::encodeMetadata(MetadataMapPtr&& metadata_map_ptr) {\n  parent_.encodeMetadata(nullptr, std::move(metadata_map_ptr));\n}\n\nvoid ActiveStreamDecoderFilter::onDecoderFilterAboveWriteBufferHighWatermark() {\n  parent_.filter_manager_callbacks_.onDecoderFilterAboveWriteBufferHighWatermark();\n}\n\nvoid ActiveStreamDecoderFilter::requestDataTooLarge() {\n  ENVOY_STREAM_LOG(debug, \"request data too large watermark exceeded\", parent_);\n  if (parent_.state_.decoder_filters_streaming_) {\n  
  onDecoderFilterAboveWriteBufferHighWatermark();\n  } else {\n    parent_.filter_manager_callbacks_.onRequestDataTooLarge();\n    sendLocalReply(Code::PayloadTooLarge, CodeUtility::toString(Code::PayloadTooLarge), nullptr,\n                   absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestPayloadTooLarge);\n  }\n}\n\nvoid FilterManager::addStreamDecoderFilterWorker(StreamDecoderFilterSharedPtr filter,\n                                                 bool dual_filter) {\n  ActiveStreamDecoderFilterPtr wrapper(new ActiveStreamDecoderFilter(*this, filter, dual_filter));\n  filter->setDecoderFilterCallbacks(*wrapper);\n  // Note: configured decoder filters are appended to decoder_filters_.\n  // This means that if filters are configured in the following order (assume all three filters are\n  // both decoder/encoder filters):\n  //   http_filters:\n  //     - A\n  //     - B\n  //     - C\n  // The decoder filter chain will iterate through filters A, B, C.\n  LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_);\n}\n\nvoid FilterManager::addStreamEncoderFilterWorker(StreamEncoderFilterSharedPtr filter,\n                                                 bool dual_filter) {\n  ActiveStreamEncoderFilterPtr wrapper(new ActiveStreamEncoderFilter(*this, filter, dual_filter));\n  filter->setEncoderFilterCallbacks(*wrapper);\n  // Note: configured encoder filters are prepended to encoder_filters_.\n  // This means that if filters are configured in the following order (assume all three filters are\n  // both decoder/encoder filters):\n  //   http_filters:\n  //     - A\n  //     - B\n  //     - C\n  // The encoder filter chain will iterate through filters C, B, A.\n  LinkedList::moveIntoList(std::move(wrapper), encoder_filters_);\n}\n\nvoid FilterManager::addAccessLogHandler(AccessLog::InstanceSharedPtr handler) {\n  access_log_handlers_.push_back(handler);\n}\n\nvoid FilterManager::maybeContinueDecoding(\n    const 
std::list<ActiveStreamDecoderFilterPtr>::iterator& continue_data_entry) {\n  if (continue_data_entry != decoder_filters_.end()) {\n    // We use the continueDecoding() code since it will correctly handle not calling\n    // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code\n    // expects it.\n    ASSERT(buffered_request_data_);\n    (*continue_data_entry)->iteration_state_ =\n        ActiveStreamFilterBase::IterationState::StopSingleIteration;\n    (*continue_data_entry)->continueDecoding();\n  }\n}\n\nvoid FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHeaderMap& headers,\n                                  bool end_stream) {\n  // Headers filter iteration should always start with the next filter if available.\n  std::list<ActiveStreamDecoderFilterPtr>::iterator entry =\n      commonDecodePrefix(filter, FilterIterationStartState::AlwaysStartFromNext);\n  std::list<ActiveStreamDecoderFilterPtr>::iterator continue_data_entry = decoder_filters_.end();\n\n  for (; entry != decoder_filters_.end(); entry++) {\n    ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeHeaders));\n    state_.filter_call_state_ |= FilterCallState::DecodeHeaders;\n    (*entry)->end_stream_ = state_.decoding_headers_only_ ||\n                            (end_stream && continue_data_entry == decoder_filters_.end());\n    FilterHeadersStatus status = (*entry)->decodeHeaders(headers, (*entry)->end_stream_);\n\n    ASSERT(!(status == FilterHeadersStatus::ContinueAndEndStream && (*entry)->end_stream_),\n           \"Filters should not return FilterHeadersStatus::ContinueAndEndStream from decodeHeaders \"\n           \"when end_stream is already true\");\n    ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_),\n           \"Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from \"\n           \"decodeHeaders when end_stream is already false\");\n\n    
state_.filter_call_state_ &= ~FilterCallState::DecodeHeaders;\n    ENVOY_STREAM_LOG(trace, \"decode headers called: filter={} status={}\", *this,\n                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));\n\n    (*entry)->decode_headers_called_ = true;\n\n    // decoding_headers_only_ is set if the filter returns ContinueAndEndStream.\n    const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback(\n        status, end_stream, state_.decoding_headers_only_);\n\n    // If this filter ended the stream, decodeComplete() should be called for it.\n    if ((*entry)->end_stream_ || state_.decoding_headers_only_) {\n      (*entry)->handle_->decodeComplete();\n    }\n\n    const bool new_metadata_added = processNewlyAddedMetadata();\n    // If end_stream is set in headers, and a filter adds new metadata, we need to delay end_stream\n    // in headers by inserting an empty data frame with end_stream set. The empty data frame is sent\n    // after the new metadata.\n    if ((*entry)->end_stream_ && new_metadata_added && !buffered_request_data_) {\n      Buffer::OwnedImpl empty_data(\"\");\n      ENVOY_STREAM_LOG(\n          trace, \"inserting an empty data frame for end_stream due metadata being added.\", *this);\n      // Metadata frame doesn't carry end of stream bit. We need an empty data frame to end the\n      // stream.\n      addDecodedData(*((*entry).get()), empty_data, true);\n    }\n\n    if (!continue_iteration && std::next(entry) != decoder_filters_.end()) {\n      // Stop iteration IFF this is not the last filter. If it is the last filter, continue with\n      // processing since we need to handle the case where a terminal filter wants to buffer, but\n      // a previous filter has added body.\n      maybeContinueDecoding(continue_data_entry);\n      return;\n    }\n\n    // Here we handle the case where we have a header only request, but a filter adds a body\n    // to it. 
We need to not raise end_stream = true to further filters during inline iteration.\n    if (end_stream && buffered_request_data_ && continue_data_entry == decoder_filters_.end()) {\n      continue_data_entry = entry;\n    }\n  }\n\n  maybeContinueDecoding(continue_data_entry);\n\n  if (end_stream) {\n    disarmRequestTimeout();\n  }\n}\n\nvoid FilterManager::decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instance& data,\n                               bool end_stream,\n                               FilterIterationStartState filter_iteration_start_state) {\n  ScopeTrackerScopeState scope(&*this, dispatcher_);\n  filter_manager_callbacks_.resetIdleTimer();\n\n  // If we previously decided to decode only the headers, do nothing here.\n  if (state_.decoding_headers_only_) {\n    return;\n  }\n\n  // If a response is complete or a reset has been sent, filters do not care about further body\n  // data. Just drop it.\n  if (state_.local_complete_) {\n    return;\n  }\n\n  auto trailers_added_entry = decoder_filters_.end();\n  const bool trailers_exists_at_start = filter_manager_callbacks_.requestTrailers().has_value();\n  // Filter iteration may start at the current filter.\n  std::list<ActiveStreamDecoderFilterPtr>::iterator entry =\n      commonDecodePrefix(filter, filter_iteration_start_state);\n\n  for (; entry != decoder_filters_.end(); entry++) {\n    // If the filter pointed by entry has stopped for all frame types, return now.\n    if (handleDataIfStopAll(**entry, data, state_.decoder_filters_streaming_)) {\n      return;\n    }\n    // If end_stream_ is marked for a filter, the data is not for this filter and filters after.\n    //\n    // In following case, ActiveStreamFilterBase::commonContinue() could be called recursively and\n    // its doData() is called with wrong data.\n    //\n    //  There are 3 decode filters and \"wrapper\" refers to ActiveStreamFilter object.\n    //\n    //  filter0->decodeHeaders(_, true)\n    //    return STOP\n    //  
filter0->continueDecoding()\n    //    wrapper0->commonContinue()\n    //      wrapper0->decodeHeaders(_, _, true)\n    //        filter1->decodeHeaders(_, true)\n    //          filter1->addDecodeData()\n    //          return CONTINUE\n    //        filter2->decodeHeaders(_, false)\n    //          return CONTINUE\n    //        wrapper1->commonContinue() // Detects data is added.\n    //          wrapper1->doData()\n    //            wrapper1->decodeData()\n    //              filter2->decodeData(_, true)\n    //                 return CONTINUE\n    //      wrapper0->doData() // This should not be called\n    //        wrapper0->decodeData()\n    //          filter1->decodeData(_, true)  // It will cause assertions.\n    //\n    // One way to solve this problem is to mark end_stream_ for each filter.\n    // If a filter is already marked as end_stream_ when decodeData() is called, bails out the\n    // whole function. If just skip the filter, the codes after the loop will be called with\n    // wrong data. 
For encodeData, the response_encoder->encode() will be called.\n    if ((*entry)->end_stream_) {\n      return;\n    }\n    ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeData));\n\n    // We check the request_trailers_ pointer here in case addDecodedTrailers\n    // is called in decodeData during a previous filter invocation, at which point we communicate to\n    // the current and future filters that the stream has not yet ended.\n    if (end_stream) {\n      state_.filter_call_state_ |= FilterCallState::LastDataFrame;\n    }\n\n    recordLatestDataFilter(entry, state_.latest_data_decoding_filter_, decoder_filters_);\n\n    state_.filter_call_state_ |= FilterCallState::DecodeData;\n    (*entry)->end_stream_ = end_stream && !filter_manager_callbacks_.requestTrailers();\n    FilterDataStatus status = (*entry)->handle_->decodeData(data, (*entry)->end_stream_);\n    if ((*entry)->end_stream_) {\n      (*entry)->handle_->decodeComplete();\n    }\n    state_.filter_call_state_ &= ~FilterCallState::DecodeData;\n    if (end_stream) {\n      state_.filter_call_state_ &= ~FilterCallState::LastDataFrame;\n    }\n    ENVOY_STREAM_LOG(trace, \"decode data called: filter={} status={}\", *this,\n                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));\n\n    processNewlyAddedMetadata();\n\n    if (!trailers_exists_at_start && filter_manager_callbacks_.requestTrailers() &&\n        trailers_added_entry == decoder_filters_.end()) {\n      trailers_added_entry = entry;\n    }\n\n    if (!(*entry)->commonHandleAfterDataCallback(status, data, state_.decoder_filters_streaming_) &&\n        std::next(entry) != decoder_filters_.end()) {\n      // Stop iteration IFF this is not the last filter. 
If it is the last filter, continue with\n      // processing since we need to handle the case where a terminal filter wants to buffer, but\n      // a previous filter has added trailers.\n      return;\n    }\n  }\n\n  // If trailers were adding during decodeData we need to trigger decodeTrailers in order\n  // to allow filters to process the trailers.\n  if (trailers_added_entry != decoder_filters_.end()) {\n    decodeTrailers(trailers_added_entry->get(), *filter_manager_callbacks_.requestTrailers());\n  }\n\n  if (end_stream) {\n    disarmRequestTimeout();\n  }\n}\n\nRequestTrailerMap& FilterManager::addDecodedTrailers() {\n  // Trailers can only be added during the last data frame (i.e. end_stream = true).\n  ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame);\n\n  filter_manager_callbacks_.setRequestTrailers(RequestTrailerMapImpl::create());\n  return *filter_manager_callbacks_.requestTrailers();\n}\n\nvoid FilterManager::addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data,\n                                   bool streaming) {\n  if (state_.filter_call_state_ == 0 ||\n      (state_.filter_call_state_ & FilterCallState::DecodeHeaders) ||\n      (state_.filter_call_state_ & FilterCallState::DecodeData) ||\n      ((state_.filter_call_state_ & FilterCallState::DecodeTrailers) && !filter.canIterate())) {\n    // Make sure if this triggers watermarks, the correct action is taken.\n    state_.decoder_filters_streaming_ = streaming;\n    // If no call is happening or we are in the decode headers/data callback, buffer the data.\n    // Inline processing happens in the decodeHeaders() callback if necessary.\n    filter.commonHandleBufferData(data);\n  } else if (state_.filter_call_state_ & FilterCallState::DecodeTrailers) {\n    // In this case we need to inline dispatch the data to further filters. 
If those filters\n    // choose to buffer/stop iteration that's fine.\n    decodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext);\n  } else {\n    // TODO(mattklein123): Formalize error handling for filters and add tests. Should probably\n    // throw an exception here.\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n}\n\nMetadataMapVector& FilterManager::addDecodedMetadata() { return *getRequestMetadataMapVector(); }\n\nvoid FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, RequestTrailerMap& trailers) {\n  // If we previously decided to decode only the headers, do nothing here.\n  if (state_.decoding_headers_only_) {\n    return;\n  }\n\n  // See decodeData() above for why we check local_complete_ here.\n  if (state_.local_complete_) {\n    return;\n  }\n\n  // Filter iteration may start at the current filter.\n  std::list<ActiveStreamDecoderFilterPtr>::iterator entry =\n      commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent);\n\n  for (; entry != decoder_filters_.end(); entry++) {\n    // If the filter pointed by entry has stopped for all frame type, return now.\n    if ((*entry)->stoppedAll()) {\n      return;\n    }\n    ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeTrailers));\n    state_.filter_call_state_ |= FilterCallState::DecodeTrailers;\n    FilterTrailersStatus status = (*entry)->handle_->decodeTrailers(trailers);\n    (*entry)->handle_->decodeComplete();\n    (*entry)->end_stream_ = true;\n    state_.filter_call_state_ &= ~FilterCallState::DecodeTrailers;\n    ENVOY_STREAM_LOG(trace, \"decode trailers called: filter={} status={}\", *this,\n                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));\n\n    processNewlyAddedMetadata();\n\n    if (!(*entry)->commonHandleAfterTrailersCallback(status)) {\n      return;\n    }\n  }\n  disarmRequestTimeout();\n}\n\nvoid FilterManager::decodeMetadata(ActiveStreamDecoderFilter* filter, MetadataMap& 
metadata_map) {\n  // Filter iteration may start at the current filter.\n  std::list<ActiveStreamDecoderFilterPtr>::iterator entry =\n      commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent);\n\n  for (; entry != decoder_filters_.end(); entry++) {\n    // If the filter pointed by entry has stopped for all frame type, stores metadata and returns.\n    // If the filter pointed by entry hasn't returned from decodeHeaders, stores newly added\n    // metadata in case decodeHeaders returns StopAllIteration. The latter can happen when headers\n    // callbacks generate new metadata.\n    if (!(*entry)->decode_headers_called_ || (*entry)->stoppedAll()) {\n      Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n      (*entry)->getSavedRequestMetadata()->emplace_back(std::move(metadata_map_ptr));\n      return;\n    }\n\n    FilterMetadataStatus status = (*entry)->handle_->decodeMetadata(metadata_map);\n    ENVOY_STREAM_LOG(trace, \"decode metadata called: filter={} status={}, metadata: {}\", *this,\n                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status),\n                     metadata_map);\n  }\n}\n\nvoid FilterManager::maybeEndDecode(bool end_stream) {\n  ASSERT(!state_.remote_complete_);\n  state_.remote_complete_ = end_stream;\n  if (end_stream) {\n    stream_info_.onLastDownstreamRxByteReceived();\n    ENVOY_STREAM_LOG(debug, \"request end stream\", *this);\n  }\n}\n\nvoid FilterManager::disarmRequestTimeout() { filter_manager_callbacks_.disarmRequestTimeout(); }\n\nstd::list<ActiveStreamEncoderFilterPtr>::iterator\nFilterManager::commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream,\n                                  FilterIterationStartState filter_iteration_start_state) {\n  // Only do base state setting on the initial call. 
Subsequent calls for filtering do not touch\n  // the base state.\n  if (filter == nullptr) {\n    ASSERT(!state_.local_complete_);\n    state_.local_complete_ = end_stream;\n    return encoder_filters_.begin();\n  }\n\n  if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent &&\n      (*(filter->entry()))->iterate_from_current_filter_) {\n    // The filter iteration has been stopped for all frame types, and now the iteration continues.\n    // The current filter's encoding callback has not be called. Call it now.\n    return filter->entry();\n  }\n  return std::next(filter->entry());\n}\n\nstd::list<ActiveStreamDecoderFilterPtr>::iterator\nFilterManager::commonDecodePrefix(ActiveStreamDecoderFilter* filter,\n                                  FilterIterationStartState filter_iteration_start_state) {\n  if (!filter) {\n    return decoder_filters_.begin();\n  }\n  if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent &&\n      (*(filter->entry()))->iterate_from_current_filter_) {\n    // The filter iteration has been stopped for all frame types, and now the iteration continues.\n    // The current filter's callback function has not been called. 
Call it now.\n    return filter->entry();\n  }\n  return std::next(filter->entry());\n}\n\nvoid FilterManager::sendLocalReply(\n    bool old_was_grpc_request, Code code, absl::string_view body,\n    const std::function<void(ResponseHeaderMap& headers)>& modify_headers,\n    const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) {\n  const bool is_head_request = state_.is_head_request_;\n  bool is_grpc_request = old_was_grpc_request;\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.unify_grpc_handling\")) {\n    is_grpc_request = state_.is_grpc_request_;\n  }\n\n  stream_info_.setResponseCodeDetails(details);\n\n  filter_manager_callbacks_.onLocalReply(code);\n\n  if (!filter_manager_callbacks_.responseHeaders().has_value()) {\n    // If the response has not started at all, send the response through the filter chain.\n    sendLocalReplyViaFilterChain(is_grpc_request, code, body, modify_headers, is_head_request,\n                                 grpc_status, details);\n  } else if (!state_.non_100_response_headers_encoded_) {\n    ENVOY_STREAM_LOG(debug, \"Sending local reply with details {} directly to the encoder\", *this,\n                     details);\n    // In this case, at least the header and possibly the body has started\n    // processing through the filter chain, but no non-informational headers\n    // have been sent downstream. To ensure that filters don't get their\n    // state machine screwed up, bypass the filter chain and send the local\n    // reply directly to the codec.\n    //\n    sendDirectLocalReply(code, body, modify_headers, state_.is_head_request_, grpc_status);\n  } else {\n    // If we land in this branch, response headers have already been sent to the client.\n    // All we can do at this point is reset the stream.\n    ENVOY_STREAM_LOG(debug, \"Resetting stream due to {}. 
Prior headers have already been sent\",\n                     *this, details);\n    // TODO(snowp): This means we increment the tx_reset stat which we weren't doing previously.\n    // Intended?\n    filter_manager_callbacks_.resetStream();\n  }\n}\n\nvoid FilterManager::sendLocalReplyViaFilterChain(\n    bool is_grpc_request, Code code, absl::string_view body,\n    const std::function<void(ResponseHeaderMap& headers)>& modify_headers, bool is_head_request,\n    const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) {\n  ENVOY_STREAM_LOG(debug, \"Sending local reply with details {}\", *this, details);\n  ASSERT(!filter_manager_callbacks_.responseHeaders().has_value());\n  // For early error handling, do a best-effort attempt to create a filter chain\n  // to ensure access logging. If the filter chain already exists this will be\n  // a no-op.\n  createFilterChain();\n\n  Utility::sendLocalReply(\n      state_.destroyed_,\n      Utility::EncodeFunctions{\n          [this, modify_headers](ResponseHeaderMap& headers) -> void {\n            if (streamInfo().route_entry_ &&\n                Runtime::runtimeFeatureEnabled(\n                    \"envoy.reloadable_features.always_apply_route_header_rules\")) {\n              streamInfo().route_entry_->finalizeResponseHeaders(headers, streamInfo());\n            }\n            if (modify_headers) {\n              modify_headers(headers);\n            }\n          },\n          [this](ResponseHeaderMap& response_headers, Code& code, std::string& body,\n                 absl::string_view& content_type) -> void {\n            // TODO(snowp): This &get() business isn't nice, rework LocalReply and others to accept\n            // opt refs.\n            local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().has_value()\n                                     ? 
&filter_manager_callbacks_.requestHeaders()->get()\n                                     : nullptr,\n                                 response_headers, stream_info_, code, body, content_type);\n          },\n          [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void {\n            filter_manager_callbacks_.setResponseHeaders(std::move(headers));\n            // TODO: Start encoding from the last decoder filter that saw the\n            // request instead.\n            encodeHeaders(nullptr, filter_manager_callbacks_.responseHeaders()->get(), end_stream);\n          },\n          [this](Buffer::Instance& data, bool end_stream) -> void {\n            // TODO: Start encoding from the last decoder filter that saw the\n            // request instead.\n            encodeData(nullptr, data, end_stream,\n                       FilterManager::FilterIterationStartState::CanStartFromCurrent);\n          }},\n      Utility::LocalReplyData{is_grpc_request, code, body, grpc_status, is_head_request});\n}\n\nvoid FilterManager::sendDirectLocalReply(\n    Code code, absl::string_view body,\n    const std::function<void(ResponseHeaderMap&)>& modify_headers, bool is_head_request,\n    const absl::optional<Grpc::Status::GrpcStatus> grpc_status) {\n  // Make sure we won't end up with nested watermark calls from the body buffer.\n  state_.encoder_filters_streaming_ = true;\n  Http::Utility::sendLocalReply(\n      state_.destroyed_,\n      Utility::EncodeFunctions{\n          [this, modify_headers](ResponseHeaderMap& headers) -> void {\n            if (streamInfo().route_entry_ &&\n                Runtime::runtimeFeatureEnabled(\n                    \"envoy.reloadable_features.always_apply_route_header_rules\")) {\n              streamInfo().route_entry_->finalizeResponseHeaders(headers, streamInfo());\n            }\n            if (modify_headers) {\n              modify_headers(headers);\n            }\n          },\n          [&](ResponseHeaderMap& 
response_headers, Code& code, std::string& body,\n              absl::string_view& content_type) -> void {\n            local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().has_value()\n                                     ? &filter_manager_callbacks_.requestHeaders()->get()\n                                     : nullptr,\n                                 response_headers, stream_info_, code, body, content_type);\n          },\n          [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void {\n            // Move the response headers into the FilterManager to make sure they're visible to\n            // access logs.\n            filter_manager_callbacks_.setResponseHeaders(std::move(response_headers));\n\n            state_.non_100_response_headers_encoded_ = true;\n            filter_manager_callbacks_.encodeHeaders(*filter_manager_callbacks_.responseHeaders(),\n                                                    end_stream);\n\n            maybeEndEncode(end_stream);\n          },\n          [&](Buffer::Instance& data, bool end_stream) -> void {\n            filter_manager_callbacks_.encodeData(data, end_stream);\n            maybeEndEncode(end_stream);\n          }},\n      Utility::LocalReplyData{state_.is_grpc_request_, code, body, grpc_status, is_head_request});\n}\n\nvoid FilterManager::encode100ContinueHeaders(ActiveStreamEncoderFilter* filter,\n                                             ResponseHeaderMap& headers) {\n  filter_manager_callbacks_.resetIdleTimer();\n  ASSERT(proxy_100_continue_);\n  // The caller must guarantee that encode100ContinueHeaders() is invoked at most once.\n  ASSERT(!state_.has_continue_headers_ || filter != nullptr);\n  // Make sure commonContinue continues encode100ContinueHeaders.\n  state_.has_continue_headers_ = true;\n\n  // Similar to the block in encodeHeaders, run encode100ContinueHeaders on each\n  // filter. 
This is simpler than that case because 100 continue implies no\n  // end-stream, and because there are normal headers coming there's no need for\n  // complex continuation logic.\n  // 100-continue filter iteration should always start with the next filter if available.\n  std::list<ActiveStreamEncoderFilterPtr>::iterator entry =\n      commonEncodePrefix(filter, false, FilterIterationStartState::AlwaysStartFromNext);\n  for (; entry != encoder_filters_.end(); entry++) {\n    ASSERT(!(state_.filter_call_state_ & FilterCallState::Encode100ContinueHeaders));\n    state_.filter_call_state_ |= FilterCallState::Encode100ContinueHeaders;\n    FilterHeadersStatus status = (*entry)->handle_->encode100ContinueHeaders(headers);\n    state_.filter_call_state_ &= ~FilterCallState::Encode100ContinueHeaders;\n    ENVOY_STREAM_LOG(trace, \"encode 100 continue headers called: filter={} status={}\", *this,\n                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));\n    if (!(*entry)->commonHandleAfter100ContinueHeadersCallback(status)) {\n      return;\n    }\n  }\n\n  filter_manager_callbacks_.encode100ContinueHeaders(headers);\n}\n\nvoid FilterManager::maybeContinueEncoding(\n    const std::list<ActiveStreamEncoderFilterPtr>::iterator& continue_data_entry) {\n  if (continue_data_entry != encoder_filters_.end()) {\n    // We use the continueEncoding() code since it will correctly handle not calling\n    // encodeHeaders() again. 
Fake setting StopSingleIteration since the continueEncoding() code\n    // expects it.\n    ASSERT(buffered_response_data_);\n    (*continue_data_entry)->iteration_state_ =\n        ActiveStreamFilterBase::IterationState::StopSingleIteration;\n    (*continue_data_entry)->continueEncoding();\n  }\n}\n\nvoid FilterManager::encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers,\n                                  bool end_stream) {\n  // See encodeHeaders() comments in include/envoy/http/filter.h for why the 1xx precondition holds.\n  ASSERT(!CodeUtility::is1xx(Utility::getResponseStatus(headers)) ||\n         Utility::getResponseStatus(headers) == enumToInt(Http::Code::SwitchingProtocols));\n  filter_manager_callbacks_.resetIdleTimer();\n  disarmRequestTimeout();\n\n  // Headers filter iteration should always start with the next filter if available.\n  std::list<ActiveStreamEncoderFilterPtr>::iterator entry =\n      commonEncodePrefix(filter, end_stream, FilterIterationStartState::AlwaysStartFromNext);\n  std::list<ActiveStreamEncoderFilterPtr>::iterator continue_data_entry = encoder_filters_.end();\n\n  for (; entry != encoder_filters_.end(); entry++) {\n    ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeHeaders));\n    state_.filter_call_state_ |= FilterCallState::EncodeHeaders;\n    (*entry)->end_stream_ = state_.encoding_headers_only_ ||\n                            (end_stream && continue_data_entry == encoder_filters_.end());\n    FilterHeadersStatus status = (*entry)->handle_->encodeHeaders(headers, (*entry)->end_stream_);\n\n    ASSERT(!(status == FilterHeadersStatus::ContinueAndEndStream && (*entry)->end_stream_),\n           \"Filters should not return FilterHeadersStatus::ContinueAndEndStream from encodeHeaders \"\n           \"when end_stream is already true\");\n    ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_),\n           \"Filters should not return 
FilterHeadersStatus::ContinueAndDontEndStream from \"\n           \"encodeHeaders when end_stream is already false\");\n\n    state_.filter_call_state_ &= ~FilterCallState::EncodeHeaders;\n    ENVOY_STREAM_LOG(trace, \"encode headers called: filter={} status={}\", *this,\n                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));\n\n    (*entry)->encode_headers_called_ = true;\n\n    // encoding_headers_only_ is set if the filter returns ContinueAndEndStream.\n    const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback(\n        status, end_stream, state_.encoding_headers_only_);\n\n    // If this filter ended the stream, encodeComplete() should be called for it.\n    if ((*entry)->end_stream_ || state_.encoding_headers_only_) {\n      (*entry)->handle_->encodeComplete();\n    }\n\n    // If we're encoding a headers only response, then mark the local as complete. This ensures\n    // that we don't attempt to reset the downstream request in doEndStream.\n    if (state_.encoding_headers_only_) {\n      state_.local_complete_ = true;\n    }\n\n    if (!continue_iteration) {\n      if (!(*entry)->end_stream_) {\n        maybeContinueEncoding(continue_data_entry);\n      }\n      return;\n    }\n\n    // Here we handle the case where we have a header only response, but a filter adds a body\n    // to it. 
We need to not raise end_stream = true to further filters during inline iteration.\n    if (end_stream && buffered_response_data_ && continue_data_entry == encoder_filters_.end()) {\n      continue_data_entry = entry;\n    }\n  }\n\n  const bool modified_end_stream = state_.encoding_headers_only_ ||\n                                   (end_stream && continue_data_entry == encoder_filters_.end());\n  state_.non_100_response_headers_encoded_ = true;\n  filter_manager_callbacks_.encodeHeaders(headers, modified_end_stream);\n  maybeEndEncode(modified_end_stream);\n\n  if (!modified_end_stream) {\n    maybeContinueEncoding(continue_data_entry);\n  }\n}\n\nvoid FilterManager::encodeMetadata(ActiveStreamEncoderFilter* filter,\n                                   MetadataMapPtr&& metadata_map_ptr) {\n  filter_manager_callbacks_.resetIdleTimer();\n\n  std::list<ActiveStreamEncoderFilterPtr>::iterator entry =\n      commonEncodePrefix(filter, false, FilterIterationStartState::CanStartFromCurrent);\n\n  for (; entry != encoder_filters_.end(); entry++) {\n    // If the filter pointed by entry has stopped for all frame type, stores metadata and returns.\n    // If the filter pointed by entry hasn't returned from encodeHeaders, stores newly added\n    // metadata in case encodeHeaders returns StopAllIteration. 
The latter can happen when headers\n    // callbacks generate new metadata.\n    if (!(*entry)->encode_headers_called_ || (*entry)->stoppedAll()) {\n      (*entry)->getSavedResponseMetadata()->emplace_back(std::move(metadata_map_ptr));\n      return;\n    }\n\n    FilterMetadataStatus status = (*entry)->handle_->encodeMetadata(*metadata_map_ptr);\n    ENVOY_STREAM_LOG(trace, \"encode metadata called: filter={} status={}\", *this,\n                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));\n  }\n  // TODO(soya3129): update stats with metadata.\n\n  // Now encode metadata via the codec.\n  if (!metadata_map_ptr->empty()) {\n    MetadataMapVector metadata_map_vector;\n    metadata_map_vector.emplace_back(std::move(metadata_map_ptr));\n    filter_manager_callbacks_.encodeMetadata(metadata_map_vector);\n  }\n}\n\nResponseTrailerMap& FilterManager::addEncodedTrailers() {\n  // Trailers can only be added during the last data frame (i.e. end_stream = true).\n  ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame);\n\n  // Trailers can only be added once.\n  ASSERT(!filter_manager_callbacks_.responseTrailers());\n\n  filter_manager_callbacks_.setResponseTrailers(ResponseTrailerMapImpl::create());\n  return *filter_manager_callbacks_.responseTrailers();\n}\n\nvoid FilterManager::addEncodedData(ActiveStreamEncoderFilter& filter, Buffer::Instance& data,\n                                   bool streaming) {\n  if (state_.filter_call_state_ == 0 ||\n      (state_.filter_call_state_ & FilterCallState::EncodeHeaders) ||\n      (state_.filter_call_state_ & FilterCallState::EncodeData) ||\n      ((state_.filter_call_state_ & FilterCallState::EncodeTrailers) && !filter.canIterate())) {\n    // Make sure if this triggers watermarks, the correct action is taken.\n    state_.encoder_filters_streaming_ = streaming;\n    // If no call is happening or we are in the decode headers/data callback, buffer the data.\n    // Inline processing 
happens in the decodeHeaders() callback if necessary.\n    filter.commonHandleBufferData(data);\n  } else if (state_.filter_call_state_ & FilterCallState::EncodeTrailers) {\n    // In this case we need to inline dispatch the data to further filters. If those filters\n    // choose to buffer/stop iteration that's fine.\n    encodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext);\n  } else {\n    // TODO(mattklein123): Formalize error handling for filters and add tests. Should probably\n    // throw an exception here.\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid FilterManager::encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instance& data,\n                               bool end_stream,\n                               FilterIterationStartState filter_iteration_start_state) {\n  filter_manager_callbacks_.resetIdleTimer();\n\n  // If we previously decided to encode only the headers, do nothing here.\n  if (state_.encoding_headers_only_) {\n    return;\n  }\n\n  // Filter iteration may start at the current filter.\n  std::list<ActiveStreamEncoderFilterPtr>::iterator entry =\n      commonEncodePrefix(filter, end_stream, filter_iteration_start_state);\n  auto trailers_added_entry = encoder_filters_.end();\n\n  const bool trailers_exists_at_start = filter_manager_callbacks_.responseTrailers().has_value();\n  for (; entry != encoder_filters_.end(); entry++) {\n    // If the filter pointed by entry has stopped for all frame type, return now.\n    if (handleDataIfStopAll(**entry, data, state_.encoder_filters_streaming_)) {\n      return;\n    }\n    // If end_stream_ is marked for a filter, the data is not for this filter and filters after.\n    // For details, please see the comment in the ActiveStream::decodeData() function.\n    if ((*entry)->end_stream_) {\n      return;\n    }\n    ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeData));\n\n    // We check the response_trailers_ pointer here in case 
addEncodedTrailers\n    // is called in encodeData during a previous filter invocation, at which point we communicate to\n    // the current and future filters that the stream has not yet ended.\n    state_.filter_call_state_ |= FilterCallState::EncodeData;\n    if (end_stream) {\n      state_.filter_call_state_ |= FilterCallState::LastDataFrame;\n    }\n\n    recordLatestDataFilter(entry, state_.latest_data_encoding_filter_, encoder_filters_);\n\n    (*entry)->end_stream_ = end_stream && !filter_manager_callbacks_.responseTrailers();\n    FilterDataStatus status = (*entry)->handle_->encodeData(data, (*entry)->end_stream_);\n    if ((*entry)->end_stream_) {\n      (*entry)->handle_->encodeComplete();\n    }\n    state_.filter_call_state_ &= ~FilterCallState::EncodeData;\n    if (end_stream) {\n      state_.filter_call_state_ &= ~FilterCallState::LastDataFrame;\n    }\n    ENVOY_STREAM_LOG(trace, \"encode data called: filter={} status={}\", *this,\n                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));\n\n    if (!trailers_exists_at_start && filter_manager_callbacks_.responseTrailers() &&\n        trailers_added_entry == encoder_filters_.end()) {\n      trailers_added_entry = entry;\n    }\n\n    if (!(*entry)->commonHandleAfterDataCallback(status, data, state_.encoder_filters_streaming_)) {\n      return;\n    }\n  }\n\n  const bool modified_end_stream = end_stream && trailers_added_entry == encoder_filters_.end();\n  ASSERT(!state_.encoding_headers_only_);\n  filter_manager_callbacks_.encodeData(data, modified_end_stream);\n  maybeEndEncode(modified_end_stream);\n\n  // If trailers were adding during encodeData we need to trigger decodeTrailers in order\n  // to allow filters to process the trailers.\n  if (trailers_added_entry != encoder_filters_.end()) {\n    encodeTrailers(trailers_added_entry->get(), *filter_manager_callbacks_.responseTrailers());\n  }\n}\n\nvoid FilterManager::encodeTrailers(ActiveStreamEncoderFilter* 
filter,\n                                   ResponseTrailerMap& trailers) {\n  filter_manager_callbacks_.resetIdleTimer();\n\n  // If we previously decided to encode only the headers, do nothing here.\n  if (state_.encoding_headers_only_) {\n    return;\n  }\n\n  // Filter iteration may start at the current filter.\n  std::list<ActiveStreamEncoderFilterPtr>::iterator entry =\n      commonEncodePrefix(filter, true, FilterIterationStartState::CanStartFromCurrent);\n  for (; entry != encoder_filters_.end(); entry++) {\n    // If the filter pointed by entry has stopped for all frame type, return now.\n    if ((*entry)->stoppedAll()) {\n      return;\n    }\n    ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeTrailers));\n    state_.filter_call_state_ |= FilterCallState::EncodeTrailers;\n    FilterTrailersStatus status = (*entry)->handle_->encodeTrailers(trailers);\n    (*entry)->handle_->encodeComplete();\n    (*entry)->end_stream_ = true;\n    state_.filter_call_state_ &= ~FilterCallState::EncodeTrailers;\n    ENVOY_STREAM_LOG(trace, \"encode trailers called: filter={} status={}\", *this,\n                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));\n    if (!(*entry)->commonHandleAfterTrailersCallback(status)) {\n      return;\n    }\n  }\n\n  filter_manager_callbacks_.encodeTrailers(trailers);\n  maybeEndEncode(true);\n}\n\nvoid FilterManager::maybeEndEncode(bool end_stream) {\n  if (end_stream) {\n    filter_manager_callbacks_.endStream();\n  }\n}\n\nbool FilterManager::processNewlyAddedMetadata() {\n  if (request_metadata_map_vector_ == nullptr) {\n    return false;\n  }\n  for (const auto& metadata_map : *getRequestMetadataMapVector()) {\n    decodeMetadata(nullptr, *metadata_map);\n  }\n  getRequestMetadataMapVector()->clear();\n  return true;\n}\n\nbool FilterManager::handleDataIfStopAll(ActiveStreamFilterBase& filter, Buffer::Instance& data,\n                                        bool& filter_streaming) {\n  if 
(filter.stoppedAll()) {\n    ASSERT(!filter.canIterate());\n    filter_streaming =\n        filter.iteration_state_ == ActiveStreamFilterBase::IterationState::StopAllWatermark;\n    filter.commonHandleBufferData(data);\n    return true;\n  }\n  return false;\n}\n\nvoid FilterManager::callHighWatermarkCallbacks() {\n  ++high_watermark_count_;\n  for (auto watermark_callbacks : watermark_callbacks_) {\n    watermark_callbacks->onAboveWriteBufferHighWatermark();\n  }\n}\n\nvoid FilterManager::callLowWatermarkCallbacks() {\n  ASSERT(high_watermark_count_ > 0);\n  --high_watermark_count_;\n  for (auto watermark_callbacks : watermark_callbacks_) {\n    watermark_callbacks->onBelowWriteBufferLowWatermark();\n  }\n}\n\nvoid FilterManager::setBufferLimit(uint32_t new_limit) {\n  ENVOY_STREAM_LOG(debug, \"setting buffer limit to {}\", *this, new_limit);\n  buffer_limit_ = new_limit;\n  if (buffered_request_data_) {\n    buffered_request_data_->setWatermarks(buffer_limit_);\n  }\n  if (buffered_response_data_) {\n    buffered_response_data_->setWatermarks(buffer_limit_);\n  }\n}\n\nbool FilterManager::createFilterChain() {\n  if (state_.created_filter_chain_) {\n    return false;\n  }\n  bool upgrade_rejected = false;\n  const HeaderEntry* upgrade = nullptr;\n  if (filter_manager_callbacks_.requestHeaders()) {\n    upgrade = filter_manager_callbacks_.requestHeaders()->get().Upgrade();\n\n    // Treat CONNECT requests as a special upgrade case.\n    if (!upgrade && HeaderUtility::isConnect(*filter_manager_callbacks_.requestHeaders())) {\n      upgrade = filter_manager_callbacks_.requestHeaders()->get().Method();\n    }\n  }\n\n  state_.created_filter_chain_ = true;\n  if (upgrade != nullptr) {\n    const Router::RouteEntry::UpgradeMap* upgrade_map = filter_manager_callbacks_.upgradeMap();\n\n    if (filter_chain_factory_.createUpgradeFilterChain(upgrade->value().getStringView(),\n                                                       upgrade_map, *this)) {\n      
filter_manager_callbacks_.upgradeFilterChainCreated();\n      return true;\n    } else {\n      upgrade_rejected = true;\n      // Fall through to the default filter chain. The function calling this\n      // will send a local reply indicating that the upgrade failed.\n    }\n  }\n\n  filter_chain_factory_.createFilterChain(*this);\n  return !upgrade_rejected;\n}\n\nvoid ActiveStreamDecoderFilter::requestDataDrained() {\n  // If this is called it means the call to requestDataTooLarge() was a\n  // streaming call, or a 413 would have been sent.\n  onDecoderFilterBelowWriteBufferLowWatermark();\n}\n\nvoid ActiveStreamDecoderFilter::onDecoderFilterBelowWriteBufferLowWatermark() {\n  parent_.filter_manager_callbacks_.onDecoderFilterBelowWriteBufferLowWatermark();\n}\n\nvoid ActiveStreamDecoderFilter::addDownstreamWatermarkCallbacks(\n    DownstreamWatermarkCallbacks& watermark_callbacks) {\n  // This is called exactly once per upstream-stream, by the router filter. Therefore, we\n  // expect the same callbacks to not be registered twice.\n  ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(),\n                   &watermark_callbacks) == parent_.watermark_callbacks_.end());\n  parent_.watermark_callbacks_.emplace(parent_.watermark_callbacks_.end(), &watermark_callbacks);\n  for (uint32_t i = 0; i < parent_.high_watermark_count_; ++i) {\n    watermark_callbacks.onAboveWriteBufferHighWatermark();\n  }\n}\n\nvoid ActiveStreamDecoderFilter::removeDownstreamWatermarkCallbacks(\n    DownstreamWatermarkCallbacks& watermark_callbacks) {\n  ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(),\n                   &watermark_callbacks) != parent_.watermark_callbacks_.end());\n  parent_.watermark_callbacks_.remove(&watermark_callbacks);\n}\n\nvoid ActiveStreamDecoderFilter::setDecoderBufferLimit(uint32_t limit) {\n  parent_.setBufferLimit(limit);\n}\n\nuint32_t 
ActiveStreamDecoderFilter::decoderBufferLimit() { return parent_.buffer_limit_; }\n\nbool ActiveStreamDecoderFilter::recreateStream() {\n  // Because the filter's and the HCM view of if the stream has a body and if\n  // the stream is complete may differ, re-check bytesReceived() to make sure\n  // there was no body from the HCM's point of view.\n  if (!complete() || parent_.stream_info_.bytesReceived() != 0) {\n    return false;\n  }\n\n  parent_.stream_info_.setResponseCodeDetails(\n      StreamInfo::ResponseCodeDetails::get().InternalRedirect);\n\n  parent_.filter_manager_callbacks_.recreateStream(parent_.stream_info_.filter_state_);\n\n  return true;\n}\n\nvoid ActiveStreamDecoderFilter::addUpstreamSocketOptions(\n    const Network::Socket::OptionsSharedPtr& options) {\n\n  Network::Socket::appendOptions(parent_.upstream_options_, options);\n}\n\nNetwork::Socket::OptionsSharedPtr ActiveStreamDecoderFilter::getUpstreamSocketOptions() const {\n  return parent_.upstream_options_;\n}\n\nvoid ActiveStreamDecoderFilter::requestRouteConfigUpdate(\n    Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) {\n  parent_.filter_manager_callbacks_.requestRouteConfigUpdate(std::move(route_config_updated_cb));\n}\n\nabsl::optional<Router::ConfigConstSharedPtr> ActiveStreamDecoderFilter::routeConfig() {\n  return parent_.filter_manager_callbacks_.routeConfig();\n}\n\nBuffer::WatermarkBufferPtr ActiveStreamEncoderFilter::createBuffer() {\n  auto buffer = new Buffer::WatermarkBuffer(\n      [this]() -> void { this->responseDataDrained(); },\n      [this]() -> void { this->responseDataTooLarge(); },\n      []() -> void { /* TODO(adisuissa): Handle overflow watermark */ });\n  buffer->setWatermarks(parent_.buffer_limit_);\n  return Buffer::WatermarkBufferPtr{buffer};\n}\nBuffer::WatermarkBufferPtr& ActiveStreamEncoderFilter::bufferedData() {\n  return parent_.buffered_response_data_;\n}\nbool ActiveStreamEncoderFilter::complete() { return 
parent_.state_.local_complete_; }\nbool ActiveStreamEncoderFilter::has100Continueheaders() {\n  return parent_.state_.has_continue_headers_ && !continue_headers_continued_;\n}\nvoid ActiveStreamEncoderFilter::do100ContinueHeaders() {\n  parent_.encode100ContinueHeaders(this, *parent_.filter_manager_callbacks_.continueHeaders());\n}\nvoid ActiveStreamEncoderFilter::doHeaders(bool end_stream) {\n  parent_.encodeHeaders(this, *parent_.filter_manager_callbacks_.responseHeaders(), end_stream);\n}\nvoid ActiveStreamEncoderFilter::doData(bool end_stream) {\n  parent_.encodeData(this, *parent_.buffered_response_data_, end_stream,\n                     FilterManager::FilterIterationStartState::CanStartFromCurrent);\n}\nvoid ActiveStreamEncoderFilter::drainSavedResponseMetadata() {\n  ASSERT(saved_response_metadata_ != nullptr);\n  for (auto& metadata_map : *getSavedResponseMetadata()) {\n    parent_.encodeMetadata(this, std::move(metadata_map));\n  }\n  getSavedResponseMetadata()->clear();\n}\n\nvoid ActiveStreamEncoderFilter::handleMetadataAfterHeadersCallback() {\n  // If we drain accumulated metadata, the iteration must start with the current filter.\n  const bool saved_state = iterate_from_current_filter_;\n  iterate_from_current_filter_ = true;\n  // If encodeHeaders() returns StopAllIteration, we should skip draining metadata, and wait\n  // for doMetadata() to drain the metadata after iteration continues.\n  if (!stoppedAll() && saved_response_metadata_ != nullptr &&\n      !getSavedResponseMetadata()->empty()) {\n    drainSavedResponseMetadata();\n  }\n  // Restores the original value of iterate_from_current_filter_.\n  iterate_from_current_filter_ = saved_state;\n}\nvoid ActiveStreamEncoderFilter::doTrailers() {\n  parent_.encodeTrailers(this, *parent_.filter_manager_callbacks_.responseTrailers());\n}\nbool ActiveStreamEncoderFilter::hasTrailers() {\n  return parent_.filter_manager_callbacks_.responseTrailers().has_value();\n}\nvoid 
ActiveStreamEncoderFilter::addEncodedData(Buffer::Instance& data, bool streaming) {\n  return parent_.addEncodedData(*this, data, streaming);\n}\n\nvoid ActiveStreamEncoderFilter::injectEncodedDataToFilterChain(Buffer::Instance& data,\n                                                               bool end_stream) {\n  // TODO(yosrym93): Check if this filter had previously stopped headers iteration.\n  // If so, it should be continued before injecting data.\n  parent_.encodeData(this, data, end_stream,\n                     FilterManager::FilterIterationStartState::CanStartFromCurrent);\n}\n\nResponseTrailerMap& ActiveStreamEncoderFilter::addEncodedTrailers() {\n  return parent_.addEncodedTrailers();\n}\n\nvoid ActiveStreamEncoderFilter::addEncodedMetadata(MetadataMapPtr&& metadata_map_ptr) {\n  return parent_.encodeMetadata(this, std::move(metadata_map_ptr));\n}\n\nvoid ActiveStreamEncoderFilter::onEncoderFilterAboveWriteBufferHighWatermark() {\n  ENVOY_STREAM_LOG(debug, \"Disabling upstream stream due to filter callbacks.\", parent_);\n  parent_.callHighWatermarkCallbacks();\n}\n\nvoid ActiveStreamEncoderFilter::onEncoderFilterBelowWriteBufferLowWatermark() {\n  ENVOY_STREAM_LOG(debug, \"Enabling upstream stream due to filter callbacks.\", parent_);\n  parent_.callLowWatermarkCallbacks();\n}\n\nvoid ActiveStreamEncoderFilter::setEncoderBufferLimit(uint32_t limit) {\n  parent_.setBufferLimit(limit);\n}\n\nuint32_t ActiveStreamEncoderFilter::encoderBufferLimit() { return parent_.buffer_limit_; }\n\nvoid ActiveStreamEncoderFilter::continueEncoding() { commonContinue(); }\n\nconst Buffer::Instance* ActiveStreamEncoderFilter::encodingBuffer() {\n  return parent_.buffered_response_data_.get();\n}\n\nvoid ActiveStreamEncoderFilter::modifyEncodingBuffer(\n    std::function<void(Buffer::Instance&)> callback) {\n  ASSERT(parent_.state_.latest_data_encoding_filter_ == this);\n  callback(*parent_.buffered_response_data_.get());\n}\n\nvoid 
ActiveStreamEncoderFilter::sendLocalReply(\n    Code code, absl::string_view body,\n    std::function<void(ResponseHeaderMap& headers)> modify_headers,\n    const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) {\n  parent_.sendLocalReply(parent_.state_.is_grpc_request_, code, body, modify_headers, grpc_status,\n                         details);\n}\n\nHttp1StreamEncoderOptionsOptRef ActiveStreamEncoderFilter::http1StreamEncoderOptions() {\n  // TODO(mattklein123): At some point we might want to actually wrap this interface but for now\n  // we give the filter direct access to the encoder options.\n  return parent_.filter_manager_callbacks_.http1StreamEncoderOptions();\n}\n\nvoid ActiveStreamEncoderFilter::responseDataTooLarge() {\n  if (parent_.state_.encoder_filters_streaming_) {\n    onEncoderFilterAboveWriteBufferHighWatermark();\n  } else {\n    parent_.filter_manager_callbacks_.onResponseDataTooLarge();\n\n    // In this case, sendLocalReply will either send a response directly to the encoder, or\n    // reset the stream.\n    parent_.sendLocalReply(\n        parent_.filter_manager_callbacks_.requestHeaders() &&\n            Grpc::Common::isGrpcRequestHeaders(*parent_.filter_manager_callbacks_.requestHeaders()),\n        Http::Code::InternalServerError, CodeUtility::toString(Http::Code::InternalServerError),\n        nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge);\n  }\n}\n\nvoid ActiveStreamEncoderFilter::responseDataDrained() {\n  onEncoderFilterBelowWriteBufferLowWatermark();\n}\n\nvoid ActiveStreamFilterBase::resetStream() { parent_.filter_manager_callbacks_.resetStream(); }\n\nuint64_t ActiveStreamFilterBase::streamId() const { return parent_.streamId(); }\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/filter_manager.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/dump_state_utils.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/headers.h\"\n#include \"common/local_reply/local_reply.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass FilterManager;\n\n/**\n * Base class wrapper for both stream encoder and decoder filters.\n */\nstruct ActiveStreamFilterBase : public virtual StreamFilterCallbacks,\n                                Logger::Loggable<Logger::Id::http> {\n  ActiveStreamFilterBase(FilterManager& parent, bool dual_filter)\n      : parent_(parent), iteration_state_(IterationState::Continue),\n        iterate_from_current_filter_(false), headers_continued_(false),\n        continue_headers_continued_(false), end_stream_(false), dual_filter_(dual_filter),\n        decode_headers_called_(false), encode_headers_called_(false) {}\n\n  // Functions in the following block are called after the filter finishes processing\n  // corresponding data. 
Those functions handle state updates and data storage (if needed)\n  // according to the status returned by filter's callback functions.\n  bool commonHandleAfter100ContinueHeadersCallback(FilterHeadersStatus status);\n  bool commonHandleAfterHeadersCallback(FilterHeadersStatus status, bool& end_stream,\n                                        bool& headers_only);\n  bool commonHandleAfterDataCallback(FilterDataStatus status, Buffer::Instance& provided_data,\n                                     bool& buffer_was_streaming);\n  bool commonHandleAfterTrailersCallback(FilterTrailersStatus status);\n\n  // Buffers provided_data.\n  void commonHandleBufferData(Buffer::Instance& provided_data);\n\n  // If iteration has stopped for all frame types, calls this function to buffer the data before\n  // the filter processes data. The function also updates streaming state.\n  void commonBufferDataIfStopAll(Buffer::Instance& provided_data, bool& buffer_was_streaming);\n\n  void commonContinue();\n  virtual bool canContinue() PURE;\n  virtual Buffer::WatermarkBufferPtr createBuffer() PURE;\n  virtual Buffer::WatermarkBufferPtr& bufferedData() PURE;\n  virtual bool complete() PURE;\n  virtual bool has100Continueheaders() PURE;\n  virtual void do100ContinueHeaders() PURE;\n  virtual void doHeaders(bool end_stream) PURE;\n  virtual void doData(bool end_stream) PURE;\n  virtual void doTrailers() PURE;\n  virtual bool hasTrailers() PURE;\n  virtual void doMetadata() PURE;\n  // TODO(soya3129): make this pure when adding impl to encoder filter.\n  virtual void handleMetadataAfterHeadersCallback() PURE;\n\n  // Http::StreamFilterCallbacks\n  const Network::Connection* connection() override;\n  Event::Dispatcher& dispatcher() override;\n  void resetStream() override;\n  Router::RouteConstSharedPtr route() override;\n  Router::RouteConstSharedPtr route(const Router::RouteCallback& cb) override;\n  Upstream::ClusterInfoConstSharedPtr clusterInfo() override;\n  void clearRouteCache() 
override;\n  uint64_t streamId() const override;\n  StreamInfo::StreamInfo& streamInfo() override;\n  Tracing::Span& activeSpan() override;\n  Tracing::Config& tracingConfig() override;\n  const ScopeTrackedObject& scope() override;\n\n  // Functions to set or get iteration state.\n  bool canIterate() { return iteration_state_ == IterationState::Continue; }\n  bool stoppedAll() {\n    return iteration_state_ == IterationState::StopAllBuffer ||\n           iteration_state_ == IterationState::StopAllWatermark;\n  }\n  void allowIteration() {\n    ASSERT(iteration_state_ != IterationState::Continue);\n    iteration_state_ = IterationState::Continue;\n  }\n  MetadataMapVector* getSavedRequestMetadata() {\n    if (saved_request_metadata_ == nullptr) {\n      saved_request_metadata_ = std::make_unique<MetadataMapVector>();\n    }\n    return saved_request_metadata_.get();\n  }\n  MetadataMapVector* getSavedResponseMetadata() {\n    if (saved_response_metadata_ == nullptr) {\n      saved_response_metadata_ = std::make_unique<MetadataMapVector>();\n    }\n    return saved_response_metadata_.get();\n  }\n\n  // A vector to save metadata when the current filter's [de|en]codeMetadata() can not be called,\n  // either because [de|en]codeHeaders() of the current filter returns StopAllIteration or because\n  // [de|en]codeHeaders() adds new metadata to [de|en]code, but we don't know\n  // [de|en]codeHeaders()'s return value yet. 
The storage is created on demand.\n  std::unique_ptr<MetadataMapVector> saved_request_metadata_{nullptr};\n  std::unique_ptr<MetadataMapVector> saved_response_metadata_{nullptr};\n  // The state of iteration.\n  enum class IterationState {\n    Continue,            // Iteration has not stopped for any frame type.\n    StopSingleIteration, // Iteration has stopped for headers, 100-continue, or data.\n    StopAllBuffer,       // Iteration has stopped for all frame types, and following data should\n                         // be buffered.\n    StopAllWatermark,    // Iteration has stopped for all frame types, and following data should\n                         // be buffered until high watermark is reached.\n  };\n  FilterManager& parent_;\n  IterationState iteration_state_;\n  // If the filter resumes iteration from a StopAllBuffer/Watermark state, the current filter\n  // hasn't parsed data and trailers. As a result, the filter iteration should start with the\n  // current filter instead of the next one. If true, filter iteration starts with the current\n  // filter. 
Otherwise, starts with the next filter in the chain.\n  bool iterate_from_current_filter_ : 1;\n  bool headers_continued_ : 1;\n  bool continue_headers_continued_ : 1;\n  // If true, end_stream is called for this filter.\n  bool end_stream_ : 1;\n  const bool dual_filter_ : 1;\n  bool decode_headers_called_ : 1;\n  bool encode_headers_called_ : 1;\n};\n\n/**\n * Wrapper for a stream decoder filter.\n */\nstruct ActiveStreamDecoderFilter : public ActiveStreamFilterBase,\n                                   public StreamDecoderFilterCallbacks,\n                                   LinkedObject<ActiveStreamDecoderFilter> {\n  ActiveStreamDecoderFilter(FilterManager& parent, StreamDecoderFilterSharedPtr filter,\n                            bool dual_filter)\n      : ActiveStreamFilterBase(parent, dual_filter), handle_(filter) {}\n\n  // ActiveStreamFilterBase\n  bool canContinue() override;\n  Buffer::WatermarkBufferPtr createBuffer() override;\n  Buffer::WatermarkBufferPtr& bufferedData() override;\n  bool complete() override;\n  bool has100Continueheaders() override { return false; }\n  void do100ContinueHeaders() override { NOT_REACHED_GCOVR_EXCL_LINE; }\n  void doHeaders(bool end_stream) override;\n  void doData(bool end_stream) override;\n  void doMetadata() override {\n    if (saved_request_metadata_ != nullptr) {\n      drainSavedRequestMetadata();\n    }\n  }\n  void doTrailers() override;\n  bool hasTrailers() override;\n\n  void drainSavedRequestMetadata();\n  // This function is called after the filter calls decodeHeaders() to drain accumulated metadata.\n  void handleMetadataAfterHeadersCallback() override;\n\n  // Http::StreamDecoderFilterCallbacks\n  void addDecodedData(Buffer::Instance& data, bool streaming) override;\n  void injectDecodedDataToFilterChain(Buffer::Instance& data, bool end_stream) override;\n  RequestTrailerMap& addDecodedTrailers() override;\n  MetadataMapVector& addDecodedMetadata() override;\n  void continueDecoding() override;\n  const 
Buffer::Instance* decodingBuffer() override;\n\n  void modifyDecodingBuffer(std::function<void(Buffer::Instance&)> callback) override;\n\n  void sendLocalReply(Code code, absl::string_view body,\n                      std::function<void(ResponseHeaderMap& headers)> modify_headers,\n                      const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                      absl::string_view details) override;\n  void encode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override;\n  void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream,\n                     absl::string_view details) override;\n  void encodeData(Buffer::Instance& data, bool end_stream) override;\n  void encodeTrailers(ResponseTrailerMapPtr&& trailers) override;\n  void encodeMetadata(MetadataMapPtr&& metadata_map_ptr) override;\n  void onDecoderFilterAboveWriteBufferHighWatermark() override;\n  void onDecoderFilterBelowWriteBufferLowWatermark() override;\n  void addDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks& watermark_callbacks) override;\n  void\n  removeDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks& watermark_callbacks) override;\n  void setDecoderBufferLimit(uint32_t limit) override;\n  uint32_t decoderBufferLimit() override;\n  bool recreateStream() override;\n\n  void addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr& options) override;\n\n  Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const override;\n\n  // Each decoder filter instance checks if the request passed to the filter is gRPC\n  // so that we can issue gRPC local responses to gRPC requests. 
Filter's decodeHeaders()\n  // called here may change the content type, so we must check it before the call.\n  FilterHeadersStatus decodeHeaders(RequestHeaderMap& headers, bool end_stream) {\n    is_grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers);\n    FilterHeadersStatus status = handle_->decodeHeaders(headers, end_stream);\n    return status;\n  }\n\n  void requestDataTooLarge();\n  void requestDataDrained();\n\n  void requestRouteConfigUpdate(\n      Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) override;\n  absl::optional<Router::ConfigConstSharedPtr> routeConfig();\n\n  StreamDecoderFilterSharedPtr handle_;\n  bool is_grpc_request_{};\n};\n\nusing ActiveStreamDecoderFilterPtr = std::unique_ptr<ActiveStreamDecoderFilter>;\n\n/**\n * Wrapper for a stream encoder filter.\n */\nstruct ActiveStreamEncoderFilter : public ActiveStreamFilterBase,\n                                   public StreamEncoderFilterCallbacks,\n                                   LinkedObject<ActiveStreamEncoderFilter> {\n  ActiveStreamEncoderFilter(FilterManager& parent, StreamEncoderFilterSharedPtr filter,\n                            bool dual_filter)\n      : ActiveStreamFilterBase(parent, dual_filter), handle_(filter) {}\n\n  // ActiveStreamFilterBase\n  bool canContinue() override { return true; }\n  Buffer::WatermarkBufferPtr createBuffer() override;\n  Buffer::WatermarkBufferPtr& bufferedData() override;\n  bool complete() override;\n  bool has100Continueheaders() override;\n  void do100ContinueHeaders() override;\n  void doHeaders(bool end_stream) override;\n  void doData(bool end_stream) override;\n  void drainSavedResponseMetadata();\n  void handleMetadataAfterHeadersCallback() override;\n\n  void doMetadata() override {\n    if (saved_response_metadata_ != nullptr) {\n      drainSavedResponseMetadata();\n    }\n  }\n  void doTrailers() override;\n  bool hasTrailers() override;\n\n  // Http::StreamEncoderFilterCallbacks\n  void 
addEncodedData(Buffer::Instance& data, bool streaming) override;\n  void injectEncodedDataToFilterChain(Buffer::Instance& data, bool end_stream) override;\n  ResponseTrailerMap& addEncodedTrailers() override;\n  void addEncodedMetadata(MetadataMapPtr&& metadata_map) override;\n  void onEncoderFilterAboveWriteBufferHighWatermark() override;\n  void onEncoderFilterBelowWriteBufferLowWatermark() override;\n  void setEncoderBufferLimit(uint32_t limit) override;\n  uint32_t encoderBufferLimit() override;\n  void continueEncoding() override;\n  const Buffer::Instance* encodingBuffer() override;\n  void modifyEncodingBuffer(std::function<void(Buffer::Instance&)> callback) override;\n  void sendLocalReply(Code code, absl::string_view body,\n                      std::function<void(ResponseHeaderMap& headers)> modify_headers,\n                      const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                      absl::string_view details) override;\n  Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override;\n\n  void responseDataTooLarge();\n  void responseDataDrained();\n\n  StreamEncoderFilterSharedPtr handle_;\n};\n\nusing ActiveStreamEncoderFilterPtr = std::unique_ptr<ActiveStreamEncoderFilter>;\n\n/**\n * Callbacks invoked by the FilterManager to pass filter data/events back to the caller.\n */\nclass FilterManagerCallbacks {\npublic:\n  virtual ~FilterManagerCallbacks() = default;\n\n  /**\n   * Called when the provided headers have been encoded by all the filters in the chain.\n   * @param response_headers the encoded headers.\n   * @param end_stream whether this is a header only response.\n   */\n  virtual void encodeHeaders(ResponseHeaderMap& response_headers, bool end_stream) PURE;\n\n  /**\n   * Called when the provided 100 Continue headers have been encoded by all the filters in the\n   * chain.\n   * @param response_headers the encoded headers.\n   */\n  virtual void encode100ContinueHeaders(ResponseHeaderMap& response_headers) 
PURE;\n\n  /**\n   * Called when the provided data has been encoded by all filters in the chain.\n   * @param data the encoded data.\n   * @param end_stream whether this is the end of the response.\n   */\n  virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE;\n\n  /**\n   * Called when the provided trailers have been encoded by all filters in the chain.\n   * @param trailers the encoded trailers.\n   */\n  virtual void encodeTrailers(ResponseTrailerMap& trailers) PURE;\n\n  /**\n   * Called when the provided metadata has been encoded by all filters in the chain.\n   * @param trailers the encoded trailers.\n   */\n  virtual void encodeMetadata(MetadataMapVector& metadata) PURE;\n\n  /**\n   * Injects request trailers into a stream that originally did not have request trailers.\n   */\n  virtual void setRequestTrailers(RequestTrailerMapPtr&& request_trailers) PURE;\n\n  /**\n   * Passes ownership of received continue headers to the parent. This may be called multiple times\n   * in the case of multiple upstream calls.\n   */\n  virtual void setContinueHeaders(ResponseHeaderMapPtr&& response_headers) PURE;\n\n  /**\n   * Passes ownership of received response headers to the parent. This may be called multiple times\n   * in the case of multiple upstream calls.\n   */\n  virtual void setResponseHeaders(ResponseHeaderMapPtr&& response_headers) PURE;\n\n  /**\n   * Passes ownership of received response trailers to the parent. 
This may be called multiple times\n   * in the case of multiple upstream calls.\n   */\n  virtual void setResponseTrailers(ResponseTrailerMapPtr&& response_trailers) PURE;\n\n  // TODO(snowp): We should consider moving filter access to headers/trailers to happen via the\n  // callbacks instead of via the encode/decode callbacks on the filters.\n\n  /**\n   * The downstream request headers if set.\n   */\n  virtual RequestHeaderMapOptRef requestHeaders() PURE;\n\n  /**\n   * The downstream request trailers if present.\n   */\n  virtual RequestTrailerMapOptRef requestTrailers() PURE;\n\n  /**\n   * Retrieves a pointer to the continue headers set via the call to setContinueHeaders.\n   */\n  virtual ResponseHeaderMapOptRef continueHeaders() PURE;\n\n  /**\n   * Retrieves a pointer to the response headers set via the last call to setResponseHeaders.\n   * Note that response headers might be set multiple times (e.g. if a local reply is issued after\n   * headers have been received but before headers have been encoded), so it is not safe in general\n   * to assume that any set of headers will be valid for the duration of a stream.\n   */\n  virtual ResponseHeaderMapOptRef responseHeaders() PURE;\n\n  /**\n   * Retrieves a pointer to the last response trailers set via setResponseTrailers.\n   * Note that response trailers might be set multiple times, so it is not safe in general to assume\n   * that any set of trailers will be valid for the duration of the stream.\n   */\n  virtual ResponseTrailerMapOptRef responseTrailers() PURE;\n\n  /**\n   * Called after encoding has completed.\n   */\n  virtual void endStream() PURE;\n\n  /**\n   * Called when the stream write buffer is no longer above the low watermark.\n   */\n  virtual void onDecoderFilterBelowWriteBufferLowWatermark() PURE;\n\n  /**\n   * Called when the stream write buffer is above above the high watermark.\n   */\n  virtual void onDecoderFilterAboveWriteBufferHighWatermark() PURE;\n\n  /**\n   * Called when the 
FilterManager creates an Upgrade filter chain.\n   */\n  virtual void upgradeFilterChainCreated() PURE;\n\n  /**\n   * Called when request activity indicates that the request timeout should be disarmed.\n   */\n  virtual void disarmRequestTimeout() PURE;\n\n  /**\n   * Called when stream activity indicates that the stream idle timeout should be reset.\n   */\n  virtual void resetIdleTimer() PURE;\n\n  /**\n   * Called when the stream should be re-created, e.g. for an internal redirect.\n   */\n  virtual void recreateStream(StreamInfo::FilterStateSharedPtr filter_state) PURE;\n\n  /**\n   * Called when the stream should be reset.\n   */\n  virtual void resetStream() PURE;\n\n  /**\n   * Returns the upgrade map for the current route entry.\n   */\n  virtual const Router::RouteEntry::UpgradeMap* upgradeMap() PURE;\n\n  /**\n   * Returns the cluster info for the current route entry.\n   */\n  virtual Upstream::ClusterInfoConstSharedPtr clusterInfo() PURE;\n\n  /**\n   * Returns the current route.\n   */\n  virtual Router::RouteConstSharedPtr route(const Router::RouteCallback& cb) PURE;\n\n  /**\n   * Clears the cached route.\n   */\n  virtual void clearRouteCache() PURE;\n\n  /**\n   * Returns the current route configuration.\n   */\n  virtual absl::optional<Router::ConfigConstSharedPtr> routeConfig() PURE;\n\n  /**\n   * Update the current route configuration.\n   */\n  virtual void\n  requestRouteConfigUpdate(Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) PURE;\n\n  /**\n   * Returns the current active span.\n   */\n  virtual Tracing::Span& activeSpan() PURE;\n\n  // TODO(snowp): It might make more sense to pass (optional?) 
counters to the FM instead of\n  // calling back out to the AS to record them.\n  /**\n   * Called when a stream fails due to the response data being too large.\n   */\n  virtual void onResponseDataTooLarge() PURE;\n\n  /**\n   * Called when a stream fails due to the request data being too large.\n   */\n  virtual void onRequestDataTooLarge() PURE;\n\n  /**\n   * Returns the Http1StreamEncoderOptions associated with the response encoder.\n   */\n  virtual Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() PURE;\n\n  /**\n   * Called when a local reply is made by the filter manager.\n   * @param code the response code of the local reply.\n   */\n  virtual void onLocalReply(Code code) PURE;\n\n  /**\n   * Returns the tracing configuration to use for this stream.\n   */\n  virtual Tracing::Config& tracingConfig() PURE;\n\n  /**\n   * Returns the tracked scope to use for this stream.\n   */\n  virtual const ScopeTrackedObject& scope() PURE;\n};\n\n/**\n * FilterManager manages decoding a request through a series of decoding filter and the encoding\n * of the resulting response.\n */\nclass FilterManager : public ScopeTrackedObject,\n                      FilterChainFactoryCallbacks,\n                      Logger::Loggable<Logger::Id::http> {\npublic:\n  FilterManager(FilterManagerCallbacks& filter_manager_callbacks, Event::Dispatcher& dispatcher,\n                const Network::Connection& connection, uint64_t stream_id, bool proxy_100_continue,\n                uint32_t buffer_limit, FilterChainFactory& filter_chain_factory,\n                const LocalReply::LocalReply& local_reply, Http::Protocol protocol,\n                TimeSource& time_source, StreamInfo::FilterStateSharedPtr parent_filter_state,\n                StreamInfo::FilterState::LifeSpan filter_state_life_span)\n      : filter_manager_callbacks_(filter_manager_callbacks), dispatcher_(dispatcher),\n        connection_(connection), stream_id_(stream_id), proxy_100_continue_(proxy_100_continue),\n 
       buffer_limit_(buffer_limit), filter_chain_factory_(filter_chain_factory),\n        local_reply_(local_reply),\n        stream_info_(protocol, time_source, parent_filter_state, filter_state_life_span) {}\n  ~FilterManager() override {\n    ASSERT(state_.destroyed_);\n    ASSERT(state_.filter_call_state_ == 0);\n  }\n\n  // ScopeTrackedObject\n  void dumpState(std::ostream& os, int indent_level = 0) const override {\n    const char* spaces = spacesForLevel(indent_level);\n    os << spaces << \"FilterManager \" << this << DUMP_MEMBER(state_.has_continue_headers_)\n       << DUMP_MEMBER(state_.decoding_headers_only_) << DUMP_MEMBER(state_.encoding_headers_only_)\n       << \"\\n\";\n\n    DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.requestHeaders());\n    DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.requestTrailers());\n    DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.responseHeaders());\n    DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.responseTrailers());\n    DUMP_DETAILS(&stream_info_);\n  }\n\n  // Http::FilterChainFactoryCallbacks\n  void addStreamDecoderFilter(StreamDecoderFilterSharedPtr filter) override {\n    addStreamDecoderFilterWorker(filter, false);\n  }\n  void addStreamEncoderFilter(StreamEncoderFilterSharedPtr filter) override {\n    addStreamEncoderFilterWorker(filter, false);\n  }\n  void addStreamFilter(StreamFilterSharedPtr filter) override {\n    addStreamDecoderFilterWorker(filter, true);\n    addStreamEncoderFilterWorker(filter, true);\n  }\n  void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) override;\n\n  void log() {\n    RequestHeaderMap* request_headers = nullptr;\n    if (filter_manager_callbacks_.requestHeaders()) {\n      request_headers = &filter_manager_callbacks_.requestHeaders()->get();\n    }\n    ResponseHeaderMap* response_headers = nullptr;\n    if (filter_manager_callbacks_.responseHeaders()) {\n      response_headers = &filter_manager_callbacks_.responseHeaders()->get();\n    }\n    
ResponseTrailerMap* response_trailers = nullptr;\n    if (filter_manager_callbacks_.responseTrailers()) {\n      response_trailers = &filter_manager_callbacks_.responseTrailers()->get();\n    }\n\n    for (const auto& log_handler : access_log_handlers_) {\n      log_handler->log(request_headers, response_headers, response_trailers, stream_info_);\n    }\n  }\n\n  void onStreamComplete() {\n    for (auto& filter : decoder_filters_) {\n      filter->handle_->onStreamComplete();\n    }\n\n    for (auto& filter : encoder_filters_) {\n      // Do not call onStreamComplete twice for dual registered filters.\n      if (!filter->dual_filter_) {\n        filter->handle_->onStreamComplete();\n      }\n    }\n  }\n\n  void destroyFilters() {\n    state_.destroyed_ = true;\n\n    for (auto& filter : decoder_filters_) {\n      filter->handle_->onDestroy();\n    }\n\n    for (auto& filter : encoder_filters_) {\n      // Do not call on destroy twice for dual registered filters.\n      if (!filter->dual_filter_) {\n        filter->handle_->onDestroy();\n      }\n    }\n  }\n\n  /**\n   * Decodes the provided headers starting at the first filter in the chain.\n   * @param headers the headers to decode.\n   * @param end_stream whether the request is header only.\n   */\n  void decodeHeaders(RequestHeaderMap& headers, bool end_stream) {\n    decodeHeaders(nullptr, headers, end_stream);\n  }\n\n  /**\n   * Decodes the provided data starting at the first filter in the chain.\n   * @param data the data to decode.\n   * @param end_stream whether this data is the end of the request.\n   */\n  void decodeData(Buffer::Instance& data, bool end_stream) {\n    decodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent);\n  }\n\n  /**\n   * Decodes the provided trailers starting at the first filter in the chain.\n   * @param trailers the trailers to decode.\n   */\n  void decodeTrailers(RequestTrailerMap& trailers) { decodeTrailers(nullptr, trailers); }\n\n  /**\n   * 
Decodes the provided metadata starting at the first filter in the chain.\n   * @param metadata_map the metadata to decode.\n   */\n  void decodeMetadata(MetadataMap& metadata_map) { decodeMetadata(nullptr, metadata_map); }\n\n  // TODO(snowp): Make private as filter chain construction is moved into FM.\n  void addStreamDecoderFilterWorker(StreamDecoderFilterSharedPtr filter, bool dual_filter);\n  void addStreamEncoderFilterWorker(StreamEncoderFilterSharedPtr filter, bool dual_filter);\n\n  void disarmRequestTimeout();\n\n  /**\n   * If end_stream is true, marks decoding as complete. This is a noop if end_stream is false.\n   * @param end_stream whether decoding is complete.\n   */\n  void maybeEndDecode(bool end_stream);\n\n  /**\n   * If end_stream is true, marks encoding as complete. This is a noop if end_stream is false.\n   * @param end_stream whether encoding is complete.\n   */\n  void maybeEndEncode(bool end_stream);\n\n  void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body,\n                      const std::function<void(ResponseHeaderMap& headers)>& modify_headers,\n                      const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                      absl::string_view details);\n  /**\n   * Sends a local reply by constructing a response and passing it through all the encoder\n   * filters. The resulting response will be passed out via the FilterManagerCallbacks.\n   */\n  void sendLocalReplyViaFilterChain(\n      bool is_grpc_request, Code code, absl::string_view body,\n      const std::function<void(ResponseHeaderMap& headers)>& modify_headers, bool is_head_request,\n      const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details);\n\n  /**\n   * Sends a local reply by constructing a response and skipping the encoder filters. 
The\n   * resulting response will be passed out via the FilterManagerCallbacks.\n   */\n  void sendDirectLocalReply(Code code, absl::string_view body,\n                            const std::function<void(ResponseHeaderMap& headers)>& modify_headers,\n                            bool is_head_request,\n                            const absl::optional<Grpc::Status::GrpcStatus> grpc_status);\n\n  // Possibly increases buffer_limit_ to the value of limit.\n  void setBufferLimit(uint32_t limit);\n\n  /**\n   * @return bool whether any above high watermark triggers are currently active\n   */\n  bool aboveHighWatermark() { return high_watermark_count_ != 0; }\n\n  // Pass on watermark callbacks to watermark subscribers. This boils down to passing watermark\n  // events for this stream and the downstream connection to the router filter.\n  void callHighWatermarkCallbacks();\n  void callLowWatermarkCallbacks();\n\n  void requestHeadersInitialized() {\n    if (Http::Headers::get().MethodValues.Head ==\n        filter_manager_callbacks_.requestHeaders()->get().getMethodValue()) {\n      state_.is_head_request_ = true;\n    }\n    state_.is_grpc_request_ =\n        Grpc::Common::isGrpcRequestHeaders(filter_manager_callbacks_.requestHeaders()->get());\n  }\n\n  /**\n   * Marks local processing as complete.\n   */\n  void setLocalComplete() { state_.local_complete_ = true; }\n\n  /**\n   * Whether the filters have been destroyed.\n   */\n  bool destroyed() const { return state_.destroyed_; }\n\n  /**\n   * Whether remote processing has been marked as complete.\n   */\n  bool remoteComplete() const { return state_.remote_complete_; }\n\n  /**\n   * Instructs the FilterManager to not create a filter chain. 
This makes it possible to issue\n   * a local reply without the overhead of creating and traversing the filters.\n   */\n  void skipFilterChainCreation() {\n    ASSERT(!state_.created_filter_chain_);\n    state_.created_filter_chain_ = true;\n  }\n\n  // TODO(snowp): This should probably return a StreamInfo instead of the impl.\n  StreamInfo::StreamInfoImpl& streamInfo() { return stream_info_; }\n  const StreamInfo::StreamInfoImpl& streamInfo() const { return stream_info_; }\n\n  // Set up the Encoder/Decoder filter chain.\n  bool createFilterChain();\n\n  const Network::Connection* connection() const { return &connection_; }\n\n  uint64_t streamId() const { return stream_id_; }\n\nprivate:\n  // Indicates which filter to start the iteration with.\n  enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent };\n\n  // Returns the encoder filter to start iteration with.\n  std::list<ActiveStreamEncoderFilterPtr>::iterator\n  commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream,\n                     FilterIterationStartState filter_iteration_start_state);\n  // Returns the decoder filter to start iteration with.\n  std::list<ActiveStreamDecoderFilterPtr>::iterator\n  commonDecodePrefix(ActiveStreamDecoderFilter* filter,\n                     FilterIterationStartState filter_iteration_start_state);\n  void addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data, bool streaming);\n  RequestTrailerMap& addDecodedTrailers();\n  MetadataMapVector& addDecodedMetadata();\n  // Helper function for the case where we have a header only request, but a filter adds a body\n  // to it.\n  void maybeContinueDecoding(\n      const std::list<ActiveStreamDecoderFilterPtr>::iterator& maybe_continue_data_entry);\n  void decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHeaderMap& headers, bool end_stream);\n  // Sends data through decoding filter chains. 
filter_iteration_start_state indicates which\n  // filter to start the iteration with.\n  void decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream,\n                  FilterIterationStartState filter_iteration_start_state);\n  void decodeTrailers(ActiveStreamDecoderFilter* filter, RequestTrailerMap& trailers);\n  void decodeMetadata(ActiveStreamDecoderFilter* filter, MetadataMap& metadata_map);\n  void addEncodedData(ActiveStreamEncoderFilter& filter, Buffer::Instance& data, bool streaming);\n  ResponseTrailerMap& addEncodedTrailers();\n  void encode100ContinueHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers);\n  // As with most of the encode functions, this runs encodeHeaders on various\n  // filters before calling encodeHeadersInternal which does final header munging and passes the\n  // headers to the encoder.\n  void maybeContinueEncoding(\n      const std::list<ActiveStreamEncoderFilterPtr>::iterator& maybe_continue_data_entry);\n  void encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers,\n                     bool end_stream);\n  // Sends data through encoding filter chains. filter_iteration_start_state indicates which\n  // filter to start the iteration with, and finally calls encodeDataInternal\n  // to update stats, do end stream bookkeeping, and send the data to encoder.\n  void encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream,\n                  FilterIterationStartState filter_iteration_start_state);\n  void encodeTrailers(ActiveStreamEncoderFilter* filter, ResponseTrailerMap& trailers);\n  void encodeMetadata(ActiveStreamEncoderFilter* filter, MetadataMapPtr&& metadata_map_ptr);\n\n  // Returns true if new metadata is decoded. Otherwise, returns false.\n  bool processNewlyAddedMetadata();\n\n  // Returns true if filter has stopped iteration for all frame types. 
Otherwise, returns false.\n  // filter_streaming is the variable to indicate if stream is streaming, and its value may be\n  // changed by the function.\n  bool handleDataIfStopAll(ActiveStreamFilterBase& filter, Buffer::Instance& data,\n                           bool& filter_streaming);\n\n  MetadataMapVector* getRequestMetadataMapVector() {\n    if (request_metadata_map_vector_ == nullptr) {\n      request_metadata_map_vector_ = std::make_unique<MetadataMapVector>();\n    }\n    return request_metadata_map_vector_.get();\n  }\n\n  FilterManagerCallbacks& filter_manager_callbacks_;\n  Event::Dispatcher& dispatcher_;\n  const Network::Connection& connection_;\n  const uint64_t stream_id_;\n  const bool proxy_100_continue_;\n\n  std::list<ActiveStreamDecoderFilterPtr> decoder_filters_;\n  std::list<ActiveStreamEncoderFilterPtr> encoder_filters_;\n  std::list<AccessLog::InstanceSharedPtr> access_log_handlers_;\n\n  // Stores metadata added in the decoding filter that is being processed. Will be cleared before\n  // processing the next filter. The storage is created on demand. 
We need to store metadata\n  // temporarily in the filter in case the filter has stopped all while processing headers.\n  std::unique_ptr<MetadataMapVector> request_metadata_map_vector_;\n  Buffer::WatermarkBufferPtr buffered_response_data_;\n  Buffer::WatermarkBufferPtr buffered_request_data_;\n  uint32_t buffer_limit_{0};\n  uint32_t high_watermark_count_{0};\n  std::list<DownstreamWatermarkCallbacks*> watermark_callbacks_;\n  Network::Socket::OptionsSharedPtr upstream_options_ =\n      std::make_shared<Network::Socket::Options>();\n\n  FilterChainFactory& filter_chain_factory_;\n  const LocalReply::LocalReply& local_reply_;\n  StreamInfo::StreamInfoImpl stream_info_;\n  // TODO(snowp): Once FM has been moved to its own file we'll make these private classes of FM,\n  // at which point they no longer need to be friends.\n  friend ActiveStreamFilterBase;\n  friend ActiveStreamDecoderFilter;\n  friend ActiveStreamEncoderFilter;\n\n  /**\n   * Flags that keep track of which filter calls are currently in progress.\n   */\n  // clang-format off\n    struct FilterCallState {\n      static constexpr uint32_t DecodeHeaders   = 0x01;\n      static constexpr uint32_t DecodeData      = 0x02;\n      static constexpr uint32_t DecodeTrailers  = 0x04;\n      static constexpr uint32_t EncodeHeaders   = 0x08;\n      static constexpr uint32_t EncodeData      = 0x10;\n      static constexpr uint32_t EncodeTrailers  = 0x20;\n      // Encode100ContinueHeaders is a bit of a special state as 100 continue\n      // headers may be sent during request processing. This state is only used\n      // to verify we do not encode100Continue headers more than once per\n      // filter.\n      static constexpr uint32_t Encode100ContinueHeaders  = 0x40;\n      // Used to indicate that we're processing the final [En|De]codeData frame,\n      // i.e. 
end_stream = true\n      static constexpr uint32_t LastDataFrame = 0x80;\n    };\n  // clang-format on\n\n  struct State {\n    State()\n        : remote_complete_(false), local_complete_(false), has_continue_headers_(false),\n          created_filter_chain_(false), is_head_request_(false), is_grpc_request_(false),\n          non_100_response_headers_encoded_(false) {}\n\n    uint32_t filter_call_state_{0};\n\n    bool remote_complete_ : 1;\n    bool local_complete_ : 1; // This indicates that local is complete prior to filter processing.\n                              // A filter can still stop the stream from being complete as seen\n                              // by the codec.\n    // By default, we will assume there are no 100-Continue headers. If encode100ContinueHeaders\n    // is ever called, this is set to true so commonContinue resumes processing the 100-Continue.\n    bool has_continue_headers_ : 1;\n    bool created_filter_chain_ : 1;\n    // These two are latched on initial header read, to determine if the original headers\n    // constituted a HEAD or gRPC request, respectively.\n    bool is_head_request_ : 1;\n    bool is_grpc_request_ : 1;\n    // Tracks if headers other than 100-Continue have been encoded to the codec.\n    bool non_100_response_headers_encoded_ : 1;\n\n    // The following 3 members are booleans rather than part of the space-saving bitfield as they\n    // are passed as arguments to functions expecting bools. 
Extend State using the bitfield\n    // where possible.\n    bool encoder_filters_streaming_{true};\n    bool decoder_filters_streaming_{true};\n    bool destroyed_{false};\n    // Whether a filter has indicated that the response should be treated as a headers only\n    // response.\n    bool encoding_headers_only_{false};\n    // Whether a filter has indicated that the request should be treated as a headers only\n    // request.\n    bool decoding_headers_only_{false};\n\n    // Used to track which filter is the latest filter that has received data.\n    ActiveStreamEncoderFilter* latest_data_encoding_filter_{};\n    ActiveStreamDecoderFilter* latest_data_decoding_filter_{};\n  };\n\n  State state_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/hash_policy.cc",
    "content": "#include \"common/http/hash_policy.h\"\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/common/regex.h\"\n#include \"common/http/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass HashMethodImplBase : public HashPolicyImpl::HashMethod {\npublic:\n  explicit HashMethodImplBase(bool terminal) : terminal_(terminal) {}\n\n  bool terminal() const override { return terminal_; }\n\nprivate:\n  const bool terminal_;\n};\n\nclass HeaderHashMethod : public HashMethodImplBase {\npublic:\n  HeaderHashMethod(const envoy::config::route::v3::RouteAction::HashPolicy::Header& header,\n                   bool terminal)\n      : HashMethodImplBase(terminal), header_name_(header.header_name()) {\n    if (header.has_regex_rewrite()) {\n      const auto& rewrite_spec = header.regex_rewrite();\n      regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern());\n      regex_rewrite_substitution_ = rewrite_spec.substitution();\n    }\n  }\n\n  absl::optional<uint64_t> evaluate(const Network::Address::Instance*,\n                                    const RequestHeaderMap& headers,\n                                    const HashPolicy::AddCookieCallback,\n                                    const StreamInfo::FilterStateSharedPtr) const override {\n    absl::optional<uint64_t> hash;\n\n    const HeaderEntry* header = headers.get(header_name_);\n    if (header) {\n      if (regex_rewrite_ != nullptr) {\n        hash = HashUtil::xxHash64(regex_rewrite_->replaceAll(header->value().getStringView(),\n                                                             regex_rewrite_substitution_));\n      } else {\n        hash = HashUtil::xxHash64(header->value().getStringView());\n      }\n    }\n    return hash;\n  }\n\nprivate:\n  const LowerCaseString header_name_;\n  Regex::CompiledMatcherPtr regex_rewrite_{};\n  std::string 
regex_rewrite_substitution_{};\n};\n\nclass CookieHashMethod : public HashMethodImplBase {\npublic:\n  CookieHashMethod(const std::string& key, const std::string& path,\n                   const absl::optional<std::chrono::seconds>& ttl, bool terminal)\n      : HashMethodImplBase(terminal), key_(key), path_(path), ttl_(ttl) {}\n\n  absl::optional<uint64_t> evaluate(const Network::Address::Instance*,\n                                    const RequestHeaderMap& headers,\n                                    const HashPolicy::AddCookieCallback add_cookie,\n                                    const StreamInfo::FilterStateSharedPtr) const override {\n    absl::optional<uint64_t> hash;\n    std::string value = Utility::parseCookieValue(headers, key_);\n    if (value.empty() && ttl_.has_value()) {\n      value = add_cookie(key_, path_, ttl_.value());\n      hash = HashUtil::xxHash64(value);\n\n    } else if (!value.empty()) {\n      hash = HashUtil::xxHash64(value);\n    }\n    return hash;\n  }\n\nprivate:\n  const std::string key_;\n  const std::string path_;\n  const absl::optional<std::chrono::seconds> ttl_;\n};\n\nclass IpHashMethod : public HashMethodImplBase {\npublic:\n  IpHashMethod(bool terminal) : HashMethodImplBase(terminal) {}\n\n  absl::optional<uint64_t> evaluate(const Network::Address::Instance* downstream_addr,\n                                    const RequestHeaderMap&, const HashPolicy::AddCookieCallback,\n                                    const StreamInfo::FilterStateSharedPtr) const override {\n    if (downstream_addr == nullptr) {\n      return absl::nullopt;\n    }\n    auto* downstream_ip = downstream_addr->ip();\n    if (downstream_ip == nullptr) {\n      return absl::nullopt;\n    }\n    const auto& downstream_addr_str = downstream_ip->addressAsString();\n    if (downstream_addr_str.empty()) {\n      return absl::nullopt;\n    }\n    return HashUtil::xxHash64(downstream_addr_str);\n  }\n};\n\nclass QueryParameterHashMethod : public 
HashMethodImplBase {\npublic:\n  QueryParameterHashMethod(const std::string& parameter_name, bool terminal)\n      : HashMethodImplBase(terminal), parameter_name_(parameter_name) {}\n\n  absl::optional<uint64_t> evaluate(const Network::Address::Instance*,\n                                    const RequestHeaderMap& headers,\n                                    const HashPolicy::AddCookieCallback,\n                                    const StreamInfo::FilterStateSharedPtr) const override {\n    absl::optional<uint64_t> hash;\n\n    const HeaderEntry* header = headers.Path();\n    if (header) {\n      Http::Utility::QueryParams query_parameters =\n          Http::Utility::parseQueryString(header->value().getStringView());\n      const auto& iter = query_parameters.find(parameter_name_);\n      if (iter != query_parameters.end()) {\n        hash = HashUtil::xxHash64(iter->second);\n      }\n    }\n    return hash;\n  }\n\nprivate:\n  const std::string parameter_name_;\n};\n\nclass FilterStateHashMethod : public HashMethodImplBase {\npublic:\n  FilterStateHashMethod(const std::string& key, bool terminal)\n      : HashMethodImplBase(terminal), key_(key) {}\n\n  absl::optional<uint64_t>\n  evaluate(const Network::Address::Instance*, const RequestHeaderMap&,\n           const HashPolicy::AddCookieCallback,\n           const StreamInfo::FilterStateSharedPtr filter_state) const override {\n    if (filter_state->hasData<Hashable>(key_)) {\n      return filter_state->getDataReadOnly<Hashable>(key_).hash();\n    }\n    return absl::nullopt;\n  }\n\nprivate:\n  const std::string key_;\n};\n\nHashPolicyImpl::HashPolicyImpl(\n    absl::Span<const envoy::config::route::v3::RouteAction::HashPolicy* const> hash_policies) {\n\n  hash_impls_.reserve(hash_policies.size());\n  for (auto* hash_policy : hash_policies) {\n    switch (hash_policy->policy_specifier_case()) {\n    case envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::kHeader:\n      
hash_impls_.emplace_back(\n          new HeaderHashMethod(hash_policy->header(), hash_policy->terminal()));\n      break;\n    case envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::kCookie: {\n      absl::optional<std::chrono::seconds> ttl;\n      if (hash_policy->cookie().has_ttl()) {\n        ttl = std::chrono::seconds(hash_policy->cookie().ttl().seconds());\n      }\n      hash_impls_.emplace_back(new CookieHashMethod(hash_policy->cookie().name(),\n                                                    hash_policy->cookie().path(), ttl,\n                                                    hash_policy->terminal()));\n      break;\n    }\n    case envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::\n        kConnectionProperties:\n      if (hash_policy->connection_properties().source_ip()) {\n        hash_impls_.emplace_back(new IpHashMethod(hash_policy->terminal()));\n      }\n      break;\n    case envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::kQueryParameter:\n      hash_impls_.emplace_back(new QueryParameterHashMethod(hash_policy->query_parameter().name(),\n                                                            hash_policy->terminal()));\n      break;\n    case envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::kFilterState:\n      hash_impls_.emplace_back(\n          new FilterStateHashMethod(hash_policy->filter_state().key(), hash_policy->terminal()));\n      break;\n    default:\n      throw EnvoyException(\n          absl::StrCat(\"Unsupported hash policy \", hash_policy->policy_specifier_case()));\n    }\n  }\n}\n\nabsl::optional<uint64_t>\nHashPolicyImpl::generateHash(const Network::Address::Instance* downstream_addr,\n                             const RequestHeaderMap& headers, const AddCookieCallback add_cookie,\n                             const StreamInfo::FilterStateSharedPtr filter_state) const {\n  absl::optional<uint64_t> hash;\n  for (const 
HashMethodPtr& hash_impl : hash_impls_) {\n    const absl::optional<uint64_t> new_hash =\n        hash_impl->evaluate(downstream_addr, headers, add_cookie, filter_state);\n    if (new_hash) {\n      // Rotating the old value prevents duplicate hash rules from cancelling each other out\n      // and preserves all of the entropy\n      const uint64_t old_value = hash ? ((hash.value() << 1) | (hash.value() >> 63)) : 0;\n      hash = old_value ^ new_hash.value();\n    }\n    // If the policy is a terminal policy and a hash has been generated, ignore\n    // the rest of the hash policies.\n    if (hash_impl->terminal() && hash) {\n      break;\n    }\n  }\n  return hash;\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/hash_policy.h",
    "content": "#pragma once\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/hash_policy.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Implementation of HashPolicy that reads from the proto route config and only currently supports\n * hashing on an HTTP header.\n */\nclass HashPolicyImpl : public HashPolicy {\npublic:\n  explicit HashPolicyImpl(\n      absl::Span<const envoy::config::route::v3::RouteAction::HashPolicy* const> hash_policy);\n\n  // Http::HashPolicy\n  absl::optional<uint64_t>\n  generateHash(const Network::Address::Instance* downstream_addr, const RequestHeaderMap& headers,\n               const AddCookieCallback add_cookie,\n               const StreamInfo::FilterStateSharedPtr filter_state) const override;\n\n  class HashMethod {\n  public:\n    virtual ~HashMethod() = default;\n    virtual absl::optional<uint64_t>\n    evaluate(const Network::Address::Instance* downstream_addr, const RequestHeaderMap& headers,\n             const AddCookieCallback add_cookie,\n             const StreamInfo::FilterStateSharedPtr filter_state) const PURE;\n\n    // If the method is a terminal method, ignore rest of the hash policy chain.\n    virtual bool terminal() const PURE;\n  };\n\n  using HashMethodPtr = std::unique_ptr<HashMethod>;\n\nprivate:\n  std::vector<HashMethodPtr> hash_impls_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/header_list_view.cc",
    "content": "#include \"common/http/header_list_view.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nHeaderListView::HeaderListView(const HeaderMap& header_map) {\n  header_map.iterate([this](const Http::HeaderEntry& header) -> HeaderMap::Iterate {\n    keys_.emplace_back(std::reference_wrapper<const HeaderString>(header.key()));\n    values_.emplace_back(std::reference_wrapper<const HeaderString>(header.value()));\n    return HeaderMap::Iterate::Continue;\n  });\n}\n\n} // namespace Http\n} // namespace Envoy"
  },
  {
    "path": "source/common/http/header_list_view.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/http/header_map.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass HeaderListView {\npublic:\n  using HeaderStringRefs = std::vector<std::reference_wrapper<const HeaderString>>;\n\n  HeaderListView(const HeaderMap& header_map);\n  const HeaderStringRefs& keys() const { return keys_; }\n  const HeaderStringRefs& values() const { return values_; }\n\nprivate:\n  HeaderStringRefs keys_;\n  HeaderStringRefs values_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/header_map_impl.cc",
    "content": "#include \"common/http/header_map_impl.h\"\n\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/dump_state_utils.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nnamespace {\n// This includes the NULL (StringUtil::itoa technically only needs 21).\nconstexpr size_t MaxIntegerLength{32};\n\nvoid validateCapacity(uint64_t new_capacity) {\n  // If the resizing will cause buffer overflow due to hitting uint32_t::max, an OOM is likely\n  // imminent. Fast-fail rather than allow a buffer overflow attack (issue #1421)\n  RELEASE_ASSERT(new_capacity <= std::numeric_limits<uint32_t>::max(),\n                 \"Trying to allocate overly large headers.\");\n}\n\nabsl::string_view getStrView(const VariantHeader& buffer) {\n  return absl::get<absl::string_view>(buffer);\n}\n\nInlineHeaderVector& getInVec(VariantHeader& buffer) {\n  return absl::get<InlineHeaderVector>(buffer);\n}\n\nconst InlineHeaderVector& getInVec(const VariantHeader& buffer) {\n  return absl::get<InlineHeaderVector>(buffer);\n}\n} // namespace\n\n// Initialize as a Type::Inline\nHeaderString::HeaderString() : buffer_(InlineHeaderVector()) {\n  ASSERT((getInVec(buffer_).capacity()) >= MaxIntegerLength);\n  ASSERT(valid());\n}\n\n// Initialize as a Type::Reference\nHeaderString::HeaderString(const LowerCaseString& ref_value)\n    : buffer_(absl::string_view(ref_value.get().c_str(), ref_value.get().size())) {\n  ASSERT(valid());\n}\n\n// Initialize as a Type::Reference\nHeaderString::HeaderString(absl::string_view ref_value) : buffer_(ref_value) { ASSERT(valid()); }\n\nHeaderString::HeaderString(HeaderString&& move_value) noexcept\n    : buffer_(std::move(move_value.buffer_)) {\n  
move_value.clear();\n  ASSERT(valid());\n}\n\nbool HeaderString::valid() const { return validHeaderString(getStringView()); }\n\nvoid HeaderString::append(const char* data, uint32_t data_size) {\n  // Make sure the requested memory allocation is below uint32_t::max\n  const uint64_t new_capacity = static_cast<uint64_t>(data_size) + size();\n  validateCapacity(new_capacity);\n  ASSERT(validHeaderString(absl::string_view(data, data_size)));\n\n  switch (type()) {\n  case Type::Reference: {\n    // Rather than be too clever and optimize this uncommon case, we switch to\n    // Inline mode and copy.\n    const absl::string_view prev = getStrView(buffer_);\n    buffer_ = InlineHeaderVector();\n    // Assigning new_capacity to avoid resizing when appending the new data\n    getInVec(buffer_).reserve(new_capacity);\n    getInVec(buffer_).assign(prev.begin(), prev.end());\n    break;\n  }\n  case Type::Inline: {\n    getInVec(buffer_).reserve(new_capacity);\n    break;\n  }\n  }\n  getInVec(buffer_).insert(getInVec(buffer_).end(), data, data + data_size);\n}\n\nvoid HeaderString::rtrim() {\n  ASSERT(type() == Type::Inline);\n  absl::string_view original = getStringView();\n  absl::string_view rtrimmed = StringUtil::rtrim(original);\n  if (original.size() != rtrimmed.size()) {\n    getInVec(buffer_).resize(rtrimmed.size());\n  }\n}\n\nabsl::string_view HeaderString::getStringView() const {\n  if (type() == Type::Reference) {\n    return getStrView(buffer_);\n  }\n  ASSERT(type() == Type::Inline);\n  return {getInVec(buffer_).data(), getInVec(buffer_).size()};\n}\n\nvoid HeaderString::clear() {\n  if (type() == Type::Inline) {\n    getInVec(buffer_).clear();\n  }\n}\n\nvoid HeaderString::setCopy(const char* data, uint32_t size) {\n  ASSERT(validHeaderString(absl::string_view(data, size)));\n\n  if (!absl::holds_alternative<InlineHeaderVector>(buffer_)) {\n    // Switching from Type::Reference to Type::Inline\n    buffer_ = InlineHeaderVector();\n  }\n\n  
getInVec(buffer_).reserve(size);\n  getInVec(buffer_).assign(data, data + size);\n  ASSERT(valid());\n}\n\nvoid HeaderString::setCopy(absl::string_view view) {\n  this->setCopy(view.data(), static_cast<uint32_t>(view.size()));\n}\n\nvoid HeaderString::setInteger(uint64_t value) {\n  // Initialize the size to the max length, copy the actual data, and then\n  // reduce the size (but not the capacity) as needed\n  // Note: instead of using the inner_buffer, attempted the following:\n  // resize buffer_ to MaxIntegerLength, apply StringUtil::itoa to the buffer_.data(), and then\n  // resize buffer_ to int_length (the number of digits in value).\n  // However it was slower than the following approach.\n  char inner_buffer[MaxIntegerLength];\n  const uint32_t int_length = StringUtil::itoa(inner_buffer, MaxIntegerLength, value);\n\n  if (type() == Type::Reference) {\n    // Switching from Type::Reference to Type::Inline\n    buffer_ = InlineHeaderVector();\n  }\n  ASSERT((getInVec(buffer_).capacity()) > MaxIntegerLength);\n  getInVec(buffer_).assign(inner_buffer, inner_buffer + int_length);\n}\n\nvoid HeaderString::setReference(absl::string_view ref_value) {\n  buffer_ = ref_value;\n  ASSERT(valid());\n}\n\nuint32_t HeaderString::size() const {\n  if (type() == Type::Reference) {\n    return getStrView(buffer_).size();\n  }\n  ASSERT(type() == Type::Inline);\n  return getInVec(buffer_).size();\n}\n\nHeaderString::Type HeaderString::type() const {\n  // buffer_.index() is correlated with the order of Reference and Inline in the\n  // enum.\n  ASSERT(buffer_.index() == 0 || buffer_.index() == 1);\n  ASSERT((buffer_.index() == 0 && absl::holds_alternative<absl::string_view>(buffer_)) ||\n         (buffer_.index() != 0));\n  ASSERT((buffer_.index() == 1 && absl::holds_alternative<InlineHeaderVector>(buffer_)) ||\n         (buffer_.index() != 1));\n  return Type(buffer_.index());\n}\n\n// Specialization needed for HeaderMapImpl::HeaderList::insert() when key is 
LowerCaseString.\n// A fully specialized template must be defined once in the program, hence this may not be in\n// a header file.\ntemplate <> bool HeaderMapImpl::HeaderList::isPseudoHeader(const LowerCaseString& key) {\n  return key.get().c_str()[0] == ':';\n}\n\nbool HeaderMapImpl::HeaderList::maybeMakeMap() {\n  if (lazy_map_.empty()) {\n    if (headers_.size() < lazy_map_min_size_) {\n      return false;\n    }\n    // Add all entries from the list into the map.\n    for (auto node = headers_.begin(); node != headers_.end(); ++node) {\n      HeaderNodeVector& v = lazy_map_[node->key().getStringView()];\n      v.push_back(node);\n    }\n  }\n  return true;\n}\n\nsize_t HeaderMapImpl::HeaderList::remove(absl::string_view key) {\n  size_t removed_bytes = 0;\n  if (maybeMakeMap()) {\n    auto iter = lazy_map_.find(key);\n    if (iter != lazy_map_.end()) {\n      // Erase from the map, and all same key entries from the list.\n      HeaderNodeVector header_nodes = std::move(iter->second);\n      lazy_map_.erase(iter);\n      for (const HeaderNode& node : header_nodes) {\n        ASSERT(node->key() == key);\n        removed_bytes += node->key().size() + node->value().size();\n        erase(node, false /* remove_from_map */);\n      }\n    }\n  } else {\n    // Erase all same key entries from the list.\n    for (auto i = headers_.begin(); i != headers_.end();) {\n      if (i->key() == key) {\n        removed_bytes += i->key().size() + i->value().size();\n        i = erase(i, false /* remove_from_map */);\n      } else {\n        ++i;\n      }\n    }\n  }\n  return removed_bytes;\n}\n\nHeaderMapImpl::HeaderEntryImpl::HeaderEntryImpl(const LowerCaseString& key) : key_(key) {}\n\nHeaderMapImpl::HeaderEntryImpl::HeaderEntryImpl(const LowerCaseString& key, HeaderString&& value)\n    : key_(key), value_(std::move(value)) {}\n\nHeaderMapImpl::HeaderEntryImpl::HeaderEntryImpl(HeaderString&& key, HeaderString&& value)\n    : key_(std::move(key)), value_(std::move(value)) 
{}\n\nvoid HeaderMapImpl::HeaderEntryImpl::value(absl::string_view value) { value_.setCopy(value); }\n\nvoid HeaderMapImpl::HeaderEntryImpl::value(uint64_t value) { value_.setInteger(value); }\n\nvoid HeaderMapImpl::HeaderEntryImpl::value(const HeaderEntry& header) {\n  value(header.value().getStringView());\n}\n\ntemplate <> HeaderMapImpl::StaticLookupTable<RequestHeaderMap>::StaticLookupTable() {\n#define REGISTER_DEFAULT_REQUEST_HEADER(name)                                                      \\\n  CustomInlineHeaderRegistry::registerInlineHeader<RequestHeaderMap::header_map_type>(             \\\n      Headers::get().name);\n  INLINE_REQ_HEADERS(REGISTER_DEFAULT_REQUEST_HEADER)\n  INLINE_REQ_RESP_HEADERS(REGISTER_DEFAULT_REQUEST_HEADER)\n\n  finalizeTable();\n\n  // Special case where we map a legacy host header to :authority.\n  const auto handle =\n      CustomInlineHeaderRegistry::getInlineHeader<RequestHeaderMap::header_map_type>(\n          Headers::get().Host);\n  add(Headers::get().HostLegacy.get().c_str(), [handle](HeaderMapImpl& h) -> StaticLookupResponse {\n    return {&h.inlineHeaders()[handle.value().it_->second], &handle.value().it_->first};\n  });\n}\n\ntemplate <> HeaderMapImpl::StaticLookupTable<RequestTrailerMap>::StaticLookupTable() {\n  finalizeTable();\n}\n\ntemplate <> HeaderMapImpl::StaticLookupTable<ResponseHeaderMap>::StaticLookupTable() {\n#define REGISTER_RESPONSE_HEADER(name)                                                             \\\n  CustomInlineHeaderRegistry::registerInlineHeader<ResponseHeaderMap::header_map_type>(            \\\n      Headers::get().name);\n  INLINE_RESP_HEADERS(REGISTER_RESPONSE_HEADER)\n  INLINE_REQ_RESP_HEADERS(REGISTER_RESPONSE_HEADER)\n  INLINE_RESP_HEADERS_TRAILERS(REGISTER_RESPONSE_HEADER)\n\n  finalizeTable();\n}\n\ntemplate <> HeaderMapImpl::StaticLookupTable<ResponseTrailerMap>::StaticLookupTable() {\n#define REGISTER_RESPONSE_TRAILER(name)                                                          
  \\\n  CustomInlineHeaderRegistry::registerInlineHeader<ResponseTrailerMap::header_map_type>(           \\\n      Headers::get().name);\n  INLINE_RESP_HEADERS_TRAILERS(REGISTER_RESPONSE_TRAILER)\n\n  finalizeTable();\n}\n\nuint64_t HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view data,\n                                       absl::string_view delimiter) {\n  if (data.empty()) {\n    return 0;\n  }\n  uint64_t byte_size = 0;\n  if (!header.empty()) {\n    header.append(delimiter.data(), delimiter.size());\n    byte_size += delimiter.size();\n  }\n  header.append(data.data(), data.size());\n  return data.size() + byte_size;\n}\n\nvoid HeaderMapImpl::updateSize(uint64_t from_size, uint64_t to_size) {\n  ASSERT(cached_byte_size_ >= from_size);\n  cached_byte_size_ -= from_size;\n  cached_byte_size_ += to_size;\n}\n\nvoid HeaderMapImpl::addSize(uint64_t size) { cached_byte_size_ += size; }\n\nvoid HeaderMapImpl::subtractSize(uint64_t size) {\n  ASSERT(cached_byte_size_ >= size);\n  cached_byte_size_ -= size;\n}\n\nvoid HeaderMapImpl::copyFrom(HeaderMap& lhs, const HeaderMap& header_map) {\n  header_map.iterate([&lhs](const HeaderEntry& header) -> HeaderMap::Iterate {\n    // TODO(mattklein123) PERF: Avoid copying here if not necessary.\n    HeaderString key_string;\n    key_string.setCopy(header.key().getStringView());\n    HeaderString value_string;\n    value_string.setCopy(header.value().getStringView());\n\n    lhs.addViaMove(std::move(key_string), std::move(value_string));\n    return HeaderMap::Iterate::Continue;\n  });\n}\n\nnamespace {\n\n// This is currently only used in tests and is not optimized for performance.\nHeaderMap::ConstIterateCb\ncollectAllHeaders(std::vector<std::pair<absl::string_view, absl::string_view>>* dest) {\n  return [dest](const HeaderEntry& header) -> HeaderMap::Iterate {\n    dest->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView()));\n    return HeaderMap::Iterate::Continue;\n  
};\n};\n\n} // namespace\n\n// This is currently only used in tests and is not optimized for performance.\nbool HeaderMapImpl::operator==(const HeaderMap& rhs) const {\n  if (size() != rhs.size()) {\n    return false;\n  }\n\n  std::vector<std::pair<absl::string_view, absl::string_view>> rhs_headers;\n  rhs_headers.reserve(rhs.size());\n  rhs.iterate(collectAllHeaders(&rhs_headers));\n\n  auto i = headers_.begin();\n  auto j = rhs_headers.begin();\n  for (; i != headers_.end(); ++i, ++j) {\n    if (i->key() != j->first || i->value() != j->second) {\n      return false;\n    }\n  }\n\n  return true;\n}\n\nbool HeaderMapImpl::operator!=(const HeaderMap& rhs) const { return !operator==(rhs); }\n\nvoid HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) {\n  auto lookup = staticLookup(key.getStringView());\n  if (lookup.has_value()) {\n    key.clear();\n    if (*lookup.value().entry_ == nullptr) {\n      maybeCreateInline(lookup.value().entry_, *lookup.value().key_, std::move(value));\n    } else {\n      const uint64_t added_size =\n          appendToHeader((*lookup.value().entry_)->value(), value.getStringView());\n      addSize(added_size);\n      value.clear();\n    }\n  } else {\n    addSize(key.size() + value.size());\n    HeaderNode i = headers_.insert(std::move(key), std::move(value));\n    i->entry_ = i;\n  }\n}\n\nvoid HeaderMapImpl::addViaMove(HeaderString&& key, HeaderString&& value) {\n  insertByKey(std::move(key), std::move(value));\n}\n\nvoid HeaderMapImpl::addReference(const LowerCaseString& key, absl::string_view value) {\n  HeaderString ref_key(key);\n  HeaderString ref_value(value);\n  insertByKey(std::move(ref_key), std::move(ref_value));\n}\n\nvoid HeaderMapImpl::addReferenceKey(const LowerCaseString& key, uint64_t value) {\n  HeaderString ref_key(key);\n  HeaderString new_value;\n  new_value.setInteger(value);\n  insertByKey(std::move(ref_key), std::move(new_value));\n  ASSERT(new_value.empty()); // 
NOLINT(bugprone-use-after-move)\n}\n\nvoid HeaderMapImpl::addReferenceKey(const LowerCaseString& key, absl::string_view value) {\n  HeaderString ref_key(key);\n  HeaderString new_value;\n  new_value.setCopy(value);\n  insertByKey(std::move(ref_key), std::move(new_value));\n  ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move)\n}\n\nvoid HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) {\n  // In the case that the header is appended, we will perform a needless copy of the key and value.\n  // This is done on purpose to keep the code simple and should be rare.\n  HeaderString new_key;\n  new_key.setCopy(key.get());\n  HeaderString new_value;\n  new_value.setInteger(value);\n  insertByKey(std::move(new_key), std::move(new_value));\n  ASSERT(new_key.empty());   // NOLINT(bugprone-use-after-move)\n  ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move)\n}\n\nvoid HeaderMapImpl::addCopy(const LowerCaseString& key, absl::string_view value) {\n  // In the case that the header is appended, we will perform a needless copy of the key and value.\n  // This is done on purpose to keep the code simple and should be rare.\n  HeaderString new_key;\n  new_key.setCopy(key.get());\n  HeaderString new_value;\n  new_value.setCopy(value);\n  insertByKey(std::move(new_key), std::move(new_value));\n  ASSERT(new_key.empty());   // NOLINT(bugprone-use-after-move)\n  ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move)\n}\n\nvoid HeaderMapImpl::appendCopy(const LowerCaseString& key, absl::string_view value) {\n  // TODO(#9221): converge on and document a policy for coalescing multiple headers.\n  auto entry = getExisting(key);\n  if (!entry.empty()) {\n    const uint64_t added_size = appendToHeader(entry[0]->value(), value);\n    addSize(added_size);\n  } else {\n    addCopy(key, value);\n  }\n}\n\nvoid HeaderMapImpl::setReference(const LowerCaseString& key, absl::string_view value) {\n  remove(key);\n  addReference(key, value);\n}\n\nvoid 
HeaderMapImpl::setReferenceKey(const LowerCaseString& key, absl::string_view value) {\n  remove(key);\n  addReferenceKey(key, value);\n}\n\nvoid HeaderMapImpl::setCopy(const LowerCaseString& key, absl::string_view value) {\n  if (!Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.http_set_copy_replace_all_headers\")) {\n    auto entry = getExisting(key);\n    if (!entry.empty()) {\n      updateSize(entry[0]->value().size(), value.size());\n      entry[0]->value(value);\n    } else {\n      addCopy(key, value);\n    }\n  } else {\n    remove(key);\n    addCopy(key, value);\n  }\n}\n\nuint64_t HeaderMapImpl::byteSize() const { return cached_byte_size_; }\n\nvoid HeaderMapImpl::verifyByteSizeInternalForTest() const {\n  // Computes the total byte size by summing the byte size of the keys and values.\n  uint64_t byte_size = 0;\n  for (const HeaderEntryImpl& header : headers_) {\n    byte_size += header.key().size();\n    byte_size += header.value().size();\n  }\n  ASSERT(cached_byte_size_ == byte_size);\n}\n\nconst HeaderEntry* HeaderMapImpl::get(const LowerCaseString& key) const {\n  const auto result = getAll(key);\n  return result.empty() ? nullptr : result[0];\n}\n\nHeaderMap::GetResult HeaderMapImpl::getAll(const LowerCaseString& key) const {\n  return HeaderMap::GetResult(const_cast<HeaderMapImpl*>(this)->getExisting(key));\n}\n\nHeaderMap::NonConstGetResult HeaderMapImpl::getExisting(const LowerCaseString& key) {\n  // Attempt a trie lookup first to see if the user is requesting an O(1) header. 
This may be\n  // relatively common in certain header matching / routing patterns.\n  // TODO(mattklein123): Add inline handle support directly to the header matcher code to support\n  // this use case more directly.\n  HeaderMap::NonConstGetResult ret;\n  auto lookup = staticLookup(key.get());\n  if (lookup.has_value()) {\n    if (*lookup.value().entry_ != nullptr) {\n      ret.push_back(*lookup.value().entry_);\n    }\n    return ret;\n  }\n\n  // If the requested header is not an O(1) header try using the lazy map to\n  // search for it instead of iterating the headers list.\n  if (headers_.maybeMakeMap()) {\n    HeaderList::HeaderLazyMap::iterator iter = headers_.mapFind(key.get());\n    if (iter != headers_.mapEnd()) {\n      const HeaderList::HeaderNodeVector& v = iter->second;\n      ASSERT(!v.empty()); // It's impossible to have a map entry with an empty vector as its value.\n      for (const auto& values_it : v) {\n        // Convert the iterated value to a HeaderEntry*.\n        ret.push_back(&(*values_it));\n      }\n    }\n    return ret;\n  }\n\n  // If the requested header is not an O(1) header and the lazy map is not in use, we do a full\n  // scan. 
Doing the trie lookup is wasteful in the miss case, but is present for code consistency\n  // with other functions that do similar things.\n  for (HeaderEntryImpl& header : headers_) {\n    if (header.key() == key.get().c_str()) {\n      ret.push_back(&header);\n    }\n  }\n\n  return ret;\n}\n\nvoid HeaderMapImpl::iterate(HeaderMap::ConstIterateCb cb) const {\n  for (const HeaderEntryImpl& header : headers_) {\n    if (cb(header) == HeaderMap::Iterate::Break) {\n      break;\n    }\n  }\n}\n\nvoid HeaderMapImpl::iterateReverse(HeaderMap::ConstIterateCb cb) const {\n  for (auto it = headers_.rbegin(); it != headers_.rend(); it++) {\n    if (cb(*it) == HeaderMap::Iterate::Break) {\n      break;\n    }\n  }\n}\n\nvoid HeaderMapImpl::clear() {\n  clearInline();\n  headers_.clear();\n  cached_byte_size_ = 0;\n}\n\nsize_t HeaderMapImpl::removeIf(const HeaderMap::HeaderMatchPredicate& predicate) {\n  const size_t old_size = headers_.size();\n  headers_.removeIf([&predicate, this](const HeaderEntryImpl& entry) {\n    const bool to_remove = predicate(entry);\n    if (to_remove) {\n      // If this header should be removed, make sure any references in the\n      // static lookup table are cleared as well.\n      auto lookup = staticLookup(entry.key().getStringView());\n      if (lookup.has_value()) {\n        if (lookup.value().entry_) {\n          const uint32_t key_value_size =\n              (*lookup.value().entry_)->key().size() + (*lookup.value().entry_)->value().size();\n          subtractSize(key_value_size);\n          *lookup.value().entry_ = nullptr;\n        }\n      } else {\n        subtractSize(entry.key().size() + entry.value().size());\n      }\n    }\n    return to_remove;\n  });\n  return old_size - headers_.size();\n}\n\nsize_t HeaderMapImpl::remove(const LowerCaseString& key) {\n  const size_t old_size = headers_.size();\n  auto lookup = staticLookup(key.get());\n  if (lookup.has_value()) {\n    removeInline(lookup.value().entry_);\n  } else {\n    
subtractSize(headers_.remove(key.get()));\n  }\n  return old_size - headers_.size();\n}\n\nsize_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) {\n  return HeaderMapImpl::removeIf([&prefix](const HeaderEntry& entry) -> bool {\n    return absl::StartsWith(entry.key().getStringView(), prefix.get());\n  });\n}\n\nvoid HeaderMapImpl::dumpState(std::ostream& os, int indent_level) const {\n  iterate([&os,\n           spaces = spacesForLevel(indent_level)](const HeaderEntry& header) -> HeaderMap::Iterate {\n    os << spaces << \"'\" << header.key().getStringView() << \"', '\" << header.value().getStringView()\n       << \"'\\n\";\n    return HeaderMap::Iterate::Continue;\n  });\n}\n\nHeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl** entry,\n                                                                 const LowerCaseString& key) {\n  if (*entry) {\n    return **entry;\n  }\n\n  addSize(key.get().size());\n  HeaderNode i = headers_.insert(key);\n  i->entry_ = i;\n  *entry = &(*i);\n  return **entry;\n}\n\nHeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl** entry,\n                                                                 const LowerCaseString& key,\n                                                                 HeaderString&& value) {\n  if (*entry) {\n    value.clear();\n    return **entry;\n  }\n\n  addSize(key.get().size() + value.size());\n  HeaderNode i = headers_.insert(key, std::move(value));\n  i->entry_ = i;\n  *entry = &(*i);\n  return **entry;\n}\n\nsize_t HeaderMapImpl::removeInline(HeaderEntryImpl** ptr_to_entry) {\n  if (!*ptr_to_entry) {\n    return 0;\n  }\n\n  HeaderEntryImpl* entry = *ptr_to_entry;\n  const uint64_t size_to_subtract = entry->entry_->key().size() + entry->entry_->value().size();\n  subtractSize(size_to_subtract);\n  *ptr_to_entry = nullptr;\n  headers_.erase(entry->entry_, true);\n  return 1;\n}\n\nnamespace {\ntemplate <class 
T>\nHeaderMapImplUtility::HeaderMapImplInfo makeHeaderMapImplInfo(absl::string_view name) {\n  // Constructing a header map implementation will force the custom headers and sizing to be\n  // finalized, so do that first.\n  auto header_map = T::create();\n\n  HeaderMapImplUtility::HeaderMapImplInfo info;\n  info.name_ = std::string(name);\n  info.size_ = T::inlineHeadersSize() + sizeof(T);\n  for (const auto& header : CustomInlineHeaderRegistry::headers<T::header_map_type>()) {\n    info.registered_headers_.push_back(header.first.get());\n  }\n  return info;\n}\n} // namespace\n\nstd::vector<HeaderMapImplUtility::HeaderMapImplInfo>\nHeaderMapImplUtility::getAllHeaderMapImplInfo() {\n  std::vector<HeaderMapImplUtility::HeaderMapImplInfo> ret;\n  ret.push_back(makeHeaderMapImplInfo<RequestHeaderMapImpl>(\"request header map\"));\n  ret.push_back(makeHeaderMapImplInfo<RequestTrailerMapImpl>(\"request trailer map\"));\n  ret.push_back(makeHeaderMapImplInfo<ResponseHeaderMapImpl>(\"response header map\"));\n  ret.push_back(makeHeaderMapImplInfo<ResponseTrailerMapImpl>(\"response trailer map\"));\n  return ret;\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/header_map_impl.h",
    "content": "#pragma once\n\n#include <array>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n#include <type_traits>\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/non_copyable.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/runtime/runtime_features.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * These are definitions of all of the inline header access functions described inside header_map.h\n */\n#define DEFINE_INLINE_HEADER_FUNCS(name)                                                           \\\npublic:                                                                                            \\\n  const HeaderEntry* name() const override { return getInline(HeaderHandles::get().name); }        \\\n  void append##name(absl::string_view data, absl::string_view delimiter) override {                \\\n    appendInline(HeaderHandles::get().name, data, delimiter);                                      \\\n  }                                                                                                \\\n  void setReference##name(absl::string_view value) override {                                      \\\n    setReferenceInline(HeaderHandles::get().name, value);                                          \\\n  }                                                                                                \\\n  void set##name(absl::string_view value) override {                                               \\\n    setInline(HeaderHandles::get().name, value);                                                   \\\n  }                                                                                                \\\n  void set##name(uint64_t value) override { setInline(HeaderHandles::get().name, value); }         \\\n  size_t remove##name() override { return removeInline(HeaderHandles::get().name); }               \\\n  absl::string_view get##name##Value() const 
override {                                            \\\n    return getInlineValue(HeaderHandles::get().name);                                              \\\n  }\n\n/**\n * Implementation of Http::HeaderMap. This is heavily optimized for performance. Roughly, when\n * headers are added to the map by string, we do a trie lookup to see if it's one of the O(1)\n * headers. If it is, we store a reference to it that can be accessed later directly via direct\n * method access. Most high performance paths use O(1) direct method access. In general, we try to\n * copy as little as possible and allocate as little as possible in any of the paths.\n */\nclass HeaderMapImpl : NonCopyable {\npublic:\n  virtual ~HeaderMapImpl() = default;\n\n  // The following \"constructors\" call virtual functions during construction and must use the\n  // static factory pattern.\n  static void copyFrom(HeaderMap& lhs, const HeaderMap& rhs);\n  // The value_type of iterator must be pair, and the first value of them must be LowerCaseString.\n  // If not, it won't be compiled successfully.\n  template <class It> static void initFromInitList(HeaderMap& new_header_map, It begin, It end) {\n    for (auto it = begin; it != end; ++it) {\n      static_assert(std::is_same<decltype(it->first), LowerCaseString>::value,\n                    \"iterator must be pair and the first value of them must be LowerCaseString\");\n      HeaderString key_string;\n      key_string.setCopy(it->first.get().c_str(), it->first.get().size());\n      HeaderString value_string;\n      value_string.setCopy(it->second.c_str(), it->second.size());\n      new_header_map.addViaMove(std::move(key_string), std::move(value_string));\n    }\n  }\n\n  // Performs a manual byte size count for test verification.\n  void verifyByteSizeInternalForTest() const;\n\n  // Note: This class does not actually implement Http::HeaderMap to avoid virtual inheritance in\n  // the derived classes. 
Instead, it is used as a mix-in class for TypedHeaderMapImpl below. This\n  // both avoid virtual inheritance and allows the concrete final header maps to use a variable\n  // length member at the end.\n  bool operator==(const HeaderMap& rhs) const;\n  bool operator!=(const HeaderMap& rhs) const;\n  void addViaMove(HeaderString&& key, HeaderString&& value);\n  void addReference(const LowerCaseString& key, absl::string_view value);\n  void addReferenceKey(const LowerCaseString& key, uint64_t value);\n  void addReferenceKey(const LowerCaseString& key, absl::string_view value);\n  void addCopy(const LowerCaseString& key, uint64_t value);\n  void addCopy(const LowerCaseString& key, absl::string_view value);\n  void appendCopy(const LowerCaseString& key, absl::string_view value);\n  void setReference(const LowerCaseString& key, absl::string_view value);\n  void setReferenceKey(const LowerCaseString& key, absl::string_view value);\n  void setCopy(const LowerCaseString& key, absl::string_view value);\n  uint64_t byteSize() const;\n  const HeaderEntry* get(const LowerCaseString& key) const;\n  HeaderMap::GetResult getAll(const LowerCaseString& key) const;\n  void iterate(HeaderMap::ConstIterateCb cb) const;\n  void iterateReverse(HeaderMap::ConstIterateCb cb) const;\n  void clear();\n  size_t remove(const LowerCaseString& key);\n  size_t removeIf(const HeaderMap::HeaderMatchPredicate& predicate);\n  size_t removePrefix(const LowerCaseString& key);\n  size_t size() const { return headers_.size(); }\n  bool empty() const { return headers_.empty(); }\n  void dumpState(std::ostream& os, int indent_level = 0) const;\n\nprotected:\n  struct HeaderEntryImpl : public HeaderEntry, NonCopyable {\n    HeaderEntryImpl(const LowerCaseString& key);\n    HeaderEntryImpl(const LowerCaseString& key, HeaderString&& value);\n    HeaderEntryImpl(HeaderString&& key, HeaderString&& value);\n\n    // HeaderEntry\n    const HeaderString& key() const override { return key_; }\n    void 
value(absl::string_view value) override;\n    void value(uint64_t value) override;\n    void value(const HeaderEntry& header) override;\n    const HeaderString& value() const override { return value_; }\n    HeaderString& value() override { return value_; }\n\n    HeaderString key_;\n    HeaderString value_;\n    std::list<HeaderEntryImpl>::iterator entry_;\n  };\n  using HeaderNode = std::list<HeaderEntryImpl>::iterator;\n\n  /**\n   * This is the static lookup table that is used to determine whether a header is one of the O(1)\n   * headers. This uses a trie for lookup time at most equal to the size of the incoming string.\n   */\n  struct StaticLookupResponse {\n    HeaderEntryImpl** entry_;\n    const LowerCaseString* key_;\n  };\n\n  /**\n   * Base class for a static lookup table that converts a string key into an O(1) header.\n   */\n  template <class Interface>\n  struct StaticLookupTable\n      : public TrieLookupTable<std::function<StaticLookupResponse(HeaderMapImpl&)>> {\n    StaticLookupTable();\n\n    void finalizeTable() {\n      CustomInlineHeaderRegistry::finalize<Interface::header_map_type>();\n      auto& headers = CustomInlineHeaderRegistry::headers<Interface::header_map_type>();\n      size_ = headers.size();\n      for (const auto& header : headers) {\n        this->add(header.first.get().c_str(), [&header](HeaderMapImpl& h) -> StaticLookupResponse {\n          return {&h.inlineHeaders()[header.second], &header.first};\n        });\n      }\n    }\n\n    static size_t size() {\n      // The size of the lookup table is finalized when the singleton lookup table is created. This\n      // allows for late binding of custom headers as well as envoy header prefix changes. 
This\n      // does mean that once the first header map is created of this type, no further changes are\n      // possible.\n      // TODO(mattklein123): If we decide to keep this implementation, it is conceivable that header\n      // maps could be created by an API factory that is owned by the listener/HCM, thus making\n      // O(1) header delivery over xDS possible.\n      return ConstSingleton<StaticLookupTable>::get().size_;\n    }\n\n    static absl::optional<StaticLookupResponse> lookup(HeaderMapImpl& header_map,\n                                                       absl::string_view key) {\n      const auto& entry = ConstSingleton<StaticLookupTable>::get().find(key);\n      if (entry != nullptr) {\n        return entry(header_map);\n      } else {\n        return absl::nullopt;\n      }\n    }\n\n    size_t size_;\n  };\n\n  /**\n   * List of HeaderEntryImpl that keeps the pseudo headers (key starting with ':') in the front\n   * of the list (as required by nghttp2) and otherwise maintains insertion order.\n   * When the list size is greater or equal to the envoy.http.headermap.lazy_map_min_size runtime\n   * feature value (or uint32_t max value if not set), all headers are added to a map, to allow\n   * fast access given a header key. Once the map is initialized, it will be used even if the number\n   * of headers decreases below the threshold.\n   *\n   * Note: the internal iterators held in fields make this unsafe to copy and move, since the\n   * reference to end() is not preserved across a move (see Notes in\n   * https://en.cppreference.com/w/cpp/container/list/list). 
The NonCopyable will suppress both copy\n   * and move constructors/assignment.\n   * TODO(htuch): Maybe we want this to movable one day; for now, our header map moves happen on\n   * HeaderMapPtr, so the performance impact should not be evident.\n   */\n  class HeaderList : NonCopyable {\n  public:\n    using HeaderNodeVector = absl::InlinedVector<HeaderNode, 1>;\n    using HeaderLazyMap = absl::flat_hash_map<absl::string_view, HeaderNodeVector>;\n\n    HeaderList()\n        : pseudo_headers_end_(headers_.end()),\n          lazy_map_min_size_(static_cast<uint32_t>(Runtime::getInteger(\n              \"envoy.http.headermap.lazy_map_min_size\", std::numeric_limits<uint32_t>::max()))) {}\n\n    template <class Key> bool isPseudoHeader(const Key& key) {\n      return !key.getStringView().empty() && key.getStringView()[0] == ':';\n    }\n\n    template <class Key, class... Value> HeaderNode insert(Key&& key, Value&&... value) {\n      const bool is_pseudo_header = isPseudoHeader(key);\n      HeaderNode i = headers_.emplace(is_pseudo_header ? 
pseudo_headers_end_ : headers_.end(),\n                                      std::forward<Key>(key), std::forward<Value>(value)...);\n      if (!lazy_map_.empty()) {\n        lazy_map_[i->key().getStringView()].push_back(i);\n      }\n      if (!is_pseudo_header && pseudo_headers_end_ == headers_.end()) {\n        pseudo_headers_end_ = i;\n      }\n      return i;\n    }\n\n    HeaderNode erase(HeaderNode i, bool remove_from_map) {\n      if (pseudo_headers_end_ == i) {\n        pseudo_headers_end_++;\n      }\n      if (remove_from_map) {\n        lazy_map_.erase(i->key().getStringView());\n      }\n      return headers_.erase(i);\n    }\n\n    template <class UnaryPredicate> void removeIf(UnaryPredicate p) {\n      if (!lazy_map_.empty()) {\n        // Lazy map is used, iterate over its elements and remove those that satisfy the predicate\n        // from the map and from the list.\n        for (auto map_it = lazy_map_.begin(); map_it != lazy_map_.end();) {\n          auto& values_vec = map_it->second;\n          ASSERT(!values_vec.empty());\n          // The following call to std::remove_if removes the elements that satisfy the\n          // UnaryPredicate and shifts the vector elements, but does not resize the vector.\n          // The call to erase that follows erases the unneeded cells (from remove_pos to the\n          // end) and modifies the vector's size.\n          const auto remove_pos =\n              std::remove_if(values_vec.begin(), values_vec.end(), [&](HeaderNode it) {\n                if (p(*(it->entry_))) {\n                  // Remove the element from the list.\n                  if (pseudo_headers_end_ == it->entry_) {\n                    pseudo_headers_end_++;\n                  }\n                  headers_.erase(it);\n                  return true;\n                }\n                return false;\n              });\n          values_vec.erase(remove_pos, values_vec.end());\n\n          // If all elements were removed from the map entry, 
erase it.\n          if (values_vec.empty()) {\n            lazy_map_.erase(map_it++);\n          } else {\n            map_it++;\n          }\n        }\n      } else {\n        // The lazy map isn't used, iterate over the list elements and remove elements that satisfy\n        // the predicate.\n        headers_.remove_if([&](const HeaderEntryImpl& entry) {\n          const bool to_remove = p(entry);\n          if (to_remove) {\n            if (pseudo_headers_end_ == entry.entry_) {\n              pseudo_headers_end_++;\n            }\n          }\n          return to_remove;\n        });\n      }\n    }\n\n    /*\n     * Creates and populates a map if the number of headers is at least the\n     * envoy.http.headermap.lazy_map_min_size runtime feature value.\n     *\n     * @return if a map was created.\n     */\n    bool maybeMakeMap();\n\n    /*\n     * Removes a given key and its values from the HeaderList.\n     *\n     * @return the number of bytes that were removed.\n     */\n    size_t remove(absl::string_view key);\n\n    std::list<HeaderEntryImpl>::iterator begin() { return headers_.begin(); }\n    std::list<HeaderEntryImpl>::iterator end() { return headers_.end(); }\n    std::list<HeaderEntryImpl>::const_iterator begin() const { return headers_.begin(); }\n    std::list<HeaderEntryImpl>::const_iterator end() const { return headers_.end(); }\n    std::list<HeaderEntryImpl>::const_reverse_iterator rbegin() const { return headers_.rbegin(); }\n    std::list<HeaderEntryImpl>::const_reverse_iterator rend() const { return headers_.rend(); }\n    HeaderLazyMap::iterator mapFind(absl::string_view key) { return lazy_map_.find(key); }\n    HeaderLazyMap::iterator mapEnd() { return lazy_map_.end(); }\n    size_t size() const { return headers_.size(); }\n    bool empty() const { return headers_.empty(); }\n    void clear() {\n      headers_.clear();\n      pseudo_headers_end_ = headers_.end();\n      lazy_map_.clear();\n    }\n\n  private:\n    
std::list<HeaderEntryImpl> headers_;\n    HeaderNode pseudo_headers_end_;\n    // The number of headers threshold for lazy map usage.\n    const uint32_t lazy_map_min_size_;\n    HeaderLazyMap lazy_map_;\n  };\n\n  void insertByKey(HeaderString&& key, HeaderString&& value);\n  static uint64_t appendToHeader(HeaderString& header, absl::string_view data,\n                                 absl::string_view delimiter = \",\");\n  HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key);\n  HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key,\n                                     HeaderString&& value);\n  HeaderMap::NonConstGetResult getExisting(const LowerCaseString& key);\n  size_t removeInline(HeaderEntryImpl** entry);\n  void updateSize(uint64_t from_size, uint64_t to_size);\n  void addSize(uint64_t size);\n  void subtractSize(uint64_t size);\n  virtual absl::optional<StaticLookupResponse> staticLookup(absl::string_view) PURE;\n  virtual void clearInline() PURE;\n  virtual HeaderEntryImpl** inlineHeaders() PURE;\n\n  HeaderList headers_;\n  // This holds the internal byte size of the HeaderMap.\n  uint64_t cached_byte_size_ = 0;\n};\n\n/**\n * Typed derived classes for all header map types. This class implements the actual typed\n * interface and for the majority of methods just passes through to the HeaderMapImpl mix-in. 
Per\n * above, this avoids virtual inheritance.\n */\ntemplate <class Interface> class TypedHeaderMapImpl : public HeaderMapImpl, public Interface {\npublic:\n  // Implementation of Http::HeaderMap that passes through to HeaderMapImpl.\n  bool operator==(const HeaderMap& rhs) const override { return HeaderMapImpl::operator==(rhs); }\n  bool operator!=(const HeaderMap& rhs) const override { return HeaderMapImpl::operator!=(rhs); }\n  void addViaMove(HeaderString&& key, HeaderString&& value) override {\n    HeaderMapImpl::addViaMove(std::move(key), std::move(value));\n  }\n  void addReference(const LowerCaseString& key, absl::string_view value) override {\n    HeaderMapImpl::addReference(key, value);\n  }\n  void addReferenceKey(const LowerCaseString& key, uint64_t value) override {\n    HeaderMapImpl::addReferenceKey(key, value);\n  }\n  void addReferenceKey(const LowerCaseString& key, absl::string_view value) override {\n    HeaderMapImpl::addReferenceKey(key, value);\n  }\n  void addCopy(const LowerCaseString& key, uint64_t value) override {\n    HeaderMapImpl::addCopy(key, value);\n  }\n  void addCopy(const LowerCaseString& key, absl::string_view value) override {\n    HeaderMapImpl::addCopy(key, value);\n  }\n  void appendCopy(const LowerCaseString& key, absl::string_view value) override {\n    HeaderMapImpl::appendCopy(key, value);\n  }\n  void setReference(const LowerCaseString& key, absl::string_view value) override {\n    HeaderMapImpl::setReference(key, value);\n  }\n  void setReferenceKey(const LowerCaseString& key, absl::string_view value) override {\n    HeaderMapImpl::setReferenceKey(key, value);\n  }\n  void setCopy(const LowerCaseString& key, absl::string_view value) override {\n    HeaderMapImpl::setCopy(key, value);\n  }\n  uint64_t byteSize() const override { return HeaderMapImpl::byteSize(); }\n  const HeaderEntry* get(const LowerCaseString& key) const override {\n    return HeaderMapImpl::get(key);\n  }\n  HeaderMap::GetResult getAll(const 
LowerCaseString& key) const override {\n    return HeaderMapImpl::getAll(key);\n  }\n  void iterate(HeaderMap::ConstIterateCb cb) const override { HeaderMapImpl::iterate(cb); }\n  void iterateReverse(HeaderMap::ConstIterateCb cb) const override {\n    HeaderMapImpl::iterateReverse(cb);\n  }\n  void clear() override { HeaderMapImpl::clear(); }\n  size_t remove(const LowerCaseString& key) override { return HeaderMapImpl::remove(key); }\n  size_t removeIf(const HeaderMap::HeaderMatchPredicate& predicate) override {\n    return HeaderMapImpl::removeIf(predicate);\n  }\n  size_t removePrefix(const LowerCaseString& key) override {\n    return HeaderMapImpl::removePrefix(key);\n  }\n  size_t size() const override { return HeaderMapImpl::size(); }\n  bool empty() const override { return HeaderMapImpl::empty(); }\n  void dumpState(std::ostream& os, int indent_level = 0) const override {\n    HeaderMapImpl::dumpState(os, indent_level);\n  }\n\n  // Generic custom header functions for each fully typed interface. 
To avoid accidental issues,\n  // the Handle type is different for each interface, which is why these functions live here vs.\n  // inside HeaderMapImpl.\n  using Handle = CustomInlineHeaderRegistry::Handle<Interface::header_map_type>;\n  const HeaderEntry* getInline(Handle handle) const override {\n    ASSERT(handle.it_->second < inlineHeadersSize());\n    return constInlineHeaders()[handle.it_->second];\n  }\n  void appendInline(Handle handle, absl::string_view data, absl::string_view delimiter) override {\n    ASSERT(handle.it_->second < inlineHeadersSize());\n    HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first);\n    addSize(HeaderMapImpl::appendToHeader(entry.value(), data, delimiter));\n  }\n  void setReferenceInline(Handle handle, absl::string_view value) override {\n    ASSERT(handle.it_->second < inlineHeadersSize());\n    HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first);\n    updateSize(entry.value().size(), value.size());\n    entry.value().setReference(value);\n  }\n  void setInline(Handle handle, absl::string_view value) override {\n    ASSERT(handle.it_->second < inlineHeadersSize());\n    HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first);\n    updateSize(entry.value().size(), value.size());\n    entry.value().setCopy(value);\n  }\n  void setInline(Handle handle, uint64_t value) override {\n    ASSERT(handle.it_->second < inlineHeadersSize());\n    HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first);\n    subtractSize(entry.value().size());\n    entry.value().setInteger(value);\n    addSize(entry.value().size());\n  }\n  size_t removeInline(Handle handle) override {\n    ASSERT(handle.it_->second < inlineHeadersSize());\n    return HeaderMapImpl::removeInline(&inlineHeaders()[handle.it_->second]);\n  }\n  static size_t inlineHeadersSize() {\n    return 
StaticLookupTable<Interface>::size() * sizeof(HeaderEntryImpl*);\n  }\n\nprotected:\n  absl::optional<StaticLookupResponse> staticLookup(absl::string_view key) override {\n    return StaticLookupTable<Interface>::lookup(*this, key);\n  }\n  virtual const HeaderEntryImpl* const* constInlineHeaders() const PURE;\n};\n\n#define DEFINE_HEADER_HANDLE(name)                                                                 \\\n  Handle name =                                                                                    \\\n      CustomInlineHeaderRegistry::getInlineHeader<header_map_type>(Headers::get().name).value();\n\n/**\n * Concrete implementation of RequestHeaderMap which allows for variable custom registered inline\n * headers.\n */\nclass RequestHeaderMapImpl final : public TypedHeaderMapImpl<RequestHeaderMap>,\n                                   public InlineStorage {\npublic:\n  static std::unique_ptr<RequestHeaderMapImpl> create() {\n    return std::unique_ptr<RequestHeaderMapImpl>(new (inlineHeadersSize()) RequestHeaderMapImpl());\n  }\n\n  INLINE_REQ_HEADERS(DEFINE_INLINE_HEADER_FUNCS)\n  INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_FUNCS)\n\nprotected:\n  // NOTE: Because inline_headers_ is a variable size member, it must be the last member in the\n  // most derived class. This forces the definition of the following three functions to also be\n  // in the most derived class and thus duplicated. 
There may be a way to consolidate thus but it's\n  // not clear and can be deferred for now.\n  void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); }\n  const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; }\n  HeaderEntryImpl** inlineHeaders() override { return inline_headers_; }\n\nprivate:\n  struct HeaderHandleValues {\n    INLINE_REQ_HEADERS(DEFINE_HEADER_HANDLE)\n    INLINE_REQ_RESP_HEADERS(DEFINE_HEADER_HANDLE)\n  };\n\n  using HeaderHandles = ConstSingleton<HeaderHandleValues>;\n\n  RequestHeaderMapImpl() { clearInline(); }\n\n  HeaderEntryImpl* inline_headers_[];\n};\n\n/**\n * Concrete implementation of RequestTrailerMap which allows for variable custom registered inline\n * headers.\n */\nclass RequestTrailerMapImpl final : public TypedHeaderMapImpl<RequestTrailerMap>,\n                                    public InlineStorage {\npublic:\n  static std::unique_ptr<RequestTrailerMapImpl> create() {\n    return std::unique_ptr<RequestTrailerMapImpl>(new (inlineHeadersSize())\n                                                      RequestTrailerMapImpl());\n  }\n\nprotected:\n  // See comment in RequestHeaderMapImpl.\n  void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); }\n  const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; }\n  HeaderEntryImpl** inlineHeaders() override { return inline_headers_; }\n\nprivate:\n  RequestTrailerMapImpl() { clearInline(); }\n\n  HeaderEntryImpl* inline_headers_[];\n};\n\n/**\n * Concrete implementation of ResponseHeaderMap which allows for variable custom registered inline\n * headers.\n */\nclass ResponseHeaderMapImpl final : public TypedHeaderMapImpl<ResponseHeaderMap>,\n                                    public InlineStorage {\npublic:\n  static std::unique_ptr<ResponseHeaderMapImpl> create() {\n    return std::unique_ptr<ResponseHeaderMapImpl>(new (inlineHeadersSize())\n                     
                                 ResponseHeaderMapImpl());\n  }\n\n  INLINE_RESP_HEADERS(DEFINE_INLINE_HEADER_FUNCS)\n  INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_FUNCS)\n  INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_FUNCS)\n\nprotected:\n  // See comment in RequestHeaderMapImpl.\n  void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); }\n  const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; }\n  HeaderEntryImpl** inlineHeaders() override { return inline_headers_; }\n\nprivate:\n  struct HeaderHandleValues {\n    INLINE_RESP_HEADERS(DEFINE_HEADER_HANDLE)\n    INLINE_REQ_RESP_HEADERS(DEFINE_HEADER_HANDLE)\n    INLINE_RESP_HEADERS_TRAILERS(DEFINE_HEADER_HANDLE)\n  };\n\n  using HeaderHandles = ConstSingleton<HeaderHandleValues>;\n\n  ResponseHeaderMapImpl() { clearInline(); }\n\n  HeaderEntryImpl* inline_headers_[];\n};\n\n/**\n * Concrete implementation of ResponseTrailerMap which allows for variable custom registered\n * inline headers.\n */\nclass ResponseTrailerMapImpl final : public TypedHeaderMapImpl<ResponseTrailerMap>,\n                                     public InlineStorage {\npublic:\n  static std::unique_ptr<ResponseTrailerMapImpl> create() {\n    return std::unique_ptr<ResponseTrailerMapImpl>(new (inlineHeadersSize())\n                                                       ResponseTrailerMapImpl());\n  }\n\n  INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_FUNCS)\n\nprotected:\n  // See comment in RequestHeaderMapImpl.\n  void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); }\n  const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; }\n  HeaderEntryImpl** inlineHeaders() override { return inline_headers_; }\n\nprivate:\n  struct HeaderHandleValues {\n    INLINE_RESP_HEADERS_TRAILERS(DEFINE_HEADER_HANDLE)\n  };\n\n  using HeaderHandles = ConstSingleton<HeaderHandleValues>;\n\n  ResponseTrailerMapImpl() { clearInline(); 
}\n\n  HeaderEntryImpl* inline_headers_[];\n};\n\ntemplate <class T>\nstd::unique_ptr<T>\ncreateHeaderMap(const std::initializer_list<std::pair<LowerCaseString, std::string>>& values) {\n  auto new_header_map = T::create();\n  HeaderMapImpl::initFromInitList(*new_header_map, values.begin(), values.end());\n  return new_header_map;\n}\n\ntemplate <class T, class It> std::unique_ptr<T> createHeaderMap(It begin, It end) {\n  auto new_header_map = T::create();\n  HeaderMapImpl::initFromInitList(*new_header_map, begin, end);\n  return new_header_map;\n}\n\ntemplate <class T> std::unique_ptr<T> createHeaderMap(const HeaderMap& rhs) {\n  // TODO(mattklein123): Use of this function allows copying a request header map into a response\n  // header map, etc. which is probably not what we want. Unfortunately, we do this on purpose in\n  // a few places when dealing with gRPC headers/trailers conversions so it's not trivial to remove.\n  // We should revisit this to figure how to make this a bit safer as a non-intentional conversion\n  // may have surprising results with different O(1) headers, implementations, etc.\n  auto new_header_map = T::create();\n  HeaderMapImpl::copyFrom(*new_header_map, rhs);\n  return new_header_map;\n}\n\nstruct EmptyHeaders {\n  RequestHeaderMapPtr request_headers = RequestHeaderMapImpl::create();\n  ResponseHeaderMapPtr response_headers = ResponseHeaderMapImpl::create();\n  ResponseTrailerMapPtr response_trailers = ResponseTrailerMapImpl::create();\n};\n\nusing StaticEmptyHeaders = ConstSingleton<EmptyHeaders>;\n\nclass HeaderMapImplUtility {\npublic:\n  struct HeaderMapImplInfo {\n    // Human readable name for the header map used in info logging.\n    std::string name_;\n    // The byte size of the header map including both fixed space as well as variable space used\n    // by the registered custom headers.\n    size_t size_;\n    // All registered custom headers for the header map.\n    std::vector<std::string> registered_headers_;\n  };\n\n  
/**\n   * Fetch detailed information about each header map implementation for use in logging.\n   */\n  static std::vector<HeaderMapImplInfo> getAllHeaderMapImplInfo();\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/header_utility.cc",
    "content": "#include \"common/http/header_utility.h\"\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/common/regex.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/strings/match.h\"\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nstruct SharedResponseCodeDetailsValues {\n  const absl::string_view InvalidAuthority = \"http.invalid_authority\";\n  const absl::string_view ConnectUnsupported = \"http.connect_not_supported\";\n};\n\nusing SharedResponseCodeDetails = ConstSingleton<SharedResponseCodeDetailsValues>;\n\n// HeaderMatcher will consist of:\n//   header_match_specifier which can be any one of exact_match, regex_match, range_match,\n//   present_match, prefix_match or suffix_match.\n//   Each of these also can be inverted with the invert_match option.\n//   Absence of these options implies empty header value match based on header presence.\n//   a.exact_match: value will be used for exact string matching.\n//   b.regex_match: Match will succeed if header value matches the value specified here.\n//   c.range_match: Match will succeed if header value lies within the range specified\n//     here, using half open interval semantics [start,end).\n//   d.present_match: Match will succeed if the header is present.\n//   f.prefix_match: Match will succeed if header value matches the prefix value specified here.\n//   g.suffix_match: Match will succeed if header value matches the suffix value specified here.\nHeaderUtility::HeaderData::HeaderData(const envoy::config::route::v3::HeaderMatcher& config)\n    : name_(config.name()), invert_match_(config.invert_match()) {\n  switch (config.header_match_specifier_case()) {\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kExactMatch:\n    header_match_type_ = 
HeaderMatchType::Value;\n    value_ = config.exact_match();\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::\n      kHiddenEnvoyDeprecatedRegexMatch:\n    header_match_type_ = HeaderMatchType::Regex;\n    regex_ = Regex::Utility::parseStdRegexAsCompiledMatcher(\n        config.hidden_envoy_deprecated_regex_match());\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kSafeRegexMatch:\n    header_match_type_ = HeaderMatchType::Regex;\n    regex_ = Regex::Utility::parseRegex(config.safe_regex_match());\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kRangeMatch:\n    header_match_type_ = HeaderMatchType::Range;\n    range_.set_start(config.range_match().start());\n    range_.set_end(config.range_match().end());\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kPresentMatch:\n    header_match_type_ = HeaderMatchType::Present;\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kPrefixMatch:\n    header_match_type_ = HeaderMatchType::Prefix;\n    value_ = config.prefix_match();\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kSuffixMatch:\n    header_match_type_ = HeaderMatchType::Suffix;\n    value_ = config.suffix_match();\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kContainsMatch:\n    header_match_type_ = HeaderMatchType::Contains;\n    value_ = config.contains_match();\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::\n      HEADER_MATCH_SPECIFIER_NOT_SET:\n    FALLTHRU;\n  default:\n    header_match_type_ = HeaderMatchType::Present;\n    break;\n  }\n}\n\nvoid HeaderUtility::getAllOfHeader(const HeaderMap& headers, absl::string_view key,\n                                   std::vector<absl::string_view>& out) {\n  headers.iterate([key = 
LowerCaseString(std::string(key)),\n                   &out](const HeaderEntry& header) -> HeaderMap::Iterate {\n    if (header.key() == key.get().c_str()) {\n      out.emplace_back(header.value().getStringView());\n    }\n    return HeaderMap::Iterate::Continue;\n  });\n}\n\nbool HeaderUtility::matchHeaders(const HeaderMap& request_headers,\n                                 const std::vector<HeaderDataPtr>& config_headers) {\n  // No headers to match is considered a match.\n  if (!config_headers.empty()) {\n    for (const HeaderDataPtr& cfg_header_data : config_headers) {\n      if (!matchHeaders(request_headers, *cfg_header_data)) {\n        return false;\n      }\n    }\n  }\n\n  return true;\n}\n\nHeaderUtility::GetAllOfHeaderAsStringResult\nHeaderUtility::getAllOfHeaderAsString(const HeaderMap& headers, const Http::LowerCaseString& key) {\n  GetAllOfHeaderAsStringResult result;\n  const auto header_value = headers.getAll(key);\n\n  if (header_value.empty()) {\n    // Empty for clarity. Avoid handling the empty case in the block below if the runtime feature\n    // is disabled.\n  } else if (header_value.size() == 1 ||\n             !Runtime::runtimeFeatureEnabled(\n                 \"envoy.reloadable_features.http_match_on_all_headers\")) {\n    result.result_ = header_value[0]->value().getStringView();\n  } else {\n    // In this case we concatenate all found headers using a ',' delimiter before performing the\n    // final match. We use an InlinedVector of absl::string_view to invoke the optimized join\n    // algorithm. This requires a copying phase before we invoke join. 
The 3 used as the inline\n    // size has been arbitrarily chosen.\n    // TODO(mattklein123): Do we need to normalize any whitespace here?\n    absl::InlinedVector<absl::string_view, 3> string_view_vector;\n    string_view_vector.reserve(header_value.size());\n    for (size_t i = 0; i < header_value.size(); i++) {\n      string_view_vector.push_back(header_value[i]->value().getStringView());\n    }\n    result.result_backing_string_ = absl::StrJoin(string_view_vector, \",\");\n  }\n\n  return result;\n}\n\nbool HeaderUtility::matchHeaders(const HeaderMap& request_headers, const HeaderData& header_data) {\n  const auto header_value = getAllOfHeaderAsString(request_headers, header_data.name_);\n\n  if (!header_value.result().has_value()) {\n    return header_data.invert_match_ && header_data.header_match_type_ == HeaderMatchType::Present;\n  }\n\n  bool match;\n  switch (header_data.header_match_type_) {\n  case HeaderMatchType::Value:\n    match = header_data.value_.empty() || header_value.result().value() == header_data.value_;\n    break;\n  case HeaderMatchType::Regex:\n    match = header_data.regex_->match(header_value.result().value());\n    break;\n  case HeaderMatchType::Range: {\n    int64_t header_int_value = 0;\n    match = absl::SimpleAtoi(header_value.result().value(), &header_int_value) &&\n            header_int_value >= header_data.range_.start() &&\n            header_int_value < header_data.range_.end();\n    break;\n  }\n  case HeaderMatchType::Present:\n    match = true;\n    break;\n  case HeaderMatchType::Prefix:\n    match = absl::StartsWith(header_value.result().value(), header_data.value_);\n    break;\n  case HeaderMatchType::Suffix:\n    match = absl::EndsWith(header_value.result().value(), header_data.value_);\n    break;\n  case HeaderMatchType::Contains:\n    match = absl::StrContains(header_value.result().value(), header_data.value_);\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  return match != 
header_data.invert_match_;\n}\n\nbool HeaderUtility::headerValueIsValid(const absl::string_view header_value) {\n  return nghttp2_check_header_value(reinterpret_cast<const uint8_t*>(header_value.data()),\n                                    header_value.size()) != 0;\n}\n\nbool HeaderUtility::headerNameContainsUnderscore(const absl::string_view header_name) {\n  return header_name.find('_') != absl::string_view::npos;\n}\n\nbool HeaderUtility::authorityIsValid(const absl::string_view header_value) {\n  return nghttp2_check_authority(reinterpret_cast<const uint8_t*>(header_value.data()),\n                                 header_value.size()) != 0;\n}\n\nbool HeaderUtility::isConnect(const RequestHeaderMap& headers) {\n  return headers.Method() && headers.Method()->value() == Http::Headers::get().MethodValues.Connect;\n}\n\nbool HeaderUtility::isConnectResponse(const RequestHeaderMap* request_headers,\n                                      const ResponseHeaderMap& response_headers) {\n  return request_headers && isConnect(*request_headers) &&\n         static_cast<Http::Code>(Http::Utility::getResponseStatus(response_headers)) ==\n             Http::Code::OK;\n}\n\nvoid HeaderUtility::addHeaders(HeaderMap& headers, const HeaderMap& headers_to_add) {\n  headers_to_add.iterate([&headers](const HeaderEntry& header) -> HeaderMap::Iterate {\n    HeaderString k;\n    k.setCopy(header.key().getStringView());\n    HeaderString v;\n    v.setCopy(header.value().getStringView());\n    headers.addViaMove(std::move(k), std::move(v));\n    return HeaderMap::Iterate::Continue;\n  });\n}\n\nbool HeaderUtility::isEnvoyInternalRequest(const RequestHeaderMap& headers) {\n  const HeaderEntry* internal_request_header = headers.EnvoyInternalRequest();\n  return internal_request_header != nullptr &&\n         internal_request_header->value() == Headers::get().EnvoyInternalRequestValues.True;\n}\n\nvoid HeaderUtility::stripPortFromHost(RequestHeaderMap& headers, uint32_t listener_port) 
{\n\n  if (headers.getMethodValue() == Http::Headers::get().MethodValues.Connect) {\n    // According to RFC 2817 Connect method should have port part in host header.\n    // In this case we won't strip it even if configured to do so.\n    return;\n  }\n  const absl::string_view original_host = headers.getHostValue();\n  const absl::string_view::size_type port_start = original_host.rfind(':');\n  if (port_start == absl::string_view::npos) {\n    return;\n  }\n  // According to RFC3986 v6 address is always enclosed in \"[]\". section 3.2.2.\n  const auto v6_end_index = original_host.rfind(\"]\");\n  if (v6_end_index == absl::string_view::npos || v6_end_index < port_start) {\n    if ((port_start + 1) > original_host.size()) {\n      return;\n    }\n    const absl::string_view port_str = original_host.substr(port_start + 1);\n    uint32_t port = 0;\n    if (!absl::SimpleAtoi(port_str, &port)) {\n      return;\n    }\n    if (port != listener_port) {\n      // We would strip ports only if they are the same, as local port of the listener.\n      return;\n    }\n    const absl::string_view host = original_host.substr(0, port_start);\n    headers.setHost(host);\n  }\n}\n\nabsl::optional<std::reference_wrapper<const absl::string_view>>\nHeaderUtility::requestHeadersValid(const RequestHeaderMap& headers) {\n  // Make sure the host is valid.\n  if (headers.Host() && !HeaderUtility::authorityIsValid(headers.Host()->value().getStringView())) {\n    return SharedResponseCodeDetails::get().InvalidAuthority;\n  }\n  return absl::nullopt;\n}\n\nbool HeaderUtility::shouldCloseConnection(Http::Protocol protocol,\n                                          const RequestOrResponseHeaderMap& headers) {\n  // HTTP/1.0 defaults to single-use connections. 
Make sure the connection will be closed unless\n  // Keep-Alive is present.\n  if (protocol == Protocol::Http10 &&\n      (!headers.Connection() ||\n       !Envoy::StringUtil::caseFindToken(headers.Connection()->value().getStringView(), \",\",\n                                         Http::Headers::get().ConnectionValues.KeepAlive))) {\n    return true;\n  }\n\n  if (protocol == Protocol::Http11 && headers.Connection() &&\n      Envoy::StringUtil::caseFindToken(headers.Connection()->value().getStringView(), \",\",\n                                       Http::Headers::get().ConnectionValues.Close)) {\n    return true;\n  }\n\n  // Note: Proxy-Connection is not a standard header, but is supported here\n  // since it is supported by http-parser the underlying parser for http\n  // requests.\n  if (protocol < Protocol::Http2 && headers.ProxyConnection() &&\n      Envoy::StringUtil::caseFindToken(headers.ProxyConnection()->value().getStringView(), \",\",\n                                       Http::Headers::get().ConnectionValues.Close)) {\n    return true;\n  }\n  return false;\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/header_utility.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/common/regex.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/protocol.h\"\n#include \"envoy/type/v3/range.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Classes and methods for manipulating and checking HTTP headers.\n */\nclass HeaderUtility {\npublic:\n  enum class HeaderMatchType { Value, Regex, Range, Present, Prefix, Suffix, Contains };\n\n  /**\n   * Get all instances of the header key specified, and return the values in the vector provided.\n   *\n   * This should not be used for inline headers, as it turns a constant time lookup into O(n).\n   *\n   * @param headers the headers to return keys from\n   * @param key the header key to return values for\n   * @param out the vector to return values in\n   */\n  static void getAllOfHeader(const HeaderMap& headers, absl::string_view key,\n                             std::vector<absl::string_view>& out);\n\n  /**\n   * Get all header values as a single string. Multiple headers are concatenated with ','.\n   */\n  class GetAllOfHeaderAsStringResult {\n  public:\n    // The ultimate result of the concatenation. If absl::nullopt, no header values were found.\n    // If the final string required a string allocation, the memory is held in\n    // backingString(). This allows zero allocation in the common case of a single header\n    // value.\n    absl::optional<absl::string_view> result() const {\n      // This is safe for move/copy of this class as the backing string will be moved or copied.\n      // Otherwise result_ is valid. The assert verifies that both are empty or only 1 is set.\n      ASSERT((!result_.has_value() && result_backing_string_.empty()) ||\n             (result_.has_value() ^ !result_backing_string_.empty()));\n      return !result_backing_string_.empty() ? 
result_backing_string_ : result_;\n    }\n\n    const std::string& backingString() const { return result_backing_string_; }\n\n  private:\n    absl::optional<absl::string_view> result_;\n    // Valid only if result_ relies on memory allocation that must live beyond the call. See above.\n    std::string result_backing_string_;\n\n    friend class HeaderUtility;\n  };\n  static GetAllOfHeaderAsStringResult getAllOfHeaderAsString(const HeaderMap& headers,\n                                                             const Http::LowerCaseString& key);\n\n  // A HeaderData specifies one of exact value or regex or range element\n  // to match in a request's header, specified in the header_match_type_ member.\n  // It is the runtime equivalent of the HeaderMatchSpecifier proto in RDS API.\n  struct HeaderData : public HeaderMatcher {\n    HeaderData(const envoy::config::route::v3::HeaderMatcher& config);\n\n    const LowerCaseString name_;\n    HeaderMatchType header_match_type_;\n    std::string value_;\n    Regex::CompiledMatcherPtr regex_;\n    envoy::type::v3::Int64Range range_;\n    const bool invert_match_;\n\n    // HeaderMatcher\n    bool matchesHeaders(const HeaderMap& headers) const override {\n      return HeaderUtility::matchHeaders(headers, *this);\n    };\n  };\n\n  using HeaderDataPtr = std::unique_ptr<HeaderData>;\n\n  /**\n   * Build a vector of HeaderDataPtr given input config.\n   */\n  static std::vector<HeaderUtility::HeaderDataPtr> buildHeaderDataVector(\n      const Protobuf::RepeatedPtrField<envoy::config::route::v3::HeaderMatcher>& header_matchers) {\n    std::vector<HeaderUtility::HeaderDataPtr> ret;\n    for (const auto& header_matcher : header_matchers) {\n      ret.emplace_back(std::make_unique<HeaderUtility::HeaderData>(header_matcher));\n    }\n    return ret;\n  }\n\n  /**\n   * Build a vector of HeaderMatcherSharedPtr given input config.\n   */\n  static std::vector<Http::HeaderMatcherSharedPtr> buildHeaderMatcherVector(\n      const 
Protobuf::RepeatedPtrField<envoy::config::route::v3::HeaderMatcher>& header_matchers) {\n    std::vector<Http::HeaderMatcherSharedPtr> ret;\n    for (const auto& header_matcher : header_matchers) {\n      ret.emplace_back(std::make_shared<HeaderUtility::HeaderData>(header_matcher));\n    }\n    return ret;\n  }\n\n  /**\n   * See if the headers specified in the config are present in a request.\n   * @param request_headers supplies the headers from the request.\n   * @param config_headers supplies the list of configured header conditions on which to match.\n   * @return bool true if all the headers (and values) in the config_headers are found in the\n   *         request_headers. If no config_headers are specified, returns true.\n   */\n  static bool matchHeaders(const HeaderMap& request_headers,\n                           const std::vector<HeaderDataPtr>& config_headers);\n\n  static bool matchHeaders(const HeaderMap& request_headers, const HeaderData& config_header);\n\n  /**\n   * Validates that a header value is valid, according to RFC 7230, section 3.2.\n   * http://tools.ietf.org/html/rfc7230#section-3.2\n   * @return bool true if the header values are valid, according to the aforementioned RFC.\n   */\n  static bool headerValueIsValid(const absl::string_view header_value);\n\n  /**\n   * Checks if header name contains underscore characters.\n   * Underscore character is allowed in header names by the RFC-7230 and this check is implemented\n   * as a security measure due to systems that treat '_' and '-' as interchangeable. 
Envoy by\n   * default allows headers with underscore characters.\n   * @return bool true if header name contains underscore characters.\n   */\n  static bool headerNameContainsUnderscore(const absl::string_view header_name);\n\n  /**\n   * Validates that the characters in the authority are valid.\n   * @return bool true if the header values are valid, false otherwise.\n   */\n  static bool authorityIsValid(const absl::string_view authority_value);\n\n  /**\n   * @brief a helper function to determine if the headers represent a CONNECT request.\n   */\n  static bool isConnect(const RequestHeaderMap& headers);\n\n  /**\n   * @brief a helper function to determine if the headers represent an accepted CONNECT response.\n   */\n  static bool isConnectResponse(const RequestHeaderMap* request_headers,\n                                const ResponseHeaderMap& response_headers);\n\n  /**\n   * Add headers from one HeaderMap to another\n   * @param headers target where headers will be added\n   * @param headers_to_add supplies the headers to be added\n   */\n  static void addHeaders(HeaderMap& headers, const HeaderMap& headers_to_add);\n\n  /**\n   * @brief a helper function to determine if the headers represent an envoy internal request\n   */\n  static bool isEnvoyInternalRequest(const RequestHeaderMap& headers);\n\n  /**\n   * Determines if request headers pass Envoy validity checks.\n   * @param headers to validate\n   * @return details of the error if an error is present, otherwise absl::nullopt\n   */\n  static absl::optional<std::reference_wrapper<const absl::string_view>>\n  requestHeadersValid(const RequestHeaderMap& headers);\n\n  /**\n   * Determines if the response should be framed by Connection: Close based on protocol\n   * and headers.\n   * @param protocol the protocol of the request\n   * @param headers the request or response headers\n   * @return if the response should be framed by Connection: Close\n   */\n  static bool shouldCloseConnection(Http::Protocol 
protocol,\n                                    const RequestOrResponseHeaderMap& headers);\n\n  /**\n   * @brief Remove the port part from host/authority header if it is equal to provided port\n   */\n  static void stripPortFromHost(RequestHeaderMap& headers, uint32_t listener_port);\n};\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/headers.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/singleton/const_singleton.h\"\n#include \"common/singleton/threadsafe_singleton.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n// This class allows early override of the x-envoy prefix from bootstrap config,\n// so that servers can configure their own x-custom-string prefix.\n//\n// Once the HeaderValues const singleton has been created, changing the prefix\n// is disallowed. Essentially this is write-once then read-only.\nclass PrefixValue {\npublic:\n  const char* prefix() {\n    absl::WriterMutexLock lock(&m_);\n    read_ = true;\n    return prefix_.c_str();\n  }\n\n  // The char* prefix is used directly, so must be available for the interval where prefix() may be\n  // called.\n  void setPrefix(const char* prefix) {\n    absl::WriterMutexLock lock(&m_);\n    // The check for unchanged string is purely for integration tests - this\n    // should not happen in production.\n    RELEASE_ASSERT(!read_ || prefix_ == std::string(prefix),\n                   \"Attempting to change the header prefix after it has been used!\");\n    if (!read_) {\n      prefix_ = prefix;\n    }\n  }\n\nprivate:\n  absl::Mutex m_;\n  bool read_ = false;\n  std::string prefix_ = \"x-envoy\";\n};\n\n/**\n * These are headers that are used in extension custom O(1) header registration. 
These headers\n * *must* not contain any prefix override headers, as static init order requires that HeaderValues\n * be instantiated for the first time after bootstrap is loaded and before the header maps are\n * finalized.\n */\nclass CustomHeaderValues {\npublic:\n  const LowerCaseString Accept{\"accept\"};\n  const LowerCaseString AcceptEncoding{\"accept-encoding\"};\n  const LowerCaseString AccessControlRequestMethod{\"access-control-request-method\"};\n  const LowerCaseString AccessControlAllowOrigin{\"access-control-allow-origin\"};\n  const LowerCaseString AccessControlAllowHeaders{\"access-control-allow-headers\"};\n  const LowerCaseString AccessControlAllowMethods{\"access-control-allow-methods\"};\n  const LowerCaseString AccessControlExposeHeaders{\"access-control-expose-headers\"};\n  const LowerCaseString AccessControlMaxAge{\"access-control-max-age\"};\n  const LowerCaseString AccessControlAllowCredentials{\"access-control-allow-credentials\"};\n  const LowerCaseString Authorization{\"authorization\"};\n  const LowerCaseString CacheControl{\"cache-control\"};\n  const LowerCaseString CdnLoop{\"cdn-loop\"};\n  const LowerCaseString ContentEncoding{\"content-encoding\"};\n  const LowerCaseString Etag{\"etag\"};\n  const LowerCaseString GrpcAcceptEncoding{\"grpc-accept-encoding\"};\n  const LowerCaseString IfMatch{\"if-match\"};\n  const LowerCaseString IfNoneMatch{\"if-none-match\"};\n  const LowerCaseString IfModifiedSince{\"if-modified-since\"};\n  const LowerCaseString IfUnmodifiedSince{\"if-unmodified-since\"};\n  const LowerCaseString IfRange{\"if-range\"};\n  const LowerCaseString LastModified{\"last-modified\"};\n  const LowerCaseString Origin{\"origin\"};\n  const LowerCaseString OtSpanContext{\"x-ot-span-context\"};\n  const LowerCaseString Pragma{\"pragma\"};\n  const LowerCaseString Referer{\"referer\"};\n  const LowerCaseString Vary{\"vary\"};\n\n  struct {\n    const std::string Gzip{\"gzip\"};\n    const std::string 
Identity{\"identity\"};\n    const std::string Wildcard{\"*\"};\n  } AcceptEncodingValues;\n\n  struct {\n    const std::string All{\"*\"};\n  } AccessControlAllowOriginValue;\n\n  struct {\n    const std::string NoCache{\"no-cache\"};\n    const std::string NoCacheMaxAge0{\"no-cache, max-age=0\"};\n    const std::string NoTransform{\"no-transform\"};\n    const std::string Private{\"private\"};\n  } CacheControlValues;\n\n  struct {\n    const std::string Gzip{\"gzip\"};\n  } ContentEncodingValues;\n\n  struct {\n    const std::string True{\"true\"};\n  } CORSValues;\n\n  struct {\n    const std::string Default{\"identity,deflate,gzip\"};\n  } GrpcAcceptEncodingValues;\n\n  struct {\n    const std::string AcceptEncoding{\"Accept-Encoding\"};\n    const std::string Wildcard{\"*\"};\n  } VaryValues;\n};\n\nusing CustomHeaders = ConstSingleton<CustomHeaderValues>;\n\n/**\n * Constant HTTP headers and values. All lower case. This group of headers can contain prefix\n * override headers.\n */\nclass HeaderValues {\npublic:\n  const char* prefix() const { return ThreadSafeSingleton<PrefixValue>::get().prefix(); }\n\n  const LowerCaseString Age{\"age\"};\n  const LowerCaseString ProxyAuthenticate{\"proxy-authenticate\"};\n  const LowerCaseString ProxyAuthorization{\"proxy-authorization\"};\n  const LowerCaseString ClientTraceId{\"x-client-trace-id\"};\n  const LowerCaseString Connection{\"connection\"};\n  const LowerCaseString ContentLength{\"content-length\"};\n  const LowerCaseString ContentRange{\"content-range\"};\n  const LowerCaseString ContentType{\"content-type\"};\n  const LowerCaseString Cookie{\"cookie\"};\n  const LowerCaseString Date{\"date\"};\n  const LowerCaseString EnvoyAttemptCount{absl::StrCat(prefix(), \"-attempt-count\")};\n  const LowerCaseString EnvoyCluster{absl::StrCat(prefix(), \"-cluster\")};\n  const LowerCaseString EnvoyDegraded{absl::StrCat(prefix(), \"-degraded\")};\n  const LowerCaseString EnvoyDownstreamServiceCluster{\n      
absl::StrCat(prefix(), \"-downstream-service-cluster\")};\n  const LowerCaseString EnvoyDownstreamServiceNode{\n      absl::StrCat(prefix(), \"-downstream-service-node\")};\n  const LowerCaseString EnvoyExternalAddress{absl::StrCat(prefix(), \"-external-address\")};\n  const LowerCaseString EnvoyForceTrace{absl::StrCat(prefix(), \"-force-trace\")};\n  const LowerCaseString EnvoyHedgeOnPerTryTimeout{\n      absl::StrCat(prefix(), \"-hedge-on-per-try-timeout\")};\n  const LowerCaseString EnvoyImmediateHealthCheckFail{\n      absl::StrCat(prefix(), \"-immediate-health-check-fail\")};\n  const LowerCaseString EnvoyOriginalUrl{absl::StrCat(prefix(), \"-original-url\")};\n  const LowerCaseString EnvoyInternalRequest{absl::StrCat(prefix(), \"-internal\")};\n  // TODO(mattklein123): EnvoyIpTags should be a custom header registered with the IP tagging\n  // filter. We need to figure out if we can remove this header from the set of headers that\n  // participate in prefix overrides.\n  const LowerCaseString EnvoyIpTags{absl::StrCat(prefix(), \"-ip-tags\")};\n  const LowerCaseString EnvoyMaxRetries{absl::StrCat(prefix(), \"-max-retries\")};\n  const LowerCaseString EnvoyNotForwarded{absl::StrCat(prefix(), \"-not-forwarded\")};\n  const LowerCaseString EnvoyOriginalDstHost{absl::StrCat(prefix(), \"-original-dst-host\")};\n  const LowerCaseString EnvoyOriginalMethod{absl::StrCat(prefix(), \"-original-method\")};\n  const LowerCaseString EnvoyOriginalPath{absl::StrCat(prefix(), \"-original-path\")};\n  const LowerCaseString EnvoyOverloaded{absl::StrCat(prefix(), \"-overloaded\")};\n  const LowerCaseString EnvoyRateLimited{absl::StrCat(prefix(), \"-ratelimited\")};\n  const LowerCaseString EnvoyRetryOn{absl::StrCat(prefix(), \"-retry-on\")};\n  const LowerCaseString EnvoyRetryGrpcOn{absl::StrCat(prefix(), \"-retry-grpc-on\")};\n  const LowerCaseString EnvoyRetriableStatusCodes{\n      absl::StrCat(prefix(), \"-retriable-status-codes\")};\n  const LowerCaseString 
EnvoyRetriableHeaderNames{\n      absl::StrCat(prefix(), \"-retriable-header-names\")};\n  const LowerCaseString EnvoyUpstreamAltStatName{absl::StrCat(prefix(), \"-upstream-alt-stat-name\")};\n  const LowerCaseString EnvoyUpstreamCanary{absl::StrCat(prefix(), \"-upstream-canary\")};\n  const LowerCaseString EnvoyUpstreamHostAddress{absl::StrCat(prefix(), \"-upstream-host-address\")};\n  const LowerCaseString EnvoyUpstreamHostname{absl::StrCat(prefix(), \"-upstream-hostname\")};\n  const LowerCaseString EnvoyUpstreamRequestTimeoutAltResponse{\n      absl::StrCat(prefix(), \"-upstream-rq-timeout-alt-response\")};\n  const LowerCaseString EnvoyUpstreamRequestTimeoutMs{\n      absl::StrCat(prefix(), \"-upstream-rq-timeout-ms\")};\n  const LowerCaseString EnvoyUpstreamRequestPerTryTimeoutMs{\n      absl::StrCat(prefix(), \"-upstream-rq-per-try-timeout-ms\")};\n  const LowerCaseString EnvoyExpectedRequestTimeoutMs{\n      absl::StrCat(prefix(), \"-expected-rq-timeout-ms\")};\n  const LowerCaseString EnvoyUpstreamServiceTime{absl::StrCat(prefix(), \"-upstream-service-time\")};\n  const LowerCaseString EnvoyUpstreamHealthCheckedCluster{\n      absl::StrCat(prefix(), \"-upstream-healthchecked-cluster\")};\n  const LowerCaseString EnvoyDecoratorOperation{absl::StrCat(prefix(), \"-decorator-operation\")};\n  const LowerCaseString Expect{\"expect\"};\n  const LowerCaseString Expires{\"expires\"};\n  const LowerCaseString ForwardedClientCert{\"x-forwarded-client-cert\"};\n  const LowerCaseString ForwardedFor{\"x-forwarded-for\"};\n  const LowerCaseString ForwardedHost{\"x-forwarded-host\"};\n  const LowerCaseString ForwardedProto{\"x-forwarded-proto\"};\n  const LowerCaseString GrpcMessage{\"grpc-message\"};\n  const LowerCaseString GrpcStatus{\"grpc-status\"};\n  const LowerCaseString GrpcTimeout{\"grpc-timeout\"};\n  const LowerCaseString GrpcStatusDetailsBin{\"grpc-status-details-bin\"};\n  const LowerCaseString Host{\":authority\"};\n  const LowerCaseString 
HostLegacy{\"host\"};\n  const LowerCaseString Http2Settings{\"http2-settings\"};\n  const LowerCaseString KeepAlive{\"keep-alive\"};\n  const LowerCaseString Location{\"location\"};\n  const LowerCaseString Method{\":method\"};\n  const LowerCaseString Path{\":path\"};\n  const LowerCaseString Protocol{\":protocol\"};\n  const LowerCaseString ProxyConnection{\"proxy-connection\"};\n  const LowerCaseString Range{\"range\"};\n  const LowerCaseString RequestId{\"x-request-id\"};\n  const LowerCaseString Scheme{\":scheme\"};\n  const LowerCaseString Server{\"server\"};\n  const LowerCaseString SetCookie{\"set-cookie\"};\n  const LowerCaseString Status{\":status\"};\n  const LowerCaseString TransferEncoding{\"transfer-encoding\"};\n  const LowerCaseString TE{\"te\"};\n  const LowerCaseString Upgrade{\"upgrade\"};\n  const LowerCaseString UserAgent{\"user-agent\"};\n  const LowerCaseString Vary{\"vary\"};\n  const LowerCaseString Via{\"via\"};\n  const LowerCaseString WWWAuthenticate{\"www-authenticate\"};\n  const LowerCaseString XContentTypeOptions{\"x-content-type-options\"};\n  const LowerCaseString XSquashDebug{\"x-squash-debug\"};\n\n  struct {\n    const std::string Close{\"close\"};\n    const std::string Http2Settings{\"http2-settings\"};\n    const std::string KeepAlive{\"keep-alive\"};\n    const std::string Upgrade{\"upgrade\"};\n  } ConnectionValues;\n\n  struct {\n    const std::string H2c{\"h2c\"};\n    const std::string WebSocket{\"websocket\"};\n  } UpgradeValues;\n\n  struct {\n    const std::string Text{\"text/plain\"};\n    const std::string TextEventStream{\"text/event-stream\"};\n    const std::string TextUtf8{\"text/plain; charset=UTF-8\"}; // TODO(jmarantz): fold this into Text\n    const std::string Html{\"text/html; charset=UTF-8\"};\n    const std::string Grpc{\"application/grpc\"};\n    const std::string GrpcWeb{\"application/grpc-web\"};\n    const std::string GrpcWebProto{\"application/grpc-web+proto\"};\n    const std::string 
GrpcWebText{\"application/grpc-web-text\"};\n    const std::string GrpcWebTextProto{\"application/grpc-web-text+proto\"};\n    const std::string Json{\"application/json\"};\n    const std::string Protobuf{\"application/x-protobuf\"};\n    const std::string FormUrlEncoded{\"application/x-www-form-urlencoded\"};\n  } ContentTypeValues;\n\n  struct {\n    const std::string True{\"true\"};\n  } EnvoyImmediateHealthCheckFailValues;\n\n  struct {\n    const std::string True{\"true\"};\n  } EnvoyInternalRequestValues;\n\n  struct {\n    const std::string True{\"true\"};\n  } EnvoyOverloadedValues;\n\n  struct {\n    const std::string True{\"true\"};\n  } EnvoyRateLimitedValues;\n\n  struct {\n    const std::string _5xx{\"5xx\"};\n    const std::string GatewayError{\"gateway-error\"};\n    const std::string ConnectFailure{\"connect-failure\"};\n    const std::string EnvoyRateLimited{\"envoy-ratelimited\"};\n    const std::string RefusedStream{\"refused-stream\"};\n    const std::string Retriable4xx{\"retriable-4xx\"};\n    const std::string RetriableStatusCodes{\"retriable-status-codes\"};\n    const std::string RetriableHeaders{\"retriable-headers\"};\n    const std::string Reset{\"reset\"};\n  } EnvoyRetryOnValues;\n\n  struct {\n    const std::string Cancelled{\"cancelled\"};\n    const std::string DeadlineExceeded{\"deadline-exceeded\"};\n    const std::string ResourceExhausted{\"resource-exhausted\"};\n    const std::string Unavailable{\"unavailable\"};\n    const std::string Internal{\"internal\"};\n  } EnvoyRetryOnGrpcValues;\n\n  struct {\n    const std::string _100Continue{\"100-continue\"};\n  } ExpectValues;\n\n  struct {\n    const std::string Connect{\"CONNECT\"};\n    const std::string Delete{\"DELETE\"};\n    const std::string Get{\"GET\"};\n    const std::string Head{\"HEAD\"};\n    const std::string Options{\"OPTIONS\"};\n    const std::string Patch{\"PATCH\"};\n    const std::string Post{\"POST\"};\n    const std::string Put{\"PUT\"};\n    const 
std::string Trace{\"TRACE\"};\n  } MethodValues;\n\n  struct {\n    // per https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02\n    const std::string Bytestream{\"bytestream\"};\n  } ProtocolValues;\n\n  struct {\n    const std::string Http{\"http\"};\n    const std::string Https{\"https\"};\n  } SchemeValues;\n\n  struct {\n    const std::string Chunked{\"chunked\"};\n    const std::string Deflate{\"deflate\"};\n    const std::string Gzip{\"gzip\"};\n    const std::string Identity{\"identity\"};\n  } TransferEncodingValues;\n\n  struct {\n    const std::string EnvoyHealthChecker{\"Envoy/HC\"};\n  } UserAgentValues;\n\n  struct {\n    const std::string Trailers{\"trailers\"};\n  } TEValues;\n\n  struct {\n    const std::string Nosniff{\"nosniff\"};\n  } XContentTypeOptionValues;\n\n  struct {\n    const std::string Http10String{\"HTTP/1.0\"};\n    const std::string Http11String{\"HTTP/1.1\"};\n    const std::string Http2String{\"HTTP/2\"};\n    const std::string Http3String{\"HTTP/3\"};\n  } ProtocolStrings;\n};\n\nusing Headers = ConstSingleton<HeaderValues>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http1/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"header_formatter_lib\",\n    srcs = [\"header_formatter.cc\"],\n    hdrs = [\"header_formatter.h\"],\n)\n\nenvoy_cc_library(\n    name = \"codec_stats_lib\",\n    hdrs = [\"codec_stats.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/common:thread_lib\",\n    ],\n)\n\nCODEC_LIB_DEPS = [\n    \":codec_stats_lib\",\n    \":header_formatter_lib\",\n    \"//include/envoy/buffer:buffer_interface\",\n    \"//include/envoy/http:codec_interface\",\n    \"//include/envoy/http:header_map_interface\",\n    \"//include/envoy/network:connection_interface\",\n    \"//source/common/buffer:buffer_lib\",\n    \"//source/common/buffer:watermark_buffer_lib\",\n    \"//source/common/common:assert_lib\",\n    \"//source/common/common:statusor_lib\",\n    \"//source/common/common:utility_lib\",\n    \"//source/common/grpc:common_lib\",\n    \"//source/common/http:codec_helper_lib\",\n    \"//source/common/http:codes_lib\",\n    \"//source/common/http:exception_lib\",\n    \"//source/common/http:header_map_lib\",\n    \"//source/common/http:header_utility_lib\",\n    \"//source/common/http:headers_lib\",\n    \"//source/common/http:status_lib\",\n    \"//source/common/http:utility_lib\",\n    \"//source/common/runtime:runtime_features_lib\",\n    \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n]\n\nenvoy_cc_library(\n    name = \"codec_lib\",\n    srcs = [\"codec_impl.cc\"],\n    hdrs = [\"codec_impl.h\"],\n    external_deps = [\"http_parser\"],\n    deps = CODEC_LIB_DEPS + [\"//source/common/common:cleanup_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"codec_legacy_lib\",\n    srcs = [\"codec_impl_legacy.cc\"],\n    hdrs = [\"codec_impl_legacy.h\"],\n    external_deps = 
[\"http_parser\"],\n    deps = CODEC_LIB_DEPS,\n)\n\nenvoy_cc_library(\n    name = \"conn_pool_lib\",\n    srcs = [\"conn_pool.cc\"],\n    hdrs = [\"conn_pool.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/http:codec_client_lib\",\n        \"//source/common/http:codec_wrappers_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:conn_pool_base_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n        \"//source/common/upstream:upstream_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/http/http1/codec_impl.cc",
    "content": "#include \"common/http/http1/codec_impl.h\"\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/statusor.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/http1/header_formatter.h\"\n#include \"common/http/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/ascii.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http1 {\nnamespace {\n\n// Changes or additions to details should be reflected in\n// docs/root/configuration/http/http_conn_man/response_code_details_details.rst\nstruct Http1ResponseCodeDetailValues {\n  const absl::string_view TooManyHeaders = \"http1.too_many_headers\";\n  const absl::string_view HeadersTooLarge = \"http1.headers_too_large\";\n  const absl::string_view HttpCodecError = \"http1.codec_error\";\n  const absl::string_view InvalidCharacters = \"http1.invalid_characters\";\n  const absl::string_view ConnectionHeaderSanitization = \"http1.connection_header_rejected\";\n  const absl::string_view InvalidUrl = \"http1.invalid_url\";\n  const absl::string_view InvalidTransferEncoding = \"http1.invalid_transfer_encoding\";\n  const absl::string_view BodyDisallowed = \"http1.body_disallowed\";\n  const absl::string_view TransferEncodingNotAllowed = \"http1.transfer_encoding_not_allowed\";\n  const absl::string_view ContentLengthNotAllowed = \"http1.content_length_not_allowed\";\n  const absl::string_view InvalidUnderscore = \"http1.unexpected_underscore\";\n  const absl::string_view ChunkedContentLength = 
\"http1.content_length_and_chunked_not_allowed\";\n};\n\nstruct Http1HeaderTypesValues {\n  const absl::string_view Headers = \"headers\";\n  const absl::string_view Trailers = \"trailers\";\n};\n\nusing Http1ResponseCodeDetails = ConstSingleton<Http1ResponseCodeDetailValues>;\nusing Http1HeaderTypes = ConstSingleton<Http1HeaderTypesValues>;\n\nconst StringUtil::CaseUnorderedSet& caseUnorderdSetContainingUpgradeAndHttp2Settings() {\n  CONSTRUCT_ON_FIRST_USE(StringUtil::CaseUnorderedSet,\n                         Http::Headers::get().ConnectionValues.Upgrade,\n                         Http::Headers::get().ConnectionValues.Http2Settings);\n}\n\nHeaderKeyFormatterPtr formatter(const Http::Http1Settings& settings) {\n  if (settings.header_key_format_ == Http1Settings::HeaderKeyFormat::ProperCase) {\n    return std::make_unique<ProperCaseHeaderKeyFormatter>();\n  }\n\n  return nullptr;\n}\n\n} // namespace\n\nconst std::string StreamEncoderImpl::CRLF = \"\\r\\n\";\n// Last chunk as defined here https://tools.ietf.org/html/rfc7230#section-4.1\nconst std::string StreamEncoderImpl::LAST_CHUNK = \"0\\r\\n\";\n\nStreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection,\n                                     HeaderKeyFormatter* header_key_formatter)\n    : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true),\n      is_response_to_head_request_(false), is_response_to_connect_request_(false),\n      header_key_formatter_(header_key_formatter) {\n  if (connection_.connection().aboveHighWatermark()) {\n    runHighWatermarkCallbacks();\n  }\n}\n\nvoid StreamEncoderImpl::encodeHeader(const char* key, uint32_t key_size, const char* value,\n                                     uint32_t value_size) {\n\n  ASSERT(key_size > 0);\n\n  connection_.copyToBuffer(key, key_size);\n  connection_.addCharToBuffer(':');\n  connection_.addCharToBuffer(' ');\n  connection_.copyToBuffer(value, value_size);\n  connection_.addToBuffer(CRLF);\n}\nvoid 
StreamEncoderImpl::encodeHeader(absl::string_view key, absl::string_view value) {\n  this->encodeHeader(key.data(), key.size(), value.data(), value.size());\n}\n\nvoid StreamEncoderImpl::encodeFormattedHeader(absl::string_view key, absl::string_view value) {\n  if (header_key_formatter_ != nullptr) {\n    encodeHeader(header_key_formatter_->format(key), value);\n  } else {\n    encodeHeader(key, value);\n  }\n}\n\nvoid ResponseEncoderImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) {\n  ASSERT(headers.Status()->value() == \"100\");\n  encodeHeaders(headers, false);\n}\n\nvoid StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& headers,\n                                          absl::optional<uint64_t> status, bool end_stream) {\n  bool saw_content_length = false;\n  headers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate {\n    absl::string_view key_to_use = header.key().getStringView();\n    uint32_t key_size_to_use = header.key().size();\n    // Translate :authority -> host so that upper layers do not need to deal with this.\n    if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') {\n      key_to_use = absl::string_view(Headers::get().HostLegacy.get());\n      key_size_to_use = Headers::get().HostLegacy.get().size();\n    }\n\n    // Skip all headers starting with ':' that make it here.\n    if (key_to_use[0] == ':') {\n      return HeaderMap::Iterate::Continue;\n    }\n\n    encodeFormattedHeader(key_to_use, header.value().getStringView());\n\n    return HeaderMap::Iterate::Continue;\n  });\n\n  if (headers.ContentLength()) {\n    saw_content_length = true;\n  }\n\n  ASSERT(!headers.TransferEncoding());\n\n  // Assume we are chunk encoding unless we are passed a content length or this is a header only\n  // response. Upper layers generally should strip transfer-encoding since it only applies to\n  // HTTP/1.1. The codec will infer it based on the type of response.\n  // for streaming (e.g. 
SSE stream sent to hystrix dashboard), we do not want\n  // chunk transfer encoding but we don't have a content-length so disable_chunk_encoding_ is\n  // consulted before enabling chunk encoding.\n  //\n  // Note that for HEAD requests Envoy does best-effort guessing when there is no\n  // content-length. If a client makes a HEAD request for an upstream resource\n  // with no bytes but the upstream response doesn't include \"Content-length: 0\",\n  // Envoy will incorrectly assume a subsequent response to GET will be chunk encoded.\n  if (saw_content_length || disable_chunk_encoding_) {\n    chunk_encoding_ = false;\n  } else {\n    if (status && *status == 100) {\n      // Make sure we don't serialize chunk information with 100-Continue headers.\n      chunk_encoding_ = false;\n    } else if (end_stream && !is_response_to_head_request_) {\n      // If this is a headers-only stream, append an explicit \"Content-Length: 0\" unless it's a\n      // response to a HEAD request.\n      // For 204s and 1xx where content length is disallowed, don't append the content length but\n      // also don't chunk encode.\n      if (!status || (*status >= 200 && *status != 204)) {\n        encodeFormattedHeader(Headers::get().ContentLength.get(), \"0\");\n      }\n      chunk_encoding_ = false;\n    } else if (connection_.protocol() == Protocol::Http10) {\n      chunk_encoding_ = false;\n    } else if (status && (*status < 200 || *status == 204) &&\n               connection_.strict1xxAnd204Headers()) {\n      // TODO(zuercher): when the \"envoy.reloadable_features.strict_1xx_and_204_response_headers\"\n      // feature flag is removed, this block can be coalesced with the 100 Continue logic above.\n\n      // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked\n      // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1\n      chunk_encoding_ = false;\n    } else {\n      // For responses to connect requests, do not send the chunked 
encoding header:\n      // https://tools.ietf.org/html/rfc7231#section-4.3.6.\n      if (!is_response_to_connect_request_) {\n        encodeFormattedHeader(Headers::get().TransferEncoding.get(),\n                              Headers::get().TransferEncodingValues.Chunked);\n      }\n      // We do not apply chunk encoding for HTTP upgrades, including CONNECT style upgrades.\n      // If there is a body in a response on the upgrade path, the chunks will be\n      // passed through via maybeDirectDispatch so we need to avoid appending\n      // extra chunk boundaries.\n      //\n      // When sending a response to a HEAD request Envoy may send an informational\n      // \"Transfer-Encoding: chunked\" header, but should not send a chunk encoded body.\n      chunk_encoding_ = !Utility::isUpgrade(headers) && !is_response_to_head_request_ &&\n                        !is_response_to_connect_request_;\n    }\n  }\n\n  connection_.addToBuffer(CRLF);\n\n  if (end_stream) {\n    endEncode();\n  } else {\n    connection_.flushOutput();\n  }\n}\n\nvoid StreamEncoderImpl::encodeData(Buffer::Instance& data, bool end_stream) {\n  // end_stream may be indicated with a zero length data buffer. 
If that is the case, so not\n  // actually write the zero length buffer out.\n  if (data.length() > 0) {\n    if (chunk_encoding_) {\n      connection_.buffer().add(absl::StrCat(absl::Hex(data.length()), CRLF));\n    }\n\n    connection_.buffer().move(data);\n\n    if (chunk_encoding_) {\n      connection_.buffer().add(CRLF);\n    }\n  }\n\n  if (end_stream) {\n    endEncode();\n  } else {\n    connection_.flushOutput();\n  }\n}\n\nvoid StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) {\n  if (!connection_.enableTrailers()) {\n    return endEncode();\n  }\n  // Trailers only matter if it is a chunk transfer encoding\n  // https://tools.ietf.org/html/rfc7230#section-4.4\n  if (chunk_encoding_) {\n    // Finalize the body\n    connection_.buffer().add(LAST_CHUNK);\n\n    trailers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate {\n      encodeFormattedHeader(header.key().getStringView(), header.value().getStringView());\n      return HeaderMap::Iterate::Continue;\n    });\n\n    connection_.flushOutput();\n    connection_.buffer().add(CRLF);\n  }\n\n  connection_.flushOutput();\n  connection_.onEncodeComplete();\n}\n\nvoid StreamEncoderImpl::encodeMetadata(const MetadataMapVector&) {\n  connection_.stats().metadata_not_supported_error_.inc();\n}\n\nvoid StreamEncoderImpl::endEncode() {\n  if (chunk_encoding_) {\n    connection_.buffer().add(LAST_CHUNK);\n    connection_.buffer().add(CRLF);\n  }\n\n  connection_.flushOutput(true);\n  connection_.onEncodeComplete();\n}\n\nvoid ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) {\n  if (!flood_protection_) {\n    return;\n  }\n  // It's messy and complicated to try to tag the final write of an HTTP response for response\n  // tracking for flood protection. 
Instead, write an empty buffer fragment after the response,\n  // to allow for tracking.\n  // When the response is written out, the fragment will be deleted and the counter will be updated\n  // by ServerConnectionImpl::releaseOutboundResponse()\n  auto fragment =\n      Buffer::OwnedBufferFragmentImpl::create(absl::string_view(\"\", 0), response_buffer_releasor_);\n  output_buffer.addBufferFragment(*fragment.release());\n  ASSERT(outbound_responses_ < max_outbound_responses_);\n  outbound_responses_++;\n}\n\nStatus ServerConnectionImpl::doFloodProtectionChecks() const {\n  ASSERT(dispatching_);\n  if (!flood_protection_) {\n    return okStatus();\n  }\n  // Before processing another request, make sure that we are below the response flood protection\n  // threshold.\n  if (outbound_responses_ >= max_outbound_responses_) {\n    ENVOY_CONN_LOG(trace, \"error accepting request: too many pending responses queued\",\n                   connection_);\n    stats_.response_flood_.inc();\n    return bufferFloodError(\"Too many responses queued.\");\n  }\n  return okStatus();\n}\n\nvoid ConnectionImpl::flushOutput(bool end_encode) {\n  if (end_encode) {\n    // If this is an HTTP response in ServerConnectionImpl, track outbound responses for flood\n    // protection\n    maybeAddSentinelBufferFragment(output_buffer_);\n  }\n  connection().write(output_buffer_, false);\n  ASSERT(0UL == output_buffer_.length());\n}\n\nvoid ConnectionImpl::addToBuffer(absl::string_view data) { output_buffer_.add(data); }\n\nvoid ConnectionImpl::addCharToBuffer(char c) { output_buffer_.add(&c, 1); }\n\nvoid ConnectionImpl::addIntToBuffer(uint64_t i) { output_buffer_.add(absl::StrCat(i)); }\n\nvoid ConnectionImpl::copyToBuffer(const char* data, uint64_t length) {\n  output_buffer_.add(data, length);\n}\n\nvoid StreamEncoderImpl::resetStream(StreamResetReason reason) {\n  connection_.onResetStreamBase(reason);\n}\n\nvoid StreamEncoderImpl::readDisable(bool disable) {\n  if (disable) {\n    
++read_disable_calls_;\n  } else {\n    ASSERT(read_disable_calls_ != 0);\n    if (read_disable_calls_ != 0) {\n      --read_disable_calls_;\n    }\n  }\n  connection_.readDisable(disable);\n}\n\nuint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); }\n\nconst Network::Address::InstanceConstSharedPtr& StreamEncoderImpl::connectionLocalAddress() {\n  return connection_.connection().localAddress();\n}\n\nstatic const char RESPONSE_PREFIX[] = \"HTTP/1.1 \";\nstatic const char HTTP_10_RESPONSE_PREFIX[] = \"HTTP/1.0 \";\n\nvoid ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) {\n  started_response_ = true;\n\n  // The contract is that client codecs must ensure that :status is present.\n  ASSERT(headers.Status() != nullptr);\n  uint64_t numeric_status = Utility::getResponseStatus(headers);\n\n  if (connection_.protocol() == Protocol::Http10 && connection_.supportsHttp10()) {\n    connection_.copyToBuffer(HTTP_10_RESPONSE_PREFIX, sizeof(HTTP_10_RESPONSE_PREFIX) - 1);\n  } else {\n    connection_.copyToBuffer(RESPONSE_PREFIX, sizeof(RESPONSE_PREFIX) - 1);\n  }\n  connection_.addIntToBuffer(numeric_status);\n  connection_.addCharToBuffer(' ');\n\n  const char* status_string = CodeUtility::toString(static_cast<Code>(numeric_status));\n  uint32_t status_string_len = strlen(status_string);\n  connection_.copyToBuffer(status_string, status_string_len);\n\n  connection_.addCharToBuffer('\\r');\n  connection_.addCharToBuffer('\\n');\n\n  if (numeric_status >= 300) {\n    // Don't do special CONNECT logic if the CONNECT was rejected.\n    is_response_to_connect_request_ = false;\n  }\n\n  encodeHeadersBase(headers, absl::make_optional<uint64_t>(numeric_status), end_stream);\n}\n\nstatic const char REQUEST_POSTFIX[] = \" HTTP/1.1\\r\\n\";\n\nvoid RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end_stream) {\n  const HeaderEntry* method = headers.Method();\n  const HeaderEntry* path = 
headers.Path();\n  const HeaderEntry* host = headers.Host();\n  bool is_connect = HeaderUtility::isConnect(headers);\n\n  // TODO(#10878): Include missing host header for CONNECT.\n  // The RELEASE_ASSERT below does not change the existing behavior of `encodeHeaders`.\n  // The `encodeHeaders` used to throw on errors. Callers of `encodeHeaders()` do not catch\n  // exceptions and this would cause abnormal process termination in error cases. This change\n  // replaces abnormal process termination from unhandled exception with the RELEASE_ASSERT. Further\n  // work will replace this RELEASE_ASSERT with proper error handling.\n  RELEASE_ASSERT(method && (path || is_connect), \":method and :path must be specified\");\n\n  if (method->value() == Headers::get().MethodValues.Head) {\n    head_request_ = true;\n  } else if (method->value() == Headers::get().MethodValues.Connect) {\n    disableChunkEncoding();\n    connect_request_ = true;\n  }\n  if (Utility::isUpgrade(headers)) {\n    upgrade_request_ = true;\n  }\n\n  connection_.copyToBuffer(method->value().getStringView().data(), method->value().size());\n  connection_.addCharToBuffer(' ');\n  if (is_connect) {\n    connection_.copyToBuffer(host->value().getStringView().data(), host->value().size());\n  } else {\n    connection_.copyToBuffer(path->value().getStringView().data(), path->value().size());\n  }\n  connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1);\n\n  encodeHeadersBase(headers, absl::nullopt, end_stream);\n}\n\nint ConnectionImpl::setAndCheckCallbackStatus(Status&& status) {\n  ASSERT(codec_status_.ok());\n  codec_status_ = std::move(status);\n  return codec_status_.ok() ? 
enumToInt(HttpParserCode::Success) : enumToInt(HttpParserCode::Error);\n}\n\nint ConnectionImpl::setAndCheckCallbackStatusOr(Envoy::StatusOr<int>&& statusor) {\n  ASSERT(codec_status_.ok());\n  if (statusor.ok()) {\n    return statusor.value();\n  } else {\n    codec_status_ = std::move(statusor.status());\n    return enumToInt(HttpParserCode::Error);\n  }\n}\n\nhttp_parser_settings ConnectionImpl::settings_{\n    [](http_parser* parser) -> int {\n      auto* conn_impl = static_cast<ConnectionImpl*>(parser->data);\n      auto status = conn_impl->onMessageBeginBase();\n      return conn_impl->setAndCheckCallbackStatus(std::move(status));\n    },\n    [](http_parser* parser, const char* at, size_t length) -> int {\n      auto* conn_impl = static_cast<ConnectionImpl*>(parser->data);\n      auto status = conn_impl->onUrl(at, length);\n      return conn_impl->setAndCheckCallbackStatus(std::move(status));\n    },\n    nullptr, // on_status\n    [](http_parser* parser, const char* at, size_t length) -> int {\n      auto* conn_impl = static_cast<ConnectionImpl*>(parser->data);\n      auto status = conn_impl->onHeaderField(at, length);\n      return conn_impl->setAndCheckCallbackStatus(std::move(status));\n    },\n    [](http_parser* parser, const char* at, size_t length) -> int {\n      auto* conn_impl = static_cast<ConnectionImpl*>(parser->data);\n      auto status = conn_impl->onHeaderValue(at, length);\n      return conn_impl->setAndCheckCallbackStatus(std::move(status));\n    },\n    [](http_parser* parser) -> int {\n      auto* conn_impl = static_cast<ConnectionImpl*>(parser->data);\n      auto statusor = conn_impl->onHeadersCompleteBase();\n      return conn_impl->setAndCheckCallbackStatusOr(std::move(statusor));\n    },\n    [](http_parser* parser, const char* at, size_t length) -> int {\n      static_cast<ConnectionImpl*>(parser->data)->bufferBody(at, length);\n      return 0;\n    },\n    [](http_parser* parser) -> int {\n      auto* conn_impl = 
static_cast<ConnectionImpl*>(parser->data);\n      auto status = conn_impl->onMessageCompleteBase();\n      return conn_impl->setAndCheckCallbackStatus(std::move(status));\n    },\n    [](http_parser* parser) -> int {\n      // A 0-byte chunk header is used to signal the end of the chunked body.\n      // When this function is called, http-parser holds the size of the chunk in\n      // parser->content_length. See\n      // https://github.com/nodejs/http-parser/blob/v2.9.3/http_parser.h#L336\n      const bool is_final_chunk = (parser->content_length == 0);\n      static_cast<ConnectionImpl*>(parser->data)->onChunkHeader(is_final_chunk);\n      return 0;\n    },\n    nullptr // on_chunk_complete\n};\n\nConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats,\n                               const Http1Settings& settings, http_parser_type type,\n                               uint32_t max_headers_kb, const uint32_t max_headers_count,\n                               HeaderKeyFormatterPtr&& header_key_formatter)\n    : connection_(connection), stats_(stats), codec_settings_(settings),\n      header_key_formatter_(std::move(header_key_formatter)), processing_trailers_(false),\n      handling_upgrade_(false), reset_stream_called_(false), deferred_end_stream_headers_(false),\n      strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.strict_1xx_and_204_response_headers\")),\n      dispatching_(false),\n      output_buffer_([&]() -> void { this->onBelowLowWatermark(); },\n                     [&]() -> void { this->onAboveHighWatermark(); },\n                     []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }),\n      max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) {\n  output_buffer_.setWatermarks(connection.bufferLimit());\n  http_parser_init(&parser_, type);\n  parser_.allow_chunked_length = 1;\n  parser_.data = this;\n}\n\nStatus 
ConnectionImpl::completeLastHeader() {\n  ASSERT(dispatching_);\n  ENVOY_CONN_LOG(trace, \"completed header: key={} value={}\", connection_,\n                 current_header_field_.getStringView(), current_header_value_.getStringView());\n\n  RETURN_IF_ERROR(checkHeaderNameForUnderscores());\n  auto& headers_or_trailers = headersOrTrailers();\n  if (!current_header_field_.empty()) {\n    current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); });\n    // Strip trailing whitespace of the current header value if any. Leading whitespace was trimmed\n    // in ConnectionImpl::onHeaderValue. http_parser does not strip leading or trailing whitespace\n    // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4\n    current_header_value_.rtrim();\n    headers_or_trailers.addViaMove(std::move(current_header_field_),\n                                   std::move(current_header_value_));\n  }\n\n  // Check if the number of headers exceeds the limit.\n  if (headers_or_trailers.size() > max_headers_count_) {\n    error_code_ = Http::Code::RequestHeaderFieldsTooLarge;\n    RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders));\n    const absl::string_view header_type =\n        processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers;\n    return codecProtocolError(absl::StrCat(header_type, \" size exceeds limit\"));\n  }\n\n  header_parsing_state_ = HeaderParsingState::Field;\n  ASSERT(current_header_field_.empty());\n  ASSERT(current_header_value_.empty());\n  return okStatus();\n}\n\nuint32_t ConnectionImpl::getHeadersSize() {\n  return current_header_field_.size() + current_header_value_.size() +\n         headersOrTrailers().byteSize();\n}\n\nStatus ConnectionImpl::checkMaxHeadersSize() {\n  const uint32_t total = getHeadersSize();\n  if (total > (max_headers_kb_ * 1024)) {\n    const absl::string_view header_type =\n        processing_trailers_ ? 
Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers;\n    error_code_ = Http::Code::RequestHeaderFieldsTooLarge;\n    RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge));\n    return codecProtocolError(absl::StrCat(header_type, \" size exceeds limit\"));\n  }\n  return okStatus();\n}\n\nbool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) {\n  if (!handling_upgrade_) {\n    // Only direct dispatch for Upgrade requests.\n    return false;\n  }\n\n  ENVOY_CONN_LOG(trace, \"direct-dispatched {} bytes\", connection_, data.length());\n  onBody(data);\n  data.drain(data.length());\n  return true;\n}\n\nHttp::Status ConnectionImpl::dispatch(Buffer::Instance& data) {\n  // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either\n  // throw an exception or return an error status. The utility wrapper catches exceptions and\n  // converts them to error statuses.\n  return Utility::exceptionToStatus(\n      [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data);\n}\n\nHttp::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) {\n  ENVOY_CONN_LOG(trace, \"parsing {} bytes\", connection_, data.length());\n  // Make sure that dispatching_ is set to false after dispatching, even when\n  // http_parser exits early with an error code.\n  Cleanup cleanup([this]() { dispatching_ = false; });\n  ASSERT(!dispatching_);\n  ASSERT(codec_status_.ok());\n  ASSERT(buffered_body_.length() == 0);\n\n  dispatching_ = true;\n  if (maybeDirectDispatch(data)) {\n    return Http::okStatus();\n  }\n\n  // Always unpause before dispatch.\n  http_parser_pause(&parser_, 0);\n\n  ssize_t total_parsed = 0;\n  if (data.length() > 0) {\n    for (const Buffer::RawSlice& slice : data.getRawSlices()) {\n      auto statusor_parsed = dispatchSlice(static_cast<const char*>(slice.mem_), slice.len_);\n      if (!statusor_parsed.ok()) {\n        return statusor_parsed.status();\n     
 }\n      total_parsed += statusor_parsed.value();\n      if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) {\n        // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at\n        // this point.\n        ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED);\n        break;\n      }\n    }\n    dispatchBufferedBody();\n  } else {\n    auto result = dispatchSlice(nullptr, 0);\n    if (!result.ok()) {\n      return result.status();\n    }\n  }\n  ASSERT(buffered_body_.length() == 0);\n\n  ENVOY_CONN_LOG(trace, \"parsed {} bytes\", connection_, total_parsed);\n  data.drain(total_parsed);\n\n  // If an upgrade has been handled and there is body data or early upgrade\n  // payload to send on, send it on.\n  maybeDirectDispatch(data);\n  return Http::okStatus();\n}\n\nEnvoy::StatusOr<size_t> ConnectionImpl::dispatchSlice(const char* slice, size_t len) {\n  ASSERT(codec_status_.ok() && dispatching_);\n  ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len);\n  if (!codec_status_.ok()) {\n    return codec_status_;\n  }\n  if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) {\n    RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError));\n    // Avoid overwriting the codec_status_ set in the callbacks.\n    ASSERT(codec_status_.ok());\n    codec_status_ = codecProtocolError(\n        absl::StrCat(\"http/1.1 protocol error: \", http_errno_name(HTTP_PARSER_ERRNO(&parser_))));\n    return codec_status_;\n  }\n\n  return rc;\n}\n\nStatus ConnectionImpl::onHeaderField(const char* data, size_t length) {\n  ASSERT(dispatching_);\n  // We previously already finished up the headers, these headers are\n  // now trailers.\n  if (header_parsing_state_ == HeaderParsingState::Done) {\n    if (!enableTrailers()) {\n      // Ignore trailers.\n      return okStatus();\n    }\n    processing_trailers_ = true;\n    header_parsing_state_ = HeaderParsingState::Field;\n    allocTrailers();\n  
}\n  if (header_parsing_state_ == HeaderParsingState::Value) {\n    RETURN_IF_ERROR(completeLastHeader());\n  }\n\n  current_header_field_.append(data, length);\n\n  return checkMaxHeadersSize();\n}\n\nStatus ConnectionImpl::onHeaderValue(const char* data, size_t length) {\n  ASSERT(dispatching_);\n  if (header_parsing_state_ == HeaderParsingState::Done && !enableTrailers()) {\n    // Ignore trailers.\n    return okStatus();\n  }\n\n  absl::string_view header_value{data, length};\n  if (!Http::HeaderUtility::headerValueIsValid(header_value)) {\n    ENVOY_CONN_LOG(debug, \"invalid header value: {}\", connection_, header_value);\n    error_code_ = Http::Code::BadRequest;\n    RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters));\n    return codecProtocolError(\"http/1.1 protocol error: header value contains invalid chars\");\n  }\n\n  header_parsing_state_ = HeaderParsingState::Value;\n  if (current_header_value_.empty()) {\n    // Strip leading whitespace if the current header value input contains the first bytes of the\n    // encoded header value. Trailing whitespace is stripped once the full header value is known in\n    // ConnectionImpl::completeLastHeader. 
http_parser does not strip leading or trailing whitespace\n    // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 .\n    header_value = StringUtil::ltrim(header_value);\n  }\n  current_header_value_.append(header_value.data(), header_value.length());\n\n  return checkMaxHeadersSize();\n}\n\nEnvoy::StatusOr<int> ConnectionImpl::onHeadersCompleteBase() {\n  ASSERT(!processing_trailers_);\n  ASSERT(dispatching_);\n  ENVOY_CONN_LOG(trace, \"onHeadersCompleteBase\", connection_);\n  RETURN_IF_ERROR(completeLastHeader());\n\n  if (!(parser_.http_major == 1 && parser_.http_minor == 1)) {\n    // This is not necessarily true, but it's good enough since higher layers only care if this is\n    // HTTP/1.1 or not.\n    protocol_ = Protocol::Http10;\n  }\n  RequestOrResponseHeaderMap& request_or_response_headers = requestOrResponseHeaders();\n  if (Utility::isUpgrade(request_or_response_headers) && upgradeAllowed()) {\n    // Ignore h2c upgrade requests until we support them.\n    // See https://github.com/envoyproxy/envoy/issues/7161 for details.\n    if (absl::EqualsIgnoreCase(request_or_response_headers.getUpgradeValue(),\n                               Http::Headers::get().UpgradeValues.H2c)) {\n      ENVOY_CONN_LOG(trace, \"removing unsupported h2c upgrade headers.\", connection_);\n      request_or_response_headers.removeUpgrade();\n      if (request_or_response_headers.Connection()) {\n        const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings();\n        std::string new_value = StringUtil::removeTokens(\n            request_or_response_headers.getConnectionValue(), \",\", tokens_to_remove, \",\");\n        if (new_value.empty()) {\n          request_or_response_headers.removeConnection();\n        } else {\n          request_or_response_headers.setConnection(new_value);\n        }\n      }\n      request_or_response_headers.remove(Headers::get().Http2Settings);\n    } else {\n      ENVOY_CONN_LOG(trace, \"codec 
entering upgrade mode.\", connection_);\n      handling_upgrade_ = true;\n    }\n  }\n  if (parser_.method == HTTP_CONNECT) {\n    if (request_or_response_headers.ContentLength()) {\n      if (request_or_response_headers.getContentLengthValue() == \"0\") {\n        request_or_response_headers.removeContentLength();\n      } else {\n        // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a\n        // CONNECT request has no defined semantics, and may be rejected.\n        error_code_ = Http::Code::BadRequest;\n        RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed));\n        return codecProtocolError(\"http/1.1 protocol error: unsupported content length\");\n      }\n    }\n    ENVOY_CONN_LOG(trace, \"codec entering upgrade mode for CONNECT request.\", connection_);\n    handling_upgrade_ = true;\n  }\n\n  // https://tools.ietf.org/html/rfc7230#section-3.3.3\n  // If a message is received with both a Transfer-Encoding and a\n  // Content-Length header field, the Transfer-Encoding overrides the\n  // Content-Length. Such a message might indicate an attempt to\n  // perform request smuggling (Section 9.5) or response splitting\n  // (Section 9.4) and ought to be handled as an error. 
A sender MUST\n  // remove the received Content-Length field prior to forwarding such\n  // a message.\n\n  // Reject message with Http::Code::BadRequest if both Transfer-Encoding and Content-Length\n  // headers are present or if allowed by http1 codec settings and 'Transfer-Encoding'\n  // is chunked - remove Content-Length and serve request.\n  if (parser_.uses_transfer_encoding != 0 && request_or_response_headers.ContentLength()) {\n    if ((parser_.flags & F_CHUNKED) && codec_settings_.allow_chunked_length_) {\n      request_or_response_headers.removeContentLength();\n    } else {\n      error_code_ = Http::Code::BadRequest;\n      RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().ChunkedContentLength));\n      return codecProtocolError(\n          \"http/1.1 protocol error: both 'Content-Length' and 'Transfer-Encoding' are set.\");\n    }\n  }\n\n  // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject\n  // transfer-codings it does not understand.\n  // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a\n  // CONNECT request has no defined semantics, and may be rejected.\n  if (request_or_response_headers.TransferEncoding()) {\n    const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue();\n    if (!absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked) ||\n        parser_.method == HTTP_CONNECT) {\n      error_code_ = Http::Code::NotImplemented;\n      RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding));\n      return codecProtocolError(\"http/1.1 protocol error: unsupported transfer encoding\");\n    }\n  }\n\n  auto statusor = onHeadersComplete();\n  if (!statusor.ok()) {\n    RETURN_IF_ERROR(statusor.status());\n  }\n\n  header_parsing_state_ = HeaderParsingState::Done;\n\n  // Returning 2 informs http_parser to not expect a body or further data on this connection.\n  return handling_upgrade_ ? 
2 : statusor.value();\n}\n\nvoid ConnectionImpl::bufferBody(const char* data, size_t length) {\n  buffered_body_.add(data, length);\n}\n\nvoid ConnectionImpl::dispatchBufferedBody() {\n  ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED);\n  ASSERT(codec_status_.ok());\n  if (buffered_body_.length() > 0) {\n    onBody(buffered_body_);\n    buffered_body_.drain(buffered_body_.length());\n  }\n}\n\nvoid ConnectionImpl::onChunkHeader(bool is_final_chunk) {\n  if (is_final_chunk) {\n    // Dispatch body before parsing trailers, so body ends up dispatched even if an error is found\n    // while processing trailers.\n    dispatchBufferedBody();\n  }\n}\n\nStatus ConnectionImpl::onMessageCompleteBase() {\n  ENVOY_CONN_LOG(trace, \"message complete\", connection_);\n\n  dispatchBufferedBody();\n\n  if (handling_upgrade_) {\n    // If this is an upgrade request, swallow the onMessageComplete. The\n    // upgrade payload will be treated as stream body.\n    ASSERT(!deferred_end_stream_headers_);\n    ENVOY_CONN_LOG(trace, \"Pausing parser due to upgrade.\", connection_);\n    http_parser_pause(&parser_, 1);\n    return okStatus();\n  }\n\n  // If true, this indicates we were processing trailers and must\n  // move the last header into current_header_map_\n  if (header_parsing_state_ == HeaderParsingState::Value) {\n    RETURN_IF_ERROR(completeLastHeader());\n  }\n\n  onMessageComplete();\n  return okStatus();\n}\n\nStatus ConnectionImpl::onMessageBeginBase() {\n  ENVOY_CONN_LOG(trace, \"message begin\", connection_);\n  // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets\n  // protocol for each request. 
Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable\n  // in onHeadersCompleteBase\n  protocol_ = Protocol::Http11;\n  processing_trailers_ = false;\n  header_parsing_state_ = HeaderParsingState::Field;\n  allocHeaders();\n  return onMessageBegin();\n}\n\nvoid ConnectionImpl::onResetStreamBase(StreamResetReason reason) {\n  ASSERT(!reset_stream_called_);\n  reset_stream_called_ = true;\n  onResetStream(reason);\n}\n\nServerConnectionImpl::ServerConnectionImpl(\n    Network::Connection& connection, CodecStats& stats, ServerConnectionCallbacks& callbacks,\n    const Http1Settings& settings, uint32_t max_request_headers_kb,\n    const uint32_t max_request_headers_count,\n    envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n        headers_with_underscores_action)\n    : ConnectionImpl(connection, stats, settings, HTTP_REQUEST, max_request_headers_kb,\n                     max_request_headers_count, formatter(settings)),\n      callbacks_(callbacks),\n      response_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) {\n        releaseOutboundResponse(fragment);\n      }),\n      // Pipelining is generally not well supported on the internet and has a series of dangerous\n      // overflow bugs. As such we are disabling it for now, and removing this temporary override if\n      // no one objects. 
If you use this integer to restore prior behavior, contact the\n      // maintainer team as it will otherwise be removed entirely soon.\n      max_outbound_responses_(\n          Runtime::getInteger(\"envoy.do_not_use_going_away_max_http2_outbound_responses\", 2)),\n      flood_protection_(\n          Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.http1_flood_protection\")),\n      headers_with_underscores_action_(headers_with_underscores_action) {}\n\nuint32_t ServerConnectionImpl::getHeadersSize() {\n  // Add in the size of the request URL if processing request headers.\n  const uint32_t url_size = (!processing_trailers_ && active_request_.has_value())\n                                ? active_request_.value().request_url_.size()\n                                : 0;\n  return url_size + ConnectionImpl::getHeadersSize();\n}\n\nvoid ServerConnectionImpl::onEncodeComplete() {\n  if (active_request_.value().remote_complete_) {\n    // Only do this if remote is complete. If we are replying before the request is complete the\n    // only logical thing to do is for higher level code to reset() / close the connection so we\n    // leave the request around so that it can fire reset callbacks.\n    active_request_.reset();\n  }\n}\n\nStatus ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) {\n  HeaderString path(Headers::get().Path);\n\n  bool is_connect = (method == HTTP_CONNECT);\n\n  // The url is relative or a wildcard when the method is OPTIONS. 
Nothing to do here.\n  auto& active_request = active_request_.value();\n  if (!is_connect && !active_request.request_url_.getStringView().empty() &&\n      (active_request.request_url_.getStringView()[0] == '/' ||\n       ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) {\n    headers.addViaMove(std::move(path), std::move(active_request.request_url_));\n    return okStatus();\n  }\n\n  // If absolute_urls and/or connect are not going be handled, copy the url and return.\n  // This forces the behavior to be backwards compatible with the old codec behavior.\n  // CONNECT \"urls\" are actually host:port so look like absolute URLs to the above checks.\n  // Absolute URLS in CONNECT requests will be rejected below by the URL class validation.\n  if (!codec_settings_.allow_absolute_url_ && !is_connect) {\n    headers.addViaMove(std::move(path), std::move(active_request.request_url_));\n    return okStatus();\n  }\n\n  Utility::Url absolute_url;\n  if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) {\n    RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl));\n    return codecProtocolError(\"http/1.1 protocol error: invalid url in request line\");\n  }\n  // RFC7230#5.7\n  // When a proxy receives a request with an absolute-form of\n  // request-target, the proxy MUST ignore the received Host header field\n  // (if any) and instead replace it with the host information of the\n  // request-target. 
A proxy that forwards such a request MUST generate a\n  // new Host field-value based on the received request-target rather than\n  // forward the received Host field-value.\n  headers.setHost(absolute_url.hostAndPort());\n\n  if (!absolute_url.pathAndQueryParams().empty()) {\n    headers.setPath(absolute_url.pathAndQueryParams());\n  }\n  active_request.request_url_.clear();\n  return okStatus();\n}\n\nEnvoy::StatusOr<int> ServerConnectionImpl::onHeadersComplete() {\n  // Handle the case where response happens prior to request complete. It's up to upper layer code\n  // to disconnect the connection but we shouldn't fire any more events since it doesn't make\n  // sense.\n  if (active_request_.has_value()) {\n    auto& active_request = active_request_.value();\n    auto& headers = absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n    ENVOY_CONN_LOG(trace, \"Server: onHeadersComplete size={}\", connection_, headers->size());\n    const char* method_string = http_method_str(static_cast<http_method>(parser_.method));\n\n    if (!handling_upgrade_ && headers->Connection()) {\n      // If we fail to sanitize the request, return a 400 to the client\n      if (!Utility::sanitizeConnectionHeader(*headers)) {\n        absl::string_view header_value = headers->getConnectionValue();\n        ENVOY_CONN_LOG(debug, \"Invalid nominated headers in Connection: {}\", connection_,\n                       header_value);\n        error_code_ = Http::Code::BadRequest;\n        RETURN_IF_ERROR(\n            sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization));\n        return codecProtocolError(\"Invalid nominated headers in Connection.\");\n      }\n    }\n\n    // Inform the response encoder about any HEAD method, so it can set content\n    // length and transfer encoding headers correctly.\n    active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD);\n    
active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT);\n\n    RETURN_IF_ERROR(handlePath(*headers, parser_.method));\n    ASSERT(active_request.request_url_.empty());\n\n    headers->setMethod(method_string);\n\n    // Make sure the host is valid.\n    auto details = HeaderUtility::requestHeadersValid(*headers);\n    if (details.has_value()) {\n      RETURN_IF_ERROR(sendProtocolError(details.value().get()));\n      return codecProtocolError(\n          \"http/1.1 protocol error: request headers failed spec compliance checks\");\n    }\n\n    // Determine here whether we have a body or not. This uses the new RFC semantics where the\n    // presence of content-length or chunked transfer-encoding indicates a body vs. a particular\n    // method. If there is no body, we defer raising decodeHeaders() until the parser is flushed\n    // with message complete. This allows upper layers to behave like HTTP/2 and prevents a proxy\n    // scenario where the higher layers stream through and implicitly switch to chunked transfer\n    // encoding because end stream with zero body length has not yet been indicated.\n    if (parser_.flags & F_CHUNKED ||\n        (parser_.content_length > 0 && parser_.content_length != ULLONG_MAX) || handling_upgrade_) {\n      active_request.request_decoder_->decodeHeaders(std::move(headers), false);\n\n      // If the connection has been closed (or is closing) after decoding headers, pause the parser\n      // so we return control to the caller.\n      if (connection_.state() != Network::Connection::State::Open) {\n        http_parser_pause(&parser_, 1);\n      }\n    } else {\n      deferred_end_stream_headers_ = true;\n    }\n  }\n\n  return 0;\n}\n\nStatus ServerConnectionImpl::onMessageBegin() {\n  if (!resetStreamCalled()) {\n    ASSERT(!active_request_.has_value());\n    active_request_.emplace(*this, header_key_formatter_.get());\n    auto& active_request = active_request_.value();\n    if 
(resetStreamCalled()) {\n      return codecClientError(\"cannot create new streams after calling reset\");\n    }\n    active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_);\n\n    // Check for pipelined request flood as we prepare to accept a new request.\n    // Parse errors that happen prior to onMessageBegin result in stream termination, it is not\n    // possible to overflow output buffers with early parse errors.\n    RETURN_IF_ERROR(doFloodProtectionChecks());\n  }\n  return okStatus();\n}\n\nStatus ServerConnectionImpl::onUrl(const char* data, size_t length) {\n  if (active_request_.has_value()) {\n    active_request_.value().request_url_.append(data, length);\n\n    RETURN_IF_ERROR(checkMaxHeadersSize());\n  }\n  return okStatus();\n}\n\nvoid ServerConnectionImpl::onBody(Buffer::Instance& data) {\n  ASSERT(!deferred_end_stream_headers_);\n  if (active_request_.has_value()) {\n    ENVOY_CONN_LOG(trace, \"body size={}\", connection_, data.length());\n    active_request_.value().request_decoder_->decodeData(data, false);\n  }\n}\n\nvoid ServerConnectionImpl::onMessageComplete() {\n  ASSERT(!handling_upgrade_);\n  if (active_request_.has_value()) {\n    auto& active_request = active_request_.value();\n\n    if (active_request.request_decoder_) {\n      active_request.response_encoder_.readDisable(true);\n    }\n    active_request.remote_complete_ = true;\n    if (deferred_end_stream_headers_) {\n      active_request.request_decoder_->decodeHeaders(\n          std::move(absl::get<RequestHeaderMapPtr>(headers_or_trailers_)), true);\n      deferred_end_stream_headers_ = false;\n    } else if (processing_trailers_) {\n      active_request.request_decoder_->decodeTrailers(\n          std::move(absl::get<RequestTrailerMapPtr>(headers_or_trailers_)));\n    } else {\n      Buffer::OwnedImpl buffer;\n      active_request.request_decoder_->decodeData(buffer, true);\n    }\n\n    // Reset to ensure no information from one requests 
persists to the next.\n    headers_or_trailers_.emplace<RequestHeaderMapPtr>(nullptr);\n  }\n\n  // Always pause the parser so that the calling code can process 1 request at a time and apply\n  // back pressure. However this means that the calling code needs to detect if there is more data\n  // in the buffer and dispatch it again.\n  http_parser_pause(&parser_, 1);\n}\n\nvoid ServerConnectionImpl::onResetStream(StreamResetReason reason) {\n  active_request_.value().response_encoder_.runResetCallbacks(reason);\n  active_request_.reset();\n}\n\nvoid ServerConnectionImpl::sendProtocolErrorOld(absl::string_view details) {\n  if (active_request_.has_value()) {\n    active_request_.value().response_encoder_.setDetails(details);\n  }\n  // We do this here because we may get a protocol error before we have a logical stream. Higher\n  // layers can only operate on streams, so there is no coherent way to allow them to send an error\n  // \"out of band.\" On one hand this is kind of a hack but on the other hand it normalizes HTTP/1.1\n  // to look more like HTTP/2 to higher layers.\n  if (!active_request_.has_value() ||\n      !active_request_.value().response_encoder_.startedResponse()) {\n    Buffer::OwnedImpl bad_request_response(\n        absl::StrCat(\"HTTP/1.1 \", error_code_, \" \", CodeUtility::toString(error_code_),\n                     \"\\r\\ncontent-length: 0\\r\\nconnection: close\\r\\n\\r\\n\"));\n\n    connection_.write(bad_request_response, false);\n  }\n}\n\nStatus ServerConnectionImpl::sendProtocolError(absl::string_view details) {\n  if (!Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.early_errors_via_hcm\")) {\n    sendProtocolErrorOld(details);\n    return okStatus();\n  }\n  // We do this here because we may get a protocol error before we have a logical stream.\n  if (!active_request_.has_value()) {\n    RETURN_IF_ERROR(onMessageBeginBase());\n  }\n  ASSERT(active_request_.has_value());\n\n  
active_request_.value().response_encoder_.setDetails(details);\n  if (!active_request_.value().response_encoder_.startedResponse()) {\n    // Note that the correctness of is_grpc_request and is_head_request is best-effort.\n    // If headers have not been fully parsed they may not be inferred correctly.\n    bool is_grpc_request = false;\n    if (absl::holds_alternative<RequestHeaderMapPtr>(headers_or_trailers_) &&\n        absl::get<RequestHeaderMapPtr>(headers_or_trailers_) != nullptr) {\n      is_grpc_request =\n          Grpc::Common::isGrpcRequestHeaders(*absl::get<RequestHeaderMapPtr>(headers_or_trailers_));\n    }\n    active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_,\n                                                      CodeUtility::toString(error_code_), nullptr,\n                                                      absl::nullopt, details);\n  }\n  return okStatus();\n}\n\nvoid ServerConnectionImpl::onAboveHighWatermark() {\n  if (active_request_.has_value()) {\n    active_request_.value().response_encoder_.runHighWatermarkCallbacks();\n  }\n}\nvoid ServerConnectionImpl::onBelowLowWatermark() {\n  if (active_request_.has_value()) {\n    active_request_.value().response_encoder_.runLowWatermarkCallbacks();\n  }\n}\n\nvoid ServerConnectionImpl::releaseOutboundResponse(\n    const Buffer::OwnedBufferFragmentImpl* fragment) {\n  ASSERT(outbound_responses_ >= 1);\n  --outbound_responses_;\n  delete fragment;\n}\n\nStatus ServerConnectionImpl::checkHeaderNameForUnderscores() {\n  if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW &&\n      Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) {\n    if (headers_with_underscores_action_ ==\n        envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) {\n      ENVOY_CONN_LOG(debug, \"Dropping header with invalid characters in its name: {}\", connection_,\n                     
current_header_field_.getStringView());\n      stats_.dropped_headers_with_underscores_.inc();\n      current_header_field_.clear();\n      current_header_value_.clear();\n    } else {\n      ENVOY_CONN_LOG(debug, \"Rejecting request due to header name with underscores: {}\",\n                     connection_, current_header_field_.getStringView());\n      error_code_ = Http::Code::BadRequest;\n      RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidUnderscore));\n      stats_.requests_rejected_with_underscores_in_headers_.inc();\n      return codecProtocolError(\"http/1.1 protocol error: header name contains underscores\");\n    }\n  }\n  return okStatus();\n}\n\nClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats,\n                                           ConnectionCallbacks&, const Http1Settings& settings,\n                                           const uint32_t max_response_headers_count)\n    : ConnectionImpl(connection, stats, settings, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB,\n                     max_response_headers_count, formatter(settings)) {}\n\nbool ClientConnectionImpl::cannotHaveBody() {\n  if (pending_response_.has_value() && pending_response_.value().encoder_.headRequest()) {\n    ASSERT(!pending_response_done_);\n    return true;\n  } else if (parser_.status_code == 204 || parser_.status_code == 304 ||\n             (parser_.status_code >= 200 && parser_.content_length == 0 &&\n              !(parser_.flags & F_CHUNKED))) {\n    return true;\n  } else {\n    return false;\n  }\n}\n\nRequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) {\n  // If reads were disabled due to flow control, we expect reads to always be enabled again before\n  // reusing this connection. 
This is done when the response is received.\n  ASSERT(connection_.readEnabled());\n\n  ASSERT(!pending_response_.has_value());\n  ASSERT(pending_response_done_);\n  pending_response_.emplace(*this, header_key_formatter_.get(), &response_decoder);\n  pending_response_done_ = false;\n  return pending_response_.value().encoder_;\n}\n\nEnvoy::StatusOr<int> ClientConnectionImpl::onHeadersComplete() {\n  ENVOY_CONN_LOG(trace, \"status_code {}\", connection_, parser_.status_code);\n\n  // Handle the case where the client is closing a kept alive connection (by sending a 408\n  // with a 'Connection: close' header). In this case we just let response flush out followed\n  // by the remote close.\n  if (!pending_response_.has_value() && !resetStreamCalled()) {\n    return prematureResponseError(\"\", static_cast<Http::Code>(parser_.status_code));\n  } else if (pending_response_.has_value()) {\n    ASSERT(!pending_response_done_);\n    auto& headers = absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n    ENVOY_CONN_LOG(trace, \"Client: onHeadersComplete size={}\", connection_, headers->size());\n    headers->setStatus(parser_.status_code);\n\n    if (parser_.status_code >= 200 && parser_.status_code < 300 &&\n        pending_response_.value().encoder_.connectRequest()) {\n      ENVOY_CONN_LOG(trace, \"codec entering upgrade mode for CONNECT response.\", connection_);\n      handling_upgrade_ = true;\n\n      // For responses to connect requests, do not accept the chunked\n      // encoding header: https://tools.ietf.org/html/rfc7231#section-4.3.6\n      if (headers->TransferEncoding() &&\n          absl::EqualsIgnoreCase(headers->TransferEncoding()->value().getStringView(),\n                                 Headers::get().TransferEncodingValues.Chunked)) {\n        RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding));\n        return codecProtocolError(\"http/1.1 protocol error: unsupported transfer encoding\");\n      }\n    }\n\n  
  if (strict_1xx_and_204_headers_ && (parser_.status_code < 200 || parser_.status_code == 204)) {\n      if (headers->TransferEncoding()) {\n        RETURN_IF_ERROR(\n            sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed));\n        return codecProtocolError(\n            \"http/1.1 protocol error: transfer encoding not allowed in 1xx or 204\");\n      }\n\n      if (headers->ContentLength()) {\n        // Report a protocol error for non-zero Content-Length, but paper over zero Content-Length.\n        if (headers->ContentLength()->value().getStringView() != \"0\") {\n          RETURN_IF_ERROR(\n              sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed));\n          return codecProtocolError(\n              \"http/1.1 protocol error: content length not allowed in 1xx or 204\");\n        }\n\n        headers->removeContentLength();\n      }\n    }\n\n    if (parser_.status_code == enumToInt(Http::Code::Continue)) {\n      pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers));\n    } else if (cannotHaveBody() && !handling_upgrade_) {\n      deferred_end_stream_headers_ = true;\n    } else {\n      pending_response_.value().decoder_->decodeHeaders(std::move(headers), false);\n    }\n\n    // http-parser treats 1xx headers as their own complete response. 
Swallow the spurious\n    // onMessageComplete and continue processing for purely informational headers.\n    // 101-SwitchingProtocols is exempt as all data after the header is proxied through after\n    // upgrading.\n    if (CodeUtility::is1xx(parser_.status_code) &&\n        parser_.status_code != enumToInt(Http::Code::SwitchingProtocols)) {\n      ignore_message_complete_for_1xx_ = true;\n      // Reset to ensure no information from the 1xx headers is used for the response headers.\n      headers_or_trailers_.emplace<ResponseHeaderMapPtr>(nullptr);\n    }\n  }\n\n  // Here we deal with cases where the response cannot have a body by returning 1, but http_parser\n  // does not deal with it for us.\n  return cannotHaveBody() ? 1 : 0;\n}\n\nbool ClientConnectionImpl::upgradeAllowed() const {\n  if (pending_response_.has_value()) {\n    return pending_response_->encoder_.upgradeRequest();\n  }\n  return false;\n}\n\nvoid ClientConnectionImpl::onBody(Buffer::Instance& data) {\n  ASSERT(!deferred_end_stream_headers_);\n  if (pending_response_.has_value()) {\n    ASSERT(!pending_response_done_);\n    pending_response_.value().decoder_->decodeData(data, false);\n  }\n}\n\nvoid ClientConnectionImpl::onMessageComplete() {\n  ENVOY_CONN_LOG(trace, \"message complete\", connection_);\n  if (ignore_message_complete_for_1xx_) {\n    ignore_message_complete_for_1xx_ = false;\n    return;\n  }\n  if (pending_response_.has_value()) {\n    ASSERT(!pending_response_done_);\n    // After calling decodeData() with end stream set to true, we should no longer be able to reset.\n    PendingResponse& response = pending_response_.value();\n    // Encoder is used as part of decode* calls later in this function so pending_response_ can not\n    // be reset just yet. 
Preserve the state in pending_response_done_ instead.\n    pending_response_done_ = true;\n\n    if (deferred_end_stream_headers_) {\n      response.decoder_->decodeHeaders(\n          std::move(absl::get<ResponseHeaderMapPtr>(headers_or_trailers_)), true);\n      deferred_end_stream_headers_ = false;\n    } else if (processing_trailers_) {\n      response.decoder_->decodeTrailers(\n          std::move(absl::get<ResponseTrailerMapPtr>(headers_or_trailers_)));\n    } else {\n      Buffer::OwnedImpl buffer;\n      response.decoder_->decodeData(buffer, true);\n    }\n\n    // Reset to ensure no information from one requests persists to the next.\n    pending_response_.reset();\n    headers_or_trailers_.emplace<ResponseHeaderMapPtr>(nullptr);\n  }\n}\n\nvoid ClientConnectionImpl::onResetStream(StreamResetReason reason) {\n  // Only raise reset if we did not already dispatch a complete response.\n  if (pending_response_.has_value() && !pending_response_done_) {\n    pending_response_.value().encoder_.runResetCallbacks(reason);\n    pending_response_done_ = true;\n    pending_response_.reset();\n  }\n}\n\nStatus ClientConnectionImpl::sendProtocolError(absl::string_view details) {\n  if (pending_response_.has_value()) {\n    ASSERT(!pending_response_done_);\n    pending_response_.value().encoder_.setDetails(details);\n  }\n  return okStatus();\n}\n\nvoid ClientConnectionImpl::onAboveHighWatermark() {\n  // This should never happen without an active stream/request.\n  pending_response_.value().encoder_.runHighWatermarkCallbacks();\n}\n\nvoid ClientConnectionImpl::onBelowLowWatermark() {\n  // This can get called without an active stream/request when the response completion causes us to\n  // close the connection, but in doing so go below low watermark.\n  if (pending_response_.has_value() && !pending_response_done_) {\n    pending_response_.value().encoder_.runLowWatermarkCallbacks();\n  }\n}\n\n} // namespace Http1\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http1/codec_impl.h",
    "content": "#pragma once\n\n#include <http_parser.h>\n\n#include <array>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/statusor.h\"\n#include \"common/http/codec_helper.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/http1/codec_stats.h\"\n#include \"common/http/http1/header_formatter.h\"\n#include \"common/http/status.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http1 {\n\nclass ConnectionImpl;\n\n/**\n * Base class for HTTP/1.1 request and response encoders.\n */\nclass StreamEncoderImpl : public virtual StreamEncoder,\n                          public Stream,\n                          Logger::Loggable<Logger::Id::http>,\n                          public StreamCallbackHelper,\n                          public Http1StreamEncoderOptions {\npublic:\n  ~StreamEncoderImpl() override {\n    // When the stream goes away, undo any read blocks to resume reading.\n    while (read_disable_calls_ != 0) {\n      StreamEncoderImpl::readDisable(false);\n    }\n  }\n  // Http::StreamEncoder\n  void encodeData(Buffer::Instance& data, bool end_stream) override;\n  void encodeMetadata(const MetadataMapVector&) override;\n  Stream& getStream() override { return *this; }\n  Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return *this; }\n\n  // Http::Http1StreamEncoderOptions\n  void disableChunkEncoding() override { disable_chunk_encoding_ = true; }\n\n  // Http::Stream\n  void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); }\n  void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); }\n  // After this is called, for the HTTP/1 codec, the connection should 
be closed, i.e. no further\n  // progress may be made with the codec.\n  void resetStream(StreamResetReason reason) override;\n  void readDisable(bool disable) override;\n  uint32_t bufferLimit() override;\n  absl::string_view responseDetails() override { return details_; }\n  const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override;\n  void setFlushTimeout(std::chrono::milliseconds) override {\n    // HTTP/1 has one stream per connection, thus any data encoded is immediately written to the\n    // connection, invoking any watermarks as necessary. There is no internal buffering that would\n    // require a flush timeout not already covered by other timeouts.\n  }\n\n  void setIsResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; }\n  void setIsResponseToConnectRequest(bool value) { is_response_to_connect_request_ = value; }\n  void setDetails(absl::string_view details) { details_ = details; }\n\n  void clearReadDisableCallsForTests() { read_disable_calls_ = 0; }\n\nprotected:\n  StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter);\n  void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional<uint64_t> status,\n                         bool end_stream);\n  void encodeTrailersBase(const HeaderMap& headers);\n\n  static const std::string CRLF;\n  static const std::string LAST_CHUNK;\n\n  ConnectionImpl& connection_;\n  uint32_t read_disable_calls_{};\n  bool disable_chunk_encoding_ : 1;\n  bool chunk_encoding_ : 1;\n  bool is_response_to_head_request_ : 1;\n  bool is_response_to_connect_request_ : 1;\n\nprivate:\n  /**\n   * Called to encode an individual header.\n   * @param key supplies the header to encode.\n   * @param key_size supplies the byte size of the key.\n   * @param value supplies the value to encode.\n   * @param value_size supplies the byte size of the value.\n   */\n  void encodeHeader(const char* key, uint32_t key_size, const char* value, uint32_t 
value_size);\n\n  /**\n   * Called to encode an individual header.\n   * @param key supplies the header to encode as a string_view.\n   * @param value supplies the value to encode as a string_view.\n   */\n  void encodeHeader(absl::string_view key, absl::string_view value);\n\n  /**\n   * Called to finalize a stream encode.\n   */\n  void endEncode();\n\n  void encodeFormattedHeader(absl::string_view key, absl::string_view value);\n\n  const HeaderKeyFormatter* const header_key_formatter_;\n  absl::string_view details_;\n};\n\n/**\n * HTTP/1.1 response encoder.\n */\nclass ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder {\npublic:\n  ResponseEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter,\n                      bool stream_error_on_invalid_http_message)\n      : StreamEncoderImpl(connection, header_key_formatter),\n        stream_error_on_invalid_http_message_(stream_error_on_invalid_http_message) {}\n\n  bool startedResponse() { return started_response_; }\n\n  // Http::ResponseEncoder\n  void encode100ContinueHeaders(const ResponseHeaderMap& headers) override;\n  void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override;\n  void encodeTrailers(const ResponseTrailerMap& trailers) override { encodeTrailersBase(trailers); }\n\n  bool streamErrorOnInvalidHttpMessage() const override {\n    return stream_error_on_invalid_http_message_;\n  }\n\nprivate:\n  bool started_response_{};\n  const bool stream_error_on_invalid_http_message_;\n};\n\n/**\n * HTTP/1.1 request encoder.\n */\nclass RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder {\npublic:\n  RequestEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter)\n      : StreamEncoderImpl(connection, header_key_formatter) {}\n  bool upgradeRequest() const { return upgrade_request_; }\n  bool headRequest() const { return head_request_; }\n  bool connectRequest() const { return connect_request_; }\n\n  
// Http::RequestEncoder\n  void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override;\n  void encodeTrailers(const RequestTrailerMap& trailers) override { encodeTrailersBase(trailers); }\n\nprivate:\n  bool upgrade_request_{};\n  bool head_request_{};\n  bool connect_request_{};\n};\n\n/**\n * Base class for HTTP/1.1 client and server connections.\n * Handles the callbacks of http_parser with its own base routine and then\n * virtual dispatches to its subclasses.\n */\nclass ConnectionImpl : public virtual Connection, protected Logger::Loggable<Logger::Id::http> {\npublic:\n  /**\n   * @return Network::Connection& the backing network connection.\n   */\n  Network::Connection& connection() { return connection_; }\n\n  /**\n   * Called when the active encoder has completed encoding the outbound half of the stream.\n   */\n  virtual void onEncodeComplete() PURE;\n\n  /**\n   * Called when resetStream() has been called on an active stream. In HTTP/1.1 the only\n   * valid operation after this point is for the connection to get blown away, but we will not\n   * fire any more callbacks in case some stack has to unwind.\n   */\n  void onResetStreamBase(StreamResetReason reason);\n\n  /**\n   * Flush all pending output from encoding.\n   */\n  void flushOutput(bool end_encode = false);\n\n  void addToBuffer(absl::string_view data);\n  void addCharToBuffer(char c);\n  void addIntToBuffer(uint64_t i);\n  Buffer::WatermarkBuffer& buffer() { return output_buffer_; }\n  uint64_t bufferRemainingSize();\n  void copyToBuffer(const char* data, uint64_t length);\n  void reserveBuffer(uint64_t size);\n  void readDisable(bool disable) {\n    if (connection_.state() == Network::Connection::State::Open) {\n      connection_.readDisable(disable);\n    }\n  }\n  uint32_t bufferLimit() { return connection_.bufferLimit(); }\n  virtual bool supportsHttp10() { return false; }\n  bool maybeDirectDispatch(Buffer::Instance& data);\n  virtual void 
maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer&) {}\n  CodecStats& stats() { return stats_; }\n  bool enableTrailers() const { return codec_settings_.enable_trailers_; }\n\n  // Http::Connection\n  Http::Status dispatch(Buffer::Instance& data) override;\n  void goAway() override {} // Called during connection manager drain flow\n  Protocol protocol() override { return protocol_; }\n  void shutdownNotice() override {} // Called during connection manager drain flow\n  bool wantsToWrite() override { return false; }\n  void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { onAboveHighWatermark(); }\n  void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { onBelowLowWatermark(); }\n\n  bool strict1xxAnd204Headers() { return strict_1xx_and_204_headers_; }\n\n  int setAndCheckCallbackStatus(Status&& status);\n  int setAndCheckCallbackStatusOr(Envoy::StatusOr<int>&& statusor);\n\n  // Codec errors found in callbacks are overridden within the http_parser library. This holds those\n  // errors to propagate them through to dispatch() where we can handle the error.\n  Envoy::Http::Status codec_status_;\n\nprotected:\n  ConnectionImpl(Network::Connection& connection, CodecStats& stats, const Http1Settings& settings,\n                 http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count,\n                 HeaderKeyFormatterPtr&& header_key_formatter);\n\n  // The following define special return values for http_parser callbacks. See:\n  // https://github.com/nodejs/http-parser/blob/5c5b3ac62662736de9e71640a8dc16da45b32503/http_parser.h#L72\n  // These codes do not overlap with standard HTTP Status codes. 
They are only used for user\n  // callbacks.\n  enum class HttpParserCode {\n    // Callbacks other than on_headers_complete should return a non-zero int to indicate an error\n    // and\n    // halt execution.\n    Error = -1,\n    Success = 0,\n    // Returning '1' from on_headers_complete will tell http_parser that it should not expect a\n    // body.\n    NoBody = 1,\n    // Returning '2' from on_headers_complete will tell http_parser that it should not expect a body\n    // nor any further data on the connection.\n    NoBodyData = 2,\n  };\n\n  bool resetStreamCalled() { return reset_stream_called_; }\n  Status onMessageBeginBase();\n\n  /**\n   * Get memory used to represent HTTP headers or trailers currently being parsed.\n   * Computed by adding the partial header field and value that is currently being parsed and the\n   * estimated header size for previous header lines provided by HeaderMap::byteSize().\n   */\n  virtual uint32_t getHeadersSize();\n\n  /**\n   * Called from onUrl, onHeaderField and onHeaderValue to verify that the headers do not exceed the\n   * configured max header size limit.\n   * @return A codecProtocolError status if headers exceed the size limit.\n   */\n  Status checkMaxHeadersSize();\n\n  Network::Connection& connection_;\n  CodecStats& stats_;\n  const Http1Settings codec_settings_;\n  http_parser parser_;\n  Http::Code error_code_{Http::Code::BadRequest};\n  const HeaderKeyFormatterPtr header_key_formatter_;\n  HeaderString current_header_field_;\n  HeaderString current_header_value_;\n  bool processing_trailers_ : 1;\n  bool handling_upgrade_ : 1;\n  bool reset_stream_called_ : 1;\n  // Deferred end stream headers indicate that we are not going to raise headers until the full\n  // HTTP/1 message has been flushed from the parser. 
This allows raising an HTTP/2 style headers\n  // block with end stream set to true with no further protocol data remaining.\n  bool deferred_end_stream_headers_ : 1;\n  const bool strict_1xx_and_204_headers_ : 1;\n  bool dispatching_ : 1;\n\nprivate:\n  enum class HeaderParsingState { Field, Value, Done };\n\n  virtual HeaderMap& headersOrTrailers() PURE;\n  virtual RequestOrResponseHeaderMap& requestOrResponseHeaders() PURE;\n  virtual void allocHeaders() PURE;\n  virtual void allocTrailers() PURE;\n\n  /**\n   * Called in order to complete an in progress header decode.\n   * @return A status representing success.\n   */\n  Status completeLastHeader();\n\n  /**\n   * Check if header name contains underscore character.\n   * Underscore character is allowed in header names by the RFC-7230 and this check is implemented\n   * as a security measure due to systems that treat '_' and '-' as interchangeable.\n   * The ServerConnectionImpl may drop header or reject request based on the\n   * `common_http_protocol_options.headers_with_underscores_action` configuration option in the\n   * HttpConnectionManager.\n   */\n  virtual bool shouldDropHeaderWithUnderscoresInNames(absl::string_view /* header_name */) const {\n    return false;\n  }\n\n  /**\n   * An inner dispatch call that executes the dispatching logic. 
While exception removal is in\n   * migration (#10878), this function may either throw an exception or return an error status.\n   * Exceptions are caught and translated to their corresponding statuses in the outer level\n   * dispatch.\n   * TODO(#10878): Remove this when exception removal is complete.\n   */\n  Http::Status innerDispatch(Buffer::Instance& data);\n\n  /**\n   * Dispatch a memory span.\n   * @param slice supplies the start address.\n   * @len supplies the length of the span.\n   */\n  Envoy::StatusOr<size_t> dispatchSlice(const char* slice, size_t len);\n\n  /**\n   * Called by the http_parser when body data is received.\n   * @param data supplies the start address.\n   * @param length supplies the length.\n   */\n  void bufferBody(const char* data, size_t length);\n\n  /**\n   * Push the accumulated body through the filter pipeline.\n   */\n  void dispatchBufferedBody();\n\n  /**\n   * Called when a request/response is beginning. A base routine happens first then a virtual\n   * dispatch is invoked.\n   */\n  virtual Status onMessageBegin() PURE;\n\n  /**\n   * Called when URL data is received.\n   * @param data supplies the start address.\n   * @param length supplies the length.\n   */\n  virtual Status onUrl(const char* data, size_t length) PURE;\n\n  /**\n   * Called when header field data is received.\n   * @param data supplies the start address.\n   * @param length supplies the length.\n   * @return A status representing success.\n   */\n  Status onHeaderField(const char* data, size_t length);\n\n  /**\n   * Called when header value data is received.\n   * @param data supplies the start address.\n   * @param length supplies the length.\n   * @return A status representing success.\n   */\n  Status onHeaderValue(const char* data, size_t length);\n\n  /**\n   * Called when headers are complete. A base routine happens first then a virtual dispatch is\n   * invoked. Note that this only applies to headers and NOT trailers. 
End of\n   * trailers are signaled via onMessageCompleteBase().\n   * @return An error status or an integer representing 0 if no error, 1 if there should be no body.\n   */\n  Envoy::StatusOr<int> onHeadersCompleteBase();\n  virtual Envoy::StatusOr<int> onHeadersComplete() PURE;\n\n  /**\n   * Called to see if upgrade transition is allowed.\n   */\n  virtual bool upgradeAllowed() const PURE;\n\n  /**\n   * Called with body data is available for processing when either:\n   * - There is an accumulated partial body after the parser is done processing bytes read from the\n   * socket\n   * - The parser encounters the last byte of the body\n   * - The codec does a direct dispatch from the read buffer\n   * For performance reasons there is at most one call to onBody per call to HTTP/1\n   * ConnectionImpl::dispatch call.\n   * @param data supplies the body data\n   */\n  virtual void onBody(Buffer::Instance& data) PURE;\n\n  /**\n   * Called when the request/response is complete.\n   * @return A status representing success.\n   */\n  Status onMessageCompleteBase();\n  virtual void onMessageComplete() PURE;\n\n  /**\n   * Called when accepting a chunk header.\n   */\n  void onChunkHeader(bool is_final_chunk);\n\n  /**\n   * @see onResetStreamBase().\n   */\n  virtual void onResetStream(StreamResetReason reason) PURE;\n\n  /**\n   * Send a protocol error response to remote.\n   */\n  virtual Status sendProtocolError(absl::string_view details) PURE;\n\n  /**\n   * Called when output_buffer_ or the underlying connection go from below a low watermark to over\n   * a high watermark.\n   */\n  virtual void onAboveHighWatermark() PURE;\n\n  /**\n   * Called when output_buffer_ or the underlying connection  go from above a high watermark to\n   * below a low watermark.\n   */\n  virtual void onBelowLowWatermark() PURE;\n\n  /**\n   * Check if header name contains underscore character.\n   * The ServerConnectionImpl may drop header or reject request based on configuration.\n   * 
@return A status representing whether the request is rejected.\n   */\n  virtual Status checkHeaderNameForUnderscores() { return okStatus(); }\n\n  static http_parser_settings settings_;\n\n  HeaderParsingState header_parsing_state_{HeaderParsingState::Field};\n  // Used to accumulate the HTTP message body during the current dispatch call. The accumulated body\n  // is pushed through the filter pipeline either at the end of the current dispatch call, or when\n  // the last byte of the body is processed (whichever happens first).\n  Buffer::OwnedImpl buffered_body_;\n  Buffer::WatermarkBuffer output_buffer_;\n  Protocol protocol_{Protocol::Http11};\n  const uint32_t max_headers_kb_;\n  const uint32_t max_headers_count_;\n};\n\n/**\n * Implementation of Http::ServerConnection for HTTP/1.1.\n */\nclass ServerConnectionImpl : public ServerConnection, public ConnectionImpl {\npublic:\n  ServerConnectionImpl(Network::Connection& connection, CodecStats& stats,\n                       ServerConnectionCallbacks& callbacks, const Http1Settings& settings,\n                       uint32_t max_request_headers_kb, const uint32_t max_request_headers_count,\n                       envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n                           headers_with_underscores_action);\n  bool supportsHttp10() override { return codec_settings_.accept_http_10_; }\n\nprotected:\n  /**\n   * An active HTTP/1.1 request.\n   */\n  struct ActiveRequest {\n    ActiveRequest(ServerConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter)\n        : response_encoder_(connection, header_key_formatter,\n                            connection.codec_settings_.stream_error_on_invalid_http_message_) {}\n\n    HeaderString request_url_;\n    RequestDecoder* request_decoder_{};\n    ResponseEncoderImpl response_encoder_;\n    bool remote_complete_{};\n  };\n  absl::optional<ActiveRequest>& activeRequest() { return active_request_; }\n  // ConnectionImpl\n  
void onMessageComplete() override;\n  // Add the size of the request_url to the reported header size when processing request headers.\n  uint32_t getHeadersSize() override;\n\nprivate:\n  /**\n   * Manipulate the request's first line, parsing the url and converting to a relative path if\n   * necessary. Compute Host / :authority headers based on 7230#5.7 and 7230#6\n   *\n   * @param is_connect true if the request has the CONNECT method\n   * @param headers the request's headers\n   * @return Status representing success or failure. This will fail if there is an invalid url in\n   * the request line.\n   */\n  Status handlePath(RequestHeaderMap& headers, unsigned int method);\n\n  // ConnectionImpl\n  void onEncodeComplete() override;\n  Status onMessageBegin() override;\n  Status onUrl(const char* data, size_t length) override;\n  Envoy::StatusOr<int> onHeadersComplete() override;\n  // If upgrade behavior is not allowed, the HCM will have sanitized the headers out.\n  bool upgradeAllowed() const override { return true; }\n  void onBody(Buffer::Instance& data) override;\n  void onResetStream(StreamResetReason reason) override;\n  Status sendProtocolError(absl::string_view details) override;\n  void onAboveHighWatermark() override;\n  void onBelowLowWatermark() override;\n  HeaderMap& headersOrTrailers() override {\n    if (absl::holds_alternative<RequestHeaderMapPtr>(headers_or_trailers_)) {\n      return *absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n    } else {\n      return *absl::get<RequestTrailerMapPtr>(headers_or_trailers_);\n    }\n  }\n  RequestOrResponseHeaderMap& requestOrResponseHeaders() override {\n    return *absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n  }\n  void allocHeaders() override {\n    ASSERT(nullptr == absl::get<RequestHeaderMapPtr>(headers_or_trailers_));\n    ASSERT(!processing_trailers_);\n    headers_or_trailers_.emplace<RequestHeaderMapPtr>(RequestHeaderMapImpl::create());\n  }\n  void allocTrailers() override {\n  
  ASSERT(processing_trailers_);\n    if (!absl::holds_alternative<RequestTrailerMapPtr>(headers_or_trailers_)) {\n      headers_or_trailers_.emplace<RequestTrailerMapPtr>(RequestTrailerMapImpl::create());\n    }\n  }\n\n  void sendProtocolErrorOld(absl::string_view details);\n\n  void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment);\n  void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override;\n  Status doFloodProtectionChecks() const;\n  Status checkHeaderNameForUnderscores() override;\n\n  ServerConnectionCallbacks& callbacks_;\n  absl::optional<ActiveRequest> active_request_;\n  const Buffer::OwnedBufferFragmentImpl::Releasor response_buffer_releasor_;\n  uint32_t outbound_responses_{};\n  // This defaults to 2, which functionally disables pipelining. If any users\n  // of Envoy wish to enable pipelining (which is dangerous and ill supported)\n  // we could make this configurable.\n  uint32_t max_outbound_responses_{};\n  bool flood_protection_{};\n  // TODO(mattklein123): This should be a member of ActiveRequest but this change needs dedicated\n  // thought as some of the reset and no header code paths make this difficult. Headers are\n  // populated on message begin. Trailers are populated on the first parsed trailer field (if\n  // trailers are enabled). 
The variant is reset to null headers on message complete for assertion\n  // purposes.\n  absl::variant<RequestHeaderMapPtr, RequestTrailerMapPtr> headers_or_trailers_;\n  // The action to take when a request header name contains underscore characters.\n  const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n      headers_with_underscores_action_;\n};\n\n/**\n * Implementation of Http::ClientConnection for HTTP/1.1.\n */\nclass ClientConnectionImpl : public ClientConnection, public ConnectionImpl {\npublic:\n  ClientConnectionImpl(Network::Connection& connection, CodecStats& stats,\n                       ConnectionCallbacks& callbacks, const Http1Settings& settings,\n                       const uint32_t max_response_headers_count);\n  // Http::ClientConnection\n  RequestEncoder& newStream(ResponseDecoder& response_decoder) override;\n\nprivate:\n  struct PendingResponse {\n    PendingResponse(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter,\n                    ResponseDecoder* decoder)\n        : encoder_(connection, header_key_formatter), decoder_(decoder) {}\n\n    RequestEncoderImpl encoder_;\n    ResponseDecoder* decoder_;\n  };\n\n  bool cannotHaveBody();\n\n  // ConnectionImpl\n  void onEncodeComplete() override {}\n  Status onMessageBegin() override { return okStatus(); }\n  Status onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  Envoy::StatusOr<int> onHeadersComplete() override;\n  bool upgradeAllowed() const override;\n  void onBody(Buffer::Instance& data) override;\n  void onMessageComplete() override;\n  void onResetStream(StreamResetReason reason) override;\n  Status sendProtocolError(absl::string_view details) override;\n  void onAboveHighWatermark() override;\n  void onBelowLowWatermark() override;\n  HeaderMap& headersOrTrailers() override {\n    if (absl::holds_alternative<ResponseHeaderMapPtr>(headers_or_trailers_)) {\n      return 
*absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n    } else {\n      return *absl::get<ResponseTrailerMapPtr>(headers_or_trailers_);\n    }\n  }\n  RequestOrResponseHeaderMap& requestOrResponseHeaders() override {\n    return *absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n  }\n  void allocHeaders() override {\n    ASSERT(nullptr == absl::get<ResponseHeaderMapPtr>(headers_or_trailers_));\n    ASSERT(!processing_trailers_);\n    headers_or_trailers_.emplace<ResponseHeaderMapPtr>(ResponseHeaderMapImpl::create());\n  }\n  void allocTrailers() override {\n    ASSERT(processing_trailers_);\n    if (!absl::holds_alternative<ResponseTrailerMapPtr>(headers_or_trailers_)) {\n      headers_or_trailers_.emplace<ResponseTrailerMapPtr>(ResponseTrailerMapImpl::create());\n    }\n  }\n\n  absl::optional<PendingResponse> pending_response_;\n  // TODO(mattklein123): The following bool tracks whether a pending response is complete before\n  // dispatching callbacks. This is needed so that pending_response_ stays valid during callbacks\n  // in order to access the stream, but to avoid invoking callbacks that shouldn't be called once\n  // the response is complete. The existence of this variable is hard to reason about and it should\n  // be combined with pending_response_ somehow in a follow up cleanup.\n  bool pending_response_done_{true};\n  // Set true between receiving non-101 1xx headers and receiving the spurious onMessageComplete.\n  bool ignore_message_complete_for_1xx_{};\n  // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated\n  // thought as some of the reset and no header code paths make this difficult. Headers are\n  // populated on message begin. Trailers are populated when the switch to trailer processing is\n  // detected while parsing the first trailer field (if trailers are enabled). 
The variant is reset\n  // to null headers on message complete for assertion purposes.\n  absl::variant<ResponseHeaderMapPtr, ResponseTrailerMapPtr> headers_or_trailers_;\n\n  // The default limit of 80 KiB is the vanilla http_parser behaviour.\n  static constexpr uint32_t MAX_RESPONSE_HEADERS_KB = 80;\n};\n\n} // namespace Http1\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http1/codec_impl_legacy.cc",
    "content": "#include \"common/http/http1/codec_impl_legacy.h\"\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/http1/header_formatter.h\"\n#include \"common/http/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/ascii.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Legacy {\nnamespace Http1 {\nnamespace {\n\nstruct Http1ResponseCodeDetailValues {\n  const absl::string_view TooManyHeaders = \"http1.too_many_headers\";\n  const absl::string_view HeadersTooLarge = \"http1.headers_too_large\";\n  const absl::string_view HttpCodecError = \"http1.codec_error\";\n  const absl::string_view InvalidCharacters = \"http1.invalid_characters\";\n  const absl::string_view ConnectionHeaderSanitization = \"http1.connection_header_rejected\";\n  const absl::string_view InvalidUrl = \"http1.invalid_url\";\n  const absl::string_view InvalidTransferEncoding = \"http1.invalid_transfer_encoding\";\n  const absl::string_view BodyDisallowed = \"http1.body_disallowed\";\n  const absl::string_view TransferEncodingNotAllowed = \"http1.transfer_encoding_not_allowed\";\n  const absl::string_view ContentLengthNotAllowed = \"http1.content_length_not_allowed\";\n  const absl::string_view InvalidUnderscore = \"http1.unexpected_underscore\";\n  const absl::string_view ChunkedContentLength = \"http1.content_length_and_chunked_not_allowed\";\n};\n\nstruct Http1HeaderTypesValues {\n  const absl::string_view Headers = \"headers\";\n  const absl::string_view Trailers = \"trailers\";\n};\n\nusing 
Http1ResponseCodeDetails = ConstSingleton<Http1ResponseCodeDetailValues>;\nusing Http1HeaderTypes = ConstSingleton<Http1HeaderTypesValues>;\nusing Http::Http1::CodecStats;\nusing Http::Http1::HeaderKeyFormatter;\nusing Http::Http1::HeaderKeyFormatterPtr;\nusing Http::Http1::ProperCaseHeaderKeyFormatter;\n\nconst StringUtil::CaseUnorderedSet& caseUnorderdSetContainingUpgradeAndHttp2Settings() {\n  CONSTRUCT_ON_FIRST_USE(StringUtil::CaseUnorderedSet,\n                         Http::Headers::get().ConnectionValues.Upgrade,\n                         Http::Headers::get().ConnectionValues.Http2Settings);\n}\n\nHeaderKeyFormatterPtr formatter(const Http::Http1Settings& settings) {\n  if (settings.header_key_format_ == Http1Settings::HeaderKeyFormat::ProperCase) {\n    return std::make_unique<ProperCaseHeaderKeyFormatter>();\n  }\n\n  return nullptr;\n}\n\n} // namespace\n\nconst std::string StreamEncoderImpl::CRLF = \"\\r\\n\";\n// Last chunk as defined here https://tools.ietf.org/html/rfc7230#section-4.1\nconst std::string StreamEncoderImpl::LAST_CHUNK = \"0\\r\\n\";\n\nStreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection,\n                                     HeaderKeyFormatter* header_key_formatter)\n    : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true),\n      is_response_to_head_request_(false), is_response_to_connect_request_(false),\n      header_key_formatter_(header_key_formatter) {\n  if (connection_.connection().aboveHighWatermark()) {\n    runHighWatermarkCallbacks();\n  }\n}\n\nvoid StreamEncoderImpl::encodeHeader(const char* key, uint32_t key_size, const char* value,\n                                     uint32_t value_size) {\n\n  ASSERT(key_size > 0);\n\n  connection_.copyToBuffer(key, key_size);\n  connection_.addCharToBuffer(':');\n  connection_.addCharToBuffer(' ');\n  connection_.copyToBuffer(value, value_size);\n  connection_.addToBuffer(CRLF);\n}\nvoid StreamEncoderImpl::encodeHeader(absl::string_view key, 
absl::string_view value) {\n  this->encodeHeader(key.data(), key.size(), value.data(), value.size());\n}\n\nvoid StreamEncoderImpl::encodeFormattedHeader(absl::string_view key, absl::string_view value) {\n  if (header_key_formatter_ != nullptr) {\n    encodeHeader(header_key_formatter_->format(key), value);\n  } else {\n    encodeHeader(key, value);\n  }\n}\n\nvoid ResponseEncoderImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) {\n  ASSERT(headers.Status()->value() == \"100\");\n  encodeHeaders(headers, false);\n}\n\nvoid StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& headers,\n                                          absl::optional<uint64_t> status, bool end_stream) {\n  bool saw_content_length = false;\n  headers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate {\n    absl::string_view key_to_use = header.key().getStringView();\n    uint32_t key_size_to_use = header.key().size();\n    // Translate :authority -> host so that upper layers do not need to deal with this.\n    if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') {\n      key_to_use = absl::string_view(Headers::get().HostLegacy.get());\n      key_size_to_use = Headers::get().HostLegacy.get().size();\n    }\n\n    // Skip all headers starting with ':' that make it here.\n    if (key_to_use[0] == ':') {\n      return HeaderMap::Iterate::Continue;\n    }\n\n    encodeFormattedHeader(key_to_use, header.value().getStringView());\n\n    return HeaderMap::Iterate::Continue;\n  });\n\n  if (headers.ContentLength()) {\n    saw_content_length = true;\n  }\n\n  ASSERT(!headers.TransferEncoding());\n\n  // Assume we are chunk encoding unless we are passed a content length or this is a header only\n  // response. Upper layers generally should strip transfer-encoding since it only applies to\n  // HTTP/1.1. The codec will infer it based on the type of response.\n  // for streaming (e.g. 
SSE stream sent to hystrix dashboard), we do not want\n  // chunk transfer encoding but we don't have a content-length so disable_chunk_encoding_ is\n  // consulted before enabling chunk encoding.\n  //\n  // Note that for HEAD requests Envoy does best-effort guessing when there is no\n  // content-length. If a client makes a HEAD request for an upstream resource\n  // with no bytes but the upstream response doesn't include \"Content-length: 0\",\n  // Envoy will incorrectly assume a subsequent response to GET will be chunk encoded.\n  if (saw_content_length || disable_chunk_encoding_) {\n    chunk_encoding_ = false;\n  } else {\n    if (status && *status == 100) {\n      // Make sure we don't serialize chunk information with 100-Continue headers.\n      chunk_encoding_ = false;\n    } else if (end_stream && !is_response_to_head_request_) {\n      // If this is a headers-only stream, append an explicit \"Content-Length: 0\" unless it's a\n      // response to a HEAD request.\n      // For 204s and 1xx where content length is disallowed, don't append the content length but\n      // also don't chunk encode.\n      if (!status || (*status >= 200 && *status != 204)) {\n        encodeFormattedHeader(Headers::get().ContentLength.get(), \"0\");\n      }\n      chunk_encoding_ = false;\n    } else if (connection_.protocol() == Protocol::Http10) {\n      chunk_encoding_ = false;\n    } else if (status && (*status < 200 || *status == 204) &&\n               connection_.strict1xxAnd204Headers()) {\n      // TODO(zuercher): when the \"envoy.reloadable_features.strict_1xx_and_204_response_headers\"\n      // feature flag is removed, this block can be coalesced with the 100 Continue logic above.\n\n      // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked\n      // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1\n      chunk_encoding_ = false;\n    } else {\n      // For responses to connect requests, do not send the chunked 
encoding header:\n      // https://tools.ietf.org/html/rfc7231#section-4.3.6.\n      if (!is_response_to_connect_request_) {\n        encodeFormattedHeader(Headers::get().TransferEncoding.get(),\n                              Headers::get().TransferEncodingValues.Chunked);\n      }\n      // We do not apply chunk encoding for HTTP upgrades, including CONNECT style upgrades.\n      // If there is a body in a response on the upgrade path, the chunks will be\n      // passed through via maybeDirectDispatch so we need to avoid appending\n      // extra chunk boundaries.\n      //\n      // When sending a response to a HEAD request Envoy may send an informational\n      // \"Transfer-Encoding: chunked\" header, but should not send a chunk encoded body.\n      chunk_encoding_ = !Utility::isUpgrade(headers) && !is_response_to_head_request_ &&\n                        !is_response_to_connect_request_;\n    }\n  }\n\n  connection_.addToBuffer(CRLF);\n\n  if (end_stream) {\n    endEncode();\n  } else {\n    connection_.flushOutput();\n  }\n}\n\nvoid StreamEncoderImpl::encodeData(Buffer::Instance& data, bool end_stream) {\n  // end_stream may be indicated with a zero length data buffer. 
If that is the case, so not\n  // actually write the zero length buffer out.\n  if (data.length() > 0) {\n    if (chunk_encoding_) {\n      connection_.buffer().add(absl::StrCat(absl::Hex(data.length()), CRLF));\n    }\n\n    connection_.buffer().move(data);\n\n    if (chunk_encoding_) {\n      connection_.buffer().add(CRLF);\n    }\n  }\n\n  if (end_stream) {\n    endEncode();\n  } else {\n    connection_.flushOutput();\n  }\n}\n\nvoid StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) {\n  if (!connection_.enableTrailers()) {\n    return endEncode();\n  }\n  // Trailers only matter if it is a chunk transfer encoding\n  // https://tools.ietf.org/html/rfc7230#section-4.4\n  if (chunk_encoding_) {\n    // Finalize the body\n    connection_.buffer().add(LAST_CHUNK);\n\n    trailers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate {\n      encodeFormattedHeader(header.key().getStringView(), header.value().getStringView());\n      return HeaderMap::Iterate::Continue;\n    });\n\n    connection_.flushOutput();\n    connection_.buffer().add(CRLF);\n  }\n\n  connection_.flushOutput();\n  connection_.onEncodeComplete();\n}\n\nvoid StreamEncoderImpl::encodeMetadata(const MetadataMapVector&) {\n  connection_.stats().metadata_not_supported_error_.inc();\n}\n\nvoid StreamEncoderImpl::endEncode() {\n  if (chunk_encoding_) {\n    connection_.buffer().add(LAST_CHUNK);\n    connection_.buffer().add(CRLF);\n  }\n\n  connection_.flushOutput(true);\n  connection_.onEncodeComplete();\n}\n\nvoid ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) {\n  if (!flood_protection_) {\n    return;\n  }\n  // It's messy and complicated to try to tag the final write of an HTTP response for response\n  // tracking for flood protection. 
Instead, write an empty buffer fragment after the response,\n  // to allow for tracking.\n  // When the response is written out, the fragment will be deleted and the counter will be updated\n  // by ServerConnectionImpl::releaseOutboundResponse()\n  auto fragment =\n      Buffer::OwnedBufferFragmentImpl::create(absl::string_view(\"\", 0), response_buffer_releasor_);\n  output_buffer.addBufferFragment(*fragment.release());\n  ASSERT(outbound_responses_ < max_outbound_responses_);\n  outbound_responses_++;\n}\n\nvoid ServerConnectionImpl::doFloodProtectionChecks() const {\n  if (!flood_protection_) {\n    return;\n  }\n  // Before processing another request, make sure that we are below the response flood protection\n  // threshold.\n  if (outbound_responses_ >= max_outbound_responses_) {\n    ENVOY_CONN_LOG(trace, \"error accepting request: too many pending responses queued\",\n                   connection_);\n    stats_.response_flood_.inc();\n    throw FrameFloodException(\"Too many responses queued.\");\n  }\n}\n\nvoid ConnectionImpl::flushOutput(bool end_encode) {\n  if (end_encode) {\n    // If this is an HTTP response in ServerConnectionImpl, track outbound responses for flood\n    // protection\n    maybeAddSentinelBufferFragment(output_buffer_);\n  }\n  connection().write(output_buffer_, false);\n  ASSERT(0UL == output_buffer_.length());\n}\n\nvoid ConnectionImpl::addToBuffer(absl::string_view data) { output_buffer_.add(data); }\n\nvoid ConnectionImpl::addCharToBuffer(char c) { output_buffer_.add(&c, 1); }\n\nvoid ConnectionImpl::addIntToBuffer(uint64_t i) { output_buffer_.add(absl::StrCat(i)); }\n\nvoid ConnectionImpl::copyToBuffer(const char* data, uint64_t length) {\n  output_buffer_.add(data, length);\n}\n\nvoid StreamEncoderImpl::resetStream(StreamResetReason reason) {\n  connection_.onResetStreamBase(reason);\n}\n\nvoid StreamEncoderImpl::readDisable(bool disable) {\n  if (disable) {\n    ++read_disable_calls_;\n  } else {\n    
ASSERT(read_disable_calls_ != 0);\n    if (read_disable_calls_ != 0) {\n      --read_disable_calls_;\n    }\n  }\n  connection_.readDisable(disable);\n}\n\nuint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); }\n\nconst Network::Address::InstanceConstSharedPtr& StreamEncoderImpl::connectionLocalAddress() {\n  return connection_.connection().localAddress();\n}\n\nstatic const char RESPONSE_PREFIX[] = \"HTTP/1.1 \";\nstatic const char HTTP_10_RESPONSE_PREFIX[] = \"HTTP/1.0 \";\n\nvoid ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) {\n  started_response_ = true;\n\n  // The contract is that client codecs must ensure that :status is present.\n  ASSERT(headers.Status() != nullptr);\n  uint64_t numeric_status = Utility::getResponseStatus(headers);\n\n  if (connection_.protocol() == Protocol::Http10 && connection_.supportsHttp10()) {\n    connection_.copyToBuffer(HTTP_10_RESPONSE_PREFIX, sizeof(HTTP_10_RESPONSE_PREFIX) - 1);\n  } else {\n    connection_.copyToBuffer(RESPONSE_PREFIX, sizeof(RESPONSE_PREFIX) - 1);\n  }\n  connection_.addIntToBuffer(numeric_status);\n  connection_.addCharToBuffer(' ');\n\n  const char* status_string = CodeUtility::toString(static_cast<Code>(numeric_status));\n  uint32_t status_string_len = strlen(status_string);\n  connection_.copyToBuffer(status_string, status_string_len);\n\n  connection_.addCharToBuffer('\\r');\n  connection_.addCharToBuffer('\\n');\n\n  if (numeric_status >= 300) {\n    // Don't do special CONNECT logic if the CONNECT was rejected.\n    is_response_to_connect_request_ = false;\n  }\n\n  encodeHeadersBase(headers, absl::make_optional<uint64_t>(numeric_status), end_stream);\n}\n\nstatic const char REQUEST_POSTFIX[] = \" HTTP/1.1\\r\\n\";\n\nvoid RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end_stream) {\n  const HeaderEntry* method = headers.Method();\n  const HeaderEntry* path = headers.Path();\n  const HeaderEntry* host = 
headers.Host();\n  bool is_connect = HeaderUtility::isConnect(headers);\n\n  if (!method || (!path && !is_connect)) {\n    // TODO(#10878): This exception does not occur during dispatch and would not be triggered under\n    // normal circumstances since inputs would fail parsing at ingress. Replace with proper error\n    // handling when exceptions are removed. Include missing host header for CONNECT.\n    throw CodecClientException(\":method and :path must be specified\");\n  }\n  if (method->value() == Headers::get().MethodValues.Head) {\n    head_request_ = true;\n  } else if (method->value() == Headers::get().MethodValues.Connect) {\n    disableChunkEncoding();\n    connect_request_ = true;\n  }\n  if (Utility::isUpgrade(headers)) {\n    upgrade_request_ = true;\n  }\n\n  connection_.copyToBuffer(method->value().getStringView().data(), method->value().size());\n  connection_.addCharToBuffer(' ');\n  if (is_connect) {\n    connection_.copyToBuffer(host->value().getStringView().data(), host->value().size());\n  } else {\n    connection_.copyToBuffer(path->value().getStringView().data(), path->value().size());\n  }\n  connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1);\n\n  encodeHeadersBase(headers, absl::nullopt, end_stream);\n}\n\nhttp_parser_settings ConnectionImpl::settings_{\n    [](http_parser* parser) -> int {\n      static_cast<ConnectionImpl*>(parser->data)->onMessageBeginBase();\n      return 0;\n    },\n    [](http_parser* parser, const char* at, size_t length) -> int {\n      static_cast<ConnectionImpl*>(parser->data)->onUrl(at, length);\n      return 0;\n    },\n    nullptr, // on_status\n    [](http_parser* parser, const char* at, size_t length) -> int {\n      static_cast<ConnectionImpl*>(parser->data)->onHeaderField(at, length);\n      return 0;\n    },\n    [](http_parser* parser, const char* at, size_t length) -> int {\n      static_cast<ConnectionImpl*>(parser->data)->onHeaderValue(at, length);\n      return 0;\n    },\n    
[](http_parser* parser) -> int {\n      return static_cast<ConnectionImpl*>(parser->data)->onHeadersCompleteBase();\n    },\n    [](http_parser* parser, const char* at, size_t length) -> int {\n      static_cast<ConnectionImpl*>(parser->data)->bufferBody(at, length);\n      return 0;\n    },\n    [](http_parser* parser) -> int {\n      static_cast<ConnectionImpl*>(parser->data)->onMessageCompleteBase();\n      return 0;\n    },\n    [](http_parser* parser) -> int {\n      // A 0-byte chunk header is used to signal the end of the chunked body.\n      // When this function is called, http-parser holds the size of the chunk in\n      // parser->content_length. See\n      // https://github.com/nodejs/http-parser/blob/v2.9.3/http_parser.h#L336\n      const bool is_final_chunk = (parser->content_length == 0);\n      static_cast<ConnectionImpl*>(parser->data)->onChunkHeader(is_final_chunk);\n      return 0;\n    },\n    nullptr // on_chunk_complete\n};\n\nConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats,\n                               const Http1Settings& settings, http_parser_type type,\n                               uint32_t max_headers_kb, const uint32_t max_headers_count,\n                               HeaderKeyFormatterPtr&& header_key_formatter)\n    : connection_(connection), stats_(stats), codec_settings_(settings),\n      header_key_formatter_(std::move(header_key_formatter)), processing_trailers_(false),\n      handling_upgrade_(false), reset_stream_called_(false), deferred_end_stream_headers_(false),\n      strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.strict_1xx_and_204_response_headers\")),\n      output_buffer_([&]() -> void { this->onBelowLowWatermark(); },\n                     [&]() -> void { this->onAboveHighWatermark(); },\n                     []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }),\n      max_headers_kb_(max_headers_kb), 
max_headers_count_(max_headers_count) {\n  output_buffer_.setWatermarks(connection.bufferLimit());\n  http_parser_init(&parser_, type);\n  parser_.allow_chunked_length = 1;\n  parser_.data = this;\n}\n\nvoid ConnectionImpl::completeLastHeader() {\n  ENVOY_CONN_LOG(trace, \"completed header: key={} value={}\", connection_,\n                 current_header_field_.getStringView(), current_header_value_.getStringView());\n\n  checkHeaderNameForUnderscores();\n  auto& headers_or_trailers = headersOrTrailers();\n  if (!current_header_field_.empty()) {\n    current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); });\n    // Strip trailing whitespace of the current header value if any. Leading whitespace was trimmed\n    // in ConnectionImpl::onHeaderValue. http_parser does not strip leading or trailing whitespace\n    // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4\n    current_header_value_.rtrim();\n    headers_or_trailers.addViaMove(std::move(current_header_field_),\n                                   std::move(current_header_value_));\n  }\n\n  // Check if the number of headers exceeds the limit.\n  if (headers_or_trailers.size() > max_headers_count_) {\n    error_code_ = Http::Code::RequestHeaderFieldsTooLarge;\n    sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders);\n    const absl::string_view header_type =\n        processing_trailers_ ? 
Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers;\n    throw CodecProtocolException(absl::StrCat(header_type, \" size exceeds limit\"));\n  }\n\n  header_parsing_state_ = HeaderParsingState::Field;\n  ASSERT(current_header_field_.empty());\n  ASSERT(current_header_value_.empty());\n}\n\nuint32_t ConnectionImpl::getHeadersSize() {\n  return current_header_field_.size() + current_header_value_.size() +\n         headersOrTrailers().byteSize();\n}\n\nvoid ConnectionImpl::checkMaxHeadersSize() {\n  const uint32_t total = getHeadersSize();\n  if (total > (max_headers_kb_ * 1024)) {\n    const absl::string_view header_type =\n        processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers;\n    error_code_ = Http::Code::RequestHeaderFieldsTooLarge;\n    sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge);\n    throw CodecProtocolException(absl::StrCat(header_type, \" size exceeds limit\"));\n  }\n}\n\nbool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) {\n  if (!handling_upgrade_) {\n    // Only direct dispatch for Upgrade requests.\n    return false;\n  }\n\n  ENVOY_CONN_LOG(trace, \"direct-dispatched {} bytes\", connection_, data.length());\n  onBody(data);\n  data.drain(data.length());\n  return true;\n}\n\nHttp::Status ConnectionImpl::dispatch(Buffer::Instance& data) {\n  // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either\n  // throw an exception or return an error status. 
The utility wrapper catches exceptions and\n  // converts them to error statuses.\n  return Utility::exceptionToStatus(\n      [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data);\n}\n\nHttp::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) {\n  ENVOY_CONN_LOG(trace, \"parsing {} bytes\", connection_, data.length());\n  ASSERT(buffered_body_.length() == 0);\n\n  if (maybeDirectDispatch(data)) {\n    return Http::okStatus();\n  }\n\n  // Always unpause before dispatch.\n  http_parser_pause(&parser_, 0);\n\n  ssize_t total_parsed = 0;\n  if (data.length() > 0) {\n    for (const Buffer::RawSlice& slice : data.getRawSlices()) {\n      total_parsed += dispatchSlice(static_cast<const char*>(slice.mem_), slice.len_);\n      if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) {\n        // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at\n        // this point.\n        ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED);\n        break;\n      }\n    }\n    dispatchBufferedBody();\n  } else {\n    dispatchSlice(nullptr, 0);\n  }\n  ASSERT(buffered_body_.length() == 0);\n\n  ENVOY_CONN_LOG(trace, \"parsed {} bytes\", connection_, total_parsed);\n  data.drain(total_parsed);\n\n  // If an upgrade has been handled and there is body data or early upgrade\n  // payload to send on, send it on.\n  maybeDirectDispatch(data);\n  return Http::okStatus();\n}\n\nsize_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) {\n  ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len);\n  if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) {\n    sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError);\n    throw CodecProtocolException(\"http/1.1 protocol error: \" +\n                                 std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_))));\n  }\n\n  return rc;\n}\n\nvoid ConnectionImpl::onHeaderField(const char* data, size_t length) 
{\n  // We previously already finished up the headers, these headers are\n  // now trailers.\n  if (header_parsing_state_ == HeaderParsingState::Done) {\n    if (!enableTrailers()) {\n      // Ignore trailers.\n      return;\n    }\n    processing_trailers_ = true;\n    header_parsing_state_ = HeaderParsingState::Field;\n    allocTrailers();\n  }\n  if (header_parsing_state_ == HeaderParsingState::Value) {\n    completeLastHeader();\n  }\n\n  current_header_field_.append(data, length);\n\n  checkMaxHeadersSize();\n}\n\nvoid ConnectionImpl::onHeaderValue(const char* data, size_t length) {\n  if (header_parsing_state_ == HeaderParsingState::Done && !enableTrailers()) {\n    // Ignore trailers.\n    return;\n  }\n\n  absl::string_view header_value{data, length};\n  if (!Http::HeaderUtility::headerValueIsValid(header_value)) {\n    ENVOY_CONN_LOG(debug, \"invalid header value: {}\", connection_, header_value);\n    error_code_ = Http::Code::BadRequest;\n    sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters);\n    throw CodecProtocolException(\"http/1.1 protocol error: header value contains invalid chars\");\n  }\n\n  header_parsing_state_ = HeaderParsingState::Value;\n  if (current_header_value_.empty()) {\n    // Strip leading whitespace if the current header value input contains the first bytes of the\n    // encoded header value. Trailing whitespace is stripped once the full header value is known in\n    // ConnectionImpl::completeLastHeader. 
http_parser does not strip leading or trailing whitespace\n    // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 .\n    header_value = StringUtil::ltrim(header_value);\n  }\n  current_header_value_.append(header_value.data(), header_value.length());\n\n  checkMaxHeadersSize();\n}\n\nint ConnectionImpl::onHeadersCompleteBase() {\n  ASSERT(!processing_trailers_);\n  ENVOY_CONN_LOG(trace, \"onHeadersCompleteBase\", connection_);\n  completeLastHeader();\n\n  if (!(parser_.http_major == 1 && parser_.http_minor == 1)) {\n    // This is not necessarily true, but it's good enough since higher layers only care if this is\n    // HTTP/1.1 or not.\n    protocol_ = Protocol::Http10;\n  }\n  RequestOrResponseHeaderMap& request_or_response_headers = requestOrResponseHeaders();\n  if (Utility::isUpgrade(request_or_response_headers) && upgradeAllowed()) {\n    // Ignore h2c upgrade requests until we support them.\n    // See https://github.com/envoyproxy/envoy/issues/7161 for details.\n    if (absl::EqualsIgnoreCase(request_or_response_headers.getUpgradeValue(),\n                               Http::Headers::get().UpgradeValues.H2c)) {\n      ENVOY_CONN_LOG(trace, \"removing unsupported h2c upgrade headers.\", connection_);\n      request_or_response_headers.removeUpgrade();\n      if (request_or_response_headers.Connection()) {\n        const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings();\n        std::string new_value = StringUtil::removeTokens(\n            request_or_response_headers.getConnectionValue(), \",\", tokens_to_remove, \",\");\n        if (new_value.empty()) {\n          request_or_response_headers.removeConnection();\n        } else {\n          request_or_response_headers.setConnection(new_value);\n        }\n      }\n      request_or_response_headers.remove(Headers::get().Http2Settings);\n    } else {\n      ENVOY_CONN_LOG(trace, \"codec entering upgrade mode.\", connection_);\n      handling_upgrade_ = 
true;\n    }\n  }\n  if (parser_.method == HTTP_CONNECT) {\n    if (request_or_response_headers.ContentLength()) {\n      if (request_or_response_headers.getContentLengthValue() == \"0\") {\n        request_or_response_headers.removeContentLength();\n      } else {\n        // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a\n        // CONNECT request has no defined semantics, and may be rejected.\n        error_code_ = Http::Code::BadRequest;\n        sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed);\n        throw CodecProtocolException(\"http/1.1 protocol error: unsupported content length\");\n      }\n    }\n    ENVOY_CONN_LOG(trace, \"codec entering upgrade mode for CONNECT request.\", connection_);\n    handling_upgrade_ = true;\n  }\n\n  // https://tools.ietf.org/html/rfc7230#section-3.3.3\n  // If a message is received with both a Transfer-Encoding and a\n  // Content-Length header field, the Transfer-Encoding overrides the\n  // Content-Length. Such a message might indicate an attempt to\n  // perform request smuggling (Section 9.5) or response splitting\n  // (Section 9.4) and ought to be handled as an error. 
A sender MUST\n  // remove the received Content-Length field prior to forwarding such\n  // a message.\n\n  // Reject message with Http::Code::BadRequest if both Transfer-Encoding and Content-Length\n  // headers are present or if allowed by http1 codec settings and 'Transfer-Encoding'\n  // is chunked - remove Content-Length and serve request.\n  if (parser_.uses_transfer_encoding != 0 && request_or_response_headers.ContentLength()) {\n    if ((parser_.flags & F_CHUNKED) && codec_settings_.allow_chunked_length_) {\n      request_or_response_headers.removeContentLength();\n    } else {\n      error_code_ = Http::Code::BadRequest;\n      sendProtocolError(Http1ResponseCodeDetails::get().ChunkedContentLength);\n      throw CodecProtocolException(\n          \"http/1.1 protocol error: both 'Content-Length' and 'Transfer-Encoding' are set.\");\n    }\n  }\n\n  // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject\n  // transfer-codings it does not understand.\n  // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a\n  // CONNECT request has no defined semantics, and may be rejected.\n  if (request_or_response_headers.TransferEncoding()) {\n    const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue();\n    if (!absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked) ||\n        parser_.method == HTTP_CONNECT) {\n      error_code_ = Http::Code::NotImplemented;\n      sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding);\n      throw CodecProtocolException(\"http/1.1 protocol error: unsupported transfer encoding\");\n    }\n  }\n\n  int rc = onHeadersComplete();\n  header_parsing_state_ = HeaderParsingState::Done;\n\n  // Returning 2 informs http_parser to not expect a body or further data on this connection.\n  return handling_upgrade_ ? 
2 : rc;\n}\n\nvoid ConnectionImpl::bufferBody(const char* data, size_t length) {\n  buffered_body_.add(data, length);\n}\n\nvoid ConnectionImpl::dispatchBufferedBody() {\n  ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED);\n  if (buffered_body_.length() > 0) {\n    onBody(buffered_body_);\n    buffered_body_.drain(buffered_body_.length());\n  }\n}\n\nvoid ConnectionImpl::onChunkHeader(bool is_final_chunk) {\n  if (is_final_chunk) {\n    // Dispatch body before parsing trailers, so body ends up dispatched even if an error is found\n    // while processing trailers.\n    dispatchBufferedBody();\n  }\n}\n\nvoid ConnectionImpl::onMessageCompleteBase() {\n  ENVOY_CONN_LOG(trace, \"message complete\", connection_);\n\n  dispatchBufferedBody();\n\n  if (handling_upgrade_) {\n    // If this is an upgrade request, swallow the onMessageComplete. The\n    // upgrade payload will be treated as stream body.\n    ASSERT(!deferred_end_stream_headers_);\n    ENVOY_CONN_LOG(trace, \"Pausing parser due to upgrade.\", connection_);\n    http_parser_pause(&parser_, 1);\n    return;\n  }\n\n  // If true, this indicates we were processing trailers and must\n  // move the last header into current_header_map_\n  if (header_parsing_state_ == HeaderParsingState::Value) {\n    completeLastHeader();\n  }\n\n  onMessageComplete();\n}\n\nvoid ConnectionImpl::onMessageBeginBase() {\n  ENVOY_CONN_LOG(trace, \"message begin\", connection_);\n  // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets\n  // protocol for each request. 
Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable\n  // in onHeadersCompleteBase\n  protocol_ = Protocol::Http11;\n  processing_trailers_ = false;\n  header_parsing_state_ = HeaderParsingState::Field;\n  allocHeaders();\n  onMessageBegin();\n}\n\nvoid ConnectionImpl::onResetStreamBase(StreamResetReason reason) {\n  ASSERT(!reset_stream_called_);\n  reset_stream_called_ = true;\n  onResetStream(reason);\n}\n\nServerConnectionImpl::ServerConnectionImpl(\n    Network::Connection& connection, CodecStats& stats, ServerConnectionCallbacks& callbacks,\n    const Http1Settings& settings, uint32_t max_request_headers_kb,\n    const uint32_t max_request_headers_count,\n    envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n        headers_with_underscores_action)\n    : ConnectionImpl(connection, stats, settings, HTTP_REQUEST, max_request_headers_kb,\n                     max_request_headers_count, formatter(settings)),\n      callbacks_(callbacks),\n      response_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) {\n        releaseOutboundResponse(fragment);\n      }),\n      // Pipelining is generally not well supported on the internet and has a series of dangerous\n      // overflow bugs. As such we are disabling it for now, and removing this temporary override if\n      // no one objects. 
If you use this integer to restore prior behavior, contact the\n      // maintainer team as it will otherwise be removed entirely soon.\n      max_outbound_responses_(\n          Runtime::getInteger(\"envoy.do_not_use_going_away_max_http2_outbound_responses\", 2)),\n      flood_protection_(\n          Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.http1_flood_protection\")),\n      headers_with_underscores_action_(headers_with_underscores_action) {}\n\nuint32_t ServerConnectionImpl::getHeadersSize() {\n  // Add in the size of the request URL if processing request headers.\n  const uint32_t url_size = (!processing_trailers_ && active_request_.has_value())\n                                ? active_request_.value().request_url_.size()\n                                : 0;\n  return url_size + ConnectionImpl::getHeadersSize();\n}\n\nvoid ServerConnectionImpl::onEncodeComplete() {\n  if (active_request_.value().remote_complete_) {\n    // Only do this if remote is complete. If we are replying before the request is complete the\n    // only logical thing to do is for higher level code to reset() / close the connection so we\n    // leave the request around so that it can fire reset callbacks.\n    active_request_.reset();\n  }\n}\n\nvoid ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) {\n  HeaderString path(Headers::get().Path);\n\n  bool is_connect = (method == HTTP_CONNECT);\n\n  // The url is relative or a wildcard when the method is OPTIONS. 
Nothing to do here.\n  auto& active_request = active_request_.value();\n  if (!is_connect && !active_request.request_url_.getStringView().empty() &&\n      (active_request.request_url_.getStringView()[0] == '/' ||\n       ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) {\n    headers.addViaMove(std::move(path), std::move(active_request.request_url_));\n    return;\n  }\n\n  // If absolute_urls and/or connect are not going be handled, copy the url and return.\n  // This forces the behavior to be backwards compatible with the old codec behavior.\n  // CONNECT \"urls\" are actually host:port so look like absolute URLs to the above checks.\n  // Absolute URLS in CONNECT requests will be rejected below by the URL class validation.\n  if (!codec_settings_.allow_absolute_url_ && !is_connect) {\n    headers.addViaMove(std::move(path), std::move(active_request.request_url_));\n    return;\n  }\n\n  Utility::Url absolute_url;\n  if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) {\n    sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl);\n    throw CodecProtocolException(\"http/1.1 protocol error: invalid url in request line\");\n  }\n  // RFC7230#5.7\n  // When a proxy receives a request with an absolute-form of\n  // request-target, the proxy MUST ignore the received Host header field\n  // (if any) and instead replace it with the host information of the\n  // request-target. A proxy that forwards such a request MUST generate a\n  // new Host field-value based on the received request-target rather than\n  // forward the received Host field-value.\n  headers.setHost(absolute_url.hostAndPort());\n\n  if (!absolute_url.pathAndQueryParams().empty()) {\n    headers.setPath(absolute_url.pathAndQueryParams());\n  }\n  active_request.request_url_.clear();\n}\n\nint ServerConnectionImpl::onHeadersComplete() {\n  // Handle the case where response happens prior to request complete. 
It's up to upper layer code\n  // to disconnect the connection but we shouldn't fire any more events since it doesn't make\n  // sense.\n  if (active_request_.has_value()) {\n    auto& active_request = active_request_.value();\n    auto& headers = absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n    ENVOY_CONN_LOG(trace, \"Server: onHeadersComplete size={}\", connection_, headers->size());\n    const char* method_string = http_method_str(static_cast<http_method>(parser_.method));\n\n    if (!handling_upgrade_ && headers->Connection()) {\n      // If we fail to sanitize the request, return a 400 to the client\n      if (!Utility::sanitizeConnectionHeader(*headers)) {\n        absl::string_view header_value = headers->getConnectionValue();\n        ENVOY_CONN_LOG(debug, \"Invalid nominated headers in Connection: {}\", connection_,\n                       header_value);\n        error_code_ = Http::Code::BadRequest;\n        sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization);\n        throw CodecProtocolException(\"Invalid nominated headers in Connection.\");\n      }\n    }\n\n    // Inform the response encoder about any HEAD method, so it can set content\n    // length and transfer encoding headers correctly.\n    active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD);\n    active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT);\n\n    handlePath(*headers, parser_.method);\n    ASSERT(active_request.request_url_.empty());\n\n    headers->setMethod(method_string);\n\n    // Make sure the host is valid.\n    auto details = HeaderUtility::requestHeadersValid(*headers);\n    if (details.has_value()) {\n      sendProtocolError(details.value().get());\n      throw CodecProtocolException(\n          \"http/1.1 protocol error: request headers failed spec compliance checks\");\n    }\n\n    // Determine here whether we have a body or not. 
This uses the new RFC semantics where the\n    // presence of content-length or chunked transfer-encoding indicates a body vs. a particular\n    // method. If there is no body, we defer raising decodeHeaders() until the parser is flushed\n    // with message complete. This allows upper layers to behave like HTTP/2 and prevents a proxy\n    // scenario where the higher layers stream through and implicitly switch to chunked transfer\n    // encoding because end stream with zero body length has not yet been indicated.\n    if (parser_.flags & F_CHUNKED ||\n        (parser_.content_length > 0 && parser_.content_length != ULLONG_MAX) || handling_upgrade_) {\n      active_request.request_decoder_->decodeHeaders(std::move(headers), false);\n\n      // If the connection has been closed (or is closing) after decoding headers, pause the parser\n      // so we return control to the caller.\n      if (connection_.state() != Network::Connection::State::Open) {\n        http_parser_pause(&parser_, 1);\n      }\n    } else {\n      deferred_end_stream_headers_ = true;\n    }\n  }\n\n  return 0;\n}\n\nvoid ServerConnectionImpl::onMessageBegin() {\n  if (!resetStreamCalled()) {\n    ASSERT(!active_request_.has_value());\n    active_request_.emplace(*this, header_key_formatter_.get());\n    auto& active_request = active_request_.value();\n    active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_);\n\n    // Check for pipelined request flood as we prepare to accept a new request.\n    // Parse errors that happen prior to onMessageBegin result in stream termination, it is not\n    // possible to overflow output buffers with early parse errors.\n    doFloodProtectionChecks();\n  }\n}\n\nvoid ServerConnectionImpl::onUrl(const char* data, size_t length) {\n  if (active_request_.has_value()) {\n    active_request_.value().request_url_.append(data, length);\n\n    checkMaxHeadersSize();\n  }\n}\n\nvoid ServerConnectionImpl::onBody(Buffer::Instance& data) 
{\n  ASSERT(!deferred_end_stream_headers_);\n  if (active_request_.has_value()) {\n    ENVOY_CONN_LOG(trace, \"body size={}\", connection_, data.length());\n    active_request_.value().request_decoder_->decodeData(data, false);\n  }\n}\n\nvoid ServerConnectionImpl::onMessageComplete() {\n  ASSERT(!handling_upgrade_);\n  if (active_request_.has_value()) {\n    auto& active_request = active_request_.value();\n\n    if (active_request.request_decoder_) {\n      active_request.response_encoder_.readDisable(true);\n    }\n    active_request.remote_complete_ = true;\n    if (deferred_end_stream_headers_) {\n      active_request.request_decoder_->decodeHeaders(\n          std::move(absl::get<RequestHeaderMapPtr>(headers_or_trailers_)), true);\n      deferred_end_stream_headers_ = false;\n    } else if (processing_trailers_) {\n      active_request.request_decoder_->decodeTrailers(\n          std::move(absl::get<RequestTrailerMapPtr>(headers_or_trailers_)));\n    } else {\n      Buffer::OwnedImpl buffer;\n      active_request.request_decoder_->decodeData(buffer, true);\n    }\n\n    // Reset to ensure no information from one requests persists to the next.\n    headers_or_trailers_.emplace<RequestHeaderMapPtr>(nullptr);\n  }\n\n  // Always pause the parser so that the calling code can process 1 request at a time and apply\n  // back pressure. However this means that the calling code needs to detect if there is more data\n  // in the buffer and dispatch it again.\n  http_parser_pause(&parser_, 1);\n}\n\nvoid ServerConnectionImpl::onResetStream(StreamResetReason reason) {\n  active_request_.value().response_encoder_.runResetCallbacks(reason);\n  active_request_.reset();\n}\n\nvoid ServerConnectionImpl::sendProtocolErrorOld(absl::string_view details) {\n  if (active_request_.has_value()) {\n    active_request_.value().response_encoder_.setDetails(details);\n  }\n  // We do this here because we may get a protocol error before we have a logical stream. 
Higher\n  // layers can only operate on streams, so there is no coherent way to allow them to send an error\n  // \"out of band.\" On one hand this is kind of a hack but on the other hand it normalizes HTTP/1.1\n  // to look more like HTTP/2 to higher layers.\n  if (!active_request_.has_value() ||\n      !active_request_.value().response_encoder_.startedResponse()) {\n    Buffer::OwnedImpl bad_request_response(\n        absl::StrCat(\"HTTP/1.1 \", error_code_, \" \", CodeUtility::toString(error_code_),\n                     \"\\r\\ncontent-length: 0\\r\\nconnection: close\\r\\n\\r\\n\"));\n\n    connection_.write(bad_request_response, false);\n  }\n}\n\nvoid ServerConnectionImpl::sendProtocolError(absl::string_view details) {\n  if (!Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.early_errors_via_hcm\")) {\n    sendProtocolErrorOld(details);\n    return;\n  }\n  // We do this here because we may get a protocol error before we have a logical stream.\n  if (!active_request_.has_value()) {\n    onMessageBeginBase();\n  }\n  ASSERT(active_request_.has_value());\n\n  active_request_.value().response_encoder_.setDetails(details);\n  if (!active_request_.value().response_encoder_.startedResponse()) {\n    // Note that the correctness of is_grpc_request and is_head_request is best-effort.\n    // If headers have not been fully parsed they may not be inferred correctly.\n    bool is_grpc_request = false;\n    if (absl::holds_alternative<RequestHeaderMapPtr>(headers_or_trailers_) &&\n        absl::get<RequestHeaderMapPtr>(headers_or_trailers_) != nullptr) {\n      is_grpc_request =\n          Grpc::Common::isGrpcRequestHeaders(*absl::get<RequestHeaderMapPtr>(headers_or_trailers_));\n    }\n    active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_,\n                                                      CodeUtility::toString(error_code_), nullptr,\n                                                      absl::nullopt, details);\n    return;\n 
 }\n}\n\nvoid ServerConnectionImpl::onAboveHighWatermark() {\n  if (active_request_.has_value()) {\n    active_request_.value().response_encoder_.runHighWatermarkCallbacks();\n  }\n}\nvoid ServerConnectionImpl::onBelowLowWatermark() {\n  if (active_request_.has_value()) {\n    active_request_.value().response_encoder_.runLowWatermarkCallbacks();\n  }\n}\n\nvoid ServerConnectionImpl::releaseOutboundResponse(\n    const Buffer::OwnedBufferFragmentImpl* fragment) {\n  ASSERT(outbound_responses_ >= 1);\n  --outbound_responses_;\n  delete fragment;\n}\n\nvoid ServerConnectionImpl::checkHeaderNameForUnderscores() {\n  if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW &&\n      Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) {\n    if (headers_with_underscores_action_ ==\n        envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) {\n      ENVOY_CONN_LOG(debug, \"Dropping header with invalid characters in its name: {}\", connection_,\n                     current_header_field_.getStringView());\n      stats_.dropped_headers_with_underscores_.inc();\n      current_header_field_.clear();\n      current_header_value_.clear();\n    } else {\n      ENVOY_CONN_LOG(debug, \"Rejecting request due to header name with underscores: {}\",\n                     connection_, current_header_field_.getStringView());\n      error_code_ = Http::Code::BadRequest;\n      sendProtocolError(Http1ResponseCodeDetails::get().InvalidUnderscore);\n      stats_.requests_rejected_with_underscores_in_headers_.inc();\n      throw CodecProtocolException(\"http/1.1 protocol error: header name contains underscores\");\n    }\n  }\n}\n\nClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats,\n                                           ConnectionCallbacks&, const Http1Settings& settings,\n                                           const uint32_t max_response_headers_count)\n    : 
ConnectionImpl(connection, stats, settings, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB,\n                     max_response_headers_count, formatter(settings)) {}\n\nbool ClientConnectionImpl::cannotHaveBody() {\n  if (pending_response_.has_value() && pending_response_.value().encoder_.headRequest()) {\n    ASSERT(!pending_response_done_);\n    return true;\n  } else if (parser_.status_code == 204 || parser_.status_code == 304 ||\n             (parser_.status_code >= 200 && parser_.content_length == 0 &&\n              !(parser_.flags & F_CHUNKED))) {\n    return true;\n  } else {\n    return false;\n  }\n}\n\nRequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) {\n  if (resetStreamCalled()) {\n    throw CodecClientException(\"cannot create new streams after calling reset\");\n  }\n\n  // If reads were disabled due to flow control, we expect reads to always be enabled again before\n  // reusing this connection. This is done when the response is received.\n  ASSERT(connection_.readEnabled());\n\n  ASSERT(!pending_response_.has_value());\n  ASSERT(pending_response_done_);\n  pending_response_.emplace(*this, header_key_formatter_.get(), &response_decoder);\n  pending_response_done_ = false;\n  return pending_response_.value().encoder_;\n}\n\nint ClientConnectionImpl::onHeadersComplete() {\n  ENVOY_CONN_LOG(trace, \"status_code {}\", connection_, parser_.status_code);\n\n  // Handle the case where the client is closing a kept alive connection (by sending a 408\n  // with a 'Connection: close' header). 
In this case we just let response flush out followed\n  // by the remote close.\n  if (!pending_response_.has_value() && !resetStreamCalled()) {\n    throw PrematureResponseException(static_cast<Http::Code>(parser_.status_code));\n  } else if (pending_response_.has_value()) {\n    ASSERT(!pending_response_done_);\n    auto& headers = absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n    ENVOY_CONN_LOG(trace, \"Client: onHeadersComplete size={}\", connection_, headers->size());\n    headers->setStatus(parser_.status_code);\n\n    if (parser_.status_code >= 200 && parser_.status_code < 300 &&\n        pending_response_.value().encoder_.connectRequest()) {\n      ENVOY_CONN_LOG(trace, \"codec entering upgrade mode for CONNECT response.\", connection_);\n      handling_upgrade_ = true;\n\n      // For responses to connect requests, do not accept the chunked\n      // encoding header: https://tools.ietf.org/html/rfc7231#section-4.3.6\n      if (headers->TransferEncoding() &&\n          absl::EqualsIgnoreCase(headers->TransferEncoding()->value().getStringView(),\n                                 Headers::get().TransferEncodingValues.Chunked)) {\n        sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding);\n        throw CodecProtocolException(\"http/1.1 protocol error: unsupported transfer encoding\");\n      }\n    }\n\n    if (strict_1xx_and_204_headers_ && (parser_.status_code < 200 || parser_.status_code == 204)) {\n      if (headers->TransferEncoding()) {\n        sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed);\n        throw CodecProtocolException(\n            \"http/1.1 protocol error: transfer encoding not allowed in 1xx or 204\");\n      }\n\n      if (headers->ContentLength()) {\n        // Report a protocol error for non-zero Content-Length, but paper over zero Content-Length.\n        if (headers->ContentLength()->value().getStringView() != \"0\") {\n          
sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed);\n          throw CodecProtocolException(\n              \"http/1.1 protocol error: content length not allowed in 1xx or 204\");\n        }\n\n        headers->removeContentLength();\n      }\n    }\n\n    if (parser_.status_code == enumToInt(Http::Code::Continue)) {\n      pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers));\n    } else if (cannotHaveBody() && !handling_upgrade_) {\n      deferred_end_stream_headers_ = true;\n    } else {\n      pending_response_.value().decoder_->decodeHeaders(std::move(headers), false);\n    }\n\n    // http-parser treats 1xx headers as their own complete response. Swallow the spurious\n    // onMessageComplete and continue processing for purely informational headers.\n    // 101-SwitchingProtocols is exempt as all data after the header is proxied through after\n    // upgrading.\n    if (CodeUtility::is1xx(parser_.status_code) &&\n        parser_.status_code != enumToInt(Http::Code::SwitchingProtocols)) {\n      ignore_message_complete_for_1xx_ = true;\n      // Reset to ensure no information from the 1xx headers is used for the response headers.\n      headers_or_trailers_.emplace<ResponseHeaderMapPtr>(nullptr);\n    }\n  }\n\n  // Here we deal with cases where the response cannot have a body, but http_parser does not deal\n  // with it for us.\n  return cannotHaveBody() ? 
1 : 0;\n}\n\nbool ClientConnectionImpl::upgradeAllowed() const {\n  if (pending_response_.has_value()) {\n    return pending_response_->encoder_.upgradeRequest();\n  }\n  return false;\n}\n\nvoid ClientConnectionImpl::onBody(Buffer::Instance& data) {\n  ASSERT(!deferred_end_stream_headers_);\n  if (pending_response_.has_value()) {\n    ASSERT(!pending_response_done_);\n    pending_response_.value().decoder_->decodeData(data, false);\n  }\n}\n\nvoid ClientConnectionImpl::onMessageComplete() {\n  ENVOY_CONN_LOG(trace, \"message complete\", connection_);\n  if (ignore_message_complete_for_1xx_) {\n    ignore_message_complete_for_1xx_ = false;\n    return;\n  }\n  if (pending_response_.has_value()) {\n    ASSERT(!pending_response_done_);\n    // After calling decodeData() with end stream set to true, we should no longer be able to reset.\n    PendingResponse& response = pending_response_.value();\n    // Encoder is used as part of decode* calls later in this function so pending_response_ can not\n    // be reset just yet. 
Preserve the state in pending_response_done_ instead.\n    pending_response_done_ = true;\n\n    if (deferred_end_stream_headers_) {\n      response.decoder_->decodeHeaders(\n          std::move(absl::get<ResponseHeaderMapPtr>(headers_or_trailers_)), true);\n      deferred_end_stream_headers_ = false;\n    } else if (processing_trailers_) {\n      response.decoder_->decodeTrailers(\n          std::move(absl::get<ResponseTrailerMapPtr>(headers_or_trailers_)));\n    } else {\n      Buffer::OwnedImpl buffer;\n      response.decoder_->decodeData(buffer, true);\n    }\n\n    // Reset to ensure no information from one requests persists to the next.\n    pending_response_.reset();\n    headers_or_trailers_.emplace<ResponseHeaderMapPtr>(nullptr);\n  }\n}\n\nvoid ClientConnectionImpl::onResetStream(StreamResetReason reason) {\n  // Only raise reset if we did not already dispatch a complete response.\n  if (pending_response_.has_value() && !pending_response_done_) {\n    pending_response_.value().encoder_.runResetCallbacks(reason);\n    pending_response_done_ = true;\n    pending_response_.reset();\n  }\n}\n\nvoid ClientConnectionImpl::sendProtocolError(absl::string_view details) {\n  if (pending_response_.has_value()) {\n    ASSERT(!pending_response_done_);\n    pending_response_.value().encoder_.setDetails(details);\n  }\n}\n\nvoid ClientConnectionImpl::onAboveHighWatermark() {\n  // This should never happen without an active stream/request.\n  pending_response_.value().encoder_.runHighWatermarkCallbacks();\n}\n\nvoid ClientConnectionImpl::onBelowLowWatermark() {\n  // This can get called without an active stream/request when the response completion causes us to\n  // close the connection, but in doing so go below low watermark.\n  if (pending_response_.has_value() && !pending_response_done_) {\n    pending_response_.value().encoder_.runLowWatermarkCallbacks();\n  }\n}\n\n} // namespace Http1\n} // namespace Legacy\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http1/codec_impl_legacy.h",
    "content": "#pragma once\n\n#include <http_parser.h>\n\n#include <array>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/statusor.h\"\n#include \"common/http/codec_helper.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/http1/codec_stats.h\"\n#include \"common/http/http1/header_formatter.h\"\n#include \"common/http/status.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Legacy {\nnamespace Http1 {\n\nclass ConnectionImpl;\n\n/**\n * Base class for HTTP/1.1 request and response encoders.\n */\nclass StreamEncoderImpl : public virtual StreamEncoder,\n                          public Stream,\n                          Logger::Loggable<Logger::Id::http>,\n                          public StreamCallbackHelper,\n                          public Http1StreamEncoderOptions {\npublic:\n  ~StreamEncoderImpl() override {\n    // When the stream goes away, undo any read blocks to resume reading.\n    while (read_disable_calls_ != 0) {\n      StreamEncoderImpl::readDisable(false);\n    }\n  }\n  // Http::StreamEncoder\n  void encodeData(Buffer::Instance& data, bool end_stream) override;\n  void encodeMetadata(const MetadataMapVector&) override;\n  Stream& getStream() override { return *this; }\n  Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return *this; }\n\n  // Http::Http1StreamEncoderOptions\n  void disableChunkEncoding() override { disable_chunk_encoding_ = true; }\n\n  // Http::Stream\n  void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); }\n  void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); }\n  // After this is called, for the HTTP/1 codec, 
the connection should be closed, i.e. no further\n  // progress may be made with the codec.\n  void resetStream(StreamResetReason reason) override;\n  void readDisable(bool disable) override;\n  uint32_t bufferLimit() override;\n  absl::string_view responseDetails() override { return details_; }\n  const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override;\n  void setFlushTimeout(std::chrono::milliseconds) override {\n    // HTTP/1 has one stream per connection, thus any data encoded is immediately written to the\n    // connection, invoking any watermarks as necessary. There is no internal buffering that would\n    // require a flush timeout not already covered by other timeouts.\n  }\n\n  void setIsResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; }\n  void setIsResponseToConnectRequest(bool value) { is_response_to_connect_request_ = value; }\n  void setDetails(absl::string_view details) { details_ = details; }\n\n  void clearReadDisableCallsForTests() { read_disable_calls_ = 0; }\n\nprotected:\n  StreamEncoderImpl(ConnectionImpl& connection,\n                    Http::Http1::HeaderKeyFormatter* header_key_formatter);\n  void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional<uint64_t> status,\n                         bool end_stream);\n  void encodeTrailersBase(const HeaderMap& headers);\n\n  static const std::string CRLF;\n  static const std::string LAST_CHUNK;\n\n  ConnectionImpl& connection_;\n  uint32_t read_disable_calls_{};\n  bool disable_chunk_encoding_ : 1;\n  bool chunk_encoding_ : 1;\n  bool is_response_to_head_request_ : 1;\n  bool is_response_to_connect_request_ : 1;\n\nprivate:\n  /**\n   * Called to encode an individual header.\n   * @param key supplies the header to encode.\n   * @param key_size supplies the byte size of the key.\n   * @param value supplies the value to encode.\n   * @param value_size supplies the byte size of the value.\n   */\n  void encodeHeader(const 
char* key, uint32_t key_size, const char* value, uint32_t value_size);\n\n  /**\n   * Called to encode an individual header.\n   * @param key supplies the header to encode as a string_view.\n   * @param value supplies the value to encode as a string_view.\n   */\n  void encodeHeader(absl::string_view key, absl::string_view value);\n\n  /**\n   * Called to finalize a stream encode.\n   */\n  void endEncode();\n\n  void encodeFormattedHeader(absl::string_view key, absl::string_view value);\n\n  const Http::Http1::HeaderKeyFormatter* const header_key_formatter_;\n  absl::string_view details_;\n};\n\n/**\n * HTTP/1.1 response encoder.\n */\nclass ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder {\npublic:\n  ResponseEncoderImpl(ConnectionImpl& connection,\n                      Http::Http1::HeaderKeyFormatter* header_key_formatter,\n                      bool stream_error_on_invalid_http_message)\n      : StreamEncoderImpl(connection, header_key_formatter),\n        stream_error_on_invalid_http_message_(stream_error_on_invalid_http_message) {}\n\n  bool startedResponse() { return started_response_; }\n\n  // Http::ResponseEncoder\n  void encode100ContinueHeaders(const ResponseHeaderMap& headers) override;\n  void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override;\n  void encodeTrailers(const ResponseTrailerMap& trailers) override { encodeTrailersBase(trailers); }\n\n  bool streamErrorOnInvalidHttpMessage() const override {\n    return stream_error_on_invalid_http_message_;\n  }\n\nprivate:\n  bool started_response_{};\n  const bool stream_error_on_invalid_http_message_;\n};\n\n/**\n * HTTP/1.1 request encoder.\n */\nclass RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder {\npublic:\n  RequestEncoderImpl(ConnectionImpl& connection,\n                     Http::Http1::HeaderKeyFormatter* header_key_formatter)\n      : StreamEncoderImpl(connection, header_key_formatter) {}\n  bool upgradeRequest() const { 
return upgrade_request_; }\n  bool headRequest() const { return head_request_; }\n  bool connectRequest() const { return connect_request_; }\n\n  // Http::RequestEncoder\n  void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override;\n  void encodeTrailers(const RequestTrailerMap& trailers) override { encodeTrailersBase(trailers); }\n\nprivate:\n  bool upgrade_request_{};\n  bool head_request_{};\n  bool connect_request_{};\n};\n\n/**\n * Base class for HTTP/1.1 client and server connections.\n * Handles the callbacks of http_parser with its own base routine and then\n * virtual dispatches to its subclasses.\n */\nclass ConnectionImpl : public virtual Connection, protected Logger::Loggable<Logger::Id::http> {\npublic:\n  /**\n   * @return Network::Connection& the backing network connection.\n   */\n  Network::Connection& connection() { return connection_; }\n\n  /**\n   * Called when the active encoder has completed encoding the outbound half of the stream.\n   */\n  virtual void onEncodeComplete() PURE;\n\n  /**\n   * Called when resetStream() has been called on an active stream. 
In HTTP/1.1 the only\n   * valid operation after this point is for the connection to get blown away, but we will not\n   * fire any more callbacks in case some stack has to unwind.\n   */\n  void onResetStreamBase(StreamResetReason reason);\n\n  /**\n   * Flush all pending output from encoding.\n   */\n  void flushOutput(bool end_encode = false);\n\n  void addToBuffer(absl::string_view data);\n  void addCharToBuffer(char c);\n  void addIntToBuffer(uint64_t i);\n  Buffer::WatermarkBuffer& buffer() { return output_buffer_; }\n  uint64_t bufferRemainingSize();\n  void copyToBuffer(const char* data, uint64_t length);\n  void reserveBuffer(uint64_t size);\n  void readDisable(bool disable) {\n    if (connection_.state() == Network::Connection::State::Open) {\n      connection_.readDisable(disable);\n    }\n  }\n  uint32_t bufferLimit() { return connection_.bufferLimit(); }\n  virtual bool supportsHttp10() { return false; }\n  bool maybeDirectDispatch(Buffer::Instance& data);\n  virtual void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer&) {}\n  Http::Http1::CodecStats& stats() { return stats_; }\n  bool enableTrailers() const { return codec_settings_.enable_trailers_; }\n\n  // Http::Connection\n  Http::Status dispatch(Buffer::Instance& data) override;\n  void goAway() override {} // Called during connection manager drain flow\n  Protocol protocol() override { return protocol_; }\n  void shutdownNotice() override {} // Called during connection manager drain flow\n  bool wantsToWrite() override { return false; }\n  void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { onAboveHighWatermark(); }\n  void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { onBelowLowWatermark(); }\n\n  bool strict1xxAnd204Headers() { return strict_1xx_and_204_headers_; }\n\nprotected:\n  ConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats,\n                 const Http1Settings& settings, http_parser_type type, uint32_t 
max_headers_kb,\n                 const uint32_t max_headers_count,\n                 Http::Http1::HeaderKeyFormatterPtr&& header_key_formatter);\n\n  bool resetStreamCalled() { return reset_stream_called_; }\n  void onMessageBeginBase();\n\n  /**\n   * Get memory used to represent HTTP headers or trailers currently being parsed.\n   * Computed by adding the partial header field and value that is currently being parsed and the\n   * estimated header size for previous header lines provided by HeaderMap::byteSize().\n   */\n  virtual uint32_t getHeadersSize();\n\n  /**\n   * Called from onUrl, onHeaderField and onHeaderValue to verify that the headers do not exceed the\n   * configured max header size limit. Throws a  CodecProtocolException if headers exceed the size\n   * limit.\n   */\n  void checkMaxHeadersSize();\n\n  Network::Connection& connection_;\n  Http::Http1::CodecStats& stats_;\n  const Http1Settings codec_settings_;\n  http_parser parser_;\n  Http::Code error_code_{Http::Code::BadRequest};\n  const Http::Http1::HeaderKeyFormatterPtr header_key_formatter_;\n  HeaderString current_header_field_;\n  HeaderString current_header_value_;\n  bool processing_trailers_ : 1;\n  bool handling_upgrade_ : 1;\n  bool reset_stream_called_ : 1;\n  // Deferred end stream headers indicate that we are not going to raise headers until the full\n  // HTTP/1 message has been flushed from the parser. 
This allows raising an HTTP/2 style headers\n  // block with end stream set to true with no further protocol data remaining.\n  bool deferred_end_stream_headers_ : 1;\n  const bool strict_1xx_and_204_headers_ : 1;\n\nprivate:\n  enum class HeaderParsingState { Field, Value, Done };\n\n  virtual HeaderMap& headersOrTrailers() PURE;\n  virtual RequestOrResponseHeaderMap& requestOrResponseHeaders() PURE;\n  virtual void allocHeaders() PURE;\n  virtual void allocTrailers() PURE;\n\n  /**\n   * Called in order to complete an in progress header decode.\n   */\n  void completeLastHeader();\n\n  /**\n   * Check if header name contains underscore character.\n   * Underscore character is allowed in header names by the RFC-7230 and this check is implemented\n   * as a security measure due to systems that treat '_' and '-' as interchangeable.\n   * The ServerConnectionImpl may drop header or reject request based on the\n   * `common_http_protocol_options.headers_with_underscores_action` configuration option in the\n   * HttpConnectionManager.\n   */\n  virtual bool shouldDropHeaderWithUnderscoresInNames(absl::string_view /* header_name */) const {\n    return false;\n  }\n\n  /**\n   * An inner dispatch call that executes the dispatching logic. 
While exception removal is in\n   * migration (#10878), this function may either throw an exception or return an error status.\n   * Exceptions are caught and translated to their corresponding statuses in the outer level\n   * dispatch.\n   * TODO(#10878): Remove this when exception removal is complete.\n   */\n  Http::Status innerDispatch(Buffer::Instance& data);\n\n  /**\n   * Dispatch a memory span.\n   * @param slice supplies the start address.\n   * @len supplies the length of the span.\n   */\n  size_t dispatchSlice(const char* slice, size_t len);\n\n  /**\n   * Called by the http_parser when body data is received.\n   * @param data supplies the start address.\n   * @param length supplies the length.\n   */\n  void bufferBody(const char* data, size_t length);\n\n  /**\n   * Push the accumulated body through the filter pipeline.\n   */\n  void dispatchBufferedBody();\n\n  /**\n   * Called when a request/response is beginning. A base routine happens first then a virtual\n   * dispatch is invoked.\n   */\n  virtual void onMessageBegin() PURE;\n\n  /**\n   * Called when URL data is received.\n   * @param data supplies the start address.\n   * @param length supplies the length.\n   */\n  virtual void onUrl(const char* data, size_t length) PURE;\n\n  /**\n   * Called when header field data is received.\n   * @param data supplies the start address.\n   * @param length supplies the length.\n   */\n  void onHeaderField(const char* data, size_t length);\n\n  /**\n   * Called when header value data is received.\n   * @param data supplies the start address.\n   * @param length supplies the length.\n   */\n  void onHeaderValue(const char* data, size_t length);\n\n  /**\n   * Called when headers are complete. A base routine happens first then a virtual dispatch is\n   * invoked. Note that this only applies to headers and NOT trailers. 
End of\n   * trailers are signaled via onMessageCompleteBase().\n   * @return 0 if no error, 1 if there should be no body.\n   */\n  int onHeadersCompleteBase();\n  virtual int onHeadersComplete() PURE;\n\n  /**\n   * Called to see if upgrade transition is allowed.\n   */\n  virtual bool upgradeAllowed() const PURE;\n\n  /**\n   * Called with body data is available for processing when either:\n   * - There is an accumulated partial body after the parser is done processing bytes read from the\n   * socket\n   * - The parser encounters the last byte of the body\n   * - The codec does a direct dispatch from the read buffer\n   * For performance reasons there is at most one call to onBody per call to HTTP/1\n   * ConnectionImpl::dispatch call.\n   * @param data supplies the body data\n   */\n  virtual void onBody(Buffer::Instance& data) PURE;\n\n  /**\n   * Called when the request/response is complete.\n   */\n  void onMessageCompleteBase();\n  virtual void onMessageComplete() PURE;\n\n  /**\n   * Called when accepting a chunk header.\n   */\n  void onChunkHeader(bool is_final_chunk);\n\n  /**\n   * @see onResetStreamBase().\n   */\n  virtual void onResetStream(StreamResetReason reason) PURE;\n\n  /**\n   * Send a protocol error response to remote.\n   */\n  virtual void sendProtocolError(absl::string_view details) PURE;\n\n  /**\n   * Called when output_buffer_ or the underlying connection go from below a low watermark to over\n   * a high watermark.\n   */\n  virtual void onAboveHighWatermark() PURE;\n\n  /**\n   * Called when output_buffer_ or the underlying connection  go from above a high watermark to\n   * below a low watermark.\n   */\n  virtual void onBelowLowWatermark() PURE;\n\n  /**\n   * Check if header name contains underscore character.\n   * The ServerConnectionImpl may drop header or reject request based on configuration.\n   */\n  virtual void checkHeaderNameForUnderscores() {}\n\n  static http_parser_settings settings_;\n\n  HeaderParsingState 
header_parsing_state_{HeaderParsingState::Field};\n  // Used to accumulate the HTTP message body during the current dispatch call. The accumulated body\n  // is pushed through the filter pipeline either at the end of the current dispatch call, or when\n  // the last byte of the body is processed (whichever happens first).\n  Buffer::OwnedImpl buffered_body_;\n  Buffer::WatermarkBuffer output_buffer_;\n  Protocol protocol_{Protocol::Http11};\n  const uint32_t max_headers_kb_;\n  const uint32_t max_headers_count_;\n};\n\n/**\n * Implementation of Http::ServerConnection for HTTP/1.1.\n */\nclass ServerConnectionImpl : public ServerConnection, public ConnectionImpl {\npublic:\n  ServerConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats,\n                       ServerConnectionCallbacks& callbacks, const Http1Settings& settings,\n                       uint32_t max_request_headers_kb, const uint32_t max_request_headers_count,\n                       envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n                           headers_with_underscores_action);\n  bool supportsHttp10() override { return codec_settings_.accept_http_10_; }\n\nprotected:\n  /**\n   * An active HTTP/1.1 request.\n   */\n  struct ActiveRequest {\n    ActiveRequest(ServerConnectionImpl& connection,\n                  Http::Http1::HeaderKeyFormatter* header_key_formatter)\n        : response_encoder_(connection, header_key_formatter,\n                            connection.codec_settings_.stream_error_on_invalid_http_message_) {}\n\n    HeaderString request_url_;\n    RequestDecoder* request_decoder_{};\n    ResponseEncoderImpl response_encoder_;\n    bool remote_complete_{};\n  };\n  absl::optional<ActiveRequest>& activeRequest() { return active_request_; }\n  // ConnectionImpl\n  void onMessageComplete() override;\n  // Add the size of the request_url to the reported header size when processing request headers.\n  uint32_t getHeadersSize() 
override;\n\nprivate:\n  /**\n   * Manipulate the request's first line, parsing the url and converting to a relative path if\n   * necessary. Compute Host / :authority headers based on 7230#5.7 and 7230#6\n   *\n   * @param is_connect true if the request has the CONNECT method\n   * @param headers the request's headers\n   * @throws CodecProtocolException on an invalid url in the request line\n   */\n  void handlePath(RequestHeaderMap& headers, unsigned int method);\n\n  // ConnectionImpl\n  void onEncodeComplete() override;\n  void onMessageBegin() override;\n  void onUrl(const char* data, size_t length) override;\n  int onHeadersComplete() override;\n  // If upgrade behavior is not allowed, the HCM will have sanitized the headers out.\n  bool upgradeAllowed() const override { return true; }\n  void onBody(Buffer::Instance& data) override;\n  void onResetStream(StreamResetReason reason) override;\n  void sendProtocolError(absl::string_view details) override;\n  void onAboveHighWatermark() override;\n  void onBelowLowWatermark() override;\n  HeaderMap& headersOrTrailers() override {\n    if (absl::holds_alternative<RequestHeaderMapPtr>(headers_or_trailers_)) {\n      return *absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n    } else {\n      return *absl::get<RequestTrailerMapPtr>(headers_or_trailers_);\n    }\n  }\n  RequestOrResponseHeaderMap& requestOrResponseHeaders() override {\n    return *absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n  }\n  void allocHeaders() override {\n    ASSERT(nullptr == absl::get<RequestHeaderMapPtr>(headers_or_trailers_));\n    ASSERT(!processing_trailers_);\n    headers_or_trailers_.emplace<RequestHeaderMapPtr>(RequestHeaderMapImpl::create());\n  }\n  void allocTrailers() override {\n    ASSERT(processing_trailers_);\n    if (!absl::holds_alternative<RequestTrailerMapPtr>(headers_or_trailers_)) {\n      headers_or_trailers_.emplace<RequestTrailerMapPtr>(RequestTrailerMapImpl::create());\n    }\n  }\n\n  void 
sendProtocolErrorOld(absl::string_view details);\n\n  void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment);\n  void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override;\n  void doFloodProtectionChecks() const;\n  void checkHeaderNameForUnderscores() override;\n\n  ServerConnectionCallbacks& callbacks_;\n  absl::optional<ActiveRequest> active_request_;\n  const Buffer::OwnedBufferFragmentImpl::Releasor response_buffer_releasor_;\n  uint32_t outbound_responses_{};\n  // This defaults to 2, which functionally disables pipelining. If any users\n  // of Envoy wish to enable pipelining (which is dangerous and ill supported)\n  // we could make this configurable.\n  uint32_t max_outbound_responses_{};\n  bool flood_protection_{};\n  // TODO(mattklein123): This should be a member of ActiveRequest but this change needs dedicated\n  // thought as some of the reset and no header code paths make this difficult. Headers are\n  // populated on message begin. Trailers are populated on the first parsed trailer field (if\n  // trailers are enabled). 
The variant is reset to null headers on message complete for assertion\n  // purposes.\n  absl::variant<RequestHeaderMapPtr, RequestTrailerMapPtr> headers_or_trailers_;\n  // The action to take when a request header name contains underscore characters.\n  const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n      headers_with_underscores_action_;\n};\n\n/**\n * Implementation of Http::ClientConnection for HTTP/1.1.\n */\nclass ClientConnectionImpl : public ClientConnection, public ConnectionImpl {\npublic:\n  ClientConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats,\n                       ConnectionCallbacks& callbacks, const Http1Settings& settings,\n                       const uint32_t max_response_headers_count);\n\n  // Http::ClientConnection\n  RequestEncoder& newStream(ResponseDecoder& response_decoder) override;\n\nprivate:\n  struct PendingResponse {\n    PendingResponse(ConnectionImpl& connection,\n                    Http::Http1::HeaderKeyFormatter* header_key_formatter, ResponseDecoder* decoder)\n        : encoder_(connection, header_key_formatter), decoder_(decoder) {}\n\n    RequestEncoderImpl encoder_;\n    ResponseDecoder* decoder_;\n  };\n\n  bool cannotHaveBody();\n\n  // ConnectionImpl\n  void onEncodeComplete() override {}\n  void onMessageBegin() override {}\n  void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  int onHeadersComplete() override;\n  bool upgradeAllowed() const override;\n  void onBody(Buffer::Instance& data) override;\n  void onMessageComplete() override;\n  void onResetStream(StreamResetReason reason) override;\n  void sendProtocolError(absl::string_view details) override;\n  void onAboveHighWatermark() override;\n  void onBelowLowWatermark() override;\n  HeaderMap& headersOrTrailers() override {\n    if (absl::holds_alternative<ResponseHeaderMapPtr>(headers_or_trailers_)) {\n      return 
*absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n    } else {\n      return *absl::get<ResponseTrailerMapPtr>(headers_or_trailers_);\n    }\n  }\n  RequestOrResponseHeaderMap& requestOrResponseHeaders() override {\n    return *absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n  }\n  void allocHeaders() override {\n    ASSERT(nullptr == absl::get<ResponseHeaderMapPtr>(headers_or_trailers_));\n    ASSERT(!processing_trailers_);\n    headers_or_trailers_.emplace<ResponseHeaderMapPtr>(ResponseHeaderMapImpl::create());\n  }\n  void allocTrailers() override {\n    ASSERT(processing_trailers_);\n    if (!absl::holds_alternative<ResponseTrailerMapPtr>(headers_or_trailers_)) {\n      headers_or_trailers_.emplace<ResponseTrailerMapPtr>(ResponseTrailerMapImpl::create());\n    }\n  }\n\n  absl::optional<PendingResponse> pending_response_;\n  // TODO(mattklein123): The following bool tracks whether a pending response is complete before\n  // dispatching callbacks. This is needed so that pending_response_ stays valid during callbacks\n  // in order to access the stream, but to avoid invoking callbacks that shouldn't be called once\n  // the response is complete. The existence of this variable is hard to reason about and it should\n  // be combined with pending_response_ somehow in a follow up cleanup.\n  bool pending_response_done_{true};\n  // Set true between receiving non-101 1xx headers and receiving the spurious onMessageComplete.\n  bool ignore_message_complete_for_1xx_{};\n  // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated\n  // thought as some of the reset and no header code paths make this difficult. Headers are\n  // populated on message begin. Trailers are populated when the switch to trailer processing is\n  // detected while parsing the first trailer field (if trailers are enabled). 
The variant is reset\n  // to null headers on message complete for assertion purposes.\n  absl::variant<ResponseHeaderMapPtr, ResponseTrailerMapPtr> headers_or_trailers_;\n\n  // The default limit of 80 KiB is the vanilla http_parser behaviour.\n  static constexpr uint32_t MAX_RESPONSE_HEADERS_KB = 80;\n};\n\n} // namespace Http1\n} // namespace Legacy\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http1/codec_stats.h",
    "content": "#pragma once\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/thread.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http1 {\n\n/**\n * All stats for the HTTP/1 codec. @see stats_macros.h\n */\n#define ALL_HTTP1_CODEC_STATS(COUNTER)                                                             \\\n  COUNTER(dropped_headers_with_underscores)                                                        \\\n  COUNTER(metadata_not_supported_error)                                                            \\\n  COUNTER(requests_rejected_with_underscores_in_headers)                                           \\\n  COUNTER(response_flood)\n\n/**\n * Wrapper struct for the HTTP/1 codec stats. @see stats_macros.h\n */\nstruct CodecStats {\n  using AtomicPtr = Thread::AtomicPtr<CodecStats, Thread::AtomicPtrAllocMode::DeleteOnDestruct>;\n\n  static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) {\n    return *ptr.get([&scope]() -> CodecStats* {\n      return new CodecStats{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(scope, \"http1.\"))};\n    });\n  }\n\n  ALL_HTTP1_CODEC_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n} // namespace Http1\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http1/conn_pool.cc",
    "content": "#include \"common/http/http1/conn_pool.h\"\n\n#include <cstdint>\n#include <list>\n#include <memory>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/schedulable_cb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/http/codec_client.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http1 {\n\nConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator,\n                           Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n                           const Network::ConnectionSocket::OptionsSharedPtr& options,\n                           const Network::TransportSocketOptionsSharedPtr& transport_socket_options)\n    : HttpConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options,\n                           transport_socket_options, Protocol::Http11),\n      upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() {\n        upstream_ready_enabled_ = false;\n        onUpstreamReady();\n      })),\n      random_generator_(random_generator) {}\n\nConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); }\n\nEnvoy::ConnectionPool::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() {\n  return std::make_unique<ActiveClient>(*this);\n}\n\nvoid ConnPoolImpl::onDownstreamReset(ActiveClient& client) {\n  // If we get a downstream reset to an attached client, we just blow it away.\n  client.codec_client_->close();\n}\n\nvoid ConnPoolImpl::onResponseComplete(ActiveClient& client) {\n  ENVOY_CONN_LOG(debug, \"response complete\", *client.codec_client_);\n\n  if (!client.stream_wrapper_->encode_complete_) {\n    ENVOY_CONN_LOG(debug, 
\"response before request complete\", *client.codec_client_);\n    onDownstreamReset(client);\n  } else if (client.stream_wrapper_->close_connection_ || client.codec_client_->remoteClosed()) {\n    ENVOY_CONN_LOG(debug, \"saw upstream close connection\", *client.codec_client_);\n    onDownstreamReset(client);\n  } else {\n    client.stream_wrapper_.reset();\n\n    if (!pending_streams_.empty() && !upstream_ready_enabled_) {\n      upstream_ready_enabled_ = true;\n      upstream_ready_cb_->scheduleCallbackCurrentIteration();\n    }\n\n    checkForDrained();\n  }\n}\n\nConnPoolImpl::StreamWrapper::StreamWrapper(ResponseDecoder& response_decoder, ActiveClient& parent)\n    : RequestEncoderWrapper(parent.codec_client_->newStream(*this)),\n      ResponseDecoderWrapper(response_decoder), parent_(parent) {\n  RequestEncoderWrapper::inner_.getStream().addCallbacks(*this);\n}\n\nConnPoolImpl::StreamWrapper::~StreamWrapper() {\n  // Upstream connection might be closed right after response is complete. 
Setting delay=true\n  // here to attach pending requests in next dispatcher loop to handle that case.\n  // https://github.com/envoyproxy/envoy/issues/2715\n  parent_.parent().onStreamClosed(parent_, true);\n}\n\nvoid ConnPoolImpl::StreamWrapper::onEncodeComplete() { encode_complete_ = true; }\n\nvoid ConnPoolImpl::StreamWrapper::decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) {\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.fixed_connection_close\")) {\n    close_connection_ =\n        HeaderUtility::shouldCloseConnection(parent_.codec_client_->protocol(), *headers);\n    if (close_connection_) {\n      parent_.parent_.host()->cluster().stats().upstream_cx_close_notify_.inc();\n    }\n  } else {\n    // If Connection: close OR\n    //    Http/1.0 and not Connection: keep-alive OR\n    //    Proxy-Connection: close\n    if ((absl::EqualsIgnoreCase(headers->getConnectionValue(),\n                                Headers::get().ConnectionValues.Close)) ||\n        (parent_.codec_client_->protocol() == Protocol::Http10 &&\n         !absl::EqualsIgnoreCase(headers->getConnectionValue(),\n                                 Headers::get().ConnectionValues.KeepAlive)) ||\n        (absl::EqualsIgnoreCase(headers->getProxyConnectionValue(),\n                                Headers::get().ConnectionValues.Close))) {\n      parent_.parent_.host()->cluster().stats().upstream_cx_close_notify_.inc();\n      close_connection_ = true;\n    }\n  }\n  ResponseDecoderWrapper::decodeHeaders(std::move(headers), end_stream);\n}\n\nvoid ConnPoolImpl::StreamWrapper::onDecodeComplete() {\n  decode_complete_ = encode_complete_;\n  parent_.parent().onResponseComplete(parent_);\n}\n\nConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent)\n    : Envoy::Http::ActiveClient(\n          parent, parent.host_->cluster().maxRequestsPerConnection(),\n          1 // HTTP1 always has a concurrent-request-limit of 1 per connection.\n      ) {\n  
parent.host_->cluster().stats().upstream_cx_http1_total_.inc();\n}\n\nbool ConnPoolImpl::ActiveClient::closingWithIncompleteStream() const {\n  return (stream_wrapper_ != nullptr) && (!stream_wrapper_->decode_complete_);\n}\n\nRequestEncoder& ConnPoolImpl::ActiveClient::newStreamEncoder(ResponseDecoder& response_decoder) {\n  ASSERT(!stream_wrapper_);\n  stream_wrapper_ = std::make_unique<StreamWrapper>(response_decoder, *this);\n  return *stream_wrapper_;\n}\n\nCodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) {\n  CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP1, std::move(data.connection_),\n                                           data.host_description_, dispatcher_, random_generator_)};\n  return codec;\n}\n\nConnectionPool::InstancePtr\nallocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator,\n                 Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n                 const Network::ConnectionSocket::OptionsSharedPtr& options,\n                 const Network::TransportSocketOptionsSharedPtr& transport_socket_options) {\n  return std::make_unique<Http::Http1::ProdConnPoolImpl>(\n      dispatcher, random_generator, host, priority, options, transport_socket_options);\n}\n\n} // namespace Http1\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http1/conn_pool.h",
    "content": "#pragma once\n\n#include \"envoy/event/timer.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/http/codec_wrappers.h\"\n#include \"common/http/conn_pool_base.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http1 {\n\n/**\n * A connection pool implementation for HTTP/1.1 connections.\n * NOTE: The connection pool does NOT do DNS resolution. It assumes it is being given a numeric IP\n *       address. Higher layer code should handle resolving DNS on error and creating a new pool\n *       bound to a different IP address.\n */\nclass ConnPoolImpl : public Http::HttpConnPoolImplBase {\npublic:\n  ConnPoolImpl(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator,\n               Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n               const Network::ConnectionSocket::OptionsSharedPtr& options,\n               const Network::TransportSocketOptionsSharedPtr& transport_socket_options);\n\n  ~ConnPoolImpl() override;\n\n  // ConnectionPool::Instance\n  Http::Protocol protocol() const override { return Http::Protocol::Http11; }\n\n  // ConnPoolImplBase\n  Envoy::ConnectionPool::ActiveClientPtr instantiateActiveClient() override;\n\nprotected:\n  class ActiveClient;\n\n  struct StreamWrapper : public RequestEncoderWrapper,\n                         public ResponseDecoderWrapper,\n                         public StreamCallbacks {\n    StreamWrapper(ResponseDecoder& response_decoder, ActiveClient& parent);\n    ~StreamWrapper() override;\n\n    // StreamEncoderWrapper\n    void onEncodeComplete() override;\n\n    // StreamDecoderWrapper\n    void decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override;\n    void onPreDecodeComplete() override {}\n    void onDecodeComplete() override;\n\n    // Http::StreamCallbacks\n    void onResetStream(StreamResetReason, absl::string_view) override {\n      parent_.parent().onDownstreamReset(parent_);\n    
}\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    ActiveClient& parent_;\n    bool encode_complete_{};\n    bool close_connection_{};\n    bool decode_complete_{};\n  };\n\n  using StreamWrapperPtr = std::unique_ptr<StreamWrapper>;\n\n  class ActiveClient : public Envoy::Http::ActiveClient {\n  public:\n    ActiveClient(ConnPoolImpl& parent);\n\n    ConnPoolImpl& parent() { return static_cast<ConnPoolImpl&>(parent_); }\n\n    // ConnPoolImplBase::ActiveClient\n    bool closingWithIncompleteStream() const override;\n    RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) override;\n\n    StreamWrapperPtr stream_wrapper_;\n  };\n\n  void onDownstreamReset(ActiveClient& client);\n  void onResponseComplete(ActiveClient& client);\n\n  Event::SchedulableCallbackPtr upstream_ready_cb_;\n  bool upstream_ready_enabled_{false};\n  Random::RandomGenerator& random_generator_;\n};\n\n/**\n * Production implementation of the ConnPoolImpl.\n */\nclass ProdConnPoolImpl : public ConnPoolImpl {\npublic:\n  using ConnPoolImpl::ConnPoolImpl;\n\n  // ConnPoolImpl\n  CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override;\n};\n\nConnectionPool::InstancePtr\nallocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator,\n                 Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n                 const Network::ConnectionSocket::OptionsSharedPtr& options,\n                 const Network::TransportSocketOptionsSharedPtr& transport_socket_options);\n\n} // namespace Http1\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http1/header_formatter.cc",
    "content": "#include \"common/http/http1/header_formatter.h\"\n\n#include <string>\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http1 {\nstd::string ProperCaseHeaderKeyFormatter::format(absl::string_view key) const {\n  auto copy = std::string(key);\n\n  bool should_capitalize = true;\n  for (char& c : copy) {\n    if (should_capitalize && isalpha(c)) {\n      c = static_cast<char>(toupper(c));\n    }\n\n    should_capitalize = !isalpha(c) && !isdigit(c);\n  }\n\n  return copy;\n}\n} // namespace Http1\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http1/header_formatter.h",
    "content": "#pragma once\n\n#include <cctype>\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http1 {\n\nclass HeaderKeyFormatter {\npublic:\n  virtual ~HeaderKeyFormatter() = default;\n\n  virtual std::string format(absl::string_view key) const PURE;\n};\n\nusing HeaderKeyFormatterPtr = std::unique_ptr<HeaderKeyFormatter>;\n\n/**\n * A HeaderKeyFormatter that upper cases the first character in each word: The\n * first character as well as any alpha character following a special\n * character is upper cased.\n */\nclass ProperCaseHeaderKeyFormatter : public HeaderKeyFormatter {\npublic:\n  std::string format(absl::string_view key) const override;\n};\n\n} // namespace Http1\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"codec_stats_lib\",\n    hdrs = [\"codec_stats.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/common:thread_lib\",\n    ],\n)\n\nCODEC_LIB_DEPS = [\n    \":codec_stats_lib\",\n    \":metadata_decoder_lib\",\n    \":metadata_encoder_lib\",\n    \":protocol_constraints_lib\",\n    \"//include/envoy/event:deferred_deletable\",\n    \"//include/envoy/event:dispatcher_interface\",\n    \"//include/envoy/http:codec_interface\",\n    \"//include/envoy/http:codes_interface\",\n    \"//include/envoy/http:header_map_interface\",\n    \"//include/envoy/network:connection_interface\",\n    \"//include/envoy/stats:stats_interface\",\n    \"//source/common/buffer:buffer_lib\",\n    \"//source/common/buffer:watermark_buffer_lib\",\n    \"//source/common/common:assert_lib\",\n    \"//source/common/common:enum_to_int\",\n    \"//source/common/common:linked_object\",\n    \"//source/common/common:minimal_logger_lib\",\n    \"//source/common/common:statusor_lib\",\n    \"//source/common/common:utility_lib\",\n    \"//source/common/http:codec_helper_lib\",\n    \"//source/common/http:codes_lib\",\n    \"//source/common/http:exception_lib\",\n    \"//source/common/http:header_map_lib\",\n    \"//source/common/http:header_utility_lib\",\n    \"//source/common/http:headers_lib\",\n    \"//source/common/http:status_lib\",\n    \"//source/common/http:utility_lib\",\n    \"//source/common/runtime:runtime_features_lib\",\n    \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n]\n\nenvoy_cc_library(\n    name = \"codec_lib\",\n    srcs = [\"codec_impl.cc\"],\n    hdrs = [\"codec_impl.h\"],\n    external_deps = [\n        \"nghttp2\",\n        \"abseil_optional\",\n        
\"abseil_inlined_vector\",\n        \"abseil_algorithm\",\n    ],\n    deps = CODEC_LIB_DEPS,\n)\n\nenvoy_cc_library(\n    name = \"codec_legacy_lib\",\n    srcs = [\"codec_impl_legacy.cc\"],\n    hdrs = [\n        \"codec_impl.h\",\n        \"codec_impl_legacy.h\",\n    ],\n    external_deps = [\n        \"nghttp2\",\n        \"abseil_optional\",\n        \"abseil_inlined_vector\",\n        \"abseil_algorithm\",\n    ],\n    deps = CODEC_LIB_DEPS,\n)\n\n# Separate library for some nghttp2 setup stuff to avoid having tests take a\n# dependency on everything in codec_lib.\nenvoy_cc_library(\n    name = \"nghttp2_lib\",\n    srcs = [\"nghttp2.cc\"],\n    hdrs = [\"nghttp2.h\"],\n    external_deps = [\"nghttp2\"],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_pool_lib\",\n    srcs = [\"conn_pool.cc\"],\n    hdrs = [\"conn_pool.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/http:codec_client_lib\",\n        \"//source/common/http:conn_pool_base_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"metadata_encoder_lib\",\n    srcs = [\"metadata_encoder.cc\"],\n    hdrs = [\"metadata_encoder.h\"],\n    external_deps = [\n        \"nghttp2\",\n    ],\n    deps = [\n        \"//include/envoy/http:codec_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"metadata_decoder_lib\",\n    srcs = [\"metadata_decoder.cc\"],\n    hdrs = [\"metadata_decoder.h\"],\n    external_deps = [\n        \"nghttp2\",\n    ],\n    deps = [\n        \"//include/envoy/http:codec_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"protocol_constraints_lib\",\n    srcs = [\"protocol_constraints.cc\"],\n    hdrs = [\"protocol_constraints.h\"],\n    deps = [\n        \":codec_stats_lib\",\n        \"//bazel/foreign_cc:nghttp2\",\n        \"//include/envoy/network:connection_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/http:status_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/http/http2/codec_impl.cc",
    "content": "#include \"common/http/http2/codec_impl.h\"\n\n#include <cstdint>\n#include <memory>\n#include <vector>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/http/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\n// Changes or additions to details should be reflected in\n// docs/root/configuration/http/http_conn_man/response_code_details_details.rst\nclass Http2ResponseCodeDetailValues {\npublic:\n  // Invalid HTTP header field was received and stream is going to be\n  // closed.\n  const absl::string_view ng_http2_err_http_header_ = \"http2.invalid.header.field\";\n  // Violation in HTTP messaging rule.\n  const absl::string_view ng_http2_err_http_messaging_ = \"http2.violation.of.messaging.rule\";\n  // none of the above\n  const absl::string_view ng_http2_err_unknown_ = \"http2.unknown.nghttp2.error\";\n  // The number of headers (or trailers) exceeded the configured limits\n  const absl::string_view too_many_headers = \"http2.too_many_headers\";\n  // Envoy detected an HTTP/2 frame flood from the server.\n  const absl::string_view outbound_frame_flood = \"http2.outbound_frames_flood\";\n  // Envoy detected an inbound HTTP/2 frame flood.\n  const absl::string_view inbound_empty_frame_flood = \"http2.inbound_empty_frames_flood\";\n  // Envoy was configured to drop requests with header keys beginning with underscores.\n  const 
absl::string_view invalid_underscore = \"http2.unexpected_underscore\";\n  // The peer refused the stream.\n  const absl::string_view remote_refused = \"http2.remote_refuse\";\n  // The peer reset the stream.\n  const absl::string_view remote_reset = \"http2.remote_reset\";\n\n  const absl::string_view errorDetails(int error_code) const {\n    switch (error_code) {\n    case NGHTTP2_ERR_HTTP_HEADER:\n      return ng_http2_err_http_header_;\n    case NGHTTP2_ERR_HTTP_MESSAGING:\n      return ng_http2_err_http_messaging_;\n    default:\n      return ng_http2_err_unknown_;\n    }\n  }\n};\n\nusing Http2ResponseCodeDetails = ConstSingleton<Http2ResponseCodeDetailValues>;\n\nbool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value,\n                                          HeaderString& cookies) {\n  if (key != Headers::get().Cookie.get().c_str()) {\n    return false;\n  }\n\n  if (!cookies.empty()) {\n    cookies.append(\"; \", 2);\n  }\n\n  const absl::string_view value_view = value.getStringView();\n  cookies.append(value_view.data(), value_view.size());\n  return true;\n}\n\nConnectionImpl::Http2Callbacks ConnectionImpl::http2_callbacks_;\n\nnghttp2_session* ProdNghttp2SessionFactory::create(const nghttp2_session_callbacks* callbacks,\n                                                   ConnectionImpl* connection,\n                                                   const nghttp2_option* options) {\n  nghttp2_session* session;\n  nghttp2_session_client_new2(&session, callbacks, connection, options);\n  return session;\n}\n\nvoid ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connection,\n                                     const envoy::config::core::v3::Http2ProtocolOptions& options) {\n  connection->sendSettings(options, true);\n}\n\n/**\n * Helper to remove const during a cast. 
nghttp2 takes non-const pointers for headers even though\n * it copies them.\n */\ntemplate <typename T> static T* removeConst(const void* object) {\n  return const_cast<T*>(reinterpret_cast<const T*>(object));\n}\n\nConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit)\n    : parent_(parent), local_end_stream_sent_(false), remote_end_stream_(false),\n      data_deferred_(false), received_noninformational_headers_(false),\n      pending_receive_buffer_high_watermark_called_(false),\n      pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) {\n  parent_.stats_.streams_active_.inc();\n  if (buffer_limit > 0) {\n    setWriteBufferWatermarks(buffer_limit / 2, buffer_limit);\n  }\n}\n\nConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); }\n\nvoid ConnectionImpl::StreamImpl::destroy() {\n  disarmStreamIdleTimer();\n  parent_.stats_.streams_active_.dec();\n  parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length());\n}\n\nstatic void insertHeader(std::vector<nghttp2_nv>& headers, const HeaderEntry& header) {\n  uint8_t flags = 0;\n  if (header.key().isReference()) {\n    flags |= NGHTTP2_NV_FLAG_NO_COPY_NAME;\n  }\n  if (header.value().isReference()) {\n    flags |= NGHTTP2_NV_FLAG_NO_COPY_VALUE;\n  }\n  const absl::string_view header_key = header.key().getStringView();\n  const absl::string_view header_value = header.value().getStringView();\n  headers.push_back({removeConst<uint8_t>(header_key.data()),\n                     removeConst<uint8_t>(header_value.data()), header_key.size(),\n                     header_value.size(), flags});\n}\n\nvoid ConnectionImpl::StreamImpl::buildHeaders(std::vector<nghttp2_nv>& final_headers,\n                                              const HeaderMap& headers) {\n  final_headers.reserve(headers.size());\n  headers.iterate([&final_headers](const HeaderEntry& header) -> HeaderMap::Iterate {\n    insertHeader(final_headers, 
header);\n    return HeaderMap::Iterate::Continue;\n  });\n}\n\nvoid ConnectionImpl::ServerStreamImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) {\n  ASSERT(headers.Status()->value() == \"100\");\n  encodeHeaders(headers, false);\n}\n\nvoid ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector<nghttp2_nv>& final_headers,\n                                                   bool end_stream) {\n  nghttp2_data_provider provider;\n  if (!end_stream) {\n    provider.source.ptr = this;\n    provider.read_callback = [](nghttp2_session*, int32_t, uint8_t*, size_t length,\n                                uint32_t* data_flags, nghttp2_data_source* source,\n                                void*) -> ssize_t {\n      return static_cast<StreamImpl*>(source->ptr)->onDataSourceRead(length, data_flags);\n    };\n  }\n\n  local_end_stream_ = end_stream;\n  submitHeaders(final_headers, end_stream ? nullptr : &provider);\n  auto status = parent_.sendPendingFrames();\n  // The RELEASE_ASSERT below does not change the existing behavior of `sendPendingFrames()`.\n  // The `sendPendingFrames()` used to throw on errors and the only method that was catching\n  // these exceptions was the `dispatch()`. The `dispatch()` method still checks and handles\n  // errors returned by the `sendPendingFrames()`.\n  // Other callers of `sendPendingFrames()` do not catch exceptions from this method and\n  // would cause abnormal process termination in error cases. 
This change replaces abnormal\n  // process termination from unhandled exception with the RELEASE_ASSERT.\n  // Further work will replace this RELEASE_ASSERT with proper error handling.\n  RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n  parent_.checkProtocolConstraintViolation();\n}\n\nvoid ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers,\n                                                     bool end_stream) {\n  // This must exist outside of the scope of isUpgrade as the underlying memory is\n  // needed until encodeHeadersBase has been called.\n  std::vector<nghttp2_nv> final_headers;\n  Http::RequestHeaderMapPtr modified_headers;\n  if (Http::Utility::isUpgrade(headers)) {\n    modified_headers = createHeaderMap<RequestHeaderMapImpl>(headers);\n    upgrade_type_ = std::string(headers.getUpgradeValue());\n    Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers);\n    buildHeaders(final_headers, *modified_headers);\n  } else if (headers.Method() && headers.Method()->value() == \"CONNECT\") {\n    // If this is not an upgrade style connect (above branch) it is a bytestream\n    // connect and should have :path and :protocol set accordingly\n    // As HTTP/1.1 does not require a path for CONNECT, we may have to add one\n    // if shifting codecs. 
For now, default to \"/\" - this can be made\n    // configurable if necessary.\n    // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02\n    modified_headers = createHeaderMap<RequestHeaderMapImpl>(headers);\n    modified_headers->setProtocol(Headers::get().ProtocolValues.Bytestream);\n    if (!headers.Path()) {\n      modified_headers->setPath(\"/\");\n    }\n    buildHeaders(final_headers, *modified_headers);\n  } else {\n    buildHeaders(final_headers, headers);\n  }\n  encodeHeadersBase(final_headers, end_stream);\n}\n\nvoid ConnectionImpl::ServerStreamImpl::encodeHeaders(const ResponseHeaderMap& headers,\n                                                     bool end_stream) {\n  // The contract is that client codecs must ensure that :status is present.\n  ASSERT(headers.Status() != nullptr);\n\n  // This must exist outside of the scope of isUpgrade as the underlying memory is\n  // needed until encodeHeadersBase has been called.\n  std::vector<nghttp2_nv> final_headers;\n  Http::ResponseHeaderMapPtr modified_headers;\n  if (Http::Utility::isUpgrade(headers)) {\n    modified_headers = createHeaderMap<ResponseHeaderMapImpl>(headers);\n    Http::Utility::transformUpgradeResponseFromH1toH2(*modified_headers);\n    buildHeaders(final_headers, *modified_headers);\n  } else {\n    buildHeaders(final_headers, headers);\n  }\n  encodeHeadersBase(final_headers, end_stream);\n}\n\nvoid ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) {\n  ASSERT(!local_end_stream_);\n  local_end_stream_ = true;\n  if (pending_send_data_.length() > 0) {\n    // In this case we want trailers to come after we release all pending body data that is\n    // waiting on window updates. 
We need to save the trailers so that we can emit them later.\n    // However, for empty trailers, we don't need to to save the trailers.\n    ASSERT(!pending_trailers_to_encode_);\n    const bool skip_encoding_empty_trailers =\n        trailers.empty() && parent_.skip_encoding_empty_trailers_;\n    if (!skip_encoding_empty_trailers) {\n      pending_trailers_to_encode_ = cloneTrailers(trailers);\n      createPendingFlushTimer();\n    }\n  } else {\n    submitTrailers(trailers);\n    auto status = parent_.sendPendingFrames();\n    // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT.\n    RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n  }\n}\n\nvoid ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadata_map_vector) {\n  ASSERT(parent_.allow_metadata_);\n  MetadataEncoder& metadata_encoder = getMetadataEncoder();\n  if (!metadata_encoder.createPayload(metadata_map_vector)) {\n    return;\n  }\n  for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) {\n    submitMetadata(flags);\n  }\n  auto status = parent_.sendPendingFrames();\n  // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT.\n  RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n}\n\nvoid ConnectionImpl::StreamImpl::readDisable(bool disable) {\n  ENVOY_CONN_LOG(debug, \"Stream {} {}, unconsumed_bytes {} read_disable_count {}\",\n                 parent_.connection_, stream_id_, (disable ? 
\"disabled\" : \"enabled\"),\n                 unconsumed_bytes_, read_disable_count_);\n  if (disable) {\n    ++read_disable_count_;\n  } else {\n    ASSERT(read_disable_count_ > 0);\n    --read_disable_count_;\n    if (!buffersOverrun()) {\n      nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_);\n      unconsumed_bytes_ = 0;\n      auto status = parent_.sendPendingFrames();\n      // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT.\n      RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n    }\n  }\n}\n\nvoid ConnectionImpl::StreamImpl::pendingRecvBufferHighWatermark() {\n  ENVOY_CONN_LOG(debug, \"recv buffer over limit \", parent_.connection_);\n  ASSERT(!pending_receive_buffer_high_watermark_called_);\n  pending_receive_buffer_high_watermark_called_ = true;\n  readDisable(true);\n}\n\nvoid ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark() {\n  ENVOY_CONN_LOG(debug, \"recv buffer under limit \", parent_.connection_);\n  ASSERT(pending_receive_buffer_high_watermark_called_);\n  pending_receive_buffer_high_watermark_called_ = false;\n  readDisable(false);\n}\n\nvoid ConnectionImpl::ClientStreamImpl::decodeHeaders() {\n  auto& headers = absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n  const uint64_t status = Http::Utility::getResponseStatus(*headers);\n\n  if (!upgrade_type_.empty() && headers->Status()) {\n    Http::Utility::transformUpgradeResponseFromH2toH1(*headers, upgrade_type_);\n  }\n\n  // Non-informational headers are non-1xx OR 101-SwitchingProtocols, since 101 implies that further\n  // proxying is on an upgrade path.\n  received_noninformational_headers_ =\n      !CodeUtility::is1xx(status) || status == enumToInt(Http::Code::SwitchingProtocols);\n\n  if (status == enumToInt(Http::Code::Continue)) {\n    ASSERT(!remote_end_stream_);\n    response_decoder_.decode100ContinueHeaders(std::move(headers));\n  } else {\n    
response_decoder_.decodeHeaders(std::move(headers), remote_end_stream_);\n  }\n}\n\nvoid ConnectionImpl::ClientStreamImpl::decodeTrailers() {\n  response_decoder_.decodeTrailers(\n      std::move(absl::get<ResponseTrailerMapPtr>(headers_or_trailers_)));\n}\n\nvoid ConnectionImpl::ServerStreamImpl::decodeHeaders() {\n  auto& headers = absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n  if (Http::Utility::isH2UpgradeRequest(*headers)) {\n    Http::Utility::transformUpgradeRequestFromH2toH1(*headers);\n  }\n  request_decoder_->decodeHeaders(std::move(headers), remote_end_stream_);\n}\n\nvoid ConnectionImpl::ServerStreamImpl::decodeTrailers() {\n  request_decoder_->decodeTrailers(\n      std::move(absl::get<RequestTrailerMapPtr>(headers_or_trailers_)));\n}\n\nvoid ConnectionImpl::StreamImpl::pendingSendBufferHighWatermark() {\n  ENVOY_CONN_LOG(debug, \"send buffer over limit \", parent_.connection_);\n  ASSERT(!pending_send_buffer_high_watermark_called_);\n  pending_send_buffer_high_watermark_called_ = true;\n  runHighWatermarkCallbacks();\n}\n\nvoid ConnectionImpl::StreamImpl::pendingSendBufferLowWatermark() {\n  ENVOY_CONN_LOG(debug, \"send buffer under limit \", parent_.connection_);\n  ASSERT(pending_send_buffer_high_watermark_called_);\n  pending_send_buffer_high_watermark_called_ = false;\n  runLowWatermarkCallbacks();\n}\n\nvoid ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& value) {\n  if (!Utility::reconstituteCrumbledCookies(name, value, cookies_)) {\n    headers().addViaMove(std::move(name), std::move(value));\n  }\n}\n\nvoid ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) {\n  ASSERT(local_end_stream_);\n  const bool skip_encoding_empty_trailers =\n      trailers.empty() && parent_.skip_encoding_empty_trailers_;\n  if (skip_encoding_empty_trailers) {\n    ENVOY_CONN_LOG(debug, \"skipping submitting trailers\", parent_.connection_);\n\n    // Instead of submitting empty trailers, we send empty data 
instead.\n    Buffer::OwnedImpl empty_buffer;\n    encodeDataHelper(empty_buffer, /*end_stream=*/true, skip_encoding_empty_trailers);\n    return;\n  }\n\n  std::vector<nghttp2_nv> final_headers;\n  buildHeaders(final_headers, trailers);\n  int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(),\n                                  final_headers.size());\n  ASSERT(rc == 0);\n}\n\nvoid ConnectionImpl::StreamImpl::submitMetadata(uint8_t flags) {\n  ASSERT(stream_id_ > 0);\n  const int result =\n      nghttp2_submit_extension(parent_.session_, METADATA_FRAME_TYPE, flags, stream_id_, nullptr);\n  ASSERT(result == 0);\n}\n\nssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* data_flags) {\n  if (pending_send_data_.length() == 0 && !local_end_stream_) {\n    ASSERT(!data_deferred_);\n    data_deferred_ = true;\n    return NGHTTP2_ERR_DEFERRED;\n  } else {\n    *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;\n    if (local_end_stream_ && pending_send_data_.length() <= length) {\n      *data_flags |= NGHTTP2_DATA_FLAG_EOF;\n      if (pending_trailers_to_encode_) {\n        // We need to tell the library to not set end stream so that we can emit the trailers.\n        *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM;\n        submitTrailers(*pending_trailers_to_encode_);\n        pending_trailers_to_encode_.reset();\n      }\n    }\n\n    return std::min(length, pending_send_data_.length());\n  }\n}\n\nStatus ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) {\n  // In this callback we are writing out a raw DATA frame without copying. 
nghttp2 assumes that we\n  // \"just know\" that the frame header is 9 bytes.\n  // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback\n  static const uint64_t FRAME_HEADER_SIZE = 9;\n\n  parent_.protocol_constraints_.incrementOutboundDataFrameCount();\n\n  Buffer::OwnedImpl output;\n  auto status = parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE);\n  if (!status.ok()) {\n    ENVOY_CONN_LOG(debug, \"error sending data frame: Too many frames in the outbound queue\",\n                   parent_.connection_);\n    setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood);\n    return status;\n  }\n\n  parent_.stats_.pending_send_bytes_.sub(length);\n  output.move(pending_send_data_, length);\n  parent_.connection_.write(output, false);\n  return status;\n}\n\nvoid ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                                                     nghttp2_data_provider* provider) {\n  ASSERT(stream_id_ == -1);\n  stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, final_headers.data(),\n                                      final_headers.size(), provider, base());\n  ASSERT(stream_id_ > 0);\n}\n\nvoid ConnectionImpl::ServerStreamImpl::submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                                                     nghttp2_data_provider* provider) {\n  ASSERT(stream_id_ != -1);\n  int rc = nghttp2_submit_response(parent_.session_, stream_id_, final_headers.data(),\n                                   final_headers.size(), provider);\n  ASSERT(rc == 0);\n}\n\nvoid ConnectionImpl::ServerStreamImpl::createPendingFlushTimer() {\n  ASSERT(stream_idle_timer_ == nullptr);\n  if (stream_idle_timeout_.count() > 0) {\n    stream_idle_timer_ =\n        parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); });\n    stream_idle_timer_->enableTimer(stream_idle_timeout_);\n  }\n}\n\nvoid 
ConnectionImpl::StreamImpl::onPendingFlushTimer() {\n  ENVOY_CONN_LOG(debug, \"pending stream flush timeout\", parent_.connection_);\n  stream_idle_timer_.reset();\n  parent_.stats_.tx_flush_timeout_.inc();\n  ASSERT(local_end_stream_ && !local_end_stream_sent_);\n  // This will emit a reset frame for this stream and close the stream locally. No reset callbacks\n  // will be run because higher layers think the stream is already finished.\n  resetStreamWorker(StreamResetReason::LocalReset);\n  auto status = parent_.sendPendingFrames();\n  // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT.\n  RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n}\n\nvoid ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) {\n  ASSERT(!local_end_stream_);\n  encodeDataHelper(data, end_stream, /*skip_encoding_empty_trailers=*/false);\n}\n\nvoid ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool end_stream,\n                                                  bool skip_encoding_empty_trailers) {\n  if (skip_encoding_empty_trailers) {\n    ASSERT(data.length() == 0 && end_stream);\n  }\n\n  local_end_stream_ = end_stream;\n  parent_.stats_.pending_send_bytes_.add(data.length());\n  pending_send_data_.move(data);\n  if (data_deferred_) {\n    int rc = nghttp2_session_resume_data(parent_.session_, stream_id_);\n    ASSERT(rc == 0);\n\n    data_deferred_ = false;\n  }\n\n  auto status = parent_.sendPendingFrames();\n  // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT.\n  RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n  parent_.checkProtocolConstraintViolation();\n\n  if (local_end_stream_ && pending_send_data_.length() > 0) {\n    createPendingFlushTimer();\n  }\n}\n\nvoid ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) {\n  // Higher layers expect calling resetStream() to immediately raise 
reset callbacks.\n  runResetCallbacks(reason);\n\n  // If we submit a reset, nghttp2 will cancel outbound frames that have not yet been sent.\n  // We want these frames to go out so we defer the reset until we send all of the frames that\n  // end the local stream.\n  if (local_end_stream_ && !local_end_stream_sent_) {\n    parent_.pending_deferred_reset_ = true;\n    deferred_reset_ = reason;\n    ENVOY_CONN_LOG(trace, \"deferred reset stream\", parent_.connection_);\n  } else {\n    resetStreamWorker(reason);\n  }\n\n  // We must still call sendPendingFrames() in both the deferred and not deferred path. This forces\n  // the cleanup logic to run which will reset the stream in all cases if all data frames could not\n  // be sent.\n  auto status = parent_.sendPendingFrames();\n  // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT.\n  RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n}\n\nvoid ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) {\n  int rc = nghttp2_submit_rst_stream(parent_.session_, NGHTTP2_FLAG_NONE, stream_id_,\n                                     reason == StreamResetReason::LocalRefusedStreamReset\n                                         ? 
NGHTTP2_REFUSED_STREAM\n                                         : NGHTTP2_NO_ERROR);\n  ASSERT(rc == 0);\n}\n\nMetadataEncoder& ConnectionImpl::StreamImpl::getMetadataEncoder() {\n  if (metadata_encoder_ == nullptr) {\n    metadata_encoder_ = std::make_unique<MetadataEncoder>();\n  }\n  return *metadata_encoder_;\n}\n\nMetadataDecoder& ConnectionImpl::StreamImpl::getMetadataDecoder() {\n  if (metadata_decoder_ == nullptr) {\n    auto cb = [this](MetadataMapPtr&& metadata_map_ptr) {\n      this->onMetadataDecoded(std::move(metadata_map_ptr));\n    };\n    metadata_decoder_ = std::make_unique<MetadataDecoder>(cb);\n  }\n  return *metadata_decoder_;\n}\n\nvoid ConnectionImpl::StreamImpl::onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr) {\n  decoder().decodeMetadata(std::move(metadata_map_ptr));\n}\n\nConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats,\n                               Random::RandomGenerator& random_generator,\n                               const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                               const uint32_t max_headers_kb, const uint32_t max_headers_count)\n    : stats_(stats), connection_(connection), max_headers_kb_(max_headers_kb),\n      max_headers_count_(max_headers_count),\n      per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()),\n      stream_error_on_invalid_http_messaging_(\n          http2_options.override_stream_error_on_invalid_http_message().value()),\n      protocol_constraints_(stats, http2_options),\n      skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.http2_skip_encoding_empty_trailers\")),\n      dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false),\n      random_(random_generator) {\n  if (http2_options.has_connection_keepalive()) {\n    keepalive_interval_ = std::chrono::milliseconds(\n        
PROTOBUF_GET_MS_REQUIRED(http2_options.connection_keepalive(), interval));\n    keepalive_timeout_ = std::chrono::milliseconds(\n        PROTOBUF_GET_MS_REQUIRED(http2_options.connection_keepalive(), timeout));\n    keepalive_interval_jitter_percent_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n        http2_options.connection_keepalive(), interval_jitter, 15.0);\n\n    keepalive_send_timer_ = connection.dispatcher().createTimer([this]() { sendKeepalive(); });\n    keepalive_timeout_timer_ =\n        connection.dispatcher().createTimer([this]() { onKeepaliveResponseTimeout(); });\n\n    // This call schedules the initial interval, with jitter.\n    onKeepaliveResponse();\n  }\n}\n\nConnectionImpl::~ConnectionImpl() {\n  for (const auto& stream : active_streams_) {\n    stream->destroy();\n  }\n  nghttp2_session_del(session_);\n}\n\nvoid ConnectionImpl::sendKeepalive() {\n  // Include the current time as the payload to help with debugging.\n  SystemTime now = connection_.dispatcher().timeSource().systemTime();\n  uint64_t ms_since_epoch =\n      std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count();\n  ENVOY_CONN_LOG(trace, \"Sending keepalive PING {}\", connection_, ms_since_epoch);\n\n  // The last parameter is an opaque 8-byte buffer, so this cast is safe.\n  int rc = nghttp2_submit_ping(session_, 0 /*flags*/, reinterpret_cast<uint8_t*>(&ms_since_epoch));\n  ASSERT(rc == 0);\n\n  auto status = sendPendingFrames();\n  // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT.\n  RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n\n  keepalive_timeout_timer_->enableTimer(keepalive_timeout_);\n}\nvoid ConnectionImpl::onKeepaliveResponse() {\n  // Check the timers for nullptr in case the peer sent an unsolicited PING ACK.\n  if (keepalive_timeout_timer_ != nullptr) {\n    keepalive_timeout_timer_->disableTimer();\n  }\n  if (keepalive_send_timer_ != nullptr) {\n    uint64_t interval_ms = 
keepalive_interval_.count();\n    const uint64_t jitter_percent_mod = keepalive_interval_jitter_percent_ * interval_ms / 100;\n    if (jitter_percent_mod > 0) {\n      interval_ms += random_.random() % jitter_percent_mod;\n    }\n    keepalive_send_timer_->enableTimer(std::chrono::milliseconds(interval_ms));\n  }\n}\n\nvoid ConnectionImpl::onKeepaliveResponseTimeout() {\n  ENVOY_CONN_LOG(debug, \"Closing connection due to keepalive timeout\", connection_);\n  stats_.keepalive_timeout_.inc();\n  connection_.close(Network::ConnectionCloseType::NoFlush);\n}\n\nHttp::Status ConnectionImpl::dispatch(Buffer::Instance& data) {\n  // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either\n  // throw an exception or return an error status. The utility wrapper catches exceptions and\n  // converts them to error statuses.\n  return Http::Utility::exceptionToStatus(\n      [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data);\n}\n\nHttp::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) {\n  ENVOY_CONN_LOG(trace, \"dispatching {} bytes\", connection_, data.length());\n  // Make sure that dispatching_ is set to false after dispatching, even when\n  // ConnectionImpl::dispatch returns early or throws an exception (consider removing if there is a\n  // single return after exception removal (#10878)).\n  Cleanup cleanup([this]() { dispatching_ = false; });\n  for (const Buffer::RawSlice& slice : data.getRawSlices()) {\n    dispatching_ = true;\n    ssize_t rc =\n        nghttp2_session_mem_recv(session_, static_cast<const uint8_t*>(slice.mem_), slice.len_);\n    if (!nghttp2_callback_status_.ok()) {\n      return nghttp2_callback_status_;\n    }\n    // This error is returned when nghttp2 library detected a frame flood by one of its\n    // internal mechanisms. Most flood protection is done by Envoy's codec and this error\n    // should never be returned. 
However it is handled here in case nghttp2 has some flood\n    // protections that Envoy's codec does not have.\n    if (rc == NGHTTP2_ERR_FLOODED) {\n      return bufferFloodError(\n          \"Flooding was detected in this HTTP/2 session, and it must be closed\");\n    }\n    if (rc != static_cast<ssize_t>(slice.len_)) {\n      return codecProtocolError(nghttp2_strerror(rc));\n    }\n\n    dispatching_ = false;\n  }\n\n  ENVOY_CONN_LOG(trace, \"dispatched {} bytes\", connection_, data.length());\n  data.drain(data.length());\n\n  // Decoding incoming frames can generate outbound frames so flush pending.\n  return sendPendingFrames();\n}\n\nConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) {\n  return static_cast<StreamImpl*>(nghttp2_session_get_stream_user_data(session_, stream_id));\n}\n\nint ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) {\n  StreamImpl* stream = getStream(stream_id);\n  // If this results in buffering too much data, the watermark buffer will call\n  // pendingRecvBufferHighWatermark, resulting in ++read_disable_count_\n  stream->pending_recv_data_.add(data, len);\n  // Update the window to the peer unless some consumer of this stream's data has hit a flow control\n  // limit and disabled reads on this stream\n  if (!stream->buffersOverrun()) {\n    nghttp2_session_consume(session_, stream_id, len);\n  } else {\n    stream->unconsumed_bytes_ += len;\n  }\n  return 0;\n}\n\nvoid ConnectionImpl::goAway() {\n  int rc = nghttp2_submit_goaway(session_, NGHTTP2_FLAG_NONE,\n                                 nghttp2_session_get_last_proc_stream_id(session_),\n                                 NGHTTP2_NO_ERROR, nullptr, 0);\n  ASSERT(rc == 0);\n\n  auto status = sendPendingFrames();\n  // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT.\n  RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n}\n\nvoid ConnectionImpl::shutdownNotice() {\n  int 
rc = nghttp2_submit_shutdown_notice(session_);\n  ASSERT(rc == 0);\n\n  auto status = sendPendingFrames();\n  // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT.\n  RELEASE_ASSERT(status.ok(), \"sendPendingFrames() failure in non dispatching context\");\n}\n\nStatus ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) {\n  ENVOY_CONN_LOG(trace, \"about to recv frame type={}, flags={}\", connection_,\n                 static_cast<uint64_t>(hd->type), static_cast<uint64_t>(hd->flags));\n\n  // Track all the frames without padding here, since this is the only callback we receive\n  // for some of them (e.g. CONTINUATION frame, frames sent on closed streams, etc.).\n  // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived().\n  auto status = okStatus();\n  if (hd->type != NGHTTP2_HEADERS && hd->type != NGHTTP2_DATA) {\n    status = trackInboundFrames(hd, 0);\n  }\n\n  return status;\n}\n\nABSL_MUST_USE_RESULT\nenum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept {\n  switch (code) {\n  case NGHTTP2_NO_ERROR:\n    return GoAwayErrorCode::NoError;\n  default:\n    return GoAwayErrorCode::Other;\n  }\n}\n\nStatus ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) {\n  ENVOY_CONN_LOG(trace, \"recv frame type={}\", connection_, static_cast<uint64_t>(frame->hd.type));\n\n  // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS\n  // and CONTINUATION frames, but we track them separately: HEADERS frames in onBeginHeaders()\n  // and CONTINUATION frames in onBeforeFrameReceived().\n  ASSERT(frame->hd.type != NGHTTP2_CONTINUATION);\n\n  if ((frame->hd.type == NGHTTP2_PING) && (frame->ping.hd.flags & NGHTTP2_FLAG_ACK)) {\n    // The ``opaque_data`` should be exactly what was sent in the ping, which is\n    // was the current time when the ping was sent. 
This can be useful while debugging\n    // to match the ping and ack.\n    uint64_t data;\n    static_assert(sizeof(data) == sizeof(frame->ping.opaque_data), \"Sizes are equal\");\n    memcpy(&data, frame->ping.opaque_data, sizeof(data));\n    ENVOY_CONN_LOG(trace, \"recv PING ACK {}\", connection_, data);\n\n    onKeepaliveResponse();\n    return okStatus();\n  }\n\n  if (frame->hd.type == NGHTTP2_DATA) {\n    RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->data.padlen));\n  }\n\n  // Only raise GOAWAY once, since we don't currently expose stream information. Shutdown\n  // notifications are the same as a normal GOAWAY.\n  // TODO: handle multiple GOAWAY frames.\n  if (frame->hd.type == NGHTTP2_GOAWAY && !raised_goaway_) {\n    ASSERT(frame->hd.stream_id == 0);\n    raised_goaway_ = true;\n    callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code));\n    return okStatus();\n  }\n\n  if (frame->hd.type == NGHTTP2_SETTINGS && frame->hd.flags == NGHTTP2_FLAG_NONE) {\n    onSettingsForTest(frame->settings);\n  }\n\n  StreamImpl* stream = getStream(frame->hd.stream_id);\n  if (!stream) {\n    return okStatus();\n  }\n\n  switch (frame->hd.type) {\n  case NGHTTP2_HEADERS: {\n    stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;\n    if (!stream->cookies_.empty()) {\n      HeaderString key(Headers::get().Cookie);\n      stream->headers().addViaMove(std::move(key), std::move(stream->cookies_));\n    }\n\n    switch (frame->headers.cat) {\n    case NGHTTP2_HCAT_RESPONSE:\n    case NGHTTP2_HCAT_REQUEST: {\n      stream->decodeHeaders();\n      break;\n    }\n\n    case NGHTTP2_HCAT_HEADERS: {\n      // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers\n      // if local is not complete.\n      if (!stream->deferred_reset_) {\n        if (nghttp2_session_check_server_session(session_) ||\n            stream->received_noninformational_headers_) {\n          
ASSERT(stream->remote_end_stream_);\n          stream->decodeTrailers();\n        } else {\n          // We're a client session and still waiting for non-informational headers.\n          stream->decodeHeaders();\n        }\n      }\n      break;\n    }\n\n    default:\n      // We do not currently support push.\n      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    }\n\n    break;\n  }\n  case NGHTTP2_DATA: {\n    stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;\n\n    // It's possible that we are waiting to send a deferred reset, so only raise data if local\n    // is not complete.\n    if (!stream->deferred_reset_) {\n      stream->decoder().decodeData(stream->pending_recv_data_, stream->remote_end_stream_);\n    }\n\n    stream->pending_recv_data_.drain(stream->pending_recv_data_.length());\n    break;\n  }\n  case NGHTTP2_RST_STREAM: {\n    ENVOY_CONN_LOG(trace, \"remote reset: {}\", connection_, frame->rst_stream.error_code);\n    stats_.rx_reset_.inc();\n    break;\n  }\n  }\n\n  return okStatus();\n}\n\nint ConnectionImpl::onFrameSend(const nghttp2_frame* frame) {\n  // The nghttp2 library does not cleanly give us a way to determine whether we received invalid\n  // data from our peer. Sometimes it raises the invalid frame callback, and sometimes it does not.\n  // In all cases however it will attempt to send a GOAWAY frame with an error status. 
If we see\n  // an outgoing frame of this type, we will return an error code so that we can abort execution.\n  ENVOY_CONN_LOG(trace, \"sent frame type={}\", connection_, static_cast<uint64_t>(frame->hd.type));\n  switch (frame->hd.type) {\n  case NGHTTP2_GOAWAY: {\n    ENVOY_CONN_LOG(debug, \"sent goaway code={}\", connection_, frame->goaway.error_code);\n    if (frame->goaway.error_code != NGHTTP2_NO_ERROR) {\n      // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting.\n      // As such, it is not reliable to call sendPendingFrames() again after this and we assume\n      // that the connection is going to get torn down immediately. One byproduct of this is that\n      // we need to cancel all pending flush stream timeouts since they can race with connection\n      // teardown. As part of the work to remove exceptions we should aim to clean up all of this\n      // error handling logic and only handle this type of case at the end of dispatch.\n      for (auto& stream : active_streams_) {\n        stream->disarmStreamIdleTimer();\n      }\n      return NGHTTP2_ERR_CALLBACK_FAILURE;\n    }\n    break;\n  }\n\n  case NGHTTP2_RST_STREAM: {\n    ENVOY_CONN_LOG(debug, \"sent reset code={}\", connection_, frame->rst_stream.error_code);\n    stats_.tx_reset_.inc();\n    break;\n  }\n\n  case NGHTTP2_HEADERS:\n  case NGHTTP2_DATA: {\n    StreamImpl* stream = getStream(frame->hd.stream_id);\n    stream->local_end_stream_sent_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;\n    break;\n  }\n  }\n\n  return 0;\n}\n\nint ConnectionImpl::onError(absl::string_view error) {\n  ENVOY_CONN_LOG(debug, \"invalid http2: {}\", connection_, error);\n  return 0;\n}\n\nint ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) {\n  ENVOY_CONN_LOG(debug, \"invalid frame: {} on stream {}\", connection_, nghttp2_strerror(error_code),\n                 stream_id);\n\n  // Set details of error_code in the stream whenever we have one.\n  
StreamImpl* stream = getStream(stream_id);\n  if (stream != nullptr) {\n    stream->setDetails(Http2ResponseCodeDetails::get().errorDetails(error_code));\n  }\n\n  if (error_code == NGHTTP2_ERR_HTTP_HEADER || error_code == NGHTTP2_ERR_HTTP_MESSAGING) {\n    stats_.rx_messaging_error_.inc();\n\n    if (stream_error_on_invalid_http_messaging_) {\n      // The stream is about to be closed due to an invalid header or messaging. Don't kill the\n      // entire connection if one stream has bad headers or messaging.\n      if (stream != nullptr) {\n        // See comment below in onStreamClose() for why we do this.\n        stream->reset_due_to_messaging_error_ = true;\n      }\n      return 0;\n    }\n  }\n\n  // Cause dispatch to return with an error code.\n  return NGHTTP2_ERR_CALLBACK_FAILURE;\n}\n\nint ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) {\n  ENVOY_CONN_LOG(trace, \"about to send frame type={}, flags={}\", connection_,\n                 static_cast<uint64_t>(frame->hd.type), static_cast<uint64_t>(frame->hd.flags));\n  ASSERT(!is_outbound_flood_monitored_control_frame_);\n  // Flag flood monitored outbound control frames.\n  is_outbound_flood_monitored_control_frame_ =\n      ((frame->hd.type == NGHTTP2_PING || frame->hd.type == NGHTTP2_SETTINGS) &&\n       frame->hd.flags & NGHTTP2_FLAG_ACK) ||\n      frame->hd.type == NGHTTP2_RST_STREAM;\n  return 0;\n}\n\nStatus ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data,\n                                                size_t length) {\n  // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the\n  // onBeforeFrameSend callback is not called for DATA frames.\n  bool is_outbound_flood_monitored_control_frame = false;\n  std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_);\n  auto status_or_releasor = trackOutboundFrames(is_outbound_flood_monitored_control_frame);\n  if 
(!status_or_releasor.ok()) {\n    return status_or_releasor.status();\n  }\n\n  output.add(data, length);\n  output.addDrainTracker(status_or_releasor.value());\n  return okStatus();\n}\n\nStatusOr<ssize_t> ConnectionImpl::onSend(const uint8_t* data, size_t length) {\n  ENVOY_CONN_LOG(trace, \"send data: bytes={}\", connection_, length);\n  Buffer::OwnedImpl buffer;\n  auto status = addOutboundFrameFragment(buffer, data, length);\n  if (!status.ok()) {\n    ENVOY_CONN_LOG(debug, \"error sending frame: Too many frames in the outbound queue.\",\n                   connection_);\n    return status;\n  }\n\n  // While the buffer is transient the fragment it contains will be moved into the\n  // write_buffer_ of the underlying connection_ by the write method below.\n  // This creates lifetime dependency between the write_buffer_ of the underlying connection\n  // and the codec object. Specifically the write_buffer_ MUST be either fully drained or\n  // deleted before the codec object is deleted. This is presently guaranteed by the\n  // destruction order of the Network::ConnectionImpl object where write_buffer_ is\n  // destroyed before the filter_manager_ which owns the codec through Http::ConnectionManagerImpl.\n  connection_.write(buffer, false);\n  return length;\n}\n\nint ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) {\n  StreamImpl* stream = getStream(stream_id);\n  if (stream) {\n    ENVOY_CONN_LOG(debug, \"stream closed: {}\", connection_, error_code);\n    if (!stream->remote_end_stream_ || !stream->local_end_stream_) {\n      StreamResetReason reason;\n      if (stream->reset_due_to_messaging_error_) {\n        // Unfortunately, the nghttp2 API makes it incredibly difficult to clearly understand\n        // the flow of resets. I.e., did the reset originate locally? Was it remote? Here,\n        // we attempt to track cases in which we sent a reset locally due to an invalid frame\n        // received from the remote. 
We only do that in two cases currently (HTTP messaging layer\n        // errors from https://tools.ietf.org/html/rfc7540#section-8 which nghttp2 is very strict\n        // about). In other cases we treat invalid frames as a protocol error and just kill\n        // the connection.\n        reason = StreamResetReason::LocalReset;\n      } else {\n        if (error_code == NGHTTP2_REFUSED_STREAM) {\n          reason = StreamResetReason::RemoteRefusedStreamReset;\n          stream->setDetails(Http2ResponseCodeDetails::get().remote_refused);\n        } else {\n          reason = StreamResetReason::RemoteReset;\n          stream->setDetails(Http2ResponseCodeDetails::get().remote_reset);\n        }\n      }\n\n      stream->runResetCallbacks(reason);\n    }\n\n    stream->destroy();\n    connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_));\n    // Any unconsumed data must be consumed before the stream is deleted.\n    // nghttp2 does not appear to track this internally, and any stream deleted\n    // with outstanding window will contribute to a slow connection-window leak.\n    nghttp2_session_consume(session_, stream_id, stream->unconsumed_bytes_);\n    stream->unconsumed_bytes_ = 0;\n    nghttp2_session_set_stream_user_data(session_, stream->stream_id_, nullptr);\n  }\n\n  return 0;\n}\n\nint ConnectionImpl::onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len) {\n  ENVOY_CONN_LOG(trace, \"recv {} bytes METADATA\", connection_, len);\n\n  StreamImpl* stream = getStream(stream_id);\n  if (!stream) {\n    return 0;\n  }\n\n  bool success = stream->getMetadataDecoder().receiveMetadata(data, len);\n  return success ? 
0 : NGHTTP2_ERR_CALLBACK_FAILURE;\n}\n\nint ConnectionImpl::onMetadataFrameComplete(int32_t stream_id, bool end_metadata) {\n  ENVOY_CONN_LOG(trace, \"recv METADATA frame on stream {}, end_metadata: {}\", connection_,\n                 stream_id, end_metadata);\n\n  StreamImpl* stream = getStream(stream_id);\n  if (stream == nullptr) {\n    return 0;\n  }\n\n  bool result = stream->getMetadataDecoder().onMetadataFrameComplete(end_metadata);\n  return result ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE;\n}\n\nssize_t ConnectionImpl::packMetadata(int32_t stream_id, uint8_t* buf, size_t len) {\n  ENVOY_CONN_LOG(trace, \"pack METADATA frame on stream {}\", connection_, stream_id);\n\n  StreamImpl* stream = getStream(stream_id);\n  if (stream == nullptr) {\n    return 0;\n  }\n\n  MetadataEncoder& encoder = stream->getMetadataEncoder();\n  return encoder.packNextFramePayload(buf, len);\n}\n\nint ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name,\n                               HeaderString&& value) {\n  StreamImpl* stream = getStream(frame->hd.stream_id);\n  if (!stream) {\n    // We have seen 1 or 2 crashes where we get a headers callback but there is no associated\n    // stream data. I honestly am not sure how this can happen. However, from reading the nghttp2\n    // code it looks possible that inflate_header_block() can safely inflate headers for an already\n    // closed stream, but will still call the headers callback. 
Since that seems possible, we should\n    // ignore this case here.\n    // TODO(mattklein123): Figure out a test case that can hit this.\n    stats_.headers_cb_no_stream_.inc();\n    return 0;\n  }\n\n  auto should_return = checkHeaderNameForUnderscores(name.getStringView());\n  if (should_return) {\n    stream->setDetails(Http2ResponseCodeDetails::get().invalid_underscore);\n    name.clear();\n    value.clear();\n    return should_return.value();\n  }\n\n  stream->saveHeader(std::move(name), std::move(value));\n\n  if (stream->headers().byteSize() > max_headers_kb_ * 1024 ||\n      stream->headers().size() > max_headers_count_) {\n    stream->setDetails(Http2ResponseCodeDetails::get().too_many_headers);\n    stats_.header_overflow_.inc();\n    // This will cause the library to reset/close the stream.\n    return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;\n  } else {\n    return 0;\n  }\n}\n\nStatus ConnectionImpl::sendPendingFrames() {\n  if (dispatching_ || connection_.state() == Network::Connection::State::Closed) {\n    return okStatus();\n  }\n\n  const int rc = nghttp2_session_send(session_);\n  if (rc != 0) {\n    ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE);\n\n    if (!nghttp2_callback_status_.ok()) {\n      return nghttp2_callback_status_;\n    }\n\n    // Protocol constrain violations should set the nghttp2_callback_status_ error, and return at\n    // the statement above.\n    ASSERT(protocol_constraints_.status().ok());\n\n    return codecProtocolError(nghttp2_strerror(rc));\n  }\n\n  // See ConnectionImpl::StreamImpl::resetStream() for why we do this. This is an uncommon event,\n  // so iterating through every stream to find the ones that have a deferred reset is not a big\n  // deal. Furthermore, queueing a reset frame does not actually invoke the close stream callback.\n  // This is only done when the reset frame is sent. Thus, it's safe to work directly with the\n  // stream map.\n  // NOTE: The way we handle deferred reset is essentially best effort. 
If we intend to do a\n  //       deferred reset, we try to finish the stream, including writing any pending data frames.\n  //       If we cannot do this (potentially due to not enough window), we just reset the stream.\n  //       In general this behavior occurs only when we are trying to send immediate error messages\n  //       to short circuit requests. In the best effort case, we complete the stream before\n  //       resetting. In other cases, we just do the reset now which will blow away pending data\n  //       frames and release any memory associated with the stream.\n  if (pending_deferred_reset_) {\n    pending_deferred_reset_ = false;\n    for (auto& stream : active_streams_) {\n      if (stream->deferred_reset_) {\n        stream->resetStreamWorker(stream->deferred_reset_.value());\n      }\n    }\n    RETURN_IF_ERROR(sendPendingFrames());\n  }\n  return okStatus();\n}\n\nvoid ConnectionImpl::sendSettings(\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options, bool disable_push) {\n  absl::InlinedVector<nghttp2_settings_entry, 10> settings;\n  auto insertParameter = [&settings](const nghttp2_settings_entry& entry) mutable -> bool {\n    const auto it = std::find_if(settings.cbegin(), settings.cend(),\n                                 [&entry](const nghttp2_settings_entry& existing) {\n                                   return entry.settings_id == existing.settings_id;\n                                 });\n    if (it != settings.end()) {\n      return false;\n    }\n    settings.push_back(entry);\n    return true;\n  };\n\n  // Universally disable receiving push promise frames as we don't currently support\n  // them. 
nghttp2 will fail the connection if the other side still sends them.\n  // TODO(mattklein123): Remove this when we correctly proxy push promise.\n  // NOTE: This is a special case with respect to custom parameter overrides in that server push is\n  // not supported and therefore not end user configurable.\n  if (disable_push) {\n    settings.push_back(\n        {static_cast<int32_t>(NGHTTP2_SETTINGS_ENABLE_PUSH), disable_push ? 0U : 1U});\n  }\n\n  for (const auto& it : http2_options.custom_settings_parameters()) {\n    ASSERT(it.identifier().value() <= std::numeric_limits<uint16_t>::max());\n    const bool result =\n        insertParameter({static_cast<int32_t>(it.identifier().value()), it.value().value()});\n    ASSERT(result);\n    ENVOY_CONN_LOG(debug, \"adding custom settings parameter with id {:#x} to {}\", connection_,\n                   it.identifier().value(), it.value().value());\n  }\n\n  // Insert named parameters.\n  settings.insert(\n      settings.end(),\n      {{NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, http2_options.hpack_table_size().value()},\n       {NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL, http2_options.allow_connect()},\n       {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()},\n       {NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, http2_options.initial_stream_window_size().value()}});\n  if (!settings.empty()) {\n    int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, settings.data(), settings.size());\n    ASSERT(rc == 0);\n  } else {\n    // nghttp2_submit_settings need to be called at least once\n    int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, nullptr, 0);\n    ASSERT(rc == 0);\n  }\n\n  const uint32_t initial_connection_window_size =\n      http2_options.initial_connection_window_size().value();\n  // Increase connection window size up to our default size.\n  if (initial_connection_window_size != NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE) {\n    ENVOY_CONN_LOG(debug, \"updating 
connection-level initial window size to {}\", connection_,\n                   initial_connection_window_size);\n    int rc = nghttp2_submit_window_update(session_, NGHTTP2_FLAG_NONE, 0,\n                                          initial_connection_window_size -\n                                              NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE);\n    ASSERT(rc == 0);\n  }\n}\n\nint ConnectionImpl::setAndCheckNghttp2CallbackStatus(Status&& status) {\n  // Keep the error status that caused the original failure. Subsequent\n  // error statuses are silently discarded.\n  nghttp2_callback_status_.Update(std::move(status));\n  return nghttp2_callback_status_.ok() ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE;\n}\n\nvoid ConnectionImpl::scheduleProtocolConstraintViolationCallback() {\n  if (!protocol_constraint_violation_callback_) {\n    protocol_constraint_violation_callback_ = connection_.dispatcher().createSchedulableCallback(\n        [this]() { onProtocolConstraintViolation(); });\n    protocol_constraint_violation_callback_->scheduleCallbackCurrentIteration();\n  }\n}\n\nvoid ConnectionImpl::onProtocolConstraintViolation() {\n  // Flooded outbound queue implies that peer is not reading and it does not\n  // make sense to try to flush pending bytes.\n  connection_.close(Envoy::Network::ConnectionCloseType::NoFlush);\n}\n\nConnectionImpl::Http2Callbacks::Http2Callbacks() {\n  nghttp2_session_callbacks_new(&callbacks_);\n  nghttp2_session_callbacks_set_send_callback(\n      callbacks_,\n      [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t {\n        auto status_or_len = static_cast<ConnectionImpl*>(user_data)->onSend(data, length);\n        if (status_or_len.ok()) {\n          return status_or_len.value();\n        }\n        auto status = status_or_len.status();\n        return static_cast<ConnectionImpl*>(user_data)->setAndCheckNghttp2CallbackStatus(\n            std::move(status));\n      });\n\n  
nghttp2_session_callbacks_set_send_data_callback(\n      callbacks_,\n      [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length,\n         nghttp2_data_source* source, void*) -> int {\n        ASSERT(frame->data.padlen == 0);\n        auto status = static_cast<StreamImpl*>(source->ptr)->onDataSourceSend(framehd, length);\n        return static_cast<StreamImpl*>(source->ptr)\n            ->parent_.setAndCheckNghttp2CallbackStatus(std::move(status));\n      });\n\n  nghttp2_session_callbacks_set_on_begin_headers_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {\n        auto status = static_cast<ConnectionImpl*>(user_data)->onBeginHeaders(frame);\n        return static_cast<ConnectionImpl*>(user_data)->setAndCheckNghttp2CallbackStatus(\n            std::move(status));\n      });\n\n  nghttp2_session_callbacks_set_on_header_callback(\n      callbacks_,\n      [](nghttp2_session*, const nghttp2_frame* frame, const uint8_t* raw_name, size_t name_length,\n         const uint8_t* raw_value, size_t value_length, uint8_t, void* user_data) -> int {\n        // TODO PERF: Can reference count here to avoid copies.\n        HeaderString name;\n        name.setCopy(reinterpret_cast<const char*>(raw_name), name_length);\n        HeaderString value;\n        value.setCopy(reinterpret_cast<const char*>(raw_value), value_length);\n        return static_cast<ConnectionImpl*>(user_data)->onHeader(frame, std::move(name),\n                                                                 std::move(value));\n      });\n\n  nghttp2_session_callbacks_set_on_data_chunk_recv_callback(\n      callbacks_,\n      [](nghttp2_session*, uint8_t, int32_t stream_id, const uint8_t* data, size_t len,\n         void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onData(stream_id, data, len);\n      });\n\n  nghttp2_session_callbacks_set_on_begin_frame_callback(\n      callbacks_, 
[](nghttp2_session*, const nghttp2_frame_hd* hd, void* user_data) -> int {\n        auto status = static_cast<ConnectionImpl*>(user_data)->onBeforeFrameReceived(hd);\n        return static_cast<ConnectionImpl*>(user_data)->setAndCheckNghttp2CallbackStatus(\n            std::move(status));\n      });\n\n  nghttp2_session_callbacks_set_on_frame_recv_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {\n        auto status = static_cast<ConnectionImpl*>(user_data)->onFrameReceived(frame);\n        return static_cast<ConnectionImpl*>(user_data)->setAndCheckNghttp2CallbackStatus(\n            std::move(status));\n      });\n\n  nghttp2_session_callbacks_set_on_stream_close_callback(\n      callbacks_,\n      [](nghttp2_session*, int32_t stream_id, uint32_t error_code, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onStreamClose(stream_id, error_code);\n      });\n\n  nghttp2_session_callbacks_set_on_frame_send_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onFrameSend(frame);\n      });\n\n  nghttp2_session_callbacks_set_before_frame_send_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onBeforeFrameSend(frame);\n      });\n\n  nghttp2_session_callbacks_set_on_frame_not_send_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame*, int, void*) -> int {\n        // We used to always return failure here but it looks now this can get called if the other\n        // side sends GOAWAY and we are trying to send a SETTINGS ACK. 
Just ignore this for now.\n        return 0;\n      });\n\n  nghttp2_session_callbacks_set_on_invalid_frame_recv_callback(\n      callbacks_,\n      [](nghttp2_session*, const nghttp2_frame* frame, int error_code, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onInvalidFrame(frame->hd.stream_id,\n                                                                       error_code);\n      });\n\n  nghttp2_session_callbacks_set_on_extension_chunk_recv_callback(\n      callbacks_,\n      [](nghttp2_session*, const nghttp2_frame_hd* hd, const uint8_t* data, size_t len,\n         void* user_data) -> int {\n        ASSERT(hd->length >= len);\n        return static_cast<ConnectionImpl*>(user_data)->onMetadataReceived(hd->stream_id, data,\n                                                                           len);\n      });\n\n  nghttp2_session_callbacks_set_unpack_extension_callback(\n      callbacks_, [](nghttp2_session*, void**, const nghttp2_frame_hd* hd, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onMetadataFrameComplete(\n            hd->stream_id, hd->flags == END_METADATA_FLAG);\n      });\n\n  nghttp2_session_callbacks_set_pack_extension_callback(\n      callbacks_,\n      [](nghttp2_session*, uint8_t* buf, size_t len, const nghttp2_frame* frame,\n         void* user_data) -> ssize_t {\n        ASSERT(frame->hd.length <= len);\n        return static_cast<ConnectionImpl*>(user_data)->packMetadata(frame->hd.stream_id, buf, len);\n      });\n\n  nghttp2_session_callbacks_set_error_callback2(\n      callbacks_, [](nghttp2_session*, int, const char* msg, size_t len, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onError(absl::string_view(msg, len));\n      });\n}\n\nConnectionImpl::Http2Callbacks::~Http2Callbacks() { nghttp2_session_callbacks_del(callbacks_); }\n\nConnectionImpl::Http2Options::Http2Options(\n    const 
envoy::config::core::v3::Http2ProtocolOptions& http2_options) {\n  nghttp2_option_new(&options_);\n  // Currently we do not do anything with stream priority. Setting the following option prevents\n  // nghttp2 from keeping around closed streams for use during stream priority dependency graph\n  // calculations. This saves a tremendous amount of memory in cases where there are a large\n  // number of kept alive HTTP/2 connections.\n  nghttp2_option_set_no_closed_streams(options_, 1);\n  nghttp2_option_set_no_auto_window_update(options_, 1);\n\n  // The max send header block length is configured to an arbitrarily high number so as to never\n  // trigger the check within nghttp2, as we check request headers length in\n  // codec_impl::saveHeader.\n  nghttp2_option_set_max_send_header_block_length(options_, 0x2000000);\n\n  if (http2_options.hpack_table_size().value() != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) {\n    nghttp2_option_set_max_deflate_dynamic_table_size(options_,\n                                                      http2_options.hpack_table_size().value());\n  }\n\n  if (http2_options.allow_metadata()) {\n    nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE);\n  } else {\n    ENVOY_LOG(trace, \"Codec does not have Metadata frame support.\");\n  }\n\n  // nghttp2 v1.39.2 lowered the internal flood protection limit from 10K to 1K of ACK frames.\n  // This new limit may cause the internal nghttp2 mitigation to trigger more often (as it\n  // requires just 9K of incoming bytes for smallest 9 byte SETTINGS frame), bypassing the same\n  // mitigation and its associated behavior in the envoy HTTP/2 codec. 
Since envoy does not rely\n  // on this mitigation, set back to the old 10K number to avoid any changes in the HTTP/2 codec\n  // behavior.\n  nghttp2_option_set_max_outbound_ack(options_, 10000);\n}\n\nConnectionImpl::Http2Options::~Http2Options() { nghttp2_option_del(options_); }\n\nConnectionImpl::ClientHttp2Options::ClientHttp2Options(\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options)\n    : Http2Options(http2_options) {\n  // Temporarily disable initial max streams limit/protection, since we might want to create\n  // more than 100 streams before receiving the HTTP/2 SETTINGS frame from the server.\n  //\n  // TODO(PiotrSikora): remove this once multiple upstream connections or queuing are implemented.\n  nghttp2_option_set_peer_max_concurrent_streams(\n      options_, ::Envoy::Http2::Utility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS);\n}\n\nClientConnectionImpl::ClientConnectionImpl(\n    Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats,\n    Random::RandomGenerator& random_generator,\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n    const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count,\n    Nghttp2SessionFactory& http2_session_factory)\n    : ConnectionImpl(connection, stats, random_generator, http2_options, max_response_headers_kb,\n                     max_response_headers_count),\n      callbacks_(callbacks) {\n  ClientHttp2Options client_http2_options(http2_options);\n  session_ = http2_session_factory.create(http2_callbacks_.callbacks(), base(),\n                                          client_http2_options.options());\n  http2_session_factory.init(session_, base(), http2_options);\n  allow_metadata_ = http2_options.allow_metadata();\n}\n\nRequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) {\n  ClientStreamImplPtr stream(new ClientStreamImpl(*this, per_stream_buffer_limit_, decoder));\n  // If the 
connection is currently above the high watermark, make sure to inform the new stream.\n  // The connection can not pass this on automatically as it has no awareness that a new stream is\n  // created.\n  if (connection_.aboveHighWatermark()) {\n    stream->runHighWatermarkCallbacks();\n  }\n  ClientStreamImpl& stream_ref = *stream;\n  LinkedList::moveIntoList(std::move(stream), active_streams_);\n  return stream_ref;\n}\n\nStatus ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) {\n  // The client code explicitly does not currently support push promise.\n  RELEASE_ASSERT(frame->hd.type == NGHTTP2_HEADERS, \"\");\n  RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE ||\n                     frame->headers.cat == NGHTTP2_HCAT_HEADERS,\n                 \"\");\n  if (frame->headers.cat == NGHTTP2_HCAT_HEADERS) {\n    StreamImpl* stream = getStream(frame->hd.stream_id);\n    stream->allocTrailers();\n  }\n\n  return okStatus();\n}\n\nint ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name,\n                                   HeaderString&& value) {\n  // The client code explicitly does not currently support push promise.\n  ASSERT(frame->hd.type == NGHTTP2_HEADERS);\n  ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || frame->headers.cat == NGHTTP2_HCAT_HEADERS);\n  return saveHeader(frame, std::move(name), std::move(value));\n}\n\nServerConnectionImpl::ServerConnectionImpl(\n    Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats,\n    Random::RandomGenerator& random_generator,\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n    const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count,\n    envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n        headers_with_underscores_action)\n    : ConnectionImpl(connection, stats, random_generator, http2_options, max_request_headers_kb,\n                     
max_request_headers_count),\n      callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) {\n  Http2Options h2_options(http2_options);\n\n  nghttp2_session_server_new2(&session_, http2_callbacks_.callbacks(), base(),\n                              h2_options.options());\n  sendSettings(http2_options, false);\n  allow_metadata_ = http2_options.allow_metadata();\n}\n\nStatus ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) {\n  // For a server connection, we should never get push promise frames.\n  ASSERT(frame->hd.type == NGHTTP2_HEADERS);\n\n  RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->headers.padlen));\n\n  if (frame->headers.cat != NGHTTP2_HCAT_REQUEST) {\n    stats_.trailers_.inc();\n    ASSERT(frame->headers.cat == NGHTTP2_HCAT_HEADERS);\n\n    StreamImpl* stream = getStream(frame->hd.stream_id);\n    stream->allocTrailers();\n    return okStatus();\n  }\n\n  ServerStreamImplPtr stream(new ServerStreamImpl(*this, per_stream_buffer_limit_));\n  if (connection_.aboveHighWatermark()) {\n    stream->runHighWatermarkCallbacks();\n  }\n  stream->request_decoder_ = &callbacks_.newStream(*stream);\n  stream->stream_id_ = frame->hd.stream_id;\n  LinkedList::moveIntoList(std::move(stream), active_streams_);\n  nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id,\n                                       active_streams_.front().get());\n  return okStatus();\n}\n\nint ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name,\n                                   HeaderString&& value) {\n  // For a server connection, we should never get push promise frames.\n  ASSERT(frame->hd.type == NGHTTP2_HEADERS);\n  ASSERT(frame->headers.cat == NGHTTP2_HCAT_REQUEST || frame->headers.cat == NGHTTP2_HCAT_HEADERS);\n  return saveHeader(frame, std::move(name), std::move(value));\n}\n\nStatus ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd,\n                                     
           uint32_t padding_length) {\n  ENVOY_CONN_LOG(trace, \"track inbound frame type={} flags={} length={} padding_length={}\",\n                 connection_, static_cast<uint64_t>(hd->type), static_cast<uint64_t>(hd->flags),\n                 static_cast<uint64_t>(hd->length), padding_length);\n\n  auto result = protocol_constraints_.trackInboundFrames(hd, padding_length);\n  if (!result.ok()) {\n    ENVOY_CONN_LOG(trace, \"error reading frame: {} received in this HTTP/2 session.\", connection_,\n                   result.message());\n    if (isInboundFramesWithEmptyPayloadError(result)) {\n      ConnectionImpl::StreamImpl* stream = getStream(hd->stream_id);\n      if (stream) {\n        stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood);\n      }\n    }\n  }\n  return result;\n}\n\nStatusOr<ProtocolConstraints::ReleasorProc>\nServerConnectionImpl::trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) {\n  auto releasor =\n      protocol_constraints_.incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame);\n  if (dispatching_downstream_data_ && !protocol_constraints_.checkOutboundFrameLimits().ok()) {\n    return protocol_constraints_.status();\n  }\n  return releasor;\n}\n\nvoid ServerConnectionImpl::checkProtocolConstraintViolation() {\n  if (!protocol_constraints_.checkOutboundFrameLimits().ok()) {\n    scheduleProtocolConstraintViolationCallback();\n  }\n}\n\nHttp::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) {\n  // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either\n  // throw an exception or return an error status. 
The utility wrapper catches exceptions and\n  // converts them to error statuses.\n  return Http::Utility::exceptionToStatus(\n      [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data);\n}\n\nHttp::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) {\n  ASSERT(!dispatching_downstream_data_);\n  dispatching_downstream_data_ = true;\n\n  // Make sure the dispatching_downstream_data_ is set to false when innerDispatch ends.\n  Cleanup cleanup([this]() { dispatching_downstream_data_ = false; });\n\n  // Make sure downstream outbound queue was not flooded by the upstream frames.\n  RETURN_IF_ERROR(protocol_constraints_.checkOutboundFrameLimits());\n  return ConnectionImpl::innerDispatch(data);\n}\n\nabsl::optional<int>\nServerConnectionImpl::checkHeaderNameForUnderscores(absl::string_view header_name) {\n  if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW &&\n      Http::HeaderUtility::headerNameContainsUnderscore(header_name)) {\n    if (headers_with_underscores_action_ ==\n        envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) {\n      ENVOY_CONN_LOG(debug, \"Dropping header with invalid characters in its name: {}\", connection_,\n                     header_name);\n      stats_.dropped_headers_with_underscores_.inc();\n      return 0;\n    }\n    ENVOY_CONN_LOG(debug, \"Rejecting request due to header name with underscores: {}\", connection_,\n                   header_name);\n    stats_.requests_rejected_with_underscores_in_headers_.inc();\n    return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;\n  }\n  return absl::nullopt;\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/codec_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/statusor.h\"\n#include \"common/common/thread.h\"\n#include \"common/http/codec_helper.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/http/http2/metadata_decoder.h\"\n#include \"common/http/http2/metadata_encoder.h\"\n#include \"common/http/http2/protocol_constraints.h\"\n#include \"common/http/status.h\"\n#include \"common/http/utility.h\"\n\n#include \"absl/types/optional.h\"\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\n// This is not the full client magic, but it's the smallest size that should be able to\n// differentiate between HTTP/1 and HTTP/2.\nconst std::string CLIENT_MAGIC_PREFIX = \"PRI * HTTP/2\";\n\nclass Utility {\npublic:\n  /**\n   * Deal with https://tools.ietf.org/html/rfc7540#section-8.1.2.5\n   * @param key supplies the incoming header key.\n   * @param value supplies the incoming header value.\n   * @param cookies supplies the header string to fill if this is a cookie header that needs to be\n   *                rebuilt.\n   */\n  static bool reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value,\n                                          HeaderString& cookies);\n};\n\nclass ConnectionImpl;\n\n// Abstract nghttp2_session factory. 
Used to enable injection of factories for testing.\nclass Nghttp2SessionFactory {\npublic:\n  using ConnectionImplType = ConnectionImpl;\n  virtual ~Nghttp2SessionFactory() = default;\n\n  // Returns a new nghttp2_session to be used with |connection|.\n  virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks,\n                                  ConnectionImplType* connection,\n                                  const nghttp2_option* options) PURE;\n\n  // Initializes the |session|.\n  virtual void init(nghttp2_session* session, ConnectionImplType* connection,\n                    const envoy::config::core::v3::Http2ProtocolOptions& options) PURE;\n};\n\nclass ProdNghttp2SessionFactory : public Nghttp2SessionFactory {\npublic:\n  nghttp2_session* create(const nghttp2_session_callbacks* callbacks, ConnectionImpl* connection,\n                          const nghttp2_option* options) override;\n\n  void init(nghttp2_session* session, ConnectionImpl* connection,\n            const envoy::config::core::v3::Http2ProtocolOptions& options) override;\n\n  // Returns a global factory instance. 
Note that this is possible because no internal state is\n  // maintained; the thread safety of create() and init()'s side effects is guaranteed by Envoy's\n  // worker based threading model.\n  static ProdNghttp2SessionFactory& get() {\n    static ProdNghttp2SessionFactory* instance = new ProdNghttp2SessionFactory();\n    return *instance;\n  }\n};\n\n/**\n * Base class for HTTP/2 client and server codecs.\n */\nclass ConnectionImpl : public virtual Connection, protected Logger::Loggable<Logger::Id::http2> {\npublic:\n  ConnectionImpl(Network::Connection& connection, CodecStats& stats,\n                 Random::RandomGenerator& random_generator,\n                 const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                 const uint32_t max_headers_kb, const uint32_t max_headers_count);\n\n  ~ConnectionImpl() override;\n\n  // Http::Connection\n  // NOTE: the `dispatch` method is also overridden in the ServerConnectionImpl class\n  Http::Status dispatch(Buffer::Instance& data) override;\n  void goAway() override;\n  Protocol protocol() override { return Protocol::Http2; }\n  void shutdownNotice() override;\n  bool wantsToWrite() override { return nghttp2_session_want_write(session_); }\n  // Propagate network connection watermark events to each stream on the connection.\n  void onUnderlyingConnectionAboveWriteBufferHighWatermark() override {\n    for (auto& stream : active_streams_) {\n      stream->runHighWatermarkCallbacks();\n    }\n  }\n  void onUnderlyingConnectionBelowWriteBufferLowWatermark() override {\n    for (auto& stream : active_streams_) {\n      stream->runLowWatermarkCallbacks();\n    }\n  }\n\n  /**\n   * An inner dispatch call that executes the dispatching logic. 
While exception removal is in\n   * migration (#10878), this function may either throw an exception or return an error status.\n   * Exceptions are caught and translated to their corresponding statuses in the outer level\n   * dispatch.\n   * This needs to be virtual so that ServerConnectionImpl can override.\n   * TODO(#10878): Remove this when exception removal is complete.\n   */\n  virtual Http::Status innerDispatch(Buffer::Instance& data);\n\nprotected:\n  friend class ProdNghttp2SessionFactory;\n\n  /**\n   * Wrapper for static nghttp2 callback dispatchers.\n   */\n  class Http2Callbacks {\n  public:\n    Http2Callbacks();\n    ~Http2Callbacks();\n\n    const nghttp2_session_callbacks* callbacks() { return callbacks_; }\n\n  private:\n    nghttp2_session_callbacks* callbacks_;\n  };\n\n  /**\n   * Wrapper for static nghttp2 session options.\n   */\n  class Http2Options {\n  public:\n    Http2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options);\n    ~Http2Options();\n\n    const nghttp2_option* options() { return options_; }\n\n  protected:\n    nghttp2_option* options_;\n  };\n\n  class ClientHttp2Options : public Http2Options {\n  public:\n    ClientHttp2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options);\n  };\n\n  /**\n   * Base class for client and server side streams.\n   */\n  struct StreamImpl : public virtual StreamEncoder,\n                      public Stream,\n                      public LinkedObject<StreamImpl>,\n                      public Event::DeferredDeletable,\n                      public StreamCallbackHelper {\n\n    StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit);\n    ~StreamImpl() override;\n    // TODO(mattklein123): Optimally this would be done in the destructor but there are currently\n    // deferred delete lifetime issues that need sorting out if the destructor of the stream is\n    // going to be able to refer to the parent connection.\n    void destroy();\n    void 
disarmStreamIdleTimer() {\n      if (stream_idle_timer_ != nullptr) {\n        // To ease testing and the destructor assertion.\n        stream_idle_timer_->disableTimer();\n        stream_idle_timer_.reset();\n      }\n    }\n\n    StreamImpl* base() { return this; }\n    ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags);\n    Status onDataSourceSend(const uint8_t* framehd, size_t length);\n    void resetStreamWorker(StreamResetReason reason);\n    static void buildHeaders(std::vector<nghttp2_nv>& final_headers, const HeaderMap& headers);\n    void saveHeader(HeaderString&& name, HeaderString&& value);\n    void encodeHeadersBase(const std::vector<nghttp2_nv>& final_headers, bool end_stream);\n    virtual void submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                               nghttp2_data_provider* provider) PURE;\n    void encodeTrailersBase(const HeaderMap& headers);\n    void submitTrailers(const HeaderMap& trailers);\n    void submitMetadata(uint8_t flags);\n    virtual StreamDecoder& decoder() PURE;\n    virtual HeaderMap& headers() PURE;\n    virtual void allocTrailers() PURE;\n    virtual HeaderMapPtr cloneTrailers(const HeaderMap& trailers) PURE;\n    virtual void createPendingFlushTimer() PURE;\n    void onPendingFlushTimer();\n\n    // Http::StreamEncoder\n    void encodeData(Buffer::Instance& data, bool end_stream) override;\n    Stream& getStream() override { return *this; }\n    void encodeMetadata(const MetadataMapVector& metadata_map_vector) override;\n    Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return absl::nullopt; }\n\n    // Http::Stream\n    void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); }\n    void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); }\n    void resetStream(StreamResetReason reason) override;\n    void readDisable(bool disable) override;\n    uint32_t bufferLimit() override { return 
pending_recv_data_.highWatermark(); }\n    const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override {\n      return parent_.connection_.localAddress();\n    }\n    absl::string_view responseDetails() override { return details_; }\n    void setFlushTimeout(std::chrono::milliseconds timeout) override {\n      stream_idle_timeout_ = timeout;\n    }\n\n    // This code assumes that details is a static string, so that we\n    // can avoid copying it.\n    void setDetails(absl::string_view details) {\n      // TODO(asraa): In some cases nghttp2's error handling may cause processing of multiple\n      // invalid frames for a single stream. If a temporal stream error is returned from a callback,\n      // remaining frames in the buffer will still be partially processed. For example, remaining\n      // frames will still parse through nghttp2's push promise error handling and in\n      // onBeforeFrame(Send/Received) callbacks, which may return invalid frame errors and attempt\n      // to set details again. In these cases, we simply do not overwrite details. 
When internal\n      // error latching is implemented in the codec for exception removal, we should prevent calling\n      // setDetails in an error state.\n      if (details_.empty()) {\n        details_ = details;\n      }\n    }\n\n    void setWriteBufferWatermarks(uint32_t low_watermark, uint32_t high_watermark) {\n      pending_recv_data_.setWatermarks(low_watermark, high_watermark);\n      pending_send_data_.setWatermarks(low_watermark, high_watermark);\n    }\n\n    // If the receive buffer encounters watermark callbacks, enable/disable reads on this stream.\n    void pendingRecvBufferHighWatermark();\n    void pendingRecvBufferLowWatermark();\n\n    // If the send buffer encounters watermark callbacks, propagate this information to the streams.\n    // The router and connection manager will propagate them on as appropriate.\n    void pendingSendBufferHighWatermark();\n    void pendingSendBufferLowWatermark();\n\n    // Does any necessary WebSocket/Upgrade conversion, then passes the headers\n    // to the decoder_.\n    virtual void decodeHeaders() PURE;\n    virtual void decodeTrailers() PURE;\n\n    // Get MetadataEncoder for this stream.\n    MetadataEncoder& getMetadataEncoder();\n    // Get MetadataDecoder for this stream.\n    MetadataDecoder& getMetadataDecoder();\n    // Callback function for MetadataDecoder.\n    void onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr);\n\n    bool buffersOverrun() const { return read_disable_count_ > 0; }\n\n    void encodeDataHelper(Buffer::Instance& data, bool end_stream,\n                          bool skip_encoding_empty_trailers);\n\n    ConnectionImpl& parent_;\n    int32_t stream_id_{-1};\n    uint32_t unconsumed_bytes_{0};\n    uint32_t read_disable_count_{0};\n    Buffer::WatermarkBuffer pending_recv_data_{\n        [this]() -> void { this->pendingRecvBufferLowWatermark(); },\n        [this]() -> void { this->pendingRecvBufferHighWatermark(); },\n        []() -> void { /* TODO(adisuissa): Handle overflow 
watermark */ }};\n    Buffer::WatermarkBuffer pending_send_data_{\n        [this]() -> void { this->pendingSendBufferLowWatermark(); },\n        [this]() -> void { this->pendingSendBufferHighWatermark(); },\n        []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }};\n    HeaderMapPtr pending_trailers_to_encode_;\n    std::unique_ptr<MetadataDecoder> metadata_decoder_;\n    std::unique_ptr<MetadataEncoder> metadata_encoder_;\n    absl::optional<StreamResetReason> deferred_reset_;\n    HeaderString cookies_;\n    bool local_end_stream_sent_ : 1;\n    bool remote_end_stream_ : 1;\n    bool data_deferred_ : 1;\n    bool received_noninformational_headers_ : 1;\n    bool pending_receive_buffer_high_watermark_called_ : 1;\n    bool pending_send_buffer_high_watermark_called_ : 1;\n    bool reset_due_to_messaging_error_ : 1;\n    absl::string_view details_;\n    // See HttpConnectionManager.stream_idle_timeout.\n    std::chrono::milliseconds stream_idle_timeout_{};\n    Event::TimerPtr stream_idle_timer_;\n  };\n\n  using StreamImplPtr = std::unique_ptr<StreamImpl>;\n\n  /**\n   * Client side stream (request).\n   */\n  struct ClientStreamImpl : public StreamImpl, public RequestEncoder {\n    ClientStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit,\n                     ResponseDecoder& response_decoder)\n        : StreamImpl(parent, buffer_limit), response_decoder_(response_decoder),\n          headers_or_trailers_(ResponseHeaderMapImpl::create()) {}\n\n    // StreamImpl\n    void submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                       nghttp2_data_provider* provider) override;\n    StreamDecoder& decoder() override { return response_decoder_; }\n    void decodeHeaders() override;\n    void decodeTrailers() override;\n    HeaderMap& headers() override {\n      if (absl::holds_alternative<ResponseHeaderMapPtr>(headers_or_trailers_)) {\n        return *absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n      } else {\n 
       return *absl::get<ResponseTrailerMapPtr>(headers_or_trailers_);\n      }\n    }\n    void allocTrailers() override {\n      // If we are waiting for informational headers, make a new response header map, otherwise\n      // we are about to receive trailers. The codec makes sure this is the only valid sequence.\n      if (received_noninformational_headers_) {\n        headers_or_trailers_.emplace<ResponseTrailerMapPtr>(ResponseTrailerMapImpl::create());\n      } else {\n        headers_or_trailers_.emplace<ResponseHeaderMapPtr>(ResponseHeaderMapImpl::create());\n      }\n    }\n    HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override {\n      return createHeaderMap<RequestTrailerMapImpl>(trailers);\n    }\n    void createPendingFlushTimer() override {\n      // Client streams do not create a flush timer because we currently assume that any failure\n      // to flush would be covered by a request/stream/etc. timeout.\n    }\n\n    // RequestEncoder\n    void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override;\n    void encodeTrailers(const RequestTrailerMap& trailers) override {\n      encodeTrailersBase(trailers);\n    }\n\n    ResponseDecoder& response_decoder_;\n    absl::variant<ResponseHeaderMapPtr, ResponseTrailerMapPtr> headers_or_trailers_;\n    std::string upgrade_type_;\n  };\n\n  using ClientStreamImplPtr = std::unique_ptr<ClientStreamImpl>;\n\n  /**\n   * Server side stream (response).\n   */\n  struct ServerStreamImpl : public StreamImpl, public ResponseEncoder {\n    ServerStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit)\n        : StreamImpl(parent, buffer_limit), headers_or_trailers_(RequestHeaderMapImpl::create()) {}\n\n    // StreamImpl\n    void submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                       nghttp2_data_provider* provider) override;\n    StreamDecoder& decoder() override { return *request_decoder_; }\n    void decodeHeaders() override;\n    void decodeTrailers() 
override;\n    HeaderMap& headers() override {\n      if (absl::holds_alternative<RequestHeaderMapPtr>(headers_or_trailers_)) {\n        return *absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n      } else {\n        return *absl::get<RequestTrailerMapPtr>(headers_or_trailers_);\n      }\n    }\n    void allocTrailers() override {\n      headers_or_trailers_.emplace<RequestTrailerMapPtr>(RequestTrailerMapImpl::create());\n    }\n    HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override {\n      return createHeaderMap<ResponseTrailerMapImpl>(trailers);\n    }\n    void createPendingFlushTimer() override;\n\n    // ResponseEncoder\n    void encode100ContinueHeaders(const ResponseHeaderMap& headers) override;\n    void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override;\n    void encodeTrailers(const ResponseTrailerMap& trailers) override {\n      encodeTrailersBase(trailers);\n    }\n\n    RequestDecoder* request_decoder_{};\n    absl::variant<RequestHeaderMapPtr, RequestTrailerMapPtr> headers_or_trailers_;\n\n    bool streamErrorOnInvalidHttpMessage() const override {\n      return parent_.stream_error_on_invalid_http_messaging_;\n    }\n  };\n\n  using ServerStreamImplPtr = std::unique_ptr<ServerStreamImpl>;\n\n  ConnectionImpl* base() { return this; }\n  // NOTE: Always use non debug nullptr checks against the return value of this function. There are\n  // edge cases (such as for METADATA frames) where nghttp2 will issue a callback for a stream_id\n  // that is not associated with an existing stream.\n  StreamImpl* getStream(int32_t stream_id);\n  int saveHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value);\n\n  /**\n   * Copies any frames pending internally by nghttp2 into outbound buffer.\n   * The `sendPendingFrames()` can be called in 4 different contexts:\n   * 1. dispatching_ == true, aka the dispatching context. 
The `sendPendingFrames()` is no-op and\n   *    always returns success to avoid reentering nghttp2 library.\n   * 2. dispatching_ == false and ServerConnectionImpl::dispatching_downstream_data_ == true.\n   *    The `sendPendingFrames()` returns the status of the protocol constraint checks. Outbound\n   *    frame accounting is performed. Applies to server codec only.\n   * 3. dispatching_ == false and ServerConnectionImpl::dispatching_downstream_data_ == false.\n   *    The `sendPendingFrames()` always returns success. Outbound frame accounting is performed.\n   *    Applies to server codec only.\n   * 4. dispatching_ == false. The `sendPendingFrames()` always returns success. No outbound\n   *    frame accounting.\n   *\n   * TODO(yanavlasov): harmonize behavior for cases 2, 3 and 4.\n   */\n  Status sendPendingFrames();\n  void sendSettings(const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                    bool disable_push);\n  // Callback triggered when the peer's SETTINGS frame is received.\n  // NOTE: This is only used for tests.\n  virtual void onSettingsForTest(const nghttp2_settings&) {}\n\n  /**\n   * Check if header name contains underscore character.\n   * Underscore character is allowed in header names by the RFC-7230 and this check is implemented\n   * as a security measure due to systems that treat '_' and '-' as interchangeable.\n   * The ServerConnectionImpl may drop header or reject request based on the\n   * `common_http_protocol_options.headers_with_underscores_action` configuration option in the\n   * HttpConnectionManager.\n   */\n  virtual absl::optional<int> checkHeaderNameForUnderscores(absl::string_view /* header_name */) {\n    return absl::nullopt;\n  }\n\n  /**\n   * Save `status` into nghttp2_callback_status_.\n   * Return nghttp2 callback return code corresponding to `status`.\n   */\n  int setAndCheckNghttp2CallbackStatus(Status&& status);\n\n  /**\n   * This method checks if a protocol constraint had been 
violated in the sendPendingFrames() call.\n   * This method is a stop-gap solution for harmonizing sendPendingFrames() behavior in contexts 2\n   * and 3 (see comments for the sendPendingFrames() method). It allows each case where\n   * sendPendingFrames() is called outside of the dispatch context to be fixed in its own PR so it\n   * is easier to review and reason about. Once all error handling is implemented this method will\n   * be removed and the `sendPendingFrames()` will be changed to return error in both contexts 2\n   * and 3. At the same time the RELEASE_ASSERTs will be removed as well. The implementation in the\n   * ClientConnectionImpl is a no-op as client connections do not check protocol constraints. The\n   * implementation in the ServerConnectionImpl schedules callback to terminate connection if the\n   * protocol constraint was violated.\n   */\n  virtual void checkProtocolConstraintViolation() PURE;\n\n  /**\n   * Callback for terminating connection when protocol constrain has been violated\n   * outside of the dispatch context.\n   */\n  void scheduleProtocolConstraintViolationCallback();\n  void onProtocolConstraintViolation();\n\n  static Http2Callbacks http2_callbacks_;\n\n  std::list<StreamImplPtr> active_streams_;\n  nghttp2_session* session_{};\n  CodecStats& stats_;\n  Network::Connection& connection_;\n  const uint32_t max_headers_kb_;\n  const uint32_t max_headers_count_;\n  uint32_t per_stream_buffer_limit_;\n  bool allow_metadata_;\n  const bool stream_error_on_invalid_http_messaging_;\n\n  // Status for any errors encountered by the nghttp2 callbacks.\n  // nghttp2 library uses single return code to indicate callback failure and\n  // `nghttp2_callback_status_` is used to save right error information returned by a callback. 
The\n  // `nghttp2_callback_status_` is valid iff nghttp call returned NGHTTP2_ERR_CALLBACK_FAILURE.\n  Status nghttp2_callback_status_;\n\n  // Set if the type of frame that is about to be sent is PING or SETTINGS with the ACK flag set, or\n  // RST_STREAM.\n  bool is_outbound_flood_monitored_control_frame_ = 0;\n  ProtocolConstraints protocol_constraints_;\n\n  // For the flood mitigation to work the onSend callback must be called once for each outbound\n  // frame. This is what the nghttp2 library is doing, however this is not documented. The\n  // Http2FloodMitigationTest.* tests in test/integration/http2_integration_test.cc will break if\n  // this changes in the future. Also it is important that onSend does not do partial writes, as the\n  // nghttp2 library will keep calling this callback to write the rest of the frame.\n  StatusOr<ssize_t> onSend(const uint8_t* data, size_t length);\n\n  // Some browsers (e.g. WebKit-based browsers: https://bugs.webkit.org/show_bug.cgi?id=210108) have\n  // a problem with processing empty trailers (END_STREAM | END_HEADERS with zero length HEADERS) of\n  // an HTTP/2 response as reported here: https://github.com/envoyproxy/envoy/issues/10514. 
This is\n  // controlled by \"envoy.reloadable_features.http2_skip_encoding_empty_trailers\" runtime feature\n  // flag.\n  const bool skip_encoding_empty_trailers_;\n\nprivate:\n  virtual ConnectionCallbacks& callbacks() PURE;\n  virtual Status onBeginHeaders(const nghttp2_frame* frame) PURE;\n  int onData(int32_t stream_id, const uint8_t* data, size_t len);\n  Status onBeforeFrameReceived(const nghttp2_frame_hd* hd);\n  Status onFrameReceived(const nghttp2_frame* frame);\n  int onBeforeFrameSend(const nghttp2_frame* frame);\n  int onFrameSend(const nghttp2_frame* frame);\n  int onError(absl::string_view error);\n  virtual int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) PURE;\n  int onInvalidFrame(int32_t stream_id, int error_code);\n  int onStreamClose(int32_t stream_id, uint32_t error_code);\n  int onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len);\n  int onMetadataFrameComplete(int32_t stream_id, bool end_metadata);\n  ssize_t packMetadata(int32_t stream_id, uint8_t* buf, size_t len);\n  // Adds buffer fragment for a new outbound frame to the supplied Buffer::OwnedImpl.\n  // Returns Ok Status on success or error if outbound queue limits were exceeded.\n  Status addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, size_t length);\n  virtual StatusOr<ProtocolConstraints::ReleasorProc>\n  trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) PURE;\n  virtual Status trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE;\n  void sendKeepalive();\n  void onKeepaliveResponse();\n  void onKeepaliveResponseTimeout();\n\n  bool dispatching_ : 1;\n  bool raised_goaway_ : 1;\n  bool pending_deferred_reset_ : 1;\n  Event::SchedulableCallbackPtr protocol_constraint_violation_callback_;\n  Random::RandomGenerator& random_;\n  Event::TimerPtr keepalive_send_timer_;\n  Event::TimerPtr keepalive_timeout_timer_;\n  std::chrono::milliseconds keepalive_interval_;\n  
std::chrono::milliseconds keepalive_timeout_;\n  uint32_t keepalive_interval_jitter_percent_;\n};\n\n/**\n * HTTP/2 client connection codec.\n */\nclass ClientConnectionImpl : public ClientConnection, public ConnectionImpl {\npublic:\n  using SessionFactory = Nghttp2SessionFactory;\n  ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks& callbacks,\n                       CodecStats& stats, Random::RandomGenerator& random_generator,\n                       const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                       const uint32_t max_response_headers_kb,\n                       const uint32_t max_response_headers_count,\n                       SessionFactory& http2_session_factory);\n\n  // Http::ClientConnection\n  RequestEncoder& newStream(ResponseDecoder& response_decoder) override;\n\nprivate:\n  // ConnectionImpl\n  ConnectionCallbacks& callbacks() override { return callbacks_; }\n  Status onBeginHeaders(const nghttp2_frame* frame) override;\n  int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override;\n\n  // Presently client connections do not track or check queue limits for outbound frames and do not\n  // terminate connections when queue limits are exceeded. The primary reason is the complexity of\n  // the clean-up of upstream connections. The clean-up of upstream connection causes RST_STREAM\n  // messages to be sent on corresponding downstream connections. 
This may actually trigger flood\n  // mitigation on the downstream connections, however there is currently no mechanism for\n  // handling these types of errors.\n  // TODO(yanavlasov): add flood mitigation for upstream connections as well.\n  StatusOr<ProtocolConstraints::ReleasorProc> trackOutboundFrames(bool) override {\n    return ProtocolConstraints::ReleasorProc([]() {});\n  }\n  Status trackInboundFrames(const nghttp2_frame_hd*, uint32_t) override { return okStatus(); }\n  void checkProtocolConstraintViolation() override {}\n\n  Http::ConnectionCallbacks& callbacks_;\n};\n\n/**\n * HTTP/2 server connection codec.\n */\nclass ServerConnectionImpl : public ServerConnection, public ConnectionImpl {\npublic:\n  ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks,\n                       CodecStats& stats, Random::RandomGenerator& random_generator,\n                       const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                       const uint32_t max_request_headers_kb,\n                       const uint32_t max_request_headers_count,\n                       envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n                           headers_with_underscores_action);\n\nprivate:\n  // ConnectionImpl\n  ConnectionCallbacks& callbacks() override { return callbacks_; }\n  Status onBeginHeaders(const nghttp2_frame* frame) override;\n  int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override;\n  StatusOr<ProtocolConstraints::ReleasorProc>\n  trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) override;\n  Status trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) override;\n  absl::optional<int> checkHeaderNameForUnderscores(absl::string_view header_name) override;\n\n  /**\n   * Check protocol constraint violations outside of the dispatching context.\n   * This method ASSERTs if it is called in the 
dispatching context.\n   */\n  void checkProtocolConstraintViolation() override;\n\n  // Http::Connection\n  // The reason for overriding the dispatch method is to do flood mitigation only when\n  // processing data from downstream client. Doing flood mitigation when processing upstream\n  // responses makes clean-up tricky, which needs to be improved (see comments for the\n  // ClientConnectionImpl::checkProtocolConstraintsStatus method). The dispatch method on the\n  // ServerConnectionImpl objects is called only when processing data from the downstream client in\n  // the ConnectionManagerImpl::onData method.\n  Http::Status dispatch(Buffer::Instance& data) override;\n  Http::Status innerDispatch(Buffer::Instance& data) override;\n\n  ServerConnectionCallbacks& callbacks_;\n\n  // This flag indicates that downstream data is being dispatched and turns on flood mitigation\n  // in the checkMaxOutbound*Framed methods.\n  bool dispatching_downstream_data_{false};\n\n  // The action to take when a request header name contains underscore characters.\n  envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n      headers_with_underscores_action_;\n};\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/codec_impl_legacy.cc",
    "content": "#include \"common/http/http2/codec_impl_legacy.h\"\n\n#include <cstdint>\n#include <memory>\n#include <vector>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/http/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Legacy {\nnamespace Http2 {\n\nclass Http2ResponseCodeDetailValues {\npublic:\n  // Invalid HTTP header field was received and stream is going to be\n  // closed.\n  const absl::string_view ng_http2_err_http_header_ = \"http2.invalid.header.field\";\n  // Violation in HTTP messaging rule.\n  const absl::string_view ng_http2_err_http_messaging_ = \"http2.violation.of.messaging.rule\";\n  // none of the above\n  const absl::string_view ng_http2_err_unknown_ = \"http2.unknown.nghttp2.error\";\n  // The number of headers (or trailers) exceeded the configured limits\n  const absl::string_view too_many_headers = \"http2.too_many_headers\";\n  // Envoy detected an HTTP/2 frame flood from the server.\n  const absl::string_view outbound_frame_flood = \"http2.outbound_frames_flood\";\n  // Envoy detected an inbound HTTP/2 frame flood.\n  const absl::string_view inbound_empty_frame_flood = \"http2.inbound_empty_frames_flood\";\n  // Envoy was configured to drop requests with header keys beginning with underscores.\n  const absl::string_view invalid_underscore = \"http2.unexpected_underscore\";\n  // The upstream refused the stream.\n  const 
absl::string_view remote_refused = \"http2.remote_refuse\";\n  // The upstream reset the stream.\n  const absl::string_view remote_reset = \"http2.remote_reset\";\n\n  const absl::string_view errorDetails(int error_code) const {\n    switch (error_code) {\n    case NGHTTP2_ERR_HTTP_HEADER:\n      return ng_http2_err_http_header_;\n    case NGHTTP2_ERR_HTTP_MESSAGING:\n      return ng_http2_err_http_messaging_;\n    default:\n      return ng_http2_err_unknown_;\n    }\n  }\n};\n\nusing Http2ResponseCodeDetails = ConstSingleton<Http2ResponseCodeDetailValues>;\nusing Http::Http2::CodecStats;\nusing Http::Http2::MetadataDecoder;\nusing Http::Http2::MetadataEncoder;\n\nbool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value,\n                                          HeaderString& cookies) {\n  if (key != Headers::get().Cookie.get().c_str()) {\n    return false;\n  }\n\n  if (!cookies.empty()) {\n    cookies.append(\"; \", 2);\n  }\n\n  const absl::string_view value_view = value.getStringView();\n  cookies.append(value_view.data(), value_view.size());\n  return true;\n}\n\nConnectionImpl::Http2Callbacks ConnectionImpl::http2_callbacks_;\n\nnghttp2_session* ProdNghttp2SessionFactory::create(const nghttp2_session_callbacks* callbacks,\n                                                   ConnectionImpl* connection,\n                                                   const nghttp2_option* options) {\n  nghttp2_session* session;\n  nghttp2_session_client_new2(&session, callbacks, connection, options);\n  return session;\n}\n\nvoid ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connection,\n                                     const envoy::config::core::v3::Http2ProtocolOptions& options) {\n  connection->sendSettings(options, true);\n}\n\n/**\n * Helper to remove const during a cast. 
nghttp2 takes non-const pointers for headers even though\n * it copies them.\n */\ntemplate <typename T> static T* removeConst(const void* object) {\n  return const_cast<T*>(reinterpret_cast<const T*>(object));\n}\n\nConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit)\n    : parent_(parent), local_end_stream_sent_(false), remote_end_stream_(false),\n      data_deferred_(false), received_noninformational_headers_(false),\n      pending_receive_buffer_high_watermark_called_(false),\n      pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) {\n  parent_.stats_.streams_active_.inc();\n  if (buffer_limit > 0) {\n    setWriteBufferWatermarks(buffer_limit / 2, buffer_limit);\n  }\n}\n\nConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); }\n\nvoid ConnectionImpl::StreamImpl::destroy() {\n  disarmStreamIdleTimer();\n  parent_.stats_.streams_active_.dec();\n  parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length());\n}\n\nstatic void insertHeader(std::vector<nghttp2_nv>& headers, const HeaderEntry& header) {\n  uint8_t flags = 0;\n  if (header.key().isReference()) {\n    flags |= NGHTTP2_NV_FLAG_NO_COPY_NAME;\n  }\n  if (header.value().isReference()) {\n    flags |= NGHTTP2_NV_FLAG_NO_COPY_VALUE;\n  }\n  const absl::string_view header_key = header.key().getStringView();\n  const absl::string_view header_value = header.value().getStringView();\n  headers.push_back({removeConst<uint8_t>(header_key.data()),\n                     removeConst<uint8_t>(header_value.data()), header_key.size(),\n                     header_value.size(), flags});\n}\n\nvoid ConnectionImpl::StreamImpl::buildHeaders(std::vector<nghttp2_nv>& final_headers,\n                                              const HeaderMap& headers) {\n  final_headers.reserve(headers.size());\n  headers.iterate([&final_headers](const HeaderEntry& header) -> HeaderMap::Iterate {\n    insertHeader(final_headers, 
header);\n    return HeaderMap::Iterate::Continue;\n  });\n}\n\nvoid ConnectionImpl::ServerStreamImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) {\n  ASSERT(headers.Status()->value() == \"100\");\n  encodeHeaders(headers, false);\n}\n\nvoid ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector<nghttp2_nv>& final_headers,\n                                                   bool end_stream) {\n  nghttp2_data_provider provider;\n  if (!end_stream) {\n    provider.source.ptr = this;\n    provider.read_callback = [](nghttp2_session*, int32_t, uint8_t*, size_t length,\n                                uint32_t* data_flags, nghttp2_data_source* source,\n                                void*) -> ssize_t {\n      return static_cast<StreamImpl*>(source->ptr)->onDataSourceRead(length, data_flags);\n    };\n  }\n\n  local_end_stream_ = end_stream;\n  submitHeaders(final_headers, end_stream ? nullptr : &provider);\n  parent_.sendPendingFrames();\n  parent_.checkProtocolConstraintViolation();\n}\n\nvoid ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers,\n                                                     bool end_stream) {\n  // This must exist outside of the scope of isUpgrade as the underlying memory is\n  // needed until encodeHeadersBase has been called.\n  std::vector<nghttp2_nv> final_headers;\n  Http::RequestHeaderMapPtr modified_headers;\n  if (Http::Utility::isUpgrade(headers)) {\n    modified_headers = createHeaderMap<RequestHeaderMapImpl>(headers);\n    upgrade_type_ = std::string(headers.getUpgradeValue());\n    Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers);\n    buildHeaders(final_headers, *modified_headers);\n  } else if (headers.Method() && headers.Method()->value() == \"CONNECT\") {\n    // If this is not an upgrade style connect (above branch) it is a bytestream\n    // connect and should have :path and :protocol set accordingly\n    // As HTTP/1.1 does not require a path for 
CONNECT, we may have to add one\n    // if shifting codecs. For now, default to \"/\" - this can be made\n    // configurable if necessary.\n    // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02\n    modified_headers = createHeaderMap<RequestHeaderMapImpl>(headers);\n    modified_headers->setProtocol(Headers::get().ProtocolValues.Bytestream);\n    if (!headers.Path()) {\n      modified_headers->setPath(\"/\");\n    }\n    buildHeaders(final_headers, *modified_headers);\n  } else {\n    buildHeaders(final_headers, headers);\n  }\n  encodeHeadersBase(final_headers, end_stream);\n}\n\nvoid ConnectionImpl::ServerStreamImpl::encodeHeaders(const ResponseHeaderMap& headers,\n                                                     bool end_stream) {\n  // The contract is that client codecs must ensure that :status is present.\n  ASSERT(headers.Status() != nullptr);\n\n  // This must exist outside of the scope of isUpgrade as the underlying memory is\n  // needed until encodeHeadersBase has been called.\n  std::vector<nghttp2_nv> final_headers;\n  Http::ResponseHeaderMapPtr modified_headers;\n  if (Http::Utility::isUpgrade(headers)) {\n    modified_headers = createHeaderMap<ResponseHeaderMapImpl>(headers);\n    Http::Utility::transformUpgradeResponseFromH1toH2(*modified_headers);\n    buildHeaders(final_headers, *modified_headers);\n  } else {\n    buildHeaders(final_headers, headers);\n  }\n  encodeHeadersBase(final_headers, end_stream);\n}\n\nvoid ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) {\n  ASSERT(!local_end_stream_);\n  local_end_stream_ = true;\n  if (pending_send_data_.length() > 0) {\n    // In this case we want trailers to come after we release all pending body data that is\n    // waiting on window updates. 
We need to save the trailers so that we can emit them later.\n    // However, for empty trailers, we don't need to to save the trailers.\n    ASSERT(!pending_trailers_to_encode_);\n    const bool skip_encoding_empty_trailers =\n        trailers.empty() && parent_.skip_encoding_empty_trailers_;\n    if (!skip_encoding_empty_trailers) {\n      pending_trailers_to_encode_ = cloneTrailers(trailers);\n      createPendingFlushTimer();\n    }\n  } else {\n    submitTrailers(trailers);\n    parent_.sendPendingFrames();\n  }\n}\n\nvoid ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadata_map_vector) {\n  ASSERT(parent_.allow_metadata_);\n  MetadataEncoder& metadata_encoder = getMetadataEncoder();\n  if (!metadata_encoder.createPayload(metadata_map_vector)) {\n    return;\n  }\n  for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) {\n    submitMetadata(flags);\n  }\n  parent_.sendPendingFrames();\n}\n\nvoid ConnectionImpl::StreamImpl::readDisable(bool disable) {\n  ENVOY_CONN_LOG(debug, \"Stream {} {}, unconsumed_bytes {} read_disable_count {}\",\n                 parent_.connection_, stream_id_, (disable ? 
\"disabled\" : \"enabled\"),\n                 unconsumed_bytes_, read_disable_count_);\n  if (disable) {\n    ++read_disable_count_;\n  } else {\n    ASSERT(read_disable_count_ > 0);\n    --read_disable_count_;\n    if (!buffersOverrun()) {\n      nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_);\n      unconsumed_bytes_ = 0;\n      parent_.sendPendingFrames();\n    }\n  }\n}\n\nvoid ConnectionImpl::StreamImpl::pendingRecvBufferHighWatermark() {\n  ENVOY_CONN_LOG(debug, \"recv buffer over limit \", parent_.connection_);\n  ASSERT(!pending_receive_buffer_high_watermark_called_);\n  pending_receive_buffer_high_watermark_called_ = true;\n  readDisable(true);\n}\n\nvoid ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark() {\n  ENVOY_CONN_LOG(debug, \"recv buffer under limit \", parent_.connection_);\n  ASSERT(pending_receive_buffer_high_watermark_called_);\n  pending_receive_buffer_high_watermark_called_ = false;\n  readDisable(false);\n}\n\nvoid ConnectionImpl::ClientStreamImpl::decodeHeaders() {\n  auto& headers = absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n  const uint64_t status = Http::Utility::getResponseStatus(*headers);\n\n  if (!upgrade_type_.empty() && headers->Status()) {\n    Http::Utility::transformUpgradeResponseFromH2toH1(*headers, upgrade_type_);\n  }\n\n  // Non-informational headers are non-1xx OR 101-SwitchingProtocols, since 101 implies that further\n  // proxying is on an upgrade path.\n  received_noninformational_headers_ =\n      !CodeUtility::is1xx(status) || status == enumToInt(Http::Code::SwitchingProtocols);\n\n  if (status == enumToInt(Http::Code::Continue)) {\n    ASSERT(!remote_end_stream_);\n    response_decoder_.decode100ContinueHeaders(std::move(headers));\n  } else {\n    response_decoder_.decodeHeaders(std::move(headers), remote_end_stream_);\n  }\n}\n\nvoid ConnectionImpl::ClientStreamImpl::decodeTrailers() {\n  response_decoder_.decodeTrailers(\n      
std::move(absl::get<ResponseTrailerMapPtr>(headers_or_trailers_)));\n}\n\nvoid ConnectionImpl::ServerStreamImpl::decodeHeaders() {\n  auto& headers = absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n  if (Http::Utility::isH2UpgradeRequest(*headers)) {\n    Http::Utility::transformUpgradeRequestFromH2toH1(*headers);\n  }\n  request_decoder_->decodeHeaders(std::move(headers), remote_end_stream_);\n}\n\nvoid ConnectionImpl::ServerStreamImpl::decodeTrailers() {\n  request_decoder_->decodeTrailers(\n      std::move(absl::get<RequestTrailerMapPtr>(headers_or_trailers_)));\n}\n\nvoid ConnectionImpl::StreamImpl::pendingSendBufferHighWatermark() {\n  ENVOY_CONN_LOG(debug, \"send buffer over limit \", parent_.connection_);\n  ASSERT(!pending_send_buffer_high_watermark_called_);\n  pending_send_buffer_high_watermark_called_ = true;\n  runHighWatermarkCallbacks();\n}\n\nvoid ConnectionImpl::StreamImpl::pendingSendBufferLowWatermark() {\n  ENVOY_CONN_LOG(debug, \"send buffer under limit \", parent_.connection_);\n  ASSERT(pending_send_buffer_high_watermark_called_);\n  pending_send_buffer_high_watermark_called_ = false;\n  runLowWatermarkCallbacks();\n}\n\nvoid ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& value) {\n  if (!Utility::reconstituteCrumbledCookies(name, value, cookies_)) {\n    headers().addViaMove(std::move(name), std::move(value));\n  }\n}\n\nvoid ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) {\n  ASSERT(local_end_stream_);\n  const bool skip_encoding_empty_trailers =\n      trailers.empty() && parent_.skip_encoding_empty_trailers_;\n  if (skip_encoding_empty_trailers) {\n    ENVOY_CONN_LOG(debug, \"skipping submitting trailers\", parent_.connection_);\n\n    // Instead of submitting empty trailers, we send empty data instead.\n    Buffer::OwnedImpl empty_buffer;\n    encodeDataHelper(empty_buffer, /*end_stream=*/true, skip_encoding_empty_trailers);\n    return;\n  }\n\n  std::vector<nghttp2_nv> 
final_headers;\n  buildHeaders(final_headers, trailers);\n  int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(),\n                                  final_headers.size());\n  ASSERT(rc == 0);\n}\n\nvoid ConnectionImpl::StreamImpl::submitMetadata(uint8_t flags) {\n  ASSERT(stream_id_ > 0);\n  const int result =\n      nghttp2_submit_extension(parent_.session_, METADATA_FRAME_TYPE, flags, stream_id_, nullptr);\n  ASSERT(result == 0);\n}\n\nssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* data_flags) {\n  if (pending_send_data_.length() == 0 && !local_end_stream_) {\n    ASSERT(!data_deferred_);\n    data_deferred_ = true;\n    return NGHTTP2_ERR_DEFERRED;\n  } else {\n    *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;\n    if (local_end_stream_ && pending_send_data_.length() <= length) {\n      *data_flags |= NGHTTP2_DATA_FLAG_EOF;\n      if (pending_trailers_to_encode_) {\n        // We need to tell the library to not set end stream so that we can emit the trailers.\n        *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM;\n        submitTrailers(*pending_trailers_to_encode_);\n        pending_trailers_to_encode_.reset();\n      }\n    }\n\n    return std::min(length, pending_send_data_.length());\n  }\n}\n\nint ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) {\n  // In this callback we are writing out a raw DATA frame without copying. 
nghttp2 assumes that we\n  // \"just know\" that the frame header is 9 bytes.\n  // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback\n  static const uint64_t FRAME_HEADER_SIZE = 9;\n\n  parent_.protocol_constraints_.incrementOutboundDataFrameCount();\n\n  Buffer::OwnedImpl output;\n  if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) {\n    ENVOY_CONN_LOG(debug, \"error sending data frame: Too many frames in the outbound queue\",\n                   parent_.connection_);\n    setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood);\n    return NGHTTP2_ERR_FLOODED;\n  }\n\n  parent_.stats_.pending_send_bytes_.sub(length);\n  output.move(pending_send_data_, length);\n  parent_.connection_.write(output, false);\n  return 0;\n}\n\nvoid ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                                                     nghttp2_data_provider* provider) {\n  ASSERT(stream_id_ == -1);\n  stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, final_headers.data(),\n                                      final_headers.size(), provider, base());\n  ASSERT(stream_id_ > 0);\n}\n\nvoid ConnectionImpl::ServerStreamImpl::submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                                                     nghttp2_data_provider* provider) {\n  ASSERT(stream_id_ != -1);\n  int rc = nghttp2_submit_response(parent_.session_, stream_id_, final_headers.data(),\n                                   final_headers.size(), provider);\n  ASSERT(rc == 0);\n}\n\nvoid ConnectionImpl::ServerStreamImpl::createPendingFlushTimer() {\n  ASSERT(stream_idle_timer_ == nullptr);\n  if (stream_idle_timeout_.count() > 0) {\n    stream_idle_timer_ =\n        parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); });\n    stream_idle_timer_->enableTimer(stream_idle_timeout_);\n  }\n}\n\nvoid 
ConnectionImpl::StreamImpl::onPendingFlushTimer() {\n  ENVOY_CONN_LOG(debug, \"pending stream flush timeout\", parent_.connection_);\n  stream_idle_timer_.reset();\n  parent_.stats_.tx_flush_timeout_.inc();\n  ASSERT(local_end_stream_ && !local_end_stream_sent_);\n  // This will emit a reset frame for this stream and close the stream locally. No reset callbacks\n  // will be run because higher layers think the stream is already finished.\n  resetStreamWorker(StreamResetReason::LocalReset);\n  parent_.sendPendingFrames();\n}\n\nvoid ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) {\n  ASSERT(!local_end_stream_);\n  encodeDataHelper(data, end_stream, /*skip_encoding_empty_trailers=*/false);\n}\n\nvoid ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool end_stream,\n                                                  bool skip_encoding_empty_trailers) {\n  if (skip_encoding_empty_trailers) {\n    ASSERT(data.length() == 0 && end_stream);\n  }\n\n  local_end_stream_ = end_stream;\n  parent_.stats_.pending_send_bytes_.add(data.length());\n  pending_send_data_.move(data);\n  if (data_deferred_) {\n    int rc = nghttp2_session_resume_data(parent_.session_, stream_id_);\n    ASSERT(rc == 0);\n\n    data_deferred_ = false;\n  }\n\n  parent_.sendPendingFrames();\n  parent_.checkProtocolConstraintViolation();\n\n  if (local_end_stream_ && pending_send_data_.length() > 0) {\n    createPendingFlushTimer();\n  }\n}\n\nvoid ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) {\n  // Higher layers expect calling resetStream() to immediately raise reset callbacks.\n  runResetCallbacks(reason);\n\n  // If we submit a reset, nghttp2 will cancel outbound frames that have not yet been sent.\n  // We want these frames to go out so we defer the reset until we send all of the frames that\n  // end the local stream.\n  if (local_end_stream_ && !local_end_stream_sent_) {\n    parent_.pending_deferred_reset_ = true;\n    
deferred_reset_ = reason;\n    ENVOY_CONN_LOG(trace, \"deferred reset stream\", parent_.connection_);\n  } else {\n    resetStreamWorker(reason);\n  }\n\n  // We must still call sendPendingFrames() in both the deferred and not deferred path. This forces\n  // the cleanup logic to run which will reset the stream in all cases if all data frames could not\n  // be sent.\n  parent_.sendPendingFrames();\n}\n\nvoid ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) {\n  int rc = nghttp2_submit_rst_stream(parent_.session_, NGHTTP2_FLAG_NONE, stream_id_,\n                                     reason == StreamResetReason::LocalRefusedStreamReset\n                                         ? NGHTTP2_REFUSED_STREAM\n                                         : NGHTTP2_NO_ERROR);\n  ASSERT(rc == 0);\n}\n\nMetadataEncoder& ConnectionImpl::StreamImpl::getMetadataEncoder() {\n  if (metadata_encoder_ == nullptr) {\n    metadata_encoder_ = std::make_unique<MetadataEncoder>();\n  }\n  return *metadata_encoder_;\n}\n\nMetadataDecoder& ConnectionImpl::StreamImpl::getMetadataDecoder() {\n  if (metadata_decoder_ == nullptr) {\n    auto cb = [this](MetadataMapPtr&& metadata_map_ptr) {\n      this->onMetadataDecoded(std::move(metadata_map_ptr));\n    };\n    metadata_decoder_ = std::make_unique<MetadataDecoder>(cb);\n  }\n  return *metadata_decoder_;\n}\n\nvoid ConnectionImpl::StreamImpl::onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr) {\n  decoder().decodeMetadata(std::move(metadata_map_ptr));\n}\n\nConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats,\n                               Random::RandomGenerator& random,\n                               const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                               const uint32_t max_headers_kb, const uint32_t max_headers_count)\n    : stats_(stats), connection_(connection), max_headers_kb_(max_headers_kb),\n      max_headers_count_(max_headers_count),\n     
 per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()),\n      stream_error_on_invalid_http_messaging_(\n          http2_options.override_stream_error_on_invalid_http_message().value()),\n      flood_detected_(false), protocol_constraints_(stats, http2_options),\n      skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.http2_skip_encoding_empty_trailers\")),\n      dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false), random_(random) {\n  if (http2_options.has_connection_keepalive()) {\n    keepalive_interval_ = std::chrono::milliseconds(\n        PROTOBUF_GET_MS_REQUIRED(http2_options.connection_keepalive(), interval));\n    keepalive_timeout_ = std::chrono::milliseconds(\n        PROTOBUF_GET_MS_REQUIRED(http2_options.connection_keepalive(), timeout));\n    keepalive_interval_jitter_percent_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n        http2_options.connection_keepalive(), interval_jitter, 15.0);\n\n    keepalive_send_timer_ = connection.dispatcher().createTimer([this]() { sendKeepalive(); });\n    keepalive_timeout_timer_ =\n        connection.dispatcher().createTimer([this]() { onKeepaliveResponseTimeout(); });\n\n    // This call schedules the initial interval, with jitter.\n    onKeepaliveResponse();\n  }\n}\n\nConnectionImpl::~ConnectionImpl() {\n  for (const auto& stream : active_streams_) {\n    stream->destroy();\n  }\n  nghttp2_session_del(session_);\n}\n\nvoid ConnectionImpl::sendKeepalive() {\n  // Include the current time as the payload to help with debugging.\n  SystemTime now = connection_.dispatcher().timeSource().systemTime();\n  uint64_t ms_since_epoch =\n      std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count();\n  ENVOY_CONN_LOG(trace, \"Sending keepalive PING {}\", connection_, ms_since_epoch);\n\n  // The last parameter is an opaque 8-byte buffer, so this cast is safe.\n  int rc = nghttp2_submit_ping(session_, 0 
/*flags*/, reinterpret_cast<uint8_t*>(&ms_since_epoch));\n  ASSERT(rc == 0);\n  sendPendingFrames();\n\n  keepalive_timeout_timer_->enableTimer(keepalive_timeout_);\n}\n\nvoid ConnectionImpl::onKeepaliveResponse() {\n  // Check the timers for nullptr in case the peer sent an unsolicited PING ACK.\n  if (keepalive_timeout_timer_ != nullptr) {\n    keepalive_timeout_timer_->disableTimer();\n  }\n  if (keepalive_send_timer_ != nullptr) {\n    uint64_t interval_ms = keepalive_interval_.count();\n    const uint64_t jitter_percent_mod = keepalive_interval_jitter_percent_ * interval_ms / 100;\n    if (jitter_percent_mod > 0) {\n      interval_ms += random_.random() % jitter_percent_mod;\n    }\n    keepalive_send_timer_->enableTimer(std::chrono::milliseconds(interval_ms));\n  }\n}\n\nvoid ConnectionImpl::onKeepaliveResponseTimeout() {\n  ENVOY_CONN_LOG(debug, \"Closing connection due to keepalive timeout\", connection_);\n  stats_.keepalive_timeout_.inc();\n  connection_.close(Network::ConnectionCloseType::NoFlush);\n}\n\nHttp::Status ConnectionImpl::dispatch(Buffer::Instance& data) {\n  // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either\n  // throw an exception or return an error status. 
The utility wrapper catches exceptions and\n  // converts them to error statuses.\n  return Http::Utility::exceptionToStatus(\n      [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data);\n}\n\nHttp::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) {\n  ENVOY_CONN_LOG(trace, \"dispatching {} bytes\", connection_, data.length());\n  // Make sure that dispatching_ is set to false after dispatching, even when\n  // ConnectionImpl::dispatch returns early or throws an exception (consider removing if there is a\n  // single return after exception removal (#10878)).\n  Cleanup cleanup([this]() { dispatching_ = false; });\n  for (const Buffer::RawSlice& slice : data.getRawSlices()) {\n    dispatching_ = true;\n    ssize_t rc =\n        nghttp2_session_mem_recv(session_, static_cast<const uint8_t*>(slice.mem_), slice.len_);\n    if (rc == NGHTTP2_ERR_FLOODED || flood_detected_) {\n      throw FrameFloodException(\n          \"Flooding was detected in this HTTP/2 session, and it must be closed\");\n    }\n    if (rc != static_cast<ssize_t>(slice.len_)) {\n      throw CodecProtocolException(fmt::format(\"{}\", nghttp2_strerror(rc)));\n    }\n\n    dispatching_ = false;\n  }\n\n  ENVOY_CONN_LOG(trace, \"dispatched {} bytes\", connection_, data.length());\n  data.drain(data.length());\n\n  // Decoding incoming frames can generate outbound frames so flush pending.\n  sendPendingFrames();\n  return Http::okStatus();\n}\n\nConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) {\n  return static_cast<StreamImpl*>(nghttp2_session_get_stream_user_data(session_, stream_id));\n}\n\nint ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) {\n  StreamImpl* stream = getStream(stream_id);\n  // If this results in buffering too much data, the watermark buffer will call\n  // pendingRecvBufferHighWatermark, resulting in ++read_disable_count_\n  stream->pending_recv_data_.add(data, len);\n  // Update the window 
to the peer unless some consumer of this stream's data has hit a flow control\n  // limit and disabled reads on this stream\n  if (!stream->buffersOverrun()) {\n    nghttp2_session_consume(session_, stream_id, len);\n  } else {\n    stream->unconsumed_bytes_ += len;\n  }\n  return 0;\n}\n\nvoid ConnectionImpl::goAway() {\n  int rc = nghttp2_submit_goaway(session_, NGHTTP2_FLAG_NONE,\n                                 nghttp2_session_get_last_proc_stream_id(session_),\n                                 NGHTTP2_NO_ERROR, nullptr, 0);\n  ASSERT(rc == 0);\n\n  sendPendingFrames();\n}\n\nvoid ConnectionImpl::shutdownNotice() {\n  int rc = nghttp2_submit_shutdown_notice(session_);\n  ASSERT(rc == 0);\n\n  sendPendingFrames();\n}\n\nint ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) {\n  ENVOY_CONN_LOG(trace, \"about to recv frame type={}, flags={}\", connection_,\n                 static_cast<uint64_t>(hd->type), static_cast<uint64_t>(hd->flags));\n\n  // Track all the frames without padding here, since this is the only callback we receive\n  // for some of them (e.g. 
CONTINUATION frame, frames sent on closed streams, etc.).\n  // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived().\n  if (hd->type != NGHTTP2_HEADERS && hd->type != NGHTTP2_DATA) {\n    if (!trackInboundFrames(hd, 0)) {\n      return NGHTTP2_ERR_FLOODED;\n    }\n  }\n\n  return 0;\n}\n\nABSL_MUST_USE_RESULT\nenum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept {\n  switch (code) {\n  case NGHTTP2_NO_ERROR:\n    return GoAwayErrorCode::NoError;\n  default:\n    return GoAwayErrorCode::Other;\n  }\n}\n\nint ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) {\n  ENVOY_CONN_LOG(trace, \"recv frame type={}\", connection_, static_cast<uint64_t>(frame->hd.type));\n\n  // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS\n  // and CONTINUATION frames, but we track them separately: HEADERS frames in onBeginHeaders()\n  // and CONTINUATION frames in onBeforeFrameReceived().\n  ASSERT(frame->hd.type != NGHTTP2_CONTINUATION);\n\n  if ((frame->hd.type == NGHTTP2_PING) && (frame->ping.hd.flags & NGHTTP2_FLAG_ACK)) {\n    // The ``opaque_data`` should be exactly what was sent in the ping, which is\n    // was the current time when the ping was sent. This can be useful while debugging\n    // to match the ping and ack.\n    uint64_t data;\n    static_assert(sizeof(data) == sizeof(frame->ping.opaque_data), \"Sizes are equal\");\n    memcpy(&data, frame->ping.opaque_data, sizeof(data));\n    ENVOY_CONN_LOG(trace, \"recv PING ACK {}\", connection_, data);\n\n    onKeepaliveResponse();\n    return 0;\n  }\n\n  if (frame->hd.type == NGHTTP2_DATA) {\n    if (!trackInboundFrames(&frame->hd, frame->data.padlen)) {\n      return NGHTTP2_ERR_FLOODED;\n    }\n  }\n\n  // Only raise GOAWAY once, since we don't currently expose stream information. 
Shutdown\n  // notifications are the same as a normal GOAWAY.\n  // TODO: handle multiple GOAWAY frames.\n  if (frame->hd.type == NGHTTP2_GOAWAY && !raised_goaway_) {\n    ASSERT(frame->hd.stream_id == 0);\n    raised_goaway_ = true;\n    callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code));\n    return 0;\n  }\n\n  if (frame->hd.type == NGHTTP2_SETTINGS && frame->hd.flags == NGHTTP2_FLAG_NONE) {\n    onSettingsForTest(frame->settings);\n  }\n\n  StreamImpl* stream = getStream(frame->hd.stream_id);\n  if (!stream) {\n    return 0;\n  }\n\n  switch (frame->hd.type) {\n  case NGHTTP2_HEADERS: {\n    stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;\n    if (!stream->cookies_.empty()) {\n      HeaderString key(Headers::get().Cookie);\n      stream->headers().addViaMove(std::move(key), std::move(stream->cookies_));\n    }\n\n    switch (frame->headers.cat) {\n    case NGHTTP2_HCAT_RESPONSE:\n    case NGHTTP2_HCAT_REQUEST: {\n      stream->decodeHeaders();\n      break;\n    }\n\n    case NGHTTP2_HCAT_HEADERS: {\n      // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers\n      // if local is not complete.\n      if (!stream->deferred_reset_) {\n        if (nghttp2_session_check_server_session(session_) ||\n            stream->received_noninformational_headers_) {\n          ASSERT(stream->remote_end_stream_);\n          stream->decodeTrailers();\n        } else {\n          // We're a client session and still waiting for non-informational headers.\n          stream->decodeHeaders();\n        }\n      }\n      break;\n    }\n\n    default:\n      // We do not currently support push.\n      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    }\n\n    break;\n  }\n  case NGHTTP2_DATA: {\n    stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;\n\n    // It's possible that we are waiting to send a deferred reset, so only raise data if local\n    // is not complete.\n    if 
(!stream->deferred_reset_) {\n      stream->decoder().decodeData(stream->pending_recv_data_, stream->remote_end_stream_);\n    }\n\n    stream->pending_recv_data_.drain(stream->pending_recv_data_.length());\n    break;\n  }\n  case NGHTTP2_RST_STREAM: {\n    ENVOY_CONN_LOG(trace, \"remote reset: {}\", connection_, frame->rst_stream.error_code);\n    stats_.rx_reset_.inc();\n    break;\n  }\n  }\n\n  return 0;\n}\n\nint ConnectionImpl::onFrameSend(const nghttp2_frame* frame) {\n  // The nghttp2 library does not cleanly give us a way to determine whether we received invalid\n  // data from our peer. Sometimes it raises the invalid frame callback, and sometimes it does not.\n  // In all cases however it will attempt to send a GOAWAY frame with an error status. If we see\n  // an outgoing frame of this type, we will return an error code so that we can abort execution.\n  ENVOY_CONN_LOG(trace, \"sent frame type={}\", connection_, static_cast<uint64_t>(frame->hd.type));\n  switch (frame->hd.type) {\n  case NGHTTP2_GOAWAY: {\n    ENVOY_CONN_LOG(debug, \"sent goaway code={}\", connection_, frame->goaway.error_code);\n    if (frame->goaway.error_code != NGHTTP2_NO_ERROR) {\n      // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting.\n      // As such, it is not reliable to call sendPendingFrames() again after this and we assume\n      // that the connection is going to get torn down immediately. One byproduct of this is that\n      // we need to cancel all pending flush stream timeouts since they can race with connection\n      // teardown. 
As part of the work to remove exceptions we should aim to clean up all of this\n      // error handling logic and only handle this type of case at the end of dispatch.\n      for (auto& stream : active_streams_) {\n        stream->disarmStreamIdleTimer();\n      }\n      return NGHTTP2_ERR_CALLBACK_FAILURE;\n    }\n    break;\n  }\n\n  case NGHTTP2_RST_STREAM: {\n    ENVOY_CONN_LOG(debug, \"sent reset code={}\", connection_, frame->rst_stream.error_code);\n    stats_.tx_reset_.inc();\n    break;\n  }\n\n  case NGHTTP2_HEADERS:\n  case NGHTTP2_DATA: {\n    StreamImpl* stream = getStream(frame->hd.stream_id);\n    stream->local_end_stream_sent_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;\n    break;\n  }\n  }\n\n  return 0;\n}\n\nint ConnectionImpl::onError(absl::string_view error) {\n  ENVOY_CONN_LOG(debug, \"invalid http2: {}\", connection_, error);\n  return 0;\n}\n\nint ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) {\n  ENVOY_CONN_LOG(debug, \"invalid frame: {} on stream {}\", connection_, nghttp2_strerror(error_code),\n                 stream_id);\n\n  // Set details of error_code in the stream whenever we have one.\n  StreamImpl* stream = getStream(stream_id);\n  if (stream != nullptr) {\n    stream->setDetails(Http2ResponseCodeDetails::get().errorDetails(error_code));\n  }\n\n  if (error_code == NGHTTP2_ERR_HTTP_HEADER || error_code == NGHTTP2_ERR_HTTP_MESSAGING) {\n    stats_.rx_messaging_error_.inc();\n\n    if (stream_error_on_invalid_http_messaging_) {\n      // The stream is about to be closed due to an invalid header or messaging. 
Don't kill the\n      // entire connection if one stream has bad headers or messaging.\n      if (stream != nullptr) {\n        // See comment below in onStreamClose() for why we do this.\n        stream->reset_due_to_messaging_error_ = true;\n      }\n      return 0;\n    }\n  }\n\n  // Cause dispatch to return with an error code.\n  return NGHTTP2_ERR_CALLBACK_FAILURE;\n}\n\nint ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) {\n  ENVOY_CONN_LOG(trace, \"about to send frame type={}, flags={}\", connection_,\n                 static_cast<uint64_t>(frame->hd.type), static_cast<uint64_t>(frame->hd.flags));\n  ASSERT(!is_outbound_flood_monitored_control_frame_);\n  // Flag flood monitored outbound control frames.\n  is_outbound_flood_monitored_control_frame_ =\n      ((frame->hd.type == NGHTTP2_PING || frame->hd.type == NGHTTP2_SETTINGS) &&\n       frame->hd.flags & NGHTTP2_FLAG_ACK) ||\n      frame->hd.type == NGHTTP2_RST_STREAM;\n  return 0;\n}\n\nbool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data,\n                                              size_t length) {\n  // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the\n  // onBeforeFrameSend callback is not called for DATA frames.\n  bool is_outbound_flood_monitored_control_frame = false;\n  std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_);\n  try {\n    auto releasor = trackOutboundFrames(is_outbound_flood_monitored_control_frame);\n    output.add(data, length);\n    output.addDrainTracker(releasor);\n  } catch (const FrameFloodException&) {\n    return false;\n  }\n  return true;\n}\n\nssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) {\n  ENVOY_CONN_LOG(trace, \"send data: bytes={}\", connection_, length);\n  Buffer::OwnedImpl buffer;\n  if (!addOutboundFrameFragment(buffer, data, length)) {\n    ENVOY_CONN_LOG(debug, \"error sending frame: Too many frames in the 
outbound queue.\",\n                   connection_);\n    return NGHTTP2_ERR_FLOODED;\n  }\n\n  // While the buffer is transient the fragment it contains will be moved into the\n  // write_buffer_ of the underlying connection_ by the write method below.\n  // This creates lifetime dependency between the write_buffer_ of the underlying connection\n  // and the codec object. Specifically the write_buffer_ MUST be either fully drained or\n  // deleted before the codec object is deleted. This is presently guaranteed by the\n  // destruction order of the Network::ConnectionImpl object where write_buffer_ is\n  // destroyed before the filter_manager_ which owns the codec through Http::ConnectionManagerImpl.\n  connection_.write(buffer, false);\n  return length;\n}\n\nint ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) {\n  StreamImpl* stream = getStream(stream_id);\n  if (stream) {\n    ENVOY_CONN_LOG(debug, \"stream closed: {}\", connection_, error_code);\n    if (!stream->remote_end_stream_ || !stream->local_end_stream_) {\n      StreamResetReason reason;\n      if (stream->reset_due_to_messaging_error_) {\n        // Unfortunately, the nghttp2 API makes it incredibly difficult to clearly understand\n        // the flow of resets. I.e., did the reset originate locally? Was it remote? Here,\n        // we attempt to track cases in which we sent a reset locally due to an invalid frame\n        // received from the remote. We only do that in two cases currently (HTTP messaging layer\n        // errors from https://tools.ietf.org/html/rfc7540#section-8 which nghttp2 is very strict\n        // about). 
In other cases we treat invalid frames as a protocol error and just kill\n        // the connection.\n        reason = StreamResetReason::LocalReset;\n      } else {\n        if (error_code == NGHTTP2_REFUSED_STREAM) {\n          reason = StreamResetReason::RemoteRefusedStreamReset;\n          stream->setDetails(Http2ResponseCodeDetails::get().remote_refused);\n        } else {\n          reason = StreamResetReason::RemoteReset;\n          stream->setDetails(Http2ResponseCodeDetails::get().remote_reset);\n        }\n      }\n\n      stream->runResetCallbacks(reason);\n    }\n\n    stream->destroy();\n    connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_));\n    // Any unconsumed data must be consumed before the stream is deleted.\n    // nghttp2 does not appear to track this internally, and any stream deleted\n    // with outstanding window will contribute to a slow connection-window leak.\n    nghttp2_session_consume(session_, stream_id, stream->unconsumed_bytes_);\n    stream->unconsumed_bytes_ = 0;\n    nghttp2_session_set_stream_user_data(session_, stream->stream_id_, nullptr);\n  }\n\n  return 0;\n}\n\nint ConnectionImpl::onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len) {\n  ENVOY_CONN_LOG(trace, \"recv {} bytes METADATA\", connection_, len);\n\n  StreamImpl* stream = getStream(stream_id);\n  if (!stream) {\n    return 0;\n  }\n\n  bool success = stream->getMetadataDecoder().receiveMetadata(data, len);\n  return success ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE;\n}\n\nint ConnectionImpl::onMetadataFrameComplete(int32_t stream_id, bool end_metadata) {\n  ENVOY_CONN_LOG(trace, \"recv METADATA frame on stream {}, end_metadata: {}\", connection_,\n                 stream_id, end_metadata);\n\n  StreamImpl* stream = getStream(stream_id);\n  if (stream == nullptr) {\n    return 0;\n  }\n\n  bool result = stream->getMetadataDecoder().onMetadataFrameComplete(end_metadata);\n  return result ? 
0 : NGHTTP2_ERR_CALLBACK_FAILURE;\n}\n\nssize_t ConnectionImpl::packMetadata(int32_t stream_id, uint8_t* buf, size_t len) {\n  ENVOY_CONN_LOG(trace, \"pack METADATA frame on stream {}\", connection_, stream_id);\n\n  StreamImpl* stream = getStream(stream_id);\n  if (stream == nullptr) {\n    return 0;\n  }\n\n  MetadataEncoder& encoder = stream->getMetadataEncoder();\n  return encoder.packNextFramePayload(buf, len);\n}\n\nint ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name,\n                               HeaderString&& value) {\n  StreamImpl* stream = getStream(frame->hd.stream_id);\n  if (!stream) {\n    // We have seen 1 or 2 crashes where we get a headers callback but there is no associated\n    // stream data. I honestly am not sure how this can happen. However, from reading the nghttp2\n    // code it looks possible that inflate_header_block() can safely inflate headers for an already\n    // closed stream, but will still call the headers callback. Since that seems possible, we should\n    // ignore this case here.\n    // TODO(mattklein123): Figure out a test case that can hit this.\n    stats_.headers_cb_no_stream_.inc();\n    return 0;\n  }\n\n  auto should_return = checkHeaderNameForUnderscores(name.getStringView());\n  if (should_return) {\n    stream->setDetails(Http2ResponseCodeDetails::get().invalid_underscore);\n    name.clear();\n    value.clear();\n    return should_return.value();\n  }\n\n  stream->saveHeader(std::move(name), std::move(value));\n\n  if (stream->headers().byteSize() > max_headers_kb_ * 1024 ||\n      stream->headers().size() > max_headers_count_) {\n    stream->setDetails(Http2ResponseCodeDetails::get().too_many_headers);\n    stats_.header_overflow_.inc();\n    // This will cause the library to reset/close the stream.\n    return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;\n  } else {\n    return 0;\n  }\n}\n\nvoid ConnectionImpl::sendPendingFrames() {\n  if (dispatching_ || connection_.state() == 
Network::Connection::State::Closed) {\n    return;\n  }\n\n  const int rc = nghttp2_session_send(session_);\n  if (rc != 0) {\n    ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE);\n    // For errors caused by the pending outbound frame flood the FrameFloodException has\n    // to be thrown. However the nghttp2 library returns only the generic error code for\n    // all failure types. Check queue limits and throw FrameFloodException if they were\n    // exceeded.\n    if (!protocol_constraints_.status().ok()) {\n      throw FrameFloodException(\"Too many frames in the outbound queue.\");\n    }\n\n    throw CodecProtocolException(std::string(nghttp2_strerror(rc)));\n  }\n\n  // See ConnectionImpl::StreamImpl::resetStream() for why we do this. This is an uncommon event,\n  // so iterating through every stream to find the ones that have a deferred reset is not a big\n  // deal. Furthermore, queueing a reset frame does not actually invoke the close stream callback.\n  // This is only done when the reset frame is sent. Thus, it's safe to work directly with the\n  // stream map.\n  // NOTE: The way we handle deferred reset is essentially best effort. If we intend to do a\n  //       deferred reset, we try to finish the stream, including writing any pending data frames.\n  //       If we cannot do this (potentially due to not enough window), we just reset the stream.\n  //       In general this behavior occurs only when we are trying to send immediate error messages\n  //       to short circuit requests. In the best effort case, we complete the stream before\n  //       resetting. 
In other cases, we just do the reset now which will blow away pending data\n  //       frames and release any memory associated with the stream.\n  if (pending_deferred_reset_) {\n    pending_deferred_reset_ = false;\n    for (auto& stream : active_streams_) {\n      if (stream->deferred_reset_) {\n        stream->resetStreamWorker(stream->deferred_reset_.value());\n      }\n    }\n    sendPendingFrames();\n  }\n}\n\nvoid ConnectionImpl::sendSettings(\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options, bool disable_push) {\n  absl::InlinedVector<nghttp2_settings_entry, 10> settings;\n  auto insertParameter = [&settings](const nghttp2_settings_entry& entry) mutable -> bool {\n    const auto it = std::find_if(settings.cbegin(), settings.cend(),\n                                 [&entry](const nghttp2_settings_entry& existing) {\n                                   return entry.settings_id == existing.settings_id;\n                                 });\n    if (it != settings.end()) {\n      return false;\n    }\n    settings.push_back(entry);\n    return true;\n  };\n\n  // Universally disable receiving push promise frames as we don't currently support\n  // them. nghttp2 will fail the connection if the other side still sends them.\n  // TODO(mattklein123): Remove this when we correctly proxy push promise.\n  // NOTE: This is a special case with respect to custom parameter overrides in that server push is\n  // not supported and therefore not end user configurable.\n  if (disable_push) {\n    settings.push_back(\n        {static_cast<int32_t>(NGHTTP2_SETTINGS_ENABLE_PUSH), disable_push ? 
0U : 1U});\n  }\n\n  for (const auto& it : http2_options.custom_settings_parameters()) {\n    ASSERT(it.identifier().value() <= std::numeric_limits<uint16_t>::max());\n    const bool result =\n        insertParameter({static_cast<int32_t>(it.identifier().value()), it.value().value()});\n    ASSERT(result);\n    ENVOY_CONN_LOG(debug, \"adding custom settings parameter with id {:#x} to {}\", connection_,\n                   it.identifier().value(), it.value().value());\n  }\n\n  // Insert named parameters.\n  settings.insert(\n      settings.end(),\n      {{NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, http2_options.hpack_table_size().value()},\n       {NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL, http2_options.allow_connect()},\n       {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()},\n       {NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, http2_options.initial_stream_window_size().value()}});\n  if (!settings.empty()) {\n    int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, settings.data(), settings.size());\n    ASSERT(rc == 0);\n  } else {\n    // nghttp2_submit_settings need to be called at least once\n    int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, nullptr, 0);\n    ASSERT(rc == 0);\n  }\n\n  const uint32_t initial_connection_window_size =\n      http2_options.initial_connection_window_size().value();\n  // Increase connection window size up to our default size.\n  if (initial_connection_window_size != NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE) {\n    ENVOY_CONN_LOG(debug, \"updating connection-level initial window size to {}\", connection_,\n                   initial_connection_window_size);\n    int rc = nghttp2_submit_window_update(session_, NGHTTP2_FLAG_NONE, 0,\n                                          initial_connection_window_size -\n                                              NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE);\n    ASSERT(rc == 0);\n  }\n}\n\nvoid 
ConnectionImpl::scheduleProtocolConstraintViolationCallback() {\n  if (!protocol_constraint_violation_callback_) {\n    protocol_constraint_violation_callback_ = connection_.dispatcher().createSchedulableCallback(\n        [this]() { onProtocolConstraintViolation(); });\n    protocol_constraint_violation_callback_->scheduleCallbackCurrentIteration();\n  }\n}\n\nvoid ConnectionImpl::onProtocolConstraintViolation() {\n  // Flooded outbound queue implies that peer is not reading and it does not\n  // make sense to try to flush pending bytes.\n  connection_.close(Envoy::Network::ConnectionCloseType::NoFlush);\n}\n\nConnectionImpl::Http2Callbacks::Http2Callbacks() {\n  nghttp2_session_callbacks_new(&callbacks_);\n  nghttp2_session_callbacks_set_send_callback(\n      callbacks_,\n      [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t {\n        return static_cast<ConnectionImpl*>(user_data)->onSend(data, length);\n      });\n\n  nghttp2_session_callbacks_set_send_data_callback(\n      callbacks_,\n      [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length,\n         nghttp2_data_source* source, void*) -> int {\n        ASSERT(frame->data.padlen == 0);\n        return static_cast<StreamImpl*>(source->ptr)->onDataSourceSend(framehd, length);\n      });\n\n  nghttp2_session_callbacks_set_on_begin_headers_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onBeginHeaders(frame);\n      });\n\n  nghttp2_session_callbacks_set_on_header_callback(\n      callbacks_,\n      [](nghttp2_session*, const nghttp2_frame* frame, const uint8_t* raw_name, size_t name_length,\n         const uint8_t* raw_value, size_t value_length, uint8_t, void* user_data) -> int {\n        // TODO PERF: Can reference count here to avoid copies.\n        HeaderString name;\n        name.setCopy(reinterpret_cast<const 
char*>(raw_name), name_length);\n        HeaderString value;\n        value.setCopy(reinterpret_cast<const char*>(raw_value), value_length);\n        return static_cast<ConnectionImpl*>(user_data)->onHeader(frame, std::move(name),\n                                                                 std::move(value));\n      });\n\n  nghttp2_session_callbacks_set_on_data_chunk_recv_callback(\n      callbacks_,\n      [](nghttp2_session*, uint8_t, int32_t stream_id, const uint8_t* data, size_t len,\n         void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onData(stream_id, data, len);\n      });\n\n  nghttp2_session_callbacks_set_on_begin_frame_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame_hd* hd, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onBeforeFrameReceived(hd);\n      });\n\n  nghttp2_session_callbacks_set_on_frame_recv_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onFrameReceived(frame);\n      });\n\n  nghttp2_session_callbacks_set_on_stream_close_callback(\n      callbacks_,\n      [](nghttp2_session*, int32_t stream_id, uint32_t error_code, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onStreamClose(stream_id, error_code);\n      });\n\n  nghttp2_session_callbacks_set_on_frame_send_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onFrameSend(frame);\n      });\n\n  nghttp2_session_callbacks_set_before_frame_send_callback(\n      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onBeforeFrameSend(frame);\n      });\n\n  nghttp2_session_callbacks_set_on_frame_not_send_callback(\n      callbacks_, 
[](nghttp2_session*, const nghttp2_frame*, int, void*) -> int {\n        // We used to always return failure here but it looks now this can get called if the other\n        // side sends GOAWAY and we are trying to send a SETTINGS ACK. Just ignore this for now.\n        return 0;\n      });\n\n  nghttp2_session_callbacks_set_on_invalid_frame_recv_callback(\n      callbacks_,\n      [](nghttp2_session*, const nghttp2_frame* frame, int error_code, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onInvalidFrame(frame->hd.stream_id,\n                                                                       error_code);\n      });\n\n  nghttp2_session_callbacks_set_on_extension_chunk_recv_callback(\n      callbacks_,\n      [](nghttp2_session*, const nghttp2_frame_hd* hd, const uint8_t* data, size_t len,\n         void* user_data) -> int {\n        ASSERT(hd->length >= len);\n        return static_cast<ConnectionImpl*>(user_data)->onMetadataReceived(hd->stream_id, data,\n                                                                           len);\n      });\n\n  nghttp2_session_callbacks_set_unpack_extension_callback(\n      callbacks_, [](nghttp2_session*, void**, const nghttp2_frame_hd* hd, void* user_data) -> int {\n        return static_cast<ConnectionImpl*>(user_data)->onMetadataFrameComplete(\n            hd->stream_id, hd->flags == END_METADATA_FLAG);\n      });\n\n  nghttp2_session_callbacks_set_pack_extension_callback(\n      callbacks_,\n      [](nghttp2_session*, uint8_t* buf, size_t len, const nghttp2_frame* frame,\n         void* user_data) -> ssize_t {\n        ASSERT(frame->hd.length <= len);\n        return static_cast<ConnectionImpl*>(user_data)->packMetadata(frame->hd.stream_id, buf, len);\n      });\n\n  nghttp2_session_callbacks_set_error_callback2(\n      callbacks_, [](nghttp2_session*, int, const char* msg, size_t len, void* user_data) -> int {\n        return 
static_cast<ConnectionImpl*>(user_data)->onError(absl::string_view(msg, len));\n      });\n}\n\nConnectionImpl::Http2Callbacks::~Http2Callbacks() { nghttp2_session_callbacks_del(callbacks_); }\n\nConnectionImpl::Http2Options::Http2Options(\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options) {\n  nghttp2_option_new(&options_);\n  // Currently we do not do anything with stream priority. Setting the following option prevents\n  // nghttp2 from keeping around closed streams for use during stream priority dependency graph\n  // calculations. This saves a tremendous amount of memory in cases where there are a large\n  // number of kept alive HTTP/2 connections.\n  nghttp2_option_set_no_closed_streams(options_, 1);\n  nghttp2_option_set_no_auto_window_update(options_, 1);\n\n  // The max send header block length is configured to an arbitrarily high number so as to never\n  // trigger the check within nghttp2, as we check request headers length in\n  // codec_impl::saveHeader.\n  nghttp2_option_set_max_send_header_block_length(options_, 0x2000000);\n\n  if (http2_options.hpack_table_size().value() != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) {\n    nghttp2_option_set_max_deflate_dynamic_table_size(options_,\n                                                      http2_options.hpack_table_size().value());\n  }\n\n  if (http2_options.allow_metadata()) {\n    nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE);\n  } else {\n    ENVOY_LOG(trace, \"Codec does not have Metadata frame support.\");\n  }\n\n  // nghttp2 v1.39.2 lowered the internal flood protection limit from 10K to 1K of ACK frames.\n  // This new limit may cause the internal nghttp2 mitigation to trigger more often (as it\n  // requires just 9K of incoming bytes for smallest 9 byte SETTINGS frame), bypassing the same\n  // mitigation and its associated behavior in the envoy HTTP/2 codec. 
Since envoy does not rely\n  // on this mitigation, set back to the old 10K number to avoid any changes in the HTTP/2 codec\n  // behavior.\n  nghttp2_option_set_max_outbound_ack(options_, 10000);\n}\n\nConnectionImpl::Http2Options::~Http2Options() { nghttp2_option_del(options_); }\n\nConnectionImpl::ClientHttp2Options::ClientHttp2Options(\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options)\n    : Http2Options(http2_options) {\n  // Temporarily disable initial max streams limit/protection, since we might want to create\n  // more than 100 streams before receiving the HTTP/2 SETTINGS frame from the server.\n  //\n  // TODO(PiotrSikora): remove this once multiple upstream connections or queuing are implemented.\n  nghttp2_option_set_peer_max_concurrent_streams(\n      options_, ::Envoy::Http2::Utility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS);\n}\n\nClientConnectionImpl::ClientConnectionImpl(\n    Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats,\n    Random::RandomGenerator& random,\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n    const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count,\n    Nghttp2SessionFactory& http2_session_factory)\n    : ConnectionImpl(connection, stats, random, http2_options, max_response_headers_kb,\n                     max_response_headers_count),\n      callbacks_(callbacks) {\n  ClientHttp2Options client_http2_options(http2_options);\n  session_ = http2_session_factory.create(http2_callbacks_.callbacks(), base(),\n                                          client_http2_options.options());\n  http2_session_factory.init(session_, base(), http2_options);\n  allow_metadata_ = http2_options.allow_metadata();\n}\n\nRequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) {\n  ClientStreamImplPtr stream(new ClientStreamImpl(*this, per_stream_buffer_limit_, decoder));\n  // If the connection is currently above 
the high watermark, make sure to inform the new stream.\n  // The connection can not pass this on automatically as it has no awareness that a new stream is\n  // created.\n  if (connection_.aboveHighWatermark()) {\n    stream->runHighWatermarkCallbacks();\n  }\n  ClientStreamImpl& stream_ref = *stream;\n  LinkedList::moveIntoList(std::move(stream), active_streams_);\n  return stream_ref;\n}\n\nint ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) {\n  // The client code explicitly does not currently support push promise.\n  RELEASE_ASSERT(frame->hd.type == NGHTTP2_HEADERS, \"\");\n  RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE ||\n                     frame->headers.cat == NGHTTP2_HCAT_HEADERS,\n                 \"\");\n  if (frame->headers.cat == NGHTTP2_HCAT_HEADERS) {\n    StreamImpl* stream = getStream(frame->hd.stream_id);\n    stream->allocTrailers();\n  }\n\n  return 0;\n}\n\nint ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name,\n                                   HeaderString&& value) {\n  // The client code explicitly does not currently support push promise.\n  ASSERT(frame->hd.type == NGHTTP2_HEADERS);\n  ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || frame->headers.cat == NGHTTP2_HCAT_HEADERS);\n  return saveHeader(frame, std::move(name), std::move(value));\n}\n\nServerConnectionImpl::ServerConnectionImpl(\n    Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats,\n    Random::RandomGenerator& random,\n    const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n    const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count,\n    envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n        headers_with_underscores_action)\n    : ConnectionImpl(connection, stats, random, http2_options, max_request_headers_kb,\n                     max_request_headers_count),\n      callbacks_(callbacks), 
headers_with_underscores_action_(headers_with_underscores_action) {\n  Http2Options h2_options(http2_options);\n\n  nghttp2_session_server_new2(&session_, http2_callbacks_.callbacks(), base(),\n                              h2_options.options());\n  sendSettings(http2_options, false);\n  allow_metadata_ = http2_options.allow_metadata();\n}\n\nint ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) {\n  // For a server connection, we should never get push promise frames.\n  ASSERT(frame->hd.type == NGHTTP2_HEADERS);\n\n  if (!trackInboundFrames(&frame->hd, frame->headers.padlen)) {\n    return NGHTTP2_ERR_FLOODED;\n  }\n\n  if (frame->headers.cat != NGHTTP2_HCAT_REQUEST) {\n    stats_.trailers_.inc();\n    ASSERT(frame->headers.cat == NGHTTP2_HCAT_HEADERS);\n\n    StreamImpl* stream = getStream(frame->hd.stream_id);\n    stream->allocTrailers();\n    return 0;\n  }\n\n  ServerStreamImplPtr stream(new ServerStreamImpl(*this, per_stream_buffer_limit_));\n  if (connection_.aboveHighWatermark()) {\n    stream->runHighWatermarkCallbacks();\n  }\n  stream->request_decoder_ = &callbacks_.newStream(*stream);\n  stream->stream_id_ = frame->hd.stream_id;\n  LinkedList::moveIntoList(std::move(stream), active_streams_);\n  nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id,\n                                       active_streams_.front().get());\n  return 0;\n}\n\nint ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name,\n                                   HeaderString&& value) {\n  // For a server connection, we should never get push promise frames.\n  ASSERT(frame->hd.type == NGHTTP2_HEADERS);\n  ASSERT(frame->headers.cat == NGHTTP2_HCAT_REQUEST || frame->headers.cat == NGHTTP2_HCAT_HEADERS);\n  return saveHeader(frame, std::move(name), std::move(value));\n}\n\nbool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) {\n  ENVOY_CONN_LOG(trace, \"track inbound frame type={} flags={} 
length={} padding_length={}\",\n                 connection_, static_cast<uint64_t>(hd->type), static_cast<uint64_t>(hd->flags),\n                 static_cast<uint64_t>(hd->length), padding_length);\n  auto result = protocol_constraints_.trackInboundFrames(hd, padding_length);\n  if (!result.ok()) {\n    ENVOY_CONN_LOG(trace, \"error reading frame: {} received in this HTTP/2 session.\", connection_,\n                   result.message());\n    if (isInboundFramesWithEmptyPayloadError(result)) {\n      ConnectionImpl::StreamImpl* stream = getStream(hd->stream_id);\n      if (stream) {\n        stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood);\n      }\n    }\n    // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate\n    // all the way to nghttp2_session_mem_recv() where we need it.\n    flood_detected_ = true;\n    return false;\n  }\n\n  return true;\n}\n\nEnvoy::Http::Http2::ProtocolConstraints::ReleasorProc\nServerConnectionImpl::trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) {\n  auto releasor =\n      protocol_constraints_.incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame);\n  if (dispatching_downstream_data_ && !protocol_constraints_.checkOutboundFrameLimits().ok()) {\n    throw FrameFloodException(std::string(protocol_constraints_.status().message()));\n  }\n  return releasor;\n}\n\nvoid ServerConnectionImpl::checkProtocolConstraintViolation() {\n  if (!protocol_constraints_.checkOutboundFrameLimits().ok()) {\n    scheduleProtocolConstraintViolationCallback();\n  }\n}\n\nHttp::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) {\n  // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either\n  // throw an exception or return an error status. 
The utility wrapper catches exceptions and\n  // converts them to error statuses.\n  return Http::Utility::exceptionToStatus(\n      [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data);\n}\n\nHttp::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) {\n  ASSERT(!dispatching_downstream_data_);\n  dispatching_downstream_data_ = true;\n\n  // Make sure the dispatching_downstream_data_ is set to false even\n  // when ConnectionImpl::dispatch throws an exception.\n  Cleanup cleanup([this]() { dispatching_downstream_data_ = false; });\n\n  // Make sure downstream outbound queue was not flooded by the upstream frames.\n  if (!protocol_constraints_.checkOutboundFrameLimits().ok()) {\n    throw FrameFloodException(std::string(protocol_constraints_.status().message()));\n  }\n\n  return ConnectionImpl::innerDispatch(data);\n}\n\nabsl::optional<int>\nServerConnectionImpl::checkHeaderNameForUnderscores(absl::string_view header_name) {\n  if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW &&\n      Http::HeaderUtility::headerNameContainsUnderscore(header_name)) {\n    if (headers_with_underscores_action_ ==\n        envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) {\n      ENVOY_CONN_LOG(debug, \"Dropping header with invalid characters in its name: {}\", connection_,\n                     header_name);\n      stats_.dropped_headers_with_underscores_.inc();\n      return 0;\n    }\n    ENVOY_CONN_LOG(debug, \"Rejecting request due to header name with underscores: {}\", connection_,\n                   header_name);\n    stats_.requests_rejected_with_underscores_in_headers_.inc();\n    return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;\n  }\n  return absl::nullopt;\n}\n\n} // namespace Http2\n} // namespace Legacy\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/codec_impl_legacy.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/http/codec_helper.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/http/http2/metadata_decoder.h\"\n#include \"common/http/http2/metadata_encoder.h\"\n#include \"common/http/http2/protocol_constraints.h\"\n#include \"common/http/status.h\"\n#include \"common/http/utility.h\"\n\n#include \"absl/types/optional.h\"\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Legacy {\nnamespace Http2 {\n\n// This is not the full client magic, but it's the smallest size that should be able to\n// differentiate between HTTP/1 and HTTP/2.\nconst std::string CLIENT_MAGIC_PREFIX = \"PRI * HTTP/2\";\n\nclass Utility {\npublic:\n  /**\n   * Deal with https://tools.ietf.org/html/rfc7540#section-8.1.2.5\n   * @param key supplies the incoming header key.\n   * @param value supplies the incoming header value.\n   * @param cookies supplies the header string to fill if this is a cookie header that needs to be\n   *                rebuilt.\n   */\n  static bool reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value,\n                                          HeaderString& cookies);\n};\n\nclass ConnectionImpl;\n\n// Abstract nghttp2_session factory. 
Used to enable injection of factories for testing.\nclass Nghttp2SessionFactory {\npublic:\n  using ConnectionImplType = ConnectionImpl;\n  virtual ~Nghttp2SessionFactory() = default;\n\n  // Returns a new nghttp2_session to be used with |connection|.\n  virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks,\n                                  ConnectionImplType* connection,\n                                  const nghttp2_option* options) PURE;\n\n  // Initializes the |session|.\n  virtual void init(nghttp2_session* session, ConnectionImplType* connection,\n                    const envoy::config::core::v3::Http2ProtocolOptions& options) PURE;\n};\n\nclass ProdNghttp2SessionFactory : public Nghttp2SessionFactory {\npublic:\n  nghttp2_session* create(const nghttp2_session_callbacks* callbacks, ConnectionImpl* connection,\n                          const nghttp2_option* options) override;\n\n  void init(nghttp2_session* session, ConnectionImpl* connection,\n            const envoy::config::core::v3::Http2ProtocolOptions& options) override;\n\n  // Returns a global factory instance. 
Note that this is possible because no internal state is\n  // maintained; the thread safety of create() and init()'s side effects is guaranteed by Envoy's\n  // worker based threading model.\n  static ProdNghttp2SessionFactory& get() {\n    static ProdNghttp2SessionFactory* instance = new ProdNghttp2SessionFactory();\n    return *instance;\n  }\n};\n\n/**\n * Base class for HTTP/2 client and server codecs.\n */\nclass ConnectionImpl : public virtual Connection, protected Logger::Loggable<Logger::Id::http2> {\npublic:\n  ConnectionImpl(Network::Connection& connection, Http::Http2::CodecStats& stats,\n                 Random::RandomGenerator& random,\n                 const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                 const uint32_t max_headers_kb, const uint32_t max_headers_count);\n\n  ~ConnectionImpl() override;\n\n  // Http::Connection\n  // NOTE: the `dispatch` method is also overridden in the ServerConnectionImpl class\n  Http::Status dispatch(Buffer::Instance& data) override;\n  void goAway() override;\n  Protocol protocol() override { return Protocol::Http2; }\n  void shutdownNotice() override;\n  bool wantsToWrite() override { return nghttp2_session_want_write(session_); }\n  // Propagate network connection watermark events to each stream on the connection.\n  void onUnderlyingConnectionAboveWriteBufferHighWatermark() override {\n    for (auto& stream : active_streams_) {\n      stream->runHighWatermarkCallbacks();\n    }\n  }\n  void onUnderlyingConnectionBelowWriteBufferLowWatermark() override {\n    for (auto& stream : active_streams_) {\n      stream->runLowWatermarkCallbacks();\n    }\n  }\n\n  /**\n   * An inner dispatch call that executes the dispatching logic. 
While exception removal is in\n   * migration (#10878), this function may either throw an exception or return an error status.\n   * Exceptions are caught and translated to their corresponding statuses in the outer level\n   * dispatch.\n   * This needs to be virtual so that ServerConnectionImpl can override.\n   * TODO(#10878): Remove this when exception removal is complete.\n   */\n  virtual Http::Status innerDispatch(Buffer::Instance& data);\n\nprotected:\n  friend class ProdNghttp2SessionFactory;\n\n  /**\n   * Wrapper for static nghttp2 callback dispatchers.\n   */\n  class Http2Callbacks {\n  public:\n    Http2Callbacks();\n    ~Http2Callbacks();\n\n    const nghttp2_session_callbacks* callbacks() { return callbacks_; }\n\n  private:\n    nghttp2_session_callbacks* callbacks_;\n  };\n\n  /**\n   * Wrapper for static nghttp2 session options.\n   */\n  class Http2Options {\n  public:\n    Http2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options);\n    ~Http2Options();\n\n    const nghttp2_option* options() { return options_; }\n\n  protected:\n    nghttp2_option* options_;\n  };\n\n  class ClientHttp2Options : public Http2Options {\n  public:\n    ClientHttp2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options);\n  };\n\n  /**\n   * Base class for client and server side streams.\n   */\n  struct StreamImpl : public virtual StreamEncoder,\n                      public Stream,\n                      public LinkedObject<StreamImpl>,\n                      public Event::DeferredDeletable,\n                      public StreamCallbackHelper {\n\n    StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit);\n    ~StreamImpl() override;\n    // TODO(mattklein123): Optimally this would be done in the destructor but there are currently\n    // deferred delete lifetime issues that need sorting out if the destructor of the stream is\n    // going to be able to refer to the parent connection.\n    void destroy();\n    void 
disarmStreamIdleTimer() {\n      if (stream_idle_timer_ != nullptr) {\n        // To ease testing and the destructor assertion.\n        stream_idle_timer_->disableTimer();\n        stream_idle_timer_.reset();\n      }\n    }\n\n    StreamImpl* base() { return this; }\n    ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags);\n    int onDataSourceSend(const uint8_t* framehd, size_t length);\n    void resetStreamWorker(StreamResetReason reason);\n    static void buildHeaders(std::vector<nghttp2_nv>& final_headers, const HeaderMap& headers);\n    void saveHeader(HeaderString&& name, HeaderString&& value);\n    void encodeHeadersBase(const std::vector<nghttp2_nv>& final_headers, bool end_stream);\n    virtual void submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                               nghttp2_data_provider* provider) PURE;\n    void encodeTrailersBase(const HeaderMap& headers);\n    void submitTrailers(const HeaderMap& trailers);\n    void submitMetadata(uint8_t flags);\n    virtual StreamDecoder& decoder() PURE;\n    virtual HeaderMap& headers() PURE;\n    virtual void allocTrailers() PURE;\n    virtual HeaderMapPtr cloneTrailers(const HeaderMap& trailers) PURE;\n    virtual void createPendingFlushTimer() PURE;\n    void onPendingFlushTimer();\n\n    // Http::StreamEncoder\n    void encodeData(Buffer::Instance& data, bool end_stream) override;\n    Stream& getStream() override { return *this; }\n    void encodeMetadata(const MetadataMapVector& metadata_map_vector) override;\n    Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return absl::nullopt; }\n\n    // Http::Stream\n    void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); }\n    void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); }\n    void resetStream(StreamResetReason reason) override;\n    void readDisable(bool disable) override;\n    uint32_t bufferLimit() override { return 
pending_recv_data_.highWatermark(); }\n    const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override {\n      return parent_.connection_.localAddress();\n    }\n    absl::string_view responseDetails() override { return details_; }\n    void setFlushTimeout(std::chrono::milliseconds timeout) override {\n      stream_idle_timeout_ = timeout;\n    }\n\n    // This code assumes that details is a static string, so that we\n    // can avoid copying it.\n    void setDetails(absl::string_view details) {\n      // TODO(asraa): In some cases nghttp2's error handling may cause processing of multiple\n      // invalid frames for a single stream. If a temporal stream error is returned from a callback,\n      // remaining frames in the buffer will still be partially processed. For example, remaining\n      // frames will still parse through nghttp2's push promise error handling and in\n      // onBeforeFrame(Send/Received) callbacks, which may return invalid frame errors and attempt\n      // to set details again. In these cases, we simply do not overwrite details. 
When internal\n      // error latching is implemented in the codec for exception removal, we should prevent calling\n      // setDetails in an error state.\n      if (details_.empty()) {\n        details_ = details;\n      }\n    }\n\n    void setWriteBufferWatermarks(uint32_t low_watermark, uint32_t high_watermark) {\n      pending_recv_data_.setWatermarks(low_watermark, high_watermark);\n      pending_send_data_.setWatermarks(low_watermark, high_watermark);\n    }\n\n    // If the receive buffer encounters watermark callbacks, enable/disable reads on this stream.\n    void pendingRecvBufferHighWatermark();\n    void pendingRecvBufferLowWatermark();\n\n    // If the send buffer encounters watermark callbacks, propagate this information to the streams.\n    // The router and connection manager will propagate them on as appropriate.\n    void pendingSendBufferHighWatermark();\n    void pendingSendBufferLowWatermark();\n\n    // Does any necessary WebSocket/Upgrade conversion, then passes the headers\n    // to the decoder_.\n    virtual void decodeHeaders() PURE;\n    virtual void decodeTrailers() PURE;\n\n    // Get MetadataEncoder for this stream.\n    Http::Http2::MetadataEncoder& getMetadataEncoder();\n    // Get MetadataDecoder for this stream.\n    Http::Http2::MetadataDecoder& getMetadataDecoder();\n    // Callback function for MetadataDecoder.\n    void onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr);\n\n    bool buffersOverrun() const { return read_disable_count_ > 0; }\n\n    void encodeDataHelper(Buffer::Instance& data, bool end_stream,\n                          bool skip_encoding_empty_trailers);\n\n    ConnectionImpl& parent_;\n    int32_t stream_id_{-1};\n    uint32_t unconsumed_bytes_{0};\n    uint32_t read_disable_count_{0};\n    Buffer::WatermarkBuffer pending_recv_data_{\n        [this]() -> void { this->pendingRecvBufferLowWatermark(); },\n        [this]() -> void { this->pendingRecvBufferHighWatermark(); },\n        []() -> void { /* 
TODO(adisuissa): Handle overflow watermark */ }};\n    Buffer::WatermarkBuffer pending_send_data_{\n        [this]() -> void { this->pendingSendBufferLowWatermark(); },\n        [this]() -> void { this->pendingSendBufferHighWatermark(); },\n        []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }};\n    HeaderMapPtr pending_trailers_to_encode_;\n    std::unique_ptr<Http::Http2::MetadataDecoder> metadata_decoder_;\n    std::unique_ptr<Http::Http2::MetadataEncoder> metadata_encoder_;\n    absl::optional<StreamResetReason> deferred_reset_;\n    HeaderString cookies_;\n    bool local_end_stream_sent_ : 1;\n    bool remote_end_stream_ : 1;\n    bool data_deferred_ : 1;\n    bool received_noninformational_headers_ : 1;\n    bool pending_receive_buffer_high_watermark_called_ : 1;\n    bool pending_send_buffer_high_watermark_called_ : 1;\n    bool reset_due_to_messaging_error_ : 1;\n    absl::string_view details_;\n    // See HttpConnectionManager.stream_idle_timeout.\n    std::chrono::milliseconds stream_idle_timeout_{};\n    Event::TimerPtr stream_idle_timer_;\n  };\n\n  using StreamImplPtr = std::unique_ptr<StreamImpl>;\n\n  /**\n   * Client side stream (request).\n   */\n  struct ClientStreamImpl : public StreamImpl, public RequestEncoder {\n    ClientStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit,\n                     ResponseDecoder& response_decoder)\n        : StreamImpl(parent, buffer_limit), response_decoder_(response_decoder),\n          headers_or_trailers_(ResponseHeaderMapImpl::create()) {}\n\n    // StreamImpl\n    void submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                       nghttp2_data_provider* provider) override;\n    StreamDecoder& decoder() override { return response_decoder_; }\n    void decodeHeaders() override;\n    void decodeTrailers() override;\n    HeaderMap& headers() override {\n      if (absl::holds_alternative<ResponseHeaderMapPtr>(headers_or_trailers_)) {\n        return 
*absl::get<ResponseHeaderMapPtr>(headers_or_trailers_);\n      } else {\n        return *absl::get<ResponseTrailerMapPtr>(headers_or_trailers_);\n      }\n    }\n    void allocTrailers() override {\n      // If we are waiting for informational headers, make a new response header map, otherwise\n      // we are about to receive trailers. The codec makes sure this is the only valid sequence.\n      if (received_noninformational_headers_) {\n        headers_or_trailers_.emplace<ResponseTrailerMapPtr>(ResponseTrailerMapImpl::create());\n      } else {\n        headers_or_trailers_.emplace<ResponseHeaderMapPtr>(ResponseHeaderMapImpl::create());\n      }\n    }\n    HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override {\n      return createHeaderMap<RequestTrailerMapImpl>(trailers);\n    }\n    void createPendingFlushTimer() override {\n      // Client streams do not create a flush timer because we currently assume that any failure\n      // to flush would be covered by a request/stream/etc. 
timeout.\n    }\n\n    // RequestEncoder\n    void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override;\n    void encodeTrailers(const RequestTrailerMap& trailers) override {\n      encodeTrailersBase(trailers);\n    }\n\n    ResponseDecoder& response_decoder_;\n    absl::variant<ResponseHeaderMapPtr, ResponseTrailerMapPtr> headers_or_trailers_;\n    std::string upgrade_type_;\n  };\n\n  using ClientStreamImplPtr = std::unique_ptr<ClientStreamImpl>;\n\n  /**\n   * Server side stream (response).\n   */\n  struct ServerStreamImpl : public StreamImpl, public ResponseEncoder {\n    ServerStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit)\n        : StreamImpl(parent, buffer_limit), headers_or_trailers_(RequestHeaderMapImpl::create()) {\n      stream_error_on_invalid_http_message_ = parent.stream_error_on_invalid_http_messaging_;\n    }\n\n    // StreamImpl\n    void submitHeaders(const std::vector<nghttp2_nv>& final_headers,\n                       nghttp2_data_provider* provider) override;\n    StreamDecoder& decoder() override { return *request_decoder_; }\n    void decodeHeaders() override;\n    void decodeTrailers() override;\n    HeaderMap& headers() override {\n      if (absl::holds_alternative<RequestHeaderMapPtr>(headers_or_trailers_)) {\n        return *absl::get<RequestHeaderMapPtr>(headers_or_trailers_);\n      } else {\n        return *absl::get<RequestTrailerMapPtr>(headers_or_trailers_);\n      }\n    }\n    void allocTrailers() override {\n      headers_or_trailers_.emplace<RequestTrailerMapPtr>(RequestTrailerMapImpl::create());\n    }\n    HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override {\n      return createHeaderMap<ResponseTrailerMapImpl>(trailers);\n    }\n    void createPendingFlushTimer() override;\n\n    // ResponseEncoder\n    void encode100ContinueHeaders(const ResponseHeaderMap& headers) override;\n    void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override;\n    void 
encodeTrailers(const ResponseTrailerMap& trailers) override {\n      encodeTrailersBase(trailers);\n    }\n\n    RequestDecoder* request_decoder_{};\n    absl::variant<RequestHeaderMapPtr, RequestTrailerMapPtr> headers_or_trailers_;\n    bool stream_error_on_invalid_http_message_;\n\n    bool streamErrorOnInvalidHttpMessage() const override {\n      return stream_error_on_invalid_http_message_;\n    }\n  };\n\n  using ServerStreamImplPtr = std::unique_ptr<ServerStreamImpl>;\n\n  ConnectionImpl* base() { return this; }\n  // NOTE: Always use non debug nullptr checks against the return value of this function. There are\n  // edge cases (such as for METADATA frames) where nghttp2 will issue a callback for a stream_id\n  // that is not associated with an existing stream.\n  StreamImpl* getStream(int32_t stream_id);\n  int saveHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value);\n  void sendPendingFrames();\n  void sendSettings(const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                    bool disable_push);\n  // Callback triggered when the peer's SETTINGS frame is received.\n  // NOTE: This is only used for tests.\n  virtual void onSettingsForTest(const nghttp2_settings&) {}\n\n  /**\n   * Check if header name contains underscore character.\n   * Underscore character is allowed in header names by the RFC-7230 and this check is implemented\n   * as a security measure due to systems that treat '_' and '-' as interchangeable.\n   * The ServerConnectionImpl may drop header or reject request based on the\n   * `common_http_protocol_options.headers_with_underscores_action` configuration option in the\n   * HttpConnectionManager.\n   */\n  virtual absl::optional<int> checkHeaderNameForUnderscores(absl::string_view /* header_name */) {\n    return absl::nullopt;\n  }\n\n  /**\n   * This method checks if a protocol constraint had been violated in the sendPendingFrames() call\n   * outside of the dispatch context.\n   * This 
method is a stop-gap solution for implementing checking of protocol constraint violations\n   * outside of the dispatch context (where at this point the sendPendingFrames() method always\n   * returns success). It allows each case where sendPendingFrames() is called outside of the\n   * dispatch context to be fixed in its own PR so it is easier to review and reason about. Once all\n   * error handling is implemented this method will be removed and the `sendPendingFrames()` will be\n   * changed to return error in both dispatching and non-dispatching contexts. At the same time the\n   * RELEASE_ASSERTs will be removed as well.\n   * The implementation in the ClientConnectionImpl is a no-op as client connections to not check\n   * protocol constraints.\n   * The implementation in the ServerConnectionImpl schedules callback to terminate connection if\n   * the protocol constraint was violated.\n   */\n  virtual void checkProtocolConstraintViolation() PURE;\n\n  /**\n   * Callback for terminating connection when protocol constrain has been violated\n   * outside of the dispatch context.\n   */\n  void scheduleProtocolConstraintViolationCallback();\n  void onProtocolConstraintViolation();\n\n  static Http2Callbacks http2_callbacks_;\n\n  std::list<StreamImplPtr> active_streams_;\n  nghttp2_session* session_{};\n  Http::Http2::CodecStats& stats_;\n  Network::Connection& connection_;\n  const uint32_t max_headers_kb_;\n  const uint32_t max_headers_count_;\n  uint32_t per_stream_buffer_limit_;\n  bool allow_metadata_;\n  const bool stream_error_on_invalid_http_messaging_;\n  bool flood_detected_;\n\n  // Set if the type of frame that is about to be sent is PING or SETTINGS with the ACK flag set, or\n  // RST_STREAM.\n  bool is_outbound_flood_monitored_control_frame_ = 0;\n  ::Envoy::Http::Http2::ProtocolConstraints protocol_constraints_;\n\n  // For the flood mitigation to work the onSend callback must be called once for each outbound\n  // frame. 
This is what the nghttp2 library is doing, however this is not documented. The\n  // Http2FloodMitigationTest.* tests in test/integration/http2_integration_test.cc will break if\n  // this changes in the future. Also it is important that onSend does not do partial writes, as the\n  // nghttp2 library will keep calling this callback to write the rest of the frame.\n  ssize_t onSend(const uint8_t* data, size_t length);\n\n  // Some browsers (e.g. WebKit-based browsers: https://bugs.webkit.org/show_bug.cgi?id=210108) have\n  // a problem with processing empty trailers (END_STREAM | END_HEADERS with zero length HEADERS) of\n  // an HTTP/2 response as reported here: https://github.com/envoyproxy/envoy/issues/10514. This is\n  // controlled by \"envoy.reloadable_features.http2_skip_encoding_empty_trailers\" runtime feature\n  // flag.\n  const bool skip_encoding_empty_trailers_;\n\nprivate:\n  virtual ConnectionCallbacks& callbacks() PURE;\n  virtual int onBeginHeaders(const nghttp2_frame* frame) PURE;\n  int onData(int32_t stream_id, const uint8_t* data, size_t len);\n  int onBeforeFrameReceived(const nghttp2_frame_hd* hd);\n  int onFrameReceived(const nghttp2_frame* frame);\n  int onBeforeFrameSend(const nghttp2_frame* frame);\n  int onFrameSend(const nghttp2_frame* frame);\n  int onError(absl::string_view error);\n  virtual int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) PURE;\n  int onInvalidFrame(int32_t stream_id, int error_code);\n  int onStreamClose(int32_t stream_id, uint32_t error_code);\n  int onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len);\n  int onMetadataFrameComplete(int32_t stream_id, bool end_metadata);\n  ssize_t packMetadata(int32_t stream_id, uint8_t* buf, size_t len);\n  // Adds buffer fragment for a new outbound frame to the supplied Buffer::OwnedImpl.\n  // Returns true on success or false if outbound queue limits were exceeded.\n  bool addOutboundFrameFragment(Buffer::OwnedImpl& output, 
const uint8_t* data, size_t length);\n  virtual Envoy::Http::Http2::ProtocolConstraints::ReleasorProc\n  trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) PURE;\n  virtual bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE;\n  void sendKeepalive();\n  void onKeepaliveResponse();\n  void onKeepaliveResponseTimeout();\n\n  bool dispatching_ : 1;\n  bool raised_goaway_ : 1;\n  bool pending_deferred_reset_ : 1;\n  Event::SchedulableCallbackPtr protocol_constraint_violation_callback_;\n  Random::RandomGenerator& random_;\n  Event::TimerPtr keepalive_send_timer_;\n  Event::TimerPtr keepalive_timeout_timer_;\n  std::chrono::milliseconds keepalive_interval_;\n  std::chrono::milliseconds keepalive_timeout_;\n  uint32_t keepalive_interval_jitter_percent_;\n};\n\n/**\n * HTTP/2 client connection codec.\n */\nclass ClientConnectionImpl : public ClientConnection, public ConnectionImpl {\npublic:\n  using SessionFactory = Nghttp2SessionFactory;\n  ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks& callbacks,\n                       Http::Http2::CodecStats& stats, Random::RandomGenerator& random,\n                       const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                       const uint32_t max_response_headers_kb,\n                       const uint32_t max_response_headers_count,\n                       SessionFactory& http2_session_factory);\n\n  // Http::ClientConnection\n  RequestEncoder& newStream(ResponseDecoder& response_decoder) override;\n\nprivate:\n  // ConnectionImpl\n  ConnectionCallbacks& callbacks() override { return callbacks_; }\n  int onBeginHeaders(const nghttp2_frame* frame) override;\n  int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override;\n\n  // Presently client connections do not track or check queue limits for outbound frames and do not\n  // terminate connections when queue limits are exceeded. 
The primary reason is the complexity of\n  // the clean-up of upstream connections. The clean-up of upstream connection causes RST_STREAM\n  // messages to be sent on corresponding downstream connections. This may actually trigger flood\n  // mitigation on the downstream connections, which causes an exception to be thrown in the middle\n  // of the clean-up loop, leaving resources in a half cleaned up state.\n  // TODO(yanavlasov): add flood mitigation for upstream connections as well.\n  Envoy::Http::Http2::ProtocolConstraints::ReleasorProc trackOutboundFrames(bool) override {\n    return Envoy::Http::Http2::ProtocolConstraints::ReleasorProc([]() {});\n  }\n  bool trackInboundFrames(const nghttp2_frame_hd*, uint32_t) override { return true; }\n  void checkProtocolConstraintViolation() override {}\n\n  Http::ConnectionCallbacks& callbacks_;\n};\n\n/**\n * HTTP/2 server connection codec.\n */\nclass ServerConnectionImpl : public ServerConnection, public ConnectionImpl {\npublic:\n  ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks,\n                       Http::Http2::CodecStats& stats, Random::RandomGenerator& random,\n                       const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                       const uint32_t max_request_headers_kb,\n                       const uint32_t max_request_headers_count,\n                       envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n                           headers_with_underscores_action);\n\nprivate:\n  // ConnectionImpl\n  ConnectionCallbacks& callbacks() override { return callbacks_; }\n  int onBeginHeaders(const nghttp2_frame* frame) override;\n  int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override;\n  Envoy::Http::Http2::ProtocolConstraints::ReleasorProc\n  trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) override;\n  bool trackInboundFrames(const 
nghttp2_frame_hd* hd, uint32_t padding_length) override;\n  absl::optional<int> checkHeaderNameForUnderscores(absl::string_view header_name) override;\n\n  /**\n   * Check protocol constraint violations outside of the dispatching context.\n   * This method ASSERTs if it is called in the dispatching context.\n   */\n  void checkProtocolConstraintViolation() override;\n\n  // Http::Connection\n  // The reason for overriding the dispatch method is to do flood mitigation only when\n  // processing data from downstream client. Doing flood mitigation when processing upstream\n  // responses makes clean-up tricky, which needs to be improved (see comments for the\n  // ClientConnectionImpl::checkProtocolConstraintsStatus method). The dispatch method on the\n  // ServerConnectionImpl objects is called only when processing data from the downstream client in\n  // the ConnectionManagerImpl::onData method.\n  Http::Status dispatch(Buffer::Instance& data) override;\n  Http::Status innerDispatch(Buffer::Instance& data) override;\n\n  ServerConnectionCallbacks& callbacks_;\n\n  // This flag indicates that downstream data is being dispatched and turns on flood mitigation\n  // in the checkMaxOutbound*Framed methods.\n  bool dispatching_downstream_data_{false};\n\n  // The action to take when a request header name contains underscore characters.\n  envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n      headers_with_underscores_action_;\n};\n\n} // namespace Http2\n} // namespace Legacy\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/codec_stats.h",
    "content": "#pragma once\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/thread.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\n/**\n * All stats for the HTTP/2 codec. @see stats_macros.h\n */\n#define ALL_HTTP2_CODEC_STATS(COUNTER, GAUGE)                                                      \\\n  COUNTER(dropped_headers_with_underscores)                                                        \\\n  COUNTER(header_overflow)                                                                         \\\n  COUNTER(headers_cb_no_stream)                                                                    \\\n  COUNTER(inbound_empty_frames_flood)                                                              \\\n  COUNTER(inbound_priority_frames_flood)                                                           \\\n  COUNTER(inbound_window_update_frames_flood)                                                      \\\n  COUNTER(outbound_control_flood)                                                                  \\\n  COUNTER(outbound_flood)                                                                          \\\n  COUNTER(requests_rejected_with_underscores_in_headers)                                           \\\n  COUNTER(rx_messaging_error)                                                                      \\\n  COUNTER(rx_reset)                                                                                \\\n  COUNTER(trailers)                                                                                \\\n  COUNTER(tx_flush_timeout)                                                                        \\\n  COUNTER(tx_reset)                                                                                \\\n  COUNTER(keepalive_timeout)                                                                       \\\n  GAUGE(streams_active, Accumulate)                                             
                   \\\n  GAUGE(pending_send_bytes, Accumulate)\n\n/**\n * Wrapper struct for the HTTP/2 codec stats. @see stats_macros.h\n */\nstruct CodecStats {\n  using AtomicPtr = Thread::AtomicPtr<CodecStats, Thread::AtomicPtrAllocMode::DeleteOnDestruct>;\n\n  static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) {\n    return *ptr.get([&scope]() -> CodecStats* {\n      return new CodecStats{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(scope, \"http2.\"),\n                                                  POOL_GAUGE_PREFIX(scope, \"http2.\"))};\n    });\n  }\n\n  ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/conn_pool.cc",
    "content": "#include \"common/http/http2/conn_pool.h\"\n\n#include <cstdint>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/http/http2/codec_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator,\n                           Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n                           const Network::ConnectionSocket::OptionsSharedPtr& options,\n                           const Network::TransportSocketOptionsSharedPtr& transport_socket_options)\n    : HttpConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options,\n                           transport_socket_options, Protocol::Http2),\n      random_generator_(random_generator) {}\n\nConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); }\n\nEnvoy::ConnectionPool::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() {\n  return std::make_unique<ActiveClient>(*this);\n}\nvoid ConnPoolImpl::onGoAway(ActiveClient& client, Http::GoAwayErrorCode) {\n  ENVOY_CONN_LOG(debug, \"remote goaway\", *client.codec_client_);\n  host_->cluster().stats().upstream_cx_close_notify_.inc();\n  if (client.state_ != ActiveClient::State::DRAINING) {\n    if (client.codec_client_->numActiveRequests() == 0) {\n      client.codec_client_->close();\n    } else {\n      transitionActiveClientState(client, ActiveClient::State::DRAINING);\n    }\n  }\n}\n\nvoid ConnPoolImpl::onStreamDestroy(ActiveClient& client) {\n  onStreamClosed(client, false);\n\n  // If we are destroying this stream because of a disconnect, do not check for drain here. 
We will\n  // wait until the connection has been fully drained of streams and then check in the connection\n  // event callback.\n  if (!client.closed_with_active_rq_) {\n    checkForDrained();\n  }\n}\n\nvoid ConnPoolImpl::onStreamReset(ActiveClient& client, Http::StreamResetReason reason) {\n  if (reason == StreamResetReason::ConnectionTermination ||\n      reason == StreamResetReason::ConnectionFailure) {\n    host_->cluster().stats().upstream_rq_pending_failure_eject_.inc();\n    client.closed_with_active_rq_ = true;\n  } else if (reason == StreamResetReason::LocalReset) {\n    host_->cluster().stats().upstream_rq_tx_reset_.inc();\n  } else if (reason == StreamResetReason::RemoteReset) {\n    host_->cluster().stats().upstream_rq_rx_reset_.inc();\n  }\n}\n\nuint64_t ConnPoolImpl::maxStreamsPerConnection() {\n  uint64_t max_streams_config = host_->cluster().maxRequestsPerConnection();\n  return (max_streams_config != 0) ? max_streams_config : DEFAULT_MAX_STREAMS;\n}\n\nConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent)\n    : Envoy::Http::ActiveClient(\n          parent, parent.maxStreamsPerConnection(),\n          parent.host_->cluster().http2Options().max_concurrent_streams().value()) {\n  codec_client_->setCodecClientCallbacks(*this);\n  codec_client_->setCodecConnectionCallbacks(*this);\n\n  parent.host_->cluster().stats().upstream_cx_http2_total_.inc();\n}\n\nbool ConnPoolImpl::ActiveClient::closingWithIncompleteStream() const {\n  return closed_with_active_rq_;\n}\n\nRequestEncoder& ConnPoolImpl::ActiveClient::newStreamEncoder(ResponseDecoder& response_decoder) {\n  return codec_client_->newStream(response_decoder);\n}\n\nCodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) {\n  CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP2, std::move(data.connection_),\n                                           data.host_description_, dispatcher_, random_generator_)};\n  return 
codec;\n}\n\nConnectionPool::InstancePtr\nallocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator,\n                 Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n                 const Network::ConnectionSocket::OptionsSharedPtr& options,\n                 const Network::TransportSocketOptionsSharedPtr& transport_socket_options) {\n  return std::make_unique<Http::Http2::ProdConnPoolImpl>(\n      dispatcher, random_generator, host, priority, options, transport_socket_options);\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/conn_pool.h",
    "content": "#pragma once\n\n#include <cstdint>\n\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/http/codec_client.h\"\n#include \"common/http/conn_pool_base.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\n/**\n * Implementation of a \"connection pool\" for HTTP/2. This mainly handles stats as well as\n * shifting to a new connection if we reach max streams on the primary. This is a base class\n * used for both the prod implementation as well as the testing one.\n */\nclass ConnPoolImpl : public Envoy::Http::HttpConnPoolImplBase {\npublic:\n  ConnPoolImpl(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator,\n               Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n               const Network::ConnectionSocket::OptionsSharedPtr& options,\n               const Network::TransportSocketOptionsSharedPtr& transport_socket_options);\n\n  ~ConnPoolImpl() override;\n\n  // Http::ConnectionPool::Instance\n  Http::Protocol protocol() const override { return Http::Protocol::Http2; }\n\n  // ConnPoolImplBase\n  Envoy::ConnectionPool::ActiveClientPtr instantiateActiveClient() override;\n\nprotected:\n  class ActiveClient : public CodecClientCallbacks,\n                       public Http::ConnectionCallbacks,\n                       public Envoy::Http::ActiveClient {\n  public:\n    ActiveClient(ConnPoolImpl& parent);\n    ~ActiveClient() override = default;\n\n    ConnPoolImpl& parent() { return static_cast<ConnPoolImpl&>(parent_); }\n\n    // ConnPoolImpl::ActiveClient\n    bool closingWithIncompleteStream() const override;\n    RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) override;\n\n    // CodecClientCallbacks\n    void onStreamDestroy() override { parent().onStreamDestroy(*this); }\n    void onStreamReset(Http::StreamResetReason reason) override {\n      parent().onStreamReset(*this, reason);\n    }\n\n    // Http::ConnectionCallbacks\n    void 
onGoAway(Http::GoAwayErrorCode error_code) override {\n      parent().onGoAway(*this, error_code);\n    }\n\n    bool closed_with_active_rq_{};\n  };\n\n  uint64_t maxStreamsPerConnection();\n  void movePrimaryClientToDraining();\n  void onGoAway(ActiveClient& client, Http::GoAwayErrorCode error_code);\n  void onStreamDestroy(ActiveClient& client);\n  void onStreamReset(ActiveClient& client, Http::StreamResetReason reason);\n\n  // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe\n  // side we do 2^29.\n  static const uint64_t DEFAULT_MAX_STREAMS = (1 << 29);\n\n  Random::RandomGenerator& random_generator_;\n};\n\n/**\n * Production implementation of the HTTP/2 connection pool.\n */\nclass ProdConnPoolImpl : public ConnPoolImpl {\npublic:\n  using ConnPoolImpl::ConnPoolImpl;\n\nprivate:\n  CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override;\n};\n\nConnectionPool::InstancePtr\nallocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator,\n                 Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority,\n                 const Network::ConnectionSocket::OptionsSharedPtr& options,\n                 const Network::TransportSocketOptionsSharedPtr& transport_socket_options);\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/metadata_decoder.cc",
    "content": "#include \"common/http/http2/metadata_decoder.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nMetadataDecoder::MetadataDecoder(MetadataCallback cb) : metadata_map_(new MetadataMap()) {\n  nghttp2_hd_inflater* inflater;\n  int rv = nghttp2_hd_inflate_new(&inflater);\n  ASSERT(rv == 0);\n  inflater_ = Inflater(inflater);\n\n  ASSERT(cb != nullptr);\n  callback_ = std::move(cb);\n}\n\nbool MetadataDecoder::receiveMetadata(const uint8_t* data, size_t len) {\n  ASSERT(data != nullptr && len != 0);\n  payload_.add(data, len);\n\n  total_payload_size_ += payload_.length();\n  return total_payload_size_ <= max_payload_size_bound_;\n}\n\nbool MetadataDecoder::onMetadataFrameComplete(bool end_metadata) {\n  bool success = decodeMetadataPayloadUsingNghttp2(end_metadata);\n  if (!success) {\n    return false;\n  }\n\n  if (end_metadata) {\n    callback_(std::move(metadata_map_));\n    metadata_map_ = std::make_unique<MetadataMap>();\n  }\n  return true;\n}\n\nbool MetadataDecoder::decodeMetadataPayloadUsingNghttp2(bool end_metadata) {\n  Buffer::RawSliceVector slices = payload_.getRawSlices();\n  const int num_slices = slices.size();\n\n  // Data consumed by nghttp2 so far.\n  ssize_t payload_size_consumed = 0;\n  // Decodes header block using nghttp2.\n  for (int i = 0; i < num_slices; i++) {\n    nghttp2_nv nv;\n    int inflate_flags = 0;\n    auto slice = slices[i];\n    // is_end indicates if the data in slice is the last data in the current\n    // header block.\n    bool is_end = i == (num_slices - 1) && end_metadata;\n\n    // Feeds data to nghttp2 to decode.\n    while (slice.len_ > 0) {\n      ssize_t result =\n          nghttp2_hd_inflate_hd2(inflater_.get(), &nv, &inflate_flags,\n                                 reinterpret_cast<uint8_t*>(slice.mem_), slice.len_, is_end);\n      if (result < 0 || (result == 0 && slice.len_ > 0)) {\n        // If 
decoding fails, or there is data left in slice, but no data can be consumed by\n        // nghttp2, return false.\n        ENVOY_LOG(error, \"Failed to decode payload.\");\n        return false;\n      }\n\n      slice.mem_ = reinterpret_cast<void*>(reinterpret_cast<uint8_t*>(slice.mem_) + result);\n      slice.len_ -= result;\n      payload_size_consumed += result;\n\n      if (inflate_flags & NGHTTP2_HD_INFLATE_EMIT) {\n        // One header key value pair has been successfully decoded.\n        metadata_map_->emplace(std::string(reinterpret_cast<char*>(nv.name), nv.namelen),\n                               std::string(reinterpret_cast<char*>(nv.value), nv.valuelen));\n      }\n    }\n\n    if (slice.len_ == 0 && is_end) {\n      // After one header block is decoded, reset inflater.\n      ASSERT(end_metadata);\n      nghttp2_hd_inflate_end_headers(inflater_.get());\n    }\n  }\n\n  payload_.drain(payload_size_consumed);\n  return true;\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/metadata_decoder.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/http/codec.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nclass MetadataEncoderDecoderTest_VerifyEncoderDecoderOnMultipleMetadataMaps_Test;\n\n// A class that decodes METADATA payload in the format of HTTP/2 header block into MetadataMap, a\n// map of string key value pairs.\nclass MetadataDecoder : Logger::Loggable<Logger::Id::http2> {\npublic:\n  /**\n   * @param cb is the decoder's callback function. The callback function is called when the decoder\n   * finishes decoding metadata.\n   */\n  MetadataDecoder(MetadataCallback cb);\n\n  /**\n   * Calls this function when METADATA frame payload is received. The payload doesn't need to be\n   * complete.\n   * @param data is the pointer to the start of the payload.\n   * @param len is the size of the received payload.\n   * @return whether Metadata is received successfully.\n   */\n  bool receiveMetadata(const uint8_t* data, size_t len);\n\n  /**\n   * Calls when a complete METADATA frame is received. 
The function will decode METADATA received.\n   * If the frame is the last one in the group, the function triggers the registered callback\n   * function callback_.\n   * @param end_metadata indicates if all the METADATA has been received.\n   * @return whether the operation succeeds.\n   */\n  bool onMetadataFrameComplete(bool end_metadata);\n\nprivate:\n  friend class MetadataEncoderDecoderTest_VerifyEncoderDecoderOnMultipleMetadataMaps_Test;\n  friend class MetadataEncoderDecoderTest_VerifyEncoderDecoderMultipleMetadataReachSizeLimit_Test;\n  /**\n   * Decodes METADATA payload using nghttp2.\n   * @param end_metadata indicates is END_METADATA is true.\n   * @return if decoding succeeds.\n   */\n  bool decodeMetadataPayloadUsingNghttp2(bool end_metadata);\n\n  // Metadata that is currently being decoded.\n  MetadataMapPtr metadata_map_;\n\n  // Metadata event callback function.\n  MetadataCallback callback_;\n\n  // Payload received.\n  Buffer::OwnedImpl payload_;\n\n  // Payload size limit. If the total payload received exceeds the limit, fails the connection.\n  const uint64_t max_payload_size_bound_ = 1024 * 1024;\n\n  uint64_t total_payload_size_ = 0;\n\n  // TODO(soya3129): consider sharing the inflater with all streams in a connection. Caveat:\n  // inflater failure on one stream can impact other streams.\n  using Inflater = CSmartPtr<nghttp2_hd_inflater, nghttp2_hd_inflate_del>;\n  Inflater inflater_;\n};\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/metadata_encoder.cc",
    "content": "#include \"common/http/http2/metadata_encoder.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nMetadataEncoder::MetadataEncoder() {\n  nghttp2_hd_deflater* deflater;\n  const int rv = nghttp2_hd_deflate_new(&deflater, header_table_size_);\n  ASSERT(rv == 0);\n  deflater_ = Deflater(deflater);\n}\n\nbool MetadataEncoder::createPayloadMetadataMap(const MetadataMap& metadata_map) {\n  ASSERT(!metadata_map.empty());\n\n  const uint64_t payload_size_before = payload_.length();\n  const bool success = createHeaderBlockUsingNghttp2(metadata_map);\n  const uint64_t payload_size_after = payload_.length();\n\n  if (!success || payload_size_after == payload_size_before) {\n    ENVOY_LOG(error, \"Failed to create payload.\");\n    return false;\n  }\n\n  payload_size_queue_.push_back(payload_size_after - payload_size_before);\n  return true;\n}\n\nbool MetadataEncoder::createPayload(const MetadataMapVector& metadata_map_vector) {\n  ASSERT(payload_.length() == 0);\n  ASSERT(payload_size_queue_.empty());\n\n  for (const auto& metadata_map : metadata_map_vector) {\n    if (!createPayloadMetadataMap(*metadata_map)) {\n      return false;\n    }\n  }\n  return true;\n}\n\nbool MetadataEncoder::createHeaderBlockUsingNghttp2(const MetadataMap& metadata_map) {\n  // Constructs input for nghttp2 deflater (encoder). 
Encoding method used is\n  // \"HPACK Literal Header Field Never Indexed\".\n  const size_t nvlen = metadata_map.size();\n  absl::FixedArray<nghttp2_nv> nva(nvlen);\n  size_t i = 0;\n  for (const auto& header : metadata_map) {\n    nva[i++] = {const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(header.first.data())),\n                const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(header.second.data())),\n                header.first.size(), header.second.size(), NGHTTP2_NV_FLAG_NO_INDEX};\n  }\n\n  // Estimates the upper bound of output payload.\n  const size_t buflen = nghttp2_hd_deflate_bound(deflater_.get(), nva.begin(), nvlen);\n  if (buflen + payload_.length() > max_payload_size_bound_) {\n    ENVOY_LOG(error, \"Payload size {} exceeds the max bound.\", buflen);\n    return false;\n  }\n  Buffer::RawSlice iovec;\n  payload_.reserve(buflen, &iovec, 1);\n  ASSERT(iovec.len_ >= buflen);\n\n  // Creates payload using nghttp2.\n  uint8_t* buf = reinterpret_cast<uint8_t*>(iovec.mem_);\n  const ssize_t result = nghttp2_hd_deflate_hd(deflater_.get(), buf, buflen, nva.begin(), nvlen);\n  RELEASE_ASSERT(result > 0,\n                 fmt::format(\"Failed to deflate metadata payload, with result {}.\", result));\n  iovec.len_ = result;\n\n  payload_.commit(&iovec, 1);\n\n  return true;\n}\n\nbool MetadataEncoder::hasNextFrame() { return payload_.length() > 0; }\n\nssize_t MetadataEncoder::packNextFramePayload(uint8_t* buf, const size_t len) {\n  // If this RELEASE_ASSERT fires, nghttp2 has requested that we pack more\n  // METADATA frames than we have payload to pack. 
This could mean that the\n  // HTTP/2 codec has submitted too many METADATA frames to nghttp2, or it could\n  // mean that nghttp2 has called its pack_extension_callback more than once per\n  // METADATA frame the codec submitted.\n  RELEASE_ASSERT(!payload_size_queue_.empty(),\n                 \"No payload remaining to pack into a METADATA frame.\");\n  const uint64_t current_payload_size =\n      std::min(METADATA_MAX_PAYLOAD_SIZE, payload_size_queue_.front());\n\n  // nghttp2 guarantees len is at least 16KiB. If the check fails, please verify\n  // NGHTTP2_MAX_PAYLOADLEN is consistent with METADATA_MAX_PAYLOAD_SIZE.\n  RELEASE_ASSERT(len >= current_payload_size,\n                 fmt::format(\"METADATA payload buffer is too small ({}, expected at least {}).\",\n                             len, METADATA_MAX_PAYLOAD_SIZE));\n\n  // Copies payload to the destination memory.\n  payload_.copyOut(0, current_payload_size, buf);\n\n  // Updates the remaining size of the current metadata_map. If no data left, removes the size entry\n  // from the queue.\n  payload_size_queue_.front() -= current_payload_size;\n  if (payload_size_queue_.front() == 0) {\n    payload_size_queue_.pop_front();\n  }\n\n  // Releases the payload that has been copied out.\n  payload_.drain(current_payload_size);\n\n  return current_payload_size;\n}\n\nstd::vector<uint8_t> MetadataEncoder::payloadFrameFlagBytes() {\n  std::vector<uint8_t> flags;\n  flags.reserve(payload_size_queue_.size());\n  for (uint64_t payload_size : payload_size_queue_) {\n    uint64_t frame_count = payload_size / METADATA_MAX_PAYLOAD_SIZE +\n                           ((payload_size % METADATA_MAX_PAYLOAD_SIZE) == 0 ? 0 : 1);\n    for (uint64_t i = 0; i < frame_count - 1; ++i) {\n      flags.push_back(0);\n    }\n    flags.push_back(END_METADATA_FLAG);\n  }\n  return flags;\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/metadata_encoder.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <queue>\n#include <string>\n\n#include \"envoy/http/codec.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/c_smart_ptr.h\"\n#include \"common/common/logger.h\"\n\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\n/**\n * A class that creates and sends METADATA payload. The METADATA payload is a group of string key\n * value pairs encoded in HTTP/2 header blocks. METADATA frames are constructed in two steps: first,\n * the stream submits the frames' headers to nghttp2, and later, when nghttp2 prepares to send the\n * frames, it calls back into this class in order to construct their payloads.\n */\nclass MetadataEncoder : Logger::Loggable<Logger::Id::http2> {\npublic:\n  MetadataEncoder();\n\n  /**\n   * Creates wire format HTTP/2 header block from a vector of metadata maps.\n   * @param metadata_map_vector supplies the metadata map vector to encode.\n   * @return whether encoding is successful.\n   */\n  bool createPayload(const MetadataMapVector& metadata_map_vector);\n\n  /**\n   * @return true if there is payload left to be packed.\n   */\n  bool hasNextFrame();\n\n  /**\n   * Creates the metadata frame payload for the next metadata frame.\n   * @param buf is the pointer to the destination memory where the payload should be copied to. len\n   * is the largest length the memory can hold.\n   * @return the size of frame payload, or -1 for failure.\n   */\n  ssize_t packNextFramePayload(uint8_t* buf, const size_t len);\n\n  /**\n   * Returns a vector denoting the sequence of METADATA frames that this encoder expects to pack,\n   * and the flags to be set in each frame. 
This counts only frames that the encoder has not already\n   * packed; to get the full sequence of frames corresponding to the metadata map vector, call this\n   * before submitting any frames to nghttp2.\n   * @return A vector indicating the header byte in each METADATA frame, in sequence.\n   */\n  std::vector<uint8_t> payloadFrameFlagBytes();\n\nprivate:\n  /**\n   * Creates wire format HTTP/2 header block from metadata_map.\n   * @param metadata_map supplies METADATA to encode.\n   * @return whether encoding is successful.\n   */\n  bool createPayloadMetadataMap(const MetadataMap& metadata_map);\n\n  /**\n   * Creates wired format header blocks using nghttp2.\n   * @param metadata_map supplies METADATA to encode.\n   * @return true if the creation succeeds.\n   */\n  bool createHeaderBlockUsingNghttp2(const MetadataMap& metadata_map);\n\n  // The METADATA payload to be sent.\n  Buffer::OwnedImpl payload_;\n\n  // Max payload size bound.\n  const uint64_t max_payload_size_bound_ = 1024 * 1024;\n\n  // Default HPACK table size.\n  const size_t header_table_size_ = 4096;\n\n  // TODO(soya3129): share deflater among all encoders in the same connection. The benefit is less\n  // memory, and the caveat is encoding error on one stream can impact other streams.\n  using Deflater = CSmartPtr<nghttp2_hd_deflater, nghttp2_hd_deflate_del>;\n  Deflater deflater_;\n\n  // Stores the remaining payload size of each metadata_map to be packed. The payload size is needed\n  // so that we know where to delineate between different metadata_maps in the payload_ buffer. The\n  // payload size gets updated when the payload is packed into metadata frames.\n  std::deque<uint64_t> payload_size_queue_;\n};\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/nghttp2.cc",
    "content": "#include \"common/http/http2/nghttp2.h\"\n\n#include \"common/common/logger.h\"\n\n// nghttp2 fails to convey the POSIX ssize_t declaration\n// that Microsoft declines to implement. Pick up a valid\n// ssize_t declaration for win32 in our platform.h\n#include \"envoy/common/platform.h\"\n\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nvoid initializeNghttp2Logging() {\n  // Event when ENVOY_NGHTTP2_TRACE is not set, we install a debug logger, to prevent nghttp2\n  // logging directly to stdout at -l trace.\n  nghttp2_set_debug_vprintf_callback([](const char* format, va_list args) {\n    if (std::getenv(\"ENVOY_NGHTTP2_TRACE\") != nullptr) {\n      char buf[2048];\n      const int n = ::vsnprintf(buf, sizeof(buf), format, args);\n      // nghttp2 inserts new lines, but we also insert a new line in the ENVOY_LOG\n      // below, so avoid double \\n.\n      if (n >= 1 && static_cast<size_t>(n) < sizeof(buf) && buf[n - 1] == '\\n') {\n        buf[n - 1] = '\\0';\n      }\n      ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::http2), trace, \"nghttp2: {}\", buf);\n    }\n  });\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/nghttp2.h",
    "content": "#pragma once\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\n/**\n * Setup nghttp2 trace-level logging for when debugging.\n */\nvoid initializeNghttp2Logging();\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/protocol_constraints.cc",
    "content": "#include \"common/http/http2/protocol_constraints.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nProtocolConstraints::ProtocolConstraints(\n    CodecStats& stats, const envoy::config::core::v3::Http2ProtocolOptions& http2_options)\n    : stats_(stats), max_outbound_frames_(http2_options.max_outbound_frames().value()),\n      frame_buffer_releasor_([this]() { releaseOutboundFrame(); }),\n      max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()),\n      control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }),\n      max_consecutive_inbound_frames_with_empty_payload_(\n          http2_options.max_consecutive_inbound_frames_with_empty_payload().value()),\n      max_inbound_priority_frames_per_stream_(\n          http2_options.max_inbound_priority_frames_per_stream().value()),\n      max_inbound_window_update_frames_per_data_frame_sent_(\n          http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()) {}\n\nProtocolConstraints::ReleasorProc\nProtocolConstraints::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) {\n  ++outbound_frames_;\n  if (is_outbound_flood_monitored_control_frame) {\n    ++outbound_control_frames_;\n  }\n  return is_outbound_flood_monitored_control_frame ? 
control_frame_buffer_releasor_\n                                                   : frame_buffer_releasor_;\n}\n\nvoid ProtocolConstraints::releaseOutboundFrame() {\n  ASSERT(outbound_frames_ >= 1);\n  --outbound_frames_;\n}\n\nvoid ProtocolConstraints::releaseOutboundControlFrame() {\n  ASSERT(outbound_control_frames_ >= 1);\n  --outbound_control_frames_;\n  releaseOutboundFrame();\n}\n\nStatus ProtocolConstraints::checkOutboundFrameLimits() {\n  // Stop checking for further violations after the first failure.\n  if (!status_.ok()) {\n    return status_;\n  }\n\n  if (outbound_frames_ > max_outbound_frames_) {\n    stats_.outbound_flood_.inc();\n    return status_ = bufferFloodError(\"Too many frames in the outbound queue.\");\n  }\n  if (outbound_control_frames_ > max_outbound_control_frames_) {\n    stats_.outbound_control_flood_.inc();\n    return status_ = bufferFloodError(\"Too many control frames in the outbound queue.\");\n  }\n  return okStatus();\n}\n\nStatus ProtocolConstraints::trackInboundFrames(const nghttp2_frame_hd* hd,\n                                               uint32_t padding_length) {\n  switch (hd->type) {\n  case NGHTTP2_HEADERS:\n  case NGHTTP2_CONTINUATION:\n    // Track new streams.\n    if (hd->flags & NGHTTP2_FLAG_END_HEADERS) {\n      inbound_streams_++;\n    }\n    FALLTHRU;\n  case NGHTTP2_DATA:\n    // Track frames with an empty payload and no end stream flag.\n    if (hd->length - padding_length == 0 && !(hd->flags & NGHTTP2_FLAG_END_STREAM)) {\n      consecutive_inbound_frames_with_empty_payload_++;\n    } else {\n      consecutive_inbound_frames_with_empty_payload_ = 0;\n    }\n    break;\n  case NGHTTP2_PRIORITY:\n    inbound_priority_frames_++;\n    break;\n  case NGHTTP2_WINDOW_UPDATE:\n    inbound_window_update_frames_++;\n    break;\n  default:\n    break;\n  }\n\n  status_.Update(checkInboundFrameLimits());\n  return status_;\n}\n\nStatus ProtocolConstraints::checkInboundFrameLimits() {\n  // Stop checking for further 
violations after the first failure.\n  if (!status_.ok()) {\n    return status_;\n  }\n\n  if (consecutive_inbound_frames_with_empty_payload_ >\n      max_consecutive_inbound_frames_with_empty_payload_) {\n    stats_.inbound_empty_frames_flood_.inc();\n    return inboundFramesWithEmptyPayloadError();\n  }\n\n  if (inbound_priority_frames_ >\n      static_cast<uint64_t>(max_inbound_priority_frames_per_stream_) * (1 + inbound_streams_)) {\n    stats_.inbound_priority_frames_flood_.inc();\n    return bufferFloodError(\"Too many PRIORITY frames\");\n  }\n\n  if (inbound_window_update_frames_ >\n      1 + 2 * (inbound_streams_ +\n               max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_)) {\n    stats_.inbound_window_update_frames_flood_.inc();\n    return bufferFloodError(\"Too many WINDOW_UPDATE frames\");\n  }\n\n  return okStatus();\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http2/protocol_constraints.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/http/status.h\"\n\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\n//  Class for detecting abusive peers and validating additional constraints imposed by Envoy.\n//  This class does not check protocol compliance with the H/2 standard, as this is checked by\n//  protocol framer/codec. Currently implemented constraints:\n//  1. detection of control frame (i.e. PING) initiated floods.\n//  2. detection of outbound DATA or HEADER frame floods.\n//  4. zero length, PRIORITY and WINDOW_UPDATE floods.\n\nclass ProtocolConstraints {\npublic:\n  using ReleasorProc = std::function<void()>;\n\n  explicit ProtocolConstraints(CodecStats& stats,\n                               const envoy::config::core::v3::Http2ProtocolOptions& http2_options);\n\n  // Return ok status if no protocol constraints were violated.\n  // Return error status of the first detected violation. Subsequent violations of constraints\n  // do not reset the error status or increment stat counters.\n  const Status& status() const { return status_; }\n\n  // Increment counters of pending (buffered for sending to the peer) outbound frames.\n  // If the `is_outbound_flood_monitored_control_frame` is false only the counter for all frame\n  // types is incremented. 
If the `is_outbound_flood_monitored_control_frame` is true, both the\n  // control frame and all frame types counters are incremented.\n  // Returns callable for decrementing frame counters when frames was successfully written to\n  // the underlying transport socket object.\n  // To check if outbound frame constraints were violated call the `status()` method.\n  // TODO(yanavlasov): return StatusOr<ReleasorProc> when flood checks are implemented for both\n  // directions.\n  ReleasorProc incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame);\n\n  // Track received frames of various types.\n  // Return an error status if inbound frame constraints were violated.\n  Status trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length);\n  // Increment the number of DATA frames sent to the peer.\n  void incrementOutboundDataFrameCount() { ++outbound_data_frames_; }\n\n  Status checkOutboundFrameLimits();\n\nprivate:\n  void releaseOutboundFrame();\n  void releaseOutboundControlFrame();\n  Status checkInboundFrameLimits();\n\n  Status status_;\n  CodecStats& stats_;\n  // This counter keeps track of the number of outbound frames of all types (these that were\n  // buffered in the underlying connection but not yet written into the socket). If this counter\n  // exceeds the `max_outbound_frames_' value the connection is terminated.\n  uint32_t outbound_frames_ = 0;\n  // Maximum number of outbound frames. Initialized from corresponding http2_protocol_options.\n  // Default value is 10000.\n  const uint32_t max_outbound_frames_;\n  ReleasorProc frame_buffer_releasor_;\n\n  // This counter keeps track of the number of outbound frames of types PING, SETTINGS and\n  // RST_STREAM (these that were buffered in the underlying connection but not yet written into the\n  // socket). 
If this counter exceeds the `max_outbound_control_frames_' value the connection is\n  // terminated.\n  uint32_t outbound_control_frames_ = 0;\n  // Maximum number of outbound frames of types PING, SETTINGS and RST_STREAM. Initialized from\n  // corresponding http2_protocol_options. Default value is 1000.\n  const uint32_t max_outbound_control_frames_;\n  ReleasorProc control_frame_buffer_releasor_;\n\n  // This counter keeps track of the number of consecutive inbound frames of types HEADERS,\n  // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds\n  // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated.\n  uint32_t consecutive_inbound_frames_with_empty_payload_ = 0;\n  // Maximum number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA without\n  // a payload. Initialized from corresponding http2_protocol_options. Default value is 1.\n  const uint32_t max_consecutive_inbound_frames_with_empty_payload_;\n\n  // This counter keeps track of the number of inbound streams.\n  uint32_t inbound_streams_ = 0;\n  // This counter keeps track of the number of inbound PRIORITY frames. If this counter exceeds\n  // the value calculated using this formula:\n  //\n  //     max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_)\n  //\n  // the connection is terminated.\n  uint64_t inbound_priority_frames_ = 0;\n  // Maximum number of inbound PRIORITY frames per stream. Initialized from corresponding\n  // http2_protocol_options. Default value is 100.\n  const uint32_t max_inbound_priority_frames_per_stream_;\n\n  // This counter keeps track of the number of inbound WINDOW_UPDATE frames. 
If this counter exceeds\n  // the value calculated using this formula:\n  //\n  //     1 + 2 * (inbound_streams_ +\n  //              max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_)\n  //\n  // the connection is terminated.\n  uint64_t inbound_window_update_frames_ = 0;\n  // This counter keeps track of the number of outbound DATA frames.\n  uint64_t outbound_data_frames_ = 0;\n  // Maximum number of inbound WINDOW_UPDATE frames per outbound DATA frame sent. Initialized\n  // from corresponding http2_protocol_options. Default value is 10.\n  const uint32_t max_inbound_window_update_frames_per_data_frame_sent_;\n};\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http3/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"quic_codec_factory_lib\",\n    hdrs = [\"quic_codec_factory.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/network:connection_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    deps = [\"//source/common/singleton:const_singleton\"],\n)\n"
  },
  {
    "path": "source/common/http/http3/quic_codec_factory.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/network/connection.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n// A factory to create Http::ServerConnection instance for QUIC.\nclass QuicHttpServerConnectionFactory : public Config::UntypedFactory {\npublic:\n  ~QuicHttpServerConnectionFactory() override = default;\n\n  virtual std::unique_ptr<ServerConnection>\n  createQuicServerConnection(Network::Connection& connection, ConnectionCallbacks& callbacks) PURE;\n\n  std::string category() const override { return \"envoy.quic_client_codec\"; }\n};\n\n// A factory to create Http::ClientConnection instance for QUIC.\nclass QuicHttpClientConnectionFactory : public Config::UntypedFactory {\npublic:\n  ~QuicHttpClientConnectionFactory() override = default;\n\n  virtual std::unique_ptr<ClientConnection>\n  createQuicClientConnection(Network::Connection& connection, ConnectionCallbacks& callbacks) PURE;\n\n  std::string category() const override { return \"envoy.quic_server_codec\"; }\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/http3/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass QuicCodecNameValues {\npublic:\n  // QUICHE is the only QUIC implementation for now.\n  const std::string Quiche = \"quiche\";\n};\n\nusing QuicCodecNames = ConstSingleton<QuicCodecNameValues>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/message_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/message.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/non_copyable.h\"\n#include \"common/http/header_map_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Implementation of Http::Message. This implementation does not support streaming.\n */\ntemplate <class HeadersInterfaceType, class HeadersImplType, class TrailersInterfaceType,\n          class TrailersImplType>\nclass MessageImpl : public Message<HeadersInterfaceType, TrailersInterfaceType> {\npublic:\n  MessageImpl() : headers_(HeadersImplType::create()) {}\n  MessageImpl(std::unique_ptr<HeadersInterfaceType>&& headers) : headers_(std::move(headers)) {}\n\n  // Http::Message\n  HeadersInterfaceType& headers() override { return *headers_; }\n  Buffer::Instance& body() override { return body_; }\n  TrailersInterfaceType* trailers() override { return trailers_.get(); }\n  void trailers(std::unique_ptr<TrailersInterfaceType>&& trailers) override {\n    trailers_ = std::move(trailers);\n  }\n  std::string bodyAsString() const override { return body_.toString(); }\n\nprivate:\n  std::unique_ptr<HeadersInterfaceType> headers_;\n  Buffer::OwnedImpl body_;\n  std::unique_ptr<TrailersInterfaceType> trailers_;\n};\n\nusing RequestMessageImpl =\n    MessageImpl<RequestHeaderMap, RequestHeaderMapImpl, RequestTrailerMap, RequestTrailerMapImpl>;\nusing ResponseMessageImpl = MessageImpl<ResponseHeaderMap, ResponseHeaderMapImpl,\n                                        ResponseTrailerMap, ResponseTrailerMapImpl>;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/path_utility.cc",
    "content": "#include \"common/http/path_utility.h\"\n\n#include \"common/chromium_url/url_canon.h\"\n#include \"common/chromium_url/url_canon_stdstring.h\"\n#include \"common/common/logger.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nnamespace {\nabsl::optional<std::string> canonicalizePath(absl::string_view original_path) {\n  std::string canonical_path;\n  chromium_url::Component in_component(0, original_path.size());\n  chromium_url::Component out_component;\n  chromium_url::StdStringCanonOutput output(&canonical_path);\n  if (!chromium_url::CanonicalizePath(original_path.data(), in_component, &output,\n                                      &out_component)) {\n    return absl::nullopt;\n  } else {\n    output.Complete();\n    return absl::make_optional(std::move(canonical_path));\n  }\n}\n} // namespace\n\n/* static */\nbool PathUtil::canonicalPath(RequestHeaderMap& headers) {\n  ASSERT(headers.Path());\n  const auto original_path = headers.getPathValue();\n  // canonicalPath is supposed to apply on path component in URL instead of :path header\n  const auto query_pos = original_path.find('?');\n  auto normalized_path_opt = canonicalizePath(\n      query_pos == original_path.npos\n          ? original_path\n          : absl::string_view(original_path.data(), query_pos) // '?' is not included\n  );\n\n  if (!normalized_path_opt.has_value()) {\n    return false;\n  }\n  auto& normalized_path = normalized_path_opt.value();\n  const absl::string_view query_suffix =\n      query_pos == original_path.npos\n          ? 
absl::string_view{}\n          : absl::string_view{original_path.data() + query_pos, original_path.size() - query_pos};\n  if (!query_suffix.empty()) {\n    normalized_path.insert(normalized_path.end(), query_suffix.begin(), query_suffix.end());\n  }\n  headers.setPath(normalized_path);\n  return true;\n}\n\nvoid PathUtil::mergeSlashes(RequestHeaderMap& headers) {\n  ASSERT(headers.Path());\n  const auto original_path = headers.getPathValue();\n  // Only operate on path component in URL.\n  const absl::string_view::size_type query_start = original_path.find('?');\n  const absl::string_view path = original_path.substr(0, query_start);\n  const absl::string_view query = absl::ClippedSubstr(original_path, query_start);\n  if (path.find(\"//\") == absl::string_view::npos) {\n    return;\n  }\n  const absl::string_view path_prefix = absl::StartsWith(path, \"/\") ? \"/\" : absl::string_view();\n  const absl::string_view path_suffix = absl::EndsWith(path, \"/\") ? \"/\" : absl::string_view();\n  headers.setPath(absl::StrCat(path_prefix,\n                               absl::StrJoin(absl::StrSplit(path, '/', absl::SkipEmpty()), \"/\"),\n                               path_suffix, query));\n}\n\nabsl::string_view PathUtil::removeQueryAndFragment(const absl::string_view path) {\n  absl::string_view ret = path;\n  // Trim query parameters and/or fragment if present.\n  size_t offset = ret.find_first_of(\"?#\");\n  if (offset != absl::string_view::npos) {\n    ret.remove_suffix(ret.length() - offset);\n  }\n  return ret;\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/path_utility.h",
    "content": "#pragma once\n\n#include \"envoy/http/header_map.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Path helper extracted from chromium project.\n */\nclass PathUtil {\npublic:\n  // Returns if the normalization succeeds.\n  // If it is successful, the path header in header path will be updated with the normalized path.\n  // Requires the Path header be present.\n  static bool canonicalPath(RequestHeaderMap& headers);\n  // Merges two or more adjacent slashes in path part of URI into one.\n  // Requires the Path header be present.\n  static void mergeSlashes(RequestHeaderMap& headers);\n  // Removes the query and/or fragment string (if present) from the input path.\n  // For example, this function returns \"/data\" for the input path \"/data#fragment?param=value\".\n  static absl::string_view removeQueryAndFragment(const absl::string_view path);\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/request_id_extension_impl.cc",
    "content": "#include \"common/http/request_id_extension_impl.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/request_id_extension_uuid_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nnamespace {\n\n// NoopRequestIDExtension is the implementation used outside of HTTP context.\nclass NoopRequestIDExtension : public RequestIDExtension {\npublic:\n  void set(RequestHeaderMap&, bool) override {}\n  void setInResponse(ResponseHeaderMap&, const RequestHeaderMap&) override {}\n  bool modBy(const RequestHeaderMap&, uint64_t&, uint64_t) override { return false; }\n  TraceStatus getTraceStatus(const RequestHeaderMap&) override { return TraceStatus::NoTrace; }\n  void setTraceStatus(RequestHeaderMap&, TraceStatus) override {}\n};\n\n} // namespace\n\nRequestIDExtensionSharedPtr RequestIDExtensionFactory::fromProto(\n    const envoy::extensions::filters::network::http_connection_manager::v3::RequestIDExtension&\n        config,\n    Server::Configuration::FactoryContext& context) {\n  const std::string type{TypeUtil::typeUrlToDescriptorFullName(config.typed_config().type_url())};\n  auto* factory =\n      Registry::FactoryRegistry<Server::Configuration::RequestIDExtensionFactory>::getFactoryByType(\n          type);\n  if (factory == nullptr) {\n    throw EnvoyException(\n        fmt::format(\"Didn't find a registered implementation for type: '{}'\", type));\n  }\n\n  ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig(\n      config.typed_config(), context.messageValidationVisitor(), *factory);\n  return factory->createExtensionInstance(*message, context);\n}\n\nRequestIDExtensionSharedPtr\nRequestIDExtensionFactory::defaultInstance(Envoy::Random::RandomGenerator& random) {\n  return std::make_shared<UUIDRequestIDExtension>(random);\n}\n\nRequestIDExtensionSharedPtr RequestIDExtensionFactory::noopInstance() {\n  MUTABLE_CONSTRUCT_ON_FIRST_USE(std::shared_ptr<RequestIDExtension>,\n     
                            std::make_shared<NoopRequestIDExtension>());\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/request_id_extension_impl.h",
    "content": "#pragma once\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/server/request_id_extension_config.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Http {\n/**\n * Request ID Utilities factory that reads the configuration from proto.\n */\nclass RequestIDExtensionFactory {\npublic:\n  /**\n   * Return a newly created instance of the default RequestIDExtension implementation.\n   */\n  static RequestIDExtensionSharedPtr defaultInstance(Envoy::Random::RandomGenerator& random);\n\n  /**\n   * Return a globally shared instance of the noop RequestIDExtension implementation.\n   */\n  static RequestIDExtensionSharedPtr noopInstance();\n\n  /**\n   * Read a RequestIDExtension definition from proto and create it.\n   */\n  static RequestIDExtensionSharedPtr fromProto(\n      const envoy::extensions::filters::network::http_connection_manager::v3::RequestIDExtension&\n          config,\n      Server::Configuration::FactoryContext& context);\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/request_id_extension_uuid_impl.cc",
    "content": "#include \"common/http/request_id_extension_uuid_impl.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/random_generator.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nvoid UUIDRequestIDExtension::set(RequestHeaderMap& request_headers, bool force) {\n  if (!force && request_headers.RequestId()) {\n    return;\n  }\n\n  // TODO(PiotrSikora) PERF: Write UUID directly to the header map.\n  std::string uuid = random_.uuid();\n  ASSERT(!uuid.empty());\n  request_headers.setRequestId(uuid);\n}\n\nvoid UUIDRequestIDExtension::setInResponse(ResponseHeaderMap& response_headers,\n                                           const RequestHeaderMap& request_headers) {\n  if (request_headers.RequestId()) {\n    response_headers.setRequestId(request_headers.getRequestIdValue());\n  }\n}\n\nbool UUIDRequestIDExtension::modBy(const RequestHeaderMap& request_headers, uint64_t& out,\n                                   uint64_t mod) {\n  if (request_headers.RequestId() == nullptr) {\n    return false;\n  }\n  const std::string uuid(request_headers.getRequestIdValue());\n  if (uuid.length() < 8) {\n    return false;\n  }\n\n  uint64_t value;\n  if (!StringUtil::atoull(uuid.substr(0, 8).c_str(), value, 16)) {\n    return false;\n  }\n\n  out = value % mod;\n  return true;\n}\n\nTraceStatus UUIDRequestIDExtension::getTraceStatus(const RequestHeaderMap& request_headers) {\n  if (request_headers.RequestId() == nullptr) {\n    return TraceStatus::NoTrace;\n  }\n  absl::string_view uuid = request_headers.getRequestIdValue();\n  if (uuid.length() != Random::RandomGeneratorImpl::UUID_LENGTH) {\n    return TraceStatus::NoTrace;\n  }\n\n  switch (uuid[TRACE_BYTE_POSITION]) {\n  case TRACE_FORCED:\n    return TraceStatus::Forced;\n  case TRACE_SAMPLED:\n    return TraceStatus::Sampled;\n  case TRACE_CLIENT:\n    return TraceStatus::Client;\n 
 default:\n    return TraceStatus::NoTrace;\n  }\n}\n\nvoid UUIDRequestIDExtension::setTraceStatus(RequestHeaderMap& request_headers, TraceStatus status) {\n  if (request_headers.RequestId() == nullptr) {\n    return;\n  }\n  absl::string_view uuid_view = request_headers.getRequestIdValue();\n  if (uuid_view.length() != Random::RandomGeneratorImpl::UUID_LENGTH) {\n    return;\n  }\n  std::string uuid(uuid_view);\n\n  switch (status) {\n  case TraceStatus::Forced:\n    uuid[TRACE_BYTE_POSITION] = TRACE_FORCED;\n    break;\n  case TraceStatus::Client:\n    uuid[TRACE_BYTE_POSITION] = TRACE_CLIENT;\n    break;\n  case TraceStatus::Sampled:\n    uuid[TRACE_BYTE_POSITION] = TRACE_SAMPLED;\n    break;\n  case TraceStatus::NoTrace:\n    uuid[TRACE_BYTE_POSITION] = NO_TRACE;\n    break;\n  }\n  request_headers.setRequestId(uuid);\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/request_id_extension_uuid_impl.h",
    "content": "#pragma once\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/http/request_id_extension.h\"\n\n#include \"common/runtime/runtime_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n// UUIDRequestIDExtension is the default implementation if no other extension is explicitly\n// configured.\nclass UUIDRequestIDExtension : public RequestIDExtension {\npublic:\n  explicit UUIDRequestIDExtension(Envoy::Random::RandomGenerator& random) : random_(random) {}\n\n  void set(RequestHeaderMap& request_headers, bool force) override;\n  void setInResponse(ResponseHeaderMap& response_headers,\n                     const RequestHeaderMap& request_headers) override;\n  bool modBy(const RequestHeaderMap& request_headers, uint64_t& out, uint64_t mod) override;\n  TraceStatus getTraceStatus(const RequestHeaderMap& request_headers) override;\n  void setTraceStatus(RequestHeaderMap& request_headers, TraceStatus status) override;\n\nprivate:\n  // Reference to the random generator used to generate new request IDs\n  Envoy::Random::RandomGenerator& random_;\n\n  // Byte on this position has predefined value of 4 for UUID4.\n  static const int TRACE_BYTE_POSITION = 14;\n\n  // Value of '9' is chosen randomly to distinguish between freshly generated uuid4 and the\n  // one modified because we sample trace.\n  static const char TRACE_SAMPLED = '9';\n\n  // Value of 'a' is chosen randomly to distinguish between freshly generated uuid4 and the\n  // one modified because we force trace.\n  static const char TRACE_FORCED = 'a';\n\n  // Value of 'b' is chosen randomly to distinguish between freshly generated uuid4 and the\n  // one modified because of client trace.\n  static const char TRACE_CLIENT = 'b';\n\n  // Initial value for freshly generated uuid4.\n  static const char NO_TRACE = '4';\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/rest_api_fetcher.cc",
    "content": "#include \"common/http/rest_api_fetcher.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nRestApiFetcher::RestApiFetcher(Upstream::ClusterManager& cm, const std::string& remote_cluster_name,\n                               Event::Dispatcher& dispatcher, Random::RandomGenerator& random,\n                               std::chrono::milliseconds refresh_interval,\n                               std::chrono::milliseconds request_timeout)\n    : remote_cluster_name_(remote_cluster_name), cm_(cm), random_(random),\n      refresh_interval_(refresh_interval), request_timeout_(request_timeout),\n      refresh_timer_(dispatcher.createTimer([this]() -> void { refresh(); })) {}\n\nRestApiFetcher::~RestApiFetcher() {\n  if (active_request_) {\n    active_request_->cancel();\n  }\n}\n\nvoid RestApiFetcher::initialize() { refresh(); }\n\nvoid RestApiFetcher::onSuccess(const Http::AsyncClient::Request& request,\n                               Http::ResponseMessagePtr&& response) {\n  uint64_t response_code = Http::Utility::getResponseStatus(response->headers());\n  if (response_code == enumToInt(Http::Code::NotModified)) {\n    requestComplete();\n    return;\n  } else if (response_code != enumToInt(Http::Code::OK)) {\n    onFailure(request, Http::AsyncClient::FailureReason::Reset);\n    return;\n  }\n\n  try {\n    parseResponse(*response);\n  } catch (EnvoyException& e) {\n    onFetchFailure(Config::ConfigUpdateFailureReason::UpdateRejected, &e);\n  }\n\n  requestComplete();\n}\n\nvoid RestApiFetcher::onFailure(const Http::AsyncClient::Request&,\n                               Http::AsyncClient::FailureReason reason) {\n  // Currently Http::AsyncClient::FailureReason only has one value: \"Reset\".\n  ASSERT(reason == 
Http::AsyncClient::FailureReason::Reset);\n  onFetchFailure(Config::ConfigUpdateFailureReason::ConnectionFailure, nullptr);\n  requestComplete();\n}\n\nvoid RestApiFetcher::refresh() {\n  RequestMessagePtr message(new RequestMessageImpl());\n  createRequest(*message);\n  message->headers().setHost(remote_cluster_name_);\n  active_request_ = cm_.httpAsyncClientForCluster(remote_cluster_name_)\n                        .send(std::move(message), *this,\n                              AsyncClient::RequestOptions().setTimeout(request_timeout_));\n}\n\nvoid RestApiFetcher::requestComplete() {\n  onFetchComplete();\n  active_request_ = nullptr;\n\n  // Add refresh jitter based on the configured interval.\n  std::chrono::milliseconds final_delay =\n      refresh_interval_ + std::chrono::milliseconds(random_.random() % refresh_interval_.count());\n\n  refresh_timer_->enableTimer(final_delay);\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/rest_api_fetcher.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * A helper base class used to fetch a REST API at a jittered periodic interval. Once initialize()\n * is called, the API will be fetched and events raised.\n */\nclass RestApiFetcher : public Http::AsyncClient::Callbacks {\nprotected:\n  RestApiFetcher(Upstream::ClusterManager& cm, const std::string& remote_cluster_name,\n                 Event::Dispatcher& dispatcher, Random::RandomGenerator& random,\n                 std::chrono::milliseconds refresh_interval,\n                 std::chrono::milliseconds request_timeout);\n  ~RestApiFetcher() override;\n\n  /**\n   * Start the fetch sequence. This should be called once.\n   */\n  void initialize();\n\n  /**\n   * This will be called when a fetch is about to happen. It should be overridden to fill the\n   * request message with a valid request.\n   */\n  virtual void createRequest(RequestMessage& request) PURE;\n\n  /**\n   * This will be called when a 200 response is returned by the API with the response message.\n   */\n  virtual void parseResponse(const ResponseMessage& response) PURE;\n\n  /**\n   * This will be called either in the success case or in the failure case for each fetch. It can\n   * be used to hold common post request logic.\n   */\n  virtual void onFetchComplete() PURE;\n\n  /**\n   * This will be called if the fetch fails (either due to non-200 response, network error, etc.).\n   * @param reason supplies the fetch failure reason.\n   * @param e supplies any exception data on why the fetch failed. 
May be nullptr.\n   */\n  virtual void onFetchFailure(Config::ConfigUpdateFailureReason reason,\n                              const EnvoyException* e) PURE;\n\nprotected:\n  const std::string remote_cluster_name_;\n  Upstream::ClusterManager& cm_;\n\nprivate:\n  void refresh();\n  void requestComplete();\n\n  // Http::AsyncClient::Callbacks\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override;\n  void onFailure(const Http::AsyncClient::Request&,\n                 Http::AsyncClient::FailureReason reason) override;\n  void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&,\n                                    const Http::ResponseHeaderMap*) override {}\n\n  Random::RandomGenerator& random_;\n  const std::chrono::milliseconds refresh_interval_;\n  const std::chrono::milliseconds request_timeout_;\n  Event::TimerPtr refresh_timer_;\n  Http::AsyncClient::Request* active_request_{};\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/status.cc",
    "content": "#include \"common/http/status.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nnamespace {\n\nconstexpr absl::string_view EnvoyPayloadUrl = \"Envoy\";\n\nabsl::string_view statusCodeToString(StatusCode code) {\n  switch (code) {\n  case StatusCode::Ok:\n    return \"OK\";\n  case StatusCode::CodecProtocolError:\n    return \"CodecProtocolError\";\n  case StatusCode::BufferFloodError:\n    return \"BufferFloodError\";\n  case StatusCode::PrematureResponseError:\n    return \"PrematureResponseError\";\n  case StatusCode::CodecClientError:\n    return \"CodecClientError\";\n  case StatusCode::InboundFramesWithEmptyPayload:\n    return \"InboundFramesWithEmptyPayloadError\";\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nstruct EnvoyStatusPayload {\n  EnvoyStatusPayload(StatusCode status_code) : status_code_(status_code) {}\n  const StatusCode status_code_;\n};\n\nstruct PrematureResponsePayload : public EnvoyStatusPayload {\n  PrematureResponsePayload(Http::Code http_code)\n      : EnvoyStatusPayload(StatusCode::PrematureResponseError), http_code_(http_code) {}\n  const Http::Code http_code_;\n};\n\ntemplate <typename T> void storePayload(absl::Status& status, const T& payload) {\n  absl::Cord cord(absl::string_view(reinterpret_cast<const char*>(&payload), sizeof(payload)));\n  cord.Flatten(); // Flatten ahead of time for easier access later.\n  status.SetPayload(EnvoyPayloadUrl, std::move(cord));\n}\n\ntemplate <typename T = EnvoyStatusPayload> const T& getPayload(const absl::Status& status) {\n  // The only way to get a reference to the payload owned by the absl::Status is through the\n  // ForEachPayload method. 
All other methods create a copy of the payload, which is not convenient\n  // for peeking at the payload value.\n  const T* payload = nullptr;\n  status.ForEachPayload([&payload](absl::string_view url, const absl::Cord& cord) {\n    if (url == EnvoyPayloadUrl) {\n      ASSERT(!payload); // Status API guarantees to have one payload with given URL\n      auto data = cord.TryFlat();\n      ASSERT(data.has_value()); // EnvoyPayloadUrl cords are flattened ahead of time\n      ASSERT(data.value().length() >= sizeof(T), \"Invalid payload length\");\n      payload = reinterpret_cast<const T*>(data.value().data());\n    }\n  });\n  ASSERT(payload);\n  return *payload;\n}\n\n} // namespace\n\nstd::string toString(const Status& status) {\n  if (status.ok()) {\n    return status.ToString();\n  }\n  std::string text;\n  auto status_code = getStatusCode(status);\n  if (status_code != StatusCode::PrematureResponseError) {\n    absl::StrAppend(&text, statusCodeToString(status_code), \": \", status.message());\n  } else {\n    auto http_code = getPrematureResponseHttpCode(status);\n    absl::StrAppend(&text, \"PrematureResponseError: HTTP code: \", http_code, \": \",\n                    status.message());\n  }\n  return text;\n}\n\nStatus codecProtocolError(absl::string_view message) {\n  absl::Status status(absl::StatusCode::kInternal, message);\n  storePayload(status, EnvoyStatusPayload(StatusCode::CodecProtocolError));\n  return status;\n}\n\nStatus bufferFloodError(absl::string_view message) {\n  absl::Status status(absl::StatusCode::kInternal, message);\n  storePayload(status, EnvoyStatusPayload(StatusCode::BufferFloodError));\n  return status;\n}\n\nStatus prematureResponseError(absl::string_view message, Http::Code http_code) {\n  absl::Status status(absl::StatusCode::kInternal, message);\n  storePayload(status, PrematureResponsePayload(http_code));\n  return status;\n}\n\nStatus codecClientError(absl::string_view message) {\n  absl::Status 
status(absl::StatusCode::kInternal, message);\n  storePayload(status, EnvoyStatusPayload(StatusCode::CodecClientError));\n  return status;\n}\n\nStatus inboundFramesWithEmptyPayloadError() {\n  absl::Status status(absl::StatusCode::kInternal,\n                      \"Too many consecutive frames with an empty payload\");\n  storePayload(status, EnvoyStatusPayload(StatusCode::InboundFramesWithEmptyPayload));\n  return status;\n}\n\n// Methods for checking and extracting error information\nStatusCode getStatusCode(const Status& status) {\n  return status.ok() ? StatusCode::Ok : getPayload(status).status_code_;\n}\n\nbool isCodecProtocolError(const Status& status) {\n  return getStatusCode(status) == StatusCode::CodecProtocolError;\n}\n\nbool isBufferFloodError(const Status& status) {\n  return getStatusCode(status) == StatusCode::BufferFloodError;\n}\n\nbool isPrematureResponseError(const Status& status) {\n  return getStatusCode(status) == StatusCode::PrematureResponseError;\n}\n\nHttp::Code getPrematureResponseHttpCode(const Status& status) {\n  const auto& payload = getPayload<PrematureResponsePayload>(status);\n  ASSERT(payload.status_code_ == StatusCode::PrematureResponseError,\n         \"Must be PrematureResponseError\");\n  return payload.http_code_;\n}\n\nbool isCodecClientError(const Status& status) {\n  return getStatusCode(status) == StatusCode::CodecClientError;\n}\n\nbool isInboundFramesWithEmptyPayloadError(const Status& status) {\n  return getStatusCode(status) == StatusCode::InboundFramesWithEmptyPayload;\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/status.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <string>\n\n#include \"envoy/http/codes.h\"\n\n#include \"absl/status/status.h\"\n#include \"absl/strings/string_view.h\"\n\n/**\n * Facility for returning rich error information.\n * This facility is to be used in place of exceptions, in components where\n * exceptions safety is not guaranteed (i.e. codecs).\n *\n * Envoy::Status is an alias of absl::Status.\n * IMPORTANT: `absl::Status` constructor `absl::Status::code()` and absl::Status::ToString()`\n * methods must not be used as they will not return correct error information. Instead the error\n * value creating and corresponding error checking functions defined below must be used.\n * TODO(yanavlasov): add clang-tidy or lint check to enforce this.\n *\n * Usage example:\n *\n *  Envoy::Status Foo() {\n *    ...\n *    if (codec_error) {\n *      return CodecProtocolError(\"Invalid protocol\");\n *    }\n *    return Envoy::OkStatus();\n *  }\n *\n *  void Bar() {\n *    auto status = Foo();\n *    if (status.ok()) {\n *      ...\n *    } else {\n *      ASSERT(IsCodecProtocolError(status));\n *      ENVOY_LOG(debug, \"Codec error encountered: {}\", status.message());\n *    }\n *  }\n */\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Status codes for representing classes of Envoy errors.\n */\nenum class StatusCode : int {\n  Ok = 0,\n\n  /**\n   * Indicates a non-recoverable protocol error that should result in connection termination.\n   */\n  CodecProtocolError = 1,\n\n  /**\n   * Indicates detection of outbound frame queue flood.\n   */\n  BufferFloodError = 2,\n\n  /**\n   * Indicates a response is received on a connection that did not send a request. 
In practice\n   * this can only happen on HTTP/1.1 connections.\n   */\n  PrematureResponseError = 3,\n\n  /**\n   * Indicates a client (local) side error which should not happen.\n   */\n  CodecClientError = 4,\n\n  /**\n   * Indicates that peer sent too many consecutive DATA frames with empty payload.\n   */\n  InboundFramesWithEmptyPayload = 5,\n};\n\nusing Status = absl::Status;\n\ninline Status okStatus() { return absl::OkStatus(); }\n\n/**\n * Returns the combination of the error code name, message and any additional error attributes.\n */\nstd::string toString(const Status& status);\n\n/**\n * Functions for creating error values. The error code of the returned status object matches the\n * name of the function.\n */\nStatus codecProtocolError(absl::string_view message);\nStatus bufferFloodError(absl::string_view message);\nStatus prematureResponseError(absl::string_view message, Http::Code http_code);\nStatus codecClientError(absl::string_view message);\nStatus inboundFramesWithEmptyPayloadError();\n\n/**\n * Returns Envoy::StatusCode of the given status object.\n * If the status object does not contain valid Envoy::Status value the function will ASSERT.\n */\nStatusCode getStatusCode(const Status& status);\n\n/**\n * Returns true if the given status matches error code implied by the name of the function.\n */\nABSL_MUST_USE_RESULT bool isCodecProtocolError(const Status& status);\nABSL_MUST_USE_RESULT bool isBufferFloodError(const Status& status);\nABSL_MUST_USE_RESULT bool isPrematureResponseError(const Status& status);\nABSL_MUST_USE_RESULT bool isCodecClientError(const Status& status);\nABSL_MUST_USE_RESULT bool isInboundFramesWithEmptyPayloadError(const Status& status);\n\n/**\n * Returns Http::Code value of the PrematureResponseError status.\n * IsPrematureResponseError(status) must be true which is checked by ASSERT.\n */\nHttp::Code getPrematureResponseHttpCode(const Status& status);\n\n/**\n * Macro that checks return value of expression that results 
in Status and returns from\n * the current function is status is not OK.\n *\n * Example usage:\n *   Status foo() {\n *     RETURN_IF_ERROR(bar());\n *     return okStatus();\n *   }\n */\n\n#define RETURN_IF_ERROR(expr)                                                                      \\\n  do {                                                                                             \\\n    if (::Envoy::Http::Details::StatusAdapter adapter{(expr)}) {                                   \\\n    } else {                                                                                       \\\n      return std::move(adapter.status_);                                                           \\\n    }                                                                                              \\\n  } while (false)\n\nnamespace Details {\n// Helper class to convert `Status` to `bool` so it can be used inside `if` statements.\nstruct StatusAdapter {\n  StatusAdapter(const Status& status) : status_(status) {}\n  StatusAdapter(Status&& status) : status_(std::move(status)) {}\n\n  StatusAdapter(const StatusAdapter&) = delete;\n  StatusAdapter& operator=(const StatusAdapter&) = delete;\n\n  explicit operator bool() const { return status_.ok(); }\n\n  Status status_;\n};\n} // namespace Details\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/user_agent.cc",
    "content": "#include \"common/http/user_agent.h\"\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/network/connection.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/utility.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nUserAgentContext::UserAgentContext(Stats::SymbolTable& symbol_table)\n    : symbol_table_(symbol_table), pool_(symbol_table),\n      downstream_cx_length_ms_(pool_.add(\"downstream_cx_length_ms\")),\n      ios_(pool_.add(\"user_agent.ios\")), android_(pool_.add(\"user_agent.android\")),\n      downstream_cx_total_(pool_.add(\"downstream_cx_total\")),\n      downstream_cx_destroy_remote_active_rq_(pool_.add(\"downstream_cx_destroy_remote_active_rq\")),\n      downstream_rq_total_(pool_.add(\"downstream_rq_total\")) {}\n\nvoid UserAgent::completeConnectionLength(Stats::Timespan& span) {\n  if (stats_ != nullptr) {\n    stats_->downstream_cx_length_ms_.recordValue(span.elapsed().count());\n  }\n}\n\nUserAgentStats::UserAgentStats(Stats::StatName prefix, Stats::StatName device, Stats::Scope& scope,\n                               const UserAgentContext& context)\n    : downstream_cx_total_(Stats::Utility::counterFromElements(\n          scope, {prefix, device, context.downstream_cx_total_})),\n      downstream_cx_destroy_remote_active_rq_(Stats::Utility::counterFromElements(\n          scope, {prefix, device, context.downstream_cx_destroy_remote_active_rq_})),\n      downstream_rq_total_(Stats::Utility::counterFromElements(\n          scope, {prefix, device, context.downstream_rq_total_})),\n      downstream_cx_length_ms_(Stats::Utility::histogramFromElements(\n          scope, {prefix, device, context.downstream_cx_length_ms_},\n          Stats::Histogram::Unit::Milliseconds)) {\n  downstream_cx_total_.inc();\n  downstream_rq_total_.inc();\n}\n\nvoid 
UserAgent::initializeFromHeaders(const RequestHeaderMap& headers, Stats::StatName prefix,\n                                      Stats::Scope& scope) {\n  // We assume that the user-agent is consistent based on the first request.\n  if (stats_ == nullptr && !initialized_) {\n    initialized_ = true;\n\n    const absl::string_view user_agent = headers.getUserAgentValue();\n    if (!user_agent.empty()) {\n      if (user_agent.find(\"iOS\") != absl::string_view::npos) {\n        stats_ = std::make_unique<UserAgentStats>(prefix, context_.ios_, scope, context_);\n      } else if (user_agent.find(\"android\") != absl::string_view::npos) {\n        stats_ = std::make_unique<UserAgentStats>(prefix, context_.android_, scope, context_);\n      }\n    }\n  }\n}\n\nvoid UserAgent::onConnectionDestroy(Network::ConnectionEvent event, bool active_streams) {\n  if (stats_ != nullptr && active_streams && event == Network::ConnectionEvent::RemoteClose) {\n    stats_->downstream_cx_destroy_remote_active_rq_.inc();\n  }\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/user_agent.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Captures the stat tokens used for recording user-agent stats. These are\n * independent of scope.\n */\nstruct UserAgentContext {\n  UserAgentContext(Stats::SymbolTable& symbol_table);\n\n  Stats::SymbolTable& symbol_table_;\n  Stats::StatNamePool pool_;\n  Stats::StatName downstream_cx_length_ms_;\n  Stats::StatName ios_;\n  Stats::StatName android_;\n  Stats::StatName downstream_cx_total_;\n  Stats::StatName downstream_cx_destroy_remote_active_rq_;\n  Stats::StatName downstream_rq_total_;\n};\n\n/**\n * Captures the stats (counters and histograms) for user-agents. These are\n * established within a stats scope. You must supply a UserAgentContext so that\n * none of the symbols have to be looked up in the symbol-table in the\n * request-path.\n */\nstruct UserAgentStats {\n  UserAgentStats(Stats::StatName prefix, Stats::StatName device, Stats::Scope& scope,\n                 const UserAgentContext& context);\n\n  Stats::Counter& downstream_cx_total_;\n  Stats::Counter& downstream_cx_destroy_remote_active_rq_;\n  Stats::Counter& downstream_rq_total_;\n  Stats::Histogram& downstream_cx_length_ms_;\n};\n\n/**\n * Stats support for specific user agents.\n */\nclass UserAgent {\npublic:\n  UserAgent(const UserAgentContext& context) : context_(context) {}\n\n  /**\n   * Complete a connection length timespan for the target user agent.\n   * @param span supplies the timespan to complete.\n   */\n  void completeConnectionLength(Stats::Timespan& span);\n\n  /**\n   * Initialize the user agent from request headers. 
This is only done once and the user-agent\n   * is assumed to be the same for further requests.\n   * @param headers supplies the request headers.\n   * @param prefix supplies the stat prefix for the UA stats.\n   * @param scope supplies the backing stat scope.\n   */\n  void initializeFromHeaders(const RequestHeaderMap& headers, Stats::StatName prefix,\n                             Stats::Scope& scope);\n\n  /**\n   * Called when a connection is being destroyed.\n   * @param event supplies the network event that caused destruction.\n   * @param active_streams supplies whether there are still active streams at the time of closing.\n   */\n  void onConnectionDestroy(Network::ConnectionEvent event, bool active_streams);\n\n  void initStats(Stats::StatName prefix, Stats::StatName device, Stats::Scope& scope);\n\nprivate:\n  const UserAgentContext& context_;\n  bool initialized_{false};\n  std::unique_ptr<UserAgentStats> stats_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/utility.cc",
    "content": "#include \"common/http/utility.h\"\n\n#include <http_parser.h>\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/status.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/numbers.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Utility {\nHttp::Status exceptionToStatus(std::function<Http::Status(Buffer::Instance&)> dispatch,\n                               Buffer::Instance& data) {\n  Http::Status status;\n  try {\n    status = dispatch(data);\n    // TODO(#10878): Remove this when exception removal is complete. It is currently in migration,\n    // so dispatch may either return an error status or throw an exception. 
Soon we won't need to\n    // catch these exceptions, as all codec errors will be migrated to using error statuses that are\n    // returned from dispatch.\n  } catch (FrameFloodException& e) {\n    status = bufferFloodError(e.what());\n  } catch (CodecProtocolException& e) {\n    status = codecProtocolError(e.what());\n  } catch (PrematureResponseException& e) {\n    status = prematureResponseError(e.what(), e.responseCode());\n  }\n  return status;\n}\n} // namespace Utility\n} // namespace Http\nnamespace Http2 {\nnamespace Utility {\n\nnamespace {\n\nvoid validateCustomSettingsParameters(\n    const envoy::config::core::v3::Http2ProtocolOptions& options) {\n  std::vector<std::string> parameter_collisions, custom_parameter_collisions;\n  absl::node_hash_set<nghttp2_settings_entry, SettingsEntryHash, SettingsEntryEquals>\n      custom_parameters;\n  // User defined and named parameters with the same SETTINGS identifier can not both be set.\n  for (const auto& it : options.custom_settings_parameters()) {\n    ASSERT(it.identifier().value() <= std::numeric_limits<uint16_t>::max());\n    // Check for custom parameter inconsistencies.\n    const auto result = custom_parameters.insert(\n        {static_cast<int32_t>(it.identifier().value()), it.value().value()});\n    if (!result.second) {\n      if (result.first->value != it.value().value()) {\n        custom_parameter_collisions.push_back(\n            absl::StrCat(\"0x\", absl::Hex(it.identifier().value(), absl::kZeroPad2)));\n        // Fall through to allow unbatched exceptions to throw first.\n      }\n    }\n    switch (it.identifier().value()) {\n    case NGHTTP2_SETTINGS_ENABLE_PUSH:\n      if (it.value().value() == 1) {\n        throw EnvoyException(\"server push is not supported by Envoy and can not be enabled via a \"\n                             \"SETTINGS parameter.\");\n      }\n      break;\n    case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL:\n      // An exception is made for `allow_connect` which 
can't be checked for presence due to the\n      // use of a primitive type (bool).\n      throw EnvoyException(\"the \\\"allow_connect\\\" SETTINGS parameter must only be configured \"\n                           \"through the named field\");\n    case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE:\n      if (options.has_hpack_table_size()) {\n        parameter_collisions.push_back(\"hpack_table_size\");\n      }\n      break;\n    case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS:\n      if (options.has_max_concurrent_streams()) {\n        parameter_collisions.push_back(\"max_concurrent_streams\");\n      }\n      break;\n    case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE:\n      if (options.has_initial_stream_window_size()) {\n        parameter_collisions.push_back(\"initial_stream_window_size\");\n      }\n      break;\n    default:\n      // Ignore unknown parameters.\n      break;\n    }\n  }\n\n  if (!custom_parameter_collisions.empty()) {\n    throw EnvoyException(fmt::format(\n        \"inconsistent HTTP/2 custom SETTINGS parameter(s) detected; identifiers = {{{}}}\",\n        absl::StrJoin(custom_parameter_collisions, \",\")));\n  }\n  if (!parameter_collisions.empty()) {\n    throw EnvoyException(fmt::format(\n        \"the {{{}}} HTTP/2 SETTINGS parameter(s) can not be configured through both named and \"\n        \"custom parameters\",\n        absl::StrJoin(parameter_collisions, \",\")));\n  }\n}\n\n} // namespace\n\nconst uint32_t OptionsLimits::MIN_HPACK_TABLE_SIZE;\nconst uint32_t OptionsLimits::DEFAULT_HPACK_TABLE_SIZE;\nconst uint32_t OptionsLimits::MAX_HPACK_TABLE_SIZE;\nconst uint32_t OptionsLimits::MIN_MAX_CONCURRENT_STREAMS;\nconst uint32_t OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS;\nconst uint32_t OptionsLimits::MAX_MAX_CONCURRENT_STREAMS;\nconst uint32_t OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE;\nconst uint32_t OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE;\nconst uint32_t OptionsLimits::MAX_INITIAL_STREAM_WINDOW_SIZE;\nconst uint32_t 
OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE;\nconst uint32_t OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE;\nconst uint32_t OptionsLimits::MAX_INITIAL_CONNECTION_WINDOW_SIZE;\nconst uint32_t OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES;\nconst uint32_t OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES;\nconst uint32_t OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD;\nconst uint32_t OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM;\nconst uint32_t OptionsLimits::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT;\n\nenvoy::config::core::v3::Http2ProtocolOptions\ninitializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options,\n                             bool hcm_stream_error_set,\n                             const Protobuf::BoolValue& hcm_stream_error) {\n  auto ret = initializeAndValidateOptions(options);\n  if (Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.hcm_stream_error_on_invalid_message\") &&\n      !options.has_override_stream_error_on_invalid_http_message() && hcm_stream_error_set) {\n    ret.mutable_override_stream_error_on_invalid_http_message()->set_value(\n        hcm_stream_error.value());\n  }\n  return ret;\n}\n\nenvoy::config::core::v3::Http2ProtocolOptions\ninitializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options) {\n  envoy::config::core::v3::Http2ProtocolOptions options_clone(options);\n  // This will throw an exception when a custom parameter and a named parameter collide.\n  validateCustomSettingsParameters(options);\n\n  if (!options.has_override_stream_error_on_invalid_http_message()) {\n    options_clone.mutable_override_stream_error_on_invalid_http_message()->set_value(\n        options.stream_error_on_invalid_http_messaging());\n  }\n\n  if (!options_clone.has_hpack_table_size()) {\n    options_clone.mutable_hpack_table_size()->set_value(OptionsLimits::DEFAULT_HPACK_TABLE_SIZE);\n  }\n  
ASSERT(options_clone.hpack_table_size().value() <= OptionsLimits::MAX_HPACK_TABLE_SIZE);\n  if (!options_clone.has_max_concurrent_streams()) {\n    options_clone.mutable_max_concurrent_streams()->set_value(\n        OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS);\n  }\n  ASSERT(\n      options_clone.max_concurrent_streams().value() >= OptionsLimits::MIN_MAX_CONCURRENT_STREAMS &&\n      options_clone.max_concurrent_streams().value() <= OptionsLimits::MAX_MAX_CONCURRENT_STREAMS);\n  if (!options_clone.has_initial_stream_window_size()) {\n    options_clone.mutable_initial_stream_window_size()->set_value(\n        OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE);\n  }\n  ASSERT(options_clone.initial_stream_window_size().value() >=\n             OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE &&\n         options_clone.initial_stream_window_size().value() <=\n             OptionsLimits::MAX_INITIAL_STREAM_WINDOW_SIZE);\n  if (!options_clone.has_initial_connection_window_size()) {\n    options_clone.mutable_initial_connection_window_size()->set_value(\n        OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE);\n  }\n  ASSERT(options_clone.initial_connection_window_size().value() >=\n             OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE &&\n         options_clone.initial_connection_window_size().value() <=\n             OptionsLimits::MAX_INITIAL_CONNECTION_WINDOW_SIZE);\n  if (!options_clone.has_max_outbound_frames()) {\n    options_clone.mutable_max_outbound_frames()->set_value(\n        OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES);\n  }\n  if (!options_clone.has_max_outbound_control_frames()) {\n    options_clone.mutable_max_outbound_control_frames()->set_value(\n        OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES);\n  }\n  if (!options_clone.has_max_consecutive_inbound_frames_with_empty_payload()) {\n    options_clone.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(\n        
OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD);\n  }\n  if (!options_clone.has_max_inbound_priority_frames_per_stream()) {\n    options_clone.mutable_max_inbound_priority_frames_per_stream()->set_value(\n        OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM);\n  }\n  if (!options_clone.has_max_inbound_window_update_frames_per_data_frame_sent()) {\n    options_clone.mutable_max_inbound_window_update_frames_per_data_frame_sent()->set_value(\n        OptionsLimits::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT);\n  }\n\n  return options_clone;\n}\n\n} // namespace Utility\n} // namespace Http2\n\nnamespace Http {\n\nstatic const char kDefaultPath[] = \"/\";\n\nbool Utility::Url::initialize(absl::string_view absolute_url, bool is_connect) {\n  struct http_parser_url u;\n  http_parser_url_init(&u);\n  const int result =\n      http_parser_parse_url(absolute_url.data(), absolute_url.length(), is_connect, &u);\n\n  if (result != 0) {\n    return false;\n  }\n  if ((u.field_set & (1 << UF_HOST)) != (1 << UF_HOST) &&\n      (u.field_set & (1 << UF_SCHEMA)) != (1 << UF_SCHEMA)) {\n    return false;\n  }\n  scheme_ = absl::string_view(absolute_url.data() + u.field_data[UF_SCHEMA].off,\n                              u.field_data[UF_SCHEMA].len);\n\n  uint16_t authority_len = u.field_data[UF_HOST].len;\n  if ((u.field_set & (1 << UF_PORT)) == (1 << UF_PORT)) {\n    authority_len = authority_len + u.field_data[UF_PORT].len + 1;\n  }\n  host_and_port_ =\n      absl::string_view(absolute_url.data() + u.field_data[UF_HOST].off, authority_len);\n\n  // RFC allows the absolute-uri to not end in /, but the absolute path form\n  // must start with\n  uint64_t path_len = absolute_url.length() - (u.field_data[UF_HOST].off + hostAndPort().length());\n  if (path_len > 0) {\n    uint64_t path_beginning = u.field_data[UF_HOST].off + hostAndPort().length();\n    path_and_query_params_ = absl::string_view(absolute_url.data() + 
path_beginning, path_len);\n  } else if (!is_connect) {\n    path_and_query_params_ = absl::string_view(kDefaultPath, 1);\n  }\n  return true;\n}\n\nvoid Utility::appendXff(RequestHeaderMap& headers,\n                        const Network::Address::Instance& remote_address) {\n  if (remote_address.type() != Network::Address::Type::Ip) {\n    return;\n  }\n\n  headers.appendForwardedFor(remote_address.ip()->addressAsString(), \",\");\n}\n\nvoid Utility::appendVia(RequestOrResponseHeaderMap& headers, const std::string& via) {\n  // TODO(asraa): Investigate whether it is necessary to append with whitespace here by:\n  //     (a) Validating we do not expect whitespace in via headers\n  //     (b) Add runtime guarding in case users have upstreams which expect it.\n  headers.appendVia(via, \", \");\n}\n\nstd::string Utility::createSslRedirectPath(const RequestHeaderMap& headers) {\n  ASSERT(headers.Host());\n  ASSERT(headers.Path());\n  return fmt::format(\"https://{}{}\", headers.getHostValue(), headers.getPathValue());\n}\n\nUtility::QueryParams Utility::parseQueryString(absl::string_view url) {\n  size_t start = url.find('?');\n  if (start == std::string::npos) {\n    QueryParams params;\n    return params;\n  }\n\n  start++;\n  return parseParameters(url, start, /*decode_params=*/false);\n}\n\nUtility::QueryParams Utility::parseAndDecodeQueryString(absl::string_view url) {\n  size_t start = url.find('?');\n  if (start == std::string::npos) {\n    QueryParams params;\n    return params;\n  }\n\n  start++;\n  return parseParameters(url, start, /*decode_params=*/true);\n}\n\nUtility::QueryParams Utility::parseFromBody(absl::string_view body) {\n  return parseParameters(body, 0, /*decode_params=*/true);\n}\n\nUtility::QueryParams Utility::parseParameters(absl::string_view data, size_t start,\n                                              bool decode_params) {\n  QueryParams params;\n\n  while (start < data.size()) {\n    size_t end = data.find('&', start);\n    if (end 
== std::string::npos) {\n      end = data.size();\n    }\n    absl::string_view param(data.data() + start, end - start);\n\n    const size_t equal = param.find('=');\n    if (equal != std::string::npos) {\n      const auto param_name = StringUtil::subspan(data, start, start + equal);\n      const auto param_value = StringUtil::subspan(data, start + equal + 1, end);\n      params.emplace(decode_params ? PercentEncoding::decode(param_name) : param_name,\n                     decode_params ? PercentEncoding::decode(param_value) : param_value);\n    } else {\n      params.emplace(StringUtil::subspan(data, start, end), \"\");\n    }\n\n    start = end + 1;\n  }\n\n  return params;\n}\n\nabsl::string_view Utility::findQueryStringStart(const HeaderString& path) {\n  absl::string_view path_str = path.getStringView();\n  size_t query_offset = path_str.find('?');\n  if (query_offset == absl::string_view::npos) {\n    query_offset = path_str.length();\n  }\n  path_str.remove_prefix(query_offset);\n  return path_str;\n}\n\nstd::string Utility::parseCookieValue(const HeaderMap& headers, const std::string& key) {\n\n  std::string ret;\n\n  headers.iterateReverse([&key, &ret](const HeaderEntry& header) -> HeaderMap::Iterate {\n    // Find the cookie headers in the request (typically, there's only one).\n    if (header.key() == Http::Headers::get().Cookie.get()) {\n\n      // Split the cookie header into individual cookies.\n      for (const auto& s : StringUtil::splitToken(header.value().getStringView(), \";\")) {\n        // Find the key part of the cookie (i.e. the name of the cookie).\n        size_t first_non_space = s.find_first_not_of(\" \");\n        size_t equals_index = s.find('=');\n        if (equals_index == absl::string_view::npos) {\n          // The cookie is malformed if it does not have an `=`. 
Continue\n          // checking other cookies in this header.\n          continue;\n        }\n        const absl::string_view k = s.substr(first_non_space, equals_index - first_non_space);\n        // If the key matches, parse the value from the rest of the cookie string.\n        if (k == key) {\n          absl::string_view v = s.substr(equals_index + 1, s.size() - 1);\n\n          // Cookie values may be wrapped in double quotes.\n          // https://tools.ietf.org/html/rfc6265#section-4.1.1\n          if (v.size() >= 2 && v.back() == '\"' && v[0] == '\"') {\n            v = v.substr(1, v.size() - 2);\n          }\n          ret = std::string{v};\n          return HeaderMap::Iterate::Break;\n        }\n      }\n    }\n    return HeaderMap::Iterate::Continue;\n  });\n\n  return ret;\n}\n\nstd::string Utility::makeSetCookieValue(const std::string& key, const std::string& value,\n                                        const std::string& path, const std::chrono::seconds max_age,\n                                        bool httponly) {\n  std::string cookie_value;\n  // Best effort attempt to avoid numerous string copies.\n  cookie_value.reserve(value.size() + path.size() + 30);\n\n  cookie_value = absl::StrCat(key, \"=\\\"\", value, \"\\\"\");\n  if (max_age != std::chrono::seconds::zero()) {\n    absl::StrAppend(&cookie_value, \"; Max-Age=\", max_age.count());\n  }\n  if (!path.empty()) {\n    absl::StrAppend(&cookie_value, \"; Path=\", path);\n  }\n  if (httponly) {\n    absl::StrAppend(&cookie_value, \"; HttpOnly\");\n  }\n  return cookie_value;\n}\n\nuint64_t Utility::getResponseStatus(const ResponseHeaderMap& headers) {\n  const HeaderEntry* header = headers.Status();\n  uint64_t response_code;\n  if (!header || !absl::SimpleAtoi(headers.getStatusValue(), &response_code)) {\n    throw CodecClientException(\":status must be specified and a valid unsigned long\");\n  }\n  return response_code;\n}\n\nbool Utility::isUpgrade(const RequestOrResponseHeaderMap& 
headers) {\n  // In firefox the \"Connection\" request header value is \"keep-alive, Upgrade\",\n  // we should check if it contains the \"Upgrade\" token.\n  return (headers.Upgrade() &&\n          Envoy::StringUtil::caseFindToken(headers.getConnectionValue(), \",\",\n                                           Http::Headers::get().ConnectionValues.Upgrade.c_str()));\n}\n\nbool Utility::isH2UpgradeRequest(const RequestHeaderMap& headers) {\n  return headers.getMethodValue() == Http::Headers::get().MethodValues.Connect &&\n         headers.Protocol() && !headers.Protocol()->value().empty() &&\n         headers.Protocol()->value() != Headers::get().ProtocolValues.Bytestream;\n}\n\nbool Utility::isWebSocketUpgradeRequest(const RequestHeaderMap& headers) {\n  return (isUpgrade(headers) &&\n          absl::EqualsIgnoreCase(headers.getUpgradeValue(),\n                                 Http::Headers::get().UpgradeValues.WebSocket));\n}\n\nHttp1Settings\nUtility::parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& config) {\n  Http1Settings ret;\n  ret.allow_absolute_url_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, allow_absolute_url, true);\n  ret.accept_http_10_ = config.accept_http_10();\n  ret.default_host_for_http_10_ = config.default_host_for_http_10();\n  ret.enable_trailers_ = config.enable_trailers();\n  ret.allow_chunked_length_ = config.allow_chunked_length();\n\n  if (config.header_key_format().has_proper_case_words()) {\n    ret.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase;\n  } else {\n    ret.header_key_format_ = Http1Settings::HeaderKeyFormat::Default;\n  }\n\n  return ret;\n}\n\nHttp1Settings\nUtility::parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& config,\n                            const Protobuf::BoolValue& hcm_stream_error) {\n  Http1Settings ret = parseHttp1Settings(config);\n\n  if (config.has_override_stream_error_on_invalid_http_message()) {\n    // 
override_stream_error_on_invalid_http_message, if set, takes precedence over any HCM\n    // stream_error_on_invalid_http_message\n    ret.stream_error_on_invalid_http_message_ =\n        config.override_stream_error_on_invalid_http_message().value();\n  } else {\n    // fallback to HCM value\n    ret.stream_error_on_invalid_http_message_ = hcm_stream_error.value();\n  }\n\n  return ret;\n}\n\nvoid Utility::sendLocalReply(const bool& is_reset, StreamDecoderFilterCallbacks& callbacks,\n                             const LocalReplyData& local_reply_data) {\n  absl::string_view details;\n  if (callbacks.streamInfo().responseCodeDetails().has_value()) {\n    details = callbacks.streamInfo().responseCodeDetails().value();\n  };\n\n  sendLocalReply(\n      is_reset,\n      Utility::EncodeFunctions{nullptr, nullptr,\n                               [&](ResponseHeaderMapPtr&& headers, bool end_stream) -> void {\n                                 callbacks.encodeHeaders(std::move(headers), end_stream, details);\n                               },\n                               [&](Buffer::Instance& data, bool end_stream) -> void {\n                                 callbacks.encodeData(data, end_stream);\n                               }},\n      local_reply_data);\n}\n\nvoid Utility::sendLocalReply(const bool& is_reset, const EncodeFunctions& encode_functions,\n                             const LocalReplyData& local_reply_data) {\n  // encode_headers() may reset the stream, so the stream must not be reset before calling it.\n  ASSERT(!is_reset);\n\n  // rewrite_response will rewrite response code and body text.\n  Code response_code = local_reply_data.response_code_;\n  std::string body_text(local_reply_data.body_text_);\n  absl::string_view content_type(Headers::get().ContentTypeValues.Text);\n\n  ResponseHeaderMapPtr response_headers{createHeaderMap<ResponseHeaderMapImpl>(\n      {{Headers::get().Status, std::to_string(enumToInt(response_code))}})};\n\n  if 
(encode_functions.modify_headers_) {\n    encode_functions.modify_headers_(*response_headers);\n  }\n  if (encode_functions.rewrite_) {\n    encode_functions.rewrite_(*response_headers, response_code, body_text, content_type);\n  }\n\n  // Respond with a gRPC trailers-only response if the request is gRPC\n  if (local_reply_data.is_grpc_) {\n    response_headers->setStatus(std::to_string(enumToInt(Code::OK)));\n    response_headers->setReferenceContentType(Headers::get().ContentTypeValues.Grpc);\n    response_headers->setGrpcStatus(\n        std::to_string(enumToInt(local_reply_data.grpc_status_\n                                     ? local_reply_data.grpc_status_.value()\n                                     : Grpc::Utility::httpToGrpcStatus(enumToInt(response_code)))));\n    if (!body_text.empty() && !local_reply_data.is_head_request_) {\n      // TODO(dio): Probably it is worth to consider caching the encoded message based on gRPC\n      // status.\n      // JsonFormatter adds a '\\n' at the end. For header value, it should be removed.\n      // https://github.com/envoyproxy/envoy/blob/master/source/common/formatter/substitution_formatter.cc#L129\n      if (body_text[body_text.length() - 1] == '\\n') {\n        body_text = body_text.substr(0, body_text.length() - 1);\n      }\n      response_headers->setGrpcMessage(PercentEncoding::encode(body_text));\n    }\n    // The `modify_headers` function may have added content-length, remove it.\n    response_headers->removeContentLength();\n    encode_functions.encode_headers_(std::move(response_headers), true); // Trailers only response\n    return;\n  }\n\n  if (!body_text.empty()) {\n    response_headers->setContentLength(body_text.size());\n    // If the `rewrite` function has changed body_text or content-type is not set, set it.\n    // This allows `modify_headers` function to set content-type for the body. 
For example,\n    // router.direct_response is calling sendLocalReply and may need to set content-type for\n    // the body.\n    if (body_text != local_reply_data.body_text_ || response_headers->ContentType() == nullptr) {\n      response_headers->setReferenceContentType(content_type);\n    }\n  } else {\n    response_headers->removeContentLength();\n    response_headers->removeContentType();\n  }\n\n  if (local_reply_data.is_head_request_) {\n    encode_functions.encode_headers_(std::move(response_headers), true);\n    return;\n  }\n\n  encode_functions.encode_headers_(std::move(response_headers), body_text.empty());\n  // encode_headers() may have changed the referenced is_reset so we need to test it\n  if (!body_text.empty() && !is_reset) {\n    Buffer::OwnedImpl buffer(body_text);\n    encode_functions.encode_data_(buffer, true);\n  }\n}\n\nUtility::GetLastAddressFromXffInfo\nUtility::getLastAddressFromXFF(const Http::RequestHeaderMap& request_headers,\n                               uint32_t num_to_skip) {\n  const auto xff_header = request_headers.ForwardedFor();\n  if (xff_header == nullptr) {\n    return {nullptr, false};\n  }\n\n  absl::string_view xff_string(xff_header->value().getStringView());\n  static const std::string separator(\",\");\n  // Ignore the last num_to_skip addresses at the end of XFF.\n  for (uint32_t i = 0; i < num_to_skip; i++) {\n    const std::string::size_type last_comma = xff_string.rfind(separator);\n    if (last_comma == std::string::npos) {\n      return {nullptr, false};\n    }\n    xff_string = xff_string.substr(0, last_comma);\n  }\n  // The text after the last remaining comma, or the entirety of the string if there\n  // is no comma, is the requested IP address.\n  const std::string::size_type last_comma = xff_string.rfind(separator);\n  if (last_comma != std::string::npos && last_comma + separator.size() < xff_string.size()) {\n    xff_string = xff_string.substr(last_comma + separator.size());\n  }\n\n  // Ignore the 
whitespace, since they are allowed in HTTP lists (see RFC7239#section-7.1).\n  xff_string = StringUtil::ltrim(xff_string);\n  xff_string = StringUtil::rtrim(xff_string);\n\n  try {\n    // This technically requires a copy because inet_pton takes a null terminated string. In\n    // practice, we are working with a view at the end of the owning string, and could pass the\n    // raw pointer.\n    // TODO(mattklein123) PERF: Avoid the copy here.\n    return {\n        Network::Utility::parseInternetAddress(std::string(xff_string.data(), xff_string.size())),\n        last_comma == std::string::npos && num_to_skip == 0};\n  } catch (const EnvoyException&) {\n    return {nullptr, false};\n  }\n}\n\nbool Utility::sanitizeConnectionHeader(Http::RequestHeaderMap& headers) {\n  static const size_t MAX_ALLOWED_NOMINATED_HEADERS = 10;\n  static const size_t MAX_ALLOWED_TE_VALUE_SIZE = 256;\n\n  // Remove any headers nominated by the Connection header. The TE header\n  // is sanitized and removed only if it's empty after removing unsupported values\n  // See https://github.com/envoyproxy/envoy/issues/8623\n  const auto& cv = Http::Headers::get().ConnectionValues;\n  const auto& connection_header_value = headers.Connection()->value();\n\n  StringUtil::CaseUnorderedSet headers_to_remove{};\n  std::vector<absl::string_view> connection_header_tokens =\n      StringUtil::splitToken(connection_header_value.getStringView(), \",\", false);\n\n  // If we have 10 or more nominated headers, fail this request\n  if (connection_header_tokens.size() >= MAX_ALLOWED_NOMINATED_HEADERS) {\n    ENVOY_LOG_MISC(trace, \"Too many nominated headers in request\");\n    return false;\n  }\n\n  // Split the connection header and evaluate each nominated header\n  for (const auto& token : connection_header_tokens) {\n\n    const auto token_sv = StringUtil::trim(token);\n\n    // Build the LowerCaseString for header lookup\n    const LowerCaseString lcs_header_to_remove{std::string(token_sv)};\n\n    // If 
the Connection token value is not a nominated header, ignore it here since\n    // the connection header is removed elsewhere when the H1 request is upgraded to H2\n    if ((lcs_header_to_remove.get() == cv.Close) ||\n        (lcs_header_to_remove.get() == cv.Http2Settings) ||\n        (lcs_header_to_remove.get() == cv.KeepAlive) ||\n        (lcs_header_to_remove.get() == cv.Upgrade)) {\n      continue;\n    }\n\n    // By default we will remove any nominated headers\n    bool keep_header = false;\n\n    // Determine whether the nominated header contains invalid values\n    const HeaderEntry* nominated_header = nullptr;\n\n    if (lcs_header_to_remove == Http::Headers::get().Connection) {\n      // Remove the connection header from the nominated tokens if it's self nominated\n      // The connection header itself is *not removed*\n      ENVOY_LOG_MISC(trace, \"Skipping self nominated header [{}]\", token_sv);\n      keep_header = true;\n      headers_to_remove.emplace(token_sv);\n\n    } else if ((lcs_header_to_remove == Http::Headers::get().ForwardedFor) ||\n               (lcs_header_to_remove == Http::Headers::get().ForwardedHost) ||\n               (lcs_header_to_remove == Http::Headers::get().ForwardedProto) ||\n               !token_sv.find(':')) {\n\n      // An attacker could nominate an X-Forwarded* header, and its removal may mask\n      // the origin of the incoming request and potentially alter its handling.\n      // Additionally, pseudo headers should never be nominated. 
In both cases, we\n      // should fail the request.\n      // See: https://nathandavison.com/blog/abusing-http-hop-by-hop-request-headers\n\n      ENVOY_LOG_MISC(trace, \"Invalid nomination of {} header\", token_sv);\n      return false;\n    } else {\n      // Examine the value of all other nominated headers\n      nominated_header = headers.get(lcs_header_to_remove);\n    }\n\n    if (nominated_header) {\n      auto nominated_header_value_sv = nominated_header->value().getStringView();\n\n      const bool is_te_header = (lcs_header_to_remove == Http::Headers::get().TE);\n\n      // reject the request if the TE header is too large\n      if (is_te_header && (nominated_header_value_sv.size() >= MAX_ALLOWED_TE_VALUE_SIZE)) {\n        ENVOY_LOG_MISC(trace, \"TE header contains a value that exceeds the allowable length\");\n        return false;\n      }\n\n      if (is_te_header) {\n        for (const auto& header_value :\n             StringUtil::splitToken(nominated_header_value_sv, \",\", false)) {\n\n          const absl::string_view header_sv = StringUtil::trim(header_value);\n\n          // If trailers exist in the TE value tokens, keep the header, removing any other values\n          // that may exist\n          if (StringUtil::CaseInsensitiveCompare()(header_sv,\n                                                   Http::Headers::get().TEValues.Trailers)) {\n            keep_header = true;\n            break;\n          }\n        }\n\n        if (keep_header) {\n          headers.setTE(Http::Headers::get().TEValues.Trailers);\n        }\n      }\n    }\n\n    if (!keep_header) {\n      ENVOY_LOG_MISC(trace, \"Removing nominated header [{}]\", token_sv);\n      headers.remove(lcs_header_to_remove);\n      headers_to_remove.emplace(token_sv);\n    }\n  }\n\n  // Lastly remove extra nominated headers from the Connection header\n  if (!headers_to_remove.empty()) {\n    const std::string new_value = 
StringUtil::removeTokens(connection_header_value.getStringView(),\n                                                           \",\", headers_to_remove, \",\");\n\n    if (new_value.empty()) {\n      headers.removeConnection();\n    } else {\n      headers.setConnection(new_value);\n    }\n  }\n\n  return true;\n}\n\nconst std::string& Utility::getProtocolString(const Protocol protocol) {\n  switch (protocol) {\n  case Protocol::Http10:\n    return Headers::get().ProtocolStrings.Http10String;\n  case Protocol::Http11:\n    return Headers::get().ProtocolStrings.Http11String;\n  case Protocol::Http2:\n    return Headers::get().ProtocolStrings.Http2String;\n  case Protocol::Http3:\n    return Headers::get().ProtocolStrings.Http3String;\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid Utility::extractHostPathFromUri(const absl::string_view& uri, absl::string_view& host,\n                                     absl::string_view& path) {\n  /**\n   *  URI RFC: https://www.ietf.org/rfc/rfc2396.txt\n   *\n   *  Example:\n   *  uri  = \"https://example.com:8443/certs\"\n   *  pos:         ^\n   *  host_pos:       ^\n   *  path_pos:                       ^\n   *  host = \"example.com:8443\"\n   *  path = \"/certs\"\n   */\n  const auto pos = uri.find(\"://\");\n  // Start position of the host\n  const auto host_pos = (pos == std::string::npos) ? 
0 : pos + 3;\n  // Start position of the path\n  const auto path_pos = uri.find(\"/\", host_pos);\n  if (path_pos == std::string::npos) {\n    // If uri doesn't have \"/\", the whole string is treated as host.\n    host = uri.substr(host_pos);\n    path = \"/\";\n  } else {\n    host = uri.substr(host_pos, path_pos - host_pos);\n    path = uri.substr(path_pos);\n  }\n}\n\nstd::string Utility::localPathFromFilePath(const absl::string_view& file_path) {\n  if (file_path.size() >= 3 && file_path[1] == ':' && file_path[2] == '/' &&\n      std::isalpha(file_path[0])) {\n    return std::string(file_path);\n  }\n  return absl::StrCat(\"/\", file_path);\n}\n\nRequestMessagePtr Utility::prepareHeaders(const envoy::config::core::v3::HttpUri& http_uri) {\n  absl::string_view host, path;\n  extractHostPathFromUri(http_uri.uri(), host, path);\n\n  RequestMessagePtr message(new RequestMessageImpl());\n  message->headers().setPath(path);\n  message->headers().setHost(host);\n\n  return message;\n}\n\n// TODO(jmarantz): make QueryParams a real class and put this serializer there,\n// along with proper URL escaping of the name and value.\nstd::string Utility::queryParamsToString(const QueryParams& params) {\n  std::string out;\n  std::string delim = \"?\";\n  for (const auto& p : params) {\n    absl::StrAppend(&out, delim, p.first, \"=\", p.second);\n    delim = \"&\";\n  }\n  return out;\n}\n\nconst std::string Utility::resetReasonToString(const Http::StreamResetReason reset_reason) {\n  switch (reset_reason) {\n  case Http::StreamResetReason::ConnectionFailure:\n    return \"connection failure\";\n  case Http::StreamResetReason::ConnectionTermination:\n    return \"connection termination\";\n  case Http::StreamResetReason::LocalReset:\n    return \"local reset\";\n  case Http::StreamResetReason::LocalRefusedStreamReset:\n    return \"local refused stream reset\";\n  case Http::StreamResetReason::Overflow:\n    return \"overflow\";\n  case Http::StreamResetReason::RemoteReset:\n   
 return \"remote reset\";\n  case Http::StreamResetReason::RemoteRefusedStreamReset:\n    return \"remote refused stream reset\";\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid Utility::transformUpgradeRequestFromH1toH2(RequestHeaderMap& headers) {\n  ASSERT(Utility::isUpgrade(headers));\n\n  headers.setReferenceMethod(Http::Headers::get().MethodValues.Connect);\n  headers.setProtocol(headers.getUpgradeValue());\n  headers.removeUpgrade();\n  headers.removeConnection();\n  // nghttp2 rejects upgrade requests/responses with content length, so strip\n  // any unnecessary content length header.\n  if (headers.getContentLengthValue() == \"0\") {\n    headers.removeContentLength();\n  }\n}\n\nvoid Utility::transformUpgradeResponseFromH1toH2(ResponseHeaderMap& headers) {\n  if (getResponseStatus(headers) == 101) {\n    headers.setStatus(200);\n  }\n  headers.removeUpgrade();\n  headers.removeConnection();\n  if (headers.getContentLengthValue() == \"0\") {\n    headers.removeContentLength();\n  }\n}\n\nvoid Utility::transformUpgradeRequestFromH2toH1(RequestHeaderMap& headers) {\n  ASSERT(Utility::isH2UpgradeRequest(headers));\n\n  headers.setReferenceMethod(Http::Headers::get().MethodValues.Get);\n  headers.setUpgrade(headers.getProtocolValue());\n  headers.setReferenceConnection(Http::Headers::get().ConnectionValues.Upgrade);\n  headers.removeProtocol();\n}\n\nvoid Utility::transformUpgradeResponseFromH2toH1(ResponseHeaderMap& headers,\n                                                 absl::string_view upgrade) {\n  if (getResponseStatus(headers) == 200) {\n    headers.setUpgrade(upgrade);\n    headers.setReferenceConnection(Http::Headers::get().ConnectionValues.Upgrade);\n    headers.setStatus(101);\n  }\n}\n\nconst Router::RouteSpecificFilterConfig*\nUtility::resolveMostSpecificPerFilterConfigGeneric(const std::string& filter_name,\n                                                   const Router::RouteConstSharedPtr& route) {\n\n  const 
Router::RouteSpecificFilterConfig* maybe_filter_config{};\n  traversePerFilterConfigGeneric(\n      filter_name, route, [&maybe_filter_config](const Router::RouteSpecificFilterConfig& cfg) {\n        maybe_filter_config = &cfg;\n      });\n  return maybe_filter_config;\n}\n\nvoid Utility::traversePerFilterConfigGeneric(\n    const std::string& filter_name, const Router::RouteConstSharedPtr& route,\n    std::function<void(const Router::RouteSpecificFilterConfig&)> cb) {\n  if (!route) {\n    return;\n  }\n\n  const Router::RouteEntry* routeEntry = route->routeEntry();\n\n  if (routeEntry != nullptr) {\n    auto maybe_vhost_config = routeEntry->virtualHost().perFilterConfig(filter_name);\n    if (maybe_vhost_config != nullptr) {\n      cb(*maybe_vhost_config);\n    }\n  }\n\n  auto maybe_route_config = route->perFilterConfig(filter_name);\n  if (maybe_route_config != nullptr) {\n    cb(*maybe_route_config);\n  }\n\n  if (routeEntry != nullptr) {\n    auto maybe_weighted_cluster_config = routeEntry->perFilterConfig(filter_name);\n    if (maybe_weighted_cluster_config != nullptr) {\n      cb(*maybe_weighted_cluster_config);\n    }\n  }\n}\n\nstd::string Utility::PercentEncoding::encode(absl::string_view value,\n                                             absl::string_view reserved_chars) {\n  absl::flat_hash_set<char> reserved_char_set{reserved_chars.begin(), reserved_chars.end()};\n  for (size_t i = 0; i < value.size(); ++i) {\n    const char& ch = value[i];\n    // The escaping characters are defined in\n    // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#responses.\n    //\n    // We do checking for each char in the string. 
If the current char is included in the defined\n    // escaping characters, we jump to \"the slow path\" (append the char [encoded or not encoded]\n    // to the returned string one by one) started from the current index.\n    if (ch < ' ' || ch >= '~' || reserved_char_set.find(ch) != reserved_char_set.end()) {\n      return PercentEncoding::encode(value, i, reserved_char_set);\n    }\n  }\n  return std::string(value);\n}\n\nstd::string Utility::PercentEncoding::encode(absl::string_view value, const size_t index,\n                                             const absl::flat_hash_set<char>& reserved_char_set) {\n  std::string encoded;\n  if (index > 0) {\n    absl::StrAppend(&encoded, value.substr(0, index));\n  }\n\n  for (size_t i = index; i < value.size(); ++i) {\n    const char& ch = value[i];\n    if (ch < ' ' || ch >= '~' || reserved_char_set.find(ch) != reserved_char_set.end()) {\n      // For consistency, URI producers should use uppercase hexadecimal digits for all\n      // percent-encodings. 
https://tools.ietf.org/html/rfc3986#section-2.1.\n      absl::StrAppend(&encoded, fmt::format(\"%{:02X}\", ch));\n    } else {\n      encoded.push_back(ch);\n    }\n  }\n  return encoded;\n}\n\nstd::string Utility::PercentEncoding::decode(absl::string_view encoded) {\n  std::string decoded;\n  decoded.reserve(encoded.size());\n  for (size_t i = 0; i < encoded.size(); ++i) {\n    char ch = encoded[i];\n    if (ch == '%' && i + 2 < encoded.size()) {\n      const char& hi = encoded[i + 1];\n      const char& lo = encoded[i + 2];\n      if (absl::ascii_isdigit(hi)) {\n        ch = hi - '0';\n      } else {\n        ch = absl::ascii_toupper(hi) - 'A' + 10;\n      }\n\n      ch *= 16;\n      if (absl::ascii_isdigit(lo)) {\n        ch += lo - '0';\n      } else {\n        ch += absl::ascii_toupper(lo) - 'A' + 10;\n      }\n      i += 2;\n    }\n    decoded.push_back(ch);\n  }\n  return decoded;\n}\n\nUtility::AuthorityAttributes Utility::parseAuthority(absl::string_view host) {\n  // First try to see if there is a port included. This also checks to see that there is not a ']'\n  // as the last character which is indicative of an IPv6 address without a port. This is a best\n  // effort attempt.\n  const auto colon_pos = host.rfind(':');\n  absl::string_view host_to_resolve = host;\n  absl::optional<uint16_t> port;\n  if (colon_pos != absl::string_view::npos && host_to_resolve.back() != ']') {\n    const absl::string_view string_view_host = host;\n    host_to_resolve = string_view_host.substr(0, colon_pos);\n    const auto port_str = string_view_host.substr(colon_pos + 1);\n    uint64_t port64;\n    if (port_str.empty() || !absl::SimpleAtoi(port_str, &port64) || port64 > 65535) {\n      // Just attempt to resolve whatever we were given. This will very likely fail.\n      host_to_resolve = host;\n    } else {\n      port = static_cast<uint16_t>(port64);\n    }\n  }\n\n  // Now see if this is an IP address. 
We need to know this because some things (such as setting\n  // SNI) are special cased if this is an IP address. Either way, we still go through the normal\n  // resolver flow. We could short-circuit the DNS resolver in this case, but the extra code to do\n  // so is not worth it since the DNS resolver should handle it for us.\n  bool is_ip_address = false;\n  try {\n    absl::string_view potential_ip_address = host_to_resolve;\n    // TODO(mattklein123): Optimally we would support bracket parsing in parseInternetAddress(),\n    // but we still need to trim the brackets to send the IPv6 address into the DNS resolver. For\n    // now, just do all the trimming here, but in the future we should consider whether we can\n    // have unified [] handling as low as possible in the stack.\n    if (!potential_ip_address.empty() && potential_ip_address.front() == '[' &&\n        potential_ip_address.back() == ']') {\n      potential_ip_address.remove_prefix(1);\n      potential_ip_address.remove_suffix(1);\n    }\n    Network::Utility::parseInternetAddress(std::string(potential_ip_address));\n    is_ip_address = true;\n    host_to_resolve = potential_ip_address;\n  } catch (const EnvoyException&) {\n  }\n\n  return {is_ip_address, host_to_resolve, port};\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/http/utility.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/message.h\"\n#include \"envoy/http/metadata_interface.h\"\n#include \"envoy/http/query_params.h\"\n\n#include \"common/http/exception.h\"\n#include \"common/http/status.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n#include \"nghttp2/nghttp2.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Utility {\n\n// This is a wrapper around dispatch calls that may throw an exception or may return an error status\n// while exception removal is in migration.\n// TODO(#10878): Remove this.\nHttp::Status exceptionToStatus(std::function<Http::Status(Buffer::Instance&)> dispatch,\n                               Buffer::Instance& data);\n\n/**\n * Well-known HTTP ALPN values.\n */\nclass AlpnNameValues {\npublic:\n  const std::string Http10 = \"http/1.0\";\n  const std::string Http11 = \"http/1.1\";\n  const std::string Http2 = \"h2\";\n  const std::string Http2c = \"h2c\";\n};\n\nusing AlpnNames = ConstSingleton<AlpnNameValues>;\n\n} // namespace Utility\n} // namespace Http\n\nnamespace Http2 {\nnamespace Utility {\n\nstruct SettingsEntryHash {\n  size_t operator()(const nghttp2_settings_entry& entry) const {\n    return absl::Hash<decltype(entry.settings_id)>()(entry.settings_id);\n  }\n};\n\nstruct SettingsEntryEquals {\n  bool operator()(const nghttp2_settings_entry& lhs, const nghttp2_settings_entry& rhs) const {\n    return lhs.settings_id == rhs.settings_id;\n  }\n};\n\n// Limits and defaults for `envoy::config::core::v3::Http2ProtocolOptions` protos.\nstruct OptionsLimits {\n  // disable HPACK compression\n  static const uint32_t MIN_HPACK_TABLE_SIZE = 0;\n  // initial value from HTTP/2 spec, same as 
NGHTTP2_DEFAULT_HEADER_TABLE_SIZE from nghttp2\n  static const uint32_t DEFAULT_HPACK_TABLE_SIZE = (1 << 12);\n  // no maximum from HTTP/2 spec, use unsigned 32-bit maximum\n  static const uint32_t MAX_HPACK_TABLE_SIZE = std::numeric_limits<uint32_t>::max();\n  // TODO(jwfang): make this 0, the HTTP/2 spec minimum\n  static const uint32_t MIN_MAX_CONCURRENT_STREAMS = 1;\n  // defaults to maximum, same as nghttp2\n  static const uint32_t DEFAULT_MAX_CONCURRENT_STREAMS = (1U << 31) - 1;\n  // no maximum from HTTP/2 spec, total streams is unsigned 32-bit maximum,\n  // one-side (client/server) is half that, and we need to exclude stream 0.\n  // same as NGHTTP2_INITIAL_MAX_CONCURRENT_STREAMS from nghttp2\n  static const uint32_t MAX_MAX_CONCURRENT_STREAMS = (1U << 31) - 1;\n\n  // initial value from HTTP/2 spec, same as NGHTTP2_INITIAL_WINDOW_SIZE from nghttp2\n  // NOTE: we only support increasing window size now, so this is also the minimum\n  // TODO(jwfang): make this 0 to support decrease window size\n  static const uint32_t MIN_INITIAL_STREAM_WINDOW_SIZE = (1 << 16) - 1;\n  // initial value from HTTP/2 spec is 65535, but we want more (256MiB)\n  static const uint32_t DEFAULT_INITIAL_STREAM_WINDOW_SIZE = 256 * 1024 * 1024;\n  // maximum from HTTP/2 spec, same as NGHTTP2_MAX_WINDOW_SIZE from nghttp2\n  static const uint32_t MAX_INITIAL_STREAM_WINDOW_SIZE = (1U << 31) - 1;\n\n  // CONNECTION_WINDOW_SIZE is similar to STREAM_WINDOW_SIZE, but for connection-level window\n  // TODO(jwfang): make this 0 to support decrease window size\n  static const uint32_t MIN_INITIAL_CONNECTION_WINDOW_SIZE = (1 << 16) - 1;\n  // nghttp2's default connection-level window equals to its stream-level,\n  // our default connection-level window also equals to our stream-level\n  static const uint32_t DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE = 256 * 1024 * 1024;\n  static const uint32_t MAX_INITIAL_CONNECTION_WINDOW_SIZE = (1U << 31) - 1;\n\n  // Default limit on the number of outbound 
frames of all types.\n  static const uint32_t DEFAULT_MAX_OUTBOUND_FRAMES = 10000;\n  // Default limit on the number of outbound frames of types PING, SETTINGS and RST_STREAM.\n  static const uint32_t DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES = 1000;\n  // Default limit on the number of consecutive inbound frames with an empty payload\n  // and no end stream flag.\n  static const uint32_t DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD = 1;\n  // Default limit on the number of inbound frames of type PRIORITY (per stream).\n  static const uint32_t DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM = 100;\n  // Default limit on the number of inbound frames of type WINDOW_UPDATE (per DATA frame sent).\n  static const uint32_t DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT = 10;\n};\n\n/**\n * Validates settings/options already set in |options| and initializes any remaining fields with\n * defaults.\n */\nenvoy::config::core::v3::Http2ProtocolOptions\ninitializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options);\n\nenvoy::config::core::v3::Http2ProtocolOptions\ninitializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options,\n                             bool hcm_stream_error_set,\n                             const Protobuf::BoolValue& hcm_stream_error);\n} // namespace Utility\n} // namespace Http2\n\nnamespace Http {\nnamespace Utility {\n\n/**\n * Given a fully qualified URL, splits the string_view provided into scheme,\n * host and path with query parameters components.\n */\nclass Url {\npublic:\n  bool initialize(absl::string_view absolute_url, bool is_connect_request);\n  absl::string_view scheme() { return scheme_; }\n  absl::string_view hostAndPort() { return host_and_port_; }\n  absl::string_view pathAndQueryParams() { return path_and_query_params_; }\n\nprivate:\n  absl::string_view scheme_;\n  absl::string_view host_and_port_;\n  absl::string_view path_and_query_params_;\n};\n\nclass 
PercentEncoding {\npublic:\n  /**\n   * Encodes string view to its percent encoded representation. Non-visible ASCII is always escaped,\n   * in addition to a given list of reserved chars.\n   *\n   * @param value supplies string to be encoded.\n   * @param reserved_chars list of reserved chars to escape. By default the escaped chars in\n   *        https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#responses are used.\n   * @return std::string percent-encoded string.\n   */\n  static std::string encode(absl::string_view value, absl::string_view reserved_chars = \"%\");\n\n  /**\n   * Decodes string view from its percent encoded representation.\n   * @param encoded supplies string to be decoded.\n   * @return std::string decoded string https://tools.ietf.org/html/rfc3986#section-2.1.\n   */\n  static std::string decode(absl::string_view value);\n\nprivate:\n  // Encodes string view to its percent encoded representation, with start index.\n  static std::string encode(absl::string_view value, const size_t index,\n                            const absl::flat_hash_set<char>& reserved_char_set);\n};\n\n/**\n * Append to x-forwarded-for header.\n * @param headers supplies the headers to append to.\n * @param remote_address supplies the remote address to append.\n */\nvoid appendXff(RequestHeaderMap& headers, const Network::Address::Instance& remote_address);\n\n/**\n * Append to via header.\n * @param headers supplies the headers to append to.\n * @param via supplies the via header to append.\n */\nvoid appendVia(RequestOrResponseHeaderMap& headers, const std::string& via);\n\n/**\n * Creates an SSL (https) redirect path based on the input host and path headers.\n * @param headers supplies the request headers.\n * @return std::string the redirect path.\n */\nstd::string createSslRedirectPath(const RequestHeaderMap& headers);\n\n/**\n * Parse a URL into query parameters.\n * @param url supplies the url to parse.\n * @return QueryParams the parsed parameters, 
if any.\n */\nQueryParams parseQueryString(absl::string_view url);\n\n/**\n * Parse a URL into query parameters.\n * @param url supplies the url to parse.\n * @return QueryParams the parsed and percent-decoded parameters, if any.\n */\nQueryParams parseAndDecodeQueryString(absl::string_view url);\n\n/**\n * Parse a a request body into query parameters.\n * @param body supplies the body to parse.\n * @return QueryParams the parsed parameters, if any.\n */\nQueryParams parseFromBody(absl::string_view body);\n\n/**\n * Parse query parameters from a URL or body.\n * @param data supplies the data to parse.\n * @param start supplies the offset within the data.\n * @param decode_params supplies the flag whether to percent-decode the parsed parameters (both name\n *        and value). Set to false to keep the parameters encoded.\n * @return QueryParams the parsed parameters, if any.\n */\nQueryParams parseParameters(absl::string_view data, size_t start, bool decode_params);\n\n/**\n * Finds the start of the query string in a path\n * @param path supplies a HeaderString& to search for the query string\n * @return absl::string_view starting at the beginning of the query string,\n *         or a string_view starting at the end of the path if there was\n *         no query string.\n */\nabsl::string_view findQueryStringStart(const HeaderString& path);\n\n/**\n * Parse a particular value out of a cookie\n * @param headers supplies the headers to get the cookie from.\n * @param key the key for the particular cookie value to return\n * @return std::string the parsed cookie value, or \"\" if none exists\n **/\nstd::string parseCookieValue(const HeaderMap& headers, const std::string& key);\n\n/**\n * Produce the value for a Set-Cookie header with the given parameters.\n * @param key is the name of the cookie that is being set.\n * @param value the value to set the cookie to; this value is trusted.\n * @param path the path for the cookie, or the empty string to not set a path.\n * 
@param max_age the length of time for which the cookie is valid, or zero\n * @param httponly true if the cookie should have HttpOnly appended.\n * to create a session cookie.\n * @return std::string a valid Set-Cookie header value string\n */\nstd::string makeSetCookieValue(const std::string& key, const std::string& value,\n                               const std::string& path, const std::chrono::seconds max_age,\n                               bool httponly);\n\n/**\n * Get the response status from the response headers.\n * @param headers supplies the headers to get the status from.\n * @return uint64_t the response code or throws an exception if the headers are invalid.\n */\nuint64_t getResponseStatus(const ResponseHeaderMap& headers);\n\n/**\n * Determine whether these headers are a valid Upgrade request or response.\n * This function returns true if the following HTTP headers and values are present:\n * - Connection: Upgrade\n * - Upgrade: [any value]\n */\nbool isUpgrade(const RequestOrResponseHeaderMap& headers);\n\n/**\n * @return true if this is a CONNECT request with a :protocol header present, false otherwise.\n */\nbool isH2UpgradeRequest(const RequestHeaderMap& headers);\n\n/**\n * Determine whether this is a WebSocket Upgrade request.\n * This function returns true if the following HTTP headers and values are present:\n * - Connection: Upgrade\n * - Upgrade: websocket\n */\nbool isWebSocketUpgradeRequest(const RequestHeaderMap& headers);\n\n/**\n * @return Http1Settings An Http1Settings populated from the\n * envoy::config::core::v3::Http1ProtocolOptions config.\n */\nHttp1Settings parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& config);\n\nHttp1Settings parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& config,\n                                 const Protobuf::BoolValue& hcm_stream_error);\n\nstruct EncodeFunctions {\n  // Function to modify locally generated response headers.\n  
std::function<void(ResponseHeaderMap& headers)> modify_headers_;\n  // Function to rewrite locally generated response.\n  std::function<void(ResponseHeaderMap& response_headers, Code& code, std::string& body,\n                     absl::string_view& content_type)>\n      rewrite_;\n  // Function to encode response headers.\n  std::function<void(ResponseHeaderMapPtr&& headers, bool end_stream)> encode_headers_;\n  // Function to encode the response body.\n  std::function<void(Buffer::Instance& data, bool end_stream)> encode_data_;\n};\n\nstruct LocalReplyData {\n  // Tells if this is a response to a gRPC request.\n  bool is_grpc_;\n  // Supplies the HTTP response code.\n  Code response_code_;\n  // Supplies the optional body text which is returned.\n  absl::string_view body_text_;\n  // gRPC status code to override the httpToGrpcStatus mapping with.\n  const absl::optional<Grpc::Status::GrpcStatus> grpc_status_;\n  // Tells if this is a response to a HEAD request.\n  bool is_head_request_ = false;\n};\n\n/**\n * Create a locally generated response using filter callbacks.\n * @param is_reset boolean reference that indicates whether a stream has been reset. It is the\n *        responsibility of the caller to ensure that this is set to false if onDestroy()\n *        is invoked in the context of sendLocalReply().\n * @param callbacks supplies the filter callbacks to use.\n * @param local_reply_data struct which keeps data related to generate reply.\n */\nvoid sendLocalReply(const bool& is_reset, StreamDecoderFilterCallbacks& callbacks,\n                    const LocalReplyData& local_reply_data);\n\n/**\n * Create a locally generated response using the provided lambdas.\n\n * @param is_reset boolean reference that indicates whether a stream has been reset. 
It is the\n *                 responsibility of the caller to ensure that this is set to false if onDestroy()\n *                 is invoked in the context of sendLocalReply().\n * @param encode_functions supplies the functions to encode response body and headers.\n * @param local_reply_data struct which keeps data related to generate reply.\n */\nvoid sendLocalReply(const bool& is_reset, const EncodeFunctions& encode_functions,\n                    const LocalReplyData& local_reply_data);\n\nstruct GetLastAddressFromXffInfo {\n  // Last valid address pulled from the XFF header.\n  Network::Address::InstanceConstSharedPtr address_;\n  // Whether this is the only address in the XFF header.\n  bool single_address_;\n};\n\n/**\n * Retrieves the last IPv4/IPv6 address in the x-forwarded-for header.\n * @param request_headers supplies the request headers.\n * @param num_to_skip specifies the number of addresses at the end of the XFF header\n *        to ignore when identifying the \"last\" address.\n * @return GetLastAddressFromXffInfo information about the last address in the XFF header.\n *         @see GetLastAddressFromXffInfo for more information.\n */\nGetLastAddressFromXffInfo getLastAddressFromXFF(const Http::RequestHeaderMap& request_headers,\n                                                uint32_t num_to_skip = 0);\n\n/**\n * Remove any headers nominated by the Connection header\n * Sanitize the TE header if it contains unsupported values\n *\n * @param headers the client request headers\n * @return whether the headers were sanitized successfully\n */\nbool sanitizeConnectionHeader(Http::RequestHeaderMap& headers);\n\n/**\n * Get the string for the given http protocol.\n * @param protocol for which to return the string representation.\n * @return string representation of the protocol.\n */\nconst std::string& getProtocolString(const Protocol p);\n\n/**\n * Extract host and path from a URI. 
The host may contain port.\n * This function doesn't validate if the URI is valid. It only parses the URI with following\n * format: scheme://host/path.\n * @param the input URI string\n * @param the output host string.\n * @param the output path string.\n */\nvoid extractHostPathFromUri(const absl::string_view& uri, absl::string_view& host,\n                            absl::string_view& path);\n\n/**\n * Takes a the path component from a file:/// URI and returns a local path for file access.\n * @param file_path if we have file:///foo/bar, the file_path is foo/bar. For file:///c:/foo/bar\n *                  it is c:/foo/bar. This is not prefixed with /.\n * @return std::string with absolute path for local access, e.g. /foo/bar, c:/foo/bar.\n */\nstd::string localPathFromFilePath(const absl::string_view& file_path);\n\n/**\n * Prepare headers for a HttpUri.\n */\nRequestMessagePtr prepareHeaders(const envoy::config::core::v3::HttpUri& http_uri);\n\n/**\n * Serialize query-params into a string.\n */\nstd::string queryParamsToString(const QueryParams& query_params);\n\n/**\n * Returns string representation of StreamResetReason.\n */\nconst std::string resetReasonToString(const Http::StreamResetReason reset_reason);\n\n/**\n * Transforms the supplied headers from an HTTP/1 Upgrade request to an H2 style upgrade.\n * Changes the method to connection, moves the Upgrade to a :protocol header,\n * @param headers the headers to convert.\n */\nvoid transformUpgradeRequestFromH1toH2(RequestHeaderMap& headers);\n\n/**\n * Transforms the supplied headers from an HTTP/1 Upgrade response to an H2 style upgrade response.\n * Changes the 101 upgrade response to a 200 for the CONNECT response.\n * @param headers the headers to convert.\n */\nvoid transformUpgradeResponseFromH1toH2(ResponseHeaderMap& headers);\n\n/**\n * Transforms the supplied headers from an H2 \"CONNECT\"-with-:protocol-header to an HTTP/1 style\n * Upgrade response.\n * @param headers the headers to convert.\n 
*/\nvoid transformUpgradeRequestFromH2toH1(RequestHeaderMap& headers);\n\n/**\n * Transforms the supplied headers from an H2 \"CONNECT success\" to an HTTP/1 style Upgrade response.\n * The caller is responsible for ensuring this only happens on upgraded streams.\n * @param headers the headers to convert.\n */\nvoid transformUpgradeResponseFromH2toH1(ResponseHeaderMap& headers, absl::string_view upgrade);\n\n/**\n * The non template implementation of resolveMostSpecificPerFilterConfig. see\n * resolveMostSpecificPerFilterConfig for docs.\n */\nconst Router::RouteSpecificFilterConfig*\nresolveMostSpecificPerFilterConfigGeneric(const std::string& filter_name,\n                                          const Router::RouteConstSharedPtr& route);\n\n/**\n * Retrieves the route specific config. Route specific config can be in a few\n * places, that are checked in order. The first config found is returned. The\n * order is:\n * - the routeEntry() (for config that's applied on weighted clusters)\n * - the route\n * - and finally from the virtual host object (routeEntry()->virtualhost()).\n *\n * To use, simply:\n *\n *     const auto* config =\n *         Utility::resolveMostSpecificPerFilterConfig<ConcreteType>(FILTER_NAME,\n * stream_callbacks_.route());\n *\n * See notes about config's lifetime below.\n *\n * @param filter_name The name of the filter who's route config should be\n * fetched.\n * @param route The route to check for route configs. nullptr routes will\n * result in nullptr being returned.\n *\n * @return The route config if found. nullptr if not found. 
The returned\n * pointer's lifetime is the same as the route parameter.\n */\ntemplate <class ConfigType>\nconst ConfigType* resolveMostSpecificPerFilterConfig(const std::string& filter_name,\n                                                     const Router::RouteConstSharedPtr& route) {\n  static_assert(std::is_base_of<Router::RouteSpecificFilterConfig, ConfigType>::value,\n                \"ConfigType must be a subclass of Router::RouteSpecificFilterConfig\");\n  const Router::RouteSpecificFilterConfig* generic_config =\n      resolveMostSpecificPerFilterConfigGeneric(filter_name, route);\n  return dynamic_cast<const ConfigType*>(generic_config);\n}\n\n/**\n * The non template implementation of traversePerFilterConfig. see\n * traversePerFilterConfig for docs.\n */\nvoid traversePerFilterConfigGeneric(\n    const std::string& filter_name, const Router::RouteConstSharedPtr& route,\n    std::function<void(const Router::RouteSpecificFilterConfig&)> cb);\n\n/**\n * Fold all the available per route filter configs, invoking the callback with each config (if\n * it is present). Iteration of the configs is in order of specificity. That means that the callback\n * will be called first for a config on a Virtual host, then a route, and finally a route entry\n * (weighted cluster). 
If a config is not present, the callback will not be invoked.\n */\ntemplate <class ConfigType>\nvoid traversePerFilterConfig(const std::string& filter_name,\n                             const Router::RouteConstSharedPtr& route,\n                             std::function<void(const ConfigType&)> cb) {\n  static_assert(std::is_base_of<Router::RouteSpecificFilterConfig, ConfigType>::value,\n                \"ConfigType must be a subclass of Router::RouteSpecificFilterConfig\");\n\n  traversePerFilterConfigGeneric(\n      filter_name, route, [&cb](const Router::RouteSpecificFilterConfig& cfg) {\n        const ConfigType* typed_cfg = dynamic_cast<const ConfigType*>(&cfg);\n        if (typed_cfg != nullptr) {\n          cb(*typed_cfg);\n        }\n      });\n}\n\n/**\n * Merge all the available per route filter configs into one. To perform the merge,\n * the reduce function will be called on each two configs until a single merged config is left.\n *\n * @param reduce The first argument for this function will be the config from the previous level\n * and the second argument is the config from the current level (the more specific one). 
The\n * function should merge the second argument into the first argument.\n *\n * @return The merged config.\n */\ntemplate <class ConfigType>\nabsl::optional<ConfigType>\ngetMergedPerFilterConfig(const std::string& filter_name, const Router::RouteConstSharedPtr& route,\n                         std::function<void(ConfigType&, const ConfigType&)> reduce) {\n  static_assert(std::is_copy_constructible<ConfigType>::value,\n                \"ConfigType must be copy constructible\");\n\n  absl::optional<ConfigType> merged;\n\n  traversePerFilterConfig<ConfigType>(filter_name, route,\n                                      [&reduce, &merged](const ConfigType& cfg) {\n                                        if (!merged) {\n                                          merged.emplace(cfg);\n                                        } else {\n                                          reduce(merged.value(), cfg);\n                                        }\n                                      });\n\n  return merged;\n}\n\nstruct AuthorityAttributes {\n  // whether parsed authority is pure ip address(IPv4/IPv6), if it is true\n  // passed that are not FQDN\n  bool is_ip_address_{};\n\n  // If parsed authority has host, that is stored here.\n  absl::string_view host_;\n\n  // If parsed authority has port, that is stored here.\n  absl::optional<uint16_t> port_;\n};\n\n/**\n * Parse passed authority, and get that is valid FQDN or IPv4/IPv6 address, hostname and port-name.\n * @param host host/authority\n * @param default_port If passed authority does not have port, this value is returned\n * @return hostname parse result. that includes whether host is IP Address, hostname and port-name\n */\nAuthorityAttributes parseAuthority(absl::string_view host);\n} // namespace Utility\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/init/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"watcher_lib\",\n    srcs = [\"watcher_impl.cc\"],\n    hdrs = [\"watcher_impl.h\"],\n    deps = [\n        \"//include/envoy/init:watcher_interface\",\n        \"//source/common/common:logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"target_lib\",\n    srcs = [\"target_impl.cc\"],\n    hdrs = [\"target_impl.h\"],\n    deps = [\n        \"//include/envoy/init:target_interface\",\n        \"//source/common/common:logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"manager_lib\",\n    srcs = [\"manager_impl.cc\"],\n    hdrs = [\"manager_impl.h\"],\n    deps = [\n        \":watcher_lib\",\n        \"//include/envoy/init:manager_interface\",\n        \"//source/common/common:logger_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/init/manager_impl.cc",
    "content": "#include \"common/init/manager_impl.h\"\n\n#include <functional>\n\n#include \"common/common/assert.h\"\n#include \"common/init/watcher_impl.h\"\n\nnamespace Envoy {\nnamespace Init {\n\nManagerImpl::ManagerImpl(absl::string_view name)\n    : name_(fmt::format(\"init manager {}\", name)), state_(State::Uninitialized), count_(0),\n      watcher_(name_, [this](absl::string_view target_name) { onTargetReady(target_name); }) {}\n\nManager::State ManagerImpl::state() const { return state_; }\n\nvoid ManagerImpl::add(const Target& target) {\n  ++count_;\n  TargetHandlePtr target_handle(target.createHandle(name_));\n  ++target_names_count_[target.name()];\n  switch (state_) {\n  case State::Uninitialized:\n    // If the manager isn't initialized yet, save the target handle to be initialized later.\n    ENVOY_LOG(debug, \"added {} to {}\", target.name(), name_);\n    target_handles_.push_back(std::move(target_handle));\n    return;\n  case State::Initializing:\n    // If the manager is already initializing, initialize the new target immediately. 
Note that\n    // it's important in this case that count_ was incremented above before calling the target,\n    // because if the target calls the init manager back immediately, count_ will be decremented\n    // here (see the definition of watcher_ above).\n    target_handle->initialize(watcher_);\n    return;\n  case State::Initialized:\n    // If the manager has already completed initialization, consider this a programming error.\n    ASSERT(false, fmt::format(\"attempted to add {} to initialized {}\", target.name(), name_));\n  }\n}\n\nvoid ManagerImpl::initialize(const Watcher& watcher) {\n  // If the manager is already initializing or initialized, consider this a programming error.\n  ASSERT(state_ == State::Uninitialized, fmt::format(\"attempted to initialize {} twice\", name_));\n\n  // Create a handle to notify when initialization is complete.\n  watcher_handle_ = watcher.createHandle(name_);\n\n  if (count_ == 0) {\n    // If we have no targets, initialization trivially completes. This can happen, and is fine.\n    ENVOY_LOG(debug, \"{} contains no targets\", name_);\n    ready();\n  } else {\n    // If we have some targets, start initialization...\n    ENVOY_LOG(debug, \"{} initializing\", name_);\n    state_ = State::Initializing;\n\n    // Attempt to initialize each target. 
If a target is unavailable, treat it as though it\n    // completed immediately.\n    for (const auto& target_handle : target_handles_) {\n      if (!target_handle->initialize(watcher_)) {\n        onTargetReady(target_handle->name());\n      }\n    }\n  }\n}\n\nconst absl::flat_hash_map<std::string, uint32_t>& ManagerImpl::unreadyTargets() const {\n  return target_names_count_;\n}\n\nvoid ManagerImpl::dumpUnreadyTargets(envoy::admin::v3::UnreadyTargetsDumps& unready_targets_dumps) {\n  auto& message = *unready_targets_dumps.mutable_unready_targets_dumps()->Add();\n  message.set_name(name_);\n  for (const auto& [target_name, count] : target_names_count_) {\n    message.add_target_names(target_name);\n  }\n}\n\nvoid ManagerImpl::onTargetReady(absl::string_view target_name) {\n  // If there are no remaining targets and one mysteriously calls us back, this manager is haunted.\n  ASSERT(count_ != 0,\n         fmt::format(\"{} called back by target after initialization complete\", target_name));\n\n  // Decrease target_name count by 1.\n  ASSERT(target_names_count_.find(target_name) != target_names_count_.end());\n  if (--target_names_count_[target_name] == 0) {\n    target_names_count_.erase(target_name);\n  }\n\n  // If there are no uninitialized targets remaining when called back by a target, that means it was\n  // the last. Signal `ready` to the handle we saved in `initialize`.\n  if (--count_ == 0) {\n    ready();\n  }\n}\n\nvoid ManagerImpl::ready() {\n  state_ = State::Initialized;\n  watcher_handle_->ready();\n}\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/init/manager_impl.h",
    "content": "#pragma once\n\n#include <list>\n\n#include \"envoy/init/manager.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/init/watcher_impl.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Init {\n\n/**\n * Init::ManagerImpl coordinates initialization of one or more \"targets.\" See comments in\n * include/envoy/init/manager.h for an overview.\n *\n * When the logging level is set to \"debug\" or \"trace,\" the log will contain entries for all\n * significant events in the initialization flow:\n *\n *   - Targets added to the manager\n *   - Initialization started for the manager and for each target\n *   - Initialization completed for each target and for the manager\n *   - Destruction of targets and watchers\n *   - Callbacks to \"unavailable\" (deleted) targets, manager, or watchers\n */\nclass ManagerImpl : public Manager, Logger::Loggable<Logger::Id::init> {\npublic:\n  /**\n   * @param name a human-readable manager name, for logging / debugging.\n   */\n  ManagerImpl(absl::string_view name);\n\n  // Init::Manager\n  State state() const override;\n  void add(const Target& target) override;\n  void initialize(const Watcher& watcher) override;\n  const absl::flat_hash_map<std::string, uint32_t>& unreadyTargets() const override;\n  void dumpUnreadyTargets(envoy::admin::v3::UnreadyTargetsDumps& dumps) override;\n\nprivate:\n  // Callback function with an additional target_name parameter, decrease unready targets count by\n  // 1, update target_names_count_ hash map.\n  void onTargetReady(absl::string_view target_name);\n\n  void ready();\n\n  // Human-readable name for logging.\n  const std::string name_;\n\n  // Current state.\n  State state_;\n\n  // Current number of registered targets that have not yet initialized.\n  uint32_t count_;\n\n  // Handle to the watcher passed in `initialize`, to be called when initialization completes.\n  WatcherHandlePtr watcher_handle_;\n\n  // Watcher to receive ready 
notifications from each target. We restrict the watcher_ inside\n  // ManagerImpl to be constructed with the 'TargetAwareReadyFn' fn so that the init manager will\n  // get target name information when the watcher_ calls 'onTargetSendName(target_name)' For any\n  // other purpose, a watcher can be constructed with either TargetAwareReadyFn or ReadyFn.\n  const WatcherImpl watcher_;\n\n  // All registered targets.\n  std::list<TargetHandlePtr> target_handles_;\n\n  // Count of target_name of unready targets.\n  absl::flat_hash_map<std::string, uint32_t> target_names_count_;\n};\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/init/target_impl.cc",
    "content": "#include \"common/init/target_impl.h\"\n\nnamespace Envoy {\nnamespace Init {\n\nTargetHandleImpl::TargetHandleImpl(absl::string_view handle_name, absl::string_view name,\n                                   std::weak_ptr<InternalInitalizeFn> fn)\n    : handle_name_(handle_name), name_(name), fn_(std::move(fn)) {}\n\nbool TargetHandleImpl::initialize(const Watcher& watcher) const {\n  auto locked_fn(fn_.lock());\n  if (locked_fn) {\n    // If we can \"lock\" a shared pointer to the target's callback function, call it\n    // with a new handle to the ManagerImpl's watcher that was passed in.\n    ENVOY_LOG(debug, \"{} initializing {}\", handle_name_, name_);\n    (*locked_fn)(watcher.createHandle(name_));\n    return true;\n  } else {\n    // If not, the target was already destroyed.\n    ENVOY_LOG(debug, \"{} can't initialize {} (unavailable)\", handle_name_, name_);\n    return false;\n  }\n}\n\nabsl::string_view TargetHandleImpl::name() const { return name_; }\n\nTargetImpl::TargetImpl(absl::string_view name, InitializeFn fn)\n    : name_(fmt::format(\"target {}\", name)),\n      fn_(std::make_shared<InternalInitalizeFn>([this, fn](WatcherHandlePtr watcher_handle) {\n        watcher_handle_ = std::move(watcher_handle);\n        fn();\n      })) {}\n\nTargetImpl::~TargetImpl() { ENVOY_LOG(debug, \"{} destroyed\", name_); }\n\nabsl::string_view TargetImpl::name() const { return name_; }\n\nTargetHandlePtr TargetImpl::createHandle(absl::string_view handle_name) const {\n  // Note: can't use std::make_unique here because TargetHandleImpl ctor is private.\n  return TargetHandlePtr(\n      new TargetHandleImpl(handle_name, name_, std::weak_ptr<InternalInitalizeFn>(fn_)));\n}\n\nbool TargetImpl::ready() {\n  if (watcher_handle_) {\n    // If we have a handle for the ManagerImpl's watcher, signal it and then reset so it can't be\n    // accidentally signaled again.\n    const bool result = watcher_handle_->ready();\n    watcher_handle_.reset();\n    return 
result;\n  }\n  return false;\n}\n\nSharedTargetImpl::SharedTargetImpl(absl::string_view name, InitializeFn fn)\n    : name_(fmt::format(\"shared target {}\", name)),\n      fn_(std::make_shared<InternalInitalizeFn>([this, fn](WatcherHandlePtr watcher_handle) {\n        if (initialized_) {\n          watcher_handle->ready();\n        } else {\n          watcher_handles_.push_back(std::move(watcher_handle));\n          std::call_once(once_flag_, fn);\n        }\n      })) {}\n\nSharedTargetImpl::~SharedTargetImpl() { ENVOY_LOG(debug, \"{} destroyed\", name_); }\n\nabsl::string_view SharedTargetImpl::name() const { return name_; }\n\nTargetHandlePtr SharedTargetImpl::createHandle(absl::string_view handle_name) const {\n  // Note: can't use std::make_unique here because TargetHandleImpl ctor is private.\n  return TargetHandlePtr(\n      new TargetHandleImpl(handle_name, name_, std::weak_ptr<InternalInitalizeFn>(fn_)));\n}\n\nbool SharedTargetImpl::ready() {\n  initialized_ = true;\n  bool all_notified = !watcher_handles_.empty();\n  for (auto& watcher_handle : watcher_handles_) {\n    all_notified = watcher_handle->ready() && all_notified;\n  }\n  // Clear the handles to free memory and avoid notifying them repeatedly.\n  watcher_handles_.clear();\n  return all_notified;\n}\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/init/target_impl.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/init/target.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Init {\n\n/**\n * A target is just a glorified callback function, called by the manager it was registered with.\n */\nusing InitializeFn = std::function<void()>;\n\n/**\n * Internally, the callback is slightly more sophisticated: it actually takes a WatcherHandlePtr\n * that it uses to notify the manager when the target is ready. It saves this pointer when invoked\n * and resets it later in `ready`. Users needn't care about this implementation detail, they only\n * need to provide an `InitializeFn` above when constructing a target.\n */\nusing InternalInitalizeFn = std::function<void(WatcherHandlePtr)>;\n\n/**\n * A TargetHandleImpl functions as a weak reference to a TargetImpl. It is how a ManagerImpl safely\n * tells a target to `initialize` with no guarantees about the target's lifetime.\n */\nclass TargetHandleImpl : public TargetHandle, Logger::Loggable<Logger::Id::init> {\nprivate:\n  friend class TargetImpl;\n  friend class SharedTargetImpl;\n\n  TargetHandleImpl(absl::string_view handle_name, absl::string_view name,\n                   std::weak_ptr<InternalInitalizeFn> fn);\n\npublic:\n  // Init::TargetHandle\n  bool initialize(const Watcher& watcher) const override;\n\n  absl::string_view name() const override;\n\nprivate:\n  // Name of the handle (almost always the name of the ManagerImpl calling the target)\n  const std::string handle_name_;\n\n  // Name of the target\n  const std::string name_;\n\n  // The target's callback function, only called if the weak pointer can be \"locked\"\n  const std::weak_ptr<InternalInitalizeFn> fn_;\n};\n\n/**\n * A TargetImpl is an entity that can be registered with a Manager for initialization. 
It can only\n * be invoked through a TargetHandle.\n */\nclass TargetImpl : public Target, Logger::Loggable<Logger::Id::init> {\npublic:\n  /**\n   * @param name a human-readable target name, for logging / debugging\n   * @fn a callback function to invoke when `initialize` is called on the handle. Note that this\n   *     doesn't take a WatcherHandlePtr (like TargetFn does). Managing the watcher handle is done\n   *     internally to simplify usage.\n   */\n  TargetImpl(absl::string_view name, InitializeFn fn);\n  ~TargetImpl() override;\n\n  // Init::Target\n  absl::string_view name() const override;\n  TargetHandlePtr createHandle(absl::string_view handle_name) const override;\n\n  /**\n   * Signal to the init manager that this target has finished initializing. This is safe to call\n   * any time. Calling it before initialization begins or after initialization has already ended\n   * will have no effect.\n   * @return true if the init manager received this call, false otherwise.\n   */\n  bool ready();\n\nprivate:\n  // Human-readable name for logging\n  const std::string name_;\n\n  // Handle to the ManagerImpl's internal watcher, to call when this target is initialized\n  WatcherHandlePtr watcher_handle_;\n\n  // The callback function, called via TargetHandleImpl by the manager\n  const std::shared_ptr<InternalInitalizeFn> fn_;\n};\n\n/**\n * A specialized Target which can be added by multiple Managers.\n * The initialization will be triggered only once.\n */\nclass SharedTargetImpl : public Target, Logger::Loggable<Logger::Id::init> {\npublic:\n  /**\n   * @param name a human-readable target name, for logging / debugging\n   * @fn a callback function to invoke when `initialize` is called on the handle. Note that this\n   *     doesn't take a WatcherHandlePtr (like TargetFn does). 
Managing the watcher handle is done\n   *     internally to simplify usage.\n   */\n  SharedTargetImpl(absl::string_view name, InitializeFn fn);\n  ~SharedTargetImpl() override;\n\n  // Init::Target\n  absl::string_view name() const override;\n  TargetHandlePtr createHandle(absl::string_view handle_name) const override;\n\n  /**\n   * Signal to the init manager(s) that this target has finished initializing. This is safe to call\n   * any time. Calling it before initialization begins or after initialization has already ended\n   * will have no effect.\n   * @return true if all init managers received this call, false otherwise.\n   */\n  bool ready();\n\nprivate:\n  // Human-readable name for logging\n  const std::string name_;\n\n  // Handles to the internal watchers of all registered managers, to call when this target is\n  // initialized.\n  std::vector<WatcherHandlePtr> watcher_handles_;\n\n  // The callback function, called via TargetHandleImpl by the manager\n  const std::shared_ptr<InternalInitalizeFn> fn_;\n\n  // Tracks whether this target has already initialized, so a manager adding it afterwards can be\n  // signaled immediately.\n  bool initialized_{false};\n\n  // To guarantee the initialization function is called once.\n  std::once_flag once_flag_;\n};\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/init/watcher_impl.cc",
    "content": "#include \"common/init/watcher_impl.h\"\n\nnamespace Envoy {\nnamespace Init {\n\nWatcherHandleImpl::WatcherHandleImpl(absl::string_view handle_name, absl::string_view name,\n                                     std::weak_ptr<TargetAwareReadyFn> fn)\n    : handle_name_(handle_name), name_(name), fn_(std::move(fn)) {}\n\nbool WatcherHandleImpl::ready() const {\n  auto locked_fn(fn_.lock());\n  if (locked_fn) {\n    // If we can \"lock\" a shared pointer to the watcher's callback function, call it.\n    ENVOY_LOG(debug, \"{} initialized, notifying {}\", handle_name_, name_);\n    (*locked_fn)(handle_name_);\n    return true;\n  } else {\n    // If not, the watcher was already destroyed.\n    ENVOY_LOG(debug, \"{} initialized, but can't notify {}\", handle_name_, name_);\n    return false;\n  }\n}\n\nWatcherImpl::WatcherImpl(absl::string_view name, ReadyFn fn)\n    : name_(name), fn_(std::make_shared<TargetAwareReadyFn>(\n                       [callback = std::move(fn)](absl::string_view) { callback(); })) {}\n\nWatcherImpl::WatcherImpl(absl::string_view name, TargetAwareReadyFn fn)\n    : name_(name), fn_(std::make_shared<TargetAwareReadyFn>(std::move(fn))) {}\n\nWatcherImpl::~WatcherImpl() { ENVOY_LOG(debug, \"{} destroyed\", name_); }\n\nabsl::string_view WatcherImpl::name() const { return name_; }\n\nWatcherHandlePtr WatcherImpl::createHandle(absl::string_view handle_name) const {\n  // Note: can't use std::make_unique because WatcherHandleImpl ctor is private.\n  return std::unique_ptr<WatcherHandle>(\n      new WatcherHandleImpl(handle_name, name_, std::weak_ptr<TargetAwareReadyFn>(fn_)));\n}\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/init/watcher_impl.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/init/watcher.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Init {\n\n/**\n * A watcher is just a glorified callback function, called by a target or a manager when\n * initialization completes.\n */\nusing ReadyFn = std::function<void()>;\nusing TargetAwareReadyFn = std::function<void(absl::string_view)>;\n\n/**\n * A WatcherHandleImpl functions as a weak reference to a Watcher. It is how a TargetImpl safely\n * notifies a ManagerImpl that it has initialized, and likewise it's how ManagerImpl safely tells\n * its client that all registered targets have initialized, with no guarantees about the lifetimes\n * of the manager or client.\n */\nclass WatcherHandleImpl : public WatcherHandle, Logger::Loggable<Logger::Id::init> {\nprivate:\n  friend class WatcherImpl;\n  WatcherHandleImpl(absl::string_view handle_name, absl::string_view name,\n                    std::weak_ptr<TargetAwareReadyFn> fn);\n\npublic:\n  // Init::WatcherHandle.\n  bool ready() const override;\n\nprivate:\n  // Name of the handle (either the name of the target calling the manager, or the name of the\n  // manager calling the client).\n  const std::string handle_name_;\n\n  // Name of the watcher (either the name of the manager, or the name of the client).\n  const std::string name_;\n\n  // The watcher's callback function, only called if the weak pointer can be \"locked\".\n  const std::weak_ptr<TargetAwareReadyFn> fn_;\n};\n\n/**\n * A WatcherImpl is an entity that listens for notifications that either an initialization target or\n * all targets registered with a manager have initialized. 
It can only be invoked through a\n * WatcherHandleImpl.\n */\nclass WatcherImpl : public Watcher, Logger::Loggable<Logger::Id::init> {\npublic:\n  /**\n   * @param name a human-readable watcher name, for logging / debugging.\n   * @param fn a callback function to invoke when `ready` is called on the handle.\n   */\n  WatcherImpl(absl::string_view name, ReadyFn fn);\n  WatcherImpl(absl::string_view name, TargetAwareReadyFn fn);\n  ~WatcherImpl() override;\n\n  // Init::Watcher.\n  absl::string_view name() const override;\n  WatcherHandlePtr createHandle(absl::string_view handle_name) const override;\n\nprivate:\n  // Human-readable name for logging.\n  const std::string name_;\n\n  // The callback function, called via WatcherHandleImpl by either the target or the manager.\n  const std::shared_ptr<TargetAwareReadyFn> fn_;\n};\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/json/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"json_loader_lib\",\n    srcs = [\"json_loader.cc\"],\n    hdrs = [\"json_loader.h\"],\n    external_deps = [\n        \"rapidjson\",\n    ],\n    deps = [\n        \"//include/envoy/json:json_object_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/json/json_loader.cc",
    "content": "#include \"common/json/json_loader.h\"\n\n#include <cstdint>\n#include <fstream>\n#include <limits>\n#include <map>\n#include <sstream>\n#include <stack>\n#include <string>\n#include <vector>\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/hash.h\"\n#include \"common/common/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n// Do not let RapidJson leak outside of this file.\n#include \"rapidjson/document.h\"\n#include \"rapidjson/error/en.h\"\n#include \"rapidjson/reader.h\"\n#include \"rapidjson/schema.h\"\n#include \"rapidjson/stream.h\"\n#include \"rapidjson/stringbuffer.h\"\n#include \"rapidjson/writer.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Json {\n\nnamespace {\n/**\n * Internal representation of Object.\n */\nclass Field;\nusing FieldSharedPtr = std::shared_ptr<Field>;\n\nclass Field : public Object {\npublic:\n  void setLineNumberStart(uint64_t line_number) { line_number_start_ = line_number; }\n  void setLineNumberEnd(uint64_t line_number) { line_number_end_ = line_number; }\n\n  // Container factories for handler.\n  static FieldSharedPtr createObject() { return FieldSharedPtr{new Field(Type::Object)}; }\n  static FieldSharedPtr createArray() { return FieldSharedPtr{new Field(Type::Array)}; }\n  static FieldSharedPtr createNull() { return FieldSharedPtr{new Field(Type::Null)}; }\n\n  bool isNull() const override { return type_ == Type::Null; }\n  bool isArray() const override { return type_ == Type::Array; }\n  bool isObject() const override { return type_ == Type::Object; }\n\n  // Value factory.\n  template <typename T> static FieldSharedPtr createValue(T value) {\n    return FieldSharedPtr{new Field(value)}; // NOLINT(modernize-make-shared)\n  }\n\n  void append(FieldSharedPtr field_ptr) {\n    checkType(Type::Array);\n    value_.array_value_.push_back(field_ptr);\n  }\n  void insert(const std::string& key, FieldSharedPtr field_ptr) {\n    
checkType(Type::Object);\n    value_.object_value_[key] = field_ptr;\n  }\n\n  uint64_t hash() const override;\n\n  bool getBoolean(const std::string& name) const override;\n  bool getBoolean(const std::string& name, bool default_value) const override;\n  double getDouble(const std::string& name) const override;\n  double getDouble(const std::string& name, double default_value) const override;\n  int64_t getInteger(const std::string& name) const override;\n  int64_t getInteger(const std::string& name, int64_t default_value) const override;\n  ObjectSharedPtr getObject(const std::string& name, bool allow_empty) const override;\n  std::vector<ObjectSharedPtr> getObjectArray(const std::string& name,\n                                              bool allow_empty) const override;\n  std::string getString(const std::string& name) const override;\n  std::string getString(const std::string& name, const std::string& default_value) const override;\n  std::vector<std::string> getStringArray(const std::string& name, bool allow_empty) const override;\n  std::vector<ObjectSharedPtr> asObjectArray() const override;\n  std::string asString() const override { return stringValue(); }\n  bool asBoolean() const override { return booleanValue(); }\n  double asDouble() const override { return doubleValue(); }\n  int64_t asInteger() const override { return integerValue(); }\n  std::string asJsonString() const override;\n\n  bool empty() const override;\n  bool hasObject(const std::string& name) const override;\n  void iterate(const ObjectCallback& callback) const override;\n  void validateSchema(const std::string& schema) const override;\n\nprivate:\n  enum class Type {\n    Array,\n    Boolean,\n    Double,\n    Integer,\n    Null,\n    Object,\n    String,\n  };\n  static const char* typeAsString(Type t) {\n    switch (t) {\n    case Type::Array:\n      return \"Array\";\n    case Type::Boolean:\n      return \"Boolean\";\n    case Type::Double:\n      return \"Double\";\n    case 
Type::Integer:\n      return \"Integer\";\n    case Type::Null:\n      return \"Null\";\n    case Type::Object:\n      return \"Object\";\n    case Type::String:\n      return \"String\";\n    }\n\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  struct Value {\n    std::vector<FieldSharedPtr> array_value_;\n    bool boolean_value_;\n    double double_value_;\n    int64_t integer_value_;\n    std::map<std::string, FieldSharedPtr> object_value_;\n    std::string string_value_;\n  };\n\n  explicit Field(Type type) : type_(type) {}\n  explicit Field(const std::string& value) : type_(Type::String) { value_.string_value_ = value; }\n  explicit Field(int64_t value) : type_(Type::Integer) { value_.integer_value_ = value; }\n  explicit Field(double value) : type_(Type::Double) { value_.double_value_ = value; }\n  explicit Field(bool value) : type_(Type::Boolean) { value_.boolean_value_ = value; }\n\n  bool isType(Type type) const { return type == type_; }\n  void checkType(Type type) const {\n    if (!isType(type)) {\n      throw Exception(fmt::format(\n          \"JSON field from line {} accessed with type '{}' does not match actual type '{}'.\",\n          line_number_start_, typeAsString(type), typeAsString(type_)));\n    }\n  }\n\n  // Value return type functions.\n  std::string stringValue() const {\n    checkType(Type::String);\n    return value_.string_value_;\n  }\n  std::vector<FieldSharedPtr> arrayValue() const {\n    checkType(Type::Array);\n    return value_.array_value_;\n  }\n  bool booleanValue() const {\n    checkType(Type::Boolean);\n    return value_.boolean_value_;\n  }\n  double doubleValue() const {\n    checkType(Type::Double);\n    return value_.double_value_;\n  }\n  int64_t integerValue() const {\n    checkType(Type::Integer);\n    return value_.integer_value_;\n  }\n\n  rapidjson::Document asRapidJsonDocument() const;\n  static void buildRapidJsonDocument(const Field& field, rapidjson::Value& value,\n                                     
rapidjson::Document::AllocatorType& allocator);\n\n  uint64_t line_number_start_ = 0;\n  uint64_t line_number_end_ = 0;\n  const Type type_;\n  Value value_;\n};\n\n/**\n * Custom stream to allow access to the line number for each object.\n */\nclass LineCountingStringStream : public rapidjson::StringStream {\n  // Ch is typedef in parent class to handle character encoding.\npublic:\n  LineCountingStringStream(const Ch* src) : rapidjson::StringStream(src), line_number_(1) {}\n  Ch Take() {\n    Ch ret = rapidjson::StringStream::Take();\n    if (ret == '\\n') {\n      line_number_++;\n    }\n    return ret;\n  }\n  uint64_t getLineNumber() const { return line_number_; }\n\nprivate:\n  uint64_t line_number_;\n};\n\n/**\n * Consume events from SAX callbacks to build JSON Field.\n */\nclass ObjectHandler : public rapidjson::BaseReaderHandler<rapidjson::UTF8<>, ObjectHandler> {\npublic:\n  ObjectHandler(LineCountingStringStream& stream) : state_(State::ExpectRoot), stream_(stream){};\n\n  bool StartObject();\n  bool EndObject(rapidjson::SizeType);\n  bool Key(const char* value, rapidjson::SizeType size, bool);\n  bool StartArray();\n  bool EndArray(rapidjson::SizeType);\n  bool Bool(bool value);\n  bool Double(double value);\n  bool Int(int value);\n  bool Uint(unsigned value);\n  bool Int64(int64_t value);\n  bool Uint64(uint64_t value);\n  bool Null();\n  bool String(const char* value, rapidjson::SizeType size, bool);\n  bool RawNumber(const char*, rapidjson::SizeType, bool);\n\n  ObjectSharedPtr getRoot() { return root_; }\n\nprivate:\n  bool handleValueEvent(FieldSharedPtr ptr);\n\n  enum class State {\n    ExpectRoot,\n    ExpectKeyOrEndObject,\n    ExpectValueOrStartObjectArray,\n    ExpectArrayValueOrEndArray,\n    ExpectFinished,\n  };\n  State state_;\n  LineCountingStringStream& stream_;\n\n  std::stack<FieldSharedPtr> stack_;\n  std::string key_;\n\n  FieldSharedPtr root_;\n};\n\nvoid Field::buildRapidJsonDocument(const Field& field, rapidjson::Value& 
value,\n                                   rapidjson::Document::AllocatorType& allocator) {\n\n  switch (field.type_) {\n  case Type::Array: {\n    value.SetArray();\n    value.Reserve(field.value_.array_value_.size(), allocator);\n    for (const auto& element : field.value_.array_value_) {\n      switch (element->type_) {\n      case Type::Array:\n      case Type::Object: {\n        rapidjson::Value nested_value;\n        buildRapidJsonDocument(*element, nested_value, allocator);\n        value.PushBack(nested_value, allocator);\n        break;\n      }\n      case Type::Boolean:\n        value.PushBack(element->value_.boolean_value_, allocator);\n        break;\n      case Type::Double:\n        value.PushBack(element->value_.double_value_, allocator);\n        break;\n      case Type::Integer:\n        value.PushBack(element->value_.integer_value_, allocator);\n        break;\n      case Type::Null:\n        value.PushBack(rapidjson::Value(), allocator);\n        break;\n      case Type::String:\n        value.PushBack(rapidjson::StringRef(element->value_.string_value_.c_str()), allocator);\n      }\n    }\n    break;\n  }\n  case Type::Object: {\n    value.SetObject();\n    for (const auto& item : field.value_.object_value_) {\n      auto name = rapidjson::StringRef(item.first.c_str());\n\n      switch (item.second->type_) {\n      case Type::Array:\n      case Type::Object: {\n        rapidjson::Value nested_value;\n        buildRapidJsonDocument(*item.second, nested_value, allocator);\n        value.AddMember(name, nested_value, allocator);\n        break;\n      }\n      case Type::Boolean:\n        value.AddMember(name, item.second->value_.boolean_value_, allocator);\n        break;\n      case Type::Double:\n        value.AddMember(name, item.second->value_.double_value_, allocator);\n        break;\n      case Type::Integer:\n        value.AddMember(name, item.second->value_.integer_value_, allocator);\n        break;\n      case Type::Null:\n        
value.AddMember(name, rapidjson::Value(), allocator);\n        break;\n      case Type::String:\n        value.AddMember(name, rapidjson::StringRef(item.second->value_.string_value_.c_str()),\n                        allocator);\n        break;\n      }\n    }\n    break;\n  }\n  case Type::Null: {\n    value.SetNull();\n    break;\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nrapidjson::Document Field::asRapidJsonDocument() const {\n  rapidjson::Document document;\n  rapidjson::Document::AllocatorType& allocator = document.GetAllocator();\n  buildRapidJsonDocument(*this, document, allocator);\n  return document;\n}\n\nuint64_t Field::hash() const {\n  rapidjson::StringBuffer buffer;\n  rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);\n  asRapidJsonDocument().Accept(writer);\n  return HashUtil::xxHash64(buffer.GetString());\n}\n\nbool Field::getBoolean(const std::string& name) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr == value_.object_value_.end() || !value_itr->second->isType(Type::Boolean)) {\n    throw Exception(fmt::format(\"key '{}' missing or not a boolean from lines {}-{}\", name,\n                                line_number_start_, line_number_end_));\n  }\n  return value_itr->second->booleanValue();\n}\n\nbool Field::getBoolean(const std::string& name, bool default_value) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr != value_.object_value_.end()) {\n    return getBoolean(name);\n  } else {\n    return default_value;\n  }\n}\n\ndouble Field::getDouble(const std::string& name) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr == value_.object_value_.end() || !value_itr->second->isType(Type::Double)) {\n    throw Exception(fmt::format(\"key '{}' missing or not a double from lines {}-{}\", name,\n                                line_number_start_, 
line_number_end_));\n  }\n  return value_itr->second->doubleValue();\n}\n\ndouble Field::getDouble(const std::string& name, double default_value) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr != value_.object_value_.end()) {\n    return getDouble(name);\n  } else {\n    return default_value;\n  }\n}\n\nint64_t Field::getInteger(const std::string& name) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr == value_.object_value_.end() || !value_itr->second->isType(Type::Integer)) {\n    throw Exception(fmt::format(\"key '{}' missing or not an integer from lines {}-{}\", name,\n                                line_number_start_, line_number_end_));\n  }\n  return value_itr->second->integerValue();\n}\n\nint64_t Field::getInteger(const std::string& name, int64_t default_value) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr != value_.object_value_.end()) {\n    return getInteger(name);\n  } else {\n    return default_value;\n  }\n}\n\nObjectSharedPtr Field::getObject(const std::string& name, bool allow_empty) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr == value_.object_value_.end()) {\n    if (allow_empty) {\n      return createObject();\n    } else {\n      throw Exception(fmt::format(\"key '{}' missing from lines {}-{}\", name, line_number_start_,\n                                  line_number_end_));\n    }\n  } else if (!value_itr->second->isType(Type::Object)) {\n    throw Exception(fmt::format(\"key '{}' not an object from line {}\", name,\n                                value_itr->second->line_number_start_));\n  } else {\n    return value_itr->second;\n  }\n}\n\nstd::vector<ObjectSharedPtr> Field::getObjectArray(const std::string& name,\n                                                   bool allow_empty) const {\n  
checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr == value_.object_value_.end() || !value_itr->second->isType(Type::Array)) {\n    if (allow_empty && value_itr == value_.object_value_.end()) {\n      return std::vector<ObjectSharedPtr>();\n    }\n    throw Exception(fmt::format(\"key '{}' missing or not an array from lines {}-{}\", name,\n                                line_number_start_, line_number_end_));\n  }\n\n  std::vector<FieldSharedPtr> array_value = value_itr->second->arrayValue();\n  return {array_value.begin(), array_value.end()};\n}\n\nstd::string Field::getString(const std::string& name) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr == value_.object_value_.end() || !value_itr->second->isType(Type::String)) {\n    throw Exception(fmt::format(\"key '{}' missing or not a string from lines {}-{}\", name,\n                                line_number_start_, line_number_end_));\n  }\n  return value_itr->second->stringValue();\n}\n\nstd::string Field::getString(const std::string& name, const std::string& default_value) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr != value_.object_value_.end()) {\n    return getString(name);\n  } else {\n    return default_value;\n  }\n}\n\nstd::vector<std::string> Field::getStringArray(const std::string& name, bool allow_empty) const {\n  checkType(Type::Object);\n  std::vector<std::string> string_array;\n  auto value_itr = value_.object_value_.find(name);\n  if (value_itr == value_.object_value_.end() || !value_itr->second->isType(Type::Array)) {\n    if (allow_empty && value_itr == value_.object_value_.end()) {\n      return string_array;\n    }\n    throw Exception(fmt::format(\"key '{}' missing or not an array from lines {}-{}\", name,\n                                line_number_start_, line_number_end_));\n  }\n\n  std::vector<FieldSharedPtr> array = 
value_itr->second->arrayValue();\n  string_array.reserve(array.size());\n  for (const auto& element : array) {\n    if (!element->isType(Type::String)) {\n      throw Exception(fmt::format(\"JSON array '{}' from line {} does not contain all strings\", name,\n                                  line_number_start_));\n    }\n    string_array.push_back(element->stringValue());\n  }\n\n  return string_array;\n}\n\nstd::vector<ObjectSharedPtr> Field::asObjectArray() const {\n  checkType(Type::Array);\n  return {value_.array_value_.begin(), value_.array_value_.end()};\n}\n\nstd::string Field::asJsonString() const {\n  rapidjson::StringBuffer buffer;\n  rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);\n  rapidjson::Document document = asRapidJsonDocument();\n  document.Accept(writer);\n  return buffer.GetString();\n}\n\nbool Field::empty() const {\n  if (isType(Type::Object)) {\n    return value_.object_value_.empty();\n  } else if (isType(Type::Array)) {\n    return value_.array_value_.empty();\n  } else {\n    throw Exception(\n        fmt::format(\"Json does not support empty() on types other than array and object\"));\n  }\n}\n\nbool Field::hasObject(const std::string& name) const {\n  checkType(Type::Object);\n  auto value_itr = value_.object_value_.find(name);\n  return value_itr != value_.object_value_.end();\n}\n\nvoid Field::iterate(const ObjectCallback& callback) const {\n  checkType(Type::Object);\n  for (const auto& item : value_.object_value_) {\n    bool stop_iteration = !callback(item.first, *item.second);\n    if (stop_iteration) {\n      break;\n    }\n  }\n}\n\nvoid Field::validateSchema(const std::string& schema) const {\n  rapidjson::Document schema_document;\n  if (schema_document.Parse<0>(schema.c_str()).HasParseError()) {\n    throw std::invalid_argument(fmt::format(\n        \"Schema supplied to validateSchema is not valid JSON\\n Error(offset {}) : {}\\n\",\n        schema_document.GetErrorOffset(), 
GetParseError_En(schema_document.GetParseError())));\n  }\n\n  rapidjson::SchemaDocument schema_document_for_validator(schema_document);\n  rapidjson::SchemaValidator schema_validator(schema_document_for_validator);\n\n  if (!asRapidJsonDocument().Accept(schema_validator)) {\n    rapidjson::StringBuffer schema_string_buffer;\n    rapidjson::StringBuffer document_string_buffer;\n\n    schema_validator.GetInvalidSchemaPointer().StringifyUriFragment(schema_string_buffer);\n    schema_validator.GetInvalidDocumentPointer().StringifyUriFragment(document_string_buffer);\n\n    throw Exception(fmt::format(\n        \"JSON at lines {}-{} does not conform to schema.\\n Invalid schema: {}\\n\"\n        \" Schema violation: {}\\n\"\n        \" Offending document key: {}\",\n        line_number_start_, line_number_end_, schema_string_buffer.GetString(),\n        schema_validator.GetInvalidSchemaKeyword(), document_string_buffer.GetString()));\n  }\n}\n\nbool ObjectHandler::StartObject() {\n  FieldSharedPtr object = Field::createObject();\n  object->setLineNumberStart(stream_.getLineNumber());\n\n  switch (state_) {\n  case State::ExpectValueOrStartObjectArray:\n    stack_.top()->insert(key_, object);\n    stack_.push(object);\n    state_ = State::ExpectKeyOrEndObject;\n    return true;\n  case State::ExpectArrayValueOrEndArray:\n    stack_.top()->append(object);\n    stack_.push(object);\n    state_ = State::ExpectKeyOrEndObject;\n    return true;\n  case State::ExpectRoot:\n    root_ = object;\n    stack_.push(object);\n    state_ = State::ExpectKeyOrEndObject;\n    return true;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nbool ObjectHandler::EndObject(rapidjson::SizeType) {\n  switch (state_) {\n  case State::ExpectKeyOrEndObject:\n    stack_.top()->setLineNumberEnd(stream_.getLineNumber());\n    stack_.pop();\n\n    if (stack_.empty()) {\n      state_ = State::ExpectFinished;\n    } else if (stack_.top()->isObject()) {\n      state_ = 
State::ExpectKeyOrEndObject;\n    } else if (stack_.top()->isArray()) {\n      state_ = State::ExpectArrayValueOrEndArray;\n    }\n    return true;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nbool ObjectHandler::Key(const char* value, rapidjson::SizeType size, bool) {\n  switch (state_) {\n  case State::ExpectKeyOrEndObject:\n    key_ = std::string(value, size);\n    state_ = State::ExpectValueOrStartObjectArray;\n    return true;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nbool ObjectHandler::StartArray() {\n  FieldSharedPtr array = Field::createArray();\n  array->setLineNumberStart(stream_.getLineNumber());\n\n  switch (state_) {\n  case State::ExpectValueOrStartObjectArray:\n    stack_.top()->insert(key_, array);\n    stack_.push(array);\n    state_ = State::ExpectArrayValueOrEndArray;\n    return true;\n  case State::ExpectArrayValueOrEndArray:\n    stack_.top()->append(array);\n    stack_.push(array);\n    return true;\n  case State::ExpectRoot:\n    root_ = array;\n    stack_.push(array);\n    state_ = State::ExpectArrayValueOrEndArray;\n    return true;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nbool ObjectHandler::EndArray(rapidjson::SizeType) {\n  switch (state_) {\n  case State::ExpectArrayValueOrEndArray:\n    stack_.top()->setLineNumberEnd(stream_.getLineNumber());\n    stack_.pop();\n\n    if (stack_.empty()) {\n      state_ = State::ExpectFinished;\n    } else if (stack_.top()->isObject()) {\n      state_ = State::ExpectKeyOrEndObject;\n    } else if (stack_.top()->isArray()) {\n      state_ = State::ExpectArrayValueOrEndArray;\n    }\n\n    return true;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n// Value handlers\nbool ObjectHandler::Bool(bool value) { return handleValueEvent(Field::createValue(value)); }\nbool ObjectHandler::Double(double value) { return handleValueEvent(Field::createValue(value)); }\nbool ObjectHandler::Int(int value) {\n  return 
handleValueEvent(Field::createValue(static_cast<int64_t>(value)));\n}\nbool ObjectHandler::Uint(unsigned value) {\n  return handleValueEvent(Field::createValue(static_cast<int64_t>(value)));\n}\nbool ObjectHandler::Int64(int64_t value) { return handleValueEvent(Field::createValue(value)); }\nbool ObjectHandler::Uint64(uint64_t value) {\n  if (value > static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {\n    throw Exception(fmt::format(\"JSON value from line {} is larger than int64_t (not supported)\",\n                                stream_.getLineNumber()));\n  }\n  return handleValueEvent(Field::createValue(static_cast<int64_t>(value)));\n}\n\nbool ObjectHandler::Null() { return handleValueEvent(Field::createNull()); }\n\nbool ObjectHandler::String(const char* value, rapidjson::SizeType size, bool) {\n  return handleValueEvent(Field::createValue(std::string(value, size)));\n}\n\nbool ObjectHandler::RawNumber(const char*, rapidjson::SizeType, bool) {\n  // Only called if kParseNumbersAsStrings is set as a parse flag, which it is not.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nbool ObjectHandler::handleValueEvent(FieldSharedPtr ptr) {\n  ptr->setLineNumberStart(stream_.getLineNumber());\n\n  switch (state_) {\n  case State::ExpectValueOrStartObjectArray:\n    state_ = State::ExpectKeyOrEndObject;\n    stack_.top()->insert(key_, ptr);\n    return true;\n  case State::ExpectArrayValueOrEndArray:\n    stack_.top()->append(ptr);\n    return true;\n  default:\n    return false;\n  }\n}\n\n} // namespace\n\nObjectSharedPtr Factory::loadFromString(const std::string& json) {\n  LineCountingStringStream json_stream(json.c_str());\n\n  ObjectHandler handler(json_stream);\n  rapidjson::Reader reader;\n  reader.Parse(json_stream, handler);\n\n  if (reader.HasParseError()) {\n    throw Exception(fmt::format(\"JSON supplied is not valid. 
Error(offset {}, line {}): {}\\n\",\n                                reader.GetErrorOffset(), json_stream.getLineNumber(),\n                                GetParseError_En(reader.GetParseErrorCode())));\n  }\n\n  return handler.getRoot();\n}\n\n} // namespace Json\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/json/json_loader.h",
    "content": "#pragma once\n\n#include <list>\n#include <string>\n\n#include \"envoy/json/json_object.h\"\n\nnamespace Envoy {\nnamespace Json {\n\nclass Factory {\npublic:\n  /**\n   * Constructs a Json Object from a string.\n   */\n  static ObjectSharedPtr loadFromString(const std::string& json);\n};\n\n} // namespace Json\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/local_info/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"local_info_lib\",\n    hdrs = [\"local_info_impl.h\"],\n    deps = [\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//source/common/config:version_converter_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/local_info/local_info_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/local_info/local_info.h\"\n\n#include \"common/config/version_converter.h\"\n\nnamespace Envoy {\nnamespace LocalInfo {\n\nclass LocalInfoImpl : public LocalInfo {\npublic:\n  LocalInfoImpl(const envoy::config::core::v3::Node& node,\n                const Network::Address::InstanceConstSharedPtr& address,\n                absl::string_view zone_name, absl::string_view cluster_name,\n                absl::string_view node_name)\n      : node_(node), address_(address) {\n    if (!zone_name.empty()) {\n      node_.mutable_locality()->set_zone(std::string(zone_name));\n    }\n    if (!cluster_name.empty()) {\n      node_.set_cluster(std::string(cluster_name));\n    }\n    if (!node_name.empty()) {\n      node_.set_id(std::string(node_name));\n    }\n  }\n\n  Network::Address::InstanceConstSharedPtr address() const override { return address_; }\n  const std::string& zoneName() const override { return node_.locality().zone(); }\n  const std::string& clusterName() const override { return node_.cluster(); }\n  const std::string& nodeName() const override { return node_.id(); }\n  const envoy::config::core::v3::Node& node() const override { return node_; }\n\nprivate:\n  envoy::config::core::v3::Node node_;\n  Network::Address::InstanceConstSharedPtr address_;\n};\n\n} // namespace LocalInfo\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/local_reply/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"local_reply_lib\",\n    srcs = [\"local_reply.cc\"],\n    hdrs = [\"local_reply.h\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/common/access_log:access_log_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/formatter:substitution_format_string_lib\",\n        \"//source/common/formatter:substitution_formatter_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/router:header_parser_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/local_reply/local_reply.cc",
    "content": "#include \"common/local_reply/local_reply.h\"\n\n#include <string>\n#include <vector>\n\n#include \"common/access_log/access_log_impl.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/config/datasource.h\"\n#include \"common/formatter/substitution_format_string.h\"\n#include \"common/formatter/substitution_formatter.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/router/header_parser.h\"\n\nnamespace Envoy {\nnamespace LocalReply {\n\nclass BodyFormatter {\npublic:\n  BodyFormatter()\n      : formatter_(std::make_unique<Envoy::Formatter::FormatterImpl>(\"%LOCAL_REPLY_BODY%\")),\n        content_type_(Http::Headers::get().ContentTypeValues.Text) {}\n\n  BodyFormatter(const envoy::config::core::v3::SubstitutionFormatString& config)\n      : formatter_(Formatter::SubstitutionFormatStringUtils::fromProtoConfig(config)),\n        content_type_(\n            !config.content_type().empty()\n                ? config.content_type()\n                : config.format_case() ==\n                          envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat\n                      ? 
Http::Headers::get().ContentTypeValues.Json\n                      : Http::Headers::get().ContentTypeValues.Text) {}\n\n  void format(const Http::RequestHeaderMap& request_headers,\n              const Http::ResponseHeaderMap& response_headers,\n              const Http::ResponseTrailerMap& response_trailers,\n              const StreamInfo::StreamInfo& stream_info, std::string& body,\n              absl::string_view& content_type) const {\n    body =\n        formatter_->format(request_headers, response_headers, response_trailers, stream_info, body);\n    content_type = content_type_;\n  }\n\nprivate:\n  const Formatter::FormatterPtr formatter_;\n  const std::string content_type_;\n};\n\nusing BodyFormatterPtr = std::unique_ptr<BodyFormatter>;\nusing HeaderParserPtr = std::unique_ptr<Envoy::Router::HeaderParser>;\n\nclass ResponseMapper {\npublic:\n  ResponseMapper(\n      const envoy::extensions::filters::network::http_connection_manager::v3::ResponseMapper&\n          config,\n      Server::Configuration::FactoryContext& context)\n      : filter_(AccessLog::FilterFactory::fromProto(config.filter(), context.runtime(),\n                                                    context.api().randomGenerator(),\n                                                    context.messageValidationVisitor())) {\n    if (config.has_status_code()) {\n      status_code_ = static_cast<Http::Code>(config.status_code().value());\n    }\n    if (config.has_body()) {\n      body_ = Config::DataSource::read(config.body(), true, context.api());\n    }\n\n    if (config.has_body_format_override()) {\n      body_formatter_ = std::make_unique<BodyFormatter>(config.body_format_override());\n    }\n\n    header_parser_ = Envoy::Router::HeaderParser::configure(config.headers_to_add());\n  }\n\n  bool matchAndRewrite(const Http::RequestHeaderMap& request_headers,\n                       Http::ResponseHeaderMap& response_headers,\n                       const Http::ResponseTrailerMap& 
response_trailers,\n                       StreamInfo::StreamInfoImpl& stream_info, Http::Code& code, std::string& body,\n                       BodyFormatter*& final_formatter) const {\n    // If not matched, just bail out.\n    if (!filter_->evaluate(stream_info, request_headers, response_headers, response_trailers)) {\n      return false;\n    }\n\n    if (body_.has_value()) {\n      body = body_.value();\n    }\n\n    header_parser_->evaluateHeaders(response_headers, stream_info);\n\n    if (status_code_.has_value() && code != status_code_.value()) {\n      code = status_code_.value();\n      response_headers.setStatus(std::to_string(enumToInt(code)));\n      stream_info.response_code_ = static_cast<uint32_t>(code);\n    }\n\n    if (body_formatter_) {\n      final_formatter = body_formatter_.get();\n    }\n    return true;\n  }\n\nprivate:\n  const AccessLog::FilterPtr filter_;\n  absl::optional<Http::Code> status_code_;\n  absl::optional<std::string> body_;\n  HeaderParserPtr header_parser_;\n  BodyFormatterPtr body_formatter_;\n};\n\nusing ResponseMapperPtr = std::unique_ptr<ResponseMapper>;\n\nclass LocalReplyImpl : public LocalReply {\npublic:\n  LocalReplyImpl() : body_formatter_(std::make_unique<BodyFormatter>()) {}\n\n  LocalReplyImpl(\n      const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig&\n          config,\n      Server::Configuration::FactoryContext& context)\n      : body_formatter_(config.has_body_format()\n                            ? 
std::make_unique<BodyFormatter>(config.body_format())\n                            : std::make_unique<BodyFormatter>()) {\n    for (const auto& mapper : config.mappers()) {\n      mappers_.emplace_back(std::make_unique<ResponseMapper>(mapper, context));\n    }\n  }\n\n  void rewrite(const Http::RequestHeaderMap* request_headers,\n               Http::ResponseHeaderMap& response_headers, StreamInfo::StreamInfoImpl& stream_info,\n               Http::Code& code, std::string& body,\n               absl::string_view& content_type) const override {\n    // Set response code to stream_info and response_headers due to:\n    // 1) StatusCode filter is using response_code from stream_info,\n    // 2) %RESP(:status)% is from Status() in response_headers.\n    response_headers.setStatus(std::to_string(enumToInt(code)));\n    stream_info.response_code_ = static_cast<uint32_t>(code);\n\n    if (request_headers == nullptr) {\n      request_headers = Http::StaticEmptyHeaders::get().request_headers.get();\n    }\n\n    BodyFormatter* final_formatter{};\n    for (const auto& mapper : mappers_) {\n      if (mapper->matchAndRewrite(*request_headers, response_headers,\n                                  *Http::StaticEmptyHeaders::get().response_trailers, stream_info,\n                                  code, body, final_formatter)) {\n        break;\n      }\n    }\n\n    if (!final_formatter) {\n      final_formatter = body_formatter_.get();\n    }\n    return final_formatter->format(*request_headers, response_headers,\n                                   *Http::StaticEmptyHeaders::get().response_trailers, stream_info,\n                                   body, content_type);\n  }\n\nprivate:\n  std::list<ResponseMapperPtr> mappers_;\n  const BodyFormatterPtr body_formatter_;\n};\n\nLocalReplyPtr Factory::createDefault() { return std::make_unique<LocalReplyImpl>(); }\n\nLocalReplyPtr Factory::create(\n    const 
envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig&\n        config,\n    Server::Configuration::FactoryContext& context) {\n  return std::make_unique<LocalReplyImpl>(config, context);\n}\n\n} // namespace LocalReply\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/local_reply/local_reply.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/stream_info/stream_info_impl.h\"\n\nnamespace Envoy {\nnamespace LocalReply {\n\nclass LocalReply {\npublic:\n  virtual ~LocalReply() = default;\n\n  /**\n   * rewrite the response status code, body and content_type.\n   * @param request_headers supplies the information about request headers required by filters.\n   * @param stream_info supplies the information about streams required by filters.\n   * @param code status code.\n   * @param body response body.\n   * @param content_type response content_type.\n   */\n  virtual void rewrite(const Http::RequestHeaderMap* request_headers,\n                       Http::ResponseHeaderMap& response_headers,\n                       StreamInfo::StreamInfoImpl& stream_info, Http::Code& code, std::string& body,\n                       absl::string_view& content_type) const PURE;\n};\n\nusing LocalReplyPtr = std::unique_ptr<LocalReply>;\n\n/**\n * Access log filter factory that reads from proto.\n */\nclass Factory {\npublic:\n  /**\n   * Create a LocalReply object from ProtoConfig\n   */\n  static LocalReplyPtr\n  create(const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig&\n             config,\n         Server::Configuration::FactoryContext& context);\n\n  /**\n   * Create a default LocalReply object with empty config.\n   * It is used at places without Server::Configuration::FactoryContext.\n   */\n  static LocalReplyPtr createDefault();\n};\n\n} // namespace LocalReply\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/memory/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"stats_lib\",\n    srcs = [\"stats.cc\"],\n    hdrs = [\"stats.h\"],\n    tcmalloc_dep = 1,\n    deps = [\n        \"//source/common/common:logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utils_lib\",\n    srcs = [\"utils.cc\"],\n    hdrs = [\"utils.h\"],\n    tcmalloc_dep = 1,\n    deps = [\n        \":stats_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"heap_shrinker_lib\",\n    srcs = [\"heap_shrinker.cc\"],\n    hdrs = [\"heap_shrinker.h\"],\n    deps = [\n        \":utils_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/server:overload_manager_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/stats:symbol_table_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/memory/heap_shrinker.cc",
    "content": "#include \"common/memory/heap_shrinker.h\"\n\n#include \"common/memory/utils.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Memory {\n\n// TODO(eziskind): make this configurable\nconstexpr std::chrono::milliseconds kTimerInterval = std::chrono::milliseconds(10000);\n\nHeapShrinker::HeapShrinker(Event::Dispatcher& dispatcher, Server::OverloadManager& overload_manager,\n                           Stats::Scope& stats)\n    : active_(false) {\n  const auto action_name = Server::OverloadActionNames::get().ShrinkHeap;\n  if (overload_manager.registerForAction(\n          action_name, dispatcher,\n          [this](Server::OverloadActionState state) { active_ = state.isSaturated(); })) {\n    Envoy::Stats::StatNameManagedStorage stat_name(\n        absl::StrCat(\"overload.\", action_name, \".shrink_count\"), stats.symbolTable());\n    shrink_counter_ = &stats.counterFromStatName(stat_name.statName());\n    timer_ = dispatcher.createTimer([this] {\n      shrinkHeap();\n      timer_->enableTimer(kTimerInterval);\n    });\n    timer_->enableTimer(kTimerInterval);\n  }\n}\n\nvoid HeapShrinker::shrinkHeap() {\n  if (active_) {\n    Utils::releaseFreeMemory();\n    shrink_counter_->inc();\n  }\n}\n\n} // namespace Memory\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/memory/heap_shrinker.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/server/overload_manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n\nnamespace Envoy {\nnamespace Memory {\n\n/**\n * A utility class to periodically attempt to shrink the heap by releasing free memory\n * to the system if the \"shrink heap\" overload action has been configured and triggered.\n */\nclass HeapShrinker {\npublic:\n  HeapShrinker(Event::Dispatcher& dispatcher, Server::OverloadManager& overload_manager,\n               Envoy::Stats::Scope& stats);\n\nprivate:\n  void shrinkHeap();\n\n  bool active_;\n  Envoy::Stats::Counter* shrink_counter_;\n  Envoy::Event::TimerPtr timer_;\n};\n\n} // namespace Memory\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/memory/stats.cc",
    "content": "#include \"common/memory/stats.h\"\n\n#include <cstdint>\n\n#include \"common/common/logger.h\"\n\n#if defined(TCMALLOC)\n\n#include \"tcmalloc/malloc_extension.h\"\n\nnamespace Envoy {\nnamespace Memory {\n\nuint64_t Stats::totalCurrentlyAllocated() {\n  return tcmalloc::MallocExtension::GetNumericProperty(\"generic.current_allocated_bytes\")\n      .value_or(0);\n}\n\nuint64_t Stats::totalCurrentlyReserved() {\n  // In Google's tcmalloc the semantics of generic.heap_size has\n  // changed: it doesn't include unmapped bytes.\n  return tcmalloc::MallocExtension::GetNumericProperty(\"generic.heap_size\").value_or(0) +\n         tcmalloc::MallocExtension::GetNumericProperty(\"tcmalloc.pageheap_unmapped_bytes\")\n             .value_or(0);\n}\n\nuint64_t Stats::totalThreadCacheBytes() {\n  return tcmalloc::MallocExtension::GetNumericProperty(\"tcmalloc.current_total_thread_cache_bytes\")\n      .value_or(0);\n}\n\nuint64_t Stats::totalPageHeapFree() {\n  return tcmalloc::MallocExtension::GetNumericProperty(\"tcmalloc.pageheap_free_bytes\").value_or(0);\n}\n\nuint64_t Stats::totalPageHeapUnmapped() {\n  return tcmalloc::MallocExtension::GetNumericProperty(\"tcmalloc.pageheap_unmapped_bytes\")\n      .value_or(0);\n}\n\nuint64_t Stats::totalPhysicalBytes() {\n  return tcmalloc::MallocExtension::GetProperties()[\"generic.physical_memory_used\"].value;\n}\n\nvoid Stats::dumpStatsToLog() {\n  ENVOY_LOG_MISC(debug, \"TCMalloc stats:\\n{}\", tcmalloc::MallocExtension::GetStats());\n}\n\n} // namespace Memory\n} // namespace Envoy\n\n#elif defined(GPERFTOOLS_TCMALLOC)\n\n#include \"gperftools/malloc_extension.h\"\n\nnamespace Envoy {\nnamespace Memory {\n\nuint64_t Stats::totalCurrentlyAllocated() {\n  size_t value = 0;\n  MallocExtension::instance()->GetNumericProperty(\"generic.current_allocated_bytes\", &value);\n  return value;\n}\n\nuint64_t Stats::totalCurrentlyReserved() {\n  size_t value = 0;\n  
MallocExtension::instance()->GetNumericProperty(\"generic.heap_size\", &value);\n  return value;\n}\n\nuint64_t Stats::totalThreadCacheBytes() {\n  size_t value = 0;\n  MallocExtension::instance()->GetNumericProperty(\"tcmalloc.current_total_thread_cache_bytes\",\n                                                  &value);\n  return value;\n}\n\nuint64_t Stats::totalPageHeapFree() {\n  size_t value = 0;\n  MallocExtension::instance()->GetNumericProperty(\"tcmalloc.pageheap_free_bytes\", &value);\n  return value;\n}\n\nuint64_t Stats::totalPageHeapUnmapped() {\n  size_t value = 0;\n  MallocExtension::instance()->GetNumericProperty(\"tcmalloc.pageheap_unmapped_bytes\", &value);\n  return value;\n}\n\nuint64_t Stats::totalPhysicalBytes() {\n  size_t value = 0;\n  MallocExtension::instance()->GetNumericProperty(\"generic.total_physical_bytes\", &value);\n  return value;\n}\n\nvoid Stats::dumpStatsToLog() {\n  constexpr int buffer_size = 100000;\n  auto buffer = std::make_unique<char[]>(buffer_size);\n  MallocExtension::instance()->GetStats(buffer.get(), buffer_size);\n  ENVOY_LOG_MISC(debug, \"TCMalloc stats:\\n{}\", buffer.get());\n}\n\n} // namespace Memory\n} // namespace Envoy\n\n#else\n\nnamespace Envoy {\nnamespace Memory {\n\nuint64_t Stats::totalCurrentlyAllocated() { return 0; }\nuint64_t Stats::totalThreadCacheBytes() { return 0; }\nuint64_t Stats::totalCurrentlyReserved() { return 0; }\nuint64_t Stats::totalPageHeapUnmapped() { return 0; }\nuint64_t Stats::totalPageHeapFree() { return 0; }\nuint64_t Stats::totalPhysicalBytes() { return 0; }\nvoid Stats::dumpStatsToLog() {}\n\n} // namespace Memory\n} // namespace Envoy\n\n#endif // #if defined(TCMALLOC)\n"
  },
  {
    "path": "source/common/memory/stats.h",
    "content": "#pragma once\n\n#include <cstdint>\n\nnamespace Envoy {\nnamespace Memory {\n\n/**\n * Runtime stats for process memory usage.\n */\nclass Stats {\npublic:\n  /**\n   * @return uint64_t the total memory currently allocated.\n   */\n  static uint64_t totalCurrentlyAllocated();\n\n  /**\n   * @return uint64_t the total memory reserved for the process by the heap but not necessarily\n   *                  allocated.\n   */\n  static uint64_t totalCurrentlyReserved();\n\n  /**\n   * @return uint64_t the amount of memory used by the TCMalloc thread caches (for small objects).\n   */\n  static uint64_t totalThreadCacheBytes();\n\n  /**\n   * @return uint64_t the number of bytes in free, unmapped pages in the page heap. These bytes\n   *                  always count towards virtual memory usage, and depending on the OS, typically\n   *                  do not count towards physical memory usage.\n   */\n  static uint64_t totalPageHeapUnmapped();\n\n  /**\n   * @return uint64_t the number of bytes in free, mapped pages in the page heap. These bytes always\n   *                  count towards virtual memory usage, and unless the underlying memory is\n   *                  swapped out by the OS, they also count towards physical memory usage.\n   */\n  static uint64_t totalPageHeapFree();\n\n  /**\n   * @return uint64_t estimate of total bytes of the physical memory usage by the allocator\n   */\n  static uint64_t totalPhysicalBytes();\n\n  /**\n   * Log detailed stats about current memory allocation. Intended for debugging purposes.\n   */\n  static void dumpStatsToLog();\n};\n\n} // namespace Memory\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/memory/utils.cc",
    "content": "#include \"common/memory/utils.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/memory/stats.h\"\n\n#if defined(TCMALLOC)\n#include \"tcmalloc/malloc_extension.h\"\n#elif defined(GPERFTOOLS_TCMALLOC)\n#include \"gperftools/malloc_extension.h\"\n#endif\n\nnamespace Envoy {\nnamespace Memory {\n\nnamespace {\n#if defined(TCMALLOC) || defined(GPERFTOOLS_TCMALLOC)\n// TODO(zyfjeff): Make max unfreed memory byte configurable\nconstexpr uint64_t MAX_UNFREED_MEMORY_BYTE = 100 * 1024 * 1024;\n#endif\n} // namespace\n\nvoid Utils::releaseFreeMemory() {\n#if defined(TCMALLOC)\n  tcmalloc::MallocExtension::ReleaseMemoryToSystem(MAX_UNFREED_MEMORY_BYTE);\n#elif defined(GPERFTOOLS_TCMALLOC)\n  MallocExtension::instance()->ReleaseFreeMemory();\n#endif\n}\n\n/*\n  The purpose of this function is to release the cache introduced by tcmalloc,\n  mainly in xDS config updates, admin handler, and so on. all work on the main thread,\n  so the overall impact on performance is small.\n  Ref: https://github.com/envoyproxy/envoy/pull/9471#discussion_r363825985\n*/\nvoid Utils::tryShrinkHeap() {\n#if defined(TCMALLOC) || defined(GPERFTOOLS_TCMALLOC)\n  auto total_physical_bytes = Stats::totalPhysicalBytes();\n  auto allocated_size_by_app = Stats::totalCurrentlyAllocated();\n\n  ASSERT(total_physical_bytes >= allocated_size_by_app);\n\n  if ((total_physical_bytes - allocated_size_by_app) >= MAX_UNFREED_MEMORY_BYTE) {\n    Utils::releaseFreeMemory();\n  }\n#endif\n}\n\n} // namespace Memory\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/memory/utils.h",
    "content": "#pragma once\n\nnamespace Envoy {\nnamespace Memory {\n\nclass Utils {\npublic:\n  static void releaseFreeMemory();\n  static void tryShrinkHeap();\n};\n\n} // namespace Memory\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"address_lib\",\n    srcs = [\"address_impl.cc\"],\n    hdrs = [\"address_impl.h\"],\n    deps = [\n        \":socket_interface_lib\",\n        \"//include/envoy/network:address_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"application_protocol_lib\",\n    srcs = [\"application_protocol.cc\"],\n    hdrs = [\"application_protocol.h\"],\n    deps = [\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"cidr_range_lib\",\n    srcs = [\"cidr_range.cc\"],\n    hdrs = [\"cidr_range.h\"],\n    deps = [\n        \":address_lib\",\n        \":utility_lib\",\n        \"//include/envoy/network:address_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"connection_balancer_lib\",\n    srcs = [\"connection_balancer_impl.cc\"],\n    hdrs = [\"connection_balancer_impl.h\"],\n    deps = [\n        \"//include/envoy/network:connection_balancer_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"connection_base_lib\",\n    srcs = [\"connection_impl_base.cc\"],\n    hdrs = [\"connection_impl_base.h\"],\n    deps = [\n        \":filter_manager_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"connection_lib\",\n    srcs = [\"connection_impl.cc\"],\n    hdrs = [\"connection_impl.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        
\":address_lib\",\n        \":connection_base_lib\",\n        \":raw_buffer_socket_lib\",\n        \":utility_lib\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/buffer:watermark_buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/event:libevent_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"apple_dns_lib\",\n    srcs = select({\n        \"//bazel:apple\": [\"apple_dns_impl.cc\"],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"//bazel:apple\": [\"apple_dns_impl.h\"],\n        \"//conditions:default\": [],\n    }),\n    deps = [\n        \":address_lib\",\n        \":utility_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:file_event_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:linked_object\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dns_lib\",\n    srcs = [\"dns_impl.cc\"],\n    hdrs = [\"dns_impl.h\"],\n    external_deps = [\"ares\"],\n    deps = [\n        \":address_lib\",\n        \":utility_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:file_event_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:linked_object\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_lib\",\n 
   hdrs = [\"filter_impl.h\"],\n    deps = [\n        \"//include/envoy/network:filter_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_manager_lib\",\n    srcs = [\"filter_manager_impl.cc\"],\n    hdrs = [\"filter_manager_impl.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:linked_object\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hash_policy_lib\",\n    srcs = [\"hash_policy.cc\"],\n    hdrs = [\"hash_policy.h\"],\n    deps = [\n        \"//include/envoy/network:hash_policy_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hash_lib\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"io_socket_error_lib\",\n    srcs = [\"io_socket_error_impl.cc\"],\n    hdrs = [\"io_socket_error_impl.h\"],\n    deps = [\n        \"//include/envoy/api:io_error_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"lc_trie_lib\",\n    hdrs = [\"lc_trie.h\"],\n    external_deps = [\n        \"abseil_node_hash_set\",\n        \"abseil_int128\",\n    ],\n    deps = [\n        \":address_lib\",\n        \":cidr_range_lib\",\n        \":utility_lib\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"socket_interface_lib\",\n    hdrs = [\"socket_interface.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/network:socket_interface_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:bootstrap_extension_config_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"default_socket_interface_lib\",\n    srcs = [\n        \"io_socket_handle_impl.cc\",\n        
\"socket_interface_impl.cc\",\n    ],\n    hdrs = [\n        \"io_socket_handle_impl.h\",\n        \"socket_interface_impl.h\",\n    ],\n    deps = [\n        \":address_lib\",\n        \":io_socket_error_lib\",\n        \":socket_interface_lib\",\n        \":socket_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:io_handle_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"@envoy_api//envoy/extensions/network/socket_interface/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"socket_lib\",\n    srcs = [\"socket_impl.cc\"],\n    hdrs = [\"socket_impl.h\"],\n    deps = [\n        \"//include/envoy/network:socket_interface\",\n        \"//include/envoy/network:socket_interface_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"listen_socket_lib\",\n    srcs = [\"listen_socket_impl.cc\"],\n    hdrs = [\"listen_socket_impl.h\"],\n    deps = [\n        \":socket_lib\",\n        \":utility_lib\",\n        \"//include/envoy/network:exception_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/common:assert_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"listener_lib\",\n    srcs = [\n        \"base_listener_impl.cc\",\n        \"tcp_listener_impl.cc\",\n        \"udp_listener_impl.cc\",\n    ],\n    hdrs = [\n        \"base_listener_impl.h\",\n        \"tcp_listener_impl.h\",\n        \"udp_listener_impl.h\",\n    ],\n    deps = [\n        \":address_lib\",\n        \":default_socket_interface_lib\",\n        \":listen_socket_lib\",\n        \":udp_default_writer_config\",\n        \"//include/envoy/event:dispatcher_interface\",\n        
\"//include/envoy/event:file_event_interface\",\n        \"//include/envoy/network:exception_interface\",\n        \"//include/envoy/network:listener_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/event:dispatcher_includes\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"raw_buffer_socket_lib\",\n    srcs = [\"raw_buffer_socket.cc\"],\n    hdrs = [\"raw_buffer_socket.h\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/http:headers_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"resolver_lib\",\n    srcs = [\"resolver_impl.cc\"],\n    hdrs = [\"resolver_impl.h\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/network:resolver_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/config:well_known_names\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"socket_option_lib\",\n    srcs = [\"socket_option_impl.cc\"],\n    hdrs = [\"socket_option_impl.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":address_lib\",\n        \":listen_socket_lib\",\n        \"//include/envoy/api:os_sys_calls_interface\",\n        
\"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"addr_family_aware_socket_option_lib\",\n    srcs = [\"addr_family_aware_socket_option_impl.cc\"],\n    hdrs = [\"addr_family_aware_socket_option_impl.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":address_lib\",\n        \":socket_lib\",\n        \":socket_option_lib\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:logger_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"socket_option_factory_lib\",\n    srcs = [\"socket_option_factory.cc\"],\n    hdrs = [\"socket_option_factory.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":addr_family_aware_socket_option_lib\",\n        \":address_lib\",\n        \":socket_option_lib\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/common:logger_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    deps = [\n        \":address_lib\",\n        \":default_socket_interface_lib\",\n        \":socket_lib\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:listener_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        
\"//source/common/common:cleanup_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"transport_socket_options_lib\",\n    srcs = [\"transport_socket_options_impl.cc\"],\n    hdrs = [\"transport_socket_options_impl.h\"],\n    deps = [\n        \":application_protocol_lib\",\n        \":proxy_protocol_filter_state_lib\",\n        \":upstream_server_name_lib\",\n        \":upstream_subject_alt_names_lib\",\n        \"//include/envoy/network:proxy_protocol_options_lib\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/common:scalar_to_byte_vector_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"upstream_server_name_lib\",\n    srcs = [\"upstream_server_name.cc\"],\n    hdrs = [\"upstream_server_name.h\"],\n    deps = [\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"upstream_subject_alt_names_lib\",\n    srcs = [\"upstream_subject_alt_names.cc\"],\n    hdrs = [\"upstream_subject_alt_names.h\"],\n    deps = [\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_matcher_lib\",\n    srcs = [\n        \"filter_matcher.cc\",\n    ],\n    hdrs = [\"filter_matcher.h\"],\n    external_deps = [\n        \"abseil_str_format\",\n    ],\n    deps = [\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"udp_packet_writer_handler_lib\",\n    srcs = [\"udp_packet_writer_handler_impl.cc\"],\n    
hdrs = [\"udp_packet_writer_handler_impl.h\"],\n    deps = [\n        \":io_socket_error_lib\",\n        \":utility_lib\",\n        \"//include/envoy/network:socket_interface\",\n        \"//include/envoy/network:udp_packet_writer_config_interface\",\n        \"//include/envoy/network:udp_packet_writer_handler_interface\",\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"udp_default_writer_config\",\n    srcs = [\"udp_default_writer_config.cc\"],\n    hdrs = [\"udp_default_writer_config.h\"],\n    deps = [\n        \":udp_packet_writer_handler_lib\",\n        \"//include/envoy/network:udp_packet_writer_config_interface\",\n        \"//include/envoy/registry\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"proxy_protocol_filter_state_lib\",\n    srcs = [\"proxy_protocol_filter_state.cc\"],\n    hdrs = [\"proxy_protocol_filter_state.h\"],\n    deps = [\n        \"//include/envoy/network:proxy_protocol_options_lib\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/common:macros\",\n    ],\n)\n"
  },
  {
    "path": "source/common/network/addr_family_aware_socket_option_impl.cc",
    "content": "#include \"common/network/addr_family_aware_socket_option_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_option_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nnamespace {\n\nSocketOptionImplOptRef getOptionForSocket(const Socket& socket, SocketOptionImpl& ipv4_option,\n                                          SocketOptionImpl& ipv6_option) {\n  auto version = socket.ipVersion();\n  if (!version.has_value()) {\n    return absl::nullopt;\n  }\n\n  // If the FD is v4, we can only try the IPv4 variant.\n  if (*version == Network::Address::IpVersion::v4) {\n    return {ipv4_option};\n  }\n  // If the FD is v6, we first try the IPv6 variant if the platform supports it and fallback to the\n  // IPv4 variant otherwise.\n  ASSERT(*version == Network::Address::IpVersion::v6);\n  if (ipv6_option.isSupported()) {\n    return {ipv6_option};\n  }\n  return {ipv4_option};\n}\n\n} // namespace\n\nbool AddrFamilyAwareSocketOptionImpl::setOption(\n    Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const {\n  return setIpSocketOption(socket, state, ipv4_option_, ipv6_option_);\n}\n\nabsl::optional<Socket::Option::Details> AddrFamilyAwareSocketOptionImpl::getOptionDetails(\n    const Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const {\n  auto option = getOptionForSocket(socket, *ipv4_option_, *ipv6_option_);\n\n  if (!option.has_value()) {\n    return absl::nullopt;\n  }\n\n  return option->get().getOptionDetails(socket, state);\n}\n\nbool AddrFamilyAwareSocketOptionImpl::setIpSocketOption(\n    Socket& socket, envoy::config::core::v3::SocketOption::SocketState state,\n    const std::unique_ptr<SocketOptionImpl>& ipv4_option,\n    const 
std::unique_ptr<SocketOptionImpl>& ipv6_option) {\n  auto option = getOptionForSocket(socket, *ipv4_option, *ipv6_option);\n\n  if (!option.has_value()) {\n    ENVOY_LOG(warn, \"Failed to set IP socket option on non-IP socket\");\n    return false;\n  }\n\n  return option->get().setOption(socket, state);\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/addr_family_aware_socket_option_impl.h",
    "content": "#pragma once\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/listen_socket.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/network/socket_option_impl.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass AddrFamilyAwareSocketOptionImpl : public Socket::Option,\n                                        Logger::Loggable<Logger::Id::connection> {\npublic:\n  AddrFamilyAwareSocketOptionImpl(envoy::config::core::v3::SocketOption::SocketState in_state,\n                                  SocketOptionName ipv4_optname, SocketOptionName ipv6_optname,\n                                  int value)\n      : ipv4_option_(std::make_unique<SocketOptionImpl>(in_state, ipv4_optname, value)),\n        ipv6_option_(std::make_unique<SocketOptionImpl>(in_state, ipv6_optname, value)) {}\n\n  // Socket::Option\n  bool setOption(Socket& socket,\n                 envoy::config::core::v3::SocketOption::SocketState state) const override;\n  // The common socket options don't require a hash key.\n  void hashKey(std::vector<uint8_t>&) const override {}\n\n  absl::optional<Details>\n  getOptionDetails(const Socket& socket,\n                   envoy::config::core::v3::SocketOption::SocketState state) const override;\n\n  /**\n   * Set a socket option that applies at both IPv4 and IPv6 socket levels. When the underlying FD\n   * is IPv6, this function will attempt to set at IPv6 unless the platform only supports the\n   * option at the IPv4 level.\n   * @param socket.\n   * @param ipv4_optname SocketOptionName for IPv4 level. Set to empty if not supported on\n   * platform.\n   * @param ipv6_optname SocketOptionName for IPv6 level. Set to empty if not supported on\n   * platform.\n   * @param optval as per setsockopt(2).\n   * @param optlen as per setsockopt(2).\n   * @return int as per setsockopt(2). 
ENOTSUP is returned if the option is not supported on the\n   * platform for fd after the above option level fallback semantics are taken into account or the\n   *         socket is non-IP.\n   */\n  static bool setIpSocketOption(Socket& socket,\n                                envoy::config::core::v3::SocketOption::SocketState state,\n                                const std::unique_ptr<SocketOptionImpl>& ipv4_option,\n                                const std::unique_ptr<SocketOptionImpl>& ipv6_option);\n\nprivate:\n  const std::unique_ptr<SocketOptionImpl> ipv4_option_;\n  const std::unique_ptr<SocketOptionImpl> ipv6_option_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/address_impl.cc",
    "content": "#include \"common/network/address_impl.h\"\n\n#include <array>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/socket_interface.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\n\nnamespace {\n\n// Validate that IPv4 is supported on this platform, raise an exception for the\n// given address if not.\nvoid validateIpv4Supported(const std::string& address) {\n  static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET);\n  if (!supported) {\n    throw EnvoyException(\n        fmt::format(\"IPv4 addresses are not supported on this machine: {}\", address));\n  }\n}\n\n// Validate that IPv6 is supported on this platform, raise an exception for the\n// given address if not.\nvoid validateIpv6Supported(const std::string& address) {\n  static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET6);\n  if (!supported) {\n    throw EnvoyException(\n        fmt::format(\"IPv6 addresses are not supported on this machine: {}\", address));\n  }\n}\n\n// Constructs a readable string with the embedded nulls in the abstract path replaced with '@'.\nstd::string friendlyNameFromAbstractPath(absl::string_view path) {\n  std::string friendly_name(path.data(), path.size());\n  std::replace(friendly_name.begin(), friendly_name.end(), '\\0', '@');\n  return friendly_name;\n}\n\nconst SocketInterface* sockInterfaceOrDefault(const SocketInterface* sock_interface) {\n  return sock_interface == nullptr ? 
&SocketInterfaceSingleton::get() : sock_interface;\n}\n\n} // namespace\n\nAddress::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, socklen_t ss_len,\n                                                    bool v6only) {\n  RELEASE_ASSERT(ss_len == 0 || static_cast<unsigned int>(ss_len) >= sizeof(sa_family_t), \"\");\n  switch (ss.ss_family) {\n  case AF_INET: {\n    RELEASE_ASSERT(ss_len == 0 || static_cast<unsigned int>(ss_len) == sizeof(sockaddr_in), \"\");\n    const struct sockaddr_in* sin = reinterpret_cast<const struct sockaddr_in*>(&ss);\n    ASSERT(AF_INET == sin->sin_family);\n    return std::make_shared<Address::Ipv4Instance>(sin);\n  }\n  case AF_INET6: {\n    RELEASE_ASSERT(ss_len == 0 || static_cast<unsigned int>(ss_len) == sizeof(sockaddr_in6), \"\");\n    const struct sockaddr_in6* sin6 = reinterpret_cast<const struct sockaddr_in6*>(&ss);\n    ASSERT(AF_INET6 == sin6->sin6_family);\n    if (!v6only && IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {\n#if defined(__APPLE__)\n      struct sockaddr_in sin = {\n          {}, AF_INET, sin6->sin6_port, {sin6->sin6_addr.__u6_addr.__u6_addr32[3]}, {}};\n#elif defined(WIN32)\n      struct in_addr in_v4 = {};\n      in_v4.S_un.S_addr = reinterpret_cast<const uint32_t*>(sin6->sin6_addr.u.Byte)[3];\n      struct sockaddr_in sin = {AF_INET, sin6->sin6_port, in_v4, {}};\n#else\n      struct sockaddr_in sin = {AF_INET, sin6->sin6_port, {sin6->sin6_addr.s6_addr32[3]}, {}};\n#endif\n      return std::make_shared<Address::Ipv4Instance>(&sin);\n    } else {\n      return std::make_shared<Address::Ipv6Instance>(*sin6, v6only);\n    }\n  }\n  case AF_UNIX: {\n    const struct sockaddr_un* sun = reinterpret_cast<const struct sockaddr_un*>(&ss);\n    ASSERT(AF_UNIX == sun->sun_family);\n    RELEASE_ASSERT(ss_len == 0 || static_cast<unsigned int>(ss_len) >=\n                                      offsetof(struct sockaddr_un, sun_path) + 1,\n                   \"\");\n    return 
std::make_shared<Address::PipeInstance>(sun, ss_len);\n  }\n  default:\n    throw EnvoyException(fmt::format(\"Unexpected sockaddr family: {}\", ss.ss_family));\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nIpv4Instance::Ipv4Instance(const sockaddr_in* address, const SocketInterface* sock_interface)\n    : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) {\n  memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_));\n  ip_.ipv4_.address_ = *address;\n  ip_.friendly_address_ = sockaddrToString(*address);\n\n  // Based on benchmark testing, this reserve+append implementation runs faster than absl::StrCat.\n  fmt::format_int port(ntohs(address->sin_port));\n  friendly_name_.reserve(ip_.friendly_address_.size() + 1 + port.size());\n  friendly_name_.append(ip_.friendly_address_);\n  friendly_name_.push_back(':');\n  friendly_name_.append(port.data(), port.size());\n  validateIpv4Supported(friendly_name_);\n}\n\nIpv4Instance::Ipv4Instance(const std::string& address, const SocketInterface* sock_interface)\n    : Ipv4Instance(address, 0, sockInterfaceOrDefault(sock_interface)) {}\n\nIpv4Instance::Ipv4Instance(const std::string& address, uint32_t port,\n                           const SocketInterface* sock_interface)\n    : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) {\n  memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_));\n  ip_.ipv4_.address_.sin_family = AF_INET;\n  ip_.ipv4_.address_.sin_port = htons(port);\n  int rc = inet_pton(AF_INET, address.c_str(), &ip_.ipv4_.address_.sin_addr);\n  if (1 != rc) {\n    throw EnvoyException(fmt::format(\"invalid ipv4 address '{}'\", address));\n  }\n\n  friendly_name_ = absl::StrCat(address, \":\", port);\n  validateIpv4Supported(friendly_name_);\n  ip_.friendly_address_ = address;\n}\n\nIpv4Instance::Ipv4Instance(uint32_t port, const SocketInterface* sock_interface)\n    : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) {\n  memset(&ip_.ipv4_.address_, 0, 
sizeof(ip_.ipv4_.address_));\n  ip_.ipv4_.address_.sin_family = AF_INET;\n  ip_.ipv4_.address_.sin_port = htons(port);\n  ip_.ipv4_.address_.sin_addr.s_addr = INADDR_ANY;\n  friendly_name_ = absl::StrCat(\"0.0.0.0:\", port);\n  validateIpv4Supported(friendly_name_);\n  ip_.friendly_address_ = \"0.0.0.0\";\n}\n\nbool Ipv4Instance::operator==(const Instance& rhs) const {\n  const Ipv4Instance* rhs_casted = dynamic_cast<const Ipv4Instance*>(&rhs);\n  return (rhs_casted && (ip_.ipv4_.address() == rhs_casted->ip_.ipv4_.address()) &&\n          (ip_.port() == rhs_casted->ip_.port()));\n}\n\nstd::string Ipv4Instance::sockaddrToString(const sockaddr_in& addr) {\n  static constexpr size_t BufferSize = 16; // enough space to hold an IPv4 address in string form\n  char str[BufferSize];\n  // Write backwards from the end of the buffer for simplicity.\n  char* start = str + BufferSize;\n  uint32_t ipv4_addr = ntohl(addr.sin_addr.s_addr);\n  for (unsigned i = 4; i != 0; i--, ipv4_addr >>= 8) {\n    uint32_t octet = ipv4_addr & 0xff;\n    if (octet == 0) {\n      ASSERT(start > str);\n      *--start = '0';\n    } else {\n      do {\n        ASSERT(start > str);\n        *--start = '0' + (octet % 10);\n        octet /= 10;\n      } while (octet != 0);\n    }\n    if (i != 1) {\n      ASSERT(start > str);\n      *--start = '.';\n    }\n  }\n  return std::string(start, str + BufferSize - start);\n}\n\nabsl::uint128 Ipv6Instance::Ipv6Helper::address() const {\n  absl::uint128 result{0};\n  static_assert(sizeof(absl::uint128) == 16, \"The size of asbl::uint128 is not 16.\");\n  memcpy(static_cast<void*>(&result), static_cast<const void*>(&address_.sin6_addr.s6_addr),\n         sizeof(absl::uint128));\n  return result;\n}\n\nuint32_t Ipv6Instance::Ipv6Helper::port() const { return ntohs(address_.sin6_port); }\n\nbool Ipv6Instance::Ipv6Helper::v6only() const { return v6only_; };\n\nstd::string Ipv6Instance::Ipv6Helper::makeFriendlyAddress() const {\n  char str[INET6_ADDRSTRLEN];\n  
const char* ptr = inet_ntop(AF_INET6, &address_.sin6_addr, str, INET6_ADDRSTRLEN);\n  ASSERT(str == ptr);\n  return ptr;\n}\n\nIpv6Instance::Ipv6Instance(const sockaddr_in6& address, bool v6only,\n                           const SocketInterface* sock_interface)\n    : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) {\n  ip_.ipv6_.address_ = address;\n  ip_.friendly_address_ = ip_.ipv6_.makeFriendlyAddress();\n  ip_.ipv6_.v6only_ = v6only;\n  friendly_name_ = fmt::format(\"[{}]:{}\", ip_.friendly_address_, ip_.port());\n  validateIpv6Supported(friendly_name_);\n}\n\nIpv6Instance::Ipv6Instance(const std::string& address, const SocketInterface* sock_interface)\n    : Ipv6Instance(address, 0, sockInterfaceOrDefault(sock_interface)) {}\n\nIpv6Instance::Ipv6Instance(const std::string& address, uint32_t port,\n                           const SocketInterface* sock_interface)\n    : InstanceBase(Type::Ip, sockInterfaceOrDefault(sock_interface)) {\n  ip_.ipv6_.address_.sin6_family = AF_INET6;\n  ip_.ipv6_.address_.sin6_port = htons(port);\n  if (!address.empty()) {\n    if (1 != inet_pton(AF_INET6, address.c_str(), &ip_.ipv6_.address_.sin6_addr)) {\n      throw EnvoyException(fmt::format(\"invalid ipv6 address '{}'\", address));\n    }\n  } else {\n    ip_.ipv6_.address_.sin6_addr = in6addr_any;\n  }\n  // Just in case address is in a non-canonical format, format from network address.\n  ip_.friendly_address_ = ip_.ipv6_.makeFriendlyAddress();\n  friendly_name_ = fmt::format(\"[{}]:{}\", ip_.friendly_address_, ip_.port());\n  validateIpv6Supported(friendly_name_);\n}\n\nIpv6Instance::Ipv6Instance(uint32_t port, const SocketInterface* sock_interface)\n    : Ipv6Instance(\"\", port, sockInterfaceOrDefault(sock_interface)) {}\n\nbool Ipv6Instance::operator==(const Instance& rhs) const {\n  const auto* rhs_casted = dynamic_cast<const Ipv6Instance*>(&rhs);\n  return (rhs_casted && (ip_.ipv6_.address() == rhs_casted->ip_.ipv6_.address()) &&\n          (ip_.port() 
== rhs_casted->ip_.port()));\n}\n\nPipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode,\n                           const SocketInterface* sock_interface)\n    : InstanceBase(Type::Pipe, sockInterfaceOrDefault(sock_interface)) {\n  if (address->sun_path[0] == '\\0') {\n#if !defined(__linux__)\n    throw EnvoyException(\"Abstract AF_UNIX sockets are only supported on linux.\");\n#endif\n    RELEASE_ASSERT(static_cast<unsigned int>(ss_len) >= offsetof(struct sockaddr_un, sun_path) + 1,\n                   \"\");\n    pipe_.abstract_namespace_ = true;\n    pipe_.address_length_ = ss_len - offsetof(struct sockaddr_un, sun_path);\n  }\n  pipe_.address_ = *address;\n  if (pipe_.abstract_namespace_) {\n    if (mode != 0) {\n      throw EnvoyException(\"Cannot set mode for Abstract AF_UNIX sockets\");\n    }\n    // Replace all null characters with '@' in friendly_name_.\n    friendly_name_ = friendlyNameFromAbstractPath(\n        absl::string_view(pipe_.address_.sun_path, pipe_.address_length_));\n  } else {\n    friendly_name_ = address->sun_path;\n  }\n  pipe_.mode_ = mode;\n}\n\nPipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode,\n                           const SocketInterface* sock_interface)\n    : InstanceBase(Type::Pipe, sockInterfaceOrDefault(sock_interface)) {\n  if (pipe_path.size() >= sizeof(pipe_.address_.sun_path)) {\n    throw EnvoyException(\n        fmt::format(\"Path \\\"{}\\\" exceeds maximum UNIX domain socket path size of {}.\", pipe_path,\n                    sizeof(pipe_.address_.sun_path)));\n  }\n  memset(&pipe_.address_, 0, sizeof(pipe_.address_));\n  pipe_.address_.sun_family = AF_UNIX;\n  if (pipe_path[0] == '@') {\n    // This indicates an abstract namespace.\n    // In this case, null bytes in the name have no special significance, and so we copy all\n    // characters of pipe_path to sun_path, including null bytes in the name. The pathname must also\n    // be null terminated. 
The friendly name is the address path with embedded nulls replaced with\n    // '@' for consistency with the first character.\n#if !defined(__linux__)\n    throw EnvoyException(\"Abstract AF_UNIX sockets are only supported on linux.\");\n#endif\n    if (mode != 0) {\n      throw EnvoyException(\"Cannot set mode for Abstract AF_UNIX sockets\");\n    }\n    pipe_.abstract_namespace_ = true;\n    pipe_.address_length_ = pipe_path.size();\n    memcpy(&pipe_.address_.sun_path[0], pipe_path.data(), pipe_path.size());\n    pipe_.address_.sun_path[0] = '\\0';\n    pipe_.address_.sun_path[pipe_path.size()] = '\\0';\n    friendly_name_ = friendlyNameFromAbstractPath(\n        absl::string_view(pipe_.address_.sun_path, pipe_.address_length_));\n  } else {\n    // Throw an error if the pipe path has an embedded null character.\n    if (pipe_path.size() != strlen(pipe_path.c_str())) {\n      throw EnvoyException(\"UNIX domain socket pathname contains embedded null characters\");\n    }\n    StringUtil::strlcpy(&pipe_.address_.sun_path[0], pipe_path.c_str(),\n                        sizeof(pipe_.address_.sun_path));\n    friendly_name_ = pipe_.address_.sun_path;\n  }\n  pipe_.mode_ = mode;\n}\n\nbool PipeInstance::operator==(const Instance& rhs) const { return asString() == rhs.asString(); }\n\nEnvoyInternalInstance::EnvoyInternalInstance(const std::string& address_id,\n                                             const SocketInterface* sock_interface)\n    : InstanceBase(Type::EnvoyInternal, sockInterfaceOrDefault(sock_interface)),\n      internal_address_(address_id) {\n  friendly_name_ = absl::StrCat(\"envoy://\", address_id);\n}\n\nbool EnvoyInternalInstance::operator==(const Instance& rhs) const {\n  return rhs.type() == Type::EnvoyInternal && asString() == rhs.asString();\n}\n\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/address_impl.h",
    "content": "#pragma once\n\n#include <sys/types.h>\n\n#include <array>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/socket.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\n\n/**\n * Convert an address in the form of the socket address struct defined by Posix, Linux, etc. into\n * a Network::Address::Instance and return a pointer to it. Raises an EnvoyException on failure.\n * @param ss a valid address with family AF_INET, AF_INET6 or AF_UNIX.\n * @param len length of the address (e.g. from accept, getsockname or getpeername). If len > 0,\n *        it is used to validate the structure contents; else if len == 0, it is ignored.\n * @param v6only disable IPv4-IPv6 mapping for IPv6 addresses?\n * @return InstanceConstSharedPtr the address.\n */\nInstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, socklen_t len,\n                                           bool v6only = true);\n\n/**\n * Base class for all address types.\n */\nclass InstanceBase : public Instance {\npublic:\n  // Network::Address::Instance\n  const std::string& asString() const override { return friendly_name_; }\n  absl::string_view asStringView() const override { return friendly_name_; }\n  // Default logical name is the human-readable name.\n  const std::string& logicalName() const override { return asString(); }\n  Type type() const override { return type_; }\n\n  const SocketInterface& socketInterface() const override { return socket_interface_; }\n\nprotected:\n  InstanceBase(Type type, const SocketInterface* sock_interface)\n      : socket_interface_(*sock_interface), type_(type) {}\n\n  std::string friendly_name_;\n  const SocketInterface& socket_interface_;\n\nprivate:\n  const Type type_;\n};\n\n/**\n * Implementation of an IPv4 address.\n */\nclass Ipv4Instance : public InstanceBase {\npublic:\n  /**\n   * Construct 
from an existing unix IPv4 socket address (IP v4 address and port).\n   */\n  explicit Ipv4Instance(const sockaddr_in* address,\n                        const SocketInterface* sock_interface = nullptr);\n\n  /**\n   * Construct from a string IPv4 address such as \"1.2.3.4\". Port will be unset/0.\n   */\n  explicit Ipv4Instance(const std::string& address,\n                        const SocketInterface* sock_interface = nullptr);\n\n  /**\n   * Construct from a string IPv4 address such as \"1.2.3.4\" as well as a port.\n   */\n  Ipv4Instance(const std::string& address, uint32_t port,\n               const SocketInterface* sock_interface = nullptr);\n\n  /**\n   * Construct from a port. The IPv4 address will be set to \"any\" and is suitable for binding\n   * a port to any available address.\n   */\n  explicit Ipv4Instance(uint32_t port, const SocketInterface* sock_interface = nullptr);\n\n  // Network::Address::Instance\n  bool operator==(const Instance& rhs) const override;\n  const Ip* ip() const override { return &ip_; }\n  const Pipe* pipe() const override { return nullptr; }\n  const EnvoyInternalAddress* envoyInternalAddress() const override { return nullptr; }\n  const sockaddr* sockAddr() const override {\n    return reinterpret_cast<const sockaddr*>(&ip_.ipv4_.address_);\n  }\n  socklen_t sockAddrLen() const override { return sizeof(sockaddr_in); }\n\n  /**\n   * Convenience function to convert an IPv4 address to canonical string format.\n   * @note This works similarly to inet_ntop() but is faster.\n   * @param addr address to format.\n   * @return the address in dotted-decimal string format.\n   */\n  static std::string sockaddrToString(const sockaddr_in& addr);\n\nprivate:\n  struct Ipv4Helper : public Ipv4 {\n    uint32_t address() const override { return address_.sin_addr.s_addr; }\n\n    sockaddr_in address_;\n  };\n\n  struct IpHelper : public Ip {\n    const std::string& addressAsString() const override { return friendly_address_; }\n    bool 
isAnyAddress() const override { return ipv4_.address_.sin_addr.s_addr == INADDR_ANY; }\n    bool isUnicastAddress() const override {\n      return !isAnyAddress() && (ipv4_.address_.sin_addr.s_addr != INADDR_BROADCAST) &&\n             // inlined IN_MULTICAST() to avoid byte swapping\n             !((ipv4_.address_.sin_addr.s_addr & htonl(0xf0000000)) == htonl(0xe0000000));\n    }\n    const Ipv4* ipv4() const override { return &ipv4_; }\n    const Ipv6* ipv6() const override { return nullptr; }\n    uint32_t port() const override { return ntohs(ipv4_.address_.sin_port); }\n    IpVersion version() const override { return IpVersion::v4; }\n\n    Ipv4Helper ipv4_;\n    std::string friendly_address_;\n  };\n\n  IpHelper ip_;\n};\n\n/**\n * Implementation of an IPv6 address.\n */\nclass Ipv6Instance : public InstanceBase {\npublic:\n  /**\n   * Construct from an existing unix IPv6 socket address (IP v6 address and port).\n   */\n  Ipv6Instance(const sockaddr_in6& address, bool v6only = true,\n               const SocketInterface* sock_interface = nullptr);\n\n  /**\n   * Construct from a string IPv6 address such as \"12:34::5\". Port will be unset/0.\n   */\n  explicit Ipv6Instance(const std::string& address,\n                        const SocketInterface* sock_interface = nullptr);\n\n  /**\n   * Construct from a string IPv6 address such as \"12:34::5\" as well as a port.\n   */\n  Ipv6Instance(const std::string& address, uint32_t port,\n               const SocketInterface* sock_interface = nullptr);\n\n  /**\n   * Construct from a port. 
The IPv6 address will be set to \"any\" and is suitable for binding\n   * a port to any available address.\n   */\n  explicit Ipv6Instance(uint32_t port, const SocketInterface* sock_interface = nullptr);\n\n  // Network::Address::Instance\n  bool operator==(const Instance& rhs) const override;\n  const Ip* ip() const override { return &ip_; }\n  const Pipe* pipe() const override { return nullptr; }\n  const EnvoyInternalAddress* envoyInternalAddress() const override { return nullptr; }\n  const sockaddr* sockAddr() const override {\n    return reinterpret_cast<const sockaddr*>(&ip_.ipv6_.address_);\n  }\n  socklen_t sockAddrLen() const override { return sizeof(sockaddr_in6); }\n\nprivate:\n  struct Ipv6Helper : public Ipv6 {\n    Ipv6Helper() { memset(&address_, 0, sizeof(address_)); }\n    absl::uint128 address() const override;\n    bool v6only() const override;\n    uint32_t port() const;\n\n    std::string makeFriendlyAddress() const;\n\n    sockaddr_in6 address_;\n    // Is IPv4 compatibility (https://tools.ietf.org/html/rfc3493#page-11) disabled?\n    // Default initialized to true to preserve extant Envoy behavior where we don't explicitly set\n    // this in the constructor.\n    bool v6only_{true};\n  };\n\n  struct IpHelper : public Ip {\n    const std::string& addressAsString() const override { return friendly_address_; }\n    bool isAnyAddress() const override {\n      return 0 == memcmp(&ipv6_.address_.sin6_addr, &in6addr_any, sizeof(struct in6_addr));\n    }\n    bool isUnicastAddress() const override {\n      return !isAnyAddress() && !IN6_IS_ADDR_MULTICAST(&ipv6_.address_.sin6_addr);\n    }\n    const Ipv4* ipv4() const override { return nullptr; }\n    const Ipv6* ipv6() const override { return &ipv6_; }\n    uint32_t port() const override { return ipv6_.port(); }\n    IpVersion version() const override { return IpVersion::v6; }\n\n    Ipv6Helper ipv6_;\n    std::string friendly_address_;\n  };\n\n  IpHelper ip_;\n};\n\n/**\n * Implementation of a 
pipe address (unix domain socket on unix).\n */\nclass PipeInstance : public InstanceBase {\npublic:\n  /**\n   * Construct from an existing unix address.\n   */\n  explicit PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode = 0,\n                        const SocketInterface* sock_interface = nullptr);\n\n  /**\n   * Construct from a string pipe path.\n   */\n  explicit PipeInstance(const std::string& pipe_path, mode_t mode = 0,\n                        const SocketInterface* sock_interface = nullptr);\n\n  // Network::Address::Instance\n  bool operator==(const Instance& rhs) const override;\n  const Ip* ip() const override { return nullptr; }\n  const Pipe* pipe() const override { return &pipe_; }\n  const EnvoyInternalAddress* envoyInternalAddress() const override { return nullptr; }\n  const sockaddr* sockAddr() const override {\n    return reinterpret_cast<const sockaddr*>(&pipe_.address_);\n  }\n  socklen_t sockAddrLen() const override {\n    if (pipe_.abstract_namespace_) {\n      return offsetof(struct sockaddr_un, sun_path) + pipe_.address_length_;\n    }\n    return sizeof(pipe_.address_);\n  }\n\nprivate:\n  struct PipeHelper : public Pipe {\n\n    bool abstractNamespace() const override { return abstract_namespace_; }\n    mode_t mode() const override { return mode_; }\n\n    sockaddr_un address_;\n    // For abstract namespaces.\n    bool abstract_namespace_{false};\n    uint32_t address_length_{0};\n    mode_t mode_{0};\n  };\n\n  PipeHelper pipe_;\n};\n\nclass EnvoyInternalInstance : public InstanceBase {\npublic:\n  /**\n   * Construct from a string name.\n   */\n  explicit EnvoyInternalInstance(const std::string& address_id,\n                                 const SocketInterface* sock_interface = nullptr);\n\n  // Network::Address::Instance\n  bool operator==(const Instance& rhs) const override;\n  const Ip* ip() const override { return nullptr; }\n  const Pipe* pipe() const override { return nullptr; }\n  const 
EnvoyInternalAddress* envoyInternalAddress() const override { return &internal_address_; }\n  // TODO(lambdai): Verify all callers accepts nullptr.\n  const sockaddr* sockAddr() const override { return nullptr; }\n  socklen_t sockAddrLen() const override { return 0; }\n\nprivate:\n  struct EnvoyInternalAddressImpl : public EnvoyInternalAddress {\n    explicit EnvoyInternalAddressImpl(const std::string& address_id) : address_id_(address_id) {}\n    ~EnvoyInternalAddressImpl() override = default;\n    const std::string& addressId() const override { return address_id_; }\n    const std::string address_id_;\n  };\n  EnvoyInternalAddressImpl internal_address_;\n};\n\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/apple_dns_impl.cc",
    "content": "#include \"common/network/apple_dns_impl.h\"\n\n#include <dns_sd.h>\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nAppleDnsResolverImpl::AppleDnsResolverImpl(Event::Dispatcher& dispatcher)\n    : dispatcher_(dispatcher) {\n  ENVOY_LOG(debug, \"Constructing DNS resolver\");\n  initializeMainSdRef();\n}\n\nAppleDnsResolverImpl::~AppleDnsResolverImpl() {\n  ENVOY_LOG(debug, \"Destructing DNS resolver\");\n  deallocateMainSdRef();\n}\n\nvoid AppleDnsResolverImpl::deallocateMainSdRef() {\n  ENVOY_LOG(debug, \"DNSServiceRefDeallocate main sd ref\");\n  // dns_sd.h says:\n  //   If the reference's underlying socket is used in a run loop or select() call, it should\n  //   be removed BEFORE DNSServiceRefDeallocate() is called, as this function closes the\n  //   reference's socket.\n  sd_ref_event_.reset();\n  DNSServiceRefDeallocate(main_sd_ref_);\n}\n\nvoid AppleDnsResolverImpl::initializeMainSdRef() {\n  // This implementation uses a shared connection for three main reasons:\n  //    1. Efficiency of concurrent resolutions by sharing the same underlying UDS to the DNS\n  //       server.\n  //    2. An error on a connection to the DNS server is good indication that other connections,\n  //       even if not shared, would not succeed. So it is better to share one connection and\n  //       promptly cancel all outstanding queries, rather than individually wait for all\n  //       connections to error out.\n  //    3. 
It follows the precedent set in dns_impl with the c-ares library, for consistency of\n  //       style, performance, and expectations between the two implementations.\n  // However, using a shared connection brings some complexities detailed in the inline comments\n  // for kDNSServiceFlagsShareConnection in dns_sd.h, and copied (and edited) in this implementation\n  // where relevant.\n  auto error = DNSServiceCreateConnection(&main_sd_ref_);\n  RELEASE_ASSERT(!error, \"error in DNSServiceCreateConnection\");\n\n  auto fd = DNSServiceRefSockFD(main_sd_ref_);\n  RELEASE_ASSERT(fd != -1, \"error in DNSServiceRefSockFD\");\n  ENVOY_LOG(debug, \"DNS resolver has fd={}\", fd);\n\n  sd_ref_event_ = dispatcher_.createFileEvent(\n      fd,\n      // note: Event::FileTriggerType::Level is used here to closely resemble the c-ares\n      // implementation in dns_impl.cc.\n      [this](uint32_t events) { onEventCallback(events); }, Event::FileTriggerType::Level,\n      Event::FileReadyType::Read);\n  sd_ref_event_->setEnabled(Event::FileReadyType::Read);\n}\n\nvoid AppleDnsResolverImpl::onEventCallback(uint32_t events) {\n  ENVOY_LOG(debug, \"DNS resolver file event\");\n  ASSERT(events & Event::FileReadyType::Read);\n  DNSServiceProcessResult(main_sd_ref_);\n}\n\nActiveDnsQuery* AppleDnsResolverImpl::resolve(const std::string& dns_name,\n                                              DnsLookupFamily dns_lookup_family,\n                                              ResolveCb callback) {\n  ENVOY_LOG(debug, \"DNS resolver resolve={}\", dns_name);\n  std::unique_ptr<PendingResolution> pending_resolution(\n      new PendingResolution(*this, callback, dispatcher_, main_sd_ref_, dns_name));\n\n  DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(dns_lookup_family);\n  if (error != kDNSServiceErr_NoError) {\n    ENVOY_LOG(warn, \"DNS resolver error in dnsServiceGetAddrInfo for {}\", dns_name);\n    return nullptr;\n  }\n\n  // If the query was synchronously 
resolved, there is no need to return the query.\n  if (pending_resolution->synchronously_completed_) {\n    return nullptr;\n  }\n\n  pending_resolution->owned_ = true;\n  return pending_resolution.release();\n}\n\nvoid AppleDnsResolverImpl::addPendingQuery(PendingResolution* query) {\n  ASSERT(queries_with_pending_cb_.count(query) == 0);\n  queries_with_pending_cb_.insert(query);\n}\n\nvoid AppleDnsResolverImpl::removePendingQuery(PendingResolution* query) {\n  auto erased = queries_with_pending_cb_.erase(query);\n  ASSERT(erased == 1);\n}\n\nvoid AppleDnsResolverImpl::flushPendingQueries(const bool with_error) {\n  ENVOY_LOG(debug, \"DNS Resolver flushing {} queries\", queries_with_pending_cb_.size());\n  for (std::set<PendingResolution*>::iterator it = queries_with_pending_cb_.begin();\n       it != queries_with_pending_cb_.end(); ++it) {\n    auto query = *it;\n    try {\n      ASSERT(query->pending_cb_);\n      query->callback_(query->pending_cb_->status_, std::move(query->pending_cb_->responses_));\n    } catch (const std::exception& e) {\n      ENVOY_LOG(warn, \"std::exception in DNSService callback: {}\", e.what());\n      throw EnvoyException(e.what());\n    } catch (...) 
{\n      ENVOY_LOG(warn, \"Unknown exception in DNSService callback\");\n      throw EnvoyException(\"unknown\");\n    }\n\n    if (query->owned_) {\n      ENVOY_LOG(debug, \"Resolution for {} completed (async)\", query->dns_name_);\n      delete *it;\n    } else {\n      ENVOY_LOG(debug, \"Resolution for {} completed (synchronously)\", query->dns_name_);\n      query->synchronously_completed_ = true;\n    }\n  }\n\n  // Purge the contents so no one tries to delete them again.\n  queries_with_pending_cb_.clear();\n\n  if (with_error) {\n    // The main sd ref is destroyed here because a callback with an error is good indication that\n    // the connection to the DNS server is faulty and needs to be torn down.\n    //\n    // Deallocation of the MainSdRef __has__ to happen __after__ flushing queries. Flushing queries\n    // de-allocates individual refs, so deallocating the main ref ahead would cause deallocation of\n    // invalid individual refs per dns_sd.h\n    deallocateMainSdRef();\n    initializeMainSdRef();\n  }\n}\n\nAppleDnsResolverImpl::PendingResolution::~PendingResolution() {\n  ENVOY_LOG(debug, \"Destroying PendingResolution for {}\", dns_name_);\n  DNSServiceRefDeallocate(individual_sd_ref_);\n}\n\nvoid AppleDnsResolverImpl::PendingResolution::cancel() {\n  ENVOY_LOG(debug, \"Cancelling PendingResolution for {}\", dns_name_);\n  ASSERT(owned_);\n  if (pending_cb_) {\n    /* (taken and edited from dns_sd.h)\n     * Canceling operations and kDNSServiceFlagsMoreComing\n     * Whenever you cancel any operation for which you had deferred [resolution]\n     * because of a kDNSServiceFlagsMoreComing flag, you should [flush]. 
This is because, after\n     * cancelling the operation, you can no longer wait for a callback *without* MoreComing set, to\n     * tell you [to flush] (the operation has been canceled, so there will be no more callbacks).\n     *\n     * [FURTHER] An implication of the collective\n     * kDNSServiceFlagsMoreComing flag for shared connections is that this\n     * guideline applies more broadly -- any time you cancel an operation on\n     * a shared connection, you should perform all deferred updates for all\n     * operations sharing that connection. This is because the MoreComing flag\n     * might have been referring to events coming for the operation you canceled,\n     * which will now not be coming because the operation has been canceled.\n     */\n    // First, get rid of the current query, because if it is canceled, its callback should not be\n    // executed during the subsequent flush.\n    parent_.removePendingQuery(this);\n    // Then, flush all other queries.\n    parent_.flushPendingQueries(false /* with_error */);\n  }\n  // Because the query is self-owned, delete now.\n  delete this;\n}\n\nvoid AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply(\n    DNSServiceFlags flags, uint32_t interface_index, DNSServiceErrorType error_code,\n    const char* hostname, const struct sockaddr* address, uint32_t ttl) {\n  ENVOY_LOG(debug,\n            \"DNS for {} resolved with: flags={}[MoreComing={}, Add={}], interface_index={}, \"\n            \"error_code={}, hostname={}\",\n            dns_name_, flags, flags & kDNSServiceFlagsMoreComing ? \"yes\" : \"no\",\n            flags & kDNSServiceFlagsAdd ? \"yes\" : \"no\", interface_index, error_code, hostname);\n  ASSERT(interface_index == 0);\n\n  // Generic error handling.\n  if (error_code != kDNSServiceErr_NoError) {\n    // TODO(junr03): consider creating stats for known error types (timeout, refused connection,\n    // etc.). 
Currently a bit challenging because there is no scope access wired through. Current\n    // query gets a failure status\n    if (!pending_cb_) {\n      ENVOY_LOG(warn, \"[Error path] Adding to queries pending callback\");\n      pending_cb_ = {ResolutionStatus::Failure, {}};\n      parent_.addPendingQuery(this);\n    } else {\n      ENVOY_LOG(warn, \"[Error path] Changing status for query already pending flush\");\n      pending_cb_->status_ = ResolutionStatus::Failure;\n    }\n\n    ENVOY_LOG(warn, \"[Error path] DNS Resolver flushing queries pending callback\");\n    parent_.flushPendingQueries(true /* with_error */);\n    // Note: Nothing can follow this call to flushPendingQueries due to deletion of this\n    // object upon resolution.\n    return;\n  }\n\n  // Only add this address to the list if kDNSServiceFlagsAdd is set. Callback targets are purely\n  // additive.\n  if (flags & kDNSServiceFlagsAdd) {\n    auto dns_response = buildDnsResponse(address, ttl);\n    ENVOY_LOG(debug, \"Address to add address={}, ttl={}\",\n              dns_response.address_->ip()->addressAsString(), ttl);\n\n    if (!pending_cb_) {\n      ENVOY_LOG(debug, \"Adding to queries pending callback\");\n      pending_cb_ = {ResolutionStatus::Success, {dns_response}};\n      parent_.addPendingQuery(this);\n    } else {\n      ENVOY_LOG(debug, \"New address for query already pending flush\");\n      pending_cb_->responses_.push_back(dns_response);\n    }\n  }\n\n  if (!(flags & kDNSServiceFlagsMoreComing)) {\n    /* (taken and edited from dns_sd.h)\n     * Collective kDNSServiceFlagsMoreComing flag:\n     * When [DNSServiceGetAddrInfoReply] are invoked using a shared DNSServiceRef, the\n     * kDNSServiceFlagsMoreComing flag applies collectively to *all* active\n     * operations sharing the same [main_sd_ref]. 
If the MoreComing flag is\n     * set it means that there are more results queued on this parent DNSServiceRef,\n     * but not necessarily more results for this particular callback function.\n     * The implication of this for client programmers is that when a callback\n     * is invoked with the MoreComing flag set, the code should update its\n     * internal data structures with the new result (as is done above when calling\n     * parent_.addPendingQuery(this))...Then, later when a callback is eventually invoked with the\n     * MoreComing flag not set, the code should update *all* [pending queries] related to that\n     * shared parent DNSServiceRef that need updating (i.e that have had DNSServiceGetAddrInfoReply\n     * called on them since the last flush), not just the [queries] related to the particular\n     * callback that happened to be the last one to be invoked.\n     */\n    ENVOY_LOG(debug, \"DNS Resolver flushing queries pending callback\");\n    parent_.flushPendingQueries(false /* with_error */);\n    // Note: Nothing can follow this call to flushPendingQueries due to deletion of this\n    // object upon resolution.\n    return;\n  }\n}\n\nDNSServiceErrorType\nAppleDnsResolverImpl::PendingResolution::dnsServiceGetAddrInfo(DnsLookupFamily dns_lookup_family) {\n  DNSServiceProtocol protocol;\n  switch (dns_lookup_family) {\n  case DnsLookupFamily::V4Only:\n    protocol = kDNSServiceProtocol_IPv4;\n    break;\n  case DnsLookupFamily::V6Only:\n    protocol = kDNSServiceProtocol_IPv6;\n    break;\n  case DnsLookupFamily::Auto:\n    protocol = kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6;\n    break;\n  }\n\n  // TODO: explore caching: there are caching flags in the dns_sd.h flags, allow expired answers\n  // from the cache?\n  // TODO: explore validation via DNSSEC?\n  return DNSServiceGetAddrInfo(\n      &individual_sd_ref_, kDNSServiceFlagsShareConnection | kDNSServiceFlagsTimeout, 0, protocol,\n      dns_name_.c_str(),\n      /*\n       * 
About Thread Safety (taken from inline documentation there):\n       * The dns_sd.h API does not presuppose any particular threading model, and consequently\n       * does no locking internally (which would require linking with a specific threading library).\n       * If the client concurrently, from multiple threads (or contexts), calls API routines using\n       * the same DNSServiceRef, it is the client's responsibility to provide mutual exclusion for\n       * that DNSServiceRef.\n       */\n\n      // Therefore, much like the c-ares implementation All calls and callbacks to the API need to\n      // happen on the thread that owns the creating dispatcher. This is the case as callbacks are\n      // driven by processing bytes in onEventCallback which run on the passed in dispatcher's event\n      // loop.\n      [](DNSServiceRef, DNSServiceFlags flags, uint32_t interface_index,\n         DNSServiceErrorType error_code, const char* hostname, const struct sockaddr* address,\n         uint32_t ttl, void* context) {\n        static_cast<PendingResolution*>(context)->onDNSServiceGetAddrInfoReply(\n            flags, interface_index, error_code, hostname, address, ttl);\n      },\n      this);\n}\n\nDnsResponse\nAppleDnsResolverImpl::PendingResolution::buildDnsResponse(const struct sockaddr* address,\n                                                          uint32_t ttl) {\n  switch (address->sa_family) {\n  case AF_INET:\n    sockaddr_in address_in;\n    memset(&address_in, 0, sizeof(address_in));\n    address_in.sin_family = AF_INET;\n    address_in.sin_port = 0;\n    address_in.sin_addr = reinterpret_cast<const sockaddr_in*>(address)->sin_addr;\n    return {std::make_shared<const Address::Ipv4Instance>(&address_in), std::chrono::seconds(ttl)};\n  case AF_INET6:\n    sockaddr_in6 address_in6;\n    memset(&address_in6, 0, sizeof(address_in6));\n    address_in6.sin6_family = AF_INET6;\n    address_in6.sin6_port = 0;\n    address_in6.sin6_addr = reinterpret_cast<const 
sockaddr_in6*>(address)->sin6_addr;\n    return {std::make_shared<const Address::Ipv6Instance>(address_in6), std::chrono::seconds(ttl)};\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/apple_dns_impl.h",
    "content": "#pragma once\n\n#include <dns_sd.h>\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/file_event.h\"\n#include \"envoy/network/dns.h\"\n\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Implementation of DnsResolver that uses Apple dns_sd.h APIs. All calls and callbacks are assumed\n * to happen on the thread that owns the creating dispatcher.\n */\nclass AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable<Logger::Id::upstream> {\npublic:\n  AppleDnsResolverImpl(Event::Dispatcher& dispatcher);\n  ~AppleDnsResolverImpl() override;\n\n  // Network::DnsResolver\n  ActiveDnsQuery* resolve(const std::string& dns_name, DnsLookupFamily dns_lookup_family,\n                          ResolveCb callback) override;\n\nprivate:\n  struct PendingResolution : public ActiveDnsQuery {\n    PendingResolution(AppleDnsResolverImpl& parent, ResolveCb callback,\n                      Event::Dispatcher& dispatcher, DNSServiceRef sd_ref,\n                      const std::string& dns_name)\n        : parent_(parent), callback_(callback), dispatcher_(dispatcher),\n          /* (taken and edited from dns_sd.h):\n           * For efficiency, clients that perform many concurrent operations may want to use a\n           * single Unix Domain Socket connection with the background daemon, instead of having a\n           * separate connection for each independent operation. 
To use this mode, clients first\n           * call DNSServiceCreateConnection(&SharedRef) to initialize the main DNSServiceRef.\n           * For each subsequent operation that is to share that same connection, the client copies\n           * the SharedRef, and then passes the address of that copy, setting the ShareConnection\n           * flag to tell the library that this DNSServiceRef is not a typical uninitialized\n           * DNSServiceRef; it's a copy of an existing DNSServiceRef whose connection information\n           * should be reused.\n           */\n          individual_sd_ref_(sd_ref), dns_name_(dns_name) {}\n    ~PendingResolution();\n\n    // Network::ActiveDnsQuery\n    void cancel() override;\n\n    static DnsResponse buildDnsResponse(const struct sockaddr* address, uint32_t ttl);\n    // Wrapper for the API call.\n    DNSServiceErrorType dnsServiceGetAddrInfo(DnsLookupFamily dns_lookup_family);\n    // Wrapper for the API callback.\n    void onDNSServiceGetAddrInfoReply(DNSServiceFlags flags, uint32_t interface_index,\n                                      DNSServiceErrorType error_code, const char* hostname,\n                                      const struct sockaddr* address, uint32_t ttl);\n\n    // Small wrapping struct to accumulate addresses from firings of the\n    // onDNSServiceGetAddrInfoReply callback.\n    struct FinalResponse {\n      ResolutionStatus status_;\n      std::list<DnsResponse> responses_;\n    };\n\n    AppleDnsResolverImpl& parent_;\n    // Caller supplied callback to invoke on query completion or error.\n    const ResolveCb callback_;\n    // Dispatcher to post any callback_ exceptions to.\n    Event::Dispatcher& dispatcher_;\n    DNSServiceRef individual_sd_ref_;\n    const std::string dns_name_;\n    bool synchronously_completed_{};\n    bool owned_{};\n    // DNSServiceGetAddrInfo fires one callback DNSServiceGetAddrInfoReply callback per IP address,\n    // and informs via flags if more IP addresses are incoming. 
Therefore, these addresses need to\n    // be accumulated before firing callback_.\n    absl::optional<FinalResponse> pending_cb_{};\n  };\n\n  void initializeMainSdRef();\n  void deallocateMainSdRef();\n  void onEventCallback(uint32_t events);\n  void addPendingQuery(PendingResolution* query);\n  void removePendingQuery(PendingResolution* query);\n  void flushPendingQueries(const bool with_error);\n\n  Event::Dispatcher& dispatcher_;\n  DNSServiceRef main_sd_ref_;\n  Event::FileEventPtr sd_ref_event_;\n  // When using a shared sd ref via DNSServiceCreateConnection, the DNSServiceGetAddrInfoReply\n  // callback with the kDNSServiceFlagsMoreComing flag might refer to addresses for various\n  // PendingResolutions. Therefore, the resolver needs to have a container of queries pending\n  // calling their own callback_s until a DNSServiceGetAddrInfoReply is called with\n  // kDNSServiceFlagsMoreComing not set or an error status is received in\n  // DNSServiceGetAddrInfoReply.\n  std::set<PendingResolution*> queries_with_pending_cb_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/application_protocol.cc",
    "content": "#include \"common/network/application_protocol.h\"\n\n#include \"common/common/macros.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nconst std::string& ApplicationProtocols::key() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.network.application_protocols\");\n}\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/application_protocol.h",
    "content": "#pragma once\n\n#include \"envoy/stream_info/filter_state.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * ALPN to set in the upstream connection. Filters can use this one to override the ALPN in TLS\n * context.\n */\nclass ApplicationProtocols : public StreamInfo::FilterState::Object {\npublic:\n  explicit ApplicationProtocols(const std::vector<std::string>& application_protocols)\n      : application_protocols_(application_protocols) {}\n  const std::vector<std::string>& value() const { return application_protocols_; }\n  static const std::string& key();\n\nprivate:\n  const std::vector<std::string> application_protocols_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/base_listener_impl.cc",
    "content": "#include \"common/network/base_listener_impl.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/event/file_event_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_impl.h\"\n\n#include \"event2/listener.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nBaseListenerImpl::BaseListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket)\n    : local_address_(nullptr), dispatcher_(dispatcher), socket_(std::move(socket)) {\n  const auto ip = socket_->localAddress()->ip();\n\n  // Only use the listen socket's local address for new connections if it is not the all hosts\n  // address (e.g., 0.0.0.0 for IPv4).\n  if (!(ip && ip->isAnyAddress())) {\n    local_address_ = socket_->localAddress();\n  }\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/base_listener_impl.h",
    "content": "#pragma once\n\n#include \"envoy/network/listener.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Base libevent implementation of Network::Listener.\n */\nclass BaseListenerImpl : public virtual Listener {\npublic:\n  /**\n   * @param socket the listening socket for this listener. It might be shared\n   * with other listeners if all listeners use single listen socket.\n   */\n  BaseListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket);\n\nprotected:\n  Address::InstanceConstSharedPtr local_address_;\n  Event::DispatcherImpl& dispatcher_;\n  const SocketSharedPtr socket_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/cidr_range.cc",
    "content": "#include \"common/network/cidr_range.h\"\n\n#include <array>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\n\nCidrRange::CidrRange() : length_(-1) {}\n\nCidrRange::CidrRange(InstanceConstSharedPtr address, int length)\n    : address_(std::move(address)), length_(length) {\n  // This is a private ctor, so only checking these asserts in debug builds.\n  if (address_ == nullptr) {\n    ASSERT(length_ == -1);\n  } else {\n    ASSERT(address_->type() == Type::Ip);\n    ASSERT(length_ >= 0);\n  }\n}\n\nCidrRange::CidrRange(const CidrRange& other) = default;\n\nCidrRange& CidrRange::operator=(const CidrRange& other) = default;\n\nbool CidrRange::operator==(const CidrRange& other) const {\n  // Lengths must be the same, and must be valid (i.e. 
not -1).\n  if (length_ != other.length_ || length_ == -1) {\n    return false;\n  }\n\n  if (address_ == nullptr || other.address_ == nullptr) {\n    return false;\n  }\n\n  if (address_->ip()->version() == IpVersion::v4) {\n    return other.address_->ip()->version() == IpVersion::v4 &&\n           address_->ip()->ipv4()->address() == other.address_->ip()->ipv4()->address();\n  } else {\n    return other.address_->ip()->version() == IpVersion::v6 &&\n           address_->ip()->ipv6()->address() == other.address_->ip()->ipv6()->address();\n  }\n}\n\nconst Ip* CidrRange::ip() const {\n  if (address_ != nullptr) {\n    return address_->ip();\n  }\n  return nullptr;\n}\n\nint CidrRange::length() const { return length_; }\n\nbool CidrRange::isInRange(const Instance& address) const {\n  if (address_ == nullptr || !isValid() || address.type() != Type::Ip ||\n      address_->ip()->version() != address.ip()->version()) {\n    return false;\n  }\n\n  // All addresses in range.\n  if (length_ == 0) {\n    return true;\n  }\n\n  switch (address.ip()->version()) {\n  case IpVersion::v4:\n    if (ntohl(address.ip()->ipv4()->address()) >> (32 - length_) ==\n        ntohl(address_->ip()->ipv4()->address()) >> (32 - length_)) {\n      return true;\n    }\n    break;\n  case IpVersion::v6:\n    if ((Utility::Ip6ntohl(address_->ip()->ipv6()->address()) >> (128 - length_)) ==\n        (Utility::Ip6ntohl(address.ip()->ipv6()->address()) >> (128 - length_))) {\n      return true;\n    }\n    break;\n  }\n  return false;\n}\n\nstd::string CidrRange::asString() const {\n  if (address_ == nullptr) {\n    return \"/-1\";\n  } else {\n    return fmt::format(\"{}/{}\", address_->ip()->addressAsString(), length_);\n  }\n}\n\n// static\nCidrRange CidrRange::create(InstanceConstSharedPtr address, int length) {\n  InstanceConstSharedPtr ptr = truncateIpAddressAndLength(std::move(address), &length);\n  return CidrRange(std::move(ptr), length);\n}\n\n// static\nCidrRange CidrRange::create(const 
std::string& address, int length) {\n  return create(Utility::parseInternetAddress(address), length);\n}\n\nCidrRange CidrRange::create(const envoy::config::core::v3::CidrRange& cidr) {\n  return create(Utility::parseInternetAddress(cidr.address_prefix()), cidr.prefix_len().value());\n}\n\n// static\nCidrRange CidrRange::create(const std::string& range) {\n  const auto parts = StringUtil::splitToken(range, \"/\");\n  if (parts.size() == 2) {\n    InstanceConstSharedPtr ptr = Utility::parseInternetAddress(std::string{parts[0]});\n    if (ptr->type() == Type::Ip) {\n      uint64_t length64;\n      if (absl::SimpleAtoi(parts[1], &length64)) {\n        if ((ptr->ip()->version() == IpVersion::v6 && length64 <= 128) ||\n            (ptr->ip()->version() == IpVersion::v4 && length64 <= 32)) {\n          return create(std::move(ptr), static_cast<uint32_t>(length64));\n        }\n      }\n    }\n  }\n  return CidrRange(nullptr, -1);\n}\n\n// static\nInstanceConstSharedPtr CidrRange::truncateIpAddressAndLength(InstanceConstSharedPtr address,\n                                                             int* length_io) {\n  int length = *length_io;\n  if (address == nullptr || length < 0 || address->type() != Type::Ip) {\n    *length_io = -1;\n    return nullptr;\n  }\n  switch (address->ip()->version()) {\n  case IpVersion::v4: {\n    if (length >= 32) {\n      // We're using all of the bits, so don't need to create a new address instance.\n      *length_io = 32;\n      return address;\n    } else if (length == 0) {\n      // Create an Ipv4Instance with only a port, which will thus have the any address.\n      return std::make_shared<Ipv4Instance>(uint32_t(0));\n    }\n    // Need to mask out the unused bits, and create an Ipv4Instance with this address.\n    uint32_t ip4 = ntohl(address->ip()->ipv4()->address());\n    ip4 &= ~0U << (32 - length);\n    sockaddr_in sa4;\n    sa4.sin_family = AF_INET;\n    sa4.sin_port = htons(0);\n    sa4.sin_addr.s_addr = htonl(ip4);\n    
return std::make_shared<Ipv4Instance>(&sa4);\n  }\n\n  case IpVersion::v6: {\n    if (length >= 128) {\n      // We're using all of the bits, so don't need to create a new address instance.\n      *length_io = 128;\n      return address;\n    } else if (length == 0) {\n      // Create an Ipv6Instance with only a port, which will thus have the any address.\n      return std::make_shared<Ipv6Instance>(uint32_t(0));\n    }\n    sockaddr_in6 sa6;\n    sa6.sin6_family = AF_INET6;\n    sa6.sin6_port = htons(0);\n\n    // The maximum number stored in absl::uint128 has every bit set to 1.\n    absl::uint128 mask = absl::Uint128Max();\n    // Shifting the value to the left sets all bits between 128-length and 128 to zero.\n    mask <<= (128 - length);\n    // This will mask out the unused bits of the address.\n    absl::uint128 ip6 = Utility::Ip6ntohl(address->ip()->ipv6()->address()) & mask;\n\n    absl::uint128 ip6_htonl = Utility::Ip6htonl(ip6);\n    static_assert(sizeof(absl::uint128) == 16, \"The size of absl::uint128 is not 16.\");\n    memcpy(&sa6.sin6_addr.s6_addr, &ip6_htonl, sizeof(absl::uint128));\n    return std::make_shared<Ipv6Instance>(sa6);\n  }\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nIpList::IpList(const Protobuf::RepeatedPtrField<envoy::config::core::v3::CidrRange>& cidrs) {\n  for (const envoy::config::core::v3::CidrRange& entry : cidrs) {\n    CidrRange list_entry = CidrRange::create(entry);\n    if (list_entry.isValid()) {\n      ip_list_.push_back(list_entry);\n    } else {\n      throw EnvoyException(\n          fmt::format(\"invalid ip/mask combo '{}/{}' (format is <ip>/<# mask bits>)\",\n                      entry.address_prefix(), entry.prefix_len().value()));\n    }\n  }\n}\n\nbool IpList::contains(const Instance& address) const {\n  for (const CidrRange& entry : ip_list_) {\n    if (entry.isInRange(address)) {\n      return true;\n    }\n  }\n  return false;\n}\n\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/cidr_range.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/network/address.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\n\n/**\n * A \"Classless Inter-Domain Routing\" range of internet addresses, aka a CIDR range, consisting\n * of an Ip address and a count of leading bits included in the mask. Other than those leading\n * bits, all of the other bits of the Ip address are zero. For more info, see RFC1519 or\n * https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing.\n */\nclass CidrRange {\npublic:\n  /**\n   * Constructs an uninitialized range: length == -1, and there is no associated address.\n   */\n  CidrRange();\n\n  /**\n   * Copies an existing CidrRange.\n   */\n  CidrRange(const CidrRange& other);\n\n  /**\n   * Overwrites this with other.\n   */\n  CidrRange& operator=(const CidrRange& other);\n\n  /**\n   * @return true if the ranges are identical.\n   */\n  bool operator==(const CidrRange& other) const;\n\n  /**\n   * @return Ip address data IFF length >= 0, otherwise nullptr.\n   */\n  const Ip* ip() const;\n\n  /**\n   * TODO(jamessynge) Consider making this absl::optional<int> length, or modifying the create()\n   * methods below to return absl::optional<CidrRange> (the latter is probably better).\n   * @return the number of bits of the address that are included in the mask. -1 if uninitialized\n   *         or invalid, else in the range 0 to 32 for IPv4, and 0 to 128 for IPv6.\n   */\n  int length() const;\n\n  /**\n   * @return true if the address argument is in the range of this object, false if not, including\n             if the range is uninitialized or if the argument is not of the same IpVersion.\n   */\n  bool isInRange(const Instance& address) const;\n\n  /**\n   * @return a human readable string for the range. 
This string will be in the following format:\n   *         - For IPv4 ranges: \"1.2.3.4/32\" or \"10.240.0.0/16\"\n   *         - For IPv6 ranges: \"1234:5678::f/128\" or \"1234:5678::/64\"\n   */\n  std::string asString() const;\n\n  /**\n   * @return true if this instance is valid; address != nullptr && length is appropriate for the\n   *         IP version (these are checked during construction, and reduced down to a check of\n   *         the length).\n   */\n  bool isValid() const { return length_ >= 0; }\n\n  /**\n   * TODO(ccaraman): Update CidrRange::create to support only constructing valid ranges.\n   * @return a CidrRange instance with the specified address and length, modified so that the only\n   *         bits that might be non-zero are in the high-order length bits, and so that length is\n   *         in the appropriate range (0 to 32 for IPv4, 0 to 128 for IPv6). If the address or\n   *         length is invalid, then the range will be invalid (i.e. length == -1).\n   */\n  static CidrRange create(InstanceConstSharedPtr address, int length);\n  static CidrRange create(const std::string& address, int length);\n\n  /**\n   * Constructs a CidrRange from a string with this format (same as returned\n   * by CidrRange::asString above):\n   *      <address>/<length>    e.g. 
\"10.240.0.0/16\" or \"1234:5678::/64\"\n   * TODO(ccaraman): Update CidrRange::create to support only constructing valid ranges.\n   * @return a CidrRange instance with the specified address and length if parsed successfully,\n   *         else with no address and a length of -1.\n   */\n  static CidrRange create(const std::string& range);\n\n  /**\n   * Constructs a CidrRange from envoy::config::core::v3::CidrRange.\n   * TODO(ccaraman): Update CidrRange::create to support only constructing valid ranges.\n   */\n  static CidrRange create(const envoy::config::core::v3::CidrRange& cidr);\n\n  /**\n   * Given an IP address and a length of high order bits to keep, returns an address\n   * where those high order bits are unmodified, and the remaining bits are all zero.\n   * length_io is reduced to be at most 32 for IPv4 address and at most 128 for IPv6\n   * addresses. If the address is invalid or the length is less than zero, then *length_io\n   * is set to -1 and nullptr is returned.\n   * @return a pointer to an address where the high order *length_io bits are unmodified\n   *         from address, and *length_io is in the range 0 to N, where N is the number of bits\n   *         in an address of the IP version (i.e. 
address->ip()->version()).\n   */\n  static InstanceConstSharedPtr truncateIpAddressAndLength(InstanceConstSharedPtr address,\n                                                           int* length_io);\n\nprivate:\n  CidrRange(InstanceConstSharedPtr address, int length);\n\n  InstanceConstSharedPtr address_;\n  int length_;\n};\n\n/**\n * Class for keeping a list of CidrRanges, and then determining whether an\n * IP address is in the CidrRange list.\n */\nclass IpList {\npublic:\n  explicit IpList(const Protobuf::RepeatedPtrField<envoy::config::core::v3::CidrRange>& cidrs);\n  IpList() = default;\n\n  bool contains(const Instance& address) const;\n  bool empty() const { return ip_list_.empty(); }\n\nprivate:\n  std::vector<CidrRange> ip_list_;\n};\n\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/connection_balancer_impl.cc",
    "content": "#include \"common/network/connection_balancer_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nvoid ExactConnectionBalancerImpl::registerHandler(BalancedConnectionHandler& handler) {\n  absl::MutexLock lock(&lock_);\n  handlers_.push_back(&handler);\n}\n\nvoid ExactConnectionBalancerImpl::unregisterHandler(BalancedConnectionHandler& handler) {\n  absl::MutexLock lock(&lock_);\n  // This could be made more efficient in various ways, but the number of listeners is generally\n  // small and this is a rare operation so we can start with this and optimize later if this\n  // becomes a perf bottleneck.\n  handlers_.erase(std::find(handlers_.begin(), handlers_.end(), &handler));\n}\n\nBalancedConnectionHandler&\nExactConnectionBalancerImpl::pickTargetHandler(BalancedConnectionHandler&) {\n  BalancedConnectionHandler* min_connection_handler = nullptr;\n  {\n    absl::MutexLock lock(&lock_);\n    for (BalancedConnectionHandler* handler : handlers_) {\n      if (min_connection_handler == nullptr ||\n          handler->numConnections() < min_connection_handler->numConnections()) {\n        min_connection_handler = handler;\n      }\n    }\n\n    min_connection_handler->incNumConnections();\n  }\n\n  return *min_connection_handler;\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/connection_balancer_impl.h",
    "content": "#pragma once\n\n#include \"envoy/network/connection_balancer.h\"\n\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Implementation of connection balancer that does exact balancing. This means that a lock is held\n * during balancing so that connection counts are nearly exactly balanced between handlers. This\n * is \"nearly\" exact in the sense that a connection might close in parallel thus making the counts\n * incorrect, but this should be rectified on the next accept. This balancer sacrifices accept\n * throughput for accuracy and should be used when there are a small number of connections that\n * rarely cycle (e.g., service mesh gRPC egress).\n */\nclass ExactConnectionBalancerImpl : public ConnectionBalancer {\npublic:\n  // ConnectionBalancer\n  void registerHandler(BalancedConnectionHandler& handler) override;\n  void unregisterHandler(BalancedConnectionHandler& handler) override;\n  BalancedConnectionHandler& pickTargetHandler(BalancedConnectionHandler& current_handler) override;\n\nprivate:\n  absl::Mutex lock_;\n  std::vector<BalancedConnectionHandler*> handlers_ ABSL_GUARDED_BY(lock_);\n};\n\n/**\n * A NOP connection balancer implementation that always continues execution after incrementing\n * the handler's connection count.\n */\nclass NopConnectionBalancerImpl : public ConnectionBalancer {\npublic:\n  // ConnectionBalancer\n  void registerHandler(BalancedConnectionHandler&) override {}\n  void unregisterHandler(BalancedConnectionHandler&) override {}\n  BalancedConnectionHandler&\n  pickTargetHandler(BalancedConnectionHandler& current_handler) override {\n    // In the NOP case just increment the connection count and return the current handler.\n    current_handler.incNumConnections();\n    return current_handler;\n  }\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/connection_impl.cc",
    "content": "#include \"common/network/connection_impl.h\"\n\n#include <atomic>\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/socket.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/raw_buffer_socket.h\"\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nvoid ConnectionImplUtility::updateBufferStats(uint64_t delta, uint64_t new_total,\n                                              uint64_t& previous_total, Stats::Counter& stat_total,\n                                              Stats::Gauge& stat_current) {\n  if (delta) {\n    stat_total.add(delta);\n  }\n\n  if (new_total != previous_total) {\n    if (new_total > previous_total) {\n      stat_current.add(new_total - previous_total);\n    } else {\n      stat_current.sub(previous_total - new_total);\n    }\n\n    previous_total = new_total;\n  }\n}\n\nstd::atomic<uint64_t> ConnectionImpl::next_global_id_;\n\nConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPtr&& socket,\n                               TransportSocketPtr&& transport_socket,\n                               StreamInfo::StreamInfo& stream_info, bool connected)\n    : ConnectionImplBase(dispatcher, next_global_id_++),\n      transport_socket_(std::move(transport_socket)), socket_(std::move(socket)),\n      stream_info_(stream_info), filter_manager_(*this),\n      read_buffer_([this]() -> void { this->onReadBufferLowWatermark(); },\n                   [this]() -> void { this->onReadBufferHighWatermark(); },\n                   []() -> void { /* TODO(adisuissa): Handle overflow 
watermark */ }),\n      write_buffer_(dispatcher.getWatermarkFactory().create(\n          [this]() -> void { this->onWriteBufferLowWatermark(); },\n          [this]() -> void { this->onWriteBufferHighWatermark(); },\n          []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })),\n      write_buffer_above_high_watermark_(false), detect_early_close_(true),\n      enable_half_close_(false), read_end_stream_raised_(false), read_end_stream_(false),\n      write_end_stream_(false), current_write_end_stream_(false), dispatch_buffered_data_(false) {\n\n  if (!connected) {\n    connecting_ = true;\n  }\n\n  Event::FileTriggerType trigger = Event::PlatformDefaultTriggerType;\n\n  // We never ask for both early close and read at the same time. If we are reading, we want to\n  // consume all available data.\n  file_event_ = socket_->ioHandle().createFileEvent(\n      dispatcher_, [this](uint32_t events) -> void { onFileEvent(events); }, trigger,\n      Event::FileReadyType::Read | Event::FileReadyType::Write);\n\n  transport_socket_->setTransportSocketCallbacks(*this);\n}\n\nConnectionImpl::~ConnectionImpl() {\n  ASSERT(!ioHandle().isOpen() && delayed_close_timer_ == nullptr,\n         \"ConnectionImpl was unexpectedly torn down without being closed.\");\n\n  // In general we assume that owning code has called close() previously to the destructor being\n  // run. This generally must be done so that callbacks run in the correct context (vs. deferred\n  // deletion). Hence the assert above. 
However, call close() here just to be completely sure that\n  // the fd is closed and make it more likely that we crash from a bad close callback.\n  close(ConnectionCloseType::NoFlush);\n}\n\nvoid ConnectionImpl::addWriteFilter(WriteFilterSharedPtr filter) {\n  filter_manager_.addWriteFilter(filter);\n}\n\nvoid ConnectionImpl::addFilter(FilterSharedPtr filter) { filter_manager_.addFilter(filter); }\n\nvoid ConnectionImpl::addReadFilter(ReadFilterSharedPtr filter) {\n  filter_manager_.addReadFilter(filter);\n}\n\nbool ConnectionImpl::initializeReadFilters() { return filter_manager_.initializeReadFilters(); }\n\nvoid ConnectionImpl::close(ConnectionCloseType type) {\n  if (!ioHandle().isOpen()) {\n    return;\n  }\n\n  uint64_t data_to_write = write_buffer_->length();\n  ENVOY_CONN_LOG(debug, \"closing data_to_write={} type={}\", *this, data_to_write, enumToInt(type));\n  const bool delayed_close_timeout_set = delayed_close_timeout_.count() > 0;\n  if (data_to_write == 0 || type == ConnectionCloseType::NoFlush ||\n      !transport_socket_->canFlushClose()) {\n    if (data_to_write > 0) {\n      // We aren't going to wait to flush, but try to write as much as we can if there is pending\n      // data.\n      transport_socket_->doWrite(*write_buffer_, true);\n    }\n\n    if (type == ConnectionCloseType::FlushWriteAndDelay && delayed_close_timeout_set) {\n      // The socket is being closed and either there is no more data to write or the data can not be\n      // flushed (!transport_socket_->canFlushClose()). 
Since a delayed close has been requested,\n      // start the delayed close timer if it hasn't been done already by a previous close().\n      // NOTE: Even though the delayed_close_state_ is being set to CloseAfterFlushAndWait, since\n      // a write event is not being registered for the socket, this logic is simply setting the\n      // timer and waiting for it to trigger to close the socket.\n      if (!inDelayedClose()) {\n        initializeDelayedCloseTimer();\n        delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait;\n        // Monitor for the peer closing the connection.\n        file_event_->setEnabled(enable_half_close_ ? 0 : Event::FileReadyType::Closed);\n      }\n    } else {\n      closeConnectionImmediately();\n    }\n  } else {\n    ASSERT(type == ConnectionCloseType::FlushWrite ||\n           type == ConnectionCloseType::FlushWriteAndDelay);\n\n    // If there is a pending delayed close, simply update the delayed close state.\n    //\n    // An example of this condition manifests when a downstream connection is closed early by Envoy,\n    // such as when a route can't be matched:\n    //   In ConnectionManagerImpl::onData()\n    //     1) Via codec_->dispatch(), a local reply with a 404 is sent to the client\n    //       a) ConnectionManagerImpl::doEndStream() issues the first connection close() via\n    //          ConnectionManagerImpl::checkForDeferredClose()\n    //     2) A second close is issued by a subsequent call to\n    //        ConnectionManagerImpl::checkForDeferredClose() prior to returning from onData()\n    if (inDelayedClose()) {\n      // Validate that a delayed close timer is already enabled unless it was disabled via\n      // configuration.\n      ASSERT(!delayed_close_timeout_set || delayed_close_timer_ != nullptr);\n      if (type == ConnectionCloseType::FlushWrite || !delayed_close_timeout_set) {\n        delayed_close_state_ = DelayedCloseState::CloseAfterFlush;\n      } else {\n        
delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait;\n      }\n      return;\n    }\n\n    // NOTE: At this point, it's already been validated that the connection is not already in\n    // delayed close processing and therefore the timer has not yet been created.\n    if (delayed_close_timeout_set) {\n      initializeDelayedCloseTimer();\n      delayed_close_state_ = (type == ConnectionCloseType::FlushWrite)\n                                 ? DelayedCloseState::CloseAfterFlush\n                                 : DelayedCloseState::CloseAfterFlushAndWait;\n    } else {\n      delayed_close_state_ = DelayedCloseState::CloseAfterFlush;\n    }\n\n    file_event_->setEnabled(Event::FileReadyType::Write |\n                            (enable_half_close_ ? 0 : Event::FileReadyType::Closed));\n  }\n}\n\nConnection::State ConnectionImpl::state() const {\n  if (!ioHandle().isOpen()) {\n    return State::Closed;\n  } else if (inDelayedClose()) {\n    return State::Closing;\n  } else {\n    return State::Open;\n  }\n}\n\nvoid ConnectionImpl::closeConnectionImmediately() { closeSocket(ConnectionEvent::LocalClose); }\n\nbool ConnectionImpl::consumerWantsToRead() {\n  return read_disable_count_ == 0 ||\n         (read_disable_count_ == 1 && read_buffer_.highWatermarkTriggered());\n}\n\nvoid ConnectionImpl::closeSocket(ConnectionEvent close_type) {\n  if (!ConnectionImpl::ioHandle().isOpen()) {\n    return;\n  }\n\n  // No need for a delayed close (if pending) now that the socket is being closed.\n  if (delayed_close_timer_) {\n    delayed_close_timer_->disableTimer();\n    delayed_close_timer_ = nullptr;\n  }\n\n  ENVOY_CONN_LOG(debug, \"closing socket: {}\", *this, static_cast<uint32_t>(close_type));\n  transport_socket_->closeSocket(close_type);\n\n  // Drain input and output buffers.\n  updateReadBufferStats(0, 0);\n  updateWriteBufferStats(0, 0);\n\n  // As the socket closes, drain any remaining data.\n  // The data won't be written out at this point, and where 
there are reference\n  // counted buffer fragments, it helps avoid lifetime issues with the\n  // connection outlasting the subscriber.\n  write_buffer_->drain(write_buffer_->length());\n\n  connection_stats_.reset();\n\n  file_event_.reset();\n\n  socket_->close();\n\n  // Call the base class directly as close() is called in the destructor.\n  ConnectionImpl::raiseEvent(close_type);\n}\n\nvoid ConnectionImpl::noDelay(bool enable) {\n  // There are cases where a connection to localhost can immediately fail (e.g., if the other end\n  // does not have enough fds, reaches a backlog limit, etc.). Because we run with deferred error\n  // events, the calling code may not yet know that the connection has failed. This is one call\n  // where we go outside of libevent and hit the fd directly and this case can fail if the fd is\n  // invalid. For this call instead of plumbing through logic that will immediately indicate that a\n  // connect failed, we will just ignore the noDelay() call if the socket is invalid since error is\n  // going to be raised shortly anyway and it makes the calling code simpler.\n  if (!ioHandle().isOpen()) {\n    return;\n  }\n\n  // Don't set NODELAY for unix domain sockets\n  if (socket_->addressType() == Address::Type::Pipe) {\n    return;\n  }\n\n  // Set NODELAY\n  int new_value = enable;\n  Api::SysCallIntResult result =\n      socket_->setSocketOption(IPPROTO_TCP, TCP_NODELAY, &new_value, sizeof(new_value));\n#if defined(__APPLE__)\n  if (SOCKET_FAILURE(result.rc_) && result.errno_ == SOCKET_ERROR_INVAL) {\n    // Sometimes occurs when the connection is not yet fully formed. Empirically, TCP_NODELAY is\n    // enabled despite this result.\n    return;\n  }\n#elif defined(WIN32)\n  if (SOCKET_FAILURE(result.rc_) &&\n      (result.errno_ == SOCKET_ERROR_AGAIN || result.errno_ == SOCKET_ERROR_INVAL)) {\n    // Sometimes occurs when the connection is not yet fully formed. 
Empirically, TCP_NODELAY is\n    // enabled despite this result.\n    return;\n  }\n#endif\n\n  RELEASE_ASSERT(result.rc_ == 0, fmt::format(\"Failed to set TCP_NODELAY with error {}, {}\",\n                                              result.errno_, errorDetails(result.errno_)));\n}\n\nvoid ConnectionImpl::onRead(uint64_t read_buffer_size) {\n  if (inDelayedClose() || !consumerWantsToRead()) {\n    return;\n  }\n  ASSERT(ioHandle().isOpen());\n\n  if (read_buffer_size == 0 && !read_end_stream_) {\n    return;\n  }\n\n  if (read_end_stream_) {\n    // read() on a raw socket will repeatedly return 0 (EOF) once EOF has\n    // occurred, so filter out the repeats so that filters don't have\n    // to handle repeats.\n    //\n    // I don't know of any cases where this actually happens (we should stop\n    // reading the socket after EOF), but this check guards against any bugs\n    // in ConnectionImpl or strangeness in the OS events (epoll, kqueue, etc)\n    // and maintains the guarantee for filters.\n    if (read_end_stream_raised_) {\n      // No further data can be delivered after end_stream\n      ASSERT(read_buffer_size == 0);\n      return;\n    }\n    read_end_stream_raised_ = true;\n  }\n\n  filter_manager_.onRead();\n}\n\nvoid ConnectionImpl::enableHalfClose(bool enabled) {\n  // This code doesn't correctly ensure that EV_CLOSE isn't set if reading is disabled\n  // when enabling half-close. 
This could be fixed, but isn't needed right now, so just\n  // ASSERT that it doesn't happen.\n  ASSERT(!enabled || read_disable_count_ == 0);\n\n  enable_half_close_ = enabled;\n}\n\nvoid ConnectionImpl::readDisable(bool disable) {\n  // Calls to readEnabled on a closed socket are considered to be an error.\n  ASSERT(state() == State::Open);\n  ASSERT(file_event_ != nullptr);\n\n  ENVOY_CONN_LOG(trace, \"readDisable: disable={} disable_count={} state={} buffer_length={}\", *this,\n                 disable, read_disable_count_, static_cast<int>(state()), read_buffer_.length());\n\n  // When we disable reads, we still allow for early close notifications (the equivalent of\n  // EPOLLRDHUP for an epoll backend). For backends that support it, this allows us to apply\n  // back pressure at the kernel layer, but still get timely notification of a FIN. Note that\n  // we are not guaranteed to get notified, so even if the remote has closed, we may not know\n  // until we try to write. Further note that currently we optionally don't correctly handle half\n  // closed TCP connections in the sense that we assume that a remote FIN means the remote intends a\n  // full close.\n  if (disable) {\n    ++read_disable_count_;\n\n    if (state() != State::Open || file_event_ == nullptr) {\n      // If readDisable is called on a closed connection, do not crash.\n      return;\n    }\n    if (read_disable_count_ > 1) {\n      // The socket has already been read disabled.\n      return;\n    }\n\n    // If half-close semantics are enabled, we never want early close notifications; we\n    // always want to read all available data, even if the other side has closed.\n    if (detect_early_close_ && !enable_half_close_) {\n      file_event_->setEnabled(Event::FileReadyType::Write | Event::FileReadyType::Closed);\n    } else {\n      file_event_->setEnabled(Event::FileReadyType::Write);\n    }\n  } else {\n    ASSERT(read_disable_count_ != 0);\n    --read_disable_count_;\n    if (state() != 
State::Open || file_event_ == nullptr) {\n      // If readDisable is called on a closed connection, do not crash.\n      return;\n    }\n\n    if (read_disable_count_ == 0) {\n      // We never ask for both early close and read at the same time. If we are reading, we want to\n      // consume all available data.\n      file_event_->setEnabled(Event::FileReadyType::Read | Event::FileReadyType::Write);\n    }\n\n    if (consumerWantsToRead() && read_buffer_.length() > 0) {\n      // If the connection has data buffered there's no guarantee there's also data in the kernel\n      // which will kick off the filter chain. Alternately if the read buffer has data the fd could\n      // be read disabled. To handle these cases, fake an event to make sure the buffered data gets\n      // processed regardless and ensure that we dispatch it via onRead.\n      dispatch_buffered_data_ = true;\n      setReadBufferReady();\n    }\n  }\n}\n\nvoid ConnectionImpl::raiseEvent(ConnectionEvent event) {\n  ConnectionImplBase::raiseConnectionEvent(event);\n  // We may have pending data in the write buffer on transport handshake\n  // completion, which may also have completed in the context of onReadReady(),\n  // where no check of the write buffer is made. Provide an opportunity to flush\n  // here. If connection write is not ready, this is harmless. 
We should only do\n  // this if we're still open (the above callbacks may have closed).\n  if (event == ConnectionEvent::Connected) {\n    flushWriteBuffer();\n  }\n}\n\nbool ConnectionImpl::readEnabled() const {\n  // Calls to readEnabled on a closed socket are considered to be an error.\n  ASSERT(state() == State::Open);\n  ASSERT(file_event_ != nullptr);\n  return read_disable_count_ == 0;\n}\n\nvoid ConnectionImpl::addBytesSentCallback(BytesSentCb cb) {\n  bytes_sent_callbacks_.emplace_back(cb);\n}\n\nvoid ConnectionImpl::rawWrite(Buffer::Instance& data, bool end_stream) {\n  write(data, end_stream, false);\n}\n\nvoid ConnectionImpl::write(Buffer::Instance& data, bool end_stream) {\n  write(data, end_stream, true);\n}\n\nvoid ConnectionImpl::write(Buffer::Instance& data, bool end_stream, bool through_filter_chain) {\n  ASSERT(!end_stream || enable_half_close_);\n\n  if (write_end_stream_) {\n    // It is an API violation to write more data after writing end_stream, but a duplicate\n    // end_stream with no data is harmless. This catches misuse of the API that could result in data\n    // being lost.\n    ASSERT(data.length() == 0 && end_stream);\n\n    return;\n  }\n\n  if (through_filter_chain) {\n    // NOTE: This is kind of a hack, but currently we don't support restart/continue on the write\n    //       path, so we just pass around the buffer passed to us in this function. 
If we ever\n    //       support buffer/restart/continue on the write path this needs to get more complicated.\n    current_write_buffer_ = &data;\n    current_write_end_stream_ = end_stream;\n    FilterStatus status = filter_manager_.onWrite();\n    current_write_buffer_ = nullptr;\n\n    if (FilterStatus::StopIteration == status) {\n      return;\n    }\n  }\n\n  write_end_stream_ = end_stream;\n  if (data.length() > 0 || end_stream) {\n    ENVOY_CONN_LOG(trace, \"writing {} bytes, end_stream {}\", *this, data.length(), end_stream);\n    // TODO(mattklein123): All data currently gets moved from the source buffer to the write buffer.\n    // This can lead to inefficient behavior if writing a bunch of small chunks. In this case, it\n    // would likely be more efficient to copy data below a certain size. VERY IMPORTANT: If this is\n    // ever changed, read the comment in SslSocket::doWrite() VERY carefully. That code assumes that\n    // we never change existing write_buffer_ chain elements between calls to SSL_write(). That code\n    // might need to change if we ever copy here.\n    write_buffer_->move(data);\n\n    // Activating a write event before the socket is connected has the side-effect of tricking\n    // doWriteReady into thinking the socket is connected. 
On macOS, the underlying write may fail\n    // with a connection error if a call to write(2) occurs before the connection is completed.\n    if (!connecting_) {\n      ASSERT(file_event_ != nullptr, \"ConnectionImpl file event was unexpectedly reset\");\n      file_event_->activate(Event::FileReadyType::Write);\n    }\n  }\n}\n\nvoid ConnectionImpl::setBufferLimits(uint32_t limit) {\n  read_buffer_limit_ = limit;\n\n  // Due to the fact that writes to the connection and flushing data from the connection are done\n  // asynchronously, we have the option of either setting the watermarks aggressively, and regularly\n  // enabling/disabling reads from the socket, or allowing more data, but then not triggering\n  // based on watermarks until 2x the data is buffered in the common case. Given these are all soft\n  // limits we err on the side of buffering more triggering watermark callbacks less often.\n  //\n  // Given the current implementation for straight up TCP proxying, the common case is reading\n  // |limit| bytes through the socket, passing |limit| bytes to the connection (triggering the high\n  // watermarks) and the immediately draining |limit| bytes to the socket (triggering the low\n  // watermarks). We avoid this by setting the high watermark to limit + 1 so a single read will\n  // not trigger watermarks if the socket is not blocked.\n  //\n  // If the connection class is changed to write to the buffer and flush to the socket in the same\n  // stack then instead of checking watermarks after the write and again after the flush it can\n  // check once after both operations complete. 
At that point it would be better to change the high\n  // watermark from |limit + 1| to |limit| as the common case (move |limit| bytes, flush |limit|\n  // bytes) would not trigger watermarks but a blocked socket (move |limit| bytes, flush 0 bytes)\n  // would result in respecting the exact buffer limit.\n  if (limit > 0) {\n    static_cast<Buffer::WatermarkBuffer*>(write_buffer_.get())->setWatermarks(limit + 1);\n    read_buffer_.setWatermarks(limit + 1);\n  }\n}\n\nvoid ConnectionImpl::onReadBufferLowWatermark() {\n  ENVOY_CONN_LOG(debug, \"onBelowReadBufferLowWatermark\", *this);\n  if (state() == State::Open) {\n    readDisable(false);\n  }\n}\n\nvoid ConnectionImpl::onReadBufferHighWatermark() {\n  ENVOY_CONN_LOG(debug, \"onAboveReadBufferHighWatermark\", *this);\n  if (state() == State::Open) {\n    readDisable(true);\n  }\n}\n\nvoid ConnectionImpl::onWriteBufferLowWatermark() {\n  ENVOY_CONN_LOG(debug, \"onBelowWriteBufferLowWatermark\", *this);\n  ASSERT(write_buffer_above_high_watermark_);\n  write_buffer_above_high_watermark_ = false;\n  for (ConnectionCallbacks* callback : callbacks_) {\n    callback->onBelowWriteBufferLowWatermark();\n  }\n}\n\nvoid ConnectionImpl::onWriteBufferHighWatermark() {\n  ENVOY_CONN_LOG(debug, \"onAboveWriteBufferHighWatermark\", *this);\n  ASSERT(!write_buffer_above_high_watermark_);\n  write_buffer_above_high_watermark_ = true;\n  for (ConnectionCallbacks* callback : callbacks_) {\n    callback->onAboveWriteBufferHighWatermark();\n  }\n}\n\nvoid ConnectionImpl::onFileEvent(uint32_t events) {\n  ENVOY_CONN_LOG(trace, \"socket event: {}\", *this, events);\n\n  if (immediate_error_event_ != ConnectionEvent::Connected) {\n    if (bind_error_) {\n      ENVOY_CONN_LOG(debug, \"raising bind error\", *this);\n      // Update stats here, rather than on bind failure, to give the caller a chance to\n      // setConnectionStats.\n      if (connection_stats_ && connection_stats_->bind_errors_) {\n        
connection_stats_->bind_errors_->inc();\n      }\n    } else {\n      ENVOY_CONN_LOG(debug, \"raising immediate error\", *this);\n    }\n    closeSocket(immediate_error_event_);\n    return;\n  }\n\n  if (events & Event::FileReadyType::Closed) {\n    // We never ask for both early close and read at the same time. If we are reading, we want to\n    // consume all available data.\n    ASSERT(!(events & Event::FileReadyType::Read));\n    ENVOY_CONN_LOG(debug, \"remote early close\", *this);\n    closeSocket(ConnectionEvent::RemoteClose);\n    return;\n  }\n\n  if (events & Event::FileReadyType::Write) {\n    onWriteReady();\n  }\n\n  // It's possible for a write event callback to close the socket (which will cause fd_ to be -1).\n  // In this case ignore write event processing.\n  if (ioHandle().isOpen() && (events & Event::FileReadyType::Read)) {\n    onReadReady();\n  }\n}\n\nvoid ConnectionImpl::onReadReady() {\n  ENVOY_CONN_LOG(trace, \"read ready. dispatch_buffered_data={}\", *this, dispatch_buffered_data_);\n  const bool latched_dispatch_buffered_data = dispatch_buffered_data_;\n  dispatch_buffered_data_ = false;\n\n  ASSERT(!connecting_);\n\n  // We get here while read disabled in two ways.\n  // 1) There was a call to setReadBufferReady(), for example if a raw buffer socket ceded due to\n  //    shouldDrainReadBuffer(). 
In this case we defer the event until the socket is read enabled.\n  // 2) The consumer of connection data called readDisable(true), and instead of reading from the\n  //    socket we simply need to dispatch already read data.\n  if (read_disable_count_ != 0) {\n    if (latched_dispatch_buffered_data && consumerWantsToRead()) {\n      onRead(read_buffer_.length());\n    }\n    return;\n  }\n\n  IoResult result = transport_socket_->doRead(read_buffer_);\n  uint64_t new_buffer_size = read_buffer_.length();\n  updateReadBufferStats(result.bytes_processed_, new_buffer_size);\n\n  // If this connection doesn't have half-close semantics, translate end_stream into\n  // a connection close.\n  if ((!enable_half_close_ && result.end_stream_read_)) {\n    result.end_stream_read_ = false;\n    result.action_ = PostIoAction::Close;\n  }\n\n  read_end_stream_ |= result.end_stream_read_;\n  if (result.bytes_processed_ != 0 || result.end_stream_read_ ||\n      (latched_dispatch_buffered_data && read_buffer_.length() > 0)) {\n    // Skip onRead if no bytes were processed unless we explicitly want to force onRead for\n    // buffered data. 
For instance, skip onRead if the connection was closed without producing\n    // more data.\n    onRead(new_buffer_size);\n  }\n\n  // The read callback may have already closed the connection.\n  if (result.action_ == PostIoAction::Close || bothSidesHalfClosed()) {\n    ENVOY_CONN_LOG(debug, \"remote close\", *this);\n    closeSocket(ConnectionEvent::RemoteClose);\n  }\n}\n\nabsl::optional<Connection::UnixDomainSocketPeerCredentials>\nConnectionImpl::unixSocketPeerCredentials() const {\n  // TODO(snowp): Support non-linux platforms.\n#ifndef SO_PEERCRED\n  return absl::nullopt;\n#else\n  struct ucred ucred;\n  socklen_t ucred_size = sizeof(ucred);\n  int rc = socket_->getSocketOption(SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size).rc_;\n  if (SOCKET_FAILURE(rc)) {\n    return absl::nullopt;\n  }\n\n  return {{ucred.pid, ucred.uid, ucred.gid}};\n#endif\n}\n\nvoid ConnectionImpl::onWriteReady() {\n  ENVOY_CONN_LOG(trace, \"write ready\", *this);\n\n  if (connecting_) {\n    int error;\n    socklen_t error_size = sizeof(error);\n    RELEASE_ASSERT(socket_->getSocketOption(SOL_SOCKET, SO_ERROR, &error, &error_size).rc_ == 0,\n                   \"\");\n\n    if (error == 0) {\n      ENVOY_CONN_LOG(debug, \"connected\", *this);\n      connecting_ = false;\n      transport_socket_->onConnected();\n      // It's possible that we closed during the connect callback.\n      if (state() != State::Open) {\n        ENVOY_CONN_LOG(debug, \"close during connected callback\", *this);\n        return;\n      }\n    } else {\n      ENVOY_CONN_LOG(debug, \"delayed connection error: {}\", *this, error);\n      closeSocket(ConnectionEvent::RemoteClose);\n      return;\n    }\n  }\n\n  IoResult result = transport_socket_->doWrite(*write_buffer_, write_end_stream_);\n  ASSERT(!result.end_stream_read_); // The interface guarantees that only read operations set this.\n  uint64_t new_buffer_size = write_buffer_->length();\n  updateWriteBufferStats(result.bytes_processed_, 
new_buffer_size);\n\n  // NOTE: If the delayed_close_timer_ is set, it must only trigger after a delayed_close_timeout_\n  // period of inactivity from the last write event. Therefore, the timer must be reset to its\n  // original timeout value unless the socket is going to be closed as a result of the doWrite().\n\n  if (result.action_ == PostIoAction::Close) {\n    // It is possible (though unlikely) for the connection to have already been closed during the\n    // write callback. This can happen if we manage to complete the SSL handshake in the write\n    // callback, raise a connected event, and close the connection.\n    closeSocket(ConnectionEvent::RemoteClose);\n  } else if ((inDelayedClose() && new_buffer_size == 0) || bothSidesHalfClosed()) {\n    ENVOY_CONN_LOG(debug, \"write flush complete\", *this);\n    if (delayed_close_state_ == DelayedCloseState::CloseAfterFlushAndWait) {\n      ASSERT(delayed_close_timer_ != nullptr && delayed_close_timer_->enabled());\n      if (result.bytes_processed_ > 0) {\n        delayed_close_timer_->enableTimer(delayed_close_timeout_);\n      }\n    } else {\n      ASSERT(bothSidesHalfClosed() || delayed_close_state_ == DelayedCloseState::CloseAfterFlush);\n      closeConnectionImmediately();\n    }\n  } else {\n    ASSERT(result.action_ == PostIoAction::KeepOpen);\n    ASSERT(!delayed_close_timer_ || delayed_close_timer_->enabled());\n    if (delayed_close_timer_ != nullptr && result.bytes_processed_ > 0) {\n      delayed_close_timer_->enableTimer(delayed_close_timeout_);\n    }\n    if (result.bytes_processed_ > 0) {\n      for (BytesSentCb& cb : bytes_sent_callbacks_) {\n        cb(result.bytes_processed_);\n\n        // If a callback closes the socket, stop iterating.\n        if (!ioHandle().isOpen()) {\n          return;\n        }\n      }\n    }\n  }\n}\n\nvoid ConnectionImpl::updateReadBufferStats(uint64_t num_read, uint64_t new_size) {\n  if (!connection_stats_) {\n    return;\n  }\n\n  
ConnectionImplUtility::updateBufferStats(num_read, new_size, last_read_buffer_size_,\n                                           connection_stats_->read_total_,\n                                           connection_stats_->read_current_);\n}\n\nvoid ConnectionImpl::updateWriteBufferStats(uint64_t num_written, uint64_t new_size) {\n  if (!connection_stats_) {\n    return;\n  }\n\n  ConnectionImplUtility::updateBufferStats(num_written, new_size, last_write_buffer_size_,\n                                           connection_stats_->write_total_,\n                                           connection_stats_->write_current_);\n}\n\nbool ConnectionImpl::bothSidesHalfClosed() {\n  // If the write_buffer_ is not empty, then the end_stream has not been sent to the transport yet.\n  return read_end_stream_ && write_end_stream_ && write_buffer_->length() == 0;\n}\n\nabsl::string_view ConnectionImpl::transportFailureReason() const {\n  return transport_socket_->failureReason();\n}\n\nabsl::optional<std::chrono::milliseconds> ConnectionImpl::lastRoundTripTime() const {\n  return socket_->lastRoundTripTime();\n};\n\nvoid ConnectionImpl::flushWriteBuffer() {\n  if (state() == State::Open && write_buffer_->length() > 0) {\n    onWriteReady();\n  }\n}\n\nClientConnectionImpl::ClientConnectionImpl(\n    Event::Dispatcher& dispatcher, const Address::InstanceConstSharedPtr& remote_address,\n    const Network::Address::InstanceConstSharedPtr& source_address,\n    Network::TransportSocketPtr&& transport_socket,\n    const Network::ConnectionSocket::OptionsSharedPtr& options)\n    : ConnectionImpl(dispatcher, std::make_unique<ClientSocketImpl>(remote_address, options),\n                     std::move(transport_socket), stream_info_, false),\n      stream_info_(dispatcher.timeSource()) {\n  // There are no meaningful socket options or source address semantics for\n  // non-IP sockets, so skip.\n  if (remote_address->ip() != nullptr) {\n    if (!Network::Socket::applyOptions(options, 
*socket_,\n                                       envoy::config::core::v3::SocketOption::STATE_PREBIND)) {\n      // Set a special error state to ensure asynchronous close to give the owner of the\n      // ConnectionImpl a chance to add callbacks and detect the \"disconnect\".\n      immediate_error_event_ = ConnectionEvent::LocalClose;\n      // Trigger a write event to close this connection out-of-band.\n      file_event_->activate(Event::FileReadyType::Write);\n      return;\n    }\n\n    const Network::Address::InstanceConstSharedPtr* source = &source_address;\n\n    if (socket_->localAddress()) {\n      source = &socket_->localAddress();\n    }\n\n    if (*source != nullptr) {\n      Api::SysCallIntResult result = socket_->bind(*source);\n      if (result.rc_ < 0) {\n        // TODO(lizan): consider add this error into transportFailureReason.\n        ENVOY_LOG_MISC(debug, \"Bind failure. Failed to bind to {}: {}\", source->get()->asString(),\n                       errorDetails(result.errno_));\n        bind_error_ = true;\n        // Set a special error state to ensure asynchronous close to give the owner of the\n        // ConnectionImpl a chance to add callbacks and detect the \"disconnect\".\n        immediate_error_event_ = ConnectionEvent::LocalClose;\n\n        // Trigger a write event to close this connection out-of-band.\n        file_event_->activate(Event::FileReadyType::Write);\n      }\n    }\n  }\n}\n\nvoid ClientConnectionImpl::connect() {\n  ENVOY_CONN_LOG(debug, \"connecting to {}\", *this, socket_->remoteAddress()->asString());\n  const Api::SysCallIntResult result = socket_->connect(socket_->remoteAddress());\n  if (result.rc_ == 0) {\n    // write will become ready.\n    ASSERT(connecting_);\n  } else {\n    ASSERT(SOCKET_FAILURE(result.rc_));\n#ifdef WIN32\n    // winsock2 connect returns EWOULDBLOCK if the socket is non-blocking and the connection\n    // cannot be completed immediately. 
We do not check for EINPROGRESS as that error is for\n    // blocking operations.\n    if (result.errno_ == SOCKET_ERROR_AGAIN) {\n#else\n    if (result.errno_ == SOCKET_ERROR_IN_PROGRESS) {\n#endif\n      ASSERT(connecting_);\n      ENVOY_CONN_LOG(debug, \"connection in progress\", *this);\n    } else {\n      immediate_error_event_ = ConnectionEvent::RemoteClose;\n      connecting_ = false;\n      ENVOY_CONN_LOG(debug, \"immediate connection error: {}\", *this, result.errno_);\n\n      // Trigger a write event. This is needed on macOS and seems harmless on Linux.\n      file_event_->activate(Event::FileReadyType::Write);\n    }\n  }\n}\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/connection_impl.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/network/transport_socket.h\"\n\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/event/libevent.h\"\n#include \"common/network/connection_impl_base.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nclass RandomPauseFilter;\nclass TestPauseFilter;\n\nnamespace Network {\n\n/**\n * Utility functions for the connection implementation.\n */\nclass ConnectionImplUtility {\npublic:\n  /**\n   * Update the buffer stats for a connection.\n   * @param delta supplies the data read/written.\n   * @param new_total supplies the final total buffer size.\n   * @param previous_total supplies the previous final total buffer size. previous_total will be\n   *        updated to new_total when the call is complete.\n   * @param stat_total supplies the counter to increment with the delta.\n   * @param stat_current supplies the gauge that should be updated with the delta of previous_total\n   *        and new_total.\n   */\n  static void updateBufferStats(uint64_t delta, uint64_t new_total, uint64_t& previous_total,\n                                Stats::Counter& stat_total, Stats::Gauge& stat_current);\n};\n\n/**\n * Implementation of Network::Connection and Network::FilterManagerConnection.\n */\nclass ConnectionImpl : public ConnectionImplBase, public TransportSocketCallbacks {\npublic:\n  ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPtr&& socket,\n                 TransportSocketPtr&& transport_socket, StreamInfo::StreamInfo& stream_info,\n                 bool connected);\n\n  ~ConnectionImpl() override;\n\n  // Network::FilterManager\n  void addWriteFilter(WriteFilterSharedPtr filter) override;\n  void addFilter(FilterSharedPtr filter) override;\n  void addReadFilter(ReadFilterSharedPtr filter) override;\n  bool initializeReadFilters() 
override;\n\n  // Network::Connection\n  void addBytesSentCallback(BytesSentCb cb) override;\n  void enableHalfClose(bool enabled) override;\n  void close(ConnectionCloseType type) final;\n  std::string nextProtocol() const override { return transport_socket_->protocol(); }\n  void noDelay(bool enable) override;\n  void readDisable(bool disable) override;\n  void detectEarlyCloseWhenReadDisabled(bool value) override { detect_early_close_ = value; }\n  bool readEnabled() const override;\n  const Address::InstanceConstSharedPtr& remoteAddress() const override {\n    return socket_->remoteAddress();\n  }\n  const Address::InstanceConstSharedPtr& directRemoteAddress() const override {\n    return socket_->directRemoteAddress();\n  }\n  const Address::InstanceConstSharedPtr& localAddress() const override {\n    return socket_->localAddress();\n  }\n  absl::optional<UnixDomainSocketPeerCredentials> unixSocketPeerCredentials() const override;\n  Ssl::ConnectionInfoConstSharedPtr ssl() const override { return transport_socket_->ssl(); }\n  State state() const override;\n  void write(Buffer::Instance& data, bool end_stream) override;\n  void setBufferLimits(uint32_t limit) override;\n  uint32_t bufferLimit() const override { return read_buffer_limit_; }\n  bool localAddressRestored() const override { return socket_->localAddressRestored(); }\n  bool aboveHighWatermark() const override { return write_buffer_above_high_watermark_; }\n  const ConnectionSocket::OptionsSharedPtr& socketOptions() const override {\n    return socket_->options();\n  }\n  absl::string_view requestedServerName() const override { return socket_->requestedServerName(); }\n  StreamInfo::StreamInfo& streamInfo() override { return stream_info_; }\n  const StreamInfo::StreamInfo& streamInfo() const override { return stream_info_; }\n  absl::string_view transportFailureReason() const override;\n  absl::optional<std::chrono::milliseconds> lastRoundTripTime() const override;\n\n  // 
Network::FilterManagerConnection\n  void rawWrite(Buffer::Instance& data, bool end_stream) override;\n\n  // Network::ReadBufferSource\n  StreamBuffer getReadBuffer() override { return {read_buffer_, read_end_stream_}; }\n  // Network::WriteBufferSource\n  StreamBuffer getWriteBuffer() override {\n    return {*current_write_buffer_, current_write_end_stream_};\n  }\n\n  // Network::TransportSocketCallbacks\n  IoHandle& ioHandle() final { return socket_->ioHandle(); }\n  const IoHandle& ioHandle() const override { return socket_->ioHandle(); }\n  Connection& connection() override { return *this; }\n  void raiseEvent(ConnectionEvent event) final;\n  // Should the read buffer be drained?\n  bool shouldDrainReadBuffer() override {\n    return read_buffer_limit_ > 0 && read_buffer_.length() >= read_buffer_limit_;\n  }\n  // Mark read buffer ready to read in the event loop. This is used when yielding following\n  // shouldDrainReadBuffer().\n  // TODO(htuch): While this is the basis for also yielding to other connections to provide some\n  // fair sharing of CPU resources, the underlying event loop does not make any fairness guarantees.\n  // Reconsider how to make fairness happen.\n  void setReadBufferReady() override { file_event_->activate(Event::FileReadyType::Read); }\n  void flushWriteBuffer() override;\n\n  // Obtain global next connection ID. 
This should only be used in tests.\n  static uint64_t nextGlobalIdForTest() { return next_global_id_; }\n\nprotected:\n  // A convenience function which returns true if\n  // 1) The read disable count is zero or\n  // 2) The read disable count is one due to the read buffer being overrun.\n  // In either case the consumer of the data would like to read from the buffer.\n  // If the read count is greater than one, or equal to one when the buffer is\n  // not overrun, then the consumer of the data has called readDisable, and does\n  // not want to read.\n  bool consumerWantsToRead();\n\n  // Network::ConnectionImplBase\n  void closeConnectionImmediately() final;\n\n  void closeSocket(ConnectionEvent close_type);\n\n  void onReadBufferLowWatermark();\n  void onReadBufferHighWatermark();\n  void onWriteBufferLowWatermark();\n  void onWriteBufferHighWatermark();\n\n  TransportSocketPtr transport_socket_;\n  ConnectionSocketPtr socket_;\n  StreamInfo::StreamInfo& stream_info_;\n  FilterManagerImpl filter_manager_;\n\n  // Ensure that if the consumer of the data from this connection isn't\n  // consuming, that the connection eventually stops reading from the wire.\n  Buffer::WatermarkBuffer read_buffer_;\n  // This must be a WatermarkBuffer, but as it is created by a factory the ConnectionImpl only has\n  // a generic pointer.\n  // It MUST be defined after the filter_manager_ as some filters may have callbacks that\n  // write_buffer_ invokes during its clean up.\n  Buffer::InstancePtr write_buffer_;\n  uint32_t read_buffer_limit_ = 0;\n  bool connecting_{false};\n  ConnectionEvent immediate_error_event_{ConnectionEvent::Connected};\n  bool bind_error_{false};\n  Event::FileEventPtr file_event_;\n\nprivate:\n  friend class Envoy::RandomPauseFilter;\n  friend class Envoy::TestPauseFilter;\n\n  void onFileEvent(uint32_t events);\n  void onRead(uint64_t read_buffer_size);\n  void onReadReady();\n  void onWriteReady();\n  void updateReadBufferStats(uint64_t num_read, uint64_t 
new_size);\n  void updateWriteBufferStats(uint64_t num_written, uint64_t new_size);\n\n  // Write data to the connection bypassing filter chain (optionally).\n  void write(Buffer::Instance& data, bool end_stream, bool through_filter_chain);\n\n  // Returns true iff end of stream has been both written and read.\n  bool bothSidesHalfClosed();\n\n  static std::atomic<uint64_t> next_global_id_;\n\n  std::list<BytesSentCb> bytes_sent_callbacks_;\n  // Tracks the number of times reads have been disabled. If N different components call\n  // readDisabled(true) this allows the connection to only resume reads when readDisabled(false)\n  // has been called N times.\n  uint64_t last_read_buffer_size_{};\n  uint64_t last_write_buffer_size_{};\n  Buffer::Instance* current_write_buffer_{};\n  uint32_t read_disable_count_{0};\n  bool write_buffer_above_high_watermark_ : 1;\n  bool detect_early_close_ : 1;\n  bool enable_half_close_ : 1;\n  bool read_end_stream_raised_ : 1;\n  bool read_end_stream_ : 1;\n  bool write_end_stream_ : 1;\n  bool current_write_end_stream_ : 1;\n  bool dispatch_buffered_data_ : 1;\n};\n\n/**\n * libevent implementation of Network::ClientConnection.\n */\nclass ClientConnectionImpl : public ConnectionImpl, virtual public ClientConnection {\npublic:\n  ClientConnectionImpl(Event::Dispatcher& dispatcher,\n                       const Address::InstanceConstSharedPtr& remote_address,\n                       const Address::InstanceConstSharedPtr& source_address,\n                       Network::TransportSocketPtr&& transport_socket,\n                       const Network::ConnectionSocket::OptionsSharedPtr& options);\n\n  // Network::ClientConnection\n  void connect() override;\n\nprivate:\n  StreamInfo::StreamInfoImpl stream_info_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/connection_impl_base.cc",
    "content": "#include \"common/network/connection_impl_base.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nvoid ConnectionImplBase::addIdToHashKey(std::vector<uint8_t>& hash_key, uint64_t connection_id) {\n  // Pack the connection_id into sizeof(connection_id) uint8_t entries in the hash_key vector.\n  hash_key.reserve(hash_key.size() + sizeof(connection_id));\n  for (unsigned i = 0; i < sizeof(connection_id); ++i) {\n    hash_key.push_back(0xFF & (connection_id >> (8 * i)));\n  }\n}\n\nConnectionImplBase::ConnectionImplBase(Event::Dispatcher& dispatcher, uint64_t id)\n    : dispatcher_(dispatcher), id_(id) {}\n\nvoid ConnectionImplBase::addConnectionCallbacks(ConnectionCallbacks& cb) {\n  callbacks_.push_back(&cb);\n}\n\nvoid ConnectionImplBase::hashKey(std::vector<uint8_t>& hash) const { addIdToHashKey(hash, id()); }\n\nvoid ConnectionImplBase::setConnectionStats(const ConnectionStats& stats) {\n  ASSERT(!connection_stats_,\n         \"Two network filters are attempting to set connection stats. 
This indicates an issue \"\n         \"with the configured filter chain.\");\n  connection_stats_ = std::make_unique<ConnectionStats>(stats);\n}\n\nvoid ConnectionImplBase::setDelayedCloseTimeout(std::chrono::milliseconds timeout) {\n  // Validate that this is only called prior to issuing a close() or closeSocket().\n  ASSERT(delayed_close_timer_ == nullptr && state() == State::Open);\n  delayed_close_timeout_ = timeout;\n}\n\nvoid ConnectionImplBase::initializeDelayedCloseTimer() {\n  const auto timeout = delayed_close_timeout_.count();\n  ASSERT(delayed_close_timer_ == nullptr && timeout > 0);\n  delayed_close_timer_ = dispatcher_.createTimer([this]() -> void { onDelayedCloseTimeout(); });\n  ENVOY_CONN_LOG(debug, \"setting delayed close timer with timeout {} ms\", *this, timeout);\n  delayed_close_timer_->enableTimer(delayed_close_timeout_);\n}\n\nvoid ConnectionImplBase::raiseConnectionEvent(ConnectionEvent event) {\n  for (ConnectionCallbacks* callback : callbacks_) {\n    // TODO(mattklein123): If we close while raising a connected event we should not raise further\n    // connected events.\n    callback->onEvent(event);\n  }\n}\n\nvoid ConnectionImplBase::onDelayedCloseTimeout() {\n  delayed_close_timer_.reset();\n  ENVOY_CONN_LOG(debug, \"triggered delayed close\", *this);\n  if (connection_stats_ != nullptr && connection_stats_->delayed_close_timeouts_ != nullptr) {\n    connection_stats_->delayed_close_timeouts_->inc();\n  }\n  closeConnectionImmediately();\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/connection_impl_base.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/network/filter_manager_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass ConnectionImplBase : public FilterManagerConnection,\n                           protected Logger::Loggable<Logger::Id::connection> {\npublic:\n  /**\n   * Add a connection id to a hash key\n   * @param hash_key the current hash key -- the function will only append to this vector\n   * @param connection_id the 64-bit connection id\n   */\n  static void addIdToHashKey(std::vector<uint8_t>& hash_key, uint64_t connection_id);\n\n  ConnectionImplBase(Event::Dispatcher& dispatcher, uint64_t id);\n\n  // Network::Connection\n  void addConnectionCallbacks(ConnectionCallbacks& cb) override;\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n  uint64_t id() const override { return id_; }\n  void hashKey(std::vector<uint8_t>& hash) const override;\n  void setConnectionStats(const ConnectionStats& stats) override;\n  void setDelayedCloseTimeout(std::chrono::milliseconds timeout) override;\n\nprotected:\n  void initializeDelayedCloseTimer();\n\n  bool inDelayedClose() const { return delayed_close_state_ != DelayedCloseState::None; }\n\n  void raiseConnectionEvent(ConnectionEvent event);\n\n  virtual void closeConnectionImmediately() PURE;\n\n  // States associated with delayed closing of the connection (i.e., when the underlying socket is\n  // not immediately close()d as a result of a ConnectionImpl::close()).\n  enum class DelayedCloseState {\n    None,\n    // The socket will be closed immediately after the buffer is flushed _or_ if a period of\n    // inactivity after the last write event greater than or equal to delayed_close_timeout_ has\n    // elapsed.\n    CloseAfterFlush,\n    // The socket will be closed after a grace period of delayed_close_timeout_ has elapsed after\n    // the socket is flushed _or_ if a period of inactivity after the 
last write event greater than\n    // or equal to delayed_close_timeout_ has elapsed.\n    CloseAfterFlushAndWait\n  };\n  DelayedCloseState delayed_close_state_{DelayedCloseState::None};\n\n  Event::TimerPtr delayed_close_timer_;\n  std::chrono::milliseconds delayed_close_timeout_{0};\n  Event::Dispatcher& dispatcher_;\n  const uint64_t id_;\n  std::list<ConnectionCallbacks*> callbacks_;\n  std::unique_ptr<ConnectionStats> connection_stats_;\n\nprivate:\n  // Callback issued when a delayed close timeout triggers.\n  void onDelayedCloseTimeout();\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/dns_impl.cc",
    "content": "#include \"common/network/dns_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"ares.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nDnsResolverImpl::DnsResolverImpl(\n    Event::Dispatcher& dispatcher,\n    const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,\n    const bool use_tcp_for_dns_lookups)\n    : dispatcher_(dispatcher),\n      timer_(dispatcher.createTimer([this] { onEventCallback(ARES_SOCKET_BAD, 0); })),\n      use_tcp_for_dns_lookups_(use_tcp_for_dns_lookups) {\n\n  AresOptions options = defaultAresOptions();\n  initializeChannel(&options.options_, options.optmask_);\n\n  if (!resolvers.empty()) {\n    std::vector<std::string> resolver_addrs;\n    resolver_addrs.reserve(resolvers.size());\n    for (const auto& resolver : resolvers) {\n      // This should be an IP address (i.e. not a pipe).\n      if (resolver->ip() == nullptr) {\n        ares_destroy(channel_);\n        throw EnvoyException(\n            fmt::format(\"DNS resolver '{}' is not an IP address\", resolver->asString()));\n      }\n      // Note that the ip()->port() may be zero if the port is not fully specified by the\n      // Address::Instance.\n      // resolver->asString() is avoided as that format may be modified by custom\n      // Address::Instance implementations in ways that make the <port> not a simple\n      // integer. See https://github.com/envoyproxy/envoy/pull/3366.\n      resolver_addrs.push_back(fmt::format(resolver->ip()->ipv6() ? 
\"[{}]:{}\" : \"{}:{}\",\n                                           resolver->ip()->addressAsString(),\n                                           resolver->ip()->port()));\n    }\n    const std::string resolvers_csv = absl::StrJoin(resolver_addrs, \",\");\n    int result = ares_set_servers_ports_csv(channel_, resolvers_csv.c_str());\n    RELEASE_ASSERT(result == ARES_SUCCESS, \"\");\n  }\n}\n\nDnsResolverImpl::~DnsResolverImpl() {\n  timer_->disableTimer();\n  ares_destroy(channel_);\n}\n\nDnsResolverImpl::AresOptions DnsResolverImpl::defaultAresOptions() {\n  AresOptions options{};\n\n  if (use_tcp_for_dns_lookups_) {\n    options.optmask_ |= ARES_OPT_FLAGS;\n    options.options_.flags |= ARES_FLAG_USEVC;\n  }\n\n  return options;\n}\n\nvoid DnsResolverImpl::initializeChannel(ares_options* options, int optmask) {\n  options->sock_state_cb = [](void* arg, os_fd_t fd, int read, int write) {\n    static_cast<DnsResolverImpl*>(arg)->onAresSocketStateChange(fd, read, write);\n  };\n  options->sock_state_cb_data = this;\n  ares_init_options(&channel_, options, optmask | ARES_OPT_SOCK_STATE_CB);\n}\n\nvoid DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, int timeouts,\n                                                                   ares_addrinfo* addrinfo) {\n  // We receive ARES_EDESTRUCTION when destructing with pending queries.\n  if (status == ARES_EDESTRUCTION) {\n    ASSERT(owned_);\n    // This destruction might have been triggered by a peer PendingResolution that received a\n    // ARES_ECONNREFUSED. If the PendingResolution has not been cancelled that means that the\n    // callback_ target _should_ still be around. 
In that case, raise the callback_ so the target\n    // can be done with this query and initiate a new one.\n    if (!cancelled_) {\n      callback_(ResolutionStatus::Failure, {});\n    }\n    delete this;\n    return;\n  }\n  if (!fallback_if_failed_) {\n    completed_ = true;\n\n    // If c-ares returns ARES_ECONNREFUSED and there is no fallback we assume that the channel_ is\n    // broken. Mark the channel dirty so that it is destroyed and reinitialized on a subsequent call\n    // to DnsResolver::resolve(). The optimal solution would be for c-ares to reinitialize the\n    // channel, and not have Envoy track side effects.\n    // context: https://github.com/envoyproxy/envoy/issues/4543 and\n    // https://github.com/c-ares/c-ares/issues/301.\n    //\n    // The channel cannot be destroyed and reinitialized here because that leads to a c-ares\n    // segfault.\n    if (status == ARES_ECONNREFUSED) {\n      parent_.dirty_channel_ = true;\n    }\n  }\n\n  std::list<DnsResponse> address_list;\n  ResolutionStatus resolution_status;\n  if (status == ARES_SUCCESS) {\n    resolution_status = ResolutionStatus::Success;\n    if (addrinfo != nullptr && addrinfo->nodes != nullptr) {\n      if (addrinfo->nodes->ai_family == AF_INET) {\n        for (const ares_addrinfo_node* ai = addrinfo->nodes; ai != nullptr; ai = ai->ai_next) {\n          sockaddr_in address;\n          memset(&address, 0, sizeof(address));\n          address.sin_family = AF_INET;\n          address.sin_port = 0;\n          address.sin_addr = reinterpret_cast<sockaddr_in*>(ai->ai_addr)->sin_addr;\n\n          address_list.emplace_back(\n              DnsResponse(std::make_shared<const Address::Ipv4Instance>(&address),\n                          std::chrono::seconds(ai->ai_ttl)));\n        }\n      } else if (addrinfo->nodes->ai_family == AF_INET6) {\n        for (const ares_addrinfo_node* ai = addrinfo->nodes; ai != nullptr; ai = ai->ai_next) {\n          sockaddr_in6 address;\n          memset(&address, 
0, sizeof(address));\n          address.sin6_family = AF_INET6;\n          address.sin6_port = 0;\n          address.sin6_addr = reinterpret_cast<sockaddr_in6*>(ai->ai_addr)->sin6_addr;\n          address_list.emplace_back(\n              DnsResponse(std::make_shared<const Address::Ipv6Instance>(address),\n                          std::chrono::seconds(ai->ai_ttl)));\n        }\n      }\n    }\n\n    if (!address_list.empty()) {\n      completed_ = true;\n    }\n\n    ASSERT(addrinfo != nullptr);\n    ares_freeaddrinfo(addrinfo);\n  } else {\n    resolution_status = ResolutionStatus::Failure;\n  }\n\n  if (timeouts > 0) {\n    ENVOY_LOG(debug, \"DNS request timed out {} times\", timeouts);\n  }\n\n  if (completed_) {\n    if (!cancelled_) {\n      try {\n        callback_(resolution_status, std::move(address_list));\n      } catch (const EnvoyException& e) {\n        ENVOY_LOG(critical, \"EnvoyException in c-ares callback: {}\", e.what());\n        dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); });\n      } catch (const std::exception& e) {\n        ENVOY_LOG(critical, \"std::exception in c-ares callback: {}\", e.what());\n        dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); });\n      } catch (...) 
{\n        ENVOY_LOG(critical, \"Unknown exception in c-ares callback\");\n        dispatcher_.post([] { throw EnvoyException(\"unknown\"); });\n      }\n    }\n    if (owned_) {\n      delete this;\n      return;\n    }\n  }\n\n  if (!completed_ && fallback_if_failed_) {\n    fallback_if_failed_ = false;\n    getAddrInfo(AF_INET);\n    // Note: Nothing can follow this call to getAddrInfo due to deletion of this\n    // object upon synchronous resolution.\n    return;\n  }\n}\n\nvoid DnsResolverImpl::updateAresTimer() {\n  // Update the timeout for events.\n  timeval timeout;\n  timeval* timeout_result = ares_timeout(channel_, nullptr, &timeout);\n  if (timeout_result != nullptr) {\n    const auto ms =\n        std::chrono::milliseconds(timeout_result->tv_sec * 1000 + timeout_result->tv_usec / 1000);\n    ENVOY_LOG(trace, \"Setting DNS resolution timer for {} milliseconds\", ms.count());\n    timer_->enableTimer(ms);\n  } else {\n    timer_->disableTimer();\n  }\n}\n\nvoid DnsResolverImpl::onEventCallback(os_fd_t fd, uint32_t events) {\n  const ares_socket_t read_fd = events & Event::FileReadyType::Read ? fd : ARES_SOCKET_BAD;\n  const ares_socket_t write_fd = events & Event::FileReadyType::Write ? fd : ARES_SOCKET_BAD;\n  ares_process_fd(channel_, read_fd, write_fd);\n  updateAresTimer();\n}\n\nvoid DnsResolverImpl::onAresSocketStateChange(os_fd_t fd, int read, int write) {\n  updateAresTimer();\n  auto it = events_.find(fd);\n  // Stop tracking events for fd if no more state change events.\n  if (read == 0 && write == 0) {\n    if (it != events_.end()) {\n      events_.erase(it);\n    }\n    return;\n  }\n\n  // If we weren't tracking the fd before, create a new FileEvent.\n  if (it == events_.end()) {\n    events_[fd] = dispatcher_.createFileEvent(\n        fd, [this, fd](uint32_t events) { onEventCallback(fd, events); },\n        Event::FileTriggerType::Level, Event::FileReadyType::Read | Event::FileReadyType::Write);\n  }\n  events_[fd]->setEnabled((read ? 
Event::FileReadyType::Read : 0) |\n                          (write ? Event::FileReadyType::Write : 0));\n}\n\nActiveDnsQuery* DnsResolverImpl::resolve(const std::string& dns_name,\n                                         DnsLookupFamily dns_lookup_family, ResolveCb callback) {\n  // TODO(hennna): Add DNS caching which will allow testing the edge case of a\n  // failed initial call to getAddrInfo followed by a synchronous IPv4\n  // resolution.\n\n  // @see DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback for why this is done.\n  if (dirty_channel_) {\n    dirty_channel_ = false;\n    ares_destroy(channel_);\n\n    AresOptions options = defaultAresOptions();\n    initializeChannel(&options.options_, options.optmask_);\n  }\n  std::unique_ptr<PendingResolution> pending_resolution(\n      new PendingResolution(*this, callback, dispatcher_, channel_, dns_name));\n  if (dns_lookup_family == DnsLookupFamily::Auto) {\n    pending_resolution->fallback_if_failed_ = true;\n  }\n\n  if (dns_lookup_family == DnsLookupFamily::V4Only) {\n    pending_resolution->getAddrInfo(AF_INET);\n  } else {\n    pending_resolution->getAddrInfo(AF_INET6);\n  }\n\n  if (pending_resolution->completed_) {\n    // Resolution does not need asynchronous behavior or network events. 
For\n    // example, localhost lookup.\n    return nullptr;\n  } else {\n    // Enable timer to wake us up if the request times out.\n    updateAresTimer();\n\n    // The PendingResolution will self-delete when the request completes (including if cancelled or\n    // if ~DnsResolverImpl() happens via ares_destroy() and subsequent handling of ARES_EDESTRUCTION\n    // in DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback()).\n    pending_resolution->owned_ = true;\n    return pending_resolution.release();\n  }\n}\n\nvoid DnsResolverImpl::PendingResolution::getAddrInfo(int family) {\n  struct ares_addrinfo_hints hints = {};\n  hints.ai_family = family;\n\n  /**\n   * ARES_AI_NOSORT result addresses will not be sorted and no connections to resolved addresses\n   * will be attempted\n   */\n  hints.ai_flags = ARES_AI_NOSORT;\n\n  ares_getaddrinfo(\n      channel_, dns_name_.c_str(), /* service */ nullptr, &hints,\n      [](void* arg, int status, int timeouts, ares_addrinfo* addrinfo) {\n        static_cast<PendingResolution*>(arg)->onAresGetAddrInfoCallback(status, timeouts, addrinfo);\n      },\n      this);\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/dns_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/file_event.h\"\n#include \"envoy/network/dns.h\"\n\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"ares.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass DnsResolverImplPeer;\n\n/**\n * Implementation of DnsResolver that uses c-ares. All calls and callbacks are assumed to\n * happen on the thread that owns the creating dispatcher.\n */\nclass DnsResolverImpl : public DnsResolver, protected Logger::Loggable<Logger::Id::upstream> {\npublic:\n  DnsResolverImpl(Event::Dispatcher& dispatcher,\n                  const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,\n                  const bool use_tcp_for_dns_lookups);\n  ~DnsResolverImpl() override;\n\n  // Network::DnsResolver\n  ActiveDnsQuery* resolve(const std::string& dns_name, DnsLookupFamily dns_lookup_family,\n                          ResolveCb callback) override;\n\nprivate:\n  friend class DnsResolverImplPeer;\n  struct PendingResolution : public ActiveDnsQuery {\n    // Network::ActiveDnsQuery\n    PendingResolution(DnsResolverImpl& parent, ResolveCb callback, Event::Dispatcher& dispatcher,\n                      ares_channel channel, const std::string& dns_name)\n        : parent_(parent), callback_(callback), dispatcher_(dispatcher), channel_(channel),\n          dns_name_(dns_name) {}\n\n    void cancel() override {\n      // c-ares only supports channel-wide cancellation, so we just allow the\n      // network events to continue but don't invoke the callback on completion.\n      cancelled_ = true;\n    }\n\n    /**\n     * ares_getaddrinfo query callback.\n     * @param status return status of call to ares_getaddrinfo.\n     * @param timeouts the number of times the request timed 
out.\n     * @param addrinfo structure to store address info.\n     */\n    void onAresGetAddrInfoCallback(int status, int timeouts, ares_addrinfo* addrinfo);\n    /**\n     * wrapper function of call to ares_getaddrinfo.\n     * @param family currently AF_INET and AF_INET6 are supported.\n     */\n    void getAddrInfo(int family);\n\n    DnsResolverImpl& parent_;\n    // Caller supplied callback to invoke on query completion or error.\n    const ResolveCb callback_;\n    // Dispatcher to post any callback_ exceptions to.\n    Event::Dispatcher& dispatcher_;\n    // Does the object own itself? Resource reclamation occurs via self-deleting\n    // on query completion or error.\n    bool owned_ = false;\n    // Has the query completed? Only meaningful if !owned_;\n    bool completed_ = false;\n    // Was the query cancelled via cancel()?\n    bool cancelled_ = false;\n    // If dns_lookup_family is \"fallback\", fallback to v4 address if v6\n    // resolution failed.\n    bool fallback_if_failed_ = false;\n    const ares_channel channel_;\n    const std::string dns_name_;\n  };\n\n  struct AresOptions {\n    ares_options options_;\n    int optmask_;\n  };\n\n  // Callback for events on sockets tracked in events_.\n  void onEventCallback(os_fd_t fd, uint32_t events);\n  // c-ares callback when a socket state changes, indicating that libevent\n  // should listen for read/write events.\n  void onAresSocketStateChange(os_fd_t fd, int read, int write);\n  // Initialize the channel.\n  void initializeChannel(ares_options* options, int optmask);\n  // Update timer for c-ares timeouts.\n  void updateAresTimer();\n  // Return default AresOptions.\n  AresOptions defaultAresOptions();\n\n  Event::Dispatcher& dispatcher_;\n  Event::TimerPtr timer_;\n  ares_channel channel_;\n  bool dirty_channel_{};\n  const bool use_tcp_for_dns_lookups_;\n  absl::node_hash_map<int, Event::FileEventPtr> events_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/filter_impl.h",
    "content": "#pragma once\n\n#include \"envoy/network/filter.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Implementation of Network::ReadFilter that discards read callbacks.\n */\nclass ReadFilterBaseImpl : public ReadFilter {\npublic:\n  void initializeReadFilterCallbacks(ReadFilterCallbacks&) override {}\n  Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; }\n};\n\n/**\n * Implementation of Network::Filter that discards read callbacks.\n */\nclass FilterBaseImpl : public Filter {\npublic:\n  void initializeReadFilterCallbacks(ReadFilterCallbacks&) override {}\n  Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; }\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/filter_manager_impl.cc",
    "content": "#include \"common/network/filter_manager_impl.h\"\n\n#include <list>\n\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nvoid FilterManagerImpl::addWriteFilter(WriteFilterSharedPtr filter) {\n  ASSERT(connection_.state() == Connection::State::Open);\n  ActiveWriteFilterPtr new_filter(new ActiveWriteFilter{*this, filter});\n  filter->initializeWriteFilterCallbacks(*new_filter);\n  LinkedList::moveIntoList(std::move(new_filter), downstream_filters_);\n}\n\nvoid FilterManagerImpl::addFilter(FilterSharedPtr filter) {\n  addReadFilter(filter);\n  addWriteFilter(filter);\n}\n\nvoid FilterManagerImpl::addReadFilter(ReadFilterSharedPtr filter) {\n  ASSERT(connection_.state() == Connection::State::Open);\n  ActiveReadFilterPtr new_filter(new ActiveReadFilter{*this, filter});\n  filter->initializeReadFilterCallbacks(*new_filter);\n  LinkedList::moveIntoListBack(std::move(new_filter), upstream_filters_);\n}\n\nbool FilterManagerImpl::initializeReadFilters() {\n  if (upstream_filters_.empty()) {\n    return false;\n  }\n  onContinueReading(nullptr, connection_);\n  return true;\n}\n\nvoid FilterManagerImpl::onContinueReading(ActiveReadFilter* filter,\n                                          ReadBufferSource& buffer_source) {\n  // Filter could return status == FilterStatus::StopIteration immediately, close the connection and\n  // use callback to call this function.\n  if (connection_.state() != Connection::State::Open) {\n    return;\n  }\n\n  std::list<ActiveReadFilterPtr>::iterator entry;\n  if (!filter) {\n    connection_.streamInfo().addBytesReceived(buffer_source.getReadBuffer().buffer.length());\n    entry = upstream_filters_.begin();\n  } else {\n    entry = std::next(filter->entry());\n  }\n\n  for (; entry != upstream_filters_.end(); entry++) {\n    if (!(*entry)->initialized_) {\n      (*entry)->initialized_ = true;\n      FilterStatus status = 
(*entry)->filter_->onNewConnection();\n      if (status == FilterStatus::StopIteration || connection_.state() != Connection::State::Open) {\n        return;\n      }\n    }\n\n    StreamBuffer read_buffer = buffer_source.getReadBuffer();\n    if (read_buffer.buffer.length() > 0 || read_buffer.end_stream) {\n      FilterStatus status = (*entry)->filter_->onData(read_buffer.buffer, read_buffer.end_stream);\n      if (status == FilterStatus::StopIteration || connection_.state() != Connection::State::Open) {\n        return;\n      }\n    }\n  }\n}\n\nvoid FilterManagerImpl::onRead() {\n  ASSERT(!upstream_filters_.empty());\n  onContinueReading(nullptr, connection_);\n}\n\nFilterStatus FilterManagerImpl::onWrite() { return onWrite(nullptr, connection_); }\n\nFilterStatus FilterManagerImpl::onWrite(ActiveWriteFilter* filter,\n                                        WriteBufferSource& buffer_source) {\n  // Filter could return status == FilterStatus::StopIteration immediately, close the connection and\n  // use callback to call this function.\n  if (connection_.state() != Connection::State::Open) {\n    return FilterStatus::StopIteration;\n  }\n\n  std::list<ActiveWriteFilterPtr>::iterator entry;\n  if (!filter) {\n    entry = downstream_filters_.begin();\n  } else {\n    entry = std::next(filter->entry());\n  }\n\n  for (; entry != downstream_filters_.end(); entry++) {\n    StreamBuffer write_buffer = buffer_source.getWriteBuffer();\n    FilterStatus status = (*entry)->filter_->onWrite(write_buffer.buffer, write_buffer.end_stream);\n    if (status == FilterStatus::StopIteration || connection_.state() != Connection::State::Open) {\n      return FilterStatus::StopIteration;\n    }\n  }\n\n  // Report the final bytes written to the wire\n  connection_.streamInfo().addBytesSent(buffer_source.getWriteBuffer().buffer.length());\n  return FilterStatus::Continue;\n}\n\nvoid FilterManagerImpl::onResumeWriting(ActiveWriteFilter* filter,\n                                        
WriteBufferSource& buffer_source) {\n  auto status = onWrite(filter, buffer_source);\n  if (status == FilterStatus::Continue) {\n    StreamBuffer write_buffer = buffer_source.getWriteBuffer();\n    connection_.rawWrite(write_buffer.buffer, write_buffer.end_stream);\n  }\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/filter_manager_impl.h",
    "content": "#pragma once\n\n#include <list>\n#include <memory>\n\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n\n#include \"common/common/linked_object.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nstruct StreamBuffer {\n  Buffer::Instance& buffer;\n  const bool end_stream;\n};\n\n/**\n * Interface used to obtain read buffers.\n */\nclass ReadBufferSource {\npublic:\n  virtual ~ReadBufferSource() = default;\n\n  /**\n   * Fetch the read buffer for the source.\n   */\n  virtual StreamBuffer getReadBuffer() PURE;\n};\n\n/**\n * Interface used to obtain write buffers.\n */\nclass WriteBufferSource {\npublic:\n  virtual ~WriteBufferSource() = default;\n\n  /**\n   * Fetch the write buffer for the source.\n   */\n  virtual StreamBuffer getWriteBuffer() PURE;\n};\n\n/**\n * Adapter that masquerades a given buffer instance as a ReadBufferSource.\n */\nclass FixedReadBufferSource : public ReadBufferSource {\npublic:\n  FixedReadBufferSource(Buffer::Instance& data, bool end_stream)\n      : data_(data), end_stream_(end_stream) {}\n\n  StreamBuffer getReadBuffer() override { return {data_, end_stream_}; }\n\nprivate:\n  Buffer::Instance& data_;\n  const bool end_stream_;\n};\n\n/**\n * Adapter that masquerades a given buffer instance as a WriteBufferSource.\n */\nclass FixedWriteBufferSource : public WriteBufferSource {\npublic:\n  FixedWriteBufferSource(Buffer::Instance& data, bool end_stream)\n      : data_(data), end_stream_(end_stream) {}\n\n  StreamBuffer getWriteBuffer() override { return {data_, end_stream_}; }\n\nprivate:\n  Buffer::Instance& data_;\n  const bool end_stream_;\n};\n\n/**\n * Connection enriched with methods for advanced cases, i.e. 
write data bypassing filter chain.\n *\n * Since FilterManager is only user of those methods for now, the class is named after it.\n */\nclass FilterManagerConnection : public virtual Connection,\n                                public ReadBufferSource,\n                                public WriteBufferSource {\npublic:\n  ~FilterManagerConnection() override = default;\n\n  /**\n   * Write data to the connection bypassing filter chain.\n   *\n   * I.e., consider a scenario where iteration over the filter chain is stopped at some point\n   * and later is resumed via a call to WriteFilterCallbacks::injectWriteDataToFilterChain().\n   *\n   * @param data supplies the data to write to the connection.\n   * @param end_stream supplies whether this is the last byte to write on the connection.\n   */\n  virtual void rawWrite(Buffer::Instance& data, bool end_stream) PURE;\n};\n\n/**\n * This is a filter manager for TCP (L4) filters. It is split out for ease of testing.\n */\nclass FilterManagerImpl {\npublic:\n  FilterManagerImpl(FilterManagerConnection& connection) : connection_(connection) {}\n\n  void addWriteFilter(WriteFilterSharedPtr filter);\n  void addFilter(FilterSharedPtr filter);\n  void addReadFilter(ReadFilterSharedPtr filter);\n  bool initializeReadFilters();\n  void onRead();\n  FilterStatus onWrite();\n\nprivate:\n  struct ActiveReadFilter : public ReadFilterCallbacks, LinkedObject<ActiveReadFilter> {\n    ActiveReadFilter(FilterManagerImpl& parent, ReadFilterSharedPtr filter)\n        : parent_(parent), filter_(filter) {}\n\n    Connection& connection() override { return parent_.connection_; }\n    void continueReading() override { parent_.onContinueReading(this, parent_.connection_); }\n    void injectReadDataToFilterChain(Buffer::Instance& data, bool end_stream) override {\n      FixedReadBufferSource buffer_source{data, end_stream};\n      parent_.onContinueReading(this, buffer_source);\n    }\n    Upstream::HostDescriptionConstSharedPtr upstreamHost() 
override {\n      return parent_.host_description_;\n    }\n    void upstreamHost(Upstream::HostDescriptionConstSharedPtr host) override {\n      parent_.host_description_ = host;\n    }\n\n    FilterManagerImpl& parent_;\n    ReadFilterSharedPtr filter_;\n    bool initialized_{};\n  };\n\n  using ActiveReadFilterPtr = std::unique_ptr<ActiveReadFilter>;\n\n  struct ActiveWriteFilter : public WriteFilterCallbacks, LinkedObject<ActiveWriteFilter> {\n    ActiveWriteFilter(FilterManagerImpl& parent, WriteFilterSharedPtr filter)\n        : parent_(parent), filter_(std::move(filter)) {}\n\n    Connection& connection() override { return parent_.connection_; }\n    void injectWriteDataToFilterChain(Buffer::Instance& data, bool end_stream) override {\n      FixedWriteBufferSource buffer_source{data, end_stream};\n      parent_.onResumeWriting(this, buffer_source);\n    }\n\n    FilterManagerImpl& parent_;\n    WriteFilterSharedPtr filter_;\n  };\n\n  using ActiveWriteFilterPtr = std::unique_ptr<ActiveWriteFilter>;\n\n  void onContinueReading(ActiveReadFilter* filter, ReadBufferSource& buffer_source);\n\n  FilterStatus onWrite(ActiveWriteFilter* filter, WriteBufferSource& buffer_source);\n  void onResumeWriting(ActiveWriteFilter* filter, WriteBufferSource& buffer_source);\n\n  FilterManagerConnection& connection_;\n  Upstream::HostDescriptionConstSharedPtr host_description_;\n  std::list<ActiveReadFilterPtr> upstream_filters_;\n  std::list<ActiveWriteFilterPtr> downstream_filters_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/filter_matcher.cc",
    "content": "#include \"common/network/filter_matcher.h\"\n\n#include \"envoy/network/filter.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/strings/str_format.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nListenerFilterMatcherPtr ListenerFilterMatcherBuilder::buildListenerFilterMatcher(\n    const envoy::config::listener::v3::ListenerFilterChainMatchPredicate& match_config) {\n  switch (match_config.rule_case()) {\n  case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kAnyMatch:\n    return std::make_unique<ListenerFilterAnyMatcher>();\n  case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kNotMatch: {\n    return std::make_unique<ListenerFilterNotMatcher>(match_config.not_match());\n  }\n  case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kAndMatch: {\n    return std::make_unique<ListenerFilterAndMatcher>(match_config.and_match().rules());\n  }\n  case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kOrMatch: {\n    return std::make_unique<ListenerFilterOrMatcher>(match_config.or_match().rules());\n  }\n  case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::\n      kDestinationPortRange: {\n    return std::make_unique<ListenerFilterDstPortMatcher>(match_config.destination_port_range());\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nListenerFilterSetLogicMatcher::ListenerFilterSetLogicMatcher(\n    absl::Span<const ::envoy::config::listener::v3::ListenerFilterChainMatchPredicate* const>\n        predicates)\n    : sub_matchers_(predicates.length()) {\n  std::transform(predicates.begin(), predicates.end(), sub_matchers_.begin(), [](const auto* pred) {\n    return ListenerFilterMatcherBuilder::buildListenerFilterMatcher(*pred);\n  });\n}\n\nbool ListenerFilterOrMatcher::matches(ListenerFilterCallbacks& cb) const {\n  return std::any_of(sub_matchers_.begin(), sub_matchers_.end(),\n                   
  [&cb](const auto& matcher) { return matcher->matches(cb); });\n}\n\nbool ListenerFilterAndMatcher::matches(ListenerFilterCallbacks& cb) const {\n  return std::all_of(sub_matchers_.begin(), sub_matchers_.end(),\n                     [&cb](const auto& matcher) { return matcher->matches(cb); });\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/filter_matcher.h",
    "content": "#pragma once\n\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/listen_socket.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * The helper to transform ListenerFilterChainMatchPredicate message to single matcher.\n */\nclass ListenerFilterMatcherBuilder {\npublic:\n  static ListenerFilterMatcherPtr buildListenerFilterMatcher(\n      const envoy::config::listener::v3::ListenerFilterChainMatchPredicate& match_config);\n};\n\n/**\n * Any matcher (always matches).\n */\nclass ListenerFilterAnyMatcher final : public ListenerFilterMatcher {\npublic:\n  bool matches(ListenerFilterCallbacks&) const override { return true; }\n};\n\nclass ListenerFilterNotMatcher final : public ListenerFilterMatcher {\npublic:\n  ListenerFilterNotMatcher(\n      const envoy::config::listener::v3::ListenerFilterChainMatchPredicate& match_config)\n      : sub_matcher_(ListenerFilterMatcherBuilder::buildListenerFilterMatcher(match_config)) {}\n  bool matches(ListenerFilterCallbacks& cb) const override { return !sub_matcher_->matches(cb); }\n\nprivate:\n  const ListenerFilterMatcherPtr sub_matcher_;\n};\n\n/**\n * Destination port matcher.\n */\nclass ListenerFilterDstPortMatcher final : public ListenerFilterMatcher {\npublic:\n  explicit ListenerFilterDstPortMatcher(const ::envoy::type::v3::Int32Range& range)\n      : start_(range.start()), end_(range.end()) {}\n  bool matches(ListenerFilterCallbacks& cb) const override {\n    const auto& address = cb.socket().localAddress();\n    // Match on destination port (only for IP addresses).\n    if (address->type() == Address::Type::Ip) {\n      const auto port = address->ip()->port();\n      return start_ <= port && port < end_;\n    } else {\n      return true;\n    }\n  }\n\nprivate:\n  const uint32_t start_;\n  const uint32_t end_;\n};\n\n/**\n * Matcher for implementing set logic.\n */\nclass 
ListenerFilterSetLogicMatcher : public ListenerFilterMatcher {\npublic:\n  explicit ListenerFilterSetLogicMatcher(\n      absl::Span<const ::envoy::config::listener::v3::ListenerFilterChainMatchPredicate* const>\n          predicates);\n\nprotected:\n  absl::FixedArray<ListenerFilterMatcherPtr> sub_matchers_;\n};\n\nclass ListenerFilterAndMatcher final : public ListenerFilterSetLogicMatcher {\npublic:\n  ListenerFilterAndMatcher(\n      absl::Span<const ::envoy::config::listener::v3::ListenerFilterChainMatchPredicate* const>\n          predicates)\n      : ListenerFilterSetLogicMatcher(predicates) {}\n  bool matches(ListenerFilterCallbacks& cb) const override;\n};\n\nclass ListenerFilterOrMatcher final : public ListenerFilterSetLogicMatcher {\npublic:\n  ListenerFilterOrMatcher(\n      absl::Span<const ::envoy::config::listener::v3::ListenerFilterChainMatchPredicate* const>\n          predicates)\n      : ListenerFilterSetLogicMatcher(predicates) {}\n  bool matches(ListenerFilterCallbacks& cb) const override;\n};\n\n} // namespace Network\n} // namespace Envoy"
  },
  {
    "path": "source/common/network/hash_policy.cc",
    "content": "#include \"common/network/hash_policy.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/type/v3/hash_policy.pb.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass SourceIpHashMethod : public HashPolicyImpl::HashMethod {\npublic:\n  absl::optional<uint64_t> evaluate(const Network::Address::Instance* downstream_addr,\n                                    const Network::Address::Instance*) const override {\n    if (downstream_addr && downstream_addr->ip()) {\n      ASSERT(!downstream_addr->ip()->addressAsString().empty());\n      return HashUtil::xxHash64(downstream_addr->ip()->addressAsString());\n    }\n\n    return absl::nullopt;\n  }\n};\n\nHashPolicyImpl::HashPolicyImpl(\n    const absl::Span<const envoy::type::v3::HashPolicy* const>& hash_policies) {\n  ASSERT(hash_policies.size() == 1);\n  switch (hash_policies[0]->policy_specifier_case()) {\n  case envoy::type::v3::HashPolicy::PolicySpecifierCase::kSourceIp:\n    hash_impl_ = std::make_unique<SourceIpHashMethod>();\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nabsl::optional<uint64_t>\nHashPolicyImpl::generateHash(const Network::Address::Instance* downstream_addr,\n                             const Network::Address::Instance* upstream_addr) const {\n  return hash_impl_->evaluate(downstream_addr, upstream_addr);\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/hash_policy.h",
    "content": "#pragma once\n\n#include \"envoy/network/hash_policy.h\"\n#include \"envoy/type/v3/hash_policy.pb.h\"\n\n#include \"common/common/hash.h\"\n\nnamespace Envoy {\nnamespace Network {\n/**\n * Implementation of HashPolicy that reads from the proto TCP proxy config.\n */\nclass HashPolicyImpl : public Network::HashPolicy {\npublic:\n  explicit HashPolicyImpl(const absl::Span<const envoy::type::v3::HashPolicy* const>& hash_policy);\n\n  // Network::HashPolicy\n  absl::optional<uint64_t>\n  generateHash(const Network::Address::Instance* downstream_addr,\n               const Network::Address::Instance* upstream_addr) const override;\n\n  class HashMethod {\n  public:\n    virtual ~HashMethod() = default;\n    virtual absl::optional<uint64_t>\n    evaluate(const Network::Address::Instance* downstream_addr,\n             const Network::Address::Instance* upstream_addr) const PURE;\n  };\n\n  using HashMethodPtr = std::unique_ptr<HashMethod>;\n\nprivate:\n  HashMethodPtr hash_impl_;\n};\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/io_socket_error_impl.cc",
    "content": "#include \"common/network/io_socket_error_impl.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nApi::IoError::IoErrorCode IoSocketError::getErrorCode() const {\n  switch (errno_) {\n  case SOCKET_ERROR_AGAIN:\n    ASSERT(this == IoSocketError::getIoSocketEagainInstance(),\n           \"Didn't use getIoSocketEagainInstance() to generate `Again`.\");\n    return IoErrorCode::Again;\n  case SOCKET_ERROR_NOT_SUP:\n    return IoErrorCode::NoSupport;\n  case SOCKET_ERROR_AF_NO_SUP:\n    return IoErrorCode::AddressFamilyNoSupport;\n  case SOCKET_ERROR_IN_PROGRESS:\n    return IoErrorCode::InProgress;\n  case SOCKET_ERROR_PERM:\n    return IoErrorCode::Permission;\n  case SOCKET_ERROR_MSG_SIZE:\n    return IoErrorCode::MessageTooBig;\n  case SOCKET_ERROR_INTR:\n    return IoErrorCode::Interrupt;\n  case SOCKET_ERROR_ADDR_NOT_AVAIL:\n    return IoErrorCode::AddressNotAvailable;\n  default:\n    ENVOY_LOG_MISC(debug, \"Unknown error code {} details {}\", errno_, getErrorDetails());\n    return IoErrorCode::UnknownError;\n  }\n}\n\nstd::string IoSocketError::getErrorDetails() const { return errorDetails(errno_); }\n\nIoSocketError* IoSocketError::getIoSocketEagainInstance() {\n  static auto* instance = new IoSocketError(SOCKET_ERROR_AGAIN);\n  return instance;\n}\n\nvoid IoSocketError::deleteIoError(Api::IoError* err) {\n  ASSERT(err != nullptr);\n  if (err != getIoSocketEagainInstance()) {\n    delete err;\n  }\n}\n\ninline IoSocketError* getIoSocketInvalidAddressInstance() {\n  static auto* instance = new IoSocketError(SOCKET_ERROR_NOT_SUP);\n  return instance;\n}\n\nApi::IoCallUint64Result IoSocketError::ioResultSocketInvalidAddress() {\n  return Api::IoCallUint64Result(\n      0, Api::IoErrorPtr(getIoSocketInvalidAddressInstance(), [](IoError*) {}));\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/io_socket_error_impl.h",
    "content": "#pragma once\n\n#include \"envoy/api/io_error.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass IoSocketError : public Api::IoError {\npublic:\n  explicit IoSocketError(int sys_errno) : errno_(sys_errno) {}\n\n  ~IoSocketError() override = default;\n\n  Api::IoError::IoErrorCode getErrorCode() const override;\n  std::string getErrorDetails() const override;\n\n  // IoErrorCode::Again is used frequently. Define it to be a singleton to avoid frequent memory\n  // allocation of such instance. If this is used, IoHandleCallResult has to be instantiated with\n  // deleter deleteIoError() below to avoid deallocating memory for this error.\n  static IoSocketError* getIoSocketEagainInstance();\n\n  // This error is introduced when Envoy create socket for unsupported address. It is either a bug,\n  // or this Envoy instance received config which is not yet supported. This should not be fatal\n  // error.\n  static Api::IoCallUint64Result ioResultSocketInvalidAddress();\n\n  // Deallocate memory only if the error is not Again.\n  static void deleteIoError(Api::IoError* err);\n\nprivate:\n  int errno_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/io_socket_handle_impl.cc",
    "content": "#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/event/file_event_impl.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/types/optional.h\"\n\nusing Envoy::Api::SysCallIntResult;\nusing Envoy::Api::SysCallSizeResult;\n\nnamespace Envoy {\n\nnamespace {\n/**\n * On different platforms the sockaddr struct for unix domain\n * sockets is different. We use this function to get the\n * length of the platform specific struct.\n */\nconstexpr socklen_t udsAddressLength() {\n#if defined(__APPLE__)\n  return sizeof(sockaddr);\n#elif defined(WIN32)\n  return sizeof(sockaddr_un);\n#else\n  return sizeof(sa_family_t);\n#endif\n}\n\nconstexpr int messageTypeContainsIP() {\n#ifdef IP_RECVDSTADDR\n  return IP_RECVDSTADDR;\n#else\n  return IP_PKTINFO;\n#endif\n}\n\nin_addr addressFromMessage(const cmsghdr& cmsg) {\n#ifdef IP_RECVDSTADDR\n  return *reinterpret_cast<const in_addr*>(CMSG_DATA(&cmsg));\n#else\n  auto info = reinterpret_cast<const in_pktinfo*>(CMSG_DATA(&cmsg));\n  return info->ipi_addr;\n#endif\n}\n\n} // namespace\n\nnamespace Network {\n\nIoSocketHandleImpl::~IoSocketHandleImpl() {\n  if (SOCKET_VALID(fd_)) {\n    IoSocketHandleImpl::close();\n  }\n}\n\nApi::IoCallUint64Result IoSocketHandleImpl::close() {\n  ASSERT(SOCKET_VALID(fd_));\n  const int rc = Api::OsSysCallsSingleton::get().close(fd_).rc_;\n  SET_SOCKET_INVALID(fd_);\n  return Api::IoCallUint64Result(rc, Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError));\n}\n\nbool IoSocketHandleImpl::isOpen() const { return SOCKET_VALID(fd_); }\n\nApi::IoCallUint64Result IoSocketHandleImpl::readv(uint64_t max_length, Buffer::RawSlice* slices,\n                                                  uint64_t num_slice) {\n  absl::FixedArray<iovec> iov(num_slice);\n  uint64_t num_slices_to_read = 0;\n  
uint64_t num_bytes_to_read = 0;\n  for (; num_slices_to_read < num_slice && num_bytes_to_read < max_length; num_slices_to_read++) {\n    iov[num_slices_to_read].iov_base = slices[num_slices_to_read].mem_;\n    const size_t slice_length = std::min(slices[num_slices_to_read].len_,\n                                         static_cast<size_t>(max_length - num_bytes_to_read));\n    iov[num_slices_to_read].iov_len = slice_length;\n    num_bytes_to_read += slice_length;\n  }\n  ASSERT(num_bytes_to_read <= max_length);\n  return sysCallResultToIoCallResult(Api::OsSysCallsSingleton::get().readv(\n      fd_, iov.begin(), static_cast<int>(num_slices_to_read)));\n}\n\nApi::IoCallUint64Result IoSocketHandleImpl::read(Buffer::Instance& buffer, uint64_t max_length) {\n  if (max_length == 0) {\n    return Api::ioCallUint64ResultNoError();\n  }\n  constexpr uint64_t MaxSlices = 2;\n  Buffer::RawSlice slices[MaxSlices];\n  const uint64_t num_slices = buffer.reserve(max_length, slices, MaxSlices);\n  Api::IoCallUint64Result result = readv(max_length, slices, num_slices);\n  uint64_t bytes_to_commit = result.ok() ? 
result.rc_ : 0;\n  ASSERT(bytes_to_commit <= max_length);\n  for (uint64_t i = 0; i < num_slices; i++) {\n    slices[i].len_ = std::min(slices[i].len_, static_cast<size_t>(bytes_to_commit));\n    bytes_to_commit -= slices[i].len_;\n  }\n  buffer.commit(slices, num_slices);\n  return result;\n}\n\nApi::IoCallUint64Result IoSocketHandleImpl::writev(const Buffer::RawSlice* slices,\n                                                   uint64_t num_slice) {\n  absl::FixedArray<iovec> iov(num_slice);\n  uint64_t num_slices_to_write = 0;\n  for (uint64_t i = 0; i < num_slice; i++) {\n    if (slices[i].mem_ != nullptr && slices[i].len_ != 0) {\n      iov[num_slices_to_write].iov_base = slices[i].mem_;\n      iov[num_slices_to_write].iov_len = slices[i].len_;\n      num_slices_to_write++;\n    }\n  }\n  if (num_slices_to_write == 0) {\n    return Api::ioCallUint64ResultNoError();\n  }\n  return sysCallResultToIoCallResult(\n      Api::OsSysCallsSingleton::get().writev(fd_, iov.begin(), num_slices_to_write));\n}\n\nApi::IoCallUint64Result IoSocketHandleImpl::write(Buffer::Instance& buffer) {\n  constexpr uint64_t MaxSlices = 16;\n  Buffer::RawSliceVector slices = buffer.getRawSlices(MaxSlices);\n  Api::IoCallUint64Result result = writev(slices.begin(), slices.size());\n  if (result.ok() && result.rc_ > 0) {\n    buffer.drain(static_cast<uint64_t>(result.rc_));\n  }\n  return result;\n}\n\nApi::IoCallUint64Result IoSocketHandleImpl::sendmsg(const Buffer::RawSlice* slices,\n                                                    uint64_t num_slice, int flags,\n                                                    const Address::Ip* self_ip,\n                                                    const Address::Instance& peer_address) {\n  const auto* address_base = dynamic_cast<const Address::InstanceBase*>(&peer_address);\n  sockaddr* sock_addr = const_cast<sockaddr*>(address_base->sockAddr());\n  if (sock_addr == nullptr) {\n    // Unlikely to happen unless the wrong peer address is 
passed.\n    return IoSocketError::ioResultSocketInvalidAddress();\n  }\n  absl::FixedArray<iovec> iov(num_slice);\n  uint64_t num_slices_to_write = 0;\n  for (uint64_t i = 0; i < num_slice; i++) {\n    if (slices[i].mem_ != nullptr && slices[i].len_ != 0) {\n      iov[num_slices_to_write].iov_base = slices[i].mem_;\n      iov[num_slices_to_write].iov_len = slices[i].len_;\n      num_slices_to_write++;\n    }\n  }\n  if (num_slices_to_write == 0) {\n    return Api::ioCallUint64ResultNoError();\n  }\n\n  msghdr message;\n  message.msg_name = reinterpret_cast<void*>(sock_addr);\n  message.msg_namelen = address_base->sockAddrLen();\n  message.msg_iov = iov.begin();\n  message.msg_iovlen = num_slices_to_write;\n  message.msg_flags = 0;\n  auto& os_syscalls = Api::OsSysCallsSingleton::get();\n  if (self_ip == nullptr) {\n    message.msg_control = nullptr;\n    message.msg_controllen = 0;\n    const Api::SysCallSizeResult result = os_syscalls.sendmsg(fd_, &message, flags);\n    return sysCallResultToIoCallResult(result);\n  } else {\n    const size_t space_v6 = CMSG_SPACE(sizeof(in6_pktinfo));\n    const size_t space_v4 = CMSG_SPACE(sizeof(in_pktinfo));\n\n    // FreeBSD only needs in_addr size, but allocates more to unify code in two platforms.\n    const size_t cmsg_space = (self_ip->version() == Address::IpVersion::v4) ? 
space_v4 : space_v6;\n    // kSpaceForIp should be big enough to hold both IPv4 and IPv6 packet info.\n    absl::FixedArray<char> cbuf(cmsg_space);\n    memset(cbuf.begin(), 0, cmsg_space);\n\n    message.msg_control = cbuf.begin();\n    message.msg_controllen = cmsg_space;\n    cmsghdr* const cmsg = CMSG_FIRSTHDR(&message);\n    RELEASE_ASSERT(cmsg != nullptr, fmt::format(\"cbuf with size {} is not enough, cmsghdr size {}\",\n                                                sizeof(cbuf), sizeof(cmsghdr)));\n    if (self_ip->version() == Address::IpVersion::v4) {\n      cmsg->cmsg_level = IPPROTO_IP;\n#ifndef IP_SENDSRCADDR\n      cmsg->cmsg_len = CMSG_LEN(sizeof(in_pktinfo));\n      cmsg->cmsg_type = IP_PKTINFO;\n      auto pktinfo = reinterpret_cast<in_pktinfo*>(CMSG_DATA(cmsg));\n      pktinfo->ipi_ifindex = 0;\n#ifdef WIN32\n      pktinfo->ipi_addr.s_addr = self_ip->ipv4()->address();\n#else\n      pktinfo->ipi_spec_dst.s_addr = self_ip->ipv4()->address();\n#endif\n#else\n      cmsg->cmsg_type = IP_SENDSRCADDR;\n      cmsg->cmsg_len = CMSG_LEN(sizeof(in_addr));\n      *(reinterpret_cast<struct in_addr*>(CMSG_DATA(cmsg))).s_addr = self_ip->ipv4()->address();\n#endif\n    } else if (self_ip->version() == Address::IpVersion::v6) {\n      cmsg->cmsg_len = CMSG_LEN(sizeof(in6_pktinfo));\n      cmsg->cmsg_level = IPPROTO_IPV6;\n      cmsg->cmsg_type = IPV6_PKTINFO;\n      auto pktinfo = reinterpret_cast<in6_pktinfo*>(CMSG_DATA(cmsg));\n      pktinfo->ipi6_ifindex = 0;\n      *(reinterpret_cast<absl::uint128*>(pktinfo->ipi6_addr.s6_addr)) = self_ip->ipv6()->address();\n    }\n    const Api::SysCallSizeResult result = os_syscalls.sendmsg(fd_, &message, flags);\n    return sysCallResultToIoCallResult(result);\n  }\n}\n\nAddress::InstanceConstSharedPtr getAddressFromSockAddrOrDie(const sockaddr_storage& ss,\n                                                            socklen_t ss_len, os_fd_t fd) {\n  try {\n    // Set v6only to false so that mapped-v6 address can be 
normalize to v4\n    // address. Though dual stack may be disabled, it's still okay to assume the\n    // address is from a dual stack socket. This is because mapped-v6 address\n    // must come from a dual stack socket. An actual v6 address can come from\n    // both dual stack socket and v6 only socket. If |peer_addr| is an actual v6\n    // address and the socket is actually v6 only, the returned address will be\n    // regarded as a v6 address from dual stack socket. However, this address is not going to be\n    // used to create socket. Wrong knowledge of dual stack support won't hurt.\n    return Address::addressFromSockAddr(ss, ss_len, /*v6only=*/false);\n  } catch (const EnvoyException& e) {\n    PANIC(fmt::format(\"Invalid address for fd: {}, error: {}\", fd, e.what()));\n  }\n}\n\nAddress::InstanceConstSharedPtr maybeGetDstAddressFromHeader(const cmsghdr& cmsg,\n                                                             uint32_t self_port, os_fd_t fd) {\n  if (cmsg.cmsg_type == IPV6_PKTINFO) {\n    auto info = reinterpret_cast<const in6_pktinfo*>(CMSG_DATA(&cmsg));\n    sockaddr_storage ss;\n    auto ipv6_addr = reinterpret_cast<sockaddr_in6*>(&ss);\n    memset(ipv6_addr, 0, sizeof(sockaddr_in6));\n    ipv6_addr->sin6_family = AF_INET6;\n    ipv6_addr->sin6_addr = info->ipi6_addr;\n    ipv6_addr->sin6_port = htons(self_port);\n    return getAddressFromSockAddrOrDie(ss, sizeof(sockaddr_in6), fd);\n  }\n\n  if (cmsg.cmsg_type == messageTypeContainsIP()) {\n    sockaddr_storage ss;\n    auto ipv4_addr = reinterpret_cast<sockaddr_in*>(&ss);\n    memset(ipv4_addr, 0, sizeof(sockaddr_in));\n    ipv4_addr->sin_family = AF_INET;\n    ipv4_addr->sin_addr = addressFromMessage(cmsg);\n    ipv4_addr->sin_port = htons(self_port);\n    return getAddressFromSockAddrOrDie(ss, sizeof(sockaddr_in), fd);\n  }\n\n  return nullptr;\n}\n\nabsl::optional<uint32_t> maybeGetPacketsDroppedFromHeader([[maybe_unused]] const cmsghdr& cmsg) {\n#ifdef SO_RXQ_OVFL\n  if 
(cmsg.cmsg_type == SO_RXQ_OVFL) {\n    return *reinterpret_cast<const uint32_t*>(CMSG_DATA(&cmsg));\n  }\n#endif\n  return absl::nullopt;\n}\n\nApi::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices,\n                                                    const uint64_t num_slice, uint32_t self_port,\n                                                    RecvMsgOutput& output) {\n  ASSERT(!output.msg_.empty());\n\n  absl::FixedArray<char> cbuf(cmsg_space_);\n  memset(cbuf.begin(), 0, cmsg_space_);\n\n  absl::FixedArray<iovec> iov(num_slice);\n  uint64_t num_slices_for_read = 0;\n  for (uint64_t i = 0; i < num_slice; i++) {\n    if (slices[i].mem_ != nullptr && slices[i].len_ != 0) {\n      iov[num_slices_for_read].iov_base = slices[i].mem_;\n      iov[num_slices_for_read].iov_len = slices[i].len_;\n      ++num_slices_for_read;\n    }\n  }\n  if (num_slices_for_read == 0) {\n    return Api::ioCallUint64ResultNoError();\n  }\n\n  sockaddr_storage peer_addr;\n  msghdr hdr;\n  hdr.msg_name = &peer_addr;\n  hdr.msg_namelen = sizeof(sockaddr_storage);\n  hdr.msg_iov = iov.begin();\n  hdr.msg_iovlen = num_slices_for_read;\n  hdr.msg_flags = 0;\n  hdr.msg_control = cbuf.begin();\n  hdr.msg_controllen = cmsg_space_;\n  const Api::SysCallSizeResult result = Api::OsSysCallsSingleton::get().recvmsg(fd_, &hdr, 0);\n  if (result.rc_ < 0) {\n    return sysCallResultToIoCallResult(result);\n  }\n\n  RELEASE_ASSERT((hdr.msg_flags & MSG_CTRUNC) == 0,\n                 fmt::format(\"Incorrectly set control message length: {}\", hdr.msg_controllen));\n  RELEASE_ASSERT(hdr.msg_namelen > 0,\n                 fmt::format(\"Unable to get remote address from recvmsg() for fd: {}\", fd_));\n  output.msg_[0].peer_address_ = getAddressFromSockAddrOrDie(peer_addr, hdr.msg_namelen, fd_);\n  output.msg_[0].gso_size_ = 0;\n\n  if (hdr.msg_controllen > 0) {\n    // Get overflow, local address and gso_size from control message.\n    for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr); 
cmsg != nullptr;\n         cmsg = CMSG_NXTHDR(&hdr, cmsg)) {\n\n      if (output.msg_[0].local_address_ == nullptr) {\n        Address::InstanceConstSharedPtr addr = maybeGetDstAddressFromHeader(*cmsg, self_port, fd_);\n        if (addr != nullptr) {\n          // This is a IP packet info message.\n          output.msg_[0].local_address_ = std::move(addr);\n          continue;\n        }\n      }\n      if (output.dropped_packets_ != nullptr) {\n        absl::optional<uint32_t> maybe_dropped = maybeGetPacketsDroppedFromHeader(*cmsg);\n        if (maybe_dropped) {\n          *output.dropped_packets_ = *maybe_dropped;\n          continue;\n        }\n      }\n#ifdef UDP_GRO\n      if (cmsg->cmsg_level == SOL_UDP && cmsg->cmsg_type == UDP_GRO) {\n        output.msg_[0].gso_size_ = *reinterpret_cast<uint16_t*>(CMSG_DATA(cmsg));\n      }\n#endif\n    }\n  }\n\n  return sysCallResultToIoCallResult(result);\n}\n\nApi::IoCallUint64Result IoSocketHandleImpl::recvmmsg(RawSliceArrays& slices, uint32_t self_port,\n                                                     RecvMsgOutput& output) {\n  ASSERT(output.msg_.size() == slices.size());\n  if (slices.empty()) {\n    return sysCallResultToIoCallResult(Api::SysCallIntResult{0, SOCKET_ERROR_AGAIN});\n  }\n  const uint32_t num_packets_per_mmsg_call = slices.size();\n  absl::FixedArray<mmsghdr> mmsg_hdr(num_packets_per_mmsg_call);\n  absl::FixedArray<absl::FixedArray<struct iovec>> iovs(\n      num_packets_per_mmsg_call, absl::FixedArray<struct iovec>(slices[0].size()));\n  absl::FixedArray<sockaddr_storage> raw_addresses(num_packets_per_mmsg_call);\n  absl::FixedArray<absl::FixedArray<char>> cbufs(num_packets_per_mmsg_call,\n                                                 absl::FixedArray<char>(cmsg_space_));\n\n  for (uint32_t i = 0; i < num_packets_per_mmsg_call; ++i) {\n    memset(&raw_addresses[i], 0, sizeof(sockaddr_storage));\n    memset(cbufs[i].data(), 0, cbufs[i].size());\n\n    mmsg_hdr[i].msg_len = 0;\n\n    msghdr* 
hdr = &mmsg_hdr[i].msg_hdr;\n    hdr->msg_name = &raw_addresses[i];\n    hdr->msg_namelen = sizeof(sockaddr_storage);\n    ASSERT(!slices[i].empty());\n\n    for (size_t j = 0; j < slices[i].size(); ++j) {\n      iovs[i][j].iov_base = slices[i][j].mem_;\n      iovs[i][j].iov_len = slices[i][j].len_;\n    }\n    hdr->msg_iov = iovs[i].data();\n    hdr->msg_iovlen = slices[i].size();\n    hdr->msg_control = cbufs[i].data();\n    hdr->msg_controllen = cbufs[i].size();\n  }\n\n  // Set MSG_WAITFORONE so that recvmmsg will not waiting for\n  // |num_packets_per_mmsg_call| packets to arrive before returning when the\n  // socket is a blocking socket.\n  const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().recvmmsg(\n      fd_, mmsg_hdr.data(), num_packets_per_mmsg_call, MSG_TRUNC | MSG_WAITFORONE, nullptr);\n\n  if (result.rc_ <= 0) {\n    return sysCallResultToIoCallResult(result);\n  }\n\n  int num_packets_read = result.rc_;\n\n  for (int i = 0; i < num_packets_read; ++i) {\n    if (mmsg_hdr[i].msg_len == 0) {\n      continue;\n    }\n    msghdr& hdr = mmsg_hdr[i].msg_hdr;\n    RELEASE_ASSERT((hdr.msg_flags & MSG_CTRUNC) == 0,\n                   fmt::format(\"Incorrectly set control message length: {}\", hdr.msg_controllen));\n    RELEASE_ASSERT(hdr.msg_namelen > 0,\n                   fmt::format(\"Unable to get remote address from recvmmsg() for fd: {}\", fd_));\n    if ((hdr.msg_flags & MSG_TRUNC) != 0) {\n      ENVOY_LOG_MISC(warn, \"Dropping truncated UDP packet with size: {}.\", mmsg_hdr[i].msg_len);\n      continue;\n    }\n\n    output.msg_[i].msg_len_ = mmsg_hdr[i].msg_len;\n    // Get local and peer addresses for each packet.\n    output.msg_[i].peer_address_ =\n        getAddressFromSockAddrOrDie(raw_addresses[i], hdr.msg_namelen, fd_);\n    if (hdr.msg_controllen > 0) {\n      struct cmsghdr* cmsg;\n      for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg != nullptr; cmsg = CMSG_NXTHDR(&hdr, cmsg)) {\n        Address::InstanceConstSharedPtr addr = 
maybeGetDstAddressFromHeader(*cmsg, self_port, fd_);\n        if (addr != nullptr) {\n          // This is a IP packet info message.\n          output.msg_[i].local_address_ = std::move(addr);\n          break;\n        }\n      }\n    }\n  }\n  // Get overflow from first packet header.\n  if (output.dropped_packets_ != nullptr) {\n    msghdr& hdr = mmsg_hdr[0].msg_hdr;\n    if (hdr.msg_controllen > 0) {\n      struct cmsghdr* cmsg;\n      for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg != nullptr; cmsg = CMSG_NXTHDR(&hdr, cmsg)) {\n        absl::optional<uint32_t> maybe_dropped = maybeGetPacketsDroppedFromHeader(*cmsg);\n        if (maybe_dropped) {\n          *output.dropped_packets_ = *maybe_dropped;\n        }\n      }\n    }\n  }\n  return sysCallResultToIoCallResult(result);\n}\n\nApi::IoCallUint64Result IoSocketHandleImpl::recv(void* buffer, size_t length, int flags) {\n  const Api::SysCallSizeResult result =\n      Api::OsSysCallsSingleton::get().recv(fd_, buffer, length, flags);\n  return sysCallResultToIoCallResult(result);\n}\n\nbool IoSocketHandleImpl::supportsMmsg() const {\n  return Api::OsSysCallsSingleton::get().supportsMmsg();\n}\n\nbool IoSocketHandleImpl::supportsUdpGro() const {\n  return Api::OsSysCallsSingleton::get().supportsUdpGro();\n}\n\nApi::SysCallIntResult IoSocketHandleImpl::bind(Address::InstanceConstSharedPtr address) {\n  return Api::OsSysCallsSingleton::get().bind(fd_, address->sockAddr(), address->sockAddrLen());\n}\n\nApi::SysCallIntResult IoSocketHandleImpl::listen(int backlog) {\n  return Api::OsSysCallsSingleton::get().listen(fd_, backlog);\n}\n\nIoHandlePtr IoSocketHandleImpl::accept(struct sockaddr* addr, socklen_t* addrlen) {\n  auto result = Api::OsSysCallsSingleton::get().accept(fd_, addr, addrlen);\n  if (SOCKET_INVALID(result.rc_)) {\n    return nullptr;\n  }\n\n  return std::make_unique<IoSocketHandleImpl>(result.rc_, socket_v6only_, domain_);\n}\n\nApi::SysCallIntResult 
IoSocketHandleImpl::connect(Address::InstanceConstSharedPtr address) {\n  return Api::OsSysCallsSingleton::get().connect(fd_, address->sockAddr(), address->sockAddrLen());\n}\n\nApi::SysCallIntResult IoSocketHandleImpl::setOption(int level, int optname, const void* optval,\n                                                    socklen_t optlen) {\n  return Api::OsSysCallsSingleton::get().setsockopt(fd_, level, optname, optval, optlen);\n}\n\nApi::SysCallIntResult IoSocketHandleImpl::getOption(int level, int optname, void* optval,\n                                                    socklen_t* optlen) {\n  return Api::OsSysCallsSingleton::get().getsockopt(fd_, level, optname, optval, optlen);\n}\n\nApi::SysCallIntResult IoSocketHandleImpl::setBlocking(bool blocking) {\n  return Api::OsSysCallsSingleton::get().setsocketblocking(fd_, blocking);\n}\n\nabsl::optional<int> IoSocketHandleImpl::domain() { return domain_; }\n\nAddress::InstanceConstSharedPtr IoSocketHandleImpl::localAddress() {\n  sockaddr_storage ss;\n  socklen_t ss_len = sizeof(ss);\n  auto& os_sys_calls = Api::OsSysCallsSingleton::get();\n  Api::SysCallIntResult result =\n      os_sys_calls.getsockname(fd_, reinterpret_cast<sockaddr*>(&ss), &ss_len);\n  if (result.rc_ != 0) {\n    throw EnvoyException(fmt::format(\"getsockname failed for '{}': ({}) {}\", fd_, result.errno_,\n                                     errorDetails(result.errno_)));\n  }\n  return Address::addressFromSockAddr(ss, ss_len, socket_v6only_);\n}\n\nAddress::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() {\n  sockaddr_storage ss;\n  socklen_t ss_len = sizeof ss;\n  auto& os_sys_calls = Api::OsSysCallsSingleton::get();\n  Api::SysCallIntResult result =\n      os_sys_calls.getpeername(fd_, reinterpret_cast<sockaddr*>(&ss), &ss_len);\n  if (result.rc_ != 0) {\n    throw EnvoyException(\n        fmt::format(\"getpeername failed for '{}': {}\", fd_, errorDetails(result.errno_)));\n  }\n\n  if (ss_len == udsAddressLength() && 
ss.ss_family == AF_UNIX) {\n    // For Unix domain sockets, can't find out the peer name, but it should match our own\n    // name for the socket (i.e. the path should match, barring any namespace or other\n    // mechanisms to hide things, of which there are many).\n    ss_len = sizeof ss;\n    result = os_sys_calls.getsockname(fd_, reinterpret_cast<sockaddr*>(&ss), &ss_len);\n    if (result.rc_ != 0) {\n      throw EnvoyException(\n          fmt::format(\"getsockname failed for '{}': {}\", fd_, errorDetails(result.errno_)));\n    }\n  }\n  return Address::addressFromSockAddr(ss, ss_len);\n}\n\nEvent::FileEventPtr IoSocketHandleImpl::createFileEvent(Event::Dispatcher& dispatcher,\n                                                        Event::FileReadyCb cb,\n                                                        Event::FileTriggerType trigger,\n                                                        uint32_t events) {\n  return dispatcher.createFileEvent(fd_, cb, trigger, events);\n}\n\nApi::SysCallIntResult IoSocketHandleImpl::shutdown(int how) {\n  return Api::OsSysCallsSingleton::get().shutdown(fd_, how);\n}\n\nabsl::optional<std::chrono::milliseconds> IoSocketHandleImpl::lastRoundTripTime() {\n#ifdef TCP_INFO\n  struct tcp_info ti;\n  socklen_t len = sizeof(ti);\n  if (!SOCKET_FAILURE(\n          Api::OsSysCallsSingleton::get().getsockopt(fd_, IPPROTO_TCP, TCP_INFO, &ti, &len).rc_)) {\n    return std::chrono::milliseconds(ti.tcpi_rtt);\n  }\n#endif\n\n  return {};\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/io_socket_handle_impl.h",
    "content": "#pragma once\n\n#include \"envoy/api/io_error.h\"\n#include \"envoy/api/os_sys_calls.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/io_handle.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/network/io_socket_error_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * IoHandle derivative for sockets.\n */\nclass IoSocketHandleImpl : public IoHandle, protected Logger::Loggable<Logger::Id::io> {\npublic:\n  explicit IoSocketHandleImpl(os_fd_t fd = INVALID_SOCKET, bool socket_v6only = false,\n                              absl::optional<int> domain = absl::nullopt)\n      : fd_(fd), socket_v6only_(socket_v6only), domain_(domain) {}\n\n  // Close underlying socket if close() hasn't been call yet.\n  ~IoSocketHandleImpl() override;\n\n  // TODO(sbelair2)  To be removed when the fd is fully abstracted from clients.\n  os_fd_t fdDoNotUse() const override { return fd_; }\n\n  Api::IoCallUint64Result close() override;\n\n  bool isOpen() const override;\n\n  Api::IoCallUint64Result readv(uint64_t max_length, Buffer::RawSlice* slices,\n                                uint64_t num_slice) override;\n  Api::IoCallUint64Result read(Buffer::Instance& buffer, uint64_t max_length) override;\n\n  Api::IoCallUint64Result writev(const Buffer::RawSlice* slices, uint64_t num_slice) override;\n\n  Api::IoCallUint64Result write(Buffer::Instance& buffer) override;\n\n  Api::IoCallUint64Result sendmsg(const Buffer::RawSlice* slices, uint64_t num_slice, int flags,\n                                  const Address::Ip* self_ip,\n                                  const Address::Instance& peer_address) override;\n\n  Api::IoCallUint64Result recvmsg(Buffer::RawSlice* slices, const uint64_t num_slice,\n                                  uint32_t self_port, RecvMsgOutput& output) override;\n\n  Api::IoCallUint64Result recvmmsg(RawSliceArrays& slices, uint32_t self_port,\n                                   
RecvMsgOutput& output) override;\n  Api::IoCallUint64Result recv(void* buffer, size_t length, int flags) override;\n\n  bool supportsMmsg() const override;\n  bool supportsUdpGro() const override;\n\n  Api::SysCallIntResult bind(Address::InstanceConstSharedPtr address) override;\n  Api::SysCallIntResult listen(int backlog) override;\n  IoHandlePtr accept(struct sockaddr* addr, socklen_t* addrlen) override;\n  Api::SysCallIntResult connect(Address::InstanceConstSharedPtr address) override;\n  Api::SysCallIntResult setOption(int level, int optname, const void* optval,\n                                  socklen_t optlen) override;\n  Api::SysCallIntResult getOption(int level, int optname, void* optval, socklen_t* optlen) override;\n  Api::SysCallIntResult setBlocking(bool blocking) override;\n  absl::optional<int> domain() override;\n  Address::InstanceConstSharedPtr localAddress() override;\n  Address::InstanceConstSharedPtr peerAddress() override;\n  Event::FileEventPtr createFileEvent(Event::Dispatcher& dispatcher, Event::FileReadyCb cb,\n                                      Event::FileTriggerType trigger, uint32_t events) override;\n  Api::SysCallIntResult shutdown(int how) override;\n  absl::optional<std::chrono::milliseconds> lastRoundTripTime() override;\n\nprotected:\n  // Converts a SysCallSizeResult to IoCallUint64Result.\n  template <typename T>\n  Api::IoCallUint64Result sysCallResultToIoCallResult(const Api::SysCallResult<T>& result) {\n    if (result.rc_ >= 0) {\n      // Return nullptr as IoError upon success.\n      return Api::IoCallUint64Result(result.rc_,\n                                     Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError));\n    }\n    RELEASE_ASSERT(result.errno_ != SOCKET_ERROR_INVAL, \"Invalid argument passed in.\");\n    return Api::IoCallUint64Result(\n        /*rc=*/0,\n        (result.errno_ == SOCKET_ERROR_AGAIN\n             // EAGAIN is frequent enough that its memory allocation should be avoided.\n             ? 
Api::IoErrorPtr(IoSocketError::getIoSocketEagainInstance(),\n                               IoSocketError::deleteIoError)\n             : Api::IoErrorPtr(new IoSocketError(result.errno_), IoSocketError::deleteIoError)));\n  }\n\n  os_fd_t fd_;\n  int socket_v6only_{false};\n  const absl::optional<int> domain_;\n\n  // The minimum cmsg buffer size to filled in destination address, packets dropped and gso\n  // size when receiving a packet. It is possible for a received packet to contain both IPv4\n  // and IPV6 addresses.\n  const size_t cmsg_space_{CMSG_SPACE(sizeof(int)) + CMSG_SPACE(sizeof(struct in_pktinfo)) +\n                           CMSG_SPACE(sizeof(struct in6_pktinfo)) + CMSG_SPACE(sizeof(uint16_t))};\n};\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/lc_trie.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <climits>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/network/address.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/cidr_range.h\"\n#include \"common/network/utility.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/numeric/int128.h\"\n#include \"fmt/format.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace LcTrie {\n\n/**\n * Maximum number of nodes an LC trie can hold.\n * @note If the size of LcTrieInternal::LcNode::address_ ever changes, this constant\n *       should be changed to match.\n */\nconstexpr size_t MaxLcTrieNodes = (1 << 20);\n\n/**\n * Level Compressed Trie for associating data with CIDR ranges. Both IPv4 and IPv6 addresses are\n * supported within this class with no calling pattern changes.\n *\n * The algorithm to build the LC-Trie is described in the paper 'IP-address lookup using LC-tries'\n * by 'S. Nilsson' and 'G. Karlsson'. The paper and reference C implementation can be found here:\n * https://www.nada.kth.se/~snilsson/publications/IP-address-lookup-using-LC-tries/\n *\n * Refer to LcTrieInternal for implementation and algorithm details.\n */\ntemplate <class T> class LcTrie {\npublic:\n  /**\n   * @param data supplies a vector of data and CIDR ranges.\n   * @param exclusive if true then only data for the most specific subnet will be returned\n                      (i.e. data isn't inherited from wider ranges).\n   * @param fill_factor supplies the fraction of completeness to use when calculating the branch\n   *                    value for a sub-trie.\n   * @param root_branching_factor supplies the branching factor at the root.\n   *\n   * TODO(ccaraman): Investigate if a non-zero root branching factor should be the default. 
The\n   * paper suggests for large LC-Tries to use the value '16'. It reduces the depth of the trie.\n   * However, there is no suggested values for smaller LC-Tries. With perf tests, it is possible to\n   * get this data for smaller LC-Tries. Another option is to expose this in the configuration and\n   * let consumers decide.\n   */\n  LcTrie(const std::vector<std::pair<T, std::vector<Address::CidrRange>>>& data,\n         bool exclusive = false, double fill_factor = 0.5, uint32_t root_branching_factor = 0) {\n\n    // The LcTrie implementation uses 20-bit \"pointers\" in its compact internal representation,\n    // so it cannot hold more than 2^20 nodes. But the number of nodes can be greater than the\n    // number of supported prefixes. Given N prefixes in the data input list, step 2 below can\n    // produce a new list of up to 2*N prefixes to insert in the LC trie. And the LC trie can\n    // use up to 2*N/fill_factor nodes.\n    size_t num_prefixes = 0;\n    for (const auto& pair_data : data) {\n      num_prefixes += pair_data.second.size();\n    }\n    const size_t max_prefixes = MaxLcTrieNodes * fill_factor / 2;\n    if (num_prefixes > max_prefixes) {\n      ExceptionUtil::throwEnvoyException(\n          fmt::format(\"The input vector has '{0}' CIDR range entries. 
LC-Trie \"\n                      \"can only support '{1}' CIDR ranges with the specified \"\n                      \"fill factor.\",\n                      num_prefixes, max_prefixes));\n    }\n\n    // Step 1: separate the provided prefixes by protocol (IPv4 vs IPv6),\n    // and build a Binary Trie per protocol.\n    //\n    // For example, if the input prefixes are\n    //   A: 0.0.0.0/0\n    //   B: 128.0.0.0/2  (10000000.0.0.0/2 in binary)\n    //   C: 192.0.0.0/2  (11000000.0.0.0/2)\n    // the Binary Trie for IPv4 will look like this at the end of step 1:\n    //          +---+\n    //          | A |\n    //          +---+\n    //               \\ 1\n    //              +---+\n    //              |   |\n    //              +---+\n    //            0/     \\1\n    //          +---+   +---+\n    //          | B |   | C |\n    //          +---+   +---+\n    //\n    // Note that the prefixes in this example are nested: any IPv4 address\n    // that matches B or C will also match A. Unfortunately, the classic LC Trie\n    // algorithm does not support nested prefixes. 
The next step will solve that\n    // problem.\n\n    BinaryTrie<Ipv4> ipv4_temp(exclusive);\n    BinaryTrie<Ipv6> ipv6_temp(exclusive);\n    for (const auto& pair_data : data) {\n      for (const auto& cidr_range : pair_data.second) {\n        if (cidr_range.ip()->version() == Address::IpVersion::v4) {\n          IpPrefix<Ipv4> ip_prefix(ntohl(cidr_range.ip()->ipv4()->address()), cidr_range.length(),\n                                   pair_data.first);\n          ipv4_temp.insert(ip_prefix);\n        } else {\n          IpPrefix<Ipv6> ip_prefix(Utility::Ip6ntohl(cidr_range.ip()->ipv6()->address()),\n                                   cidr_range.length(), pair_data.first);\n          ipv6_temp.insert(ip_prefix);\n        }\n      }\n    }\n\n    // Step 2: push each Binary Trie's prefixes to its leaves.\n    //\n    // Continuing the previous example, the Binary Trie will look like this\n    // at the end of step 2:\n    //          +---+\n    //          |   |\n    //          +---+\n    //        0/     \\ 1\n    //      +---+   +---+\n    //      | A |   |   |\n    //      +---+   +---+\n    //            0/     \\1\n    //          +---+   +---+\n    //          |A,B|   |A,C|\n    //          +---+   +---+\n    //\n    // This trie yields the same match results as the original trie from\n    // step 1. 
But it has a useful new property: now that all the prefixes\n    // are at the leaves, they are disjoint: no prefix is nested under another.\n\n    std::vector<IpPrefix<Ipv4>> ipv4_prefixes = ipv4_temp.push_leaves();\n    std::vector<IpPrefix<Ipv6>> ipv6_prefixes = ipv6_temp.push_leaves();\n\n    // Step 3: take the disjoint prefixes from the leaves of each Binary Trie\n    // and use them to construct an LC Trie.\n    //\n    // Example inputs (from the leaves of the Binary Trie at the end of step 2)\n    //   A:   0.0.0.0/1\n    //   A,B: 128.0.0.0/2\n    //   A,C: 192.0.0.0/2\n    //\n    // The LC Trie generated from these inputs with fill_factor=0.5 and root_branching_factor=0\n    // will be:\n    //\n    //       +---------------------------+\n    //       | branch_factor=2, skip = 0 |\n    //       +---------------------------+\n    //    00/       01|         |10       \\11\n    //   +---+      +---+     +---+      +---+\n    //   | A |      | A |     |A,B|      |A,C|\n    //   +---+      +---+     +---+      +---+\n    //\n    // Or, in the internal vector form that the LcTrie class uses for memory-efficiency,\n    //    # | branch | skip | first_child | data | note\n    //   ---+--------+------+-------------+------+--------------------------------------------------\n    //    0 |      2 |    0 |           1 |  -   | (1 << branch) == 4 children, starting at offset 1\n    //    1 |      - |    0 |           - |  A   | 1st child of node 0, reached if next bits are 00\n    //    2 |      - |    0 |           - |  A   |   .\n    //    3 |      - |    0 |           - |  A,B |   .\n    //    4 |      - |    0 |           - |  A,C | 4th child of node 0, reached if next bits are 11\n    //\n    // The Nilsson and Karlsson paper linked in lc_trie.h has a more thorough example.\n\n    ipv4_trie_.reset(new LcTrieInternal<Ipv4>(ipv4_prefixes, fill_factor, root_branching_factor));\n    ipv6_trie_.reset(new LcTrieInternal<Ipv6>(ipv6_prefixes, fill_factor, 
root_branching_factor));\n  }\n\n  /**\n   * Retrieve data associated with the CIDR range that contains `ip_address`. Both IPv4 and IPv6\n   * addresses are supported.\n   * @param  ip_address supplies the IP address.\n   * @return a vector of data from the CIDR ranges and IP addresses that contains 'ip_address'. An\n   * empty vector is returned if no prefix contains 'ip_address' or there is no data for the IP\n   * version of the ip_address.\n   */\n  std::vector<T> getData(const Network::Address::InstanceConstSharedPtr& ip_address) const {\n    if (ip_address->ip()->version() == Address::IpVersion::v4) {\n      Ipv4 ip = ntohl(ip_address->ip()->ipv4()->address());\n      return ipv4_trie_->getData(ip);\n    } else {\n      Ipv6 ip = Utility::Ip6ntohl(ip_address->ip()->ipv6()->address());\n      return ipv6_trie_->getData(ip);\n    }\n  }\n\nprivate:\n  /**\n   * Extract n bits from input starting at position p.\n   * @param p supplies the position.\n   * @param n supplies the number of bits to extract.\n   * @param input supplies the IP address to extract bits from. The IP address is stored in host\n   *              byte order.\n   * @return extracted bits in the format of IpType.\n   */\n  template <class IpType, uint32_t address_size = CHAR_BIT * sizeof(IpType)>\n  static IpType extractBits(uint32_t p, uint32_t n, IpType input) {\n    // The IP's are stored in host byte order.\n    // By shifting the value to the left by p bits(and back), the bits between 0 and p-1 are\n    // zero'd out. Then to get the n bits, shift the IP back by the address_size minus the number\n    // of desired bits.\n    if (n == 0) {\n      return IpType(0);\n    }\n    return input << p >> (address_size - n);\n  }\n\n  /**\n   * Removes n bits from input starting at 0.\n   * @param n supplies the number of bits to remove.\n   * @param input supplies the IP address to remove bits from. 
The IP address is stored in host\n   *              byte order.\n   * @return input with 0 through n-1 bits cleared.\n   */\n  template <class IpType, uint32_t address_size = CHAR_BIT * sizeof(IpType)>\n  static IpType removeBits(uint32_t n, IpType input) {\n    // The IP's are stored in host byte order.\n    // By shifting the value to the left by n bits and back, the bits between 0 and n-1\n    // (inclusively) are zero'd out.\n    return input << n >> n;\n  }\n\n  // IP addresses are stored in host byte order to simplify\n  using Ipv4 = uint32_t;\n  using Ipv6 = absl::uint128;\n\n  using DataSet = absl::node_hash_set<T>;\n  using DataSetSharedPtr = std::shared_ptr<DataSet>;\n\n  /**\n   * Structure to hold a CIDR range and the data associated with it.\n   */\n  template <class IpType, uint32_t address_size = CHAR_BIT * sizeof(IpType)> struct IpPrefix {\n\n    IpPrefix() = default;\n\n    IpPrefix(const IpType& ip, uint32_t length, const T& data) : ip_(ip), length_(length) {\n      data_.insert(data);\n    }\n\n    IpPrefix(const IpType& ip, int length, const DataSet& data)\n        : ip_(ip), length_(length), data_(data) {}\n\n    /**\n     * @return -1 if the current object is less than other. 0 if they are the same. 
1\n     * if other is smaller than the current object.\n     */\n    int compare(const IpPrefix& other) const {\n      {\n        if (ip_ < other.ip_) {\n          return -1;\n        } else if (ip_ > other.ip_) {\n          return 1;\n        } else if (length_ < other.length_) {\n          return -1;\n        } else if (length_ > other.length_) {\n          return 1;\n        } else {\n          return 0;\n        }\n      }\n    }\n\n    bool operator<(const IpPrefix& other) const { return (this->compare(other) == -1); }\n\n    bool operator!=(const IpPrefix& other) const { return (this->compare(other) != 0); }\n\n    /**\n     * @return true if other is a prefix of this.\n     */\n    bool isPrefix(const IpPrefix& other) {\n      return (length_ == 0 || (length_ <= other.length_ && contains(other.ip_)));\n    }\n\n    /**\n     * @param address supplies an IP address to check against this prefix.\n     * @return bool true if this prefix contains the address.\n     */\n    bool contains(const IpType& address) const {\n      return (extractBits<IpType, address_size>(0, length_, ip_) ==\n              extractBits<IpType, address_size>(0, length_, address));\n    }\n\n    std::string asString() { return fmt::format(\"{}/{}\", toString(ip_), length_); }\n\n    // The address represented either in Ipv4(uint32_t) or Ipv6(absl::uint128).\n    IpType ip_{0};\n    // Length of the cidr range.\n    uint32_t length_{0};\n    // Data for this entry.\n    DataSet data_;\n  };\n\n  /**\n   * Binary trie used to simplify the construction of Level Compressed Tries.\n   * This data type supports two operations:\n   *   1. Add a prefix to the trie.\n   *   2. 
Push the prefixes to the leaves of the trie.\n   * That second operation produces a new set of prefixes that yield the same\n   * match results as the original set of prefixes from which the BinaryTrie\n   * was constructed, but with an important difference: the new prefixes are\n   * guaranteed not to be nested within each other. That allows the use of the\n   * classic LC Trie construction algorithm, which is fast and (relatively)\n   * simple but does not work properly with nested prefixes.\n   */\n  template <class IpType, uint32_t address_size = CHAR_BIT * sizeof(IpType)> class BinaryTrie {\n  public:\n    BinaryTrie(bool exclusive) : root_(std::make_unique<Node>()), exclusive_(exclusive) {}\n\n    /**\n     * Add a CIDR prefix and associated data to the binary trie. If an entry already\n     * exists for the prefix, merge the data into the existing entry.\n     */\n    void insert(const IpPrefix<IpType>& prefix) {\n      Node* node = root_.get();\n      for (uint32_t i = 0; i < prefix.length_; i++) {\n        auto bit = static_cast<uint32_t>(extractBits(i, 1, prefix.ip_));\n        NodePtr& next_node = node->children[bit];\n        if (next_node == nullptr) {\n          next_node = std::make_unique<Node>();\n        }\n        node = next_node.get();\n      }\n      if (node->data == nullptr) {\n        node->data = std::make_shared<DataSet>();\n      }\n      node->data->insert(prefix.data_.begin(), prefix.data_.end());\n    }\n\n    /**\n     * Update each node in the trie to inherit/override its ancestors' data,\n     * and then push the prefixes in the binary trie to the leaves so that:\n     *  1) each leaf contains a prefix, and\n     *  2) given the set of prefixes now located at the leaves, a useful\n     *     new property applies: no prefix in that set is nested under any\n     *     other prefix in the set (since, by definition, no leaf of the\n     *     trie can be nested under another leaf)\n     * @return the prefixes associated with the leaf 
nodes.\n     */\n    std::vector<IpPrefix<IpType>> push_leaves() {\n      std::vector<IpPrefix<IpType>> prefixes;\n      std::function<void(Node*, DataSetSharedPtr, unsigned, IpType)> visit =\n          [&](Node* node, DataSetSharedPtr data, unsigned depth, IpType prefix) {\n            // Inherit any data set by ancestor nodes.\n            if (data != nullptr) {\n              if (node->data == nullptr) {\n                node->data = data;\n              } else if (!exclusive_) {\n                node->data->insert(data->begin(), data->end());\n              }\n            }\n            // If a node has exactly one child, create a second child node\n            // that inherits the union of all data set by any ancestor nodes.\n            // This gives the trie an important new property: all the configured\n            // prefixes end up at the leaves of the trie. As no leaf is nested\n            // under another leaf (or one of them would not be a leaf!), the\n            // leaves of the trie upon completion of this leaf-push operation\n            // will form a set of disjoint prefixes (no nesting) that can be\n            // used to build an LC trie.\n            if (node->children[0] != nullptr && node->children[1] == nullptr) {\n              node->children[1] = std::make_unique<Node>();\n            } else if (node->children[0] == nullptr && node->children[1] != nullptr) {\n              node->children[0] = std::make_unique<Node>();\n            }\n            if (node->children[0] != nullptr) {\n              visit(node->children[0].get(), node->data, depth + 1, (prefix << 1) + IpType(0));\n              visit(node->children[1].get(), node->data, depth + 1, (prefix << 1) + IpType(1));\n            } else {\n              if (node->data != nullptr) {\n                // Compute the CIDR prefix from the path we've taken to get to this point in the\n                // tree.\n                IpType ip = prefix;\n                if (depth != 0) {\n         
         ip <<= (address_size - depth);\n                }\n                prefixes.emplace_back(IpPrefix<IpType>(ip, depth, *node->data));\n              }\n            }\n          };\n      visit(root_.get(), nullptr, 0, IpType(0));\n      return prefixes;\n    }\n\n  private:\n    struct Node {\n      std::unique_ptr<Node> children[2];\n      DataSetSharedPtr data;\n    };\n    using NodePtr = std::unique_ptr<Node>;\n    NodePtr root_;\n    bool exclusive_;\n  };\n\n  /**\n   * Level Compressed Trie (LC-Trie) that contains CIDR ranges and its corresponding data.\n   *\n   * The following is an implementation of the algorithm described in the paper\n   * 'IP-address lookup using LC-tries' by'S. Nilsson' and 'G. Karlsson'.\n   *\n   * 'https://github.com/beevek/libkrb/blob/master/krb/lc_trie.hpp' and\n   * 'http://www.csc.kth.se/~snilsson/software/router/C/' were used as reference during\n   * implementation.\n   *\n   * Note: The trie can only support up 524288(2^19) prefixes with a fill_factor of 1 and\n   * root_branching_factor not set. Refer to LcTrieInternal::build() method for more details.\n   */\n  template <class IpType, uint32_t address_size = CHAR_BIT * sizeof(IpType)> class LcTrieInternal {\n  public:\n    /**\n     * Construct a LC-Trie for IpType.\n     * @param data supplies a vector of data and CIDR ranges (in IpPrefix format).\n     * @param fill_factor supplies the fraction of completeness to use when calculating the branch\n     *                    value for a sub-trie.\n     * @param root_branching_factor supplies the branching factor at the root. The paper suggests\n     *                              for large LC-Tries to use the value '16' for the root\n     *                              branching factor. 
It reduces the depth of the trie.\n     */\n    LcTrieInternal(std::vector<IpPrefix<IpType>>& data, double fill_factor,\n                   uint32_t root_branching_factor);\n\n    /**\n     * Retrieve the data associated with the CIDR range that contains `ip_address`.\n     * @param  ip_address supplies the IP address in host byte order.\n     * @return a vector of data from the CIDR ranges and IP addresses that encompasses the input.\n     * An empty vector is returned if the LC Trie is empty.\n     */\n    std::vector<T> getData(const IpType& ip_address) const;\n\n  private:\n    /**\n     * Builds the Level Compressed Trie, by first sorting the data, removing duplicated\n     * prefixes and invoking buildRecursive() to build the trie.\n     */\n    void build(std::vector<IpPrefix<IpType>>& data) {\n      if (data.empty()) {\n        return;\n      }\n\n      ip_prefixes_ = data;\n      std::sort(ip_prefixes_.begin(), ip_prefixes_.end());\n\n      // Build the trie_.\n      trie_.reserve(static_cast<size_t>(ip_prefixes_.size() / fill_factor_));\n      uint32_t next_free_index = 1;\n      buildRecursive(0u, 0u, ip_prefixes_.size(), 0u, next_free_index);\n\n      // The value of next_free_index is the final size of the trie_.\n      ASSERT(next_free_index <= trie_.size());\n      trie_.resize(next_free_index);\n      trie_.shrink_to_fit();\n    }\n\n    // Thin wrapper around computeBranch output to facilitate code readability.\n    struct ComputePair {\n      ComputePair(int branch, int prefix) : branch_(branch), prefix_(prefix) {}\n\n      uint32_t branch_;\n      // The total number of bits that have the same prefix for subset of ip_prefixes_.\n      uint32_t prefix_;\n    };\n\n    /**\n     * Compute the branch and skip values for the trie starting at position 'first' through\n     * 'first+n-1' while disregarding the prefix.\n     * @param prefix supplies the common prefix in the ip_prefixes_ array.\n     * @param first supplies the index where computing the 
branch should begin with.\n     * @param n supplies the number of nodes to use while computing the branch.\n     * @return pair of integers for the branching factor and the skip.\n     */\n    ComputePair computeBranchAndSkip(uint32_t prefix, uint32_t first, uint32_t n) const {\n      ComputePair compute(0, 0);\n\n      // Compute the new prefix for the range between ip_prefixes_[first] and\n      // ip_prefixes_[first + n - 1].\n      IpType high = removeBits<IpType, address_size>(prefix, ip_prefixes_[first].ip_);\n      IpType low = removeBits<IpType, address_size>(prefix, ip_prefixes_[first + n - 1].ip_);\n      uint32_t index = prefix;\n\n      // Find the index at which low and high diverge to get the skip.\n      while (extractBits<IpType, address_size>(index, 1, low) ==\n             extractBits<IpType, address_size>(index, 1, high)) {\n        ++index;\n      }\n      compute.prefix_ = index;\n\n      // For 2 elements, use a branching factor of 2(2^1).\n      if (n == 2) {\n        compute.branch_ = 1;\n        return compute;\n      }\n\n      // According to the original LC-Trie paper, a large branching factor(suggested value: 16)\n      // at the root increases performance.\n      if (root_branching_factor_ > 0 && prefix == 0 && first == 0) {\n        compute.branch_ = root_branching_factor_;\n        return compute;\n      }\n\n      // Compute the number of bits required for branching by checking all patterns in the set are\n      // covered. 
Ex (b=2 {00, 01, 10, 11}; b=3 {000,001,010,011,100,101,110,111}, etc)\n      uint32_t branch = 1;\n      uint32_t count;\n      do {\n        ++branch;\n\n        // Check if the current branch factor with the fill factor can contain all of the nodes\n        // in the current range or if the current branching factor is larger than the\n        // IP address_size.\n        if (n < fill_factor_ * (1 << branch) ||\n            static_cast<uint32_t>(compute.prefix_ + branch) > address_size) {\n          break;\n        }\n\n        // Start by checking the bit patterns at ip_prefixes_[first] through\n        // ip_prefixes_[first + n-1].\n        index = first;\n        // Pattern to search for.\n        uint32_t pattern = 0;\n        // Number of patterns found while looping through the list.\n        count = 0;\n\n        // Search for all patterns(values) within 1<<branch.\n        while (pattern < static_cast<uint32_t>(1 << branch)) {\n          bool pattern_found = false;\n          // Keep on looping until either all nodes in the range have been visited or\n          // an IP prefix doesn't match the pattern.\n          while (index < first + n &&\n                 static_cast<uint32_t>(extractBits<IpType, address_size>(\n                     compute.prefix_, branch, ip_prefixes_[index].ip_)) == pattern) {\n            ++index;\n            pattern_found = true;\n          }\n\n          if (pattern_found) {\n            ++count;\n          }\n          ++pattern;\n        }\n        // Stop iterating once the size of the branch (with the fill factor ratio)\n        // can no longer contain all of the prefixes within the current range of\n        // ip_prefixes_[first] to ip_prefixes_[first+n-1].\n      } while (count >= fill_factor_ * (1 << branch));\n\n      // The branching factor is decremented because the algorithm requires the largest branching\n      // factor that covers all(most when a fill factor is specified) of the CIDR ranges in the\n      // 
current sub-trie. When the loops above exits, the branch factor value is\n      // 1. greater than the address size with the prefix.\n      // 2. greater than the number of entries.\n      // 3. less than the total number of patterns seen in the range.\n      // In all of the cases above, branch - 1 is guaranteed to cover all of CIDR\n      // ranges in the sub-trie.\n      compute.branch_ = branch - 1;\n      return compute;\n    }\n\n    /**\n     * Recursively build a trie for IP prefixes from position 'first' to 'first+n-1'.\n     * @param prefix supplies the prefix to ignore when building the sub-trie.\n     * @param first supplies the index into ip_prefixes_ for this sub-trie.\n     * @param n supplies the number of entries for the sub-trie.\n     * @param position supplies the root for this sub-trie.\n     * @param next_free_index supplies the next available index in the trie_.\n     */\n    void buildRecursive(uint32_t prefix, uint32_t first, uint32_t n, uint32_t position,\n                        uint32_t& next_free_index) {\n      if (position >= trie_.size()) {\n        // There is no way to predictably determine the number of trie nodes required to build a\n        // LC-Trie. If while building the trie the position that is being set exceeds the maximum\n        // number of supported trie_ entries, throw an Envoy Exception.\n        if (position >= MaxLcTrieNodes) {\n          // Adding 1 to the position to count how many nodes are trying to be set.\n          ExceptionUtil::throwEnvoyException(\n              fmt::format(\"The number of internal nodes required for the LC-Trie \"\n                          \"exceeded the maximum number of \"\n                          \"supported nodes. Minimum number of internal nodes required: \"\n                          \"'{0}'. 
Maximum number of supported nodes: '{1}'.\",\n                          (position + 1), MaxLcTrieNodes));\n        }\n        trie_.resize(position + 1);\n      }\n      // Setting a leaf, the branch and skip are 0.\n      if (n == 1) {\n        trie_[position].address_ = first;\n        return;\n      }\n\n      ComputePair output = computeBranchAndSkip(prefix, first, n);\n\n      uint32_t address = next_free_index;\n      trie_[position].branch_ = output.branch_;\n      // The skip value is the number of bits between the newly calculated prefix(output.prefix_)\n      // and the previous prefix(prefix).\n      trie_[position].skip_ = output.prefix_ - prefix;\n      trie_[position].address_ = address;\n\n      // The next available free index to populate in the trie_ is at next_free_index +\n      // 2^(branching factor).\n      next_free_index += 1 << output.branch_;\n\n      uint32_t new_position = first;\n\n      // Build the subtrees.\n      for (uint32_t bit_pattern = 0; bit_pattern < static_cast<uint32_t>(1 << output.branch_);\n           ++bit_pattern) {\n\n        // count is the number of entries in the ip_prefixes_ vector that have the same bit\n        // pattern as the ip_prefixes_[new_position].\n        int count = 0;\n        while (new_position + count < first + n &&\n               static_cast<uint32_t>(extractBits<IpType, address_size>(\n                   output.prefix_, output.branch_, ip_prefixes_[new_position + count].ip_)) ==\n                   bit_pattern) {\n          ++count;\n        }\n\n        // This logic was taken from\n        // https://github.com/beevek/libkrb/blob/24a224d3ea840e2e7d2926e17d8849aefecc1101/krb/lc_trie.hpp#L396.\n        // When there are no entries that match the current pattern, set a leaf at trie_[address +\n        // bit_pattern].\n        if (count == 0) {\n          // This case is hit when the last CIDR range(ip_prefixes_[first+n-1]) is being inserted\n          // into the trie_. 
new_position is decremented by one because the count added to\n          // new_position at line 445 are the number of entries already visited.\n          if (new_position == first + n) {\n            buildRecursive(output.prefix_ + output.branch_, new_position - 1, 1,\n                           address + bit_pattern, next_free_index);\n          } else {\n            buildRecursive(output.prefix_ + output.branch_, new_position, 1, address + bit_pattern,\n                           next_free_index);\n          }\n        } else if (count == 1 &&\n                   ip_prefixes_[new_position].length_ - output.prefix_ < output.branch_) {\n          // All Ip address that have the prefix of `bit_pattern` will map to the only CIDR range\n          // with the bit_pattern as a prefix.\n          uint32_t bits = output.branch_ + output.prefix_ - ip_prefixes_[new_position].length_;\n          for (uint32_t i = bit_pattern; i < bit_pattern + (1 << bits); ++i) {\n            buildRecursive(output.prefix_ + output.branch_, new_position, 1, address + i,\n                           next_free_index);\n          }\n          // Update the bit_pattern to skip over the trie_ entries initialized above.\n          bit_pattern += (1 << bits) - 1;\n        } else {\n          // Recursively build sub-tries for ip_prefixes_[new_position] to\n          // ip_prefixes_[new_position+count-1].\n          buildRecursive(output.prefix_ + output.branch_, new_position, count,\n                         address + bit_pattern, next_free_index);\n        }\n        new_position += count;\n      }\n    }\n\n    /**\n     * LcNode is a uint32_t. A wrapper is provided to simplify getting/setting the branch, the\n     * skip and the address values held within the structure.\n     *\n     * The LcNode has three parts to it\n     * - Branch: the first 5 bits represent the branching factor. The branching factor is used to\n     * determine the number of descendants for the current node. 
The number represents a power of\n     * 2, so there can be at most 2^31 descendant nodes.\n     * - Skip: the next 7 bits represent the number of bits to skip when looking at an IP address.\n     * This value can be between 0 and 127, so IPv6 is supported.\n     * - Address: the remaining 20 bits represent an index either into the trie_ or the\n     * ip_prefixes_. If branch_ != 0, the index is for the trie_. If branch == zero, the index is\n     * for the ip_prefixes_.\n     *\n     * Note: If more than 2^19-1 CIDR ranges are to be stored in trie_, uint64_t should be used\n     * instead.\n     */\n    struct LcNode {\n      uint32_t branch_ : 5;\n      uint32_t skip_ : 7;\n      uint32_t address_ : 20; // If this 20-bit size changes, please change MaxLcTrieNodes too.\n    };\n\n    // The CIDR range and data needs to be maintained separately from the LC-Trie. A LC-Trie skips\n    // chunks of data while searching for a match. This means that the node found in the LC-Trie\n    // is not guaranteed to have the IP address in range. 
The last step prior to returning\n    // associated data is to check the CIDR range pointed to by the node in the LC-Trie has\n    // the IP address in range.\n    std::vector<IpPrefix<IpType>> ip_prefixes_;\n\n    // Main trie search structure.\n    std::vector<LcNode> trie_;\n\n    const double fill_factor_;\n    const uint32_t root_branching_factor_;\n  };\n\n  std::unique_ptr<LcTrieInternal<Ipv4>> ipv4_trie_;\n  std::unique_ptr<LcTrieInternal<Ipv6>> ipv6_trie_;\n};\n\ntemplate <class T>\ntemplate <class IpType, uint32_t address_size>\nLcTrie<T>::LcTrieInternal<IpType, address_size>::LcTrieInternal(std::vector<IpPrefix<IpType>>& data,\n                                                                double fill_factor,\n                                                                uint32_t root_branching_factor)\n    : fill_factor_(fill_factor), root_branching_factor_(root_branching_factor) {\n  build(data);\n}\n\ntemplate <class T>\ntemplate <class IpType, uint32_t address_size>\nstd::vector<T>\nLcTrie<T>::LcTrieInternal<IpType, address_size>::getData(const IpType& ip_address) const {\n  std::vector<T> return_vector;\n  if (trie_.empty()) {\n    return return_vector;\n  }\n\n  LcNode node = trie_[0];\n  uint32_t branch = node.branch_;\n  uint32_t position = node.skip_;\n  uint32_t address = node.address_;\n\n  // branch == 0 is a leaf node.\n  while (branch != 0) {\n    // branch is at most 2^5-1= 31 bits to extract, so we can safely cast the\n    // output of extractBits to uint32_t without any data loss.\n    node = trie_[address + static_cast<uint32_t>(\n                               extractBits<IpType, address_size>(position, branch, ip_address))];\n    position += branch + node.skip_;\n    branch = node.branch_;\n    address = node.address_;\n  }\n\n  // The path taken through the trie to match the ip_address may have contained skips,\n  // so it is necessary to check whether the matched prefix really contains the\n  // ip_address.\n  const auto& prefix 
= ip_prefixes_[address];\n  if (prefix.contains(ip_address)) {\n    return std::vector<T>(prefix.data_.begin(), prefix.data_.end());\n  }\n  return std::vector<T>();\n}\n\n} // namespace LcTrie\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/listen_socket_impl.cc",
    "content": "#include \"common/network/listen_socket_impl.h\"\n\n#include <sys/types.h>\n\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nApi::SysCallIntResult ListenSocketImpl::bind(Network::Address::InstanceConstSharedPtr address) {\n  local_address_ = address;\n\n  const Api::SysCallIntResult result = SocketImpl::bind(local_address_);\n  if (SOCKET_FAILURE(result.rc_)) {\n    close();\n    throw SocketBindException(fmt::format(\"cannot bind '{}': {}\", local_address_->asString(),\n                                          errorDetails(result.errno_)),\n                              result.errno_);\n  }\n  return {0, 0};\n}\n\nvoid ListenSocketImpl::setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options) {\n  if (!Network::Socket::applyOptions(options, *this,\n                                     envoy::config::core::v3::SocketOption::STATE_PREBIND)) {\n    throw CreateListenerException(\"ListenSocket: Setting socket options failed\");\n  }\n}\n\nvoid ListenSocketImpl::setupSocket(const Network::Socket::OptionsSharedPtr& options,\n                                   bool bind_to_port) {\n  setListenSocketOptions(options);\n\n  if (bind_to_port) {\n    bind(local_address_);\n  }\n}\n\ntemplate <>\nvoid NetworkListenSocket<NetworkSocketTrait<Socket::Type::Stream>>::setPrebindSocketOptions() {\n// On Windows, SO_REUSEADDR does not restrict subsequent bind calls when there is a listener as on\n// Linux and later BSD socket stacks\n#ifndef WIN32\n  int on = 1;\n  auto status = setSocketOption(SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));\n  RELEASE_ASSERT(status.rc_ != -1, \"failed to 
set SO_REUSEADDR socket option\");\n#endif\n}\n\ntemplate <>\nvoid NetworkListenSocket<NetworkSocketTrait<Socket::Type::Datagram>>::setPrebindSocketOptions() {}\n\nUdsListenSocket::UdsListenSocket(const Address::InstanceConstSharedPtr& address)\n    : ListenSocketImpl(ioHandleForAddr(Socket::Type::Stream, address), address) {\n  RELEASE_ASSERT(io_handle_->isOpen(), \"\");\n  bind(local_address_);\n}\n\nUdsListenSocket::UdsListenSocket(IoHandlePtr&& io_handle,\n                                 const Address::InstanceConstSharedPtr& address)\n    : ListenSocketImpl(std::move(io_handle), address) {}\n\nstd::atomic<uint64_t> AcceptedSocketImpl::global_accepted_socket_count_;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/listen_socket_impl.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/network/socket.h\"\n#include \"envoy/network/socket_interface.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/network/socket_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass ListenSocketImpl : public SocketImpl {\nprotected:\n  ListenSocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address)\n      : SocketImpl(std::move(io_handle), local_address) {}\n\n  void setupSocket(const Network::Socket::OptionsSharedPtr& options, bool bind_to_port);\n  void setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options);\n  Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override;\n};\n\n/**\n * Wraps a unix socket.\n */\ntemplate <Socket::Type T> struct NetworkSocketTrait {};\n\ntemplate <> struct NetworkSocketTrait<Socket::Type::Stream> {\n  static constexpr Socket::Type type = Socket::Type::Stream;\n};\n\ntemplate <> struct NetworkSocketTrait<Socket::Type::Datagram> {\n  static constexpr Socket::Type type = Socket::Type::Datagram;\n};\n\ntemplate <typename T> class NetworkListenSocket : public ListenSocketImpl {\npublic:\n  NetworkListenSocket(const Address::InstanceConstSharedPtr& address,\n                      const Network::Socket::OptionsSharedPtr& options, bool bind_to_port)\n      : ListenSocketImpl(Network::ioHandleForAddr(T::type, address), address) {\n    RELEASE_ASSERT(io_handle_->isOpen(), \"\");\n\n    setPrebindSocketOptions();\n\n    setupSocket(options, bind_to_port);\n  }\n\n  NetworkListenSocket(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& address,\n                      const Network::Socket::OptionsSharedPtr& options)\n      : ListenSocketImpl(std::move(io_handle), address) {\n    
setListenSocketOptions(options);\n  }\n\n  Socket::Type socketType() const override { return T::type; }\n\nprotected:\n  void setPrebindSocketOptions();\n};\n\nusing TcpListenSocket = NetworkListenSocket<NetworkSocketTrait<Socket::Type::Stream>>;\nusing TcpListenSocketPtr = std::unique_ptr<TcpListenSocket>;\n\nusing UdpListenSocket = NetworkListenSocket<NetworkSocketTrait<Socket::Type::Datagram>>;\nusing UdpListenSocketPtr = std::unique_ptr<UdpListenSocket>;\n\nclass UdsListenSocket : public ListenSocketImpl {\npublic:\n  UdsListenSocket(const Address::InstanceConstSharedPtr& address);\n  UdsListenSocket(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& address);\n  Socket::Type socketType() const override { return Socket::Type::Stream; }\n};\n\nclass ConnectionSocketImpl : public SocketImpl, public ConnectionSocket {\npublic:\n  ConnectionSocketImpl(IoHandlePtr&& io_handle,\n                       const Address::InstanceConstSharedPtr& local_address,\n                       const Address::InstanceConstSharedPtr& remote_address)\n      : SocketImpl(std::move(io_handle), local_address), remote_address_(remote_address),\n        direct_remote_address_(remote_address) {}\n\n  ConnectionSocketImpl(Socket::Type type, const Address::InstanceConstSharedPtr& local_address,\n                       const Address::InstanceConstSharedPtr& remote_address)\n      : SocketImpl(type, local_address), remote_address_(remote_address),\n        direct_remote_address_(remote_address) {\n    setLocalAddress(local_address);\n  }\n\n  // Network::Socket\n  Socket::Type socketType() const override { return Socket::Type::Stream; }\n\n  // Network::ConnectionSocket\n  const Address::InstanceConstSharedPtr& remoteAddress() const override { return remote_address_; }\n  const Address::InstanceConstSharedPtr& directRemoteAddress() const override {\n    return direct_remote_address_;\n  }\n  void restoreLocalAddress(const Address::InstanceConstSharedPtr& local_address) override {\n  
  setLocalAddress(local_address);\n    local_address_restored_ = true;\n  }\n  void setRemoteAddress(const Address::InstanceConstSharedPtr& remote_address) override {\n    remote_address_ = remote_address;\n  }\n  bool localAddressRestored() const override { return local_address_restored_; }\n\n  void setDetectedTransportProtocol(absl::string_view protocol) override {\n    transport_protocol_ = std::string(protocol);\n  }\n  absl::string_view detectedTransportProtocol() const override { return transport_protocol_; }\n\n  void setRequestedApplicationProtocols(const std::vector<absl::string_view>& protocols) override {\n    application_protocols_.clear();\n    for (const auto& protocol : protocols) {\n      application_protocols_.emplace_back(protocol);\n    }\n  }\n  const std::vector<std::string>& requestedApplicationProtocols() const override {\n    return application_protocols_;\n  }\n\n  void setRequestedServerName(absl::string_view server_name) override {\n    server_name_ = std::string(server_name);\n  }\n  absl::string_view requestedServerName() const override { return server_name_; }\n\n  absl::optional<std::chrono::milliseconds> lastRoundTripTime() override {\n    return ioHandle().lastRoundTripTime();\n  }\n\nprotected:\n  Address::InstanceConstSharedPtr remote_address_;\n  const Address::InstanceConstSharedPtr direct_remote_address_;\n  bool local_address_restored_{false};\n  std::string transport_protocol_;\n  std::vector<std::string> application_protocols_;\n  std::string server_name_;\n};\n\n// ConnectionSocket used with server connections.\nclass AcceptedSocketImpl : public ConnectionSocketImpl {\npublic:\n  AcceptedSocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address,\n                     const Address::InstanceConstSharedPtr& remote_address)\n      : ConnectionSocketImpl(std::move(io_handle), local_address, remote_address) {\n    ++global_accepted_socket_count_;\n  }\n\n  ~AcceptedSocketImpl() override {\n    
ASSERT(global_accepted_socket_count_.load() > 0);\n    --global_accepted_socket_count_;\n  }\n\n  // TODO (tonya11en): Global connection count tracking is temporarily performed via a static\n  // variable until the logic is moved into the overload manager.\n  static uint64_t acceptedSocketCount() { return global_accepted_socket_count_.load(); }\n\nprivate:\n  static std::atomic<uint64_t> global_accepted_socket_count_;\n};\n\n// ConnectionSocket used with client connections.\nclass ClientSocketImpl : public ConnectionSocketImpl {\npublic:\n  ClientSocketImpl(const Address::InstanceConstSharedPtr& remote_address,\n                   const OptionsSharedPtr& options)\n      : ConnectionSocketImpl(Network::ioHandleForAddr(Socket::Type::Stream, remote_address),\n                             nullptr, remote_address) {\n    if (options) {\n      addOptions(options);\n    }\n  }\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/proxy_protocol_filter_state.cc",
    "content": "#include \"common/network/proxy_protocol_filter_state.h\"\n\n#include \"common/common/macros.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nconst std::string& ProxyProtocolFilterState::key() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.network.proxy_protocol_options\");\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/proxy_protocol_filter_state.h",
    "content": "#pragma once\n\n#include \"envoy/network/proxy_protocol.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * PROXY protocol info to be used in connections.\n */\nclass ProxyProtocolFilterState : public StreamInfo::FilterState::Object {\npublic:\n  ProxyProtocolFilterState(Network::ProxyProtocolData options) : options_(options) {}\n  const Network::ProxyProtocolData& value() const { return options_; }\n  static const std::string& key();\n\nprivate:\n  const Network::ProxyProtocolData options_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/raw_buffer_socket.cc",
    "content": "#include \"common/network/raw_buffer_socket.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/http/headers.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nvoid RawBufferSocket::setTransportSocketCallbacks(TransportSocketCallbacks& callbacks) {\n  ASSERT(!callbacks_);\n  callbacks_ = &callbacks;\n}\n\nIoResult RawBufferSocket::doRead(Buffer::Instance& buffer) {\n  PostIoAction action = PostIoAction::KeepOpen;\n  uint64_t bytes_read = 0;\n  bool end_stream = false;\n  do {\n    // 16K read is arbitrary. TODO(mattklein123) PERF: Tune the read size.\n    Api::IoCallUint64Result result = callbacks_->ioHandle().read(buffer, 16384);\n\n    if (result.ok()) {\n      ENVOY_CONN_LOG(trace, \"read returns: {}\", callbacks_->connection(), result.rc_);\n      if (result.rc_ == 0) {\n        // Remote close.\n        end_stream = true;\n        break;\n      }\n      bytes_read += result.rc_;\n      if (callbacks_->shouldDrainReadBuffer()) {\n        callbacks_->setReadBufferReady();\n        break;\n      }\n    } else {\n      // Remote error (might be no data).\n      ENVOY_CONN_LOG(trace, \"read error: {}\", callbacks_->connection(),\n                     result.err_->getErrorDetails());\n      if (result.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) {\n        action = PostIoAction::Close;\n      }\n      break;\n    }\n  } while (true);\n\n  return {action, bytes_read, end_stream};\n}\n\nIoResult RawBufferSocket::doWrite(Buffer::Instance& buffer, bool end_stream) {\n  PostIoAction action;\n  uint64_t bytes_written = 0;\n  ASSERT(!shutdown_ || buffer.length() == 0);\n  do {\n    if (buffer.length() == 0) {\n      if (end_stream && !shutdown_) {\n        // Ignore the result. This can only fail if the connection failed. 
In that case, the\n        // error will be detected on the next read, and dealt with appropriately.\n        callbacks_->ioHandle().shutdown(ENVOY_SHUT_WR);\n        shutdown_ = true;\n      }\n      action = PostIoAction::KeepOpen;\n      break;\n    }\n    Api::IoCallUint64Result result = callbacks_->ioHandle().write(buffer);\n\n    if (result.ok()) {\n      ENVOY_CONN_LOG(trace, \"write returns: {}\", callbacks_->connection(), result.rc_);\n      bytes_written += result.rc_;\n    } else {\n      ENVOY_CONN_LOG(trace, \"write error: {}\", callbacks_->connection(),\n                     result.err_->getErrorDetails());\n      if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) {\n        action = PostIoAction::KeepOpen;\n      } else {\n        action = PostIoAction::Close;\n      }\n      break;\n    }\n  } while (true);\n\n  return {action, bytes_written, false};\n}\n\nstd::string RawBufferSocket::protocol() const { return EMPTY_STRING; }\nabsl::string_view RawBufferSocket::failureReason() const { return EMPTY_STRING; }\n\nvoid RawBufferSocket::onConnected() { callbacks_->raiseEvent(ConnectionEvent::Connected); }\n\nTransportSocketPtr\nRawBufferSocketFactory::createTransportSocket(TransportSocketOptionsSharedPtr) const {\n  return std::make_unique<RawBufferSocket>();\n}\n\nbool RawBufferSocketFactory::implementsSecureTransport() const { return false; }\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/raw_buffer_socket.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/transport_socket.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass RawBufferSocket : public TransportSocket, protected Logger::Loggable<Logger::Id::connection> {\npublic:\n  // Network::TransportSocket\n  void setTransportSocketCallbacks(TransportSocketCallbacks& callbacks) override;\n  std::string protocol() const override;\n  absl::string_view failureReason() const override;\n  bool canFlushClose() override { return true; }\n  void closeSocket(Network::ConnectionEvent) override {}\n  void onConnected() override;\n  IoResult doRead(Buffer::Instance& buffer) override;\n  IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override;\n  Ssl::ConnectionInfoConstSharedPtr ssl() const override { return nullptr; }\n\nprivate:\n  TransportSocketCallbacks* callbacks_{};\n  bool shutdown_{};\n};\n\nclass RawBufferSocketFactory : public TransportSocketFactory {\npublic:\n  // Network::TransportSocketFactory\n  TransportSocketPtr createTransportSocket(TransportSocketOptionsSharedPtr options) const override;\n  bool implementsSecureTransport() const override;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/resolver_impl.cc",
    "content": "#include \"common/network/resolver_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/resolver.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/well_known_names.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\n\n/**\n * Implementation of a resolver for IP addresses.\n */\nclass IpResolver : public Resolver {\n\npublic:\n  InstanceConstSharedPtr\n  resolve(const envoy::config::core::v3::SocketAddress& socket_address) override {\n    switch (socket_address.port_specifier_case()) {\n    case envoy::config::core::v3::SocketAddress::PortSpecifierCase::kPortValue:\n    // Default to port 0 if no port value is specified.\n    case envoy::config::core::v3::SocketAddress::PortSpecifierCase::PORT_SPECIFIER_NOT_SET:\n      return Network::Utility::parseInternetAddress(\n          socket_address.address(), socket_address.port_value(), !socket_address.ipv4_compat());\n\n    default:\n      throw EnvoyException(fmt::format(\"IP resolver can't handle port specifier type {}\",\n                                       socket_address.port_specifier_case()));\n    }\n  }\n\n  std::string name() const override { return Config::AddressResolverNames::get().IP; }\n};\n\n/**\n * Static registration for the IP resolver. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(IpResolver, Resolver);\n\nInstanceConstSharedPtr resolveProtoAddress(const envoy::config::core::v3::Address& address) {\n  switch (address.address_case()) {\n  case envoy::config::core::v3::Address::AddressCase::kSocketAddress:\n    return resolveProtoSocketAddress(address.socket_address());\n  case envoy::config::core::v3::Address::AddressCase::kPipe:\n    return std::make_shared<PipeInstance>(address.pipe().path());\n  case envoy::config::core::v3::Address::AddressCase::kEnvoyInternalAddress:\n    switch (address.envoy_internal_address().address_name_specifier_case()) {\n    case envoy::config::core::v3::EnvoyInternalAddress::AddressNameSpecifierCase::\n        kServerListenerName:\n      return std::make_shared<EnvoyInternalInstance>(\n          address.envoy_internal_address().server_listener_name());\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  default:\n    throw EnvoyException(\"Address must be set: \" + address.DebugString());\n  }\n}\n\nInstanceConstSharedPtr\nresolveProtoSocketAddress(const envoy::config::core::v3::SocketAddress& socket_address) {\n  Resolver* resolver = nullptr;\n  const std::string& resolver_name = socket_address.resolver_name();\n  if (resolver_name.empty()) {\n    resolver =\n        Registry::FactoryRegistry<Resolver>::getFactory(Config::AddressResolverNames::get().IP);\n  } else {\n    resolver = Registry::FactoryRegistry<Resolver>::getFactory(resolver_name);\n  }\n  if (resolver == nullptr) {\n    throw EnvoyException(fmt::format(\"Unknown address resolver: {}\", resolver_name));\n  }\n  return resolver->resolve(socket_address);\n}\n\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/resolver_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/resolver.h\"\n\n#include \"common/network/address_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\n/**\n * Create an Instance from a envoy::config::core::v3::Address.\n * @param address supplies the address proto to resolve.\n * @return pointer to the Instance.\n */\nAddress::InstanceConstSharedPtr\nresolveProtoAddress(const envoy::config::core::v3::Address& address);\n\n/**\n * Create an Instance from a envoy::config::core::v3::SocketAddress.\n * @param address supplies the socket address proto to resolve.\n * @return pointer to the Instance.\n */\nAddress::InstanceConstSharedPtr\nresolveProtoSocketAddress(const envoy::config::core::v3::SocketAddress& address);\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/socket_impl.cc",
    "content": "#include \"common/network/socket_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/network/socket_interface.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nSocketImpl::SocketImpl(Socket::Type sock_type, const Address::InstanceConstSharedPtr addr)\n    : io_handle_(ioHandleForAddr(sock_type, addr)), sock_type_(sock_type),\n      addr_type_(addr->type()) {}\n\nSocketImpl::SocketImpl(IoHandlePtr&& io_handle,\n                       const Address::InstanceConstSharedPtr& local_address)\n    : io_handle_(std::move(io_handle)), local_address_(local_address) {\n\n  if (local_address_ != nullptr) {\n    addr_type_ = local_address_->type();\n    return;\n  }\n\n  // Should not happen but some tests inject -1 fds\n  if (!io_handle_->isOpen()) {\n    return;\n  }\n\n  auto domain = io_handle_->domain();\n  // This should never happen in practice but too many tests inject fake fds ...\n  if (!domain.has_value()) {\n    return;\n  }\n\n  addr_type_ = *domain == AF_UNIX ? Address::Type::Pipe : Address::Type::Ip;\n}\n\nApi::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr address) {\n  Api::SysCallIntResult bind_result;\n\n  if (address->type() == Address::Type::Pipe) {\n    const Address::Pipe* pipe = address->pipe();\n    const auto* pipe_sa = reinterpret_cast<const sockaddr_un*>(address->sockAddr());\n    bool abstract_namespace = address->pipe()->abstractNamespace();\n    if (!abstract_namespace) {\n      // Try to unlink an existing filesystem object at the requested path. 
Ignore\n      // errors -- it's fine if the path doesn't exist, and if it exists but can't\n      // be unlinked then `::bind()` will generate a reasonable errno.\n      unlink(pipe_sa->sun_path);\n    }\n    // Not storing a reference to syscalls singleton because of unit test mocks\n    bind_result = io_handle_->bind(address);\n    if (pipe->mode() != 0 && !abstract_namespace && bind_result.rc_ == 0) {\n      auto set_permissions = Api::OsSysCallsSingleton::get().chmod(pipe_sa->sun_path, pipe->mode());\n      if (set_permissions.rc_ != 0) {\n        throw EnvoyException(fmt::format(\"Failed to create socket with mode {}: {}\",\n                                         std::to_string(pipe->mode()),\n                                         errorDetails(set_permissions.errno_)));\n      }\n    }\n    return bind_result;\n  }\n\n  bind_result = io_handle_->bind(address);\n  if (bind_result.rc_ == 0 && address->ip()->port() == 0) {\n    local_address_ = io_handle_->localAddress();\n  }\n  return bind_result;\n}\n\nApi::SysCallIntResult SocketImpl::listen(int backlog) { return io_handle_->listen(backlog); }\n\nApi::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstSharedPtr address) {\n  auto result = io_handle_->connect(address);\n  if (address->type() == Address::Type::Ip) {\n    local_address_ = io_handle_->localAddress();\n  }\n  return result;\n}\n\nApi::SysCallIntResult SocketImpl::setSocketOption(int level, int optname, const void* optval,\n                                                  socklen_t optlen) {\n  return io_handle_->setOption(level, optname, optval, optlen);\n}\n\nApi::SysCallIntResult SocketImpl::getSocketOption(int level, int optname, void* optval,\n                                                  socklen_t* optlen) const {\n  return io_handle_->getOption(level, optname, optval, optlen);\n}\n\nApi::SysCallIntResult SocketImpl::setBlockingForTest(bool blocking) {\n  return 
io_handle_->setBlocking(blocking);\n}\n\nabsl::optional<Address::IpVersion> SocketImpl::ipVersion() const {\n  if (addr_type_ == Address::Type::Ip) {\n    // Always hit after socket is initialized, i.e., accepted or connected\n    if (local_address_ != nullptr) {\n      return local_address_->ip()->version();\n    } else {\n      auto domain = io_handle_->domain();\n      if (!domain.has_value()) {\n        return absl::nullopt;\n      }\n      if (*domain == AF_INET) {\n        return Address::IpVersion::v4;\n      } else if (*domain == AF_INET6) {\n        return Address::IpVersion::v6;\n      } else {\n        return absl::nullopt;\n      }\n    }\n  }\n  return absl::nullopt;\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/socket_impl.h",
    "content": "#pragma once\n\n#include \"envoy/network/socket.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass SocketImpl : public virtual Socket {\npublic:\n  SocketImpl(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr);\n\n  // Network::Socket\n  const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; }\n  void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) override {\n    local_address_ = local_address;\n  }\n\n  IoHandle& ioHandle() override { return *io_handle_; }\n  const IoHandle& ioHandle() const override { return *io_handle_; }\n  void close() override {\n    if (io_handle_->isOpen()) {\n      io_handle_->close();\n    }\n  }\n  bool isOpen() const override { return io_handle_->isOpen(); }\n  void ensureOptions() {\n    if (!options_) {\n      options_ = std::make_shared<std::vector<OptionConstSharedPtr>>();\n    }\n  }\n  void addOption(const OptionConstSharedPtr& option) override {\n    ensureOptions();\n    options_->emplace_back(std::move(option));\n  }\n  void addOptions(const OptionsSharedPtr& options) override {\n    ensureOptions();\n    Network::Socket::appendOptions(options_, options);\n  }\n\n  Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override;\n  Api::SysCallIntResult listen(int backlog) override;\n  Api::SysCallIntResult connect(const Address::InstanceConstSharedPtr addr) override;\n  Api::SysCallIntResult setSocketOption(int level, int optname, const void* optval,\n                                        socklen_t optlen) override;\n  Api::SysCallIntResult getSocketOption(int level, int optname, void* optval,\n                                        socklen_t* optlen) const override;\n  Api::SysCallIntResult setBlockingForTest(bool blocking) override;\n\n  const OptionsSharedPtr& options() const override { return options_; }\n  Socket::Type socketType() const override { return 
sock_type_; }\n  Address::Type addressType() const override { return addr_type_; }\n  absl::optional<Address::IpVersion> ipVersion() const override;\n\nprotected:\n  SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address);\n\n  const IoHandlePtr io_handle_;\n  Address::InstanceConstSharedPtr local_address_;\n  OptionsSharedPtr options_;\n  Socket::Type sock_type_;\n  Address::Type addr_type_;\n};\n\n} // namespace Network\n} // namespace Envoy"
  },
  {
    "path": "source/common/network/socket_interface.h",
    "content": "#pragma once\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/network/socket_interface.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/bootstrap_extension_config.h\"\n\n#include \"common/singleton/threadsafe_singleton.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n// Wrapper for SocketInterface instances returned by createBootstrapExtension() which must be\n// implemented by all factories that derive SocketInterfaceBase\nclass SocketInterfaceExtension : public Server::BootstrapExtension {\npublic:\n  SocketInterfaceExtension(SocketInterface& sock_interface) : sock_interface_(sock_interface) {}\n  SocketInterface& socketInterface() { return sock_interface_; }\n\nprivate:\n  SocketInterface& sock_interface_;\n};\n\n// Class to be derived by all SocketInterface implementations.\n//\n// It acts both as a SocketInterface and as a BootstrapExtensionFactory. The latter is used, on the\n// one hand, to configure and initialize the interface and, on the other, for SocketInterface lookup\n// by leveraging the FactoryRegistry. As required for all bootstrap extensions, all derived classes\n// should register via the REGISTER_FACTORY() macro as BootstrapExtensionFactory.\n//\n// SocketInterface instances can be retrieved using the factory name, i.e., string returned by\n// name() function implemented by all classes that derive SocketInterfaceBase, via\n// Network::socketInterface(). 
When instantiating addresses, address resolvers should\n// set the socket interface field to the name of the socket interface implementation that should\n// be used to create sockets for said addresses.\nclass SocketInterfaceBase : public SocketInterface,\n                            public Server::Configuration::BootstrapExtensionFactory {};\n\n/**\n * Lookup SocketInterface instance by name\n * @param name Name of the socket interface to be looked up\n * @return Pointer to @ref SocketInterface instance that registered using the name of nullptr\n */\nstatic inline const SocketInterface* socketInterface(std::string name) {\n  auto factory =\n      Registry::FactoryRegistry<Server::Configuration::BootstrapExtensionFactory>::getFactory(name);\n  return dynamic_cast<SocketInterface*>(factory);\n}\n\nusing SocketInterfaceSingleton = InjectableSingleton<SocketInterface>;\nusing SocketInterfaceLoader = ScopedInjectableLoader<SocketInterface>;\n\n} // namespace Network\n} // namespace Envoy"
  },
  {
    "path": "source/common/network/socket_interface_impl.cc",
    "content": "#include \"common/network/socket_interface_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/network/socket_interface/v3/default_socket_interface.pb.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nIoHandlePtr SocketInterfaceImpl::makeSocket(int socket_fd, bool socket_v6only,\n                                            absl::optional<int> domain) const {\n  return std::make_unique<IoSocketHandleImpl>(socket_fd, socket_v6only, domain);\n}\n\nIoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type addr_type,\n                                        Address::IpVersion version, bool socket_v6only) const {\n#if defined(__APPLE__) || defined(WIN32)\n  int flags = 0;\n#else\n  int flags = SOCK_NONBLOCK;\n#endif\n\n  if (socket_type == Socket::Type::Stream) {\n    flags |= SOCK_STREAM;\n  } else {\n    flags |= SOCK_DGRAM;\n  }\n\n  int domain;\n  if (addr_type == Address::Type::Ip) {\n    if (version == Address::IpVersion::v6) {\n      domain = AF_INET6;\n    } else {\n      ASSERT(version == Address::IpVersion::v4);\n      domain = AF_INET;\n    }\n  } else if (addr_type == Address::Type::Pipe) {\n    domain = AF_UNIX;\n  } else {\n    ASSERT(addr_type == Address::Type::EnvoyInternal);\n    // TODO(lambdai): Add InternalIoSocketHandleImpl to support internal address.\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0);\n  RELEASE_ASSERT(SOCKET_VALID(result.rc_),\n                 fmt::format(\"socket(2) failed, got error: {}\", errorDetails(result.errno_)));\n  IoHandlePtr io_handle = makeSocket(result.rc_, socket_v6only, domain);\n\n#if defined(__APPLE__) || defined(WIN32)\n  // Cannot set SOCK_NONBLOCK as a ::socket flag.\n  const int rc 
= io_handle->setBlocking(false).rc_;\n  RELEASE_ASSERT(!SOCKET_FAILURE(rc), \"\");\n#endif\n\n  return io_handle;\n}\n\nIoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type,\n                                        const Address::InstanceConstSharedPtr addr) const {\n  Address::IpVersion ip_version = addr->ip() ? addr->ip()->version() : Address::IpVersion::v4;\n  int v6only = 0;\n  if (addr->type() == Address::Type::Ip && ip_version == Address::IpVersion::v6) {\n    v6only = addr->ip()->ipv6()->v6only();\n  }\n\n  IoHandlePtr io_handle =\n      SocketInterfaceImpl::socket(socket_type, addr->type(), ip_version, v6only);\n  if (addr->type() == Address::Type::Ip && ip_version == Address::IpVersion::v6) {\n    // Setting IPV6_V6ONLY restricts the IPv6 socket to IPv6 connections only.\n    const Api::SysCallIntResult result = io_handle->setOption(\n        IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast<const char*>(&v6only), sizeof(v6only));\n    RELEASE_ASSERT(!SOCKET_FAILURE(result.rc_), \"\");\n  }\n  return io_handle;\n}\n\nIoHandlePtr SocketInterfaceImpl::socket(os_fd_t fd) {\n  return std::make_unique<IoSocketHandleImpl>(fd);\n}\n\nbool SocketInterfaceImpl::ipFamilySupported(int domain) {\n  Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get();\n  const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0);\n  if (SOCKET_VALID(result.rc_)) {\n    RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0,\n                   fmt::format(\"Fail to close fd: response code {}\", errorDetails(result.rc_)));\n  }\n  return SOCKET_VALID(result.rc_);\n}\n\nServer::BootstrapExtensionPtr\nSocketInterfaceImpl::createBootstrapExtension(const Protobuf::Message&,\n                                              Server::Configuration::ServerFactoryContext&) {\n  return std::make_unique<SocketInterfaceExtension>(*this);\n}\n\nProtobufTypes::MessagePtr SocketInterfaceImpl::createEmptyConfigProto() {\n  return std::make_unique<\n      
envoy::extensions::network::socket_interface::v3::DefaultSocketInterface>();\n}\n\nREGISTER_FACTORY(SocketInterfaceImpl, Server::Configuration::BootstrapExtensionFactory);\n\nstatic SocketInterfaceLoader* socket_interface_ =\n    new SocketInterfaceLoader(std::make_unique<SocketInterfaceImpl>());\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/socket_interface_impl.h",
    "content": "#pragma once\n\n#include \"envoy/network/socket.h\"\n\n#include \"common/network/socket_interface.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass SocketInterfaceImpl : public SocketInterfaceBase {\npublic:\n  // SocketInterface\n  IoHandlePtr socket(Socket::Type socket_type, Address::Type addr_type, Address::IpVersion version,\n                     bool socket_v6only) const override;\n  IoHandlePtr socket(Socket::Type socket_type,\n                     const Address::InstanceConstSharedPtr addr) const override;\n  IoHandlePtr socket(os_fd_t fd) override;\n  bool ipFamilySupported(int domain) override;\n\n  // Server::Configuration::BootstrapExtensionFactory\n  Server::BootstrapExtensionPtr\n  createBootstrapExtension(const Protobuf::Message& config,\n                           Server::Configuration::ServerFactoryContext& context) override;\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n  std::string name() const override {\n    return \"envoy.extensions.network.socket_interface.default_socket_interface\";\n  };\n\nprotected:\n  virtual IoHandlePtr makeSocket(int socket_fd, bool socket_v6only,\n                                 absl::optional<int> domain) const;\n};\n\nDECLARE_FACTORY(SocketInterfaceImpl);\n\n} // namespace Network\n} // namespace Envoy"
  },
  {
    "path": "source/common/network/socket_option_factory.cc",
    "content": "#include \"common/network/socket_option_factory.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/network/addr_family_aware_socket_option_impl.h\"\n#include \"common/network/socket_option_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nstd::unique_ptr<Socket::Options>\nSocketOptionFactory::buildTcpKeepaliveOptions(Network::TcpKeepaliveConfig keepalive_config) {\n  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n  options->push_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_SO_KEEPALIVE, 1));\n\n  if (keepalive_config.keepalive_probes_.has_value()) {\n    options->push_back(std::make_shared<Network::SocketOptionImpl>(\n        envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_TCP_KEEPCNT,\n        keepalive_config.keepalive_probes_.value()));\n  }\n  if (keepalive_config.keepalive_interval_.has_value()) {\n    options->push_back(std::make_shared<Network::SocketOptionImpl>(\n        envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_TCP_KEEPINTVL,\n        keepalive_config.keepalive_interval_.value()));\n  }\n  if (keepalive_config.keepalive_time_.has_value()) {\n    options->push_back(std::make_shared<Network::SocketOptionImpl>(\n        envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_TCP_KEEPIDLE,\n        keepalive_config.keepalive_time_.value()));\n  }\n  return options;\n}\n\nstd::unique_ptr<Socket::Options> SocketOptionFactory::buildIpFreebindOptions() {\n  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n  options->push_back(std::make_shared<Network::AddrFamilyAwareSocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_IP_FREEBIND,\n      ENVOY_SOCKET_IPV6_FREEBIND, 1));\n  return options;\n}\n\nstd::unique_ptr<Socket::Options> 
SocketOptionFactory::buildIpTransparentOptions() {\n  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n  options->push_back(std::make_shared<Network::AddrFamilyAwareSocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_IP_TRANSPARENT,\n      ENVOY_SOCKET_IPV6_TRANSPARENT, 1));\n  options->push_back(std::make_shared<Network::AddrFamilyAwareSocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_SOCKET_IP_TRANSPARENT,\n      ENVOY_SOCKET_IPV6_TRANSPARENT, 1));\n  return options;\n}\n\nstd::unique_ptr<Socket::Options> SocketOptionFactory::buildSocketMarkOptions(uint32_t mark) {\n  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n  // we need this to happen prior to binding or prior to connecting. In both cases, PREBIND will\n  // fire.\n  options->push_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_SO_MARK, mark));\n  return options;\n}\n\nstd::unique_ptr<Socket::Options> SocketOptionFactory::buildSocketNoSigpipeOptions() {\n  // Provide additional handling for `SIGPIPE` at the socket layer by converting it to `EPIPE`.\n  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n  options->push_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_SO_NOSIGPIPE, 1));\n  return options;\n}\n\nstd::unique_ptr<Socket::Options> SocketOptionFactory::buildLiteralOptions(\n    const Protobuf::RepeatedPtrField<envoy::config::core::v3::SocketOption>& socket_options) {\n  auto options = std::make_unique<Socket::Options>();\n  for (const auto& socket_option : socket_options) {\n    std::string buf;\n    int int_value;\n    switch (socket_option.value_case()) {\n    case envoy::config::core::v3::SocketOption::ValueCase::kIntValue:\n      int_value = socket_option.int_value();\n      
buf.append(reinterpret_cast<char*>(&int_value), sizeof(int_value));\n      break;\n    case envoy::config::core::v3::SocketOption::ValueCase::kBufValue:\n      buf.append(socket_option.buf_value());\n      break;\n    default:\n      ENVOY_LOG(warn, \"Socket option specified with no or unknown value: {}\",\n                socket_option.DebugString());\n      continue;\n    }\n    options->emplace_back(std::make_shared<Network::SocketOptionImpl>(\n        socket_option.state(),\n        Network::SocketOptionName(\n            socket_option.level(), socket_option.name(),\n            fmt::format(\"{}/{}\", socket_option.level(), socket_option.name())),\n        buf));\n  }\n  return options;\n}\n\nstd::unique_ptr<Socket::Options>\nSocketOptionFactory::buildTcpFastOpenOptions(uint32_t queue_length) {\n  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n  options->push_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_LISTENING, ENVOY_SOCKET_TCP_FASTOPEN,\n      queue_length));\n  return options;\n}\n\nstd::unique_ptr<Socket::Options> SocketOptionFactory::buildIpPacketInfoOptions() {\n  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n  options->push_back(std::make_shared<AddrFamilyAwareSocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_SELF_IP_ADDR, ENVOY_SELF_IPV6_ADDR,\n      1));\n  return options;\n}\n\nstd::unique_ptr<Socket::Options> SocketOptionFactory::buildRxQueueOverFlowOptions() {\n  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n#ifdef SO_RXQ_OVFL\n  options->push_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_BOUND,\n      ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RXQ_OVFL), 1));\n#endif\n  return options;\n}\n\nstd::unique_ptr<Socket::Options> SocketOptionFactory::buildReusePortOptions() {\n  
std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n  options->push_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_SO_REUSEPORT, 1));\n  return options;\n}\n\nstd::unique_ptr<Socket::Options> SocketOptionFactory::buildUdpGroOptions() {\n  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();\n  options->push_back(std::make_shared<SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_SOCKET_UDP_GRO, 1));\n  return options;\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/socket_option_factory.h",
    "content": "#pragma once\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/socket.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nstruct TcpKeepaliveConfig {\n  absl::optional<uint32_t>\n      keepalive_probes_; // Number of unanswered probes before the connection is dropped\n  absl::optional<uint32_t> keepalive_time_; // Connection idle time before probing will start, in ms\n  absl::optional<uint32_t> keepalive_interval_; // Interval between probes, in ms\n};\n\nclass SocketOptionFactory : Logger::Loggable<Logger::Id::connection> {\npublic:\n  static std::unique_ptr<Socket::Options>\n  buildTcpKeepaliveOptions(Network::TcpKeepaliveConfig keepalive_config);\n  static std::unique_ptr<Socket::Options> buildIpFreebindOptions();\n  static std::unique_ptr<Socket::Options> buildIpTransparentOptions();\n  static std::unique_ptr<Socket::Options> buildSocketMarkOptions(uint32_t mark);\n  static std::unique_ptr<Socket::Options> buildSocketNoSigpipeOptions();\n  static std::unique_ptr<Socket::Options> buildTcpFastOpenOptions(uint32_t queue_length);\n  static std::unique_ptr<Socket::Options> buildLiteralOptions(\n      const Protobuf::RepeatedPtrField<envoy::config::core::v3::SocketOption>& socket_options);\n  static std::unique_ptr<Socket::Options> buildIpPacketInfoOptions();\n  static std::unique_ptr<Socket::Options> buildRxQueueOverFlowOptions();\n  static std::unique_ptr<Socket::Options> buildReusePortOptions();\n  static std::unique_ptr<Socket::Options> buildUdpGroOptions();\n};\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/socket_option_impl.cc",
    "content": "#include \"common/network/socket_option_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n// Socket::Option\nbool SocketOptionImpl::setOption(Socket& socket,\n                                 envoy::config::core::v3::SocketOption::SocketState state) const {\n  if (in_state_ == state) {\n    if (!optname_.hasValue()) {\n      ENVOY_LOG(warn, \"Failed to set unsupported option on socket\");\n      return false;\n    }\n\n    const Api::SysCallIntResult result =\n        SocketOptionImpl::setSocketOption(socket, optname_, value_.data(), value_.size());\n    if (result.rc_ != 0) {\n      ENVOY_LOG(warn, \"Setting {} option on socket failed: {}\", optname_.name(),\n                errorDetails(result.errno_));\n      return false;\n    }\n  }\n\n  return true;\n}\n\nabsl::optional<Socket::Option::Details>\nSocketOptionImpl::getOptionDetails(const Socket&,\n                                   envoy::config::core::v3::SocketOption::SocketState state) const {\n  if (state != in_state_ || !isSupported()) {\n    return absl::nullopt;\n  }\n\n  Socket::Option::Details info;\n  info.name_ = optname_;\n  info.value_ = {value_.begin(), value_.end()};\n  return absl::make_optional(std::move(info));\n}\n\nbool SocketOptionImpl::isSupported() const { return optname_.hasValue(); }\n\nApi::SysCallIntResult SocketOptionImpl::setSocketOption(Socket& socket,\n                                                        const Network::SocketOptionName& optname,\n                                                        const void* value, size_t size) {\n  if (!optname.hasValue()) {\n    return {-1, SOCKET_ERROR_NOT_SUP};\n  }\n\n  return socket.setSocketOption(optname.level(), optname.option(), value, size);\n}\n\n} // namespace 
Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/socket_option_impl.h",
    "content": "#pragma once\n\n#include \"envoy/api/os_sys_calls.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/listen_socket.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n#ifdef IP_TRANSPARENT\n#define ENVOY_SOCKET_IP_TRANSPARENT ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_IP, IP_TRANSPARENT)\n#else\n#define ENVOY_SOCKET_IP_TRANSPARENT Network::SocketOptionName()\n#endif\n\n#ifdef IPV6_TRANSPARENT\n#define ENVOY_SOCKET_IPV6_TRANSPARENT ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_IPV6, IPV6_TRANSPARENT)\n#else\n#define ENVOY_SOCKET_IPV6_TRANSPARENT Network::SocketOptionName()\n#endif\n\n#ifdef IP_FREEBIND\n#define ENVOY_SOCKET_IP_FREEBIND ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_IP, IP_FREEBIND)\n#else\n#define ENVOY_SOCKET_IP_FREEBIND Network::SocketOptionName()\n#endif\n\n#ifdef IPV6_FREEBIND\n#define ENVOY_SOCKET_IPV6_FREEBIND ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_IPV6, IPV6_FREEBIND)\n#else\n#define ENVOY_SOCKET_IPV6_FREEBIND Network::SocketOptionName()\n#endif\n\n#ifdef SO_KEEPALIVE\n#define ENVOY_SOCKET_SO_KEEPALIVE ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_KEEPALIVE)\n#else\n#define ENVOY_SOCKET_SO_KEEPALIVE Network::SocketOptionName()\n#endif\n\n#ifdef SO_MARK\n#define ENVOY_SOCKET_SO_MARK ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_MARK)\n#else\n#define ENVOY_SOCKET_SO_MARK Network::SocketOptionName()\n#endif\n\n#ifdef SO_NOSIGPIPE\n#define ENVOY_SOCKET_SO_NOSIGPIPE ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_NOSIGPIPE)\n#else\n#define ENVOY_SOCKET_SO_NOSIGPIPE Network::SocketOptionName()\n#endif\n\n#ifdef SO_REUSEPORT\n#define ENVOY_SOCKET_SO_REUSEPORT ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_REUSEPORT)\n#else\n#define ENVOY_SOCKET_SO_REUSEPORT Network::SocketOptionName()\n#endif\n\n#ifdef UDP_GRO\n#define ENVOY_SOCKET_UDP_GRO ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_UDP, UDP_GRO)\n#else\n#define ENVOY_SOCKET_UDP_GRO 
Network::SocketOptionName()\n#endif\n\n#ifdef TCP_KEEPCNT\n#define ENVOY_SOCKET_TCP_KEEPCNT ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_TCP, TCP_KEEPCNT)\n#else\n#define ENVOY_SOCKET_TCP_KEEPCNT Network::SocketOptionName()\n#endif\n\n#ifdef TCP_KEEPIDLE\n#define ENVOY_SOCKET_TCP_KEEPIDLE ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_TCP, TCP_KEEPIDLE)\n#elif TCP_KEEPALIVE // macOS uses a different name from Linux for just this option.\n#define ENVOY_SOCKET_TCP_KEEPIDLE ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_TCP, TCP_KEEPALIVE)\n#else\n#define ENVOY_SOCKET_TCP_KEEPIDLE Network::SocketOptionName()\n#endif\n\n#ifdef TCP_KEEPINTVL\n#define ENVOY_SOCKET_TCP_KEEPINTVL ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_TCP, TCP_KEEPINTVL)\n#else\n#define ENVOY_SOCKET_TCP_KEEPINTVL Network::SocketOptionName()\n#endif\n\n#ifdef TCP_FASTOPEN\n#define ENVOY_SOCKET_TCP_FASTOPEN ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_TCP, TCP_FASTOPEN)\n#else\n#define ENVOY_SOCKET_TCP_FASTOPEN Network::SocketOptionName()\n#endif\n\n// Linux uses IP_PKTINFO for both sending source address and receiving destination\n// address.\n// FreeBSD uses IP_RECVDSTADDR for receiving destination address and IP_SENDSRCADDR for sending\n// source address. 
And these two have same value for convenience purpose.\n#ifdef IP_RECVDSTADDR\n#ifdef IP_SENDSRCADDR\nstatic_assert(IP_RECVDSTADDR == IP_SENDSRCADDR);\n#endif\n#define ENVOY_IP_PKTINFO IP_RECVDSTADDR\n#elif IP_PKTINFO\n#define ENVOY_IP_PKTINFO IP_PKTINFO\n#endif\n\n#define ENVOY_SELF_IP_ADDR ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_IP, ENVOY_IP_PKTINFO)\n\n// Both Linux and FreeBSD use IPV6_RECVPKTINFO for both sending source address and\n// receiving destination address.\n#define ENVOY_SELF_IPV6_ADDR ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_IPV6, IPV6_RECVPKTINFO)\n\n#ifdef SO_ATTACH_REUSEPORT_CBPF\n#define ENVOY_ATTACH_REUSEPORT_CBPF                                                                \\\n  ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF)\n#else\n#define ENVOY_ATTACH_REUSEPORT_CBPF Network::SocketOptionName()\n#endif\n\nclass SocketOptionImpl : public Socket::Option, Logger::Loggable<Logger::Id::connection> {\npublic:\n  SocketOptionImpl(envoy::config::core::v3::SocketOption::SocketState in_state,\n                   Network::SocketOptionName optname,\n                   int value) // Yup, int. 
See setsockopt(2).\n      : SocketOptionImpl(in_state, optname,\n                         absl::string_view(reinterpret_cast<char*>(&value), sizeof(value))) {}\n\n  SocketOptionImpl(envoy::config::core::v3::SocketOption::SocketState in_state,\n                   Network::SocketOptionName optname, absl::string_view value)\n      : in_state_(in_state), optname_(optname), value_(value.begin(), value.end()) {\n    ASSERT(reinterpret_cast<uintptr_t>(value_.data()) % alignof(void*) == 0);\n  }\n\n  // Socket::Option\n  bool setOption(Socket& socket,\n                 envoy::config::core::v3::SocketOption::SocketState state) const override;\n\n  // The common socket options don't require a hash key.\n  void hashKey(std::vector<uint8_t>&) const override {}\n\n  absl::optional<Details>\n  getOptionDetails(const Socket& socket,\n                   envoy::config::core::v3::SocketOption::SocketState state) const override;\n\n  bool isSupported() const;\n\n  /**\n   * Set the option on the given socket.\n   * @param socket the socket on which to apply the option.\n   * @param optname the option name.\n   * @param value the option value.\n   * @param size the option value size.\n   * @return a Api::SysCallIntResult with rc_ = 0 for success and rc = -1 for failure. 
If the call\n   * is successful, errno_ shouldn't be used.\n   */\n  static Api::SysCallIntResult setSocketOption(Socket& socket,\n                                               const Network::SocketOptionName& optname,\n                                               const void* value, size_t size);\n\nprivate:\n  const envoy::config::core::v3::SocketOption::SocketState in_state_;\n  const Network::SocketOptionName optname_;\n  // This has to be a std::vector<uint8_t> but not std::string because std::string might inline\n  // the buffer so its data() is not aligned in to alignof(void*).\n  const std::vector<uint8_t> value_;\n};\n\nusing SocketOptionImplOptRef = absl::optional<std::reference_wrapper<SocketOptionImpl>>;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/tcp_listener_impl.cc",
    "content": "#include \"common/network/tcp_listener_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/event/file_event_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nconst absl::string_view TcpListenerImpl::GlobalMaxCxRuntimeKey =\n    \"overload.global_downstream_max_connections\";\n\nbool TcpListenerImpl::rejectCxOverGlobalLimit() {\n  // Enforce the global connection limit if necessary, immediately closing the accepted connection.\n  Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting();\n\n  if (runtime == nullptr) {\n    // The runtime singleton won't exist in most unit tests that do not need global downstream limit\n    // enforcement. Therefore, there is no need to enforce limits if the singleton doesn't exist.\n    // TODO(tonya11en): Revisit this once runtime is made globally available.\n    return false;\n  }\n\n  // If the connection limit is not set, don't limit the connections, but still track them.\n  // TODO(tonya11en): In integration tests, threadsafeSnapshot is necessary since the FakeUpstreams\n  // use a listener and do not run in a worker thread. In practice, this code path will always be\n  // run on a worker thread, but to prevent failed assertions in test environments, threadsafe\n  // snapshots must be used. 
This must be revisited.\n  const uint64_t global_cx_limit = runtime->threadsafeSnapshot()->getInteger(\n      GlobalMaxCxRuntimeKey, std::numeric_limits<uint64_t>::max());\n  return AcceptedSocketImpl::acceptedSocketCount() >= global_cx_limit;\n}\n\nvoid TcpListenerImpl::onSocketEvent(short flags) {\n  ASSERT(flags & (Event::FileReadyType::Read));\n\n  // TODO(fcoras): Add limit on number of accepted calls per wakeup\n  while (1) {\n    if (!socket_->ioHandle().isOpen()) {\n      PANIC(fmt::format(\"listener accept failure: {}\", errorDetails(errno)));\n    }\n\n    sockaddr_storage remote_addr;\n    socklen_t remote_addr_len = sizeof(remote_addr);\n\n    IoHandlePtr io_handle =\n        socket_->ioHandle().accept(reinterpret_cast<sockaddr*>(&remote_addr), &remote_addr_len);\n    if (io_handle == nullptr) {\n      break;\n    }\n\n    if (rejectCxOverGlobalLimit()) {\n      // The global connection limit has been reached.\n      io_handle->close();\n      cb_.onReject();\n      continue;\n    }\n\n    // Get the local address from the new socket if the listener is listening on IP ANY\n    // (e.g., 0.0.0.0 for IPv4) (local_address_ is nullptr in this case).\n    const Address::InstanceConstSharedPtr& local_address =\n        local_address_ ? local_address_ : io_handle->localAddress();\n\n    // The accept() call that filled in remote_addr doesn't fill in more than the sa_family field\n    // for Unix domain sockets; apparently there isn't a mechanism in the kernel to get the\n    // `sockaddr_un` associated with the client socket when starting from the server socket.\n    // We work around this by using our own name for the socket in this case.\n    // Pass the 'v6only' parameter as true if the local_address is an IPv6 address. 
This has no\n    // effect if the socket is a v4 socket, but for v6 sockets this will create an IPv4 remote\n    // address if an IPv4 local_address was created from an IPv6 mapped IPv4 address.\n    const Address::InstanceConstSharedPtr& remote_address =\n        (remote_addr.ss_family == AF_UNIX)\n            ? io_handle->peerAddress()\n            : Address::addressFromSockAddr(remote_addr, remote_addr_len,\n                                           local_address->ip()->version() ==\n                                               Address::IpVersion::v6);\n\n    cb_.onAccept(\n        std::make_unique<AcceptedSocketImpl>(std::move(io_handle), local_address, remote_address));\n  }\n}\n\nvoid TcpListenerImpl::setupServerSocket(Event::DispatcherImpl& dispatcher, Socket& socket) {\n  socket.ioHandle().listen(backlog_size_);\n\n  // Although onSocketEvent drains to completion, use level triggered mode to avoid potential\n  // loss of the trigger due to transient accept errors.\n  file_event_ = socket.ioHandle().createFileEvent(\n      dispatcher, [this](uint32_t events) -> void { onSocketEvent(events); },\n      Event::FileTriggerType::Level, Event::FileReadyType::Read);\n\n  if (!Network::Socket::applyOptions(socket.options(), socket,\n                                     envoy::config::core::v3::SocketOption::STATE_LISTENING)) {\n    throw CreateListenerException(fmt::format(\"cannot set post-listen socket option on socket: {}\",\n                                              socket.localAddress()->asString()));\n  }\n}\n\nTcpListenerImpl::TcpListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket,\n                                 TcpListenerCallbacks& cb, bool bind_to_port, uint32_t backlog_size)\n    : BaseListenerImpl(dispatcher, std::move(socket)), cb_(cb), backlog_size_(backlog_size) {\n  if (bind_to_port) {\n    setupServerSocket(dispatcher, *socket_);\n  }\n}\n\nvoid TcpListenerImpl::enable() { 
file_event_->setEnabled(Event::FileReadyType::Read); }\n\nvoid TcpListenerImpl::disable() { file_event_->setEnabled(0); }\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/tcp_listener_impl.h",
    "content": "#pragma once\n\n#include \"envoy/runtime/runtime.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"base_listener_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * libevent implementation of Network::Listener for TCP.\n */\nclass TcpListenerImpl : public BaseListenerImpl {\npublic:\n  TcpListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket,\n                  TcpListenerCallbacks& cb, bool bind_to_port, uint32_t backlog_size);\n  void disable() override;\n  void enable() override;\n\n  static const absl::string_view GlobalMaxCxRuntimeKey;\n\nprotected:\n  void setupServerSocket(Event::DispatcherImpl& dispatcher, Socket& socket);\n\n  TcpListenerCallbacks& cb_;\n  const uint32_t backlog_size_;\n\nprivate:\n  void onSocketEvent(short flags);\n\n  // Returns true if global connection limit has been reached and the accepted socket should be\n  // rejected/closed. If the accepted socket is to be admitted, false is returned.\n  static bool rejectCxOverGlobalLimit();\n\n  Event::FileEventPtr file_event_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/transport_socket_options_impl.cc",
    "content": "#include \"common/network/transport_socket_options_impl.h\"\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"common/common/scalar_to_byte_vector.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/application_protocol.h\"\n#include \"common/network/proxy_protocol_filter_state.h\"\n#include \"common/network/upstream_server_name.h\"\n#include \"common/network/upstream_subject_alt_names.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\nvoid commonHashKey(const TransportSocketOptions& options, std::vector<std::uint8_t>& key) {\n  const auto& server_name_overide = options.serverNameOverride();\n  if (server_name_overide.has_value()) {\n    pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(server_name_overide.value()), key);\n  }\n\n  const auto& verify_san_list = options.verifySubjectAltNameListOverride();\n  if (!verify_san_list.empty()) {\n    for (const auto& san : verify_san_list) {\n      pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(san), key);\n    }\n  }\n\n  const auto& alpn_list = options.applicationProtocolListOverride();\n  if (!alpn_list.empty()) {\n    for (const auto& protocol : alpn_list) {\n      pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(protocol), key);\n    }\n  }\n  const auto& alpn_fallback = options.applicationProtocolFallback();\n  if (alpn_fallback.has_value()) {\n    pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(*alpn_fallback), key);\n  }\n}\n} // namespace\n\nvoid AlpnDecoratingTransportSocketOptions::hashKey(std::vector<uint8_t>& key) const {\n  commonHashKey(*this, key);\n}\n\nvoid TransportSocketOptionsImpl::hashKey(std::vector<uint8_t>& key) const {\n  commonHashKey(*this, key);\n}\n\nTransportSocketOptionsSharedPtr\nTransportSocketOptionsUtility::fromFilterState(const StreamInfo::FilterState& filter_state) {\n  absl::string_view server_name;\n  std::vector<std::string> 
application_protocols;\n  std::vector<std::string> subject_alt_names;\n  absl::optional<Network::ProxyProtocolData> proxy_protocol_options;\n\n  bool needs_transport_socket_options = false;\n  if (filter_state.hasData<UpstreamServerName>(UpstreamServerName::key())) {\n    const auto& upstream_server_name =\n        filter_state.getDataReadOnly<UpstreamServerName>(UpstreamServerName::key());\n    server_name = upstream_server_name.value();\n    needs_transport_socket_options = true;\n  }\n\n  if (filter_state.hasData<Network::ApplicationProtocols>(Network::ApplicationProtocols::key())) {\n    const auto& alpn = filter_state.getDataReadOnly<Network::ApplicationProtocols>(\n        Network::ApplicationProtocols::key());\n    application_protocols = alpn.value();\n    needs_transport_socket_options = true;\n  }\n\n  if (filter_state.hasData<UpstreamSubjectAltNames>(UpstreamSubjectAltNames::key())) {\n    const auto& upstream_subject_alt_names =\n        filter_state.getDataReadOnly<UpstreamSubjectAltNames>(UpstreamSubjectAltNames::key());\n    subject_alt_names = upstream_subject_alt_names.value();\n    needs_transport_socket_options = true;\n  }\n\n  if (filter_state.hasData<ProxyProtocolFilterState>(ProxyProtocolFilterState::key())) {\n    const auto& proxy_protocol_filter_state =\n        filter_state.getDataReadOnly<ProxyProtocolFilterState>(ProxyProtocolFilterState::key());\n    proxy_protocol_options.emplace(proxy_protocol_filter_state.value());\n    needs_transport_socket_options = true;\n  }\n\n  if (needs_transport_socket_options) {\n    return std::make_shared<Network::TransportSocketOptionsImpl>(\n        server_name, std::move(subject_alt_names), std::move(application_protocols), absl::nullopt,\n        proxy_protocol_options);\n  } else {\n    return nullptr;\n  }\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/transport_socket_options_impl.h",
    "content": "#pragma once\n\n#include \"envoy/network/proxy_protocol.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n// A wrapper around another TransportSocketOptions that overrides the ALPN fallback.\nclass AlpnDecoratingTransportSocketOptions : public TransportSocketOptions {\npublic:\n  AlpnDecoratingTransportSocketOptions(std::string&& alpn,\n                                       TransportSocketOptionsSharedPtr inner_options)\n      : alpn_fallback_(std::move(alpn)), inner_options_(std::move(inner_options)) {}\n  // Network::TransportSocketOptions\n  const absl::optional<std::string>& serverNameOverride() const override {\n    return inner_options_->serverNameOverride();\n  }\n  const std::vector<std::string>& verifySubjectAltNameListOverride() const override {\n    return inner_options_->verifySubjectAltNameListOverride();\n  }\n  const std::vector<std::string>& applicationProtocolListOverride() const override {\n    return inner_options_->applicationProtocolListOverride();\n  }\n  const absl::optional<std::string>& applicationProtocolFallback() const override {\n    return alpn_fallback_;\n  }\n  absl::optional<Network::ProxyProtocolData> proxyProtocolOptions() const override {\n    return inner_options_->proxyProtocolOptions();\n  }\n  void hashKey(std::vector<uint8_t>& key) const override;\n\nprivate:\n  const absl::optional<std::string> alpn_fallback_;\n  const TransportSocketOptionsSharedPtr inner_options_;\n};\n\nclass TransportSocketOptionsImpl : public TransportSocketOptions {\npublic:\n  TransportSocketOptionsImpl(\n      absl::string_view override_server_name = \"\",\n      std::vector<std::string>&& override_verify_san_list = {},\n      std::vector<std::string>&& override_alpn = {},\n      absl::optional<std::string>&& fallback_alpn = {},\n      absl::optional<Network::ProxyProtocolData> proxy_proto_options = absl::nullopt)\n      : 
override_server_name_(override_server_name.empty()\n                                  ? absl::nullopt\n                                  : absl::optional<std::string>(override_server_name)),\n        override_verify_san_list_{std::move(override_verify_san_list)},\n        override_alpn_list_{std::move(override_alpn)}, alpn_fallback_{std::move(fallback_alpn)},\n        proxy_protocol_options_(proxy_proto_options) {}\n\n  // Network::TransportSocketOptions\n  const absl::optional<std::string>& serverNameOverride() const override {\n    return override_server_name_;\n  }\n  const std::vector<std::string>& verifySubjectAltNameListOverride() const override {\n    return override_verify_san_list_;\n  }\n  const std::vector<std::string>& applicationProtocolListOverride() const override {\n    return override_alpn_list_;\n  }\n  const absl::optional<std::string>& applicationProtocolFallback() const override {\n    return alpn_fallback_;\n  }\n  absl::optional<Network::ProxyProtocolData> proxyProtocolOptions() const override {\n    return proxy_protocol_options_;\n  }\n  void hashKey(std::vector<uint8_t>& key) const override;\n\nprivate:\n  const absl::optional<std::string> override_server_name_;\n  const std::vector<std::string> override_verify_san_list_;\n  const std::vector<std::string> override_alpn_list_;\n  const absl::optional<std::string> alpn_fallback_;\n  const absl::optional<Network::ProxyProtocolData> proxy_protocol_options_;\n};\n\nclass TransportSocketOptionsUtility {\npublic:\n  /**\n   * Construct TransportSocketOptions from StreamInfo::FilterState, using UpstreamServerName\n   * and ApplicationProtocols key in the filter state.\n   * @returns TransportSocketOptionsSharedPtr a shared pointer to the transport socket options,\n   * nullptr if nothing is in the filter state.\n   */\n  static TransportSocketOptionsSharedPtr\n  fromFilterState(const StreamInfo::FilterState& stream_info);\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/udp_default_writer_config.cc",
    "content": "#include \"common/network/udp_default_writer_config.h\"\n\n#include <memory>\n#include <string>\n\n#include \"envoy/config/listener/v3/udp_default_writer_config.pb.h\"\n\n#include \"common/network/udp_packet_writer_handler_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nUdpPacketWriterPtr UdpDefaultWriterFactory::createUdpPacketWriter(Network::IoHandle& io_handle,\n                                                                  Stats::Scope& /*scope*/) {\n  return std::make_unique<UdpDefaultWriter>(io_handle);\n}\n\nProtobufTypes::MessagePtr UdpDefaultWriterConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::config::listener::v3::UdpDefaultWriterOptions>();\n}\n\nUdpPacketWriterFactoryPtr\nUdpDefaultWriterConfigFactory::createUdpPacketWriterFactory(const Protobuf::Message& /*message*/) {\n  return std::make_unique<UdpDefaultWriterFactory>();\n}\n\nstd::string UdpDefaultWriterConfigFactory::name() const { return \"udp_default_writer\"; }\n\nREGISTER_FACTORY(UdpDefaultWriterConfigFactory, Network::UdpPacketWriterConfigFactory);\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/udp_default_writer_config.h",
    "content": "#pragma once\n\n#include \"envoy/network/udp_packet_writer_config.h\"\n#include \"envoy/network/udp_packet_writer_handler.h\"\n#include \"envoy/registry/registry.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass UdpDefaultWriterFactory : public Network::UdpPacketWriterFactory {\npublic:\n  Network::UdpPacketWriterPtr createUdpPacketWriter(Network::IoHandle& io_handle,\n                                                    Stats::Scope& scope) override;\n};\n\n// UdpPacketWriterConfigFactory to create UdpDefaultWriterFactory based on given protobuf\n// This is the default UdpPacketWriterConfigFactory if not specified in config.\nclass UdpDefaultWriterConfigFactory : public UdpPacketWriterConfigFactory {\npublic:\n  // UdpPacketWriterConfigFactory\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  Network::UdpPacketWriterFactoryPtr\n  createUdpPacketWriterFactory(const Protobuf::Message&) override;\n\n  std::string name() const override;\n};\n\nDECLARE_FACTORY(UdpDefaultWriterConfigFactory);\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/udp_listener_impl.cc",
    "content": "#include \"common/network/udp_listener_impl.h\"\n\n#include <cerrno>\n#include <csetjmp>\n#include <cstring>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/exception.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/io_socket_error_impl.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"event2/listener.h\"\n\n#define ENVOY_UDP_LOG(LEVEL, FORMAT, ...)                                                          \\\n  ENVOY_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, \"Listener at {} :\" FORMAT,                            \\\n                      this->localAddress()->asString(), ##__VA_ARGS__)\n\nnamespace Envoy {\nnamespace Network {\n\nUdpListenerImpl::UdpListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket,\n                                 UdpListenerCallbacks& cb, TimeSource& time_source)\n    : BaseListenerImpl(dispatcher, std::move(socket)), cb_(cb), time_source_(time_source) {\n  file_event_ = socket_->ioHandle().createFileEvent(\n      dispatcher, [this](uint32_t events) -> void { onSocketEvent(events); },\n      Event::PlatformDefaultTriggerType, Event::FileReadyType::Read | Event::FileReadyType::Write);\n\n  ASSERT(file_event_);\n\n  if (!Network::Socket::applyOptions(socket_->options(), *socket_,\n                                     envoy::config::core::v3::SocketOption::STATE_BOUND)) {\n    throw CreateListenerException(fmt::format(\"cannot set post-bound socket option on socket: {}\",\n                                              socket_->localAddress()->asString()));\n  }\n}\n\nUdpListenerImpl::~UdpListenerImpl() {\n  disableEvent();\n  
file_event_.reset();\n}\n\nvoid UdpListenerImpl::disable() { disableEvent(); }\n\nvoid UdpListenerImpl::enable() {\n  file_event_->setEnabled(Event::FileReadyType::Read | Event::FileReadyType::Write);\n}\n\nvoid UdpListenerImpl::disableEvent() { file_event_->setEnabled(0); }\n\nvoid UdpListenerImpl::onSocketEvent(short flags) {\n  ASSERT((flags & (Event::FileReadyType::Read | Event::FileReadyType::Write)));\n  ENVOY_UDP_LOG(trace, \"socket event: {}\", flags);\n\n  if (flags & Event::FileReadyType::Read) {\n    handleReadCallback();\n  }\n\n  if (flags & Event::FileReadyType::Write) {\n    handleWriteCallback();\n  }\n}\n\nvoid UdpListenerImpl::handleReadCallback() {\n  ENVOY_UDP_LOG(trace, \"handleReadCallback\");\n  cb_.onReadReady();\n  const Api::IoErrorPtr result = Utility::readPacketsFromSocket(\n      socket_->ioHandle(), *socket_->localAddress(), *this, time_source_, packets_dropped_);\n  // TODO(mattklein123): Handle no error when we limit the number of packets read.\n  if (result->getErrorCode() != Api::IoError::IoErrorCode::Again) {\n    // TODO(mattklein123): When rate limited logging is implemented log this at error level\n    // on a periodic basis.\n    ENVOY_UDP_LOG(debug, \"recvmsg result {}: {}\", static_cast<int>(result->getErrorCode()),\n                  result->getErrorDetails());\n    cb_.onReceiveError(result->getErrorCode());\n  }\n}\n\nvoid UdpListenerImpl::processPacket(Address::InstanceConstSharedPtr local_address,\n                                    Address::InstanceConstSharedPtr peer_address,\n                                    Buffer::InstancePtr buffer, MonotonicTime receive_time) {\n  // UDP listeners are always configured with the socket option that allows pulling the local\n  // address. 
This should never be null.\n  ASSERT(local_address != nullptr);\n  UdpRecvData recvData{\n      {std::move(local_address), std::move(peer_address)}, std::move(buffer), receive_time};\n  cb_.onData(std::move(recvData));\n}\n\nvoid UdpListenerImpl::handleWriteCallback() {\n  ENVOY_UDP_LOG(trace, \"handleWriteCallback\");\n  cb_.onWriteReady(*socket_);\n}\n\nEvent::Dispatcher& UdpListenerImpl::dispatcher() { return dispatcher_; }\n\nconst Address::InstanceConstSharedPtr& UdpListenerImpl::localAddress() const {\n  return socket_->localAddress();\n}\n\nApi::IoCallUint64Result UdpListenerImpl::send(const UdpSendData& send_data) {\n  ENVOY_UDP_LOG(trace, \"send\");\n  Buffer::Instance& buffer = send_data.buffer_;\n\n  Api::IoCallUint64Result send_result =\n      cb_.udpPacketWriter().writePacket(buffer, send_data.local_ip_, send_data.peer_address_);\n\n  // The send_result normalizes the rc_ value to 0 in error conditions.\n  // The drain call is hence 'safe' in success and failure cases.\n  buffer.drain(send_result.rc_);\n  return send_result;\n}\n\nApi::IoCallUint64Result UdpListenerImpl::flush() {\n  ENVOY_UDP_LOG(trace, \"flush\");\n  return cb_.udpPacketWriter().flush();\n}\n\nvoid UdpListenerImpl::activateRead() { file_event_->activate(Event::FileReadyType::Read); }\n\nUdpListenerWorkerRouterImpl::UdpListenerWorkerRouterImpl(uint32_t concurrency)\n    : workers_(concurrency) {}\n\nvoid UdpListenerWorkerRouterImpl::registerWorkerForListener(UdpListenerCallbacks& listener) {\n  absl::WriterMutexLock lock(&mutex_);\n\n  ASSERT(listener.workerIndex() < workers_.size());\n  ASSERT(workers_.at(listener.workerIndex()) == nullptr);\n  workers_.at(listener.workerIndex()) = &listener;\n}\n\nvoid UdpListenerWorkerRouterImpl::unregisterWorkerForListener(UdpListenerCallbacks& listener) {\n  absl::WriterMutexLock lock(&mutex_);\n\n  ASSERT(workers_.at(listener.workerIndex()) == &listener);\n  workers_.at(listener.workerIndex()) = nullptr;\n}\n\nvoid 
UdpListenerWorkerRouterImpl::deliver(uint32_t dest_worker_index, UdpRecvData&& data) {\n  absl::ReaderMutexLock lock(&mutex_);\n\n  ASSERT(dest_worker_index < workers_.size(),\n         \"UdpListenerCallbacks::destination returned out-of-range value\");\n  auto* worker = workers_[dest_worker_index];\n\n  // When a listener is being removed, packets could be processed on some workers after the\n  // listener is removed from other workers, which could result in a nullptr for that worker.\n  if (worker != nullptr) {\n    worker->post(std::move(data));\n  }\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/udp_listener_impl.h",
    "content": "#pragma once\n\n#include <atomic>\n\n#include \"envoy/common/time.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/event/event_impl_base.h\"\n#include \"common/event/file_event_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"base_listener_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * libevent implementation of Network::Listener for UDP.\n */\nclass UdpListenerImpl : public BaseListenerImpl,\n                        public virtual UdpListener,\n                        public UdpPacketProcessor,\n                        protected Logger::Loggable<Logger::Id::udp> {\npublic:\n  UdpListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket,\n                  UdpListenerCallbacks& cb, TimeSource& time_source);\n\n  ~UdpListenerImpl() override;\n\n  // Network::Listener Interface\n  void disable() override;\n  void enable() override;\n\n  // Network::UdpListener Interface\n  Event::Dispatcher& dispatcher() override;\n  const Address::InstanceConstSharedPtr& localAddress() const override;\n  Api::IoCallUint64Result send(const UdpSendData& data) override;\n  Api::IoCallUint64Result flush() override;\n  void activateRead() override;\n\n  void processPacket(Address::InstanceConstSharedPtr local_address,\n                     Address::InstanceConstSharedPtr peer_address, Buffer::InstancePtr buffer,\n                     MonotonicTime receive_time) override;\n\n  uint64_t maxPacketSize() const override {\n    // TODO(danzh) make this variable configurable to support jumbo frames.\n    return MAX_UDP_PACKET_SIZE;\n  }\n\nprotected:\n  void handleWriteCallback();\n  void handleReadCallback();\n\n  UdpListenerCallbacks& cb_;\n  uint32_t packets_dropped_{0};\n\nprivate:\n  void onSocketEvent(short flags);\n  void disableEvent();\n\n  TimeSource& time_source_;\n  Event::FileEventPtr file_event_;\n};\n\nclass UdpListenerWorkerRouterImpl : public UdpListenerWorkerRouter {\npublic:\n  
UdpListenerWorkerRouterImpl(uint32_t concurrency);\n\n  // UdpListenerWorkerRouter\n  void registerWorkerForListener(UdpListenerCallbacks& listener) override;\n  void unregisterWorkerForListener(UdpListenerCallbacks& listener) override;\n  void deliver(uint32_t dest_worker_index, UdpRecvData&& data) override;\n\nprivate:\n  absl::Mutex mutex_;\n  std::vector<UdpListenerCallbacks*> workers_ ABSL_GUARDED_BY(mutex_);\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/udp_packet_writer_handler_impl.cc",
    "content": "#include \"common/network/udp_packet_writer_handler_impl.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nUdpDefaultWriter::UdpDefaultWriter(Network::IoHandle& io_handle)\n    : write_blocked_(false), io_handle_(io_handle) {}\n\nUdpDefaultWriter::~UdpDefaultWriter() = default;\n\nApi::IoCallUint64Result UdpDefaultWriter::writePacket(const Buffer::Instance& buffer,\n                                                      const Address::Ip* local_ip,\n                                                      const Address::Instance& peer_address) {\n  ASSERT(!write_blocked_, \"Cannot write while IO handle is blocked.\");\n  Api::IoCallUint64Result result =\n      Utility::writeToSocket(io_handle_, buffer, local_ip, peer_address);\n  if (result.err_ && result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) {\n    // Writer is blocked when error code received is EWOULDBLOCK/EAGAIN\n    write_blocked_ = true;\n  }\n  return result;\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/udp_packet_writer_handler_impl.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/socket.h\"\n#include \"envoy/network/udp_packet_writer_handler.h\"\n\n#include \"common/network/io_socket_error_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass UdpDefaultWriter : public UdpPacketWriter {\npublic:\n  UdpDefaultWriter(Network::IoHandle& io_handle);\n\n  ~UdpDefaultWriter() override;\n\n  // Following writePacket utilizes Utility::writeToSocket() implementation\n  Api::IoCallUint64Result writePacket(const Buffer::Instance& buffer, const Address::Ip* local_ip,\n                                      const Address::Instance& peer_address) override;\n\n  bool isWriteBlocked() const override { return write_blocked_; }\n  void setWritable() override { write_blocked_ = false; }\n  uint64_t getMaxPacketSize(const Address::Instance& /*peer_address*/) const override {\n    return Network::UdpMaxOutgoingPacketSize;\n  }\n  bool isBatchMode() const override { return false; }\n  Network::UdpPacketWriterBuffer\n  getNextWriteLocation(const Address::Ip* /*local_ip*/,\n                       const Address::Instance& /*peer_address*/) override {\n    return {nullptr, 0, nullptr};\n  }\n  Api::IoCallUint64Result flush() override {\n    return Api::IoCallUint64Result(\n        /*rc=*/0,\n        /*err=*/Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError));\n  }\n\nprivate:\n  bool write_blocked_;\n  Network::IoHandle& io_handle_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/upstream_server_name.cc",
    "content": "#include \"common/network/upstream_server_name.h\"\n\n#include \"common/common/macros.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nconst std::string& UpstreamServerName::key() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.network.upstream_server_name\");\n}\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/upstream_server_name.h",
    "content": "#pragma once\n\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Server name to set in the upstream connection. The filters like tcp_proxy should use this\n * value to override the server name specified in the upstream cluster, for example to override\n * the SNI value in the upstream TLS context.\n */\nclass UpstreamServerName : public StreamInfo::FilterState::Object {\npublic:\n  UpstreamServerName(absl::string_view server_name) : server_name_(server_name) {}\n  const std::string& value() const { return server_name_; }\n  static const std::string& key();\n\nprivate:\n  const std::string server_name_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/upstream_subject_alt_names.cc",
    "content": "#include \"common/network/upstream_subject_alt_names.h\"\n\n#include \"common/common/macros.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nconst std::string& UpstreamSubjectAltNames::key() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.network.upstream_subject_alt_names\");\n}\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/upstream_subject_alt_names.h",
    "content": "#pragma once\n\n#include \"envoy/stream_info/filter_state.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * SANs to validate certificate to set in the upstream connection. Filters can use this one to\n * override the SAN in TLS context.\n */\nclass UpstreamSubjectAltNames : public StreamInfo::FilterState::Object {\npublic:\n  explicit UpstreamSubjectAltNames(const std::vector<std::string>& upstream_subject_alt_names)\n      : upstream_subject_alt_names_(upstream_subject_alt_names) {}\n  const std::vector<std::string>& value() const { return upstream_subject_alt_names_; }\n  static const std::string& key();\n\nprivate:\n  const std::vector<std::string> upstream_subject_alt_names_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/utility.cc",
    "content": "#include \"common/network/utility.h\"\n\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <sstream>\n#include <string>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/io_socket_error_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n// TODO(lambdai): Remove below re-declare in C++17.\nconstexpr absl::string_view Utility::TCP_SCHEME;\nconstexpr absl::string_view Utility::UDP_SCHEME;\nconstexpr absl::string_view Utility::UNIX_SCHEME;\n\nAddress::InstanceConstSharedPtr Utility::resolveUrl(const std::string& url) {\n  if (urlIsTcpScheme(url)) {\n    return parseInternetAddressAndPort(url.substr(TCP_SCHEME.size()));\n  } else if (urlIsUdpScheme(url)) {\n    return parseInternetAddressAndPort(url.substr(UDP_SCHEME.size()));\n  } else if (urlIsUnixScheme(url)) {\n    return Address::InstanceConstSharedPtr{\n        new Address::PipeInstance(url.substr(UNIX_SCHEME.size()))};\n  } else {\n    throw EnvoyException(absl::StrCat(\"unknown protocol scheme: \", url));\n  }\n}\n\nbool Utility::urlIsTcpScheme(absl::string_view url) { return absl::StartsWith(url, TCP_SCHEME); }\n\nbool Utility::urlIsUdpScheme(absl::string_view url) { return absl::StartsWith(url, UDP_SCHEME); }\n\nbool Utility::urlIsUnixScheme(absl::string_view url) { return absl::StartsWith(url, UNIX_SCHEME); }\n\nnamespace {\n\nstd::string 
hostFromUrl(const std::string& url, absl::string_view scheme,\n                        absl::string_view scheme_name) {\n  if (!absl::StartsWith(url, scheme)) {\n    throw EnvoyException(fmt::format(\"expected {} scheme, got: {}\", scheme_name, url));\n  }\n\n  const size_t colon_index = url.find(':', scheme.size());\n\n  if (colon_index == std::string::npos) {\n    throw EnvoyException(absl::StrCat(\"malformed url: \", url));\n  }\n\n  return url.substr(scheme.size(), colon_index - scheme.size());\n}\n\nuint32_t portFromUrl(const std::string& url, absl::string_view scheme,\n                     absl::string_view scheme_name) {\n  if (!absl::StartsWith(url, scheme)) {\n    throw EnvoyException(fmt::format(\"expected {} scheme, got: {}\", scheme_name, url));\n  }\n\n  const size_t colon_index = url.find(':', scheme.size());\n\n  if (colon_index == std::string::npos) {\n    throw EnvoyException(absl::StrCat(\"malformed url: \", url));\n  }\n\n  const size_t rcolon_index = url.rfind(':');\n  if (colon_index != rcolon_index) {\n    throw EnvoyException(absl::StrCat(\"malformed url: \", url));\n  }\n\n  try {\n    return std::stoi(url.substr(colon_index + 1));\n  } catch (const std::invalid_argument& e) {\n    throw EnvoyException(e.what());\n  } catch (const std::out_of_range& e) {\n    throw EnvoyException(e.what());\n  }\n}\n\nApi::IoCallUint64Result receiveMessage(uint64_t max_packet_size, Buffer::InstancePtr& buffer,\n                                       IoHandle::RecvMsgOutput& output, IoHandle& handle,\n                                       const Address::Instance& local_address) {\n\n  Buffer::RawSlice slice;\n  const uint64_t num_slices = buffer->reserve(max_packet_size, &slice, 1);\n  ASSERT(num_slices == 1u);\n\n  Api::IoCallUint64Result result =\n      handle.recvmsg(&slice, num_slices, local_address.ip()->port(), output);\n\n  if (!result.ok()) {\n    return result;\n  }\n\n  // Adjust memory length and commit slice to buffer\n  slice.len_ = 
std::min(slice.len_, static_cast<size_t>(result.rc_));\n  buffer->commit(&slice, 1);\n\n  return result;\n}\n\n} // namespace\n\nstd::string Utility::hostFromTcpUrl(const std::string& url) {\n  return hostFromUrl(url, TCP_SCHEME, \"TCP\");\n}\n\nuint32_t Utility::portFromTcpUrl(const std::string& url) {\n  return portFromUrl(url, TCP_SCHEME, \"TCP\");\n}\n\nstd::string Utility::hostFromUdpUrl(const std::string& url) {\n  return hostFromUrl(url, UDP_SCHEME, \"UDP\");\n}\n\nuint32_t Utility::portFromUdpUrl(const std::string& url) {\n  return portFromUrl(url, UDP_SCHEME, \"UDP\");\n}\n\nAddress::InstanceConstSharedPtr Utility::parseInternetAddress(const std::string& ip_address,\n                                                              uint16_t port, bool v6only) {\n  sockaddr_in sa4;\n  if (inet_pton(AF_INET, ip_address.c_str(), &sa4.sin_addr) == 1) {\n    sa4.sin_family = AF_INET;\n    sa4.sin_port = htons(port);\n    return std::make_shared<Address::Ipv4Instance>(&sa4);\n  }\n  sockaddr_in6 sa6;\n  memset(&sa6, 0, sizeof(sa6));\n  if (inet_pton(AF_INET6, ip_address.c_str(), &sa6.sin6_addr) == 1) {\n    sa6.sin6_family = AF_INET6;\n    sa6.sin6_port = htons(port);\n    return std::make_shared<Address::Ipv6Instance>(sa6, v6only);\n  }\n  throwWithMalformedIp(ip_address);\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nAddress::InstanceConstSharedPtr Utility::parseInternetAddressAndPort(const std::string& ip_address,\n                                                                     bool v6only) {\n  if (ip_address.empty()) {\n    throwWithMalformedIp(ip_address);\n  }\n  if (ip_address[0] == '[') {\n    // Appears to be an IPv6 address. 
Find the \"]:\" that separates the address from the port.\n    const auto pos = ip_address.rfind(\"]:\");\n    if (pos == std::string::npos) {\n      throwWithMalformedIp(ip_address);\n    }\n    const auto ip_str = ip_address.substr(1, pos - 1);\n    const auto port_str = ip_address.substr(pos + 2);\n    uint64_t port64 = 0;\n    if (port_str.empty() || !absl::SimpleAtoi(port_str, &port64) || port64 > 65535) {\n      throwWithMalformedIp(ip_address);\n    }\n    sockaddr_in6 sa6;\n    memset(&sa6, 0, sizeof(sa6));\n    if (ip_str.empty() || inet_pton(AF_INET6, ip_str.c_str(), &sa6.sin6_addr) != 1) {\n      throwWithMalformedIp(ip_address);\n    }\n    sa6.sin6_family = AF_INET6;\n    sa6.sin6_port = htons(port64);\n    return std::make_shared<Address::Ipv6Instance>(sa6, v6only);\n  }\n  // Treat it as an IPv4 address followed by a port.\n  const auto pos = ip_address.rfind(':');\n  if (pos == std::string::npos) {\n    throwWithMalformedIp(ip_address);\n  }\n  const auto ip_str = ip_address.substr(0, pos);\n  const auto port_str = ip_address.substr(pos + 1);\n  uint64_t port64 = 0;\n  if (port_str.empty() || !absl::SimpleAtoi(port_str, &port64) || port64 > 65535) {\n    throwWithMalformedIp(ip_address);\n  }\n  sockaddr_in sa4;\n  memset(&sa4, 0, sizeof(sa4));\n  if (ip_str.empty() || inet_pton(AF_INET, ip_str.c_str(), &sa4.sin_addr) != 1) {\n    throwWithMalformedIp(ip_address);\n  }\n  sa4.sin_family = AF_INET;\n  sa4.sin_port = htons(port64);\n  return std::make_shared<Address::Ipv4Instance>(&sa4);\n}\n\nAddress::InstanceConstSharedPtr Utility::copyInternetAddressAndPort(const Address::Ip& ip) {\n  if (ip.version() == Address::IpVersion::v4) {\n    return std::make_shared<Address::Ipv4Instance>(ip.addressAsString(), ip.port());\n  }\n  return std::make_shared<Address::Ipv6Instance>(ip.addressAsString(), ip.port());\n}\n\nvoid Utility::throwWithMalformedIp(absl::string_view ip_address) {\n  throw EnvoyException(absl::StrCat(\"malformed IP address: \", 
ip_address));\n}\n\n// TODO(hennna): Currently getLocalAddress does not support choosing between\n// multiple interfaces and addresses not returned by getifaddrs. In addition,\n// the default is to return a loopback address of type version. This function may\n// need to be updated in the future. Discussion can be found at Github issue #939.\nAddress::InstanceConstSharedPtr Utility::getLocalAddress(const Address::IpVersion version) {\n  Address::InstanceConstSharedPtr ret;\n#ifdef SUPPORTS_GETIFADDRS\n  struct ifaddrs* ifaddr;\n  struct ifaddrs* ifa;\n\n  const int rc = getifaddrs(&ifaddr);\n  RELEASE_ASSERT(!rc, \"\");\n\n  // man getifaddrs(3)\n  for (ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next) {\n    if (ifa->ifa_addr == nullptr) {\n      continue;\n    }\n\n    if ((ifa->ifa_addr->sa_family == AF_INET && version == Address::IpVersion::v4) ||\n        (ifa->ifa_addr->sa_family == AF_INET6 && version == Address::IpVersion::v6)) {\n      const struct sockaddr_storage* addr =\n          reinterpret_cast<const struct sockaddr_storage*>(ifa->ifa_addr);\n      ret = Address::addressFromSockAddr(\n          *addr, (version == Address::IpVersion::v4) ? 
sizeof(sockaddr_in) : sizeof(sockaddr_in6));\n      if (!isLoopbackAddress(*ret)) {\n        break;\n      }\n    }\n  }\n\n  if (ifaddr) {\n    freeifaddrs(ifaddr);\n  }\n#endif\n\n  // If the local address is not found above, then return the loopback address by default.\n  if (ret == nullptr) {\n    if (version == Address::IpVersion::v4) {\n      ret = std::make_shared<Address::Ipv4Instance>(\"127.0.0.1\");\n    } else if (version == Address::IpVersion::v6) {\n      ret = std::make_shared<Address::Ipv6Instance>(\"::1\");\n    }\n  }\n  return ret;\n}\n\nbool Utility::isSameIpOrLoopback(const ConnectionSocket& socket) {\n  // These are local:\n  // - Pipes\n  // - Sockets to a loopback address\n  // - Sockets where the local and remote address (ignoring port) are the same\n  const auto& remote_address = socket.remoteAddress();\n  if (remote_address->type() == Address::Type::Pipe || isLoopbackAddress(*remote_address)) {\n    return true;\n  }\n  const auto local_ip = socket.localAddress()->ip();\n  const auto remote_ip = remote_address->ip();\n  if (remote_ip != nullptr && local_ip != nullptr &&\n      remote_ip->addressAsString() == local_ip->addressAsString()) {\n    return true;\n  }\n  return false;\n}\n\nbool Utility::isInternalAddress(const Address::Instance& address) {\n  if (address.type() != Address::Type::Ip) {\n    return false;\n  }\n\n  if (address.ip()->version() == Address::IpVersion::v4) {\n    // Handle the RFC1918 space for IPV4. 
Also count loopback as internal.\n    const uint32_t address4 = address.ip()->ipv4()->address();\n    const uint8_t* address4_bytes = reinterpret_cast<const uint8_t*>(&address4);\n    if ((address4_bytes[0] == 10) || (address4_bytes[0] == 192 && address4_bytes[1] == 168) ||\n        (address4_bytes[0] == 172 && address4_bytes[1] >= 16 && address4_bytes[1] <= 31) ||\n        address4 == htonl(INADDR_LOOPBACK)) {\n      return true;\n    } else {\n      return false;\n    }\n  }\n\n  // Local IPv6 address prefix defined in RFC4193. Local addresses have prefix FC00::/7.\n  // Currently, the FD00::/8 prefix is locally assigned and FC00::/8 may be defined in the\n  // future.\n  static_assert(sizeof(absl::uint128) == sizeof(in6addr_loopback),\n                \"sizeof(absl::uint128) != sizeof(in6addr_loopback)\");\n  const absl::uint128 address6 = address.ip()->ipv6()->address();\n  const uint8_t* address6_bytes = reinterpret_cast<const uint8_t*>(&address6);\n  if (address6_bytes[0] == 0xfd ||\n      memcmp(&address6, &in6addr_loopback, sizeof(in6addr_loopback)) == 0) {\n    return true;\n  }\n\n  return false;\n}\n\nbool Utility::isLoopbackAddress(const Address::Instance& address) {\n  if (address.type() != Address::Type::Ip) {\n    return false;\n  }\n\n  if (address.ip()->version() == Address::IpVersion::v4) {\n    // Compare to the canonical v4 loopback address: 127.0.0.1.\n    return address.ip()->ipv4()->address() == htonl(INADDR_LOOPBACK);\n  } else if (address.ip()->version() == Address::IpVersion::v6) {\n    static_assert(sizeof(absl::uint128) == sizeof(in6addr_loopback),\n                  \"sizeof(absl::uint128) != sizeof(in6addr_loopback)\");\n    absl::uint128 addr = address.ip()->ipv6()->address();\n    return 0 == memcmp(&addr, &in6addr_loopback, sizeof(in6addr_loopback));\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nAddress::InstanceConstSharedPtr Utility::getCanonicalIpv4LoopbackAddress() {\n  CONSTRUCT_ON_FIRST_USE(Address::InstanceConstSharedPtr,\n      
                   new Address::Ipv4Instance(\"127.0.0.1\", 0, nullptr));\n}\n\nAddress::InstanceConstSharedPtr Utility::getIpv6LoopbackAddress() {\n  CONSTRUCT_ON_FIRST_USE(Address::InstanceConstSharedPtr,\n                         new Address::Ipv6Instance(\"::1\", 0, nullptr));\n}\n\nAddress::InstanceConstSharedPtr Utility::getIpv4AnyAddress() {\n  CONSTRUCT_ON_FIRST_USE(Address::InstanceConstSharedPtr,\n                         new Address::Ipv4Instance(static_cast<uint32_t>(0)));\n}\n\nAddress::InstanceConstSharedPtr Utility::getIpv6AnyAddress() {\n  CONSTRUCT_ON_FIRST_USE(Address::InstanceConstSharedPtr,\n                         new Address::Ipv6Instance(static_cast<uint32_t>(0)));\n}\n\nconst std::string& Utility::getIpv4CidrCatchAllAddress() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"0.0.0.0/0\");\n}\n\nconst std::string& Utility::getIpv6CidrCatchAllAddress() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"::/0\");\n}\n\nAddress::InstanceConstSharedPtr Utility::getAddressWithPort(const Address::Instance& address,\n                                                            uint32_t port) {\n  switch (address.ip()->version()) {\n  case Address::IpVersion::v4:\n    return std::make_shared<Address::Ipv4Instance>(address.ip()->addressAsString(), port);\n  case Address::IpVersion::v6:\n    return std::make_shared<Address::Ipv6Instance>(address.ip()->addressAsString(), port);\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nAddress::InstanceConstSharedPtr Utility::getOriginalDst(Socket& sock) {\n#ifdef SOL_IP\n\n  if (sock.addressType() != Address::Type::Ip) {\n    return nullptr;\n  }\n\n  auto ipVersion = sock.ipVersion();\n  if (!ipVersion.has_value()) {\n    return nullptr;\n  }\n\n  sockaddr_storage orig_addr;\n  memset(&orig_addr, 0, sizeof(orig_addr));\n  socklen_t addr_len = sizeof(sockaddr_storage);\n  int status;\n\n  if (*ipVersion == Address::IpVersion::v4) {\n    status = sock.getSocketOption(SOL_IP, SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_;\n  } else {\n   
 status = sock.getSocketOption(SOL_IPV6, IP6T_SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_;\n  }\n\n  if (status != 0) {\n    return nullptr;\n  }\n\n  return Address::addressFromSockAddr(orig_addr, 0, true /* default for v6 constructor */);\n#else\n  // TODO(zuercher): determine if connection redirection is possible under macOS (c.f. pfctl and\n  // divert), and whether it's possible to find the learn destination address.\n  UNREFERENCED_PARAMETER(sock);\n  return nullptr;\n#endif\n}\n\nvoid Utility::parsePortRangeList(absl::string_view string, std::list<PortRange>& list) {\n  const auto ranges = StringUtil::splitToken(string, \",\");\n  for (const auto& s : ranges) {\n    const std::string s_string{s};\n    std::stringstream ss(s_string);\n    uint32_t min = 0;\n    uint32_t max = 0;\n\n    if (s.find(\"-\") != std::string::npos) {\n      char dash = 0;\n      ss >> min;\n      ss >> dash;\n      ss >> max;\n    } else {\n      ss >> min;\n      max = min;\n    }\n\n    if (s.empty() || (min > 65535) || (max > 65535) || ss.fail() || !ss.eof()) {\n      throw EnvoyException(fmt::format(\"invalid port number or range '{}'\", s_string));\n    }\n\n    list.emplace_back(PortRange(min, max));\n  }\n}\n\nbool Utility::portInRangeList(const Address::Instance& address, const std::list<PortRange>& list) {\n  if (address.type() != Address::Type::Ip) {\n    return false;\n  }\n\n  for (const PortRange& p : list) {\n    if (p.contains(address.ip()->port())) {\n      return true;\n    }\n  }\n  return false;\n}\n\nabsl::uint128 Utility::Ip6ntohl(const absl::uint128& address) {\n#ifdef ABSL_IS_LITTLE_ENDIAN\n  return flipOrder(address);\n#else\n  return address;\n#endif\n}\n\nabsl::uint128 Utility::Ip6htonl(const absl::uint128& address) {\n#ifdef ABSL_IS_LITTLE_ENDIAN\n  return flipOrder(address);\n#else\n  return address;\n#endif\n}\n\nabsl::uint128 Utility::flipOrder(const absl::uint128& input) {\n  absl::uint128 result{0};\n  absl::uint128 data = input;\n  for (int i = 0; i 
< 16; i++) {\n    result <<= 8;\n    result |= (data & 0x000000000000000000000000000000FF);\n    data >>= 8;\n  }\n  return result;\n}\n\nAddress::InstanceConstSharedPtr\nUtility::protobufAddressToAddress(const envoy::config::core::v3::Address& proto_address) {\n  switch (proto_address.address_case()) {\n  case envoy::config::core::v3::Address::AddressCase::kSocketAddress:\n    return Utility::parseInternetAddress(proto_address.socket_address().address(),\n                                         proto_address.socket_address().port_value(),\n                                         !proto_address.socket_address().ipv4_compat());\n  case envoy::config::core::v3::Address::AddressCase::kPipe:\n    return std::make_shared<Address::PipeInstance>(proto_address.pipe().path(),\n                                                   proto_address.pipe().mode());\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid Utility::addressToProtobufAddress(const Address::Instance& address,\n                                       envoy::config::core::v3::Address& proto_address) {\n  if (address.type() == Address::Type::Pipe) {\n    proto_address.mutable_pipe()->set_path(address.asString());\n  } else {\n    ASSERT(address.type() == Address::Type::Ip);\n    auto* socket_address = proto_address.mutable_socket_address();\n    socket_address->set_address(address.ip()->addressAsString());\n    socket_address->set_port_value(address.ip()->port());\n  }\n}\n\nSocket::Type\nUtility::protobufAddressSocketType(const envoy::config::core::v3::Address& proto_address) {\n  switch (proto_address.address_case()) {\n  case envoy::config::core::v3::Address::AddressCase::kSocketAddress: {\n    const auto protocol = proto_address.socket_address().protocol();\n    switch (protocol) {\n    case envoy::config::core::v3::SocketAddress::TCP:\n      return Socket::Type::Stream;\n    case envoy::config::core::v3::SocketAddress::UDP:\n      return Socket::Type::Datagram;\n    default:\n      
NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n  case envoy::config::core::v3::Address::AddressCase::kPipe:\n    return Socket::Type::Stream;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nApi::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, const Buffer::Instance& buffer,\n                                               const Address::Ip* local_ip,\n                                               const Address::Instance& peer_address) {\n  Buffer::RawSliceVector slices = buffer.getRawSlices();\n  return writeToSocket(handle, slices.data(), slices.size(), local_ip, peer_address);\n}\n\nApi::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, Buffer::RawSlice* slices,\n                                               uint64_t num_slices, const Address::Ip* local_ip,\n                                               const Address::Instance& peer_address) {\n  Api::IoCallUint64Result send_result(\n      /*rc=*/0, /*err=*/Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError));\n  do {\n    send_result = handle.sendmsg(slices, num_slices, 0, local_ip, peer_address);\n  } while (!send_result.ok() &&\n           // Send again if interrupted.\n           send_result.err_->getErrorCode() == Api::IoError::IoErrorCode::Interrupt);\n\n  if (send_result.ok()) {\n    ENVOY_LOG_MISC(trace, \"sendmsg bytes {}\", send_result.rc_);\n  } else {\n    ENVOY_LOG_MISC(debug, \"sendmsg failed with error code {}: {}\",\n                   static_cast<int>(send_result.err_->getErrorCode()),\n                   send_result.err_->getErrorDetails());\n  }\n  return send_result;\n}\n\nvoid passPayloadToProcessor(uint64_t bytes_read, Buffer::InstancePtr buffer,\n                            Address::InstanceConstSharedPtr peer_addess,\n                            Address::InstanceConstSharedPtr local_address,\n                            UdpPacketProcessor& udp_packet_processor, MonotonicTime receive_time) {\n  RELEASE_ASSERT(\n      peer_addess != nullptr,\n      
fmt::format(\"Unable to get remote address on the socket bount to local address: {} \",\n                  local_address->asString()));\n\n  // Unix domain sockets are not supported\n  RELEASE_ASSERT(peer_addess->type() == Address::Type::Ip,\n                 fmt::format(\"Unsupported remote address: {} local address: {}, receive size: \"\n                             \"{}\",\n                             peer_addess->asString(), local_address->asString(), bytes_read));\n  udp_packet_processor.processPacket(std::move(local_address), std::move(peer_addess),\n                                     std::move(buffer), receive_time);\n}\n\nApi::IoCallUint64Result Utility::readFromSocket(IoHandle& handle,\n                                                const Address::Instance& local_address,\n                                                UdpPacketProcessor& udp_packet_processor,\n                                                MonotonicTime receive_time,\n                                                uint32_t* packets_dropped) {\n\n  if (handle.supportsUdpGro()) {\n    Buffer::InstancePtr buffer = std::make_unique<Buffer::OwnedImpl>();\n    IoHandle::RecvMsgOutput output(1, packets_dropped);\n\n    // TODO(yugant): Avoid allocating 24k for each read by getting memory from UdpPacketProcessor\n    const uint64_t max_packet_size_with_gro = 16 * udp_packet_processor.maxPacketSize();\n\n    Api::IoCallUint64Result result =\n        receiveMessage(max_packet_size_with_gro, buffer, output, handle, local_address);\n\n    if (!result.ok()) {\n      return result;\n    }\n\n    const uint64_t gso_size = output.msg_[0].gso_size_;\n    ENVOY_LOG_MISC(trace, \"recvmsg bytes {} with gso_size as {}\", result.rc_, gso_size);\n\n    // Skip gso segmentation and proceed as a single payload.\n    if (gso_size == 0u) {\n      passPayloadToProcessor(result.rc_, std::move(buffer), std::move(output.msg_[0].peer_address_),\n                             
std::move(output.msg_[0].local_address_), udp_packet_processor,\n                             receive_time);\n      return result;\n    }\n\n    // Segment the buffer read by the recvmsg syscall into gso_sized sub buffers.\n    while (buffer->length() > 0) {\n      const uint64_t bytes_to_copy = std::min(buffer->length(), gso_size);\n      Buffer::InstancePtr sub_buffer = std::make_unique<Buffer::OwnedImpl>();\n      sub_buffer->move(*buffer, bytes_to_copy);\n      passPayloadToProcessor(bytes_to_copy, std::move(sub_buffer), output.msg_[0].peer_address_,\n                             output.msg_[0].local_address_, udp_packet_processor, receive_time);\n    }\n\n    return result;\n  }\n\n  if (handle.supportsMmsg()) {\n    const uint32_t num_packets_per_mmsg_call = 16u;\n    const uint32_t num_slices_per_packet = 1u;\n    absl::FixedArray<Buffer::InstancePtr> buffers(num_packets_per_mmsg_call);\n    RawSliceArrays slices(num_packets_per_mmsg_call,\n                          absl::FixedArray<Buffer::RawSlice>(num_slices_per_packet));\n    for (uint32_t i = 0; i < num_packets_per_mmsg_call; ++i) {\n      buffers[i] = std::make_unique<Buffer::OwnedImpl>();\n      const uint64_t num_slices = buffers[i]->reserve(udp_packet_processor.maxPacketSize(),\n                                                      slices[i].data(), num_slices_per_packet);\n      ASSERT(num_slices == num_slices_per_packet);\n    }\n\n    IoHandle::RecvMsgOutput output(num_packets_per_mmsg_call, packets_dropped);\n    Api::IoCallUint64Result result = handle.recvmmsg(slices, local_address.ip()->port(), output);\n    if (!result.ok()) {\n      return result;\n    }\n\n    uint64_t packets_read = result.rc_;\n    ENVOY_LOG_MISC(trace, \"recvmmsg read {} packets\", packets_read);\n    for (uint64_t i = 0; i < packets_read; ++i) {\n      Buffer::RawSlice* slice = slices[i].data();\n      const uint64_t msg_len = output.msg_[i].msg_len_;\n      ASSERT(msg_len <= slice->len_);\n      ENVOY_LOG_MISC(debug, 
\"Receive a packet with {} bytes from {}\", msg_len,\n                     output.msg_[i].peer_address_->asString());\n\n      // Adjust used memory length and commit slice to buffer\n      slice->len_ = std::min(slice->len_, static_cast<size_t>(msg_len));\n      buffers[i]->commit(slice, 1);\n\n      passPayloadToProcessor(msg_len, std::move(buffers[i]), output.msg_[i].peer_address_,\n                             output.msg_[i].local_address_, udp_packet_processor, receive_time);\n    }\n    return result;\n  }\n\n  Buffer::InstancePtr buffer = std::make_unique<Buffer::OwnedImpl>();\n  IoHandle::RecvMsgOutput output(1, packets_dropped);\n\n  Api::IoCallUint64Result result =\n      receiveMessage(udp_packet_processor.maxPacketSize(), buffer, output, handle, local_address);\n\n  if (!result.ok()) {\n    return result;\n  }\n\n  ENVOY_LOG_MISC(trace, \"recvmsg bytes {}\", result.rc_);\n\n  passPayloadToProcessor(result.rc_, std::move(buffer), std::move(output.msg_[0].peer_address_),\n                         std::move(output.msg_[0].local_address_), udp_packet_processor,\n                         receive_time);\n  return result;\n}\n\nApi::IoErrorPtr Utility::readPacketsFromSocket(IoHandle& handle,\n                                               const Address::Instance& local_address,\n                                               UdpPacketProcessor& udp_packet_processor,\n                                               TimeSource& time_source, uint32_t& packets_dropped) {\n  do {\n    const uint32_t old_packets_dropped = packets_dropped;\n    const MonotonicTime receive_time = time_source.monotonicTime();\n    Api::IoCallUint64Result result = Utility::readFromSocket(\n        handle, local_address, udp_packet_processor, receive_time, &packets_dropped);\n\n    if (!result.ok()) {\n      // No more to read or encountered a system error.\n      return std::move(result.err_);\n    }\n\n    if (result.rc_ == 0) {\n      // TODO(conqerAtapple): Is zero length packet 
interesting? If so add stats\n      // for it. Otherwise remove the warning log below.\n      ENVOY_LOG_MISC(trace, \"received 0-length packet\");\n    }\n\n    if (packets_dropped != old_packets_dropped) {\n      // The kernel tracks SO_RXQ_OVFL as a uint32 which can overflow to a smaller\n      // value. So as long as this count differs from previously recorded value,\n      // more packets are dropped by kernel.\n      const uint32_t delta =\n          (packets_dropped > old_packets_dropped)\n              ? (packets_dropped - old_packets_dropped)\n              : (packets_dropped + (std::numeric_limits<uint32_t>::max() - old_packets_dropped) +\n                 1);\n      // TODO(danzh) add stats for this.\n      ENVOY_LOG_MISC(\n          debug, \"Kernel dropped {} more packets. Consider increase receive buffer size.\", delta);\n    }\n  } while (true);\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/network/utility.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/listener.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * Utility class to represent TCP/UDP port range\n */\nclass PortRange {\npublic:\n  PortRange(uint32_t min, uint32_t max) : min_(min), max_(max) {}\n\n  bool contains(uint32_t port) const { return (port >= min_ && port <= max_); }\n\nprivate:\n  const uint32_t min_;\n  const uint32_t max_;\n};\n\nusing PortRangeList = std::list<PortRange>;\n\n/**\n * A callback interface used by readFromSocket() to pass packets read from\n * socket.\n */\nclass UdpPacketProcessor {\npublic:\n  virtual ~UdpPacketProcessor() = default;\n\n  /**\n   * Consume the packet read out of the socket with the information from UDP\n   * header.\n   * @param local_address is the destination address in the UDP header.\n   * @param peer_address is the source address in the UDP header.\n   * @param buffer contains the packet read.\n   * @param receive_time is the time when the packet is read.\n   */\n  virtual void processPacket(Address::InstanceConstSharedPtr local_address,\n                             Address::InstanceConstSharedPtr peer_address,\n                             Buffer::InstancePtr buffer, MonotonicTime receive_time) PURE;\n\n  /**\n   * The expected max size of the packet to be read. 
If it's smaller than\n   * actually packets received, the payload will be truncated.\n   */\n  virtual uint64_t maxPacketSize() const PURE;\n};\n\nstatic const uint64_t MAX_UDP_PACKET_SIZE = 1500;\n\n/**\n * Common network utility routines.\n */\nclass Utility {\npublic:\n  static constexpr absl::string_view TCP_SCHEME{\"tcp://\"};\n  static constexpr absl::string_view UDP_SCHEME{\"udp://\"};\n  static constexpr absl::string_view UNIX_SCHEME{\"unix://\"};\n\n  /**\n   * Resolve a URL.\n   * @param url supplies the url to resolve.\n   * @return Address::InstanceConstSharedPtr the resolved address.\n   */\n  static Address::InstanceConstSharedPtr resolveUrl(const std::string& url);\n\n  /**\n   * Match a URL to the TCP scheme\n   * @param url supplies the URL to match.\n   * @return bool true if the URL matches the TCP scheme, false otherwise.\n   */\n  static bool urlIsTcpScheme(absl::string_view url);\n\n  /**\n   * Match a URL to the UDP scheme\n   * @param url supplies the URL to match.\n   * @return bool true if the URL matches the UDP scheme, false otherwise.\n   */\n  static bool urlIsUdpScheme(absl::string_view url);\n\n  /**\n   * Match a URL to the Unix scheme\n   * @param url supplies the Unix to match.\n   * @return bool true if the URL matches the Unix scheme, false otherwise.\n   */\n  static bool urlIsUnixScheme(absl::string_view url);\n\n  /**\n   * Parses the host from a TCP URL\n   * @param the URL to parse host from\n   * @return std::string the parsed host\n   */\n  static std::string hostFromTcpUrl(const std::string& url);\n\n  /**\n   * Parses the port from a TCP URL\n   * @param the URL to parse port from\n   * @return uint32_t the parsed port\n   */\n  static uint32_t portFromTcpUrl(const std::string& url);\n\n  /**\n   * Parses the host from a UDP URL\n   * @param the URL to parse host from\n   * @return std::string the parsed host\n   */\n  static std::string hostFromUdpUrl(const std::string& url);\n\n  /**\n   * Parses the port from a UDP 
URL\n   * @param the URL to parse port from\n   * @return uint32_t the parsed port\n   */\n  static uint32_t portFromUdpUrl(const std::string& url);\n\n  /**\n   * Parse an internet host address (IPv4 or IPv6) and create an Instance from it. The address must\n   * not include a port number. Throws EnvoyException if unable to parse the address.\n   * @param ip_address string to be parsed as an internet address.\n   * @param port optional port to include in Instance created from ip_address, 0 by default.\n   * @param v6only disable IPv4-IPv6 mapping for IPv6 addresses?\n   * @return pointer to the Instance, or nullptr if unable to parse the address.\n   */\n  static Address::InstanceConstSharedPtr\n  parseInternetAddress(const std::string& ip_address, uint16_t port = 0, bool v6only = true);\n\n  /**\n   * Parse an internet host address (IPv4 or IPv6) AND port, and create an Instance from it. Throws\n   * EnvoyException if unable to parse the address. This is needed when a shared pointer is needed\n   * but only a raw instance is available.\n   * @param Address::Ip& to be copied to the new instance.\n   * @return pointer to the Instance.\n   */\n  static Address::InstanceConstSharedPtr copyInternetAddressAndPort(const Address::Ip& ip);\n\n  /**\n   * Create a new Instance from an internet host address (IPv4 or IPv6) and port.\n   * @param ip_addr string to be parsed as an internet address and port. Examples:\n   *        - \"1.2.3.4:80\"\n   *        - \"[1234:5678::9]:443\"\n   * @param v6only disable IPv4-IPv6 mapping for IPv6 addresses?\n   * @return pointer to the Instance.\n   */\n  static Address::InstanceConstSharedPtr parseInternetAddressAndPort(const std::string& ip_address,\n                                                                     bool v6only = true);\n\n  /**\n   * Get the local address of the first interface address that is of type\n   * version and is not a loopback address. 
If no matches are found, return the\n   * loopback address of type version.\n   * @param the local address IP version.\n   * @return the local IP address of the server\n   */\n  static Address::InstanceConstSharedPtr getLocalAddress(const Address::IpVersion version);\n\n  /**\n   * Determine whether this is a local connection.\n   * @return bool the address is a local connection.\n   */\n  static bool isSameIpOrLoopback(const ConnectionSocket& socket);\n\n  /**\n   * Determine whether this is an internal (RFC1918) address.\n   * @return bool the address is an RFC1918 address.\n   */\n  static bool isInternalAddress(const Address::Instance& address);\n\n  /**\n   * Check if address is loopback address.\n   * @param address IP address to check.\n   * @return true if so, otherwise false\n   */\n  static bool isLoopbackAddress(const Address::Instance& address);\n\n  /**\n   * @return Address::InstanceConstSharedPtr an address that represents the canonical IPv4 loopback\n   *         address (i.e. \"127.0.0.1\"). Note that the range \"127.0.0.0/8\" is all defined as the\n   *         loopback range, but the address typically used (e.g. in tests) is \"127.0.0.1\".\n   */\n  static Address::InstanceConstSharedPtr getCanonicalIpv4LoopbackAddress();\n\n  /**\n   * @return Address::InstanceConstSharedPtr an address that represents the IPv6 loopback address\n   *         (i.e. \"::1\").\n   */\n  static Address::InstanceConstSharedPtr getIpv6LoopbackAddress();\n\n  /**\n   * @return Address::InstanceConstSharedPtr an address that represents the IPv4 wildcard address\n   *         (i.e. \"0.0.0.0\"). Used during binding to indicate that incoming connections to any\n   *         local IPv4 address are to be accepted.\n   */\n  static Address::InstanceConstSharedPtr getIpv4AnyAddress();\n\n  /**\n   * @return Address::InstanceConstSharedPtr an address that represents the IPv6 wildcard address\n   *         (i.e. \"::\"). 
Used during binding to indicate that incoming connections to any local\n   *         IPv6 address are to be accepted.\n   */\n  static Address::InstanceConstSharedPtr getIpv6AnyAddress();\n\n  /**\n   * @return the IPv4 CIDR catch-all address (0.0.0.0/0).\n   */\n  static const std::string& getIpv4CidrCatchAllAddress();\n\n  /**\n   * @return the IPv6 CIDR catch-all address (::/0).\n   */\n  static const std::string& getIpv6CidrCatchAllAddress();\n\n  /**\n   * @param address IP address instance.\n   * @param port to update.\n   * @return Address::InstanceConstSharedPtr a new address instance with updated port.\n   */\n  static Address::InstanceConstSharedPtr getAddressWithPort(const Address::Instance& address,\n                                                            uint32_t port);\n\n  /**\n   * Retrieve the original destination address from an accepted socket.\n   * The address (IP and port) may be not local and the port may differ from\n   * the listener port if the packets were redirected using iptables\n   * @param sock is accepted socket\n   * @return the original destination or nullptr if not available.\n   */\n  static Address::InstanceConstSharedPtr getOriginalDst(Socket& sock);\n\n  /**\n   * Parses a string containing a comma-separated list of port numbers and/or\n   * port ranges and appends the values to a caller-provided list of PortRange structures.\n   * For example, the string \"1-1024,2048-4096,12345\" causes 3 PortRange structures\n   * to be appended to the supplied list.\n   * @param str is the string containing the port numbers and ranges\n   * @param list is the list to append the new data structures to\n   */\n  static void parsePortRangeList(absl::string_view string, std::list<PortRange>& list);\n\n  /**\n   * Checks whether a given port number appears in at least one of the port ranges in a list\n   * @param address supplies the IP address to compare.\n   * @param list the list of port ranges in which the port may appear\n   * @return 
whether the port appears in at least one of the ranges in the list\n   */\n  static bool portInRangeList(const Address::Instance& address, const std::list<PortRange>& list);\n\n  /**\n   * Converts IPv6 absl::uint128 in network byte order to host byte order.\n   * @param address supplies the IPv6 address in network byte order.\n   * @return the absl::uint128 IPv6 address in host byte order.\n   */\n  static absl::uint128 Ip6ntohl(const absl::uint128& address);\n\n  /**\n   * Converts IPv6 absl::uint128 in host byte order to network byte order.\n   * @param address supplies the IPv6 address in host byte order.\n   * @return the absl::uint128 IPv6 address in network byte order.\n   */\n  static absl::uint128 Ip6htonl(const absl::uint128& address);\n\n  static Address::InstanceConstSharedPtr\n  protobufAddressToAddress(const envoy::config::core::v3::Address& proto_address);\n\n  /**\n   * Copies the address instance into the protobuf representation of an address.\n   * @param address is the address to be copied into the protobuf representation of this address.\n   * @param proto_address is the protobuf address to which the address instance is copied into.\n   */\n  static void addressToProtobufAddress(const Address::Instance& address,\n                                       envoy::config::core::v3::Address& proto_address);\n\n  /**\n   * Returns socket type corresponding to SocketAddress.protocol value of the\n   * given address, or SocketType::Stream if the address is a pipe address.\n   * @param proto_address the address protobuf\n   * @return socket type\n   */\n  static Socket::Type\n  protobufAddressSocketType(const envoy::config::core::v3::Address& proto_address);\n\n  /**\n   * Send a packet via given UDP socket with specific source address.\n   * @param handle is the UDP socket used to send.\n   * @param slices points to the buffers containing the packet.\n   * @param num_slices is the number of buffers.\n   * @param local_ip is the source address to be used 
to send.\n   * @param peer_address is the destination address to send to.\n   */\n  static Api::IoCallUint64Result writeToSocket(IoHandle& handle, Buffer::RawSlice* slices,\n                                               uint64_t num_slices, const Address::Ip* local_ip,\n                                               const Address::Instance& peer_address);\n  static Api::IoCallUint64Result writeToSocket(IoHandle& handle, const Buffer::Instance& buffer,\n                                               const Address::Ip* local_ip,\n                                               const Address::Instance& peer_address);\n\n  /**\n   * Read a packet from a given UDP socket and pass the packet to given UdpPacketProcessor.\n   * @param handle is the UDP socket to read from.\n   * @param local_address is the socket's local address used to populate port.\n   * @param udp_packet_processor is the callback to receive the packet.\n   * @param receive_time is the timestamp passed to udp_packet_processor for the\n   * receive time of the packet.\n   * @param packets_dropped is the output parameter for number of packets dropped in kernel. 
If the\n   * caller is not interested in it, nullptr can be passed in.\n   */\n  static Api::IoCallUint64Result readFromSocket(IoHandle& handle,\n                                                const Address::Instance& local_address,\n                                                UdpPacketProcessor& udp_packet_processor,\n                                                MonotonicTime receive_time,\n                                                uint32_t* packets_dropped);\n\n  /**\n   * Read available packets from a given UDP socket and pass the packet to a given\n   * UdpPacketProcessor.\n   * @param handle is the UDP socket to read from.\n   * @param local_address is the socket's local address used to populate port.\n   * @param udp_packet_processor is the callback to receive the packets.\n   * @param time_source is the time source used to generate the time stamp of the received packets.\n   * @param packets_dropped is the output parameter for number of packets dropped in kernel.\n   *\n   * TODO(mattklein123): Allow the number of packets read to be limited for fairness. Currently\n   *                     this function will always return an error, even if EAGAIN. In the future\n   *                     we can return no error if we limited the number of packets read and have\n   *                     to fake another read event.\n   * TODO(mattklein123): Can we potentially share this with the TCP stack somehow? Similar code\n   *                     exists there.\n   */\n  static Api::IoErrorPtr readPacketsFromSocket(IoHandle& handle,\n                                               const Address::Instance& local_address,\n                                               UdpPacketProcessor& udp_packet_processor,\n                                               TimeSource& time_source, uint32_t& packets_dropped);\n\nprivate:\n  static void throwWithMalformedIp(absl::string_view ip_address);\n\n  /**\n   * Takes a number and flips the order in byte chunks. 
The last byte of the input will be the\n   * first byte in the output. The second to last byte will be the second to first byte in the\n   * output. Etc..\n   * @param input supplies the input to have the bytes flipped.\n   * @return the absl::uint128 of the input having the bytes flipped.\n   */\n  static absl::uint128 flipOrder(const absl::uint128& input);\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/profiler/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"profiler_lib\",\n    srcs = [\"profiler.cc\"],\n    hdrs = [\"profiler.h\"],\n    tcmalloc_dep = 1,\n)\n"
  },
  {
    "path": "source/common/profiler/profiler.cc",
    "content": "#include \"common/profiler/profiler.h\"\n\n#include <string>\n\n#ifdef PROFILER_AVAILABLE\n\n#include \"gperftools/heap-profiler.h\"\n#include \"gperftools/profiler.h\"\n\nnamespace Envoy {\nnamespace Profiler {\n\nbool Cpu::profilerEnabled() { return ProfilingIsEnabledForAllThreads(); }\n\nbool Cpu::startProfiler(const std::string& output_path) {\n  return ProfilerStart(output_path.c_str());\n}\n\nvoid Cpu::stopProfiler() { ProfilerStop(); }\n\nbool Heap::profilerEnabled() {\n  // determined by PROFILER_AVAILABLE\n  return true;\n}\n\nbool Heap::isProfilerStarted() { return IsHeapProfilerRunning(); }\nbool Heap::startProfiler(const std::string& output_file_name_prefix) {\n  HeapProfilerStart(output_file_name_prefix.c_str());\n  return true;\n}\n\nbool Heap::stopProfiler() {\n  if (!IsHeapProfilerRunning()) {\n    return false;\n  }\n  HeapProfilerDump(\"stop and dump\");\n  HeapProfilerStop();\n  return true;\n}\n\n} // namespace Profiler\n} // namespace Envoy\n\n#else\n\nnamespace Envoy {\nnamespace Profiler {\n\nbool Cpu::profilerEnabled() { return false; }\nbool Cpu::startProfiler(const std::string&) { return false; }\nvoid Cpu::stopProfiler() {}\n\nbool Heap::profilerEnabled() { return false; }\nbool Heap::isProfilerStarted() { return false; }\nbool Heap::startProfiler(const std::string&) { return false; }\nbool Heap::stopProfiler() { return false; }\n} // namespace Profiler\n} // namespace Envoy\n\n#endif // #ifdef TCMALLOC\n"
  },
  {
    "path": "source/common/profiler/profiler.h",
    "content": "#pragma once\n\n#include <string>\n\n// Profiling support is provided in the release tcmalloc of `gperftools`, but not in the library\n// that supplies the debug tcmalloc. So all the profiling code must be ifdef'd\n// on PROFILER_AVAILABLE which is dependent on those two settings.\n#if defined(GPERFTOOLS_TCMALLOC) && !defined(ENVOY_MEMORY_DEBUG_ENABLED)\n#define PROFILER_AVAILABLE\n#endif\n\nnamespace Envoy {\nnamespace Profiler {\n\n/**\n * Process wide CPU profiling.\n */\nclass Cpu {\npublic:\n  /**\n   * @return whether the profiler is enabled or not.\n   */\n  static bool profilerEnabled();\n\n  /**\n   * Start the profiler and write to the specified path.\n   * @return bool whether the call to start the profiler succeeded.\n   */\n  static bool startProfiler(const std::string& output_path);\n\n  /**\n   * Stop the profiler.\n   */\n  static void stopProfiler();\n};\n\n/**\n * Process wide heap profiling\n */\nclass Heap {\npublic:\n  /**\n   * @return whether the profiler is enabled in this build or not.\n   */\n  static bool profilerEnabled();\n\n  /**\n   * @return whether the profiler is started or not\n   */\n  static bool isProfilerStarted();\n\n  /**\n   * Start the profiler and write to the specified path.\n   * @return bool whether the call to start the profiler succeeded.\n   */\n  static bool startProfiler(const std::string& output_path);\n\n  /**\n   * Stop the profiler.\n   * @return bool whether the file is dumped\n   */\n  static bool stopProfiler();\n};\n\n} // namespace Profiler\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/protobuf/BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_proto_library\")\nload(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\nload(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nproto_library(\n    name = \"wkt_protos\",\n    deps = [\n        \"@com_google_protobuf//:any_proto\",\n        \"@com_google_protobuf//:descriptor_proto\",\n        \"@com_google_protobuf//:empty_proto\",\n        \"@com_google_protobuf//:struct_proto\",\n        \"@com_google_protobuf//:wrappers_proto\",\n    ],\n)\n\ncc_proto_library(\n    name = \"cc_wkt_protos\",\n    deps = [\":wkt_protos\"],\n)\n\nenvoy_cc_library(\n    name = \"message_validator_lib\",\n    srcs = [\"message_validator_impl.cc\"],\n    hdrs = [\"message_validator_impl.h\"],\n    external_deps = [\"protobuf\"],\n    deps = [\n        \"//include/envoy/protobuf:message_validator_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:documentation_url_lib\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"protobuf\",\n    hdrs = [\"protobuf.h\"],\n    external_deps = [\n        \"protobuf\",\n    ],\n    deps = [\":cc_wkt_protos\"],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    external_deps = [\n        \"protobuf\",\n        \"yaml_cpp\",\n    ],\n    deps = [\n        \":message_validator_lib\",\n        \":protobuf\",\n        \":type_util_lib\",\n        \":well_known_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/protobuf:message_validator_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:documentation_url_lib\",\n        
\"//source/common/common:hash_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:api_type_oracle_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/protobuf:visitor_lib\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto\",\n        \"@envoy_api//envoy/annotations:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"type_util_lib\",\n    srcs = [\"type_util.cc\"],\n    hdrs = [\"type_util.h\"],\n    deps = [\n        \"//source/common/protobuf\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"visitor_lib\",\n    srcs = [\"visitor.cc\"],\n    hdrs = [\"visitor.h\"],\n    deps = [\":protobuf\"],\n)\n\nenvoy_cc_library(\n    name = \"well_known_lib\",\n    hdrs = [\"well_known.h\"],\n)\n"
  },
  {
    "path": "source/common/protobuf/message_validator_impl.cc",
    "content": "#include \"common/protobuf/message_validator_impl.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/hash.h\"\n#include \"common/common/macros.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace ProtobufMessage {\n\nnamespace {\nconst char deprecation_error[] = \" If continued use of this field is absolutely necessary, \"\n                                 \"see \" ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED \" for \"\n                                 \"how to apply a temporary and highly discouraged override.\";\n\nvoid onDeprecatedFieldCommon(absl::string_view description, bool soft_deprecation) {\n  if (soft_deprecation) {\n    ENVOY_LOG_MISC(warn, \"Deprecated field: {}\", absl::StrCat(description, deprecation_error));\n  } else {\n    throw DeprecatedProtoFieldException(absl::StrCat(description, deprecation_error));\n  }\n}\n} // namespace\n\nvoid WarningValidationVisitorImpl::setUnknownCounter(Stats::Counter& counter) {\n  ASSERT(unknown_counter_ == nullptr);\n  unknown_counter_ = &counter;\n  counter.add(prestats_unknown_count_);\n}\n\nvoid WarningValidationVisitorImpl::onUnknownField(absl::string_view description) {\n  const uint64_t hash = HashUtil::xxHash64(description);\n  auto it = descriptions_.insert(hash);\n  // If we've seen this before, skip.\n  if (!it.second) {\n    return;\n  }\n\n  // It's a new field, log and bump stat.\n  ENVOY_LOG(warn, \"Unknown field: {}\", description);\n  if (unknown_counter_ == nullptr) {\n    ++prestats_unknown_count_;\n  } else {\n    unknown_counter_->inc();\n  }\n}\n\nvoid WarningValidationVisitorImpl::onDeprecatedField(absl::string_view description,\n                                                     bool soft_deprecation) {\n  onDeprecatedFieldCommon(description, soft_deprecation);\n}\n\nvoid StrictValidationVisitorImpl::onUnknownField(absl::string_view description) {\n  throw UnknownProtoFieldException(\n      
absl::StrCat(\"Protobuf message (\", description, \") has unknown fields\"));\n}\n\nvoid StrictValidationVisitorImpl::onDeprecatedField(absl::string_view description,\n                                                    bool soft_deprecation) {\n  onDeprecatedFieldCommon(description, soft_deprecation);\n}\n\nValidationVisitor& getNullValidationVisitor() {\n  MUTABLE_CONSTRUCT_ON_FIRST_USE(NullValidationVisitorImpl);\n}\n\nValidationVisitor& getStrictValidationVisitor() {\n  MUTABLE_CONSTRUCT_ON_FIRST_USE(StrictValidationVisitorImpl);\n}\n\n} // namespace ProtobufMessage\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/protobuf/message_validator_impl.h",
    "content": "#pragma once\n\n#include \"envoy/protobuf/message_validator.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"common/common/documentation_url.h\"\n#include \"common/common/logger.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace ProtobufMessage {\n\nclass NullValidationVisitorImpl : public ValidationVisitor {\npublic:\n  // Envoy::ProtobufMessage::ValidationVisitor\n  void onUnknownField(absl::string_view) override {}\n  void onDeprecatedField(absl::string_view, bool) override {}\n\n  // Envoy::ProtobufMessage::ValidationVisitor\n  bool skipValidation() override { return true; }\n};\n\nValidationVisitor& getNullValidationVisitor();\n\nclass WarningValidationVisitorImpl : public ValidationVisitor,\n                                     public Logger::Loggable<Logger::Id::config> {\npublic:\n  void setUnknownCounter(Stats::Counter& counter);\n\n  // Envoy::ProtobufMessage::ValidationVisitor\n  void onUnknownField(absl::string_view description) override;\n  void onDeprecatedField(absl::string_view description, bool soft_deprecation) override;\n\n  // Envoy::ProtobufMessage::ValidationVisitor\n  bool skipValidation() override { return false; }\n\nprivate:\n  // Track hashes of descriptions we've seen, to avoid log spam. 
A hash is used here to avoid\n  // wasting memory with unused strings.\n  absl::flat_hash_set<uint64_t> descriptions_;\n  // This can be late initialized via setUnknownCounter(), enabling the server bootstrap loading\n  // which occurs prior to the initialization of the stats subsystem.\n  Stats::Counter* unknown_counter_{};\n  uint64_t prestats_unknown_count_{};\n};\n\nclass StrictValidationVisitorImpl : public ValidationVisitor {\npublic:\n  // Envoy::ProtobufMessage::ValidationVisitor\n  void onUnknownField(absl::string_view description) override;\n\n  // Envoy::ProtobufMessage::ValidationVisitor\n  bool skipValidation() override { return false; }\n  void onDeprecatedField(absl::string_view description, bool soft_deprecation) override;\n};\n\nValidationVisitor& getStrictValidationVisitor();\n\nclass ValidationContextImpl : public ValidationContext {\npublic:\n  ValidationContextImpl(ValidationVisitor& static_validation_visitor,\n                        ValidationVisitor& dynamic_validation_visitor)\n      : static_validation_visitor_(static_validation_visitor),\n        dynamic_validation_visitor_(dynamic_validation_visitor) {}\n\n  // Envoy::ProtobufMessage::ValidationContext\n  ValidationVisitor& staticValidationVisitor() override { return static_validation_visitor_; }\n  ValidationVisitor& dynamicValidationVisitor() override { return dynamic_validation_visitor_; }\n\nprivate:\n  ValidationVisitor& static_validation_visitor_;\n  ValidationVisitor& dynamic_validation_visitor_;\n};\n\nclass ProdValidationContextImpl : public ValidationContextImpl {\npublic:\n  ProdValidationContextImpl(bool allow_unknown_static_fields, bool allow_unknown_dynamic_fields,\n                            bool ignore_unknown_dynamic_fields)\n      : ValidationContextImpl(allow_unknown_static_fields ? 
static_warning_validation_visitor_\n                                                          : getStrictValidationVisitor(),\n                              allow_unknown_dynamic_fields\n                                  ? (ignore_unknown_dynamic_fields\n                                         ? ProtobufMessage::getNullValidationVisitor()\n                                         : dynamic_warning_validation_visitor_)\n                                  : ProtobufMessage::getStrictValidationVisitor()) {}\n\n  ProtobufMessage::WarningValidationVisitorImpl& staticWarningValidationVisitor() {\n    return static_warning_validation_visitor_;\n  }\n\n  ProtobufMessage::WarningValidationVisitorImpl& dynamicWarningValidationVisitor() {\n    return dynamic_warning_validation_visitor_;\n  }\n\nprivate:\n  ProtobufMessage::WarningValidationVisitorImpl static_warning_validation_visitor_;\n  ProtobufMessage::WarningValidationVisitorImpl dynamic_warning_validation_visitor_;\n};\n\n} // namespace ProtobufMessage\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/protobuf/protobuf.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/platform.h\"\n\n#include \"google/protobuf/any.pb.h\"\n#include \"google/protobuf/descriptor.h\"\n#include \"google/protobuf/descriptor.pb.h\"\n#include \"google/protobuf/descriptor_database.h\"\n#include \"google/protobuf/dynamic_message.h\"\n#include \"google/protobuf/empty.pb.h\"\n#include \"google/protobuf/io/coded_stream.h\"\n#include \"google/protobuf/io/zero_copy_stream.h\"\n#include \"google/protobuf/io/zero_copy_stream_impl.h\"\n#include \"google/protobuf/map.h\"\n#include \"google/protobuf/message.h\"\n#include \"google/protobuf/repeated_field.h\"\n#include \"google/protobuf/service.h\"\n#include \"google/protobuf/struct.pb.h\"\n#include \"google/protobuf/stubs/status.h\"\n#include \"google/protobuf/text_format.h\"\n#include \"google/protobuf/util/field_mask_util.h\"\n#include \"google/protobuf/util/json_util.h\"\n#include \"google/protobuf/util/message_differencer.h\"\n#include \"google/protobuf/util/time_util.h\"\n#include \"google/protobuf/util/type_resolver.h\"\n#include \"google/protobuf/util/type_resolver_util.h\"\n#include \"google/protobuf/wrappers.pb.h\"\n\nnamespace Envoy {\n\n// All references to google::protobuf in Envoy need to be made via the\n// Envoy::Protobuf namespace. This is required to allow remapping of protobuf to\n// alternative implementations during import into other repositories. E.g. 
at\n// Google we have more than one protobuf implementation.\nnamespace Protobuf = google::protobuf;\n\n// Allows mapping from google::protobuf::util to other util libraries.\nnamespace ProtobufUtil = google::protobuf::util;\n\n// Protobuf well-known types (WKT) should be referenced via the ProtobufWkt\n// namespace.\nnamespace ProtobufWkt = google::protobuf;\n\n// Alternative protobuf implementations might not have the same basic types.\n// Below we provide wrappers to facilitate remapping of the type during import.\nnamespace ProtobufTypes {\n\nusing MessagePtr = std::unique_ptr<Protobuf::Message>;\nusing ConstMessagePtrVector = std::vector<std::unique_ptr<const Protobuf::Message>>;\n\nusing Int64 = int64_t;\n\n} // namespace ProtobufTypes\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/protobuf/type_util.cc",
    "content": "#include \"common/protobuf/type_util.h\"\n\nnamespace Envoy {\n\nabsl::string_view TypeUtil::typeUrlToDescriptorFullName(absl::string_view type_url) {\n  const size_t pos = type_url.rfind('/');\n  if (pos != absl::string_view::npos) {\n    type_url = type_url.substr(pos + 1);\n  }\n  return type_url;\n}\n\nstd::string TypeUtil::descriptorFullNameToTypeUrl(absl::string_view type) {\n  return \"type.googleapis.com/\" + std::string(type);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/protobuf/type_util.h",
    "content": "#pragma once\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\n\nclass TypeUtil {\npublic:\n  static absl::string_view typeUrlToDescriptorFullName(absl::string_view type_url);\n\n  static std::string descriptorFullNameToTypeUrl(absl::string_view type);\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/protobuf/utility.cc",
    "content": "#include \"common/protobuf/utility.h\"\n\n#include <limits>\n#include <numeric>\n\n#include \"envoy/annotations/deprecation.pb.h\"\n#include \"envoy/protobuf/message_validator.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/documentation_url.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/api_type_oracle.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/visitor.h\"\n#include \"common/protobuf/well_known.h\"\n\n#include \"absl/strings/match.h\"\n#include \"udpa/annotations/sensitive.pb.h\"\n#include \"yaml-cpp/yaml.h\"\n\nusing namespace std::chrono_literals;\n\nnamespace Envoy {\nnamespace {\n\nabsl::string_view filenameFromPath(absl::string_view full_path) {\n  size_t index = full_path.rfind(\"/\");\n  if (index == std::string::npos || index == full_path.size()) {\n    return full_path;\n  }\n  return full_path.substr(index + 1, full_path.size());\n}\n\nvoid blockFormat(YAML::Node node) {\n  node.SetStyle(YAML::EmitterStyle::Block);\n\n  if (node.Type() == YAML::NodeType::Sequence) {\n    for (const auto& it : node) {\n      blockFormat(it);\n    }\n  }\n  if (node.Type() == YAML::NodeType::Map) {\n    for (const auto& it : node) {\n      blockFormat(it.second);\n    }\n  }\n}\n\nProtobufWkt::Value parseYamlNode(const YAML::Node& node) {\n  ProtobufWkt::Value value;\n  switch (node.Type()) {\n  case YAML::NodeType::Null:\n    value.set_null_value(ProtobufWkt::NULL_VALUE);\n    break;\n  case YAML::NodeType::Scalar: {\n    if (node.Tag() == \"!\") {\n      value.set_string_value(node.as<std::string>());\n      break;\n    }\n    bool bool_value;\n    if (YAML::convert<bool>::decode(node, bool_value)) {\n      value.set_bool_value(bool_value);\n      break;\n    }\n    int64_t int_value;\n    if (YAML::convert<int64_t>::decode(node, int_value)) {\n   
   if (std::numeric_limits<int32_t>::min() <= int_value &&\n          std::numeric_limits<int32_t>::max() >= int_value) {\n        // We could convert all integer values to string but it will break some stuff relying on\n        // ProtobufWkt::Struct itself, only convert small numbers into number_value here.\n        value.set_number_value(int_value);\n      } else {\n        // Proto3 JSON mapping allows use string for integer, this still has to be converted from\n        // int_value to support hexadecimal and octal literals.\n        value.set_string_value(std::to_string(int_value));\n      }\n      break;\n    }\n    // Fall back on string, including float/double case. When protobuf parse the JSON into a message\n    // it will convert based on the type in the message definition.\n    value.set_string_value(node.as<std::string>());\n    break;\n  }\n  case YAML::NodeType::Sequence: {\n    auto& list_values = *value.mutable_list_value()->mutable_values();\n    for (const auto& it : node) {\n      *list_values.Add() = parseYamlNode(it);\n    }\n    break;\n  }\n  case YAML::NodeType::Map: {\n    auto& struct_fields = *value.mutable_struct_value()->mutable_fields();\n    for (const auto& it : node) {\n      struct_fields[it.first.as<std::string>()] = parseYamlNode(it.second);\n    }\n    break;\n  }\n  case YAML::NodeType::Undefined:\n    throw EnvoyException(\"Undefined YAML value\");\n  }\n  return value;\n}\n\nvoid jsonConvertInternal(const Protobuf::Message& source,\n                         ProtobufMessage::ValidationVisitor& validation_visitor,\n                         Protobuf::Message& dest, bool do_boosting = true) {\n  Protobuf::util::JsonPrintOptions json_options;\n  json_options.preserve_proto_field_names = true;\n  std::string json;\n  const auto status = Protobuf::util::MessageToJsonString(source, &json, json_options);\n  if (!status.ok()) {\n    throw EnvoyException(fmt::format(\"Unable to convert protobuf message to JSON string: {} {}\",\n        
                             status.ToString(), source.DebugString()));\n  }\n  MessageUtil::loadFromJson(json, dest, validation_visitor, do_boosting);\n}\n\nenum class MessageVersion {\n  // This is an earlier version of a message, a later one exists.\n  EarlierVersion,\n  // This is the latest version of a message.\n  LatestVersion,\n  // Validating to see if the latest version will also be accepted; only apply message validators\n  // without side effects, validations should be strict.\n  LatestVersionValidate,\n};\n\nusing MessageXformFn = std::function<void(Protobuf::Message&, MessageVersion)>;\n\nclass ApiBoostRetryException : public EnvoyException {\npublic:\n  ApiBoostRetryException(const std::string& message) : EnvoyException(message) {}\n};\n\n// Apply a function transforming a message (e.g. loading JSON into the message).\n// First we try with the message's earlier type, and if unsuccessful (or no\n// earlier) type, then the current type. This allows us to take a v3 Envoy\n// internal proto and ingest both v2 and v3 in methods such as loadFromJson.\n// This relies on the property that any v3 configuration that is readable as\n// v2 has the same semantics in v2/v3, which holds due to the highly structured\n// vN/v(N+1) mechanical transforms.\nvoid tryWithApiBoosting(MessageXformFn f, Protobuf::Message& message) {\n  const Protobuf::Descriptor* earlier_version_desc =\n      Config::ApiTypeOracle::getEarlierVersionDescriptor(message.GetDescriptor()->full_name());\n  // If there is no earlier version of a message, just apply f directly.\n  if (earlier_version_desc == nullptr) {\n    f(message, MessageVersion::LatestVersion);\n    return;\n  }\n\n  Protobuf::DynamicMessageFactory dmf;\n  auto earlier_message = ProtobufTypes::MessagePtr(dmf.GetPrototype(earlier_version_desc)->New());\n  ASSERT(earlier_message != nullptr);\n  try {\n    // Try apply f with an earlier version of the message, then upgrade the\n    // result.\n    f(*earlier_message, 
MessageVersion::EarlierVersion);\n    // If we succeed at the earlier version, we ask the counterfactual, would this have worked at a\n    // later version? If not, this is v2 only and we need to warn. This is a waste of CPU cycles but\n    // we expect that JSON/YAML fragments will not be in use by any CPU limited use cases.\n    try {\n      f(message, MessageVersion::LatestVersionValidate);\n    } catch (EnvoyException& e) {\n      MessageUtil::onVersionUpgradeWarn(e.what());\n    }\n    // Now we do the real work of upgrading.\n    Config::VersionConverter::upgrade(*earlier_message, message);\n  } catch (ApiBoostRetryException&) {\n    // If we fail at the earlier version, try f at the current version of the\n    // message.\n    f(message, MessageVersion::LatestVersion);\n  }\n}\n\n// Logs a warning for use of a deprecated field or runtime-overridden use of an\n// otherwise fatal field. Throws a warning on use of a fatal by default field.\nvoid deprecatedFieldHelper(Runtime::Loader* runtime, bool proto_annotated_as_deprecated,\n                           bool proto_annotated_as_disallowed, const std::string& feature_name,\n                           std::string error, const Protobuf::Message& message,\n                           ProtobufMessage::ValidationVisitor& validation_visitor) {\n// This option is for Envoy builds with --define deprecated_features=disabled\n// The build options CI then verifies that as Envoy developers deprecate fields,\n// that they update canonical configs and unit tests to not use those deprecated\n// fields, by making their use fatal in the build options CI.\n#ifdef ENVOY_DISABLE_DEPRECATED_FEATURES\n  bool warn_only = false;\n#else\n  bool warn_only = true;\n#endif\n\n  bool warn_default = warn_only;\n  // Allow runtime to be null both to not crash if this is called before server initialization,\n  // and so proto validation works in context where runtime singleton is not set up (e.g.\n  // standalone config validation utilities)\n 
 if (runtime && proto_annotated_as_deprecated) {\n    // This is set here, rather than above, so that in the absence of a\n    // registry (i.e. test) the default for if a feature is allowed or not is\n    // based on ENVOY_DISABLE_DEPRECATED_FEATURES.\n    warn_only &= !proto_annotated_as_disallowed;\n    warn_default = warn_only;\n    warn_only = runtime->snapshot().deprecatedFeatureEnabled(feature_name, warn_only);\n  }\n  // Note this only checks if the runtime override has an actual effect. It\n  // does not change the logged warning if someone \"allows\" a deprecated but not\n  // yet fatal field.\n  const bool runtime_overridden = (warn_default == false && warn_only == true);\n\n  std::string with_overridden = fmt::format(\n      error,\n      (runtime_overridden ? \"runtime overrides to continue using now fatal-by-default \" : \"\"));\n\n  validation_visitor.onDeprecatedField(\"type \" + message.GetTypeName() + \" \" + with_overridden,\n                                       warn_only);\n}\n\n} // namespace\n\nnamespace ProtobufPercentHelper {\n\nuint64_t checkAndReturnDefault(uint64_t default_value, uint64_t max_value) {\n  ASSERT(default_value <= max_value);\n  return default_value;\n}\n\nuint64_t convertPercent(double percent, uint64_t max_value) {\n  // Checked by schema.\n  ASSERT(percent >= 0.0 && percent <= 100.0);\n  return max_value * (percent / 100.0);\n}\n\nbool evaluateFractionalPercent(envoy::type::v3::FractionalPercent percent, uint64_t random_value) {\n  return random_value % fractionalPercentDenominatorToInt(percent.denominator()) <\n         percent.numerator();\n}\n\nuint64_t fractionalPercentDenominatorToInt(\n    const envoy::type::v3::FractionalPercent::DenominatorType& denominator) {\n  switch (denominator) {\n  case envoy::type::v3::FractionalPercent::HUNDRED:\n    return 100;\n  case envoy::type::v3::FractionalPercent::TEN_THOUSAND:\n    return 10000;\n  case envoy::type::v3::FractionalPercent::MILLION:\n    return 1000000;\n  
default:\n    // Checked by schema.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace ProtobufPercentHelper\n\nMissingFieldException::MissingFieldException(const std::string& field_name,\n                                             const Protobuf::Message& message)\n    : EnvoyException(\n          fmt::format(\"Field '{}' is missing in: {}\", field_name, message.DebugString())) {}\n\nProtoValidationException::ProtoValidationException(const std::string& validation_error,\n                                                   const Protobuf::Message& message)\n    : EnvoyException(fmt::format(\"Proto constraint validation failed ({}): {}\", validation_error,\n                                 message.DebugString())) {\n  ENVOY_LOG_MISC(debug, \"Proto validation error; throwing {}\", what());\n}\n\nvoid ProtoExceptionUtil::throwMissingFieldException(const std::string& field_name,\n                                                    const Protobuf::Message& message) {\n  throw MissingFieldException(field_name, message);\n}\n\nvoid ProtoExceptionUtil::throwProtoValidationException(const std::string& validation_error,\n                                                       const Protobuf::Message& message) {\n  throw ProtoValidationException(validation_error, message);\n}\n\n// TODO(htuch): this is where we will also reject v2 configs by default.\nvoid MessageUtil::onVersionUpgradeWarn(absl::string_view desc) {\n  const std::string& warning_str =\n      fmt::format(\"Configuration does not parse cleanly as v3. v2 configuration is \"\n                  \"deprecated and will be removed from Envoy at the start of Q1 2021: {}\",\n                  desc);\n  // Always log at trace level. This is useful for tests that don't want to rely on possible\n  // elision.\n  ENVOY_LOG_MISC(trace, warning_str);\n  // Log each distinct message at warn level once every 5s. 
We use a static map here, which is fine\n  // as we are always on the main thread.\n  static auto* last_warned = new absl::flat_hash_map<std::string, int64_t>();\n  const auto now = t_logclock::now().time_since_epoch().count();\n  const auto it = last_warned->find(warning_str);\n  if (it == last_warned->end() ||\n      (now - it->second) > std::chrono::duration_cast<std::chrono::nanoseconds>(5s).count()) {\n    ENVOY_LOG_MISC(warn, warning_str);\n    (*last_warned)[warning_str] = now;\n  }\n  Runtime::Loader* loader = Runtime::LoaderSingleton::getExisting();\n  // We only log, and don't bump stats, if we're sufficiently early in server initialization (i.e.\n  // bootstrap).\n  if (loader != nullptr) {\n    loader->countDeprecatedFeatureUse();\n  }\n}\n\nsize_t MessageUtil::hash(const Protobuf::Message& message) {\n  std::string text_format;\n\n  {\n    Protobuf::TextFormat::Printer printer;\n    printer.SetExpandAny(true);\n    printer.SetUseFieldNumber(true);\n    printer.SetSingleLineMode(true);\n    printer.SetHideUnknownFields(true);\n    printer.PrintToString(message, &text_format);\n  }\n\n  return HashUtil::xxHash64(text_format);\n}\n\nvoid MessageUtil::loadFromJson(const std::string& json, Protobuf::Message& message,\n                               ProtobufMessage::ValidationVisitor& validation_visitor,\n                               bool do_boosting) {\n  auto load_json = [&json, &validation_visitor](Protobuf::Message& message,\n                                                MessageVersion message_version) {\n    Protobuf::util::JsonParseOptions options;\n    options.case_insensitive_enum_parsing = true;\n    // Let's first try and get a clean parse when checking for unknown fields;\n    // this should be the common case.\n    options.ignore_unknown_fields = false;\n    const auto strict_status = Protobuf::util::JsonStringToMessage(json, &message, options);\n    if (strict_status.ok()) {\n      // Success, no need to do any extra work.\n      return;\n   
 }\n    // If we fail, we see if we get a clean parse when allowing unknown fields.\n    // This is essentially a workaround\n    // for https://github.com/protocolbuffers/protobuf/issues/5967.\n    // TODO(htuch): clean this up when protobuf supports JSON/YAML unknown field\n    // detection directly.\n    options.ignore_unknown_fields = true;\n    const auto relaxed_status = Protobuf::util::JsonStringToMessage(json, &message, options);\n    // If we still fail with relaxed unknown field checking, the error has nothing\n    // to do with unknown fields.\n    if (!relaxed_status.ok()) {\n      throw EnvoyException(\"Unable to parse JSON as proto (\" + relaxed_status.ToString() +\n                           \"): \" + json);\n    }\n    // We know it's an unknown field at this point. If we're at the latest\n    // version, then it's definitely an unknown field, otherwise we try to\n    // load again at a later version.\n    if (message_version == MessageVersion::LatestVersion) {\n      validation_visitor.onUnknownField(\"type \" + message.GetTypeName() + \" reason \" +\n                                        strict_status.ToString());\n    } else if (message_version == MessageVersion::LatestVersionValidate) {\n      throw ProtobufMessage::UnknownProtoFieldException(absl::StrCat(\"Unknown field in: \", json));\n    } else {\n      throw ApiBoostRetryException(\"Unknown field, possibly a rename, try again.\");\n    }\n  };\n\n  if (do_boosting) {\n    tryWithApiBoosting(load_json, message);\n  } else {\n    load_json(message, MessageVersion::LatestVersion);\n  }\n}\n\nvoid MessageUtil::loadFromJson(const std::string& json, ProtobufWkt::Struct& message) {\n  // No need to validate if converting to a Struct, since there are no unknown\n  // fields possible.\n  loadFromJson(json, message, ProtobufMessage::getNullValidationVisitor());\n}\n\nvoid MessageUtil::loadFromYaml(const std::string& yaml, Protobuf::Message& message,\n                               
ProtobufMessage::ValidationVisitor& validation_visitor,\n                               bool do_boosting) {\n  ProtobufWkt::Value value = ValueUtil::loadFromYaml(yaml);\n  if (value.kind_case() == ProtobufWkt::Value::kStructValue ||\n      value.kind_case() == ProtobufWkt::Value::kListValue) {\n    jsonConvertInternal(value, validation_visitor, message, do_boosting);\n    return;\n  }\n  throw EnvoyException(\"Unable to convert YAML as JSON: \" + yaml);\n}\n\nvoid MessageUtil::loadFromYaml(const std::string& yaml, ProtobufWkt::Struct& message) {\n  // No need to validate if converting to a Struct, since there are no unknown\n  // fields possible.\n  return loadFromYaml(yaml, message, ProtobufMessage::getNullValidationVisitor());\n}\n\nvoid MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& message,\n                               ProtobufMessage::ValidationVisitor& validation_visitor,\n                               Api::Api& api, bool do_boosting) {\n  const std::string contents = api.fileSystem().fileReadToEnd(path);\n  // If the filename ends with .pb, attempt to parse it as a binary proto.\n  if (absl::EndsWith(path, FileExtensions::get().ProtoBinary)) {\n    // Attempt to parse the binary format.\n    auto read_proto_binary = [&contents, &validation_visitor](Protobuf::Message& message,\n                                                              MessageVersion message_version) {\n      try {\n        if (message.ParseFromString(contents)) {\n          MessageUtil::checkForUnexpectedFields(\n              message, message_version == MessageVersion::LatestVersionValidate\n                           ? 
ProtobufMessage::getStrictValidationVisitor()\n                           : validation_visitor);\n        }\n        return;\n      } catch (EnvoyException& ex) {\n        if (message_version == MessageVersion::LatestVersion ||\n            message_version == MessageVersion::LatestVersionValidate) {\n          // Failed reading the latest version - pass the same error upwards\n          throw ex;\n        }\n      }\n      throw ApiBoostRetryException(\n          \"Failed to parse at earlier version, trying again at later version.\");\n    };\n\n    if (do_boosting) {\n      // Attempts to read as the previous version and upgrade, and if it fails\n      // attempts to read as latest version.\n      tryWithApiBoosting(read_proto_binary, message);\n    } else {\n      read_proto_binary(message, MessageVersion::LatestVersion);\n    }\n    return;\n  }\n\n  // If the filename ends with .pb_text, attempt to parse it as a text proto.\n  if (absl::EndsWith(path, FileExtensions::get().ProtoText)) {\n    auto read_proto_text = [&contents, &path](Protobuf::Message& message,\n                                              MessageVersion message_version) {\n      if (Protobuf::TextFormat::ParseFromString(contents, &message)) {\n        return;\n      }\n      if (message_version == MessageVersion::LatestVersion ||\n          message_version == MessageVersion::LatestVersionValidate) {\n        throw EnvoyException(\"Unable to parse file \\\"\" + path + \"\\\" as a text protobuf (type \" +\n                             message.GetTypeName() + \")\");\n      } else {\n        throw ApiBoostRetryException(\n            \"Failed to parse at earlier version, trying again at later version.\");\n      }\n    };\n\n    if (do_boosting) {\n      tryWithApiBoosting(read_proto_text, message);\n    } else {\n      read_proto_text(message, MessageVersion::LatestVersion);\n    }\n    return;\n  }\n  if (absl::EndsWith(path, FileExtensions::get().Yaml)) {\n    loadFromYaml(contents, message, 
validation_visitor, do_boosting);\n  } else {\n    loadFromJson(contents, message, validation_visitor, do_boosting);\n  }\n}\n\nnamespace {\n\nvoid checkForDeprecatedNonRepeatedEnumValue(\n    const Protobuf::Message& message, absl::string_view filename,\n    const Protobuf::FieldDescriptor* field, const Protobuf::Reflection* reflection,\n    Runtime::Loader* runtime, ProtobufMessage::ValidationVisitor& validation_visitor) {\n  // Repeated fields will be handled by recursion in checkForUnexpectedFields.\n  if (field->is_repeated() || field->cpp_type() != Protobuf::FieldDescriptor::CPPTYPE_ENUM) {\n    return;\n  }\n\n  bool default_value = !reflection->HasField(message, field);\n\n  const Protobuf::EnumValueDescriptor* enum_value_descriptor = reflection->GetEnum(message, field);\n  if (!enum_value_descriptor->options().deprecated()) {\n    return;\n  }\n\n  const std::string error =\n      absl::StrCat(\"Using {}\", (default_value ? \"the default now-\" : \"\"), \"deprecated value \",\n                   enum_value_descriptor->name(), \" for enum '\", field->full_name(), \"' from file \",\n                   filename, \". This enum value will be removed from Envoy soon\",\n                   (default_value ? \" so a non-default value must now be explicitly set\" : \"\"),\n                   \". 
Please see \" ENVOY_DOC_URL_VERSION_HISTORY \" for details.\");\n  deprecatedFieldHelper(\n      runtime, true /*deprecated*/,\n      enum_value_descriptor->options().GetExtension(envoy::annotations::disallowed_by_default_enum),\n      absl::StrCat(\"envoy.deprecated_features:\", enum_value_descriptor->full_name()), error,\n      message, validation_visitor);\n}\n\nclass UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor {\npublic:\n  UnexpectedFieldProtoVisitor(ProtobufMessage::ValidationVisitor& validation_visitor,\n                              Runtime::Loader* runtime)\n      : validation_visitor_(validation_visitor), runtime_(runtime) {}\n\n  const void* onField(const Protobuf::Message& message, const Protobuf::FieldDescriptor& field,\n                      const void*) override {\n    const Protobuf::Reflection* reflection = message.GetReflection();\n    absl::string_view filename = filenameFromPath(field.file()->name());\n\n    // Before we check to see if the field is in use, see if there's a\n    // deprecated default enum value.\n    checkForDeprecatedNonRepeatedEnumValue(message, filename, &field, reflection, runtime_,\n                                           validation_visitor_);\n\n    // If this field is not in use, continue.\n    if ((field.is_repeated() && reflection->FieldSize(message, &field) == 0) ||\n        (!field.is_repeated() && !reflection->HasField(message, &field))) {\n      return nullptr;\n    }\n\n    // If this field is deprecated, warn or throw an error.\n    if (field.options().deprecated()) {\n      if (absl::StartsWith(field.name(), Config::VersionUtil::DeprecatedFieldShadowPrefix)) {\n        // The field was marked as hidden_envoy_deprecated and an error must be thrown,\n        // unless it is part of an explicit test that needs access to the deprecated field\n        // when we enable runtime deprecation override to allow point field overrides for tests.\n        if (!runtime_ ||\n            
!runtime_->snapshot().deprecatedFeatureEnabled(\n                absl::StrCat(\"envoy.deprecated_features:\", field.full_name()), false)) {\n          const std::string fatal_error = absl::StrCat(\n              \"Illegal use of hidden_envoy_deprecated_ V2 field '\", field.full_name(),\n              \"' from file \", filename,\n              \" while using the latest V3 configuration. This field has been removed from the \"\n              \"current Envoy API. Please see \" ENVOY_DOC_URL_VERSION_HISTORY \" for details.\");\n          throw ProtoValidationException(fatal_error, message);\n        }\n      }\n      const std::string warning =\n          absl::StrCat(\"Using {}deprecated option '\", field.full_name(), \"' from file \", filename,\n                       \". This configuration will be removed from \"\n                       \"Envoy soon. Please see \" ENVOY_DOC_URL_VERSION_HISTORY \" for details.\");\n\n      deprecatedFieldHelper(runtime_, true /*deprecated*/,\n                            field.options().GetExtension(envoy::annotations::disallowed_by_default),\n                            absl::StrCat(\"envoy.deprecated_features:\", field.full_name()), warning,\n                            message, validation_visitor_);\n    }\n    return nullptr;\n  }\n\n  void onMessage(const Protobuf::Message& message, const void*) override {\n    // Reject unknown fields.\n    const auto& unknown_fields = message.GetReflection()->GetUnknownFields(message);\n    if (!unknown_fields.empty()) {\n      std::string error_msg;\n      for (int n = 0; n < unknown_fields.field_count(); ++n) {\n        if (unknown_fields.field(n).number() == ProtobufWellKnown::OriginalTypeFieldNumber) {\n          continue;\n        }\n        error_msg += absl::StrCat(n > 0 ? 
\", \" : \"\", unknown_fields.field(n).number());\n      }\n      // We use the validation visitor but have hard coded behavior below for deprecated fields.\n      // TODO(htuch): Unify the deprecated and unknown visitor handling behind the validation\n      // visitor pattern. https://github.com/envoyproxy/envoy/issues/8092.\n      if (!error_msg.empty()) {\n        validation_visitor_.onUnknownField(\"type \" + message.GetTypeName() +\n                                           \" with unknown field set {\" + error_msg + \"}\");\n      }\n    }\n  }\n\nprivate:\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n  Runtime::Loader* runtime_;\n};\n\n} // namespace\n\nvoid MessageUtil::checkForUnexpectedFields(const Protobuf::Message& message,\n                                           ProtobufMessage::ValidationVisitor& validation_visitor,\n                                           Runtime::Loader* runtime) {\n  UnexpectedFieldProtoVisitor unexpected_field_visitor(validation_visitor, runtime);\n  ProtobufMessage::traverseMessage(unexpected_field_visitor, API_RECOVER_ORIGINAL(message),\n                                   nullptr);\n}\n\nstd::string MessageUtil::getYamlStringFromMessage(const Protobuf::Message& message,\n                                                  const bool block_print,\n                                                  const bool always_print_primitive_fields) {\n  std::string json = getJsonStringFromMessage(message, false, always_print_primitive_fields);\n  YAML::Node node;\n  try {\n    node = YAML::Load(json);\n  } catch (YAML::ParserException& e) {\n    throw EnvoyException(e.what());\n  } catch (YAML::BadConversion& e) {\n    throw EnvoyException(e.what());\n  } catch (std::exception& e) {\n    // There is a potentially wide space of exceptions thrown by the YAML parser,\n    // and enumerating them all may be difficult. 
Envoy doesn't work well with\n    // unhandled exceptions, so we capture them and record the exception name in\n    // the Envoy Exception text.\n    throw EnvoyException(fmt::format(\"Unexpected YAML exception: {}\", +e.what()));\n  }\n  if (block_print) {\n    blockFormat(node);\n  }\n  YAML::Emitter out;\n  out << node;\n  return out.c_str();\n}\n\nstd::string MessageUtil::getJsonStringFromMessage(const Protobuf::Message& message,\n                                                  const bool pretty_print,\n                                                  const bool always_print_primitive_fields) {\n  Protobuf::util::JsonPrintOptions json_options;\n  // By default, proto field names are converted to camelCase when the message is converted to JSON.\n  // Setting this option makes debugging easier because it keeps field names consistent in JSON\n  // printouts.\n  json_options.preserve_proto_field_names = true;\n  if (pretty_print) {\n    json_options.add_whitespace = true;\n  }\n  // Primitive types such as int32s and enums will not be serialized if they have the default value.\n  // This flag disables that behavior.\n  if (always_print_primitive_fields) {\n    json_options.always_print_primitive_fields = true;\n  }\n  std::string json;\n  const auto status = Protobuf::util::MessageToJsonString(message, &json, json_options);\n  // This should always succeed unless something crash-worthy such as out-of-memory.\n  RELEASE_ASSERT(status.ok(), \"\");\n  return json;\n}\n\nvoid MessageUtil::unpackTo(const ProtobufWkt::Any& any_message, Protobuf::Message& message) {\n  // If we don't have a type URL match, try an earlier version.\n  const absl::string_view any_full_name =\n      TypeUtil::typeUrlToDescriptorFullName(any_message.type_url());\n  if (any_full_name != message.GetDescriptor()->full_name()) {\n    const Protobuf::Descriptor* earlier_version_desc =\n        Config::ApiTypeOracle::getEarlierVersionDescriptor(message.GetDescriptor()->full_name());\n    // If 
the earlier version matches, unpack and upgrade.\n    if (earlier_version_desc != nullptr && any_full_name == earlier_version_desc->full_name()) {\n      // Take the Any message but adjust its type URL, since earlier/later versions are wire\n      // compatible.\n      ProtobufWkt::Any any_message_with_fixup;\n      any_message_with_fixup.MergeFrom(any_message);\n      any_message_with_fixup.set_type_url(\"type.googleapis.com/\" +\n                                          message.GetDescriptor()->full_name());\n      if (!any_message_with_fixup.UnpackTo(&message)) {\n        throw EnvoyException(fmt::format(\"Unable to unpack as {}: {}\",\n                                         earlier_version_desc->full_name(),\n                                         any_message_with_fixup.DebugString()));\n      }\n      Config::VersionConverter::annotateWithOriginalType(*earlier_version_desc, message);\n      MessageUtil::onVersionUpgradeWarn(any_full_name);\n      return;\n    }\n  }\n  // Otherwise, just unpack to the message. Type URL mismatches will be signaled\n  // by UnpackTo failure.\n  if (!any_message.UnpackTo(&message)) {\n    throw EnvoyException(fmt::format(\"Unable to unpack as {}: {}\",\n                                     message.GetDescriptor()->full_name(),\n                                     any_message.DebugString()));\n  }\n}\n\nvoid MessageUtil::jsonConvert(const Protobuf::Message& source, ProtobufWkt::Struct& dest) {\n  // Any proto3 message can be transformed to Struct, so there is no need to check for unknown\n  // fields. There is one catch; Duration/Timestamp etc. 
which have non-object canonical JSON\n  // representations don't work.\n  jsonConvertInternal(source, ProtobufMessage::getNullValidationVisitor(), dest);\n}\n\nvoid MessageUtil::jsonConvert(const ProtobufWkt::Struct& source,\n                              ProtobufMessage::ValidationVisitor& validation_visitor,\n                              Protobuf::Message& dest) {\n  jsonConvertInternal(source, validation_visitor, dest);\n}\n\nvoid MessageUtil::jsonConvertValue(const Protobuf::Message& source, ProtobufWkt::Value& dest) {\n  jsonConvertInternal(source, ProtobufMessage::getNullValidationVisitor(), dest);\n}\n\nProtobufWkt::Struct MessageUtil::keyValueStruct(const std::string& key, const std::string& value) {\n  ProtobufWkt::Struct struct_obj;\n  ProtobufWkt::Value val;\n  val.set_string_value(value);\n  (*struct_obj.mutable_fields())[key] = val;\n  return struct_obj;\n}\n\nProtobufWkt::Struct MessageUtil::keyValueStruct(const std::map<std::string, std::string>& fields) {\n  ProtobufWkt::Struct struct_obj;\n  ProtobufWkt::Value val;\n  for (const auto& pair : fields) {\n    val.set_string_value(pair.second);\n    (*struct_obj.mutable_fields())[pair.first] = val;\n  }\n  return struct_obj;\n}\n\nstd::string MessageUtil::CodeEnumToString(ProtobufUtil::error::Code code) {\n  return ProtobufUtil::Status(code, \"\").ToString();\n}\n\nnamespace {\n\n// Forward declaration for mutually-recursive helper functions.\nvoid redact(Protobuf::Message* message, bool ancestor_is_sensitive);\n\nusing Transform = std::function<void(Protobuf::Message*, const Protobuf::Reflection*,\n                                     const Protobuf::FieldDescriptor*)>;\n\n// To redact opaque types, namely `Any` and `TypedStruct`, we have to reify them to the concrete\n// message types specified by their `type_url` before we can redact their contents. This is mostly\n// identical between `Any` and `TypedStruct`, the only difference being how they are packed and\n// unpacked. 
Note that we have to use reflection on the opaque type here, rather than downcasting\n// to `Any` or `TypedStruct`, because any message we might be handling could have originated from\n// a `DynamicMessageFactory`.\nbool redactOpaque(Protobuf::Message* message, bool ancestor_is_sensitive,\n                  absl::string_view opaque_type_name, Transform unpack, Transform repack) {\n  // Ensure this message has the opaque type we're expecting.\n  const auto* opaque_descriptor = message->GetDescriptor();\n  if (opaque_descriptor->full_name() != opaque_type_name) {\n    return false;\n  }\n\n  // Find descriptors for the `type_url` and `value` fields. The `type_url` field must not be\n  // empty, but `value` may be (in which case our work is done).\n  const auto* reflection = message->GetReflection();\n  const auto* type_url_field_descriptor = opaque_descriptor->FindFieldByName(\"type_url\");\n  const auto* value_field_descriptor = opaque_descriptor->FindFieldByName(\"value\");\n  ASSERT(type_url_field_descriptor != nullptr && value_field_descriptor != nullptr &&\n         reflection->HasField(*message, type_url_field_descriptor));\n  if (!reflection->HasField(*message, value_field_descriptor)) {\n    return true;\n  }\n\n  // Try to find a descriptor for `type_url` in the pool and instantiate a new message of the\n  // correct concrete type.\n  const std::string type_url(reflection->GetString(*message, type_url_field_descriptor));\n  const std::string concrete_type_name(TypeUtil::typeUrlToDescriptorFullName(type_url));\n  const auto* concrete_descriptor =\n      Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(concrete_type_name);\n  if (concrete_descriptor == nullptr) {\n    // If the type URL doesn't correspond to a known proto, don't try to reify it, just treat it\n    // like any other message. 
See the documented limitation on `MessageUtil::redact()` for more\n    // context.\n    ENVOY_LOG_MISC(warn, \"Could not reify {} with unknown type URL {}\", opaque_type_name, type_url);\n    return false;\n  }\n  Protobuf::DynamicMessageFactory message_factory;\n  std::unique_ptr<Protobuf::Message> typed_message(\n      message_factory.GetPrototype(concrete_descriptor)->New());\n\n  // Finally we can unpack, redact, and repack the opaque message using the provided callbacks.\n  unpack(typed_message.get(), reflection, value_field_descriptor);\n  redact(typed_message.get(), ancestor_is_sensitive);\n  repack(typed_message.get(), reflection, value_field_descriptor);\n  return true;\n}\n\nbool redactAny(Protobuf::Message* message, bool ancestor_is_sensitive) {\n  return redactOpaque(\n      message, ancestor_is_sensitive, \"google.protobuf.Any\",\n      [message](Protobuf::Message* typed_message, const Protobuf::Reflection* reflection,\n                const Protobuf::FieldDescriptor* field_descriptor) {\n        // To unpack an `Any`, parse the serialized proto.\n        typed_message->ParseFromString(reflection->GetString(*message, field_descriptor));\n      },\n      [message](Protobuf::Message* typed_message, const Protobuf::Reflection* reflection,\n                const Protobuf::FieldDescriptor* field_descriptor) {\n        // To repack an `Any`, reserialize its proto.\n        reflection->SetString(message, field_descriptor, typed_message->SerializeAsString());\n      });\n}\n\n// To redact a `TypedStruct`, we have to reify it based on its `type_url` to redact it.\nbool redactTypedStruct(Protobuf::Message* message, bool ancestor_is_sensitive) {\n  return redactOpaque(\n      message, ancestor_is_sensitive, \"udpa.type.v1.TypedStruct\",\n      [message](Protobuf::Message* typed_message, const Protobuf::Reflection* reflection,\n                const Protobuf::FieldDescriptor* field_descriptor) {\n        // To unpack a `TypedStruct`, convert the struct from 
JSON.\n        jsonConvertInternal(reflection->GetMessage(*message, field_descriptor),\n                            ProtobufMessage::getNullValidationVisitor(), *typed_message);\n      },\n      [message](Protobuf::Message* typed_message, const Protobuf::Reflection* reflection,\n                const Protobuf::FieldDescriptor* field_descriptor) {\n        // To repack a `TypedStruct`, convert the message back to JSON.\n        jsonConvertInternal(*typed_message, ProtobufMessage::getNullValidationVisitor(),\n                            *(reflection->MutableMessage(message, field_descriptor)));\n      });\n}\n\n// Recursive helper method for MessageUtil::redact() below.\nvoid redact(Protobuf::Message* message, bool ancestor_is_sensitive) {\n  if (redactAny(message, ancestor_is_sensitive) ||\n      redactTypedStruct(message, ancestor_is_sensitive)) {\n    return;\n  }\n\n  const auto* descriptor = message->GetDescriptor();\n  const auto* reflection = message->GetReflection();\n  for (int i = 0; i < descriptor->field_count(); ++i) {\n    const auto* field_descriptor = descriptor->field(i);\n\n    // Redact if this field or any of its ancestors have the `sensitive` option set.\n    const bool sensitive = ancestor_is_sensitive ||\n                           field_descriptor->options().GetExtension(udpa::annotations::sensitive);\n\n    if (field_descriptor->type() == Protobuf::FieldDescriptor::TYPE_MESSAGE) {\n      // Recursive case: traverse message fields.\n      if (field_descriptor->is_repeated()) {\n        const int field_size = reflection->FieldSize(*message, field_descriptor);\n        for (int i = 0; i < field_size; ++i) {\n          redact(reflection->MutableRepeatedMessage(message, field_descriptor, i), sensitive);\n        }\n      } else if (reflection->HasField(*message, field_descriptor)) {\n        redact(reflection->MutableMessage(message, field_descriptor), sensitive);\n      }\n    } else if (sensitive) {\n      // Base case: replace strings and bytes 
with \"[redacted]\" and clear all others.\n      if (field_descriptor->type() == Protobuf::FieldDescriptor::TYPE_STRING ||\n          field_descriptor->type() == Protobuf::FieldDescriptor::TYPE_BYTES) {\n        if (field_descriptor->is_repeated()) {\n          const int field_size = reflection->FieldSize(*message, field_descriptor);\n          for (int i = 0; i < field_size; ++i) {\n            reflection->SetRepeatedString(message, field_descriptor, i, \"[redacted]\");\n          }\n        } else if (reflection->HasField(*message, field_descriptor)) {\n          reflection->SetString(message, field_descriptor, \"[redacted]\");\n        }\n      } else {\n        reflection->ClearField(message, field_descriptor);\n      }\n    }\n  }\n}\n\n} // namespace\n\nvoid MessageUtil::redact(Protobuf::Message& message) {\n  ::Envoy::redact(&message, /* ancestor_is_sensitive = */ false);\n}\n\nProtobufWkt::Value ValueUtil::loadFromYaml(const std::string& yaml) {\n  try {\n    return parseYamlNode(YAML::Load(yaml));\n  } catch (YAML::ParserException& e) {\n    throw EnvoyException(e.what());\n  } catch (YAML::BadConversion& e) {\n    throw EnvoyException(e.what());\n  } catch (std::exception& e) {\n    // There is a potentially wide space of exceptions thrown by the YAML parser,\n    // and enumerating them all may be difficult. 
Envoy doesn't work well with\n    // unhandled exceptions, so we capture them and record the exception name in\n    // the Envoy Exception text.\n    throw EnvoyException(fmt::format(\"Unexpected YAML exception: {}\", +e.what()));\n  }\n}\n\nbool ValueUtil::equal(const ProtobufWkt::Value& v1, const ProtobufWkt::Value& v2) {\n  ProtobufWkt::Value::KindCase kind = v1.kind_case();\n  if (kind != v2.kind_case()) {\n    return false;\n  }\n\n  switch (kind) {\n  case ProtobufWkt::Value::KIND_NOT_SET:\n    return v2.kind_case() == ProtobufWkt::Value::KIND_NOT_SET;\n\n  case ProtobufWkt::Value::kNullValue:\n    return true;\n\n  case ProtobufWkt::Value::kNumberValue:\n    return v1.number_value() == v2.number_value();\n\n  case ProtobufWkt::Value::kStringValue:\n    return v1.string_value() == v2.string_value();\n\n  case ProtobufWkt::Value::kBoolValue:\n    return v1.bool_value() == v2.bool_value();\n\n  case ProtobufWkt::Value::kStructValue: {\n    const ProtobufWkt::Struct& s1 = v1.struct_value();\n    const ProtobufWkt::Struct& s2 = v2.struct_value();\n    if (s1.fields_size() != s2.fields_size()) {\n      return false;\n    }\n    for (const auto& it1 : s1.fields()) {\n      const auto& it2 = s2.fields().find(it1.first);\n      if (it2 == s2.fields().end()) {\n        return false;\n      }\n\n      if (!equal(it1.second, it2->second)) {\n        return false;\n      }\n    }\n    return true;\n  }\n\n  case ProtobufWkt::Value::kListValue: {\n    const ProtobufWkt::ListValue& l1 = v1.list_value();\n    const ProtobufWkt::ListValue& l2 = v2.list_value();\n    if (l1.values_size() != l2.values_size()) {\n      return false;\n    }\n    for (int i = 0; i < l1.values_size(); i++) {\n      if (!equal(l1.values(i), l2.values(i))) {\n        return false;\n      }\n    }\n    return true;\n  }\n\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nconst ProtobufWkt::Value& ValueUtil::nullValue() {\n  static const auto* v = []() -> ProtobufWkt::Value* {\n    auto* vv = 
new ProtobufWkt::Value();\n    vv->set_null_value(ProtobufWkt::NULL_VALUE);\n    return vv;\n  }();\n  return *v;\n}\n\nProtobufWkt::Value ValueUtil::stringValue(const std::string& str) {\n  ProtobufWkt::Value val;\n  val.set_string_value(str);\n  return val;\n}\n\nProtobufWkt::Value ValueUtil::optionalStringValue(const absl::optional<std::string>& str) {\n  if (str.has_value()) {\n    return ValueUtil::stringValue(str.value());\n  }\n  return ValueUtil::nullValue();\n}\n\nProtobufWkt::Value ValueUtil::boolValue(bool b) {\n  ProtobufWkt::Value val;\n  val.set_bool_value(b);\n  return val;\n}\n\nProtobufWkt::Value ValueUtil::structValue(const ProtobufWkt::Struct& obj) {\n  ProtobufWkt::Value val;\n  (*val.mutable_struct_value()) = obj;\n  return val;\n}\n\nProtobufWkt::Value ValueUtil::listValue(const std::vector<ProtobufWkt::Value>& values) {\n  auto list = std::make_unique<ProtobufWkt::ListValue>();\n  for (const auto& value : values) {\n    *list->add_values() = value;\n  }\n  ProtobufWkt::Value val;\n  val.set_allocated_list_value(list.release());\n  return val;\n}\n\nnamespace {\n\nvoid validateDuration(const ProtobufWkt::Duration& duration) {\n  if (duration.seconds() < 0 || duration.nanos() < 0) {\n    throw DurationUtil::OutOfRangeException(\n        fmt::format(\"Expected positive duration: {}\", duration.DebugString()));\n  }\n  if (duration.nanos() > 999999999 ||\n      duration.seconds() > Protobuf::util::TimeUtil::kDurationMaxSeconds) {\n    throw DurationUtil::OutOfRangeException(\n        fmt::format(\"Duration out-of-range: {}\", duration.DebugString()));\n  }\n}\n\n} // namespace\n\nuint64_t DurationUtil::durationToMilliseconds(const ProtobufWkt::Duration& duration) {\n  validateDuration(duration);\n  return Protobuf::util::TimeUtil::DurationToMilliseconds(duration);\n}\n\nuint64_t DurationUtil::durationToSeconds(const ProtobufWkt::Duration& duration) {\n  validateDuration(duration);\n  return 
Protobuf::util::TimeUtil::DurationToSeconds(duration);\n}\n\nvoid TimestampUtil::systemClockToTimestamp(const SystemTime system_clock_time,\n                                           ProtobufWkt::Timestamp& timestamp) {\n  // Converts to millisecond-precision Timestamp by explicitly casting to millisecond-precision\n  // time_point.\n  timestamp.MergeFrom(Protobuf::util::TimeUtil::MillisecondsToTimestamp(\n      std::chrono::time_point_cast<std::chrono::milliseconds>(system_clock_time)\n          .time_since_epoch()\n          .count()));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/protobuf/utility.h",
    "content": "#pragma once\n\n#include <numeric>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/protobuf/message_validator.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"absl/strings/str_join.h\"\n\n// Obtain the value of a wrapped field (e.g. google.protobuf.UInt32Value) if set. Otherwise, return\n// the default value.\n#define PROTOBUF_GET_WRAPPED_OR_DEFAULT(message, field_name, default_value)                        \\\n  ((message).has_##field_name() ? (message).field_name().value() : (default_value))\n\n// Obtain the value of a wrapped field (e.g. google.protobuf.UInt32Value) if set. Otherwise, throw\n// a MissingFieldException.\n\n#define PROTOBUF_GET_WRAPPED_REQUIRED(message, field_name)                                         \\\n  ([](const auto& msg) {                                                                           \\\n    if (!msg.has_##field_name()) {                                                                 \\\n      ::Envoy::ProtoExceptionUtil::throwMissingFieldException(#field_name, msg);                   \\\n    }                                                                                              \\\n    return msg.field_name().value();                                                               \\\n  }((message)))\n// Obtain the milliseconds value of a google.protobuf.Duration field if set. Otherwise, return the\n// default value.\n#define PROTOBUF_GET_MS_OR_DEFAULT(message, field_name, default_value)                             \\\n  ((message).has_##field_name() ? 
DurationUtil::durationToMilliseconds((message).field_name())     \\\n                                : (default_value))\n\n// Obtain the string value if the field is set. Otherwise, return the default value.\n#define PROTOBUF_GET_STRING_OR_DEFAULT(message, field_name, default_value)                         \\\n  (!(message).field_name().empty() ? (message).field_name() : (default_value))\n\n// Obtain the milliseconds value of a google.protobuf.Duration field if set. Otherwise, return\n// absl::nullopt.\n#define PROTOBUF_GET_OPTIONAL_MS(message, field_name)                                              \\\n  ((message).has_##field_name()                                                                    \\\n       ? absl::optional<std::chrono::milliseconds>(                                                \\\n             DurationUtil::durationToMilliseconds((message).field_name()))                         \\\n       : absl::nullopt)\n\n// Obtain the milliseconds value of a google.protobuf.Duration field if set. Otherwise, throw a\n// MissingFieldException.\n#define PROTOBUF_GET_MS_REQUIRED(message, field_name)                                              \\\n  ([](const auto& msg) {                                                                           \\\n    if (!msg.has_##field_name()) {                                                                 \\\n      ::Envoy::ProtoExceptionUtil::throwMissingFieldException(#field_name, msg);                   \\\n    }                                                                                              \\\n    return DurationUtil::durationToMilliseconds(msg.field_name());                                 \\\n  }((message)))\n\n// Obtain the seconds value of a google.protobuf.Duration field if set. 
Otherwise, throw a\n// MissingFieldException.\n#define PROTOBUF_GET_SECONDS_REQUIRED(message, field_name)                                         \\\n  ([](const auto& msg) {                                                                           \\\n    if (!msg.has_##field_name()) {                                                                 \\\n      ::Envoy::ProtoExceptionUtil::throwMissingFieldException(#field_name, msg);                   \\\n    }                                                                                              \\\n    return DurationUtil::durationToSeconds(msg.field_name());                                      \\\n  }((message)))\n\nnamespace Envoy {\nnamespace ProtobufPercentHelper {\n\n// The following are helpers used in the PROTOBUF_PERCENT_TO_ROUNDED_INTEGER_OR_DEFAULT macro.\n// This avoids a giant macro mess when trying to do asserts, casts, etc.\nuint64_t checkAndReturnDefault(uint64_t default_value, uint64_t max_value);\nuint64_t convertPercent(double percent, uint64_t max_value);\n\n/**\n * Given a fractional percent chance of a given event occurring, evaluate to a yes/no decision\n * based on a provided random value.\n * @param percent the chance of a given event happening.\n * @param random_value supplies a numerical value to use to evaluate the event.\n * @return bool decision about whether the event should occur.\n */\nbool evaluateFractionalPercent(envoy::type::v3::FractionalPercent percent, uint64_t random_value);\n\n/**\n * Convert a fractional percent denominator enum into an integer.\n * @param denominator supplies denominator to convert.\n * @return the converted denominator.\n */\nuint64_t fractionalPercentDenominatorToInt(\n    const envoy::type::v3::FractionalPercent::DenominatorType& denominator);\n\n} // namespace ProtobufPercentHelper\n} // namespace Envoy\n\n// Convert an envoy::type::v3::Percent to a double or a default.\n// @param message supplies the proto message containing the field.\n// 
@param field_name supplies the field name in the message.\n// @param default_value supplies the default if the field is not present.\n#define PROTOBUF_PERCENT_TO_DOUBLE_OR_DEFAULT(message, field_name, default_value)                  \\\n  ([](const auto& msg) -> double {                                                                 \\\n    if (std::isnan(msg.field_name().value())) {                                                    \\\n      ::Envoy::ExceptionUtil::throwEnvoyException(                                                 \\\n          fmt::format(\"Value not in the range of 0..100 range.\"));                                 \\\n    }                                                                                              \\\n    return (msg).has_##field_name() ? (msg).field_name().value() : default_value;                  \\\n  }((message)))\n// Convert an envoy::type::v3::Percent to a rounded integer or a default.\n// @param message supplies the proto message containing the field.\n// @param field_name supplies the field name in the message.\n// @param max_value supplies the maximum allowed integral value (e.g., 100, 10000, etc.).\n// @param default_value supplies the default if the field is not present.\n//\n// TODO(anirudhmurali): Recommended to capture and validate NaN values in PGV\n// Issue: https://github.com/envoyproxy/protoc-gen-validate/issues/85\n#define PROTOBUF_PERCENT_TO_ROUNDED_INTEGER_OR_DEFAULT(message, field_name, max_value,             \\\n                                                       default_value)                              \\\n  ([](const auto& msg) {                                                                           \\\n    if (std::isnan(msg.field_name().value())) {                                                    \\\n      ::Envoy::ExceptionUtil::throwEnvoyException(                                                 \\\n          fmt::format(\"Value not in the range of 0..100 range.\"));                  
               \\\n    }                                                                                              \\\n    return (msg).has_##field_name()                                                                \\\n               ? ProtobufPercentHelper::convertPercent((msg).field_name().value(), max_value)      \\\n               : ProtobufPercentHelper::checkAndReturnDefault(default_value, max_value);           \\\n  }((message)))\n\nnamespace Envoy {\n\nclass MissingFieldException : public EnvoyException {\npublic:\n  MissingFieldException(const std::string& field_name, const Protobuf::Message& message);\n};\n\nclass RepeatedPtrUtil {\npublic:\n  static std::string join(const Protobuf::RepeatedPtrField<std::string>& source,\n                          const std::string& delimiter) {\n    return absl::StrJoin(std::vector<std::string>(source.begin(), source.end()), delimiter);\n  }\n\n  template <class ProtoType>\n  static std::string debugString(const Protobuf::RepeatedPtrField<ProtoType>& source) {\n    if (source.empty()) {\n      return \"[]\";\n    }\n    return std::accumulate(std::next(source.begin()), source.end(), \"[\" + source[0].DebugString(),\n                           [](std::string debug_string, const Protobuf::Message& message) {\n                             return debug_string + \", \" + message.DebugString();\n                           }) +\n           \"]\";\n  }\n\n  // Based on MessageUtil::hash() defined below.\n  template <class ProtoType>\n  static std::size_t hash(const Protobuf::RepeatedPtrField<ProtoType>& source) {\n    // Use Protobuf::io::CodedOutputStream to force deterministic serialization, so that the same\n    // message doesn't hash to different values.\n    std::string text;\n    {\n      // For memory safety, the StringOutputStream needs to be destroyed before\n      // we read the string.\n      Protobuf::io::StringOutputStream string_stream(&text);\n      Protobuf::io::CodedOutputStream 
coded_stream(&string_stream);\n      coded_stream.SetSerializationDeterministic(true);\n      for (const auto& message : source) {\n        message.SerializeToCodedStream(&coded_stream);\n      }\n    }\n    return HashUtil::xxHash64(text);\n  }\n\n  /**\n   * Converts a proto repeated field into a container of const Protobuf::Message unique_ptr's.\n   *\n   * @param repeated_field the proto repeated field to convert.\n   * @return ReturnType the container of const Message pointers.\n   */\n  template <typename ProtoType, typename ReturnType>\n  static ReturnType\n  convertToConstMessagePtrContainer(const Protobuf::RepeatedPtrField<ProtoType>& repeated_field) {\n    ReturnType ret_container;\n    std::transform(repeated_field.begin(), repeated_field.end(), std::back_inserter(ret_container),\n                   [](const ProtoType& proto_message) -> std::unique_ptr<const Protobuf::Message> {\n                     Protobuf::Message* clone = proto_message.New();\n                     clone->MergeFrom(proto_message);\n                     return std::unique_ptr<const Protobuf::Message>(clone);\n                   });\n    return ret_container;\n  }\n};\n\nclass ProtoValidationException : public EnvoyException {\npublic:\n  ProtoValidationException(const std::string& validation_error, const Protobuf::Message& message);\n};\n\n/**\n * utility functions to call when throwing exceptions in header files\n */\nclass ProtoExceptionUtil {\npublic:\n  static void throwMissingFieldException(const std::string& field_name,\n                                         const Protobuf::Message& message);\n  static void throwProtoValidationException(const std::string& validation_error,\n                                            const Protobuf::Message& message);\n};\n\nclass MessageUtil {\npublic:\n  // std::hash\n  std::size_t operator()(const Protobuf::Message& message) const { return hash(message); }\n\n  // std::equals_to\n  bool operator()(const Protobuf::Message& lhs, const 
Protobuf::Message& rhs) const {\n    return Protobuf::util::MessageDifferencer::Equivalent(lhs, rhs);\n  }\n\n  class FileExtensionValues {\n  public:\n    const std::string ProtoBinary = \".pb\";\n    const std::string ProtoBinaryLengthDelimited = \".pb_length_delimited\";\n    const std::string ProtoText = \".pb_text\";\n    const std::string Json = \".json\";\n    const std::string Yaml = \".yaml\";\n  };\n\n  using FileExtensions = ConstSingleton<FileExtensionValues>;\n\n  /**\n   * A hash function uses Protobuf::TextFormat to force deterministic serialization recursively\n   * including known types in google.protobuf.Any. See\n   * https://github.com/protocolbuffers/protobuf/issues/5731 for the context.\n   * Using this function is discouraged, see discussion in\n   * https://github.com/envoyproxy/envoy/issues/8301.\n   */\n  static std::size_t hash(const Protobuf::Message& message);\n\n  static void loadFromJson(const std::string& json, Protobuf::Message& message,\n                           ProtobufMessage::ValidationVisitor& validation_visitor,\n                           bool do_boosting = true);\n  static void loadFromJson(const std::string& json, ProtobufWkt::Struct& message);\n  static void loadFromYaml(const std::string& yaml, Protobuf::Message& message,\n                           ProtobufMessage::ValidationVisitor& validation_visitor,\n                           bool do_boosting = true);\n  static void loadFromYaml(const std::string& yaml, ProtobufWkt::Struct& message);\n  static void loadFromFile(const std::string& path, Protobuf::Message& message,\n                           ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api,\n                           bool do_boosting = true);\n\n  /**\n   * Checks for use of deprecated fields in message and all sub-messages.\n   * @param message message to validate.\n   * @param loader optional a pointer to the runtime loader for live deprecation status.\n   * @throw ProtoValidationException 
if deprecated fields are used and listed\n   *    in disallowed_features in runtime_features.h\n   */\n  static void\n  checkForUnexpectedFields(const Protobuf::Message& message,\n                           ProtobufMessage::ValidationVisitor& validation_visitor,\n                           Runtime::Loader* loader = Runtime::LoaderSingleton::getExisting());\n\n  /**\n   * Validate protoc-gen-validate constraints on a given protobuf.\n   * Note the corresponding `.pb.validate.h` for the message has to be included in the source file\n   * of caller.\n   * @param message message to validate.\n   * @throw ProtoValidationException if the message does not satisfy its type constraints.\n   */\n  template <class MessageType>\n  static void validate(const MessageType& message,\n                       ProtobufMessage::ValidationVisitor& validation_visitor) {\n    // Log warnings or throw errors if deprecated fields or unknown fields are in use.\n    if (!validation_visitor.skipValidation()) {\n      checkForUnexpectedFields(message, validation_visitor);\n    }\n\n    std::string err;\n    if (!Validate(message, &err)) {\n      ProtoExceptionUtil::throwProtoValidationException(err, API_RECOVER_ORIGINAL(message));\n    }\n  }\n\n  template <class MessageType>\n  static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message,\n                                      ProtobufMessage::ValidationVisitor& validation_visitor,\n                                      bool avoid_boosting = false) {\n    loadFromYaml(yaml, message, validation_visitor, !avoid_boosting);\n    validate(message, validation_visitor);\n  }\n\n  /**\n   * Downcast and validate protoc-gen-validate constraints on a given protobuf.\n   * Note the corresponding `.pb.validate.h` for the message has to be included in the source file\n   * of caller.\n   * @param message const Protobuf::Message& to downcast and validate.\n   * @return const MessageType& the concrete message type downcasted to on 
success.\n   * @throw ProtoValidationException if the message does not satisfy its type constraints.\n   */\n  template <class MessageType>\n  static const MessageType&\n  downcastAndValidate(const Protobuf::Message& config,\n                      ProtobufMessage::ValidationVisitor& validation_visitor) {\n    const auto& typed_config = dynamic_cast<MessageType>(config);\n    validate(typed_config, validation_visitor);\n    return typed_config;\n  }\n\n  /**\n   * Convert from google.protobuf.Any to a typed message. This should be used\n   * instead of the inbuilt UnpackTo as it performs validation of results.\n   *\n   * @param any_message source google.protobuf.Any message.\n   * @param message destination to unpack to.\n   *\n   * @throw EnvoyException if the message does not unpack.\n   */\n  static void unpackTo(const ProtobufWkt::Any& any_message, Protobuf::Message& message);\n\n  /**\n   * Convert from google.protobuf.Any to a typed message.\n   * @param message source google.protobuf.Any message.\n   *\n   * @return MessageType the typed message inside the Any.\n   */\n  template <class MessageType>\n  static inline void anyConvert(const ProtobufWkt::Any& message, MessageType& typed_message) {\n    unpackTo(message, typed_message);\n  };\n\n  template <class MessageType>\n  static inline MessageType anyConvert(const ProtobufWkt::Any& message) {\n    MessageType typed_message;\n    anyConvert(message, typed_message);\n    return typed_message;\n  };\n\n  /**\n   * Convert and validate from google.protobuf.Any to a typed message.\n   * @param message source google.protobuf.Any message.\n   *\n   * @return MessageType the typed message inside the Any.\n   * @throw ProtoValidationException if the message does not satisfy its type constraints.\n   */\n  template <class MessageType>\n  static inline void anyConvertAndValidate(const ProtobufWkt::Any& message,\n                                           MessageType& typed_message,\n                                   
        ProtobufMessage::ValidationVisitor& validation_visitor) {\n    anyConvert<MessageType>(message, typed_message);\n    validate(typed_message, validation_visitor);\n  };\n\n  template <class MessageType>\n  static inline MessageType\n  anyConvertAndValidate(const ProtobufWkt::Any& message,\n                        ProtobufMessage::ValidationVisitor& validation_visitor) {\n    MessageType typed_message;\n    anyConvertAndValidate<MessageType>(message, typed_message, validation_visitor);\n    return typed_message;\n  };\n\n  /**\n   * Invoke when a version upgrade (e.g. v2 -> v3) is detected. This may warn or throw\n   * depending on where we are in the major version deprecation cycle.\n   * @param desc description of upgrade to include in warning or exception.\n   */\n  static void onVersionUpgradeWarn(absl::string_view desc);\n\n  /**\n   * Obtain a string field from a protobuf message dynamically.\n   *\n   * @param message message to extract from.\n   * @param field_name field name.\n   *\n   * @return std::string with field value.\n   */\n  static inline std::string getStringField(const Protobuf::Message& message,\n                                           const std::string& field_name) {\n    const Protobuf::Descriptor* descriptor = message.GetDescriptor();\n    const Protobuf::FieldDescriptor* name_field = descriptor->FindFieldByName(field_name);\n    const Protobuf::Reflection* reflection = message.GetReflection();\n    return reflection->GetString(message, name_field);\n  }\n\n  /**\n   * Convert between two protobufs via a JSON round-trip. 
This is used to translate arbitrary\n   * messages to/from google.protobuf.Struct.\n   * TODO(htuch): Avoid round-tripping via JSON strings by doing whatever\n   * Protobuf::util::MessageToJsonString does but generating a google.protobuf.Struct instead.\n   * @param source message.\n   * @param dest message.\n   */\n  static void jsonConvert(const Protobuf::Message& source, ProtobufWkt::Struct& dest);\n  static void jsonConvert(const ProtobufWkt::Struct& source,\n                          ProtobufMessage::ValidationVisitor& validation_visitor,\n                          Protobuf::Message& dest);\n  static void jsonConvertValue(const Protobuf::Message& source, ProtobufWkt::Value& dest);\n\n  /**\n   * Extract YAML as string from a google.protobuf.Message.\n   * @param message message of type type.googleapis.com/google.protobuf.Message.\n   * @param block_print whether the returned JSON should be in block style rather than flow style.\n   * @param always_print_primitive_fields whether to include primitive fields set to their default\n   * values, e.g. an int32 set to 0 or a bool set to false.\n   * @return std::string of formatted YAML object.\n   */\n  static std::string getYamlStringFromMessage(const Protobuf::Message& message,\n                                              const bool block_print = true,\n                                              const bool always_print_primitive_fields = false);\n\n  /**\n   * Extract JSON as string from a google.protobuf.Message.\n   * @param message message of type type.googleapis.com/google.protobuf.Message.\n   * @param pretty_print whether the returned JSON should be formatted.\n   * @param always_print_primitive_fields whether to include primitive fields set to their default\n   * values, e.g. 
an int32 set to 0 or a bool set to false.\n   * @return std::string of formatted JSON object.\n   */\n  static std::string getJsonStringFromMessage(const Protobuf::Message& message,\n                                              bool pretty_print = false,\n                                              bool always_print_primitive_fields = false);\n\n  /**\n   * Utility method to create a Struct containing the passed in key/value strings.\n   *\n   * @param key the key to use to set the value\n   * @param value the string value to associate with the key\n   */\n  static ProtobufWkt::Struct keyValueStruct(const std::string& key, const std::string& value);\n\n  /**\n   * Utility method to create a Struct containing the passed in key/value map.\n   *\n   * @param fields the key/value pairs to initialize the Struct proto\n   */\n  static ProtobufWkt::Struct keyValueStruct(const std::map<std::string, std::string>& fields);\n\n  /**\n   * Utility method to print a human readable string of the code passed in.\n   *\n   * @param code the protobuf error code\n   */\n  static std::string CodeEnumToString(ProtobufUtil::error::Code code);\n\n  /**\n   * Modifies a message such that all sensitive data (that is, fields annotated as\n   * `udpa.annotations.sensitive`) is redacted for display. String-typed fields annotated as\n   * `sensitive` will be replaced with the string \"[redacted]\", bytes-typed fields will be replaced\n   * with the bytes `5B72656461637465645D` (the ASCII / UTF-8 encoding of the string \"[redacted]\"),\n   * primitive-typed fields (including enums) will be cleared, and message-typed fields will be\n   * traversed recursively to redact their contents.\n   *\n   * LIMITATION: This works properly for strongly-typed messages, as well as for messages packed in\n   * a `ProtobufWkt::Any` with a `type_url` corresponding to a proto that was compiled into the\n   * Envoy binary. 
However it does not work for messages encoded as `ProtobufWkt::Struct`, since\n   * structs are missing the \"sensitive\" annotations that this function expects. Similarly, it fails\n   * for messages encoded as `ProtobufWkt::Any` with a `type_url` that isn't registered with the\n   * binary. If you're working with struct-typed messages, including those that might be hiding\n   * within strongly-typed messages, please reify them to strongly-typed messages using\n   * `MessageUtil::jsonConvert()` before calling `MessageUtil::redact()`.\n   *\n   * @param message message to redact.\n   */\n  static void redact(Protobuf::Message& message);\n};\n\nclass ValueUtil {\npublic:\n  static std::size_t hash(const ProtobufWkt::Value& value) { return MessageUtil::hash(value); }\n\n  /**\n   * Load YAML string into ProtobufWkt::Value.\n   */\n  static ProtobufWkt::Value loadFromYaml(const std::string& yaml);\n\n  /**\n   * Compare two ProtobufWkt::Values for equality.\n   * @param v1 message of type type.googleapis.com/google.protobuf.Value\n   * @param v2 message of type type.googleapis.com/google.protobuf.Value\n   * @return true if v1 and v2 are identical\n   */\n  static bool equal(const ProtobufWkt::Value& v1, const ProtobufWkt::Value& v2);\n\n  /**\n   * @return wrapped ProtobufWkt::NULL_VALUE.\n   */\n  static const ProtobufWkt::Value& nullValue();\n\n  /**\n   * Wrap std::string into ProtobufWkt::Value string value.\n   * @param str string to be wrapped.\n   * @return wrapped string.\n   */\n  static ProtobufWkt::Value stringValue(const std::string& str);\n\n  /**\n   * Wrap optional std::string into ProtobufWkt::Value string value.\n   * If the argument contains a null optional, return ProtobufWkt::NULL_VALUE.\n   * @param str string to be wrapped.\n   * @return wrapped string.\n   */\n  static ProtobufWkt::Value optionalStringValue(const absl::optional<std::string>& str);\n\n  /**\n   * Wrap boolean into ProtobufWkt::Value boolean value.\n   * @param str boolean to be 
wrapped.\n   * @return wrapped boolean.\n   */\n  static ProtobufWkt::Value boolValue(bool b);\n\n  /**\n   * Wrap ProtobufWkt::Struct into ProtobufWkt::Value struct value.\n   * @param obj struct to be wrapped.\n   * @return wrapped struct.\n   */\n  static ProtobufWkt::Value structValue(const ProtobufWkt::Struct& obj);\n\n  /**\n   * Wrap number into ProtobufWkt::Value double value.\n   * @param num number to be wrapped.\n   * @return wrapped number.\n   */\n  template <typename T> static ProtobufWkt::Value numberValue(const T num) {\n    ProtobufWkt::Value val;\n    val.set_number_value(static_cast<double>(num));\n    return val;\n  }\n\n  /**\n   * Wrap a collection of ProtobufWkt::Values into ProtobufWkt::Value list value.\n   * @param values collection of ProtobufWkt::Values to be wrapped.\n   * @return wrapped list value.\n   */\n  static ProtobufWkt::Value listValue(const std::vector<ProtobufWkt::Value>& values);\n};\n\n/**\n * HashedValue is a wrapper around ProtobufWkt::Value that computes\n * and stores a hash code for the Value at construction.\n */\nclass HashedValue {\npublic:\n  HashedValue(const ProtobufWkt::Value& value) : value_(value), hash_(ValueUtil::hash(value)){};\n  HashedValue(const HashedValue& v) = default;\n\n  const ProtobufWkt::Value& value() const { return value_; }\n  std::size_t hash() const { return hash_; }\n\n  bool operator==(const HashedValue& rhs) const {\n    return hash_ == rhs.hash_ && ValueUtil::equal(value_, rhs.value_);\n  }\n\n  bool operator!=(const HashedValue& rhs) const { return !(*this == rhs); }\n\nprivate:\n  const ProtobufWkt::Value value_;\n  const std::size_t hash_;\n};\n\nclass DurationUtil {\npublic:\n  class OutOfRangeException : public EnvoyException {\n  public:\n    OutOfRangeException(const std::string& error) : EnvoyException(error) {}\n  };\n\n  /**\n   * Same as DurationUtil::durationToMilliseconds but with extra validation logic.\n   * Same as Protobuf::util::TimeUtil::DurationToSeconds but with 
extra validation logic.\n   * Specifically, we ensure that the duration is positive.\n   * @param duration protobuf.\n   * @return duration in milliseconds.\n   * @throw OutOfRangeException when duration is out-of-range.\n   */\n  static uint64_t durationToMilliseconds(const ProtobufWkt::Duration& duration);\n\n  /**\n   * Same as Protobuf::util::TimeUtil::DurationToSeconds but with extra validation logic.\n   * Specifically, we ensure that the duration is positive.\n   * @param duration protobuf.\n   * @return duration in seconds.\n   * @throw OutOfRangeException when duration is out-of-range.\n   */\n  static uint64_t durationToSeconds(const ProtobufWkt::Duration& duration);\n};\n\nclass TimestampUtil {\npublic:\n  /**\n   * Writes a time_point<system_clock> (SystemTime) to a protobuf Timestamp, by way of time_t.\n   * @param system_clock_time the time to write\n   * @param timestamp a pointer to the mutable protobuf member to be written into.\n   */\n  static void systemClockToTimestamp(const SystemTime system_clock_time,\n                                     ProtobufWkt::Timestamp& timestamp);\n};\n\n} // namespace Envoy\n\nnamespace std {\n// Inject an implementation of std::hash for Envoy::HashedValue into the std namespace.\ntemplate <> struct hash<Envoy::HashedValue> {\n  std::size_t operator()(Envoy::HashedValue const& v) const { return v.hash(); }\n};\n\n} // namespace std\n"
  },
  {
    "path": "source/common/protobuf/visitor.cc",
    "content": "#include \"common/protobuf/visitor.h\"\n\nnamespace Envoy {\nnamespace ProtobufMessage {\n\nvoid traverseMutableMessage(ProtoVisitor& visitor, Protobuf::Message& message, const void* ctxt) {\n  visitor.onMessage(message, ctxt);\n  const Protobuf::Descriptor* descriptor = message.GetDescriptor();\n  const Protobuf::Reflection* reflection = message.GetReflection();\n  for (int i = 0; i < descriptor->field_count(); ++i) {\n    const Protobuf::FieldDescriptor* field = descriptor->field(i);\n    const void* field_ctxt = visitor.onField(message, *field, ctxt);\n    // If this is a message, recurse to visit fields in the sub-message.\n    if (field->cpp_type() == Protobuf::FieldDescriptor::CPPTYPE_MESSAGE) {\n      if (field->is_repeated()) {\n        const int size = reflection->FieldSize(message, field);\n        for (int j = 0; j < size; ++j) {\n          traverseMutableMessage(visitor, *reflection->MutableRepeatedMessage(&message, field, j),\n                                 field_ctxt);\n        }\n      } else if (reflection->HasField(message, field)) {\n        traverseMutableMessage(visitor, *reflection->MutableMessage(&message, field), field_ctxt);\n      }\n    }\n  }\n}\nvoid traverseMessage(ConstProtoVisitor& visitor, const Protobuf::Message& message,\n                     const void* ctxt) {\n  visitor.onMessage(message, ctxt);\n  const Protobuf::Descriptor* descriptor = message.GetDescriptor();\n  const Protobuf::Reflection* reflection = message.GetReflection();\n  for (int i = 0; i < descriptor->field_count(); ++i) {\n    const Protobuf::FieldDescriptor* field = descriptor->field(i);\n    const void* field_ctxt = visitor.onField(message, *field, ctxt);\n    // If this is a message, recurse to scrub deprecated fields in the sub-message.\n    if (field->cpp_type() == Protobuf::FieldDescriptor::CPPTYPE_MESSAGE) {\n      if (field->is_repeated()) {\n        const int size = reflection->FieldSize(message, field);\n        for (int j = 0; j < 
size; ++j) {\n          traverseMessage(visitor, reflection->GetRepeatedMessage(message, field, j), field_ctxt);\n        }\n      } else if (reflection->HasField(message, field)) {\n        traverseMessage(visitor, reflection->GetMessage(message, field), field_ctxt);\n      }\n    }\n  }\n}\n\n} // namespace ProtobufMessage\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/protobuf/visitor.h",
    "content": "#pragma once\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace ProtobufMessage {\n\nclass ProtoVisitor {\npublic:\n  virtual ~ProtoVisitor() = default;\n\n  // Invoked when a field is visited, with the message, field descriptor and context. Returns a new\n  // context for use when traversing the sub-message in a field.\n  virtual const void* onField(Protobuf::Message&, const Protobuf::FieldDescriptor&,\n                              const void* ctxt) {\n    return ctxt;\n  }\n\n  // Invoked when a message is visited, with the message and a context.\n  virtual void onMessage(Protobuf::Message&, const void*){};\n};\n\nclass ConstProtoVisitor {\npublic:\n  virtual ~ConstProtoVisitor() = default;\n\n  // Invoked when a field is visited, with the message, field descriptor and context. Returns a new\n  // context for use when traversing the sub-message in a field.\n  virtual const void* onField(const Protobuf::Message&, const Protobuf::FieldDescriptor&,\n                              const void* ctxt) {\n    return ctxt;\n  }\n\n  // Invoked when a message is visited, with the message and a context.\n  virtual void onMessage(const Protobuf::Message&, const void*){};\n};\n\nvoid traverseMutableMessage(ProtoVisitor& visitor, Protobuf::Message& message, const void* ctxt);\nvoid traverseMessage(ConstProtoVisitor& visitor, const Protobuf::Message& message,\n                     const void* ctxt);\n\n} // namespace ProtobufMessage\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/protobuf/well_known.h",
    "content": "#pragma once\n\n#include <cstdint>\n\nnamespace Envoy {\nnamespace ProtobufWellKnown {\n\n// Used by VersionConverter to track the original type of an upgraded message.\n// Magic number in this file derived from top 28bit of SHA256 digest of\n// \"original type\".\nconstexpr uint32_t OriginalTypeFieldNumber = 183412668;\n\n} // namespace ProtobufWellKnown\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"metadatamatchcriteria_lib\",\n    srcs = [\"metadatamatchcriteria_impl.cc\"],\n    hdrs = [\"metadatamatchcriteria_impl.h\"],\n    deps = [\n        \"//include/envoy/router:router_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tls_context_match_criteria_lib\",\n    srcs = [\"tls_context_match_criteria_impl.cc\"],\n    hdrs = [\"tls_context_match_criteria_impl.h\"],\n    deps = [\n        \"//include/envoy/router:router_interface\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"config_lib\",\n    srcs = [\"config_impl.cc\"],\n    hdrs = [\"config_impl.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":config_utility_lib\",\n        \":header_formatter_lib\",\n        \":header_parser_lib\",\n        \":metadatamatchcriteria_lib\",\n        \":reset_header_parser_lib\",\n        \":retry_state_lib\",\n        \":router_ratelimit_lib\",\n        \":tls_context_match_criteria_lib\",\n        \"//include/envoy/config:typed_metadata_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/router:router_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:filter_config_interface\",  # TODO(rodaine): break dependency on server\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:metadata_lib\",\n        
\"//source/common/config:utility_lib\",\n        \"//source/common/config:well_known_names\",\n        \"//source/common/http:hash_policy_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:path_utility_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"config_utility_lib\",\n    srcs = [\"config_utility.cc\"],\n    hdrs = [\"config_utility.h\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/upstream:resource_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"debug_config_lib\",\n    srcs = [\"debug_config.cc\"],\n    hdrs = [\"debug_config.h\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"route_config_update_impl_lib\",\n    srcs = 
[\"route_config_update_receiver_impl.cc\"],\n    hdrs = [\"route_config_update_receiver_impl.h\"],\n    deps = [\n        \":config_lib\",\n        \"//include/envoy/router:rds_interface\",\n        \"//include/envoy/router:route_config_update_info_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"vhds_lib\",\n    srcs = [\"vhds.cc\"],\n    hdrs = [\"vhds.h\"],\n    deps = [\n        \":config_lib\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/router:rds_interface\",\n        \"//include/envoy/router:route_config_provider_manager_interface\",\n        \"//include/envoy/router:route_config_update_info_interface\",\n        \"//include/envoy/singleton:instance_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:subscription_base_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/router:route_config_update_impl_lib\",\n        \"@envoy_api//envoy/api/v2/route:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"rds_lib\",\n    srcs = [\"rds_impl.cc\"],\n    hdrs = [\"rds_impl.h\"],\n    deps 
= [\n        \":config_lib\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/router:rds_interface\",\n        \"//include/envoy/router:route_config_provider_manager_interface\",\n        \"//include/envoy/router:route_config_update_info_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/singleton:instance_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:callback_impl_lib\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:subscription_base_interface\",\n        \"//source/common/config:subscription_factory_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/init:manager_lib\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/init:watcher_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/router:route_config_update_impl_lib\",\n        \"//source/common/router:vhds_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"scoped_config_lib\",\n    srcs = [\"scoped_config_impl.cc\"],\n    hdrs = [\"scoped_config_impl.h\"],\n    external_deps = [\n        \"abseil_str_format\",\n    ],\n    deps = [\n  
      \":config_lib\",\n        \"//include/envoy/router:rds_interface\",\n        \"//include/envoy/router:scopes_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"scoped_rds_lib\",\n    srcs = [\"scoped_rds.cc\"],\n    hdrs = [\"scoped_rds.h\"],\n    deps = [\n        \":rds_lib\",\n        \":scoped_config_lib\",\n        \"//include/envoy/config:config_provider_interface\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/router:route_config_provider_manager_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:config_provider_lib\",\n        \"//source/common/config:resource_name_lib\",\n        \"//source/common/config:subscription_base_interface\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/init:manager_lib\",\n        \"//source/common/init:watcher_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"retry_state_lib\",\n    srcs = [\"retry_state_impl.cc\"],\n    hdrs = [\"retry_state_impl.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/event:timer_interface\",\n        
\"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/router:router_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:backoff_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_lib\",\n    srcs = [\n        \"router.cc\",\n        \"upstream_request.cc\",\n    ],\n    hdrs = [\n        \"router.h\",\n        \"upstream_request.h\",\n    ],\n    deps = [\n        \":config_lib\",\n        \":debug_config_lib\",\n        \":header_parser_lib\",\n        \":retry_state_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/grpc:status\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/router:shadow_writer_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        
\"//source/common/access_log:access_log_lib\",\n        \"//source/common/buffer:watermark_buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:scope_tracker\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/network:application_protocol_lib\",\n        \"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/common/stream_info:uint32_accessor_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/extensions/common/proxy_protocol:proxy_protocol_header_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_ratelimit_lib\",\n    srcs = [\"router_ratelimit.cc\"],\n    hdrs = [\"router_ratelimit.h\"],\n    deps = [\n        \":config_utility_lib\",\n        \"//include/envoy/router:router_interface\",\n        \"//include/envoy/router:router_ratelimit_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"shadow_writer_lib\",\n    srcs = [\"shadow_writer_impl.cc\"],\n    hdrs = [\"shadow_writer_impl.h\"],\n    deps = [\n        \"//include/envoy/router:shadow_writer_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/http:headers_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"header_parser_lib\",\n    srcs = [\"header_parser.cc\"],\n    hdrs = [\"header_parser.h\"],\n    deps = [\n        \":header_formatter_lib\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"header_formatter_lib\",\n    srcs = [\"header_formatter.cc\"],\n    hdrs = [\"header_formatter.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/router:string_accessor_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/formatter:substitution_formatter_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/json:json_loader_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"reset_header_parser_lib\",\n    srcs = [\"reset_header_parser.cc\"],\n    hdrs = [\"reset_header_parser.h\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/router:router_interface\",\n        \"//source/common/http:headers_lib\",\n   
     \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"string_accessor_lib\",\n    hdrs = [\"string_accessor_impl.h\"],\n    deps = [\n        \"//include/envoy/router:string_accessor_interface\",\n    ],\n)\n"
  },
  {
    "path": "source/common/router/config_impl.cc",
    "content": "#include \"common/router/config_impl.h\"\n\n#include <algorithm>\n#include <chrono>\n#include <cstdint>\n#include <map>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/hash.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/regex.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/metadata.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/path_utility.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/reset_header_parser.h\"\n#include \"common/router/retry_state_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/filters/http/common/utility.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nconst std::string DEPRECATED_ROUTER_NAME = \"envoy.router\";\n\n} // namespace\n\nstd::string SslRedirector::newPath(const Http::RequestHeaderMap& headers) const {\n  return Http::Utility::createSslRedirectPath(headers);\n}\n\nHedgePolicyImpl::HedgePolicyImpl(const envoy::config::route::v3::HedgePolicy& hedge_policy)\n    : initial_requests_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(hedge_policy, initial_requests, 1)),\n      
additional_request_chance_(hedge_policy.additional_request_chance()),\n      hedge_on_per_try_timeout_(hedge_policy.hedge_on_per_try_timeout()) {}\n\nHedgePolicyImpl::HedgePolicyImpl() : initial_requests_(1), hedge_on_per_try_timeout_(false) {}\n\nRetryPolicyImpl::RetryPolicyImpl(const envoy::config::route::v3::RetryPolicy& retry_policy,\n                                 ProtobufMessage::ValidationVisitor& validation_visitor)\n    : retriable_headers_(\n          Http::HeaderUtility::buildHeaderMatcherVector(retry_policy.retriable_headers())),\n      retriable_request_headers_(\n          Http::HeaderUtility::buildHeaderMatcherVector(retry_policy.retriable_request_headers())),\n      validation_visitor_(&validation_visitor) {\n  per_try_timeout_ =\n      std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(retry_policy, per_try_timeout, 0));\n  num_retries_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(retry_policy, num_retries, 1);\n  retry_on_ = RetryStateImpl::parseRetryOn(retry_policy.retry_on()).first;\n  retry_on_ |= RetryStateImpl::parseRetryGrpcOn(retry_policy.retry_on()).first;\n\n  for (const auto& host_predicate : retry_policy.retry_host_predicate()) {\n    auto& factory = Envoy::Config::Utility::getAndCheckFactory<Upstream::RetryHostPredicateFactory>(\n        host_predicate);\n    auto config = Envoy::Config::Utility::translateToFactoryConfig(host_predicate,\n                                                                   validation_visitor, factory);\n    retry_host_predicate_configs_.emplace_back(factory, std::move(config));\n  }\n\n  const auto& retry_priority = retry_policy.retry_priority();\n  if (!retry_priority.name().empty()) {\n    auto& factory =\n        Envoy::Config::Utility::getAndCheckFactory<Upstream::RetryPriorityFactory>(retry_priority);\n    retry_priority_config_ =\n        std::make_pair(&factory, Envoy::Config::Utility::translateToFactoryConfig(\n                                     retry_priority, validation_visitor, factory));\n  }\n\n  
auto host_selection_attempts = retry_policy.host_selection_retry_max_attempts();\n  if (host_selection_attempts) {\n    host_selection_attempts_ = host_selection_attempts;\n  }\n\n  for (auto code : retry_policy.retriable_status_codes()) {\n    retriable_status_codes_.emplace_back(code);\n  }\n\n  if (retry_policy.has_retry_back_off()) {\n    base_interval_ = std::chrono::milliseconds(\n        PROTOBUF_GET_MS_REQUIRED(retry_policy.retry_back_off(), base_interval));\n    if ((*base_interval_).count() < 1) {\n      base_interval_ = std::chrono::milliseconds(1);\n    }\n\n    max_interval_ = PROTOBUF_GET_OPTIONAL_MS(retry_policy.retry_back_off(), max_interval);\n    if (max_interval_) {\n      // Apply the same rounding to max interval in case both are set to sub-millisecond values.\n      if ((*max_interval_).count() < 1) {\n        max_interval_ = std::chrono::milliseconds(1);\n      }\n\n      if ((*max_interval_).count() < (*base_interval_).count()) {\n        throw EnvoyException(\n            \"retry_policy.max_interval must greater than or equal to the base_interval\");\n      }\n    }\n  }\n\n  if (retry_policy.has_rate_limited_retry_back_off()) {\n    reset_headers_ = ResetHeaderParserImpl::buildResetHeaderParserVector(\n        retry_policy.rate_limited_retry_back_off().reset_headers());\n\n    absl::optional<std::chrono::milliseconds> reset_max_interval =\n        PROTOBUF_GET_OPTIONAL_MS(retry_policy.rate_limited_retry_back_off(), max_interval);\n    if (reset_max_interval.has_value()) {\n      std::chrono::milliseconds max_interval = reset_max_interval.value();\n      if (max_interval.count() < 1) {\n        max_interval = std::chrono::milliseconds(1);\n      }\n      reset_max_interval_ = max_interval;\n    }\n  }\n}\n\nstd::vector<Upstream::RetryHostPredicateSharedPtr> RetryPolicyImpl::retryHostPredicates() const {\n  std::vector<Upstream::RetryHostPredicateSharedPtr> predicates;\n\n  for (const auto& config : retry_host_predicate_configs_) {\n    
predicates.emplace_back(config.first.createHostPredicate(*config.second, num_retries_));\n  }\n\n  return predicates;\n}\n\nUpstream::RetryPrioritySharedPtr RetryPolicyImpl::retryPriority() const {\n  if (retry_priority_config_.first == nullptr) {\n    return nullptr;\n  }\n\n  return retry_priority_config_.first->createRetryPriority(*retry_priority_config_.second,\n                                                           *validation_visitor_, num_retries_);\n}\n\nInternalRedirectPolicyImpl::InternalRedirectPolicyImpl(\n    const envoy::config::route::v3::InternalRedirectPolicy& policy_config,\n    ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name)\n    : current_route_name_(current_route_name),\n      redirect_response_codes_(buildRedirectResponseCodes(policy_config)),\n      max_internal_redirects_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(policy_config, max_internal_redirects, 1)),\n      enabled_(true), allow_cross_scheme_redirect_(policy_config.allow_cross_scheme_redirect()) {\n  for (const auto& predicate : policy_config.predicates()) {\n    auto& factory =\n        Envoy::Config::Utility::getAndCheckFactory<InternalRedirectPredicateFactory>(predicate);\n    auto config = factory.createEmptyConfigProto();\n    Envoy::Config::Utility::translateOpaqueConfig(predicate.typed_config(), {}, validator, *config);\n    predicate_factories_.emplace_back(&factory, std::move(config));\n  }\n}\n\nstd::vector<InternalRedirectPredicateSharedPtr> InternalRedirectPolicyImpl::predicates() const {\n  std::vector<InternalRedirectPredicateSharedPtr> predicates;\n  for (const auto& predicate_factory : predicate_factories_) {\n    predicates.emplace_back(predicate_factory.first->createInternalRedirectPredicate(\n        *predicate_factory.second, current_route_name_));\n  }\n  return predicates;\n}\n\nabsl::flat_hash_set<Http::Code> InternalRedirectPolicyImpl::buildRedirectResponseCodes(\n    const 
envoy::config::route::v3::InternalRedirectPolicy& policy_config) const {\n  if (policy_config.redirect_response_codes_size() == 0) {\n    return absl::flat_hash_set<Http::Code>{Http::Code::Found};\n  }\n  absl::flat_hash_set<Http::Code> ret;\n  std::for_each(policy_config.redirect_response_codes().begin(),\n                policy_config.redirect_response_codes().end(), [&ret](uint32_t response_code) {\n                  const absl::flat_hash_set<uint32_t> valid_redirect_response_code = {301, 302, 303,\n                                                                                      307, 308};\n                  if (valid_redirect_response_code.contains(response_code)) {\n                    ret.insert(static_cast<Http::Code>(response_code));\n                  }\n                });\n  return ret;\n}\n\nCorsPolicyImpl::CorsPolicyImpl(const envoy::config::route::v3::CorsPolicy& config,\n                               Runtime::Loader& loader)\n    : config_(config), loader_(loader), allow_methods_(config.allow_methods()),\n      allow_headers_(config.allow_headers()), expose_headers_(config.expose_headers()),\n      max_age_(config.max_age()),\n      legacy_enabled_(config.has_hidden_envoy_deprecated_enabled()\n                          ? 
config.hidden_envoy_deprecated_enabled().value()\n                          : true) {\n  for (const auto& origin : config.hidden_envoy_deprecated_allow_origin()) {\n    envoy::type::matcher::v3::StringMatcher matcher_config;\n    matcher_config.set_exact(origin);\n    allow_origins_.push_back(std::make_unique<Matchers::StringMatcherImpl>(matcher_config));\n  }\n  for (const auto& regex : config.hidden_envoy_deprecated_allow_origin_regex()) {\n    envoy::type::matcher::v3::StringMatcher matcher_config;\n    matcher_config.set_hidden_envoy_deprecated_regex(regex);\n    allow_origins_.push_back(std::make_unique<Matchers::StringMatcherImpl>(matcher_config));\n  }\n  for (const auto& string_match : config.allow_origin_string_match()) {\n    allow_origins_.push_back(std::make_unique<Matchers::StringMatcherImpl>(string_match));\n  }\n  if (config.has_allow_credentials()) {\n    allow_credentials_ = PROTOBUF_GET_WRAPPED_REQUIRED(config, allow_credentials);\n  }\n}\n\nShadowPolicyImpl::ShadowPolicyImpl(const RequestMirrorPolicy& config) {\n\n  cluster_ = config.cluster();\n\n  if (config.has_runtime_fraction()) {\n    runtime_key_ = config.runtime_fraction().runtime_key();\n    default_value_ = config.runtime_fraction().default_value();\n  } else {\n    runtime_key_ = config.hidden_envoy_deprecated_runtime_key();\n    default_value_.set_numerator(0);\n  }\n  trace_sampled_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, trace_sampled, true);\n}\n\nDecoratorImpl::DecoratorImpl(const envoy::config::route::v3::Decorator& decorator)\n    : operation_(decorator.operation()),\n      propagate_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(decorator, propagate, true)) {}\n\nvoid DecoratorImpl::apply(Tracing::Span& span) const {\n  if (!operation_.empty()) {\n    span.setOperation(operation_);\n  }\n}\n\nconst std::string& DecoratorImpl::getOperation() const { return operation_; }\n\nbool DecoratorImpl::propagate() const { return propagate_; }\n\nRouteTracingImpl::RouteTracingImpl(const 
envoy::config::route::v3::Tracing& tracing) {\n  if (!tracing.has_client_sampling()) {\n    client_sampling_.set_numerator(100);\n    client_sampling_.set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n  } else {\n    client_sampling_ = tracing.client_sampling();\n  }\n  if (!tracing.has_random_sampling()) {\n    random_sampling_.set_numerator(100);\n    random_sampling_.set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n  } else {\n    random_sampling_ = tracing.random_sampling();\n  }\n  if (!tracing.has_overall_sampling()) {\n    overall_sampling_.set_numerator(100);\n    overall_sampling_.set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n  } else {\n    overall_sampling_ = tracing.overall_sampling();\n  }\n  for (const auto& tag : tracing.custom_tags()) {\n    custom_tags_.emplace(tag.tag(), Tracing::HttpTracerUtility::createCustomTag(tag));\n  }\n}\n\nconst envoy::type::v3::FractionalPercent& RouteTracingImpl::getClientSampling() const {\n  return client_sampling_;\n}\n\nconst envoy::type::v3::FractionalPercent& RouteTracingImpl::getRandomSampling() const {\n  return random_sampling_;\n}\n\nconst envoy::type::v3::FractionalPercent& RouteTracingImpl::getOverallSampling() const {\n  return overall_sampling_;\n}\nconst Tracing::CustomTagMap& RouteTracingImpl::getCustomTags() const { return custom_tags_; }\n\nRouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost,\n                                       const envoy::config::route::v3::Route& route,\n                                       Server::Configuration::ServerFactoryContext& factory_context,\n                                       ProtobufMessage::ValidationVisitor& validator)\n    : case_sensitive_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.match(), case_sensitive, true)),\n      prefix_rewrite_(route.route().prefix_rewrite()),\n      host_rewrite_(route.route().host_rewrite_literal()), vhost_(vhost),\n      
auto_host_rewrite_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.route(), auto_host_rewrite, false)),\n      auto_host_rewrite_header_(!route.route().host_rewrite_header().empty()\n                                    ? absl::optional<Http::LowerCaseString>(Http::LowerCaseString(\n                                          route.route().host_rewrite_header()))\n                                    : absl::nullopt),\n      host_rewrite_path_regex_(\n          route.route().has_host_rewrite_path_regex()\n              ? Regex::Utility::parseRegex(route.route().host_rewrite_path_regex().pattern())\n              : nullptr),\n      host_rewrite_path_regex_substitution_(\n          route.route().has_host_rewrite_path_regex()\n              ? route.route().host_rewrite_path_regex().substitution()\n              : \"\"),\n      cluster_name_(route.route().cluster()), cluster_header_name_(route.route().cluster_header()),\n      cluster_not_found_response_code_(ConfigUtility::parseClusterNotFoundResponseCode(\n          route.route().cluster_not_found_response_code())),\n      timeout_(PROTOBUF_GET_MS_OR_DEFAULT(route.route(), timeout, DEFAULT_ROUTE_TIMEOUT_MS)),\n      idle_timeout_(PROTOBUF_GET_OPTIONAL_MS(route.route(), idle_timeout)),\n      max_stream_duration_(\n          PROTOBUF_GET_OPTIONAL_MS(route.route().max_stream_duration(), max_stream_duration)),\n      grpc_timeout_header_max_(\n          PROTOBUF_GET_OPTIONAL_MS(route.route().max_stream_duration(), grpc_timeout_header_max)),\n      grpc_timeout_header_offset_(PROTOBUF_GET_OPTIONAL_MS(route.route().max_stream_duration(),\n                                                           grpc_timeout_header_offset)),\n      max_grpc_timeout_(PROTOBUF_GET_OPTIONAL_MS(route.route(), max_grpc_timeout)),\n      grpc_timeout_offset_(PROTOBUF_GET_OPTIONAL_MS(route.route(), grpc_timeout_offset)),\n      loader_(factory_context.runtime()), runtime_(loadRuntimeData(route.match())),\n      
scheme_redirect_(route.redirect().scheme_redirect()),\n      host_redirect_(route.redirect().host_redirect()),\n      port_redirect_(route.redirect().port_redirect()\n                         ? \":\" + std::to_string(route.redirect().port_redirect())\n                         : \"\"),\n      path_redirect_(route.redirect().path_redirect()),\n      path_redirect_has_query_(path_redirect_.find('?') != absl::string_view::npos),\n      enable_preserve_query_in_path_redirects_(Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.preserve_query_string_in_path_redirects\")),\n      https_redirect_(route.redirect().https_redirect()),\n      prefix_rewrite_redirect_(route.redirect().prefix_rewrite()),\n      strip_query_(route.redirect().strip_query()),\n      hedge_policy_(buildHedgePolicy(vhost.hedgePolicy(), route.route())),\n      retry_policy_(buildRetryPolicy(vhost.retryPolicy(), route.route(), validator)),\n      internal_redirect_policy_(\n          buildInternalRedirectPolicy(route.route(), validator, route.name())),\n      rate_limit_policy_(route.route().rate_limits()),\n      priority_(ConfigUtility::parsePriority(route.route().priority())),\n      config_headers_(Http::HeaderUtility::buildHeaderDataVector(route.match().headers())),\n      total_cluster_weight_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.route().weighted_clusters(), total_weight, 100UL)),\n      request_headers_parser_(HeaderParser::configure(route.request_headers_to_add(),\n                                                      route.request_headers_to_remove())),\n      response_headers_parser_(HeaderParser::configure(route.response_headers_to_add(),\n                                                       route.response_headers_to_remove())),\n      retry_shadow_buffer_limit_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          route, per_request_buffer_limit_bytes, vhost.retryShadowBufferLimit())),\n      metadata_(route.metadata()), typed_metadata_(route.metadata()),\n      
match_grpc_(route.match().has_grpc()), opaque_config_(parseOpaqueConfig(route)),\n      decorator_(parseDecorator(route)), route_tracing_(parseRouteTracing(route)),\n      direct_response_code_(ConfigUtility::parseDirectResponseCode(route)),\n      direct_response_body_(ConfigUtility::parseDirectResponseBody(route, factory_context.api())),\n      per_filter_configs_(route.typed_per_filter_config(),\n                          route.hidden_envoy_deprecated_per_filter_config(), factory_context,\n                          validator),\n      route_name_(route.name()), time_source_(factory_context.dispatcher().timeSource()) {\n  if (route.route().has_metadata_match()) {\n    const auto filter_it = route.route().metadata_match().filter_metadata().find(\n        Envoy::Config::MetadataFilters::get().ENVOY_LB);\n    if (filter_it != route.route().metadata_match().filter_metadata().end()) {\n      metadata_match_criteria_ = std::make_unique<MetadataMatchCriteriaImpl>(filter_it->second);\n    }\n  }\n\n  if (!route.route().request_mirror_policies().empty()) {\n    if (route.route().has_hidden_envoy_deprecated_request_mirror_policy()) {\n      // protobuf does not allow `oneof` to contain a field labeled `repeated`, so we do our own\n      // xor-like check.\n      // https://github.com/protocolbuffers/protobuf/issues/2592\n      // The alternative solution suggested (wrapping the oneof in a repeated message) would still\n      // break wire compatibility.\n      // (see https://github.com/envoyproxy/envoy/issues/439#issuecomment-383622723)\n      throw EnvoyException(\"Cannot specify both request_mirror_policy and request_mirror_policies\");\n    }\n    for (const auto& mirror_policy_config : route.route().request_mirror_policies()) {\n      shadow_policies_.push_back(std::make_unique<ShadowPolicyImpl>(mirror_policy_config));\n    }\n  } else if (route.route().has_hidden_envoy_deprecated_request_mirror_policy()) {\n    
shadow_policies_.push_back(std::make_unique<ShadowPolicyImpl>(\n        route.route().hidden_envoy_deprecated_request_mirror_policy()));\n  }\n\n  // If this is a weighted_cluster, we create N internal route entries\n  // (called WeightedClusterEntry), such that each object is a simple\n  // single cluster, pointing back to the parent. Metadata criteria\n  // from the weighted cluster (if any) are merged with and override\n  // the criteria from the route.\n  if (route.route().cluster_specifier_case() ==\n      envoy::config::route::v3::RouteAction::ClusterSpecifierCase::kWeightedClusters) {\n    ASSERT(total_cluster_weight_ > 0);\n\n    uint64_t total_weight = 0UL;\n    const std::string& runtime_key_prefix = route.route().weighted_clusters().runtime_key_prefix();\n\n    for (const auto& cluster : route.route().weighted_clusters().clusters()) {\n      auto cluster_entry = std::make_unique<WeightedClusterEntry>(\n          this, runtime_key_prefix + \".\" + cluster.name(), factory_context, validator, cluster);\n      weighted_clusters_.emplace_back(std::move(cluster_entry));\n      total_weight += weighted_clusters_.back()->clusterWeight();\n    }\n\n    if (total_weight != total_cluster_weight_) {\n      throw EnvoyException(fmt::format(\"Sum of weights in the weighted_cluster should add up to {}\",\n                                       total_cluster_weight_));\n    }\n  }\n\n  for (const auto& query_parameter : route.match().query_parameters()) {\n    config_query_parameters_.push_back(\n        std::make_unique<ConfigUtility::QueryParameterMatcher>(query_parameter));\n  }\n\n  if (!route.route().hash_policy().empty()) {\n    hash_policy_ = std::make_unique<Http::HashPolicyImpl>(route.route().hash_policy());\n  }\n\n  if (route.match().has_tls_context()) {\n    tls_context_match_criteria_ =\n        std::make_unique<TlsContextMatchCriteriaImpl>(route.match().tls_context());\n  }\n\n  // Returns true if include_vh_rate_limits is explicitly set to true otherwise 
it defaults to false\n  // which is similar to VhRateLimitOptions::Override and will only use virtual host rate limits if\n  // the route is empty\n  include_vh_rate_limits_ =\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.route(), include_vh_rate_limits, false);\n\n  if (route.route().has_cors()) {\n    cors_policy_ =\n        std::make_unique<CorsPolicyImpl>(route.route().cors(), factory_context.runtime());\n  }\n  for (const auto& upgrade_config : route.route().upgrade_configs()) {\n    const bool enabled = upgrade_config.has_enabled() ? upgrade_config.enabled().value() : true;\n    const bool success =\n        upgrade_map_\n            .emplace(std::make_pair(\n                Envoy::Http::LowerCaseString(upgrade_config.upgrade_type()).get(), enabled))\n            .second;\n    if (!success) {\n      throw EnvoyException(absl::StrCat(\"Duplicate upgrade \", upgrade_config.upgrade_type()));\n    }\n    if (upgrade_config.upgrade_type() == Http::Headers::get().MethodValues.Connect) {\n      connect_config_ = upgrade_config.connect_config();\n    } else if (upgrade_config.has_connect_config()) {\n      throw EnvoyException(absl::StrCat(\"Non-CONNECT upgrade type \", upgrade_config.upgrade_type(),\n                                        \" has ConnectConfig\"));\n    }\n  }\n\n  if (route.route().has_regex_rewrite()) {\n    if (!prefix_rewrite_.empty()) {\n      throw EnvoyException(\"Cannot specify both prefix_rewrite and regex_rewrite\");\n    }\n    auto rewrite_spec = route.route().regex_rewrite();\n    regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern());\n    regex_rewrite_substitution_ = rewrite_spec.substitution();\n  }\n\n  if (enable_preserve_query_in_path_redirects_ && path_redirect_has_query_ && strip_query_) {\n    ENVOY_LOG(warn,\n              \"`strip_query` is set to true, but `path_redirect` contains query string and it will \"\n              \"not be stripped: {}\",\n              path_redirect_);\n  }\n}\n\nbool 
RouteEntryImplBase::evaluateRuntimeMatch(const uint64_t random_value) const {\n  return !runtime_ ? true\n                   : loader_.snapshot().featureEnabled(runtime_->fractional_runtime_key_,\n                                                       runtime_->fractional_runtime_default_,\n                                                       random_value);\n}\n\nbool RouteEntryImplBase::evaluateTlsContextMatch(const StreamInfo::StreamInfo& stream_info) const {\n  bool matches = true;\n\n  if (!tlsContextMatchCriteria()) {\n    return matches;\n  }\n\n  const TlsContextMatchCriteria& criteria = *tlsContextMatchCriteria();\n\n  if (criteria.presented().has_value()) {\n    const bool peer_presented = stream_info.downstreamSslConnection() &&\n                                stream_info.downstreamSslConnection()->peerCertificatePresented();\n    matches &= criteria.presented().value() == peer_presented;\n  }\n\n  if (criteria.validated().has_value()) {\n    const bool peer_validated = stream_info.downstreamSslConnection() &&\n                                stream_info.downstreamSslConnection()->peerCertificateValidated();\n    matches &= criteria.validated().value() == peer_validated;\n  }\n\n  return matches;\n}\n\nbool RouteEntryImplBase::matchRoute(const Http::RequestHeaderMap& headers,\n                                    const StreamInfo::StreamInfo& stream_info,\n                                    uint64_t random_value) const {\n  bool matches = true;\n\n  matches &= evaluateRuntimeMatch(random_value);\n  if (!matches) {\n    // No need to waste further cycles calculating a route match.\n    return false;\n  }\n\n  if (match_grpc_) {\n    matches &= Grpc::Common::isGrpcRequestHeaders(headers);\n  }\n\n  matches &= Http::HeaderUtility::matchHeaders(headers, config_headers_);\n  if (!config_query_parameters_.empty()) {\n    Http::Utility::QueryParams query_parameters =\n        Http::Utility::parseQueryString(headers.getPathValue());\n    matches &= 
ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_);\n  }\n\n  matches &= evaluateTlsContextMatch(stream_info);\n\n  return matches;\n}\n\nconst std::string& RouteEntryImplBase::clusterName() const { return cluster_name_; }\n\nvoid RouteEntryImplBase::finalizeRequestHeaders(Http::RequestHeaderMap& headers,\n                                                const StreamInfo::StreamInfo& stream_info,\n                                                bool insert_envoy_original_path) const {\n  if (!vhost_.globalRouteConfig().mostSpecificHeaderMutationsWins()) {\n    // Append user-specified request headers from most to least specific: route-level headers,\n    // virtual host level headers and finally global connection manager level headers.\n    request_headers_parser_->evaluateHeaders(headers, stream_info);\n    vhost_.requestHeaderParser().evaluateHeaders(headers, stream_info);\n    vhost_.globalRouteConfig().requestHeaderParser().evaluateHeaders(headers, stream_info);\n  } else {\n    // Most specific mutations take precedence.\n    vhost_.globalRouteConfig().requestHeaderParser().evaluateHeaders(headers, stream_info);\n    vhost_.requestHeaderParser().evaluateHeaders(headers, stream_info);\n    request_headers_parser_->evaluateHeaders(headers, stream_info);\n  }\n\n  if (!host_rewrite_.empty()) {\n    headers.setHost(host_rewrite_);\n  } else if (auto_host_rewrite_header_) {\n    const Http::HeaderEntry* header = headers.get(*auto_host_rewrite_header_);\n    if (header != nullptr) {\n      absl::string_view header_value = header->value().getStringView();\n      if (!header_value.empty()) {\n        headers.setHost(header_value);\n      }\n    }\n  } else if (host_rewrite_path_regex_ != nullptr) {\n    const std::string path(headers.getPathValue());\n    absl::string_view just_path(Http::PathUtil::removeQueryAndFragment(path));\n    headers.setHost(\n        host_rewrite_path_regex_->replaceAll(just_path, 
host_rewrite_path_regex_substitution_));\n  }\n\n  // Handle path rewrite\n  if (!getPathRewrite().empty() || regex_rewrite_ != nullptr) {\n    rewritePathHeader(headers, insert_envoy_original_path);\n  }\n}\n\nvoid RouteEntryImplBase::finalizeResponseHeaders(Http::ResponseHeaderMap& headers,\n                                                 const StreamInfo::StreamInfo& stream_info) const {\n  if (!vhost_.globalRouteConfig().mostSpecificHeaderMutationsWins()) {\n    // Append user-specified request headers from most to least specific: route-level headers,\n    // virtual host level headers and finally global connection manager level headers.\n    response_headers_parser_->evaluateHeaders(headers, stream_info);\n    vhost_.responseHeaderParser().evaluateHeaders(headers, stream_info);\n    vhost_.globalRouteConfig().responseHeaderParser().evaluateHeaders(headers, stream_info);\n  } else {\n    // Most specific mutations take precedence.\n    vhost_.globalRouteConfig().responseHeaderParser().evaluateHeaders(headers, stream_info);\n    vhost_.responseHeaderParser().evaluateHeaders(headers, stream_info);\n    response_headers_parser_->evaluateHeaders(headers, stream_info);\n  }\n}\n\nabsl::optional<RouteEntryImplBase::RuntimeData>\nRouteEntryImplBase::loadRuntimeData(const envoy::config::route::v3::RouteMatch& route_match) {\n  absl::optional<RuntimeData> runtime;\n  RuntimeData runtime_data;\n\n  if (route_match.has_runtime_fraction()) {\n    runtime_data.fractional_runtime_default_ = route_match.runtime_fraction().default_value();\n    runtime_data.fractional_runtime_key_ = route_match.runtime_fraction().runtime_key();\n    return runtime_data;\n  }\n\n  return runtime;\n}\n\n// finalizePathHeaders does the \"standard\" path rewriting, meaning that it\n// handles the \"prefix_rewrite\" and \"regex_rewrite\" route actions, only one of\n// which can be specified. 
The \"matched_path\" argument applies only to the\n// prefix rewriting, and describes the portion of the path (excluding query\n// parameters) that should be replaced by the rewrite. A \"regex_rewrite\"\n// applies to the entire path (excluding query parameters), regardless of what\n// portion was matched.\nvoid RouteEntryImplBase::finalizePathHeader(Http::RequestHeaderMap& headers,\n                                            absl::string_view matched_path,\n                                            bool insert_envoy_original_path) const {\n  const auto& rewrite = getPathRewrite();\n  if (rewrite.empty() && regex_rewrite_ == nullptr) {\n    // There are no rewrites configured. Just return.\n    return;\n  }\n\n  // TODO(perf): can we avoid the string copy for the common case?\n  std::string path(headers.getPathValue());\n  if (insert_envoy_original_path) {\n    headers.setEnvoyOriginalPath(path);\n  }\n\n  if (!rewrite.empty()) {\n    ASSERT(case_sensitive_ ? absl::StartsWith(path, matched_path)\n                           : absl::StartsWithIgnoreCase(path, matched_path));\n    headers.setPath(path.replace(0, matched_path.size(), rewrite));\n    return;\n  }\n\n  if (regex_rewrite_ != nullptr) {\n    // Replace the entire path, but preserve the query parameters\n    auto just_path(Http::PathUtil::removeQueryAndFragment(path));\n    headers.setPath(path.replace(\n        0, just_path.size(), regex_rewrite_->replaceAll(just_path, regex_rewrite_substitution_)));\n    return;\n  }\n}\n\nabsl::string_view RouteEntryImplBase::processRequestHost(const Http::RequestHeaderMap& headers,\n                                                         absl::string_view new_scheme,\n                                                         absl::string_view new_port) const {\n\n  absl::string_view request_host = headers.getHostValue();\n  size_t host_end;\n  if (request_host.empty()) {\n    return request_host;\n  }\n  // Detect if IPv6 URI\n  if (request_host[0] == '[') {\n    
host_end = request_host.rfind(\"]:\");\n    if (host_end != absl::string_view::npos) {\n      host_end += 1; // advance to :\n    }\n  } else {\n    host_end = request_host.rfind(\":\");\n  }\n\n  if (host_end != absl::string_view::npos) {\n    absl::string_view request_port = request_host.substr(host_end);\n    absl::string_view request_protocol = headers.getForwardedProtoValue();\n    bool remove_port = !new_port.empty();\n\n    if (new_scheme != request_protocol) {\n      remove_port |= (request_protocol == Http::Headers::get().SchemeValues.Https.c_str()) &&\n                     request_port == \":443\";\n      remove_port |= (request_protocol == Http::Headers::get().SchemeValues.Http.c_str()) &&\n                     request_port == \":80\";\n    }\n\n    if (remove_port) {\n      return request_host.substr(0, host_end);\n    }\n  }\n\n  return request_host;\n}\n\nstd::string RouteEntryImplBase::newPath(const Http::RequestHeaderMap& headers) const {\n  ASSERT(isDirectResponse());\n\n  absl::string_view final_scheme;\n  absl::string_view final_host;\n  absl::string_view final_port;\n  absl::string_view final_path;\n\n  if (!scheme_redirect_.empty()) {\n    final_scheme = scheme_redirect_.c_str();\n  } else if (https_redirect_) {\n    final_scheme = Http::Headers::get().SchemeValues.Https;\n  } else {\n    ASSERT(headers.ForwardedProto());\n    final_scheme = headers.getForwardedProtoValue();\n  }\n\n  if (!port_redirect_.empty()) {\n    final_port = port_redirect_.c_str();\n  } else {\n    final_port = \"\";\n  }\n\n  if (!host_redirect_.empty()) {\n    final_host = host_redirect_.c_str();\n  } else {\n    ASSERT(headers.Host());\n    final_host = processRequestHost(headers, final_scheme, final_port);\n  }\n\n  std::string final_path_value;\n  if (enable_preserve_query_in_path_redirects_) {\n    if (!path_redirect_.empty()) {\n      // The path_redirect query string, if any, takes precedence over the request's query string,\n      // and it will not be stripped 
regardless of `strip_query`.\n      if (path_redirect_has_query_) {\n        final_path = path_redirect_.c_str();\n      } else {\n        const absl::string_view current_path = headers.getPathValue();\n        const size_t path_end = current_path.find('?');\n        const bool current_path_has_query = path_end != absl::string_view::npos;\n        if (current_path_has_query) {\n          final_path_value = path_redirect_;\n          final_path_value.append(current_path.data() + path_end, current_path.length() - path_end);\n          final_path = final_path_value;\n        } else {\n          final_path = path_redirect_.c_str();\n        }\n      }\n    } else {\n      final_path = headers.getPathValue();\n    }\n    if (!path_redirect_has_query_ && strip_query_) {\n      const size_t path_end = final_path.find('?');\n      if (path_end != absl::string_view::npos) {\n        final_path = final_path.substr(0, path_end);\n      }\n    }\n  } else {\n    if (!path_redirect_.empty()) {\n      final_path = path_redirect_.c_str();\n    } else {\n      final_path = headers.getPathValue();\n      if (strip_query_) {\n        const size_t path_end = final_path.find(\"?\");\n        if (path_end != absl::string_view::npos) {\n          final_path = final_path.substr(0, path_end);\n        }\n      }\n    }\n  }\n\n  return fmt::format(\"{}://{}{}{}\", final_scheme, final_host, final_port, final_path);\n}\n\nstd::multimap<std::string, std::string>\nRouteEntryImplBase::parseOpaqueConfig(const envoy::config::route::v3::Route& route) {\n  std::multimap<std::string, std::string> ret;\n  if (route.has_metadata()) {\n    auto filter_metadata = route.metadata().filter_metadata().find(\n        Extensions::HttpFilters::HttpFilterNames::get().Router);\n    if (filter_metadata == route.metadata().filter_metadata().end()) {\n      // TODO(zuercher): simply return `ret` when deprecated filter names are removed.\n      filter_metadata = 
route.metadata().filter_metadata().find(DEPRECATED_ROUTER_NAME);\n      if (filter_metadata == route.metadata().filter_metadata().end()) {\n        return ret;\n      }\n\n      Extensions::Common::Utility::ExtensionNameUtil::checkDeprecatedExtensionName(\n          \"http filter\", DEPRECATED_ROUTER_NAME,\n          Extensions::HttpFilters::HttpFilterNames::get().Router);\n    }\n    for (const auto& it : filter_metadata->second.fields()) {\n      if (it.second.kind_case() == ProtobufWkt::Value::kStringValue) {\n        ret.emplace(it.first, it.second.string_value());\n      }\n    }\n  }\n  return ret;\n}\n\nHedgePolicyImpl RouteEntryImplBase::buildHedgePolicy(\n    const absl::optional<envoy::config::route::v3::HedgePolicy>& vhost_hedge_policy,\n    const envoy::config::route::v3::RouteAction& route_config) const {\n  // Route specific policy wins, if available.\n  if (route_config.has_hedge_policy()) {\n    return HedgePolicyImpl(route_config.hedge_policy());\n  }\n\n  // If not, we fall back to the virtual host policy if there is one.\n  if (vhost_hedge_policy) {\n    return HedgePolicyImpl(vhost_hedge_policy.value());\n  }\n\n  // Otherwise, an empty policy will do.\n  return HedgePolicyImpl();\n}\n\nRetryPolicyImpl RouteEntryImplBase::buildRetryPolicy(\n    const absl::optional<envoy::config::route::v3::RetryPolicy>& vhost_retry_policy,\n    const envoy::config::route::v3::RouteAction& route_config,\n    ProtobufMessage::ValidationVisitor& validation_visitor) const {\n  // Route specific policy wins, if available.\n  if (route_config.has_retry_policy()) {\n    return RetryPolicyImpl(route_config.retry_policy(), validation_visitor);\n  }\n\n  // If not, we fallback to the virtual host policy if there is one.\n  if (vhost_retry_policy) {\n    return RetryPolicyImpl(vhost_retry_policy.value(), validation_visitor);\n  }\n\n  // Otherwise, an empty policy will do.\n  return RetryPolicyImpl();\n}\n\nInternalRedirectPolicyImpl 
RouteEntryImplBase::buildInternalRedirectPolicy(\n    const envoy::config::route::v3::RouteAction& route_config,\n    ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name) const {\n  if (route_config.has_internal_redirect_policy()) {\n    return InternalRedirectPolicyImpl(route_config.internal_redirect_policy(), validator,\n                                      current_route_name);\n  }\n  envoy::config::route::v3::InternalRedirectPolicy policy_config;\n  switch (route_config.internal_redirect_action()) {\n  case envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT:\n    break;\n  case envoy::config::route::v3::RouteAction::PASS_THROUGH_INTERNAL_REDIRECT:\n    FALLTHRU;\n  default:\n    return InternalRedirectPolicyImpl();\n  }\n  if (route_config.has_max_internal_redirects()) {\n    *policy_config.mutable_max_internal_redirects() = route_config.max_internal_redirects();\n  }\n  return InternalRedirectPolicyImpl(policy_config, validator, current_route_name);\n}\n\nDecoratorConstPtr RouteEntryImplBase::parseDecorator(const envoy::config::route::v3::Route& route) {\n  DecoratorConstPtr ret;\n  if (route.has_decorator()) {\n    ret = DecoratorConstPtr(new DecoratorImpl(route.decorator()));\n  }\n  return ret;\n}\n\nRouteTracingConstPtr\nRouteEntryImplBase::parseRouteTracing(const envoy::config::route::v3::Route& route) {\n  RouteTracingConstPtr ret;\n  if (route.has_tracing()) {\n    ret = RouteTracingConstPtr(new RouteTracingImpl(route.tracing()));\n  }\n  return ret;\n}\n\nconst DirectResponseEntry* RouteEntryImplBase::directResponseEntry() const {\n  // A route for a request can exclusively be a route entry, a direct response entry,\n  // or a redirect entry.\n  if (isDirectResponse()) {\n    return this;\n  } else {\n    return nullptr;\n  }\n}\n\nconst RouteEntry* RouteEntryImplBase::routeEntry() const {\n  // A route for a request can exclusively be a route entry, a direct response entry,\n  // or a redirect entry.\n  if 
(isDirectResponse()) {\n    return nullptr;\n  } else {\n    return this;\n  }\n}\n\nRouteConstSharedPtr RouteEntryImplBase::clusterEntry(const Http::HeaderMap& headers,\n                                                     uint64_t random_value) const {\n  // Gets the route object chosen from the list of weighted clusters\n  // (if there is one) or returns self.\n  if (weighted_clusters_.empty()) {\n    if (!cluster_name_.empty() || isDirectResponse()) {\n      return shared_from_this();\n    } else {\n      ASSERT(!cluster_header_name_.get().empty());\n      const Http::HeaderEntry* entry = headers.get(cluster_header_name_);\n      std::string final_cluster_name;\n      if (entry) {\n        final_cluster_name = std::string(entry->value().getStringView());\n      }\n\n      // NOTE: Though we return a shared_ptr here, the current ownership model assumes that\n      //       the route table sticks around. See snapped_route_config_ in\n      //       ConnectionManagerImpl::ActiveStream.\n      return std::make_shared<DynamicRouteEntry>(this, final_cluster_name);\n    }\n  }\n\n  return WeightedClusterUtil::pickCluster(weighted_clusters_, total_cluster_weight_, random_value,\n                                          true);\n}\n\nvoid RouteEntryImplBase::validateClusters(Upstream::ClusterManager& cm) const {\n  if (isDirectResponse()) {\n    return;\n  }\n\n  // Currently, we verify that the cluster exists in the CM if we have an explicit cluster or\n  // weighted cluster rule. We obviously do not verify a cluster_header rule. This means that\n  // trying to use all CDS clusters with a static route table will not work. In the upcoming RDS\n  // change we will make it so that dynamically loaded route tables do *not* perform CM checks.\n  // In the future we might decide to also have a config option that turns off checks for static\n  // route tables. 
This would enable the all CDS with static route table case.\n  if (!cluster_name_.empty()) {\n    if (!cm.get(cluster_name_)) {\n      throw EnvoyException(fmt::format(\"route: unknown cluster '{}'\", cluster_name_));\n    }\n  } else if (!weighted_clusters_.empty()) {\n    for (const WeightedClusterEntrySharedPtr& cluster : weighted_clusters_) {\n      if (!cm.get(cluster->clusterName())) {\n        throw EnvoyException(\n            fmt::format(\"route: unknown weighted cluster '{}'\", cluster->clusterName()));\n      }\n    }\n  }\n}\n\nconst RouteSpecificFilterConfig*\nRouteEntryImplBase::perFilterConfig(const std::string& name) const {\n  return per_filter_configs_.get(name);\n}\n\nRouteEntryImplBase::WeightedClusterEntry::WeightedClusterEntry(\n    const RouteEntryImplBase* parent, const std::string& runtime_key,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ProtobufMessage::ValidationVisitor& validator,\n    const envoy::config::route::v3::WeightedCluster::ClusterWeight& cluster)\n    : DynamicRouteEntry(parent, cluster.name()), runtime_key_(runtime_key),\n      loader_(factory_context.runtime()),\n      cluster_weight_(PROTOBUF_GET_WRAPPED_REQUIRED(cluster, weight)),\n      request_headers_parser_(HeaderParser::configure(cluster.request_headers_to_add(),\n                                                      cluster.request_headers_to_remove())),\n      response_headers_parser_(HeaderParser::configure(cluster.response_headers_to_add(),\n                                                       cluster.response_headers_to_remove())),\n      per_filter_configs_(cluster.typed_per_filter_config(),\n                          cluster.hidden_envoy_deprecated_per_filter_config(), factory_context,\n                          validator) {\n  if (cluster.has_metadata_match()) {\n    const auto filter_it = cluster.metadata_match().filter_metadata().find(\n        Envoy::Config::MetadataFilters::get().ENVOY_LB);\n    if (filter_it != 
cluster.metadata_match().filter_metadata().end()) {\n      if (parent->metadata_match_criteria_) {\n        cluster_metadata_match_criteria_ =\n            parent->metadata_match_criteria_->mergeMatchCriteria(filter_it->second);\n      } else {\n        cluster_metadata_match_criteria_ =\n            std::make_unique<MetadataMatchCriteriaImpl>(filter_it->second);\n      }\n    }\n  }\n}\n\nconst RouteSpecificFilterConfig*\nRouteEntryImplBase::WeightedClusterEntry::perFilterConfig(const std::string& name) const {\n  const auto cfg = per_filter_configs_.get(name);\n  return cfg != nullptr ? cfg : DynamicRouteEntry::perFilterConfig(name);\n}\n\nPrefixRouteEntryImpl::PrefixRouteEntryImpl(\n    const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ProtobufMessage::ValidationVisitor& validator)\n    : RouteEntryImplBase(vhost, route, factory_context, validator), prefix_(route.match().prefix()),\n      path_matcher_(Matchers::PathMatcher::createPrefix(prefix_, !case_sensitive_)) {}\n\nvoid PrefixRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers,\n                                             bool insert_envoy_original_path) const {\n  finalizePathHeader(headers, prefix_, insert_envoy_original_path);\n}\n\nRouteConstSharedPtr PrefixRouteEntryImpl::matches(const Http::RequestHeaderMap& headers,\n                                                  const StreamInfo::StreamInfo& stream_info,\n                                                  uint64_t random_value) const {\n  if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) &&\n      path_matcher_->match(headers.getPathValue())) {\n    return clusterEntry(headers, random_value);\n  }\n  return nullptr;\n}\n\nPathRouteEntryImpl::PathRouteEntryImpl(const VirtualHostImpl& vhost,\n                                       const envoy::config::route::v3::Route& route,\n                                       
Server::Configuration::ServerFactoryContext& factory_context,\n                                       ProtobufMessage::ValidationVisitor& validator)\n    : RouteEntryImplBase(vhost, route, factory_context, validator), path_(route.match().path()),\n      path_matcher_(Matchers::PathMatcher::createExact(path_, !case_sensitive_)) {}\n\nvoid PathRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers,\n                                           bool insert_envoy_original_path) const {\n  finalizePathHeader(headers, path_, insert_envoy_original_path);\n}\n\nRouteConstSharedPtr PathRouteEntryImpl::matches(const Http::RequestHeaderMap& headers,\n                                                const StreamInfo::StreamInfo& stream_info,\n                                                uint64_t random_value) const {\n  if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) &&\n      path_matcher_->match(headers.getPathValue())) {\n    return clusterEntry(headers, random_value);\n  }\n\n  return nullptr;\n}\n\nRegexRouteEntryImpl::RegexRouteEntryImpl(\n    const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ProtobufMessage::ValidationVisitor& validator)\n    : RouteEntryImplBase(vhost, route, factory_context, validator) {\n  // TODO(yangminzhu): Use PathMatcher once hidden_envoy_deprecated_regex is removed.\n  if (route.match().path_specifier_case() ==\n      envoy::config::route::v3::RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex) {\n    regex_ = Regex::Utility::parseStdRegexAsCompiledMatcher(\n        route.match().hidden_envoy_deprecated_regex());\n    regex_str_ = route.match().hidden_envoy_deprecated_regex();\n  } else {\n    ASSERT(route.match().path_specifier_case() ==\n           envoy::config::route::v3::RouteMatch::PathSpecifierCase::kSafeRegex);\n    regex_ = Regex::Utility::parseRegex(route.match().safe_regex());\n    regex_str_ = 
route.match().safe_regex().regex();\n  }\n}\n\nvoid RegexRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers,\n                                            bool insert_envoy_original_path) const {\n  const absl::string_view path = Http::PathUtil::removeQueryAndFragment(headers.getPathValue());\n  // TODO(yuval-k): This ASSERT can happen if the path was changed by a filter without clearing the\n  // route cache. We should consider if ASSERT-ing is the desired behavior in this case.\n  ASSERT(regex_->match(path));\n  finalizePathHeader(headers, path, insert_envoy_original_path);\n}\n\nRouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::RequestHeaderMap& headers,\n                                                 const StreamInfo::StreamInfo& stream_info,\n                                                 uint64_t random_value) const {\n  if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value)) {\n    const absl::string_view path = Http::PathUtil::removeQueryAndFragment(headers.getPathValue());\n    if (regex_->match(path)) {\n      return clusterEntry(headers, random_value);\n    }\n  }\n  return nullptr;\n}\n\nConnectRouteEntryImpl::ConnectRouteEntryImpl(\n    const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ProtobufMessage::ValidationVisitor& validator)\n    : RouteEntryImplBase(vhost, route, factory_context, validator) {}\n\nvoid ConnectRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers,\n                                              bool insert_envoy_original_path) const {\n  const absl::string_view path = Http::PathUtil::removeQueryAndFragment(headers.getPathValue());\n  finalizePathHeader(headers, path, insert_envoy_original_path);\n}\n\nRouteConstSharedPtr ConnectRouteEntryImpl::matches(const Http::RequestHeaderMap& headers,\n                                                   const StreamInfo::StreamInfo& 
stream_info,\n                                                   uint64_t random_value) const {\n  if (Http::HeaderUtility::isConnect(headers) &&\n      RouteEntryImplBase::matchRoute(headers, stream_info, random_value)) {\n    return clusterEntry(headers, random_value);\n  }\n  return nullptr;\n}\n\nVirtualHostImpl::VirtualHostImpl(const envoy::config::route::v3::VirtualHost& virtual_host,\n                                 const ConfigImpl& global_route_config,\n                                 Server::Configuration::ServerFactoryContext& factory_context,\n                                 Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validator,\n                                 bool validate_clusters)\n    : stat_name_pool_(factory_context.scope().symbolTable()),\n      stat_name_(stat_name_pool_.add(virtual_host.name())),\n      vcluster_scope_(scope.createScope(virtual_host.name() + \".vcluster\")),\n      rate_limit_policy_(virtual_host.rate_limits()), global_route_config_(global_route_config),\n      request_headers_parser_(HeaderParser::configure(virtual_host.request_headers_to_add(),\n                                                      virtual_host.request_headers_to_remove())),\n      response_headers_parser_(HeaderParser::configure(virtual_host.response_headers_to_add(),\n                                                       virtual_host.response_headers_to_remove())),\n      per_filter_configs_(virtual_host.typed_per_filter_config(),\n                          virtual_host.hidden_envoy_deprecated_per_filter_config(), factory_context,\n                          validator),\n      retry_shadow_buffer_limit_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          virtual_host, per_request_buffer_limit_bytes, std::numeric_limits<uint32_t>::max())),\n      include_attempt_count_in_request_(virtual_host.include_request_attempt_count()),\n      include_attempt_count_in_response_(virtual_host.include_attempt_count_in_response()),\n      
virtual_cluster_catch_all_(stat_name_pool_, *vcluster_scope_) {\n\n  switch (virtual_host.require_tls()) {\n  case envoy::config::route::v3::VirtualHost::NONE:\n    ssl_requirements_ = SslRequirements::None;\n    break;\n  case envoy::config::route::v3::VirtualHost::EXTERNAL_ONLY:\n    ssl_requirements_ = SslRequirements::ExternalOnly;\n    break;\n  case envoy::config::route::v3::VirtualHost::ALL:\n    ssl_requirements_ = SslRequirements::All;\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  // Retry and Hedge policies must be set before routes, since they may use them.\n  if (virtual_host.has_retry_policy()) {\n    retry_policy_ = virtual_host.retry_policy();\n  }\n  if (virtual_host.has_hedge_policy()) {\n    hedge_policy_ = virtual_host.hedge_policy();\n  }\n\n  for (const auto& route : virtual_host.routes()) {\n    switch (route.match().path_specifier_case()) {\n    case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kPrefix: {\n      routes_.emplace_back(new PrefixRouteEntryImpl(*this, route, factory_context, validator));\n      break;\n    }\n    case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kPath: {\n      routes_.emplace_back(new PathRouteEntryImpl(*this, route, factory_context, validator));\n      break;\n    }\n    case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex:\n    case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kSafeRegex: {\n      routes_.emplace_back(new RegexRouteEntryImpl(*this, route, factory_context, validator));\n      break;\n    }\n    case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kConnectMatcher: {\n      routes_.emplace_back(new ConnectRouteEntryImpl(*this, route, factory_context, validator));\n      break;\n    }\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n\n    if (validate_clusters) {\n      routes_.back()->validateClusters(factory_context.clusterManager());\n      for (const auto& shadow_policy : 
routes_.back()->shadowPolicies()) {\n        ASSERT(!shadow_policy->cluster().empty());\n        if (!factory_context.clusterManager().get(shadow_policy->cluster())) {\n          throw EnvoyException(\n              fmt::format(\"route: unknown shadow cluster '{}'\", shadow_policy->cluster()));\n        }\n      }\n    }\n  }\n\n  for (const auto& virtual_cluster : virtual_host.virtual_clusters()) {\n    virtual_clusters_.push_back(\n        VirtualClusterEntry(virtual_cluster, stat_name_pool_, *vcluster_scope_));\n  }\n\n  if (virtual_host.has_cors()) {\n    cors_policy_ = std::make_unique<CorsPolicyImpl>(virtual_host.cors(), factory_context.runtime());\n  }\n}\n\nVirtualHostImpl::VirtualClusterEntry::VirtualClusterEntry(\n    const envoy::config::route::v3::VirtualCluster& virtual_cluster, Stats::StatNamePool& pool,\n    Stats::Scope& scope)\n    : VirtualClusterBase(pool.add(virtual_cluster.name()),\n                         scope.createScope(virtual_cluster.name())) {\n  if (virtual_cluster.hidden_envoy_deprecated_pattern().empty() ==\n      virtual_cluster.headers().empty()) {\n    throw EnvoyException(\"virtual clusters must define either 'pattern' or 'headers'\");\n  }\n\n  if (!virtual_cluster.hidden_envoy_deprecated_pattern().empty()) {\n    envoy::config::route::v3::HeaderMatcher matcher_config;\n    matcher_config.set_name(Http::Headers::get().Path.get());\n    matcher_config.set_hidden_envoy_deprecated_regex_match(\n        virtual_cluster.hidden_envoy_deprecated_pattern());\n    headers_.push_back(std::make_unique<Http::HeaderUtility::HeaderData>(matcher_config));\n  } else {\n    ASSERT(!virtual_cluster.headers().empty());\n    headers_ = Http::HeaderUtility::buildHeaderDataVector(virtual_cluster.headers());\n  }\n\n  if (virtual_cluster.hidden_envoy_deprecated_method() !=\n      envoy::config::core::v3::METHOD_UNSPECIFIED) {\n    envoy::config::route::v3::HeaderMatcher matcher_config;\n    matcher_config.set_name(Http::Headers::get().Method.get());\n 
   matcher_config.set_exact_match(envoy::config::core::v3::RequestMethod_Name(\n        virtual_cluster.hidden_envoy_deprecated_method()));\n    headers_.push_back(std::make_unique<Http::HeaderUtility::HeaderData>(matcher_config));\n  }\n}\n\nconst Config& VirtualHostImpl::routeConfig() const { return global_route_config_; }\n\nconst RouteSpecificFilterConfig* VirtualHostImpl::perFilterConfig(const std::string& name) const {\n  return per_filter_configs_.get(name);\n}\n\nconst VirtualHostImpl* RouteMatcher::findWildcardVirtualHost(\n    const std::string& host, const RouteMatcher::WildcardVirtualHosts& wildcard_virtual_hosts,\n    RouteMatcher::SubstringFunction substring_function) const {\n  // We do a longest wildcard match against the host that's passed in\n  // (e.g. \"foo-bar.baz.com\" should match \"*-bar.baz.com\" before matching \"*.baz.com\" for suffix\n  // wildcards). This is done by scanning the length => wildcards map looking for every wildcard\n  // whose size is < length.\n  for (const auto& iter : wildcard_virtual_hosts) {\n    const uint32_t wildcard_length = iter.first;\n    const auto& wildcard_map = iter.second;\n    // >= because *.foo.com shouldn't match .foo.com.\n    if (wildcard_length >= host.size()) {\n      continue;\n    }\n    const auto& match = wildcard_map.find(substring_function(host, wildcard_length));\n    if (match != wildcard_map.end()) {\n      return match->second.get();\n    }\n  }\n  return nullptr;\n}\n\nRouteMatcher::RouteMatcher(const envoy::config::route::v3::RouteConfiguration& route_config,\n                           const ConfigImpl& global_route_config,\n                           Server::Configuration::ServerFactoryContext& factory_context,\n                           ProtobufMessage::ValidationVisitor& validator, bool validate_clusters)\n    : vhost_scope_(factory_context.scope().createScope(\"vhost\")) {\n  for (const auto& virtual_host_config : route_config.virtual_hosts()) {\n    VirtualHostSharedPtr 
virtual_host(new VirtualHostImpl(virtual_host_config, global_route_config,\n                                                          factory_context, *vhost_scope_, validator,\n                                                          validate_clusters));\n    for (const std::string& domain_name : virtual_host_config.domains()) {\n      const std::string domain = Http::LowerCaseString(domain_name).get();\n      bool duplicate_found = false;\n      if (\"*\" == domain) {\n        if (default_virtual_host_) {\n          throw EnvoyException(fmt::format(\"Only a single wildcard domain is permitted in route {}\",\n                                           route_config.name()));\n        }\n        default_virtual_host_ = virtual_host;\n      } else if (!domain.empty() && '*' == domain[0]) {\n        duplicate_found = !wildcard_virtual_host_suffixes_[domain.size() - 1]\n                               .emplace(domain.substr(1), virtual_host)\n                               .second;\n      } else if (!domain.empty() && '*' == domain[domain.size() - 1]) {\n        duplicate_found = !wildcard_virtual_host_prefixes_[domain.size() - 1]\n                               .emplace(domain.substr(0, domain.size() - 1), virtual_host)\n                               .second;\n      } else {\n        duplicate_found = !virtual_hosts_.emplace(domain, virtual_host).second;\n      }\n      if (duplicate_found) {\n        throw EnvoyException(fmt::format(\"Only unique values for domains are permitted. 
Duplicate \"\n                                         \"entry of domain {} in route {}\",\n                                         domain, route_config.name()));\n      }\n    }\n  }\n}\n\nRouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const RouteCallback& cb,\n                                                         const Http::RequestHeaderMap& headers,\n                                                         const StreamInfo::StreamInfo& stream_info,\n                                                         uint64_t random_value) const {\n  // No x-forwarded-proto header. This normally only happens when ActiveStream::decodeHeaders\n  // bails early (as it rejects a request), so there is no routing is going to happen anyway.\n  const auto* forwarded_proto_header = headers.ForwardedProto();\n  if (forwarded_proto_header == nullptr) {\n    return nullptr;\n  }\n\n  // First check for ssl redirect.\n  if (ssl_requirements_ == SslRequirements::All && forwarded_proto_header->value() != \"https\") {\n    return SSL_REDIRECT_ROUTE;\n  } else if (ssl_requirements_ == SslRequirements::ExternalOnly &&\n             forwarded_proto_header->value() != \"https\" &&\n             !Http::HeaderUtility::isEnvoyInternalRequest(headers)) {\n    return SSL_REDIRECT_ROUTE;\n  }\n\n  // Check for a route that matches the request.\n  for (auto route = routes_.begin(); route != routes_.end(); ++route) {\n    if (!headers.Path() && !(*route)->supportsPathlessHeaders()) {\n      continue;\n    }\n\n    RouteConstSharedPtr route_entry = (*route)->matches(headers, stream_info, random_value);\n    if (nullptr == route_entry) {\n      continue;\n    }\n\n    if (cb) {\n      RouteEvalStatus eval_status = (std::next(route) == routes_.end())\n                                        ? 
RouteEvalStatus::NoMoreRoutes\n                                        : RouteEvalStatus::HasMoreRoutes;\n      RouteMatchStatus match_status = cb(route_entry, eval_status);\n      if (match_status == RouteMatchStatus::Accept) {\n        return route_entry;\n      }\n      if (match_status == RouteMatchStatus::Continue &&\n          eval_status == RouteEvalStatus::NoMoreRoutes) {\n        return nullptr;\n      }\n      continue;\n    }\n\n    return route_entry;\n  }\n\n  return nullptr;\n}\n\nconst VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::RequestHeaderMap& headers) const {\n  // Fast path the case where we only have a default virtual host.\n  if (virtual_hosts_.empty() && wildcard_virtual_host_suffixes_.empty() &&\n      wildcard_virtual_host_prefixes_.empty()) {\n    return default_virtual_host_.get();\n  }\n\n  // There may be no authority in early reply paths in the HTTP connection manager.\n  if (headers.Host() == nullptr) {\n    return nullptr;\n  }\n\n  // TODO (@rshriram) Match Origin header in WebSocket\n  // request with VHost, using wildcard match\n  // Lower-case the value of the host header, as hostnames are case insensitive.\n  const std::string host = absl::AsciiStrToLower(headers.getHostValue());\n  const auto& iter = virtual_hosts_.find(host);\n  if (iter != virtual_hosts_.end()) {\n    return iter->second.get();\n  }\n  if (!wildcard_virtual_host_suffixes_.empty()) {\n    const VirtualHostImpl* vhost = findWildcardVirtualHost(\n        host, wildcard_virtual_host_suffixes_,\n        [](const std::string& h, int l) -> std::string { return h.substr(h.size() - l); });\n    if (vhost != nullptr) {\n      return vhost;\n    }\n  }\n  if (!wildcard_virtual_host_prefixes_.empty()) {\n    const VirtualHostImpl* vhost = findWildcardVirtualHost(\n        host, wildcard_virtual_host_prefixes_,\n        [](const std::string& h, int l) -> std::string { return h.substr(0, l); });\n    if (vhost != nullptr) {\n      return vhost;\n    }\n  }\n 
 return default_virtual_host_.get();\n}\n\nRouteConstSharedPtr RouteMatcher::route(const RouteCallback& cb,\n                                        const Http::RequestHeaderMap& headers,\n                                        const StreamInfo::StreamInfo& stream_info,\n                                        uint64_t random_value) const {\n\n  const VirtualHostImpl* virtual_host = findVirtualHost(headers);\n  if (virtual_host) {\n    return virtual_host->getRouteFromEntries(cb, headers, stream_info, random_value);\n  } else {\n    return nullptr;\n  }\n}\n\nconst SslRedirector SslRedirectRoute::SSL_REDIRECTOR;\nconst std::shared_ptr<const SslRedirectRoute> VirtualHostImpl::SSL_REDIRECT_ROUTE{\n    new SslRedirectRoute()};\n\nconst VirtualCluster*\nVirtualHostImpl::virtualClusterFromEntries(const Http::HeaderMap& headers) const {\n  for (const VirtualClusterEntry& entry : virtual_clusters_) {\n    if (Http::HeaderUtility::matchHeaders(headers, entry.headers_)) {\n      return &entry;\n    }\n  }\n\n  if (!virtual_clusters_.empty()) {\n    return &virtual_cluster_catch_all_;\n  }\n\n  return nullptr;\n}\n\nConfigImpl::ConfigImpl(const envoy::config::route::v3::RouteConfiguration& config,\n                       Server::Configuration::ServerFactoryContext& factory_context,\n                       ProtobufMessage::ValidationVisitor& validator,\n                       bool validate_clusters_default)\n    : name_(config.name()), symbol_table_(factory_context.scope().symbolTable()),\n      uses_vhds_(config.has_vhds()),\n      most_specific_header_mutations_wins_(config.most_specific_header_mutations_wins()) {\n  route_matcher_ = std::make_unique<RouteMatcher>(\n      config, *this, factory_context, validator,\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, validate_clusters, validate_clusters_default));\n\n  for (const std::string& header : config.internal_only_headers()) {\n    internal_only_headers_.push_back(Http::LowerCaseString(header));\n  }\n\n  
request_headers_parser_ =\n      HeaderParser::configure(config.request_headers_to_add(), config.request_headers_to_remove());\n  response_headers_parser_ = HeaderParser::configure(config.response_headers_to_add(),\n                                                     config.response_headers_to_remove());\n}\n\nRouteConstSharedPtr ConfigImpl::route(const RouteCallback& cb,\n                                      const Http::RequestHeaderMap& headers,\n                                      const StreamInfo::StreamInfo& stream_info,\n                                      uint64_t random_value) const {\n  return route_matcher_->route(cb, headers, stream_info, random_value);\n}\n\nnamespace {\n\nRouteSpecificFilterConfigConstSharedPtr\ncreateRouteSpecificFilterConfig(const std::string& name, const ProtobufWkt::Any& typed_config,\n                                const ProtobufWkt::Struct& config,\n                                Server::Configuration::ServerFactoryContext& factory_context,\n                                ProtobufMessage::ValidationVisitor& validator) {\n  auto& factory = Envoy::Config::Utility::getAndCheckFactoryByName<\n      Server::Configuration::NamedHttpFilterConfigFactory>(name);\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();\n  Envoy::Config::Utility::translateOpaqueConfig(typed_config, config, validator, *proto_config);\n  return factory.createRouteSpecificFilterConfig(*proto_config, factory_context, validator);\n}\n\n} // namespace\n\nPerFilterConfigs::PerFilterConfigs(\n    const Protobuf::Map<std::string, ProtobufWkt::Any>& typed_configs,\n    const Protobuf::Map<std::string, ProtobufWkt::Struct>& configs,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ProtobufMessage::ValidationVisitor& validator) {\n  if (!typed_configs.empty() && !configs.empty()) {\n    throw EnvoyException(\"Only one of typed_configs or configs can be specified\");\n  }\n\n  for (const auto& it : typed_configs) 
{\n    // TODO(zuercher): canonicalization may be removed when deprecated filter names are removed\n    const auto& name =\n        Extensions::HttpFilters::Common::FilterNameUtil::canonicalFilterName(it.first);\n\n    auto object = createRouteSpecificFilterConfig(\n        name, it.second, ProtobufWkt::Struct::default_instance(), factory_context, validator);\n    if (object != nullptr) {\n      configs_[name] = std::move(object);\n    }\n  }\n\n  for (const auto& it : configs) {\n    // TODO(zuercher): canonicalization may be removed when deprecated filter names are removed\n    const auto& name =\n        Extensions::HttpFilters::Common::FilterNameUtil::canonicalFilterName(it.first);\n\n    auto object = createRouteSpecificFilterConfig(name, ProtobufWkt::Any::default_instance(),\n                                                  it.second, factory_context, validator);\n    if (object != nullptr) {\n      configs_[name] = std::move(object);\n    }\n  }\n}\n\nconst RouteSpecificFilterConfig* PerFilterConfigs::get(const std::string& name) const {\n  auto it = configs_.find(name);\n  return it == configs_.end() ? nullptr : it->second.get();\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/config_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <iterator>\n#include <list>\n#include <map>\n#include <memory>\n#include <regex>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/config/metadata.h\"\n#include \"common/http/hash_policy.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/router/config_utility.h\"\n#include \"common/router/header_formatter.h\"\n#include \"common/router/header_parser.h\"\n#include \"common/router/metadatamatchcriteria_impl.h\"\n#include \"common/router/router_ratelimit.h\"\n#include \"common/router/tls_context_match_criteria_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Base interface for something that matches a header.\n */\nclass Matchable {\npublic:\n  virtual ~Matchable() = default;\n\n  /**\n   * See if this object matches the incoming headers.\n   * @param headers supplies the headers to match.\n   * @param random_value supplies the random seed to use if a runtime choice is required. 
This\n   *        allows stable choices between calls if desired.\n   * @return true if input headers match this object.\n   */\n  virtual RouteConstSharedPtr matches(const Http::RequestHeaderMap& headers,\n                                      const StreamInfo::StreamInfo& stream_info,\n                                      uint64_t random_value) const PURE;\n\n  // By default, matchers do not support null Path headers.\n  virtual bool supportsPathlessHeaders() const { return false; }\n};\n\nclass PerFilterConfigs {\npublic:\n  PerFilterConfigs(const Protobuf::Map<std::string, ProtobufWkt::Any>& typed_configs,\n                   const Protobuf::Map<std::string, ProtobufWkt::Struct>& configs,\n                   Server::Configuration::ServerFactoryContext& factory_context,\n                   ProtobufMessage::ValidationVisitor& validator);\n\n  const RouteSpecificFilterConfig* get(const std::string& name) const;\n\nprivate:\n  absl::node_hash_map<std::string, RouteSpecificFilterConfigConstSharedPtr> configs_;\n};\n\nclass RouteEntryImplBase;\nusing RouteEntryImplBaseConstSharedPtr = std::shared_ptr<const RouteEntryImplBase>;\n\n/**\n * Direct response entry that does an SSL redirect.\n */\nclass SslRedirector : public DirectResponseEntry {\npublic:\n  // Router::DirectResponseEntry\n  void finalizeResponseHeaders(Http::ResponseHeaderMap&,\n                               const StreamInfo::StreamInfo&) const override {}\n  std::string newPath(const Http::RequestHeaderMap& headers) const override;\n  void rewritePathHeader(Http::RequestHeaderMap&, bool) const override {}\n  Http::Code responseCode() const override { return Http::Code::MovedPermanently; }\n  const std::string& responseBody() const override { return EMPTY_STRING; }\n  const std::string& routeName() const override { return route_name_; }\n\nprivate:\n  const std::string route_name_;\n};\n\nclass SslRedirectRoute : public Route {\npublic:\n  // Router::Route\n  const DirectResponseEntry* 
directResponseEntry() const override { return &SSL_REDIRECTOR; }\n  const RouteEntry* routeEntry() const override { return nullptr; }\n  const Decorator* decorator() const override { return nullptr; }\n  const RouteTracing* tracingConfig() const override { return nullptr; }\n  const RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override {\n    return nullptr;\n  }\n\nprivate:\n  static const SslRedirector SSL_REDIRECTOR;\n};\n\n/**\n * Implementation of CorsPolicy that reads from the proto route and virtual host config.\n */\nclass CorsPolicyImpl : public CorsPolicy {\npublic:\n  CorsPolicyImpl(const envoy::config::route::v3::CorsPolicy& config, Runtime::Loader& loader);\n\n  // Router::CorsPolicy\n  const std::vector<Matchers::StringMatcherPtr>& allowOrigins() const override {\n    return allow_origins_;\n  };\n  const std::string& allowMethods() const override { return allow_methods_; };\n  const std::string& allowHeaders() const override { return allow_headers_; };\n  const std::string& exposeHeaders() const override { return expose_headers_; };\n  const std::string& maxAge() const override { return max_age_; };\n  const absl::optional<bool>& allowCredentials() const override { return allow_credentials_; };\n  bool enabled() const override {\n    if (config_.has_filter_enabled()) {\n      const auto& filter_enabled = config_.filter_enabled();\n      return loader_.snapshot().featureEnabled(filter_enabled.runtime_key(),\n                                               filter_enabled.default_value());\n    }\n    return legacy_enabled_;\n  };\n  bool shadowEnabled() const override {\n    if (config_.has_shadow_enabled()) {\n      const auto& shadow_enabled = config_.shadow_enabled();\n      return loader_.snapshot().featureEnabled(shadow_enabled.runtime_key(),\n                                               shadow_enabled.default_value());\n    }\n    return false;\n  };\n\nprivate:\n  const envoy::config::route::v3::CorsPolicy config_;\n  
Runtime::Loader& loader_;\n  std::vector<Matchers::StringMatcherPtr> allow_origins_;\n  const std::string allow_methods_;\n  const std::string allow_headers_;\n  const std::string expose_headers_;\n  const std::string max_age_;\n  absl::optional<bool> allow_credentials_{};\n  const bool legacy_enabled_;\n};\n\nclass ConfigImpl;\n/**\n * Holds all routing configuration for an entire virtual host.\n */\nclass VirtualHostImpl : public VirtualHost {\npublic:\n  VirtualHostImpl(const envoy::config::route::v3::VirtualHost& virtual_host,\n                  const ConfigImpl& global_route_config,\n                  Server::Configuration::ServerFactoryContext& factory_context, Stats::Scope& scope,\n                  ProtobufMessage::ValidationVisitor& validator, bool validate_clusters);\n\n  RouteConstSharedPtr getRouteFromEntries(const RouteCallback& cb,\n                                          const Http::RequestHeaderMap& headers,\n                                          const StreamInfo::StreamInfo& stream_info,\n                                          uint64_t random_value) const;\n  const VirtualCluster* virtualClusterFromEntries(const Http::HeaderMap& headers) const;\n  const ConfigImpl& globalRouteConfig() const { return global_route_config_; }\n  const HeaderParser& requestHeaderParser() const { return *request_headers_parser_; }\n  const HeaderParser& responseHeaderParser() const { return *response_headers_parser_; }\n\n  // Router::VirtualHost\n  const CorsPolicy* corsPolicy() const override { return cors_policy_.get(); }\n  Stats::StatName statName() const override { return stat_name_; }\n  const RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; }\n  const Config& routeConfig() const override;\n  const RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override;\n  bool includeAttemptCountInRequest() const override { return include_attempt_count_in_request_; }\n  bool includeAttemptCountInResponse() const 
override { return include_attempt_count_in_response_; }\n  const absl::optional<envoy::config::route::v3::RetryPolicy>& retryPolicy() const {\n    return retry_policy_;\n  }\n  const absl::optional<envoy::config::route::v3::HedgePolicy>& hedgePolicy() const {\n    return hedge_policy_;\n  }\n  uint32_t retryShadowBufferLimit() const override { return retry_shadow_buffer_limit_; }\n\nprivate:\n  enum class SslRequirements { None, ExternalOnly, All };\n\n  struct VirtualClusterBase : public VirtualCluster {\n  public:\n    VirtualClusterBase(Stats::StatName stat_name, Stats::ScopePtr&& scope)\n        : stat_name_(stat_name), scope_(std::move(scope)), stats_(generateStats(*scope_)) {}\n\n    // Router::VirtualCluster\n    Stats::StatName statName() const override { return stat_name_; }\n    VirtualClusterStats& stats() const override { return stats_; }\n\n  private:\n    const Stats::StatName stat_name_;\n    Stats::ScopePtr scope_;\n    mutable VirtualClusterStats stats_;\n  };\n\n  struct VirtualClusterEntry : public VirtualClusterBase {\n    VirtualClusterEntry(const envoy::config::route::v3::VirtualCluster& virtual_cluster,\n                        Stats::StatNamePool& pool, Stats::Scope& scope);\n\n    std::vector<Http::HeaderUtility::HeaderDataPtr> headers_;\n  };\n\n  struct CatchAllVirtualCluster : public VirtualClusterBase {\n    explicit CatchAllVirtualCluster(Stats::StatNamePool& pool, Stats::Scope& scope)\n        : VirtualClusterBase(pool.add(\"other\"), scope.createScope(\"other\")) {}\n  };\n\n  static const std::shared_ptr<const SslRedirectRoute> SSL_REDIRECT_ROUTE;\n\n  Stats::StatNamePool stat_name_pool_;\n  const Stats::StatName stat_name_;\n  Stats::ScopePtr vcluster_scope_;\n  std::vector<RouteEntryImplBaseConstSharedPtr> routes_;\n  std::vector<VirtualClusterEntry> virtual_clusters_;\n  SslRequirements ssl_requirements_;\n  const RateLimitPolicyImpl rate_limit_policy_;\n  std::unique_ptr<const CorsPolicyImpl> cors_policy_;\n  const ConfigImpl& 
global_route_config_; // See note in RouteEntryImplBase::clusterEntry() on why\n                                          // raw ref to the top level config is currently safe.\n  HeaderParserPtr request_headers_parser_;\n  HeaderParserPtr response_headers_parser_;\n  PerFilterConfigs per_filter_configs_;\n  uint32_t retry_shadow_buffer_limit_{std::numeric_limits<uint32_t>::max()};\n  const bool include_attempt_count_in_request_;\n  const bool include_attempt_count_in_response_;\n  absl::optional<envoy::config::route::v3::RetryPolicy> retry_policy_;\n  absl::optional<envoy::config::route::v3::HedgePolicy> hedge_policy_;\n  const CatchAllVirtualCluster virtual_cluster_catch_all_;\n};\n\nusing VirtualHostSharedPtr = std::shared_ptr<VirtualHostImpl>;\n\n/**\n * Implementation of RetryPolicy that reads from the proto route or virtual host config.\n */\nclass RetryPolicyImpl : public RetryPolicy {\n\npublic:\n  RetryPolicyImpl(const envoy::config::route::v3::RetryPolicy& retry_policy,\n                  ProtobufMessage::ValidationVisitor& validation_visitor);\n  RetryPolicyImpl() = default;\n\n  // Router::RetryPolicy\n  std::chrono::milliseconds perTryTimeout() const override { return per_try_timeout_; }\n  uint32_t numRetries() const override { return num_retries_; }\n  uint32_t retryOn() const override { return retry_on_; }\n  std::vector<Upstream::RetryHostPredicateSharedPtr> retryHostPredicates() const override;\n  Upstream::RetryPrioritySharedPtr retryPriority() const override;\n  uint32_t hostSelectionMaxAttempts() const override { return host_selection_attempts_; }\n  const std::vector<uint32_t>& retriableStatusCodes() const override {\n    return retriable_status_codes_;\n  }\n  const std::vector<Http::HeaderMatcherSharedPtr>& retriableHeaders() const override {\n    return retriable_headers_;\n  }\n  const std::vector<Http::HeaderMatcherSharedPtr>& retriableRequestHeaders() const override {\n    return retriable_request_headers_;\n  }\n  
absl::optional<std::chrono::milliseconds> baseInterval() const override { return base_interval_; }\n  absl::optional<std::chrono::milliseconds> maxInterval() const override { return max_interval_; }\n  const std::vector<ResetHeaderParserSharedPtr>& resetHeaders() const override {\n    return reset_headers_;\n  }\n  std::chrono::milliseconds resetMaxInterval() const override { return reset_max_interval_; }\n\nprivate:\n  std::chrono::milliseconds per_try_timeout_{0};\n  // We set the number of retries to 1 by default (i.e. when no route or vhost level retry policy is\n  // set) so that when retries get enabled through the x-envoy-retry-on header we default to 1\n  // retry.\n  uint32_t num_retries_{1};\n  uint32_t retry_on_{};\n  // Each pair contains the name and config proto to be used to create the RetryHostPredicates\n  // that should be used when with this policy.\n  std::vector<std::pair<Upstream::RetryHostPredicateFactory&, ProtobufTypes::MessagePtr>>\n      retry_host_predicate_configs_;\n  Upstream::RetryPrioritySharedPtr retry_priority_;\n  // Name and config proto to use to create the RetryPriority to use with this policy. 
Default\n  // initialized when no RetryPriority should be used.\n  std::pair<Upstream::RetryPriorityFactory*, ProtobufTypes::MessagePtr> retry_priority_config_;\n  uint32_t host_selection_attempts_{1};\n  std::vector<uint32_t> retriable_status_codes_;\n  std::vector<Http::HeaderMatcherSharedPtr> retriable_headers_;\n  std::vector<Http::HeaderMatcherSharedPtr> retriable_request_headers_;\n  absl::optional<std::chrono::milliseconds> base_interval_;\n  absl::optional<std::chrono::milliseconds> max_interval_;\n  std::vector<ResetHeaderParserSharedPtr> reset_headers_{};\n  std::chrono::milliseconds reset_max_interval_{300000};\n  ProtobufMessage::ValidationVisitor* validation_visitor_{};\n};\n\n/**\n * Implementation of ShadowPolicy that reads from the proto route config.\n */\nclass ShadowPolicyImpl : public ShadowPolicy {\npublic:\n  using RequestMirrorPolicy = envoy::config::route::v3::RouteAction::RequestMirrorPolicy;\n  explicit ShadowPolicyImpl(const RequestMirrorPolicy& config);\n\n  // Router::ShadowPolicy\n  const std::string& cluster() const override { return cluster_; }\n  const std::string& runtimeKey() const override { return runtime_key_; }\n  const envoy::type::v3::FractionalPercent& defaultValue() const override { return default_value_; }\n  bool traceSampled() const override { return trace_sampled_; }\n\nprivate:\n  std::string cluster_;\n  std::string runtime_key_;\n  envoy::type::v3::FractionalPercent default_value_;\n  bool trace_sampled_;\n};\n\n/**\n * Implementation of HedgePolicy that reads from the proto route or virtual host config.\n */\nclass HedgePolicyImpl : public HedgePolicy {\n\npublic:\n  explicit HedgePolicyImpl(const envoy::config::route::v3::HedgePolicy& hedge_policy);\n  HedgePolicyImpl();\n\n  // Router::HedgePolicy\n  uint32_t initialRequests() const override { return initial_requests_; }\n  const envoy::type::v3::FractionalPercent& additionalRequestChance() const override {\n    return additional_request_chance_;\n  }\n  bool 
hedgeOnPerTryTimeout() const override { return hedge_on_per_try_timeout_; }\n\nprivate:\n  const uint32_t initial_requests_;\n  const envoy::type::v3::FractionalPercent additional_request_chance_;\n  const bool hedge_on_per_try_timeout_;\n};\n\n/**\n * Implementation of Decorator that reads from the proto route decorator.\n */\nclass DecoratorImpl : public Decorator {\npublic:\n  explicit DecoratorImpl(const envoy::config::route::v3::Decorator& decorator);\n\n  // Decorator::apply\n  void apply(Tracing::Span& span) const override;\n\n  // Decorator::getOperation\n  const std::string& getOperation() const override;\n\n  // Decorator::getOperation\n  bool propagate() const override;\n\nprivate:\n  const std::string operation_;\n  const bool propagate_;\n};\n\n/**\n * Implementation of RouteTracing that reads from the proto route tracing.\n */\nclass RouteTracingImpl : public RouteTracing {\npublic:\n  explicit RouteTracingImpl(const envoy::config::route::v3::Tracing& tracing);\n\n  // Tracing::getClientSampling\n  const envoy::type::v3::FractionalPercent& getClientSampling() const override;\n\n  // Tracing::getRandomSampling\n  const envoy::type::v3::FractionalPercent& getRandomSampling() const override;\n\n  // Tracing::getOverallSampling\n  const envoy::type::v3::FractionalPercent& getOverallSampling() const override;\n\n  const Tracing::CustomTagMap& getCustomTags() const override;\n\nprivate:\n  envoy::type::v3::FractionalPercent client_sampling_;\n  envoy::type::v3::FractionalPercent random_sampling_;\n  envoy::type::v3::FractionalPercent overall_sampling_;\n  Tracing::CustomTagMap custom_tags_;\n};\n\n/**\n * Implementation of InternalRedirectPolicy that reads from the proto\n * InternalRedirectPolicy of the RouteAction.\n */\nclass InternalRedirectPolicyImpl : public InternalRedirectPolicy {\npublic:\n  // Constructor that enables internal redirect with policy_config controlling the configurable\n  // behaviors.\n  explicit InternalRedirectPolicyImpl(\n      
const envoy::config::route::v3::InternalRedirectPolicy& policy_config,\n      ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name);\n  // Default constructor that disables internal redirect.\n  InternalRedirectPolicyImpl() = default;\n\n  bool enabled() const override { return enabled_; }\n\n  bool shouldRedirectForResponseCode(const Http::Code& response_code) const override {\n    return redirect_response_codes_.contains(response_code);\n  }\n\n  std::vector<InternalRedirectPredicateSharedPtr> predicates() const override;\n\n  uint32_t maxInternalRedirects() const override { return max_internal_redirects_; }\n\n  bool isCrossSchemeRedirectAllowed() const override { return allow_cross_scheme_redirect_; }\n\nprivate:\n  absl::flat_hash_set<Http::Code> buildRedirectResponseCodes(\n      const envoy::config::route::v3::InternalRedirectPolicy& policy_config) const;\n\n  const std::string current_route_name_;\n  const absl::flat_hash_set<Http::Code> redirect_response_codes_;\n  const uint32_t max_internal_redirects_{1};\n  const bool enabled_{false};\n  const bool allow_cross_scheme_redirect_{false};\n\n  std::vector<std::pair<InternalRedirectPredicateFactory*, ProtobufTypes::MessagePtr>>\n      predicate_factories_;\n};\n\n/**\n * Base implementation for all route entries.\n */\nclass RouteEntryImplBase : public RouteEntry,\n                           public Matchable,\n                           public DirectResponseEntry,\n                           public Route,\n                           public PathMatchCriterion,\n                           public std::enable_shared_from_this<RouteEntryImplBase>,\n                           Logger::Loggable<Logger::Id::router> {\npublic:\n  /**\n   * @throw EnvoyException with reason if the route configuration contains any errors\n   */\n  RouteEntryImplBase(const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route,\n                     Server::Configuration::ServerFactoryContext& 
factory_context,\n                     ProtobufMessage::ValidationVisitor& validator);\n\n  bool isDirectResponse() const { return direct_response_code_.has_value(); }\n\n  bool isRedirect() const {\n    if (!isDirectResponse()) {\n      return false;\n    }\n    return !host_redirect_.empty() || !path_redirect_.empty() || !prefix_rewrite_redirect_.empty();\n  }\n\n  bool matchRoute(const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info,\n                  uint64_t random_value) const;\n  void validateClusters(Upstream::ClusterManager& cm) const;\n\n  // Router::RouteEntry\n  const std::string& clusterName() const override;\n  Http::Code clusterNotFoundResponseCode() const override {\n    return cluster_not_found_response_code_;\n  }\n  const std::string& routeName() const override { return route_name_; }\n  const CorsPolicy* corsPolicy() const override { return cors_policy_.get(); }\n  void finalizeRequestHeaders(Http::RequestHeaderMap& headers,\n                              const StreamInfo::StreamInfo& stream_info,\n                              bool insert_envoy_original_path) const override;\n  void finalizeResponseHeaders(Http::ResponseHeaderMap& headers,\n                               const StreamInfo::StreamInfo& stream_info) const override;\n  const Http::HashPolicy* hashPolicy() const override { return hash_policy_.get(); }\n\n  const HedgePolicy& hedgePolicy() const override { return hedge_policy_; }\n\n  const MetadataMatchCriteria* metadataMatchCriteria() const override {\n    return metadata_match_criteria_.get();\n  }\n  const TlsContextMatchCriteria* tlsContextMatchCriteria() const override {\n    return tls_context_match_criteria_.get();\n  }\n  Upstream::ResourcePriority priority() const override { return priority_; }\n  const RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; }\n  const RetryPolicy& retryPolicy() const override { return retry_policy_; }\n  const InternalRedirectPolicy& 
internalRedirectPolicy() const override {\n    return internal_redirect_policy_;\n  }\n  uint32_t retryShadowBufferLimit() const override { return retry_shadow_buffer_limit_; }\n  const std::vector<ShadowPolicyPtr>& shadowPolicies() const override { return shadow_policies_; }\n  const VirtualCluster* virtualCluster(const Http::HeaderMap& headers) const override {\n    return vhost_.virtualClusterFromEntries(headers);\n  }\n  std::chrono::milliseconds timeout() const override { return timeout_; }\n  absl::optional<std::chrono::milliseconds> idleTimeout() const override { return idle_timeout_; }\n  absl::optional<std::chrono::milliseconds> maxStreamDuration() const override {\n    return max_stream_duration_;\n  }\n  absl::optional<std::chrono::milliseconds> grpcTimeoutHeaderMax() const override {\n    return grpc_timeout_header_max_;\n  }\n  absl::optional<std::chrono::milliseconds> grpcTimeoutHeaderOffset() const override {\n    return grpc_timeout_header_offset_;\n  }\n  absl::optional<std::chrono::milliseconds> maxGrpcTimeout() const override {\n    return max_grpc_timeout_;\n  }\n  absl::optional<std::chrono::milliseconds> grpcTimeoutOffset() const override {\n    return grpc_timeout_offset_;\n  }\n  const VirtualHost& virtualHost() const override { return vhost_; }\n  bool autoHostRewrite() const override { return auto_host_rewrite_; }\n  const std::multimap<std::string, std::string>& opaqueConfig() const override {\n    return opaque_config_;\n  }\n  bool includeVirtualHostRateLimits() const override { return include_vh_rate_limits_; }\n  const envoy::config::core::v3::Metadata& metadata() const override { return metadata_; }\n  const Envoy::Config::TypedMetadata& typedMetadata() const override { return typed_metadata_; }\n  const PathMatchCriterion& pathMatchCriterion() const override { return *this; }\n  bool includeAttemptCountInRequest() const override {\n    return vhost_.includeAttemptCountInRequest();\n  }\n  bool includeAttemptCountInResponse() const 
override {\n    return vhost_.includeAttemptCountInResponse();\n  }\n  const absl::optional<ConnectConfig>& connectConfig() const override { return connect_config_; }\n  const UpgradeMap& upgradeMap() const override { return upgrade_map_; }\n\n  // Router::DirectResponseEntry\n  std::string newPath(const Http::RequestHeaderMap& headers) const override;\n  absl::string_view processRequestHost(const Http::RequestHeaderMap& headers,\n                                       absl::string_view new_scheme,\n                                       absl::string_view new_port) const;\n  void rewritePathHeader(Http::RequestHeaderMap&, bool) const override {}\n  Http::Code responseCode() const override { return direct_response_code_.value(); }\n  const std::string& responseBody() const override { return direct_response_body_; }\n\n  // Router::Route\n  const DirectResponseEntry* directResponseEntry() const override;\n  const RouteEntry* routeEntry() const override;\n  const Decorator* decorator() const override { return decorator_.get(); }\n  const RouteTracing* tracingConfig() const override { return route_tracing_.get(); }\n  const RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override;\n\nprotected:\n  const bool case_sensitive_;\n  const std::string prefix_rewrite_;\n  Regex::CompiledMatcherPtr regex_rewrite_;\n  std::string regex_rewrite_substitution_;\n  const std::string host_rewrite_;\n  bool include_vh_rate_limits_;\n  absl::optional<ConnectConfig> connect_config_;\n\n  RouteConstSharedPtr clusterEntry(const Http::HeaderMap& headers, uint64_t random_value) const;\n\n  /**\n   * returns the correct path rewrite string for this route.\n   */\n  const std::string& getPathRewrite() const {\n    return (isRedirect()) ? 
prefix_rewrite_redirect_ : prefix_rewrite_;\n  }\n\n  void finalizePathHeader(Http::RequestHeaderMap& headers, absl::string_view matched_path,\n                          bool insert_envoy_original_path) const;\n\nprivate:\n  struct RuntimeData {\n    std::string fractional_runtime_key_{};\n    envoy::type::v3::FractionalPercent fractional_runtime_default_{};\n  };\n\n  class DynamicRouteEntry : public RouteEntry, public Route {\n  public:\n    DynamicRouteEntry(const RouteEntryImplBase* parent, const std::string& name)\n        : parent_(parent), cluster_name_(name) {}\n\n    const std::string& routeName() const override { return parent_->routeName(); }\n    // Router::RouteEntry\n    const std::string& clusterName() const override { return cluster_name_; }\n    Http::Code clusterNotFoundResponseCode() const override {\n      return parent_->clusterNotFoundResponseCode();\n    }\n\n    void finalizeRequestHeaders(Http::RequestHeaderMap& headers,\n                                const StreamInfo::StreamInfo& stream_info,\n                                bool insert_envoy_original_path) const override {\n      return parent_->finalizeRequestHeaders(headers, stream_info, insert_envoy_original_path);\n    }\n    void finalizeResponseHeaders(Http::ResponseHeaderMap& headers,\n                                 const StreamInfo::StreamInfo& stream_info) const override {\n      return parent_->finalizeResponseHeaders(headers, stream_info);\n    }\n\n    const CorsPolicy* corsPolicy() const override { return parent_->corsPolicy(); }\n    const Http::HashPolicy* hashPolicy() const override { return parent_->hashPolicy(); }\n    const HedgePolicy& hedgePolicy() const override { return parent_->hedgePolicy(); }\n    Upstream::ResourcePriority priority() const override { return parent_->priority(); }\n    const RateLimitPolicy& rateLimitPolicy() const override { return parent_->rateLimitPolicy(); }\n    const RetryPolicy& retryPolicy() const override { return 
parent_->retryPolicy(); }\n    const InternalRedirectPolicy& internalRedirectPolicy() const override {\n      return parent_->internalRedirectPolicy();\n    }\n    uint32_t retryShadowBufferLimit() const override { return parent_->retryShadowBufferLimit(); }\n    const std::vector<ShadowPolicyPtr>& shadowPolicies() const override {\n      return parent_->shadowPolicies();\n    }\n    std::chrono::milliseconds timeout() const override { return parent_->timeout(); }\n    absl::optional<std::chrono::milliseconds> idleTimeout() const override {\n      return parent_->idleTimeout();\n    }\n    absl::optional<std::chrono::milliseconds> maxStreamDuration() const override {\n      return parent_->max_stream_duration_;\n    }\n    absl::optional<std::chrono::milliseconds> grpcTimeoutHeaderMax() const override {\n      return parent_->grpc_timeout_header_max_;\n    }\n    absl::optional<std::chrono::milliseconds> grpcTimeoutHeaderOffset() const override {\n      return parent_->grpc_timeout_header_offset_;\n    }\n    absl::optional<std::chrono::milliseconds> maxGrpcTimeout() const override {\n      return parent_->maxGrpcTimeout();\n    }\n    absl::optional<std::chrono::milliseconds> grpcTimeoutOffset() const override {\n      return parent_->grpcTimeoutOffset();\n    }\n    const MetadataMatchCriteria* metadataMatchCriteria() const override {\n      return parent_->metadataMatchCriteria();\n    }\n    const TlsContextMatchCriteria* tlsContextMatchCriteria() const override {\n      return parent_->tlsContextMatchCriteria();\n    }\n\n    const VirtualCluster* virtualCluster(const Http::HeaderMap& headers) const override {\n      return parent_->virtualCluster(headers);\n    }\n\n    const std::multimap<std::string, std::string>& opaqueConfig() const override {\n      return parent_->opaqueConfig();\n    }\n\n    const VirtualHost& virtualHost() const override { return parent_->virtualHost(); }\n    bool autoHostRewrite() const override { return parent_->autoHostRewrite(); 
}\n    bool includeVirtualHostRateLimits() const override {\n      return parent_->includeVirtualHostRateLimits();\n    }\n    const envoy::config::core::v3::Metadata& metadata() const override {\n      return parent_->metadata();\n    }\n    const Envoy::Config::TypedMetadata& typedMetadata() const override {\n      return parent_->typedMetadata();\n    }\n    const PathMatchCriterion& pathMatchCriterion() const override {\n      return parent_->pathMatchCriterion();\n    }\n\n    bool includeAttemptCountInRequest() const override {\n      return parent_->includeAttemptCountInRequest();\n    }\n    bool includeAttemptCountInResponse() const override {\n      return parent_->includeAttemptCountInResponse();\n    }\n    const absl::optional<ConnectConfig>& connectConfig() const override {\n      return parent_->connectConfig();\n    }\n    const UpgradeMap& upgradeMap() const override { return parent_->upgradeMap(); }\n\n    // Router::Route\n    const DirectResponseEntry* directResponseEntry() const override { return nullptr; }\n    const RouteEntry* routeEntry() const override { return this; }\n    const Decorator* decorator() const override { return parent_->decorator(); }\n    const RouteTracing* tracingConfig() const override { return parent_->tracingConfig(); }\n\n    const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const override {\n      return parent_->perFilterConfig(name);\n    };\n\n  private:\n    const RouteEntryImplBase* parent_;\n    const std::string cluster_name_;\n  };\n\n  /**\n   * Route entry implementation for weighted clusters. The RouteEntryImplBase object holds\n   * one or more weighted cluster objects, where each object has a back pointer to the parent\n   * RouteEntryImplBase object. 
Almost all functions in this class forward calls back to the\n   * parent, with the exception of clusterName, routeEntry, and metadataMatchCriteria.\n   */\n  class WeightedClusterEntry : public DynamicRouteEntry {\n  public:\n    WeightedClusterEntry(const RouteEntryImplBase* parent, const std::string& rutime_key,\n                         Server::Configuration::ServerFactoryContext& factory_context,\n                         ProtobufMessage::ValidationVisitor& validator,\n                         const envoy::config::route::v3::WeightedCluster::ClusterWeight& cluster);\n\n    uint64_t clusterWeight() const {\n      return loader_.snapshot().getInteger(runtime_key_, cluster_weight_);\n    }\n\n    const MetadataMatchCriteria* metadataMatchCriteria() const override {\n      if (cluster_metadata_match_criteria_) {\n        return cluster_metadata_match_criteria_.get();\n      }\n      return DynamicRouteEntry::metadataMatchCriteria();\n    }\n\n    void finalizeRequestHeaders(Http::RequestHeaderMap& headers,\n                                const StreamInfo::StreamInfo& stream_info,\n                                bool insert_envoy_original_path) const override {\n      request_headers_parser_->evaluateHeaders(headers, stream_info);\n      DynamicRouteEntry::finalizeRequestHeaders(headers, stream_info, insert_envoy_original_path);\n    }\n    void finalizeResponseHeaders(Http::ResponseHeaderMap& headers,\n                                 const StreamInfo::StreamInfo& stream_info) const override {\n      response_headers_parser_->evaluateHeaders(headers, stream_info);\n      DynamicRouteEntry::finalizeResponseHeaders(headers, stream_info);\n    }\n\n    const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const override;\n\n  private:\n    const std::string runtime_key_;\n    Runtime::Loader& loader_;\n    const uint64_t cluster_weight_;\n    MetadataMatchCriteriaConstPtr cluster_metadata_match_criteria_;\n    HeaderParserPtr 
request_headers_parser_;\n    HeaderParserPtr response_headers_parser_;\n    PerFilterConfigs per_filter_configs_;\n  };\n\n  using WeightedClusterEntrySharedPtr = std::shared_ptr<WeightedClusterEntry>;\n\n  absl::optional<RuntimeData> loadRuntimeData(const envoy::config::route::v3::RouteMatch& route);\n\n  static std::multimap<std::string, std::string>\n  parseOpaqueConfig(const envoy::config::route::v3::Route& route);\n\n  static DecoratorConstPtr parseDecorator(const envoy::config::route::v3::Route& route);\n\n  static RouteTracingConstPtr parseRouteTracing(const envoy::config::route::v3::Route& route);\n\n  bool evaluateRuntimeMatch(const uint64_t random_value) const;\n\n  bool evaluateTlsContextMatch(const StreamInfo::StreamInfo& stream_info) const;\n\n  HedgePolicyImpl\n  buildHedgePolicy(const absl::optional<envoy::config::route::v3::HedgePolicy>& vhost_hedge_policy,\n                   const envoy::config::route::v3::RouteAction& route_config) const;\n\n  RetryPolicyImpl\n  buildRetryPolicy(const absl::optional<envoy::config::route::v3::RetryPolicy>& vhost_retry_policy,\n                   const envoy::config::route::v3::RouteAction& route_config,\n                   ProtobufMessage::ValidationVisitor& validation_visitor) const;\n\n  InternalRedirectPolicyImpl\n  buildInternalRedirectPolicy(const envoy::config::route::v3::RouteAction& route_config,\n                              ProtobufMessage::ValidationVisitor& validator,\n                              absl::string_view current_route_name) const;\n\n  // Default timeout is 15s if nothing is specified in the route config.\n  static const uint64_t DEFAULT_ROUTE_TIMEOUT_MS = 15000;\n\n  std::unique_ptr<const CorsPolicyImpl> cors_policy_;\n  const VirtualHostImpl& vhost_; // See note in RouteEntryImplBase::clusterEntry() on why raw ref\n                                 // to virtual host is currently safe.\n  const bool auto_host_rewrite_;\n  const absl::optional<Http::LowerCaseString> 
auto_host_rewrite_header_;\n  const Regex::CompiledMatcherPtr host_rewrite_path_regex_;\n  const std::string host_rewrite_path_regex_substitution_;\n  const std::string cluster_name_;\n  const Http::LowerCaseString cluster_header_name_;\n  const Http::Code cluster_not_found_response_code_;\n  const std::chrono::milliseconds timeout_;\n  const absl::optional<std::chrono::milliseconds> idle_timeout_;\n  const absl::optional<std::chrono::milliseconds> max_stream_duration_;\n  const absl::optional<std::chrono::milliseconds> grpc_timeout_header_max_;\n  const absl::optional<std::chrono::milliseconds> grpc_timeout_header_offset_;\n  const absl::optional<std::chrono::milliseconds> max_grpc_timeout_;\n  const absl::optional<std::chrono::milliseconds> grpc_timeout_offset_;\n  Runtime::Loader& loader_;\n  const absl::optional<RuntimeData> runtime_;\n  const std::string scheme_redirect_;\n  const std::string host_redirect_;\n  const std::string port_redirect_;\n  const std::string path_redirect_;\n  const bool path_redirect_has_query_;\n  const bool enable_preserve_query_in_path_redirects_;\n  const bool https_redirect_;\n  const std::string prefix_rewrite_redirect_;\n  const bool strip_query_;\n  const HedgePolicyImpl hedge_policy_;\n  const RetryPolicyImpl retry_policy_;\n  const InternalRedirectPolicyImpl internal_redirect_policy_;\n  const RateLimitPolicyImpl rate_limit_policy_;\n  std::vector<ShadowPolicyPtr> shadow_policies_;\n  const Upstream::ResourcePriority priority_;\n  std::vector<Http::HeaderUtility::HeaderDataPtr> config_headers_;\n  std::vector<ConfigUtility::QueryParameterMatcherPtr> config_query_parameters_;\n  std::vector<WeightedClusterEntrySharedPtr> weighted_clusters_;\n\n  UpgradeMap upgrade_map_;\n  const uint64_t total_cluster_weight_;\n  std::unique_ptr<const Http::HashPolicyImpl> hash_policy_;\n  MetadataMatchCriteriaConstPtr metadata_match_criteria_;\n  TlsContextMatchCriteriaConstPtr tls_context_match_criteria_;\n  HeaderParserPtr 
request_headers_parser_;\n  HeaderParserPtr response_headers_parser_;\n  uint32_t retry_shadow_buffer_limit_{std::numeric_limits<uint32_t>::max()};\n  envoy::config::core::v3::Metadata metadata_;\n  Envoy::Config::TypedMetadataImpl<HttpRouteTypedMetadataFactory> typed_metadata_;\n  const bool match_grpc_;\n\n  // TODO(danielhochman): refactor multimap into unordered_map since JSON is unordered map.\n  const std::multimap<std::string, std::string> opaque_config_;\n\n  const DecoratorConstPtr decorator_;\n  const RouteTracingConstPtr route_tracing_;\n  const absl::optional<Http::Code> direct_response_code_;\n  std::string direct_response_body_;\n  PerFilterConfigs per_filter_configs_;\n  const std::string route_name_;\n  TimeSource& time_source_;\n};\n\n/**\n * Route entry implementation for prefix path match routing.\n */\nclass PrefixRouteEntryImpl : public RouteEntryImplBase {\npublic:\n  PrefixRouteEntryImpl(const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route,\n                       Server::Configuration::ServerFactoryContext& factory_context,\n                       ProtobufMessage::ValidationVisitor& validator);\n\n  // Router::PathMatchCriterion\n  const std::string& matcher() const override { return prefix_; }\n  PathMatchType matchType() const override { return PathMatchType::Prefix; }\n\n  // Router::Matchable\n  RouteConstSharedPtr matches(const Http::RequestHeaderMap& headers,\n                              const StreamInfo::StreamInfo& stream_info,\n                              uint64_t random_value) const override;\n\n  // Router::DirectResponseEntry\n  void rewritePathHeader(Http::RequestHeaderMap& headers,\n                         bool insert_envoy_original_path) const override;\n\nprivate:\n  const std::string prefix_;\n  const Matchers::PathMatcherConstSharedPtr path_matcher_;\n};\n\n/**\n * Route entry implementation for exact path match routing.\n */\nclass PathRouteEntryImpl : public RouteEntryImplBase {\npublic:\n  
PathRouteEntryImpl(const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route,\n                     Server::Configuration::ServerFactoryContext& factory_context,\n                     ProtobufMessage::ValidationVisitor& validator);\n\n  // Router::PathMatchCriterion\n  const std::string& matcher() const override { return path_; }\n  PathMatchType matchType() const override { return PathMatchType::Exact; }\n\n  // Router::Matchable\n  RouteConstSharedPtr matches(const Http::RequestHeaderMap& headers,\n                              const StreamInfo::StreamInfo& stream_info,\n                              uint64_t random_value) const override;\n\n  // Router::DirectResponseEntry\n  void rewritePathHeader(Http::RequestHeaderMap& headers,\n                         bool insert_envoy_original_path) const override;\n\nprivate:\n  const std::string path_;\n  const Matchers::PathMatcherConstSharedPtr path_matcher_;\n};\n\n/**\n * Route entry implementation for regular expression match routing.\n */\nclass RegexRouteEntryImpl : public RouteEntryImplBase {\npublic:\n  RegexRouteEntryImpl(const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route,\n                      Server::Configuration::ServerFactoryContext& factory_context,\n                      ProtobufMessage::ValidationVisitor& validator);\n\n  // Router::PathMatchCriterion\n  const std::string& matcher() const override { return regex_str_; }\n  PathMatchType matchType() const override { return PathMatchType::Regex; }\n\n  // Router::Matchable\n  RouteConstSharedPtr matches(const Http::RequestHeaderMap& headers,\n                              const StreamInfo::StreamInfo& stream_info,\n                              uint64_t random_value) const override;\n\n  // Router::DirectResponseEntry\n  void rewritePathHeader(Http::RequestHeaderMap& headers,\n                         bool insert_envoy_original_path) const override;\n\nprivate:\n  Regex::CompiledMatcherPtr regex_;\n  std::string 
regex_str_;\n};\n\n/**\n * Route entry implementation for CONNECT requests.\n */\nclass ConnectRouteEntryImpl : public RouteEntryImplBase {\npublic:\n  ConnectRouteEntryImpl(const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route,\n                        Server::Configuration::ServerFactoryContext& factory_context,\n                        ProtobufMessage::ValidationVisitor& validator);\n\n  // Router::PathMatchCriterion\n  const std::string& matcher() const override { return EMPTY_STRING; }\n  PathMatchType matchType() const override { return PathMatchType::None; }\n\n  // Router::Matchable\n  RouteConstSharedPtr matches(const Http::RequestHeaderMap& headers,\n                              const StreamInfo::StreamInfo& stream_info,\n                              uint64_t random_value) const override;\n\n  // Router::DirectResponseEntry\n  void rewritePathHeader(Http::RequestHeaderMap&, bool) const override;\n\n  bool supportsPathlessHeaders() const override { return true; }\n};\n/**\n * Wraps the route configuration which matches an incoming request headers to a backend cluster.\n * This is split out mainly to help with unit testing.\n */\nclass RouteMatcher {\npublic:\n  RouteMatcher(const envoy::config::route::v3::RouteConfiguration& config,\n               const ConfigImpl& global_http_config,\n               Server::Configuration::ServerFactoryContext& factory_context,\n               ProtobufMessage::ValidationVisitor& validator, bool validate_clusters);\n\n  RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers,\n                            const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const;\n\n  const VirtualHostImpl* findVirtualHost(const Http::RequestHeaderMap& headers) const;\n\nprivate:\n  using WildcardVirtualHosts =\n      std::map<int64_t, absl::node_hash_map<std::string, VirtualHostSharedPtr>, std::greater<>>;\n  using SubstringFunction = std::function<std::string(const 
std::string&, int)>;\n  const VirtualHostImpl* findWildcardVirtualHost(const std::string& host,\n                                                 const WildcardVirtualHosts& wildcard_virtual_hosts,\n                                                 SubstringFunction substring_function) const;\n\n  Stats::ScopePtr vhost_scope_;\n  absl::node_hash_map<std::string, VirtualHostSharedPtr> virtual_hosts_;\n  // std::greater as a minor optimization to iterate from more to less specific\n  //\n  // A note on using an unordered_map versus a vector of (string, VirtualHostSharedPtr) pairs:\n  //\n  // Based on local benchmarks, each vector entry costs around 20ns for recall and (string)\n  // comparison with a fixed cost of about 25ns. For unordered_map, the empty map costs about 65ns\n  // and climbs to about 110ns once there are any entries.\n  //\n  // The break-even is 4 entries.\n  WildcardVirtualHosts wildcard_virtual_host_suffixes_;\n  WildcardVirtualHosts wildcard_virtual_host_prefixes_;\n\n  VirtualHostSharedPtr default_virtual_host_;\n};\n\n/**\n * Implementation of Config that reads from a proto file.\n */\nclass ConfigImpl : public Config {\npublic:\n  ConfigImpl(const envoy::config::route::v3::RouteConfiguration& config,\n             Server::Configuration::ServerFactoryContext& factory_context,\n             ProtobufMessage::ValidationVisitor& validator, bool validate_clusters_default);\n\n  const HeaderParser& requestHeaderParser() const { return *request_headers_parser_; };\n  const HeaderParser& responseHeaderParser() const { return *response_headers_parser_; };\n\n  bool virtualHostExists(const Http::RequestHeaderMap& headers) const {\n    return route_matcher_->findVirtualHost(headers) != nullptr;\n  }\n\n  // Router::Config\n  RouteConstSharedPtr route(const Http::RequestHeaderMap& headers,\n                            const StreamInfo::StreamInfo& stream_info,\n                            uint64_t random_value) const override {\n    return route(nullptr, 
headers, stream_info, random_value);\n  }\n\n  RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers,\n                            const StreamInfo::StreamInfo& stream_info,\n                            uint64_t random_value) const override;\n\n  const std::list<Http::LowerCaseString>& internalOnlyHeaders() const override {\n    return internal_only_headers_;\n  }\n\n  const std::string& name() const override { return name_; }\n\n  bool usesVhds() const override { return uses_vhds_; }\n\n  bool mostSpecificHeaderMutationsWins() const override {\n    return most_specific_header_mutations_wins_;\n  }\n\nprivate:\n  std::unique_ptr<RouteMatcher> route_matcher_;\n  std::list<Http::LowerCaseString> internal_only_headers_;\n  HeaderParserPtr request_headers_parser_;\n  HeaderParserPtr response_headers_parser_;\n  const std::string name_;\n  Stats::SymbolTable& symbol_table_;\n  const bool uses_vhds_;\n  const bool most_specific_header_mutations_wins_;\n};\n\n/**\n * Implementation of Config that is empty.\n */\nclass NullConfigImpl : public Config {\npublic:\n  // Router::Config\n  RouteConstSharedPtr route(const Http::RequestHeaderMap&, const StreamInfo::StreamInfo&,\n                            uint64_t) const override {\n    return nullptr;\n  }\n\n  RouteConstSharedPtr route(const RouteCallback&, const Http::RequestHeaderMap&,\n                            const StreamInfo::StreamInfo&, uint64_t) const override {\n    return nullptr;\n  }\n\n  const std::list<Http::LowerCaseString>& internalOnlyHeaders() const override {\n    return internal_only_headers_;\n  }\n\n  const std::string& name() const override { return name_; }\n  bool usesVhds() const override { return false; }\n  bool mostSpecificHeaderMutationsWins() const override { return false; }\n\nprivate:\n  std::list<Http::LowerCaseString> internal_only_headers_;\n  const std::string name_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/config_utility.cc",
    "content": "#include \"common/router/config_utility.h\"\n\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/regex.h\"\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nabsl::optional<Matchers::StringMatcherImpl>\nmaybeCreateStringMatcher(const envoy::config::route::v3::QueryParameterMatcher& config) {\n  switch (config.query_parameter_match_specifier_case()) {\n  case envoy::config::route::v3::QueryParameterMatcher::QueryParameterMatchSpecifierCase::\n      kStringMatch: {\n    return Matchers::StringMatcherImpl(config.string_match());\n  }\n  case envoy::config::route::v3::QueryParameterMatcher::QueryParameterMatchSpecifierCase::\n      kPresentMatch: {\n    return absl::nullopt;\n  }\n  case envoy::config::route::v3::QueryParameterMatcher::QueryParameterMatchSpecifierCase::\n      QUERY_PARAMETER_MATCH_SPECIFIER_NOT_SET: {\n    if (config.hidden_envoy_deprecated_value().empty()) {\n      // Present match.\n      return absl::nullopt;\n    }\n\n    envoy::type::matcher::v3::StringMatcher matcher_config;\n    if (config.has_hidden_envoy_deprecated_regex() ? 
config.hidden_envoy_deprecated_regex().value()\n                                                   : false) {\n      matcher_config.set_hidden_envoy_deprecated_regex(config.hidden_envoy_deprecated_value());\n    } else {\n      matcher_config.set_exact(config.hidden_envoy_deprecated_value());\n    }\n    return Matchers::StringMatcherImpl(matcher_config);\n  }\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE; // Needed for gcc\n}\n\n} // namespace\n\nConfigUtility::QueryParameterMatcher::QueryParameterMatcher(\n    const envoy::config::route::v3::QueryParameterMatcher& config)\n    : name_(config.name()), matcher_(maybeCreateStringMatcher(config)) {}\n\nbool ConfigUtility::QueryParameterMatcher::matches(\n    const Http::Utility::QueryParams& request_query_params) const {\n  auto query_param = request_query_params.find(name_);\n  if (query_param == request_query_params.end()) {\n    return false;\n  } else if (!matcher_.has_value()) {\n    // Present match.\n    return true;\n  } else {\n    return matcher_.value().match(query_param->second);\n  }\n}\n\nUpstream::ResourcePriority\nConfigUtility::parsePriority(const envoy::config::core::v3::RoutingPriority& priority) {\n  switch (priority) {\n  case envoy::config::core::v3::DEFAULT:\n    return Upstream::ResourcePriority::Default;\n  case envoy::config::core::v3::HIGH:\n    return Upstream::ResourcePriority::High;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nbool ConfigUtility::matchQueryParams(\n    const Http::Utility::QueryParams& query_params,\n    const std::vector<QueryParameterMatcherPtr>& config_query_params) {\n  for (const auto& config_query_param : config_query_params) {\n    if (!config_query_param->matches(query_params)) {\n      return false;\n    }\n  }\n\n  return true;\n}\n\nHttp::Code ConfigUtility::parseRedirectResponseCode(\n    const envoy::config::route::v3::RedirectAction::RedirectResponseCode& code) {\n  switch (code) {\n  case envoy::config::route::v3::RedirectAction::MOVED_PERMANENTLY:\n   
 return Http::Code::MovedPermanently;\n  case envoy::config::route::v3::RedirectAction::FOUND:\n    return Http::Code::Found;\n  case envoy::config::route::v3::RedirectAction::SEE_OTHER:\n    return Http::Code::SeeOther;\n  case envoy::config::route::v3::RedirectAction::TEMPORARY_REDIRECT:\n    return Http::Code::TemporaryRedirect;\n  case envoy::config::route::v3::RedirectAction::PERMANENT_REDIRECT:\n    return Http::Code::PermanentRedirect;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nabsl::optional<Http::Code>\nConfigUtility::parseDirectResponseCode(const envoy::config::route::v3::Route& route) {\n  if (route.has_redirect()) {\n    return parseRedirectResponseCode(route.redirect().response_code());\n  } else if (route.has_direct_response()) {\n    return static_cast<Http::Code>(route.direct_response().status());\n  }\n  return {};\n}\n\nstd::string ConfigUtility::parseDirectResponseBody(const envoy::config::route::v3::Route& route,\n                                                   Api::Api& api) {\n  static const ssize_t MaxBodySize = 4096;\n  if (!route.has_direct_response() || !route.direct_response().has_body()) {\n    return EMPTY_STRING;\n  }\n  const auto& body = route.direct_response().body();\n  const std::string& filename = body.filename();\n  if (!filename.empty()) {\n    if (!api.fileSystem().fileExists(filename)) {\n      throw EnvoyException(fmt::format(\"response body file {} does not exist\", filename));\n    }\n    const ssize_t size = api.fileSystem().fileSize(filename);\n    if (size < 0) {\n      throw EnvoyException(absl::StrCat(\"cannot determine size of response body file \", filename));\n    }\n    if (size > MaxBodySize) {\n      throw EnvoyException(fmt::format(\"response body file {} size is {} bytes; maximum is {}\",\n                                       filename, size, MaxBodySize));\n    }\n    return api.fileSystem().fileReadToEnd(filename);\n  }\n  const std::string inline_body(body.inline_bytes().empty() ? 
body.inline_string()\n                                                            : body.inline_bytes());\n  if (inline_body.length() > MaxBodySize) {\n    throw EnvoyException(fmt::format(\"response body size is {} bytes; maximum is {}\",\n                                     inline_body.length(), MaxBodySize));\n  }\n  return inline_body;\n}\n\nHttp::Code ConfigUtility::parseClusterNotFoundResponseCode(\n    const envoy::config::route::v3::RouteAction::ClusterNotFoundResponseCode& code) {\n  switch (code) {\n  case envoy::config::route::v3::RouteAction::SERVICE_UNAVAILABLE:\n    return Http::Code::ServiceUnavailable;\n  case envoy::config::route::v3::RouteAction::NOT_FOUND:\n    return Http::Code::NotFound;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/config_utility.h",
    "content": "#pragma once\n\n#include <cinttypes>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/upstream/resource_manager.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/matchers.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Utility routines for loading route configuration and matching runtime request headers.\n */\nclass ConfigUtility {\npublic:\n  // A QueryParameterMatcher specifies one \"name\" or \"name=value\" element\n  // to match in a request's query string. It is the optimized, runtime\n  // equivalent of the QueryParameterMatcher proto in the RDS v2 API.\n  class QueryParameterMatcher {\n  public:\n    QueryParameterMatcher(const envoy::config::route::v3::QueryParameterMatcher& config);\n\n    /**\n     * Check if the query parameters for a request contain a match for this\n     * QueryParameterMatcher.\n     * @param request_query_params supplies the parsed query parameters from a request.\n     * @return bool true if a match for this QueryParameterMatcher exists in request_query_params.\n     */\n    bool matches(const Http::Utility::QueryParams& request_query_params) const;\n\n  private:\n    const std::string name_;\n    const absl::optional<Matchers::StringMatcherImpl> matcher_;\n  };\n\n  using QueryParameterMatcherPtr = std::unique_ptr<const QueryParameterMatcher>;\n\n  /**\n   * @return the resource priority parsed from proto.\n   */\n  static Upstream::ResourcePriority\n  parsePriority(const envoy::config::core::v3::RoutingPriority& priority);\n\n  /**\n   * See if the query parameters specified in the config are present in a request.\n   * @param query_params supplies the 
query parameters from the request's query string.\n   * @param config_params supplies the list of configured query param conditions on which to match.\n   * @return bool true if all the query params (and values) in the config_params are found in the\n   *         query_params\n   */\n  static bool matchQueryParams(const Http::Utility::QueryParams& query_params,\n                               const std::vector<QueryParameterMatcherPtr>& config_query_params);\n\n  /**\n   * Returns the redirect HTTP Status Code enum parsed from proto.\n   * @param code supplies the RedirectResponseCode enum.\n   * @return Returns the Http::Code version of the RedirectResponseCode.\n   */\n  static Http::Code parseRedirectResponseCode(\n      const envoy::config::route::v3::RedirectAction::RedirectResponseCode& code);\n\n  /**\n   * Returns the HTTP Status Code enum parsed from the route's redirect or direct_response.\n   * @param route supplies the Route configuration.\n   * @return absl::optional<Http::Code> the HTTP status from the route's direct_response if\n   * specified, or the HTTP status code from the route's redirect if specified, or an empty\n   * absl::optional otherwise.\n   */\n  static absl::optional<Http::Code>\n  parseDirectResponseCode(const envoy::config::route::v3::Route& route);\n\n  /**\n   * Returns the content of the response body to send with direct responses from a route.\n   * @param route supplies the Route configuration.\n   * @param api reference to the Api object\n   * @return absl::optional<std::string> the response body provided inline in the route's\n   *         direct_response if specified, or the contents of the file named in the\n   *         route's direct_response if specified, or an empty string otherwise.\n   * @throw EnvoyException if the route configuration contains an error.\n   */\n  static std::string parseDirectResponseBody(const envoy::config::route::v3::Route& route,\n                                             Api::Api& api);\n\n  
/**\n   * Returns the HTTP Status Code enum parsed from proto.\n   * @param code supplies the ClusterNotFoundResponseCode enum.\n   * @return Returns the Http::Code version of the ClusterNotFoundResponseCode enum.\n   */\n  static Http::Code parseClusterNotFoundResponseCode(\n      const envoy::config::route::v3::RouteAction::ClusterNotFoundResponseCode& code);\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/debug_config.cc",
    "content": "#include \"common/router/debug_config.h\"\n\n#include \"common/common/macros.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nDebugConfig::DebugConfig(bool append_cluster, absl::optional<Http::LowerCaseString> cluster_header,\n                         bool append_upstream_host,\n                         absl::optional<Http::LowerCaseString> hostname_header,\n                         absl::optional<Http::LowerCaseString> host_address_header,\n                         bool do_not_forward,\n                         absl::optional<Http::LowerCaseString> not_forwarded_header)\n    : append_cluster_(append_cluster), cluster_header_(std::move(cluster_header)),\n      append_upstream_host_(append_upstream_host), hostname_header_(std::move(hostname_header)),\n      host_address_header_(std::move(host_address_header)), do_not_forward_(do_not_forward),\n      not_forwarded_header_(std::move(not_forwarded_header)) {}\n\nconst std::string& DebugConfig::key() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.router.debug_config\");\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/debug_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Configuration for debugging router behavior. The router tries to look up an instance of this\n * class by its `key`, in FilterState (via StreamInfo). If it's not present, debugging features are\n * not enabled.\n *\n * There is currently no public API for populating this configuration -- neither globally nor\n * per-request -- users desiring to use the router debugging features should create and install\n * their own custom StreamDecoderFilter to set DebugConfig as desired before the router consumes\n * it.\n *\n * This is intended to be temporary, and should be replaced by some proper configuration (e.g. in\n * router.proto) when we get unified matchers. See https://github.com/envoyproxy/envoy/issues/5569.\n *\n * TODO(mergeconflict): Keep this promise.\n */\nstruct DebugConfig : public StreamInfo::FilterState::Object {\n\n  DebugConfig(bool append_cluster, absl::optional<Http::LowerCaseString> cluster_header,\n              bool append_upstream_host, absl::optional<Http::LowerCaseString> hostname_header,\n              absl::optional<Http::LowerCaseString> host_address_header, bool do_not_forward,\n              absl::optional<Http::LowerCaseString> not_forwarded_header);\n\n  /**\n   * @return the string key for finding DebugConfig, if present, in FilterState.\n   */\n  static const std::string& key();\n\n  /**\n   * Append cluster information as a response header if `append_cluster_` is true. 
The router will\n   * use `cluster_header_` as the header name, if specified, or 'x-envoy-cluster' by default.\n   */\n  bool append_cluster_{};\n  absl::optional<Http::LowerCaseString> cluster_header_;\n\n  /**\n   * Append upstream host name and address as response headers, if `append_upstream_host_` is true.\n   * The router will use `hostname_header_` and `host_address_header_` as the header names, if\n   * specified, or 'x-envoy-upstream-hostname' and 'x-envoy-upstream-host-address' by default.\n   */\n  bool append_upstream_host_{};\n  absl::optional<Http::LowerCaseString> hostname_header_;\n  absl::optional<Http::LowerCaseString> host_address_header_;\n\n  /**\n   * Do not forward the associated request to the upstream cluster, if `do_not_forward_` is true.\n   * If the router would have forwarded it (assuming all other preconditions are met), it will\n   * instead respond with a 204 \"no content.\" Append `not_forwarded_header_`, if specified, or\n   * 'x-envoy-not-forwarded' by default. Any debug headers specified above (or others introduced by\n   * other filters) will be appended to this empty response.\n   */\n  bool do_not_forward_{};\n  absl::optional<Http::LowerCaseString> not_forwarded_header_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/header_formatter.cc",
    "content": "#include \"common/router/header_formatter.h\"\n\n#include <string>\n\n#include \"envoy/router/string_accessor.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/metadata.h\"\n#include \"common/formatter/substitution_formatter.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/stream_info/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nnamespace {\n\nstd::string formatUpstreamMetadataParseException(absl::string_view params,\n                                                 const EnvoyException* cause = nullptr) {\n  std::string reason;\n  if (cause != nullptr) {\n    reason = absl::StrCat(\", because \", cause->what());\n  }\n\n  return absl::StrCat(\"Invalid header configuration. Expected format \"\n                      \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n                      \"UPSTREAM_METADATA\",\n                      params, reason);\n}\n\nstd::string formatPerRequestStateParseException(absl::string_view params) {\n  return absl::StrCat(\"Invalid header configuration. Expected format \"\n                      \"PER_REQUEST_STATE(<data_name>), actual format \"\n                      \"PER_REQUEST_STATE\",\n                      params);\n}\n\n// Parses the parameters for UPSTREAM_METADATA and returns a function suitable for accessing the\n// specified metadata from an StreamInfo::StreamInfo. 
Expects a string formatted as:\n//   ([\"a\", \"b\", \"c\"])\n// There must be at least 2 array elements (a metadata namespace and at least 1 key).\nstd::function<std::string(const Envoy::StreamInfo::StreamInfo&)>\nparseMetadataField(absl::string_view params_str, bool upstream = true) {\n  params_str = StringUtil::trim(params_str);\n  if (params_str.empty() || params_str.front() != '(' || params_str.back() != ')') {\n    throw EnvoyException(formatUpstreamMetadataParseException(params_str));\n  }\n\n  absl::string_view json = params_str.substr(1, params_str.size() - 2); // trim parens\n\n  std::vector<std::string> params;\n  try {\n    Json::ObjectSharedPtr parsed_params = Json::Factory::loadFromString(std::string(json));\n\n    for (const auto& param : parsed_params->asObjectArray()) {\n      params.emplace_back(param->asString());\n    }\n  } catch (Json::Exception& e) {\n    throw EnvoyException(formatUpstreamMetadataParseException(params_str, &e));\n  }\n\n  // Minimum parameters are a metadata namespace (e.g. 
\"envoy.lb\") and a metadata key.\n  if (params.size() < 2) {\n    throw EnvoyException(formatUpstreamMetadataParseException(params_str));\n  }\n\n  return [upstream, params](const Envoy::StreamInfo::StreamInfo& stream_info) -> std::string {\n    const envoy::config::core::v3::Metadata* metadata = nullptr;\n    if (upstream) {\n      Upstream::HostDescriptionConstSharedPtr host = stream_info.upstreamHost();\n      if (!host) {\n        return std::string();\n      }\n      metadata = host->metadata().get();\n    } else {\n      metadata = &(stream_info.dynamicMetadata());\n    }\n\n    const ProtobufWkt::Value* value =\n        &::Envoy::Config::Metadata::metadataValue(metadata, params[0], params[1]);\n    if (value->kind_case() == ProtobufWkt::Value::KIND_NOT_SET) {\n      // No kind indicates default ProtobufWkt::Value which means namespace or key not\n      // found.\n      return std::string();\n    }\n\n    size_t i = 2;\n    while (i < params.size()) {\n      if (!value->has_struct_value()) {\n        break;\n      }\n\n      const auto field_it = value->struct_value().fields().find(params[i]);\n      if (field_it == value->struct_value().fields().end()) {\n        return std::string();\n      }\n\n      value = &field_it->second;\n      i++;\n    }\n\n    if (i < params.size()) {\n      // Didn't find all the keys.\n      return std::string();\n    }\n\n    switch (value->kind_case()) {\n    case ProtobufWkt::Value::kNumberValue:\n      return fmt::format(\"{:g}\", value->number_value());\n\n    case ProtobufWkt::Value::kStringValue:\n      return value->string_value();\n\n    case ProtobufWkt::Value::kBoolValue:\n      return value->bool_value() ? 
\"true\" : \"false\";\n\n    default:\n      // Unsupported type or null value.\n      ENVOY_LOG_MISC(debug, \"unsupported value type for metadata [{}]\",\n                     absl::StrJoin(params, \", \"));\n      return std::string();\n    }\n  };\n}\n\n// Parses the parameters for PER_REQUEST_STATE and returns a function suitable for accessing the\n// specified metadata from an StreamInfo::StreamInfo. Expects a string formatted as:\n//   (<state_name>)\n// The state name is expected to be in reverse DNS format, though this is not enforced by\n// this function.\nstd::function<std::string(const Envoy::StreamInfo::StreamInfo&)>\nparsePerRequestStateField(absl::string_view param_str) {\n  absl::string_view modified_param_str = StringUtil::trim(param_str);\n  if (modified_param_str.empty() || modified_param_str.front() != '(' ||\n      modified_param_str.back() != ')') {\n    throw EnvoyException(formatPerRequestStateParseException(param_str));\n  }\n  modified_param_str = modified_param_str.substr(1, modified_param_str.size() - 2); // trim parens\n  if (modified_param_str.empty()) {\n    throw EnvoyException(formatPerRequestStateParseException(param_str));\n  }\n\n  std::string param(modified_param_str);\n  return [param](const Envoy::StreamInfo::StreamInfo& stream_info) -> std::string {\n    const Envoy::StreamInfo::FilterState& filter_state = stream_info.filterState();\n\n    // No such value means don't output anything.\n    if (!filter_state.hasDataWithName(param)) {\n      return std::string();\n    }\n\n    // Value exists but isn't string accessible is a contract violation; throw an error.\n    if (!filter_state.hasData<StringAccessor>(param)) {\n      ENVOY_LOG_MISC(debug,\n                     \"Invalid header information: PER_REQUEST_STATE value \\\"{}\\\" \"\n                     \"exists but is not string accessible\",\n                     param);\n      return std::string();\n    }\n\n    return 
std::string(filter_state.getDataReadOnly<StringAccessor>(param).asString());\n  };\n}\n\n// Parses the parameter for REQ and returns a function suitable for accessing the specified\n// request header from an StreamInfo::StreamInfo. Expects a string formatted as:\n//   (<header_name>)\nstd::function<std::string(const Envoy::StreamInfo::StreamInfo&)>\nparseRequestHeader(absl::string_view param) {\n  param = StringUtil::trim(param);\n  if (param.empty() || param.front() != '(') {\n    throw EnvoyException(fmt::format(\"Invalid header configuration. Expected format \"\n                                     \"REQ(<header-name>), actual format REQ{}\",\n                                     param));\n  }\n  ASSERT(param.back() == ')');               // Ensured by header_parser.\n  param = param.substr(1, param.size() - 2); // Trim parens.\n  Http::LowerCaseString header_name{std::string(param)};\n  return [header_name](const Envoy::StreamInfo::StreamInfo& stream_info) -> std::string {\n    if (const auto* request_headers = stream_info.getRequestHeaders()) {\n      if (const auto* entry = request_headers->get(header_name)) {\n        return std::string(entry->value().getStringView());\n      }\n    }\n    return std::string();\n  };\n}\n\n// Helper that handles the case when the ConnectionInfo is missing or if the desired value is\n// empty.\nStreamInfoHeaderFormatter::FieldExtractor sslConnectionInfoStringHeaderExtractor(\n    std::function<std::string(const Ssl::ConnectionInfo& connection_info)> string_extractor) {\n  return [string_extractor](const StreamInfo::StreamInfo& stream_info) {\n    if (stream_info.downstreamSslConnection() == nullptr) {\n      return std::string();\n    }\n\n    return string_extractor(*stream_info.downstreamSslConnection());\n  };\n}\n\n// Helper that handles the case when the desired time field is empty.\nStreamInfoHeaderFormatter::FieldExtractor sslConnectionInfoStringTimeHeaderExtractor(\n    std::function<absl::optional<SystemTime>(const 
Ssl::ConnectionInfo& connection_info)>\n        time_extractor) {\n  return sslConnectionInfoStringHeaderExtractor(\n      [time_extractor](const Ssl::ConnectionInfo& connection_info) {\n        absl::optional<SystemTime> time = time_extractor(connection_info);\n        if (!time.has_value()) {\n          return std::string();\n        }\n\n        return AccessLogDateTimeFormatter::fromTime(time.value());\n      });\n}\n\n} // namespace\n\nStreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_name, bool append)\n    : append_(append) {\n  if (field_name == \"PROTOCOL\") {\n    field_extractor_ = [](const Envoy::StreamInfo::StreamInfo& stream_info) {\n      return Envoy::Formatter::SubstitutionFormatUtils::protocolToStringOrDefault(\n          stream_info.protocol());\n    };\n  } else if (field_name == \"DOWNSTREAM_REMOTE_ADDRESS\") {\n    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {\n      return stream_info.downstreamRemoteAddress()->asString();\n    };\n  } else if (field_name == \"DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT\") {\n    field_extractor_ = [](const Envoy::StreamInfo::StreamInfo& stream_info) {\n      return StreamInfo::Utility::formatDownstreamAddressNoPort(\n          *stream_info.downstreamRemoteAddress());\n    };\n  } else if (field_name == \"DOWNSTREAM_LOCAL_ADDRESS\") {\n    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {\n      return stream_info.downstreamLocalAddress()->asString();\n    };\n  } else if (field_name == \"DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT\") {\n    field_extractor_ = [](const Envoy::StreamInfo::StreamInfo& stream_info) {\n      return StreamInfo::Utility::formatDownstreamAddressNoPort(\n          *stream_info.downstreamLocalAddress());\n    };\n  } else if (field_name == \"DOWNSTREAM_LOCAL_PORT\") {\n    field_extractor_ = [](const Envoy::StreamInfo::StreamInfo& stream_info) {\n      return StreamInfo::Utility::formatDownstreamAddressJustPort(\n          
*stream_info.downstreamLocalAddress());\n    };\n  } else if (field_name == \"DOWNSTREAM_PEER_URI_SAN\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return absl::StrJoin(connection_info.uriSanPeerCertificate(), \",\");\n        });\n  } else if (field_name == \"DOWNSTREAM_LOCAL_URI_SAN\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return absl::StrJoin(connection_info.uriSanLocalCertificate(), \",\");\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_ISSUER\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.issuerPeerCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_SUBJECT\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.subjectPeerCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_LOCAL_SUBJECT\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.subjectLocalCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_TLS_SESSION_ID\") {\n    field_extractor_ = sslConnectionInfoStringHeaderExtractor(\n        [](const Ssl::ConnectionInfo& connection_info) { return connection_info.sessionId(); });\n  } else if (field_name == \"DOWNSTREAM_TLS_CIPHER\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.ciphersuiteString();\n        });\n  } else if (field_name == \"DOWNSTREAM_TLS_VERSION\") {\n    field_extractor_ = sslConnectionInfoStringHeaderExtractor(\n        [](const Ssl::ConnectionInfo& 
connection_info) { return connection_info.tlsVersion(); });\n  } else if (field_name == \"DOWNSTREAM_PEER_FINGERPRINT_256\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.sha256PeerCertificateDigest();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_FINGERPRINT_1\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.sha1PeerCertificateDigest();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_SERIAL\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.serialNumberPeerCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_CERT\") {\n    field_extractor_ =\n        sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.urlEncodedPemEncodedPeerCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_CERT_V_START\") {\n    field_extractor_ =\n        sslConnectionInfoStringTimeHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.validFromPeerCertificate();\n        });\n  } else if (field_name == \"DOWNSTREAM_PEER_CERT_V_END\") {\n    field_extractor_ =\n        sslConnectionInfoStringTimeHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) {\n          return connection_info.expirationPeerCertificate();\n        });\n  } else if (field_name == \"UPSTREAM_REMOTE_ADDRESS\") {\n    field_extractor_ = [](const Envoy::StreamInfo::StreamInfo& stream_info) -> std::string {\n      if (stream_info.upstreamHost()) {\n        return stream_info.upstreamHost()->address()->asString();\n      }\n      return \"\";\n    };\n  } else if (absl::StartsWith(field_name, \"START_TIME\")) {\n    
const std::string pattern = fmt::format(\"%{}%\", field_name);\n    if (start_time_formatters_.find(pattern) == start_time_formatters_.end()) {\n      start_time_formatters_.emplace(\n          std::make_pair(pattern, Formatter::SubstitutionFormatParser::parse(pattern)));\n    }\n    field_extractor_ = [this, pattern](const Envoy::StreamInfo::StreamInfo& stream_info) {\n      const auto& formatters = start_time_formatters_.at(pattern);\n      std::string formatted;\n      for (const auto& formatter : formatters) {\n        const auto bit = formatter->format(*Http::StaticEmptyHeaders::get().request_headers,\n                                           *Http::StaticEmptyHeaders::get().response_headers,\n                                           *Http::StaticEmptyHeaders::get().response_trailers,\n                                           stream_info, absl::string_view());\n        absl::StrAppend(&formatted, bit.value_or(\"-\"));\n      }\n      return formatted;\n    };\n  } else if (absl::StartsWith(field_name, \"UPSTREAM_METADATA\")) {\n    field_extractor_ = parseMetadataField(field_name.substr(STATIC_STRLEN(\"UPSTREAM_METADATA\")));\n  } else if (absl::StartsWith(field_name, \"DYNAMIC_METADATA\")) {\n    field_extractor_ =\n        parseMetadataField(field_name.substr(STATIC_STRLEN(\"DYNAMIC_METADATA\")), false);\n  } else if (absl::StartsWith(field_name, \"PER_REQUEST_STATE\")) {\n    field_extractor_ =\n        parsePerRequestStateField(field_name.substr(STATIC_STRLEN(\"PER_REQUEST_STATE\")));\n  } else if (absl::StartsWith(field_name, \"REQ\")) {\n    field_extractor_ = parseRequestHeader(field_name.substr(STATIC_STRLEN(\"REQ\")));\n  } else if (field_name == \"HOSTNAME\") {\n    std::string hostname = Envoy::Formatter::SubstitutionFormatUtils::getHostnameOrDefault();\n    field_extractor_ = [hostname](const StreamInfo::StreamInfo&) { return hostname; };\n  } else if (field_name == \"RESPONSE_FLAGS\") {\n    field_extractor_ = [](const 
StreamInfo::StreamInfo& stream_info) {\n      return StreamInfo::ResponseFlagUtils::toShortString(stream_info);\n    };\n  } else if (field_name == \"RESPONSE_CODE_DETAILS\") {\n    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) -> std::string {\n      if (stream_info.responseCodeDetails().has_value()) {\n        return stream_info.responseCodeDetails().value();\n      }\n      return \"\";\n    };\n  } else {\n    throw EnvoyException(fmt::format(\"field '{}' not supported as custom header\", field_name));\n  }\n}\n\nconst std::string\nStreamInfoHeaderFormatter::format(const Envoy::StreamInfo::StreamInfo& stream_info) const {\n  return field_extractor_(stream_info);\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/header_formatter.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/formatter/substitution_formatter.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Interface for all types of header formatters used for custom request headers.\n */\nclass HeaderFormatter {\npublic:\n  virtual ~HeaderFormatter() = default;\n\n  virtual const std::string format(const Envoy::StreamInfo::StreamInfo& stream_info) const PURE;\n\n  /**\n   * @return bool indicating whether the formatted header should be appended to the existing\n   *              headers or replace any existing values for the header\n   */\n  virtual bool append() const PURE;\n};\n\nusing HeaderFormatterPtr = std::unique_ptr<HeaderFormatter>;\n\n/**\n * A formatter that expands the request header variable to a value based on info in StreamInfo.\n */\nclass StreamInfoHeaderFormatter : public HeaderFormatter {\npublic:\n  StreamInfoHeaderFormatter(absl::string_view field_name, bool append);\n\n  // HeaderFormatter::format\n  const std::string format(const Envoy::StreamInfo::StreamInfo& stream_info) const override;\n  bool append() const override { return append_; }\n\n  using FieldExtractor = std::function<std::string(const Envoy::StreamInfo::StreamInfo&)>;\n\nprivate:\n  FieldExtractor field_extractor_;\n  const bool append_;\n  absl::node_hash_map<std::string, std::vector<Envoy::Formatter::FormatterProviderPtr>>\n      start_time_formatters_;\n};\n\n/**\n * A formatter that returns back the same static header value.\n */\nclass PlainHeaderFormatter : public HeaderFormatter {\npublic:\n  PlainHeaderFormatter(const std::string& static_header_value, bool append)\n      : static_value_(static_header_value), append_(append) {}\n\n  // HeaderFormatter::format\n  const std::string format(const Envoy::StreamInfo::StreamInfo&) const override {\n    return static_value_;\n  };\n  bool append() 
const override { return append_; }\n\nprivate:\n  const std::string static_value_;\n  const bool append_;\n};\n\n/**\n * A formatter that produces a value by concatenating the results of multiple HeaderFormatters.\n */\nclass CompoundHeaderFormatter : public HeaderFormatter {\npublic:\n  CompoundHeaderFormatter(std::vector<HeaderFormatterPtr>&& formatters, bool append)\n      : formatters_(std::move(formatters)), append_(append) {}\n\n  // HeaderFormatter::format\n  const std::string format(const Envoy::StreamInfo::StreamInfo& stream_info) const override {\n    std::string buf;\n    for (const auto& formatter : formatters_) {\n      buf += formatter->format(stream_info);\n    }\n    return buf;\n  };\n  bool append() const override { return append_; }\n\nprivate:\n  const std::vector<HeaderFormatterPtr> formatters_;\n  const bool append_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/header_parser.cc",
    "content": "#include \"common/router/header_parser.h\"\n\n#include <cctype>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/http/headers.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_replace.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nnamespace {\n\nenum class ParserState {\n  Literal,                   // processing literal data\n  VariableName,              // consuming a %VAR% name\n  ExpectArray,               // expect starting [ in %VAR([...])%\n  ExpectString,              // expect starting \" in array of strings\n  String,                    // consuming an array element string\n  ExpectArrayDelimiterOrEnd, // expect array delimiter (,) or end of array (])\n  ExpectArgsEnd,             // expect closing ) in %VAR(...)%\n  ExpectVariableEnd          // expect closing % in %VAR(...)%\n};\n\nstd::string unescape(absl::string_view sv) { return absl::StrReplaceAll(sv, {{\"%%\", \"%\"}}); }\n\n// Implements a state machine to parse custom headers. Each character of the custom header format\n// is either literal text (with % escaped as %%) or part of a %VAR% or %VAR([\"args\"])% expression.\n// The statement machine does minimal validation of the arguments (if any) and does not know the\n// names of valid variables. 
Interpretation of the variable name and arguments is delegated to\n// StreamInfoHeaderFormatter.\nHeaderFormatterPtr parseInternal(const envoy::config::core::v3::HeaderValue& header_value,\n                                 bool append) {\n  const std::string& key = header_value.key();\n  // PGV constraints provide this guarantee.\n  ASSERT(!key.empty());\n  // We reject :path/:authority rewriting, there is already a well defined mechanism to\n  // perform this in the RouteAction, and doing this via request_headers_to_add\n  // will cause us to have to worry about interaction with other aspects of the\n  // RouteAction, e.g. prefix rewriting. We also reject other :-prefixed\n  // headers, since it seems dangerous and there doesn't appear a use case.\n  if (key[0] == ':') {\n    throw EnvoyException(\":-prefixed headers may not be modified\");\n  }\n\n  absl::string_view format(header_value.value());\n  if (format.empty()) {\n    return std::make_unique<PlainHeaderFormatter>(\"\", append);\n  }\n\n  std::vector<HeaderFormatterPtr> formatters;\n\n  size_t pos = 0, start = 0;\n  ParserState state = ParserState::Literal;\n  do {\n    const char ch = format[pos];\n    const bool has_next_ch = (pos + 1) < format.size();\n\n    switch (state) {\n    case ParserState::Literal:\n      // Searching for start of %VARIABLE% expression.\n      if (ch != '%') {\n        break;\n      }\n\n      if (!has_next_ch) {\n        throw EnvoyException(\n            fmt::format(\"Invalid header configuration. Un-escaped % at position {}\", pos));\n      }\n\n      if (format[pos + 1] == '%') {\n        // Escaped %, skip next character.\n        pos++;\n        break;\n      }\n\n      // Un-escaped %: start of variable name. 
Create a formatter for preceding characters, if\n      // any.\n      state = ParserState::VariableName;\n      if (pos > start) {\n        absl::string_view literal = format.substr(start, pos - start);\n        formatters.emplace_back(new PlainHeaderFormatter(unescape(literal), append));\n      }\n      start = pos + 1;\n      break;\n\n    case ParserState::VariableName:\n      // Consume \"VAR\" from \"%VAR%\" or \"%VAR(...)%\"\n      if (ch == '%') {\n        // Found complete variable name, add formatter.\n        formatters.emplace_back(\n            new StreamInfoHeaderFormatter(format.substr(start, pos - start), append));\n        start = pos + 1;\n        state = ParserState::Literal;\n        break;\n      }\n\n      if (ch == '(') {\n        // Variable with arguments, search for start of arg array.\n        state = ParserState::ExpectArray;\n      }\n      break;\n\n    case ParserState::ExpectArray:\n      // Skip over whitespace searching for the start of JSON array args.\n      if (ch == '[') {\n        // Search for first argument string\n        state = ParserState::ExpectString;\n      } else if (!isspace(ch)) {\n        // Consume it as a string argument.\n        state = ParserState::String;\n      }\n      break;\n\n    case ParserState::ExpectArrayDelimiterOrEnd:\n      // Skip over whitespace searching for a comma or close bracket.\n      if (ch == ',') {\n        state = ParserState::ExpectString;\n      } else if (ch == ']') {\n        state = ParserState::ExpectArgsEnd;\n      } else if (!isspace(ch)) {\n        throw EnvoyException(fmt::format(\n            \"Invalid header configuration. 
Expecting ',', ']', or whitespace after '{}', but \"\n            \"found '{}'\",\n            absl::StrCat(format.substr(start, pos - start)), ch));\n      }\n      break;\n\n    case ParserState::ExpectString:\n      // Skip over whitespace looking for the starting quote of a JSON string.\n      if (ch == '\"') {\n        state = ParserState::String;\n      } else if (!isspace(ch)) {\n        throw EnvoyException(fmt::format(\n            \"Invalid header configuration. Expecting '\\\"' or whitespace after '{}', but found '{}'\",\n            absl::StrCat(format.substr(start, pos - start)), ch));\n      }\n      break;\n\n    case ParserState::String:\n      // Consume a JSON string (ignoring backslash-escaped chars).\n      if (ch == '\\\\') {\n        if (!has_next_ch) {\n          throw EnvoyException(fmt::format(\n              \"Invalid header configuration. Un-terminated backslash in JSON string after '{}'\",\n              absl::StrCat(format.substr(start, pos - start))));\n        }\n\n        // Skip escaped char.\n        pos++;\n      } else if (ch == ')') {\n        state = ParserState::ExpectVariableEnd;\n      } else if (ch == '\"') {\n        state = ParserState::ExpectArrayDelimiterOrEnd;\n      }\n      break;\n\n    case ParserState::ExpectArgsEnd:\n      // Search for the closing paren of a %VAR(...)% expression.\n      if (ch == ')') {\n        state = ParserState::ExpectVariableEnd;\n      } else if (!isspace(ch)) {\n        throw EnvoyException(fmt::format(\n            \"Invalid header configuration. 
Expecting ')' or whitespace after '{}', but found '{}'\",\n            absl::StrCat(format.substr(start, pos - start)), ch));\n      }\n      break;\n\n    case ParserState::ExpectVariableEnd:\n      // Search for closing % of a %VAR(...)% expression\n      if (ch == '%') {\n        formatters.emplace_back(\n            new StreamInfoHeaderFormatter(format.substr(start, pos - start), append));\n        start = pos + 1;\n        state = ParserState::Literal;\n        break;\n      }\n\n      if (!isspace(ch)) {\n        throw EnvoyException(fmt::format(\n            \"Invalid header configuration. Expecting '%' or whitespace after '{}', but found '{}'\",\n            absl::StrCat(format.substr(start, pos - start)), ch));\n      }\n      break;\n\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  } while (++pos < format.size());\n\n  if (state != ParserState::Literal) {\n    // Parsing terminated mid-variable.\n    throw EnvoyException(\n        fmt::format(\"Invalid header configuration. 
Un-terminated variable expression '{}'\",\n                    absl::StrCat(format.substr(start, pos - start))));\n  }\n\n  if (pos > start) {\n    // Trailing constant data.\n    absl::string_view literal = format.substr(start, pos - start);\n    formatters.emplace_back(new PlainHeaderFormatter(unescape(literal), append));\n  }\n\n  ASSERT(!formatters.empty());\n\n  if (formatters.size() == 1) {\n    return std::move(formatters[0]);\n  }\n\n  return std::make_unique<CompoundHeaderFormatter>(std::move(formatters), append);\n}\n\n} // namespace\n\nHeaderParserPtr HeaderParser::configure(\n    const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValueOption>& headers_to_add) {\n  HeaderParserPtr header_parser(new HeaderParser());\n\n  for (const auto& header_value_option : headers_to_add) {\n    const bool append = PROTOBUF_GET_WRAPPED_OR_DEFAULT(header_value_option, append, true);\n    HeaderFormatterPtr header_formatter = parseInternal(header_value_option.header(), append);\n\n    header_parser->headers_to_add_.emplace_back(\n        Http::LowerCaseString(header_value_option.header().key()), std::move(header_formatter));\n  }\n\n  return header_parser;\n}\n\nHeaderParserPtr HeaderParser::configure(\n    const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValue>& headers_to_add,\n    bool append) {\n  HeaderParserPtr header_parser(new HeaderParser());\n\n  for (const auto& header_value : headers_to_add) {\n    HeaderFormatterPtr header_formatter = parseInternal(header_value, append);\n\n    header_parser->headers_to_add_.emplace_back(Http::LowerCaseString(header_value.key()),\n                                                std::move(header_formatter));\n  }\n\n  return header_parser;\n}\n\nHeaderParserPtr HeaderParser::configure(\n    const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValueOption>& headers_to_add,\n    const Protobuf::RepeatedPtrField<std::string>& headers_to_remove) {\n  HeaderParserPtr header_parser = 
configure(headers_to_add);\n\n  for (const auto& header : headers_to_remove) {\n    // We reject :-prefix (e.g. :path) removal here. This is dangerous, since other aspects of\n    // request finalization assume their existence and they are needed for well-formedness in most\n    // cases.\n    if (header[0] == ':' || Http::LowerCaseString(header).get() == \"host\") {\n      throw EnvoyException(\":-prefixed or host headers may not be removed\");\n    }\n    header_parser->headers_to_remove_.emplace_back(header);\n  }\n\n  return header_parser;\n}\n\nvoid HeaderParser::evaluateHeaders(Http::HeaderMap& headers,\n                                   const StreamInfo::StreamInfo& stream_info) const {\n  // Removing headers in the headers_to_remove_ list first makes\n  // remove-before-add the default behavior as expected by users.\n  for (const auto& header : headers_to_remove_) {\n    headers.remove(header);\n  }\n\n  for (const auto& formatter : headers_to_add_) {\n    const std::string value = formatter.second->format(stream_info);\n    if (!value.empty()) {\n      if (formatter.second->append()) {\n        headers.addReferenceKey(formatter.first, value);\n      } else {\n        headers.setReferenceKey(formatter.first, value);\n      }\n    }\n  }\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/header_parser.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/router/header_formatter.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nclass HeaderParser;\nusing HeaderParserPtr = std::unique_ptr<HeaderParser>;\n\n/**\n * HeaderParser manipulates Http::HeaderMap instances. Headers to be added are pre-parsed to select\n * between a constant value implementation and a dynamic value implementation based on\n * StreamInfo::StreamInfo fields.\n */\nclass HeaderParser {\npublic:\n  /*\n   * @param headers_to_add defines the headers to add during calls to evaluateHeaders\n   * @return HeaderParserPtr a configured HeaderParserPtr\n   */\n  static HeaderParserPtr configure(\n      const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValueOption>& headers_to_add);\n\n  /*\n   * @param headers_to_add defines headers to add during calls to evaluateHeaders.\n   * @param append defines whether headers will be appended or replaced.\n   * @return HeaderParserPtr a configured HeaderParserPtr.\n   */\n  static HeaderParserPtr\n  configure(const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValue>& headers_to_add,\n            bool append);\n\n  /*\n   * @param headers_to_add defines headers to add during calls to evaluateHeaders\n   * @param headers_to_remove defines headers to remove during calls to evaluateHeaders\n   * @return HeaderParserPtr a configured HeaderParserPtr\n   */\n  static HeaderParserPtr configure(\n      const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValueOption>& headers_to_add,\n      const Protobuf::RepeatedPtrField<std::string>& headers_to_remove);\n\n  void evaluateHeaders(Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info) const;\n\nprotected:\n  HeaderParser() = default;\n\nprivate:\n  
std::vector<std::pair<Http::LowerCaseString, HeaderFormatterPtr>> headers_to_add_;\n  std::vector<Http::LowerCaseString> headers_to_remove_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/metadatamatchcriteria_impl.cc",
    "content": "#include \"common/router/metadatamatchcriteria_impl.h\"\n\nnamespace Envoy {\nnamespace Router {\nstd::vector<MetadataMatchCriterionConstSharedPtr>\nMetadataMatchCriteriaImpl::extractMetadataMatchCriteria(const MetadataMatchCriteriaImpl* parent,\n                                                        const ProtobufWkt::Struct& matches) {\n  std::vector<MetadataMatchCriterionConstSharedPtr> v;\n\n  // Track locations of each name (from the parent) in v to make it\n  // easier to replace them when the same name exists in matches.\n  absl::node_hash_map<std::string, std::size_t> existing;\n\n  if (parent) {\n    for (const auto& it : parent->metadata_match_criteria_) {\n      // v.size() is the index of the emplaced name.\n      existing.emplace(it->name(), v.size());\n      v.emplace_back(it);\n    }\n  }\n\n  // Add values from matches, replacing name/values copied from parent.\n  for (const auto& it : matches.fields()) {\n    const auto index_it = existing.find(it.first);\n    if (index_it != existing.end()) {\n      v[index_it->second] = std::make_shared<MetadataMatchCriterionImpl>(it.first, it.second);\n    } else {\n      v.emplace_back(std::make_shared<MetadataMatchCriterionImpl>(it.first, it.second));\n    }\n  }\n\n  // Sort criteria by name to speed matching in the subset load balancer.\n  // See source/docs/subset_load_balancer.md.\n  std::sort(\n      v.begin(), v.end(),\n      [](const MetadataMatchCriterionConstSharedPtr& a,\n         const MetadataMatchCriterionConstSharedPtr& b) -> bool { return a->name() < b->name(); });\n\n  return v;\n}\n\nMetadataMatchCriteriaConstPtr\nMetadataMatchCriteriaImpl::filterMatchCriteria(const std::set<std::string>& names) const {\n\n  std::vector<MetadataMatchCriterionConstSharedPtr> v;\n\n  // iterating over metadata_match_criteria_ ensures correct order without sorting\n  for (const auto& it : metadata_match_criteria_) {\n    if (names.count(it->name()) == 1) {\n      v.emplace_back(it);\n    }\n  }\n 
 if (v.empty()) {\n    return nullptr;\n  }\n  return MetadataMatchCriteriaImplConstPtr(new MetadataMatchCriteriaImpl(v));\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/metadatamatchcriteria_impl.h",
    "content": "#pragma once\n\n#include \"envoy/router/router.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nclass MetadataMatchCriteriaImpl;\nusing MetadataMatchCriteriaImplConstPtr = std::unique_ptr<const MetadataMatchCriteriaImpl>;\n\nclass MetadataMatchCriteriaImpl : public MetadataMatchCriteria {\npublic:\n  MetadataMatchCriteriaImpl(const ProtobufWkt::Struct& metadata_matches)\n      : metadata_match_criteria_(extractMetadataMatchCriteria(nullptr, metadata_matches)){};\n\n  // MetadataMatchCriteria\n  const std::vector<MetadataMatchCriterionConstSharedPtr>& metadataMatchCriteria() const override {\n    return metadata_match_criteria_;\n  }\n\n  MetadataMatchCriteriaConstPtr\n  mergeMatchCriteria(const ProtobufWkt::Struct& metadata_matches) const override {\n    return MetadataMatchCriteriaImplConstPtr(\n        new MetadataMatchCriteriaImpl(extractMetadataMatchCriteria(this, metadata_matches)));\n  }\n\n  MetadataMatchCriteriaConstPtr\n  filterMatchCriteria(const std::set<std::string>& names) const override;\n\nprivate:\n  MetadataMatchCriteriaImpl(const std::vector<MetadataMatchCriterionConstSharedPtr>& criteria)\n      : metadata_match_criteria_(criteria){};\n\n  static std::vector<MetadataMatchCriterionConstSharedPtr>\n  extractMetadataMatchCriteria(const MetadataMatchCriteriaImpl* parent,\n                               const ProtobufWkt::Struct& metadata_matches);\n\n  const std::vector<MetadataMatchCriterionConstSharedPtr> metadata_match_criteria_;\n};\n\nclass MetadataMatchCriterionImpl : public MetadataMatchCriterion {\npublic:\n  MetadataMatchCriterionImpl(const std::string& name, const HashedValue& value)\n      : name_(name), value_(value) {}\n\n  const std::string& name() const override { return name_; }\n  const HashedValue& value() const override { return value_; }\n\nprivate:\n  const std::string name_;\n  const HashedValue value_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/rds_impl.cc",
    "content": "#include \"common/router/rds_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/api/v2/route.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/config_impl.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nRouteConfigProviderSharedPtr RouteConfigProviderUtil::create(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        config,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ProtobufMessage::ValidationVisitor& validator, Init::Manager& init_manager,\n    const std::string& stat_prefix, RouteConfigProviderManager& route_config_provider_manager) {\n  switch (config.route_specifier_case()) {\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      RouteSpecifierCase::kRouteConfig:\n    return route_config_provider_manager.createStaticRouteConfigProvider(\n        config.route_config(), factory_context, validator);\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      RouteSpecifierCase::kRds:\n    return route_config_provider_manager.createRdsRouteConfigProvider(\n        // At the creation of a RDS route config provider, the factory_context's initManager is\n        // always valid, though the init manager may go away later when the listener goes away.\n        config.rds(), factory_context, stat_prefix, 
init_manager);\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nStaticRouteConfigProviderImpl::StaticRouteConfigProviderImpl(\n    const envoy::config::route::v3::RouteConfiguration& config,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ProtobufMessage::ValidationVisitor& validator,\n    RouteConfigProviderManagerImpl& route_config_provider_manager)\n    : config_(new ConfigImpl(config, factory_context, validator, true)),\n      route_config_proto_{config}, last_updated_(factory_context.timeSource().systemTime()),\n      route_config_provider_manager_(route_config_provider_manager) {\n  route_config_provider_manager_.static_route_config_providers_.insert(this);\n}\n\nStaticRouteConfigProviderImpl::~StaticRouteConfigProviderImpl() {\n  route_config_provider_manager_.static_route_config_providers_.erase(this);\n}\n\n// TODO(htuch): If support for multiple clusters is added per #1170 cluster_name_\nRdsRouteConfigSubscription::RdsRouteConfigSubscription(\n    const envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n    const uint64_t manager_identifier, Server::Configuration::ServerFactoryContext& factory_context,\n    const std::string& stat_prefix,\n    Envoy::Router::RouteConfigProviderManagerImpl& route_config_provider_manager)\n    : Envoy::Config::SubscriptionBase<envoy::config::route::v3::RouteConfiguration>(\n          rds.config_source().resource_api_version(),\n          factory_context.messageValidationContext().dynamicValidationVisitor(), \"name\"),\n      route_config_name_(rds.route_config_name()),\n      scope_(factory_context.scope().createScope(stat_prefix + \"rds.\" + route_config_name_ + \".\")),\n      factory_context_(factory_context),\n      parent_init_target_(fmt::format(\"RdsRouteConfigSubscription init {}\", route_config_name_),\n                          [this]() { local_init_manager_.initialize(local_init_watcher_); }),\n      local_init_watcher_(fmt::format(\"RDS local-init-watcher 
{}\", rds.route_config_name()),\n                          [this]() { parent_init_target_.ready(); }),\n      local_init_target_(\n          fmt::format(\"RdsRouteConfigSubscription local-init-target {}\", route_config_name_),\n          [this]() { subscription_->start({route_config_name_}); }),\n      local_init_manager_(fmt::format(\"RDS local-init-manager {}\", route_config_name_)),\n      stat_prefix_(stat_prefix), stats_({ALL_RDS_STATS(POOL_COUNTER(*scope_))}),\n      route_config_provider_manager_(route_config_provider_manager),\n      manager_identifier_(manager_identifier) {\n  const auto resource_name = getResourceName();\n  subscription_ =\n      factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource(\n          rds.config_source(), Grpc::Common::typeUrl(resource_name), *scope_, *this,\n          resource_decoder_);\n  local_init_manager_.add(local_init_target_);\n  config_update_info_ =\n      std::make_unique<RouteConfigUpdateReceiverImpl>(factory_context.timeSource());\n}\n\nRdsRouteConfigSubscription::~RdsRouteConfigSubscription() {\n  // If we get destroyed during initialization, make sure we signal that we \"initialized\".\n  local_init_target_.ready();\n\n  // The ownership of RdsRouteConfigProviderImpl is shared among all HttpConnectionManagers that\n  // hold a shared_ptr to it. The RouteConfigProviderManager holds weak_ptrs to the\n  // RdsRouteConfigProviders. 
Therefore, the map entry for the RdsRouteConfigProvider has to get\n  // cleaned by the RdsRouteConfigProvider's destructor.\n  route_config_provider_manager_.dynamic_route_config_providers_.erase(manager_identifier_);\n}\n\nvoid RdsRouteConfigSubscription::onConfigUpdate(\n    const std::vector<Envoy::Config::DecodedResourceRef>& resources,\n    const std::string& version_info) {\n  if (!validateUpdateSize(resources.size())) {\n    return;\n  }\n  const auto& route_config = dynamic_cast<const envoy::config::route::v3::RouteConfiguration&>(\n      resources[0].get().resource());\n  if (route_config.name() != route_config_name_) {\n    throw EnvoyException(fmt::format(\"Unexpected RDS configuration (expecting {}): {}\",\n                                     route_config_name_, route_config.name()));\n  }\n  for (auto* provider : route_config_providers_) {\n    // This seems inefficient, though it is necessary to validate config in each context,\n    // especially when it comes with per_filter_config,\n    provider->validateConfig(route_config);\n  }\n  std::unique_ptr<Init::ManagerImpl> noop_init_manager;\n  std::unique_ptr<Cleanup> resume_rds;\n  if (config_update_info_->onRdsUpdate(route_config, version_info)) {\n    stats_.config_reload_.inc();\n    if (config_update_info_->routeConfiguration().has_vhds() &&\n        config_update_info_->vhdsConfigurationChanged()) {\n      ENVOY_LOG(\n          debug,\n          \"rds: vhds configuration present/changed, (re)starting vhds: config_name={} hash={}\",\n          route_config_name_, config_update_info_->configHash());\n      maybeCreateInitManager(version_info, noop_init_manager, resume_rds);\n      vhds_subscription_ = std::make_unique<VhdsSubscription>(\n          config_update_info_, factory_context_, stat_prefix_, route_config_providers_,\n          config_update_info_->routeConfiguration().vhds().config_source().resource_api_version());\n      vhds_subscription_->registerInitTargetWithInitManager(\n          
noop_init_manager == nullptr ? local_init_manager_ : *noop_init_manager);\n    }\n\n    ENVOY_LOG(debug, \"rds: loading new configuration: config_name={} hash={}\", route_config_name_,\n              config_update_info_->configHash());\n\n    for (auto* provider : route_config_providers_) {\n      provider->onConfigUpdate();\n    }\n    // RDS update removed VHDS configuration\n    if (!config_update_info_->routeConfiguration().has_vhds()) {\n      vhds_subscription_.release();\n    }\n\n    update_callback_manager_.runCallbacks();\n  }\n\n  local_init_target_.ready();\n}\n\n// Initialize a no-op InitManager in case the one in the factory_context has completed\n// initialization. This can happen if an RDS config update for an already established RDS\n// subscription contains VHDS configuration.\nvoid RdsRouteConfigSubscription::maybeCreateInitManager(\n    const std::string& version_info, std::unique_ptr<Init::ManagerImpl>& init_manager,\n    std::unique_ptr<Cleanup>& init_vhds) {\n  if (local_init_manager_.state() == Init::Manager::State::Initialized) {\n    init_manager = std::make_unique<Init::ManagerImpl>(\n        fmt::format(\"VHDS {}:{}\", route_config_name_, version_info));\n    init_vhds = std::make_unique<Cleanup>([this, &init_manager, version_info] {\n      // For new RDS subscriptions created after listener warming up, we don't wait for them to warm\n      // up.\n      Init::WatcherImpl noop_watcher(\n          // Note: we just throw it away.\n          fmt::format(\"VHDS ConfigUpdate watcher {}:{}\", route_config_name_, version_info),\n          []() { /*Do nothing.*/ });\n      init_manager->initialize(noop_watcher);\n    });\n  }\n}\n\nvoid RdsRouteConfigSubscription::onConfigUpdate(\n    const std::vector<Envoy::Config::DecodedResourceRef>& added_resources,\n    const Protobuf::RepeatedPtrField<std::string>& removed_resources, const std::string&) {\n  if (!removed_resources.empty()) {\n    // TODO(#2500) when on-demand resource loading is 
supported, an RDS removal may make sense\n    // (see discussion in #6879), and so we should do something other than ignoring here.\n    ENVOY_LOG(\n        error,\n        \"Server sent a delta RDS update attempting to remove a resource (name: {}). Ignoring.\",\n        removed_resources[0]);\n  }\n  if (!added_resources.empty()) {\n    onConfigUpdate(added_resources, added_resources[0].get().version());\n  }\n}\n\nvoid RdsRouteConfigSubscription::onConfigUpdateFailed(\n    Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException*) {\n  ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason);\n  // We need to allow server startup to continue, even if we have a bad\n  // config.\n  local_init_target_.ready();\n}\n\nvoid RdsRouteConfigSubscription::updateOnDemand(const std::string& aliases) {\n  if (vhds_subscription_.get() == nullptr) {\n    return;\n  }\n  vhds_subscription_->updateOnDemand(aliases);\n}\n\nbool RdsRouteConfigSubscription::validateUpdateSize(int num_resources) {\n  if (num_resources == 0) {\n    ENVOY_LOG(debug, \"Missing RouteConfiguration for {} in onConfigUpdate()\", route_config_name_);\n    stats_.update_empty_.inc();\n    local_init_target_.ready();\n    return false;\n  }\n  if (num_resources != 1) {\n    throw EnvoyException(fmt::format(\"Unexpected RDS resource length: {}\", num_resources));\n    // (would be a return false here)\n  }\n  return true;\n}\n\nRdsRouteConfigProviderImpl::RdsRouteConfigProviderImpl(\n    RdsRouteConfigSubscriptionSharedPtr&& subscription,\n    Server::Configuration::ServerFactoryContext& factory_context)\n    : subscription_(std::move(subscription)),\n      config_update_info_(subscription_->routeConfigUpdate()), factory_context_(factory_context),\n      validator_(factory_context.messageValidationContext().dynamicValidationVisitor()),\n      tls_(factory_context.threadLocal().allocateSlot()) {\n  ConfigConstSharedPtr initial_config;\n  if 
(config_update_info_->configInfo().has_value()) {\n    initial_config = std::make_shared<ConfigImpl>(config_update_info_->routeConfiguration(),\n                                                  factory_context_, validator_, false);\n  } else {\n    initial_config = std::make_shared<NullConfigImpl>();\n  }\n  tls_->set([initial_config](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<ThreadLocalConfig>(initial_config);\n  });\n  // It should be 1:1 mapping due to shared rds config.\n  ASSERT(subscription_->routeConfigProviders().empty());\n  subscription_->routeConfigProviders().insert(this);\n}\n\nRdsRouteConfigProviderImpl::~RdsRouteConfigProviderImpl() {\n  subscription_->routeConfigProviders().erase(this);\n  // It should be 1:1 mapping due to shared rds config.\n  ASSERT(subscription_->routeConfigProviders().empty());\n}\n\nRouter::ConfigConstSharedPtr RdsRouteConfigProviderImpl::config() {\n  return tls_->getTyped<ThreadLocalConfig>().config_;\n}\n\nvoid RdsRouteConfigProviderImpl::onConfigUpdate() {\n  ConfigConstSharedPtr new_config(new ConfigImpl(config_update_info_->routeConfiguration(),\n                                                 factory_context_, validator_, false));\n  tls_->runOnAllThreads([new_config](ThreadLocal::ThreadLocalObjectSharedPtr previous)\n                            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    auto prev_config = std::dynamic_pointer_cast<ThreadLocalConfig>(previous);\n    prev_config->config_ = new_config;\n    return previous;\n  });\n\n  const auto aliases = config_update_info_->resourceIdsInLastVhdsUpdate();\n  // Regular (non-VHDS) RDS updates don't populate aliases fields in resources.\n  if (aliases.empty()) {\n    return;\n  }\n\n  const auto config = std::static_pointer_cast<const ConfigImpl>(new_config);\n  // Notifies connections that RouteConfiguration update has been propagated.\n  // Callbacks processing is performed in FIFO order. 
The callback is skipped if alias used in\n  // the VHDS update request do not match the aliases in the update response\n  for (auto it = config_update_callbacks_.begin(); it != config_update_callbacks_.end();) {\n    auto found = aliases.find(it->alias_);\n    if (found != aliases.end()) {\n      // TODO(dmitri-d) HeaderMapImpl is expensive, need to profile this\n      auto host_header = Http::RequestHeaderMapImpl::create();\n      host_header->setHost(VhdsSubscription::aliasToDomainName(it->alias_));\n      const bool host_exists = config->virtualHostExists(*host_header);\n      std::weak_ptr<Http::RouteConfigUpdatedCallback> current_cb(it->cb_);\n      it->thread_local_dispatcher_.post([current_cb, host_exists] {\n        if (auto cb = current_cb.lock()) {\n          (*cb)(host_exists);\n        }\n      });\n      it = config_update_callbacks_.erase(it);\n    } else {\n      it++;\n    }\n  }\n}\n\nvoid RdsRouteConfigProviderImpl::validateConfig(\n    const envoy::config::route::v3::RouteConfiguration& config) const {\n  // TODO(lizan): consider cache the config here until onConfigUpdate.\n  ConfigImpl validation_config(config, factory_context_, validator_, false);\n}\n\n// Schedules a VHDS request on the main thread and queues up the callback to use when the VHDS\n// response has been propagated to the worker thread that was the request origin.\nvoid RdsRouteConfigProviderImpl::requestVirtualHostsUpdate(\n    const std::string& for_domain, Event::Dispatcher& thread_local_dispatcher,\n    std::weak_ptr<Http::RouteConfigUpdatedCallback> route_config_updated_cb) {\n  auto alias =\n      VhdsSubscription::domainNameToAlias(config_update_info_->routeConfigName(), for_domain);\n  // The RdsRouteConfigProviderImpl instance can go away before the dispatcher has a chance to\n  // execute the callback. 
still_alive shared_ptr will be deallocated when the current instance of\n  // the RdsRouteConfigProviderImpl is deallocated; we rely on a weak_ptr to still_alive flag to\n  // determine if the RdsRouteConfigProviderImpl instance is still valid.\n  factory_context_.dispatcher().post([this, maybe_still_alive = std::weak_ptr<bool>(still_alive_),\n                                      alias, &thread_local_dispatcher,\n                                      route_config_updated_cb]() -> void {\n    if (maybe_still_alive.lock()) {\n      subscription_->updateOnDemand(alias);\n      config_update_callbacks_.push_back({alias, thread_local_dispatcher, route_config_updated_cb});\n    }\n  });\n}\n\nRouteConfigProviderManagerImpl::RouteConfigProviderManagerImpl(Server::Admin& admin) {\n  config_tracker_entry_ =\n      admin.getConfigTracker().add(\"routes\", [this] { return dumpRouteConfigs(); });\n  // ConfigTracker keys must be unique. We are asserting that no one has stolen the \"routes\" key\n  // from us, since the returned entry will be nullptr if the key already exists.\n  RELEASE_ASSERT(config_tracker_entry_, \"\");\n}\n\nRouter::RouteConfigProviderSharedPtr RouteConfigProviderManagerImpl::createRdsRouteConfigProvider(\n    const envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n    Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix,\n    Init::Manager& init_manager) {\n  // RdsRouteConfigSubscriptions are unique based on their serialized RDS config.\n  const uint64_t manager_identifier = MessageUtil::hash(rds);\n  auto it = dynamic_route_config_providers_.find(manager_identifier);\n\n  if (it == dynamic_route_config_providers_.end()) {\n    // std::make_shared does not work for classes with private constructors. There are ways\n    // around it. 
However, since this is not a performance critical path we err on the side\n    // of simplicity.\n    RdsRouteConfigSubscriptionSharedPtr subscription(new RdsRouteConfigSubscription(\n        rds, manager_identifier, factory_context, stat_prefix, *this));\n    init_manager.add(subscription->parent_init_target_);\n    RdsRouteConfigProviderImplSharedPtr new_provider{\n        new RdsRouteConfigProviderImpl(std::move(subscription), factory_context)};\n    dynamic_route_config_providers_.insert(\n        {manager_identifier, std::weak_ptr<RdsRouteConfigProviderImpl>(new_provider)});\n    return new_provider;\n  } else {\n    // Because the RouteConfigProviderManager's weak_ptrs only get cleaned up\n    // in the RdsRouteConfigSubscription destructor, and the single threaded nature\n    // of this code, locking the weak_ptr will not fail.\n    auto existing_provider = it->second.lock();\n    RELEASE_ASSERT(existing_provider != nullptr,\n                   absl::StrCat(\"cannot find subscribed rds resource \", rds.route_config_name()));\n    init_manager.add(existing_provider->subscription_->parent_init_target_);\n    return existing_provider;\n  }\n}\n\nRouteConfigProviderPtr RouteConfigProviderManagerImpl::createStaticRouteConfigProvider(\n    const envoy::config::route::v3::RouteConfiguration& route_config,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ProtobufMessage::ValidationVisitor& validator) {\n  auto provider = std::make_unique<StaticRouteConfigProviderImpl>(route_config, factory_context,\n                                                                  validator, *this);\n  static_route_config_providers_.insert(provider.get());\n  return provider;\n}\n\nstd::unique_ptr<envoy::admin::v3::RoutesConfigDump>\nRouteConfigProviderManagerImpl::dumpRouteConfigs() const {\n  auto config_dump = std::make_unique<envoy::admin::v3::RoutesConfigDump>();\n\n  for (const auto& element : dynamic_route_config_providers_) {\n    const auto& 
subscription = element.second.lock()->subscription_;\n    // Because the RouteConfigProviderManager's weak_ptrs only get cleaned up\n    // in the RdsRouteConfigSubscription destructor, and the single threaded nature\n    // of this code, locking the weak_ptr will not fail.\n    ASSERT(subscription);\n    ASSERT(!subscription->route_config_providers_.empty());\n\n    if (subscription->routeConfigUpdate()->configInfo()) {\n      auto* dynamic_config = config_dump->mutable_dynamic_route_configs()->Add();\n      dynamic_config->set_version_info(subscription->routeConfigUpdate()->configVersion());\n      dynamic_config->mutable_route_config()->PackFrom(\n          API_RECOVER_ORIGINAL(subscription->routeConfigUpdate()->routeConfiguration()));\n      TimestampUtil::systemClockToTimestamp(subscription->routeConfigUpdate()->lastUpdated(),\n                                            *dynamic_config->mutable_last_updated());\n    }\n  }\n\n  for (const auto& provider : static_route_config_providers_) {\n    ASSERT(provider->configInfo());\n    auto* static_config = config_dump->mutable_static_route_configs()->Add();\n    static_config->mutable_route_config()->PackFrom(\n        API_RECOVER_ORIGINAL(provider->configInfo().value().config_));\n    TimestampUtil::systemClockToTimestamp(provider->lastUpdated(),\n                                          *static_config->mutable_last_updated());\n  }\n\n  return config_dump;\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/rds_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <queue>\n#include <string>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route.pb.validate.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/router/rds.h\"\n#include \"envoy/router/route_config_provider_manager.h\"\n#include \"envoy/router/route_config_update_receiver.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/callback_impl.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/logger.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/init/target_impl.h\"\n#include \"common/init/watcher_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/route_config_update_receiver_impl.h\"\n#include \"common/router/vhds.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/container/node_hash_set.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n// For friend class declaration in RdsRouteConfigSubscription.\nclass ScopedRdsConfigSubscription;\n\n/**\n * Route configuration provider utilities.\n */\nclass RouteConfigProviderUtil {\npublic:\n  /**\n   * @return RouteConfigProviderSharedPtr a new route configuration provider based on the supplied\n   * proto configuration. 
Notes the provider object could be shared among multiple listeners.\n   */\n  static RouteConfigProviderSharedPtr create(\n      const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n          config,\n      Server::Configuration::ServerFactoryContext& factory_context,\n      ProtobufMessage::ValidationVisitor& validator, Init::Manager& init_manager,\n      const std::string& stat_prefix, RouteConfigProviderManager& route_config_provider_manager);\n};\n\nclass RouteConfigProviderManagerImpl;\n\n/**\n * Implementation of RouteConfigProvider that holds a static route configuration.\n */\nclass StaticRouteConfigProviderImpl : public RouteConfigProvider {\npublic:\n  StaticRouteConfigProviderImpl(const envoy::config::route::v3::RouteConfiguration& config,\n                                Server::Configuration::ServerFactoryContext& factory_context,\n                                ProtobufMessage::ValidationVisitor& validator,\n                                RouteConfigProviderManagerImpl& route_config_provider_manager);\n  ~StaticRouteConfigProviderImpl() override;\n\n  // Router::RouteConfigProvider\n  Router::ConfigConstSharedPtr config() override { return config_; }\n  absl::optional<ConfigInfo> configInfo() const override {\n    return ConfigInfo{route_config_proto_, \"\"};\n  }\n  SystemTime lastUpdated() const override { return last_updated_; }\n  void onConfigUpdate() override {}\n  void validateConfig(const envoy::config::route::v3::RouteConfiguration&) const override {}\n  void requestVirtualHostsUpdate(const std::string&, Event::Dispatcher&,\n                                 std::weak_ptr<Http::RouteConfigUpdatedCallback>) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\nprivate:\n  ConfigConstSharedPtr config_;\n  envoy::config::route::v3::RouteConfiguration route_config_proto_;\n  SystemTime last_updated_;\n  RouteConfigProviderManagerImpl& route_config_provider_manager_;\n};\n\n/**\n * All RDS stats. 
@see stats_macros.h\n */\n#define ALL_RDS_STATS(COUNTER)                                                                     \\\n  COUNTER(config_reload)                                                                           \\\n  COUNTER(update_empty)\n\n/**\n * Struct definition for all RDS stats. @see stats_macros.h\n */\nstruct RdsStats {\n  ALL_RDS_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nclass RdsRouteConfigProviderImpl;\n\n/**\n * A class that fetches the route configuration dynamically using the RDS API and updates them to\n * RDS config providers.\n */\nclass RdsRouteConfigSubscription\n    : Envoy::Config::SubscriptionBase<envoy::config::route::v3::RouteConfiguration>,\n      Logger::Loggable<Logger::Id::router> {\npublic:\n  ~RdsRouteConfigSubscription() override;\n\n  absl::node_hash_set<RouteConfigProvider*>& routeConfigProviders() {\n    ASSERT(route_config_providers_.size() == 1 || route_config_providers_.empty());\n    return route_config_providers_;\n  }\n  RouteConfigUpdatePtr& routeConfigUpdate() { return config_update_info_; }\n  void updateOnDemand(const std::string& aliases);\n  void maybeCreateInitManager(const std::string& version_info,\n                              std::unique_ptr<Init::ManagerImpl>& init_manager,\n                              std::unique_ptr<Cleanup>& resume_rds);\n\nprivate:\n  // Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<Envoy::Config::DecodedResourceRef>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(const std::vector<Envoy::Config::DecodedResourceRef>& added_resources,\n                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                      const std::string& system_version_info) override;\n  void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                            const EnvoyException* e) override;\n\n  Common::CallbackHandle* addUpdateCallback(std::function<void()> 
callback) {\n    return update_callback_manager_.add(callback);\n  }\n\n  RdsRouteConfigSubscription(\n      const envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n      const uint64_t manager_identifier,\n      Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix,\n      RouteConfigProviderManagerImpl& route_config_provider_manager);\n\n  bool validateUpdateSize(int num_resources);\n\n  const std::string route_config_name_;\n  // This scope must outlive the subscription_ below as the subscription has derived stats.\n  Stats::ScopePtr scope_;\n  Envoy::Config::SubscriptionPtr subscription_;\n  Server::Configuration::ServerFactoryContext& factory_context_;\n\n  // Init target used to notify the parent init manager that the subscription [and its sub resource]\n  // is ready.\n  Init::SharedTargetImpl parent_init_target_;\n  // Init watcher on RDS and VHDS ready event. This watcher marks parent_init_target_ ready.\n  Init::WatcherImpl local_init_watcher_;\n  // Target which starts the RDS subscription.\n  Init::TargetImpl local_init_target_;\n  Init::ManagerImpl local_init_manager_;\n  std::string stat_prefix_;\n  RdsStats stats_;\n  RouteConfigProviderManagerImpl& route_config_provider_manager_;\n  const uint64_t manager_identifier_;\n  // TODO(lambdai): Prove that a subscription has exactly one provider and remove the container.\n  absl::node_hash_set<RouteConfigProvider*> route_config_providers_;\n  VhdsSubscriptionPtr vhds_subscription_;\n  RouteConfigUpdatePtr config_update_info_;\n  Common::CallbackManager<> update_callback_manager_;\n\n  friend class RouteConfigProviderManagerImpl;\n  // Access to addUpdateCallback\n  friend class ScopedRdsConfigSubscription;\n};\n\nusing RdsRouteConfigSubscriptionSharedPtr = std::shared_ptr<RdsRouteConfigSubscription>;\n\nstruct UpdateOnDemandCallback {\n  const std::string alias_;\n  Event::Dispatcher& thread_local_dispatcher_;\n  
std::weak_ptr<Http::RouteConfigUpdatedCallback> cb_;\n};\n\n/**\n * Implementation of RouteConfigProvider that fetches the route configuration dynamically using\n * the subscription.\n */\nclass RdsRouteConfigProviderImpl : public RouteConfigProvider,\n                                   Logger::Loggable<Logger::Id::router> {\npublic:\n  ~RdsRouteConfigProviderImpl() override;\n\n  RdsRouteConfigSubscription& subscription() { return *subscription_; }\n\n  // Router::RouteConfigProvider\n  Router::ConfigConstSharedPtr config() override;\n  absl::optional<ConfigInfo> configInfo() const override {\n    return config_update_info_->configInfo();\n  }\n  SystemTime lastUpdated() const override { return config_update_info_->lastUpdated(); }\n  void onConfigUpdate() override;\n  void requestVirtualHostsUpdate(\n      const std::string& for_domain, Event::Dispatcher& thread_local_dispatcher,\n      std::weak_ptr<Http::RouteConfigUpdatedCallback> route_config_updated_cb) override;\n  void validateConfig(const envoy::config::route::v3::RouteConfiguration& config) const override;\n\nprivate:\n  struct ThreadLocalConfig : public ThreadLocal::ThreadLocalObject {\n    ThreadLocalConfig(ConfigConstSharedPtr initial_config) : config_(std::move(initial_config)) {}\n    ConfigConstSharedPtr config_;\n  };\n\n  RdsRouteConfigProviderImpl(RdsRouteConfigSubscriptionSharedPtr&& subscription,\n                             Server::Configuration::ServerFactoryContext& factory_context);\n\n  RdsRouteConfigSubscriptionSharedPtr subscription_;\n  RouteConfigUpdatePtr& config_update_info_;\n  Server::Configuration::ServerFactoryContext& factory_context_;\n  ProtobufMessage::ValidationVisitor& validator_;\n  ThreadLocal::SlotPtr tls_;\n  std::list<UpdateOnDemandCallback> config_update_callbacks_;\n  // A flag used to determine if this instance of RdsRouteConfigProviderImpl hasn't been\n  // deallocated. 
Please also see a comment in requestVirtualHostsUpdate() method implementation.\n  std::shared_ptr<bool> still_alive_{std::make_shared<bool>(true)};\n\n  friend class RouteConfigProviderManagerImpl;\n};\n\nusing RdsRouteConfigProviderImplSharedPtr = std::shared_ptr<RdsRouteConfigProviderImpl>;\n\nclass RouteConfigProviderManagerImpl : public RouteConfigProviderManager,\n                                       public Singleton::Instance {\npublic:\n  RouteConfigProviderManagerImpl(Server::Admin& admin);\n\n  std::unique_ptr<envoy::admin::v3::RoutesConfigDump> dumpRouteConfigs() const;\n\n  // RouteConfigProviderManager\n  RouteConfigProviderSharedPtr createRdsRouteConfigProvider(\n      const envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n      Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix,\n      Init::Manager& init_manager) override;\n\n  RouteConfigProviderPtr\n  createStaticRouteConfigProvider(const envoy::config::route::v3::RouteConfiguration& route_config,\n                                  Server::Configuration::ServerFactoryContext& factory_context,\n                                  ProtobufMessage::ValidationVisitor& validator) override;\n\nprivate:\n  // TODO(jsedgwick) These two members are prime candidates for the owned-entry list/map\n  // as in ConfigTracker. I.e. the ProviderImpls would have an EntryOwner for these lists\n  // Then the lifetime management stuff is centralized and opaque.\n  absl::node_hash_map<uint64_t, std::weak_ptr<RdsRouteConfigProviderImpl>>\n      dynamic_route_config_providers_;\n  absl::node_hash_set<RouteConfigProvider*> static_route_config_providers_;\n  Server::ConfigTracker::EntryOwnerPtr config_tracker_entry_;\n\n  friend class RdsRouteConfigSubscription;\n  friend class StaticRouteConfigProviderImpl;\n};\n\nusing RouteConfigProviderManagerImplPtr = std::unique_ptr<RouteConfigProviderManagerImpl>;\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/reset_header_parser.cc",
    "content": "#include \"common/router/reset_header_parser.h\"\n\n#include <cstdint>\n\n#include \"common/common/assert.h\"\n\n#include \"absl/strings/numbers.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nResetHeaderParserImpl::ResetHeaderParserImpl(\n    const envoy::config::route::v3::RetryPolicy::ResetHeader& config)\n    : name_(config.name()) {\n  switch (config.format()) {\n  case envoy::config::route::v3::RetryPolicy::SECONDS:\n    format_ = ResetHeaderFormat::Seconds;\n    break;\n  case envoy::config::route::v3::RetryPolicy::UNIX_TIMESTAMP:\n    format_ = ResetHeaderFormat::UnixTimestamp;\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nabsl::optional<std::chrono::milliseconds>\nResetHeaderParserImpl::parseInterval(TimeSource& time_source,\n                                     const Http::HeaderMap& headers) const {\n  const Http::HeaderEntry* header = headers.get(name_);\n\n  if (header == nullptr) {\n    return absl::nullopt;\n  }\n\n  const auto& header_value = header->value().getStringView();\n  uint64_t num_seconds{};\n\n  switch (format_) {\n  case ResetHeaderFormat::Seconds:\n    if (absl::SimpleAtoi(header_value, &num_seconds)) {\n      return absl::optional<std::chrono::milliseconds>(num_seconds * 1000UL);\n    }\n    break;\n\n  case ResetHeaderFormat::UnixTimestamp:\n    if (absl::SimpleAtoi(header_value, &num_seconds)) {\n      const auto time_now = time_source.systemTime().time_since_epoch();\n      const uint64_t timestamp = std::chrono::duration_cast<std::chrono::seconds>(time_now).count();\n\n      if (num_seconds < timestamp) {\n        return absl::nullopt;\n      }\n\n      const uint64_t interval = num_seconds - timestamp;\n      return absl::optional<std::chrono::milliseconds>(interval * 1000UL);\n    }\n    break;\n\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  return absl::nullopt;\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/reset_header_parser.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/router/router.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nenum class ResetHeaderFormat { Seconds, UnixTimestamp };\n\n/**\n * A ResetHeaderParser specifies a header name and a format to match against\n * response headers that are used to signal a rate limit interval reset, such\n * as Retry-After or X-RateLimit-Reset.\n */\nclass ResetHeaderParserImpl : public ResetHeaderParser {\npublic:\n  /**\n   * Build a vector of ResetHeaderParserSharedPtr given input config.\n   */\n  static std::vector<ResetHeaderParserSharedPtr> buildResetHeaderParserVector(\n      const Protobuf::RepeatedPtrField<envoy::config::route::v3::RetryPolicy::ResetHeader>&\n          reset_headers) {\n    std::vector<ResetHeaderParserSharedPtr> ret;\n    for (const auto& reset_header : reset_headers) {\n      ret.emplace_back(std::make_shared<ResetHeaderParserImpl>(reset_header));\n    }\n    return ret;\n  }\n\n  ResetHeaderParserImpl(const envoy::config::route::v3::RetryPolicy::ResetHeader& config);\n\n  const Http::LowerCaseString& name() const { return name_; }\n  ResetHeaderFormat format() const { return format_; }\n\n  /**\n   * Iterate over the headers, choose the first one that matches by name, and try to parse its\n   * value.\n   */\n  absl::optional<std::chrono::milliseconds>\n  parseInterval(TimeSource& time_source, const Http::HeaderMap& headers) const override;\n\nprivate:\n  const Http::LowerCaseString name_;\n  ResetHeaderFormat format_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/retry_state_impl.cc",
    "content": "#include \"common/router/retry_state_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n// These are defined in envoy/router/router.h, however during certain cases the compiler is\n// refusing to use the header version so allocate space here.\nconst uint32_t RetryPolicy::RETRY_ON_5XX;\nconst uint32_t RetryPolicy::RETRY_ON_GATEWAY_ERROR;\nconst uint32_t RetryPolicy::RETRY_ON_CONNECT_FAILURE;\nconst uint32_t RetryPolicy::RETRY_ON_ENVOY_RATE_LIMITED;\nconst uint32_t RetryPolicy::RETRY_ON_RETRIABLE_4XX;\nconst uint32_t RetryPolicy::RETRY_ON_RETRIABLE_HEADERS;\nconst uint32_t RetryPolicy::RETRY_ON_RETRIABLE_STATUS_CODES;\nconst uint32_t RetryPolicy::RETRY_ON_RESET;\nconst uint32_t RetryPolicy::RETRY_ON_GRPC_CANCELLED;\nconst uint32_t RetryPolicy::RETRY_ON_GRPC_DEADLINE_EXCEEDED;\nconst uint32_t RetryPolicy::RETRY_ON_GRPC_RESOURCE_EXHAUSTED;\nconst uint32_t RetryPolicy::RETRY_ON_GRPC_UNAVAILABLE;\n\nRetryStatePtr RetryStateImpl::create(const RetryPolicy& route_policy,\n                                     Http::RequestHeaderMap& request_headers,\n                                     const Upstream::ClusterInfo& cluster,\n                                     const VirtualCluster* vcluster, Runtime::Loader& runtime,\n                                     Random::RandomGenerator& random, Event::Dispatcher& dispatcher,\n                                     TimeSource& time_source, Upstream::ResourcePriority priority) {\n  RetryStatePtr ret;\n\n  // We short circuit here and do not bother with an allocation if there is no chance we will retry.\n  if 
(request_headers.EnvoyRetryOn() || request_headers.EnvoyRetryGrpcOn() ||\n      route_policy.retryOn()) {\n    ret.reset(new RetryStateImpl(route_policy, request_headers, cluster, vcluster, runtime, random,\n                                 dispatcher, time_source, priority));\n  }\n\n  // Consume all retry related headers to avoid them being propagated to the upstream\n  request_headers.removeEnvoyRetryOn();\n  request_headers.removeEnvoyRetryGrpcOn();\n  request_headers.removeEnvoyMaxRetries();\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.consume_all_retry_headers\")) {\n    request_headers.removeEnvoyHedgeOnPerTryTimeout();\n    request_headers.removeEnvoyRetriableHeaderNames();\n    request_headers.removeEnvoyRetriableStatusCodes();\n    request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs();\n  }\n\n  return ret;\n}\n\nRetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy,\n                               Http::RequestHeaderMap& request_headers,\n                               const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster,\n                               Runtime::Loader& runtime, Random::RandomGenerator& random,\n                               Event::Dispatcher& dispatcher, TimeSource& time_source,\n                               Upstream::ResourcePriority priority)\n    : cluster_(cluster), vcluster_(vcluster), runtime_(runtime), random_(random),\n      dispatcher_(dispatcher), time_source_(time_source), retry_on_(route_policy.retryOn()),\n      retries_remaining_(route_policy.numRetries()), priority_(priority),\n      retry_host_predicates_(route_policy.retryHostPredicates()),\n      retry_priority_(route_policy.retryPriority()),\n      retriable_status_codes_(route_policy.retriableStatusCodes()),\n      retriable_headers_(route_policy.retriableHeaders()),\n      reset_headers_(route_policy.resetHeaders()),\n      reset_max_interval_(route_policy.resetMaxInterval()) {\n\n  std::chrono::milliseconds 
base_interval(\n      runtime_.snapshot().getInteger(\"upstream.base_retry_backoff_ms\", 25));\n  if (route_policy.baseInterval()) {\n    base_interval = *route_policy.baseInterval();\n  }\n\n  // By default, cap the max interval to 10 times the base interval to ensure reasonable back-off\n  // intervals.\n  std::chrono::milliseconds max_interval = base_interval * 10;\n  if (route_policy.maxInterval()) {\n    max_interval = *route_policy.maxInterval();\n  }\n\n  backoff_strategy_ = std::make_unique<JitteredExponentialBackOffStrategy>(\n      base_interval.count(), max_interval.count(), random_);\n  host_selection_max_attempts_ = route_policy.hostSelectionMaxAttempts();\n\n  // Merge in the headers.\n  if (request_headers.EnvoyRetryOn()) {\n    retry_on_ |= parseRetryOn(request_headers.getEnvoyRetryOnValue()).first;\n  }\n  if (request_headers.EnvoyRetryGrpcOn()) {\n    retry_on_ |= parseRetryGrpcOn(request_headers.getEnvoyRetryGrpcOnValue()).first;\n  }\n\n  const auto& retriable_request_headers = route_policy.retriableRequestHeaders();\n  if (!retriable_request_headers.empty()) {\n    // If this route limits retries by request headers, make sure there is a match.\n    bool request_header_match = false;\n    for (const auto& retriable_header : retriable_request_headers) {\n      if (retriable_header->matchesHeaders(request_headers)) {\n        request_header_match = true;\n        break;\n      }\n    }\n\n    if (!request_header_match) {\n      retry_on_ = 0;\n    }\n  }\n  if (retry_on_ != 0 && request_headers.EnvoyMaxRetries()) {\n    uint64_t temp;\n    if (absl::SimpleAtoi(request_headers.getEnvoyMaxRetriesValue(), &temp)) {\n      // The max retries header takes precedence if set.\n      retries_remaining_ = temp;\n    }\n  }\n\n  if (request_headers.EnvoyRetriableStatusCodes()) {\n    for (const auto& code :\n         StringUtil::splitToken(request_headers.getEnvoyRetriableStatusCodesValue(), \",\")) {\n      unsigned int out;\n      if 
(absl::SimpleAtoi(code, &out)) {\n        retriable_status_codes_.emplace_back(out);\n      }\n    }\n  }\n\n  if (request_headers.EnvoyRetriableHeaderNames()) {\n    // Retriable headers in the configuration are specified via HeaderMatcher.\n    // Giving the same flexibility via request header would require the user\n    // to provide HeaderMatcher serialized into a string. To avoid this extra\n    // complexity we only support name-only header matchers via request\n    // header. Anything more sophisticated needs to be provided via config.\n    for (const auto& header_name : StringUtil::splitToken(\n             request_headers.EnvoyRetriableHeaderNames()->value().getStringView(), \",\")) {\n      envoy::config::route::v3::HeaderMatcher header_matcher;\n      header_matcher.set_name(std::string(absl::StripAsciiWhitespace(header_name)));\n      retriable_headers_.emplace_back(\n          std::make_shared<Http::HeaderUtility::HeaderData>(header_matcher));\n    }\n  }\n}\n\nRetryStateImpl::~RetryStateImpl() { resetRetry(); }\n\nvoid RetryStateImpl::enableBackoffTimer() {\n  if (!retry_timer_) {\n    retry_timer_ = dispatcher_.createTimer([this]() -> void { callback_(); });\n  }\n\n  if (ratelimited_backoff_strategy_ != nullptr) {\n    // If we have a backoff strategy based on rate limit feedback from the response we use it.\n    retry_timer_->enableTimer(\n        std::chrono::milliseconds(ratelimited_backoff_strategy_->nextBackOffMs()));\n\n    // The strategy is only valid for the response that sent the ratelimit reset header and cannot\n    // be reused.\n    ratelimited_backoff_strategy_.reset();\n\n    cluster_.stats().upstream_rq_retry_backoff_ratelimited_.inc();\n\n  } else {\n    // Otherwise we use a fully jittered exponential backoff algorithm.\n    retry_timer_->enableTimer(std::chrono::milliseconds(backoff_strategy_->nextBackOffMs()));\n\n    cluster_.stats().upstream_rq_retry_backoff_exponential_.inc();\n  }\n}\n\nstd::pair<uint32_t, bool> 
RetryStateImpl::parseRetryOn(absl::string_view config) {\n  uint32_t ret = 0;\n  bool all_fields_valid = true;\n  for (const auto& retry_on : StringUtil::splitToken(config, \",\", false, true)) {\n    if (retry_on == Http::Headers::get().EnvoyRetryOnValues._5xx) {\n      ret |= RetryPolicy::RETRY_ON_5XX;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.GatewayError) {\n      ret |= RetryPolicy::RETRY_ON_GATEWAY_ERROR;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.ConnectFailure) {\n      ret |= RetryPolicy::RETRY_ON_CONNECT_FAILURE;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.EnvoyRateLimited) {\n      ret |= RetryPolicy::RETRY_ON_ENVOY_RATE_LIMITED;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.Retriable4xx) {\n      ret |= RetryPolicy::RETRY_ON_RETRIABLE_4XX;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.RefusedStream) {\n      ret |= RetryPolicy::RETRY_ON_REFUSED_STREAM;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.RetriableStatusCodes) {\n      ret |= RetryPolicy::RETRY_ON_RETRIABLE_STATUS_CODES;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.RetriableHeaders) {\n      ret |= RetryPolicy::RETRY_ON_RETRIABLE_HEADERS;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.Reset) {\n      ret |= RetryPolicy::RETRY_ON_RESET;\n    } else {\n      all_fields_valid = false;\n    }\n  }\n\n  return {ret, all_fields_valid};\n}\n\nstd::pair<uint32_t, bool> RetryStateImpl::parseRetryGrpcOn(absl::string_view retry_grpc_on_header) {\n  uint32_t ret = 0;\n  bool all_fields_valid = true;\n  for (const auto& retry_on : StringUtil::splitToken(retry_grpc_on_header, \",\", false, true)) {\n    if (retry_on == Http::Headers::get().EnvoyRetryOnGrpcValues.Cancelled) {\n      ret |= RetryPolicy::RETRY_ON_GRPC_CANCELLED;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnGrpcValues.DeadlineExceeded) {\n      ret 
|= RetryPolicy::RETRY_ON_GRPC_DEADLINE_EXCEEDED;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnGrpcValues.ResourceExhausted) {\n      ret |= RetryPolicy::RETRY_ON_GRPC_RESOURCE_EXHAUSTED;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnGrpcValues.Unavailable) {\n      ret |= RetryPolicy::RETRY_ON_GRPC_UNAVAILABLE;\n    } else if (retry_on == Http::Headers::get().EnvoyRetryOnGrpcValues.Internal) {\n      ret |= RetryPolicy::RETRY_ON_GRPC_INTERNAL;\n    } else {\n      all_fields_valid = false;\n    }\n  }\n\n  return {ret, all_fields_valid};\n}\n\nabsl::optional<std::chrono::milliseconds>\nRetryStateImpl::parseResetInterval(const Http::ResponseHeaderMap& response_headers) const {\n  for (const auto& reset_header : reset_headers_) {\n    const auto& interval = reset_header->parseInterval(time_source_, response_headers);\n    if (interval.has_value() && interval.value() <= reset_max_interval_) {\n      return interval;\n    }\n  }\n\n  return absl::nullopt;\n}\n\nvoid RetryStateImpl::resetRetry() {\n  if (callback_) {\n    cluster_.resourceManager(priority_).retries().dec();\n    callback_ = nullptr;\n  }\n}\n\nRetryStatus RetryStateImpl::shouldRetry(bool would_retry, DoRetryCallback callback) {\n  // If a callback is armed from a previous shouldRetry and we don't need to\n  // retry this particular request, we can infer that we did a retry earlier\n  // and it was successful.\n  if (callback_ && !would_retry) {\n    cluster_.stats().upstream_rq_retry_success_.inc();\n    if (vcluster_) {\n      vcluster_->stats().upstream_rq_retry_success_.inc();\n    }\n  }\n\n  resetRetry();\n\n  if (!would_retry) {\n    return RetryStatus::No;\n  }\n\n  // The request has exhausted the number of retries allotted to it by the retry policy configured\n  // (or the x-envoy-max-retries header).\n  if (retries_remaining_ == 0) {\n    cluster_.stats().upstream_rq_retry_limit_exceeded_.inc();\n    if (vcluster_) {\n      
vcluster_->stats().upstream_rq_retry_limit_exceeded_.inc();\n    }\n    return RetryStatus::NoRetryLimitExceeded;\n  }\n\n  retries_remaining_--;\n\n  if (!cluster_.resourceManager(priority_).retries().canCreate()) {\n    cluster_.stats().upstream_rq_retry_overflow_.inc();\n    if (vcluster_) {\n      vcluster_->stats().upstream_rq_retry_overflow_.inc();\n    }\n    return RetryStatus::NoOverflow;\n  }\n\n  if (!runtime_.snapshot().featureEnabled(\"upstream.use_retry\", 100)) {\n    return RetryStatus::No;\n  }\n\n  ASSERT(!callback_);\n  callback_ = callback;\n  cluster_.resourceManager(priority_).retries().inc();\n  cluster_.stats().upstream_rq_retry_.inc();\n  if (vcluster_) {\n    vcluster_->stats().upstream_rq_retry_.inc();\n  }\n  enableBackoffTimer();\n  return RetryStatus::Yes;\n}\n\nRetryStatus RetryStateImpl::shouldRetryHeaders(const Http::ResponseHeaderMap& response_headers,\n                                               DoRetryCallback callback) {\n  const bool would_retry = wouldRetryFromHeaders(response_headers);\n\n  // Yes, we will retry based on the headers - try to parse a rate limited reset interval from the\n  // response.\n  if (would_retry && !reset_headers_.empty()) {\n    const auto backoff_interval = parseResetInterval(response_headers);\n    if (backoff_interval.has_value() && (backoff_interval.value().count() > 1L)) {\n      ratelimited_backoff_strategy_ = std::make_unique<JitteredLowerBoundBackOffStrategy>(\n          backoff_interval.value().count(), random_);\n    }\n  }\n\n  return shouldRetry(would_retry, callback);\n}\n\nRetryStatus RetryStateImpl::shouldRetryReset(Http::StreamResetReason reset_reason,\n                                             DoRetryCallback callback) {\n  return shouldRetry(wouldRetryFromReset(reset_reason), callback);\n}\n\nRetryStatus RetryStateImpl::shouldHedgeRetryPerTryTimeout(DoRetryCallback callback) {\n  // A hedged retry on per try timeout is always retried if there are retries\n  // left. 
NOTE: this is a bit different than non-hedged per try timeouts which\n  // are only retried if the applicable retry policy specifies either\n  // RETRY_ON_5XX or RETRY_ON_GATEWAY_ERROR. This is because these types of\n  // retries are associated with a stream reset which is analogous to a gateway\n  // error. When hedging on per try timeout is enabled, however, there is no\n  // stream reset.\n  return shouldRetry(true, callback);\n}\n\nbool RetryStateImpl::wouldRetryFromHeaders(const Http::ResponseHeaderMap& response_headers) {\n  // A response that contains the x-envoy-ratelimited header comes from an upstream envoy.\n  // We retry these only when the envoy-ratelimited policy is in effect.\n  if (response_headers.EnvoyRateLimited() != nullptr) {\n    return retry_on_ & RetryPolicy::RETRY_ON_ENVOY_RATE_LIMITED;\n  }\n\n  if (retry_on_ & RetryPolicy::RETRY_ON_5XX) {\n    if (Http::CodeUtility::is5xx(Http::Utility::getResponseStatus(response_headers))) {\n      return true;\n    }\n  }\n\n  if (retry_on_ & RetryPolicy::RETRY_ON_GATEWAY_ERROR) {\n    if (Http::CodeUtility::isGatewayError(Http::Utility::getResponseStatus(response_headers))) {\n      return true;\n    }\n  }\n\n  if ((retry_on_ & RetryPolicy::RETRY_ON_RETRIABLE_4XX)) {\n    Http::Code code = static_cast<Http::Code>(Http::Utility::getResponseStatus(response_headers));\n    if (code == Http::Code::Conflict) {\n      return true;\n    }\n  }\n\n  if ((retry_on_ & RetryPolicy::RETRY_ON_RETRIABLE_STATUS_CODES)) {\n    for (auto code : retriable_status_codes_) {\n      if (Http::Utility::getResponseStatus(response_headers) == code) {\n        return true;\n      }\n    }\n  }\n\n  if (retry_on_ & RetryPolicy::RETRY_ON_RETRIABLE_HEADERS) {\n    for (const auto& retriable_header : retriable_headers_) {\n      if (retriable_header->matchesHeaders(response_headers)) {\n        return true;\n      }\n    }\n  }\n\n  if (retry_on_ &\n      (RetryPolicy::RETRY_ON_GRPC_CANCELLED | 
RetryPolicy::RETRY_ON_GRPC_DEADLINE_EXCEEDED |\n       RetryPolicy::RETRY_ON_GRPC_RESOURCE_EXHAUSTED | RetryPolicy::RETRY_ON_GRPC_UNAVAILABLE |\n       RetryPolicy::RETRY_ON_GRPC_INTERNAL)) {\n    absl::optional<Grpc::Status::GrpcStatus> status = Grpc::Common::getGrpcStatus(response_headers);\n    if (status) {\n      if ((status.value() == Grpc::Status::Canceled &&\n           (retry_on_ & RetryPolicy::RETRY_ON_GRPC_CANCELLED)) ||\n          (status.value() == Grpc::Status::DeadlineExceeded &&\n           (retry_on_ & RetryPolicy::RETRY_ON_GRPC_DEADLINE_EXCEEDED)) ||\n          (status.value() == Grpc::Status::ResourceExhausted &&\n           (retry_on_ & RetryPolicy::RETRY_ON_GRPC_RESOURCE_EXHAUSTED)) ||\n          (status.value() == Grpc::Status::Unavailable &&\n           (retry_on_ & RetryPolicy::RETRY_ON_GRPC_UNAVAILABLE)) ||\n          (status.value() == Grpc::Status::Internal &&\n           (retry_on_ & RetryPolicy::RETRY_ON_GRPC_INTERNAL))) {\n        return true;\n      }\n    }\n  }\n\n  return false;\n}\n\nbool RetryStateImpl::wouldRetryFromReset(const Http::StreamResetReason reset_reason) {\n  // First check \"never retry\" conditions so we can short circuit (we never\n  // retry if the reset reason is overflow).\n  if (reset_reason == Http::StreamResetReason::Overflow) {\n    return false;\n  }\n\n  if (retry_on_ & RetryPolicy::RETRY_ON_RESET) {\n    return true;\n  }\n\n  if (retry_on_ & (RetryPolicy::RETRY_ON_5XX | RetryPolicy::RETRY_ON_GATEWAY_ERROR)) {\n    // Currently we count an upstream reset as a \"5xx\" (since it will result in\n    // one). 
With RETRY_ON_RESET we may eventually remove these policies.\n    return true;\n  }\n\n  if ((retry_on_ & RetryPolicy::RETRY_ON_REFUSED_STREAM) &&\n      reset_reason == Http::StreamResetReason::RemoteRefusedStreamReset) {\n    return true;\n  }\n\n  if ((retry_on_ & RetryPolicy::RETRY_ON_CONNECT_FAILURE) &&\n      reset_reason == Http::StreamResetReason::ConnectionFailure) {\n    return true;\n  }\n\n  return false;\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/retry_state_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/backoff_strategy.h\"\n#include \"common/http/header_utility.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Wraps retry state for the router.\n */\nclass RetryStateImpl : public RetryState {\npublic:\n  static RetryStatePtr create(const RetryPolicy& route_policy,\n                              Http::RequestHeaderMap& request_headers,\n                              const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster,\n                              Runtime::Loader& runtime, Random::RandomGenerator& random,\n                              Event::Dispatcher& dispatcher, TimeSource& time_source,\n                              Upstream::ResourcePriority priority);\n  ~RetryStateImpl() override;\n\n  /**\n   * Returns the RetryPolicy extracted from the x-envoy-retry-on header.\n   * @param config is the value of the header.\n   * @return std::pair<uint32_t, bool> the uint32_t is a bitset representing the\n   *         valid retry policies in @param config. The bool is TRUE iff all the\n   *         policies specified in @param config are valid.\n   */\n  static std::pair<uint32_t, bool> parseRetryOn(absl::string_view config);\n\n  /**\n   * Returns the RetryPolicy extracted from the x-envoy-retry-grpc-on header.\n   * @param config is the value of the header.\n   * @return std::pair<uint32_t, bool> the uint32_t is a bitset representing the\n   *         valid retry policies in @param config. 
The bool is TRUE iff all the\n   *         policies specified in @param config are valid.\n   */\n  static std::pair<uint32_t, bool> parseRetryGrpcOn(absl::string_view retry_grpc_on_header);\n\n  // Router::RetryState\n  bool enabled() override { return retry_on_ != 0; }\n  absl::optional<std::chrono::milliseconds>\n  parseResetInterval(const Http::ResponseHeaderMap& response_headers) const override;\n  RetryStatus shouldRetryHeaders(const Http::ResponseHeaderMap& response_headers,\n                                 DoRetryCallback callback) override;\n  // Returns true if the retry policy would retry the passed headers. Does not\n  // take into account circuit breaking or remaining tries.\n  bool wouldRetryFromHeaders(const Http::ResponseHeaderMap& response_headers) override;\n  RetryStatus shouldRetryReset(const Http::StreamResetReason reset_reason,\n                               DoRetryCallback callback) override;\n  RetryStatus shouldHedgeRetryPerTryTimeout(DoRetryCallback callback) override;\n\n  void onHostAttempted(Upstream::HostDescriptionConstSharedPtr host) override {\n    std::for_each(retry_host_predicates_.begin(), retry_host_predicates_.end(),\n                  [&host](auto predicate) { predicate->onHostAttempted(host); });\n    if (retry_priority_) {\n      retry_priority_->onHostAttempted(host);\n    }\n  }\n\n  bool shouldSelectAnotherHost(const Upstream::Host& host) override {\n    return std::any_of(\n        retry_host_predicates_.begin(), retry_host_predicates_.end(),\n        [&host](auto predicate) { return predicate->shouldSelectAnotherHost(host); });\n  }\n\n  const Upstream::HealthyAndDegradedLoad& priorityLoadForRetry(\n      const Upstream::PrioritySet& priority_set,\n      const Upstream::HealthyAndDegradedLoad& original_priority_load,\n      const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) override {\n    if (!retry_priority_) {\n      return original_priority_load;\n    }\n    return 
retry_priority_->determinePriorityLoad(priority_set, original_priority_load,\n                                                  priority_mapping_func);\n  }\n\n  uint32_t hostSelectionMaxAttempts() const override { return host_selection_max_attempts_; }\n\nprivate:\n  RetryStateImpl(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers,\n                 const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster,\n                 Runtime::Loader& runtime, Random::RandomGenerator& random,\n                 Event::Dispatcher& dispatcher, TimeSource& time_source,\n                 Upstream::ResourcePriority priority);\n\n  void enableBackoffTimer();\n  void resetRetry();\n  bool wouldRetryFromReset(const Http::StreamResetReason reset_reason);\n  RetryStatus shouldRetry(bool would_retry, DoRetryCallback callback);\n\n  const Upstream::ClusterInfo& cluster_;\n  const VirtualCluster* vcluster_;\n  Runtime::Loader& runtime_;\n  Random::RandomGenerator& random_;\n  Event::Dispatcher& dispatcher_;\n  TimeSource& time_source_;\n  uint32_t retry_on_{};\n  uint32_t retries_remaining_{};\n  DoRetryCallback callback_;\n  Event::TimerPtr retry_timer_;\n  Upstream::ResourcePriority priority_;\n  BackOffStrategyPtr backoff_strategy_;\n  BackOffStrategyPtr ratelimited_backoff_strategy_{};\n  std::vector<Upstream::RetryHostPredicateSharedPtr> retry_host_predicates_;\n  Upstream::RetryPrioritySharedPtr retry_priority_;\n  uint32_t host_selection_max_attempts_;\n  std::vector<uint32_t> retriable_status_codes_;\n  std::vector<Http::HeaderMatcherSharedPtr> retriable_headers_;\n  std::vector<ResetHeaderParserSharedPtr> reset_headers_{};\n  std::chrono::milliseconds reset_max_interval_{};\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/route_config_update_receiver_impl.cc",
    "content": "#include \"common/router/route_config_update_receiver_impl.h\"\n\n#include <string>\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/config_impl.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nbool RouteConfigUpdateReceiverImpl::onRdsUpdate(\n    const envoy::config::route::v3::RouteConfiguration& rc, const std::string& version_info) {\n  const uint64_t new_hash = MessageUtil::hash(rc);\n  if (new_hash == last_config_hash_) {\n    return false;\n  }\n  route_config_proto_ = rc;\n  last_config_hash_ = new_hash;\n  const uint64_t new_vhds_config_hash = rc.has_vhds() ? MessageUtil::hash(rc.vhds()) : 0ul;\n  vhds_configuration_changed_ = new_vhds_config_hash != last_vhds_config_hash_;\n  last_vhds_config_hash_ = new_vhds_config_hash;\n  initializeRdsVhosts(route_config_proto_);\n  onUpdateCommon(route_config_proto_, version_info);\n  return true;\n}\n\nvoid RouteConfigUpdateReceiverImpl::onUpdateCommon(\n    const envoy::config::route::v3::RouteConfiguration& rc, const std::string& version_info) {\n  last_config_version_ = version_info;\n  last_updated_ = time_source_.systemTime();\n  rebuildRouteConfig(rds_virtual_hosts_, vhds_virtual_hosts_, route_config_proto_);\n  config_info_.emplace(RouteConfigProvider::ConfigInfo{rc, last_config_version_});\n}\n\nbool RouteConfigUpdateReceiverImpl::onVhdsUpdate(\n    const VirtualHostRefVector& added_vhosts, const std::set<std::string>& added_resource_ids,\n    const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n    const std::string& version_info) {\n  resource_ids_in_last_update_ = added_resource_ids;\n  const bool removed = removeVhosts(vhds_virtual_hosts_, removed_resources);\n  const bool updated = updateVhosts(vhds_virtual_hosts_, added_vhosts);\n  onUpdateCommon(route_config_proto_, version_info);\n  
return removed || updated || !resource_ids_in_last_update_.empty();\n}\n\nvoid RouteConfigUpdateReceiverImpl::initializeRdsVhosts(\n    const envoy::config::route::v3::RouteConfiguration& route_configuration) {\n  rds_virtual_hosts_.clear();\n  for (const auto& vhost : route_configuration.virtual_hosts()) {\n    rds_virtual_hosts_.emplace(vhost.name(), vhost);\n  }\n}\n\nbool RouteConfigUpdateReceiverImpl::removeVhosts(\n    std::map<std::string, envoy::config::route::v3::VirtualHost>& vhosts,\n    const Protobuf::RepeatedPtrField<std::string>& removed_vhost_names) {\n  bool vhosts_removed = false;\n  for (const auto& vhost_name : removed_vhost_names) {\n    auto found = vhosts.find(vhost_name);\n    if (found != vhosts.end()) {\n      vhosts_removed = true;\n      vhosts.erase(vhost_name);\n    }\n  }\n  return vhosts_removed;\n}\n\nbool RouteConfigUpdateReceiverImpl::updateVhosts(\n    std::map<std::string, envoy::config::route::v3::VirtualHost>& vhosts,\n    const VirtualHostRefVector& added_vhosts) {\n  bool vhosts_added = false;\n  for (const auto& vhost : added_vhosts) {\n    auto found = vhosts.find(vhost.get().name());\n    if (found != vhosts.end()) {\n      vhosts.erase(found);\n    }\n    vhosts.emplace(vhost.get().name(), vhost.get());\n    vhosts_added = true;\n  }\n  return vhosts_added;\n}\n\nvoid RouteConfigUpdateReceiverImpl::rebuildRouteConfig(\n    const std::map<std::string, envoy::config::route::v3::VirtualHost>& rds_vhosts,\n    const std::map<std::string, envoy::config::route::v3::VirtualHost>& vhds_vhosts,\n    envoy::config::route::v3::RouteConfiguration& route_config) {\n  route_config.clear_virtual_hosts();\n  for (const auto& vhost : rds_vhosts) {\n    route_config.mutable_virtual_hosts()->Add()->CopyFrom(vhost.second);\n  }\n  for (const auto& vhost : vhds_vhosts) {\n    route_config.mutable_virtual_hosts()->Add()->CopyFrom(vhost.second);\n  }\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/route_config_update_receiver_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/router/rds.h\"\n#include \"envoy/router/route_config_update_receiver.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nclass RouteConfigUpdateReceiverImpl : public RouteConfigUpdateReceiver {\npublic:\n  RouteConfigUpdateReceiverImpl(TimeSource& time_source)\n      : time_source_(time_source), last_config_hash_(0ull), last_vhds_config_hash_(0ul),\n        vhds_configuration_changed_(true) {}\n\n  void initializeRdsVhosts(const envoy::config::route::v3::RouteConfiguration& route_configuration);\n  bool removeVhosts(std::map<std::string, envoy::config::route::v3::VirtualHost>& vhosts,\n                    const Protobuf::RepeatedPtrField<std::string>& removed_vhost_names);\n  bool updateVhosts(std::map<std::string, envoy::config::route::v3::VirtualHost>& vhosts,\n                    const VirtualHostRefVector& added_vhosts);\n  void rebuildRouteConfig(\n      const std::map<std::string, envoy::config::route::v3::VirtualHost>& rds_vhosts,\n      const std::map<std::string, envoy::config::route::v3::VirtualHost>& vhds_vhosts,\n      envoy::config::route::v3::RouteConfiguration& route_config);\n  bool onDemandFetchFailed(const envoy::service::discovery::v3::Resource& resource) const;\n  void onUpdateCommon(const envoy::config::route::v3::RouteConfiguration& rc,\n                      const std::string& version_info);\n\n  // Router::RouteConfigUpdateReceiver\n  bool onRdsUpdate(const envoy::config::route::v3::RouteConfiguration& rc,\n                   const std::string& version_info) override;\n  bool onVhdsUpdate(const VirtualHostRefVector& added_vhosts,\n                    const std::set<std::string>& added_resource_ids,\n                    const 
Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                    const std::string& version_info) override;\n  const std::string& routeConfigName() const override { return route_config_proto_.name(); }\n  const std::string& configVersion() const override { return last_config_version_; }\n  uint64_t configHash() const override { return last_config_hash_; }\n  absl::optional<RouteConfigProvider::ConfigInfo> configInfo() const override {\n    return config_info_;\n  }\n  bool vhdsConfigurationChanged() const override { return vhds_configuration_changed_; }\n  const envoy::config::route::v3::RouteConfiguration& routeConfiguration() override {\n    return route_config_proto_;\n  }\n  SystemTime lastUpdated() const override { return last_updated_; }\n  const std::set<std::string>& resourceIdsInLastVhdsUpdate() override {\n    return resource_ids_in_last_update_;\n  }\n\nprivate:\n  TimeSource& time_source_;\n  envoy::config::route::v3::RouteConfiguration route_config_proto_;\n  uint64_t last_config_hash_;\n  uint64_t last_vhds_config_hash_;\n  std::string last_config_version_;\n  SystemTime last_updated_;\n  std::map<std::string, envoy::config::route::v3::VirtualHost> rds_virtual_hosts_;\n  std::map<std::string, envoy::config::route::v3::VirtualHost> vhds_virtual_hosts_;\n  absl::optional<RouteConfigProvider::ConfigInfo> config_info_;\n  std::set<std::string> resource_ids_in_last_update_;\n  bool vhds_configuration_changed_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/router.cc",
    "content": "#include \"common/router/router.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/conn_pool.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/scope_tracker.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/application_protocol.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/network/upstream_server_name.h\"\n#include \"common/network/upstream_subject_alt_names.h\"\n#include \"common/router/config_impl.h\"\n#include \"common/router/debug_config.h\"\n#include \"common/router/retry_state_impl.h\"\n#include \"common/router/upstream_request.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/stream_info/uint32_accessor_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\nconstexpr char NumInternalRedirectsFilterStateName[] = \"num_internal_redirects\";\n\nuint32_t getLength(const Buffer::Instance* instance) { return instance ? 
instance->length() : 0; }\n\nbool schemeIsHttp(const Http::RequestHeaderMap& downstream_headers,\n                  const Network::Connection& connection) {\n  if (downstream_headers.getForwardedProtoValue() == Http::Headers::get().SchemeValues.Http) {\n    return true;\n  }\n  if (!connection.ssl()) {\n    return true;\n  }\n  return false;\n}\n\nconstexpr uint64_t TimeoutPrecisionFactor = 100;\n\n} // namespace\n\n// Express percentage as [0, TimeoutPrecisionFactor] because stats do not accept floating point\n// values, and getting multiple significant figures on the histogram would be nice.\nuint64_t FilterUtility::percentageOfTimeout(const std::chrono::milliseconds response_time,\n                                            const std::chrono::milliseconds timeout) {\n  // Timeouts of 0 are considered infinite. Any portion of an infinite timeout used is still\n  // none of it.\n  if (timeout.count() == 0) {\n    return 0;\n  }\n\n  return static_cast<uint64_t>(response_time.count() * TimeoutPrecisionFactor / timeout.count());\n}\n\nvoid FilterUtility::setUpstreamScheme(Http::RequestHeaderMap& headers, bool use_secure_transport) {\n  if (use_secure_transport) {\n    headers.setReferenceScheme(Http::Headers::get().SchemeValues.Https);\n  } else {\n    headers.setReferenceScheme(Http::Headers::get().SchemeValues.Http);\n  }\n}\n\nbool FilterUtility::shouldShadow(const ShadowPolicy& policy, Runtime::Loader& runtime,\n                                 uint64_t stable_random) {\n  if (policy.cluster().empty()) {\n    return false;\n  }\n\n  if (policy.defaultValue().numerator() > 0) {\n    return runtime.snapshot().featureEnabled(policy.runtimeKey(), policy.defaultValue(),\n                                             stable_random);\n  }\n\n  if (!policy.runtimeKey().empty() &&\n      !runtime.snapshot().featureEnabled(policy.runtimeKey(), 0, stable_random, 10000UL)) {\n    return false;\n  }\n\n  return 
true;\n}\n\nFilterUtility::TimeoutData\nFilterUtility::finalTimeout(const RouteEntry& route, Http::RequestHeaderMap& request_headers,\n                            bool insert_envoy_expected_request_timeout_ms, bool grpc_request,\n                            bool per_try_timeout_hedging_enabled,\n                            bool respect_expected_rq_timeout) {\n  // See if there is a user supplied timeout in a request header. If there is we take that.\n  // Otherwise if the request is gRPC and a maximum gRPC timeout is configured we use the timeout\n  // in the gRPC headers (or infinity when gRPC headers have no timeout), but cap that timeout to\n  // the configured maximum gRPC timeout (which may also be infinity, represented by a 0 value),\n  // or the default from the route config otherwise.\n  TimeoutData timeout;\n  if (grpc_request && route.maxGrpcTimeout()) {\n    const std::chrono::milliseconds max_grpc_timeout = route.maxGrpcTimeout().value();\n    auto header_timeout = Grpc::Common::getGrpcTimeout(request_headers);\n    std::chrono::milliseconds grpc_timeout =\n        header_timeout ? 
header_timeout.value() : std::chrono::milliseconds(0);\n    if (route.grpcTimeoutOffset()) {\n      // We only apply the offset if it won't result in grpc_timeout hitting 0 or below, as\n      // setting it to 0 means infinity and a negative timeout makes no sense.\n      const auto offset = *route.grpcTimeoutOffset();\n      if (offset < grpc_timeout) {\n        grpc_timeout -= offset;\n      }\n    }\n\n    // Cap gRPC timeout to the configured maximum considering that 0 means infinity.\n    if (max_grpc_timeout != std::chrono::milliseconds(0) &&\n        (grpc_timeout == std::chrono::milliseconds(0) || grpc_timeout > max_grpc_timeout)) {\n      grpc_timeout = max_grpc_timeout;\n    }\n    timeout.global_timeout_ = grpc_timeout;\n  } else {\n    timeout.global_timeout_ = route.timeout();\n  }\n  timeout.per_try_timeout_ = route.retryPolicy().perTryTimeout();\n\n  uint64_t header_timeout;\n\n  if (respect_expected_rq_timeout) {\n    // Check if there is timeout set by egress Envoy.\n    // If present, use that value as route timeout and don't override\n    // *x-envoy-expected-rq-timeout-ms* header. 
At this point *x-envoy-upstream-rq-timeout-ms*\n    // header should have been sanitized by egress Envoy.\n    const Http::HeaderEntry* header_expected_timeout_entry =\n        request_headers.EnvoyExpectedRequestTimeoutMs();\n    if (header_expected_timeout_entry) {\n      trySetGlobalTimeout(header_expected_timeout_entry, timeout);\n    } else {\n      const Http::HeaderEntry* header_timeout_entry =\n          request_headers.EnvoyUpstreamRequestTimeoutMs();\n\n      if (trySetGlobalTimeout(header_timeout_entry, timeout)) {\n        request_headers.removeEnvoyUpstreamRequestTimeoutMs();\n      }\n    }\n  } else {\n    const Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs();\n    if (trySetGlobalTimeout(header_timeout_entry, timeout)) {\n      request_headers.removeEnvoyUpstreamRequestTimeoutMs();\n    }\n  }\n\n  // See if there is a per try/retry timeout. If it's >= global we just ignore it.\n  const absl::string_view per_try_timeout_entry =\n      request_headers.getEnvoyUpstreamRequestPerTryTimeoutMsValue();\n  if (!per_try_timeout_entry.empty()) {\n    if (absl::SimpleAtoi(per_try_timeout_entry, &header_timeout)) {\n      timeout.per_try_timeout_ = std::chrono::milliseconds(header_timeout);\n    }\n    request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs();\n  }\n\n  if (timeout.per_try_timeout_ >= timeout.global_timeout_ && timeout.global_timeout_.count() != 0) {\n    timeout.per_try_timeout_ = std::chrono::milliseconds(0);\n  }\n\n  // See if there is any timeout to write in the expected timeout header.\n  uint64_t expected_timeout = timeout.per_try_timeout_.count();\n  // Use the global timeout if no per try timeout was specified or if we're\n  // doing hedging when there are per try timeouts. 
Either of these scenarios\n  // mean that the upstream server can use the full global timeout.\n  if (per_try_timeout_hedging_enabled || expected_timeout == 0) {\n    expected_timeout = timeout.global_timeout_.count();\n  }\n\n  if (insert_envoy_expected_request_timeout_ms && expected_timeout > 0) {\n    request_headers.setEnvoyExpectedRequestTimeoutMs(expected_timeout);\n  }\n\n  // If we've configured max_grpc_timeout, override the grpc-timeout header with\n  // the expected timeout. This ensures that the optional per try timeout is reflected\n  // in grpc-timeout, ensuring that the upstream gRPC server is aware of the actual timeout.\n  // If the expected timeout is 0 set no timeout, as Envoy treats 0 as infinite timeout.\n  if (grpc_request && route.maxGrpcTimeout() && expected_timeout != 0) {\n    Grpc::Common::toGrpcTimeout(std::chrono::milliseconds(expected_timeout), request_headers);\n  }\n\n  return timeout;\n}\n\nbool FilterUtility::trySetGlobalTimeout(const Http::HeaderEntry* header_timeout_entry,\n                                        TimeoutData& timeout) {\n  if (header_timeout_entry) {\n    uint64_t header_timeout;\n    if (absl::SimpleAtoi(header_timeout_entry->value().getStringView(), &header_timeout)) {\n      timeout.global_timeout_ = std::chrono::milliseconds(header_timeout);\n    }\n    return true;\n  }\n  return false;\n}\n\nFilterUtility::HedgingParams\nFilterUtility::finalHedgingParams(const RouteEntry& route,\n                                  Http::RequestHeaderMap& request_headers) {\n  HedgingParams hedging_params;\n  hedging_params.hedge_on_per_try_timeout_ = route.hedgePolicy().hedgeOnPerTryTimeout();\n\n  const Http::HeaderEntry* hedge_on_per_try_timeout_entry =\n      request_headers.EnvoyHedgeOnPerTryTimeout();\n  if (hedge_on_per_try_timeout_entry) {\n    if (hedge_on_per_try_timeout_entry->value() == \"true\") {\n      hedging_params.hedge_on_per_try_timeout_ = true;\n    }\n    if (hedge_on_per_try_timeout_entry->value() == 
\"false\") {\n      hedging_params.hedge_on_per_try_timeout_ = false;\n    }\n\n    request_headers.removeEnvoyHedgeOnPerTryTimeout();\n  }\n\n  return hedging_params;\n}\n\nFilter::~Filter() {\n  // Upstream resources should already have been cleaned.\n  ASSERT(upstream_requests_.empty());\n  ASSERT(!retry_state_);\n}\n\nconst FilterUtility::StrictHeaderChecker::HeaderCheckResult\nFilterUtility::StrictHeaderChecker::checkHeader(Http::RequestHeaderMap& headers,\n                                                const Http::LowerCaseString& target_header) {\n  if (target_header == Http::Headers::get().EnvoyUpstreamRequestTimeoutMs) {\n    return isInteger(headers.EnvoyUpstreamRequestTimeoutMs());\n  } else if (target_header == Http::Headers::get().EnvoyUpstreamRequestPerTryTimeoutMs) {\n    return isInteger(headers.EnvoyUpstreamRequestPerTryTimeoutMs());\n  } else if (target_header == Http::Headers::get().EnvoyMaxRetries) {\n    return isInteger(headers.EnvoyMaxRetries());\n  } else if (target_header == Http::Headers::get().EnvoyRetryOn) {\n    return hasValidRetryFields(headers.EnvoyRetryOn(), &Router::RetryStateImpl::parseRetryOn);\n  } else if (target_header == Http::Headers::get().EnvoyRetryGrpcOn) {\n    return hasValidRetryFields(headers.EnvoyRetryGrpcOn(),\n                               &Router::RetryStateImpl::parseRetryGrpcOn);\n  }\n  // Should only validate headers for which we have implemented a validator.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nStats::StatName Filter::upstreamZone(Upstream::HostDescriptionConstSharedPtr upstream_host) {\n  return upstream_host ? 
upstream_host->localityZoneStatName() : config_.empty_stat_name_;\n}\n\nvoid Filter::chargeUpstreamCode(uint64_t response_status_code,\n                                const Http::ResponseHeaderMap& response_headers,\n                                Upstream::HostDescriptionConstSharedPtr upstream_host,\n                                bool dropped) {\n  // Passing the response_status_code explicitly is an optimization to avoid\n  // multiple calls to slow Http::Utility::getResponseStatus.\n  ASSERT(response_status_code == Http::Utility::getResponseStatus(response_headers));\n  if (config_.emit_dynamic_stats_ && !callbacks_->streamInfo().healthCheck()) {\n    const Http::HeaderEntry* upstream_canary_header = response_headers.EnvoyUpstreamCanary();\n    const bool is_canary = (upstream_canary_header && upstream_canary_header->value() == \"true\") ||\n                           (upstream_host ? upstream_host->canary() : false);\n    const bool internal_request = Http::HeaderUtility::isEnvoyInternalRequest(*downstream_headers_);\n\n    Stats::StatName upstream_zone = upstreamZone(upstream_host);\n    Http::CodeStats::ResponseStatInfo info{config_.scope_,\n                                           cluster_->statsScope(),\n                                           config_.empty_stat_name_,\n                                           response_status_code,\n                                           internal_request,\n                                           route_entry_->virtualHost().statName(),\n                                           request_vcluster_ ? 
request_vcluster_->statName()\n                                                             : config_.empty_stat_name_,\n                                           config_.zone_name_,\n                                           upstream_zone,\n                                           is_canary};\n\n    Http::CodeStats& code_stats = httpContext().codeStats();\n    code_stats.chargeResponseStat(info);\n\n    if (alt_stat_prefix_ != nullptr) {\n      Http::CodeStats::ResponseStatInfo alt_info{config_.scope_,\n                                                 cluster_->statsScope(),\n                                                 alt_stat_prefix_->statName(),\n                                                 response_status_code,\n                                                 internal_request,\n                                                 config_.empty_stat_name_,\n                                                 config_.empty_stat_name_,\n                                                 config_.zone_name_,\n                                                 upstream_zone,\n                                                 is_canary};\n      code_stats.chargeResponseStat(alt_info);\n    }\n\n    if (dropped) {\n      cluster_->loadReportStats().upstream_rq_dropped_.inc();\n    }\n    if (upstream_host && Http::CodeUtility::is5xx(response_status_code)) {\n      upstream_host->stats().rq_error_.inc();\n    }\n  }\n}\n\nvoid Filter::chargeUpstreamCode(Http::Code code,\n                                Upstream::HostDescriptionConstSharedPtr upstream_host,\n                                bool dropped) {\n  const uint64_t response_status_code = enumToInt(code);\n  const auto fake_response_headers = Http::createHeaderMap<Http::ResponseHeaderMapImpl>(\n      {{Http::Headers::get().Status, std::to_string(response_status_code)}});\n  chargeUpstreamCode(response_status_code, *fake_response_headers, upstream_host, dropped);\n}\n\nHttp::FilterHeadersStatus 
Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) {\n  // Do a common header check. We make sure that all outgoing requests have all HTTP/2 headers.\n  // These get stripped by HTTP/1 codec where applicable.\n  ASSERT(headers.Method());\n  ASSERT(headers.Host());\n\n  downstream_headers_ = &headers;\n\n  // Extract debug configuration from filter state. This is used further along to determine whether\n  // we should append cluster and host headers to the response, and whether to forward the request\n  // upstream.\n  const StreamInfo::FilterStateSharedPtr& filter_state = callbacks_->streamInfo().filterState();\n  const DebugConfig* debug_config =\n      filter_state->hasData<DebugConfig>(DebugConfig::key())\n          ? &(filter_state->getDataReadOnly<DebugConfig>(DebugConfig::key()))\n          : nullptr;\n\n  // TODO: Maybe add a filter API for this.\n  grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers);\n\n  // Only increment rq total stat if we actually decode headers here. 
This does not count requests\n  // that get handled by earlier filters.\n  config_.stats_.rq_total_.inc();\n\n  // Initialize the `modify_headers` function as a no-op (so we don't have to remember to check it\n  // against nullptr before calling it), and feed it behavior later if/when we have cluster info\n  // headers to append.\n  std::function<void(Http::ResponseHeaderMap&)> modify_headers = [](Http::ResponseHeaderMap&) {};\n\n  // Determine if there is a route entry or a direct response for the request.\n  route_ = callbacks_->route();\n  if (!route_) {\n    config_.stats_.no_route_.inc();\n    ENVOY_STREAM_LOG(debug, \"no cluster match for URL '{}'\", *callbacks_, headers.getPathValue());\n\n    callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound);\n    callbacks_->sendLocalReply(Http::Code::NotFound, \"\", modify_headers, absl::nullopt,\n                               StreamInfo::ResponseCodeDetails::get().RouteNotFound);\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  // Determine if there is a direct response for the request.\n  const auto* direct_response = route_->directResponseEntry();\n  if (direct_response != nullptr) {\n    config_.stats_.rq_direct_response_.inc();\n    direct_response->rewritePathHeader(headers, !config_.suppress_envoy_headers_);\n    callbacks_->sendLocalReply(\n        direct_response->responseCode(), direct_response->responseBody(),\n        [this, direct_response,\n         &request_headers = headers](Http::ResponseHeaderMap& response_headers) -> void {\n          std::string new_path;\n          if (request_headers.Path()) {\n            new_path = direct_response->newPath(request_headers);\n          }\n          // See https://tools.ietf.org/html/rfc7231#section-7.1.2.\n          const auto add_location =\n              direct_response->responseCode() == Http::Code::Created ||\n              Http::CodeUtility::is3xx(enumToInt(direct_response->responseCode()));\n          if 
(!new_path.empty() && add_location) {\n            response_headers.addReferenceKey(Http::Headers::get().Location, new_path);\n          }\n          direct_response->finalizeResponseHeaders(response_headers, callbacks_->streamInfo());\n        },\n        absl::nullopt, StreamInfo::ResponseCodeDetails::get().DirectResponse);\n    callbacks_->streamInfo().setRouteName(direct_response->routeName());\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  // A route entry matches for the request.\n  route_entry_ = route_->routeEntry();\n  // If there's a route specific limit and it's smaller than general downstream\n  // limits, apply the new cap.\n  retry_shadow_buffer_limit_ =\n      std::min(retry_shadow_buffer_limit_, route_entry_->retryShadowBufferLimit());\n  callbacks_->streamInfo().setRouteName(route_entry_->routeName());\n  if (debug_config && debug_config->append_cluster_) {\n    // The cluster name will be appended to any local or upstream responses from this point.\n    modify_headers = [this, debug_config](Http::ResponseHeaderMap& headers) {\n      headers.addCopy(debug_config->cluster_header_.value_or(Http::Headers::get().EnvoyCluster),\n                      route_entry_->clusterName());\n    };\n  }\n  Upstream::ThreadLocalCluster* cluster = config_.cm_.get(route_entry_->clusterName());\n  if (!cluster) {\n    config_.stats_.no_cluster_.inc();\n    ENVOY_STREAM_LOG(debug, \"unknown cluster '{}'\", *callbacks_, route_entry_->clusterName());\n\n    callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound);\n    callbacks_->sendLocalReply(route_entry_->clusterNotFoundResponseCode(), \"\", modify_headers,\n                               absl::nullopt,\n                               StreamInfo::ResponseCodeDetails::get().ClusterNotFound);\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n  cluster_ = cluster->info();\n\n  // Set up stat prefixes, etc.\n  request_vcluster_ = route_entry_->virtualCluster(headers);\n  
ENVOY_STREAM_LOG(debug, \"cluster '{}' match for URL '{}'\", *callbacks_,\n                   route_entry_->clusterName(), headers.getPathValue());\n\n  if (config_.strict_check_headers_ != nullptr) {\n    for (const auto& header : *config_.strict_check_headers_) {\n      const auto res = FilterUtility::StrictHeaderChecker::checkHeader(headers, header);\n      if (!res.valid_) {\n        callbacks_->streamInfo().setResponseFlag(\n            StreamInfo::ResponseFlag::InvalidEnvoyRequestHeaders);\n        const std::string body = fmt::format(\"invalid header '{}' with value '{}'\",\n                                             std::string(res.entry_->key().getStringView()),\n                                             std::string(res.entry_->value().getStringView()));\n        const std::string details =\n            absl::StrCat(StreamInfo::ResponseCodeDetails::get().InvalidEnvoyRequestHeaders, \"{\",\n                         res.entry_->key().getStringView(), \"}\");\n        callbacks_->sendLocalReply(Http::Code::BadRequest, body, nullptr, absl::nullopt, details);\n        return Http::FilterHeadersStatus::StopIteration;\n      }\n    }\n  }\n\n  const Http::HeaderEntry* request_alt_name = headers.EnvoyUpstreamAltStatName();\n  if (request_alt_name) {\n    alt_stat_prefix_ = std::make_unique<Stats::StatNameDynamicStorage>(\n        request_alt_name->value().getStringView(), config_.scope_.symbolTable());\n    headers.removeEnvoyUpstreamAltStatName();\n  }\n\n  // See if we are supposed to immediately kill some percentage of this cluster's traffic.\n  if (cluster_->maintenanceMode()) {\n    callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow);\n    chargeUpstreamCode(Http::Code::ServiceUnavailable, nullptr, true);\n    callbacks_->sendLocalReply(\n        Http::Code::ServiceUnavailable, \"maintenance mode\",\n        [modify_headers, this](Http::ResponseHeaderMap& headers) {\n          if (!config_.suppress_envoy_headers_) {\n     
       headers.addReference(Http::Headers::get().EnvoyOverloaded,\n                                 Http::Headers::get().EnvoyOverloadedValues.True);\n          }\n          // Note: append_cluster_info does not respect suppress_envoy_headers.\n          modify_headers(headers);\n        },\n        absl::nullopt, StreamInfo::ResponseCodeDetails::get().MaintenanceMode);\n    cluster_->stats().upstream_rq_maintenance_mode_.inc();\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  // Fetch a connection pool for the upstream cluster.\n  const auto& upstream_http_protocol_options = cluster_->upstreamHttpProtocolOptions();\n\n  if (upstream_http_protocol_options.has_value()) {\n    const auto parsed_authority = Http::Utility::parseAuthority(headers.getHostValue());\n    if (!parsed_authority.is_ip_address_ && upstream_http_protocol_options.value().auto_sni()) {\n      callbacks_->streamInfo().filterState()->setData(\n          Network::UpstreamServerName::key(),\n          std::make_unique<Network::UpstreamServerName>(parsed_authority.host_),\n          StreamInfo::FilterState::StateType::Mutable);\n    }\n\n    if (upstream_http_protocol_options.value().auto_san_validation()) {\n      callbacks_->streamInfo().filterState()->setData(\n          Network::UpstreamSubjectAltNames::key(),\n          std::make_unique<Network::UpstreamSubjectAltNames>(\n              std::vector<std::string>{std::string(parsed_authority.host_)}),\n          StreamInfo::FilterState::StateType::Mutable);\n    }\n  }\n\n  transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState(\n      *callbacks_->streamInfo().filterState());\n  std::unique_ptr<GenericConnPool> generic_conn_pool = createConnPool();\n\n  if (!generic_conn_pool) {\n    sendNoHealthyUpstreamResponse();\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n  Upstream::HostDescriptionConstSharedPtr host = generic_conn_pool->host();\n\n  if (debug_config && 
debug_config->append_upstream_host_) {\n    // The hostname and address will be appended to any local or upstream responses from this point,\n    // possibly in addition to the cluster name.\n    modify_headers = [modify_headers, debug_config, host](Http::ResponseHeaderMap& headers) {\n      modify_headers(headers);\n      headers.addCopy(\n          debug_config->hostname_header_.value_or(Http::Headers::get().EnvoyUpstreamHostname),\n          host->hostname());\n      headers.addCopy(debug_config->host_address_header_.value_or(\n                          Http::Headers::get().EnvoyUpstreamHostAddress),\n                      host->address()->asString());\n    };\n  }\n\n  // If we've been instructed not to forward the request upstream, send an empty local response.\n  if (debug_config && debug_config->do_not_forward_) {\n    modify_headers = [modify_headers, debug_config](Http::ResponseHeaderMap& headers) {\n      modify_headers(headers);\n      headers.addCopy(\n          debug_config->not_forwarded_header_.value_or(Http::Headers::get().EnvoyNotForwarded),\n          \"true\");\n    };\n    callbacks_->sendLocalReply(Http::Code::NoContent, \"\", modify_headers, absl::nullopt, \"\");\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  hedging_params_ = FilterUtility::finalHedgingParams(*route_entry_, headers);\n\n  timeout_ = FilterUtility::finalTimeout(*route_entry_, headers, !config_.suppress_envoy_headers_,\n                                         grpc_request_, hedging_params_.hedge_on_per_try_timeout_,\n                                         config_.respect_expected_rq_timeout_);\n\n  // If this header is set with any value, use an alternate response code on timeout\n  if (headers.EnvoyUpstreamRequestTimeoutAltResponse()) {\n    timeout_response_code_ = Http::Code::NoContent;\n    headers.removeEnvoyUpstreamRequestTimeoutAltResponse();\n  }\n\n  include_attempt_count_in_request_ = route_entry_->includeAttemptCountInRequest();\n  if 
(include_attempt_count_in_request_) {\n    headers.setEnvoyAttemptCount(attempt_count_);\n  }\n\n  // The router has reached a point where it is going to try to send a request upstream,\n  // so now modify_headers should attach x-envoy-attempt-count to the downstream response if the\n  // config flag is true.\n  if (route_entry_->includeAttemptCountInResponse()) {\n    modify_headers = [modify_headers, this](Http::ResponseHeaderMap& headers) {\n      modify_headers(headers);\n\n      // This header is added without checking for config_.suppress_envoy_headers_ to mirror what is\n      // done for upstream requests.\n      headers.setEnvoyAttemptCount(attempt_count_);\n    };\n  }\n\n  // Inject the active span's tracing context into the request headers.\n  callbacks_->activeSpan().injectContext(headers);\n\n  route_entry_->finalizeRequestHeaders(headers, callbacks_->streamInfo(),\n                                       !config_.suppress_envoy_headers_);\n  FilterUtility::setUpstreamScheme(headers,\n                                   host->transportSocketFactory().implementsSecureTransport());\n\n  // Ensure an http transport scheme is selected before continuing with decoding.\n  ASSERT(headers.Scheme());\n\n  retry_state_ = createRetryState(\n      route_entry_->retryPolicy(), headers, *cluster_, request_vcluster_, config_.runtime_,\n      config_.random_, callbacks_->dispatcher(), config_.timeSource(), route_entry_->priority());\n\n  // Determine which shadow policies to use. 
It's possible that we don't do any shadowing due to\n  // runtime keys.\n  for (const auto& shadow_policy : route_entry_->shadowPolicies()) {\n    const auto& policy_ref = *shadow_policy;\n    if (FilterUtility::shouldShadow(policy_ref, config_.runtime_, callbacks_->streamId())) {\n      active_shadow_policies_.push_back(std::cref(policy_ref));\n    }\n  }\n\n  ENVOY_STREAM_LOG(debug, \"router decoding headers:\\n{}\", *callbacks_, headers);\n\n  // Hang onto the modify_headers function for later use in handling upstream responses.\n  modify_headers_ = modify_headers;\n\n  UpstreamRequestPtr upstream_request =\n      std::make_unique<UpstreamRequest>(*this, std::move(generic_conn_pool));\n  LinkedList::moveIntoList(std::move(upstream_request), upstream_requests_);\n  upstream_requests_.front()->encodeHeaders(end_stream);\n  if (end_stream) {\n    onRequestComplete();\n  }\n\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\nstd::unique_ptr<GenericConnPool> Filter::createConnPool() {\n  GenericConnPoolFactory* factory = nullptr;\n  if (cluster_->upstreamConfig().has_value()) {\n    factory = &Envoy::Config::Utility::getAndCheckFactory<GenericConnPoolFactory>(\n        cluster_->upstreamConfig().value());\n  } else {\n    factory = &Envoy::Config::Utility::getAndCheckFactoryByName<GenericConnPoolFactory>(\n        \"envoy.filters.connection_pools.http.generic\");\n  }\n  const bool should_tcp_proxy =\n      route_entry_->connectConfig().has_value() &&\n      downstream_headers_->getMethodValue() == Http::Headers::get().MethodValues.Connect;\n  return factory->createGenericConnPool(config_.cm_, should_tcp_proxy, *route_entry_,\n                                        callbacks_->streamInfo().protocol(), this);\n}\n\nvoid Filter::sendNoHealthyUpstreamResponse() {\n  callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream);\n  chargeUpstreamCode(Http::Code::ServiceUnavailable, nullptr, false);\n  
callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, \"no healthy upstream\", modify_headers_,\n                             absl::nullopt,\n                             StreamInfo::ResponseCodeDetails::get().NoHealthyUpstream);\n}\n\nHttp::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_stream) {\n  // upstream_requests_.size() cannot be > 1 because that only happens when a per\n  // try timeout occurs with hedge_on_per_try_timeout enabled but the per\n  // try timeout timer is not started until onRequestComplete(). It could be zero\n  // if the first request attempt has already failed and a retry is waiting for\n  // a backoff timer.\n  ASSERT(upstream_requests_.size() <= 1);\n\n  bool buffering = (retry_state_ && retry_state_->enabled()) || !active_shadow_policies_.empty();\n  if (buffering &&\n      getLength(callbacks_->decodingBuffer()) + data.length() > retry_shadow_buffer_limit_) {\n    // The request is larger than we should buffer. Give up on the retry/shadow\n    cluster_->stats().retry_or_shadow_abandoned_.inc();\n    retry_state_.reset();\n    buffering = false;\n    active_shadow_policies_.clear();\n\n    // If we had to abandon buffering and there's no request in progress, abort the request and\n    // clean up. 
This happens if the initial upstream request failed, and we are currently waiting\n    // for a backoff timer before starting the next upstream attempt.\n    if (upstream_requests_.empty()) {\n      cleanup();\n      callbacks_->sendLocalReply(\n          Http::Code::InsufficientStorage, \"exceeded request buffer limit while retrying upstream\",\n          modify_headers_, absl::nullopt,\n          StreamInfo::ResponseCodeDetails::get().RequestPayloadExceededRetryBufferLimit);\n      return Http::FilterDataStatus::StopIterationNoBuffer;\n    }\n  }\n\n  // If we aren't buffering and there is no active request, an abort should have occurred\n  // already.\n  ASSERT(buffering || !upstream_requests_.empty());\n\n  if (buffering) {\n    // If we are going to buffer for retries or shadowing, we need to make a copy before encoding\n    // since it's all moves from here on.\n    if (!upstream_requests_.empty()) {\n      Buffer::OwnedImpl copy(data);\n      upstream_requests_.front()->encodeData(copy, end_stream);\n    }\n\n    // If we are potentially going to retry or shadow this request we need to buffer.\n    // This will not cause the connection manager to 413 because before we hit the\n    // buffer limit we give up on retries and buffering. 
We must buffer using addDecodedData()\n    // so that all buffered data is available by the time we do request complete processing and\n    // potentially shadow.\n    callbacks_->addDecodedData(data, true);\n  } else {\n    upstream_requests_.front()->encodeData(data, end_stream);\n  }\n\n  if (end_stream) {\n    onRequestComplete();\n  }\n\n  return Http::FilterDataStatus::StopIterationNoBuffer;\n}\n\nHttp::FilterTrailersStatus Filter::decodeTrailers(Http::RequestTrailerMap& trailers) {\n  ENVOY_STREAM_LOG(debug, \"router decoding trailers:\\n{}\", *callbacks_, trailers);\n\n  // upstream_requests_.size() cannot be > 1 because that only happens when a per\n  // try timeout occurs with hedge_on_per_try_timeout enabled but the per\n  // try timeout timer is not started until onRequestComplete(). It could be zero\n  // if the first request attempt has already failed and a retry is waiting for\n  // a backoff timer.\n  ASSERT(upstream_requests_.size() <= 1);\n  downstream_trailers_ = &trailers;\n  for (auto& upstream_request : upstream_requests_) {\n    upstream_request->encodeTrailers(trailers);\n  }\n  onRequestComplete();\n  return Http::FilterTrailersStatus::StopIteration;\n}\n\nHttp::FilterMetadataStatus Filter::decodeMetadata(Http::MetadataMap& metadata_map) {\n  Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  if (!upstream_requests_.empty()) {\n    // TODO(soya3129): Save metadata for retry, redirect and shadowing case.\n    upstream_requests_.front()->encodeMetadata(std::move(metadata_map_ptr));\n  }\n  return Http::FilterMetadataStatus::Continue;\n}\n\nvoid Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n  // As the decoder filter only pushes back via watermarks once data has reached\n  // it, it can latch the current buffer limit and does not need to update the\n  // limit if another filter increases it.\n  //\n  // The default is \"do not limit\". 
If there are configured (non-zero) buffer\n  // limits, apply them here.\n  if (callbacks_->decoderBufferLimit() != 0) {\n    retry_shadow_buffer_limit_ = callbacks_->decoderBufferLimit();\n  }\n}\n\nvoid Filter::cleanup() {\n  // All callers of cleanup() should have cleaned out the upstream_requests_\n  // list as appropriate.\n  ASSERT(upstream_requests_.empty());\n\n  retry_state_.reset();\n  if (response_timeout_) {\n    response_timeout_->disableTimer();\n    response_timeout_.reset();\n  }\n}\n\nvoid Filter::maybeDoShadowing() {\n  for (const auto& shadow_policy_wrapper : active_shadow_policies_) {\n    const auto& shadow_policy = shadow_policy_wrapper.get();\n\n    ASSERT(!shadow_policy.cluster().empty());\n    Http::RequestMessagePtr request(new Http::RequestMessageImpl(\n        Http::createHeaderMap<Http::RequestHeaderMapImpl>(*downstream_headers_)));\n    if (callbacks_->decodingBuffer()) {\n      request->body().add(*callbacks_->decodingBuffer());\n    }\n    if (downstream_trailers_) {\n      request->trailers(Http::createHeaderMap<Http::RequestTrailerMapImpl>(*downstream_trailers_));\n    }\n\n    auto options = Http::AsyncClient::RequestOptions()\n                       .setTimeout(timeout_.global_timeout_)\n                       .setParentSpan(callbacks_->activeSpan())\n                       .setChildSpanName(\"mirror\")\n                       .setSampled(shadow_policy.traceSampled());\n    config_.shadowWriter().shadow(shadow_policy.cluster(), std::move(request), options);\n  }\n}\n\nvoid Filter::onRequestComplete() {\n  // This should be called exactly once, when the downstream request has been received in full.\n  ASSERT(!downstream_end_stream_);\n  downstream_end_stream_ = true;\n  Event::Dispatcher& dispatcher = callbacks_->dispatcher();\n  downstream_request_complete_time_ = dispatcher.timeSource().monotonicTime();\n\n  // Possible that we got an immediate reset.\n  if (!upstream_requests_.empty()) {\n    // Even if we got an immediate 
reset, we could still shadow, but that is a riskier change and\n    // seems unnecessary right now.\n    maybeDoShadowing();\n\n    if (timeout_.global_timeout_.count() > 0) {\n      response_timeout_ = dispatcher.createTimer([this]() -> void { onResponseTimeout(); });\n      response_timeout_->enableTimer(timeout_.global_timeout_);\n    }\n\n    for (auto& upstream_request : upstream_requests_) {\n      if (upstream_request->createPerTryTimeoutOnRequestComplete()) {\n        upstream_request->setupPerTryTimeout();\n      }\n    }\n  }\n}\n\nvoid Filter::onDestroy() {\n  // Reset any in-flight upstream requests.\n  resetAll();\n  cleanup();\n}\n\nvoid Filter::onResponseTimeout() {\n  ENVOY_STREAM_LOG(debug, \"upstream timeout\", *callbacks_);\n\n  // If we had an upstream request that got a \"good\" response, save its\n  // upstream timing information into the downstream stream info.\n  if (final_upstream_request_) {\n    callbacks_->streamInfo().setUpstreamTiming(final_upstream_request_->upstreamTiming());\n  }\n\n  // Reset any upstream requests that are still in flight.\n  while (!upstream_requests_.empty()) {\n    UpstreamRequestPtr upstream_request =\n        upstream_requests_.back()->removeFromList(upstream_requests_);\n\n    // Don't do work for upstream requests we've already seen headers for.\n    if (upstream_request->awaitingHeaders()) {\n      cluster_->stats().upstream_rq_timeout_.inc();\n      if (request_vcluster_) {\n        request_vcluster_->stats().upstream_rq_timeout_.inc();\n      }\n\n      if (cluster_->timeoutBudgetStats().has_value()) {\n        // Cancel firing per-try timeout information, because the per-try timeout did not come into\n        // play when the global timeout was hit.\n        upstream_request->recordTimeoutBudget(false);\n      }\n\n      if (upstream_request->upstreamHost()) {\n        upstream_request->upstreamHost()->stats().rq_timeout_.inc();\n      }\n\n      // If this upstream request already hit a \"soft\" 
timeout, then it\n      // already recorded a timeout into outlier detection. Don't do it again.\n      if (!upstream_request->outlierDetectionTimeoutRecorded()) {\n        updateOutlierDetection(Upstream::Outlier::Result::LocalOriginTimeout, *upstream_request,\n                               absl::optional<uint64_t>(enumToInt(timeout_response_code_)));\n      }\n\n      chargeUpstreamAbort(timeout_response_code_, false, *upstream_request);\n    }\n    upstream_request->resetStream();\n  }\n\n  onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout,\n                         StreamInfo::ResponseCodeDetails::get().UpstreamTimeout);\n}\n\n// Called when the per try timeout is hit but we didn't reset the request\n// (hedge_on_per_try_timeout enabled).\nvoid Filter::onSoftPerTryTimeout(UpstreamRequest& upstream_request) {\n  // Track this as a timeout for outlier detection purposes even though we didn't\n  // cancel the request yet and might get a 2xx later.\n  updateOutlierDetection(Upstream::Outlier::Result::LocalOriginTimeout, upstream_request,\n                         absl::optional<uint64_t>(enumToInt(timeout_response_code_)));\n  upstream_request.outlierDetectionTimeoutRecorded(true);\n\n  if (!downstream_response_started_ && retry_state_) {\n    RetryStatus retry_status =\n        retry_state_->shouldHedgeRetryPerTryTimeout([this]() -> void { doRetry(); });\n\n    if (retry_status == RetryStatus::Yes) {\n      pending_retries_++;\n\n      // Don't increment upstream_host->stats().rq_error_ here, we'll do that\n      // later if 1) we hit global timeout or 2) we get bad response headers\n      // back.\n      upstream_request.retried(true);\n\n      // TODO: cluster stat for hedge attempted.\n    } else if (retry_status == RetryStatus::NoOverflow) {\n      callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow);\n    } else if (retry_status == RetryStatus::NoRetryLimitExceeded) {\n      
callbacks_->streamInfo().setResponseFlag(\n          StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded);\n    }\n  }\n}\n\nvoid Filter::onPerTryTimeout(UpstreamRequest& upstream_request) {\n  if (hedging_params_.hedge_on_per_try_timeout_) {\n    onSoftPerTryTimeout(upstream_request);\n    return;\n  }\n\n  cluster_->stats().upstream_rq_per_try_timeout_.inc();\n  if (upstream_request.upstreamHost()) {\n    upstream_request.upstreamHost()->stats().rq_timeout_.inc();\n  }\n\n  upstream_request.resetStream();\n\n  updateOutlierDetection(Upstream::Outlier::Result::LocalOriginTimeout, upstream_request,\n                         absl::optional<uint64_t>(enumToInt(timeout_response_code_)));\n\n  if (maybeRetryReset(Http::StreamResetReason::LocalReset, upstream_request)) {\n    return;\n  }\n\n  chargeUpstreamAbort(timeout_response_code_, false, upstream_request);\n\n  // Remove this upstream request from the list now that we're done with it.\n  upstream_request.removeFromList(upstream_requests_);\n  onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout,\n                         StreamInfo::ResponseCodeDetails::get().UpstreamPerTryTimeout);\n}\n\nvoid Filter::onStreamMaxDurationReached(UpstreamRequest& upstream_request) {\n  upstream_request.resetStream();\n\n  if (maybeRetryReset(Http::StreamResetReason::LocalReset, upstream_request)) {\n    return;\n  }\n\n  upstream_request.removeFromList(upstream_requests_);\n  cleanup();\n\n  if (downstream_response_started_ &&\n      !Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.allow_500_after_100\")) {\n    callbacks_->streamInfo().setResponseCodeDetails(\n        StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached);\n    callbacks_->resetStream();\n  } else {\n    callbacks_->streamInfo().setResponseFlag(\n        StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached);\n    // sendLocalReply may instead reset the stream if downstream_response_started_ is true.\n    
callbacks_->sendLocalReply(\n        Http::Code::RequestTimeout, \"upstream max stream duration reached\", modify_headers_,\n        absl::nullopt, StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached);\n  }\n}\n\nvoid Filter::updateOutlierDetection(Upstream::Outlier::Result result,\n                                    UpstreamRequest& upstream_request,\n                                    absl::optional<uint64_t> code) {\n  if (upstream_request.upstreamHost()) {\n    upstream_request.upstreamHost()->outlierDetector().putResult(result, code);\n  }\n}\n\nvoid Filter::chargeUpstreamAbort(Http::Code code, bool dropped, UpstreamRequest& upstream_request) {\n  if (downstream_response_started_) {\n    if (upstream_request.grpcRqSuccessDeferred()) {\n      upstream_request.upstreamHost()->stats().rq_error_.inc();\n      config_.stats_.rq_reset_after_downstream_response_started_.inc();\n    }\n  } else {\n    Upstream::HostDescriptionConstSharedPtr upstream_host = upstream_request.upstreamHost();\n\n    chargeUpstreamCode(code, upstream_host, dropped);\n    // If we had non-5xx but still have been reset by backend or timeout before\n    // starting response, we treat this as an error. 
We only get non-5xx when\n    // timeout_response_code_ is used for code above, where this member can\n    // assume values such as 204 (NoContent).\n    if (upstream_host != nullptr && !Http::CodeUtility::is5xx(enumToInt(code))) {\n      upstream_host->stats().rq_error_.inc();\n    }\n  }\n}\n\nvoid Filter::onUpstreamTimeoutAbort(StreamInfo::ResponseFlag response_flags,\n                                    absl::string_view details) {\n  Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = cluster()->timeoutBudgetStats();\n  if (tb_stats.has_value()) {\n    Event::Dispatcher& dispatcher = callbacks_->dispatcher();\n    std::chrono::milliseconds response_time = std::chrono::duration_cast<std::chrono::milliseconds>(\n        dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_);\n\n    tb_stats->get().upstream_rq_timeout_budget_percent_used_.recordValue(\n        FilterUtility::percentageOfTimeout(response_time, timeout_.global_timeout_));\n  }\n\n  const absl::string_view body =\n      timeout_response_code_ == Http::Code::GatewayTimeout ? 
\"upstream request timeout\" : \"\";\n  onUpstreamAbort(timeout_response_code_, response_flags, body, false, details);\n}\n\nvoid Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_flags,\n                             absl::string_view body, bool dropped, absl::string_view details) {\n  // If we have not yet sent anything downstream, send a response with an appropriate status code.\n  // Otherwise just reset the ongoing response.\n  if (downstream_response_started_ &&\n      !Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.allow_500_after_100\")) {\n    // This will destroy any created retry timers.\n    callbacks_->streamInfo().setResponseCodeDetails(details);\n    cleanup();\n    callbacks_->resetStream();\n  } else {\n    // This will destroy any created retry timers.\n    cleanup();\n\n    callbacks_->streamInfo().setResponseFlag(response_flags);\n\n    // sendLocalReply may instead reset the stream if downstream_response_started_ is true.\n    callbacks_->sendLocalReply(\n        code, body,\n        [dropped, this](Http::ResponseHeaderMap& headers) {\n          if (dropped && !config_.suppress_envoy_headers_) {\n            headers.addReference(Http::Headers::get().EnvoyOverloaded,\n                                 Http::Headers::get().EnvoyOverloadedValues.True);\n          }\n          modify_headers_(headers);\n        },\n        absl::nullopt, details);\n  }\n}\n\nbool Filter::maybeRetryReset(Http::StreamResetReason reset_reason,\n                             UpstreamRequest& upstream_request) {\n  // We don't retry if we already started the response, don't have a retry policy defined,\n  // or if we've already retried this upstream request (currently only possible if a per\n  // try timeout occurred and hedge_on_per_try_timeout is enabled).\n  if (downstream_response_started_ || !retry_state_ || upstream_request.retried()) {\n    return false;\n  }\n\n  const RetryStatus retry_status =\n      
retry_state_->shouldRetryReset(reset_reason, [this]() -> void { doRetry(); });\n  if (retry_status == RetryStatus::Yes) {\n    pending_retries_++;\n\n    if (upstream_request.upstreamHost()) {\n      upstream_request.upstreamHost()->stats().rq_error_.inc();\n    }\n\n    upstream_request.removeFromList(upstream_requests_);\n    return true;\n  } else if (retry_status == RetryStatus::NoOverflow) {\n    callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow);\n  } else if (retry_status == RetryStatus::NoRetryLimitExceeded) {\n    callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded);\n  }\n\n  return false;\n}\n\nvoid Filter::onUpstreamReset(Http::StreamResetReason reset_reason,\n                             absl::string_view transport_failure_reason,\n                             UpstreamRequest& upstream_request) {\n  ENVOY_STREAM_LOG(debug, \"upstream reset: reset reason: {}, transport failure reason: {}\",\n                   *callbacks_, Http::Utility::resetReasonToString(reset_reason),\n                   transport_failure_reason);\n\n  // TODO: The reset may also come from upstream over the wire. 
In this case it should be\n  // treated as external origin error and distinguished from local origin error.\n  // This matters only when running OutlierDetection with split_external_local_origin_errors\n  // config param set to true.\n  updateOutlierDetection(Upstream::Outlier::Result::LocalOriginConnectFailed, upstream_request,\n                         absl::nullopt);\n\n  if (maybeRetryReset(reset_reason, upstream_request)) {\n    return;\n  }\n\n  const bool dropped = reset_reason == Http::StreamResetReason::Overflow;\n  chargeUpstreamAbort(Http::Code::ServiceUnavailable, dropped, upstream_request);\n  upstream_request.removeFromList(upstream_requests_);\n\n  // If there are other in-flight requests that might see an upstream response,\n  // don't return anything downstream.\n  if (numRequestsAwaitingHeaders() > 0 || pending_retries_ > 0) {\n    return;\n  }\n\n  const StreamInfo::ResponseFlag response_flags = streamResetReasonToResponseFlag(reset_reason);\n\n  const std::string body =\n      absl::StrCat(\"upstream connect error or disconnect/reset before headers. reset reason: \",\n                   Http::Utility::resetReasonToString(reset_reason),\n                   Runtime::runtimeFeatureEnabled(\n                       \"envoy.reloadable_features.http_transport_failure_reason_in_body\") &&\n                           !transport_failure_reason.empty()\n                       ? \", transport failure reason: \"\n                       : \"\",\n                   transport_failure_reason);\n  callbacks_->streamInfo().setUpstreamTransportFailureReason(transport_failure_reason);\n  const std::string& basic_details =\n      downstream_response_started_ ? 
StreamInfo::ResponseCodeDetails::get().LateUpstreamReset\n                                   : StreamInfo::ResponseCodeDetails::get().EarlyUpstreamReset;\n  const std::string details = absl::StrCat(\n      basic_details, \"{\", Http::Utility::resetReasonToString(reset_reason),\n      transport_failure_reason.empty() ? \"\" : absl::StrCat(\",\", transport_failure_reason), \"}\");\n  onUpstreamAbort(Http::Code::ServiceUnavailable, response_flags, body, dropped, details);\n}\n\nvoid Filter::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) {\n  if (retry_state_ && host) {\n    retry_state_->onHostAttempted(host);\n  }\n}\n\nStreamInfo::ResponseFlag\nFilter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) {\n  switch (reset_reason) {\n  case Http::StreamResetReason::ConnectionFailure:\n    return StreamInfo::ResponseFlag::UpstreamConnectionFailure;\n  case Http::StreamResetReason::ConnectionTermination:\n    return StreamInfo::ResponseFlag::UpstreamConnectionTermination;\n  case Http::StreamResetReason::LocalReset:\n  case Http::StreamResetReason::LocalRefusedStreamReset:\n    return StreamInfo::ResponseFlag::LocalReset;\n  case Http::StreamResetReason::Overflow:\n    return StreamInfo::ResponseFlag::UpstreamOverflow;\n  case Http::StreamResetReason::RemoteReset:\n  case Http::StreamResetReason::RemoteRefusedStreamReset:\n    return StreamInfo::ResponseFlag::UpstreamRemoteReset;\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid Filter::handleNon5xxResponseHeaders(absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                                         UpstreamRequest& upstream_request, bool end_stream,\n                                         uint64_t grpc_to_http_status) {\n  // We need to defer gRPC success until after we have processed grpc-status in\n  // the trailers.\n  if (grpc_request_) {\n    if (end_stream) {\n      if (grpc_status && !Http::CodeUtility::is5xx(grpc_to_http_status)) {\n        
upstream_request.upstreamHost()->stats().rq_success_.inc();\n      } else {\n        upstream_request.upstreamHost()->stats().rq_error_.inc();\n      }\n    } else {\n      upstream_request.grpcRqSuccessDeferred(true);\n    }\n  } else {\n    upstream_request.upstreamHost()->stats().rq_success_.inc();\n  }\n}\n\nvoid Filter::onUpstream100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers,\n                                          UpstreamRequest& upstream_request) {\n  chargeUpstreamCode(100, *headers, upstream_request.upstreamHost(), false);\n  ENVOY_STREAM_LOG(debug, \"upstream 100 continue\", *callbacks_);\n\n  downstream_response_started_ = true;\n  final_upstream_request_ = &upstream_request;\n  resetOtherUpstreams(upstream_request);\n\n  // Don't send retries after 100-Continue has been sent on. Arguably we could attempt to do a\n  // retry, assume the next upstream would also send an 100-Continue and swallow the second one\n  // but it's sketchy (as the subsequent upstream might not send a 100-Continue) and not worth\n  // the complexity until someone asks for it.\n  retry_state_.reset();\n\n  // We coalesce 100-continue headers here, to prevent encoder filters and HCM from having to worry\n  // about this. 
This is done in the router filter, rather than UpstreamRequest, since we want to\n  // potentially coalesce across retries and multiple upstream requests in the future, even though\n  // we currently don't support retry after 100.\n  // It's plausible that this functionality might need to move to HCM in the future for internal\n  // redirects, but we would need to maintain the \"only call encode100ContinueHeaders() once\"\n  // invariant.\n  if (!downstream_100_continue_headers_encoded_) {\n    downstream_100_continue_headers_encoded_ = true;\n    callbacks_->encode100ContinueHeaders(std::move(headers));\n  }\n}\n\nvoid Filter::resetAll() {\n  while (!upstream_requests_.empty()) {\n    upstream_requests_.back()->removeFromList(upstream_requests_)->resetStream();\n  }\n}\n\nvoid Filter::resetOtherUpstreams(UpstreamRequest& upstream_request) {\n  // Pop each upstream request on the list and reset it if it's not the one\n  // provided. At the end we'll move it back into the list.\n  UpstreamRequestPtr final_upstream_request;\n  while (!upstream_requests_.empty()) {\n    UpstreamRequestPtr upstream_request_tmp =\n        upstream_requests_.back()->removeFromList(upstream_requests_);\n    if (upstream_request_tmp.get() != &upstream_request) {\n      upstream_request_tmp->resetStream();\n      // TODO: per-host stat for hedge abandoned.\n      // TODO: cluster stat for hedge abandoned.\n    } else {\n      final_upstream_request = std::move(upstream_request_tmp);\n    }\n  }\n\n  ASSERT(final_upstream_request);\n  // Now put the final request back on this list.\n  LinkedList::moveIntoList(std::move(final_upstream_request), upstream_requests_);\n}\n\nvoid Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPtr&& headers,\n                               UpstreamRequest& upstream_request, bool end_stream) {\n  ENVOY_STREAM_LOG(debug, \"upstream headers complete: end_stream={}\", *callbacks_, end_stream);\n\n  modify_headers_(*headers);\n  // When 
grpc-status appears in response headers, convert grpc-status to HTTP status code\n  // for outlier detection. This does not currently change any stats or logging and does not\n  // handle the case when an error grpc-status is sent as a trailer.\n  absl::optional<Grpc::Status::GrpcStatus> grpc_status;\n  uint64_t grpc_to_http_status = 0;\n  if (grpc_request_) {\n    grpc_status = Grpc::Common::getGrpcStatus(*headers);\n    if (grpc_status.has_value()) {\n      grpc_to_http_status = Grpc::Utility::grpcToHttpStatus(grpc_status.value());\n    }\n  }\n\n  if (grpc_status.has_value()) {\n    upstream_request.upstreamHost()->outlierDetector().putHttpResponseCode(grpc_to_http_status);\n  } else {\n    upstream_request.upstreamHost()->outlierDetector().putHttpResponseCode(response_code);\n  }\n\n  if (headers->EnvoyImmediateHealthCheckFail() != nullptr) {\n    upstream_request.upstreamHost()->healthChecker().setUnhealthy();\n  }\n\n  bool could_not_retry = false;\n\n  // Check if this upstream request was already retried, for instance after\n  // hitting a per try timeout. Don't retry it if we already have.\n  if (retry_state_) {\n    if (upstream_request.retried()) {\n      // We already retried this request (presumably for a per try timeout) so\n      // we definitely won't retry it again. 
Check if we would have retried it\n      // if we could.\n      could_not_retry = retry_state_->wouldRetryFromHeaders(*headers);\n    } else {\n      const RetryStatus retry_status =\n          retry_state_->shouldRetryHeaders(*headers, [this]() -> void { doRetry(); });\n      if (retry_status == RetryStatus::Yes) {\n        pending_retries_++;\n        upstream_request.upstreamHost()->stats().rq_error_.inc();\n        Http::CodeStats& code_stats = httpContext().codeStats();\n        code_stats.chargeBasicResponseStat(cluster_->statsScope(), config_.retry_,\n                                           static_cast<Http::Code>(response_code));\n\n        if (!end_stream || !upstream_request.encodeComplete()) {\n          upstream_request.resetStream();\n        }\n        upstream_request.removeFromList(upstream_requests_);\n\n        return;\n      } else if (retry_status == RetryStatus::NoOverflow) {\n        callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow);\n        could_not_retry = true;\n      } else if (retry_status == RetryStatus::NoRetryLimitExceeded) {\n        callbacks_->streamInfo().setResponseFlag(\n            StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded);\n        could_not_retry = true;\n      }\n    }\n  }\n\n  if (route_entry_->internalRedirectPolicy().enabled() &&\n      route_entry_->internalRedirectPolicy().shouldRedirectForResponseCode(\n          static_cast<Http::Code>(response_code)) &&\n      setupRedirect(*headers, upstream_request)) {\n    return;\n    // If the redirect could not be handled, fail open and let it pass to the\n    // next downstream.\n  }\n\n  // Check if we got a \"bad\" response, but there are still upstream requests in\n  // flight awaiting headers or scheduled retries. 
If so, exit to give them a\n  // chance to return before returning a response downstream.\n  if (could_not_retry && (numRequestsAwaitingHeaders() > 0 || pending_retries_ > 0)) {\n    upstream_request.upstreamHost()->stats().rq_error_.inc();\n\n    // Reset the stream because there are other in-flight requests that we'll\n    // wait around for and we're not interested in consuming any body/trailers.\n    upstream_request.removeFromList(upstream_requests_)->resetStream();\n    return;\n  }\n\n  // Make sure any retry timers are destroyed since we may not call cleanup() if end_stream is\n  // false.\n  if (retry_state_) {\n    retry_state_.reset();\n  }\n\n  // Only send upstream service time if we received the complete request and this is not a\n  // premature response.\n  if (DateUtil::timePointValid(downstream_request_complete_time_)) {\n    Event::Dispatcher& dispatcher = callbacks_->dispatcher();\n    MonotonicTime response_received_time = dispatcher.timeSource().monotonicTime();\n    std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>(\n        response_received_time - downstream_request_complete_time_);\n    if (!config_.suppress_envoy_headers_) {\n      headers->setEnvoyUpstreamServiceTime(ms.count());\n    }\n  }\n\n  upstream_request.upstreamCanary(\n      (headers->EnvoyUpstreamCanary() && headers->EnvoyUpstreamCanary()->value() == \"true\") ||\n      upstream_request.upstreamHost()->canary());\n  chargeUpstreamCode(response_code, *headers, upstream_request.upstreamHost(), false);\n  if (!Http::CodeUtility::is5xx(response_code)) {\n    handleNon5xxResponseHeaders(grpc_status, upstream_request, end_stream, grpc_to_http_status);\n  }\n\n  // Append routing cookies\n  for (const auto& header_value : downstream_set_cookies_) {\n    headers->addReferenceKey(Http::Headers::get().SetCookie, header_value);\n  }\n\n  // TODO(zuercher): If access to response_headers_to_add (at any level) is ever needed outside\n  // Router::Filter 
we'll need to find a better location for this work. One possibility is to\n  // provide finalizeResponseHeaders functions on the Router::Config and VirtualHost interfaces.\n  route_entry_->finalizeResponseHeaders(*headers, callbacks_->streamInfo());\n\n  downstream_response_started_ = true;\n  final_upstream_request_ = &upstream_request;\n  resetOtherUpstreams(upstream_request);\n  if (end_stream) {\n    onUpstreamComplete(upstream_request);\n  }\n\n  callbacks_->encodeHeaders(std::move(headers), end_stream,\n                            StreamInfo::ResponseCodeDetails::get().ViaUpstream);\n}\n\nvoid Filter::onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request,\n                            bool end_stream) {\n  // This should be true because when we saw headers we either reset the stream\n  // (hence wouldn't have made it to onUpstreamData) or all other in-flight\n  // streams.\n  ASSERT(upstream_requests_.size() == 1);\n  if (end_stream) {\n    // gRPC request termination without trailers is an error.\n    if (upstream_request.grpcRqSuccessDeferred()) {\n      upstream_request.upstreamHost()->stats().rq_error_.inc();\n    }\n    onUpstreamComplete(upstream_request);\n  }\n\n  callbacks_->encodeData(data, end_stream);\n}\n\nvoid Filter::onUpstreamTrailers(Http::ResponseTrailerMapPtr&& trailers,\n                                UpstreamRequest& upstream_request) {\n  // This should be true because when we saw headers we either reset the stream\n  // (hence wouldn't have made it to onUpstreamTrailers) or all other in-flight\n  // streams.\n  ASSERT(upstream_requests_.size() == 1);\n\n  if (upstream_request.grpcRqSuccessDeferred()) {\n    absl::optional<Grpc::Status::GrpcStatus> grpc_status = Grpc::Common::getGrpcStatus(*trailers);\n    if (grpc_status &&\n        !Http::CodeUtility::is5xx(Grpc::Utility::grpcToHttpStatus(grpc_status.value()))) {\n      upstream_request.upstreamHost()->stats().rq_success_.inc();\n    } else {\n      
upstream_request.upstreamHost()->stats().rq_error_.inc();\n    }\n  }\n\n  onUpstreamComplete(upstream_request);\n\n  callbacks_->encodeTrailers(std::move(trailers));\n}\n\nvoid Filter::onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map) {\n  callbacks_->encodeMetadata(std::move(metadata_map));\n}\n\nvoid Filter::onUpstreamComplete(UpstreamRequest& upstream_request) {\n  if (!downstream_end_stream_) {\n    upstream_request.resetStream();\n  }\n  callbacks_->streamInfo().setUpstreamTiming(final_upstream_request_->upstreamTiming());\n\n  Event::Dispatcher& dispatcher = callbacks_->dispatcher();\n  std::chrono::milliseconds response_time = std::chrono::duration_cast<std::chrono::milliseconds>(\n      dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_);\n\n  Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = cluster()->timeoutBudgetStats();\n  if (tb_stats.has_value()) {\n    tb_stats->get().upstream_rq_timeout_budget_percent_used_.recordValue(\n        FilterUtility::percentageOfTimeout(response_time, timeout_.global_timeout_));\n  }\n\n  if (config_.emit_dynamic_stats_ && !callbacks_->streamInfo().healthCheck() &&\n      DateUtil::timePointValid(downstream_request_complete_time_)) {\n    upstream_request.upstreamHost()->outlierDetector().putResponseTime(response_time);\n    const bool internal_request = Http::HeaderUtility::isEnvoyInternalRequest(*downstream_headers_);\n\n    Http::CodeStats& code_stats = httpContext().codeStats();\n    Http::CodeStats::ResponseTimingInfo info{config_.scope_,\n                                             cluster_->statsScope(),\n                                             config_.empty_stat_name_,\n                                             response_time,\n                                             upstream_request.upstreamCanary(),\n                                             internal_request,\n                                             route_entry_->virtualHost().statName(),\n                  
                           request_vcluster_ ? request_vcluster_->statName()\n                                                               : config_.empty_stat_name_,\n                                             config_.zone_name_,\n                                             upstreamZone(upstream_request.upstreamHost())};\n\n    code_stats.chargeResponseTiming(info);\n\n    if (alt_stat_prefix_ != nullptr) {\n      Http::CodeStats::ResponseTimingInfo info{config_.scope_,\n                                               cluster_->statsScope(),\n                                               alt_stat_prefix_->statName(),\n                                               response_time,\n                                               upstream_request.upstreamCanary(),\n                                               internal_request,\n                                               config_.empty_stat_name_,\n                                               config_.empty_stat_name_,\n                                               config_.zone_name_,\n                                               upstreamZone(upstream_request.upstreamHost())};\n\n      code_stats.chargeResponseTiming(info);\n    }\n  }\n\n  upstream_request.removeFromList(upstream_requests_);\n  cleanup();\n}\n\nbool Filter::setupRedirect(const Http::ResponseHeaderMap& headers,\n                           UpstreamRequest& upstream_request) {\n  ENVOY_STREAM_LOG(debug, \"attempting internal redirect\", *callbacks_);\n  const Http::HeaderEntry* location = headers.Location();\n\n  // If the internal redirect succeeds, callbacks_->recreateStream() will result in the\n  // destruction of this filter before the stream is marked as complete, and onDestroy will reset\n  // the stream.\n  //\n  // Normally when a stream is complete we signal this by resetting the upstream but this cam not\n  // be done in this case because if recreateStream fails, the \"failure\" path continues to call\n  // code in 
onUpstreamHeaders which requires the upstream *not* be reset. To avoid onDestroy\n  // performing a spurious stream reset in the case recreateStream() succeeds, we explicitly track\n  // stream completion here and check it in onDestroy. This is annoyingly complicated but is\n  // better than needlessly resetting streams.\n  attempting_internal_redirect_with_complete_stream_ =\n      upstream_request.upstreamTiming().last_upstream_rx_byte_received_ && downstream_end_stream_;\n\n  // Redirects are not supported for streaming requests yet.\n  if (downstream_end_stream_ &&\n      !callbacks_->decodingBuffer() && // Redirects with body not yet supported.\n      location != nullptr &&\n      convertRequestHeadersForInternalRedirect(*downstream_headers_, *location) &&\n      callbacks_->recreateStream()) {\n    cluster_->stats().upstream_internal_redirect_succeeded_total_.inc();\n    return true;\n  }\n\n  attempting_internal_redirect_with_complete_stream_ = false;\n\n  ENVOY_STREAM_LOG(debug, \"Internal redirect failed\", *callbacks_);\n  cluster_->stats().upstream_internal_redirect_failed_total_.inc();\n  return false;\n}\n\nbool Filter::convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream_headers,\n                                                      const Http::HeaderEntry& internal_redirect) {\n  if (!downstream_headers.Path()) {\n    ENVOY_STREAM_LOG(trace, \"no path in downstream_headers\", *callbacks_);\n    return false;\n  }\n\n  // Make sure the redirect response contains a URL to redirect to.\n  if (internal_redirect.value().getStringView().empty()) {\n    config_.stats_.passthrough_internal_redirect_bad_location_.inc();\n    return false;\n  }\n  Http::Utility::Url absolute_url;\n  if (!absolute_url.initialize(internal_redirect.value().getStringView(), false)) {\n    config_.stats_.passthrough_internal_redirect_bad_location_.inc();\n    return false;\n  }\n\n  const auto& policy = route_entry_->internalRedirectPolicy();\n  // Don't 
allow serving TLS responses over plaintext unless allowed by policy.\n  const bool scheme_is_http = schemeIsHttp(downstream_headers, *callbacks_->connection());\n  const bool target_is_http = absolute_url.scheme() == Http::Headers::get().SchemeValues.Http;\n  if (!policy.isCrossSchemeRedirectAllowed() && scheme_is_http != target_is_http) {\n    config_.stats_.passthrough_internal_redirect_unsafe_scheme_.inc();\n    return false;\n  }\n\n  const StreamInfo::FilterStateSharedPtr& filter_state = callbacks_->streamInfo().filterState();\n  // Make sure that performing the redirect won't result in exceeding the configured number of\n  // redirects allowed for this route.\n  if (!filter_state->hasData<StreamInfo::UInt32Accessor>(NumInternalRedirectsFilterStateName)) {\n    filter_state->setData(\n        NumInternalRedirectsFilterStateName, std::make_shared<StreamInfo::UInt32AccessorImpl>(0),\n        StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Request);\n  }\n  StreamInfo::UInt32Accessor& num_internal_redirect =\n      filter_state->getDataMutable<StreamInfo::UInt32Accessor>(NumInternalRedirectsFilterStateName);\n\n  if (num_internal_redirect.value() >= policy.maxInternalRedirects()) {\n    config_.stats_.passthrough_internal_redirect_too_many_redirects_.inc();\n    return false;\n  }\n  // Copy the old values, so they can be restored if the redirect fails.\n  const std::string original_host(downstream_headers.getHostValue());\n  const std::string original_path(downstream_headers.getPathValue());\n  const bool scheme_is_set = (downstream_headers.Scheme() != nullptr);\n  Cleanup restore_original_headers(\n      [&downstream_headers, original_host, original_path, scheme_is_set, scheme_is_http]() {\n        downstream_headers.setHost(original_host);\n        downstream_headers.setPath(original_path);\n        if (scheme_is_set) {\n          downstream_headers.setScheme(scheme_is_http ? 
Http::Headers::get().SchemeValues.Http\n                                                      : Http::Headers::get().SchemeValues.Https);\n        }\n      });\n\n  // Replace the original host, scheme and path.\n  downstream_headers.setScheme(absolute_url.scheme());\n  downstream_headers.setHost(absolute_url.hostAndPort());\n  downstream_headers.setPath(absolute_url.pathAndQueryParams());\n\n  callbacks_->clearRouteCache();\n  const auto route = callbacks_->route();\n  // Don't allow a redirect to a non existing route.\n  if (!route) {\n    config_.stats_.passthrough_internal_redirect_no_route_.inc();\n    return false;\n  }\n\n  const auto& route_name = route->routeEntry()->routeName();\n  for (const auto& predicate : policy.predicates()) {\n    if (!predicate->acceptTargetRoute(*filter_state, route_name, !scheme_is_http,\n                                      !target_is_http)) {\n      config_.stats_.passthrough_internal_redirect_predicate_.inc();\n      ENVOY_STREAM_LOG(trace, \"rejecting redirect targeting {}, by {} predicate\", *callbacks_,\n                       route_name, predicate->name());\n      return false;\n    }\n  }\n\n  num_internal_redirect.increment();\n  restore_original_headers.cancel();\n  // Preserve the original request URL for the second pass.\n  downstream_headers.setEnvoyOriginalUrl(absl::StrCat(scheme_is_http\n                                                          ? 
Http::Headers::get().SchemeValues.Http\n                                                          : Http::Headers::get().SchemeValues.Https,\n                                                      \"://\", original_host, original_path));\n  return true;\n}\n\nvoid Filter::doRetry() {\n  ENVOY_STREAM_LOG(debug, \"performing retry\", *callbacks_);\n\n  is_retry_ = true;\n  attempt_count_++;\n  ASSERT(pending_retries_ > 0);\n  pending_retries_--;\n\n  std::unique_ptr<GenericConnPool> generic_conn_pool = createConnPool();\n  if (!generic_conn_pool) {\n    sendNoHealthyUpstreamResponse();\n    cleanup();\n    return;\n  }\n  UpstreamRequestPtr upstream_request =\n      std::make_unique<UpstreamRequest>(*this, std::move(generic_conn_pool));\n\n  if (include_attempt_count_in_request_) {\n    downstream_headers_->setEnvoyAttemptCount(attempt_count_);\n  }\n\n  UpstreamRequest* upstream_request_tmp = upstream_request.get();\n  LinkedList::moveIntoList(std::move(upstream_request), upstream_requests_);\n  upstream_requests_.front()->encodeHeaders(!callbacks_->decodingBuffer() &&\n                                            !downstream_trailers_ && downstream_end_stream_);\n  // It's possible we got immediately reset which means the upstream request we just\n  // added to the front of the list might have been removed, so we need to check to make\n  // sure we don't encodeData on the wrong request.\n  if (!upstream_requests_.empty() && (upstream_requests_.front().get() == upstream_request_tmp)) {\n    if (callbacks_->decodingBuffer()) {\n      // If we are doing a retry we need to make a copy.\n      Buffer::OwnedImpl copy(*callbacks_->decodingBuffer());\n      upstream_requests_.front()->encodeData(copy, !downstream_trailers_ && downstream_end_stream_);\n    }\n\n    if (downstream_trailers_) {\n      upstream_requests_.front()->encodeTrailers(*downstream_trailers_);\n    }\n  }\n}\n\nuint32_t Filter::numRequestsAwaitingHeaders() {\n  return 
std::count_if(upstream_requests_.begin(), upstream_requests_.end(),\n                       [](const auto& req) -> bool { return req->awaitingHeaders(); });\n}\n\nRetryStatePtr ProdFilter::createRetryState(const RetryPolicy& policy,\n                                           Http::RequestHeaderMap& request_headers,\n                                           const Upstream::ClusterInfo& cluster,\n                                           const VirtualCluster* vcluster, Runtime::Loader& runtime,\n                                           Random::RandomGenerator& random,\n                                           Event::Dispatcher& dispatcher, TimeSource& time_source,\n                                           Upstream::ResourcePriority priority) {\n  return RetryStateImpl::create(policy, request_headers, cluster, vcluster, runtime, random,\n                                dispatcher, time_source, priority);\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/router.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/extensions/filters/http/router/v3/router.pb.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/router/shadow_writer.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/access_log/access_log_impl.h\"\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/hash.h\"\n#include \"common/common/hex.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/http/utility.h\"\n#include \"common/router/config_impl.h\"\n#include \"common/router/upstream_request.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * All router filter stats. 
@see stats_macros.h\n */\n// clang-format off\n#define ALL_ROUTER_STATS(COUNTER)                                                                  \\\n  COUNTER(passthrough_internal_redirect_bad_location)                                              \\\n  COUNTER(passthrough_internal_redirect_unsafe_scheme)                                             \\\n  COUNTER(passthrough_internal_redirect_too_many_redirects)                                        \\\n  COUNTER(passthrough_internal_redirect_no_route)                                                  \\\n  COUNTER(passthrough_internal_redirect_predicate)                                                 \\\n  COUNTER(no_route)                                                                                \\\n  COUNTER(no_cluster)                                                                              \\\n  COUNTER(rq_redirect)                                                                             \\\n  COUNTER(rq_direct_response)                                                                      \\\n  COUNTER(rq_total)                                                                                \\\n  COUNTER(rq_reset_after_downstream_response_started)\n// clang-format on\n\n/**\n * Struct definition for all router filter stats. 
@see stats_macros.h\n */\nstruct FilterStats {\n  ALL_ROUTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Router filter utilities split out for ease of testing.\n */\nclass FilterUtility {\npublic:\n  struct TimeoutData {\n    std::chrono::milliseconds global_timeout_{0};\n    std::chrono::milliseconds per_try_timeout_{0};\n  };\n\n  struct HedgingParams {\n    bool hedge_on_per_try_timeout_;\n  };\n\n  class StrictHeaderChecker {\n  public:\n    struct HeaderCheckResult {\n      bool valid_ = true;\n      const Http::HeaderEntry* entry_;\n    };\n\n    /**\n     * Determine whether a given header's value passes the strict validation\n     * defined for that header.\n     * @param headers supplies the headers from which to get the target header.\n     * @param target_header is the header to be validated.\n     * @return HeaderCheckResult containing the entry for @param target_header\n     *         and valid_ set to FALSE if @param target_header is set to an\n     *         invalid value. 
If @param target_header doesn't appear in\n     *         @param headers, return a result with valid_ set to TRUE.\n     */\n    static const HeaderCheckResult checkHeader(Http::RequestHeaderMap& headers,\n                                               const Http::LowerCaseString& target_header);\n\n    using ParseRetryFlagsFunc = std::function<std::pair<uint32_t, bool>(absl::string_view)>;\n\n  private:\n    static HeaderCheckResult hasValidRetryFields(const Http::HeaderEntry* header_entry,\n                                                 const ParseRetryFlagsFunc& parse_fn) {\n      HeaderCheckResult r;\n      if (header_entry) {\n        const auto flags_and_validity = parse_fn(header_entry->value().getStringView());\n        r.valid_ = flags_and_validity.second;\n        r.entry_ = header_entry;\n      }\n      return r;\n    }\n\n    static HeaderCheckResult isInteger(const Http::HeaderEntry* header_entry) {\n      HeaderCheckResult r;\n      if (header_entry) {\n        uint64_t out;\n        r.valid_ = absl::SimpleAtoi(header_entry->value().getStringView(), &out);\n        r.entry_ = header_entry;\n      }\n      return r;\n    }\n  };\n\n  /**\n   * Returns response_time / timeout, as a  percentage as [0, 100]. 
Returns 0\n   * if there is no timeout.\n   * @param response_time supplies the response time thus far.\n   * @param timeout supplies  the timeout to get the percentage of.\n   * @return the percentage of timeout [0, 100] for stats use.\n   */\n  static uint64_t percentageOfTimeout(const std::chrono::milliseconds response_time,\n                                      const std::chrono::milliseconds timeout);\n\n  /**\n   * Set the :scheme header based on whether the underline transport is secure.\n   */\n  static void setUpstreamScheme(Http::RequestHeaderMap& headers, bool use_secure_transport);\n\n  /**\n   * Determine whether a request should be shadowed.\n   * @param policy supplies the route's shadow policy.\n   * @param runtime supplies the runtime to lookup the shadow key in.\n   * @param stable_random supplies the random number to use when determining whether shadowing\n   *        should take place.\n   * @return TRUE if shadowing should take place.\n   */\n  static bool shouldShadow(const ShadowPolicy& policy, Runtime::Loader& runtime,\n                           uint64_t stable_random);\n\n  /**\n   * Determine the final timeout to use based on the route as well as the request headers.\n   * @param route supplies the request route.\n   * @param request_headers supplies the request headers.\n   * @param insert_envoy_expected_request_timeout_ms insert\n   *        x-envoy-expected-request-timeout-ms?\n   * @param grpc_request tells if the request is a gRPC request.\n   * @return TimeoutData for both the global and per try timeouts.\n   */\n  static TimeoutData finalTimeout(const RouteEntry& route, Http::RequestHeaderMap& request_headers,\n                                  bool insert_envoy_expected_request_timeout_ms, bool grpc_request,\n                                  bool per_try_timeout_hedging_enabled,\n                                  bool respect_expected_rq_timeout);\n\n  static bool trySetGlobalTimeout(const Http::HeaderEntry* 
header_timeout_entry,\n                                  TimeoutData& timeout);\n\n  /**\n   * Determine the final hedging settings after applying randomized behavior.\n   * @param route supplies the request route.\n   * @param request_headers supplies the request headers.\n   * @return HedgingParams the final parameters to use for request hedging.\n   */\n  static HedgingParams finalHedgingParams(const RouteEntry& route,\n                                          Http::RequestHeaderMap& request_headers);\n};\n\n/**\n * Configuration for the router filter.\n */\nclass FilterConfig {\npublic:\n  FilterConfig(const std::string& stat_prefix, const LocalInfo::LocalInfo& local_info,\n               Stats::Scope& scope, Upstream::ClusterManager& cm, Runtime::Loader& runtime,\n               Random::RandomGenerator& random, ShadowWriterPtr&& shadow_writer,\n               bool emit_dynamic_stats, bool start_child_span, bool suppress_envoy_headers,\n               bool respect_expected_rq_timeout,\n               const Protobuf::RepeatedPtrField<std::string>& strict_check_headers,\n               TimeSource& time_source, Http::Context& http_context)\n      : scope_(scope), local_info_(local_info), cm_(cm), runtime_(runtime),\n        random_(random), stats_{ALL_ROUTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix))},\n        emit_dynamic_stats_(emit_dynamic_stats), start_child_span_(start_child_span),\n        suppress_envoy_headers_(suppress_envoy_headers),\n        respect_expected_rq_timeout_(respect_expected_rq_timeout), http_context_(http_context),\n        stat_name_pool_(scope_.symbolTable()), retry_(stat_name_pool_.add(\"retry\")),\n        zone_name_(stat_name_pool_.add(local_info_.zoneName())),\n        empty_stat_name_(stat_name_pool_.add(\"\")), shadow_writer_(std::move(shadow_writer)),\n        time_source_(time_source) {\n    if (!strict_check_headers.empty()) {\n      strict_check_headers_ = std::make_unique<HeaderVector>();\n      for (const auto& header : 
strict_check_headers) {\n        strict_check_headers_->emplace_back(Http::LowerCaseString(header));\n      }\n    }\n  }\n\n  FilterConfig(const std::string& stat_prefix, Server::Configuration::FactoryContext& context,\n               ShadowWriterPtr&& shadow_writer,\n               const envoy::extensions::filters::http::router::v3::Router& config)\n      : FilterConfig(stat_prefix, context.localInfo(), context.scope(), context.clusterManager(),\n                     context.runtime(), context.api().randomGenerator(), std::move(shadow_writer),\n                     PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, dynamic_stats, true),\n                     config.start_child_span(), config.suppress_envoy_headers(),\n                     config.respect_expected_rq_timeout(), config.strict_check_headers(),\n                     context.api().timeSource(), context.httpContext()) {\n    for (const auto& upstream_log : config.upstream_log()) {\n      upstream_logs_.push_back(AccessLog::AccessLogFactory::fromProto(upstream_log, context));\n    }\n  }\n  using HeaderVector = std::vector<Http::LowerCaseString>;\n  using HeaderVectorPtr = std::unique_ptr<HeaderVector>;\n\n  ShadowWriter& shadowWriter() { return *shadow_writer_; }\n  TimeSource& timeSource() { return time_source_; }\n\n  Stats::Scope& scope_;\n  const LocalInfo::LocalInfo& local_info_;\n  Upstream::ClusterManager& cm_;\n  Runtime::Loader& runtime_;\n  Random::RandomGenerator& random_;\n  FilterStats stats_;\n  const bool emit_dynamic_stats_;\n  const bool start_child_span_;\n  const bool suppress_envoy_headers_;\n  const bool respect_expected_rq_timeout_;\n  // TODO(xyu-stripe): Make this a bitset to keep cluster memory footprint down.\n  HeaderVectorPtr strict_check_headers_;\n  std::list<AccessLog::InstanceSharedPtr> upstream_logs_;\n  Http::Context& http_context_;\n  Stats::StatNamePool stat_name_pool_;\n  Stats::StatName retry_;\n  Stats::StatName zone_name_;\n  Stats::StatName empty_stat_name_;\n\nprivate:\n  
ShadowWriterPtr shadow_writer_;\n  TimeSource& time_source_;\n};\n\nusing FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;\n\nclass UpstreamRequest;\nusing UpstreamRequestPtr = std::unique_ptr<UpstreamRequest>;\n\n// The interface the UpstreamRequest has to interact with the router filter.\n// Split out primarily for unit test mocks.\nclass RouterFilterInterface {\npublic:\n  virtual ~RouterFilterInterface() = default;\n\n  virtual void onUpstream100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers,\n                                            UpstreamRequest& upstream_request) PURE;\n  virtual void onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPtr&& headers,\n                                 UpstreamRequest& upstream_request, bool end_stream) PURE;\n  virtual void onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request,\n                              bool end_stream) PURE;\n  virtual void onUpstreamTrailers(Http::ResponseTrailerMapPtr&& trailers,\n                                  UpstreamRequest& upstream_request) PURE;\n  virtual void onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map) PURE;\n  virtual void onUpstreamReset(Http::StreamResetReason reset_reason,\n                               absl::string_view transport_failure,\n                               UpstreamRequest& upstream_request) PURE;\n  virtual void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) PURE;\n  virtual void onPerTryTimeout(UpstreamRequest& upstream_request) PURE;\n  virtual void onStreamMaxDurationReached(UpstreamRequest& upstream_request) PURE;\n\n  virtual Http::StreamDecoderFilterCallbacks* callbacks() PURE;\n  virtual Upstream::ClusterInfoConstSharedPtr cluster() PURE;\n  virtual FilterConfig& config() PURE;\n  virtual FilterUtility::TimeoutData timeout() PURE;\n  virtual Http::RequestHeaderMap* downstreamHeaders() PURE;\n  virtual Http::RequestTrailerMap* downstreamTrailers() PURE;\n  virtual bool 
downstreamResponseStarted() const PURE;\n  virtual bool downstreamEndStream() const PURE;\n  virtual uint32_t attemptCount() const PURE;\n  virtual const VirtualCluster* requestVcluster() const PURE;\n  virtual const RouteEntry* routeEntry() const PURE;\n  virtual const std::list<UpstreamRequestPtr>& upstreamRequests() const PURE;\n  virtual const UpstreamRequest* finalUpstreamRequest() const PURE;\n  virtual TimeSource& timeSource() PURE;\n};\n\n/**\n * Service routing filter.\n */\nclass Filter : Logger::Loggable<Logger::Id::router>,\n               public Http::StreamDecoderFilter,\n               public Upstream::LoadBalancerContextBase,\n               public RouterFilterInterface {\npublic:\n  Filter(FilterConfig& config)\n      : config_(config), final_upstream_request_(nullptr),\n        downstream_100_continue_headers_encoded_(false), downstream_response_started_(false),\n        downstream_end_stream_(false), is_retry_(false),\n        attempting_internal_redirect_with_complete_stream_(false) {}\n\n  ~Filter() override;\n\n  static StreamInfo::ResponseFlag\n  streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason);\n\n  // Http::StreamFilterBase\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  Http::FilterMetadataStatus decodeMetadata(Http::MetadataMap& metadata_map) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\n  // Upstream::LoadBalancerContext\n  absl::optional<uint64_t> computeHashKey() override {\n    if (route_entry_ && downstream_headers_) {\n      auto hash_policy = route_entry_->hashPolicy();\n      if (hash_policy) {\n        return 
hash_policy->generateHash(\n            callbacks_->streamInfo().downstreamRemoteAddress().get(), *downstream_headers_,\n            [this](const std::string& key, const std::string& path, std::chrono::seconds max_age) {\n              return addDownstreamSetCookie(key, path, max_age);\n            },\n            callbacks_->streamInfo().filterState());\n      }\n    }\n    return {};\n  }\n  const Router::MetadataMatchCriteria* metadataMatchCriteria() override {\n    if (route_entry_) {\n      // Have we been called before? If so, there's no need to recompute because\n      // by the time this method is called for the first time, route_entry_ should\n      // not change anymore.\n      if (metadata_match_ != nullptr) {\n        return metadata_match_.get();\n      }\n\n      // The request's metadata, if present, takes precedence over the route's.\n      const auto& request_metadata = callbacks_->streamInfo().dynamicMetadata().filter_metadata();\n      const auto filter_it = request_metadata.find(Envoy::Config::MetadataFilters::get().ENVOY_LB);\n      if (filter_it != request_metadata.end()) {\n        if (route_entry_->metadataMatchCriteria() != nullptr) {\n          metadata_match_ =\n              route_entry_->metadataMatchCriteria()->mergeMatchCriteria(filter_it->second);\n        } else {\n          metadata_match_ = std::make_unique<Router::MetadataMatchCriteriaImpl>(filter_it->second);\n        }\n        return metadata_match_.get();\n      }\n      return route_entry_->metadataMatchCriteria();\n    }\n    return nullptr;\n  }\n  const Network::Connection* downstreamConnection() const override {\n    return callbacks_->connection();\n  }\n  const Http::RequestHeaderMap* downstreamHeaders() const override { return downstream_headers_; }\n\n  bool shouldSelectAnotherHost(const Upstream::Host& host) override {\n    // We only care about host selection when performing a retry, at which point we consult the\n    // RetryState to see if we're configured to 
avoid certain hosts during retries.\n    if (!is_retry_) {\n      return false;\n    }\n\n    ASSERT(retry_state_);\n    return retry_state_->shouldSelectAnotherHost(host);\n  }\n\n  const Upstream::HealthyAndDegradedLoad& determinePriorityLoad(\n      const Upstream::PrioritySet& priority_set,\n      const Upstream::HealthyAndDegradedLoad& original_priority_load,\n      const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) override {\n    // We only modify the priority load on retries.\n    if (!is_retry_) {\n      return original_priority_load;\n    }\n\n    return retry_state_->priorityLoadForRetry(priority_set, original_priority_load,\n                                              priority_mapping_func);\n  }\n\n  uint32_t hostSelectionRetryCount() const override {\n    if (!is_retry_) {\n      return 1;\n    }\n\n    return retry_state_->hostSelectionMaxAttempts();\n  }\n\n  Network::Socket::OptionsSharedPtr upstreamSocketOptions() const override {\n    return callbacks_->getUpstreamSocketOptions();\n  }\n\n  Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override {\n    return transport_socket_options_;\n  }\n\n  /**\n   * Set a computed cookie to be sent with the downstream headers.\n   * @param key supplies the size of the cookie\n   * @param max_age the lifetime of the cookie\n   * @param  path the path of the cookie, or \"\"\n   * @return std::string the value of the new cookie\n   */\n  std::string addDownstreamSetCookie(const std::string& key, const std::string& path,\n                                     std::chrono::seconds max_age) {\n    // The cookie value should be the same per connection so that if multiple\n    // streams race on the same path, they all receive the same cookie.\n    // Since the downstream port is part of the hashed value, multiple HTTP1\n    // connections can receive different cookies if they race on requests.\n    std::string value;\n    const Network::Connection* conn = 
downstreamConnection();\n    // Need to check for null conn if this is ever used by Http::AsyncClient in the future.\n    value = conn->remoteAddress()->asString() + conn->localAddress()->asString();\n\n    const std::string cookie_value = Hex::uint64ToHex(HashUtil::xxHash64(value));\n    downstream_set_cookies_.emplace_back(\n        Http::Utility::makeSetCookieValue(key, cookie_value, path, max_age, true));\n    return cookie_value;\n  }\n\n  // RouterFilterInterface\n  void onUpstream100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers,\n                                    UpstreamRequest& upstream_request) override;\n  void onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPtr&& headers,\n                         UpstreamRequest& upstream_request, bool end_stream) override;\n  void onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request,\n                      bool end_stream) override;\n  void onUpstreamTrailers(Http::ResponseTrailerMapPtr&& trailers,\n                          UpstreamRequest& upstream_request) override;\n  void onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map) override;\n  void onUpstreamReset(Http::StreamResetReason reset_reason, absl::string_view transport_failure,\n                       UpstreamRequest& upstream_request) override;\n  void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) override;\n  void onPerTryTimeout(UpstreamRequest& upstream_request) override;\n  void onStreamMaxDurationReached(UpstreamRequest& upstream_request) override;\n  Http::StreamDecoderFilterCallbacks* callbacks() override { return callbacks_; }\n  Upstream::ClusterInfoConstSharedPtr cluster() override { return cluster_; }\n  FilterConfig& config() override { return config_; }\n  FilterUtility::TimeoutData timeout() override { return timeout_; }\n  Http::RequestHeaderMap* downstreamHeaders() override { return downstream_headers_; }\n  Http::RequestTrailerMap* downstreamTrailers() override { return 
downstream_trailers_; }\n  bool downstreamResponseStarted() const override { return downstream_response_started_; }\n  bool downstreamEndStream() const override { return downstream_end_stream_; }\n  uint32_t attemptCount() const override { return attempt_count_; }\n  const VirtualCluster* requestVcluster() const override { return request_vcluster_; }\n  const RouteEntry* routeEntry() const override { return route_entry_; }\n  const std::list<UpstreamRequestPtr>& upstreamRequests() const override {\n    return upstream_requests_;\n  }\n  const UpstreamRequest* finalUpstreamRequest() const override { return final_upstream_request_; }\n  TimeSource& timeSource() override { return config_.timeSource(); }\n\nprivate:\n  friend class UpstreamRequest;\n\n  RetryStatePtr retry_state_;\n\n  Stats::StatName upstreamZone(Upstream::HostDescriptionConstSharedPtr upstream_host);\n  void chargeUpstreamCode(uint64_t response_status_code,\n                          const Http::ResponseHeaderMap& response_headers,\n                          Upstream::HostDescriptionConstSharedPtr upstream_host, bool dropped);\n  void chargeUpstreamCode(Http::Code code, Upstream::HostDescriptionConstSharedPtr upstream_host,\n                          bool dropped);\n  void chargeUpstreamAbort(Http::Code code, bool dropped, UpstreamRequest& upstream_request);\n  void cleanup();\n  virtual RetryStatePtr createRetryState(const RetryPolicy& policy,\n                                         Http::RequestHeaderMap& request_headers,\n                                         const Upstream::ClusterInfo& cluster,\n                                         const VirtualCluster* vcluster, Runtime::Loader& runtime,\n                                         Random::RandomGenerator& random,\n                                         Event::Dispatcher& dispatcher, TimeSource& time_source,\n                                         Upstream::ResourcePriority priority) PURE;\n\n  std::unique_ptr<GenericConnPool> 
createConnPool();\n  UpstreamRequestPtr createUpstreamRequest();\n\n  void maybeDoShadowing();\n  bool maybeRetryReset(Http::StreamResetReason reset_reason, UpstreamRequest& upstream_request);\n  uint32_t numRequestsAwaitingHeaders();\n  void onGlobalTimeout();\n  void onRequestComplete();\n  void onResponseTimeout();\n  // Handle an upstream request aborted due to a local timeout.\n  void onSoftPerTryTimeout();\n  void onSoftPerTryTimeout(UpstreamRequest& upstream_request);\n  void onUpstreamTimeoutAbort(StreamInfo::ResponseFlag response_flag, absl::string_view details);\n  // Handle an \"aborted\" upstream request, meaning we didn't see response\n  // headers (e.g. due to a reset). Handles recording stats and responding\n  // downstream if appropriate.\n  void onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_flag,\n                       absl::string_view body, bool dropped, absl::string_view details);\n  void onUpstreamComplete(UpstreamRequest& upstream_request);\n  // Reset all in-flight upstream requests.\n  void resetAll();\n  // Reset all in-flight upstream requests that do NOT match the passed argument. 
This is used\n  // if a \"good\" response comes back and we return downstream, so there is no point in waiting\n  // for the remaining upstream requests to return.\n  void resetOtherUpstreams(UpstreamRequest& upstream_request);\n  void sendNoHealthyUpstreamResponse();\n  bool setupRedirect(const Http::ResponseHeaderMap& headers, UpstreamRequest& upstream_request);\n  bool convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream_headers,\n                                                const Http::HeaderEntry& internal_redirect);\n  void updateOutlierDetection(Upstream::Outlier::Result result, UpstreamRequest& upstream_request,\n                              absl::optional<uint64_t> code);\n  void doRetry();\n  // Called immediately after a non-5xx header is received from upstream, performs stats accounting\n  // and handle difference between gRPC and non-gRPC requests.\n  void handleNon5xxResponseHeaders(absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                                   UpstreamRequest& upstream_request, bool end_stream,\n                                   uint64_t grpc_to_http_status);\n  Http::Context& httpContext() { return config_.http_context_; }\n\n  FilterConfig& config_;\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n  RouteConstSharedPtr route_;\n  const RouteEntry* route_entry_{};\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n  std::unique_ptr<Stats::StatNameDynamicStorage> alt_stat_prefix_;\n  const VirtualCluster* request_vcluster_;\n  Event::TimerPtr response_timeout_;\n  FilterUtility::TimeoutData timeout_;\n  FilterUtility::HedgingParams hedging_params_;\n  Http::Code timeout_response_code_ = Http::Code::GatewayTimeout;\n  std::list<UpstreamRequestPtr> upstream_requests_;\n  // Tracks which upstream request \"wins\" and will have the corresponding\n  // response forwarded downstream\n  UpstreamRequest* final_upstream_request_;\n  bool grpc_request_{};\n  Http::RequestHeaderMap* 
downstream_headers_{};\n  Http::RequestTrailerMap* downstream_trailers_{};\n  MonotonicTime downstream_request_complete_time_;\n  uint32_t retry_shadow_buffer_limit_{std::numeric_limits<uint32_t>::max()};\n  MetadataMatchCriteriaConstPtr metadata_match_;\n  std::function<void(Http::ResponseHeaderMap&)> modify_headers_;\n  std::vector<std::reference_wrapper<const ShadowPolicy>> active_shadow_policies_{};\n\n  // list of cookies to add to upstream headers\n  std::vector<std::string> downstream_set_cookies_;\n\n  bool downstream_100_continue_headers_encoded_ : 1;\n  bool downstream_response_started_ : 1;\n  bool downstream_end_stream_ : 1;\n  bool is_retry_ : 1;\n  bool include_attempt_count_in_request_ : 1;\n  bool attempting_internal_redirect_with_complete_stream_ : 1;\n  uint32_t attempt_count_{1};\n  uint32_t pending_retries_{0};\n\n  Network::TransportSocketOptionsSharedPtr transport_socket_options_;\n};\n\nclass ProdFilter : public Filter {\npublic:\n  using Filter::Filter;\n\nprivate:\n  // Filter\n  RetryStatePtr createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers,\n                                 const Upstream::ClusterInfo& cluster,\n                                 const VirtualCluster* vcluster, Runtime::Loader& runtime,\n                                 Random::RandomGenerator& random, Event::Dispatcher& dispatcher,\n                                 TimeSource& time_source,\n                                 Upstream::ResourcePriority priority) override;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/router_ratelimit.cc",
    "content": "#include \"common/router/router_ratelimit.h\"\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/config/metadata.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nconst uint64_t RateLimitPolicyImpl::MAX_STAGE_NUMBER = 10UL;\n\nbool DynamicMetadataRateLimitOverride::populateOverride(\n    RateLimit::Descriptor& descriptor, const envoy::config::core::v3::Metadata* metadata) const {\n  const ProtobufWkt::Value& metadata_value =\n      Envoy::Config::Metadata::metadataValue(metadata, metadata_key_);\n  if (metadata_value.kind_case() != ProtobufWkt::Value::kStructValue) {\n    return false;\n  }\n\n  const auto& override_value = metadata_value.struct_value().fields();\n  const auto& limit_it = override_value.find(\"requests_per_unit\");\n  const auto& unit_it = override_value.find(\"unit\");\n  if (limit_it != override_value.end() &&\n      limit_it->second.kind_case() == ProtobufWkt::Value::kNumberValue &&\n      unit_it != override_value.end() &&\n      unit_it->second.kind_case() == ProtobufWkt::Value::kStringValue) {\n    envoy::type::v3::RateLimitUnit unit;\n    if (envoy::type::v3::RateLimitUnit_Parse(unit_it->second.string_value(), &unit)) {\n      descriptor.limit_.emplace(RateLimit::RateLimitOverride{\n          static_cast<uint32_t>(limit_it->second.number_value()), unit});\n      return true;\n    }\n  }\n  return false;\n}\n\nbool SourceClusterAction::populateDescriptor(const Router::RouteEntry&,\n                                             RateLimit::Descriptor& descriptor,\n                                             const std::string& local_service_cluster,\n                                             const Http::HeaderMap&,\n                                             
const Network::Address::Instance&,\n                                             const envoy::config::core::v3::Metadata*) const {\n  descriptor.entries_.push_back({\"source_cluster\", local_service_cluster});\n  return true;\n}\n\nbool DestinationClusterAction::populateDescriptor(const Router::RouteEntry& route,\n                                                  RateLimit::Descriptor& descriptor,\n                                                  const std::string&, const Http::HeaderMap&,\n                                                  const Network::Address::Instance&,\n                                                  const envoy::config::core::v3::Metadata*) const {\n  descriptor.entries_.push_back({\"destination_cluster\", route.clusterName()});\n  return true;\n}\n\nbool RequestHeadersAction::populateDescriptor(const Router::RouteEntry&,\n                                              RateLimit::Descriptor& descriptor, const std::string&,\n                                              const Http::HeaderMap& headers,\n                                              const Network::Address::Instance&,\n                                              const envoy::config::core::v3::Metadata*) const {\n  const Http::HeaderEntry* header_value = headers.get(header_name_);\n\n  // If header is not present in the request and if skip_if_absent is true skip this descriptor,\n  // while calling rate limiting service. 
If skip_if_absent is false, do not call rate limiting\n  // service.\n  if (!header_value) {\n    return skip_if_absent_;\n  }\n  descriptor.entries_.push_back(\n      {descriptor_key_, std::string(header_value->value().getStringView())});\n  return true;\n}\n\nbool RemoteAddressAction::populateDescriptor(const Router::RouteEntry&,\n                                             RateLimit::Descriptor& descriptor, const std::string&,\n                                             const Http::HeaderMap&,\n                                             const Network::Address::Instance& remote_address,\n                                             const envoy::config::core::v3::Metadata*) const {\n  if (remote_address.type() != Network::Address::Type::Ip) {\n    return false;\n  }\n\n  descriptor.entries_.push_back({\"remote_address\", remote_address.ip()->addressAsString()});\n  return true;\n}\n\nbool GenericKeyAction::populateDescriptor(const Router::RouteEntry&,\n                                          RateLimit::Descriptor& descriptor, const std::string&,\n                                          const Http::HeaderMap&, const Network::Address::Instance&,\n                                          const envoy::config::core::v3::Metadata*) const {\n  descriptor.entries_.push_back({descriptor_key_, descriptor_value_});\n  return true;\n}\n\nDynamicMetaDataAction::DynamicMetaDataAction(\n    const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action)\n    : metadata_key_(action.metadata_key()), descriptor_key_(action.descriptor_key()),\n      default_value_(action.default_value()) {}\n\nbool DynamicMetaDataAction::populateDescriptor(\n    const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&,\n    const Http::HeaderMap&, const Network::Address::Instance&,\n    const envoy::config::core::v3::Metadata* dynamic_metadata) const {\n  const ProtobufWkt::Value& metadata_value =\n      
Envoy::Config::Metadata::metadataValue(dynamic_metadata, metadata_key_);\n\n  if (!metadata_value.string_value().empty()) {\n    descriptor.entries_.push_back({descriptor_key_, metadata_value.string_value()});\n    return true;\n  } else if (metadata_value.string_value().empty() && !default_value_.empty()) {\n    descriptor.entries_.push_back({descriptor_key_, default_value_});\n    return true;\n  }\n\n  return false;\n}\n\nHeaderValueMatchAction::HeaderValueMatchAction(\n    const envoy::config::route::v3::RateLimit::Action::HeaderValueMatch& action)\n    : descriptor_value_(action.descriptor_value()),\n      expect_match_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(action, expect_match, true)),\n      action_headers_(Http::HeaderUtility::buildHeaderDataVector(action.headers())) {}\n\nbool HeaderValueMatchAction::populateDescriptor(const Router::RouteEntry&,\n                                                RateLimit::Descriptor& descriptor,\n                                                const std::string&, const Http::HeaderMap& headers,\n                                                const Network::Address::Instance&,\n                                                const envoy::config::core::v3::Metadata*) const {\n  if (expect_match_ == Http::HeaderUtility::matchHeaders(headers, action_headers_)) {\n    descriptor.entries_.push_back({\"header_match\", descriptor_value_});\n    return true;\n  } else {\n    return false;\n  }\n}\n\nRateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl(\n    const envoy::config::route::v3::RateLimit& config)\n    : disable_key_(config.disable_key()),\n      stage_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, stage, 0))) {\n  for (const auto& action : config.actions()) {\n    switch (action.action_specifier_case()) {\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kSourceCluster:\n      actions_.emplace_back(new SourceClusterAction());\n      break;\n    case 
envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kDestinationCluster:\n      actions_.emplace_back(new DestinationClusterAction());\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kRequestHeaders:\n      actions_.emplace_back(new RequestHeadersAction(action.request_headers()));\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kRemoteAddress:\n      actions_.emplace_back(new RemoteAddressAction());\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kGenericKey:\n      actions_.emplace_back(new GenericKeyAction(action.generic_key()));\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kDynamicMetadata:\n      actions_.emplace_back(new DynamicMetaDataAction(action.dynamic_metadata()));\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kHeaderValueMatch:\n      actions_.emplace_back(new HeaderValueMatchAction(action.header_value_match()));\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n  if (config.has_limit()) {\n    switch (config.limit().override_specifier_case()) {\n    case envoy::config::route::v3::RateLimit_Override::OverrideSpecifierCase::kDynamicMetadata:\n      limit_override_.emplace(\n          new DynamicMetadataRateLimitOverride(config.limit().dynamic_metadata()));\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n}\n\nvoid RateLimitPolicyEntryImpl::populateDescriptors(\n    const Router::RouteEntry& route, std::vector<RateLimit::Descriptor>& descriptors,\n    const std::string& local_service_cluster, const Http::HeaderMap& headers,\n    const Network::Address::Instance& remote_address,\n    const envoy::config::core::v3::Metadata* dynamic_metadata) const {\n  RateLimit::Descriptor descriptor;\n  bool result = true;\n  for (const RateLimitActionPtr& action : actions_) {\n    result = 
result && action->populateDescriptor(route, descriptor, local_service_cluster, headers,\n                                                  remote_address, dynamic_metadata);\n    if (!result) {\n      break;\n    }\n  }\n\n  if (limit_override_) {\n    limit_override_.value()->populateOverride(descriptor, dynamic_metadata);\n  }\n\n  if (result) {\n    descriptors.emplace_back(descriptor);\n  }\n}\n\nRateLimitPolicyImpl::RateLimitPolicyImpl(\n    const Protobuf::RepeatedPtrField<envoy::config::route::v3::RateLimit>& rate_limits)\n    : rate_limit_entries_reference_(RateLimitPolicyImpl::MAX_STAGE_NUMBER + 1) {\n  for (const auto& rate_limit : rate_limits) {\n    std::unique_ptr<RateLimitPolicyEntry> rate_limit_policy_entry(\n        new RateLimitPolicyEntryImpl(rate_limit));\n    uint64_t stage = rate_limit_policy_entry->stage();\n    ASSERT(stage < rate_limit_entries_reference_.size());\n    rate_limit_entries_reference_[stage].emplace_back(*rate_limit_policy_entry);\n    rate_limit_entries_.emplace_back(std::move(rate_limit_policy_entry));\n  }\n}\n\nconst std::vector<std::reference_wrapper<const Router::RateLimitPolicyEntry>>&\nRateLimitPolicyImpl::getApplicableRateLimit(uint64_t stage) const {\n  ASSERT(stage < rate_limit_entries_reference_.size());\n  return rate_limit_entries_reference_[stage];\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/router_ratelimit.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/router/router_ratelimit.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/http/header_utility.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Populate rate limit override from dynamic metadata.\n */\nclass DynamicMetadataRateLimitOverride : public RateLimitOverrideAction {\npublic:\n  DynamicMetadataRateLimitOverride(\n      const envoy::config::route::v3::RateLimit::Override::DynamicMetadata& config)\n      : metadata_key_(config.metadata_key()) {}\n\n  // Router::RateLimitOverrideAction\n  bool populateOverride(RateLimit::Descriptor& descriptor,\n                        const envoy::config::core::v3::Metadata* metadata) const override;\n\nprivate:\n  const Envoy::Config::MetadataKey metadata_key_;\n};\n\n/**\n * Action for source cluster rate limiting.\n */\nclass SourceClusterAction : public RateLimitAction {\npublic:\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const Http::HeaderMap& headers,\n                          const Network::Address::Instance& remote_address,\n                          const envoy::config::core::v3::Metadata* dynamic_metadata) const override;\n};\n\n/**\n * Action for destination cluster rate limiting.\n */\nclass DestinationClusterAction : public RateLimitAction {\npublic:\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const Http::HeaderMap& headers,\n                          const Network::Address::Instance& 
remote_address,\n                          const envoy::config::core::v3::Metadata* dynamic_metadata) const override;\n};\n\n/**\n * Action for request headers rate limiting.\n */\nclass RequestHeadersAction : public RateLimitAction {\npublic:\n  RequestHeadersAction(const envoy::config::route::v3::RateLimit::Action::RequestHeaders& action)\n      : header_name_(action.header_name()), descriptor_key_(action.descriptor_key()),\n        skip_if_absent_(action.skip_if_absent()) {}\n\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const Http::HeaderMap& headers,\n                          const Network::Address::Instance& remote_address,\n                          const envoy::config::core::v3::Metadata* dynamic_metadata) const override;\n\nprivate:\n  const Http::LowerCaseString header_name_;\n  const std::string descriptor_key_;\n  const bool skip_if_absent_;\n};\n\n/**\n * Action for remote address rate limiting.\n */\nclass RemoteAddressAction : public RateLimitAction {\npublic:\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const Http::HeaderMap& headers,\n                          const Network::Address::Instance& remote_address,\n                          const envoy::config::core::v3::Metadata* dynamic_metadata) const override;\n};\n\n/**\n * Action for generic key rate limiting.\n */\nclass GenericKeyAction : public RateLimitAction {\npublic:\n  GenericKeyAction(const envoy::config::route::v3::RateLimit::Action::GenericKey& action)\n      : descriptor_value_(action.descriptor_value()),\n        descriptor_key_(!action.descriptor_key().empty() ? 
action.descriptor_key()\n                                                         : \"generic_key\") {}\n\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const Http::HeaderMap& headers,\n                          const Network::Address::Instance& remote_address,\n                          const envoy::config::core::v3::Metadata* dynamic_metadata) const override;\n\nprivate:\n  const std::string descriptor_value_;\n  const std::string descriptor_key_;\n};\n\n/**\n * Action for dynamic metadata rate limiting.\n */\nclass DynamicMetaDataAction : public RateLimitAction {\npublic:\n  DynamicMetaDataAction(const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action);\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const Http::HeaderMap& headers,\n                          const Network::Address::Instance& remote_address,\n                          const envoy::config::core::v3::Metadata* dynamic_metadata) const override;\n\nprivate:\n  const Envoy::Config::MetadataKey metadata_key_;\n  const std::string descriptor_key_;\n  const std::string default_value_;\n};\n\n/**\n * Action for header value match rate limiting.\n */\nclass HeaderValueMatchAction : public RateLimitAction {\npublic:\n  HeaderValueMatchAction(\n      const envoy::config::route::v3::RateLimit::Action::HeaderValueMatch& action);\n\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const Http::HeaderMap& headers,\n                          const Network::Address::Instance& remote_address,\n                          const envoy::config::core::v3::Metadata* 
dynamic_metadata) const override;\n\nprivate:\n  const std::string descriptor_value_;\n  const bool expect_match_;\n  const std::vector<Http::HeaderUtility::HeaderDataPtr> action_headers_;\n};\n\n/*\n * Implementation of RateLimitPolicyEntry that holds the action for the configuration.\n */\nclass RateLimitPolicyEntryImpl : public RateLimitPolicyEntry {\npublic:\n  RateLimitPolicyEntryImpl(const envoy::config::route::v3::RateLimit& config);\n\n  // Router::RateLimitPolicyEntry\n  uint64_t stage() const override { return stage_; }\n  const std::string& disableKey() const override { return disable_key_; }\n  void\n  populateDescriptors(const Router::RouteEntry& route,\n                      std::vector<Envoy::RateLimit::Descriptor>& descriptors,\n                      const std::string& local_service_cluster, const Http::HeaderMap&,\n                      const Network::Address::Instance& remote_address,\n                      const envoy::config::core::v3::Metadata* dynamic_metadata) const override;\n\nprivate:\n  const std::string disable_key_;\n  uint64_t stage_;\n  std::vector<RateLimitActionPtr> actions_;\n  absl::optional<RateLimitOverrideActionPtr> limit_override_ = absl::nullopt;\n};\n\n/**\n * Implementation of RateLimitPolicy that reads from the JSON route config.\n */\nclass RateLimitPolicyImpl : public RateLimitPolicy {\npublic:\n  RateLimitPolicyImpl(\n      const Protobuf::RepeatedPtrField<envoy::config::route::v3::RateLimit>& rate_limits);\n\n  // Router::RateLimitPolicy\n  const std::vector<std::reference_wrapper<const RateLimitPolicyEntry>>&\n  getApplicableRateLimit(uint64_t stage = 0) const override;\n  bool empty() const override { return rate_limit_entries_.empty(); }\n\nprivate:\n  std::vector<std::unique_ptr<RateLimitPolicyEntry>> rate_limit_entries_;\n  std::vector<std::vector<std::reference_wrapper<const RateLimitPolicyEntry>>>\n      rate_limit_entries_reference_;\n  // The maximum stage number supported. 
This value should match the maximum stage number in\n  // Json::Schema::HTTP_RATE_LIMITS_CONFIGURATION_SCHEMA and\n  // Json::Schema::RATE_LIMIT_HTTP_FILTER_SCHEMA from common/json/config_schemas.cc.\n  static const uint64_t MAX_STAGE_NUMBER;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/scoped_config_impl.cc",
    "content": "#include \"common/router/scoped_config_impl.h\"\n\n#include \"envoy/config/route/v3/scoped_route.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nbool ScopeKey::operator!=(const ScopeKey& other) const { return !(*this == other); }\n\nbool ScopeKey::operator==(const ScopeKey& other) const {\n  if (fragments_.empty() || other.fragments_.empty()) {\n    // An empty key equals to nothing, \"NULL\" != \"NULL\".\n    return false;\n  }\n  return this->hash() == other.hash();\n}\n\nHeaderValueExtractorImpl::HeaderValueExtractorImpl(\n    ScopedRoutes::ScopeKeyBuilder::FragmentBuilder&& config)\n    : FragmentBuilderBase(std::move(config)),\n      header_value_extractor_config_(config_.header_value_extractor()) {\n  ASSERT(config_.type_case() ==\n             ScopedRoutes::ScopeKeyBuilder::FragmentBuilder::kHeaderValueExtractor,\n         \"header_value_extractor is not set.\");\n  if (header_value_extractor_config_.extract_type_case() ==\n      ScopedRoutes::ScopeKeyBuilder::FragmentBuilder::HeaderValueExtractor::kIndex) {\n    if (header_value_extractor_config_.index() != 0 &&\n        header_value_extractor_config_.element_separator().empty()) {\n      throw ProtoValidationException(\"Index > 0 for empty string element separator.\",\n                                     header_value_extractor_config_);\n    }\n  }\n  if (header_value_extractor_config_.extract_type_case() ==\n      ScopedRoutes::ScopeKeyBuilder::FragmentBuilder::HeaderValueExtractor::EXTRACT_TYPE_NOT_SET) {\n    throw ProtoValidationException(\"HeaderValueExtractor extract_type not set.\",\n                                   header_value_extractor_config_);\n  }\n}\n\nstd::unique_ptr<ScopeKeyFragmentBase>\nHeaderValueExtractorImpl::computeFragment(const Http::HeaderMap& headers) const {\n  const Envoy::Http::HeaderEntry* header_entry =\n      
headers.get(Envoy::Http::LowerCaseString(header_value_extractor_config_.name()));\n  if (header_entry == nullptr) {\n    return nullptr;\n  }\n\n  std::vector<absl::string_view> elements{header_entry->value().getStringView()};\n  if (header_value_extractor_config_.element_separator().length() > 0) {\n    elements = absl::StrSplit(header_entry->value().getStringView(),\n                              header_value_extractor_config_.element_separator());\n  }\n  switch (header_value_extractor_config_.extract_type_case()) {\n  case ScopedRoutes::ScopeKeyBuilder::FragmentBuilder::HeaderValueExtractor::kElement:\n    for (const auto& element : elements) {\n      std::pair<absl::string_view, absl::string_view> key_value = absl::StrSplit(\n          element, absl::MaxSplits(header_value_extractor_config_.element().separator(), 1));\n      if (key_value.first == header_value_extractor_config_.element().key()) {\n        return std::make_unique<StringKeyFragment>(key_value.second);\n      }\n    }\n    break;\n  case ScopedRoutes::ScopeKeyBuilder::FragmentBuilder::HeaderValueExtractor::kIndex:\n    if (header_value_extractor_config_.index() < elements.size()) {\n      return std::make_unique<StringKeyFragment>(elements[header_value_extractor_config_.index()]);\n    }\n    break;\n  default:                       // EXTRACT_TYPE_NOT_SET\n    NOT_REACHED_GCOVR_EXCL_LINE; // Caught in constructor already.\n  }\n\n  return nullptr;\n}\n\nScopedRouteInfo::ScopedRouteInfo(envoy::config::route::v3::ScopedRouteConfiguration&& config_proto,\n                                 ConfigConstSharedPtr&& route_config)\n    : config_proto_(std::move(config_proto)), route_config_(std::move(route_config)) {\n  // TODO(stevenzzzz): Maybe worth a KeyBuilder abstraction when there are more than one type of\n  // Fragment.\n  for (const auto& fragment : config_proto_.key().fragments()) {\n    switch (fragment.type_case()) {\n    case 
envoy::config::route::v3::ScopedRouteConfiguration::Key::Fragment::TypeCase::kStringKey:\n      scope_key_.addFragment(std::make_unique<StringKeyFragment>(fragment.string_key()));\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n}\n\nScopeKeyBuilderImpl::ScopeKeyBuilderImpl(ScopedRoutes::ScopeKeyBuilder&& config)\n    : ScopeKeyBuilderBase(std::move(config)) {\n  for (const auto& fragment_builder : config_.fragments()) {\n    switch (fragment_builder.type_case()) {\n    case ScopedRoutes::ScopeKeyBuilder::FragmentBuilder::kHeaderValueExtractor:\n      fragment_builders_.emplace_back(std::make_unique<HeaderValueExtractorImpl>(\n          ScopedRoutes::ScopeKeyBuilder::FragmentBuilder(fragment_builder)));\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n}\n\nScopeKeyPtr ScopeKeyBuilderImpl::computeScopeKey(const Http::HeaderMap& headers) const {\n  ScopeKey key;\n  for (const auto& builder : fragment_builders_) {\n    // returns nullopt if a null fragment is found.\n    std::unique_ptr<ScopeKeyFragmentBase> fragment = builder->computeFragment(headers);\n    if (fragment == nullptr) {\n      return nullptr;\n    }\n    key.addFragment(std::move(fragment));\n  }\n  return std::make_unique<ScopeKey>(std::move(key));\n}\n\nvoid ScopedConfigImpl::addOrUpdateRoutingScopes(\n    const std::vector<ScopedRouteInfoConstSharedPtr>& scoped_route_infos) {\n  for (auto& scoped_route_info : scoped_route_infos) {\n    const auto iter = scoped_route_info_by_name_.find(scoped_route_info->scopeName());\n    if (iter != scoped_route_info_by_name_.end()) {\n      ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash()));\n      scoped_route_info_by_key_.erase(iter->second->scopeKey().hash());\n    }\n    scoped_route_info_by_name_[scoped_route_info->scopeName()] = scoped_route_info;\n    scoped_route_info_by_key_[scoped_route_info->scopeKey().hash()] = scoped_route_info;\n  }\n}\n\nvoid 
ScopedConfigImpl::removeRoutingScopes(const std::vector<std::string>& scope_names) {\n  for (std::string const& scope_name : scope_names) {\n    const auto iter = scoped_route_info_by_name_.find(scope_name);\n    if (iter != scoped_route_info_by_name_.end()) {\n      ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash()));\n      scoped_route_info_by_key_.erase(iter->second->scopeKey().hash());\n      scoped_route_info_by_name_.erase(iter);\n    }\n  }\n}\n\nRouter::ConfigConstSharedPtr\nScopedConfigImpl::getRouteConfig(const Http::HeaderMap& headers) const {\n  ScopeKeyPtr scope_key = scope_key_builder_.computeScopeKey(headers);\n  if (scope_key == nullptr) {\n    return nullptr;\n  }\n  auto iter = scoped_route_info_by_key_.find(scope_key->hash());\n  if (iter != scoped_route_info_by_key_.end()) {\n    return iter->second->routeConfig();\n  }\n  return nullptr;\n}\n\nScopeKeyPtr ScopedConfigImpl::computeScopeKey(const Http::HeaderMap& headers) const {\n  ScopeKeyPtr scope_key = scope_key_builder_.computeScopeKey(headers);\n  if (scope_key &&\n      scoped_route_info_by_key_.find(scope_key->hash()) != scoped_route_info_by_key_.end()) {\n    return scope_key;\n  }\n  return nullptr;\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/scoped_config_impl.h",
    "content": "#pragma once\n\n#include <memory>\n#include <typeinfo>\n\n#include \"envoy/config/route/v3/scoped_route.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/router/rds.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/router/scopes.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/config_impl.h\"\n\n#include \"absl/numeric/int128.h\"\n#include \"absl/strings/str_format.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nusing envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes;\n\n/**\n * Base class for fragment builders.\n */\nclass FragmentBuilderBase {\npublic:\n  explicit FragmentBuilderBase(ScopedRoutes::ScopeKeyBuilder::FragmentBuilder&& config)\n      : config_(std::move(config)) {}\n  virtual ~FragmentBuilderBase() = default;\n\n  // Returns a fragment if the fragment rule applies, a nullptr indicates no fragment could be\n  // generated from the headers.\n  virtual std::unique_ptr<ScopeKeyFragmentBase>\n  computeFragment(const Http::HeaderMap& headers) const PURE;\n\nprotected:\n  const ScopedRoutes::ScopeKeyBuilder::FragmentBuilder config_;\n};\n\nclass HeaderValueExtractorImpl : public FragmentBuilderBase {\npublic:\n  explicit HeaderValueExtractorImpl(ScopedRoutes::ScopeKeyBuilder::FragmentBuilder&& config);\n\n  std::unique_ptr<ScopeKeyFragmentBase>\n  computeFragment(const Http::HeaderMap& headers) const override;\n\nprivate:\n  const ScopedRoutes::ScopeKeyBuilder::FragmentBuilder::HeaderValueExtractor&\n      header_value_extractor_config_;\n};\n\n/**\n * Base class for ScopeKeyBuilder implementations.\n */\nclass ScopeKeyBuilderBase {\npublic:\n  explicit ScopeKeyBuilderBase(ScopedRoutes::ScopeKeyBuilder&& config)\n      : config_(std::move(config)) {}\n  virtual ~ScopeKeyBuilderBase() = default;\n\n  // Computes scope key for 
given headers, returns nullptr if a key can't be computed.\n  virtual ScopeKeyPtr computeScopeKey(const Http::HeaderMap& headers) const PURE;\n\nprotected:\n  const ScopedRoutes::ScopeKeyBuilder config_;\n};\n\nclass ScopeKeyBuilderImpl : public ScopeKeyBuilderBase {\npublic:\n  explicit ScopeKeyBuilderImpl(ScopedRoutes::ScopeKeyBuilder&& config);\n\n  ScopeKeyPtr computeScopeKey(const Http::HeaderMap& headers) const override;\n\nprivate:\n  std::vector<std::unique_ptr<FragmentBuilderBase>> fragment_builders_;\n};\n\n// ScopedRouteConfiguration and corresponding RouteConfigProvider.\nclass ScopedRouteInfo {\npublic:\n  ScopedRouteInfo(envoy::config::route::v3::ScopedRouteConfiguration&& config_proto,\n                  ConfigConstSharedPtr&& route_config);\n\n  const ConfigConstSharedPtr& routeConfig() const { return route_config_; }\n  const ScopeKey& scopeKey() const { return scope_key_; }\n  const envoy::config::route::v3::ScopedRouteConfiguration& configProto() const {\n    return config_proto_;\n  }\n  const std::string& scopeName() const { return config_proto_.name(); }\n\nprivate:\n  envoy::config::route::v3::ScopedRouteConfiguration config_proto_;\n  ScopeKey scope_key_;\n  ConfigConstSharedPtr route_config_;\n};\nusing ScopedRouteInfoConstSharedPtr = std::shared_ptr<const ScopedRouteInfo>;\n// Ordered map for consistent config dumping.\nusing ScopedRouteMap = std::map<std::string, ScopedRouteInfoConstSharedPtr>;\n\n/**\n * Each Envoy worker is assigned an instance of this type. 
When config updates are received,\n * addOrUpdateRoutingScope() and removeRoutingScope() are called to update the set of scoped routes.\n *\n * ConnectionManagerImpl::refreshCachedRoute() will call getRouterConfig() to obtain the\n * Router::ConfigConstSharedPtr to use for route selection.\n */\nclass ScopedConfigImpl : public ScopedConfig {\npublic:\n  ScopedConfigImpl(ScopedRoutes::ScopeKeyBuilder&& scope_key_builder)\n      : scope_key_builder_(std::move(scope_key_builder)) {}\n\n  void\n  addOrUpdateRoutingScopes(const std::vector<ScopedRouteInfoConstSharedPtr>& scoped_route_infos);\n\n  void removeRoutingScopes(const std::vector<std::string>& scope_names);\n\n  // Envoy::Router::ScopedConfig\n  Router::ConfigConstSharedPtr getRouteConfig(const Http::HeaderMap& headers) const override;\n  // The return value is not null only if the scope corresponding to the header exists.\n  ScopeKeyPtr computeScopeKey(const Http::HeaderMap& headers) const override;\n\nprivate:\n  ScopeKeyBuilderImpl scope_key_builder_;\n  // From scope name to cached ScopedRouteInfo.\n  absl::flat_hash_map<std::string, ScopedRouteInfoConstSharedPtr> scoped_route_info_by_name_;\n  // Hash by ScopeKey hash to lookup in constant time.\n  absl::flat_hash_map<uint64_t, ScopedRouteInfoConstSharedPtr> scoped_route_info_by_key_;\n};\n\n/**\n * A NULL implementation of the scoped routing configuration.\n */\nclass NullScopedConfigImpl : public ScopedConfig {\npublic:\n  Router::ConfigConstSharedPtr getRouteConfig(const Http::HeaderMap&) const override {\n    return std::make_shared<const NullConfigImpl>();\n  }\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/scoped_rds.cc",
    "content": "#include \"common/router/scoped_rds.h\"\n\n#include <memory>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/api/v2/scoped_route.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/route/v3/scoped_route.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/resource_name.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/init/watcher_impl.h\"\n#include \"common/router/rds_impl.h\"\n#include \"common/router/scoped_config_impl.h\"\n\n#include \"absl/strings/str_join.h\"\n\n// Types are deeply nested under Envoy::Config::ConfigProvider; use 'using-directives' across all\n// ConfigProvider related types for consistency.\nusing Envoy::Config::ConfigProvider;\nusing Envoy::Config::ConfigProviderInstanceType;\nusing Envoy::Config::ConfigProviderManager;\nusing Envoy::Config::ConfigProviderPtr;\nusing Envoy::Config::ScopedResume;\n\nnamespace Envoy {\nnamespace Router {\nnamespace ScopedRoutesConfigProviderUtil {\nConfigProviderPtr create(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        config,\n    Server::Configuration::ServerFactoryContext& factory_context, Init::Manager& init_manager,\n    const std::string& stat_prefix, ConfigProviderManager& scoped_routes_config_provider_manager) {\n  ASSERT(config.route_specifier_case() ==\n         envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n             RouteSpecifierCase::kScopedRoutes);\n\n  switch (config.scoped_routes().config_specifier_case()) {\n  case 
envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n      ConfigSpecifierCase::kScopedRouteConfigurationsList: {\n    const envoy::extensions::filters::network::http_connection_manager::v3::\n        ScopedRouteConfigurationsList& scoped_route_list =\n            config.scoped_routes().scoped_route_configurations_list();\n    return scoped_routes_config_provider_manager.createStaticConfigProvider(\n        RepeatedPtrUtil::convertToConstMessagePtrContainer<\n            envoy::config::route::v3::ScopedRouteConfiguration,\n            ProtobufTypes::ConstMessagePtrVector>(scoped_route_list.scoped_route_configurations()),\n        factory_context,\n        ScopedRoutesConfigProviderManagerOptArg(config.scoped_routes().name(),\n                                                config.scoped_routes().rds_config_source(),\n                                                config.scoped_routes().scope_key_builder()));\n  }\n  case envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n      ConfigSpecifierCase::kScopedRds:\n    return scoped_routes_config_provider_manager.createXdsConfigProvider(\n        config.scoped_routes().scoped_rds(), factory_context, init_manager, stat_prefix,\n        ScopedRoutesConfigProviderManagerOptArg(config.scoped_routes().name(),\n                                                config.scoped_routes().rds_config_source(),\n                                                config.scoped_routes().scope_key_builder()));\n  default:\n    // Proto validation enforces that is not reached.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace ScopedRoutesConfigProviderUtil\n\nInlineScopedRoutesConfigProvider::InlineScopedRoutesConfigProvider(\n    ProtobufTypes::ConstMessagePtrVector&& config_protos, std::string name,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    ScopedRoutesConfigProviderManager& config_provider_manager,\n    envoy::config::core::v3::ConfigSource 
rds_config_source,\n    envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::ScopeKeyBuilder\n        scope_key_builder)\n    : Envoy::Config::ImmutableConfigProviderBase(factory_context, config_provider_manager,\n                                                 ConfigProviderInstanceType::Inline,\n                                                 ConfigProvider::ApiType::Delta),\n      name_(std::move(name)),\n      config_(std::make_shared<ScopedConfigImpl>(std::move(scope_key_builder))),\n      config_protos_(std::make_move_iterator(config_protos.begin()),\n                     std::make_move_iterator(config_protos.end())),\n      rds_config_source_(std::move(rds_config_source)) {}\n\nScopedRdsConfigSubscription::ScopedRdsConfigSubscription(\n    const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRds& scoped_rds,\n    const uint64_t manager_identifier, const std::string& name,\n    const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n        ScopeKeyBuilder& scope_key_builder,\n    Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix,\n    envoy::config::core::v3::ConfigSource rds_config_source,\n    RouteConfigProviderManager& route_config_provider_manager,\n    ScopedRoutesConfigProviderManager& config_provider_manager)\n    : DeltaConfigSubscriptionInstance(\"SRDS\", manager_identifier, config_provider_manager,\n                                      factory_context),\n      Envoy::Config::SubscriptionBase<envoy::config::route::v3::ScopedRouteConfiguration>(\n          scoped_rds.scoped_rds_config_source().resource_api_version(),\n          factory_context.messageValidationContext().dynamicValidationVisitor(), \"name\"),\n      factory_context_(factory_context), name_(name), scope_key_builder_(scope_key_builder),\n      scope_(factory_context.scope().createScope(stat_prefix + \"scoped_rds.\" + name + \".\")),\n      
stats_({ALL_SCOPED_RDS_STATS(POOL_COUNTER(*scope_), POOL_GAUGE(*scope_))}),\n      rds_config_source_(std::move(rds_config_source)), stat_prefix_(stat_prefix),\n      route_config_provider_manager_(route_config_provider_manager) {\n  const auto resource_name = getResourceName();\n  subscription_ =\n      factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource(\n          scoped_rds.scoped_rds_config_source(), Grpc::Common::typeUrl(resource_name), *scope_,\n          *this, resource_decoder_);\n\n  initialize([scope_key_builder]() -> Envoy::Config::ConfigProvider::ConfigConstSharedPtr {\n    return std::make_shared<ScopedConfigImpl>(\n        envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n            ScopeKeyBuilder(scope_key_builder));\n  });\n}\n\n// Constructor for RdsRouteConfigProviderHelper when scope is eager loading.\n// Initialize RdsRouteConfigProvider by default.\nScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::RdsRouteConfigProviderHelper(\n    ScopedRdsConfigSubscription& parent, std::string scope_name,\n    envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n    Init::Manager& init_manager)\n    : parent_(parent), scope_name_(scope_name), on_demand_(false) {\n  initRdsConfigProvider(rds, init_manager);\n}\n\n// Constructor for RdsRouteConfigProviderHelper when scope is on demand.\n// Leave the RdsRouteConfigProvider uninitialized.\nScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::RdsRouteConfigProviderHelper(\n    ScopedRdsConfigSubscription& parent, std::string scope_name)\n    : parent_(parent), scope_name_(scope_name), on_demand_(true) {\n  parent_.stats_.on_demand_scopes_.inc();\n}\n\n// When on demand callback is received from main thread, there are 4 cases.\n// 1. Scope is not found, post a scope not found callback back to worker thread.\n// 2. Scope is found but route provider has not been initialized, create route provider.\n// 3. 
After route provider has been initialized, if RouteConfiguration has been fetched,\n// post scope found callback to worker thread.\n// 4. After route provider has been initialized, if RouteConfiguration is null,\n// cache the callback and wait for RouteConfiguration to come.\nvoid ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::addOnDemandUpdateCallback(\n    std::function<void()> callback) {\n  // If RouteConfiguration has been initialized, run the callback to continue in filter chain,\n  // otherwise cache it and wait for the route table to be initialized. If RouteConfiguration hasn't\n  // been initialized, routeConfig() return a shared_ptr to NullConfigImpl. The name of\n  // NullConfigImpl is an empty string.\n  if (route_provider_ != nullptr && !routeConfig()->name().empty()) {\n    callback();\n    return;\n  }\n  on_demand_update_callbacks_.push_back(callback);\n  // Initialize the rds provider if it has not been initialized. There is potential race here\n  // because other worker threads may also post callback to on demand update the RouteConfiguration\n  // associated with this scope. 
If rds provider has been initialized, just wait for\n  // RouteConfiguration to be updated.\n  maybeInitRdsConfigProvider();\n}\n\nvoid ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::runOnDemandUpdateCallback() {\n  for (auto& callback : on_demand_update_callbacks_) {\n    callback();\n  }\n  on_demand_update_callbacks_.clear();\n}\n\nvoid ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::initRdsConfigProvider(\n    envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n    Init::Manager& init_manager) {\n  route_provider_ = std::dynamic_pointer_cast<RdsRouteConfigProviderImpl>(\n      parent_.route_config_provider_manager_.createRdsRouteConfigProvider(\n          rds, parent_.factory_context_, parent_.stat_prefix_, init_manager));\n\n  rds_update_callback_handle_ = route_provider_->subscription().addUpdateCallback([this]() {\n    // Subscribe to RDS update.\n    parent_.onRdsConfigUpdate(scope_name_, route_provider_->subscription());\n  });\n  parent_.stats_.active_scopes_.inc();\n}\n\nvoid ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::maybeInitRdsConfigProvider() {\n  // If the route provider have been initialized, return and wait for rds config update.\n  if (route_provider_ != nullptr) {\n    return;\n  }\n\n  // Create a init_manager to create a rds provider.\n  // No transitive warming dependency here because only on demand update reach this point.\n  std::unique_ptr<Init::ManagerImpl> srds_init_mgr =\n      std::make_unique<Init::ManagerImpl>(fmt::format(\"SRDS on demand init manager.\"));\n  std::unique_ptr<Cleanup> srds_initialization_continuation =\n      std::make_unique<Cleanup>([this, &srds_init_mgr] {\n        Init::WatcherImpl noop_watcher(\n            fmt::format(\"SRDS on demand ConfigUpdate watcher: {}\", scope_name_),\n            []() { /*Do nothing.*/ });\n        srds_init_mgr->initialize(noop_watcher);\n      });\n  // Create route provider.\n  
envoy::extensions::filters::network::http_connection_manager::v3::Rds rds;\n  rds.mutable_config_source()->MergeFrom(parent_.rds_config_source_);\n  rds.set_route_config_name(\n      parent_.scoped_route_map_[scope_name_]->configProto().route_configuration_name());\n  initRdsConfigProvider(rds, *srds_init_mgr);\n  ENVOY_LOG(debug, fmt::format(\"Scope on demand update: {}\", scope_name_));\n  // If RouteConfiguration hasn't been initialized, routeConfig() return a shared_ptr to\n  // NullConfigImpl. The name of NullConfigImpl is an empty string.\n  if (routeConfig()->name().empty()) {\n    return;\n  }\n  // If RouteConfiguration has been initialized, apply update to all the threads.\n  parent_.onRdsConfigUpdate(scope_name_, route_provider_->subscription());\n}\n\nbool ScopedRdsConfigSubscription::addOrUpdateScopes(\n    const std::vector<Envoy::Config::DecodedResourceRef>& resources, Init::Manager& init_manager,\n    const std::string& version_info) {\n  bool any_applied = false;\n  envoy::extensions::filters::network::http_connection_manager::v3::Rds rds;\n  rds.mutable_config_source()->MergeFrom(rds_config_source_);\n  std::vector<ScopedRouteInfoConstSharedPtr> updated_scopes;\n  for (const auto& resource : resources) {\n    // Explicit copy so that we can std::move later.\n    envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config =\n        dynamic_cast<const envoy::config::route::v3::ScopedRouteConfiguration&>(\n            resource.get().resource());\n    const std::string scope_name = scoped_route_config.name();\n    rds.set_route_config_name(scoped_route_config.route_configuration_name());\n    std::unique_ptr<RdsRouteConfigProviderHelper> rds_config_provider_helper;\n    std::shared_ptr<ScopedRouteInfo> scoped_route_info = nullptr;\n    if (scoped_route_config.on_demand() == false) {\n      // For default scopes, create a rds helper with rds provider initialized.\n      rds_config_provider_helper =\n          
std::make_unique<RdsRouteConfigProviderHelper>(*this, scope_name, rds, init_manager);\n      scoped_route_info = std::make_shared<ScopedRouteInfo>(\n          std::move(scoped_route_config), rds_config_provider_helper->routeConfig());\n    } else {\n      // For on demand scopes, create a rds helper with rds provider uninitialized.\n      rds_config_provider_helper =\n          std::make_unique<RdsRouteConfigProviderHelper>(*this, scope_name);\n      // scope_route_info->routeConfig() will be nullptr, because RouteConfiguration is not loaded.\n      scoped_route_info =\n          std::make_shared<ScopedRouteInfo>(std::move(scoped_route_config), nullptr);\n    }\n    route_provider_by_scope_[scope_name] = std::move(rds_config_provider_helper);\n    scope_name_by_hash_[scoped_route_info->scopeKey().hash()] = scoped_route_info->scopeName();\n    scoped_route_map_[scoped_route_info->scopeName()] = scoped_route_info;\n    updated_scopes.push_back(scoped_route_info);\n    any_applied = true;\n    ENVOY_LOG(debug, \"srds: queueing add/update of scoped_route '{}', version: {}\",\n              scoped_route_info->scopeName(), version_info);\n  }\n\n  // scoped_route_info of both eager loading and on demand scopes will be propagated to work\n  // threads. 
Upon a scoped RouteConfiguration miss, if the scope exists, an on demand update\n  // callback will be posted to main thread.\n  if (!updated_scopes.empty()) {\n    applyConfigUpdate([updated_scopes](ConfigProvider::ConfigConstSharedPtr config)\n                          -> ConfigProvider::ConfigConstSharedPtr {\n      auto* thread_local_scoped_config =\n          const_cast<ScopedConfigImpl*>(static_cast<const ScopedConfigImpl*>(config.get()));\n      thread_local_scoped_config->addOrUpdateRoutingScopes(updated_scopes);\n      return config;\n    });\n  }\n  return any_applied;\n}\n\nstd::list<ScopedRdsConfigSubscription::RdsRouteConfigProviderHelperPtr>\nScopedRdsConfigSubscription::removeScopes(\n    const Protobuf::RepeatedPtrField<std::string>& scope_names, const std::string& version_info) {\n  std::list<ScopedRdsConfigSubscription::RdsRouteConfigProviderHelperPtr>\n      to_be_removed_rds_providers;\n  std::vector<std::string> removed_scope_names;\n  for (const auto& scope_name : scope_names) {\n    auto iter = scoped_route_map_.find(scope_name);\n    if (iter != scoped_route_map_.end()) {\n      auto rds_config_provider_helper_iter = route_provider_by_scope_.find(scope_name);\n      if (rds_config_provider_helper_iter != route_provider_by_scope_.end()) {\n        to_be_removed_rds_providers.emplace_back(\n            std::move(rds_config_provider_helper_iter->second));\n        route_provider_by_scope_.erase(rds_config_provider_helper_iter);\n      }\n      scope_name_by_hash_.erase(iter->second->scopeKey().hash());\n      scoped_route_map_.erase(iter);\n      removed_scope_names.push_back(scope_name);\n      ENVOY_LOG(debug, \"srds: queueing removal of scoped route '{}', version: {}\", scope_name,\n                version_info);\n    }\n  }\n  if (!removed_scope_names.empty()) {\n    applyConfigUpdate([removed_scope_names](ConfigProvider::ConfigConstSharedPtr config)\n                          -> ConfigProvider::ConfigConstSharedPtr {\n      auto* 
thread_local_scoped_config =\n          const_cast<ScopedConfigImpl*>(static_cast<const ScopedConfigImpl*>(config.get()));\n      thread_local_scoped_config->removeRoutingScopes(removed_scope_names);\n      return config;\n    });\n  }\n  return to_be_removed_rds_providers;\n}\n\nvoid ScopedRdsConfigSubscription::onConfigUpdate(\n    const std::vector<Envoy::Config::DecodedResourceRef>& added_resources,\n    const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n    const std::string& version_info) {\n  // NOTE: deletes are done before adds/updates.\n  absl::flat_hash_map<std::string, ScopedRouteInfoConstSharedPtr> to_be_removed_scopes;\n  // Destruction of resume_rds will lift the floodgate for new RDS subscriptions.\n  // Note in the case of partial acceptance, accepted RDS subscriptions should be started\n  // despite of any error.\n  ScopedResume resume_rds;\n  // If new route config sources come after the local init manager's initialize() been\n  // called, the init manager can't accept new targets. 
Instead we use a local override which will\n  // start new subscriptions but not wait on them to be ready.\n  std::unique_ptr<Init::ManagerImpl> srds_init_mgr;\n  // NOTE: This should be defined after srds_init_mgr and resume_rds, as it depends on the\n  // srds_init_mgr, and we want a single RDS discovery request to be sent to management\n  // server.\n  std::unique_ptr<Cleanup> srds_initialization_continuation;\n  ASSERT(localInitManager().state() > Init::Manager::State::Uninitialized);\n  const auto type_urls =\n      Envoy::Config::getAllVersionTypeUrls<envoy::config::route::v3::RouteConfiguration>();\n  // Pause RDS to not send a burst of RDS requests until we start all the new subscriptions.\n  // In the case that localInitManager is uninitialized, RDS is already paused\n  // either by Server init or LDS init.\n  if (factory_context_.clusterManager().adsMux()) {\n    resume_rds = factory_context_.clusterManager().adsMux()->pause(type_urls);\n  }\n  // if local init manager is initialized, the parent init manager may have gone away.\n  if (localInitManager().state() == Init::Manager::State::Initialized) {\n    srds_init_mgr =\n        std::make_unique<Init::ManagerImpl>(fmt::format(\"SRDS {}:{}\", name_, version_info));\n    srds_initialization_continuation =\n        std::make_unique<Cleanup>([this, &srds_init_mgr, version_info] {\n          // For new RDS subscriptions created after listener warming up, we don't wait for them to\n          // warm up.\n          Init::WatcherImpl noop_watcher(\n              // Note: we just throw it away.\n              fmt::format(\"SRDS ConfigUpdate watcher {}:{}\", name_, version_info),\n              []() { /*Do nothing.*/ });\n          srds_init_mgr->initialize(noop_watcher);\n        });\n  }\n\n  std::string exception_msg;\n  Protobuf::RepeatedPtrField<std::string> clean_removed_resources =\n      detectUpdateConflictAndCleanupRemoved(added_resources, removed_resources, exception_msg);\n  if (!exception_msg.empty()) 
{\n    throw EnvoyException(fmt::format(\"Error adding/updating scoped route(s): {}\", exception_msg));\n  }\n\n  // Do not delete RDS config providers just yet, in case the to be deleted RDS subscriptions could\n  // be reused by some to be added scopes.\n  std::list<ScopedRdsConfigSubscription::RdsRouteConfigProviderHelperPtr>\n      to_be_removed_rds_providers = removeScopes(clean_removed_resources, version_info);\n\n  bool any_applied =\n      addOrUpdateScopes(added_resources,\n                        (srds_init_mgr == nullptr ? localInitManager() : *srds_init_mgr),\n                        version_info) ||\n      !to_be_removed_rds_providers.empty();\n  ConfigSubscriptionCommonBase::onConfigUpdate();\n  if (any_applied) {\n    setLastConfigInfo(absl::optional<LastConfigInfo>({absl::nullopt, version_info}));\n  }\n  stats_.all_scopes_.set(scoped_route_map_.size());\n  stats_.config_reload_.inc();\n}\n\nvoid ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_name,\n                                                    RdsRouteConfigSubscription& rds_subscription) {\n  auto iter = scoped_route_map_.find(scope_name);\n  ASSERT(iter != scoped_route_map_.end(),\n         fmt::format(\"trying to update route config for non-existing scope {}\", scope_name));\n  auto new_scoped_route_info = std::make_shared<ScopedRouteInfo>(\n      envoy::config::route::v3::ScopedRouteConfiguration(iter->second->configProto()),\n      std::make_shared<ConfigImpl>(\n          rds_subscription.routeConfigUpdate()->routeConfiguration(), factory_context_,\n          factory_context_.messageValidationContext().dynamicValidationVisitor(), false));\n  applyConfigUpdate([new_scoped_route_info](ConfigProvider::ConfigConstSharedPtr config)\n                        -> ConfigProvider::ConfigConstSharedPtr {\n    auto* thread_local_scoped_config =\n        const_cast<ScopedConfigImpl*>(static_cast<const ScopedConfigImpl*>(config.get()));\n    
thread_local_scoped_config->addOrUpdateRoutingScopes({new_scoped_route_info});\n    return config;\n  });\n  // The data plane may wait for the route configuration to come back.\n  route_provider_by_scope_[scope_name]->runOnDemandUpdateCallback();\n}\n\n// TODO(stevenzzzz): see issue #7508, consider generalizing this function as it overlaps with\n// CdsApiImpl::onConfigUpdate.\nvoid ScopedRdsConfigSubscription::onConfigUpdate(\n    const std::vector<Envoy::Config::DecodedResourceRef>& resources,\n    const std::string& version_info) {\n  Protobuf::RepeatedPtrField<std::string> to_remove_repeated;\n  for (const auto& scoped_route : scoped_route_map_) {\n    *to_remove_repeated.Add() = scoped_route.first;\n  }\n  onConfigUpdate(resources, to_remove_repeated, version_info);\n}\n\nProtobuf::RepeatedPtrField<std::string>\nScopedRdsConfigSubscription::detectUpdateConflictAndCleanupRemoved(\n    const std::vector<Envoy::Config::DecodedResourceRef>& resources,\n    const Protobuf::RepeatedPtrField<std::string>& removed_resources, std::string& exception_msg) {\n  Protobuf::RepeatedPtrField<std::string> clean_removed_resources;\n  // All the scope names to be removed or updated.\n  absl::flat_hash_set<std::string> updated_or_removed_scopes;\n  for (const std::string& removed_resource : removed_resources) {\n    updated_or_removed_scopes.insert(removed_resource);\n  }\n  for (const auto& resource : resources) {\n    const auto& scoped_route =\n        dynamic_cast<const envoy::config::route::v3::ScopedRouteConfiguration&>(\n            resource.get().resource());\n    updated_or_removed_scopes.insert(scoped_route.name());\n  }\n\n  absl::flat_hash_map<uint64_t, std::string> scope_name_by_hash = scope_name_by_hash_;\n  absl::erase_if(scope_name_by_hash, [&updated_or_removed_scopes](const auto& key_name) {\n    auto const& [key, name] = key_name;\n    return updated_or_removed_scopes.contains(name);\n  });\n  absl::flat_hash_map<std::string, 
envoy::config::route::v3::ScopedRouteConfiguration>\n      scoped_routes;\n  for (const auto& resource : resources) {\n    // Throws (thus rejects all) on any error.\n    const auto& scoped_route =\n        dynamic_cast<const envoy::config::route::v3::ScopedRouteConfiguration&>(\n            resource.get().resource());\n    const std::string& scope_name = scoped_route.name();\n    auto scope_config_inserted = scoped_routes.try_emplace(scope_name, std::move(scoped_route));\n    if (!scope_config_inserted.second) {\n      exception_msg = fmt::format(\"duplicate scoped route configuration '{}' found\", scope_name);\n      return clean_removed_resources;\n    }\n    const envoy::config::route::v3::ScopedRouteConfiguration& scoped_route_config =\n        scope_config_inserted.first->second;\n    const uint64_t key_fingerprint = MessageUtil::hash(scoped_route_config.key());\n    if (!scope_name_by_hash.try_emplace(key_fingerprint, scope_name).second) {\n      exception_msg =\n          fmt::format(\"scope key conflict found, first scope is '{}', second scope is '{}'\",\n                      scope_name_by_hash[key_fingerprint], scope_name);\n      return clean_removed_resources;\n    }\n  }\n\n  // only remove resources that is not going to be updated.\n  for (const std::string& removed_resource : removed_resources) {\n    if (!scoped_routes.contains(removed_resource)) {\n      *clean_removed_resources.Add() = removed_resource;\n    }\n  }\n  return clean_removed_resources;\n}\n\nvoid ScopedRdsConfigSubscription::onDemandRdsUpdate(\n    std::shared_ptr<Router::ScopeKey> scope_key, Event::Dispatcher& thread_local_dispatcher,\n    Http::RouteConfigUpdatedCallback&& route_config_updated_cb,\n    std::weak_ptr<Envoy::Config::ConfigSubscriptionCommonBase> weak_subscription) {\n  factory_context_.dispatcher().post([this, &thread_local_dispatcher, scope_key,\n                                      route_config_updated_cb, weak_subscription]() {\n    // If the subscription has 
been destroyed, return immediately.\n    if (!weak_subscription.lock()) {\n      thread_local_dispatcher.post([route_config_updated_cb] { route_config_updated_cb(false); });\n      return;\n    }\n\n    auto iter = scope_name_by_hash_.find(scope_key->hash());\n    // Return to filter chain if we can't find the scope.\n    // The scope may have been destroyed when callback reach the main thread.\n    if (iter == scope_name_by_hash_.end()) {\n      thread_local_dispatcher.post([route_config_updated_cb] { route_config_updated_cb(false); });\n      return;\n    }\n    // Wrap the thread local dispatcher inside the callback.\n    std::function<void()> thread_local_updated_callback = [route_config_updated_cb,\n                                                           &thread_local_dispatcher]() {\n      thread_local_dispatcher.post([route_config_updated_cb] { route_config_updated_cb(true); });\n    };\n    std::string scope_name = iter->second;\n    // On demand initialization inside main thread.\n    route_provider_by_scope_[scope_name]->addOnDemandUpdateCallback(thread_local_updated_callback);\n  });\n}\n\nScopedRdsConfigProvider::ScopedRdsConfigProvider(\n    ScopedRdsConfigSubscriptionSharedPtr&& subscription)\n    : MutableConfigProviderCommonBase(std::move(subscription), ConfigProvider::ApiType::Delta) {}\n\nProtobufTypes::MessagePtr ScopedRoutesConfigProviderManager::dumpConfigs() const {\n  auto config_dump = std::make_unique<envoy::admin::v3::ScopedRoutesConfigDump>();\n  for (const auto& element : configSubscriptions()) {\n    auto subscription = element.second.lock();\n    ASSERT(subscription);\n\n    if (subscription->configInfo()) {\n      auto* dynamic_config = config_dump->mutable_dynamic_scoped_route_configs()->Add();\n      dynamic_config->set_version_info(subscription->configInfo().value().last_config_version_);\n      const ScopedRdsConfigSubscription* typed_subscription =\n          static_cast<ScopedRdsConfigSubscription*>(subscription.get());\n     
 dynamic_config->set_name(typed_subscription->name());\n      const ScopedRouteMap& scoped_route_map = typed_subscription->scopedRouteMap();\n      for (const auto& it : scoped_route_map) {\n        dynamic_config->mutable_scoped_route_configs()->Add()->PackFrom(\n            API_RECOVER_ORIGINAL(it.second->configProto()));\n      }\n      TimestampUtil::systemClockToTimestamp(subscription->lastUpdated(),\n                                            *dynamic_config->mutable_last_updated());\n    }\n  }\n\n  for (const auto& provider : immutableConfigProviders(ConfigProviderInstanceType::Inline)) {\n    const auto protos_info =\n        provider->configProtoInfoVector<envoy::config::route::v3::ScopedRouteConfiguration>();\n    ASSERT(protos_info != absl::nullopt);\n    auto* inline_config = config_dump->mutable_inline_scoped_route_configs()->Add();\n    inline_config->set_name(static_cast<InlineScopedRoutesConfigProvider*>(provider)->name());\n    for (const auto& config_proto : protos_info.value().config_protos_) {\n      inline_config->mutable_scoped_route_configs()->Add()->PackFrom(\n          API_RECOVER_ORIGINAL(*config_proto));\n    }\n    TimestampUtil::systemClockToTimestamp(provider->lastUpdated(),\n                                          *inline_config->mutable_last_updated());\n  }\n\n  return config_dump;\n}\n\nConfigProviderPtr ScopedRoutesConfigProviderManager::createXdsConfigProvider(\n    const Protobuf::Message& config_source_proto,\n    Server::Configuration::ServerFactoryContext& factory_context, Init::Manager& init_manager,\n    const std::string& stat_prefix, const ConfigProviderManager::OptionalArg& optarg) {\n  const auto& typed_optarg = static_cast<const ScopedRoutesConfigProviderManagerOptArg&>(optarg);\n  ScopedRdsConfigSubscriptionSharedPtr subscription =\n      ConfigProviderManagerImplBase::getSubscription<ScopedRdsConfigSubscription>(\n          config_source_proto, init_manager,\n          [&config_source_proto, &factory_context, 
&stat_prefix,\n           &typed_optarg](const uint64_t manager_identifier,\n                          ConfigProviderManagerImplBase& config_provider_manager)\n              -> Envoy::Config::ConfigSubscriptionCommonBaseSharedPtr {\n            const auto& scoped_rds_config_source = dynamic_cast<\n                const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRds&>(\n                config_source_proto);\n            return std::make_shared<ScopedRdsConfigSubscription>(\n                scoped_rds_config_source, manager_identifier, typed_optarg.scoped_routes_name_,\n                typed_optarg.scope_key_builder_, factory_context, stat_prefix,\n                typed_optarg.rds_config_source_,\n                static_cast<ScopedRoutesConfigProviderManager&>(config_provider_manager)\n                    .routeConfigProviderPanager(),\n                static_cast<ScopedRoutesConfigProviderManager&>(config_provider_manager));\n          });\n\n  return std::make_unique<ScopedRdsConfigProvider>(std::move(subscription));\n}\n\nConfigProviderPtr ScopedRoutesConfigProviderManager::createStaticConfigProvider(\n    ProtobufTypes::ConstMessagePtrVector&& config_protos,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    const ConfigProviderManager::OptionalArg& optarg) {\n  const auto& typed_optarg = static_cast<const ScopedRoutesConfigProviderManagerOptArg&>(optarg);\n  return std::make_unique<InlineScopedRoutesConfigProvider>(\n      std::move(config_protos), typed_optarg.scoped_routes_name_, factory_context, *this,\n      typed_optarg.rds_config_source_, typed_optarg.scope_key_builder_);\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/scoped_rds.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/callback.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/route/v3/scoped_route.pb.h\"\n#include \"envoy/config/route/v3/scoped_route.pb.validate.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/router/route_config_provider_manager.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/config_provider_impl.h\"\n#include \"common/config/subscription_base.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/router/rds_impl.h\"\n#include \"common/router/scoped_config_impl.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n// Scoped routing configuration utilities.\nnamespace ScopedRoutesConfigProviderUtil {\n\n// If enabled in the HttpConnectionManager config, returns a ConfigProvider for scoped routing\n// configuration.\nEnvoy::Config::ConfigProviderPtr create(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        config,\n    Server::Configuration::ServerFactoryContext& factory_context, Init::Manager& init_manager,\n    const std::string& stat_prefix,\n    Envoy::Config::ConfigProviderManager& scoped_routes_config_provider_manager);\n\n} // namespace ScopedRoutesConfigProviderUtil\n\nclass ScopedRoutesConfigProviderManager;\n\n// A ConfigProvider for inline scoped routing configuration.\n// InlineScopedRoutesConfigProvider is not fully implemented at this point. It doesn't load\n// ScopedRouteConfigurations and propagate them to worker threads. 
If\n// InlineScopedRoutesConfigProvider is fully implemented, when it is loading\n// ScopedRouteConfiguration, the on demand field should be ignored and all scopes should be loaded\n// eagerly.\nclass InlineScopedRoutesConfigProvider : public Envoy::Config::ImmutableConfigProviderBase {\npublic:\n  InlineScopedRoutesConfigProvider(ProtobufTypes::ConstMessagePtrVector&& config_protos,\n                                   std::string name,\n                                   Server::Configuration::ServerFactoryContext& factory_context,\n                                   ScopedRoutesConfigProviderManager& config_provider_manager,\n                                   envoy::config::core::v3::ConfigSource rds_config_source,\n                                   envoy::extensions::filters::network::http_connection_manager::\n                                       v3::ScopedRoutes::ScopeKeyBuilder scope_key_builder);\n\n  ~InlineScopedRoutesConfigProvider() override = default;\n\n  const std::string& name() const { return name_; }\n\n  // Envoy::Config::ConfigProvider\n  Envoy::Config::ConfigProvider::ConfigProtoVector getConfigProtos() const override {\n    Envoy::Config::ConfigProvider::ConfigProtoVector out_protos;\n    std::for_each(config_protos_.begin(), config_protos_.end(),\n                  [&out_protos](const std::unique_ptr<const Protobuf::Message>& message) {\n                    out_protos.push_back(message.get());\n                  });\n    return out_protos;\n  }\n\n  std::string getConfigVersion() const override { return \"\"; }\n  ConfigConstSharedPtr getConfig() const override { return config_; }\n\nprivate:\n  const std::string name_;\n  ConfigConstSharedPtr config_;\n  const std::vector<std::unique_ptr<const Protobuf::Message>> config_protos_;\n  const envoy::config::core::v3::ConfigSource rds_config_source_;\n};\n\n/**\n * All SRDS stats. 
@see stats_macros.h\n */\n// clang-format off\n#define ALL_SCOPED_RDS_STATS(COUNTER, GAUGE)                                                       \\\n  COUNTER(config_reload)                                                                           \\\n  COUNTER(update_empty)                                                                            \\\n  GAUGE(all_scopes, Accumulate)                                                                    \\\n  GAUGE(on_demand_scopes, Accumulate)                                                              \\\n  GAUGE(active_scopes, Accumulate)\n\n// clang-format on\n\nstruct ScopedRdsStats {\n  ALL_SCOPED_RDS_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n\n  static ScopedRdsStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return ScopedRdsStats{\n        ALL_SCOPED_RDS_STATS(POOL_COUNTER_PREFIX(scope, prefix), POOL_GAUGE_PREFIX(scope, prefix))};\n  }\n};\n\n// A scoped RDS subscription to be used with the dynamic scoped RDS ConfigProvider.\nclass ScopedRdsConfigSubscription\n    : public Envoy::Config::DeltaConfigSubscriptionInstance,\n      Envoy::Config::SubscriptionBase<envoy::config::route::v3::ScopedRouteConfiguration> {\npublic:\n  using ScopedRouteConfigurationMap =\n      std::map<std::string, envoy::config::route::v3::ScopedRouteConfiguration>;\n\n  ScopedRdsConfigSubscription(\n      const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRds& scoped_rds,\n      const uint64_t manager_identifier, const std::string& name,\n      const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n          ScopeKeyBuilder& scope_key_builder,\n      Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix,\n      envoy::config::core::v3::ConfigSource rds_config_source,\n      RouteConfigProviderManager& route_config_provider_manager,\n      ScopedRoutesConfigProviderManager& 
config_provider_manager);\n\n  ~ScopedRdsConfigSubscription() override = default;\n\n  const std::string& name() const { return name_; }\n\n  const ScopedRouteMap& scopedRouteMap() const { return scoped_route_map_; }\n\n  void\n  onDemandRdsUpdate(std::shared_ptr<Router::ScopeKey> scope_key,\n                    Event::Dispatcher& thread_local_dispatcher,\n                    Http::RouteConfigUpdatedCallback&& route_config_updated_cb,\n                    std::weak_ptr<Envoy::Config::ConfigSubscriptionCommonBase> weak_subscription);\n\nprivate:\n  // A helper class that takes care of the life cycle management of a RDS route provider and the\n  // update callback handle.\n  struct RdsRouteConfigProviderHelper {\n    RdsRouteConfigProviderHelper(\n        ScopedRdsConfigSubscription& parent, std::string scope_name,\n        envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n        Init::Manager& init_manager);\n\n    RdsRouteConfigProviderHelper(ScopedRdsConfigSubscription& parent, std::string scope_name);\n\n    ~RdsRouteConfigProviderHelper() {\n      // Only remove the rds update when the rds provider has been initialized.\n      if (route_provider_) {\n        rds_update_callback_handle_->remove();\n        parent_.stats_.active_scopes_.dec();\n      }\n      if (on_demand_) {\n        parent_.stats_.on_demand_scopes_.dec();\n      }\n    }\n    ConfigConstSharedPtr routeConfig() { return route_provider_->config(); }\n\n    void addOnDemandUpdateCallback(std::function<void()> callback);\n\n    // Runs all the callback from worker thread to continue filter chain.\n    void runOnDemandUpdateCallback();\n\n    // If route provider has not been initialized, initialize it.\n    void maybeInitRdsConfigProvider();\n\n    // Initialize route provider and register for rds update.\n    void initRdsConfigProvider(\n        envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n        Init::Manager& init_manager);\n\n    
ScopedRdsConfigSubscription& parent_;\n    std::string scope_name_;\n    bool on_demand_;\n    RdsRouteConfigProviderImplSharedPtr route_provider_;\n    // This handle_ is owned by the route config provider's RDS subscription, when the helper\n    // destructs, the handle is deleted as well.\n    Common::CallbackHandle* rds_update_callback_handle_;\n    std::vector<std::function<void()>> on_demand_update_callbacks_;\n  };\n\n  using RdsRouteConfigProviderHelperPtr = std::unique_ptr<RdsRouteConfigProviderHelper>;\n\n  // Adds or updates scopes, create a new RDS provider for each resource, if an exception is thrown\n  // during updating, the exception message is collected via the exception messages vector.\n  // Returns true if any scope updated, false otherwise.\n  bool addOrUpdateScopes(const std::vector<Envoy::Config::DecodedResourceRef>& resources,\n                         Init::Manager& init_manager, const std::string& version_info);\n  // Removes given scopes from the managed set of scopes.\n  // Returns a list of to be removed helpers which is temporally held in the onConfigUpdate method,\n  // to make sure new scopes sharing the same RDS source configs could reuse the subscriptions.\n  std::list<RdsRouteConfigProviderHelperPtr>\n  removeScopes(const Protobuf::RepeatedPtrField<std::string>& scope_names,\n               const std::string& version_info);\n\n  // Envoy::Config::DeltaConfigSubscriptionInstance\n  void start() override { subscription_->start({}); }\n\n  // Detect scope name and scope key conflict between added scopes or between added scopes and old\n  // scopes. Some removed scopes may be in added resources list, instead of being removed, they\n  // should be updated, so only return scope names that will disappear after update. 
If conflict\n  // detected, fill exception_msg with information about scope conflict and return.\n  Protobuf::RepeatedPtrField<std::string> detectUpdateConflictAndCleanupRemoved(\n      const std::vector<Envoy::Config::DecodedResourceRef>& added_resources,\n      const Protobuf::RepeatedPtrField<std::string>& removed_resources, std::string& exception_msg);\n\n  // Envoy::Config::SubscriptionCallbacks\n\n  // NOTE: both delta form and state-of-the-world form onConfigUpdate(resources, version_info) will\n  // throw an EnvoyException on any error and essentially reject an update.\n  void onConfigUpdate(const std::vector<Envoy::Config::DecodedResourceRef>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(const std::vector<Envoy::Config::DecodedResourceRef>& added_resources,\n                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                      const std::string& system_version_info) override;\n  void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                            const EnvoyException*) override {\n    ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason);\n    DeltaConfigSubscriptionInstance::onConfigUpdateFailed();\n  }\n  // Propagate RDS updates to ScopeConfigImpl in workers.\n  void onRdsConfigUpdate(const std::string& scope_name,\n                         RdsRouteConfigSubscription& rds_subscription);\n\n  // ScopedRouteInfo by scope name.\n  ScopedRouteMap scoped_route_map_;\n\n  // For creating RDS subscriptions.\n  Server::Configuration::ServerFactoryContext& factory_context_;\n  const std::string name_;\n  Envoy::Config::SubscriptionPtr subscription_;\n  const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n      ScopeKeyBuilder scope_key_builder_;\n  Stats::ScopePtr scope_;\n  ScopedRdsStats stats_;\n  const envoy::config::core::v3::ConfigSource rds_config_source_;\n  const 
std::string stat_prefix_;\n  RouteConfigProviderManager& route_config_provider_manager_;\n\n  // RdsRouteConfigProvider by scope name.\n  absl::flat_hash_map<std::string, RdsRouteConfigProviderHelperPtr> route_provider_by_scope_;\n  // A map of (hash, scope-name), used to detect the key conflict between scopes.\n  absl::flat_hash_map<uint64_t, std::string> scope_name_by_hash_;\n};\n\nusing ScopedRdsConfigSubscriptionSharedPtr = std::shared_ptr<ScopedRdsConfigSubscription>;\n\n// A ConfigProvider for scoped RDS that dynamically fetches scoped routing configuration via a\n// subscription.\nclass ScopedRdsConfigProvider : public Envoy::Config::MutableConfigProviderCommonBase {\npublic:\n  ScopedRdsConfigProvider(ScopedRdsConfigSubscriptionSharedPtr&& subscription);\n\n  ScopedRdsConfigSubscription& subscription() const {\n    return *static_cast<ScopedRdsConfigSubscription*>(subscription_.get());\n  }\n  void onDemandRdsUpdate(std::shared_ptr<Router::ScopeKey> scope_key,\n                         Event::Dispatcher& thread_local_dispatcher,\n                         Http::RouteConfigUpdatedCallback&& route_config_updated_cb) const {\n    subscription().onDemandRdsUpdate(\n        std::move(scope_key), thread_local_dispatcher, std::move(route_config_updated_cb),\n        std::weak_ptr<Envoy::Config::ConfigSubscriptionCommonBase>(subscription_));\n  }\n};\n\n// A ConfigProviderManager for scoped routing configuration that creates static/inline and dynamic\n// (xds) config providers.\nclass ScopedRoutesConfigProviderManager : public Envoy::Config::ConfigProviderManagerImplBase {\npublic:\n  ScopedRoutesConfigProviderManager(\n      Server::Admin& admin, Router::RouteConfigProviderManager& route_config_provider_manager)\n      : Envoy::Config::ConfigProviderManagerImplBase(admin, \"route_scopes\"),\n        route_config_provider_manager_(route_config_provider_manager) {}\n\n  ~ScopedRoutesConfigProviderManager() override = default;\n\n  // 
Envoy::Config::ConfigProviderManagerImplBase\n  ProtobufTypes::MessagePtr dumpConfigs() const override;\n\n  // Envoy::Config::ConfigProviderManager\n  Envoy::Config::ConfigProviderPtr\n  createXdsConfigProvider(const Protobuf::Message& config_source_proto,\n                          Server::Configuration::ServerFactoryContext& factory_context,\n                          Init::Manager& init_manager, const std::string& stat_prefix,\n                          const Envoy::Config::ConfigProviderManager::OptionalArg& optarg) override;\n  Envoy::Config::ConfigProviderPtr\n  createStaticConfigProvider(const Protobuf::Message&, Server::Configuration::ServerFactoryContext&,\n                             const Envoy::Config::ConfigProviderManager::OptionalArg&) override {\n    ASSERT(false,\n           \"SRDS supports delta updates and requires the use of the createStaticConfigProvider() \"\n           \"overload that accepts a config proto set as an argument.\");\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  Envoy::Config::ConfigProviderPtr createStaticConfigProvider(\n      std::vector<std::unique_ptr<const Protobuf::Message>>&& config_protos,\n      Server::Configuration::ServerFactoryContext& factory_context,\n      const Envoy::Config::ConfigProviderManager::OptionalArg& optarg) override;\n\n  RouteConfigProviderManager& routeConfigProviderPanager() {\n    return route_config_provider_manager_;\n  }\n\nprivate:\n  RouteConfigProviderManager& route_config_provider_manager_;\n};\n\nusing ScopedRoutesConfigProviderManagerPtr = std::unique_ptr<ScopedRoutesConfigProviderManager>;\nusing ScopedRoutesConfigProviderManagerSharedPtr =\n    std::shared_ptr<ScopedRoutesConfigProviderManager>;\n\n// The optional argument passed to the ConfigProviderManager::create*() functions.\nclass ScopedRoutesConfigProviderManagerOptArg\n    : public Envoy::Config::ConfigProviderManager::OptionalArg {\npublic:\n  ScopedRoutesConfigProviderManagerOptArg(\n      std::string scoped_routes_name,\n     
 const envoy::config::core::v3::ConfigSource& rds_config_source,\n      const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n          ScopeKeyBuilder& scope_key_builder)\n      : scoped_routes_name_(std::move(scoped_routes_name)), rds_config_source_(rds_config_source),\n        scope_key_builder_(scope_key_builder) {}\n\n  const std::string scoped_routes_name_;\n  const envoy::config::core::v3::ConfigSource& rds_config_source_;\n  const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n      ScopeKeyBuilder& scope_key_builder_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/shadow_writer_impl.cc",
    "content": "#include \"common/router/shadow_writer_impl.h\"\n\n#include <chrono>\n#include <string>\n\n#include \"common/common/assert.h\"\n#include \"common/http/headers.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nvoid ShadowWriterImpl::shadow(const std::string& cluster, Http::RequestMessagePtr&& request,\n                              const Http::AsyncClient::RequestOptions& options) {\n  // It's possible that the cluster specified in the route configuration no longer exists due\n  // to a CDS removal. Check that it still exists before shadowing.\n  // TODO(mattklein123): Optimally we would have a stat but for now just fix the crashing issue.\n  if (!cm_.get(cluster)) {\n    ENVOY_LOG(debug, \"shadow cluster '{}' does not exist\", cluster);\n    return;\n  }\n\n  ASSERT(!request->headers().getHostValue().empty());\n  // Switch authority to add a shadow postfix. This allows upstream logging to make more sense.\n  auto parts = StringUtil::splitToken(request->headers().getHostValue(), \":\");\n  ASSERT(!parts.empty() && parts.size() <= 2);\n  request->headers().setHost(parts.size() == 2\n                                 ? absl::StrJoin(parts, \"-shadow:\")\n                                 : absl::StrCat(request->headers().getHostValue(), \"-shadow\"));\n  // This is basically fire and forget. We don't handle cancelling.\n  cm_.httpAsyncClientForCluster(cluster).send(std::move(request), *this, options);\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/shadow_writer_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/router/shadow_writer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Implementation of ShadowWriter that takes incoming requests to shadow and implements \"fire and\n * forget\" behavior using an async client.\n */\nclass ShadowWriterImpl : Logger::Loggable<Logger::Id::router>,\n                         public ShadowWriter,\n                         public Http::AsyncClient::Callbacks {\npublic:\n  ShadowWriterImpl(Upstream::ClusterManager& cm) : cm_(cm) {}\n\n  // Router::ShadowWriter\n  void shadow(const std::string& cluster, Http::RequestMessagePtr&& request,\n              const Http::AsyncClient::RequestOptions& options) override;\n\n  // Http::AsyncClient::Callbacks\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override {}\n  void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override {}\n  void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&,\n                                    const Http::ResponseHeaderMap*) override {}\n\nprivate:\n  Upstream::ClusterManager& cm_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/string_accessor_impl.h",
    "content": "#pragma once\n\n#include \"envoy/router/string_accessor.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nclass StringAccessorImpl : public StringAccessor {\npublic:\n  StringAccessorImpl(absl::string_view value) : value_(value) {}\n\n  // StringAccessor\n  absl::string_view asString() const override { return value_; }\n\n  // FilterState::Object\n  ProtobufTypes::MessagePtr serializeAsProto() const override {\n    auto message = std::make_unique<ProtobufWkt::StringValue>();\n    message->set_value(value_);\n    return message;\n  }\n\n  absl::optional<std::string> serializeAsString() const override { return value_; }\n\nprivate:\n  std::string value_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/tls_context_match_criteria_impl.cc",
    "content": "#include \"common/router/tls_context_match_criteria_impl.h\"\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nTlsContextMatchCriteriaImpl::TlsContextMatchCriteriaImpl(\n    const envoy::config::route::v3::RouteMatch::TlsContextMatchOptions& options) {\n  if (options.has_presented()) {\n    presented_ = options.presented().value();\n  }\n\n  if (options.has_validated()) {\n    validated_ = options.validated().value();\n  }\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/tls_context_match_criteria_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/router/router.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nclass TlsContextMatchCriteriaImpl : public TlsContextMatchCriteria {\npublic:\n  TlsContextMatchCriteriaImpl(\n      const envoy::config::route::v3::RouteMatch::TlsContextMatchOptions& options);\n\n  const absl::optional<bool>& presented() const override { return presented_; }\n  const absl::optional<bool>& validated() const override { return validated_; }\n\nprivate:\n  absl::optional<bool> presented_;\n  absl::optional<bool> validated_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/upstream_request.cc",
    "content": "#include \"common/router/upstream_request.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/conn_pool.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/scope_tracker.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/application_protocol.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/network/upstream_server_name.h\"\n#include \"common/network/upstream_subject_alt_names.h\"\n#include \"common/router/config_impl.h\"\n#include \"common/router/debug_config.h\"\n#include \"common/router/router.h\"\n#include \"common/stream_info/uint32_accessor_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nUpstreamRequest::UpstreamRequest(RouterFilterInterface& parent,\n                                 std::unique_ptr<GenericConnPool>&& conn_pool)\n    : parent_(parent), conn_pool_(std::move(conn_pool)), grpc_rq_success_deferred_(false),\n      stream_info_(parent_.callbacks()->dispatcher().timeSource()),\n      start_time_(parent_.callbacks()->dispatcher().timeSource().monotonicTime()),\n      calling_encode_headers_(false), upstream_canary_(false), decode_complete_(false),\n      encode_complete_(false), 
encode_trailers_(false), retried_(false), awaiting_headers_(true),\n      outlier_detection_timeout_recorded_(false),\n      create_per_try_timeout_on_request_complete_(false), paused_for_connect_(false),\n      record_timeout_budget_(parent_.cluster()->timeoutBudgetStats().has_value()) {\n  if (parent_.config().start_child_span_) {\n    span_ = parent_.callbacks()->activeSpan().spawnChild(\n        parent_.callbacks()->tracingConfig(), \"router \" + parent.cluster()->name() + \" egress\",\n        parent.timeSource().systemTime());\n    if (parent.attemptCount() != 1) {\n      // This is a retry request, add this metadata to span.\n      span_->setTag(Tracing::Tags::get().RetryCount, std::to_string(parent.attemptCount() - 1));\n    }\n  }\n\n  stream_info_.healthCheck(parent_.callbacks()->streamInfo().healthCheck());\n  if (conn_pool_->protocol().has_value()) {\n    stream_info_.protocol(conn_pool_->protocol().value());\n  }\n}\n\nUpstreamRequest::~UpstreamRequest() {\n  if (span_ != nullptr) {\n    Tracing::HttpTracerUtility::finalizeUpstreamSpan(*span_, upstream_headers_.get(),\n                                                     upstream_trailers_.get(), stream_info_,\n                                                     Tracing::EgressConfig::get());\n  }\n\n  if (per_try_timeout_ != nullptr) {\n    // Allows for testing.\n    per_try_timeout_->disableTimer();\n  }\n  if (max_stream_duration_timer_ != nullptr) {\n    max_stream_duration_timer_->disableTimer();\n  }\n  clearRequestEncoder();\n\n  // If desired, fire the per-try histogram when the UpstreamRequest\n  // completes.\n  if (record_timeout_budget_) {\n    Event::Dispatcher& dispatcher = parent_.callbacks()->dispatcher();\n    const MonotonicTime end_time = dispatcher.timeSource().monotonicTime();\n    const std::chrono::milliseconds response_time =\n        std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time_);\n    Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = 
parent_.cluster()->timeoutBudgetStats();\n    tb_stats->get().upstream_rq_timeout_budget_per_try_percent_used_.recordValue(\n        FilterUtility::percentageOfTimeout(response_time, parent_.timeout().per_try_timeout_));\n  }\n\n  stream_info_.setUpstreamTiming(upstream_timing_);\n  stream_info_.onRequestComplete();\n  for (const auto& upstream_log : parent_.config().upstream_logs_) {\n    upstream_log->log(parent_.downstreamHeaders(), upstream_headers_.get(),\n                      upstream_trailers_.get(), stream_info_);\n  }\n\n  while (downstream_data_disabled_ != 0) {\n    parent_.callbacks()->onDecoderFilterBelowWriteBufferLowWatermark();\n    parent_.cluster()->stats().upstream_flow_control_drained_total_.inc();\n    --downstream_data_disabled_;\n  }\n}\n\nvoid UpstreamRequest::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) {\n  ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());\n\n  ASSERT(100 == Http::Utility::getResponseStatus(*headers));\n  parent_.onUpstream100ContinueHeaders(std::move(headers), *this);\n}\n\nvoid UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) {\n  ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());\n\n  // We drop 1xx other than 101 on the floor; 101 upgrade headers need to be passed to the client as\n  // part of the final response. 
100-continue headers are handled in onUpstream100ContinueHeaders.\n  //\n  // We could in principle handle other headers here, but this might result in the double invocation\n  // of decodeHeaders() (once for informational, again for non-informational), which is likely an\n  // easy to miss corner case in the filter and HCM contract.\n  //\n  // This filtering is done early in upstream request, unlike 100 coalescing which is performed in\n  // the router filter, since the filtering only depends on the state of a single upstream, and we\n  // don't want to confuse accounting such as onFirstUpstreamRxByteReceived() with informational\n  // headers.\n  const uint64_t response_code = Http::Utility::getResponseStatus(*headers);\n  if (Http::CodeUtility::is1xx(response_code) &&\n      response_code != enumToInt(Http::Code::SwitchingProtocols)) {\n    return;\n  }\n\n  // TODO(rodaine): This is actually measuring after the headers are parsed and not the first\n  // byte.\n  upstream_timing_.onFirstUpstreamRxByteReceived(parent_.callbacks()->dispatcher().timeSource());\n  maybeEndDecode(end_stream);\n\n  awaiting_headers_ = false;\n  if (!parent_.config().upstream_logs_.empty()) {\n    upstream_headers_ = Http::createHeaderMap<Http::ResponseHeaderMapImpl>(*headers);\n  }\n  stream_info_.response_code_ = static_cast<uint32_t>(response_code);\n\n  if (paused_for_connect_ && response_code == 200) {\n    encodeBodyAndTrailers();\n    paused_for_connect_ = false;\n  }\n\n  parent_.onUpstreamHeaders(response_code, std::move(headers), *this, end_stream);\n}\n\nvoid UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) {\n  ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());\n\n  maybeEndDecode(end_stream);\n  stream_info_.addBytesReceived(data.length());\n  parent_.onUpstreamData(data, *this, end_stream);\n}\n\nvoid UpstreamRequest::decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) {\n  ScopeTrackerScopeState 
scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());\n\n  maybeEndDecode(true);\n  if (!parent_.config().upstream_logs_.empty()) {\n    upstream_trailers_ = Http::createHeaderMap<Http::ResponseTrailerMapImpl>(*trailers);\n  }\n  parent_.onUpstreamTrailers(std::move(trailers), *this);\n}\nconst RouteEntry& UpstreamRequest::routeEntry() const { return *parent_.routeEntry(); }\n\nconst Network::Connection& UpstreamRequest::connection() const {\n  return *parent_.callbacks()->connection();\n}\n\nvoid UpstreamRequest::decodeMetadata(Http::MetadataMapPtr&& metadata_map) {\n  parent_.onUpstreamMetadata(std::move(metadata_map));\n}\n\nvoid UpstreamRequest::maybeEndDecode(bool end_stream) {\n  if (end_stream) {\n    upstream_timing_.onLastUpstreamRxByteReceived(parent_.callbacks()->dispatcher().timeSource());\n    decode_complete_ = true;\n  }\n}\n\nvoid UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) {\n  stream_info_.onUpstreamHostSelected(host);\n  upstream_host_ = host;\n  parent_.callbacks()->streamInfo().onUpstreamHostSelected(host);\n  parent_.onUpstreamHostSelected(host);\n}\n\nvoid UpstreamRequest::encodeHeaders(bool end_stream) {\n  ASSERT(!encode_complete_);\n  encode_complete_ = end_stream;\n\n  conn_pool_->newStream(this);\n}\n\nvoid UpstreamRequest::encodeData(Buffer::Instance& data, bool end_stream) {\n  ASSERT(!encode_complete_);\n  encode_complete_ = end_stream;\n\n  if (!upstream_ || paused_for_connect_) {\n    ENVOY_STREAM_LOG(trace, \"buffering {} bytes\", *parent_.callbacks(), data.length());\n    if (!buffered_request_body_) {\n      buffered_request_body_ = std::make_unique<Buffer::WatermarkBuffer>(\n          [this]() -> void { this->enableDataFromDownstreamForFlowControl(); },\n          [this]() -> void { this->disableDataFromDownstreamForFlowControl(); },\n          []() -> void { /* TODO(adisuissa): Handle overflow watermark */ });\n      
buffered_request_body_->setWatermarks(parent_.callbacks()->decoderBufferLimit());\n    }\n\n    buffered_request_body_->move(data);\n  } else {\n    ASSERT(downstream_metadata_map_vector_.empty());\n\n    ENVOY_STREAM_LOG(trace, \"proxying {} bytes\", *parent_.callbacks(), data.length());\n    stream_info_.addBytesSent(data.length());\n    upstream_->encodeData(data, end_stream);\n    if (end_stream) {\n      upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource());\n    }\n  }\n}\n\nvoid UpstreamRequest::encodeTrailers(const Http::RequestTrailerMap& trailers) {\n  ASSERT(!encode_complete_);\n  encode_complete_ = true;\n  encode_trailers_ = true;\n\n  if (!upstream_) {\n    ENVOY_STREAM_LOG(trace, \"buffering trailers\", *parent_.callbacks());\n  } else {\n    ASSERT(downstream_metadata_map_vector_.empty());\n\n    ENVOY_STREAM_LOG(trace, \"proxying trailers\", *parent_.callbacks());\n    upstream_->encodeTrailers(trailers);\n    upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource());\n  }\n}\n\nvoid UpstreamRequest::encodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr) {\n  if (!upstream_) {\n    ENVOY_STREAM_LOG(trace, \"upstream_ not ready. 
Store metadata_map to encode later: {}\",\n                     *parent_.callbacks(), *metadata_map_ptr);\n    downstream_metadata_map_vector_.emplace_back(std::move(metadata_map_ptr));\n  } else {\n    ENVOY_STREAM_LOG(trace, \"Encode metadata: {}\", *parent_.callbacks(), *metadata_map_ptr);\n    Http::MetadataMapVector metadata_map_vector;\n    metadata_map_vector.emplace_back(std::move(metadata_map_ptr));\n    upstream_->encodeMetadata(metadata_map_vector);\n  }\n}\n\nvoid UpstreamRequest::onResetStream(Http::StreamResetReason reason,\n                                    absl::string_view transport_failure_reason) {\n  ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());\n\n  if (span_ != nullptr) {\n    // Add tags about reset.\n    span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True);\n    span_->setTag(Tracing::Tags::get().ErrorReason, Http::Utility::resetReasonToString(reason));\n  }\n\n  clearRequestEncoder();\n  awaiting_headers_ = false;\n  if (!calling_encode_headers_) {\n    stream_info_.setResponseFlag(Filter::streamResetReasonToResponseFlag(reason));\n    parent_.onUpstreamReset(reason, transport_failure_reason, *this);\n  } else {\n    deferred_reset_reason_ = reason;\n  }\n}\n\nvoid UpstreamRequest::resetStream() {\n  // Don't reset the stream if we're already done with it.\n  if (encode_complete_ && decode_complete_) {\n    return;\n  }\n\n  if (span_ != nullptr) {\n    // Add tags about the cancellation.\n    span_->setTag(Tracing::Tags::get().Canceled, Tracing::Tags::get().True);\n  }\n\n  if (conn_pool_->cancelAnyPendingStream()) {\n    ENVOY_STREAM_LOG(debug, \"canceled pool request\", *parent_.callbacks());\n    ASSERT(!upstream_);\n  }\n\n  if (upstream_) {\n    ENVOY_STREAM_LOG(debug, \"resetting pool request\", *parent_.callbacks());\n    upstream_->resetStream();\n    clearRequestEncoder();\n  }\n}\n\nvoid UpstreamRequest::setupPerTryTimeout() {\n  ASSERT(!per_try_timeout_);\n  if 
(parent_.timeout().per_try_timeout_.count() > 0) {\n    per_try_timeout_ =\n        parent_.callbacks()->dispatcher().createTimer([this]() -> void { onPerTryTimeout(); });\n    per_try_timeout_->enableTimer(parent_.timeout().per_try_timeout_);\n  }\n}\n\nvoid UpstreamRequest::onPerTryTimeout() {\n  // If we've sent anything downstream, ignore the per try timeout and let the response continue\n  // up to the global timeout\n  if (!parent_.downstreamResponseStarted()) {\n    ENVOY_STREAM_LOG(debug, \"upstream per try timeout\", *parent_.callbacks());\n\n    stream_info_.setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout);\n    parent_.onPerTryTimeout(*this);\n  } else {\n    ENVOY_STREAM_LOG(debug,\n                     \"ignored upstream per try timeout due to already started downstream response\",\n                     *parent_.callbacks());\n  }\n}\n\nvoid UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                                    absl::string_view transport_failure_reason,\n                                    Upstream::HostDescriptionConstSharedPtr host) {\n  Http::StreamResetReason reset_reason = Http::StreamResetReason::ConnectionFailure;\n  switch (reason) {\n  case ConnectionPool::PoolFailureReason::Overflow:\n    reset_reason = Http::StreamResetReason::Overflow;\n    break;\n  case ConnectionPool::PoolFailureReason::RemoteConnectionFailure:\n    FALLTHRU;\n  case ConnectionPool::PoolFailureReason::LocalConnectionFailure:\n    reset_reason = Http::StreamResetReason::ConnectionFailure;\n    break;\n  case ConnectionPool::PoolFailureReason::Timeout:\n    reset_reason = Http::StreamResetReason::LocalReset;\n  }\n\n  // Mimic an upstream reset.\n  onUpstreamHostSelected(host);\n  onResetStream(reset_reason, transport_failure_reason);\n}\n\nvoid UpstreamRequest::onPoolReady(\n    std::unique_ptr<GenericUpstream>&& upstream, Upstream::HostDescriptionConstSharedPtr host,\n    const 
Network::Address::InstanceConstSharedPtr& upstream_local_address,\n    const StreamInfo::StreamInfo& info) {\n  // This may be called under an existing ScopeTrackerScopeState but it will unwind correctly.\n  ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());\n  ENVOY_STREAM_LOG(debug, \"pool ready\", *parent_.callbacks());\n  upstream_ = std::move(upstream);\n\n  if (parent_.requestVcluster()) {\n    // The cluster increases its upstream_rq_total_ counter right before firing this onPoolReady\n    // callback. Hence, the upstream request increases the virtual cluster's upstream_rq_total_ stat\n    // here.\n    parent_.requestVcluster()->stats().upstream_rq_total_.inc();\n  }\n\n  host->outlierDetector().putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess);\n\n  onUpstreamHostSelected(host);\n\n  stream_info_.setUpstreamFilterState(std::make_shared<StreamInfo::FilterStateImpl>(\n      info.filterState().parent()->parent(), StreamInfo::FilterState::LifeSpan::Request));\n  stream_info_.setUpstreamLocalAddress(upstream_local_address);\n  parent_.callbacks()->streamInfo().setUpstreamLocalAddress(upstream_local_address);\n\n  stream_info_.setUpstreamSslConnection(info.downstreamSslConnection());\n  parent_.callbacks()->streamInfo().setUpstreamSslConnection(info.downstreamSslConnection());\n\n  if (parent_.downstreamEndStream()) {\n    setupPerTryTimeout();\n  } else {\n    create_per_try_timeout_on_request_complete_ = true;\n  }\n\n  // Make sure the connection manager will inform the downstream watermark manager when the\n  // downstream buffers are overrun. 
This may result in immediate watermark callbacks referencing\n  // the encoder.\n  parent_.callbacks()->addDownstreamWatermarkCallbacks(downstream_watermark_manager_);\n\n  calling_encode_headers_ = true;\n  auto* headers = parent_.downstreamHeaders();\n  if (parent_.routeEntry()->autoHostRewrite() && !host->hostname().empty()) {\n    parent_.downstreamHeaders()->setHost(host->hostname());\n  }\n\n  if (span_ != nullptr) {\n    span_->injectContext(*parent_.downstreamHeaders());\n  }\n\n  upstream_timing_.onFirstUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource());\n\n  // Make sure that when we are forwarding CONNECT payload we do not do so until\n  // the upstream has accepted the CONNECT request.\n  if (conn_pool_->protocol().has_value() &&\n      headers->getMethodValue() == Http::Headers::get().MethodValues.Connect) {\n    paused_for_connect_ = true;\n  }\n\n  if (upstream_host_->cluster().commonHttpProtocolOptions().has_max_stream_duration()) {\n    const auto max_stream_duration = std::chrono::milliseconds(DurationUtil::durationToMilliseconds(\n        upstream_host_->cluster().commonHttpProtocolOptions().max_stream_duration()));\n    if (max_stream_duration.count()) {\n      max_stream_duration_timer_ = parent_.callbacks()->dispatcher().createTimer(\n          [this]() -> void { onStreamMaxDurationReached(); });\n      max_stream_duration_timer_->enableTimer(max_stream_duration);\n    }\n  }\n\n  upstream_->encodeHeaders(*parent_.downstreamHeaders(), shouldSendEndStream());\n\n  calling_encode_headers_ = false;\n\n  if (!paused_for_connect_) {\n    encodeBodyAndTrailers();\n  }\n}\n\nvoid UpstreamRequest::encodeBodyAndTrailers() {\n  // It is possible to get reset in the middle of an encodeHeaders() call. This happens for\n  // example in the HTTP/2 codec if the frame cannot be encoded for some reason. 
This should never\n  // happen but it's unclear if we have covered all cases so protect against it and test for it.\n  // One specific example of a case where this happens is if we try to encode a total header size\n  // that is too big in HTTP/2 (64K currently).\n  if (deferred_reset_reason_) {\n    onResetStream(deferred_reset_reason_.value(), absl::string_view());\n  } else {\n    // Encode metadata after headers and before any other frame type.\n    if (!downstream_metadata_map_vector_.empty()) {\n      ENVOY_STREAM_LOG(debug, \"Send metadata onPoolReady. {}\", *parent_.callbacks(),\n                       downstream_metadata_map_vector_);\n      upstream_->encodeMetadata(downstream_metadata_map_vector_);\n      downstream_metadata_map_vector_.clear();\n      if (shouldSendEndStream()) {\n        Buffer::OwnedImpl empty_data(\"\");\n        upstream_->encodeData(empty_data, true);\n      }\n    }\n\n    if (buffered_request_body_) {\n      stream_info_.addBytesSent(buffered_request_body_->length());\n      upstream_->encodeData(*buffered_request_body_, encode_complete_ && !encode_trailers_);\n    }\n\n    if (encode_trailers_) {\n      upstream_->encodeTrailers(*parent_.downstreamTrailers());\n    }\n\n    if (encode_complete_) {\n      upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource());\n    }\n  }\n}\n\nvoid UpstreamRequest::onStreamMaxDurationReached() {\n  upstream_host_->cluster().stats().upstream_rq_max_duration_reached_.inc();\n\n  // The upstream had closed then try to retry along with retry policy.\n  parent_.onStreamMaxDurationReached(*this);\n}\n\nvoid UpstreamRequest::clearRequestEncoder() {\n  // Before clearing the encoder, unsubscribe from callbacks.\n  if (upstream_) {\n    parent_.callbacks()->removeDownstreamWatermarkCallbacks(downstream_watermark_manager_);\n  }\n  upstream_.reset();\n}\n\nvoid UpstreamRequest::DownstreamWatermarkManager::onAboveWriteBufferHighWatermark() {\n  
ASSERT(parent_.upstream_);\n\n  // There are two states we should get this callback in: 1) the watermark was\n  // hit due to writes from a different filter instance over a shared\n  // downstream connection, or 2) the watermark was hit due to THIS filter\n  // instance writing back the \"winning\" upstream request. In either case we\n  // can disable reads from upstream.\n  ASSERT(!parent_.parent_.finalUpstreamRequest() ||\n         &parent_ == parent_.parent_.finalUpstreamRequest());\n  // The downstream connection is overrun. Pause reads from upstream.\n  // If there are multiple calls to readDisable either the codec (H2) or the underlying\n  // Network::Connection (H1) will handle reference counting.\n  parent_.parent_.cluster()->stats().upstream_flow_control_paused_reading_total_.inc();\n  parent_.upstream_->readDisable(true);\n}\n\nvoid UpstreamRequest::DownstreamWatermarkManager::onBelowWriteBufferLowWatermark() {\n  ASSERT(parent_.upstream_);\n\n  // One source of connection blockage has buffer available. Pass this on to the stream, which\n  // will resume reads if this was the last remaining high watermark.\n  parent_.parent_.cluster()->stats().upstream_flow_control_resumed_reading_total_.inc();\n  parent_.upstream_->readDisable(false);\n}\n\nvoid UpstreamRequest::disableDataFromDownstreamForFlowControl() {\n  // If there is only one upstream request, we can be assured that\n  // disabling reads will not slow down other upstream requests. 
If we've\n  // already seen the full downstream request (downstream_end_stream_) then\n  // disabling reads is a noop.\n  // This assert condition must be true because\n  // parent_.upstreamRequests().size() can only be greater than 1 in the\n  // case of a per-try-timeout with hedge_on_per_try_timeout enabled, and\n  // the per try timeout timer is started only after downstream_end_stream_\n  // is true.\n  ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream());\n  parent_.cluster()->stats().upstream_flow_control_backed_up_total_.inc();\n  parent_.callbacks()->onDecoderFilterAboveWriteBufferHighWatermark();\n  ++downstream_data_disabled_;\n}\n\nvoid UpstreamRequest::enableDataFromDownstreamForFlowControl() {\n  // If there is only one upstream request, we can be assured that\n  // disabling reads will not overflow any write buffers in other upstream\n  // requests. If we've already seen the full downstream request\n  // (downstream_end_stream_) then enabling reads is a noop.\n  // This assert condition must be true because\n  // parent_.upstreamRequests().size() can only be greater than 1 in the\n  // case of a per-try-timeout with hedge_on_per_try_timeout enabled, and\n  // the per try timeout timer is started only after downstream_end_stream_\n  // is true.\n  ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream());\n  parent_.cluster()->stats().upstream_flow_control_drained_total_.inc();\n  parent_.callbacks()->onDecoderFilterBelowWriteBufferLowWatermark();\n  ASSERT(downstream_data_disabled_ != 0);\n  if (downstream_data_disabled_ > 0) {\n    --downstream_data_disabled_;\n  }\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/upstream_request.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/conn_pool.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/tcp/conn_pool.h\"\n\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/hash.h\"\n#include \"common/common/hex.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nclass GenericUpstream;\nclass GenericConnectionPoolCallbacks;\nclass RouterFilterInterface;\nclass UpstreamRequest;\n\n// The base request for Upstream.\nclass UpstreamRequest : public Logger::Loggable<Logger::Id::router>,\n                        public UpstreamToDownstream,\n                        public LinkedObject<UpstreamRequest>,\n                        public GenericConnectionPoolCallbacks {\npublic:\n  UpstreamRequest(RouterFilterInterface& parent, std::unique_ptr<GenericConnPool>&& conn_pool);\n  ~UpstreamRequest() override;\n\n  void encodeHeaders(bool end_stream);\n  void encodeData(Buffer::Instance& data, bool end_stream);\n  void encodeTrailers(const Http::RequestTrailerMap& trailers);\n  void encodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr);\n\n  void resetStream();\n  void setupPerTryTimeout();\n  void onPerTryTimeout();\n  void maybeEndDecode(bool end_stream);\n  void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host);\n\n  // Http::StreamDecoder\n  void decodeData(Buffer::Instance& data, bool end_stream) override;\n  void decodeMetadata(Http::MetadataMapPtr&& metadata_map) override;\n\n  // UpstreamToDownstream (Http::ResponseDecoder)\n  void decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) 
override;\n  void decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override;\n  void decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) override;\n  // UpstreamToDownstream (Http::StreamCallbacks)\n  void onResetStream(Http::StreamResetReason reason,\n                     absl::string_view transport_failure_reason) override;\n  void onAboveWriteBufferHighWatermark() override { disableDataFromDownstreamForFlowControl(); }\n  void onBelowWriteBufferLowWatermark() override { enableDataFromDownstreamForFlowControl(); }\n  // UpstreamToDownstream\n  const RouteEntry& routeEntry() const override;\n  const Network::Connection& connection() const override;\n\n  void disableDataFromDownstreamForFlowControl();\n  void enableDataFromDownstreamForFlowControl();\n\n  // GenericConnPool\n  void onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                     absl::string_view transport_failure_reason,\n                     Upstream::HostDescriptionConstSharedPtr host) override;\n  void onPoolReady(std::unique_ptr<GenericUpstream>&& upstream,\n                   Upstream::HostDescriptionConstSharedPtr host,\n                   const Network::Address::InstanceConstSharedPtr& upstream_local_address,\n                   const StreamInfo::StreamInfo& info) override;\n  UpstreamToDownstream& upstreamToDownstream() override { return *this; }\n\n  void clearRequestEncoder();\n  void onStreamMaxDurationReached();\n\n  struct DownstreamWatermarkManager : public Http::DownstreamWatermarkCallbacks {\n    DownstreamWatermarkManager(UpstreamRequest& parent) : parent_(parent) {}\n\n    // Http::DownstreamWatermarkCallbacks\n    void onBelowWriteBufferLowWatermark() override;\n    void onAboveWriteBufferHighWatermark() override;\n\n    UpstreamRequest& parent_;\n  };\n\n  void readEnable();\n  void encodeBodyAndTrailers();\n\n  // Getters and setters\n  Upstream::HostDescriptionConstSharedPtr& upstreamHost() { return upstream_host_; }\n  void 
outlierDetectionTimeoutRecorded(bool recorded) {\n    outlier_detection_timeout_recorded_ = recorded;\n  }\n  bool outlierDetectionTimeoutRecorded() { return outlier_detection_timeout_recorded_; }\n  const StreamInfo::UpstreamTiming& upstreamTiming() { return upstream_timing_; }\n  void retried(bool value) { retried_ = value; }\n  bool retried() { return retried_; }\n  bool grpcRqSuccessDeferred() { return grpc_rq_success_deferred_; }\n  void grpcRqSuccessDeferred(bool deferred) { grpc_rq_success_deferred_ = deferred; }\n  void upstreamCanary(bool value) { upstream_canary_ = value; }\n  bool upstreamCanary() { return upstream_canary_; }\n  bool awaitingHeaders() { return awaiting_headers_; }\n  void recordTimeoutBudget(bool value) { record_timeout_budget_ = value; }\n  bool createPerTryTimeoutOnRequestComplete() {\n    return create_per_try_timeout_on_request_complete_;\n  }\n  bool encodeComplete() const { return encode_complete_; }\n  RouterFilterInterface& parent() { return parent_; }\n\nprivate:\n  bool shouldSendEndStream() {\n    // Only encode end stream if the full request has been received, the body\n    // has been sent, and any trailers or metadata have also been sent.\n    return encode_complete_ && !buffered_request_body_ && !encode_trailers_ &&\n           downstream_metadata_map_vector_.empty();\n  }\n\n  RouterFilterInterface& parent_;\n  std::unique_ptr<GenericConnPool> conn_pool_;\n  bool grpc_rq_success_deferred_;\n  Event::TimerPtr per_try_timeout_;\n  std::unique_ptr<GenericUpstream> upstream_;\n  absl::optional<Http::StreamResetReason> deferred_reset_reason_;\n  Buffer::WatermarkBufferPtr buffered_request_body_;\n  Upstream::HostDescriptionConstSharedPtr upstream_host_;\n  DownstreamWatermarkManager downstream_watermark_manager_{*this};\n  Tracing::SpanPtr span_;\n  StreamInfo::StreamInfoImpl stream_info_;\n  StreamInfo::UpstreamTiming upstream_timing_;\n  const MonotonicTime start_time_;\n  // Copies of upstream headers/trailers. 
These are only set if upstream\n  // access logging is configured.\n  Http::ResponseHeaderMapPtr upstream_headers_;\n  Http::ResponseTrailerMapPtr upstream_trailers_;\n  Http::MetadataMapVector downstream_metadata_map_vector_;\n\n  // Tracks the number of times the flow of data from downstream has been disabled.\n  uint32_t downstream_data_disabled_{};\n  bool calling_encode_headers_ : 1;\n  bool upstream_canary_ : 1;\n  bool decode_complete_ : 1;\n  bool encode_complete_ : 1;\n  bool encode_trailers_ : 1;\n  bool retried_ : 1;\n  bool awaiting_headers_ : 1;\n  bool outlier_detection_timeout_recorded_ : 1;\n  // Tracks whether we deferred a per try timeout because the downstream request\n  // had not been completed yet.\n  bool create_per_try_timeout_on_request_complete_ : 1;\n  // True if the CONNECT headers have been sent but proxying payload is paused\n  // waiting for response headers.\n  bool paused_for_connect_ : 1;\n\n  // Sentinel to indicate if timeout budget tracking is configured for the cluster,\n  // and if so, if the per-try histogram should record a value.\n  bool record_timeout_budget_ : 1;\n\n  Event::TimerPtr max_stream_duration_timer_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/vhds.cc",
    "content": "#include \"common/router/vhds.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/api/v2/route/route_components.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/config_impl.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n// Implements callbacks to handle DeltaDiscovery protocol for VirtualHostDiscoveryService\nVhdsSubscription::VhdsSubscription(\n    RouteConfigUpdatePtr& config_update_info,\n    Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix,\n    absl::node_hash_set<RouteConfigProvider*>& route_config_providers,\n    envoy::config::core::v3::ApiVersion resource_api_version)\n    : Envoy::Config::SubscriptionBase<envoy::config::route::v3::VirtualHost>(\n          resource_api_version,\n          factory_context.messageValidationContext().dynamicValidationVisitor(), \"name\"),\n      config_update_info_(config_update_info),\n      scope_(factory_context.scope().createScope(stat_prefix + \"vhds.\" +\n                                                 config_update_info_->routeConfigName() + \".\")),\n      stats_({ALL_VHDS_STATS(POOL_COUNTER(*scope_))}),\n      init_target_(\n          fmt::format(\"VhdsConfigSubscription {}\", config_update_info_->routeConfigName()),\n          [this]() { subscription_->start({config_update_info_->routeConfigName()}, true); }),\n      route_config_providers_(route_config_providers) {\n  const auto& config_source = config_update_info_->routeConfiguration()\n                                  .vhds()\n                                  .config_source()\n                                  .api_config_source()\n                                  
.api_type();\n  if (config_source != envoy::config::core::v3::ApiConfigSource::DELTA_GRPC) {\n    throw EnvoyException(\"vhds: only 'DELTA_GRPC' is supported as an api_type.\");\n  }\n  const auto resource_name = getResourceName();\n  subscription_ =\n      factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource(\n          config_update_info_->routeConfiguration().vhds().config_source(),\n          Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_);\n}\n\nvoid VhdsSubscription::updateOnDemand(const std::string& with_route_config_name_prefix) {\n  subscription_->requestOnDemandUpdate({with_route_config_name_prefix});\n}\n\nvoid VhdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                                            const EnvoyException*) {\n  ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason);\n  // We need to allow server startup to continue, even if we have a bad\n  // config.\n  init_target_.ready();\n}\n\nvoid VhdsSubscription::onConfigUpdate(\n    const std::vector<Envoy::Config::DecodedResourceRef>& added_resources,\n    const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n    const std::string& version_info) {\n  RouteConfigUpdateReceiver::VirtualHostRefVector added_vhosts;\n  std::set<std::string> added_resource_ids;\n  for (const auto& resource : added_resources) {\n    added_resource_ids.emplace(resource.get().name());\n    std::copy(resource.get().aliases().begin(), resource.get().aliases().end(),\n              std::inserter(added_resource_ids, added_resource_ids.end()));\n    // the management server returns empty resources (they contain no virtual hosts in this case)\n    // for aliases that it couldn't resolve.\n    if (!resource.get().hasResource()) {\n      continue;\n    }\n    added_vhosts.emplace_back(\n        dynamic_cast<const envoy::config::route::v3::VirtualHost&>(resource.get().resource()));\n  }\n  if 
(config_update_info_->onVhdsUpdate(added_vhosts, added_resource_ids, removed_resources,\n                                        version_info)) {\n    stats_.config_reload_.inc();\n    ENVOY_LOG(debug, \"vhds: loading new configuration: config_name={} hash={}\",\n              config_update_info_->routeConfigName(), config_update_info_->configHash());\n    for (auto* provider : route_config_providers_) {\n      provider->onConfigUpdate();\n    }\n  }\n\n  init_target_.ready();\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/router/vhds.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <string>\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.validate.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/router/rds.h\"\n#include \"envoy/router/route_config_update_receiver.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/config/subscription_base.h\"\n#include \"common/init/target_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/container/node_hash_set.h\"\n\nnamespace Envoy {\nnamespace Router {\n\n#define ALL_VHDS_STATS(COUNTER)                                                                    \\\n  COUNTER(config_reload)                                                                           \\\n  COUNTER(update_empty)\n\nstruct VhdsStats {\n  ALL_VHDS_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nclass VhdsSubscription : Envoy::Config::SubscriptionBase<envoy::config::route::v3::VirtualHost>,\n                         Logger::Loggable<Logger::Id::router> {\npublic:\n  VhdsSubscription(RouteConfigUpdatePtr& config_update_info,\n                   Server::Configuration::ServerFactoryContext& factory_context,\n                   const std::string& stat_prefix,\n                   absl::node_hash_set<RouteConfigProvider*>& route_config_providers,\n                   const envoy::config::core::v3::ApiVersion resource_api_version =\n                       envoy::config::core::v3::ApiVersion::AUTO);\n  ~VhdsSubscription() override { init_target_.ready(); }\n\n  void registerInitTargetWithInitManager(Init::Manager& m) { 
m.add(init_target_); }\n  void updateOnDemand(const std::string& with_route_config_name_prefix);\n  static std::string domainNameToAlias(const std::string& route_config_name,\n                                       const std::string& domain) {\n    return route_config_name + \"/\" + domain;\n  }\n  static std::string aliasToDomainName(const std::string& alias) {\n    const auto pos = alias.find_last_of('/');\n    return pos == std::string::npos ? alias : alias.substr(pos + 1);\n  }\n\nprivate:\n  // Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<Envoy::Config::DecodedResourceRef>&,\n                      const std::string&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  void onConfigUpdate(const std::vector<Envoy::Config::DecodedResourceRef>&,\n                      const Protobuf::RepeatedPtrField<std::string>&, const std::string&) override;\n  void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                            const EnvoyException* e) override;\n\n  RouteConfigUpdatePtr& config_update_info_;\n  Stats::ScopePtr scope_;\n  VhdsStats stats_;\n  Envoy::Config::SubscriptionPtr subscription_;\n  Init::TargetImpl init_target_;\n  absl::node_hash_set<RouteConfigProvider*>& route_config_providers_;\n};\n\nusing VhdsSubscriptionPtr = std::unique_ptr<VhdsSubscription>;\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/runtime/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"runtime_features_lib\",\n    srcs = [\n        \"runtime_features.cc\",\n    ],\n    hdrs = [\n        \"runtime_features.h\",\n    ],\n    deps = [\n        # AVOID ADDING TO THESE DEPENDENCIES IF POSSIBLE\n        # Any code using runtime guards depends on this library, and the more dependencies there are,\n        # the harder it is to runtime-guard without dependency loops.\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/singleton:const_singleton\",\n        \"//source/common/singleton:threadsafe_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"runtime_protos_lib\",\n    hdrs = [\n        \"runtime_protos.h\",\n    ],\n    deps = [\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"runtime_lib\",\n    srcs = [\n        \"runtime_impl.cc\",\n    ],\n    hdrs = [\n        \"runtime_impl.h\",\n    ],\n    deps = [\n        \":runtime_features_lib\",\n        \":runtime_protos_lib\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:minimal_logger_lib\",\n        
\"//source/common/common:thread_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:subscription_base_interface\",\n        \"//source/common/filesystem:directory_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/init:manager_lib\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/init:watcher_lib\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/runtime/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/runtime/runtime_features.cc",
    "content": "#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Runtime {\n\nbool isRuntimeFeature(absl::string_view feature) {\n  return RuntimeFeaturesDefaults::get().enabledByDefault(feature) ||\n         RuntimeFeaturesDefaults::get().existsButDisabled(feature);\n}\n\nbool runtimeFeatureEnabled(absl::string_view feature) {\n  ASSERT(isRuntimeFeature(feature));\n  if (Runtime::LoaderSingleton::getExisting()) {\n    return Runtime::LoaderSingleton::getExisting()->threadsafeSnapshot()->runtimeFeatureEnabled(\n        feature);\n  }\n  ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::runtime), warn,\n                      \"Unable to use runtime singleton for feature {}\", feature);\n  return RuntimeFeaturesDefaults::get().enabledByDefault(feature);\n}\n\nuint64_t getInteger(absl::string_view feature, uint64_t default_value) {\n  ASSERT(absl::StartsWith(feature, \"envoy.\"));\n  if (Runtime::LoaderSingleton::getExisting()) {\n    return Runtime::LoaderSingleton::getExisting()->threadsafeSnapshot()->getInteger(\n        std::string(feature), default_value);\n  }\n  ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::runtime), warn,\n                      \"Unable to use runtime singleton for feature {}\", feature);\n  return default_value;\n}\n\n// Add additional features here to enable the new code paths by default.\n//\n// Per documentation in CONTRIBUTING.md is expected that new high risk code paths be guarded\n// by runtime feature guards, i.e\n//\n// if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.my_feature_name\")) {\n//   [new code path]\n// else {\n//   [old_code_path]\n// }\n//\n// Runtime features are false by default, so the old code path is exercised.\n// To make a runtime feature true by default, add it to the array below.\n// New features should be true-by-default for an Envoy release cycle before the\n// old code path is 
removed.\n//\n// If issues are found that require a runtime feature to be disabled, it should be reported\n// ASAP by filing a bug on github. Overriding non-buggy code is strongly discouraged to avoid the\n// problem of the bugs being found after the old code path has been removed.\nconstexpr const char* runtime_features[] = {\n    // Enabled\n    \"envoy.reloadable_features.http1_flood_protection\",\n    \"envoy.reloadable_features.test_feature_true\",\n    // Begin alphabetically sorted section.\n    \"envoy.deprecated_features.allow_deprecated_extension_names\",\n    \"envoy.reloadable_features.always_apply_route_header_rules\",\n    \"envoy.reloadable_features.activate_fds_next_event_loop\",\n    \"envoy.reloadable_features.activate_timers_next_event_loop\",\n    \"envoy.reloadable_features.allow_500_after_100\",\n    \"envoy.reloadable_features.allow_prefetch\",\n    \"envoy.reloadable_features.allow_response_for_timeout\",\n    \"envoy.reloadable_features.consume_all_retry_headers\",\n    \"envoy.reloadable_features.check_ocsp_policy\",\n    \"envoy.reloadable_features.disallow_unbounded_access_logs\",\n    \"envoy.reloadable_features.early_errors_via_hcm\",\n    \"envoy.reloadable_features.enable_deprecated_v2_api_warning\",\n    \"envoy.reloadable_features.enable_dns_cache_circuit_breakers\",\n    \"envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher\",\n    \"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\",\n    \"envoy.reloadable_features.fix_upgrade_response\",\n    \"envoy.reloadable_features.fix_wildcard_matching\",\n    \"envoy.reloadable_features.fixed_connection_close\",\n    \"envoy.reloadable_features.hcm_stream_error_on_invalid_message\",\n    \"envoy.reloadable_features.http_default_alpn\",\n    \"envoy.reloadable_features.http_match_on_all_headers\",\n    \"envoy.reloadable_features.http_set_copy_replace_all_headers\",\n    
\"envoy.reloadable_features.http_transport_failure_reason_in_body\",\n    \"envoy.reloadable_features.http2_skip_encoding_empty_trailers\",\n    \"envoy.reloadable_features.listener_in_place_filterchain_update\",\n    \"envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2\",\n    \"envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing\",\n    \"envoy.reloadable_features.preserve_query_string_in_path_redirects\",\n    \"envoy.reloadable_features.preserve_upstream_date\",\n    \"envoy.reloadable_features.require_ocsp_response_for_must_staple_certs\",\n    \"envoy.reloadable_features.stop_faking_paths\",\n    \"envoy.reloadable_features.strict_1xx_and_204_response_headers\",\n    \"envoy.reloadable_features.tls_use_io_handle_bio\",\n    \"envoy.reloadable_features.unify_grpc_handling\",\n    \"envoy.restart_features.use_apple_api_for_dns_lookups\",\n};\n\n// This is a section for officially sanctioned runtime features which are too\n// high risk to be enabled by default. Examples where we have opted to land\n// features without enabling by default are swapping the underlying buffer\n// implementation or the HTTP/1.1 codec implementation. 
Most features should be\n// enabled by default.\n//\n// When features are added here, there should be a tracking bug assigned to the\n// code owner to flip the default after sufficient testing.\nconstexpr const char* disabled_runtime_features[] = {\n    // Allow Envoy to upgrade or downgrade version of type url, should be removed when support for\n    // v2 url is removed from codebase.\n    \"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade\",\n    // TODO(asraa) flip this feature after codec errors are handled\n    \"envoy.reloadable_features.new_codec_behavior\",\n    // TODO(alyssawilk) flip true after the release.\n    \"envoy.reloadable_features.new_tcp_connection_pool\",\n    // Sentinel and test flag.\n    \"envoy.reloadable_features.test_feature_false\",\n};\n\nRuntimeFeatures::RuntimeFeatures() {\n  for (auto& feature : runtime_features) {\n    enabled_features_.insert(feature);\n  }\n  for (auto& feature : disabled_runtime_features) {\n    disabled_features_.insert(feature);\n  }\n}\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/runtime/runtime_features.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/runtime/runtime.h\"\n\n#include \"common/singleton/const_singleton.h\"\n#include \"common/singleton/threadsafe_singleton.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace Runtime {\n\nbool isRuntimeFeature(absl::string_view feature);\nbool runtimeFeatureEnabled(absl::string_view feature);\nuint64_t getInteger(absl::string_view feature, uint64_t default_value);\n\nclass RuntimeFeatures {\npublic:\n  RuntimeFeatures();\n\n  // This tracks config-guarded code paths, to determine if a given\n  // runtime-guarded-code-path has the new code run by default or the old code.\n  bool enabledByDefault(absl::string_view feature) const {\n    return enabled_features_.find(feature) != enabled_features_.end();\n  }\n  bool existsButDisabled(absl::string_view feature) const {\n    return disabled_features_.find(feature) != disabled_features_.end();\n  }\n\nprivate:\n  friend class RuntimeFeaturesPeer;\n\n  absl::flat_hash_set<std::string> enabled_features_;\n  absl::flat_hash_set<std::string> disabled_features_;\n};\n\nusing RuntimeFeaturesDefaults = ConstSingleton<RuntimeFeatures>;\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/runtime/runtime_impl.cc",
    "content": "#include \"common/runtime/runtime_impl.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/service/discovery/v2/rtds.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n#include \"envoy/type/v3/percent.pb.validate.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/api_version.h\"\n#include \"common/filesystem/directory.h\"\n#include \"common/grpc/common.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/numbers.h\"\n\nnamespace Envoy {\nnamespace Runtime {\n\nnamespace {\n\nvoid countDeprecatedFeatureUseInternal(const RuntimeStats& stats) {\n  stats.deprecated_feature_use_.inc();\n  // Similar to the above, but a gauge that isn't imported during a hot restart.\n  stats.deprecated_feature_seen_since_process_start_.inc();\n}\n\n} // namespace\n\nbool SnapshotImpl::deprecatedFeatureEnabled(absl::string_view key, bool default_value) const {\n  // If the value is not explicitly set as a runtime boolean, trust the proto annotations passed as\n  // default_value.\n  if (!getBoolean(key, default_value)) {\n    // If either disallowed by default or configured off, the feature is not enabled.\n    return false;\n  }\n\n  // The feature is allowed. 
It is assumed this check is called when the feature\n  // is about to be used, so increment the feature use stat.\n  countDeprecatedFeatureUseInternal(stats_);\n\n#ifdef ENVOY_DISABLE_DEPRECATED_FEATURES\n  return false;\n#endif\n\n  return true;\n}\n\nbool SnapshotImpl::runtimeFeatureEnabled(absl::string_view key) const {\n  // If the value is not explicitly set as a runtime boolean, the default value is based on\n  // enabledByDefault.\n  return getBoolean(key, RuntimeFeaturesDefaults::get().enabledByDefault(key));\n}\n\nbool SnapshotImpl::featureEnabled(absl::string_view key, uint64_t default_value,\n                                  uint64_t random_value, uint64_t num_buckets) const {\n  return random_value % num_buckets < std::min(getInteger(key, default_value), num_buckets);\n}\n\nbool SnapshotImpl::featureEnabled(absl::string_view key, uint64_t default_value) const {\n  // Avoid PRNG if we know we don't need it.\n  uint64_t cutoff = std::min(getInteger(key, default_value), static_cast<uint64_t>(100));\n  if (cutoff == 0) {\n    return false;\n  } else if (cutoff == 100) {\n    return true;\n  } else {\n    return generator_.random() % 100 < cutoff;\n  }\n}\n\nbool SnapshotImpl::featureEnabled(absl::string_view key, uint64_t default_value,\n                                  uint64_t random_value) const {\n  return featureEnabled(key, default_value, random_value, 100);\n}\n\nSnapshot::ConstStringOptRef SnapshotImpl::get(absl::string_view key) const {\n  ASSERT(!isRuntimeFeature(key)); // Make sure runtime guarding is only used for getBoolean\n  auto entry = key.empty() ? 
values_.end() : values_.find(key);\n  if (entry == values_.end()) {\n    return absl::nullopt;\n  } else {\n    return entry->second.raw_string_value_;\n  }\n}\n\nbool SnapshotImpl::featureEnabled(absl::string_view key,\n                                  const envoy::type::v3::FractionalPercent& default_value) const {\n  return featureEnabled(key, default_value, generator_.random());\n}\n\nbool SnapshotImpl::featureEnabled(absl::string_view key,\n                                  const envoy::type::v3::FractionalPercent& default_value,\n                                  uint64_t random_value) const {\n  const auto& entry = key.empty() ? values_.end() : values_.find(key);\n  envoy::type::v3::FractionalPercent percent;\n  if (entry != values_.end() && entry->second.fractional_percent_value_.has_value()) {\n    percent = entry->second.fractional_percent_value_.value();\n  } else if (entry != values_.end() && entry->second.uint_value_.has_value()) {\n    // Check for > 100 because the runtime value is assumed to be specified as\n    // an integer, and it also ensures that truncating the uint64_t runtime\n    // value into a uint32_t percent numerator later is safe\n    if (entry->second.uint_value_.value() > 100) {\n      return true;\n    }\n\n    // The runtime value was specified as an integer rather than a fractional\n    // percent proto. To preserve legacy semantics, we treat it as a percentage\n    // (i.e. 
denominator of 100).\n    percent.set_numerator(entry->second.uint_value_.value());\n    percent.set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n  } else {\n    percent = default_value;\n  }\n\n  // When numerator > denominator, the condition always evaluates to TRUE.\n  // This makes it hard to debug why configuration does not work when the numerator is wrong.\n  // Log debug message that numerator is invalid.\n  uint64_t denominator_value =\n      ProtobufPercentHelper::fractionalPercentDenominatorToInt(percent.denominator());\n  if (percent.numerator() > denominator_value) {\n    ENVOY_LOG(debug,\n              \"WARNING runtime key '{}': numerator ({}) > denominator ({}), condition always \"\n              \"evaluates to true\",\n              key, percent.numerator(), denominator_value);\n  }\n\n  return ProtobufPercentHelper::evaluateFractionalPercent(percent, random_value);\n}\n\nuint64_t SnapshotImpl::getInteger(absl::string_view key, uint64_t default_value) const {\n  ASSERT(!isRuntimeFeature(key));\n  const auto& entry = key.empty() ? values_.end() : values_.find(key);\n  if (entry == values_.end() || !entry->second.uint_value_) {\n    return default_value;\n  } else {\n    return entry->second.uint_value_.value();\n  }\n}\n\ndouble SnapshotImpl::getDouble(absl::string_view key, double default_value) const {\n  ASSERT(!isRuntimeFeature(key)); // Make sure runtime guarding is only used for getBoolean\n  const auto& entry = key.empty() ? values_.end() : values_.find(key);\n  if (entry == values_.end() || !entry->second.double_value_) {\n    return default_value;\n  } else {\n    return entry->second.double_value_.value();\n  }\n}\n\nbool SnapshotImpl::getBoolean(absl::string_view key, bool default_value) const {\n  const auto& entry = key.empty() ? 
values_.end() : values_.find(key);\n  if (entry == values_.end() || !entry->second.bool_value_.has_value()) {\n    return default_value;\n  } else {\n    return entry->second.bool_value_.value();\n  }\n}\n\nconst std::vector<Snapshot::OverrideLayerConstPtr>& SnapshotImpl::getLayers() const {\n  return layers_;\n}\n\nSnapshotImpl::SnapshotImpl(Random::RandomGenerator& generator, RuntimeStats& stats,\n                           std::vector<OverrideLayerConstPtr>&& layers)\n    : layers_{std::move(layers)}, generator_{generator}, stats_{stats} {\n  for (const auto& layer : layers_) {\n    for (const auto& kv : layer->values()) {\n      values_.erase(kv.first);\n      values_.emplace(kv.first, kv.second);\n    }\n  }\n  stats.num_keys_.set(values_.size());\n}\n\nSnapshotImpl::Entry SnapshotImpl::createEntry(const std::string& value) {\n  Entry entry;\n  entry.raw_string_value_ = value;\n\n  // As a perf optimization, attempt to parse the entry's string and store it inside the struct. If\n  // we don't succeed that's fine.\n  resolveEntryType(entry);\n\n  return entry;\n}\n\nSnapshotImpl::Entry SnapshotImpl::createEntry(const ProtobufWkt::Value& value) {\n  // This isn't the smartest way to do it; we're round-tripping via YAML, this should be optimized\n  // if runtime parsing becomes performance sensitive.\n  return createEntry(MessageUtil::getYamlStringFromMessage(value, false, false));\n}\n\nbool SnapshotImpl::parseEntryBooleanValue(Entry& entry) {\n  absl::string_view stripped = entry.raw_string_value_;\n  stripped = absl::StripAsciiWhitespace(stripped);\n\n  uint64_t parse_int;\n  if (absl::SimpleAtoi(stripped, &parse_int)) {\n    entry.bool_value_ = (parse_int != 0);\n    // This is really an integer, so return false here not because of failure, but so we continue to\n    // parse doubles/int.\n    return false;\n  } else if (absl::EqualsIgnoreCase(stripped, \"true\")) {\n    entry.bool_value_ = true;\n    return true;\n  } else if 
(absl::EqualsIgnoreCase(stripped, \"false\")) {\n    entry.bool_value_ = false;\n    return true;\n  }\n  return false;\n}\n\nbool SnapshotImpl::parseEntryDoubleValue(Entry& entry) {\n  double converted_double;\n  if (absl::SimpleAtod(entry.raw_string_value_, &converted_double)) {\n    entry.double_value_ = converted_double;\n    return true;\n  }\n  return false;\n}\n\nvoid SnapshotImpl::parseEntryFractionalPercentValue(Entry& entry) {\n  envoy::type::v3::FractionalPercent converted_fractional_percent;\n  try {\n    MessageUtil::loadFromYamlAndValidate(entry.raw_string_value_, converted_fractional_percent,\n                                         ProtobufMessage::getStrictValidationVisitor());\n  } catch (const ProtoValidationException& ex) {\n    ENVOY_LOG(error, \"unable to validate fraction percent runtime proto: {}\", ex.what());\n    return;\n  } catch (const EnvoyException& ex) {\n    // An EnvoyException is thrown when we try to parse a bogus string as a protobuf. This is fine,\n    // since there was no expectation that the raw string was a valid proto.\n    return;\n  }\n\n  entry.fractional_percent_value_ = converted_fractional_percent;\n}\n\nvoid AdminLayer::mergeValues(const absl::node_hash_map<std::string, std::string>& values) {\n  for (const auto& kv : values) {\n    values_.erase(kv.first);\n    if (!kv.second.empty()) {\n      values_.emplace(kv.first, SnapshotImpl::createEntry(kv.second));\n    }\n  }\n  stats_.admin_overrides_active_.set(values_.empty() ? 
0 : 1);\n}\n\nDiskLayer::DiskLayer(absl::string_view name, const std::string& path, Api::Api& api)\n    : OverrideLayerImpl{name} {\n  walkDirectory(path, \"\", 1, api);\n}\n\nvoid DiskLayer::walkDirectory(const std::string& path, const std::string& prefix, uint32_t depth,\n                              Api::Api& api) {\n  // Maximum recursion depth for walkDirectory().\n  static constexpr uint32_t MaxWalkDepth = 16;\n\n  ENVOY_LOG(debug, \"walking directory: {}\", path);\n  if (depth > MaxWalkDepth) {\n    throw EnvoyException(absl::StrCat(\"Walk recursion depth exceeded \", MaxWalkDepth));\n  }\n  // Check if this is an obviously bad path.\n  if (api.fileSystem().illegalPath(path)) {\n    throw EnvoyException(absl::StrCat(\"Invalid path: \", path));\n  }\n\n  Filesystem::Directory directory(path);\n  for (const Filesystem::DirectoryEntry& entry : directory) {\n    std::string full_path = path + \"/\" + entry.name_;\n    std::string full_prefix;\n    if (prefix.empty()) {\n      full_prefix = entry.name_;\n    } else {\n      full_prefix = prefix + \".\" + entry.name_;\n    }\n\n    if (entry.type_ == Filesystem::FileType::Directory && entry.name_ != \".\" &&\n        entry.name_ != \"..\") {\n      walkDirectory(full_path, full_prefix, depth + 1, api);\n    } else if (entry.type_ == Filesystem::FileType::Regular) {\n      // Suck the file into a string. This is not very efficient but it should be good enough\n      // for small files. Also, as noted elsewhere, none of this is non-blocking which could\n      // theoretically lead to issues.\n      ENVOY_LOG(debug, \"reading file: {}\", full_path);\n      std::string value;\n\n      // Read the file and remove any comments. 
A comment is a line starting with a '#' character.\n      // Comments are useful for placeholder files with no value.\n      const std::string text_file{api.fileSystem().fileReadToEnd(full_path)};\n      const auto lines = StringUtil::splitToken(text_file, \"\\n\");\n      for (const auto& line : lines) {\n        if (!line.empty() && line.front() == '#') {\n          continue;\n        }\n        if (line == lines.back()) {\n          const absl::string_view trimmed = StringUtil::rtrim(line);\n          value.append(trimmed.data(), trimmed.size());\n        } else {\n          value.append(std::string{line} + \"\\n\");\n        }\n      }\n      // Separate erase/insert calls required due to the value type being constant; this prevents\n      // the use of the [] operator. Can leverage insert_or_assign in C++17 in the future.\n      values_.erase(full_prefix);\n      values_.insert({full_prefix, SnapshotImpl::createEntry(value)});\n    }\n  }\n}\n\nProtoLayer::ProtoLayer(absl::string_view name, const ProtobufWkt::Struct& proto)\n    : OverrideLayerImpl{name} {\n  for (const auto& f : proto.fields()) {\n    walkProtoValue(f.second, f.first);\n  }\n}\n\nvoid ProtoLayer::walkProtoValue(const ProtobufWkt::Value& v, const std::string& prefix) {\n  switch (v.kind_case()) {\n  case ProtobufWkt::Value::KIND_NOT_SET:\n  case ProtobufWkt::Value::kListValue:\n  case ProtobufWkt::Value::kNullValue:\n    throw EnvoyException(absl::StrCat(\"Invalid runtime entry value for \", prefix));\n    break;\n  case ProtobufWkt::Value::kStringValue:\n    values_.emplace(prefix, SnapshotImpl::createEntry(v.string_value()));\n    break;\n  case ProtobufWkt::Value::kNumberValue:\n  case ProtobufWkt::Value::kBoolValue:\n    values_.emplace(prefix, SnapshotImpl::createEntry(v));\n    break;\n  case ProtobufWkt::Value::kStructValue: {\n    const ProtobufWkt::Struct& s = v.struct_value();\n    if (s.fields().empty() || s.fields().find(\"numerator\") != s.fields().end() ||\n        
s.fields().find(\"denominator\") != s.fields().end()) {\n      values_.emplace(prefix, SnapshotImpl::createEntry(v));\n      break;\n    }\n    for (const auto& f : s.fields()) {\n      walkProtoValue(f.second, prefix + \".\" + f.first);\n    }\n    break;\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nLoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls,\n                       const envoy::config::bootstrap::v3::LayeredRuntime& config,\n                       const LocalInfo::LocalInfo& local_info, Stats::Store& store,\n                       Random::RandomGenerator& generator,\n                       ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api)\n    : generator_(generator), stats_(generateStats(store)), tls_(tls.allocateSlot()),\n      config_(config), service_cluster_(local_info.clusterName()), api_(api),\n      init_watcher_(\"RTDS\", [this]() { onRtdsReady(); }), store_(store) {\n  absl::node_hash_set<std::string> layer_names;\n  for (const auto& layer : config_.layers()) {\n    auto ret = layer_names.insert(layer.name());\n    if (!ret.second) {\n      throw EnvoyException(absl::StrCat(\"Duplicate layer name: \", layer.name()));\n    }\n    switch (layer.layer_specifier_case()) {\n    case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kStaticLayer:\n      // Nothing needs to be done here.\n      break;\n    case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kAdminLayer:\n      if (admin_layer_ != nullptr) {\n        throw EnvoyException(\n            \"Too many admin layers specified in LayeredRuntime, at most one may be specified\");\n      }\n      admin_layer_ = std::make_unique<AdminLayer>(layer.name(), stats_);\n      break;\n    case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kDiskLayer:\n      if (watcher_ == nullptr) {\n        watcher_ = dispatcher.createFilesystemWatcher();\n      }\n      
watcher_->addWatch(layer.disk_layer().symlink_root(), Filesystem::Watcher::Events::MovedTo,\n                         [this](uint32_t) -> void { loadNewSnapshot(); });\n      break;\n    case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kRtdsLayer:\n      subscriptions_.emplace_back(\n          std::make_unique<RtdsSubscription>(*this, layer.rtds_layer(), store, validation_visitor));\n      init_manager_.add(subscriptions_.back()->init_target_);\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  loadNewSnapshot();\n}\n\nvoid LoaderImpl::initialize(Upstream::ClusterManager& cm) {\n  cm_ = &cm;\n\n  for (const auto& s : subscriptions_) {\n    s->createSubscription();\n  }\n}\n\nvoid LoaderImpl::startRtdsSubscriptions(ReadyCallback on_done) {\n  on_rtds_initialized_ = on_done;\n  init_manager_.initialize(init_watcher_);\n}\n\nvoid LoaderImpl::onRtdsReady() {\n  ENVOY_LOG(info, \"RTDS has finished initialization\");\n  on_rtds_initialized_();\n}\n\nRtdsSubscription::RtdsSubscription(\n    LoaderImpl& parent, const envoy::config::bootstrap::v3::RuntimeLayer::RtdsLayer& rtds_layer,\n    Stats::Store& store, ProtobufMessage::ValidationVisitor& validation_visitor)\n    : Envoy::Config::SubscriptionBase<envoy::service::runtime::v3::Runtime>(\n          rtds_layer.rtds_config().resource_api_version(), validation_visitor, \"name\"),\n      parent_(parent), config_source_(rtds_layer.rtds_config()), store_(store),\n      resource_name_(rtds_layer.name()),\n      init_target_(\"RTDS \" + resource_name_, [this]() { start(); }) {}\n\nvoid RtdsSubscription::createSubscription() {\n  const auto resource_name = getResourceName();\n  subscription_ = parent_.cm_->subscriptionFactory().subscriptionFromConfigSource(\n      config_source_, Grpc::Common::typeUrl(resource_name), store_, *this, resource_decoder_);\n}\n\nvoid RtdsSubscription::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                             
         const std::string&) {\n  validateUpdateSize(resources.size());\n  const auto& runtime =\n      dynamic_cast<const envoy::service::runtime::v3::Runtime&>(resources[0].get().resource());\n  if (runtime.name() != resource_name_) {\n    throw EnvoyException(\n        fmt::format(\"Unexpected RTDS runtime (expecting {}): {}\", resource_name_, runtime.name()));\n  }\n  ENVOY_LOG(debug, \"Reloading RTDS snapshot for onConfigUpdate\");\n  proto_.CopyFrom(runtime.layer());\n  parent_.loadNewSnapshot();\n  init_target_.ready();\n}\n\nvoid RtdsSubscription::onConfigUpdate(\n    const std::vector<Config::DecodedResourceRef>& added_resources,\n    const Protobuf::RepeatedPtrField<std::string>&, const std::string&) {\n  validateUpdateSize(added_resources.size());\n  onConfigUpdate(added_resources, added_resources[0].get().version());\n}\n\nvoid RtdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                                            const EnvoyException*) {\n  ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason);\n  // We need to allow server startup to continue, even if we have a bad\n  // config.\n  init_target_.ready();\n}\n\nvoid RtdsSubscription::start() { subscription_->start({resource_name_}); }\n\nvoid RtdsSubscription::validateUpdateSize(uint32_t num_resources) {\n  if (num_resources != 1) {\n    init_target_.ready();\n    throw EnvoyException(fmt::format(\"Unexpected RTDS resource length: {}\", num_resources));\n    // (would be a return false here)\n  }\n}\n\nvoid LoaderImpl::loadNewSnapshot() {\n  std::shared_ptr<SnapshotImpl> ptr = createNewSnapshot();\n  tls_->set([ptr](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::static_pointer_cast<ThreadLocal::ThreadLocalObject>(ptr);\n  });\n\n  {\n    absl::MutexLock lock(&snapshot_mutex_);\n    thread_safe_snapshot_ = ptr;\n  }\n}\n\nconst Snapshot& LoaderImpl::snapshot() {\n  
ASSERT(tls_->currentThreadRegistered(),\n         \"snapshot can only be called from a worker thread or after the main thread is registered\");\n  return tls_->getTyped<Snapshot>();\n}\n\nSnapshotConstSharedPtr LoaderImpl::threadsafeSnapshot() {\n  if (tls_->currentThreadRegistered()) {\n    return std::dynamic_pointer_cast<const Snapshot>(tls_->get());\n  }\n\n  {\n    absl::ReaderMutexLock lock(&snapshot_mutex_);\n    return thread_safe_snapshot_;\n  }\n}\n\nvoid LoaderImpl::mergeValues(const absl::node_hash_map<std::string, std::string>& values) {\n  if (admin_layer_ == nullptr) {\n    throw EnvoyException(\"No admin layer specified\");\n  }\n  admin_layer_->mergeValues(values);\n  loadNewSnapshot();\n}\n\nStats::Scope& LoaderImpl::getRootScope() { return store_; }\n\nvoid LoaderImpl::countDeprecatedFeatureUse() const { countDeprecatedFeatureUseInternal(stats_); }\n\nRuntimeStats LoaderImpl::generateStats(Stats::Store& store) {\n  std::string prefix = \"runtime.\";\n  RuntimeStats stats{\n      ALL_RUNTIME_STATS(POOL_COUNTER_PREFIX(store, prefix), POOL_GAUGE_PREFIX(store, prefix))};\n  return stats;\n}\n\nSnapshotImplPtr LoaderImpl::createNewSnapshot() {\n  std::vector<Snapshot::OverrideLayerConstPtr> layers;\n  uint32_t disk_layers = 0;\n  uint32_t error_layers = 0;\n  uint32_t rtds_layer = 0;\n  for (const auto& layer : config_.layers()) {\n    switch (layer.layer_specifier_case()) {\n    case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kStaticLayer:\n      layers.emplace_back(std::make_unique<const ProtoLayer>(layer.name(), layer.static_layer()));\n      break;\n    case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kDiskLayer: {\n      std::string path =\n          layer.disk_layer().symlink_root() + \"/\" + layer.disk_layer().subdirectory();\n      if (layer.disk_layer().append_service_cluster()) {\n        path += \"/\" + service_cluster_;\n      }\n      if (api_.fileSystem().directoryExists(path)) {\n        try {\n  
        layers.emplace_back(std::make_unique<DiskLayer>(layer.name(), path, api_));\n          ++disk_layers;\n        } catch (EnvoyException& e) {\n          // TODO(htuch): Consider latching here, rather than ignoring the\n          // layer. This would be consistent with filesystem RTDS.\n          ++error_layers;\n          ENVOY_LOG(debug, \"error loading runtime values for layer {} from disk: {}\",\n                    layer.DebugString(), e.what());\n        }\n      }\n      break;\n    }\n    case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kAdminLayer:\n      layers.push_back(std::make_unique<AdminLayer>(*admin_layer_));\n      break;\n    case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kRtdsLayer: {\n      auto* subscription = subscriptions_[rtds_layer++].get();\n      layers.emplace_back(std::make_unique<const ProtoLayer>(layer.name(), subscription->proto_));\n      break;\n    }\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n  stats_.num_layers_.set(layers.size());\n  if (error_layers == 0) {\n    stats_.load_success_.inc();\n  } else {\n    stats_.load_error_.inc();\n  }\n  if (disk_layers > 1) {\n    stats_.override_dir_exists_.inc();\n  } else {\n    stats_.override_dir_not_exists_.inc();\n  }\n  return std::make_unique<SnapshotImpl>(generator_, stats_, std::move(layers));\n}\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/runtime/runtime_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/service/runtime/v3/rtds.pb.h\"\n#include \"envoy/service/runtime/v3/rtds.pb.validate.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stats/store.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/config/subscription_base.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/init/target_impl.h\"\n#include \"common/singleton/threadsafe_singleton.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Runtime {\n\nusing RuntimeSingleton = ThreadSafeSingleton<Loader>;\n\n/**\n * All runtime stats. 
@see stats_macros.h\n */\n#define ALL_RUNTIME_STATS(COUNTER, GAUGE)                                                          \\\n  COUNTER(deprecated_feature_use)                                                                  \\\n  COUNTER(load_error)                                                                              \\\n  COUNTER(load_success)                                                                            \\\n  COUNTER(override_dir_exists)                                                                     \\\n  COUNTER(override_dir_not_exists)                                                                 \\\n  GAUGE(admin_overrides_active, NeverImport)                                                       \\\n  GAUGE(deprecated_feature_seen_since_process_start, NeverImport)                                  \\\n  GAUGE(num_keys, NeverImport)                                                                     \\\n  GAUGE(num_layers, NeverImport)\n\n/**\n * Struct definition for all runtime stats. 
@see stats_macros.h\n */\nstruct RuntimeStats {\n  ALL_RUNTIME_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Implementation of Snapshot whose source is the vector of layers passed to the constructor.\n */\nclass SnapshotImpl : public Snapshot, Logger::Loggable<Logger::Id::runtime> {\npublic:\n  SnapshotImpl(Random::RandomGenerator& generator, RuntimeStats& stats,\n               std::vector<OverrideLayerConstPtr>&& layers);\n\n  // Runtime::Snapshot\n  bool deprecatedFeatureEnabled(absl::string_view key, bool default_value) const override;\n  bool runtimeFeatureEnabled(absl::string_view key) const override;\n  bool featureEnabled(absl::string_view key, uint64_t default_value, uint64_t random_value,\n                      uint64_t num_buckets) const override;\n  bool featureEnabled(absl::string_view key, uint64_t default_value) const override;\n  bool featureEnabled(absl::string_view key, uint64_t default_value,\n                      uint64_t random_value) const override;\n  bool featureEnabled(absl::string_view key,\n                      const envoy::type::v3::FractionalPercent& default_value) const override;\n  bool featureEnabled(absl::string_view key,\n                      const envoy::type::v3::FractionalPercent& default_value,\n                      uint64_t random_value) const override;\n  ConstStringOptRef get(absl::string_view key) const override;\n  uint64_t getInteger(absl::string_view key, uint64_t default_value) const override;\n  double getDouble(absl::string_view key, double default_value) const override;\n  bool getBoolean(absl::string_view key, bool value) const override;\n  const std::vector<OverrideLayerConstPtr>& getLayers() const override;\n\n  static Entry createEntry(const std::string& value);\n  static Entry createEntry(const ProtobufWkt::Value& value);\n\nprivate:\n  static void resolveEntryType(Entry& entry) {\n    if (parseEntryBooleanValue(entry)) {\n      return;\n    }\n\n    if (parseEntryDoubleValue(entry) && 
entry.double_value_ >= 0 &&\n        entry.double_value_ <= std::numeric_limits<uint64_t>::max()) {\n      // Valid uint values will always be parseable as doubles, so we assign the value to both the\n      // uint and double fields. In cases where the value is something like \"3.1\", we will floor the\n      // number by casting it to a uint and assigning the uint value.\n      entry.uint_value_ = entry.double_value_;\n      return;\n    }\n\n    parseEntryFractionalPercentValue(entry);\n  }\n\n  static bool parseEntryBooleanValue(Entry& entry);\n  static bool parseEntryDoubleValue(Entry& entry);\n  static void parseEntryFractionalPercentValue(Entry& entry);\n\n  const std::vector<OverrideLayerConstPtr> layers_;\n  EntryMap values_;\n  Random::RandomGenerator& generator_;\n  RuntimeStats& stats_;\n};\n\nusing SnapshotImplPtr = std::unique_ptr<SnapshotImpl>;\n\n/**\n * Base implementation of OverrideLayer that by itself provides an empty values map.\n */\nclass OverrideLayerImpl : public Snapshot::OverrideLayer {\npublic:\n  explicit OverrideLayerImpl(absl::string_view name) : name_{name} {}\n  const Snapshot::EntryMap& values() const override { return values_; }\n  const std::string& name() const override { return name_; }\n\nprotected:\n  Snapshot::EntryMap values_;\n  const std::string name_;\n};\n\n/**\n * Extension of OverrideLayerImpl that maintains an in-memory set of values. These values can be\n * modified programmatically via mergeValues(). 
AdminLayer is so named because it can be accessed\n * and manipulated by Envoy's admin interface.\n */\nclass AdminLayer : public OverrideLayerImpl {\npublic:\n  explicit AdminLayer(absl::string_view name, RuntimeStats& stats)\n      : OverrideLayerImpl{name}, stats_{stats} {}\n  /**\n   * Copy-constructible so that it can snapshotted.\n   */\n  AdminLayer(const AdminLayer& admin_layer) : AdminLayer{admin_layer.name_, admin_layer.stats_} {\n    values_ = admin_layer.values();\n  }\n\n  /**\n   * Merge the provided values into our entry map. An empty value indicates that a key should be\n   * removed from our map.\n   */\n  void mergeValues(const absl::node_hash_map<std::string, std::string>& values);\n\nprivate:\n  RuntimeStats& stats_;\n};\n\nusing AdminLayerPtr = std::unique_ptr<AdminLayer>;\n\n/**\n * Extension of OverrideLayerImpl that loads values from the file system upon construction.\n */\nclass DiskLayer : public OverrideLayerImpl, Logger::Loggable<Logger::Id::runtime> {\npublic:\n  DiskLayer(absl::string_view name, const std::string& path, Api::Api& api);\n\nprivate:\n  void walkDirectory(const std::string& path, const std::string& prefix, uint32_t depth,\n                     Api::Api& api);\n\n  const std::string path_;\n  const Filesystem::WatcherPtr watcher_;\n};\n\n/**\n * Extension of OverrideLayerImpl that loads values from a proto Struct representation.\n */\nclass ProtoLayer : public OverrideLayerImpl, Logger::Loggable<Logger::Id::runtime> {\npublic:\n  ProtoLayer(absl::string_view name, const ProtobufWkt::Struct& proto);\n\nprivate:\n  void walkProtoValue(const ProtobufWkt::Value& v, const std::string& prefix);\n};\n\nclass LoaderImpl;\n\nstruct RtdsSubscription : Envoy::Config::SubscriptionBase<envoy::service::runtime::v3::Runtime>,\n                          Logger::Loggable<Logger::Id::runtime> {\n  RtdsSubscription(LoaderImpl& parent,\n                   const envoy::config::bootstrap::v3::RuntimeLayer::RtdsLayer& rtds_layer,\n               
    Stats::Store& store, ProtobufMessage::ValidationVisitor& validation_visitor);\n\n  // Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                      const std::string&) override;\n\n  void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                            const EnvoyException* e) override;\n\n  void start();\n  void validateUpdateSize(uint32_t num_resources);\n  void createSubscription();\n\n  LoaderImpl& parent_;\n  const envoy::config::core::v3::ConfigSource config_source_;\n  Stats::Store& store_;\n  Config::SubscriptionPtr subscription_;\n  std::string resource_name_;\n  Init::TargetImpl init_target_;\n  ProtobufWkt::Struct proto_;\n};\n\nusing RtdsSubscriptionPtr = std::unique_ptr<RtdsSubscription>;\n\n/**\n * Implementation of Loader that provides Snapshots of values added via mergeValues().\n * A single snapshot is shared among all threads and referenced by shared_ptr such that\n * a new runtime can be swapped in by the main thread while workers are still using the previous\n * version.\n */\nclass LoaderImpl : public Loader, Logger::Loggable<Logger::Id::runtime> {\npublic:\n  LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls,\n             const envoy::config::bootstrap::v3::LayeredRuntime& config,\n             const LocalInfo::LocalInfo& local_info, Stats::Store& store,\n             Random::RandomGenerator& generator,\n             ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api);\n\n  // Runtime::Loader\n  void initialize(Upstream::ClusterManager& cm) override;\n  const Snapshot& snapshot() override;\n  SnapshotConstSharedPtr threadsafeSnapshot() override;\n  void 
mergeValues(const absl::node_hash_map<std::string, std::string>& values) override;\n  void startRtdsSubscriptions(ReadyCallback on_done) override;\n  Stats::Scope& getRootScope() override;\n  void countDeprecatedFeatureUse() const override;\n\nprivate:\n  friend RtdsSubscription;\n\n  // Create a new Snapshot\n  SnapshotImplPtr createNewSnapshot();\n  // Load a new Snapshot into TLS\n  void loadNewSnapshot();\n  RuntimeStats generateStats(Stats::Store& store);\n  void onRtdsReady();\n\n  Random::RandomGenerator& generator_;\n  RuntimeStats stats_;\n  AdminLayerPtr admin_layer_;\n  ThreadLocal::SlotPtr tls_;\n  const envoy::config::bootstrap::v3::LayeredRuntime config_;\n  const std::string service_cluster_;\n  Filesystem::WatcherPtr watcher_;\n  Api::Api& api_;\n  ReadyCallback on_rtds_initialized_;\n  Init::WatcherImpl init_watcher_;\n  Init::ManagerImpl init_manager_{\"RTDS\"};\n  std::vector<RtdsSubscriptionPtr> subscriptions_;\n  Upstream::ClusterManager* cm_{};\n  Stats::Store& store_;\n\n  absl::Mutex snapshot_mutex_;\n  SnapshotConstSharedPtr thread_safe_snapshot_ ABSL_GUARDED_BY(snapshot_mutex_);\n};\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/runtime/runtime_protos.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Runtime {\n\n// Helper class for runtime-derived boolean feature flags.\nclass FeatureFlag {\npublic:\n  FeatureFlag(const envoy::config::core::v3::RuntimeFeatureFlag& feature_flag_proto,\n              Runtime::Loader& runtime)\n      : runtime_key_(feature_flag_proto.runtime_key()),\n        default_value_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(feature_flag_proto, default_value, true)),\n        runtime_(runtime) {}\n\n  bool enabled() const { return runtime_.snapshot().getBoolean(runtime_key_, default_value_); }\n\nprivate:\n  const std::string runtime_key_;\n  const bool default_value_;\n  Runtime::Loader& runtime_;\n};\n\n// Helper class for runtime-derived doubles.\nclass Double {\npublic:\n  Double(const envoy::config::core::v3::RuntimeDouble& double_proto, Runtime::Loader& runtime)\n      : runtime_key_(double_proto.runtime_key()), default_value_(double_proto.default_value()),\n        runtime_(runtime) {}\n  Double(std::string runtime_key, double default_value, Runtime::Loader& runtime)\n      : runtime_key_(std::move(runtime_key)), default_value_(default_value), runtime_(runtime) {}\n  virtual ~Double() = default;\n\n  const std::string& runtimeKey() const { return runtime_key_; }\n\n  virtual double value() const {\n    return runtime_.snapshot().getDouble(runtime_key_, default_value_);\n  }\n\nprotected:\n  const std::string runtime_key_;\n  const double default_value_;\n  Runtime::Loader& runtime_;\n};\n\n// Helper class for runtime-derived fractional percent flags.\nclass FractionalPercent {\npublic:\n  FractionalPercent(\n      const envoy::config::core::v3::RuntimeFractionalPercent& fractional_percent_proto,\n      Runtime::Loader& runtime)\n      : runtime_key_(fractional_percent_proto.runtime_key()),\n        
default_value_(fractional_percent_proto.default_value()), runtime_(runtime) {}\n\n  bool enabled() const { return runtime_.snapshot().featureEnabled(runtime_key_, default_value_); }\n\nprivate:\n  const std::string runtime_key_;\n  const envoy::type::v3::FractionalPercent default_value_;\n  Runtime::Loader& runtime_;\n};\n\n// Helper class for runtime-derived percentages.\nclass Percentage : public Double {\npublic:\n  Percentage(const envoy::config::core::v3::RuntimePercent& percent_proto, Runtime::Loader& runtime)\n      : Double(percent_proto.runtime_key(), percent_proto.default_value().value(), runtime) {}\n\n  double value() const override {\n    const auto val = Double::value();\n    if (val <= 100.0 && val >= 0.0) {\n      return val;\n    }\n    return default_value_;\n  }\n};\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/secret/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"secret_manager_impl_lib\",\n    srcs = [\"secret_manager_impl.cc\"],\n    hdrs = [\"secret_manager_impl.h\"],\n    deps = [\n        \":sds_api_lib\",\n        \":secret_provider_impl_lib\",\n        \"//include/envoy/secret:secret_manager_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"secret_provider_impl_lib\",\n    srcs = [\"secret_provider_impl.cc\"],\n    hdrs = [\"secret_provider_impl.h\"],\n    deps = [\n        \"//include/envoy/secret:secret_provider_interface\",\n        \"//source/common/ssl:certificate_validation_context_config_impl_lib\",\n        \"//source/common/ssl:tls_certificate_config_impl_lib\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"sds_api_lib\",\n    srcs = [\"sds_api.cc\"],\n    hdrs = [\"sds_api.h\"],\n    deps = [\n        \"//include/envoy/config:subscription_factory_interface\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/secret:secret_provider_interface\",\n        
\"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:callback_impl_lib\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:subscription_base_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/ssl:certificate_validation_context_config_impl_lib\",\n        \"//source/common/ssl:tls_certificate_config_impl_lib\",\n        \"@envoy_api//envoy/api/v2/auth:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/secret/sds_api.cc",
    "content": "#include \"common/secret/sds_api.h\"\n\n#include \"envoy/api/v2/auth/cert.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/config/api_version.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Secret {\n\nSdsApi::SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_view sds_config_name,\n               Config::SubscriptionFactory& subscription_factory, TimeSource& time_source,\n               ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats,\n               std::function<void()> destructor_cb, Event::Dispatcher& dispatcher, Api::Api& api)\n    : Envoy::Config::SubscriptionBase<envoy::extensions::transport_sockets::tls::v3::Secret>(\n          sds_config.resource_api_version(), validation_visitor, \"name\"),\n      init_target_(fmt::format(\"SdsApi {}\", sds_config_name), [this] { initialize(); }),\n      stats_(stats), sds_config_(std::move(sds_config)), sds_config_name_(sds_config_name),\n      secret_hash_(0), clean_up_(std::move(destructor_cb)),\n      subscription_factory_(subscription_factory),\n      time_source_(time_source), secret_data_{sds_config_name_, \"uninitialized\",\n                                              time_source_.systemTime()},\n      dispatcher_(dispatcher), api_(api) {\n  const auto resource_name = getResourceName();\n  // This has to happen here (rather than in initialize()) as it can throw exceptions.\n  subscription_ = subscription_factory_.subscriptionFromConfigSource(\n      sds_config_, Grpc::Common::typeUrl(resource_name), stats_, *this, resource_decoder_);\n\n  // TODO(JimmyCYJ): Implement chained_init_manager, so that multiple init_manager\n  // can be chained together to behave as one init_manager. 
In that way, we let\n  // two listeners which share same SdsApi to register at separate init managers, and\n  // each init manager has a chance to initialize its targets.\n}\n\nvoid SdsApi::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                            const std::string& version_info) {\n  validateUpdateSize(resources.size());\n  const auto& secret = dynamic_cast<const envoy::extensions::transport_sockets::tls::v3::Secret&>(\n      resources[0].get().resource());\n\n  if (secret.name() != sds_config_name_) {\n    throw EnvoyException(\n        fmt::format(\"Unexpected SDS secret (expecting {}): {}\", sds_config_name_, secret.name()));\n  }\n\n  uint64_t new_hash = MessageUtil::hash(secret);\n\n  if (new_hash != secret_hash_) {\n    validateConfig(secret);\n    secret_hash_ = new_hash;\n    setSecret(secret);\n    update_callback_manager_.runCallbacks();\n\n    // List DataSources that refer to files\n    auto files = getDataSourceFilenames();\n    if (!files.empty()) {\n      // Create new watch, also destroys the old watch if any.\n      watcher_ = dispatcher_.createFilesystemWatcher();\n      files_hash_ = getHashForFiles();\n      for (auto const& filename : files) {\n        // Watch for directory instead of file. This allows users to do atomic renames\n        // on directory level (e.g. 
Kubernetes secret update).\n        const auto result = api_.fileSystem().splitPathFromFilename(filename);\n        watcher_->addWatch(absl::StrCat(result.directory_, \"/\"),\n                           Filesystem::Watcher::Events::MovedTo, [this](uint32_t) {\n                             uint64_t new_hash = getHashForFiles();\n                             if (new_hash != files_hash_) {\n                               update_callback_manager_.runCallbacks();\n                               files_hash_ = new_hash;\n                             }\n                           });\n      }\n    } else {\n      watcher_.reset(); // Destroy the old watch if any\n    }\n  }\n  secret_data_.last_updated_ = time_source_.systemTime();\n  secret_data_.version_info_ = version_info;\n  init_target_.ready();\n}\n\nvoid SdsApi::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                            const Protobuf::RepeatedPtrField<std::string>&, const std::string&) {\n  validateUpdateSize(added_resources.size());\n  onConfigUpdate(added_resources, added_resources[0].get().version());\n}\n\nvoid SdsApi::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                                  const EnvoyException*) {\n  ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason);\n  // We need to allow server startup to continue, even if we have a bad config.\n  init_target_.ready();\n}\n\nvoid SdsApi::validateUpdateSize(int num_resources) {\n  if (num_resources == 0) {\n    throw EnvoyException(\n        fmt::format(\"Missing SDS resources for {} in onConfigUpdate()\", sds_config_name_));\n  }\n  if (num_resources != 1) {\n    throw EnvoyException(fmt::format(\"Unexpected SDS secrets length: {}\", num_resources));\n  }\n}\n\nvoid SdsApi::initialize() {\n  // Don't put any code here that can throw exceptions, this has been the cause of multiple\n  // hard-to-diagnose regressions.\n  
subscription_->start({sds_config_name_});\n}\n\nSdsApi::SecretData SdsApi::secretData() { return secret_data_; }\n\nuint64_t SdsApi::getHashForFiles() {\n  uint64_t hash = 0;\n  for (auto const& filename : getDataSourceFilenames()) {\n    hash = HashUtil::xxHash64(api_.fileSystem().fileReadToEnd(filename), hash);\n  }\n  return hash;\n}\n\nstd::vector<std::string> TlsCertificateSdsApi::getDataSourceFilenames() {\n  std::vector<std::string> files;\n  if (tls_certificate_secrets_ && tls_certificate_secrets_->has_certificate_chain() &&\n      tls_certificate_secrets_->certificate_chain().specifier_case() ==\n          envoy::config::core::v3::DataSource::SpecifierCase::kFilename) {\n    files.push_back(tls_certificate_secrets_->certificate_chain().filename());\n  }\n  if (tls_certificate_secrets_ && tls_certificate_secrets_->has_private_key() &&\n      tls_certificate_secrets_->private_key().specifier_case() ==\n          envoy::config::core::v3::DataSource::SpecifierCase::kFilename) {\n    files.push_back(tls_certificate_secrets_->private_key().filename());\n  }\n  return files;\n}\n\nstd::vector<std::string> CertificateValidationContextSdsApi::getDataSourceFilenames() {\n  std::vector<std::string> files;\n  if (certificate_validation_context_secrets_ &&\n      certificate_validation_context_secrets_->has_trusted_ca() &&\n      certificate_validation_context_secrets_->trusted_ca().specifier_case() ==\n          envoy::config::core::v3::DataSource::SpecifierCase::kFilename) {\n    files.push_back(certificate_validation_context_secrets_->trusted_ca().filename());\n  }\n  return files;\n}\n\nstd::vector<std::string> TlsSessionTicketKeysSdsApi::getDataSourceFilenames() { return {}; }\n\nstd::vector<std::string> GenericSecretSdsApi::getDataSourceFilenames() { return {}; }\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/secret/sds_api.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/config/subscription_factory.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/secret/secret_callbacks.h\"\n#include \"envoy/secret/secret_provider.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/callback_impl.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/config/subscription_base.h\"\n#include \"common/config/utility.h\"\n#include \"common/init/target_impl.h\"\n#include \"common/ssl/certificate_validation_context_config_impl.h\"\n#include \"common/ssl/tls_certificate_config_impl.h\"\n\nnamespace Envoy {\nnamespace Secret {\n\n/**\n * SDS API implementation that fetches secrets from SDS server via Subscription.\n */\nclass SdsApi : public Envoy::Config::SubscriptionBase<\n                   envoy::extensions::transport_sockets::tls::v3::Secret> {\npublic:\n  struct SecretData {\n    const std::string resource_name_;\n    std::string version_info_;\n    SystemTime last_updated_;\n  };\n\n  SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_view sds_config_name,\n         Config::SubscriptionFactory& subscription_factory, TimeSource& time_source,\n         ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats,\n         std::function<void()> destructor_cb, Event::Dispatcher& dispatcher, Api::Api& api);\n  ~SdsApi() override {\n    RELEASE_ASSERT(registered_init_target_,\n           
        \"Init target was not registered with an init manager. registerInitTarget() must \"\n                   \"be called after Sds api concrete class instantiation.\");\n  };\n\n  SecretData secretData();\n\n  void registerInitTarget(Init::Manager& init_manager) {\n    init_manager.add(init_target_);\n    registered_init_target_ = true;\n  }\n\nprotected:\n  // Creates new secrets.\n  virtual void setSecret(const envoy::extensions::transport_sockets::tls::v3::Secret&) PURE;\n  virtual void validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret&) PURE;\n  Common::CallbackManager<> update_callback_manager_;\n\n  // Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                      const std::string& system_version_info) override;\n  void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                            const EnvoyException* e) override;\n  virtual std::vector<std::string> getDataSourceFilenames() PURE;\n\n  Init::TargetImpl init_target_;\n\nprivate:\n  void validateUpdateSize(int num_resources);\n  void initialize();\n  uint64_t getHashForFiles();\n\n  Stats::Store& stats_;\n\n  const envoy::config::core::v3::ConfigSource sds_config_;\n  Config::SubscriptionPtr subscription_;\n  const std::string sds_config_name_;\n\n  uint64_t secret_hash_;\n  uint64_t files_hash_;\n  Cleanup clean_up_;\n  Config::SubscriptionFactory& subscription_factory_;\n  TimeSource& time_source_;\n  SecretData secret_data_;\n  Event::Dispatcher& dispatcher_;\n  Api::Api& api_;\n  std::unique_ptr<Filesystem::Watcher> watcher_;\n  bool registered_init_target_{false};\n};\n\nclass TlsCertificateSdsApi;\nclass 
CertificateValidationContextSdsApi;\nclass TlsSessionTicketKeysSdsApi;\nclass GenericSecretSdsApi;\nusing TlsCertificateSdsApiSharedPtr = std::shared_ptr<TlsCertificateSdsApi>;\nusing CertificateValidationContextSdsApiSharedPtr =\n    std::shared_ptr<CertificateValidationContextSdsApi>;\nusing TlsSessionTicketKeysSdsApiSharedPtr = std::shared_ptr<TlsSessionTicketKeysSdsApi>;\nusing GenericSecretSdsApiSharedPtr = std::shared_ptr<GenericSecretSdsApi>;\n\n/**\n * TlsCertificateSdsApi implementation maintains and updates dynamic TLS certificate secrets.\n */\nclass TlsCertificateSdsApi : public SdsApi, public TlsCertificateConfigProvider {\npublic:\n  static TlsCertificateSdsApiSharedPtr\n  create(Server::Configuration::TransportSocketFactoryContext& secret_provider_context,\n         const envoy::config::core::v3::ConfigSource& sds_config,\n         const std::string& sds_config_name, std::function<void()> destructor_cb) {\n    // We need to do this early as we invoke the subscription factory during initialization, which\n    // is too late to throw.\n    Config::Utility::checkLocalInfo(\"TlsCertificateSdsApi\", secret_provider_context.localInfo());\n    auto ret = std::make_shared<TlsCertificateSdsApi>(\n        sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(),\n        secret_provider_context.dispatcher().timeSource(),\n        secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(),\n        destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api());\n    ret->registerInitTarget(secret_provider_context.initManager());\n    return ret;\n  }\n\n  TlsCertificateSdsApi(const envoy::config::core::v3::ConfigSource& sds_config,\n                       const std::string& sds_config_name,\n                       Config::SubscriptionFactory& subscription_factory, TimeSource& time_source,\n                       ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& 
stats,\n                       std::function<void()> destructor_cb, Event::Dispatcher& dispatcher,\n                       Api::Api& api)\n      : SdsApi(sds_config, sds_config_name, subscription_factory, time_source, validation_visitor,\n               stats, std::move(destructor_cb), dispatcher, api) {}\n\n  // SecretProvider\n  const envoy::extensions::transport_sockets::tls::v3::TlsCertificate* secret() const override {\n    return tls_certificate_secrets_.get();\n  }\n  Common::CallbackHandle* addValidationCallback(\n      std::function<void(const envoy::extensions::transport_sockets::tls::v3::TlsCertificate&)>)\n      override {\n    return nullptr;\n  }\n  Common::CallbackHandle* addUpdateCallback(std::function<void()> callback) override {\n    if (secret()) {\n      callback();\n    }\n    return update_callback_manager_.add(callback);\n  }\n\nprotected:\n  void setSecret(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override {\n    tls_certificate_secrets_ =\n        std::make_unique<envoy::extensions::transport_sockets::tls::v3::TlsCertificate>(\n            secret.tls_certificate());\n  }\n  void validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret&) override {}\n  std::vector<std::string> getDataSourceFilenames() override;\n\nprivate:\n  TlsCertificatePtr tls_certificate_secrets_;\n};\n\n/**\n * CertificateValidationContextSdsApi implementation maintains and updates dynamic certificate\n * validation context secrets.\n */\nclass CertificateValidationContextSdsApi : public SdsApi,\n                                           public CertificateValidationContextConfigProvider {\npublic:\n  static CertificateValidationContextSdsApiSharedPtr\n  create(Server::Configuration::TransportSocketFactoryContext& secret_provider_context,\n         const envoy::config::core::v3::ConfigSource& sds_config,\n         const std::string& sds_config_name, std::function<void()> destructor_cb) {\n    // We need to do this early as 
we invoke the subscription factory during initialization, which\n    // is too late to throw.\n    Config::Utility::checkLocalInfo(\"CertificateValidationContextSdsApi\",\n                                    secret_provider_context.localInfo());\n    auto ret = std::make_shared<CertificateValidationContextSdsApi>(\n        sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(),\n        secret_provider_context.dispatcher().timeSource(),\n        secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(),\n        destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api());\n    ret->registerInitTarget(secret_provider_context.initManager());\n    return ret;\n  }\n  CertificateValidationContextSdsApi(const envoy::config::core::v3::ConfigSource& sds_config,\n                                     const std::string& sds_config_name,\n                                     Config::SubscriptionFactory& subscription_factory,\n                                     TimeSource& time_source,\n                                     ProtobufMessage::ValidationVisitor& validation_visitor,\n                                     Stats::Store& stats, std::function<void()> destructor_cb,\n                                     Event::Dispatcher& dispatcher, Api::Api& api)\n      : SdsApi(sds_config, sds_config_name, subscription_factory, time_source, validation_visitor,\n               stats, std::move(destructor_cb), dispatcher, api) {}\n\n  // SecretProvider\n  const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n  secret() const override {\n    return certificate_validation_context_secrets_.get();\n  }\n  Common::CallbackHandle* addUpdateCallback(std::function<void()> callback) override {\n    if (secret()) {\n      callback();\n    }\n    return update_callback_manager_.add(callback);\n  }\n\n  Common::CallbackHandle* addValidationCallback(\n      std::function<\n          
void(const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&)>\n          callback) override {\n    return validation_callback_manager_.add(callback);\n  }\n\nprotected:\n  void setSecret(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override {\n    certificate_validation_context_secrets_ = std::make_unique<\n        envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext>(\n        secret.validation_context());\n  }\n\n  void\n  validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override {\n    validation_callback_manager_.runCallbacks(secret.validation_context());\n  }\n  std::vector<std::string> getDataSourceFilenames() override;\n\nprivate:\n  CertificateValidationContextPtr certificate_validation_context_secrets_;\n  Common::CallbackManager<\n      const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&>\n      validation_callback_manager_;\n};\n\n/**\n * TlsSessionTicketKeysSdsApi implementation maintains and updates dynamic tls session ticket keys\n * secrets.\n */\nclass TlsSessionTicketKeysSdsApi : public SdsApi, public TlsSessionTicketKeysConfigProvider {\npublic:\n  static TlsSessionTicketKeysSdsApiSharedPtr\n  create(Server::Configuration::TransportSocketFactoryContext& secret_provider_context,\n         const envoy::config::core::v3::ConfigSource& sds_config,\n         const std::string& sds_config_name, std::function<void()> destructor_cb) {\n    // We need to do this early as we invoke the subscription factory during initialization, which\n    // is too late to throw.\n    Config::Utility::checkLocalInfo(\"TlsSessionTicketKeysSdsApi\",\n                                    secret_provider_context.localInfo());\n    auto ret = std::make_shared<TlsSessionTicketKeysSdsApi>(\n        sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(),\n        
secret_provider_context.dispatcher().timeSource(),\n        secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(),\n        destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api());\n    ret->registerInitTarget(secret_provider_context.initManager());\n    return ret;\n  }\n\n  TlsSessionTicketKeysSdsApi(const envoy::config::core::v3::ConfigSource& sds_config,\n                             const std::string& sds_config_name,\n                             Config::SubscriptionFactory& subscription_factory,\n                             TimeSource& time_source,\n                             ProtobufMessage::ValidationVisitor& validation_visitor,\n                             Stats::Store& stats, std::function<void()> destructor_cb,\n                             Event::Dispatcher& dispatcher, Api::Api& api)\n      : SdsApi(sds_config, sds_config_name, subscription_factory, time_source, validation_visitor,\n               stats, std::move(destructor_cb), dispatcher, api) {}\n\n  // SecretProvider\n  const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys*\n  secret() const override {\n    return tls_session_ticket_keys_.get();\n  }\n\n  Common::CallbackHandle* addUpdateCallback(std::function<void()> callback) override {\n    if (secret()) {\n      callback();\n    }\n    return update_callback_manager_.add(callback);\n  }\n\n  Common::CallbackHandle* addValidationCallback(\n      std::function<\n          void(const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys&)>\n          callback) override {\n    return validation_callback_manager_.add(callback);\n  }\n\nprotected:\n  void setSecret(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override {\n    tls_session_ticket_keys_ =\n        std::make_unique<envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys>(\n            secret.session_ticket_keys());\n  }\n\n  void\n  validateConfig(const 
envoy::extensions::transport_sockets::tls::v3::Secret& secret) override {\n    validation_callback_manager_.runCallbacks(secret.session_ticket_keys());\n  }\n  std::vector<std::string> getDataSourceFilenames() override;\n\nprivate:\n  Secret::TlsSessionTicketKeysPtr tls_session_ticket_keys_;\n  Common::CallbackManager<\n      const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys&>\n      validation_callback_manager_;\n};\n\n/**\n * GenericSecretSdsApi implementation maintains and updates dynamic generic secret.\n */\nclass GenericSecretSdsApi : public SdsApi, public GenericSecretConfigProvider {\npublic:\n  static GenericSecretSdsApiSharedPtr\n  create(Server::Configuration::TransportSocketFactoryContext& secret_provider_context,\n         const envoy::config::core::v3::ConfigSource& sds_config,\n         const std::string& sds_config_name, std::function<void()> destructor_cb) {\n    // We need to do this early as we invoke the subscription factory during initialization, which\n    // is too late to throw.\n    Config::Utility::checkLocalInfo(\"GenericSecretSdsApi\", secret_provider_context.localInfo());\n    auto ret = std::make_shared<GenericSecretSdsApi>(\n        sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(),\n        secret_provider_context.dispatcher().timeSource(),\n        secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(),\n        destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api());\n    ret->registerInitTarget(secret_provider_context.initManager());\n    return ret;\n  }\n\n  GenericSecretSdsApi(const envoy::config::core::v3::ConfigSource& sds_config,\n                      const std::string& sds_config_name,\n                      Config::SubscriptionFactory& subscription_factory, TimeSource& time_source,\n                      ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats,\n                     
 std::function<void()> destructor_cb, Event::Dispatcher& dispatcher,\n                      Api::Api& api)\n      : SdsApi(sds_config, sds_config_name, subscription_factory, time_source, validation_visitor,\n               stats, std::move(destructor_cb), dispatcher, api) {}\n\n  // SecretProvider\n  const envoy::extensions::transport_sockets::tls::v3::GenericSecret* secret() const override {\n    return generic_secret.get();\n  }\n  Common::CallbackHandle* addUpdateCallback(std::function<void()> callback) override {\n    return update_callback_manager_.add(callback);\n  }\n  Common::CallbackHandle* addValidationCallback(\n      std::function<void(const envoy::extensions::transport_sockets::tls::v3::GenericSecret&)>\n          callback) override {\n    return validation_callback_manager_.add(callback);\n  }\n\nprotected:\n  void setSecret(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override {\n    generic_secret = std::make_unique<envoy::extensions::transport_sockets::tls::v3::GenericSecret>(\n        secret.generic_secret());\n  }\n  void\n  validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override {\n    validation_callback_manager_.runCallbacks(secret.generic_secret());\n  }\n  std::vector<std::string> getDataSourceFilenames() override;\n\nprivate:\n  GenericSecretPtr generic_secret;\n  Common::CallbackManager<const envoy::extensions::transport_sockets::tls::v3::GenericSecret&>\n      validation_callback_manager_;\n};\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/secret/secret_manager_impl.cc",
    "content": "#include \"common/secret/secret_manager_impl.h\"\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/secret/sds_api.h\"\n#include \"common/secret/secret_provider_impl.h\"\n#include \"common/ssl/certificate_validation_context_config_impl.h\"\n#include \"common/ssl/tls_certificate_config_impl.h\"\n\nnamespace Envoy {\nnamespace Secret {\n\nSecretManagerImpl::SecretManagerImpl(Server::ConfigTracker& config_tracker)\n    : config_tracker_entry_(config_tracker.add(\"secrets\", [this] { return dumpSecretConfigs(); })) {\n}\nvoid SecretManagerImpl::addStaticSecret(\n    const envoy::extensions::transport_sockets::tls::v3::Secret& secret) {\n  switch (secret.type_case()) {\n  case envoy::extensions::transport_sockets::tls::v3::Secret::TypeCase::kTlsCertificate: {\n    auto secret_provider =\n        std::make_shared<TlsCertificateConfigProviderImpl>(secret.tls_certificate());\n    if (!static_tls_certificate_providers_.insert(std::make_pair(secret.name(), secret_provider))\n             .second) {\n      throw EnvoyException(\n          absl::StrCat(\"Duplicate static TlsCertificate secret name \", secret.name()));\n    }\n    break;\n  }\n  case envoy::extensions::transport_sockets::tls::v3::Secret::TypeCase::kValidationContext: {\n    auto secret_provider = std::make_shared<CertificateValidationContextConfigProviderImpl>(\n        secret.validation_context());\n    if (!static_certificate_validation_context_providers_\n             .insert(std::make_pair(secret.name(), secret_provider))\n             .second) {\n      throw EnvoyException(absl::StrCat(\n          \"Duplicate 
static CertificateValidationContext secret name \", secret.name()));\n    }\n    break;\n  }\n  case envoy::extensions::transport_sockets::tls::v3::Secret::TypeCase::kSessionTicketKeys: {\n    auto secret_provider =\n        std::make_shared<TlsSessionTicketKeysConfigProviderImpl>(secret.session_ticket_keys());\n    if (!static_session_ticket_keys_providers_\n             .insert(std::make_pair(secret.name(), secret_provider))\n             .second) {\n      throw EnvoyException(\n          absl::StrCat(\"Duplicate static TlsSessionTicketKeys secret name \", secret.name()));\n    }\n    break;\n  }\n  case envoy::extensions::transport_sockets::tls::v3::Secret::TypeCase::kGenericSecret: {\n    auto secret_provider =\n        std::make_shared<GenericSecretConfigProviderImpl>(secret.generic_secret());\n    if (!static_generic_secret_providers_.insert(std::make_pair(secret.name(), secret_provider))\n             .second) {\n      throw EnvoyException(\n          absl::StrCat(\"Duplicate static GenericSecret secret name \", secret.name()));\n    }\n    break;\n  }\n  default:\n    throw EnvoyException(\"Secret type not implemented\");\n  }\n}\n\nTlsCertificateConfigProviderSharedPtr\nSecretManagerImpl::findStaticTlsCertificateProvider(const std::string& name) const {\n  auto secret = static_tls_certificate_providers_.find(name);\n  return (secret != static_tls_certificate_providers_.end()) ? secret->second : nullptr;\n}\n\nCertificateValidationContextConfigProviderSharedPtr\nSecretManagerImpl::findStaticCertificateValidationContextProvider(const std::string& name) const {\n  auto secret = static_certificate_validation_context_providers_.find(name);\n  return (secret != static_certificate_validation_context_providers_.end()) ? 
secret->second\n                                                                            : nullptr;\n}\n\nTlsSessionTicketKeysConfigProviderSharedPtr\nSecretManagerImpl::findStaticTlsSessionTicketKeysContextProvider(const std::string& name) const {\n  auto secret = static_session_ticket_keys_providers_.find(name);\n  return (secret != static_session_ticket_keys_providers_.end()) ? secret->second : nullptr;\n}\n\nGenericSecretConfigProviderSharedPtr\nSecretManagerImpl::findStaticGenericSecretProvider(const std::string& name) const {\n  auto secret = static_generic_secret_providers_.find(name);\n  return (secret != static_generic_secret_providers_.end()) ? secret->second : nullptr;\n}\n\nTlsCertificateConfigProviderSharedPtr SecretManagerImpl::createInlineTlsCertificateProvider(\n    const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& tls_certificate) {\n  return std::make_shared<TlsCertificateConfigProviderImpl>(tls_certificate);\n}\n\nCertificateValidationContextConfigProviderSharedPtr\nSecretManagerImpl::createInlineCertificateValidationContextProvider(\n    const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n        certificate_validation_context) {\n  return std::make_shared<CertificateValidationContextConfigProviderImpl>(\n      certificate_validation_context);\n}\n\nTlsSessionTicketKeysConfigProviderSharedPtr\nSecretManagerImpl::createInlineTlsSessionTicketKeysProvider(\n    const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys&\n        tls_session_ticket_keys) {\n  return std::make_shared<TlsSessionTicketKeysConfigProviderImpl>(tls_session_ticket_keys);\n}\n\nGenericSecretConfigProviderSharedPtr SecretManagerImpl::createInlineGenericSecretProvider(\n    const envoy::extensions::transport_sockets::tls::v3::GenericSecret& generic_secret) {\n  return std::make_shared<GenericSecretConfigProviderImpl>(generic_secret);\n}\n\nTlsCertificateConfigProviderSharedPtr 
SecretManagerImpl::findOrCreateTlsCertificateProvider(\n    const envoy::config::core::v3::ConfigSource& sds_config_source, const std::string& config_name,\n    Server::Configuration::TransportSocketFactoryContext& secret_provider_context) {\n  return certificate_providers_.findOrCreate(sds_config_source, config_name,\n                                             secret_provider_context);\n}\n\nCertificateValidationContextConfigProviderSharedPtr\nSecretManagerImpl::findOrCreateCertificateValidationContextProvider(\n    const envoy::config::core::v3::ConfigSource& sds_config_source, const std::string& config_name,\n    Server::Configuration::TransportSocketFactoryContext& secret_provider_context) {\n  return validation_context_providers_.findOrCreate(sds_config_source, config_name,\n                                                    secret_provider_context);\n}\n\nTlsSessionTicketKeysConfigProviderSharedPtr\nSecretManagerImpl::findOrCreateTlsSessionTicketKeysContextProvider(\n    const envoy::config::core::v3::ConfigSource& sds_config_source, const std::string& config_name,\n    Server::Configuration::TransportSocketFactoryContext& secret_provider_context) {\n  return session_ticket_keys_providers_.findOrCreate(sds_config_source, config_name,\n                                                     secret_provider_context);\n}\n\nGenericSecretConfigProviderSharedPtr SecretManagerImpl::findOrCreateGenericSecretProvider(\n    const envoy::config::core::v3::ConfigSource& sds_config_source, const std::string& config_name,\n    Server::Configuration::TransportSocketFactoryContext& secret_provider_context) {\n  return generic_secret_providers_.findOrCreate(sds_config_source, config_name,\n                                                secret_provider_context);\n}\n\nProtobufTypes::MessagePtr SecretManagerImpl::dumpSecretConfigs() {\n  // TODO(htuch): unlike other config providers, we're recreating the original\n  // Secrets below. 
This makes it hard to support API_RECOVER_ORIGINAL()-style\n  // recovery of the original config message. As a result, for now we're\n  // providing v3 config dumps. For Secrets, the main deprecation of interest\n  // are the use of v2 Struct config() and verify_subject_alt_name.\n  auto config_dump = std::make_unique<envoy::admin::v3::SecretsConfigDump>();\n  // Handle static tls key/cert providers.\n  for (const auto& cert_iter : static_tls_certificate_providers_) {\n    const auto& tls_cert = cert_iter.second;\n    auto static_secret = config_dump->mutable_static_secrets()->Add();\n    static_secret->set_name(cert_iter.first);\n    ASSERT(tls_cert != nullptr);\n    envoy::extensions::transport_sockets::tls::v3::Secret dump_secret;\n    dump_secret.set_name(cert_iter.first);\n    dump_secret.mutable_tls_certificate()->MergeFrom(*tls_cert->secret());\n    MessageUtil::redact(dump_secret);\n    static_secret->mutable_secret()->PackFrom(dump_secret);\n  }\n\n  // Handle static certificate validation context providers.\n  for (const auto& context_iter : static_certificate_validation_context_providers_) {\n    const auto& validation_context = context_iter.second;\n    auto static_secret = config_dump->mutable_static_secrets()->Add();\n    static_secret->set_name(context_iter.first);\n    ASSERT(validation_context != nullptr);\n    envoy::extensions::transport_sockets::tls::v3::Secret dump_secret;\n    dump_secret.set_name(context_iter.first);\n    dump_secret.mutable_validation_context()->MergeFrom(*validation_context->secret());\n    static_secret->mutable_secret()->PackFrom(dump_secret);\n  }\n\n  // Handle static session keys providers.\n  for (const auto& context_iter : static_session_ticket_keys_providers_) {\n    const auto& session_ticket_keys = context_iter.second;\n    auto static_secret = config_dump->mutable_static_secrets()->Add();\n    static_secret->set_name(context_iter.first);\n    ASSERT(session_ticket_keys != nullptr);\n    
envoy::extensions::transport_sockets::tls::v3::Secret dump_secret;\n    dump_secret.set_name(context_iter.first);\n    for (const auto& key : session_ticket_keys->secret()->keys()) {\n      dump_secret.mutable_session_ticket_keys()->add_keys()->MergeFrom(key);\n    }\n    MessageUtil::redact(dump_secret);\n    static_secret->mutable_secret()->PackFrom(dump_secret);\n  }\n\n  // Handle static generic secret providers.\n  for (const auto& secret_iter : static_generic_secret_providers_) {\n    const auto& generic_secret = secret_iter.second;\n    auto static_secret = config_dump->mutable_static_secrets()->Add();\n    static_secret->set_name(secret_iter.first);\n    ASSERT(generic_secret != nullptr);\n    envoy::extensions::transport_sockets::tls::v3::Secret dump_secret;\n    dump_secret.set_name(secret_iter.first);\n    dump_secret.mutable_generic_secret()->MergeFrom(*generic_secret->secret());\n    MessageUtil::redact(dump_secret);\n    static_secret->mutable_secret()->PackFrom(dump_secret);\n  }\n\n  // Handle dynamic tls_certificate providers.\n  const auto providers = certificate_providers_.allSecretProviders();\n  for (const auto& cert_secrets : providers) {\n    const auto& secret_data = cert_secrets->secretData();\n    const auto& tls_cert = cert_secrets->secret();\n    envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret;\n    const bool secret_ready = tls_cert != nullptr;\n    if (secret_ready) {\n      dump_secret = config_dump->mutable_dynamic_active_secrets()->Add();\n    } else {\n      dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add();\n    }\n    dump_secret->set_name(secret_data.resource_name_);\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(secret_data.resource_name_);\n    ProtobufWkt::Timestamp last_updated_ts;\n    TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts);\n    dump_secret->set_version_info(secret_data.version_info_);\n    
*dump_secret->mutable_last_updated() = last_updated_ts;\n    secret.set_name(secret_data.resource_name_);\n    if (secret_ready) {\n      secret.mutable_tls_certificate()->MergeFrom(*tls_cert);\n    }\n    MessageUtil::redact(secret);\n    dump_secret->mutable_secret()->PackFrom(secret);\n  }\n\n  // Handling dynamic cert validation context providers.\n  const auto context_secret_provider = validation_context_providers_.allSecretProviders();\n  for (const auto& validation_context_secret : context_secret_provider) {\n    const auto& secret_data = validation_context_secret->secretData();\n    const auto& validation_context = validation_context_secret->secret();\n    envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret;\n    const bool secret_ready = validation_context != nullptr;\n    if (secret_ready) {\n      dump_secret = config_dump->mutable_dynamic_active_secrets()->Add();\n    } else {\n      dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add();\n    }\n    dump_secret->set_name(secret_data.resource_name_);\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(secret_data.resource_name_);\n    ProtobufWkt::Timestamp last_updated_ts;\n    TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts);\n    dump_secret->set_version_info(secret_data.version_info_);\n    *dump_secret->mutable_last_updated() = last_updated_ts;\n    if (secret_ready) {\n      secret.mutable_validation_context()->MergeFrom(*validation_context);\n    }\n    dump_secret->mutable_secret()->PackFrom(secret);\n  }\n\n  // Handle dynamic session keys providers providers.\n  const auto stek_providers = session_ticket_keys_providers_.allSecretProviders();\n  for (const auto& stek_secrets : stek_providers) {\n    const auto& secret_data = stek_secrets->secretData();\n    const auto& tls_stek = stek_secrets->secret();\n    envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret;\n    const bool secret_ready = 
tls_stek != nullptr;\n    if (secret_ready) {\n      dump_secret = config_dump->mutable_dynamic_active_secrets()->Add();\n    } else {\n      dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add();\n    }\n    dump_secret->set_name(secret_data.resource_name_);\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(secret_data.resource_name_);\n    ProtobufWkt::Timestamp last_updated_ts;\n    TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts);\n    dump_secret->set_version_info(secret_data.version_info_);\n    *dump_secret->mutable_last_updated() = last_updated_ts;\n    if (secret_ready) {\n      secret.mutable_session_ticket_keys()->MergeFrom(*tls_stek);\n    }\n    MessageUtil::redact(secret);\n    dump_secret->mutable_secret()->PackFrom(secret);\n  }\n\n  // Handle dynamic generic secret providers.\n  const auto generic_secret_providers = generic_secret_providers_.allSecretProviders();\n  for (const auto& provider : generic_secret_providers) {\n    const auto& secret_data = provider->secretData();\n    const auto& generic_secret = provider->secret();\n    envoy::admin::v3::SecretsConfigDump::DynamicSecret* dump_secret;\n    const bool secret_ready = generic_secret != nullptr;\n    if (secret_ready) {\n      dump_secret = config_dump->mutable_dynamic_active_secrets()->Add();\n    } else {\n      dump_secret = config_dump->mutable_dynamic_warming_secrets()->Add();\n    }\n    dump_secret->set_name(secret_data.resource_name_);\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(secret_data.resource_name_);\n    ProtobufWkt::Timestamp last_updated_ts;\n    TimestampUtil::systemClockToTimestamp(secret_data.last_updated_, last_updated_ts);\n    dump_secret->set_version_info(secret_data.version_info_);\n    *dump_secret->mutable_last_updated() = last_updated_ts;\n    if (secret_ready) {\n      secret.mutable_generic_secret()->MergeFrom(*generic_secret);\n    
}\n    MessageUtil::redact(secret);\n    dump_secret->mutable_secret()->PackFrom(secret);\n  }\n\n  return config_dump;\n}\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/secret/secret_manager_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/secret/secret_provider.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/ssl/certificate_validation_context_config.h\"\n#include \"envoy/ssl/tls_certificate_config.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/secret/sds_api.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Secret {\n\nclass SecretManagerImpl : public SecretManager {\npublic:\n  SecretManagerImpl(Server::ConfigTracker& config_tracker);\n  void\n  addStaticSecret(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override;\n\n  TlsCertificateConfigProviderSharedPtr\n  findStaticTlsCertificateProvider(const std::string& name) const override;\n\n  CertificateValidationContextConfigProviderSharedPtr\n  findStaticCertificateValidationContextProvider(const std::string& name) const override;\n\n  TlsSessionTicketKeysConfigProviderSharedPtr\n  findStaticTlsSessionTicketKeysContextProvider(const std::string& name) const override;\n\n  GenericSecretConfigProviderSharedPtr\n  findStaticGenericSecretProvider(const std::string& name) const override;\n\n  TlsCertificateConfigProviderSharedPtr createInlineTlsCertificateProvider(\n      const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& tls_certificate)\n      override;\n\n  CertificateValidationContextConfigProviderSharedPtr\n  createInlineCertificateValidationContextProvider(\n      const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n          certificate_validation_context) override;\n\n  TlsSessionTicketKeysConfigProviderSharedPtr createInlineTlsSessionTicketKeysProvider(\n      const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys&\n          tls_session_ticket_keys) override;\n\n  
GenericSecretConfigProviderSharedPtr createInlineGenericSecretProvider(\n      const envoy::extensions::transport_sockets::tls::v3::GenericSecret& generic_secret) override;\n\n  TlsCertificateConfigProviderSharedPtr findOrCreateTlsCertificateProvider(\n      const envoy::config::core::v3::ConfigSource& config_source, const std::string& config_name,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context) override;\n\n  CertificateValidationContextConfigProviderSharedPtr\n  findOrCreateCertificateValidationContextProvider(\n      const envoy::config::core::v3::ConfigSource& config_source, const std::string& config_name,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context) override;\n\n  TlsSessionTicketKeysConfigProviderSharedPtr findOrCreateTlsSessionTicketKeysContextProvider(\n      const envoy::config::core::v3::ConfigSource& config_source, const std::string& config_name,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context) override;\n\n  GenericSecretConfigProviderSharedPtr findOrCreateGenericSecretProvider(\n      const envoy::config::core::v3::ConfigSource& config_source, const std::string& config_name,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context) override;\n\nprivate:\n  ProtobufTypes::MessagePtr dumpSecretConfigs();\n\n  template <class SecretType>\n  class DynamicSecretProviders : public Logger::Loggable<Logger::Id::secret> {\n  public:\n    // Finds or creates SdsApi object.\n    std::shared_ptr<SecretType>\n    findOrCreate(const envoy::config::core::v3::ConfigSource& sds_config_source,\n                 const std::string& config_name,\n                 Server::Configuration::TransportSocketFactoryContext& secret_provider_context) {\n      const std::string map_key =\n          absl::StrCat(MessageUtil::hash(sds_config_source), \".\", config_name);\n\n      std::shared_ptr<SecretType> secret_provider = 
dynamic_secret_providers_[map_key].lock();\n      if (!secret_provider) {\n        // SdsApi is owned by ListenerImpl and ClusterInfo which are destroyed before\n        // SecretManagerImpl. It is safe to invoke this callback at the destructor of SdsApi.\n        std::function<void()> unregister_secret_provider = [map_key, this]() {\n          removeDynamicSecretProvider(map_key);\n        };\n        secret_provider = SecretType::create(secret_provider_context, sds_config_source,\n                                             config_name, unregister_secret_provider);\n        dynamic_secret_providers_[map_key] = secret_provider;\n      }\n      return secret_provider;\n    }\n\n    std::vector<std::shared_ptr<SecretType>> allSecretProviders() {\n      std::vector<std::shared_ptr<SecretType>> providers;\n      for (const auto& secret_entry : dynamic_secret_providers_) {\n        std::shared_ptr<SecretType> secret_provider = secret_entry.second.lock();\n        if (secret_provider) {\n          providers.push_back(std::move(secret_provider));\n        }\n      }\n      return providers;\n    }\n\n  private:\n    // Removes dynamic secret provider which has been deleted.\n    void removeDynamicSecretProvider(const std::string& map_key) {\n      ENVOY_LOG(debug, \"Unregister secret provider. 
hash key: {}\", map_key);\n\n      auto num_deleted = dynamic_secret_providers_.erase(map_key);\n      ASSERT(num_deleted == 1, \"\");\n    }\n\n    absl::node_hash_map<std::string, std::weak_ptr<SecretType>> dynamic_secret_providers_;\n  };\n\n  // Manages pairs of secret name and TlsCertificateConfigProviderSharedPtr.\n  absl::node_hash_map<std::string, TlsCertificateConfigProviderSharedPtr>\n      static_tls_certificate_providers_;\n\n  // Manages pairs of secret name and CertificateValidationContextConfigProviderSharedPtr.\n  absl::node_hash_map<std::string, CertificateValidationContextConfigProviderSharedPtr>\n      static_certificate_validation_context_providers_;\n\n  absl::node_hash_map<std::string, TlsSessionTicketKeysConfigProviderSharedPtr>\n      static_session_ticket_keys_providers_;\n\n  // Manages pairs of secret name and GenericSecretConfigProviderSharedPtr.\n  absl::node_hash_map<std::string, GenericSecretConfigProviderSharedPtr>\n      static_generic_secret_providers_;\n\n  // map hash code of SDS config source and SdsApi object.\n  DynamicSecretProviders<TlsCertificateSdsApi> certificate_providers_;\n  DynamicSecretProviders<CertificateValidationContextSdsApi> validation_context_providers_;\n  DynamicSecretProviders<TlsSessionTicketKeysSdsApi> session_ticket_keys_providers_;\n  DynamicSecretProviders<GenericSecretSdsApi> generic_secret_providers_;\n\n  Server::ConfigTracker::EntryOwnerPtr config_tracker_entry_;\n};\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/secret/secret_provider_impl.cc",
    "content": "#include \"common/secret/secret_provider_impl.h\"\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/ssl/certificate_validation_context_config_impl.h\"\n#include \"common/ssl/tls_certificate_config_impl.h\"\n\nnamespace Envoy {\nnamespace Secret {\n\nTlsCertificateConfigProviderImpl::TlsCertificateConfigProviderImpl(\n    const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& tls_certificate)\n    : tls_certificate_(\n          std::make_unique<envoy::extensions::transport_sockets::tls::v3::TlsCertificate>(\n              tls_certificate)) {}\n\nCertificateValidationContextConfigProviderImpl::CertificateValidationContextConfigProviderImpl(\n    const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n        certificate_validation_context)\n    : certificate_validation_context_(\n          std::make_unique<\n              envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext>(\n              certificate_validation_context)) {}\n\nTlsSessionTicketKeysConfigProviderImpl::TlsSessionTicketKeysConfigProviderImpl(\n    const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys&\n        tls_session_ticket_keys)\n    : tls_session_ticket_keys_(\n          std::make_unique<envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys>(\n              tls_session_ticket_keys)) {}\n\nGenericSecretConfigProviderImpl::GenericSecretConfigProviderImpl(\n    const envoy::extensions::transport_sockets::tls::v3::GenericSecret& generic_secret)\n    : generic_secret_(\n          std::make_unique<envoy::extensions::transport_sockets::tls::v3::GenericSecret>(\n              generic_secret)) {}\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/secret/secret_provider_impl.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/secret/secret_provider.h\"\n#include \"envoy/ssl/certificate_validation_context_config.h\"\n#include \"envoy/ssl/tls_certificate_config.h\"\n\nnamespace Envoy {\nnamespace Secret {\n\nclass TlsCertificateConfigProviderImpl : public TlsCertificateConfigProvider {\npublic:\n  TlsCertificateConfigProviderImpl(\n      const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& tls_certificate);\n\n  const envoy::extensions::transport_sockets::tls::v3::TlsCertificate* secret() const override {\n    return tls_certificate_.get();\n  }\n\n  Common::CallbackHandle* addValidationCallback(\n      std::function<void(const envoy::extensions::transport_sockets::tls::v3::TlsCertificate&)>)\n      override {\n    return nullptr;\n  }\n\n  Common::CallbackHandle* addUpdateCallback(std::function<void()>) override { return nullptr; }\n\nprivate:\n  Secret::TlsCertificatePtr tls_certificate_;\n};\n\nclass CertificateValidationContextConfigProviderImpl\n    : public CertificateValidationContextConfigProvider {\npublic:\n  CertificateValidationContextConfigProviderImpl(\n      const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n          certificate_validation_context);\n\n  const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n  secret() const override {\n    return certificate_validation_context_.get();\n  }\n\n  Common::CallbackHandle* addValidationCallback(\n      std::function<\n          void(const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&)>)\n      override {\n    return nullptr;\n  }\n\n  Common::CallbackHandle* addUpdateCallback(std::function<void()>) override { return nullptr; }\n\nprivate:\n  Secret::CertificateValidationContextPtr certificate_validation_context_;\n};\n\nclass TlsSessionTicketKeysConfigProviderImpl : public 
TlsSessionTicketKeysConfigProvider {\npublic:\n  TlsSessionTicketKeysConfigProviderImpl(\n      const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys&\n          tls_session_ticket_keys);\n\n  const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys*\n  secret() const override {\n    return tls_session_ticket_keys_.get();\n  }\n\n  Common::CallbackHandle* addValidationCallback(\n      std::function<void(\n          const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys&)>) override {\n    return nullptr;\n  }\n\n  Common::CallbackHandle* addUpdateCallback(std::function<void()>) override { return nullptr; }\n\nprivate:\n  Secret::TlsSessionTicketKeysPtr tls_session_ticket_keys_;\n};\n\nclass GenericSecretConfigProviderImpl : public GenericSecretConfigProvider {\npublic:\n  GenericSecretConfigProviderImpl(\n      const envoy::extensions::transport_sockets::tls::v3::GenericSecret& generic_secret);\n\n  const envoy::extensions::transport_sockets::tls::v3::GenericSecret* secret() const override {\n    return generic_secret_.get();\n  }\n\n  Common::CallbackHandle* addValidationCallback(\n      std::function<void(const envoy::extensions::transport_sockets::tls::v3::GenericSecret&)>)\n      override {\n    return nullptr;\n  }\n\n  Common::CallbackHandle* addUpdateCallback(std::function<void()>) override { return nullptr; }\n\nprivate:\n  Secret::GenericSecretPtr generic_secret_;\n};\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/shared_pool/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"shared_pool_lib\",\n    hdrs = [\n        \"shared_pool.h\",\n    ],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/singleton:instance_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:non_copyable\",\n        \"//source/common/common:thread_synchronizer_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/shared_pool/shared_pool.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n#include <thread>\n#include <type_traits>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/singleton/instance.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/non_copyable.h\"\n#include \"common/common/thread_synchronizer.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace SharedPool {\n\n/**\n * Used to share objects that have the same content.\n * control the life cycle of shared objects by reference counting\n *\n * Note:  ObjectSharedPool needs to be created in the main thread,\n * all the member methods can only be called in the main thread,\n * it does not have the ownership of object stored internally, the internal storage is weak_ptr,\n * when the internal storage object destructor executes the custom deleter to remove its own\n * weak_ptr from the ObjectSharedPool.\n *\n * There is also a need to ensure that the thread where ObjectSharedPool's destructor is also in the\n * main thread, or that ObjectSharedPool destruct before the program exit\n */\ntemplate <typename T, typename HashFunc = std::hash<T>,\n          class = typename std::enable_if<std::is_copy_constructible<T>::value>::type>\nclass ObjectSharedPool : public Singleton::Instance,\n                         public std::enable_shared_from_this<ObjectSharedPool<T, HashFunc>>,\n                         NonCopyable {\npublic:\n  ObjectSharedPool(Event::Dispatcher& dispatcher)\n      : thread_id_(std::this_thread::get_id()), dispatcher_(dispatcher) {}\n\n  void deleteObject(const size_t hash_key) {\n    if (std::this_thread::get_id() == thread_id_) {\n      // There may be new inserts with the same hash value before deleting the old element,\n      // so there is no need to delete it at this time.\n      if (object_pool_.find(hash_key) != object_pool_.end() &&\n          object_pool_[hash_key].use_count() == 0) {\n        object_pool_.erase(hash_key);\n      }\n    } 
else {\n      // Most of the time, the object's destructor occurs in the main thread, but with some\n      // exceptions, it is destructed in the worker thread. In order to keep the object_pool_ thread\n      // safe, the deleteObject needs to be delivered to the main thread.\n      auto this_shared_ptr = this->shared_from_this();\n      // Used for testing to simulate some race condition scenarios\n      sync_.syncPoint(DeleteObjectOnMainThread);\n      dispatcher_.post([hash_key, this_shared_ptr] { this_shared_ptr->deleteObject(hash_key); });\n    }\n  }\n\n  std::shared_ptr<T> getObject(const T& obj) {\n    ASSERT(std::this_thread::get_id() == thread_id_);\n    auto hashed_value = HashFunc{}(obj);\n    auto object_it = object_pool_.find(hashed_value);\n    if (object_it != object_pool_.end()) {\n      auto lock_object = object_it->second.lock();\n      if (lock_object) {\n        return lock_object;\n      }\n    }\n\n    auto this_shared_ptr = this->shared_from_this();\n    std::shared_ptr<T> obj_shared(new T(obj), [hashed_value, this_shared_ptr](T* ptr) {\n      this_shared_ptr->sync().syncPoint(ObjectSharedPool<T>::ObjectDeleterEntry);\n      // release ptr as early as possible to avoid exposure of ptr, resulting in undefined behavior.\n      delete ptr;\n      this_shared_ptr->deleteObject(hashed_value);\n    });\n\n    // When inserted, it is possible that the old elements still exist before they can be deleted,\n    // and the insertion will fail and therefore need to be overwritten.\n    auto ret = object_pool_.try_emplace(hashed_value, obj_shared);\n    if (!ret.second) {\n      ASSERT(ret.first->second.use_count() == 0);\n      ret.first->second = obj_shared;\n    }\n    return obj_shared;\n  }\n\n  std::size_t poolSize() const {\n    ASSERT(std::this_thread::get_id() == thread_id_);\n    return object_pool_.size();\n  }\n\n  /**\n   * @return a thread synchronizer object used for reproducing a race-condition in tests.\n   */\n  
Thread::ThreadSynchronizer& sync() { return sync_; }\n  static const char DeleteObjectOnMainThread[];\n  static const char ObjectDeleterEntry[];\n\nprivate:\n  const std::thread::id thread_id_;\n  absl::flat_hash_map<size_t, std::weak_ptr<T>> object_pool_;\n  Event::Dispatcher& dispatcher_;\n  Thread::ThreadSynchronizer sync_;\n};\n\ntemplate <typename T, typename HashFunc, class V>\nconst char ObjectSharedPool<T, HashFunc, V>::DeleteObjectOnMainThread[] = \"delete-object-on-main\";\n\ntemplate <typename T, typename HashFunc, class V>\nconst char ObjectSharedPool<T, HashFunc, V>::ObjectDeleterEntry[] = \"deleter-entry\";\n\n} // namespace SharedPool\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/signal/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"fatal_error_handler_lib\",\n    srcs = [\"fatal_error_handler.cc\"],\n    hdrs = [\"fatal_error_handler.h\"],\n    deps = [\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"sigaction_lib\",\n    srcs = [\"signal_action.cc\"],\n    hdrs = [\"signal_action.h\"],\n    tags = [\"backtrace\"],\n    deps = [\n        \":fatal_error_handler_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:non_copyable\",\n        \"//source/common/singleton:threadsafe_singleton\",\n        \"//source/server:backtrace_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/signal/fatal_error_handler.cc",
    "content": "#include \"common/signal/fatal_error_handler.h\"\n\n#include <list>\n\n#include \"common/common/macros.h\"\n\n#include \"absl/base/attributes.h\"\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace FatalErrorHandler {\n\nnamespace {\n\nABSL_CONST_INIT static absl::Mutex failure_mutex(absl::kConstInit);\n// Since we can't grab the failure mutex on fatal error (snagging locks under\n// fatal crash causing potential deadlocks) access the handler list as an atomic\n// operation, which is async-signal-safe. If the crash handler runs at the same\n// time as another thread tries to modify the list, one of them will get the\n// list and the other will get nullptr instead. If the crash handler loses the\n// race and gets nullptr, it won't run any of the registered error handlers.\nusing FailureFunctionList = std::list<const FatalErrorHandlerInterface*>;\nABSL_CONST_INIT std::atomic<FailureFunctionList*> fatal_error_handlers{nullptr};\n\n} // namespace\n\nvoid registerFatalErrorHandler(const FatalErrorHandlerInterface& handler) {\n#ifdef ENVOY_OBJECT_TRACE_ON_DUMP\n  absl::MutexLock l(&failure_mutex);\n  FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed);\n  if (list == nullptr) {\n    list = new FailureFunctionList;\n  }\n  list->push_back(&handler);\n  fatal_error_handlers.store(list, std::memory_order_release);\n#else\n  UNREFERENCED_PARAMETER(handler);\n#endif\n}\n\nvoid removeFatalErrorHandler(const FatalErrorHandlerInterface& handler) {\n#ifdef ENVOY_OBJECT_TRACE_ON_DUMP\n  absl::MutexLock l(&failure_mutex);\n  FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed);\n  if (list == nullptr) {\n    // removeFatalErrorHandler() may see an empty list of fatal error handlers\n    // if it's called at the same time as callFatalErrorHandlers(). 
In that case\n    // Envoy is in the middle of crashing anyway, but don't add a segfault on\n    // top of the crash.\n    return;\n  }\n  list->remove(&handler);\n  if (list->empty()) {\n    delete list;\n  } else {\n    fatal_error_handlers.store(list, std::memory_order_release);\n  }\n#else\n  UNREFERENCED_PARAMETER(handler);\n#endif\n}\n\nvoid callFatalErrorHandlers(std::ostream& os) {\n  FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed);\n  if (list != nullptr) {\n    for (const auto* handler : *list) {\n      handler->onFatalError(os);\n    }\n    delete list;\n  }\n}\n\n} // namespace FatalErrorHandler\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/signal/fatal_error_handler.h",
    "content": "#pragma once\n\n#include <ostream>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\n\n// A simple class which allows registering functions to be called when Envoy\n// receives one of the fatal signals, documented in signal_action.h.\nclass FatalErrorHandlerInterface {\npublic:\n  virtual ~FatalErrorHandlerInterface() = default;\n  // Called when Envoy receives a fatal signal. Must be async-signal-safe: in\n  // particular, it can't allocate memory.\n  virtual void onFatalError(std::ostream& os) const PURE;\n};\n\nnamespace FatalErrorHandler {\n/**\n * Add this handler to the list of functions which will be called if Envoy\n * receives a fatal signal.\n */\nvoid registerFatalErrorHandler(const FatalErrorHandlerInterface& handler);\n\n/**\n * Removes this handler from the list of functions which will be called if Envoy\n * receives a fatal signal.\n */\nvoid removeFatalErrorHandler(const FatalErrorHandlerInterface& handler);\n\n/**\n * Calls and unregisters the fatal error handlers registered with\n * registerFatalErrorHandler. This is async-signal-safe and intended to be\n * called from a fatal signal handler.\n */\nvoid callFatalErrorHandlers(std::ostream& os);\n} // namespace FatalErrorHandler\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/signal/signal_action.cc",
    "content": "#include \"common/signal/signal_action.h\"\n\n#include <sys/mman.h>\n\n#include <csignal>\n\n#include \"common/common/assert.h\"\n#include \"common/version/version.h\"\n\nnamespace Envoy {\n\nconstexpr int SignalAction::FATAL_SIGS[];\n\nvoid SignalAction::sigHandler(int sig, siginfo_t* info, void* context) {\n  BackwardsTrace tracer;\n\n  tracer.logFault(strsignal(sig), info->si_addr);\n  if (context != nullptr) {\n    tracer.captureFrom(context);\n  } else {\n    tracer.capture();\n  }\n  tracer.logTrace();\n\n  // Finally after logging the stack trace, call any registered crash handlers.\n  FatalErrorHandler::callFatalErrorHandlers(std::cerr);\n\n  signal(sig, SIG_DFL);\n  raise(sig);\n}\n\nvoid SignalAction::installSigHandlers() {\n  stack_t stack;\n  stack.ss_sp = altstack_ + guard_size_; // Guard page at one end ...\n  stack.ss_size = altstack_size_;        // ... guard page at the other\n  stack.ss_flags = 0;\n\n  RELEASE_ASSERT(sigaltstack(&stack, &previous_altstack_) == 0, \"\");\n\n  // Make sure VersionInfo::version() is initialized so we don't allocate std::string in signal\n  // handlers.\n  RELEASE_ASSERT(!VersionInfo::version().empty(), \"\");\n\n  int hidx = 0;\n  for (const auto& sig : FATAL_SIGS) {\n    struct sigaction saction;\n    std::memset(&saction, 0, sizeof(saction));\n    sigemptyset(&saction.sa_mask);\n    saction.sa_flags = (SA_SIGINFO | SA_ONSTACK | SA_RESETHAND | SA_NODEFER);\n    saction.sa_sigaction = sigHandler;\n    auto* handler = &previous_handlers_[hidx++];\n    RELEASE_ASSERT(sigaction(sig, &saction, handler) == 0, \"\");\n  }\n}\n\nvoid SignalAction::removeSigHandlers() {\n#if defined(__APPLE__)\n  // ss_flags contains SS_DISABLE, but Darwin still checks the size, contrary to the man page\n  if (previous_altstack_.ss_size < MINSIGSTKSZ) {\n    previous_altstack_.ss_size = MINSIGSTKSZ;\n  }\n#endif\n  RELEASE_ASSERT(sigaltstack(&previous_altstack_, nullptr) == 0, \"\");\n\n  int hidx = 0;\n  for (const auto& sig 
: FATAL_SIGS) {\n    auto* handler = &previous_handlers_[hidx++];\n    RELEASE_ASSERT(sigaction(sig, handler, nullptr) == 0, \"\");\n  }\n}\n\n#if defined(__APPLE__) && !defined(MAP_STACK)\n#define MAP_STACK (0)\n#endif\n\nvoid SignalAction::mapAndProtectStackMemory() {\n  // Per docs MAP_STACK doesn't actually do anything today but provides a\n  // library hint that might be used in the future.\n  altstack_ = static_cast<char*>(mmap(nullptr, mapSizeWithGuards(), PROT_READ | PROT_WRITE,\n                                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0));\n  RELEASE_ASSERT(altstack_, \"\");\n  RELEASE_ASSERT(mprotect(altstack_, guard_size_, PROT_NONE) == 0, \"\");\n  RELEASE_ASSERT(mprotect(altstack_ + guard_size_ + altstack_size_, guard_size_, PROT_NONE) == 0,\n                 \"\");\n}\n\nvoid SignalAction::unmapStackMemory() { munmap(altstack_, mapSizeWithGuards()); }\n\nvoid SignalAction::doGoodAccessForTest() {\n  volatile char* altaltstack = altstack_;\n  for (size_t i = 0; i < altstack_size_; ++i) {\n    *(altaltstack + guard_size_ + i) = 42;\n  }\n  for (size_t i = 0; i < altstack_size_; ++i) {\n    ASSERT(*(altaltstack + guard_size_ + i) == 42);\n  }\n}\n\nvoid SignalAction::tryEvilAccessForTest(bool end) {\n  volatile char* altaltstack = altstack_;\n  if (end) {\n    // One byte past the valid region\n    // http://oeis.org/A001969\n    *(altaltstack + guard_size_ + altstack_size_) = 43;\n  } else {\n    // One byte before the valid region\n    *(altaltstack + guard_size_ - 1) = 43;\n  }\n}\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/signal/signal_action.h",
    "content": "#pragma once\n\n#include <unistd.h>\n\n#include <algorithm>\n#include <csignal>\n#include <list>\n\n#include \"common/common/non_copyable.h\"\n#include \"common/signal/fatal_error_handler.h\"\n\n#include \"server/backtrace.h\"\n\nnamespace Envoy {\n\n/**\n * This class installs signal handlers for fatal signal types.\n *\n * These signals are handled:\n *   SIGABRT\n *   SIGBUS\n *   SIGFPE\n *   SIGILL\n *   SIGSEGV\n *\n * Upon intercepting the signal the following actions are taken:\n *\n *   A Backtrace is printed from the address the signal was encountered at, if\n *   it is possible to retrieve.\n *\n *   The signal handler is reset to the default handler (which is expected to\n *   terminate the process).\n *\n *   The signal is raised again (which ultimately kills the process)\n *\n * The signal handler must run on an alternative stack so that we can do the\n * stack unwind on the original stack. Memory is allocated for this purpose when\n * this object is constructed. When this object goes out of scope the memory\n * used for the alternate signal stack is destroyed and the previous signal handler\n * and alt stack if previously used are restored.\n *\n * Note that we do NOT restore the previously saved sigaction and alt stack in\n * the signal handler itself. This is fraught with complexity and has little\n * benefit. The inner most scope SignalAction will terminate the process by\n * re-raising the fatal signal with default handler.\n *\n * It is recommended that this object be instantiated at the highest possible\n * scope, e.g., in main(). This enables fatal signal handling for almost all code\n * executed. 
Because of the save-and-restore behavior it is possible for\n * SignalAction to be used at both wider and tighter scopes without issue.\n */\nclass SignalAction : NonCopyable {\npublic:\n  SignalAction()\n      : guard_size_(sysconf(_SC_PAGE_SIZE)),\n        altstack_size_(std::max(guard_size_ * 4, static_cast<size_t>(MINSIGSTKSZ))) {\n    mapAndProtectStackMemory();\n    installSigHandlers();\n  }\n  ~SignalAction() {\n    removeSigHandlers();\n    unmapStackMemory();\n  }\n  /**\n   * Helpers for testing guarded stack memory\n   */\n  void doGoodAccessForTest();\n  void tryEvilAccessForTest(bool end);\n  /**\n   * The actual signal handler function with prototype matching signal.h\n   *\n   * Public so that we can exercise it directly from a test.\n   */\n  static void sigHandler(int sig, siginfo_t* info, void* context);\n\nprivate:\n  /**\n   * Allocate this many bytes on each side of the area used for alt stack.\n   *\n   * Set to system page size.\n   *\n   * The memory will be protected from read and write.\n   */\n  const size_t guard_size_;\n  /**\n   * Use this many bytes for the alternate signal handling stack.\n   *\n   * Initialized as a multiple of page size (although signalstack will\n   * do alignment as needed).\n   *\n   * Additionally, two guard pages will be allocated to bookend the usable area.\n   */\n  const size_t altstack_size_;\n  /**\n   * This constant array defines the signals we will insert handlers for.\n   *\n   * Essentially this is the list of signals that would cause a core dump.\n   * The object will contain an array of struct sigactions with the same number\n   * of elements that are in this array.\n   */\n  static constexpr int FATAL_SIGS[] = {SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV};\n  /**\n   * Return the memory size we actually map including two guard pages.\n   */\n  size_t mapSizeWithGuards() const { return altstack_size_ + guard_size_ * 2; }\n  /**\n   * Install all signal handlers and setup signal handling stack.\n   
*/\n  void installSigHandlers();\n  /**\n   * Remove all signal handlers.\n   *\n   * Must be executed before the alt stack memory goes away.\n   *\n   * Signal handlers will be reset to the default, NOT back to any signal\n   * handler existing before InstallSigHandlers().\n   */\n  void removeSigHandlers();\n  /**\n   * Use mmap to map anonymous memory for the alternative stack.\n   *\n   * GUARD_SIZE on either end of the memory will be marked PROT_NONE, protected\n   * from all access.\n   */\n  void mapAndProtectStackMemory();\n  /**\n   * Unmap alternative stack memory.\n   */\n  void unmapStackMemory();\n  char* altstack_{};\n  std::array<struct sigaction, sizeof(FATAL_SIGS) / sizeof(int)> previous_handlers_;\n  stack_t previous_altstack_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/singleton/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"const_singleton\",\n    hdrs = [\"const_singleton.h\"],\n)\n\nenvoy_cc_library(\n    name = \"manager_impl_lib\",\n    srcs = [\"manager_impl.cc\"],\n    hdrs = [\"manager_impl.h\"],\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:non_copyable\",\n        \"//source/common/common:thread_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"threadsafe_singleton\",\n    hdrs = [\"threadsafe_singleton.h\"],\n    external_deps = [\"abseil_base\"],\n    deps = [\"//source/common/common:assert_lib\"],\n)\n"
  },
  {
    "path": "source/common/singleton/const_singleton.h",
    "content": "#pragma once\n\nnamespace Envoy {\n\n/**\n * ConstSingleton allows easy global cross-thread access to a const object.\n *\n * This singleton should be used for data which is initialized once at\n * start-up and then be treated as immutable const data thereafter.\n */\ntemplate <class T> class ConstSingleton {\npublic:\n  /**\n   * Obtain an instance of the singleton for class T.\n   * @return const T& a reference to the singleton for class T.\n   */\n  static const T& get() {\n    static T* instance = new T();\n    return *instance;\n  }\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/singleton/manager_impl.cc",
    "content": "#include \"common/singleton/manager_impl.h\"\n\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n\nnamespace Envoy {\nnamespace Singleton {\n\nInstanceSharedPtr ManagerImpl::get(const std::string& name, SingletonFactoryCb cb) {\n  ASSERT(run_tid_ == thread_factory_.currentThreadId());\n\n  if (nullptr == Registry::FactoryRegistry<Registration>::getFactory(name)) {\n    PANIC(fmt::format(\"invalid singleton name '{}'. Make sure it is registered.\", name));\n  }\n\n  if (nullptr == singletons_[name].lock()) {\n    InstanceSharedPtr singleton = cb();\n    singletons_[name] = singleton;\n    return singleton;\n  } else {\n    return singletons_[name].lock();\n  }\n}\n\n} // namespace Singleton\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/singleton/manager_impl.h",
    "content": "#pragma once\n\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/thread/thread.h\"\n\n#include \"common/common/non_copyable.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Singleton {\n\n/**\n * Implementation of the singleton manager that checks the registry for name validity. It is\n * assumed the singleton manager is only used on the main thread so it is not thread safe. Asserts\n * verify that.\n */\nclass ManagerImpl : public Manager, NonCopyable {\npublic:\n  explicit ManagerImpl(Thread::ThreadFactory& thread_factory)\n      : thread_factory_(thread_factory), run_tid_(thread_factory.currentThreadId()) {}\n\n  // Singleton::Manager\n  InstanceSharedPtr get(const std::string& name, SingletonFactoryCb cb) override;\n\nprivate:\n  absl::node_hash_map<std::string, std::weak_ptr<Instance>> singletons_;\n  Thread::ThreadFactory& thread_factory_;\n  const Thread::ThreadId run_tid_;\n};\n\n} // namespace Singleton\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/singleton/threadsafe_singleton.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"common/common/assert.h\"\n\n#include \"absl/base/call_once.h\"\n\nnamespace Envoy {\n\n/**\n * ThreadSafeSingleton allows easy global cross-thread access to a non-const object.\n *\n * This singleton class should be used for singletons which must be globally\n * accessible but can not be marked as const. All functions in the singleton class\n * *must* be threadsafe.\n *\n * Note that there is heavy resistance in Envoy to adding this type of singleton\n * if data will persist with state changes across tests, as it becomes difficult\n * to write clean unit tests if a state change in one test will persist into\n * another test. Be wary of using it. A example of acceptable usage is OsSyscallsImpl,\n * where the functions are not strictly speaking const, but affect the OS rather than the\n * class itself. An example of unacceptable usage upstream would be for\n * globally accessible stat counters, it would have the aforementioned problem\n * where state \"leaks\" across tests.\n *\n * */\ntemplate <class T> class TestThreadsafeSingletonInjector;\ntemplate <class T> class ThreadSafeSingleton {\npublic:\n  static T& get() {\n    absl::call_once(ThreadSafeSingleton<T>::create_once_, &ThreadSafeSingleton<T>::Create);\n    return *ThreadSafeSingleton<T>::instance_;\n  }\n\nprotected:\n  template <typename A> friend class TestThreadsafeSingletonInjector;\n\n  static void Create() { instance_ = new T(); }\n\n  static absl::once_flag create_once_;\n  static T* instance_;\n};\n\ntemplate <class T> absl::once_flag ThreadSafeSingleton<T>::create_once_;\n\ntemplate <class T> T* ThreadSafeSingleton<T>::instance_ = nullptr;\n\n// An instance of a singleton class which has the same thread safety properties\n// as ThreadSafeSingleton, but must be created via initialize prior to access.\n//\n// As with ThreadSafeSingleton the use of this class is generally discouraged.\ntemplate <class T> class InjectableSingleton 
{\npublic:\n  static T& get() {\n    RELEASE_ASSERT(loader_ != nullptr, \"InjectableSingleton used prior to initialization\");\n    return *loader_;\n  }\n\n  static T* getExisting() { return loader_; }\n\n  static void initialize(T* value) {\n    RELEASE_ASSERT(value != nullptr, \"InjectableSingleton initialized with non-null value.\");\n    RELEASE_ASSERT(loader_ == nullptr, \"InjectableSingleton initialized multiple times.\");\n    loader_ = value;\n  }\n  static void clear() { loader_ = nullptr; }\n\nprotected:\n  static T* loader_;\n};\n\ntemplate <class T> T* InjectableSingleton<T>::loader_ = nullptr;\n\ntemplate <class T> class ScopedInjectableLoader {\npublic:\n  ScopedInjectableLoader(std::unique_ptr<T>&& instance) {\n    instance_ = std::move(instance);\n    InjectableSingleton<T>::initialize(instance_.get());\n  }\n  ~ScopedInjectableLoader() { InjectableSingleton<T>::clear(); }\n\nprivate:\n  std::unique_ptr<T> instance_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/ssl/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"tls_certificate_config_impl_lib\",\n    srcs = [\"tls_certificate_config_impl.cc\"],\n    hdrs = [\"tls_certificate_config_impl.h\"],\n    deps = [\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/ssl:tls_certificate_config_interface\",\n        \"//include/envoy/ssl/private_key:private_key_interface\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:datasource_lib\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"certificate_validation_context_config_impl_lib\",\n    srcs = [\"certificate_validation_context_config_impl.cc\"],\n    hdrs = [\"certificate_validation_context_config_impl.h\"],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/ssl:certificate_validation_context_config_interface\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:datasource_lib\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/ssl/certificate_validation_context_config_impl.cc",
    "content": "#include \"common/ssl/certificate_validation_context_config_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/datasource.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nstatic const std::string INLINE_STRING = \"<inline>\";\n\nCertificateValidationContextConfigImpl::CertificateValidationContextConfigImpl(\n    const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext& config,\n    Api::Api& api)\n    : ca_cert_(Config::DataSource::read(config.trusted_ca(), true, api)),\n      ca_cert_path_(Config::DataSource::getPath(config.trusted_ca())\n                        .value_or(ca_cert_.empty() ? EMPTY_STRING : INLINE_STRING)),\n      certificate_revocation_list_(Config::DataSource::read(config.crl(), true, api)),\n      certificate_revocation_list_path_(\n          Config::DataSource::getPath(config.crl())\n              .value_or(certificate_revocation_list_.empty() ? 
EMPTY_STRING : INLINE_STRING)),\n      verify_subject_alt_name_list_(\n          config.hidden_envoy_deprecated_verify_subject_alt_name().begin(),\n          config.hidden_envoy_deprecated_verify_subject_alt_name().end()),\n      subject_alt_name_matchers_(config.match_subject_alt_names().begin(),\n                                 config.match_subject_alt_names().end()),\n      verify_certificate_hash_list_(config.verify_certificate_hash().begin(),\n                                    config.verify_certificate_hash().end()),\n      verify_certificate_spki_list_(config.verify_certificate_spki().begin(),\n                                    config.verify_certificate_spki().end()),\n      allow_expired_certificate_(config.allow_expired_certificate()),\n      trust_chain_verification_(config.trust_chain_verification()) {\n  if (ca_cert_.empty()) {\n    if (!certificate_revocation_list_.empty()) {\n      throw EnvoyException(fmt::format(\"Failed to load CRL from {} without trusted CA\",\n                                       certificateRevocationListPath()));\n    }\n    if (!subject_alt_name_matchers_.empty() || !verify_subject_alt_name_list_.empty()) {\n      throw EnvoyException(\"SAN-based verification of peer certificates without \"\n                           \"trusted CA is insecure and not allowed\");\n    }\n    if (allow_expired_certificate_) {\n      throw EnvoyException(\"Certificate validity period is always ignored without trusted CA\");\n    }\n  }\n}\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/ssl/certificate_validation_context_config_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/ssl/certificate_validation_context_config.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nclass CertificateValidationContextConfigImpl : public CertificateValidationContextConfig {\npublic:\n  CertificateValidationContextConfigImpl(\n      const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext& config,\n      Api::Api& api);\n\n  const std::string& caCert() const override { return ca_cert_; }\n  const std::string& caCertPath() const override { return ca_cert_path_; }\n  const std::string& certificateRevocationList() const override {\n    return certificate_revocation_list_;\n  }\n  const std::string& certificateRevocationListPath() const final {\n    return certificate_revocation_list_path_;\n  }\n  const std::vector<std::string>& verifySubjectAltNameList() const override {\n    return verify_subject_alt_name_list_;\n  }\n  const std::vector<envoy::type::matcher::v3::StringMatcher>&\n  subjectAltNameMatchers() const override {\n    return subject_alt_name_matchers_;\n  }\n  const std::vector<std::string>& verifyCertificateHashList() const override {\n    return verify_certificate_hash_list_;\n  }\n  const std::vector<std::string>& verifyCertificateSpkiList() const override {\n    return verify_certificate_spki_list_;\n  }\n  bool allowExpiredCertificate() const override { return allow_expired_certificate_; }\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext::\n      TrustChainVerification\n      trustChainVerification() const override {\n    return trust_chain_verification_;\n  }\n\nprivate:\n  const std::string ca_cert_;\n  const std::string ca_cert_path_;\n  const std::string certificate_revocation_list_;\n  const std::string certificate_revocation_list_path_;\n  const std::vector<std::string> 
verify_subject_alt_name_list_;\n  const std::vector<envoy::type::matcher::v3::StringMatcher> subject_alt_name_matchers_;\n  const std::vector<std::string> verify_certificate_hash_list_;\n  const std::vector<std::string> verify_certificate_spki_list_;\n  const bool allow_expired_certificate_;\n  const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext::\n      TrustChainVerification trust_chain_verification_;\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/ssl/tls_certificate_config_impl.cc",
    "content": "#include \"common/ssl/tls_certificate_config_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/server/transport_socket_config.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/datasource.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nnamespace {\nstd::vector<uint8_t> readOcspStaple(const envoy::config::core::v3::DataSource& source,\n                                    Api::Api& api) {\n  std::string staple = Config::DataSource::read(source, true, api);\n  if (source.specifier_case() ==\n      envoy::config::core::v3::DataSource::SpecifierCase::kInlineString) {\n    throw EnvoyException(\"OCSP staple cannot be provided via inline_string\");\n  }\n\n  return {staple.begin(), staple.end()};\n}\n} // namespace\n\nstatic const std::string INLINE_STRING = \"<inline>\";\n\nTlsCertificateConfigImpl::TlsCertificateConfigImpl(\n    const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& config,\n    Server::Configuration::TransportSocketFactoryContext* factory_context, Api::Api& api)\n    : certificate_chain_(Config::DataSource::read(config.certificate_chain(), true, api)),\n      certificate_chain_path_(\n          Config::DataSource::getPath(config.certificate_chain())\n              .value_or(certificate_chain_.empty() ? EMPTY_STRING : INLINE_STRING)),\n      private_key_(Config::DataSource::read(config.private_key(), true, api)),\n      private_key_path_(Config::DataSource::getPath(config.private_key())\n                            .value_or(private_key_.empty() ? EMPTY_STRING : INLINE_STRING)),\n      password_(Config::DataSource::read(config.password(), true, api)),\n      password_path_(Config::DataSource::getPath(config.password())\n                         .value_or(password_.empty() ? 
EMPTY_STRING : INLINE_STRING)),\n      ocsp_staple_(readOcspStaple(config.ocsp_staple(), api)),\n      ocsp_staple_path_(Config::DataSource::getPath(config.ocsp_staple())\n                            .value_or(ocsp_staple_.empty() ? EMPTY_STRING : INLINE_STRING)),\n      private_key_method_(\n          factory_context != nullptr && config.has_private_key_provider()\n              ? factory_context->sslContextManager()\n                    .privateKeyMethodManager()\n                    .createPrivateKeyMethodProvider(config.private_key_provider(), *factory_context)\n              : nullptr) {\n  if (config.has_private_key_provider() && config.has_private_key()) {\n    throw EnvoyException(fmt::format(\n        \"Certificate configuration can't have both private_key and private_key_provider\"));\n  }\n  if (certificate_chain_.empty() || (private_key_.empty() && private_key_method_ == nullptr)) {\n    throw EnvoyException(fmt::format(\"Failed to load incomplete certificate from {}, {}\",\n                                     certificate_chain_path_, private_key_path_));\n  }\n}\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/ssl/tls_certificate_config_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/ssl/private_key/private_key.h\"\n#include \"envoy/ssl/tls_certificate_config.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nclass TlsCertificateConfigImpl : public TlsCertificateConfig {\npublic:\n  TlsCertificateConfigImpl(\n      const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& config,\n      Server::Configuration::TransportSocketFactoryContext* factory_context, Api::Api& api);\n\n  const std::string& certificateChain() const override { return certificate_chain_; }\n  const std::string& certificateChainPath() const override { return certificate_chain_path_; }\n  const std::string& privateKey() const override { return private_key_; }\n  const std::string& privateKeyPath() const override { return private_key_path_; }\n  const std::string& password() const override { return password_; }\n  const std::string& passwordPath() const override { return password_path_; }\n  const std::vector<uint8_t>& ocspStaple() const override { return ocsp_staple_; }\n  const std::string& ocspStaplePath() const override { return ocsp_staple_path_; }\n  Envoy::Ssl::PrivateKeyMethodProviderSharedPtr privateKeyMethod() const override {\n    return private_key_method_;\n  }\n\nprivate:\n  const std::string certificate_chain_;\n  const std::string certificate_chain_path_;\n  const std::string private_key_;\n  const std::string private_key_path_;\n  const std::string password_;\n  const std::string password_path_;\n  const std::vector<uint8_t> ocsp_staple_;\n  const std::string ocsp_staple_path_;\n  Envoy::Ssl::PrivateKeyMethodProviderSharedPtr private_key_method_{};\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"allocator_lib\",\n    srcs = [\"allocator_impl.cc\"],\n    hdrs = [\"allocator_impl.h\"],\n    deps = [\n        \":metric_impl_lib\",\n        \":stat_merger_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/common:thread_annotations\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/common:thread_synchronizer_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"histogram_lib\",\n    srcs = [\"histogram_impl.cc\"],\n    hdrs = [\"histogram_impl.h\"],\n    external_deps = [\n        \"libcircllhist\",\n    ],\n    deps = [\n        \":metric_impl_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/common:utility_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"isolated_store_lib\",\n    srcs = [\"isolated_store_impl.cc\"],\n    hdrs = [\"isolated_store_impl.h\"],\n    deps = [\n        \":histogram_lib\",\n        \":null_counter_lib\",\n        \":null_gauge_lib\",\n        \":null_text_readout_lib\",\n        \":scope_prefixer_lib\",\n        \":stats_lib\",\n        \":store_impl_lib\",\n        \":tag_utility_lib\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/stats:allocator_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"metric_impl_lib\",\n    srcs = [\"metric_impl.cc\"],\n    hdrs = [\"metric_impl.h\"],\n    deps = [\n        \":symbol_table_lib\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:assert_lib\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"tag_utility_lib\",\n    srcs = [\"tag_utility.cc\"],\n    hdrs = [\"tag_utility.h\"],\n    deps = [\n        \":symbol_table_lib\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:symbol_table_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"null_counter_lib\",\n    hdrs = [\"null_counter.h\"],\n    deps = [\n        \":metric_impl_lib\",\n        \":symbol_table_lib\",\n        \"//include/envoy/stats:stats_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"null_gauge_lib\",\n    hdrs = [\"null_gauge.h\"],\n    deps = [\n        \":metric_impl_lib\",\n        \":symbol_table_lib\",\n        \"//include/envoy/stats:stats_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"null_text_readout_lib\",\n    hdrs = [\"null_text_readout.h\"],\n    deps = [\n        \":metric_impl_lib\",\n        \":symbol_table_lib\",\n        \"//include/envoy/stats:stats_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"recent_lookups_lib\",\n    srcs = [\"recent_lookups.cc\"],\n    hdrs = [\"recent_lookups.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"store_impl_lib\",\n    hdrs = [\"store_impl.h\"],\n    deps = [\n        \":symbol_table_lib\",\n        \"//include/envoy/stats:stats_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"scope_prefixer_lib\",\n    srcs = [\"scope_prefixer.cc\"],\n    hdrs = [\"scope_prefixer.h\"],\n    deps = [\n        \":symbol_table_lib\",\n        \":utility_lib\",\n        \"//include/envoy/stats:stats_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stat_merger_lib\",\n    srcs = [\"stat_merger.cc\"],\n    hdrs = [\"stat_merger.h\"],\n    deps = [\n        \":symbol_table_lib\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stats_lib\",\n    deps = [\n        
\":histogram_lib\",\n        \":metric_impl_lib\",\n        \":symbol_table_lib\",\n        \":tag_extractor_lib\",\n        \":utility_lib\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/server:options_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/common:non_copyable\",\n        \"//source/common/common:perf_annotation_lib\",\n        \"//source/common/common:thread_annotations\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"symbol_table_lib\",\n    srcs = [\"symbol_table_impl.cc\"],\n    hdrs = [\"symbol_table_impl.h\"],\n    external_deps = [\"abseil_base\"],\n    deps = [\n        \":recent_lookups_lib\",\n        \"//include/envoy/stats:symbol_table_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:mem_block_builder_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tag_extractor_lib\",\n    srcs = [\"tag_extractor_impl.cc\"],\n    hdrs = [\"tag_extractor_impl.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:perf_annotation_lib\",\n        \"//source/common/common:regex_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tag_producer_lib\",\n    srcs = [\"tag_producer_impl.cc\"],\n    hdrs = [\"tag_producer_impl.h\"],\n    external_deps = [\"abseil_node_hash_set\"],\n    deps = [\n        \":tag_extractor_lib\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:perf_annotation_lib\",\n        \"//source/common/config:well_known_names\",\n        
\"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stats_matcher_lib\",\n    srcs = [\"stats_matcher_impl.cc\"],\n    hdrs = [\"stats_matcher_impl.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"thread_local_store_lib\",\n    srcs = [\"thread_local_store.cc\"],\n    hdrs = [\"thread_local_store.h\"],\n    deps = [\n        \":allocator_lib\",\n        \":histogram_lib\",\n        \":null_counter_lib\",\n        \":null_gauge_lib\",\n        \":null_text_readout_lib\",\n        \":scope_prefixer_lib\",\n        \":stats_lib\",\n        \":stats_matcher_lib\",\n        \":tag_producer_lib\",\n        \":tag_utility_lib\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"timespan_lib\",\n    srcs = [\"timespan_impl.cc\"],\n    hdrs = [\"timespan_impl.h\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    deps = [\":symbol_table_lib\"],\n)\n"
  },
  {
    "path": "source/common/stats/allocator_impl.cc",
    "content": "#include \"common/stats/allocator_impl.h\"\n\n#include <cstdint>\n\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/symbol_table.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/common/thread_annotations.h\"\n#include \"common/common/utility.h\"\n#include \"common/stats/metric_impl.h\"\n#include \"common/stats/stat_merger.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nconst char AllocatorImpl::DecrementToZeroSyncPoint[] = \"decrement-zero\";\n\nAllocatorImpl::~AllocatorImpl() {\n  ASSERT(counters_.empty());\n  ASSERT(gauges_.empty());\n}\n\n#ifndef ENVOY_CONFIG_COVERAGE\nvoid AllocatorImpl::debugPrint() {\n  Thread::LockGuard lock(mutex_);\n  for (Counter* counter : counters_) {\n    ENVOY_LOG_MISC(info, \"counter: {}\", symbolTable().toString(counter->statName()));\n  }\n  for (Gauge* gauge : gauges_) {\n    ENVOY_LOG_MISC(info, \"gauge: {}\", symbolTable().toString(gauge->statName()));\n  }\n}\n#endif\n\n// Counter, Gauge and TextReadout inherit from RefcountInterface and\n// Metric. 
MetricImpl takes care of most of the Metric API, but we need to cover\n// symbolTable() here, which we don't store directly, but get it via the alloc,\n// which we need in order to clean up the counter and gauge maps in that class\n// when they are destroyed.\n//\n// We implement the RefcountInterface API, using 16 bits that would otherwise be\n// wasted in the alignment padding next to flags_.\ntemplate <class BaseClass> class StatsSharedImpl : public MetricImpl<BaseClass> {\npublic:\n  StatsSharedImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name,\n                  const StatNameTagVector& stat_name_tags)\n      : MetricImpl<BaseClass>(name, tag_extracted_name, stat_name_tags, alloc.symbolTable()),\n        alloc_(alloc) {}\n\n  ~StatsSharedImpl() override {\n    // MetricImpl must be explicitly cleared() before destruction, otherwise it\n    // will not be able to access the SymbolTable& to free the symbols. An RAII\n    // alternative would be to store the SymbolTable reference in the\n    // MetricImpl, costing 8 bytes per stat.\n    this->clear(symbolTable());\n  }\n\n  // Metric\n  SymbolTable& symbolTable() final { return alloc_.symbolTable(); }\n  bool used() const override { return flags_ & Metric::Flags::Used; }\n\n  // RefcountInterface\n  void incRefCount() override { ++ref_count_; }\n  bool decRefCount() override {\n    // We must, unfortunately, hold the allocator's lock when decrementing the\n    // refcount. Otherwise another thread may simultaneously try to allocate the\n    // same name'd stat after we decrement it, and we'll wind up with a\n    // dtor/update race. To avoid this we must hold the lock until the stat is\n    // removed from the map.\n    //\n    // It might be worth thinking about a race-free way to decrement ref-counts\n    // without a lock, for the case where ref_count > 2, and we don't need to\n    // destruct anything. 
But it seems preferable to be conservative here,\n    // as stats will only go out of scope when a scope is destructed (during\n    // xDS) or during admin stats operations.\n    Thread::LockGuard lock(alloc_.mutex_);\n    ASSERT(ref_count_ >= 1);\n    if (--ref_count_ == 0) {\n      alloc_.sync().syncPoint(AllocatorImpl::DecrementToZeroSyncPoint);\n      removeFromSetLockHeld();\n      return true;\n    }\n    return false;\n  }\n  uint32_t use_count() const override { return ref_count_; }\n\n  /**\n   * We must atomically remove the counter/gauges from the allocator's sets when\n   * our ref-count decrement hits zero. The counters and gauges are held in\n   * distinct sets so we virtualize this removal helper.\n   */\n  virtual void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) PURE;\n\nprotected:\n  AllocatorImpl& alloc_;\n\n  // ref_count_ can be incremented as an atomic, without taking a new lock, as\n  // the critical 0->1 transition occurs in makeCounter and makeGauge, which\n  // already hold the lock. Increment also occurs when copying shared pointers,\n  // but these are always in transition to ref-count 2 or higher, and thus\n  // cannot race with a decrement to zero.\n  //\n  // However, we must hold alloc_.mutex_ when decrementing ref_count_ so that\n  // when it hits zero we can atomically remove it from alloc_.counters_ or\n  // alloc_.gauges_. 
We leave it atomic to avoid taking the lock on increment.\n  std::atomic<uint32_t> ref_count_{0};\n\n  std::atomic<uint16_t> flags_{0};\n};\n\nclass CounterImpl : public StatsSharedImpl<Counter> {\npublic:\n  CounterImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name,\n              const StatNameTagVector& stat_name_tags)\n      : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {}\n\n  void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override {\n    const size_t count = alloc_.counters_.erase(statName());\n    ASSERT(count == 1);\n  }\n\n  // Stats::Counter\n  void add(uint64_t amount) override {\n    // Note that a reader may see a new value but an old pending_increment_ or\n    // used(). From a system perspective this should be eventually consistent.\n    value_ += amount;\n    pending_increment_ += amount;\n    flags_ |= Flags::Used;\n  }\n  void inc() override { add(1); }\n  uint64_t latch() override { return pending_increment_.exchange(0); }\n  void reset() override { value_ = 0; }\n  uint64_t value() const override { return value_; }\n\nprivate:\n  std::atomic<uint64_t> value_{0};\n  std::atomic<uint64_t> pending_increment_{0};\n};\n\nclass GaugeImpl : public StatsSharedImpl<Gauge> {\npublic:\n  GaugeImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name,\n            const StatNameTagVector& stat_name_tags, ImportMode import_mode)\n      : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {\n    switch (import_mode) {\n    case ImportMode::Accumulate:\n      flags_ |= Flags::LogicAccumulate;\n      break;\n    case ImportMode::NeverImport:\n      flags_ |= Flags::NeverImport;\n      break;\n    case ImportMode::Uninitialized:\n      // Note that we don't clear any flag bits for import_mode==Uninitialized,\n      // as we may have an established import_mode when this stat was created in\n      // an alternate scope. 
See\n      // https://github.com/envoyproxy/envoy/issues/7227.\n      break;\n    }\n  }\n\n  void removeFromSetLockHeld() override ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) {\n    const size_t count = alloc_.gauges_.erase(statName());\n    ASSERT(count == 1);\n  }\n\n  // Stats::Gauge\n  void add(uint64_t amount) override {\n    child_value_ += amount;\n    flags_ |= Flags::Used;\n  }\n  void dec() override { sub(1); }\n  void inc() override { add(1); }\n  void set(uint64_t value) override {\n    child_value_ = value;\n    flags_ |= Flags::Used;\n  }\n  void sub(uint64_t amount) override {\n    ASSERT(child_value_ >= amount);\n    ASSERT(used() || amount == 0);\n    child_value_ -= amount;\n  }\n  uint64_t value() const override { return child_value_ + parent_value_; }\n\n  ImportMode importMode() const override {\n    if (flags_ & Flags::NeverImport) {\n      return ImportMode::NeverImport;\n    } else if (flags_ & Flags::LogicAccumulate) {\n      return ImportMode::Accumulate;\n    }\n    return ImportMode::Uninitialized;\n  }\n\n  void mergeImportMode(ImportMode import_mode) override {\n    ImportMode current = importMode();\n    if (current == import_mode) {\n      return;\n    }\n\n    switch (import_mode) {\n    case ImportMode::Uninitialized:\n      // mergeImportMode(ImportMode::Uninitialized) is called when merging an\n      // existing stat with importMode() == Accumulate or NeverImport.\n      break;\n    case ImportMode::Accumulate:\n      ASSERT(current == ImportMode::Uninitialized);\n      flags_ |= Flags::LogicAccumulate;\n      break;\n    case ImportMode::NeverImport:\n      ASSERT(current == ImportMode::Uninitialized);\n      // A previous revision of Envoy may have transferred a gauge that it\n      // thought was Accumulate. 
But the new version thinks it's NeverImport, so\n      // we clear the accumulated value.\n      parent_value_ = 0;\n      flags_ &= ~Flags::Used;\n      flags_ |= Flags::NeverImport;\n      break;\n    }\n  }\n\n  void setParentValue(uint64_t value) override { parent_value_ = value; }\n\nprivate:\n  std::atomic<uint64_t> parent_value_{0};\n  std::atomic<uint64_t> child_value_{0};\n};\n\nclass TextReadoutImpl : public StatsSharedImpl<TextReadout> {\npublic:\n  TextReadoutImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name,\n                  const StatNameTagVector& stat_name_tags)\n      : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {}\n\n  void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override {\n    const size_t count = alloc_.text_readouts_.erase(statName());\n    ASSERT(count == 1);\n  }\n\n  // Stats::TextReadout\n  void set(absl::string_view value) override {\n    std::string value_copy(value);\n    absl::MutexLock lock(&mutex_);\n    value_ = std::move(value_copy);\n  }\n  std::string value() const override {\n    absl::MutexLock lock(&mutex_);\n    return value_;\n  }\n\nprivate:\n  mutable absl::Mutex mutex_;\n  std::string value_ ABSL_GUARDED_BY(mutex_);\n};\n\nCounterSharedPtr AllocatorImpl::makeCounter(StatName name, StatName tag_extracted_name,\n                                            const StatNameTagVector& stat_name_tags) {\n  Thread::LockGuard lock(mutex_);\n  ASSERT(gauges_.find(name) == gauges_.end());\n  ASSERT(text_readouts_.find(name) == text_readouts_.end());\n  auto iter = counters_.find(name);\n  if (iter != counters_.end()) {\n    return CounterSharedPtr(*iter);\n  }\n  auto counter = CounterSharedPtr(makeCounterInternal(name, tag_extracted_name, stat_name_tags));\n  counters_.insert(counter.get());\n  return counter;\n}\n\nGaugeSharedPtr AllocatorImpl::makeGauge(StatName name, StatName tag_extracted_name,\n                                        const 
StatNameTagVector& stat_name_tags,\n                                        Gauge::ImportMode import_mode) {\n  Thread::LockGuard lock(mutex_);\n  ASSERT(counters_.find(name) == counters_.end());\n  ASSERT(text_readouts_.find(name) == text_readouts_.end());\n  auto iter = gauges_.find(name);\n  if (iter != gauges_.end()) {\n    return GaugeSharedPtr(*iter);\n  }\n  auto gauge =\n      GaugeSharedPtr(new GaugeImpl(name, *this, tag_extracted_name, stat_name_tags, import_mode));\n  gauges_.insert(gauge.get());\n  return gauge;\n}\n\nTextReadoutSharedPtr AllocatorImpl::makeTextReadout(StatName name, StatName tag_extracted_name,\n                                                    const StatNameTagVector& stat_name_tags) {\n  Thread::LockGuard lock(mutex_);\n  ASSERT(counters_.find(name) == counters_.end());\n  ASSERT(gauges_.find(name) == gauges_.end());\n  auto iter = text_readouts_.find(name);\n  if (iter != text_readouts_.end()) {\n    return TextReadoutSharedPtr(*iter);\n  }\n  auto text_readout =\n      TextReadoutSharedPtr(new TextReadoutImpl(name, *this, tag_extracted_name, stat_name_tags));\n  text_readouts_.insert(text_readout.get());\n  return text_readout;\n}\n\nbool AllocatorImpl::isMutexLockedForTest() {\n  bool locked = mutex_.tryLock();\n  if (locked) {\n    mutex_.unlock();\n  }\n  return !locked;\n}\n\nCounter* AllocatorImpl::makeCounterInternal(StatName name, StatName tag_extracted_name,\n                                            const StatNameTagVector& stat_name_tags) {\n  return new CounterImpl(name, *this, tag_extracted_name, stat_name_tags);\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/allocator_impl.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/stats/allocator.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/symbol_table.h\"\n\n#include \"common/common/thread_synchronizer.h\"\n#include \"common/stats/metric_impl.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass AllocatorImpl : public Allocator {\npublic:\n  static const char DecrementToZeroSyncPoint[];\n\n  AllocatorImpl(SymbolTable& symbol_table) : symbol_table_(symbol_table) {}\n  ~AllocatorImpl() override;\n\n  // Allocator\n  CounterSharedPtr makeCounter(StatName name, StatName tag_extracted_name,\n                               const StatNameTagVector& stat_name_tags) override;\n  GaugeSharedPtr makeGauge(StatName name, StatName tag_extracted_name,\n                           const StatNameTagVector& stat_name_tags,\n                           Gauge::ImportMode import_mode) override;\n  TextReadoutSharedPtr makeTextReadout(StatName name, StatName tag_extracted_name,\n                                       const StatNameTagVector& stat_name_tags) override;\n  SymbolTable& symbolTable() override { return symbol_table_; }\n  const SymbolTable& constSymbolTable() const override { return symbol_table_; }\n\n#ifndef ENVOY_CONFIG_COVERAGE\n  void debugPrint();\n#endif\n\n  /**\n   * @return a thread synchronizer object used for reproducing a race-condition in tests.\n   */\n  Thread::ThreadSynchronizer& sync() { return sync_; }\n\n  /**\n   * @return whether the allocator's mutex is locked, exposed for testing purposes.\n   */\n  bool isMutexLockedForTest();\n\nprotected:\n  virtual Counter* makeCounterInternal(StatName name, StatName tag_extracted_name,\n                                       const StatNameTagVector& stat_name_tags);\n\nprivate:\n  template <class BaseClass> friend class StatsSharedImpl;\n  friend class CounterImpl;\n  friend class GaugeImpl;\n  friend class 
TextReadoutImpl;\n  friend class NotifyingAllocatorImpl;\n\n  void removeCounterFromSetLockHeld(Counter* counter) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);\n  void removeGaugeFromSetLockHeld(Gauge* gauge) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);\n  void removeTextReadoutFromSetLockHeld(TextReadout* text_readout) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);\n\n  StatSet<Counter> counters_ ABSL_GUARDED_BY(mutex_);\n  StatSet<Gauge> gauges_ ABSL_GUARDED_BY(mutex_);\n  StatSet<TextReadout> text_readouts_ ABSL_GUARDED_BY(mutex_);\n\n  SymbolTable& symbol_table_;\n\n  // A mutex is needed here to protect the stats_ object from both\n  // alloc() and free() operations. Although alloc() operations are called under existing locking,\n  // free() operations are made from the destructors of the individual stat objects, which are not\n  // protected by locks.\n  Thread::MutexBasicLockable mutex_;\n\n  Thread::ThreadSynchronizer sync_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/histogram_impl.cc",
    "content": "#include \"common/stats/histogram_impl.h\"\n\n#include <algorithm>\n#include <string>\n\n#include \"common/common/utility.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nnamespace {\nconst ConstSupportedBuckets default_buckets{};\n}\n\nHistogramStatisticsImpl::HistogramStatisticsImpl()\n    : supported_buckets_(default_buckets), computed_quantiles_(supportedQuantiles().size(), 0.0) {}\n\nHistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* histogram_ptr,\n                                                 ConstSupportedBuckets& supported_buckets)\n    : supported_buckets_(supported_buckets),\n      computed_quantiles_(HistogramStatisticsImpl::supportedQuantiles().size(), 0.0) {\n  hist_approx_quantile(histogram_ptr, supportedQuantiles().data(),\n                       HistogramStatisticsImpl::supportedQuantiles().size(),\n                       computed_quantiles_.data());\n\n  sample_count_ = hist_sample_count(histogram_ptr);\n  sample_sum_ = hist_approx_sum(histogram_ptr);\n\n  computed_buckets_.reserve(supported_buckets_.size());\n  for (const auto bucket : supported_buckets_) {\n    computed_buckets_.emplace_back(hist_approx_count_below(histogram_ptr, bucket));\n  }\n}\n\nconst std::vector<double>& HistogramStatisticsImpl::supportedQuantiles() const {\n  CONSTRUCT_ON_FIRST_USE(std::vector<double>,\n                         {0, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 0.995, 0.999, 1});\n}\n\nstd::string HistogramStatisticsImpl::quantileSummary() const {\n  std::vector<std::string> summary;\n  const std::vector<double>& supported_quantiles = supportedQuantiles();\n  summary.reserve(supported_quantiles.size());\n  for (size_t i = 0; i < supported_quantiles.size(); ++i) {\n    summary.push_back(\n        fmt::format(\"P{:g}: {:g}\", 100 * supported_quantiles[i], computed_quantiles_[i]));\n  }\n  return absl::StrJoin(summary, \", \");\n}\n\nstd::string HistogramStatisticsImpl::bucketSummary() const {\n  
std::vector<std::string> bucket_summary;\n  ConstSupportedBuckets& supported_buckets = supportedBuckets();\n  bucket_summary.reserve(supported_buckets.size());\n  for (size_t i = 0; i < supported_buckets.size(); ++i) {\n    bucket_summary.push_back(fmt::format(\"B{:g}: {}\", supported_buckets[i], computed_buckets_[i]));\n  }\n  return absl::StrJoin(bucket_summary, \", \");\n}\n\n/**\n * Clears the old computed values and refreshes it with values computed from passed histogram.\n */\nvoid HistogramStatisticsImpl::refresh(const histogram_t* new_histogram_ptr) {\n  std::fill(computed_quantiles_.begin(), computed_quantiles_.end(), 0.0);\n  ASSERT(supportedQuantiles().size() == computed_quantiles_.size());\n  hist_approx_quantile(new_histogram_ptr, supportedQuantiles().data(), supportedQuantiles().size(),\n                       computed_quantiles_.data());\n\n  sample_count_ = hist_sample_count(new_histogram_ptr);\n  sample_sum_ = hist_approx_sum(new_histogram_ptr);\n\n  ASSERT(supportedBuckets().size() == computed_buckets_.size());\n  computed_buckets_.clear();\n  ConstSupportedBuckets& supported_buckets = supportedBuckets();\n  computed_buckets_.reserve(supported_buckets.size());\n  for (const auto bucket : supported_buckets) {\n    computed_buckets_.emplace_back(hist_approx_count_below(new_histogram_ptr, bucket));\n  }\n}\n\nHistogramSettingsImpl::HistogramSettingsImpl(const envoy::config::metrics::v3::StatsConfig& config)\n    : configs_([&config]() {\n        std::vector<Config> configs;\n        for (const auto& matcher : config.histogram_bucket_settings()) {\n          std::vector<double> buckets{matcher.buckets().begin(), matcher.buckets().end()};\n          std::sort(buckets.begin(), buckets.end());\n          configs.emplace_back(matcher.match(), std::move(buckets));\n        }\n\n        return configs;\n      }()) {}\n\nconst ConstSupportedBuckets& HistogramSettingsImpl::buckets(absl::string_view stat_name) const {\n  for (const auto& config : configs_) {\n 
   if (config.first.match(stat_name)) {\n      return config.second;\n    }\n  }\n  return defaultBuckets();\n}\n\nconst ConstSupportedBuckets& HistogramSettingsImpl::defaultBuckets() {\n  CONSTRUCT_ON_FIRST_USE(ConstSupportedBuckets,\n                         {0.5, 1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 30000,\n                          60000, 300000, 600000, 1800000, 3600000});\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/histogram_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/store.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/common/non_copyable.h\"\n#include \"common/stats/metric_impl.h\"\n\n#include \"circllhist.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass HistogramSettingsImpl : public HistogramSettings {\npublic:\n  HistogramSettingsImpl() = default;\n  HistogramSettingsImpl(const envoy::config::metrics::v3::StatsConfig& config);\n\n  // HistogramSettings\n  const ConstSupportedBuckets& buckets(absl::string_view stat_name) const override;\n\n  static ConstSupportedBuckets& defaultBuckets();\n\nprivate:\n  using Config = std::pair<Matchers::StringMatcherImpl, ConstSupportedBuckets>;\n  const std::vector<Config> configs_{};\n};\n\n/**\n * Implementation of HistogramStatistics for circllhist.\n */\nclass HistogramStatisticsImpl : public HistogramStatistics, NonCopyable {\npublic:\n  HistogramStatisticsImpl();\n\n  /**\n   * HistogramStatisticsImpl object is constructed using the passed in histogram.\n   * @param histogram_ptr pointer to the histogram for which stats will be calculated. 
This pointer\n   * will not be retained.\n   */\n  HistogramStatisticsImpl(\n      const histogram_t* histogram_ptr,\n      ConstSupportedBuckets& supported_buckets = HistogramSettingsImpl::defaultBuckets());\n\n  static ConstSupportedBuckets& defaultSupportedBuckets();\n\n  void refresh(const histogram_t* new_histogram_ptr);\n\n  // HistogramStatistics\n  std::string quantileSummary() const override;\n  std::string bucketSummary() const override;\n  const std::vector<double>& supportedQuantiles() const final;\n  const std::vector<double>& computedQuantiles() const override { return computed_quantiles_; }\n  ConstSupportedBuckets& supportedBuckets() const override { return supported_buckets_; }\n  const std::vector<uint64_t>& computedBuckets() const override { return computed_buckets_; }\n  uint64_t sampleCount() const override { return sample_count_; }\n  double sampleSum() const override { return sample_sum_; }\n\nprivate:\n  ConstSupportedBuckets& supported_buckets_;\n  std::vector<double> computed_quantiles_;\n  std::vector<uint64_t> computed_buckets_;\n  uint64_t sample_count_;\n  double sample_sum_;\n};\n\nclass HistogramImplHelper : public MetricImpl<Histogram> {\npublic:\n  HistogramImplHelper(StatName name, StatName tag_extracted_name,\n                      const StatNameTagVector& stat_name_tags, SymbolTable& symbol_table)\n      : MetricImpl<Histogram>(name, tag_extracted_name, stat_name_tags, symbol_table) {}\n  HistogramImplHelper(SymbolTable& symbol_table) : MetricImpl<Histogram>(symbol_table) {}\n\n  // RefcountInterface\n  void incRefCount() override { refcount_helper_.incRefCount(); }\n  bool decRefCount() override { return refcount_helper_.decRefCount(); }\n  uint32_t use_count() const override { return refcount_helper_.use_count(); }\n\nprivate:\n  RefcountHelper refcount_helper_;\n};\n\n/**\n * Histogram implementation for the heap.\n */\nclass HistogramImpl : public HistogramImplHelper {\npublic:\n  HistogramImpl(StatName name, Unit unit, 
Store& parent, StatName tag_extracted_name,\n                const StatNameTagVector& stat_name_tags)\n      : HistogramImplHelper(name, tag_extracted_name, stat_name_tags, parent.symbolTable()),\n        unit_(unit), parent_(parent) {}\n  ~HistogramImpl() override {\n    // We must explicitly free the StatName here in order to supply the\n    // SymbolTable reference. An RAII alternative would be to store a\n    // reference to the SymbolTable in MetricImpl, which would cost 8 bytes\n    // per stat.\n    MetricImpl::clear(symbolTable());\n  }\n\n  // Stats::Histogram\n  Unit unit() const override { return unit_; };\n  void recordValue(uint64_t value) override { parent_.deliverHistogramToSinks(*this, value); }\n\n  bool used() const override { return true; }\n  SymbolTable& symbolTable() final { return parent_.symbolTable(); }\n\nprivate:\n  Unit unit_;\n\n  // This is used for delivering the histogram data to sinks.\n  Store& parent_;\n};\n\n/**\n * Null histogram implementation.\n * No-ops on all calls and requires no underlying metric or data.\n */\nclass NullHistogramImpl : public HistogramImplHelper {\npublic:\n  explicit NullHistogramImpl(SymbolTable& symbol_table)\n      : HistogramImplHelper(symbol_table), symbol_table_(symbol_table) {}\n  ~NullHistogramImpl() override { MetricImpl::clear(symbol_table_); }\n\n  bool used() const override { return false; }\n  SymbolTable& symbolTable() override { return symbol_table_; }\n\n  Unit unit() const override { return Unit::Null; };\n  void recordValue(uint64_t) override {}\n\nprivate:\n  SymbolTable& symbol_table_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/isolated_store_impl.cc",
    "content": "#include \"common/stats/isolated_store_impl.h\"\n\n#include <algorithm>\n#include <cstring>\n#include <string>\n\n#include \"common/common/utility.h\"\n#include \"common/stats/histogram_impl.h\"\n#include \"common/stats/scope_prefixer.h\"\n#include \"common/stats/utility.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nIsolatedStoreImpl::IsolatedStoreImpl() : IsolatedStoreImpl(std::make_unique<SymbolTableImpl>()) {}\n\nIsolatedStoreImpl::IsolatedStoreImpl(std::unique_ptr<SymbolTable>&& symbol_table)\n    : IsolatedStoreImpl(*symbol_table) {\n  symbol_table_storage_ = std::move(symbol_table);\n}\n\nIsolatedStoreImpl::IsolatedStoreImpl(SymbolTable& symbol_table)\n    : StoreImpl(symbol_table), alloc_(symbol_table),\n      counters_([this](StatName name) -> CounterSharedPtr {\n        return alloc_.makeCounter(name, name, StatNameTagVector{});\n      }),\n      gauges_([this](StatName name, Gauge::ImportMode import_mode) -> GaugeSharedPtr {\n        return alloc_.makeGauge(name, name, StatNameTagVector{}, import_mode);\n      }),\n      histograms_([this](StatName name, Histogram::Unit unit) -> HistogramSharedPtr {\n        return HistogramSharedPtr(new HistogramImpl(name, unit, *this, name, StatNameTagVector{}));\n      }),\n      text_readouts_([this](StatName name, TextReadout::Type) -> TextReadoutSharedPtr {\n        return alloc_.makeTextReadout(name, name, StatNameTagVector{});\n      }),\n      null_counter_(new NullCounterImpl(symbol_table)),\n      null_gauge_(new NullGaugeImpl(symbol_table)) {}\n\nScopePtr IsolatedStoreImpl::createScope(const std::string& name) {\n  return std::make_unique<ScopePrefixer>(name, *this);\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/isolated_store_impl.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <cstring>\n#include <string>\n\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/store.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/stats/allocator_impl.h\"\n#include \"common/stats/null_counter.h\"\n#include \"common/stats/null_gauge.h\"\n#include \"common/stats/store_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/tag_utility.h\"\n#include \"common/stats/utility.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * A stats cache template that is used by the isolated store.\n */\ntemplate <class Base> class IsolatedStatsCache {\npublic:\n  using CounterAllocator = std::function<RefcountPtr<Base>(StatName name)>;\n  using GaugeAllocator = std::function<RefcountPtr<Base>(StatName, Gauge::ImportMode)>;\n  using HistogramAllocator = std::function<RefcountPtr<Base>(StatName, Histogram::Unit)>;\n  using TextReadoutAllocator = std::function<RefcountPtr<Base>(StatName name, TextReadout::Type)>;\n  using BaseOptConstRef = absl::optional<std::reference_wrapper<const Base>>;\n\n  IsolatedStatsCache(CounterAllocator alloc) : counter_alloc_(alloc) {}\n  IsolatedStatsCache(GaugeAllocator alloc) : gauge_alloc_(alloc) {}\n  IsolatedStatsCache(HistogramAllocator alloc) : histogram_alloc_(alloc) {}\n  IsolatedStatsCache(TextReadoutAllocator alloc) : text_readout_alloc_(alloc) {}\n\n  Base& get(StatName name) {\n    auto stat = stats_.find(name);\n    if (stat != stats_.end()) {\n      return *stat->second;\n    }\n\n    RefcountPtr<Base> new_stat = counter_alloc_(name);\n    stats_.emplace(new_stat->statName(), new_stat);\n    return *new_stat;\n  }\n\n  Base& get(StatName name, Gauge::ImportMode import_mode) {\n    auto stat = stats_.find(name);\n    if (stat != stats_.end()) {\n      return *stat->second;\n    }\n\n    RefcountPtr<Base> new_stat = gauge_alloc_(name, import_mode);\n    
stats_.emplace(new_stat->statName(), new_stat);\n    return *new_stat;\n  }\n\n  Base& get(StatName name, Histogram::Unit unit) {\n    auto stat = stats_.find(name);\n    if (stat != stats_.end()) {\n      return *stat->second;\n    }\n\n    RefcountPtr<Base> new_stat = histogram_alloc_(name, unit);\n    stats_.emplace(new_stat->statName(), new_stat);\n    return *new_stat;\n  }\n\n  Base& get(StatName name, TextReadout::Type type) {\n    auto stat = stats_.find(name);\n    if (stat != stats_.end()) {\n      return *stat->second;\n    }\n\n    RefcountPtr<Base> new_stat = text_readout_alloc_(name, type);\n    stats_.emplace(new_stat->statName(), new_stat);\n    return *new_stat;\n  }\n\n  std::vector<RefcountPtr<Base>> toVector() const {\n    std::vector<RefcountPtr<Base>> vec;\n    vec.reserve(stats_.size());\n    for (auto& stat : stats_) {\n      vec.push_back(stat.second);\n    }\n\n    return vec;\n  }\n\n  bool iterate(const IterateFn<Base>& fn) const {\n    for (auto& stat : stats_) {\n      if (!fn(stat.second)) {\n        return false;\n      }\n    }\n    return true;\n  }\n\nprivate:\n  friend class IsolatedStoreImpl;\n\n  BaseOptConstRef find(StatName name) const {\n    auto stat = stats_.find(name);\n    if (stat == stats_.end()) {\n      return absl::nullopt;\n    }\n    return std::cref(*stat->second);\n  }\n\n  StatNameHashMap<RefcountPtr<Base>> stats_;\n  CounterAllocator counter_alloc_;\n  GaugeAllocator gauge_alloc_;\n  HistogramAllocator histogram_alloc_;\n  TextReadoutAllocator text_readout_alloc_;\n};\n\nclass IsolatedStoreImpl : public StoreImpl {\npublic:\n  IsolatedStoreImpl();\n  explicit IsolatedStoreImpl(SymbolTable& symbol_table);\n\n  // Stats::Scope\n  Counter& counterFromStatNameWithTags(const StatName& name,\n                                       StatNameTagVectorOptConstRef tags) override {\n    TagUtility::TagStatNameJoiner joiner(name, tags, symbolTable());\n    Counter& counter = counters_.get(joiner.nameWithTags());\n    
return counter;\n  }\n  ScopePtr createScope(const std::string& name) override;\n  void deliverHistogramToSinks(const Histogram&, uint64_t) override {}\n  Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                   Gauge::ImportMode import_mode) override {\n    TagUtility::TagStatNameJoiner joiner(name, tags, symbolTable());\n    Gauge& gauge = gauges_.get(joiner.nameWithTags(), import_mode);\n    gauge.mergeImportMode(import_mode);\n    return gauge;\n  }\n  NullCounterImpl& nullCounter() { return *null_counter_; }\n  NullGaugeImpl& nullGauge(const std::string&) override { return *null_gauge_; }\n  Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                           Histogram::Unit unit) override {\n    TagUtility::TagStatNameJoiner joiner(name, tags, symbolTable());\n    Histogram& histogram = histograms_.get(joiner.nameWithTags(), unit);\n    return histogram;\n  }\n  TextReadout& textReadoutFromStatNameWithTags(const StatName& name,\n                                               StatNameTagVectorOptConstRef tags) override {\n    TagUtility::TagStatNameJoiner joiner(name, tags, symbolTable());\n    TextReadout& text_readout =\n        text_readouts_.get(joiner.nameWithTags(), TextReadout::Type::Default);\n    return text_readout;\n  }\n  CounterOptConstRef findCounter(StatName name) const override { return counters_.find(name); }\n  GaugeOptConstRef findGauge(StatName name) const override { return gauges_.find(name); }\n  HistogramOptConstRef findHistogram(StatName name) const override {\n    return histograms_.find(name);\n  }\n  TextReadoutOptConstRef findTextReadout(StatName name) const override {\n    return text_readouts_.find(name);\n  }\n\n  bool iterate(const IterateFn<Counter>& fn) const override { return counters_.iterate(fn); }\n  bool iterate(const IterateFn<Gauge>& fn) const override { return 
gauges_.iterate(fn); }\n  bool iterate(const IterateFn<Histogram>& fn) const override { return histograms_.iterate(fn); }\n  bool iterate(const IterateFn<TextReadout>& fn) const override {\n    return text_readouts_.iterate(fn);\n  }\n\n  // Stats::Store\n  std::vector<CounterSharedPtr> counters() const override { return counters_.toVector(); }\n  std::vector<GaugeSharedPtr> gauges() const override {\n    // TODO(jmarantz): should we filter out gauges where\n    // gauge.importMode() != Gauge::ImportMode::Uninitialized ?\n    // I don't think this matters because that should only occur for gauges\n    // received in a hot-restart transfer, and isolated-store gauges should\n    // never be transmitted that way.\n    return gauges_.toVector();\n  }\n  std::vector<ParentHistogramSharedPtr> histograms() const override {\n    return std::vector<ParentHistogramSharedPtr>{};\n  }\n  std::vector<TextReadoutSharedPtr> textReadouts() const override {\n    return text_readouts_.toVector();\n  }\n\n  Counter& counterFromString(const std::string& name) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return counterFromStatName(storage.statName());\n  }\n  Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return gaugeFromStatName(storage.statName(), import_mode);\n  }\n  Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return histogramFromStatName(storage.statName(), unit);\n  }\n  TextReadout& textReadoutFromString(const std::string& name) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return textReadoutFromStatName(storage.statName());\n  }\n\nprivate:\n  IsolatedStoreImpl(std::unique_ptr<SymbolTable>&& symbol_table);\n\n  SymbolTablePtr symbol_table_storage_;\n  AllocatorImpl alloc_;\n  IsolatedStatsCache<Counter> 
counters_;\n  IsolatedStatsCache<Gauge> gauges_;\n  IsolatedStatsCache<Histogram> histograms_;\n  IsolatedStatsCache<TextReadout> text_readouts_;\n  RefcountPtr<NullCounterImpl> null_counter_;\n  RefcountPtr<NullGaugeImpl> null_gauge_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/metric_impl.cc",
    "content": "#include \"common/stats/metric_impl.h\"\n\n#include \"envoy/stats/tag.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nMetricHelper::~MetricHelper() {\n  // The storage must be cleaned by a subclass of MetricHelper in its\n  // destructor, because the symbol-table is owned by the subclass.\n  // Simply call MetricHelper::clear() in the subclass dtor.\n  ASSERT(!stat_names_.populated());\n}\n\nMetricHelper::MetricHelper(StatName name, StatName tag_extracted_name,\n                           const StatNameTagVector& stat_name_tags, SymbolTable& symbol_table) {\n  // Encode all the names and tags into transient storage so we can count the\n  // required bytes. 2 is added to account for the name and tag_extracted_name,\n  // and we multiply the number of tags by 2 to account for the name and value\n  // of each tag.\n  const uint32_t num_names = 2 + 2 * stat_name_tags.size();\n  absl::FixedArray<StatName> names(num_names);\n  names[0] = name;\n  names[1] = tag_extracted_name;\n  int index = 1;\n  for (auto& stat_name_tag : stat_name_tags) {\n    names[++index] = stat_name_tag.first;\n    names[++index] = stat_name_tag.second;\n  }\n  symbol_table.populateList(names.begin(), num_names, stat_names_);\n}\n\nStatName MetricHelper::statName() const {\n  StatName stat_name;\n  stat_names_.iterate([&stat_name](StatName s) -> bool {\n    stat_name = s;\n    return false; // Returning 'false' stops the iteration.\n  });\n  return stat_name;\n}\n\nStatName MetricHelper::tagExtractedStatName() const {\n  // The name is the first element in stat_names_. The second is the\n  // tag-extracted-name. 
We don't have random access in that format,\n  // so we iterate through them, skipping the first element (name),\n  // and terminating the iteration after capturing the tag-extracted\n  // name by returning false from the lambda.\n  StatName tag_extracted_stat_name;\n  bool skip = true;\n  stat_names_.iterate([&tag_extracted_stat_name, &skip](StatName s) -> bool {\n    if (skip) {\n      skip = false;\n      return true;\n    }\n    tag_extracted_stat_name = s;\n    return false; // Returning 'false' stops the iteration.\n  });\n  return tag_extracted_stat_name;\n}\n\nvoid MetricHelper::iterateTagStatNames(const Metric::TagStatNameIterFn& fn) const {\n  enum { Name, TagExtractedName, TagName, TagValue } state = Name;\n  StatName tag_name;\n\n  // StatNameList maintains a linear ordered collection of StatNames, and we\n  // are mapping that into a tag-extracted name (the first element), followed\n  // by alternating TagName and TagValue. So we use a little state machine\n  // as we iterate through the stat_names_.\n  stat_names_.iterate([&state, &tag_name, &fn](StatName stat_name) -> bool {\n    switch (state) {\n    case Name:\n      state = TagExtractedName;\n      break;\n    case TagExtractedName:\n      state = TagName;\n      break;\n    case TagName:\n      tag_name = stat_name;\n      state = TagValue;\n      break;\n    case TagValue:\n      state = TagName;\n      if (!fn(tag_name, stat_name)) {\n        return false; // early exit.\n      }\n      break;\n    }\n    return true;\n  });\n  ASSERT(state != TagValue);\n}\n\nTagVector MetricHelper::tags(const SymbolTable& symbol_table) const {\n  TagVector tags;\n  iterateTagStatNames([&tags, &symbol_table](StatName name, StatName value) -> bool {\n    tags.emplace_back(Tag{symbol_table.toString(name), symbol_table.toString(value)});\n    return true;\n  });\n  return tags;\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/metric_impl.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/stats/allocator.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/tag.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Helper class for implementing Metrics. This does not participate in any\n * inheritance chains, but can be instantiated by classes that do. It just\n * implements the mechanics of representing the name, tag-extracted-name,\n * and all tags as a StatNameList.\n */\nclass MetricHelper {\npublic:\n  MetricHelper(StatName name, StatName tag_extracted_name, const StatNameTagVector& stat_name_tags,\n               SymbolTable& symbol_table);\n  ~MetricHelper();\n\n  StatName statName() const;\n  std::string name(const SymbolTable& symbol_table) const;\n  TagVector tags(const SymbolTable& symbol_table) const;\n  StatName tagExtractedStatName() const;\n  void iterateTagStatNames(const Metric::TagStatNameIterFn& fn) const;\n  void clear(SymbolTable& symbol_table) { stat_names_.clear(symbol_table); }\n\n  // Hasher for metrics.\n  struct Hash {\n    using is_transparent = void; // NOLINT(readability-identifier-naming)\n    size_t operator()(const Metric* a) const { return a->statName().hash(); }\n    size_t operator()(StatName a) const { return a.hash(); }\n  };\n\n  // Comparator for metrics.\n  struct Compare {\n    using is_transparent = void; // NOLINT(readability-identifier-naming)\n    bool operator()(const Metric* a, const Metric* b) const {\n      return a->statName() == b->statName();\n    }\n    bool operator()(const Metric* a, StatName b) const { return a->statName() == b; }\n  };\n\nprivate:\n  StatNameList stat_names_;\n};\n\n// An unordered set of stat pointers. 
which keys off Metric::statName().\n// This necessitates a custom comparator and hasher, using the StatNamePtr's\n// own StatNamePtrHash and StatNamePtrCompare operators.\n//\n// This is used by AllocatorImpl for counters, gauges, and text-readouts, and\n// is also used by thread_local_store.h for histograms.\ntemplate <class StatType>\nusing StatSet = absl::flat_hash_set<StatType*, MetricHelper::Hash, MetricHelper::Compare>;\n\n/**\n * Partial implementation of the Metric interface on behalf of Counters, Gauges,\n * and Histograms. It leaves symbolTable() unimplemented so that implementations\n * of stats managed by an allocator, specifically Counters and Gauges, can keep\n * a reference to the allocator instead, and derive the symbolTable() from that.\n *\n * We templatize on the base class (Counter, Gauge, or Histogram), rather than\n * using multiple virtual inheritance, as this avoids the overhead of an extra\n * vptr per instance. This is important for stats because there can be many\n * stats in systems with large numbers of clusters and hosts, and a few 8-byte\n * pointers per-stat here and there can add up to significant amounts of memory.\n *\n * Note the delegation of the implementation to a helper class, which is neither\n * templatized nor virtual. 
This avoids having the compiler elaborate complete\n * copies of the underlying implementation for each base class during template\n * expansion.\n */\ntemplate <class BaseClass> class MetricImpl : public BaseClass {\npublic:\n  MetricImpl(StatName name, StatName tag_extracted_name, const StatNameTagVector& stat_name_tags,\n             SymbolTable& symbol_table)\n      : helper_(name, tag_extracted_name, stat_name_tags, symbol_table) {}\n\n  // Empty construction of a MetricImpl; used for null stats.\n  explicit MetricImpl(SymbolTable& symbol_table)\n      : MetricImpl(StatName(), StatName(), StatNameTagVector(), symbol_table) {}\n\n  TagVector tags() const override { return helper_.tags(constSymbolTable()); }\n  StatName statName() const override { return helper_.statName(); }\n  StatName tagExtractedStatName() const override { return helper_.tagExtractedStatName(); }\n  void iterateTagStatNames(const Metric::TagStatNameIterFn& fn) const override {\n    helper_.iterateTagStatNames(fn);\n  }\n\n  const SymbolTable& constSymbolTable() const override {\n    // Cast our 'this', which is of type `const MetricImpl*` to a non-const\n    // pointer, so we can use it to call the subclass implementation of\n    // symbolTable(). That will be returned as a non-const SymbolTable&,\n    // which will become const on return.\n    //\n    // This pattern is used to share a single non-trivial implementation to\n    // provide const and non-const variants of a method.\n    return const_cast<MetricImpl*>(this)->symbolTable();\n  }\n  std::string name() const override { return constSymbolTable().toString(this->statName()); }\n  std::string tagExtractedName() const override {\n    return constSymbolTable().toString(this->tagExtractedStatName());\n  }\n\nprotected:\n  void clear(SymbolTable& symbol_table) { helper_.clear(symbol_table); }\n\nprivate:\n  MetricHelper helper_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/null_counter.h",
    "content": "#pragma once\n\n#include \"envoy/stats/stats.h\"\n\n#include \"common/stats/metric_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Null counter implementation.\n * No-ops on all calls and requires no underlying metric or data.\n */\nclass NullCounterImpl : public MetricImpl<Counter> {\npublic:\n  explicit NullCounterImpl(SymbolTable& symbol_table)\n      : MetricImpl<Counter>(symbol_table), symbol_table_(symbol_table) {}\n  ~NullCounterImpl() override {\n    // MetricImpl must be explicitly cleared() before destruction, otherwise it\n    // will not be able to access the SymbolTable& to free the symbols. An RAII\n    // alternative would be to store the SymbolTable reference in the\n    // MetricImpl, costing 8 bytes per stat.\n    MetricImpl::clear(symbol_table_);\n  }\n\n  void add(uint64_t) override {}\n  void inc() override {}\n  uint64_t latch() override { return 0; }\n  void reset() override {}\n  uint64_t value() const override { return 0; }\n\n  // Metric\n  bool used() const override { return false; }\n  SymbolTable& symbolTable() override { return symbol_table_; }\n\n  // RefcountInterface\n  void incRefCount() override { refcount_helper_.incRefCount(); }\n  bool decRefCount() override { return refcount_helper_.decRefCount(); }\n  uint32_t use_count() const override { return refcount_helper_.use_count(); }\n\nprivate:\n  RefcountHelper refcount_helper_;\n  SymbolTable& symbol_table_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/null_gauge.h",
    "content": "#pragma once\n\n#include \"envoy/stats/stats.h\"\n\n#include \"common/stats/metric_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Null gauge implementation.\n * No-ops on all calls and requires no underlying metric or data.\n */\nclass NullGaugeImpl : public MetricImpl<Gauge> {\npublic:\n  explicit NullGaugeImpl(SymbolTable& symbol_table)\n      : MetricImpl<Gauge>(symbol_table), symbol_table_(symbol_table) {}\n  ~NullGaugeImpl() override {\n    // MetricImpl must be explicitly cleared() before destruction, otherwise it\n    // will not be able to access the SymbolTable& to free the symbols. An RAII\n    // alternative would be to store the SymbolTable reference in the\n    // MetricImpl, costing 8 bytes per stat.\n    MetricImpl::clear(symbol_table_);\n  }\n\n  void add(uint64_t) override {}\n  void inc() override {}\n  void dec() override {}\n  void set(uint64_t) override {}\n  void setParentValue(uint64_t) override {}\n  void sub(uint64_t) override {}\n  uint64_t value() const override { return 0; }\n  ImportMode importMode() const override { return ImportMode::NeverImport; }\n  void mergeImportMode(ImportMode /* import_mode */) override {}\n\n  // Metric\n  bool used() const override { return false; }\n  SymbolTable& symbolTable() override { return symbol_table_; }\n\n  // RefcountInterface\n  void incRefCount() override { refcount_helper_.incRefCount(); }\n  bool decRefCount() override { return refcount_helper_.decRefCount(); }\n  uint32_t use_count() const override { return refcount_helper_.use_count(); }\n\nprivate:\n  RefcountHelper refcount_helper_;\n  SymbolTable& symbol_table_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/null_text_readout.h",
    "content": "#pragma once\n\n#include \"envoy/stats/stats.h\"\n\n#include \"common/stats/metric_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Null text readout implementation.\n * No-ops on all calls and requires no underlying metric or data.\n */\nclass NullTextReadoutImpl : public MetricImpl<TextReadout> {\npublic:\n  explicit NullTextReadoutImpl(SymbolTable& symbol_table)\n      : MetricImpl<TextReadout>(symbol_table), symbol_table_(symbol_table) {}\n  ~NullTextReadoutImpl() override {\n    // MetricImpl must be explicitly cleared() before destruction, otherwise it\n    // will not be able to access the SymbolTable& to free the symbols. An RAII\n    // alternative would be to store the SymbolTable reference in the\n    // MetricImpl, costing 8 bytes per stat.\n    MetricImpl::clear(symbol_table_);\n  }\n\n  void set(absl::string_view) override {}\n  std::string value() const override { return std::string(); }\n\n  // Metric\n  bool used() const override { return false; }\n  SymbolTable& symbolTable() override { return symbol_table_; }\n\n  // RefcountInterface\n  void incRefCount() override { refcount_helper_.incRefCount(); }\n  bool decRefCount() override { return refcount_helper_.decRefCount(); }\n  uint32_t use_count() const override { return refcount_helper_.use_count(); }\n\nprivate:\n  RefcountHelper refcount_helper_;\n  SymbolTable& symbol_table_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/recent_lookups.cc",
    "content": "#include \"common/stats/recent_lookups.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nvoid RecentLookups::lookup(absl::string_view str) {\n  ++total_;\n  if (capacity_ == 0) {\n    return;\n  }\n  auto map_iter = map_.find(str);\n  if (map_iter != map_.end()) {\n    // The item is already in the list. Bump its reference-count and move it to\n    // the front of the list.\n    auto list_iter = map_iter->second;\n    ++list_iter->count_;\n    if (list_iter != list_.begin()) {\n      list_.splice(list_.begin(), list_, list_iter);\n    }\n  } else {\n    ASSERT(list_.size() <= capacity_);\n    // Evict oldest item if needed.\n    if (list_.size() >= capacity_) {\n      evictOne();\n    }\n\n    // The string storage is in the list entry.\n    list_.push_front(ItemCount{std::string(str), 1});\n    auto list_iter = list_.begin();\n    map_[list_iter->item_] = list_iter;\n  }\n  ASSERT(list_.size() == map_.size());\n}\n\nvoid RecentLookups::forEach(const IterFn& fn) const {\n  for (const ItemCount& item_count : list_) {\n    fn(item_count.item_, item_count.count_);\n  }\n}\n\nvoid RecentLookups::setCapacity(uint64_t capacity) {\n  capacity_ = capacity;\n  while (capacity_ < list_.size()) {\n    evictOne();\n  }\n}\n\nvoid RecentLookups::evictOne() {\n  ASSERT(!list_.empty());\n  ASSERT(!map_.empty());\n  const ItemCount& item_count = list_.back();\n  int erased = map_.erase(item_count.item_);\n  ASSERT(erased == 1);\n  list_.pop_back();\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/recent_lookups.h",
    "content": "#pragma once\n\n#include <functional>\n#include <list>\n#include <utility>\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n// Remembers the last 'Capacity' items passed to lookup().\nclass RecentLookups {\npublic:\n  /**\n   * Records a lookup of a string. Only the last 'Capacity' lookups are remembered.\n   *\n   * @param str the item being looked up.\n   */\n  void lookup(absl::string_view str);\n\n  using IterFn = std::function<void(absl::string_view, uint64_t)>;\n\n  /**\n   * Calls fn(item, count) for each of the remembered lookups.\n   *\n   * @param fn The function to call for every recently looked up item.\n   */\n  void forEach(const IterFn& fn) const;\n\n  /**\n   * @return the total number of lookups since tracking began.\n   */\n  uint64_t total() const { return total_; }\n\n  /**\n   * Clears out all contents.\n   */\n  void clear() {\n    total_ = 0;\n    map_.clear();\n    list_.clear();\n  }\n\n  /**\n   * Controls the maximum number of recent lookups to remember. If set to 0,\n   * then only lookup counts is tracked.\n   * @param capacity The number of lookups to remember.\n   */\n  void setCapacity(uint64_t capacity);\n\n  /**\n   * @return The configured capacity.\n   */\n  uint64_t capacity() const { return capacity_; }\n\nprivate:\n  void evictOne();\n\n  struct ItemCount {\n    std::string item_;\n    uint64_t count_;\n  };\n  using List = std::list<ItemCount>;\n  List list_;\n\n  // TODO(jmarantz): we could make this more compact by making this a set of\n  // list-iterators with heterogeneous hash/compare functors.\n  using Map = absl::flat_hash_map<absl::string_view, List::iterator>;\n  Map map_;\n  uint64_t total_{0};\n  uint64_t capacity_{0};\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/scope_prefixer.cc",
    "content": "#include \"common/stats/scope_prefixer.h\"\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/utility.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nScopePrefixer::ScopePrefixer(absl::string_view prefix, Scope& scope)\n    : scope_(scope), prefix_(Utility::sanitizeStatsName(prefix), symbolTable()) {}\n\nScopePrefixer::ScopePrefixer(StatName prefix, Scope& scope)\n    : scope_(scope), prefix_(prefix, symbolTable()) {}\n\nScopePrefixer::~ScopePrefixer() { prefix_.free(symbolTable()); }\n\nScopePtr ScopePrefixer::createScopeFromStatName(StatName name) {\n  SymbolTable::StoragePtr joined = symbolTable().join({prefix_.statName(), name});\n  return std::make_unique<ScopePrefixer>(StatName(joined.get()), scope_);\n}\n\nScopePtr ScopePrefixer::createScope(const std::string& name) {\n  StatNameManagedStorage stat_name_storage(Utility::sanitizeStatsName(name), symbolTable());\n  return createScopeFromStatName(stat_name_storage.statName());\n}\n\nCounter& ScopePrefixer::counterFromStatNameWithTags(const StatName& name,\n                                                    StatNameTagVectorOptConstRef tags) {\n  Stats::SymbolTable::StoragePtr stat_name_storage =\n      scope_.symbolTable().join({prefix_.statName(), name});\n  return scope_.counterFromStatNameWithTags(StatName(stat_name_storage.get()), tags);\n}\n\nGauge& ScopePrefixer::gaugeFromStatNameWithTags(const StatName& name,\n                                                StatNameTagVectorOptConstRef tags,\n                                                Gauge::ImportMode import_mode) {\n  Stats::SymbolTable::StoragePtr stat_name_storage =\n      scope_.symbolTable().join({prefix_.statName(), name});\n  return scope_.gaugeFromStatNameWithTags(StatName(stat_name_storage.get()), tags, import_mode);\n}\n\nHistogram& ScopePrefixer::histogramFromStatNameWithTags(const StatName& name,\n                                                        
StatNameTagVectorOptConstRef tags,\n                                                        Histogram::Unit unit) {\n  Stats::SymbolTable::StoragePtr stat_name_storage =\n      scope_.symbolTable().join({prefix_.statName(), name});\n  return scope_.histogramFromStatNameWithTags(StatName(stat_name_storage.get()), tags, unit);\n}\n\nTextReadout& ScopePrefixer::textReadoutFromStatNameWithTags(const StatName& name,\n                                                            StatNameTagVectorOptConstRef tags) {\n  Stats::SymbolTable::StoragePtr stat_name_storage =\n      scope_.symbolTable().join({prefix_.statName(), name});\n  return scope_.textReadoutFromStatNameWithTags(StatName(stat_name_storage.get()), tags);\n}\n\nCounterOptConstRef ScopePrefixer::findCounter(StatName name) const {\n  return scope_.findCounter(name);\n}\n\nGaugeOptConstRef ScopePrefixer::findGauge(StatName name) const { return scope_.findGauge(name); }\n\nHistogramOptConstRef ScopePrefixer::findHistogram(StatName name) const {\n  return scope_.findHistogram(name);\n}\n\nTextReadoutOptConstRef ScopePrefixer::findTextReadout(StatName name) const {\n  return scope_.findTextReadout(name);\n}\n\nvoid ScopePrefixer::deliverHistogramToSinks(const Histogram& histograms, uint64_t val) {\n  scope_.deliverHistogramToSinks(histograms, val);\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/scope_prefixer.h",
    "content": "#include \"envoy/stats/scope.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n// Implements a Scope that delegates to a passed-in scope, prefixing all names\n// prior to creation.\nclass ScopePrefixer : public Scope {\npublic:\n  ScopePrefixer(absl::string_view prefix, Scope& scope);\n  ScopePrefixer(StatName prefix, Scope& scope);\n  ~ScopePrefixer() override;\n\n  ScopePtr createScopeFromStatName(StatName name);\n\n  // Scope\n  ScopePtr createScope(const std::string& name) override;\n  Counter& counterFromStatNameWithTags(const StatName& name,\n                                       StatNameTagVectorOptConstRef tags) override;\n  Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                   Gauge::ImportMode import_mode) override;\n  Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                           Histogram::Unit unit) override;\n  TextReadout& textReadoutFromStatNameWithTags(const StatName& name,\n                                               StatNameTagVectorOptConstRef tags) override;\n  void deliverHistogramToSinks(const Histogram& histograms, uint64_t val) override;\n\n  Counter& counterFromString(const std::string& name) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return Scope::counterFromStatName(storage.statName());\n  }\n  Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return Scope::gaugeFromStatName(storage.statName(), import_mode);\n  }\n  Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return Scope::histogramFromStatName(storage.statName(), unit);\n  }\n  TextReadout& textReadoutFromString(const std::string& name) 
override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return Scope::textReadoutFromStatName(storage.statName());\n  }\n\n  CounterOptConstRef findCounter(StatName name) const override;\n  GaugeOptConstRef findGauge(StatName name) const override;\n  HistogramOptConstRef findHistogram(StatName name) const override;\n  TextReadoutOptConstRef findTextReadout(StatName name) const override;\n\n  const SymbolTable& constSymbolTable() const final { return scope_.constSymbolTable(); }\n  SymbolTable& symbolTable() final { return scope_.symbolTable(); }\n\n  NullGaugeImpl& nullGauge(const std::string& str) override { return scope_.nullGauge(str); }\n\n  bool iterate(const IterateFn<Counter>& fn) const override { return iterHelper(fn); }\n  bool iterate(const IterateFn<Gauge>& fn) const override { return iterHelper(fn); }\n  bool iterate(const IterateFn<Histogram>& fn) const override { return iterHelper(fn); }\n  bool iterate(const IterateFn<TextReadout>& fn) const override { return iterHelper(fn); }\n\nprivate:\n  template <class StatType> bool iterHelper(const IterateFn<StatType>& fn) const {\n    // We determine here what's in the scope by looking at name\n    // prefixes. Strictly speaking this is not correct, as a stat name can be in\n    // different scopes. But there is no data in `ScopePrefixer` to resurrect\n    // actual membership of a stat in a scope, so we go by name matching. Note\n    // that `ScopePrefixer` is not used in `ThreadLocalStore`, which has\n    // accurate maps describing which stats are in which scopes.\n    //\n    // TODO(jmarantz): In the scope of this limited implementation, it would be\n    // faster to match on the StatName prefix. 
This would be possible if\n    // SymbolTable exposed a split() method.\n    std::string prefix_str = scope_.symbolTable().toString(prefix_.statName());\n    if (!prefix_str.empty() && !absl::EndsWith(prefix_str, \".\")) {\n      prefix_str += \".\";\n    }\n    IterateFn<StatType> filter_scope = [&fn,\n                                        &prefix_str](const RefcountPtr<StatType>& stat) -> bool {\n      return !absl::StartsWith(stat->name(), prefix_str) || fn(stat);\n    };\n    return scope_.iterate(filter_scope);\n  }\n\n  Scope& scope_;\n  StatNameStorage prefix_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/stat_merger.cc",
    "content": "#include \"common/stats/stat_merger.h\"\n\n#include <algorithm>\n\nnamespace Envoy {\nnamespace Stats {\n\nStatMerger::StatMerger(Store& target_store) : temp_scope_(target_store.createScope(\"\")) {}\n\nStatMerger::~StatMerger() {\n  // By the time a parent exits, all its contributions to accumulated gauges\n  // should be zero. But depending on the timing of the stat-merger\n  // communication shutdown and other shutdown activities on the parent, the\n  // gauges may not all be zero yet. So simply erase all the parent\n  // contributions.\n  for (StatName stat_name : parent_gauges_) {\n    Gauge& gauge = temp_scope_->gaugeFromStatName(stat_name, Gauge::ImportMode::Uninitialized);\n    gauge.setParentValue(0);\n  }\n}\n\nStatName StatMerger::DynamicContext::makeDynamicStatName(const std::string& name,\n                                                         const DynamicsMap& map) {\n  auto iter = map.find(name);\n  if (iter == map.end()) {\n    return symbolic_pool_.add(name);\n  }\n\n  const DynamicSpans& dynamic_spans = iter->second;\n  auto dynamic = dynamic_spans.begin();\n  auto dynamic_end = dynamic_spans.end();\n\n  // Name has embedded dynamic segments; we'll need to join together the\n  // static/dynamic StatName segments.\n  StatNameVec segments;\n  uint32_t segment_index = 0;\n  std::vector<absl::string_view> dynamic_segments;\n\n  for (auto segment : absl::StrSplit(name, '.')) {\n    if (dynamic != dynamic_end && dynamic->first == segment_index) {\n      // Handle start of dynamic span. 
We note that we are in a dynamic\n      // span by adding to dynamic_segments, which should of course be\n      // non-empty.\n      ASSERT(dynamic_segments.empty());\n      if (dynamic->second == segment_index) {\n        // Handle start==end (a one-segment span).\n        segments.push_back(dynamic_pool_.add(segment));\n        ++dynamic;\n      } else {\n        // Handle start<end, so we save the first segment in dynamic_segments.\n        dynamic_segments.push_back(segment);\n      }\n    } else if (dynamic_segments.empty()) {\n      // Handle that we are not in dynamic mode; we are just allocating\n      // a symbolic segment.\n      segments.push_back(symbolic_pool_.add(segment));\n    } else {\n      // Handle the next dynamic segment.\n      dynamic_segments.push_back(segment);\n      if (dynamic->second == segment_index) {\n        // Handle that this dynamic segment is the last one, and we're flipping\n        // back to symbolic mode.\n        segments.push_back(dynamic_pool_.add(absl::StrJoin(dynamic_segments, \".\")));\n        dynamic_segments.clear();\n        ++dynamic;\n      }\n    }\n    ++segment_index;\n  }\n  ASSERT(dynamic_segments.empty());\n  ASSERT(dynamic == dynamic_end);\n\n  storage_ptr_ = symbol_table_.join(segments);\n  return StatName(storage_ptr_.get());\n}\n\nvoid StatMerger::mergeCounters(const Protobuf::Map<std::string, uint64_t>& counter_deltas,\n                               const DynamicsMap& dynamic_map) {\n  for (const auto& counter : counter_deltas) {\n    const std::string& name = counter.first;\n    StatMerger::DynamicContext dynamic_context(temp_scope_->symbolTable());\n    StatName stat_name = dynamic_context.makeDynamicStatName(name, dynamic_map);\n    temp_scope_->counterFromStatName(stat_name).add(counter.second);\n  }\n}\n\nvoid StatMerger::mergeGauges(const Protobuf::Map<std::string, uint64_t>& gauges,\n                             const DynamicsMap& dynamic_map) {\n  for (const auto& gauge : gauges) {\n    // 
Merging gauges via RPC from the parent has 3 cases; case 1 and 3b are the\n    // most common.\n    //\n    // 1. Child thinks gauge is Accumulate : data is combined in\n    //    gauge_ref.add() below.\n    // 2. Child thinks gauge is NeverImport: we skip this loop entry via\n    //    'continue'.\n    // 3. Child has not yet initialized gauge yet -- this merge is the\n    //    first time the child learns of the gauge. It's possible the child\n    //    will think the gauge is NeverImport due to a code change. But for\n    //    now we will leave the gauge in the child process as\n    //    import_mode==Uninitialized, and accumulate the parent value in\n    //    gauge_ref.add(). Gauges in this mode will not be included in\n    //    stats-sinks or the admin /stats calls, until the child initializes\n    //    the gauge, in which case:\n    // 3a. Child later initializes gauges as NeverImport: the parent value is\n    //     cleared during the mergeImportMode call.\n    // 3b. Child later initializes gauges as Accumulate: the parent value is\n    //     retained.\n\n    StatMerger::DynamicContext dynamic_context(temp_scope_->symbolTable());\n    StatName stat_name = dynamic_context.makeDynamicStatName(gauge.first, dynamic_map);\n    GaugeOptConstRef gauge_opt = temp_scope_->findGauge(stat_name);\n\n    Gauge::ImportMode import_mode = Gauge::ImportMode::Uninitialized;\n    if (gauge_opt) {\n      import_mode = gauge_opt->get().importMode();\n      if (import_mode == Gauge::ImportMode::NeverImport) {\n        continue;\n      }\n    }\n\n    // TODO(snowp): Propagate tag values during hot restarts.\n    auto& gauge_ref = temp_scope_->gaugeFromStatName(stat_name, import_mode);\n    if (gauge_ref.importMode() == Gauge::ImportMode::NeverImport) {\n      // On the first iteration through the loop, the gauge will not be loaded into the scope\n      // cache even though it might exist in another scope. 
Thus, we need to check again for\n      // the import status to see if we should skip this gauge.\n      //\n      // TODO(mattklein123): There is a race condition here. It's technically possible that\n      // between the time we created this stat, the stat might be created by the child as a\n      // never import stat, making the below math invalid. A follow up solution is to take the\n      // store lock starting from gaugeFromStatName() to the end of this function, but this will\n      // require adding some type of mergeGauge() function to the scope and dealing with recursive\n      // lock acquisition, etc. so we will leave this as a follow up. This race should be incredibly\n      // rare.\n      continue;\n    }\n\n    const uint64_t new_parent_value = gauge.second;\n    parent_gauges_.insert(gauge_ref.statName());\n    gauge_ref.setParentValue(new_parent_value);\n  }\n}\n\nvoid StatMerger::retainParentGaugeValue(Stats::StatName gauge_name) {\n  parent_gauges_.erase(gauge_name);\n}\n\nvoid StatMerger::mergeStats(const Protobuf::Map<std::string, uint64_t>& counter_deltas,\n                            const Protobuf::Map<std::string, uint64_t>& gauges,\n                            const DynamicsMap& dynamics) {\n  mergeCounters(counter_deltas, dynamics);\n  mergeGauges(gauges, dynamics);\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/stat_merger.h",
    "content": "#pragma once\n\n#include \"envoy/stats/store.h\"\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n// Responsible for the sensible merging of two instances of the same stat from two different\n// (typically hot restart parent+child) Envoy processes.\nclass StatMerger {\npublic:\n  using DynamicsMap = absl::flat_hash_map<std::string, DynamicSpans>;\n\n  // Holds state needed to construct StatName with mixed dynamic/symbolic\n  // components, based on a map.\n  class DynamicContext {\n  public:\n    DynamicContext(SymbolTable& symbol_table)\n        : symbol_table_(symbol_table), symbolic_pool_(symbol_table), dynamic_pool_(symbol_table) {}\n\n    /**\n     * Generates a StatName with mixed dynamic/symbolic components based on\n     * the string and the dynamic_map obtained from encodeSegments.\n     *\n     * @param name The string corresponding to the desired StatName.\n     * @param map a map indicating which spans of tokens in the stat-name are dynamic.\n     * @return the generated StatName, valid as long as the DynamicContext.\n     */\n    StatName makeDynamicStatName(const std::string& name, const DynamicsMap& map);\n\n  private:\n    SymbolTable& symbol_table_;\n    StatNamePool symbolic_pool_;\n    StatNameDynamicPool dynamic_pool_;\n    SymbolTable::StoragePtr storage_ptr_;\n  };\n\n  StatMerger(Stats::Store& target_store);\n  ~StatMerger();\n\n  /**\n   * Merge the values of stats_proto into stats_store. 
Counters are always\n   * straightforward addition, while gauges default to addition but have\n   * exceptions.\n   *\n   * @param counter_deltas map of counter changes from parent\n   * @param gauges map of gauge changes from parent\n   * @param dynamics information about which segments of the names are dynamic.\n   */\n  void mergeStats(const Protobuf::Map<std::string, uint64_t>& counter_deltas,\n                  const Protobuf::Map<std::string, uint64_t>& gauges,\n                  const DynamicsMap& dynamics = DynamicsMap());\n\n  /**\n   * Indicates that a gauge's value from the hot-restart parent should be\n   * retained, combining it with the child data. By default, data is transferred\n   * from parent gauges only during the hot-restart process, but the parent\n   * contribution is subtracted from the child when the parent terminates. This\n   * makes sense for gauges such as active connection counts, but is not\n   * appropriate for server.hot_restart_generation.\n   *\n   * This function must be called immediately prior to destruction of the\n   * StatMerger instance.\n   *\n   * @param gauge_name The gauge to be retained.\n   */\n  void retainParentGaugeValue(Stats::StatName gauge_name);\n\nprivate:\n  void mergeCounters(const Protobuf::Map<std::string, uint64_t>& counter_deltas,\n                     const DynamicsMap& dynamics_map);\n  void mergeGauges(const Protobuf::Map<std::string, uint64_t>& gauges,\n                   const DynamicsMap& dynamics_map);\n\n  StatNameHashSet parent_gauges_;\n  // A stats Scope for our in-the-merging-process counters to live in. Scopes conceptually hold\n  // shared_ptrs to the stats that live in them, with the question of which stats are living in a\n  // given scope determined by which stat names have been accessed via that scope. 
E.g., if you\n  // access a stat named \"some.shared\" directly through the ordinary store, and then access a\n  // stat named \"shared\" in a scope configured with the prefix \"some.\", there is now a single\n  // stat named some.shared pointed to by both. As another example, if you access the stat\n  // \"single\" in the \"some\" scope, there will be a stat named \"some.single\" pointed to by just\n  // that scope. Now, if you delete the scope, some.shared will stick around, but some.single\n  // will be destroyed.\n  //\n  // All of that is relevant here because it is used to get a certain desired behavior.\n  // Specifically, stats must be kept up to date with values from the parent throughout hot\n  // restart, but once the restart completes, they must be dropped without a trace if the child has\n  // not taken action (independent of the hot restart stat merging) that would lead to them getting\n  // created in the store. By storing these stats in a scope (with an empty prefix), we can\n  // preserve all stats throughout the hot restart. Then, when the restart completes, dropping\n  // the scope will drop exactly those stats whose names have not already been accessed through\n  // another store/scope.\n  ScopePtr temp_scope_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/stats_matcher_impl.cc",
    "content": "#include \"common/stats/stats_matcher_impl.h\"\n\n#include <regex>\n#include <string>\n\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n// TODO(ambuc): Refactor this into common/matchers.cc, since StatsMatcher is really just a thin\n// wrapper around what might be called a StringMatcherList.\nStatsMatcherImpl::StatsMatcherImpl(const envoy::config::metrics::v3::StatsConfig& config) {\n  switch (config.stats_matcher().stats_matcher_case()) {\n  case envoy::config::metrics::v3::StatsMatcher::StatsMatcherCase::kRejectAll:\n    // In this scenario, there are no matchers to store.\n    is_inclusive_ = !config.stats_matcher().reject_all();\n    break;\n  case envoy::config::metrics::v3::StatsMatcher::StatsMatcherCase::kInclusionList:\n    // If we have an inclusion list, we are being default-exclusive.\n    for (const auto& stats_matcher : config.stats_matcher().inclusion_list().patterns()) {\n      matchers_.push_back(Matchers::StringMatcherImpl(stats_matcher));\n    }\n    is_inclusive_ = false;\n    break;\n  case envoy::config::metrics::v3::StatsMatcher::StatsMatcherCase::kExclusionList:\n    // If we have an exclusion list, we are being default-inclusive.\n    for (const auto& stats_matcher : config.stats_matcher().exclusion_list().patterns()) {\n      matchers_.push_back(Matchers::StringMatcherImpl(stats_matcher));\n    }\n    FALLTHRU;\n  default:\n    // No matcher was supplied, so we default to inclusion.\n    is_inclusive_ = true;\n    break;\n  }\n}\n\nbool StatsMatcherImpl::rejects(const std::string& name) const {\n  //\n  //  is_inclusive_ | match | return\n  // ---------------+-------+--------\n  //        T       |   T   |   T     Default-inclusive and matching an (exclusion) matcher, deny.\n  //        T       |   F   |   F     Otherwise, allow.\n  //        F       |   T   |   F     Default-exclusive and matching an (inclusion) matcher, allow.\n  //        
F       |   F   |   T     Otherwise, deny.\n  //\n  // This is an XNOR, which can be evaluated by checking for equality.\n\n  return (is_inclusive_ == std::any_of(matchers_.begin(), matchers_.end(),\n                                       [&name](auto& matcher) { return matcher.match(name); }));\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/stats_matcher_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/stats/stats_matcher.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Supplies a stats matcher.\n */\nclass StatsMatcherImpl : public StatsMatcher {\npublic:\n  explicit StatsMatcherImpl(const envoy::config::metrics::v3::StatsConfig& config);\n\n  // Default constructor simply allows everything.\n  StatsMatcherImpl() = default;\n\n  // StatsMatcher\n  bool rejects(const std::string& name) const override;\n  bool acceptsAll() const override { return is_inclusive_ && matchers_.empty(); }\n  bool rejectsAll() const override { return !is_inclusive_ && matchers_.empty(); }\n\nprivate:\n  // Bool indicating whether or not the StatsMatcher is including or excluding stats by default. See\n  // StatsMatcherImpl::rejects() for much more detail.\n  bool is_inclusive_{true};\n\n  std::vector<Matchers::StringMatcherImpl> matchers_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/store_impl.h",
    "content": "#pragma once\n\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/store.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Implements common parts of the Store API needed by multiple derivations of Store.\n */\nclass StoreImpl : public Store {\npublic:\n  explicit StoreImpl(SymbolTable& symbol_table) : symbol_table_(symbol_table) {}\n\n  SymbolTable& symbolTable() override { return symbol_table_; }\n  const SymbolTable& constSymbolTable() const override { return symbol_table_; }\n\nprivate:\n  SymbolTable& symbol_table_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/symbol_table_impl.cc",
    "content": "#include \"common/stats/symbol_table_impl.h\"\n\n#include <algorithm>\n#include <iostream>\n#include <memory>\n#include <vector>\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n// Masks used for variable-length encoding of arbitrary-sized integers into a\n// uint8-array. The integers are typically small, so we try to store them in as\n// few bytes as possible. The bottom 7 bits hold values, and the top bit is used\n// to determine whether another byte is needed for more data.\nstatic constexpr uint32_t SpilloverMask = 0x80;\nstatic constexpr uint32_t Low7Bits = 0x7f;\n\n// When storing Symbol arrays, we disallow Symbol 0, which is the only Symbol\n// that will decode into uint8_t array starting (and ending) with {0}. Thus we\n// can use a leading 0 in the first byte to indicate that what follows is\n// literal char data, rather than symbol-table entries. 
Literal char data is\n// used for dynamically discovered stat-name tokens where you don't want to take\n// a symbol table lock, and would rather pay extra memory overhead to store the\n// tokens as fully elaborated strings.\nstatic constexpr Symbol FirstValidSymbol = 1;\nstatic constexpr uint8_t LiteralStringIndicator = 0;\n\nsize_t StatName::dataSize() const {\n  if (size_and_data_ == nullptr) {\n    return 0;\n  }\n  return SymbolTableImpl::Encoding::decodeNumber(size_and_data_).first;\n}\n\n#ifndef ENVOY_CONFIG_COVERAGE\nvoid StatName::debugPrint() {\n  // TODO(jmarantz): capture this functionality (always prints regardless of\n  // loglevel) in an ENVOY_LOG macro variant or similar, perhaps\n  // ENVOY_LOG_MISC(stderr, ...);\n  if (size_and_data_ == nullptr) {\n    std::cerr << \"Null StatName\" << std::endl;\n  } else {\n    const size_t nbytes = dataSize();\n    std::cerr << \"dataSize=\" << nbytes << \":\";\n    for (size_t i = 0; i < nbytes; ++i) {\n      std::cerr << \" \" << static_cast<uint64_t>(data()[i]);\n    }\n    const SymbolVec encoding = SymbolTableImpl::Encoding::decodeSymbols(data(), dataSize());\n    std::cerr << \", numSymbols=\" << encoding.size() << \":\";\n    for (Symbol symbol : encoding) {\n      std::cerr << \" \" << symbol;\n    }\n    std::cerr << std::endl;\n  }\n}\n#endif\n\nSymbolTableImpl::Encoding::~Encoding() {\n  // Verifies that moveToMemBlock() was called on this encoding. Failure\n  // to call moveToMemBlock() will result in leaks symbols.\n  ASSERT(mem_block_.capacity() == 0);\n}\n\nsize_t SymbolTableImpl::Encoding::encodingSizeBytes(uint64_t number) {\n  size_t num_bytes = 0;\n  do {\n    ++num_bytes;\n    number >>= 7;\n  } while (number != 0);\n  return num_bytes;\n}\n\nvoid SymbolTableImpl::Encoding::appendEncoding(uint64_t number,\n                                               MemBlockBuilder<uint8_t>& mem_block) {\n  // UTF-8-like encoding where a value 127 or less gets written as a single\n  // byte. 
For higher values we write the low-order 7 bits with a 1 in\n  // the high-order bit. Then we right-shift 7 bits and keep adding more bytes\n  // until we have consumed all the non-zero bits in symbol.\n  //\n  // When decoding, we stop consuming uint8_t when we see a uint8_t with\n  // high-order bit 0.\n  do {\n    if (number < (1 << 7)) {\n      mem_block.appendOne(number); // number <= 127 gets encoded in one byte.\n    } else {\n      mem_block.appendOne((number & Low7Bits) | SpilloverMask); // >= 128 need spillover bytes.\n    }\n    number >>= 7;\n  } while (number != 0);\n}\n\nvoid SymbolTableImpl::Encoding::addSymbols(const std::vector<Symbol>& symbols) {\n  ASSERT(data_bytes_required_ == 0);\n  for (Symbol symbol : symbols) {\n    data_bytes_required_ += encodingSizeBytes(symbol);\n  }\n  mem_block_.setCapacity(data_bytes_required_);\n  for (Symbol symbol : symbols) {\n    appendEncoding(symbol, mem_block_);\n  }\n}\n\nstd::pair<uint64_t, size_t> SymbolTableImpl::Encoding::decodeNumber(const uint8_t* encoding) {\n  uint64_t number = 0;\n  uint64_t uc = SpilloverMask;\n  const uint8_t* start = encoding;\n  for (uint32_t shift = 0; (uc & SpilloverMask) != 0; ++encoding, shift += 7) {\n    uc = static_cast<uint32_t>(*encoding);\n    number |= (uc & Low7Bits) << shift;\n  }\n  return std::make_pair(number, encoding - start);\n}\n\nSymbolVec SymbolTableImpl::Encoding::decodeSymbols(const SymbolTable::Storage array, size_t size) {\n  SymbolVec symbol_vec;\n  symbol_vec.reserve(size);\n  decodeTokens(\n      array, size, [&symbol_vec](Symbol symbol) { symbol_vec.push_back(symbol); },\n      [](absl::string_view) {});\n  return symbol_vec;\n}\n\nvoid SymbolTableImpl::Encoding::decodeTokens(\n    const SymbolTable::Storage array, size_t size,\n    const std::function<void(Symbol)>& symbol_token_fn,\n    const std::function<void(absl::string_view)>& string_view_token_fn) {\n  while (size > 0) {\n    if (*array == LiteralStringIndicator) {\n      // To avoid 
scanning memory to find the literal size during decode, we\n      // var-length encode the size of the literal string prior to the data.\n      ASSERT(size > 1);\n      ++array;\n      --size;\n      std::pair<uint64_t, size_t> length_consumed = decodeNumber(array);\n      uint64_t length = length_consumed.first;\n      array += length_consumed.second;\n      size -= length_consumed.second;\n      ASSERT(size >= length);\n      string_view_token_fn(absl::string_view(reinterpret_cast<const char*>(array), length));\n      size -= length;\n      array += length;\n    } else {\n      std::pair<uint64_t, size_t> symbol_consumed = decodeNumber(array);\n      symbol_token_fn(symbol_consumed.first);\n      size -= symbol_consumed.second;\n      array += symbol_consumed.second;\n    }\n  }\n}\n\nstd::vector<absl::string_view> SymbolTableImpl::decodeStrings(const SymbolTable::Storage array,\n                                                              size_t size) const {\n  std::vector<absl::string_view> strings;\n  Thread::LockGuard lock(lock_);\n  Encoding::decodeTokens(\n      array, size,\n      [this, &strings](Symbol symbol)\n          ABSL_NO_THREAD_SAFETY_ANALYSIS { strings.push_back(fromSymbol(symbol)); },\n      [&strings](absl::string_view str) { strings.push_back(str); });\n  return strings;\n}\n\nvoid SymbolTableImpl::Encoding::moveToMemBlock(MemBlockBuilder<uint8_t>& mem_block) {\n  appendEncoding(data_bytes_required_, mem_block);\n  mem_block.appendBlock(mem_block_);\n  mem_block_.reset(); // Logically transfer ownership, enabling empty assert on destruct.\n}\n\nvoid SymbolTableImpl::Encoding::appendToMemBlock(StatName stat_name,\n                                                 MemBlockBuilder<uint8_t>& mem_block) {\n  const uint8_t* data = stat_name.dataIncludingSize();\n  if (data == nullptr) {\n    mem_block.appendOne(0);\n  } else {\n    mem_block.appendData(absl::MakeSpan(data, stat_name.size()));\n  }\n}\n\nSymbolTableImpl::SymbolTableImpl()\n    // 
Have to be explicitly initialized, if we want to use the ABSL_GUARDED_BY macro.\n    : next_symbol_(FirstValidSymbol), monotonic_counter_(FirstValidSymbol) {}\n\nSymbolTableImpl::~SymbolTableImpl() {\n  // To avoid leaks into the symbol table, we expect all StatNames to be freed.\n  // Note: this could potentially be short-circuited if we decide a fast exit\n  // is needed in production. But it would be good to ensure clean up during\n  // tests.\n  ASSERT(numSymbols() == 0);\n}\n\n// TODO(ambuc): There is a possible performance optimization here for avoiding\n// the encoding of IPs / numbers if they appear in stat names. We don't want to\n// waste time symbolizing an integer as an integer, if we can help it.\nvoid SymbolTableImpl::addTokensToEncoding(const absl::string_view name, Encoding& encoding) {\n  if (name.empty()) {\n    return;\n  }\n\n  // We want to hold the lock for the minimum amount of time, so we do the\n  // string-splitting and prepare a temp vector of Symbol first.\n  const std::vector<absl::string_view> tokens = absl::StrSplit(name, '.');\n  std::vector<Symbol> symbols;\n  symbols.reserve(tokens.size());\n\n  // Now take the lock and populate the Symbol objects, which involves bumping\n  // ref-counts in this.\n  {\n    Thread::LockGuard lock(lock_);\n    recent_lookups_.lookup(name);\n    for (auto& token : tokens) {\n      // TODO(jmarantz): consider using StatNameDynamicStorage for tokens with\n      // length below some threshold, say 4 bytes. 
It might be preferable not to\n      // reserve Symbols for every 3 digit number found (for example) in ipv4\n      // addresses.\n      symbols.push_back(toSymbol(token));\n    }\n  }\n\n  // Now efficiently encode the array of 32-bit symbols into a uint8_t array.\n  encoding.addSymbols(symbols);\n}\n\nuint64_t SymbolTableImpl::numSymbols() const {\n  Thread::LockGuard lock(lock_);\n  ASSERT(encode_map_.size() == decode_map_.size());\n  return encode_map_.size();\n}\n\nstd::string SymbolTableImpl::toString(const StatName& stat_name) const {\n  return absl::StrJoin(decodeStrings(stat_name.data(), stat_name.dataSize()), \".\");\n}\n\nvoid SymbolTableImpl::callWithStringView(StatName stat_name,\n                                         const std::function<void(absl::string_view)>& fn) const {\n  fn(toString(stat_name));\n}\n\nvoid SymbolTableImpl::incRefCount(const StatName& stat_name) {\n  // Before taking the lock, decode the array of symbols from the SymbolTable::Storage.\n  const SymbolVec symbols = Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize());\n\n  Thread::LockGuard lock(lock_);\n  for (Symbol symbol : symbols) {\n    auto decode_search = decode_map_.find(symbol);\n    ASSERT(decode_search != decode_map_.end());\n\n    auto encode_search = encode_map_.find(decode_search->second->toStringView());\n    ASSERT(encode_search != encode_map_.end());\n\n    ++encode_search->second.ref_count_;\n  }\n}\n\nvoid SymbolTableImpl::free(const StatName& stat_name) {\n  // Before taking the lock, decode the array of symbols from the SymbolTable::Storage.\n  const SymbolVec symbols = Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize());\n\n  Thread::LockGuard lock(lock_);\n  for (Symbol symbol : symbols) {\n    auto decode_search = decode_map_.find(symbol);\n    ASSERT(decode_search != decode_map_.end());\n\n    auto encode_search = encode_map_.find(decode_search->second->toStringView());\n    ASSERT(encode_search != encode_map_.end());\n\n    // If 
that was the last remaining client usage of the symbol, erase the\n    // current mappings and add the now-unused symbol to the reuse pool.\n    //\n    // The \"if (--EXPR.ref_count_)\" pattern speeds up BM_CreateRace by 20% in\n    // symbol_table_speed_test.cc, relative to breaking out the decrement into a\n    // separate step, likely due to the non-trivial dereferences in EXPR.\n    if (--encode_search->second.ref_count_ == 0) {\n      decode_map_.erase(decode_search);\n      encode_map_.erase(encode_search);\n      pool_.push(symbol);\n    }\n  }\n}\n\nuint64_t SymbolTableImpl::getRecentLookups(const RecentLookupsFn& iter) const {\n  uint64_t total = 0;\n  absl::flat_hash_map<std::string, uint64_t> name_count_map;\n\n  // We don't want to hold lock_ while calling the iterator, but we need it to\n  // access recent_lookups_, so we buffer in name_count_map.\n  {\n    Thread::LockGuard lock(lock_);\n    recent_lookups_.forEach(\n        [&name_count_map](absl::string_view str, uint64_t count)\n            ABSL_NO_THREAD_SAFETY_ANALYSIS { name_count_map[std::string(str)] += count; });\n    total += recent_lookups_.total();\n  }\n\n  // Now we have the collated name-count map data: we need to vectorize and\n  // sort. 
We define the pair with the count first as std::pair::operator<\n  // prioritizes its first element over its second.\n  using LookupCount = std::pair<uint64_t, absl::string_view>;\n  std::vector<LookupCount> lookup_data;\n  lookup_data.reserve(name_count_map.size());\n  for (const auto& iter : name_count_map) {\n    lookup_data.emplace_back(LookupCount(iter.second, iter.first));\n  }\n  std::sort(lookup_data.begin(), lookup_data.end());\n  for (const LookupCount& lookup_count : lookup_data) {\n    iter(lookup_count.second, lookup_count.first);\n  }\n  return total;\n}\n\nDynamicSpans SymbolTableImpl::getDynamicSpans(StatName stat_name) const {\n  DynamicSpans dynamic_spans;\n\n  uint32_t index = 0;\n  auto record_dynamic = [&dynamic_spans, &index](absl::string_view str) {\n    DynamicSpan span;\n    span.first = index;\n    index += std::count(str.begin(), str.end(), '.');\n    span.second = index;\n    ++index;\n    dynamic_spans.push_back(span);\n  };\n\n  // Use decodeTokens to suss out which components of stat_name are\n  // symbolic vs dynamic. The lambda that takes a Symbol is called\n  // for symbolic components. 
The lambda called with a string_view\n  // is called for dynamic components.\n  //\n  // Note that with fake symbol tables, the Symbol lambda is called\n  // once for each character in the string, and no dynamics will\n  // be recorded.\n  Encoding::decodeTokens(\n      stat_name.data(), stat_name.dataSize(), [&index](Symbol) { ++index; }, record_dynamic);\n  return dynamic_spans;\n}\n\nvoid SymbolTableImpl::setRecentLookupCapacity(uint64_t capacity) {\n  Thread::LockGuard lock(lock_);\n  recent_lookups_.setCapacity(capacity);\n}\n\nvoid SymbolTableImpl::clearRecentLookups() {\n  Thread::LockGuard lock(lock_);\n  recent_lookups_.clear();\n}\n\nuint64_t SymbolTableImpl::recentLookupCapacity() const {\n  Thread::LockGuard lock(lock_);\n  return recent_lookups_.capacity();\n}\n\nStatNameSetPtr SymbolTableImpl::makeSet(absl::string_view name) {\n  // make_unique does not work with private ctor, even though SymbolTableImpl is a friend.\n  StatNameSetPtr stat_name_set(new StatNameSet(*this, name));\n  return stat_name_set;\n}\n\nSymbol SymbolTableImpl::toSymbol(absl::string_view sv) {\n  Symbol result;\n  auto encode_find = encode_map_.find(sv);\n  // If the string segment doesn't already exist,\n  if (encode_find == encode_map_.end()) {\n    // We create the actual string, place it in the decode_map_, and then insert\n    // a string_view pointing to it in the encode_map_. This allows us to only\n    // store the string once. 
We use unique_ptr so copies are not made as\n    // flat_hash_map moves values around.\n    InlineStringPtr str = InlineString::create(sv);\n    auto encode_insert = encode_map_.insert({str->toStringView(), SharedSymbol(next_symbol_)});\n    ASSERT(encode_insert.second);\n    auto decode_insert = decode_map_.insert({next_symbol_, std::move(str)});\n    ASSERT(decode_insert.second);\n\n    result = next_symbol_;\n    newSymbol();\n  } else {\n    // If the insertion didn't take place, return the actual value at that location and up the\n    // refcount at that location\n    result = encode_find->second.symbol_;\n    ++(encode_find->second.ref_count_);\n  }\n  return result;\n}\n\nabsl::string_view SymbolTableImpl::fromSymbol(const Symbol symbol) const\n    ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {\n  auto search = decode_map_.find(symbol);\n  RELEASE_ASSERT(search != decode_map_.end(), \"no such symbol\");\n  return search->second->toStringView();\n}\n\nvoid SymbolTableImpl::newSymbol() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {\n  if (pool_.empty()) {\n    next_symbol_ = ++monotonic_counter_;\n  } else {\n    next_symbol_ = pool_.top();\n    pool_.pop();\n  }\n  // This should catch integer overflow for the new symbol.\n  ASSERT(monotonic_counter_ != 0);\n}\n\nbool SymbolTableImpl::lessThan(const StatName& a, const StatName& b) const {\n  // Constructing two temp vectors during lessThan is not strictly necessary.\n  // If this becomes a performance bottleneck (e.g. 
during sorting), we could\n  // provide an iterator-like interface for incrementally comparing the tokens\n  // without allocating memory.\n  const std::vector<absl::string_view> av = decodeStrings(a.data(), a.dataSize());\n  const std::vector<absl::string_view> bv = decodeStrings(b.data(), b.dataSize());\n\n  for (uint64_t i = 0, n = std::min(av.size(), bv.size()); i < n; ++i) {\n    if (av[i] != bv[i]) {\n      const bool ret = av[i] < bv[i];\n      return ret;\n    }\n  }\n  return av.size() < bv.size();\n}\n\n#ifndef ENVOY_CONFIG_COVERAGE\nvoid SymbolTableImpl::debugPrint() const {\n  Thread::LockGuard lock(lock_);\n  std::vector<Symbol> symbols;\n  for (const auto& p : decode_map_) {\n    symbols.push_back(p.first);\n  }\n  std::sort(symbols.begin(), symbols.end());\n  for (Symbol symbol : symbols) {\n    const InlineString& token = *decode_map_.find(symbol)->second;\n    const SharedSymbol& shared_symbol = encode_map_.find(token.toStringView())->second;\n    ENVOY_LOG_MISC(info, \"{}: '{}' ({})\", symbol, token.toStringView(), shared_symbol.ref_count_);\n  }\n}\n#endif\n\nSymbolTable::StoragePtr SymbolTableImpl::encode(absl::string_view name) {\n  name = StringUtil::removeTrailingCharacters(name, '.');\n  Encoding encoding;\n  addTokensToEncoding(name, encoding);\n  MemBlockBuilder<uint8_t> mem_block(Encoding::totalSizeBytes(encoding.bytesRequired()));\n  encoding.moveToMemBlock(mem_block);\n  return mem_block.release();\n}\n\nStatNameStorage::StatNameStorage(absl::string_view name, SymbolTable& table)\n    : StatNameStorageBase(table.encode(name)) {}\n\nStatNameStorage::StatNameStorage(StatName src, SymbolTable& table) {\n  const size_t size = src.size();\n  MemBlockBuilder<uint8_t> storage(size); // Note: MemBlockBuilder takes uint64_t.\n  src.copyToMemBlock(storage);\n  setBytes(storage.release());\n  table.incRefCount(statName());\n}\n\nSymbolTable::StoragePtr SymbolTableImpl::makeDynamicStorage(absl::string_view name) {\n  name = 
StringUtil::removeTrailingCharacters(name, '.');\n\n  // For all StatName objects, we first have the total number of bytes in the\n  // representation. But for inlined dynamic string StatName variants, we must\n  // store the length of the payload separately, so that if this token gets\n  // joined with others, we'll know much space it consumes until the next token.\n  // So the layout is\n  //   [ length-of-whole-StatName, LiteralStringIndicator, length-of-name, name ]\n  // So we need to figure out how many bytes we need to represent length-of-name\n  // and name.\n\n  // payload_bytes is the total number of bytes needed to represent the\n  // characters in name, plus their encoded size, plus the literal indicator.\n  const size_t payload_bytes = SymbolTableImpl::Encoding::totalSizeBytes(name.size()) + 1;\n\n  // total_bytes includes the payload_bytes, plus the LiteralStringIndicator, and\n  // the length of those.\n  const size_t total_bytes = SymbolTableImpl::Encoding::totalSizeBytes(payload_bytes);\n  MemBlockBuilder<uint8_t> mem_block(total_bytes);\n\n  SymbolTableImpl::Encoding::appendEncoding(payload_bytes, mem_block);\n  mem_block.appendOne(LiteralStringIndicator);\n  SymbolTableImpl::Encoding::appendEncoding(name.size(), mem_block);\n  mem_block.appendData(absl::MakeSpan(reinterpret_cast<const uint8_t*>(name.data()), name.size()));\n  ASSERT(mem_block.capacityRemaining() == 0);\n  return mem_block.release();\n}\n\nStatNameStorage::~StatNameStorage() {\n  // StatNameStorage is not fully RAII: you must call free(SymbolTable&) to\n  // decrement the reference counts held by the SymbolTable on behalf of\n  // this StatName. 
This saves 8 bytes of storage per stat, relative to\n  // holding a SymbolTable& in each StatNameStorage object.\n  ASSERT(bytes() == nullptr);\n}\n\nvoid StatNameStorage::free(SymbolTable& table) {\n  table.free(statName());\n  clear();\n}\n\nvoid StatNamePool::clear() {\n  for (StatNameStorage& storage : storage_vector_) {\n    storage.free(symbol_table_);\n  }\n  storage_vector_.clear();\n}\n\nconst uint8_t* StatNamePool::addReturningStorage(absl::string_view str) {\n  storage_vector_.push_back(Stats::StatNameStorage(str, symbol_table_));\n  return storage_vector_.back().bytes();\n}\n\nStatName StatNamePool::add(absl::string_view str) { return StatName(addReturningStorage(str)); }\n\nStatName StatNameDynamicPool::add(absl::string_view str) {\n  storage_vector_.push_back(Stats::StatNameDynamicStorage(str, symbol_table_));\n  return StatName(storage_vector_.back().bytes());\n}\n\nStatNameStorageSet::~StatNameStorageSet() {\n  // free() must be called before destructing StatNameStorageSet to decrement\n  // references to all symbols.\n  ASSERT(hash_set_.empty());\n}\n\nvoid StatNameStorageSet::free(SymbolTable& symbol_table) {\n  // We must free() all symbols referenced in the set, otherwise the symbols\n  // will leak when the flat_hash_map superclass is destructed. They cannot\n  // self-destruct without an explicit free() as each individual StatNameStorage\n  // object does not have a reference to the symbol table, which would waste 8\n  // bytes per stat-name. The easiest way to safely free all the contents of the\n  // symbol table set is to use flat_hash_map::extract(), which removes and\n  // returns an element from the set without destructing the element\n  // immediately. This gives us a chance to call free() on each one before they\n  // are destroyed.\n  //\n  // There's a performance risk here, if removing elements via\n  // flat_hash_set::begin() is inefficient to use in a loop like this. 
One can\n  // imagine a hash-table implementation where the performance of this\n  // usage-model would be poor. However, tests with 100k elements appeared to\n  // run quickly when compiled for optimization, so at present this is not a\n  // performance issue.\n\n  while (!hash_set_.empty()) {\n    auto storage = hash_set_.extract(hash_set_.begin());\n    storage.value().free(symbol_table);\n  }\n}\n\nSymbolTable::StoragePtr SymbolTableImpl::join(const StatNameVec& stat_names) const {\n  size_t num_bytes = 0;\n  for (StatName stat_name : stat_names) {\n    if (!stat_name.empty()) {\n      num_bytes += stat_name.dataSize();\n    }\n  }\n  MemBlockBuilder<uint8_t> mem_block(Encoding::totalSizeBytes(num_bytes));\n  Encoding::appendEncoding(num_bytes, mem_block);\n  for (StatName stat_name : stat_names) {\n    stat_name.appendDataToMemBlock(mem_block);\n  }\n  ASSERT(mem_block.capacityRemaining() == 0);\n  return mem_block.release();\n}\n\nvoid SymbolTableImpl::populateList(const StatName* names, uint32_t num_names, StatNameList& list) {\n  RELEASE_ASSERT(num_names < 256, \"Maximum number elements in a StatNameList exceeded\");\n\n  // First encode all the names.\n  size_t total_size_bytes = 1; /* one byte for holding the number of names */\n\n  for (uint32_t i = 0; i < num_names; ++i) {\n    total_size_bytes += names[i].size();\n  }\n\n  // Now allocate the exact number of bytes required and move the encodings\n  // into storage.\n  MemBlockBuilder<uint8_t> mem_block(total_size_bytes);\n  mem_block.appendOne(num_names);\n  for (uint32_t i = 0; i < num_names; ++i) {\n    const StatName stat_name = names[i];\n    Encoding::appendToMemBlock(stat_name, mem_block);\n    incRefCount(stat_name);\n  }\n\n  // This assertion double-checks the arithmetic where we computed\n  // total_size_bytes. 
After appending all the encoded data into the\n  // allocated byte array, we should have exhausted all the memory\n  // we though we needed.\n  ASSERT(mem_block.capacityRemaining() == 0);\n  list.moveStorageIntoList(mem_block.release());\n}\n\nStatNameList::~StatNameList() { ASSERT(!populated()); }\n\nvoid StatNameList::iterate(const std::function<bool(StatName)>& f) const {\n  const uint8_t* p = &storage_[0];\n  const uint32_t num_elements = *p++;\n  for (uint32_t i = 0; i < num_elements; ++i) {\n    const StatName stat_name(p);\n    p += stat_name.size();\n    if (!f(stat_name)) {\n      break;\n    }\n  }\n}\n\nvoid StatNameList::clear(SymbolTable& symbol_table) {\n  iterate([&symbol_table](StatName stat_name) -> bool {\n    symbol_table.free(stat_name);\n    return true;\n  });\n  storage_.reset();\n}\n\nStatNameSet::StatNameSet(SymbolTable& symbol_table, absl::string_view name)\n    : name_(std::string(name)), symbol_table_(symbol_table), pool_(symbol_table) {\n  builtin_stat_names_[\"\"] = StatName();\n}\n\nvoid StatNameSet::rememberBuiltin(absl::string_view str) {\n  StatName stat_name;\n  {\n    absl::MutexLock lock(&mutex_);\n    stat_name = pool_.add(str);\n  }\n  builtin_stat_names_[str] = stat_name;\n}\n\nStatName StatNameSet::getBuiltin(absl::string_view token, StatName fallback) const {\n  // If token was recorded as a built-in during initialization, we can\n  // service this request lock-free.\n  const auto iter = builtin_stat_names_.find(token);\n  if (iter != builtin_stat_names_.end()) {\n    return iter->second;\n  }\n  return fallback;\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/symbol_table_impl.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <cstring>\n#include <memory>\n#include <stack>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/stats/symbol_table.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/hash.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/common/mem_block_builder.h\"\n#include \"common/common/non_copyable.h\"\n#include \"common/common/thread.h\"\n#include \"common/common/utility.h\"\n#include \"common/stats/recent_lookups.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/str_split.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/** A Symbol represents a string-token with a small index. */\nusing Symbol = uint32_t;\n\n/** Transient representations of a vector of 32-bit symbols */\nusing SymbolVec = std::vector<Symbol>;\n\n/**\n * SymbolTableImpl manages a namespace optimized for stats, which are typically\n * composed of arrays of \".\"-separated tokens, with a significant overlap\n * between the tokens. Each token is mapped to a Symbol (uint32_t) and\n * reference-counted so that no-longer-used symbols can be reclaimed.\n *\n * We use a uint8_t array to encode a \".\"-deliminated stat-name into arrays of\n * integer symbol IDs in order to conserve space, as in practice the\n * majority of token instances in stat names draw from a fairly small set of\n * common names, typically less than 100. The format is somewhat similar to\n * UTF-8, with a variable-length array of uint8_t. See the implementation for\n * details.\n *\n * StatNameStorage can be used to manage memory for the byte-encoding. Not all\n * StatNames are backed by StatNameStorage -- the storage may be inlined into\n * another object such as HeapStatData. 
StaNameStorage is not fully RAII --\n * instead the owner must call free(SymbolTable&) explicitly before\n * StatNameStorage is destructed. This saves 8 bytes of storage per stat,\n * relative to holding a SymbolTable& in each StatNameStorage object.\n *\n * A StatName is a copyable and assignable reference to this storage. It does\n * not own the storage or keep it alive via reference counts; the owner must\n * ensure the backing store lives as long as the StatName.\n *\n * The underlying Symbol / SymbolVec data structures are private to the\n * impl. One side effect of the non-monotonically-increasing symbol counter is\n * that if a string is encoded, the resulting stat is destroyed, and then that\n * same string is re-encoded, it may or may not encode to the same underlying\n * symbol.\n */\nclass SymbolTableImpl : public SymbolTable {\npublic:\n  /**\n   * Intermediate representation for a stat-name. This helps store multiple\n   * names in a single packed allocation. First we encode each desired name,\n   * then sum their sizes for the single packed allocation. This is used to\n   * store MetricImpl's tags and tagExtractedName.\n   */\n  class Encoding {\n  public:\n    /**\n     * Before destructing SymbolEncoding, you must call moveToMemBlock. This\n     * transfers ownership, and in particular, the responsibility to call\n     * SymbolTable::clear() on all referenced symbols. 
If we ever wanted to be\n     * able to destruct a SymbolEncoding without transferring it we could add a\n     * clear(SymbolTable&) method.\n     */\n    ~Encoding();\n\n    /**\n     * Encodes a token into the vec.\n     *\n     * @param symbol the symbol to encode.\n     */\n    void addSymbols(const SymbolVec& symbols);\n\n    /**\n     * Decodes a uint8_t array into a SymbolVec.\n     */\n    static SymbolVec decodeSymbols(const SymbolTable::Storage array, size_t size);\n\n    /**\n     * Decodes a uint8_t array into a sequence of symbols and literal strings.\n     * There are distinct lambdas for these two options. Calls to these lambdas\n     * will be interleaved based on the sequencing of literal strings and\n     * symbols held in the data.\n     *\n     * @param array the StatName encoded as a uint8_t array.\n     * @param size the size of the array in bytes.\n     * @param symbol_token_fn a function to be called whenever a symbol is encountered in the array.\n     * @param string_view_token_fn a function to be called whenever a string literal is encountered.\n     */\n    static void decodeTokens(const SymbolTable::Storage array, size_t size,\n                             const std::function<void(Symbol)>& symbol_token_fn,\n                             const std::function<void(absl::string_view)>& string_view_token_fn);\n\n    /**\n     * Returns the number of bytes required to represent StatName as a uint8_t\n     * array, including the encoded size.\n     */\n    size_t bytesRequired() const {\n      return data_bytes_required_ + encodingSizeBytes(data_bytes_required_);\n    }\n\n    /**\n     * Moves the contents of the vector into an allocated array. 
The array\n     * must have been allocated with bytesRequired() bytes.\n     *\n     * @param mem_block_builder memory block to receive the encoded bytes.\n     */\n    void moveToMemBlock(MemBlockBuilder<uint8_t>& mem_block_builder);\n\n    /**\n     * @param number A number to encode in a variable length byte-array.\n     * @return The number of bytes it would take to encode the number.\n     */\n    static size_t encodingSizeBytes(uint64_t number);\n\n    /**\n     * @param num_data_bytes The number of bytes in a data-block.\n     * @return The total number of bytes required for the data-block and its encoded size.\n     */\n    static size_t totalSizeBytes(size_t num_data_bytes) {\n      return encodingSizeBytes(num_data_bytes) + num_data_bytes;\n    }\n\n    /**\n     * Saves the specified number into the byte array, returning the next byte.\n     * There is no guarantee that bytes will be aligned, so we can't cast to a\n     * uint16_t* and assign, but must individually copy the bytes.\n     *\n     * Requires that the buffer be sized to accommodate encodingSizeBytes(number).\n     *\n     * @param number the number to write.\n     * @param mem_block the memory into which to append the number.\n     */\n    static void appendEncoding(uint64_t number, MemBlockBuilder<uint8_t>& mem_block);\n\n    /**\n     * Appends stat_name's bytes into mem_block, which must have been allocated to\n     * allow for stat_name.size() bytes.\n     *\n     * @param stat_name the stat_name to append.\n     * @param mem_block the block of memory to append to.\n     */\n    static void appendToMemBlock(StatName stat_name, MemBlockBuilder<uint8_t>& mem_block);\n\n    /**\n     * Decodes a byte-array containing a variable-length number.\n     *\n     * @param The encoded byte array, written previously by appendEncoding.\n     * @return A pair containing the decoded number, and the number of bytes consumed from encoding.\n     */\n    static std::pair<uint64_t, size_t> 
decodeNumber(const uint8_t* encoding);\n\n  private:\n    size_t data_bytes_required_{0};\n    MemBlockBuilder<uint8_t> mem_block_;\n  };\n\n  SymbolTableImpl();\n  ~SymbolTableImpl() override;\n\n  // SymbolTable\n  std::string toString(const StatName& stat_name) const override;\n  uint64_t numSymbols() const override;\n  bool lessThan(const StatName& a, const StatName& b) const override;\n  void free(const StatName& stat_name) override;\n  void incRefCount(const StatName& stat_name) override;\n  StoragePtr join(const StatNameVec& stat_names) const override;\n  void populateList(const StatName* names, uint32_t num_names, StatNameList& list) override;\n  StoragePtr encode(absl::string_view name) override;\n  StoragePtr makeDynamicStorage(absl::string_view name) override;\n  void callWithStringView(StatName stat_name,\n                          const std::function<void(absl::string_view)>& fn) const override;\n\n#ifndef ENVOY_CONFIG_COVERAGE\n  void debugPrint() const override;\n#endif\n\n  StatNameSetPtr makeSet(absl::string_view name) override;\n  uint64_t getRecentLookups(const RecentLookupsFn&) const override;\n  void clearRecentLookups() override;\n  void setRecentLookupCapacity(uint64_t capacity) override;\n  uint64_t recentLookupCapacity() const override;\n  DynamicSpans getDynamicSpans(StatName stat_name) const override;\n\nprivate:\n  friend class StatName;\n  friend class StatNameTest;\n  friend class StatNameDeathTest;\n\n  struct SharedSymbol {\n    SharedSymbol(Symbol symbol) : symbol_(symbol), ref_count_(1) {}\n\n    Symbol symbol_;\n    uint32_t ref_count_;\n  };\n\n  // This must be held during both encode() and free().\n  mutable Thread::MutexBasicLockable lock_;\n\n  /**\n   * Decodes a uint8_t array into an array of period-delimited strings. 
Note\n   * that some of the strings may have periods in them, in the case where\n   * StatNameDynamicStorage was used.\n   *\n   * If decoding fails on any part of the encoding, we RELEASE_ASSERT and crash,\n   * since this should never happen, and we don't want to continue running with\n   * corrupt stat names.\n   *\n   * @param array the uint8_t array of encoded symbols and dynamic strings.\n   * @param size the size of the array in bytes.\n   * @return std::string the retrieved stat name.\n   */\n  std::vector<absl::string_view> decodeStrings(const Storage array, size_t size) const;\n\n  /**\n   * Convenience function for encode(), symbolizing one string segment at a time.\n   *\n   * @param sv the individual string to be encoded as a symbol.\n   * @return Symbol the encoded string.\n   */\n  Symbol toSymbol(absl::string_view sv) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_);\n\n  /**\n   * Convenience function for decode(), decoding one symbol at a time.\n   *\n   * @param symbol the individual symbol to be decoded.\n   * @return absl::string_view the decoded string.\n   */\n  absl::string_view fromSymbol(Symbol symbol) const ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_);\n\n  /**\n   * Stages a new symbol for use. To be called after a successful insertion.\n   */\n  void newSymbol();\n\n  /**\n   * Tokenizes name, finds or allocates symbols for each token, and adds them\n   * to encoding.\n   *\n   * @param name The name to tokenize.\n   * @param encoding The encoding to write to.\n   */\n  void addTokensToEncoding(absl::string_view name, Encoding& encoding);\n\n  Symbol monotonicCounter() {\n    Thread::LockGuard lock(lock_);\n    return monotonic_counter_;\n  }\n\n  // Stores the symbol to be used at next insertion. 
This should exist ahead of insertion time so\n  // that if insertion succeeds, the value written is the correct one.\n  Symbol next_symbol_ ABSL_GUARDED_BY(lock_);\n\n  // If the free pool is exhausted, we monotonically increase this counter.\n  Symbol monotonic_counter_;\n\n  // Bitmap implementation.\n  // The encode map stores both the symbol and the ref count of that symbol.\n  // Using absl::string_view lets us only store the complete string once, in the decode map.\n  using EncodeMap = absl::flat_hash_map<absl::string_view, SharedSymbol>;\n  using DecodeMap = absl::flat_hash_map<Symbol, InlineStringPtr>;\n  EncodeMap encode_map_ ABSL_GUARDED_BY(lock_);\n  DecodeMap decode_map_ ABSL_GUARDED_BY(lock_);\n\n  // Free pool of symbols for re-use.\n  // TODO(ambuc): There might be an optimization here relating to storing ranges of freed symbols\n  // using an Envoy::IntervalSet.\n  std::stack<Symbol> pool_ ABSL_GUARDED_BY(lock_);\n  RecentLookups recent_lookups_ ABSL_GUARDED_BY(lock_);\n};\n\n// Base class for holding the backing-storing for a StatName. The two derived\n// classes, StatNameStorage and StatNameDynamicStorage, share a need to hold an\n// array of bytes, but use different representations.\nclass StatNameStorageBase {\npublic:\n  StatNameStorageBase(SymbolTable::StoragePtr&& bytes) : bytes_(std::move(bytes)) {}\n  StatNameStorageBase() = default;\n\n  /**\n   * @return a reference to the owned storage.\n   */\n  inline StatName statName() const;\n\n  /**\n   * @return the encoded data as a const pointer.\n   */\n  const uint8_t* bytes() const { return bytes_.get(); }\n\nprotected:\n  void setBytes(SymbolTable::StoragePtr&& bytes) { bytes_ = std::move(bytes); }\n  void clear() { bytes_.reset(); }\n\nprivate:\n  SymbolTable::StoragePtr bytes_;\n};\n\n/**\n * Holds backing storage for a StatName. 
Usage of this is not required, as some\n * applications may want to hold multiple StatName objects in one contiguous\n * uint8_t array, or embed the characters directly in another structure.\n *\n * This is intended for embedding in other data structures that have access\n * to a SymbolTable. StatNameStorage::free(symbol_table) must be called prior\n * to allowing the StatNameStorage object to be destructed, otherwise an assert\n * will fire to guard against symbol-table leaks.\n *\n * Thus this class is inconvenient to directly use as temp storage for building\n * a StatName from a string. Instead it should be used via StatNameManagedStorage.\n */\nclass StatNameStorage : public StatNameStorageBase {\npublic:\n  // Basic constructor for when you have a name as a string, and need to\n  // generate symbols for it.\n  StatNameStorage(absl::string_view name, SymbolTable& table);\n\n  // Move constructor; needed for using StatNameStorage as an\n  // absl::flat_hash_map value.\n  StatNameStorage(StatNameStorage&& src) noexcept : StatNameStorageBase(std::move(src)) {}\n\n  // Obtains new backing storage for an already existing StatName. 
Used to\n  // record a computed StatName held in a temp into a more persistent data\n  // structure.\n  StatNameStorage(StatName src, SymbolTable& table);\n\n  /**\n   * Before allowing a StatNameStorage to be destroyed, you must call free()\n   * on it, to drop the references to the symbols, allowing the SymbolTable\n   * to shrink.\n   */\n  ~StatNameStorage();\n\n  /**\n   * Decrements the reference counts in the SymbolTable.\n   *\n   * @param table the symbol table.\n   */\n  void free(SymbolTable& table);\n};\n\n/**\n * Efficiently represents a stat name using a variable-length array of uint8_t.\n * This class does not own the backing store for this array; the backing-store\n * can be held in StatNameStorage, or it can be packed more tightly into another\n * object.\n *\n * When Envoy is configured with a large numbers of clusters, there are a huge\n * number of StatNames, so avoiding extra per-stat pointers has a significant\n * memory impact.\n */\nclass StatName {\npublic:\n  // Constructs a StatName object directly referencing the storage of another\n  // StatName.\n  explicit StatName(const SymbolTable::Storage size_and_data) : size_and_data_(size_and_data) {}\n\n  // Constructs an empty StatName object.\n  StatName() = default;\n\n  /**\n   * Defines default hash function so StatName can be used as a key in an absl\n   * hash-table without specifying a functor. 
See\n   * https://abseil.io/docs/cpp/guides/hash for details.\n   */\n  template <typename H> friend H AbslHashValue(H h, StatName stat_name) {\n    if (stat_name.empty()) {\n      return H::combine(std::move(h), absl::string_view());\n    }\n\n    return H::combine(std::move(h), stat_name.dataAsStringView());\n  }\n\n  /**\n   * Note that this hash function will return a different hash than that of\n   * the elaborated string.\n   *\n   * @return uint64_t a hash of the underlying representation.\n   */\n  uint64_t hash() const { return absl::Hash<StatName>()(*this); }\n\n  bool operator==(const StatName& rhs) const {\n    return dataAsStringView() == rhs.dataAsStringView();\n  }\n  bool operator!=(const StatName& rhs) const { return !(*this == rhs); }\n\n  /**\n   * @return size_t the number of bytes in the symbol array, excluding the\n   *                overhead for the size itself.\n   */\n  size_t dataSize() const;\n\n  /**\n   * @return size_t the number of bytes in the symbol array, including the\n   *                  overhead for the size itself.\n   */\n  size_t size() const { return SymbolTableImpl::Encoding::totalSizeBytes(dataSize()); }\n\n  /**\n   * Copies the entire StatName representation into a MemBlockBuilder, including\n   * the length metadata at the beginning. The MemBlockBuilder must not have\n   * any other data in it.\n   *\n   * @param mem_block_builder the builder to receive the storage.\n   */\n  void copyToMemBlock(MemBlockBuilder<uint8_t>& mem_block_builder) {\n    ASSERT(mem_block_builder.size() == 0);\n    mem_block_builder.appendData(absl::MakeSpan(size_and_data_, size()));\n  }\n\n  /**\n   * Appends the data portion of the StatName representation into a\n   * MemBlockBuilder, excluding the length metadata. 
This is appropriate for\n   * join(), where several stat-names are combined, and we only need the\n   * aggregated length metadata.\n   *\n   * @param mem_block_builder the builder to receive the storage.\n   */\n  void appendDataToMemBlock(MemBlockBuilder<uint8_t>& storage) {\n    storage.appendData(absl::MakeSpan(data(), dataSize()));\n  }\n\n#ifndef ENVOY_CONFIG_COVERAGE\n  void debugPrint();\n#endif\n\n  /**\n   * @return A pointer to the first byte of data (skipping over size bytes).\n   */\n  const uint8_t* data() const {\n    if (size_and_data_ == nullptr) {\n      return nullptr;\n    }\n    return size_and_data_ + SymbolTableImpl::Encoding::encodingSizeBytes(dataSize());\n  }\n\n  const uint8_t* dataIncludingSize() const { return size_and_data_; }\n\n  /**\n   * @return whether this is empty.\n   */\n  bool empty() const { return size_and_data_ == nullptr || dataSize() == 0; }\n\nprivate:\n  /**\n   * Casts the raw data as a string_view. Note that this string_view will not\n   * be in human-readable form, but it will be compatible with a string-view\n   * hasher and comparator.\n   */\n  absl::string_view dataAsStringView() const {\n    return {reinterpret_cast<const char*>(data()),\n            static_cast<absl::string_view::size_type>(dataSize())};\n  }\n\n  const uint8_t* size_and_data_{nullptr};\n};\n\nStatName StatNameStorageBase::statName() const { return StatName(bytes_.get()); }\n\n/**\n * Contains the backing store for a StatName and enough context so it can\n * self-delete through RAII. This works by augmenting StatNameStorage with a\n * reference to the SymbolTable&, so it has an extra 8 bytes of footprint. 
It\n * is intended to be used in cases where simplicity of implementation is more\n * important than byte-savings, for example:\n *   - outside the stats system\n *   - in tests\n *   - as a scoped temp in a function\n * Due to the extra 8 bytes per instance, scalability should be taken into\n * account before using this as (say) a value or key in a map. In those\n * scenarios, it would be better to store the SymbolTable reference once\n * for the entire map.\n *\n * In the stat structures, we generally use StatNameStorage to avoid the\n * per-stat overhead.\n */\nclass StatNameManagedStorage : public StatNameStorage {\npublic:\n  // Basic constructor for when you have a name as a string, and need to\n  // generate symbols for it.\n  StatNameManagedStorage(absl::string_view name, SymbolTable& table)\n      : StatNameStorage(name, table), symbol_table_(table) {}\n  StatNameManagedStorage(StatNameManagedStorage&& src) noexcept\n      : StatNameStorage(std::move(src)), symbol_table_(src.symbol_table_) {}\n\n  ~StatNameManagedStorage() { free(symbol_table_); }\n\nprivate:\n  SymbolTable& symbol_table_;\n};\n\n/**\n * Holds backing-store for a dynamic stat, where are no global locks needed\n * to create a StatName from a string, but there is no sharing of token data\n * between names, so there may be more memory consumed.\n */\nclass StatNameDynamicStorage : public StatNameStorageBase {\npublic:\n  // Basic constructor based on a name. Note the table is used for\n  // a virtual-function call to encode the name, but no locks are taken\n  // in either implementation of the SymbolTable api.\n  StatNameDynamicStorage(absl::string_view name, SymbolTable& table)\n      : StatNameStorageBase(table.makeDynamicStorage(name)) {}\n  // Move constructor.\n  StatNameDynamicStorage(StatNameDynamicStorage&& src) noexcept\n      : StatNameStorageBase(std::move(src)) {}\n};\n\n/**\n * Maintains storage for a collection of StatName objects. 
Like\n * StatNameManagedStorage, this has an RAII usage model, taking\n * care of decrementing ref-counts in the SymbolTable for all\n * contained StatNames on destruction or on clear();\n *\n * Example usage:\n *   StatNamePool pool(symbol_table);\n *   StatName name1 = pool.add(\"name1\");\n *   StatName name2 = pool.add(\"name2\");\n *   const uint8_t* storage = pool.addReturningStorage(\"name3\");\n *   StatName name3(storage);\n */\nclass StatNamePool {\npublic:\n  explicit StatNamePool(SymbolTable& symbol_table) : symbol_table_(symbol_table) {}\n  ~StatNamePool() { clear(); }\n\n  /**\n   * Removes all StatNames from the pool.\n   */\n  void clear();\n\n  /**\n   * @param name the name to add the container.\n   * @return the StatName held in the container for this name.\n   */\n  StatName add(absl::string_view name);\n\n  /**\n   * Does essentially the same thing as add(), but returns the storage as a\n   * pointer which can later be used to create a StatName. This can be used\n   * to accumulate a vector of uint8_t* which can later be used to create\n   * StatName objects on demand.\n   *\n   * The use-case for this is in source/common/http/codes.cc, where we have a\n   * fixed sized array of atomic pointers, indexed by HTTP code. This API\n   * enables storing the allocated stat-name in that array of atomics, which\n   * enables content-avoidance when finding StatNames for frequently used HTTP\n   * codes.\n   *\n   * @param name the name to add the container.\n   * @return a pointer to the bytes held in the container for this name, suitable for\n   *         using to construct a StatName.\n   */\n  const uint8_t* addReturningStorage(absl::string_view name);\n\nprivate:\n  // We keep the stat names in a vector of StatNameStorage, storing the\n  // SymbolTable reference separately. 
This saves 8 bytes per StatName,\n  // at the cost of having a destructor that calls clear().\n  SymbolTable& symbol_table_;\n  std::vector<StatNameStorage> storage_vector_;\n};\n\n/**\n * Maintains storage for a collection of StatName objects constructed from\n * dynamically discovered strings. Like StatNameDynamicStorage, this has an RAII\n * usage model. Creating StatNames with this interface do not incur a\n * SymbolTable lock, but tokens are not shared across StatNames.\n *\n * The SymbolTable is required as a constructor argument to assist in encoding\n * the stat-names, which differs between FakeSymbolTableImpl and SymbolTableImpl.\n *\n * Example usage:\n *   StatNameDynamicPool pool(symbol_table);\n *   StatName name1 = pool.add(\"name1\");\n *   StatName name2 = pool.add(\"name2\");\n *\n * Note; StatNameDynamicPool::add(\"foo\") != StatNamePool::add(\"foo\"), even\n * though their string representations are identical. They also will not match\n * in map lookups. Tests for StatName with dynamic components must therefore\n * be looked up by string, via Stats::TestUtil::TestStore.\n */\nclass StatNameDynamicPool {\npublic:\n  explicit StatNameDynamicPool(SymbolTable& symbol_table) : symbol_table_(symbol_table) {}\n\n  /**\n   * @param name the name to add the container.\n   * @return the StatName held in the container for this name.\n   */\n  StatName add(absl::string_view name);\n\nprivate:\n  // We keep the stat names in a vector of StatNameStorage, storing the\n  // SymbolTable reference separately. This saves 8 bytes per StatName,\n  // at the cost of having a destructor that calls clear().\n  SymbolTable& symbol_table_;\n  std::vector<StatNameDynamicStorage> storage_vector_;\n};\n\n// Represents an ordered container of StatNames. The encoding for each StatName\n// is byte-packed together, so this carries less overhead than allocating the\n// storage separately. 
The trade-off is there is no random access; you can only\n// iterate through the StatNames.\n//\n// The maximum size of the list is 255 elements, so the length can fit in a\n// byte. It would not be difficult to increase this, but there does not appear\n// to be a current need.\nclass StatNameList {\npublic:\n  ~StatNameList();\n\n  /**\n   * @return true if populate() has been called on this list.\n   */\n  bool populated() const { return storage_ != nullptr; }\n\n  /**\n   * Iterates over each StatName in the list, calling f(StatName). f() should\n   * return true to keep iterating, or false to end the iteration.\n   *\n   * @param f The function to call on each stat.\n   */\n  void iterate(const std::function<bool(StatName)>& f) const;\n\n  /**\n   * Frees each StatName in the list. Failure to call this before destruction\n   * results in an ASSERT at destruction of the list and the SymbolTable.\n   *\n   * This is not done as part of destruction as the SymbolTable may already\n   * be destroyed.\n   *\n   * @param symbol_table the symbol table.\n   */\n  void clear(SymbolTable& symbol_table);\n\nprivate:\n  friend class FakeSymbolTableImpl;\n  friend class SymbolTableImpl;\n\n  /**\n   * Moves the specified storage into the list. The storage format is an\n   * array of bytes, organized like this:\n   *\n   * [0] The number of elements in the list (must be < 256).\n   * [1] low order 8 bits of the number of symbols in the first element.\n   * [2] high order 8 bits of the number of symbols in the first element.\n   * [3...] the symbols in the first element.\n   * ...\n   *\n   *\n   * For FakeSymbolTableImpl, each symbol is a single char, casted into a\n   * uint8_t. For SymbolTableImpl, each symbol is 1 or more bytes, in a\n   * variable-length encoding. 
See SymbolTableImpl::Encoding::addSymbol for\n   * details.\n   */\n  void moveStorageIntoList(SymbolTable::StoragePtr&& storage) { storage_ = std::move(storage); }\n\n  SymbolTable::StoragePtr storage_;\n};\n\n// Value-templatized hash-map with StatName key.\ntemplate <class T> using StatNameHashMap = absl::flat_hash_map<StatName, T>;\n\n// Hash-set of StatNames\nusing StatNameHashSet = absl::flat_hash_set<StatName>;\n\n// Helper class for sorting StatNames.\nstruct StatNameLessThan {\n  StatNameLessThan(const SymbolTable& symbol_table) : symbol_table_(symbol_table) {}\n  bool operator()(const StatName& a, const StatName& b) const {\n    return symbol_table_.lessThan(a, b);\n  }\n\n  const SymbolTable& symbol_table_;\n};\n\nstruct HeterogeneousStatNameHash {\n  // Specifying is_transparent indicates to the library infrastructure that\n  // type-conversions should not be applied when calling find(), but instead\n  // pass the actual types of the contained and searched-for objects directly to\n  // these functors. 
See\n  // https://en.cppreference.com/w/cpp/utility/functional/less_void for an\n  // official reference, and https://abseil.io/tips/144 for a description of\n  // using it in the context of absl.\n  using is_transparent = void; // NOLINT(readability-identifier-naming)\n\n  size_t operator()(StatName a) const { return a.hash(); }\n  size_t operator()(const StatNameStorage& a) const { return a.statName().hash(); }\n};\n\nstruct HeterogeneousStatNameEqual {\n  // See description for HeterogeneousStatNameHash::is_transparent.\n  using is_transparent = void; // NOLINT(readability-identifier-naming)\n\n  size_t operator()(StatName a, StatName b) const { return a == b; }\n  size_t operator()(const StatNameStorage& a, const StatNameStorage& b) const {\n    return a.statName() == b.statName();\n  }\n  size_t operator()(StatName a, const StatNameStorage& b) const { return a == b.statName(); }\n  size_t operator()(const StatNameStorage& a, StatName b) const { return a.statName() == b; }\n};\n\n// Encapsulates a set<StatNameStorage>. We use containment here rather than a\n// 'using' alias because we need to ensure that when the set is destructed,\n// StatNameStorage::free(symbol_table) is called on each entry. It is a little\n// easier at the call-sites in thread_local_store.cc to implement this an\n// explicit free() method, analogous to StatNameStorage::free(), compared to\n// storing a SymbolTable reference in the class and doing the free in the\n// destructor, like StatNameManagedStorage.\nclass StatNameStorageSet {\npublic:\n  using HashSet =\n      absl::flat_hash_set<StatNameStorage, HeterogeneousStatNameHash, HeterogeneousStatNameEqual>;\n  using Iterator = HashSet::iterator;\n\n  ~StatNameStorageSet();\n\n  /**\n   * Releases all symbols held in this set. 
Must be called prior to destruction.\n   *\n   * @param symbol_table The symbol table that owns the symbols.\n   */\n  void free(SymbolTable& symbol_table);\n\n  /**\n   * @param storage The StatNameStorage to add to the set.\n   */\n  std::pair<HashSet::iterator, bool> insert(StatNameStorage&& storage) {\n    return hash_set_.insert(std::move(storage));\n  }\n\n  /**\n   * @param stat_name The stat_name to find.\n   * @return the iterator pointing to the stat_name, or end() if not found.\n   */\n  Iterator find(StatName stat_name) { return hash_set_.find(stat_name); }\n\n  /**\n   * @return the end-marker.\n   */\n  Iterator end() { return hash_set_.end(); }\n\n  /**\n   * @return the number of elements in the set.\n   */\n  size_t size() const { return hash_set_.size(); }\n\nprivate:\n  HashSet hash_set_;\n};\n\n// Captures StatNames for lookup by string, keeping a map of 'built-ins' that is\n// expected to be populated during initialization.\n//\n// Ideally, builtins should be added during process initialization, in the\n// outermost relevant context. And as the builtins map is not mutex protected,\n// builtins must *not* be added to an existing StatNameSet in the request-path.\n//\n// It is fine to populate a new StatNameSet when (for example) an xDS\n// message reveals a new set of names to be used as stats. The population must\n// be completed prior to exposing the new StatNameSet to worker threads.\n//\n// To create stats using names discovered in the request path, dynamic stat\n// names must be used (see StatNameDynamicStorage). Consider using helper\n// methods such as Stats::Utility::counterFromElements in common/stats/utility.h\n// to simplify the process of allocating and combining stat names and creating\n// counters, gauges, and histograms from them.\nclass StatNameSet {\npublic:\n  // This object must be instantiated via SymbolTable::makeSet(), thus constructor is private.\n\n  /**\n   * Adds a string to the builtin map, which is not mutex protected. 
This map is\n   * always consulted first as a hit there means no lock is required.\n   *\n   * Builtins can only be added immediately after construction, as the builtins\n   * map is not mutex-protected.\n   */\n  void rememberBuiltin(absl::string_view str);\n\n  /**\n   * Remembers every string in a container as builtins.\n   */\n  template <class StringContainer> void rememberBuiltins(const StringContainer& container) {\n    for (const auto& str : container) {\n      rememberBuiltin(str);\n    }\n  }\n  void rememberBuiltins(const std::vector<const char*>& container) {\n    rememberBuiltins<std::vector<const char*>>(container);\n  }\n\n  /**\n   * Finds a builtin StatName by name. If the builtin has not been registered,\n   * then the fallback is returned.\n   *\n   * @return the StatName or fallback.\n   */\n  StatName getBuiltin(absl::string_view token, StatName fallback) const;\n\n  /**\n   * Adds a StatName using the pool, but without remembering it in any maps.\n   *\n   * For convenience, StatNameSet offers pass-through thread-safe access to\n   * its mutex-protected pool. 
This is useful in constructor initializers, when\n   * StatNames are needed both from compile-time constants, as well as from\n   * other constructor args, e.g.\n   *    MyClass(const std::vector<absl::string_view>& strings, Stats::SymbolTable& symbol_table)\n   *        : stat_name_set_(symbol_table),\n   *          known_const_(stat_name_set_.add(\"known_const\")) { // unmapped constants from pool\n   *      stat_name_set_.rememberBuiltins(strings); // mapped builtins.\n   *    }\n   * This avoids the need to make two different pools; one backing the\n   * StatNameSet mapped entries, and the other backing the set passed in via the\n   * constructor.\n   *\n   * @param str The string to add as a StatName\n   * @return The StatName for str.\n   */\n  StatName add(absl::string_view str) {\n    absl::MutexLock lock(&mutex_);\n    return pool_.add(str);\n  }\n\nprivate:\n  friend class FakeSymbolTableImpl;\n  friend class SymbolTableImpl;\n\n  StatNameSet(SymbolTable& symbol_table, absl::string_view name);\n\n  const std::string name_;\n  Stats::SymbolTable& symbol_table_;\n  Stats::StatNamePool pool_ ABSL_GUARDED_BY(mutex_);\n  mutable absl::Mutex mutex_;\n  using StringStatNameMap = absl::flat_hash_map<std::string, Stats::StatName>;\n  StringStatNameMap builtin_stat_names_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/tag_extractor_impl.cc",
    "content": "#include \"common/stats/tag_extractor_impl.h\"\n\n#include <cstring>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/perf_annotation.h\"\n#include \"common/common/regex.h\"\n\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nnamespace {\n\nbool regexStartsWithDot(absl::string_view regex) {\n  return absl::StartsWith(regex, \"\\\\.\") || absl::StartsWith(regex, \"(?=\\\\.)\");\n}\n\n} // namespace\n\nTagExtractorImpl::TagExtractorImpl(const std::string& name, const std::string& regex,\n                                   const std::string& substr)\n    : name_(name), prefix_(std::string(extractRegexPrefix(regex))), substr_(substr),\n      regex_(Regex::Utility::parseStdRegex(regex)) {}\n\nstd::string TagExtractorImpl::extractRegexPrefix(absl::string_view regex) {\n  std::string prefix;\n  if (absl::StartsWith(regex, \"^\")) {\n    for (absl::string_view::size_type i = 1; i < regex.size(); ++i) {\n      if (!absl::ascii_isalnum(regex[i]) && (regex[i] != '_')) {\n        if (i > 1) {\n          const bool last_char = i == regex.size() - 1;\n          if ((!last_char && regexStartsWithDot(regex.substr(i))) ||\n              (last_char && (regex[i] == '$'))) {\n            prefix.append(regex.data() + 1, i - 1);\n          }\n        }\n        break;\n      }\n    }\n  }\n  return prefix;\n}\n\nTagExtractorPtr TagExtractorImpl::createTagExtractor(const std::string& name,\n                                                     const std::string& regex,\n                                                     const std::string& substr) {\n\n  if (name.empty()) {\n    throw EnvoyException(\"tag_name cannot be empty\");\n  }\n\n  if (regex.empty()) {\n    throw EnvoyException(fmt::format(\n        \"No regex specified for tag specifier and no default regex for name: '{}'\", name));\n  }\n  return TagExtractorPtr{new 
TagExtractorImpl(name, regex, substr)};\n}\n\nbool TagExtractorImpl::substrMismatch(absl::string_view stat_name) const {\n  return !substr_.empty() && stat_name.find(substr_) == absl::string_view::npos;\n}\n\nbool TagExtractorImpl::extractTag(absl::string_view stat_name, TagVector& tags,\n                                  IntervalSet<size_t>& remove_characters) const {\n  PERF_OPERATION(perf);\n\n  if (substrMismatch(stat_name)) {\n    PERF_RECORD(perf, \"re-skip-substr\", name_);\n    return false;\n  }\n\n  std::match_results<absl::string_view::iterator> match;\n  // The regex must match and contain one or more subexpressions (all after the first are ignored).\n  if (std::regex_search<absl::string_view::iterator>(stat_name.begin(), stat_name.end(), match,\n                                                     regex_) &&\n      match.size() > 1) {\n    // remove_subexpr is the first submatch. It represents the portion of the string to be removed.\n    const auto& remove_subexpr = match[1];\n\n    // value_subexpr is the optional second submatch. It is usually inside the first submatch\n    // (remove_subexpr) to allow the expression to strip off extra characters that should be removed\n    // from the string but also not necessary in the tag value (\".\" for example). If there is no\n    // second submatch, then the value_subexpr is the same as the remove_subexpr.\n    const auto& value_subexpr = match.size() > 2 ? 
match[2] : remove_subexpr;\n\n    tags.emplace_back();\n    Tag& tag = tags.back();\n    tag.name_ = name_;\n    tag.value_ = value_subexpr.str();\n\n    // Determines which characters to remove from stat_name to elide remove_subexpr.\n    std::string::size_type start = remove_subexpr.first - stat_name.begin();\n    std::string::size_type end = remove_subexpr.second - stat_name.begin();\n    remove_characters.insert(start, end);\n    PERF_RECORD(perf, \"re-match\", name_);\n    return true;\n  }\n  PERF_RECORD(perf, \"re-miss\", name_);\n  return false;\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/tag_extractor_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <regex>\n#include <string>\n\n#include \"envoy/stats/tag_extractor.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass TagExtractorImpl : public TagExtractor {\npublic:\n  /**\n   * Creates a tag extractor from the regex provided. name and regex must be non-empty.\n   * @param name name for tag extractor.\n   * @param regex regex expression.\n   * @param substr a substring that -- if provided -- must be present in a stat name\n   *               in order to match the regex. This is an optional performance tweak\n   *               to avoid large numbers of failed regex lookups.\n   * @return TagExtractorPtr newly constructed TagExtractor.\n   */\n  static TagExtractorPtr createTagExtractor(const std::string& name, const std::string& regex,\n                                            const std::string& substr = \"\");\n\n  TagExtractorImpl(const std::string& name, const std::string& regex,\n                   const std::string& substr = \"\");\n  std::string name() const override { return name_; }\n  bool extractTag(absl::string_view tag_extracted_name, TagVector& tags,\n                  IntervalSet<size_t>& remove_characters) const override;\n  absl::string_view prefixToken() const override { return prefix_; }\n\n  /**\n   * @param stat_name The stat name\n   * @return bool indicates whether tag extraction should be skipped for this stat_name due\n   * to a substring mismatch.\n   */\n  bool substrMismatch(absl::string_view stat_name) const;\n\nprivate:\n  /**\n   * Examines a regex string, looking for the pattern: ^alphanumerics_with_underscores\\.\n   * Returns \"alphanumerics_with_underscores\" if that pattern is found, empty-string otherwise.\n   * @param regex absl::string_view the regex to scan for prefixes.\n   * @return std::string the prefix, or \"\" if no prefix found.\n   */\n  static std::string extractRegexPrefix(absl::string_view regex);\n  const 
std::string name_;\n  const std::string prefix_;\n  const std::string substr_;\n  const std::regex regex_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/tag_producer_impl.cc",
    "content": "#include \"common/stats/tag_producer_impl.h\"\n\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/stats/tag_extractor_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nTagProducerImpl::TagProducerImpl(const envoy::config::metrics::v3::StatsConfig& config) {\n  // To check name conflict.\n  reserveResources(config);\n  absl::node_hash_set<std::string> names = addDefaultExtractors(config);\n\n  for (const auto& tag_specifier : config.stats_tags()) {\n    const std::string& name = tag_specifier.tag_name();\n    if (!names.emplace(name).second) {\n      throw EnvoyException(fmt::format(\"Tag name '{}' specified twice.\", name));\n    }\n\n    // If no tag value is found, fallback to default regex to keep backward compatibility.\n    if (tag_specifier.tag_value_case() ==\n            envoy::config::metrics::v3::TagSpecifier::TagValueCase::TAG_VALUE_NOT_SET ||\n        tag_specifier.tag_value_case() ==\n            envoy::config::metrics::v3::TagSpecifier::TagValueCase::kRegex) {\n\n      if (tag_specifier.regex().empty()) {\n        if (addExtractorsMatching(name) == 0) {\n          throw EnvoyException(fmt::format(\n              \"No regex specified for tag specifier and no default regex for name: '{}'\", name));\n        }\n      } else {\n        addExtractor(Stats::TagExtractorImpl::createTagExtractor(name, tag_specifier.regex()));\n      }\n    } else if (tag_specifier.tag_value_case() ==\n               envoy::config::metrics::v3::TagSpecifier::TagValueCase::kFixedValue) {\n      default_tags_.emplace_back(Stats::Tag{name, tag_specifier.fixed_value()});\n    }\n  }\n}\n\nint TagProducerImpl::addExtractorsMatching(absl::string_view name) {\n  int num_found = 0;\n  for (const auto& desc : Config::TagNames::get().descriptorVec()) {\n    if (desc.name_ == name) {\n      addExtractor(\n          
Stats::TagExtractorImpl::createTagExtractor(desc.name_, desc.regex_, desc.substr_));\n      ++num_found;\n    }\n  }\n  return num_found;\n}\n\nvoid TagProducerImpl::addExtractor(TagExtractorPtr extractor) {\n  const absl::string_view prefix = extractor->prefixToken();\n  if (prefix.empty()) {\n    tag_extractors_without_prefix_.emplace_back(std::move(extractor));\n  } else {\n    tag_extractor_prefix_map_[prefix].emplace_back(std::move(extractor));\n  }\n}\n\nvoid TagProducerImpl::forEachExtractorMatching(\n    absl::string_view stat_name, std::function<void(const TagExtractorPtr&)> f) const {\n  IntervalSetImpl<size_t> remove_characters;\n  for (const TagExtractorPtr& tag_extractor : tag_extractors_without_prefix_) {\n    f(tag_extractor);\n  }\n  const absl::string_view::size_type dot = stat_name.find('.');\n  if (dot != std::string::npos) {\n    const absl::string_view token = absl::string_view(stat_name.data(), dot);\n    const auto iter = tag_extractor_prefix_map_.find(token);\n    if (iter != tag_extractor_prefix_map_.end()) {\n      for (const TagExtractorPtr& tag_extractor : iter->second) {\n        f(tag_extractor);\n      }\n    }\n  }\n}\n\nstd::string TagProducerImpl::produceTags(absl::string_view metric_name, TagVector& tags) const {\n  // TODO(jmarantz): Skip the creation of string-based tags, creating a StatNameTagVector instead.\n  tags.insert(tags.end(), default_tags_.begin(), default_tags_.end());\n  IntervalSetImpl<size_t> remove_characters;\n  forEachExtractorMatching(\n      metric_name, [&remove_characters, &tags, &metric_name](const TagExtractorPtr& tag_extractor) {\n        tag_extractor->extractTag(metric_name, tags, remove_characters);\n      });\n  return StringUtil::removeCharacters(metric_name, remove_characters);\n}\n\nvoid TagProducerImpl::reserveResources(const envoy::config::metrics::v3::StatsConfig& config) {\n  
default_tags_.reserve(config.stats_tags().size());\n}\n\nabsl::node_hash_set<std::string>\nTagProducerImpl::addDefaultExtractors(const envoy::config::metrics::v3::StatsConfig& config) {\n  absl::node_hash_set<std::string> names;\n  if (!config.has_use_all_default_tags() || config.use_all_default_tags().value()) {\n    for (const auto& desc : Config::TagNames::get().descriptorVec()) {\n      names.emplace(desc.name_);\n      addExtractor(\n          Stats::TagExtractorImpl::createTagExtractor(desc.name_, desc.regex_, desc.substr_));\n    }\n  }\n  return names;\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/tag_producer_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/stats/tag_extractor.h\"\n#include \"envoy/stats/tag_producer.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Organizes a collection of TagExtractors so that stat-names can be processed without\n * iterating through all extractors.\n */\nclass TagProducerImpl : public TagProducer {\npublic:\n  TagProducerImpl(const envoy::config::metrics::v3::StatsConfig& config);\n  TagProducerImpl() = default;\n\n  /**\n   * Take a metric name and a vector then add proper tags into the vector and\n   * return an extracted metric name.\n   * @param metric_name std::string a name of Stats::Metric (Counter, Gauge, Histogram).\n   * @param tags std::vector a set of Stats::Tag.\n   */\n  std::string produceTags(absl::string_view metric_name, TagVector& tags) const override;\n\nprivate:\n  friend class DefaultTagRegexTester;\n\n  /**\n   * Adds a TagExtractor to the collection of tags, tracking prefixes to help make\n   * produceTags run efficiently by trying only extractors that have a chance to match.\n   * @param extractor TagExtractorPtr the extractor to add.\n   */\n  void addExtractor(TagExtractorPtr extractor);\n\n  /**\n   * Adds all default extractors matching the specified tag name. In this model,\n   * more than one TagExtractor can be used to generate a given tag. 
The default\n   * extractors are specified in common/config/well_known_names.cc.\n   * @param name absl::string_view the extractor to add.\n   * @return int the number of matching extractors.\n   */\n  int addExtractorsMatching(absl::string_view name);\n\n  /**\n   * Roughly estimate the size of the vectors.\n   * @param config const envoy::config::metrics::v2::StatsConfig& the config.\n   */\n  void reserveResources(const envoy::config::metrics::v3::StatsConfig& config);\n\n  /**\n   * Adds all default extractors from well_known_names.cc into the\n   * collection. Returns a set of names of all default extractors\n   * into a string-set for dup-detection against new stat names\n   * specified in the configuration.\n   * @param config const envoy::config::metrics::v2::StatsConfig& the config.\n   * @return names absl::node_hash_set<std::string> the set of names to populate\n   */\n  absl::node_hash_set<std::string>\n  addDefaultExtractors(const envoy::config::metrics::v3::StatsConfig& config);\n\n  /**\n   * Iterates over every tag extractor that might possibly match stat_name, calling\n   * callback f for each one. This is broken out this way to reduce code redundancy\n   * during testing, where we want to verify that extraction is order-independent.\n   * The possibly-matching-extractors list is computed by:\n   *   1. Finding the first '.' separated token in stat_name.\n   *   2. Collecting the TagExtractors whose regexes have that same prefix \"^prefix\\\\.\"\n   *   3. 
Collecting also the TagExtractors whose regexes don't start with any prefix.\n   * In the future, we may also do substring searches in some cases.\n   * See DefaultTagRegexTester::produceTagsReverse in test/common/stats/stats_impl_test.cc.\n   *\n   * @param stat_name const std::string& the stat name.\n   * @param f std::function<void(const TagExtractorPtr&)> function to call for each extractor.\n   */\n  void forEachExtractorMatching(absl::string_view stat_name,\n                                std::function<void(const TagExtractorPtr&)> f) const;\n\n  std::vector<TagExtractorPtr> tag_extractors_without_prefix_;\n\n  // Maps a prefix word extracted out of a regex to a vector of TagExtractors. Note that\n  // the storage for the prefix string is owned by the TagExtractor, which, depending on\n  // implementation, may need make a copy of the prefix.\n  absl::flat_hash_map<absl::string_view, std::vector<TagExtractorPtr>> tag_extractor_prefix_map_;\n  TagVector default_tags_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/tag_utility.cc",
    "content": "#include \"common/stats/tag_utility.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace TagUtility {\n\nTagStatNameJoiner::TagStatNameJoiner(StatName prefix, StatName stat_name,\n                                     StatNameTagVectorOptConstRef stat_name_tags,\n                                     SymbolTable& symbol_table) {\n  prefix_storage_ = symbol_table.join({prefix, stat_name});\n  tag_extracted_name_ = StatName(prefix_storage_.get());\n\n  if (stat_name_tags) {\n    full_name_storage_ =\n        joinNameAndTags(StatName(prefix_storage_.get()), *stat_name_tags, symbol_table);\n    name_with_tags_ = StatName(full_name_storage_.get());\n  } else {\n    name_with_tags_ = StatName(prefix_storage_.get());\n  }\n}\n\nTagStatNameJoiner::TagStatNameJoiner(StatName stat_name,\n                                     StatNameTagVectorOptConstRef stat_name_tags,\n                                     SymbolTable& symbol_table) {\n  tag_extracted_name_ = stat_name;\n\n  if (stat_name_tags) {\n    full_name_storage_ = joinNameAndTags(stat_name, *stat_name_tags, symbol_table);\n    name_with_tags_ = StatName(full_name_storage_.get());\n  } else {\n    name_with_tags_ = stat_name;\n  }\n}\n\nSymbolTable::StoragePtr TagStatNameJoiner::joinNameAndTags(StatName name,\n                                                           const StatNameTagVector& tags,\n                                                           SymbolTable& symbol_table) {\n  StatNameVec stat_names;\n  stat_names.reserve(1 + 2 * tags.size());\n  stat_names.emplace_back(name);\n\n  for (const auto& tag : tags) {\n    stat_names.emplace_back(tag.first);\n    stat_names.emplace_back(tag.second);\n  }\n\n  return symbol_table.join(stat_names);\n}\n} // namespace TagUtility\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/tag_utility.h",
    "content": "#pragma once\n\n#include \"envoy/stats/symbol_table.h\"\n#include \"envoy/stats/tag.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace TagUtility {\n\n/**\n * Combines a stat name with an optional set of tag to create the final stat name to use. The\n * resulting StatNames will be valid through the lifetime of this object and all provided stat\n * names.\n */\nclass TagStatNameJoiner {\npublic:\n  /**\n   * Combines a prefix, stat name and tags into a single stat name.\n   * @param prefix StaName the stat prefix to use.\n   * @param name StaName the stat name to use.\n   * @param stat_name_tags optionally StatNameTagVector the stat name tags to add to the stat name.\n   */\n  TagStatNameJoiner(StatName prefix, StatName stat_name,\n                    StatNameTagVectorOptConstRef stat_name_tags, SymbolTable& symbol_table);\n\n  /**\n   * Combines a stat name and tags into a single stat name.\n   * @param name StaName the stat name to use.\n   * @param stat_name_tags StatNameTagVector the stat name tags to optionally add to the stat name.\n   */\n  TagStatNameJoiner(StatName stat_name, StatNameTagVectorOptConstRef stat_name_tags,\n                    SymbolTable& symbol_table);\n\n  /**\n   * @return StatName the full stat name, including the tag suffix.\n   */\n  StatName nameWithTags() const { return name_with_tags_; }\n\n  /**\n   * @return StatName the stat name without the tags appended.\n   */\n  StatName tagExtractedName() const { return tag_extracted_name_; }\n\nprivate:\n  // TODO(snowp): This isn't really \"tag extracted\", but we'll use this for the sake of consistency\n  // until we can change the naming convention throughout.\n  StatName tag_extracted_name_;\n  SymbolTable::StoragePtr prefix_storage_;\n  SymbolTable::StoragePtr full_name_storage_;\n  StatName name_with_tags_;\n\n  SymbolTable::StoragePtr joinNameAndTags(StatName name, const StatNameTagVector& stat_name_tags,\n           
                               SymbolTable& symbol_table);\n};\n} // namespace TagUtility\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/thread_local_store.cc",
    "content": "#include \"common/stats/thread_local_store.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/stats/allocator.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/sink.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/stats/histogram_impl.h\"\n#include \"common/stats/stats_matcher_impl.h\"\n#include \"common/stats/tag_producer_impl.h\"\n#include \"common/stats/tag_utility.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nconst char ThreadLocalStoreImpl::MainDispatcherCleanupSync[] = \"main-dispatcher-cleanup\";\n\nThreadLocalStoreImpl::ThreadLocalStoreImpl(Allocator& alloc)\n    : alloc_(alloc), default_scope_(ThreadLocalStoreImpl::createScope(\"\")),\n      tag_producer_(std::make_unique<TagProducerImpl>()),\n      stats_matcher_(std::make_unique<StatsMatcherImpl>()),\n      histogram_settings_(std::make_unique<HistogramSettingsImpl>()),\n      heap_allocator_(alloc.symbolTable()), null_counter_(alloc.symbolTable()),\n      null_gauge_(alloc.symbolTable()), null_histogram_(alloc.symbolTable()),\n      null_text_readout_(alloc.symbolTable()),\n      well_known_tags_(alloc.symbolTable().makeSet(\"well_known_tags\")) {\n  for (const auto& desc : Config::TagNames::get().descriptorVec()) {\n    well_known_tags_->rememberBuiltin(desc.name_);\n  }\n}\n\nThreadLocalStoreImpl::~ThreadLocalStoreImpl() {\n  ASSERT(shutting_down_ || !threading_ever_initialized_);\n  default_scope_.reset();\n  ASSERT(scopes_.empty());\n}\n\nvoid ThreadLocalStoreImpl::setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) {\n  Thread::LockGuard lock(lock_);\n  for (ScopeImpl* scope : scopes_) {\n    ASSERT(scope->central_cache_->histograms_.empty());\n  }\n  histogram_settings_ = std::move(histogram_settings);\n}\n\nvoid ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) {\n  
stats_matcher_ = std::move(stats_matcher);\n  if (stats_matcher_->acceptsAll()) {\n    return;\n  }\n\n  // The Filesystem and potentially other stat-registering objects are\n  // constructed prior to the stat-matcher, and those add stats\n  // in the default_scope. There should be no requests, so there will\n  // be no copies in TLS caches.\n  Thread::LockGuard lock(lock_);\n  const uint32_t first_histogram_index = deleted_histograms_.size();\n  for (ScopeImpl* scope : scopes_) {\n    removeRejectedStats(scope->central_cache_->counters_, deleted_counters_);\n    removeRejectedStats(scope->central_cache_->gauges_, deleted_gauges_);\n    removeRejectedStats(scope->central_cache_->histograms_, deleted_histograms_);\n    removeRejectedStats(scope->central_cache_->text_readouts_, deleted_text_readouts_);\n  }\n\n  // Remove any newly rejected histograms from histogram_set_.\n  {\n    Thread::LockGuard hist_lock(hist_mutex_);\n    for (uint32_t i = first_histogram_index; i < deleted_histograms_.size(); ++i) {\n      uint32_t erased = histogram_set_.erase(deleted_histograms_[i].get());\n      ASSERT(erased == 1);\n    }\n  }\n}\n\ntemplate <class StatMapClass, class StatListClass>\nvoid ThreadLocalStoreImpl::removeRejectedStats(StatMapClass& map, StatListClass& list) {\n  StatNameVec remove_list;\n  for (auto& stat : map) {\n    if (rejects(stat.first)) {\n      remove_list.push_back(stat.first);\n    }\n  }\n  for (StatName stat_name : remove_list) {\n    auto iter = map.find(stat_name);\n    ASSERT(iter != map.end());\n    list.push_back(iter->second); // Save SharedPtr to the list to avoid invalidating refs to stat.\n    map.erase(iter);\n  }\n}\n\nbool ThreadLocalStoreImpl::rejects(StatName stat_name) const {\n  ASSERT(!stats_matcher_->acceptsAll());\n\n  // TODO(ambuc): If stats_matcher_ depends on regexes, this operation (on the\n  // hot path) could become prohibitively expensive. 
Revisit this usage in the\n  // future.\n  //\n  // Also note that the elaboration of the stat-name into a string is expensive,\n  // so I think it might be better to move the matcher test until after caching,\n  // unless its acceptsAll/rejectsAll.\n  return stats_matcher_->rejectsAll() ||\n         stats_matcher_->rejects(constSymbolTable().toString(stat_name));\n}\n\nstd::vector<CounterSharedPtr> ThreadLocalStoreImpl::counters() const {\n  // Handle de-dup due to overlapping scopes.\n  std::vector<CounterSharedPtr> ret;\n  StatNameHashSet names;\n  Thread::LockGuard lock(lock_);\n  for (ScopeImpl* scope : scopes_) {\n    for (auto& counter : scope->central_cache_->counters_) {\n      if (names.insert(counter.first).second) {\n        ret.push_back(counter.second);\n      }\n    }\n  }\n\n  return ret;\n}\n\nScopePtr ThreadLocalStoreImpl::createScope(const std::string& name) {\n  auto new_scope = std::make_unique<ScopeImpl>(*this, name);\n  Thread::LockGuard lock(lock_);\n  scopes_.emplace(new_scope.get());\n  return new_scope;\n}\n\nstd::vector<GaugeSharedPtr> ThreadLocalStoreImpl::gauges() const {\n  // Handle de-dup due to overlapping scopes.\n  std::vector<GaugeSharedPtr> ret;\n  StatNameHashSet names;\n  Thread::LockGuard lock(lock_);\n  for (ScopeImpl* scope : scopes_) {\n    for (auto& gauge_iter : scope->central_cache_->gauges_) {\n      const GaugeSharedPtr& gauge = gauge_iter.second;\n      if (gauge->importMode() != Gauge::ImportMode::Uninitialized &&\n          names.insert(gauge_iter.first).second) {\n        ret.push_back(gauge);\n      }\n    }\n  }\n\n  return ret;\n}\n\nstd::vector<TextReadoutSharedPtr> ThreadLocalStoreImpl::textReadouts() const {\n  // Handle de-dup due to overlapping scopes.\n  std::vector<TextReadoutSharedPtr> ret;\n  StatNameHashSet names;\n  Thread::LockGuard lock(lock_);\n  for (ScopeImpl* scope : scopes_) {\n    for (auto& text_readout : scope->central_cache_->text_readouts_) {\n      if 
(names.insert(text_readout.first).second) {\n        ret.push_back(text_readout.second);\n      }\n    }\n  }\n\n  return ret;\n}\n\nstd::vector<ParentHistogramSharedPtr> ThreadLocalStoreImpl::histograms() const {\n  std::vector<ParentHistogramSharedPtr> ret;\n  Thread::LockGuard lock(hist_mutex_);\n  {\n    ret.reserve(histogram_set_.size());\n    for (const auto& histogram_ptr : histogram_set_) {\n      ret.emplace_back(histogram_ptr);\n    }\n  }\n\n  return ret;\n}\n\nvoid ThreadLocalStoreImpl::initializeThreading(Event::Dispatcher& main_thread_dispatcher,\n                                               ThreadLocal::Instance& tls) {\n  threading_ever_initialized_ = true;\n  main_thread_dispatcher_ = &main_thread_dispatcher;\n  tls_ = tls.allocateSlot();\n  tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<TlsCache>();\n  });\n}\n\nvoid ThreadLocalStoreImpl::shutdownThreading() {\n  // This will block both future cache fills as well as cache flushes.\n  shutting_down_ = true;\n  Thread::LockGuard lock(hist_mutex_);\n  for (ParentHistogramImpl* histogram : histogram_set_) {\n    histogram->setShuttingDown(true);\n  }\n  histogram_set_.clear();\n}\n\nvoid ThreadLocalStoreImpl::mergeHistograms(PostMergeCb merge_complete_cb) {\n  if (!shutting_down_) {\n    ASSERT(!merge_in_progress_);\n    merge_in_progress_ = true;\n    tls_->runOnAllThreads(\n        [](ThreadLocal::ThreadLocalObjectSharedPtr object)\n            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n          for (const auto& id_hist : object->asType<TlsCache>().tls_histogram_cache_) {\n            const TlsHistogramSharedPtr& tls_hist = id_hist.second;\n            tls_hist->beginMerge();\n          }\n          return object;\n        },\n        [this, merge_complete_cb]() -> void { mergeInternal(merge_complete_cb); });\n  } else {\n    // If server is shutting down, just call the callback to allow flush to continue.\n    merge_complete_cb();\n  
}\n}\n\nvoid ThreadLocalStoreImpl::mergeInternal(PostMergeCb merge_complete_cb) {\n  if (!shutting_down_) {\n    for (const ParentHistogramSharedPtr& histogram : histograms()) {\n      histogram->merge();\n    }\n    merge_complete_cb();\n    merge_in_progress_ = false;\n  }\n}\n\nThreadLocalStoreImpl::CentralCacheEntry::~CentralCacheEntry() {\n  // Assert that the symbol-table is valid, so we get good test coverage of\n  // the validity of the symbol table at the time this destructor runs. This\n  // is because many tests will not populate rejected_stats_.\n  ASSERT(symbol_table_.toString(StatNameManagedStorage(\"Hello.world\", symbol_table_).statName()) ==\n         \"Hello.world\");\n  rejected_stats_.free(symbol_table_);\n}\n\nvoid ThreadLocalStoreImpl::releaseScopeCrossThread(ScopeImpl* scope) {\n  Thread::ReleasableLockGuard lock(lock_);\n  ASSERT(scopes_.count(scope) == 1);\n  scopes_.erase(scope);\n\n  // This method is called directly from the ScopeImpl destructor, but we can't\n  // destroy scope->central_cache_ until all the TLS caches are destroyed, as\n  // the TLS caches reference the Counters and Gauges owned by the central\n  // cache. We don't want the maps in the TLS caches to bump the\n  // reference-count, as decrementing the count requires an allocator lock,\n  // which would cause a storm of contention during scope destruction.\n  //\n  // So instead we have a 2-phase destroy:\n  //   1. destroy all the TLS caches\n  //   2. destroy the central cache.\n  //\n  // Since this is called from ScopeImpl's destructor, we must bump the\n  // ref-count of the central-cache by copying to a local scoped pointer, and\n  // keep that reference alive until all the TLS caches are clear.\n  CentralCacheEntrySharedPtr central_cache = scope->central_cache_;\n\n  // This can happen from any thread. 
We post() back to the main thread which will initiate the\n  // cache flush operation.\n  if (!shutting_down_ && main_thread_dispatcher_) {\n    const uint64_t scope_id = scope->scope_id_;\n    lock.release();\n\n    // TODO(jmarantz): consider batching all the scope IDs that should be\n    // cleared from TLS caches to reduce bursts of runOnAllThreads on a large\n    // config update. See the pattern below used for histograms.\n    main_thread_dispatcher_->post([this, central_cache, scope_id]() {\n      sync_.syncPoint(MainDispatcherCleanupSync);\n      clearScopeFromCaches(scope_id, central_cache);\n    });\n  }\n}\n\nvoid ThreadLocalStoreImpl::releaseHistogramCrossThread(uint64_t histogram_id) {\n  // This can happen from any thread. We post() back to the main thread which will initiate the\n  // cache flush operation.\n  if (!shutting_down_ && main_thread_dispatcher_) {\n    main_thread_dispatcher_->post(\n        [this, histogram_id]() { clearHistogramFromCaches(histogram_id); });\n  }\n}\n\nThreadLocalStoreImpl::TlsCacheEntry&\nThreadLocalStoreImpl::TlsCache::insertScope(uint64_t scope_id) {\n  return scope_cache_[scope_id];\n}\n\nvoid ThreadLocalStoreImpl::TlsCache::eraseScope(uint64_t scope_id) { scope_cache_.erase(scope_id); }\nvoid ThreadLocalStoreImpl::TlsCache::eraseHistogram(uint64_t histogram_id) {\n  // This is called for every histogram in every thread, even though the\n  // histogram may not have been cached in each thread yet. 
So we don't\n  // want to check whether the erase() call erased anything.\n  tls_histogram_cache_.erase(histogram_id);\n}\n\nvoid ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id,\n                                                CentralCacheEntrySharedPtr central_cache) {\n  // If we are shutting down we no longer perform cache flushes as workers may be shutting down\n  // at the same time.\n  if (!shutting_down_) {\n    // Perform a cache flush on all threads.\n    tls_->runOnAllThreads(\n        [scope_id](ThreadLocal::ThreadLocalObjectSharedPtr object)\n            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n          object->asType<TlsCache>().eraseScope(scope_id);\n          return object;\n        },\n        [central_cache]() { /* Holds onto central_cache until all tls caches are clear */ });\n  }\n}\n\nvoid ThreadLocalStoreImpl::clearHistogramFromCaches(uint64_t histogram_id) {\n  // If we are shutting down we no longer perform cache flushes as workers may be shutting down\n  // at the same time.\n  if (!shutting_down_) {\n    // Perform a cache flush on all threads.\n    //\n    // TODO(jmarantz): If this cross-thread posting proves to be a performance\n    // bottleneck,\n    // https://gist.github.com/jmarantz/838cb6de7e74c0970ea6b63eded0139a\n    // contains a patch that will implement batching together to clear multiple\n    // histograms.\n    tls_->runOnAllThreads([histogram_id](ThreadLocal::ThreadLocalObjectSharedPtr object)\n                              -> ThreadLocal::ThreadLocalObjectSharedPtr {\n      object->asType<TlsCache>().eraseHistogram(histogram_id);\n      return object;\n    });\n  }\n}\n\nThreadLocalStoreImpl::ScopeImpl::ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix)\n    : scope_id_(parent.next_scope_id_++), parent_(parent),\n      prefix_(Utility::sanitizeStatsName(prefix), parent.alloc_.symbolTable()),\n      central_cache_(new CentralCacheEntry(parent.alloc_.symbolTable())) 
{}\n\nThreadLocalStoreImpl::ScopeImpl::~ScopeImpl() {\n  parent_.releaseScopeCrossThread(this);\n  prefix_.free(symbolTable());\n}\n\n// Helper for managing the potential truncation of tags from the metric names and\n// converting them to StatName. Making the tag extraction optional within this class simplifies the\n// RAII ergonomics (as opposed to making the construction of this object conditional).\n//\n// The StatNameTagVector returned by this object will be valid as long as this object is in scope\n// and the provided stat_name_tags are valid.\n//\n// When tag extraction is not done, this class is just a passthrough for the provided name/tags.\nclass StatNameTagHelper {\npublic:\n  StatNameTagHelper(ThreadLocalStoreImpl& tls, StatName name,\n                    const absl::optional<StatNameTagVector>& stat_name_tags)\n      : pool_(tls.symbolTable()), stat_name_tags_(stat_name_tags.value_or(StatNameTagVector())) {\n    if (!stat_name_tags) {\n      TagVector tags;\n      tls.symbolTable().callWithStringView(name, [&tags, &tls, this](absl::string_view name_str) {\n        tag_extracted_name_ = pool_.add(tls.tagProducer().produceTags(name_str, tags));\n      });\n      StatName empty;\n      for (const auto& tag : tags) {\n        StatName tag_name = tls.wellKnownTags().getBuiltin(tag.name_, empty);\n        if (tag_name.empty()) {\n          tag_name = pool_.add(tag.name_);\n        }\n        stat_name_tags_.emplace_back(tag_name, pool_.add(tag.value_));\n      }\n    } else {\n      tag_extracted_name_ = name;\n    }\n  }\n\n  const StatNameTagVector& statNameTags() const { return stat_name_tags_; }\n  StatName tagExtractedName() const { return tag_extracted_name_; }\n\nprivate:\n  StatNamePool pool_;\n  StatNameTagVector stat_name_tags_;\n  StatName tag_extracted_name_;\n};\n\nbool ThreadLocalStoreImpl::checkAndRememberRejection(StatName name,\n                                                     StatNameStorageSet& central_rejected_stats,\n                  
                                   StatNameHashSet* tls_rejected_stats) {\n  if (stats_matcher_->acceptsAll()) {\n    return false;\n  }\n\n  auto iter = central_rejected_stats.find(name);\n  const StatNameStorage* rejected_name = nullptr;\n  if (iter != central_rejected_stats.end()) {\n    rejected_name = &(*iter);\n  } else {\n    if (rejects(name)) {\n      auto insertion = central_rejected_stats.insert(StatNameStorage(name, symbolTable()));\n      const StatNameStorage& rejected_name_ref = *(insertion.first);\n      rejected_name = &rejected_name_ref;\n    }\n  }\n  if (rejected_name != nullptr) {\n    if (tls_rejected_stats != nullptr) {\n      tls_rejected_stats->insert(rejected_name->statName());\n    }\n    return true;\n  }\n  return false;\n}\n\ntemplate <class StatType>\nStatType& ThreadLocalStoreImpl::ScopeImpl::safeMakeStat(\n    StatName full_stat_name, StatName name_no_tags,\n    const absl::optional<StatNameTagVector>& stat_name_tags,\n    StatNameHashMap<RefcountPtr<StatType>>& central_cache_map,\n    StatNameStorageSet& central_rejected_stats, MakeStatFn<StatType> make_stat,\n    StatRefMap<StatType>* tls_cache, StatNameHashSet* tls_rejected_stats, StatType& null_stat) {\n\n  if (tls_rejected_stats != nullptr &&\n      tls_rejected_stats->find(full_stat_name) != tls_rejected_stats->end()) {\n    return null_stat;\n  }\n\n  // If we have a valid cache entry, return it.\n  if (tls_cache) {\n    auto pos = tls_cache->find(full_stat_name);\n    if (pos != tls_cache->end()) {\n      return pos->second;\n    }\n  }\n\n  // We must now look in the central store so we must be locked. We grab a reference to the\n  // central store location. It might contain nothing. 
In this case, we allocate a new stat.\n  Thread::LockGuard lock(parent_.lock_);\n  auto iter = central_cache_map.find(full_stat_name);\n  RefcountPtr<StatType>* central_ref = nullptr;\n  if (iter != central_cache_map.end()) {\n    central_ref = &(iter->second);\n  } else if (parent_.checkAndRememberRejection(full_stat_name, central_rejected_stats,\n                                               tls_rejected_stats)) {\n    return null_stat;\n  } else {\n    StatNameTagHelper tag_helper(parent_, name_no_tags, stat_name_tags);\n\n    RefcountPtr<StatType> stat = make_stat(\n        parent_.alloc_, full_stat_name, tag_helper.tagExtractedName(), tag_helper.statNameTags());\n    ASSERT(stat != nullptr);\n    central_ref = &central_cache_map[stat->statName()];\n    *central_ref = stat;\n  }\n\n  // If we have a TLS cache, insert the stat.\n  StatType& ret = **central_ref;\n  if (tls_cache) {\n    tls_cache->insert(std::make_pair(ret.statName(), std::reference_wrapper<StatType>(ret)));\n  }\n\n  // Finally we return the reference.\n  return ret;\n}\n\ntemplate <class StatType>\nusing StatTypeOptConstRef = absl::optional<std::reference_wrapper<const StatType>>;\n\ntemplate <class StatType>\nStatTypeOptConstRef<StatType> ThreadLocalStoreImpl::ScopeImpl::findStatLockHeld(\n    StatName name, StatNameHashMap<RefcountPtr<StatType>>& central_cache_map) const {\n  auto iter = central_cache_map.find(name);\n  if (iter == central_cache_map.end()) {\n    return absl::nullopt;\n  }\n\n  return std::cref(*iter->second);\n}\n\nCounter& ThreadLocalStoreImpl::ScopeImpl::counterFromStatNameWithTags(\n    const StatName& name, StatNameTagVectorOptConstRef stat_name_tags) {\n  if (parent_.rejectsAll()) {\n    return parent_.null_counter_;\n  }\n\n  // Determine the final name based on the prefix and the passed name.\n  //\n  // Note that we can do map.find(final_name.c_str()), but we cannot do\n  // map[final_name.c_str()] as the char*-keyed maps would then save the pointer\n  // to a 
temporary, and address sanitization errors would follow. Instead we\n  // must do a find() first, using the value if it succeeds. If it fails, then\n  // after we construct the stat we can insert it into the required maps. This\n  // strategy costs an extra hash lookup for each miss, but saves time\n  // re-copying the string and significant memory overhead.\n  TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable());\n  Stats::StatName final_stat_name = joiner.nameWithTags();\n\n  // We now find the TLS cache. This might remain null if we don't have TLS\n  // initialized currently.\n  StatRefMap<Counter>* tls_cache = nullptr;\n  StatNameHashSet* tls_rejected_stats = nullptr;\n  if (!parent_.shutting_down_ && parent_.tls_) {\n    TlsCacheEntry& entry = parent_.tls_->getTyped<TlsCache>().insertScope(this->scope_id_);\n    tls_cache = &entry.counters_;\n    tls_rejected_stats = &entry.rejected_stats_;\n  }\n\n  return safeMakeStat<Counter>(\n      final_stat_name, joiner.tagExtractedName(), stat_name_tags, central_cache_->counters_,\n      central_cache_->rejected_stats_,\n      [](Allocator& allocator, StatName name, StatName tag_extracted_name,\n         const StatNameTagVector& tags) -> CounterSharedPtr {\n        return allocator.makeCounter(name, tag_extracted_name, tags);\n      },\n      tls_cache, tls_rejected_stats, parent_.null_counter_);\n}\n\nvoid ThreadLocalStoreImpl::ScopeImpl::deliverHistogramToSinks(const Histogram& histogram,\n                                                              uint64_t value) {\n  // Thread local deliveries must be blocked outright for histograms and timers during shutdown.\n  // This is because the sinks may end up trying to create new connections via the thread local\n  // cluster manager which may already be destroyed (there is no way to sequence this because the\n  // cluster manager destroying can create deliveries). 
We special case this explicitly to avoid\n  // having to implement a shutdown() method (or similar) on every TLS object.\n  if (parent_.shutting_down_) {\n    return;\n  }\n\n  for (Sink& sink : parent_.timer_sinks_) {\n    sink.onHistogramComplete(histogram, value);\n  }\n}\n\nGauge& ThreadLocalStoreImpl::ScopeImpl::gaugeFromStatNameWithTags(\n    const StatName& name, StatNameTagVectorOptConstRef stat_name_tags,\n    Gauge::ImportMode import_mode) {\n  if (parent_.rejectsAll()) {\n    return parent_.null_gauge_;\n  }\n\n  // See comments in counter(). There is no super clean way (via templates or otherwise) to\n  // share this code so I'm leaving it largely duplicated for now.\n  //\n  // Note that we can do map.find(final_name.c_str()), but we cannot do\n  // map[final_name.c_str()] as the char*-keyed maps would then save the pointer to\n  // a temporary, and address sanitization errors would follow. Instead we must\n  // do a find() first, using that if it succeeds. If it fails, then after we\n  // construct the stat we can insert it into the required maps.\n  TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable());\n  StatName final_stat_name = joiner.nameWithTags();\n\n  StatRefMap<Gauge>* tls_cache = nullptr;\n  StatNameHashSet* tls_rejected_stats = nullptr;\n  if (!parent_.shutting_down_ && parent_.tls_) {\n    TlsCacheEntry& entry = parent_.tls_->getTyped<TlsCache>().scope_cache_[this->scope_id_];\n    tls_cache = &entry.gauges_;\n    tls_rejected_stats = &entry.rejected_stats_;\n  }\n\n  Gauge& gauge = safeMakeStat<Gauge>(\n      final_stat_name, joiner.tagExtractedName(), stat_name_tags, central_cache_->gauges_,\n      central_cache_->rejected_stats_,\n      [import_mode](Allocator& allocator, StatName name, StatName tag_extracted_name,\n                    const StatNameTagVector& tags) -> GaugeSharedPtr {\n        return allocator.makeGauge(name, tag_extracted_name, tags, import_mode);\n      },\n      tls_cache, 
tls_rejected_stats, parent_.null_gauge_);\n  gauge.mergeImportMode(import_mode);\n  return gauge;\n}\n\nHistogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatNameWithTags(\n    const StatName& name, StatNameTagVectorOptConstRef stat_name_tags, Histogram::Unit unit) {\n  if (parent_.rejectsAll()) {\n    return parent_.null_histogram_;\n  }\n\n  // See comments in counter(). There is no super clean way (via templates or otherwise) to\n  // share this code so I'm leaving it largely duplicated for now.\n  //\n  // Note that we can do map.find(final_name.c_str()), but we cannot do\n  // map[final_name.c_str()] as the char*-keyed maps would then save the pointer to\n  // a temporary, and address sanitization errors would follow. Instead we must\n  // do a find() first, using that if it succeeds. If it fails, then after we\n  // construct the stat we can insert it into the required maps.\n\n  TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable());\n  StatName final_stat_name = joiner.nameWithTags();\n\n  StatNameHashMap<ParentHistogramSharedPtr>* tls_cache = nullptr;\n  StatNameHashSet* tls_rejected_stats = nullptr;\n  if (!parent_.shutting_down_ && parent_.tls_) {\n    TlsCacheEntry& entry = parent_.tls_->getTyped<TlsCache>().scope_cache_[this->scope_id_];\n    tls_cache = &entry.parent_histograms_;\n    auto iter = tls_cache->find(final_stat_name);\n    if (iter != tls_cache->end()) {\n      return *iter->second;\n    }\n    tls_rejected_stats = &entry.rejected_stats_;\n    if (tls_rejected_stats->find(final_stat_name) != tls_rejected_stats->end()) {\n      return parent_.null_histogram_;\n    }\n  }\n\n  Thread::LockGuard lock(parent_.lock_);\n  auto iter = central_cache_->histograms_.find(final_stat_name);\n  ParentHistogramImplSharedPtr* central_ref = nullptr;\n  if (iter != central_cache_->histograms_.end()) {\n    central_ref = &iter->second;\n  } else if (parent_.checkAndRememberRejection(final_stat_name, 
central_cache_->rejected_stats_,\n                                               tls_rejected_stats)) {\n    return parent_.null_histogram_;\n  } else {\n    StatNameTagHelper tag_helper(parent_, joiner.tagExtractedName(), stat_name_tags);\n\n    ConstSupportedBuckets* buckets = nullptr;\n    symbolTable().callWithStringView(final_stat_name,\n                                     [&buckets, this](absl::string_view stat_name) {\n                                       buckets = &parent_.histogram_settings_->buckets(stat_name);\n                                     });\n\n    RefcountPtr<ParentHistogramImpl> stat;\n    {\n      Thread::LockGuard lock(parent_.hist_mutex_);\n      auto iter = parent_.histogram_set_.find(final_stat_name);\n      if (iter != parent_.histogram_set_.end()) {\n        stat = RefcountPtr<ParentHistogramImpl>(*iter);\n      } else {\n        stat = new ParentHistogramImpl(final_stat_name, unit, parent_,\n                                       tag_helper.tagExtractedName(), tag_helper.statNameTags(),\n                                       *buckets, parent_.next_histogram_id_++);\n        if (!parent_.shutting_down_) {\n          parent_.histogram_set_.insert(stat.get());\n        }\n      }\n    }\n\n    central_ref = &central_cache_->histograms_[stat->statName()];\n    *central_ref = stat;\n  }\n\n  if (tls_cache != nullptr) {\n    tls_cache->insert(std::make_pair((*central_ref)->statName(), *central_ref));\n  }\n  return **central_ref;\n}\n\nTextReadout& ThreadLocalStoreImpl::ScopeImpl::textReadoutFromStatNameWithTags(\n    const StatName& name, StatNameTagVectorOptConstRef stat_name_tags) {\n  if (parent_.rejectsAll()) {\n    return parent_.null_text_readout_;\n  }\n\n  // Determine the final name based on the prefix and the passed name.\n  //\n  // Note that we can do map.find(final_name.c_str()), but we cannot do\n  // map[final_name.c_str()] as the char*-keyed maps would then save the pointer\n  // to a temporary, and address sanitization 
errors would follow. Instead we\n  // must do a find() first, using the value if it succeeds. If it fails, then\n  // after we construct the stat we can insert it into the required maps. This\n  // strategy costs an extra hash lookup for each miss, but saves time\n  // re-copying the string and significant memory overhead.\n  TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable());\n  Stats::StatName final_stat_name = joiner.nameWithTags();\n\n  // We now find the TLS cache. This might remain null if we don't have TLS\n  // initialized currently.\n  StatRefMap<TextReadout>* tls_cache = nullptr;\n  StatNameHashSet* tls_rejected_stats = nullptr;\n  if (!parent_.shutting_down_ && parent_.tls_) {\n    TlsCacheEntry& entry = parent_.tls_->getTyped<TlsCache>().insertScope(this->scope_id_);\n    tls_cache = &entry.text_readouts_;\n    tls_rejected_stats = &entry.rejected_stats_;\n  }\n\n  return safeMakeStat<TextReadout>(\n      final_stat_name, joiner.tagExtractedName(), stat_name_tags, central_cache_->text_readouts_,\n      central_cache_->rejected_stats_,\n      [](Allocator& allocator, StatName name, StatName tag_extracted_name,\n         const StatNameTagVector& tags) -> TextReadoutSharedPtr {\n        return allocator.makeTextReadout(name, tag_extracted_name, tags);\n      },\n      tls_cache, tls_rejected_stats, parent_.null_text_readout_);\n}\n\nCounterOptConstRef ThreadLocalStoreImpl::ScopeImpl::findCounter(StatName name) const {\n  return findStatLockHeld<Counter>(name, central_cache_->counters_);\n}\n\nGaugeOptConstRef ThreadLocalStoreImpl::ScopeImpl::findGauge(StatName name) const {\n  return findStatLockHeld<Gauge>(name, central_cache_->gauges_);\n}\n\nHistogramOptConstRef ThreadLocalStoreImpl::ScopeImpl::findHistogram(StatName name) const {\n  auto iter = central_cache_->histograms_.find(name);\n  if (iter == central_cache_->histograms_.end()) {\n    return absl::nullopt;\n  }\n\n  RefcountPtr<Histogram> 
histogram_ref(iter->second);\n  return std::cref(*histogram_ref);\n}\n\nTextReadoutOptConstRef ThreadLocalStoreImpl::ScopeImpl::findTextReadout(StatName name) const {\n  return findStatLockHeld<TextReadout>(name, central_cache_->text_readouts_);\n}\n\nHistogram& ThreadLocalStoreImpl::tlsHistogram(ParentHistogramImpl& parent, uint64_t id) {\n  // tlsHistogram() is generally not called for a histogram that is rejected by\n  // the matcher, so no further rejection-checking is needed at this level.\n  // TlsHistogram inherits its reject/accept status from ParentHistogram.\n\n  // See comments in counterFromStatName() which explains the logic here.\n\n  TlsHistogramSharedPtr* tls_histogram = nullptr;\n  if (!shutting_down_ && tls_ != nullptr) {\n    TlsCache& tls_cache = tls_->getTyped<TlsCache>();\n    tls_histogram = &tls_cache.tls_histogram_cache_[id];\n    if (*tls_histogram != nullptr) {\n      return **tls_histogram;\n    }\n  }\n\n  StatNameTagHelper tag_helper(*this, parent.statName(), absl::nullopt);\n\n  TlsHistogramSharedPtr hist_tls_ptr(\n      new ThreadLocalHistogramImpl(parent.statName(), parent.unit(), tag_helper.tagExtractedName(),\n                                   tag_helper.statNameTags(), symbolTable()));\n\n  parent.addTlsHistogram(hist_tls_ptr);\n\n  if (tls_histogram != nullptr) {\n    *tls_histogram = hist_tls_ptr;\n  }\n\n  return *hist_tls_ptr;\n}\n\nThreadLocalHistogramImpl::ThreadLocalHistogramImpl(StatName name, Histogram::Unit unit,\n                                                   StatName tag_extracted_name,\n                                                   const StatNameTagVector& stat_name_tags,\n                                                   SymbolTable& symbol_table)\n    : HistogramImplHelper(name, tag_extracted_name, stat_name_tags, symbol_table), unit_(unit),\n      current_active_(0), used_(false), created_thread_id_(std::this_thread::get_id()),\n      symbol_table_(symbol_table) {\n  histograms_[0] = hist_alloc();\n  
histograms_[1] = hist_alloc();\n}\n\nThreadLocalHistogramImpl::~ThreadLocalHistogramImpl() {\n  MetricImpl::clear(symbol_table_);\n  hist_free(histograms_[0]);\n  hist_free(histograms_[1]);\n}\n\nvoid ThreadLocalHistogramImpl::recordValue(uint64_t value) {\n  ASSERT(std::this_thread::get_id() == created_thread_id_);\n  hist_insert_intscale(histograms_[current_active_], value, 0, 1);\n  used_ = true;\n}\n\nvoid ThreadLocalHistogramImpl::merge(histogram_t* target) {\n  histogram_t** other_histogram = &histograms_[otherHistogramIndex()];\n  hist_accumulate(target, other_histogram, 1);\n  hist_clear(*other_histogram);\n}\n\nParentHistogramImpl::ParentHistogramImpl(StatName name, Histogram::Unit unit,\n                                         ThreadLocalStoreImpl& thread_local_store,\n                                         StatName tag_extracted_name,\n                                         const StatNameTagVector& stat_name_tags,\n                                         ConstSupportedBuckets& supported_buckets, uint64_t id)\n    : MetricImpl(name, tag_extracted_name, stat_name_tags, thread_local_store.symbolTable()),\n      unit_(unit), thread_local_store_(thread_local_store), interval_histogram_(hist_alloc()),\n      cumulative_histogram_(hist_alloc()),\n      interval_statistics_(interval_histogram_, supported_buckets),\n      cumulative_statistics_(cumulative_histogram_, supported_buckets), merged_(false), id_(id) {}\n\nParentHistogramImpl::~ParentHistogramImpl() {\n  thread_local_store_.releaseHistogramCrossThread(id_);\n  ASSERT(ref_count_ == 0);\n  MetricImpl::clear(thread_local_store_.symbolTable());\n  hist_free(interval_histogram_);\n  hist_free(cumulative_histogram_);\n}\n\nvoid ParentHistogramImpl::incRefCount() { ++ref_count_; }\n\nbool ParentHistogramImpl::decRefCount() {\n  bool ret;\n  if (shutting_down_) {\n    // When shutting down, we cannot reference thread_local_store_, as\n    // histograms can outlive the store. 
So we decrement the ref-count without\n    // the stores' lock. We will not be removing the object from the store's\n    // histogram map in this scenario, as the set was cleared during shutdown,\n    // and will not be repopulated in histogramFromStatNameWithTags after\n    // initiating shutdown.\n    ret = --ref_count_ == 0;\n  } else {\n    // We delegate to the Store object to decrement the ref-count so it can hold\n    // the lock to the map. If we don't hold a lock, another thread may\n    // simultaneously try to allocate the same name'd histogram after we\n    // decrement it, and we'll wind up with a dtor/update race. To avoid this we\n    // must hold the lock until the histogram is removed from the map.\n    //\n    // See also StatsSharedImpl::decRefCount() in allocator_impl.cc, which has\n    // the same issue.\n    ret = thread_local_store_.decHistogramRefCount(*this, ref_count_);\n  }\n  return ret;\n}\n\nbool ThreadLocalStoreImpl::decHistogramRefCount(ParentHistogramImpl& hist,\n                                                std::atomic<uint32_t>& ref_count) {\n  // We must hold the store's histogram lock when decrementing the\n  // refcount. Otherwise another thread may simultaneously try to allocate the\n  // same name'd stat after we decrement it, and we'll wind up with a\n  // dtor/update race. 
To avoid this we must hold the lock until the stat is\n  // removed from the map.\n  Thread::LockGuard lock(hist_mutex_);\n  ASSERT(ref_count >= 1);\n  if (--ref_count == 0) {\n    if (!shutting_down_) {\n      const size_t count = histogram_set_.erase(hist.statName());\n      ASSERT(shutting_down_ || count == 1);\n    }\n    return true;\n  }\n  return false;\n}\n\nSymbolTable& ParentHistogramImpl::symbolTable() { return thread_local_store_.symbolTable(); }\n\nHistogram::Unit ParentHistogramImpl::unit() const { return unit_; }\n\nvoid ParentHistogramImpl::recordValue(uint64_t value) {\n  Histogram& tls_histogram = thread_local_store_.tlsHistogram(*this, id_);\n  tls_histogram.recordValue(value);\n  thread_local_store_.deliverHistogramToSinks(*this, value);\n}\n\nbool ParentHistogramImpl::used() const {\n  // Consider ParentHistogram used only if has ever been merged.\n  return merged_;\n}\n\nvoid ParentHistogramImpl::merge() {\n  Thread::ReleasableLockGuard lock(merge_lock_);\n  if (merged_ || usedLockHeld()) {\n    hist_clear(interval_histogram_);\n    // Here we could copy all the pointers to TLS histograms in the tls_histogram_ list,\n    // then release the lock before we do the actual merge. 
However it is not a big deal\n    // because the tls_histogram merge is not that expensive as it is a single histogram\n    // merge and adding TLS histograms is rare.\n    for (const TlsHistogramSharedPtr& tls_histogram : tls_histograms_) {\n      tls_histogram->merge(interval_histogram_);\n    }\n    // Since TLS merge is done, we can release the lock here.\n    lock.release();\n    hist_accumulate(cumulative_histogram_, &interval_histogram_, 1);\n    cumulative_statistics_.refresh(cumulative_histogram_);\n    interval_statistics_.refresh(interval_histogram_);\n    merged_ = true;\n  }\n}\n\nconst std::string ParentHistogramImpl::quantileSummary() const {\n  if (used()) {\n    std::vector<std::string> summary;\n    const std::vector<double>& supported_quantiles_ref = interval_statistics_.supportedQuantiles();\n    summary.reserve(supported_quantiles_ref.size());\n    for (size_t i = 0; i < supported_quantiles_ref.size(); ++i) {\n      summary.push_back(fmt::format(\"P{:g}({},{})\", 100 * supported_quantiles_ref[i],\n                                    interval_statistics_.computedQuantiles()[i],\n                                    cumulative_statistics_.computedQuantiles()[i]));\n    }\n    return absl::StrJoin(summary, \" \");\n  } else {\n    return std::string(\"No recorded values\");\n  }\n}\n\nconst std::string ParentHistogramImpl::bucketSummary() const {\n  if (used()) {\n    std::vector<std::string> bucket_summary;\n    ConstSupportedBuckets& supported_buckets = interval_statistics_.supportedBuckets();\n    bucket_summary.reserve(supported_buckets.size());\n    for (size_t i = 0; i < supported_buckets.size(); ++i) {\n      bucket_summary.push_back(fmt::format(\"B{:g}({},{})\", supported_buckets[i],\n                                           interval_statistics_.computedBuckets()[i],\n                                           cumulative_statistics_.computedBuckets()[i]));\n    }\n    return absl::StrJoin(bucket_summary, \" \");\n  } else {\n    return 
std::string(\"No recorded values\");\n  }\n}\n\nvoid ParentHistogramImpl::addTlsHistogram(const TlsHistogramSharedPtr& hist_ptr) {\n  Thread::LockGuard lock(merge_lock_);\n  tls_histograms_.emplace_back(hist_ptr);\n}\n\nbool ParentHistogramImpl::usedLockHeld() const {\n  for (const TlsHistogramSharedPtr& tls_histogram : tls_histograms_) {\n    if (tls_histogram->used()) {\n      return true;\n    }\n  }\n  return false;\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/thread_local_store.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/stats/tag.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/common/thread_synchronizer.h\"\n#include \"common/stats/allocator_impl.h\"\n#include \"common/stats/histogram_impl.h\"\n#include \"common/stats/null_counter.h\"\n#include \"common/stats/null_gauge.h\"\n#include \"common/stats/null_text_readout.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/utility.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"circllhist.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * A histogram that is stored in TLS and used to record values per thread. This holds two\n * histograms, one to collect the values and other as backup that is used for merge process. The\n * swap happens during the merge process.\n */\nclass ThreadLocalHistogramImpl : public HistogramImplHelper {\npublic:\n  ThreadLocalHistogramImpl(StatName name, Histogram::Unit unit, StatName tag_extracted_name,\n                           const StatNameTagVector& stat_name_tags, SymbolTable& symbol_table);\n  ~ThreadLocalHistogramImpl() override;\n\n  void merge(histogram_t* target);\n\n  /**\n   * Called in the beginning of merge process. 
Swaps the histogram used for collection so that we do\n   * not have to lock the histogram in high throughput TLS writes.\n   */\n  void beginMerge() {\n    // This switches the current_active_ between 1 and 0.\n    ASSERT(std::this_thread::get_id() == created_thread_id_);\n    current_active_ = otherHistogramIndex();\n  }\n\n  // Stats::Histogram\n  Histogram::Unit unit() const override {\n    // If at some point ThreadLocalHistogramImpl will hold a pointer to its parent we can just\n    // return parent's unit here and not store it separately.\n    return unit_;\n  }\n  void recordValue(uint64_t value) override;\n\n  // Stats::Metric\n  SymbolTable& symbolTable() final { return symbol_table_; }\n  bool used() const override { return used_; }\n\nprivate:\n  Histogram::Unit unit_;\n  uint64_t otherHistogramIndex() const { return 1 - current_active_; }\n  uint64_t current_active_;\n  histogram_t* histograms_[2];\n  std::atomic<bool> used_;\n  std::thread::id created_thread_id_;\n  SymbolTable& symbol_table_;\n};\n\nusing TlsHistogramSharedPtr = RefcountPtr<ThreadLocalHistogramImpl>;\n\nclass ThreadLocalStoreImpl;\n\n/**\n * Log Linear Histogram implementation that is stored in the main thread.\n */\nclass ParentHistogramImpl : public MetricImpl<ParentHistogram> {\npublic:\n  ParentHistogramImpl(StatName name, Histogram::Unit unit, ThreadLocalStoreImpl& parent,\n                      StatName tag_extracted_name, const StatNameTagVector& stat_name_tags,\n                      ConstSupportedBuckets& supported_buckets, uint64_t id);\n  ~ParentHistogramImpl() override;\n\n  void addTlsHistogram(const TlsHistogramSharedPtr& hist_ptr);\n\n  // Stats::Histogram\n  Histogram::Unit unit() const override;\n  void recordValue(uint64_t value) override;\n\n  /**\n   * This method is called during the main stats flush process for each of the histograms. It\n   * iterates through the TLS histograms and collects the histogram data of all of them\n   * in to \"interval_histogram\". 
Then the collected \"interval_histogram\" is merged to a\n   * \"cumulative_histogram\".\n   */\n  void merge() override;\n\n  const HistogramStatistics& intervalStatistics() const override { return interval_statistics_; }\n  const HistogramStatistics& cumulativeStatistics() const override {\n    return cumulative_statistics_;\n  }\n  const std::string quantileSummary() const override;\n  const std::string bucketSummary() const override;\n\n  // Stats::Metric\n  SymbolTable& symbolTable() override;\n  bool used() const override;\n\n  // RefcountInterface\n  void incRefCount() override;\n  bool decRefCount() override;\n  uint32_t use_count() const override { return ref_count_; }\n\n  // Indicates that the ThreadLocalStore is shutting down, so no need to clear its histogram_set_.\n  void setShuttingDown(bool shutting_down) { shutting_down_ = shutting_down; }\n  bool shuttingDown() const { return shutting_down_; }\n\nprivate:\n  bool usedLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(merge_lock_);\n\n  Histogram::Unit unit_;\n  ThreadLocalStoreImpl& thread_local_store_;\n  histogram_t* interval_histogram_;\n  histogram_t* cumulative_histogram_;\n  HistogramStatisticsImpl interval_statistics_;\n  HistogramStatisticsImpl cumulative_statistics_;\n  mutable Thread::MutexBasicLockable merge_lock_;\n  std::list<TlsHistogramSharedPtr> tls_histograms_ ABSL_GUARDED_BY(merge_lock_);\n  bool merged_;\n  std::atomic<bool> shutting_down_{false};\n  std::atomic<uint32_t> ref_count_{0};\n  const uint64_t id_; // Index into TlsCache::histogram_cache_.\n};\n\nusing ParentHistogramImplSharedPtr = RefcountPtr<ParentHistogramImpl>;\n\n/**\n * Store implementation with thread local caching. 
For design details see\n * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md\n */\nclass ThreadLocalStoreImpl : Logger::Loggable<Logger::Id::stats>, public StoreRoot {\npublic:\n  static const char MainDispatcherCleanupSync[];\n\n  ThreadLocalStoreImpl(Allocator& alloc);\n  ~ThreadLocalStoreImpl() override;\n\n  // Stats::Scope\n  Counter& counterFromStatNameWithTags(const StatName& name,\n                                       StatNameTagVectorOptConstRef tags) override {\n    return default_scope_->counterFromStatNameWithTags(name, tags);\n  }\n  Counter& counterFromString(const std::string& name) override {\n    return default_scope_->counterFromString(name);\n  }\n  ScopePtr createScope(const std::string& name) override;\n  void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override {\n    return default_scope_->deliverHistogramToSinks(histogram, value);\n  }\n  Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                   Gauge::ImportMode import_mode) override {\n    return default_scope_->gaugeFromStatNameWithTags(name, tags, import_mode);\n  }\n  Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override {\n    return default_scope_->gaugeFromString(name, import_mode);\n  }\n  Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                           Histogram::Unit unit) override {\n    return default_scope_->histogramFromStatNameWithTags(name, tags, unit);\n  }\n  Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override {\n    return default_scope_->histogramFromString(name, unit);\n  }\n  TextReadout& textReadoutFromStatNameWithTags(const StatName& name,\n                                               StatNameTagVectorOptConstRef tags) override {\n    return default_scope_->textReadoutFromStatNameWithTags(name, tags);\n  }\n  
TextReadout& textReadoutFromString(const std::string& name) override {\n    return default_scope_->textReadoutFromString(name);\n  }\n  NullGaugeImpl& nullGauge(const std::string&) override { return null_gauge_; }\n  const SymbolTable& constSymbolTable() const override { return alloc_.constSymbolTable(); }\n  SymbolTable& symbolTable() override { return alloc_.symbolTable(); }\n  const TagProducer& tagProducer() const { return *tag_producer_; }\n  CounterOptConstRef findCounter(StatName name) const override {\n    CounterOptConstRef found_counter;\n    Thread::LockGuard lock(lock_);\n    for (ScopeImpl* scope : scopes_) {\n      found_counter = scope->findCounter(name);\n      if (found_counter.has_value()) {\n        return found_counter;\n      }\n    }\n    return absl::nullopt;\n  }\n  GaugeOptConstRef findGauge(StatName name) const override {\n    GaugeOptConstRef found_gauge;\n    Thread::LockGuard lock(lock_);\n    for (ScopeImpl* scope : scopes_) {\n      found_gauge = scope->findGauge(name);\n      if (found_gauge.has_value()) {\n        return found_gauge;\n      }\n    }\n    return absl::nullopt;\n  }\n  HistogramOptConstRef findHistogram(StatName name) const override {\n    HistogramOptConstRef found_histogram;\n    Thread::LockGuard lock(lock_);\n    for (ScopeImpl* scope : scopes_) {\n      found_histogram = scope->findHistogram(name);\n      if (found_histogram.has_value()) {\n        return found_histogram;\n      }\n    }\n    return absl::nullopt;\n  }\n  TextReadoutOptConstRef findTextReadout(StatName name) const override {\n    TextReadoutOptConstRef found_text_readout;\n    Thread::LockGuard lock(lock_);\n    for (ScopeImpl* scope : scopes_) {\n      found_text_readout = scope->findTextReadout(name);\n      if (found_text_readout.has_value()) {\n        return found_text_readout;\n      }\n    }\n    return absl::nullopt;\n  }\n\n  bool iterate(const IterateFn<Counter>& fn) const override { return iterHelper(fn); }\n  bool iterate(const 
IterateFn<Gauge>& fn) const override { return iterHelper(fn); }\n  bool iterate(const IterateFn<Histogram>& fn) const override { return iterHelper(fn); }\n  bool iterate(const IterateFn<TextReadout>& fn) const override { return iterHelper(fn); }\n\n  // Stats::Store\n  std::vector<CounterSharedPtr> counters() const override;\n  std::vector<GaugeSharedPtr> gauges() const override;\n  std::vector<TextReadoutSharedPtr> textReadouts() const override;\n  std::vector<ParentHistogramSharedPtr> histograms() const override;\n\n  // Stats::StoreRoot\n  void addSink(Sink& sink) override { timer_sinks_.push_back(sink); }\n  void setTagProducer(TagProducerPtr&& tag_producer) override {\n    tag_producer_ = std::move(tag_producer);\n  }\n  void setStatsMatcher(StatsMatcherPtr&& stats_matcher) override;\n  void setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) override;\n  void initializeThreading(Event::Dispatcher& main_thread_dispatcher,\n                           ThreadLocal::Instance& tls) override;\n  void shutdownThreading() override;\n  void mergeHistograms(PostMergeCb merge_cb) override;\n\n  Histogram& tlsHistogram(ParentHistogramImpl& parent, uint64_t id);\n\n  /**\n   * @return a thread synchronizer object used for controlling thread behavior in tests.\n   */\n  Thread::ThreadSynchronizer& sync() { return sync_; }\n\n  /**\n   * @return a set of well known tag names; used to reduce symbol table churn.\n   */\n  const StatNameSet& wellKnownTags() const { return *well_known_tags_; }\n\n  bool decHistogramRefCount(ParentHistogramImpl& histogram, std::atomic<uint32_t>& ref_count);\n  void releaseHistogramCrossThread(uint64_t histogram_id);\n\nprivate:\n  friend class ThreadLocalStoreTestingPeer;\n\n  template <class Stat> using StatRefMap = StatNameHashMap<std::reference_wrapper<Stat>>;\n\n  struct TlsCacheEntry {\n    // The counters, gauges and text readouts in the TLS cache are stored by reference,\n    // depending on the CentralCache for backing 
store. This avoids a potential\n    // contention-storm when destructing a scope, as the counter/gauge ref-count\n    // decrement in allocator_impl.cc needs to hold the single allocator mutex.\n    StatRefMap<Counter> counters_;\n    StatRefMap<Gauge> gauges_;\n    StatRefMap<TextReadout> text_readouts_;\n\n    // Histograms also require holding a mutex while decrementing reference\n    // counts. The only difference from other stats is that the histogram_set_\n    // lives in the ThreadLocalStore object, rather than in\n    // AllocatorImpl. Histograms are removed from that set when all scopes\n    // referencing the histogram are dropped. Each ParentHistogram has a unique\n    // index, which is not re-used during the process lifetime.\n    //\n    // There is also a tls_histogram_cache_ in the TlsCache object, which is\n    // not tied to a scope. It maps from parent histogram's unique index to\n    // a TlsHistogram. This enables continuity between same-named histograms\n    // in same-named scopes. That scenario is common when re-creating scopes in\n    // response to xDS.\n    StatNameHashMap<ParentHistogramSharedPtr> parent_histograms_;\n\n    // We keep a TLS cache of rejected stat names. This costs memory, but\n    // reduces runtime overhead running the matcher. Moreover, once symbol\n    // tables are integrated, rejection will need the fully elaborated string,\n    // and it we need to take a global symbol-table lock to run. 
We keep this\n    // StatName set here in the TLS cache to avoid taking a lock to compute\n    // rejection.\n    StatNameHashSet rejected_stats_;\n  };\n\n  struct CentralCacheEntry : public RefcountHelper {\n    explicit CentralCacheEntry(SymbolTable& symbol_table) : symbol_table_(symbol_table) {}\n    ~CentralCacheEntry();\n\n    StatNameHashMap<CounterSharedPtr> counters_;\n    StatNameHashMap<GaugeSharedPtr> gauges_;\n    StatNameHashMap<ParentHistogramImplSharedPtr> histograms_;\n    StatNameHashMap<TextReadoutSharedPtr> text_readouts_;\n    StatNameStorageSet rejected_stats_;\n    SymbolTable& symbol_table_;\n  };\n  using CentralCacheEntrySharedPtr = RefcountPtr<CentralCacheEntry>;\n\n  struct ScopeImpl : public Scope {\n    ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix);\n    ~ScopeImpl() override;\n\n    // Stats::Scope\n    Counter& counterFromStatNameWithTags(const StatName& name,\n                                         StatNameTagVectorOptConstRef tags) override;\n    void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override;\n    Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                     Gauge::ImportMode import_mode) override;\n    Histogram& histogramFromStatNameWithTags(const StatName& name,\n                                             StatNameTagVectorOptConstRef tags,\n                                             Histogram::Unit unit) override;\n    TextReadout& textReadoutFromStatNameWithTags(const StatName& name,\n                                                 StatNameTagVectorOptConstRef tags) override;\n    ScopePtr createScope(const std::string& name) override {\n      return parent_.createScope(symbolTable().toString(prefix_.statName()) + \".\" + name);\n    }\n    const SymbolTable& constSymbolTable() const final { return parent_.constSymbolTable(); }\n    SymbolTable& symbolTable() final { return parent_.symbolTable(); 
}\n\n    Counter& counterFromString(const std::string& name) override {\n      StatNameManagedStorage storage(name, symbolTable());\n      return counterFromStatName(storage.statName());\n    }\n\n    Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override {\n      StatNameManagedStorage storage(name, symbolTable());\n      return gaugeFromStatName(storage.statName(), import_mode);\n    }\n    Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override {\n      StatNameManagedStorage storage(name, symbolTable());\n      return histogramFromStatName(storage.statName(), unit);\n    }\n    TextReadout& textReadoutFromString(const std::string& name) override {\n      StatNameManagedStorage storage(name, symbolTable());\n      return textReadoutFromStatName(storage.statName());\n    }\n\n    NullGaugeImpl& nullGauge(const std::string&) override { return parent_.null_gauge_; }\n\n    template <class StatMap, class StatFn> bool iterHelper(StatFn fn, const StatMap& map) const {\n      for (auto& iter : map) {\n        if (!fn(iter.second)) {\n          return false;\n        }\n      }\n      return true;\n    }\n\n    bool iterate(const IterateFn<Counter>& fn) const override {\n      return iterHelper(fn, central_cache_->counters_);\n    }\n    bool iterate(const IterateFn<Gauge>& fn) const override {\n      return iterHelper(fn, central_cache_->gauges_);\n    }\n    bool iterate(const IterateFn<Histogram>& fn) const override {\n      return iterHelper(fn, central_cache_->histograms_);\n    }\n    bool iterate(const IterateFn<TextReadout>& fn) const override {\n      return iterHelper(fn, central_cache_->text_readouts_);\n    }\n\n    // NOTE: The find methods assume that `name` is fully-qualified.\n    // Implementations will not add the scope prefix.\n    CounterOptConstRef findCounter(StatName name) const override;\n    GaugeOptConstRef findGauge(StatName name) const override;\n    HistogramOptConstRef 
findHistogram(StatName name) const override;\n    TextReadoutOptConstRef findTextReadout(StatName name) const override;\n\n    template <class StatType>\n    using MakeStatFn = std::function<RefcountPtr<StatType>(\n        Allocator&, StatName name, StatName tag_extracted_name, const StatNameTagVector& tags)>;\n\n    /**\n     * Makes a stat either by looking it up in the central cache,\n     * generating it from the parent allocator, or as a last\n     * result, creating it with the heap allocator.\n     *\n     * @param full_stat_name the full name of the stat with appended tags.\n     * @param name_no_tags the full name of the stat (not tag extracted) without appended tags.\n     * @param stat_name_tags the tags provided at creation time. If empty, tag extraction occurs.\n     * @param central_cache_map a map from name to the desired object in the central cache.\n     * @param make_stat a function to generate the stat object, called if it's not in cache.\n     * @param tls_ref possibly null reference to a cache entry for this stat, which will be\n     *     used if non-empty, or filled in if empty (and non-null).\n     */\n    template <class StatType>\n    StatType& safeMakeStat(StatName full_stat_name, StatName name_no_tags,\n                           const absl::optional<StatNameTagVector>& stat_name_tags,\n                           StatNameHashMap<RefcountPtr<StatType>>& central_cache_map,\n                           StatNameStorageSet& central_rejected_stats,\n                           MakeStatFn<StatType> make_stat, StatRefMap<StatType>* tls_cache,\n                           StatNameHashSet* tls_rejected_stats, StatType& null_stat);\n\n    template <class StatType>\n    using StatTypeOptConstRef = absl::optional<std::reference_wrapper<const StatType>>;\n\n    /**\n     * Looks up an existing stat, populating the local cache if necessary. 
Does\n     * not check the TLS or rejects, and does not create a stat if it does not\n     * exist.\n     *\n     * @param name the full name of the stat (not tag extracted).\n     * @param central_cache_map a map from name to the desired object in the central cache.\n     * @return a reference to the stat, if it exists.\n     */\n    template <class StatType>\n    StatTypeOptConstRef<StatType>\n    findStatLockHeld(StatName name,\n                     StatNameHashMap<RefcountPtr<StatType>>& central_cache_map) const;\n\n    const uint64_t scope_id_;\n    ThreadLocalStoreImpl& parent_;\n    StatNameStorage prefix_;\n    mutable CentralCacheEntrySharedPtr central_cache_;\n  };\n\n  struct TlsCache : public ThreadLocal::ThreadLocalObject {\n    TlsCacheEntry& insertScope(uint64_t scope_id);\n    void eraseScope(uint64_t scope_id);\n    void eraseHistogram(uint64_t histogram);\n\n    // The TLS scope cache is keyed by scope ID. This is used to avoid complex circular references\n    // during scope destruction. An ID is required vs. using the address of the scope pointer\n    // because it's possible that the memory allocator will recycle the scope pointer immediately\n    // upon destruction, leading to a situation in which a new scope with the same address is used\n    // to reference the cache, and then subsequently cache flushed, leaving nothing in the central\n    // store. See the overview for more information. 
This complexity is required for lockless\n    // operation in the fast path.\n    absl::flat_hash_map<uint64_t, TlsCacheEntry> scope_cache_;\n\n    // Maps from histogram ID (monotonically increasing) to a TLS histogram.\n    absl::flat_hash_map<uint64_t, TlsHistogramSharedPtr> tls_histogram_cache_;\n  };\n\n  template <class StatFn> bool iterHelper(StatFn fn) const {\n    Thread::LockGuard lock(lock_);\n    for (ScopeImpl* scope : scopes_) {\n      if (!scope->iterate(fn)) {\n        return false;\n      }\n    }\n    return true;\n  }\n\n  std::string getTagsForName(const std::string& name, TagVector& tags) const;\n  void clearScopeFromCaches(uint64_t scope_id, CentralCacheEntrySharedPtr central_cache);\n  void clearHistogramFromCaches(uint64_t histogram_id);\n  void releaseScopeCrossThread(ScopeImpl* scope);\n  void mergeInternal(PostMergeCb merge_cb);\n  bool rejects(StatName name) const;\n  bool rejectsAll() const { return stats_matcher_->rejectsAll(); }\n  template <class StatMapClass, class StatListClass>\n  void removeRejectedStats(StatMapClass& map, StatListClass& list);\n  bool checkAndRememberRejection(StatName name, StatNameStorageSet& central_rejected_stats,\n                                 StatNameHashSet* tls_rejected_stats);\n\n  Allocator& alloc_;\n  Event::Dispatcher* main_thread_dispatcher_{};\n  ThreadLocal::SlotPtr tls_;\n  mutable Thread::MutexBasicLockable lock_;\n  absl::flat_hash_set<ScopeImpl*> scopes_ ABSL_GUARDED_BY(lock_);\n  ScopePtr default_scope_;\n  std::list<std::reference_wrapper<Sink>> timer_sinks_;\n  TagProducerPtr tag_producer_;\n  StatsMatcherPtr stats_matcher_;\n  HistogramSettingsConstPtr histogram_settings_;\n  std::atomic<bool> threading_ever_initialized_{};\n  std::atomic<bool> shutting_down_{};\n  std::atomic<bool> merge_in_progress_{};\n  AllocatorImpl heap_allocator_;\n\n  NullCounterImpl null_counter_;\n  NullGaugeImpl null_gauge_;\n  NullHistogramImpl null_histogram_;\n  NullTextReadoutImpl null_text_readout_;\n\n  
Thread::ThreadSynchronizer sync_;\n  std::atomic<uint64_t> next_scope_id_{};\n  uint64_t next_histogram_id_ ABSL_GUARDED_BY(hist_mutex_) = 0;\n\n  StatNameSetPtr well_known_tags_;\n\n  mutable Thread::MutexBasicLockable hist_mutex_;\n  StatSet<ParentHistogramImpl> histogram_set_ ABSL_GUARDED_BY(hist_mutex_);\n\n  // Retain storage for deleted stats; these are no longer in maps because the\n  // matcher-pattern was established after they were created. Since the stats\n  // are held by reference in code that expects them to be there, we can't\n  // actually delete the stats.\n  //\n  // It seems like it would be better to have each client that expects a stat\n  // to exist to hold it as (e.g.) a CounterSharedPtr rather than a Counter&\n  // but that would be fairly complex to change.\n  std::vector<CounterSharedPtr> deleted_counters_ ABSL_GUARDED_BY(lock_);\n  std::vector<GaugeSharedPtr> deleted_gauges_ ABSL_GUARDED_BY(lock_);\n  std::vector<HistogramSharedPtr> deleted_histograms_ ABSL_GUARDED_BY(lock_);\n  std::vector<TextReadoutSharedPtr> deleted_text_readouts_ ABSL_GUARDED_BY(lock_);\n};\n\nusing ThreadLocalStoreImplPtr = std::unique_ptr<ThreadLocalStoreImpl>;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/timespan_impl.cc",
    "content": "#include \"common/stats/timespan_impl.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nHistogramCompletableTimespanImpl::HistogramCompletableTimespanImpl(Histogram& histogram,\n                                                                   TimeSource& time_source)\n    : time_source_(time_source), histogram_(histogram), start_(time_source.monotonicTime()) {\n  ensureTimeHistogram(histogram);\n}\n\nstd::chrono::milliseconds HistogramCompletableTimespanImpl::elapsed() const {\n  return HistogramCompletableTimespanImpl::elapsedDuration<std::chrono::milliseconds>();\n}\n\nvoid HistogramCompletableTimespanImpl::complete() { histogram_.recordValue(tickCount()); }\n\nvoid HistogramCompletableTimespanImpl::ensureTimeHistogram(const Histogram& histogram) const {\n  switch (histogram.unit()) {\n  case Histogram::Unit::Null:\n  case Histogram::Unit::Microseconds:\n  case Histogram::Unit::Milliseconds:\n    return;\n  case Histogram::Unit::Unspecified:\n  case Histogram::Unit::Bytes:\n    RELEASE_ASSERT(\n        false,\n        fmt::format(\"Cannot create a timespan flushing the duration to histogram '{}' because \"\n                    \"it does not measure time. 
This is a programming error, either pass a \"\n                    \"histogram measuring time or fix the unit of the passed histogram.\",\n                    histogram.name()));\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nuint64_t HistogramCompletableTimespanImpl::tickCount() const {\n  switch (histogram_.unit()) {\n  case Histogram::Unit::Null:\n    return 0;\n  case Histogram::Unit::Microseconds:\n    return HistogramCompletableTimespanImpl::elapsedDuration<std::chrono::microseconds>().count();\n  case Histogram::Unit::Milliseconds:\n    return HistogramCompletableTimespanImpl::elapsedDuration<std::chrono::milliseconds>().count();\n  case Histogram::Unit::Unspecified:\n  case Histogram::Unit::Bytes:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/timespan_impl.h",
    "content": "#pragma once\n\n#include \"envoy/common/time.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/timespan.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * An individual timespan that flushes its measured value to the histogram on completion.\n * The start time is captured on construction. The timespan must be\n * completed via complete() for it to be stored. If the timespan is deleted this will be treated as\n * a cancellation. The target histogram must represent a quantity of time.\n */\nclass HistogramCompletableTimespanImpl : public CompletableTimespan {\npublic:\n  HistogramCompletableTimespanImpl(Histogram& histogram, TimeSource& time_source);\n\n  // Stats::CompletableTimespan\n  std::chrono::milliseconds elapsed() const override;\n  void complete() override;\n\nprivate:\n  void ensureTimeHistogram(const Histogram& histogram) const;\n  template <typename TimeUnit> TimeUnit elapsedDuration() const {\n    return std::chrono::duration_cast<TimeUnit>(time_source_.monotonicTime() - start_);\n  }\n  uint64_t tickCount() const;\n\n  TimeSource& time_source_;\n  Histogram& histogram_;\n  const MonotonicTime start_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/utility.cc",
    "content": "#include \"common/stats/utility.h\"\n\n#include <algorithm>\n#include <string>\n\n#include \"absl/strings/match.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nstd::string Utility::sanitizeStatsName(absl::string_view name) {\n  if (absl::EndsWith(name, \".\")) {\n    name.remove_suffix(1);\n  }\n  if (absl::StartsWith(name, \".\")) {\n    name.remove_prefix(1);\n  }\n  std::string stats_name = std::string(name);\n  std::replace(stats_name.begin(), stats_name.end(), ':', '_');\n  std::replace(stats_name.begin(), stats_name.end(), '\\0', '_');\n  return stats_name;\n}\n\nabsl::optional<StatName> Utility::findTag(const Metric& metric, StatName find_tag_name) {\n  absl::optional<StatName> value;\n  metric.iterateTagStatNames(\n      [&value, &find_tag_name](Stats::StatName tag_name, Stats::StatName tag_value) -> bool {\n        if (tag_name == find_tag_name) {\n          value = tag_value;\n          return false;\n        }\n        return true;\n      });\n  return value;\n}\n\nnamespace {\n\n// Helper class for the three Utility::*FromElements implementations to build up\n// a joined StatName from a mix of StatName and string_view.\nstruct ElementVisitor {\n  ElementVisitor(SymbolTable& symbol_table, const ElementVec& elements)\n      : symbol_table_(symbol_table), pool_(symbol_table) {\n    stat_names_.resize(elements.size());\n    for (const Element& element : elements) {\n      absl::visit(*this, element);\n    }\n    joined_ = symbol_table_.join(stat_names_);\n  }\n\n  // Overloads provides for absl::visit to call.\n  void operator()(StatName stat_name) { stat_names_.push_back(stat_name); }\n  void operator()(absl::string_view name) { stat_names_.push_back(pool_.add(name)); }\n\n  /**\n   * @return the StatName constructed by joining the elements.\n   */\n  StatName statName() { return StatName(joined_.get()); }\n\n  SymbolTable& symbol_table_;\n  StatNameVec stat_names_;\n  StatNameDynamicPool pool_;\n  
SymbolTable::StoragePtr joined_;\n};\n\n} // namespace\n\nCounter& Utility::counterFromElements(Scope& scope, const ElementVec& elements,\n                                      StatNameTagVectorOptConstRef tags) {\n  ElementVisitor visitor(scope.symbolTable(), elements);\n  return scope.counterFromStatNameWithTags(visitor.statName(), tags);\n}\n\nCounter& Utility::counterFromStatNames(Scope& scope, const StatNameVec& elements,\n                                       StatNameTagVectorOptConstRef tags) {\n  SymbolTable::StoragePtr joined = scope.symbolTable().join(elements);\n  return scope.counterFromStatNameWithTags(StatName(joined.get()), tags);\n}\n\nGauge& Utility::gaugeFromElements(Scope& scope, const ElementVec& elements,\n                                  Gauge::ImportMode import_mode,\n                                  StatNameTagVectorOptConstRef tags) {\n  ElementVisitor visitor(scope.symbolTable(), elements);\n  return scope.gaugeFromStatNameWithTags(visitor.statName(), tags, import_mode);\n}\n\nGauge& Utility::gaugeFromStatNames(Scope& scope, const StatNameVec& elements,\n                                   Gauge::ImportMode import_mode,\n                                   StatNameTagVectorOptConstRef tags) {\n  SymbolTable::StoragePtr joined = scope.symbolTable().join(elements);\n  return scope.gaugeFromStatNameWithTags(StatName(joined.get()), tags, import_mode);\n}\n\nHistogram& Utility::histogramFromElements(Scope& scope, const ElementVec& elements,\n                                          Histogram::Unit unit, StatNameTagVectorOptConstRef tags) {\n  ElementVisitor visitor(scope.symbolTable(), elements);\n  return scope.histogramFromStatNameWithTags(visitor.statName(), tags, unit);\n}\n\nHistogram& Utility::histogramFromStatNames(Scope& scope, const StatNameVec& elements,\n                                           Histogram::Unit unit,\n                                           StatNameTagVectorOptConstRef tags) {\n  SymbolTable::StoragePtr joined 
= scope.symbolTable().join(elements);\n  return scope.histogramFromStatNameWithTags(StatName(joined.get()), tags, unit);\n}\n\nTextReadout& Utility::textReadoutFromElements(Scope& scope, const ElementVec& elements,\n                                              StatNameTagVectorOptConstRef tags) {\n  ElementVisitor visitor(scope.symbolTable(), elements);\n  return scope.textReadoutFromStatNameWithTags(visitor.statName(), tags);\n}\n\nTextReadout& Utility::textReadoutFromStatNames(Scope& scope, const StatNameVec& elements,\n                                               StatNameTagVectorOptConstRef tags) {\n  SymbolTable::StoragePtr joined = scope.symbolTable().join(elements);\n  return scope.textReadoutFromStatNameWithTags(StatName(joined.get()), tags);\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stats/utility.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n/**\n * Represents a dynamically created stat name token based on absl::string_view.\n * This class wrapper is used in the 'Element' variant so that call-sites\n * can express explicit intent to create dynamic stat names, which are more\n * expensive than symbolic stat names. We use dynamic stat names only for\n * building stats based on names discovered in the line of a request.\n */\nclass DynamicName : public absl::string_view {\npublic:\n  // This is intentionally left as an implicit conversion from string_view to\n  // make call-sites easier to read, e.g.\n  //    Utility::counterFromElements(*scope, {DynamicName(\"a\"), DynamicName(\"b\")});\n  explicit DynamicName(absl::string_view str) : absl::string_view(str) {}\n};\n\n/**\n * Holds either a symbolic StatName or a dynamic string, for the purpose of\n * composing a vector to pass to Utility::counterFromElements, etc. This is\n * a programming convenience to create joined stat names. It is easier to\n * call the above helpers than to use SymbolTable::join(), because the helpers\n * hide the memory management of the joined storage, and they allow easier\n * co-mingling of symbolic and dynamic stat-name components.\n */\nusing Element = absl::variant<StatName, DynamicName>;\nusing ElementVec = absl::InlinedVector<Element, 8>;\n\n/**\n * Common stats utility routines.\n */\nclass Utility {\npublic:\n  /**\n   * ':' is a reserved char in statsd. 
Do a character replacement to avoid\n   * costly inline translations later.\n   *\n   * @param name the stat name to sanitize.\n   * @return the sanitized stat name.\n   */\n  static std::string sanitizeStatsName(absl::string_view name);\n\n  /**\n   * Finds a metric tag with the specified name.\n   *\n   * @param metric The metric in which the tag is expected to exist.\n   * @param find_tag_name The name of the tag to search for.\n   * @return The value of the tag, if found.\n   */\n  static absl::optional<StatName> findTag(const Metric& metric, StatName find_tag_name);\n\n  /**\n   * Creates a counter from a vector of tokens which are used to create the\n   * name. The tokens can be specified as DynamicName or StatName. For\n   * tokens specified as DynamicName, a dynamic StatName will be created. See\n   * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens\n   * for more detail on why symbolic StatNames are preferred when possible.\n   *\n   * See also counterFromStatNames, which is slightly faster but does not allow\n   * passing DynamicName(string)s as names.\n   *\n   * @param scope The scope in which to create the counter.\n   * @param elements The vector of mixed DynamicName and StatName\n   * @param tags optionally specified tags.\n   * @return A counter named using the joined elements.\n   */\n  static Counter& counterFromElements(Scope& scope, const ElementVec& elements,\n                                      StatNameTagVectorOptConstRef tags = absl::nullopt);\n\n  /**\n   * Creates a counter from a vector of tokens which are used to create the\n   * name. 
The tokens must be of type StatName.\n   *\n   * See also counterFromElements, which is slightly slower, but allows\n   * passing DynamicName(string)s as elements.\n   *\n   * @param scope The scope in which to create the counter.\n   * @param names The vector of StatNames\n   * @param tags optionally specified tags.\n   * @return A counter named using the joined elements.\n   */\n  static Counter& counterFromStatNames(Scope& scope, const StatNameVec& names,\n                                       StatNameTagVectorOptConstRef tags = absl::nullopt);\n\n  /**\n   * Creates a gauge from a vector of tokens which are used to create the\n   * name. The tokens can be specified as DynamicName or StatName. For\n   * tokens specified as DynamicName, a dynamic StatName will be created. See\n   * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens\n   * for more detail on why symbolic StatNames are preferred when possible.\n   *\n   * See also gaugeFromStatNames, which is slightly faster but does not allow\n   * passing DynamicName(string)s as names.\n   *\n   * @param scope The scope in which to create the counter.\n   * @param elements The vector of mixed DynamicName and StatName\n   * @param import_mode Whether hot-restart should accumulate this value.\n   * @param tags optionally specified tags.\n   * @return A gauge named using the joined elements.\n   */\n  static Gauge& gaugeFromElements(Scope& scope, const ElementVec& elements,\n                                  Gauge::ImportMode import_mode,\n                                  StatNameTagVectorOptConstRef tags = absl::nullopt);\n\n  /**\n   * Creates a gauge from a vector of tokens which are used to create the\n   * name. 
The tokens must be of type StatName.\n   *\n   * See also gaugeFromElements, which is slightly slower, but allows\n   * passing DynamicName(string)s as elements.\n   *\n   * @param scope The scope in which to create the counter.\n   * @param names The vector of StatNames\n   * @param import_mode Whether hot-restart should accumulate this value.\n   * @param tags optionally specified tags.\n   * @return A gauge named using the joined elements.\n   */\n  static Gauge& gaugeFromStatNames(Scope& scope, const StatNameVec& elements,\n                                   Gauge::ImportMode import_mode,\n                                   StatNameTagVectorOptConstRef tags = absl::nullopt);\n\n  /**\n   * Creates a histogram from a vector of tokens which are used to create the\n   * name. The tokens can be specified as DynamicName or StatName. For\n   * tokens specified as DynamicName, a dynamic StatName will be created. See\n   * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens\n   * for more detail on why symbolic StatNames are preferred when possible.\n   *\n   * See also histogramFromStatNames, which is slightly faster but does not allow\n   * passing DynamicName(string)s as names.\n   *\n   * @param scope The scope in which to create the counter.\n   * @param elements The vector of mixed DynamicName and StatName\n   * @param unit The unit of measurement.\n   * @param tags optionally specified tags.\n   * @return A histogram named using the joined elements.\n   */\n  static Histogram& histogramFromElements(Scope& scope, const ElementVec& elements,\n                                          Histogram::Unit unit,\n                                          StatNameTagVectorOptConstRef tags = absl::nullopt);\n\n  /**\n   * Creates a histogram from a vector of tokens which are used to create the\n   * name. 
The tokens must be of type StatName.\n   *\n   * See also histogramFromElements, which is slightly slower, but allows\n   * passing DynamicName(string)s as elements.\n   *\n   * @param scope The scope in which to create the counter.\n   * @param elements The vector of mixed DynamicName and StatName\n   * @param unit The unit of measurement.\n   * @param tags optionally specified tags.\n   * @return A histogram named using the joined elements.\n   */\n  static Histogram& histogramFromStatNames(Scope& scope, const StatNameVec& elements,\n                                           Histogram::Unit unit,\n                                           StatNameTagVectorOptConstRef tags = absl::nullopt);\n\n  /**\n   * Creates a TextReadout from a vector of tokens which are used to create the\n   * name. The tokens can be specified as DynamicName or StatName. For\n   * tokens specified as DynamicName, a dynamic StatName will be created. See\n   * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens\n   * for more detail on why symbolic StatNames are preferred when possible.\n   *\n   * See also TextReadoutFromStatNames, which is slightly faster but does not allow\n   * passing DynamicName(string)s as names.\n   *\n   * @param scope The scope in which to create the counter.\n   * @param elements The vector of mixed DynamicName and StatName\n   * @param unit The unit of measurement.\n   * @param tags optionally specified tags.\n   * @return A TextReadout named using the joined elements.\n   */\n  static TextReadout& textReadoutFromElements(Scope& scope, const ElementVec& elements,\n                                              StatNameTagVectorOptConstRef tags = absl::nullopt);\n\n  /**\n   * Creates a TextReadout from a vector of tokens which are used to create the\n   * name. 
The tokens must be of type StatName.\n   *\n   * See also TextReadoutFromElements, which is slightly slower, but allows\n   * passing DynamicName(string)s as elements.\n   *\n   * @param scope The scope in which to create the counter.\n   * @param elements The vector of mixed DynamicName and StatName\n   * @param unit The unit of measurement.\n   * @param tags optionally specified tags.\n   * @return A TextReadout named using the joined elements.\n   */\n  static TextReadout& textReadoutFromStatNames(Scope& scope, const StatNameVec& elements,\n                                               StatNameTagVectorOptConstRef tags = absl::nullopt);\n};\n\n/**\n * Holds a reference to a stat by name. Note that the stat may not be created\n * yet at the time CachedReference is created. Calling get() then does a lazy\n * lookup, potentially returning absl::nullopt if the stat doesn't exist yet.\n * StatReference works whether the name was constructed symbolically, or with\n * StatNameDynamicStorage.\n *\n * Lookups are very slow, taking time proportional to the size of the scope,\n * holding mutexes during the lookup. However once the lookup succeeds, the\n * result is cached atomically, and further calls to get() are thus fast and\n * mutex-free. The implementation may be faster for stats that are named\n * symbolically.\n *\n * CachedReference is valid for the lifetime of the Scope. 
When the Scope\n * becomes invalid, CachedReferences must also be dropped as they will hold\n * pointers into the scope.\n */\ntemplate <class StatType> class CachedReference {\npublic:\n  CachedReference(Scope& scope, absl::string_view name) : scope_(scope), name_(std::string(name)) {}\n\n  /**\n   * Finds the named stat, if it exists, returning it as an optional.\n   */\n  absl::optional<std::reference_wrapper<StatType>> get() {\n    StatType* stat = stat_.get([this]() -> StatType* {\n      StatType* stat = nullptr;\n      IterateFn<StatType> check_stat = [this,\n                                        &stat](const RefcountPtr<StatType>& shared_stat) -> bool {\n        if (shared_stat->name() == name_) {\n          stat = shared_stat.get();\n          return false; // Stop iteration.\n        }\n        return true;\n      };\n      scope_.iterate(check_stat);\n      return stat;\n    });\n    if (stat == nullptr) {\n      return absl::nullopt;\n    }\n    return *stat;\n  }\n\nprivate:\n  Scope& scope_;\n  const std::string name_;\n  Thread::AtomicPtr<StatType, Thread::AtomicPtrAllocMode::DoNotDelete> stat_;\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stream_info/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"stream_info_lib\",\n    hdrs = [\"stream_info_impl.h\"],\n    deps = [\n        \":filter_state_lib\",\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:dump_state_utils\",\n        \"//source/common/http:request_id_extension_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_state_lib\",\n    srcs = [\"filter_state_impl.cc\"],\n    hdrs = [\"filter_state_impl.h\"],\n    deps = [\n        \"//include/envoy/stream_info:filter_state_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"uint32_accessor_lib\",\n    hdrs = [\"uint32_accessor_impl.h\"],\n    deps = [\n        \"//include/envoy/stream_info:uint32_accessor_interface\",\n    ],\n)\n"
  },
  {
    "path": "source/common/stream_info/filter_state_impl.cc",
    "content": "#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"envoy/common/exception.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\n\nvoid FilterStateImpl::setData(absl::string_view data_name, std::shared_ptr<Object> data,\n                              FilterState::StateType state_type, FilterState::LifeSpan life_span) {\n  if (life_span > life_span_) {\n    if (hasDataWithNameInternally(data_name)) {\n      throw EnvoyException(\n          \"FilterState::setData<T> called twice with conflicting life_span on the same data_name.\");\n    }\n    maybeCreateParent(ParentAccessMode::ReadWrite);\n    parent_->setData(data_name, data, state_type, life_span);\n    return;\n  }\n  if (parent_ && parent_->hasDataWithName(data_name)) {\n    throw EnvoyException(\n        \"FilterState::setData<T> called twice with conflicting life_span on the same data_name.\");\n  }\n  const auto& it = data_storage_.find(data_name);\n  if (it != data_storage_.end()) {\n    // We have another object with same data_name. Check for mutability\n    // violations namely: readonly data cannot be overwritten. 
mutable data\n    // cannot be overwritten by readonly data.\n    const FilterStateImpl::FilterObject* current = it->second.get();\n    if (current->state_type_ == FilterState::StateType::ReadOnly) {\n      throw EnvoyException(\"FilterState::setData<T> called twice on same ReadOnly state.\");\n    }\n\n    if (current->state_type_ != state_type) {\n      throw EnvoyException(\"FilterState::setData<T> called twice with different state types.\");\n    }\n  }\n\n  std::unique_ptr<FilterStateImpl::FilterObject> filter_object(new FilterStateImpl::FilterObject());\n  filter_object->data_ = data;\n  filter_object->state_type_ = state_type;\n  data_storage_[data_name] = std::move(filter_object);\n}\n\nbool FilterStateImpl::hasDataWithName(absl::string_view data_name) const {\n  return hasDataWithNameInternally(data_name) || (parent_ && parent_->hasDataWithName(data_name));\n}\n\nconst FilterState::Object*\nFilterStateImpl::getDataReadOnlyGeneric(absl::string_view data_name) const {\n  const auto& it = data_storage_.find(data_name);\n\n  if (it == data_storage_.end()) {\n    if (parent_) {\n      return &(parent_->getDataReadOnly<FilterState::Object>(data_name));\n    }\n    throw EnvoyException(\"FilterState::getDataReadOnly<T> called for unknown data name.\");\n  }\n\n  const FilterStateImpl::FilterObject* current = it->second.get();\n  return current->data_.get();\n}\n\nFilterState::Object* FilterStateImpl::getDataMutableGeneric(absl::string_view data_name) {\n  const auto& it = data_storage_.find(data_name);\n\n  if (it == data_storage_.end()) {\n    if (parent_) {\n      return &(parent_->getDataMutable<FilterState::Object>(data_name));\n    }\n    throw EnvoyException(\"FilterState::getDataMutable<T> called for unknown data name.\");\n  }\n\n  FilterStateImpl::FilterObject* current = it->second.get();\n  if (current->state_type_ == FilterState::StateType::ReadOnly) {\n    throw EnvoyException(\n        \"FilterState::getDataMutable<T> tried to access immutable data 
as mutable.\");\n  }\n\n  return current->data_.get();\n}\n\nbool FilterStateImpl::hasDataAtOrAboveLifeSpan(FilterState::LifeSpan life_span) const {\n  if (life_span > life_span_) {\n    return parent_ && parent_->hasDataAtOrAboveLifeSpan(life_span);\n  }\n  return !data_storage_.empty() || (parent_ && parent_->hasDataAtOrAboveLifeSpan(life_span));\n}\n\nbool FilterStateImpl::hasDataWithNameInternally(absl::string_view data_name) const {\n  return data_storage_.count(data_name) > 0;\n}\n\nvoid FilterStateImpl::maybeCreateParent(ParentAccessMode parent_access_mode) {\n  if (parent_ != nullptr) {\n    return;\n  }\n  if (life_span_ >= FilterState::LifeSpan::TopSpan) {\n    return;\n  }\n  if (absl::holds_alternative<FilterStateSharedPtr>(ancestor_)) {\n    FilterStateSharedPtr ancestor = absl::get<FilterStateSharedPtr>(ancestor_);\n    if (ancestor == nullptr || ancestor->lifeSpan() != life_span_ + 1) {\n      parent_ = std::make_shared<FilterStateImpl>(ancestor, FilterState::LifeSpan(life_span_ + 1));\n    } else {\n      parent_ = ancestor;\n    }\n    return;\n  }\n\n  auto lazy_create_ancestor = absl::get<LazyCreateAncestor>(ancestor_);\n  // If we're only going to read data from our parent, we don't need to create lazy ancestor,\n  // because they're empty anyways.\n  if (parent_access_mode == ParentAccessMode::ReadOnly && lazy_create_ancestor.first == nullptr) {\n    return;\n  }\n\n  // Lazy ancestor is not our immediate parent.\n  if (lazy_create_ancestor.second != life_span_ + 1) {\n    parent_ = std::make_shared<FilterStateImpl>(lazy_create_ancestor,\n                                                FilterState::LifeSpan(life_span_ + 1));\n    return;\n  }\n  // Lazy parent is our immediate parent.\n  if (lazy_create_ancestor.first == nullptr) {\n    lazy_create_ancestor.first =\n        std::make_shared<FilterStateImpl>(FilterState::LifeSpan(life_span_ + 1));\n  }\n  parent_ = lazy_create_ancestor.first;\n}\n\n} // namespace StreamInfo\n} // namespace 
Envoy\n"
  },
  {
    "path": "source/common/stream_info/filter_state_impl.h",
    "content": "#pragma once\n\n#include <memory>\n#include <utility>\n#include <vector>\n\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\n\nclass FilterStateImpl : public FilterState {\npublic:\n  FilterStateImpl(FilterState::LifeSpan life_span) : life_span_(life_span) {\n    maybeCreateParent(ParentAccessMode::ReadOnly);\n  }\n\n  /**\n   * @param ancestor a std::shared_ptr storing an already created ancestor.\n   * @param life_span the life span this is handling.\n   */\n  FilterStateImpl(FilterStateSharedPtr ancestor, FilterState::LifeSpan life_span)\n      : ancestor_(ancestor), life_span_(life_span) {\n    maybeCreateParent(ParentAccessMode::ReadOnly);\n  }\n\n  using LazyCreateAncestor = std::pair<FilterStateSharedPtr, FilterState::LifeSpan>;\n  /**\n   * @param ancestor a std::pair storing an ancestor, that can be passed in as a way to lazy\n   * initialize a FilterState that's owned by an object with bigger scope than this. 
This is to\n   * avoid creating a FilterState that's empty in most cases.\n   * @param life_span the life span this is handling.\n   */\n  FilterStateImpl(LazyCreateAncestor lazy_create_ancestor, FilterState::LifeSpan life_span)\n      : ancestor_(lazy_create_ancestor), life_span_(life_span) {\n    maybeCreateParent(ParentAccessMode::ReadOnly);\n  }\n\n  // FilterState\n  void setData(absl::string_view data_name, std::shared_ptr<Object> data,\n               FilterState::StateType state_type,\n               FilterState::LifeSpan life_span = FilterState::LifeSpan::FilterChain) override;\n  bool hasDataWithName(absl::string_view) const override;\n  const Object* getDataReadOnlyGeneric(absl::string_view data_name) const override;\n  Object* getDataMutableGeneric(absl::string_view data_name) override;\n  bool hasDataAtOrAboveLifeSpan(FilterState::LifeSpan life_span) const override;\n\n  FilterState::LifeSpan lifeSpan() const override { return life_span_; }\n  FilterStateSharedPtr parent() const override { return parent_; }\n\nprivate:\n  // This only checks the local data_storage_ for data_name existence.\n  bool hasDataWithNameInternally(absl::string_view data_name) const;\n  enum class ParentAccessMode { ReadOnly, ReadWrite };\n  void maybeCreateParent(ParentAccessMode parent_access_mode);\n\n  struct FilterObject {\n    std::shared_ptr<Object> data_;\n    FilterState::StateType state_type_;\n  };\n\n  absl::variant<FilterStateSharedPtr, LazyCreateAncestor> ancestor_;\n  FilterStateSharedPtr parent_;\n  const FilterState::LifeSpan life_span_;\n  absl::flat_hash_map<std::string, std::unique_ptr<FilterObject>> data_storage_;\n};\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stream_info/stream_info_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/dump_state_utils.h\"\n#include \"common/http/request_id_extension_impl.h\"\n#include \"common/stream_info/filter_state_impl.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\n\nstruct StreamInfoImpl : public StreamInfo {\n  StreamInfoImpl(TimeSource& time_source,\n                 FilterState::LifeSpan life_span = FilterState::LifeSpan::FilterChain)\n      : StreamInfoImpl(absl::nullopt, time_source, std::make_shared<FilterStateImpl>(life_span)) {}\n\n  StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source)\n      : StreamInfoImpl(protocol, time_source,\n                       std::make_shared<FilterStateImpl>(FilterState::LifeSpan::FilterChain)) {}\n\n  StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source,\n                 FilterStateSharedPtr parent_filter_state, FilterState::LifeSpan life_span)\n      : StreamInfoImpl(\n            protocol, time_source,\n            std::make_shared<FilterStateImpl>(\n                FilterStateImpl::LazyCreateAncestor(std::move(parent_filter_state), life_span),\n                FilterState::LifeSpan::FilterChain)) {}\n\n  SystemTime startTime() const override { return start_time_; }\n\n  MonotonicTime startTimeMonotonic() const override { return start_time_monotonic_; }\n\n  absl::optional<std::chrono::nanoseconds> duration(absl::optional<MonotonicTime> time) const {\n    if (!time) {\n      return {};\n    }\n\n    return std::chrono::duration_cast<std::chrono::nanoseconds>(time.value() -\n                                                                start_time_monotonic_);\n  }\n\n  absl::optional<std::chrono::nanoseconds> lastDownstreamRxByteReceived() const 
override {\n    return duration(last_downstream_rx_byte_received);\n  }\n\n  void onLastDownstreamRxByteReceived() override {\n    ASSERT(!last_downstream_rx_byte_received);\n    last_downstream_rx_byte_received = time_source_.monotonicTime();\n  }\n\n  void setUpstreamTiming(const UpstreamTiming& upstream_timing) override {\n    upstream_timing_ = upstream_timing;\n  }\n\n  absl::optional<std::chrono::nanoseconds> firstUpstreamTxByteSent() const override {\n    return duration(upstream_timing_.first_upstream_tx_byte_sent_);\n  }\n\n  absl::optional<std::chrono::nanoseconds> lastUpstreamTxByteSent() const override {\n    return duration(upstream_timing_.last_upstream_tx_byte_sent_);\n  }\n\n  absl::optional<std::chrono::nanoseconds> firstUpstreamRxByteReceived() const override {\n    return duration(upstream_timing_.first_upstream_rx_byte_received_);\n  }\n\n  absl::optional<std::chrono::nanoseconds> lastUpstreamRxByteReceived() const override {\n    return duration(upstream_timing_.last_upstream_rx_byte_received_);\n  }\n\n  absl::optional<std::chrono::nanoseconds> firstDownstreamTxByteSent() const override {\n    return duration(first_downstream_tx_byte_sent_);\n  }\n\n  void onFirstDownstreamTxByteSent() override {\n    ASSERT(!first_downstream_tx_byte_sent_);\n    first_downstream_tx_byte_sent_ = time_source_.monotonicTime();\n  }\n\n  absl::optional<std::chrono::nanoseconds> lastDownstreamTxByteSent() const override {\n    return duration(last_downstream_tx_byte_sent_);\n  }\n\n  void onLastDownstreamTxByteSent() override {\n    ASSERT(!last_downstream_tx_byte_sent_);\n    last_downstream_tx_byte_sent_ = time_source_.monotonicTime();\n  }\n\n  absl::optional<std::chrono::nanoseconds> requestComplete() const override {\n    return duration(final_time_);\n  }\n\n  void onRequestComplete() override {\n    ASSERT(!final_time_);\n    final_time_ = time_source_.monotonicTime();\n  }\n\n  void addBytesReceived(uint64_t bytes_received) override { bytes_received_ += 
bytes_received; }\n\n  uint64_t bytesReceived() const override { return bytes_received_; }\n\n  absl::optional<Http::Protocol> protocol() const override { return protocol_; }\n\n  void protocol(Http::Protocol protocol) override { protocol_ = protocol; }\n\n  absl::optional<uint32_t> responseCode() const override { return response_code_; }\n\n  const absl::optional<std::string>& responseCodeDetails() const override {\n    return response_code_details_;\n  }\n\n  void setResponseCodeDetails(absl::string_view rc_details) override {\n    response_code_details_.emplace(rc_details);\n  }\n\n  const absl::optional<std::string>& connectionTerminationDetails() const override {\n    return connection_termination_details_;\n  }\n\n  void setConnectionTerminationDetails(absl::string_view connection_termination_details) override {\n    connection_termination_details_.emplace(connection_termination_details);\n  }\n\n  void addBytesSent(uint64_t bytes_sent) override { bytes_sent_ += bytes_sent; }\n\n  uint64_t bytesSent() const override { return bytes_sent_; }\n\n  void setResponseFlag(ResponseFlag response_flag) override { response_flags_ |= response_flag; }\n\n  bool intersectResponseFlags(uint64_t response_flags) const override {\n    return (response_flags_ & response_flags) != 0;\n  }\n\n  bool hasResponseFlag(ResponseFlag flag) const override { return response_flags_ & flag; }\n\n  bool hasAnyResponseFlag() const override { return response_flags_ != 0; }\n\n  uint64_t responseFlags() const override { return response_flags_; }\n\n  void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) override {\n    upstream_host_ = host;\n  }\n\n  Upstream::HostDescriptionConstSharedPtr upstreamHost() const override { return upstream_host_; }\n\n  void setRouteName(absl::string_view route_name) override {\n    route_name_ = std::string(route_name);\n  }\n\n  const std::string& getRouteName() const override { return route_name_; }\n\n  void setUpstreamLocalAddress(\n     
 const Network::Address::InstanceConstSharedPtr& upstream_local_address) override {\n    upstream_local_address_ = upstream_local_address;\n  }\n\n  const Network::Address::InstanceConstSharedPtr& upstreamLocalAddress() const override {\n    return upstream_local_address_;\n  }\n\n  bool healthCheck() const override { return health_check_request_; }\n\n  void healthCheck(bool is_health_check) override { health_check_request_ = is_health_check; }\n\n  void setDownstreamLocalAddress(\n      const Network::Address::InstanceConstSharedPtr& downstream_local_address) override {\n    downstream_local_address_ = downstream_local_address;\n  }\n\n  const Network::Address::InstanceConstSharedPtr& downstreamLocalAddress() const override {\n    return downstream_local_address_;\n  }\n\n  void setDownstreamDirectRemoteAddress(\n      const Network::Address::InstanceConstSharedPtr& downstream_direct_remote_address) override {\n    downstream_direct_remote_address_ = downstream_direct_remote_address;\n  }\n\n  const Network::Address::InstanceConstSharedPtr& downstreamDirectRemoteAddress() const override {\n    return downstream_direct_remote_address_;\n  }\n\n  void setDownstreamRemoteAddress(\n      const Network::Address::InstanceConstSharedPtr& downstream_remote_address) override {\n    downstream_remote_address_ = downstream_remote_address;\n  }\n\n  const Network::Address::InstanceConstSharedPtr& downstreamRemoteAddress() const override {\n    return downstream_remote_address_;\n  }\n\n  void\n  setDownstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& connection_info) override {\n    downstream_ssl_info_ = connection_info;\n  }\n\n  Ssl::ConnectionInfoConstSharedPtr downstreamSslConnection() const override {\n    return downstream_ssl_info_;\n  }\n\n  void setUpstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& connection_info) override {\n    upstream_ssl_info_ = connection_info;\n  }\n\n  Ssl::ConnectionInfoConstSharedPtr upstreamSslConnection() const 
override {\n    return upstream_ssl_info_;\n  }\n\n  const Router::RouteEntry* routeEntry() const override { return route_entry_; }\n\n  envoy::config::core::v3::Metadata& dynamicMetadata() override { return metadata_; };\n  const envoy::config::core::v3::Metadata& dynamicMetadata() const override { return metadata_; };\n\n  void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) override {\n    (*metadata_.mutable_filter_metadata())[name].MergeFrom(value);\n  };\n\n  const FilterStateSharedPtr& filterState() override { return filter_state_; }\n  const FilterState& filterState() const override { return *filter_state_; }\n\n  const FilterStateSharedPtr& upstreamFilterState() const override {\n    return upstream_filter_state_;\n  }\n  void setUpstreamFilterState(const FilterStateSharedPtr& filter_state) override {\n    upstream_filter_state_ = filter_state;\n  }\n\n  void setRequestedServerName(absl::string_view requested_server_name) override {\n    requested_server_name_ = std::string(requested_server_name);\n  }\n\n  const std::string& requestedServerName() const override { return requested_server_name_; }\n\n  void setUpstreamTransportFailureReason(absl::string_view failure_reason) override {\n    upstream_transport_failure_reason_ = std::string(failure_reason);\n  }\n\n  const std::string& upstreamTransportFailureReason() const override {\n    return upstream_transport_failure_reason_;\n  }\n\n  void setRequestHeaders(const Http::RequestHeaderMap& headers) override {\n    request_headers_ = &headers;\n  }\n\n  const Http::RequestHeaderMap* getRequestHeaders() const override { return request_headers_; }\n\n  void setRequestIDExtension(Http::RequestIDExtensionSharedPtr utils) override {\n    request_id_extension_ = utils;\n  }\n  Http::RequestIDExtensionSharedPtr getRequestIDExtension() const override {\n    return request_id_extension_;\n  }\n\n  void dumpState(std::ostream& os, int indent_level = 0) const {\n    const char* spaces = 
spacesForLevel(indent_level);\n    os << spaces << \"StreamInfoImpl \" << this << DUMP_OPTIONAL_MEMBER(protocol_)\n       << DUMP_OPTIONAL_MEMBER(response_code_) << DUMP_OPTIONAL_MEMBER(response_code_details_)\n       << DUMP_MEMBER(health_check_request_) << DUMP_MEMBER(route_name_) << \"\\n\";\n  }\n\n  void setUpstreamClusterInfo(\n      const Upstream::ClusterInfoConstSharedPtr& upstream_cluster_info) override {\n    upstream_cluster_info_ = upstream_cluster_info;\n  }\n\n  absl::optional<Upstream::ClusterInfoConstSharedPtr> upstreamClusterInfo() const override {\n    return upstream_cluster_info_;\n  }\n\n  void setConnectionID(uint64_t id) override { connection_id_ = id; }\n\n  absl::optional<uint64_t> connectionID() const override { return connection_id_; }\n\n  TimeSource& time_source_;\n  const SystemTime start_time_;\n  const MonotonicTime start_time_monotonic_;\n\n  absl::optional<MonotonicTime> last_downstream_rx_byte_received;\n  absl::optional<MonotonicTime> first_downstream_tx_byte_sent_;\n  absl::optional<MonotonicTime> last_downstream_tx_byte_sent_;\n  absl::optional<MonotonicTime> final_time_;\n\n  absl::optional<Http::Protocol> protocol_;\n  absl::optional<uint32_t> response_code_;\n  absl::optional<std::string> response_code_details_;\n  absl::optional<std::string> connection_termination_details_;\n  uint64_t response_flags_{};\n  Upstream::HostDescriptionConstSharedPtr upstream_host_{};\n  bool health_check_request_{};\n  const Router::RouteEntry* route_entry_{};\n  envoy::config::core::v3::Metadata metadata_{};\n  FilterStateSharedPtr filter_state_;\n  FilterStateSharedPtr upstream_filter_state_;\n  std::string route_name_;\n\nprivate:\n  StreamInfoImpl(absl::optional<Http::Protocol> protocol, TimeSource& time_source,\n                 FilterStateSharedPtr filter_state)\n      : time_source_(time_source), start_time_(time_source.systemTime()),\n        start_time_monotonic_(time_source.monotonicTime()), protocol_(protocol),\n        
filter_state_(std::move(filter_state)),\n        request_id_extension_(Http::RequestIDExtensionFactory::noopInstance()) {}\n\n  uint64_t bytes_received_{};\n  uint64_t bytes_sent_{};\n  Network::Address::InstanceConstSharedPtr upstream_local_address_;\n  Network::Address::InstanceConstSharedPtr downstream_local_address_;\n  Network::Address::InstanceConstSharedPtr downstream_direct_remote_address_;\n  Network::Address::InstanceConstSharedPtr downstream_remote_address_;\n  Ssl::ConnectionInfoConstSharedPtr downstream_ssl_info_;\n  Ssl::ConnectionInfoConstSharedPtr upstream_ssl_info_;\n  std::string requested_server_name_;\n  const Http::RequestHeaderMap* request_headers_{};\n  Http::RequestIDExtensionSharedPtr request_id_extension_;\n  UpstreamTiming upstream_timing_;\n  std::string upstream_transport_failure_reason_;\n  absl::optional<Upstream::ClusterInfoConstSharedPtr> upstream_cluster_info_;\n  absl::optional<uint64_t> connection_id_;\n};\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stream_info/uint32_accessor_impl.h",
    "content": "#pragma once\n\n#include \"envoy/stream_info/uint32_accessor.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\n\n/*\n * A FilterState object that tracks a single uint32_t value.\n */\nclass UInt32AccessorImpl : public UInt32Accessor {\npublic:\n  UInt32AccessorImpl(uint32_t value) : value_(value) {}\n\n  // From FilterState::Object\n  ProtobufTypes::MessagePtr serializeAsProto() const override {\n    auto message = std::make_unique<ProtobufWkt::UInt32Value>();\n    message->set_value(value_);\n    return message;\n  }\n\n  // From UInt32Accessor.\n  void increment() override { value_++; }\n  uint32_t value() const override { return value_; }\n\nprivate:\n  uint32_t value_;\n};\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stream_info/utility.cc",
    "content": "#include \"common/stream_info/utility.h\"\n\n#include <string>\n\nnamespace Envoy {\nnamespace StreamInfo {\n\nconst std::string ResponseFlagUtils::NONE = \"-\";\nconst std::string ResponseFlagUtils::DOWNSTREAM_CONNECTION_TERMINATION = \"DC\";\nconst std::string ResponseFlagUtils::FAILED_LOCAL_HEALTH_CHECK = \"LH\";\nconst std::string ResponseFlagUtils::NO_HEALTHY_UPSTREAM = \"UH\";\nconst std::string ResponseFlagUtils::UPSTREAM_REQUEST_TIMEOUT = \"UT\";\nconst std::string ResponseFlagUtils::LOCAL_RESET = \"LR\";\nconst std::string ResponseFlagUtils::UPSTREAM_REMOTE_RESET = \"UR\";\nconst std::string ResponseFlagUtils::UPSTREAM_CONNECTION_FAILURE = \"UF\";\nconst std::string ResponseFlagUtils::UPSTREAM_CONNECTION_TERMINATION = \"UC\";\nconst std::string ResponseFlagUtils::UPSTREAM_OVERFLOW = \"UO\";\nconst std::string ResponseFlagUtils::UPSTREAM_RETRY_LIMIT_EXCEEDED = \"URX\";\nconst std::string ResponseFlagUtils::NO_ROUTE_FOUND = \"NR\";\nconst std::string ResponseFlagUtils::DELAY_INJECTED = \"DI\";\nconst std::string ResponseFlagUtils::FAULT_INJECTED = \"FI\";\nconst std::string ResponseFlagUtils::RATE_LIMITED = \"RL\";\nconst std::string ResponseFlagUtils::UNAUTHORIZED_EXTERNAL_SERVICE = \"UAEX\";\nconst std::string ResponseFlagUtils::RATELIMIT_SERVICE_ERROR = \"RLSE\";\nconst std::string ResponseFlagUtils::STREAM_IDLE_TIMEOUT = \"SI\";\nconst std::string ResponseFlagUtils::INVALID_ENVOY_REQUEST_HEADERS = \"IH\";\nconst std::string ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR = \"DPE\";\nconst std::string ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED = \"UMSDR\";\nconst std::string ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER = \"RFCF\";\nconst std::string ResponseFlagUtils::NO_FILTER_CONFIG_FOUND = \"NFCF\";\nconst std::string ResponseFlagUtils::DURATION_TIMEOUT = \"DT\";\n\nvoid ResponseFlagUtils::appendString(std::string& result, const std::string& append) {\n  if (result.empty()) {\n    result = append;\n  } else {\n    result += 
\",\" + append;\n  }\n}\n\nconst std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info) {\n  std::string result;\n\n  static_assert(ResponseFlag::LastFlag == 0x400000, \"A flag has been added. Fix this code.\");\n\n  if (stream_info.hasResponseFlag(ResponseFlag::FailedLocalHealthCheck)) {\n    appendString(result, FAILED_LOCAL_HEALTH_CHECK);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::NoHealthyUpstream)) {\n    appendString(result, NO_HEALTHY_UPSTREAM);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::UpstreamRequestTimeout)) {\n    appendString(result, UPSTREAM_REQUEST_TIMEOUT);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::LocalReset)) {\n    appendString(result, LOCAL_RESET);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::UpstreamRemoteReset)) {\n    appendString(result, UPSTREAM_REMOTE_RESET);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::UpstreamConnectionFailure)) {\n    appendString(result, UPSTREAM_CONNECTION_FAILURE);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::UpstreamConnectionTermination)) {\n    appendString(result, UPSTREAM_CONNECTION_TERMINATION);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::UpstreamOverflow)) {\n    appendString(result, UPSTREAM_OVERFLOW);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::NoRouteFound)) {\n    appendString(result, NO_ROUTE_FOUND);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::DelayInjected)) {\n    appendString(result, DELAY_INJECTED);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::FaultInjected)) {\n    appendString(result, FAULT_INJECTED);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::RateLimited)) {\n    appendString(result, RATE_LIMITED);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::UnauthorizedExternalService)) {\n    appendString(result, UNAUTHORIZED_EXTERNAL_SERVICE);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::RateLimitServiceError)) {\n    appendString(result, 
RATELIMIT_SERVICE_ERROR);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::DownstreamConnectionTermination)) {\n    appendString(result, DOWNSTREAM_CONNECTION_TERMINATION);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::UpstreamRetryLimitExceeded)) {\n    appendString(result, UPSTREAM_RETRY_LIMIT_EXCEEDED);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::StreamIdleTimeout)) {\n    appendString(result, STREAM_IDLE_TIMEOUT);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::InvalidEnvoyRequestHeaders)) {\n    appendString(result, INVALID_ENVOY_REQUEST_HEADERS);\n  }\n  if (stream_info.hasResponseFlag(ResponseFlag::DownstreamProtocolError)) {\n    appendString(result, DOWNSTREAM_PROTOCOL_ERROR);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::UpstreamMaxStreamDurationReached)) {\n    appendString(result, UPSTREAM_MAX_STREAM_DURATION_REACHED);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::ResponseFromCacheFilter)) {\n    appendString(result, RESPONSE_FROM_CACHE_FILTER);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::NoFilterConfigFound)) {\n    appendString(result, NO_FILTER_CONFIG_FOUND);\n  }\n\n  if (stream_info.hasResponseFlag(ResponseFlag::DurationTimeout)) {\n    appendString(result, DURATION_TIMEOUT);\n  }\n\n  return result.empty() ? 
NONE : result;\n}\n\nabsl::optional<ResponseFlag> ResponseFlagUtils::toResponseFlag(const std::string& flag) {\n  static const std::map<std::string, ResponseFlag> map = {\n      {ResponseFlagUtils::FAILED_LOCAL_HEALTH_CHECK, ResponseFlag::FailedLocalHealthCheck},\n      {ResponseFlagUtils::NO_HEALTHY_UPSTREAM, ResponseFlag::NoHealthyUpstream},\n      {ResponseFlagUtils::UPSTREAM_REQUEST_TIMEOUT, ResponseFlag::UpstreamRequestTimeout},\n      {ResponseFlagUtils::LOCAL_RESET, ResponseFlag::LocalReset},\n      {ResponseFlagUtils::UPSTREAM_REMOTE_RESET, ResponseFlag::UpstreamRemoteReset},\n      {ResponseFlagUtils::UPSTREAM_CONNECTION_FAILURE, ResponseFlag::UpstreamConnectionFailure},\n      {ResponseFlagUtils::UPSTREAM_CONNECTION_TERMINATION,\n       ResponseFlag::UpstreamConnectionTermination},\n      {ResponseFlagUtils::UPSTREAM_OVERFLOW, ResponseFlag::UpstreamOverflow},\n      {ResponseFlagUtils::NO_ROUTE_FOUND, ResponseFlag::NoRouteFound},\n      {ResponseFlagUtils::DELAY_INJECTED, ResponseFlag::DelayInjected},\n      {ResponseFlagUtils::FAULT_INJECTED, ResponseFlag::FaultInjected},\n      {ResponseFlagUtils::RATE_LIMITED, ResponseFlag::RateLimited},\n      {ResponseFlagUtils::UNAUTHORIZED_EXTERNAL_SERVICE, ResponseFlag::UnauthorizedExternalService},\n      {ResponseFlagUtils::RATELIMIT_SERVICE_ERROR, ResponseFlag::RateLimitServiceError},\n      {ResponseFlagUtils::DOWNSTREAM_CONNECTION_TERMINATION,\n       ResponseFlag::DownstreamConnectionTermination},\n      {ResponseFlagUtils::UPSTREAM_RETRY_LIMIT_EXCEEDED, ResponseFlag::UpstreamRetryLimitExceeded},\n      {ResponseFlagUtils::STREAM_IDLE_TIMEOUT, ResponseFlag::StreamIdleTimeout},\n      {ResponseFlagUtils::INVALID_ENVOY_REQUEST_HEADERS, ResponseFlag::InvalidEnvoyRequestHeaders},\n      {ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR, ResponseFlag::DownstreamProtocolError},\n      {ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED,\n       ResponseFlag::UpstreamMaxStreamDurationReached},\n      
{ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER, ResponseFlag::ResponseFromCacheFilter},\n      {ResponseFlagUtils::NO_FILTER_CONFIG_FOUND, ResponseFlag::NoFilterConfigFound},\n      {ResponseFlagUtils::DURATION_TIMEOUT, ResponseFlag::DurationTimeout},\n  };\n  const auto& it = map.find(flag);\n  if (it != map.end()) {\n    return absl::make_optional<ResponseFlag>(it->second);\n  }\n  return absl::nullopt;\n}\n\nconst std::string&\nUtility::formatDownstreamAddressNoPort(const Network::Address::Instance& address) {\n  if (address.type() == Network::Address::Type::Ip) {\n    return address.ip()->addressAsString();\n  } else {\n    return address.asString();\n  }\n}\n\nconst std::string\nUtility::formatDownstreamAddressJustPort(const Network::Address::Instance& address) {\n  std::string port;\n  if (address.type() == Network::Address::Type::Ip) {\n    port = std::to_string(address.ip()->port());\n  }\n  return port;\n}\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/stream_info/utility.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n\n#include \"envoy/stream_info/stream_info.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\n\n/**\n * Util class for ResponseFlags.\n */\nclass ResponseFlagUtils {\npublic:\n  static const std::string toShortString(const StreamInfo& stream_info);\n  static absl::optional<ResponseFlag> toResponseFlag(const std::string& response_flag);\n\nprivate:\n  ResponseFlagUtils();\n  static void appendString(std::string& result, const std::string& append);\n\n  const static std::string NONE;\n  const static std::string DOWNSTREAM_CONNECTION_TERMINATION;\n  const static std::string FAILED_LOCAL_HEALTH_CHECK;\n  const static std::string NO_HEALTHY_UPSTREAM;\n  const static std::string UPSTREAM_REQUEST_TIMEOUT;\n  const static std::string LOCAL_RESET;\n  const static std::string UPSTREAM_REMOTE_RESET;\n  const static std::string UPSTREAM_CONNECTION_FAILURE;\n  const static std::string UPSTREAM_CONNECTION_TERMINATION;\n  const static std::string UPSTREAM_OVERFLOW;\n  const static std::string UPSTREAM_RETRY_LIMIT_EXCEEDED;\n  const static std::string NO_ROUTE_FOUND;\n  const static std::string DELAY_INJECTED;\n  const static std::string FAULT_INJECTED;\n  const static std::string RATE_LIMITED;\n  const static std::string UNAUTHORIZED_EXTERNAL_SERVICE;\n  const static std::string RATELIMIT_SERVICE_ERROR;\n  const static std::string STREAM_IDLE_TIMEOUT;\n  const static std::string INVALID_ENVOY_REQUEST_HEADERS;\n  const static std::string DOWNSTREAM_PROTOCOL_ERROR;\n  const static std::string UPSTREAM_MAX_STREAM_DURATION_REACHED;\n  const static std::string RESPONSE_FROM_CACHE_FILTER;\n  const static std::string NO_FILTER_CONFIG_FOUND;\n  const static std::string DURATION_TIMEOUT;\n};\n\n/**\n * Utility class for StreamInfo.\n */\nclass Utility {\npublic:\n  /**\n   * @param address supplies the downstream address.\n   * @return a properly formatted address for logs, header expansion, etc.\n   */\n  static const 
std::string&\n  formatDownstreamAddressNoPort(const Network::Address::Instance& address);\n\n  /**\n   * @param address supplies the downstream address.\n   * @return a port, extracted from the provided downstream address for logs, header expansion, etc.\n   */\n  static const std::string\n  formatDownstreamAddressJustPort(const Network::Address::Instance& address);\n};\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tcp/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"conn_pool_lib\",\n    srcs = [\n        \"conn_pool.cc\",\n        \"original_conn_pool.cc\",\n    ],\n    hdrs = [\n        \"conn_pool.h\",\n        \"original_conn_pool.h\",\n    ],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http:conn_pool_base_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/common/upstream:upstream_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/tcp/conn_pool.cc",
    "content": "#include \"common/tcp/conn_pool.h\"\n\n#include <memory>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/stats/timespan_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Tcp {\n\nActiveTcpClient::ActiveTcpClient(ConnPoolImpl& parent, const Upstream::HostConstSharedPtr& host,\n                                 uint64_t concurrent_stream_limit)\n    : Envoy::ConnectionPool::ActiveClient(parent, host->cluster().maxRequestsPerConnection(),\n                                          concurrent_stream_limit),\n      parent_(parent) {\n  Upstream::Host::CreateConnectionData data = host->createConnection(\n      parent_.dispatcher(), parent_.socketOptions(), parent_.transportSocketOptions());\n  real_host_description_ = data.host_description_;\n  connection_ = std::move(data.connection_);\n  connection_->addConnectionCallbacks(*this);\n  connection_->detectEarlyCloseWhenReadDisabled(false);\n  connection_->addReadFilter(std::make_shared<ConnReadFilter>(*this));\n  connection_->connect();\n}\n\nActiveTcpClient::~ActiveTcpClient() {\n  // Handle the case where deferred delete results in the ActiveClient being destroyed before\n  // TcpConnectionData. 
Make sure the TcpConnectionData will not refer to this ActiveTcpClient\n  // and handle clean up normally done in clearCallbacks()\n  if (tcp_connection_data_) {\n    ASSERT(state_ == ActiveClient::State::CLOSED);\n    tcp_connection_data_->release();\n    parent_.onStreamClosed(*this, true);\n    parent_.checkForDrained();\n  }\n  parent_.onConnDestroyed();\n}\n\nvoid ActiveTcpClient::clearCallbacks() {\n  if (state_ == Envoy::ConnectionPool::ActiveClient::State::BUSY ||\n      state_ == Envoy::ConnectionPool::ActiveClient::State::DRAINING) {\n    parent_.onConnReleased(*this);\n  }\n  callbacks_ = nullptr;\n  tcp_connection_data_ = nullptr;\n  parent_.onStreamClosed(*this, true);\n  parent_.checkForDrained();\n}\n\nvoid ActiveTcpClient::onEvent(Network::ConnectionEvent event) {\n  Envoy::ConnectionPool::ActiveClient::onEvent(event);\n  // Do not pass the Connected event to any session which registered during onEvent above.\n  // Consumers of connection pool connections assume they are receiving already connected\n  // connections.\n  if (callbacks_ && event != Network::ConnectionEvent::Connected) {\n    callbacks_->onEvent(event);\n    // After receiving a disconnect event, the owner of callbacks_ will likely self-destruct.\n    // Clear the pointer to avoid using it again.\n    callbacks_ = nullptr;\n  }\n}\n\n} // namespace Tcp\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tcp/conn_pool.h",
    "content": "#pragma once\n\n#include <list>\n#include <memory>\n\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/timespan.h\"\n#include \"envoy/tcp/conn_pool.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/conn_pool_base.h\"\n#include \"common/network/filter_impl.h\"\n\nnamespace Envoy {\nnamespace Tcp {\n\nclass ConnPoolImpl;\n\nstruct TcpAttachContext : public Envoy::ConnectionPool::AttachContext {\n  TcpAttachContext(Tcp::ConnectionPool::Callbacks* callbacks) : callbacks_(callbacks) {}\n  Tcp::ConnectionPool::Callbacks* callbacks_;\n};\n\nclass TcpPendingStream : public Envoy::ConnectionPool::PendingStream {\npublic:\n  TcpPendingStream(Envoy::ConnectionPool::ConnPoolImplBase& parent, TcpAttachContext& context)\n      : Envoy::ConnectionPool::PendingStream(parent), context_(context) {}\n  Envoy::ConnectionPool::AttachContext& context() override { return context_; }\n\n  TcpAttachContext context_;\n};\n\nclass ActiveTcpClient : public Envoy::ConnectionPool::ActiveClient {\npublic:\n  struct ConnReadFilter : public Network::ReadFilterBaseImpl {\n    ConnReadFilter(ActiveTcpClient& parent) : parent_(parent) {}\n\n    // Network::ReadFilter\n    Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override {\n      parent_.onUpstreamData(data, end_stream);\n      return Network::FilterStatus::StopIteration;\n    }\n    ActiveTcpClient& parent_;\n  };\n\n  // This acts as the bridge between the ActiveTcpClient and an individual TCP connection.\n  class TcpConnectionData : public Envoy::Tcp::ConnectionPool::ConnectionData {\n  public:\n    TcpConnectionData(ActiveTcpClient& parent, Network::ClientConnection& connection)\n        : parent_(&parent), connection_(connection) {\n      parent_->tcp_connection_data_ = 
this;\n    }\n    ~TcpConnectionData() override {\n      // Generally it is the case that TcpConnectionData will be destroyed before the\n      // ActiveTcpClient. Because ordering on the deferred delete list is not guaranteed in the\n      // case of a disconnect, make sure parent_ is valid before doing clean-up.\n      if (parent_) {\n        parent_->clearCallbacks();\n      }\n    }\n\n    Network::ClientConnection& connection() override { return connection_; }\n    void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) override {\n      parent_->connection_state_ = std::move(state);\n    }\n\n    void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callbacks) override {\n      parent_->callbacks_ = &callbacks;\n    }\n    void release() { parent_ = nullptr; }\n\n  protected:\n    ConnectionPool::ConnectionState* connectionState() override {\n      return parent_->connection_state_.get();\n    }\n\n  private:\n    ActiveTcpClient* parent_;\n    Network::ClientConnection& connection_;\n  };\n\n  ActiveTcpClient(ConnPoolImpl& parent, const Upstream::HostConstSharedPtr& host,\n                  uint64_t concurrent_stream_limit);\n  ~ActiveTcpClient() override;\n\n  // Override the default's of Envoy::ConnectionPool::ActiveClient for class-specific functions.\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override { callbacks_->onAboveWriteBufferHighWatermark(); }\n  void onBelowWriteBufferLowWatermark() override { callbacks_->onBelowWriteBufferLowWatermark(); }\n\n  void close() override { connection_->close(Network::ConnectionCloseType::NoFlush); }\n  size_t numActiveStreams() const override { return callbacks_ ? 
1 : 0; }\n  bool closingWithIncompleteStream() const override { return false; }\n  uint64_t id() const override { return connection_->id(); }\n\n  void onUpstreamData(Buffer::Instance& data, bool end_stream) {\n    if (callbacks_) {\n      callbacks_->onUpstreamData(data, end_stream);\n    } else {\n      close();\n    }\n  }\n  void clearCallbacks();\n\n  ConnPoolImpl& parent_;\n  ConnectionPool::UpstreamCallbacks* callbacks_{};\n  Network::ClientConnectionPtr connection_;\n  ConnectionPool::ConnectionStatePtr connection_state_;\n  TcpConnectionData* tcp_connection_data_{};\n};\n\nclass ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase,\n                     public Tcp::ConnectionPool::Instance {\npublic:\n  ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host,\n               Upstream::ResourcePriority priority,\n               const Network::ConnectionSocket::OptionsSharedPtr& options,\n               Network::TransportSocketOptionsSharedPtr transport_socket_options)\n      : Envoy::ConnectionPool::ConnPoolImplBase(host, priority, dispatcher, options,\n                                                transport_socket_options),\n        upstream_ready_cb_(dispatcher.createSchedulableCallback([this]() {\n          upstream_ready_enabled_ = false;\n          onUpstreamReady();\n        })) {}\n  ~ConnPoolImpl() override { destructAllConnections(); }\n\n  void addDrainedCallback(DrainedCb cb) override { addDrainedCallbackImpl(cb); }\n  void drainConnections() override {\n    drainConnectionsImpl();\n    // Legacy behavior for the TCP connection pool marks all connecting clients\n    // as draining.\n    for (auto& connecting_client : connecting_clients_) {\n      if (connecting_client->remaining_streams_ > 1) {\n        uint64_t old_limit = connecting_client->effectiveConcurrentStreamLimit();\n        connecting_client->remaining_streams_ = 1;\n        if (connecting_client->effectiveConcurrentStreamLimit() < old_limit) {\n        
  connecting_stream_capacity_ -=\n              (old_limit - connecting_client->effectiveConcurrentStreamLimit());\n        }\n      }\n    }\n  }\n\n  void closeConnections() override {\n    for (auto* list : {&ready_clients_, &busy_clients_, &connecting_clients_}) {\n      while (!list->empty()) {\n        list->front()->close();\n      }\n    }\n  }\n  ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override {\n    TcpAttachContext context(&callbacks);\n    return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context);\n  }\n  bool maybePrefetch(float prefetch_ratio) override {\n    return Envoy::ConnectionPool::ConnPoolImplBase::maybePrefetch(prefetch_ratio);\n  }\n\n  ConnectionPool::Cancellable*\n  newPendingStream(Envoy::ConnectionPool::AttachContext& context) override {\n    Envoy::ConnectionPool::PendingStreamPtr pending_stream =\n        std::make_unique<TcpPendingStream>(*this, typedContext<TcpAttachContext>(context));\n    LinkedList::moveIntoList(std::move(pending_stream), pending_streams_);\n    return pending_streams_.front().get();\n  }\n\n  Upstream::HostDescriptionConstSharedPtr host() const override {\n    return Envoy::ConnectionPool::ConnPoolImplBase::host();\n  }\n\n  Envoy::ConnectionPool::ActiveClientPtr instantiateActiveClient() override {\n    return std::make_unique<ActiveTcpClient>(*this, Envoy::ConnectionPool::ConnPoolImplBase::host(),\n                                             1);\n  }\n\n  void onPoolReady(Envoy::ConnectionPool::ActiveClient& client,\n                   Envoy::ConnectionPool::AttachContext& context) override {\n    ActiveTcpClient* tcp_client = static_cast<ActiveTcpClient*>(&client);\n    auto* callbacks = typedContext<TcpAttachContext>(context).callbacks_;\n    std::unique_ptr<Envoy::Tcp::ConnectionPool::ConnectionData> connection_data =\n        std::make_unique<ActiveTcpClient::TcpConnectionData>(*tcp_client, *tcp_client->connection_);\n    
callbacks->onPoolReady(std::move(connection_data), tcp_client->real_host_description_);\n  }\n\n  void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description,\n                     absl::string_view, ConnectionPool::PoolFailureReason reason,\n                     Envoy::ConnectionPool::AttachContext& context) override {\n    auto* callbacks = typedContext<TcpAttachContext>(context).callbacks_;\n    callbacks->onPoolFailure(reason, host_description);\n  }\n\n  // These two functions exist for testing parity between old and new Tcp Connection Pools.\n  virtual void onConnReleased(Envoy::ConnectionPool::ActiveClient& client) {\n    if (client.state_ == Envoy::ConnectionPool::ActiveClient::State::BUSY) {\n      if (!pending_streams_.empty() && !upstream_ready_enabled_) {\n        upstream_ready_cb_->scheduleCallbackCurrentIteration();\n      }\n    }\n  }\n  virtual void onConnDestroyed() {}\n\nprotected:\n  Event::SchedulableCallbackPtr upstream_ready_cb_;\n  bool upstream_ready_enabled_{};\n};\n\n} // namespace Tcp\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tcp/original_conn_pool.cc",
    "content": "#include \"common/tcp/original_conn_pool.h\"\n\n#include <memory>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/stats/timespan_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Tcp {\n\nOriginalConnPoolImpl::OriginalConnPoolImpl(\n    Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host,\n    Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options,\n    Network::TransportSocketOptionsSharedPtr transport_socket_options)\n    : dispatcher_(dispatcher), host_(host), priority_(priority), socket_options_(options),\n      transport_socket_options_(transport_socket_options),\n      upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { onUpstreamReady(); })) {}\n\nOriginalConnPoolImpl::~OriginalConnPoolImpl() {\n  while (!ready_conns_.empty()) {\n    ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  while (!busy_conns_.empty()) {\n    busy_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  while (!pending_conns_.empty()) {\n    pending_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  // Make sure all connections are destroyed before we are destroyed.\n  dispatcher_.clearDeferredDeleteList();\n}\n\nvoid OriginalConnPoolImpl::drainConnections() {\n  while (!ready_conns_.empty()) {\n    ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  // We drain busy and pending connections by manually setting remaining requests to 1. 
Thus, when\n  // the next response completes the connection will be destroyed.\n  for (const auto& conn : busy_conns_) {\n    conn->remaining_requests_ = 1;\n  }\n\n  for (const auto& conn : pending_conns_) {\n    conn->remaining_requests_ = 1;\n  }\n}\n\nvoid OriginalConnPoolImpl::closeConnections() {\n  while (!ready_conns_.empty()) {\n    ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  while (!busy_conns_.empty()) {\n    busy_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  while (!pending_conns_.empty()) {\n    pending_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush);\n  }\n}\n\nvoid OriginalConnPoolImpl::addDrainedCallback(DrainedCb cb) {\n  drained_callbacks_.push_back(cb);\n  checkForDrained();\n}\n\nvoid OriginalConnPoolImpl::assignConnection(ActiveConn& conn,\n                                            ConnectionPool::Callbacks& callbacks) {\n  ASSERT(conn.wrapper_ == nullptr);\n  conn.wrapper_ = std::make_shared<ConnectionWrapper>(conn);\n\n  callbacks.onPoolReady(std::make_unique<ConnectionDataImpl>(conn.wrapper_),\n                        conn.real_host_description_);\n}\n\nvoid OriginalConnPoolImpl::checkForDrained() {\n  if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_conns_.empty() &&\n      pending_conns_.empty()) {\n    while (!ready_conns_.empty()) {\n      ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush);\n    }\n\n    for (const DrainedCb& cb : drained_callbacks_) {\n      cb();\n    }\n  }\n}\n\nvoid OriginalConnPoolImpl::createNewConnection() {\n  ENVOY_LOG(debug, \"creating a new connection\");\n  ActiveConnPtr conn(new ActiveConn(*this));\n  LinkedList::moveIntoList(std::move(conn), pending_conns_);\n}\n\nConnectionPool::Cancellable*\nOriginalConnPoolImpl::newConnection(ConnectionPool::Callbacks& callbacks) {\n  if (!ready_conns_.empty()) {\n    ready_conns_.front()->moveBetweenLists(ready_conns_, 
busy_conns_);\n    ENVOY_CONN_LOG(debug, \"using existing connection\", *busy_conns_.front()->conn_);\n    assignConnection(*busy_conns_.front(), callbacks);\n    return nullptr;\n  }\n\n  if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) {\n    bool can_create_connection =\n        host_->cluster().resourceManager(priority_).connections().canCreate();\n    if (!can_create_connection) {\n      host_->cluster().stats().upstream_cx_overflow_.inc();\n    }\n\n    // If we have no connections at all, make one no matter what so we don't starve.\n    if ((ready_conns_.empty() && busy_conns_.empty() && pending_conns_.empty()) ||\n        can_create_connection) {\n      createNewConnection();\n    }\n\n    ENVOY_LOG(debug, \"queueing request due to no available connections\");\n    PendingRequestPtr pending_request(new PendingRequest(*this, callbacks));\n    LinkedList::moveIntoList(std::move(pending_request), pending_requests_);\n    return pending_requests_.front().get();\n  } else {\n    ENVOY_LOG(debug, \"max pending requests overflow\");\n    callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, nullptr);\n    host_->cluster().stats().upstream_rq_pending_overflow_.inc();\n    return nullptr;\n  }\n}\n\nvoid OriginalConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    ENVOY_CONN_LOG(debug, \"client disconnected\", *conn.conn_);\n\n    Envoy::Upstream::reportUpstreamCxDestroy(host_, event);\n\n    ActiveConnPtr removed;\n    bool check_for_drained = true;\n    if (conn.wrapper_ != nullptr) {\n      if (!conn.wrapper_->released_) {\n        Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event);\n\n        conn.wrapper_->release(true);\n      }\n\n      removed = conn.removeFromList(busy_conns_);\n    } else if (!conn.connect_timer_) {\n      // The connect timer is 
destroyed on connect. The lack of a connect timer means that this\n      // connection is idle and in the ready pool.\n      removed = conn.removeFromList(ready_conns_);\n      check_for_drained = false;\n    } else {\n      // The only time this happens is if we actually saw a connect failure.\n      host_->cluster().stats().upstream_cx_connect_fail_.inc();\n      host_->stats().cx_connect_fail_.inc();\n      removed = conn.removeFromList(pending_conns_);\n\n      // Raw connect failures should never happen under normal circumstances. If we have an upstream\n      // that is behaving badly, requests can get stuck here in the pending state. If we see a\n      // connect failure, we purge all pending requests so that calling code can determine what to\n      // do with the request.\n      // NOTE: We move the existing pending requests to a temporary list. This is done so that\n      //       if retry logic submits a new request to the pool, we don't fail it inline.\n      // TODO(lizan): If pool failure due to transport socket, propagate the reason to access log.\n      ConnectionPool::PoolFailureReason reason;\n      if (conn.timed_out_) {\n        reason = ConnectionPool::PoolFailureReason::Timeout;\n      } else if (event == Network::ConnectionEvent::RemoteClose) {\n        reason = ConnectionPool::PoolFailureReason::RemoteConnectionFailure;\n      } else {\n        reason = ConnectionPool::PoolFailureReason::LocalConnectionFailure;\n      }\n\n      std::list<PendingRequestPtr> pending_requests_to_purge;\n      pending_requests_to_purge.swap(pending_requests_);\n      while (!pending_requests_to_purge.empty()) {\n        PendingRequestPtr request =\n            pending_requests_to_purge.front()->removeFromList(pending_requests_to_purge);\n        host_->cluster().stats().upstream_rq_pending_failure_eject_.inc();\n        request->callbacks_.onPoolFailure(reason, conn.real_host_description_);\n      }\n    }\n\n    
dispatcher_.deferredDelete(std::move(removed));\n\n    // If we have pending requests and we just lost a connection we should make a new one.\n    if (pending_requests_.size() >\n        (ready_conns_.size() + busy_conns_.size() + pending_conns_.size())) {\n      createNewConnection();\n    }\n\n    if (check_for_drained) {\n      checkForDrained();\n    }\n  }\n\n  if (conn.connect_timer_) {\n    conn.connect_timer_->disableTimer();\n    conn.connect_timer_.reset();\n  }\n\n  // Note that the order in this function is important. Concretely, we must destroy the connect\n  // timer before we process an idle connection, because if this results in an immediate\n  // drain/destruction event, we key off of the existence of the connect timer above to determine\n  // whether the connection is in the ready list (connected) or the pending list (failed to\n  // connect).\n  if (event == Network::ConnectionEvent::Connected) {\n    conn.conn_->streamInfo().setDownstreamSslConnection(conn.conn_->ssl());\n    conn_connect_ms_->complete();\n    processIdleConnection(conn, true, false);\n  }\n}\n\nvoid OriginalConnPoolImpl::onPendingRequestCancel(PendingRequest& request,\n                                                  ConnectionPool::CancelPolicy cancel_policy) {\n  ENVOY_LOG(debug, \"canceling pending request\");\n  request.removeFromList(pending_requests_);\n  host_->cluster().stats().upstream_rq_cancelled_.inc();\n\n  // If the cancel requests closure of excess connections and there are more pending connections\n  // than requests, close the most recently created pending connection.\n  if (cancel_policy == ConnectionPool::CancelPolicy::CloseExcess &&\n      pending_requests_.size() < pending_conns_.size()) {\n    ENVOY_LOG(debug, \"canceling pending connection\");\n    pending_conns_.back()->conn_->close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  checkForDrained();\n}\n\nvoid OriginalConnPoolImpl::onConnReleased(ActiveConn& conn) {\n  ENVOY_CONN_LOG(debug, 
\"connection released\", *conn.conn_);\n\n  if (conn.remaining_requests_ > 0 && --conn.remaining_requests_ == 0) {\n    ENVOY_CONN_LOG(debug, \"maximum requests per connection\", *conn.conn_);\n    host_->cluster().stats().upstream_cx_max_requests_.inc();\n\n    conn.conn_->close(Network::ConnectionCloseType::NoFlush);\n  } else {\n    // Upstream connection might be closed right after response is complete. Setting delay=true\n    // here to assign pending requests in next dispatcher loop to handle that case.\n    // https://github.com/envoyproxy/envoy/issues/2715\n    processIdleConnection(conn, false, true);\n  }\n}\n\nvoid OriginalConnPoolImpl::onConnDestroyed(ActiveConn& conn) {\n  ENVOY_CONN_LOG(debug, \"connection destroyed\", *conn.conn_);\n}\n\nvoid OriginalConnPoolImpl::onUpstreamReady() {\n  upstream_ready_enabled_ = false;\n  while (!pending_requests_.empty() && !ready_conns_.empty()) {\n    ActiveConn& conn = *ready_conns_.front();\n    ENVOY_CONN_LOG(debug, \"assigning connection\", *conn.conn_);\n    // There is work to do so bind a connection to the caller and move it to the busy list. Pending\n    // requests are pushed onto the front, so pull from the back.\n    conn.moveBetweenLists(ready_conns_, busy_conns_);\n    assignConnection(conn, pending_requests_.back()->callbacks_);\n    pending_requests_.pop_back();\n  }\n}\n\nvoid OriginalConnPoolImpl::processIdleConnection(ActiveConn& conn, bool new_connection,\n                                                 bool delay) {\n  if (conn.wrapper_) {\n    conn.wrapper_->invalidate();\n    conn.wrapper_.reset();\n  }\n\n  // TODO(zuercher): As a future improvement, we may wish to close extra connections when there are\n  // no pending requests rather than moving them to ready_conns_. For conn pool callers that re-use\n  // connections it is possible that a busy connection may be re-assigned to a pending request\n  // while a new connection is pending. 
The current behavior is to move the pending connection to\n  // the ready list to await a future request. For some protocols, e.g. mysql which has the server\n  // transmit handshake data on connect, it may be desirable to close the connection if no pending\n  // request is available. The CloseExcess flag for cancel is related: if we close pending\n  // connections without requests here it becomes superfluous (instead of closing connections at\n  // cancel time we'd wait until they completed and close them here). Finally, we want to avoid\n  // requiring operators to correct configure clusters to get the necessary pending connection\n  // behavior (e.g. we want to find a way to enable the new behavior without having to configure\n  // it on a cluster).\n\n  if (pending_requests_.empty() || delay) {\n    // There is nothing to service or delayed processing is requested, so just move the connection\n    // into the ready list.\n    ENVOY_CONN_LOG(debug, \"moving to ready\", *conn.conn_);\n    if (new_connection) {\n      conn.moveBetweenLists(pending_conns_, ready_conns_);\n    } else {\n      conn.moveBetweenLists(busy_conns_, ready_conns_);\n    }\n  } else {\n    // There is work to do immediately so bind a request to the caller and move it to the busy list.\n    // Pending requests are pushed onto the front, so pull from the back.\n    ENVOY_CONN_LOG(debug, \"assigning connection\", *conn.conn_);\n    if (new_connection) {\n      conn.moveBetweenLists(pending_conns_, busy_conns_);\n    }\n    assignConnection(conn, pending_requests_.back()->callbacks_);\n    pending_requests_.pop_back();\n  }\n\n  if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) {\n    upstream_ready_enabled_ = true;\n    upstream_ready_cb_->scheduleCallbackCurrentIteration();\n  }\n\n  checkForDrained();\n}\n\nOriginalConnPoolImpl::ConnectionWrapper::ConnectionWrapper(ActiveConn& parent) : parent_(parent) {\n  
parent_.parent_.host_->cluster().stats().upstream_rq_total_.inc();\n  parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc();\n  parent_.parent_.host_->stats().rq_total_.inc();\n  parent_.parent_.host_->stats().rq_active_.inc();\n}\n\nNetwork::ClientConnection& OriginalConnPoolImpl::ConnectionWrapper::connection() {\n  ASSERT(conn_valid_);\n  return *parent_.conn_;\n}\n\nvoid OriginalConnPoolImpl::ConnectionWrapper::addUpstreamCallbacks(\n    ConnectionPool::UpstreamCallbacks& cb) {\n  ASSERT(!released_);\n  callbacks_ = &cb;\n}\n\nvoid OriginalConnPoolImpl::ConnectionWrapper::release(bool closed) {\n  // Allow multiple calls: connection close and destruction of ConnectionDataImplPtr will both\n  // result in this call.\n  if (!released_) {\n    released_ = true;\n    callbacks_ = nullptr;\n    if (!closed) {\n      parent_.parent_.onConnReleased(parent_);\n    }\n\n    parent_.parent_.host_->cluster().stats().upstream_rq_active_.dec();\n    parent_.parent_.host_->stats().rq_active_.dec();\n  }\n}\n\nOriginalConnPoolImpl::PendingRequest::PendingRequest(OriginalConnPoolImpl& parent,\n                                                     ConnectionPool::Callbacks& callbacks)\n    : parent_(parent), callbacks_(callbacks) {\n  parent_.host_->cluster().stats().upstream_rq_pending_total_.inc();\n  parent_.host_->cluster().stats().upstream_rq_pending_active_.inc();\n  parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc();\n}\n\nOriginalConnPoolImpl::PendingRequest::~PendingRequest() {\n  parent_.host_->cluster().stats().upstream_rq_pending_active_.dec();\n  parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec();\n}\n\nOriginalConnPoolImpl::ActiveConn::ActiveConn(OriginalConnPoolImpl& parent)\n    : parent_(parent),\n      connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })),\n      remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()), 
timed_out_(false) {\n\n  parent_.conn_connect_ms_ = std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n      parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource());\n\n  Upstream::Host::CreateConnectionData data = parent_.host_->createConnection(\n      parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_);\n  real_host_description_ = data.host_description_;\n\n  conn_ = std::move(data.connection_);\n\n  conn_->detectEarlyCloseWhenReadDisabled(false);\n  conn_->addConnectionCallbacks(*this);\n  conn_->addReadFilter(Network::ReadFilterSharedPtr{new ConnReadFilter(*this)});\n\n  ENVOY_CONN_LOG(debug, \"connecting\", *conn_);\n  conn_->connect();\n\n  parent_.host_->cluster().stats().upstream_cx_total_.inc();\n  parent_.host_->cluster().stats().upstream_cx_active_.inc();\n  parent_.host_->stats().cx_total_.inc();\n  parent_.host_->stats().cx_active_.inc();\n  conn_length_ = std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n      parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource());\n  connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout());\n  parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc();\n\n  conn_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_,\n                             parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_,\n                             parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_,\n                             parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_,\n                             &parent_.host_->cluster().stats().bind_errors_, nullptr});\n\n  // We just universally set no delay on connections. 
Theoretically we might at some point want\n  // to make this configurable.\n  conn_->noDelay(true);\n}\n\nOriginalConnPoolImpl::ActiveConn::~ActiveConn() {\n  if (wrapper_) {\n    wrapper_->invalidate();\n  }\n\n  parent_.host_->cluster().stats().upstream_cx_active_.dec();\n  parent_.host_->stats().cx_active_.dec();\n  conn_length_->complete();\n  parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec();\n\n  parent_.onConnDestroyed(*this);\n}\n\nvoid OriginalConnPoolImpl::ActiveConn::onConnectTimeout() {\n  // We just close the connection at this point. This will result in both a timeout and a connect\n  // failure and will fold into all the normal connect failure logic.\n  ENVOY_CONN_LOG(debug, \"connect timeout\", *conn_);\n  timed_out_ = true;\n  parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc();\n  conn_->close(Network::ConnectionCloseType::NoFlush);\n}\n\nvoid OriginalConnPoolImpl::ActiveConn::onUpstreamData(Buffer::Instance& data, bool end_stream) {\n  if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) {\n    // Delegate to the connection owner.\n    wrapper_->callbacks_->onUpstreamData(data, end_stream);\n  } else {\n    // Unexpected data from upstream, close down the connection.\n    ENVOY_CONN_LOG(debug, \"unexpected data from upstream, closing connection\", *conn_);\n    conn_->close(Network::ConnectionCloseType::NoFlush);\n  }\n}\n\nvoid OriginalConnPoolImpl::ActiveConn::onEvent(Network::ConnectionEvent event) {\n  ConnectionPool::UpstreamCallbacks* cb = nullptr;\n  if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) {\n    cb = wrapper_->callbacks_;\n  }\n\n  // In the event of a close event, we want to update the pool's state before triggering callbacks,\n  // preventing the case where we attempt to return a closed connection to the ready pool.\n  parent_.onConnectionEvent(*this, event);\n\n  if (cb) {\n    cb->onEvent(event);\n  }\n}\n\nvoid 
OriginalConnPoolImpl::ActiveConn::onAboveWriteBufferHighWatermark() {\n  if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) {\n    wrapper_->callbacks_->onAboveWriteBufferHighWatermark();\n  }\n}\n\nvoid OriginalConnPoolImpl::ActiveConn::onBelowWriteBufferLowWatermark() {\n  if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) {\n    wrapper_->callbacks_->onBelowWriteBufferLowWatermark();\n  }\n}\n\n} // namespace Tcp\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tcp/original_conn_pool.h",
    "content": "#pragma once\n\n#include <list>\n#include <memory>\n\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/event/schedulable_cb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/timespan.h\"\n#include \"envoy/tcp/conn_pool.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/network/filter_impl.h\"\n\nnamespace Envoy {\nnamespace Tcp {\n\nclass OriginalConnPoolImpl : Logger::Loggable<Logger::Id::pool>, public ConnectionPool::Instance {\npublic:\n  OriginalConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host,\n                       Upstream::ResourcePriority priority,\n                       const Network::ConnectionSocket::OptionsSharedPtr& options,\n                       Network::TransportSocketOptionsSharedPtr transport_socket_options);\n\n  ~OriginalConnPoolImpl() override;\n\n  // ConnectionPool::Instance\n  void addDrainedCallback(DrainedCb cb) override;\n  void drainConnections() override;\n  void closeConnections() override;\n  ConnectionPool::Cancellable* newConnection(ConnectionPool::Callbacks& callbacks) override;\n  // The old pool does not implement prefetching.\n  bool maybePrefetch(float) override { return false; }\n  Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }\n\nprotected:\n  struct ActiveConn;\n\n  struct ConnectionWrapper {\n    ConnectionWrapper(ActiveConn& parent);\n\n    Network::ClientConnection& connection();\n    void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callbacks);\n    void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) {\n      parent_.setConnectionState(std::move(state));\n    };\n    ConnectionPool::ConnectionState* connectionState() { return parent_.connectionState(); }\n\n    void release(bool closed);\n\n    void invalidate() { 
conn_valid_ = false; }\n\n    ActiveConn& parent_;\n    ConnectionPool::UpstreamCallbacks* callbacks_{};\n    bool released_{false};\n    bool conn_valid_{true};\n  };\n\n  using ConnectionWrapperSharedPtr = std::shared_ptr<ConnectionWrapper>;\n\n  struct ConnectionDataImpl : public ConnectionPool::ConnectionData {\n    ConnectionDataImpl(ConnectionWrapperSharedPtr wrapper) : wrapper_(std::move(wrapper)) {}\n    ~ConnectionDataImpl() override { wrapper_->release(false); }\n\n    // ConnectionPool::ConnectionData\n    Network::ClientConnection& connection() override { return wrapper_->connection(); }\n    void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callbacks) override {\n      wrapper_->addUpstreamCallbacks(callbacks);\n    };\n    void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) override {\n      wrapper_->setConnectionState(std::move(state));\n    }\n    ConnectionPool::ConnectionState* connectionState() override {\n      return wrapper_->connectionState();\n    }\n\n    ConnectionWrapperSharedPtr wrapper_;\n  };\n\n  struct ConnReadFilter : public Network::ReadFilterBaseImpl {\n    ConnReadFilter(ActiveConn& parent) : parent_(parent) {}\n\n    // Network::ReadFilter\n    Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override {\n      parent_.onUpstreamData(data, end_stream);\n      return Network::FilterStatus::StopIteration;\n    }\n\n    ActiveConn& parent_;\n  };\n\n  struct ActiveConn : LinkedObject<ActiveConn>,\n                      public Network::ConnectionCallbacks,\n                      public Event::DeferredDeletable {\n    ActiveConn(OriginalConnPoolImpl& parent);\n    ~ActiveConn() override;\n\n    void onConnectTimeout();\n    void onUpstreamData(Buffer::Instance& data, bool end_stream);\n\n    // Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override;\n    void onAboveWriteBufferHighWatermark() override;\n    void onBelowWriteBufferLowWatermark() 
override;\n\n    void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) {\n      conn_state_ = std::move(state);\n    }\n    ConnectionPool::ConnectionState* connectionState() { return conn_state_.get(); }\n\n    OriginalConnPoolImpl& parent_;\n    Upstream::HostDescriptionConstSharedPtr real_host_description_;\n    ConnectionWrapperSharedPtr wrapper_;\n    Network::ClientConnectionPtr conn_;\n    ConnectionPool::ConnectionStatePtr conn_state_;\n    Event::TimerPtr connect_timer_;\n    Stats::TimespanPtr conn_length_;\n    uint64_t remaining_requests_;\n    bool timed_out_;\n  };\n\n  using ActiveConnPtr = std::unique_ptr<ActiveConn>;\n\n  struct PendingRequest : LinkedObject<PendingRequest>, public ConnectionPool::Cancellable {\n    PendingRequest(OriginalConnPoolImpl& parent, ConnectionPool::Callbacks& callbacks);\n    ~PendingRequest() override;\n\n    // ConnectionPool::Cancellable\n    void cancel(ConnectionPool::CancelPolicy cancel_policy) override {\n      parent_.onPendingRequestCancel(*this, cancel_policy);\n    }\n\n    OriginalConnPoolImpl& parent_;\n    ConnectionPool::Callbacks& callbacks_;\n  };\n\n  using PendingRequestPtr = std::unique_ptr<PendingRequest>;\n\n  void assignConnection(ActiveConn& conn, ConnectionPool::Callbacks& callbacks);\n  void createNewConnection();\n  void onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent event);\n  void onPendingRequestCancel(PendingRequest& request, ConnectionPool::CancelPolicy cancel_policy);\n  virtual void onConnReleased(ActiveConn& conn);\n  virtual void onConnDestroyed(ActiveConn& conn);\n  void onUpstreamReady();\n  void processIdleConnection(ActiveConn& conn, bool new_connection, bool delay);\n  void checkForDrained();\n\n  Event::Dispatcher& dispatcher_;\n  Upstream::HostConstSharedPtr host_;\n  Upstream::ResourcePriority priority_;\n  const Network::ConnectionSocket::OptionsSharedPtr socket_options_;\n  Network::TransportSocketOptionsSharedPtr transport_socket_options_;\n\n  
std::list<ActiveConnPtr> pending_conns_; // conns awaiting connected event\n  std::list<ActiveConnPtr> ready_conns_;   // conns ready for assignment\n  std::list<ActiveConnPtr> busy_conns_;    // conns assigned\n  std::list<PendingRequestPtr> pending_requests_;\n  std::list<DrainedCb> drained_callbacks_;\n  Stats::TimespanPtr conn_connect_ms_;\n  Event::SchedulableCallbackPtr upstream_ready_cb_;\n  bool upstream_ready_enabled_{false};\n};\n\n} // namespace Tcp\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tcp_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"tcp_proxy\",\n    srcs = [\n        \"tcp_proxy.cc\",\n        \"upstream.cc\",\n    ],\n    hdrs = [\n        \"tcp_proxy.h\",\n        \"upstream.h\",\n    ],\n    deps = [\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/router:router_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/access_log:access_log_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:macros\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/network:application_protocol_lib\",\n        \"//source/common/network:cidr_range_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/network:hash_policy_lib\",\n        \"//source/common/network:proxy_protocol_filter_state_lib\",\n        \"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/network:upstream_server_name_lib\",\n        
\"//source/common/network:utility_lib\",\n        \"//source/common/router:metadatamatchcriteria_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/tcp_proxy/tcp_proxy.cc",
    "content": "#include \"common/tcp_proxy/tcp_proxy.h\"\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/access_log/access_log_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/network/application_protocol.h\"\n#include \"common/network/proxy_protocol_filter_state.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/network/upstream_server_name.h\"\n#include \"common/router/metadatamatchcriteria_impl.h\"\n\nnamespace Envoy {\nnamespace TcpProxy {\n\nconst std::string& PerConnectionCluster::key() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.tcp_proxy.cluster\");\n}\n\nConfig::RouteImpl::RouteImpl(\n    const Config& parent,\n    const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute&\n        config)\n    : parent_(parent) {\n  cluster_name_ = config.cluster();\n\n  source_ips_ = Network::Address::IpList(config.source_ip_list());\n  destination_ips_ = Network::Address::IpList(config.destination_ip_list());\n\n  if (!config.source_ports().empty()) {\n    Network::Utility::parsePortRangeList(config.source_ports(), source_port_ranges_);\n  }\n\n  if (!config.destination_ports().empty()) {\n    Network::Utility::parsePortRangeList(config.destination_ports(), destination_port_ranges_);\n  }\n}\n\nbool Config::RouteImpl::matches(Network::Connection& connection) 
const {\n  if (!source_port_ranges_.empty() &&\n      !Network::Utility::portInRangeList(*connection.remoteAddress(), source_port_ranges_)) {\n    return false;\n  }\n\n  if (!source_ips_.empty() && !source_ips_.contains(*connection.remoteAddress())) {\n    return false;\n  }\n\n  if (!destination_port_ranges_.empty() &&\n      !Network::Utility::portInRangeList(*connection.localAddress(), destination_port_ranges_)) {\n    return false;\n  }\n\n  if (!destination_ips_.empty() && !destination_ips_.contains(*connection.localAddress())) {\n    return false;\n  }\n\n  // if we made it past all checks, the route matches\n  return true;\n}\n\nConfig::WeightedClusterEntry::WeightedClusterEntry(\n    const Config& parent, const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::\n                              WeightedCluster::ClusterWeight& config)\n    : parent_(parent), cluster_name_(config.name()), cluster_weight_(config.weight()) {\n  if (config.has_metadata_match()) {\n    const auto filter_it = config.metadata_match().filter_metadata().find(\n        Envoy::Config::MetadataFilters::get().ENVOY_LB);\n    if (filter_it != config.metadata_match().filter_metadata().end()) {\n      if (parent.cluster_metadata_match_criteria_) {\n        metadata_match_criteria_ =\n            parent.cluster_metadata_match_criteria_->mergeMatchCriteria(filter_it->second);\n      } else {\n        metadata_match_criteria_ =\n            std::make_unique<Router::MetadataMatchCriteriaImpl>(filter_it->second);\n      }\n    }\n  }\n}\n\nConfig::SharedConfig::SharedConfig(\n    const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& config,\n    Server::Configuration::FactoryContext& context)\n    : stats_scope_(context.scope().createScope(fmt::format(\"tcp.{}\", config.stat_prefix()))),\n      stats_(generateStats(*stats_scope_)) {\n  if (config.has_idle_timeout()) {\n    const uint64_t timeout = DurationUtil::durationToMilliseconds(config.idle_timeout());\n    if 
(timeout > 0) {\n      idle_timeout_ = std::chrono::milliseconds(timeout);\n    }\n  } else {\n    idle_timeout_ = std::chrono::hours(1);\n  }\n  if (config.has_tunneling_config()) {\n    tunneling_config_ = config.tunneling_config();\n  }\n  if (config.has_max_downstream_connection_duration()) {\n    const uint64_t connection_duration =\n        DurationUtil::durationToMilliseconds(config.max_downstream_connection_duration());\n    max_downstream_connection_duration_ = std::chrono::milliseconds(connection_duration);\n  }\n}\n\nConfig::Config(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& config,\n               Server::Configuration::FactoryContext& context)\n    : max_connect_attempts_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_connect_attempts, 1)),\n      upstream_drain_manager_slot_(context.threadLocal().allocateSlot()),\n      shared_config_(std::make_shared<SharedConfig>(config, context)),\n      random_generator_(context.api().randomGenerator()) {\n\n  upstream_drain_manager_slot_->set([](Event::Dispatcher&) {\n    ThreadLocal::ThreadLocalObjectSharedPtr drain_manager =\n        std::make_shared<UpstreamDrainManager>();\n    return drain_manager;\n  });\n\n  if (config.has_hidden_envoy_deprecated_deprecated_v1()) {\n    for (const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute&\n             route_desc : config.hidden_envoy_deprecated_deprecated_v1().routes()) {\n      routes_.emplace_back(std::make_shared<const RouteImpl>(*this, route_desc));\n    }\n  }\n\n  if (!config.cluster().empty()) {\n    envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute\n        default_route;\n    default_route.set_cluster(config.cluster());\n    routes_.emplace_back(std::make_shared<const RouteImpl>(*this, default_route));\n  }\n\n  if (config.has_metadata_match()) {\n    const auto& filter_metadata = config.metadata_match().filter_metadata();\n\n    const auto filter_it = 
filter_metadata.find(Envoy::Config::MetadataFilters::get().ENVOY_LB);\n\n    if (filter_it != filter_metadata.end()) {\n      cluster_metadata_match_criteria_ =\n          std::make_unique<Router::MetadataMatchCriteriaImpl>(filter_it->second);\n    }\n  }\n\n  // Weighted clusters will be enabled only if both the default cluster and\n  // deprecated v1 routes are absent.\n  if (routes_.empty() && config.has_weighted_clusters()) {\n    total_cluster_weight_ = 0;\n    for (const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::WeightedCluster::\n             ClusterWeight& cluster_desc : config.weighted_clusters().clusters()) {\n      WeightedClusterEntryConstSharedPtr cluster_entry(\n          std::make_shared<const WeightedClusterEntry>(*this, cluster_desc));\n      weighted_clusters_.emplace_back(std::move(cluster_entry));\n      total_cluster_weight_ += weighted_clusters_.back()->clusterWeight();\n    }\n  }\n\n  for (const envoy::config::accesslog::v3::AccessLog& log_config : config.access_log()) {\n    access_logs_.emplace_back(AccessLog::AccessLogFactory::fromProto(log_config, context));\n  }\n\n  if (!config.hash_policy().empty()) {\n    hash_policy_ = std::make_unique<Network::HashPolicyImpl>(config.hash_policy());\n  }\n}\n\nRouteConstSharedPtr Config::getRegularRouteFromEntries(Network::Connection& connection) {\n  // First check if the per-connection state to see if we need to route to a pre-selected cluster\n  if (connection.streamInfo().filterState()->hasData<PerConnectionCluster>(\n          PerConnectionCluster::key())) {\n    const PerConnectionCluster& per_connection_cluster =\n        connection.streamInfo().filterState()->getDataReadOnly<PerConnectionCluster>(\n            PerConnectionCluster::key());\n\n    envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute\n        per_connection_route;\n    per_connection_route.set_cluster(per_connection_cluster.value());\n    return std::make_shared<const 
RouteImpl>(*this, per_connection_route);\n  }\n\n  for (const RouteConstSharedPtr& route : routes_) {\n    if (route->matches(connection)) {\n      return route;\n    }\n  }\n\n  // no match, no more routes to try\n  return nullptr;\n}\n\nRouteConstSharedPtr Config::getRouteFromEntries(Network::Connection& connection) {\n  if (weighted_clusters_.empty()) {\n    return getRegularRouteFromEntries(connection);\n  }\n  return WeightedClusterUtil::pickCluster(weighted_clusters_, total_cluster_weight_,\n                                          random_generator_.random(), false);\n}\n\nUpstreamDrainManager& Config::drainManager() {\n  return upstream_drain_manager_slot_->getTyped<UpstreamDrainManager>();\n}\n\nFilter::Filter(ConfigSharedPtr config, Upstream::ClusterManager& cluster_manager)\n    : config_(config), cluster_manager_(cluster_manager), downstream_callbacks_(*this),\n      upstream_callbacks_(new UpstreamCallbacks(this)) {\n  ASSERT(config != nullptr);\n}\n\nFilter::~Filter() {\n  for (const auto& access_log : config_->accessLogs()) {\n    access_log->log(nullptr, nullptr, nullptr, getStreamInfo());\n  }\n\n  ASSERT(upstream_handle_ == nullptr);\n  ASSERT(upstream_ == nullptr);\n}\n\nTcpProxyStats Config::SharedConfig::generateStats(Stats::Scope& scope) {\n  return {ALL_TCP_PROXY_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))};\n}\n\nvoid Filter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {\n  initialize(callbacks, true);\n}\n\nvoid Filter::initialize(Network::ReadFilterCallbacks& callbacks, bool set_connection_stats) {\n  read_callbacks_ = &callbacks;\n  ENVOY_CONN_LOG(debug, \"new tcp proxy session\", read_callbacks_->connection());\n\n  read_callbacks_->connection().addConnectionCallbacks(downstream_callbacks_);\n  read_callbacks_->connection().enableHalfClose(true);\n\n  // Need to disable reads so that we don't write to an upstream that might fail\n  // in onData(). 
This will get re-enabled when the upstream connection is\n  // established.\n  read_callbacks_->connection().readDisable(true);\n\n  config_->stats().downstream_cx_total_.inc();\n  if (set_connection_stats) {\n    read_callbacks_->connection().setConnectionStats(\n        {config_->stats().downstream_cx_rx_bytes_total_,\n         config_->stats().downstream_cx_rx_bytes_buffered_,\n         config_->stats().downstream_cx_tx_bytes_total_,\n         config_->stats().downstream_cx_tx_bytes_buffered_, nullptr, nullptr});\n  }\n}\n\nvoid Filter::readDisableUpstream(bool disable) {\n  bool success = false;\n  if (upstream_) {\n    success = upstream_->readDisable(disable);\n  }\n  if (!success) {\n    return;\n  }\n  if (disable) {\n    read_callbacks_->upstreamHost()\n        ->cluster()\n        .stats()\n        .upstream_flow_control_paused_reading_total_.inc();\n  } else {\n    read_callbacks_->upstreamHost()\n        ->cluster()\n        .stats()\n        .upstream_flow_control_resumed_reading_total_.inc();\n  }\n}\n\nvoid Filter::readDisableDownstream(bool disable) {\n  if (read_callbacks_->connection().state() != Network::Connection::State::Open) {\n    // During idle timeouts, we close both upstream and downstream with NoFlush.\n    // Envoy still does a best-effort flush which can case readDisableDownstream to be called\n    // despite the downstream connection being closed.\n    return;\n  }\n  read_callbacks_->connection().readDisable(disable);\n\n  if (disable) {\n    config_->stats().downstream_flow_control_paused_reading_total_.inc();\n  } else {\n    config_->stats().downstream_flow_control_resumed_reading_total_.inc();\n  }\n}\n\nStreamInfo::StreamInfo& Filter::getStreamInfo() {\n  return read_callbacks_->connection().streamInfo();\n}\n\nvoid Filter::DownstreamCallbacks::onAboveWriteBufferHighWatermark() {\n  ASSERT(!on_high_watermark_called_);\n  on_high_watermark_called_ = true;\n  // If downstream has too much data buffered, stop reading on the 
upstream connection.\n  parent_.readDisableUpstream(true);\n}\n\nvoid Filter::DownstreamCallbacks::onBelowWriteBufferLowWatermark() {\n  ASSERT(on_high_watermark_called_);\n  on_high_watermark_called_ = false;\n  // The downstream buffer has been drained. Resume reading from upstream.\n  parent_.readDisableUpstream(false);\n}\n\nvoid Filter::UpstreamCallbacks::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::Connected) {\n    return;\n  }\n  if (drainer_ == nullptr) {\n    parent_->onUpstreamEvent(event);\n  } else {\n    drainer_->onEvent(event);\n  }\n}\n\nvoid Filter::UpstreamCallbacks::onAboveWriteBufferHighWatermark() {\n  ASSERT(!on_high_watermark_called_);\n  on_high_watermark_called_ = true;\n\n  if (parent_ != nullptr) {\n    // There's too much data buffered in the upstream write buffer, so stop reading.\n    parent_->readDisableDownstream(true);\n  }\n}\n\nvoid Filter::UpstreamCallbacks::onBelowWriteBufferLowWatermark() {\n  ASSERT(on_high_watermark_called_);\n  on_high_watermark_called_ = false;\n\n  if (parent_ != nullptr) {\n    // The upstream write buffer is drained. 
Resume reading.\n    parent_->readDisableDownstream(false);\n  }\n}\n\nvoid Filter::UpstreamCallbacks::onUpstreamData(Buffer::Instance& data, bool end_stream) {\n  if (parent_) {\n    parent_->onUpstreamData(data, end_stream);\n  } else {\n    drainer_->onData(data, end_stream);\n  }\n}\n\nvoid Filter::UpstreamCallbacks::onBytesSent() {\n  if (drainer_ == nullptr) {\n    parent_->resetIdleTimer();\n  } else {\n    drainer_->onBytesSent();\n  }\n}\n\nvoid Filter::UpstreamCallbacks::onIdleTimeout() {\n  if (drainer_ == nullptr) {\n    parent_->onIdleTimeout();\n  } else {\n    drainer_->onIdleTimeout();\n  }\n}\n\nvoid Filter::UpstreamCallbacks::drain(Drainer& drainer) {\n  ASSERT(drainer_ == nullptr); // This should only get set once.\n  drainer_ = &drainer;\n  parent_ = nullptr;\n}\n\nNetwork::FilterStatus Filter::initializeUpstreamConnection() {\n  ASSERT(upstream_ == nullptr);\n\n  route_ = pickRoute();\n\n  const std::string& cluster_name = route_ ? route_->clusterName() : EMPTY_STRING;\n\n  Upstream::ThreadLocalCluster* thread_local_cluster = cluster_manager_.get(cluster_name);\n\n  if (thread_local_cluster) {\n    ENVOY_CONN_LOG(debug, \"Creating connection to cluster {}\", read_callbacks_->connection(),\n                   cluster_name);\n  } else {\n    config_->stats().downstream_cx_no_route_.inc();\n    getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound);\n    onInitFailure(UpstreamFailureReason::NoRoute);\n    return Network::FilterStatus::StopIteration;\n  }\n\n  Upstream::ClusterInfoConstSharedPtr cluster = thread_local_cluster->info();\n  getStreamInfo().setUpstreamClusterInfo(cluster);\n\n  // Check this here because the TCP conn pool will queue our request waiting for a connection that\n  // will never be released.\n  if (!cluster->resourceManager(Upstream::ResourcePriority::Default).connections().canCreate()) {\n    getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow);\n    
cluster->stats().upstream_cx_overflow_.inc();\n    onInitFailure(UpstreamFailureReason::ResourceLimitExceeded);\n    return Network::FilterStatus::StopIteration;\n  }\n\n  const uint32_t max_connect_attempts = config_->maxConnectAttempts();\n  if (connect_attempts_ >= max_connect_attempts) {\n    getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded);\n    cluster->stats().upstream_cx_connect_attempts_exceeded_.inc();\n    onInitFailure(UpstreamFailureReason::ConnectFailed);\n    return Network::FilterStatus::StopIteration;\n  }\n\n  if (downstreamConnection()) {\n    if (!read_callbacks_->connection()\n             .streamInfo()\n             .filterState()\n             ->hasData<Network::ProxyProtocolFilterState>(\n                 Network::ProxyProtocolFilterState::key())) {\n      read_callbacks_->connection().streamInfo().filterState()->setData(\n          Network::ProxyProtocolFilterState::key(),\n          std::make_unique<Network::ProxyProtocolFilterState>(Network::ProxyProtocolData{\n              downstreamConnection()->remoteAddress(), downstreamConnection()->localAddress()}),\n          StreamInfo::FilterState::StateType::ReadOnly,\n          StreamInfo::FilterState::LifeSpan::Connection);\n    }\n    transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState(\n        downstreamConnection()->streamInfo().filterState());\n  }\n\n  if (!maybeTunnel(cluster_name)) {\n    // Either cluster is unknown or there are no healthy hosts. 
tcpConnPoolForCluster() increments\n    // cluster->stats().upstream_cx_none_healthy in the latter case.\n    getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream);\n    onInitFailure(UpstreamFailureReason::NoHealthyUpstream);\n  }\n  return Network::FilterStatus::StopIteration;\n}\n\nbool Filter::maybeTunnel(const std::string& cluster_name) {\n  if (!config_->tunnelingConfig()) {\n    Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster(\n        cluster_name, Upstream::ResourcePriority::Default, this);\n    if (conn_pool) {\n      connecting_ = true;\n      connect_attempts_++;\n\n      // Given this function is reentrant, make sure we only reset the upstream_handle_ if given a\n      // valid connection handle. If newConnection fails inline it may result in attempting to\n      // select a new host, and a recursive call to initializeUpstreamConnection. In this case the\n      // first call to newConnection will return null and the inner call will persist.\n      Tcp::ConnectionPool::Cancellable* handle = conn_pool->newConnection(*this);\n      if (handle) {\n        ASSERT(upstream_handle_.get() == nullptr);\n        upstream_handle_ = std::make_shared<TcpConnectionHandle>(handle);\n      }\n      // Because we never return open connections to the pool, this either has a handle waiting on\n      // connection completion, or onPoolFailure has been invoked. Either way, stop iteration.\n      return true;\n    }\n  } else {\n    auto* cluster = cluster_manager_.get(cluster_name);\n    if (!cluster) {\n      return false;\n    }\n    // TODO(snowp): Ideally we should prevent this from being configured, but that's tricky to get\n    // right since whether a cluster is invalid depends on both the tcp_proxy config + cluster\n    // config.\n    if ((cluster->info()->features() & Upstream::ClusterInfo::Features::HTTP2) == 0) {\n      ENVOY_LOG(error, \"Attempted to tunnel over HTTP/1.1, this is not supported. 
Set \"\n                       \"http2_protocol_options on the cluster.\");\n      return false;\n    }\n    Http::ConnectionPool::Instance* conn_pool = cluster_manager_.httpConnPoolForCluster(\n        cluster_name, Upstream::ResourcePriority::Default, absl::nullopt, this);\n    if (conn_pool) {\n      upstream_ = std::make_unique<HttpUpstream>(*upstream_callbacks_,\n                                                 config_->tunnelingConfig()->hostname());\n      HttpUpstream* http_upstream = static_cast<HttpUpstream*>(upstream_.get());\n      Http::ConnectionPool::Cancellable* cancellable =\n          conn_pool->newStream(http_upstream->responseDecoder(), *this);\n      if (cancellable) {\n        ASSERT(upstream_handle_.get() == nullptr);\n        upstream_handle_ = std::make_shared<HttpConnectionHandle>(cancellable);\n      }\n      return true;\n    }\n  }\n\n  return false;\n}\nvoid Filter::onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                           Upstream::HostDescriptionConstSharedPtr host) {\n  upstream_handle_.reset();\n\n  read_callbacks_->upstreamHost(host);\n  getStreamInfo().onUpstreamHostSelected(host);\n\n  switch (reason) {\n  case ConnectionPool::PoolFailureReason::Overflow:\n  case ConnectionPool::PoolFailureReason::LocalConnectionFailure:\n    upstream_callbacks_->onEvent(Network::ConnectionEvent::LocalClose);\n    break;\n\n  case ConnectionPool::PoolFailureReason::RemoteConnectionFailure:\n    upstream_callbacks_->onEvent(Network::ConnectionEvent::RemoteClose);\n    break;\n\n  case ConnectionPool::PoolFailureReason::Timeout:\n    onConnectTimeout();\n    break;\n\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid Filter::onPoolReadyBase(Upstream::HostDescriptionConstSharedPtr& host,\n                             const Network::Address::InstanceConstSharedPtr& local_address,\n                             Ssl::ConnectionInfoConstSharedPtr ssl_info) {\n  upstream_handle_.reset();\n  
read_callbacks_->upstreamHost(host);\n  getStreamInfo().onUpstreamHostSelected(host);\n  getStreamInfo().setUpstreamLocalAddress(local_address);\n  getStreamInfo().setUpstreamSslConnection(ssl_info);\n  onUpstreamConnection();\n  read_callbacks_->continueReading();\n}\n\nvoid Filter::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data,\n                         Upstream::HostDescriptionConstSharedPtr host) {\n  Tcp::ConnectionPool::ConnectionData* latched_data = conn_data.get();\n\n  upstream_ = std::make_unique<TcpUpstream>(std::move(conn_data), *upstream_callbacks_);\n  onPoolReadyBase(host, latched_data->connection().localAddress(),\n                  latched_data->connection().streamInfo().downstreamSslConnection());\n  read_callbacks_->connection().streamInfo().setUpstreamFilterState(\n      latched_data->connection().streamInfo().filterState());\n}\n\nvoid Filter::onPoolFailure(ConnectionPool::PoolFailureReason failure, absl::string_view,\n                           Upstream::HostDescriptionConstSharedPtr host) {\n  onPoolFailure(failure, host);\n}\n\nvoid Filter::onPoolReady(Http::RequestEncoder& request_encoder,\n                         Upstream::HostDescriptionConstSharedPtr host,\n                         const StreamInfo::StreamInfo& info) {\n  Http::RequestEncoder* latched_encoder = &request_encoder;\n  HttpUpstream* http_upstream = static_cast<HttpUpstream*>(upstream_.get());\n  http_upstream->setRequestEncoder(request_encoder,\n                                   host->transportSocketFactory().implementsSecureTransport());\n\n  onPoolReadyBase(host, latched_encoder->getStream().connectionLocalAddress(),\n                  info.downstreamSslConnection());\n}\n\nconst Router::MetadataMatchCriteria* Filter::metadataMatchCriteria() {\n  const Router::MetadataMatchCriteria* route_criteria =\n      (route_ != nullptr) ? 
route_->metadataMatchCriteria() : nullptr;\n\n  const auto& request_metadata = getStreamInfo().dynamicMetadata().filter_metadata();\n  const auto filter_it = request_metadata.find(Envoy::Config::MetadataFilters::get().ENVOY_LB);\n\n  if (filter_it != request_metadata.end() && route_criteria != nullptr) {\n    metadata_match_criteria_ = route_criteria->mergeMatchCriteria(filter_it->second);\n    return metadata_match_criteria_.get();\n  } else if (filter_it != request_metadata.end()) {\n    metadata_match_criteria_ =\n        std::make_unique<Router::MetadataMatchCriteriaImpl>(filter_it->second);\n    return metadata_match_criteria_.get();\n  } else {\n    return route_criteria;\n  }\n}\n\nvoid Filter::onConnectTimeout() {\n  ENVOY_CONN_LOG(debug, \"connect timeout\", read_callbacks_->connection());\n  read_callbacks_->upstreamHost()->outlierDetector().putResult(\n      Upstream::Outlier::Result::LocalOriginTimeout);\n  getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure);\n\n  // Raise LocalClose, which will trigger a reconnect if needed/configured.\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::LocalClose);\n}\n\nNetwork::FilterStatus Filter::onData(Buffer::Instance& data, bool end_stream) {\n  ENVOY_CONN_LOG(trace, \"downstream connection received {} bytes, end_stream={}\",\n                 read_callbacks_->connection(), data.length(), end_stream);\n  if (upstream_) {\n    upstream_->encodeData(data, end_stream);\n  }\n  // The upstream should consume all of the data.\n  // Before there is an upstream the connection should be readDisabled. 
If the upstream is\n  // destroyed, there should be no further reads as well.\n  ASSERT(0 == data.length());\n  resetIdleTimer(); // TODO(ggreenway) PERF: do we need to reset timer on both send and receive?\n  return Network::FilterStatus::StopIteration;\n}\n\nNetwork::FilterStatus Filter::onNewConnection() {\n  if (config_->maxDownstreamConnectionDuration()) {\n    connection_duration_timer_ = read_callbacks_->connection().dispatcher().createTimer(\n        [this]() -> void { onMaxDownstreamConnectionDuration(); });\n    connection_duration_timer_->enableTimer(config_->maxDownstreamConnectionDuration().value());\n  }\n  return initializeUpstreamConnection();\n}\n\nvoid Filter::onDownstreamEvent(Network::ConnectionEvent event) {\n  if (upstream_) {\n    Tcp::ConnectionPool::ConnectionDataPtr conn_data(upstream_->onDownstreamEvent(event));\n    if (conn_data != nullptr &&\n        conn_data->connection().state() != Network::Connection::State::Closed) {\n      config_->drainManager().add(config_->sharedConfig(), std::move(conn_data),\n                                  std::move(upstream_callbacks_), std::move(idle_timer_),\n                                  read_callbacks_->upstreamHost());\n    }\n    if (event != Network::ConnectionEvent::Connected) {\n      upstream_.reset();\n      disableIdleTimer();\n    }\n  }\n  if (upstream_handle_) {\n    if (event == Network::ConnectionEvent::LocalClose ||\n        event == Network::ConnectionEvent::RemoteClose) {\n      // Cancel the conn pool request and close any excess pending requests.\n      upstream_handle_->cancel();\n      upstream_handle_.reset();\n    }\n  }\n}\n\nvoid Filter::onUpstreamData(Buffer::Instance& data, bool end_stream) {\n  ENVOY_CONN_LOG(trace, \"upstream connection received {} bytes, end_stream={}\",\n                 read_callbacks_->connection(), data.length(), end_stream);\n  read_callbacks_->connection().write(data, end_stream);\n  ASSERT(0 == data.length());\n  resetIdleTimer(); // 
TODO(ggreenway) PERF: do we need to reset timer on both send and receive?\n}\n\nvoid Filter::onUpstreamEvent(Network::ConnectionEvent event) {\n  // Update the connecting flag before processing the event because we may start a new connection\n  // attempt in initializeUpstreamConnection.\n  bool connecting = connecting_;\n  connecting_ = false;\n\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    upstream_.reset();\n    disableIdleTimer();\n\n    if (connecting) {\n      if (event == Network::ConnectionEvent::RemoteClose) {\n        getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure);\n        read_callbacks_->upstreamHost()->outlierDetector().putResult(\n            Upstream::Outlier::Result::LocalOriginConnectFailed);\n      }\n\n      initializeUpstreamConnection();\n    } else {\n      if (read_callbacks_->connection().state() == Network::Connection::State::Open) {\n        read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n      }\n    }\n  }\n}\n\nvoid Filter::onUpstreamConnection() {\n  connecting_ = false;\n  // Re-enable downstream reads now that the upstream connection is established\n  // so we have a place to send downstream data to.\n  read_callbacks_->connection().readDisable(false);\n\n  read_callbacks_->upstreamHost()->outlierDetector().putResult(\n      Upstream::Outlier::Result::LocalOriginConnectSuccessFinal);\n\n  getStreamInfo().setRequestedServerName(read_callbacks_->connection().requestedServerName());\n  ENVOY_LOG(debug, \"TCP:onUpstreamEvent(), requestedServerName: {}\",\n            getStreamInfo().requestedServerName());\n\n  if (config_->idleTimeout()) {\n    // The idle_timer_ can be moved to a Drainer, so related callbacks call into\n    // the UpstreamCallbacks, which has the same lifetime as the timer, and can dispatch\n    // the call to either TcpProxy or to Drainer, depending on the current state.\n    
idle_timer_ = read_callbacks_->connection().dispatcher().createTimer(\n        [upstream_callbacks = upstream_callbacks_]() { upstream_callbacks->onIdleTimeout(); });\n    resetIdleTimer();\n    read_callbacks_->connection().addBytesSentCallback([this](uint64_t) { resetIdleTimer(); });\n    if (upstream_) {\n      upstream_->addBytesSentCallback([upstream_callbacks = upstream_callbacks_](uint64_t) {\n        upstream_callbacks->onBytesSent();\n      });\n    }\n  }\n}\n\nvoid Filter::onIdleTimeout() {\n  ENVOY_CONN_LOG(debug, \"Session timed out\", read_callbacks_->connection());\n  config_->stats().idle_timeout_.inc();\n\n  // This results in also closing the upstream connection.\n  read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n}\n\nvoid Filter::onMaxDownstreamConnectionDuration() {\n  ENVOY_CONN_LOG(debug, \"max connection duration reached\", read_callbacks_->connection());\n  getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::DurationTimeout);\n  config_->stats().max_downstream_connection_duration_.inc();\n  read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n}\n\nvoid Filter::resetIdleTimer() {\n  if (idle_timer_ != nullptr) {\n    ASSERT(config_->idleTimeout());\n    idle_timer_->enableTimer(config_->idleTimeout().value());\n  }\n}\n\nvoid Filter::disableIdleTimer() {\n  if (idle_timer_ != nullptr) {\n    idle_timer_->disableTimer();\n    idle_timer_.reset();\n  }\n}\n\nUpstreamDrainManager::~UpstreamDrainManager() {\n  // If connections aren't closed before they are destructed an ASSERT fires,\n  // so cancel all pending drains, which causes the connections to be closed.\n  while (!drainers_.empty()) {\n    auto begin = drainers_.begin();\n    Drainer* key = begin->first;\n    begin->second->cancelDrain();\n\n    // cancelDrain() should cause that drainer to be removed from drainers_.\n    // ASSERT so that we don't end up in an infinite loop.\n    ASSERT(drainers_.find(key) == drainers_.end());\n 
 }\n}\n\nvoid UpstreamDrainManager::add(const Config::SharedConfigSharedPtr& config,\n                               Tcp::ConnectionPool::ConnectionDataPtr&& upstream_conn_data,\n                               const std::shared_ptr<Filter::UpstreamCallbacks>& callbacks,\n                               Event::TimerPtr&& idle_timer,\n                               const Upstream::HostDescriptionConstSharedPtr& upstream_host) {\n  DrainerPtr drainer(new Drainer(*this, config, callbacks, std::move(upstream_conn_data),\n                                 std::move(idle_timer), upstream_host));\n  callbacks->drain(*drainer);\n\n  // Use temporary to ensure we get the pointer before we move it out of drainer\n  Drainer* ptr = drainer.get();\n  drainers_[ptr] = std::move(drainer);\n}\n\nvoid UpstreamDrainManager::remove(Drainer& drainer, Event::Dispatcher& dispatcher) {\n  auto it = drainers_.find(&drainer);\n  ASSERT(it != drainers_.end());\n  dispatcher.deferredDelete(std::move(it->second));\n  drainers_.erase(it);\n}\n\nDrainer::Drainer(UpstreamDrainManager& parent, const Config::SharedConfigSharedPtr& config,\n                 const std::shared_ptr<Filter::UpstreamCallbacks>& callbacks,\n                 Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, Event::TimerPtr&& idle_timer,\n                 const Upstream::HostDescriptionConstSharedPtr& upstream_host)\n    : parent_(parent), callbacks_(callbacks), upstream_conn_data_(std::move(conn_data)),\n      timer_(std::move(idle_timer)), upstream_host_(upstream_host), config_(config) {\n  config_->stats().upstream_flush_total_.inc();\n  config_->stats().upstream_flush_active_.inc();\n}\n\nvoid Drainer::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    if (timer_ != nullptr) {\n      timer_->disableTimer();\n    }\n    config_->stats().upstream_flush_active_.dec();\n    parent_.remove(*this, 
upstream_conn_data_->connection().dispatcher());\n  }\n}\n\nvoid Drainer::onData(Buffer::Instance& data, bool) {\n  if (data.length() > 0) {\n    // There is no downstream connection to send any data to, but the upstream\n    // sent some data. Try to behave similar to what the kernel would do\n    // when it receives data on a connection where the application has closed\n    // the socket or ::shutdown(fd, SHUT_RD), and close/reset the connection.\n    cancelDrain();\n  }\n}\n\nvoid Drainer::onIdleTimeout() {\n  config_->stats().idle_timeout_.inc();\n  cancelDrain();\n}\n\nvoid Drainer::onBytesSent() {\n  if (timer_ != nullptr) {\n    timer_->enableTimer(config_->idleTimeout().value());\n  }\n}\n\nvoid Drainer::cancelDrain() {\n  // This sends onEvent(LocalClose).\n  upstream_conn_data_->connection().close(Network::ConnectionCloseType::NoFlush);\n}\n\n} // namespace TcpProxy\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tcp_proxy/tcp_proxy.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stats/timespan.h\"\n#include \"envoy/stream_info/filter_state.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/network/cidr_range.h\"\n#include \"common/network/filter_impl.h\"\n#include \"common/network/hash_policy.h\"\n#include \"common/network/utility.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n#include \"common/tcp_proxy/upstream.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace TcpProxy {\n\n/**\n * All tcp proxy stats. 
@see stats_macros.h\n */\n#define ALL_TCP_PROXY_STATS(COUNTER, GAUGE)                                                        \\\n  COUNTER(downstream_cx_no_route)                                                                  \\\n  COUNTER(downstream_cx_rx_bytes_total)                                                            \\\n  COUNTER(downstream_cx_total)                                                                     \\\n  COUNTER(downstream_cx_tx_bytes_total)                                                            \\\n  COUNTER(downstream_flow_control_paused_reading_total)                                            \\\n  COUNTER(downstream_flow_control_resumed_reading_total)                                           \\\n  COUNTER(idle_timeout)                                                                            \\\n  COUNTER(max_downstream_connection_duration)                                                      \\\n  COUNTER(upstream_flush_total)                                                                    \\\n  GAUGE(downstream_cx_rx_bytes_buffered, Accumulate)                                               \\\n  GAUGE(downstream_cx_tx_bytes_buffered, Accumulate)                                               \\\n  GAUGE(upstream_flush_active, Accumulate)\n\n/**\n * Struct definition for all tcp proxy stats. 
@see stats_macros.h\n */\nstruct TcpProxyStats {\n  ALL_TCP_PROXY_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\nclass Drainer;\nclass UpstreamDrainManager;\n\n/**\n * Route is an individual resolved route for a connection.\n */\nclass Route {\npublic:\n  virtual ~Route() = default;\n\n  /**\n   * Check whether this route matches a given connection.\n   * @param connection supplies the connection to test against.\n   * @return bool true if this route matches a given connection.\n   */\n  virtual bool matches(Network::Connection& connection) const PURE;\n\n  /**\n   * @return const std::string& the upstream cluster that owns the route.\n   */\n  virtual const std::string& clusterName() const PURE;\n\n  /**\n   * @return MetadataMatchCriteria* the metadata that a subset load balancer should match when\n   * selecting an upstream host\n   */\n  virtual const Router::MetadataMatchCriteria* metadataMatchCriteria() const PURE;\n};\n\nusing RouteConstSharedPtr = std::shared_ptr<const Route>;\nusing TunnelingConfig =\n    envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy_TunnelingConfig;\n/**\n * Filter configuration.\n *\n * This configuration holds a TLS slot, and therefore it must be destructed\n * on the main thread.\n */\nclass Config {\npublic:\n  /**\n   * Configuration that can be shared and have an arbitrary lifetime safely.\n   */\n  class SharedConfig {\n  public:\n    SharedConfig(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& config,\n                 Server::Configuration::FactoryContext& context);\n    const TcpProxyStats& stats() { return stats_; }\n    const absl::optional<std::chrono::milliseconds>& idleTimeout() { return idle_timeout_; }\n    const absl::optional<TunnelingConfig> tunnelingConfig() { return tunneling_config_; }\n    const absl::optional<std::chrono::milliseconds>& maxDownstreamConnectinDuration() const {\n      return max_downstream_connection_duration_;\n    }\n\n  private:\n    static 
TcpProxyStats generateStats(Stats::Scope& scope);\n\n    // Hold a Scope for the lifetime of the configuration because connections in\n    // the UpstreamDrainManager can live longer than the listener.\n    const Stats::ScopePtr stats_scope_;\n\n    const TcpProxyStats stats_;\n    absl::optional<std::chrono::milliseconds> idle_timeout_;\n    absl::optional<TunnelingConfig> tunneling_config_;\n    absl::optional<std::chrono::milliseconds> max_downstream_connection_duration_;\n  };\n\n  using SharedConfigSharedPtr = std::shared_ptr<SharedConfig>;\n\n  Config(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& config,\n         Server::Configuration::FactoryContext& context);\n\n  /**\n   * Find out which cluster an upstream connection should be opened to based on the\n   * parameters of a downstream connection.\n   * @param connection supplies the parameters of the downstream connection for\n   * which the proxy needs to open the corresponding upstream.\n   * @return the route to be used for the upstream connection.\n   * If no route applies, returns nullptr.\n   */\n  RouteConstSharedPtr getRouteFromEntries(Network::Connection& connection);\n  RouteConstSharedPtr getRegularRouteFromEntries(Network::Connection& connection);\n\n  const TcpProxyStats& stats() { return shared_config_->stats(); }\n  const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() { return access_logs_; }\n  uint32_t maxConnectAttempts() const { return max_connect_attempts_; }\n  const absl::optional<std::chrono::milliseconds>& idleTimeout() {\n    return shared_config_->idleTimeout();\n  }\n  const absl::optional<std::chrono::milliseconds>& maxDownstreamConnectionDuration() const {\n    return shared_config_->maxDownstreamConnectinDuration();\n  }\n  const absl::optional<TunnelingConfig> tunnelingConfig() {\n    return shared_config_->tunnelingConfig();\n  }\n  UpstreamDrainManager& drainManager();\n  SharedConfigSharedPtr sharedConfig() { return shared_config_; }\n  const 
Router::MetadataMatchCriteria* metadataMatchCriteria() const {\n    return cluster_metadata_match_criteria_.get();\n  }\n  const Network::HashPolicy* hashPolicy() { return hash_policy_.get(); }\n\nprivate:\n  struct RouteImpl : public Route {\n    RouteImpl(\n        const Config& parent,\n        const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute&\n            config);\n\n    // Route\n    bool matches(Network::Connection& connection) const override;\n    const std::string& clusterName() const override { return cluster_name_; }\n    const Router::MetadataMatchCriteria* metadataMatchCriteria() const override {\n      return parent_.metadataMatchCriteria();\n    }\n\n    const Config& parent_;\n    Network::Address::IpList source_ips_;\n    Network::PortRangeList source_port_ranges_;\n    Network::Address::IpList destination_ips_;\n    Network::PortRangeList destination_port_ranges_;\n    std::string cluster_name_;\n  };\n\n  class WeightedClusterEntry : public Route {\n  public:\n    WeightedClusterEntry(const Config& parent,\n                         const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::\n                             WeightedCluster::ClusterWeight& config);\n\n    uint64_t clusterWeight() const { return cluster_weight_; }\n\n    // Route\n    bool matches(Network::Connection&) const override { return false; }\n    const std::string& clusterName() const override { return cluster_name_; }\n    const Router::MetadataMatchCriteria* metadataMatchCriteria() const override {\n      if (metadata_match_criteria_) {\n        return metadata_match_criteria_.get();\n      }\n      return parent_.metadataMatchCriteria();\n    }\n\n  private:\n    const Config& parent_;\n    const std::string cluster_name_;\n    const uint64_t cluster_weight_;\n    Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_;\n  };\n  using WeightedClusterEntryConstSharedPtr = std::shared_ptr<const 
WeightedClusterEntry>;\n\n  std::vector<RouteConstSharedPtr> routes_;\n  std::vector<WeightedClusterEntryConstSharedPtr> weighted_clusters_;\n  uint64_t total_cluster_weight_;\n  std::vector<AccessLog::InstanceSharedPtr> access_logs_;\n  const uint32_t max_connect_attempts_;\n  ThreadLocal::SlotPtr upstream_drain_manager_slot_;\n  SharedConfigSharedPtr shared_config_;\n  std::unique_ptr<const Router::MetadataMatchCriteria> cluster_metadata_match_criteria_;\n  Random::RandomGenerator& random_generator_;\n  std::unique_ptr<const Network::HashPolicyImpl> hash_policy_;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\n/**\n * Per-connection TCP Proxy Cluster configuration.\n */\nclass PerConnectionCluster : public StreamInfo::FilterState::Object {\npublic:\n  PerConnectionCluster(absl::string_view cluster) : cluster_(cluster) {}\n  const std::string& value() const { return cluster_; }\n  static const std::string& key();\n\nprivate:\n  const std::string cluster_;\n};\n\n/**\n * An implementation of a TCP (L3/L4) proxy. This filter will instantiate a new outgoing TCP\n * connection using the defined load balancing proxy for the configured cluster. 
All data will\n * be proxied back and forth between the two connections.\n */\nclass Filter : public Network::ReadFilter,\n               public Upstream::LoadBalancerContextBase,\n               Tcp::ConnectionPool::Callbacks,\n               public Http::ConnectionPool::Callbacks,\n               protected Logger::Loggable<Logger::Id::filter> {\npublic:\n  Filter(ConfigSharedPtr config, Upstream::ClusterManager& cluster_manager);\n  ~Filter() override;\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override;\n\n  // Tcp::ConnectionPool::Callbacks\n  void onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                     Upstream::HostDescriptionConstSharedPtr host) override;\n  void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data,\n                   Upstream::HostDescriptionConstSharedPtr host) override;\n\n  // Http::ConnectionPool::Callbacks,\n  void onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                     absl::string_view transport_failure_reason,\n                     Upstream::HostDescriptionConstSharedPtr host) override;\n  void onPoolReady(Http::RequestEncoder& request_encoder,\n                   Upstream::HostDescriptionConstSharedPtr host,\n                   const StreamInfo::StreamInfo& info) override;\n\n  void onPoolReadyBase(Upstream::HostDescriptionConstSharedPtr& host,\n                       const Network::Address::InstanceConstSharedPtr& local_address,\n                       Ssl::ConnectionInfoConstSharedPtr ssl_info);\n\n  // Upstream::LoadBalancerContext\n  const Router::MetadataMatchCriteria* metadataMatchCriteria() override;\n  absl::optional<uint64_t> computeHashKey() override {\n    auto hash_policy = config_->hashPolicy();\n    if (hash_policy) {\n      return 
hash_policy->generateHash(downstreamConnection()->remoteAddress().get(),\n                                       downstreamConnection()->localAddress().get());\n    }\n\n    return {};\n  }\n\n  const Network::Connection* downstreamConnection() const override {\n    return &read_callbacks_->connection();\n  }\n\n  Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override {\n    return transport_socket_options_;\n  }\n\n  // These two functions allow enabling/disabling reads on the upstream and downstream connections.\n  // They are called by the Downstream/Upstream Watermark callbacks to limit buffering.\n  void readDisableUpstream(bool disable);\n  void readDisableDownstream(bool disable);\n\n  struct UpstreamCallbacks : public Tcp::ConnectionPool::UpstreamCallbacks {\n    UpstreamCallbacks(Filter* parent) : parent_(parent) {}\n\n    // Tcp::ConnectionPool::UpstreamCallbacks\n    void onUpstreamData(Buffer::Instance& data, bool end_stream) override;\n    void onEvent(Network::ConnectionEvent event) override;\n    void onAboveWriteBufferHighWatermark() override;\n    void onBelowWriteBufferLowWatermark() override;\n\n    void onBytesSent();\n    void onIdleTimeout();\n    void drain(Drainer& drainer);\n\n    // Either parent_ or drainer_ will be non-NULL, but never both. This could be\n    // logically be represented as a union, but saving one pointer of memory is\n    // outweighed by more type safety/better error handling.\n    //\n    // Parent starts out as non-NULL. 
If the downstream connection is closed while\n    // the upstream connection still has buffered data to flush, drainer_ becomes\n    // non-NULL and parent_ is set to NULL.\n    Filter* parent_{};\n    Drainer* drainer_{};\n\n    bool on_high_watermark_called_{false};\n  };\n\n  StreamInfo::StreamInfo& getStreamInfo();\n\nprotected:\n  struct DownstreamCallbacks : public Network::ConnectionCallbacks {\n    DownstreamCallbacks(Filter& parent) : parent_(parent) {}\n\n    // Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override { parent_.onDownstreamEvent(event); }\n    void onAboveWriteBufferHighWatermark() override;\n    void onBelowWriteBufferLowWatermark() override;\n\n    Filter& parent_;\n    bool on_high_watermark_called_{false};\n  };\n\n  enum class UpstreamFailureReason {\n    ConnectFailed,\n    NoHealthyUpstream,\n    ResourceLimitExceeded,\n    NoRoute,\n  };\n\n  // Callbacks for different error and success states during connection establishment\n  virtual RouteConstSharedPtr pickRoute() {\n    return config_->getRouteFromEntries(read_callbacks_->connection());\n  }\n\n  virtual void onInitFailure(UpstreamFailureReason) {\n    read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  void initialize(Network::ReadFilterCallbacks& callbacks, bool set_connection_stats);\n  Network::FilterStatus initializeUpstreamConnection();\n  bool maybeTunnel(const std::string& cluster_name);\n  void onConnectTimeout();\n  void onDownstreamEvent(Network::ConnectionEvent event);\n  void onUpstreamData(Buffer::Instance& data, bool end_stream);\n  void onUpstreamEvent(Network::ConnectionEvent event);\n  void onUpstreamConnection();\n  void onIdleTimeout();\n  void resetIdleTimer();\n  void disableIdleTimer();\n  void onMaxDownstreamConnectionDuration();\n\n  const ConfigSharedPtr config_;\n  Upstream::ClusterManager& cluster_manager_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n\n  DownstreamCallbacks 
downstream_callbacks_;\n  Event::TimerPtr idle_timer_;\n  Event::TimerPtr connection_duration_timer_;\n\n  std::shared_ptr<ConnectionHandle> upstream_handle_;\n  std::shared_ptr<UpstreamCallbacks> upstream_callbacks_; // shared_ptr required for passing as a\n                                                          // read filter.\n  std::unique_ptr<GenericUpstream> upstream_;\n  RouteConstSharedPtr route_;\n  Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_;\n  Network::TransportSocketOptionsSharedPtr transport_socket_options_;\n  uint32_t connect_attempts_{};\n  bool connecting_{};\n};\n\n// This class deals with an upstream connection that needs to finish flushing, when the downstream\n// connection has been closed. The TcpProxy is destroyed when the downstream connection is closed,\n// so handling the upstream connection here allows it to finish draining or timeout.\nclass Drainer : public Event::DeferredDeletable {\npublic:\n  Drainer(UpstreamDrainManager& parent, const Config::SharedConfigSharedPtr& config,\n          const std::shared_ptr<Filter::UpstreamCallbacks>& callbacks,\n          Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, Event::TimerPtr&& idle_timer,\n          const Upstream::HostDescriptionConstSharedPtr& upstream_host);\n\n  void onEvent(Network::ConnectionEvent event);\n  void onData(Buffer::Instance& data, bool end_stream);\n  void onIdleTimeout();\n  void onBytesSent();\n  void cancelDrain();\n\nprivate:\n  UpstreamDrainManager& parent_;\n  std::shared_ptr<Filter::UpstreamCallbacks> callbacks_;\n  Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_;\n  Event::TimerPtr timer_;\n  Upstream::HostDescriptionConstSharedPtr upstream_host_;\n  Config::SharedConfigSharedPtr config_;\n};\n\nusing DrainerPtr = std::unique_ptr<Drainer>;\n\nclass UpstreamDrainManager : public ThreadLocal::ThreadLocalObject {\npublic:\n  ~UpstreamDrainManager() override;\n  void add(const Config::SharedConfigSharedPtr& config,\n           
Tcp::ConnectionPool::ConnectionDataPtr&& upstream_conn_data,\n           const std::shared_ptr<Filter::UpstreamCallbacks>& callbacks,\n           Event::TimerPtr&& idle_timer,\n           const Upstream::HostDescriptionConstSharedPtr& upstream_host);\n  void remove(Drainer& drainer, Event::Dispatcher& dispatcher);\n\nprivate:\n  // This must be a map instead of set because there is no way to move elements\n  // out of a set, and these elements get passed to deferredDelete() instead of\n  // being deleted in-place. The key and value will always be equal.\n  absl::node_hash_map<Drainer*, DrainerPtr> drainers_;\n};\n\n} // namespace TcpProxy\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tcp_proxy/upstream.cc",
    "content": "#include \"common/tcp_proxy/upstream.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\nnamespace Envoy {\nnamespace TcpProxy {\n\nTcpUpstream::TcpUpstream(Tcp::ConnectionPool::ConnectionDataPtr&& data,\n                         Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks)\n    : upstream_conn_data_(std::move(data)) {\n  Network::ClientConnection& connection = upstream_conn_data_->connection();\n  connection.enableHalfClose(true);\n  upstream_conn_data_->addUpstreamCallbacks(upstream_callbacks);\n}\n\nbool TcpUpstream::readDisable(bool disable) {\n  if (upstream_conn_data_ == nullptr ||\n      upstream_conn_data_->connection().state() != Network::Connection::State::Open) {\n    // Because we flush write downstream, we can have a case where upstream has already disconnected\n    // and we are waiting to flush. If we had a watermark event during this time we should no\n    // longer touch the upstream connection.\n    return false;\n  }\n\n  upstream_conn_data_->connection().readDisable(disable);\n  return true;\n}\n\nvoid TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) {\n  upstream_conn_data_->connection().write(data, end_stream);\n}\n\nvoid TcpUpstream::addBytesSentCallback(Network::Connection::BytesSentCb cb) {\n  upstream_conn_data_->connection().addBytesSentCallback(cb);\n}\n\nTcp::ConnectionPool::ConnectionData*\nTcpUpstream::onDownstreamEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose) {\n    // The close call may result in this object being deleted. 
Latch the\n    // connection locally so it can be returned for potential draining.\n    auto* conn_data = upstream_conn_data_.release();\n    conn_data->connection().close(Network::ConnectionCloseType::FlushWrite);\n    return conn_data;\n  } else if (event == Network::ConnectionEvent::LocalClose) {\n    upstream_conn_data_->connection().close(Network::ConnectionCloseType::NoFlush);\n  }\n  return nullptr;\n}\n\nHttpUpstream::HttpUpstream(Tcp::ConnectionPool::UpstreamCallbacks& callbacks,\n                           const std::string& hostname)\n    : upstream_callbacks_(callbacks), response_decoder_(*this), hostname_(hostname) {}\n\nHttpUpstream::~HttpUpstream() { resetEncoder(Network::ConnectionEvent::LocalClose); }\n\nbool HttpUpstream::isValidBytestreamResponse(const Http::ResponseHeaderMap& headers) {\n  if (Http::Utility::getResponseStatus(headers) != 200) {\n    return false;\n  }\n  return true;\n}\n\nbool HttpUpstream::readDisable(bool disable) {\n  if (!request_encoder_) {\n    return false;\n  }\n  request_encoder_->getStream().readDisable(disable);\n  return true;\n}\n\nvoid HttpUpstream::encodeData(Buffer::Instance& data, bool end_stream) {\n  if (!request_encoder_) {\n    return;\n  }\n  request_encoder_->encodeData(data, end_stream);\n  if (end_stream) {\n    doneWriting();\n  }\n}\n\nvoid HttpUpstream::addBytesSentCallback(Network::Connection::BytesSentCb) {\n  // The HTTP tunneling mode does not tickle the idle timeout when bytes are\n  // sent to the kernel.\n  // This can be implemented if any user cares about the difference in time\n  // between it being sent to the HTTP/2 stack and out to the kernel.\n}\n\nTcp::ConnectionPool::ConnectionData*\nHttpUpstream::onDownstreamEvent(Network::ConnectionEvent event) {\n  if (event != Network::ConnectionEvent::Connected) {\n    resetEncoder(Network::ConnectionEvent::LocalClose, false);\n  }\n  return nullptr;\n}\n\nvoid HttpUpstream::onResetStream(Http::StreamResetReason, absl::string_view) {\n  
read_half_closed_ = true;\n  write_half_closed_ = true;\n  resetEncoder(Network::ConnectionEvent::LocalClose);\n}\n\nvoid HttpUpstream::onAboveWriteBufferHighWatermark() {\n  upstream_callbacks_.onAboveWriteBufferHighWatermark();\n}\n\nvoid HttpUpstream::onBelowWriteBufferLowWatermark() {\n  upstream_callbacks_.onBelowWriteBufferLowWatermark();\n}\n\nvoid HttpUpstream::setRequestEncoder(Http::RequestEncoder& request_encoder, bool is_ssl) {\n  request_encoder_ = &request_encoder;\n  request_encoder_->getStream().addCallbacks(*this);\n  const std::string& scheme =\n      is_ssl ? Http::Headers::get().SchemeValues.Https : Http::Headers::get().SchemeValues.Http;\n  auto headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>(\n      {{Http::Headers::get().Method, \"CONNECT\"},\n       {Http::Headers::get().Protocol, Http::Headers::get().ProtocolValues.Bytestream},\n       {Http::Headers::get().Scheme, scheme},\n       {Http::Headers::get().Path, \"/\"},\n       {Http::Headers::get().Host, hostname_}});\n  request_encoder_->encodeHeaders(*headers, false);\n}\n\nvoid HttpUpstream::resetEncoder(Network::ConnectionEvent event, bool inform_downstream) {\n  if (!request_encoder_) {\n    return;\n  }\n  request_encoder_->getStream().removeCallbacks(*this);\n  if (!write_half_closed_ || !read_half_closed_) {\n    request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset);\n  }\n  request_encoder_ = nullptr;\n  if (inform_downstream) {\n    upstream_callbacks_.onEvent(event);\n  }\n}\n\nvoid HttpUpstream::doneReading() {\n  read_half_closed_ = true;\n  if (write_half_closed_) {\n    resetEncoder(Network::ConnectionEvent::LocalClose);\n  }\n}\n\nvoid HttpUpstream::doneWriting() {\n  write_half_closed_ = true;\n  if (read_half_closed_) {\n    resetEncoder(Network::ConnectionEvent::LocalClose);\n  }\n}\n\n} // namespace TcpProxy\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tcp_proxy/upstream.h",
    "content": "#pragma once\n\n#include \"envoy/http/conn_pool.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/tcp/conn_pool.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace TcpProxy {\n\n// Interface for a generic ConnectionHandle, which can wrap a TcpConnectionHandle\n// or an HttpConnectionHandle\nclass ConnectionHandle {\npublic:\n  virtual ~ConnectionHandle() = default;\n  // Cancel the conn pool request and close any excess pending requests.\n  virtual void cancel() PURE;\n};\n\n// An implementation of ConnectionHandle which works with the Tcp::ConnectionPool.\nclass TcpConnectionHandle : public ConnectionHandle {\npublic:\n  TcpConnectionHandle(Tcp::ConnectionPool::Cancellable* handle) : upstream_handle_(handle) {}\n\n  void cancel() override {\n    upstream_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n\nprivate:\n  Tcp::ConnectionPool::Cancellable* upstream_handle_{};\n};\n\nclass HttpConnectionHandle : public ConnectionHandle {\npublic:\n  HttpConnectionHandle(Http::ConnectionPool::Cancellable* handle) : upstream_http_handle_(handle) {}\n  void cancel() override {\n    upstream_http_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default);\n  }\n\nprivate:\n  Http::ConnectionPool::Cancellable* upstream_http_handle_{};\n};\n\n// Interface for a generic Upstream, which can communicate with a TCP or HTTP\n// upstream.\nclass GenericUpstream {\npublic:\n  virtual ~GenericUpstream() = default;\n  // Calls readDisable on the upstream connection. Returns false if readDisable could not be\n  // performed (e.g. 
if the connection is closed)\n  virtual bool readDisable(bool disable) PURE;\n  // Encodes data upstream.\n  virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE;\n  // Adds a callback to be called when the data is sent to the kernel.\n  virtual void addBytesSentCallback(Network::Connection::BytesSentCb cb) PURE;\n  // Called when a Network::ConnectionEvent is received on the downstream connection, to allow the\n  // upstream to do any cleanup.\n  virtual Tcp::ConnectionPool::ConnectionData*\n  onDownstreamEvent(Network::ConnectionEvent event) PURE;\n};\n\nclass TcpUpstream : public GenericUpstream {\npublic:\n  TcpUpstream(Tcp::ConnectionPool::ConnectionDataPtr&& data,\n              Tcp::ConnectionPool::UpstreamCallbacks& callbacks);\n\n  // GenericUpstream\n  bool readDisable(bool disable) override;\n  void encodeData(Buffer::Instance& data, bool end_stream) override;\n  void addBytesSentCallback(Network::Connection::BytesSentCb cb) override;\n  Tcp::ConnectionPool::ConnectionData* onDownstreamEvent(Network::ConnectionEvent event) override;\n\nprivate:\n  Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_;\n};\n\nclass HttpUpstream : public GenericUpstream, Http::StreamCallbacks {\npublic:\n  HttpUpstream(Tcp::ConnectionPool::UpstreamCallbacks& callbacks, const std::string& hostname);\n  ~HttpUpstream() override;\n\n  static bool isValidBytestreamResponse(const Http::ResponseHeaderMap& headers);\n\n  void doneReading();\n  void doneWriting();\n\n  // GenericUpstream\n  bool readDisable(bool disable) override;\n  void encodeData(Buffer::Instance& data, bool end_stream) override;\n  void addBytesSentCallback(Network::Connection::BytesSentCb cb) override;\n  Tcp::ConnectionPool::ConnectionData* onDownstreamEvent(Network::ConnectionEvent event) override;\n\n  // Http::StreamCallbacks\n  void onResetStream(Http::StreamResetReason reason,\n                     absl::string_view transport_failure_reason) override;\n  void 
onAboveWriteBufferHighWatermark() override;\n  void onBelowWriteBufferLowWatermark() override;\n\n  void setRequestEncoder(Http::RequestEncoder& request_encoder, bool is_ssl);\n\n  Http::ResponseDecoder& responseDecoder() { return response_decoder_; }\n\nprivate:\n  void resetEncoder(Network::ConnectionEvent event, bool inform_downstream = true);\n\n  class DecoderShim : public Http::ResponseDecoder {\n  public:\n    DecoderShim(HttpUpstream& parent) : parent_(parent) {}\n    // Http::ResponseDecoder\n    void decode100ContinueHeaders(Http::ResponseHeaderMapPtr&&) override {}\n    void decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override {\n      if (!isValidBytestreamResponse(*headers) || end_stream) {\n        parent_.resetEncoder(Network::ConnectionEvent::LocalClose);\n      }\n    }\n    void decodeData(Buffer::Instance& data, bool end_stream) override {\n      parent_.upstream_callbacks_.onUpstreamData(data, end_stream);\n      if (end_stream) {\n        parent_.doneReading();\n      }\n    }\n    void decodeTrailers(Http::ResponseTrailerMapPtr&&) override {}\n    void decodeMetadata(Http::MetadataMapPtr&&) override {}\n\n  private:\n    HttpUpstream& parent_;\n  };\n\n  Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks_;\n  DecoderShim response_decoder_;\n  Http::RequestEncoder* request_encoder_{};\n  const std::string hostname_;\n  bool read_half_closed_{};\n  bool write_half_closed_{};\n};\n\n} // namespace TcpProxy\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/thread_local/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"thread_local_lib\",\n    srcs = [\"thread_local_impl.cc\"],\n    hdrs = [\"thread_local_impl.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:stl_helpers\",\n    ],\n)\n"
  },
  {
    "path": "source/common/thread_local/thread_local_impl.cc",
    "content": "#include \"common/thread_local/thread_local_impl.h\"\n\n#include <algorithm>\n#include <atomic>\n#include <cstdint>\n#include <list>\n\n#include \"envoy/event/dispatcher.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/stl_helpers.h\"\n\nnamespace Envoy {\nnamespace ThreadLocal {\n\nthread_local InstanceImpl::ThreadLocalData InstanceImpl::thread_local_data_;\n\nInstanceImpl::~InstanceImpl() {\n  ASSERT(std::this_thread::get_id() == main_thread_id_);\n  ASSERT(shutdown_);\n  thread_local_data_.data_.clear();\n}\n\nSlotPtr InstanceImpl::allocateSlot() {\n  ASSERT(std::this_thread::get_id() == main_thread_id_);\n  ASSERT(!shutdown_);\n\n  if (free_slot_indexes_.empty()) {\n    SlotPtr slot = std::make_unique<SlotImpl>(*this, slots_.size());\n    slots_.push_back(slot.get());\n    return slot;\n  }\n  const uint32_t idx = free_slot_indexes_.front();\n  free_slot_indexes_.pop_front();\n  ASSERT(idx < slots_.size());\n  SlotPtr slot = std::make_unique<SlotImpl>(*this, idx);\n  slots_[idx] = slot.get();\n  return slot;\n}\n\nInstanceImpl::SlotImpl::SlotImpl(InstanceImpl& parent, uint32_t index)\n    : parent_(parent), index_(index), still_alive_guard_(std::make_shared<bool>(true)) {}\n\nEvent::PostCb InstanceImpl::SlotImpl::wrapCallback(Event::PostCb&& cb) {\n  // See the header file comments for still_alive_guard_ for the purpose of this capture and the\n  // expired check below.\n  return [still_alive_guard = std::weak_ptr<bool>(still_alive_guard_), cb] {\n    if (!still_alive_guard.expired()) {\n      cb();\n    }\n  };\n}\n\nbool InstanceImpl::SlotImpl::currentThreadRegisteredWorker(uint32_t index) {\n  return thread_local_data_.data_.size() > index;\n}\n\nbool InstanceImpl::SlotImpl::currentThreadRegistered() {\n  return currentThreadRegisteredWorker(index_);\n}\n\nThreadLocalObjectSharedPtr InstanceImpl::SlotImpl::getWorker(uint32_t index) {\n  ASSERT(currentThreadRegisteredWorker(index));\n  return 
thread_local_data_.data_[index];\n}\n\nThreadLocalObjectSharedPtr InstanceImpl::SlotImpl::get() { return getWorker(index_); }\n\nvoid InstanceImpl::SlotImpl::runOnAllThreads(const UpdateCb& cb, Event::PostCb complete_cb) {\n  // See the header file comments for still_alive_guard_ for why we capture index_.\n  parent_.runOnAllThreads(\n      wrapCallback([cb, index = index_]() { setThreadLocal(index, cb(getWorker(index))); }),\n      complete_cb);\n}\n\nvoid InstanceImpl::SlotImpl::runOnAllThreads(const UpdateCb& cb) {\n  // See the header file comments for still_alive_guard_ for why we capture index_.\n  parent_.runOnAllThreads(\n      wrapCallback([cb, index = index_]() { setThreadLocal(index, cb(getWorker(index))); }));\n}\n\nvoid InstanceImpl::SlotImpl::set(InitializeCb cb) {\n  ASSERT(std::this_thread::get_id() == parent_.main_thread_id_);\n  ASSERT(!parent_.shutdown_);\n\n  for (Event::Dispatcher& dispatcher : parent_.registered_threads_) {\n    // See the header file comments for still_alive_guard_ for why we capture index_.\n    dispatcher.post(wrapCallback(\n        [index = index_, cb, &dispatcher]() -> void { setThreadLocal(index, cb(dispatcher)); }));\n  }\n\n  // Handle main thread.\n  setThreadLocal(index_, cb(*parent_.main_thread_dispatcher_));\n}\n\nvoid InstanceImpl::registerThread(Event::Dispatcher& dispatcher, bool main_thread) {\n  ASSERT(std::this_thread::get_id() == main_thread_id_);\n  ASSERT(!shutdown_);\n\n  if (main_thread) {\n    main_thread_dispatcher_ = &dispatcher;\n    thread_local_data_.dispatcher_ = &dispatcher;\n  } else {\n    ASSERT(!containsReference(registered_threads_, dispatcher));\n    registered_threads_.push_back(dispatcher);\n    dispatcher.post([&dispatcher] { thread_local_data_.dispatcher_ = &dispatcher; });\n  }\n}\n\nvoid InstanceImpl::removeSlot(uint32_t slot) {\n  ASSERT(std::this_thread::get_id() == main_thread_id_);\n\n  // When shutting down, we do not post slot removals to other threads. 
This is because the other\n  // threads have already shut down and the dispatcher is no longer alive. There is also no reason\n  // to do removal, because no allocations happen during shutdown and shutdownThread() will clean\n  // things up on the other thread.\n  if (shutdown_) {\n    return;\n  }\n\n  slots_[slot] = nullptr;\n  ASSERT(std::find(free_slot_indexes_.begin(), free_slot_indexes_.end(), slot) ==\n             free_slot_indexes_.end(),\n         fmt::format(\"slot index {} already in free slot set!\", slot));\n  free_slot_indexes_.push_back(slot);\n  runOnAllThreads([slot]() -> void {\n    // This runs on each thread and clears the slot, making it available for a new allocation.\n    // This is safe even if a new allocation comes in, because everything happens with post() and\n    // will be sequenced after this removal. It is also safe if there are callbacks pending on\n    // other threads because they will run first.\n    if (slot < thread_local_data_.data_.size()) {\n      thread_local_data_.data_[slot] = nullptr;\n    }\n  });\n}\n\nvoid InstanceImpl::runOnAllThreads(Event::PostCb cb) {\n  ASSERT(std::this_thread::get_id() == main_thread_id_);\n  ASSERT(!shutdown_);\n\n  for (Event::Dispatcher& dispatcher : registered_threads_) {\n    dispatcher.post(cb);\n  }\n\n  // Handle main thread.\n  cb();\n}\n\nvoid InstanceImpl::runOnAllThreads(Event::PostCb cb, Event::PostCb all_threads_complete_cb) {\n  ASSERT(std::this_thread::get_id() == main_thread_id_);\n  ASSERT(!shutdown_);\n  // Handle main thread first so that when the last worker thread wins, we could just call the\n  // all_threads_complete_cb method. 
Parallelism of main thread execution is being traded off\n  // for programming simplicity here.\n  cb();\n\n  Event::PostCbSharedPtr cb_guard(new Event::PostCb(cb),\n                                  [this, all_threads_complete_cb](Event::PostCb* cb) {\n                                    main_thread_dispatcher_->post(all_threads_complete_cb);\n                                    delete cb;\n                                  });\n\n  for (Event::Dispatcher& dispatcher : registered_threads_) {\n    dispatcher.post([cb_guard]() -> void { (*cb_guard)(); });\n  }\n}\n\nvoid InstanceImpl::setThreadLocal(uint32_t index, ThreadLocalObjectSharedPtr object) {\n  if (thread_local_data_.data_.size() <= index) {\n    thread_local_data_.data_.resize(index + 1);\n  }\n\n  thread_local_data_.data_[index] = object;\n}\n\nvoid InstanceImpl::shutdownGlobalThreading() {\n  ASSERT(std::this_thread::get_id() == main_thread_id_);\n  ASSERT(!shutdown_);\n  shutdown_ = true;\n}\n\nvoid InstanceImpl::shutdownThread() {\n  ASSERT(shutdown_);\n\n  // Destruction of slots is done in *reverse* order. This is so that filters and higher layer\n  // things that are built on top of the cluster manager, stats, etc. will be destroyed before\n  // more base layer things. The reason reverse ordering is done is to deal with the case that leaf\n  // objects depend in some way on \"persistent\" objects (particularly the cluster manager) that are\n  // created very early on with a known slot number and never destroyed until shutdown. For example,\n  // if we chose to create persistent per-thread gRPC clients we would potentially run into shutdown\n  // issues if that thing got destroyed after the cluster manager. This happens in practice\n  // currently when a redis connection pool is destroyed and removes its member update callback from\n  // the backing cluster. 
Examples of things with TLS that are created early on and are never\n  // destroyed until server shutdown are stats, runtime, and the cluster manager (see server.cc).\n  //\n  // It's possible this might need to become more complicated later but it's OK for now. Note that\n  // this is always safe to do because:\n  // 1) All slot updates come in via post().\n  // 2) No updates or removals will come in during shutdown().\n  //\n  // TODO(mattklein123): Deletion should really be in reverse *allocation* order. This could be\n  //                     implemented relatively easily by keeping a parallel list of slot #s. This\n  //                     would fix the case where something allocates two slots, but is interleaved\n  //                     with a deletion, such that the second allocation is actually a lower slot\n  //                     number than the first. This is an edge case that does not exist anywhere\n  //                     in the code today, but we can keep this in mind if things become more\n  //                     complicated in the future.\n  for (auto it = thread_local_data_.data_.rbegin(); it != thread_local_data_.data_.rend(); ++it) {\n    it->reset();\n  }\n  thread_local_data_.data_.clear();\n}\n\nEvent::Dispatcher& InstanceImpl::dispatcher() {\n  ASSERT(thread_local_data_.dispatcher_ != nullptr);\n  return *thread_local_data_.dispatcher_;\n}\n\n} // namespace ThreadLocal\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/thread_local/thread_local_impl.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <vector>\n\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/non_copyable.h\"\n\nnamespace Envoy {\nnamespace ThreadLocal {\n\n/**\n * Implementation of ThreadLocal that relies on static thread_local objects.\n */\nclass InstanceImpl : Logger::Loggable<Logger::Id::main>, public NonCopyable, public Instance {\npublic:\n  InstanceImpl() : main_thread_id_(std::this_thread::get_id()) {}\n  ~InstanceImpl() override;\n\n  // ThreadLocal::Instance\n  SlotPtr allocateSlot() override;\n  void registerThread(Event::Dispatcher& dispatcher, bool main_thread) override;\n  void shutdownGlobalThreading() override;\n  void shutdownThread() override;\n  Event::Dispatcher& dispatcher() override;\n\nprivate:\n  // On destruction returns the slot index to the deferred delete queue (detaches it). This allows\n  // a slot to be destructed on the main thread while controlling the lifetime of the underlying\n  // slot as callbacks drain from workers.\n  struct SlotImpl : public Slot {\n    SlotImpl(InstanceImpl& parent, uint32_t index);\n    ~SlotImpl() override { parent_.removeSlot(index_); }\n    Event::PostCb wrapCallback(Event::PostCb&& cb);\n    static bool currentThreadRegisteredWorker(uint32_t index);\n    static ThreadLocalObjectSharedPtr getWorker(uint32_t index);\n\n    // ThreadLocal::Slot\n    ThreadLocalObjectSharedPtr get() override;\n    void runOnAllThreads(const UpdateCb& cb) override;\n    void runOnAllThreads(const UpdateCb& cb, Event::PostCb complete_cb) override;\n    bool currentThreadRegistered() override;\n    void set(InitializeCb cb) override;\n\n    InstanceImpl& parent_;\n    const uint32_t index_;\n    // The following is used to safely verify via weak_ptr that this slot is still alive. 
This\n    // does not prevent all races if a callback does not capture appropriately, but it does fix\n    // the common case of a slot destroyed immediately before anything is posted to a worker.\n    // NOTE: The general safety model of a slot is that it is destroyed immediately on the main\n    //       thread. This means that *all* captures must not reference the slot object directly.\n    //       This is why index_ is captured manually in callbacks that require it.\n    // NOTE: When the slot is destroyed, the index is immediately recycled. This is safe because\n    //       any new posts for a recycled index must come after any previous callbacks for the\n    //       previous owner of the index.\n    // TODO(mattklein123): Add clang-tidy analysis rule to check that \"this\" is not captured by\n    // a TLS function call. This check will not prevent all bad captures, but it will at least\n    // make the programmer more aware of potential issues.\n    std::shared_ptr<bool> still_alive_guard_;\n  };\n\n  struct ThreadLocalData {\n    Event::Dispatcher* dispatcher_{};\n    std::vector<ThreadLocalObjectSharedPtr> data_;\n  };\n\n  void removeSlot(uint32_t slot);\n  void runOnAllThreads(Event::PostCb cb);\n  void runOnAllThreads(Event::PostCb cb, Event::PostCb main_callback);\n  static void setThreadLocal(uint32_t index, ThreadLocalObjectSharedPtr object);\n\n  static thread_local ThreadLocalData thread_local_data_;\n\n  std::vector<Slot*> slots_;\n  // A list of indexes of freed slots.\n  std::list<uint32_t> free_slot_indexes_;\n  std::list<std::reference_wrapper<Event::Dispatcher>> registered_threads_;\n  std::thread::id main_thread_id_;\n  Event::Dispatcher* main_thread_dispatcher_{};\n  std::atomic<bool> shutdown_{};\n\n  // Test only.\n  friend class ThreadLocalInstanceImplTest;\n};\n\nusing InstanceImplPtr = std::unique_ptr<InstanceImpl>;\n\n} // namespace ThreadLocal\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tracing/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"http_tracer_lib\",\n    srcs = [\n        \"http_tracer_impl.cc\",\n    ],\n    hdrs = [\n        \"http_tracer_impl.h\",\n    ],\n    deps = [\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/common:base64_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/formatter:substitution_formatter_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stream_info:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/metadata/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/tracing/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http_tracer_config_lib\",\n    hdrs = [\n        \"http_tracer_config_impl.h\",\n    ],\n    deps = [\n        \"//include/envoy/server:tracer_config_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http_tracer_manager_lib\",\n    srcs = [\n        
\"http_tracer_manager_impl.cc\",\n    ],\n    hdrs = [\n        \"http_tracer_manager_impl.h\",\n    ],\n    deps = [\n        \"//include/envoy/server:tracer_config_interface\",\n        \"//include/envoy/singleton:instance_interface\",\n        \"//include/envoy/tracing:http_tracer_manager_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/common/tracing/http_tracer_config_impl.h",
    "content": "#pragma once\n\n#include \"envoy/protobuf/message_validator.h\"\n#include \"envoy/server/tracer_config.h\"\n\nnamespace Envoy {\nnamespace Tracing {\n\nclass TracerFactoryContextImpl : public Server::Configuration::TracerFactoryContext {\npublic:\n  TracerFactoryContextImpl(Server::Configuration::ServerFactoryContext& server_factory_context,\n                           ProtobufMessage::ValidationVisitor& validation_visitor)\n      : server_factory_context_(server_factory_context), validation_visitor_(validation_visitor) {}\n  Server::Configuration::ServerFactoryContext& serverFactoryContext() override {\n    return server_factory_context_;\n  }\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override {\n    return validation_visitor_;\n  }\n\nprivate:\n  Server::Configuration::ServerFactoryContext& server_factory_context_;\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n};\n\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tracing/http_tracer_impl.cc",
    "content": "#include \"common/tracing/http_tracer_impl.h\"\n\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/type/metadata/v3/metadata.pb.h\"\n#include \"envoy/type/tracing/v3/custom_tag.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n#include \"common/formatter/substitution_formatter.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stream_info/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Tracing {\n\n// TODO(perf): Avoid string creations/copies in this entire file.\nstatic std::string buildResponseCode(const StreamInfo::StreamInfo& info) {\n  return info.responseCode() ? std::to_string(info.responseCode().value()) : \"0\";\n}\n\nstatic absl::string_view valueOrDefault(const Http::HeaderEntry* header,\n                                        const char* default_value) {\n  return header ? header->value().getStringView() : default_value;\n}\n\nstatic std::string buildUrl(const Http::RequestHeaderMap& request_headers,\n                            const uint32_t max_path_length) {\n  if (!request_headers.Path()) {\n    return \"\";\n  }\n  absl::string_view path(request_headers.EnvoyOriginalPath()\n                             ? 
request_headers.getEnvoyOriginalPathValue()\n                             : request_headers.getPathValue());\n\n  if (path.length() > max_path_length) {\n    path = path.substr(0, max_path_length);\n  }\n\n  return absl::StrCat(request_headers.getForwardedProtoValue(), \"://\",\n                      request_headers.getHostValue(), path);\n}\n\nconst std::string HttpTracerUtility::IngressOperation = \"ingress\";\nconst std::string HttpTracerUtility::EgressOperation = \"egress\";\n\nconst std::string& HttpTracerUtility::toString(OperationName operation_name) {\n  switch (operation_name) {\n  case OperationName::Ingress:\n    return IngressOperation;\n  case OperationName::Egress:\n    return EgressOperation;\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nDecision HttpTracerUtility::isTracing(const StreamInfo::StreamInfo& stream_info,\n                                      const Http::RequestHeaderMap& request_headers) {\n  // Exclude health check requests immediately.\n  if (stream_info.healthCheck()) {\n    return {Reason::HealthCheck, false};\n  }\n\n  Http::TraceStatus trace_status =\n      stream_info.getRequestIDExtension()->getTraceStatus(request_headers);\n\n  switch (trace_status) {\n  case Http::TraceStatus::Client:\n    return {Reason::ClientForced, true};\n  case Http::TraceStatus::Forced:\n    return {Reason::ServiceForced, true};\n  case Http::TraceStatus::Sampled:\n    return {Reason::Sampling, true};\n  case Http::TraceStatus::NoTrace:\n    return {Reason::NotTraceableRequestId, false};\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nstatic void addTagIfNotNull(Span& span, const std::string& tag, const Http::HeaderEntry* entry) {\n  if (entry != nullptr) {\n    span.setTag(tag, entry->value().getStringView());\n  }\n}\n\nstatic void addGrpcRequestTags(Span& span, const Http::RequestHeaderMap& headers) {\n  addTagIfNotNull(span, Tracing::Tags::get().GrpcPath, headers.Path());\n  addTagIfNotNull(span, Tracing::Tags::get().GrpcAuthority, headers.Host());\n  
addTagIfNotNull(span, Tracing::Tags::get().GrpcContentType, headers.ContentType());\n  addTagIfNotNull(span, Tracing::Tags::get().GrpcTimeout, headers.GrpcTimeout());\n}\n\ntemplate <class T> static void addGrpcResponseTags(Span& span, const T& headers) {\n  addTagIfNotNull(span, Tracing::Tags::get().GrpcStatusCode, headers.GrpcStatus());\n  addTagIfNotNull(span, Tracing::Tags::get().GrpcMessage, headers.GrpcMessage());\n  // Set error tag when status is not OK.\n  absl::optional<Grpc::Status::GrpcStatus> grpc_status_code = Grpc::Common::getGrpcStatus(headers);\n  if (grpc_status_code && grpc_status_code.value() != Grpc::Status::WellKnownGrpcStatus::Ok) {\n    span.setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True);\n  }\n}\n\nstatic void annotateVerbose(Span& span, const StreamInfo::StreamInfo& stream_info) {\n  const auto start_time = stream_info.startTime();\n  if (stream_info.lastDownstreamRxByteReceived()) {\n    span.log(start_time + std::chrono::duration_cast<SystemTime::duration>(\n                              *stream_info.lastDownstreamRxByteReceived()),\n             Tracing::Logs::get().LastDownstreamRxByteReceived);\n  }\n  if (stream_info.firstUpstreamTxByteSent()) {\n    span.log(start_time + std::chrono::duration_cast<SystemTime::duration>(\n                              *stream_info.firstUpstreamTxByteSent()),\n             Tracing::Logs::get().FirstUpstreamTxByteSent);\n  }\n  if (stream_info.lastUpstreamTxByteSent()) {\n    span.log(start_time + std::chrono::duration_cast<SystemTime::duration>(\n                              *stream_info.lastUpstreamTxByteSent()),\n             Tracing::Logs::get().LastUpstreamTxByteSent);\n  }\n  if (stream_info.firstUpstreamRxByteReceived()) {\n    span.log(start_time + std::chrono::duration_cast<SystemTime::duration>(\n                              *stream_info.firstUpstreamRxByteReceived()),\n             Tracing::Logs::get().FirstUpstreamRxByteReceived);\n  }\n  if 
(stream_info.lastUpstreamRxByteReceived()) {\n    span.log(start_time + std::chrono::duration_cast<SystemTime::duration>(\n                              *stream_info.lastUpstreamRxByteReceived()),\n             Tracing::Logs::get().LastUpstreamRxByteReceived);\n  }\n  if (stream_info.firstDownstreamTxByteSent()) {\n    span.log(start_time + std::chrono::duration_cast<SystemTime::duration>(\n                              *stream_info.firstDownstreamTxByteSent()),\n             Tracing::Logs::get().FirstDownstreamTxByteSent);\n  }\n  if (stream_info.lastDownstreamTxByteSent()) {\n    span.log(start_time + std::chrono::duration_cast<SystemTime::duration>(\n                              *stream_info.lastDownstreamTxByteSent()),\n             Tracing::Logs::get().LastDownstreamTxByteSent);\n  }\n}\n\nvoid HttpTracerUtility::finalizeDownstreamSpan(Span& span,\n                                               const Http::RequestHeaderMap* request_headers,\n                                               const Http::ResponseHeaderMap* response_headers,\n                                               const Http::ResponseTrailerMap* response_trailers,\n                                               const StreamInfo::StreamInfo& stream_info,\n                                               const Config& tracing_config) {\n  // Pre response data.\n  if (request_headers) {\n    if (request_headers->RequestId()) {\n      span.setTag(Tracing::Tags::get().GuidXRequestId, request_headers->getRequestIdValue());\n    }\n    span.setTag(Tracing::Tags::get().HttpUrl,\n                buildUrl(*request_headers, tracing_config.maxPathTagLength()));\n    span.setTag(Tracing::Tags::get().HttpMethod, request_headers->getMethodValue());\n    span.setTag(Tracing::Tags::get().DownstreamCluster,\n                valueOrDefault(request_headers->EnvoyDownstreamServiceCluster(), \"-\"));\n    span.setTag(Tracing::Tags::get().UserAgent, valueOrDefault(request_headers->UserAgent(), \"-\"));\n    
span.setTag(\n        Tracing::Tags::get().HttpProtocol,\n        Formatter::SubstitutionFormatUtils::protocolToStringOrDefault(stream_info.protocol()));\n\n    const auto& remote_address = stream_info.downstreamDirectRemoteAddress();\n\n    if (remote_address->type() == Network::Address::Type::Ip) {\n      const auto remote_ip = remote_address->ip();\n      span.setTag(Tracing::Tags::get().PeerAddress, remote_ip->addressAsString());\n    } else {\n      span.setTag(Tracing::Tags::get().PeerAddress, remote_address->logicalName());\n    }\n\n    if (request_headers->ClientTraceId()) {\n      span.setTag(Tracing::Tags::get().GuidXClientTraceId,\n                  request_headers->getClientTraceIdValue());\n    }\n\n    if (Grpc::Common::isGrpcRequestHeaders(*request_headers)) {\n      addGrpcRequestTags(span, *request_headers);\n    }\n  }\n  CustomTagContext ctx{request_headers, stream_info};\n\n  const CustomTagMap* custom_tag_map = tracing_config.customTags();\n  if (custom_tag_map) {\n    for (const auto& it : *custom_tag_map) {\n      it.second->apply(span, ctx);\n    }\n  }\n  span.setTag(Tracing::Tags::get().RequestSize, std::to_string(stream_info.bytesReceived()));\n  span.setTag(Tracing::Tags::get().ResponseSize, std::to_string(stream_info.bytesSent()));\n\n  setCommonTags(span, response_headers, response_trailers, stream_info, tracing_config);\n\n  span.finishSpan();\n}\n\nvoid HttpTracerUtility::finalizeUpstreamSpan(Span& span,\n                                             const Http::ResponseHeaderMap* response_headers,\n                                             const Http::ResponseTrailerMap* response_trailers,\n                                             const StreamInfo::StreamInfo& stream_info,\n                                             const Config& tracing_config) {\n  span.setTag(\n      Tracing::Tags::get().HttpProtocol,\n      Formatter::SubstitutionFormatUtils::protocolToStringOrDefault(stream_info.protocol()));\n\n  if 
(stream_info.upstreamHost()) {\n    span.setTag(Tracing::Tags::get().UpstreamAddress,\n                stream_info.upstreamHost()->address()->asStringView());\n  }\n\n  setCommonTags(span, response_headers, response_trailers, stream_info, tracing_config);\n\n  span.finishSpan();\n}\n\nvoid HttpTracerUtility::setCommonTags(Span& span, const Http::ResponseHeaderMap* response_headers,\n                                      const Http::ResponseTrailerMap* response_trailers,\n                                      const StreamInfo::StreamInfo& stream_info,\n                                      const Config& tracing_config) {\n\n  span.setTag(Tracing::Tags::get().Component, Tracing::Tags::get().Proxy);\n\n  if (nullptr != stream_info.upstreamHost()) {\n    span.setTag(Tracing::Tags::get().UpstreamCluster, stream_info.upstreamHost()->cluster().name());\n  }\n\n  // Post response data.\n  span.setTag(Tracing::Tags::get().HttpStatusCode, buildResponseCode(stream_info));\n  span.setTag(Tracing::Tags::get().ResponseFlags,\n              StreamInfo::ResponseFlagUtils::toShortString(stream_info));\n\n  // GRPC data.\n  if (response_trailers && response_trailers->GrpcStatus() != nullptr) {\n    addGrpcResponseTags(span, *response_trailers);\n  } else if (response_headers && response_headers->GrpcStatus() != nullptr) {\n    addGrpcResponseTags(span, *response_headers);\n  }\n\n  if (tracing_config.verbose()) {\n    annotateVerbose(span, stream_info);\n  }\n\n  if (!stream_info.responseCode() || Http::CodeUtility::is5xx(stream_info.responseCode().value())) {\n    span.setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True);\n  }\n}\n\nCustomTagConstSharedPtr\nHttpTracerUtility::createCustomTag(const envoy::type::tracing::v3::CustomTag& tag) {\n  switch (tag.type_case()) {\n  case envoy::type::tracing::v3::CustomTag::TypeCase::kLiteral:\n    return std::make_shared<const Tracing::LiteralCustomTag>(tag.tag(), tag.literal());\n  case 
envoy::type::tracing::v3::CustomTag::TypeCase::kEnvironment:\n    return std::make_shared<const Tracing::EnvironmentCustomTag>(tag.tag(), tag.environment());\n  case envoy::type::tracing::v3::CustomTag::TypeCase::kRequestHeader:\n    return std::make_shared<const Tracing::RequestHeaderCustomTag>(tag.tag(), tag.request_header());\n  case envoy::type::tracing::v3::CustomTag::TypeCase::kMetadata:\n    return std::make_shared<const Tracing::MetadataCustomTag>(tag.tag(), tag.metadata());\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nHttpTracerImpl::HttpTracerImpl(DriverPtr&& driver, const LocalInfo::LocalInfo& local_info)\n    : driver_(std::move(driver)), local_info_(local_info) {}\n\nSpanPtr HttpTracerImpl::startSpan(const Config& config, Http::RequestHeaderMap& request_headers,\n                                  const StreamInfo::StreamInfo& stream_info,\n                                  const Tracing::Decision tracing_decision) {\n  std::string span_name = HttpTracerUtility::toString(config.operationName());\n\n  if (config.operationName() == OperationName::Egress) {\n    span_name.append(\" \");\n    span_name.append(std::string(request_headers.getHostValue()));\n  }\n\n  SpanPtr active_span = driver_->startSpan(config, request_headers, span_name,\n                                           stream_info.startTime(), tracing_decision);\n\n  // Set tags related to the local environment\n  if (active_span) {\n    active_span->setTag(Tracing::Tags::get().NodeId, local_info_.nodeName());\n    active_span->setTag(Tracing::Tags::get().Zone, local_info_.zoneName());\n  }\n\n  return active_span;\n}\n\nvoid CustomTagBase::apply(Span& span, const CustomTagContext& ctx) const {\n  absl::string_view tag_value = value(ctx);\n  if (!tag_value.empty()) {\n    span.setTag(tag(), tag_value);\n  }\n}\n\nEnvironmentCustomTag::EnvironmentCustomTag(\n    const std::string& tag, const envoy::type::tracing::v3::CustomTag::Environment& environment)\n    : CustomTagBase(tag), 
name_(environment.name()), default_value_(environment.default_value()) {\n  const char* env = std::getenv(name_.data());\n  final_value_ = env ? env : default_value_;\n}\n\nRequestHeaderCustomTag::RequestHeaderCustomTag(\n    const std::string& tag, const envoy::type::tracing::v3::CustomTag::Header& request_header)\n    : CustomTagBase(tag), name_(Http::LowerCaseString(request_header.name())),\n      default_value_(request_header.default_value()) {}\n\nabsl::string_view RequestHeaderCustomTag::value(const CustomTagContext& ctx) const {\n  if (!ctx.request_headers) {\n    return default_value_;\n  }\n  const Http::HeaderEntry* entry = ctx.request_headers->get(name_);\n  return entry ? entry->value().getStringView() : default_value_;\n}\n\nMetadataCustomTag::MetadataCustomTag(const std::string& tag,\n                                     const envoy::type::tracing::v3::CustomTag::Metadata& metadata)\n    : CustomTagBase(tag), kind_(metadata.kind().kind_case()),\n      metadata_key_(metadata.metadata_key()), default_value_(metadata.default_value()) {}\n\nvoid MetadataCustomTag::apply(Span& span, const CustomTagContext& ctx) const {\n  const envoy::config::core::v3::Metadata* meta = metadata(ctx);\n  if (!meta) {\n    if (!default_value_.empty()) {\n      span.setTag(tag(), default_value_);\n    }\n    return;\n  }\n  const ProtobufWkt::Value& value = Envoy::Config::Metadata::metadataValue(meta, metadata_key_);\n  switch (value.kind_case()) {\n  case ProtobufWkt::Value::kBoolValue:\n    span.setTag(tag(), value.bool_value() ? 
\"true\" : \"false\");\n    return;\n  case ProtobufWkt::Value::kNumberValue:\n    span.setTag(tag(), absl::StrCat(\"\", value.number_value()));\n    return;\n  case ProtobufWkt::Value::kStringValue:\n    span.setTag(tag(), value.string_value());\n    return;\n  case ProtobufWkt::Value::kListValue:\n    span.setTag(tag(), MessageUtil::getJsonStringFromMessage(value.list_value()));\n    return;\n  case ProtobufWkt::Value::kStructValue:\n    span.setTag(tag(), MessageUtil::getJsonStringFromMessage(value.struct_value()));\n    return;\n  default:\n    break;\n  }\n  if (!default_value_.empty()) {\n    span.setTag(tag(), default_value_);\n  }\n}\n\nconst envoy::config::core::v3::Metadata*\nMetadataCustomTag::metadata(const CustomTagContext& ctx) const {\n  const StreamInfo::StreamInfo& info = ctx.stream_info;\n  switch (kind_) {\n  case envoy::type::metadata::v3::MetadataKind::KindCase::kRequest:\n    return &info.dynamicMetadata();\n  case envoy::type::metadata::v3::MetadataKind::KindCase::kRoute: {\n    const Router::RouteEntry* route_entry = info.routeEntry();\n    return route_entry ? &route_entry->metadata() : nullptr;\n  }\n  case envoy::type::metadata::v3::MetadataKind::KindCase::kCluster: {\n    const auto& hostPtr = info.upstreamHost();\n    return hostPtr ? &hostPtr->cluster().metadata() : nullptr;\n  }\n  case envoy::type::metadata::v3::MetadataKind::KindCase::kHost: {\n    const auto& hostPtr = info.upstreamHost();\n    return hostPtr ? hostPtr->metadata().get() : nullptr;\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tracing/http_tracer_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/type/metadata/v3/metadata.pb.h\"\n#include \"envoy/type/tracing/v3/custom_tag.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/json/json_loader.h\"\n\nnamespace Envoy {\nnamespace Tracing {\n\n/**\n * Tracing tag names.\n */\nclass TracingTagValues {\npublic:\n  // OpenTracing standard tag names.\n  const std::string Component = \"component\";\n  const std::string DbInstance = \"db.instance\";\n  const std::string DbStatement = \"db.statement\";\n  const std::string DbUser = \"db.user\";\n  const std::string DbType = \"db.type\";\n  const std::string Error = \"error\";\n  const std::string HttpMethod = \"http.method\";\n  const std::string HttpStatusCode = \"http.status_code\";\n  const std::string HttpUrl = \"http.url\";\n  const std::string MessageBusDestination = \"message_bus.destination\";\n  const std::string PeerAddress = \"peer.address\";\n  const std::string PeerHostname = \"peer.hostname\";\n  const std::string PeerIpv4 = \"peer.ipv4\";\n  const std::string PeerIpv6 = \"peer.ipv6\";\n  const std::string PeerPort = \"peer.port\";\n  const std::string PeerService = \"peer.service\";\n  const std::string SpanKind = \"span.kind\";\n\n  // Non-standard tag names.\n  const std::string DownstreamCluster = \"downstream_cluster\";\n  const std::string ErrorReason = \"error.reason\";\n  const std::string GrpcAuthority = \"grpc.authority\";\n  const std::string GrpcContentType = \"grpc.content_type\";\n  const std::string GrpcMessage = \"grpc.message\";\n  const std::string GrpcPath = 
\"grpc.path\";\n  const std::string GrpcStatusCode = \"grpc.status_code\";\n  const std::string GrpcTimeout = \"grpc.timeout\";\n  const std::string GuidXClientTraceId = \"guid:x-client-trace-id\";\n  const std::string GuidXRequestId = \"guid:x-request-id\";\n  const std::string HttpProtocol = \"http.protocol\";\n  const std::string NodeId = \"node_id\";\n  const std::string RequestSize = \"request_size\";\n  const std::string ResponseFlags = \"response_flags\";\n  const std::string ResponseSize = \"response_size\";\n  const std::string RetryCount = \"retry.count\";\n  const std::string Status = \"status\";\n  const std::string UpstreamAddress = \"upstream_address\";\n  const std::string UpstreamCluster = \"upstream_cluster\";\n  const std::string UserAgent = \"user_agent\";\n  const std::string Zone = \"zone\";\n\n  // Tag values.\n  const std::string Canceled = \"canceled\";\n  const std::string Proxy = \"proxy\";\n  const std::string True = \"true\";\n};\n\nusing Tags = ConstSingleton<TracingTagValues>;\n\nclass TracingLogValues {\npublic:\n  // OpenTracing standard key names.\n  const std::string EventKey = \"event\";\n\n  // Event names\n  const std::string LastDownstreamRxByteReceived = \"last_downstream_rx_byte_received\";\n  const std::string FirstUpstreamTxByteSent = \"first_upstream_tx_byte_sent\";\n  const std::string LastUpstreamTxByteSent = \"last_upstream_tx_byte_sent\";\n  const std::string FirstUpstreamRxByteReceived = \"first_upstream_rx_byte_received\";\n  const std::string LastUpstreamRxByteReceived = \"last_upstream_rx_byte_received\";\n  const std::string FirstDownstreamTxByteSent = \"first_downstream_tx_byte_sent\";\n  const std::string LastDownstreamTxByteSent = \"last_downstream_tx_byte_sent\";\n};\n\nusing Logs = ConstSingleton<TracingLogValues>;\n\nclass HttpTracerUtility {\npublic:\n  /**\n   * Get string representation of the operation.\n   * @param operation name to convert.\n   * @return string representation of the operation.\n   */\n 
 static const std::string& toString(OperationName operation_name);\n\n  /**\n   * Request might be traceable if x-request-id is traceable uuid or we do sampling tracing.\n   * Note: there is a global switch which turns off tracing completely on server side.\n   *\n   * @return decision if request is traceable or not and Reason why.\n   **/\n  static Decision isTracing(const StreamInfo::StreamInfo& stream_info,\n                            const Http::RequestHeaderMap& request_headers);\n\n  /**\n   * Adds information obtained from the downstream request headers as tags to the active span.\n   * Then finishes the span.\n   */\n  static void finalizeDownstreamSpan(Span& span, const Http::RequestHeaderMap* request_headers,\n                                     const Http::ResponseHeaderMap* response_headers,\n                                     const Http::ResponseTrailerMap* response_trailers,\n                                     const StreamInfo::StreamInfo& stream_info,\n                                     const Config& tracing_config);\n\n  /**\n   * Adds information obtained from the upstream request headers as tags to the active span.\n   * Then finishes the span.\n   */\n  static void finalizeUpstreamSpan(Span& span, const Http::ResponseHeaderMap* response_headers,\n                                   const Http::ResponseTrailerMap* response_trailers,\n                                   const StreamInfo::StreamInfo& stream_info,\n                                   const Config& tracing_config);\n\n  /**\n   * Create a custom tag according to the configuration.\n   * @param tag a tracing custom tag configuration.\n   */\n  static CustomTagConstSharedPtr createCustomTag(const envoy::type::tracing::v3::CustomTag& tag);\n\nprivate:\n  static void setCommonTags(Span& span, const Http::ResponseHeaderMap* response_headers,\n                            const Http::ResponseTrailerMap* response_trailers,\n                            const StreamInfo::StreamInfo& 
stream_info,\n                            const Config& tracing_config);\n\n  static const std::string IngressOperation;\n  static const std::string EgressOperation;\n};\n\nclass EgressConfigImpl : public Config {\npublic:\n  // Tracing::Config\n  Tracing::OperationName operationName() const override { return Tracing::OperationName::Egress; }\n  const CustomTagMap* customTags() const override { return nullptr; }\n  bool verbose() const override { return false; }\n  uint32_t maxPathTagLength() const override { return Tracing::DefaultMaxPathTagLength; }\n};\n\nusing EgressConfig = ConstSingleton<EgressConfigImpl>;\n\nclass NullSpan : public Span {\npublic:\n  static NullSpan& instance() {\n    static NullSpan* instance = new NullSpan();\n    return *instance;\n  }\n\n  // Tracing::Span\n  void setOperation(absl::string_view) override {}\n  void setTag(absl::string_view, absl::string_view) override {}\n  void log(SystemTime, const std::string&) override {}\n  void finishSpan() override {}\n  void injectContext(Http::RequestHeaderMap&) override {}\n  void setBaggage(absl::string_view, absl::string_view) override {}\n  std::string getBaggage(absl::string_view) override { return std::string(); }\n  SpanPtr spawnChild(const Config&, const std::string&, SystemTime) override {\n    return SpanPtr{new NullSpan()};\n  }\n  void setSampled(bool) override {}\n};\n\nclass HttpNullTracer : public HttpTracer {\npublic:\n  // Tracing::HttpTracer\n  SpanPtr startSpan(const Config&, Http::RequestHeaderMap&, const StreamInfo::StreamInfo&,\n                    const Tracing::Decision) override {\n    return SpanPtr{new NullSpan()};\n  }\n};\n\nclass HttpTracerImpl : public HttpTracer {\npublic:\n  HttpTracerImpl(DriverPtr&& driver, const LocalInfo::LocalInfo& local_info);\n\n  // Tracing::HttpTracer\n  SpanPtr startSpan(const Config& config, Http::RequestHeaderMap& request_headers,\n                    const StreamInfo::StreamInfo& stream_info,\n                    const 
Tracing::Decision tracing_decision) override;\n\nprivate:\n  DriverPtr driver_;\n  const LocalInfo::LocalInfo& local_info_;\n};\n\nclass CustomTagBase : public CustomTag {\npublic:\n  explicit CustomTagBase(const std::string& tag) : tag_(tag) {}\n  absl::string_view tag() const override { return tag_; }\n  void apply(Span& span, const CustomTagContext& ctx) const override;\n\n  virtual absl::string_view value(const CustomTagContext& ctx) const PURE;\n\nprotected:\n  const std::string tag_;\n};\n\nclass LiteralCustomTag : public CustomTagBase {\npublic:\n  LiteralCustomTag(const std::string& tag,\n                   const envoy::type::tracing::v3::CustomTag::Literal& literal)\n      : CustomTagBase(tag), value_(literal.value()) {}\n  absl::string_view value(const CustomTagContext&) const override { return value_; }\n\nprivate:\n  const std::string value_;\n};\n\nclass EnvironmentCustomTag : public CustomTagBase {\npublic:\n  EnvironmentCustomTag(const std::string& tag,\n                       const envoy::type::tracing::v3::CustomTag::Environment& environment);\n  absl::string_view value(const CustomTagContext&) const override { return final_value_; }\n\nprivate:\n  const std::string name_;\n  const std::string default_value_;\n  std::string final_value_;\n};\n\nclass RequestHeaderCustomTag : public CustomTagBase {\npublic:\n  RequestHeaderCustomTag(const std::string& tag,\n                         const envoy::type::tracing::v3::CustomTag::Header& request_header);\n  absl::string_view value(const CustomTagContext& ctx) const override;\n\nprivate:\n  const Http::LowerCaseString name_;\n  const std::string default_value_;\n};\n\nclass MetadataCustomTag : public CustomTagBase {\npublic:\n  MetadataCustomTag(const std::string& tag,\n                    const envoy::type::tracing::v3::CustomTag::Metadata& metadata);\n  void apply(Span& span, const CustomTagContext& ctx) const override;\n  absl::string_view value(const CustomTagContext&) const override { return 
default_value_; }\n  const envoy::config::core::v3::Metadata* metadata(const CustomTagContext& ctx) const;\n\nprotected:\n  const envoy::type::metadata::v3::MetadataKind::KindCase kind_;\n  const Envoy::Config::MetadataKey metadata_key_;\n  const std::string default_value_;\n};\n\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tracing/http_tracer_manager_impl.cc",
    "content": "#include \"common/tracing/http_tracer_manager_impl.h\"\n\n#include \"common/config/utility.h\"\n\nnamespace Envoy {\nnamespace Tracing {\n\nHttpTracerManagerImpl::HttpTracerManagerImpl(\n    Server::Configuration::TracerFactoryContextPtr factory_context)\n    : factory_context_(std::move(factory_context)) {}\n\nHttpTracerSharedPtr\nHttpTracerManagerImpl::getOrCreateHttpTracer(const envoy::config::trace::v3::Tracing_Http* config) {\n  if (!config) {\n    return null_tracer_;\n  }\n\n  const auto cache_key = MessageUtil::hash(*config);\n  const auto it = http_tracers_.find(cache_key);\n  if (it != http_tracers_.end()) {\n    auto http_tracer = it->second.lock();\n    if (http_tracer) { // HttpTracer might have been released since it's a weak reference\n      return http_tracer;\n    }\n  }\n\n  // Free memory held by expired weak references.\n  //\n  // Given that:\n  //\n  // * HttpTracer is obtained only once per listener lifecycle\n  // * in a typical case, all listeners will have identical tracing configuration and, consequently,\n  //   will share the same HttpTracer instance\n  // * amount of memory held by an expired weak reference is minimal\n  //\n  // it seems reasonable to avoid introducing an external sweeper and only reclaim memory at\n  // the moment when a new HttpTracer instance is about to be created.\n  removeExpiredCacheEntries();\n\n  // Initialize a new tracer.\n  ENVOY_LOG(info, \"instantiating a new tracer: {}\", config->name());\n\n  // Now see if there is a factory that will accept the config.\n  auto& factory =\n      Envoy::Config::Utility::getAndCheckFactory<Server::Configuration::TracerFactory>(*config);\n  ProtobufTypes::MessagePtr message = Envoy::Config::Utility::translateToFactoryConfig(\n      *config, factory_context_->messageValidationVisitor(), factory);\n\n  HttpTracerSharedPtr http_tracer = factory.createHttpTracer(*message, *factory_context_);\n  http_tracers_.emplace(cache_key, http_tracer); // cache a weak 
reference\n  return http_tracer;\n}\n\nvoid HttpTracerManagerImpl::removeExpiredCacheEntries() {\n  absl::erase_if(http_tracers_,\n                 [](const std::pair<const std::size_t, std::weak_ptr<HttpTracer>>& entry) {\n                   return entry.second.expired();\n                 });\n}\n\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/tracing/http_tracer_manager_impl.h",
    "content": "#pragma once\n\n#include \"envoy/server/tracer_config.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/tracing/http_tracer_manager.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Tracing {\n\nclass HttpTracerManagerImpl : public HttpTracerManager,\n                              public Singleton::Instance,\n                              Logger::Loggable<Logger::Id::tracing> {\npublic:\n  HttpTracerManagerImpl(Server::Configuration::TracerFactoryContextPtr factory_context);\n\n  // HttpTracerManager\n  HttpTracerSharedPtr\n  getOrCreateHttpTracer(const envoy::config::trace::v3::Tracing_Http* config) override;\n\n  // Take a peek into the cache of HttpTracers. This should only be used in tests.\n  const absl::flat_hash_map<std::size_t, std::weak_ptr<HttpTracer>>&\n  peekCachedTracersForTest() const {\n    return http_tracers_;\n  }\n\nprivate:\n  void removeExpiredCacheEntries();\n\n  Server::Configuration::TracerFactoryContextPtr factory_context_;\n  const HttpTracerSharedPtr null_tracer_{std::make_shared<Tracing::HttpNullTracer>()};\n\n  // HttpTracers indexed by the hash of their configuration.\n  absl::flat_hash_map<std::size_t, std::weak_ptr<HttpTracer>> http_tracers_;\n};\n\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"cds_api_lib\",\n    srcs = [\"cds_api_impl.cc\"],\n    hdrs = [\"cds_api_impl.h\"],\n    deps = [\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:subscription_base_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"cluster_manager_lib\",\n    srcs = [\"cluster_manager_impl.cc\"],\n    hdrs = [\"cluster_manager_impl.h\"],\n    deps = [\n        \":cds_api_lib\",\n        \":load_balancer_lib\",\n        \":load_stats_reporter_lib\",\n        \":ring_hash_lb_lib\",\n        \":subset_lb_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/common:enum_to_int\",\n      
  \"//source/common/common:utility_lib\",\n        \"//source/common/config:grpc_mux_lib\",\n        \"//source/common/config:subscription_factory_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/grpc:async_client_manager_lib\",\n        \"//source/common/http:async_client_lib\",\n        \"//source/common/http/http1:conn_pool_lib\",\n        \"//source/common/http/http2:conn_pool_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/router:shadow_writer_lib\",\n        \"//source/common/shared_pool:shared_pool_lib\",\n        \"//source/common/tcp:conn_pool_lib\",\n        \"//source/common/upstream:priority_conn_pool_map_impl_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"cluster_update_tracker_lib\",\n    srcs = [\"cluster_update_tracker.cc\"],\n    hdrs = [\"cluster_update_tracker.h\"],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_pool_map\",\n    hdrs = [\"conn_pool_map.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/upstream:resource_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:debug_recursion_checker_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_pool_map_impl_lib\",\n    hdrs = [\"conn_pool_map_impl.h\"],\n    deps = [\n        \":conn_pool_map\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"priority_conn_pool_map\",\n    
hdrs = [\"priority_conn_pool_map.h\"],\n    deps = [\n        \":conn_pool_map\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/upstream:resource_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:debug_recursion_checker_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"priority_conn_pool_map_impl_lib\",\n    hdrs = [\"priority_conn_pool_map_impl.h\"],\n    deps = [\n        \":conn_pool_map_impl_lib\",\n        \":priority_conn_pool_map\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"edf_scheduler_lib\",\n    hdrs = [\"edf_scheduler.h\"],\n    deps = [\"//source/common/common:assert_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"health_checker_base_lib\",\n    srcs = [\"health_checker_base_impl.cc\"],\n    hdrs = [\"health_checker_base_impl.h\"],\n    deps = [\n        \"//include/envoy/upstream:health_checker_interface\",\n        \"//source/common/router:router_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"health_checker_lib\",\n    srcs = [\"health_checker_impl.cc\"],\n    hdrs = [\"health_checker_impl.h\"],\n    external_deps = [\n        \"grpc_health_proto\",\n    ],\n    deps = [\n        \":health_checker_base_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n        # TODO(dio): Remove dependency to server.\n        \"//include/envoy/server:health_checker_config_interface\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/http:codec_client_lib\",\n        \"//source/common/upstream:host_utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"host_utility_lib\",\n    srcs = [\"host_utility.cc\"],\n    hdrs = 
[\"host_utility.h\"],\n    deps = [\"//include/envoy/upstream:upstream_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"load_balancer_lib\",\n    srcs = [\"load_balancer_impl.cc\"],\n    hdrs = [\"load_balancer_impl.h\"],\n    deps = [\n        \":edf_scheduler_lib\",\n        \"//include/envoy/common:random_generator_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/upstream:load_balancer_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_protos_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"load_stats_reporter_lib\",\n    srcs = [\"load_stats_reporter.cc\"],\n    hdrs = [\"load_stats_reporter.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/grpc:async_client_lib\",\n        \"@envoy_api//envoy/service/load_stats/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"health_discovery_service_lib\",\n    srcs = [\"health_discovery_service.cc\"],\n    hdrs = [\"health_discovery_service.h\"],\n    deps = [\n        \":health_checker_lib\",\n        \":upstream_includes\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        
\"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:backoff_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/grpc:async_client_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"//source/server:transport_socket_config_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/health/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"logical_host_lib\",\n    srcs = [\"logical_host.cc\"],\n    hdrs = [\"logical_host.h\"],\n    deps = [\n        \":upstream_includes\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"logical_dns_cluster_lib\",\n    srcs = [\"logical_dns_cluster.cc\"],\n    hdrs = [\"logical_dns_cluster.h\"],\n    deps = [\n        \":cluster_factory_lib\",\n        \":logical_host_lib\",\n        \":upstream_includes\",\n        \"//include/envoy/upstream:cluster_factory_interface\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"original_dst_cluster_lib\",\n    srcs = [\"original_dst_cluster.cc\"],\n    hdrs = [\"original_dst_cluster.h\"],\n    deps = [\n        \":cluster_factory_lib\",\n        \":upstream_includes\",\n        \"//include/envoy/secret:secret_manager_interface\",\n        \"//include/envoy/upstream:cluster_factory_interface\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"outlier_detection_lib\",\n    srcs = [\"outlier_detection_impl.cc\"],\n    hdrs = [\"outlier_detection_impl.h\"],\n    deps = [\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/upstream:outlier_detection_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/cluster/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"resource_manager_lib\",\n    hdrs = [\"resource_manager_impl.h\"],\n    deps = [\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/upstream:resource_manager_interface\",\n        
\"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:basic_resource_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"thread_aware_lb_lib\",\n    srcs = [\"thread_aware_lb_impl.cc\"],\n    hdrs = [\"thread_aware_lb_impl.h\"],\n    external_deps = [\"abseil_synchronization\"],\n    deps = [\n        \":load_balancer_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"maglev_lb_lib\",\n    srcs = [\"maglev_lb.cc\"],\n    hdrs = [\"maglev_lb.h\"],\n    deps = [\n        \":thread_aware_lb_lib\",\n        \":upstream_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"ring_hash_lb_lib\",\n    srcs = [\"ring_hash_lb.cc\"],\n    hdrs = [\"ring_hash_lb.h\"],\n    external_deps = [\n        \"abseil_inlined_vector\",\n    ],\n    deps = [\n        \":thread_aware_lb_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"eds_lib\",\n    srcs = [\"eds.cc\"],\n    hdrs = [\"eds.h\"],\n    deps = [\n        \":cluster_factory_lib\",\n        \":upstream_includes\",\n        \"//include/envoy/config:grpc_mux_interface\",\n        \"//include/envoy/config:subscription_factory_interface\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/secret:secret_manager_interface\",\n        \"//include/envoy/upstream:cluster_factory_interface\",\n        \"//include/envoy/upstream:locality_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:decoded_resource_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/config:subscription_base_interface\",\n        
\"//source/common/config:subscription_factory_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"subset_lb_lib\",\n    srcs = [\"subset_lb.cc\"],\n    hdrs = [\"subset_lb.h\"],\n    deps = [\n        \":load_balancer_lib\",\n        \":maglev_lb_lib\",\n        \":ring_hash_lb_lib\",\n        \":upstream_lib\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/upstream:load_balancer_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"upstream_lib\",\n    srcs = [\"upstream_impl.cc\"],\n    deps = [\n        \":eds_lib\",\n        \":health_checker_lib\",\n        # TODO(mattklein123): Move the clusters to extensions so they can be compiled out.\n        \":logical_dns_cluster_lib\",\n        \":original_dst_cluster_lib\",\n        \":static_cluster_lib\",\n        \":strict_dns_cluster_lib\",\n        \":upstream_includes\",\n        \":transport_socket_match_lib\",\n        
\"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//include/envoy/ssl:context_interface\",\n        \"//include/envoy/upstream:health_checker_interface\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http/http1:codec_stats_lib\",\n        \"//source/common/http/http2:codec_stats_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:socket_option_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/filters/network/common:utility_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"//source/server:transport_socket_config_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"transport_socket_match_lib\",\n    srcs = [\"transport_socket_match_impl.cc\"],\n    deps = [\n        \":upstream_includes\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"static_cluster_lib\",\n    srcs = [\"static_cluster.cc\"],\n    hdrs = [\"static_cluster.h\"],\n    deps = [\n        \":cluster_factory_includes\",\n        \":upstream_includes\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"strict_dns_cluster_lib\",\n    srcs = [\"strict_dns_cluster.cc\"],\n    hdrs = [\"strict_dns_cluster.h\"],\n    deps = [\n        \":cluster_factory_includes\",\n        \":upstream_includes\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"upstream_includes\",\n    hdrs = [\n        \"transport_socket_match_impl.h\",\n        \"upstream_impl.h\",\n    ],\n    external_deps = [\"abseil_synchronization\"],\n    deps = [\n        \":load_balancer_lib\",\n        \":outlier_detection_lib\",\n        \":resource_manager_lib\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:health_checker_interface\",\n        \"//include/envoy/upstream:load_balancer_interface\",\n        \"//include/envoy/upstream:locality_lib\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:callback_impl_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:minimal_logger_lib\",\n      
  \"//source/common/config:metadata_lib\",\n        \"//source/common/config:well_known_names\",\n        \"//source/common/http/http1:codec_stats_lib\",\n        \"//source/common/http/http2:codec_stats_lib\",\n        \"//source/common/init:manager_lib\",\n        \"//source/common/shared_pool:shared_pool_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/server:transport_socket_config_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"cluster_factory_lib\",\n    srcs = [\"cluster_factory_impl.cc\"],\n    deps = [\n        \":cluster_factory_includes\",\n        \":health_checker_lib\",\n        \":upstream_includes\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//include/envoy/ssl:context_interface\",\n        \"//include/envoy/upstream:health_checker_interface\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"//source/server:transport_socket_config_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = 
\"cluster_factory_includes\",\n    hdrs = [\"cluster_factory_impl.h\"],\n    deps = [\n        \":load_balancer_lib\",\n        \":outlier_detection_lib\",\n        \":resource_manager_lib\",\n        \":upstream_includes\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_factory_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:health_checker_interface\",\n        \"//include/envoy/upstream:load_balancer_interface\",\n        \"//include/envoy/upstream:locality_lib\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:callback_impl_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/config:subscription_factory_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/config:well_known_names\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"//source/server:transport_socket_config_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/common/upstream/cds_api_impl.cc",
    "content": "#include \"common/upstream/cds_api_impl.h\"\n\n#include <string>\n\n#include \"envoy/api/v2/cluster.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nCdsApiPtr CdsApiImpl::create(const envoy::config::core::v3::ConfigSource& cds_config,\n                             ClusterManager& cm, Stats::Scope& scope,\n                             ProtobufMessage::ValidationVisitor& validation_visitor) {\n  return CdsApiPtr{new CdsApiImpl(cds_config, cm, scope, validation_visitor)};\n}\n\nCdsApiImpl::CdsApiImpl(const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm,\n                       Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validation_visitor)\n    : Envoy::Config::SubscriptionBase<envoy::config::cluster::v3::Cluster>(\n          cds_config.resource_api_version(), validation_visitor, \"name\"),\n      cm_(cm), scope_(scope.createScope(\"cluster_manager.cds.\")) {\n  const auto resource_name = getResourceName();\n  subscription_ = cm_.subscriptionFactory().subscriptionFromConfigSource(\n      cds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_);\n}\n\nvoid CdsApiImpl::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                                const std::string& version_info) {\n  ClusterManager::ClusterInfoMap clusters_to_remove = cm_.clusters();\n  std::vector<envoy::config::cluster::v3::Cluster> clusters;\n  for (const auto& resource : resources) {\n    
clusters_to_remove.erase(resource.get().name());\n  }\n  Protobuf::RepeatedPtrField<std::string> to_remove_repeated;\n  for (const auto& [cluster_name, _] : clusters_to_remove) {\n    *to_remove_repeated.Add() = cluster_name;\n  }\n  onConfigUpdate(resources, to_remove_repeated, version_info);\n}\n\nvoid CdsApiImpl::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                                const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                                const std::string& system_version_info) {\n  Config::ScopedResume maybe_resume_eds;\n  if (cm_.adsMux()) {\n    const auto type_urls =\n        Config::getAllVersionTypeUrls<envoy::config::endpoint::v3::ClusterLoadAssignment>();\n    maybe_resume_eds = cm_.adsMux()->pause(type_urls);\n  }\n\n  ENVOY_LOG(info, \"cds: add {} cluster(s), remove {} cluster(s)\", added_resources.size(),\n            removed_resources.size());\n\n  std::vector<std::string> exception_msgs;\n  absl::node_hash_set<std::string> cluster_names;\n  bool any_applied = false;\n  for (const auto& resource : added_resources) {\n    envoy::config::cluster::v3::Cluster cluster;\n    try {\n      cluster = dynamic_cast<const envoy::config::cluster::v3::Cluster&>(resource.get().resource());\n      if (!cluster_names.insert(cluster.name()).second) {\n        // NOTE: at this point, the first of these duplicates has already been successfully applied.\n        throw EnvoyException(fmt::format(\"duplicate cluster {} found\", cluster.name()));\n      }\n      if (cm_.addOrUpdateCluster(cluster, resource.get().version())) {\n        any_applied = true;\n        ENVOY_LOG(info, \"cds: add/update cluster '{}'\", cluster.name());\n      } else {\n        ENVOY_LOG(debug, \"cds: add/update cluster '{}' skipped\", cluster.name());\n      }\n    } catch (const EnvoyException& e) {\n      exception_msgs.push_back(fmt::format(\"{}: {}\", cluster.name(), e.what()));\n    }\n  }\n  for (const auto& 
resource_name : removed_resources) {\n    if (cm_.removeCluster(resource_name)) {\n      any_applied = true;\n      ENVOY_LOG(info, \"cds: remove cluster '{}'\", resource_name);\n    }\n  }\n\n  if (any_applied) {\n    system_version_info_ = system_version_info;\n  }\n  runInitializeCallbackIfAny();\n  if (!exception_msgs.empty()) {\n    throw EnvoyException(\n        fmt::format(\"Error adding/updating cluster(s) {}\", absl::StrJoin(exception_msgs, \", \")));\n  }\n}\n\nvoid CdsApiImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                                      const EnvoyException*) {\n  ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason);\n  // We need to allow server startup to continue, even if we have a bad\n  // config.\n  runInitializeCallbackIfAny();\n}\n\nvoid CdsApiImpl::runInitializeCallbackIfAny() {\n  if (initialize_callback_) {\n    initialize_callback_();\n    initialize_callback_ = nullptr;\n  }\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/cds_api_impl.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.validate.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/config/subscription_base.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * CDS API implementation that fetches via Subscription.\n */\nclass CdsApiImpl : public CdsApi,\n                   Envoy::Config::SubscriptionBase<envoy::config::cluster::v3::Cluster>,\n                   Logger::Loggable<Logger::Id::upstream> {\npublic:\n  static CdsApiPtr create(const envoy::config::core::v3::ConfigSource& cds_config,\n                          ClusterManager& cm, Stats::Scope& scope,\n                          ProtobufMessage::ValidationVisitor& validation_visitor);\n\n  // Upstream::CdsApi\n  void initialize() override { subscription_->start({}); }\n  void setInitializedCb(std::function<void()> callback) override {\n    initialize_callback_ = callback;\n  }\n  const std::string versionInfo() const override { return system_version_info_; }\n\nprivate:\n  // Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                      const std::string& system_version_info) override;\n  void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                            const EnvoyException* e) 
override;\n  CdsApiImpl(const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm,\n             Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validation_visitor);\n  void runInitializeCallbackIfAny();\n\n  ClusterManager& cm_;\n  Config::SubscriptionPtr subscription_;\n  std::string system_version_info_;\n  std::function<void()> initialize_callback_;\n  Stats::ScopePtr scope_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/cluster_factory_impl.cc",
    "content": "#include \"common/upstream/cluster_factory_impl.h\"\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/http/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/resolver_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/upstream/health_checker_impl.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nnamespace {\n\nStats::ScopePtr generateStatsScope(const envoy::config::cluster::v3::Cluster& config,\n                                   Stats::Store& stats) {\n  return stats.createScope(fmt::format(\n      \"cluster.{}.\", config.alt_stat_name().empty() ? config.name() : config.alt_stat_name()));\n}\n\n} // namespace\n\nstd::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr> ClusterFactoryImplBase::create(\n    const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cluster_manager,\n    Stats::Store& stats, ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver,\n    Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime,\n    Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager,\n    const LocalInfo::LocalInfo& local_info, Server::Admin& admin,\n    Singleton::Manager& singleton_manager, Outlier::EventLoggerSharedPtr outlier_event_logger,\n    bool added_via_api, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) {\n  std::string cluster_type;\n\n  if (!cluster.has_cluster_type()) {\n    switch (cluster.type()) {\n    case envoy::config::cluster::v3::Cluster::STATIC:\n      cluster_type = Extensions::Clusters::ClusterTypes::get().Static;\n      break;\n    case envoy::config::cluster::v3::Cluster::STRICT_DNS:\n      cluster_type = Extensions::Clusters::ClusterTypes::get().StrictDns;\n      break;\n    case envoy::config::cluster::v3::Cluster::LOGICAL_DNS:\n      cluster_type = Extensions::Clusters::ClusterTypes::get().LogicalDns;\n      
break;\n    case envoy::config::cluster::v3::Cluster::ORIGINAL_DST:\n      cluster_type = Extensions::Clusters::ClusterTypes::get().OriginalDst;\n      break;\n    case envoy::config::cluster::v3::Cluster::EDS:\n      cluster_type = Extensions::Clusters::ClusterTypes::get().Eds;\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  } else {\n    cluster_type = cluster.cluster_type().name();\n  }\n\n  if (cluster.common_lb_config().has_consistent_hashing_lb_config() &&\n      cluster.common_lb_config().consistent_hashing_lb_config().use_hostname_for_hashing() &&\n      cluster.type() != envoy::config::cluster::v3::Cluster::STRICT_DNS) {\n    throw EnvoyException(fmt::format(\n        \"Cannot use hostname for consistent hashing loadbalancing for cluster of type: '{}'\",\n        cluster_type));\n  }\n  ClusterFactory* factory = Registry::FactoryRegistry<ClusterFactory>::getFactory(cluster_type);\n\n  if (factory == nullptr) {\n    throw EnvoyException(fmt::format(\n        \"Didn't find a registered cluster factory implementation for name: '{}'\", cluster_type));\n  }\n\n  ClusterFactoryContextImpl context(\n      cluster_manager, stats, tls, std::move(dns_resolver), ssl_context_manager, runtime,\n      dispatcher, log_manager, local_info, admin, singleton_manager,\n      std::move(outlier_event_logger), added_via_api, validation_visitor, api);\n  return factory->create(cluster, context);\n}\n\nNetwork::DnsResolverSharedPtr\nClusterFactoryImplBase::selectDnsResolver(const envoy::config::cluster::v3::Cluster& cluster,\n                                          ClusterFactoryContext& context) {\n  // We make this a shared pointer to deal with the distinct ownership\n  // scenarios that can exist: in one case, we pass in the \"default\"\n  // DNS resolver that is owned by the Server::Instance. 
In the case\n  // where 'dns_resolvers' is specified, we have per-cluster DNS\n  // resolvers that are created here but ownership resides with\n  // StrictDnsClusterImpl/LogicalDnsCluster.\n  if (!cluster.dns_resolvers().empty()) {\n    const auto& resolver_addrs = cluster.dns_resolvers();\n    std::vector<Network::Address::InstanceConstSharedPtr> resolvers;\n    resolvers.reserve(resolver_addrs.size());\n    for (const auto& resolver_addr : resolver_addrs) {\n      resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr));\n    }\n    const bool use_tcp_for_dns_lookups = cluster.use_tcp_for_dns_lookups();\n    return context.dispatcher().createDnsResolver(resolvers, use_tcp_for_dns_lookups);\n  }\n\n  return context.dnsResolver();\n}\n\nstd::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>\nClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluster,\n                               ClusterFactoryContext& context) {\n  auto stats_scope = generateStatsScope(cluster, context.stats());\n  Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      context.admin(), context.sslContextManager(), *stats_scope, context.clusterManager(),\n      context.localInfo(), context.dispatcher(), context.stats(), context.singletonManager(),\n      context.tls(), context.messageValidationVisitor(), context.api());\n\n  std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> new_cluster_pair =\n      createClusterImpl(cluster, context, factory_context, std::move(stats_scope));\n\n  if (!cluster.health_checks().empty()) {\n    // TODO(htuch): Need to support multiple health checks in v2.\n    if (cluster.health_checks().size() != 1) {\n      throw EnvoyException(\"Multiple health checks not supported\");\n    } else {\n      new_cluster_pair.first->setHealthChecker(HealthCheckerFactory::create(\n          cluster.health_checks()[0], *new_cluster_pair.first, context.runtime(),\n          context.dispatcher(), 
context.logManager(), context.messageValidationVisitor(),\n          context.api()));\n    }\n  }\n\n  new_cluster_pair.first->setOutlierDetector(Outlier::DetectorImplFactory::createForCluster(\n      *new_cluster_pair.first, cluster, context.dispatcher(), context.runtime(),\n      context.outlierEventLogger()));\n  return new_cluster_pair;\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/cluster_factory_impl.h",
    "content": "#pragma once\n\n#include <array>\n#include <atomic>\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_factory.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/health_checker.h\"\n#include \"envoy/upstream/load_balancer.h\"\n#include \"envoy/upstream/locality.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/callback_impl.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/metadata.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/outlier_detection_impl.h\"\n#include \"common/upstream/resource_manager_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/clusters/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass ClusterFactoryContextImpl : public ClusterFactoryContext {\n\npublic:\n  ClusterFactoryContextImpl(ClusterManager& cluster_manager, Stats::Store& stats,\n                            ThreadLocal::SlotAllocator& tls,\n                            Network::DnsResolverSharedPtr dns_resolver,\n                            Ssl::ContextManager& 
ssl_context_manager, Runtime::Loader& runtime,\n                            Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager,\n                            const LocalInfo::LocalInfo& local_info, Server::Admin& admin,\n                            Singleton::Manager& singleton_manager,\n                            Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api,\n                            ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api)\n      : cluster_manager_(cluster_manager), stats_(stats), tls_(tls),\n        dns_resolver_(std::move(dns_resolver)), ssl_context_manager_(ssl_context_manager),\n        runtime_(runtime), dispatcher_(dispatcher), log_manager_(log_manager),\n        local_info_(local_info), admin_(admin), singleton_manager_(singleton_manager),\n        outlier_event_logger_(std::move(outlier_event_logger)), added_via_api_(added_via_api),\n        validation_visitor_(validation_visitor), api_(api) {}\n\n  ClusterManager& clusterManager() override { return cluster_manager_; }\n  Stats::Store& stats() override { return stats_; }\n  ThreadLocal::SlotAllocator& tls() override { return tls_; }\n  Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; }\n  Ssl::ContextManager& sslContextManager() override { return ssl_context_manager_; }\n  Runtime::Loader& runtime() override { return runtime_; }\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n  AccessLog::AccessLogManager& logManager() override { return log_manager_; }\n  const LocalInfo::LocalInfo& localInfo() override { return local_info_; }\n  Server::Admin& admin() override { return admin_; }\n  Singleton::Manager& singletonManager() override { return singleton_manager_; }\n  Outlier::EventLoggerSharedPtr outlierEventLogger() override { return outlier_event_logger_; }\n  bool addedViaApi() override { return added_via_api_; }\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() 
override {\n    return validation_visitor_;\n  }\n  Api::Api& api() override { return api_; }\n\nprivate:\n  ClusterManager& cluster_manager_;\n  Stats::Store& stats_;\n  ThreadLocal::SlotAllocator& tls_;\n  Network::DnsResolverSharedPtr dns_resolver_;\n  Ssl::ContextManager& ssl_context_manager_;\n  Runtime::Loader& runtime_;\n  Event::Dispatcher& dispatcher_;\n  AccessLog::AccessLogManager& log_manager_;\n  const LocalInfo::LocalInfo& local_info_;\n  Server::Admin& admin_;\n  Singleton::Manager& singleton_manager_;\n  Outlier::EventLoggerSharedPtr outlier_event_logger_;\n  const bool added_via_api_;\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n  Api::Api& api_;\n};\n\n/**\n * Base class for all cluster factory implementation. This class can be directly extended if the\n * custom cluster does not have any custom configuration. For custom cluster with custom\n * configuration, use ConfigurableClusterFactoryBase instead.\n */\nclass ClusterFactoryImplBase : public ClusterFactory {\npublic:\n  /**\n   * Static method to get the registered cluster factory and create an instance of cluster.\n   */\n  static std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>\n  create(const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cluster_manager,\n         Stats::Store& stats, ThreadLocal::Instance& tls,\n         Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager,\n         Runtime::Loader& runtime, Event::Dispatcher& dispatcher,\n         AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info,\n         Server::Admin& admin, Singleton::Manager& singleton_manager,\n         Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api,\n         ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api);\n\n  /**\n   * Create a dns resolver to be used by the cluster.\n   */\n  Network::DnsResolverSharedPtr\n  selectDnsResolver(const 
envoy::config::cluster::v3::Cluster& cluster,\n                    ClusterFactoryContext& context);\n\n  // Upstream::ClusterFactory\n  std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>\n  create(const envoy::config::cluster::v3::Cluster& cluster,\n         ClusterFactoryContext& context) override;\n  std::string name() const override { return name_; }\n\nprotected:\n  ClusterFactoryImplBase(const std::string& name) : name_(name) {}\n\nprivate:\n  /**\n   * Create an instance of ClusterImplBase.\n   */\n  virtual std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> createClusterImpl(\n      const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) PURE;\n  const std::string name_;\n};\n\n/**\n * Common base class for custom cluster factory with custom configuration.\n * @param ConfigProto is the configuration protobuf.\n */\ntemplate <class ConfigProto> class ConfigurableClusterFactoryBase : public ClusterFactoryImplBase {\npublic:\n  /**\n   * @return ProtobufTypes::MessagePtr create empty config proto message.\n   */\n  virtual ProtobufTypes::MessagePtr createEmptyConfigProto() {\n    return std::make_unique<ConfigProto>();\n  }\n\nprotected:\n  ConfigurableClusterFactoryBase(const std::string& name) : ClusterFactoryImplBase(name) {}\n\nprivate:\n  std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> createClusterImpl(\n      const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override {\n    ProtobufTypes::MessagePtr config = createEmptyConfigProto();\n    Config::Utility::translateOpaqueConfig(\n        cluster.cluster_type().typed_config(), ProtobufWkt::Struct::default_instance(),\n        
socket_factory_context.messageValidationVisitor(), *config);\n    return createClusterWithConfig(cluster,\n                                   MessageUtil::downcastAndValidate<const ConfigProto&>(\n                                       *config, context.messageValidationVisitor()),\n                                   context, socket_factory_context, std::move(stats_scope));\n  }\n\n  virtual std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> createClusterWithConfig(\n      const envoy::config::cluster::v3::Cluster& cluster, const ConfigProto& proto_config,\n      ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) PURE;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/cluster_manager_impl.cc",
    "content": "#include \"common/upstream/cluster_manager_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/new_grpc_mux_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/grpc/async_client_manager_impl.h\"\n#include \"common/http/async_client_impl.h\"\n#include \"common/http/http1/conn_pool.h\"\n#include \"common/http/http2/conn_pool.h\"\n#include \"common/network/resolver_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/shadow_writer_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/tcp/conn_pool.h\"\n#include \"common/tcp/original_conn_pool.h\"\n#include \"common/upstream/cds_api_impl.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/maglev_lb.h\"\n#include \"common/upstream/original_dst_cluster.h\"\n#include \"common/upstream/priority_conn_pool_map_impl.h\"\n#include \"common/upstream/ring_hash_lb.h\"\n#include \"common/upstream/subset_lb.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nvoid addOptionsIfNotNull(Network::Socket::OptionsSharedPtr& options,\n                         const Network::Socket::OptionsSharedPtr& to_add) {\n  if (to_add != nullptr) {\n    Network::Socket::appendOptions(options, to_add);\n  }\n}\n\n} // namespace\n\nvoid 
ClusterManagerInitHelper::addCluster(Cluster& cluster) {\n  // See comments in ClusterManagerImpl::addOrUpdateCluster() for why this is only called during\n  // server initialization.\n  ASSERT(state_ != State::AllClustersInitialized);\n\n  const auto initialize_cb = [&cluster, this] { onClusterInit(cluster); };\n  if (cluster.initializePhase() == Cluster::InitializePhase::Primary) {\n    primary_init_clusters_.push_back(&cluster);\n    cluster.initialize(initialize_cb);\n  } else {\n    ASSERT(cluster.initializePhase() == Cluster::InitializePhase::Secondary);\n    secondary_init_clusters_.push_back(&cluster);\n    if (started_secondary_initialize_) {\n      // This can happen if we get a second CDS update that adds new clusters after we have\n      // already started secondary init. In this case, just immediately initialize.\n      cluster.initialize(initialize_cb);\n    }\n  }\n\n  ENVOY_LOG(debug, \"cm init: adding: cluster={} primary={} secondary={}\", cluster.info()->name(),\n            primary_init_clusters_.size(), secondary_init_clusters_.size());\n}\n\nvoid ClusterManagerInitHelper::onClusterInit(Cluster& cluster) {\n  ASSERT(state_ != State::AllClustersInitialized);\n  per_cluster_init_callback_(cluster);\n  removeCluster(cluster);\n}\n\nvoid ClusterManagerInitHelper::removeCluster(Cluster& cluster) {\n  if (state_ == State::AllClustersInitialized) {\n    return;\n  }\n\n  // There is a remote edge case where we can remove a cluster via CDS that has not yet been\n  // initialized. 
When called via the remove cluster API this code catches that case.\n  std::list<Cluster*>* cluster_list;\n  if (cluster.initializePhase() == Cluster::InitializePhase::Primary) {\n    cluster_list = &primary_init_clusters_;\n  } else {\n    ASSERT(cluster.initializePhase() == Cluster::InitializePhase::Secondary);\n    cluster_list = &secondary_init_clusters_;\n  }\n\n  // It is possible that the cluster we are removing has already been initialized, and is not\n  // present in the initializer list. If so, this is fine.\n  cluster_list->remove(&cluster);\n  ENVOY_LOG(debug, \"cm init: init complete: cluster={} primary={} secondary={}\",\n            cluster.info()->name(), primary_init_clusters_.size(), secondary_init_clusters_.size());\n  maybeFinishInitialize();\n}\n\nvoid ClusterManagerInitHelper::initializeSecondaryClusters() {\n  started_secondary_initialize_ = true;\n  // Cluster::initialize() method can modify the list of secondary_init_clusters_ to remove\n  // the item currently being initialized, so we eschew range-based-for and do this complicated\n  // dance to increment the iterator before calling initialize.\n  for (auto iter = secondary_init_clusters_.begin(); iter != secondary_init_clusters_.end();) {\n    Cluster* cluster = *iter;\n    ++iter;\n    ENVOY_LOG(debug, \"initializing secondary cluster {}\", cluster->info()->name());\n    cluster->initialize([cluster, this] { onClusterInit(*cluster); });\n  }\n}\n\nvoid ClusterManagerInitHelper::maybeFinishInitialize() {\n  // Do not do anything if we are still doing the initial static load or if we are waiting for\n  // CDS initialize.\n  ENVOY_LOG(debug, \"maybe finish initialize state: {}\", enumToInt(state_));\n  if (state_ == State::Loading || state_ == State::WaitingToStartCdsInitialization) {\n    return;\n  }\n\n  ASSERT(state_ == State::WaitingToStartSecondaryInitialization ||\n         state_ == State::CdsInitialized ||\n         state_ == State::WaitingForPrimaryInitializationToComplete);\n  
ENVOY_LOG(debug, \"maybe finish initialize primary init clusters empty: {}\",\n            primary_init_clusters_.empty());\n  // If we are still waiting for primary clusters to initialize, do nothing.\n  if (!primary_init_clusters_.empty()) {\n    return;\n  } else if (state_ == State::WaitingForPrimaryInitializationToComplete) {\n    state_ = State::WaitingToStartSecondaryInitialization;\n    if (primary_clusters_initialized_callback_) {\n      primary_clusters_initialized_callback_();\n    }\n    return;\n  }\n\n  // If we are still waiting for secondary clusters to initialize, see if we need to first call\n  // initialize on them. This is only done once.\n  ENVOY_LOG(debug, \"maybe finish initialize secondary init clusters empty: {}\",\n            secondary_init_clusters_.empty());\n  if (!secondary_init_clusters_.empty()) {\n    if (!started_secondary_initialize_) {\n      ENVOY_LOG(info, \"cm init: initializing secondary clusters\");\n      // If the first CDS response doesn't have any primary cluster, ClusterLoadAssignment\n      // should be already paused by CdsApiImpl::onConfigUpdate(). Need to check that to\n      // avoid double pause ClusterLoadAssignment.\n      Config::ScopedResume maybe_resume_eds;\n      if (cm_.adsMux()) {\n        const auto type_urls =\n            Config::getAllVersionTypeUrls<envoy::config::endpoint::v3::ClusterLoadAssignment>();\n        maybe_resume_eds = cm_.adsMux()->pause(type_urls);\n      }\n      initializeSecondaryClusters();\n    }\n    return;\n  }\n\n  // At this point, if we are doing static init, and we have CDS, start CDS init. 
Otherwise, move\n  // directly to initialized.\n  started_secondary_initialize_ = false;\n  ENVOY_LOG(debug, \"maybe finish initialize cds api ready: {}\", cds_ != nullptr);\n  if (state_ == State::WaitingToStartSecondaryInitialization && cds_) {\n    ENVOY_LOG(info, \"cm init: initializing cds\");\n    state_ = State::WaitingToStartCdsInitialization;\n    cds_->initialize();\n  } else {\n    ENVOY_LOG(info, \"cm init: all clusters initialized\");\n    state_ = State::AllClustersInitialized;\n    if (initialized_callback_) {\n      initialized_callback_();\n    }\n  }\n}\n\nvoid ClusterManagerInitHelper::onStaticLoadComplete() {\n  ASSERT(state_ == State::Loading);\n  // After initialization of primary clusters has completed, transition to\n  // waiting for signal to initialize secondary clusters and then CDS.\n  state_ = State::WaitingForPrimaryInitializationToComplete;\n  maybeFinishInitialize();\n}\n\nvoid ClusterManagerInitHelper::startInitializingSecondaryClusters() {\n  ASSERT(state_ == State::WaitingToStartSecondaryInitialization);\n  ENVOY_LOG(debug, \"continue initializing secondary clusters\");\n  maybeFinishInitialize();\n}\n\nvoid ClusterManagerInitHelper::setCds(CdsApi* cds) {\n  ASSERT(state_ == State::Loading);\n  cds_ = cds;\n  if (cds_) {\n    cds_->setInitializedCb([this]() -> void {\n      ASSERT(state_ == State::WaitingToStartCdsInitialization);\n      state_ = State::CdsInitialized;\n      maybeFinishInitialize();\n    });\n  }\n}\n\nvoid ClusterManagerInitHelper::setInitializedCb(\n    ClusterManager::InitializationCompleteCallback callback) {\n  if (state_ == State::AllClustersInitialized) {\n    callback();\n  } else {\n    initialized_callback_ = callback;\n  }\n}\n\nvoid ClusterManagerInitHelper::setPrimaryClustersInitializedCb(\n    ClusterManager::PrimaryClustersReadyCallback callback) {\n  // The callback must be set before or at the `WaitingToStartSecondaryInitialization` state.\n  ASSERT(state_ == 
State::WaitingToStartSecondaryInitialization ||\n         state_ == State::WaitingForPrimaryInitializationToComplete || state_ == State::Loading);\n  if (state_ == State::WaitingToStartSecondaryInitialization) {\n    // This is the case where all clusters are STATIC and without health checking.\n    callback();\n  } else {\n    primary_clusters_initialized_callback_ = callback;\n  }\n}\n\nClusterManagerImpl::ClusterManagerImpl(\n    const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory,\n    Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime,\n    const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager,\n    Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin,\n    ProtobufMessage::ValidationContext& validation_context, Api::Api& api,\n    Http::Context& http_context, Grpc::Context& grpc_context)\n    : factory_(factory), runtime_(runtime), stats_(stats), tls_(tls.allocateSlot()),\n      random_(api.randomGenerator()),\n      bind_config_(bootstrap.cluster_manager().upstream_bind_config()), local_info_(local_info),\n      cm_stats_(generateStats(stats)),\n      init_helper_(*this, [this](Cluster& cluster) { onClusterInit(cluster); }),\n      config_tracker_entry_(\n          admin.getConfigTracker().add(\"clusters\", [this] { return dumpClusterConfigs(); })),\n      time_source_(main_thread_dispatcher.timeSource()), dispatcher_(main_thread_dispatcher),\n      http_context_(http_context),\n      subscription_factory_(local_info, main_thread_dispatcher, *this,\n                            validation_context.dynamicValidationVisitor(), api, runtime_) {\n  async_client_manager_ = std::make_unique<Grpc::AsyncClientManagerImpl>(\n      *this, tls, time_source_, api, grpc_context.statNames());\n  const auto& cm_config = bootstrap.cluster_manager();\n  if (cm_config.has_outlier_detection()) {\n    const std::string event_log_file_path = 
cm_config.outlier_detection().event_log_path();\n    if (!event_log_file_path.empty()) {\n      outlier_event_logger_ = std::make_shared<Outlier::EventLoggerImpl>(\n          log_manager, event_log_file_path, time_source_);\n    }\n  }\n\n  // We need to know whether we're zone aware early on, so make sure we do this lookup\n  // before we load any clusters.\n  if (!cm_config.local_cluster_name().empty()) {\n    local_cluster_name_ = cm_config.local_cluster_name();\n  }\n\n  const auto& dyn_resources = bootstrap.dynamic_resources();\n\n  // Cluster loading happens in two phases: first all the primary clusters are loaded, and then all\n  // the secondary clusters are loaded. As it currently stands all non-EDS clusters and EDS which\n  // load endpoint definition from file are primary and\n  // (REST,GRPC,DELTA_GRPC) EDS clusters are secondary. This two phase\n  // loading is done because in v2 configuration each EDS cluster individually sets up a\n  // subscription. When this subscription is an API source the cluster will depend on a non-EDS\n  // cluster, so the non-EDS clusters must be loaded first.\n  auto is_primary_cluster = [](const envoy::config::cluster::v3::Cluster& cluster) -> bool {\n    return cluster.type() != envoy::config::cluster::v3::Cluster::EDS ||\n           (cluster.type() == envoy::config::cluster::v3::Cluster::EDS &&\n            cluster.eds_cluster_config().eds_config().config_source_specifier_case() ==\n                envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath);\n  };\n  // Build book-keeping for which clusters are primary. 
This is useful when we\n  // invoke loadCluster() below and it needs the complete set of primaries.\n  for (const auto& cluster : bootstrap.static_resources().clusters()) {\n    if (is_primary_cluster(cluster)) {\n      primary_clusters_.insert(cluster.name());\n    }\n  }\n  // Load all the primary clusters.\n  for (const auto& cluster : bootstrap.static_resources().clusters()) {\n    if (is_primary_cluster(cluster)) {\n      loadCluster(cluster, \"\", false, active_clusters_);\n    }\n  }\n\n  // Now setup ADS if needed, this might rely on a primary cluster.\n  // This is the only point where distinction between delta ADS and state-of-the-world ADS is made.\n  // After here, we just have a GrpcMux interface held in ads_mux_, which hides\n  // whether the backing implementation is delta or SotW.\n  if (dyn_resources.has_ads_config()) {\n    if (dyn_resources.ads_config().api_type() ==\n        envoy::config::core::v3::ApiConfigSource::DELTA_GRPC) {\n      ads_mux_ = std::make_shared<Config::NewGrpcMuxImpl>(\n          Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_,\n                                                         dyn_resources.ads_config(), stats, false)\n              ->create(),\n          main_thread_dispatcher,\n          *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n              dyn_resources.ads_config().transport_api_version() ==\n                      envoy::config::core::v3::ApiVersion::V3\n                  // TODO(htuch): consolidate with type_to_endpoint.cc, once we sort out the future\n                  // direction of that module re: https://github.com/envoyproxy/envoy/issues/10650.\n                  ? 
\"envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources\"\n                  : \"envoy.service.discovery.v2.AggregatedDiscoveryService.\"\n                    \"DeltaAggregatedResources\"),\n          dyn_resources.ads_config().transport_api_version(), random_, stats_,\n          Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), local_info);\n    } else {\n      ads_mux_ = std::make_shared<Config::GrpcMuxImpl>(\n          local_info,\n          Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_,\n                                                         dyn_resources.ads_config(), stats, false)\n              ->create(),\n          main_thread_dispatcher,\n          *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n              dyn_resources.ads_config().transport_api_version() ==\n                      envoy::config::core::v3::ApiVersion::V3\n                  // TODO(htuch): consolidate with type_to_endpoint.cc, once we sort out the future\n                  // direction of that module re: https://github.com/envoyproxy/envoy/issues/10650.\n                  ? 
\"envoy.service.discovery.v3.AggregatedDiscoveryService.\"\n                    \"StreamAggregatedResources\"\n                  : \"envoy.service.discovery.v2.AggregatedDiscoveryService.\"\n                    \"StreamAggregatedResources\"),\n          dyn_resources.ads_config().transport_api_version(), random_, stats_,\n          Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()),\n          bootstrap.dynamic_resources().ads_config().set_node_on_first_message_only());\n    }\n  } else {\n    ads_mux_ = std::make_unique<Config::NullGrpcMuxImpl>();\n  }\n\n  // After ADS is initialized, load EDS static clusters as EDS config may potentially need ADS.\n  for (const auto& cluster : bootstrap.static_resources().clusters()) {\n    // Now load all the secondary clusters.\n    if (cluster.type() == envoy::config::cluster::v3::Cluster::EDS &&\n        cluster.eds_cluster_config().eds_config().config_source_specifier_case() !=\n            envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath) {\n      loadCluster(cluster, \"\", false, active_clusters_);\n    }\n  }\n\n  cm_stats_.cluster_added_.add(bootstrap.static_resources().clusters().size());\n  updateClusterCounts();\n\n  if (local_cluster_name_ &&\n      (active_clusters_.find(local_cluster_name_.value()) == active_clusters_.end())) {\n    throw EnvoyException(\n        fmt::format(\"local cluster '{}' must be defined\", local_cluster_name_.value()));\n  }\n\n  // Once the initial set of static bootstrap clusters are created (including the local cluster),\n  // we can instantiate the thread local cluster manager.\n  tls_->set([this, local_cluster_name = local_cluster_name_](\n                Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<ThreadLocalClusterManagerImpl>(*this, dispatcher, local_cluster_name);\n  });\n\n  // We can now potentially create the CDS API once the backing cluster exists.\n  if 
(dyn_resources.has_cds_config()) {\n    cds_api_ = factory_.createCds(dyn_resources.cds_config(), *this);\n    init_helper_.setCds(cds_api_.get());\n  } else {\n    init_helper_.setCds(nullptr);\n  }\n\n  // Proceed to add all static bootstrap clusters to the init manager. This will immediately\n  // initialize any primary clusters. Post-init processing further initializes any thread\n  // aware load balancer and sets up the per-worker host set updates.\n  for (auto& cluster : active_clusters_) {\n    init_helper_.addCluster(*cluster.second->cluster_);\n  }\n\n  // Potentially move to secondary initialization on the static bootstrap clusters if all primary\n  // clusters have already initialized. (E.g., if all static).\n  init_helper_.onStaticLoadComplete();\n\n  ads_mux_->start();\n}\n\nvoid ClusterManagerImpl::initializeSecondaryClusters(\n    const envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n  init_helper_.startInitializingSecondaryClusters();\n\n  const auto& cm_config = bootstrap.cluster_manager();\n  if (cm_config.has_load_stats_config()) {\n    const auto& load_stats_config = cm_config.load_stats_config();\n\n    load_stats_reporter_ = std::make_unique<LoadStatsReporter>(\n        local_info_, *this, stats_,\n        Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, load_stats_config,\n                                                       stats_, false)\n            ->create(),\n        load_stats_config.transport_api_version(), dispatcher_);\n  }\n}\n\nClusterManagerStats ClusterManagerImpl::generateStats(Stats::Scope& scope) {\n  const std::string final_prefix = \"cluster_manager.\";\n  return {ALL_CLUSTER_MANAGER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),\n                                    POOL_GAUGE_PREFIX(scope, final_prefix))};\n}\n\nvoid ClusterManagerImpl::onClusterInit(Cluster& cluster) {\n  // This routine is called when a cluster has finished initializing. 
The cluster has not yet\n  // been setup for cross-thread updates to avoid needless updates during initialization. The order\n  // of operations here is important. We start by initializing the thread aware load balancer if\n  // needed. This must happen first so cluster updates are heard first by the load balancer.\n  auto cluster_data = active_clusters_.find(cluster.info()->name());\n  if (cluster_data->second->thread_aware_lb_ != nullptr) {\n    cluster_data->second->thread_aware_lb_->initialize();\n  }\n\n  // Now setup for cross-thread updates.\n  cluster.prioritySet().addMemberUpdateCb(\n      [&cluster, this](const HostVector&, const HostVector& hosts_removed) -> void {\n        if (cluster.info()->lbConfig().close_connections_on_host_set_change()) {\n          for (const auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {\n            // This will drain all tcp and http connection pools.\n            postThreadLocalDrainConnections(cluster, host_set->hosts());\n          }\n        } else {\n          // TODO(snowp): Should this be subject to merge windows?\n\n          // Whenever hosts are removed from the cluster, we make each TLS cluster drain it's\n          // connection pools for the removed hosts. If `close_connections_on_host_set_change` is\n          // enabled, this case will be covered by first `if` statement, where all\n          // connection pools are drained.\n          if (!hosts_removed.empty()) {\n            postThreadLocalDrainConnections(cluster, hosts_removed);\n          }\n        }\n      });\n\n  cluster.prioritySet().addPriorityUpdateCb([&cluster, this](uint32_t priority,\n                                                             const HostVector& hosts_added,\n                                                             const HostVector& hosts_removed) {\n    // This fires when a cluster is about to have an updated member set. 
We need to send this\n    // out to all of the thread local configurations.\n\n    // Should we save this update and merge it with other updates?\n    //\n    // Note that we can only _safely_ merge updates that have no added/removed hosts. That is,\n    // only those updates that signal a change in host healthcheck state, weight or metadata.\n    //\n    // We've discussed merging updates related to hosts being added/removed, but it's really\n    // tricky to merge those given that downstream consumers of these updates expect to see the\n    // full list of updates, not a condensed one. This is because they use the broadcasted\n    // HostSharedPtrs within internal maps to track hosts. If we fail to broadcast the entire list\n    // of removals, these maps will leak those HostSharedPtrs.\n    //\n    // See https://github.com/envoyproxy/envoy/pull/3941 for more context.\n    bool scheduled = false;\n    const auto merge_timeout =\n        PROTOBUF_GET_MS_OR_DEFAULT(cluster.info()->lbConfig(), update_merge_window, 1000);\n    // Remember: we only merge updates with no adds/removes — just hc/weight/metadata changes.\n    const bool is_mergeable = hosts_added.empty() && hosts_removed.empty();\n\n    if (merge_timeout > 0) {\n      // If this is not mergeable, we should cancel any scheduled updates since\n      // we'll deliver it immediately.\n      scheduled = scheduleUpdate(cluster, priority, is_mergeable, merge_timeout);\n    }\n\n    // If an update was not scheduled for later, deliver it immediately.\n    if (!scheduled) {\n      cm_stats_.cluster_updated_.inc();\n      postThreadLocalClusterUpdate(cluster, priority, hosts_added, hosts_removed);\n    }\n  });\n\n  // Finally, if the cluster has any hosts, post updates cross-thread so the per-thread load\n  // balancers are ready.\n  for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {\n    if (host_set->hosts().empty()) {\n      continue;\n    }\n    postThreadLocalClusterUpdate(cluster, 
host_set->priority(), host_set->hosts(), HostVector{});\n  }\n}\n\nbool ClusterManagerImpl::scheduleUpdate(const Cluster& cluster, uint32_t priority, bool mergeable,\n                                        const uint64_t timeout) {\n  // Find pending updates for this cluster.\n  auto& updates_by_prio = updates_map_[cluster.info()->name()];\n  if (!updates_by_prio) {\n    updates_by_prio = std::make_unique<PendingUpdatesByPriorityMap>();\n  }\n\n  // Find pending updates for this priority.\n  auto& updates = (*updates_by_prio)[priority];\n  if (!updates) {\n    updates = std::make_unique<PendingUpdates>();\n  }\n\n  // Has an update_merge_window gone by since the last update? If so, don't schedule\n  // the update so it can be applied immediately. Ditto if this is not a mergeable update.\n  const auto delta = time_source_.monotonicTime() - updates->last_updated_;\n  const uint64_t delta_ms = std::chrono::duration_cast<std::chrono::milliseconds>(delta).count();\n  const bool out_of_merge_window = delta_ms > timeout;\n  if (out_of_merge_window || !mergeable) {\n    // If there was a pending update, we cancel the pending merged update.\n    //\n    // Note: it's possible that even though we are outside of a merge window (delta_ms > timeout),\n    // a timer is enabled. This race condition is fine, since we'll disable the timer here and\n    // deliver the update immediately.\n\n    // Why wasn't the update scheduled for later delivery? We keep some stats that are helpful\n    // to understand why merging did not happen. 
There's 2 things we are tracking here:\n\n    // 1) Was this update out of a merge window?\n    if (mergeable && out_of_merge_window) {\n      cm_stats_.update_out_of_merge_window_.inc();\n    }\n\n    // 2) Were there previous updates that we are cancelling (and delivering immediately)?\n    if (updates->disableTimer()) {\n      cm_stats_.update_merge_cancelled_.inc();\n    }\n\n    updates->last_updated_ = time_source_.monotonicTime();\n    return false;\n  }\n\n  // If there's no timer, create one.\n  if (updates->timer_ == nullptr) {\n    updates->timer_ = dispatcher_.createTimer([this, &cluster, priority, &updates]() -> void {\n      applyUpdates(cluster, priority, *updates);\n    });\n  }\n\n  // Ensure there's a timer set to deliver these updates.\n  if (!updates->timer_->enabled()) {\n    updates->enableTimer(timeout);\n  }\n\n  return true;\n}\n\nvoid ClusterManagerImpl::applyUpdates(const Cluster& cluster, uint32_t priority,\n                                      PendingUpdates& updates) {\n  // Deliver pending updates.\n\n  // Remember that these merged updates are _only_ for updates related to\n  // HC/weight/metadata changes. That's why added/removed are empty. All\n  // adds/removals were already immediately broadcasted.\n  static const HostVector hosts_added;\n  static const HostVector hosts_removed;\n\n  postThreadLocalClusterUpdate(cluster, priority, hosts_added, hosts_removed);\n\n  cm_stats_.cluster_updated_via_merge_.inc();\n  updates.last_updated_ = time_source_.monotonicTime();\n}\n\nbool ClusterManagerImpl::addOrUpdateCluster(const envoy::config::cluster::v3::Cluster& cluster,\n                                            const std::string& version_info) {\n  // First we need to see if this new config is new or an update to an existing dynamic cluster.\n  // We don't allow updates to statically configured clusters in the main configuration. 
We check\n  // both the warming clusters and the active clusters to see if we need an update or the update\n  // should be blocked.\n  const std::string& cluster_name = cluster.name();\n  const auto existing_active_cluster = active_clusters_.find(cluster_name);\n  const auto existing_warming_cluster = warming_clusters_.find(cluster_name);\n  const uint64_t new_hash = MessageUtil::hash(cluster);\n  if ((existing_active_cluster != active_clusters_.end() &&\n       existing_active_cluster->second->blockUpdate(new_hash)) ||\n      (existing_warming_cluster != warming_clusters_.end() &&\n       existing_warming_cluster->second->blockUpdate(new_hash))) {\n    return false;\n  }\n\n  if (existing_active_cluster != active_clusters_.end() ||\n      existing_warming_cluster != warming_clusters_.end()) {\n    if (existing_active_cluster != active_clusters_.end()) {\n      // The following init manager remove call is a NOP in the case we are already initialized.\n      // It's just kept here to avoid additional logic.\n      init_helper_.removeCluster(*existing_active_cluster->second->cluster_);\n    } else {\n      // Validate that warming clusters are not added to the init_helper_.\n      // NOTE: This loop is compiled out in optimized builds.\n      for (const std::list<Cluster*>& cluster_list :\n           {std::cref(init_helper_.primary_init_clusters_),\n            std::cref(init_helper_.secondary_init_clusters_)}) {\n        ASSERT(!std::any_of(cluster_list.begin(), cluster_list.end(),\n                            [&existing_warming_cluster](Cluster* cluster) {\n                              return existing_warming_cluster->second->cluster_.get() == cluster;\n                            }));\n      }\n    }\n    cm_stats_.cluster_modified_.inc();\n  } else {\n    cm_stats_.cluster_added_.inc();\n  }\n\n  // There are two discrete paths here depending on when we are adding/updating a cluster.\n  // 1) During initial server load we use the init manager which handles 
complex logic related to\n  //    primary/secondary init, static/CDS init, warming all clusters, etc.\n  // 2) After initial server load, we handle warming independently for each cluster in the warming\n  //    map.\n  // Note: It's likely possible that all warming logic could be centralized in the init manager, but\n  //       a decision was made to split the logic given how complex the init manager already is. In\n  //       the future we may decide to undergo a refactor to unify the logic but the effort/risk to\n  //       do that right now does not seem worth it given that the logic is generally pretty clean\n  //       and easy to understand.\n  const bool use_active_map =\n      init_helper_.state() != ClusterManagerInitHelper::State::AllClustersInitialized;\n  loadCluster(cluster, version_info, true, use_active_map ? active_clusters_ : warming_clusters_);\n\n  if (use_active_map) {\n    ENVOY_LOG(debug, \"add/update cluster {} during init\", cluster_name);\n    auto& cluster_entry = active_clusters_.at(cluster_name);\n    createOrUpdateThreadLocalCluster(*cluster_entry);\n    init_helper_.addCluster(*cluster_entry->cluster_);\n  } else {\n    auto& cluster_entry = warming_clusters_.at(cluster_name);\n    ENVOY_LOG(debug, \"add/update cluster {} starting warming\", cluster_name);\n    cluster_entry->cluster_->initialize([this, cluster_name] {\n      auto warming_it = warming_clusters_.find(cluster_name);\n      auto& cluster_entry = *warming_it->second;\n\n      // If the cluster is being updated, we need to cancel any pending merged updates.\n      // Otherwise, applyUpdates() will fire with a dangling cluster reference.\n      updates_map_.erase(cluster_name);\n\n      active_clusters_[cluster_name] = std::move(warming_it->second);\n      warming_clusters_.erase(warming_it);\n\n      ENVOY_LOG(debug, \"warming cluster {} complete\", cluster_name);\n      createOrUpdateThreadLocalCluster(cluster_entry);\n      onClusterInit(*cluster_entry.cluster_);\n      
updateClusterCounts();\n    });\n  }\n\n  updateClusterCounts();\n  return true;\n}\n\nvoid ClusterManagerImpl::createOrUpdateThreadLocalCluster(ClusterData& cluster) {\n  tls_->runOnAllThreads([new_cluster = cluster.cluster_->info(),\n                         thread_aware_lb_factory = cluster.loadBalancerFactory()](\n                            ThreadLocal::ThreadLocalObjectSharedPtr object)\n                            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    ThreadLocalClusterManagerImpl& cluster_manager =\n        object->asType<ThreadLocalClusterManagerImpl>();\n\n    if (cluster_manager.thread_local_clusters_.count(new_cluster->name()) > 0) {\n      ENVOY_LOG(debug, \"updating TLS cluster {}\", new_cluster->name());\n    } else {\n      ENVOY_LOG(debug, \"adding TLS cluster {}\", new_cluster->name());\n    }\n\n    auto thread_local_cluster = new ThreadLocalClusterManagerImpl::ClusterEntry(\n        cluster_manager, new_cluster, thread_aware_lb_factory);\n    cluster_manager.thread_local_clusters_[new_cluster->name()].reset(thread_local_cluster);\n    for (auto& cb : cluster_manager.update_callbacks_) {\n      cb->onClusterAddOrUpdate(*thread_local_cluster);\n    }\n\n    return object;\n  });\n}\n\nbool ClusterManagerImpl::removeCluster(const std::string& cluster_name) {\n  bool removed = false;\n  auto existing_active_cluster = active_clusters_.find(cluster_name);\n  if (existing_active_cluster != active_clusters_.end() &&\n      existing_active_cluster->second->added_via_api_) {\n    removed = true;\n    init_helper_.removeCluster(*existing_active_cluster->second->cluster_);\n    active_clusters_.erase(existing_active_cluster);\n\n    ENVOY_LOG(info, \"removing cluster {}\", cluster_name);\n    tls_->runOnAllThreads([cluster_name](ThreadLocal::ThreadLocalObjectSharedPtr object)\n                              -> ThreadLocal::ThreadLocalObjectSharedPtr {\n      ThreadLocalClusterManagerImpl& cluster_manager =\n          
object->asType<ThreadLocalClusterManagerImpl>();\n\n      ASSERT(cluster_manager.thread_local_clusters_.count(cluster_name) == 1);\n      ENVOY_LOG(debug, \"removing TLS cluster {}\", cluster_name);\n      for (auto& cb : cluster_manager.update_callbacks_) {\n        cb->onClusterRemoval(cluster_name);\n      }\n      cluster_manager.thread_local_clusters_.erase(cluster_name);\n      return object;\n    });\n  }\n\n  auto existing_warming_cluster = warming_clusters_.find(cluster_name);\n  if (existing_warming_cluster != warming_clusters_.end() &&\n      existing_warming_cluster->second->added_via_api_) {\n    removed = true;\n    warming_clusters_.erase(existing_warming_cluster);\n    ENVOY_LOG(info, \"removing warming cluster {}\", cluster_name);\n  }\n\n  if (removed) {\n    cm_stats_.cluster_removed_.inc();\n    updateClusterCounts();\n    // Cancel any pending merged updates.\n    updates_map_.erase(cluster_name);\n  }\n\n  return removed;\n}\n\nvoid ClusterManagerImpl::loadCluster(const envoy::config::cluster::v3::Cluster& cluster,\n                                     const std::string& version_info, bool added_via_api,\n                                     ClusterMap& cluster_map) {\n  std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr> new_cluster_pair =\n      factory_.clusterFromProto(cluster, *this, outlier_event_logger_, added_via_api);\n  auto& new_cluster = new_cluster_pair.first;\n  Cluster& cluster_reference = *new_cluster;\n\n  if (!added_via_api) {\n    if (cluster_map.find(new_cluster->info()->name()) != cluster_map.end()) {\n      throw EnvoyException(\n          fmt::format(\"cluster manager: duplicate cluster '{}'\", new_cluster->info()->name()));\n    }\n  }\n\n  if (cluster_reference.info()->lbType() == LoadBalancerType::ClusterProvided &&\n      new_cluster_pair.second == nullptr) {\n    throw EnvoyException(fmt::format(\"cluster manager: cluster provided LB specified but cluster \"\n                                     \"'{}' did not 
provide one. Check cluster documentation.\",\n                                     new_cluster->info()->name()));\n  }\n\n  if (cluster_reference.info()->lbType() != LoadBalancerType::ClusterProvided &&\n      new_cluster_pair.second != nullptr) {\n    throw EnvoyException(\n        fmt::format(\"cluster manager: cluster provided LB not specified but cluster \"\n                    \"'{}' provided one. Check cluster documentation.\",\n                    new_cluster->info()->name()));\n  }\n\n  if (new_cluster->healthChecker() != nullptr) {\n    new_cluster->healthChecker()->addHostCheckCompleteCb(\n        [this](HostSharedPtr host, HealthTransition changed_state) {\n          if (changed_state == HealthTransition::Changed &&\n              host->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC)) {\n            postThreadLocalHealthFailure(host);\n          }\n        });\n  }\n\n  if (new_cluster->outlierDetector() != nullptr) {\n    new_cluster->outlierDetector()->addChangedStateCb([this](HostSharedPtr host) {\n      if (host->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) {\n        postThreadLocalHealthFailure(host);\n      }\n    });\n  }\n\n  cluster_map[cluster_reference.info()->name()] = std::make_unique<ClusterData>(\n      cluster, version_info, added_via_api, std::move(new_cluster), time_source_);\n  const auto cluster_entry_it = cluster_map.find(cluster_reference.info()->name());\n\n  // If an LB is thread aware, create it here. The LB is not initialized until cluster pre-init\n  // finishes. 
For RingHash/Maglev don't create the LB here if subset balancing is enabled,\n  // because the thread_aware_lb_ field takes precedence over the subset lb).\n  if (cluster_reference.info()->lbType() == LoadBalancerType::RingHash) {\n    if (!cluster_reference.info()->lbSubsetInfo().isEnabled()) {\n      cluster_entry_it->second->thread_aware_lb_ = std::make_unique<RingHashLoadBalancer>(\n          cluster_reference.prioritySet(), cluster_reference.info()->stats(),\n          cluster_reference.info()->statsScope(), runtime_, random_,\n          cluster_reference.info()->lbRingHashConfig(), cluster_reference.info()->lbConfig());\n    }\n  } else if (cluster_reference.info()->lbType() == LoadBalancerType::Maglev) {\n    if (!cluster_reference.info()->lbSubsetInfo().isEnabled()) {\n      cluster_entry_it->second->thread_aware_lb_ = std::make_unique<MaglevLoadBalancer>(\n          cluster_reference.prioritySet(), cluster_reference.info()->stats(),\n          cluster_reference.info()->statsScope(), runtime_, random_,\n          cluster_reference.info()->lbMaglevConfig(), cluster_reference.info()->lbConfig());\n    }\n  } else if (cluster_reference.info()->lbType() == LoadBalancerType::ClusterProvided) {\n    cluster_entry_it->second->thread_aware_lb_ = std::move(new_cluster_pair.second);\n  }\n\n  updateClusterCounts();\n}\n\nvoid ClusterManagerImpl::updateClusterCounts() {\n  // This if/else block implements a control flow mechanism that can be used by an ADS\n  // implementation to properly sequence CDS and RDS updates. It is not enforcing on ADS. ADS can\n  // use it to detect when a previously sent cluster becomes warm before sending routes that depend\n  // on it. This can improve incidence of HTTP 503 responses from Envoy when a route is used before\n  // it's supporting cluster is ready.\n  //\n  // We achieve that by leaving CDS in the paused state as long as there is at least\n  // one cluster in the warming state. 
This prevents CDS ACK from being sent to ADS.\n  // Once cluster is warmed up, CDS is resumed, and ACK is sent to ADS, providing a\n  // signal to ADS to proceed with RDS updates.\n  // If we're in the middle of shutting down (ads_mux_ already gone) then this is irrelevant.\n  if (ads_mux_) {\n    const auto type_urls = Config::getAllVersionTypeUrls<envoy::config::cluster::v3::Cluster>();\n    const uint64_t previous_warming = cm_stats_.warming_clusters_.value();\n    if (previous_warming == 0 && !warming_clusters_.empty()) {\n      resume_cds_ = ads_mux_->pause(type_urls);\n    } else if (previous_warming > 0 && warming_clusters_.empty()) {\n      ASSERT(resume_cds_ != nullptr);\n      resume_cds_.reset();\n    }\n  }\n  cm_stats_.active_clusters_.set(active_clusters_.size());\n  cm_stats_.warming_clusters_.set(warming_clusters_.size());\n}\n\nThreadLocalCluster* ClusterManagerImpl::get(absl::string_view cluster) {\n  auto& cluster_manager = tls_->getTyped<ThreadLocalClusterManagerImpl>();\n\n  auto entry = cluster_manager.thread_local_clusters_.find(cluster);\n  if (entry != cluster_manager.thread_local_clusters_.end()) {\n    return entry->second.get();\n  } else {\n    return nullptr;\n  }\n}\n\nvoid ClusterManagerImpl::maybePrefetch(\n    ThreadLocalClusterManagerImpl::ClusterEntryPtr& cluster_entry,\n    std::function<ConnectionPool::Instance*()> pick_prefetch_pool) {\n  // TODO(alyssawilk) As currently implemented, this will always just prefetch\n  // one connection ahead of actually needed connections.\n  //\n  // Instead we want to track the following metrics across the entire connection\n  // pool and use the same algorithm we do for per-upstream prefetch:\n  // ((pending_streams_ + num_active_streams_) * global_prefetch_ratio >\n  //  (connecting_stream_capacity_ + num_active_streams_)))\n  //  and allow multiple prefetches per pick.\n  //  Also cap prefetches such that\n  //  num_unused_prefetch < num hosts\n  //  since if we have more prefetches than 
hosts, we should consider kicking into\n  //  per-upstream prefetch.\n  //\n  //  Once we do this, this should loop capped number of times while shouldPrefetch is true.\n  if (cluster_entry->cluster_info_->peekaheadRatio() > 1.0) {\n    ConnectionPool::Instance* prefetch_pool = pick_prefetch_pool();\n    if (prefetch_pool) {\n      prefetch_pool->maybePrefetch(cluster_entry->cluster_info_->peekaheadRatio());\n    }\n  }\n}\n\nHttp::ConnectionPool::Instance*\nClusterManagerImpl::httpConnPoolForCluster(const std::string& cluster, ResourcePriority priority,\n                                           absl::optional<Http::Protocol> protocol,\n                                           LoadBalancerContext* context) {\n  ThreadLocalClusterManagerImpl& cluster_manager = tls_->getTyped<ThreadLocalClusterManagerImpl>();\n\n  auto entry = cluster_manager.thread_local_clusters_.find(cluster);\n  if (entry == cluster_manager.thread_local_clusters_.end()) {\n    return nullptr;\n  }\n\n  // Select a host and create a connection pool for it if it does not already exist.\n  auto ret = entry->second->connPool(priority, protocol, context, false);\n\n  // Now see if another host should be prefetched.\n  // httpConnPoolForCluster is called immediately before a call for newStream. 
newStream doesn't\n  // have the load balancer context needed to make selection decisions so prefetching must be\n  // performed here in anticipation of the new stream.\n  // TODO(alyssawilk) refactor to have one function call and return a pair, so this invariant is\n  // code-enforced.\n  maybePrefetch(entry->second, [&entry, &priority, &protocol, &context]() {\n    return entry->second->connPool(priority, protocol, context, true);\n  });\n\n  return ret;\n}\n\nTcp::ConnectionPool::Instance*\nClusterManagerImpl::tcpConnPoolForCluster(const std::string& cluster, ResourcePriority priority,\n                                          LoadBalancerContext* context) {\n  ThreadLocalClusterManagerImpl& cluster_manager = tls_->getTyped<ThreadLocalClusterManagerImpl>();\n\n  auto entry = cluster_manager.thread_local_clusters_.find(cluster);\n  if (entry == cluster_manager.thread_local_clusters_.end()) {\n    return nullptr;\n  }\n\n  // Select a host and create a connection pool for it if it does not already exist.\n  auto ret = entry->second->tcpConnPool(priority, context, false);\n\n  // tcpConnPoolForCluster is called immediately before a call for newConnection. 
newConnection\n  // doesn't have the load balancer context needed to make selection decisions so prefetching must\n  // be performed here in anticipation of the new connection.\n  // TODO(alyssawilk) refactor to have one function call and return a pair, so this invariant is\n  // code-enforced.\n  // Now see if another host should be prefetched.\n  maybePrefetch(entry->second, [&entry, &priority, &context]() {\n    return entry->second->tcpConnPool(priority, context, true);\n  });\n\n  return ret;\n}\n\nvoid ClusterManagerImpl::postThreadLocalDrainConnections(const Cluster& cluster,\n                                                         const HostVector& hosts_removed) {\n  tls_->runOnAllThreads(\n      [name = cluster.info()->name(), hosts_removed](ThreadLocal::ThreadLocalObjectSharedPtr object)\n          -> ThreadLocal::ThreadLocalObjectSharedPtr {\n        object->asType<ThreadLocalClusterManagerImpl>().removeHosts(name, hosts_removed);\n        return object;\n      });\n}\n\nvoid ClusterManagerImpl::postThreadLocalClusterUpdate(const Cluster& cluster, uint32_t priority,\n                                                      const HostVector& hosts_added,\n                                                      const HostVector& hosts_removed) {\n  const auto& host_set = cluster.prioritySet().hostSetsPerPriority()[priority];\n\n  tls_->runOnAllThreads([name = cluster.info()->name(), priority,\n                         update_params = HostSetImpl::updateHostsParams(*host_set),\n                         locality_weights = host_set->localityWeights(), hosts_added, hosts_removed,\n                         overprovisioning_factor = host_set->overprovisioningFactor()](\n                            ThreadLocal::ThreadLocalObjectSharedPtr object)\n                            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    object->asType<ThreadLocalClusterManagerImpl>().updateClusterMembership(\n        name, priority, update_params, locality_weights, hosts_added, 
hosts_removed,\n        overprovisioning_factor);\n    return object;\n  });\n}\n\nvoid ClusterManagerImpl::postThreadLocalHealthFailure(const HostSharedPtr& host) {\n  tls_->runOnAllThreads([host](ThreadLocal::ThreadLocalObjectSharedPtr object)\n                            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    object->asType<ThreadLocalClusterManagerImpl>().onHostHealthFailure(host);\n    return object;\n  });\n}\n\nHost::CreateConnectionData ClusterManagerImpl::tcpConnForCluster(const std::string& cluster,\n                                                                 LoadBalancerContext* context) {\n  ThreadLocalClusterManagerImpl& cluster_manager = tls_->getTyped<ThreadLocalClusterManagerImpl>();\n\n  auto entry = cluster_manager.thread_local_clusters_.find(cluster);\n  if (entry == cluster_manager.thread_local_clusters_.end()) {\n    throw EnvoyException(fmt::format(\"unknown cluster '{}'\", cluster));\n  }\n\n  HostConstSharedPtr logical_host = entry->second->lb_->chooseHost(context);\n  if (logical_host) {\n    auto conn_info = logical_host->createConnection(\n        cluster_manager.thread_local_dispatcher_, nullptr,\n        context == nullptr ? 
nullptr : context->upstreamTransportSocketOptions());\n    if ((entry->second->cluster_info_->features() &\n         ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) &&\n        conn_info.connection_ != nullptr) {\n      auto& conn_map = cluster_manager.host_tcp_conn_map_[logical_host];\n      conn_map.emplace(conn_info.connection_.get(),\n                       std::make_unique<ThreadLocalClusterManagerImpl::TcpConnContainer>(\n                           cluster_manager, logical_host, *conn_info.connection_));\n    }\n    return conn_info;\n  } else {\n    entry->second->cluster_info_->stats().upstream_cx_none_healthy_.inc();\n    return {nullptr, nullptr};\n  }\n}\n\nHttp::AsyncClient& ClusterManagerImpl::httpAsyncClientForCluster(const std::string& cluster) {\n  ThreadLocalClusterManagerImpl& cluster_manager = tls_->getTyped<ThreadLocalClusterManagerImpl>();\n  auto entry = cluster_manager.thread_local_clusters_.find(cluster);\n  if (entry != cluster_manager.thread_local_clusters_.end()) {\n    return entry->second->http_async_client_;\n  } else {\n    throw EnvoyException(fmt::format(\"unknown cluster '{}'\", cluster));\n  }\n}\n\nClusterUpdateCallbacksHandlePtr\nClusterManagerImpl::addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& cb) {\n  ThreadLocalClusterManagerImpl& cluster_manager = tls_->getTyped<ThreadLocalClusterManagerImpl>();\n  return std::make_unique<ClusterUpdateCallbacksHandleImpl>(cb, cluster_manager.update_callbacks_);\n}\n\nProtobufTypes::MessagePtr ClusterManagerImpl::dumpClusterConfigs() {\n  auto config_dump = std::make_unique<envoy::admin::v3::ClustersConfigDump>();\n  config_dump->set_version_info(cds_api_ != nullptr ? 
cds_api_->versionInfo() : \"\");\n  for (const auto& active_cluster_pair : active_clusters_) {\n    const auto& cluster = *active_cluster_pair.second;\n    if (!cluster.added_via_api_) {\n      auto& static_cluster = *config_dump->mutable_static_clusters()->Add();\n      static_cluster.mutable_cluster()->PackFrom(API_RECOVER_ORIGINAL(cluster.cluster_config_));\n      TimestampUtil::systemClockToTimestamp(cluster.last_updated_,\n                                            *(static_cluster.mutable_last_updated()));\n    } else {\n      auto& dynamic_cluster = *config_dump->mutable_dynamic_active_clusters()->Add();\n      dynamic_cluster.set_version_info(cluster.version_info_);\n      dynamic_cluster.mutable_cluster()->PackFrom(API_RECOVER_ORIGINAL(cluster.cluster_config_));\n      TimestampUtil::systemClockToTimestamp(cluster.last_updated_,\n                                            *(dynamic_cluster.mutable_last_updated()));\n    }\n  }\n\n  for (const auto& warming_cluster_pair : warming_clusters_) {\n    const auto& cluster = *warming_cluster_pair.second;\n    auto& dynamic_cluster = *config_dump->mutable_dynamic_warming_clusters()->Add();\n    dynamic_cluster.set_version_info(cluster.version_info_);\n    dynamic_cluster.mutable_cluster()->PackFrom(API_RECOVER_ORIGINAL(cluster.cluster_config_));\n    TimestampUtil::systemClockToTimestamp(cluster.last_updated_,\n                                          *(dynamic_cluster.mutable_last_updated()));\n  }\n\n  return config_dump;\n}\n\nClusterManagerImpl::ThreadLocalClusterManagerImpl::ThreadLocalClusterManagerImpl(\n    ClusterManagerImpl& parent, Event::Dispatcher& dispatcher,\n    const absl::optional<std::string>& local_cluster_name)\n    : parent_(parent), thread_local_dispatcher_(dispatcher) {\n  // If local cluster is defined then we need to initialize it first.\n  if (local_cluster_name) {\n    ENVOY_LOG(debug, \"adding TLS local cluster {}\", local_cluster_name.value());\n    auto& local_cluster = 
parent.active_clusters_.at(local_cluster_name.value());\n    thread_local_clusters_[local_cluster_name.value()] = std::make_unique<ClusterEntry>(\n        *this, local_cluster->cluster_->info(), local_cluster->loadBalancerFactory());\n  }\n\n  local_priority_set_ = local_cluster_name\n                            ? &thread_local_clusters_[local_cluster_name.value()]->priority_set_\n                            : nullptr;\n\n  for (auto& cluster : parent.active_clusters_) {\n    // If local cluster name is set then we already initialized this cluster.\n    if (local_cluster_name && local_cluster_name.value() == cluster.first) {\n      continue;\n    }\n\n    ENVOY_LOG(debug, \"adding TLS initial cluster {}\", cluster.first);\n    ASSERT(thread_local_clusters_.count(cluster.first) == 0);\n    thread_local_clusters_[cluster.first] = std::make_unique<ClusterEntry>(\n        *this, cluster.second->cluster_->info(), cluster.second->loadBalancerFactory());\n  }\n}\n\nClusterManagerImpl::ThreadLocalClusterManagerImpl::~ThreadLocalClusterManagerImpl() {\n  // Clear out connection pools as well as the thread local cluster map so that we release all\n  // cluster pointers. Currently we have to free all non-local clusters before we free\n  // the local cluster. 
This is because non-local clusters with a zone aware load balancer have a\n  // member update callback registered with the local cluster.\n  ENVOY_LOG(debug, \"shutting down thread local cluster manager\");\n  destroying_ = true;\n  host_http_conn_pool_map_.clear();\n  host_tcp_conn_pool_map_.clear();\n  ASSERT(host_tcp_conn_map_.empty());\n  for (auto& cluster : thread_local_clusters_) {\n    if (&cluster.second->priority_set_ != local_priority_set_) {\n      cluster.second.reset();\n    }\n  }\n  thread_local_clusters_.clear();\n}\n\nvoid ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools(const HostVector& hosts) {\n  for (const HostSharedPtr& host : hosts) {\n    {\n      auto container = getHttpConnPoolsContainer(host);\n      if (container != nullptr) {\n        drainConnPools(host, *container);\n      }\n    }\n    {\n      auto container = host_tcp_conn_pool_map_.find(host);\n      if (container != host_tcp_conn_pool_map_.end()) {\n        drainTcpConnPools(host, container->second);\n      }\n    }\n  }\n}\n\nvoid ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools(\n    HostSharedPtr old_host, ConnPoolsContainer& container) {\n  container.drains_remaining_ += container.pools_->size();\n\n  // Make a copy to protect against erasure in the callback.\n  std::shared_ptr<ConnPoolsContainer::ConnPools> pools = container.pools_;\n  pools->addDrainedCallback([this, old_host]() -> void {\n    if (destroying_) {\n      // It is possible for a connection pool to fire drain callbacks during destruction. Instead\n      // of checking if old_host actually exists in the map, it's clearer and cleaner to keep\n      // track of destruction as a separate state and check for it here. 
This also allows us to\n      // do this check here versus inside every different connection pool implementation.\n      return;\n    }\n\n    ConnPoolsContainer* to_clear = getHttpConnPoolsContainer(old_host);\n    if (to_clear == nullptr) {\n      // This could happen if we have cleaned out the host before iterating through every connection\n      // pool. Handle it by just continuing.\n      return;\n    }\n\n    ASSERT(to_clear->drains_remaining_ > 0);\n    to_clear->drains_remaining_--;\n    if (to_clear->drains_remaining_ == 0 && to_clear->ready_to_drain_) {\n      clearContainer(old_host, *to_clear);\n    }\n  });\n\n  // We need to hold off on actually emptying out the container until we have finished processing\n  // `addDrainedCallback`. If we do not, then it's possible that the container could be erased in\n  // the middle of its iteration, which leads to undefined behaviour. We handle that case by\n  // checking here to see if the drains have completed.\n  container.ready_to_drain_ = true;\n  if (container.drains_remaining_ == 0) {\n    clearContainer(old_host, container);\n  }\n}\n\nvoid ClusterManagerImpl::ThreadLocalClusterManagerImpl::clearContainer(\n    HostSharedPtr old_host, ConnPoolsContainer& container) {\n  container.pools_->clear();\n  host_http_conn_pool_map_.erase(old_host);\n}\n\nvoid ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainTcpConnPools(\n    HostSharedPtr old_host, TcpConnPoolsContainer& container) {\n  container.drains_remaining_ += container.pools_.size();\n\n  for (const auto& pair : container.pools_) {\n    pair.second->addDrainedCallback([this, old_host]() -> void {\n      if (destroying_) {\n        // It is possible for a connection pool to fire drain callbacks during destruction. Instead\n        // of checking if old_host actually exists in the map, it's clearer and cleaner to keep\n        // track of destruction as a separate state and check for it here. 
This also allows us to\n        // do this check here versus inside every different connection pool implementation.\n        return;\n      }\n\n      TcpConnPoolsContainer& container = host_tcp_conn_pool_map_[old_host];\n      ASSERT(container.drains_remaining_ > 0);\n      container.drains_remaining_--;\n      if (container.drains_remaining_ == 0) {\n        for (auto& pair : container.pools_) {\n          thread_local_dispatcher_.deferredDelete(std::move(pair.second));\n        }\n        host_tcp_conn_pool_map_.erase(old_host);\n      }\n    });\n\n    // The above addDrainedCallback() drain completion callback might execute immediately. This can\n    // then effectively nuke 'container', which means we can't continue to loop on its contents\n    // (we're done here).\n    if (host_tcp_conn_pool_map_.count(old_host) == 0) {\n      break;\n    }\n  }\n}\n\nvoid ClusterManagerImpl::ThreadLocalClusterManagerImpl::removeTcpConn(\n    const HostConstSharedPtr& host, Network::ClientConnection& connection) {\n  auto host_tcp_conn_map_it = host_tcp_conn_map_.find(host);\n  ASSERT(host_tcp_conn_map_it != host_tcp_conn_map_.end());\n  TcpConnectionsMap& connections_map = host_tcp_conn_map_it->second;\n  auto it = connections_map.find(&connection);\n  ASSERT(it != connections_map.end());\n  connection.dispatcher().deferredDelete(std::move(it->second));\n  connections_map.erase(it);\n  if (connections_map.empty()) {\n    host_tcp_conn_map_.erase(host_tcp_conn_map_it);\n  }\n}\n\nvoid ClusterManagerImpl::ThreadLocalClusterManagerImpl::removeHosts(\n    const std::string& name, const HostVector& hosts_removed) {\n  ASSERT(thread_local_clusters_.find(name) != thread_local_clusters_.end());\n  const auto& cluster_entry = thread_local_clusters_[name];\n  ENVOY_LOG(debug, \"removing hosts for TLS cluster {} removed {}\", name, hosts_removed.size());\n\n  // We need to go through and purge any connection pools for hosts that got deleted.\n  // Even if two hosts actually point to 
the same address this will be safe, since if a\n  // host is readded it will be a different physical HostSharedPtr.\n  cluster_entry->parent_.drainConnPools(hosts_removed);\n}\n\nvoid ClusterManagerImpl::ThreadLocalClusterManagerImpl::updateClusterMembership(\n    const std::string& name, uint32_t priority, PrioritySet::UpdateHostsParams update_hosts_params,\n    LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added,\n    const HostVector& hosts_removed, uint64_t overprovisioning_factor) {\n  ASSERT(thread_local_clusters_.find(name) != thread_local_clusters_.end());\n  const auto& cluster_entry = thread_local_clusters_[name];\n  ENVOY_LOG(debug, \"membership update for TLS cluster {} added {} removed {}\", name,\n            hosts_added.size(), hosts_removed.size());\n  cluster_entry->priority_set_.updateHosts(priority, std::move(update_hosts_params),\n                                           std::move(locality_weights), hosts_added, hosts_removed,\n                                           overprovisioning_factor);\n\n  // If an LB is thread aware, create a new worker local LB on membership changes.\n  if (cluster_entry->lb_factory_ != nullptr) {\n    ENVOY_LOG(debug, \"re-creating local LB for TLS cluster {}\", name);\n    cluster_entry->lb_ = cluster_entry->lb_factory_->create();\n  }\n}\n\nvoid ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure(\n    const HostSharedPtr& host) {\n\n  // Drain all HTTP connection pool connections in the case of a host health failure. 
If outlier/\n  // health is due to ECMP flow hashing issues for example, a new set of connections might do\n  // better.\n  // TODO(mattklein123): This function is currently very specific, but in the future when we do\n  // more granular host set changes, we should be able to capture single host changes and make them\n  // more targeted.\n  {\n    const auto container = getHttpConnPoolsContainer(host);\n    if (container != nullptr) {\n      container->pools_->drainConnections();\n    }\n  }\n  {\n    // Drain or close any TCP connection pool for the host. Draining a TCP pool doesn't lead to\n    // connections being closed, it only prevents new connections through the pool. The\n    // CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE can be used to make the pool close any\n    // active connections.\n    const auto& container = host_tcp_conn_pool_map_.find(host);\n    if (container != host_tcp_conn_pool_map_.end()) {\n      for (const auto& pair : container->second.pools_) {\n        const Tcp::ConnectionPool::InstancePtr& pool = pair.second;\n        if (host->cluster().features() &\n            ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) {\n          pool->closeConnections();\n        } else {\n          pool->drainConnections();\n        }\n      }\n    }\n  }\n\n  if (host->cluster().features() &\n      ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) {\n    // Close non connection pool TCP connections obtained from tcpConnForCluster()\n    //\n    // TODO(jono): The only remaining user of the non-pooled connections seems to be the statsd\n    // TCP client. Perhaps it could be rewritten to use a connection pool, and this code deleted.\n    //\n    // Each connection will remove itself from the TcpConnectionsMap when it closes, via its\n    // Network::ConnectionCallbacks. 
The last removed tcp conn will remove the TcpConnectionsMap\n    // from host_tcp_conn_map_, so do not cache it between iterations.\n    //\n    // TODO(ggreenway) PERF: If there are a large number of connections, this could take a long time\n    // and halt other useful work. Consider breaking up this work. Note that this behavior is noted\n    // in the configuration documentation in cluster setting\n    // \"close_connections_on_host_health_failure\". Update the docs if this if this changes.\n    while (true) {\n      const auto& it = host_tcp_conn_map_.find(host);\n      if (it == host_tcp_conn_map_.end()) {\n        break;\n      }\n      TcpConnectionsMap& container = it->second;\n      container.begin()->first->close(Network::ConnectionCloseType::NoFlush);\n    }\n  }\n}\n\nClusterManagerImpl::ThreadLocalClusterManagerImpl::ConnPoolsContainer*\nClusterManagerImpl::ThreadLocalClusterManagerImpl::getHttpConnPoolsContainer(\n    const HostConstSharedPtr& host, bool allocate) {\n  auto container_iter = host_http_conn_pool_map_.find(host);\n  if (container_iter == host_http_conn_pool_map_.end()) {\n    if (!allocate) {\n      return nullptr;\n    }\n    ConnPoolsContainer container{thread_local_dispatcher_, host};\n    container_iter = host_http_conn_pool_map_.emplace(host, std::move(container)).first;\n  }\n\n  return &container_iter->second;\n}\n\nClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry(\n    ThreadLocalClusterManagerImpl& parent, ClusterInfoConstSharedPtr cluster,\n    const LoadBalancerFactorySharedPtr& lb_factory)\n    : parent_(parent), lb_factory_(lb_factory), cluster_info_(cluster),\n      http_async_client_(cluster, parent.parent_.stats_, parent.thread_local_dispatcher_,\n                         parent.parent_.local_info_, parent.parent_, parent.parent_.runtime_,\n                         parent.parent_.random_,\n                         Router::ShadowWriterPtr{new Router::ShadowWriterImpl(parent.parent_)},\n        
                 parent_.parent_.http_context_) {\n  priority_set_.getOrCreateHostSet(0);\n\n  // TODO(mattklein123): Consider converting other LBs over to thread local. All of them could\n  // benefit given the healthy panic, locality, and priority calculations that take place.\n  if (cluster->lbSubsetInfo().isEnabled()) {\n    lb_ = std::make_unique<SubsetLoadBalancer>(\n        cluster->lbType(), priority_set_, parent_.local_priority_set_, cluster->stats(),\n        cluster->statsScope(), parent.parent_.runtime_, parent.parent_.random_,\n        cluster->lbSubsetInfo(), cluster->lbRingHashConfig(), cluster->lbMaglevConfig(),\n        cluster->lbLeastRequestConfig(), cluster->lbConfig());\n  } else {\n    switch (cluster->lbType()) {\n    case LoadBalancerType::LeastRequest: {\n      ASSERT(lb_factory_ == nullptr);\n      lb_ = std::make_unique<LeastRequestLoadBalancer>(\n          priority_set_, parent_.local_priority_set_, cluster->stats(), parent.parent_.runtime_,\n          parent.parent_.random_, cluster->lbConfig(), cluster->lbLeastRequestConfig());\n      break;\n    }\n    case LoadBalancerType::Random: {\n      ASSERT(lb_factory_ == nullptr);\n      lb_ = std::make_unique<RandomLoadBalancer>(priority_set_, parent_.local_priority_set_,\n                                                 cluster->stats(), parent.parent_.runtime_,\n                                                 parent.parent_.random_, cluster->lbConfig());\n      break;\n    }\n    case LoadBalancerType::RoundRobin: {\n      ASSERT(lb_factory_ == nullptr);\n      lb_ = std::make_unique<RoundRobinLoadBalancer>(priority_set_, parent_.local_priority_set_,\n                                                     cluster->stats(), parent.parent_.runtime_,\n                                                     parent.parent_.random_, cluster->lbConfig());\n      break;\n    }\n    case LoadBalancerType::ClusterProvided:\n    case LoadBalancerType::RingHash:\n    case LoadBalancerType::Maglev:\n    
case LoadBalancerType::OriginalDst: {\n      ASSERT(lb_factory_ != nullptr);\n      lb_ = lb_factory_->create();\n      break;\n    }\n    }\n  }\n}\n\nClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::~ClusterEntry() {\n  // We need to drain all connection pools for the cluster being removed. Then we can remove the\n  // cluster.\n  //\n  // TODO(mattklein123): Optimally, we would just fire member changed callbacks and remove all of\n  // the hosts inside of the HostImpl destructor. That is a change with wide implications, so we are\n  // going with a more targeted approach for now.\n  for (auto& host_set : priority_set_.hostSetsPerPriority()) {\n    parent_.drainConnPools(host_set->hosts());\n  }\n}\n\nHttp::ConnectionPool::Instance*\nClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool(\n    ResourcePriority priority, absl::optional<Http::Protocol> downstream_protocol,\n    LoadBalancerContext* context, bool peek) {\n  HostConstSharedPtr host = (peek ? 
lb_->peekAnotherHost(context) : lb_->chooseHost(context));\n  if (!host) {\n    ENVOY_LOG(debug, \"no healthy host for HTTP connection pool\");\n    cluster_info_->stats().upstream_cx_none_healthy_.inc();\n    return nullptr;\n  }\n\n  auto upstream_protocol = host->cluster().upstreamHttpProtocol(downstream_protocol);\n  std::vector<uint8_t> hash_key = {uint8_t(upstream_protocol)};\n\n  Network::Socket::OptionsSharedPtr upstream_options(std::make_shared<Network::Socket::Options>());\n  if (context) {\n    // Inherit socket options from downstream connection, if set.\n    if (context->downstreamConnection()) {\n      addOptionsIfNotNull(upstream_options, context->downstreamConnection()->socketOptions());\n    }\n    addOptionsIfNotNull(upstream_options, context->upstreamSocketOptions());\n  }\n\n  // Use the socket options for computing connection pool hash key, if any.\n  // This allows socket options to control connection pooling so that connections with\n  // different options are not pooled together.\n  for (const auto& option : *upstream_options) {\n    option->hashKey(hash_key);\n  }\n\n  bool have_transport_socket_options = false;\n  if (context && context->upstreamTransportSocketOptions()) {\n    context->upstreamTransportSocketOptions()->hashKey(hash_key);\n    have_transport_socket_options = true;\n  }\n\n  // If configured, use the downstream connection id in pool hash key\n  if (cluster_info_->connectionPoolPerDownstreamConnection() && context &&\n      context->downstreamConnection()) {\n    context->downstreamConnection()->hashKey(hash_key);\n  }\n\n  ConnPoolsContainer& container = *parent_.getHttpConnPoolsContainer(host, true);\n\n  // Note: to simplify this, we assume that the factory is only called in the scope of this\n  // function. 
Otherwise, we'd need to capture a few of these variables by value.\n  ConnPoolsContainer::ConnPools::PoolOptRef pool =\n      container.pools_->getPool(priority, hash_key, [&]() {\n        return parent_.parent_.factory_.allocateConnPool(\n            parent_.thread_local_dispatcher_, host, priority, upstream_protocol,\n            !upstream_options->empty() ? upstream_options : nullptr,\n            have_transport_socket_options ? context->upstreamTransportSocketOptions() : nullptr);\n      });\n\n  if (pool.has_value()) {\n    return &(pool.value().get());\n  } else {\n    return nullptr;\n  }\n}\n\nTcp::ConnectionPool::Instance*\nClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool(\n    ResourcePriority priority, LoadBalancerContext* context, bool peek) {\n  HostConstSharedPtr host = (peek ? lb_->peekAnotherHost(context) : lb_->chooseHost(context));\n  if (!host) {\n    ENVOY_LOG(debug, \"no healthy host for TCP connection pool\");\n    cluster_info_->stats().upstream_cx_none_healthy_.inc();\n    return nullptr;\n  }\n\n  // Inherit socket options from downstream connection, if set.\n  std::vector<uint8_t> hash_key = {uint8_t(priority)};\n\n  // Use downstream connection socket options for computing connection pool hash key, if any.\n  // This allows socket options to control connection pooling so that connections with\n  // different options are not pooled together.\n  bool have_options = false;\n  if (context != nullptr && context->downstreamConnection()) {\n    const Network::ConnectionSocket::OptionsSharedPtr& options =\n        context->downstreamConnection()->socketOptions();\n    if (options) {\n      for (const auto& option : *options) {\n        have_options = true;\n        option->hashKey(hash_key);\n      }\n    }\n  }\n\n  bool have_transport_socket_options = false;\n  if (context != nullptr && context->upstreamTransportSocketOptions() != nullptr) {\n    have_transport_socket_options = true;\n    
context->upstreamTransportSocketOptions()->hashKey(hash_key);\n  }\n\n  TcpConnPoolsContainer& container = parent_.host_tcp_conn_pool_map_[host];\n  if (!container.pools_[hash_key]) {\n    container.pools_[hash_key] = parent_.parent_.factory_.allocateTcpConnPool(\n        parent_.thread_local_dispatcher_, host, priority,\n        have_options ? context->downstreamConnection()->socketOptions() : nullptr,\n        have_transport_socket_options ? context->upstreamTransportSocketOptions() : nullptr);\n  }\n\n  return container.pools_[hash_key].get();\n}\n\nClusterManagerPtr ProdClusterManagerFactory::clusterManagerFromProto(\n    const envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n  return ClusterManagerPtr{new ClusterManagerImpl(\n      bootstrap, *this, stats_, tls_, runtime_, local_info_, log_manager_, main_thread_dispatcher_,\n      admin_, validation_context_, api_, http_context_, grpc_context_)};\n}\n\nHttp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool(\n    Event::Dispatcher& dispatcher, HostConstSharedPtr host, ResourcePriority priority,\n    Http::Protocol protocol, const Network::ConnectionSocket::OptionsSharedPtr& options,\n    const Network::TransportSocketOptionsSharedPtr& transport_socket_options) {\n  if (protocol == Http::Protocol::Http2 &&\n      runtime_.snapshot().featureEnabled(\"upstream.use_http2\", 100)) {\n    return Http::Http2::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority,\n                                         options, transport_socket_options);\n  } else if (protocol == Http::Protocol::Http3) {\n    // Quic connection pool is not implemented.\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  } else {\n    return Http::Http1::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority,\n                                         options, transport_socket_options);\n  }\n}\n\nTcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool(\n    Event::Dispatcher& 
dispatcher, HostConstSharedPtr host, ResourcePriority priority,\n    const Network::ConnectionSocket::OptionsSharedPtr& options,\n    Network::TransportSocketOptionsSharedPtr transport_socket_options) {\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_tcp_connection_pool\")) {\n    return std::make_unique<Tcp::ConnPoolImpl>(dispatcher, host, priority, options,\n                                               transport_socket_options);\n  } else {\n    return Tcp::ConnectionPool::InstancePtr{new Tcp::OriginalConnPoolImpl(\n        dispatcher, host, priority, options, transport_socket_options)};\n  }\n}\n\nstd::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr> ProdClusterManagerFactory::clusterFromProto(\n    const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm,\n    Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) {\n  return ClusterFactoryImplBase::create(\n      cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_,\n      main_thread_dispatcher_, log_manager_, local_info_, admin_, singleton_manager_,\n      outlier_event_logger, added_via_api,\n      added_via_api ? validation_context_.dynamicValidationVisitor()\n                    : validation_context_.staticValidationVisitor(),\n      api_);\n}\n\nCdsApiPtr\nProdClusterManagerFactory::createCds(const envoy::config::core::v3::ConfigSource& cds_config,\n                                     ClusterManager& cm) {\n  // TODO(htuch): Differentiate static vs. dynamic validation visitors.\n  return CdsApiImpl::create(cds_config, cm, stats_, validation_context_.dynamicValidationVisitor());\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/cluster_manager_impl.h",
    "content": "#pragma once\n\n#include <array>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <map>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/config/grpc_mux_impl.h\"\n#include \"common/config/subscription_factory_impl.h\"\n#include \"common/http/async_client_impl.h\"\n#include \"common/upstream/load_stats_reporter.h\"\n#include \"common/upstream/priority_conn_pool_map.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Production implementation of ClusterManagerFactory.\n */\nclass ProdClusterManagerFactory : public ClusterManagerFactory {\npublic:\n  ProdClusterManagerFactory(Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats,\n                            ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver,\n                            Ssl::ContextManager& ssl_context_manager,\n                            Event::Dispatcher& main_thread_dispatcher,\n                            const LocalInfo::LocalInfo& local_info,\n                            Secret::SecretManager& secret_manager,\n                            ProtobufMessage::ValidationContext& validation_context, Api::Api& api,\n                            Http::Context& http_context, Grpc::Context& grpc_context,\n          
                  AccessLog::AccessLogManager& log_manager,\n                            Singleton::Manager& singleton_manager)\n      : main_thread_dispatcher_(main_thread_dispatcher), validation_context_(validation_context),\n        api_(api), http_context_(http_context), grpc_context_(grpc_context), admin_(admin),\n        runtime_(runtime), stats_(stats), tls_(tls), dns_resolver_(dns_resolver),\n        ssl_context_manager_(ssl_context_manager), local_info_(local_info),\n        secret_manager_(secret_manager), log_manager_(log_manager),\n        singleton_manager_(singleton_manager) {}\n\n  // Upstream::ClusterManagerFactory\n  ClusterManagerPtr\n  clusterManagerFromProto(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) override;\n  Http::ConnectionPool::InstancePtr allocateConnPool(\n      Event::Dispatcher& dispatcher, HostConstSharedPtr host, ResourcePriority priority,\n      Http::Protocol protocol, const Network::ConnectionSocket::OptionsSharedPtr& options,\n      const Network::TransportSocketOptionsSharedPtr& transport_socket_options) override;\n  Tcp::ConnectionPool::InstancePtr\n  allocateTcpConnPool(Event::Dispatcher& dispatcher, HostConstSharedPtr host,\n                      ResourcePriority priority,\n                      const Network::ConnectionSocket::OptionsSharedPtr& options,\n                      Network::TransportSocketOptionsSharedPtr transport_socket_options) override;\n  std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>\n  clusterFromProto(const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm,\n                   Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) override;\n  CdsApiPtr createCds(const envoy::config::core::v3::ConfigSource& cds_config,\n                      ClusterManager& cm) override;\n  Secret::SecretManager& secretManager() override { return secret_manager_; }\n\nprotected:\n  Event::Dispatcher& main_thread_dispatcher_;\n  ProtobufMessage::ValidationContext& 
validation_context_;\n  Api::Api& api_;\n  Http::Context& http_context_;\n  Grpc::Context& grpc_context_;\n  Server::Admin& admin_;\n  Runtime::Loader& runtime_;\n  Stats::Store& stats_;\n  ThreadLocal::Instance& tls_;\n  Network::DnsResolverSharedPtr dns_resolver_;\n  Ssl::ContextManager& ssl_context_manager_;\n  const LocalInfo::LocalInfo& local_info_;\n  Secret::SecretManager& secret_manager_;\n  AccessLog::AccessLogManager& log_manager_;\n  Singleton::Manager& singleton_manager_;\n};\n\n// For friend declaration in ClusterManagerInitHelper.\nclass ClusterManagerImpl;\n\n/**\n * This is a helper class used during cluster management initialization. Dealing with primary\n * clusters, secondary clusters, and CDS, is quite complicated, so this makes it easier to test.\n */\nclass ClusterManagerInitHelper : Logger::Loggable<Logger::Id::upstream> {\npublic:\n  /**\n   * @param per_cluster_init_callback supplies the callback to call when a cluster has itself\n   *        initialized. The cluster manager can use this for post-init processing.\n   */\n  ClusterManagerInitHelper(ClusterManager& cm,\n                           const std::function<void(Cluster&)>& per_cluster_init_callback)\n      : cm_(cm), per_cluster_init_callback_(per_cluster_init_callback) {}\n\n  enum class State {\n    // Initial state. During this state all static clusters are loaded. Any primary clusters\n    // immediately begin initialization.\n    Loading,\n    // In this state cluster manager waits for all primary clusters to finish initialization.\n    // This state may immediately transition to the next state iff all clusters are STATIC and\n    // without health checks enabled or health checks have failed immediately, since their\n    // initialization completes immediately.\n    WaitingForPrimaryInitializationToComplete,\n    // During this state cluster manager waits to start initializing secondary clusters. In this\n    // state all primary clusters have completed initialization. 
Initialization of the\n    // secondary clusters is started by the `initializeSecondaryClusters` method.\n    WaitingToStartSecondaryInitialization,\n    // In this state cluster manager waits for all secondary clusters (if configured) to finish\n    // initialization. Then, if CDS is configured, this state tracks waiting for the first CDS\n    // response to populate dynamically configured clusters.\n    WaitingToStartCdsInitialization,\n    // During this state, all CDS populated clusters are undergoing either phase 1 or phase 2\n    // initialization.\n    CdsInitialized,\n    // All clusters are fully initialized.\n    AllClustersInitialized\n  };\n\n  void addCluster(Cluster& cluster);\n  void onStaticLoadComplete();\n  void removeCluster(Cluster& cluster);\n  void setCds(CdsApi* cds);\n  void setPrimaryClustersInitializedCb(ClusterManager::PrimaryClustersReadyCallback callback);\n  void setInitializedCb(ClusterManager::InitializationCompleteCallback callback);\n  State state() const { return state_; }\n\n  void startInitializingSecondaryClusters();\n\nprivate:\n  // To enable invariant assertions on the cluster lists.\n  friend ClusterManagerImpl;\n\n  void initializeSecondaryClusters();\n  void maybeFinishInitialize();\n  void onClusterInit(Cluster& cluster);\n\n  ClusterManager& cm_;\n  std::function<void(Cluster& cluster)> per_cluster_init_callback_;\n  CdsApi* cds_{};\n  ClusterManager::PrimaryClustersReadyCallback primary_clusters_initialized_callback_;\n  ClusterManager::InitializationCompleteCallback initialized_callback_;\n  std::list<Cluster*> primary_init_clusters_;\n  std::list<Cluster*> secondary_init_clusters_;\n  State state_{State::Loading};\n  bool started_secondary_initialize_{};\n};\n\n/**\n * All cluster manager stats. 
@see stats_macros.h\n */\n#define ALL_CLUSTER_MANAGER_STATS(COUNTER, GAUGE)                                                  \\\n  COUNTER(cluster_added)                                                                           \\\n  COUNTER(cluster_modified)                                                                        \\\n  COUNTER(cluster_removed)                                                                         \\\n  COUNTER(cluster_updated)                                                                         \\\n  COUNTER(cluster_updated_via_merge)                                                               \\\n  COUNTER(update_merge_cancelled)                                                                  \\\n  COUNTER(update_out_of_merge_window)                                                              \\\n  GAUGE(active_clusters, NeverImport)                                                              \\\n  GAUGE(warming_clusters, NeverImport)\n\n/**\n * Struct definition for all cluster manager stats. 
@see stats_macros.h\n */\nstruct ClusterManagerStats {\n  ALL_CLUSTER_MANAGER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Implementation of ClusterManager that reads from a proto configuration, maintains a central\n * cluster list, as well as thread local caches of each cluster and associated connection pools.\n */\nclass ClusterManagerImpl : public ClusterManager, Logger::Loggable<Logger::Id::upstream> {\npublic:\n  ClusterManagerImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                     ClusterManagerFactory& factory, Stats::Store& stats,\n                     ThreadLocal::Instance& tls, Runtime::Loader& runtime,\n                     const LocalInfo::LocalInfo& local_info,\n                     AccessLog::AccessLogManager& log_manager,\n                     Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin,\n                     ProtobufMessage::ValidationContext& validation_context, Api::Api& api,\n                     Http::Context& http_context, Grpc::Context& grpc_context);\n\n  std::size_t warmingClusterCount() const { return warming_clusters_.size(); }\n\n  // Upstream::ClusterManager\n  bool addOrUpdateCluster(const envoy::config::cluster::v3::Cluster& cluster,\n                          const std::string& version_info) override;\n\n  void setPrimaryClustersInitializedCb(PrimaryClustersReadyCallback callback) override {\n    init_helper_.setPrimaryClustersInitializedCb(callback);\n  }\n\n  void setInitializedCb(InitializationCompleteCallback callback) override {\n    init_helper_.setInitializedCb(callback);\n  }\n\n  ClusterInfoMap clusters() override {\n    // TODO(mattklein123): Add ability to see warming clusters in admin output.\n    ClusterInfoMap clusters_map;\n    for (auto& cluster : active_clusters_) {\n      clusters_map.emplace(cluster.first, *cluster.second->cluster_);\n    }\n\n    return clusters_map;\n  }\n  const ClusterSet& primaryClusters() override { return 
primary_clusters_; }\n  ThreadLocalCluster* get(absl::string_view cluster) override;\n\n  using ClusterManager::httpConnPoolForCluster;\n\n  Http::ConnectionPool::Instance*\n  httpConnPoolForCluster(const std::string& cluster, ResourcePriority priority,\n                         absl::optional<Http::Protocol> downstream_protocol,\n                         LoadBalancerContext* context) override;\n  Tcp::ConnectionPool::Instance* tcpConnPoolForCluster(const std::string& cluster,\n                                                       ResourcePriority priority,\n                                                       LoadBalancerContext* context) override;\n  Host::CreateConnectionData tcpConnForCluster(const std::string& cluster,\n                                               LoadBalancerContext* context) override;\n  Http::AsyncClient& httpAsyncClientForCluster(const std::string& cluster) override;\n  bool removeCluster(const std::string& cluster) override;\n  void shutdown() override {\n    if (resume_cds_ != nullptr) {\n      resume_cds_->cancel();\n    }\n    // Make sure we destroy all potential outgoing connections before this returns.\n    cds_api_.reset();\n    ads_mux_.reset();\n    active_clusters_.clear();\n    warming_clusters_.clear();\n    updateClusterCounts();\n  }\n\n  const envoy::config::core::v3::BindConfig& bindConfig() const override { return bind_config_; }\n\n  Config::GrpcMuxSharedPtr adsMux() override { return ads_mux_; }\n  Grpc::AsyncClientManager& grpcAsyncClientManager() override { return *async_client_manager_; }\n\n  const absl::optional<std::string>& localClusterName() const override {\n    return local_cluster_name_;\n  }\n\n  ClusterUpdateCallbacksHandlePtr\n  addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks&) override;\n\n  ClusterManagerFactory& clusterManagerFactory() override { return factory_; }\n\n  Config::SubscriptionFactory& subscriptionFactory() override { return subscription_factory_; }\n\n  void\n  
initializeSecondaryClusters(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) override;\n\nprotected:\n  virtual void postThreadLocalDrainConnections(const Cluster& cluster,\n                                               const HostVector& hosts_removed);\n  virtual void postThreadLocalClusterUpdate(const Cluster& cluster, uint32_t priority,\n                                            const HostVector& hosts_added,\n                                            const HostVector& hosts_removed);\n\nprivate:\n  /**\n   * Thread local cached cluster data. Each thread local cluster gets updates from the parent\n   * central dynamic cluster (if applicable). It maintains load balancer state and any created\n   * connection pools.\n   */\n  struct ThreadLocalClusterManagerImpl : public ThreadLocal::ThreadLocalObject {\n    struct ConnPoolsContainer {\n      ConnPoolsContainer(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host)\n          : pools_{std::make_shared<ConnPools>(dispatcher, host)} {}\n\n      using ConnPools = PriorityConnPoolMap<std::vector<uint8_t>, Http::ConnectionPool::Instance>;\n\n      // This is a shared_ptr so we can keep it alive while cleaning up.\n      std::shared_ptr<ConnPools> pools_;\n      bool ready_to_drain_{false};\n      uint64_t drains_remaining_{};\n    };\n\n    struct TcpConnPoolsContainer {\n      using ConnPools = std::map<std::vector<uint8_t>, Tcp::ConnectionPool::InstancePtr>;\n\n      ConnPools pools_;\n      uint64_t drains_remaining_{};\n    };\n\n    // Holds an unowned reference to a connection, and watches for Closed events. 
If the connection\n    // is closed, this container removes itself from the container that owns it.\n    struct TcpConnContainer : public Network::ConnectionCallbacks, public Event::DeferredDeletable {\n    public:\n      TcpConnContainer(ThreadLocalClusterManagerImpl& parent, const HostConstSharedPtr& host,\n                       Network::ClientConnection& connection)\n          : parent_(parent), host_(host), connection_(connection) {\n        connection_.addConnectionCallbacks(*this);\n      }\n\n      // Network::ConnectionCallbacks\n      void onEvent(Network::ConnectionEvent event) override {\n        if (event == Network::ConnectionEvent::LocalClose ||\n            event == Network::ConnectionEvent::RemoteClose) {\n          parent_.removeTcpConn(host_, connection_);\n        }\n      }\n      void onAboveWriteBufferHighWatermark() override {}\n      void onBelowWriteBufferLowWatermark() override {}\n\n      ThreadLocalClusterManagerImpl& parent_;\n      HostConstSharedPtr host_;\n      Network::ClientConnection& connection_;\n    };\n    using TcpConnectionsMap =\n        absl::node_hash_map<Network::ClientConnection*, std::unique_ptr<TcpConnContainer>>;\n\n    struct ClusterEntry : public ThreadLocalCluster {\n      ClusterEntry(ThreadLocalClusterManagerImpl& parent, ClusterInfoConstSharedPtr cluster,\n                   const LoadBalancerFactorySharedPtr& lb_factory);\n      ~ClusterEntry() override;\n\n      Http::ConnectionPool::Instance* connPool(ResourcePriority priority,\n                                               absl::optional<Http::Protocol> downstream_protocol,\n                                               LoadBalancerContext* context, bool peek);\n\n      Tcp::ConnectionPool::Instance* tcpConnPool(ResourcePriority priority,\n                                                 LoadBalancerContext* context, bool peek);\n\n      // Upstream::ThreadLocalCluster\n      const PrioritySet& prioritySet() override { return priority_set_; }\n      
ClusterInfoConstSharedPtr info() override { return cluster_info_; }\n      LoadBalancer& loadBalancer() override { return *lb_; }\n\n      ThreadLocalClusterManagerImpl& parent_;\n      PrioritySetImpl priority_set_;\n      // LB factory if applicable. Not all load balancer types have a factory. LB types that have\n      // a factory will create a new LB on every membership update. LB types that don't have a\n      // factory will create an LB on construction and use it forever.\n      LoadBalancerFactorySharedPtr lb_factory_;\n      // Current active LB.\n      LoadBalancerPtr lb_;\n      ClusterInfoConstSharedPtr cluster_info_;\n      Http::AsyncClientImpl http_async_client_;\n    };\n\n    using ClusterEntryPtr = std::unique_ptr<ClusterEntry>;\n\n    ThreadLocalClusterManagerImpl(ClusterManagerImpl& parent, Event::Dispatcher& dispatcher,\n                                  const absl::optional<std::string>& local_cluster_name);\n    ~ThreadLocalClusterManagerImpl() override;\n    void drainConnPools(const HostVector& hosts);\n    void drainConnPools(HostSharedPtr old_host, ConnPoolsContainer& container);\n    void clearContainer(HostSharedPtr old_host, ConnPoolsContainer& container);\n    void drainTcpConnPools(HostSharedPtr old_host, TcpConnPoolsContainer& container);\n    void removeTcpConn(const HostConstSharedPtr& host, Network::ClientConnection& connection);\n    void removeHosts(const std::string& name, const HostVector& hosts_removed);\n    void updateClusterMembership(const std::string& name, uint32_t priority,\n                                 PrioritySet::UpdateHostsParams update_hosts_params,\n                                 LocalityWeightsConstSharedPtr locality_weights,\n                                 const HostVector& hosts_added, const HostVector& hosts_removed,\n                                 uint64_t overprovisioning_factor);\n    void onHostHealthFailure(const HostSharedPtr& host);\n\n    ConnPoolsContainer* getHttpConnPoolsContainer(const 
HostConstSharedPtr& host,\n                                                  bool allocate = false);\n\n    ClusterManagerImpl& parent_;\n    Event::Dispatcher& thread_local_dispatcher_;\n    absl::flat_hash_map<std::string, ClusterEntryPtr> thread_local_clusters_;\n\n    // These maps are owned by the ThreadLocalClusterManagerImpl instead of the ClusterEntry\n    // to prevent lifetime/ownership issues when a cluster is dynamically removed.\n    absl::node_hash_map<HostConstSharedPtr, ConnPoolsContainer> host_http_conn_pool_map_;\n    absl::node_hash_map<HostConstSharedPtr, TcpConnPoolsContainer> host_tcp_conn_pool_map_;\n    absl::node_hash_map<HostConstSharedPtr, TcpConnectionsMap> host_tcp_conn_map_;\n\n    std::list<Envoy::Upstream::ClusterUpdateCallbacks*> update_callbacks_;\n    const PrioritySet* local_priority_set_{};\n    bool destroying_{};\n  };\n\n  struct ClusterData {\n    ClusterData(const envoy::config::cluster::v3::Cluster& cluster_config,\n                const std::string& version_info, bool added_via_api, ClusterSharedPtr&& cluster,\n                TimeSource& time_source)\n        : cluster_config_(cluster_config), config_hash_(MessageUtil::hash(cluster_config)),\n          version_info_(version_info), added_via_api_(added_via_api), cluster_(std::move(cluster)),\n          last_updated_(time_source.systemTime()) {}\n\n    bool blockUpdate(uint64_t hash) { return !added_via_api_ || config_hash_ == hash; }\n\n    LoadBalancerFactorySharedPtr loadBalancerFactory() {\n      if (thread_aware_lb_ != nullptr) {\n        return thread_aware_lb_->factory();\n      } else {\n        return nullptr;\n      }\n    }\n\n    const envoy::config::cluster::v3::Cluster cluster_config_;\n    const uint64_t config_hash_;\n    const std::string version_info_;\n    const bool added_via_api_;\n    ClusterSharedPtr cluster_;\n    // Optional thread aware LB depending on the LB type. 
Not all clusters have one.\n    ThreadAwareLoadBalancerPtr thread_aware_lb_;\n    SystemTime last_updated_;\n  };\n\n  struct ClusterUpdateCallbacksHandleImpl : public ClusterUpdateCallbacksHandle,\n                                            RaiiListElement<ClusterUpdateCallbacks*> {\n    ClusterUpdateCallbacksHandleImpl(ClusterUpdateCallbacks& cb,\n                                     std::list<ClusterUpdateCallbacks*>& parent)\n        : RaiiListElement<ClusterUpdateCallbacks*>(parent, &cb) {}\n  };\n\n  using ClusterDataPtr = std::unique_ptr<ClusterData>;\n  // This map is ordered so that config dumping is consistent.\n  using ClusterMap = std::map<std::string, ClusterDataPtr>;\n\n  struct PendingUpdates {\n    ~PendingUpdates() { disableTimer(); }\n    void enableTimer(const uint64_t timeout) {\n      if (timer_ != nullptr) {\n        ASSERT(!timer_->enabled());\n        timer_->enableTimer(std::chrono::milliseconds(timeout));\n      }\n    }\n    bool disableTimer() {\n      if (timer_ == nullptr) {\n        return false;\n      }\n\n      const bool was_enabled = timer_->enabled();\n      timer_->disableTimer();\n      return was_enabled;\n    }\n\n    Event::TimerPtr timer_;\n    // This is default constructed to the clock's epoch:\n    // https://en.cppreference.com/w/cpp/chrono/time_point/time_point\n    //\n    // Depending on your execution environment this value can be different.\n    // When running as host process: This will usually be the computer's boot time, which means that\n    // given a not very large `Cluster.CommonLbConfig.update_merge_window`, the first update will\n    // trigger immediately (the expected behavior). 
When running in some sandboxed environment this\n    // value can be set to the start time of the sandbox, which means that the delta calculated\n    // between now and the start time may fall within the\n    // `Cluster.CommonLbConfig.update_merge_window`, with the side effect to delay the first update.\n    MonotonicTime last_updated_;\n  };\n\n  using PendingUpdatesPtr = std::unique_ptr<PendingUpdates>;\n  using PendingUpdatesByPriorityMap = absl::node_hash_map<uint32_t, PendingUpdatesPtr>;\n  using PendingUpdatesByPriorityMapPtr = std::unique_ptr<PendingUpdatesByPriorityMap>;\n  using ClusterUpdatesMap = absl::node_hash_map<std::string, PendingUpdatesByPriorityMapPtr>;\n\n  void applyUpdates(const Cluster& cluster, uint32_t priority, PendingUpdates& updates);\n  bool scheduleUpdate(const Cluster& cluster, uint32_t priority, bool mergeable,\n                      const uint64_t timeout);\n  void createOrUpdateThreadLocalCluster(ClusterData& cluster);\n  ProtobufTypes::MessagePtr dumpClusterConfigs();\n  static ClusterManagerStats generateStats(Stats::Scope& scope);\n  void loadCluster(const envoy::config::cluster::v3::Cluster& cluster,\n                   const std::string& version_info, bool added_via_api, ClusterMap& cluster_map);\n  void onClusterInit(Cluster& cluster);\n  void postThreadLocalHealthFailure(const HostSharedPtr& host);\n  void updateClusterCounts();\n  void maybePrefetch(ThreadLocalClusterManagerImpl::ClusterEntryPtr& cluster_entry,\n                     std::function<ConnectionPool::Instance*()> prefetch_pool);\n\n  ClusterManagerFactory& factory_;\n  Runtime::Loader& runtime_;\n  Stats::Store& stats_;\n  ThreadLocal::SlotPtr tls_;\n  Random::RandomGenerator& random_;\n\nprotected:\n  ClusterMap active_clusters_;\n\nprivate:\n  ClusterMap warming_clusters_;\n  envoy::config::core::v3::BindConfig bind_config_;\n  Outlier::EventLoggerSharedPtr outlier_event_logger_;\n  const LocalInfo::LocalInfo& local_info_;\n  CdsApiPtr cds_api_;\n  
ClusterManagerStats cm_stats_;\n  ClusterManagerInitHelper init_helper_;\n  Config::GrpcMuxSharedPtr ads_mux_;\n  // Temporarily saved resume cds callback from updateClusterCounts invocation.\n  Config::ScopedResume resume_cds_;\n  LoadStatsReporterPtr load_stats_reporter_;\n  // The name of the local cluster of this Envoy instance if defined.\n  absl::optional<std::string> local_cluster_name_;\n  Grpc::AsyncClientManagerPtr async_client_manager_;\n  Server::ConfigTracker::EntryOwnerPtr config_tracker_entry_;\n  TimeSource& time_source_;\n  ClusterUpdatesMap updates_map_;\n  Event::Dispatcher& dispatcher_;\n  Http::Context& http_context_;\n  Config::SubscriptionFactoryImpl subscription_factory_;\n  ClusterSet primary_clusters_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/cluster_update_tracker.cc",
    "content": "#include \"common/upstream/cluster_update_tracker.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nClusterUpdateTracker::ClusterUpdateTracker(ClusterManager& cm, const std::string& cluster_name)\n    : cluster_name_(cluster_name),\n      cluster_update_callbacks_handle_(cm.addThreadLocalClusterUpdateCallbacks(*this)) {\n  Upstream::ThreadLocalCluster* cluster = cm.get(cluster_name_);\n  cluster_info_ = cluster ? cluster->info() : nullptr;\n}\n\nvoid ClusterUpdateTracker::onClusterAddOrUpdate(ThreadLocalCluster& cluster) {\n  if (cluster.info()->name() != cluster_name_) {\n    return;\n  }\n  cluster_info_ = cluster.info();\n}\n\nvoid ClusterUpdateTracker::onClusterRemoval(const std::string& cluster) {\n  if (cluster != cluster_name_) {\n    return;\n  }\n  cluster_info_.reset();\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/cluster_update_tracker.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/cluster_manager.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Keeps track of cluster updates in order to spot addition and removal.\n *\n * Use this class as a performance optimization to avoid going through ClusterManager::get()\n * on the hot path.\n */\nclass ClusterUpdateTracker : public ClusterUpdateCallbacks {\npublic:\n  ClusterUpdateTracker(ClusterManager& cm, const std::string& cluster_name);\n\n  bool exists() { return cluster_info_ != nullptr; }\n  ClusterInfoConstSharedPtr info() { return cluster_info_; }\n\n  // ClusterUpdateCallbacks\n  void onClusterAddOrUpdate(ThreadLocalCluster& cluster) override;\n  void onClusterRemoval(const std::string& cluster) override;\n\nprivate:\n  const std::string cluster_name_;\n  const ClusterUpdateCallbacksHandlePtr cluster_update_callbacks_handle_;\n\n  ClusterInfoConstSharedPtr cluster_info_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/conn_pool_map.h",
    "content": "#pragma once\n\n#include <functional>\n#include <vector>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/upstream/resource_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/debug_recursion_checker.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n/**\n *  A class mapping keys to connection pools, with some recycling logic built in.\n */\ntemplate <typename KEY_TYPE, typename POOL_TYPE> class ConnPoolMap {\npublic:\n  using PoolFactory = std::function<std::unique_ptr<POOL_TYPE>()>;\n  using DrainedCb = std::function<void()>;\n  using PoolOptRef = absl::optional<std::reference_wrapper<POOL_TYPE>>;\n\n  ConnPoolMap(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host,\n              ResourcePriority priority);\n  ~ConnPoolMap();\n  /**\n   * Returns an existing pool for `key`, or creates a new one using `factory`. Note that it is\n   * possible for this to fail if a limit on the number of pools allowed is reached.\n   * @return The pool corresponding to `key`, or `absl::nullopt`.\n   */\n  PoolOptRef getPool(KEY_TYPE key, const PoolFactory& factory);\n\n  /**\n   * @return the number of pools.\n   */\n  size_t size() const;\n\n  /**\n   * Destroys all mapped pools.\n   */\n  void clear();\n\n  /**\n   * Adds a drain callback to all mapped pools. Any future mapped pools with have the callback\n   * automatically added. Be careful with the callback. 
If it itself calls into `this`, modifying\n   * the state of `this`, there is a good chance it will cause corruption due to the callback firing\n   * immediately.\n   */\n  void addDrainedCallback(const DrainedCb& cb);\n\n  /**\n   * Instructs each connection pool to drain its connections.\n   */\n  void drainConnections();\n\nprivate:\n  /**\n   * Frees the first idle pool in `active_pools_`.\n   * @return false if no pool was freed.\n   */\n  bool freeOnePool();\n\n  /**\n   * Cleans up the active_pools_ map and updates resource tracking\n   **/\n  void clearActivePools();\n\n  absl::flat_hash_map<KEY_TYPE, std::unique_ptr<POOL_TYPE>> active_pools_;\n  Event::Dispatcher& thread_local_dispatcher_;\n  std::vector<DrainedCb> cached_callbacks_;\n  Common::DebugRecursionChecker recursion_checker_;\n  const HostConstSharedPtr host_;\n  const ResourcePriority priority_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/conn_pool_map_impl.h",
    "content": "#pragma once\n\n#include \"common/upstream/conn_pool_map.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nConnPoolMap<KEY_TYPE, POOL_TYPE>::ConnPoolMap(Envoy::Event::Dispatcher& dispatcher,\n                                              const HostConstSharedPtr& host,\n                                              ResourcePriority priority)\n    : thread_local_dispatcher_(dispatcher), host_(host), priority_(priority) {}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE> ConnPoolMap<KEY_TYPE, POOL_TYPE>::~ConnPoolMap() {\n  // Clean up the pools to ensure resource tracking is kept up to date. Note that we do not call\n  // `clear()` here to avoid doing a deferred delete. This triggers some unwanted race conditions\n  // on shutdown where deleted resources end up putting stuff on the deferred delete list after the\n  // worker threads have shut down.\n  clearActivePools();\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\ntypename ConnPoolMap<KEY_TYPE, POOL_TYPE>::PoolOptRef\nConnPoolMap<KEY_TYPE, POOL_TYPE>::getPool(KEY_TYPE key, const PoolFactory& factory) {\n  Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_);\n  // TODO(klarose): Consider how we will change the connection pool's configuration in the future.\n  // The plan is to change the downstream socket options... We may want to take those as a parameter\n  // here. Maybe we'll pass them to the factory function?\n  auto pool_iter = active_pools_.find(key);\n  if (pool_iter != active_pools_.end()) {\n    return std::ref(*(pool_iter->second));\n  }\n  ResourceLimit& connPoolResource = host_->cluster().resourceManager(priority_).connectionPools();\n  // We need a new pool. Check if we have room.\n  if (!connPoolResource.canCreate()) {\n    // We're full. Try to free up a pool. 
If we can't, bail out.\n    if (!freeOnePool()) {\n      host_->cluster().stats().upstream_cx_pool_overflow_.inc();\n      return absl::nullopt;\n    }\n\n    ASSERT(size() < connPoolResource.max(),\n           \"Freeing a pool should reduce the size to below the max.\");\n\n    // TODO(klarose): Consider some simple hysteresis here. How can we prevent iterating over all\n    // pools when we're at the limit every time we want to allocate a new one, even if most of the\n    // pools are not busy, while balancing that with not unnecessarily freeing all pools? If we\n    // start freeing once we cross a threshold, then stop after we cross another, we could\n    // achieve that balance.\n  }\n\n  // We have room for a new pool. Allocate one and let it know about any cached callbacks.\n  auto new_pool = factory();\n  connPoolResource.inc();\n  for (const auto& cb : cached_callbacks_) {\n    new_pool->addDrainedCallback(cb);\n  }\n\n  auto inserted = active_pools_.emplace(key, std::move(new_pool));\n  return std::ref(*inserted.first->second);\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nsize_t ConnPoolMap<KEY_TYPE, POOL_TYPE>::size() const {\n  return active_pools_.size();\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE> void ConnPoolMap<KEY_TYPE, POOL_TYPE>::clear() {\n  Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_);\n  for (auto& pool_pair : active_pools_) {\n    thread_local_dispatcher_.deferredDelete(std::move(pool_pair.second));\n  }\n  clearActivePools();\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nvoid ConnPoolMap<KEY_TYPE, POOL_TYPE>::addDrainedCallback(const DrainedCb& cb) {\n  Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_);\n  for (auto& pool_pair : active_pools_) {\n    pool_pair.second->addDrainedCallback(cb);\n  }\n\n  cached_callbacks_.emplace_back(std::move(cb));\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nvoid ConnPoolMap<KEY_TYPE, POOL_TYPE>::drainConnections() {\n  
Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_);\n  for (auto& pool_pair : active_pools_) {\n    pool_pair.second->drainConnections();\n  }\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nbool ConnPoolMap<KEY_TYPE, POOL_TYPE>::freeOnePool() {\n  // Try to find a pool that isn't doing anything.\n  auto pool_iter = active_pools_.begin();\n  while (pool_iter != active_pools_.end()) {\n    if (!pool_iter->second->hasActiveConnections()) {\n      break;\n    }\n    ++pool_iter;\n  }\n\n  if (pool_iter != active_pools_.end()) {\n    // We found one. Free it up, and let the caller know.\n    active_pools_.erase(pool_iter);\n    host_->cluster().resourceManager(priority_).connectionPools().dec();\n    return true;\n  }\n\n  return false;\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nvoid ConnPoolMap<KEY_TYPE, POOL_TYPE>::clearActivePools() {\n  host_->cluster().resourceManager(priority_).connectionPools().decBy(active_pools_.size());\n  active_pools_.clear();\n}\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/edf_scheduler.h",
    "content": "#pragma once\n#include <cstdint>\n#include <iostream>\n#include <queue>\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n// It's not sufficient to use trace level logging, since it becomes far too noisy for a number of\n// tests, so we can kill trace debug here.\n#define EDF_DEBUG 0\n\n#if EDF_DEBUG\n#define EDF_TRACE(...) ENVOY_LOG_MISC(trace, __VA_ARGS__)\n#else\n#define EDF_TRACE(...)\n#endif\n\n// Earliest Deadline First (EDF) scheduler\n// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling) used for weighted round robin.\n// Each pick from the schedule has the earliest deadline entry selected. Entries have deadlines set\n// at current time + 1 / weight, providing weighted round robin behavior with floating point\n// weights and an O(log n) pick time.\ntemplate <class C> class EdfScheduler {\npublic:\n  // Each time peekAgain is called, it will return the best-effort subsequent\n  // pick, popping and reinserting the entry as if it had been picked, and\n  // inserting it into the pre-picked queue.\n  // The first time peekAgain is called, it will return the\n  // first item which will be picked, the second time it is called it will\n  // return the second item which will be picked. 
As picks occur, that window\n  // will shrink.\n  std::shared_ptr<C> peekAgain(std::function<double(const C&)> calculate_weight) {\n    if (hasEntry()) {\n      prepick_list_.push_back(std::move(queue_.top().entry_));\n      std::shared_ptr<C> ret{prepick_list_.back()};\n      add(calculate_weight(*ret), ret);\n      queue_.pop();\n      return ret;\n    }\n    return nullptr;\n  }\n\n  /**\n   * Pick queue entry with closest deadline and adds it back using the weight\n   *   from calculate_weight.\n   * @return std::shared_ptr<C> to next valid the queue entry if or nullptr if none exists.\n   */\n  std::shared_ptr<C> pickAndAdd(std::function<double(const C&)> calculate_weight) {\n    while (!prepick_list_.empty()) {\n      // In this case the entry was added back during peekAgain so don't re-add.\n      if (prepick_list_.front().expired()) {\n        prepick_list_.pop_front();\n        continue;\n      }\n      std::shared_ptr<C> ret{prepick_list_.front()};\n      prepick_list_.pop_front();\n      return ret;\n    }\n    if (hasEntry()) {\n      std::shared_ptr<C> ret{queue_.top().entry_};\n      queue_.pop();\n      add(calculate_weight(*ret), ret);\n      return ret;\n    }\n    return nullptr;\n  }\n\n  /**\n   * Insert entry into queue with a given weight. The deadline will be current_time_ + 1 / weight.\n   * @param weight floating point weight.\n   * @param entry shared pointer to entry, only a weak reference will be retained.\n   */\n  void add(double weight, std::shared_ptr<C> entry) {\n    ASSERT(weight > 0);\n    const double deadline = current_time_ + 1.0 / weight;\n    EDF_TRACE(\"Insertion {} in queue with deadline {} and weight {}.\",\n              static_cast<const void*>(entry.get()), deadline, weight);\n    queue_.push({deadline, order_offset_++, entry});\n    ASSERT(queue_.top().deadline_ >= current_time_);\n  }\n\n  /**\n   * Implements empty() on the internal queue. 
Does not attempt to discard expired elements.\n   * @return bool whether or not the internal queue is empty.\n   */\n  bool empty() const { return queue_.empty(); }\n\nprivate:\n  /**\n   * Clears expired entries, and returns true if there's still entries in the queue.\n   */\n  bool hasEntry() {\n    EDF_TRACE(\"Queue pick: queue_.size()={}, current_time_={}.\", queue_.size(), current_time_);\n    while (true) {\n      if (queue_.empty()) {\n        EDF_TRACE(\"Queue is empty.\");\n        return false;\n      }\n      const EdfEntry& edf_entry = queue_.top();\n      // Entry has been removed, let's see if there's another one.\n      if (edf_entry.entry_.expired()) {\n        EDF_TRACE(\"Entry has expired, repick.\");\n        queue_.pop();\n        continue;\n      }\n      std::shared_ptr<C> ret{edf_entry.entry_};\n      ASSERT(edf_entry.deadline_ >= current_time_);\n      current_time_ = edf_entry.deadline_;\n      EDF_TRACE(\"Picked {}, current_time_={}.\", static_cast<const void*>(ret.get()), current_time_);\n      return true;\n    }\n  }\n\n  struct EdfEntry {\n    double deadline_;\n    // Tie breaker for entries with the same deadline. This is used to provide FIFO behavior.\n    uint64_t order_offset_;\n    // We only hold a weak pointer, since we don't support a remove operator. 
This allows entries to\n    // be lazily unloaded from the queue.\n    std::weak_ptr<C> entry_;\n\n    // Flip < direction to make this a min queue.\n    bool operator<(const EdfEntry& other) const {\n      return deadline_ > other.deadline_ ||\n             (deadline_ == other.deadline_ && order_offset_ > other.order_offset_);\n    }\n  };\n\n  // Current time in EDF scheduler.\n  // TODO(htuch): Is it worth the small extra complexity to use integer time for performance\n  // reasons?\n  double current_time_{};\n  // Offset used during addition to break ties when entries have the same weight but should reflect\n  // FIFO insertion order in picks.\n  uint64_t order_offset_{};\n  // Min priority queue for EDF.\n  std::priority_queue<EdfEntry> queue_;\n  std::list<std::weak_ptr<C>> prepick_list_;\n};\n\n#undef EDF_DEBUG\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/eds.cc",
    "content": "#include \"common/upstream/eds.h\"\n\n#include \"envoy/api/v2/endpoint.pb.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/decoded_resource_impl.h\"\n#include \"common/config/version_converter.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nEdsClusterImpl::EdsClusterImpl(\n    const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n    Stats::ScopePtr&& stats_scope, bool added_via_api)\n    : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope),\n                             added_via_api),\n      Envoy::Config::SubscriptionBase<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n          cluster.eds_cluster_config().eds_config().resource_api_version(),\n          factory_context.messageValidationVisitor(), \"cluster_name\"),\n      local_info_(factory_context.localInfo()),\n      cluster_name_(cluster.eds_cluster_config().service_name().empty()\n                        ? 
cluster.name()\n                        : cluster.eds_cluster_config().service_name()) {\n  Event::Dispatcher& dispatcher = factory_context.dispatcher();\n  assignment_timeout_ = dispatcher.createTimer([this]() -> void { onAssignmentTimeout(); });\n  const auto& eds_config = cluster.eds_cluster_config().eds_config();\n  if (eds_config.config_source_specifier_case() ==\n      envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath) {\n    initialize_phase_ = InitializePhase::Primary;\n  } else {\n    initialize_phase_ = InitializePhase::Secondary;\n  }\n  const auto resource_name = getResourceName();\n  subscription_ =\n      factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource(\n          eds_config, Grpc::Common::typeUrl(resource_name), info_->statsScope(), *this,\n          resource_decoder_);\n}\n\nvoid EdsClusterImpl::startPreInit() { subscription_->start({cluster_name_}); }\n\nvoid EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) {\n  absl::node_hash_map<std::string, HostSharedPtr> updated_hosts;\n  PriorityStateManager priority_state_manager(parent_, parent_.local_info_, &host_update_cb);\n  for (const auto& locality_lb_endpoint : cluster_load_assignment_.endpoints()) {\n    parent_.validateEndpointsForZoneAwareRouting(locality_lb_endpoint);\n\n    priority_state_manager.initializePriorityFor(locality_lb_endpoint);\n\n    for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) {\n      priority_state_manager.registerHostForPriority(\n          lb_endpoint.endpoint().hostname(),\n          parent_.resolveProtoAddress(lb_endpoint.endpoint().address()), locality_lb_endpoint,\n          lb_endpoint);\n    }\n  }\n\n  // Track whether we rebuilt any LB structures.\n  bool cluster_rebuilt = false;\n\n  const uint32_t overprovisioning_factor = PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n      cluster_load_assignment_.policy(), overprovisioning_factor, 
kDefaultOverProvisioningFactor);\n\n  LocalityWeightsMap empty_locality_map;\n\n  // Loop over all priorities that exist in the new configuration.\n  auto& priority_state = priority_state_manager.priorityState();\n  for (size_t i = 0; i < priority_state.size(); ++i) {\n    if (parent_.locality_weights_map_.size() <= i) {\n      parent_.locality_weights_map_.resize(i + 1);\n    }\n    if (priority_state[i].first != nullptr) {\n      cluster_rebuilt |= parent_.updateHostsPerLocality(\n          i, overprovisioning_factor, *priority_state[i].first, parent_.locality_weights_map_[i],\n          priority_state[i].second, priority_state_manager, updated_hosts);\n    } else {\n      // If the new update contains a priority with no hosts, call the update function with an empty\n      // set of hosts.\n      cluster_rebuilt |= parent_.updateHostsPerLocality(\n          i, overprovisioning_factor, {}, parent_.locality_weights_map_[i], empty_locality_map,\n          priority_state_manager, updated_hosts);\n    }\n  }\n\n  // Loop over all priorities not present in the config that already exists. 
This will\n  // empty out any remaining priority that the config update did not refer to.\n  for (size_t i = priority_state.size(); i < parent_.priority_set_.hostSetsPerPriority().size();\n       ++i) {\n    if (parent_.locality_weights_map_.size() <= i) {\n      parent_.locality_weights_map_.resize(i + 1);\n    }\n    cluster_rebuilt |= parent_.updateHostsPerLocality(\n        i, overprovisioning_factor, {}, parent_.locality_weights_map_[i], empty_locality_map,\n        priority_state_manager, updated_hosts);\n  }\n\n  parent_.all_hosts_ = std::move(updated_hosts);\n\n  if (!cluster_rebuilt) {\n    parent_.info_->stats().update_no_rebuild_.inc();\n  }\n\n  // If we didn't setup to initialize when our first round of health checking is complete, just\n  // do it now.\n  parent_.onPreInitComplete();\n}\n\nvoid EdsClusterImpl::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                                    const std::string&) {\n  if (!validateUpdateSize(resources.size())) {\n    return;\n  }\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment =\n      dynamic_cast<const envoy::config::endpoint::v3::ClusterLoadAssignment&>(\n          resources[0].get().resource());\n  if (cluster_load_assignment.cluster_name() != cluster_name_) {\n    throw EnvoyException(fmt::format(\"Unexpected EDS cluster (expecting {}): {}\", cluster_name_,\n                                     cluster_load_assignment.cluster_name()));\n  }\n  // Scrub original type information; we don't config dump endpoints today and\n  // this is significant memory overhead.\n  Config::VersionConverter::eraseOriginalTypeInformation(cluster_load_assignment);\n\n  // Disable timer (if enabled) as we have received new assignment.\n  if (assignment_timeout_->enabled()) {\n    assignment_timeout_->disableTimer();\n  }\n  // Check if endpoint_stale_after is set.\n  const uint64_t stale_after_ms =\n      
PROTOBUF_GET_MS_OR_DEFAULT(cluster_load_assignment.policy(), endpoint_stale_after, 0);\n  if (stale_after_ms > 0) {\n    // Stat to track how often we receive valid assignment_timeout in response.\n    info_->stats().assignment_timeout_received_.inc();\n    assignment_timeout_->enableTimer(std::chrono::milliseconds(stale_after_ms));\n  }\n\n  BatchUpdateHelper helper(*this, cluster_load_assignment);\n  priority_set_.batchHostUpdate(helper);\n}\n\nvoid EdsClusterImpl::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                                    const Protobuf::RepeatedPtrField<std::string>&,\n                                    const std::string&) {\n  if (!validateUpdateSize(added_resources.size())) {\n    return;\n  }\n  onConfigUpdate(added_resources, added_resources[0].get().version());\n}\n\nbool EdsClusterImpl::validateUpdateSize(int num_resources) {\n  if (num_resources == 0) {\n    ENVOY_LOG(debug, \"Missing ClusterLoadAssignment for {} in onConfigUpdate()\", cluster_name_);\n    info_->stats().update_empty_.inc();\n    onPreInitComplete();\n    return false;\n  }\n  if (num_resources != 1) {\n    throw EnvoyException(fmt::format(\"Unexpected EDS resource length: {}\", num_resources));\n    // (would be a return false here)\n  }\n  return true;\n}\n\nvoid EdsClusterImpl::onAssignmentTimeout() {\n  // We can no longer use the assignments, remove them.\n  // TODO(vishalpowar) This is not going to work for incremental updates, and we\n  // need to instead change the health status to indicate the assignments are\n  // stale.\n  envoy::config::endpoint::v3::ClusterLoadAssignment resource;\n  resource.set_cluster_name(cluster_name_);\n  ProtobufWkt::Any any_resource;\n  any_resource.PackFrom(resource);\n  Config::DecodedResourceImpl decoded_resource(resource_decoder_, any_resource, \"\");\n  std::vector<Config::DecodedResourceRef> resource_refs = {decoded_resource};\n  onConfigUpdate(resource_refs, \"\");\n  // Stat to track 
how often we end up with stale assignments.\n  info_->stats().assignment_stale_.inc();\n}\n\nvoid EdsClusterImpl::reloadHealthyHostsHelper(const HostSharedPtr& host) {\n  // Here we will see if we have a host that has been marked for deletion by service discovery\n  // but has been stabilized due to passing active health checking. If such a host is now\n  // failing active health checking we can remove it during this health check update.\n  HostSharedPtr host_to_exclude = host;\n  if (host_to_exclude != nullptr &&\n      host_to_exclude->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC) &&\n      host_to_exclude->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL)) {\n    // Empty for clarity.\n  } else {\n    // Do not exclude and remove the host during the update.\n    host_to_exclude = nullptr;\n  }\n\n  const auto& host_sets = prioritySet().hostSetsPerPriority();\n  for (size_t priority = 0; priority < host_sets.size(); ++priority) {\n    const auto& host_set = host_sets[priority];\n\n    // Filter current hosts in case we need to exclude a host.\n    HostVectorSharedPtr hosts_copy(new HostVector());\n    std::copy_if(host_set->hosts().begin(), host_set->hosts().end(),\n                 std::back_inserter(*hosts_copy),\n                 [&host_to_exclude](const HostSharedPtr& host) { return host_to_exclude != host; });\n\n    // Setup a hosts to remove vector in case we need to exclude a host.\n    HostVector hosts_to_remove;\n    if (hosts_copy->size() != host_set->hosts().size()) {\n      ASSERT(hosts_copy->size() == host_set->hosts().size() - 1);\n      hosts_to_remove.emplace_back(host_to_exclude);\n    }\n\n    // Filter hosts per locality in case we need to exclude a host.\n    HostsPerLocalityConstSharedPtr hosts_per_locality_copy = host_set->hostsPerLocality().filter(\n        {[&host_to_exclude](const Host& host) { return &host != host_to_exclude.get(); }})[0];\n\n    prioritySet().updateHosts(priority,\n                              
HostSetImpl::partitionHosts(hosts_copy, hosts_per_locality_copy),\n                              host_set->localityWeights(), {}, hosts_to_remove, absl::nullopt);\n  }\n\n  if (host_to_exclude != nullptr) {\n    ASSERT(all_hosts_.find(host_to_exclude->address()->asString()) != all_hosts_.end());\n    all_hosts_.erase(host_to_exclude->address()->asString());\n  }\n}\n\nbool EdsClusterImpl::updateHostsPerLocality(\n    const uint32_t priority, const uint32_t overprovisioning_factor, const HostVector& new_hosts,\n    LocalityWeightsMap& locality_weights_map, LocalityWeightsMap& new_locality_weights_map,\n    PriorityStateManager& priority_state_manager,\n    absl::node_hash_map<std::string, HostSharedPtr>& updated_hosts) {\n  const auto& host_set = priority_set_.getOrCreateHostSet(priority, overprovisioning_factor);\n  HostVectorSharedPtr current_hosts_copy(new HostVector(host_set.hosts()));\n\n  HostVector hosts_added;\n  HostVector hosts_removed;\n  // We need to trigger updateHosts with the new host vectors if they have changed. We also do this\n  // when the locality weight map or the overprovisioning factor. Note calling updateDynamicHostList\n  // is responsible for both determining whether there was a change and to perform the actual update\n  // to current_hosts_copy, so it must be called even if we know that we need to update (e.g. if the\n  // overprovisioning factor changes).\n  // TODO(htuch): We eagerly update all the host sets here on weight changes, which isn't great,\n  // since this has the knock on effect that we rebuild the load balancers and locality scheduler.\n  // We could make this happen lazily, as we do for host-level weight updates, where as things age\n  // out of the locality scheduler, we discover their new weights. 
We don't currently have a shared\n  // object for locality weights that we can update here, we should add something like this to\n  // improve performance and scalability of locality weight updates.\n  const bool hosts_updated = updateDynamicHostList(new_hosts, *current_hosts_copy, hosts_added,\n                                                   hosts_removed, updated_hosts, all_hosts_);\n  if (hosts_updated || host_set.overprovisioningFactor() != overprovisioning_factor ||\n      locality_weights_map != new_locality_weights_map) {\n    ASSERT(std::all_of(current_hosts_copy->begin(), current_hosts_copy->end(),\n                       [&](const auto& host) { return host->priority() == priority; }));\n    locality_weights_map = new_locality_weights_map;\n    ENVOY_LOG(debug,\n              \"EDS hosts or locality weights changed for cluster: {} current hosts {} priority {}\",\n              info_->name(), host_set.hosts().size(), host_set.priority());\n\n    priority_state_manager.updateClusterPrioritySet(priority, std::move(current_hosts_copy),\n                                                    hosts_added, hosts_removed, absl::nullopt,\n                                                    overprovisioning_factor);\n    return true;\n  }\n  return false;\n}\n\nvoid EdsClusterImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                                          const EnvoyException*) {\n  ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason);\n  // We need to allow server startup to continue, even if we have a bad config.\n  onPreInitComplete();\n}\n\nstd::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr>\nEdsClusterFactory::createClusterImpl(\n    const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n    Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n    Stats::ScopePtr&& stats_scope) {\n  if (!cluster.has_eds_cluster_config()) {\n    
throw EnvoyException(\"cannot create an EDS cluster without an EDS config\");\n  }\n\n  return std::make_pair(\n      std::make_unique<EdsClusterImpl>(cluster, context.runtime(), socket_factory_context,\n                                       std::move(stats_scope), context.addedViaApi()),\n      nullptr);\n}\n\n/**\n * Static registration for the Eds cluster factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(EdsClusterFactory, ClusterFactory);\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/eds.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.validate.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/config/subscription_factory.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/locality.h\"\n\n#include \"common/config/subscription_base.h\"\n#include \"common/upstream/cluster_factory_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/clusters/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Cluster implementation that reads host information from the Endpoint Discovery Service.\n */\nclass EdsClusterImpl\n    : public BaseDynamicClusterImpl,\n      Envoy::Config::SubscriptionBase<envoy::config::endpoint::v3::ClusterLoadAssignment> {\npublic:\n  EdsClusterImpl(const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n                 Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n                 Stats::ScopePtr&& stats_scope, bool added_via_api);\n\n  // Upstream::Cluster\n  InitializePhase initializePhase() const override { return initialize_phase_; }\n\nprivate:\n  // Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                      const std::string& system_version_info) override;\n  void 
onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                            const EnvoyException* e) override;\n  using LocalityWeightsMap = absl::node_hash_map<envoy::config::core::v3::Locality, uint32_t,\n                                                 LocalityHash, LocalityEqualTo>;\n  bool updateHostsPerLocality(const uint32_t priority, const uint32_t overprovisioning_factor,\n                              const HostVector& new_hosts, LocalityWeightsMap& locality_weights_map,\n                              LocalityWeightsMap& new_locality_weights_map,\n                              PriorityStateManager& priority_state_manager,\n                              absl::node_hash_map<std::string, HostSharedPtr>& updated_hosts);\n  bool validateUpdateSize(int num_resources);\n\n  // ClusterImplBase\n  void reloadHealthyHostsHelper(const HostSharedPtr& host) override;\n  void startPreInit() override;\n  void onAssignmentTimeout();\n\n  class BatchUpdateHelper : public PrioritySet::BatchUpdateCb {\n  public:\n    BatchUpdateHelper(\n        EdsClusterImpl& parent,\n        const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment)\n        : parent_(parent), cluster_load_assignment_(cluster_load_assignment) {}\n\n    // Upstream::PrioritySet::BatchUpdateCb\n    void batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) override;\n\n  private:\n    EdsClusterImpl& parent_;\n    const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment_;\n  };\n\n  Config::SubscriptionPtr subscription_;\n  const LocalInfo::LocalInfo& local_info_;\n  const std::string cluster_name_;\n  std::vector<LocalityWeightsMap> locality_weights_map_;\n  HostMap all_hosts_;\n  Event::TimerPtr assignment_timeout_;\n  InitializePhase initialize_phase_;\n};\n\nusing EdsClusterImplSharedPtr = std::shared_ptr<EdsClusterImpl>;\n\nclass EdsClusterFactory : public ClusterFactoryImplBase {\npublic:\n  EdsClusterFactory() : 
ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().Eds) {}\n\nprivate:\n  std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> createClusterImpl(\n      const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/health_checker_base_impl.cc",
    "content": "#include \"common/upstream/health_checker_base_impl.h\"\n\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/router/router.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nHealthCheckerImplBase::HealthCheckerImplBase(const Cluster& cluster,\n                                             const envoy::config::core::v3::HealthCheck& config,\n                                             Event::Dispatcher& dispatcher,\n                                             Runtime::Loader& runtime,\n                                             Random::RandomGenerator& random,\n                                             HealthCheckEventLoggerPtr&& event_logger)\n    : always_log_health_check_failures_(config.always_log_health_check_failures()),\n      cluster_(cluster), dispatcher_(dispatcher),\n      timeout_(PROTOBUF_GET_MS_REQUIRED(config, timeout)),\n      unhealthy_threshold_(PROTOBUF_GET_WRAPPED_REQUIRED(config, unhealthy_threshold)),\n      healthy_threshold_(PROTOBUF_GET_WRAPPED_REQUIRED(config, healthy_threshold)),\n      stats_(generateStats(cluster.info()->statsScope())), runtime_(runtime), random_(random),\n      reuse_connection_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, reuse_connection, true)),\n      event_logger_(std::move(event_logger)), interval_(PROTOBUF_GET_MS_REQUIRED(config, interval)),\n      no_traffic_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, no_traffic_interval, 60000)),\n      initial_jitter_(PROTOBUF_GET_MS_OR_DEFAULT(config, initial_jitter, 0)),\n      interval_jitter_(PROTOBUF_GET_MS_OR_DEFAULT(config, interval_jitter, 0)),\n      interval_jitter_percent_(config.interval_jitter_percent()),\n      unhealthy_interval_(\n          PROTOBUF_GET_MS_OR_DEFAULT(config, unhealthy_interval, interval_.count())),\n      
unhealthy_edge_interval_(\n          PROTOBUF_GET_MS_OR_DEFAULT(config, unhealthy_edge_interval, unhealthy_interval_.count())),\n      healthy_edge_interval_(\n          PROTOBUF_GET_MS_OR_DEFAULT(config, healthy_edge_interval, interval_.count())),\n      transport_socket_options_(initTransportSocketOptions(config)),\n      transport_socket_match_metadata_(initTransportSocketMatchMetadata(config)) {\n  cluster_.prioritySet().addMemberUpdateCb(\n      [this](const HostVector& hosts_added, const HostVector& hosts_removed) -> void {\n        onClusterMemberUpdate(hosts_added, hosts_removed);\n      });\n}\n\nstd::shared_ptr<const Network::TransportSocketOptionsImpl>\nHealthCheckerImplBase::initTransportSocketOptions(\n    const envoy::config::core::v3::HealthCheck& config) {\n  if (config.has_tls_options()) {\n    std::vector<std::string> protocols{config.tls_options().alpn_protocols().begin(),\n                                       config.tls_options().alpn_protocols().end()};\n    return std::make_shared<const Network::TransportSocketOptionsImpl>(\n        \"\", std::vector<std::string>{}, std::move(protocols));\n  }\n\n  return std::make_shared<const Network::TransportSocketOptionsImpl>();\n}\n\nMetadataConstSharedPtr HealthCheckerImplBase::initTransportSocketMatchMetadata(\n    const envoy::config::core::v3::HealthCheck& config) {\n  if (config.has_transport_socket_match_criteria()) {\n    std::shared_ptr<envoy::config::core::v3::Metadata> metadata =\n        std::make_shared<envoy::config::core::v3::Metadata>();\n    (*metadata->mutable_filter_metadata())[Envoy::Config::MetadataFilters::get()\n                                               .ENVOY_TRANSPORT_SOCKET_MATCH] =\n        config.transport_socket_match_criteria();\n    return metadata;\n  }\n\n  return nullptr;\n}\n\nHealthCheckerImplBase::~HealthCheckerImplBase() {\n  // ASSERTs inside the session destructor check to make sure we have been previously deferred\n  // deleted. 
Unify that logic here before actual destruction happens.\n  for (auto& session : active_sessions_) {\n    session.second->onDeferredDeleteBase();\n  }\n}\n\nvoid HealthCheckerImplBase::decHealthy() { stats_.healthy_.sub(1); }\n\nvoid HealthCheckerImplBase::decDegraded() { stats_.degraded_.sub(1); }\n\nHealthCheckerStats HealthCheckerImplBase::generateStats(Stats::Scope& scope) {\n  std::string prefix(\"health_check.\");\n  return {ALL_HEALTH_CHECKER_STATS(POOL_COUNTER_PREFIX(scope, prefix),\n                                   POOL_GAUGE_PREFIX(scope, prefix))};\n}\n\nvoid HealthCheckerImplBase::incHealthy() { stats_.healthy_.add(1); }\n\nvoid HealthCheckerImplBase::incDegraded() { stats_.degraded_.add(1); }\n\nstd::chrono::milliseconds HealthCheckerImplBase::interval(HealthState state,\n                                                          HealthTransition changed_state) const {\n  // See if the cluster has ever made a connection. If not, we use a much slower interval to keep\n  // the host info relatively up to date in case we suddenly start sending traffic to this cluster.\n  // In general host updates are rare and this should greatly smooth out needless health checking.\n  // If a connection has been established, we choose an interval based on the host's health. Please\n  // refer to the HealthCheck API documentation for more details.\n  uint64_t base_time_ms;\n  if (cluster_.info()->stats().upstream_cx_total_.used()) {\n    // When healthy/unhealthy threshold is configured the health transition of a host will be\n    // delayed. 
In this situation Envoy should use the edge interval settings between health checks.\n    //\n    // Example scenario for an unhealthy host with healthy_threshold set to 3:\n    // - check fails, host is still unhealthy and next check happens after unhealthy_interval;\n    // - check succeeds, host is still unhealthy and next check happens after healthy_edge_interval;\n    // - check succeeds, host is still unhealthy and next check happens after healthy_edge_interval;\n    // - check succeeds, host is now healthy and next check happens after interval;\n    // - check succeeds, host is still healthy and next check happens after interval.\n    switch (state) {\n    case HealthState::Unhealthy:\n      base_time_ms = changed_state == HealthTransition::ChangePending\n                         ? unhealthy_edge_interval_.count()\n                         : unhealthy_interval_.count();\n      break;\n    default:\n      base_time_ms = changed_state == HealthTransition::ChangePending\n                         ? 
healthy_edge_interval_.count()\n                         : interval_.count();\n      break;\n    }\n  } else {\n    base_time_ms = no_traffic_interval_.count();\n  }\n  return intervalWithJitter(base_time_ms, interval_jitter_);\n}\n\nstd::chrono::milliseconds\nHealthCheckerImplBase::intervalWithJitter(uint64_t base_time_ms,\n                                          std::chrono::milliseconds interval_jitter) const {\n  const uint64_t jitter_percent_mod = interval_jitter_percent_ * base_time_ms / 100;\n  if (jitter_percent_mod > 0) {\n    base_time_ms += random_.random() % jitter_percent_mod;\n  }\n\n  if (interval_jitter.count() > 0) {\n    base_time_ms += (random_.random() % interval_jitter.count());\n  }\n\n  const uint64_t min_interval = runtime_.snapshot().getInteger(\"health_check.min_interval\", 0);\n  const uint64_t max_interval = runtime_.snapshot().getInteger(\n      \"health_check.max_interval\", std::numeric_limits<uint64_t>::max());\n\n  uint64_t final_ms = std::min(base_time_ms, max_interval);\n  // We force a non-zero final MS, to prevent live lock.\n  final_ms = std::max(uint64_t(1), std::max(final_ms, min_interval));\n  return std::chrono::milliseconds(final_ms);\n}\n\nvoid HealthCheckerImplBase::addHosts(const HostVector& hosts) {\n  for (const HostSharedPtr& host : hosts) {\n    active_sessions_[host] = makeSession(host);\n    host->setActiveHealthFailureType(Host::ActiveHealthFailureType::UNKNOWN);\n    host->setHealthChecker(\n        HealthCheckHostMonitorPtr{new HealthCheckHostMonitorImpl(shared_from_this(), host)});\n    active_sessions_[host]->start();\n  }\n}\n\nvoid HealthCheckerImplBase::onClusterMemberUpdate(const HostVector& hosts_added,\n                                                  const HostVector& hosts_removed) {\n  addHosts(hosts_added);\n  for (const HostSharedPtr& host : hosts_removed) {\n    auto session_iter = active_sessions_.find(host);\n    ASSERT(active_sessions_.end() != session_iter);\n    // This deletion can happen 
inline in response to a host failure, so we deferred delete.\n    session_iter->second->onDeferredDeleteBase();\n    dispatcher_.deferredDelete(std::move(session_iter->second));\n    active_sessions_.erase(session_iter);\n  }\n}\n\nvoid HealthCheckerImplBase::runCallbacks(HostSharedPtr host, HealthTransition changed_state) {\n  for (const HostStatusCb& cb : callbacks_) {\n    cb(host, changed_state);\n  }\n}\n\nvoid HealthCheckerImplBase::HealthCheckHostMonitorImpl::setUnhealthy() {\n  // This is called cross thread. The cluster/health checker might already be gone.\n  std::shared_ptr<HealthCheckerImplBase> health_checker = health_checker_.lock();\n  if (health_checker) {\n    health_checker->setUnhealthyCrossThread(host_.lock());\n  }\n}\n\nvoid HealthCheckerImplBase::setUnhealthyCrossThread(const HostSharedPtr& host) {\n  // The threading here is complex. The cluster owns the only strong reference to the health\n  // checker. It might go away when we post to the main thread from a worker thread. 
To deal with\n  // this we use the following sequence of events:\n  // 1) We capture a weak reference to the health checker and post it from worker thread to main\n  //    thread.\n  // 2) On the main thread, we make sure it is still valid (as the cluster may have been destroyed).\n  // 3) Additionally, the host/session may also be gone by then so we check that also.\n  std::weak_ptr<HealthCheckerImplBase> weak_this = shared_from_this();\n  dispatcher_.post([weak_this, host]() -> void {\n    std::shared_ptr<HealthCheckerImplBase> shared_this = weak_this.lock();\n    if (shared_this == nullptr) {\n      return;\n    }\n\n    const auto session = shared_this->active_sessions_.find(host);\n    if (session == shared_this->active_sessions_.end()) {\n      return;\n    }\n\n    session->second->setUnhealthy(envoy::data::core::v3::PASSIVE);\n  });\n}\n\nvoid HealthCheckerImplBase::start() {\n  for (auto& host_set : cluster_.prioritySet().hostSetsPerPriority()) {\n    addHosts(host_set->hosts());\n  }\n}\n\nHealthCheckerImplBase::ActiveHealthCheckSession::ActiveHealthCheckSession(\n    HealthCheckerImplBase& parent, HostSharedPtr host)\n    : host_(host), parent_(parent),\n      interval_timer_(parent.dispatcher_.createTimer([this]() -> void { onIntervalBase(); })),\n      timeout_timer_(parent.dispatcher_.createTimer([this]() -> void { onTimeoutBase(); })) {\n\n  if (!host->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC)) {\n    parent.incHealthy();\n  }\n\n  if (host->healthFlagGet(Host::HealthFlag::DEGRADED_ACTIVE_HC)) {\n    parent.incDegraded();\n  }\n}\n\nHealthCheckerImplBase::ActiveHealthCheckSession::~ActiveHealthCheckSession() {\n  // Make sure onDeferredDeleteBase() has been called. 
We should not reference our parent at this\n  // point since we may have been deferred deleted.\n  ASSERT(interval_timer_ == nullptr && timeout_timer_ == nullptr);\n}\n\nvoid HealthCheckerImplBase::ActiveHealthCheckSession::onDeferredDeleteBase() {\n  // The session is about to be deferred deleted. Make sure all timers are gone and any\n  // implementation specific state is destroyed.\n  interval_timer_.reset();\n  timeout_timer_.reset();\n  if (!host_->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC)) {\n    parent_.decHealthy();\n  }\n  if (host_->healthFlagGet(Host::HealthFlag::DEGRADED_ACTIVE_HC)) {\n    parent_.decDegraded();\n  }\n  onDeferredDelete();\n}\n\nvoid HealthCheckerImplBase::ActiveHealthCheckSession::handleSuccess(bool degraded) {\n  // If we are healthy, reset the # of unhealthy to zero.\n  num_unhealthy_ = 0;\n\n  HealthTransition changed_state = HealthTransition::Unchanged;\n  if (host_->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC)) {\n    // If this is the first time we ever got a check result on this host, we immediately move\n    // it to healthy. 
This makes startup faster with a small reduction in overall reliability\n    // depending on the HC settings.\n    if (first_check_ || ++num_healthy_ == parent_.healthy_threshold_) {\n      host_->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n      parent_.incHealthy();\n      changed_state = HealthTransition::Changed;\n      if (parent_.event_logger_) {\n        parent_.event_logger_->logAddHealthy(parent_.healthCheckerType(), host_, first_check_);\n      }\n    } else {\n      changed_state = HealthTransition::ChangePending;\n    }\n  }\n\n  changed_state = clearPendingFlag(changed_state);\n\n  if (degraded != host_->healthFlagGet(Host::HealthFlag::DEGRADED_ACTIVE_HC)) {\n    if (degraded) {\n      host_->healthFlagSet(Host::HealthFlag::DEGRADED_ACTIVE_HC);\n      parent_.incDegraded();\n      if (parent_.event_logger_) {\n        parent_.event_logger_->logDegraded(parent_.healthCheckerType(), host_);\n      }\n    } else {\n      if (parent_.event_logger_) {\n        parent_.event_logger_->logNoLongerDegraded(parent_.healthCheckerType(), host_);\n      }\n      host_->healthFlagClear(Host::HealthFlag::DEGRADED_ACTIVE_HC);\n    }\n\n    // This check ensures that we honor the decision made about Changed vs ChangePending in the\n    // above block.\n    // TODO(snowp): should there be degraded_threshold?\n    if (changed_state == HealthTransition::Unchanged) {\n      changed_state = HealthTransition::Changed;\n    }\n  }\n\n  parent_.stats_.success_.inc();\n  first_check_ = false;\n  parent_.runCallbacks(host_, changed_state);\n\n  timeout_timer_->disableTimer();\n  interval_timer_->enableTimer(parent_.interval(HealthState::Healthy, changed_state));\n}\n\nHealthTransition HealthCheckerImplBase::ActiveHealthCheckSession::setUnhealthy(\n    envoy::data::core::v3::HealthCheckFailureType type) {\n  // If we are unhealthy, reset the # of healthy to zero.\n  num_healthy_ = 0;\n\n  HealthTransition changed_state = HealthTransition::Unchanged;\n  if 
(!host_->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC)) {\n    if (type != envoy::data::core::v3::NETWORK ||\n        ++num_unhealthy_ == parent_.unhealthy_threshold_) {\n      host_->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n      parent_.decHealthy();\n      changed_state = HealthTransition::Changed;\n      if (parent_.event_logger_) {\n        parent_.event_logger_->logEjectUnhealthy(parent_.healthCheckerType(), host_, type);\n      }\n    } else {\n      changed_state = HealthTransition::ChangePending;\n    }\n  }\n\n  changed_state = clearPendingFlag(changed_state);\n\n  if ((first_check_ || parent_.always_log_health_check_failures_) && parent_.event_logger_) {\n    parent_.event_logger_->logUnhealthy(parent_.healthCheckerType(), host_, type, first_check_);\n  }\n\n  parent_.stats_.failure_.inc();\n  if (type == envoy::data::core::v3::NETWORK) {\n    parent_.stats_.network_failure_.inc();\n  } else if (type == envoy::data::core::v3::PASSIVE) {\n    parent_.stats_.passive_failure_.inc();\n  }\n\n  first_check_ = false;\n  parent_.runCallbacks(host_, changed_state);\n  return changed_state;\n}\n\nvoid HealthCheckerImplBase::ActiveHealthCheckSession::handleFailure(\n    envoy::data::core::v3::HealthCheckFailureType type) {\n  HealthTransition changed_state = setUnhealthy(type);\n  // It's possible that the previous call caused this session to be deferred deleted.\n  if (timeout_timer_ != nullptr) {\n    timeout_timer_->disableTimer();\n  }\n\n  if (interval_timer_ != nullptr) {\n    interval_timer_->enableTimer(parent_.interval(HealthState::Unhealthy, changed_state));\n  }\n}\n\nHealthTransition\nHealthCheckerImplBase::ActiveHealthCheckSession::clearPendingFlag(HealthTransition changed_state) {\n  if (host_->healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC)) {\n    host_->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n    // Even though the health value of the host might have not changed, we set this to Changed to\n    // that the cluster 
can update its list of excluded hosts.\n    return HealthTransition::Changed;\n  }\n\n  return changed_state;\n}\n\nvoid HealthCheckerImplBase::ActiveHealthCheckSession::onIntervalBase() {\n  onInterval();\n  timeout_timer_->enableTimer(parent_.timeout_);\n  parent_.stats_.attempt_.inc();\n}\n\nvoid HealthCheckerImplBase::ActiveHealthCheckSession::onTimeoutBase() {\n  onTimeout();\n  handleFailure(envoy::data::core::v3::NETWORK);\n}\n\nvoid HealthCheckerImplBase::ActiveHealthCheckSession::onInitialInterval() {\n  if (parent_.initial_jitter_.count() == 0) {\n    onIntervalBase();\n  } else {\n    interval_timer_->enableTimer(\n        std::chrono::milliseconds(parent_.intervalWithJitter(0, parent_.initial_jitter_)));\n  }\n}\n\nvoid HealthCheckEventLoggerImpl::logEjectUnhealthy(\n    envoy::data::core::v3::HealthCheckerType health_checker_type,\n    const HostDescriptionConstSharedPtr& host,\n    envoy::data::core::v3::HealthCheckFailureType failure_type) {\n  createHealthCheckEvent(health_checker_type, *host, [&failure_type](auto& event) {\n    event.mutable_eject_unhealthy_event()->set_failure_type(failure_type);\n  });\n}\n\nvoid HealthCheckEventLoggerImpl::logUnhealthy(\n    envoy::data::core::v3::HealthCheckerType health_checker_type,\n    const HostDescriptionConstSharedPtr& host,\n    envoy::data::core::v3::HealthCheckFailureType failure_type, bool first_check) {\n  createHealthCheckEvent(health_checker_type, *host, [&first_check, &failure_type](auto& event) {\n    event.mutable_health_check_failure_event()->set_failure_type(failure_type);\n    event.mutable_health_check_failure_event()->set_first_check(first_check);\n  });\n}\n\nvoid HealthCheckEventLoggerImpl::logAddHealthy(\n    envoy::data::core::v3::HealthCheckerType health_checker_type,\n    const HostDescriptionConstSharedPtr& host, bool first_check) {\n  createHealthCheckEvent(health_checker_type, *host, [&first_check](auto& event) {\n    
event.mutable_add_healthy_event()->set_first_check(first_check);\n  });\n}\n\nvoid HealthCheckEventLoggerImpl::logDegraded(\n    envoy::data::core::v3::HealthCheckerType health_checker_type,\n    const HostDescriptionConstSharedPtr& host) {\n  createHealthCheckEvent(health_checker_type, *host,\n                         [](auto& event) { event.mutable_degraded_healthy_host(); });\n}\n\nvoid HealthCheckEventLoggerImpl::logNoLongerDegraded(\n    envoy::data::core::v3::HealthCheckerType health_checker_type,\n    const HostDescriptionConstSharedPtr& host) {\n  createHealthCheckEvent(health_checker_type, *host,\n                         [](auto& event) { event.mutable_no_longer_degraded_host(); });\n}\n\nvoid HealthCheckEventLoggerImpl::createHealthCheckEvent(\n    envoy::data::core::v3::HealthCheckerType health_checker_type, const HostDescription& host,\n    std::function<void(envoy::data::core::v3::HealthCheckEvent&)> callback) const {\n  envoy::data::core::v3::HealthCheckEvent event;\n  event.set_cluster_name(host.cluster().name());\n  event.set_health_checker_type(health_checker_type);\n\n  envoy::config::core::v3::Address address;\n  Network::Utility::addressToProtobufAddress(*host.address(), address);\n  *event.mutable_host() = std::move(address);\n\n  TimestampUtil::systemClockToTimestamp(time_source_.systemTime(), *event.mutable_timestamp());\n\n  callback(event);\n\n  // Make sure the type enums make it into the JSON\n  const auto json = MessageUtil::getJsonStringFromMessage(event, /* pretty_print */ false,\n                                                          /* always_print_primitive_fields */ true);\n  file_->write(fmt::format(\"{}\\n\", json));\n}\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/health_checker_base_impl.h",
    "content": "#pragma once\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/type/matcher/string.pb.h\"\n#include \"envoy/upstream/health_checker.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/matchers.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * All health checker stats. @see stats_macros.h\n */\n#define ALL_HEALTH_CHECKER_STATS(COUNTER, GAUGE)                                                   \\\n  COUNTER(attempt)                                                                                 \\\n  COUNTER(failure)                                                                                 \\\n  COUNTER(network_failure)                                                                         \\\n  COUNTER(passive_failure)                                                                         \\\n  COUNTER(success)                                                                                 \\\n  COUNTER(verify_cluster)                                                                          \\\n  GAUGE(degraded, Accumulate)                                                                      \\\n  GAUGE(healthy, Accumulate)\n\n/**\n * Definition of all health checker stats. 
@see stats_macros.h\n */\nstruct HealthCheckerStats {\n  ALL_HEALTH_CHECKER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Base implementation for all health checkers.\n */\nclass HealthCheckerImplBase : public HealthChecker,\n                              protected Logger::Loggable<Logger::Id::hc>,\n                              public std::enable_shared_from_this<HealthCheckerImplBase> {\npublic:\n  // Upstream::HealthChecker\n  void addHostCheckCompleteCb(HostStatusCb callback) override { callbacks_.push_back(callback); }\n  void start() override;\n  std::shared_ptr<const Network::TransportSocketOptionsImpl> transportSocketOptions() const {\n    return transport_socket_options_;\n  }\n  MetadataConstSharedPtr transportSocketMatchMetadata() const {\n    return transport_socket_match_metadata_;\n  }\n\nprotected:\n  class ActiveHealthCheckSession : public Event::DeferredDeletable {\n  public:\n    ~ActiveHealthCheckSession() override;\n    HealthTransition setUnhealthy(envoy::data::core::v3::HealthCheckFailureType type);\n    void onDeferredDeleteBase();\n    void start() { onInitialInterval(); }\n\n  protected:\n    ActiveHealthCheckSession(HealthCheckerImplBase& parent, HostSharedPtr host);\n\n    void handleSuccess(bool degraded = false);\n    void handleDegraded();\n    void handleFailure(envoy::data::core::v3::HealthCheckFailureType type);\n\n    HostSharedPtr host_;\n\n  private:\n    // Clears the pending flag if it is set. 
By clearing this flag we're marking the host as having\n    // been health checked.\n    // Returns the changed state to use following the flag update.\n    HealthTransition clearPendingFlag(HealthTransition changed_state);\n    virtual void onInterval() PURE;\n    void onIntervalBase();\n    virtual void onTimeout() PURE;\n    void onTimeoutBase();\n    virtual void onDeferredDelete() PURE;\n    void onInitialInterval();\n\n    HealthCheckerImplBase& parent_;\n    Event::TimerPtr interval_timer_;\n    Event::TimerPtr timeout_timer_;\n    uint32_t num_unhealthy_{};\n    uint32_t num_healthy_{};\n    bool first_check_{true};\n  };\n\n  using ActiveHealthCheckSessionPtr = std::unique_ptr<ActiveHealthCheckSession>;\n\n  HealthCheckerImplBase(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config,\n                        Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n                        Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger);\n  ~HealthCheckerImplBase() override;\n\n  virtual ActiveHealthCheckSessionPtr makeSession(HostSharedPtr host) PURE;\n  virtual envoy::data::core::v3::HealthCheckerType healthCheckerType() const PURE;\n\n  const bool always_log_health_check_failures_;\n  const Cluster& cluster_;\n  Event::Dispatcher& dispatcher_;\n  const std::chrono::milliseconds timeout_;\n  const uint32_t unhealthy_threshold_;\n  const uint32_t healthy_threshold_;\n  HealthCheckerStats stats_;\n  Runtime::Loader& runtime_;\n  Random::RandomGenerator& random_;\n  const bool reuse_connection_;\n  HealthCheckEventLoggerPtr event_logger_;\n\nprivate:\n  struct HealthCheckHostMonitorImpl : public HealthCheckHostMonitor {\n    HealthCheckHostMonitorImpl(const std::shared_ptr<HealthCheckerImplBase>& health_checker,\n                               const HostSharedPtr& host)\n        : health_checker_(health_checker), host_(host) {}\n\n    // Upstream::HealthCheckHostMonitor\n    void setUnhealthy() override;\n\n    
std::weak_ptr<HealthCheckerImplBase> health_checker_;\n    std::weak_ptr<Host> host_;\n  };\n\n  void addHosts(const HostVector& hosts);\n  void decHealthy();\n  void decDegraded();\n  HealthCheckerStats generateStats(Stats::Scope& scope);\n  void incHealthy();\n  void incDegraded();\n  std::chrono::milliseconds interval(HealthState state, HealthTransition changed_state) const;\n  std::chrono::milliseconds intervalWithJitter(uint64_t base_time_ms,\n                                               std::chrono::milliseconds interval_jitter) const;\n  void onClusterMemberUpdate(const HostVector& hosts_added, const HostVector& hosts_removed);\n  void runCallbacks(HostSharedPtr host, HealthTransition changed_state);\n  void setUnhealthyCrossThread(const HostSharedPtr& host);\n  static std::shared_ptr<const Network::TransportSocketOptionsImpl>\n  initTransportSocketOptions(const envoy::config::core::v3::HealthCheck& config);\n  static MetadataConstSharedPtr\n  initTransportSocketMatchMetadata(const envoy::config::core::v3::HealthCheck& config);\n\n  static const std::chrono::milliseconds NO_TRAFFIC_INTERVAL;\n\n  std::list<HostStatusCb> callbacks_;\n  const std::chrono::milliseconds interval_;\n  const std::chrono::milliseconds no_traffic_interval_;\n  const std::chrono::milliseconds initial_jitter_;\n  const std::chrono::milliseconds interval_jitter_;\n  const uint32_t interval_jitter_percent_;\n  const std::chrono::milliseconds unhealthy_interval_;\n  const std::chrono::milliseconds unhealthy_edge_interval_;\n  const std::chrono::milliseconds healthy_edge_interval_;\n  absl::node_hash_map<HostSharedPtr, ActiveHealthCheckSessionPtr> active_sessions_;\n  const std::shared_ptr<const Network::TransportSocketOptionsImpl> transport_socket_options_;\n  const MetadataConstSharedPtr transport_socket_match_metadata_;\n};\n\nclass HealthCheckEventLoggerImpl : public HealthCheckEventLogger {\npublic:\n  HealthCheckEventLoggerImpl(AccessLog::AccessLogManager& log_manager, TimeSource& 
time_source,\n                             const std::string& file_name)\n      : time_source_(time_source), file_(log_manager.createAccessLog(file_name)) {}\n\n  void logEjectUnhealthy(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                         const HostDescriptionConstSharedPtr& host,\n                         envoy::data::core::v3::HealthCheckFailureType failure_type) override;\n  void logAddHealthy(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                     const HostDescriptionConstSharedPtr& host, bool first_check) override;\n  void logUnhealthy(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                    const HostDescriptionConstSharedPtr& host,\n                    envoy::data::core::v3::HealthCheckFailureType failure_type,\n                    bool first_check) override;\n  void logDegraded(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                   const HostDescriptionConstSharedPtr& host) override;\n  void logNoLongerDegraded(envoy::data::core::v3::HealthCheckerType health_checker_type,\n                           const HostDescriptionConstSharedPtr& host) override;\n\nprivate:\n  void createHealthCheckEvent(\n      envoy::data::core::v3::HealthCheckerType health_checker_type, const HostDescription& host,\n      std::function<void(envoy::data::core::v3::HealthCheckEvent&)> callback) const;\n  TimeSource& time_source_;\n  AccessLog::AccessLogFileSharedPtr file_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/health_checker_impl.cc",
    "content": "#include \"common/upstream/health_checker_impl.h\"\n\n#include <memory>\n\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n#include \"envoy/server/health_checker_config.h\"\n#include \"envoy/type/v3/http.pb.h\"\n#include \"envoy/type/v3/range.pb.h\"\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/macros.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/router/router.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/upstream/host_utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nnamespace {\n\n// Helper functions to get the correct hostname for an L7 health check.\nconst std::string& getHostname(const HostSharedPtr& host, const std::string& config_hostname,\n                               const ClusterInfoConstSharedPtr& cluster) {\n  if (!host->hostnameForHealthChecks().empty()) {\n    return host->hostnameForHealthChecks();\n  }\n\n  if (!config_hostname.empty()) {\n    return config_hostname;\n  }\n\n  return cluster->name();\n}\n\nconst std::string& getHostname(const HostSharedPtr& host,\n                               const absl::optional<std::string>& config_hostname,\n                               const ClusterInfoConstSharedPtr& cluster) {\n  if (config_hostname.has_value()) {\n    return getHostname(host, config_hostname.value(), cluster);\n  }\n  return getHostname(host, EMPTY_STRING, cluster);\n}\n\n} // namespace\n\nclass HealthCheckerFactoryContextImpl : public 
Server::Configuration::HealthCheckerFactoryContext {\npublic:\n  HealthCheckerFactoryContextImpl(Upstream::Cluster& cluster, Envoy::Runtime::Loader& runtime,\n                                  Event::Dispatcher& dispatcher,\n                                  HealthCheckEventLoggerPtr&& event_logger,\n                                  ProtobufMessage::ValidationVisitor& validation_visitor,\n                                  Api::Api& api)\n      : cluster_(cluster), runtime_(runtime), dispatcher_(dispatcher),\n        event_logger_(std::move(event_logger)), validation_visitor_(validation_visitor), api_(api) {\n  }\n  Upstream::Cluster& cluster() override { return cluster_; }\n  Envoy::Runtime::Loader& runtime() override { return runtime_; }\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n  HealthCheckEventLoggerPtr eventLogger() override { return std::move(event_logger_); }\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override {\n    return validation_visitor_;\n  }\n  Api::Api& api() override { return api_; }\n\nprivate:\n  Upstream::Cluster& cluster_;\n  Envoy::Runtime::Loader& runtime_;\n  Event::Dispatcher& dispatcher_;\n  HealthCheckEventLoggerPtr event_logger_;\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n  Api::Api& api_;\n};\n\nHealthCheckerSharedPtr HealthCheckerFactory::create(\n    const envoy::config::core::v3::HealthCheck& health_check_config, Upstream::Cluster& cluster,\n    Runtime::Loader& runtime, Event::Dispatcher& dispatcher,\n    AccessLog::AccessLogManager& log_manager,\n    ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) {\n  HealthCheckEventLoggerPtr event_logger;\n  if (!health_check_config.event_log_path().empty()) {\n    event_logger = std::make_unique<HealthCheckEventLoggerImpl>(\n        log_manager, dispatcher.timeSource(), health_check_config.event_log_path());\n  }\n  switch (health_check_config.health_checker_case()) {\n  case 
envoy::config::core::v3::HealthCheck::HealthCheckerCase::kHttpHealthCheck:\n    return std::make_shared<ProdHttpHealthCheckerImpl>(cluster, health_check_config, dispatcher,\n                                                       runtime, api.randomGenerator(),\n                                                       std::move(event_logger));\n  case envoy::config::core::v3::HealthCheck::HealthCheckerCase::kTcpHealthCheck:\n    return std::make_shared<TcpHealthCheckerImpl>(cluster, health_check_config, dispatcher, runtime,\n                                                  api.randomGenerator(), std::move(event_logger));\n  case envoy::config::core::v3::HealthCheck::HealthCheckerCase::kGrpcHealthCheck:\n    if (!(cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP2)) {\n      throw EnvoyException(fmt::format(\"{} cluster must support HTTP/2 for gRPC healthchecking\",\n                                       cluster.info()->name()));\n    }\n    return std::make_shared<ProdGrpcHealthCheckerImpl>(cluster, health_check_config, dispatcher,\n                                                       runtime, api.randomGenerator(),\n                                                       std::move(event_logger));\n  case envoy::config::core::v3::HealthCheck::HealthCheckerCase::kCustomHealthCheck: {\n    auto& factory =\n        Config::Utility::getAndCheckFactory<Server::Configuration::CustomHealthCheckerFactory>(\n            health_check_config.custom_health_check());\n    std::unique_ptr<Server::Configuration::HealthCheckerFactoryContext> context(\n        new HealthCheckerFactoryContextImpl(cluster, runtime, dispatcher, std::move(event_logger),\n                                            validation_visitor, api));\n    return factory.createCustomHealthChecker(health_check_config, *context);\n  }\n  default:\n    // Checked by schema.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nHttpHealthCheckerImpl::HttpHealthCheckerImpl(const Cluster& cluster,\n             
                                const envoy::config::core::v3::HealthCheck& config,\n                                             Event::Dispatcher& dispatcher,\n                                             Runtime::Loader& runtime,\n                                             Random::RandomGenerator& random,\n                                             HealthCheckEventLoggerPtr&& event_logger)\n    : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)),\n      path_(config.http_health_check().path()), host_value_(config.http_health_check().host()),\n      request_headers_parser_(\n          Router::HeaderParser::configure(config.http_health_check().request_headers_to_add(),\n                                          config.http_health_check().request_headers_to_remove())),\n      http_status_checker_(config.http_health_check().expected_statuses(),\n                           static_cast<uint64_t>(Http::Code::OK)),\n      codec_client_type_(\n          codecClientType(config.http_health_check().hidden_envoy_deprecated_use_http2()\n                              ? envoy::type::v3::HTTP2\n                              : config.http_health_check().codec_client_type())),\n      random_generator_(random) {\n  // The deprecated service_name field was previously being used to compare with the health checked\n  // cluster name using a StartsWith comparison. 
Since StartsWith is essentially a prefix\n  // comparison, representing the intent by using a StringMatcher prefix is a more natural way.\n  if (!config.http_health_check().hidden_envoy_deprecated_service_name().empty()) {\n    envoy::type::matcher::v3::StringMatcher matcher;\n    matcher.set_prefix(config.http_health_check().hidden_envoy_deprecated_service_name());\n    service_name_matcher_.emplace(matcher);\n  } else if (config.http_health_check().has_service_name_matcher()) {\n    service_name_matcher_.emplace(config.http_health_check().service_name_matcher());\n  }\n}\n\nHttpHealthCheckerImpl::HttpStatusChecker::HttpStatusChecker(\n    const Protobuf::RepeatedPtrField<envoy::type::v3::Int64Range>& expected_statuses,\n    uint64_t default_expected_status) {\n  for (const auto& status_range : expected_statuses) {\n    const auto start = status_range.start();\n    const auto end = status_range.end();\n\n    if (start >= end) {\n      throw EnvoyException(fmt::format(\n          \"Invalid http status range: expecting start < end, but found start={} and end={}\", start,\n          end));\n    }\n\n    if (start < 100) {\n      throw EnvoyException(fmt::format(\n          \"Invalid http status range: expecting start >= 100, but found start={}\", start));\n    }\n\n    if (end > 600) {\n      throw EnvoyException(\n          fmt::format(\"Invalid http status range: expecting end <= 600, but found end={}\", end));\n    }\n\n    ranges_.emplace_back(std::make_pair(static_cast<uint64_t>(start), static_cast<uint64_t>(end)));\n  }\n\n  if (ranges_.empty()) {\n    ranges_.emplace_back(std::make_pair(default_expected_status, default_expected_status + 1));\n  }\n}\n\nbool HttpHealthCheckerImpl::HttpStatusChecker::inRange(uint64_t http_status) const {\n  for (const auto& range : ranges_) {\n    if (http_status >= range.first && http_status < range.second) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\nHttp::Protocol 
codecClientTypeToProtocol(Http::CodecClient::Type codec_client_type) {\n  switch (codec_client_type) {\n  case Http::CodecClient::Type::HTTP1:\n    return Http::Protocol::Http11;\n  case Http::CodecClient::Type::HTTP2:\n    return Http::Protocol::Http2;\n  case Http::CodecClient::Type::HTTP3:\n    return Http::Protocol::Http3;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nHttpHealthCheckerImpl::HttpActiveHealthCheckSession::HttpActiveHealthCheckSession(\n    HttpHealthCheckerImpl& parent, const HostSharedPtr& host)\n    : ActiveHealthCheckSession(parent, host), parent_(parent),\n      hostname_(getHostname(host, parent_.host_value_, parent_.cluster_.info())),\n      protocol_(codecClientTypeToProtocol(parent_.codec_client_type_)),\n      local_address_(std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\")) {}\n\nHttpHealthCheckerImpl::HttpActiveHealthCheckSession::~HttpActiveHealthCheckSession() {\n  ASSERT(client_ == nullptr);\n}\n\nvoid HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onDeferredDelete() {\n  if (client_) {\n    // If there is an active request it will get reset, so make sure we ignore the reset.\n    expect_reset_ = true;\n    client_->close();\n  }\n}\n\nvoid HttpHealthCheckerImpl::HttpActiveHealthCheckSession::decodeHeaders(\n    Http::ResponseHeaderMapPtr&& headers, bool end_stream) {\n  ASSERT(!response_headers_);\n  response_headers_ = std::move(headers);\n  if (end_stream) {\n    onResponseComplete();\n  }\n}\n\nvoid HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    // For the raw disconnect event, we are either between intervals in which case we already have\n    // a timer setup, or we did the close or got a reset, in which case we already setup a new\n    // timer. 
There is nothing to do here other than blow away the client.\n    response_headers_.reset();\n    parent_.dispatcher_.deferredDelete(std::move(client_));\n  }\n}\n\n// TODO(lilika) : Support connection pooling\nvoid HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onInterval() {\n  if (!client_) {\n    Upstream::Host::CreateConnectionData conn =\n        host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions(),\n                                           parent_.transportSocketMatchMetadata().get());\n    client_.reset(parent_.createCodecClient(conn));\n    client_->addConnectionCallbacks(connection_callback_impl_);\n    expect_reset_ = false;\n  }\n\n  Http::RequestEncoder* request_encoder = &client_->newStream(*this);\n  request_encoder->getStream().addCallbacks(*this);\n\n  const auto request_headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>(\n      {{Http::Headers::get().Method, \"GET\"},\n       {Http::Headers::get().Host, hostname_},\n       {Http::Headers::get().Path, parent_.path_},\n       {Http::Headers::get().UserAgent, Http::Headers::get().UserAgentValues.EnvoyHealthChecker}});\n  Router::FilterUtility::setUpstreamScheme(\n      *request_headers, host_->transportSocketFactory().implementsSecureTransport());\n  StreamInfo::StreamInfoImpl stream_info(protocol_, parent_.dispatcher_.timeSource());\n  stream_info.setDownstreamLocalAddress(local_address_);\n  stream_info.setDownstreamRemoteAddress(local_address_);\n  stream_info.onUpstreamHostSelected(host_);\n  parent_.request_headers_parser_->evaluateHeaders(*request_headers, stream_info);\n  request_encoder->encodeHeaders(*request_headers, true);\n}\n\nvoid HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onResetStream(Http::StreamResetReason,\n                                                                        absl::string_view) {\n  if (expect_reset_) {\n    return;\n  }\n\n  ENVOY_CONN_LOG(debug, \"connection/stream error health_flags={}\", 
*client_,\n                 HostUtility::healthFlagsToString(*host_));\n  handleFailure(envoy::data::core::v3::NETWORK);\n}\n\nHttpHealthCheckerImpl::HttpActiveHealthCheckSession::HealthCheckResult\nHttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() {\n  uint64_t response_code = Http::Utility::getResponseStatus(*response_headers_);\n  ENVOY_CONN_LOG(debug, \"hc response={} health_flags={}\", *client_, response_code,\n                 HostUtility::healthFlagsToString(*host_));\n\n  if (!parent_.http_status_checker_.inRange(response_code)) {\n    return HealthCheckResult::Failed;\n  }\n\n  const auto degraded = response_headers_->EnvoyDegraded() != nullptr;\n\n  if (parent_.service_name_matcher_.has_value() &&\n      parent_.runtime_.snapshot().featureEnabled(\"health_check.verify_cluster\", 100UL)) {\n    parent_.stats_.verify_cluster_.inc();\n    std::string service_cluster_healthchecked =\n        response_headers_->EnvoyUpstreamHealthCheckedCluster()\n            ? std::string(response_headers_->getEnvoyUpstreamHealthCheckedClusterValue())\n            : EMPTY_STRING;\n    if (parent_.service_name_matcher_->match(service_cluster_healthchecked)) {\n      return degraded ? HealthCheckResult::Degraded : HealthCheckResult::Succeeded;\n    } else {\n      return HealthCheckResult::Failed;\n    }\n  }\n\n  return degraded ? 
HealthCheckResult::Degraded : HealthCheckResult::Succeeded;\n}\n\nvoid HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onResponseComplete() {\n  switch (healthCheckResult()) {\n  case HealthCheckResult::Succeeded:\n    handleSuccess(false);\n    break;\n  case HealthCheckResult::Degraded:\n    handleSuccess(true);\n    break;\n  case HealthCheckResult::Failed:\n    host_->setActiveHealthFailureType(Host::ActiveHealthFailureType::UNHEALTHY);\n    handleFailure(envoy::data::core::v3::ACTIVE);\n    break;\n  }\n\n  if (shouldClose()) {\n    client_->close();\n  }\n\n  response_headers_.reset();\n}\n\n// It is possible for this session to have been deferred destroyed inline in handleFailure()\n// above so make sure we still have a connection that we might need to close.\nbool HttpHealthCheckerImpl::HttpActiveHealthCheckSession::shouldClose() const {\n  if (client_ == nullptr) {\n    return false;\n  }\n\n  if (!parent_.reuse_connection_) {\n    return true;\n  }\n\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.fixed_connection_close\")) {\n    return Http::HeaderUtility::shouldCloseConnection(client_->protocol(), *response_headers_);\n  }\n\n  if (response_headers_->Connection()) {\n    const bool close =\n        absl::EqualsIgnoreCase(response_headers_->Connection()->value().getStringView(),\n                               Http::Headers::get().ConnectionValues.Close);\n    if (close) {\n      return true;\n    }\n  }\n\n  if (response_headers_->ProxyConnection() && protocol_ < Http::Protocol::Http2) {\n    const bool close =\n        absl::EqualsIgnoreCase(response_headers_->ProxyConnection()->value().getStringView(),\n                               Http::Headers::get().ConnectionValues.Close);\n    if (close) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\nvoid HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onTimeout() {\n  if (client_) {\n    host_->setActiveHealthFailureType(Host::ActiveHealthFailureType::TIMEOUT);\n    
ENVOY_CONN_LOG(debug, \"connection/stream timeout health_flags={}\", *client_,\n                   HostUtility::healthFlagsToString(*host_));\n\n    // If there is an active request it will get reset, so make sure we ignore the reset.\n    expect_reset_ = true;\n\n    client_->close();\n  }\n}\n\nHttp::CodecClient::Type\nHttpHealthCheckerImpl::codecClientType(const envoy::type::v3::CodecClientType& type) {\n  switch (type) {\n  case envoy::type::v3::HTTP3:\n    return Http::CodecClient::Type::HTTP3;\n  case envoy::type::v3::HTTP2:\n    return Http::CodecClient::Type::HTTP2;\n  case envoy::type::v3::HTTP1:\n    return Http::CodecClient::Type::HTTP1;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nHttp::CodecClient*\nProdHttpHealthCheckerImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) {\n  return new Http::CodecClientProd(codec_client_type_, std::move(data.connection_),\n                                   data.host_description_, dispatcher_, random_generator_);\n}\n\nTcpHealthCheckMatcher::MatchSegments TcpHealthCheckMatcher::loadProtoBytes(\n    const Protobuf::RepeatedPtrField<envoy::config::core::v3::HealthCheck::Payload>& byte_array) {\n  MatchSegments result;\n\n  for (const auto& entry : byte_array) {\n    const auto decoded = Hex::decode(entry.text());\n    if (decoded.empty()) {\n      throw EnvoyException(fmt::format(\"invalid hex string '{}'\", entry.text()));\n    }\n    result.push_back(decoded);\n  }\n\n  return result;\n}\n\nbool TcpHealthCheckMatcher::match(const MatchSegments& expected, const Buffer::Instance& buffer) {\n  uint64_t start_index = 0;\n  for (const std::vector<uint8_t>& segment : expected) {\n    ssize_t search_result = buffer.search(segment.data(), segment.size(), start_index);\n    if (search_result == -1) {\n      return false;\n    }\n\n    start_index = search_result + segment.size();\n  }\n\n  return true;\n}\n\nTcpHealthCheckerImpl::TcpHealthCheckerImpl(const Cluster& cluster,\n                         
                  const envoy::config::core::v3::HealthCheck& config,\n                                           Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n                                           Random::RandomGenerator& random,\n                                           HealthCheckEventLoggerPtr&& event_logger)\n    : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)),\n      send_bytes_([&config] {\n        Protobuf::RepeatedPtrField<envoy::config::core::v3::HealthCheck::Payload> send_repeated;\n        if (!config.tcp_health_check().send().text().empty()) {\n          send_repeated.Add()->CopyFrom(config.tcp_health_check().send());\n        }\n        return TcpHealthCheckMatcher::loadProtoBytes(send_repeated);\n      }()),\n      receive_bytes_(TcpHealthCheckMatcher::loadProtoBytes(config.tcp_health_check().receive())) {}\n\nTcpHealthCheckerImpl::TcpActiveHealthCheckSession::~TcpActiveHealthCheckSession() {\n  ASSERT(client_ == nullptr);\n}\n\nvoid TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onDeferredDelete() {\n  if (client_) {\n    expect_close_ = true;\n    client_->close(Network::ConnectionCloseType::NoFlush);\n  }\n}\n\nvoid TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onData(Buffer::Instance& data) {\n  ENVOY_CONN_LOG(trace, \"total pending buffer={}\", *client_, data.length());\n  if (TcpHealthCheckMatcher::match(parent_.receive_bytes_, data)) {\n    ENVOY_CONN_LOG(trace, \"healthcheck passed\", *client_);\n    data.drain(data.length());\n    handleSuccess(false);\n    if (!parent_.reuse_connection_) {\n      expect_close_ = true;\n      client_->close(Network::ConnectionCloseType::NoFlush);\n    }\n  } else {\n    host_->setActiveHealthFailureType(Host::ActiveHealthFailureType::UNHEALTHY);\n  }\n}\n\nvoid TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == 
Network::ConnectionEvent::LocalClose) {\n    if (!expect_close_) {\n      handleFailure(envoy::data::core::v3::NETWORK);\n    }\n    parent_.dispatcher_.deferredDelete(std::move(client_));\n  }\n\n  if (event == Network::ConnectionEvent::Connected && parent_.receive_bytes_.empty()) {\n    // In this case we are just testing that we can connect, so immediately succeed. Also, since\n    // we are just doing a connection test, close the connection.\n    // NOTE(mattklein123): I've seen cases where the kernel will report a successful connection, and\n    // then proceed to fail subsequent calls (so the connection did not actually succeed). I'm not\n    // sure what situations cause this. If this turns into a problem, we may need to introduce a\n    // timer and see if the connection stays alive for some period of time while waiting to read.\n    // (Though we may never get a FIN and won't know until if/when we try to write). In short, this\n    // may need to get more complicated but we can start here.\n    // TODO(mattklein123): If we had a way on the connection interface to do an immediate read (vs.\n    // evented), that would be a good check to run here to make sure it returns the equivalent of\n    // EAGAIN. Need to think through how that would look from an interface perspective.\n    // TODO(mattklein123): In the case that a user configured bytes to write, they will not be\n    // be written, since we currently have no way to know if the bytes actually get written via\n    // the connection interface. 
We might want to figure out how to handle this better later.\n    expect_close_ = true;\n    client_->close(Network::ConnectionCloseType::NoFlush);\n    handleSuccess(false);\n  }\n}\n\n// TODO(lilika) : Support connection pooling\nvoid TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onInterval() {\n  if (!client_) {\n    client_ =\n        host_\n            ->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions(),\n                                          parent_.transportSocketMatchMetadata().get())\n            .connection_;\n    session_callbacks_ = std::make_shared<TcpSessionCallbacks>(*this);\n    client_->addConnectionCallbacks(*session_callbacks_);\n    client_->addReadFilter(session_callbacks_);\n\n    expect_close_ = false;\n    client_->connect();\n    client_->noDelay(true);\n  }\n\n  if (!parent_.send_bytes_.empty()) {\n    Buffer::OwnedImpl data;\n    for (const std::vector<uint8_t>& segment : parent_.send_bytes_) {\n      data.add(segment.data(), segment.size());\n    }\n\n    client_->write(data, false);\n  }\n}\n\nvoid TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onTimeout() {\n  expect_close_ = true;\n  host_->setActiveHealthFailureType(Host::ActiveHealthFailureType::TIMEOUT);\n  client_->close(Network::ConnectionCloseType::NoFlush);\n}\n\nGrpcHealthCheckerImpl::GrpcHealthCheckerImpl(const Cluster& cluster,\n                                             const envoy::config::core::v3::HealthCheck& config,\n                                             Event::Dispatcher& dispatcher,\n                                             Runtime::Loader& runtime,\n                                             Random::RandomGenerator& random,\n                                             HealthCheckEventLoggerPtr&& event_logger)\n    : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)),\n      random_generator_(random),\n      
service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n          \"grpc.health.v1.Health.Check\")) {\n  if (!config.grpc_health_check().service_name().empty()) {\n    service_name_ = config.grpc_health_check().service_name();\n  }\n\n  if (!config.grpc_health_check().authority().empty()) {\n    authority_value_ = config.grpc_health_check().authority();\n  }\n}\n\nGrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::GrpcActiveHealthCheckSession(\n    GrpcHealthCheckerImpl& parent, const HostSharedPtr& host)\n    : ActiveHealthCheckSession(parent, host), parent_(parent) {}\n\nGrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::~GrpcActiveHealthCheckSession() {\n  ASSERT(client_ == nullptr);\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onDeferredDelete() {\n  if (client_) {\n    // If there is an active request it will get reset, so make sure we ignore the reset.\n    expect_reset_ = true;\n    client_->close();\n  }\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeHeaders(\n    Http::ResponseHeaderMapPtr&& headers, bool end_stream) {\n  const auto http_response_status = Http::Utility::getResponseStatus(*headers);\n  if (http_response_status != enumToInt(Http::Code::OK)) {\n    // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md requires that\n    // grpc-status be used if available.\n    if (end_stream) {\n      const auto grpc_status = Grpc::Common::getGrpcStatus(*headers);\n      if (grpc_status) {\n        onRpcComplete(grpc_status.value(), Grpc::Common::getGrpcMessage(*headers), true);\n        return;\n      }\n    }\n    onRpcComplete(Grpc::Utility::httpToGrpcStatus(http_response_status), \"non-200 HTTP response\",\n                  end_stream);\n    return;\n  }\n  if (!Grpc::Common::isGrpcResponseHeaders(*headers, end_stream)) {\n    onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, \"not a gRPC request\", false);\n    return;\n  }\n  if (end_stream) {\n    // 
This is how, for instance, grpc-go signals about missing service - HTTP/2 200 OK with\n    // 'unimplemented' gRPC status.\n    const auto grpc_status = Grpc::Common::getGrpcStatus(*headers);\n    if (grpc_status) {\n      onRpcComplete(grpc_status.value(), Grpc::Common::getGrpcMessage(*headers), true);\n      return;\n    }\n    onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal,\n                  \"gRPC protocol violation: unexpected stream end\", true);\n  }\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeData(Buffer::Instance& data,\n                                                                     bool end_stream) {\n  if (end_stream) {\n    onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal,\n                  \"gRPC protocol violation: unexpected stream end\", true);\n    return;\n  }\n\n  // We should end up with only one frame here.\n  std::vector<Grpc::Frame> decoded_frames;\n  if (!decoder_.decode(data, decoded_frames)) {\n    onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, \"gRPC wire protocol decode error\",\n                  false);\n  }\n  for (auto& frame : decoded_frames) {\n    if (frame.length_ > 0) {\n      if (health_check_response_) {\n        // grpc.health.v1.Health.Check is unary RPC, so only one message is allowed.\n        onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, \"unexpected streaming\", false);\n        return;\n      }\n      health_check_response_ = std::make_unique<grpc::health::v1::HealthCheckResponse>();\n      Buffer::ZeroCopyInputStreamImpl stream(std::move(frame.data_));\n\n      if (frame.flags_ != Grpc::GRPC_FH_DEFAULT ||\n          !health_check_response_->ParseFromZeroCopyStream(&stream)) {\n        onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal,\n                      \"invalid grpc.health.v1 RPC payload\", false);\n        return;\n      }\n    }\n  }\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeTrailers(\n    
Http::ResponseTrailerMapPtr&& trailers) {\n  auto maybe_grpc_status = Grpc::Common::getGrpcStatus(*trailers);\n  auto grpc_status =\n      maybe_grpc_status\n          ? maybe_grpc_status.value()\n          : static_cast<Grpc::Status::GrpcStatus>(Grpc::Status::WellKnownGrpcStatus::Internal);\n  const std::string grpc_message =\n      maybe_grpc_status ? Grpc::Common::getGrpcMessage(*trailers) : \"invalid gRPC status\";\n  onRpcComplete(grpc_status, grpc_message, true);\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    // For the raw disconnect event, we are either between intervals in which case we already have\n    // a timer setup, or we did the close or got a reset, in which case we already setup a new\n    // timer. There is nothing to do here other than blow away the client.\n    parent_.dispatcher_.deferredDelete(std::move(client_));\n  }\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onInterval() {\n  if (!client_) {\n    Upstream::Host::CreateConnectionData conn =\n        host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions(),\n                                           parent_.transportSocketMatchMetadata().get());\n    client_ = parent_.createCodecClient(conn);\n    client_->addConnectionCallbacks(connection_callback_impl_);\n    client_->setCodecConnectionCallbacks(http_connection_callback_impl_);\n  }\n\n  request_encoder_ = &client_->newStream(*this);\n  request_encoder_->getStream().addCallbacks(*this);\n\n  const std::string& authority =\n      getHostname(host_, parent_.authority_value_, parent_.cluster_.info());\n  auto headers_message =\n      Grpc::Common::prepareHeaders(authority, parent_.service_method_.service()->full_name(),\n                                   parent_.service_method_.name(), absl::nullopt);\n  
headers_message->headers().setReferenceUserAgent(\n      Http::Headers::get().UserAgentValues.EnvoyHealthChecker);\n\n  Grpc::Common::toGrpcTimeout(parent_.timeout_, headers_message->headers());\n\n  Router::FilterUtility::setUpstreamScheme(\n      headers_message->headers(), host_->transportSocketFactory().implementsSecureTransport());\n\n  request_encoder_->encodeHeaders(headers_message->headers(), false);\n\n  grpc::health::v1::HealthCheckRequest request;\n  if (parent_.service_name_.has_value()) {\n    request.set_service(parent_.service_name_.value());\n  }\n\n  request_encoder_->encodeData(*Grpc::Common::serializeToGrpcFrame(request), true);\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onResetStream(Http::StreamResetReason,\n                                                                        absl::string_view) {\n  const bool expected_reset = expect_reset_;\n  const bool goaway = received_no_error_goaway_;\n  resetState();\n\n  if (expected_reset) {\n    // Stream reset was initiated by us (bogus gRPC response, timeout or cluster host is going\n    // away). 
In these cases health check failure has already been reported and a GOAWAY (if any)\n    // has already been handled, so just return.\n    return;\n  }\n\n  ENVOY_CONN_LOG(debug, \"connection/stream error health_flags={}\", *client_,\n                 HostUtility::healthFlagsToString(*host_));\n\n  if (goaway || !parent_.reuse_connection_) {\n    // Stream reset was unexpected, so we haven't closed the connection\n    // yet in response to a GOAWAY or due to disabled connection reuse.\n    client_->close();\n  }\n\n  // TODO(baranov1ch): according to all HTTP standards, we should check if reason is one of\n  // Http::StreamResetReason::RemoteRefusedStreamReset (which may mean GOAWAY),\n  // Http::StreamResetReason::RemoteReset or Http::StreamResetReason::ConnectionTermination (both\n  // mean connection close), check if connection is not fresh (was used for at least 1 request)\n  // and silently retry request on the fresh connection. This is also true for HTTP/1.1 healthcheck.\n  handleFailure(envoy::data::core::v3::NETWORK);\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onGoAway(\n    Http::GoAwayErrorCode error_code) {\n  ENVOY_CONN_LOG(debug, \"connection going away health_flags={}\", *client_,\n                 HostUtility::healthFlagsToString(*host_));\n  // If we have an active health check probe and receive a GOAWAY indicating\n  // graceful shutdown, allow the probe to complete before closing the connection.\n  // The connection will be closed when the active check completes or another\n  // terminal condition occurs, such as a timeout or stream reset.\n  if (request_encoder_ && error_code == Http::GoAwayErrorCode::NoError) {\n    received_no_error_goaway_ = true;\n    return;\n  }\n\n  // Even if we have active health check probe, fail it on GOAWAY and schedule new one.\n  if (request_encoder_) {\n    handleFailure(envoy::data::core::v3::NETWORK);\n    expect_reset_ = true;\n    
request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset);\n  }\n  client_->close();\n}\n\nbool GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::isHealthCheckSucceeded(\n    Grpc::Status::GrpcStatus grpc_status) const {\n  if (grpc_status != Grpc::Status::WellKnownGrpcStatus::Ok) {\n    return false;\n  }\n\n  if (!health_check_response_ ||\n      health_check_response_->status() != grpc::health::v1::HealthCheckResponse::SERVING) {\n    return false;\n  }\n\n  return true;\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onRpcComplete(\n    Grpc::Status::GrpcStatus grpc_status, const std::string& grpc_message, bool end_stream) {\n  logHealthCheckStatus(grpc_status, grpc_message);\n  if (isHealthCheckSucceeded(grpc_status)) {\n    handleSuccess(false);\n  } else {\n    handleFailure(envoy::data::core::v3::ACTIVE);\n  }\n\n  // Read the value as we may call resetState() and clear it.\n  const bool goaway = received_no_error_goaway_;\n\n  // |end_stream| will be false if we decided to stop healthcheck before HTTP stream has ended -\n  // invalid gRPC payload, unexpected message stream or wrong content-type.\n  if (end_stream) {\n    resetState();\n  } else {\n    // resetState() will be called by onResetStream().\n    expect_reset_ = true;\n    request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset);\n  }\n\n  if (!parent_.reuse_connection_ || goaway) {\n    client_->close();\n  }\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::resetState() {\n  expect_reset_ = false;\n  request_encoder_ = nullptr;\n  decoder_ = Grpc::Decoder();\n  health_check_response_.reset();\n  received_no_error_goaway_ = false;\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onTimeout() {\n  ENVOY_CONN_LOG(debug, \"connection/stream timeout health_flags={}\", *client_,\n                 HostUtility::healthFlagsToString(*host_));\n  expect_reset_ = true;\n  if (received_no_error_goaway_ || 
!parent_.reuse_connection_) {\n    client_->close();\n  } else {\n    request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset);\n  }\n}\n\nvoid GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::logHealthCheckStatus(\n    Grpc::Status::GrpcStatus grpc_status, const std::string& grpc_message) {\n  const char* service_status;\n  if (!health_check_response_) {\n    service_status = \"rpc_error\";\n  } else {\n    switch (health_check_response_->status()) {\n    case grpc::health::v1::HealthCheckResponse::SERVING:\n      service_status = \"serving\";\n      break;\n    case grpc::health::v1::HealthCheckResponse::NOT_SERVING:\n      service_status = \"not_serving\";\n      break;\n    case grpc::health::v1::HealthCheckResponse::UNKNOWN:\n      service_status = \"unknown\";\n      break;\n    case grpc::health::v1::HealthCheckResponse::SERVICE_UNKNOWN:\n      service_status = \"service_unknown\";\n      break;\n    default:\n      service_status = \"unknown_healthcheck_response\";\n      break;\n    }\n  }\n  std::string grpc_status_message;\n  if (grpc_status != Grpc::Status::WellKnownGrpcStatus::Ok && !grpc_message.empty()) {\n    grpc_status_message = fmt::format(\"{} ({})\", grpc_status, grpc_message);\n  } else {\n    grpc_status_message = absl::StrCat(\"\", grpc_status);\n  }\n\n  ENVOY_CONN_LOG(debug, \"hc grpc_status={} service_status={} health_flags={}\", *client_,\n                 grpc_status_message, service_status, HostUtility::healthFlagsToString(*host_));\n}\n\nHttp::CodecClientPtr\nProdGrpcHealthCheckerImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) {\n  return std::make_unique<Http::CodecClientProd>(\n      Http::CodecClient::Type::HTTP2, std::move(data.connection_), data.host_description_,\n      dispatcher_, random_generator_);\n}\n\nstd::ostream& operator<<(std::ostream& out, HealthState state) {\n  switch (state) {\n  case HealthState::Unhealthy:\n    out << \"Unhealthy\";\n    break;\n  case 
HealthState::Healthy:\n    out << \"Healthy\";\n    break;\n  }\n  return out;\n}\n\nstd::ostream& operator<<(std::ostream& out, HealthTransition changed_state) {\n  switch (changed_state) {\n  case HealthTransition::Unchanged:\n    out << \"Unchanged\";\n    break;\n  case HealthTransition::Changed:\n    out << \"Changed\";\n    break;\n  case HealthTransition::ChangePending:\n    out << \"ChangePending\";\n    break;\n  }\n  return out;\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/health_checker_impl.h",
    "content": "#pragma once\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/type/v3/http.pb.h\"\n#include \"envoy/type/v3/range.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/http/codec_client.h\"\n#include \"common/router/header_parser.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n#include \"common/upstream/health_checker_base_impl.h\"\n\n#include \"src/proto/grpc/health/v1/health.pb.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Factory for creating health checker implementations.\n */\nclass HealthCheckerFactory : public Logger::Loggable<Logger::Id::health_checker> {\npublic:\n  /**\n   * Create a health checker.\n   * @param health_check_config supplies the health check proto.\n   * @param cluster supplies the owning cluster.\n   * @param runtime supplies the runtime loader.\n   * @param dispatcher supplies the dispatcher.\n   * @param log_manager supplies the log_manager.\n   * @param validation_visitor message validation visitor instance.\n   * @param api reference to the Api object\n   * @return a health checker.\n   */\n  static HealthCheckerSharedPtr\n  create(const envoy::config::core::v3::HealthCheck& health_check_config,\n         Upstream::Cluster& cluster, Runtime::Loader& runtime, Event::Dispatcher& dispatcher,\n         AccessLog::AccessLogManager& log_manager,\n         ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api);\n};\n\n/**\n * HTTP health checker implementation. 
Connection keep alive is used where possible.\n */\nclass HttpHealthCheckerImpl : public HealthCheckerImplBase {\npublic:\n  HttpHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config,\n                        Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n                        Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger);\n\n  /**\n   * Utility class checking if given http status matches configured expectations.\n   */\n  class HttpStatusChecker {\n  public:\n    HttpStatusChecker(\n        const Protobuf::RepeatedPtrField<envoy::type::v3::Int64Range>& expected_statuses,\n        uint64_t default_expected_status);\n\n    bool inRange(uint64_t http_status) const;\n\n  private:\n    std::vector<std::pair<uint64_t, uint64_t>> ranges_;\n  };\n\nprivate:\n  struct HttpActiveHealthCheckSession : public ActiveHealthCheckSession,\n                                        public Http::ResponseDecoder,\n                                        public Http::StreamCallbacks {\n    HttpActiveHealthCheckSession(HttpHealthCheckerImpl& parent, const HostSharedPtr& host);\n    ~HttpActiveHealthCheckSession() override;\n\n    void onResponseComplete();\n    enum class HealthCheckResult { Succeeded, Degraded, Failed };\n    HealthCheckResult healthCheckResult();\n    bool shouldClose() const;\n\n    // ActiveHealthCheckSession\n    void onInterval() override;\n    void onTimeout() override;\n    void onDeferredDelete() final;\n\n    // Http::StreamDecoder\n    void decodeData(Buffer::Instance&, bool end_stream) override {\n      if (end_stream) {\n        onResponseComplete();\n      }\n    }\n    void decodeMetadata(Http::MetadataMapPtr&&) override {}\n\n    // Http::ResponseDecoder\n    void decode100ContinueHeaders(Http::ResponseHeaderMapPtr&&) override {}\n    void decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override;\n    void decodeTrailers(Http::ResponseTrailerMapPtr&&) 
override { onResponseComplete(); }\n\n    // Http::StreamCallbacks\n    void onResetStream(Http::StreamResetReason reason,\n                       absl::string_view transport_failure_reason) override;\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    void onEvent(Network::ConnectionEvent event);\n\n    class ConnectionCallbackImpl : public Network::ConnectionCallbacks {\n    public:\n      ConnectionCallbackImpl(HttpActiveHealthCheckSession& parent) : parent_(parent) {}\n      // Network::ConnectionCallbacks\n      void onEvent(Network::ConnectionEvent event) override { parent_.onEvent(event); }\n      void onAboveWriteBufferHighWatermark() override {}\n      void onBelowWriteBufferLowWatermark() override {}\n\n    private:\n      HttpActiveHealthCheckSession& parent_;\n    };\n\n    ConnectionCallbackImpl connection_callback_impl_{*this};\n    HttpHealthCheckerImpl& parent_;\n    Http::CodecClientPtr client_;\n    Http::ResponseHeaderMapPtr response_headers_;\n    const std::string& hostname_;\n    const Http::Protocol protocol_;\n    Network::Address::InstanceConstSharedPtr local_address_;\n    bool expect_reset_{};\n  };\n\n  using HttpActiveHealthCheckSessionPtr = std::unique_ptr<HttpActiveHealthCheckSession>;\n\n  virtual Http::CodecClient* createCodecClient(Upstream::Host::CreateConnectionData& data) PURE;\n\n  // HealthCheckerImplBase\n  ActiveHealthCheckSessionPtr makeSession(HostSharedPtr host) override {\n    return std::make_unique<HttpActiveHealthCheckSession>(*this, host);\n  }\n  envoy::data::core::v3::HealthCheckerType healthCheckerType() const override {\n    return envoy::data::core::v3::HTTP;\n  }\n\n  Http::CodecClient::Type codecClientType(const envoy::type::v3::CodecClientType& type);\n\n  const std::string path_;\n  const std::string host_value_;\n  absl::optional<Matchers::StringMatcherImpl> service_name_matcher_;\n  Router::HeaderParserPtr request_headers_parser_;\n  const 
HttpStatusChecker http_status_checker_;\n\nprotected:\n  const Http::CodecClient::Type codec_client_type_;\n  Random::RandomGenerator& random_generator_;\n};\n\n/**\n * Production implementation of the HTTP health checker that allocates a real codec client.\n */\nclass ProdHttpHealthCheckerImpl : public HttpHealthCheckerImpl {\npublic:\n  using HttpHealthCheckerImpl::HttpHealthCheckerImpl;\n\n  // HttpHealthCheckerImpl\n  Http::CodecClient* createCodecClient(Upstream::Host::CreateConnectionData& data) override;\n};\n\n/**\n * Utility class for loading a binary health checking config and matching it against a buffer.\n * Split out for ease of testing. The type of matching performed is the following (this is the\n * MongoDB health check request and response):\n *\n * \"send\": [\n    {\"binary\": \"39000000\"},\n    {\"binary\": \"EEEEEEEE\"},\n    {\"binary\": \"00000000\"},\n    {\"binary\": \"d4070000\"},\n    {\"binary\": \"00000000\"},\n    {\"binary\": \"746573742e\"},\n    {\"binary\": \"24636d6400\"},\n    {\"binary\": \"00000000\"},\n    {\"binary\": \"FFFFFFFF\"},\n\n    {\"binary\": \"13000000\"},\n    {\"binary\": \"01\"},\n    {\"binary\": \"70696e6700\"},\n    {\"binary\": \"000000000000f03f\"},\n    {\"binary\": \"00\"}\n   ],\n   \"receive\": [\n    {\"binary\": \"EEEEEEEE\"},\n    {\"binary\": \"01000000\"},\n    {\"binary\": \"00000000\"},\n    {\"binary\": \"0000000000000000\"},\n    {\"binary\": \"00000000\"},\n    {\"binary\": \"11000000\"},\n    {\"binary\": \"01\"},\n    {\"binary\": \"6f6b\"},\n    {\"binary\": \"00000000000000f03f\"},\n    {\"binary\": \"00\"}\n   ]\n *\n * During each health check cycle, all of the \"send\" bytes are sent to the target server. Each\n * binary block can be of arbitrary length and is just concatenated together when sent.\n *\n * On the receive side, \"fuzzy\" matching is performed such that each binary block must be found,\n * and in the order specified, but not necessary contiguous. 
Thus, in the example above,\n * \"FFFFFFFF\" could be inserted in the response between \"EEEEEEEE\" and \"01000000\" and the check\n * would still pass.\n */\nclass TcpHealthCheckMatcher {\npublic:\n  using MatchSegments = std::list<std::vector<uint8_t>>;\n\n  static MatchSegments loadProtoBytes(\n      const Protobuf::RepeatedPtrField<envoy::config::core::v3::HealthCheck::Payload>& byte_array);\n  static bool match(const MatchSegments& expected, const Buffer::Instance& buffer);\n};\n\n/**\n * TCP health checker implementation.\n */\nclass TcpHealthCheckerImpl : public HealthCheckerImplBase {\npublic:\n  TcpHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config,\n                       Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n                       Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger);\n\nprivate:\n  struct TcpActiveHealthCheckSession;\n\n  struct TcpSessionCallbacks : public Network::ConnectionCallbacks,\n                               public Network::ReadFilterBaseImpl {\n    TcpSessionCallbacks(TcpActiveHealthCheckSession& parent) : parent_(parent) {}\n\n    // Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override { parent_.onEvent(event); }\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    // Network::ReadFilter\n    Network::FilterStatus onData(Buffer::Instance& data, bool) override {\n      parent_.onData(data);\n      return Network::FilterStatus::StopIteration;\n    }\n\n    TcpActiveHealthCheckSession& parent_;\n  };\n\n  struct TcpActiveHealthCheckSession : public ActiveHealthCheckSession {\n    TcpActiveHealthCheckSession(TcpHealthCheckerImpl& parent, const HostSharedPtr& host)\n        : ActiveHealthCheckSession(parent, host), parent_(parent) {}\n    ~TcpActiveHealthCheckSession() override;\n\n    void onData(Buffer::Instance& data);\n    void 
onEvent(Network::ConnectionEvent event);\n\n    // ActiveHealthCheckSession\n    void onInterval() override;\n    void onTimeout() override;\n    void onDeferredDelete() final;\n\n    TcpHealthCheckerImpl& parent_;\n    Network::ClientConnectionPtr client_;\n    std::shared_ptr<TcpSessionCallbacks> session_callbacks_;\n    // If true, stream close was initiated by us, not e.g. remote close or TCP reset.\n    // In this case healthcheck status already reported, only state cleanup required.\n    bool expect_close_{};\n  };\n\n  using TcpActiveHealthCheckSessionPtr = std::unique_ptr<TcpActiveHealthCheckSession>;\n\n  // HealthCheckerImplBase\n  ActiveHealthCheckSessionPtr makeSession(HostSharedPtr host) override {\n    return std::make_unique<TcpActiveHealthCheckSession>(*this, host);\n  }\n  envoy::data::core::v3::HealthCheckerType healthCheckerType() const override {\n    return envoy::data::core::v3::TCP;\n  }\n\n  const TcpHealthCheckMatcher::MatchSegments send_bytes_;\n  const TcpHealthCheckMatcher::MatchSegments receive_bytes_;\n};\n\n/**\n * gRPC health checker implementation.\n */\nclass GrpcHealthCheckerImpl : public HealthCheckerImplBase {\npublic:\n  GrpcHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config,\n                        Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n                        Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger);\n\nprivate:\n  struct GrpcActiveHealthCheckSession : public ActiveHealthCheckSession,\n                                        public Http::ResponseDecoder,\n                                        public Http::StreamCallbacks {\n    GrpcActiveHealthCheckSession(GrpcHealthCheckerImpl& parent, const HostSharedPtr& host);\n    ~GrpcActiveHealthCheckSession() override;\n\n    void onRpcComplete(Grpc::Status::GrpcStatus grpc_status, const std::string& grpc_message,\n                       bool end_stream);\n    bool 
isHealthCheckSucceeded(Grpc::Status::GrpcStatus grpc_status) const;\n    void resetState();\n    void logHealthCheckStatus(Grpc::Status::GrpcStatus grpc_status,\n                              const std::string& grpc_message);\n\n    // ActiveHealthCheckSession\n    void onInterval() override;\n    void onTimeout() override;\n    void onDeferredDelete() final;\n\n    // Http::StreamDecoder\n    void decodeData(Buffer::Instance&, bool end_stream) override;\n    void decodeMetadata(Http::MetadataMapPtr&&) override {}\n\n    // Http::ResponseDecoder\n    void decode100ContinueHeaders(Http::ResponseHeaderMapPtr&&) override {}\n    void decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override;\n    void decodeTrailers(Http::ResponseTrailerMapPtr&&) override;\n\n    // Http::StreamCallbacks\n    void onResetStream(Http::StreamResetReason reason,\n                       absl::string_view transport_failure_reason) override;\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    void onEvent(Network::ConnectionEvent event);\n    void onGoAway(Http::GoAwayErrorCode error_code);\n\n    class ConnectionCallbackImpl : public Network::ConnectionCallbacks {\n    public:\n      ConnectionCallbackImpl(GrpcActiveHealthCheckSession& parent) : parent_(parent) {}\n      // Network::ConnectionCallbacks\n      void onEvent(Network::ConnectionEvent event) override { parent_.onEvent(event); }\n      void onAboveWriteBufferHighWatermark() override {}\n      void onBelowWriteBufferLowWatermark() override {}\n\n    private:\n      GrpcActiveHealthCheckSession& parent_;\n    };\n\n    class HttpConnectionCallbackImpl : public Http::ConnectionCallbacks {\n    public:\n      HttpConnectionCallbackImpl(GrpcActiveHealthCheckSession& parent) : parent_(parent) {}\n      // Http::ConnectionCallbacks\n      void onGoAway(Http::GoAwayErrorCode error_code) override { parent_.onGoAway(error_code); }\n\n    private:\n      
GrpcActiveHealthCheckSession& parent_;\n    };\n\n    ConnectionCallbackImpl connection_callback_impl_{*this};\n    HttpConnectionCallbackImpl http_connection_callback_impl_{*this};\n    GrpcHealthCheckerImpl& parent_;\n    Http::CodecClientPtr client_;\n    Http::RequestEncoder* request_encoder_;\n    Grpc::Decoder decoder_;\n    std::unique_ptr<grpc::health::v1::HealthCheckResponse> health_check_response_;\n    // If true, stream reset was initiated by us (GrpcActiveHealthCheckSession), not by HTTP stack,\n    // e.g. remote reset. In this case healthcheck status has already been reported, only state\n    // cleanup is required.\n    bool expect_reset_ = false;\n    // If true, we received a GOAWAY (NO_ERROR code) and are deferring closing the connection\n    // until the active probe completes.\n    bool received_no_error_goaway_ = false;\n  };\n\n  virtual Http::CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE;\n\n  // HealthCheckerImplBase\n  ActiveHealthCheckSessionPtr makeSession(HostSharedPtr host) override {\n    return std::make_unique<GrpcActiveHealthCheckSession>(*this, host);\n  }\n  envoy::data::core::v3::HealthCheckerType healthCheckerType() const override {\n    return envoy::data::core::v3::GRPC;\n  }\n\nprotected:\n  Random::RandomGenerator& random_generator_;\n\nprivate:\n  const Protobuf::MethodDescriptor& service_method_;\n  absl::optional<std::string> service_name_;\n  absl::optional<std::string> authority_value_;\n};\n\n/**\n * Production implementation of the gRPC health checker that allocates a real codec client.\n */\nclass ProdGrpcHealthCheckerImpl : public GrpcHealthCheckerImpl {\npublic:\n  using GrpcHealthCheckerImpl::GrpcHealthCheckerImpl;\n\n  // GrpcHealthCheckerImpl\n  Http::CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/health_discovery_service.cc",
    "content": "#include \"common/upstream/health_discovery_service.h\"\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/service/health/v3/hds.pb.h\"\n#include \"envoy/service/health/v3/hds.pb.validate.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * TODO(lilika): Add API knob for RetryInitialDelayMilliseconds\n * and RetryMaxDelayMilliseconds, instead of hardcoding them.\n *\n * Parameters of the jittered backoff strategy that defines how often\n * we retry to establish a stream to the management server\n */\nstatic constexpr uint32_t RetryInitialDelayMilliseconds = 1000;\nstatic constexpr uint32_t RetryMaxDelayMilliseconds = 30000;\n\nHdsDelegate::HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_client,\n                         envoy::config::core::v3::ApiVersion transport_api_version,\n                         Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n                         Envoy::Stats::Store& stats, Ssl::ContextManager& ssl_context_manager,\n                         ClusterInfoFactory& info_factory,\n                         AccessLog::AccessLogManager& access_log_manager, ClusterManager& cm,\n                         const LocalInfo::LocalInfo& local_info, Server::Admin& admin,\n                         Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls,\n                         ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api)\n    : stats_{ALL_HDS_STATS(POOL_COUNTER_PREFIX(scope, 
\"hds_delegate.\"))},\n      service_method_(Grpc::VersionedMethods(\n                          \"envoy.service.health.v3.HealthDiscoveryService.StreamHealthCheck\",\n                          \"envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck\")\n                          .getMethodDescriptorForVersion(transport_api_version)),\n      async_client_(std::move(async_client)), transport_api_version_(transport_api_version),\n      dispatcher_(dispatcher), runtime_(runtime), store_stats_(stats),\n      ssl_context_manager_(ssl_context_manager), info_factory_(info_factory),\n      access_log_manager_(access_log_manager), cm_(cm), local_info_(local_info), admin_(admin),\n      singleton_manager_(singleton_manager), tls_(tls), validation_visitor_(validation_visitor),\n      api_(api) {\n  health_check_request_.mutable_health_check_request()->mutable_node()->MergeFrom(\n      local_info_.node());\n  backoff_strategy_ = std::make_unique<JitteredExponentialBackOffStrategy>(\n      RetryInitialDelayMilliseconds, RetryMaxDelayMilliseconds, api_.randomGenerator());\n  hds_retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); });\n  hds_stream_response_timer_ = dispatcher.createTimer([this]() -> void { sendResponse(); });\n\n  // TODO(lilika): Add support for other types of healthchecks\n  health_check_request_.mutable_health_check_request()\n      ->mutable_capability()\n      ->add_health_check_protocols(envoy::service::health::v3::Capability::HTTP);\n  health_check_request_.mutable_health_check_request()\n      ->mutable_capability()\n      ->add_health_check_protocols(envoy::service::health::v3::Capability::TCP);\n\n  establishNewStream();\n}\n\nvoid HdsDelegate::setHdsRetryTimer() {\n  const auto retry_ms = std::chrono::milliseconds(backoff_strategy_->nextBackOffMs());\n  ENVOY_LOG(warn, \"HdsDelegate stream/connection failure, will retry in {} ms.\", retry_ms.count());\n\n  hds_retry_timer_->enableTimer(retry_ms);\n}\n\nvoid 
HdsDelegate::setHdsStreamResponseTimer() {\n  hds_stream_response_timer_->enableTimer(std::chrono::milliseconds(server_response_ms_));\n}\n\nvoid HdsDelegate::establishNewStream() {\n  ENVOY_LOG(debug, \"Establishing new gRPC bidi stream for {}\", service_method_.DebugString());\n  stream_ = async_client_->start(service_method_, *this, Http::AsyncClient::StreamOptions());\n  if (stream_ == nullptr) {\n    ENVOY_LOG(warn, \"Unable to establish new stream\");\n    handleFailure();\n    return;\n  }\n\n  Config::VersionConverter::prepareMessageForGrpcWire(health_check_request_,\n                                                      transport_api_version_);\n  ENVOY_LOG(debug, \"Sending HealthCheckRequest {} \", health_check_request_.DebugString());\n  stream_->sendMessage(health_check_request_, false);\n  stats_.responses_.inc();\n  backoff_strategy_->reset();\n}\n\nvoid HdsDelegate::handleFailure() {\n  stats_.errors_.inc();\n  setHdsRetryTimer();\n}\n\n// TODO(lilika): Add support for the same endpoint in different clusters/ports\nenvoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse HdsDelegate::sendResponse() {\n  envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse response;\n\n  for (const auto& cluster : hds_clusters_) {\n    // Add cluster health response and set name.\n    auto* cluster_health =\n        response.mutable_endpoint_health_response()->add_cluster_endpoints_health();\n    cluster_health->set_cluster_name(cluster->info()->name());\n\n    // Iterate through all hosts in our priority set.\n    for (const auto& hosts : cluster->prioritySet().hostSetsPerPriority()) {\n      // Get a grouping of hosts by locality.\n      for (const auto& locality_hosts : hosts->hostsPerLocality().get()) {\n        // For this locality, add the response grouping.\n        envoy::service::health::v3::LocalityEndpointsHealth* locality_health =\n            cluster_health->add_locality_endpoints_health();\n        
locality_health->mutable_locality()->MergeFrom(locality_hosts[0]->locality());\n\n        // Add all hosts to this locality.\n        for (const auto& host : locality_hosts) {\n          // Add this endpoint's health status to this locality grouping.\n          auto* endpoint = locality_health->add_endpoints_health();\n          Network::Utility::addressToProtobufAddress(\n              *host->address(), *endpoint->mutable_endpoint()->mutable_address());\n          // TODO(lilika): Add support for more granular options of\n          // envoy::config::core::v3::HealthStatus\n          if (host->health() == Host::Health::Healthy) {\n            endpoint->set_health_status(envoy::config::core::v3::HEALTHY);\n          } else {\n            switch (host->getActiveHealthFailureType()) {\n            case Host::ActiveHealthFailureType::TIMEOUT:\n              endpoint->set_health_status(envoy::config::core::v3::TIMEOUT);\n              break;\n            case Host::ActiveHealthFailureType::UNHEALTHY:\n            case Host::ActiveHealthFailureType::UNKNOWN:\n              endpoint->set_health_status(envoy::config::core::v3::UNHEALTHY);\n              break;\n            default:\n              NOT_REACHED_GCOVR_EXCL_LINE;\n              break;\n            }\n          }\n\n          // TODO(drewsortega): remove this once we are on v4 and endpoint_health_response is\n          // removed. 
Copy this endpoint's health info to the legacy flat-list.\n          response.mutable_endpoint_health_response()->add_endpoints_health()->MergeFrom(*endpoint);\n        }\n      }\n    }\n  }\n  ENVOY_LOG(debug, \"Sending EndpointHealthResponse to server {}\", response.DebugString());\n  stream_->sendMessage(response, false);\n  stats_.responses_.inc();\n  setHdsStreamResponseTimer();\n  return response;\n}\n\nvoid HdsDelegate::onCreateInitialMetadata(Http::RequestHeaderMap& metadata) {\n  UNREFERENCED_PARAMETER(metadata);\n}\n\nvoid HdsDelegate::onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata) {\n  UNREFERENCED_PARAMETER(metadata);\n}\n\nvoid HdsDelegate::processMessage(\n    std::unique_ptr<envoy::service::health::v3::HealthCheckSpecifier>&& message) {\n  ENVOY_LOG(debug, \"New health check response message {} \", message->DebugString());\n  ASSERT(message);\n\n  for (const auto& cluster_health_check : message->cluster_health_checks()) {\n    // Create HdsCluster config\n    static const envoy::config::core::v3::BindConfig bind_config;\n    envoy::config::cluster::v3::Cluster cluster_config;\n\n    cluster_config.set_name(cluster_health_check.cluster_name());\n    cluster_config.mutable_connect_timeout()->set_seconds(ClusterTimeoutSeconds);\n    cluster_config.mutable_per_connection_buffer_limit_bytes()->set_value(\n        ClusterConnectionBufferLimitBytes);\n\n    // Add endpoints to cluster\n    for (const auto& locality_endpoints : cluster_health_check.locality_endpoints()) {\n      // add endpoint group by locality to config\n      auto* endpoints = cluster_config.mutable_load_assignment()->add_endpoints();\n      // if this group contains locality information, save it.\n      if (locality_endpoints.has_locality()) {\n        endpoints->mutable_locality()->MergeFrom(locality_endpoints.locality());\n      }\n\n      // add all endpoints for this locality group to the config\n      for (const auto& endpoint : locality_endpoints.endpoints()) {\n  
      endpoints->add_lb_endpoints()->mutable_endpoint()->mutable_address()->MergeFrom(\n            endpoint.address());\n      }\n    }\n\n    // TODO(lilika): Add support for optional per-endpoint health checks\n\n    // Add healthchecks to cluster\n    for (auto& health_check : cluster_health_check.health_checks()) {\n      cluster_config.add_health_checks()->MergeFrom(health_check);\n    }\n\n    // Add transport_socket_match to cluster for use in host connections.\n    cluster_config.mutable_transport_socket_matches()->MergeFrom(\n        cluster_health_check.transport_socket_matches());\n\n    ENVOY_LOG(debug, \"New HdsCluster config {} \", cluster_config.DebugString());\n\n    // Create HdsCluster\n    hds_clusters_.emplace_back(new HdsCluster(admin_, runtime_, std::move(cluster_config),\n                                              bind_config, store_stats_, ssl_context_manager_,\n                                              false, info_factory_, cm_, local_info_, dispatcher_,\n                                              singleton_manager_, tls_, validation_visitor_, api_));\n    hds_clusters_.back()->initialize([] {});\n\n    hds_clusters_.back()->startHealthchecks(access_log_manager_, runtime_, dispatcher_, api_);\n  }\n}\n\n// TODO(lilika): Add support for subsequent HealthCheckSpecifier messages that\n// might modify the HdsClusters\nvoid HdsDelegate::onReceiveMessage(\n    std::unique_ptr<envoy::service::health::v3::HealthCheckSpecifier>&& message) {\n  stats_.requests_.inc();\n  ENVOY_LOG(debug, \"New health check response message {} \", message->DebugString());\n\n  // Validate message fields\n  try {\n    MessageUtil::validate(*message, validation_visitor_);\n  } catch (const ProtoValidationException& ex) {\n    // Increment error count\n    stats_.errors_.inc();\n    ENVOY_LOG(warn, \"Unable to validate health check specifier: {}\", ex.what());\n\n    // Do not continue processing message\n    return;\n  }\n\n  // Reset\n  
hds_clusters_.clear();\n\n  // Set response\n  auto server_response_ms = PROTOBUF_GET_MS_OR_DEFAULT(*message, interval, 1000);\n\n  // Process the HealthCheckSpecifier message.\n  processMessage(std::move(message));\n\n  if (server_response_ms_ != server_response_ms) {\n    server_response_ms_ = server_response_ms;\n    setHdsStreamResponseTimer();\n  }\n}\n\nvoid HdsDelegate::onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) {\n  UNREFERENCED_PARAMETER(metadata);\n}\n\nvoid HdsDelegate::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) {\n  ENVOY_LOG(warn, \"{} gRPC config stream closed: {}, {}\", service_method_.name(), status, message);\n  hds_stream_response_timer_->disableTimer();\n  stream_ = nullptr;\n  server_response_ms_ = 0;\n  handleFailure();\n}\n\nHdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime,\n                       envoy::config::cluster::v3::Cluster cluster,\n                       const envoy::config::core::v3::BindConfig& bind_config, Stats::Store& stats,\n                       Ssl::ContextManager& ssl_context_manager, bool added_via_api,\n                       ClusterInfoFactory& info_factory, ClusterManager& cm,\n                       const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher,\n                       Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls,\n                       ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api)\n    : runtime_(runtime), cluster_(std::move(cluster)), bind_config_(bind_config), stats_(stats),\n      ssl_context_manager_(ssl_context_manager), added_via_api_(added_via_api),\n      initial_hosts_(new HostVector()), validation_visitor_(validation_visitor) {\n  ENVOY_LOG(debug, \"Creating an HdsCluster\");\n  priority_set_.getOrCreateHostSet(0);\n\n  info_ = info_factory.createClusterInfo(\n      {admin, runtime_, cluster_, bind_config_, stats_, ssl_context_manager_, added_via_api_, cm,\n 
      local_info, dispatcher, singleton_manager, tls, validation_visitor, api});\n\n  // Temporary structure to hold Host pointers grouped by locality, to build\n  // initial_hosts_per_locality_.\n  std::vector<HostVector> hosts_by_locality;\n  hosts_by_locality.reserve(cluster_.load_assignment().endpoints_size());\n\n  // Iterate over every endpoint in every cluster.\n  for (const auto& locality_endpoints : cluster_.load_assignment().endpoints()) {\n    // Add a locality grouping to the hosts sorted by locality.\n    hosts_by_locality.emplace_back();\n    hosts_by_locality.back().reserve(locality_endpoints.lb_endpoints_size());\n\n    for (const auto& host : locality_endpoints.lb_endpoints()) {\n      // Initialize an endpoint host object.\n      HostSharedPtr endpoint = std::make_shared<HostImpl>(\n          info_, \"\", Network::Address::resolveProtoAddress(host.endpoint().address()), nullptr, 1,\n          locality_endpoints.locality(),\n          envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0,\n          envoy::config::core::v3::UNKNOWN);\n      // Add this host/endpoint pointer to our flat list of endpoints for health checking.\n      initial_hosts_->push_back(endpoint);\n      // Add this host/endpoint pointer to our structured list by locality so results can be\n      // requested by locality.\n      hosts_by_locality.back().push_back(endpoint);\n    }\n  }\n  // Create the HostsPerLocality.\n  initial_hosts_per_locality_ =\n      std::make_shared<Envoy::Upstream::HostsPerLocalityImpl>(std::move(hosts_by_locality), false);\n}\n\nClusterSharedPtr HdsCluster::create() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\nClusterInfoConstSharedPtr\nProdClusterInfoFactory::createClusterInfo(const CreateClusterInfoParams& params) {\n  Envoy::Stats::ScopePtr scope =\n      params.stats_.createScope(fmt::format(\"cluster.{}.\", params.cluster_.name()));\n\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      
params.admin_, params.ssl_context_manager_, *scope, params.cm_, params.local_info_,\n      params.dispatcher_, params.stats_, params.singleton_manager_, params.tls_,\n      params.validation_visitor_, params.api_);\n\n  // TODO(JimmyCYJ): Support SDS for HDS cluster.\n  Network::TransportSocketFactoryPtr socket_factory =\n      Upstream::createTransportSocketFactory(params.cluster_, factory_context);\n  auto socket_matcher = std::make_unique<TransportSocketMatcherImpl>(\n      params.cluster_.transport_socket_matches(), factory_context, socket_factory, *scope);\n\n  return std::make_unique<ClusterInfoImpl>(params.cluster_, params.bind_config_, params.runtime_,\n                                           std::move(socket_matcher), std::move(scope),\n                                           params.added_via_api_, factory_context);\n}\n\nvoid HdsCluster::startHealthchecks(AccessLog::AccessLogManager& access_log_manager,\n                                   Runtime::Loader& runtime, Event::Dispatcher& dispatcher,\n                                   Api::Api& api) {\n  for (auto& health_check : cluster_.health_checks()) {\n    health_checkers_.push_back(Upstream::HealthCheckerFactory::create(\n        health_check, *this, runtime, dispatcher, access_log_manager, validation_visitor_, api));\n    health_checkers_.back()->start();\n  }\n}\n\nvoid HdsCluster::initialize(std::function<void()> callback) {\n  initialization_complete_callback_ = callback;\n  for (const auto& host : *initial_hosts_) {\n    host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  }\n  // Use the ungrouped and grouped hosts lists to retain locality structure in the priority set.\n  priority_set_.updateHosts(\n      0, HostSetImpl::partitionHosts(initial_hosts_, initial_hosts_per_locality_), {},\n      *initial_hosts_, {}, absl::nullopt);\n}\n\nvoid HdsCluster::setOutlierDetector(const Outlier::DetectorSharedPtr&) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\n} // namespace Upstream\n} // 
namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/health_discovery_service.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/service/health/v3/hds.pb.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/backoff_strategy.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/macros.h\"\n#include \"common/config/utility.h\"\n#include \"common/grpc/async_client_impl.h\"\n#include \"common/network/resolver_impl.h\"\n#include \"common/upstream/health_checker_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass ProdClusterInfoFactory : public ClusterInfoFactory, Logger::Loggable<Logger::Id::upstream> {\npublic:\n  ClusterInfoConstSharedPtr createClusterInfo(const CreateClusterInfoParams& params) override;\n};\n\n// TODO(lilika): Add HdsClusters to the /clusters endpoint to get detailed stats about each HC host.\n\n/**\n * Implementation of Upstream::Cluster for hds clusters, clusters that are used\n * by HdsDelegates\n */\nclass HdsCluster : public Cluster, Logger::Loggable<Logger::Id::upstream> {\npublic:\n  static ClusterSharedPtr create();\n  HdsCluster(Server::Admin& admin, Runtime::Loader& runtime,\n             envoy::config::cluster::v3::Cluster cluster,\n             const envoy::config::core::v3::BindConfig& bind_config, Stats::Store& stats,\n             Ssl::ContextManager& ssl_context_manager, bool added_via_api,\n             ClusterInfoFactory& info_factory, ClusterManager& cm,\n             const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher,\n             
Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls,\n             ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api);\n\n  // Upstream::Cluster\n  InitializePhase initializePhase() const override { return InitializePhase::Primary; }\n  PrioritySet& prioritySet() override { return priority_set_; }\n  const PrioritySet& prioritySet() const override { return priority_set_; }\n  void setOutlierDetector(const Outlier::DetectorSharedPtr& outlier_detector);\n  HealthChecker* healthChecker() override { return health_checker_.get(); }\n  ClusterInfoConstSharedPtr info() const override { return info_; }\n  Outlier::Detector* outlierDetector() override { return outlier_detector_.get(); }\n  const Outlier::Detector* outlierDetector() const override { return outlier_detector_.get(); }\n  void initialize(std::function<void()> callback) override;\n\n  // Creates and starts healthcheckers to its endpoints\n  void startHealthchecks(AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime,\n                         Event::Dispatcher& dispatcher, Api::Api& api);\n\n  std::vector<Upstream::HealthCheckerSharedPtr> healthCheckers() { return health_checkers_; };\n\nprotected:\n  PrioritySetImpl priority_set_;\n  HealthCheckerSharedPtr health_checker_;\n  Outlier::DetectorSharedPtr outlier_detector_;\n\nprivate:\n  std::function<void()> initialization_complete_callback_;\n\n  Runtime::Loader& runtime_;\n  const envoy::config::cluster::v3::Cluster cluster_;\n  const envoy::config::core::v3::BindConfig& bind_config_;\n  Stats::Store& stats_;\n  Ssl::ContextManager& ssl_context_manager_;\n  bool added_via_api_;\n\n  HostVectorSharedPtr initial_hosts_;\n  HostsPerLocalitySharedPtr initial_hosts_per_locality_;\n  ClusterInfoConstSharedPtr info_;\n  std::vector<Upstream::HealthCheckerSharedPtr> health_checkers_;\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n};\n\nusing HdsClusterPtr = std::shared_ptr<HdsCluster>;\n\n/**\n * 
All hds stats. @see stats_macros.h\n */\n#define ALL_HDS_STATS(COUNTER)                                                                     \\\n  COUNTER(requests)                                                                                \\\n  COUNTER(responses)                                                                               \\\n  COUNTER(errors)\n\n/**\n * Struct definition for all hds stats. @see stats_macros.h\n */\nstruct HdsDelegateStats {\n  ALL_HDS_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n// TODO(lilika): Add /config_dump support for HdsDelegate\n\n/**\n * The HdsDelegate class is responsible for receiving requests from a management\n * server with a set of hosts to healthcheck, healthchecking them, and reporting\n * back the results.\n */\nclass HdsDelegate : Grpc::AsyncStreamCallbacks<envoy::service::health::v3::HealthCheckSpecifier>,\n                    Logger::Loggable<Logger::Id::upstream> {\npublic:\n  HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_client,\n              envoy::config::core::v3::ApiVersion transport_api_version,\n              Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Envoy::Stats::Store& stats,\n              Ssl::ContextManager& ssl_context_manager, ClusterInfoFactory& info_factory,\n              AccessLog::AccessLogManager& access_log_manager, ClusterManager& cm,\n              const LocalInfo::LocalInfo& local_info, Server::Admin& admin,\n              Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls,\n              ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api);\n\n  // Grpc::AsyncStreamCallbacks\n  void onCreateInitialMetadata(Http::RequestHeaderMap& metadata) override;\n  void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata) override;\n  void onReceiveMessage(\n      std::unique_ptr<envoy::service::health::v3::HealthCheckSpecifier>&& message) override;\n  void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& 
metadata) override;\n  void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override;\n  envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse sendResponse();\n\n  std::vector<HdsClusterPtr> hdsClusters() { return hds_clusters_; };\n\nprivate:\n  friend class HdsDelegateFriend;\n\n  void setHdsRetryTimer();\n  void setHdsStreamResponseTimer();\n  void handleFailure();\n  // Establishes a connection with the management server\n  void establishNewStream();\n  void processMessage(std::unique_ptr<envoy::service::health::v3::HealthCheckSpecifier>&& message);\n\n  HdsDelegateStats stats_;\n  const Protobuf::MethodDescriptor& service_method_;\n\n  Grpc::AsyncClient<envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse,\n                    envoy::service::health::v3::HealthCheckSpecifier>\n      async_client_;\n  const envoy::config::core::v3::ApiVersion transport_api_version_;\n  Grpc::AsyncStream<envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse>\n      stream_{};\n  Event::Dispatcher& dispatcher_;\n  Runtime::Loader& runtime_;\n  Envoy::Stats::Store& store_stats_;\n  Ssl::ContextManager& ssl_context_manager_;\n  ClusterInfoFactory& info_factory_;\n  AccessLog::AccessLogManager& access_log_manager_;\n  ClusterManager& cm_;\n  const LocalInfo::LocalInfo& local_info_;\n  Server::Admin& admin_;\n  Singleton::Manager& singleton_manager_;\n  ThreadLocal::SlotAllocator& tls_;\n\n  envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse health_check_request_;\n  std::unique_ptr<envoy::service::health::v3::HealthCheckSpecifier> health_check_message_;\n\n  std::vector<std::string> clusters_;\n  std::vector<HdsClusterPtr> hds_clusters_;\n\n  Event::TimerPtr hds_stream_response_timer_;\n  Event::TimerPtr hds_retry_timer_;\n  BackOffStrategyPtr backoff_strategy_;\n\n  // Soft limit on size of the cluster’s connections read and write buffers.\n  static constexpr uint32_t 
ClusterConnectionBufferLimitBytes = 32768;\n\n  // TODO(lilika): Add API knob for ClusterTimeoutSeconds, instead of\n  // hardcoding it.\n  // The timeout for new network connections to hosts in the cluster.\n  static constexpr uint32_t ClusterTimeoutSeconds = 1;\n\n  // How often envoy reports the healthcheck results to the server\n  uint32_t server_response_ms_ = 0;\n\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n  Api::Api& api_;\n};\n\nusing HdsDelegatePtr = std::unique_ptr<HdsDelegate>;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/host_utility.cc",
    "content": "#include \"common/upstream/host_utility.h\"\n\n#include <string>\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nvoid setHealthFlag(Upstream::Host::HealthFlag flag, const Host& host, std::string& health_status) {\n  switch (flag) {\n  case Host::HealthFlag::FAILED_ACTIVE_HC: {\n    if (host.healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC)) {\n      health_status += \"/failed_active_hc\";\n    }\n    break;\n  }\n\n  case Host::HealthFlag::FAILED_OUTLIER_CHECK: {\n    if (host.healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) {\n      health_status += \"/failed_outlier_check\";\n    }\n    break;\n  }\n\n  case Host::HealthFlag::FAILED_EDS_HEALTH: {\n    if (host.healthFlagGet(Host::HealthFlag::FAILED_EDS_HEALTH)) {\n      health_status += \"/failed_eds_health\";\n    }\n    break;\n  }\n\n  case Host::HealthFlag::DEGRADED_ACTIVE_HC: {\n    if (host.healthFlagGet(Host::HealthFlag::DEGRADED_ACTIVE_HC)) {\n      health_status += \"/degraded_active_hc\";\n    }\n    break;\n  }\n\n  case Host::HealthFlag::DEGRADED_EDS_HEALTH: {\n    if (host.healthFlagGet(Host::HealthFlag::DEGRADED_EDS_HEALTH)) {\n      health_status += \"/degraded_eds_health\";\n    }\n    break;\n  }\n\n  case Host::HealthFlag::PENDING_DYNAMIC_REMOVAL: {\n    if (host.healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL)) {\n      health_status += \"/pending_dynamic_removal\";\n    }\n    break;\n  }\n\n  case Host::HealthFlag::PENDING_ACTIVE_HC: {\n    if (host.healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC)) {\n      health_status += \"/pending_active_hc\";\n    }\n    break;\n  }\n  }\n}\n\n} // namespace\n\nstd::string HostUtility::healthFlagsToString(const Host& host) {\n  std::string health_status;\n\n  // Invokes setHealthFlag for each health flag.\n#define SET_HEALTH_FLAG(name, notused)                                                             \\\n  setHealthFlag(Upstream::Host::HealthFlag::name, host, health_status);\n  
HEALTH_FLAG_ENUM_VALUES(SET_HEALTH_FLAG)\n#undef SET_HEALTH_FLAG\n\n  if (health_status.empty()) {\n    return \"healthy\";\n  } else {\n    return health_status;\n  }\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/host_utility.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Utility functions for hosts.\n */\nclass HostUtility {\npublic:\n  /**\n   * Convert a host's health flags into a debug string.\n   */\n  static std::string healthFlagsToString(const Host& host);\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/load_balancer_impl.cc",
    "content": "#include \"common/upstream/load_balancer_impl.h\"\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nnamespace {\nstatic const std::string RuntimeZoneEnabled = \"upstream.zone_routing.enabled\";\nstatic const std::string RuntimeMinClusterSize = \"upstream.zone_routing.min_cluster_size\";\nstatic const std::string RuntimePanicThreshold = \"upstream.healthy_panic_threshold\";\n\n// Distributes load between priorities based on the per priority availability and the normalized\n// total availability. Load is assigned to each priority according to how available each priority is\n// adjusted for the normalized total availability.\n//\n// @param per_priority_load vector of loads that should be populated.\n// @param per_priority_availability the percentage availability of each priority, used to determine\n// how much load each priority can handle.\n// @param total_load the amount of load that may be distributed. Will be updated with the amount of\n// load remaining after distribution.\n// @param normalized_total_availability the total availability, up to a max of 100. 
Used to\n// scale the load when the total availability is less than 100%.\n// @return the first available priority and the remaining load\nstd::pair<int32_t, size_t> distributeLoad(PriorityLoad& per_priority_load,\n                                          const PriorityAvailability& per_priority_availability,\n                                          size_t total_load, size_t normalized_total_availability) {\n  int32_t first_available_priority = -1;\n  for (size_t i = 0; i < per_priority_availability.get().size(); ++i) {\n    if (first_available_priority < 0 && per_priority_availability.get()[i] > 0) {\n      first_available_priority = i;\n    }\n    // Now assign as much load as possible to the high priority levels and cease assigning load\n    // when total_load runs out.\n    per_priority_load.get()[i] = std::min<uint32_t>(\n        total_load, per_priority_availability.get()[i] * 100 / normalized_total_availability);\n    total_load -= per_priority_load.get()[i];\n  }\n\n  return {first_available_priority, total_load};\n}\n\n// Returns true if the weights of all the hosts in the HostVector are equal.\nbool hostWeightsAreEqual(const HostVector& hosts) {\n  if (hosts.size() <= 1) {\n    return true;\n  }\n  const uint32_t weight = hosts[0]->weight();\n  for (size_t i = 1; i < hosts.size(); ++i) {\n    if (hosts[i]->weight() != weight) {\n      return false;\n    }\n  }\n  return true;\n}\n\n} // namespace\n\nstd::pair<uint32_t, LoadBalancerBase::HostAvailability>\nLoadBalancerBase::choosePriority(uint64_t hash, const HealthyLoad& healthy_per_priority_load,\n                                 const DegradedLoad& degraded_per_priority_load) {\n  hash = hash % 100 + 1; // 1-100\n  uint32_t aggregate_percentage_load = 0;\n  // As with tryChooseLocalLocalityHosts, this can be refactored for efficiency\n  // but O(N) is good enough for now given the expected number of priorities is\n  // small.\n\n  // We first attempt to select a priority based on healthy 
availability.\n  for (size_t priority = 0; priority < healthy_per_priority_load.get().size(); ++priority) {\n    aggregate_percentage_load += healthy_per_priority_load.get()[priority];\n    if (hash <= aggregate_percentage_load) {\n      return {static_cast<uint32_t>(priority), HostAvailability::Healthy};\n    }\n  }\n\n  // If no priorities were selected due to health, we'll select a priority based degraded\n  // availability.\n  for (size_t priority = 0; priority < degraded_per_priority_load.get().size(); ++priority) {\n    aggregate_percentage_load += degraded_per_priority_load.get()[priority];\n    if (hash <= aggregate_percentage_load) {\n      return {static_cast<uint32_t>(priority), HostAvailability::Degraded};\n    }\n  }\n\n  // The percentages should always add up to 100 but we have to have a return for the compiler.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nLoadBalancerBase::LoadBalancerBase(\n    const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime,\n    Random::RandomGenerator& random,\n    const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n    : stats_(stats), runtime_(runtime), random_(random),\n      default_healthy_panic_percent_(PROTOBUF_PERCENT_TO_ROUNDED_INTEGER_OR_DEFAULT(\n          common_config, healthy_panic_threshold, 100, 50)),\n      priority_set_(priority_set) {\n  for (auto& host_set : priority_set_.hostSetsPerPriority()) {\n    recalculatePerPriorityState(host_set->priority(), priority_set_, per_priority_load_,\n                                per_priority_health_, per_priority_degraded_);\n  }\n  // Recalculate panic mode for all levels.\n  recalculatePerPriorityPanic();\n\n  priority_set_.addPriorityUpdateCb(\n      [this](uint32_t priority, const HostVector&, const HostVector&) -> void {\n        recalculatePerPriorityState(priority, priority_set_, per_priority_load_,\n                                    per_priority_health_, per_priority_degraded_);\n      });\n\n  
priority_set_.addPriorityUpdateCb(\n      [this](uint32_t priority, const HostVector&, const HostVector&) -> void {\n        UNREFERENCED_PARAMETER(priority);\n        recalculatePerPriorityPanic();\n        stashed_random_.clear();\n      });\n}\n\n// The following cases are handled by\n// recalculatePerPriorityState and recalculatePerPriorityPanic methods (normalized total health is\n// sum of all priorities' health values and capped at 100).\n// - normalized total health is = 100%. It means there are enough healthy hosts to handle the load.\n//   Do not enter panic mode, even if a specific priority has low number of healthy hosts.\n// - normalized total health is < 100%. There are not enough healthy hosts to handle the load.\n// Continue distributing the load among priority sets, but turn on panic mode for a given priority\n//   if # of healthy hosts in priority set is low.\n// - all host sets are in panic mode. Situation called TotalPanic. Load distribution is\n//   calculated based on the number of hosts in each priority regardless of their health.\n// - all hosts in all priorities are down (normalized total health is 0%). If panic\n//   threshold > 0% the cluster is in TotalPanic (see above). 
If panic threshold == 0\n//   then priorities are not in panic, but there are no healthy hosts to route to.\n//   In this case just mark P=0 as recipient of 100% of the traffic (nothing will be routed\n//   to P=0 anyways as there are no healthy hosts there).\nvoid LoadBalancerBase::recalculatePerPriorityState(uint32_t priority,\n                                                   const PrioritySet& priority_set,\n                                                   HealthyAndDegradedLoad& per_priority_load,\n                                                   HealthyAvailability& per_priority_health,\n                                                   DegradedAvailability& per_priority_degraded) {\n  per_priority_load.healthy_priority_load_.get().resize(priority_set.hostSetsPerPriority().size());\n  per_priority_load.degraded_priority_load_.get().resize(priority_set.hostSetsPerPriority().size());\n  per_priority_health.get().resize(priority_set.hostSetsPerPriority().size());\n  per_priority_degraded.get().resize(priority_set.hostSetsPerPriority().size());\n\n  // Determine the health of the newly modified priority level.\n  // Health ranges from 0-100, and is the ratio of healthy/degraded hosts to total hosts, modified\n  // by the overprovisioning factor.\n  HostSet& host_set = *priority_set.hostSetsPerPriority()[priority];\n  per_priority_health.get()[priority] = 0;\n  per_priority_degraded.get()[priority] = 0;\n  const auto host_count = host_set.hosts().size() - host_set.excludedHosts().size();\n\n  if (host_count > 0) {\n    // Each priority level's health is ratio of healthy hosts to total number of hosts in a priority\n    // multiplied by overprovisioning factor of 1.4 and capped at 100%. It means that if all\n    // hosts are healthy that priority's health is 100%*1.4=140% and is capped at 100% which results\n    // in 100%. 
If 80% of hosts are healthy, that priority's health is still 100% (80%*1.4=112% and\n    // capped at 100%).\n    per_priority_health.get()[priority] = std::min<uint32_t>(\n        100, (host_set.overprovisioningFactor() * host_set.healthyHosts().size() / host_count));\n\n    // We perform the same computation for degraded hosts.\n    per_priority_degraded.get()[priority] = std::min<uint32_t>(\n        100, (host_set.overprovisioningFactor() * host_set.degradedHosts().size() / host_count));\n  }\n\n  // Now that we've updated health for the changed priority level, we need to calculate percentage\n  // load for all priority levels.\n\n  // First, determine if the load needs to be scaled relative to availability (healthy + degraded).\n  // For example if there are 3 host sets with 10% / 20% / 10% health and 20% / 10% / 0% degraded\n  // they will get 16% / 28% / 14% load to healthy hosts and 28% / 14% / 0% load to degraded hosts\n  // to ensure total load adds up to 100. Note the first healthy priority is receiving 2% additional\n  // load due to rounding.\n  //\n  // Sum of priority levels' health and degraded values may exceed 100, so it is capped at 100 and\n  // referred as normalized total availability.\n  const uint32_t normalized_total_availability =\n      calculateNormalizedTotalAvailability(per_priority_health, per_priority_degraded);\n  if (normalized_total_availability == 0) {\n    // Everything is terrible. There is nothing to calculate here.\n    // Let recalculatePerPriorityPanic and recalculateLoadInTotalPanic deal with\n    // load calculation.\n    return;\n  }\n\n  // We start of with a total load of 100 and distribute it between priorities based on\n  // availability. 
We first attempt to distribute this load to healthy priorities based on healthy\n  // availability.\n  const auto first_healthy_and_remaining =\n      distributeLoad(per_priority_load.healthy_priority_load_, per_priority_health, 100,\n                     normalized_total_availability);\n\n  // Using the remaining load after allocating load to healthy priorities, distribute it based on\n  // degraded availability.\n  const auto remaining_load_for_degraded = first_healthy_and_remaining.second;\n  const auto first_degraded_and_remaining =\n      distributeLoad(per_priority_load.degraded_priority_load_, per_priority_degraded,\n                     remaining_load_for_degraded, normalized_total_availability);\n\n  // Anything that remains should just be rounding errors, so allocate that to the first available\n  // priority, either as healthy or degraded.\n  const auto remaining_load = first_degraded_and_remaining.second;\n  if (remaining_load != 0) {\n    const auto first_healthy = first_healthy_and_remaining.first;\n    const auto first_degraded = first_degraded_and_remaining.first;\n    ASSERT(first_healthy != -1 || first_degraded != -1);\n\n    // Attempt to allocate the remainder to the first healthy priority first. 
If no such priority\n    // exist, allocate to the first degraded priority.\n    ASSERT(remaining_load < per_priority_load.healthy_priority_load_.get().size() +\n                                per_priority_load.degraded_priority_load_.get().size());\n    if (first_healthy != -1) {\n      per_priority_load.healthy_priority_load_.get()[first_healthy] += remaining_load;\n    } else {\n      per_priority_load.degraded_priority_load_.get()[first_degraded] += remaining_load;\n    }\n  }\n\n  // The allocated load between healthy and degraded should be exactly 100.\n  ASSERT(100 == std::accumulate(per_priority_load.healthy_priority_load_.get().begin(),\n                                per_priority_load.healthy_priority_load_.get().end(), 0) +\n                    std::accumulate(per_priority_load.degraded_priority_load_.get().begin(),\n                                    per_priority_load.degraded_priority_load_.get().end(), 0));\n}\n\n// Method iterates through priority levels and turns on/off panic mode.\nvoid LoadBalancerBase::recalculatePerPriorityPanic() {\n  per_priority_panic_.resize(priority_set_.hostSetsPerPriority().size());\n\n  const uint32_t normalized_total_availability =\n      calculateNormalizedTotalAvailability(per_priority_health_, per_priority_degraded_);\n\n  const uint64_t panic_threshold = std::min<uint64_t>(\n      100, runtime_.snapshot().getInteger(RuntimePanicThreshold, default_healthy_panic_percent_));\n\n  // This is corner case when panic is disabled and there is no hosts available.\n  // LoadBalancerBase::choosePriority method expects that the sum of\n  // load percentages always adds up to 100.\n  // To satisfy that requirement 100% is assigned to P=0.\n  // In reality no traffic will be routed to P=0 priority, because\n  // the panic mode is disabled and LoadBalancer will try to find\n  // a healthy node and none is available.\n  if (panic_threshold == 0 && normalized_total_availability == 0) {\n    
per_priority_load_.healthy_priority_load_.get()[0] = 100;\n    return;\n  }\n\n  bool total_panic = true;\n  for (size_t i = 0; i < per_priority_health_.get().size(); ++i) {\n    // For each level check if it should run in panic mode. Never set panic mode if\n    // normalized total health is 100%, even when individual priority level has very low # of\n    // healthy hosts.\n    const HostSet& priority_host_set = *priority_set_.hostSetsPerPriority()[i];\n    per_priority_panic_[i] =\n        (normalized_total_availability == 100 ? false : isHostSetInPanic(priority_host_set));\n    total_panic = total_panic && per_priority_panic_[i];\n  }\n\n  // If all priority levels are in panic mode, load distribution\n  // is done differently.\n  if (total_panic) {\n    recalculateLoadInTotalPanic();\n  }\n}\n\n// recalculateLoadInTotalPanic method is called when all priority levels\n// are in panic mode. The load distribution is done NOT based on number\n// of healthy hosts in the priority, but based on number of hosts\n// in each priority regardless of its health.\nvoid LoadBalancerBase::recalculateLoadInTotalPanic() {\n  // First calculate total number of hosts across all priorities regardless\n  // whether they are healthy or not.\n  const uint32_t total_hosts_count =\n      std::accumulate(priority_set_.hostSetsPerPriority().begin(),\n                      priority_set_.hostSetsPerPriority().end(), static_cast<size_t>(0),\n                      [](size_t acc, const std::unique_ptr<Envoy::Upstream::HostSet>& host_set) {\n                        return acc + host_set->hosts().size();\n                      });\n\n  if (0 == total_hosts_count) {\n    // Backend is empty, but load must be distributed somewhere.\n    per_priority_load_.healthy_priority_load_.get()[0] = 100;\n    return;\n  }\n\n  // Now iterate through all priority levels and calculate how much\n  // load is supposed to go to each priority. 
In panic mode the calculation\n  // is based not on the number of healthy hosts but based on the number of\n  // total hosts in the priority.\n  uint32_t total_load = 100;\n  int32_t first_noempty = -1;\n  for (size_t i = 0; i < per_priority_panic_.size(); i++) {\n    const HostSet& host_set = *priority_set_.hostSetsPerPriority()[i];\n    const auto hosts_num = host_set.hosts().size();\n\n    if ((-1 == first_noempty) && (0 != hosts_num)) {\n      first_noempty = i;\n    }\n    const uint32_t priority_load = 100 * hosts_num / total_hosts_count;\n    per_priority_load_.healthy_priority_load_.get()[i] = priority_load;\n    per_priority_load_.degraded_priority_load_.get()[i] = 0;\n    total_load -= priority_load;\n  }\n\n  // Add the remaining load to the first not empty load.\n  per_priority_load_.healthy_priority_load_.get()[first_noempty] += total_load;\n\n  // The total load should come up to 100%.\n  ASSERT(100 == std::accumulate(per_priority_load_.healthy_priority_load_.get().begin(),\n                                per_priority_load_.healthy_priority_load_.get().end(), 0));\n}\n\nstd::pair<HostSet&, LoadBalancerBase::HostAvailability>\nLoadBalancerBase::chooseHostSet(LoadBalancerContext* context, uint64_t hash) const {\n  if (context) {\n    const auto priority_loads = context->determinePriorityLoad(\n        priority_set_, per_priority_load_, Upstream::RetryPriority::defaultPriorityMapping);\n    const auto priority_and_source = choosePriority(hash, priority_loads.healthy_priority_load_,\n                                                    priority_loads.degraded_priority_load_);\n    return {*priority_set_.hostSetsPerPriority()[priority_and_source.first],\n            priority_and_source.second};\n  }\n\n  const auto priority_and_source = choosePriority(hash, per_priority_load_.healthy_priority_load_,\n                                                  per_priority_load_.degraded_priority_load_);\n  return 
{*priority_set_.hostSetsPerPriority()[priority_and_source.first],\n          priority_and_source.second};\n}\n\nZoneAwareLoadBalancerBase::ZoneAwareLoadBalancerBase(\n    const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats,\n    Runtime::Loader& runtime, Random::RandomGenerator& random,\n    const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n    : LoadBalancerBase(priority_set, stats, runtime, random, common_config),\n      local_priority_set_(local_priority_set),\n      routing_enabled_(PROTOBUF_PERCENT_TO_ROUNDED_INTEGER_OR_DEFAULT(\n          common_config.zone_aware_lb_config(), routing_enabled, 100, 100)),\n      min_cluster_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(common_config.zone_aware_lb_config(),\n                                                        min_cluster_size, 6U)),\n      fail_traffic_on_panic_(common_config.zone_aware_lb_config().fail_traffic_on_panic()) {\n  ASSERT(!priority_set.hostSetsPerPriority().empty());\n  resizePerPriorityState();\n  priority_set_.addPriorityUpdateCb(\n      [this](uint32_t priority, const HostVector&, const HostVector&) -> void {\n        // Make sure per_priority_state_ is as large as priority_set_.hostSetsPerPriority()\n        resizePerPriorityState();\n        // If P=0 changes, regenerate locality routing structures. 
Locality based routing is\n        // disabled at all other levels.\n        if (local_priority_set_ && priority == 0) {\n          regenerateLocalityRoutingStructures();\n        }\n      });\n  if (local_priority_set_) {\n    // Multiple priorities are unsupported for local priority sets.\n    // In order to support priorities correctly, one would have to make some assumptions about\n    // routing (all local Envoys fail over at the same time) and use all priorities when computing\n    // the locality routing structure.\n    ASSERT(local_priority_set_->hostSetsPerPriority().size() == 1);\n    local_priority_set_member_update_cb_handle_ = local_priority_set_->addPriorityUpdateCb(\n        [this](uint32_t priority, const HostVector&, const HostVector&) -> void {\n          ASSERT(priority == 0);\n          // If the set of local Envoys changes, regenerate routing for P=0 as it does priority\n          // based routing.\n          regenerateLocalityRoutingStructures();\n        });\n  }\n}\n\nZoneAwareLoadBalancerBase::~ZoneAwareLoadBalancerBase() {\n  if (local_priority_set_member_update_cb_handle_ != nullptr) {\n    local_priority_set_member_update_cb_handle_->remove();\n  }\n}\n\nvoid ZoneAwareLoadBalancerBase::regenerateLocalityRoutingStructures() {\n  ASSERT(local_priority_set_);\n  stats_.lb_recalculate_zone_structures_.inc();\n  // resizePerPriorityState should ensure these stay in sync.\n  ASSERT(per_priority_state_.size() == priority_set_.hostSetsPerPriority().size());\n\n  // We only do locality routing for P=0\n  uint32_t priority = 0;\n  PerPriorityState& state = *per_priority_state_[priority];\n  // Do not perform any calculations if we cannot perform locality routing based on non runtime\n  // params.\n  if (earlyExitNonLocalityRouting()) {\n    state.locality_routing_state_ = LocalityRoutingState::NoLocalityRouting;\n    return;\n  }\n  HostSet& host_set = *priority_set_.hostSetsPerPriority()[priority];\n  
ASSERT(host_set.healthyHostsPerLocality().hasLocalLocality());\n  const size_t num_localities = host_set.healthyHostsPerLocality().get().size();\n  ASSERT(num_localities > 0);\n\n  // It is worth noting that all of the percentages calculated are orthogonal from\n  // how much load this priority level receives, percentageLoad(priority).\n  //\n  // If the host sets are such that 20% of load is handled locally and 80% is residual, and then\n  // half the hosts in all host sets go unhealthy, this priority set will\n  // still send half of the incoming load to the local locality and 80% to residual.\n  //\n  // Basically, fairness across localities within a priority is guaranteed. Fairness across\n  // localities across priorities is not.\n  absl::FixedArray<uint64_t> local_percentage(num_localities);\n  calculateLocalityPercentage(localHostSet().healthyHostsPerLocality(), local_percentage.begin());\n  absl::FixedArray<uint64_t> upstream_percentage(num_localities);\n  calculateLocalityPercentage(host_set.healthyHostsPerLocality(), upstream_percentage.begin());\n\n  // If we have lower percent of hosts in the local cluster in the same locality,\n  // we can push all of the requests directly to upstream cluster in the same locality.\n  if (upstream_percentage[0] >= local_percentage[0]) {\n    state.locality_routing_state_ = LocalityRoutingState::LocalityDirect;\n    return;\n  }\n\n  state.locality_routing_state_ = LocalityRoutingState::LocalityResidual;\n\n  // If we cannot route all requests to the same locality, calculate what percentage can be routed.\n  // For example, if local percentage is 20% and upstream is 10%\n  // we can route only 50% of requests directly.\n  state.local_percent_to_route_ = upstream_percentage[0] * 10000 / local_percentage[0];\n\n  // Local locality does not have additional capacity (we have already routed what we could).\n  // Now we need to figure out how much traffic we can route cross locality and to which exact\n  // locality we should 
route. Percentage of requests routed cross locality to a specific locality\n  // needed be proportional to the residual capacity upstream locality has.\n  //\n  // residual_capacity contains capacity left in a given locality, we keep accumulating residual\n  // capacity to make search for sampled value easier.\n  // For example, if we have the following upstream and local percentage:\n  // local_percentage: 40000 40000 20000\n  // upstream_percentage: 25000 50000 25000\n  // Residual capacity would look like: 0 10000 5000. Now we need to sample proportionally to\n  // bucket sizes (residual capacity). For simplicity of finding where specific\n  // sampled value is, we accumulate values in residual capacity. This is what it will look like:\n  // residual_capacity: 0 10000 15000\n  // Now to find a locality to route (bucket) we could simply iterate over residual_capacity\n  // searching where sampled value is placed.\n  state.residual_capacity_.resize(num_localities);\n\n  // Local locality (index 0) does not have residual capacity as we have routed all we could.\n  state.residual_capacity_[0] = 0;\n  for (size_t i = 1; i < num_localities; ++i) {\n    // Only route to the localities that have additional capacity.\n    if (upstream_percentage[i] > local_percentage[i]) {\n      state.residual_capacity_[i] =\n          state.residual_capacity_[i - 1] + upstream_percentage[i] - local_percentage[i];\n    } else {\n      // Locality with index \"i\" does not have residual capacity, but we keep accumulating previous\n      // values to make search easier on the next step.\n      state.residual_capacity_[i] = state.residual_capacity_[i - 1];\n    }\n  }\n}\n\nvoid ZoneAwareLoadBalancerBase::resizePerPriorityState() {\n  const uint32_t size = priority_set_.hostSetsPerPriority().size();\n  while (per_priority_state_.size() < size) {\n    // Note for P!=0, PerPriorityState is created with NoLocalityRouting and never changed.\n    
per_priority_state_.push_back(std::make_unique<PerPriorityState>());\n  }\n}\n\nbool ZoneAwareLoadBalancerBase::earlyExitNonLocalityRouting() {\n  // We only do locality routing for P=0.\n  HostSet& host_set = *priority_set_.hostSetsPerPriority()[0];\n  if (host_set.healthyHostsPerLocality().get().size() < 2) {\n    return true;\n  }\n\n  // lb_local_cluster_not_ok is bumped for \"Local host set is not set or it is\n  // panic mode for local cluster\".\n  if (!host_set.healthyHostsPerLocality().hasLocalLocality() ||\n      host_set.healthyHostsPerLocality().get()[0].empty()) {\n    stats_.lb_local_cluster_not_ok_.inc();\n    return true;\n  }\n\n  // Same number of localities should be for local and upstream cluster.\n  if (host_set.healthyHostsPerLocality().get().size() !=\n      localHostSet().healthyHostsPerLocality().get().size()) {\n    stats_.lb_zone_number_differs_.inc();\n    return true;\n  }\n\n  // Do not perform locality routing for small clusters.\n  const uint64_t min_cluster_size =\n      runtime_.snapshot().getInteger(RuntimeMinClusterSize, min_cluster_size_);\n  if (host_set.healthyHosts().size() < min_cluster_size) {\n    stats_.lb_zone_cluster_too_small_.inc();\n    return true;\n  }\n\n  return false;\n}\n\nHostConstSharedPtr LoadBalancerBase::chooseHost(LoadBalancerContext* context) {\n  HostConstSharedPtr host;\n  const size_t max_attempts = context ? 
context->hostSelectionRetryCount() + 1 : 1;\n  for (size_t i = 0; i < max_attempts; ++i) {\n    host = chooseHostOnce(context);\n\n    // If host selection failed or the host is accepted by the filter, return.\n    // Otherwise, try again.\n    // Note: in the future we might want to allow retrying when chooseHostOnce returns nullptr.\n    if (!host || !context || !context->shouldSelectAnotherHost(*host)) {\n      return host;\n    }\n  }\n\n  // If we didn't find anything, return the last host.\n  return host;\n}\n\nbool LoadBalancerBase::isHostSetInPanic(const HostSet& host_set) const {\n  uint64_t global_panic_threshold = std::min<uint64_t>(\n      100, runtime_.snapshot().getInteger(RuntimePanicThreshold, default_healthy_panic_percent_));\n  const auto host_count = host_set.hosts().size() - host_set.excludedHosts().size();\n  double healthy_percent =\n      host_count == 0 ? 0.0 : 100.0 * host_set.healthyHosts().size() / host_count;\n\n  double degraded_percent =\n      host_count == 0 ? 0.0 : 100.0 * host_set.degradedHosts().size() / host_count;\n  // If the % of healthy hosts in the cluster is less than our panic threshold, we use all hosts.\n  if ((healthy_percent + degraded_percent) < global_panic_threshold) {\n    return true;\n  }\n\n  return false;\n}\n\nvoid ZoneAwareLoadBalancerBase::calculateLocalityPercentage(\n    const HostsPerLocality& hosts_per_locality, uint64_t* ret) {\n  uint64_t total_hosts = 0;\n  for (const auto& locality_hosts : hosts_per_locality.get()) {\n    total_hosts += locality_hosts.size();\n  }\n\n  // TODO(snowp): Should we ignore excluded hosts here too?\n\n  size_t i = 0;\n  for (const auto& locality_hosts : hosts_per_locality.get()) {\n    ret[i++] = total_hosts > 0 ? 
10000ULL * locality_hosts.size() / total_hosts : 0;\n  }\n}\n\nuint32_t ZoneAwareLoadBalancerBase::tryChooseLocalLocalityHosts(const HostSet& host_set) const {\n  PerPriorityState& state = *per_priority_state_[host_set.priority()];\n  ASSERT(state.locality_routing_state_ != LocalityRoutingState::NoLocalityRouting);\n\n  // At this point it's guaranteed to be at least 2 localities & local exists.\n  const size_t number_of_localities = host_set.healthyHostsPerLocality().get().size();\n  ASSERT(number_of_localities >= 2U);\n  ASSERT(host_set.healthyHostsPerLocality().hasLocalLocality());\n\n  // Try to push all of the requests to the same locality first.\n  if (state.locality_routing_state_ == LocalityRoutingState::LocalityDirect) {\n    stats_.lb_zone_routing_all_directly_.inc();\n    return 0;\n  }\n\n  ASSERT(state.locality_routing_state_ == LocalityRoutingState::LocalityResidual);\n\n  // If we cannot route all requests to the same locality, we already calculated how much we can\n  // push to the local locality, check if we can push to local locality on current iteration.\n  if (random_.random() % 10000 < state.local_percent_to_route_) {\n    stats_.lb_zone_routing_sampled_.inc();\n    return 0;\n  }\n\n  // At this point we must route cross locality as we cannot route to the local locality.\n  stats_.lb_zone_routing_cross_zone_.inc();\n\n  // This is *extremely* unlikely but possible due to rounding errors when calculating\n  // locality percentages. 
In this case just select random locality.\n  if (state.residual_capacity_[number_of_localities - 1] == 0) {\n    stats_.lb_zone_no_capacity_left_.inc();\n    return random_.random() % number_of_localities;\n  }\n\n  // Random sampling to select specific locality for cross locality traffic based on the additional\n  // capacity in localities.\n  uint64_t threshold = random_.random() % state.residual_capacity_[number_of_localities - 1];\n\n  // This potentially can be optimized to be O(log(N)) where N is the number of localities.\n  // Linear scan should be faster for smaller N, in most of the scenarios N will be small.\n  // TODO(htuch): is there a bug here when threshold == 0? Seems like we pick\n  // local locality in that situation. Probably should start iterating at 1.\n  int i = 0;\n  while (threshold > state.residual_capacity_[i]) {\n    i++;\n  }\n\n  return i;\n}\n\nabsl::optional<ZoneAwareLoadBalancerBase::HostsSource>\nZoneAwareLoadBalancerBase::hostSourceToUse(LoadBalancerContext* context, uint64_t hash) const {\n  auto host_set_and_source = chooseHostSet(context, hash);\n\n  // The second argument tells us which availability we should target from the selected host set.\n  const auto host_availability = host_set_and_source.second;\n  auto& host_set = host_set_and_source.first;\n  HostsSource hosts_source;\n  hosts_source.priority_ = host_set.priority();\n\n  // If the selected host set has insufficient healthy hosts, return all hosts (unless we should\n  // fail traffic on panic, in which case return no host).\n  if (per_priority_panic_[hosts_source.priority_]) {\n    stats_.lb_healthy_panic_.inc();\n    if (fail_traffic_on_panic_) {\n      return absl::nullopt;\n    } else {\n      hosts_source.source_type_ = HostsSource::SourceType::AllHosts;\n      return hosts_source;\n    }\n  }\n\n  // If we're doing locality weighted balancing, pick locality.\n  absl::optional<uint32_t> locality;\n  if (host_availability == HostAvailability::Degraded) {\n    
locality = host_set.chooseDegradedLocality();\n  } else {\n    locality = host_set.chooseHealthyLocality();\n  }\n\n  if (locality.has_value()) {\n    hosts_source.source_type_ = localitySourceType(host_availability);\n    hosts_source.locality_index_ = locality.value();\n    return hosts_source;\n  }\n\n  // If we've latched that we can't do priority-based routing, return healthy or degraded hosts\n  // for the selected host set.\n  if (per_priority_state_[host_set.priority()]->locality_routing_state_ ==\n      LocalityRoutingState::NoLocalityRouting) {\n    hosts_source.source_type_ = sourceType(host_availability);\n    return hosts_source;\n  }\n\n  // Determine if the load balancer should do zone based routing for this pick.\n  if (!runtime_.snapshot().featureEnabled(RuntimeZoneEnabled, routing_enabled_)) {\n    hosts_source.source_type_ = sourceType(host_availability);\n    return hosts_source;\n  }\n\n  if (isHostSetInPanic(localHostSet())) {\n    stats_.lb_local_cluster_not_ok_.inc();\n    // If the local Envoy instances are in global panic, and we should not fail traffic, do\n    // not do locality based routing.\n    if (fail_traffic_on_panic_) {\n      return absl::nullopt;\n    } else {\n      hosts_source.source_type_ = sourceType(host_availability);\n      return hosts_source;\n    }\n  }\n\n  hosts_source.source_type_ = localitySourceType(host_availability);\n  hosts_source.locality_index_ = tryChooseLocalLocalityHosts(host_set);\n  return hosts_source;\n}\n\nconst HostVector& ZoneAwareLoadBalancerBase::hostSourceToHosts(HostsSource hosts_source) const {\n  const HostSet& host_set = *priority_set_.hostSetsPerPriority()[hosts_source.priority_];\n  switch (hosts_source.source_type_) {\n  case HostsSource::SourceType::AllHosts:\n    return host_set.hosts();\n  case HostsSource::SourceType::HealthyHosts:\n    return host_set.healthyHosts();\n  case HostsSource::SourceType::DegradedHosts:\n    return host_set.degradedHosts();\n  case 
HostsSource::SourceType::LocalityHealthyHosts:\n    return host_set.healthyHostsPerLocality().get()[hosts_source.locality_index_];\n  case HostsSource::SourceType::LocalityDegradedHosts:\n    return host_set.degradedHostsPerLocality().get()[hosts_source.locality_index_];\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nEdfLoadBalancerBase::EdfLoadBalancerBase(\n    const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats,\n    Runtime::Loader& runtime, Random::RandomGenerator& random,\n    const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n    : ZoneAwareLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random,\n                                common_config),\n      seed_(random_.random()) {\n  // We fully recompute the schedulers for a given host set here on membership change, which is\n  // consistent with what other LB implementations do (e.g. thread aware).\n  // The downside of a full recompute is that time complexity is O(n * log n),\n  // so we will need to do better at delta tracking to scale (see\n  // https://github.com/envoyproxy/envoy/issues/2874).\n  priority_set.addPriorityUpdateCb(\n      [this](uint32_t priority, const HostVector&, const HostVector&) { refresh(priority); });\n}\n\nvoid EdfLoadBalancerBase::initialize() {\n  for (uint32_t priority = 0; priority < priority_set_.hostSetsPerPriority().size(); ++priority) {\n    refresh(priority);\n  }\n}\n\nvoid EdfLoadBalancerBase::refresh(uint32_t priority) {\n  const auto add_hosts_source = [this](HostsSource source, const HostVector& hosts) {\n    // Nuke existing scheduler if it exists.\n    auto& scheduler = scheduler_[source] = Scheduler{};\n    refreshHostSource(source);\n\n    // Check if the original host weights are equal and skip EDF creation if they are. 
When all\n    // original weights are equal we can rely on unweighted host pick to do optimal round robin and\n    // least-loaded host selection with lower memory and CPU overhead.\n    if (hostWeightsAreEqual(hosts)) {\n      // Skip edf creation.\n      return;\n    }\n\n    scheduler.edf_ = std::make_unique<EdfScheduler<const Host>>();\n\n    // Populate scheduler with host list.\n    // TODO(mattklein123): We must build the EDF schedule even if all of the hosts are currently\n    // weighted 1. This is because currently we don't refresh host sets if only weights change.\n    // We should probably change this to refresh at all times. See the comment in\n    // BaseDynamicClusterImpl::updateDynamicHostList about this.\n    for (const auto& host : hosts) {\n      // We use a fixed weight here. While the weight may change without\n      // notification, this will only be stale until this host is next picked,\n      // at which point it is reinserted into the EdfScheduler with its new\n      // weight in chooseHost().\n      scheduler.edf_->add(hostWeight(*host), host);\n    }\n\n    // Cycle through hosts to achieve the intended offset behavior.\n    // TODO(htuch): Consider how we can avoid biasing towards earlier hosts in the schedule across\n    // refreshes for the weighted case.\n    if (!hosts.empty()) {\n      for (uint32_t i = 0; i < seed_ % hosts.size(); ++i) {\n        auto host =\n            scheduler.edf_->pickAndAdd([this](const Host& host) { return hostWeight(host); });\n      }\n    }\n  };\n\n  // Populate EdfSchedulers for each valid HostsSource value for the host set at this priority.\n  const auto& host_set = priority_set_.hostSetsPerPriority()[priority];\n  add_hosts_source(HostsSource(priority, HostsSource::SourceType::AllHosts), host_set->hosts());\n  add_hosts_source(HostsSource(priority, HostsSource::SourceType::HealthyHosts),\n                   host_set->healthyHosts());\n  add_hosts_source(HostsSource(priority, 
HostsSource::SourceType::DegradedHosts),\n                   host_set->degradedHosts());\n  for (uint32_t locality_index = 0;\n       locality_index < host_set->healthyHostsPerLocality().get().size(); ++locality_index) {\n    add_hosts_source(\n        HostsSource(priority, HostsSource::SourceType::LocalityHealthyHosts, locality_index),\n        host_set->healthyHostsPerLocality().get()[locality_index]);\n  }\n  for (uint32_t locality_index = 0;\n       locality_index < host_set->degradedHostsPerLocality().get().size(); ++locality_index) {\n    add_hosts_source(\n        HostsSource(priority, HostsSource::SourceType::LocalityDegradedHosts, locality_index),\n        host_set->degradedHostsPerLocality().get()[locality_index]);\n  }\n}\n\nHostConstSharedPtr EdfLoadBalancerBase::peekAnotherHost(LoadBalancerContext* context) {\n  const absl::optional<HostsSource> hosts_source = hostSourceToUse(context, random(true));\n  if (!hosts_source) {\n    return nullptr;\n  }\n  auto scheduler_it = scheduler_.find(*hosts_source);\n  // We should always have a scheduler for any return value from\n  // hostSourceToUse() via the construction in refresh();\n  ASSERT(scheduler_it != scheduler_.end());\n  auto& scheduler = scheduler_it->second;\n\n  // As has been commented in both EdfLoadBalancerBase::refresh and\n  // BaseDynamicClusterImpl::updateDynamicHostList, we must do a runtime pivot here to determine\n  // whether to use EDF or do unweighted (fast) selection. 
EDF is non-null iff the original weights\n  // of 2 or more hosts differ.\n  if (scheduler.edf_ != nullptr) {\n    return scheduler.edf_->peekAgain([this](const Host& host) { return hostWeight(host); });\n  } else {\n    const HostVector& hosts_to_use = hostSourceToHosts(*hosts_source);\n    if (hosts_to_use.empty()) {\n      return nullptr;\n    }\n    return unweightedHostPeek(hosts_to_use, *hosts_source);\n  }\n}\n\nHostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* context) {\n  const absl::optional<HostsSource> hosts_source = hostSourceToUse(context, random(false));\n  if (!hosts_source) {\n    return nullptr;\n  }\n  auto scheduler_it = scheduler_.find(*hosts_source);\n  // We should always have a scheduler for any return value from\n  // hostSourceToUse() via the construction in refresh();\n  ASSERT(scheduler_it != scheduler_.end());\n  auto& scheduler = scheduler_it->second;\n\n  // As has been commented in both EdfLoadBalancerBase::refresh and\n  // BaseDynamicClusterImpl::updateDynamicHostList, we must do a runtime pivot here to determine\n  // whether to use EDF or do unweighted (fast) selection. 
EDF is non-null iff the original weights\n  // of 2 or more hosts differ.\n  if (scheduler.edf_ != nullptr) {\n    auto host = scheduler.edf_->pickAndAdd([this](const Host& host) { return hostWeight(host); });\n    return host;\n  } else {\n    const HostVector& hosts_to_use = hostSourceToHosts(*hosts_source);\n    if (hosts_to_use.empty()) {\n      return nullptr;\n    }\n    return unweightedHostPick(hosts_to_use, *hosts_source);\n  }\n}\n\nHostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPeek(const HostVector&,\n                                                                const HostsSource&) {\n  // LeastRequestLoadBalancer can not do deterministic prefetching, because\n  // any other thread might select the least-requested-host between prefetch and\n  // host-pick, and change the rq_active checks.\n  return nullptr;\n}\n\nHostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector& hosts_to_use,\n                                                                const HostsSource&) {\n  HostSharedPtr candidate_host = nullptr;\n  for (uint32_t choice_idx = 0; choice_idx < choice_count_; ++choice_idx) {\n    const int rand_idx = random_.random() % hosts_to_use.size();\n    HostSharedPtr sampled_host = hosts_to_use[rand_idx];\n\n    if (candidate_host == nullptr) {\n      // Make a first choice to start the comparisons.\n      candidate_host = sampled_host;\n      continue;\n    }\n\n    const auto candidate_active_rq = candidate_host->stats().rq_active_.value();\n    const auto sampled_active_rq = sampled_host->stats().rq_active_.value();\n    if (sampled_active_rq < candidate_active_rq) {\n      candidate_host = sampled_host;\n    }\n  }\n\n  return candidate_host;\n}\n\nHostConstSharedPtr RandomLoadBalancer::peekAnotherHost(LoadBalancerContext* context) {\n  return peekOrChoose(context, true);\n}\n\nHostConstSharedPtr RandomLoadBalancer::chooseHostOnce(LoadBalancerContext* context) {\n  return peekOrChoose(context, 
false);\n}\n\nHostConstSharedPtr RandomLoadBalancer::peekOrChoose(LoadBalancerContext* context, bool peek) {\n  uint64_t random_hash = random(peek);\n  const absl::optional<HostsSource> hosts_source = hostSourceToUse(context, random_hash);\n  if (!hosts_source) {\n    return nullptr;\n  }\n\n  const HostVector& hosts_to_use = hostSourceToHosts(*hosts_source);\n  if (hosts_to_use.empty()) {\n    return nullptr;\n  }\n\n  return hosts_to_use[random_hash % hosts_to_use.size()];\n}\n\nSubsetSelectorImpl::SubsetSelectorImpl(\n    const Protobuf::RepeatedPtrField<std::string>& selector_keys,\n    envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::\n        LbSubsetSelectorFallbackPolicy fallback_policy,\n    const Protobuf::RepeatedPtrField<std::string>& fallback_keys_subset,\n    bool single_host_per_subset)\n    : selector_keys_(selector_keys.begin(), selector_keys.end()), fallback_policy_(fallback_policy),\n      fallback_keys_subset_(fallback_keys_subset.begin(), fallback_keys_subset.end()),\n      single_host_per_subset_(single_host_per_subset) {\n\n  if (fallback_policy_ !=\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET) {\n    // defining fallback_keys_subset_ for a fallback policy other than KEYS_SUBSET doesn't have\n    // any effect and it is probably a user mistake. 
We should let the user know about it.\n    if (!fallback_keys_subset_.empty()) {\n      throw EnvoyException(\"fallback_keys_subset can be set only for KEYS_SUBSET fallback_policy\");\n    }\n    return;\n  }\n\n  // if KEYS_SUBSET fallback policy is selected, fallback_keys_subset must not be empty, because\n  // it would be the same as not defining fallback policy at all (global fallback policy would be\n  // used)\n  if (fallback_keys_subset_.empty()) {\n    throw EnvoyException(\"fallback_keys_subset cannot be empty\");\n  }\n\n  // We allow only for a fallback to a subset of the selector keys because this is probably the\n  // only use case that makes sense (fallback from more specific selector to less specific\n  // selector). Potentially we can relax this constraint in the future if there will be a use case\n  // for this.\n  if (!std::includes(selector_keys_.begin(), selector_keys_.end(), fallback_keys_subset_.begin(),\n                     fallback_keys_subset_.end())) {\n    throw EnvoyException(\"fallback_keys_subset must be a subset of selector keys\");\n  }\n\n  // Enforce that the fallback_keys_subset_ set is smaller than the selector_keys_ set. Otherwise\n  // we could end up with a infinite recursion of SubsetLoadBalancer::chooseHost().\n  if (selector_keys_.size() == fallback_keys_subset_.size()) {\n    throw EnvoyException(\"fallback_keys_subset cannot be equal to keys\");\n  }\n}\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/load_balancer_impl.h",
    "content": "#pragma once\n\n#include <cmath>\n#include <cstdint>\n#include <memory>\n#include <queue>\n#include <set>\n#include <vector>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/upstream/load_balancer.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_protos.h\"\n#include \"common/upstream/edf_scheduler.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n// Priority levels and localities are considered overprovisioned with this factor.\nstatic constexpr uint32_t kDefaultOverProvisioningFactor = 140;\n\n/**\n * Base class for all LB implementations.\n */\nclass LoadBalancerBase : public LoadBalancer {\npublic:\n  enum class HostAvailability { Healthy, Degraded };\n\n  // A utility function to chose a priority level based on a precomputed hash and\n  // two PriorityLoad vectors, one for healthy load and one for degraded.\n  //\n  // Returns the priority, a number between 0 and per_priority_load.size()-1 as well as which host\n  // availability level was chosen.\n  static std::pair<uint32_t, HostAvailability>\n  choosePriority(uint64_t hash, const HealthyLoad& healthy_per_priority_load,\n                 const DegradedLoad& degraded_per_priority_load);\n\n  HostConstSharedPtr chooseHost(LoadBalancerContext* context) override;\n\nprotected:\n  /**\n   * By implementing this method instead of chooseHost, host selection will\n   * be subject to host filters specified by LoadBalancerContext.\n   *\n   * Host selection will be retried up to the number specified by\n   * hostSelectionRetryCount on LoadBalancerContext, and if no hosts are found\n   * within the allowed attempts, the host that was selected during the last\n   * attempt will be returned.\n   *\n   * If host selection is idempotent (i.e. 
retrying will not change the outcome),\n   * sub classes should override chooseHost to avoid the unnecessary overhead of\n   * retrying host selection.\n   */\n  virtual HostConstSharedPtr chooseHostOnce(LoadBalancerContext* context) PURE;\n\n  /**\n   * For the given host_set @return if we should be in a panic mode or not. For example, if the\n   * majority of hosts are unhealthy we'll be likely in a panic mode. In this case we'll route\n   * requests to hosts regardless of whether they are healthy or not.\n   */\n  bool isHostSetInPanic(const HostSet& host_set) const;\n\n  /**\n   * Method is called when all host sets are in panic mode.\n   * In such state the load is distributed based on the number of hosts\n   * in given priority regardless of their health.\n   */\n  void recalculateLoadInTotalPanic();\n\n  LoadBalancerBase(const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime,\n                   Random::RandomGenerator& random,\n                   const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config);\n\n  // Choose host set randomly, based on the healthy_per_priority_load_ and\n  // degraded_per_priority_load_. per_priority_load_ is consulted first, spilling over to\n  // degraded_per_priority_load_ if necessary. 
When a host set is selected based on\n  // degraded_per_priority_load_, only degraded hosts should be selected from that host set.\n  //\n  // @return host set to use and which availability to target.\n  std::pair<HostSet&, HostAvailability> chooseHostSet(LoadBalancerContext* context,\n                                                      uint64_t hash) const;\n\n  uint32_t percentageLoad(uint32_t priority) const {\n    return per_priority_load_.healthy_priority_load_.get()[priority];\n  }\n  uint32_t percentageDegradedLoad(uint32_t priority) const {\n    return per_priority_load_.degraded_priority_load_.get()[priority];\n  }\n  bool isInPanic(uint32_t priority) const { return per_priority_panic_[priority]; }\n  uint64_t random(bool peeking) {\n    if (peeking) {\n      stashed_random_.push_back(random_.random());\n      return stashed_random_.back();\n    } else {\n      if (!stashed_random_.empty()) {\n        auto random = stashed_random_.front();\n        stashed_random_.pop_front();\n        return random;\n      } else {\n        return random_.random();\n      }\n    }\n  }\n\n  ClusterStats& stats_;\n  Runtime::Loader& runtime_;\n  std::deque<uint64_t> stashed_random_;\n  Random::RandomGenerator& random_;\n  const uint32_t default_healthy_panic_percent_;\n  // The priority-ordered set of hosts to use for load balancing.\n  const PrioritySet& priority_set_;\n\npublic:\n  // Called when a host set at the given priority level is updated. 
This updates\n  // per_priority_health for that priority level, and may update per_priority_load for all\n  // priority levels.\n  void static recalculatePerPriorityState(uint32_t priority, const PrioritySet& priority_set,\n                                          HealthyAndDegradedLoad& priority_load,\n                                          HealthyAvailability& per_priority_health,\n                                          DegradedAvailability& per_priority_degraded);\n  void recalculatePerPriorityPanic();\n\nprotected:\n  // Method calculates normalized total availability.\n  //\n  // The availability of a priority is ratio of available (healthy/degraded) hosts over the total\n  // number of hosts multiplied by 100 and the overprovisioning factor. The total availability is\n  // the sum of the availability of each priority, up to a maximum of 100.\n  //\n  // For example, using the default overprovisioning factor of 1.4, a if priority A has 4 hosts,\n  // of which 1 is degraded and 1 is healthy, it will have availability of 2/4 * 100 * 1.4 = 70.\n  //\n  // Assuming two priorities with availability 60 and 70, the total availability would be 100.\n  static uint32_t\n  calculateNormalizedTotalAvailability(HealthyAvailability& per_priority_health,\n                                       DegradedAvailability& per_priority_degraded) {\n    const auto health =\n        std::accumulate(per_priority_health.get().begin(), per_priority_health.get().end(), 0);\n    const auto degraded =\n        std::accumulate(per_priority_degraded.get().begin(), per_priority_degraded.get().end(), 0);\n\n    return std::min<uint32_t>(health + degraded, 100);\n  }\n  // The percentage load (0-100) for each priority level when targeting healthy hosts and\n  // the percentage load (0-100) for each priority level when targeting degraded hosts.\n  HealthyAndDegradedLoad per_priority_load_;\n  // The health percentage (0-100) for each priority level.\n  HealthyAvailability 
per_priority_health_;\n  // The degraded percentage (0-100) for each priority level.\n  DegradedAvailability per_priority_degraded_;\n  // Levels which are in panic\n  std::vector<bool> per_priority_panic_;\n};\n\nclass LoadBalancerContextBase : public LoadBalancerContext {\npublic:\n  absl::optional<uint64_t> computeHashKey() override { return {}; }\n\n  const Network::Connection* downstreamConnection() const override { return nullptr; }\n\n  const Router::MetadataMatchCriteria* metadataMatchCriteria() override { return nullptr; }\n\n  const Http::RequestHeaderMap* downstreamHeaders() const override { return nullptr; }\n\n  const HealthyAndDegradedLoad&\n  determinePriorityLoad(const PrioritySet&, const HealthyAndDegradedLoad& original_priority_load,\n                        const Upstream::RetryPriority::PriorityMappingFunc&) override {\n    return original_priority_load;\n  }\n\n  bool shouldSelectAnotherHost(const Host&) override { return false; }\n\n  uint32_t hostSelectionRetryCount() const override { return 1; }\n\n  Network::Socket::OptionsSharedPtr upstreamSocketOptions() const override { return nullptr; }\n\n  Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override {\n    return nullptr;\n  }\n};\n\n/**\n * Base class for zone aware load balancers\n */\nclass ZoneAwareLoadBalancerBase : public LoadBalancerBase {\nprotected:\n  // Both priority_set and local_priority_set if non-null must have at least one host set.\n  ZoneAwareLoadBalancerBase(\n      const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats,\n      Runtime::Loader& runtime, Random::RandomGenerator& random,\n      const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config);\n  ~ZoneAwareLoadBalancerBase() override;\n\n  // When deciding which hosts to use on an LB decision, we need to know how to index into the\n  // priority_set. 
This priority_set cursor is used by ZoneAwareLoadBalancerBase subclasses, e.g.\n  // RoundRobinLoadBalancer, to index into auxiliary data structures specific to the LB for\n  // a given host set selection.\n  struct HostsSource {\n    enum class SourceType {\n      // All hosts in the host set.\n      AllHosts,\n      // All healthy hosts in the host set.\n      HealthyHosts,\n      // All degraded hosts in the host set.\n      DegradedHosts,\n      // Healthy hosts for locality @ locality_index.\n      LocalityHealthyHosts,\n      // Degraded hosts for locality @ locality_index.\n      LocalityDegradedHosts,\n    };\n\n    HostsSource() = default;\n\n    HostsSource(uint32_t priority, SourceType source_type)\n        : priority_(priority), source_type_(source_type) {\n      ASSERT(source_type == SourceType::AllHosts || source_type == SourceType::HealthyHosts ||\n             source_type == SourceType::DegradedHosts);\n    }\n\n    HostsSource(uint32_t priority, SourceType source_type, uint32_t locality_index)\n        : priority_(priority), source_type_(source_type), locality_index_(locality_index) {\n      ASSERT(source_type == SourceType::LocalityHealthyHosts ||\n             source_type == SourceType::LocalityDegradedHosts);\n    }\n\n    // Priority in PrioritySet.\n    uint32_t priority_{};\n\n    // How to index into HostSet for a given priority.\n    SourceType source_type_{};\n\n    // Locality index into HostsPerLocality for SourceType::LocalityHealthyHosts.\n    uint32_t locality_index_{};\n\n    bool operator==(const HostsSource& other) const {\n      return priority_ == other.priority_ && source_type_ == other.source_type_ &&\n             locality_index_ == other.locality_index_;\n    }\n  };\n\n  struct HostsSourceHash {\n    size_t operator()(const HostsSource& hs) const {\n      // This is only used for absl::node_hash_map keys, so we don't need a deterministic hash.\n      size_t hash = std::hash<uint32_t>()(hs.priority_);\n      hash = 37 * hash 
+ std::hash<size_t>()(static_cast<std::size_t>(hs.source_type_));\n      hash = 37 * hash + std::hash<uint32_t>()(hs.locality_index_);\n      return hash;\n    }\n  };\n\n  /**\n   * Pick the host source to use, doing zone aware routing when the hosts are sufficiently healthy.\n   * If no host is chosen (due to fail_traffic_on_panic being set), return absl::nullopt.\n   */\n  absl::optional<HostsSource> hostSourceToUse(LoadBalancerContext* context, uint64_t hash) const;\n\n  /**\n   * Index into priority_set via hosts source descriptor.\n   */\n  const HostVector& hostSourceToHosts(HostsSource hosts_source) const;\n\nprivate:\n  enum class LocalityRoutingState {\n    // Locality based routing is off.\n    NoLocalityRouting,\n    // All queries can be routed to the local locality.\n    LocalityDirect,\n    // The local locality can not handle the anticipated load. Residual load will be spread across\n    // various other localities.\n    LocalityResidual\n  };\n\n  /**\n   * Increase per_priority_state_ to at least priority_set.hostSetsPerPriority().size()\n   */\n  void resizePerPriorityState();\n\n  /**\n   * @return decision on quick exit from locality aware routing based on cluster configuration.\n   * This gets recalculated on update callback.\n   */\n  bool earlyExitNonLocalityRouting();\n\n  /**\n   * Try to select upstream hosts from the same locality.\n   * @param host_set the last host set returned by chooseHostSet()\n   */\n  uint32_t tryChooseLocalLocalityHosts(const HostSet& host_set) const;\n\n  /**\n   * @return (number of hosts in a given locality)/(total number of hosts) in `ret` param.\n   * The result is stored as integer number and scaled by 10000 multiplier for better precision.\n   * Caller is responsible for allocation/de-allocation of `ret`.\n   */\n  void calculateLocalityPercentage(const HostsPerLocality& hosts_per_locality, uint64_t* ret);\n\n  /**\n   * Regenerate locality aware routing structures for fast decisions on upstream locality 
selection.\n   */\n  void regenerateLocalityRoutingStructures();\n\n  HostSet& localHostSet() const { return *local_priority_set_->hostSetsPerPriority()[0]; }\n\n  static HostsSource::SourceType localitySourceType(HostAvailability host_availability) {\n    switch (host_availability) {\n    case HostAvailability::Healthy:\n      return HostsSource::SourceType::LocalityHealthyHosts;\n    case HostAvailability::Degraded:\n      return HostsSource::SourceType::LocalityDegradedHosts;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  static HostsSource::SourceType sourceType(HostAvailability host_availability) {\n    switch (host_availability) {\n    case HostAvailability::Healthy:\n      return HostsSource::SourceType::HealthyHosts;\n    case HostAvailability::Degraded:\n      return HostsSource::SourceType::DegradedHosts;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  // The set of local Envoy instances which are load balancing across priority_set_.\n  const PrioritySet* local_priority_set_;\n\n  const uint32_t routing_enabled_;\n  const uint64_t min_cluster_size_;\n  const bool fail_traffic_on_panic_;\n\n  struct PerPriorityState {\n    // The percent of requests which can be routed to the local locality.\n    uint64_t local_percent_to_route_{};\n    // Tracks the current state of locality based routing.\n    LocalityRoutingState locality_routing_state_{LocalityRoutingState::NoLocalityRouting};\n    // When locality_routing_state_ == LocalityResidual this tracks the capacity\n    // for each of the non-local localities to determine what traffic should be\n    // routed where.\n    std::vector<uint64_t> residual_capacity_;\n  };\n  using PerPriorityStatePtr = std::unique_ptr<PerPriorityState>;\n  // Routing state broken out for each priority level in priority_set_.\n  std::vector<PerPriorityStatePtr> per_priority_state_;\n  Common::CallbackHandle* local_priority_set_member_update_cb_handle_{};\n};\n\n/**\n * Base implementation of 
LoadBalancer that performs weighted RR selection across the hosts in the\n * cluster. This scheduler respects host weighting and utilizes an EdfScheduler to achieve O(log\n * n) pick and insertion time complexity, O(n) memory use. The key insight is that if we schedule\n * with 1 / weight deadline, we will achieve the desired pick frequency for weighted RR in a given\n * interval. Naive implementations of weighted RR are either O(n) pick time or O(m * n) memory use,\n * where m is the weight range. We also explicitly check for the unweighted special case and use a\n * simple index to achieve O(1) scheduling in that case.\n * TODO(htuch): We use EDF at Google, but the EDF scheduler may be overkill if we don't want to\n * support large ranges of weights or arbitrary precision floating weights, we could construct an\n * explicit schedule, since m will be a small constant factor in O(m * n). This\n * could also be done on a thread aware LB, avoiding creating multiple EDF\n * instances.\n *\n * This base class also supports unweighted selection which derived classes can use to customize\n * behavior. Derived classes can also override how host weight is determined when in weighted mode.\n */\nclass EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase {\npublic:\n  EdfLoadBalancerBase(const PrioritySet& priority_set, const PrioritySet* local_priority_set,\n                      ClusterStats& stats, Runtime::Loader& runtime,\n                      Random::RandomGenerator& random,\n                      const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config);\n\n  // Upstream::LoadBalancerBase\n  HostConstSharedPtr peekAnotherHost(LoadBalancerContext* context) override;\n  HostConstSharedPtr chooseHostOnce(LoadBalancerContext* context) override;\n\nprotected:\n  struct Scheduler {\n    // EdfScheduler for weighted LB. The edf_ is only created when the original\n    // host weights of 2 or more hosts differ. 
When not present, the\n    // implementation of chooseHostOnce falls back to unweightedHostPick.\n    std::unique_ptr<EdfScheduler<const Host>> edf_;\n  };\n\n  void initialize();\n\n  virtual void refresh(uint32_t priority);\n\n  // Seed to allow us to desynchronize load balancers across a fleet. If we don't\n  // do this, multiple Envoys that receive an update at the same time (or even\n  // multiple load balancers on the same host) will send requests to\n  // backends in roughly lock step, causing significant imbalance and potential\n  // overload.\n  const uint64_t seed_;\n\nprivate:\n  virtual void refreshHostSource(const HostsSource& source) PURE;\n  virtual double hostWeight(const Host& host) PURE;\n  virtual HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use,\n                                                const HostsSource& source) PURE;\n  virtual HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use,\n                                                const HostsSource& source) PURE;\n\n  // Scheduler for each valid HostsSource.\n  absl::node_hash_map<HostsSource, Scheduler, HostsSourceHash> scheduler_;\n};\n\n/**\n * A round robin load balancer. When in weighted mode, EDF scheduling is used. 
When in not\n * weighted mode, simple RR index selection is used.\n */\nclass RoundRobinLoadBalancer : public EdfLoadBalancerBase {\npublic:\n  RoundRobinLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set,\n                         ClusterStats& stats, Runtime::Loader& runtime,\n                         Random::RandomGenerator& random,\n                         const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n      : EdfLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random,\n                            common_config) {\n    initialize();\n  }\n\nprivate:\n  void refreshHostSource(const HostsSource& source) override {\n    // insert() is used here on purpose so that we don't overwrite the index if the host source\n    // already exists. Note that host sources will never be removed, but given how uncommon this\n    // is it probably doesn't matter.\n    rr_indexes_.insert({source, seed_});\n    // If the list of hosts changes, the order of picks change. Discard the\n    // index.\n    peekahead_index_ = 0;\n  }\n  double hostWeight(const Host& host) override { return host.weight(); }\n  HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use,\n                                        const HostsSource& source) override {\n    auto i = rr_indexes_.find(source);\n    if (i == rr_indexes_.end()) {\n      return nullptr;\n    }\n    return hosts_to_use[(i->second + (peekahead_index_)++) % hosts_to_use.size()];\n  }\n\n  HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use,\n                                        const HostsSource& source) override {\n    if (peekahead_index_ > 0) {\n      --peekahead_index_;\n    }\n    // To avoid storing the RR index in the base class, we end up using a second map here with\n    // host source as the key. This means that each LB decision will require two map lookups in\n    // the unweighted case. 
We might consider trying to optimize this in the future.\n    ASSERT(rr_indexes_.find(source) != rr_indexes_.end());\n    return hosts_to_use[rr_indexes_[source]++ % hosts_to_use.size()];\n  }\n\n  uint64_t peekahead_index_{};\n  absl::node_hash_map<HostsSource, uint64_t, HostsSourceHash> rr_indexes_;\n};\n\n/**\n * Weighted Least Request load balancer.\n *\n * In a normal setup when all hosts have the same weight it randomly picks up N healthy hosts\n * (where N is specified in the LB configuration) and compares number of active requests. Technique\n * is based on http://www.eecs.harvard.edu/~michaelm/postscripts/mythesis.pdf and is known as P2C\n * (power of two choices).\n *\n * When hosts have different weights, an RR EDF schedule is used. Host weight is scaled\n * by the number of active requests at pick/insert time. Thus, hosts will never fully drain as\n * they would in normal P2C, though they will get picked less and less often. In the future, we\n * can consider two alternate algorithms:\n * 1) Expand out all hosts by weight (using more memory) and do standard P2C.\n * 2) Use a weighted Maglev table, and perform P2C on two random hosts selected from the table.\n *    The benefit of the Maglev table is at the expense of resolution, memory usage is capped.\n *    Additionally, the Maglev table can be shared amongst all threads.\n */\nclass LeastRequestLoadBalancer : public EdfLoadBalancerBase,\n                                 Logger::Loggable<Logger::Id::upstream> {\npublic:\n  LeastRequestLoadBalancer(\n      const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats,\n      Runtime::Loader& runtime, Random::RandomGenerator& random,\n      const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config,\n      const absl::optional<envoy::config::cluster::v3::Cluster::LeastRequestLbConfig>\n          least_request_config)\n      : EdfLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random,\n        
                    common_config),\n        choice_count_(\n            least_request_config.has_value()\n                ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.value(), choice_count, 2)\n                : 2),\n        active_request_bias_runtime_(\n            least_request_config.has_value() && least_request_config->has_active_request_bias()\n                ? std::make_unique<Runtime::Double>(least_request_config->active_request_bias(),\n                                                    runtime)\n                : nullptr) {\n    initialize();\n  }\n\nprotected:\n  void refresh(uint32_t priority) override {\n    active_request_bias_ =\n        active_request_bias_runtime_ != nullptr ? active_request_bias_runtime_->value() : 1.0;\n\n    if (active_request_bias_ < 0.0) {\n      ENVOY_LOG(warn, \"upstream: invalid active request bias supplied (runtime key {}), using 1.0\",\n                active_request_bias_runtime_->runtimeKey());\n      active_request_bias_ = 1.0;\n    }\n\n    EdfLoadBalancerBase::refresh(priority);\n  }\n\nprivate:\n  void refreshHostSource(const HostsSource&) override {}\n  double hostWeight(const Host& host) override {\n    // This method is called to calculate the dynamic weight as following when all load balancing\n    // weights are not equal:\n    //\n    // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`\n    //\n    // `active_request_bias` can be configured via runtime and its value is cached in\n    // `active_request_bias_` to avoid having to do a runtime lookup each time a host weight is\n    // calculated.\n    //\n    // When `active_request_bias == 0.0` we behave like `RoundRobinLoadBalancer` and return the\n    // host weight without considering the number of active requests at the time we do the pick.\n    //\n    // When `active_request_bias > 0.0` we scale the host weight by the number of active\n    // requests at the time we do the pick. 
We always add 1 to avoid division by 0.\n    //\n    // It might be possible to do better by picking two hosts off of the schedule, and selecting the\n    // one with fewer active requests at the time of selection.\n    if (active_request_bias_ == 0.0) {\n      return host.weight();\n    }\n\n    if (active_request_bias_ == 1.0) {\n      return static_cast<double>(host.weight()) / (host.stats().rq_active_.value() + 1);\n    }\n\n    return static_cast<double>(host.weight()) /\n           std::pow(host.stats().rq_active_.value() + 1, active_request_bias_);\n  }\n  HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use,\n                                        const HostsSource& source) override;\n  HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use,\n                                        const HostsSource& source) override;\n\n  const uint32_t choice_count_;\n\n  // The exponent used to calculate host weights can be configured via runtime. We cache it for\n  // performance reasons and refresh it in `LeastRequestLoadBalancer::refresh(uint32_t priority)`\n  // whenever a `HostSet` is updated.\n  double active_request_bias_{};\n\n  const std::unique_ptr<Runtime::Double> active_request_bias_runtime_;\n};\n\n/**\n * Random load balancer that picks a random host out of all hosts.\n */\nclass RandomLoadBalancer : public ZoneAwareLoadBalancerBase {\npublic:\n  RandomLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set,\n                     ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random,\n                     const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n      : ZoneAwareLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random,\n                                  common_config) {}\n\n  // Upstream::LoadBalancerBase\n  HostConstSharedPtr chooseHostOnce(LoadBalancerContext* context) override;\n  HostConstSharedPtr 
peekAnotherHost(LoadBalancerContext* context) override;\n\nprotected:\n  HostConstSharedPtr peekOrChoose(LoadBalancerContext* context, bool peek);\n};\n\n/**\n * Implementation of SubsetSelector\n */\nclass SubsetSelectorImpl : public SubsetSelector {\npublic:\n  SubsetSelectorImpl(const Protobuf::RepeatedPtrField<std::string>& selector_keys,\n                     envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::\n                         LbSubsetSelectorFallbackPolicy fallback_policy,\n                     const Protobuf::RepeatedPtrField<std::string>& fallback_keys_subset,\n                     bool single_host_per_subset);\n\n  // SubsetSelector\n  const std::set<std::string>& selectorKeys() const override { return selector_keys_; }\n  envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::\n      LbSubsetSelectorFallbackPolicy\n      fallbackPolicy() const override {\n    return fallback_policy_;\n  }\n  const std::set<std::string>& fallbackKeysSubset() const override { return fallback_keys_subset_; }\n  bool singleHostPerSubset() const override { return single_host_per_subset_; }\n\nprivate:\n  const std::set<std::string> selector_keys_;\n  const envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::\n      LbSubsetSelectorFallbackPolicy fallback_policy_;\n  const std::set<std::string> fallback_keys_subset_;\n  const bool single_host_per_subset_;\n};\n\n/**\n * Implementation of LoadBalancerSubsetInfo.\n */\nclass LoadBalancerSubsetInfoImpl : public LoadBalancerSubsetInfo {\npublic:\n  LoadBalancerSubsetInfoImpl(\n      const envoy::config::cluster::v3::Cluster::LbSubsetConfig& subset_config)\n      : enabled_(!subset_config.subset_selectors().empty()),\n        fallback_policy_(subset_config.fallback_policy()),\n        default_subset_(subset_config.default_subset()),\n        locality_weight_aware_(subset_config.locality_weight_aware()),\n        
scale_locality_weight_(subset_config.scale_locality_weight()),\n        panic_mode_any_(subset_config.panic_mode_any()), list_as_any_(subset_config.list_as_any()) {\n    for (const auto& subset : subset_config.subset_selectors()) {\n      if (!subset.keys().empty()) {\n        subset_selectors_.emplace_back(std::make_shared<SubsetSelectorImpl>(\n            subset.keys(), subset.fallback_policy(), subset.fallback_keys_subset(),\n            subset.single_host_per_subset()));\n      }\n    }\n  }\n\n  // Upstream::LoadBalancerSubsetInfo\n  bool isEnabled() const override { return enabled_; }\n  envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetFallbackPolicy\n  fallbackPolicy() const override {\n    return fallback_policy_;\n  }\n  const ProtobufWkt::Struct& defaultSubset() const override { return default_subset_; }\n  const std::vector<SubsetSelectorPtr>& subsetSelectors() const override {\n    return subset_selectors_;\n  }\n  bool localityWeightAware() const override { return locality_weight_aware_; }\n  bool scaleLocalityWeight() const override { return scale_locality_weight_; }\n  bool panicModeAny() const override { return panic_mode_any_; }\n  bool listAsAny() const override { return list_as_any_; }\n\nprivate:\n  const bool enabled_;\n  const envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetFallbackPolicy\n      fallback_policy_;\n  const ProtobufWkt::Struct default_subset_;\n  std::vector<SubsetSelectorPtr> subset_selectors_;\n  const bool locality_weight_aware_;\n  const bool scale_locality_weight_;\n  const bool panic_mode_any_;\n  const bool list_as_any_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/load_stats_reporter.cc",
    "content": "#include \"common/upstream/load_stats_reporter.h\"\n\n#include \"envoy/service/load_stats/v3/lrs.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nLoadStatsReporter::LoadStatsReporter(const LocalInfo::LocalInfo& local_info,\n                                     ClusterManager& cluster_manager, Stats::Scope& scope,\n                                     Grpc::RawAsyncClientPtr async_client,\n                                     envoy::config::core::v3::ApiVersion transport_api_version,\n                                     Event::Dispatcher& dispatcher)\n    : cm_(cluster_manager), stats_{ALL_LOAD_REPORTER_STATS(\n                                POOL_COUNTER_PREFIX(scope, \"load_reporter.\"))},\n      async_client_(std::move(async_client)), transport_api_version_(transport_api_version),\n      service_method_(\n          Grpc::VersionedMethods(\"envoy.service.load_stats.v3.LoadReportingService.StreamLoadStats\",\n                                 \"envoy.service.load_stats.v2.LoadReportingService.StreamLoadStats\")\n              .getMethodDescriptorForVersion(transport_api_version)),\n      time_source_(dispatcher.timeSource()) {\n  request_.mutable_node()->MergeFrom(local_info.node());\n  request_.mutable_node()->add_client_features(\"envoy.lrs.supports_send_all_clusters\");\n  retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); });\n  response_timer_ = dispatcher.createTimer([this]() -> void { sendLoadStatsRequest(); });\n  establishNewStream();\n}\n\nvoid LoadStatsReporter::setRetryTimer() {\n  retry_timer_->enableTimer(std::chrono::milliseconds(RETRY_DELAY_MS));\n}\n\nvoid LoadStatsReporter::establishNewStream() {\n  ENVOY_LOG(debug, \"Establishing new gRPC bidi stream for {}\", service_method_.DebugString());\n  stream_ = async_client_->start(service_method_, *this, 
Http::AsyncClient::StreamOptions());\n  if (stream_ == nullptr) {\n    ENVOY_LOG(warn, \"Unable to establish new stream\");\n    handleFailure();\n    return;\n  }\n\n  request_.mutable_cluster_stats()->Clear();\n  sendLoadStatsRequest();\n}\n\nvoid LoadStatsReporter::sendLoadStatsRequest() {\n  // TODO(htuch): This sends load reports for only the set of clusters in clusters_, which\n  // was initialized in startLoadReportPeriod() the last time we either sent a load report\n  // or received a new LRS response (whichever happened more recently). The code in\n  // startLoadReportPeriod() adds to clusters_ only those clusters that exist in the\n  // ClusterManager at the moment when startLoadReportPeriod() runs. This means that if\n  // a cluster is selected by the LRS server (either by being explicitly listed or by using\n  // the send_all_clusters field), if that cluster was added to the ClusterManager since the\n  // last time startLoadReportPeriod() was invoked, we will not report its load here. In\n  // practice, this means that for any newly created cluster, we will always drop the data for\n  // the initial load report period. This seems sub-optimal.\n  //\n  // One possible way to deal with this would be to get a notification whenever a new cluster is\n  // added to the cluster manager. 
When we get the notification, we record the current time in\n  // clusters_ as the start time for the load reporting window for that cluster.\n  request_.mutable_cluster_stats()->Clear();\n  for (const auto& cluster_name_and_timestamp : clusters_) {\n    const std::string& cluster_name = cluster_name_and_timestamp.first;\n    auto cluster_info_map = cm_.clusters();\n    auto it = cluster_info_map.find(cluster_name);\n    if (it == cluster_info_map.end()) {\n      ENVOY_LOG(debug, \"Cluster {} does not exist\", cluster_name);\n      continue;\n    }\n    auto& cluster = it->second.get();\n    auto* cluster_stats = request_.add_cluster_stats();\n    cluster_stats->set_cluster_name(cluster_name);\n    if (cluster.info()->edsServiceName().has_value()) {\n      cluster_stats->set_cluster_service_name(cluster.info()->edsServiceName().value());\n    }\n    for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {\n      ENVOY_LOG(trace, \"Load report locality count {}\", host_set->hostsPerLocality().get().size());\n      for (auto& hosts : host_set->hostsPerLocality().get()) {\n        ASSERT(!hosts.empty());\n        uint64_t rq_success = 0;\n        uint64_t rq_error = 0;\n        uint64_t rq_active = 0;\n        uint64_t rq_issued = 0;\n        for (const auto& host : hosts) {\n          rq_success += host->stats().rq_success_.latch();\n          rq_error += host->stats().rq_error_.latch();\n          rq_active += host->stats().rq_active_.value();\n          rq_issued += host->stats().rq_total_.latch();\n        }\n        if (rq_success + rq_error + rq_active != 0) {\n          auto* locality_stats = cluster_stats->add_upstream_locality_stats();\n          locality_stats->mutable_locality()->MergeFrom(hosts[0]->locality());\n          locality_stats->set_priority(host_set->priority());\n          locality_stats->set_total_successful_requests(rq_success);\n          locality_stats->set_total_error_requests(rq_error);\n          
locality_stats->set_total_requests_in_progress(rq_active);\n          locality_stats->set_total_issued_requests(rq_issued);\n        }\n      }\n    }\n    cluster_stats->set_total_dropped_requests(\n        cluster.info()->loadReportStats().upstream_rq_dropped_.latch());\n    const auto now = time_source_.monotonicTime().time_since_epoch();\n    const auto measured_interval = now - cluster_name_and_timestamp.second;\n    cluster_stats->mutable_load_report_interval()->MergeFrom(\n        Protobuf::util::TimeUtil::MicrosecondsToDuration(\n            std::chrono::duration_cast<std::chrono::microseconds>(measured_interval).count()));\n    clusters_[cluster_name] = now;\n  }\n\n  Config::VersionConverter::prepareMessageForGrpcWire(request_, transport_api_version_);\n  ENVOY_LOG(trace, \"Sending LoadStatsRequest: {}\", request_.DebugString());\n  stream_->sendMessage(request_, false);\n  stats_.responses_.inc();\n  // When the connection is established, the message has not yet been read so we\n  // will not have a load reporting period.\n  if (message_) {\n    startLoadReportPeriod();\n  }\n}\n\nvoid LoadStatsReporter::handleFailure() {\n  ENVOY_LOG(warn, \"Load reporter stats stream/connection failure, will retry in {} ms.\",\n            RETRY_DELAY_MS);\n  stats_.errors_.inc();\n  setRetryTimer();\n}\n\nvoid LoadStatsReporter::onCreateInitialMetadata(Http::RequestHeaderMap& metadata) {\n  UNREFERENCED_PARAMETER(metadata);\n}\n\nvoid LoadStatsReporter::onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata) {\n  UNREFERENCED_PARAMETER(metadata);\n}\n\nvoid LoadStatsReporter::onReceiveMessage(\n    std::unique_ptr<envoy::service::load_stats::v3::LoadStatsResponse>&& message) {\n  ENVOY_LOG(debug, \"New load report epoch: {}\", message->DebugString());\n  message_ = std::move(message);\n  startLoadReportPeriod();\n  stats_.requests_.inc();\n}\n\nvoid LoadStatsReporter::startLoadReportPeriod() {\n  // Once a cluster is tracked, we don't want to reset its stats 
between reports\n  // to avoid racing between request/response.\n  // TODO(htuch): They key here could be absl::string_view, but this causes\n  // problems due to referencing of temporaries in the below loop with Google's\n  // internal string type. Consider this optimization when the string types\n  // converge.\n  absl::node_hash_map<std::string, std::chrono::steady_clock::duration> existing_clusters;\n  if (message_->send_all_clusters()) {\n    for (const auto& p : cm_.clusters()) {\n      const std::string& cluster_name = p.first;\n      if (clusters_.count(cluster_name) > 0) {\n        existing_clusters.emplace(cluster_name, clusters_[cluster_name]);\n      }\n    }\n  } else {\n    for (const std::string& cluster_name : message_->clusters()) {\n      if (clusters_.count(cluster_name) > 0) {\n        existing_clusters.emplace(cluster_name, clusters_[cluster_name]);\n      }\n    }\n  }\n  clusters_.clear();\n  // Reset stats for all hosts in clusters we are tracking.\n  auto handle_cluster_func = [this, &existing_clusters](const std::string& cluster_name) {\n    clusters_.emplace(cluster_name, existing_clusters.count(cluster_name) > 0\n                                        ? 
existing_clusters[cluster_name]\n                                        : time_source_.monotonicTime().time_since_epoch());\n    auto cluster_info_map = cm_.clusters();\n    auto it = cluster_info_map.find(cluster_name);\n    if (it == cluster_info_map.end()) {\n      return;\n    }\n    // Don't reset stats for existing tracked clusters.\n    if (existing_clusters.count(cluster_name) > 0) {\n      return;\n    }\n    auto& cluster = it->second.get();\n    for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {\n      for (const auto& host : host_set->hosts()) {\n        host->stats().rq_success_.latch();\n        host->stats().rq_error_.latch();\n        host->stats().rq_total_.latch();\n      }\n    }\n    cluster.info()->loadReportStats().upstream_rq_dropped_.latch();\n  };\n  if (message_->send_all_clusters()) {\n    for (const auto& p : cm_.clusters()) {\n      const std::string& cluster_name = p.first;\n      handle_cluster_func(cluster_name);\n    }\n  } else {\n    for (const std::string& cluster_name : message_->clusters()) {\n      handle_cluster_func(cluster_name);\n    }\n  }\n  response_timer_->enableTimer(std::chrono::milliseconds(\n      DurationUtil::durationToMilliseconds(message_->load_reporting_interval())));\n}\n\nvoid LoadStatsReporter::onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) {\n  UNREFERENCED_PARAMETER(metadata);\n}\n\nvoid LoadStatsReporter::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) {\n  ENVOY_LOG(warn, \"{} gRPC config stream closed: {}, {}\", service_method_.name(), status, message);\n  response_timer_->disableTimer();\n  stream_ = nullptr;\n  handleFailure();\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/load_stats_reporter.h",
    "content": "#include \"envoy/event/dispatcher.h\"\n#include \"envoy/service/load_stats/v3/lrs.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/grpc/async_client_impl.h\"\n#include \"common/grpc/typed_async_client.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * All load reporter stats. @see stats_macros.h\n */\n#define ALL_LOAD_REPORTER_STATS(COUNTER)                                                           \\\n  COUNTER(requests)                                                                                \\\n  COUNTER(responses)                                                                               \\\n  COUNTER(errors)\n\n/**\n * Struct definition for all load reporter stats. @see stats_macros.h\n */\nstruct LoadReporterStats {\n  ALL_LOAD_REPORTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nclass LoadStatsReporter\n    : Grpc::AsyncStreamCallbacks<envoy::service::load_stats::v3::LoadStatsResponse>,\n      Logger::Loggable<Logger::Id::upstream> {\npublic:\n  LoadStatsReporter(const LocalInfo::LocalInfo& local_info, ClusterManager& cluster_manager,\n                    Stats::Scope& scope, Grpc::RawAsyncClientPtr async_client,\n                    envoy::config::core::v3::ApiVersion transport_api_version,\n                    Event::Dispatcher& dispatcher);\n\n  // Grpc::AsyncStreamCallbacks\n  void onCreateInitialMetadata(Http::RequestHeaderMap& metadata) override;\n  void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata) override;\n  void onReceiveMessage(\n      std::unique_ptr<envoy::service::load_stats::v3::LoadStatsResponse>&& message) override;\n  void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) override;\n  void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override;\n\n  // TODO(htuch): Make this configurable or some static.\n  const 
uint32_t RETRY_DELAY_MS = 5000;\n\nprivate:\n  void setRetryTimer();\n  void establishNewStream();\n  void sendLoadStatsRequest();\n  void handleFailure();\n  void startLoadReportPeriod();\n\n  ClusterManager& cm_;\n  LoadReporterStats stats_;\n  Grpc::AsyncClient<envoy::service::load_stats::v3::LoadStatsRequest,\n                    envoy::service::load_stats::v3::LoadStatsResponse>\n      async_client_;\n  const envoy::config::core::v3::ApiVersion transport_api_version_;\n  Grpc::AsyncStream<envoy::service::load_stats::v3::LoadStatsRequest> stream_{};\n  const Protobuf::MethodDescriptor& service_method_;\n  Event::TimerPtr retry_timer_;\n  Event::TimerPtr response_timer_;\n  envoy::service::load_stats::v3::LoadStatsRequest request_;\n  std::unique_ptr<envoy::service::load_stats::v3::LoadStatsResponse> message_;\n  // Map from cluster name to start of measurement interval.\n  absl::node_hash_map<std::string, std::chrono::steady_clock::duration> clusters_;\n  TimeSource& time_source_;\n};\n\nusing LoadStatsReporterPtr = std::unique_ptr<LoadStatsReporter>;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/logical_dns_cluster.cc",
    "content": "#include \"common/upstream/logical_dns_cluster.h\"\n\n#include <chrono>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/config/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nnamespace {\nenvoy::config::endpoint::v3::ClusterLoadAssignment\nconvertPriority(const envoy::config::endpoint::v3::ClusterLoadAssignment& load_assignment) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment converted;\n  converted.MergeFrom(load_assignment);\n\n  // We convert the priority set by the configuration back to zero. 
This helps\n  // ensure that we don't blow up later on when using zone aware routing due\n  // to a check that all priorities are zero.\n  //\n  // Since LOGICAL_DNS is limited to exactly one host declared per load_assignment\n  // (checked in the ctor in this file), we can safely just rewrite the priority\n  // to zero.\n  for (auto& endpoint : *converted.mutable_endpoints()) {\n    endpoint.set_priority(0);\n  }\n\n  return converted;\n}\n} // namespace\n\nLogicalDnsCluster::LogicalDnsCluster(\n    const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n    Network::DnsResolverSharedPtr dns_resolver,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n    Stats::ScopePtr&& stats_scope, bool added_via_api)\n    : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api),\n      dns_resolver_(dns_resolver),\n      dns_refresh_rate_ms_(\n          std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(cluster, dns_refresh_rate, 5000))),\n      respect_dns_ttl_(cluster.respect_dns_ttl()),\n      resolve_timer_(\n          factory_context.dispatcher().createTimer([this]() -> void { startResolve(); })),\n      local_info_(factory_context.localInfo()),\n      load_assignment_(\n          cluster.has_load_assignment()\n              ? 
convertPriority(cluster.load_assignment())\n              : Config::Utility::translateClusterHosts(cluster.hidden_envoy_deprecated_hosts())) {\n  failure_backoff_strategy_ =\n      Config::Utility::prepareDnsRefreshStrategy<envoy::config::cluster::v3::Cluster>(\n          cluster, dns_refresh_rate_ms_.count(), factory_context.api().randomGenerator());\n\n  const auto& locality_lb_endpoints = load_assignment_.endpoints();\n  if (locality_lb_endpoints.size() != 1 || locality_lb_endpoints[0].lb_endpoints().size() != 1) {\n    if (cluster.has_load_assignment()) {\n      throw EnvoyException(\n          \"LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint\");\n    } else {\n      throw EnvoyException(\"LOGICAL_DNS clusters must have a single host\");\n    }\n  }\n\n  const envoy::config::core::v3::SocketAddress& socket_address =\n      lbEndpoint().endpoint().address().socket_address();\n\n  if (!socket_address.resolver_name().empty()) {\n    throw EnvoyException(\"LOGICAL_DNS clusters must NOT have a custom resolver name set\");\n  }\n\n  dns_url_ = fmt::format(\"tcp://{}:{}\", socket_address.address(), socket_address.port_value());\n  hostname_ = Network::Utility::hostFromTcpUrl(dns_url_);\n  Network::Utility::portFromTcpUrl(dns_url_);\n  dns_lookup_family_ = getDnsLookupFamilyFromCluster(cluster);\n}\n\nvoid LogicalDnsCluster::startPreInit() { startResolve(); }\n\nLogicalDnsCluster::~LogicalDnsCluster() {\n  if (active_dns_query_) {\n    active_dns_query_->cancel();\n  }\n}\n\nvoid LogicalDnsCluster::startResolve() {\n  std::string dns_address = Network::Utility::hostFromTcpUrl(dns_url_);\n  ENVOY_LOG(debug, \"starting async DNS resolution for {}\", dns_address);\n  info_->stats().update_attempt_.inc();\n\n  active_dns_query_ = dns_resolver_->resolve(\n      dns_address, dns_lookup_family_,\n      [this, dns_address](Network::DnsResolver::ResolutionStatus status,\n                          std::list<Network::DnsResponse>&& response) 
-> void {\n        active_dns_query_ = nullptr;\n        ENVOY_LOG(debug, \"async DNS resolution complete for {}\", dns_address);\n\n        std::chrono::milliseconds final_refresh_rate = dns_refresh_rate_ms_;\n\n        // If the DNS resolver successfully resolved with an empty response list, the logical DNS\n        // cluster does not update. This ensures that a potentially previously resolved address does\n        // not stabilize back to 0 hosts.\n        if (status == Network::DnsResolver::ResolutionStatus::Success && !response.empty()) {\n          info_->stats().update_success_.inc();\n          // TODO(mattklein123): Move port handling into the DNS interface.\n          ASSERT(response.front().address_ != nullptr);\n          Network::Address::InstanceConstSharedPtr new_address =\n              Network::Utility::getAddressWithPort(*(response.front().address_),\n                                                   Network::Utility::portFromTcpUrl(dns_url_));\n\n          if (!logical_host_) {\n            logical_host_ = std::make_shared<LogicalHost>(\n                info_, hostname_, new_address, localityLbEndpoint(), lbEndpoint(), nullptr);\n\n            const auto& locality_lb_endpoint = localityLbEndpoint();\n            PriorityStateManager priority_state_manager(*this, local_info_, nullptr);\n            priority_state_manager.initializePriorityFor(locality_lb_endpoint);\n            priority_state_manager.registerHostForPriority(logical_host_, locality_lb_endpoint);\n\n            const uint32_t priority = locality_lb_endpoint.priority();\n            priority_state_manager.updateClusterPrioritySet(\n                priority, std::move(priority_state_manager.priorityState()[priority].first),\n                absl::nullopt, absl::nullopt, absl::nullopt);\n          }\n\n          if (!current_resolved_address_ || !(*new_address == *current_resolved_address_)) {\n            current_resolved_address_ = new_address;\n\n            // Make sure that we 
have an updated address for admin display, health\n            // checking, and creating real host connections.\n            logical_host_->setNewAddress(new_address, lbEndpoint());\n          }\n\n          // reset failure backoff strategy because there was a success.\n          failure_backoff_strategy_->reset();\n\n          if (respect_dns_ttl_ && response.front().ttl_ != std::chrono::seconds(0)) {\n            final_refresh_rate = response.front().ttl_;\n          }\n          ENVOY_LOG(debug, \"DNS refresh rate reset for {}, refresh rate {} ms\", dns_address,\n                    final_refresh_rate.count());\n        } else {\n          info_->stats().update_failure_.inc();\n          final_refresh_rate =\n              std::chrono::milliseconds(failure_backoff_strategy_->nextBackOffMs());\n          ENVOY_LOG(debug, \"DNS refresh rate reset for {}, (failure) refresh rate {} ms\",\n                    dns_address, final_refresh_rate.count());\n        }\n\n        onPreInitComplete();\n        resolve_timer_->enableTimer(final_refresh_rate);\n      });\n}\n\nstd::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr>\nLogicalDnsClusterFactory::createClusterImpl(\n    const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n    Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n    Stats::ScopePtr&& stats_scope) {\n  auto selected_dns_resolver = selectDnsResolver(cluster, context);\n\n  return std::make_pair(std::make_shared<LogicalDnsCluster>(\n                            cluster, context.runtime(), selected_dns_resolver,\n                            socket_factory_context, std::move(stats_scope), context.addedViaApi()),\n                        nullptr);\n}\n\n/**\n * Static registration for the strict dns cluster factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(LogicalDnsClusterFactory, ClusterFactory);\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/logical_dns_cluster.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <string>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/upstream/cluster_factory_impl.h\"\n#include \"common/upstream/logical_host.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/clusters/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * The LogicalDnsCluster is a type of cluster that creates a single logical host that wraps\n * an async DNS resolver. The DNS resolver will continuously resolve, and swap in the first IP\n * address in the resolution set. However the logical owning host will not change. Any created\n * connections against this host will use the currently resolved IP. This means that a connection\n * pool using the logical host may end up with connections to many different real IPs.\n *\n * This cluster type is useful for large web services that use DNS in a round robin fashion, such\n * that DNS resolution may repeatedly return different results. A single connection pool can be\n * created that will internally have connections to different backends, while still allowing long\n * connection lengths and keep alive. 
The cluster type should only be used when an IP address change\n * means that connections using the IP should not drain.\n */\nclass LogicalDnsCluster : public ClusterImplBase {\npublic:\n  LogicalDnsCluster(const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n                    Network::DnsResolverSharedPtr dns_resolver,\n                    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n                    Stats::ScopePtr&& stats_scope, bool added_via_api);\n\n  ~LogicalDnsCluster() override;\n\n  // Upstream::Cluster\n  InitializePhase initializePhase() const override { return InitializePhase::Primary; }\n\nprivate:\n  const envoy::config::endpoint::v3::LocalityLbEndpoints& localityLbEndpoint() const {\n    // This is checked in the constructor, i.e. at config load time.\n    ASSERT(load_assignment_.endpoints().size() == 1);\n    return load_assignment_.endpoints()[0];\n  }\n\n  const envoy::config::endpoint::v3::LbEndpoint& lbEndpoint() const {\n    // This is checked in the constructor, i.e. 
at config load time.\n    ASSERT(localityLbEndpoint().lb_endpoints().size() == 1);\n    return localityLbEndpoint().lb_endpoints()[0];\n  }\n\n  void startResolve();\n\n  // ClusterImplBase\n  void startPreInit() override;\n\n  Network::DnsResolverSharedPtr dns_resolver_;\n  const std::chrono::milliseconds dns_refresh_rate_ms_;\n  BackOffStrategyPtr failure_backoff_strategy_;\n  const bool respect_dns_ttl_;\n  Network::DnsLookupFamily dns_lookup_family_;\n  Event::TimerPtr resolve_timer_;\n  std::string dns_url_;\n  std::string hostname_;\n  Network::Address::InstanceConstSharedPtr current_resolved_address_;\n  LogicalHostSharedPtr logical_host_;\n  Network::ActiveDnsQuery* active_dns_query_{};\n  const LocalInfo::LocalInfo& local_info_;\n  const envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_;\n};\n\nclass LogicalDnsClusterFactory : public ClusterFactoryImplBase {\npublic:\n  LogicalDnsClusterFactory()\n      : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().LogicalDns) {}\n\nprivate:\n  std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> createClusterImpl(\n      const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override;\n};\n\nDECLARE_FACTORY(LogicalDnsClusterFactory);\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/logical_host.cc",
    "content": "#include \"common/upstream/logical_host.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nUpstream::Host::CreateConnectionData LogicalHost::createConnection(\n    Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options,\n    Network::TransportSocketOptionsSharedPtr transport_socket_options) const {\n  const auto current_address = address();\n  return {HostImpl::createConnection(\n              dispatcher, cluster(), current_address, transportSocketFactory(), options,\n              override_transport_socket_options_ != nullptr ? override_transport_socket_options_\n                                                            : transport_socket_options),\n          std::make_shared<RealHostDescription>(current_address, shared_from_this())};\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/logical_host.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * A host implementation that can have its address changed in order to create temporal \"real\"\n * hosts.\n */\nclass LogicalHost : public HostImpl {\npublic:\n  LogicalHost(const ClusterInfoConstSharedPtr& cluster, const std::string& hostname,\n              const Network::Address::InstanceConstSharedPtr& address,\n              const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint,\n              const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint,\n              const Network::TransportSocketOptionsSharedPtr& override_transport_socket_options)\n      : HostImpl(cluster, hostname, address,\n                 // TODO(zyfjeff): Created through metadata shared pool\n                 std::make_shared<const envoy::config::core::v3::Metadata>(lb_endpoint.metadata()),\n                 lb_endpoint.load_balancing_weight().value(), locality_lb_endpoint.locality(),\n                 lb_endpoint.endpoint().health_check_config(), locality_lb_endpoint.priority(),\n                 lb_endpoint.health_status()),\n        override_transport_socket_options_(override_transport_socket_options) {}\n\n  // Set the new address. Updates are typically rare so a R/W lock is used for address updates.\n  // Note that the health check address update requires no lock to be held since it is only\n  // used on the main thread, but we do so anyway since it shouldn't be perf critical and will\n  // future proof the code.\n  void setNewAddress(const Network::Address::InstanceConstSharedPtr& address,\n                     const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint) {\n    const auto& port_value = lb_endpoint.endpoint().health_check_config().port_value();\n    auto health_check_address =\n        port_value == 0 ? 
address : Network::Utility::getAddressWithPort(*address, port_value);\n\n    absl::WriterMutexLock lock(&address_lock_);\n    address_ = address;\n    health_check_address_ = health_check_address;\n  }\n\n  // Upstream::Host\n  CreateConnectionData createConnection(\n      Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options,\n      Network::TransportSocketOptionsSharedPtr transport_socket_options) const override;\n\n  // Upstream::HostDescription\n  Network::Address::InstanceConstSharedPtr address() const override {\n    absl::ReaderMutexLock lock(&address_lock_);\n    return HostImpl::address();\n  }\n  Network::Address::InstanceConstSharedPtr healthCheckAddress() const override {\n    absl::ReaderMutexLock lock(&address_lock_);\n    return HostImpl::healthCheckAddress();\n  }\n\nprivate:\n  const Network::TransportSocketOptionsSharedPtr override_transport_socket_options_;\n  mutable absl::Mutex address_lock_;\n};\n\nusing LogicalHostSharedPtr = std::shared_ptr<LogicalHost>;\n\n/**\n * A real host that forwards most of its calls to a logical host, but returns a snapped address.\n */\nclass RealHostDescription : public HostDescription {\npublic:\n  RealHostDescription(Network::Address::InstanceConstSharedPtr address,\n                      HostConstSharedPtr logical_host)\n      : address_(address), logical_host_(logical_host) {}\n\n  // Upstream:HostDescription\n  bool canary() const override { return logical_host_->canary(); }\n  void canary(bool) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  MetadataConstSharedPtr metadata() const override { return logical_host_->metadata(); }\n  void metadata(MetadataConstSharedPtr) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  Network::TransportSocketFactory& transportSocketFactory() const override {\n    return logical_host_->transportSocketFactory();\n  }\n  const ClusterInfo& cluster() const override { return logical_host_->cluster(); }\n  HealthCheckHostMonitor& healthChecker() 
const override { return logical_host_->healthChecker(); }\n  Outlier::DetectorHostMonitor& outlierDetector() const override {\n    return logical_host_->outlierDetector();\n  }\n  HostStats& stats() const override { return logical_host_->stats(); }\n  const std::string& hostnameForHealthChecks() const override {\n    return logical_host_->hostnameForHealthChecks();\n  }\n  const std::string& hostname() const override { return logical_host_->hostname(); }\n  Network::Address::InstanceConstSharedPtr address() const override { return address_; }\n  const envoy::config::core::v3::Locality& locality() const override {\n    return logical_host_->locality();\n  }\n  Stats::StatName localityZoneStatName() const override {\n    return logical_host_->localityZoneStatName();\n  }\n  Network::Address::InstanceConstSharedPtr healthCheckAddress() const override {\n    // Should never be called since real hosts are used only for forwarding and not health\n    // checking.\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  uint32_t priority() const override { return logical_host_->priority(); }\n  void priority(uint32_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\nprivate:\n  const Network::Address::InstanceConstSharedPtr address_;\n  const HostConstSharedPtr logical_host_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/maglev_lb.cc",
    "content": "#include \"common/upstream/maglev_lb.h\"\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nMaglevTable::MaglevTable(const NormalizedHostWeightVector& normalized_host_weights,\n                         double max_normalized_weight, uint64_t table_size,\n                         bool use_hostname_for_hashing, MaglevLoadBalancerStats& stats)\n    : table_size_(table_size), stats_(stats) {\n  // We can't do anything sensible with no hosts.\n  if (normalized_host_weights.empty()) {\n    return;\n  }\n\n  // Implementation of pseudocode listing 1 in the paper (see header file for more info).\n  std::vector<TableBuildEntry> table_build_entries;\n  table_build_entries.reserve(normalized_host_weights.size());\n  for (const auto& host_weight : normalized_host_weights) {\n    const auto& host = host_weight.first;\n    const std::string& address =\n        use_hostname_for_hashing ? host->hostname() : host->address()->asString();\n    ASSERT(!address.empty());\n    table_build_entries.emplace_back(host, HashUtil::xxHash64(address) % table_size_,\n                                     (HashUtil::xxHash64(address, 1) % (table_size_ - 1)) + 1,\n                                     host_weight.second);\n  }\n\n  table_.resize(table_size_);\n\n  // Iterate through the table build entries as many times as it takes to fill up the table.\n  uint64_t table_index = 0;\n  for (uint32_t iteration = 1; table_index < table_size_; ++iteration) {\n    for (uint64_t i = 0; i < table_build_entries.size() && table_index < table_size; i++) {\n      TableBuildEntry& entry = table_build_entries[i];\n      // To understand how target_weight_ and weight_ are used below, consider a host with weight\n      // equal to max_normalized_weight. This would be picked on every single iteration. 
If it had\n      // weight equal to max_normalized_weight / 3, then it would only be picked every 3 iterations,\n      // etc.\n      if (iteration * entry.weight_ < entry.target_weight_) {\n        continue;\n      }\n      entry.target_weight_ += max_normalized_weight;\n      uint64_t c = permutation(entry);\n      while (table_[c] != nullptr) {\n        entry.next_++;\n        c = permutation(entry);\n      }\n\n      table_[c] = entry.host_;\n      entry.next_++;\n      entry.count_++;\n      table_index++;\n    }\n  }\n\n  uint64_t min_entries_per_host = table_size_;\n  uint64_t max_entries_per_host = 0;\n  for (const auto& entry : table_build_entries) {\n    min_entries_per_host = std::min(entry.count_, min_entries_per_host);\n    max_entries_per_host = std::max(entry.count_, max_entries_per_host);\n  }\n  stats_.min_entries_per_host_.set(min_entries_per_host);\n  stats_.max_entries_per_host_.set(max_entries_per_host);\n\n  if (ENVOY_LOG_CHECK_LEVEL(trace)) {\n    for (uint64_t i = 0; i < table_.size(); i++) {\n      ENVOY_LOG(trace, \"maglev: i={} host={}\", i,\n                use_hostname_for_hashing ? 
table_[i]->hostname()\n                                         : table_[i]->address()->asString());\n    }\n  }\n}\n\nHostConstSharedPtr MaglevTable::chooseHost(uint64_t hash, uint32_t attempt) const {\n  if (table_.empty()) {\n    return nullptr;\n  }\n\n  if (attempt > 0) {\n    // If a retry host predicate is being applied, mutate the hash to choose an alternate host.\n    // By using value with most bits set for the retry attempts, we achieve a larger change in\n    // the hash, thereby reducing the likelihood that all retries are directed to a single host.\n    hash ^= ~0ULL - attempt + 1;\n  }\n\n  return table_[hash % table_size_];\n}\n\nuint64_t MaglevTable::permutation(const TableBuildEntry& entry) {\n  return (entry.offset_ + (entry.skip_ * entry.next_)) % table_size_;\n}\n\nMaglevLoadBalancer::MaglevLoadBalancer(\n    const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope,\n    Runtime::Loader& runtime, Random::RandomGenerator& random,\n    const absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig>& config,\n    const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n    : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config),\n      scope_(scope.createScope(\"maglev_lb.\")), stats_(generateStats(*scope_)),\n      table_size_(config ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.value(), table_size,\n                                                           MaglevTable::DefaultTableSize)\n                         : MaglevTable::DefaultTableSize),\n      use_hostname_for_hashing_(\n          common_config.has_consistent_hashing_lb_config()\n              ? 
common_config.consistent_hashing_lb_config().use_hostname_for_hashing()\n              : false),\n      hash_balance_factor_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          common_config.consistent_hashing_lb_config(), hash_balance_factor, 0)) {\n  ENVOY_LOG(debug, \"maglev table size: {}\", table_size_);\n  // The table size must be prime number.\n  if (!Primes::isPrime(table_size_)) {\n    throw EnvoyException(\"The table size of maglev must be prime number\");\n  }\n}\n\nMaglevLoadBalancerStats MaglevLoadBalancer::generateStats(Stats::Scope& scope) {\n  return {ALL_MAGLEV_LOAD_BALANCER_STATS(POOL_GAUGE(scope))};\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/maglev_lb.h",
    "content": "#pragma once\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/upstream/thread_aware_lb_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * All Maglev load balancer stats. @see stats_macros.h\n */\n#define ALL_MAGLEV_LOAD_BALANCER_STATS(GAUGE)                                                      \\\n  GAUGE(max_entries_per_host, Accumulate)                                                          \\\n  GAUGE(min_entries_per_host, Accumulate)\n\n/**\n * Struct definition for all Maglev load balancer stats. @see stats_macros.h\n */\nstruct MaglevLoadBalancerStats {\n  ALL_MAGLEV_LOAD_BALANCER_STATS(GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * This is an implementation of Maglev consistent hashing as described in:\n * https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/44824.pdf\n * section 3.4. Specifically, the algorithm shown in pseudocode listing 1 is implemented with a\n * fixed table size of 65537. 
This is the recommended table size in section 5.3.\n */\nclass MaglevTable : public ThreadAwareLoadBalancerBase::HashingLoadBalancer,\n                    Logger::Loggable<Logger::Id::upstream> {\npublic:\n  MaglevTable(const NormalizedHostWeightVector& normalized_host_weights,\n              double max_normalized_weight, uint64_t table_size, bool use_hostname_for_hashing,\n              MaglevLoadBalancerStats& stats);\n\n  // ThreadAwareLoadBalancerBase::HashingLoadBalancer\n  HostConstSharedPtr chooseHost(uint64_t hash, uint32_t attempt) const override;\n\n  // Recommended table size in section 5.3 of the paper.\n  static const uint64_t DefaultTableSize = 65537;\n\nprivate:\n  struct TableBuildEntry {\n    TableBuildEntry(const HostConstSharedPtr& host, uint64_t offset, uint64_t skip, double weight)\n        : host_(host), offset_(offset), skip_(skip), weight_(weight) {}\n\n    HostConstSharedPtr host_;\n    const uint64_t offset_;\n    const uint64_t skip_;\n    const double weight_;\n    double target_weight_{};\n    uint64_t next_{};\n    uint64_t count_{};\n  };\n\n  uint64_t permutation(const TableBuildEntry& entry);\n\n  const uint64_t table_size_;\n  std::vector<HostConstSharedPtr> table_;\n  MaglevLoadBalancerStats& stats_;\n};\n\n/**\n * Thread aware load balancer implementation for Maglev.\n */\nclass MaglevLoadBalancer : public ThreadAwareLoadBalancerBase,\n                           Logger::Loggable<Logger::Id::upstream> {\npublic:\n  MaglevLoadBalancer(\n      const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope,\n      Runtime::Loader& runtime, Random::RandomGenerator& random,\n      const absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig>& config,\n      const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config);\n\n  const MaglevLoadBalancerStats& stats() const { return stats_; }\n  uint64_t tableSize() const { return table_size_; }\n\nprivate:\n  // ThreadAwareLoadBalancerBase\n  
HashingLoadBalancerSharedPtr\n  createLoadBalancer(const NormalizedHostWeightVector& normalized_host_weights,\n                     double /* min_normalized_weight */, double max_normalized_weight) override {\n    HashingLoadBalancerSharedPtr maglev_lb =\n        std::make_shared<MaglevTable>(normalized_host_weights, max_normalized_weight, table_size_,\n                                      use_hostname_for_hashing_, stats_);\n\n    if (hash_balance_factor_ == 0) {\n      return maglev_lb;\n    }\n\n    return std::make_shared<BoundedLoadHashingLoadBalancer>(\n        maglev_lb, std::move(normalized_host_weights), hash_balance_factor_);\n  }\n\n  static MaglevLoadBalancerStats generateStats(Stats::Scope& scope);\n\n  Stats::ScopePtr scope_;\n  MaglevLoadBalancerStats stats_;\n  const uint64_t table_size_;\n  const bool use_hostname_for_hashing_;\n  const uint32_t hash_balance_factor_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/original_dst_cluster.cc",
    "content": "#include \"common/upstream/original_dst_cluster.h\"\n\n#include <chrono>\n#include <list>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nHostConstSharedPtr OriginalDstCluster::LoadBalancer::chooseHost(LoadBalancerContext* context) {\n  if (context) {\n    // Check if override host header is present, if yes use it otherwise check local address.\n    Network::Address::InstanceConstSharedPtr dst_host = nullptr;\n    if (parent_->use_http_header_) {\n      dst_host = requestOverrideHost(context);\n    }\n    if (dst_host == nullptr) {\n      const Network::Connection* connection = context->downstreamConnection();\n      // The local address of the downstream connection is the original destination address,\n      // if localAddressRestored() returns 'true'.\n      if (connection && connection->localAddressRestored()) {\n        dst_host = connection->localAddress();\n      }\n    }\n\n    if (dst_host) {\n      const Network::Address::Instance& dst_addr = *dst_host.get();\n      // Check if a host with the destination address is already in the host set.\n      auto it = host_map_->find(dst_addr.asString());\n      if (it != host_map_->end()) {\n        HostSharedPtr host(it->second); // takes a reference\n        ENVOY_LOG(debug, \"Using existing host {}.\", host->address()->asString());\n        host->used(true); // Mark as used.\n        return host;\n      }\n      // Add a new host\n      const Network::Address::Ip* dst_ip = dst_addr.ip();\n      if (dst_ip) {\n        
Network::Address::InstanceConstSharedPtr host_ip_port(\n            Network::Utility::copyInternetAddressAndPort(*dst_ip));\n        // Create a host we can use immediately.\n        auto info = parent_->info();\n        HostSharedPtr host(std::make_shared<HostImpl>(\n            info, info->name() + dst_addr.asString(), std::move(host_ip_port), nullptr, 1,\n            envoy::config::core::v3::Locality().default_instance(),\n            envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0,\n            envoy::config::core::v3::UNKNOWN));\n        ENVOY_LOG(debug, \"Created host {}.\", host->address()->asString());\n\n        // Tell the cluster about the new host\n        // lambda cannot capture a member by value.\n        std::weak_ptr<OriginalDstCluster> post_parent = parent_;\n        parent_->dispatcher_.post([post_parent, host]() mutable {\n          // The main cluster may have disappeared while this post was queued.\n          if (std::shared_ptr<OriginalDstCluster> parent = post_parent.lock()) {\n            parent->addHost(host);\n          }\n        });\n        return host;\n      } else {\n        ENVOY_LOG(debug, \"Failed to create host for {}.\", dst_addr.asString());\n      }\n    }\n  }\n  // TODO(ramaraochavali): add a stat and move this log line to debug.\n  ENVOY_LOG(warn, \"original_dst_load_balancer: No downstream connection or no original_dst.\");\n  return nullptr;\n}\n\nNetwork::Address::InstanceConstSharedPtr\nOriginalDstCluster::LoadBalancer::requestOverrideHost(LoadBalancerContext* context) {\n  Network::Address::InstanceConstSharedPtr request_host;\n  const Http::HeaderMap* downstream_headers = context->downstreamHeaders();\n  if (downstream_headers &&\n      downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost) != nullptr) {\n    const std::string request_override_host(\n        downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost)\n            ->value()\n            
.getStringView());\n    try {\n      request_host = Network::Utility::parseInternetAddressAndPort(request_override_host, false);\n      ENVOY_LOG(debug, \"Using request override host {}.\", request_override_host);\n    } catch (const Envoy::EnvoyException& e) {\n      ENVOY_LOG(debug, \"original_dst_load_balancer: invalid override header value. {}\", e.what());\n      parent_->info()->stats().original_dst_host_invalid_.inc();\n    }\n  }\n  return request_host;\n}\n\nOriginalDstCluster::OriginalDstCluster(\n    const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n    Stats::ScopePtr&& stats_scope, bool added_via_api)\n    : ClusterImplBase(config, runtime, factory_context, std::move(stats_scope), added_via_api),\n      dispatcher_(factory_context.dispatcher()),\n      cleanup_interval_ms_(\n          std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, cleanup_interval, 5000))),\n      cleanup_timer_(dispatcher_.createTimer([this]() -> void { cleanup(); })),\n      use_http_header_(info_->lbOriginalDstConfig()\n                           ? 
info_->lbOriginalDstConfig().value().use_http_header()\n                           : false),\n      host_map_(std::make_shared<HostMap>()) {\n  // TODO(dio): Remove hosts check once the hosts field is removed.\n  if (config.has_load_assignment() || !config.hidden_envoy_deprecated_hosts().empty()) {\n    throw EnvoyException(\"ORIGINAL_DST clusters must have no load assignment or hosts configured\");\n  }\n  cleanup_timer_->enableTimer(cleanup_interval_ms_);\n}\n\nvoid OriginalDstCluster::addHost(HostSharedPtr& host) {\n  HostMapSharedPtr new_host_map = std::make_shared<HostMap>(*getCurrentHostMap());\n  auto pair = new_host_map->emplace(host->address()->asString(), host);\n  bool added = pair.second;\n  if (added) {\n    ENVOY_LOG(debug, \"addHost() adding {}\", host->address()->asString());\n    setHostMap(new_host_map);\n    // Given the current config, only EDS clusters support multiple priorities.\n    ASSERT(priority_set_.hostSetsPerPriority().size() == 1);\n    const auto& first_host_set = priority_set_.getOrCreateHostSet(0);\n    HostVectorSharedPtr all_hosts(new HostVector(first_host_set.hosts()));\n    all_hosts->emplace_back(host);\n    priority_set_.updateHosts(0,\n                              HostSetImpl::partitionHosts(all_hosts, HostsPerLocalityImpl::empty()),\n                              {}, {std::move(host)}, {}, absl::nullopt);\n  }\n}\n\nvoid OriginalDstCluster::cleanup() {\n  HostVectorSharedPtr keeping_hosts(new HostVector);\n  HostVector to_be_removed;\n  ENVOY_LOG(trace, \"Stale original dst hosts cleanup triggered.\");\n  auto host_map = getCurrentHostMap();\n  if (!host_map->empty()) {\n    ENVOY_LOG(trace, \"Cleaning up stale original dst hosts.\");\n    for (const auto& pair : *host_map) {\n      const std::string& addr = pair.first;\n      const HostSharedPtr& host = pair.second;\n      if (host->used()) {\n        ENVOY_LOG(trace, \"Keeping active host {}.\", addr);\n        keeping_hosts->emplace_back(host);\n        
host->used(false); // Mark to be removed during the next round.\n      } else {\n        ENVOY_LOG(trace, \"Removing stale host {}.\", addr);\n        to_be_removed.emplace_back(host);\n      }\n    }\n  }\n\n  if (!to_be_removed.empty()) {\n    HostMapSharedPtr new_host_map = std::make_shared<HostMap>(*host_map);\n    for (const HostSharedPtr& host : to_be_removed) {\n      new_host_map->erase(host->address()->asString());\n    }\n    setHostMap(new_host_map);\n    priority_set_.updateHosts(\n        0, HostSetImpl::partitionHosts(keeping_hosts, HostsPerLocalityImpl::empty()), {}, {},\n        to_be_removed, absl::nullopt);\n  }\n\n  cleanup_timer_->enableTimer(cleanup_interval_ms_);\n}\n\nstd::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr>\nOriginalDstClusterFactory::createClusterImpl(\n    const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n    Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n    Stats::ScopePtr&& stats_scope) {\n  if (cluster.lb_policy() !=\n          envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB &&\n      cluster.lb_policy() != envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) {\n    throw EnvoyException(fmt::format(\n        \"cluster: LB policy {} is not valid for Cluster type {}. Only 'CLUSTER_PROVIDED' or \"\n        \"'ORIGINAL_DST_LB' is allowed with cluster type 'ORIGINAL_DST'\",\n        envoy::config::cluster::v3::Cluster::LbPolicy_Name(cluster.lb_policy()),\n        envoy::config::cluster::v3::Cluster::DiscoveryType_Name(cluster.type())));\n  }\n\n  // TODO(mattklein123): The original DST load balancer type should be deprecated and instead\n  //                     the cluster should directly supply the load balancer. 
This will remove\n  //                     a special case and allow this cluster to be compiled out as an extension.\n  auto new_cluster =\n      std::make_shared<OriginalDstCluster>(cluster, context.runtime(), socket_factory_context,\n                                           std::move(stats_scope), context.addedViaApi());\n  auto lb = std::make_unique<OriginalDstCluster::ThreadAwareLoadBalancer>(new_cluster);\n  return std::make_pair(new_cluster, std::move(lb));\n}\n\n/**\n * Static registration for the original dst cluster factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(OriginalDstClusterFactory, ClusterFactory);\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/original_dst_cluster.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <string>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/logger.h\"\n#include \"common/upstream/cluster_factory_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/clusters/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nusing HostMapSharedPtr = std::shared_ptr<HostMap>;\nusing HostMapConstSharedPtr = std::shared_ptr<const HostMap>;\n\n/**\n * The OriginalDstCluster is a dynamic cluster that automatically adds hosts as needed based on the\n * original destination address of the downstream connection. These hosts are also automatically\n * cleaned up after they have not seen traffic for a configurable cleanup interval time\n * (\"cleanup_interval_ms\").\n */\nclass OriginalDstCluster : public ClusterImplBase {\npublic:\n  OriginalDstCluster(const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime,\n                     Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n                     Stats::ScopePtr&& stats_scope, bool added_via_api);\n\n  // Upstream::Cluster\n  InitializePhase initializePhase() const override { return InitializePhase::Primary; }\n\n  /**\n   * Special Load Balancer for Original Dst Cluster.\n   *\n   * Load balancer gets called with the downstream context which can be used to make sure the\n   * Original Dst cluster has a Host for the original destination. Normally load balancers can't\n   * modify clusters, but in this case we access a singleton OriginalDstCluster that we can ask to\n   * add hosts on demand. 
Additions are synced with all other threads so that the host set in the\n   * cluster remains (eventually) consistent. If multiple threads add a host to the same upstream\n   * address then two distinct HostSharedPtr's (with the same upstream IP address) will be added,\n   * and both of them will eventually time out.\n   */\n  class LoadBalancer : public Upstream::LoadBalancer {\n  public:\n    LoadBalancer(const std::shared_ptr<OriginalDstCluster>& parent)\n        : parent_(parent), host_map_(parent->getCurrentHostMap()) {}\n\n    // Upstream::LoadBalancer\n    HostConstSharedPtr chooseHost(LoadBalancerContext* context) override;\n    // Prefetching is not implemented for OriginalDstCluster\n    HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; }\n\n  private:\n    Network::Address::InstanceConstSharedPtr requestOverrideHost(LoadBalancerContext* context);\n\n    const std::shared_ptr<OriginalDstCluster> parent_;\n    HostMapConstSharedPtr host_map_;\n  };\n\nprivate:\n  struct LoadBalancerFactory : public Upstream::LoadBalancerFactory {\n    LoadBalancerFactory(const std::shared_ptr<OriginalDstCluster>& cluster) : cluster_(cluster) {}\n\n    // Upstream::LoadBalancerFactory\n    Upstream::LoadBalancerPtr create() override { return std::make_unique<LoadBalancer>(cluster_); }\n\n    const std::shared_ptr<OriginalDstCluster> cluster_;\n  };\n\n  struct ThreadAwareLoadBalancer : public Upstream::ThreadAwareLoadBalancer {\n    ThreadAwareLoadBalancer(const std::shared_ptr<OriginalDstCluster>& cluster)\n        : cluster_(cluster) {}\n\n    // Upstream::ThreadAwareLoadBalancer\n    Upstream::LoadBalancerFactorySharedPtr factory() override {\n      return std::make_shared<LoadBalancerFactory>(cluster_);\n    }\n    void initialize() override {}\n\n    const std::shared_ptr<OriginalDstCluster> cluster_;\n  };\n\n  HostMapConstSharedPtr getCurrentHostMap() {\n    absl::ReaderMutexLock lock(&host_map_lock_);\n    return host_map_;\n  }\n\n 
 void setHostMap(const HostMapConstSharedPtr& new_host_map) {\n    absl::WriterMutexLock lock(&host_map_lock_);\n    host_map_ = new_host_map;\n  }\n\n  void addHost(HostSharedPtr&);\n  void cleanup();\n\n  // ClusterImplBase\n  void startPreInit() override { onPreInitComplete(); }\n\n  Event::Dispatcher& dispatcher_;\n  const std::chrono::milliseconds cleanup_interval_ms_;\n  Event::TimerPtr cleanup_timer_;\n  const bool use_http_header_;\n\n  absl::Mutex host_map_lock_;\n  HostMapConstSharedPtr host_map_ ABSL_GUARDED_BY(host_map_lock_);\n\n  friend class OriginalDstClusterFactory;\n};\n\nusing OriginalDstClusterSharedPtr = std::shared_ptr<OriginalDstCluster>;\n\nclass OriginalDstClusterFactory : public ClusterFactoryImplBase {\npublic:\n  OriginalDstClusterFactory()\n      : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().OriginalDst) {}\n\nprivate:\n  std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> createClusterImpl(\n      const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/outlier_detection_impl.cc",
    "content": "#include \"common/upstream/outlier_detection_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/cluster/v3/outlier_detection.pb.h\"\n#include \"envoy/data/cluster/v2alpha/outlier_detection_event.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/codes.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace Outlier {\n\nDetectorSharedPtr DetectorImplFactory::createForCluster(\n    Cluster& cluster, const envoy::config::cluster::v3::Cluster& cluster_config,\n    Event::Dispatcher& dispatcher, Runtime::Loader& runtime, EventLoggerSharedPtr event_logger) {\n  if (cluster_config.has_outlier_detection()) {\n\n    return DetectorImpl::create(cluster, cluster_config.outlier_detection(), dispatcher, runtime,\n                                dispatcher.timeSource(), std::move(event_logger));\n  } else {\n    return nullptr;\n  }\n}\n\nDetectorHostMonitorImpl::DetectorHostMonitorImpl(std::shared_ptr<DetectorImpl> detector,\n                                                 HostSharedPtr host)\n    : detector_(detector), host_(host),\n      // add Success Rate monitors\n      external_origin_sr_monitor_(envoy::data::cluster::v2alpha::SUCCESS_RATE),\n      local_origin_sr_monitor_(envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN) {\n  // Setup method to call when putResult is invoked. Depending on the config's\n  // split_external_local_origin_errors_ boolean value different method is called.\n  put_result_func_ = detector->config().splitExternalLocalOriginErrors()\n                         ? 
&DetectorHostMonitorImpl::putResultWithLocalExternalSplit\n                         : &DetectorHostMonitorImpl::putResultNoLocalExternalSplit;\n}\n\nvoid DetectorHostMonitorImpl::eject(MonotonicTime ejection_time) {\n  ASSERT(!host_.lock()->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  host_.lock()->healthFlagSet(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  num_ejections_++;\n  last_ejection_time_ = ejection_time;\n}\n\nvoid DetectorHostMonitorImpl::uneject(MonotonicTime unejection_time) {\n  last_unejection_time_ = (unejection_time);\n}\n\nvoid DetectorHostMonitorImpl::updateCurrentSuccessRateBucket() {\n  external_origin_sr_monitor_.updateCurrentSuccessRateBucket();\n  local_origin_sr_monitor_.updateCurrentSuccessRateBucket();\n}\n\nvoid DetectorHostMonitorImpl::putHttpResponseCode(uint64_t response_code) {\n  external_origin_sr_monitor_.incTotalReqCounter();\n  if (Http::CodeUtility::is5xx(response_code)) {\n    std::shared_ptr<DetectorImpl> detector = detector_.lock();\n    if (!detector) {\n      // It's possible for the cluster/detector to go away while we still have a host in use.\n      return;\n    }\n    if (Http::CodeUtility::isGatewayError(response_code)) {\n      if (++consecutive_gateway_failure_ == detector->runtime().snapshot().getInteger(\n                                                \"outlier_detection.consecutive_gateway_failure\",\n                                                detector->config().consecutiveGatewayFailure())) {\n        detector->onConsecutiveGatewayFailure(host_.lock());\n      }\n    } else {\n      consecutive_gateway_failure_ = 0;\n    }\n\n    if (++consecutive_5xx_ ==\n        detector->runtime().snapshot().getInteger(\"outlier_detection.consecutive_5xx\",\n                                                  detector->config().consecutive5xx())) {\n      detector->onConsecutive5xx(host_.lock());\n    }\n  } else {\n    external_origin_sr_monitor_.incSuccessReqCounter();\n    consecutive_5xx_ = 0;\n    
consecutive_gateway_failure_ = 0;\n  }\n}\n\nabsl::optional<Http::Code> DetectorHostMonitorImpl::resultToHttpCode(Result result) {\n  Http::Code http_code = Http::Code::InternalServerError;\n\n  switch (result) {\n  case Result::ExtOriginRequestSuccess:\n  case Result::LocalOriginConnectSuccessFinal:\n    http_code = Http::Code::OK;\n    break;\n  case Result::LocalOriginTimeout:\n    http_code = Http::Code::GatewayTimeout;\n    break;\n  case Result::LocalOriginConnectFailed:\n    http_code = Http::Code::ServiceUnavailable;\n    break;\n  case Result::ExtOriginRequestFailed:\n    http_code = Http::Code::InternalServerError;\n    break;\n    // LOCAL_ORIGIN_CONNECT_SUCCESS  is used is 2-layer protocols, like HTTP.\n    // First connection is established and then higher level protocol runs.\n    // If error happens in higher layer protocol, it will be mapped to\n    // HTTP code indicating error. In order not to intervene with result of\n    // higher layer protocol, this code is not mapped to HTTP code.\n  case Result::LocalOriginConnectSuccess:\n    return absl::nullopt;\n  }\n\n  return {http_code};\n}\n\n// Method is called by putResult when external and local origin errors\n// are not treated differently. 
All errors are mapped to HTTP codes.\n// Depending on the value of the parameter *code* the function behaves differently:\n// - if the *code* is not defined, mapping uses resultToHttpCode method to do mapping.\n// - if *code* is defined, it is taken as HTTP code and reported as such to outlier detector.\nvoid DetectorHostMonitorImpl::putResultNoLocalExternalSplit(Result result,\n                                                            absl::optional<uint64_t> code) {\n  if (code) {\n    putHttpResponseCode(code.value());\n  } else {\n    absl::optional<Http::Code> http_code = resultToHttpCode(result);\n    if (http_code) {\n      putHttpResponseCode(enumToInt(http_code.value()));\n    }\n  }\n}\n\n// Method is called by putResult when external and local origin errors\n// are treated separately. Local origin errors have separate counters and\n// separate success rate monitor.\nvoid DetectorHostMonitorImpl::putResultWithLocalExternalSplit(Result result,\n                                                              absl::optional<uint64_t>) {\n  switch (result) {\n  // SUCCESS is used to report success for connection level. Server may still respond with\n  // error, but connection to server was OK.\n  case Result::LocalOriginConnectSuccess:\n  case Result::LocalOriginConnectSuccessFinal:\n    return localOriginNoFailure();\n  // Connectivity related errors.\n  case Result::LocalOriginTimeout:\n  case Result::LocalOriginConnectFailed:\n    return localOriginFailure();\n  // EXT_ORIGIN_REQUEST_FAILED is used when connection to server was successful, but transaction on\n  // server level failed. Since it it similar to HTTP 5xx, map it to 5xx handler.\n  case Result::ExtOriginRequestFailed:\n    // map it to http code and call http handler.\n    return putHttpResponseCode(enumToInt(Http::Code::ServiceUnavailable));\n  // EXT_ORIGIN_REQUEST_SUCCESS is used to report that transaction with non-http server was\n  // completed successfully. 
This means that connection and server level transactions were\n  // successful. Map it to http code 200 OK and indicate that there was no errors on connection\n  // level.\n  case Result::ExtOriginRequestSuccess:\n    putHttpResponseCode(enumToInt(Http::Code::OK));\n    localOriginNoFailure();\n    break;\n  }\n}\n\n// Method is used by other components to reports success or error.\n// It calls putResultWithLocalExternalSplit or put putResultNoLocalExternalSplit via\n// std::function. The setting happens in constructor based on split_external_local_origin_errors\n// config parameter.\nvoid DetectorHostMonitorImpl::putResult(Result result, absl::optional<uint64_t> code) {\n  put_result_func_(this, result, code);\n}\n\nvoid DetectorHostMonitorImpl::localOriginFailure() {\n  std::shared_ptr<DetectorImpl> detector = detector_.lock();\n  if (!detector) {\n    // It's possible for the cluster/detector to go away while we still have a host in use.\n    return;\n  }\n  local_origin_sr_monitor_.incTotalReqCounter();\n  if (++consecutive_local_origin_failure_ ==\n      detector->runtime().snapshot().getInteger(\n          \"outlier_detection.consecutive_local_origin_failure\",\n          detector->config().consecutiveLocalOriginFailure())) {\n    detector->onConsecutiveLocalOriginFailure(host_.lock());\n  }\n}\n\nvoid DetectorHostMonitorImpl::localOriginNoFailure() {\n  std::shared_ptr<DetectorImpl> detector = detector_.lock();\n  if (!detector) {\n    // It's possible for the cluster/detector to go away while we still have a host in use.\n    return;\n  }\n\n  local_origin_sr_monitor_.incTotalReqCounter();\n  local_origin_sr_monitor_.incSuccessReqCounter();\n\n  resetConsecutiveLocalOriginFailure();\n}\n\nDetectorConfig::DetectorConfig(const envoy::config::cluster::v3::OutlierDetection& config)\n    : interval_ms_(\n          static_cast<uint64_t>(PROTOBUF_GET_MS_OR_DEFAULT(config, interval, DEFAULT_INTERVAL_MS))),\n      base_ejection_time_ms_(static_cast<uint64_t>(\n      
    PROTOBUF_GET_MS_OR_DEFAULT(config, base_ejection_time, DEFAULT_BASE_EJECTION_TIME_MS))),\n      consecutive_5xx_(static_cast<uint64_t>(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, consecutive_5xx, DEFAULT_CONSECUTIVE_5XX))),\n      consecutive_gateway_failure_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, consecutive_gateway_failure, DEFAULT_CONSECUTIVE_GATEWAY_FAILURE))),\n      max_ejection_percent_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, max_ejection_percent, DEFAULT_MAX_EJECTION_PERCENT))),\n      success_rate_minimum_hosts_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, success_rate_minimum_hosts, DEFAULT_SUCCESS_RATE_MINIMUM_HOSTS))),\n      success_rate_request_volume_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, success_rate_request_volume, DEFAULT_SUCCESS_RATE_REQUEST_VOLUME))),\n      success_rate_stdev_factor_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, success_rate_stdev_factor, DEFAULT_SUCCESS_RATE_STDEV_FACTOR))),\n      failure_percentage_threshold_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, failure_percentage_threshold, DEFAULT_FAILURE_PERCENTAGE_THRESHOLD))),\n      failure_percentage_minimum_hosts_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, failure_percentage_minimum_hosts, DEFAULT_FAILURE_PERCENTAGE_MINIMUM_HOSTS))),\n      failure_percentage_request_volume_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, failure_percentage_request_volume, DEFAULT_FAILURE_PERCENTAGE_REQUEST_VOLUME))),\n      enforcing_consecutive_5xx_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, enforcing_consecutive_5xx, DEFAULT_ENFORCING_CONSECUTIVE_5XX))),\n      enforcing_consecutive_gateway_failure_(static_cast<uint64_t>(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, 
enforcing_consecutive_gateway_failure,\n                                          DEFAULT_ENFORCING_CONSECUTIVE_GATEWAY_FAILURE))),\n      enforcing_success_rate_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, enforcing_success_rate, DEFAULT_ENFORCING_SUCCESS_RATE))),\n      enforcing_failure_percentage_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, enforcing_failure_percentage, DEFAULT_ENFORCING_FAILURE_PERCENTAGE))),\n      enforcing_failure_percentage_local_origin_(static_cast<uint64_t>(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, enforcing_failure_percentage_local_origin,\n                                          DEFAULT_ENFORCING_FAILURE_PERCENTAGE_LOCAL_ORIGIN))),\n      split_external_local_origin_errors_(config.split_external_local_origin_errors()),\n      consecutive_local_origin_failure_(static_cast<uint64_t>(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, consecutive_local_origin_failure, DEFAULT_CONSECUTIVE_LOCAL_ORIGIN_FAILURE))),\n      enforcing_consecutive_local_origin_failure_(static_cast<uint64_t>(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, enforcing_consecutive_local_origin_failure,\n                                          DEFAULT_ENFORCING_CONSECUTIVE_LOCAL_ORIGIN_FAILURE))),\n      enforcing_local_origin_success_rate_(static_cast<uint64_t>(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, enforcing_local_origin_success_rate,\n                                          DEFAULT_ENFORCING_LOCAL_ORIGIN_SUCCESS_RATE))) {}\n\nDetectorImpl::DetectorImpl(const Cluster& cluster,\n                           const envoy::config::cluster::v3::OutlierDetection& config,\n                           Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n                           TimeSource& time_source, EventLoggerSharedPtr event_logger)\n    : config_(config), dispatcher_(dispatcher), runtime_(runtime), time_source_(time_source),\n      
stats_(generateStats(cluster.info()->statsScope())),\n      interval_timer_(dispatcher.createTimer([this]() -> void { onIntervalTimer(); })),\n      event_logger_(event_logger) {\n  // Insert success rate initial numbers for each type of SR detector\n  external_origin_sr_num_ = {-1, -1};\n  local_origin_sr_num_ = {-1, -1};\n}\n\nDetectorImpl::~DetectorImpl() {\n  for (const auto& host : host_monitors_) {\n    if (host.first->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) {\n      ASSERT(ejections_active_helper_.value() > 0);\n      ejections_active_helper_.dec();\n    }\n  }\n}\n\nstd::shared_ptr<DetectorImpl>\nDetectorImpl::create(const Cluster& cluster,\n                     const envoy::config::cluster::v3::OutlierDetection& config,\n                     Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n                     TimeSource& time_source, EventLoggerSharedPtr event_logger) {\n  std::shared_ptr<DetectorImpl> detector(\n      new DetectorImpl(cluster, config, dispatcher, runtime, time_source, event_logger));\n  detector->initialize(cluster);\n\n  return detector;\n}\n\nvoid DetectorImpl::initialize(const Cluster& cluster) {\n  for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {\n    for (const HostSharedPtr& host : host_set->hosts()) {\n      addHostMonitor(host);\n    }\n  }\n  cluster.prioritySet().addMemberUpdateCb(\n      [this](const HostVector& hosts_added, const HostVector& hosts_removed) -> void {\n        for (const HostSharedPtr& host : hosts_added) {\n          addHostMonitor(host);\n        }\n\n        for (const HostSharedPtr& host : hosts_removed) {\n          ASSERT(host_monitors_.count(host) == 1);\n          if (host->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) {\n            ASSERT(ejections_active_helper_.value() > 0);\n            ejections_active_helper_.dec();\n          }\n\n          host_monitors_.erase(host);\n        }\n      });\n\n  armIntervalTimer();\n}\n\nvoid 
DetectorImpl::addHostMonitor(HostSharedPtr host) {\n  ASSERT(host_monitors_.count(host) == 0);\n  DetectorHostMonitorImpl* monitor = new DetectorHostMonitorImpl(shared_from_this(), host);\n  host_monitors_[host] = monitor;\n  host->setOutlierDetector(DetectorHostMonitorPtr{monitor});\n}\n\nvoid DetectorImpl::armIntervalTimer() {\n  interval_timer_->enableTimer(std::chrono::milliseconds(\n      runtime_.snapshot().getInteger(\"outlier_detection.interval_ms\", config_.intervalMs())));\n}\n\nvoid DetectorImpl::checkHostForUneject(HostSharedPtr host, DetectorHostMonitorImpl* monitor,\n                                       MonotonicTime now) {\n  if (!host->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) {\n    return;\n  }\n\n  std::chrono::milliseconds base_eject_time =\n      std::chrono::milliseconds(runtime_.snapshot().getInteger(\n          \"outlier_detection.base_ejection_time_ms\", config_.baseEjectionTimeMs()));\n  ASSERT(monitor->numEjections() > 0);\n  if ((base_eject_time * monitor->numEjections()) <= (now - monitor->lastEjectionTime().value())) {\n    ejections_active_helper_.dec();\n    host->healthFlagClear(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n    // Reset the consecutive failure counters to avoid re-ejection on very few new errors due\n    // to the non-triggering counter being close to its trigger value.\n    host_monitors_[host]->resetConsecutive5xx();\n    host_monitors_[host]->resetConsecutiveGatewayFailure();\n    monitor->uneject(now);\n    runCallbacks(host);\n\n    if (event_logger_) {\n      event_logger_->logUneject(host);\n    }\n  }\n}\n\nbool DetectorImpl::enforceEjection(envoy::data::cluster::v2alpha::OutlierEjectionType type) {\n  switch (type) {\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_5XX:\n    return runtime_.snapshot().featureEnabled(\"outlier_detection.enforcing_consecutive_5xx\",\n                                              config_.enforcingConsecutive5xx());\n  case 
envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE:\n    return runtime_.snapshot().featureEnabled(\n        \"outlier_detection.enforcing_consecutive_gateway_failure\",\n        config_.enforcingConsecutiveGatewayFailure());\n  case envoy::data::cluster::v2alpha::SUCCESS_RATE:\n    return runtime_.snapshot().featureEnabled(\"outlier_detection.enforcing_success_rate\",\n                                              config_.enforcingSuccessRate());\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE:\n    return runtime_.snapshot().featureEnabled(\n        \"outlier_detection.enforcing_consecutive_local_origin_failure\",\n        config_.enforcingConsecutiveLocalOriginFailure());\n  case envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN:\n    return runtime_.snapshot().featureEnabled(\n        \"outlier_detection.enforcing_local_origin_success_rate\",\n        config_.enforcingLocalOriginSuccessRate());\n  case envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE:\n    return runtime_.snapshot().featureEnabled(\"outlier_detection.enforcing_failure_percentage\",\n                                              config_.enforcingFailurePercentage());\n  case envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE_LOCAL_ORIGIN:\n    return runtime_.snapshot().featureEnabled(\n        \"outlier_detection.enforcing_failure_percentage_local_origin\",\n        config_.enforcingFailurePercentageLocalOrigin());\n  default:\n    // Checked by schema.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid DetectorImpl::updateEnforcedEjectionStats(\n    envoy::data::cluster::v2alpha::OutlierEjectionType type) {\n  stats_.ejections_enforced_total_.inc();\n  switch (type) {\n  case envoy::data::cluster::v2alpha::SUCCESS_RATE:\n    stats_.ejections_enforced_success_rate_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_5XX:\n    stats_.ejections_enforced_consecutive_5xx_.inc();\n    break;\n  case 
envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE:\n    stats_.ejections_enforced_consecutive_gateway_failure_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE:\n    stats_.ejections_enforced_consecutive_local_origin_failure_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN:\n    stats_.ejections_enforced_local_origin_success_rate_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE:\n    stats_.ejections_enforced_failure_percentage_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE_LOCAL_ORIGIN:\n    stats_.ejections_enforced_local_origin_failure_percentage_.inc();\n    break;\n  default:\n    // Checked by schema.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid DetectorImpl::updateDetectedEjectionStats(\n    envoy::data::cluster::v2alpha::OutlierEjectionType type) {\n  switch (type) {\n  case envoy::data::cluster::v2alpha::SUCCESS_RATE:\n    stats_.ejections_detected_success_rate_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_5XX:\n    stats_.ejections_detected_consecutive_5xx_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE:\n    stats_.ejections_detected_consecutive_gateway_failure_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE:\n    stats_.ejections_detected_consecutive_local_origin_failure_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN:\n    stats_.ejections_detected_local_origin_success_rate_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE:\n    stats_.ejections_detected_failure_percentage_.inc();\n    break;\n  case envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE_LOCAL_ORIGIN:\n    stats_.ejections_detected_local_origin_failure_percentage_.inc();\n    break;\n  default:\n    // Checked by schema.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid 
DetectorImpl::ejectHost(HostSharedPtr host,\n                             envoy::data::cluster::v2alpha::OutlierEjectionType type) {\n  uint64_t max_ejection_percent = std::min<uint64_t>(\n      100, runtime_.snapshot().getInteger(\"outlier_detection.max_ejection_percent\",\n                                          config_.maxEjectionPercent()));\n  double ejected_percent = 100.0 * ejections_active_helper_.value() / host_monitors_.size();\n  // Note this is not currently checked per-priority level, so it is possible\n  // for outlier detection to eject all hosts at any given priority level.\n  if (ejected_percent < max_ejection_percent) {\n    if (type == envoy::data::cluster::v2alpha::CONSECUTIVE_5XX ||\n        type == envoy::data::cluster::v2alpha::SUCCESS_RATE) {\n      // Deprecated counter, preserving old behaviour until it's removed.\n      stats_.ejections_total_.inc();\n    }\n    if (enforceEjection(type)) {\n      ejections_active_helper_.inc();\n      updateEnforcedEjectionStats(type);\n      host_monitors_[host]->eject(time_source_.monotonicTime());\n      runCallbacks(host);\n      if (event_logger_) {\n        event_logger_->logEject(host, *this, type, true);\n      }\n    } else {\n      if (event_logger_) {\n        event_logger_->logEject(host, *this, type, false);\n      }\n    }\n  } else {\n    stats_.ejections_overflow_.inc();\n  }\n}\n\nDetectionStats DetectorImpl::generateStats(Stats::Scope& scope) {\n  std::string prefix(\"outlier_detection.\");\n  return {ALL_OUTLIER_DETECTION_STATS(POOL_COUNTER_PREFIX(scope, prefix),\n                                      POOL_GAUGE_PREFIX(scope, prefix))};\n}\n\nvoid DetectorImpl::notifyMainThreadConsecutiveError(\n    HostSharedPtr host, envoy::data::cluster::v2alpha::OutlierEjectionType type) {\n  // This event will come from all threads, so we synchronize with a post to the main thread.\n  // NOTE: Unfortunately consecutive errors are complicated from a threading perspective because\n  //       we 
catch consecutive errors on worker threads and then post back to the main thread.\n  //       Clusters can get removed, and this means there is a race condition with this\n  //       reverse post. The way we handle this is as follows:\n  //       1) The only strong pointer to the detector is owned by the cluster.\n  //       2) We post a weak pointer to the main thread.\n  //       3) If when running on the main thread the weak pointer can be converted to a strong\n  //          pointer, the detector/cluster must still exist so we can safely fire callbacks.\n  //          Otherwise we do nothing since the detector/cluster is already gone.\n  std::weak_ptr<DetectorImpl> weak_this = shared_from_this();\n  dispatcher_.post([weak_this, host, type]() -> void {\n    std::shared_ptr<DetectorImpl> shared_this = weak_this.lock();\n    if (shared_this) {\n      shared_this->onConsecutiveErrorWorker(host, type);\n    }\n  });\n}\n\nvoid DetectorImpl::onConsecutive5xx(HostSharedPtr host) {\n  notifyMainThreadConsecutiveError(host, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX);\n}\n\nvoid DetectorImpl::onConsecutiveGatewayFailure(HostSharedPtr host) {\n  notifyMainThreadConsecutiveError(host,\n                                   envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE);\n}\n\nvoid DetectorImpl::onConsecutiveLocalOriginFailure(HostSharedPtr host) {\n  notifyMainThreadConsecutiveError(host,\n                                   envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE);\n}\n\nvoid DetectorImpl::onConsecutiveErrorWorker(\n    HostSharedPtr host, envoy::data::cluster::v2alpha::OutlierEjectionType type) {\n  // Ejections come in cross thread. There is a chance that the host has already been removed from\n  // the set. 
If so, just ignore it.\n  if (host_monitors_.count(host) == 0) {\n    return;\n  }\n  if (host->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) {\n    return;\n  }\n\n  // We also reset the appropriate counter here to allow the monitor to detect a bout of consecutive\n  // error responses even if the monitor is not charged with an interleaved non-error code.\n  updateDetectedEjectionStats(type);\n  ejectHost(host, type);\n\n  // reset counters\n  switch (type) {\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_5XX:\n    stats_.ejections_consecutive_5xx_.inc(); // Deprecated\n    host_monitors_[host]->resetConsecutive5xx();\n    break;\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE:\n    host_monitors_[host]->resetConsecutiveGatewayFailure();\n    break;\n  case envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE:\n    host_monitors_[host]->resetConsecutiveLocalOriginFailure();\n    break;\n  default:\n    // Checked by schema.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nDetectorImpl::EjectionPair DetectorImpl::successRateEjectionThreshold(\n    double success_rate_sum, const std::vector<HostSuccessRatePair>& valid_success_rate_hosts,\n    double success_rate_stdev_factor) {\n  // This function is using mean and standard deviation as statistical measures for outlier\n  // detection. First the mean is calculated by dividing the sum of success rate data over the\n  // number of data points. Then variance is calculated by taking the mean of the\n  // squared difference of data points to the mean of the data. Then standard deviation is\n  // calculated by taking the square root of the variance. 
Then the outlier threshold is\n  // calculated as the difference between the mean and the product of the standard\n  // deviation and a constant factor.\n  //\n  // For example with a data set that looks like success_rate_data = {50, 100, 100, 100, 100} the\n  // math would work as follows:\n  // success_rate_sum = 450\n  // mean = 90\n  // variance = 400\n  // stdev = 20\n  // threshold returned = 52\n  double mean = success_rate_sum / valid_success_rate_hosts.size();\n  double variance = 0;\n  std::for_each(valid_success_rate_hosts.begin(), valid_success_rate_hosts.end(),\n                [&variance, mean](HostSuccessRatePair v) {\n                  variance += std::pow(v.success_rate_ - mean, 2);\n                });\n  variance /= valid_success_rate_hosts.size();\n  double stdev = std::sqrt(variance);\n\n  return {mean, (mean - (success_rate_stdev_factor * stdev))};\n}\n\nvoid DetectorImpl::processSuccessRateEjections(\n    DetectorHostMonitor::SuccessRateMonitorType monitor_type) {\n  uint64_t success_rate_minimum_hosts = runtime_.snapshot().getInteger(\n      \"outlier_detection.success_rate_minimum_hosts\", config_.successRateMinimumHosts());\n  uint64_t success_rate_request_volume = runtime_.snapshot().getInteger(\n      \"outlier_detection.success_rate_request_volume\", config_.successRateRequestVolume());\n  uint64_t failure_percentage_minimum_hosts =\n      runtime_.snapshot().getInteger(\"outlier_detection.failure_percentage_minimum_hosts\",\n                                     config_.failurePercentageMinimumHosts());\n  uint64_t failure_percentage_request_volume =\n      runtime_.snapshot().getInteger(\"outlier_detection.failure_percentage_request_volume\",\n                                     config_.failurePercentageRequestVolume());\n\n  std::vector<HostSuccessRatePair> valid_success_rate_hosts;\n  std::vector<HostSuccessRatePair> valid_failure_percentage_hosts;\n  double success_rate_sum = 0;\n\n  // Reset the Detector's success rate mean and 
stdev.\n  getSRNums(monitor_type) = {-1, -1};\n\n  // Exit early if there are not enough hosts.\n  if (host_monitors_.size() < success_rate_minimum_hosts &&\n      host_monitors_.size() < failure_percentage_minimum_hosts) {\n    return;\n  }\n\n  // reserve upper bound of vector size to avoid reallocation.\n  valid_success_rate_hosts.reserve(host_monitors_.size());\n  valid_failure_percentage_hosts.reserve(host_monitors_.size());\n\n  for (const auto& host : host_monitors_) {\n    // Don't do work if the host is already ejected.\n    if (!host.first->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) {\n      absl::optional<std::pair<double, uint64_t>> host_success_rate_and_volume =\n          host.second->getSRMonitor(monitor_type)\n              .successRateAccumulator()\n              .getSuccessRateAndVolume();\n\n      if (!host_success_rate_and_volume) {\n        continue;\n      }\n      double success_rate = host_success_rate_and_volume.value().first;\n      double request_volume = host_success_rate_and_volume.value().second;\n\n      if (request_volume >=\n          std::min(success_rate_request_volume, failure_percentage_request_volume)) {\n        host.second->successRate(monitor_type, success_rate);\n      }\n\n      if (request_volume >= success_rate_request_volume) {\n        valid_success_rate_hosts.emplace_back(HostSuccessRatePair(host.first, success_rate));\n        success_rate_sum += success_rate;\n      }\n      if (request_volume >= failure_percentage_request_volume) {\n        valid_failure_percentage_hosts.emplace_back(HostSuccessRatePair(host.first, success_rate));\n      }\n    }\n  }\n\n  if (!valid_success_rate_hosts.empty() &&\n      valid_success_rate_hosts.size() >= success_rate_minimum_hosts) {\n    const double success_rate_stdev_factor =\n        runtime_.snapshot().getInteger(\"outlier_detection.success_rate_stdev_factor\",\n                                       config_.successRateStdevFactor()) /\n        1000.0;\n    
getSRNums(monitor_type) = successRateEjectionThreshold(\n        success_rate_sum, valid_success_rate_hosts, success_rate_stdev_factor);\n    const double success_rate_ejection_threshold = getSRNums(monitor_type).ejection_threshold_;\n    for (const auto& host_success_rate_pair : valid_success_rate_hosts) {\n      if (host_success_rate_pair.success_rate_ < success_rate_ejection_threshold) {\n        stats_.ejections_success_rate_.inc(); // Deprecated.\n        const envoy::data::cluster::v2alpha::OutlierEjectionType type =\n            host_monitors_[host_success_rate_pair.host_]\n                ->getSRMonitor(monitor_type)\n                .getEjectionType();\n        updateDetectedEjectionStats(type);\n        ejectHost(host_success_rate_pair.host_, type);\n      }\n    }\n  }\n\n  if (!valid_failure_percentage_hosts.empty() &&\n      valid_failure_percentage_hosts.size() >= failure_percentage_minimum_hosts) {\n    const double failure_percentage_threshold = runtime_.snapshot().getInteger(\n        \"outlier_detection.failure_percentage_threshold\", config_.failurePercentageThreshold());\n\n    for (const auto& host_success_rate_pair : valid_failure_percentage_hosts) {\n      if ((100.0 - host_success_rate_pair.success_rate_) >= failure_percentage_threshold) {\n        // We should eject.\n\n        // The ejection type returned by the SuccessRateMonitor's getEjectionType() will be a\n        // SUCCESS_RATE type, so we need to figure it out for ourselves.\n        const envoy::data::cluster::v2alpha::OutlierEjectionType type =\n            (monitor_type == DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin)\n                ? 
envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE\n                : envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE_LOCAL_ORIGIN;\n        updateDetectedEjectionStats(type);\n        ejectHost(host_success_rate_pair.host_, type);\n      }\n    }\n  }\n}\n\nvoid DetectorImpl::onIntervalTimer() {\n  MonotonicTime now = time_source_.monotonicTime();\n\n  for (auto host : host_monitors_) {\n    checkHostForUneject(host.first, host.second, now);\n\n    // Need to update the writer bucket to keep the data valid.\n    host.second->updateCurrentSuccessRateBucket();\n    // Refresh host success rate stat for the /clusters endpoint. If there is a new valid value, it\n    // will get updated in processSuccessRateEjections().\n    host.second->successRate(DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin, -1);\n    host.second->successRate(DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin, -1);\n  }\n\n  processSuccessRateEjections(DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin);\n  processSuccessRateEjections(DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin);\n\n  armIntervalTimer();\n}\n\nvoid DetectorImpl::runCallbacks(HostSharedPtr host) {\n  for (const ChangeStateCb& cb : callbacks_) {\n    cb(host);\n  }\n}\n\nvoid EventLoggerImpl::logEject(const HostDescriptionConstSharedPtr& host, Detector& detector,\n                               envoy::data::cluster::v2alpha::OutlierEjectionType type,\n                               bool enforced) {\n  envoy::data::cluster::v2alpha::OutlierDetectionEvent event;\n  event.set_type(type);\n\n  absl::optional<MonotonicTime> time = host->outlierDetector().lastUnejectionTime();\n  setCommonEventParams(event, host, time);\n\n  event.set_action(envoy::data::cluster::v2alpha::EJECT);\n\n  event.set_enforced(enforced);\n\n  if ((type == envoy::data::cluster::v2alpha::SUCCESS_RATE) ||\n      (type == envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN)) {\n    const 
DetectorHostMonitor::SuccessRateMonitorType monitor_type =\n        (type == envoy::data::cluster::v2alpha::SUCCESS_RATE)\n            ? DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin\n            : DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin;\n    event.mutable_eject_success_rate_event()->set_cluster_average_success_rate(\n        detector.successRateAverage(monitor_type));\n    event.mutable_eject_success_rate_event()->set_cluster_success_rate_ejection_threshold(\n        detector.successRateEjectionThreshold(monitor_type));\n    event.mutable_eject_success_rate_event()->set_host_success_rate(\n        host->outlierDetector().successRate(monitor_type));\n  } else if ((type == envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE) ||\n             (type == envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE_LOCAL_ORIGIN)) {\n    const DetectorHostMonitor::SuccessRateMonitorType monitor_type =\n        (type == envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE)\n            ? 
DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin\n            : DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin;\n    event.mutable_eject_failure_percentage_event()->set_host_success_rate(\n        host->outlierDetector().successRate(monitor_type));\n  } else {\n    event.mutable_eject_consecutive_event();\n  }\n\n  const auto json = MessageUtil::getJsonStringFromMessage(event, /* pretty_print */ false,\n                                                          /* always_print_primitive_fields */ true);\n  file_->write(fmt::format(\"{}\\n\", json));\n}\n\nvoid EventLoggerImpl::logUneject(const HostDescriptionConstSharedPtr& host) {\n  envoy::data::cluster::v2alpha::OutlierDetectionEvent event;\n\n  absl::optional<MonotonicTime> time = host->outlierDetector().lastEjectionTime();\n  setCommonEventParams(event, host, time);\n\n  event.set_action(envoy::data::cluster::v2alpha::UNEJECT);\n\n  const auto json = MessageUtil::getJsonStringFromMessage(event, /* pretty_print */ false,\n                                                          /* always_print_primitive_fields */ true);\n  file_->write(fmt::format(\"{}\\n\", json));\n}\n\nvoid EventLoggerImpl::setCommonEventParams(\n    envoy::data::cluster::v2alpha::OutlierDetectionEvent& event,\n    const HostDescriptionConstSharedPtr& host, absl::optional<MonotonicTime> time) {\n  MonotonicTime monotonic_now = time_source_.monotonicTime();\n  if (time) {\n    std::chrono::seconds secsFromLastAction =\n        std::chrono::duration_cast<std::chrono::seconds>(monotonic_now - time.value());\n    event.mutable_secs_since_last_action()->set_value(secsFromLastAction.count());\n  }\n  event.set_cluster_name(host->cluster().name());\n  event.set_upstream_url(host->address()->asString());\n  event.set_num_ejections(host->outlierDetector().numEjections());\n  TimestampUtil::systemClockToTimestamp(time_source_.systemTime(), *event.mutable_timestamp());\n}\n\nSuccessRateAccumulatorBucket* 
SuccessRateAccumulator::updateCurrentWriter() {\n  // Right now current is being written to and backup is not. Flush the backup and swap.\n  backup_success_rate_bucket_->success_request_counter_ = 0;\n  backup_success_rate_bucket_->total_request_counter_ = 0;\n\n  current_success_rate_bucket_.swap(backup_success_rate_bucket_);\n\n  return current_success_rate_bucket_.get();\n}\n\nabsl::optional<std::pair<double, uint64_t>> SuccessRateAccumulator::getSuccessRateAndVolume() {\n  if (!backup_success_rate_bucket_->total_request_counter_) {\n    return absl::nullopt;\n  }\n\n  double success_rate = backup_success_rate_bucket_->success_request_counter_ * 100.0 /\n                        backup_success_rate_bucket_->total_request_counter_;\n\n  return {{success_rate, backup_success_rate_bucket_->total_request_counter_}};\n}\n\n} // namespace Outlier\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/outlier_detection_impl.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/cluster/v3/outlier_detection.pb.h\"\n#include \"envoy/data/cluster/v2alpha/outlier_detection_event.pb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/upstream/outlier_detection.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace Outlier {\n\n/**\n * Null host monitor implementation.\n */\nclass DetectorHostMonitorNullImpl : public DetectorHostMonitor {\npublic:\n  // Upstream::Outlier::DetectorHostMonitor\n  uint32_t numEjections() override { return 0; }\n  void putHttpResponseCode(uint64_t) override {}\n  void putResult(Result, absl::optional<uint64_t>) override {}\n  void putResponseTime(std::chrono::milliseconds) override {}\n  const absl::optional<MonotonicTime>& lastEjectionTime() override { return time_; }\n  const absl::optional<MonotonicTime>& lastUnejectionTime() override { return time_; }\n  double successRate(SuccessRateMonitorType) const override { return -1; }\n\nprivate:\n  const absl::optional<MonotonicTime> time_{};\n};\n\n/**\n * Factory for creating a detector from a proto configuration.\n */\nclass DetectorImplFactory {\npublic:\n  static DetectorSharedPtr\n  createForCluster(Cluster& cluster, const envoy::config::cluster::v3::Cluster& cluster_config,\n                   Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n                   EventLoggerSharedPtr event_logger);\n};\n\n/**\n * Thin struct to facilitate calculations for success rate outlier detection.\n */\nstruct 
HostSuccessRatePair {\n  HostSuccessRatePair(HostSharedPtr host, double success_rate)\n      : host_(host), success_rate_(success_rate) {}\n  HostSharedPtr host_;\n  double success_rate_;\n};\n\nstruct SuccessRateAccumulatorBucket {\n  std::atomic<uint64_t> success_request_counter_;\n  std::atomic<uint64_t> total_request_counter_;\n};\n\n/**\n * The SuccessRateAccumulator uses the SuccessRateAccumulatorBucket to get per host success rate\n * stats. This implementation has a fixed window size of time, and thus only needs a\n * bucket to write to, and a bucket to accumulate/run stats over.\n */\nclass SuccessRateAccumulator {\npublic:\n  SuccessRateAccumulator()\n      : current_success_rate_bucket_(new SuccessRateAccumulatorBucket()),\n        backup_success_rate_bucket_(new SuccessRateAccumulatorBucket()) {}\n\n  /**\n   * This function updates the bucket to write data to.\n   * @return a pointer to the SuccessRateAccumulatorBucket.\n   */\n  SuccessRateAccumulatorBucket* updateCurrentWriter();\n  /**\n   * This function returns the success rate of a host over a window of time if the request volume is\n   * high enough. The underlying window of time could be dynamically adjusted. In the current\n   * implementation it is a fixed time window.\n   * @param success_rate_request_volume the threshold of requests an accumulator has to have in\n   *                                    order to be able to return a significant success rate value.\n   * @return a valid absl::optional<double> with the success rate. 
If there were not enough\n   * requests, an invalid absl::optional<double> is returned.\n   */\n  absl::optional<std::pair<double, uint64_t>> getSuccessRateAndVolume();\n\nprivate:\n  std::unique_ptr<SuccessRateAccumulatorBucket> current_success_rate_bucket_;\n  std::unique_ptr<SuccessRateAccumulatorBucket> backup_success_rate_bucket_;\n};\n\nclass SuccessRateMonitor {\npublic:\n  SuccessRateMonitor(envoy::data::cluster::v2alpha::OutlierEjectionType ejection_type)\n      : ejection_type_(ejection_type), success_rate_(-1) {\n    // Point the success_rate_accumulator_bucket_ pointer to a bucket.\n    updateCurrentSuccessRateBucket();\n  }\n  double getSuccessRate() const { return success_rate_; }\n  SuccessRateAccumulator& successRateAccumulator() { return success_rate_accumulator_; }\n  void setSuccessRate(double new_success_rate) { success_rate_ = new_success_rate; }\n  void updateCurrentSuccessRateBucket() {\n    success_rate_accumulator_bucket_.store(success_rate_accumulator_.updateCurrentWriter());\n  }\n  void incTotalReqCounter() { success_rate_accumulator_bucket_.load()->total_request_counter_++; }\n  void incSuccessReqCounter() {\n    success_rate_accumulator_bucket_.load()->success_request_counter_++;\n  }\n\n  envoy::data::cluster::v2alpha::OutlierEjectionType getEjectionType() const {\n    return ejection_type_;\n  }\n\nprivate:\n  SuccessRateAccumulator success_rate_accumulator_;\n  std::atomic<SuccessRateAccumulatorBucket*> success_rate_accumulator_bucket_;\n  envoy::data::cluster::v2alpha::OutlierEjectionType ejection_type_;\n  double success_rate_;\n};\n\nclass DetectorImpl;\n\n/**\n * Implementation of DetectorHostMonitor for the generic detector.\n */\nclass DetectorHostMonitorImpl : public DetectorHostMonitor {\npublic:\n  DetectorHostMonitorImpl(std::shared_ptr<DetectorImpl> detector, HostSharedPtr host);\n\n  void eject(MonotonicTime ejection_time);\n  void uneject(MonotonicTime ejection_time);\n\n  void resetConsecutive5xx() { consecutive_5xx_ = 
0; }\n  void resetConsecutiveGatewayFailure() { consecutive_gateway_failure_ = 0; }\n  void resetConsecutiveLocalOriginFailure() { consecutive_local_origin_failure_ = 0; }\n  static absl::optional<Http::Code> resultToHttpCode(Result result);\n\n  // Upstream::Outlier::DetectorHostMonitor\n  uint32_t numEjections() override { return num_ejections_; }\n  void putHttpResponseCode(uint64_t response_code) override;\n  void putResult(Result result, absl::optional<uint64_t> code) override;\n  void putResponseTime(std::chrono::milliseconds) override {}\n  const absl::optional<MonotonicTime>& lastEjectionTime() override { return last_ejection_time_; }\n  const absl::optional<MonotonicTime>& lastUnejectionTime() override {\n    return last_unejection_time_;\n  }\n\n  const SuccessRateMonitor& getSRMonitor(SuccessRateMonitorType type) const {\n    return (SuccessRateMonitorType::ExternalOrigin == type) ? external_origin_sr_monitor_\n                                                            : local_origin_sr_monitor_;\n  }\n\n  SuccessRateMonitor& getSRMonitor(SuccessRateMonitorType type) {\n    // Call const version of the same method\n    return const_cast<SuccessRateMonitor&>(\n        const_cast<const DetectorHostMonitorImpl*>(this)->getSRMonitor(type));\n  }\n\n  double successRate(SuccessRateMonitorType type) const override {\n    return getSRMonitor(type).getSuccessRate();\n  }\n  void updateCurrentSuccessRateBucket();\n  void successRate(SuccessRateMonitorType type, double new_success_rate) {\n    getSRMonitor(type).setSuccessRate(new_success_rate);\n  }\n\n  // handlers for reporting local origin errors\n  void localOriginFailure();\n  void localOriginNoFailure();\n\nprivate:\n  std::weak_ptr<DetectorImpl> detector_;\n  std::weak_ptr<Host> host_;\n  absl::optional<MonotonicTime> last_ejection_time_;\n  absl::optional<MonotonicTime> last_unejection_time_;\n  uint32_t num_ejections_{};\n\n  // counters for externally generated failures\n  std::atomic<uint32_t> 
consecutive_5xx_{0};\n  std::atomic<uint32_t> consecutive_gateway_failure_{0};\n\n  // counters for local origin failures\n  std::atomic<uint32_t> consecutive_local_origin_failure_{0};\n\n  // success rate monitors:\n  // - external_origin: for all events when external/local are not split\n  //   and for external origin failures when external/local events are split\n  // - local origin: for local events when external/local events are split and\n  //   not used when external/local events are not split.\n  SuccessRateMonitor external_origin_sr_monitor_;\n  SuccessRateMonitor local_origin_sr_monitor_;\n\n  void putResultNoLocalExternalSplit(Result result, absl::optional<uint64_t> code);\n  void putResultWithLocalExternalSplit(Result result, absl::optional<uint64_t> code);\n  std::function<void(DetectorHostMonitorImpl*, Result, absl::optional<uint64_t> code)>\n      put_result_func_;\n};\n\n/**\n * All outlier detection stats. @see stats_macros.h\n */\n#define ALL_OUTLIER_DETECTION_STATS(COUNTER, GAUGE)                                                \\\n  COUNTER(ejections_consecutive_5xx)                                                               \\\n  COUNTER(ejections_detected_consecutive_5xx)                                                      \\\n  COUNTER(ejections_detected_consecutive_gateway_failure)                                          \\\n  COUNTER(ejections_detected_success_rate)                                                         \\\n  COUNTER(ejections_detected_failure_percentage)                                                   \\\n  COUNTER(ejections_enforced_consecutive_5xx)                                                      \\\n  COUNTER(ejections_enforced_consecutive_gateway_failure)                                          \\\n  COUNTER(ejections_enforced_success_rate)                                                         \\\n  COUNTER(ejections_enforced_failure_percentage)                                                   \\\n  
COUNTER(ejections_detected_consecutive_local_origin_failure)                                     \\\n  COUNTER(ejections_enforced_consecutive_local_origin_failure)                                     \\\n  COUNTER(ejections_detected_local_origin_success_rate)                                            \\\n  COUNTER(ejections_enforced_local_origin_success_rate)                                            \\\n  COUNTER(ejections_detected_local_origin_failure_percentage)                                      \\\n  COUNTER(ejections_enforced_local_origin_failure_percentage)                                      \\\n  COUNTER(ejections_enforced_total)                                                                \\\n  COUNTER(ejections_overflow)                                                                      \\\n  COUNTER(ejections_success_rate)                                                                  \\\n  COUNTER(ejections_total)                                                                         \\\n  GAUGE(ejections_active, Accumulate)\n\n/**\n * Struct definition for all outlier detection stats. 
@see stats_macros.h\n */\nstruct DetectionStats {\n  ALL_OUTLIER_DETECTION_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Configuration for the outlier detection.\n */\nclass DetectorConfig {\npublic:\n  DetectorConfig(const envoy::config::cluster::v3::OutlierDetection& config);\n\n  uint64_t intervalMs() const { return interval_ms_; }\n  uint64_t baseEjectionTimeMs() const { return base_ejection_time_ms_; }\n  uint64_t consecutive5xx() const { return consecutive_5xx_; }\n  uint64_t consecutiveGatewayFailure() const { return consecutive_gateway_failure_; }\n  uint64_t maxEjectionPercent() const { return max_ejection_percent_; }\n  uint64_t successRateMinimumHosts() const { return success_rate_minimum_hosts_; }\n  uint64_t successRateRequestVolume() const { return success_rate_request_volume_; }\n  uint64_t successRateStdevFactor() const { return success_rate_stdev_factor_; }\n  uint64_t failurePercentageThreshold() const { return failure_percentage_threshold_; }\n  uint64_t failurePercentageMinimumHosts() const { return failure_percentage_minimum_hosts_; }\n  uint64_t failurePercentageRequestVolume() const { return failure_percentage_request_volume_; }\n  uint64_t enforcingConsecutive5xx() const { return enforcing_consecutive_5xx_; }\n  uint64_t enforcingConsecutiveGatewayFailure() const {\n    return enforcing_consecutive_gateway_failure_;\n  }\n  uint64_t enforcingSuccessRate() const { return enforcing_success_rate_; }\n  uint64_t enforcingFailurePercentage() const { return enforcing_failure_percentage_; }\n  uint64_t enforcingFailurePercentageLocalOrigin() const {\n    return enforcing_failure_percentage_local_origin_;\n  }\n  bool splitExternalLocalOriginErrors() const { return split_external_local_origin_errors_; }\n  uint64_t consecutiveLocalOriginFailure() const { return consecutive_local_origin_failure_; }\n  uint64_t enforcingConsecutiveLocalOriginFailure() const {\n    return enforcing_consecutive_local_origin_failure_;\n  }\n  
uint64_t enforcingLocalOriginSuccessRate() const { return enforcing_local_origin_success_rate_; }\n\nprivate:\n  const uint64_t interval_ms_;\n  const uint64_t base_ejection_time_ms_;\n  const uint64_t consecutive_5xx_;\n  const uint64_t consecutive_gateway_failure_;\n  const uint64_t max_ejection_percent_;\n  const uint64_t success_rate_minimum_hosts_;\n  const uint64_t success_rate_request_volume_;\n  const uint64_t success_rate_stdev_factor_;\n  const uint64_t failure_percentage_threshold_;\n  const uint64_t failure_percentage_minimum_hosts_;\n  const uint64_t failure_percentage_request_volume_;\n  const uint64_t enforcing_consecutive_5xx_;\n  const uint64_t enforcing_consecutive_gateway_failure_;\n  const uint64_t enforcing_success_rate_;\n  const uint64_t enforcing_failure_percentage_;\n  const uint64_t enforcing_failure_percentage_local_origin_;\n  const bool split_external_local_origin_errors_;\n  const uint64_t consecutive_local_origin_failure_;\n  const uint64_t enforcing_consecutive_local_origin_failure_;\n  const uint64_t enforcing_local_origin_success_rate_;\n\n  static const uint64_t DEFAULT_INTERVAL_MS = 10000;\n  static const uint64_t DEFAULT_BASE_EJECTION_TIME_MS = 30000;\n  static const uint64_t DEFAULT_CONSECUTIVE_5XX = 5;\n  static const uint64_t DEFAULT_CONSECUTIVE_GATEWAY_FAILURE = 5;\n  static const uint64_t DEFAULT_MAX_EJECTION_PERCENT = 10;\n  static const uint64_t DEFAULT_SUCCESS_RATE_MINIMUM_HOSTS = 5;\n  static const uint64_t DEFAULT_SUCCESS_RATE_REQUEST_VOLUME = 100;\n  static const uint64_t DEFAULT_SUCCESS_RATE_STDEV_FACTOR = 1900;\n  static const uint64_t DEFAULT_FAILURE_PERCENTAGE_THRESHOLD = 85;\n  static const uint64_t DEFAULT_FAILURE_PERCENTAGE_MINIMUM_HOSTS = 5;\n  static const uint64_t DEFAULT_FAILURE_PERCENTAGE_REQUEST_VOLUME = 50;\n  static const uint64_t DEFAULT_ENFORCING_CONSECUTIVE_5XX = 100;\n  static const uint64_t DEFAULT_ENFORCING_CONSECUTIVE_GATEWAY_FAILURE = 0;\n  static const uint64_t DEFAULT_ENFORCING_SUCCESS_RATE = 
100;\n  static const uint64_t DEFAULT_ENFORCING_FAILURE_PERCENTAGE = 0;\n  static const uint64_t DEFAULT_ENFORCING_FAILURE_PERCENTAGE_LOCAL_ORIGIN = 0;\n  static const uint64_t DEFAULT_CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 5;\n  static const uint64_t DEFAULT_ENFORCING_CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 100;\n  static const uint64_t DEFAULT_ENFORCING_LOCAL_ORIGIN_SUCCESS_RATE = 100;\n};\n\n/**\n * An implementation of an outlier detector. In the future we may support multiple outlier detection\n * implementations with different configuration. For now, as we iterate everything is contained\n * within this implementation.\n */\nclass DetectorImpl : public Detector, public std::enable_shared_from_this<DetectorImpl> {\npublic:\n  static std::shared_ptr<DetectorImpl>\n  create(const Cluster& cluster, const envoy::config::cluster::v3::OutlierDetection& config,\n         Event::Dispatcher& dispatcher, Runtime::Loader& runtime, TimeSource& time_source,\n         EventLoggerSharedPtr event_logger);\n  ~DetectorImpl() override;\n\n  void onConsecutive5xx(HostSharedPtr host);\n  void onConsecutiveGatewayFailure(HostSharedPtr host);\n  void onConsecutiveLocalOriginFailure(HostSharedPtr host);\n  Runtime::Loader& runtime() { return runtime_; }\n  DetectorConfig& config() { return config_; }\n\n  // Upstream::Outlier::Detector\n  void addChangedStateCb(ChangeStateCb cb) override { callbacks_.push_back(cb); }\n  double\n  successRateAverage(DetectorHostMonitor::SuccessRateMonitorType monitor_type) const override {\n    return getSRNums(monitor_type).success_rate_average_;\n  }\n  double successRateEjectionThreshold(\n      DetectorHostMonitor::SuccessRateMonitorType monitor_type) const override {\n    return getSRNums(monitor_type).ejection_threshold_;\n  }\n\n  /**\n   * This function returns pair of double values for success rate outlier detection. 
The pair\n   * contains the average success rate of all valid hosts in the cluster and the ejection threshold.\n   * If a host's success rate is under this threshold, the host is an outlier.\n   * @param success_rate_sum is the sum of the data in the success_rate_data vector.\n   * @param valid_success_rate_hosts is the vector containing the individual success rate data\n   *        points.\n   * @return EjectionPair\n   */\n  struct EjectionPair {\n    double success_rate_average_; // average success rate of all valid hosts in the cluster\n    double ejection_threshold_;   // ejection threshold for the cluster\n  };\n  static EjectionPair\n  successRateEjectionThreshold(double success_rate_sum,\n                               const std::vector<HostSuccessRatePair>& valid_success_rate_hosts,\n                               double success_rate_stdev_factor);\n\nprivate:\n  DetectorImpl(const Cluster& cluster, const envoy::config::cluster::v3::OutlierDetection& config,\n               Event::Dispatcher& dispatcher, Runtime::Loader& runtime, TimeSource& time_source,\n               EventLoggerSharedPtr event_logger);\n\n  void addHostMonitor(HostSharedPtr host);\n  void armIntervalTimer();\n  void checkHostForUneject(HostSharedPtr host, DetectorHostMonitorImpl* monitor, MonotonicTime now);\n  void ejectHost(HostSharedPtr host, envoy::data::cluster::v2alpha::OutlierEjectionType type);\n  static DetectionStats generateStats(Stats::Scope& scope);\n  void initialize(const Cluster& cluster);\n  void onConsecutiveErrorWorker(HostSharedPtr host,\n                                envoy::data::cluster::v2alpha::OutlierEjectionType type);\n  void notifyMainThreadConsecutiveError(HostSharedPtr host,\n                                        envoy::data::cluster::v2alpha::OutlierEjectionType type);\n  void onIntervalTimer();\n  void runCallbacks(HostSharedPtr host);\n  bool enforceEjection(envoy::data::cluster::v2alpha::OutlierEjectionType type);\n  void 
updateEnforcedEjectionStats(envoy::data::cluster::v2alpha::OutlierEjectionType type);\n  void updateDetectedEjectionStats(envoy::data::cluster::v2alpha::OutlierEjectionType type);\n  void processSuccessRateEjections(DetectorHostMonitor::SuccessRateMonitorType monitor_type);\n\n  // The helper to double write value and gauge. The gauge could be null value since because any\n  // stat might be deactivated.\n  class EjectionsActiveHelper {\n  public:\n    EjectionsActiveHelper(Envoy::Stats::Gauge& gauge) : ejections_active_ref_(gauge) {}\n    void inc() {\n      ejections_active_ref_.inc();\n      ++ejections_active_value_;\n    }\n    void dec() {\n      ejections_active_ref_.dec();\n      --ejections_active_value_;\n    }\n    uint64_t value() { return ejections_active_value_.load(); }\n    Envoy::Stats::Gauge& ejections_active_ref_;\n    std::atomic<uint64_t> ejections_active_value_{0};\n  };\n  DetectorConfig config_;\n  Event::Dispatcher& dispatcher_;\n  Runtime::Loader& runtime_;\n  TimeSource& time_source_;\n  DetectionStats stats_;\n  EjectionsActiveHelper ejections_active_helper_{stats_.ejections_active_};\n  Event::TimerPtr interval_timer_;\n  std::list<ChangeStateCb> callbacks_;\n  absl::node_hash_map<HostSharedPtr, DetectorHostMonitorImpl*> host_monitors_;\n  EventLoggerSharedPtr event_logger_;\n\n  // EjectionPair for external and local origin events.\n  // When external/local origin events are not split, external_origin_sr_num_ are used for\n  // both types of events: external and local. 
local_origin_sr_num_ is not used.\n  // When external/local origin events are split, external_origin_sr_num_ are used only\n  // for external events and local_origin_sr_num_ is used for local origin events.\n  EjectionPair external_origin_sr_num_;\n  EjectionPair local_origin_sr_num_;\n\n  const EjectionPair& getSRNums(DetectorHostMonitor::SuccessRateMonitorType monitor_type) const {\n    return (DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin == monitor_type)\n               ? external_origin_sr_num_\n               : local_origin_sr_num_;\n  }\n  EjectionPair& getSRNums(DetectorHostMonitor::SuccessRateMonitorType monitor_type) {\n    return const_cast<EjectionPair&>(\n        static_cast<const DetectorImpl&>(*this).getSRNums(monitor_type));\n  }\n};\n\nclass EventLoggerImpl : public EventLogger {\npublic:\n  EventLoggerImpl(AccessLog::AccessLogManager& log_manager, const std::string& file_name,\n                  TimeSource& time_source)\n      : file_(log_manager.createAccessLog(file_name)), time_source_(time_source) {}\n\n  // Upstream::Outlier::EventLogger\n  void logEject(const HostDescriptionConstSharedPtr& host, Detector& detector,\n                envoy::data::cluster::v2alpha::OutlierEjectionType type, bool enforced) override;\n\n  void logUneject(const HostDescriptionConstSharedPtr& host) override;\n\nprivate:\n  void setCommonEventParams(envoy::data::cluster::v2alpha::OutlierDetectionEvent& event,\n                            const HostDescriptionConstSharedPtr& host,\n                            absl::optional<MonotonicTime> time);\n\n  AccessLog::AccessLogFileSharedPtr file_;\n  TimeSource& time_source_;\n};\n\n} // namespace Outlier\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/priority_conn_pool_map.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/upstream/resource_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/upstream/conn_pool_map.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n/**\n *  A class mapping keys to connection pools, with some recycling logic built in.\n */\ntemplate <typename KEY_TYPE, typename POOL_TYPE> class PriorityConnPoolMap {\npublic:\n  using ConnPoolMapType = ConnPoolMap<KEY_TYPE, POOL_TYPE>;\n  using PoolFactory = typename ConnPoolMapType::PoolFactory;\n  using DrainedCb = typename ConnPoolMapType::DrainedCb;\n  using PoolOptRef = typename ConnPoolMapType::PoolOptRef;\n\n  PriorityConnPoolMap(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host);\n  ~PriorityConnPoolMap();\n  /**\n   * Returns an existing pool for the given priority and `key`, or creates a new one using\n   * `factory`. Note that it is possible for this to fail if a limit on the number of pools allowed\n   * is reached.\n   * @return The pool corresponding to `key`, or `absl::nullopt`.\n   */\n  PoolOptRef getPool(ResourcePriority priority, KEY_TYPE key, const PoolFactory& factory);\n\n  /**\n   * @return the number of pools across all priorities.\n   */\n  size_t size() const;\n\n  /**\n   * Destroys all mapped pools.\n   */\n  void clear();\n\n  /**\n   * Adds a drain callback to all mapped pools. Any future mapped pools with have the callback\n   * automatically added. Be careful with the callback. If it itself calls into `this`, modifying\n   * the state of `this`, there is a good chance it will cause corruption due to the callback firing\n   * immediately.\n   */\n  void addDrainedCallback(const DrainedCb& cb);\n\n  /**\n   * Instructs each connection pool to drain its connections.\n   */\n  void drainConnections();\n\nprivate:\n  std::array<std::unique_ptr<ConnPoolMapType>, NumResourcePriorities> conn_pool_maps_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/priority_conn_pool_map_impl.h",
    "content": "#pragma once\n\n#include \"common/upstream/conn_pool_map_impl.h\"\n#include \"common/upstream/priority_conn_pool_map.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nPriorityConnPoolMap<KEY_TYPE, POOL_TYPE>::PriorityConnPoolMap(Envoy::Event::Dispatcher& dispatcher,\n                                                              const HostConstSharedPtr& host) {\n  for (size_t pool_map_index = 0; pool_map_index < NumResourcePriorities; ++pool_map_index) {\n    auto priority = static_cast<ResourcePriority>(pool_map_index);\n    conn_pool_maps_[pool_map_index].reset(new ConnPoolMapType(dispatcher, host, priority));\n  }\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nPriorityConnPoolMap<KEY_TYPE, POOL_TYPE>::~PriorityConnPoolMap() = default;\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\ntypename PriorityConnPoolMap<KEY_TYPE, POOL_TYPE>::PoolOptRef\nPriorityConnPoolMap<KEY_TYPE, POOL_TYPE>::getPool(ResourcePriority priority, KEY_TYPE key,\n                                                  const PoolFactory& factory) {\n  size_t index = static_cast<size_t>(priority);\n  ASSERT(index < conn_pool_maps_.size());\n  return conn_pool_maps_[index]->getPool(key, factory);\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nsize_t PriorityConnPoolMap<KEY_TYPE, POOL_TYPE>::size() const {\n  size_t size = 0;\n  for (const auto& pool_map : conn_pool_maps_) {\n    size += pool_map->size();\n  }\n  return size;\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nvoid PriorityConnPoolMap<KEY_TYPE, POOL_TYPE>::clear() {\n  for (auto& pool_map : conn_pool_maps_) {\n    pool_map->clear();\n  }\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nvoid PriorityConnPoolMap<KEY_TYPE, POOL_TYPE>::addDrainedCallback(const DrainedCb& cb) {\n  for (auto& pool_map : conn_pool_maps_) {\n    pool_map->addDrainedCallback(cb);\n  }\n}\n\ntemplate <typename KEY_TYPE, typename POOL_TYPE>\nvoid 
PriorityConnPoolMap<KEY_TYPE, POOL_TYPE>::drainConnections() {\n  for (auto& pool_map : conn_pool_maps_) {\n    pool_map->drainConnections();\n  }\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/resource_manager_impl.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/resource.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/upstream/resource_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/basic_resource_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nstruct ManagedResourceImpl : public BasicResourceLimitImpl {\n  ManagedResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key,\n                      Stats::Gauge& open_gauge, Stats::Gauge& remaining)\n      : BasicResourceLimitImpl(max, runtime, runtime_key), open_gauge_(open_gauge),\n        remaining_(remaining) {\n    remaining_.set(max);\n  }\n\n  // Upstream::Resource\n  bool canCreate() override { return current_ < max(); }\n  void inc() override {\n    BasicResourceLimitImpl::inc();\n    updateRemaining();\n    open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1);\n  }\n  void decBy(uint64_t amount) override {\n    BasicResourceLimitImpl::decBy(amount);\n    updateRemaining();\n    open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1);\n  }\n\n  /**\n   * We set the gauge instead of incrementing and decrementing because,\n   * though atomics are used, it is possible for the current resource count\n   * to be greater than the supplied max.\n   */\n  void updateRemaining() {\n    /**\n     * We cannot use std::max here because max() and current_ are\n     * unsigned and subtracting them may overflow.\n     */\n    const uint64_t current_copy = current_;\n    remaining_.set(max() > current_copy ? max() - current_copy : 0);\n  }\n\n  /**\n   * A gauge to notify the live circuit breaker state. 
The gauge is set to 0\n   * to notify that the circuit breaker is not yet triggered.\n   */\n  Stats::Gauge& open_gauge_;\n\n  /**\n   * The number of resources remaining before the circuit breaker opens.\n   */\n  Stats::Gauge& remaining_;\n};\n\n/**\n * Implementation of ResourceManager.\n * NOTE: This implementation makes some assumptions which favor simplicity over correctness.\n * 1) Primarily, it assumes that traffic will be mostly balanced over all the worker threads since\n *    no attempt is made to balance resources between them. It is possible that starvation can\n *    occur during high contention.\n * 2) Though atomics are used, it is possible for resources to temporarily go above the supplied\n *    maximums. This should not affect overall behavior.\n */\nclass ResourceManagerImpl : public ResourceManager {\npublic:\n  ResourceManagerImpl(Runtime::Loader& runtime, const std::string& runtime_key,\n                      uint64_t max_connections, uint64_t max_pending_requests,\n                      uint64_t max_requests, uint64_t max_retries, uint64_t max_connection_pools,\n                      ClusterCircuitBreakersStats cb_stats, absl::optional<double> budget_percent,\n                      absl::optional<uint32_t> min_retry_concurrency)\n      : connections_(max_connections, runtime, runtime_key + \"max_connections\", cb_stats.cx_open_,\n                     cb_stats.remaining_cx_),\n        pending_requests_(max_pending_requests, runtime, runtime_key + \"max_pending_requests\",\n                          cb_stats.rq_pending_open_, cb_stats.remaining_pending_),\n        requests_(max_requests, runtime, runtime_key + \"max_requests\", cb_stats.rq_open_,\n                  cb_stats.remaining_rq_),\n        connection_pools_(max_connection_pools, runtime, runtime_key + \"max_connection_pools\",\n                          cb_stats.cx_pool_open_, cb_stats.remaining_cx_pools_),\n        retries_(budget_percent, min_retry_concurrency, max_retries, 
runtime,\n                 runtime_key + \"retry_budget.\", runtime_key + \"max_retries\",\n                 cb_stats.rq_retry_open_, cb_stats.remaining_retries_, requests_,\n                 pending_requests_) {}\n\n  // Upstream::ResourceManager\n  ResourceLimit& connections() override { return connections_; }\n  ResourceLimit& pendingRequests() override { return pending_requests_; }\n  ResourceLimit& requests() override { return requests_; }\n  ResourceLimit& retries() override { return retries_; }\n  ResourceLimit& connectionPools() override { return connection_pools_; }\n\nprivate:\n  class RetryBudgetImpl : public ResourceLimit {\n  public:\n    RetryBudgetImpl(absl::optional<double> budget_percent,\n                    absl::optional<uint32_t> min_retry_concurrency, uint64_t max_retries,\n                    Runtime::Loader& runtime, const std::string& retry_budget_runtime_key,\n                    const std::string& max_retries_runtime_key, Stats::Gauge& open_gauge,\n                    Stats::Gauge& remaining, const ResourceLimit& requests,\n                    const ResourceLimit& pending_requests)\n        : runtime_(runtime),\n          max_retry_resource_(max_retries, runtime, max_retries_runtime_key, open_gauge, remaining),\n          budget_percent_(budget_percent), min_retry_concurrency_(min_retry_concurrency),\n          budget_percent_key_(retry_budget_runtime_key + \"budget_percent\"),\n          min_retry_concurrency_key_(retry_budget_runtime_key + \"min_retry_concurrency\"),\n          requests_(requests), pending_requests_(pending_requests), remaining_(remaining) {}\n\n    // Envoy::ResourceLimit\n    bool canCreate() override {\n      if (!useRetryBudget()) {\n        return max_retry_resource_.canCreate();\n      }\n      clearRemainingGauge();\n      return count() < max();\n    }\n    void inc() override {\n      max_retry_resource_.inc();\n      clearRemainingGauge();\n    }\n    void dec() override {\n      max_retry_resource_.dec();\n   
   clearRemainingGauge();\n    }\n    void decBy(uint64_t amount) override {\n      max_retry_resource_.decBy(amount);\n      clearRemainingGauge();\n    }\n    uint64_t max() override {\n      if (!useRetryBudget()) {\n        return max_retry_resource_.max();\n      }\n\n      const uint64_t current_active = requests_.count() + pending_requests_.count();\n      const double budget_percent = runtime_.snapshot().getDouble(\n          budget_percent_key_, budget_percent_ ? *budget_percent_ : 20.0);\n      const uint32_t min_retry_concurrency = runtime_.snapshot().getInteger(\n          min_retry_concurrency_key_, min_retry_concurrency_ ? *min_retry_concurrency_ : 3);\n\n      clearRemainingGauge();\n\n      // We enforce that the retry concurrency is never allowed to go below the\n      // min_retry_concurrency, even if the configured percent of the current active requests\n      // yields a value that is smaller.\n      return std::max<uint64_t>(budget_percent / 100.0 * current_active, min_retry_concurrency);\n    }\n    uint64_t count() const override { return max_retry_resource_.count(); }\n\n  private:\n    bool useRetryBudget() const {\n      return runtime_.snapshot().get(budget_percent_key_).has_value() ||\n             runtime_.snapshot().get(min_retry_concurrency_key_).has_value() || budget_percent_ ||\n             min_retry_concurrency_;\n    }\n\n    // If the retry budget is in use, the stats tracking remaining retries do not make sense since\n    // they would be dependent on other resources that can change without a call to this object.\n    // Therefore, the gauge should just be reset to 0.\n    void clearRemainingGauge() {\n      if (useRetryBudget()) {\n        remaining_.set(0);\n      }\n    }\n\n    Runtime::Loader& runtime_;\n    // The max_retry resource is nested within the budget to maintain state if the retry budget is\n    // toggled.\n    ManagedResourceImpl max_retry_resource_;\n    const absl::optional<double> budget_percent_;\n    const 
absl::optional<uint32_t> min_retry_concurrency_;\n    const std::string budget_percent_key_;\n    const std::string min_retry_concurrency_key_;\n    const ResourceLimit& requests_;\n    const ResourceLimit& pending_requests_;\n    Stats::Gauge& remaining_;\n  };\n\n  ManagedResourceImpl connections_;\n  ManagedResourceImpl pending_requests_;\n  ManagedResourceImpl requests_;\n  ManagedResourceImpl connection_pools_;\n  RetryBudgetImpl retries_;\n};\n\nusing ResourceManagerImplPtr = std::unique_ptr<ResourceManagerImpl>;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/ring_hash_lb.cc",
    "content": "#include \"common/upstream/ring_hash_lb.h\"\n\n#include <cstdint>\n#include <iostream>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nRingHashLoadBalancer::RingHashLoadBalancer(\n    const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope,\n    Runtime::Loader& runtime, Random::RandomGenerator& random,\n    const absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig>& config,\n    const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n    : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config),\n      scope_(scope.createScope(\"ring_hash_lb.\")), stats_(generateStats(*scope_)),\n      min_ring_size_(config ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.value(), minimum_ring_size,\n                                                              DefaultMinRingSize)\n                            : DefaultMinRingSize),\n      max_ring_size_(config ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.value(), maximum_ring_size,\n                                                              DefaultMaxRingSize)\n                            : DefaultMaxRingSize),\n      hash_function_(config ? config.value().hash_function()\n                            : HashFunction::Cluster_RingHashLbConfig_HashFunction_XX_HASH),\n      use_hostname_for_hashing_(\n          common_config.has_consistent_hashing_lb_config()\n              ? 
common_config.consistent_hashing_lb_config().use_hostname_for_hashing()\n              : false),\n      hash_balance_factor_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          common_config.consistent_hashing_lb_config(), hash_balance_factor, 0)) {\n  // It's important to do any config validation here, rather than deferring to Ring's ctor,\n  // because any exceptions thrown here will be caught and handled properly.\n  if (min_ring_size_ > max_ring_size_) {\n    throw EnvoyException(fmt::format(\"ring hash: minimum_ring_size ({}) > maximum_ring_size ({})\",\n                                     min_ring_size_, max_ring_size_));\n  }\n}\n\nRingHashLoadBalancerStats RingHashLoadBalancer::generateStats(Stats::Scope& scope) {\n  return {ALL_RING_HASH_LOAD_BALANCER_STATS(POOL_GAUGE(scope))};\n}\n\nHostConstSharedPtr RingHashLoadBalancer::Ring::chooseHost(uint64_t h, uint32_t attempt) const {\n  if (ring_.empty()) {\n    return nullptr;\n  }\n\n  // Ported from https://github.com/RJ/ketama/blob/master/libketama/ketama.c (ketama_get_server)\n  // I've generally kept the variable names to make the code easier to compare.\n  // NOTE: The algorithm depends on using signed integers for lowp, midp, and highp. Do not\n  //       change them!\n  int64_t lowp = 0;\n  int64_t highp = ring_.size();\n  int64_t midp = 0;\n  while (true) {\n    midp = (lowp + highp) / 2;\n\n    if (midp == static_cast<int64_t>(ring_.size())) {\n      midp = 0;\n      break;\n    }\n\n    uint64_t midval = ring_[midp].hash_;\n    uint64_t midval1 = midp == 0 ? 
0 : ring_[midp - 1].hash_;\n\n    if (h <= midval && h > midval1) {\n      break;\n    }\n\n    if (midval < h) {\n      lowp = midp + 1;\n    } else {\n      highp = midp - 1;\n    }\n\n    if (lowp > highp) {\n      midp = 0;\n      break;\n    }\n  }\n\n  // If a retry host predicate is being applied, behave as if this host was not in the ring.\n  // Note that this does not guarantee a different host: e.g., attempt == ring_.size() or\n  // when the offset causes us to select the same host at another location in the ring.\n  if (attempt > 0) {\n    midp = (midp + attempt) % ring_.size();\n  }\n\n  return ring_[midp].host_;\n}\n\nusing HashFunction = envoy::config::cluster::v3::Cluster::RingHashLbConfig::HashFunction;\nRingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_host_weights,\n                                 double min_normalized_weight, uint64_t min_ring_size,\n                                 uint64_t max_ring_size, HashFunction hash_function,\n                                 bool use_hostname_for_hashing, RingHashLoadBalancerStats& stats)\n    : stats_(stats) {\n  ENVOY_LOG(trace, \"ring hash: building ring\");\n\n  // We can't do anything sensible with no hosts.\n  if (normalized_host_weights.empty()) {\n    return;\n  }\n\n  // Scale up the number of hashes per host such that the least-weighted host gets a whole number\n  // of hashes on the ring. Other hosts might not end up with whole numbers, and that's fine (the\n  // ring-building algorithm below can handle this). This preserves the original implementation's\n  // behavior: when weights aren't provided, all hosts should get an equal number of hashes. 
In\n  // the case where this number exceeds the max_ring_size, it's scaled back down to fit.\n  const double scale =\n      std::min(std::ceil(min_normalized_weight * min_ring_size) / min_normalized_weight,\n               static_cast<double>(max_ring_size));\n\n  // Reserve memory for the entire ring up front.\n  const uint64_t ring_size = std::ceil(scale);\n  ring_.reserve(ring_size);\n\n  // Populate the hash ring by walking through the (host, weight) pairs in\n  // normalized_host_weights, and generating (scale * weight) hashes for each host. Since these\n  // aren't necessarily whole numbers, we maintain running sums -- current_hashes and\n  // target_hashes -- which allows us to populate the ring in a mostly stable way.\n  //\n  // For example, suppose we have 4 hosts, each with a normalized weight of 0.25, and a scale of\n  // 6.0 (because the max_ring_size is 6). That means we want to generate 1.5 hashes per host.\n  // We start the outer loop with current_hashes = 0 and target_hashes = 0.\n  //   - For the first host, we set target_hashes = 1.5. After one run of the inner loop,\n  //     current_hashes = 1. 
After another run, current_hashes = 2, so the inner loop ends.\n  //   - For the second host, target_hashes becomes 3.0, and current_hashes is 2 from before.\n  //     After only one run of the inner loop, current_hashes = 3, so the inner loop ends.\n  //   - Likewise, the third host gets two hashes, and the fourth host gets one hash.\n  //\n  // For stats reporting, keep track of the minimum and maximum actual number of hashes per host.\n  // Users should hopefully pay attention to these numbers and alert if min_hashes_per_host is too\n  // low, since that implies an inaccurate request distribution.\n\n  absl::InlinedVector<char, 196> hash_key_buffer;\n  double current_hashes = 0.0;\n  double target_hashes = 0.0;\n  uint64_t min_hashes_per_host = ring_size;\n  uint64_t max_hashes_per_host = 0;\n  for (const auto& entry : normalized_host_weights) {\n    const auto& host = entry.first;\n    const std::string& address_string =\n        use_hostname_for_hashing ? host->hostname() : host->address()->asString();\n    ASSERT(!address_string.empty());\n\n    hash_key_buffer.assign(address_string.begin(), address_string.end());\n    hash_key_buffer.emplace_back('_');\n    auto offset_start = hash_key_buffer.end();\n\n    // As noted above: maintain current_hashes and target_hashes as running sums across the entire\n    // host set. `i` is needed only to construct the hash key, and tally min/max hashes per host.\n    target_hashes += scale * entry.second;\n    uint64_t i = 0;\n    while (current_hashes < target_hashes) {\n      const std::string i_str = absl::StrCat(\"\", i);\n      hash_key_buffer.insert(offset_start, i_str.begin(), i_str.end());\n\n      absl::string_view hash_key(static_cast<char*>(hash_key_buffer.data()),\n                                 hash_key_buffer.size());\n\n      const uint64_t hash =\n          (hash_function == HashFunction::Cluster_RingHashLbConfig_HashFunction_MURMUR_HASH_2)\n              ? 
MurmurHash::murmurHash2(hash_key, MurmurHash::STD_HASH_SEED)\n              : HashUtil::xxHash64(hash_key);\n\n      ENVOY_LOG(trace, \"ring hash: hash_key={} hash={}\", hash_key.data(), hash);\n      ring_.push_back({hash, host});\n      ++i;\n      ++current_hashes;\n      hash_key_buffer.erase(offset_start, hash_key_buffer.end());\n    }\n    min_hashes_per_host = std::min(i, min_hashes_per_host);\n    max_hashes_per_host = std::max(i, max_hashes_per_host);\n  }\n\n  std::sort(ring_.begin(), ring_.end(), [](const RingEntry& lhs, const RingEntry& rhs) -> bool {\n    return lhs.hash_ < rhs.hash_;\n  });\n  if (ENVOY_LOG_CHECK_LEVEL(trace)) {\n    for (const auto& entry : ring_) {\n      ENVOY_LOG(trace, \"ring hash: host={} hash={}\",\n                use_hostname_for_hashing ? entry.host_->hostname()\n                                         : entry.host_->address()->asString(),\n                entry.hash_);\n    }\n  }\n\n  stats_.size_.set(ring_size);\n  stats_.min_hashes_per_host_.set(min_hashes_per_host);\n  stats_.max_hashes_per_host_.set(max_hashes_per_host);\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/ring_hash_lb.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/upstream/thread_aware_lb_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * All ring hash load balancer stats. @see stats_macros.h\n */\n#define ALL_RING_HASH_LOAD_BALANCER_STATS(GAUGE)                                                   \\\n  GAUGE(max_hashes_per_host, Accumulate)                                                           \\\n  GAUGE(min_hashes_per_host, Accumulate)                                                           \\\n  GAUGE(size, Accumulate)\n\n/**\n * Struct definition for all ring hash load balancer stats. @see stats_macros.h\n */\nstruct RingHashLoadBalancerStats {\n  ALL_RING_HASH_LOAD_BALANCER_STATS(GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * A load balancer that implements consistent modulo hashing (\"ketama\"). Currently, zone aware\n * routing is not supported. 
A ring is kept for all hosts as well as a ring for healthy hosts.\n * Unless we are in panic mode, the healthy host ring is used.\n * In the future it would be nice to support:\n * 1) Weighting.\n * 2) Per-zone rings and optional zone aware routing (not all applications will want this).\n * 3) Max request fallback to support hot shards (not all applications will want this).\n */\nclass RingHashLoadBalancer : public ThreadAwareLoadBalancerBase,\n                             Logger::Loggable<Logger::Id::upstream> {\npublic:\n  RingHashLoadBalancer(\n      const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope,\n      Runtime::Loader& runtime, Random::RandomGenerator& random,\n      const absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig>& config,\n      const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config);\n\n  const RingHashLoadBalancerStats& stats() const { return stats_; }\n\nprivate:\n  using HashFunction = envoy::config::cluster::v3::Cluster::RingHashLbConfig::HashFunction;\n\n  struct RingEntry {\n    uint64_t hash_;\n    HostConstSharedPtr host_;\n  };\n\n  struct Ring : public HashingLoadBalancer {\n    Ring(const NormalizedHostWeightVector& normalized_host_weights, double min_normalized_weight,\n         uint64_t min_ring_size, uint64_t max_ring_size, HashFunction hash_function,\n         bool use_hostname_for_hashing, RingHashLoadBalancerStats& stats);\n\n    // ThreadAwareLoadBalancerBase::HashingLoadBalancer\n    HostConstSharedPtr chooseHost(uint64_t hash, uint32_t attempt) const override;\n\n    std::vector<RingEntry> ring_;\n\n    RingHashLoadBalancerStats& stats_;\n  };\n  using RingConstSharedPtr = std::shared_ptr<const Ring>;\n\n  // ThreadAwareLoadBalancerBase\n  HashingLoadBalancerSharedPtr\n  createLoadBalancer(const NormalizedHostWeightVector& normalized_host_weights,\n                     double min_normalized_weight, double /* max_normalized_weight */) override {\n    
HashingLoadBalancerSharedPtr ring_hash_lb =\n        std::make_shared<Ring>(normalized_host_weights, min_normalized_weight, min_ring_size_,\n                               max_ring_size_, hash_function_, use_hostname_for_hashing_, stats_);\n    if (hash_balance_factor_ == 0) {\n      return ring_hash_lb;\n    }\n\n    return std::make_shared<BoundedLoadHashingLoadBalancer>(\n        ring_hash_lb, std::move(normalized_host_weights), hash_balance_factor_);\n  }\n\n  static RingHashLoadBalancerStats generateStats(Stats::Scope& scope);\n\n  Stats::ScopePtr scope_;\n  RingHashLoadBalancerStats stats_;\n\n  static const uint64_t DefaultMinRingSize = 1024;\n  static const uint64_t DefaultMaxRingSize = 1024 * 1024 * 8;\n  const uint64_t min_ring_size_;\n  const uint64_t max_ring_size_;\n  const HashFunction hash_function_;\n  const bool use_hostname_for_hashing_;\n  const uint32_t hash_balance_factor_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/static_cluster.cc",
    "content": "#include \"common/upstream/static_cluster.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nStaticClusterImpl::StaticClusterImpl(\n    const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n    Stats::ScopePtr&& stats_scope, bool added_via_api)\n    : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api),\n      priority_state_manager_(\n          new PriorityStateManager(*this, factory_context.localInfo(), nullptr)) {\n  // TODO(dio): Use by-reference when cluster.hosts() is removed.\n  const envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment(\n      cluster.has_load_assignment()\n          ? cluster.load_assignment()\n          : Config::Utility::translateClusterHosts(cluster.hidden_envoy_deprecated_hosts()));\n\n  overprovisioning_factor_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n      cluster_load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor);\n\n  for (const auto& locality_lb_endpoint : cluster_load_assignment.endpoints()) {\n    validateEndpointsForZoneAwareRouting(locality_lb_endpoint);\n    priority_state_manager_->initializePriorityFor(locality_lb_endpoint);\n    for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) {\n      priority_state_manager_->registerHostForPriority(\n          lb_endpoint.endpoint().hostname(), resolveProtoAddress(lb_endpoint.endpoint().address()),\n          locality_lb_endpoint, lb_endpoint);\n    }\n  }\n}\n\nvoid StaticClusterImpl::startPreInit() {\n  // At this point see if we have a health checker. 
If so, mark all the hosts unhealthy and\n  // then fire update callbacks to start the health checking process.\n  const auto& health_checker_flag =\n      health_checker_ != nullptr\n          ? absl::optional<Upstream::Host::HealthFlag>(Host::HealthFlag::FAILED_ACTIVE_HC)\n          : absl::nullopt;\n\n  auto& priority_state = priority_state_manager_->priorityState();\n  for (size_t i = 0; i < priority_state.size(); ++i) {\n    if (priority_state[i].first == nullptr) {\n      priority_state[i].first = std::make_unique<HostVector>();\n    }\n    priority_state_manager_->updateClusterPrioritySet(\n        i, std::move(priority_state[i].first), absl::nullopt, absl::nullopt, health_checker_flag,\n        overprovisioning_factor_);\n  }\n  priority_state_manager_.reset();\n\n  onPreInitComplete();\n}\n\nstd::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr>\nStaticClusterFactory::createClusterImpl(\n    const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n    Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n    Stats::ScopePtr&& stats_scope) {\n  return std::make_pair(\n      std::make_shared<StaticClusterImpl>(cluster, context.runtime(), socket_factory_context,\n                                          std::move(stats_scope), context.addedViaApi()),\n      nullptr);\n}\n\n/**\n * Static registration for the static cluster factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(StaticClusterFactory, ClusterFactory);\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/static_cluster.h",
    "content": "#pragma once\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/upstream/cluster_factory_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Implementation of Upstream::Cluster for static clusters (clusters that have a fixed number of\n * hosts with resolved IP addresses).\n */\nclass StaticClusterImpl : public ClusterImplBase {\npublic:\n  StaticClusterImpl(const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n                    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n                    Stats::ScopePtr&& stats_scope, bool added_via_api);\n\n  // Upstream::Cluster\n  InitializePhase initializePhase() const override { return InitializePhase::Primary; }\n\nprivate:\n  // ClusterImplBase\n  void startPreInit() override;\n\n  PriorityStateManagerPtr priority_state_manager_;\n  uint32_t overprovisioning_factor_;\n};\n\n/**\n * Factory for StaticClusterImpl cluster.\n */\nclass StaticClusterFactory : public ClusterFactoryImplBase {\npublic:\n  StaticClusterFactory()\n      : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().Static) {}\n\nprivate:\n  std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> createClusterImpl(\n      const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override;\n};\n\nDECLARE_FACTORY(StaticClusterFactory);\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/strict_dns_cluster.cc",
    "content": "#include \"common/upstream/strict_dns_cluster.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nStrictDnsClusterImpl::StrictDnsClusterImpl(\n    const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n    Network::DnsResolverSharedPtr dns_resolver,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n    Stats::ScopePtr&& stats_scope, bool added_via_api)\n    : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope),\n                             added_via_api),\n      local_info_(factory_context.localInfo()), dns_resolver_(dns_resolver),\n      dns_refresh_rate_ms_(\n          std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(cluster, dns_refresh_rate, 5000))),\n      respect_dns_ttl_(cluster.respect_dns_ttl()) {\n  failure_backoff_strategy_ =\n      Config::Utility::prepareDnsRefreshStrategy<envoy::config::cluster::v3::Cluster>(\n          cluster, dns_refresh_rate_ms_.count(), factory_context.api().randomGenerator());\n\n  std::list<ResolveTargetPtr> resolve_targets;\n  const envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment(\n      cluster.has_load_assignment()\n          ? 
cluster.load_assignment()\n          : Config::Utility::translateClusterHosts(cluster.hidden_envoy_deprecated_hosts()));\n  const auto& locality_lb_endpoints = load_assignment.endpoints();\n  for (const auto& locality_lb_endpoint : locality_lb_endpoints) {\n    validateEndpointsForZoneAwareRouting(locality_lb_endpoint);\n\n    for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) {\n      const auto& socket_address = lb_endpoint.endpoint().address().socket_address();\n      if (!socket_address.resolver_name().empty()) {\n        throw EnvoyException(\"STRICT_DNS clusters must NOT have a custom resolver name set\");\n      }\n\n      const std::string& url =\n          fmt::format(\"tcp://{}:{}\", socket_address.address(), socket_address.port_value());\n      resolve_targets.emplace_back(new ResolveTarget(*this, factory_context.dispatcher(), url,\n                                                     locality_lb_endpoint, lb_endpoint));\n    }\n  }\n  resolve_targets_ = std::move(resolve_targets);\n  dns_lookup_family_ = getDnsLookupFamilyFromCluster(cluster);\n\n  overprovisioning_factor_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n      load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor);\n}\n\nvoid StrictDnsClusterImpl::startPreInit() {\n  for (const ResolveTargetPtr& target : resolve_targets_) {\n    target->startResolve();\n  }\n  // If the config provides no endpoints, the cluster is initialized immediately as if all hosts are\n  // resolved in failure.\n  if (resolve_targets_.empty()) {\n    onPreInitComplete();\n  }\n}\n\nvoid StrictDnsClusterImpl::updateAllHosts(const HostVector& hosts_added,\n                                          const HostVector& hosts_removed,\n                                          uint32_t current_priority) {\n  PriorityStateManager priority_state_manager(*this, local_info_, nullptr);\n  // At this point we know that we are different so make a new host list and notify.\n  //\n  // TODO(dio): 
The uniqueness of a host address resolved in STRICT_DNS cluster per priority is not\n  // guaranteed. Need a clear agreement on the behavior here, whether it is allowable to have\n  // duplicated hosts inside a priority. And if we want to enforce this behavior, it should be done\n  // inside the priority state manager.\n  for (const ResolveTargetPtr& target : resolve_targets_) {\n    priority_state_manager.initializePriorityFor(target->locality_lb_endpoint_);\n    for (const HostSharedPtr& host : target->hosts_) {\n      if (target->locality_lb_endpoint_.priority() == current_priority) {\n        priority_state_manager.registerHostForPriority(host, target->locality_lb_endpoint_);\n      }\n    }\n  }\n\n  // TODO(dio): Add assertion in here.\n  priority_state_manager.updateClusterPrioritySet(\n      current_priority, std::move(priority_state_manager.priorityState()[current_priority].first),\n      hosts_added, hosts_removed, absl::nullopt, overprovisioning_factor_);\n}\n\nStrictDnsClusterImpl::ResolveTarget::ResolveTarget(\n    StrictDnsClusterImpl& parent, Event::Dispatcher& dispatcher, const std::string& url,\n    const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint,\n    const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint)\n    : parent_(parent), dns_address_(Network::Utility::hostFromTcpUrl(url)),\n      port_(Network::Utility::portFromTcpUrl(url)),\n      resolve_timer_(dispatcher.createTimer([this]() -> void { startResolve(); })),\n      locality_lb_endpoint_(locality_lb_endpoint), lb_endpoint_(lb_endpoint) {}\n\nStrictDnsClusterImpl::ResolveTarget::~ResolveTarget() {\n  if (active_query_) {\n    active_query_->cancel();\n  }\n}\n\nvoid StrictDnsClusterImpl::ResolveTarget::startResolve() {\n  ENVOY_LOG(trace, \"starting async DNS resolution for {}\", dns_address_);\n  parent_.info_->stats().update_attempt_.inc();\n\n  active_query_ = parent_.dns_resolver_->resolve(\n      dns_address_, parent_.dns_lookup_family_,\n      
[this](Network::DnsResolver::ResolutionStatus status,\n             std::list<Network::DnsResponse>&& response) -> void {\n        active_query_ = nullptr;\n        ENVOY_LOG(trace, \"async DNS resolution complete for {}\", dns_address_);\n\n        std::chrono::milliseconds final_refresh_rate = parent_.dns_refresh_rate_ms_;\n\n        if (status == Network::DnsResolver::ResolutionStatus::Success) {\n          parent_.info_->stats().update_success_.inc();\n\n          absl::node_hash_map<std::string, HostSharedPtr> updated_hosts;\n          HostVector new_hosts;\n          std::chrono::seconds ttl_refresh_rate = std::chrono::seconds::max();\n          for (const auto& resp : response) {\n            // TODO(mattklein123): Currently the DNS interface does not consider port. We need to\n            // make a new address that has port in it. We need to both support IPv6 as well as\n            // potentially move port handling into the DNS interface itself, which would work better\n            // for SRV.\n            ASSERT(resp.address_ != nullptr);\n            new_hosts.emplace_back(new HostImpl(\n                parent_.info_, dns_address_,\n                Network::Utility::getAddressWithPort(*(resp.address_), port_),\n                // TODO(zyfjeff): Created through metadata shared pool\n                std::make_shared<const envoy::config::core::v3::Metadata>(lb_endpoint_.metadata()),\n                lb_endpoint_.load_balancing_weight().value(), locality_lb_endpoint_.locality(),\n                lb_endpoint_.endpoint().health_check_config(), locality_lb_endpoint_.priority(),\n                lb_endpoint_.health_status()));\n\n            ttl_refresh_rate = min(ttl_refresh_rate, resp.ttl_);\n          }\n\n          HostVector hosts_added;\n          HostVector hosts_removed;\n          if (parent_.updateDynamicHostList(new_hosts, hosts_, hosts_added, hosts_removed,\n                                            updated_hosts, all_hosts_)) {\n            
ENVOY_LOG(debug, \"DNS hosts have changed for {}\", dns_address_);\n            ASSERT(std::all_of(hosts_.begin(), hosts_.end(), [&](const auto& host) {\n              return host->priority() == locality_lb_endpoint_.priority();\n            }));\n            parent_.updateAllHosts(hosts_added, hosts_removed, locality_lb_endpoint_.priority());\n          } else {\n            parent_.info_->stats().update_no_rebuild_.inc();\n          }\n\n          all_hosts_ = std::move(updated_hosts);\n\n          // reset failure backoff strategy because there was a success.\n          parent_.failure_backoff_strategy_->reset();\n\n          if (!response.empty() && parent_.respect_dns_ttl_ &&\n              ttl_refresh_rate != std::chrono::seconds(0)) {\n            final_refresh_rate = ttl_refresh_rate;\n            ASSERT(ttl_refresh_rate != std::chrono::seconds::max() &&\n                   final_refresh_rate.count() > 0);\n          }\n          ENVOY_LOG(debug, \"DNS refresh rate reset for {}, refresh rate {} ms\", dns_address_,\n                    final_refresh_rate.count());\n        } else {\n          parent_.info_->stats().update_failure_.inc();\n\n          final_refresh_rate =\n              std::chrono::milliseconds(parent_.failure_backoff_strategy_->nextBackOffMs());\n          ENVOY_LOG(debug, \"DNS refresh rate reset for {}, (failure) refresh rate {} ms\",\n                    dns_address_, final_refresh_rate.count());\n        }\n\n        // If there is an initialize callback, fire it now. Note that if the cluster refers to\n        // multiple DNS names, this will return initialized after a single DNS resolution\n        // completes. 
This is not perfect but is easier to code and unclear if the extra\n        // complexity is needed so will start with this.\n        parent_.onPreInitComplete();\n        resolve_timer_->enableTimer(final_refresh_rate);\n      });\n}\n\nstd::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr>\nStrictDnsClusterFactory::createClusterImpl(\n    const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n    Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n    Stats::ScopePtr&& stats_scope) {\n  auto selected_dns_resolver = selectDnsResolver(cluster, context);\n\n  return std::make_pair(std::make_shared<StrictDnsClusterImpl>(\n                            cluster, context.runtime(), selected_dns_resolver,\n                            socket_factory_context, std::move(stats_scope), context.addedViaApi()),\n                        nullptr);\n}\n\n/**\n * Static registration for the strict dns cluster factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(StrictDnsClusterFactory, ClusterFactory);\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/strict_dns_cluster.h",
    "content": "#pragma once\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n\n#include \"common/upstream/cluster_factory_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Implementation of Upstream::Cluster that does periodic DNS resolution and updates the host\n * member set if the DNS members change.\n */\nclass StrictDnsClusterImpl : public BaseDynamicClusterImpl {\npublic:\n  StrictDnsClusterImpl(const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n                       Network::DnsResolverSharedPtr dns_resolver,\n                       Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n                       Stats::ScopePtr&& stats_scope, bool added_via_api);\n\n  // Upstream::Cluster\n  InitializePhase initializePhase() const override { return InitializePhase::Primary; }\n\nprivate:\n  struct ResolveTarget {\n    ResolveTarget(StrictDnsClusterImpl& parent, Event::Dispatcher& dispatcher,\n                  const std::string& url,\n                  const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint,\n                  const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint);\n    ~ResolveTarget();\n    void startResolve();\n\n    StrictDnsClusterImpl& parent_;\n    Network::ActiveDnsQuery* active_query_{};\n    std::string dns_address_;\n    uint32_t port_;\n    Event::TimerPtr resolve_timer_;\n    HostVector hosts_;\n    const envoy::config::endpoint::v3::LocalityLbEndpoints locality_lb_endpoint_;\n    const envoy::config::endpoint::v3::LbEndpoint lb_endpoint_;\n    HostMap all_hosts_;\n  };\n\n  using ResolveTargetPtr = std::unique_ptr<ResolveTarget>;\n\n  void updateAllHosts(const HostVector& hosts_added, const HostVector& hosts_removed,\n                      uint32_t priority);\n\n  // ClusterImplBase\n  void startPreInit() override;\n\n  const 
LocalInfo::LocalInfo& local_info_;\n  Network::DnsResolverSharedPtr dns_resolver_;\n  std::list<ResolveTargetPtr> resolve_targets_;\n  const std::chrono::milliseconds dns_refresh_rate_ms_;\n  BackOffStrategyPtr failure_backoff_strategy_;\n  const bool respect_dns_ttl_;\n  Network::DnsLookupFamily dns_lookup_family_;\n  uint32_t overprovisioning_factor_;\n};\n\n/**\n * Factory for StrictDnsClusterImpl\n */\nclass StrictDnsClusterFactory : public ClusterFactoryImplBase {\npublic:\n  StrictDnsClusterFactory()\n      : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().StrictDns) {}\n\nprivate:\n  std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> createClusterImpl(\n      const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/subset_lb.cc",
    "content": "#include \"common/upstream/subset_lb.h\"\n\n#include <memory>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/config/metadata.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/maglev_lb.h\"\n#include \"common/upstream/ring_hash_lb.h\"\n\n#include \"absl/container/node_hash_set.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nSubsetLoadBalancer::SubsetLoadBalancer(\n    LoadBalancerType lb_type, PrioritySet& priority_set, const PrioritySet* local_priority_set,\n    ClusterStats& stats, Stats::Scope& scope, Runtime::Loader& runtime,\n    Random::RandomGenerator& random, const LoadBalancerSubsetInfo& subsets,\n    const absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig>&\n        lb_ring_hash_config,\n    const absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig>& lb_maglev_config,\n    const absl::optional<envoy::config::cluster::v3::Cluster::LeastRequestLbConfig>&\n        least_request_config,\n    const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n    : lb_type_(lb_type), lb_ring_hash_config_(lb_ring_hash_config),\n      lb_maglev_config_(lb_maglev_config), least_request_config_(least_request_config),\n      common_config_(common_config), stats_(stats), scope_(scope), runtime_(runtime),\n      random_(random), fallback_policy_(subsets.fallbackPolicy()),\n      default_subset_metadata_(subsets.defaultSubset().fields().begin(),\n                               subsets.defaultSubset().fields().end()),\n      subset_selectors_(subsets.subsetSelectors()), original_priority_set_(priority_set),\n      original_local_priority_set_(local_priority_set),\n      locality_weight_aware_(subsets.localityWeightAware()),\n      
scale_locality_weight_(subsets.scaleLocalityWeight()), list_as_any_(subsets.listAsAny()) {\n  ASSERT(subsets.isEnabled());\n\n  if (fallback_policy_ != envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK) {\n    HostPredicate predicate;\n    if (fallback_policy_ == envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT) {\n      ENVOY_LOG(debug, \"subset lb: creating any-endpoint fallback load balancer\");\n      initSubsetAnyOnce();\n      fallback_subset_ = subset_any_;\n    } else {\n      predicate = [this](const Host& host) -> bool {\n        return hostMatches(default_subset_metadata_, host);\n      };\n      ENVOY_LOG(debug, \"subset lb: creating fallback load balancer for {}\",\n                describeMetadata(default_subset_metadata_));\n      fallback_subset_ = std::make_shared<LbSubsetEntry>();\n      fallback_subset_->priority_subset_ = std::make_shared<PrioritySubsetImpl>(\n          *this, predicate, locality_weight_aware_, scale_locality_weight_);\n    }\n  }\n\n  if (subsets.panicModeAny()) {\n    initSubsetAnyOnce();\n    panic_mode_subset_ = subset_any_;\n  }\n\n  initSubsetSelectorMap();\n\n  // Create filtered default subset (if necessary) and other subsets based on current hosts.\n  refreshSubsets();\n\n  // This must happen after `initSubsetSelectorMap()` because that initializes `single_`.\n  rebuildSingle();\n\n  // Configure future updates.\n  original_priority_set_callback_handle_ = priority_set.addPriorityUpdateCb(\n      [this](uint32_t priority, const HostVector& hosts_added, const HostVector& hosts_removed) {\n        // TODO(ggreenway) PERF: This is currently an O(n^2) operation in the edge case of\n        // many priorities and only one host per priority. 
This could be improved by either\n        // updating in a single pass across all priorities, or by having the callback give a\n        // list of modified hosts so that an incremental update of the data structure can be\n        // performed.\n        rebuildSingle();\n\n        if (hosts_added.empty() && hosts_removed.empty()) {\n          // It's possible that metadata changed, without hosts being added nor removed.\n          // If so we need to add any new subsets, remove unused ones, and regroup hosts into\n          // the right subsets.\n          //\n          // Note, note, note: if metadata for existing endpoints changed _and_ hosts were also\n          // added or removed, we don't need to hit this path. That's fine, given that\n          // findOrCreateSubset() will be called from processSubsets because it'll be triggered by\n          // either hosts_added or hosts_removed. That's where the new subsets will be created.\n          refreshSubsets(priority);\n        } else {\n          // This is a regular update with deltas.\n          update(priority, hosts_added, hosts_removed);\n        }\n\n        purgeEmptySubsets(subsets_);\n      });\n}\n\nSubsetLoadBalancer::~SubsetLoadBalancer() {\n  original_priority_set_callback_handle_->remove();\n\n  // Ensure gauges reflect correct values.\n  forEachSubset(subsets_, [&](LbSubsetEntryPtr entry) {\n    if (entry->active()) {\n      stats_.lb_subsets_removed_.inc();\n      stats_.lb_subsets_active_.dec();\n    }\n  });\n}\n\nvoid SubsetLoadBalancer::rebuildSingle() {\n  if (single_key_.empty()) {\n    return;\n  }\n\n  // Because PriorityUpdateCb doesn't give a modified list (only added and removed), it is\n  // faster to just rebuild this map than try to figure out if any hosts had their metadata\n  // changed, and then figure out the old and new value (to remove from the old key in this map\n  // and insert in the new key).\n  single_host_per_subset_map_.clear();\n\n  uint32_t collision_count = 0;\n  for 
(const auto& host_set : original_priority_set_.hostSetsPerPriority()) {\n    for (const auto& host : host_set->hosts()) {\n      MetadataConstSharedPtr metadata = host->metadata();\n      const auto& filter_metadata = metadata->filter_metadata();\n      auto filter_it = filter_metadata.find(Config::MetadataFilters::get().ENVOY_LB);\n      if (filter_it != filter_metadata.end()) {\n        const auto& fields = filter_it->second.fields();\n        auto fields_it = fields.find(single_key_);\n        if (fields_it != fields.end()) {\n          auto [iterator, did_insert] =\n              single_host_per_subset_map_.try_emplace(fields_it->second, host);\n          if (!did_insert) {\n            // Two hosts with the same metadata value were found. Ignore all but one of them, and\n            // set a metric for how many times this happened.\n            collision_count++;\n          }\n        }\n      }\n    }\n  }\n\n  // This stat isn't added to `ClusterStats` because it wouldn't be used\n  // for nearly all clusters, and is only set during configuration updates,\n  // not in the data path, so performance of looking up the stat isn't critical.\n  if (single_duplicate_stat_ == nullptr) {\n    Stats::StatNameManagedStorage name_storage(\"lb_subsets_single_host_per_subset_duplicate\",\n                                               scope_.symbolTable());\n\n    single_duplicate_stat_ = &Stats::Utility::gaugeFromElements(\n        scope_, {name_storage.statName()}, Stats::Gauge::ImportMode::Accumulate);\n  }\n  single_duplicate_stat_->set(collision_count);\n}\n\n// When in `single_host_per_subset` mode, select a host based on the provided match_criteria.\n// Set `host_chosen` to false if there is not a match.\nHostConstSharedPtr SubsetLoadBalancer::tryChooseHostFromMetadataMatchCriteriaSingle(\n    const Router::MetadataMatchCriteria& match_criteria, bool& host_chosen) {\n  ASSERT(!single_key_.empty());\n\n  for (const auto& entry : 
match_criteria.metadataMatchCriteria()) {\n    if (entry->name() == single_key_) {\n      auto it = single_host_per_subset_map_.find(entry->value());\n      if (it != single_host_per_subset_map_.end()) {\n        if (it->second->health() != Host::Health::Unhealthy) {\n          host_chosen = true;\n          stats_.lb_subsets_selected_.inc();\n          return it->second;\n        }\n      }\n      break;\n    }\n  }\n  return nullptr;\n}\n\nvoid SubsetLoadBalancer::refreshSubsets() {\n  for (auto& host_set : original_priority_set_.hostSetsPerPriority()) {\n    update(host_set->priority(), host_set->hosts(), {});\n  }\n}\n\nvoid SubsetLoadBalancer::refreshSubsets(uint32_t priority) {\n  const auto& host_sets = original_priority_set_.hostSetsPerPriority();\n  ASSERT(priority < host_sets.size());\n  update(priority, host_sets[priority]->hosts(), {});\n}\n\nvoid SubsetLoadBalancer::initSubsetAnyOnce() {\n  if (!subset_any_) {\n    HostPredicate predicate = [](const Host&) -> bool { return true; };\n    subset_any_ = std::make_shared<LbSubsetEntry>();\n    subset_any_->priority_subset_ = std::make_shared<PrioritySubsetImpl>(\n        *this, predicate, locality_weight_aware_, scale_locality_weight_);\n  }\n}\n\nvoid SubsetLoadBalancer::initSubsetSelectorMap() {\n  selectors_ = std::make_shared<SubsetSelectorMap>();\n  SubsetSelectorMapPtr selectors;\n  for (const auto& subset_selector : subset_selectors_) {\n    const auto& selector_keys = subset_selector->selectorKeys();\n    const auto& selector_fallback_policy = subset_selector->fallbackPolicy();\n    const auto& selector_fallback_keys_subset = subset_selector->fallbackKeysSubset();\n\n    if (subset_selector->singleHostPerSubset()) {\n      if (subset_selectors_.size() > 1) {\n        throw EnvoyException(\"subset_lb selector: single_host_per_subset cannot be set when there \"\n                             \"are multiple subset selectors.\");\n      }\n      if (selector_keys.size() != 1 || 
selector_keys.begin()->empty()) {\n        throw EnvoyException(\"subset_lb selector: single_host_per_subset cannot bet set when there \"\n                             \"isn't exactly 1 key or if that key is empty.\");\n      }\n      single_key_ = *selector_keys.begin();\n\n      subset_selectors_.clear();\n      return;\n    }\n\n    if (selector_fallback_policy ==\n        envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED) {\n      continue;\n    }\n    uint32_t pos = 0;\n    selectors = selectors_;\n    for (const auto& key : selector_keys) {\n      const auto& selector_it = selectors->subset_keys_.find(key);\n      pos++;\n      if (selector_it == selectors->subset_keys_.end()) {\n        selectors->subset_keys_.emplace(std::make_pair(key, std::make_shared<SubsetSelectorMap>()));\n        const auto& child_selector = selectors->subset_keys_.find(key);\n        // if this is last key for given selector, check if it has fallback specified\n        if (pos == selector_keys.size()) {\n          child_selector->second->fallback_params_.fallback_policy_ = selector_fallback_policy;\n          child_selector->second->fallback_params_.fallback_keys_subset_ =\n              &selector_fallback_keys_subset;\n          initSelectorFallbackSubset(selector_fallback_policy);\n        }\n        selectors = child_selector->second;\n      } else {\n        selectors = selector_it->second;\n      }\n    }\n    selectors = selectors_;\n  }\n}\n\nvoid SubsetLoadBalancer::initSelectorFallbackSubset(\n    const envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::\n        LbSubsetSelectorFallbackPolicy& fallback_policy) {\n  if (fallback_policy ==\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT &&\n      subset_any_ == nullptr) {\n    ENVOY_LOG(debug, \"subset lb: creating any-endpoint fallback load balancer for selector\");\n    initSubsetAnyOnce();\n  } else if (fallback_policy == 
envoy::config::cluster::v3::Cluster::LbSubsetConfig::\n                                    LbSubsetSelector::DEFAULT_SUBSET &&\n             selector_fallback_subset_default_ == nullptr) {\n    ENVOY_LOG(debug, \"subset lb: creating default subset fallback load balancer for selector\");\n    HostPredicate predicate = std::bind(&SubsetLoadBalancer::hostMatches, this,\n                                        default_subset_metadata_, std::placeholders::_1);\n    selector_fallback_subset_default_ = std::make_shared<LbSubsetEntry>();\n    selector_fallback_subset_default_->priority_subset_ = std::make_shared<PrioritySubsetImpl>(\n        *this, predicate, locality_weight_aware_, scale_locality_weight_);\n  }\n}\n\nHostConstSharedPtr SubsetLoadBalancer::chooseHost(LoadBalancerContext* context) {\n  if (context) {\n    bool host_chosen;\n    HostConstSharedPtr host = tryChooseHostFromContext(context, host_chosen);\n    if (host_chosen) {\n      // Subset lookup succeeded, return this result even if it's nullptr.\n      return host;\n    }\n    // otherwise check if there is fallback policy configured for given route metadata\n    absl::optional<SubsetSelectorFallbackParamsRef> selector_fallback_params =\n        tryFindSelectorFallbackParams(context);\n    if (selector_fallback_params &&\n        selector_fallback_params->get().fallback_policy_ !=\n            envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED) {\n      // return result according to configured fallback policy\n      return chooseHostForSelectorFallbackPolicy(*selector_fallback_params, context);\n    }\n  }\n\n  if (fallback_subset_ == nullptr) {\n    return nullptr;\n  }\n\n  HostConstSharedPtr host = fallback_subset_->priority_subset_->lb_->chooseHost(context);\n  if (host != nullptr) {\n    stats_.lb_subsets_fallback_.inc();\n    return host;\n  }\n\n  if (panic_mode_subset_ != nullptr) {\n    HostConstSharedPtr host = 
panic_mode_subset_->priority_subset_->lb_->chooseHost(context);\n    if (host != nullptr) {\n      stats_.lb_subsets_fallback_panic_.inc();\n      return host;\n    }\n  }\n\n  return nullptr;\n}\n\nabsl::optional<SubsetLoadBalancer::SubsetSelectorFallbackParamsRef>\nSubsetLoadBalancer::tryFindSelectorFallbackParams(LoadBalancerContext* context) {\n  const Router::MetadataMatchCriteria* match_criteria = context->metadataMatchCriteria();\n  if (!match_criteria) {\n    return absl::nullopt;\n  }\n  const auto match_criteria_vec = match_criteria->metadataMatchCriteria();\n  SubsetSelectorMapPtr selectors = selectors_;\n  if (selectors == nullptr) {\n    return absl::nullopt;\n  }\n  for (uint32_t i = 0; i < match_criteria_vec.size(); i++) {\n    const Router::MetadataMatchCriterion& match_criterion = *match_criteria_vec[i];\n    const auto& subset_it = selectors->subset_keys_.find(match_criterion.name());\n    if (subset_it == selectors->subset_keys_.end()) {\n      // No subsets with this key (at this level in the hierarchy).\n      break;\n    }\n\n    if (i + 1 == match_criteria_vec.size()) {\n      // We've reached the end of the criteria, and they all matched.\n      return subset_it->second->fallback_params_;\n    }\n    selectors = subset_it->second;\n  }\n\n  return absl::nullopt;\n}\n\nHostConstSharedPtr SubsetLoadBalancer::chooseHostForSelectorFallbackPolicy(\n    const SubsetSelectorFallbackParams& fallback_params, LoadBalancerContext* context) {\n  const auto& fallback_policy = fallback_params.fallback_policy_;\n  if (fallback_policy ==\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT &&\n      subset_any_ != nullptr) {\n    return subset_any_->priority_subset_->lb_->chooseHost(context);\n  } else if (fallback_policy == envoy::config::cluster::v3::Cluster::LbSubsetConfig::\n                                    LbSubsetSelector::DEFAULT_SUBSET &&\n             selector_fallback_subset_default_ != nullptr) {\n    
return selector_fallback_subset_default_->priority_subset_->lb_->chooseHost(context);\n  } else if (fallback_policy ==\n             envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET) {\n    ASSERT(fallback_params.fallback_keys_subset_);\n    auto filtered_context = std::make_unique<LoadBalancerContextWrapper>(\n        context, *fallback_params.fallback_keys_subset_);\n    // Perform whole subset load balancing again with reduced metadata match criteria\n    return chooseHost(filtered_context.get());\n  } else {\n    return nullptr;\n  }\n}\n\n// Find a host from the subsets. Sets host_chosen to false and returns nullptr if the context has\n// no metadata match criteria, if there is no matching subset, or if the matching subset contains\n// no hosts (ignoring health). Otherwise, host_chosen is true and the returns HostConstSharedPtr is\n// from the subset's load balancer (technically, it may still be nullptr).\nHostConstSharedPtr SubsetLoadBalancer::tryChooseHostFromContext(LoadBalancerContext* context,\n                                                                bool& host_chosen) {\n  host_chosen = false;\n  const Router::MetadataMatchCriteria* match_criteria = context->metadataMatchCriteria();\n  if (!match_criteria) {\n    return nullptr;\n  }\n\n  if (!single_key_.empty()) {\n    return tryChooseHostFromMetadataMatchCriteriaSingle(*match_criteria, host_chosen);\n  }\n\n  // Route has metadata match criteria defined, see if we have a matching subset.\n  LbSubsetEntryPtr entry = findSubset(match_criteria->metadataMatchCriteria());\n  if (entry == nullptr || !entry->active()) {\n    // No matching subset or subset not active: use fallback policy.\n    return nullptr;\n  }\n\n  host_chosen = true;\n  stats_.lb_subsets_selected_.inc();\n  return entry->priority_subset_->lb_->chooseHost(context);\n}\n\n// Iterates over the given metadata match criteria (which must be lexically sorted by key) and find\n// a matching 
LbSubsetEntryPtr, if any.\nSubsetLoadBalancer::LbSubsetEntryPtr SubsetLoadBalancer::findSubset(\n    const std::vector<Router::MetadataMatchCriterionConstSharedPtr>& match_criteria) {\n  const LbSubsetMap* subsets = &subsets_;\n\n  // Because the match_criteria and the host metadata used to populate subsets_ are sorted in the\n  // same order, we can iterate over the criteria and perform a lookup for each key and value,\n  // starting with the root LbSubsetMap and using the previous iteration's LbSubsetMap thereafter\n  // (tracked in subsets). If ever a criterion's key or value is not found, there is no subset for\n  // this criteria. If we reach the last criterion, we've found the LbSubsetEntry for the criteria,\n  // which may or may not have a subset attached to it.\n  for (uint32_t i = 0; i < match_criteria.size(); i++) {\n    const Router::MetadataMatchCriterion& match_criterion = *match_criteria[i];\n    const auto& subset_it = subsets->find(match_criterion.name());\n    if (subset_it == subsets->end()) {\n      // No subsets with this key (at this level in the hierarchy).\n      break;\n    }\n\n    const ValueSubsetMap& vs_map = subset_it->second;\n    const auto& vs_it = vs_map.find(match_criterion.value());\n    if (vs_it == vs_map.end()) {\n      // No subsets with this value.\n      break;\n    }\n\n    const LbSubsetEntryPtr& entry = vs_it->second;\n    if (i + 1 == match_criteria.size()) {\n      // We've reached the end of the criteria, and they all matched.\n      return entry;\n    }\n\n    subsets = &entry->children_;\n  }\n\n  return nullptr;\n}\n\nvoid SubsetLoadBalancer::updateFallbackSubset(uint32_t priority, const HostVector& hosts_added,\n                                              const HostVector& hosts_removed) {\n\n  if (subset_any_ != nullptr) {\n    subset_any_->priority_subset_->update(priority, hosts_added, hosts_removed);\n  }\n\n  if (selector_fallback_subset_default_ != nullptr) {\n    
selector_fallback_subset_default_->priority_subset_->update(priority, hosts_added,\n                                                                hosts_removed);\n  }\n\n  if (fallback_subset_ == nullptr) {\n    ENVOY_LOG(debug, \"subset lb: fallback load balancer disabled\");\n    return;\n  }\n\n  if (fallback_subset_ != subset_any_) {\n    // Add/remove hosts.\n    fallback_subset_->priority_subset_->update(priority, hosts_added, hosts_removed);\n  }\n\n  // Same thing for the panic mode subset.\n  ASSERT(panic_mode_subset_ == nullptr || panic_mode_subset_ == subset_any_);\n}\n\n// Iterates over the added and removed hosts, looking up an LbSubsetEntryPtr for each. For every\n// unique LbSubsetEntryPtr found, it either invokes new_cb or update_cb depending on whether the\n// LbSubsetEntryPtr is already initialized (update_cb) or not (new_cb). In addition, update_cb is\n// invoked for any otherwise unmodified but active and initialized LbSubsetEntryPtr to allow host\n// health to be updated.\nvoid SubsetLoadBalancer::processSubsets(\n    const HostVector& hosts_added, const HostVector& hosts_removed,\n    std::function<void(LbSubsetEntryPtr)> update_cb,\n    std::function<void(LbSubsetEntryPtr, HostPredicate, const SubsetMetadata&)> new_cb) {\n  absl::node_hash_set<LbSubsetEntryPtr> subsets_modified;\n\n  std::pair<const HostVector&, bool> steps[] = {{hosts_added, true}, {hosts_removed, false}};\n  for (const auto& step : steps) {\n    const auto& hosts = step.first;\n    const bool adding_hosts = step.second;\n    for (const auto& host : hosts) {\n      for (const auto& subset_selector : subset_selectors_) {\n        const auto& keys = subset_selector->selectorKeys();\n        // For each host, for each subset key, attempt to extract the metadata corresponding to the\n        // key from the host.\n        std::vector<SubsetMetadata> all_kvs = extractSubsetMetadata(keys, *host);\n        for (const auto& kvs : all_kvs) {\n          // The host has metadata for 
each key, find or create its subset.\n          auto entry = findOrCreateSubset(subsets_, kvs, 0);\n          if (entry != nullptr) {\n            if (subsets_modified.find(entry) != subsets_modified.end()) {\n              // We've already invoked the callback for this entry.\n              continue;\n            }\n            subsets_modified.emplace(entry);\n\n            if (entry->initialized()) {\n              update_cb(entry);\n            } else {\n              HostPredicate predicate = [this, kvs](const Host& host) -> bool {\n                return hostMatches(kvs, host);\n              };\n              if (adding_hosts) {\n                new_cb(entry, predicate, kvs);\n              }\n            }\n          }\n        }\n      }\n    }\n  }\n\n  forEachSubset(subsets_, [&](LbSubsetEntryPtr entry) {\n    if (subsets_modified.find(entry) != subsets_modified.end()) {\n      // Already handled due to hosts being added or removed.\n      return;\n    }\n\n    if (entry->initialized() && entry->active()) {\n      update_cb(entry);\n    }\n  });\n}\n\n// Given the addition and/or removal of hosts, update all subsets for this priority level, creating\n// new subsets as necessary.\nvoid SubsetLoadBalancer::update(uint32_t priority, const HostVector& hosts_added,\n                                const HostVector& hosts_removed) {\n  updateFallbackSubset(priority, hosts_added, hosts_removed);\n\n  processSubsets(\n      hosts_added, hosts_removed,\n      [&](LbSubsetEntryPtr entry) {\n        entry->priority_subset_->update(priority, hosts_added, hosts_removed);\n      },\n      [&](LbSubsetEntryPtr entry, HostPredicate predicate, const SubsetMetadata& kvs) {\n        ENVOY_LOG(debug, \"subset lb: creating load balancer for {}\", describeMetadata(kvs));\n\n        // Initialize new entry with hosts and update stats. 
(An uninitialized entry\n        // with only removed hosts is a degenerate case and we leave the entry\n        // uninitialized.)\n        entry->priority_subset_ = std::make_shared<PrioritySubsetImpl>(\n            *this, predicate, locality_weight_aware_, scale_locality_weight_);\n        stats_.lb_subsets_active_.inc();\n        stats_.lb_subsets_created_.inc();\n      });\n}\n\nbool SubsetLoadBalancer::hostMatches(const SubsetMetadata& kvs, const Host& host) {\n  return Config::Metadata::metadataLabelMatch(\n      kvs, host.metadata().get(), Config::MetadataFilters::get().ENVOY_LB, list_as_any_);\n}\n\n// Iterates over subset_keys looking up values from the given host's metadata. Each key-value pair\n// is appended to kvs. Returns a non-empty value if the host has a value for each key.\nstd::vector<SubsetLoadBalancer::SubsetMetadata>\nSubsetLoadBalancer::extractSubsetMetadata(const std::set<std::string>& subset_keys,\n                                          const Host& host) {\n  std::vector<SubsetMetadata> all_kvs;\n  if (!host.metadata()) {\n    return all_kvs;\n  }\n  const envoy::config::core::v3::Metadata& metadata = *host.metadata();\n  const auto& filter_it = metadata.filter_metadata().find(Config::MetadataFilters::get().ENVOY_LB);\n  if (filter_it == metadata.filter_metadata().end()) {\n    return all_kvs;\n  }\n\n  const auto& fields = filter_it->second.fields();\n  for (const auto& key : subset_keys) {\n    const auto it = fields.find(key);\n    if (it == fields.end()) {\n      all_kvs.clear();\n      break;\n    }\n\n    if (list_as_any_ && it->second.kind_case() == ProtobufWkt::Value::kListValue) {\n      // If the list of kvs is empty, we initialize one kvs for each value in the list.\n      // Otherwise, we branch the list of kvs by generating one new kvs per old kvs per\n      // new value.\n      //\n      // For example, two kvs (<a=1>, <a=2>) joined with the kv foo=[bar,baz] results in four kvs:\n      //   <a=1,foo=bar>\n      //   
<a=1,foo=baz>\n      //   <a=2,foo=bar>\n      //   <a=2,foo=baz>\n      if (all_kvs.empty()) {\n        for (const auto& v : it->second.list_value().values()) {\n          all_kvs.emplace_back(SubsetMetadata({make_pair(key, v)}));\n        }\n      } else {\n        std::vector<SubsetMetadata> new_kvs;\n        for (const auto& kvs : all_kvs) {\n          for (const auto& v : it->second.list_value().values()) {\n            auto kv_copy = kvs;\n            kv_copy.emplace_back(make_pair(key, v));\n            new_kvs.emplace_back(kv_copy);\n          }\n        }\n        all_kvs = new_kvs;\n      }\n\n    } else {\n      if (all_kvs.empty()) {\n        all_kvs.emplace_back(SubsetMetadata({std::make_pair(key, it->second)}));\n      } else {\n        for (auto& kvs : all_kvs) {\n          kvs.emplace_back(std::make_pair(key, it->second));\n        }\n      }\n    }\n  }\n\n  return all_kvs;\n}\n\nstd::string SubsetLoadBalancer::describeMetadata(const SubsetLoadBalancer::SubsetMetadata& kvs) {\n  if (kvs.empty()) {\n    return \"<no metadata>\";\n  }\n\n  std::ostringstream buf;\n  bool first = true;\n  for (const auto& it : kvs) {\n    if (!first) {\n      buf << \", \";\n    } else {\n      first = false;\n    }\n\n    buf << it.first << \"=\" << MessageUtil::getJsonStringFromMessage(it.second);\n  }\n\n  return buf.str();\n}\n\n// Given a vector of key-values (from extractSubsetMetadata), recursively finds the matching\n// LbSubsetEntryPtr.\nSubsetLoadBalancer::LbSubsetEntryPtr\nSubsetLoadBalancer::findOrCreateSubset(LbSubsetMap& subsets, const SubsetMetadata& kvs,\n                                       uint32_t idx) {\n  ASSERT(idx < kvs.size());\n\n  const std::string& name = kvs[idx].first;\n  const ProtobufWkt::Value& pb_value = kvs[idx].second;\n  const HashedValue value(pb_value);\n\n  LbSubsetEntryPtr entry;\n\n  const auto& kv_it = subsets.find(name);\n\n  if (kv_it != subsets.end()) {\n    ValueSubsetMap& value_subset_map = kv_it->second;\n    const 
auto vs_it = value_subset_map.find(value);\n    if (vs_it != value_subset_map.end()) {\n      entry = vs_it->second;\n    }\n  }\n\n  if (!entry) {\n    // Not found. Create an uninitialized entry.\n    entry = std::make_shared<LbSubsetEntry>();\n    if (kv_it != subsets.end()) {\n      ValueSubsetMap& value_subset_map = kv_it->second;\n      value_subset_map.emplace(value, entry);\n    } else {\n      ValueSubsetMap value_subset_map = {{value, entry}};\n      subsets.emplace(name, value_subset_map);\n    }\n  }\n\n  idx++;\n  if (idx == kvs.size()) {\n    // We've matched all the key-values, return the entry.\n    return entry;\n  }\n\n  return findOrCreateSubset(entry->children_, kvs, idx);\n}\n\n// Invokes cb for each LbSubsetEntryPtr in subsets.\nvoid SubsetLoadBalancer::forEachSubset(LbSubsetMap& subsets,\n                                       std::function<void(LbSubsetEntryPtr)> cb) {\n  for (auto& vsm : subsets) {\n    for (auto& em : vsm.second) {\n      LbSubsetEntryPtr entry = em.second;\n      cb(entry);\n      forEachSubset(entry->children_, cb);\n    }\n  }\n}\n\nvoid SubsetLoadBalancer::purgeEmptySubsets(LbSubsetMap& subsets) {\n  for (auto subset_it = subsets.begin(); subset_it != subsets.end();) {\n    for (auto it = subset_it->second.begin(); it != subset_it->second.end();) {\n      LbSubsetEntryPtr entry = it->second;\n\n      purgeEmptySubsets(entry->children_);\n\n      if (entry->active() || entry->hasChildren()) {\n        it++;\n        continue;\n      }\n\n      // If it wasn't initialized, it wasn't accounted for.\n      if (entry->initialized()) {\n        stats_.lb_subsets_active_.dec();\n        stats_.lb_subsets_removed_.inc();\n      }\n\n      auto next_it = std::next(it);\n      subset_it->second.erase(it);\n      it = next_it;\n    }\n\n    if (subset_it->second.empty()) {\n      auto next_subset_it = std::next(subset_it);\n      subsets.erase(subset_it);\n      subset_it = next_subset_it;\n    } else {\n      subset_it++;\n    
}\n  }\n}\n\n// Initialize a new HostSubsetImpl and LoadBalancer from the SubsetLoadBalancer, filtering hosts\n// with the given predicate.\nSubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalancer& subset_lb,\n                                                           HostPredicate predicate,\n                                                           bool locality_weight_aware,\n                                                           bool scale_locality_weight)\n    : original_priority_set_(subset_lb.original_priority_set_), predicate_(predicate),\n      locality_weight_aware_(locality_weight_aware), scale_locality_weight_(scale_locality_weight) {\n\n  for (size_t i = 0; i < original_priority_set_.hostSetsPerPriority().size(); ++i) {\n    empty_ &= getOrCreateHostSet(i).hosts().empty();\n  }\n\n  for (size_t i = 0; i < subset_lb.original_priority_set_.hostSetsPerPriority().size(); ++i) {\n    update(i, subset_lb.original_priority_set_.hostSetsPerPriority()[i]->hosts(), {});\n  }\n\n  switch (subset_lb.lb_type_) {\n  case LoadBalancerType::LeastRequest:\n    lb_ = std::make_unique<LeastRequestLoadBalancer>(\n        *this, subset_lb.original_local_priority_set_, subset_lb.stats_, subset_lb.runtime_,\n        subset_lb.random_, subset_lb.common_config_, subset_lb.least_request_config_);\n    break;\n\n  case LoadBalancerType::Random:\n    lb_ = std::make_unique<RandomLoadBalancer>(*this, subset_lb.original_local_priority_set_,\n                                               subset_lb.stats_, subset_lb.runtime_,\n                                               subset_lb.random_, subset_lb.common_config_);\n    break;\n\n  case LoadBalancerType::RoundRobin:\n    lb_ = std::make_unique<RoundRobinLoadBalancer>(*this, subset_lb.original_local_priority_set_,\n                                                   subset_lb.stats_, subset_lb.runtime_,\n                                                   subset_lb.random_, 
subset_lb.common_config_);\n    break;\n\n  case LoadBalancerType::RingHash:\n    // TODO(mattklein123): The ring hash LB is thread aware, but currently the subset LB is not.\n    // We should make the subset LB thread aware since the calculations are costly, and then we\n    // can also use a thread aware sub-LB properly. The following works fine but is not optimal.\n    thread_aware_lb_ = std::make_unique<RingHashLoadBalancer>(\n        *this, subset_lb.stats_, subset_lb.scope_, subset_lb.runtime_, subset_lb.random_,\n        subset_lb.lb_ring_hash_config_, subset_lb.common_config_);\n    thread_aware_lb_->initialize();\n    lb_ = thread_aware_lb_->factory()->create();\n    break;\n\n  case LoadBalancerType::Maglev:\n    // TODO(mattklein123): The Maglev LB is thread aware, but currently the subset LB is not.\n    // We should make the subset LB thread aware since the calculations are costly, and then we\n    // can also use a thread aware sub-LB properly. The following works fine but is not optimal.\n    thread_aware_lb_ = std::make_unique<MaglevLoadBalancer>(\n        *this, subset_lb.stats_, subset_lb.scope_, subset_lb.runtime_, subset_lb.random_,\n        subset_lb.lb_maglev_config_, subset_lb.common_config_);\n    thread_aware_lb_->initialize();\n    lb_ = thread_aware_lb_->factory()->create();\n    break;\n\n  case LoadBalancerType::OriginalDst:\n  case LoadBalancerType::ClusterProvided:\n    // LoadBalancerType::OriginalDst is blocked in the factory. LoadBalancerType::ClusterProvided\n    // is impossible because the subset LB returns a null load balancer from its factory.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  triggerCallbacks();\n}\n\n// Given hosts_added and hosts_removed, update the underlying HostSet. The hosts_added Hosts must\n// be filtered to match hosts that belong in this subset. 
The hosts_removed Hosts are ignored if\n// they are not currently a member of this subset.\nvoid SubsetLoadBalancer::HostSubsetImpl::update(const HostVector& hosts_added,\n                                                const HostVector& hosts_removed,\n                                                std::function<bool(const Host&)> predicate) {\n  // We cache the result of matching the host against the predicate. This ensures\n  // that we maintain a consistent view of the metadata and saves on computation\n  // since metadata lookups can be expensive.\n  //\n  // We use an unordered container because this can potentially be in the tens of thousands.\n  absl::node_hash_set<const Host*> matching_hosts;\n\n  auto cached_predicate = [&matching_hosts](const auto& host) {\n    return matching_hosts.count(&host) == 1;\n  };\n\n  // TODO(snowp): If we had a unhealthyHosts() function we could avoid potentially traversing\n  // the list of hosts twice.\n  auto hosts = std::make_shared<HostVector>();\n  hosts->reserve(original_host_set_.hosts().size());\n  for (const auto& host : original_host_set_.hosts()) {\n    if (predicate(*host)) {\n      matching_hosts.insert(host.get());\n      hosts->emplace_back(host);\n    }\n  }\n\n  auto healthy_hosts = std::make_shared<HealthyHostVector>();\n  healthy_hosts->get().reserve(original_host_set_.healthyHosts().size());\n  for (const auto& host : original_host_set_.healthyHosts()) {\n    if (cached_predicate(*host)) {\n      healthy_hosts->get().emplace_back(host);\n    }\n  }\n\n  auto degraded_hosts = std::make_shared<DegradedHostVector>();\n  degraded_hosts->get().reserve(original_host_set_.degradedHosts().size());\n  for (const auto& host : original_host_set_.degradedHosts()) {\n    if (cached_predicate(*host)) {\n      degraded_hosts->get().emplace_back(host);\n    }\n  }\n\n  auto excluded_hosts = std::make_shared<ExcludedHostVector>();\n  excluded_hosts->get().reserve(original_host_set_.excludedHosts().size());\n  for (const 
auto& host : original_host_set_.excludedHosts()) {\n    if (cached_predicate(*host)) {\n      excluded_hosts->get().emplace_back(host);\n    }\n  }\n\n  // If we only have one locality we can avoid the first call to filter() by\n  // just creating a new HostsPerLocality from the list of all hosts.\n  HostsPerLocalityConstSharedPtr hosts_per_locality;\n\n  if (original_host_set_.hostsPerLocality().get().size() == 1) {\n    hosts_per_locality = std::make_shared<HostsPerLocalityImpl>(\n        *hosts, original_host_set_.hostsPerLocality().hasLocalLocality());\n  } else {\n    hosts_per_locality = original_host_set_.hostsPerLocality().filter({cached_predicate})[0];\n  }\n\n  auto healthy_hosts_per_locality =\n      original_host_set_.healthyHostsPerLocality().filter({cached_predicate})[0];\n  auto degraded_hosts_per_locality =\n      original_host_set_.degradedHostsPerLocality().filter({cached_predicate})[0];\n  auto excluded_hosts_per_locality =\n      original_host_set_.excludedHostsPerLocality().filter({cached_predicate})[0];\n\n  // We can use the cached predicate here, since we trust that the hosts in hosts_added were also\n  // present in the list of all hosts.\n  HostVector filtered_added;\n  for (const auto& host : hosts_added) {\n    if (cached_predicate(*host)) {\n      filtered_added.emplace_back(host);\n    }\n  }\n\n  // Since the removed hosts would not be present in the list of all hosts, we need to evaluate\n  // the predicate directly for these hosts.\n  HostVector filtered_removed;\n  for (const auto& host : hosts_removed) {\n    if (predicate(*host)) {\n      filtered_removed.emplace_back(host);\n    }\n  }\n\n  HostSetImpl::updateHosts(HostSetImpl::updateHostsParams(\n                               hosts, hosts_per_locality, healthy_hosts, healthy_hosts_per_locality,\n                               degraded_hosts, degraded_hosts_per_locality, excluded_hosts,\n                               excluded_hosts_per_locality),\n                           
determineLocalityWeights(*hosts_per_locality), filtered_added,\n                           filtered_removed, absl::nullopt);\n}\n\nLocalityWeightsConstSharedPtr SubsetLoadBalancer::HostSubsetImpl::determineLocalityWeights(\n    const HostsPerLocality& hosts_per_locality) const {\n  if (locality_weight_aware_) {\n    if (scale_locality_weight_) {\n      const auto& original_hosts_per_locality = original_host_set_.hostsPerLocality().get();\n      // E.g. we can be here in static clusters with actual locality weighting before pre-init\n      // completes.\n      if (!original_host_set_.localityWeights()) {\n        return {};\n      }\n      const auto& original_weights = *original_host_set_.localityWeights();\n\n      auto scaled_locality_weights = std::make_shared<LocalityWeights>(original_weights.size());\n      for (uint32_t i = 0; i < original_weights.size(); ++i) {\n        // If the original locality has zero hosts, skip it. This leaves the weight at zero.\n        if (original_hosts_per_locality[i].empty()) {\n          continue;\n        }\n\n        // Otherwise, scale it proportionally to the number of hosts removed by the subset\n        // predicate.\n        (*scaled_locality_weights)[i] =\n            std::round(float((original_weights[i] * hosts_per_locality.get()[i].size())) /\n                       original_hosts_per_locality[i].size());\n      }\n\n      return scaled_locality_weights;\n    } else {\n      return original_host_set_.localityWeights();\n    }\n  }\n  return {};\n}\n\nHostSetImplPtr SubsetLoadBalancer::PrioritySubsetImpl::createHostSet(\n    uint32_t priority, absl::optional<uint32_t> overprovisioning_factor) {\n  // Use original hostset's overprovisioning_factor.\n  RELEASE_ASSERT(priority < original_priority_set_.hostSetsPerPriority().size(), \"\");\n\n  const HostSetPtr& host_set = original_priority_set_.hostSetsPerPriority()[priority];\n\n  ASSERT(!overprovisioning_factor.has_value() ||\n         overprovisioning_factor.value() == 
host_set->overprovisioningFactor());\n  return HostSetImplPtr{\n      new HostSubsetImpl(*host_set, locality_weight_aware_, scale_locality_weight_)};\n}\n\nvoid SubsetLoadBalancer::PrioritySubsetImpl::update(uint32_t priority,\n                                                    const HostVector& hosts_added,\n                                                    const HostVector& hosts_removed) {\n  const auto& host_subset = getOrCreateHostSet(priority);\n  updateSubset(priority, hosts_added, hosts_removed, predicate_);\n\n  if (host_subset.hosts().empty() != empty_) {\n    empty_ = true;\n    for (auto& host_set : hostSetsPerPriority()) {\n      empty_ &= host_set->hosts().empty();\n    }\n  }\n\n  // Create a new worker local LB if needed.\n  // TODO(mattklein123): See the PrioritySubsetImpl constructor for additional comments on how\n  // we can do better here.\n  if (thread_aware_lb_ != nullptr) {\n    lb_ = thread_aware_lb_->factory()->create();\n  }\n}\n\nSubsetLoadBalancer::LoadBalancerContextWrapper::LoadBalancerContextWrapper(\n    LoadBalancerContext* wrapped,\n    const std::set<std::string>& filtered_metadata_match_criteria_names)\n    : wrapped_(wrapped) {\n  ASSERT(wrapped->metadataMatchCriteria());\n\n  metadata_match_ =\n      wrapped->metadataMatchCriteria()->filterMatchCriteria(filtered_metadata_match_criteria_names);\n}\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/subset_lb.h",
    "content": "#pragma once\n\n#include <functional>\n#include <map>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/load_balancer.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass SubsetLoadBalancer : public LoadBalancer, Logger::Loggable<Logger::Id::upstream> {\npublic:\n  SubsetLoadBalancer(\n      LoadBalancerType lb_type, PrioritySet& priority_set, const PrioritySet* local_priority_set,\n      ClusterStats& stats, Stats::Scope& scope, Runtime::Loader& runtime,\n      Random::RandomGenerator& random, const LoadBalancerSubsetInfo& subsets,\n      const absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig>&\n          lb_ring_hash_config,\n      const absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig>& lb_maglev_config,\n      const absl::optional<envoy::config::cluster::v3::Cluster::LeastRequestLbConfig>&\n          least_request_config,\n      const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config);\n  ~SubsetLoadBalancer() override;\n\n  // Upstream::LoadBalancer\n  HostConstSharedPtr chooseHost(LoadBalancerContext* context) override;\n  // TODO(alyssawilk) implement for non-metadata match.\n  HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; }\n\nprivate:\n  using HostPredicate = std::function<bool(const Host&)>;\n  struct SubsetSelectorFallbackParams;\n\n  void initSubsetAnyOnce();\n  void initSubsetSelectorMap();\n  void initSelectorFallbackSubset(const envoy::config::cluster::v3::Cluster::LbSubsetConfig::\n                                      
LbSubsetSelector::LbSubsetSelectorFallbackPolicy&);\n  HostConstSharedPtr\n  chooseHostForSelectorFallbackPolicy(const SubsetSelectorFallbackParams& fallback_params,\n                                      LoadBalancerContext* context);\n\n  // Represents a subset of an original HostSet.\n  class HostSubsetImpl : public HostSetImpl {\n  public:\n    HostSubsetImpl(const HostSet& original_host_set, bool locality_weight_aware,\n                   bool scale_locality_weight)\n        : HostSetImpl(original_host_set.priority(), original_host_set.overprovisioningFactor()),\n          original_host_set_(original_host_set), locality_weight_aware_(locality_weight_aware),\n          scale_locality_weight_(scale_locality_weight) {}\n\n    void update(const HostVector& hosts_added, const HostVector& hosts_removed,\n                HostPredicate predicate);\n    LocalityWeightsConstSharedPtr\n    determineLocalityWeights(const HostsPerLocality& hosts_per_locality) const;\n\n  private:\n    const HostSet& original_host_set_;\n    const bool locality_weight_aware_;\n    const bool scale_locality_weight_;\n  };\n\n  // Represents a subset of an original PrioritySet.\n  class PrioritySubsetImpl : public PrioritySetImpl {\n  public:\n    PrioritySubsetImpl(const SubsetLoadBalancer& subset_lb, HostPredicate predicate,\n                       bool locality_weight_aware, bool scale_locality_weight);\n\n    void update(uint32_t priority, const HostVector& hosts_added, const HostVector& hosts_removed);\n\n    bool empty() { return empty_; }\n\n    void triggerCallbacks() {\n      for (size_t i = 0; i < hostSetsPerPriority().size(); ++i) {\n        runReferenceUpdateCallbacks(i, {}, {});\n      }\n    }\n\n    void updateSubset(uint32_t priority, const HostVector& hosts_added,\n                      const HostVector& hosts_removed, HostPredicate predicate) {\n      reinterpret_cast<HostSubsetImpl*>(host_sets_[priority].get())\n          ->update(hosts_added, hosts_removed, predicate);\n\n 
     runUpdateCallbacks(hosts_added, hosts_removed);\n    }\n\n    // Thread aware LB if applicable.\n    ThreadAwareLoadBalancerPtr thread_aware_lb_;\n    // Current active LB.\n    LoadBalancerPtr lb_;\n\n  protected:\n    HostSetImplPtr createHostSet(uint32_t priority,\n                                 absl::optional<uint32_t> overprovisioning_factor) override;\n\n  private:\n    const PrioritySet& original_priority_set_;\n    const HostPredicate predicate_;\n    const bool locality_weight_aware_;\n    const bool scale_locality_weight_;\n    bool empty_ = true;\n  };\n\n  using HostSubsetImplPtr = std::shared_ptr<HostSubsetImpl>;\n  using PrioritySubsetImplPtr = std::shared_ptr<PrioritySubsetImpl>;\n\n  using SubsetMetadata = std::vector<std::pair<std::string, ProtobufWkt::Value>>;\n\n  class LbSubsetEntry;\n  struct SubsetSelectorMap;\n\n  using LbSubsetEntryPtr = std::shared_ptr<LbSubsetEntry>;\n  using SubsetSelectorMapPtr = std::shared_ptr<SubsetSelectorMap>;\n  using ValueSubsetMap = absl::node_hash_map<HashedValue, LbSubsetEntryPtr>;\n  using LbSubsetMap = absl::node_hash_map<std::string, ValueSubsetMap>;\n  using SubsetSelectorFallbackParamsRef = std::reference_wrapper<SubsetSelectorFallbackParams>;\n\n  class LoadBalancerContextWrapper : public LoadBalancerContext {\n  public:\n    LoadBalancerContextWrapper(LoadBalancerContext* wrapped,\n                               const std::set<std::string>& filtered_metadata_match_criteria_names);\n\n    // LoadBalancerContext\n    absl::optional<uint64_t> computeHashKey() override { return wrapped_->computeHashKey(); }\n    const Router::MetadataMatchCriteria* metadataMatchCriteria() override {\n      return metadata_match_.get();\n    }\n    const Network::Connection* downstreamConnection() const override {\n      return wrapped_->downstreamConnection();\n    }\n    const Http::RequestHeaderMap* downstreamHeaders() const override {\n      return wrapped_->downstreamHeaders();\n    }\n    const 
HealthyAndDegradedLoad& determinePriorityLoad(\n        const PrioritySet& priority_set, const HealthyAndDegradedLoad& original_priority_load,\n        const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) override {\n      return wrapped_->determinePriorityLoad(priority_set, original_priority_load,\n                                             priority_mapping_func);\n    }\n    bool shouldSelectAnotherHost(const Host& host) override {\n      return wrapped_->shouldSelectAnotherHost(host);\n    }\n    uint32_t hostSelectionRetryCount() const override {\n      return wrapped_->hostSelectionRetryCount();\n    }\n    Network::Socket::OptionsSharedPtr upstreamSocketOptions() const override {\n      return wrapped_->upstreamSocketOptions();\n    }\n    Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override {\n      return wrapped_->upstreamTransportSocketOptions();\n    }\n\n  private:\n    LoadBalancerContext* wrapped_;\n    Router::MetadataMatchCriteriaConstPtr metadata_match_;\n  };\n\n  struct SubsetSelectorFallbackParams {\n    envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::\n        LbSubsetSelectorFallbackPolicy fallback_policy_;\n    const std::set<std::string>* fallback_keys_subset_ = nullptr;\n  };\n\n  struct SubsetSelectorMap {\n    absl::node_hash_map<std::string, SubsetSelectorMapPtr> subset_keys_;\n    SubsetSelectorFallbackParams fallback_params_;\n  };\n\n  // Entry in the subset hierarchy.\n  class LbSubsetEntry {\n  public:\n    LbSubsetEntry() = default;\n\n    bool initialized() const { return priority_subset_ != nullptr; }\n    bool active() const { return initialized() && !priority_subset_->empty(); }\n    bool hasChildren() const { return !children_.empty(); }\n\n    LbSubsetMap children_;\n\n    // Only initialized if a match exists at this level.\n    PrioritySubsetImplPtr priority_subset_;\n  };\n\n  // Create filtered default subset (if necessary) and other 
subsets based on current hosts.\n  void refreshSubsets();\n  void refreshSubsets(uint32_t priority);\n\n  // Called by HostSet::MemberUpdateCb\n  void update(uint32_t priority, const HostVector& hosts_added, const HostVector& hosts_removed);\n\n  // Rebuild the map for single_host_per_subset mode.\n  void rebuildSingle();\n\n  void updateFallbackSubset(uint32_t priority, const HostVector& hosts_added,\n                            const HostVector& hosts_removed);\n  void\n  processSubsets(const HostVector& hosts_added, const HostVector& hosts_removed,\n                 std::function<void(LbSubsetEntryPtr)> update_cb,\n                 std::function<void(LbSubsetEntryPtr, HostPredicate, const SubsetMetadata&)> cb);\n\n  HostConstSharedPtr tryChooseHostFromContext(LoadBalancerContext* context, bool& host_chosen);\n  HostConstSharedPtr\n  tryChooseHostFromMetadataMatchCriteriaSingle(const Router::MetadataMatchCriteria& match_criteria,\n                                               bool& host_chosen);\n\n  absl::optional<SubsetSelectorFallbackParamsRef>\n  tryFindSelectorFallbackParams(LoadBalancerContext* context);\n\n  bool hostMatches(const SubsetMetadata& kvs, const Host& host);\n\n  LbSubsetEntryPtr\n  findSubset(const std::vector<Router::MetadataMatchCriterionConstSharedPtr>& matches);\n\n  LbSubsetEntryPtr findOrCreateSubset(LbSubsetMap& subsets, const SubsetMetadata& kvs,\n                                      uint32_t idx);\n  void forEachSubset(LbSubsetMap& subsets, std::function<void(LbSubsetEntryPtr)> cb);\n  void purgeEmptySubsets(LbSubsetMap& subsets);\n\n  std::vector<SubsetMetadata> extractSubsetMetadata(const std::set<std::string>& subset_keys,\n                                                    const Host& host);\n  std::string describeMetadata(const SubsetMetadata& kvs);\n\n  const LoadBalancerType lb_type_;\n  const absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig> lb_ring_hash_config_;\n  const 
absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig> lb_maglev_config_;\n  const absl::optional<envoy::config::cluster::v3::Cluster::LeastRequestLbConfig>\n      least_request_config_;\n  const envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n  ClusterStats& stats_;\n  Stats::Scope& scope_;\n  Runtime::Loader& runtime_;\n  Random::RandomGenerator& random_;\n\n  const envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetFallbackPolicy\n      fallback_policy_;\n  const SubsetMetadata default_subset_metadata_;\n  std::vector<SubsetSelectorPtr> subset_selectors_;\n\n  const PrioritySet& original_priority_set_;\n  const PrioritySet* original_local_priority_set_;\n  Common::CallbackHandle* original_priority_set_callback_handle_;\n\n  LbSubsetEntryPtr subset_any_;\n  LbSubsetEntryPtr fallback_subset_;\n  LbSubsetEntryPtr panic_mode_subset_;\n\n  LbSubsetEntryPtr selector_fallback_subset_default_;\n\n  // Forms a trie-like structure. Requires lexically sorted Host and Route metadata.\n  LbSubsetMap subsets_;\n  // Forms a trie-like structure of lexically sorted keys+fallback policy from subset\n  // selectors configuration\n  SubsetSelectorMapPtr selectors_;\n\n  std::string single_key_;\n  absl::flat_hash_map<HashedValue, HostConstSharedPtr> single_host_per_subset_map_;\n  Stats::Gauge* single_duplicate_stat_{};\n\n  const bool locality_weight_aware_;\n  const bool scale_locality_weight_;\n  const bool list_as_any_;\n\n  friend class SubsetLoadBalancerDescribeMetadataTester;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/thread_aware_lb_impl.cc",
    "content": "#include \"common/upstream/thread_aware_lb_impl.h\"\n\n#include <memory>\n#include <random>\n\nnamespace Envoy {\nnamespace Upstream {\n\n// TODO(mergeconflict): Adjust locality weights for partial availability, as is done in\n//                      HostSetImpl::effectiveLocalityWeight.\nnamespace {\n\nvoid normalizeHostWeights(const HostVector& hosts, double normalized_locality_weight,\n                          NormalizedHostWeightVector& normalized_host_weights,\n                          double& min_normalized_weight, double& max_normalized_weight) {\n  // sum should be at most uint32_t max value, so we can validate it by accumulating into unit64_t\n  // and making sure there was no overflow\n  uint64_t sum = 0;\n  for (const auto& host : hosts) {\n    sum += host->weight();\n    if (sum > std::numeric_limits<uint32_t>::max()) {\n      throw EnvoyException(\n          fmt::format(\"The sum of weights of all upstream hosts in a locality exceeds {}\",\n                      std::numeric_limits<uint32_t>::max()));\n    }\n  }\n\n  for (const auto& host : hosts) {\n    const double weight = host->weight() * normalized_locality_weight / sum;\n    normalized_host_weights.push_back({host, weight});\n    min_normalized_weight = std::min(min_normalized_weight, weight);\n    max_normalized_weight = std::max(max_normalized_weight, weight);\n  }\n}\n\nvoid normalizeLocalityWeights(const HostsPerLocality& hosts_per_locality,\n                              const LocalityWeights& locality_weights,\n                              NormalizedHostWeightVector& normalized_host_weights,\n                              double& min_normalized_weight, double& max_normalized_weight) {\n  ASSERT(locality_weights.size() == hosts_per_locality.get().size());\n\n  // sum should be at most uint32_t max value, so we can validate it by accumulating into unit64_t\n  // and making sure there was no overflow\n  uint64_t sum = 0;\n  for (const auto weight : locality_weights) {\n    
sum += weight;\n    if (sum > std::numeric_limits<uint32_t>::max()) {\n      throw EnvoyException(\n          fmt::format(\"The sum of weights of all localities at the same priority exceeds {}\",\n                      std::numeric_limits<uint32_t>::max()));\n    }\n  }\n\n  // Locality weights (unlike host weights) may be 0. If _all_ locality weights were 0, bail out.\n  if (sum == 0) {\n    return;\n  }\n\n  // Compute normalized weights for all hosts in each locality. If a locality was assigned zero\n  // weight, all hosts in that locality will be skipped.\n  for (LocalityWeights::size_type i = 0; i < locality_weights.size(); ++i) {\n    if (locality_weights[i] != 0) {\n      const HostVector& hosts = hosts_per_locality.get()[i];\n      const double normalized_locality_weight = static_cast<double>(locality_weights[i]) / sum;\n      normalizeHostWeights(hosts, normalized_locality_weight, normalized_host_weights,\n                           min_normalized_weight, max_normalized_weight);\n    }\n  }\n}\n\nvoid normalizeWeights(const HostSet& host_set, bool in_panic,\n                      NormalizedHostWeightVector& normalized_host_weights,\n                      double& min_normalized_weight, double& max_normalized_weight) {\n  if (host_set.localityWeights() == nullptr || host_set.localityWeights()->empty()) {\n    // If we're not dealing with locality weights, just normalize weights for the flat set of hosts.\n    const auto& hosts = in_panic ? host_set.hosts() : host_set.healthyHosts();\n    normalizeHostWeights(hosts, 1.0, normalized_host_weights, min_normalized_weight,\n                         max_normalized_weight);\n  } else {\n    // Otherwise, normalize weights across all localities.\n    const auto& hosts_per_locality =\n        in_panic ? 
host_set.hostsPerLocality() : host_set.healthyHostsPerLocality();\n    normalizeLocalityWeights(hosts_per_locality, *(host_set.localityWeights()),\n                             normalized_host_weights, min_normalized_weight, max_normalized_weight);\n  }\n}\n\n} // namespace\n\nvoid ThreadAwareLoadBalancerBase::initialize() {\n  // TODO(mattklein123): In the future, once initialized and the initial LB is built, it would be\n  // better to use a background thread for computing LB updates. This has the substantial benefit\n  // that if the LB computation thread falls behind, host set updates can be trivially collapsed.\n  // I will look into doing this in a follow up. Doing everything using a background thread heavily\n  // complicated initialization as the load balancer would need its own initialized callback. I\n  // think the synchronous/asynchronous split is probably the best option.\n  priority_set_.addPriorityUpdateCb(\n      [this](uint32_t, const HostVector&, const HostVector&) -> void { refresh(); });\n\n  refresh();\n}\n\nvoid ThreadAwareLoadBalancerBase::refresh() {\n  auto per_priority_state_vector = std::make_shared<std::vector<PerPriorityStatePtr>>(\n      priority_set_.hostSetsPerPriority().size());\n  auto healthy_per_priority_load =\n      std::make_shared<HealthyLoad>(per_priority_load_.healthy_priority_load_);\n  auto degraded_per_priority_load =\n      std::make_shared<DegradedLoad>(per_priority_load_.degraded_priority_load_);\n\n  for (const auto& host_set : priority_set_.hostSetsPerPriority()) {\n    const uint32_t priority = host_set->priority();\n    (*per_priority_state_vector)[priority] = std::make_unique<PerPriorityState>();\n    const auto& per_priority_state = (*per_priority_state_vector)[priority];\n    // Copy panic flag from LoadBalancerBase. 
It is calculated when there is a change\n    // in hosts set or hosts' health.\n    per_priority_state->global_panic_ = per_priority_panic_[priority];\n\n    // Normalize host and locality weights such that the sum of all normalized weights is 1.\n    NormalizedHostWeightVector normalized_host_weights;\n    double min_normalized_weight = 1.0;\n    double max_normalized_weight = 0.0;\n    normalizeWeights(*host_set, per_priority_state->global_panic_, normalized_host_weights,\n                     min_normalized_weight, max_normalized_weight);\n    per_priority_state->current_lb_ = createLoadBalancer(\n        std::move(normalized_host_weights), min_normalized_weight, max_normalized_weight);\n  }\n\n  {\n    absl::WriterMutexLock lock(&factory_->mutex_);\n    factory_->healthy_per_priority_load_ = healthy_per_priority_load;\n    factory_->degraded_per_priority_load_ = degraded_per_priority_load;\n    factory_->per_priority_state_ = per_priority_state_vector;\n  }\n}\n\nHostConstSharedPtr\nThreadAwareLoadBalancerBase::LoadBalancerImpl::chooseHost(LoadBalancerContext* context) {\n  // Make sure we correctly return nullptr for any early chooseHost() calls.\n  if (per_priority_state_ == nullptr) {\n    return nullptr;\n  }\n\n  // If there is no hash in the context, just choose a random value (this effectively becomes\n  // the random LB but it won't crash if someone configures it this way).\n  // computeHashKey() may be computed on demand, so get it only once.\n  absl::optional<uint64_t> hash;\n  if (context) {\n    hash = context->computeHashKey();\n  }\n  const uint64_t h = hash ? 
hash.value() : random_.random();\n\n  const uint32_t priority =\n      LoadBalancerBase::choosePriority(h, *healthy_per_priority_load_, *degraded_per_priority_load_)\n          .first;\n  const auto& per_priority_state = (*per_priority_state_)[priority];\n  if (per_priority_state->global_panic_) {\n    stats_.lb_healthy_panic_.inc();\n  }\n\n  HostConstSharedPtr host;\n  const uint32_t max_attempts = context ? context->hostSelectionRetryCount() + 1 : 1;\n  for (uint32_t i = 0; i < max_attempts; ++i) {\n    host = per_priority_state->current_lb_->chooseHost(h, i);\n\n    // If host selection failed or the host is accepted by the filter, return.\n    // Otherwise, try again.\n    if (!host || !context || !context->shouldSelectAnotherHost(*host)) {\n      return host;\n    }\n  }\n  return host;\n}\n\nLoadBalancerPtr ThreadAwareLoadBalancerBase::LoadBalancerFactoryImpl::create() {\n  auto lb = std::make_unique<LoadBalancerImpl>(stats_, random_);\n\n  // We must protect current_lb_ via a RW lock since it is accessed and written to by multiple\n  // threads. 
All complex processing has already been precalculated however.\n  absl::ReaderMutexLock lock(&mutex_);\n  lb->healthy_per_priority_load_ = healthy_per_priority_load_;\n  lb->degraded_per_priority_load_ = degraded_per_priority_load_;\n  lb->per_priority_state_ = per_priority_state_;\n\n  return lb;\n}\n\ndouble ThreadAwareLoadBalancerBase::BoundedLoadHashingLoadBalancer::hostOverloadFactor(\n    const Host& host, double weight) const {\n  // TODO(scheler): This will not work if rq_active cluster stat is disabled, need to detect\n  // and alert the user if that's the case.\n\n  const uint32_t overall_active = host.cluster().stats().upstream_rq_active_.value();\n  const uint32_t host_active = host.stats().rq_active_.value();\n\n  const uint32_t total_slots = ((overall_active + 1) * hash_balance_factor_ + 99) / 100;\n  const uint32_t slots =\n      std::max(static_cast<uint32_t>(std::ceil(total_slots * weight)), static_cast<uint32_t>(1));\n\n  if (host.stats().rq_active_.value() > slots) {\n    ENVOY_LOG_MISC(\n        debug,\n        \"ThreadAwareLoadBalancerBase::BoundedLoadHashingLoadBalancer::chooseHost: \"\n        \"host {} overloaded; overall_active {}, host_weight {}, host_active {} > slots {}\",\n        host.address()->asString(), overall_active, weight, host_active, slots);\n  }\n  return static_cast<double>(host.stats().rq_active_.value()) / slots;\n}\n\nHostConstSharedPtr\nThreadAwareLoadBalancerBase::BoundedLoadHashingLoadBalancer::chooseHost(uint64_t hash,\n                                                                        uint32_t attempt) const {\n\n  // This is implemented based on the method described in the paper\n  // https://arxiv.org/abs/1608.01350. For the specified `hash_balance_factor`, requests to any\n  // upstream host are capped at `hash_balance_factor/100` times the average number of requests\n  // across the cluster. 
When a request arrives for an upstream host that is currently serving at\n  // its max capacity, linear probing is used to identify an eligible host. Further, the linear\n  // probe is implemented using a random jump on hosts ring/table to identify the eligible host\n  // (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump\n  // avoids the cascading overflow effect when choosing the next host on the ring/table).\n  //\n  // If weights are specified on the hosts, they are respected.\n  //\n  // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor`\n  // results in more hosts being probed, so use a higher value if you require better performance.\n\n  if (normalized_host_weights_.empty()) {\n    return nullptr;\n  }\n\n  HostConstSharedPtr host = hashing_lb_ptr_->chooseHost(hash, attempt);\n  if (host == nullptr) {\n    return nullptr;\n  }\n  const double weight = normalized_host_weights_map_.at(host);\n  double overload_factor = hostOverloadFactor(*host, weight);\n  if (overload_factor <= 1.0) {\n    ENVOY_LOG_MISC(debug,\n                   \"ThreadAwareLoadBalancerBase::BoundedLoadHashingLoadBalancer::chooseHost: \"\n                   \"selected host #{} (attempt:1)\",\n                   host->address()->asString());\n    return host;\n  }\n\n  // When a host is overloaded, we choose the next host in a random manner rather than picking the\n  // next one in the ring. The random sequence is seeded by the hash, so the same input gets the\n  // same sequence of hosts all the time.\n  const uint32_t num_hosts = normalized_host_weights_.size();\n  auto host_index = std::vector<uint32_t>(num_hosts);\n  for (uint32_t i = 0; i < num_hosts; i++) {\n    host_index[i] = i;\n  }\n\n  // Not using Random::RandomGenerator as it does not take a seed. 
Seeded RNG is a requirement\n  // here as we need the same shuffle sequence for the same hash every time.\n  // Further, not using std::default_random_engine and std::uniform_int_distribution as they\n  // are not consistent across Linux and Windows platforms.\n  const uint64_t seed = hash;\n  std::mt19937 random(seed);\n\n  // generates a random number in the range [0,k) uniformly.\n  auto uniform_int = [](std::mt19937& random, uint32_t k) -> uint32_t {\n    uint32_t x = k;\n    while (x >= k) {\n      x = random() / ((static_cast<uint64_t>(random.max()) + 1u) / k);\n    }\n    return x;\n  };\n\n  HostConstSharedPtr alt_host, least_overloaded_host = host;\n  double least_overload_factor = overload_factor;\n  for (uint32_t i = 0; i < num_hosts; i++) {\n    // The random shuffle algorithm\n    const uint32_t j = uniform_int(random, num_hosts - i);\n    std::swap(host_index[i], host_index[i + j]);\n\n    const uint32_t k = host_index[i];\n    alt_host = normalized_host_weights_[k].first;\n    if (alt_host == host) {\n      continue;\n    }\n\n    const double alt_host_weight = normalized_host_weights_[k].second;\n    overload_factor = hostOverloadFactor(*alt_host, alt_host_weight);\n\n    if (overload_factor <= 1.0) {\n      ENVOY_LOG_MISC(debug,\n                     \"ThreadAwareLoadBalancerBase::BoundedLoadHashingLoadBalancer::chooseHost: \"\n                     \"selected host #{}:{} (attempt:{})\",\n                     k, alt_host->address()->asString(), i + 2);\n      return alt_host;\n    }\n\n    if (least_overload_factor > overload_factor) {\n      least_overloaded_host = alt_host;\n      least_overload_factor = overload_factor;\n    }\n  }\n\n  return least_overloaded_host;\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/thread_aware_lb_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/upstream/load_balancer_impl.h\"\n\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nusing NormalizedHostWeightVector = std::vector<std::pair<HostConstSharedPtr, double>>;\nusing NormalizedHostWeightMap = std::map<HostConstSharedPtr, double>;\n\nclass ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareLoadBalancer {\npublic:\n  /**\n   * Base class for a hashing load balancer implemented for use in a thread aware load balancer.\n   * TODO(mattklein123): Currently only RingHash and Maglev use the thread aware load balancer.\n   *                     The hash is pre-computed prior to getting to the real load balancer for\n   *                     use in priority selection. In the future we likely we will want to pass\n   *                     through the full load balancer context in case a future implementation\n   *                     wants to use it.\n   */\n  class HashingLoadBalancer {\n  public:\n    virtual ~HashingLoadBalancer() = default;\n    virtual HostConstSharedPtr chooseHost(uint64_t hash, uint32_t attempt) const PURE;\n  };\n  using HashingLoadBalancerSharedPtr = std::shared_ptr<HashingLoadBalancer>;\n\n  /**\n   * Class for consistent hashing load balancer (CH-LB) with bounded loads.\n   * It is common to both RingHash and Maglev load balancers, because the logic of selecting the\n   * next host when one is overloaded is independent of the CH-LB type.\n   */\n  class BoundedLoadHashingLoadBalancer : public HashingLoadBalancer {\n  public:\n    BoundedLoadHashingLoadBalancer(HashingLoadBalancerSharedPtr hashing_lb_ptr,\n                                   NormalizedHostWeightVector normalized_host_weights,\n                                   uint32_t hash_balance_factor)\n        : normalized_host_weights_map_(initNormalizedHostWeightMap(normalized_host_weights)),\n          
hashing_lb_ptr_(std::move(hashing_lb_ptr)),\n          normalized_host_weights_(std::move(normalized_host_weights)),\n          hash_balance_factor_(hash_balance_factor) {\n      ASSERT(hashing_lb_ptr_ != nullptr);\n      ASSERT(hash_balance_factor > 0);\n    }\n    HostConstSharedPtr chooseHost(uint64_t hash, uint32_t attempt) const override;\n\n  protected:\n    virtual double hostOverloadFactor(const Host& host, double weight) const;\n    const NormalizedHostWeightMap normalized_host_weights_map_;\n\n  private:\n    const NormalizedHostWeightMap\n    initNormalizedHostWeightMap(const NormalizedHostWeightVector& normalized_host_weights) {\n      NormalizedHostWeightMap normalized_host_weights_map;\n      for (auto const& item : normalized_host_weights) {\n        normalized_host_weights_map[item.first] = item.second;\n      }\n      return normalized_host_weights_map;\n    }\n    const HashingLoadBalancerSharedPtr hashing_lb_ptr_;\n    const NormalizedHostWeightVector normalized_host_weights_;\n    const uint32_t hash_balance_factor_;\n  };\n  // Upstream::ThreadAwareLoadBalancer\n  LoadBalancerFactorySharedPtr factory() override { return factory_; }\n  void initialize() override;\n\n  // Upstream::LoadBalancerBase\n  HostConstSharedPtr chooseHostOnce(LoadBalancerContext*) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  // Prefetch not implemented for hash based load balancing\n  HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; }\n\nprotected:\n  ThreadAwareLoadBalancerBase(\n      const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime,\n      Random::RandomGenerator& random,\n      const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n      : LoadBalancerBase(priority_set, stats, runtime, random, common_config),\n        factory_(new LoadBalancerFactoryImpl(stats, random)) {}\n\nprivate:\n  struct PerPriorityState {\n    std::shared_ptr<HashingLoadBalancer> current_lb_;\n    
bool global_panic_{};\n  };\n  using PerPriorityStatePtr = std::unique_ptr<PerPriorityState>;\n\n  struct LoadBalancerImpl : public LoadBalancer {\n    LoadBalancerImpl(ClusterStats& stats, Random::RandomGenerator& random)\n        : stats_(stats), random_(random) {}\n\n    // Upstream::LoadBalancer\n    HostConstSharedPtr chooseHost(LoadBalancerContext* context) override;\n    // Prefetch not implemented for hash based load balancing\n    HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; }\n\n    ClusterStats& stats_;\n    Random::RandomGenerator& random_;\n    std::shared_ptr<std::vector<PerPriorityStatePtr>> per_priority_state_;\n    std::shared_ptr<HealthyLoad> healthy_per_priority_load_;\n    std::shared_ptr<DegradedLoad> degraded_per_priority_load_;\n  };\n\n  struct LoadBalancerFactoryImpl : public LoadBalancerFactory {\n    LoadBalancerFactoryImpl(ClusterStats& stats, Random::RandomGenerator& random)\n        : stats_(stats), random_(random) {}\n\n    // Upstream::LoadBalancerFactory\n    LoadBalancerPtr create() override;\n\n    ClusterStats& stats_;\n    Random::RandomGenerator& random_;\n    absl::Mutex mutex_;\n    std::shared_ptr<std::vector<PerPriorityStatePtr>> per_priority_state_ ABSL_GUARDED_BY(mutex_);\n    // This is split out of PerPriorityState so LoadBalancerBase::ChoosePriority can be reused.\n    std::shared_ptr<HealthyLoad> healthy_per_priority_load_ ABSL_GUARDED_BY(mutex_);\n    std::shared_ptr<DegradedLoad> degraded_per_priority_load_ ABSL_GUARDED_BY(mutex_);\n  };\n\n  virtual HashingLoadBalancerSharedPtr\n  createLoadBalancer(const NormalizedHostWeightVector& normalized_host_weights,\n                     double min_normalized_weight, double max_normalized_weight) PURE;\n  void refresh();\n\n  std::shared_ptr<LoadBalancerFactoryImpl> factory_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/transport_socket_match_impl.cc",
    "content": "#include \"common/upstream/transport_socket_match_impl.h\"\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/server/transport_socket_config.h\"\n\n#include \"common/config/utility.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nTransportSocketMatcherImpl::TransportSocketMatcherImpl(\n    const Protobuf::RepeatedPtrField<envoy::config::cluster::v3::Cluster::TransportSocketMatch>&\n        socket_matches,\n    Server::Configuration::TransportSocketFactoryContext& factory_context,\n    Network::TransportSocketFactoryPtr& default_factory, Stats::Scope& stats_scope)\n    : stats_scope_(stats_scope),\n      default_match_(\"default\", std::move(default_factory), generateStats(\"default\")) {\n  for (const auto& socket_match : socket_matches) {\n    const auto& socket_config = socket_match.transport_socket();\n    auto& config_factory = Config::Utility::getAndCheckFactory<\n        Server::Configuration::UpstreamTransportSocketConfigFactory>(socket_config);\n    ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(\n        socket_config, factory_context.messageValidationVisitor(), config_factory);\n    FactoryMatch factory_match(\n        socket_match.name(), config_factory.createTransportSocketFactory(*message, factory_context),\n        generateStats(absl::StrCat(socket_match.name(), \".\")));\n    for (const auto& kv : socket_match.match().fields()) {\n      factory_match.label_set.emplace_back(kv.first, kv.second);\n    }\n    matches_.emplace_back(std::move(factory_match));\n  }\n}\n\nTransportSocketMatchStats TransportSocketMatcherImpl::generateStats(const std::string& prefix) {\n  return {ALL_TRANSPORT_SOCKET_MATCH_STATS(POOL_COUNTER_PREFIX(stats_scope_, prefix))};\n}\n\nTransportSocketMatcher::MatchData\nTransportSocketMatcherImpl::resolve(const envoy::config::core::v3::Metadata* metadata) const {\n  for (const auto& match : matches_) {\n    if 
(Config::Metadata::metadataLabelMatch(\n            match.label_set, metadata,\n            Envoy::Config::MetadataFilters::get().ENVOY_TRANSPORT_SOCKET_MATCH, false)) {\n      return MatchData(*match.factory, match.stats, match.name);\n    }\n  }\n  return MatchData(*default_match_.factory, default_match_.stats, default_match_.name);\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/transport_socket_match_impl.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/host_description.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/config/metadata.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass TransportSocketMatcherImpl : public Logger::Loggable<Logger::Id::upstream>,\n                                   public TransportSocketMatcher {\npublic:\n  struct FactoryMatch {\n    FactoryMatch(std::string match_name, Network::TransportSocketFactoryPtr socket_factory,\n                 TransportSocketMatchStats match_stats)\n        : name(std::move(match_name)), factory(std::move(socket_factory)), stats(match_stats) {}\n    const std::string name;\n    Network::TransportSocketFactoryPtr factory;\n    Config::Metadata::LabelSet label_set;\n    mutable TransportSocketMatchStats stats;\n  };\n\n  TransportSocketMatcherImpl(\n      const Protobuf::RepeatedPtrField<envoy::config::cluster::v3::Cluster::TransportSocketMatch>&\n          socket_matches,\n      Server::Configuration::TransportSocketFactoryContext& factory_context,\n      Network::TransportSocketFactoryPtr& default_factory, Stats::Scope& stats_scope);\n\n  MatchData resolve(const envoy::config::core::v3::Metadata* metadata) const override;\n\nprotected:\n  TransportSocketMatchStats generateStats(const std::string& prefix);\n  Stats::Scope& stats_scope_;\n  FactoryMatch default_match_;\n  std::vector<FactoryMatch> matches_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/upstream_impl.cc",
    "content": "#include \"common/upstream/upstream_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <limits>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/circuit_breaker.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/health_checker.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/http1/codec_stats.h\"\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/resolver_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/socket_option_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/config_utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/upstream/eds.h\"\n#include \"common/upstream/health_checker_impl.h\"\n#include \"common/upstream/logical_dns_cluster.h\"\n#include \"common/upstream/original_dst_cluster.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"extensions/filters/network/common/utility.h\"\n#include 
\"extensions/transport_sockets/well_known_names.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nconst Network::Address::InstanceConstSharedPtr\ngetSourceAddress(const envoy::config::cluster::v3::Cluster& cluster,\n                 const envoy::config::core::v3::BindConfig& bind_config) {\n  // The source address from cluster config takes precedence.\n  if (cluster.upstream_bind_config().has_source_address()) {\n    return Network::Address::resolveProtoSocketAddress(\n        cluster.upstream_bind_config().source_address());\n  }\n  // If there's no source address in the cluster config, use any default from the bootstrap proto.\n  if (bind_config.has_source_address()) {\n    return Network::Address::resolveProtoSocketAddress(bind_config.source_address());\n  }\n\n  return nullptr;\n}\n\nuint64_t parseFeatures(const envoy::config::cluster::v3::Cluster& config) {\n  uint64_t features = 0;\n  if (config.has_http2_protocol_options()) {\n    features |= ClusterInfoImpl::Features::HTTP2;\n  }\n  if (config.protocol_selection() == envoy::config::cluster::v3::Cluster::USE_DOWNSTREAM_PROTOCOL) {\n    features |= ClusterInfoImpl::Features::USE_DOWNSTREAM_PROTOCOL;\n  }\n  if (config.close_connections_on_host_health_failure()) {\n    features |= ClusterInfoImpl::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE;\n  }\n  return features;\n}\n\nNetwork::TcpKeepaliveConfig\nparseTcpKeepaliveConfig(const envoy::config::cluster::v3::Cluster& config) {\n  const envoy::config::core::v3::TcpKeepalive& options =\n      config.upstream_connection_options().tcp_keepalive();\n  return Network::TcpKeepaliveConfig{\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(options, keepalive_probes, absl::optional<uint32_t>()),\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(options, keepalive_time, absl::optional<uint32_t>()),\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(options, keepalive_interval, 
absl::optional<uint32_t>())};\n}\n\nconst Network::ConnectionSocket::OptionsSharedPtr\nparseClusterSocketOptions(const envoy::config::cluster::v3::Cluster& config,\n                          const envoy::config::core::v3::BindConfig bind_config) {\n  Network::ConnectionSocket::OptionsSharedPtr cluster_options =\n      std::make_shared<Network::ConnectionSocket::Options>();\n  // The process-wide `signal()` handling may fail to handle SIGPIPE if overridden\n  // in the process (i.e., on a mobile client). Some OSes support handling it at the socket layer:\n  if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) {\n    Network::Socket::appendOptions(cluster_options,\n                                   Network::SocketOptionFactory::buildSocketNoSigpipeOptions());\n  }\n  // Cluster IP_FREEBIND settings, when set, will override the cluster manager wide settings.\n  if ((bind_config.freebind().value() && !config.upstream_bind_config().has_freebind()) ||\n      config.upstream_bind_config().freebind().value()) {\n    Network::Socket::appendOptions(cluster_options,\n                                   Network::SocketOptionFactory::buildIpFreebindOptions());\n  }\n  if (config.upstream_connection_options().has_tcp_keepalive()) {\n    Network::Socket::appendOptions(\n        cluster_options,\n        Network::SocketOptionFactory::buildTcpKeepaliveOptions(parseTcpKeepaliveConfig(config)));\n  }\n  // Cluster socket_options trump cluster manager wide.\n  if (bind_config.socket_options().size() + config.upstream_bind_config().socket_options().size() >\n      0) {\n    auto socket_options = !config.upstream_bind_config().socket_options().empty()\n                              ? 
config.upstream_bind_config().socket_options()\n                              : bind_config.socket_options();\n    Network::Socket::appendOptions(\n        cluster_options, Network::SocketOptionFactory::buildLiteralOptions(socket_options));\n  }\n  if (cluster_options->empty()) {\n    return nullptr;\n  }\n  return cluster_options;\n}\n\nProtocolOptionsConfigConstSharedPtr\ncreateProtocolOptionsConfig(const std::string& name, const ProtobufWkt::Any& typed_config,\n                            const ProtobufWkt::Struct& config,\n                            Server::Configuration::ProtocolOptionsFactoryContext& factory_context) {\n  Server::Configuration::ProtocolOptionsFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::NamedNetworkFilterConfigFactory>::getFactory(\n          name);\n  if (factory == nullptr) {\n    factory =\n        Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n            name);\n  }\n\n  if (factory == nullptr) {\n    throw EnvoyException(fmt::format(\n        \"Didn't find a registered network or http filter implementation for name: '{}'\", name));\n  }\n\n  ProtobufTypes::MessagePtr proto_config = factory->createEmptyProtocolOptionsProto();\n\n  if (proto_config == nullptr) {\n    throw EnvoyException(fmt::format(\"filter {} does not support protocol options\", name));\n  }\n\n  Envoy::Config::Utility::translateOpaqueConfig(\n      typed_config, config, factory_context.messageValidationVisitor(), *proto_config);\n\n  return factory->createProtocolOptionsConfig(*proto_config, factory_context);\n}\n\nstd::map<std::string, ProtocolOptionsConfigConstSharedPtr> parseExtensionProtocolOptions(\n    const envoy::config::cluster::v3::Cluster& config,\n    Server::Configuration::ProtocolOptionsFactoryContext& factory_context) {\n  if (!config.typed_extension_protocol_options().empty() &&\n      !config.hidden_envoy_deprecated_extension_protocol_options().empty()) {\n    throw 
EnvoyException(\"Only one of typed_extension_protocol_options or \"\n                         \"extension_protocol_options can be specified\");\n  }\n\n  std::map<std::string, ProtocolOptionsConfigConstSharedPtr> options;\n\n  for (const auto& it : config.typed_extension_protocol_options()) {\n    // TODO(zuercher): canonicalization may be removed when deprecated filter names are removed\n    // We only handle deprecated network filter names here because no existing HTTP filter has\n    // protocol options.\n    auto& name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName(it.first);\n\n    auto object = createProtocolOptionsConfig(\n        name, it.second, ProtobufWkt::Struct::default_instance(), factory_context);\n    if (object != nullptr) {\n      options[name] = std::move(object);\n    }\n  }\n\n  for (const auto& it : config.hidden_envoy_deprecated_extension_protocol_options()) {\n    // TODO(zuercher): canonicalization may be removed when deprecated filter names are removed\n    // We only handle deprecated network filter names here because no existing HTTP filter has\n    // protocol options.\n    auto& name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName(it.first);\n\n    auto object = createProtocolOptionsConfig(name, ProtobufWkt::Any::default_instance(), it.second,\n                                              factory_context);\n    if (object != nullptr) {\n      options[name] = std::move(object);\n    }\n  }\n\n  return options;\n}\n\n// Updates the health flags for an existing host to match the new host.\n// @param updated_host the new host to read health flag values from.\n// @param existing_host the host to update.\n// @param flag the health flag to update.\n// @return bool whether the flag update caused the host health to change.\nbool updateHealthFlag(const Host& updated_host, Host& existing_host, Host::HealthFlag flag) {\n  // Check if the health flag has changed.\n  if 
(existing_host.healthFlagGet(flag) != updated_host.healthFlagGet(flag)) {\n    // Keep track of the previous health value of the host.\n    const auto previous_health = existing_host.health();\n\n    if (updated_host.healthFlagGet(flag)) {\n      existing_host.healthFlagSet(flag);\n    } else {\n      existing_host.healthFlagClear(flag);\n    }\n\n    // Rebuild if changing the flag affected the host health.\n    return previous_health != existing_host.health();\n  }\n\n  return false;\n}\n\n// Converts a set of hosts into a HostVector, excluding certain hosts.\n// @param hosts hosts to convert\n// @param excluded_hosts hosts to exclude from the resulting vector.\nHostVector filterHosts(const absl::node_hash_set<HostSharedPtr>& hosts,\n                       const absl::node_hash_set<HostSharedPtr>& excluded_hosts) {\n  HostVector net_hosts;\n  net_hosts.reserve(hosts.size());\n\n  for (const auto& host : hosts) {\n    if (excluded_hosts.find(host) == excluded_hosts.end()) {\n      net_hosts.emplace_back(host);\n    }\n  }\n\n  return net_hosts;\n}\n\n} // namespace\n\nHostDescriptionImpl::HostDescriptionImpl(\n    ClusterInfoConstSharedPtr cluster, const std::string& hostname,\n    Network::Address::InstanceConstSharedPtr dest_address, MetadataConstSharedPtr metadata,\n    const envoy::config::core::v3::Locality& locality,\n    const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig& health_check_config,\n    uint32_t priority)\n    : cluster_(cluster), hostname_(hostname),\n      health_checks_hostname_(health_check_config.hostname()), address_(dest_address),\n      canary_(Config::Metadata::metadataValue(metadata.get(),\n                                              Config::MetadataFilters::get().ENVOY_LB,\n                                              Config::MetadataEnvoyLbKeys::get().CANARY)\n                  .bool_value()),\n      metadata_(metadata), locality_(locality),\n      locality_zone_stat_name_(locality.zone(), 
cluster->statsScope().symbolTable()),\n      priority_(priority),\n      socket_factory_(resolveTransportSocketFactory(dest_address, metadata_.get())) {\n  if (health_check_config.port_value() != 0 && dest_address->type() != Network::Address::Type::Ip) {\n    // Setting the health check port to non-0 only works for IP-type addresses. Setting the port\n    // for a pipe address is a misconfiguration. Throw an exception.\n    throw EnvoyException(\n        fmt::format(\"Invalid host configuration: non-zero port for non-IP address\"));\n  }\n  health_check_address_ =\n      health_check_config.port_value() == 0\n          ? dest_address\n          : Network::Utility::getAddressWithPort(*dest_address, health_check_config.port_value());\n}\n\nNetwork::TransportSocketFactory& HostDescriptionImpl::resolveTransportSocketFactory(\n    const Network::Address::InstanceConstSharedPtr& dest_address,\n    const envoy::config::core::v3::Metadata* metadata) const {\n  auto match = cluster_->transportSocketMatcher().resolve(metadata);\n  match.stats_.total_match_count_.inc();\n  ENVOY_LOG(debug, \"transport socket match, socket {} selected for host with address {}\",\n            match.name_, dest_address ? 
dest_address->asString() : \"empty\");\n\n  return match.factory_;\n}\n\nHost::CreateConnectionData HostImpl::createConnection(\n    Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options,\n    Network::TransportSocketOptionsSharedPtr transport_socket_options) const {\n  return {createConnection(dispatcher, *cluster_, address_, socket_factory_, options,\n                           transport_socket_options),\n          shared_from_this()};\n}\n\nvoid HostImpl::setEdsHealthFlag(envoy::config::core::v3::HealthStatus health_status) {\n  switch (health_status) {\n  case envoy::config::core::v3::UNHEALTHY:\n    FALLTHRU;\n  case envoy::config::core::v3::DRAINING:\n    FALLTHRU;\n  case envoy::config::core::v3::TIMEOUT:\n    healthFlagSet(Host::HealthFlag::FAILED_EDS_HEALTH);\n    break;\n  case envoy::config::core::v3::DEGRADED:\n    healthFlagSet(Host::HealthFlag::DEGRADED_EDS_HEALTH);\n    break;\n  default:;\n    break;\n    // No health flags should be set.\n  }\n}\n\nHost::CreateConnectionData HostImpl::createHealthCheckConnection(\n    Event::Dispatcher& dispatcher,\n    Network::TransportSocketOptionsSharedPtr transport_socket_options,\n    const envoy::config::core::v3::Metadata* metadata) const {\n\n  Network::TransportSocketFactory& factory =\n      (metadata != nullptr) ? 
resolveTransportSocketFactory(healthCheckAddress(), metadata)\n                            : socket_factory_;\n  return {createConnection(dispatcher, *cluster_, healthCheckAddress(), factory, nullptr,\n                           transport_socket_options),\n          shared_from_this()};\n}\n\nNetwork::ClientConnectionPtr\nHostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& cluster,\n                           const Network::Address::InstanceConstSharedPtr& address,\n                           Network::TransportSocketFactory& socket_factory,\n                           const Network::ConnectionSocket::OptionsSharedPtr& options,\n                           Network::TransportSocketOptionsSharedPtr transport_socket_options) {\n  Network::ConnectionSocket::OptionsSharedPtr connection_options;\n  if (cluster.clusterSocketOptions() != nullptr) {\n    if (options) {\n      connection_options = std::make_shared<Network::ConnectionSocket::Options>();\n      *connection_options = *options;\n      std::copy(cluster.clusterSocketOptions()->begin(), cluster.clusterSocketOptions()->end(),\n                std::back_inserter(*connection_options));\n    } else {\n      connection_options = cluster.clusterSocketOptions();\n    }\n  } else {\n    connection_options = options;\n  }\n  ASSERT(!address->envoyInternalAddress());\n  Network::ClientConnectionPtr connection = dispatcher.createClientConnection(\n      address, cluster.sourceAddress(),\n      socket_factory.createTransportSocket(std::move(transport_socket_options)),\n      connection_options);\n  connection->setBufferLimits(cluster.perConnectionBufferLimitBytes());\n  cluster.createNetworkFilterChain(*connection);\n  return connection;\n}\n\nvoid HostImpl::weight(uint32_t new_weight) { weight_ = std::max(1U, new_weight); }\n\nstd::vector<HostsPerLocalityConstSharedPtr> HostsPerLocalityImpl::filter(\n    const std::vector<std::function<bool(const Host&)>>& predicates) const {\n  // We keep two lists: 
one for being able to mutate the clone and one for returning to the caller.\n  // Creating them both at the start avoids iterating over the mutable values at the end to convert\n  // them to a const pointer.\n  std::vector<std::shared_ptr<HostsPerLocalityImpl>> mutable_clones;\n  std::vector<HostsPerLocalityConstSharedPtr> filtered_clones;\n\n  for (size_t i = 0; i < predicates.size(); ++i) {\n    mutable_clones.emplace_back(std::make_shared<HostsPerLocalityImpl>());\n    filtered_clones.emplace_back(mutable_clones.back());\n    mutable_clones.back()->local_ = local_;\n  }\n\n  for (const auto& hosts_locality : hosts_per_locality_) {\n    std::vector<HostVector> current_locality_hosts;\n    current_locality_hosts.resize(predicates.size());\n\n    // Since # of hosts >> # of predicates, we iterate over the hosts in the outer loop.\n    for (const auto& host : hosts_locality) {\n      for (size_t i = 0; i < predicates.size(); ++i) {\n        if (predicates[i](*host)) {\n          current_locality_hosts[i].emplace_back(host);\n        }\n      }\n    }\n\n    for (size_t i = 0; i < predicates.size(); ++i) {\n      mutable_clones[i]->hosts_per_locality_.push_back(std::move(current_locality_hosts[i]));\n    }\n  }\n\n  return filtered_clones;\n}\n\nvoid HostSetImpl::updateHosts(PrioritySet::UpdateHostsParams&& update_hosts_params,\n                              LocalityWeightsConstSharedPtr locality_weights,\n                              const HostVector& hosts_added, const HostVector& hosts_removed,\n                              absl::optional<uint32_t> overprovisioning_factor) {\n  if (overprovisioning_factor.has_value()) {\n    ASSERT(overprovisioning_factor.value() > 0);\n    overprovisioning_factor_ = overprovisioning_factor.value();\n  }\n  hosts_ = std::move(update_hosts_params.hosts);\n  healthy_hosts_ = std::move(update_hosts_params.healthy_hosts);\n  degraded_hosts_ = std::move(update_hosts_params.degraded_hosts);\n  excluded_hosts_ = 
std::move(update_hosts_params.excluded_hosts);\n  hosts_per_locality_ = std::move(update_hosts_params.hosts_per_locality);\n  healthy_hosts_per_locality_ = std::move(update_hosts_params.healthy_hosts_per_locality);\n  degraded_hosts_per_locality_ = std::move(update_hosts_params.degraded_hosts_per_locality);\n  excluded_hosts_per_locality_ = std::move(update_hosts_params.excluded_hosts_per_locality);\n  locality_weights_ = std::move(locality_weights);\n\n  rebuildLocalityScheduler(healthy_locality_scheduler_, healthy_locality_entries_,\n                           *healthy_hosts_per_locality_, healthy_hosts_->get(), hosts_per_locality_,\n                           excluded_hosts_per_locality_, locality_weights_,\n                           overprovisioning_factor_);\n  rebuildLocalityScheduler(degraded_locality_scheduler_, degraded_locality_entries_,\n                           *degraded_hosts_per_locality_, degraded_hosts_->get(),\n                           hosts_per_locality_, excluded_hosts_per_locality_, locality_weights_,\n                           overprovisioning_factor_);\n\n  runUpdateCallbacks(hosts_added, hosts_removed);\n}\n\nvoid HostSetImpl::rebuildLocalityScheduler(\n    std::unique_ptr<EdfScheduler<LocalityEntry>>& locality_scheduler,\n    std::vector<std::shared_ptr<LocalityEntry>>& locality_entries,\n    const HostsPerLocality& eligible_hosts_per_locality, const HostVector& eligible_hosts,\n    HostsPerLocalityConstSharedPtr all_hosts_per_locality,\n    HostsPerLocalityConstSharedPtr excluded_hosts_per_locality,\n    LocalityWeightsConstSharedPtr locality_weights, uint32_t overprovisioning_factor) {\n  // Rebuild the locality scheduler by computing the effective weight of each\n  // locality in this priority. The scheduler is reset by default, and is rebuilt only if we have\n  // locality weights (i.e. 
using EDS) and there is at least one eligible host in this priority.\n  //\n  // We omit building a scheduler when there are zero eligible hosts in the priority as\n  // all the localities will have zero effective weight. At selection time, we'll either select\n  // from a different scheduler or there will be no available hosts in the priority. At that point\n  // we'll rely on other mechanisms such as panic mode to select a host, none of which rely on the\n  // scheduler.\n  //\n  // TODO(htuch): if the underlying locality index ->\n  // envoy::config::core::v3::Locality hasn't changed in hosts_/healthy_hosts_/degraded_hosts_, we\n  // could just update locality_weight_ without rebuilding. Similar to how host\n  // level WRR works, we would age out the existing entries via picks and lazily\n  // apply the new weights.\n  locality_scheduler = nullptr;\n  if (all_hosts_per_locality != nullptr && locality_weights != nullptr &&\n      !locality_weights->empty() && !eligible_hosts.empty()) {\n    locality_scheduler = std::make_unique<EdfScheduler<LocalityEntry>>();\n    locality_entries.clear();\n    for (uint32_t i = 0; i < all_hosts_per_locality->get().size(); ++i) {\n      const double effective_weight = effectiveLocalityWeight(\n          i, eligible_hosts_per_locality, *excluded_hosts_per_locality, *all_hosts_per_locality,\n          *locality_weights, overprovisioning_factor);\n      if (effective_weight > 0) {\n        locality_entries.emplace_back(std::make_shared<LocalityEntry>(i, effective_weight));\n        locality_scheduler->add(effective_weight, locality_entries.back());\n      }\n    }\n    // If all effective weights were zero, reset the scheduler.\n    if (locality_scheduler->empty()) {\n      locality_scheduler = nullptr;\n    }\n  }\n}\n\nabsl::optional<uint32_t> HostSetImpl::chooseHealthyLocality() {\n  return chooseLocality(healthy_locality_scheduler_.get());\n}\n\nabsl::optional<uint32_t> HostSetImpl::chooseDegradedLocality() {\n  return 
chooseLocality(degraded_locality_scheduler_.get());\n}\n\nabsl::optional<uint32_t>\nHostSetImpl::chooseLocality(EdfScheduler<LocalityEntry>* locality_scheduler) {\n  if (locality_scheduler == nullptr) {\n    return {};\n  }\n  const std::shared_ptr<LocalityEntry> locality = locality_scheduler->pickAndAdd(\n      [](const LocalityEntry& locality) { return locality.effective_weight_; });\n  // We don't build a schedule if there are no weighted localities, so we should always succeed.\n  ASSERT(locality != nullptr);\n  // If we picked it before, its weight must have been positive.\n  ASSERT(locality->effective_weight_ > 0);\n  return locality->index_;\n}\n\nPrioritySet::UpdateHostsParams\nHostSetImpl::updateHostsParams(HostVectorConstSharedPtr hosts,\n                               HostsPerLocalityConstSharedPtr hosts_per_locality,\n                               HealthyHostVectorConstSharedPtr healthy_hosts,\n                               HostsPerLocalityConstSharedPtr healthy_hosts_per_locality,\n                               DegradedHostVectorConstSharedPtr degraded_hosts,\n                               HostsPerLocalityConstSharedPtr degraded_hosts_per_locality,\n                               ExcludedHostVectorConstSharedPtr excluded_hosts,\n                               HostsPerLocalityConstSharedPtr excluded_hosts_per_locality) {\n  return PrioritySet::UpdateHostsParams{std::move(hosts),\n                                        std::move(healthy_hosts),\n                                        std::move(degraded_hosts),\n                                        std::move(excluded_hosts),\n                                        std::move(hosts_per_locality),\n                                        std::move(healthy_hosts_per_locality),\n                                        std::move(degraded_hosts_per_locality),\n                                        std::move(excluded_hosts_per_locality)};\n}\n\nPrioritySet::UpdateHostsParams 
HostSetImpl::updateHostsParams(const HostSet& host_set) {\n  return updateHostsParams(host_set.hostsPtr(), host_set.hostsPerLocalityPtr(),\n                           host_set.healthyHostsPtr(), host_set.healthyHostsPerLocalityPtr(),\n                           host_set.degradedHostsPtr(), host_set.degradedHostsPerLocalityPtr(),\n                           host_set.excludedHostsPtr(), host_set.excludedHostsPerLocalityPtr());\n}\nPrioritySet::UpdateHostsParams\nHostSetImpl::partitionHosts(HostVectorConstSharedPtr hosts,\n                            HostsPerLocalityConstSharedPtr hosts_per_locality) {\n  auto partitioned_hosts = ClusterImplBase::partitionHostList(*hosts);\n  auto healthy_degraded_excluded_hosts_per_locality =\n      ClusterImplBase::partitionHostsPerLocality(*hosts_per_locality);\n\n  return updateHostsParams(std::move(hosts), std::move(hosts_per_locality),\n                           std::move(std::get<0>(partitioned_hosts)),\n                           std::move(std::get<0>(healthy_degraded_excluded_hosts_per_locality)),\n                           std::move(std::get<1>(partitioned_hosts)),\n                           std::move(std::get<1>(healthy_degraded_excluded_hosts_per_locality)),\n                           std::move(std::get<2>(partitioned_hosts)),\n                           std::move(std::get<2>(healthy_degraded_excluded_hosts_per_locality)));\n}\n\ndouble HostSetImpl::effectiveLocalityWeight(uint32_t index,\n                                            const HostsPerLocality& eligible_hosts_per_locality,\n                                            const HostsPerLocality& excluded_hosts_per_locality,\n                                            const HostsPerLocality& all_hosts_per_locality,\n                                            const LocalityWeights& locality_weights,\n                                            uint32_t overprovisioning_factor) {\n  const auto& locality_eligible_hosts = eligible_hosts_per_locality.get()[index];\n 
 const uint32_t excluded_count = excluded_hosts_per_locality.get().size() > index\n                                      ? excluded_hosts_per_locality.get()[index].size()\n                                      : 0;\n  const auto host_count = all_hosts_per_locality.get()[index].size() - excluded_count;\n  if (host_count == 0) {\n    return 0.0;\n  }\n  const double locality_availability_ratio = 1.0 * locality_eligible_hosts.size() / host_count;\n  const uint32_t weight = locality_weights[index];\n  // Availability ranges from 0-1.0, and is the ratio of eligible hosts to total hosts, modified by\n  // the overprovisioning factor.\n  const double effective_locality_availability_ratio =\n      std::min(1.0, (overprovisioning_factor / 100.0) * locality_availability_ratio);\n  return weight * effective_locality_availability_ratio;\n}\n\nconst HostSet&\nPrioritySetImpl::getOrCreateHostSet(uint32_t priority,\n                                    absl::optional<uint32_t> overprovisioning_factor) {\n  if (host_sets_.size() < priority + 1) {\n    for (size_t i = host_sets_.size(); i <= priority; ++i) {\n      HostSetImplPtr host_set = createHostSet(i, overprovisioning_factor);\n      host_set->addPriorityUpdateCb([this](uint32_t priority, const HostVector& hosts_added,\n                                           const HostVector& hosts_removed) {\n        runReferenceUpdateCallbacks(priority, hosts_added, hosts_removed);\n      });\n      host_sets_.push_back(std::move(host_set));\n    }\n  }\n  return *host_sets_[priority];\n}\n\nvoid PrioritySetImpl::updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params,\n                                  LocalityWeightsConstSharedPtr locality_weights,\n                                  const HostVector& hosts_added, const HostVector& hosts_removed,\n                                  absl::optional<uint32_t> overprovisioning_factor) {\n  // Ensure that we have a HostSet for the given priority.\n  getOrCreateHostSet(priority, 
overprovisioning_factor);\n  static_cast<HostSetImpl*>(host_sets_[priority].get())\n      ->updateHosts(std::move(update_hosts_params), std::move(locality_weights), hosts_added,\n                    hosts_removed, overprovisioning_factor);\n\n  if (!batch_update_) {\n    runUpdateCallbacks(hosts_added, hosts_removed);\n  }\n}\n\nvoid PrioritySetImpl::batchHostUpdate(BatchUpdateCb& callback) {\n  BatchUpdateScope scope(*this);\n\n  // We wrap the update call with a lambda that tracks all the hosts that have been added/removed.\n  callback.batchUpdate(scope);\n\n  // Now that all the updates have been complete, we can compute the diff.\n  HostVector net_hosts_added = filterHosts(scope.all_hosts_added_, scope.all_hosts_removed_);\n  HostVector net_hosts_removed = filterHosts(scope.all_hosts_removed_, scope.all_hosts_added_);\n\n  runUpdateCallbacks(net_hosts_added, net_hosts_removed);\n}\n\nvoid PrioritySetImpl::BatchUpdateScope::updateHosts(\n    uint32_t priority, PrioritySet::UpdateHostsParams&& update_hosts_params,\n    LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added,\n    const HostVector& hosts_removed, absl::optional<uint32_t> overprovisioning_factor) {\n  // We assume that each call updates a different priority.\n  ASSERT(priorities_.find(priority) == priorities_.end());\n  priorities_.insert(priority);\n\n  for (const auto& host : hosts_added) {\n    all_hosts_added_.insert(host);\n  }\n\n  for (const auto& host : hosts_removed) {\n    all_hosts_removed_.insert(host);\n  }\n\n  parent_.updateHosts(priority, std::move(update_hosts_params), locality_weights, hosts_added,\n                      hosts_removed, overprovisioning_factor);\n}\n\nClusterStats ClusterInfoImpl::generateStats(Stats::Scope& scope) {\n  return {ALL_CLUSTER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))};\n}\n\nClusterRequestResponseSizeStats\nClusterInfoImpl::generateRequestResponseSizeStats(Stats::Scope& scope) {\n  return 
{ALL_CLUSTER_REQUEST_RESPONSE_SIZE_STATS(POOL_HISTOGRAM(scope))};\n}\n\nClusterLoadReportStats ClusterInfoImpl::generateLoadReportStats(Stats::Scope& scope) {\n  return {ALL_CLUSTER_LOAD_REPORT_STATS(POOL_COUNTER(scope))};\n}\n\nClusterTimeoutBudgetStats ClusterInfoImpl::generateTimeoutBudgetStats(Stats::Scope& scope) {\n  return {ALL_CLUSTER_TIMEOUT_BUDGET_STATS(POOL_HISTOGRAM(scope))};\n}\n\n// Implements the FactoryContext interface required by network filters.\nclass FactoryContextImpl : public Server::Configuration::CommonFactoryContext {\npublic:\n  // Create from a TransportSocketFactoryContext using parent stats_scope and runtime\n  // other contexts taken from TransportSocketFactoryContext.\n  FactoryContextImpl(Stats::Scope& stats_scope, Envoy::Runtime::Loader& runtime,\n                     Server::Configuration::TransportSocketFactoryContext& c)\n      : admin_(c.admin()), stats_scope_(stats_scope), cluster_manager_(c.clusterManager()),\n        local_info_(c.localInfo()), dispatcher_(c.dispatcher()), runtime_(runtime),\n        singleton_manager_(c.singletonManager()), tls_(c.threadLocal()), api_(c.api()) {}\n\n  Upstream::ClusterManager& clusterManager() override { return cluster_manager_; }\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n  const LocalInfo::LocalInfo& localInfo() const override { return local_info_; }\n  Envoy::Runtime::Loader& runtime() override { return runtime_; }\n  Stats::Scope& scope() override { return stats_scope_; }\n  Singleton::Manager& singletonManager() override { return singleton_manager_; }\n  ThreadLocal::SlotAllocator& threadLocal() override { return tls_; }\n  Server::Admin& admin() override { return admin_; }\n  TimeSource& timeSource() override { return api().timeSource(); }\n  ProtobufMessage::ValidationContext& messageValidationContext() override {\n    // Not used.\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  Api::Api& api() override { return api_; }\n\nprivate:\n  Server::Admin& admin_;\n  
Stats::Scope& stats_scope_;\n  Upstream::ClusterManager& cluster_manager_;\n  const LocalInfo::LocalInfo& local_info_;\n  Event::Dispatcher& dispatcher_;\n  Envoy::Runtime::Loader& runtime_;\n  Singleton::Manager& singleton_manager_;\n  ThreadLocal::SlotAllocator& tls_;\n  Api::Api& api_;\n};\n\nClusterInfoImpl::ClusterInfoImpl(\n    const envoy::config::cluster::v3::Cluster& config,\n    const envoy::config::core::v3::BindConfig& bind_config, Runtime::Loader& runtime,\n    TransportSocketMatcherPtr&& socket_matcher, Stats::ScopePtr&& stats_scope, bool added_via_api,\n    Server::Configuration::TransportSocketFactoryContext& factory_context)\n    : runtime_(runtime), name_(config.name()), type_(config.type()),\n      max_requests_per_connection_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_requests_per_connection, 0)),\n      max_response_headers_count_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config.common_http_protocol_options(), max_headers_count,\n          runtime_.snapshot().getInteger(Http::MaxResponseHeadersCountOverrideKey,\n                                         Http::DEFAULT_MAX_HEADERS_COUNT))),\n      connect_timeout_(\n          std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(config, connect_timeout))),\n      per_upstream_prefetch_ratio_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config.prefetch_policy(), per_upstream_prefetch_ratio, 1.0)),\n      peekahead_ratio_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.prefetch_policy(), predictive_prefetch_ratio, 0)),\n      per_connection_buffer_limit_bytes_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)),\n      socket_matcher_(std::move(socket_matcher)), stats_scope_(std::move(stats_scope)),\n      stats_(generateStats(*stats_scope_)), load_report_stats_store_(stats_scope_->symbolTable()),\n      load_report_stats_(generateLoadReportStats(load_report_stats_store_)),\n      optional_cluster_stats_((config.has_track_cluster_stats() 
|| config.track_timeout_budgets())\n                                  ? std::make_unique<OptionalClusterStats>(config, *stats_scope_)\n                                  : nullptr),\n      features_(parseFeatures(config)),\n      http1_settings_(Http::Utility::parseHttp1Settings(config.http_protocol_options())),\n      http2_options_(Http2::Utility::initializeAndValidateOptions(config.http2_protocol_options())),\n      common_http_protocol_options_(config.common_http_protocol_options()),\n      extension_protocol_options_(parseExtensionProtocolOptions(config, factory_context)),\n      resource_managers_(config, runtime, name_, *stats_scope_),\n      maintenance_mode_runtime_key_(absl::StrCat(\"upstream.maintenance_mode.\", name_)),\n      source_address_(getSourceAddress(config, bind_config)),\n      lb_least_request_config_(config.least_request_lb_config()),\n      lb_ring_hash_config_(config.ring_hash_lb_config()),\n      lb_maglev_config_(config.maglev_lb_config()),\n      lb_original_dst_config_(config.original_dst_lb_config()),\n      upstream_config_(config.has_upstream_config()\n                           ? 
absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>(\n                                 config.upstream_config())\n                           : absl::nullopt),\n      added_via_api_(added_via_api),\n      lb_subset_(LoadBalancerSubsetInfoImpl(config.lb_subset_config())),\n      metadata_(config.metadata()), typed_metadata_(config.metadata()),\n      common_lb_config_(config.common_lb_config()),\n      cluster_socket_options_(parseClusterSocketOptions(config, bind_config)),\n      drain_connections_on_host_removal_(config.ignore_health_on_host_removal()),\n      connection_pool_per_downstream_connection_(\n          config.connection_pool_per_downstream_connection()),\n      warm_hosts_(!config.health_checks().empty() &&\n                  common_lb_config_.ignore_new_hosts_until_first_hc()),\n      upstream_http_protocol_options_(\n          config.has_upstream_http_protocol_options()\n              ? absl::make_optional<envoy::config::core::v3::UpstreamHttpProtocolOptions>(\n                    config.upstream_http_protocol_options())\n              : absl::nullopt),\n      cluster_type_(\n          config.has_cluster_type()\n              ? 
absl::make_optional<envoy::config::cluster::v3::Cluster::CustomClusterType>(\n                    config.cluster_type())\n              : absl::nullopt),\n      factory_context_(\n          std::make_unique<FactoryContextImpl>(*stats_scope_, runtime, factory_context)) {\n  switch (config.lb_policy()) {\n  case envoy::config::cluster::v3::Cluster::ROUND_ROBIN:\n    lb_type_ = LoadBalancerType::RoundRobin;\n    break;\n  case envoy::config::cluster::v3::Cluster::LEAST_REQUEST:\n    lb_type_ = LoadBalancerType::LeastRequest;\n    break;\n  case envoy::config::cluster::v3::Cluster::RANDOM:\n    lb_type_ = LoadBalancerType::Random;\n    break;\n  case envoy::config::cluster::v3::Cluster::RING_HASH:\n    lb_type_ = LoadBalancerType::RingHash;\n    break;\n  case envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB:\n    if (config.type() != envoy::config::cluster::v3::Cluster::ORIGINAL_DST) {\n      throw EnvoyException(\n          fmt::format(\"cluster: LB policy {} is not valid for Cluster type {}. 
'ORIGINAL_DST_LB' \"\n                      \"is allowed only with cluster type 'ORIGINAL_DST'\",\n                      envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()),\n                      envoy::config::cluster::v3::Cluster::DiscoveryType_Name(config.type())));\n    }\n    if (config.has_lb_subset_config()) {\n      throw EnvoyException(\n          fmt::format(\"cluster: LB policy {} cannot be combined with lb_subset_config\",\n                      envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy())));\n    }\n\n    lb_type_ = LoadBalancerType::ClusterProvided;\n    break;\n  case envoy::config::cluster::v3::Cluster::MAGLEV:\n    lb_type_ = LoadBalancerType::Maglev;\n    break;\n  case envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED:\n    if (config.has_lb_subset_config()) {\n      throw EnvoyException(\n          fmt::format(\"cluster: LB policy {} cannot be combined with lb_subset_config\",\n                      envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy())));\n    }\n\n    lb_type_ = LoadBalancerType::ClusterProvided;\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  if (config.lb_subset_config().locality_weight_aware() &&\n      !config.common_lb_config().has_locality_weighted_lb_config()) {\n    throw EnvoyException(fmt::format(\n        \"Locality weight aware subset LB requires that a locality_weighted_lb_config be set in {}\",\n        name_));\n  }\n\n  if (config.protocol_selection() == envoy::config::cluster::v3::Cluster::USE_CONFIGURED_PROTOCOL) {\n    // Make sure multiple protocol configurations are not present\n    if (config.has_http_protocol_options() && config.has_http2_protocol_options()) {\n      throw EnvoyException(fmt::format(\"cluster: Both HTTP1 and HTTP2 options may only be \"\n                                       \"configured with non-default 'protocol_selection' values\"));\n    }\n  }\n\n  if 
(config.common_http_protocol_options().has_idle_timeout()) {\n    idle_timeout_ = std::chrono::milliseconds(\n        DurationUtil::durationToMilliseconds(config.common_http_protocol_options().idle_timeout()));\n    if (idle_timeout_.value().count() == 0) {\n      idle_timeout_ = absl::nullopt;\n    }\n  } else {\n    idle_timeout_ = std::chrono::hours(1);\n  }\n\n  if (config.has_eds_cluster_config()) {\n    if (config.type() != envoy::config::cluster::v3::Cluster::EDS) {\n      throw EnvoyException(\"eds_cluster_config set in a non-EDS cluster\");\n    }\n    eds_service_name_ = config.eds_cluster_config().service_name();\n  }\n\n  // TODO(htuch): Remove this temporary workaround when we have\n  // https://github.com/envoyproxy/protoc-gen-validate/issues/97 resolved. This just provides\n  // early validation of sanity of fields that we should catch at config ingestion.\n  DurationUtil::durationToMilliseconds(common_lb_config_.update_merge_window());\n\n  // Create upstream filter factories\n  auto filters = config.filters();\n  for (ssize_t i = 0; i < filters.size(); i++) {\n    const auto& proto_config = filters[i];\n    ENVOY_LOG(debug, \"  upstream filter #{}:\", i);\n    ENVOY_LOG(debug, \"    name: {}\", proto_config.name());\n    auto& factory = Config::Utility::getAndCheckFactory<\n        Server::Configuration::NamedUpstreamNetworkFilterConfigFactory>(proto_config);\n    auto message = factory.createEmptyConfigProto();\n    Config::Utility::translateOpaqueConfig(proto_config.typed_config(), ProtobufWkt::Struct(),\n                                           factory_context.messageValidationVisitor(), *message);\n    Network::FilterFactoryCb callback =\n        factory.createFilterFactoryFromProto(*message, *factory_context_);\n    filter_factories_.push_back(callback);\n  }\n}\n\nProtocolOptionsConfigConstSharedPtr\nClusterInfoImpl::extensionProtocolOptions(const std::string& name) const {\n  auto i = extension_protocol_options_.find(name);\n  if (i != 
extension_protocol_options_.end()) {\n    return i->second;\n  }\n\n  return nullptr;\n}\n\nNetwork::TransportSocketFactoryPtr createTransportSocketFactory(\n    const envoy::config::cluster::v3::Cluster& config,\n    Server::Configuration::TransportSocketFactoryContext& factory_context) {\n  // If the cluster config doesn't have a transport socket configured, override with the default\n  // transport socket implementation based on the tls_context. We copy by value first then override\n  // if necessary.\n  auto transport_socket = config.transport_socket();\n  if (!config.has_transport_socket()) {\n    if (config.has_hidden_envoy_deprecated_tls_context()) {\n      transport_socket.set_name(Extensions::TransportSockets::TransportSocketNames::get().Tls);\n      transport_socket.mutable_typed_config()->PackFrom(\n          config.hidden_envoy_deprecated_tls_context());\n    } else {\n      transport_socket.set_name(\n          Extensions::TransportSockets::TransportSocketNames::get().RawBuffer);\n    }\n  }\n\n  auto& config_factory = Config::Utility::getAndCheckFactory<\n      Server::Configuration::UpstreamTransportSocketConfigFactory>(transport_socket);\n  ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(\n      transport_socket, factory_context.messageValidationVisitor(), config_factory);\n  return config_factory.createTransportSocketFactory(*message, factory_context);\n}\n\nvoid ClusterInfoImpl::createNetworkFilterChain(Network::Connection& connection) const {\n  for (const auto& factory : filter_factories_) {\n    factory(connection);\n  }\n}\n\nHttp::Protocol\nClusterInfoImpl::upstreamHttpProtocol(absl::optional<Http::Protocol> downstream_protocol) const {\n  if (downstream_protocol.has_value() &&\n      features_ & Upstream::ClusterInfo::Features::USE_DOWNSTREAM_PROTOCOL) {\n    return downstream_protocol.value();\n  } else {\n    return (features_ & Upstream::ClusterInfo::Features::HTTP2) ? 
Http::Protocol::Http2\n                                                                : Http::Protocol::Http11;\n  }\n}\n\nClusterImplBase::ClusterImplBase(\n    const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n    Stats::ScopePtr&& stats_scope, bool added_via_api)\n    : init_manager_(fmt::format(\"Cluster {}\", cluster.name())),\n      init_watcher_(\"ClusterImplBase\", [this]() { onInitDone(); }), runtime_(runtime),\n      local_cluster_(factory_context.clusterManager().localClusterName().value_or(\"\") ==\n                     cluster.name()),\n      const_metadata_shared_pool_(Config::Metadata::getConstMetadataSharedPool(\n          factory_context.singletonManager(), factory_context.dispatcher())) {\n  factory_context.setInitManager(init_manager_);\n  auto socket_factory = createTransportSocketFactory(cluster, factory_context);\n  auto socket_matcher = std::make_unique<TransportSocketMatcherImpl>(\n      cluster.transport_socket_matches(), factory_context, socket_factory, *stats_scope);\n  info_ = std::make_unique<ClusterInfoImpl>(cluster, factory_context.clusterManager().bindConfig(),\n                                            runtime, std::move(socket_matcher),\n                                            std::move(stats_scope), added_via_api, factory_context);\n  // Create the default (empty) priority set before registering callbacks to\n  // avoid getting an update the first time it is accessed.\n  priority_set_.getOrCreateHostSet(0);\n  priority_set_.addPriorityUpdateCb(\n      [this](uint32_t, const HostVector& hosts_added, const HostVector& hosts_removed) {\n        if (!hosts_added.empty() || !hosts_removed.empty()) {\n          info_->stats().membership_change_.inc();\n        }\n\n        uint32_t healthy_hosts = 0;\n        uint32_t degraded_hosts = 0;\n        uint32_t excluded_hosts = 0;\n        uint32_t hosts = 0;\n        for (const 
auto& host_set : prioritySet().hostSetsPerPriority()) {\n          hosts += host_set->hosts().size();\n          healthy_hosts += host_set->healthyHosts().size();\n          degraded_hosts += host_set->degradedHosts().size();\n          excluded_hosts += host_set->excludedHosts().size();\n        }\n        info_->stats().membership_total_.set(hosts);\n        info_->stats().membership_healthy_.set(healthy_hosts);\n        info_->stats().membership_degraded_.set(degraded_hosts);\n        info_->stats().membership_excluded_.set(excluded_hosts);\n      });\n}\n\nstd::tuple<HealthyHostVectorConstSharedPtr, DegradedHostVectorConstSharedPtr,\n           ExcludedHostVectorConstSharedPtr>\nClusterImplBase::partitionHostList(const HostVector& hosts) {\n  auto healthy_list = std::make_shared<HealthyHostVector>();\n  auto degraded_list = std::make_shared<DegradedHostVector>();\n  auto excluded_list = std::make_shared<ExcludedHostVector>();\n\n  for (const auto& host : hosts) {\n    if (host->health() == Host::Health::Healthy) {\n      healthy_list->get().emplace_back(host);\n    }\n    if (host->health() == Host::Health::Degraded) {\n      degraded_list->get().emplace_back(host);\n    }\n    if (host->healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC)) {\n      excluded_list->get().emplace_back(host);\n    }\n  }\n\n  return std::make_tuple(healthy_list, degraded_list, excluded_list);\n}\n\nstd::tuple<HostsPerLocalityConstSharedPtr, HostsPerLocalityConstSharedPtr,\n           HostsPerLocalityConstSharedPtr>\nClusterImplBase::partitionHostsPerLocality(const HostsPerLocality& hosts) {\n  auto filtered_clones = hosts.filter(\n      {[](const Host& host) { return host.health() == Host::Health::Healthy; },\n       [](const Host& host) { return host.health() == Host::Health::Degraded; },\n       [](const Host& host) { return host.healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC); }});\n\n  return std::make_tuple(std::move(filtered_clones[0]), std::move(filtered_clones[1]),\n      
                   std::move(filtered_clones[2]));\n}\n\nbool ClusterInfoImpl::maintenanceMode() const {\n  return runtime_.snapshot().featureEnabled(maintenance_mode_runtime_key_, 0);\n}\n\nResourceManager& ClusterInfoImpl::resourceManager(ResourcePriority priority) const {\n  ASSERT(enumToInt(priority) < resource_managers_.managers_.size());\n  return *resource_managers_.managers_[enumToInt(priority)];\n}\n\nvoid ClusterImplBase::initialize(std::function<void()> callback) {\n  ASSERT(!initialization_started_);\n  ASSERT(initialization_complete_callback_ == nullptr);\n  initialization_complete_callback_ = callback;\n  startPreInit();\n}\n\nvoid ClusterImplBase::onPreInitComplete() {\n  // Protect against multiple calls.\n  if (initialization_started_) {\n    return;\n  }\n  initialization_started_ = true;\n\n  ENVOY_LOG(debug, \"initializing {} cluster {} completed\",\n            initializePhase() == InitializePhase::Primary ? \"Primary\" : \"Secondary\",\n            info()->name());\n  init_manager_.initialize(init_watcher_);\n}\n\nvoid ClusterImplBase::onInitDone() {\n  if (health_checker_ && pending_initialize_health_checks_ == 0) {\n    for (auto& host_set : prioritySet().hostSetsPerPriority()) {\n      pending_initialize_health_checks_ += host_set->hosts().size();\n    }\n\n    // TODO(mattklein123): Remove this callback when done.\n    health_checker_->addHostCheckCompleteCb([this](HostSharedPtr, HealthTransition) -> void {\n      if (pending_initialize_health_checks_ > 0 && --pending_initialize_health_checks_ == 0) {\n        finishInitialization();\n      }\n    });\n  }\n\n  if (pending_initialize_health_checks_ == 0) {\n    finishInitialization();\n  }\n}\n\nvoid ClusterImplBase::finishInitialization() {\n  ASSERT(initialization_complete_callback_ != nullptr);\n  ASSERT(initialization_started_);\n\n  // Snap a copy of the completion callback so that we can set it to nullptr to unblock\n  // reloadHealthyHosts(). 
See that function for more info on why we do this.\n  auto snapped_callback = initialization_complete_callback_;\n  initialization_complete_callback_ = nullptr;\n\n  if (health_checker_ != nullptr) {\n    reloadHealthyHosts(nullptr);\n  }\n\n  if (snapped_callback != nullptr) {\n    snapped_callback();\n  }\n}\n\nvoid ClusterImplBase::setHealthChecker(const HealthCheckerSharedPtr& health_checker) {\n  ASSERT(!health_checker_);\n  health_checker_ = health_checker;\n  health_checker_->start();\n  health_checker_->addHostCheckCompleteCb(\n      [this](const HostSharedPtr& host, HealthTransition changed_state) -> void {\n        // If we get a health check completion that resulted in a state change, signal to\n        // update the host sets on all threads.\n        if (changed_state == HealthTransition::Changed) {\n          reloadHealthyHosts(host);\n        }\n      });\n}\n\nvoid ClusterImplBase::setOutlierDetector(const Outlier::DetectorSharedPtr& outlier_detector) {\n  if (!outlier_detector) {\n    return;\n  }\n\n  outlier_detector_ = outlier_detector;\n  outlier_detector_->addChangedStateCb(\n      [this](const HostSharedPtr& host) -> void { reloadHealthyHosts(host); });\n}\n\nvoid ClusterImplBase::reloadHealthyHosts(const HostSharedPtr& host) {\n  // Every time a host changes Health Check state we cause a full healthy host recalculation which\n  // for expensive LBs (ring, subset, etc.) can be quite time consuming. During startup, this\n  // can also block worker threads by doing this repeatedly. There is no reason to do this\n  // as we will not start taking traffic until we are initialized. 
By blocking Health Check updates\n  // while initializing we can avoid this.\n  if (initialization_complete_callback_ != nullptr) {\n    return;\n  }\n\n  reloadHealthyHostsHelper(host);\n}\n\nvoid ClusterImplBase::reloadHealthyHostsHelper(const HostSharedPtr&) {\n  const auto& host_sets = prioritySet().hostSetsPerPriority();\n  for (size_t priority = 0; priority < host_sets.size(); ++priority) {\n    const auto& host_set = host_sets[priority];\n    // TODO(htuch): Can we skip these copies by exporting out const shared_ptr from HostSet?\n    HostVectorConstSharedPtr hosts_copy(new HostVector(host_set->hosts()));\n\n    HostsPerLocalityConstSharedPtr hosts_per_locality_copy = host_set->hostsPerLocality().clone();\n    prioritySet().updateHosts(priority,\n                              HostSetImpl::partitionHosts(hosts_copy, hosts_per_locality_copy),\n                              host_set->localityWeights(), {}, {}, absl::nullopt);\n  }\n}\n\nconst Network::Address::InstanceConstSharedPtr\nClusterImplBase::resolveProtoAddress(const envoy::config::core::v3::Address& address) {\n  try {\n    return Network::Address::resolveProtoAddress(address);\n  } catch (EnvoyException& e) {\n    if (info_->type() == envoy::config::cluster::v3::Cluster::STATIC ||\n        info_->type() == envoy::config::cluster::v3::Cluster::EDS) {\n      throw EnvoyException(fmt::format(\"{}. 
Consider setting resolver_name or setting cluster type \"\n                                       \"to 'STRICT_DNS' or 'LOGICAL_DNS'\",\n                                       e.what()));\n    }\n    throw e;\n  }\n}\n\nvoid ClusterImplBase::validateEndpointsForZoneAwareRouting(\n    const envoy::config::endpoint::v3::LocalityLbEndpoints& endpoints) const {\n  if (local_cluster_ && endpoints.priority() > 0) {\n    throw EnvoyException(\n        fmt::format(\"Unexpected non-zero priority for local cluster '{}'.\", info()->name()));\n  }\n}\n\nClusterInfoImpl::OptionalClusterStats::OptionalClusterStats(\n    const envoy::config::cluster::v3::Cluster& config, Stats::Scope& stats_scope)\n    : timeout_budget_stats_(\n          (config.track_cluster_stats().timeout_budgets() || config.track_timeout_budgets())\n              ? std::make_unique<ClusterTimeoutBudgetStats>(generateTimeoutBudgetStats(stats_scope))\n              : nullptr),\n      request_response_size_stats_(config.track_cluster_stats().request_response_sizes()\n                                       ? 
std::make_unique<ClusterRequestResponseSizeStats>(\n                                             generateRequestResponseSizeStats(stats_scope))\n                                       : nullptr) {}\n\nClusterInfoImpl::ResourceManagers::ResourceManagers(\n    const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime,\n    const std::string& cluster_name, Stats::Scope& stats_scope) {\n  managers_[enumToInt(ResourcePriority::Default)] =\n      load(config, runtime, cluster_name, stats_scope, envoy::config::core::v3::DEFAULT);\n  managers_[enumToInt(ResourcePriority::High)] =\n      load(config, runtime, cluster_name, stats_scope, envoy::config::core::v3::HIGH);\n}\n\nClusterCircuitBreakersStats\nClusterInfoImpl::generateCircuitBreakersStats(Stats::Scope& scope, const std::string& stat_prefix,\n                                              bool track_remaining) {\n  std::string prefix(fmt::format(\"circuit_breakers.{}.\", stat_prefix));\n  if (track_remaining) {\n    return {ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE_PREFIX(scope, prefix),\n                                               POOL_GAUGE_PREFIX(scope, prefix))};\n  } else {\n    return {ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE_PREFIX(scope, prefix),\n                                               NULL_POOL_GAUGE(scope))};\n  }\n}\n\nHttp::Http1::CodecStats& ClusterInfoImpl::http1CodecStats() const {\n  return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, *stats_scope_);\n}\n\nHttp::Http2::CodecStats& ClusterInfoImpl::http2CodecStats() const {\n  return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, *stats_scope_);\n}\n\nResourceManagerImplPtr\nClusterInfoImpl::ResourceManagers::load(const envoy::config::cluster::v3::Cluster& config,\n                                        Runtime::Loader& runtime, const std::string& cluster_name,\n                                        Stats::Scope& stats_scope,\n                                        const 
envoy::config::core::v3::RoutingPriority& priority) {\n  uint64_t max_connections = 1024;\n  uint64_t max_pending_requests = 1024;\n  uint64_t max_requests = 1024;\n  uint64_t max_retries = 3;\n  uint64_t max_connection_pools = std::numeric_limits<uint64_t>::max();\n\n  bool track_remaining = false;\n\n  std::string priority_name;\n  switch (priority) {\n  case envoy::config::core::v3::DEFAULT:\n    priority_name = \"default\";\n    break;\n  case envoy::config::core::v3::HIGH:\n    priority_name = \"high\";\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  const std::string runtime_prefix =\n      fmt::format(\"circuit_breakers.{}.{}.\", cluster_name, priority_name);\n\n  const auto& thresholds = config.circuit_breakers().thresholds();\n  const auto it = std::find_if(\n      thresholds.cbegin(), thresholds.cend(),\n      [priority](const envoy::config::cluster::v3::CircuitBreakers::Thresholds& threshold) {\n        return threshold.priority() == priority;\n      });\n\n  absl::optional<double> budget_percent;\n  absl::optional<uint32_t> min_retry_concurrency;\n  if (it != thresholds.cend()) {\n    max_connections = PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_connections, max_connections);\n    max_pending_requests =\n        PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_pending_requests, max_pending_requests);\n    max_requests = PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_requests, max_requests);\n    max_retries = PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_retries, max_retries);\n    track_remaining = it->track_remaining();\n    max_connection_pools =\n        PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_connection_pools, max_connection_pools);\n    if (it->has_retry_budget()) {\n      // The budget_percent and min_retry_concurrency values do not set defaults like the other\n      // members of the 'threshold' message, because the behavior of the retry circuit breaker\n      // changes depending on whether it has been configured. 
Therefore, it's necessary to manually\n      // check if the threshold message has a retry budget configured and only set the values if so.\n      budget_percent = it->retry_budget().has_budget_percent()\n                           ? PROTOBUF_GET_WRAPPED_REQUIRED(it->retry_budget(), budget_percent)\n                           : budget_percent;\n      min_retry_concurrency =\n          it->retry_budget().has_min_retry_concurrency()\n              ? PROTOBUF_GET_WRAPPED_REQUIRED(it->retry_budget(), min_retry_concurrency)\n              : min_retry_concurrency;\n    }\n  }\n  return std::make_unique<ResourceManagerImpl>(\n      runtime, runtime_prefix, max_connections, max_pending_requests, max_requests, max_retries,\n      max_connection_pools,\n      ClusterInfoImpl::generateCircuitBreakersStats(stats_scope, priority_name, track_remaining),\n      budget_percent, min_retry_concurrency);\n}\n\nPriorityStateManager::PriorityStateManager(ClusterImplBase& cluster,\n                                           const LocalInfo::LocalInfo& local_info,\n                                           PrioritySet::HostUpdateCb* update_cb)\n    : parent_(cluster), local_info_node_(local_info.node()), update_cb_(update_cb) {}\n\nvoid PriorityStateManager::initializePriorityFor(\n    const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) {\n  const uint32_t priority = locality_lb_endpoint.priority();\n  if (priority_state_.size() <= priority) {\n    priority_state_.resize(priority + 1);\n  }\n  if (priority_state_[priority].first == nullptr) {\n    priority_state_[priority].first = std::make_unique<HostVector>();\n  }\n  if (locality_lb_endpoint.has_locality() && locality_lb_endpoint.has_load_balancing_weight()) {\n    priority_state_[priority].second[locality_lb_endpoint.locality()] =\n        locality_lb_endpoint.load_balancing_weight().value();\n  }\n}\n\nvoid PriorityStateManager::registerHostForPriority(\n    const std::string& hostname, 
Network::Address::InstanceConstSharedPtr address,\n    const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint,\n    const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint) {\n  auto metadata = lb_endpoint.has_metadata()\n                      ? parent_.constMetadataSharedPool()->getObject(lb_endpoint.metadata())\n                      : nullptr;\n  const HostSharedPtr host(new HostImpl(\n      parent_.info(), hostname, address, metadata, lb_endpoint.load_balancing_weight().value(),\n      locality_lb_endpoint.locality(), lb_endpoint.endpoint().health_check_config(),\n      locality_lb_endpoint.priority(), lb_endpoint.health_status()));\n  registerHostForPriority(host, locality_lb_endpoint);\n}\n\nvoid PriorityStateManager::registerHostForPriority(\n    const HostSharedPtr& host,\n    const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) {\n  const uint32_t priority = locality_lb_endpoint.priority();\n  // Should be called after initializePriorityFor.\n  ASSERT(priority_state_[priority].first);\n  priority_state_[priority].first->emplace_back(host);\n}\n\nvoid PriorityStateManager::updateClusterPrioritySet(\n    const uint32_t priority, HostVectorSharedPtr&& current_hosts,\n    const absl::optional<HostVector>& hosts_added, const absl::optional<HostVector>& hosts_removed,\n    const absl::optional<Upstream::Host::HealthFlag> health_checker_flag,\n    absl::optional<uint32_t> overprovisioning_factor) {\n  // If local locality is not defined then skip populating per locality hosts.\n  const auto& local_locality = local_info_node_.locality();\n  ENVOY_LOG(trace, \"Local locality: {}\", local_locality.DebugString());\n\n  // For non-EDS, most likely the current hosts are from priority_state_[priority].first.\n  HostVectorSharedPtr hosts(std::move(current_hosts));\n  LocalityWeightsMap empty_locality_map;\n  LocalityWeightsMap& locality_weights_map =\n      priority_state_.size() > priority ? 
priority_state_[priority].second : empty_locality_map;\n  ASSERT(priority_state_.size() > priority || locality_weights_map.empty());\n  LocalityWeightsSharedPtr locality_weights;\n  std::vector<HostVector> per_locality;\n\n  // If we are configured for locality weighted LB we populate the locality weights.\n  const bool locality_weighted_lb = parent_.info()->lbConfig().has_locality_weighted_lb_config();\n  if (locality_weighted_lb) {\n    locality_weights = std::make_shared<LocalityWeights>();\n  }\n\n  // We use std::map to guarantee a stable ordering for zone aware routing.\n  std::map<envoy::config::core::v3::Locality, HostVector, LocalityLess> hosts_per_locality;\n\n  for (const HostSharedPtr& host : *hosts) {\n    // Take into consideration when a non-EDS cluster has active health checking, i.e. to mark all\n    // the hosts unhealthy (host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC)) and then fire\n    // update callbacks to start the health checking process.\n    if (health_checker_flag.has_value()) {\n      host->healthFlagSet(health_checker_flag.value());\n    }\n    hosts_per_locality[host->locality()].push_back(host);\n  }\n\n  // Do we have hosts for the local locality?\n  const bool non_empty_local_locality =\n      local_info_node_.has_locality() &&\n      hosts_per_locality.find(local_locality) != hosts_per_locality.end();\n\n  // As per HostsPerLocality::get(), the per_locality vector must have the local locality hosts\n  // first if non_empty_local_locality.\n  if (non_empty_local_locality) {\n    per_locality.emplace_back(hosts_per_locality[local_locality]);\n    if (locality_weighted_lb) {\n      locality_weights->emplace_back(locality_weights_map[local_locality]);\n    }\n  }\n\n  // After the local locality hosts (if any), we place the remaining locality host groups in\n  // lexicographic order. 
This provides a stable ordering for zone aware routing.\n  for (auto& entry : hosts_per_locality) {\n    if (!non_empty_local_locality || !LocalityEqualTo()(local_locality, entry.first)) {\n      per_locality.emplace_back(entry.second);\n      if (locality_weighted_lb) {\n        locality_weights->emplace_back(locality_weights_map[entry.first]);\n      }\n    }\n  }\n\n  auto per_locality_shared =\n      std::make_shared<HostsPerLocalityImpl>(std::move(per_locality), non_empty_local_locality);\n\n  // If a batch update callback was provided, use that. Otherwise directly update\n  // the PrioritySet.\n  if (update_cb_ != nullptr) {\n    update_cb_->updateHosts(priority, HostSetImpl::partitionHosts(hosts, per_locality_shared),\n                            std::move(locality_weights), hosts_added.value_or(*hosts),\n                            hosts_removed.value_or<HostVector>({}), overprovisioning_factor);\n  } else {\n    parent_.prioritySet().updateHosts(\n        priority, HostSetImpl::partitionHosts(hosts, per_locality_shared),\n        std::move(locality_weights), hosts_added.value_or(*hosts),\n        hosts_removed.value_or<HostVector>({}), overprovisioning_factor);\n  }\n}\n\nbool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts,\n                                                   HostVector& current_priority_hosts,\n                                                   HostVector& hosts_added_to_current_priority,\n                                                   HostVector& hosts_removed_from_current_priority,\n                                                   HostMap& updated_hosts,\n                                                   const HostMap& all_hosts) {\n  uint64_t max_host_weight = 1;\n\n  // Did hosts change?\n  //\n  // Has the EDS health status changed the health of any endpoint? If so, we\n  // rebuild the hosts vectors. We only do this if the health status of an\n  // endpoint has materially changed (e.g. 
if previously failing active health\n  // checks, we just note it's now failing EDS health status but don't rebuild).\n  //\n  // Likewise, if metadata for an endpoint changed we rebuild the hosts vectors.\n  //\n  // TODO(htuch): We can be smarter about this potentially, and not force a full\n  // host set update on health status change. The way this would work is to\n  // implement a HealthChecker subclass that provides thread local health\n  // updates to the Cluster object. This will probably make sense to do in\n  // conjunction with https://github.com/envoyproxy/envoy/issues/2874.\n  bool hosts_changed = false;\n\n  // Go through and see if the list we have is different from what we just got. If it is, we make a\n  // new host list and raise a change notification. We also check for duplicates here. It's\n  // possible for DNS to return the same address multiple times, and a bad EDS implementation could\n  // do the same thing.\n\n  // Keep track of hosts we see in new_hosts that we are able to match up with an existing host.\n  absl::node_hash_set<std::string> existing_hosts_for_current_priority(\n      current_priority_hosts.size());\n  HostVector final_hosts;\n  for (const HostSharedPtr& host : new_hosts) {\n    if (updated_hosts.count(host->address()->asString())) {\n      continue;\n    }\n\n    // To match a new host with an existing host means comparing their addresses.\n    auto existing_host = all_hosts.find(host->address()->asString());\n    const bool existing_host_found = existing_host != all_hosts.end();\n\n    // Clear any pending deletion flag on an existing host in case it came back while it was\n    // being stabilized. We will set it again below if needed.\n    if (existing_host_found) {\n      existing_host->second->healthFlagClear(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL);\n    }\n\n    // Check if in-place host update should be skipped, i.e. 
when the following criteria are met\n    // (currently there is only one criterion, but we might add more in the future):\n    // - The cluster health checker is activated and a new host is matched with the existing one,\n    //   but the health check address is different.\n    const bool skip_inplace_host_update =\n        health_checker_ != nullptr && existing_host_found &&\n        *existing_host->second->healthCheckAddress() != *host->healthCheckAddress();\n\n    // When there is a match and we decided to do in-place update, we potentially update the host's\n    // health check flag and metadata. Afterwards, the host is pushed back into the final_hosts,\n    // i.e. hosts that should be preserved in the current priority.\n    if (existing_host_found && !skip_inplace_host_update) {\n      existing_hosts_for_current_priority.emplace(existing_host->first);\n      // If we find a host matched based on address, we keep it. However we do change weight inline\n      // so do that here.\n      if (host->weight() > max_host_weight) {\n        max_host_weight = host->weight();\n      }\n\n      hosts_changed |=\n          updateHealthFlag(*host, *existing_host->second, Host::HealthFlag::FAILED_EDS_HEALTH);\n      hosts_changed |=\n          updateHealthFlag(*host, *existing_host->second, Host::HealthFlag::DEGRADED_EDS_HEALTH);\n\n      // Did metadata change?\n      bool metadata_changed = true;\n      if (host->metadata() && existing_host->second->metadata()) {\n        metadata_changed = !Protobuf::util::MessageDifferencer::Equivalent(\n            *host->metadata(), *existing_host->second->metadata());\n      } else if (!host->metadata() && !existing_host->second->metadata()) {\n        metadata_changed = false;\n      }\n\n      if (metadata_changed) {\n        // First, update the entire metadata for the endpoint.\n        existing_host->second->metadata(host->metadata());\n\n        // Also, given that the canary attribute of an endpoint is derived from its 
metadata\n        // (e.g.: from envoy.lb/canary), we do a blind update here since it's cheaper than testing\n        // to see if it actually changed. We must update this besides just updating the metadata,\n        // because it'll be used by the router filter to compute upstream stats.\n        existing_host->second->canary(host->canary());\n\n        // If metadata changed, we need to rebuild. See github issue #3810.\n        hosts_changed = true;\n      }\n\n      // Did the priority change?\n      if (host->priority() != existing_host->second->priority()) {\n        existing_host->second->priority(host->priority());\n        hosts_added_to_current_priority.emplace_back(existing_host->second);\n      }\n\n      existing_host->second->weight(host->weight());\n      final_hosts.push_back(existing_host->second);\n      updated_hosts[existing_host->second->address()->asString()] = existing_host->second;\n    } else {\n      if (host->weight() > max_host_weight) {\n        max_host_weight = host->weight();\n      }\n\n      // If we are depending on a health checker, we initialize to unhealthy.\n      if (health_checker_ != nullptr) {\n        host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n\n        // If we want to exclude hosts until they have been health checked, mark them with\n        // a flag to indicate that they have not been health checked yet.\n        if (info_->warmHosts()) {\n          host->healthFlagSet(Host::HealthFlag::PENDING_ACTIVE_HC);\n        }\n      }\n\n      updated_hosts[host->address()->asString()] = host;\n      final_hosts.push_back(host);\n      hosts_added_to_current_priority.push_back(host);\n    }\n  }\n\n  // Remove hosts from current_priority_hosts that were matched to an existing host in the previous\n  // loop.\n  auto erase_from =\n      std::remove_if(current_priority_hosts.begin(), current_priority_hosts.end(),\n                     [&existing_hosts_for_current_priority](const HostSharedPtr& p) {\n                
       auto existing_itr =\n                           existing_hosts_for_current_priority.find(p->address()->asString());\n\n                       if (existing_itr != existing_hosts_for_current_priority.end()) {\n                         existing_hosts_for_current_priority.erase(existing_itr);\n                         return true;\n                       }\n\n                       return false;\n                     });\n  current_priority_hosts.erase(erase_from, current_priority_hosts.end());\n\n  // If we saw existing hosts during this iteration from a different priority, then we've moved\n  // a host from another priority into this one, so we should mark the priority as having changed.\n  if (!existing_hosts_for_current_priority.empty()) {\n    hosts_changed = true;\n  }\n\n  // The remaining hosts are hosts that are not referenced in the config update. We remove them from\n  // the priority if any of the following is true:\n  // - Active health checking is not enabled.\n  // - The removed hosts are failing active health checking OR have been explicitly marked as\n  //   unhealthy by a previous EDS update. 
We do not count outlier as a reason to remove a host\n  //   or any other future health condition that may be added so we do not use the health() API.\n  // - We have explicitly configured the cluster to remove hosts regardless of active health status.\n  const bool dont_remove_healthy_hosts =\n      health_checker_ != nullptr && !info()->drainConnectionsOnHostRemoval();\n  if (!current_priority_hosts.empty() && dont_remove_healthy_hosts) {\n    erase_from =\n        std::remove_if(current_priority_hosts.begin(), current_priority_hosts.end(),\n                       [&updated_hosts, &final_hosts, &max_host_weight](const HostSharedPtr& p) {\n                         if (!(p->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC) ||\n                               p->healthFlagGet(Host::HealthFlag::FAILED_EDS_HEALTH))) {\n                           if (p->weight() > max_host_weight) {\n                             max_host_weight = p->weight();\n                           }\n\n                           final_hosts.push_back(p);\n                           updated_hosts[p->address()->asString()] = p;\n                           p->healthFlagSet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL);\n                           return true;\n                         }\n                         return false;\n                       });\n    current_priority_hosts.erase(erase_from, current_priority_hosts.end());\n  }\n\n  // At this point we've accounted for all the new hosts as well the hosts that previously\n  // existed in this priority.\n  info_->stats().max_host_weight_.set(max_host_weight);\n\n  // Whatever remains in current_priority_hosts should be removed.\n  if (!hosts_added_to_current_priority.empty() || !current_priority_hosts.empty()) {\n    hosts_removed_from_current_priority = std::move(current_priority_hosts);\n    hosts_changed = true;\n  }\n\n  // During the update we populated final_hosts with all the hosts that should remain\n  // in the current priority, so move 
them back into current_priority_hosts.\n  current_priority_hosts = std::move(final_hosts);\n  // We return false here in the absence of EDS health status or metadata changes, because we\n  // have no changes to host vector status (modulo weights). When we have EDS\n  // health status or metadata changed, we return true, causing updateHosts() to fire in the\n  // caller.\n  return hosts_changed;\n}\n\nNetwork::DnsLookupFamily\ngetDnsLookupFamilyFromCluster(const envoy::config::cluster::v3::Cluster& cluster) {\n  return getDnsLookupFamilyFromEnum(cluster.dns_lookup_family());\n}\n\nNetwork::DnsLookupFamily\ngetDnsLookupFamilyFromEnum(envoy::config::cluster::v3::Cluster::DnsLookupFamily family) {\n  switch (family) {\n  case envoy::config::cluster::v3::Cluster::V6_ONLY:\n    return Network::DnsLookupFamily::V6Only;\n  case envoy::config::cluster::v3::Cluster::V4_ONLY:\n    return Network::DnsLookupFamily::V4Only;\n  case envoy::config::cluster::v3::Cluster::AUTO:\n    return Network::DnsLookupFamily::Auto;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid reportUpstreamCxDestroy(const Upstream::HostDescriptionConstSharedPtr& host,\n                             Network::ConnectionEvent event) {\n  host->cluster().stats().upstream_cx_destroy_.inc();\n  if (event == Network::ConnectionEvent::RemoteClose) {\n    host->cluster().stats().upstream_cx_destroy_remote_.inc();\n  } else {\n    host->cluster().stats().upstream_cx_destroy_local_.inc();\n  }\n}\n\nvoid reportUpstreamCxDestroyActiveRequest(const Upstream::HostDescriptionConstSharedPtr& host,\n                                          Network::ConnectionEvent event) {\n  host->cluster().stats().upstream_cx_destroy_with_active_rq_.inc();\n  if (event == Network::ConnectionEvent::RemoteClose) {\n    host->cluster().stats().upstream_cx_destroy_remote_with_active_rq_.inc();\n  } else {\n    host->cluster().stats().upstream_cx_destroy_local_with_active_rq_.inc();\n  }\n}\n\n} // namespace Upstream\n} // 
namespace Envoy\n"
  },
  {
    "path": "source/common/upstream/upstream_impl.h",
    "content": "#pragma once\n\n#include <array>\n#include <atomic>\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/health_checker.h\"\n#include \"envoy/upstream/load_balancer.h\"\n#include \"envoy/upstream/locality.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/callback_impl.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/config/metadata.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/http/http1/codec_stats.h\"\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/shared_pool/shared_pool.h\"\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/outlier_detection_impl.h\"\n#include \"common/upstream/resource_manager_impl.h\"\n#include \"common/upstream/transport_socket_match_impl.h\"\n\n#include 
\"server/transport_socket_config_impl.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Null implementation of HealthCheckHostMonitor.\n */\nclass HealthCheckHostMonitorNullImpl : public HealthCheckHostMonitor {\npublic:\n  // Upstream::HealthCheckHostMonitor\n  void setUnhealthy() override {}\n};\n\n/**\n * Implementation of Upstream::HostDescription.\n */\nclass HostDescriptionImpl : virtual public HostDescription,\n                            protected Logger::Loggable<Logger::Id::upstream> {\npublic:\n  HostDescriptionImpl(\n      ClusterInfoConstSharedPtr cluster, const std::string& hostname,\n      Network::Address::InstanceConstSharedPtr dest_address, MetadataConstSharedPtr metadata,\n      const envoy::config::core::v3::Locality& locality,\n      const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig& health_check_config,\n      uint32_t priority);\n\n  Network::TransportSocketFactory& transportSocketFactory() const override {\n    return socket_factory_;\n  }\n\n  bool canary() const override { return canary_; }\n  void canary(bool is_canary) override { canary_ = is_canary; }\n\n  // Metadata getter/setter.\n  //\n  // It's possible that the lock that guards the metadata will become highly contended (e.g.:\n  // endpoints churning during a deploy of a large cluster). A possible improvement\n  // would be to use TLS and post metadata updates from the main thread. 
This model would\n  // possibly benefit other related and expensive computations too (e.g.: updating subsets).\n  MetadataConstSharedPtr metadata() const override {\n    absl::ReaderMutexLock lock(&metadata_mutex_);\n    return metadata_;\n  }\n  void metadata(MetadataConstSharedPtr new_metadata) override {\n    absl::WriterMutexLock lock(&metadata_mutex_);\n    metadata_ = new_metadata;\n  }\n\n  const ClusterInfo& cluster() const override { return *cluster_; }\n  HealthCheckHostMonitor& healthChecker() const override {\n    if (health_checker_) {\n      return *health_checker_;\n    } else {\n      static HealthCheckHostMonitorNullImpl* null_health_checker =\n          new HealthCheckHostMonitorNullImpl();\n      return *null_health_checker;\n    }\n  }\n  Outlier::DetectorHostMonitor& outlierDetector() const override {\n    if (outlier_detector_) {\n      return *outlier_detector_;\n    } else {\n      static Outlier::DetectorHostMonitorNullImpl* null_outlier_detector =\n          new Outlier::DetectorHostMonitorNullImpl();\n      return *null_outlier_detector;\n    }\n  }\n  HostStats& stats() const override { return stats_; }\n  const std::string& hostnameForHealthChecks() const override { return health_checks_hostname_; }\n  const std::string& hostname() const override { return hostname_; }\n  Network::Address::InstanceConstSharedPtr address() const override { return address_; }\n  Network::Address::InstanceConstSharedPtr healthCheckAddress() const override {\n    return health_check_address_;\n  }\n  const envoy::config::core::v3::Locality& locality() const override { return locality_; }\n  Stats::StatName localityZoneStatName() const override {\n    return locality_zone_stat_name_.statName();\n  }\n  uint32_t priority() const override { return priority_; }\n  void priority(uint32_t priority) override { priority_ = priority; }\n  Network::TransportSocketFactory&\n  resolveTransportSocketFactory(const Network::Address::InstanceConstSharedPtr& dest_address,\n  
                              const envoy::config::core::v3::Metadata* metadata) const;\n\nprotected:\n  ClusterInfoConstSharedPtr cluster_;\n  const std::string hostname_;\n  const std::string health_checks_hostname_;\n  Network::Address::InstanceConstSharedPtr address_;\n  Network::Address::InstanceConstSharedPtr health_check_address_;\n  std::atomic<bool> canary_;\n  mutable absl::Mutex metadata_mutex_;\n  MetadataConstSharedPtr metadata_ ABSL_GUARDED_BY(metadata_mutex_);\n  const envoy::config::core::v3::Locality locality_;\n  Stats::StatNameDynamicStorage locality_zone_stat_name_;\n  mutable HostStats stats_;\n  Outlier::DetectorHostMonitorPtr outlier_detector_;\n  HealthCheckHostMonitorPtr health_checker_;\n  std::atomic<uint32_t> priority_;\n  Network::TransportSocketFactory& socket_factory_;\n};\n\n/**\n * Implementation of Upstream::Host.\n */\nclass HostImpl : public HostDescriptionImpl,\n                 public Host,\n                 public std::enable_shared_from_this<HostImpl> {\npublic:\n  HostImpl(ClusterInfoConstSharedPtr cluster, const std::string& hostname,\n           Network::Address::InstanceConstSharedPtr address, MetadataConstSharedPtr metadata,\n           uint32_t initial_weight, const envoy::config::core::v3::Locality& locality,\n           const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig& health_check_config,\n           uint32_t priority, const envoy::config::core::v3::HealthStatus health_status)\n      : HostDescriptionImpl(cluster, hostname, address, metadata, locality, health_check_config,\n                            priority),\n        used_(true) {\n    setEdsHealthFlag(health_status);\n    HostImpl::weight(initial_weight);\n  }\n\n  // Upstream::Host\n  std::vector<std::pair<absl::string_view, Stats::PrimitiveCounterReference>>\n  counters() const override {\n    return stats_.counters();\n  }\n  CreateConnectionData createConnection(\n      Event::Dispatcher& dispatcher, const 
Network::ConnectionSocket::OptionsSharedPtr& options,\n      Network::TransportSocketOptionsSharedPtr transport_socket_options) const override;\n  CreateConnectionData\n  createHealthCheckConnection(Event::Dispatcher& dispatcher,\n                              Network::TransportSocketOptionsSharedPtr transport_socket_options,\n                              const envoy::config::core::v3::Metadata* metadata) const override;\n\n  std::vector<std::pair<absl::string_view, Stats::PrimitiveGaugeReference>>\n  gauges() const override {\n    return stats_.gauges();\n  }\n  void healthFlagClear(HealthFlag flag) override { health_flags_ &= ~enumToInt(flag); }\n  bool healthFlagGet(HealthFlag flag) const override { return health_flags_ & enumToInt(flag); }\n  void healthFlagSet(HealthFlag flag) final { health_flags_ |= enumToInt(flag); }\n\n  ActiveHealthFailureType getActiveHealthFailureType() const override {\n    return active_health_failure_type_;\n  }\n  void setActiveHealthFailureType(ActiveHealthFailureType type) override {\n    active_health_failure_type_ = type;\n  }\n\n  void setHealthChecker(HealthCheckHostMonitorPtr&& health_checker) override {\n    health_checker_ = std::move(health_checker);\n  }\n  void setOutlierDetector(Outlier::DetectorHostMonitorPtr&& outlier_detector) override {\n    outlier_detector_ = std::move(outlier_detector);\n  }\n  Host::Health health() const override {\n    // If any of the unhealthy flags are set, host is unhealthy.\n    if (healthFlagGet(HealthFlag::FAILED_ACTIVE_HC) ||\n        healthFlagGet(HealthFlag::FAILED_OUTLIER_CHECK) ||\n        healthFlagGet(HealthFlag::FAILED_EDS_HEALTH)) {\n      return Host::Health::Unhealthy;\n    }\n\n    // If any of the degraded flags are set, host is degraded.\n    if (healthFlagGet(HealthFlag::DEGRADED_ACTIVE_HC) ||\n        healthFlagGet(HealthFlag::DEGRADED_EDS_HEALTH)) {\n      return Host::Health::Degraded;\n    }\n\n    // The host must have no flags or be pending removal.\n    
ASSERT(health_flags_ == 0 || healthFlagGet(HealthFlag::PENDING_DYNAMIC_REMOVAL));\n    return Host::Health::Healthy;\n  }\n\n  uint32_t weight() const override { return weight_; }\n  void weight(uint32_t new_weight) override;\n  bool used() const override { return used_; }\n  void used(bool new_used) override { used_ = new_used; }\n\nprotected:\n  static Network::ClientConnectionPtr\n  createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& cluster,\n                   const Network::Address::InstanceConstSharedPtr& address,\n                   Network::TransportSocketFactory& socket_factory,\n                   const Network::ConnectionSocket::OptionsSharedPtr& options,\n                   Network::TransportSocketOptionsSharedPtr transport_socket_options);\n\nprivate:\n  void setEdsHealthFlag(envoy::config::core::v3::HealthStatus health_status);\n\n  std::atomic<uint32_t> health_flags_{};\n  ActiveHealthFailureType active_health_failure_type_{};\n  std::atomic<uint32_t> weight_;\n  std::atomic<bool> used_;\n};\n\nclass HostsPerLocalityImpl : public HostsPerLocality {\npublic:\n  HostsPerLocalityImpl() : HostsPerLocalityImpl(std::vector<HostVector>(), false) {}\n\n  // Single locality constructor\n  HostsPerLocalityImpl(const HostVector& hosts, bool has_local_locality = false)\n      : HostsPerLocalityImpl(std::vector<HostVector>({hosts}), has_local_locality) {}\n\n  HostsPerLocalityImpl(std::vector<HostVector>&& locality_hosts, bool has_local_locality)\n      : local_(has_local_locality), hosts_per_locality_(std::move(locality_hosts)) {\n    ASSERT(!has_local_locality || !hosts_per_locality_.empty());\n  }\n\n  bool hasLocalLocality() const override { return local_; }\n  const std::vector<HostVector>& get() const override { return hosts_per_locality_; }\n  std::vector<HostsPerLocalityConstSharedPtr>\n  filter(const std::vector<std::function<bool(const Host&)>>& predicate) const override;\n\n  // The const shared pointer for the empty 
HostsPerLocalityImpl.\n  static HostsPerLocalityConstSharedPtr empty() {\n    static HostsPerLocalityConstSharedPtr empty = std::make_shared<HostsPerLocalityImpl>();\n    return empty;\n  }\n\nprivate:\n  // Does an entry exist for the local locality?\n  bool local_{};\n  // The first entry is for local hosts in the local locality.\n  std::vector<HostVector> hosts_per_locality_;\n};\n\n/**\n * A class for management of the set of hosts for a given priority level.\n */\nclass HostSetImpl : public HostSet {\npublic:\n  HostSetImpl(uint32_t priority, absl::optional<uint32_t> overprovisioning_factor)\n      : priority_(priority), overprovisioning_factor_(overprovisioning_factor.has_value()\n                                                          ? overprovisioning_factor.value()\n                                                          : kDefaultOverProvisioningFactor),\n        hosts_(new HostVector()), healthy_hosts_(new HealthyHostVector()),\n        degraded_hosts_(new DegradedHostVector()), excluded_hosts_(new ExcludedHostVector()) {}\n\n  /**\n   * Install a callback that will be invoked when the host set membership changes.\n   * @param callback supplies the callback to invoke.\n   * @return Common::CallbackHandle* the callback handle.\n   */\n  Common::CallbackHandle* addPriorityUpdateCb(PrioritySet::PriorityUpdateCb callback) const {\n    return member_update_cb_helper_.add(callback);\n  }\n\n  // Upstream::HostSet\n  const HostVector& hosts() const override { return *hosts_; }\n  HostVectorConstSharedPtr hostsPtr() const override { return hosts_; }\n  const HostVector& healthyHosts() const override { return healthy_hosts_->get(); }\n  HealthyHostVectorConstSharedPtr healthyHostsPtr() const override { return healthy_hosts_; }\n  const HostVector& degradedHosts() const override { return degraded_hosts_->get(); }\n  DegradedHostVectorConstSharedPtr degradedHostsPtr() const override { return degraded_hosts_; }\n  const HostVector& excludedHosts() const 
override { return excluded_hosts_->get(); }\n  ExcludedHostVectorConstSharedPtr excludedHostsPtr() const override { return excluded_hosts_; }\n  const HostsPerLocality& hostsPerLocality() const override { return *hosts_per_locality_; }\n  HostsPerLocalityConstSharedPtr hostsPerLocalityPtr() const override {\n    return hosts_per_locality_;\n  }\n  const HostsPerLocality& healthyHostsPerLocality() const override {\n    return *healthy_hosts_per_locality_;\n  }\n  HostsPerLocalityConstSharedPtr healthyHostsPerLocalityPtr() const override {\n    return healthy_hosts_per_locality_;\n  }\n  const HostsPerLocality& degradedHostsPerLocality() const override {\n    return *degraded_hosts_per_locality_;\n  }\n  HostsPerLocalityConstSharedPtr degradedHostsPerLocalityPtr() const override {\n    return degraded_hosts_per_locality_;\n  }\n  const HostsPerLocality& excludedHostsPerLocality() const override {\n    return *excluded_hosts_per_locality_;\n  }\n  HostsPerLocalityConstSharedPtr excludedHostsPerLocalityPtr() const override {\n    return excluded_hosts_per_locality_;\n  }\n  LocalityWeightsConstSharedPtr localityWeights() const override { return locality_weights_; }\n  absl::optional<uint32_t> chooseHealthyLocality() override;\n  absl::optional<uint32_t> chooseDegradedLocality() override;\n  uint32_t priority() const override { return priority_; }\n  uint32_t overprovisioningFactor() const override { return overprovisioning_factor_; }\n\n  static PrioritySet::UpdateHostsParams\n  updateHostsParams(HostVectorConstSharedPtr hosts,\n                    HostsPerLocalityConstSharedPtr hosts_per_locality,\n                    HealthyHostVectorConstSharedPtr healthy_hosts,\n                    HostsPerLocalityConstSharedPtr healthy_hosts_per_locality,\n                    DegradedHostVectorConstSharedPtr degraded_hosts,\n                    HostsPerLocalityConstSharedPtr degraded_hosts_per_locality,\n                    ExcludedHostVectorConstSharedPtr excluded_hosts,\n        
            HostsPerLocalityConstSharedPtr excluded_hosts_per_locality);\n  static PrioritySet::UpdateHostsParams updateHostsParams(const HostSet& host_set);\n  static PrioritySet::UpdateHostsParams\n  partitionHosts(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality);\n\n  void updateHosts(PrioritySet::UpdateHostsParams&& update_hosts_params,\n                   LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added,\n                   const HostVector& hosts_removed,\n                   absl::optional<uint32_t> overprovisioning_factor = absl::nullopt);\n\nprotected:\n  virtual void runUpdateCallbacks(const HostVector& hosts_added, const HostVector& hosts_removed) {\n    member_update_cb_helper_.runCallbacks(priority_, hosts_added, hosts_removed);\n  }\n\nprivate:\n  // Weight for a locality taking into account health status using the provided eligible hosts per\n  // locality.\n  static double effectiveLocalityWeight(uint32_t index,\n                                        const HostsPerLocality& eligible_hosts_per_locality,\n                                        const HostsPerLocality& excluded_hosts_per_locality,\n                                        const HostsPerLocality& all_hosts_per_locality,\n                                        const LocalityWeights& locality_weights,\n                                        uint32_t overprovisioning_factor);\n\n  uint32_t priority_;\n  uint32_t overprovisioning_factor_;\n  HostVectorConstSharedPtr hosts_;\n  HealthyHostVectorConstSharedPtr healthy_hosts_;\n  DegradedHostVectorConstSharedPtr degraded_hosts_;\n  ExcludedHostVectorConstSharedPtr excluded_hosts_;\n  HostsPerLocalityConstSharedPtr hosts_per_locality_{HostsPerLocalityImpl::empty()};\n  HostsPerLocalityConstSharedPtr healthy_hosts_per_locality_{HostsPerLocalityImpl::empty()};\n  HostsPerLocalityConstSharedPtr degraded_hosts_per_locality_{HostsPerLocalityImpl::empty()};\n  
HostsPerLocalityConstSharedPtr excluded_hosts_per_locality_{HostsPerLocalityImpl::empty()};\n  // TODO(mattklein123): Remove mutable.\n  mutable Common::CallbackManager<uint32_t, const HostVector&, const HostVector&>\n      member_update_cb_helper_;\n  // Locality weights (used to build WRR locality_scheduler_);\n  LocalityWeightsConstSharedPtr locality_weights_;\n  // WRR locality scheduler state.\n  struct LocalityEntry {\n    LocalityEntry(uint32_t index, double effective_weight)\n        : index_(index), effective_weight_(effective_weight) {}\n    const uint32_t index_;\n    const double effective_weight_;\n  };\n\n  // Rebuilds the provided locality scheduler with locality entries based on the locality weights\n  // and eligible hosts.\n  //\n  // @param locality_scheduler the locality scheduler to rebuild. Will be set to nullptr if no\n  // localities are eligible.\n  // @param locality_entries the vector that holds locality entries. Will be reset and populated\n  // with entries corresponding to the new scheduler.\n  // @param eligible_hosts_per_locality eligible hosts for this scheduler grouped by locality.\n  // @param eligible_hosts all eligible hosts for this scheduler.\n  // @param all_hosts_per_locality all hosts for this HostSet grouped by locality.\n  // @param locality_weights the weighting of each locality.\n  // @param overprovisioning_factor the overprovisioning factor to use when computing the effective\n  // weight of a locality.\n  static void rebuildLocalityScheduler(\n      std::unique_ptr<EdfScheduler<LocalityEntry>>& locality_scheduler,\n      std::vector<std::shared_ptr<LocalityEntry>>& locality_entries,\n      const HostsPerLocality& eligible_hosts_per_locality, const HostVector& eligible_hosts,\n      HostsPerLocalityConstSharedPtr all_hosts_per_locality,\n      HostsPerLocalityConstSharedPtr excluded_hosts_per_locality,\n      LocalityWeightsConstSharedPtr locality_weights, uint32_t overprovisioning_factor);\n\n  static 
absl::optional<uint32_t> chooseLocality(EdfScheduler<LocalityEntry>* locality_scheduler);\n\n  std::vector<std::shared_ptr<LocalityEntry>> healthy_locality_entries_;\n  std::unique_ptr<EdfScheduler<LocalityEntry>> healthy_locality_scheduler_;\n  std::vector<std::shared_ptr<LocalityEntry>> degraded_locality_entries_;\n  std::unique_ptr<EdfScheduler<LocalityEntry>> degraded_locality_scheduler_;\n};\n\nusing HostSetImplPtr = std::unique_ptr<HostSetImpl>;\n\n/**\n * A class for management of the set of hosts in a given cluster.\n */\nclass PrioritySetImpl : public PrioritySet {\npublic:\n  PrioritySetImpl() : batch_update_(false) {}\n  // From PrioritySet\n  Common::CallbackHandle* addMemberUpdateCb(MemberUpdateCb callback) const override {\n    return member_update_cb_helper_.add(callback);\n  }\n  Common::CallbackHandle* addPriorityUpdateCb(PriorityUpdateCb callback) const override {\n    return priority_update_cb_helper_.add(callback);\n  }\n  const std::vector<std::unique_ptr<HostSet>>& hostSetsPerPriority() const override {\n    return host_sets_;\n  }\n  // Get the host set for this priority level, creating it if necessary.\n  const HostSet&\n  getOrCreateHostSet(uint32_t priority,\n                     absl::optional<uint32_t> overprovisioning_factor = absl::nullopt);\n\n  void updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params,\n                   LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added,\n                   const HostVector& hosts_removed,\n                   absl::optional<uint32_t> overprovisioning_factor = absl::nullopt) override;\n\n  void batchHostUpdate(BatchUpdateCb& callback) override;\n\nprotected:\n  // Allows subclasses of PrioritySetImpl to create their own type of HostSetImpl.\n  virtual HostSetImplPtr createHostSet(uint32_t priority,\n                                       absl::optional<uint32_t> overprovisioning_factor) {\n    return std::make_unique<HostSetImpl>(priority, 
overprovisioning_factor);\n  }\n\nprotected:\n  virtual void runUpdateCallbacks(const HostVector& hosts_added, const HostVector& hosts_removed) {\n    member_update_cb_helper_.runCallbacks(hosts_added, hosts_removed);\n  }\n  virtual void runReferenceUpdateCallbacks(uint32_t priority, const HostVector& hosts_added,\n                                           const HostVector& hosts_removed) {\n    priority_update_cb_helper_.runCallbacks(priority, hosts_added, hosts_removed);\n  }\n  // This vector will generally have at least one member, for priority level 0.\n  // It will expand as host sets are added but currently does not shrink to\n  // avoid any potential lifetime issues.\n  std::vector<std::unique_ptr<HostSet>> host_sets_;\n\nprivate:\n  // TODO(mattklein123): Remove mutable.\n  mutable Common::CallbackManager<const HostVector&, const HostVector&> member_update_cb_helper_;\n  mutable Common::CallbackManager<uint32_t, const HostVector&, const HostVector&>\n      priority_update_cb_helper_;\n  bool batch_update_ : 1;\n\n  // Helper class to maintain state as we perform multiple host updates. 
Keeps track of all hosts\n  // that have been added/removed throughout the batch update, and ensures that we properly manage\n  // the batch_update_ flag.\n  class BatchUpdateScope : public HostUpdateCb {\n  public:\n    explicit BatchUpdateScope(PrioritySetImpl& parent) : parent_(parent) {\n      ASSERT(!parent_.batch_update_);\n      parent_.batch_update_ = true;\n    }\n    ~BatchUpdateScope() override { parent_.batch_update_ = false; }\n\n    void updateHosts(uint32_t priority, PrioritySet::UpdateHostsParams&& update_hosts_params,\n                     LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added,\n                     const HostVector& hosts_removed,\n                     absl::optional<uint32_t> overprovisioning_factor) override;\n\n    absl::node_hash_set<HostSharedPtr> all_hosts_added_;\n    absl::node_hash_set<HostSharedPtr> all_hosts_removed_;\n\n  private:\n    PrioritySetImpl& parent_;\n    absl::node_hash_set<uint32_t> priorities_;\n  };\n};\n\n/**\n * Implementation of ClusterInfo that reads from JSON.\n */\nclass ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable<Logger::Id::upstream> {\npublic:\n  ClusterInfoImpl(const envoy::config::cluster::v3::Cluster& config,\n                  const envoy::config::core::v3::BindConfig& bind_config, Runtime::Loader& runtime,\n                  TransportSocketMatcherPtr&& socket_matcher, Stats::ScopePtr&& stats_scope,\n                  bool added_via_api, Server::Configuration::TransportSocketFactoryContext&);\n\n  static ClusterStats generateStats(Stats::Scope& scope);\n  static ClusterLoadReportStats generateLoadReportStats(Stats::Scope& scope);\n  static ClusterCircuitBreakersStats generateCircuitBreakersStats(Stats::Scope& scope,\n                                                                  const std::string& stat_prefix,\n                                                                  bool track_remaining);\n  static ClusterRequestResponseSizeStats 
generateRequestResponseSizeStats(Stats::Scope&);\n  static ClusterTimeoutBudgetStats generateTimeoutBudgetStats(Stats::Scope&);\n\n  // Upstream::ClusterInfo\n  bool addedViaApi() const override { return added_via_api_; }\n  const envoy::config::cluster::v3::Cluster::CommonLbConfig& lbConfig() const override {\n    return common_lb_config_;\n  }\n  std::chrono::milliseconds connectTimeout() const override { return connect_timeout_; }\n  const absl::optional<std::chrono::milliseconds> idleTimeout() const override {\n    return idle_timeout_;\n  }\n  float perUpstreamPrefetchRatio() const override { return per_upstream_prefetch_ratio_; }\n  float peekaheadRatio() const override { return peekahead_ratio_; }\n  uint32_t perConnectionBufferLimitBytes() const override {\n    return per_connection_buffer_limit_bytes_;\n  }\n  uint64_t features() const override { return features_; }\n  const Http::Http1Settings& http1Settings() const override { return http1_settings_; }\n  const envoy::config::core::v3::Http2ProtocolOptions& http2Options() const override {\n    return http2_options_;\n  }\n  const envoy::config::core::v3::HttpProtocolOptions& commonHttpProtocolOptions() const override {\n    return common_http_protocol_options_;\n  }\n  ProtocolOptionsConfigConstSharedPtr\n  extensionProtocolOptions(const std::string& name) const override;\n  LoadBalancerType lbType() const override { return lb_type_; }\n  envoy::config::cluster::v3::Cluster::DiscoveryType type() const override { return type_; }\n  const absl::optional<envoy::config::cluster::v3::Cluster::CustomClusterType>&\n  clusterType() const override {\n    return cluster_type_;\n  }\n  const absl::optional<envoy::config::cluster::v3::Cluster::LeastRequestLbConfig>&\n  lbLeastRequestConfig() const override {\n    return lb_least_request_config_;\n  }\n  const absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig>&\n  lbRingHashConfig() const override {\n    return lb_ring_hash_config_;\n  }\n  const 
absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig>&\n  lbMaglevConfig() const override {\n    return lb_maglev_config_;\n  }\n  const absl::optional<envoy::config::cluster::v3::Cluster::OriginalDstLbConfig>&\n  lbOriginalDstConfig() const override {\n    return lb_original_dst_config_;\n  }\n  const absl::optional<envoy::config::core::v3::TypedExtensionConfig>&\n  upstreamConfig() const override {\n    return upstream_config_;\n  }\n  bool maintenanceMode() const override;\n  uint64_t maxRequestsPerConnection() const override { return max_requests_per_connection_; }\n  uint32_t maxResponseHeadersCount() const override { return max_response_headers_count_; }\n  const std::string& name() const override { return name_; }\n  ResourceManager& resourceManager(ResourcePriority priority) const override;\n  TransportSocketMatcher& transportSocketMatcher() const override { return *socket_matcher_; }\n  ClusterStats& stats() const override { return stats_; }\n  Stats::Scope& statsScope() const override { return *stats_scope_; }\n\n  ClusterRequestResponseSizeStatsOptRef requestResponseSizeStats() const override {\n    if (optional_cluster_stats_ == nullptr ||\n        optional_cluster_stats_->request_response_size_stats_ == nullptr) {\n      return absl::nullopt;\n    }\n\n    return std::ref(*(optional_cluster_stats_->request_response_size_stats_));\n  }\n\n  ClusterLoadReportStats& loadReportStats() const override { return load_report_stats_; }\n\n  ClusterTimeoutBudgetStatsOptRef timeoutBudgetStats() const override {\n    if (optional_cluster_stats_ == nullptr ||\n        optional_cluster_stats_->timeout_budget_stats_ == nullptr) {\n      return absl::nullopt;\n    }\n\n    return std::ref(*(optional_cluster_stats_->timeout_budget_stats_));\n  }\n\n  const Network::Address::InstanceConstSharedPtr& sourceAddress() const override {\n    return source_address_;\n  };\n  const LoadBalancerSubsetInfo& lbSubsetInfo() const override { return lb_subset_; }\n  
const envoy::config::core::v3::Metadata& metadata() const override { return metadata_; }\n  const Envoy::Config::TypedMetadata& typedMetadata() const override { return typed_metadata_; }\n\n  const Network::ConnectionSocket::OptionsSharedPtr& clusterSocketOptions() const override {\n    return cluster_socket_options_;\n  };\n\n  bool drainConnectionsOnHostRemoval() const override { return drain_connections_on_host_removal_; }\n  bool connectionPoolPerDownstreamConnection() const override {\n    return connection_pool_per_downstream_connection_;\n  }\n  bool warmHosts() const override { return warm_hosts_; }\n  const absl::optional<envoy::config::core::v3::UpstreamHttpProtocolOptions>&\n  upstreamHttpProtocolOptions() const override {\n    return upstream_http_protocol_options_;\n  }\n\n  absl::optional<std::string> edsServiceName() const override { return eds_service_name_; }\n\n  void createNetworkFilterChain(Network::Connection&) const override;\n  Http::Protocol\n  upstreamHttpProtocol(absl::optional<Http::Protocol> downstream_protocol) const override;\n\n  Http::Http1::CodecStats& http1CodecStats() const override;\n  Http::Http2::CodecStats& http2CodecStats() const override;\n\nprivate:\n  struct ResourceManagers {\n    ResourceManagers(const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime,\n                     const std::string& cluster_name, Stats::Scope& stats_scope);\n    ResourceManagerImplPtr load(const envoy::config::cluster::v3::Cluster& config,\n                                Runtime::Loader& runtime, const std::string& cluster_name,\n                                Stats::Scope& stats_scope,\n                                const envoy::config::core::v3::RoutingPriority& priority);\n\n    using Managers = std::array<ResourceManagerImplPtr, NumResourcePriorities>;\n\n    Managers managers_;\n  };\n\n  struct OptionalClusterStats {\n    OptionalClusterStats(const envoy::config::cluster::v3::Cluster& config,\n                      
   Stats::Scope& stats_scope);\n    const ClusterTimeoutBudgetStatsPtr timeout_budget_stats_;\n    const ClusterRequestResponseSizeStatsPtr request_response_size_stats_;\n  };\n\n  Runtime::Loader& runtime_;\n  const std::string name_;\n  const envoy::config::cluster::v3::Cluster::DiscoveryType type_;\n  const uint64_t max_requests_per_connection_;\n  const uint32_t max_response_headers_count_;\n  const std::chrono::milliseconds connect_timeout_;\n  absl::optional<std::chrono::milliseconds> idle_timeout_;\n  const float per_upstream_prefetch_ratio_;\n  const float peekahead_ratio_;\n  const uint32_t per_connection_buffer_limit_bytes_;\n  TransportSocketMatcherPtr socket_matcher_;\n  Stats::ScopePtr stats_scope_;\n  mutable ClusterStats stats_;\n  Stats::IsolatedStoreImpl load_report_stats_store_;\n  mutable ClusterLoadReportStats load_report_stats_;\n  const std::unique_ptr<OptionalClusterStats> optional_cluster_stats_;\n  const uint64_t features_;\n  const Http::Http1Settings http1_settings_;\n  const envoy::config::core::v3::Http2ProtocolOptions http2_options_;\n  const envoy::config::core::v3::HttpProtocolOptions common_http_protocol_options_;\n  const std::map<std::string, ProtocolOptionsConfigConstSharedPtr> extension_protocol_options_;\n  mutable ResourceManagers resource_managers_;\n  const std::string maintenance_mode_runtime_key_;\n  const Network::Address::InstanceConstSharedPtr source_address_;\n  LoadBalancerType lb_type_;\n  absl::optional<envoy::config::cluster::v3::Cluster::LeastRequestLbConfig>\n      lb_least_request_config_;\n  absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig> lb_ring_hash_config_;\n  absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig> lb_maglev_config_;\n  absl::optional<envoy::config::cluster::v3::Cluster::OriginalDstLbConfig> lb_original_dst_config_;\n  absl::optional<envoy::config::core::v3::TypedExtensionConfig> upstream_config_;\n  const bool added_via_api_;\n  LoadBalancerSubsetInfoImpl 
lb_subset_;\n  const envoy::config::core::v3::Metadata metadata_;\n  Envoy::Config::TypedMetadataImpl<ClusterTypedMetadataFactory> typed_metadata_;\n  const envoy::config::cluster::v3::Cluster::CommonLbConfig common_lb_config_;\n  const Network::ConnectionSocket::OptionsSharedPtr cluster_socket_options_;\n  const bool drain_connections_on_host_removal_;\n  const bool connection_pool_per_downstream_connection_;\n  const bool warm_hosts_;\n  const absl::optional<envoy::config::core::v3::UpstreamHttpProtocolOptions>\n      upstream_http_protocol_options_;\n  absl::optional<std::string> eds_service_name_;\n  const absl::optional<envoy::config::cluster::v3::Cluster::CustomClusterType> cluster_type_;\n  const std::unique_ptr<Server::Configuration::CommonFactoryContext> factory_context_;\n  std::vector<Network::FilterFactoryCb> filter_factories_;\n  mutable Http::Http1::CodecStats::AtomicPtr http1_codec_stats_;\n  mutable Http::Http2::CodecStats::AtomicPtr http2_codec_stats_;\n};\n\n/**\n * Function that creates a Network::TransportSocketFactoryPtr\n * given a cluster configuration and transport socket factory\n * context.\n */\nNetwork::TransportSocketFactoryPtr\ncreateTransportSocketFactory(const envoy::config::cluster::v3::Cluster& config,\n                             Server::Configuration::TransportSocketFactoryContext& factory_context);\n\n/**\n * Base class all primary clusters.\n */\nclass ClusterImplBase : public Cluster, protected Logger::Loggable<Logger::Id::upstream> {\n\npublic:\n  // Upstream::Cluster\n  PrioritySet& prioritySet() override { return priority_set_; }\n  const PrioritySet& prioritySet() const override { return priority_set_; }\n\n  /**\n   * Optionally set the health checker for the primary cluster. This is done after cluster\n   * creation since the health checker assumes that the cluster has already been fully initialized\n   * so there is a cyclic dependency. 
However we want the cluster to own the health checker.\n   */\n  void setHealthChecker(const HealthCheckerSharedPtr& health_checker);\n\n  /**\n   * Optionally set the outlier detector for the primary cluster. Done for the same reason as\n   * documented in setHealthChecker().\n   */\n  void setOutlierDetector(const Outlier::DetectorSharedPtr& outlier_detector);\n\n  /**\n   * Wrapper around Network::Address::resolveProtoAddress() that provides improved error message\n   * based on the cluster's type.\n   * @param address supplies the address proto to resolve.\n   * @return Network::Address::InstanceConstSharedPtr the resolved address.\n   */\n  const Network::Address::InstanceConstSharedPtr\n  resolveProtoAddress(const envoy::config::core::v3::Address& address);\n\n  // Partitions the provided list of hosts into three new lists containing the healthy, degraded\n  // and excluded hosts respectively.\n  static std::tuple<HealthyHostVectorConstSharedPtr, DegradedHostVectorConstSharedPtr,\n                    ExcludedHostVectorConstSharedPtr>\n  partitionHostList(const HostVector& hosts);\n\n  // Partitions the provided list of hosts per locality into three new lists containing the healthy,\n  // degraded and excluded hosts respectively.\n  static std::tuple<HostsPerLocalityConstSharedPtr, HostsPerLocalityConstSharedPtr,\n                    HostsPerLocalityConstSharedPtr>\n  partitionHostsPerLocality(const HostsPerLocality& hosts);\n  Config::ConstMetadataSharedPoolSharedPtr constMetadataSharedPool() {\n    return const_metadata_shared_pool_;\n  }\n\n  // Upstream::Cluster\n  HealthChecker* healthChecker() override { return health_checker_.get(); }\n  ClusterInfoConstSharedPtr info() const override { return info_; }\n  Outlier::Detector* outlierDetector() override { return outlier_detector_.get(); }\n  const Outlier::Detector* outlierDetector() const override { return outlier_detector_.get(); }\n  void initialize(std::function<void()> callback) 
override;\n\nprotected:\n  ClusterImplBase(const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n                  Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n                  Stats::ScopePtr&& stats_scope, bool added_via_api);\n\n  /**\n   * Overridden by every concrete cluster. The cluster should do whatever pre-init is needed. E.g.,\n   * query DNS, contact EDS, etc.\n   */\n  virtual void startPreInit() PURE;\n\n  /**\n   * Called by every concrete cluster when pre-init is complete. At this point,\n   * shared init starts init_manager_ initialization and determines if there\n   * is an initial health check pass needed, etc.\n   */\n  void onPreInitComplete();\n\n  /**\n   * Called by every concrete cluster after all targets registered at init manager are\n   * initialized. At this point, shared init takes over and determines if there is an initial health\n   * check pass needed, etc.\n   */\n  void onInitDone();\n\n  virtual void reloadHealthyHostsHelper(const HostSharedPtr& host);\n\n  // This init manager is shared via TransportSocketFactoryContext. The initialization targets that\n  // register with this init manager are expected to be for implementations of SdsApi (see\n  // SdsApi::init_target_).\n  Init::ManagerImpl init_manager_;\n\n  // Once all targets are initialized (i.e. 
once all dynamic secrets are loaded), this watcher calls\n  // onInitDone() above.\n  Init::WatcherImpl init_watcher_;\n\n  Runtime::Loader& runtime_;\n  ClusterInfoConstSharedPtr info_; // This cluster info stores the stats scope so it must be\n                                   // initialized first and destroyed last.\n  HealthCheckerSharedPtr health_checker_;\n  Outlier::DetectorSharedPtr outlier_detector_;\n\nprotected:\n  PrioritySetImpl priority_set_;\n\n  void validateEndpointsForZoneAwareRouting(\n      const envoy::config::endpoint::v3::LocalityLbEndpoints& endpoints) const;\n\nprivate:\n  void finishInitialization();\n  void reloadHealthyHosts(const HostSharedPtr& host);\n\n  bool initialization_started_{};\n  std::function<void()> initialization_complete_callback_;\n  uint64_t pending_initialize_health_checks_{};\n  const bool local_cluster_;\n  Config::ConstMetadataSharedPoolSharedPtr const_metadata_shared_pool_;\n};\n\nusing ClusterImplBaseSharedPtr = std::shared_ptr<ClusterImplBase>;\n\n/**\n * Manages PriorityState of a cluster. PriorityState is a per-priority binding of a set of hosts\n * with its corresponding locality weight map. 
This is useful to store priorities/hosts/localities\n * before updating the cluster priority set.\n */\nclass PriorityStateManager : protected Logger::Loggable<Logger::Id::upstream> {\npublic:\n  PriorityStateManager(ClusterImplBase& cluster, const LocalInfo::LocalInfo& local_info,\n                       PrioritySet::HostUpdateCb* update_cb);\n\n  // Initializes the PriorityState vector based on the priority specified in locality_lb_endpoint.\n  void initializePriorityFor(\n      const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint);\n\n  // Registers a host based on its address to the PriorityState based on the specified priority (the\n  // priority is specified by locality_lb_endpoint.priority()).\n  //\n  // The specified health_checker_flag is used to set the registered-host's health-flag when the\n  // lb_endpoint health status is unhealthy, draining or timeout.\n  void registerHostForPriority(\n      const std::string& hostname, Network::Address::InstanceConstSharedPtr address,\n      const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint,\n      const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint);\n\n  void registerHostForPriority(\n      const HostSharedPtr& host,\n      const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint);\n\n  void\n  updateClusterPrioritySet(const uint32_t priority, HostVectorSharedPtr&& current_hosts,\n                           const absl::optional<HostVector>& hosts_added,\n                           const absl::optional<HostVector>& hosts_removed,\n                           const absl::optional<Upstream::Host::HealthFlag> health_checker_flag,\n                           absl::optional<uint32_t> overprovisioning_factor = absl::nullopt);\n\n  // Returns the saved priority state.\n  PriorityState& priorityState() { return priority_state_; }\n\nprivate:\n  ClusterImplBase& parent_;\n  PriorityState priority_state_;\n  const envoy::config::core::v3::Node& 
local_info_node_;\n  PrioritySet::HostUpdateCb* update_cb_;\n};\n\nusing PriorityStateManagerPtr = std::unique_ptr<PriorityStateManager>;\n\n/**\n * Base for all dynamic cluster types.\n */\nclass BaseDynamicClusterImpl : public ClusterImplBase {\nprotected:\n  using ClusterImplBase::ClusterImplBase;\n\n  /**\n   * Updates the host list of a single priority by reconciling the list of new hosts\n   * with existing hosts.\n   *\n   * @param new_hosts the full lists of hosts in the new configuration.\n   * @param current_priority_hosts the full lists of hosts for the priority to be updated. The list\n   * will be modified to contain the updated list of hosts.\n   * @param hosts_added_to_current_priority will be populated with hosts added to the priority.\n   * @param hosts_removed_from_current_priority will be populated with hosts removed from the\n   * priority.\n   * @param updated_hosts is used to aggregate the new state of all hosts across priority, and will\n   * be updated with the hosts that remain in this priority after the update.\n   * @param all_hosts all known hosts prior to this host update.\n   * @return whether the hosts for the priority changed.\n   */\n  bool updateDynamicHostList(const HostVector& new_hosts, HostVector& current_priority_hosts,\n                             HostVector& hosts_added_to_current_priority,\n                             HostVector& hosts_removed_from_current_priority,\n                             HostMap& updated_hosts, const HostMap& all_hosts);\n};\n\n/**\n * Utility function to get Dns from cluster/enum.\n */\nNetwork::DnsLookupFamily\ngetDnsLookupFamilyFromCluster(const envoy::config::cluster::v3::Cluster& cluster);\nNetwork::DnsLookupFamily\ngetDnsLookupFamilyFromEnum(envoy::config::cluster::v3::Cluster::DnsLookupFamily family);\n\n/**\n * Utility function to report upstream cx destroy metrics\n */\nvoid reportUpstreamCxDestroy(const Upstream::HostDescriptionConstSharedPtr& host,\n                             
Network::ConnectionEvent event);\n\n/**\n * Utility function to report upstream cx destroy active request metrics\n */\nvoid reportUpstreamCxDestroyActiveRequest(const Upstream::HostDescriptionConstSharedPtr& host,\n                                          Network::ConnectionEvent event);\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/version/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_basic_cc_library\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n    \"envoy_select_boringssl\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\ngenrule(\n    name = \"generate_version_number\",\n    srcs = [\"//:VERSION\"],\n    outs = [\"version_number.h\"],\n    cmd = \"\"\"echo \"#define BUILD_VERSION_NUMBER \\\\\"$$(cat $<)\\\\\"\" >$@\"\"\",\n    visibility = [\"//visibility:private\"],\n)\n\ngenrule(\n    name = \"generate_version_linkstamp\",\n    outs = [\"manual_linkstamp.cc\"],\n    cmd = select({\n        # Only iOS builds typically follow this logic, OS/X is built as a normal binary\n        \"//bazel:apple\": \"$(location :generate_version_linkstamp.sh) Library >> $@\",\n        \"//conditions:default\": \"$(location :generate_version_linkstamp.sh) >> $@\",\n    }),\n    # Undocumented attr to depend on workspace status files.\n    # https://github.com/bazelbuild/bazel/issues/4942\n    # Used here because generate_version_linkstamp.sh depends on the workspace status files.\n    stamp = 1,\n    tools = [\":generate_version_linkstamp.sh\"],\n    visibility = [\"//visibility:private\"],\n)\n\nenvoy_cc_library(\n    name = \"version_includes\",\n    hdrs = [\n        \"version.h\",\n        \":generate_version_number\",\n    ],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"version_lib\",\n    srcs = [\"version.cc\"],\n    copts = envoy_select_boringssl(\n        [\"-DENVOY_SSL_VERSION=\\\\\\\"BoringSSL-FIPS\\\\\\\"\"],\n        [\"-DENVOY_SSL_VERSION=\\\\\\\"BoringSSL\\\\\\\"\"],\n    ),\n    deps = [\n        \":version_includes\",\n        \"//source/common/common:macros\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n\nenvoy_basic_cc_library(\n    name = \"manual_version_linkstamp\",\n    srcs = 
[\":generate_version_linkstamp\"],\n    visibility = [\"//visibility:private\"],\n)\n\nenvoy_basic_cc_library(\n    name = \"version_linkstamp\",\n    linkstamp = select({\n        \"//bazel:manual_stamp\": None,\n        \"//conditions:default\": \"version_linkstamp.cc\",\n    }),\n    # Linking this library makes build cache inefficient, limiting this to //source/exe package only.\n    # Tests are linked with //test/test_common:test_version_linkstamp.\n    visibility = [\"//source/exe:__pkg__\"],\n    deps = select({\n        \"//bazel:manual_stamp\": [\":manual_version_linkstamp\"],\n        \"//conditions:default\": [],\n    }),\n    alwayslink = 1,\n)\n"
  },
  {
    "path": "source/common/version/generate_version_linkstamp.sh",
    "content": "#!/bin/bash\n\n# This script generates a header file that is used by version_lib whenever linkstamp is not allowed.\n# linkstamp is used to link in version_linkstamp.cc into the version_lib.\n# However, linkstamp is not available to non-binary bazel targets.\n# This means that if the topmost target being used to compile version_lib is a envoy_cc_library or related, linkstamp will not be in effect.\n# In turn this means that version_linkstamp.cc is not linked, and the build_scm_revision and build_scm_status are unknown symbols to the linker.\n\n# Unfortunately linkstamp is not well documented (https://github.com/bazelbuild/bazel/issues/2893).\n# But following the implicit trail one can deduce that linkstamp is in effect when \"stamping\" (https://github.com/bazelbuild/bazel/issues/2893) is on.\n# envoy_cc_library -- and the underlying cc_library rule -- does not support \"stamping\".\n# This makes sense as stamping mainly makes sense in the context of binaries for production releases, not static libraries.\nbuild_scm_revision=$(sed -n -E 's/^BUILD_SCM_REVISION ([0-9a-f]{40})$/\\1/p' < bazel-out/volatile-status.txt)\nif [ -z \"$1\" ]; then\n  build_scm_status=$(sed -n -E 's/^BUILD_SCM_STATUS ([a-zA-Z]*)$/\\1/p' < bazel-out/volatile-status.txt)\nelse\n  build_scm_status=$1\nfi\n\necho \"extern const char build_scm_revision[];\"\necho \"extern const char build_scm_status[];\"\necho \"const char build_scm_revision[] = \\\"$build_scm_revision\\\";\"\necho \"const char build_scm_status[] = \\\"$build_scm_status\\\";\"\n"
  },
  {
    "path": "source/common/version/version.cc",
    "content": "#include \"common/version/version.h\"\n\n#include <map>\n#include <regex>\n#include <string>\n\n#include \"common/common/fmt.h\"\n#include \"common/common/macros.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/strings/numbers.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n\nextern const char build_scm_revision[];\nextern const char build_scm_status[];\n\nnamespace Envoy {\nconst std::string& VersionInfo::revision() {\n  CONSTRUCT_ON_FIRST_USE(std::string, build_scm_revision);\n}\n\nconst std::string& VersionInfo::revisionStatus() {\n  CONSTRUCT_ON_FIRST_USE(std::string, build_scm_status);\n}\n\nconst std::string& VersionInfo::version() {\n  CONSTRUCT_ON_FIRST_USE(std::string,\n                         fmt::format(\"{}/{}/{}/{}/{}\", revision(), BUILD_VERSION_NUMBER,\n                                     revisionStatus(), buildType(), sslVersion()));\n}\n\nconst envoy::config::core::v3::BuildVersion& VersionInfo::buildVersion() {\n  static const auto* result =\n      new envoy::config::core::v3::BuildVersion(makeBuildVersion(BUILD_VERSION_NUMBER));\n  return *result;\n}\n\nconst std::string& VersionInfo::buildType() {\n#ifdef NDEBUG\n  static const std::string release_type = \"RELEASE\";\n#else\n  static const std::string release_type = \"DEBUG\";\n#endif\n  return release_type;\n}\n\nconst std::string& VersionInfo::sslVersion() {\n#ifdef ENVOY_SSL_VERSION\n  static const std::string ssl_version = ENVOY_SSL_VERSION;\n#else\n  static const std::string ssl_version = \"no-ssl\";\n#endif\n  return ssl_version;\n}\n\nenvoy::config::core::v3::BuildVersion VersionInfo::makeBuildVersion(const char* version) {\n  envoy::config::core::v3::BuildVersion result;\n  // Split BUILD_VERSION_NUMBER into version and an optional build label after the '-'\n  std::regex ver_regex(\"([\\\\d]+)\\\\.([\\\\d]+)\\\\.([\\\\d]+)(-(.*))?\");\n  // Match indexes, given the regex above\n  constexpr std::cmatch::size_type major = 
1;\n  constexpr std::cmatch::size_type minor = 2;\n  constexpr std::cmatch::size_type patch = 3;\n  constexpr std::cmatch::size_type label = 5;\n  std::cmatch match;\n  if (std::regex_match(version, match, ver_regex)) {\n    int value = 0;\n    if (absl::SimpleAtoi(match.str(major), &value)) {\n      result.mutable_version()->set_major_number(value);\n    }\n    if (absl::SimpleAtoi(match.str(minor), &value)) {\n      result.mutable_version()->set_minor_number(value);\n    }\n    if (absl::SimpleAtoi(match.str(patch), &value)) {\n      result.mutable_version()->set_patch(value);\n    }\n  }\n  std::map<std::string, std::string> fields;\n  if (!match.str(label).empty()) {\n    fields[BuildVersionMetadataKeys::get().BuildLabel] = match.str(label);\n  }\n  fields[BuildVersionMetadataKeys::get().BuildType] = buildType();\n  fields[BuildVersionMetadataKeys::get().SslVersion] = sslVersion();\n  fields[BuildVersionMetadataKeys::get().RevisionSHA] = revision();\n  fields[BuildVersionMetadataKeys::get().RevisionStatus] = revisionStatus();\n  *result.mutable_metadata() = MessageUtil::keyValueStruct(fields);\n  return result;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/version/version.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/singleton/const_singleton.h\"\n#include \"common/version/version_number.h\"\n\nnamespace Envoy {\n\nclass VersionInfoTestPeer;\n\n/**\n * Wraps compiled in code versioning.\n */\nclass VersionInfo {\npublic:\n  // Repository revision (e.g. git SHA1).\n  static const std::string& revision();\n  // Repository status (e.g. clean, modified).\n  static const std::string& revisionStatus();\n  // Repository information and build type.\n  static const std::string& version();\n\n  static const envoy::config::core::v3::BuildVersion& buildVersion();\n\nprivate:\n  friend class Envoy::VersionInfoTestPeer;\n  // RELEASE or DEBUG\n  static const std::string& buildType();\n  static const std::string& sslVersion();\n  static envoy::config::core::v3::BuildVersion makeBuildVersion(const char* version);\n};\n\nclass BuildVersionMetadata {\npublic:\n  // Type of build: RELEASE or DEBUG\n  const std::string BuildType = \"build.type\";\n  // Build label from the VERSION file\n  const std::string BuildLabel = \"build.label\";\n  // Version of the SSL implementation\n  const std::string SslVersion = \"ssl.version\";\n  // SCM revision of the source tree\n  const std::string RevisionSHA = \"revision.sha\";\n  // SCM status of the source tree\n  const std::string RevisionStatus = \"revision.status\";\n};\n\nusing BuildVersionMetadataKeys = ConstSingleton<BuildVersionMetadata>;\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/common/version/version_linkstamp.cc",
    "content": "// NOLINT(namespace-envoy)\nextern const char build_scm_revision[];\nextern const char build_scm_status[];\n\nconst char build_scm_revision[] = BUILD_SCM_REVISION;\nconst char build_scm_status[] = BUILD_SCM_STATUS;\n"
  },
  {
    "path": "source/docs/fancy_logger.md",
    "content": "## Fancy Logger: Flexible Granularity Log Control in Envoy\n\n### Overview\nFancy Logger is a logger with finer grained log level control and runtime logger update using administration interface. Compared to the existing logger in Envoy, Fancy Logger provides file level control which can be easily extended to finer grained logger with function or line level control, and it is completely automatic and never requires developers to explicitly specify the logging component. Besides, it has a comparable speed as Envoy's logger. \n\n### Basic Usage\nThe basic usage of Fancy Logger is to explicitly call its macros:\n```\n  FANCY_LOG(info, \"Hello world! Here's a line of fancy log!\");\n  FANCY_LOG(error, \"Fancy Error! Here's the second message!\");\n```\nIf the level of log message is higher than that of the file, macros above will print messages with the file name like this:\n```\n[2020-07-29 22:27:02.594][15][error][test/common/common/log_macros_test.cc:149] Fancy Error! Here\\'s the second message!\n```\nMore macros with connection and stream information:\n```\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> stream_;\n  FANCY_CONN_LOG(warn, \"Fake info {} of connection\", connection_, 1);\n  FANCY_STREAM_LOG(warn, \"Fake warning {} of stream\", stream_, 1);\n```\nTo flush a logger, `FANCY_FLUSH_LOG()` can be used. \n\n### Enable Fancy Logger using Command Line Option\nA command line option is provided to enable Fancy Logger: `--enable-fine-grain-logging`. It enables Fancy Logger for Envoy, i.e. replaces most Envoy's log macros (`ENVOY_LOG, ENVOY_FLUSH_LOG, ENVOY_CONN_LOG, ENVOY_STREAM_LOG`) with corresponding Fancy Logger's macros. \n\nIf Fancy Logger is enabled, the default log format is `\"[%Y-%m-%d %T.%e][%t][%l] [%g:%#] %v\"`, where the logger name is omitted compared to Envoy's default as it's the same as file name. 
The default log level is info, if not specified by user of any logging context.\n\nNote that Envoy's logger can still be used in Fancy mode. These macros are not replaced: `GET_MISC_LOGGER, ENVOY_LOG_MISC, ENVOY_LOGGER, ENVOY_LOG_TO_LOGGER, ENVOY_CONN_LOG_TO_LOGGER, ENVOY_STREAM_LOG_TO_LOGGER`. For example, `ENVOY_LOG_LOGGER(ENVOY_LOGGER(), LEVEL, ...)` is equivalent to `ENVOY_LOG` in Envoy mode. \n\nIf Fancy Logger is not enabled, existing Envoy's logger is used. In this mode, basic macros like `FANCY_LOG` can be used but the main part of `ENVOY_LOG` will keep the same. One limitation is that logger update in admin page is not supported by default as it detects Envoy mode. The reason is: Envoy mode is designed only to be backward compatible. To address it, developers can use `Logger::Context::enableFancyLogger()` to manually enable Fancy Logger.\n\n### Runtime Update\nRuntime update of Fancy Logger is supported with administration interface, i.e. admin page, and Fancy mode needs to be enabled to use it. Same as Envoy's logger, the following functionalities are provided:\n\n1. `POST /logging`: List all names (i.e. file paths) of all active loggers and their levels;\n2. `POST /logging?<file_path>=<level>`: Given the current file path, change the log level of the file;\n3. `POST /logging?level=<level>`: Change levels of all loggers.\n\nUsers can view and change the log level in a granularity of file in runtime through admin page. Note that `file_path` is determined by `__FILE__` macro, which is the path seen by preprocessor.\n\n### Implementation Details\nFancy Logger can be divided into two parts: \n1. Core part: file level logger without explicit inheriting `Envoy::Logger::Loggable`;\n2. Hook part: control interfaces, such as command line and admin page. \n\n#### Core Part\nThe core of Fancy Logger is implemented in `class FancyContext`, which is a singleton class. Filenames (i.e. keys) and logger pointers are stored in `FancyContext::fancy_log_map_`. 
There are several code paths when `FANCY_LOG` is called.\n\n1. Slow path: if `FANCY_LOG` is first called in a file, `FancyContext::initFancyLogger(key, local_logger_ptr)` is called to create a new logger globally and store the `<key, global_logger_ptr>` pair in `fancy_log_map_`. Then local logger pointer is updated and will never be changed. \n2. Medium path: if `FANCY_LOG` is first called at the call site but not in the file, `FancyContext::initFancyLogger(key, local_logger_ptr)` is still called but local logger pointer is quickly set to the global logger, as there is already global record of the given filename.\n3. Fast path: if `FANCY_LOG` is called after first call at the site, the log is directly printed using local logger pointer.\n\n#### Hook Part\nFancy Logger provides control interfaces through command line and admin page. \n\nTo pass the arguments such as log format and default log level to Fancy Logger, `fancy_log_format` and `fancy_default_level` are added in `class Context` and they are updated when a new logging context is activated. `getFancyContext().setDefaultFancyLevelFormat(level, format)` is called in `Context::activate()` to set log format and update loggers' previous default level to the new level. \n\nTo support the runtime update in admin page, log handler in admin page uses `getFancyContext().listFancyLoggers()` to show all Fancy Loggers, `getFancyContext().setFancyLogger(name, level)` to set a specific logger and `getFancyContext().setAllFancyLoggers(level)` to set all loggers.\n\n"
  },
  {
    "path": "source/docs/filters/http/cache/cache_filter.md",
    "content": "### HTTP Cache Filter\nWork in Progress--not ready for deployment\n\nHTTP caching can improve system throughput, latency, and network/backend load\nlevels when the same content is requested multiple times. Caching is\nparticularly valuable for edge proxies and browser-based traffic, which\ntypically include many cacheable static resources, but it can be useful any time\nthere is enough repeatedly served cacheable content.\n\n## Configuration\nCacheFilter is configured with\nenvoy.config.filter.http.cache.v2alpha.CacheConfig. The only required\nconfiguration field is typed_config. The type of this message will select what\nHttpCache plugin to use, and will be passed to the selected plugin. HttpCache\nplugins are located in subdirectories of\nsource/extensions/filters/http/cache. Specifying a message of type\nenvoy.source.extensions.filters.http.cache.SimpleHttpCacheConfig will select a\nproof-of-concept implementation included in the Envoy source. More\nimplementations can be provided by implementing\nEnvoy::Extensions::HttpFilters::Cache::HttpCache. To write a cache storage\nimplementation, see [Writing Cache Filter\nImplementations](cache_filter_plugins.md).\n\nTODO(toddmgreer) Describe other fields as they get implemented.\nThe remaining configuration fields control caching behavior and limits. By\ndefault, this filter will cache almost all responses that are considered\ncacheable by [RFC7234](https://httpwg.org/specs/rfc7234.html), with handling\nof conditional ([RFC7232](https://httpwg.org/specs/rfc7232.html)), and *range*\n[RFC7233](https://httpwg.org/specs/rfc7233.html) requests. Those RFCs define\nwhich request methods and response codes are cacheable, subject to the\ncache-related headers they also define: *cache-control*, *range*, *if-match*,\n*if-none-match*, *if-modified-since*, *if-unmodified-since*, *if-range*, *authorization*,\n*date*, *age*, *expires*, and *vary*. 
Responses with a *vary* header will only be cached\nif the named headers are accepted by one of the matching rules in *allowed_vary_headers*.\n\n## Status\n * ready for developers to write cache storage plugins; please contribute them\n  to the Envoy repository if possible.\n * ready for contributions to help finish its implementation of HTTP caching\n  semantics.\n * *not* ready for actual use. Please see TODOs in the code.\n"
  },
  {
    "path": "source/docs/filters/http/cache/cache_filter_plugins.md",
    "content": "### Overview\n\nThe HTTP Cache Filter handles most of the complexity of HTTP caching semantics,\nbut delegates the actual storage of HTTP responses to implementations of the\nHttpCache interface. These implementations can cover all points on the spectrum\nof persistence, performance, and distribution, from local RAM caches to globally\ndistributed persistent caches. They can be fully custom caches, or\nwrappers/adapters around local or remote open-source or proprietary caches.\n\nIf you write a new cache storage implementation, please add it to the Envoy\nrepository if possible. This is the only way to make sure it stays up-to-date\nwith Envoy changes, and lets other developers contribute fixes and improvements.\n\nAs you read this, also read the example implementation in `simple_http_cache.h/.cc`.\n\nYou need to write implementations of four small interfaces:\n\n## HttpCache\n * Example Implementation: `SimpleHttpCache`\n * `HttpCache` represents an actual cache of responses stored somewhere. It provides methods to set up cache lookups and inserts, and to update the headers of cached responses.\n\n## HttpCacheFactory\n * Example Implementation: `SimpleHttpCacheFactory`\n * `HttpCacheFactory` does what it sounds like: it creates HttpCache implementations, based on a name that came from the cache filter's config.\n\n## LookupContext\n * Example Implementation: `SimpleLookupContext`\n * `LookupContext` represents a single lookup operation; this is a good place to store whatever per-lookup state you may need during the lookup process. \n\n## InsertContext\n * Example Implementation: `SimpleInsertContext`\n * `InsertContext` represents a single insert operation; this is a good place to store whatever per-insert state you may need during the insert process. \n\n### Flow\n\nTo initiate a lookup on an HttpCache implementation, the cache filter calls\n`HttpCache::makeLookupContext`, which should return a `LookupContextPtr`. 
The cache filter will\ncall `LookupContext::getHeaders` to find out if there's a cached response. If\na result is found, the `LookupContext` implementation must call\n`LookupRequest::makeLookupResult`, and pass the result to the callback.\n\nThe cache filter will then make a series of `getBody` requests followed by `getTrailers` (if needed).\n\nIf the `LookupResult` in the callback indicates that a response wasn't found, the cache filter will let the request pass upstream. If the origin replies with a cacheable response, the filter will call `HttpCache::makeInsertContext`, and use its methods to insert the response.\n\nThe following diagram shows a potential GET request for a 5M resource that is present and fresh in the cache, with no trailers. In the case of a synchronous in-memory cache, this all happens within `CacheFilter::decodeHeaders`. Solid arrows denote synchronous function calls, while dashed arrows denote asynchronous function calls or their callbacks. Objects that are part of the cache implementation (`HttpCache` and `LookupContext`) are blue. (Other objects are part of the cache filter, or of Envoy.)\n\n![Cache Filter flow diagram](cache_filter_flow.png)\n"
  },
  {
    "path": "source/docs/flow_control.md",
    "content": "### Overview\n\nFlow control in Envoy is done by having limits on each buffer, and watermark callbacks. When a\nbuffer contains more data than the configured limit, the high watermark callback will fire, kicking\noff a chain of events which eventually informs the data source to stop sending data. This back-off\nmay be immediate (stop reading from a socket) or gradual (stop HTTP/2 window updates) so all\nbuffer limits in Envoy are considered soft limits. When the buffer eventually drains (generally to\nhalf of the high watermark to avoid thrashing back and forth) the low watermark callback will\nfire, informing the sender it can resume sending data.\n\n### TCP implementation details\n\nFlow control for TCP and TCP-with-TLS-termination are handled by coordination\nbetween the `Network::ConnectionImpl` write buffer, and the `Network::TcpProxy`\nfilter.\n\nThe downstream flow control goes as follows.\n\n * The downstream `Network::ConnectionImpl::write_buffer_` buffers too much\n   data. It calls\n   `Network::ConnectionCallbacks::onAboveWriteBufferHighWatermark()`.\n * The `Network::TcpProxy::DownstreamCallbacks` receives\n   `onAboveWriteBufferHighWatermark()` and calls `readDisable(true)` on the upstream\n   connection.\n * When the downstream buffer is drained, it calls\n   `Network::ConnectionCallbacks::onBelowWriteBufferLowWatermark()`\n * The `Network::TcpProxy::DownstreamCallbacks` receives\n   `onBelowWriteBufferLowWatermark()` and calls `readDisable(false)` on the upstream\n   connection.\n\nFlow control for the upstream path is much the same.\n\n * The upstream `Network::ConnectionImpl::write_buffer_` buffers too much\n   data. 
It calls\n   `Network::ConnectionCallbacks::onAboveWriteBufferHighWatermark()`.\n * The `Network::TcpProxy::UpstreamCallbacks` receives\n   `onAboveWriteBufferHighWatermark()` and calls `readDisable(true)` on the downstream\n   connection.\n * When the upstream buffer is drained, it calls\n   `Network::ConnectionCallbacks::onBelowWriteBufferLowWatermark()`\n * The `Network::TcpProxy::UpstreamCallbacks` receives\n   `onBelowWriteBufferLowWatermark()` and calls `readDisable(false)` on the downstream\n   connection.\n\n### HTTP2 implementation details\n\nBecause the various buffers in the HTTP/2 stack are fairly complicated, each path from a buffer\ngoing over the watermark limit to disabling data from the data source is documented separately.\n\n![HTTP2 data flow diagram](h2_buffers.png)\n\nFor HTTP/2, when filters, streams, or connections back up, the end result is `readDisable(true)`\nbeing called on the source stream. This results in the stream ceasing to consume window, and so\nnot sending further flow control window updates to the peer. This will result in the peer\neventually stopping sending data when the available window is consumed (or nghttp2 closing the\nconnection if the peer violates the flow control limit) and so limiting the amount of data Envoy\nwill buffer for each stream. When `readDisable(false)` is called, any outstanding unconsumed data\nis immediately consumed, which results in resuming window updates to the peer and the resumption of\ndata.\n\nNote that `readDisable(true)` on a stream may be called by multiple entities. It is called when any\nfilter buffers too much, when the stream backs up and has too much data buffered, or the\nconnection has too much data buffered. Because of this, `readDisable()` maintains a count of\nthe number of times it has been called to both enable and disable the stream,  resuming reads when\neach caller has called the equivalent low watermark callback. 
For example, if\nthe TCP window upstream fills up and results in the network buffer backing up,\nall the streams associated with that connection will `readDisable(true)` their\ndownstream data sources. When the HTTP/2 flow control window fills up an\nindividual stream may use all of the window available and call a second\n`readDisable(true)` on its downstream data source. When the upstream TCP socket drains,\nthe connection will go below its low watermark and each stream will call\n`readDisable(false)` to resume the flow of data. The stream which had both a\nnetwork level block and a H2 flow control block will still not be fully enabled.\nOnce the upstream peer sends window updates, the stream buffer will drain and\nthe second `readDisable(false)` will be called on the downstream data source,\nwhich will finally result in data flowing from downstream again.\n\nThe two main parties involved in flow control are the router filter (`Envoy::Router::Filter`) and\nthe connection manager (`Envoy::Http::ConnectionManagerImpl`). The router is\nresponsible for intercepting watermark events for its own buffers, the individual upstream streams\n(if codec buffers fill up) and the upstream connection (if the network buffer fills up). It passes\nany events to the connection manager, which has the ability to call `readDisable()` to enable and\ndisable further data from downstream. On the reverse path, when the downstream connection\nbacks up, the connection manager collects events for the downstream streams and\nthe downstream connection. It passes events to the router filter via\n`Envoy::Http::DownstreamWatermarkCallbacks` and the router can then call `readDisable()` on the\nupstream stream. Filters opt into subscribing to `DownstreamWatermarkCallbacks` as a performance\noptimization to avoid each watermark event on a downstream HTTP/2 connection resulting in\n\"number of streams * number of filters\" callbacks. 
Instead, only the router\nfilter is notified and only the \"number of streams\" multiplier applies. Because\nthe router filter only subscribes to notifications when it has an upstream\nconnection, the connection manager tracks how many outstanding high watermark\nevents have occurred and passes any on to the router filter when it subscribes.\n\nIt is worth noting that the router does not unwind `readDisable(true)` calls on\ndestruction. In the case of HTTP/2 the `Envoy::Http::Http2::ConnectionImpl` will consume\nany outstanding flow control window on stream deletion to avoid leaking the connection-level\nwindow. In the case of HTTP/1, the `Envoy::Http::ConnectionManagerImpl` unwinds any `readDisable()`\ncalls downstream to ensure that pipelined requests will be read. For HTTP/1\nupstream connections, the `readDisable(true)` calls are unwound in\n`ClientConnectionImpl::onMessageComplete()` to make sure that as connections are\nreturned to the connection pool they are ready to read.\n\n## HTTP/2 codec recv buffer\n\nGiven the HTTP/2 `Envoy::Http::Http2::ConnectionImpl::StreamImpl::pending_recv_data_` is processed immediately\nthere's no real need for buffer limits, but for consistency and to future-proof the implementation,\nit is a WatermarkBuffer. The high watermark path goes as follows:\n\n * When `pending_recv_data_` has too much data it calls\n `ConnectionImpl::StreamImpl::pendingRecvBufferHighWatermark()`.\n * `pendingRecvBufferHighWatermark()` calls `readDisable(true)` on the stream.\n\nThe low watermark path is similar:\n\n * When `pending_recv_data_` is drained, it calls\n `ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark()`.\n * `pendingRecvBufferLowWatermark()` calls `readDisable(false)` on the stream.\n\n## HTTP/1 and HTTP/2 filters\n\nEach HTTP and HTTP/2 filter has an opportunity to call `decoderBufferLimit()` or\n`encoderBufferLimit()` on creation. 
No filter should buffer more than the\nconfigured bytes without calling the appropriate watermark callbacks or sending\nan error response.\n\nFilters may override the default limit with calls to `setDecoderBufferLimit()`\nand `setEncoderBufferLimit()`. These limits are applied as filters are created\nso filters later in the chain can override the limits set by prior filters.\n\nMost filters do not buffer internally, but instead push back on data by\nreturning a FilterDataStatus on `encodeData()`/`decodeData()` calls.\nIf a buffer is a streaming buffer, i.e. the buffered data will resolve over\ntime, it should return `FilterDataStatus::StopIterationAndWatermark` to pause\nfurther data processing, which will cause the `ConnectionManagerImpl` to trigger\nwatermark callbacks on behalf of the filter. If a filter can not make forward progress without the\ncomplete body, it should return `FilterDataStatus::StopIterationAndBuffer`.\nIn this case if the `ConnectionManagerImpl` buffers more than the allowed data\nit will return an error downstream: a 413 on the request path, 500 or `resetStream()` on the\nresponse path.\n\n# Decoder filters\n\nFor filters which do their own internal buffering, filters buffering more than the buffer limit\nshould call `onDecoderFilterAboveWriteBufferHighWatermark` if they are streaming filters, i.e.\nfilters which can process more bytes as the underlying buffer is drained. This causes the\ndownstream stream to be readDisabled and the flow of downstream data to be\nhalted. 
The filter is then responsible for calling `onDecoderFilterBelowWriteBufferLowWatermark`\nwhen the buffer is drained to resume the flow of data.\n\nDecoder filters which must buffer the full response should respond with a 413 (Payload Too Large)\nwhen encountering a response body too large to buffer.\n\nThe decoder high watermark path for streaming filters is as follows:\n\n * When an instance of `Envoy::Router::StreamDecoderFilter` buffers too much data it should call\n   `StreamDecoderFilterCallback::onDecoderFilterAboveWriteBufferHighWatermark()`.\n * When `Envoy::Http::ConnectionManagerImpl::ActiveStreamDecoderFilter` receives\n `onDecoderFilterAboveWriteBufferHighWatermark()` it calls `readDisable(true)` on the downstream\n stream to pause data.\n\nAnd the low watermark path:\n\n * When the buffer of the `Envoy::Router::StreamDecoderFilter` drains, it should call\n   `StreamDecoderFilterCallback::onDecoderFilterBelowWriteBufferLowWatermark()`.\n * When `Envoy::Http::ConnectionManagerImpl` receives\n `onDecoderFilterBelowWriteBufferLowWatermark()` it calls `readDisable(false)` on the downstream\n stream to resume data.\n\n# Encoder filters\n\n\nEncoder filters buffering more than the buffer limit should call\n`onEncoderFilterAboveWriteBufferHighWatermark` if they are streaming filters, i.e. filters which can\nprocess more bytes as the underlying buffer is drained. The high watermark\ncall will be passed from the `Envoy::Http::ConnectionManagerImpl` to the `Envoy::Router::Filter`\nwhich will `readDisable(true)` to stop the flow of upstream data. 
Streaming filters which\ncall `onEncoderFilterAboveWriteBufferHighWatermark` should call\n`onEncoderFilterBelowWriteBufferLowWatermark` when the underlying buffer drains.\n\nFilters which must buffer a full request body before processing further, should respond with a\n500 (Server Error) if encountering a request body which is larger than the buffer limits.\n\nThe encoder high watermark path for streaming filters is as follows:\n\n * When an instance of `Envoy::Router::StreamEncoderFilter` buffers too much data it should call\n   `StreamEncoderFilterCallback::onEncoderFilterAboveWriteBufferHighWatermark()`.\n * When `Envoy::Http::ConnectionManagerImpl::ActiveStreamEncoderFilter` receives\n `onEncoderFilterAboveWriteBufferHighWatermark()` it calls\n `ConnectionManagerImpl::ActiveStream::callHighWatermarkCallbacks()`\n * `callHighWatermarkCallbacks()` then in turn calls\n    `DownstreamWatermarkCallbacks::onAboveWriteBufferHighWatermark()` for all\n    filters which registered to receive watermark events\n * `Envoy::Router::Filter` receives `onAboveWriteBufferHighWatermark()` and calls\n   `readDisable(true)` on the upstream request.\n\nThe encoder low watermark path for streaming filters is as follows:\n\n * When an instance of `Envoy::Router::StreamEncoderFilter` buffer drains, it should call\n   `StreamEncoderFilterCallback::onEncoderFilterBelowWriteBufferLowWatermark()`.\n * When `Envoy::Http::ConnectionManagerImpl::ActiveStreamEncoderFilter` receives\n `onEncoderFilterBelowWriteBufferLowWatermark()` it calls\n `ConnectionManagerImpl::ActiveStream::callLowWatermarkCallbacks()`\n * `callLowWatermarkCallbacks()` then in turn calls\n    `DownstreamWatermarkCallbacks::onBelowWriteBufferLowWatermark()` for all\n    filters which registered to receive watermark events\n * `Envoy::Router::Filter` receives `onBelowWriteBufferLowWatermark()` and calls\n   `readDisable(false)` on the upstream request.\n\n# HTTP and HTTP/2 codec upstream send buffer\n\nThe upstream send 
buffer `Envoy::Http::Http2::ConnectionImpl::StreamImpl::pending_send_data_` is\nH2 stream data destined for an Envoy backend. Data is added to this buffer after each filter in\nthe chain is done processing, and it backs up if there is insufficient connection or stream window\nto send the data. The high watermark path goes as follows:\n\n * When `pending_send_data_` has too much data it calls\n   `ConnectionImpl::StreamImpl::pendingSendBufferHighWatermark()`.\n * `pendingSendBufferHighWatermark()` calls `StreamCallbackHelper::runHighWatermarkCallbacks()`\n * `runHighWatermarkCallbacks()` results in all subscribers of `Envoy::Http::StreamCallbacks`\n receiving an `onAboveWriteBufferHighWatermark()` callback.\n * When `Envoy::Router::Filter` receives `onAboveWriteBufferHighWatermark()` it\n   calls `StreamDecoderFilterCallback::onDecoderFilterAboveWriteBufferHighWatermark()`.\n * When `Envoy::Http::ConnectionManagerImpl` receives\n `onDecoderFilterAboveWriteBufferHighWatermark()` it calls `readDisable(true)` on the downstream\n stream to pause data.\n\nFor the low watermark path:\n\n * When `pending_send_data_` drains it calls\n   `ConnectionImpl::StreamImpl::pendingSendBufferLowWatermark()`\n * `pendingSendBufferLowWatermark()` calls `StreamCallbackHelper::runLowWatermarkCallbacks()`\n * `runLowWatermarkCallbacks()` results in all subscribers of `Envoy::Http::StreamCallbacks`\n receiving a `onBelowWriteBufferLowWatermark()` callback.\n * When `Envoy::Router::Filter` receives `onBelowWriteBufferLowWatermark()` it\n   calls `StreamDecoderFilterCallback::onDecoderFilterBelowWriteBufferLowWatermark()`.\n * When `Envoy::Http::ConnectionManagerImpl` receives `onDecoderFilterBelowWriteBufferLowWatermark()`\n   it calls `readDisable(false)` on the downstream stream to resume data.\n\n# HTTP and HTTP/2 network upstream network buffer\n\nThe upstream network buffer is HTTP/2 data for all streams destined for the\nEnvoy backend. 
If the network buffer fills up, all streams associated with the\nunderlying TCP connection will be informed of the back-up, and the data sources\n(HTTP/2 streams or HTTP connections) feeding into those streams will be\nreadDisabled.\n\nThe high watermark path is as follows:\n\n * When `Envoy::Network::ConnectionImpl::write_buffer_` has too much data it calls\n   `Network::ConnectionCallbacks::onAboveWriteBufferHighWatermark()`.\n * When `Envoy::Http::CodecClient` receives `onAboveWriteBufferHighWatermark()` it\n   calls `onUnderlyingConnectionAboveWriteBufferHighWatermark()` on `codec_`.\n * When `Envoy::Http::ConnectionManagerImpl` receives `onAboveWriteBufferHighWatermark()` it calls\n   `runHighWatermarkCallbacks()` for each stream of the connection.\n * `runHighWatermarkCallbacks()` results in all subscribers of `Envoy::Http::StreamCallback`\n receiving an `onAboveWriteBufferHighWatermark()` callback.\n * When `Envoy::Router::Filter` receives `onAboveWriteBufferHighWatermark()` it\n   calls `StreamDecoderFilterCallback::onDecoderFilterAboveWriteBufferHighWatermark()`.\n * When `Envoy::Http::ConnectionManagerImpl` receives\n `onDecoderFilterAboveWriteBufferHighWatermark()` it calls `readDisable(true)` on the downstream\n stream to pause data.\n\nThe low watermark path is as follows:\n\n * When `Envoy::Network::ConnectionImpl::write_buffer_` is drained it calls\n   `Network::ConnectionCallbacks::onBelowWriteBufferLowWatermark()`.\n * When `Envoy::Http::CodecClient` receives `onBelowWriteBufferLowWatermark()` it\n   calls `onUnderlyingConnectionBelowWriteBufferLowWatermark()` on `codec_`.\n * When `Envoy::Http::ConnectionManagerImpl` receives `onBelowWriteBufferLowWatermark()` it calls\n   `runLowWatermarkCallbacks()` for each stream of the connection.\n * `runLowWatermarkCallbacks()` results in all subscribers of `Envoy::Http::StreamCallback`\n receiving a `onBelowWriteBufferLowWatermark()` callback.\n * When `Envoy::Router::Filter` receives 
`onBelowWriteBufferLowWatermark()` it\n   calls `StreamDecoderFilterCallback::onDecoderFilterBelowWriteBufferLowWatermark()`.\n * When `Envoy::Http::ConnectionManagerImpl` receives `onDecoderFilterBelowWriteBufferLowWatermark()`\n   it calls `readDisable(false)` on the downstream stream to resume data.\n\nAs with the downstream network buffer, it is important that as new upstream\nstreams are associated with an existing upstream connection over its buffer\nlimits that the new streams are created in the correct state. To handle this,\nthe `Envoy::Http::Http2::ClientConnectionImpl` tracks the state of the\nunderlying `Network::Connection` in `underlying_connection_above_watermark_`.\nIf a new stream is created when the connection is above the high watermark the\nnew stream has `runHighWatermarkCallbacks()` called on it immediately.\n\n\n# HTTP/2 codec downstream send buffer\n\nOn filter creation, all filters have the opportunity to subscribe to downstream\nwatermark events sent by the connection manager, and the router filter takes\nadvantage of this. When a particular downstream stream gets backed up, the router filter\ngets notified and can then `readDisable()` the upstream data source. The high\nwatermark path is as follows:\n\n * When `ConnectionImpl::StreamImpl::pending_send_data_` has too much data, it calls\n   `ConnectionImpl::StreamImpl::pendingSendBufferHighWatermark()`\n * `pendingSendBufferHighWatermark()` calls `StreamCallbackHelper::runHighWatermarkCallbacks()`\n * `runHighWatermarkCallbacks()` results in all subscribers of `Envoy::Http::StreamCallbacks`\n   receiving an `onAboveWriteBufferHighWatermark()` callback. 
Currently,\n   ConnectionManagerImpl::ActiveStream is the only subscriber.\n * `ConnectionManagerImpl::ActiveStream::onAboveWriteBufferHighWatermark()` calls\n   `ConnectionManagerImpl::ActiveStream::callHighWatermarkCallbacks()`\n * `callHighWatermarkCallbacks()` then in turn calls\n    `DownstreamWatermarkCallbacks::onAboveWriteBufferHighWatermark()` for all\n    filters which registered to receive watermark events\n * `Envoy::Router::Filter` receives `onAboveWriteBufferHighWatermark()` and calls\n   `readDisable(true)` on the upstream request.\n\nThe low watermark path is as follows:\n\n * When `ConnectionImpl::StreamImpl::pending_send_data_` drains, it calls\n   `ConnectionImpl::StreamImpl::pendingSendBufferLowWatermark()`.\n * `pendingSendBufferLowWatermark()` calls `StreamCallbackHelper::runLowWatermarkCallbacks()`\n * `runLowWatermarkCallbacks()` results in all subscribers of `Envoy::Http::StreamCallbacks`\n   receiving an `onBelowWriteBufferLowWatermark()` callback.\n * `ConnectionManagerImpl::ActiveStream::onBelowWriteBufferLowWatermark()` calls\n   `ConnectionManagerImpl::ActiveStream::callLowWatermarkCallbacks()`\n * `callLowWatermarkCallbacks()` then in turn calls\n    `DownstreamWatermarkCallbacks::onBelowWriteBufferLowWatermark()` for all\n    filters which registered to receive watermark events.\n * `Envoy::Router::Filter` receives `onBelowWriteBufferLowWatermark()` and calls\n   `readDisable(false)` on the upstream request.\n\n# HTTP/2 network downstream network buffer\n\nWhen a downstream network connection buffers too much data, it informs the\nconnection manager which passes the high watermark event to all of the streams\non the connection. They pass the watermark event to the router, which calls\n`readDisable()` on the upstream streams.\n\nThe high watermark path is as follows:\n\n * The downstream `Network::ConnectionImpl::write_buffer_` buffers too much\n   data. 
It calls\n   `Network::ConnectionCallbacks::onAboveWriteBufferHighWatermark()`.\n * `Envoy::Http::Http2::ConnectionManagerImpl::onAboveWriteBufferHighWatermark()`\n   calls `ConnectionImpl::onUnderlyingConnectionAboveWriteBufferHighWatermark()`\n   on `codec_`.\n * When `Envoy::Http::Http2::ConnectionImpl` receives `onAboveWriteBufferHighWatermark()` it calls\n   `runHighWatermarkCallbacks()` for each stream of the connection.\n * When `ConnectionManagerImpl::ActiveStream::onAboveWriteBufferHighWatermark()` is\n   called it calls `ConnectionImpl::ActiveStream::callHighWatermarkCallbacks()`\nFrom this point on, the flow is the same as when the downstream codec buffer\ngoes over its high watermark.\n\nThe low watermark path is as follows:\n\n * The downstream `Network::ConnectionImpl::write_buffer_` drains. It calls\n   `Network::ConnectionCallbacks::onBelowWriteBufferLowWatermark()`.\n * `Envoy::Http::Http2::ConnectionManagerImpl::onBelowWriteBufferLowWatermark()`\n   calls `ConnectionImpl::onUnderlyingConnectionBelowWriteBufferLowWatermark()`\n   on `codec_`.\n * When `Envoy::Http::Http2::ConnectionImpl` receives `onBelowWriteBufferLowWatermark()` it calls\n   `runLowWatermarkCallbacks()` for each stream of the connection.\n * When `ConnectionManagerImpl::ActiveStream::onBelowWriteBufferLowWatermark()` is\n   called it calls `ConnectionImpl::ActiveStream::callLowWatermarkCallbacks()`\n\nFrom this point on, the flow is the same as when the downstream codec buffer\ngoes under its low watermark.\n\nWhen the downstream buffer is overrun each new stream should be informed of this on stream creation.\nThis is handled by the connection manager latching the state of the underlying connection in\n`ConnectionManagerImpl::underlying_connection_above_high_watermark_` and if a new stream is created\nwhile the underlying connection is above the high watermark, the new stream has watermark callbacks\ncalled on creation.\n\n### HTTP implementation details\n\nHTTP flow control is 
extremely similar to HTTP/2 flow control, with the main exception being that\nthe method used to halt the flow of downstream/upstream data is to disable reads on the underlying\nNetwork::Connection. As the TCP data stops being consumed the peer will eventually fill their\ncongestion window and stop sending.\n\nAs with HTTP/2, a given stream may end in a state where the connection has had\n`readDisable(true)` called. When a new stream is created on that connection\nfor a subsequent request, any outstanding `readDisable(true)` calls are unwound\nin `Http::Http1::ConnectionImpl::newStream()`.\n\nFilter and network backups are identical in the HTTP and HTTP/2 cases and are\ndocumented above. Codec backup is slightly different and is documented below.\n\n# HTTP codec downstream send buffer\n\nAs with the HTTP/2 codec recv buffer, the HTTP codec send buffer is only expected to have data pass\nthrough it. It should never back up. Still, in the event it does, the high\nwatermark path is as follows:\n\n * When `Http::Http1::ConnectionImpl::output_buffer_` has too much data buffered\n   it calls `onOutputBufferAboveHighWatermark()`\n * Http::Http1::ConnectionImpl::ServerConnectionImpl::onOutputBufferAboveHighWatermark() calls\n `runHighWatermarkCallbacks()` which results in all subscribers of `Envoy::Http::StreamCallbacks`\n   receiving an `onAboveWriteBufferHighWatermark()` callback.\nFrom this point the `ConnectionManagerImpl` takes over and the code path is the same as for the\nHTTP/2 codec downstream send buffer.\n\nThe low watermark path is as follows:\n\n * When `Http::Http1::ConnectionImpl::output_buffer_` drains\n   it calls `onOutputBufferBelowLowWatermark()`\n * Http::Http1::ConnectionImpl::ServerConnectionImpl::onOutputBufferBelowLowWatermark() calls\n `runLowWatermarkCallbacks()` which results in all subscribers of `Envoy::Http::StreamCallbacks`\n   receiving an `onBelowWriteBufferLowWatermark()` callback.\nFrom this point the `ConnectionManagerImpl` takes over 
and the code path is the same as for the\nHTTP/2 codec downstream send buffer.\n\n# HTTP codec upstream send buffer\n\nAs with the HTTP downstream send buffer, the HTTP codec send buffer is only expected to have data\npass through it. It should never back up. Still, in the event it does, the high\nwatermark path is as follows:\n\n * When `Http::Http1::ConnectionImpl::output_buffer_` has too much data buffered\n   it calls `onOutputBufferAboveHighWatermark()`\n * Http::Http1::ConnectionImpl::ClientConnectionImpl::onOutputBufferAboveHighWatermark() calls\n `runHighWatermarkCallbacks()` which results in all subscribers of `Envoy::Http::StreamCallbacks`\n   receiving an `onAboveWriteBufferHighWatermark()` callback.\nFrom this point on the `Envoy::Router::Filter` picks up the event and the code path is the same as\nfor the HTTP/2 codec upstream send buffer.\n\nThe low watermark path is as follows:\n\n * When `Http::Http1::ConnectionImpl::output_buffer_` drains\n   it calls `onOutputBufferBelowLowWatermark()`\n * Http::Http1::ConnectionImpl::ClientConnectionImpl::onOutputBufferBelowLowWatermark() calls\n `runLowWatermarkCallbacks()` which results in all subscribers of `Envoy::Http::StreamCallbacks`\n   receiving an `onBelowWriteBufferLowWatermark()` callback.\nFrom this point on the `Envoy::Router::Filter` picks up the event and the code path is the same as\nfor the HTTP/2 codec upstream send buffer.\n\n### HTTP3 implementation details\nHTTP3 network buffer and stream send buffer works differently from HTTP2 and HTTP. See quiche_integration.md.\n"
  },
  {
    "path": "source/docs/h2_metadata.md",
    "content": "### Overview\n\nEnvoy provides a way for users to communicate extra information associating with a stream that is\nnot carried in the standard HTTP(s) and HTTP/2 headers and payloads. The\ninformation is represented by a string key-value pair. For example, users can\npass along RTT information associated with a stream using a key of \"rtt info\", and a value of\n\"100ms\". In Envoy, we call this type of information metadata.\nA stream can be associated with multiple metadata, and the multiple metadata\nare represented by a map.\n\nNote: the metadata implementation is still in progress, and the doc is in draft\nversion.\n\n### Limitation and conditions\n\nFor ease of implementation and compatibility purposes, metadata will only be\nsupported in HTTP/2. Metadata sent in any other protocol should result in protocol\nerrors or be ignored.\n\nTo simplify the implementation, we don't allow metadata frames to carry end of\nstream flag. Because metadata frames must be associated with an existing frame, users must\nensure metadata frames to be received before the end of stream is received by the\npeer.\n\nMetadata associated with a stream can be sent before headers, after headers,\nbetween data or after data. If metadata frames have to be sent last,\nusers must put the end of stream in an empty data frame and send the empty data frame after metadata frames.\n\nEnvoy only allows up to 1M metadata to be sent per stream. If the accumulated\nmetadata size exceeds the limit, the stream will be reset.\n\n### Envoy metadata handling\n\nEnvoy provides the functionality to proxy, process and add metadata.\n\n## Proxying metadata\n\nIf not specified, all the metadata received by Envoy is proxied to the next hop\nunmodified. Note that, we do not guarantee the same frame order will be preserved from\nhop by hop. 
That is, metadata from upstream at the beginning of a stream can be\nreceived by the downstream at the end of the stream.\n\n## Consuming metadata\n\nIf Envoy needs to take actions when a metadata frame is received, users should\ncreate a new filter.\n\nIf Envoy needs to parse a metadata sent on a request from downstream to upstream, a\nStreamDecodeFilter should be created. The interface to override is\n\nFilterMetadataStatus StreamDecoderFilter::decodeMetadata(MetadataMapPtr&& metadata);\n\nThe metadata passed in is a map of the metadata associated with the request stream. After metadata\nhave been parsed, the filter can choose to remove metadata from the map, or keep\nit untouched.\n\nIf Envoy needs to parse a metadata sent on a response from upstream to downstream, a\nStreamEncoderFilter should be created. The interface to override is\n\nFilterMetadatasStatus StreamEncoderFilter::encodeMetadata(MetadataMap& metadata);\n\nThe metadata passed in is a map of the metadata associated with the response stream. After metadata\nhave been parsed, the filter can choose to remove metadata from the map, or keep\nit untouched.\n\nNote that, if the metadata in a request or a response is removed from the map after consuming, the metadata\nwill not be passed to the next hop. An empty map means no metadata will be sent to the next hop.\nIf the metadata is left in the map, it will be passed to the next hop.\n\n## Inserting metadata\n\nEnvoy filters can be used to add new metadata to a stream.\n\nIf users need to add new metadata for a request from downstream to upstream, a\nStreamDecoderFilter should be created. The StreamDecoderFilterCallbacks object that Envoy passes to the\nStreamDecoderFilter has an interface MetadataMapVector&\nStreamDecoderFilterCallbacks::addDecodedMetadata(). By calling the interface,\nusers get a reference to a vector of metadata map associated with the request stream. 
Users can\ninsert new metadata map to the metadata map vector, and Envoy will proxy the new metadata\nmap to the upstream. StreamDecoderFilterCallbacks::addDecodedMetadata() can be called in\nStreamDecoderFilter::decodeHeaders(), StreamDecoderFilter::decodeData() and\nStreamDecoderFilter::decodeTrailers(). Do not call\nStreamDecoderFilterCallbacks::addDecodedMetadata() in\nStreamDecoderFilter::decodeMetadata(MetadataMap metadata\\_map). New metadata can\nbe added directly to metadata\\_map.\n\nIf users need to add new metadata for a response to downstream, a\nStreamEncoderFilter should be created. Users pass the metadata to be added to\nStreamEncoderFilterCallbacks::addEncodedMetadata(MetadataMapPtr&&\nmetadata\\_map\\_ptr). This function can be called in\nStreamEncoderFilter::encode100ContinueHeaders(HeaderMap& headers), StreamEncoderFilter::encodeHeaders(HeaderMap& headers, bool end\\_stream),\nStreamEncoderFilter::encodeData(Buffer::Instance& data, bool end\\_stream), StreamEncoderFilter::encodeTrailers(HeaderMap& trailers).\nConsequently, the new metadata will be passed through all the encoding filters that follow the filter\nwhere the new metadata are added.\n\nIf users receive metadata from upstream, new metadata can be added directly to\nthe input argument metadata\\_map in StreamFilter::encodeMetadata(MetadataMap& metadata\\_map).\n\n### Metadata implementation\n\n## Metadata as extension HTTP/2 frames\n\nEnvoy supports metadata by utilizing nghttp2 extension frames. Envoy defines a\nnew extension frame type METADATA frame in nghttp2:\n\ntype = 0x4D\n\nThe METADATA frame uses a standard frame header, as described in the\n[HTTP/2 spec](https://httpwg.github.io/specs/rfc7540.html#FrameHeader.)\nThe payload of the METADATA frame is a block of key-value pairs encoded using the [HPACK Literal\nHeader Field Never Indexed representation](\nhttps://httpwg.org/specs/rfc7541.html#literal.header.never.indexed). 
Each\nkey-value pair represents one piece of metadata.\n\nThe METADATA frame defines the following flags:\n\nEND\\_METADATA (0x4).\n\nIf the flag is set, it indicates that this frame ends a metadata\npayload.\n\nThe METADATA frame payload is not subject to HTTP/2 flow control, but the size\nof the payload is bounded by the maximum frame size negotiated in SETTINGS.\nThere are no restrictions on the set of octets that may be used in keys or values.\n\nWe do not allow METADATA frame to terminate a stream. DATA, HEADERS or RST\\_STREAM must\nbe used for that purpose.\n\n## Response metadata handling\n\nWe call metadata that need to be forwarded to downstream the response metadata.\nResponse metadata can be received from upstream or generated locally.\n\nResponse metadata is generally a hop by hop message, so Envoy doesn't\nneed to hold response metadata locally to wait for some events or data. As a result,\nfilters handling response metadata don't need to stop the filter iteration and wait. Instead response\nmetadata can be forwarded through targeted filters and sequentially to the\nnext hop as soon as they are\navailable, no matter if the metadata are locally generated or received from\nupstream. The same statement is also true for metadata from downstream to upstream (request metadata). However,\nrequest metadata may need to wait for the upstream connection to be ready before going to the next hop.\nIn this section, we focus on response metadata handling.\n\nWe first explain how response metadata get consumed or proxied.\nIn function EnvoyConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilter\\* filter,\nMetadataMapPtr&& metadata\\_map\\_ptr), Envoy passes response metadata received from upstream to filters by\ncalling the following filter interface:\n\nFilterMetadatasStatus StreamEncoderFilter::encodeMetadata(MetadataMapVector& metadata\\_map).\n\nFilters, by implementing the interface, can consume response metadata. 
After going through\nthe filter chain, function EnvoyConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilter\\* filter,\nMetadataMapPtr&& metadata\\_map\\_ptr) immediately forwards the updated or remaining response metadata to the next hop by\ncalling the metadata encoding function in codec:\n\nConnectionManagerImpl::ActiveStream::response\\_encoder\\_-\\>encodeMetadata(MetadataMapVector& metadata\\_map\\_vector).\n\nIf no filter consumes the response metadata, the response metadata is proxied to\nthe downstream untouched.\n\nEnvoy can also add new response metadata through filters's encoding interfaces (See section\n[Inserting metadata](#inserting-metadata) for detailed interfaces). Filters can add new\nmetadata by calling StreamDecoderFilterCallbacks::encodeMetadata(MetadataMapPtr&& metadata\\_map\\_ptr),\nwhich triggers\nConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilter\\* filter, MetadataMapPtr&& metadata\\_map)\nto go through all the encoding filters.\nOr new metadata can be added to metadata\\_map in\nStreamFilter::encodeMetadata(MetadataMap& metadata\\_map) directly.\n\n## Request metadata handling\n\nWe first explain how request metadata get consumed or proxied.\nIn function EnvoyConnectionManagerImpl::ActiveStream::decodeMetadata(ActiveStreamDecoderFilter\\* filter,\nMetadataMap& metadata\\_map), Envoy passes request metadata received from downstream to filters by\ncalling the following filter interface:\n\nFilterMetadatasStatus StreamDecoderFilter::decodeMetadata(MetadataMap& metadata\\_map).\n\nFilters, by implementing the interface, can consume or modify request metadata. If no filter\ntouches the metadata, it is proxied to upstream unchanged.\n\nThe last filter in the filter chain is router filter. 
The router filter calls\nFilter::request\\_encoder\\_-\\>encodeMetadata(const MetadataMapVector& metadata\\_map\\_vector) to pass\nthe metadata to codec, and codec encodes and forwards the metadata to the upstream. If the connection\nto the upstream has not been established when metadata is received, the metadata is temporarily stored in\nFilter::downstream\\_metadata\\_map\\_vector\\_. When the connection is ready\n(Filter::UpstreamRequest::onPoolReady()), the metadata is then passed to codec, and forwarded to\nthe upstream.\n\nEnvoy can also add new request metadata through filters's decoding interfaces (See section\n[Inserting metadata](#inserting-metadata) for detailed interfaces). Filters can add new\nmetadata to ActiveStream::request\\_metadata\\_map\\_vector\\_ by calling\nStreamDecoderFilterCallbacks::addDecodedMetadata(). After calling each filter's decoding function,\nEnvoy checks if new metadata is added to ActiveStream::request\\_metadata\\_map\\_vector\\_. If so,\nthen Envoy calls ConnectionManagerImpl::ActiveStream::decodeMetadata(ActiveStreamEncoderFilter\\* filter,\nMetadataMapPtr&& metadata\\_map) to go through all the filters.\n\nNote that, because metadata frames do not carry end\\_stream, if new metadata is added to a headers\nonly request, Envoy moves end\\_stream from headers to an empty data frame which is sent after the new\nmetadata. In addition, Envoy drains metadata in router filter before any other types of\nframes except headers to make sure end\\_stream is handled correctly.\n\n"
  },
  {
    "path": "source/docs/header_map.md",
    "content": "# Header map implementation overview\n\nThe Envoy header map implementation (`HeaderMapImpl`) has the following properties:\n* Headers are stored in a linked list (`HeaderList`) in the order they are added, with pseudo\n  headers kept at the front of the list.\n* O(1) direct access is possible for common headers needed during data plane processing. This is\n  provided by a table of pointers that reach directly into a linked list that is populated when\n  headers are added or removed from the map. When O(1) headers are accessed by direct method\n  (`DEFINE_INLINE_HEADER` and `CustomInlineHeaderBase`) they use direct pointer access to see\n  whether a header is present, add it, modify it, etc. When headers are added by name a trie is used to lookup the pointer in the table (`StaticLookupTable`).\n* Custom headers can be registered statically against a specific implementation (request headers,\n  request trailers, response headers, and response trailers) via core code and extensions\n  (`CustomInlineHeaderRegistry`). Each registered header increases the size of the table by the size of a single pointer.\n* Operations that search, replace, etc. for a header by name that is not one of the O(1) headers\n  will incur an O(N) search through the linked list. This is an implementation deficiency for\n  certain usage patterns that will be improved in future changes.\n\n## Implementation details\n\n* O(1) registered headers are tracked during static initialization via the `CustomInlineHeaderBase`\n  class.\n* The first time a header map is constructed (in practice this is after bootstrap load and the \n  Envoy header prefix is finalized when `getAllHeaderMapImplInfo` is called), the\n  `StaticLookupTable` is finalized for each header map type. No further changes are possible after\n  this point. 
The `StaticLookupTable` defines the amount of variable pointer table space that is\n  require for each header map type.\n* Each concrete header map type derives from `InlineStorage` with a variable length member at the\n  end of the definition.\n* Each concrete header map type uses a factory function and a provide constructor. The required\n  size is determined via the `inlineHeadersSize` function."
  },
  {
    "path": "source/docs/network_filter_fuzzing.md",
    "content": "# Generic network-level filter fuzzers overview\n\nNetwork filters need to be fuzzed. Filters come in two flavors, each with their own fuzzer. Read filters should be added into the [Generic ReadFilter Fuzzer](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc). Write Filters should added into the [Generic WriteFilter Fuzzer](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc). Some filters are both raed and write filters: They should be added into both fuzzers.\n\nBefore adding the new filter into the fuzzers, please make sure the filter is designed to accept untrusted inputs, or ready to be hardened to accept untrusted inputs.\n\n\n# Add a new ReadFilter into Generic Readfilter Fuzzer\n## Step1. Make sure the filter can be linked into the fuzzer\nThere are two ways to link it into the fuzzer. \n* [Recommended] In the file [extensions_build_config.bzl](https://github.com/envoyproxy/envoy/blob/master/source/extensions/extensions_build_config.bzl), the name of the filter should have a prefix `envoy.filters.network`. If it has such a prefix, the filter will be automatically linked into Generic ReadFilter Fuzzer.\n* [Not recommended]If for some reasons the filter's name doesn't have such a prefix, the config of the filter must be added into the `deps` field of `network_readfilter_fuzz_test` module in the file [BUILD](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/BUILD).\n### Step2. 
Add the filter name into supported_filter_names\nIn [uber_per_readfilter.cc](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc), add the filter name into the vector `supported_filter_names` in method `UberFilterFuzzer::filterNames()`.\n```\nconst std::vector<absl::string_view> supported_filter_names = {\n...\nNetworkFilterNames::get().ExtAuthorization, NetworkFilterNames::get().TheNewFilterCreatedByYou,\n...\n};\n```\n\n# Add a new WriteFilter into Generic Writefilter Fuzzer\n## Step 1. Make sure the filter can be linked into the fuzzer\nFor WriteFilter, the config of the filter must be added into the `deps` field of `network_writefilter_fuzz_test` module in the file [BUILD](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/BUILD).\n```\nenvoy_cc_fuzz_test(\n    name = \"network_writefilter_fuzz_test\",\n    srcs = [\"network_writefilter_fuzz_test.cc\"],\n    corpus = \"network_writefilter_corpus\",\n    # All Envoy network filters must be linked to the test in order for the fuzzer to pick\n    # these up via the NamedNetworkFilterConfigFactory.\n    deps = [\n        \":uber_writefilter_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_broker_config_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:config\",\n        \"//source/extensions/filters/network/mysql_proxy:config\",\n        \"//source/extensions/filters/network/zookeeper_proxy:config\",\n        \"//source/extensions/filters/network/the_new_filter_created_by_you:config\", // <---Add the filter config module here\n        \"//test/config:utility_lib\",\n    ],\n)\n```\n## Step 2. 
Add the filter name into supported_filter_names\nIn [uber_per_writefilter.cc](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/uber_per_writefilter.cc), add the filter name into the vector `supported_filter_names` in method `UberWriteFilterFuzzer::filterNames()`.\n```\nconst std::vector<absl::string_view> supported_filter_names = {\n      ...\n      NetworkFilterNames::get().ExtAuthorization, NetworkFilterNames::get().TheNewFilterCreatedByYou,\n      ...\n    };\n```\n\n# Add test cases into corpus\nGood test cases can provide good examples for fuzzers to find more paths in the code, increase the coverage and help find bugs more efficiently.\nEach test case is a file under the folder [network_readfilter_corpus](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/network_readfilter_corpus) or [network_writefilter_corpus](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/network_writefilter_corpus). It consists of two parts: `config` and `actions`. \n`config` is the protobuf to instantiate a filter, and `actions` are sequences of actions to take in order to test the filter. 
\nAn example for testing MongoProxy filter:\n```\nconfig {\n  name: \"envoy.filters.network.mongo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy\"\n    value: \"\\n\\001\\\\\\032\\t\\032\\002\\020\\010\\\"\\003\\010\\200t \\001\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\120\\0\\0\\0\\1\\0\\0\\0\\1\\0\\0\\0\\324\\7\\0\\0\\4\\0\\0\\0\\164\\145\\163\\164\\56\\164\\145\\163\\164\\0\\24\\0\\0\\0\\377\\377\\377\\377\\52\\0\\0\\0\\2\\163\\164\\162\\151\\156\\147\\137\\156\\145\\145\\144\\137\\145\\163\\143\\0\\20\\0\\0\\0\\173\\42\\146\\157\\157\\42\\72\\40\\42\\142\\141\\162\\12\\42\\175\\0\\0\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 10000\n  }\n}\n```\n* `config.name` is the name of the filter. \n* `config.typed_config.type_url` is the type url of the filter config API. \n* `config.typed_config.value` is the serialized string of the config protobuf, and in C++ we can call`config.SerializeAsString()` to obtain this. This string may contain special characters. Recommend using octal or hexadecimal sequence for the string.\n* `actions.on_data.data` (or `actions.on_write.data`) is the buffer parameter `data`(in string format) for testing ReadFilter's method onData() (or for testing WriteFilter's method onWrite()). This string may contain special characters. 
Recommend using octal or hexadecimal sequence for the string.\n* `actions.on_data.end_stream` (or `actions.on_write.end_stream`) is the bool parameter `end_stream` for testing ReadFilter's method onData() (or for testing WriteFilter's method onWrite()).\n* `actions.on_new_connection` is an action to call `onNewConnection` method of a ReadFilter.\n* `actions.advance_time.milliseconds` is the duration in milliseconds for the simulatedSystemTime to advance by.\nFor more details, see the APIs for [ReadFilter Fuzz Testcase](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.proto) and  [WriteFilter Fuzz Testcase](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.proto).\n\n## Convert a unit test case to a fuzz test case manually\nThis section explains an approach to generate a corpus from unit tests. It is an optional step for users who want to generate the highest possible coverage.\nUnit test cases usually leads the filter into interesting states. Currently there is no automatic way to convert a unit test case into a fuzz test case. 
However, there is a way to convert it manually.\nWe can write a utility function like this and use it to print the `buffer` and `protobuf` as a octal sequence to avoid invisible characters:\n```\nstatic std::string toOct(const std::string& source, const std::string& info) {\n   std::stringstream ss;\n   for (unsigned char c : source) {\n     int n=c-'\\0';\n     ss<<'\\\\'<<std::oct<<n;\n   }\n   std::cout<<\"info = \"<<info<<\", string:\"<<ss.str()<<std::endl;\n   return ss.str();\n }\n```\nIn the unit test code, we temporarily add a function(finally we will remove it) like the above one.\nThen we can fill in `config.typed_config.value` with the value returned or printed by \n```toOct(config.SerializeAsString(), \"config serialized string: \")``` \nwhere `config` is the config protobuf in a unit test case.\n\nWe can also fill in `actions.on_data.data` or `actions.on_write.data` with the value returned or printed by \n```toOct(buffer.toString(), \"buffer:\")``` \nwhere `buffer` is the buffer to pass to `onData()` or `onWrite()` in a unit test case.\n\nPlease note that the two fuzzers use the \"real input\" for fuzzers. If you are using a mock decoder and pass an empty buffer to onData(), that test case won't help cover much code in the fuzzers(but the config protobuf is still helpful).\n\n## Known issues in the fuzzers\n* Both of the fuzzers use static helper classes to speed up the fuzz tests. Once there is an issue which is unreproducile by a single testcase, please check the mock objects which are modified by the filters and have states inside (some of them have a vector or a map object inside). Please make sure they are correctly reset or cleared in `UberFilterFuzzer::reset()` or `UberWriteFilterFuzzer::reset()`.\n* The fuzzers don't support system calls like file IO or network IO. 
Please avoid triggering the related function calls by disabling the test case in `UberFilterFuzzer::checkInvalidInputForFuzzer`, or add some mock objects depending on your functionality.\n"
  },
  {
    "path": "source/docs/quiche_integration.md",
    "content": "### Overview\n\nQUICHE is integrated in the way described below:\n\nA combination of QUIC session and connection serves as a Network::Connection instance. More than that, QUIC session manages all the QUIC streams. The QUIC codec is a very thin layer between QUIC session and HCM. It doesn't do de-multiplexing or stream management, but only provides interfaces for the HCM to communicate with the QUIC session.\n\nQUIC's Http::StreamEncoder and Http::StreamDecoder implementation is decoupled. The encoder is implemented by EnvoyQuicStream which is a QUIC stream and owned by session. The HCM owns the decoder which can be accessed by QUIC stream instances. And the decoder also knows about stream encoder.\n\n### Request pipeline\n\nThe QUIC stream calls decodeHeaders() to deliver request headers. The request body needs to be delivered after headers are delivered as some QUICHE implementations allow the body to arrive earlier than headers. If not read disabled, it always deliver available data via decodeData(). The stream doesn't buffer any readable data in QUICHE stream buffers.\n\n### Response pipeline\n\nThe HCM will call encoder's encodeHeaders() to write response headers, and then encodeData() and encodeTrailers(). encodeData() calls WriteBodySlices() to write out response body. The quic stream in QUICHE configures its send buffer threshold QuicStream::buffered_data_threshold_ to be high enough to take all the data passed in, so stream functionally has unlimited buffer.\n\n### Flow Control\n\n#### Receive buffer\n\nAll arrived out-of-order data is buffered in QUICHE stream. This buffer is capped by max stream flow control window in QUICHE which is 16MB. Once bytes are put in sequence and ready to be used, OnBodyDataAvailable() is called. The stream implementation overrides this call and calls StreamDecoder::decodeData() in it. 
Request and response body are buffered in each L7 filter if desired, and the stream itself doesn't buffer any of them unless set as read blocked.\n\nWhen upstream or any L7 filter reaches its buffer limit, it will call Http::Stream::readDisable() with false to set QUIC stream to be read blocked. In this state, even if more request/response body is available to be delivered, OnBodyDataAvailable() will not be called. As a result, downstream flow control will not shift as no data will be consumed. As both filters and upstream buffers can call readDisable(), each stream has a counter of how many times the HCM blocks the stream. When the counter is cleared, the stream will set its state to unblocked and thus deliver any new and existing available data buffered in the QUICHE stream.\n\n#### Send buffer\n\nWe use the unlimited stream send buffer in QUICHE along with a book keeping data structure `EnvoyQuicSimulatedWatermarkBuffer` to serve the function of WatermarkBuffer in Envoy to prevent buffering too much in QUICHE.\n\nWhen the bytes buffered in a stream's send buffer exceeds its high watermark, its inherited method StreamCallbackHelper::runHighWatermarkCallbacks() is called. The buffered bytes will go below stream's low watermark as the stream writes out data gradually via QuicStream::OnCanWrite(). In this case StreamCallbackHelper::runLowWatermarkCallbacks() will be called. QUICHE buffers all the data upon QuicSpdyStream::WriteBodySlices(), assuming `buffered_data_threshold_` is set high enough, and then writes all or part of them according to flow control window and congestion control window. To prevent transient changes in buffered bytes from triggering these two watermark callbacks one immediately after the other, encodeData() and OnCanWrite() only update the watermark bookkeeping once at the end if buffered bytes are changed.\n\nQUICHE doesn't buffer data at the local connection layer. 
All the data is buffered in the respective streams.To prevent the case where all streams collectively buffers a lot of data, there is also a simulated watermark buffer for each QUIC connection which is updated upon each stream write.\n\nWhen the aggregated buffered bytes goes above high watermark, its registered network callbacks will call Network::ConnectionCallbacks::onAboveWriteBufferHighWatermark(). The HCM will notify each stream via QUIC codec Http::Connection::onUnderlyingConnectionAboveWriteBufferHighWatermark() which will call each stream's StreamCallbackHelper::runHighWatermarkCallbacks(). There might be a way to simply the call stack as Quic connection already knows about all the stream, there is no need to call to HCM and notify each stream via codec. But here we just follow the same logic as HTTP2 codec does. In the same way, any QuicStream::OnCanWrite() may change the aggregated buffered bytes in the connection level bookkeeping as well. If the buffered bytes goes down below the low watermark, the same calls will be triggered to propagate onBelowWriteBufferLowWatermark() to each stream.\n\nAs to Http::StreamEncoder::encodeHeaders()/encodeTrailers(), the accounting is done differently between Google QUIC and IETF QUIC:\n * In Google QUIC, encodeHeaders()/encodeTrailers() check the buffer size increase on header stream before and after writing headers/trailers. In QuicSession::OnCanWrite(), may drain header stream send buffer, so there we also check send buffer size decrease on header stream.\n * In IETF QUIC, encodeHeaders()/encodeTrailers() check the buffer size increase on the corresponding data stream which is similar to encodeData(). The buffered headers/trailers are only drained via QuicStream::OnCanWrite() so there is no need to check QuicSession::OnCanWrite.\n"
  },
  {
    "path": "source/docs/repokitteh.md",
    "content": "# RepoKitteh\n\n## What is RepoKitteh?\n\n<img src=\"https://repokitteh.io/logo.svg\" height=\"100\" align=\"right\">\n\n[RepoKitteh](https://repokitteh.io) is a [GitHub application](https://developer.github.com/apps/) that provides an easy way to create, integrate and maintain GitHub bots. It is deployed in GCP and supplied to Envoy under a contract with the CNCF.\nThe application is installed on specific GitHub repositories and interacts with these by receiving webhooks and making GitHub API calls. A root `repokitteh.star` script tells the application what to do based on the webhook received.\n\n## Integration with Envoy\nThe file [repokitteh.star](https://github.com/envoyproxy/envoy/blob/master/repokitteh.star), which resides in the root of the Envoy repository tells RepoKitteh what functionality to use. The file is written in the [Starlark language](https://github.com/bazelbuild/starlark/), which is a Python dialect with well defined threading and hermeticity guarantees.\n\nFor example, the statement\n```\nuse(\"github.com/repokitteh/modules/assign.star\")\n```\ntells RepoKitteh to use the [assign.star](https://github.com/repokitteh/modules/blob/master/assign.star) module.\nSimilar modules can be integrated in the future into Envoy in the same way.\n\n## Current Functionality\n### [Assign](https://github.com/repokitteh/modules/blob/master/assign.star)\nSet assignees to issues or pull requests.\n\nExamples:\n```\n/assign @someone\n```\nAdds `@someone` as an assignee to the issue or pull request that this comment is made on.\n\n```\n/unassign @someone\n```\nRemoves `@someone` as an assignee.\n\nOnly organization members can assign or unassign other users, who must be organization members as well.\n\n[Demo PR](https://github.com/envoyproxy/envoybot/pull/6)\n\n### [Review](https://github.com/repokitteh/modules/blob/master/review.star)\nRequests a user to review a pull request.\n\nExamples:\n```\n/review @someone\n```\nAsks `@someone` to review the 
pull requests that this comment is made on.\n\n```\n/unreview @someone\n```\nRemoves `@someone` from the reviewers list.\n\nOnly organization members can request a review from other users or cancel it, who must be organization members as well.\n\n[Demo PR](https://github.com/envoyproxy/envoybot/pull/7)\n\n### [Wait](https://github.com/repokitteh/modules/blob/master/wait.star)\nWait for activity on an issue or a PR.\n\nExample:\n```\n/wait\n```\nSets the label `waiting` on a PR. When a new commit is pushed the label will be removed.\n\n[Demo PR](https://github.com/envoyproxy/envoybot/pull/14)\n\nIt is also possible to wait until any comment is submitted and/or a new commit is pushed.\n\nExample:\n```\n/wait-any\n```\nSets the label `waiting:any` on a PR. When a new commit is pushed or any comment is submitted the label will be removed.\n\n[Demo PR](https://github.com/envoyproxy/envoybot/pull/15)\n\n### [CircleCI Retest](https://github.com/repokitteh/modules/blob/master/circleci.star)\nRestart failed CircleCI tests.\n\nExample:\n```\n/retest-circle\n```\nRestarts all failed CircleCI tests, as reported in the commit statuses.\n\n[Demo PR](https://github.com/envoyproxy/envoy/pull/12613#issuecomment-676141200)\n\n### [Azure Pipelines Retest](https://github.com/envoyproxy/envoy/blob/master/ci/repokitteh/modules/azure_pipelines.star)\nRestart failed Azure pipelines.\n\nExample:\n```\n/retest\n```\nRestarts all failed Azure pipelines.\n\n[Demo PR](https://github.com/envoyproxy/envoy/pull/12860#issuecomment-684832313)\n\n### [Granular Ownerscheck](https://github.com/repokitteh/modules/blob/master/ownerscheck.star)\n\nTwo types of approvals:\n1. Global approvals, done by approving the PR using Github's review approval feature.\n2. Partial approval, done by commenting \"/lgtm [label]\" where label is the label\n   associated with the path. This does not affect GitHub's PR approve status, only\n   this module's maintained commit status. 
This approval is automatically revoked\n   if any further changes are done to the relevant files in this spec.\n\n#### Backport labeling\n\n```\n/backport\n```\n\nwill labels the PR commented on with `backport/review`.\n"
  },
  {
    "path": "source/docs/stats.md",
    "content": "# Envoy Stats System\n\nEnvoy statistics track numeric metrics on an Envoy instance, optionally spanning\nbinary program restarts. The metrics are tracked as:\n\n * Counters: strictly increasing 64-bit integers.\n * Gauges: 64-bit integers that can rise and fall.\n * Histograms: mapping ranges of values to frequency. The ranges are auto-adjusted as\n   data accumulates. Unlike counters and gauges, histogram data is not retained across\n   binary program restarts.\n * TextReadouts: Unicode strings. Unlike counters and gauges, text readout data\n   is not retained across binary program restarts.\n\nIn order to support restarting the Envoy binary program without losing counter and gauge\nvalues, they are passed from parent to child in an RPC protocol.\nThey were previously held in shared memory, which imposed various restrictions.\nUnlike the shared memory implementation, the RPC passing *requires a mode-bit specified\nwhen constructing gauges indicating whether it should be accumulated across hot-restarts*.\n\n## Performance and Thread Local Storage\n\nA key tenant of the Envoy architecture is high performance on machines with\nlarge numbers of cores. See\nhttps://blog.envoyproxy.io/envoy-threading-model-a8d44b922310 for details. This\nrequires lock-free access to stats on the fast path -- when proxying requests.\n\nFor stats, this is implemented in\n[ThreadLocalStore](https://github.com/envoyproxy/envoy/blob/master/source/common/stats/thread_local_store.h), supporting the following features:\n\n * Thread local per scope stat caching.\n * Overlapping scopes with proper reference counting (2 scopes with the same name will point to\n   the same backing stats).\n * Scope deletion.\n * Lockless in the fast path.\n\nThis implementation is complicated so here is a rough overview of the threading model.\n\n * The store can be used before threading is initialized. 
This is needed during server init.\n * Scopes can be created from any thread, though in practice they are only created from the main\n   thread.\n * Scopes can be deleted from any thread, and they are in practice as scopes are likely to be\n   shared across all worker threads.\n * Per thread caches are checked, and if empty, they are populated from the central cache.\n * Scopes are entirely owned by the caller. The store only keeps weak pointers.\n * When a scope is destroyed, a cache flush operation is posted on all threads to flush any\n   cached data owned by the destroyed scope.\n * Scopes use a unique incrementing ID for the cache key. This ensures that if a new scope is\n   created at the same address as a recently deleted scope, cache references will not accidentally\n   reference the old scope which may be about to be cache flushed.\n * Since it's possible to have overlapping scopes, we de-dup stats when counters() or gauges() is\n   called since these are very uncommon operations.\n * Overlapping scopes will not share the same backing store. This is to keep things simple,\n   it could be done in the future if needed.\n\n### Histogram threading model\n\nEach Histogram implementation will have 2 parts.\n\n * *main* thread parent which is called `ParentHistogram`.\n * *per-thread* collector which is called `ThreadLocalHistogram`.\n\nWorker threads will write to ParentHistogram which checks whether a TLS\nhistogram is available. If there is one it will write to it, otherwise creates\nnew one and writes to it. During the flush process the following sequence is\nfollowed.\n\n * The main thread starts the flush process by posting a message to every worker which tells the\n   worker to swap its *active* histogram with its *backup* histogram. This is achieved via a call\n   to the `beginMerge` method.\n * Each TLS histogram has 2 histograms it makes use of, swapping back and forth. 
It manages a\n   current_active index via which it writes to the correct histogram.\n * When all workers are done, the main thread continues with the flush process where the\n   *actual* merging happens.\n * As the active histograms are swapped in TLS histograms, on the main thread, we can be sure\n   that no worker is writing into the *backup* histogram.\n * The main thread now goes through all histograms, collects them across each worker and\n   accumulates into *interval* histograms.\n * Finally the main *interval* histogram is merged to *cumulative* histogram.\n\n`ParentHistogram`s are held weakly in a set in ThreadLocalStore. Like other stats,\nthey keep an embedded reference count and are removed from the set and destroyed\nwhen the last strong reference disappears. Consequently, we must hold a lock for\nthe set when decrementing histogram reference counts. A similar process occurs for\nother types of stats, but in those cases it is taken care of in `AllocatorImpl`.\nThere are strong references to `ParentHistograms` in TlsCacheEntry::parent_histograms_.\n\nThread-local `TlsHistogram`s are created on behalf of a `ParentHistogram`\nwhenever accessed from a worker thread. 
They are strongly referenced in the\n`ParentHistogram` as well as in a cache in the `ThreadLocalStore`, to help\nmaintain data continuity as scopes are re-created during operation.\n\n## Stat naming infrastructure and memory consumption\n\nStat names are replicated in several places in various forms.\n\n * Held with the stat values, in `CounterImpl`, `GaugeImpl` and `TextReadoutImpl`, which are defined in\n   [allocator_impl.cc](https://github.com/envoyproxy/envoy/blob/master/source/common/stats/allocator_impl.cc)\n * In [MetricImpl](https://github.com/envoyproxy/envoy/blob/master/source/common/stats/metric_impl.h)\n   in a transformed state, with tags extracted into vectors of name/value strings.\n * In static strings across the codebase where stats are referenced\n * In a [set of\n   regexes](https://github.com/envoyproxy/envoy/blob/master/source/common/config/well_known_names.cc)\n   used to perform tag extraction.\n\nThere are stat maps in `ThreadLocalStore` for capturing all stats in a scope,\nand each per-thread caches. However, they don't duplicate the stat names.\nInstead, they reference the `StatName` held in the `CounterImpl` or `GaugeImpl`, and thus\nare relatively cheap; effectively those maps are all pointer-to-pointer.\n\nFor this to be safe, cache lookups from locally scoped strings must use `.find`\nrather than `operator[]`, as the latter would insert a pointer to a temporary as\nthe key. If the `.find` fails, the actual stat must be constructed first, and\nthen inserted into the map using its key storage. This strategy saves\nduplication of the keys, but costs an extra map lookup on each miss.\n\n### Naming Representation\n\nWhen stored as flat strings, stat names can dominate Envoy memory usage when\nthere are a large number of clusters. Stat names typically combine a small\nnumber of keywords, cluster names, host names, and response codes, separated by\n`.`. For example `CLUSTER.upstream_cx_connect_attempts_exceeded`. 
There may be\nthousands of clusters, and roughly 100 stats per cluster. Thus, the number\nof combinations can be large. It is significantly more efficient to symbolize\neach `.`-delimited token and represent stats as arrays of symbols.\n\nThe transformation between flattened string and symbolized form is CPU-intensive\nat scale. It requires parsing, encoding, and lookups in a shared map, which must\nbe mutex-protected. To avoid adding latency and CPU overhead while serving\nrequests, the tokens can be symbolized and saved in context classes, such as\n[Http::CodeStatsImpl](https://github.com/envoyproxy/envoy/blob/master/source/common/http/codes.h).\nSymbolization can occur on startup or when new hosts or clusters are configured\ndynamically. Users of stats that are allocated dynamically per cluster, host,\netc, must explicitly store partial stat-names in their class instances, which later\ncan be composed dynamically at runtime in order to fully elaborate counters,\ngauges, etc, without taking symbol-table locks, via `SymbolTable::join()`.\n\n### `StatNamePool` and `StatNameSet`\n\nThese two helper classes evolved to make it easy to deploy the symbol table API\nacross the codebase.\n\n`StatNamePool` provides pooled allocation for any number of\n`StatName` objects, and is intended to be held in a data structure alongside the\n`const StatName` member variables. Most names should be established during\nprocess initialization or in response to xDS updates.\n\n`StatNameSet` provides some associative lookups at runtime. The associations\nshould be created before the set is used for requests, via\n`StatNameSet::rememberBuiltin`. This is useful in scenarios where stat-names are\nderived from data in a request, but there is a limited set of known tokens, such\nas SSL ciphers or Redis commands.\n\n### Dynamic stat tokens\n\nWhile stats are usually composed of tokens that are known at compile-time, there\nare scenarios where the names are newly discovered from data in requests. 
To\navoid taking locks in this case, tokens can be formed dynamically using\n`StatNameDynamicStorage` or `StatNameDynamicPool`. In this case we lose\nsubstring sharing but we avoid taking locks. Dynamically generated tokens can\nbe combined with symbolized tokens from `StatNameSet` or `StatNamePool` using\n`SymbolTable::join()`.\n\nRelative to using symbolized tokens, the cost of using dynamic tokens is:\n\n * the StatName must be allocated and populated from the string data every time\n   `StatNameDynamicPool::add()` is called or `StatNameDynamicStorage` is constructed.\n * the resulting `StatName`s are as long as the string, rather than benefiting from\n   a symbolized representation, which is typically 4 bytes or less per token.\n\nHowever, the cost of using dynamic tokens is on par with the cost of not using\na StatName system at all, only adding one re-encoding. And it is hard to quantify\nthe benefit of avoiding mutex contention when there are large numbers of threads.\n\n### Symbol Table Memory Layout\n\nBelow is a diagram\n[(source)](https://docs.google.com/drawings/d/1eG6CHSUFQ5zkk-j-kcFCUay2-D_ktF39Tbzql5ypUDc/edit)\nshowing the memory layout for a few scenarios of constructing and joining symbolized\n`StatName` and dynamic `StatName`.\n\n![Symbol Table Memory Diagram](symtab.png)\n\n### Symbol Contention Risk\n\nThere are several ways to create hot-path contention looking up stats by name,\nand there is no bulletproof way to prevent it from occurring.\n * The [stats macros](https://github.com/envoyproxy/envoy/blob/master/include/envoy/stats/stats_macros.h) may be used in a data structure which is constructed in response to requests.\n * An explicit symbol-table lookup, via `StatNamePool` or `StatNameSet` can be\n   made in the hot path.\n\nIt is difficult to search for those scenarios in the source code or prevent them\nwith a format-check, but we can determine whether symbol-table lookups are\noccurring via an admin endpoint that shows 20 recent 
lookups by name, at\n`ENVOY_HOST:ADMIN_PORT/stats?recentlookups`.\n\nAs of October 6, 2020, the \"fake\" symbol table implementation has been removed\nfrom the system, and the \"--use-fake-symbol-table\" option is now a no-op,\ntriggering a warning if set to \"1\". The option will be removed in a later\nrelease.\n\n### Symbol Table Class Overview\n\nClass | Superclass | Description\n-----| ---------- | ---------\nSymbolTable | | Abstract class providing an interface for symbol tables\nSymbolTableImpl | SymbolTable | Implementation of SymbolTable API where StatName share symbols held in a table\nSymbolTableImpl::Encoding | | Helper class for incrementally encoding strings into symbols\nStatName | | Provides an API and a view into a StatName (dynamic or symbolized). Like absl::string_view, the backing store must be separately maintained.\nStatNameStorageBase | | Holds storage (an array of bytes) for a dynamic or symbolized StatName\nStatNameStorage  | StatNameStorageBase | Holds storage for a symbolized StatName. Must be explicitly freed (not just destructed).\nStatNameManagedStorage | StatNameStorage | Like StatNameStorage, but is 8 bytes larger, and can be destructed without free(). \nStatNameDynamicStorage | StatNameStorageBase | Holds StatName storage for a dynamic (not symbolized) StatName.\nStatNamePool | | Holds backing store for any number of symbolized StatNames.\nStatNameDynamicPool | | Holds backing store for any number of dynamic StatNames.\nStatNameList | | Provides packed backing store for an ordered collection of StatNames, that are only accessed sequentially. Used for MetricImpl.\nStatNameStorageSet | | Implements a set of StatName with lookup via StatName. Used for rejected stats.\nStatNameSet | | Implements a set of StatName with lookup via string_view. Used to remember well-known names during startup, e.g. Redis commands.\n\n### Hot Restart\n\nContinuity of stat counters and gauges over hot-restart is supported. 
This occurs via\na sequence of RPCs from parent to child, issued while child is in lame-duck. These\nRPCs contain a map of stat-name strings to values.\n\nOne implementation complexity is that when decoding these names in the child, we\nmust know which segments of the stat names were encoded dynamically. This is\nimplemented by sending an auxiliary map of stat-name strings to lists of spans,\nwhere the spans identify dynamic segments.\n\nDynamic segments are rare, used only by Dynamo, Mongo, IP Tagging Filter, Fault\nFilter, and `x-envoy-upstream-alt-stat-name` as of this writing. So in most\ncases this dynamic-segment map is empty.\n\n## Tags and Tag Extraction\n\nTBD\n\n## Disabling statistics by substring or regex\n\nTBD\n\n## Stats Memory Tests\n\nRegardless of the underlying data structures used to implement statistics,\nmemory usage will grow with the number of hosts and clusters. When a PR is\nissued that adds new per-host or per-cluster stats, this will have a\nmultiplicative effect on consumed memory. This can become significant for\ndeployments with O(10k) clusters or hosts.\n\nTo improve visibility for this memory growth, there are [memory-usage\nintegration\ntests](https://github.com/envoyproxy/envoy/blob/master/test/integration/stats_integration_test.cc).\n\nIf a PR fails the tests in that file due to unexpected memory consumption, it\ngives the author and reviewer an opportunity to consider the cost/value of the\nnew stats. If the test fails because the new byte-count is lower, then all\nthat's needed is to lock in the improvement by updating the expected values. If\nthe new per-cluster or per-host memory consumption is higher, then we must\ndecide whether the value from the added stats justify the overhead for all Envoy\ndeployments. 
In either case, we must update the golden values and add a comment\nto the table in the test indicating the memory impact of each PR.\n\nDevelopers can iterate through changes in these tests locally with:\n\n```bash\n  bazel test -c opt --test_env=ENVOY_MEMORY_TEST_EXACT=true \\\n      test/integration:stats_integration_test\n```\n\n"
  },
  {
    "path": "source/docs/subset_load_balancer.md",
    "content": "### Overview\n\nThe subset load balancer (SLB) divides the upstream hosts in a cluster into one or more subsets. At\nrequest time the SLB uses information from the `LoadBalancerContext` to choose one of its\nsubsets. Choosing a host is then delegated to the subset's load balancer. If no subset matches the\ncontext, the SLB falls back (depending on configuration) to balancing over a default subset,\nbalancing over any upstream host in the cluster, or returning no host.\n\nLoad balancing within a subset is accomplished by constructing one of the existing load balancer\ntypes with `Upstream::HostSet` that presents a filtered copy of the upstream hosts. All load\nbalancer types except the Original DST load balancer may be used for subset load balancing.\n\n### Fallback\n\nThe SLB can be configured with one of three fallback policies. If no subset matching the\n`LoadBalancerContext` is found:\n\n1. `NO_FALLBACK` specifies that `chooseHost` returns `nullptr` and load balancing fails.\n2. `ANY_ENDPOINT` specifies that load balancing occurs over the entire set of upstream hosts.\n3. `DEFAULT_SUBSET` specifies that load balancing occurs over a specific subset of upstream\n   hosts. If the default subset is empty, `chooseHost` returns `nullptr` and load balancing fails.\n\nDuring construction, if the fallback policy is `ANY_ENDPOINT`, a default subset is constructed\nusing the original `Upstream::HostSet`. If the fallback policy is `DEFAULT_SUBSET`, but the\nconfiguration does not specify any metadata (e.g. all hosts match), the SLB changes the fallback\npolicy to `ANY_ENDPOINT`.\n\n### Selecting Subsets\n\nThe initial implementation supports selecting subsets by endpoint metadata provided via EDS.\n\nThe configuration specifies a list of subset selectors. Each selector is used, in turn, to create\nsubsets of hosts. The selectors exist to limit the combinations of endpoint metadata used for\ncreating subsets. 
We precompute the subsets outside the load balancing path to avoid locking.\n\nCurrently the only mechanism for specifying a selector is to provide a list of metadata keys:\n\n``` json\n{\n  \"subset_selectors\": [\n    { \"keys\": [ \"a\", \"b\" ] },\n    { \"keys\": [ \"x\" ] }\n  ]\n}\n```\n\nFor each selector, the SLB iterates over the hosts and inspects the host's metadata for the\n`\"envoy.lb\"` filter. If a host's metadata provides values for each key, a subset is created for the\nmetadata. For example, given the selectors above, if a host's metadata contains `{a=1, b=2}`, a\nsubset is created for `{a=1, b=2}`. Other hosts with `{a=1, b=2}` are also included in the subset.\nA host with metadata like `{a=1, b=2, x=3}` is included in two subsets (`{a=1, b=2}` and\n{`x=3`}). The same keys may appear in multiple selector entries: it is feasible to have both an\n`{a=1, b=2}` subset and an `{a=1}` subset.\n\nOn update, the SLB divides the hosts added into the appropriate subset(s) and triggers update\nevents on the filtered host sets. The SLB also manages the optional \"local HostSet\" used for\nzone-aware routing.\n\nThe CDS configuration for the subset selectors is meant to allow future extension. For example:\n\n1. Selecting endpoint metadata keys by a prefix or other string matching algorithm, or\n2. Using a list-typed metadata value to allow a single endpoint to have multiple values for a\n   metadata key.\n\nSubsets are stored in a trie-like fashion. Keys in the selectors are lexically sorted. An\n`LbSubsetMap` is an `unordered_map` of string keys to `ValueSubsetMap`. `ValueSubsetMap` is an\n`unordered_map` of (wrapped, see below) `ProtobufWkt::Value` to `LbSubsetEntry`. The\n`LbSubsetEntry` may contain an `LbSubsetMap` of additional keys or a `Subset`. `Subset`\nencapsulates the filtered `Upstream::HostSet` and `Upstream::LoadBalancer` for a subset.\n\n`ProtobufWkt::Value` is wrapped to provide a cached hash value for the value. 
Currently,\n`ProtobufWkt::Value` is hashed by first encoding the value as a string and then hashing the\nstring. By wrapping it, we can compute the hash value outside the request path for both the\nmetadata values provided in `LoadBalancerContext` and those used internally by the SLB.\n\n### Subset Lookup\n\nCurrently we require the metadata provided in `LoadBalancerContext` to match a subset exactly in\norder to select the subset for load balancing. Changing this behavior has implications for the\nperformance of the subset selection algorithm. The current algorithm, described below, runs in\n`O(N)` time with respect to the number of metadata key-value pairs in the `LoadBalancerContext`.\n\nThe metadata key-value pairs from `LoadBalancerContext` must be sorted by key for the algorithm to\nwork. Currently we expect lexical order, but the sort order doesn't matter as long as both the\ncontext and load balancer use the same ordering. Sorting of the `LoadBalancerContext` keys is\ncurrently handled by `Router::RouteEntryImplBase`.\n\nGiven a sequence of N metadata keys and values (previously sorted lexically by key) from\n`LoadBalancerContext`, we can look up the appropriate subset in `O(N)` time as follows. It may be\nhelpful to look at the [diagram](#diagram) provided in the example.\n\n1. Initialize `subsets` to refer to the root `LbSubsetMap` and `entry` to point at a null\n   `LbSubsetEntryPtr`.\n2. For each key-value in the metadata:\n   1. Lookup the key in `subsets` to find a `ValueSubsetMap`. (Average constant time.) If not\n      found, exit the loop.\n   2. Lookup the value in the `ValueSubsetMap` to find an `LbSubsetEntry`. (Average constant time.)\n      If not found, exit the loop.\n   3. Assign the `LbSubsetEntry`'s `LbSubsetMap` to `subsets`. (It may be empty.)\n   4. If this is the last key-value pair, assign the `LbSubsetEntry` to `entry`.\n3. 
If `entry` has been set and has a `Subset` value, we found a matching subset, delegate balancing\n   to the subset's load balancer.\n4. Otherwise, execute the fallback policy.\n\nN.B. `O(N)` complexity presumes that the delegate load balancer executes in constant time.\n\n### Example\n\nAssume a set of hosts from EDS with the following metadata, assigned to a single cluster.\n\nEndpoint | stage | version | type   | xlarge\n---------|-------|---------|--------|-------\ne1       | prod  | 1.0     | std    | true\ne2       | prod  | 1.0     | std    |\ne3       | prod  | 1.1     | std    |\ne4       | prod  | 1.1     | std    |\ne5       | prod  | 1.0     | bigmem |\ne6       | prod  | 1.1     | bigmem |\ne7       | dev   | 1.2-pre | std    |\n\nNote: Only e1 has the \"xlarge\" metadata key.\n\nGiven this CDS `envoy::config::cluster::v3::Cluster`:\n\n``` json\n{\n  \"name\": \"c1\",\n  \"lb_policy\": \"ROUND_ROBIN\",\n  \"lb_subset_config\": {\n    \"fallback_policy\": \"DEFAULT_SUBSET\",\n    \"default_subset\": {\n      \"stage\": \"prod\",\n      \"version\": \"1.0\",\n      \"type\": \"std\"\n    },\n    \"subset_selectors\": [\n      { \"keys\": [ \"stage\", \"type\" ] },\n      { \"keys\": [ \"stage\", \"version\" ] },\n      { \"keys\": [ \"version\" ] },\n      { \"keys\": [ \"xlarge\", \"version\" ] },\n    ]\n  }\n}\n```\n\nThe following subsets are created:\n\n`stage=prod, type=std` (e1, e2, e3, e4)\n`stage=prod, type=bigmem` (e5, e6)\n`stage=dev, type=std` (e7)\n`stage=prod, version=1.0` (e1, e2, e5)\n`stage=prod, version=1.1` (e3, e4, e6)\n`stage=dev, version=1.2-pre` (e7)\n`version=1.0` (e1, e2, e5)\n`version=1.1` (e3, e4, e6)\n`version=1.2-pre` (e7)\n`version=1.0, xlarge=true` (e1)\n\nIn addition, a default subset is created:\n\n`stage=prod, type=std, version=1.0` (e1, e2)\n\nAfter loading this configuration, the SLB's `LbSubsetMap` looks like this:\n\n<a name=\"diagram\"></a>\n![LbSubsetMap Diagram](subset_load_balancer_diagram.svg)\n\nGiven these 
`envoy::config::route::v3::Route` entries:\n\n``` json\n\"routes\": [\n  {\n    \"match\": {\n      \"prefix\": \"/\",\n      \"headers\": [\n        {\n          \"name\": \"x-custom-version\",\n          \"value\": \"pre-release\"\n        }\n      ]\n    },\n    \"route\": {\n      \"cluster\": \"c1\",\n      \"metadata_match\": {\n        \"filter_metadata\": {\n          \"envoy.lb\": {\n            \"version\": \"1.2-pre\",\n            \"stage\": \"dev\"\n          }\n        }\n      }\n    }\n  },\n  {\n    \"match\": {\n      \"prefix\": \"/\",\n      \"headers\": [\n        {\n          \"name\": \"x-hardware-test\",\n          \"value\": \"memory\"\n        }\n      ]\n    },\n    \"route\": {\n      \"cluster\": \"c1\",\n      \"metadata_match\": {\n        \"filter_metadata\": {\n          \"envoy.lb\": {\n            \"type\": \"bigmem\",\n            \"stage\": \"prod\"\n          }\n        }\n      }\n    }\n  },\n  {\n    \"match\": {\n      \"prefix\": \"/\"\n    },\n    \"route\": {\n      \"weighted_clusters\": {\n        \"clusters\": [\n          {\n            \"name\": \"c1\",\n            \"weight\": 90,\n            \"metadata_match\": {\n              \"filter_metadata\": {\n                \"envoy.lb\": {\n                  \"version\": \"1.0\"\n                }\n              }\n            }\n          },\n          {\n            \"name\": \"c1\",\n            \"weight\": 10,\n            \"metadata_match\": {\n              \"filter_metadata\": {\n                \"envoy.lb\": {\n                  \"version\": \"1.1\"\n                }\n              }\n            }\n          }\n        ]\n      },\n      \"metadata_match\": {\n        \"filter_metadata\": {\n          \"envoy.lb\": {\n            \"stage\": \"prod\",\n          }\n        }\n      }\n    }\n  }\n]\n```\n\nThe following headers may then be used to select subsets:\n\n`x-custom-version: pre-release` causes requests to be routed e7. 
This is an example of routing\nrequests to a developer launched instance for pre-release testing. If the e7 upstream leaves the\ncluster, the subset is removed and further requests with this header are routed to the default\nsubset (containing e1 and e2).\n\n`x-hardware-test: memory` causes requests to be load balanced over the e5 and e6 endpoints. This is\nan example of routing requests to upstreams running on a particular class of hardware, perhaps for\nload testing. If the bigmem hosts are removed from service, further requests with this header are\nrouted to the default subset.\n\nOtherwise, requests without those headers are split between two subsets. 90% of the requests are\nrouted to `stage=prod, version=1.0` (e1, e2, e5). 10% of the requests are routed to `stage=prod,\nversion=1.1` (e3, e4, e6). This is an example of gradually shifting traffic to a new version.\n"
  },
  {
    "path": "source/exe/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_binary\",\n    \"envoy_cc_library\",\n    \"envoy_cc_platform_dep\",\n    \"envoy_cc_posix_library\",\n    \"envoy_cc_win32_library\",\n    \"envoy_package\",\n)\nload(\"//source/extensions:all_extensions.bzl\", \"envoy_all_core_extensions\", \"envoy_all_extensions\")\nload(\"//bazel:repositories.bzl\", \"PPC_SKIP_TARGETS\", \"WINDOWS_SKIP_TARGETS\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nalias(\n    name = \"envoy\",\n    actual = \":envoy-static\",\n)\n\nenvoy_cc_binary(\n    name = \"envoy-static\",\n    stamped = True,\n    deps = [\":envoy_main_entry_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"envoy_common_lib\",\n    deps = [\n        \"//source/common/event:libevent_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//source/server:drain_manager_lib\",\n        \"//source/server:options_lib\",\n        \"//source/server:server_lib\",\n        \"//source/server:listener_hooks_lib\",\n    ] + select({\n        \"//bazel:windows_x86_64\": envoy_all_extensions(WINDOWS_SKIP_TARGETS),\n        \"//bazel:linux_ppc\": envoy_all_extensions(PPC_SKIP_TARGETS),\n        \"//conditions:default\": envoy_all_extensions(),\n    }),\n)\n\nenvoy_cc_library(\n    name = \"envoy_main_entry_lib\",\n    srcs = [\"main.cc\"],\n    external_deps = [\n        \"abseil_symbolize\",\n    ],\n    deps = [\n        \":envoy_main_common_lib\",\n        \":platform_impl_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"main_common_lib\",\n    srcs = [\"main_common.cc\"],\n    hdrs = [\"main_common.h\"],\n    deps = [\n        \":envoy_common_lib\",\n        \":platform_impl_lib\",\n        \":process_wide_lib\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:compiler_requirements_lib\",\n        
\"//source/common/common:perf_annotation_lib\",\n        \"//source/common/grpc:google_grpc_context_lib\",\n        \"//source/server:hot_restart_lib\",\n        \"//source/server:hot_restart_nop_lib\",\n        \"//source/server/config_validation:server_lib\",\n    ] + select({\n        \"//bazel:disable_signal_trace\": [],\n        \"//conditions:default\": [\n            \"//source/common/signal:sigaction_lib\",\n            \":terminate_handler_lib\",\n        ],\n    }),\n)\n\nenvoy_cc_library(\n    name = \"envoy_main_common_lib\",\n    deps = [\n        \":main_common_lib\",\n        \"//source/common/version:version_linkstamp\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_common_with_core_extensions_lib\",\n    deps = [\n        \"//source/common/event:libevent_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//source/server:drain_manager_lib\",\n        \"//source/server:options_lib\",\n        \"//source/server:server_lib\",\n        \"//source/server:listener_hooks_lib\",\n    ] + envoy_all_core_extensions(),\n)\n\nenvoy_cc_library(\n    name = \"envoy_main_common_with_core_extensions_lib\",\n    srcs = [\"main_common.cc\"],\n    hdrs = [\"main_common.h\"],\n    deps = [\n        \":envoy_common_with_core_extensions_lib\",\n        \":platform_impl_lib\",\n        \":process_wide_lib\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:compiler_requirements_lib\",\n        \"//source/common/common:perf_annotation_lib\",\n        \"//source/common/grpc:google_grpc_context_lib\",\n        \"//source/server:hot_restart_lib\",\n        \"//source/server:hot_restart_nop_lib\",\n        \"//source/server/config_validation:server_lib\",\n    ] + select({\n        \"//bazel:disable_signal_trace\": [],\n        \"//conditions:default\": [\n            \"//source/common/signal:sigaction_lib\",\n      
      \":terminate_handler_lib\",\n        ],\n    }),\n)\n\nenvoy_cc_library(\n    name = \"process_wide_lib\",\n    srcs = [\"process_wide.cc\"],\n    hdrs = [\"process_wide.h\"],\n    external_deps = [\"ares\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/event:libevent_lib\",\n        \"//source/common/http/http2:nghttp2_lib\",\n        \"//source/server:proto_descriptors_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"platform_impl_lib\",\n    deps = [\":platform_header_lib\"] +\n           envoy_cc_platform_dep(\"platform_impl_lib\"),\n)\n\nenvoy_cc_library(\n    name = \"platform_header_lib\",\n    hdrs = [\"platform_impl.h\"],\n    deps = [\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//include/envoy/thread:thread_interface\",\n    ],\n)\n\nenvoy_cc_posix_library(\n    name = \"platform_impl_lib\",\n    srcs = [\"posix/platform_impl.cc\"],\n    deps = [\n        \":platform_header_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/filesystem:filesystem_lib\",\n    ],\n)\n\nenvoy_cc_win32_library(\n    name = \"platform_impl_lib\",\n    srcs = [\"win32/platform_impl.cc\"],\n    deps = [\n        \":platform_header_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/filesystem:filesystem_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"terminate_handler_lib\",\n    srcs = [\"terminate_handler.cc\"],\n    hdrs = [\"terminate_handler.h\"],\n    tags = [\"backtrace\"],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:non_copyable\",\n        \"//source/server:backtrace_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/exe/main.cc",
    "content": "#include \"exe/main_common.h\"\n\n// NOLINT(namespace-envoy)\n\n/**\n * Basic Site-Specific main()\n *\n * This should be used to do setup tasks specific to a particular site's\n * deployment such as initializing signal handling. It calls main_common\n * after setting up command line options.\n */\nint main(int argc, char** argv) { return Envoy::MainCommon::main(argc, argv); }\n"
  },
  {
    "path": "source/exe/main_common.cc",
    "content": "#include \"exe/main_common.h\"\n\n#include <fstream>\n#include <iostream>\n#include <memory>\n#include <new>\n\n#include \"envoy/config/listener/v3/listener.pb.h\"\n\n#include \"common/common/compiler_requirements.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/perf_annotation.h\"\n#include \"common/network/utility.h\"\n#include \"common/stats/thread_local_store.h\"\n\n#include \"server/config_validation/server.h\"\n#include \"server/drain_manager_impl.h\"\n#include \"server/hot_restart_nop_impl.h\"\n#include \"server/listener_hooks.h\"\n#include \"server/options_impl.h\"\n#include \"server/server.h\"\n\n#include \"absl/debugging/symbolize.h\"\n#include \"absl/strings/str_split.h\"\n\n#ifdef ENVOY_HOT_RESTART\n#include \"server/hot_restart_impl.h\"\n#endif\n\nnamespace Envoy {\n\nServer::DrainManagerPtr ProdComponentFactory::createDrainManager(Server::Instance& server) {\n  // The global drain manager only triggers on listener modification, which effectively is\n  // hot restart at the global level. 
The per-listener drain managers decide whether to\n  // to include /healthcheck/fail status.\n  return std::make_unique<Server::DrainManagerImpl>(\n      server, envoy::config::listener::v3::Listener::MODIFY_ONLY);\n}\n\nRuntime::LoaderPtr ProdComponentFactory::createRuntime(Server::Instance& server,\n                                                       Server::Configuration::Initial& config) {\n  return Server::InstanceUtil::createRuntime(server, config);\n}\n\nMainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& time_system,\n                               ListenerHooks& listener_hooks,\n                               Server::ComponentFactory& component_factory,\n                               std::unique_ptr<Random::RandomGenerator>&& random_generator,\n                               Thread::ThreadFactory& thread_factory,\n                               Filesystem::Instance& file_system,\n                               std::unique_ptr<ProcessContext> process_context)\n    : options_(options), component_factory_(component_factory), thread_factory_(thread_factory),\n      file_system_(file_system), stats_allocator_(symbol_table_) {\n  // Process the option to disable extensions as early as possible,\n  // before we do any configuration loading.\n  OptionsImpl::disableExtensions(options.disabledExtensions());\n\n  switch (options_.mode()) {\n  case Server::Mode::InitOnly:\n  case Server::Mode::Serve: {\n    configureHotRestarter(*random_generator);\n\n    tls_ = std::make_unique<ThreadLocal::InstanceImpl>();\n    Thread::BasicLockable& log_lock = restarter_->logLock();\n    Thread::BasicLockable& access_log_lock = restarter_->accessLogLock();\n    auto local_address = Network::Utility::getLocalAddress(options_.localAddressIpVersion());\n    logging_context_ = std::make_unique<Logger::Context>(options_.logLevel(), options_.logFormat(),\n                                                         log_lock, options_.logFormatEscaped(),\n        
                                                 options_.enableFineGrainLogging());\n\n    configureComponentLogLevels();\n\n    // Provide consistent behavior for out-of-memory, regardless of whether it occurs in a try/catch\n    // block or not.\n    std::set_new_handler([]() { PANIC(\"out of memory\"); });\n\n    stats_store_ = std::make_unique<Stats::ThreadLocalStoreImpl>(stats_allocator_);\n\n    server_ = std::make_unique<Server::InstanceImpl>(\n        *init_manager_, options_, time_system, local_address, listener_hooks, *restarter_,\n        *stats_store_, access_log_lock, component_factory, std::move(random_generator), *tls_,\n        thread_factory_, file_system_, std::move(process_context));\n\n    break;\n  }\n  case Server::Mode::Validate:\n    restarter_ = std::make_unique<Server::HotRestartNopImpl>();\n    logging_context_ =\n        std::make_unique<Logger::Context>(options_.logLevel(), options_.logFormat(),\n                                          restarter_->logLock(), options_.logFormatEscaped());\n    break;\n  }\n}\n\nvoid MainCommonBase::configureComponentLogLevels() {\n  for (auto& component_log_level : options_.componentLogLevels()) {\n    Logger::Logger* logger_to_change = Logger::Registry::logger(component_log_level.first);\n    ASSERT(logger_to_change);\n    logger_to_change->setLevel(component_log_level.second);\n  }\n}\n\nvoid MainCommonBase::configureHotRestarter(Random::RandomGenerator& random_generator) {\n#ifdef ENVOY_HOT_RESTART\n  if (!options_.hotRestartDisabled()) {\n    uint32_t base_id = options_.baseId();\n\n    if (options_.useDynamicBaseId()) {\n      ASSERT(options_.restartEpoch() == 0, \"cannot use dynamic base id during hot restart\");\n\n      std::unique_ptr<Server::HotRestart> restarter;\n\n      // Try 100 times to get an unused base ID and then give up under the assumption\n      // that some other problem has occurred to prevent binding the domain socket.\n      for (int i = 0; i < 100 && restarter == nullptr; 
i++) {\n        // HotRestartImpl is going to multiply this value by 10, so leave head room.\n        base_id = static_cast<uint32_t>(random_generator.random()) & 0x0FFFFFFF;\n\n        try {\n          restarter = std::make_unique<Server::HotRestartImpl>(base_id, 0, options_.socketPath(),\n                                                               options_.socketMode());\n        } catch (Server::HotRestartDomainSocketInUseException& ex) {\n          // No luck, try again.\n          ENVOY_LOG_MISC(debug, \"dynamic base id: {}\", ex.what());\n        }\n      }\n\n      if (restarter == nullptr) {\n        throw EnvoyException(\"unable to select a dynamic base id\");\n      }\n\n      restarter_.swap(restarter);\n    } else {\n      restarter_ = std::make_unique<Server::HotRestartImpl>(\n          base_id, options_.restartEpoch(), options_.socketPath(), options_.socketMode());\n    }\n\n    // Write the base-id to the requested path whether we selected it\n    // dynamically or not.\n    if (!options_.baseIdPath().empty()) {\n      std::ofstream base_id_out_file(options_.baseIdPath());\n      if (!base_id_out_file) {\n        ENVOY_LOG_MISC(critical, \"cannot open base id output file {} for writing.\",\n                       options_.baseIdPath());\n      } else {\n        base_id_out_file << base_id;\n      }\n    }\n  }\n#else\n  UNREFERENCED_PARAMETER(random_generator);\n#endif\n\n  if (restarter_ == nullptr) {\n    restarter_ = std::make_unique<Server::HotRestartNopImpl>();\n  }\n}\n\nbool MainCommonBase::run() {\n  switch (options_.mode()) {\n  case Server::Mode::Serve:\n    server_->run();\n    return true;\n  case Server::Mode::Validate: {\n    auto local_address = Network::Utility::getLocalAddress(options_.localAddressIpVersion());\n    return Server::validateConfig(options_, local_address, component_factory_, thread_factory_,\n                                  file_system_);\n  }\n  case Server::Mode::InitOnly:\n    PERF_DUMP();\n    return true;\n  
}\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid MainCommonBase::adminRequest(absl::string_view path_and_query, absl::string_view method,\n                                  const AdminRequestFn& handler) {\n  std::string path_and_query_buf = std::string(path_and_query);\n  std::string method_buf = std::string(method);\n  server_->dispatcher().post([this, path_and_query_buf, method_buf, handler]() {\n    auto response_headers = Http::ResponseHeaderMapImpl::create();\n    std::string body;\n    server_->admin().request(path_and_query_buf, method_buf, *response_headers, body);\n    handler(*response_headers, body);\n  });\n}\n\nMainCommon::MainCommon(int argc, const char* const* argv)\n    : options_(argc, argv, &MainCommon::hotRestartVersion, spdlog::level::info),\n      base_(options_, real_time_system_, default_listener_hooks_, prod_component_factory_,\n            std::make_unique<Random::RandomGeneratorImpl>(), platform_impl_.threadFactory(),\n            platform_impl_.fileSystem(), nullptr) {}\n\nstd::string MainCommon::hotRestartVersion(bool hot_restart_enabled) {\n#ifdef ENVOY_HOT_RESTART\n  if (hot_restart_enabled) {\n    return Server::HotRestartImpl::hotRestartVersion();\n  }\n#else\n  UNREFERENCED_PARAMETER(hot_restart_enabled);\n#endif\n  return \"disabled\";\n}\n\nint MainCommon::main(int argc, char** argv, PostServerHook hook) {\n#ifndef __APPLE__\n  // absl::Symbolize mostly works without this, but this improves corner case\n  // handling, such as running in a chroot jail.\n  absl::InitializeSymbolizer(argv[0]);\n#endif\n  std::unique_ptr<Envoy::MainCommon> main_common;\n\n  // Initialize the server's main context under a try/catch loop and simply return EXIT_FAILURE\n  // as needed. 
Whatever code in the initialization path that fails is expected to log an error\n  // message so the user can diagnose.\n  try {\n    main_common = std::make_unique<Envoy::MainCommon>(argc, argv);\n    Envoy::Server::Instance* server = main_common->server();\n    if (server != nullptr && hook != nullptr) {\n      hook(*server);\n    }\n  } catch (const Envoy::NoServingException& e) {\n    return EXIT_SUCCESS;\n  } catch (const Envoy::MalformedArgvException& e) {\n    std::cerr << e.what() << std::endl;\n    return EXIT_FAILURE;\n  } catch (const Envoy::EnvoyException& e) {\n    std::cerr << e.what() << std::endl;\n    return EXIT_FAILURE;\n  }\n\n  // Run the server listener loop outside try/catch blocks, so that unexpected exceptions\n  // show up as a core-dumps for easier diagnostics.\n  return main_common->run() ? EXIT_SUCCESS : EXIT_FAILURE;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/exe/main_common.h",
    "content": "#pragma once\n\n#include \"envoy/event/timer.h\"\n#include \"envoy/runtime/runtime.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/event/real_time_system.h\"\n#include \"common/grpc/google_grpc_context.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/thread_local_store.h\"\n#include \"common/thread_local/thread_local_impl.h\"\n\n#include \"exe/platform_impl.h\"\n#include \"exe/process_wide.h\"\n\n#include \"server/listener_hooks.h\"\n#include \"server/options_impl.h\"\n#include \"server/server.h\"\n\n#ifdef ENVOY_HANDLE_SIGNALS\n#include \"common/signal/signal_action.h\"\n#include \"exe/terminate_handler.h\"\n#endif\n\nnamespace Envoy {\n\nclass ProdComponentFactory : public Server::ComponentFactory {\npublic:\n  // Server::DrainManagerFactory\n  Server::DrainManagerPtr createDrainManager(Server::Instance& server) override;\n  Runtime::LoaderPtr createRuntime(Server::Instance& server,\n                                   Server::Configuration::Initial& config) override;\n};\n\nclass MainCommonBase {\npublic:\n  // Consumer must guarantee that all passed references are alive until this object is\n  // destructed.\n  MainCommonBase(const OptionsImpl& options, Event::TimeSystem& time_system,\n                 ListenerHooks& listener_hooks, Server::ComponentFactory& component_factory,\n                 std::unique_ptr<Random::RandomGenerator>&& random_generator,\n                 Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system,\n                 std::unique_ptr<ProcessContext> process_context);\n\n  bool run();\n\n  // Will be null if options.mode() == Server::Mode::Validate\n  Server::Instance* server() { return server_.get(); }\n\n  using AdminRequestFn =\n      std::function<void(const Http::ResponseHeaderMap& response_headers, absl::string_view body)>;\n\n  // Makes an admin-console request by path, calling handler() when complete.\n  // The caller can initiate this from any thread, 
but it posts the request\n  // onto the main thread, so the handler is called asynchronously.\n  //\n  // This is designed to be called from downstream consoles, so they can access\n  // the admin console information stream without opening up a network port.\n  //\n  // This should only be called while run() is active; ensuring this is the\n  // responsibility of the caller.\n  //\n  // TODO(jmarantz): consider std::future for encapsulating this delayed request\n  // semantics, rather than a handler callback.\n  void adminRequest(absl::string_view path_and_query, absl::string_view method,\n                    const AdminRequestFn& handler);\n\nprotected:\n  ProcessWide process_wide_; // Process-wide state setup/teardown (excluding grpc).\n  // We instantiate this class regardless of ENVOY_GOOGLE_GRPC, to avoid having\n  // an ifdef in a header file exposed in a C++ library. It is too easy to have\n  // the ifdef be inconsistent across build-system boundaries.\n  Grpc::GoogleGrpcContext google_grpc_context_;\n  const Envoy::OptionsImpl& options_;\n  Server::ComponentFactory& component_factory_;\n  Thread::ThreadFactory& thread_factory_;\n  Filesystem::Instance& file_system_;\n  Stats::SymbolTableImpl symbol_table_;\n  Stats::AllocatorImpl stats_allocator_;\n\n  ThreadLocal::InstanceImplPtr tls_;\n  std::unique_ptr<Server::HotRestart> restarter_;\n  Stats::ThreadLocalStoreImplPtr stats_store_;\n  std::unique_ptr<Logger::Context> logging_context_;\n  std::unique_ptr<Init::Manager> init_manager_{std::make_unique<Init::ManagerImpl>(\"Server\")};\n  std::unique_ptr<Server::InstanceImpl> server_;\n\nprivate:\n  void configureComponentLogLevels();\n  void configureHotRestarter(Random::RandomGenerator& random_generator);\n};\n\n// TODO(jmarantz): consider removing this class; I think it'd be more useful to\n// go through MainCommonBase directly.\nclass MainCommon {\npublic:\n  // Hook to run after a server is created.\n  using PostServerHook = 
std::function<void(Server::Instance& server)>;\n\n  MainCommon(int argc, const char* const* argv);\n  bool run() { return base_.run(); }\n  // Only tests have a legitimate need for this today.\n  Event::Dispatcher& dispatcherForTest() { return base_.server()->dispatcher(); }\n\n  // Makes an admin-console request by path, calling handler() when complete.\n  // The caller can initiate this from any thread, but it posts the request\n  // onto the main thread, so the handler is called asynchronously.\n  //\n  // This is designed to be called from downstream consoles, so they can access\n  // the admin console information stream without opening up a network port.\n  //\n  // This should only be called while run() is active; ensuring this is the\n  // responsibility of the caller.\n  void adminRequest(absl::string_view path_and_query, absl::string_view method,\n                    const MainCommonBase::AdminRequestFn& handler) {\n    base_.adminRequest(path_and_query, method, handler);\n  }\n\n  static std::string hotRestartVersion(bool hot_restart_enabled);\n\n  /**\n   * @return a pointer to the server instance, or nullptr if initialized into\n   *         validation mode.\n   */\n  Server::Instance* server() { return base_.server(); }\n\n  /**\n   * Instantiates a MainCommon using default factory implements, parses args,\n   * and runs an event loop depending on the mode.\n   *\n   * Note that MainCommonBase can also be directly instantiated, providing the\n   * opportunity to override subsystem implementations for custom\n   * implementations.\n   *\n   * @param argc number of command-line args\n   * @param argv command-line argument array\n   * @param hook optional hook to run after a server is created\n   */\n  static int main(int argc, char** argv, PostServerHook hook = nullptr);\n\nprivate:\n#ifdef ENVOY_HANDLE_SIGNALS\n  Envoy::SignalAction handle_sigs_;\n  Envoy::TerminateHandler log_on_terminate_;\n#endif\n\n  PlatformImpl platform_impl_;\n  
Envoy::OptionsImpl options_;\n  Event::RealTimeSystem real_time_system_;\n  DefaultListenerHooks default_listener_hooks_;\n  ProdComponentFactory prod_component_factory_;\n  MainCommonBase base_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/exe/platform_impl.h",
    "content": "#pragma once\n\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\n\nclass PlatformImpl {\npublic:\n  PlatformImpl();\n  ~PlatformImpl();\n  Thread::ThreadFactory& threadFactory() { return *thread_factory_; }\n  Filesystem::Instance& fileSystem() { return *file_system_; }\n\nprivate:\n  Thread::ThreadFactoryPtr thread_factory_;\n  Filesystem::InstancePtr file_system_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/exe/posix/platform_impl.cc",
    "content": "#include \"common/common/thread_impl.h\"\n#include \"common/filesystem/filesystem_impl.h\"\n\n#include \"exe/platform_impl.h\"\n\nnamespace Envoy {\n\nPlatformImpl::PlatformImpl()\n    : thread_factory_(std::make_unique<Thread::ThreadFactoryImplPosix>()),\n      file_system_(std::make_unique<Filesystem::InstanceImplPosix>()) {}\n\nPlatformImpl::~PlatformImpl() = default;\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/exe/process_wide.cc",
    "content": "#include \"exe/process_wide.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/event/libevent.h\"\n#include \"common/http/http2/nghttp2.h\"\n\n#include \"server/proto_descriptors.h\"\n\n#include \"ares.h\"\n\nnamespace Envoy {\nnamespace {\n// Static variable to count initialization pairs. For tests like\n// main_common_test, we need to count to avoid double initialization or\n// shutdown.\nuint32_t process_wide_initialized;\n} // namespace\n\nProcessWide::ProcessWide() : initialization_depth_(process_wide_initialized) {\n  if (process_wide_initialized++ == 0) {\n    ares_library_init(ARES_LIB_INIT_ALL);\n    Event::Libevent::Global::initialize();\n    Envoy::Server::validateProtoDescriptors();\n    Http::Http2::initializeNghttp2Logging();\n\n    // We do not initialize Google gRPC here -- we instead instantiate\n    // Grpc::GoogleGrpcContext in MainCommon immediately after instantiating\n    // ProcessWide. This is because ProcessWide is instantiated in the unit-test\n    // flow in test/test_runner.h, and grpc_init() instantiates threads which\n    // allocate memory asynchronous to running tests, making it hard to\n    // accurately measure memory consumption, and making unit-test debugging\n    // non-deterministic. See https://github.com/envoyproxy/envoy/issues/8282\n    // for details. 
Of course we also need grpc_init called in unit-tests that\n    // test Google gRPC, and the relevant classes must also instantiate\n    // Grpc::GoogleGrpcContext, which allows for nested instantiation.\n    //\n    // It appears that grpc_init() started instantiating threads in grpc 1.22.1,\n    // which was integrated in https://github.com/envoyproxy/envoy/pull/8196,\n    // around the time the flakes in #8282 started being reported.\n  }\n}\n\nProcessWide::~ProcessWide() {\n  ASSERT(process_wide_initialized > 0);\n  if (--process_wide_initialized == 0) {\n    process_wide_initialized = false;\n    ares_library_cleanup();\n  }\n  ASSERT(process_wide_initialized == initialization_depth_);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/exe/process_wide.h",
    "content": "#pragma once\n\n#include <cstdint>\n\nnamespace Envoy {\n\n// Process-wide lifecycle events for global state in third-party dependencies,\n// e.g. c-ares. There should only ever be a single instance of this.\nclass ProcessWide {\npublic:\n  ProcessWide();\n  ~ProcessWide();\n\nprivate:\n  uint32_t initialization_depth_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/exe/terminate_handler.cc",
    "content": "#include \"exe/terminate_handler.h\"\n\n#include <cstdlib>\n\n#include \"common/common/logger.h\"\n\n#include \"server/backtrace.h\"\n\nnamespace Envoy {\n\nstd::terminate_handler TerminateHandler::logOnTerminate() const {\n  return std::set_terminate([]() {\n    ENVOY_LOG(critical, \"std::terminate called! (possible uncaught exception, see trace)\");\n    BACKTRACE_LOG();\n    std::abort();\n  });\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/exe/terminate_handler.h",
    "content": "#pragma once\n\n#include <exception>\n\n#include \"common/common/logger.h\"\n#include \"common/common/non_copyable.h\"\n\nnamespace Envoy {\n\nclass TerminateHandler : NonCopyable, protected Logger::Loggable<Logger::Id::main> {\npublic:\n  TerminateHandler() : previous_terminate_(logOnTerminate()) {}\n  ~TerminateHandler() { std::set_terminate(previous_terminate_); }\n\nprivate:\n  /**\n   * Sets the std::terminate to a function which will log as much of a backtrace as\n   * possible, then call abort. Returns the previous handler.\n   */\n  std::terminate_handler logOnTerminate() const;\n\n  const std::terminate_handler previous_terminate_;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/exe/win32/platform_impl.cc",
    "content": "#include \"common/common/assert.h\"\n#include \"common/common/thread_impl.h\"\n#include \"common/filesystem/filesystem_impl.h\"\n\n#include \"exe/platform_impl.h\"\n\nnamespace Envoy {\n\nPlatformImpl::PlatformImpl()\n    : thread_factory_(std::make_unique<Thread::ThreadFactoryImplWin32>()),\n      file_system_(std::make_unique<Filesystem::InstanceImplWin32>()) {\n  WSADATA wsa_data;\n  const WORD version_requested = MAKEWORD(2, 2);\n  RELEASE_ASSERT(WSAStartup(version_requested, &wsa_data) == 0, \"WSAStartup failed with error\");\n}\n\nPlatformImpl::~PlatformImpl() { ::WSACleanup(); }\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n"
  },
  {
    "path": "source/extensions/access_loggers/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # well known names files are public as long as they exist.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/access_loggers/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Base class for implementations of AccessLog::Instance.\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"access_log_base\",\n    srcs = [\"access_log_base.cc\"],\n    hdrs = [\"access_log_base.h\"],\n    deps = [\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//source/common/access_log:access_log_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/access_loggers/common/access_log_base.cc",
    "content": "#include \"extensions/access_loggers/common/access_log_base.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace Common {\n\nvoid ImplBase::log(const Http::RequestHeaderMap* request_headers,\n                   const Http::ResponseHeaderMap* response_headers,\n                   const Http::ResponseTrailerMap* response_trailers,\n                   const StreamInfo::StreamInfo& stream_info) {\n  if (!request_headers) {\n    request_headers = Http::StaticEmptyHeaders::get().request_headers.get();\n  }\n  if (!response_headers) {\n    response_headers = Http::StaticEmptyHeaders::get().response_headers.get();\n  }\n  if (!response_trailers) {\n    response_trailers = Http::StaticEmptyHeaders::get().response_trailers.get();\n  }\n  if (filter_ &&\n      !filter_->evaluate(stream_info, *request_headers, *response_headers, *response_trailers)) {\n    return;\n  }\n  return emitLog(*request_headers, *response_headers, *response_trailers, stream_info);\n}\n\n} // namespace Common\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/common/access_log_base.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/access_log_config.h\"\n\n#include \"common/http/header_utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace Common {\n\n/**\n * Base implementation of Accesslog::Instance handles common filter logic.\n */\nclass ImplBase : public AccessLog::Instance {\npublic:\n  ImplBase(AccessLog::FilterPtr filter) : filter_(std::move(filter)) {}\n\n  /**\n   * Log a completed request if the underlying AccessLog `filter_` allows it.\n   */\n  void log(const Http::RequestHeaderMap* request_headers,\n           const Http::ResponseHeaderMap* response_headers,\n           const Http::ResponseTrailerMap* response_trailers,\n           const StreamInfo::StreamInfo& stream_info) override;\n\nprivate:\n  /**\n   * Log a completed request.\n   * @param request_headers supplies the incoming request headers after filtering.\n   * @param response_headers supplies response headers.\n   * @param response_trailers supplies response trailers.\n   * @param stream_info supplies additional information about the request not\n   * contained in the request headers.\n   */\n  virtual void emitLog(const Http::RequestHeaderMap& request_headers,\n                       const Http::ResponseHeaderMap& response_headers,\n                       const Http::ResponseTrailerMap& response_trailers,\n                       const StreamInfo::StreamInfo& stream_info) PURE;\n\n  AccessLog::FilterPtr filter_;\n};\n\n} // namespace Common\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/file/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Access log implementation that writes to a file.\n# Public docs: docs/root/configuration/access_log.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"file_access_log_lib\",\n    srcs = [\"file_access_log_impl.cc\"],\n    hdrs = [\"file_access_log_impl.h\"],\n    # The file based access logger is core code.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/extensions/access_loggers/common:access_log_base\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) determine if this is core or should be cleaned up.\n    extra_visibility = [\n        \"//test:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":file_access_log_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/formatter:substitution_format_string_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/access_loggers:well_known_names\",\n        \"@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/access_loggers/file/config.cc",
    "content": "#include \"extensions/access_loggers/file/config.h\"\n\n#include <memory>\n\n#include \"envoy/extensions/access_loggers/file/v3/file.pb.h\"\n#include \"envoy/extensions/access_loggers/file/v3/file.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/formatter/substitution_format_string.h\"\n#include \"common/formatter/substitution_formatter.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/access_loggers/file/file_access_log_impl.h\"\n#include \"extensions/access_loggers/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace File {\n\nAccessLog::InstanceSharedPtr\nFileAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config,\n                                              AccessLog::FilterPtr&& filter,\n                                              Server::Configuration::FactoryContext& context) {\n  const auto& fal_config = MessageUtil::downcastAndValidate<\n      const envoy::extensions::access_loggers::file::v3::FileAccessLog&>(\n      config, context.messageValidationVisitor());\n  Formatter::FormatterPtr formatter;\n\n  switch (fal_config.access_log_format_case()) {\n  case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kFormat:\n    if (fal_config.format().empty()) {\n      formatter = Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter();\n    } else {\n      envoy::config::core::v3::SubstitutionFormatString sff_config;\n      sff_config.set_text_format(fal_config.format());\n      formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(sff_config);\n    }\n    break;\n  case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kJsonFormat:\n    formatter = Formatter::SubstitutionFormatStringUtils::createJsonFormatter(\n        fal_config.json_format(), false, false);\n 
   break;\n  case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::\n      kTypedJsonFormat: {\n    envoy::config::core::v3::SubstitutionFormatString sff_config;\n    *sff_config.mutable_json_format() = fal_config.typed_json_format();\n    formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(sff_config);\n    break;\n  }\n  case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kLogFormat:\n    formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(fal_config.log_format());\n    break;\n  case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::\n      ACCESS_LOG_FORMAT_NOT_SET:\n    formatter = Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter();\n    break;\n  }\n\n  return std::make_shared<FileAccessLog>(fal_config.path(), std::move(filter), std::move(formatter),\n                                         context.accessLogManager());\n}\n\nProtobufTypes::MessagePtr FileAccessLogFactory::createEmptyConfigProto() {\n  return ProtobufTypes::MessagePtr{\n      new envoy::extensions::access_loggers::file::v3::FileAccessLog()};\n}\n\nstd::string FileAccessLogFactory::name() const { return AccessLogNames::get().File; }\n\n/**\n * Static registration for the file access log. @see RegisterFactory.\n */\nREGISTER_FACTORY(FileAccessLogFactory,\n                 Server::Configuration::AccessLogInstanceFactory){\"envoy.file_access_log\"};\n\n} // namespace File\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/file/config.h",
    "content": "#pragma once\n\n#include \"envoy/server/access_log_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace File {\n\n/**\n * Config registration for the file access log. @see AccessLogInstanceFactory.\n */\nclass FileAccessLogFactory : public Server::Configuration::AccessLogInstanceFactory {\npublic:\n  AccessLog::InstanceSharedPtr\n  createAccessLogInstance(const Protobuf::Message& config, AccessLog::FilterPtr&& filter,\n                          Server::Configuration::FactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override;\n};\n\n} // namespace File\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/file/file_access_log_impl.cc",
    "content": "#include \"extensions/access_loggers/file/file_access_log_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace File {\n\nFileAccessLog::FileAccessLog(const std::string& access_log_path, AccessLog::FilterPtr&& filter,\n                             Formatter::FormatterPtr&& formatter,\n                             AccessLog::AccessLogManager& log_manager)\n    : ImplBase(std::move(filter)), formatter_(std::move(formatter)) {\n  log_file_ = log_manager.createAccessLog(access_log_path);\n}\n\nvoid FileAccessLog::emitLog(const Http::RequestHeaderMap& request_headers,\n                            const Http::ResponseHeaderMap& response_headers,\n                            const Http::ResponseTrailerMap& response_trailers,\n                            const StreamInfo::StreamInfo& stream_info) {\n  log_file_->write(formatter_->format(request_headers, response_headers, response_trailers,\n                                      stream_info, absl::string_view()));\n}\n\n} // namespace File\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/file/file_access_log_impl.h",
    "content": "#pragma once\n\n#include \"common/formatter/substitution_formatter.h\"\n\n#include \"extensions/access_loggers/common/access_log_base.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace File {\n\n/**\n * Access log Instance that writes logs to a file.\n */\nclass FileAccessLog : public Common::ImplBase {\npublic:\n  FileAccessLog(const std::string& access_log_path, AccessLog::FilterPtr&& filter,\n                Formatter::FormatterPtr&& formatter, AccessLog::AccessLogManager& log_manager);\n\nprivate:\n  // Common::ImplBase\n  void emitLog(const Http::RequestHeaderMap& request_headers,\n               const Http::ResponseHeaderMap& response_headers,\n               const Http::ResponseTrailerMap& response_trailers,\n               const StreamInfo::StreamInfo& stream_info) override;\n\n  AccessLog::AccessLogFileSharedPtr log_file_;\n  Formatter::FormatterPtr formatter_;\n};\n\n} // namespace File\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Access log implementation that writes to a gRPC service.\n# Public docs: TODO(rodaine): Docs needed.\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"config_utils\",\n    srcs = [\"config_utils.cc\"],\n    hdrs = [\"config_utils.h\"],\n    deps = [\n        \":grpc_access_log_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/singleton:instance_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"grpc_access_log_lib\",\n    srcs = [\"grpc_access_log_impl.cc\"],\n    hdrs = [\"grpc_access_log_impl.h\"],\n    deps = [\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//include/envoy/grpc:async_client_manager_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/grpc:async_client_lib\",\n        \"//source/common/grpc:typed_async_client_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n        \"//source/extensions/access_loggers/common:access_log_base\",\n        \"@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/accesslog/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"grpc_access_log_utils\",\n    srcs = [\"grpc_access_log_utils.cc\"],\n    hdrs = [\"grpc_access_log_utils.h\"],\n    deps = [\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/common/stream_info:utility_lib\",\n        
\"@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http_grpc_access_log_lib\",\n    srcs = [\"http_grpc_access_log_impl.cc\"],\n    hdrs = [\"http_grpc_access_log_impl.h\"],\n    deps = [\n        \":grpc_access_log_lib\",\n        \":grpc_access_log_utils\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tcp_grpc_access_log_lib\",\n    srcs = [\"tcp_grpc_access_log_impl.cc\"],\n    hdrs = [\"tcp_grpc_access_log_impl.h\"],\n    deps = [\n        \":grpc_access_log_lib\",\n        \":grpc_access_log_utils\",\n        \"@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"grpc_access_log_proto_descriptors_lib\",\n    srcs = [\"grpc_access_log_proto_descriptors.cc\"],\n    hdrs = [\"grpc_access_log_proto_descriptors.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"http_config\",\n    srcs = [\"http_config.cc\"],\n    hdrs = [\"http_config.h\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/common/access_log:__subpackages__\",\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":config_utils\",\n        \"//include/envoy/server:access_log_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/access_loggers:well_known_names\",\n        \"//source/extensions/access_loggers/grpc:grpc_access_log_proto_descriptors_lib\",\n       
 \"//source/extensions/access_loggers/grpc:http_grpc_access_log_lib\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"tcp_config\",\n    srcs = [\"tcp_config.cc\"],\n    hdrs = [\"tcp_config.h\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/common/access_log:__subpackages__\",\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":config_utils\",\n        \"//include/envoy/server:access_log_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/access_loggers:well_known_names\",\n        \"//source/extensions/access_loggers/grpc:grpc_access_log_proto_descriptors_lib\",\n        \"//source/extensions/access_loggers/grpc:tcp_grpc_access_log_lib\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/config_utils.cc",
    "content": "#include \"extensions/access_loggers/grpc/config_utils.h\"\n\n#include \"envoy/singleton/manager.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\n\n// Singleton registration via macro defined in envoy/singleton/manager.h\nSINGLETON_MANAGER_REGISTRATION(grpc_access_logger_cache);\n\nGrpcCommon::GrpcAccessLoggerCacheSharedPtr\ngetGrpcAccessLoggerCacheSingleton(Server::Configuration::FactoryContext& context) {\n  return context.singletonManager().getTyped<GrpcCommon::GrpcAccessLoggerCache>(\n      SINGLETON_MANAGER_REGISTERED_NAME(grpc_access_logger_cache), [&context] {\n        return std::make_shared<GrpcCommon::GrpcAccessLoggerCacheImpl>(\n            context.clusterManager().grpcAsyncClientManager(), context.scope(),\n            context.threadLocal(), context.localInfo());\n      });\n}\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/config_utils.h",
    "content": "#pragma once\n\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/access_loggers/grpc/grpc_access_log_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\n\nGrpcAccessLoggerCacheSharedPtr\ngetGrpcAccessLoggerCacheSingleton(Server::Configuration::FactoryContext& context);\n\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/grpc_access_log_impl.cc",
    "content": "#include \"extensions/access_loggers/grpc/grpc_access_log_impl.h\"\n\n#include \"envoy/data/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/grpc/typed_async_client.h\"\n#include \"common/network/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/stream_info/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\n\nvoid GrpcAccessLoggerImpl::LocalStream::onRemoteClose(Grpc::Status::GrpcStatus,\n                                                      const std::string&) {\n  ASSERT(parent_.stream_ != absl::nullopt);\n  if (parent_.stream_->stream_ != nullptr) {\n    // Only reset if we have a stream. Otherwise we had an inline failure and we will clear the\n    // stream data in send().\n    parent_.stream_.reset();\n  }\n}\n\nGrpcAccessLoggerImpl::GrpcAccessLoggerImpl(\n    Grpc::RawAsyncClientPtr&& client, std::string log_name,\n    std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes,\n    Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope,\n    envoy::config::core::v3::ApiVersion transport_api_version)\n    : stats_({ALL_GRPC_ACCESS_LOGGER_STATS(\n          POOL_COUNTER_PREFIX(scope, \"access_logs.grpc_access_log.\"))}),\n      client_(std::move(client)), log_name_(log_name),\n      buffer_flush_interval_msec_(buffer_flush_interval_msec),\n      flush_timer_(dispatcher.createTimer([this]() {\n        flush();\n        flush_timer_->enableTimer(buffer_flush_interval_msec_);\n      })),\n      max_buffer_size_bytes_(max_buffer_size_bytes), local_info_(local_info),\n      service_method_(\n          Grpc::VersionedMethods(\"envoy.service.accesslog.v3.AccessLogService.StreamAccessLogs\",\n                                 
\"envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs\")\n              .getMethodDescriptorForVersion(transport_api_version)),\n      transport_api_version_(transport_api_version) {\n  flush_timer_->enableTimer(buffer_flush_interval_msec_);\n}\n\nbool GrpcAccessLoggerImpl::canLogMore() {\n  if (max_buffer_size_bytes_ == 0 || approximate_message_size_bytes_ < max_buffer_size_bytes_) {\n    stats_.logs_written_.inc();\n    return true;\n  }\n  flush();\n  if (approximate_message_size_bytes_ < max_buffer_size_bytes_) {\n    stats_.logs_written_.inc();\n    return true;\n  }\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.disallow_unbounded_access_logs\")) {\n    stats_.logs_dropped_.inc();\n    return false;\n  }\n  stats_.logs_written_.inc();\n  return true;\n}\n\nvoid GrpcAccessLoggerImpl::log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) {\n  if (!canLogMore()) {\n    return;\n  }\n  approximate_message_size_bytes_ += entry.ByteSizeLong();\n  message_.mutable_http_logs()->mutable_log_entry()->Add(std::move(entry));\n  if (approximate_message_size_bytes_ >= max_buffer_size_bytes_) {\n    flush();\n  }\n}\n\nvoid GrpcAccessLoggerImpl::log(envoy::data::accesslog::v3::TCPAccessLogEntry&& entry) {\n  approximate_message_size_bytes_ += entry.ByteSizeLong();\n  message_.mutable_tcp_logs()->mutable_log_entry()->Add(std::move(entry));\n  if (approximate_message_size_bytes_ >= max_buffer_size_bytes_) {\n    flush();\n  }\n}\n\nvoid GrpcAccessLoggerImpl::flush() {\n  if (!message_.has_http_logs() && !message_.has_tcp_logs()) {\n    // Nothing to flush.\n    return;\n  }\n\n  if (stream_ == absl::nullopt) {\n    stream_.emplace(*this);\n  }\n\n  if (stream_->stream_ == nullptr) {\n    stream_->stream_ =\n        client_->start(service_method_, *stream_, Http::AsyncClient::StreamOptions());\n\n    auto* identifier = message_.mutable_identifier();\n    *identifier->mutable_node() = local_info_.node();\n    
identifier->set_log_name(log_name_);\n  }\n\n  if (stream_->stream_ != nullptr) {\n    if (stream_->stream_->isAboveWriteBufferHighWatermark()) {\n      return;\n    }\n    stream_->stream_->sendMessage(message_, transport_api_version_, false);\n  } else {\n    // Clear out the stream data due to stream creation failure.\n    stream_.reset();\n  }\n\n  // Clear the message regardless of the success.\n  approximate_message_size_bytes_ = 0;\n  message_.Clear();\n}\n\nGrpcAccessLoggerCacheImpl::GrpcAccessLoggerCacheImpl(Grpc::AsyncClientManager& async_client_manager,\n                                                     Stats::Scope& scope,\n                                                     ThreadLocal::SlotAllocator& tls,\n                                                     const LocalInfo::LocalInfo& local_info)\n    : async_client_manager_(async_client_manager), scope_(scope), tls_slot_(tls.allocateSlot()),\n      local_info_(local_info) {\n  tls_slot_->set(\n      [](Event::Dispatcher& dispatcher) { return std::make_shared<ThreadLocalCache>(dispatcher); });\n}\n\nGrpcAccessLoggerSharedPtr GrpcAccessLoggerCacheImpl::getOrCreateLogger(\n    const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config,\n    GrpcAccessLoggerType logger_type, Stats::Scope& scope) {\n  // TODO(euroelessar): Consider cleaning up loggers.\n  auto& cache = tls_slot_->getTyped<ThreadLocalCache>();\n  const auto cache_key = std::make_pair(MessageUtil::hash(config), logger_type);\n  const auto it = cache.access_loggers_.find(cache_key);\n  if (it != cache.access_loggers_.end()) {\n    return it->second;\n  }\n  const Grpc::AsyncClientFactoryPtr factory =\n      async_client_manager_.factoryForGrpcService(config.grpc_service(), scope_, false);\n  const GrpcAccessLoggerSharedPtr logger = std::make_shared<GrpcAccessLoggerImpl>(\n      factory->create(), config.log_name(),\n      std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, buffer_flush_interval, 
1000)),\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, buffer_size_bytes, 16384), cache.dispatcher_,\n      local_info_, scope, config.transport_api_version());\n  cache.access_loggers_.emplace(cache_key, logger);\n  return logger;\n}\n\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/grpc_access_log_impl.h",
    "content": "#pragma once\n\n#include <memory>\n#include <vector>\n\n#include \"envoy/data/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/grpc/async_client_manager.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/service/accesslog/v3/als.pb.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/grpc/typed_async_client.h\"\n\n#include \"extensions/access_loggers/common/access_log_base.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\n\n/**\n * All stats for the grpc access logger. @see stats_macros.h\n */\n#define ALL_GRPC_ACCESS_LOGGER_STATS(COUNTER)                                                      \\\n  COUNTER(logs_written)                                                                            \\\n  COUNTER(logs_dropped)\n\n/**\n * Wrapper struct for the access log stats. @see stats_macros.h\n */\nstruct GrpcAccessLoggerStats {\n  ALL_GRPC_ACCESS_LOGGER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Interface for an access logger. The logger provides abstraction on top of gRPC stream, deals with\n * reconnects and performs batching.\n */\nclass GrpcAccessLogger {\npublic:\n  virtual ~GrpcAccessLogger() = default;\n\n  /**\n   * Log http access entry.\n   * @param entry supplies the access log to send.\n   */\n  virtual void log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) PURE;\n\n  /**\n   * Log tcp access entry.\n   * @param entry supplies the access log to send.\n   */\n  virtual void log(envoy::data::accesslog::v3::TCPAccessLogEntry&& entry) PURE;\n};\n\nusing GrpcAccessLoggerSharedPtr = std::shared_ptr<GrpcAccessLogger>;\n\nenum class GrpcAccessLoggerType { TCP, HTTP };\n\n/**\n * Interface for an access logger cache. 
The cache deals with threading and de-duplicates loggers\n * for the same configuration.\n */\nclass GrpcAccessLoggerCache {\npublic:\n  virtual ~GrpcAccessLoggerCache() = default;\n\n  /**\n   * Get existing logger or create a new one for the given configuration.\n   * @param config supplies the configuration for the logger.\n   * @return GrpcAccessLoggerSharedPtr ready for logging requests.\n   */\n  virtual GrpcAccessLoggerSharedPtr getOrCreateLogger(\n      const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config,\n      GrpcAccessLoggerType logger_type, Stats::Scope& scope) PURE;\n};\n\nusing GrpcAccessLoggerCacheSharedPtr = std::shared_ptr<GrpcAccessLoggerCache>;\n\nclass GrpcAccessLoggerImpl : public GrpcAccessLogger {\npublic:\n  GrpcAccessLoggerImpl(Grpc::RawAsyncClientPtr&& client, std::string log_name,\n                       std::chrono::milliseconds buffer_flush_interval_msec,\n                       uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher,\n                       const LocalInfo::LocalInfo& local_info, Stats::Scope& scope,\n                       envoy::config::core::v3::ApiVersion transport_api_version);\n\n  // Extensions::AccessLoggers::GrpcCommon::GrpcAccessLogger\n  void log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) override;\n  void log(envoy::data::accesslog::v3::TCPAccessLogEntry&& entry) override;\n\nprivate:\n  struct LocalStream\n      : public Grpc::AsyncStreamCallbacks<envoy::service::accesslog::v3::StreamAccessLogsResponse> {\n    LocalStream(GrpcAccessLoggerImpl& parent) : parent_(parent) {}\n\n    // Grpc::AsyncStreamCallbacks\n    void onCreateInitialMetadata(Http::RequestHeaderMap&) override {}\n    void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&&) override {}\n    void onReceiveMessage(\n        std::unique_ptr<envoy::service::accesslog::v3::StreamAccessLogsResponse>&&) override {}\n    void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&&) override 
{}\n    void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override;\n\n    GrpcAccessLoggerImpl& parent_;\n    Grpc::AsyncStream<envoy::service::accesslog::v3::StreamAccessLogsMessage> stream_{};\n  };\n\n  void flush();\n\n  bool canLogMore();\n\n  GrpcAccessLoggerStats stats_;\n  Grpc::AsyncClient<envoy::service::accesslog::v3::StreamAccessLogsMessage,\n                    envoy::service::accesslog::v3::StreamAccessLogsResponse>\n      client_;\n  const std::string log_name_;\n  const std::chrono::milliseconds buffer_flush_interval_msec_;\n  const Event::TimerPtr flush_timer_;\n  const uint64_t max_buffer_size_bytes_;\n  uint64_t approximate_message_size_bytes_ = 0;\n  envoy::service::accesslog::v3::StreamAccessLogsMessage message_;\n  absl::optional<LocalStream> stream_;\n  const LocalInfo::LocalInfo& local_info_;\n  const Protobuf::MethodDescriptor& service_method_;\n  const envoy::config::core::v3::ApiVersion transport_api_version_;\n};\n\nusing GrpcAccessLoggerImplPtr = std::unique_ptr<GrpcAccessLoggerImpl>;\n\nclass GrpcAccessLoggerCacheImpl : public Singleton::Instance, public GrpcAccessLoggerCache {\npublic:\n  GrpcAccessLoggerCacheImpl(Grpc::AsyncClientManager& async_client_manager, Stats::Scope& scope,\n                            ThreadLocal::SlotAllocator& tls,\n                            const LocalInfo::LocalInfo& local_info);\n\n  GrpcAccessLoggerSharedPtr getOrCreateLogger(\n      const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config,\n      GrpcAccessLoggerType logger_type, Stats::Scope& scope) override;\n\nprivate:\n  /**\n   * Per-thread cache.\n   */\n  struct ThreadLocalCache : public ThreadLocal::ThreadLocalObject {\n    ThreadLocalCache(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {}\n\n    Event::Dispatcher& dispatcher_;\n    // Access loggers indexed by the hash of logger's configuration and logger type.\n    absl::flat_hash_map<std::pair<std::size_t, 
GrpcAccessLoggerType>, GrpcAccessLoggerSharedPtr>\n        access_loggers_;\n  };\n\n  Grpc::AsyncClientManager& async_client_manager_;\n  Stats::Scope& scope_;\n  ThreadLocal::SlotPtr tls_slot_;\n  const LocalInfo::LocalInfo& local_info_;\n};\n\nusing GrpcAccessLoggerCacheImplPtr = std::unique_ptr<GrpcAccessLoggerCacheImpl>;\n\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/grpc_access_log_proto_descriptors.cc",
    "content": "#include \"extensions/access_loggers/grpc/grpc_access_log_proto_descriptors.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\n\nvoid validateProtoDescriptors() {\n  const auto method = \"envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs\";\n\n  RELEASE_ASSERT(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) != nullptr,\n                 \"\");\n};\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/grpc_access_log_proto_descriptors.h",
    "content": "#pragma once\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\n\n// This function validates that the method descriptors for gRPC services and type descriptors that\n// are referenced in Any messages are available in the descriptor pool.\nvoid validateProtoDescriptors();\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/grpc_access_log_utils.cc",
    "content": "#include \"extensions/access_loggers/grpc/grpc_access_log_utils.h\"\n\n#include \"envoy/data/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\n\nnamespace {\n\nusing namespace envoy::data::accesslog::v3;\n\n// Helper function to convert from a BoringSSL textual representation of the\n// TLS version to the corresponding enum value used in gRPC access logs.\nTLSProperties_TLSVersion tlsVersionStringToEnum(const std::string& tls_version) {\n  if (tls_version == \"TLSv1\") {\n    return TLSProperties::TLSv1;\n  } else if (tls_version == \"TLSv1.1\") {\n    return TLSProperties::TLSv1_1;\n  } else if (tls_version == \"TLSv1.2\") {\n    return TLSProperties::TLSv1_2;\n  } else if (tls_version == \"TLSv1.3\") {\n    return TLSProperties::TLSv1_3;\n  }\n\n  return TLSProperties::VERSION_UNSPECIFIED;\n}\n\n} // namespace\n\nvoid Utility::responseFlagsToAccessLogResponseFlags(\n    envoy::data::accesslog::v3::AccessLogCommon& common_access_log,\n    const StreamInfo::StreamInfo& stream_info) {\n\n  static_assert(StreamInfo::ResponseFlag::LastFlag == 0x400000,\n                \"A flag has been added. 
Fix this code.\");\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck)) {\n    common_access_log.mutable_response_flags()->set_failed_local_healthcheck(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream)) {\n    common_access_log.mutable_response_flags()->set_no_healthy_upstream(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)) {\n    common_access_log.mutable_response_flags()->set_upstream_request_timeout(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::LocalReset)) {\n    common_access_log.mutable_response_flags()->set_local_reset(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamRemoteReset)) {\n    common_access_log.mutable_response_flags()->set_upstream_remote_reset(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure)) {\n    common_access_log.mutable_response_flags()->set_upstream_connection_failure(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionTermination)) {\n    common_access_log.mutable_response_flags()->set_upstream_connection_termination(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow)) {\n    common_access_log.mutable_response_flags()->set_upstream_overflow(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoRouteFound)) {\n    common_access_log.mutable_response_flags()->set_no_route_found(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DelayInjected)) {\n    common_access_log.mutable_response_flags()->set_delay_injected(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::FaultInjected)) {\n    common_access_log.mutable_response_flags()->set_fault_injected(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::RateLimited)) {\n    
common_access_log.mutable_response_flags()->set_rate_limited(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UnauthorizedExternalService)) {\n    common_access_log.mutable_response_flags()->mutable_unauthorized_details()->set_reason(\n        envoy::data::accesslog::v3::ResponseFlags::Unauthorized::EXTERNAL_SERVICE);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError)) {\n    common_access_log.mutable_response_flags()->set_rate_limit_service_error(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamConnectionTermination)) {\n    common_access_log.mutable_response_flags()->set_downstream_connection_termination(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded)) {\n    common_access_log.mutable_response_flags()->set_upstream_retry_limit_exceeded(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout)) {\n    common_access_log.mutable_response_flags()->set_stream_idle_timeout(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::InvalidEnvoyRequestHeaders)) {\n    common_access_log.mutable_response_flags()->set_invalid_envoy_request_headers(true);\n  }\n\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)) {\n    common_access_log.mutable_response_flags()->set_downstream_protocol_error(true);\n  }\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached)) {\n    common_access_log.mutable_response_flags()->set_upstream_max_stream_duration_reached(true);\n  }\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter)) {\n    common_access_log.mutable_response_flags()->set_response_from_cache_filter(true);\n  }\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound)) {\n    
common_access_log.mutable_response_flags()->set_no_filter_config_found(true);\n  }\n  if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DurationTimeout)) {\n    common_access_log.mutable_response_flags()->set_duration_timeout(true);\n  }\n}\n\nvoid Utility::extractCommonAccessLogProperties(\n    envoy::data::accesslog::v3::AccessLogCommon& common_access_log,\n    const StreamInfo::StreamInfo& stream_info,\n    const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config) {\n  // TODO(mattklein123): Populate sample_rate field.\n  if (stream_info.downstreamRemoteAddress() != nullptr) {\n    Network::Utility::addressToProtobufAddress(\n        *stream_info.downstreamRemoteAddress(),\n        *common_access_log.mutable_downstream_remote_address());\n  }\n  if (stream_info.downstreamDirectRemoteAddress() != nullptr) {\n    Network::Utility::addressToProtobufAddress(\n        *stream_info.downstreamDirectRemoteAddress(),\n        *common_access_log.mutable_downstream_direct_remote_address());\n  }\n  if (stream_info.downstreamLocalAddress() != nullptr) {\n    Network::Utility::addressToProtobufAddress(\n        *stream_info.downstreamLocalAddress(),\n        *common_access_log.mutable_downstream_local_address());\n  }\n  if (stream_info.downstreamSslConnection() != nullptr) {\n    auto* tls_properties = common_access_log.mutable_tls_properties();\n    const Ssl::ConnectionInfoConstSharedPtr downstream_ssl_connection =\n        stream_info.downstreamSslConnection();\n\n    tls_properties->set_tls_sni_hostname(stream_info.requestedServerName());\n\n    auto* local_properties = tls_properties->mutable_local_certificate_properties();\n    for (const auto& uri_san : downstream_ssl_connection->uriSanLocalCertificate()) {\n      auto* local_san = local_properties->add_subject_alt_name();\n      local_san->set_uri(uri_san);\n    }\n    local_properties->set_subject(downstream_ssl_connection->subjectLocalCertificate());\n\n    auto* peer_properties 
= tls_properties->mutable_peer_certificate_properties();\n    for (const auto& uri_san : downstream_ssl_connection->uriSanPeerCertificate()) {\n      auto* peer_san = peer_properties->add_subject_alt_name();\n      peer_san->set_uri(uri_san);\n    }\n\n    peer_properties->set_subject(downstream_ssl_connection->subjectPeerCertificate());\n    tls_properties->set_tls_session_id(downstream_ssl_connection->sessionId());\n    tls_properties->set_tls_version(\n        tlsVersionStringToEnum(downstream_ssl_connection->tlsVersion()));\n\n    auto* local_tls_cipher_suite = tls_properties->mutable_tls_cipher_suite();\n    local_tls_cipher_suite->set_value(downstream_ssl_connection->ciphersuiteId());\n  }\n  common_access_log.mutable_start_time()->MergeFrom(\n      Protobuf::util::TimeUtil::NanosecondsToTimestamp(\n          std::chrono::duration_cast<std::chrono::nanoseconds>(\n              stream_info.startTime().time_since_epoch())\n              .count()));\n\n  absl::optional<std::chrono::nanoseconds> dur = stream_info.lastDownstreamRxByteReceived();\n  if (dur) {\n    common_access_log.mutable_time_to_last_rx_byte()->MergeFrom(\n        Protobuf::util::TimeUtil::NanosecondsToDuration(dur.value().count()));\n  }\n\n  dur = stream_info.firstUpstreamTxByteSent();\n  if (dur) {\n    common_access_log.mutable_time_to_first_upstream_tx_byte()->MergeFrom(\n        Protobuf::util::TimeUtil::NanosecondsToDuration(dur.value().count()));\n  }\n\n  dur = stream_info.lastUpstreamTxByteSent();\n  if (dur) {\n    common_access_log.mutable_time_to_last_upstream_tx_byte()->MergeFrom(\n        Protobuf::util::TimeUtil::NanosecondsToDuration(dur.value().count()));\n  }\n\n  dur = stream_info.firstUpstreamRxByteReceived();\n  if (dur) {\n    common_access_log.mutable_time_to_first_upstream_rx_byte()->MergeFrom(\n        Protobuf::util::TimeUtil::NanosecondsToDuration(dur.value().count()));\n  }\n\n  dur = stream_info.lastUpstreamRxByteReceived();\n  if (dur) {\n    
common_access_log.mutable_time_to_last_upstream_rx_byte()->MergeFrom(\n        Protobuf::util::TimeUtil::NanosecondsToDuration(dur.value().count()));\n  }\n\n  dur = stream_info.firstDownstreamTxByteSent();\n  if (dur) {\n    common_access_log.mutable_time_to_first_downstream_tx_byte()->MergeFrom(\n        Protobuf::util::TimeUtil::NanosecondsToDuration(dur.value().count()));\n  }\n\n  dur = stream_info.lastDownstreamTxByteSent();\n  if (dur) {\n    common_access_log.mutable_time_to_last_downstream_tx_byte()->MergeFrom(\n        Protobuf::util::TimeUtil::NanosecondsToDuration(dur.value().count()));\n  }\n\n  if (stream_info.upstreamHost() != nullptr) {\n    Network::Utility::addressToProtobufAddress(\n        *stream_info.upstreamHost()->address(),\n        *common_access_log.mutable_upstream_remote_address());\n    common_access_log.set_upstream_cluster(stream_info.upstreamHost()->cluster().name());\n  }\n\n  if (!stream_info.getRouteName().empty()) {\n    common_access_log.set_route_name(stream_info.getRouteName());\n  }\n\n  if (stream_info.upstreamLocalAddress() != nullptr) {\n    Network::Utility::addressToProtobufAddress(*stream_info.upstreamLocalAddress(),\n                                               *common_access_log.mutable_upstream_local_address());\n  }\n  responseFlagsToAccessLogResponseFlags(common_access_log, stream_info);\n  if (!stream_info.upstreamTransportFailureReason().empty()) {\n    common_access_log.set_upstream_transport_failure_reason(\n        stream_info.upstreamTransportFailureReason());\n  }\n  if (stream_info.dynamicMetadata().filter_metadata_size() > 0) {\n    common_access_log.mutable_metadata()->MergeFrom(stream_info.dynamicMetadata());\n  }\n\n  for (const auto& key : config.filter_state_objects_to_log()) {\n    if (stream_info.filterState().hasDataWithName(key)) {\n      const auto& obj =\n          stream_info.filterState().getDataReadOnly<StreamInfo::FilterState::Object>(key);\n      ProtobufTypes::MessagePtr 
serialized_proto = obj.serializeAsProto();\n      if (serialized_proto != nullptr) {\n        auto& filter_state_objects = *common_access_log.mutable_filter_state_objects();\n        ProtobufWkt::Any& any = filter_state_objects[key];\n        if (dynamic_cast<ProtobufWkt::Any*>(serialized_proto.get()) != nullptr) {\n          any.Swap(dynamic_cast<ProtobufWkt::Any*>(serialized_proto.get()));\n        } else {\n          any.PackFrom(*serialized_proto);\n        }\n      }\n    }\n  }\n}\n\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/grpc_access_log_utils.h",
    "content": "#pragma once\n\n#include \"envoy/data/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\n\nclass Utility {\npublic:\n  static void extractCommonAccessLogProperties(\n      envoy::data::accesslog::v3::AccessLogCommon& common_access_log,\n      const StreamInfo::StreamInfo& stream_info,\n      const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig&\n          filter_states_to_log);\n\n  static void responseFlagsToAccessLogResponseFlags(\n      envoy::data::accesslog::v3::AccessLogCommon& common_access_log,\n      const StreamInfo::StreamInfo& stream_info);\n};\n\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/http_config.cc",
    "content": "#include \"extensions/access_loggers/grpc/http_config.h\"\n\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n#include \"common/grpc/async_client_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/access_loggers/grpc/config_utils.h\"\n#include \"extensions/access_loggers/grpc/grpc_access_log_proto_descriptors.h\"\n#include \"extensions/access_loggers/grpc/http_grpc_access_log_impl.h\"\n#include \"extensions/access_loggers/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace HttpGrpc {\n\nAccessLog::InstanceSharedPtr\nHttpGrpcAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config,\n                                                  AccessLog::FilterPtr&& filter,\n                                                  Server::Configuration::FactoryContext& context) {\n  GrpcCommon::validateProtoDescriptors();\n\n  const auto& proto_config = MessageUtil::downcastAndValidate<\n      const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig&>(\n      config, context.messageValidationVisitor());\n\n  return std::make_shared<HttpGrpcAccessLog>(std::move(filter), proto_config, context.threadLocal(),\n                                             GrpcCommon::getGrpcAccessLoggerCacheSingleton(context),\n                                             context.scope());\n}\n\nProtobufTypes::MessagePtr HttpGrpcAccessLogFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig>();\n}\n\nstd::string HttpGrpcAccessLogFactory::name() const { return AccessLogNames::get().HttpGrpc; }\n\n/**\n * Static registration for the HTTP gRPC access log. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(HttpGrpcAccessLogFactory,\n                 Server::Configuration::AccessLogInstanceFactory){\"envoy.http_grpc_access_log\"};\n\n} // namespace HttpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/http_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/server/access_log_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace HttpGrpc {\n\n/**\n * Config registration for the HTTP gRPC access log. @see AccessLogInstanceFactory.\n */\nclass HttpGrpcAccessLogFactory : public Server::Configuration::AccessLogInstanceFactory {\npublic:\n  AccessLog::InstanceSharedPtr\n  createAccessLogInstance(const Protobuf::Message& config, AccessLog::FilterPtr&& filter,\n                          Server::Configuration::FactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override;\n};\n\n} // namespace HttpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc",
    "content": "#include \"extensions/access_loggers/grpc/http_grpc_access_log_impl.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/data/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/http/headers.h\"\n#include \"common/network/utility.h\"\n#include \"common/stream_info/utility.h\"\n\n#include \"extensions/access_loggers/grpc/grpc_access_log_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace HttpGrpc {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    referer_handle(Http::CustomHeaders::get().Referer);\n\nHttpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger(\n    GrpcCommon::GrpcAccessLoggerSharedPtr logger)\n    : logger_(std::move(logger)) {}\n\nHttpGrpcAccessLog::HttpGrpcAccessLog(\n    AccessLog::FilterPtr&& filter,\n    envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config,\n    ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache,\n    Stats::Scope& scope)\n    : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)),\n      tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) {\n  for (const auto& header : config_.additional_request_headers_to_log()) {\n    request_headers_to_log_.emplace_back(header);\n  }\n\n  for (const auto& header : config_.additional_response_headers_to_log()) {\n    response_headers_to_log_.emplace_back(header);\n  }\n\n  for (const auto& header : config_.additional_response_trailers_to_log()) {\n    response_trailers_to_log_.emplace_back(header);\n  }\n\n  tls_slot_->set([this](Event::Dispatcher&) {\n    return std::make_shared<ThreadLocalLogger>(access_logger_cache_->getOrCreateLogger(\n        config_.common_config(), GrpcCommon::GrpcAccessLoggerType::HTTP, scope_));\n  });\n}\n\nvoid 
HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers,\n                                const Http::ResponseHeaderMap& response_headers,\n                                const Http::ResponseTrailerMap& response_trailers,\n                                const StreamInfo::StreamInfo& stream_info) {\n  // Common log properties.\n  // TODO(mattklein123): Populate sample_rate field.\n  envoy::data::accesslog::v3::HTTPAccessLogEntry log_entry;\n  GrpcCommon::Utility::extractCommonAccessLogProperties(*log_entry.mutable_common_properties(),\n                                                        stream_info, config_.common_config());\n\n  if (stream_info.protocol()) {\n    switch (stream_info.protocol().value()) {\n    case Http::Protocol::Http10:\n      log_entry.set_protocol_version(envoy::data::accesslog::v3::HTTPAccessLogEntry::HTTP10);\n      break;\n    case Http::Protocol::Http11:\n      log_entry.set_protocol_version(envoy::data::accesslog::v3::HTTPAccessLogEntry::HTTP11);\n      break;\n    case Http::Protocol::Http2:\n      log_entry.set_protocol_version(envoy::data::accesslog::v3::HTTPAccessLogEntry::HTTP2);\n      break;\n    case Http::Protocol::Http3:\n      log_entry.set_protocol_version(envoy::data::accesslog::v3::HTTPAccessLogEntry::HTTP3);\n      break;\n    }\n  }\n\n  // HTTP request properties.\n  // TODO(mattklein123): Populate port field.\n  auto* request_properties = log_entry.mutable_request();\n  if (request_headers.Scheme() != nullptr) {\n    request_properties->set_scheme(std::string(request_headers.getSchemeValue()));\n  }\n  if (request_headers.Host() != nullptr) {\n    request_properties->set_authority(std::string(request_headers.getHostValue()));\n  }\n  if (request_headers.Path() != nullptr) {\n    request_properties->set_path(std::string(request_headers.getPathValue()));\n  }\n  if (request_headers.UserAgent() != nullptr) {\n    request_properties->set_user_agent(std::string(request_headers.getUserAgentValue()));\n  
}\n  if (request_headers.getInline(referer_handle.handle()) != nullptr) {\n    request_properties->set_referer(\n        std::string(request_headers.getInlineValue(referer_handle.handle())));\n  }\n  if (request_headers.ForwardedFor() != nullptr) {\n    request_properties->set_forwarded_for(std::string(request_headers.getForwardedForValue()));\n  }\n  if (request_headers.RequestId() != nullptr) {\n    request_properties->set_request_id(std::string(request_headers.getRequestIdValue()));\n  }\n  if (request_headers.EnvoyOriginalPath() != nullptr) {\n    request_properties->set_original_path(std::string(request_headers.getEnvoyOriginalPathValue()));\n  }\n  request_properties->set_request_headers_bytes(request_headers.byteSize());\n  request_properties->set_request_body_bytes(stream_info.bytesReceived());\n  if (request_headers.Method() != nullptr) {\n    envoy::config::core::v3::RequestMethod method = envoy::config::core::v3::METHOD_UNSPECIFIED;\n    envoy::config::core::v3::RequestMethod_Parse(std::string(request_headers.getMethodValue()),\n                                                 &method);\n    request_properties->set_request_method(method);\n  }\n  if (!request_headers_to_log_.empty()) {\n    auto* logged_headers = request_properties->mutable_request_headers();\n\n    for (const auto& header : request_headers_to_log_) {\n      const Http::HeaderEntry* entry = request_headers.get(header);\n      if (entry != nullptr) {\n        logged_headers->insert({header.get(), std::string(entry->value().getStringView())});\n      }\n    }\n  }\n\n  // HTTP response properties.\n  auto* response_properties = log_entry.mutable_response();\n  if (stream_info.responseCode()) {\n    response_properties->mutable_response_code()->set_value(stream_info.responseCode().value());\n  }\n  if (stream_info.responseCodeDetails()) {\n    response_properties->set_response_code_details(stream_info.responseCodeDetails().value());\n  }\n  
response_properties->set_response_headers_bytes(response_headers.byteSize());\n  response_properties->set_response_body_bytes(stream_info.bytesSent());\n  if (!response_headers_to_log_.empty()) {\n    auto* logged_headers = response_properties->mutable_response_headers();\n\n    for (const auto& header : response_headers_to_log_) {\n      const Http::HeaderEntry* entry = response_headers.get(header);\n      if (entry != nullptr) {\n        logged_headers->insert({header.get(), std::string(entry->value().getStringView())});\n      }\n    }\n  }\n\n  if (!response_trailers_to_log_.empty()) {\n    auto* logged_headers = response_properties->mutable_response_trailers();\n\n    for (const auto& header : response_trailers_to_log_) {\n      const Http::HeaderEntry* entry = response_trailers.get(header);\n      if (entry != nullptr) {\n        logged_headers->insert({header.get(), std::string(entry->value().getStringView())});\n      }\n    }\n  }\n\n  tls_slot_->getTyped<ThreadLocalLogger>().logger_->log(std::move(log_entry));\n}\n\n} // namespace HttpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h",
    "content": "#pragma once\n\n#include <memory>\n#include <vector>\n\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/grpc/async_client_manager.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/grpc/typed_async_client.h\"\n\n#include \"extensions/access_loggers/common/access_log_base.h\"\n#include \"extensions/access_loggers/grpc/grpc_access_log_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace HttpGrpc {\n\n// TODO(mattklein123): Stats\n\n/**\n * Access log Instance that streams HTTP logs over gRPC.\n */\nclass HttpGrpcAccessLog : public Common::ImplBase {\npublic:\n  HttpGrpcAccessLog(AccessLog::FilterPtr&& filter,\n                    envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config,\n                    ThreadLocal::SlotAllocator& tls,\n                    GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache,\n                    Stats::Scope& scope);\n\nprivate:\n  /**\n   * Per-thread cached logger.\n   */\n  struct ThreadLocalLogger : public ThreadLocal::ThreadLocalObject {\n    ThreadLocalLogger(GrpcCommon::GrpcAccessLoggerSharedPtr logger);\n\n    const GrpcCommon::GrpcAccessLoggerSharedPtr logger_;\n  };\n\n  // Common::ImplBase\n  void emitLog(const Http::RequestHeaderMap& request_headers,\n               const Http::ResponseHeaderMap& response_headers,\n               const Http::ResponseTrailerMap& response_trailers,\n               const StreamInfo::StreamInfo& stream_info) override;\n\n  Stats::Scope& scope_;\n  const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config_;\n  const ThreadLocal::SlotPtr tls_slot_;\n  const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_;\n  std::vector<Http::LowerCaseString> request_headers_to_log_;\n  
std::vector<Http::LowerCaseString> response_headers_to_log_;\n  std::vector<Http::LowerCaseString> response_trailers_to_log_;\n  std::vector<std::string> filter_states_to_log_;\n};\n\nusing HttpGrpcAccessLogPtr = std::unique_ptr<HttpGrpcAccessLog>;\n\n} // namespace HttpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/tcp_config.cc",
    "content": "#include \"extensions/access_loggers/grpc/tcp_config.h\"\n\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n#include \"common/grpc/async_client_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/access_loggers/grpc/config_utils.h\"\n#include \"extensions/access_loggers/grpc/grpc_access_log_proto_descriptors.h\"\n#include \"extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h\"\n#include \"extensions/access_loggers/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace TcpGrpc {\n\nAccessLog::InstanceSharedPtr\nTcpGrpcAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config,\n                                                 AccessLog::FilterPtr&& filter,\n                                                 Server::Configuration::FactoryContext& context) {\n  GrpcCommon::validateProtoDescriptors();\n\n  const auto& proto_config = MessageUtil::downcastAndValidate<\n      const envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig&>(\n      config, context.messageValidationVisitor());\n\n  return std::make_shared<TcpGrpcAccessLog>(std::move(filter), proto_config, context.threadLocal(),\n                                            GrpcCommon::getGrpcAccessLoggerCacheSingleton(context),\n                                            context.scope());\n}\n\nProtobufTypes::MessagePtr TcpGrpcAccessLogFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig>();\n}\n\nstd::string TcpGrpcAccessLogFactory::name() const { return AccessLogNames::get().TcpGrpc; }\n\n/**\n * Static registration for the TCP gRPC access log. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(TcpGrpcAccessLogFactory,\n                 Server::Configuration::AccessLogInstanceFactory){\"envoy.tcp_grpc_access_log\"};\n\n} // namespace TcpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/tcp_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/server/access_log_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace TcpGrpc {\n\n/**\n * Config registration for the TCP gRPC access log. @see AccessLogInstanceFactory.\n */\nclass TcpGrpcAccessLogFactory : public Server::Configuration::AccessLogInstanceFactory {\npublic:\n  AccessLog::InstanceSharedPtr\n  createAccessLogInstance(const Protobuf::Message& config, AccessLog::FilterPtr&& filter,\n                          Server::Configuration::FactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override;\n};\n\n} // namespace TcpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc",
    "content": "#include \"extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h\"\n\n#include \"envoy/data/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/network/utility.h\"\n#include \"common/stream_info/utility.h\"\n\n#include \"extensions/access_loggers/grpc/grpc_access_log_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace TcpGrpc {\n\nTcpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger(GrpcCommon::GrpcAccessLoggerSharedPtr logger)\n    : logger_(std::move(logger)) {}\n\nTcpGrpcAccessLog::TcpGrpcAccessLog(\n    AccessLog::FilterPtr&& filter,\n    envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config,\n    ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache,\n    Stats::Scope& scope)\n    : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)),\n      tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) {\n  tls_slot_->set([this](Event::Dispatcher&) {\n    return std::make_shared<ThreadLocalLogger>(access_logger_cache_->getOrCreateLogger(\n        config_.common_config(), GrpcCommon::GrpcAccessLoggerType::TCP, scope_));\n  });\n}\n\nvoid TcpGrpcAccessLog::emitLog(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n                               const Http::ResponseTrailerMap&,\n                               const StreamInfo::StreamInfo& stream_info) {\n  // Common log properties.\n  envoy::data::accesslog::v3::TCPAccessLogEntry log_entry;\n  GrpcCommon::Utility::extractCommonAccessLogProperties(*log_entry.mutable_common_properties(),\n                                                        stream_info, config_.common_config());\n\n  envoy::data::accesslog::v3::ConnectionProperties& connection_properties =\n      *log_entry.mutable_connection_properties();\n  
connection_properties.set_received_bytes(stream_info.bytesReceived());\n  connection_properties.set_sent_bytes(stream_info.bytesSent());\n\n  // request_properties->set_request_body_bytes(stream_info.bytesReceived());\n  tls_slot_->getTyped<ThreadLocalLogger>().logger_->log(std::move(log_entry));\n}\n\n} // namespace TcpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/grpc/async_client_manager.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/grpc/typed_async_client.h\"\n\n#include \"extensions/access_loggers/common/access_log_base.h\"\n#include \"extensions/access_loggers/grpc/grpc_access_log_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace TcpGrpc {\n\n// TODO(mattklein123): Stats\n\n/**\n * Access log Instance that streams TCP logs over gRPC.\n */\nclass TcpGrpcAccessLog : public Common::ImplBase {\npublic:\n  TcpGrpcAccessLog(AccessLog::FilterPtr&& filter,\n                   envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config,\n                   ThreadLocal::SlotAllocator& tls,\n                   GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache,\n                   Stats::Scope& scope);\n\nprivate:\n  /**\n   * Per-thread cached logger.\n   */\n  struct ThreadLocalLogger : public ThreadLocal::ThreadLocalObject {\n    ThreadLocalLogger(GrpcCommon::GrpcAccessLoggerSharedPtr logger);\n\n    const GrpcCommon::GrpcAccessLoggerSharedPtr logger_;\n  };\n\n  // Common::ImplBase\n  void emitLog(const Http::RequestHeaderMap& request_headers,\n               const Http::ResponseHeaderMap& response_headers,\n               const Http::ResponseTrailerMap& response_trailers,\n               const StreamInfo::StreamInfo& stream_info) override;\n\n  Stats::Scope& scope_;\n  const envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config_;\n  const ThreadLocal::SlotPtr tls_slot_;\n  const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_;\n};\n\n} // namespace TcpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\n# Access log implementation that calls into a WASM VM.\n\nenvoy_cc_library(\n    name = \"wasm_access_log_lib\",\n    hdrs = [\"wasm_access_log_impl.h\"],\n    deps = [\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/access_loggers:well_known_names\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \":wasm_access_log_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:access_log_config_interface\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/access_loggers:well_known_names\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"@envoy_api//envoy/extensions/access_loggers/wasm/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/access_loggers/wasm/config.cc",
    "content": "#include \"extensions/access_loggers/wasm/config.h\"\n\n#include \"envoy/extensions/access_loggers/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/access_loggers/wasm/wasm_access_log_impl.h\"\n#include \"extensions/access_loggers/well_known_names.h\"\n#include \"extensions/common/wasm/wasm.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace Wasm {\n\nAccessLog::InstanceSharedPtr\nWasmAccessLogFactory::createAccessLogInstance(const Protobuf::Message& proto_config,\n                                              AccessLog::FilterPtr&& filter,\n                                              Server::Configuration::FactoryContext& context) {\n  const auto& config = MessageUtil::downcastAndValidate<\n      const envoy::extensions::access_loggers::wasm::v3::WasmAccessLog&>(\n      proto_config, context.messageValidationVisitor());\n  auto access_log =\n      std::make_shared<WasmAccessLog>(config.config().root_id(), nullptr, std::move(filter));\n\n  // Create a base WASM to verify that the code loads before setting/cloning the for the\n  // individual threads.\n  auto plugin = std::make_shared<Common::Wasm::Plugin>(\n      config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(),\n      config.config().vm_config().runtime(),\n      Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(),\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, context.localInfo(),\n      nullptr /* listener_metadata */);\n\n  auto callback = [access_log, &context, plugin](Common::Wasm::WasmHandleSharedPtr base_wasm) {\n    auto tls_slot = context.threadLocal().allocateSlot();\n\n    // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call.\n    tls_slot->set(\n        
[base_wasm,\n         plugin](Event::Dispatcher& dispatcher) -> std::shared_ptr<ThreadLocal::ThreadLocalObject> {\n          if (!base_wasm) {\n            // There is no way to prevent the connection at this point. The user could choose to use\n            // an HTTP Wasm plugin and only handle onLog() which would correctly close the\n            // connection in onRequestHeaders().\n            if (!plugin->fail_open_) {\n              ENVOY_LOG(critical, \"Plugin configured to fail closed failed to load\");\n            }\n            return nullptr;\n          }\n          return std::static_pointer_cast<ThreadLocal::ThreadLocalObject>(\n              Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, dispatcher));\n        });\n    access_log->setTlsSlot(std::move(tls_slot));\n  };\n\n  if (!Common::Wasm::createWasm(\n          config.config().vm_config(), plugin, context.scope().createScope(\"\"),\n          context.clusterManager(), context.initManager(), context.dispatcher(), context.api(),\n          context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) {\n    throw Common::Wasm::WasmException(\n        fmt::format(\"Unable to create Wasm access log {}\", plugin->name_));\n  }\n\n  return access_log;\n}\n\nProtobufTypes::MessagePtr WasmAccessLogFactory::createEmptyConfigProto() {\n  return ProtobufTypes::MessagePtr{\n      new envoy::extensions::access_loggers::wasm::v3::WasmAccessLog()};\n}\n\nstd::string WasmAccessLogFactory::name() const { return AccessLogNames::get().Wasm; }\n\n/**\n * Static registration for the wasm access log. @see RegisterFactory.\n */\nREGISTER_FACTORY(WasmAccessLogFactory,\n                 Server::Configuration::AccessLogInstanceFactory){\"envoy.wasm_access_log\"};\n\n} // namespace Wasm\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/wasm/config.h",
    "content": "#pragma once\n\n#include \"envoy/server/access_log_config.h\"\n\n#include \"common/config/datasource.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace Wasm {\n\n/**\n * Config registration for the file access log. @see AccessLogInstanceFactory.\n */\nclass WasmAccessLogFactory : public Server::Configuration::AccessLogInstanceFactory,\n                             Logger::Loggable<Logger::Id::wasm> {\npublic:\n  AccessLog::InstanceSharedPtr\n  createAccessLogInstance(const Protobuf::Message& config, AccessLog::FilterPtr&& filter,\n                          Server::Configuration::FactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override;\n\nprivate:\n  absl::flat_hash_map<std::string, std::string> convertJsonFormatToMap(ProtobufWkt::Struct config);\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_;\n};\n\n} // namespace Wasm\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/wasm/wasm_access_log_impl.h",
    "content": "#pragma once\n\n#include \"envoy/access_log/access_log.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/access_loggers/well_known_names.h\"\n#include \"extensions/common/wasm/wasm.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace Wasm {\n\nusing Envoy::Extensions::Common::Wasm::WasmHandle;\n\nclass WasmAccessLog : public AccessLog::Instance {\npublic:\n  WasmAccessLog(absl::string_view root_id, ThreadLocal::SlotPtr tls_slot,\n                AccessLog::FilterPtr filter)\n      : root_id_(root_id), tls_slot_(std::move(tls_slot)), filter_(std::move(filter)) {}\n  void log(const Http::RequestHeaderMap* request_headers,\n           const Http::ResponseHeaderMap* response_headers,\n           const Http::ResponseTrailerMap* response_trailers,\n           const StreamInfo::StreamInfo& stream_info) override {\n    if (filter_ && request_headers && response_headers && response_trailers) {\n      if (!filter_->evaluate(stream_info, *request_headers, *response_headers,\n                             *response_trailers)) {\n        return;\n      }\n    }\n\n    if (tls_slot_->get()) {\n      tls_slot_->getTyped<WasmHandle>().wasm()->log(root_id_, request_headers, response_headers,\n                                                    response_trailers, stream_info);\n    }\n  }\n\n  void setTlsSlot(ThreadLocal::SlotPtr tls_slot) {\n    ASSERT(tls_slot_ == nullptr);\n    tls_slot_ = std::move(tls_slot);\n  }\n\nprivate:\n  std::string root_id_;\n  ThreadLocal::SlotPtr tls_slot_;\n  AccessLog::FilterPtr filter_;\n};\n\n} // namespace Wasm\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/access_loggers/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\n\n/**\n * Well-known access logger names.\n * NOTE: New access loggers should use the well known name: envoy.access_loggers.name.\n */\nclass AccessLogNameValues {\npublic:\n  // File access log\n  const std::string File = \"envoy.access_loggers.file\";\n  // HTTP gRPC access log\n  const std::string HttpGrpc = \"envoy.access_loggers.http_grpc\";\n  // TCP gRPC access log\n  const std::string TcpGrpc = \"envoy.access_loggers.tcp_grpc\";\n  // WASM access log\n  const std::string Wasm = \"envoy.access_loggers.wasm\";\n};\n\nusing AccessLogNames = ConstSingleton<AccessLogNameValues>;\n\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/all_extensions.bzl",
    "content": "load(\"@bazel_skylib//lib:dicts.bzl\", \"dicts\")\nload(\"@envoy_build_config//:extensions_build_config.bzl\", \"EXTENSIONS\")\n\n# These extensions are registered using the extension system but are required for the core Envoy build.\n# The map may be overridden by extensions specified in envoy_build_config.\n_required_extensions = {\n    \"envoy.common.crypto.utility_lib\": \"//source/extensions/common/crypto:utility_lib\",\n    \"envoy.transport_sockets.tls\": \"//source/extensions/transport_sockets/tls:config\",\n}\n\n# Return all extensions to be compiled into Envoy.\ndef envoy_all_extensions(denylist = []):\n    all_extensions = dicts.add(_required_extensions, EXTENSIONS)\n\n    # These extensions can be removed on a site specific basis.\n    return [v for k, v in all_extensions.items() if not k in denylist]\n\n# Core extensions needed to run Envoy's integration tests.\n_core_extensions = [\n    \"envoy.access_loggers.file\",\n    \"envoy.filters.http.router\",\n    \"envoy.filters.http.health_check\",\n    \"envoy.filters.network.http_connection_manager\",\n    \"envoy.stat_sinks.statsd\",\n    \"envoy.transport_sockets.raw_buffer\",\n]\n\n# Return all core extensions to be compiled into Envoy.\ndef envoy_all_core_extensions():\n    all_extensions = dicts.add(_required_extensions, EXTENSIONS)\n\n    # These extensions can be removed on a site specific basis.\n    return [v for k, v in all_extensions.items() if k in _core_extensions]\n\n_http_filter_prefix = \"envoy.filters.http\"\n\ndef envoy_all_http_filters():\n    all_extensions = dicts.add(_required_extensions, EXTENSIONS)\n\n    return [v for k, v in all_extensions.items() if k.startswith(_http_filter_prefix)]\n\n# All network-layer filters are extensions with names that have the following prefix.\n_network_filter_prefix = \"envoy.filters.network\"\n\n# All thrift filters are extensions with names that have the following prefix.\n_thrift_filter_prefix = \"envoy.filters.thrift\"\n\n# 
Return all network-layer filter extensions to be compiled into network-layer filter generic fuzzer.\ndef envoy_all_network_filters():\n    all_extensions = dicts.add(_required_extensions, EXTENSIONS)\n\n    return [v for k, v in all_extensions.items() if k.startswith(_network_filter_prefix) or k.startswith(_thrift_filter_prefix)]\n"
  },
  {
    "path": "source/extensions/bootstrap/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\n# WASM service.\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\n        \"config.h\",\n    ],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:bootstrap_extension_config_interface\",\n        \"//include/envoy/server:factory_context_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/grpc_credentials:well_known_names\",\n        \"@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/bootstrap/wasm/config.cc",
    "content": "#include \"extensions/bootstrap/wasm/config.h\"\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/factory_context.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/config/datasource.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Bootstrap {\nnamespace Wasm {\n\nstatic const std::string INLINE_STRING = \"<inline>\";\n\nvoid WasmFactory::createWasm(const envoy::extensions::wasm::v3::WasmService& config,\n                             Server::Configuration::ServerFactoryContext& context,\n                             CreateWasmServiceCallback&& cb) {\n  auto plugin = std::make_shared<Common::Wasm::Plugin>(\n      config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(),\n      config.config().vm_config().runtime(),\n      Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(),\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, context.localInfo(), nullptr);\n\n  bool singleton = config.singleton();\n  auto callback = [&context, singleton, plugin, cb](Common::Wasm::WasmHandleSharedPtr base_wasm) {\n    if (!base_wasm) {\n      if (plugin->fail_open_) {\n        ENVOY_LOG(error, \"Unable to create Wasm service {}\", plugin->name_);\n      } else {\n        ENVOY_LOG(critical, \"Unable to create Wasm service {}\", plugin->name_);\n      }\n      return;\n    }\n    if (singleton) {\n      // Return a Wasm VM which will be stored as a singleton by the Server.\n      cb(std::make_unique<WasmService>(\n          Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, context.dispatcher())));\n      return;\n    }\n    // Per-thread WASM VM.\n    // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call.\n    auto tls_slot = context.threadLocal().allocateSlot();\n    tls_slot->set([base_wasm, plugin](Event::Dispatcher& 
dispatcher) {\n      return std::static_pointer_cast<ThreadLocal::ThreadLocalObject>(\n          Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, dispatcher));\n    });\n    cb(std::make_unique<WasmService>(std::move(tls_slot)));\n  };\n\n  if (!Common::Wasm::createWasm(\n          config.config().vm_config(), plugin, context.scope().createScope(\"\"),\n          context.clusterManager(), context.initManager(), context.dispatcher(), context.api(),\n          context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) {\n    // NB: throw if we get a synchronous configuration failures as this is how such failures are\n    // reported to xDS.\n    throw Common::Wasm::WasmException(\n        fmt::format(\"Unable to create Wasm service {}\", plugin->name_));\n  }\n}\n\nServer::BootstrapExtensionPtr\nWasmFactory::createBootstrapExtension(const Protobuf::Message& config,\n                                      Server::Configuration::ServerFactoryContext& context) {\n  auto typed_config =\n      MessageUtil::downcastAndValidate<const envoy::extensions::wasm::v3::WasmService&>(\n          config, context.messageValidationContext().staticValidationVisitor());\n\n  auto wasm_service_extension = std::make_unique<WasmServiceExtension>();\n  createWasm(typed_config, context,\n             [extension = wasm_service_extension.get()](WasmServicePtr wasm) {\n               extension->wasm_service_ = std::move(wasm);\n             });\n  return wasm_service_extension;\n}\n\n// /**\n//  * Static registration for the wasm factory. @see RegistryFactory.\n//  */\nREGISTER_FACTORY(WasmFactory, Server::Configuration::BootstrapExtensionFactory);\n\n} // namespace Wasm\n} // namespace Bootstrap\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/bootstrap/wasm/config.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/extensions/wasm/v3/wasm.pb.h\"\n#include \"envoy/extensions/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/server/bootstrap_extension_config.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Bootstrap {\nnamespace Wasm {\n\nclass WasmService {\npublic:\n  WasmService(Common::Wasm::WasmHandleSharedPtr singleton) : singleton_(std::move(singleton)) {}\n  WasmService(ThreadLocal::SlotPtr tls_slot) : tls_slot_(std::move(tls_slot)) {}\n\nprivate:\n  Common::Wasm::WasmHandleSharedPtr singleton_;\n  ThreadLocal::SlotPtr tls_slot_;\n};\n\nusing WasmServicePtr = std::unique_ptr<WasmService>;\nusing CreateWasmServiceCallback = std::function<void(WasmServicePtr)>;\n\nclass WasmFactory : public Server::Configuration::BootstrapExtensionFactory,\n                    Logger::Loggable<Logger::Id::wasm> {\npublic:\n  ~WasmFactory() override = default;\n  std::string name() const override { return \"envoy.bootstrap.wasm\"; }\n  void createWasm(const envoy::extensions::wasm::v3::WasmService& config,\n                  Server::Configuration::ServerFactoryContext& context,\n                  CreateWasmServiceCallback&& cb);\n  Server::BootstrapExtensionPtr\n  createBootstrapExtension(const Protobuf::Message& config,\n                           Server::Configuration::ServerFactoryContext& context) override;\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<envoy::extensions::wasm::v3::WasmService>();\n  }\n\nprivate:\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_;\n};\n\nclass WasmServiceExtension : public Server::BootstrapExtension {\npublic:\n  WasmService& wasmService() {\n    ASSERT(wasm_service_ != nullptr);\n    return *wasm_service_;\n  }\n\nprivate:\n  
WasmServicePtr wasm_service_;\n  friend class WasmFactory;\n};\n\n} // namespace Wasm\n} // namespace Bootstrap\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # well known names files are public as long as they exist.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/config:well_known_names\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/clusters/aggregate/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"cluster\",\n    srcs = [\"cluster.cc\"],\n    hdrs = [\n        \"cluster.h\",\n        \"lb_context.h\",\n    ],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/clusters/aggregate/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/clusters/aggregate/cluster.cc",
    "content": "#include \"extensions/clusters/aggregate/cluster.h\"\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/aggregate/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Aggregate {\n\nCluster::Cluster(const envoy::config::cluster::v3::Cluster& cluster,\n                 const envoy::extensions::clusters::aggregate::v3::ClusterConfig& config,\n                 Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime,\n                 Random::RandomGenerator& random,\n                 Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n                 Stats::ScopePtr&& stats_scope, ThreadLocal::SlotAllocator& tls, bool added_via_api)\n    : Upstream::ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope),\n                                added_via_api),\n      cluster_manager_(cluster_manager), runtime_(runtime), random_(random),\n      tls_(tls.allocateSlot()), clusters_(config.clusters().begin(), config.clusters().end()) {}\n\nPriorityContextPtr\nCluster::linearizePrioritySet(const std::function<bool(const std::string&)>& skip_predicate) {\n  PriorityContextPtr priority_context = std::make_unique<PriorityContext>();\n  uint32_t next_priority_after_linearizing = 0;\n\n  // Linearize the priority set. e.g. 
for clusters [C_0, C_1, C_2] referred in aggregate cluster\n  //    C_0 [P_0, P_1, P_2]\n  //    C_1 [P_0, P_1]\n  //    C_2 [P_0, P_1, P_2, P_3]\n  // The linearization result is:\n  //    [C_0.P_0, C_0.P_1, C_0.P_2, C_1.P_0, C_1.P_1, C_2.P_0, C_2.P_1, C_2.P_2, C_2.P_3]\n  // and the traffic will be distributed among these priorities.\n  for (const auto& cluster : clusters_) {\n    if (skip_predicate(cluster)) {\n      continue;\n    }\n    auto tlc = cluster_manager_.get(cluster);\n    // It is possible that the cluster doesn't exist, e.g., the cluster cloud be deleted or the\n    // cluster hasn't been added by xDS.\n    if (tlc == nullptr) {\n      continue;\n    }\n\n    uint32_t priority_in_current_cluster = 0;\n    for (const auto& host_set : tlc->prioritySet().hostSetsPerPriority()) {\n      if (!host_set->hosts().empty()) {\n        priority_context->priority_set_.updateHosts(\n            next_priority_after_linearizing, Upstream::HostSetImpl::updateHostsParams(*host_set),\n            host_set->localityWeights(), host_set->hosts(), {}, host_set->overprovisioningFactor());\n        priority_context->priority_to_cluster_.emplace_back(\n            std::make_pair(priority_in_current_cluster, tlc));\n\n        priority_context->cluster_and_priority_to_linearized_priority_[std::make_pair(\n            cluster, priority_in_current_cluster)] = next_priority_after_linearizing;\n        next_priority_after_linearizing++;\n      }\n      priority_in_current_cluster++;\n    }\n  }\n\n  return priority_context;\n}\n\nvoid Cluster::startPreInit() {\n  for (const auto& cluster : clusters_) {\n    auto tlc = cluster_manager_.get(cluster);\n    // It is possible when initializing the cluster, the included cluster doesn't exist. 
e.g., the\n    // cluster could be added dynamically by xDS.\n    if (tlc == nullptr) {\n      continue;\n    }\n\n    // Add callback for clusters initialized before aggregate cluster.\n    tlc->prioritySet().addMemberUpdateCb(\n        [this, cluster](const Upstream::HostVector&, const Upstream::HostVector&) {\n          ENVOY_LOG(debug, \"member update for cluster '{}' in aggregate cluster '{}'\", cluster,\n                    this->info()->name());\n          refresh();\n        });\n  }\n  refresh();\n  handle_ = cluster_manager_.addThreadLocalClusterUpdateCallbacks(*this);\n\n  onPreInitComplete();\n}\n\nvoid Cluster::refresh(const std::function<bool(const std::string&)>& skip_predicate) {\n  // Post the priority set to worker threads.\n  // TODO(mattklein123): Remove \"this\" capture.\n  tls_->runOnAllThreads([this, skip_predicate, cluster_name = this->info()->name()](\n                            ThreadLocal::ThreadLocalObjectSharedPtr object)\n                            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    PriorityContextPtr priority_context = linearizePrioritySet(skip_predicate);\n    Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name);\n    ASSERT(cluster != nullptr);\n    dynamic_cast<AggregateClusterLoadBalancer&>(cluster->loadBalancer())\n        .refresh(std::move(priority_context));\n    return object;\n  });\n}\n\nvoid Cluster::onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) {\n  if (std::find(clusters_.begin(), clusters_.end(), cluster.info()->name()) != clusters_.end()) {\n    ENVOY_LOG(debug, \"adding or updating cluster '{}' for aggregate cluster '{}'\",\n              cluster.info()->name(), info()->name());\n    refresh();\n    cluster.prioritySet().addMemberUpdateCb(\n        [this](const Upstream::HostVector&, const Upstream::HostVector&) { refresh(); });\n  }\n}\n\nvoid Cluster::onClusterRemoval(const std::string& cluster_name) {\n  //  The onClusterRemoval callback is called before the 
thread local cluster is removed. There\n  //  will be a dangling pointer to the thread local cluster if the deleted cluster is not skipped\n  //  when we refresh the load balancer.\n  if (std::find(clusters_.begin(), clusters_.end(), cluster_name) != clusters_.end()) {\n    ENVOY_LOG(debug, \"removing cluster '{}' from aggreagte cluster '{}'\", cluster_name,\n              info()->name());\n    refresh([cluster_name](const std::string& c) { return cluster_name == c; });\n  }\n}\n\nabsl::optional<uint32_t> AggregateClusterLoadBalancer::LoadBalancerImpl::hostToLinearizedPriority(\n    const Upstream::HostDescription& host) const {\n  auto it = priority_context_.cluster_and_priority_to_linearized_priority_.find(\n      std::make_pair(host.cluster().name(), host.priority()));\n\n  if (it != priority_context_.cluster_and_priority_to_linearized_priority_.end()) {\n    return it->second;\n  } else {\n    // The HostSet can change due to CDS/EDS updates between retries.\n    return absl::nullopt;\n  }\n}\n\nUpstream::HostConstSharedPtr\nAggregateClusterLoadBalancer::LoadBalancerImpl::chooseHost(Upstream::LoadBalancerContext* context) {\n  const Upstream::HealthyAndDegradedLoad* priority_loads = nullptr;\n  if (context != nullptr) {\n    priority_loads = &context->determinePriorityLoad(\n        priority_set_, per_priority_load_,\n        [this](const auto& host) { return hostToLinearizedPriority(host); });\n  } else {\n    priority_loads = &per_priority_load_;\n  }\n\n  const auto priority_pair =\n      choosePriority(random_.random(), priority_loads->healthy_priority_load_,\n                     priority_loads->degraded_priority_load_);\n\n  AggregateLoadBalancerContext aggregate_context(\n      context, priority_pair.second,\n      priority_context_.priority_to_cluster_[priority_pair.first].first);\n\n  Upstream::ThreadLocalCluster* cluster =\n      priority_context_.priority_to_cluster_[priority_pair.first].second;\n  return 
cluster->loadBalancer().chooseHost(&aggregate_context);\n}\n\nUpstream::HostConstSharedPtr\nAggregateClusterLoadBalancer::chooseHost(Upstream::LoadBalancerContext* context) {\n  if (load_balancer_) {\n    return load_balancer_->chooseHost(context);\n  }\n  return nullptr;\n}\n\nstd::pair<Upstream::ClusterImplBaseSharedPtr, Upstream::ThreadAwareLoadBalancerPtr>\nClusterFactory::createClusterWithConfig(\n    const envoy::config::cluster::v3::Cluster& cluster,\n    const envoy::extensions::clusters::aggregate::v3::ClusterConfig& proto_config,\n    Upstream::ClusterFactoryContext& context,\n    Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n    Stats::ScopePtr&& stats_scope) {\n  auto new_cluster =\n      std::make_shared<Cluster>(cluster, proto_config, context.clusterManager(), context.runtime(),\n                                context.api().randomGenerator(), socket_factory_context,\n                                std::move(stats_scope), context.tls(), context.addedViaApi());\n  auto lb = std::make_unique<AggregateThreadAwareLoadBalancer>(*new_cluster);\n  return std::make_pair(new_cluster, std::move(lb));\n}\n\nREGISTER_FACTORY(ClusterFactory, Upstream::ClusterFactory);\n\n} // namespace Aggregate\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/aggregate/cluster.h",
    "content": "#pragma once\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/aggregate/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.h\"\n\n#include \"common/upstream/cluster_factory_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/clusters/aggregate/lb_context.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Aggregate {\n\nusing PriorityToClusterVector = std::vector<std::pair<uint32_t, Upstream::ThreadLocalCluster*>>;\n\n// Maps pair(host_cluster_name, host_priority) to the linearized priority of the Aggregate cluster.\nusing ClusterAndPriorityToLinearizedPriorityMap =\n    absl::flat_hash_map<std::pair<std::string, uint32_t>, uint32_t>;\n\nstruct PriorityContext {\n  Upstream::PrioritySetImpl priority_set_;\n  PriorityToClusterVector priority_to_cluster_;\n  ClusterAndPriorityToLinearizedPriorityMap cluster_and_priority_to_linearized_priority_;\n};\n\nusing PriorityContextPtr = std::unique_ptr<PriorityContext>;\n\nclass Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbacks {\npublic:\n  Cluster(const envoy::config::cluster::v3::Cluster& cluster,\n          const envoy::extensions::clusters::aggregate::v3::ClusterConfig& config,\n          Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime,\n          Random::RandomGenerator& random,\n          Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n          Stats::ScopePtr&& stats_scope, ThreadLocal::SlotAllocator& tls, bool added_via_api);\n\n  // Upstream::Cluster\n  Upstream::Cluster::InitializePhase initializePhase() const override {\n    return Upstream::Cluster::InitializePhase::Secondary;\n  }\n\n  // Upstream::ClusterUpdateCallbacks\n  void onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) override;\n  void onClusterRemoval(const std::string& cluster_name) override;\n\n  void refresh() 
{\n    refresh([](const std::string&) { return false; });\n  }\n\n  Upstream::ClusterUpdateCallbacksHandlePtr handle_;\n  Upstream::ClusterManager& cluster_manager_;\n  Runtime::Loader& runtime_;\n  Random::RandomGenerator& random_;\n  ThreadLocal::SlotPtr tls_;\n  const std::vector<std::string> clusters_;\n\nprivate:\n  // Upstream::ClusterImplBase\n  void startPreInit() override;\n\n  void refresh(const std::function<bool(const std::string&)>& skip_predicate);\n  PriorityContextPtr\n  linearizePrioritySet(const std::function<bool(const std::string&)>& skip_predicate);\n};\n\n// Load balancer used by each worker thread. It will be refreshed when clusters, hosts or priorities\n// are updated.\nclass AggregateClusterLoadBalancer : public Upstream::LoadBalancer {\npublic:\n  AggregateClusterLoadBalancer(\n      Upstream::ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random,\n      const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n      : stats_(stats), runtime_(runtime), random_(random), common_config_(common_config) {}\n\n  // Upstream::LoadBalancer\n  Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override;\n  // Prefetching not yet implemented for extensions.\n  Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override {\n    return nullptr;\n  }\n\nprivate:\n  // Use inner class to extend LoadBalancerBase. 
When initializing AggregateClusterLoadBalancer, the\n  // priority set could be empty, we cannot initialize LoadBalancerBase when priority set is empty.\n  class LoadBalancerImpl : public Upstream::LoadBalancerBase {\n  public:\n    LoadBalancerImpl(const PriorityContext& priority_context, Upstream::ClusterStats& stats,\n                     Runtime::Loader& runtime, Random::RandomGenerator& random,\n                     const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n        : Upstream::LoadBalancerBase(priority_context.priority_set_, stats, runtime, random,\n                                     common_config),\n          priority_context_(priority_context) {}\n\n    // Upstream::LoadBalancer\n    Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override;\n    // Prefetching not yet implemented for extensions.\n    Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override {\n      return nullptr;\n    }\n\n    // Upstream::LoadBalancerBase\n    Upstream::HostConstSharedPtr chooseHostOnce(Upstream::LoadBalancerContext*) override {\n      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    }\n\n    absl::optional<uint32_t> hostToLinearizedPriority(const Upstream::HostDescription& host) const;\n\n  private:\n    const PriorityContext& priority_context_;\n  };\n\n  using LoadBalancerImplPtr = std::unique_ptr<LoadBalancerImpl>;\n\n  LoadBalancerImplPtr load_balancer_;\n  Upstream::ClusterStats& stats_;\n  Runtime::Loader& runtime_;\n  Random::RandomGenerator& random_;\n  const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config_;\n  PriorityContextPtr priority_context_;\n\npublic:\n  void refresh(PriorityContextPtr priority_context) {\n    if (!priority_context->priority_set_.hostSetsPerPriority().empty()) {\n      load_balancer_ = std::make_unique<LoadBalancerImpl>(*priority_context, stats_, runtime_,\n                                                          random_, 
common_config_);\n    } else {\n      load_balancer_ = nullptr;\n    }\n    priority_context_ = std::move(priority_context);\n  }\n};\n\n// Load balancer factory created by the main thread and will be called in each worker thread to\n// create the thread local load balancer.\nstruct AggregateLoadBalancerFactory : public Upstream::LoadBalancerFactory {\n  AggregateLoadBalancerFactory(const Cluster& cluster) : cluster_(cluster) {}\n  // Upstream::LoadBalancerFactory\n  Upstream::LoadBalancerPtr create() override {\n    return std::make_unique<AggregateClusterLoadBalancer>(\n        cluster_.info()->stats(), cluster_.runtime_, cluster_.random_, cluster_.info()->lbConfig());\n  }\n\n  const Cluster& cluster_;\n};\n\n// Thread aware load balancer created by the main thread.\nstruct AggregateThreadAwareLoadBalancer : public Upstream::ThreadAwareLoadBalancer {\n  AggregateThreadAwareLoadBalancer(const Cluster& cluster) : cluster_(cluster) {}\n\n  // Upstream::ThreadAwareLoadBalancer\n  Upstream::LoadBalancerFactorySharedPtr factory() override {\n    return std::make_shared<AggregateLoadBalancerFactory>(cluster_);\n  }\n  void initialize() override {}\n\n  const Cluster& cluster_;\n};\n\nclass ClusterFactory : public Upstream::ConfigurableClusterFactoryBase<\n                           envoy::extensions::clusters::aggregate::v3::ClusterConfig> {\npublic:\n  ClusterFactory()\n      : ConfigurableClusterFactoryBase(Extensions::Clusters::ClusterTypes::get().Aggregate) {}\n\nprivate:\n  std::pair<Upstream::ClusterImplBaseSharedPtr, Upstream::ThreadAwareLoadBalancerPtr>\n  createClusterWithConfig(\n      const envoy::config::cluster::v3::Cluster& cluster,\n      const envoy::extensions::clusters::aggregate::v3::ClusterConfig& proto_config,\n      Upstream::ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override;\n};\n\nDECLARE_FACTORY(ClusterFactory);\n\n} // 
namespace Aggregate\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/aggregate/lb_context.h",
    "content": "#pragma once\n\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Aggregate {\n\n// AggregateLoadBalancerContext wraps the load balancer context to re-assign priority load\n// according the to host priority selected by the aggregate load balancer.\nclass AggregateLoadBalancerContext : public Upstream::LoadBalancerContext {\npublic:\n  AggregateLoadBalancerContext(Upstream::LoadBalancerContext* context,\n                               Upstream::LoadBalancerBase::HostAvailability host_availability,\n                               uint32_t host_priority)\n      : host_availability_(host_availability), host_priority_(host_priority) {\n    if (context == nullptr) {\n      owned_context_ = std::make_unique<Upstream::LoadBalancerContextBase>();\n      context_ = owned_context_.get();\n    } else {\n      context_ = context;\n    }\n  }\n\n  // Upstream::LoadBalancerContext\n  absl::optional<uint64_t> computeHashKey() override { return context_->computeHashKey(); }\n  const Network::Connection* downstreamConnection() const override {\n    return context_->downstreamConnection();\n  }\n  const Router::MetadataMatchCriteria* metadataMatchCriteria() override {\n    return context_->metadataMatchCriteria();\n  }\n  const Http::RequestHeaderMap* downstreamHeaders() const override {\n    return context_->downstreamHeaders();\n  }\n  const Upstream::HealthyAndDegradedLoad&\n  determinePriorityLoad(const Upstream::PrioritySet&,\n                        const Upstream::HealthyAndDegradedLoad& original_priority_load,\n                        const Upstream::RetryPriority::PriorityMappingFunc&) override {\n    // Re-assign load. 
Set all traffic to the priority and availability selected in aggregate\n    // cluster.\n    //\n    // Note: context_->determinePriorityLoad() was already called and its result handled in\n    // AggregateClusterLoadBalancer::LoadBalancerImpl::chooseHost().\n    const size_t priorities = original_priority_load.healthy_priority_load_.get().size();\n    priority_load_.healthy_priority_load_.get().assign(priorities, 0);\n    priority_load_.degraded_priority_load_.get().assign(priorities, 0);\n\n    if (host_availability_ == Upstream::LoadBalancerBase::HostAvailability::Healthy) {\n      priority_load_.healthy_priority_load_.get()[host_priority_] = 100;\n    } else {\n      priority_load_.degraded_priority_load_.get()[host_priority_] = 100;\n    }\n    return priority_load_;\n  }\n  bool shouldSelectAnotherHost(const Upstream::Host& host) override {\n    return context_->shouldSelectAnotherHost(host);\n  }\n  uint32_t hostSelectionRetryCount() const override { return context_->hostSelectionRetryCount(); }\n  Network::Socket::OptionsSharedPtr upstreamSocketOptions() const override {\n    return context_->upstreamSocketOptions();\n  }\n  Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override {\n    return context_->upstreamTransportSocketOptions();\n  }\n\nprivate:\n  Upstream::HealthyAndDegradedLoad priority_load_;\n  std::unique_ptr<Upstream::LoadBalancerContext> owned_context_;\n  Upstream::LoadBalancerContext* context_{nullptr};\n  const Upstream::LoadBalancerBase::HostAvailability host_availability_;\n  const uint32_t host_priority_;\n};\n\n} // namespace Aggregate\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/dynamic_forward_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"cluster\",\n    srcs = [\"cluster.cc\"],\n    hdrs = [\"cluster.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:logical_host_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_interface\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_manager_impl\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/clusters/dynamic_forward_proxy/cluster.cc",
    "content": "#include \"extensions/clusters/dynamic_forward_proxy/cluster.h\"\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.validate.h\"\n\n#include \"common/network/transport_socket_options_impl.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace DynamicForwardProxy {\n\nCluster::Cluster(\n    const envoy::config::cluster::v3::Cluster& cluster,\n    const envoy::extensions::clusters::dynamic_forward_proxy::v3::ClusterConfig& config,\n    Runtime::Loader& runtime,\n    Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory& cache_manager_factory,\n    const LocalInfo::LocalInfo& local_info,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n    Stats::ScopePtr&& stats_scope, bool added_via_api)\n    : Upstream::BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope),\n                                       added_via_api),\n      dns_cache_manager_(cache_manager_factory.get()),\n      dns_cache_(dns_cache_manager_->getCache(config.dns_cache_config())),\n      update_callbacks_handle_(dns_cache_->addUpdateCallbacks(*this)), local_info_(local_info),\n      host_map_(std::make_shared<HostInfoMap>()) {\n  // Block certain TLS context parameters that don't make sense on a cluster-wide scale. We will\n  // support these parameters dynamically in the future. 
This is not an exhaustive list of\n  // parameters that don't make sense but should be the most obvious ones that a user might set\n  // in error.\n  if (!cluster.hidden_envoy_deprecated_tls_context().sni().empty() ||\n      !cluster.hidden_envoy_deprecated_tls_context()\n           .common_tls_context()\n           .validation_context()\n           .hidden_envoy_deprecated_verify_subject_alt_name()\n           .empty()) {\n    throw EnvoyException(\n        \"dynamic_forward_proxy cluster cannot configure 'sni' or 'verify_subject_alt_name'\");\n  }\n}\n\nvoid Cluster::startPreInit() {\n  // If we are attaching to a pre-populated cache we need to initialize our hosts.\n  auto existing_hosts = dns_cache_->hosts();\n  if (!existing_hosts.empty()) {\n    std::shared_ptr<HostInfoMap> new_host_map;\n    std::unique_ptr<Upstream::HostVector> hosts_added;\n    for (const auto& existing_host : existing_hosts) {\n      addOrUpdateWorker(existing_host.first, existing_host.second, new_host_map, hosts_added);\n    }\n    swapAndUpdateMap(new_host_map, *hosts_added, {});\n  }\n\n  onPreInitComplete();\n}\n\nvoid Cluster::addOrUpdateWorker(\n    const std::string& host,\n    const Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr& host_info,\n    std::shared_ptr<HostInfoMap>& new_host_map,\n    std::unique_ptr<Upstream::HostVector>& hosts_added) {\n  // We should never get a host with no address from the cache.\n  ASSERT(host_info->address() != nullptr);\n\n  // NOTE: Right now we allow a DNS cache to be shared between multiple clusters. Though we have\n  // connection/request circuit breakers on the cluster, we don't have any way to control the\n  // maximum hosts on a cluster. We currently assume that host data shared via shared pointer is a\n  // marginal memory cost above that already used by connections and requests, so relying on\n  // connection/request circuit breakers is sufficient. 
We may have to revisit this in the future.\n\n  HostInfoMapSharedPtr current_map = getCurrentHostMap();\n  const auto host_map_it = current_map->find(host);\n  if (host_map_it != current_map->end()) {\n    // If we only have an address change, we can do that swap inline without any other updates.\n    // The appropriate R/W locking is in place to allow this. The details of this locking are:\n    //  - Hosts are not thread local, they are global.\n    //  - We take a read lock when reading the address and a write lock when changing it.\n    //  - Address updates are very rare.\n    //  - Address reads are only done when a connection is being made and a \"real\" host\n    //    description is created or the host is queried via the admin endpoint. Both of\n    //    these operations are relatively rare and the read lock is held for a short period\n    //    of time.\n    //\n    // TODO(mattklein123): Right now the dynamic forward proxy / DNS cache works similar to how\n    //                     logical DNS works, meaning that we only store a single address per\n    //                     resolution. 
It would not be difficult to also expose strict DNS\n    //                     semantics, meaning the cache would expose multiple addresses and the\n    //                     cluster would create multiple logical hosts based on those addresses.\n    //                     We will leave this is a follow up depending on need.\n    ASSERT(host_info == host_map_it->second.shared_host_info_);\n    ASSERT(host_map_it->second.shared_host_info_->address() !=\n           host_map_it->second.logical_host_->address());\n    ENVOY_LOG(debug, \"updating dfproxy cluster host address '{}'\", host);\n    host_map_it->second.logical_host_->setNewAddress(host_info->address(), dummy_lb_endpoint_);\n    return;\n  }\n\n  ENVOY_LOG(debug, \"adding new dfproxy cluster host '{}'\", host);\n\n  if (new_host_map == nullptr) {\n    new_host_map = std::make_shared<HostInfoMap>(*current_map);\n  }\n  const auto emplaced =\n      new_host_map->try_emplace(host, host_info,\n                                std::make_shared<Upstream::LogicalHost>(\n                                    info(), host, host_info->address(), dummy_locality_lb_endpoint_,\n                                    dummy_lb_endpoint_, nullptr));\n  if (hosts_added == nullptr) {\n    hosts_added = std::make_unique<Upstream::HostVector>();\n  }\n  hosts_added->emplace_back(emplaced.first->second.logical_host_);\n}\n\nvoid Cluster::onDnsHostAddOrUpdate(\n    const std::string& host,\n    const Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr& host_info) {\n  std::shared_ptr<HostInfoMap> new_host_map;\n  std::unique_ptr<Upstream::HostVector> hosts_added;\n  addOrUpdateWorker(host, host_info, new_host_map, hosts_added);\n  if (hosts_added != nullptr) {\n    ASSERT(!new_host_map->empty());\n    ASSERT(!hosts_added->empty());\n    // Swap in the new map. 
This will be picked up when the per-worker LBs are recreated via\n    // the host set update.\n    swapAndUpdateMap(new_host_map, *hosts_added, {});\n  }\n}\n\nvoid Cluster::swapAndUpdateMap(const HostInfoMapSharedPtr& new_hosts_map,\n                               const Upstream::HostVector& hosts_added,\n                               const Upstream::HostVector& hosts_removed) {\n  {\n    absl::WriterMutexLock lock(&host_map_lock_);\n    host_map_ = new_hosts_map;\n  }\n\n  Upstream::PriorityStateManager priority_state_manager(*this, local_info_, nullptr);\n  priority_state_manager.initializePriorityFor(dummy_locality_lb_endpoint_);\n  for (const auto& host : (*new_hosts_map)) {\n    priority_state_manager.registerHostForPriority(host.second.logical_host_,\n                                                   dummy_locality_lb_endpoint_);\n  }\n  priority_state_manager.updateClusterPrioritySet(\n      0, std::move(priority_state_manager.priorityState()[0].first), hosts_added, hosts_removed,\n      absl::nullopt, absl::nullopt);\n}\n\nvoid Cluster::onDnsHostRemove(const std::string& host) {\n  HostInfoMapSharedPtr current_map = getCurrentHostMap();\n  const auto host_map_it = current_map->find(host);\n  ASSERT(host_map_it != current_map->end());\n  const auto new_host_map = std::make_shared<HostInfoMap>(*current_map);\n  Upstream::HostVector hosts_removed;\n  hosts_removed.emplace_back(host_map_it->second.logical_host_);\n  new_host_map->erase(host);\n  ENVOY_LOG(debug, \"removing dfproxy cluster host '{}'\", host);\n\n  // Swap in the new map. 
This will be picked up when the per-worker LBs are recreated via\n  // the host set update.\n  swapAndUpdateMap(new_host_map, {}, hosts_removed);\n}\n\nUpstream::HostConstSharedPtr\nCluster::LoadBalancer::chooseHost(Upstream::LoadBalancerContext* context) {\n  if (!context) {\n    return nullptr;\n  }\n\n  absl::string_view host;\n  if (context->downstreamHeaders()) {\n    host = context->downstreamHeaders()->getHostValue();\n  } else if (context->downstreamConnection()) {\n    host = context->downstreamConnection()->requestedServerName();\n  }\n\n  if (host.empty()) {\n    return nullptr;\n  }\n\n  const auto host_it = host_map_->find(host);\n  if (host_it == host_map_->end()) {\n    return nullptr;\n  } else {\n    host_it->second.shared_host_info_->touch();\n    return host_it->second.logical_host_;\n  }\n}\n\nstd::pair<Upstream::ClusterImplBaseSharedPtr, Upstream::ThreadAwareLoadBalancerPtr>\nClusterFactory::createClusterWithConfig(\n    const envoy::config::cluster::v3::Cluster& cluster,\n    const envoy::extensions::clusters::dynamic_forward_proxy::v3::ClusterConfig& proto_config,\n    Upstream::ClusterFactoryContext& context,\n    Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n    Stats::ScopePtr&& stats_scope) {\n  Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory(\n      context.singletonManager(), context.dispatcher(), context.tls(),\n      context.api().randomGenerator(), context.runtime(), context.stats());\n  envoy::config::cluster::v3::Cluster cluster_config = cluster;\n  if (cluster_config.has_upstream_http_protocol_options()) {\n    if (!proto_config.allow_insecure_cluster_options() &&\n        (!cluster_config.upstream_http_protocol_options().auto_sni() ||\n         !cluster_config.upstream_http_protocol_options().auto_san_validation())) {\n      throw EnvoyException(\n          \"dynamic_forward_proxy cluster must have auto_sni and auto_san_validation true when \"\n        
  \"configured with upstream_http_protocol_options\");\n    }\n  } else {\n    cluster_config.mutable_upstream_http_protocol_options()->set_auto_sni(true);\n    cluster_config.mutable_upstream_http_protocol_options()->set_auto_san_validation(true);\n  }\n\n  auto new_cluster = std::make_shared<Cluster>(\n      cluster_config, proto_config, context.runtime(), cache_manager_factory, context.localInfo(),\n      socket_factory_context, std::move(stats_scope), context.addedViaApi());\n  auto lb = std::make_unique<Cluster::ThreadAwareLoadBalancer>(*new_cluster);\n  return std::make_pair(new_cluster, std::move(lb));\n}\n\nREGISTER_FACTORY(ClusterFactory, Upstream::ClusterFactory);\n\n} // namespace DynamicForwardProxy\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/dynamic_forward_proxy/cluster.h",
    "content": "#pragma once\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.validate.h\"\n\n#include \"common/upstream/cluster_factory_impl.h\"\n#include \"common/upstream/logical_host.h\"\n\n#include \"extensions/clusters/well_known_names.h\"\n#include \"extensions/common/dynamic_forward_proxy/dns_cache.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace DynamicForwardProxy {\n\nclass Cluster : public Upstream::BaseDynamicClusterImpl,\n                public Extensions::Common::DynamicForwardProxy::DnsCache::UpdateCallbacks {\npublic:\n  Cluster(const envoy::config::cluster::v3::Cluster& cluster,\n          const envoy::extensions::clusters::dynamic_forward_proxy::v3::ClusterConfig& config,\n          Runtime::Loader& runtime,\n          Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory& cache_manager_factory,\n          const LocalInfo::LocalInfo& local_info,\n          Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n          Stats::ScopePtr&& stats_scope, bool added_via_api);\n\n  // Upstream::Cluster\n  Upstream::Cluster::InitializePhase initializePhase() const override {\n    return Upstream::Cluster::InitializePhase::Primary;\n  }\n\n  // Upstream::ClusterImplBase\n  void startPreInit() override;\n\n  // Extensions::Common::DynamicForwardProxy::DnsCache::UpdateCallbacks\n  void onDnsHostAddOrUpdate(\n      const std::string& host,\n      const Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr& host_info) override;\n  void onDnsHostRemove(const std::string& host) override;\n\nprivate:\n  struct HostInfo {\n    HostInfo(const Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr& shared_host_info,\n             const Upstream::LogicalHostSharedPtr& 
logical_host)\n        : shared_host_info_(shared_host_info), logical_host_(logical_host) {}\n\n    const Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr shared_host_info_;\n    const Upstream::LogicalHostSharedPtr logical_host_;\n  };\n\n  using HostInfoMap = absl::flat_hash_map<std::string, HostInfo>;\n  using HostInfoMapSharedPtr = std::shared_ptr<const HostInfoMap>;\n\n  struct LoadBalancer : public Upstream::LoadBalancer {\n    LoadBalancer(const HostInfoMapSharedPtr& host_map) : host_map_(host_map) {}\n\n    // Upstream::LoadBalancer\n    Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override;\n    // Prefetching not implemented.\n    Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override {\n      return nullptr;\n    }\n\n    const HostInfoMapSharedPtr host_map_;\n  };\n\n  struct LoadBalancerFactory : public Upstream::LoadBalancerFactory {\n    LoadBalancerFactory(Cluster& cluster) : cluster_(cluster) {}\n\n    // Upstream::LoadBalancerFactory\n    Upstream::LoadBalancerPtr create() override {\n      return std::make_unique<LoadBalancer>(cluster_.getCurrentHostMap());\n    }\n\n    Cluster& cluster_;\n  };\n\n  struct ThreadAwareLoadBalancer : public Upstream::ThreadAwareLoadBalancer {\n    ThreadAwareLoadBalancer(Cluster& cluster) : cluster_(cluster) {}\n\n    // Upstream::ThreadAwareLoadBalancer\n    Upstream::LoadBalancerFactorySharedPtr factory() override {\n      return std::make_shared<LoadBalancerFactory>(cluster_);\n    }\n    void initialize() override {}\n\n    Cluster& cluster_;\n  };\n\n  HostInfoMapSharedPtr getCurrentHostMap() {\n    absl::ReaderMutexLock lock(&host_map_lock_);\n    return host_map_;\n  }\n\n  void\n  addOrUpdateWorker(const std::string& host,\n                    const Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr& host_info,\n                    std::shared_ptr<HostInfoMap>& new_host_map,\n                    
std::unique_ptr<Upstream::HostVector>& hosts_added);\n  void swapAndUpdateMap(const HostInfoMapSharedPtr& new_hosts_map,\n                        const Upstream::HostVector& hosts_added,\n                        const Upstream::HostVector& hosts_removed);\n\n  const Extensions::Common::DynamicForwardProxy::DnsCacheManagerSharedPtr dns_cache_manager_;\n  const Extensions::Common::DynamicForwardProxy::DnsCacheSharedPtr dns_cache_;\n  const Extensions::Common::DynamicForwardProxy::DnsCache::AddUpdateCallbacksHandlePtr\n      update_callbacks_handle_;\n  const envoy::config::endpoint::v3::LocalityLbEndpoints dummy_locality_lb_endpoint_;\n  const envoy::config::endpoint::v3::LbEndpoint dummy_lb_endpoint_;\n  const LocalInfo::LocalInfo& local_info_;\n\n  absl::Mutex host_map_lock_;\n  HostInfoMapSharedPtr host_map_ ABSL_GUARDED_BY(host_map_lock_);\n\n  friend class ClusterFactory;\n  friend class ClusterTest;\n};\n\nclass ClusterFactory : public Upstream::ConfigurableClusterFactoryBase<\n                           envoy::extensions::clusters::dynamic_forward_proxy::v3::ClusterConfig> {\npublic:\n  ClusterFactory()\n      : ConfigurableClusterFactoryBase(\n            Extensions::Clusters::ClusterTypes::get().DynamicForwardProxy) {}\n\nprivate:\n  std::pair<Upstream::ClusterImplBaseSharedPtr, Upstream::ThreadAwareLoadBalancerPtr>\n  createClusterWithConfig(\n      const envoy::config::cluster::v3::Cluster& cluster,\n      const envoy::extensions::clusters::dynamic_forward_proxy::v3::ClusterConfig& proto_config,\n      Upstream::ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override;\n};\n\nDECLARE_FACTORY(ClusterFactory);\n\n} // namespace DynamicForwardProxy\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/redis/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"crc16_lib\",\n    srcs = [\n        \"crc16.cc\",\n        \"crc16.h\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"redis_cluster_lb\",\n    srcs = [\n        \"redis_cluster_lb.cc\",\n        \"redis_cluster_lb.h\",\n    ],\n    deps = [\n        \":crc16_lib\",\n        \"//include/envoy/upstream:thread_local_cluster_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"//source/extensions/filters/network/common/redis:client_interface\",\n        \"//source/extensions/filters/network/common/redis:codec_interface\",\n        \"//source/extensions/filters/network/common/redis:supported_commands_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"redis_cluster\",\n    srcs = [\n        \"redis_cluster.cc\",\n        \"redis_cluster.h\",\n    ],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \"redis_cluster_lb\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//include/envoy/upstream:cluster_factory_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/singleton:manager_impl_lib\",\n        
\"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"//source/extensions/common/redis:cluster_refresh_manager_lib\",\n        \"//source/extensions/filters/network/common/redis:client_interface\",\n        \"//source/extensions/filters/network/common/redis:client_lib\",\n        \"//source/extensions/filters/network/common/redis:codec_interface\",\n        \"//source/extensions/filters/network/common/redis:utility_lib\",\n        \"//source/extensions/filters/network/redis_proxy:config\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:transport_socket_config_lib\",\n        \"@envoy_api//envoy/config/cluster/redis:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/clusters/redis/crc16.cc",
    "content": "#include \"crc16.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\n/**\n * XMODEM CRC16 implementation according to CITT standards. Based on\n * https://github.com/antirez/redis/blob/unstable/src/crc16.c\n * Based on (F).\n * @param key The string to hash.\n * @return The CRC16 hash code.\n */\nstatic const uint16_t crc16tab[256] = {\n    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b,\n    0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,\n    0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, 0x2462, 0x3443, 0x0420, 0x1401,\n    0x64e6, 0x74c7, 0x44a4, 0x5485, 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,\n    0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, 0xb75b, 0xa77a, 0x9719, 0x8738,\n    0xf7df, 0xe7fe, 0xd79d, 0xc7bc, 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,\n    0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, 0x5af5, 0x4ad4, 0x7ab7, 0x6a96,\n    0x1a71, 0x0a50, 0x3a33, 0x2a12, 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,\n    0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, 0xedae, 0xfd8f, 0xcdec, 0xddcd,\n    0xad2a, 0xbd0b, 0x8d68, 0x9d49, 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,\n    0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, 0x9188, 0x81a9, 0xb1ca, 0xa1eb,\n    0xd10c, 0xc12d, 0xf14e, 0xe16f, 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,\n    0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, 0x02b1, 0x1290, 0x22f3, 0x32d2,\n    0x4235, 0x5214, 0x6277, 0x7256, 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,\n    0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 0xa7db, 0xb7fa, 0x8799, 0x97b8,\n    0xe75f, 0xf77e, 0xc71d, 0xd73c, 0x26d3, 
0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,\n    0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, 0x5844, 0x4865, 0x7806, 0x6827,\n    0x18c0, 0x08e1, 0x3882, 0x28a3, 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,\n    0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d,\n    0xbdaa, 0xad8b, 0x9de8, 0x8dc9, 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,\n    0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, 0x6e17, 0x7e36, 0x4e55, 0x5e74,\n    0x2e93, 0x3eb2, 0x0ed1, 0x1ef0};\n\nuint16_t Crc16::crc16(absl::string_view key) {\n  const char* buf = static_cast<const char*>(key.data());\n  uint64_t len = key.size();\n  uint64_t counter;\n  uint16_t crc = 0;\n  for (counter = 0; counter < len; counter++) {\n    crc = (crc << 8) ^ crc16tab[((crc >> 8) ^ *buf++) & 0x00FF];\n  }\n  return crc;\n}\n\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/redis/crc16.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"absl/strings/ascii.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\nclass Crc16 {\npublic:\n  /**\n   * XMODEM CRC16 implementation according to CITT standards.\n   * Based on (https://github.com/antirez/redis/blob/unstable/src/crc16.c).\n   * @param key The string to hash.\n   * @return The CRC16 hash code.\n   */\n  static uint16_t crc16(absl::string_view key);\n};\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/redis/redis_cluster.cc",
    "content": "#include \"redis_cluster.h\"\n\n#include \"envoy/config/cluster/redis/redis_cluster.pb.h\"\n#include \"envoy/config/cluster/redis/redis_cluster.pb.validate.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\nnamespace {\nExtensions::NetworkFilters::Common::Redis::Client::DoNothingPoolCallbacks null_pool_callbacks;\n} // namespace\n\nRedisCluster::RedisCluster(\n    const envoy::config::cluster::v3::Cluster& cluster,\n    const envoy::config::cluster::redis::RedisClusterConfig& redis_cluster,\n    NetworkFilters::Common::Redis::Client::ClientFactory& redis_client_factory,\n    Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime, Api::Api& api,\n    Network::DnsResolverSharedPtr dns_resolver,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n    Stats::ScopePtr&& stats_scope, bool added_via_api,\n    ClusterSlotUpdateCallBackSharedPtr lb_factory)\n    : Upstream::BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope),\n                                       added_via_api),\n      cluster_manager_(cluster_manager),\n      cluster_refresh_rate_(std::chrono::milliseconds(\n          PROTOBUF_GET_MS_OR_DEFAULT(redis_cluster, cluster_refresh_rate, 5000))),\n      cluster_refresh_timeout_(std::chrono::milliseconds(\n          PROTOBUF_GET_MS_OR_DEFAULT(redis_cluster, cluster_refresh_timeout, 3000))),\n      redirect_refresh_interval_(std::chrono::milliseconds(\n          PROTOBUF_GET_MS_OR_DEFAULT(redis_cluster, redirect_refresh_interval, 5000))),\n      redirect_refresh_threshold_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(redis_cluster, redirect_refresh_threshold, 5)),\n      
failure_refresh_threshold_(redis_cluster.failure_refresh_threshold()),\n      host_degraded_refresh_threshold_(redis_cluster.host_degraded_refresh_threshold()),\n      dispatcher_(factory_context.dispatcher()), dns_resolver_(std::move(dns_resolver)),\n      dns_lookup_family_(Upstream::getDnsLookupFamilyFromCluster(cluster)),\n      load_assignment_(\n          cluster.has_load_assignment()\n              ? cluster.load_assignment()\n              : Config::Utility::translateClusterHosts(cluster.hidden_envoy_deprecated_hosts())),\n      local_info_(factory_context.localInfo()), random_(api.randomGenerator()),\n      redis_discovery_session_(*this, redis_client_factory), lb_factory_(std::move(lb_factory)),\n      auth_username_(\n          NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authUsername(info(), api)),\n      auth_password_(\n          NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword(info(), api)),\n      cluster_name_(cluster.name()),\n      refresh_manager_(Common::Redis::getClusterRefreshManager(\n          factory_context.singletonManager(), factory_context.dispatcher(),\n          factory_context.clusterManager(), factory_context.api().timeSource())),\n      registration_handle_(refresh_manager_->registerCluster(\n          cluster_name_, redirect_refresh_interval_, redirect_refresh_threshold_,\n          failure_refresh_threshold_, host_degraded_refresh_threshold_, [&]() {\n            redis_discovery_session_.resolve_timer_->enableTimer(std::chrono::milliseconds(0));\n          })) {\n  const auto& locality_lb_endpoints = load_assignment_.endpoints();\n  for (const auto& locality_lb_endpoint : locality_lb_endpoints) {\n    for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) {\n      const auto& host = lb_endpoint.endpoint().address();\n      dns_discovery_resolve_targets_.emplace_back(new DnsDiscoveryResolveTarget(\n          *this, host.socket_address().address(), host.socket_address().port_value()));\n   
 }\n  }\n}\n\nvoid RedisCluster::startPreInit() {\n  for (const DnsDiscoveryResolveTargetPtr& target : dns_discovery_resolve_targets_) {\n    target->startResolveDns();\n  }\n}\n\nvoid RedisCluster::updateAllHosts(const Upstream::HostVector& hosts_added,\n                                  const Upstream::HostVector& hosts_removed,\n                                  uint32_t current_priority) {\n  Upstream::PriorityStateManager priority_state_manager(*this, local_info_, nullptr);\n\n  auto locality_lb_endpoint = localityLbEndpoint();\n  priority_state_manager.initializePriorityFor(locality_lb_endpoint);\n  for (const Upstream::HostSharedPtr& host : hosts_) {\n    if (locality_lb_endpoint.priority() == current_priority) {\n      priority_state_manager.registerHostForPriority(host, locality_lb_endpoint);\n    }\n  }\n\n  priority_state_manager.updateClusterPrioritySet(\n      current_priority, std::move(priority_state_manager.priorityState()[current_priority].first),\n      hosts_added, hosts_removed, absl::nullopt);\n}\n\nvoid RedisCluster::onClusterSlotUpdate(ClusterSlotsPtr&& slots) {\n  Upstream::HostVector new_hosts;\n\n  for (const ClusterSlot& slot : *slots) {\n    new_hosts.emplace_back(new RedisHost(info(), \"\", slot.primary(), *this, true));\n    for (auto const& replica : slot.replicas()) {\n      new_hosts.emplace_back(new RedisHost(info(), \"\", replica, *this, false));\n    }\n  }\n\n  absl::node_hash_map<std::string, Upstream::HostSharedPtr> updated_hosts;\n  Upstream::HostVector hosts_added;\n  Upstream::HostVector hosts_removed;\n  const bool host_updated = updateDynamicHostList(new_hosts, hosts_, hosts_added, hosts_removed,\n                                                  updated_hosts, all_hosts_);\n  const bool slot_updated =\n      lb_factory_ ? 
lb_factory_->onClusterSlotUpdate(std::move(slots), updated_hosts) : false;\n\n  // If slot is updated, call updateAllHosts regardless of if there's new hosts to force\n  // update of the thread local load balancers.\n  if (host_updated || slot_updated) {\n    ASSERT(std::all_of(hosts_.begin(), hosts_.end(), [&](const auto& host) {\n      return host->priority() == localityLbEndpoint().priority();\n    }));\n    updateAllHosts(hosts_added, hosts_removed, localityLbEndpoint().priority());\n  } else {\n    info_->stats().update_no_rebuild_.inc();\n  }\n\n  all_hosts_ = std::move(updated_hosts);\n\n  // TODO(hyang): If there is an initialize callback, fire it now. Note that if the\n  // cluster refers to multiple DNS names, this will return initialized after a single\n  // DNS resolution completes. This is not perfect but is easier to code and it is unclear\n  // if the extra complexity is needed so will start with this.\n  onPreInitComplete();\n}\n\nvoid RedisCluster::reloadHealthyHostsHelper(const Upstream::HostSharedPtr& host) {\n  if (lb_factory_) {\n    lb_factory_->onHostHealthUpdate();\n  }\n  if (host && (host->health() == Upstream::Host::Health::Degraded ||\n               host->health() == Upstream::Host::Health::Unhealthy)) {\n    refresh_manager_->onHostDegraded(cluster_name_);\n  }\n  ClusterImplBase::reloadHealthyHostsHelper(host);\n}\n\n// DnsDiscoveryResolveTarget\nRedisCluster::DnsDiscoveryResolveTarget::DnsDiscoveryResolveTarget(RedisCluster& parent,\n                                                                   const std::string& dns_address,\n                                                                   const uint32_t port)\n    : parent_(parent), dns_address_(dns_address), port_(port) {}\n\nRedisCluster::DnsDiscoveryResolveTarget::~DnsDiscoveryResolveTarget() {\n  if (active_query_) {\n    active_query_->cancel();\n  }\n  // Disable timer for mock tests.\n  if (resolve_timer_) {\n    resolve_timer_->disableTimer();\n  }\n}\n\nvoid 
RedisCluster::DnsDiscoveryResolveTarget::startResolveDns() {\n  ENVOY_LOG(trace, \"starting async DNS resolution for {}\", dns_address_);\n\n  active_query_ = parent_.dns_resolver_->resolve(\n      dns_address_, parent_.dns_lookup_family_,\n      [this](Network::DnsResolver::ResolutionStatus status,\n             std::list<Network::DnsResponse>&& response) -> void {\n        active_query_ = nullptr;\n        ENVOY_LOG(trace, \"async DNS resolution complete for {}\", dns_address_);\n        if (status == Network::DnsResolver::ResolutionStatus::Failure || response.empty()) {\n          if (status == Network::DnsResolver::ResolutionStatus::Failure) {\n            parent_.info_->stats().update_failure_.inc();\n          } else {\n            parent_.info_->stats().update_empty_.inc();\n          }\n\n          if (!resolve_timer_) {\n            resolve_timer_ =\n                parent_.dispatcher_.createTimer([this]() -> void { startResolveDns(); });\n          }\n          // if the initial dns resolved to empty, we'll skip the redis discovery phase and\n          // treat it as an empty cluster.\n          parent_.onPreInitComplete();\n          resolve_timer_->enableTimer(parent_.cluster_refresh_rate_);\n        } else {\n          // Once the DNS resolve the initial set of addresses, call startResolveRedis on\n          // the RedisDiscoverySession. The RedisDiscoverySession will using the \"cluster\n          // slots\" command for service discovery and slot allocation. 
All subsequent\n          // discoveries are handled by RedisDiscoverySession and will not use DNS\n          // resolution again.\n          parent_.redis_discovery_session_.registerDiscoveryAddress(std::move(response), port_);\n          parent_.redis_discovery_session_.startResolveRedis();\n        }\n      });\n}\n\n// RedisCluster\nRedisCluster::RedisDiscoverySession::RedisDiscoverySession(\n    Envoy::Extensions::Clusters::Redis::RedisCluster& parent,\n    NetworkFilters::Common::Redis::Client::ClientFactory& client_factory)\n    : parent_(parent), dispatcher_(parent.dispatcher_),\n      resolve_timer_(parent.dispatcher_.createTimer([this]() -> void { startResolveRedis(); })),\n      client_factory_(client_factory), buffer_timeout_(0),\n      redis_command_stats_(\n          NetworkFilters::Common::Redis::RedisCommandStats::createRedisCommandStats(\n              parent_.info()->statsScope().symbolTable())) {}\n\n// Convert the cluster slot IP/Port response to and address, return null if the response\n// does not match the expected type.\nNetwork::Address::InstanceConstSharedPtr\nRedisCluster::RedisDiscoverySession::RedisDiscoverySession::ProcessCluster(\n    const NetworkFilters::Common::Redis::RespValue& value) {\n  if (value.type() != NetworkFilters::Common::Redis::RespType::Array) {\n    return nullptr;\n  }\n  auto& array = value.asArray();\n\n  if (array.size() < 2 || array[0].type() != NetworkFilters::Common::Redis::RespType::BulkString ||\n      array[1].type() != NetworkFilters::Common::Redis::RespType::Integer) {\n    return nullptr;\n  }\n\n  try {\n    return Network::Utility::parseInternetAddress(array[0].asString(), array[1].asInteger(), false);\n  } catch (const EnvoyException& ex) {\n    ENVOY_LOG(debug, \"Invalid ip address in CLUSTER SLOTS response: {}\", ex.what());\n    return nullptr;\n  }\n}\n\nRedisCluster::RedisDiscoverySession::~RedisDiscoverySession() {\n  if (current_request_) {\n    current_request_->cancel();\n    current_request_ 
= nullptr;\n  }\n  // Disable timer for mock tests.\n  if (resolve_timer_) {\n    resolve_timer_->disableTimer();\n  }\n\n  while (!client_map_.empty()) {\n    client_map_.begin()->second->client_->close();\n  }\n}\n\nvoid RedisCluster::RedisDiscoveryClient::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    auto client_to_delete = parent_.client_map_.find(host_);\n    ASSERT(client_to_delete != parent_.client_map_.end());\n    parent_.dispatcher_.deferredDelete(std::move(client_to_delete->second->client_));\n    parent_.client_map_.erase(client_to_delete);\n  }\n}\n\nvoid RedisCluster::RedisDiscoverySession::registerDiscoveryAddress(\n    std::list<Envoy::Network::DnsResponse>&& response, const uint32_t port) {\n  // Since the address from DNS does not have port, we need to make a new address that has\n  // port in it.\n  for (const Network::DnsResponse& res : response) {\n    ASSERT(res.address_ != nullptr);\n    discovery_address_list_.push_back(Network::Utility::getAddressWithPort(*(res.address_), port));\n  }\n}\n\nvoid RedisCluster::RedisDiscoverySession::startResolveRedis() {\n  parent_.info_->stats().update_attempt_.inc();\n  // If a resolution is currently in progress, skip it.\n  if (current_request_) {\n    return;\n  }\n\n  // If hosts is empty, we haven't received a successful result from the CLUSTER SLOTS call\n  // yet. 
So, pick a random discovery address from dns and make a request.\n  Upstream::HostSharedPtr host;\n  if (parent_.hosts_.empty()) {\n    const int rand_idx = parent_.random_.random() % discovery_address_list_.size();\n    auto it = discovery_address_list_.begin();\n    std::next(it, rand_idx);\n    host = Upstream::HostSharedPtr{new RedisHost(parent_.info(), \"\", *it, parent_, true)};\n  } else {\n    const int rand_idx = parent_.random_.random() % parent_.hosts_.size();\n    host = parent_.hosts_[rand_idx];\n  }\n\n  current_host_address_ = host->address()->asString();\n  RedisDiscoveryClientPtr& client = client_map_[current_host_address_];\n  if (!client) {\n    client = std::make_unique<RedisDiscoveryClient>(*this);\n    client->host_ = current_host_address_;\n    client->client_ = client_factory_.create(host, dispatcher_, *this, redis_command_stats_,\n                                             parent_.info()->statsScope(), parent_.auth_username_,\n                                             parent_.auth_password_);\n    client->client_->addConnectionCallbacks(*client);\n  }\n\n  current_request_ = client->client_->makeRequest(ClusterSlotsRequest::instance_, *this);\n}\n\nvoid RedisCluster::RedisDiscoverySession::onResponse(\n    NetworkFilters::Common::Redis::RespValuePtr&& value) {\n  current_request_ = nullptr;\n\n  const uint32_t SlotRangeStart = 0;\n  const uint32_t SlotRangeEnd = 1;\n  const uint32_t SlotPrimary = 2;\n  const uint32_t SlotReplicaStart = 3;\n\n  // Do nothing if the cluster is empty.\n  if (value->type() != NetworkFilters::Common::Redis::RespType::Array || value->asArray().empty()) {\n    onUnexpectedResponse(value);\n    return;\n  }\n\n  auto slots = std::make_unique<std::vector<ClusterSlot>>();\n\n  // Loop through the cluster slot response and error checks for each field.\n  for (const NetworkFilters::Common::Redis::RespValue& part : value->asArray()) {\n    if (part.type() != NetworkFilters::Common::Redis::RespType::Array) {\n      
onUnexpectedResponse(value);\n      return;\n    }\n    const std::vector<NetworkFilters::Common::Redis::RespValue>& slot_range = part.asArray();\n    if (slot_range.size() < 3 ||\n        slot_range[SlotRangeStart].type() !=\n            NetworkFilters::Common::Redis::RespType::Integer || // Start slot range is an\n                                                                // integer.\n        slot_range[SlotRangeEnd].type() !=\n            NetworkFilters::Common::Redis::RespType::Integer) { // End slot range is an\n                                                                // integer.\n      onUnexpectedResponse(value);\n      return;\n    }\n\n    // Field 2: Primary address for slot range\n    auto primary_address = ProcessCluster(slot_range[SlotPrimary]);\n    if (!primary_address) {\n      onUnexpectedResponse(value);\n      return;\n    }\n\n    slots->emplace_back(slot_range[SlotRangeStart].asInteger(),\n                        slot_range[SlotRangeEnd].asInteger(), primary_address);\n\n    for (auto replica = std::next(slot_range.begin(), SlotReplicaStart);\n         replica != slot_range.end(); ++replica) {\n      auto replica_address = ProcessCluster(*replica);\n      if (!replica_address) {\n        onUnexpectedResponse(value);\n        return;\n      }\n      slots->back().addReplica(std::move(replica_address));\n    }\n  }\n\n  parent_.onClusterSlotUpdate(std::move(slots));\n  resolve_timer_->enableTimer(parent_.cluster_refresh_rate_);\n}\n\nvoid RedisCluster::RedisDiscoverySession::onUnexpectedResponse(\n    const NetworkFilters::Common::Redis::RespValuePtr& value) {\n  ENVOY_LOG(warn, \"Unexpected response to cluster slot command: {}\", value->toString());\n  this->parent_.info_->stats().update_failure_.inc();\n  resolve_timer_->enableTimer(parent_.cluster_refresh_rate_);\n}\n\nvoid RedisCluster::RedisDiscoverySession::onFailure() {\n  current_request_ = nullptr;\n  if (!current_host_address_.empty()) {\n    auto client_to_delete = 
client_map_.find(current_host_address_);\n    client_to_delete->second->client_->close();\n  }\n  parent_.info()->stats().update_failure_.inc();\n  resolve_timer_->enableTimer(parent_.cluster_refresh_rate_);\n}\n\nRedisCluster::ClusterSlotsRequest RedisCluster::ClusterSlotsRequest::instance_;\n\nstd::pair<Upstream::ClusterImplBaseSharedPtr, Upstream::ThreadAwareLoadBalancerPtr>\nRedisClusterFactory::createClusterWithConfig(\n    const envoy::config::cluster::v3::Cluster& cluster,\n    const envoy::config::cluster::redis::RedisClusterConfig& proto_config,\n    Upstream::ClusterFactoryContext& context,\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n    Envoy::Stats::ScopePtr&& stats_scope) {\n  if (!cluster.has_cluster_type() ||\n      cluster.cluster_type().name() != Extensions::Clusters::ClusterTypes::get().Redis) {\n    throw EnvoyException(\"Redis cluster can only be created with redis cluster type.\");\n  }\n  // TODO(hyang): This is needed to migrate existing cluster, disallow using other lb_policy\n  // in the future\n  if (cluster.lb_policy() != envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) {\n    return std::make_pair(std::make_shared<RedisCluster>(\n                              cluster, proto_config,\n                              NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_,\n                              context.clusterManager(), context.runtime(), context.api(),\n                              selectDnsResolver(cluster, context), socket_factory_context,\n                              std::move(stats_scope), context.addedViaApi(), nullptr),\n                          nullptr);\n  }\n  auto lb_factory =\n      std::make_shared<RedisClusterLoadBalancerFactory>(context.api().randomGenerator());\n  return std::make_pair(std::make_shared<RedisCluster>(\n                            cluster, proto_config,\n                            
NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_,\n                            context.clusterManager(), context.runtime(), context.api(),\n                            selectDnsResolver(cluster, context), socket_factory_context,\n                            std::move(stats_scope), context.addedViaApi(), lb_factory),\n                        std::make_unique<RedisClusterThreadAwareLoadBalancer>(lb_factory));\n}\n\nREGISTER_FACTORY(RedisClusterFactory, Upstream::ClusterFactory);\n\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/redis/redis_cluster.h",
    "content": "#pragma once\n\n#include <array>\n#include <atomic>\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <tuple>\n#include <utility>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/cluster/redis/redis_cluster.pb.h\"\n#include \"envoy/config/cluster/redis/redis_cluster.pb.validate.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/health_checker.h\"\n#include \"envoy/upstream/load_balancer.h\"\n#include \"envoy/upstream/locality.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/callback_impl.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/datasource.h\"\n#include \"common/config/metadata.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/upstream/cluster_factory_impl.h\"\n#include 
\"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/outlier_detection_impl.h\"\n#include \"common/upstream/resource_manager_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"source/extensions/clusters/redis/redis_cluster_lb.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"extensions/clusters/well_known_names.h\"\n#include \"extensions/common/redis/cluster_refresh_manager_impl.h\"\n#include \"extensions/filters/network/common/redis/client.h\"\n#include \"extensions/filters/network/common/redis/client_impl.h\"\n#include \"extensions/filters/network/common/redis/codec.h\"\n#include \"extensions/filters/network/common/redis/utility.h\"\n#include \"extensions/filters/network/redis_proxy/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\n/*\n * This class implements support for the topology part of `Redis Cluster\n * <https://redis.io/topics/cluster-spec>`_. Specifically, it allows Envoy to maintain an internal\n * representation of the topology of a Redis Cluster, and how often the topology should be\n * refreshed.\n *\n * The target Redis Cluster is obtained from the yaml config file as usual, and we choose a random\n * discovery address from DNS if there are no existing hosts (our startup condition). Otherwise, we\n * choose a random host from our known set of hosts. 
Then, against this host we make a topology\n * request.\n *\n * Topology requests are handled by RedisDiscoverySession, which handles the initialization of\n * the `CLUSTER SLOTS command <https://redis.io/commands/cluster-slots>`_, and the responses and\n * failure cases.\n *\n * Once the topology is fetched from Redis, the cluster will update the\n * RedisClusterLoadBalancerFactory, which will be used by the redis proxy filter for load balancing\n * purpose.\n */\n\nclass RedisCluster : public Upstream::BaseDynamicClusterImpl {\npublic:\n  RedisCluster(const envoy::config::cluster::v3::Cluster& cluster,\n               const envoy::config::cluster::redis::RedisClusterConfig& redis_cluster,\n               NetworkFilters::Common::Redis::Client::ClientFactory& client_factory,\n               Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime, Api::Api& api,\n               Network::DnsResolverSharedPtr dns_resolver,\n               Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n               Stats::ScopePtr&& stats_scope, bool added_via_api,\n               ClusterSlotUpdateCallBackSharedPtr factory);\n\n  struct ClusterSlotsRequest : public Extensions::NetworkFilters::Common::Redis::RespValue {\n  public:\n    ClusterSlotsRequest() {\n      type(Extensions::NetworkFilters::Common::Redis::RespType::Array);\n      std::vector<NetworkFilters::Common::Redis::RespValue> values(2);\n      values[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n      values[0].asString() = \"CLUSTER\";\n      values[1].type(NetworkFilters::Common::Redis::RespType::BulkString);\n      values[1].asString() = \"SLOTS\";\n      asArray().swap(values);\n    }\n    static ClusterSlotsRequest instance_;\n  };\n\n  InitializePhase initializePhase() const override { return InitializePhase::Primary; }\n\nprivate:\n  friend class RedisClusterTest;\n\n  void startPreInit() override;\n\n  void updateAllHosts(const Upstream::HostVector& 
hosts_added,\n                      const Upstream::HostVector& hosts_removed, uint32_t priority);\n\n  void onClusterSlotUpdate(ClusterSlotsPtr&&);\n\n  void reloadHealthyHostsHelper(const Upstream::HostSharedPtr& host) override;\n\n  const envoy::config::endpoint::v3::LocalityLbEndpoints& localityLbEndpoint() const {\n    // Always use the first endpoint.\n    return load_assignment_.endpoints()[0];\n  }\n\n  const envoy::config::endpoint::v3::LbEndpoint& lbEndpoint() const {\n    // Always use the first endpoint.\n    return localityLbEndpoint().lb_endpoints()[0];\n  }\n\n  // A redis node in the Redis cluster.\n  class RedisHost : public Upstream::HostImpl {\n  public:\n    RedisHost(Upstream::ClusterInfoConstSharedPtr cluster, const std::string& hostname,\n              Network::Address::InstanceConstSharedPtr address, RedisCluster& parent, bool primary)\n        : Upstream::HostImpl(\n              cluster, hostname, address,\n              // TODO(zyfjeff): Created through metadata shared pool\n              std::make_shared<envoy::config::core::v3::Metadata>(parent.lbEndpoint().metadata()),\n              parent.lbEndpoint().load_balancing_weight().value(),\n              parent.localityLbEndpoint().locality(),\n              parent.lbEndpoint().endpoint().health_check_config(),\n              parent.localityLbEndpoint().priority(), parent.lbEndpoint().health_status()),\n          primary_(primary) {}\n\n    bool isPrimary() const { return primary_; }\n\n  private:\n    const bool primary_;\n  };\n\n  // Resolves the discovery endpoint.\n  struct DnsDiscoveryResolveTarget {\n    DnsDiscoveryResolveTarget(RedisCluster& parent, const std::string& dns_address,\n                              const uint32_t port);\n\n    ~DnsDiscoveryResolveTarget();\n\n    void startResolveDns();\n\n    RedisCluster& parent_;\n    Network::ActiveDnsQuery* active_query_{};\n    const std::string dns_address_;\n    const uint32_t port_;\n    Event::TimerPtr resolve_timer_;\n  
};\n\n  using DnsDiscoveryResolveTargetPtr = std::unique_ptr<DnsDiscoveryResolveTarget>;\n\n  struct RedisDiscoverySession;\n\n  struct RedisDiscoveryClient : public Network::ConnectionCallbacks {\n    RedisDiscoveryClient(RedisDiscoverySession& parent) : parent_(parent) {}\n\n    // Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override;\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    RedisDiscoverySession& parent_;\n    std::string host_;\n    Extensions::NetworkFilters::Common::Redis::Client::ClientPtr client_;\n  };\n\n  using RedisDiscoveryClientPtr = std::unique_ptr<RedisDiscoveryClient>;\n\n  struct RedisDiscoverySession\n      : public Extensions::NetworkFilters::Common::Redis::Client::Config,\n        public Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks {\n    RedisDiscoverySession(RedisCluster& parent,\n                          NetworkFilters::Common::Redis::Client::ClientFactory& client_factory);\n\n    ~RedisDiscoverySession() override;\n\n    void registerDiscoveryAddress(std::list<Network::DnsResponse>&& response, const uint32_t port);\n\n    // Start discovery against a random host from existing hosts\n    void startResolveRedis();\n\n    // Extensions::NetworkFilters::Common::Redis::Client::Config\n    bool disableOutlierEvents() const override { return true; }\n    std::chrono::milliseconds opTimeout() const override {\n      // Allow the main Health Check infra to control timeout.\n      return parent_.cluster_refresh_timeout_;\n    }\n    bool enableHashtagging() const override { return false; }\n    bool enableRedirection() const override { return false; }\n    uint32_t maxBufferSizeBeforeFlush() const override { return 0; }\n    std::chrono::milliseconds bufferFlushTimeoutInMs() const override { return buffer_timeout_; }\n    uint32_t maxUpstreamUnknownConnections() const override { return 0; }\n    bool 
enableCommandStats() const override { return false; }\n    // For any readPolicy other than Primary, the RedisClientFactory will send a READONLY command\n    // when establishing a new connection. Since we're only using this for making the \"cluster\n    // slots\" commands, the READONLY command is not relevant in this context. We're setting it to\n    // Primary to avoid the additional READONLY command.\n    Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy readPolicy() const override {\n      return Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy::Primary;\n    }\n\n    // Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks\n    void onResponse(NetworkFilters::Common::Redis::RespValuePtr&& value) override;\n    void onFailure() override;\n    // Note: Below callback isn't used in topology updates\n    bool onRedirection(NetworkFilters::Common::Redis::RespValuePtr&&, const std::string&,\n                       bool) override {\n      return true;\n    }\n    void onUnexpectedResponse(const NetworkFilters::Common::Redis::RespValuePtr&);\n\n    Network::Address::InstanceConstSharedPtr\n    ProcessCluster(const NetworkFilters::Common::Redis::RespValue& value);\n\n    RedisCluster& parent_;\n    Event::Dispatcher& dispatcher_;\n    std::string current_host_address_;\n    Extensions::NetworkFilters::Common::Redis::Client::PoolRequest* current_request_{};\n    absl::node_hash_map<std::string, RedisDiscoveryClientPtr> client_map_;\n\n    std::list<Network::Address::InstanceConstSharedPtr> discovery_address_list_;\n\n    Event::TimerPtr resolve_timer_;\n    NetworkFilters::Common::Redis::Client::ClientFactory& client_factory_;\n    const std::chrono::milliseconds buffer_timeout_;\n    NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_;\n  };\n\n  Upstream::ClusterManager& cluster_manager_;\n  const std::chrono::milliseconds cluster_refresh_rate_;\n  const std::chrono::milliseconds 
cluster_refresh_timeout_;\n  const std::chrono::milliseconds redirect_refresh_interval_;\n  const uint32_t redirect_refresh_threshold_;\n  const uint32_t failure_refresh_threshold_;\n  const uint32_t host_degraded_refresh_threshold_;\n  std::list<DnsDiscoveryResolveTargetPtr> dns_discovery_resolve_targets_;\n  Event::Dispatcher& dispatcher_;\n  Network::DnsResolverSharedPtr dns_resolver_;\n  Network::DnsLookupFamily dns_lookup_family_;\n  const envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_;\n  const LocalInfo::LocalInfo& local_info_;\n  Random::RandomGenerator& random_;\n  RedisDiscoverySession redis_discovery_session_;\n  const ClusterSlotUpdateCallBackSharedPtr lb_factory_;\n\n  Upstream::HostVector hosts_;\n  Upstream::HostMap all_hosts_;\n\n  const std::string auth_username_;\n  const std::string auth_password_;\n  const std::string cluster_name_;\n  const Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_;\n  const Common::Redis::ClusterRefreshManager::HandlePtr registration_handle_;\n};\n\nclass RedisClusterFactory : public Upstream::ConfigurableClusterFactoryBase<\n                                envoy::config::cluster::redis::RedisClusterConfig> {\npublic:\n  RedisClusterFactory()\n      : ConfigurableClusterFactoryBase(Extensions::Clusters::ClusterTypes::get().Redis) {}\n\nprivate:\n  friend class RedisClusterTest;\n\n  std::pair<Upstream::ClusterImplBaseSharedPtr, Upstream::ThreadAwareLoadBalancerPtr>\n  createClusterWithConfig(\n      const envoy::config::cluster::v3::Cluster& cluster,\n      const envoy::config::cluster::redis::RedisClusterConfig& proto_config,\n      Upstream::ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override;\n};\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/redis/redis_cluster_lb.cc",
    "content": "#include \"redis_cluster_lb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\nbool ClusterSlot::operator==(const Envoy::Extensions::Clusters::Redis::ClusterSlot& rhs) const {\n  return start_ == rhs.start_ && end_ == rhs.end_ && primary_ == rhs.primary_ &&\n         replicas_ == rhs.replicas_;\n}\n\n// RedisClusterLoadBalancerFactory\nbool RedisClusterLoadBalancerFactory::onClusterSlotUpdate(ClusterSlotsPtr&& slots,\n                                                          Envoy::Upstream::HostMap all_hosts) {\n  // The slots is sorted, allowing for a quick comparison to make sure we need to update the slot\n  // array sort based on start and end to enable efficient comparison\n  std::sort(\n      slots->begin(), slots->end(), [](const ClusterSlot& lhs, const ClusterSlot& rhs) -> bool {\n        return lhs.start() < rhs.start() || (!(lhs.start() < rhs.start()) && lhs.end() < rhs.end());\n      });\n\n  if (current_cluster_slot_ && *current_cluster_slot_ == *slots) {\n    return false;\n  }\n\n  auto updated_slots = std::make_shared<SlotArray>();\n  auto shard_vector = std::make_shared<std::vector<RedisShardSharedPtr>>();\n  absl::flat_hash_map<std::string, uint64_t> shards;\n\n  for (const ClusterSlot& slot : *slots) {\n    // look in the updated map\n    const std::string primary_address = slot.primary()->asString();\n\n    auto result = shards.try_emplace(primary_address, shard_vector->size());\n    if (result.second) {\n      auto primary_host = all_hosts.find(primary_address);\n      ASSERT(primary_host != all_hosts.end(),\n             \"we expect all address to be found in the updated_hosts\");\n\n      Upstream::HostVectorSharedPtr primary_and_replicas = std::make_shared<Upstream::HostVector>();\n      Upstream::HostVectorSharedPtr replicas = std::make_shared<Upstream::HostVector>();\n      primary_and_replicas->push_back(primary_host->second);\n\n      for (auto const& replica : slot.replicas()) 
{\n        auto replica_host = all_hosts.find(replica->asString());\n        ASSERT(replica_host != all_hosts.end(),\n               \"we expect all address to be found in the updated_hosts\");\n        replicas->push_back(replica_host->second);\n        primary_and_replicas->push_back(replica_host->second);\n      }\n\n      shard_vector->emplace_back(\n          std::make_shared<RedisShard>(primary_host->second, replicas, primary_and_replicas));\n    }\n\n    for (auto i = slot.start(); i <= slot.end(); ++i) {\n      updated_slots->at(i) = result.first->second;\n    }\n  }\n\n  {\n    absl::WriterMutexLock lock(&mutex_);\n    current_cluster_slot_ = std::move(slots);\n    slot_array_ = std::move(updated_slots);\n    shard_vector_ = std::move(shard_vector);\n  }\n  return true;\n}\n\nvoid RedisClusterLoadBalancerFactory::onHostHealthUpdate() {\n  ShardVectorSharedPtr current_shard_vector;\n  {\n    absl::ReaderMutexLock lock(&mutex_);\n    current_shard_vector = shard_vector_;\n  }\n\n  // This can get called by cluster initialization before the Redis Cluster topology is resolved.\n  if (!current_shard_vector) {\n    return;\n  }\n\n  auto shard_vector = std::make_shared<std::vector<RedisShardSharedPtr>>();\n\n  for (auto const& shard : *current_shard_vector) {\n    shard_vector->emplace_back(std::make_shared<RedisShard>(\n        shard->primary(), shard->replicas().hostsPtr(), shard->allHosts().hostsPtr()));\n  }\n\n  {\n    absl::WriterMutexLock lock(&mutex_);\n    shard_vector_ = std::move(shard_vector);\n  }\n}\n\nUpstream::LoadBalancerPtr RedisClusterLoadBalancerFactory::create() {\n  absl::ReaderMutexLock lock(&mutex_);\n  return std::make_unique<RedisClusterLoadBalancer>(slot_array_, shard_vector_, random_);\n}\n\nnamespace {\nUpstream::HostConstSharedPtr chooseRandomHost(const Upstream::HostSetImpl& host_set,\n                                              Random::RandomGenerator& random) {\n  auto hosts = host_set.healthyHosts();\n  if (hosts.empty()) {\n  
  hosts = host_set.degradedHosts();\n  }\n\n  if (hosts.empty()) {\n    hosts = host_set.hosts();\n  }\n\n  if (!hosts.empty()) {\n    return hosts[random.random() % hosts.size()];\n  } else {\n    return nullptr;\n  }\n}\n} // namespace\n\nUpstream::HostConstSharedPtr RedisClusterLoadBalancerFactory::RedisClusterLoadBalancer::chooseHost(\n    Envoy::Upstream::LoadBalancerContext* context) {\n  if (!slot_array_) {\n    return nullptr;\n  }\n  absl::optional<uint64_t> hash;\n  if (context) {\n    hash = context->computeHashKey();\n  }\n\n  if (!hash) {\n    return nullptr;\n  }\n\n  auto shard = shard_vector_->at(\n      slot_array_->at(hash.value() % Envoy::Extensions::Clusters::Redis::MaxSlot));\n\n  auto redis_context = dynamic_cast<RedisLoadBalancerContext*>(context);\n  if (redis_context && redis_context->isReadCommand()) {\n    switch (redis_context->readPolicy()) {\n    case NetworkFilters::Common::Redis::Client::ReadPolicy::Primary:\n      return shard->primary();\n    case NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary:\n      if (shard->primary()->health() == Upstream::Host::Health::Healthy) {\n        return shard->primary();\n      } else {\n        return chooseRandomHost(shard->allHosts(), random_);\n      }\n    case NetworkFilters::Common::Redis::Client::ReadPolicy::Replica:\n      return chooseRandomHost(shard->replicas(), random_);\n    case NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica:\n      if (!shard->replicas().healthyHosts().empty()) {\n        return chooseRandomHost(shard->replicas(), random_);\n      } else {\n        return chooseRandomHost(shard->allHosts(), random_);\n      }\n    case NetworkFilters::Common::Redis::Client::ReadPolicy::Any:\n      return chooseRandomHost(shard->allHosts(), random_);\n    }\n  }\n  return shard->primary();\n}\n\nbool RedisLoadBalancerContextImpl::isReadRequest(\n    const NetworkFilters::Common::Redis::RespValue& request) {\n  const 
NetworkFilters::Common::Redis::RespValue* command = nullptr;\n  if (request.type() == NetworkFilters::Common::Redis::RespType::Array) {\n    command = &(request.asArray()[0]);\n  } else if (request.type() == NetworkFilters::Common::Redis::RespType::CompositeArray) {\n    command = request.asCompositeArray().command();\n  }\n  if (!command) {\n    return false;\n  }\n  if (command->type() != NetworkFilters::Common::Redis::RespType::SimpleString &&\n      command->type() != NetworkFilters::Common::Redis::RespType::BulkString) {\n    return false;\n  }\n  std::string to_lower_string = absl::AsciiStrToLower(command->asString());\n  return NetworkFilters::Common::Redis::SupportedCommands::isReadCommand(to_lower_string);\n}\n\nRedisLoadBalancerContextImpl::RedisLoadBalancerContextImpl(\n    const std::string& key, bool enabled_hashtagging, bool is_redis_cluster,\n    const NetworkFilters::Common::Redis::RespValue& request,\n    NetworkFilters::Common::Redis::Client::ReadPolicy read_policy)\n    : hash_key_(is_redis_cluster ? Crc16::crc16(hashtag(key, true))\n                                 : MurmurHash::murmurHash2(hashtag(key, enabled_hashtagging))),\n      is_read_(isReadRequest(request)), read_policy_(read_policy) {}\n\n// Inspired by the redis-cluster hashtagging algorithm\n// https://redis.io/topics/cluster-spec#keys-hash-tags\nabsl::string_view RedisLoadBalancerContextImpl::hashtag(absl::string_view v, bool enabled) {\n  if (!enabled) {\n    return v;\n  }\n\n  auto start = v.find('{');\n  if (start == std::string::npos) {\n    return v;\n  }\n\n  auto end = v.find('}', start);\n  if (end == std::string::npos || end == start + 1) {\n    return v;\n  }\n\n  return v.substr(start + 1, end - start - 1);\n}\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/redis/redis_cluster_lb.h",
    "content": "#pragma once\n\n#include <array>\n#include <string>\n#include <vector>\n\n#include \"envoy/upstream/load_balancer.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"source/extensions/clusters/redis/crc16.h\"\n\n#include \"extensions/clusters/well_known_names.h\"\n#include \"extensions/filters/network/common/redis/client.h\"\n#include \"extensions/filters/network/common/redis/codec.h\"\n#include \"extensions/filters/network/common/redis/supported_commands.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\nstatic const uint64_t MaxSlot = 16384;\n\nclass ClusterSlot {\npublic:\n  ClusterSlot(int64_t start, int64_t end, Network::Address::InstanceConstSharedPtr primary)\n      : start_(start), end_(end), primary_(std::move(primary)) {}\n\n  int64_t start() const { return start_; }\n  int64_t end() const { return end_; }\n  Network::Address::InstanceConstSharedPtr primary() const { return primary_; }\n  const absl::flat_hash_set<Network::Address::InstanceConstSharedPtr>& replicas() const {\n    return replicas_;\n  }\n  void addReplica(Network::Address::InstanceConstSharedPtr replica_address) {\n    replicas_.insert(std::move(replica_address));\n  }\n\n  bool operator==(const ClusterSlot& rhs) const;\n\nprivate:\n  int64_t start_;\n  int64_t end_;\n  Network::Address::InstanceConstSharedPtr primary_;\n  absl::flat_hash_set<Network::Address::InstanceConstSharedPtr> replicas_;\n};\n\nusing ClusterSlotsPtr = std::unique_ptr<std::vector<ClusterSlot>>;\nusing ClusterSlotsSharedPtr = std::shared_ptr<std::vector<ClusterSlot>>;\n\nclass RedisLoadBalancerContext {\npublic:\n  virtual ~RedisLoadBalancerContext() = default;\n\n  virtual bool 
isReadCommand() const PURE;\n  virtual NetworkFilters::Common::Redis::Client::ReadPolicy readPolicy() const PURE;\n};\n\nclass RedisLoadBalancerContextImpl : public RedisLoadBalancerContext,\n                                     public Upstream::LoadBalancerContextBase {\npublic:\n  /**\n   * The load balancer context for Redis requests. Note that is_redis_cluster implies using Redis\n   * cluster which require us to always enable hashtagging.\n   * @param key specify the key for the Redis request.\n   * @param enabled_hashtagging specify whether to enable hashtagging, this will always be true if\n   * is_redis_cluster is true.\n   * @param is_redis_cluster specify whether this is a request for redis cluster, if true the key\n   * will be hashed using crc16.\n   * @param request specify the Redis request.\n   * @param read_policy specify the read policy.\n   */\n  RedisLoadBalancerContextImpl(const std::string& key, bool enabled_hashtagging,\n                               bool is_redis_cluster,\n                               const NetworkFilters::Common::Redis::RespValue& request,\n                               NetworkFilters::Common::Redis::Client::ReadPolicy read_policy =\n                                   NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  // Upstream::LoadBalancerContextBase\n  absl::optional<uint64_t> computeHashKey() override { return hash_key_; }\n\n  bool isReadCommand() const override { return is_read_; }\n\n  NetworkFilters::Common::Redis::Client::ReadPolicy readPolicy() const override {\n    return read_policy_;\n  }\n\nprivate:\n  absl::string_view hashtag(absl::string_view v, bool enabled);\n\n  static bool isReadRequest(const NetworkFilters::Common::Redis::RespValue& request);\n\n  const absl::optional<uint64_t> hash_key_;\n  const bool is_read_;\n  const NetworkFilters::Common::Redis::Client::ReadPolicy read_policy_;\n};\n\nclass ClusterSlotUpdateCallBack {\npublic:\n  virtual ~ClusterSlotUpdateCallBack() = 
default;\n\n  /**\n   * Callback when cluster slot is updated\n   * @param slots provides the updated cluster slots.\n   * @param all_hosts provides the updated hosts.\n   * @return indicate if the cluster slot is updated or not.\n   */\n  virtual bool onClusterSlotUpdate(ClusterSlotsPtr&& slots, Upstream::HostMap all_hosts) PURE;\n\n  /**\n   * Callback when a host's health status is updated\n   */\n  virtual void onHostHealthUpdate() PURE;\n};\n\nusing ClusterSlotUpdateCallBackSharedPtr = std::shared_ptr<ClusterSlotUpdateCallBack>;\n\n/**\n * This factory is created and returned by RedisCluster's factory() method, the create() method will\n * be called on each thread to create a thread local RedisClusterLoadBalancer.\n */\nclass RedisClusterLoadBalancerFactory : public ClusterSlotUpdateCallBack,\n                                        public Upstream::LoadBalancerFactory {\npublic:\n  RedisClusterLoadBalancerFactory(Random::RandomGenerator& random) : random_(random) {}\n\n  // ClusterSlotUpdateCallBack\n  bool onClusterSlotUpdate(ClusterSlotsPtr&& slots, Upstream::HostMap all_hosts) override;\n\n  void onHostHealthUpdate() override;\n\n  // Upstream::LoadBalancerFactory\n  Upstream::LoadBalancerPtr create() override;\n\nprivate:\n  class RedisShard {\n  public:\n    RedisShard(Upstream::HostConstSharedPtr primary, Upstream::HostVectorConstSharedPtr replicas,\n               Upstream::HostVectorConstSharedPtr all_hosts)\n        : primary_(std::move(primary)) {\n      replicas_.updateHosts(Upstream::HostSetImpl::partitionHosts(\n                                std::move(replicas), Upstream::HostsPerLocalityImpl::empty()),\n                            nullptr, {}, {});\n      all_hosts_.updateHosts(Upstream::HostSetImpl::partitionHosts(\n                                 std::move(all_hosts), Upstream::HostsPerLocalityImpl::empty()),\n                             nullptr, {}, {});\n    }\n    const Upstream::HostConstSharedPtr primary() const { return primary_; }\n 
   const Upstream::HostSetImpl& replicas() const { return replicas_; }\n    const Upstream::HostSetImpl& allHosts() const { return all_hosts_; }\n\n  private:\n    const Upstream::HostConstSharedPtr primary_;\n    Upstream::HostSetImpl replicas_{0, absl::nullopt};\n    Upstream::HostSetImpl all_hosts_{0, absl::nullopt};\n  };\n\n  using RedisShardSharedPtr = std::shared_ptr<const RedisShard>;\n  using ShardVectorSharedPtr = std::shared_ptr<std::vector<RedisShardSharedPtr>>;\n  using SlotArray = std::array<uint64_t, MaxSlot>;\n  using SlotArraySharedPtr = std::shared_ptr<const SlotArray>;\n\n  /*\n   * This class implements load balancing according to `Redis Cluster\n   * <https://redis.io/topics/cluster-spec>`_. This load balancer is thread local and created\n   * through the RedisClusterLoadBalancerFactory by the cluster manager.\n   *\n   * The topology is stored in slot_array_ and shard_vector_. According to the\n   * `Redis Cluster Spec <https://redis.io/topics/cluster-spec#keys-distribution-model`_, the key\n   * space is split into a fixed size 16384 slots. The current implementation uses a fixed size\n   * std::array() of the index of the shard in the shard_vector_. This has a fixed cpu and memory\n   * cost and provide a fast lookup constant time lookup similar to Maglev. 
This will be used by the\n   * redis proxy filter for load balancing purpose.\n   */\n  class RedisClusterLoadBalancer : public Upstream::LoadBalancer {\n  public:\n    RedisClusterLoadBalancer(SlotArraySharedPtr slot_array, ShardVectorSharedPtr shard_vector,\n                             Random::RandomGenerator& random)\n        : slot_array_(std::move(slot_array)), shard_vector_(std::move(shard_vector)),\n          random_(random) {}\n\n    // Upstream::LoadBalancerBase\n    Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext*) override;\n    Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override {\n      return nullptr;\n    }\n\n  private:\n    const SlotArraySharedPtr slot_array_;\n    const ShardVectorSharedPtr shard_vector_;\n    Random::RandomGenerator& random_;\n  };\n\n  absl::Mutex mutex_;\n  SlotArraySharedPtr slot_array_ ABSL_GUARDED_BY(mutex_);\n  ClusterSlotsSharedPtr current_cluster_slot_;\n  ShardVectorSharedPtr shard_vector_;\n  Random::RandomGenerator& random_;\n};\n\nclass RedisClusterThreadAwareLoadBalancer : public Upstream::ThreadAwareLoadBalancer {\npublic:\n  RedisClusterThreadAwareLoadBalancer(Upstream::LoadBalancerFactorySharedPtr factory)\n      : factory_(std::move(factory)) {}\n\n  // Upstream::ThreadAwareLoadBalancer\n  Upstream::LoadBalancerFactorySharedPtr factory() override { return factory_; }\n  void initialize() override{};\n\nprivate:\n  Upstream::LoadBalancerFactorySharedPtr factory_;\n};\n\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/clusters/well_known_names.h",
    "content": "#pragma once\n\n#include \"common/config/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\n\n/**\n * Well-known cluster types, this supersede the service discovery types\n */\nclass ClusterTypeValues {\npublic:\n  // Static clusters (cluster that have a fixed number of hosts with resolved IP addresses).\n  const std::string Static = \"envoy.cluster.static\";\n\n  // Strict DNS (cluster that periodic DNS resolution and updates the host member set if the DNS\n  // members change).\n  const std::string StrictDns = \"envoy.cluster.strict_dns\";\n\n  // Logical DNS (cluster that creates a single logical host that wraps an async DNS resolver).\n  const std::string LogicalDns = \"envoy.cluster.logical_dns\";\n\n  // Endpoint Discovery Service (dynamic cluster that reads host information from the Endpoint\n  // Discovery Service).\n  const std::string Eds = \"envoy.cluster.eds\";\n\n  // Original destination (dynamic cluster that automatically adds hosts as needed based on the\n  // original destination address of the downstream connection).\n  const std::string OriginalDst = \"envoy.cluster.original_dst\";\n\n  // Redis cluster (cluster that reads host information using the redis cluster protocol).\n  const std::string Redis = \"envoy.clusters.redis\";\n\n  // Dynamic forward proxy cluster. This cluster is designed to work directly with the\n  // dynamic forward proxy HTTP filter.\n  const std::string DynamicForwardProxy = \"envoy.clusters.dynamic_forward_proxy\";\n\n  // Aggregate cluster which may contain different types of clusters. It allows load balance between\n  // different type of clusters.\n  const std::string Aggregate = \"envoy.clusters.aggregate\";\n};\n\nusing ClusterTypes = ConstSingleton<ClusterTypeValues>;\n\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    # Legacy.  TODO(#9953) clean up.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/common:documentation_url_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/common/aws/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"signer_interface\",\n    hdrs = [\"signer.h\"],\n    deps = [\n        \"//include/envoy/http:message_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"signer_impl_lib\",\n    srcs = [\"signer_impl.cc\"],\n    hdrs = [\"signer_impl.h\"],\n    deps = [\n        \":credentials_provider_interface\",\n        \":signer_interface\",\n        \":utility_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/crypto:utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/singleton:const_singleton\",\n        \"//source/extensions/common/crypto:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"credentials_provider_interface\",\n    hdrs = [\"credentials_provider.h\"],\n    external_deps = [\"abseil_optional\"],\n)\n\nenvoy_cc_library(\n    name = \"credentials_provider_impl_lib\",\n    srcs = [\"credentials_provider_impl.cc\"],\n    hdrs = [\"credentials_provider_impl.h\"],\n    external_deps = [\"abseil_time\"],\n    deps = [\n        \":credentials_provider_interface\",\n        \"//include/envoy/api:api_interface\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/json:json_loader_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    external_deps = [\"curl\"],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http:headers_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = 
\"region_provider_interface\",\n    hdrs = [\"region_provider.h\"],\n    external_deps = [\"abseil_optional\"],\n)\n\nenvoy_cc_library(\n    name = \"region_provider_impl_lib\",\n    srcs = [\"region_provider_impl.cc\"],\n    hdrs = [\"region_provider_impl.h\"],\n    deps = [\n        \":region_provider_interface\",\n        \"//source/common/common:logger_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/common/aws/credentials_provider.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\n/**\n * AWS credentials container\n *\n * If a credential component was not found in the execution environment, it's getter method will\n * return absl::nullopt. Credential components with the empty string value are treated as not found.\n */\nclass Credentials {\npublic:\n  Credentials(absl::string_view access_key_id = absl::string_view(),\n              absl::string_view secret_access_key = absl::string_view(),\n              absl::string_view session_token = absl::string_view()) {\n    if (!access_key_id.empty()) {\n      access_key_id_ = std::string(access_key_id);\n      if (!secret_access_key.empty()) {\n        secret_access_key_ = std::string(secret_access_key);\n        if (!session_token.empty()) {\n          session_token_ = std::string(session_token);\n        }\n      }\n    }\n  }\n\n  const absl::optional<std::string>& accessKeyId() const { return access_key_id_; }\n\n  const absl::optional<std::string>& secretAccessKey() const { return secret_access_key_; }\n\n  const absl::optional<std::string>& sessionToken() const { return session_token_; }\n\n  bool operator==(const Credentials& other) const {\n    return access_key_id_ == other.access_key_id_ &&\n           secret_access_key_ == other.secret_access_key_ && session_token_ == other.session_token_;\n  }\n\nprivate:\n  absl::optional<std::string> access_key_id_;\n  absl::optional<std::string> secret_access_key_;\n  absl::optional<std::string> session_token_;\n};\n\n/**\n * Interface for classes able to fetch AWS credentials from the execution environment.\n */\nclass CredentialsProvider {\npublic:\n  virtual ~CredentialsProvider() = default;\n\n  /**\n   * Get credentials from the environment.\n   *\n   * @return AWS credentials\n   
*/\n  virtual Credentials getCredentials() PURE;\n};\n\nusing CredentialsProviderSharedPtr = std::shared_ptr<CredentialsProvider>;\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/credentials_provider_impl.cc",
    "content": "#include \"extensions/common/aws/credentials_provider_impl.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/http/utility.h\"\n#include \"common/json/json_loader.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nnamespace {\n\nconstexpr char AWS_ACCESS_KEY_ID[] = \"AWS_ACCESS_KEY_ID\";\nconstexpr char AWS_SECRET_ACCESS_KEY[] = \"AWS_SECRET_ACCESS_KEY\";\nconstexpr char AWS_SESSION_TOKEN[] = \"AWS_SESSION_TOKEN\";\n\nconstexpr char ACCESS_KEY_ID[] = \"AccessKeyId\";\nconstexpr char SECRET_ACCESS_KEY[] = \"SecretAccessKey\";\nconstexpr char TOKEN[] = \"Token\";\nconstexpr char EXPIRATION[] = \"Expiration\";\nconstexpr char EXPIRATION_FORMAT[] = \"%E4Y%m%dT%H%M%S%z\";\nconstexpr char TRUE[] = \"true\";\n\nconstexpr char AWS_CONTAINER_CREDENTIALS_RELATIVE_URI[] = \"AWS_CONTAINER_CREDENTIALS_RELATIVE_URI\";\nconstexpr char AWS_CONTAINER_CREDENTIALS_FULL_URI[] = \"AWS_CONTAINER_CREDENTIALS_FULL_URI\";\nconstexpr char AWS_CONTAINER_AUTHORIZATION_TOKEN[] = \"AWS_CONTAINER_AUTHORIZATION_TOKEN\";\nconstexpr char AWS_EC2_METADATA_DISABLED[] = \"AWS_EC2_METADATA_DISABLED\";\n\nconstexpr std::chrono::hours REFRESH_INTERVAL{1};\nconstexpr std::chrono::seconds REFRESH_GRACE_PERIOD{5};\nconstexpr char EC2_METADATA_HOST[] = \"169.254.169.254:80\";\nconstexpr char CONTAINER_METADATA_HOST[] = \"169.254.170.2:80\";\nconstexpr char SECURITY_CREDENTIALS_PATH[] = \"/latest/meta-data/iam/security-credentials\";\n\n} // namespace\n\nCredentials EnvironmentCredentialsProvider::getCredentials() {\n  ENVOY_LOG(debug, \"Getting AWS credentials from the environment\");\n\n  const auto access_key_id = absl::NullSafeStringView(std::getenv(AWS_ACCESS_KEY_ID));\n  if (access_key_id.empty()) {\n    return Credentials();\n  }\n\n  const auto secret_access_key = absl::NullSafeStringView(std::getenv(AWS_SECRET_ACCESS_KEY));\n  const auto session_token = 
absl::NullSafeStringView(std::getenv(AWS_SESSION_TOKEN));\n\n  ENVOY_LOG(debug, \"Found following AWS credentials in the environment: {}={}, {}={}, {}={}\",\n            AWS_ACCESS_KEY_ID, access_key_id, AWS_SECRET_ACCESS_KEY,\n            secret_access_key.empty() ? \"\" : \"*****\", AWS_SESSION_TOKEN,\n            session_token.empty() ? \"\" : \"*****\");\n\n  return Credentials(access_key_id, secret_access_key, session_token);\n}\n\nvoid MetadataCredentialsProviderBase::refreshIfNeeded() {\n  const Thread::LockGuard lock(lock_);\n  if (needsRefresh()) {\n    refresh();\n  }\n}\n\nbool InstanceProfileCredentialsProvider::needsRefresh() {\n  return api_.timeSource().systemTime() - last_updated_ > REFRESH_INTERVAL;\n}\n\nvoid InstanceProfileCredentialsProvider::refresh() {\n  ENVOY_LOG(debug, \"Getting AWS credentials from the instance metadata\");\n\n  // First discover the Role of this instance\n  const auto instance_role_string =\n      metadata_fetcher_(EC2_METADATA_HOST, SECURITY_CREDENTIALS_PATH, \"\");\n  if (!instance_role_string) {\n    ENVOY_LOG(error, \"Could not retrieve credentials listing from the instance metadata\");\n    return;\n  }\n\n  const auto instance_role_list =\n      StringUtil::splitToken(StringUtil::trim(instance_role_string.value()), \"\\n\");\n  if (instance_role_list.empty()) {\n    ENVOY_LOG(error, \"No AWS credentials were found in the instance metadata\");\n    return;\n  }\n  ENVOY_LOG(debug, \"AWS credentials list:\\n{}\", instance_role_string.value());\n\n  // Only one Role can be associated with an instance:\n  // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html\n  const auto credential_path =\n      std::string(SECURITY_CREDENTIALS_PATH) + \"/\" +\n      std::string(instance_role_list[0].data(), instance_role_list[0].size());\n  ENVOY_LOG(debug, \"AWS credentials path: {}\", credential_path);\n\n  // Then fetch and parse the credentials\n  const auto credential_document = 
metadata_fetcher_(EC2_METADATA_HOST, credential_path, \"\");\n  if (!credential_document) {\n    ENVOY_LOG(error, \"Could not load AWS credentials document from the instance metadata\");\n    return;\n  }\n\n  Json::ObjectSharedPtr document_json;\n  try {\n    document_json = Json::Factory::loadFromString(credential_document.value());\n  } catch (EnvoyException& e) {\n    ENVOY_LOG(error, \"Could not parse AWS credentials document: {}\", e.what());\n    return;\n  }\n\n  const auto access_key_id = document_json->getString(ACCESS_KEY_ID, \"\");\n  const auto secret_access_key = document_json->getString(SECRET_ACCESS_KEY, \"\");\n  const auto session_token = document_json->getString(TOKEN, \"\");\n\n  ENVOY_LOG(debug, \"Found following AWS credentials in the instance metadata: {}={}, {}={}, {}={}\",\n            AWS_ACCESS_KEY_ID, access_key_id, AWS_SECRET_ACCESS_KEY,\n            secret_access_key.empty() ? \"\" : \"*****\", AWS_SESSION_TOKEN,\n            session_token.empty() ? \"\" : \"*****\");\n\n  cached_credentials_ = Credentials(access_key_id, secret_access_key, session_token);\n  last_updated_ = api_.timeSource().systemTime();\n}\n\nbool TaskRoleCredentialsProvider::needsRefresh() {\n  const auto now = api_.timeSource().systemTime();\n  return (now - last_updated_ > REFRESH_INTERVAL) ||\n         (expiration_time_ - now < REFRESH_GRACE_PERIOD);\n}\n\nvoid TaskRoleCredentialsProvider::refresh() {\n  ENVOY_LOG(debug, \"Getting AWS credentials from the task role at URI: {}\", credential_uri_);\n\n  absl::string_view host;\n  absl::string_view path;\n  Http::Utility::extractHostPathFromUri(credential_uri_, host, path);\n  const auto credential_document =\n      metadata_fetcher_(std::string(host.data(), host.size()),\n                        std::string(path.data(), path.size()), authorization_token_);\n  if (!credential_document) {\n    ENVOY_LOG(error, \"Could not load AWS credentials document from the task role\");\n    return;\n  }\n\n  
Json::ObjectSharedPtr document_json;\n  try {\n    document_json = Json::Factory::loadFromString(credential_document.value());\n  } catch (EnvoyException& e) {\n    ENVOY_LOG(error, \"Could not parse AWS credentials document from the task role: {}\", e.what());\n    return;\n  }\n\n  const auto access_key_id = document_json->getString(ACCESS_KEY_ID, \"\");\n  const auto secret_access_key = document_json->getString(SECRET_ACCESS_KEY, \"\");\n  const auto session_token = document_json->getString(TOKEN, \"\");\n\n  ENVOY_LOG(debug, \"Found following AWS credentials in the task role: {}={}, {}={}, {}={}\",\n            AWS_ACCESS_KEY_ID, access_key_id, AWS_SECRET_ACCESS_KEY,\n            secret_access_key.empty() ? \"\" : \"*****\", AWS_SESSION_TOKEN,\n            session_token.empty() ? \"\" : \"*****\");\n\n  const auto expiration_str = document_json->getString(EXPIRATION, \"\");\n  if (!expiration_str.empty()) {\n    absl::Time expiration_time;\n    if (absl::ParseTime(EXPIRATION_FORMAT, expiration_str, &expiration_time, nullptr)) {\n      ENVOY_LOG(debug, \"Task role AWS credentials expiration time: {}\", expiration_str);\n      expiration_time_ = absl::ToChronoTime(expiration_time);\n    }\n  }\n\n  last_updated_ = api_.timeSource().systemTime();\n  cached_credentials_ = Credentials(access_key_id, secret_access_key, session_token);\n}\n\nCredentials CredentialsProviderChain::getCredentials() {\n  for (auto& provider : providers_) {\n    const auto credentials = provider->getCredentials();\n    if (credentials.accessKeyId() && credentials.secretAccessKey()) {\n      return credentials;\n    }\n  }\n\n  ENVOY_LOG(debug, \"No AWS credentials found, using anonymous credentials\");\n  return Credentials();\n}\n\nDefaultCredentialsProviderChain::DefaultCredentialsProviderChain(\n    Api::Api& api, const MetadataCredentialsProviderBase::MetadataFetcher& metadata_fetcher,\n    const CredentialsProviderChainFactories& factories) {\n  ENVOY_LOG(debug, \"Using environment 
credentials provider\");\n  add(factories.createEnvironmentCredentialsProvider());\n\n  const auto relative_uri =\n      absl::NullSafeStringView(std::getenv(AWS_CONTAINER_CREDENTIALS_RELATIVE_URI));\n  const auto full_uri = absl::NullSafeStringView(std::getenv(AWS_CONTAINER_CREDENTIALS_FULL_URI));\n  const auto metadata_disabled = absl::NullSafeStringView(std::getenv(AWS_EC2_METADATA_DISABLED));\n\n  if (!relative_uri.empty()) {\n    const auto uri = absl::StrCat(CONTAINER_METADATA_HOST, relative_uri);\n    ENVOY_LOG(debug, \"Using task role credentials provider with URI: {}\", uri);\n    add(factories.createTaskRoleCredentialsProvider(api, metadata_fetcher, uri));\n  } else if (!full_uri.empty()) {\n    const auto authorization_token =\n        absl::NullSafeStringView(std::getenv(AWS_CONTAINER_AUTHORIZATION_TOKEN));\n    if (!authorization_token.empty()) {\n      ENVOY_LOG(debug,\n                \"Using task role credentials provider with URI: \"\n                \"{} and authorization token\",\n                full_uri);\n      add(factories.createTaskRoleCredentialsProvider(api, metadata_fetcher, full_uri,\n                                                      authorization_token));\n    } else {\n      ENVOY_LOG(debug, \"Using task role credentials provider with URI: {}\", full_uri);\n      add(factories.createTaskRoleCredentialsProvider(api, metadata_fetcher, full_uri));\n    }\n  } else if (metadata_disabled != TRUE) {\n    ENVOY_LOG(debug, \"Using instance profile credentials provider\");\n    add(factories.createInstanceProfileCredentialsProvider(api, metadata_fetcher));\n  }\n}\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/credentials_provider_impl.h",
    "content": "#pragma once\n\n#include <list>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n\n#include \"extensions/common/aws/credentials_provider.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\n/**\n * Retrieve AWS credentials from the environment variables.\n *\n * Adheres to conventions specified in:\n * https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html\n */\nclass EnvironmentCredentialsProvider : public CredentialsProvider,\n                                       public Logger::Loggable<Logger::Id::aws> {\npublic:\n  Credentials getCredentials() override;\n};\n\nclass MetadataCredentialsProviderBase : public CredentialsProvider,\n                                        public Logger::Loggable<Logger::Id::aws> {\npublic:\n  using MetadataFetcher = std::function<absl::optional<std::string>(\n      const std::string& host, const std::string& path, const std::string& auth_token)>;\n\n  MetadataCredentialsProviderBase(Api::Api& api, const MetadataFetcher& metadata_fetcher)\n      : api_(api), metadata_fetcher_(metadata_fetcher) {}\n\n  Credentials getCredentials() override {\n    refreshIfNeeded();\n    return cached_credentials_;\n  }\n\nprotected:\n  Api::Api& api_;\n  MetadataFetcher metadata_fetcher_;\n  SystemTime last_updated_;\n  Credentials cached_credentials_;\n  Thread::MutexBasicLockable lock_;\n\n  void refreshIfNeeded();\n\n  virtual bool needsRefresh() PURE;\n  virtual void refresh() PURE;\n};\n\n/**\n * Retrieve AWS credentials from the instance metadata.\n *\n * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials\n */\nclass InstanceProfileCredentialsProvider : public MetadataCredentialsProviderBase {\npublic:\n  InstanceProfileCredentialsProvider(Api::Api& api, const 
MetadataFetcher& metadata_fetcher)\n      : MetadataCredentialsProviderBase(api, metadata_fetcher) {}\n\nprivate:\n  bool needsRefresh() override;\n  void refresh() override;\n};\n\n/**\n * Retrieve AWS credentials from the task metadata.\n *\n * https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html#enable_task_iam_roles\n */\nclass TaskRoleCredentialsProvider : public MetadataCredentialsProviderBase {\npublic:\n  TaskRoleCredentialsProvider(Api::Api& api, const MetadataFetcher& metadata_fetcher,\n                              absl::string_view credential_uri,\n                              absl::string_view authorization_token = {})\n      : MetadataCredentialsProviderBase(api, metadata_fetcher), credential_uri_(credential_uri),\n        authorization_token_(authorization_token) {}\n\nprivate:\n  SystemTime expiration_time_;\n  std::string credential_uri_;\n  std::string authorization_token_;\n\n  bool needsRefresh() override;\n  void refresh() override;\n};\n\n/**\n * AWS credentials provider chain, able to fallback between multiple credential providers.\n */\nclass CredentialsProviderChain : public CredentialsProvider,\n                                 public Logger::Loggable<Logger::Id::aws> {\npublic:\n  ~CredentialsProviderChain() override = default;\n\n  void add(const CredentialsProviderSharedPtr& credentials_provider) {\n    providers_.emplace_back(credentials_provider);\n  }\n\n  Credentials getCredentials() override;\n\nprotected:\n  std::list<CredentialsProviderSharedPtr> providers_;\n};\n\nclass CredentialsProviderChainFactories {\npublic:\n  virtual ~CredentialsProviderChainFactories() = default;\n\n  virtual CredentialsProviderSharedPtr createEnvironmentCredentialsProvider() const PURE;\n\n  virtual CredentialsProviderSharedPtr createTaskRoleCredentialsProvider(\n      Api::Api& api, const MetadataCredentialsProviderBase::MetadataFetcher& metadata_fetcher,\n      absl::string_view credential_uri, absl::string_view 
authorization_token = {}) const PURE;\n\n  virtual CredentialsProviderSharedPtr createInstanceProfileCredentialsProvider(\n      Api::Api& api,\n      const MetadataCredentialsProviderBase::MetadataFetcher& metadata_fetcher) const PURE;\n};\n\n/**\n * Default AWS credentials provider chain.\n *\n * Reference implementation:\n * https://github.com/aws/aws-sdk-cpp/blob/master/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp#L44\n */\nclass DefaultCredentialsProviderChain : public CredentialsProviderChain,\n                                        public CredentialsProviderChainFactories {\npublic:\n  DefaultCredentialsProviderChain(\n      Api::Api& api, const MetadataCredentialsProviderBase::MetadataFetcher& metadata_fetcher)\n      : DefaultCredentialsProviderChain(api, metadata_fetcher, *this) {}\n\n  DefaultCredentialsProviderChain(\n      Api::Api& api, const MetadataCredentialsProviderBase::MetadataFetcher& metadata_fetcher,\n      const CredentialsProviderChainFactories& factories);\n\nprivate:\n  CredentialsProviderSharedPtr createEnvironmentCredentialsProvider() const override {\n    return std::make_shared<EnvironmentCredentialsProvider>();\n  }\n\n  CredentialsProviderSharedPtr createTaskRoleCredentialsProvider(\n      Api::Api& api, const MetadataCredentialsProviderBase::MetadataFetcher& metadata_fetcher,\n      absl::string_view credential_uri, absl::string_view authorization_token = {}) const override {\n    return std::make_shared<TaskRoleCredentialsProvider>(api, metadata_fetcher, credential_uri,\n                                                         authorization_token);\n  }\n\n  CredentialsProviderSharedPtr createInstanceProfileCredentialsProvider(\n      Api::Api& api,\n      const MetadataCredentialsProviderBase::MetadataFetcher& metadata_fetcher) const override {\n    return std::make_shared<InstanceProfileCredentialsProvider>(api, metadata_fetcher);\n  }\n};\n\n} // namespace Aws\n} // namespace Common\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/region_provider.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\n/**\n * Interface for classes capable of discovering the AWS region from the execution environment.\n */\nclass RegionProvider {\npublic:\n  virtual ~RegionProvider() = default;\n\n  /**\n   * Discover and return the AWS region.\n   * @return AWS region, or nullopt if unable to discover the region.\n   */\n  virtual absl::optional<std::string> getRegion() PURE;\n};\n\nusing RegionProviderPtr = std::unique_ptr<RegionProvider>;\nusing RegionProviderSharedPtr = std::shared_ptr<RegionProvider>;\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/region_provider_impl.cc",
    "content": "#include \"extensions/common/aws/region_provider_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nstatic const char AWS_REGION[] = \"AWS_REGION\";\n\nStaticRegionProvider::StaticRegionProvider(const std::string& region) : region_(region) {}\n\nabsl::optional<std::string> StaticRegionProvider::getRegion() {\n  return absl::optional<std::string>(region_);\n}\n\nabsl::optional<std::string> EnvironmentRegionProvider::getRegion() {\n  const auto region = std::getenv(AWS_REGION);\n  if (region == nullptr) {\n    return absl::nullopt;\n  }\n  ENVOY_LOG(debug, \"Found environment region {}={}\", AWS_REGION, region);\n  return absl::optional<std::string>(region);\n}\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/region_provider_impl.h",
    "content": "#pragma once\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/common/aws/region_provider.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\n/**\n * Retrieve AWS region name from the environment\n */\nclass EnvironmentRegionProvider : public RegionProvider, public Logger::Loggable<Logger::Id::aws> {\npublic:\n  absl::optional<std::string> getRegion() override;\n};\n\n/**\n * Return statically configured AWS region name\n */\nclass StaticRegionProvider : public RegionProvider {\npublic:\n  StaticRegionProvider(const std::string& region);\n\n  absl::optional<std::string> getRegion() override;\n\nprivate:\n  const std::string region_;\n};\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/signer.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/http/message.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nclass Signer {\npublic:\n  virtual ~Signer() = default;\n\n  /**\n   * Sign an AWS request.\n   * @param message an AWS API request message.\n   * @param sign_body include the message body in the signature. The body must be fully buffered.\n   * @throws EnvoyException if the request cannot be signed.\n   */\n  virtual void sign(Http::RequestMessage& message, bool sign_body) PURE;\n\n  /**\n   * Sign an AWS request.\n   * @param headers AWS API request headers.\n   * @throws EnvoyException if the request cannot be signed.\n   */\n  virtual void sign(Http::RequestHeaderMap& headers) PURE;\n\n  /**\n   * Sign an AWS request.\n   * @param headers AWS API request headers.\n   * @param content_hash The Hex encoded SHA-256 of the body of the AWS API request.\n   * @throws EnvoyException if the request cannot be signed.\n   */\n  virtual void sign(Http::RequestHeaderMap& headers, const std::string& content_hash) PURE;\n};\n\nusing SignerPtr = std::unique_ptr<Signer>;\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/signer_impl.cc",
    "content": "#include \"extensions/common/aws/signer_impl.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/hex.h\"\n#include \"common/crypto/utility.h\"\n#include \"common/http/headers.h\"\n\n#include \"extensions/common/aws/utility.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nvoid SignerImpl::sign(Http::RequestMessage& message, bool sign_body) {\n  const auto content_hash = createContentHash(message, sign_body);\n  auto& headers = message.headers();\n  sign(headers, content_hash);\n}\n\nvoid SignerImpl::sign(Http::RequestHeaderMap& headers) {\n  if (require_content_hash_) {\n    headers.setReference(SignatureHeaders::get().ContentSha256,\n                         SignatureConstants::get().UnsignedPayload);\n    sign(headers, SignatureConstants::get().UnsignedPayload);\n  } else {\n    headers.setReference(SignatureHeaders::get().ContentSha256,\n                         SignatureConstants::get().HashedEmptyString);\n    sign(headers, SignatureConstants::get().HashedEmptyString);\n  }\n}\n\nvoid SignerImpl::sign(Http::RequestHeaderMap& headers, const std::string& content_hash) {\n  headers.setReferenceKey(SignatureHeaders::get().ContentSha256, content_hash);\n  const auto& credentials = credentials_provider_->getCredentials();\n  if (!credentials.accessKeyId() || !credentials.secretAccessKey()) {\n    // Empty or \"anonymous\" credentials are a valid use-case for non-production environments.\n    // This behavior matches what the AWS SDK would do.\n    return;\n  }\n  const auto* method_header = headers.Method();\n  if (method_header == nullptr) {\n    throw EnvoyException(\"Message is missing :method header\");\n  }\n  const auto* path_header = headers.Path();\n  if (path_header == nullptr) {\n    throw EnvoyException(\"Message is missing :path header\");\n  }\n  if 
(credentials.sessionToken()) {\n    headers.addCopy(SignatureHeaders::get().SecurityToken, credentials.sessionToken().value());\n  }\n  const auto long_date = long_date_formatter_.now(time_source_);\n  const auto short_date = short_date_formatter_.now(time_source_);\n  headers.addCopy(SignatureHeaders::get().Date, long_date);\n  // Phase 1: Create a canonical request\n  const auto canonical_headers = Utility::canonicalizeHeaders(headers);\n  const auto canonical_request = Utility::createCanonicalRequest(\n      method_header->value().getStringView(), path_header->value().getStringView(),\n      canonical_headers, content_hash);\n  ENVOY_LOG(debug, \"Canonical request:\\n{}\", canonical_request);\n  // Phase 2: Create a string to sign\n  const auto credential_scope = createCredentialScope(short_date);\n  const auto string_to_sign = createStringToSign(canonical_request, long_date, credential_scope);\n  ENVOY_LOG(debug, \"String to sign:\\n{}\", string_to_sign);\n  // Phase 3: Create a signature\n  const auto signature =\n      createSignature(credentials.secretAccessKey().value(), short_date, string_to_sign);\n  // Phase 4: Sign request\n  const auto authorization_header = createAuthorizationHeader(\n      credentials.accessKeyId().value(), credential_scope, canonical_headers, signature);\n  ENVOY_LOG(debug, \"Signing request with: {}\", authorization_header);\n  headers.addCopy(Http::CustomHeaders::get().Authorization, authorization_header);\n}\n\nstd::string SignerImpl::createContentHash(Http::RequestMessage& message, bool sign_body) const {\n  if (!sign_body) {\n    return SignatureConstants::get().HashedEmptyString;\n  }\n  auto& crypto_util = Envoy::Common::Crypto::UtilitySingleton::get();\n  const auto content_hash = message.body().length() > 0\n                                ? 
Hex::encode(crypto_util.getSha256Digest(message.body()))\n                                : SignatureConstants::get().HashedEmptyString;\n  return content_hash;\n}\n\nstd::string SignerImpl::createCredentialScope(absl::string_view short_date) const {\n  return fmt::format(SignatureConstants::get().CredentialScopeFormat, short_date, region_,\n                     service_name_);\n}\n\nstd::string SignerImpl::createStringToSign(absl::string_view canonical_request,\n                                           absl::string_view long_date,\n                                           absl::string_view credential_scope) const {\n  auto& crypto_util = Envoy::Common::Crypto::UtilitySingleton::get();\n  return fmt::format(\n      SignatureConstants::get().StringToSignFormat, long_date, credential_scope,\n      Hex::encode(crypto_util.getSha256Digest(Buffer::OwnedImpl(canonical_request))));\n}\n\nstd::string SignerImpl::createSignature(absl::string_view secret_access_key,\n                                        absl::string_view short_date,\n                                        absl::string_view string_to_sign) const {\n  auto& crypto_util = Envoy::Common::Crypto::UtilitySingleton::get();\n  const auto secret_key =\n      absl::StrCat(SignatureConstants::get().SignatureVersion, secret_access_key);\n  const auto date_key = crypto_util.getSha256Hmac(\n      std::vector<uint8_t>(secret_key.begin(), secret_key.end()), short_date);\n  const auto region_key = crypto_util.getSha256Hmac(date_key, region_);\n  const auto service_key = crypto_util.getSha256Hmac(region_key, service_name_);\n  const auto signing_key =\n      crypto_util.getSha256Hmac(service_key, SignatureConstants::get().Aws4Request);\n  return Hex::encode(crypto_util.getSha256Hmac(signing_key, string_to_sign));\n}\n\nstd::string\nSignerImpl::createAuthorizationHeader(absl::string_view access_key_id,\n                                      absl::string_view credential_scope,\n                                      
const std::map<std::string, std::string>& canonical_headers,\n                                      absl::string_view signature) const {\n  const auto signed_headers = Utility::joinCanonicalHeaderNames(canonical_headers);\n  return fmt::format(SignatureConstants::get().AuthorizationHeaderFormat, access_key_id,\n                     credential_scope, signed_headers, signature);\n}\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/signer_impl.h",
    "content": "#pragma once\n\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/common/aws/credentials_provider.h\"\n#include \"extensions/common/aws/signer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nclass SignatureHeaderValues {\npublic:\n  const Http::LowerCaseString ContentSha256{\"x-amz-content-sha256\"};\n  const Http::LowerCaseString Date{\"x-amz-date\"};\n  const Http::LowerCaseString SecurityToken{\"x-amz-security-token\"};\n};\n\nusing SignatureHeaders = ConstSingleton<SignatureHeaderValues>;\n\nclass SignatureConstantValues {\npublic:\n  const std::string Aws4Request{\"aws4_request\"};\n  const std::string AuthorizationHeaderFormat{\n      \"AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}\"};\n  const std::string CredentialScopeFormat{\"{}/{}/{}/aws4_request\"};\n  const std::string HashedEmptyString{\n      \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\"};\n  const std::string SignatureVersion{\"AWS4\"};\n  const std::string StringToSignFormat{\"AWS4-HMAC-SHA256\\n{}\\n{}\\n{}\"};\n\n  const std::string LongDateFormat{\"%Y%m%dT%H%M00Z\"};\n  const std::string ShortDateFormat{\"%Y%m%d\"};\n  const std::string UnsignedPayload{\"UNSIGNED-PAYLOAD\"};\n};\n\nusing SignatureConstants = ConstSingleton<SignatureConstantValues>;\n\n/**\n * Implementation of the Signature V4 signing process.\n * See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html\n */\nclass SignerImpl : public Signer, public Logger::Loggable<Logger::Id::http> {\npublic:\n  SignerImpl(absl::string_view service_name, absl::string_view region,\n             const CredentialsProviderSharedPtr& credentials_provider, TimeSource& time_source)\n      : service_name_(service_name), region_(region),\n\n        // S3, Glacier, ES payloads require special treatment.\n        // S3:\n        // 
https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.\n        // ES:\n        // https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-request-signing.html.\n        // Glacier:\n        // https://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-signing-requests.html.\n        require_content_hash_{service_name_ == \"s3\" || service_name_ == \"glacier\" ||\n                              service_name_ == \"es\"},\n        credentials_provider_(credentials_provider), time_source_(time_source),\n        long_date_formatter_(SignatureConstants::get().LongDateFormat),\n        short_date_formatter_(SignatureConstants::get().ShortDateFormat) {}\n\n  void sign(Http::RequestMessage& message, bool sign_body = false) override;\n  void sign(Http::RequestHeaderMap& headers) override;\n\nprivate:\n  std::string createContentHash(Http::RequestMessage& message, bool sign_body) const;\n\n  std::string createCredentialScope(absl::string_view short_date) const;\n\n  std::string createStringToSign(absl::string_view canonical_request, absl::string_view long_date,\n                                 absl::string_view credential_scope) const;\n\n  std::string createSignature(absl::string_view secret_access_key, absl::string_view short_date,\n                              absl::string_view string_to_sign) const;\n\n  std::string createAuthorizationHeader(absl::string_view access_key_id,\n                                        absl::string_view credential_scope,\n                                        const std::map<std::string, std::string>& canonical_headers,\n                                        absl::string_view signature) const;\n\n  void sign(Http::RequestHeaderMap& headers, const std::string& content_hash) override;\n\n  const std::string service_name_;\n  const std::string region_;\n\n  const bool require_content_hash_;\n  CredentialsProviderSharedPtr credentials_provider_;\n  TimeSource& time_source_;\n  
DateFormatter long_date_formatter_;\n  DateFormatter short_date_formatter_;\n};\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/utility.cc",
    "content": "#include \"extensions/common/aws/utility.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"curl/curl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nstd::map<std::string, std::string>\nUtility::canonicalizeHeaders(const Http::RequestHeaderMap& headers) {\n  std::map<std::string, std::string> out;\n  headers.iterate([&out](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate {\n    // Skip empty headers\n    if (entry.key().empty() || entry.value().empty()) {\n      return Http::HeaderMap::Iterate::Continue;\n    }\n    // Pseudo-headers should not be canonicalized\n    if (!entry.key().getStringView().empty() && entry.key().getStringView()[0] == ':') {\n      return Http::HeaderMap::Iterate::Continue;\n    }\n    // Skip headers that are likely to mutate, when crossing proxies\n    const auto key = entry.key().getStringView();\n    if (key == Http::Headers::get().ForwardedFor.get() ||\n        key == Http::Headers::get().ForwardedProto.get() || key == \"x-amzn-trace-id\") {\n      return Http::HeaderMap::Iterate::Continue;\n    }\n\n    std::string value(entry.value().getStringView());\n    // Remove leading, trailing, and deduplicate repeated ascii spaces\n    absl::RemoveExtraAsciiWhitespace(&value);\n    const auto iter = out.find(std::string(entry.key().getStringView()));\n    // If the entry already exists, append the new value to the end\n    if (iter != out.end()) {\n      iter->second += fmt::format(\",{}\", value);\n    } else {\n      out.emplace(std::string(entry.key().getStringView()), value);\n    }\n    return Http::HeaderMap::Iterate::Continue;\n  });\n  // The AWS SDK has a quirk where it removes \"default ports\" (80, 443) from the host headers\n  // Additionally, we canonicalize the :authority header as \"host\"\n  // TODO(lavignes): This may need to be tweaked to canonicalize :authority for HTTP/2 
requests\n  const absl::string_view authority_header = headers.getHostValue();\n  if (!authority_header.empty()) {\n    const auto parts = StringUtil::splitToken(authority_header, \":\");\n    if (parts.size() > 1 && (parts[1] == \"80\" || parts[1] == \"443\")) {\n      // Has default port, so use only the host part\n      out.emplace(Http::Headers::get().HostLegacy.get(), std::string(parts[0]));\n    } else {\n      out.emplace(Http::Headers::get().HostLegacy.get(), std::string(authority_header));\n    }\n  }\n  return out;\n}\n\nstd::string\nUtility::createCanonicalRequest(absl::string_view method, absl::string_view path,\n                                const std::map<std::string, std::string>& canonical_headers,\n                                absl::string_view content_hash) {\n  std::vector<absl::string_view> parts;\n  parts.emplace_back(method);\n  // don't include the query part of the path\n  const auto path_part = StringUtil::cropRight(path, \"?\");\n  parts.emplace_back(path_part.empty() ? \"/\" : path_part);\n  const auto query_part = StringUtil::cropLeft(path, \"?\");\n  // if query_part == path_part, then there is no query\n  parts.emplace_back(query_part == path_part ? 
\"\" : query_part);\n  std::vector<std::string> formatted_headers;\n  formatted_headers.reserve(canonical_headers.size());\n  for (const auto& header : canonical_headers) {\n    formatted_headers.emplace_back(fmt::format(\"{}:{}\", header.first, header.second));\n    parts.emplace_back(formatted_headers.back());\n  }\n  // need an extra blank space after the canonical headers\n  parts.emplace_back(\"\");\n  const auto signed_headers = Utility::joinCanonicalHeaderNames(canonical_headers);\n  parts.emplace_back(signed_headers);\n  parts.emplace_back(content_hash);\n  return absl::StrJoin(parts, \"\\n\");\n}\n\nstd::string\nUtility::joinCanonicalHeaderNames(const std::map<std::string, std::string>& canonical_headers) {\n  return absl::StrJoin(canonical_headers, \";\", [](auto* out, const auto& pair) {\n    return absl::StrAppend(out, pair.first);\n  });\n}\n\nstatic size_t curlCallback(char* ptr, size_t, size_t nmemb, void* data) {\n  auto buf = static_cast<std::string*>(data);\n  buf->append(ptr, nmemb);\n  return nmemb;\n}\n\nabsl::optional<std::string> Utility::metadataFetcher(const std::string& host,\n                                                     const std::string& path,\n                                                     const std::string& auth_token) {\n  static const size_t MAX_RETRIES = 4;\n  static const std::chrono::milliseconds RETRY_DELAY{1000};\n  static const std::chrono::seconds TIMEOUT{5};\n\n  CURL* const curl = curl_easy_init();\n  if (!curl) {\n    return absl::nullopt;\n  };\n\n  const std::string url = fmt::format(\"http://{}/{}\", host, path);\n  curl_easy_setopt(curl, CURLOPT_URL, url.c_str());\n  curl_easy_setopt(curl, CURLOPT_TIMEOUT, TIMEOUT.count());\n  curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);\n  curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);\n\n  std::string buffer;\n  curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buffer);\n  curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, curlCallback);\n\n  struct curl_slist* headers = 
nullptr;\n  if (!auth_token.empty()) {\n    const std::string auth = fmt::format(\"Authorization: {}\", auth_token);\n    headers = curl_slist_append(headers, auth.c_str());\n    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);\n  }\n\n  for (size_t retry = 0; retry < MAX_RETRIES; retry++) {\n    const CURLcode res = curl_easy_perform(curl);\n    if (res == CURLE_OK) {\n      break;\n    }\n    ENVOY_LOG_MISC(debug, \"Could not fetch AWS metadata: {}\", curl_easy_strerror(res));\n    buffer.clear();\n    std::this_thread::sleep_for(RETRY_DELAY);\n  }\n\n  curl_easy_cleanup(curl);\n  curl_slist_free_all(headers);\n\n  return buffer.empty() ? absl::nullopt : absl::optional<std::string>(buffer);\n}\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/aws/utility.h",
    "content": "#pragma once\n\n#include \"common/http/headers.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nclass Utility {\npublic:\n  /**\n   * Creates a canonicalized header map used in creating a AWS Signature V4 canonical request.\n   * See https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html\n   * @param headers a header map to canonicalize.\n   * @return a std::map of canonicalized headers to be used in building a canonical request.\n   */\n  static std::map<std::string, std::string>\n  canonicalizeHeaders(const Http::RequestHeaderMap& headers);\n\n  /**\n   * Creates an AWS Signature V4 canonical request string.\n   * See https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html\n   * @param method the HTTP request method.\n   * @param path the request path.\n   * @param canonical_headers the pre-canonicalized request headers.\n   * @param content_hash the hashed request body.\n   * @return the canonicalized request string.\n   */\n  static std::string\n  createCanonicalRequest(absl::string_view method, absl::string_view path,\n                         const std::map<std::string, std::string>& canonical_headers,\n                         absl::string_view content_hash);\n\n  /**\n   * Get the semicolon-delimited string of canonical header names.\n   * @param canonical_headers the pre-canonicalized request headers.\n   * @return the header names as a semicolon-delimited string.\n   */\n  static std::string\n  joinCanonicalHeaderNames(const std::map<std::string, std::string>& canonical_headers);\n\n  /**\n   * Fetch AWS instance or task metadata.\n   *\n   * @param host host or ip address of the metadata endpoint.\n   * @param path path of the metadata document.\n   * @auth_token authentication token to pass in the request, empty string indicates no auth.\n   * @return Metadata document or nullopt in case if unable to fetch it.\n   *\n   * @note In case of an 
error, function will log ENVOY_LOG_MISC(debug) message.\n   *\n   * @note This is not main loop safe method as it is blocking. It is intended to be used from the\n   * gRPC auth plugins that are able to schedule blocking plugins on a different thread.\n   */\n  static absl::optional<std::string>\n  metadataFetcher(const std::string& host, const std::string& path, const std::string& auth_token);\n};\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/crypto/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"utility_lib\",\n    srcs = [\n        \"crypto_impl.cc\",\n        \"utility_impl.cc\",\n    ],\n    hdrs = [\n        \"crypto_impl.h\",\n        \"utility_impl.h\",\n    ],\n    external_deps = [\n        \"ssl\",\n    ],\n    # Legacy test use. TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/common/config:__subpackages__\",\n        \"//test/common/crypto:__subpackages__\",\n    ],\n    security_posture = \"unknown\",\n    undocumented = True,\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/crypto:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/common/crypto/crypto_impl.cc",
    "content": "#include \"extensions/common/crypto/crypto_impl.h\"\n\nnamespace Envoy {\nnamespace Common {\nnamespace Crypto {\n\nEVP_PKEY* PublicKeyObject::getEVP_PKEY() const { return pkey_.get(); }\n\nvoid PublicKeyObject::setEVP_PKEY(EVP_PKEY* pkey) { pkey_.reset(pkey); }\n\n} // namespace Crypto\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/crypto/crypto_impl.h",
    "content": "#pragma once\n\n#include \"envoy/common/crypto/crypto.h\"\n\n#include \"openssl/base.h\"\n#include \"openssl/evp.h\"\n\nnamespace Envoy {\nnamespace Common {\nnamespace Crypto {\n\nclass PublicKeyObject : public Envoy::Common::Crypto::CryptoObject {\npublic:\n  PublicKeyObject() = default;\n  PublicKeyObject(EVP_PKEY* pkey) : pkey_(pkey) {}\n  PublicKeyObject(const PublicKeyObject& pkey_wrapper);\n  EVP_PKEY* getEVP_PKEY() const;\n  void setEVP_PKEY(EVP_PKEY* pkey);\n\nprivate:\n  bssl::UniquePtr<EVP_PKEY> pkey_;\n};\n\n} // namespace Crypto\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/crypto/utility_impl.cc",
    "content": "#include \"extensions/common/crypto/utility_impl.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/common/crypto/crypto_impl.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Common {\nnamespace Crypto {\n\nstd::vector<uint8_t> UtilityImpl::getSha256Digest(const Buffer::Instance& buffer) {\n  std::vector<uint8_t> digest(SHA256_DIGEST_LENGTH);\n  bssl::ScopedEVP_MD_CTX ctx;\n  auto rc = EVP_DigestInit(ctx.get(), EVP_sha256());\n  RELEASE_ASSERT(rc == 1, \"Failed to init digest context\");\n  for (const auto& slice : buffer.getRawSlices()) {\n    rc = EVP_DigestUpdate(ctx.get(), slice.mem_, slice.len_);\n    RELEASE_ASSERT(rc == 1, \"Failed to update digest\");\n  }\n  rc = EVP_DigestFinal(ctx.get(), digest.data(), nullptr);\n  RELEASE_ASSERT(rc == 1, \"Failed to finalize digest\");\n  return digest;\n}\n\nstd::vector<uint8_t> UtilityImpl::getSha256Hmac(const std::vector<uint8_t>& key,\n                                                absl::string_view message) {\n  std::vector<uint8_t> hmac(SHA256_DIGEST_LENGTH);\n  const auto ret =\n      HMAC(EVP_sha256(), key.data(), key.size(), reinterpret_cast<const uint8_t*>(message.data()),\n           message.size(), hmac.data(), nullptr);\n  RELEASE_ASSERT(ret != nullptr, \"Failed to create HMAC\");\n  return hmac;\n}\n\nconst VerificationOutput UtilityImpl::verifySignature(absl::string_view hash, CryptoObject& key,\n                                                      const std::vector<uint8_t>& signature,\n                                                      const std::vector<uint8_t>& text) {\n  // Step 1: initialize EVP_MD_CTX\n  bssl::ScopedEVP_MD_CTX ctx;\n\n  // Step 2: initialize EVP_MD\n  const EVP_MD* md = getHashFunction(hash);\n\n  if (md == nullptr) {\n    return {false, absl::StrCat(hash, \" is not supported.\")};\n  }\n  // Step 3: initialize EVP_DigestVerify\n  auto 
pkey_wrapper = Common::Crypto::Access::getTyped<Common::Crypto::PublicKeyObject>(key);\n  EVP_PKEY* pkey = pkey_wrapper->getEVP_PKEY();\n\n  if (pkey == nullptr) {\n    return {false, \"Failed to initialize digest verify.\"};\n  }\n\n  int ok = EVP_DigestVerifyInit(ctx.get(), nullptr, md, nullptr, pkey);\n  if (!ok) {\n    return {false, \"Failed to initialize digest verify.\"};\n  }\n\n  // Step 4: verify signature\n  ok = EVP_DigestVerify(ctx.get(), signature.data(), signature.size(), text.data(), text.size());\n\n  // Step 5: check result\n  if (ok == 1) {\n    return {true, \"\"};\n  }\n\n  return {false, absl::StrCat(\"Failed to verify digest. Error code: \", ok)};\n}\n\nCryptoObjectPtr UtilityImpl::importPublicKey(const std::vector<uint8_t>& key) {\n  CBS cbs({key.data(), key.size()});\n\n  return std::make_unique<PublicKeyObject>(EVP_parse_public_key(&cbs));\n}\n\nconst EVP_MD* UtilityImpl::getHashFunction(absl::string_view name) {\n  const std::string hash = absl::AsciiStrToLower(name);\n\n  // Hash algorithms set refers\n  // https://github.com/google/boringssl/blob/master/include/openssl/digest.h\n  if (hash == \"sha1\") {\n    return EVP_sha1();\n  } else if (hash == \"sha224\") {\n    return EVP_sha224();\n  } else if (hash == \"sha256\") {\n    return EVP_sha256();\n  } else if (hash == \"sha384\") {\n    return EVP_sha384();\n  } else if (hash == \"sha512\") {\n    return EVP_sha512();\n  } else {\n    return nullptr;\n  }\n}\n\n// Register the crypto utility singleton.\nstatic Crypto::ScopedUtilitySingleton* utility_ =\n    new Crypto::ScopedUtilitySingleton(std::make_unique<Crypto::UtilityImpl>());\n\n} // namespace Crypto\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/crypto/utility_impl.h",
    "content": "#pragma once\n\n#include \"common/crypto/utility.h\"\n\n#include \"openssl/bytestring.h\"\n#include \"openssl/hmac.h\"\n#include \"openssl/sha.h\"\n\nnamespace Envoy {\nnamespace Common {\nnamespace Crypto {\n\nclass UtilityImpl : public Envoy::Common::Crypto::Utility {\npublic:\n  std::vector<uint8_t> getSha256Digest(const Buffer::Instance& buffer) override;\n  std::vector<uint8_t> getSha256Hmac(const std::vector<uint8_t>& key,\n                                     absl::string_view message) override;\n  const VerificationOutput verifySignature(absl::string_view hash, CryptoObject& key,\n                                           const std::vector<uint8_t>& signature,\n                                           const std::vector<uint8_t>& text) override;\n  CryptoObjectPtr importPublicKey(const std::vector<uint8_t>& key) override;\n\nprivate:\n  const EVP_MD* getHashFunction(absl::string_view name);\n};\n\n} // namespace Crypto\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/dynamic_forward_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"dns_cache_interface\",\n    hdrs = [\"dns_cache.h\"],\n    deps = [\n        \"//include/envoy/common:backoff_strategy_interface\",\n        \"//include/envoy/common:random_generator_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:resource_manager_interface\",\n        \"@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dns_cache_manager_impl\",\n    srcs = [\"dns_cache_manager_impl.cc\"],\n    hdrs = [\"dns_cache_manager_impl.h\"],\n    deps = [\n        \":dns_cache_impl\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dns_cache_impl\",\n    srcs = [\"dns_cache_impl.cc\"],\n    hdrs = [\"dns_cache_impl.h\"],\n    deps = [\n        \":dns_cache_interface\",\n        \":dns_cache_resource_manager\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dns_cache_resource_manager\",\n    srcs = [\"dns_cache_resource_manager.cc\"],\n    hdrs = [\"dns_cache_resource_manager.h\"],\n    deps = [\n        \":dns_cache_interface\",\n        
\"//include/envoy/common:resource_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:basic_resource_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/upstream:resource_manager_lib\",\n        \"@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/common/dynamic_forward_proxy/dns_cache.h",
    "content": "#pragma once\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h\"\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/resource_manager.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\n\n/**\n * A cached DNS host.\n */\nclass DnsHostInfo {\npublic:\n  virtual ~DnsHostInfo() = default;\n\n  /**\n   * Returns the host's currently resolved address. This address may change periodically due to\n   * async re-resolution.\n   */\n  virtual Network::Address::InstanceConstSharedPtr address() PURE;\n\n  /**\n   * Returns the host that was actually resolved via DNS. If port was originally specified it will\n   * be stripped from this return value.\n   */\n  virtual const std::string& resolvedHost() const PURE;\n\n  /**\n   * Returns whether the original host is an IP address.\n   */\n  virtual bool isIpAddress() const PURE;\n\n  /**\n   * Indicates that the host has been used and should not be purged depending on any configured\n   * TTL policy\n   */\n  virtual void touch() PURE;\n};\n\nusing DnsHostInfoSharedPtr = std::shared_ptr<DnsHostInfo>;\n\n#define ALL_DNS_CACHE_CIRCUIT_BREAKERS_STATS(OPEN_GAUGE, REMAINING_GAUGE)                          \\\n  OPEN_GAUGE(rq_pending_open, Accumulate)                                                          \\\n  REMAINING_GAUGE(rq_pending_remaining, Accumulate)\n\nstruct DnsCacheCircuitBreakersStats {\n  ALL_DNS_CACHE_CIRCUIT_BREAKERS_STATS(GENERATE_GAUGE_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * A resource manager of DNS Cache.\n */\nclass DnsCacheResourceManager {\npublic:\n  virtual ~DnsCacheResourceManager() = default;\n\n  /**\n   * Returns the resource limit of pending requests to DNS.\n   */\n  virtual ResourceLimit& pendingRequests() PURE;\n\n  /**\n   * Returns the reference of stats 
for dns cache circuit breakers.\n   */\n  virtual DnsCacheCircuitBreakersStats& stats() PURE;\n};\n\n/**\n * A cache of DNS hosts. Hosts will re-resolve their addresses or be automatically purged\n * depending on configured policy.\n */\nclass DnsCache {\npublic:\n  /**\n   * Callbacks used in the loadDnsCacheEntry() method.\n   */\n  class LoadDnsCacheEntryCallbacks {\n  public:\n    virtual ~LoadDnsCacheEntryCallbacks() = default;\n\n    /**\n     * Called when the DNS cache load is complete (or failed).\n     */\n    virtual void onLoadDnsCacheComplete() PURE;\n  };\n\n  /**\n   * Handle returned from loadDnsCacheEntry(). Destruction of the handle will cancel any future\n   * callback.\n   */\n  class LoadDnsCacheEntryHandle {\n  public:\n    virtual ~LoadDnsCacheEntryHandle() = default;\n  };\n\n  using LoadDnsCacheEntryHandlePtr = std::unique_ptr<LoadDnsCacheEntryHandle>;\n\n  /**\n   * Update callbacks that can be registered in the addUpdateCallbacks() method.\n   */\n  class UpdateCallbacks {\n  public:\n    virtual ~UpdateCallbacks() = default;\n\n    /**\n     * Called when a host has been added or has had its address updated.\n     * @param host supplies the added/updated host.\n     * @param host_info supplies the associated host info.\n     */\n    virtual void onDnsHostAddOrUpdate(const std::string& host,\n                                      const DnsHostInfoSharedPtr& host_info) PURE;\n\n    /**\n     * Called when a host has been removed.\n     * @param host supplies the removed host.\n     */\n    virtual void onDnsHostRemove(const std::string& host) PURE;\n  };\n\n  /**\n   * Handle returned from addUpdateCallbacks(). 
Destruction of the handle will remove the\n   * registered callbacks.\n   */\n  class AddUpdateCallbacksHandle {\n  public:\n    virtual ~AddUpdateCallbacksHandle() = default;\n  };\n\n  using AddUpdateCallbacksHandlePtr = std::unique_ptr<AddUpdateCallbacksHandle>;\n\n  virtual ~DnsCache() = default;\n\n  /**\n   * Initiate a DNS cache load.\n   * @param host supplies the host to load. Hosts are cached inclusive of port, even though the\n   *             port will be stripped during resolution. This means that 'a.b.c' and 'a.b.c:9001'\n   *             will both resolve 'a.b.c' but will generate different host entries with different\n   *             target ports.\n   * @param default_port supplies the port to use if the host does not have a port embedded in it.\n   * @param callbacks supplies the cache load callbacks to invoke if async processing is needed.\n   * @return a cache load result which includes both a status and handle. If the handle is non-null\n   *         the callbacks will be invoked at a later time, otherwise consult the status for the\n   *         reason the cache is not loading. In this case, callbacks will never be called.\n   */\n  enum class LoadDnsCacheEntryStatus {\n    // The cache entry is already loaded. Callbacks will not be called.\n    InCache,\n    // The cache entry is loading. Callbacks will be called at a later time unless cancelled.\n    Loading,\n    // The cache is full and the requested host is not in cache. 
Callbacks will not be called.\n    Overflow\n  };\n\n  struct LoadDnsCacheEntryResult {\n    LoadDnsCacheEntryStatus status_;\n    LoadDnsCacheEntryHandlePtr handle_;\n  };\n\n  virtual LoadDnsCacheEntryResult loadDnsCacheEntry(absl::string_view host, uint16_t default_port,\n                                                    LoadDnsCacheEntryCallbacks& callbacks) PURE;\n\n  /**\n   * Add update callbacks to the cache.\n   * @param callbacks supplies the callbacks to add.\n   * @return a handle that on destruction will de-register the callbacks.\n   */\n  virtual AddUpdateCallbacksHandlePtr addUpdateCallbacks(UpdateCallbacks& callbacks) PURE;\n\n  /**\n   * @return all hosts currently stored in the cache.\n   */\n  virtual absl::flat_hash_map<std::string, DnsHostInfoSharedPtr> hosts() PURE;\n\n  /**\n   * Check if a DNS request is allowed given resource limits.\n   * @param pending_request optional pending request resource limit. If no resource limit is\n   * provided the internal DNS cache limit is used.\n   * @return RAII handle for pending request circuit breaker if the request was allowed.\n   */\n  virtual Upstream::ResourceAutoIncDecPtr\n  canCreateDnsRequest(ResourceLimitOptRef pending_request) PURE;\n};\n\nusing DnsCacheSharedPtr = std::shared_ptr<DnsCache>;\n\n/**\n * A manager for multiple DNS caches.\n */\nclass DnsCacheManager {\npublic:\n  virtual ~DnsCacheManager() = default;\n\n  /**\n   * Get a DNS cache.\n   * @param config supplies the cache parameters. 
If a cache exists with the same parameters it\n   *               will be returned, otherwise a new one will be created.\n   */\n  virtual DnsCacheSharedPtr\n  getCache(const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) PURE;\n};\n\nusing DnsCacheManagerSharedPtr = std::shared_ptr<DnsCacheManager>;\n\n/**\n * Get the singleton cache manager for the entire server.\n */\nDnsCacheManagerSharedPtr getCacheManager(Singleton::Manager& manager,\n                                         Event::Dispatcher& main_thread_dispatcher,\n                                         ThreadLocal::SlotAllocator& tls,\n                                         Random::RandomGenerator& random, Runtime::Loader& loader,\n                                         Stats::Scope& root_scope);\n\n/**\n * Factory for getting a DNS cache manager.\n */\nclass DnsCacheManagerFactory {\npublic:\n  virtual ~DnsCacheManagerFactory() = default;\n\n  /**\n   * Get a DNS cache manager.\n   */\n  virtual DnsCacheManagerSharedPtr get() PURE;\n};\n\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc",
    "content": "#include \"extensions/common/dynamic_forward_proxy/dns_cache_impl.h\"\n\n#include \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n\n// TODO(mattklein123): Move DNS family helpers to a smaller include.\n#include \"common/upstream/upstream_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\n\nDnsCacheImpl::DnsCacheImpl(\n    Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls,\n    Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope,\n    const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config)\n    : main_thread_dispatcher_(main_thread_dispatcher),\n      dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())),\n      resolver_(main_thread_dispatcher.createDnsResolver({}, config.use_tcp_for_dns_lookups())),\n      tls_slot_(tls.allocateSlot()),\n      scope_(root_scope.createScope(fmt::format(\"dns_cache.{}.\", config.name()))),\n      stats_(generateDnsCacheStats(*scope_)),\n      resource_manager_(*scope_, loader, config.name(), config.dns_cache_circuit_breaker()),\n      refresh_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_refresh_rate, 60000)),\n      failure_backoff_strategy_(\n          Config::Utility::prepareDnsRefreshStrategy<\n              envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig>(\n              config, refresh_interval_.count(), random)),\n      host_ttl_(PROTOBUF_GET_MS_OR_DEFAULT(config, host_ttl, 300000)),\n      max_hosts_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_hosts, 1024)) {\n  tls_slot_->set([](Event::Dispatcher&) { return std::make_shared<ThreadLocalHostInfo>(); });\n  updateTlsHostsMap();\n}\n\nDnsCacheImpl::~DnsCacheImpl() {\n  for (const auto& primary_host : primary_hosts_) {\n    if 
(primary_host.second->active_query_ != nullptr) {\n      primary_host.second->active_query_->cancel();\n    }\n  }\n\n  for (auto update_callbacks : update_callbacks_) {\n    update_callbacks->cancel();\n  }\n}\n\nDnsCacheStats DnsCacheImpl::generateDnsCacheStats(Stats::Scope& scope) {\n  return {ALL_DNS_CACHE_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))};\n}\n\nDnsCacheImpl::LoadDnsCacheEntryResult\nDnsCacheImpl::loadDnsCacheEntry(absl::string_view host, uint16_t default_port,\n                                LoadDnsCacheEntryCallbacks& callbacks) {\n  ENVOY_LOG(debug, \"thread local lookup for host '{}'\", host);\n  auto& tls_host_info = tls_slot_->getTyped<ThreadLocalHostInfo>();\n  auto tls_host = tls_host_info.host_map_->find(host);\n  if (tls_host != tls_host_info.host_map_->end()) {\n    ENVOY_LOG(debug, \"thread local hit for host '{}'\", host);\n    return {LoadDnsCacheEntryStatus::InCache, nullptr};\n  } else if (tls_host_info.host_map_->size() >= max_hosts_) {\n    // Given that we do this check in thread local context, it's possible for two threads to race\n    // and potentially go slightly above the configured max hosts. 
This is an OK compromise\n    given how much simpler the implementation is.\n    ENVOY_LOG(debug, \"DNS cache overflow for host '{}'\", host);\n    stats_.host_overflow_.inc();\n    return {LoadDnsCacheEntryStatus::Overflow, nullptr};\n  } else {\n    ENVOY_LOG(debug, \"thread local miss for host '{}', posting to main thread\", host);\n    main_thread_dispatcher_.post(\n        [this, host = std::string(host), default_port]() { startCacheLoad(host, default_port); });\n    return {LoadDnsCacheEntryStatus::Loading,\n            std::make_unique<LoadDnsCacheEntryHandleImpl>(tls_host_info.pending_resolutions_, host,\n                                                          callbacks)};\n  }\n}\n\nUpstream::ResourceAutoIncDecPtr\nDnsCacheImpl::canCreateDnsRequest(ResourceLimitOptRef pending_requests) {\n  const auto has_pending_requests = pending_requests.has_value();\n  auto& current_pending_requests =\n      has_pending_requests ? pending_requests->get() : resource_manager_.pendingRequests();\n  if (!current_pending_requests.canCreate()) {\n    if (!has_pending_requests) {\n      stats_.dns_rq_pending_overflow_.inc();\n    }\n    return nullptr;\n  }\n  return std::make_unique<Upstream::ResourceAutoIncDec>(current_pending_requests);\n}\n\nabsl::flat_hash_map<std::string, DnsHostInfoSharedPtr> DnsCacheImpl::hosts() {\n  absl::flat_hash_map<std::string, DnsHostInfoSharedPtr> ret;\n  for (const auto& host : primary_hosts_) {\n    // Only include hosts that have ever resolved to an address.\n    if (host.second->host_info_->address_ != nullptr) {\n      ret.emplace(host.first, host.second->host_info_);\n    }\n  }\n  return ret;\n}\n\nDnsCacheImpl::AddUpdateCallbacksHandlePtr\nDnsCacheImpl::addUpdateCallbacks(UpdateCallbacks& callbacks) {\n  return std::make_unique<AddUpdateCallbacksHandleImpl>(update_callbacks_, callbacks);\n}\n\nvoid DnsCacheImpl::startCacheLoad(const std::string& host, uint16_t default_port) {\n  // It's possible for multiple requests to race 
trying to start a resolution. If a host is\n  // already in the map it's either in the process of being resolved or the resolution is already\n  // heading out to the worker threads. Either way the pending resolution will be completed.\n  const auto primary_host_it = primary_hosts_.find(host);\n  if (primary_host_it != primary_hosts_.end()) {\n    ENVOY_LOG(debug, \"main thread resolve for host '{}' skipped. Entry present\", host);\n    return;\n  }\n\n  const auto host_attributes = Http::Utility::parseAuthority(host);\n\n  // TODO(mattklein123): Right now, the same host with different ports will become two\n  // independent primary hosts with independent DNS resolutions. I'm not sure how much this will\n  // matter, but we could consider collapsing these down and sharing the underlying DNS resolution.\n  auto& primary_host = *primary_hosts_\n                            // try_emplace() is used here for direct argument forwarding.\n                            .try_emplace(host, std::make_unique<PrimaryHostInfo>(\n                                                   *this, std::string(host_attributes.host_),\n                                                   host_attributes.port_.value_or(default_port),\n                                                   host_attributes.is_ip_address_,\n                                                   [this, host]() { onReResolve(host); }))\n                            .first->second;\n  startResolve(host, primary_host);\n}\n\nvoid DnsCacheImpl::onReResolve(const std::string& host) {\n  const auto primary_host_it = primary_hosts_.find(host);\n  ASSERT(primary_host_it != primary_hosts_.end());\n\n  const std::chrono::steady_clock::duration now_duration =\n      main_thread_dispatcher_.timeSource().monotonicTime().time_since_epoch();\n  ENVOY_LOG(debug, \"host='{}' TTL check: now={} last_used={}\", primary_host_it->first,\n            now_duration.count(),\n            
primary_host_it->second->host_info_->last_used_time_.load().count());\n  if (now_duration - primary_host_it->second->host_info_->last_used_time_.load() > host_ttl_) {\n    ENVOY_LOG(debug, \"host='{}' TTL expired, removing\", host);\n    // If the host has no address then that means that the DnsCacheImpl has never\n    // runAddUpdateCallbacks for this host, and thus the callback targets are not aware of it.\n    // Therefore, runRemoveCallbacks should only be run if the host's address != nullptr.\n    if (primary_host_it->second->host_info_->address_) {\n      runRemoveCallbacks(host);\n    }\n    primary_hosts_.erase(primary_host_it);\n    updateTlsHostsMap();\n  } else {\n    startResolve(host, *primary_host_it->second);\n  }\n}\n\nvoid DnsCacheImpl::startResolve(const std::string& host, PrimaryHostInfo& host_info) {\n  ENVOY_LOG(debug, \"starting main thread resolve for host='{}' dns='{}' port='{}'\", host,\n            host_info.host_info_->resolved_host_, host_info.port_);\n  ASSERT(host_info.active_query_ == nullptr);\n\n  stats_.dns_query_attempt_.inc();\n  host_info.active_query_ =\n      resolver_->resolve(host_info.host_info_->resolved_host_, dns_lookup_family_,\n                         [this, host](Network::DnsResolver::ResolutionStatus status,\n                                      std::list<Network::DnsResponse>&& response) {\n                           finishResolve(host, status, std::move(response));\n                         });\n}\n\nvoid DnsCacheImpl::finishResolve(const std::string& host,\n                                 Network::DnsResolver::ResolutionStatus status,\n                                 std::list<Network::DnsResponse>&& response) {\n  ENVOY_LOG(debug, \"main thread resolve complete for host '{}'. 
{} results\", host, response.size());\n  const auto primary_host_it = primary_hosts_.find(host);\n  ASSERT(primary_host_it != primary_hosts_.end());\n\n  auto& primary_host_info = *primary_host_it->second;\n  primary_host_info.active_query_ = nullptr;\n  const bool first_resolve = !primary_host_info.host_info_->first_resolve_complete_;\n  primary_host_info.host_info_->first_resolve_complete_ = true;\n\n  // If the DNS resolver successfully resolved with an empty response list, the dns cache does not\n  // update. This ensures that a potentially previously resolved address does not stabilize back to\n  // 0 hosts.\n  const auto new_address = !response.empty()\n                               ? Network::Utility::getAddressWithPort(*(response.front().address_),\n                                                                      primary_host_info.port_)\n                               : nullptr;\n\n  if (status == Network::DnsResolver::ResolutionStatus::Failure) {\n    stats_.dns_query_failure_.inc();\n  } else {\n    stats_.dns_query_success_.inc();\n  }\n\n  // Only change the address if:\n  // 1) The new address is valid &&\n  // 2a) The host doesn't yet have an address ||\n  // 2b) The host has a changed address.\n  //\n  // This means that once a host gets an address it will stick even in the case of a subsequent\n  // resolution failure.\n  bool address_changed = false;\n  if (new_address != nullptr && (primary_host_info.host_info_->address_ == nullptr ||\n                                 *primary_host_info.host_info_->address_ != *new_address)) {\n    ENVOY_LOG(debug, \"host '{}' address has changed\", host);\n    primary_host_info.host_info_->address_ = new_address;\n    runAddUpdateCallbacks(host, primary_host_info.host_info_);\n    address_changed = true;\n    stats_.host_address_changed_.inc();\n  }\n\n  if (first_resolve || address_changed) {\n    updateTlsHostsMap();\n  }\n\n  // Kick off the refresh timer.\n  // TODO(mattklein123): Consider jitter 
here. It may not be necessary since the initial host\n  // is populated dynamically.\n  if (status == Network::DnsResolver::ResolutionStatus::Success) {\n    failure_backoff_strategy_->reset();\n    primary_host_info.refresh_timer_->enableTimer(refresh_interval_);\n    ENVOY_LOG(debug, \"DNS refresh rate reset for host '{}', refresh rate {} ms\", host,\n              refresh_interval_.count());\n  } else {\n    const uint64_t refresh_interval = failure_backoff_strategy_->nextBackOffMs();\n    primary_host_info.refresh_timer_->enableTimer(std::chrono::milliseconds(refresh_interval));\n    ENVOY_LOG(debug, \"DNS refresh rate reset for host '{}', (failure) refresh rate {} ms\", host,\n              refresh_interval);\n  }\n}\n\nvoid DnsCacheImpl::runAddUpdateCallbacks(const std::string& host,\n                                         const DnsHostInfoSharedPtr& host_info) {\n  for (auto callbacks : update_callbacks_) {\n    callbacks->callbacks_.onDnsHostAddOrUpdate(host, host_info);\n  }\n}\n\nvoid DnsCacheImpl::runRemoveCallbacks(const std::string& host) {\n  for (auto callbacks : update_callbacks_) {\n    callbacks->callbacks_.onDnsHostRemove(host);\n  }\n}\n\nvoid DnsCacheImpl::updateTlsHostsMap() {\n  TlsHostMapSharedPtr new_host_map = std::make_shared<TlsHostMap>();\n  for (const auto& primary_host : primary_hosts_) {\n    // Do not include hosts that have not resolved at least once.\n    if (primary_host.second->host_info_->first_resolve_complete_) {\n      new_host_map->emplace(primary_host.first, primary_host.second->host_info_);\n    }\n  }\n\n  tls_slot_->runOnAllThreads([new_host_map](ThreadLocal::ThreadLocalObjectSharedPtr object)\n                                 -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    object->asType<ThreadLocalHostInfo>().updateHostMap(new_host_map);\n    return object;\n  });\n}\n\nDnsCacheImpl::ThreadLocalHostInfo::~ThreadLocalHostInfo() {\n  // Make sure we cancel any handles that still exist.\n  for (auto 
pending_resolution : pending_resolutions_) {\n    pending_resolution->cancel();\n  }\n}\n\nvoid DnsCacheImpl::ThreadLocalHostInfo::updateHostMap(const TlsHostMapSharedPtr& new_host_map) {\n  host_map_ = new_host_map;\n  for (auto pending_resolution_it = pending_resolutions_.begin();\n       pending_resolution_it != pending_resolutions_.end();) {\n    auto& pending_resolution = **pending_resolution_it;\n    if (host_map_->count(pending_resolution.host_) != 0) {\n      auto& callbacks = pending_resolution.callbacks_;\n      pending_resolution.cancel();\n      pending_resolution_it = pending_resolutions_.erase(pending_resolution_it);\n      callbacks.onLoadDnsCacheComplete();\n    } else {\n      ++pending_resolution_it;\n    }\n  }\n}\n\nDnsCacheImpl::PrimaryHostInfo::PrimaryHostInfo(DnsCacheImpl& parent,\n                                               absl::string_view host_to_resolve, uint16_t port,\n                                               bool is_ip_address, const Event::TimerCb& timer_cb)\n    : parent_(parent), port_(port),\n      refresh_timer_(parent.main_thread_dispatcher_.createTimer(timer_cb)),\n      host_info_(std::make_shared<DnsHostInfoImpl>(parent.main_thread_dispatcher_.timeSource(),\n                                                   host_to_resolve, is_ip_address)) {\n  parent_.stats_.host_added_.inc();\n  parent_.stats_.num_hosts_.inc();\n}\n\nDnsCacheImpl::PrimaryHostInfo::~PrimaryHostInfo() {\n  parent_.stats_.host_removed_.inc();\n  parent_.stats_.num_hosts_.dec();\n}\n\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h",
    "content": "#pragma once\n\n#include \"envoy/common/backoff_strategy.h\"\n#include \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/cleanup.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache.h\"\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\n\n/**\n * All DNS cache stats. @see stats_macros.h\n */\n#define ALL_DNS_CACHE_STATS(COUNTER, GAUGE)                                                        \\\n  COUNTER(dns_query_attempt)                                                                       \\\n  COUNTER(dns_query_failure)                                                                       \\\n  COUNTER(dns_query_success)                                                                       \\\n  COUNTER(host_added)                                                                              \\\n  COUNTER(host_address_changed)                                                                    \\\n  COUNTER(host_overflow)                                                                           \\\n  COUNTER(host_removed)                                                                            \\\n  COUNTER(dns_rq_pending_overflow)                                                                 \\\n  GAUGE(num_hosts, NeverImport)\n\n/**\n * Struct definition for all DNS cache stats. 
@see stats_macros.h\n */\nstruct DnsCacheStats {\n  ALL_DNS_CACHE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\nclass DnsCacheImpl : public DnsCache, Logger::Loggable<Logger::Id::forward_proxy> {\npublic:\n  DnsCacheImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls,\n               Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope,\n               const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config);\n  ~DnsCacheImpl() override;\n  static DnsCacheStats generateDnsCacheStats(Stats::Scope& scope);\n\n  // DnsCache\n  LoadDnsCacheEntryResult loadDnsCacheEntry(absl::string_view host, uint16_t default_port,\n                                            LoadDnsCacheEntryCallbacks& callbacks) override;\n  AddUpdateCallbacksHandlePtr addUpdateCallbacks(UpdateCallbacks& callbacks) override;\n  absl::flat_hash_map<std::string, DnsHostInfoSharedPtr> hosts() override;\n  Upstream::ResourceAutoIncDecPtr\n  canCreateDnsRequest(ResourceLimitOptRef pending_requests) override;\n\nprivate:\n  using TlsHostMap = absl::flat_hash_map<std::string, DnsHostInfoSharedPtr>;\n  using TlsHostMapSharedPtr = std::shared_ptr<TlsHostMap>;\n\n  struct LoadDnsCacheEntryHandleImpl : public LoadDnsCacheEntryHandle,\n                                       RaiiListElement<LoadDnsCacheEntryHandleImpl*> {\n    LoadDnsCacheEntryHandleImpl(std::list<LoadDnsCacheEntryHandleImpl*>& parent,\n                                absl::string_view host, LoadDnsCacheEntryCallbacks& callbacks)\n        : RaiiListElement<LoadDnsCacheEntryHandleImpl*>(parent, this), host_(host),\n          callbacks_(callbacks) {}\n\n    const std::string host_;\n    LoadDnsCacheEntryCallbacks& callbacks_;\n  };\n\n  // Per-thread DNS cache info including the currently known hosts as well as any pending callbacks.\n  struct ThreadLocalHostInfo : public ThreadLocal::ThreadLocalObject {\n    ~ThreadLocalHostInfo() override;\n    
void updateHostMap(const TlsHostMapSharedPtr& new_host_map);\n\n    TlsHostMapSharedPtr host_map_;\n    std::list<LoadDnsCacheEntryHandleImpl*> pending_resolutions_;\n  };\n\n  struct DnsHostInfoImpl : public DnsHostInfo {\n    DnsHostInfoImpl(TimeSource& time_source, absl::string_view resolved_host, bool is_ip_address)\n        : time_source_(time_source), resolved_host_(resolved_host), is_ip_address_(is_ip_address) {\n      touch();\n    }\n\n    // DnsHostInfo\n    Network::Address::InstanceConstSharedPtr address() override { return address_; }\n    const std::string& resolvedHost() const override { return resolved_host_; }\n    bool isIpAddress() const override { return is_ip_address_; }\n    void touch() final { last_used_time_ = time_source_.monotonicTime().time_since_epoch(); }\n\n    TimeSource& time_source_;\n    const std::string resolved_host_;\n    const bool is_ip_address_;\n    bool first_resolve_complete_{};\n    Network::Address::InstanceConstSharedPtr address_;\n    // Using std::chrono::steady_clock::duration is required for compilation within an atomic vs.\n    // using MonotonicTime.\n    std::atomic<std::chrono::steady_clock::duration> last_used_time_;\n  };\n\n  using DnsHostInfoImplSharedPtr = std::shared_ptr<DnsHostInfoImpl>;\n\n  // Primary host information that accounts for TTL, re-resolution, etc.\n  struct PrimaryHostInfo {\n    PrimaryHostInfo(DnsCacheImpl& parent, absl::string_view host_to_resolve, uint16_t port,\n                    bool is_ip_address, const Event::TimerCb& timer_cb);\n    ~PrimaryHostInfo();\n\n    DnsCacheImpl& parent_;\n    const uint16_t port_;\n    const Event::TimerPtr refresh_timer_;\n    const DnsHostInfoImplSharedPtr host_info_;\n    Network::ActiveDnsQuery* active_query_{};\n  };\n\n  using PrimaryHostInfoPtr = std::unique_ptr<PrimaryHostInfo>;\n\n  struct AddUpdateCallbacksHandleImpl : public AddUpdateCallbacksHandle,\n                                        RaiiListElement<AddUpdateCallbacksHandleImpl*> 
{\n    AddUpdateCallbacksHandleImpl(std::list<AddUpdateCallbacksHandleImpl*>& parent,\n                                 UpdateCallbacks& callbacks)\n        : RaiiListElement<AddUpdateCallbacksHandleImpl*>(parent, this), callbacks_(callbacks) {}\n\n    UpdateCallbacks& callbacks_;\n  };\n\n  void startCacheLoad(const std::string& host, uint16_t default_port);\n  void startResolve(const std::string& host, PrimaryHostInfo& host_info);\n  void finishResolve(const std::string& host, Network::DnsResolver::ResolutionStatus status,\n                     std::list<Network::DnsResponse>&& response);\n  void runAddUpdateCallbacks(const std::string& host, const DnsHostInfoSharedPtr& host_info);\n  void runRemoveCallbacks(const std::string& host);\n  void updateTlsHostsMap();\n  void onReResolve(const std::string& host);\n\n  Event::Dispatcher& main_thread_dispatcher_;\n  const Network::DnsLookupFamily dns_lookup_family_;\n  const Network::DnsResolverSharedPtr resolver_;\n  const ThreadLocal::SlotPtr tls_slot_;\n  Stats::ScopePtr scope_;\n  DnsCacheStats stats_;\n  std::list<AddUpdateCallbacksHandleImpl*> update_callbacks_;\n  absl::flat_hash_map<std::string, PrimaryHostInfoPtr> primary_hosts_;\n  DnsCacheResourceManagerImpl resource_manager_;\n  const std::chrono::milliseconds refresh_interval_;\n  const BackOffStrategyPtr failure_backoff_strategy_;\n  const std::chrono::milliseconds host_ttl_;\n  const uint32_t max_hosts_;\n};\n\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc",
    "content": "#include \"extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h\"\n\n#include \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_impl.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\n\nSINGLETON_MANAGER_REGISTRATION(dns_cache_manager);\n\nDnsCacheSharedPtr DnsCacheManagerImpl::getCache(\n    const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) {\n  const auto& existing_cache = caches_.find(config.name());\n  if (existing_cache != caches_.end()) {\n    if (!Protobuf::util::MessageDifferencer::Equivalent(config, existing_cache->second.config_)) {\n      throw EnvoyException(\n          fmt::format(\"config specified DNS cache '{}' with different settings\", config.name()));\n    }\n\n    return existing_cache->second.cache_;\n  }\n\n  DnsCacheSharedPtr new_cache = std::make_shared<DnsCacheImpl>(\n      main_thread_dispatcher_, tls_, random_, loader_, root_scope_, config);\n  caches_.emplace(config.name(), ActiveCache{config, new_cache});\n  return new_cache;\n}\n\nDnsCacheManagerSharedPtr getCacheManager(Singleton::Manager& singleton_manager,\n                                         Event::Dispatcher& main_thread_dispatcher,\n                                         ThreadLocal::SlotAllocator& tls,\n                                         Random::RandomGenerator& random, Runtime::Loader& loader,\n                                         Stats::Scope& root_scope) {\n  return singleton_manager.getTyped<DnsCacheManager>(\n      SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager),\n      [&main_thread_dispatcher, &tls, &random, &loader, &root_scope] {\n        return std::make_shared<DnsCacheManagerImpl>(main_thread_dispatcher, tls, random, loader,\n                                         
            root_scope);\n      });\n}\n\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\n\nclass DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance {\npublic:\n  DnsCacheManagerImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls,\n                      Random::RandomGenerator& random, Runtime::Loader& loader,\n                      Stats::Scope& root_scope)\n      : main_thread_dispatcher_(main_thread_dispatcher), tls_(tls), random_(random),\n        loader_(loader), root_scope_(root_scope) {}\n\n  // DnsCacheManager\n  DnsCacheSharedPtr getCache(\n      const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) override;\n\nprivate:\n  struct ActiveCache {\n    ActiveCache(const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config,\n                DnsCacheSharedPtr cache)\n        : config_(config), cache_(cache) {}\n\n    const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config_;\n    DnsCacheSharedPtr cache_;\n  };\n\n  Event::Dispatcher& main_thread_dispatcher_;\n  ThreadLocal::SlotAllocator& tls_;\n  Random::RandomGenerator& random_;\n  Runtime::Loader& loader_;\n  Stats::Scope& root_scope_;\n  absl::flat_hash_map<std::string, ActiveCache> caches_;\n};\n\nclass DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory {\npublic:\n  DnsCacheManagerFactoryImpl(Singleton::Manager& singleton_manager, Event::Dispatcher& dispatcher,\n                             ThreadLocal::SlotAllocator& tls, Random::RandomGenerator& random,\n                             Runtime::Loader& loader, Stats::Scope& root_scope)\n      : singleton_manager_(singleton_manager), dispatcher_(dispatcher), tls_(tls), random_(random),\n       
 loader_(loader), root_scope_(root_scope) {}\n\n  DnsCacheManagerSharedPtr get() override {\n    return getCacheManager(singleton_manager_, dispatcher_, tls_, random_, loader_, root_scope_);\n  }\n\nprivate:\n  Singleton::Manager& singleton_manager_;\n  Event::Dispatcher& dispatcher_;\n  ThreadLocal::SlotAllocator& tls_;\n  Random::RandomGenerator& random_;\n  Runtime::Loader& loader_;\n  Stats::Scope& root_scope_;\n};\n\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.cc",
    "content": "#include \"extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\n\nDnsCacheResourceManagerImpl::DnsCacheResourceManagerImpl(\n    Stats::Scope& scope, Runtime::Loader& loader, const std::string& config_name,\n    const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheCircuitBreakers& cb_config)\n    : cb_stats_(generateDnsCacheCircuitBreakersStats(scope)),\n      pending_requests_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(cb_config, max_pending_requests, 1024),\n                        loader, fmt::format(\"dns_cache.{}.circuit_breakers\", config_name),\n                        cb_stats_.rq_pending_open_, cb_stats_.rq_pending_remaining_) {}\n\nDnsCacheCircuitBreakersStats\nDnsCacheResourceManagerImpl::generateDnsCacheCircuitBreakersStats(Stats::Scope& scope) {\n  std::string stat_prefix = \"circuit_breakers\";\n  return {ALL_DNS_CACHE_CIRCUIT_BREAKERS_STATS(POOL_GAUGE_PREFIX(scope, stat_prefix),\n                                               POOL_GAUGE_PREFIX(scope, stat_prefix))};\n}\n\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/upstream/resource_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/basic_resource_impl.h\"\n#include \"common/upstream/resource_manager_impl.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\n\nclass DnsCacheResourceManagerImpl : public DnsCacheResourceManager {\npublic:\n  DnsCacheResourceManagerImpl(\n      Stats::Scope& scope, Runtime::Loader& loader, const std::string& config_name,\n      const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheCircuitBreakers&\n          cb_config);\n\n  static DnsCacheCircuitBreakersStats generateDnsCacheCircuitBreakersStats(Stats::Scope& scope);\n  // Envoy::Upstream::DnsCacheResourceManager\n  ResourceLimit& pendingRequests() override { return pending_requests_; }\n  DnsCacheCircuitBreakersStats& stats() override { return cb_stats_; }\n\nprivate:\n  DnsCacheCircuitBreakersStats cb_stats_;\n  Upstream::ManagedResourceImpl pending_requests_;\n};\n\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/common/matcher/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"matcher_lib\",\n    srcs = [\"matcher.cc\"],\n    hdrs = [\"matcher.h\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"@envoy_api//envoy/config/common/matcher/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/common/matcher/matcher.cc",
    "content": "#include \"extensions/common/matcher/matcher.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Matcher {\n\nvoid buildMatcher(const envoy::config::common::matcher::v3::MatchPredicate& match_config,\n                  std::vector<MatcherPtr>& matchers) {\n  // In order to store indexes and build our matcher tree inline, we must reserve a slot where\n  // the matcher we are about to create will go. This allows us to know its future index and still\n  // construct more of the tree in each called constructor (e.g., multiple OR/AND conditions).\n  // Once fully constructed, we move the matcher into its position below. See the matcher\n  // overview in matcher.h for more information.\n  matchers.emplace_back(nullptr);\n\n  MatcherPtr new_matcher;\n  switch (match_config.rule_case()) {\n  case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kOrMatch:\n    new_matcher = std::make_unique<SetLogicMatcher>(match_config.or_match(), matchers,\n                                                    SetLogicMatcher::Type::Or);\n    break;\n  case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kAndMatch:\n    new_matcher = std::make_unique<SetLogicMatcher>(match_config.and_match(), matchers,\n                                                    SetLogicMatcher::Type::And);\n    break;\n  case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kNotMatch:\n    new_matcher = std::make_unique<NotMatcher>(match_config.not_match(), matchers);\n    break;\n  case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kAnyMatch:\n    new_matcher = std::make_unique<AnyMatcher>(matchers);\n    break;\n  case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpRequestHeadersMatch:\n    new_matcher = std::make_unique<HttpRequestHeadersMatcher>(\n        match_config.http_request_headers_match(), matchers);\n    break;\n  case 
envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpRequestTrailersMatch:\n    new_matcher = std::make_unique<HttpRequestTrailersMatcher>(\n        match_config.http_request_trailers_match(), matchers);\n    break;\n  case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpResponseHeadersMatch:\n    new_matcher = std::make_unique<HttpResponseHeadersMatcher>(\n        match_config.http_response_headers_match(), matchers);\n    break;\n  case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpResponseTrailersMatch:\n    new_matcher = std::make_unique<HttpResponseTrailersMatcher>(\n        match_config.http_response_trailers_match(), matchers);\n    break;\n  case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpRequestGenericBodyMatch:\n    new_matcher = std::make_unique<HttpRequestGenericBodyMatcher>(\n        match_config.http_request_generic_body_match(), matchers);\n    break;\n  case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpResponseGenericBodyMatch:\n    new_matcher = std::make_unique<HttpResponseGenericBodyMatcher>(\n        match_config.http_response_generic_body_match(), matchers);\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  // Per above, move the matcher into its position.\n  matchers[new_matcher->index()] = std::move(new_matcher);\n}\n\nSetLogicMatcher::SetLogicMatcher(\n    const envoy::config::common::matcher::v3::MatchPredicate::MatchSet& configs,\n    std::vector<MatcherPtr>& matchers, Type type)\n    : LogicMatcherBase(matchers), matchers_(matchers), type_(type) {\n  for (const auto& config : configs.rules()) {\n    indexes_.push_back(matchers_.size());\n    buildMatcher(config, matchers_);\n  }\n}\n\nvoid SetLogicMatcher::updateLocalStatus(MatchStatusVector& statuses,\n                                        const UpdateFunctor& functor) const {\n  if (!statuses[my_index_].might_change_status_) {\n    return;\n  }\n\n  for (size_t index : 
indexes_) {\n    functor(*matchers_[index], statuses);\n  }\n\n  auto predicate = [&statuses](size_t index) { return statuses[index].matches_; };\n  if (type_ == Type::And) {\n    statuses[my_index_].matches_ = std::all_of(indexes_.begin(), indexes_.end(), predicate);\n  } else {\n    ASSERT(type_ == Type::Or);\n    statuses[my_index_].matches_ = std::any_of(indexes_.begin(), indexes_.end(), predicate);\n  }\n\n  // TODO(mattklein123): We can potentially short circuit this even further if we git a single false\n  // in an AND set or a single true in an OR set.\n  statuses[my_index_].might_change_status_ =\n      std::any_of(indexes_.begin(), indexes_.end(),\n                  [&statuses](size_t index) { return statuses[index].might_change_status_; });\n}\n\nNotMatcher::NotMatcher(const envoy::config::common::matcher::v3::MatchPredicate& config,\n                       std::vector<MatcherPtr>& matchers)\n    : LogicMatcherBase(matchers), matchers_(matchers), not_index_(matchers.size()) {\n  buildMatcher(config, matchers);\n}\n\nvoid NotMatcher::updateLocalStatus(MatchStatusVector& statuses,\n                                   const UpdateFunctor& functor) const {\n  if (!statuses[my_index_].might_change_status_) {\n    return;\n  }\n\n  functor(*matchers_[not_index_], statuses);\n  statuses[my_index_].matches_ = !statuses[not_index_].matches_;\n  statuses[my_index_].might_change_status_ = statuses[not_index_].might_change_status_;\n}\n\nHttpHeaderMatcherBase::HttpHeaderMatcherBase(\n    const envoy::config::common::matcher::v3::HttpHeadersMatch& config,\n    const std::vector<MatcherPtr>& matchers)\n    : SimpleMatcher(matchers),\n      headers_to_match_(Http::HeaderUtility::buildHeaderDataVector(config.headers())) {}\n\nvoid HttpHeaderMatcherBase::matchHeaders(const Http::HeaderMap& headers,\n                                         MatchStatusVector& statuses) const {\n  ASSERT(statuses[my_index_].might_change_status_);\n  statuses[my_index_].matches_ = 
Http::HeaderUtility::matchHeaders(headers, headers_to_match_);\n  statuses[my_index_].might_change_status_ = false;\n}\n\n// HttpGenericBodyMatcher\n// Scans the HTTP body and looks for patterns.\n// HTTP body may be passed to the matcher in chunks. The search logic buffers\n// only as many bytes as is the length of the longest pattern to be found.\nHttpGenericBodyMatcher::HttpGenericBodyMatcher(\n    const envoy::config::common::matcher::v3::HttpGenericBodyMatch& config,\n    const std::vector<MatcherPtr>& matchers)\n    : HttpBodyMatcherBase(matchers) {\n  patterns_ = std::make_shared<std::vector<std::string>>();\n  for (const auto& i : config.patterns()) {\n    switch (i.rule_case()) {\n    // For binary match 'i' contains sequence of bytes to locate in the body.\n    case envoy::config::common::matcher::v3::HttpGenericBodyMatch::GenericTextMatch::kBinaryMatch: {\n      patterns_->push_back(i.binary_match());\n    } break;\n    // For string match 'i' contains exact string to locate in the body.\n    case envoy::config::common::matcher::v3::HttpGenericBodyMatch::GenericTextMatch::kStringMatch:\n      patterns_->push_back(i.string_match());\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n    // overlap_size_ indicates how many bytes from previous data chunk(s) are buffered.\n    overlap_size_ = std::max(overlap_size_, patterns_->back().length() - 1);\n  }\n  limit_ = config.bytes_limit();\n}\n\nvoid HttpGenericBodyMatcher::onBody(const Buffer::Instance& data, MatchStatusVector& statuses) {\n  // Get the context associated with this stream.\n  HttpGenericBodyMatcherCtx* ctx =\n      static_cast<HttpGenericBodyMatcherCtx*>(statuses[my_index_].ctx_.get());\n\n  if (statuses[my_index_].might_change_status_ == false) {\n    // End of search limit has been already reached or all patterns have been found.\n    // Status is not going to change.\n    ASSERT(((0 != limit_) && (limit_ == ctx->processed_bytes_)) || (ctx->patterns_index_.empty()));\n   
 return;\n  }\n\n  // Iterate through all patterns to be found and check if they are located across body\n  // chunks: part of the pattern was in previous body chunk and remaining of the pattern\n  // is in the current body chunk on in the current body chunk.\n  bool resize_required = false;\n  auto body_search_limit = limit_ - ctx->processed_bytes_;\n  auto it = ctx->patterns_index_.begin();\n  while (it != ctx->patterns_index_.end()) {\n    const auto& pattern = patterns_->at(*it);\n    if ((!ctx->overlap_.empty() && (locatePatternAcrossChunks(pattern, data, ctx))) ||\n        (-1 != data.search(static_cast<const void*>(pattern.data()), pattern.length(), 0,\n                           body_search_limit))) {\n      // Pattern found. Remove it from the list of patterns to be found.\n      // If the longest pattern has been found, resize of overlap buffer may be\n      // required.\n      resize_required = resize_required || (ctx->capacity_ == (pattern.length() - 1));\n      it = ctx->patterns_index_.erase(it);\n    } else {\n      it++;\n    }\n  }\n\n  if (ctx->patterns_index_.empty()) {\n    // All patterns were found.\n    statuses[my_index_].matches_ = true;\n    statuses[my_index_].might_change_status_ = false;\n    return;\n  }\n\n  // Check if next body chunks should be searched for patterns. 
If the search limit\n  // ends on the current body chunk, there is no need to check next chunks.\n  if (0 != limit_) {\n    ctx->processed_bytes_ = std::min(uint64_t(limit_), ctx->processed_bytes_ + data.length());\n    if (limit_ == ctx->processed_bytes_) {\n      // End of search limit has been reached and not all patterns have been found.\n      statuses[my_index_].matches_ = false;\n      statuses[my_index_].might_change_status_ = false;\n      return;\n    }\n  }\n\n  // If longest pattern has been located, there is possibility that overlap_\n  // buffer size may be reduced.\n  if (resize_required) {\n    resizeOverlapBuffer(ctx);\n  }\n\n  bufferLastBytes(data, ctx);\n}\n\n// Here we handle a situation when a pattern is spread across multiple body buffers.\n// overlap_ stores number of bytes from previous body chunks equal to longest pattern yet to be\n// found minus one byte (-1). The logic below tries to find the beginning of the pattern in\n// overlap_ buffer and the pattern should continue at the beginning of the next buffer.\nbool HttpGenericBodyMatcher::locatePatternAcrossChunks(const std::string& pattern,\n                                                       const Buffer::Instance& data,\n                                                       const HttpGenericBodyMatcherCtx* ctx) {\n  // Take the first character from the pattern and locate it in overlap_.\n  auto pattern_index = 0;\n  // Start position in overlap_. overlap_ size was calculated based on the longest pattern to be\n  // found, but search for shorter patterns may start from some offset, not the beginning of the\n  // buffer.\n  size_t start_index = (ctx->overlap_.size() > (pattern.size() - 1))\n                           ? 
ctx->overlap_.size() - (pattern.size() - 1)\n                           : 0;\n  auto match_iter = std::find(std::begin(ctx->overlap_) + start_index, std::end(ctx->overlap_),\n                              pattern.at(pattern_index));\n\n  if (match_iter == std::end(ctx->overlap_)) {\n    return false;\n  }\n\n  // Continue checking characters until end of overlap_ buffer.\n  while (match_iter != std::end(ctx->overlap_)) {\n    if (pattern[pattern_index] != *match_iter) {\n      return false;\n    }\n    pattern_index++;\n    match_iter++;\n  }\n\n  // Now check if the remaining of the pattern matches the beginning of the body\n  // buffer.i Do it only if there is sufficient number of bytes in the data buffer.\n  auto pattern_remainder = pattern.substr(pattern_index);\n  if ((0 != limit_) && (pattern_remainder.length() > (limit_ - ctx->processed_bytes_))) {\n    // Even if we got match it would be outside the search limit\n    return false;\n  }\n  return ((pattern_remainder.length() <= data.length()) && data.startsWith(pattern_remainder));\n}\n\n// Method buffers last bytes from the currently processed body in overlap_.\n// This is required to find patterns which spans across multiple body chunks.\nvoid HttpGenericBodyMatcher::bufferLastBytes(const Buffer::Instance& data,\n                                             HttpGenericBodyMatcherCtx* ctx) {\n  // The matcher buffers the last seen X bytes where X is equal to the length of the\n  // longest pattern - 1. With the arrival of the new 'data' the following situations\n  // are possible:\n  // 1. The new data's length is larger or equal to X. In this case just copy last X bytes\n  // from the data to overlap_ buffer.\n  // 2. The new data length is smaller than X and there is enough room in overlap buffer to just\n  // copy the bytes from data.\n  // 3. 
The new data length is smaller than X and there is not enough room in overlap buffer.\n  if (data.length() >= ctx->capacity_) {\n    // Case 1:\n    // Just overwrite the entire overlap_ buffer with new data.\n    ctx->overlap_.resize(ctx->capacity_);\n    data.copyOut(data.length() - ctx->capacity_, ctx->capacity_, ctx->overlap_.data());\n  } else {\n    if (data.length() <= (ctx->capacity_ - ctx->overlap_.size())) {\n      // Case 2. Just add the new data on top of already buffered.\n      const auto size = ctx->overlap_.size();\n      ctx->overlap_.resize(ctx->overlap_.size() + data.length());\n      data.copyOut(0, data.length(), ctx->overlap_.data() + size);\n    } else {\n      // Case 3. First shift data to make room for new data and then copy\n      // entire new buffer.\n      const size_t shift = ctx->overlap_.size() - (ctx->capacity_ - data.length());\n      for (size_t i = 0; i < (ctx->overlap_.size() - shift); i++) {\n        ctx->overlap_[i] = ctx->overlap_[i + shift];\n      }\n      const auto size = ctx->overlap_.size();\n      ctx->overlap_.resize(ctx->capacity_);\n      data.copyOut(0, data.length(), ctx->overlap_.data() + (size - shift));\n    }\n  }\n}\n\n// Method takes list of indexes of patterns not yet located in the http body and returns the\n// length of the longest pattern.\n// This is used by matcher to buffer as minimum bytes as possible.\nsize_t HttpGenericBodyMatcher::calcLongestPatternSize(const std::list<uint32_t>& indexes) const {\n  ASSERT(!indexes.empty());\n  size_t max_len = 0;\n  for (const auto& i : indexes) {\n    max_len = std::max(max_len, patterns_->at(i).length());\n  }\n  return max_len;\n}\n\n// Method checks if it is possible to reduce the size of overlap_ buffer.\nvoid HttpGenericBodyMatcher::resizeOverlapBuffer(HttpGenericBodyMatcherCtx* ctx) {\n  // Check if we need to resize overlap_ buffer. 
Since it was initialized to size of the longest\n  // pattern, it will be shrunk only and memory allocations do not happen.\n  // Depending on how many bytes were already in the buffer, shift may be required if\n  // the new size is smaller than number of already buffered bytes.\n  const size_t max_len = calcLongestPatternSize(ctx->patterns_index_);\n  if (ctx->capacity_ != (max_len - 1)) {\n    const size_t new_size = max_len - 1;\n    const size_t shift = (ctx->overlap_.size() > new_size) ? (ctx->overlap_.size() - new_size) : 0;\n    // Copy the last new_size bytes to the beginning of the buffer.\n    for (size_t i = 0; (i < new_size) && (shift > 0); i++) {\n      ctx->overlap_[i] = ctx->overlap_[i + shift];\n    }\n    ctx->capacity_ = new_size;\n    if (shift > 0) {\n      ctx->overlap_.resize(new_size);\n    }\n  }\n}\n\n} // namespace Matcher\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/matcher/matcher.h",
    "content": "#pragma once\n\n#include \"envoy/config/common/matcher/v3/matcher.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/header_utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Matcher {\n\nclass Matcher;\nusing MatcherPtr = std::unique_ptr<Matcher>;\n\n/**\n * Base class for context used by individual matchers.\n * The context may be required by matchers which are called multiple times\n * and need to carry state between the calls. For example body matchers may\n * store information how any bytes of the body have been already processed\n * or what what has been already found in the body and what has yet to be found.\n */\nclass MatcherCtx {\npublic:\n  virtual ~MatcherCtx() = default;\n};\n\n/**\n * Base class for all matchers.\n *\n * A high level note on the design of matching which is different from other matching in Envoy\n * due to a requirement to support streaming matching (match as new data arrives versus\n * calculating the match given all available data at once).\n * - The matching system is composed of a constant matching configuration. This is essentially\n *   a tree of matchers given logical AND, OR, NOT, etc.\n * - A per-stream/request matching status must be kept in order to compute interim match status.\n * - In order to make this computationally efficient, the matching tree is kept in a vector, with\n *   all references to other matchers implemented using an index into the vector. The vector is\n *   effectively a preorder traversal flattened N-ary tree.\n * - The previous point allows the creation of a per-stream/request vector of match statuses of\n *   the same size as the matcher vector. 
Then, when match status is updated given new\n *   information, the vector of match statuses can be easily updated using the same indexes as in\n *   the constant match configuration.\n * - Finally, a matches() function can be trivially implemented by looking in the status vector at\n *   the index position that the current matcher is located in.\n */\nclass Matcher {\npublic:\n  struct MatchStatus {\n    bool operator==(const MatchStatus& rhs) const {\n      return matches_ == rhs.matches_ && might_change_status_ == rhs.might_change_status_;\n    }\n\n    bool matches_{false};            // Does the matcher currently match?\n    bool might_change_status_{true}; // Is it possible for matches_ to change in subsequent updates?\n    std::unique_ptr<MatcherCtx> ctx_{}; // Context used by matchers to save interim context.\n  };\n\n  using MatchStatusVector = std::vector<MatchStatus>;\n\n  /**\n   * Base class constructor for a matcher.\n   * @param matchers supplies the match tree vector being built.\n   */\n  Matcher(const std::vector<MatcherPtr>& matchers)\n      // NOTE: This code assumes that the index for the matcher being constructed has already been\n      // allocated, which is why my_index_ is set to size() - 1. See buildMatcher() in\n      // matcher.cc.\n      : my_index_(matchers.size() - 1) {}\n\n  virtual ~Matcher() = default;\n\n  /**\n   * @return the matcher's index in the match tree vector (see above).\n   */\n  size_t index() { return my_index_; }\n\n  /**\n   * Update match status when a stream is created. This might be an HTTP stream, a TCP connection,\n   * etc. 
This allows any matchers to flip to an initial state of true if applicable.\n   */\n  virtual void onNewStream(MatchStatusVector& statuses) const PURE;\n\n  /**\n   * Update match status given HTTP request headers.\n   * @param request_headers supplies the request headers.\n   * @param statuses supplies the per-stream-request match status vector which must be the same\n   *                 size as the match tree vector (see above).\n   */\n  virtual void onHttpRequestHeaders(const Http::RequestHeaderMap& request_headers,\n                                    MatchStatusVector& statuses) const PURE;\n\n  /**\n   * Update match status given HTTP request trailers.\n   * @param request_trailers supplies the request trailers.\n   * @param statuses supplies the per-stream-request match status vector which must be the same\n   *                 size as the match tree vector (see above).\n   */\n  virtual void onHttpRequestTrailers(const Http::RequestTrailerMap& request_trailers,\n                                     MatchStatusVector& statuses) const PURE;\n\n  /**\n   * Update match status given HTTP response headers.\n   * @param response_headers supplies the response headers.\n   * @param statuses supplies the per-stream-request match status vector which must be the same\n   *                 size as the match tree vector (see above).\n   */\n  virtual void onHttpResponseHeaders(const Http::ResponseHeaderMap& response_headers,\n                                     MatchStatusVector& statuses) const PURE;\n\n  /**\n   * Update match status given HTTP response trailers.\n   * @param response_headers supplies the response trailers.\n   * @param statuses supplies the per-stream-request match status vector which must be the same\n   *                 size as the match tree vector (see above).\n   */\n  virtual void onHttpResponseTrailers(const Http::ResponseTrailerMap& response_trailers,\n                                      MatchStatusVector& statuses) const PURE;\n\n  
/**\n   * Update match status given HTTP request body.\n   * @param data supplies the request body.\n   * @param statuses supplies the per-stream-request match status vector which must be the same\n   *                 size as the match tree vector (see above).\n   */\n  virtual void onRequestBody(const Buffer::Instance& data, MatchStatusVector& statuses) PURE;\n\n  /**\n   * Update match status given HTTP response body.\n   * @param data supplies the response body.\n   * @param statuses supplies the per-stream-request match status vector which must be the same\n   *                 size as the match tree vector (see above).\n   */\n  virtual void onResponseBody(const Buffer::Instance& data, MatchStatusVector& statuses) PURE;\n\n  /**\n   * @return whether given currently available information, the matcher matches.\n   * @param statuses supplies the per-stream-request match status vector which must be the same\n   *                 size as the match tree vector (see above).\n   */\n  const MatchStatus& matchStatus(const MatchStatusVector& statuses) const {\n    return statuses[my_index_];\n  }\n\nprotected:\n  const size_t my_index_;\n};\n\n/**\n * Factory method to build a matcher given a match config. Calling this function may end\n * up recursively building many matchers, which will all be added to the passed in vector\n * of matchers. 
See the comments in matcher.h for the general structure of how matchers work.\n */\nvoid buildMatcher(const envoy::config::common::matcher::v3::MatchPredicate& match_config,\n                  std::vector<MatcherPtr>& matchers);\n\n/**\n * Base class for logic matchers that need to forward update calls to child matchers.\n */\nclass LogicMatcherBase : public Matcher {\npublic:\n  using Matcher::Matcher;\n\n  void onNewStream(MatchStatusVector& statuses) const override {\n    updateLocalStatus(statuses,\n                      [](Matcher& m, MatchStatusVector& statuses) { m.onNewStream(statuses); });\n  }\n  void onHttpRequestHeaders(const Http::RequestHeaderMap& request_headers,\n                            MatchStatusVector& statuses) const override {\n    updateLocalStatus(statuses, [&request_headers](Matcher& m, MatchStatusVector& statuses) {\n      m.onHttpRequestHeaders(request_headers, statuses);\n    });\n  }\n  void onHttpRequestTrailers(const Http::RequestTrailerMap& request_trailers,\n                             MatchStatusVector& statuses) const override {\n    updateLocalStatus(statuses, [&request_trailers](Matcher& m, MatchStatusVector& statuses) {\n      m.onHttpRequestTrailers(request_trailers, statuses);\n    });\n  }\n  void onHttpResponseHeaders(const Http::ResponseHeaderMap& response_headers,\n                             MatchStatusVector& statuses) const override {\n    updateLocalStatus(statuses, [&response_headers](Matcher& m, MatchStatusVector& statuses) {\n      m.onHttpResponseHeaders(response_headers, statuses);\n    });\n  }\n  void onHttpResponseTrailers(const Http::ResponseTrailerMap& response_trailers,\n                              MatchStatusVector& statuses) const override {\n    updateLocalStatus(statuses, [&response_trailers](Matcher& m, MatchStatusVector& statuses) {\n      m.onHttpResponseTrailers(response_trailers, statuses);\n    });\n  }\n  void onRequestBody(const Buffer::Instance& data, MatchStatusVector& statuses) 
override {\n    updateLocalStatus(statuses, [&data](Matcher& m, MatchStatusVector& statuses) {\n      m.onRequestBody(data, statuses);\n    });\n  }\n  void onResponseBody(const Buffer::Instance& data, MatchStatusVector& statuses) override {\n    updateLocalStatus(statuses, [&data](Matcher& m, MatchStatusVector& statuses) {\n      m.onResponseBody(data, statuses);\n    });\n  }\n\nprotected:\n  using UpdateFunctor = std::function<void(Matcher&, MatchStatusVector&)>;\n  virtual void updateLocalStatus(MatchStatusVector& statuses,\n                                 const UpdateFunctor& functor) const PURE;\n};\n\n/**\n * Matcher for implementing set logic.\n */\nclass SetLogicMatcher : public LogicMatcherBase {\npublic:\n  enum class Type { And, Or };\n\n  SetLogicMatcher(const envoy::config::common::matcher::v3::MatchPredicate::MatchSet& configs,\n                  std::vector<MatcherPtr>& matchers, Type type);\n\nprivate:\n  void updateLocalStatus(MatchStatusVector& statuses, const UpdateFunctor& functor) const override;\n\n  std::vector<MatcherPtr>& matchers_;\n  std::vector<size_t> indexes_;\n  const Type type_;\n};\n\n/**\n * Not matcher.\n */\nclass NotMatcher : public LogicMatcherBase {\npublic:\n  NotMatcher(const envoy::config::common::matcher::v3::MatchPredicate& config,\n             std::vector<MatcherPtr>& matchers);\n\nprivate:\n  void updateLocalStatus(MatchStatusVector& statuses, const UpdateFunctor& functor) const override;\n\n  std::vector<MatcherPtr>& matchers_;\n  const size_t not_index_;\n};\n\n/**\n * A base class for a matcher that generally wants to return default values, but might override\n * a single update function.\n */\nclass SimpleMatcher : public Matcher {\npublic:\n  using Matcher::Matcher;\n\n  void onNewStream(MatchStatusVector&) const override {}\n  void onHttpRequestHeaders(const Http::RequestHeaderMap&, MatchStatusVector&) const override {}\n  void onHttpRequestTrailers(const Http::RequestTrailerMap&, MatchStatusVector&) const 
override {}\n  void onHttpResponseHeaders(const Http::ResponseHeaderMap&, MatchStatusVector&) const override {}\n  void onHttpResponseTrailers(const Http::ResponseTrailerMap&, MatchStatusVector&) const override {}\n  void onRequestBody(const Buffer::Instance&, MatchStatusVector&) override {}\n  void onResponseBody(const Buffer::Instance&, MatchStatusVector&) override {}\n};\n\n/**\n * Any matcher (always matches).\n */\nclass AnyMatcher : public SimpleMatcher {\npublic:\n  using SimpleMatcher::SimpleMatcher;\n\n  void onNewStream(MatchStatusVector& statuses) const override {\n    statuses[my_index_].matches_ = true;\n    statuses[my_index_].might_change_status_ = false;\n  }\n};\n\n/**\n * Base class for the various HTTP header matchers.\n */\nclass HttpHeaderMatcherBase : public SimpleMatcher {\npublic:\n  HttpHeaderMatcherBase(const envoy::config::common::matcher::v3::HttpHeadersMatch& config,\n                        const std::vector<MatcherPtr>& matchers);\n\nprotected:\n  void matchHeaders(const Http::HeaderMap& headers, MatchStatusVector& statuses) const;\n\n  const std::vector<Http::HeaderUtility::HeaderDataPtr> headers_to_match_;\n};\n\n/**\n * HTTP request headers matcher.\n */\nclass HttpRequestHeadersMatcher : public HttpHeaderMatcherBase {\npublic:\n  using HttpHeaderMatcherBase::HttpHeaderMatcherBase;\n\n  void onHttpRequestHeaders(const Http::RequestHeaderMap& request_headers,\n                            MatchStatusVector& statuses) const override {\n    matchHeaders(request_headers, statuses);\n  }\n};\n\n/**\n * HTTP request trailers matcher.\n */\nclass HttpRequestTrailersMatcher : public HttpHeaderMatcherBase {\npublic:\n  using HttpHeaderMatcherBase::HttpHeaderMatcherBase;\n\n  void onHttpRequestTrailers(const Http::RequestTrailerMap& request_trailers,\n                             MatchStatusVector& statuses) const override {\n    matchHeaders(request_trailers, statuses);\n  }\n};\n\n/**\n * HTTP response headers matcher.\n */\nclass 
HttpResponseHeadersMatcher : public HttpHeaderMatcherBase {\npublic:\n  using HttpHeaderMatcherBase::HttpHeaderMatcherBase;\n\n  void onHttpResponseHeaders(const Http::ResponseHeaderMap& response_headers,\n                             MatchStatusVector& statuses) const override {\n    matchHeaders(response_headers, statuses);\n  }\n};\n\n/**\n * HTTP response trailers matcher.\n */\nclass HttpResponseTrailersMatcher : public HttpHeaderMatcherBase {\npublic:\n  using HttpHeaderMatcherBase::HttpHeaderMatcherBase;\n\n  void onHttpResponseTrailers(const Http::ResponseTrailerMap& response_trailers,\n                              MatchStatusVector& statuses) const override {\n    matchHeaders(response_trailers, statuses);\n  }\n};\n\n/**\n * Base class for body matchers.\n */\nclass HttpBodyMatcherBase : public SimpleMatcher {\npublic:\n  HttpBodyMatcherBase(const std::vector<MatcherPtr>& matchers) : SimpleMatcher(matchers) {}\n\nprotected:\n  // Limit search to specified number of bytes.\n  // Value equal to zero means no limit.\n  uint32_t limit_{};\n};\n\n/**\n * Context is used by HttpGenericBodyMatcher to:\n * - track how many bytes has been processed\n * - track patterns which have been found\n * - store last several seen bytes of the HTTP body (when pattern starts at the end of previous body\n *   chunk and continues at the beginning of the next body chunk)\n */\nclass HttpGenericBodyMatcherCtx : public MatcherCtx {\npublic:\n  HttpGenericBodyMatcherCtx(const std::shared_ptr<std::vector<std::string>>& patterns,\n                            size_t overlap_size)\n      : patterns_(patterns) {\n    // Initialize overlap_ buffer's capacity to fit the longest pattern - 1.\n    // The length of the longest pattern is known and passed here as overlap_size.\n    patterns_index_.resize(patterns_->size());\n    std::iota(patterns_index_.begin(), patterns_index_.end(), 0);\n    overlap_.reserve(overlap_size);\n    capacity_ = overlap_size;\n  }\n  
~HttpGenericBodyMatcherCtx() override = default;\n\n  // The context is initialized per each http request. The patterns_\n  // shared pointer attaches to matcher's list of patterns, so patterns\n  // can be referenced without copying data.\n  const std::shared_ptr<const std::vector<std::string>> patterns_;\n  // List stores indexes of patterns in patterns_ shared memory which\n  // still need to be located in the body. When a pattern is found\n  // its index is removed from the list.\n  // When all patterns have been found, the list is empty.\n  std::list<uint32_t> patterns_index_;\n  // Buffer to store the last bytes from previous body chunk(s).\n  // It will store only as many bytes as is the length of the longest\n  // pattern to be found minus 1.\n  // It is necessary to locate patterns which are spread across 2 or more\n  // body chunks.\n  std::vector<char> overlap_;\n  // capacity_ tells how many bytes should be buffered. overlap_'s initial\n  // capacity is set to the length of the longest pattern - 1. 
As patterns\n  // are found, there is a possibility that not as many bytes are required to be buffered.\n  // It must be tracked outside of vector, because vector::reserve does not\n  // change capacity when new value is lower than current capacity.\n  uint32_t capacity_{};\n  // processed_bytes_ tracks how many bytes of HTTP body have been processed.\n  uint32_t processed_bytes_{};\n};\n\nclass HttpGenericBodyMatcher : public HttpBodyMatcherBase {\npublic:\n  HttpGenericBodyMatcher(const envoy::config::common::matcher::v3::HttpGenericBodyMatch& config,\n                         const std::vector<MatcherPtr>& matchers);\n\nprotected:\n  void onBody(const Buffer::Instance&, MatchStatusVector&);\n  void onNewStream(MatchStatusVector& statuses) const override {\n    // Allocate a new context used for the new stream.\n    statuses[my_index_].ctx_ =\n        std::make_unique<HttpGenericBodyMatcherCtx>(patterns_, overlap_size_);\n    statuses[my_index_].matches_ = false;\n    statuses[my_index_].might_change_status_ = true;\n  }\n  bool locatePatternAcrossChunks(const std::string& pattern, const Buffer::Instance& data,\n                                 const HttpGenericBodyMatcherCtx* ctx);\n  void bufferLastBytes(const Buffer::Instance& data, HttpGenericBodyMatcherCtx* ctx);\n\n  size_t calcLongestPatternSize(const std::list<uint32_t>& indexes) const;\n  void resizeOverlapBuffer(HttpGenericBodyMatcherCtx* ctx);\n\nprivate:\n  // The following fields are initialized based on matcher config and are used\n  // by all HTTP matchers.\n  // List of strings which body must contain to get match.\n  std::shared_ptr<std::vector<std::string>> patterns_;\n  // Stores the length of the longest pattern.\n  size_t overlap_size_{};\n};\n\nclass HttpRequestGenericBodyMatcher : public HttpGenericBodyMatcher {\npublic:\n  using HttpGenericBodyMatcher::HttpGenericBodyMatcher;\n\n  void onRequestBody(const Buffer::Instance& data, MatchStatusVector& statuses) override {\n    onBody(data, 
statuses);\n  }\n};\n\nclass HttpResponseGenericBodyMatcher : public HttpGenericBodyMatcher {\npublic:\n  using HttpGenericBodyMatcher::HttpGenericBodyMatcher;\n\n  void onResponseBody(const Buffer::Instance& data, MatchStatusVector& statuses) override {\n    onBody(data, statuses);\n  }\n};\n\n} // namespace Matcher\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/proxy_protocol/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"proxy_protocol_header_lib\",\n    srcs = [\"proxy_protocol_header.cc\"],\n    hdrs = [\"proxy_protocol_header.h\"],\n    # This is used by the router, so considered core code.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//source/common/network:address_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/common/proxy_protocol/proxy_protocol_header.cc",
    "content": "#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n\n#include <sstream>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/address.h\"\n\n#include \"common/network/address_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace ProxyProtocol {\n\nvoid generateV1Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port,\n                      uint32_t dst_port, Network::Address::IpVersion ip_version,\n                      Buffer::Instance& out) {\n  std::ostringstream stream;\n  stream << PROXY_PROTO_V1_SIGNATURE;\n\n  switch (ip_version) {\n  case Network::Address::IpVersion::v4:\n    stream << PROXY_PROTO_V1_AF_INET << \" \";\n    break;\n  case Network::Address::IpVersion::v6:\n    stream << PROXY_PROTO_V1_AF_INET6 << \" \";\n    break;\n  }\n\n  stream << src_addr << \" \";\n  stream << dst_addr << \" \";\n  stream << src_port << \" \";\n  stream << dst_port << \"\\r\\n\";\n\n  out.add(stream.str());\n}\n\nvoid generateV1Header(const Network::Address::Ip& source_address,\n                      const Network::Address::Ip& dest_address, Buffer::Instance& out) {\n  generateV1Header(source_address.addressAsString(), dest_address.addressAsString(),\n                   source_address.port(), dest_address.port(), source_address.version(), out);\n}\n\nvoid generateV2Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port,\n                      uint32_t dst_port, Network::Address::IpVersion ip_version,\n                      Buffer::Instance& out) {\n  out.add(PROXY_PROTO_V2_SIGNATURE, PROXY_PROTO_V2_SIGNATURE_LEN);\n\n  const uint8_t version_and_command = PROXY_PROTO_V2_VERSION << 4 | PROXY_PROTO_V2_ONBEHALF_OF;\n  out.add(&version_and_command, 1);\n\n  uint8_t address_family_and_protocol;\n  switch (ip_version) {\n  case Network::Address::IpVersion::v4:\n    address_family_and_protocol = PROXY_PROTO_V2_AF_INET << 4;\n    break;\n  case 
Network::Address::IpVersion::v6:\n    address_family_and_protocol = PROXY_PROTO_V2_AF_INET6 << 4;\n    break;\n  }\n  address_family_and_protocol |= PROXY_PROTO_V2_TRANSPORT_STREAM;\n  out.add(&address_family_and_protocol, 1);\n\n  uint8_t addr_length[2]{0, 0};\n  switch (ip_version) {\n  case Network::Address::IpVersion::v4: {\n    addr_length[1] = PROXY_PROTO_V2_ADDR_LEN_INET;\n    out.add(addr_length, 2);\n\n    uint8_t addrs[8];\n    const auto net_src_addr =\n        Network::Address::Ipv4Instance(src_addr, src_port).ip()->ipv4()->address();\n    const auto net_dst_addr =\n        Network::Address::Ipv4Instance(dst_addr, dst_port).ip()->ipv4()->address();\n    memcpy(addrs, &net_src_addr, 4);\n    memcpy(&addrs[4], &net_dst_addr, 4);\n    out.add(addrs, 8);\n    break;\n  }\n  case Network::Address::IpVersion::v6: {\n    addr_length[1] = PROXY_PROTO_V2_ADDR_LEN_INET6;\n    out.add(addr_length, 2);\n\n    uint8_t addrs[32];\n    const auto net_src_addr =\n        Network::Address::Ipv6Instance(src_addr, src_port).ip()->ipv6()->address();\n    const auto net_dst_addr =\n        Network::Address::Ipv6Instance(dst_addr, dst_port).ip()->ipv6()->address();\n    memcpy(addrs, &net_src_addr, 16);\n    memcpy(&addrs[16], &net_dst_addr, 16);\n    out.add(addrs, 32);\n    break;\n  }\n  }\n\n  uint8_t ports[4];\n  const auto net_src_port = htons(static_cast<uint16_t>(src_port));\n  const auto net_dst_port = htons(static_cast<uint16_t>(dst_port));\n  memcpy(ports, &net_src_port, 2);\n  memcpy(&ports[2], &net_dst_port, 2);\n  out.add(ports, 4);\n}\n\nvoid generateV2Header(const Network::Address::Ip& source_address,\n                      const Network::Address::Ip& dest_address, Buffer::Instance& out) {\n  generateV2Header(source_address.addressAsString(), dest_address.addressAsString(),\n                   source_address.port(), dest_address.port(), source_address.version(), out);\n}\n\nvoid generateProxyProtoHeader(const envoy::config::core::v3::ProxyProtocolConfig& 
config,\n                              const Network::Connection& connection, Buffer::Instance& out) {\n  const Network::Address::Ip& dest_address = *connection.localAddress()->ip();\n  const Network::Address::Ip& source_address = *connection.remoteAddress()->ip();\n  if (config.version() == envoy::config::core::v3::ProxyProtocolConfig::V1) {\n    generateV1Header(source_address, dest_address, out);\n  } else if (config.version() == envoy::config::core::v3::ProxyProtocolConfig::V2) {\n    generateV2Header(source_address, dest_address, out);\n  }\n}\n\nvoid generateV2LocalHeader(Buffer::Instance& out) {\n  out.add(PROXY_PROTO_V2_SIGNATURE, PROXY_PROTO_V2_SIGNATURE_LEN);\n  const uint8_t addr_fam_protocol_and_length[4]{PROXY_PROTO_V2_VERSION << 4, 0, 0, 0};\n  out.add(addr_fam_protocol_and_length, 4);\n}\n\n} // namespace ProxyProtocol\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/proxy_protocol/proxy_protocol_header.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/core/v3/proxy_protocol.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/connection.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace ProxyProtocol {\n\n// See https://github.com/haproxy/haproxy/blob/master/doc/proxy-protocol.txt for definitions\n\nconstexpr char PROXY_PROTO_V1_SIGNATURE[] = \"PROXY \";\nconstexpr auto PROXY_PROTO_V1_AF_INET = \"TCP4\";\nconstexpr auto PROXY_PROTO_V1_AF_INET6 = \"TCP6\";\nconstexpr auto PROXY_PROTO_V1_UNKNOWN = \"UNKNOWN\";\n\nconstexpr char PROXY_PROTO_V2_SIGNATURE[] = \"\\x0d\\x0a\\x0d\\x0a\\x00\\x0d\\x0a\\x51\\x55\\x49\\x54\\x0a\";\n\nconstexpr uint32_t PROXY_PROTO_V1_SIGNATURE_LEN = 6;\nconstexpr uint32_t PROXY_PROTO_V2_SIGNATURE_LEN = 12;\nconstexpr uint32_t PROXY_PROTO_V2_HEADER_LEN = 16;\n\nconstexpr uint32_t PROXY_PROTO_V2_VERSION = 0x2;\nconstexpr uint32_t PROXY_PROTO_V2_ONBEHALF_OF = 0x1;\nconstexpr uint32_t PROXY_PROTO_V2_LOCAL = 0x0;\n\nconstexpr uint32_t PROXY_PROTO_V2_AF_INET = 0x1;\nconstexpr uint32_t PROXY_PROTO_V2_AF_INET6 = 0x2;\nconstexpr uint32_t PROXY_PROTO_V2_AF_UNIX = 0x3;\n\nconstexpr uint8_t PROXY_PROTO_V2_TRANSPORT_STREAM = 0x1;\nconstexpr uint8_t PROXY_PROTO_V2_TRANSPORT_DGRAM = 0x2;\n\nconstexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNSPEC = 0;\nconstexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_INET = 12;\nconstexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_INET6 = 36;\nconstexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNIX = 216;\n\n// Generates the v1 PROXY protocol header and adds it to the specified buffer\nvoid generateV1Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port,\n                      uint32_t dst_port, Network::Address::IpVersion ip_version,\n                      Buffer::Instance& out);\nvoid generateV1Header(const Network::Address::Ip& source_address,\n                      const Network::Address::Ip& dest_address, Buffer::Instance& 
out);\n\n// Generates the v2 PROXY protocol header and adds it to the specified buffer\n// TCP is assumed as the transport protocol\nvoid generateV2Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port,\n                      uint32_t dst_port, Network::Address::IpVersion ip_version,\n                      Buffer::Instance& out);\nvoid generateV2Header(const Network::Address::Ip& source_address,\n                      const Network::Address::Ip& dest_address, Buffer::Instance& out);\n\n// Generates the appropriate proxy proto header and appends it to the supplied buffer.\nvoid generateProxyProtoHeader(const envoy::config::core::v3::ProxyProtocolConfig& config,\n                              const Network::Connection& connection, Buffer::Instance& out);\n\n// Generates the v2 PROXY protocol local command header and adds it to the specified buffer\nvoid generateV2LocalHeader(Buffer::Instance& out);\n\n} // namespace ProxyProtocol\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/redis/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Redis proxy L4 network filter. Implements consistent hashing and observability for large redis\n# clusters.\n# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"cluster_refresh_manager_interface\",\n    hdrs = [\"cluster_refresh_manager.h\"],\n    deps = [\n    ],\n)\n\nenvoy_cc_library(\n    name = \"cluster_refresh_manager_lib\",\n    srcs = [\"cluster_refresh_manager_impl.cc\"],\n    hdrs = [\"cluster_refresh_manager_impl.h\"],\n    deps = [\n        \":cluster_refresh_manager_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:lock_guard_lib\",\n        \"//source/common/common:thread_annotations\",\n        \"//source/common/common:thread_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/common/redis/cluster_refresh_manager.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Redis {\n\nusing RefreshCB = std::function<void()>;\n\n/**\n * A manager for tracking events that would trigger a cluster refresh, and calling registered\n * callbacks when the error rate exceeds a configurable threshold (while ensuring that a minimum\n * time passes between calling the callback).\n */\nclass ClusterRefreshManager {\npublic:\n  class Handle {\n  public:\n    virtual ~Handle() = default;\n  };\n\n  using HandlePtr = std::unique_ptr<Handle>;\n\n  virtual ~ClusterRefreshManager() = default;\n\n  /**\n   * Notifies the manager that a redirection error has been received for a given cluster.\n   * @param cluster_name is the name of the cluster.\n   * @return bool true if a cluster's registered callback is scheduled on the main thread, false\n   * otherwise.\n   */\n  virtual bool onRedirection(const std::string& cluster_name) PURE;\n\n  /**\n   * Notifies the manager that a failure has been received for a given cluster.\n   * @param cluster_name is the name of the cluster.\n   * @return bool true if a cluster's registered callback is scheduled on the main thread, false\n   * otherwise.\n   */\n  virtual bool onFailure(const std::string& cluster_name) PURE;\n\n  /**\n   * Notifies the manager that a degraded host has been used for a given cluster.\n   * @param cluster_name is the name of the cluster.\n   * @return bool true if a cluster's registered callback is scheduled on the main thread, false\n   * otherwise.\n   */\n  virtual bool onHostDegraded(const std::string& cluster_name) PURE;\n\n  /**\n   * Register a cluster to be tracked by the manager (called by main thread only).\n   * @param cluster_name is the name of the cluster.\n   * @param min_time_between_triggering is the minimum amount of time that must pass 
between\n   * callback invocations (redirects ignored and not counted during this time).\n   * @param redirects_threshold is the number of redirects that must be reached to consider\n   * calling the callback.\n   * @param cb is the cluster callback function.\n   * @return HandlePtr is a smart pointer to an opaque Handle that will unregister the cluster upon\n   * destruction.\n   */\n  virtual HandlePtr registerCluster(const std::string& cluster_name,\n                                    std::chrono::milliseconds min_time_between_triggering,\n                                    const uint32_t redirects_threshold,\n                                    const uint32_t failure_threshold,\n                                    const uint32_t host_degraded_threshold,\n                                    const RefreshCB& cb) PURE;\n};\n\nusing ClusterRefreshManagerSharedPtr = std::shared_ptr<ClusterRefreshManager>;\n\n} // namespace Redis\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/redis/cluster_refresh_manager_impl.cc",
    "content": "#include \"extensions/common/redis/cluster_refresh_manager_impl.h\"\n\n#include \"envoy/singleton/manager.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Redis {\n\nSINGLETON_MANAGER_REGISTRATION(redis_refresh_manager);\n\nClusterRefreshManagerSharedPtr getClusterRefreshManager(Singleton::Manager& manager,\n                                                        Event::Dispatcher& main_thread_dispatcher,\n                                                        Upstream::ClusterManager& cm,\n                                                        TimeSource& time_source) {\n  return manager.getTyped<ClusterRefreshManager>(\n      SINGLETON_MANAGER_REGISTERED_NAME(redis_refresh_manager), [&] {\n        return std::make_shared<ClusterRefreshManagerImpl>(main_thread_dispatcher, cm, time_source);\n      });\n}\n\nbool ClusterRefreshManagerImpl::onFailure(const std::string& cluster_name) {\n  return onEvent(cluster_name, EventType::Failure);\n}\n\nbool ClusterRefreshManagerImpl::onHostDegraded(const std::string& cluster_name) {\n  return onEvent(cluster_name, EventType::DegradedHost);\n}\n\nbool ClusterRefreshManagerImpl::onRedirection(const std::string& cluster_name) {\n  return onEvent(cluster_name, EventType::Redirection);\n}\n\nbool ClusterRefreshManagerImpl::onEvent(const std::string& cluster_name, EventType event_type) {\n  ClusterInfoSharedPtr info;\n  {\n    // Hold the map lock to avoid a race condition with calls to unregisterCluster\n    // on the main thread.\n    Thread::LockGuard lock(map_mutex_);\n    auto it = info_map_.find(cluster_name);\n    if (it != info_map_.end()) {\n      info = it->second;\n    }\n  }\n  // No locks needed for thread safety while accessing clusterInfoSharedPtr members as\n  // all potentially modified members are atomic (redirects_count_, last_callback_time_ms_).\n  if (info.get()) {\n    const uint64_t now = std::chrono::duration_cast<std::chrono::milliseconds>(\n                   
          time_source_.monotonicTime().time_since_epoch())\n                             .count();\n    uint64_t last_callback_time_ms = info->last_callback_time_ms_.load();\n    if (!last_callback_time_ms ||\n        (now >= (last_callback_time_ms + info->min_time_between_triggering_.count()))) {\n      std::atomic<uint32_t>* count;\n      uint32_t threshold;\n      switch (event_type) {\n      case EventType::Redirection: {\n        count = &(info->redirects_count_);\n        threshold = info->redirects_threshold_;\n        break;\n      }\n      case EventType::DegradedHost: {\n        count = &(info->host_degraded_count_);\n        threshold = info->host_degraded_threshold_;\n        break;\n      }\n      case EventType::Failure: {\n        count = &(info->failures_count_);\n        threshold = info->failure_threshold_;\n        break;\n      }\n      }\n      if (threshold <= 0) {\n        return false;\n      }\n\n      // There're 3 updates to atomic values cross threads in this section of code\n      // a) ++(*count) >= threshold\n      // b) info->last_callback_time_ms_.compare_exchange_strong(last_callback_time_ms, now)\n      // c) *count = 0\n      // Let's say there're 2 threads T1 and T2, for all legal permutation of execution order a, b,\n      // c we need to ensure that post_callback is true for only 1 thread and if both a) and b) are\n      // true in 1 thread the count is 0 after this section. 
Here's a few different sequence that\n      // can potentially result in race conditions to consider\n\n      // Sequence 1:\n      // starting condition: threshold:2, count:1, T1.last_call_back = T2.last_call_back =\n      // info.last_call_back\n      // * T1.a (count: 2)\n      // * T1.b succeed (info.last_call_back: T1.now, T1.post_callback: true)\n      // * T1.c (count:0)\n      // * T2.a (count: 1, T2.post_callback: false)\n      // * T2.b is skip since T2.a is false\n      // * T2.c will still be triggered since info.last_call_back is now changed by T1 (count: 0)\n      //\n      // Sequence 2:\n      // starting condition: threshold:2, count:1, T1.last_call_back = T2.last_call_back =\n      // info.last_call_back\n      // * T1.a (count: 2)\n      // * T2.a (count: 3)\n      // * T1.b succeed (info.last_call_back: T1.now, post_callback: true)\n      // * T2.b failed due since info.last_call_back is now T1.now\n      // * T1.c (count:0)\n      // * T2.c (count:0) note we can't use count.decrement here since count is already 0\n      //\n      // Sequence 3:\n      // starting condition: threshold:2, count:1, T1.last_call_back == T2.last_call_back ==\n      // info.last_call_back\n      // * T1.a (count: 1, T1.post_callback: false)\n      // * T1.b skip since T1.a is false\n      // * T2.a (count: 2, T2.post_callback: true)\n      // * T2.b succeed (info.last_call_back = T2.now)\n      // * T2.c (count: 0)\n      // * T1.c will be triggered since info.last_call_back is changed by T2 (count: 0)\n\n      bool post_callback = false;\n      // ignore redirects during min time between triggering\n      if ((++(*count) >= threshold) &&\n          (info->last_callback_time_ms_.compare_exchange_strong(last_callback_time_ms, now))) {\n        // last_callback_time_ms_ successfully updated without any changes since it was\n        // initially read. This thread is allowed to post a call to the registered callback\n        // on the main thread. 
Otherwise, the thread would be ignored to prevent over-triggering\n        // cluster callbacks.\n        post_callback = true;\n      }\n\n      // If a callback should be triggered(in this or some other thread) signaled by the changed\n      // last callback time, we reset the count to 0\n      if (post_callback || info->last_callback_time_ms_.load() != last_callback_time_ms) {\n        *count = 0;\n      }\n\n      if (post_callback) {\n        main_thread_dispatcher_.post([this, cluster_name, info]() {\n          // Ensure that cluster is still active before calling callback.\n          auto map = cm_.clusters();\n          auto it = map.find(cluster_name);\n          if (it != map.end()) {\n            info->cb_();\n          }\n        });\n        return true;\n      }\n    }\n  }\n  return false;\n}\n\nClusterRefreshManagerImpl::HandlePtr ClusterRefreshManagerImpl::registerCluster(\n    const std::string& cluster_name, std::chrono::milliseconds min_time_between_triggering,\n    const uint32_t redirects_threshold, const uint32_t failure_threshold,\n    const uint32_t host_degraded_threshold, const RefreshCB& cb) {\n  Thread::LockGuard lock(map_mutex_);\n  ClusterInfoSharedPtr info =\n      std::make_shared<ClusterInfo>(cluster_name, min_time_between_triggering, redirects_threshold,\n                                    failure_threshold, host_degraded_threshold, cb);\n  info_map_[cluster_name] = info;\n\n  return std::make_unique<ClusterRefreshManagerImpl::HandleImpl>(this, info);\n}\n\nvoid ClusterRefreshManagerImpl::unregisterCluster(const ClusterInfoSharedPtr& cluster_info) {\n  Thread::LockGuard lock(map_mutex_);\n  info_map_.erase(cluster_info->cluster_name_);\n}\n\n} // namespace Redis\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/redis/cluster_refresh_manager_impl.h",
    "content": "#pragma once\n\n#include <array>\n#include <atomic>\n#include <numeric>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n\n#include \"extensions/common/redis/cluster_refresh_manager.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Redis {\n\nclass ClusterRefreshManagerImpl : public ClusterRefreshManager,\n                                  public Envoy::Singleton::Instance,\n                                  public std::enable_shared_from_this<ClusterRefreshManagerImpl> {\npublic:\n  friend class ClusterRefreshManagerTest;\n\n  /**\n   * The information that the manager keeps for each cluster upon registration.\n   */\n  struct ClusterInfo {\n    ClusterInfo(std::string cluster_name, std::chrono::milliseconds min_time_between_triggering,\n                const uint32_t redirects_threshold, const uint32_t failure_threshold,\n                const uint32_t host_degraded_threshold, RefreshCB cb)\n        : cluster_name_(std::move(cluster_name)),\n          min_time_between_triggering_(min_time_between_triggering),\n          redirects_threshold_(redirects_threshold), failure_threshold_(failure_threshold),\n          host_degraded_threshold_(host_degraded_threshold), cb_(std::move(cb)) {}\n    std::string cluster_name_;\n    std::atomic<uint64_t> last_callback_time_ms_{};\n    std::atomic<uint32_t> redirects_count_{};\n    std::atomic<uint32_t> failures_count_{};\n    std::atomic<uint32_t> host_degraded_count_{};\n    std::chrono::milliseconds min_time_between_triggering_;\n    const uint32_t redirects_threshold_;\n    const uint32_t failure_threshold_;\n    const uint32_t host_degraded_threshold_;\n    RefreshCB cb_;\n  };\n\n  using ClusterInfoSharedPtr = std::shared_ptr<ClusterInfo>;\n\n  class HandleImpl : public Handle {\n  public:\n    
HandleImpl(ClusterRefreshManagerImpl* mgr, ClusterInfoSharedPtr& cluster_info)\n        : manager_(mgr->shared_from_this()), cluster_info_(cluster_info) {}\n\n    ~HandleImpl() override { manager_->unregisterCluster(cluster_info_); }\n\n  private:\n    const std::shared_ptr<ClusterRefreshManagerImpl> manager_;\n    const std::shared_ptr<ClusterInfo> cluster_info_;\n  };\n\n  ClusterRefreshManagerImpl(Event::Dispatcher& main_thread_dispatcher, Upstream::ClusterManager& cm,\n                            TimeSource& time_source)\n      : main_thread_dispatcher_(main_thread_dispatcher), cm_(cm), time_source_(time_source) {}\n\n  bool onRedirection(const std::string& cluster_name) override;\n  bool onFailure(const std::string& cluster_name) override;\n  bool onHostDegraded(const std::string& cluster_name) override;\n\n  HandlePtr registerCluster(const std::string& cluster_name,\n                            std::chrono::milliseconds min_time_between_triggering,\n                            const uint32_t redirects_threshold, const uint32_t failure_threshold,\n                            const uint32_t host_degraded_threshold, const RefreshCB& cb) override;\n\nprivate:\n  void unregisterCluster(const ClusterInfoSharedPtr& cluster_info);\n  /**\n   * The type of events that can trigger discovery\n   */\n  enum EventType {\n    // MOVE or ASK redirection\n    Redirection,\n    // Failure\n    Failure,\n    // Sending request to degraded/unhealthy host\n    DegradedHost\n  };\n\n  bool onEvent(const std::string& cluster_name, EventType event_type);\n\n  Event::Dispatcher& main_thread_dispatcher_;\n  Upstream::ClusterManager& cm_;\n  TimeSource& time_source_;\n  std::map<std::string, ClusterInfoSharedPtr> info_map_ ABSL_GUARDED_BY(map_mutex_);\n  Thread::MutexBasicLockable map_mutex_;\n};\n\nClusterRefreshManagerSharedPtr getClusterRefreshManager(Singleton::Manager& manager,\n                                                        Event::Dispatcher& main_thread_dispatcher,\n   
                                                     Upstream::ClusterManager& cm,\n                                                        TimeSource& time_source);\n} // namespace Redis\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/sqlutils/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"sqlutils_lib\",\n    srcs = [\"sqlutils.cc\"],\n    hdrs = [\"sqlutils.h\"],\n    external_deps = [\"sqlparser\"],\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/common/sqlutils/sqlutils.cc",
    "content": "#include \"extensions/common/sqlutils/sqlutils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace SQLUtils {\n\nbool SQLUtils::setMetadata(const std::string& query, const DecoderAttributes& attr,\n                           ProtobufWkt::Struct& metadata) {\n  hsql::SQLParserResult result;\n\n  hsql::SQLParser::parse(query, &result);\n\n  if (!result.isValid()) {\n    return false;\n  }\n\n  std::string database;\n  // Check if the attributes map contains database name.\n  const auto it = attr.find(\"database\");\n  if (it != attr.end()) {\n    database = absl::StrCat(\".\", it->second);\n  }\n\n  auto& fields = *metadata.mutable_fields();\n\n  for (auto i = 0u; i < result.size(); ++i) {\n    if (result.getStatement(i)->type() == hsql::StatementType::kStmtShow) {\n      continue;\n    }\n    hsql::TableAccessMap table_access_map;\n    // Get names of accessed tables.\n    result.getStatement(i)->tablesAccessed(table_access_map);\n    for (auto& it : table_access_map) {\n      auto& operations = *fields[it.first + database].mutable_list_value();\n      // For each table get names of operations performed on that table.\n      for (const auto& ot : it.second) {\n        operations.add_values()->set_string_value(ot);\n      }\n    }\n  }\n\n  return true;\n}\n\n} // namespace SQLUtils\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/sqlutils/sqlutils.h",
    "content": "#include \"common/protobuf/utility.h\"\n\n#include \"include/sqlparser/SQLParser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace SQLUtils {\n\nclass SQLUtils {\npublic:\n  using DecoderAttributes = std::map<std::string, std::string>;\n  /**\n   * Method parses SQL query string and writes output to metadata.\n   * @param query supplies SQL statement.\n   * @param attr supplies attributes which cannot be extracted from SQL query but are\n   *    required to create proper metadata. For example database name may be sent\n   *    by a client when it initially connects to the server, not along each SQL query.\n   * @param metadata supplies placeholder where metadata should be written.\n   * @return True if parsing was successful and False if parsing failed.\n   *         If True was returned the metadata contains result of parsing. The results are\n   *         stored in metadata.mutable_fields.\n   **/\n  static bool setMetadata(const std::string& query, const DecoderAttributes& attr,\n                          ProtobufWkt::Struct& metadata);\n};\n\n} // namespace SQLUtils\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/tap/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"tap_interface\",\n    hdrs = [\"tap.h\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/extensions/common/matcher:matcher_lib\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tap_config_base\",\n    srcs = [\"tap_config_base.cc\"],\n    hdrs = [\"tap_config_base.h\"],\n    deps = [\n        \":tap_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/extensions/common/matcher:matcher_lib\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"admin\",\n    srcs = [\"admin.cc\"],\n    hdrs = [\"admin.h\"],\n    deps = [\n        \":tap_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"extension_config_base\",\n    srcs = [\"extension_config_base.cc\"],\n    hdrs = [\"extension_config_base.h\"],\n    deps = [\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/extensions/common/tap:admin\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/common/tap/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/common/tap/admin.cc",
    "content": "#include \"extensions/common/tap/admin.h\"\n\n#include \"envoy/admin/v3/tap.pb.h\"\n#include \"envoy/admin/v3/tap.pb.validate.h\"\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/wrapper.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\n\n// Singleton registration via macro defined in envoy/singleton/manager.h\nSINGLETON_MANAGER_REGISTRATION(tap_admin_handler);\n\nAdminHandlerSharedPtr AdminHandler::getSingleton(Server::Admin& admin,\n                                                 Singleton::Manager& singleton_manager,\n                                                 Event::Dispatcher& main_thread_dispatcher) {\n  return singleton_manager.getTyped<AdminHandler>(\n      SINGLETON_MANAGER_REGISTERED_NAME(tap_admin_handler), [&admin, &main_thread_dispatcher] {\n        return std::make_shared<AdminHandler>(admin, main_thread_dispatcher);\n      });\n}\n\nAdminHandler::AdminHandler(Server::Admin& admin, Event::Dispatcher& main_thread_dispatcher)\n    : admin_(admin), main_thread_dispatcher_(main_thread_dispatcher) {\n  const bool rc =\n      admin_.addHandler(\"/tap\", \"tap filter control\", MAKE_ADMIN_HANDLER(handler), true, true);\n  RELEASE_ASSERT(rc, \"/tap admin endpoint is taken\");\n}\n\nAdminHandler::~AdminHandler() {\n  const bool rc = admin_.removeHandler(\"/tap\");\n  ASSERT(rc);\n}\n\nHttp::Code AdminHandler::handler(absl::string_view, Http::HeaderMap&, Buffer::Instance& response,\n                                 Server::AdminStream& admin_stream) {\n  if (attached_request_.has_value()) {\n    // TODO(mattlklein123): Consider supporting concurrent admin /tap streams. Right now we support\n    // a single stream as a simplification.\n    return badRequest(response, \"An attached /tap admin stream already exists. 
Detach it.\");\n  }\n\n  if (admin_stream.getRequestBody() == nullptr) {\n    return badRequest(response, \"/tap requires a JSON/YAML body\");\n  }\n\n  envoy::admin::v3::TapRequest tap_request;\n  try {\n    MessageUtil::loadFromYamlAndValidate(admin_stream.getRequestBody()->toString(), tap_request,\n                                         ProtobufMessage::getStrictValidationVisitor());\n  } catch (EnvoyException& e) {\n    return badRequest(response, e.what());\n  }\n\n  ENVOY_LOG(debug, \"tap admin request for config_id={}\", tap_request.config_id());\n  if (config_id_map_.count(tap_request.config_id()) == 0) {\n    return badRequest(\n        response, fmt::format(\"Unknown config id '{}'. No extension has registered with this id.\",\n                              tap_request.config_id()));\n  }\n  for (auto config : config_id_map_[tap_request.config_id()]) {\n    config->newTapConfig(std::move(*tap_request.mutable_tap_config()), this);\n  }\n\n  admin_stream.setEndStreamOnComplete(false);\n  admin_stream.addOnDestroyCallback([this] {\n    for (auto config : config_id_map_[attached_request_.value().config_id_]) {\n      ENVOY_LOG(debug, \"detach tap admin request for config_id={}\",\n                attached_request_.value().config_id_);\n      config->clearTapConfig();\n      attached_request_ = absl::nullopt;\n    }\n  });\n  attached_request_.emplace(tap_request.config_id(), &admin_stream);\n  return Http::Code::OK;\n}\n\nHttp::Code AdminHandler::badRequest(Buffer::Instance& response, absl::string_view error) {\n  ENVOY_LOG(debug, \"handler bad request: {}\", error);\n  response.add(error);\n  return Http::Code::BadRequest;\n}\n\nvoid AdminHandler::registerConfig(ExtensionConfig& config, const std::string& config_id) {\n  ASSERT(!config_id.empty());\n  ASSERT(config_id_map_[config_id].count(&config) == 0);\n  config_id_map_[config_id].insert(&config);\n}\n\nvoid AdminHandler::unregisterConfig(ExtensionConfig& config) {\n  
ASSERT(!config.adminId().empty());\n  std::string admin_id(config.adminId());\n  ASSERT(config_id_map_[admin_id].count(&config) == 1);\n  config_id_map_[admin_id].erase(&config);\n  if (config_id_map_[admin_id].empty()) {\n    config_id_map_.erase(admin_id);\n  }\n}\n\nvoid AdminHandler::AdminPerTapSinkHandle::submitTrace(\n    TraceWrapperPtr&& trace, envoy::config::tap::v3::OutputSink::Format format) {\n  ENVOY_LOG(debug, \"admin submitting buffered trace to main thread\");\n  // Convert to a shared_ptr, so we can send it to the main thread.\n  std::shared_ptr<envoy::data::tap::v3::TraceWrapper> shared_trace{std::move(trace)};\n  // The handle can be destroyed before the cross thread post is complete. Thus, we capture a\n  // reference to our parent.\n  parent_.main_thread_dispatcher_.post([&parent = parent_, trace = shared_trace, format]() {\n    if (!parent.attached_request_.has_value()) {\n      return;\n    }\n\n    std::string output_string;\n    switch (format) {\n    case envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING:\n    case envoy::config::tap::v3::OutputSink::JSON_BODY_AS_BYTES:\n      output_string = MessageUtil::getJsonStringFromMessage(*trace, true, true);\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n\n    ENVOY_LOG(debug, \"admin writing buffered trace to response\");\n    Buffer::OwnedImpl output_buffer{output_string};\n    parent.attached_request_.value().admin_stream_->getDecoderFilterCallbacks().encodeData(\n        output_buffer, false);\n  });\n}\n\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/tap/admin.h",
    "content": "#pragma once\n\n#include \"envoy/server/admin.h\"\n#include \"envoy/singleton/manager.h\"\n\n#include \"extensions/common/tap/tap.h\"\n\n#include \"absl/container/node_hash_set.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\n\nclass AdminHandler;\nusing AdminHandlerSharedPtr = std::shared_ptr<AdminHandler>;\n\n/**\n * Singleton /tap admin handler for admin management of tap configurations and output. This\n * handler is not installed and active unless the tap configuration specifically configures it.\n * TODO(mattklein123): We should allow the admin handler to always be installed in read only mode\n *                     so it's easier to debug the active tap configuration.\n */\nclass AdminHandler : public Singleton::Instance,\n                     public Extensions::Common::Tap::Sink,\n                     Logger::Loggable<Logger::Id::tap> {\npublic:\n  AdminHandler(Server::Admin& admin, Event::Dispatcher& main_thread_dispatcher);\n  ~AdminHandler() override;\n\n  /**\n   * Get the singleton admin handler. The handler will be created if it doesn't already exist,\n   * otherwise the existing handler will be returned.\n   */\n  static AdminHandlerSharedPtr getSingleton(Server::Admin& admin,\n                                            Singleton::Manager& singleton_manager,\n                                            Event::Dispatcher& main_thread_dispatcher);\n\n  /**\n   * Register a new extension config to the handler so that it can be admin managed.\n   * @param config supplies the config to register.\n   * @param config_id supplies the ID to use for managing the configuration. 
Multiple extensions\n   *        can use the same ID so they can be managed in aggregate (e.g., an HTTP filter on\n   *        many listeners).\n   */\n  void registerConfig(ExtensionConfig& config, const std::string& config_id);\n\n  /**\n   * Unregister an extension config from the handler.\n   * @param config supplies the previously registered config.\n   */\n  void unregisterConfig(ExtensionConfig& config);\n\n  // Extensions::Common::Tap::Sink\n  PerTapSinkHandlePtr createPerTapSinkHandle(uint64_t) override {\n    return std::make_unique<AdminPerTapSinkHandle>(*this);\n  }\n\nprivate:\n  struct AdminPerTapSinkHandle : public PerTapSinkHandle {\n    AdminPerTapSinkHandle(AdminHandler& parent) : parent_(parent) {}\n\n    // Extensions::Common::Tap::PerTapSinkHandle\n    void submitTrace(TraceWrapperPtr&& trace,\n                     envoy::config::tap::v3::OutputSink::Format format) override;\n\n    AdminHandler& parent_;\n  };\n\n  struct AttachedRequest {\n    AttachedRequest(std::string config_id, Server::AdminStream* admin_stream)\n        : config_id_(std::move(config_id)), admin_stream_(admin_stream) {}\n\n    const std::string config_id_;\n    const Server::AdminStream* admin_stream_;\n  };\n\n  Http::Code handler(absl::string_view path_and_query, Http::HeaderMap& response_headers,\n                     Buffer::Instance& response, Server::AdminStream& admin_stream);\n  Http::Code badRequest(Buffer::Instance& response, absl::string_view error);\n\n  Server::Admin& admin_;\n  Event::Dispatcher& main_thread_dispatcher_;\n  absl::node_hash_map<std::string, absl::node_hash_set<ExtensionConfig*>> config_id_map_;\n  absl::optional<const AttachedRequest> attached_request_;\n};\n\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/tap/extension_config_base.cc",
    "content": "#include \"extensions/common/tap/extension_config_base.h\"\n\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/extensions/common/tap/v3/common.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\n\nExtensionConfigBase::ExtensionConfigBase(\n    const envoy::extensions::common::tap::v3::CommonExtensionConfig proto_config,\n    TapConfigFactoryPtr&& config_factory, Server::Admin& admin,\n    Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls,\n    Event::Dispatcher& main_thread_dispatcher)\n    : proto_config_(proto_config), config_factory_(std::move(config_factory)),\n      tls_slot_(tls.allocateSlot()) {\n  tls_slot_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<TlsFilterConfig>();\n  });\n\n  switch (proto_config_.config_type_case()) {\n  case envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::kAdminConfig: {\n    admin_handler_ = AdminHandler::getSingleton(admin, singleton_manager, main_thread_dispatcher);\n    admin_handler_->registerConfig(*this, proto_config_.admin_config().config_id());\n    ENVOY_LOG(debug, \"initializing tap extension with admin endpoint (config_id={})\",\n              proto_config_.admin_config().config_id());\n    break;\n  }\n  case envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::kStaticConfig: {\n    // Right now only one sink is supported.\n    ASSERT(proto_config_.static_config().output_config().sinks().size() == 1);\n    if (proto_config_.static_config().output_config().sinks()[0].output_sink_type_case() ==\n        envoy::config::tap::v3::OutputSink::OutputSinkTypeCase::kStreamingAdmin) {\n      // Require that users do not specify a streaming admin with static configuration.\n      throw EnvoyException(\n          fmt::format(\"Error: Specifying admin streaming output without configuring admin.\"));\n    }\n    
installNewTap(envoy::config::tap::v3::TapConfig(proto_config_.static_config()), nullptr);\n    ENVOY_LOG(debug, \"initializing tap extension with static config\");\n    break;\n  }\n  case envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::kTapdsConfig: {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  default: {\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  }\n}\n\nExtensionConfigBase::~ExtensionConfigBase() {\n  if (admin_handler_) {\n    admin_handler_->unregisterConfig(*this);\n  }\n}\n\nconst absl::string_view ExtensionConfigBase::adminId() {\n  // It is only possible to get here if we had an admin config and registered with the admin\n  // handler.\n  ASSERT(proto_config_.has_admin_config());\n  return proto_config_.admin_config().config_id();\n}\n\nvoid ExtensionConfigBase::clearTapConfig() {\n  tls_slot_->runOnAllThreads([](ThreadLocal::ThreadLocalObjectSharedPtr object)\n                                 -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    object->asType<TlsFilterConfig>().config_ = nullptr;\n    return object;\n  });\n}\n\nvoid ExtensionConfigBase::installNewTap(envoy::config::tap::v3::TapConfig&& proto_config,\n                                        Sink* admin_streamer) {\n  TapConfigSharedPtr new_config =\n      config_factory_->createConfigFromProto(std::move(proto_config), admin_streamer);\n  tls_slot_->runOnAllThreads([new_config](ThreadLocal::ThreadLocalObjectSharedPtr object)\n                                 -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    object->asType<TlsFilterConfig>().config_ = new_config;\n    return object;\n  });\n}\n\nvoid ExtensionConfigBase::newTapConfig(envoy::config::tap::v3::TapConfig&& proto_config,\n                                       Sink* admin_streamer) {\n  installNewTap(envoy::config::tap::v3::TapConfig(proto_config), admin_streamer);\n}\n\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/tap/extension_config_base.h",
    "content": "#pragma once\n\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/extensions/common/tap/v3/common.pb.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"extensions/common/tap/admin.h\"\n#include \"extensions/common/tap/tap.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\n\n/**\n * Base class for tap extension configuration. Used by all tap extensions.\n */\nclass ExtensionConfigBase : public ExtensionConfig, Logger::Loggable<Logger::Id::tap> {\npublic:\n  // Extensions::Common::Tap::ExtensionConfig\n  void clearTapConfig() override;\n  const absl::string_view adminId() override;\n  void newTapConfig(envoy::config::tap::v3::TapConfig&& proto_config,\n                    Sink* admin_streamer) override;\n\nprotected:\n  ExtensionConfigBase(const envoy::extensions::common::tap::v3::CommonExtensionConfig proto_config,\n                      TapConfigFactoryPtr&& config_factory, Server::Admin& admin,\n                      Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls,\n                      Event::Dispatcher& main_thread_dispatcher);\n  ~ExtensionConfigBase() override;\n\n  // All tap configurations derive from TapConfig for type safety. In order to use a common\n  // extension base class (with TLS logic, etc.) we must dynamic cast to the actual tap\n  // configuration type that the extension expects (and is created by the configuration factory).\n  template <class T> std::shared_ptr<T> currentConfigHelper() const {\n    return std::dynamic_pointer_cast<T>(tls_slot_->getTyped<TlsFilterConfig>().config_);\n  }\n\nprivate:\n  // Holds the functionality of installing a new tap config. 
This is the underlying method to the\n  // virtual method newTapConfig.\n  void installNewTap(envoy::config::tap::v3::TapConfig&& proto_config, Sink* admin_streamer);\n\n  struct TlsFilterConfig : public ThreadLocal::ThreadLocalObject {\n    TapConfigSharedPtr config_;\n  };\n\n  const envoy::extensions::common::tap::v3::CommonExtensionConfig proto_config_;\n  TapConfigFactoryPtr config_factory_;\n  ThreadLocal::SlotPtr tls_slot_;\n  AdminHandlerSharedPtr admin_handler_;\n};\n\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/tap/tap.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/wrapper.pb.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"extensions/common/matcher/matcher.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\n\nusing Matcher = Envoy::Extensions::Common::Matcher::Matcher;\n\nusing TraceWrapperPtr = std::unique_ptr<envoy::data::tap::v3::TraceWrapper>;\ninline TraceWrapperPtr makeTraceWrapper() {\n  return std::make_unique<envoy::data::tap::v3::TraceWrapper>();\n}\n\n/**\n * A handle for a per-tap sink. This allows submitting either a single buffered trace, or a series\n * of trace segments that the sink can aggregate in whatever way it chooses.\n */\nclass PerTapSinkHandle {\npublic:\n  virtual ~PerTapSinkHandle() = default;\n\n  /**\n   * Send a trace wrapper to the sink. This may be a fully buffered trace or a segment of a larger\n   * trace depending on the contents of the wrapper.\n   * @param trace supplies the trace to send.\n   * @param format supplies the output format to use.\n   */\n  virtual void submitTrace(TraceWrapperPtr&& trace,\n                           envoy::config::tap::v3::OutputSink::Format format) PURE;\n};\n\nusing PerTapSinkHandlePtr = std::unique_ptr<PerTapSinkHandle>;\n\n/**\n * Wraps potentially multiple PerTapSinkHandle instances and any common pre-submit functionality.\n * Each active tap will have a reference to one of these, which in turn may have references to\n * one or more PerTapSinkHandle.\n */\nclass PerTapSinkHandleManager {\npublic:\n  virtual ~PerTapSinkHandleManager() = default;\n\n  /**\n   * Submit a buffered or streamed trace segment to all managed per-tap sink handles.\n   */\n  virtual void submitTrace(TraceWrapperPtr&& trace) PURE;\n};\n\nusing PerTapSinkHandleManagerPtr = std::unique_ptr<PerTapSinkHandleManager>;\n\n/**\n * Sink for sending tap messages.\n 
*/\nclass Sink {\npublic:\n  virtual ~Sink() = default;\n\n  /**\n   * Create a per tap sink handle for use in submitting either buffered traces or trace segments.\n   * @param trace_id supplies a locally unique trace ID. Some sinks use this for output generation.\n   */\n  virtual PerTapSinkHandlePtr createPerTapSinkHandle(uint64_t trace_id) PURE;\n};\n\nusing SinkPtr = std::unique_ptr<Sink>;\n\n/**\n * Generic configuration for a tap extension (filter, transport socket, etc.).\n */\nclass ExtensionConfig {\npublic:\n  virtual ~ExtensionConfig() = default;\n\n  /**\n   * @return the ID to use for admin extension configuration tracking (if applicable).\n   */\n  virtual const absl::string_view adminId() PURE;\n\n  /**\n   * Clear any active tap configuration.\n   */\n  virtual void clearTapConfig() PURE;\n\n  /**\n   * Install a new tap configuration.\n   * @param proto_config supplies the generic tap config to install. Not all configuration fields\n   *        may be applicable to an extension (e.g. HTTP fields). The extension is free to fail\n   *        the configuration load via exception if it wishes.\n   * @param admin_streamer supplies the singleton admin sink to use for output if the configuration\n   *        specifies that output type. May not be used if the configuration does not specify\n   *        admin output. May be nullptr if admin is not used to supply the config.\n   */\n  virtual void newTapConfig(envoy::config::tap::v3::TapConfig&& proto_config,\n                            Sink* admin_streamer) PURE;\n};\n\n/**\n * Abstract tap configuration base class.\n */\nclass TapConfig {\npublic:\n  virtual ~TapConfig() = default;\n\n  /**\n   * Return a per-tap sink handle manager for use by a tap session.\n   * @param trace_id supplies a locally unique trace ID. 
Some sinks use this for output generation.\n   */\n  virtual PerTapSinkHandleManagerPtr createPerTapSinkHandleManager(uint64_t trace_id) PURE;\n\n  /**\n   * Return the maximum received bytes that can be buffered in memory. Streaming taps are still\n   * subject to this limit depending on match status.\n   */\n  virtual uint32_t maxBufferedRxBytes() const PURE;\n\n  /**\n   * Return the maximum transmitted bytes that can be buffered in memory. Streaming taps are still\n   * subject to this limit depending on match status.\n   */\n  virtual uint32_t maxBufferedTxBytes() const PURE;\n\n  /**\n   * Return a new match status vector that is correctly sized for the number of matchers that are in\n   * the configuration.\n   */\n  virtual Matcher::MatchStatusVector createMatchStatusVector() const PURE;\n\n  /**\n   * Return the root matcher for use in updating a match status vector.\n   */\n  virtual const Matcher& rootMatcher() const PURE;\n\n  /**\n   * Non-const version of rootMatcher method.\n   */\n  Matcher& rootMatcher() {\n    return const_cast<Matcher&>(static_cast<const TapConfig&>(*this).rootMatcher());\n  }\n\n  /**\n   * Return whether the tap session should run in streaming or buffering mode.\n   */\n  virtual bool streaming() const PURE;\n};\n\nusing TapConfigSharedPtr = std::shared_ptr<TapConfig>;\n\n/**\n * Abstract tap configuration factory. Given a new generic tap configuration, produces an\n * extension specific tap configuration.\n */\nclass TapConfigFactory {\npublic:\n  virtual ~TapConfigFactory() = default;\n\n  /**\n   * @return a new configuration given a raw tap service config proto. 
See\n   * ExtensionConfig::newTapConfig() for param info.\n   */\n  virtual TapConfigSharedPtr createConfigFromProto(envoy::config::tap::v3::TapConfig&& proto_config,\n                                                   Sink* admin_streamer) PURE;\n};\n\nusing TapConfigFactoryPtr = std::unique_ptr<TapConfigFactory>;\n\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/tap/tap_config_base.cc",
    "content": "#include \"extensions/common/tap/tap_config_base.h\"\n\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/wrapper.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/common/matcher/matcher.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\n\nusing namespace Matcher;\n\nbool Utility::addBufferToProtoBytes(envoy::data::tap::v3::Body& output_body,\n                                    uint32_t max_buffered_bytes, const Buffer::Instance& data,\n                                    uint32_t buffer_start_offset, uint32_t buffer_length_to_copy) {\n  // TODO(mattklein123): Figure out if we can use the buffer API here directly in some way. This is\n  // is not trivial if we want to avoid extra copies since we end up appending to the existing\n  // protobuf string.\n\n  // Note that max_buffered_bytes is assumed to include any data already contained in output_bytes.\n  // This is to account for callers that may be tracking this over multiple body objects.\n  ASSERT(buffer_start_offset + buffer_length_to_copy <= data.length());\n  const uint32_t final_bytes_to_copy = std::min(max_buffered_bytes, buffer_length_to_copy);\n\n  Buffer::RawSliceVector slices = data.getRawSlices();\n  trimSlices(slices, buffer_start_offset, final_bytes_to_copy);\n  for (const Buffer::RawSlice& slice : slices) {\n    output_body.mutable_as_bytes()->append(static_cast<const char*>(slice.mem_), slice.len_);\n  }\n\n  if (final_bytes_to_copy < buffer_length_to_copy) {\n    output_body.set_truncated(true);\n    return true;\n  } else {\n    return false;\n  }\n}\n\nTapConfigBaseImpl::TapConfigBaseImpl(envoy::config::tap::v3::TapConfig&& proto_config,\n                                     
Common::Tap::Sink* admin_streamer)\n    : max_buffered_rx_bytes_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          proto_config.output_config(), max_buffered_rx_bytes, DefaultMaxBufferedBytes)),\n      max_buffered_tx_bytes_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          proto_config.output_config(), max_buffered_tx_bytes, DefaultMaxBufferedBytes)),\n      streaming_(proto_config.output_config().streaming()) {\n  ASSERT(proto_config.output_config().sinks().size() == 1);\n  // TODO(mattklein123): Add per-sink checks to make sure format makes sense. I.e., when using\n  // streaming, we should require the length delimited version of binary proto, etc.\n  sink_format_ = proto_config.output_config().sinks()[0].format();\n  switch (proto_config.output_config().sinks()[0].output_sink_type_case()) {\n  case envoy::config::tap::v3::OutputSink::OutputSinkTypeCase::kStreamingAdmin:\n    ASSERT(admin_streamer != nullptr, \"admin output must be configured via admin\");\n    // TODO(mattklein123): Graceful failure, error message, and test if someone specifies an\n    // admin stream output with the wrong format.\n    RELEASE_ASSERT(sink_format_ == envoy::config::tap::v3::OutputSink::JSON_BODY_AS_BYTES ||\n                       sink_format_ == envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING,\n                   \"admin output only supports JSON formats\");\n    sink_to_use_ = admin_streamer;\n    break;\n  case envoy::config::tap::v3::OutputSink::OutputSinkTypeCase::kFilePerTap:\n    sink_ =\n        std::make_unique<FilePerTapSink>(proto_config.output_config().sinks()[0].file_per_tap());\n    sink_to_use_ = sink_.get();\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  envoy::config::common::matcher::v3::MatchPredicate match;\n  if (proto_config.has_match()) {\n    // Use the match field whenever it is set.\n    match = proto_config.match();\n  } else if (proto_config.has_match_config()) {\n    // Fallback to use the deprecated match_config field and upgrade 
(wire cast) it to the new\n    // MatchPredicate which is backward compatible with the old MatchPredicate originally\n    // introduced in the Tap filter.\n    Config::VersionConverter::upgrade(proto_config.match_config(), match);\n  } else {\n    throw EnvoyException(fmt::format(\"Neither match nor match_config is set in TapConfig: {}\",\n                                     proto_config.DebugString()));\n  }\n  buildMatcher(match, matchers_);\n}\n\nconst Matcher& TapConfigBaseImpl::rootMatcher() const {\n  ASSERT(!matchers_.empty());\n  return *matchers_[0];\n}\n\nnamespace {\nvoid swapBytesToString(envoy::data::tap::v3::Body& body) {\n  body.set_allocated_as_string(body.release_as_bytes());\n}\n} // namespace\n\nvoid Utility::bodyBytesToString(envoy::data::tap::v3::TraceWrapper& trace,\n                                envoy::config::tap::v3::OutputSink::Format sink_format) {\n  // Swap the \"bytes\" string into the \"string\" string. This is done purely so that JSON\n  // serialization will serialize as a string vs. 
doing base64 encoding.\n  if (sink_format != envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING) {\n    return;\n  }\n\n  switch (trace.trace_case()) {\n  case envoy::data::tap::v3::TraceWrapper::TraceCase::kHttpBufferedTrace: {\n    auto* http_trace = trace.mutable_http_buffered_trace();\n    if (http_trace->has_request() && http_trace->request().has_body()) {\n      swapBytesToString(*http_trace->mutable_request()->mutable_body());\n    }\n    if (http_trace->has_response() && http_trace->response().has_body()) {\n      swapBytesToString(*http_trace->mutable_response()->mutable_body());\n    }\n    break;\n  }\n  case envoy::data::tap::v3::TraceWrapper::TraceCase::kHttpStreamedTraceSegment: {\n    auto* http_trace = trace.mutable_http_streamed_trace_segment();\n    if (http_trace->has_request_body_chunk()) {\n      swapBytesToString(*http_trace->mutable_request_body_chunk());\n    }\n    if (http_trace->has_response_body_chunk()) {\n      swapBytesToString(*http_trace->mutable_response_body_chunk());\n    }\n    break;\n  }\n  case envoy::data::tap::v3::TraceWrapper::TraceCase::kSocketBufferedTrace: {\n    auto* socket_trace = trace.mutable_socket_buffered_trace();\n    for (auto& event : *socket_trace->mutable_events()) {\n      if (event.has_read()) {\n        swapBytesToString(*event.mutable_read()->mutable_data());\n      } else {\n        ASSERT(event.has_write());\n        swapBytesToString(*event.mutable_write()->mutable_data());\n      }\n    }\n    break;\n  }\n  case envoy::data::tap::v3::TraceWrapper::TraceCase::kSocketStreamedTraceSegment: {\n    auto& event = *trace.mutable_socket_streamed_trace_segment()->mutable_event();\n    if (event.has_read()) {\n      swapBytesToString(*event.mutable_read()->mutable_data());\n    } else if (event.has_write()) {\n      swapBytesToString(*event.mutable_write()->mutable_data());\n    }\n    break;\n  }\n  case envoy::data::tap::v3::TraceWrapper::TraceCase::TRACE_NOT_SET:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  
}\n}\n\nvoid TapConfigBaseImpl::PerTapSinkHandleManagerImpl::submitTrace(TraceWrapperPtr&& trace) {\n  Utility::bodyBytesToString(*trace, parent_.sink_format_);\n  handle_->submitTrace(std::move(trace), parent_.sink_format_);\n}\n\nvoid FilePerTapSink::FilePerTapSinkHandle::submitTrace(\n    TraceWrapperPtr&& trace, envoy::config::tap::v3::OutputSink::Format format) {\n  if (!output_file_.is_open()) {\n    std::string path = fmt::format(\"{}_{}\", parent_.config_.path_prefix(), trace_id_);\n    switch (format) {\n    case envoy::config::tap::v3::OutputSink::PROTO_BINARY:\n      path += MessageUtil::FileExtensions::get().ProtoBinary;\n      break;\n    case envoy::config::tap::v3::OutputSink::PROTO_BINARY_LENGTH_DELIMITED:\n      path += MessageUtil::FileExtensions::get().ProtoBinaryLengthDelimited;\n      break;\n    case envoy::config::tap::v3::OutputSink::PROTO_TEXT:\n      path += MessageUtil::FileExtensions::get().ProtoText;\n      break;\n    case envoy::config::tap::v3::OutputSink::JSON_BODY_AS_BYTES:\n    case envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING:\n      path += MessageUtil::FileExtensions::get().Json;\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n\n    ENVOY_LOG_MISC(debug, \"Opening tap file for [id={}] to {}\", trace_id_, path);\n    // When reading and writing binary files, we need to be sure std::ios_base::binary\n    // is set, otherwise we will not get the expected results on Windows\n    output_file_.open(path, std::ios_base::binary);\n  }\n\n  ENVOY_LOG_MISC(trace, \"Tap for [id={}]: {}\", trace_id_, trace->DebugString());\n\n  switch (format) {\n  case envoy::config::tap::v3::OutputSink::PROTO_BINARY:\n    trace->SerializeToOstream(&output_file_);\n    break;\n  case envoy::config::tap::v3::OutputSink::PROTO_BINARY_LENGTH_DELIMITED: {\n    Protobuf::io::OstreamOutputStream stream(&output_file_);\n    Protobuf::io::CodedOutputStream coded_stream(&stream);\n    
coded_stream.WriteVarint32(trace->ByteSize());\n    trace->SerializeWithCachedSizes(&coded_stream);\n    break;\n  }\n  case envoy::config::tap::v3::OutputSink::PROTO_TEXT:\n    output_file_ << trace->DebugString();\n    break;\n  case envoy::config::tap::v3::OutputSink::JSON_BODY_AS_BYTES:\n  case envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING:\n    output_file_ << MessageUtil::getJsonStringFromMessage(*trace, true, true);\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/tap/tap_config_base.h",
    "content": "#pragma once\n\n#include <fstream>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/wrapper.pb.h\"\n\n#include \"extensions/common/matcher/matcher.h\"\n#include \"extensions/common/tap/tap.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\n\nusing Matcher = Envoy::Extensions::Common::Matcher::Matcher;\nusing MatcherPtr = Envoy::Extensions::Common::Matcher::MatcherPtr;\n\n/**\n * Common utilities for tapping.\n */\nclass Utility {\npublic:\n  /**\n   * Add body data to a tapped body message, taking into account the maximum bytes to buffer.\n   * @param output_body supplies the body message to buffer to.\n   * @param max_buffered_bytes supplies the maximum bytes to store, if truncation occurs the\n   *        truncation flag will be set.\n   * @param data supplies the data to buffer.\n   * @param buffer_start_offset supplies the offset within data to start buffering.\n   * @param buffer_length_to_copy supplies the length of the data to buffer.\n   * @return whether the buffered data was truncated or not.\n   */\n  static bool addBufferToProtoBytes(envoy::data::tap::v3::Body& output_body,\n                                    uint32_t max_buffered_bytes, const Buffer::Instance& data,\n                                    uint32_t buffer_start_offset, uint32_t buffer_length_to_copy);\n\n  /**\n   * Swap body as bytes to body as string if necessary in a trace wrapper.\n   */\n  static void bodyBytesToString(envoy::data::tap::v3::TraceWrapper& trace,\n                                envoy::config::tap::v3::OutputSink::Format sink_format);\n\n  /**\n   * Trim a container that contains buffer raw slices so that the slices start at an offset and\n   * only contain a specific length. 
No slices are removed from the container, but their length\n   * may be reduced to 0.\n   * TODO(mattklein123): This is split out to ease testing and also because we should ultimately\n   * move this directly into the buffer API. I would rather wait until the new buffer code merges\n   * before we do that.\n   */\n  template <typename T> static void trimSlices(T& slices, uint32_t start_offset, uint32_t length) {\n    for (auto& slice : slices) {\n      const uint32_t start_offset_trim = std::min<uint32_t>(start_offset, slice.len_);\n      slice.len_ -= start_offset_trim;\n      start_offset -= start_offset_trim;\n      if (slice.mem_ != nullptr) {\n        slice.mem_ = static_cast<char*>(slice.mem_) + start_offset_trim;\n      }\n\n      const uint32_t final_length = std::min<uint32_t>(length, slice.len_);\n      slice.len_ = final_length;\n      length -= final_length;\n    }\n  }\n};\n\n/**\n * Base class for all tap configurations.\n * TODO(mattklein123): This class will handle common functionality such as rate limiting, etc.\n */\nclass TapConfigBaseImpl : public virtual TapConfig {\npublic:\n  // A wrapper for a per tap sink handle and trace submission. 
If in the future we support\n  // multiple sinks we can easily do it here.\n  class PerTapSinkHandleManagerImpl : public PerTapSinkHandleManager {\n  public:\n    PerTapSinkHandleManagerImpl(TapConfigBaseImpl& parent, uint64_t trace_id)\n        : parent_(parent), handle_(parent.sink_to_use_->createPerTapSinkHandle(trace_id)) {}\n\n    // PerTapSinkHandleManager\n    void submitTrace(TraceWrapperPtr&& trace) override;\n\n  private:\n    TapConfigBaseImpl& parent_;\n    PerTapSinkHandlePtr handle_;\n  };\n\n  // TapConfig\n  PerTapSinkHandleManagerPtr createPerTapSinkHandleManager(uint64_t trace_id) override {\n    return std::make_unique<PerTapSinkHandleManagerImpl>(*this, trace_id);\n  }\n  uint32_t maxBufferedRxBytes() const override { return max_buffered_rx_bytes_; }\n  uint32_t maxBufferedTxBytes() const override { return max_buffered_tx_bytes_; }\n  Matcher::MatchStatusVector createMatchStatusVector() const override {\n    return Matcher::MatchStatusVector(matchers_.size());\n  }\n  const Matcher& rootMatcher() const override;\n  bool streaming() const override { return streaming_; }\n\nprotected:\n  TapConfigBaseImpl(envoy::config::tap::v3::TapConfig&& proto_config,\n                    Common::Tap::Sink* admin_streamer);\n\nprivate:\n  // This is the default setting for both RX/TX max buffered bytes. 
(This means that per tap, the\n  // maximum amount that can be buffered is 2x this value).\n  static constexpr uint32_t DefaultMaxBufferedBytes = 1024;\n\n  const uint32_t max_buffered_rx_bytes_;\n  const uint32_t max_buffered_tx_bytes_;\n  const bool streaming_;\n  Sink* sink_to_use_;\n  SinkPtr sink_;\n  envoy::config::tap::v3::OutputSink::Format sink_format_;\n  std::vector<MatcherPtr> matchers_;\n};\n\n/**\n * A tap sink that writes each tap trace to a discrete output file.\n */\nclass FilePerTapSink : public Sink {\npublic:\n  FilePerTapSink(const envoy::config::tap::v3::FilePerTapSink& config) : config_(config) {}\n\n  // Sink\n  PerTapSinkHandlePtr createPerTapSinkHandle(uint64_t trace_id) override {\n    return std::make_unique<FilePerTapSinkHandle>(*this, trace_id);\n  }\n\nprivate:\n  struct FilePerTapSinkHandle : public PerTapSinkHandle {\n    FilePerTapSinkHandle(FilePerTapSink& parent, uint64_t trace_id)\n        : parent_(parent), trace_id_(trace_id) {}\n\n    // PerTapSinkHandle\n    void submitTrace(TraceWrapperPtr&& trace,\n                     envoy::config::tap::v3::OutputSink::Format format) override;\n\n    FilePerTapSink& parent_;\n    const uint64_t trace_id_;\n    std::ofstream output_file_;\n  };\n\n  const envoy::config::tap::v3::FilePerTapSink config_;\n};\n\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/utility.h",
    "content": "#pragma once\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/runtime/runtime.h\"\n\n#include \"common/common/documentation_url.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Utility {\n\n/**\n * ExtensionNameUtil provides utilities for extension names.\n */\nclass ExtensionNameUtil {\npublic:\n  enum class Status { Warn, Block };\n\n  /**\n   * Checks the status of deprecated extension names and increments the deprecated feature stats\n   * counter if deprecated names are allowed.\n   *\n   * @param runtime Runtime::Loader used to determine if deprecated extension names are allowed.\n   * @return Status::Warn (allowed, warn) or Status::Block (disallowed, error)\n   */\n  static Status deprecatedExtensionNameStatus(\n      Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting()) {\n#ifdef ENVOY_DISABLE_DEPRECATED_FEATURES\n    UNREFERENCED_PARAMETER(runtime);\n    return Status::Block;\n#else\n    bool warn_only = true;\n\n    if (runtime && !runtime->snapshot().deprecatedFeatureEnabled(\n                       \"envoy.deprecated_features.allow_deprecated_extension_names\", true)) {\n      warn_only = false;\n    }\n\n    return warn_only ? Status::Warn : Status::Block;\n#endif\n  }\n\n  /**\n   * Checks the status of deprecated extension names. If deprecated extension names are allowed,\n   * it increments the deprecated feature stats counter. Generates a warning or error log message\n   * based on whether the name is allowed (warning) or not (error). 
The string parameters are used\n   * only to generate the log message.\n   *\n   * @param extension_type absl::string_view that contains the extension type, for logging\n   * @param deprecated_name absl::string_view that contains the deprecated name, for logging\n   * @param canonical_name absl::string_view that contains the canonical name, for logging\n   * @param runtime Runtime::Loader used to determine if deprecated extension names are allowed.\n   * @return true if deprecated extensions are allowed, false otherwise.\n   */\n  static bool\n  allowDeprecatedExtensionName(absl::string_view extension_type, absl::string_view deprecated_name,\n                               absl::string_view canonical_name,\n                               Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting()) {\n    auto status = deprecatedExtensionNameStatus(runtime);\n\n    if (status == Status::Warn) {\n      ENVOY_LOG_MISC(warn, \"{}\", message(extension_type, deprecated_name, canonical_name));\n      return true;\n    }\n\n    ENVOY_LOG_MISC(error, \"{}\", fatalMessage(extension_type, deprecated_name, canonical_name));\n    return false;\n  }\n\n  /**\n   * Checks the status of deprecated extension names. If deprecated extension names are allowed,\n   * it increments the deprecated feature stats counter and generates a log message. If not allowed,\n   * an exception is thrown. 
The passed strings are used only to generate the log or exception\n   * message.\n   *\n   * @param extension_type absl::string_view that contains the extension type, for logging\n   * @param deprecated_name absl::string_view that contains the deprecated name, for logging\n   * @param canonical_name absl::string_view that contains the canonical name, for logging\n   * @param runtime Runtime::Loader used to determine if deprecated extension names are allowed.\n   * @throw EnvoyException if the use of deprecated extension names is not allowed.\n   */\n  static void\n  checkDeprecatedExtensionName(absl::string_view extension_type, absl::string_view deprecated_name,\n                               absl::string_view canonical_name,\n                               Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting()) {\n    auto status = deprecatedExtensionNameStatus(runtime);\n\n    if (status == Status::Warn) {\n      ENVOY_LOG_MISC(warn, \"{}\", message(extension_type, deprecated_name, canonical_name));\n      return;\n    }\n\n    ExceptionUtil::throwEnvoyException(\n        fatalMessage(extension_type, deprecated_name, canonical_name));\n  }\n\nprivate:\n  static std::string message(absl::string_view extension_type, absl::string_view deprecated_name,\n                             absl::string_view canonical_name) {\n    absl::string_view spacing = extension_type.empty() ? \"\" : \" \";\n\n    return fmt::format(\n        \"Using deprecated {}{}extension name '{}' for '{}'. This name will be removed from Envoy \"\n        \"soon. 
Please see {} for details.\",\n        extension_type, spacing, deprecated_name, canonical_name, ENVOY_DOC_URL_VERSION_HISTORY);\n  }\n\n  static std::string fatalMessage(absl::string_view extension_type,\n                                  absl::string_view deprecated_name,\n                                  absl::string_view canonical_name) {\n    std::string err = message(extension_type, deprecated_name, canonical_name);\n\n    const char fatal_error[] = \" If continued use of this filter name is absolutely necessary, \"\n                               \"see \" ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED \" for \"\n                               \"how to apply a temporary and highly discouraged override.\";\n\n    return err + fatal_error;\n  }\n};\n\n} // namespace Utility\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\n# NB: Used to break the circular dependency between wasm_lib and null_plugin_lib.\nenvoy_cc_library(\n    name = \"wasm_hdr\",\n    hdrs = [\n        \"context.h\",\n        \"wasm.h\",\n        \"wasm_extension.h\",\n        \"wasm_state.h\",\n        \"wasm_vm.h\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":well_known_names\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/server:lifecycle_notifier_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/singleton:const_singleton\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/version:version_includes\",\n        \"//source/extensions/filters/common/expr:evaluator_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"@com_google_cel_cpp//eval/public:activation\",\n        \"@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto\",\n        \"@proxy_wasm_cpp_host//:include\",\n        \"@proxy_wasm_cpp_sdk//:common_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"wasm_interoperation_lib\",\n    srcs = [\n        \"wasm_state.cc\",\n    ],\n    hdrs = [\n        \"wasm_state.h\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/protobuf\",\n        \"//source/common/singleton:const_singleton\",\n        \"@com_github_google_flatbuffers//:flatbuffers\",\n        
\"@com_google_cel_cpp//eval/public:cel_value\",\n        \"@com_google_cel_cpp//tools:flatbuffers_backed_impl\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"wasm_lib\",\n    srcs = [\n        \"context.cc\",\n        \"foreign.cc\",\n        \"wasm.cc\",\n        \"wasm_extension.cc\",\n        \"wasm_vm.cc\",\n    ],\n    copts = select({\n        \"//bazel:windows_x86_64\": [],  # TODO: fix the windows ANTLR build\n        \"//conditions:default\": [\n            \"-DWASM_USE_CEL_PARSER\",\n        ],\n    }),\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":wasm_hdr\",\n        \":wasm_interoperation_lib\",\n        \"//external:abseil_base\",\n        \"//external:abseil_node_hash_map\",\n        \"//include/envoy/server:lifecycle_notifier_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/config:remote_data_fetcher_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/extensions/common/wasm/ext:declare_property_cc_proto\",\n        \"//source/extensions/common/wasm/ext:envoy_null_vm_wasm_api\",\n        \"//source/extensions/filters/common/expr:context_lib\",\n        \"@com_google_cel_cpp//eval/eval:field_access\",\n        \"@com_google_cel_cpp//eval/eval:field_backed_list_impl\",\n        \"@com_google_cel_cpp//eval/eval:field_backed_map_impl\",\n        \"@com_google_cel_cpp//eval/public:builtin_func_registrar\",\n        \"@com_google_cel_cpp//eval/public:cel_expr_builder_factory\",\n        \"@com_google_cel_cpp//eval/public:cel_value\",\n        \"@com_google_cel_cpp//eval/public:value_export_util\",\n        \"@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto\",\n        \"@proxy_wasm_cpp_host//:lib\",\n    ] + select(\n        {\n            \"//bazel:windows_x86_64\": [],\n            \"//conditions:default\": [\n           
     \"@com_google_cel_cpp//parser\",\n            ],\n        },\n    ),\n)\n"
  },
  {
    "path": "source/extensions/common/wasm/context.cc",
    "content": "#include <algorithm>\n#include <cctype>\n#include <cstring>\n#include <ctime>\n#include <limits>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/sink.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/common/wasm/well_known_names.h\"\n#include \"extensions/filters/common/expr/context.h\"\n\n#include \"absl/base/casts.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"eval/eval/field_access.h\"\n#include \"eval/eval/field_backed_list_impl.h\"\n#include \"eval/eval/field_backed_map_impl.h\"\n#include \"eval/public/cel_value.h\"\n#include \"openssl/bytestring.h\"\n#include \"openssl/hmac.h\"\n#include \"openssl/sha.h\"\n\nusing proxy_wasm::MetricType;\nusing proxy_wasm::Word;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\nnamespace {\n\nusing HashPolicy = envoy::config::route::v3::RouteAction::HashPolicy;\n\nHttp::RequestTrailerMapPtr buildRequestTrailerMapFromPairs(const Pairs& pairs) {\n  auto map = Http::RequestTrailerMapImpl::create();\n  for (auto& p : pairs) {\n    // Note: because of the lack of a string_view interface for addCopy and\n    // the lack of an interface to add an entry 
with an empty value and return\n    // the entry, there is no efficient way to prevent either a double copy\n    // of the value or a double lookup of the entry.\n    map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second));\n  }\n  return map;\n}\n\nHttp::RequestHeaderMapPtr buildRequestHeaderMapFromPairs(const Pairs& pairs) {\n  auto map = Http::RequestHeaderMapImpl::create();\n  for (auto& p : pairs) {\n    // Note: because of the lack of a string_view interface for addCopy and\n    // the lack of an interface to add an entry with an empty value and return\n    // the entry, there is no efficient way to prevent either a double copy\n    // of the value or a double lookup of the entry.\n    map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second));\n  }\n  return map;\n}\n\ntemplate <typename P> static uint32_t headerSize(const P& p) { return p ? p->size() : 0; }\n\nconstexpr absl::string_view FailStreamResponseDetails = \"wasm_fail_stream\";\n\n} // namespace\n\n// Test support.\n\nsize_t Buffer::size() const {\n  if (const_buffer_instance_) {\n    return const_buffer_instance_->length();\n  }\n  return proxy_wasm::BufferBase::size();\n}\n\nWasmResult Buffer::copyTo(WasmBase* wasm, size_t start, size_t length, uint64_t ptr_ptr,\n                          uint64_t size_ptr) const {\n  if (const_buffer_instance_) {\n    uint64_t pointer;\n    auto p = wasm->allocMemory(length, &pointer);\n    if (!p) {\n      return WasmResult::InvalidMemoryAccess;\n    }\n    const_buffer_instance_->copyOut(start, length, p);\n    if (!wasm->wasm_vm()->setWord(ptr_ptr, Word(pointer))) {\n      return WasmResult::InvalidMemoryAccess;\n    }\n    if (!wasm->wasm_vm()->setWord(size_ptr, Word(length))) {\n      return WasmResult::InvalidMemoryAccess;\n    }\n    return WasmResult::Ok;\n  }\n  return proxy_wasm::BufferBase::copyTo(wasm, start, length, ptr_ptr, size_ptr);\n}\n\nWasmResult Buffer::copyFrom(size_t start, size_t length, 
absl::string_view data) {\n  if (buffer_instance_) {\n    if (start == 0) {\n      if (length == 0) {\n        buffer_instance_->prepend(data);\n        return WasmResult::Ok;\n      } else if (length >= buffer_instance_->length()) {\n        buffer_instance_->drain(buffer_instance_->length());\n        buffer_instance_->add(data);\n        return WasmResult::Ok;\n      } else {\n        return WasmResult::BadArgument;\n      }\n    } else if (start >= buffer_instance_->length()) {\n      buffer_instance_->add(data);\n      return WasmResult::Ok;\n    } else {\n      return WasmResult::BadArgument;\n    }\n  }\n  if (const_buffer_instance_) { // This buffer is immutable.\n    return WasmResult::BadArgument;\n  }\n  return proxy_wasm::BufferBase::copyFrom(start, length, data);\n}\n\nContext::Context() = default;\nContext::Context(Wasm* wasm) : ContextBase(wasm) {}\nContext::Context(Wasm* wasm, const PluginSharedPtr& plugin) : ContextBase(wasm, plugin) {\n  root_local_info_ = &std::static_pointer_cast<Plugin>(plugin)->local_info_;\n}\nContext::Context(Wasm* wasm, uint32_t root_context_id, const PluginSharedPtr& plugin)\n    : ContextBase(wasm, root_context_id, plugin) {}\n\nWasm* Context::wasm() const { return static_cast<Wasm*>(wasm_); }\nPlugin* Context::plugin() const { return static_cast<Plugin*>(plugin_.get()); }\nContext* Context::rootContext() const { return static_cast<Context*>(root_context()); }\nUpstream::ClusterManager& Context::clusterManager() const { return wasm()->clusterManager(); }\n\nvoid Context::error(absl::string_view message) { ENVOY_LOG(trace, message); }\n\nuint64_t Context::getCurrentTimeNanoseconds() {\n  return std::chrono::duration_cast<std::chrono::nanoseconds>(\n             wasm()->time_source_.systemTime().time_since_epoch())\n      .count();\n}\n\nvoid Context::onCloseTCP() {\n  if (tcp_connection_closed_ || !in_vm_context_created_) {\n    return;\n  }\n  tcp_connection_closed_ = true;\n  onDone();\n  onLog();\n  
onDelete();\n}\n\nvoid Context::onResolveDns(uint32_t token, Envoy::Network::DnsResolver::ResolutionStatus status,\n                           std::list<Envoy::Network::DnsResponse>&& response) {\n  proxy_wasm::DeferAfterCallActions actions(this);\n  if (wasm()->isFailed() || !wasm()->on_resolve_dns_) {\n    return;\n  }\n  if (status != Network::DnsResolver::ResolutionStatus::Success) {\n    buffer_.set(\"\");\n    wasm()->on_resolve_dns_(this, id_, token, 0);\n    return;\n  }\n  // buffer format:\n  //    4 bytes number of entries = N\n  //    N * 4 bytes TTL for each entry\n  //    N * null-terminated addresses\n  uint32_t s = 4; // length\n  for (auto& e : response) {\n    s += 4;                                     // for TTL\n    s += e.address_->asStringView().size() + 1; // null terminated.\n  }\n  auto buffer = std::unique_ptr<char[]>(new char[s]);\n  char* b = buffer.get();\n  uint32_t n = response.size();\n  memcpy(b, &n, sizeof(uint32_t));\n  b += sizeof(uint32_t);\n  for (auto& e : response) {\n    uint32_t ttl = e.ttl_.count();\n    memcpy(b, &ttl, sizeof(uint32_t));\n    b += sizeof(uint32_t);\n  };\n  for (auto& e : response) {\n    memcpy(b, e.address_->asStringView().data(), e.address_->asStringView().size());\n    b += e.address_->asStringView().size();\n    *b++ = 0;\n  };\n  buffer_.set(std::move(buffer), s);\n  wasm()->on_resolve_dns_(this, id_, token, s);\n}\n\ntemplate <typename I> inline uint32_t align(uint32_t i) {\n  return (i + sizeof(I) - 1) & ~(sizeof(I) - 1);\n}\n\ntemplate <typename I> inline char* align(char* p) {\n  return reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(p) + sizeof(I) - 1) &\n                                 ~(sizeof(I) - 1));\n}\n\nvoid Context::onStatsUpdate(Envoy::Stats::MetricSnapshot& snapshot) {\n  proxy_wasm::DeferAfterCallActions actions(this);\n  if (wasm()->isFailed() || !wasm()->on_stats_update_) {\n    return;\n  }\n  // buffer format:\n  //  uint32 size of block of this type\n  //  uint32 type\n  
//  uint32 count\n  //    uint32 length of name\n  //    name\n  //    8 byte alignment padding\n  //    8 bytes of absolute value\n  //    8 bytes of delta  (if appropriate, e.g. for counters)\n  //  uint32 size of block of this type\n\n  uint32_t counter_block_size = 3 * sizeof(uint32_t); // type of stat\n  uint32_t num_counters = snapshot.counters().size();\n  uint32_t counter_type = 1;\n\n  uint32_t gauge_block_size = 3 * sizeof(uint32_t); // type of stat\n  uint32_t num_gauges = snapshot.gauges().size();\n  uint32_t gauge_type = 2;\n\n  uint32_t n = 0;\n  uint64_t v = 0;\n\n  for (const auto& counter : snapshot.counters()) {\n    if (counter.counter_.get().used()) {\n      counter_block_size += sizeof(uint32_t) + counter.counter_.get().name().size();\n      counter_block_size = align<uint64_t>(counter_block_size + 2 * sizeof(uint64_t));\n    }\n  }\n\n  for (const auto& gauge : snapshot.gauges()) {\n    if (gauge.get().used()) {\n      gauge_block_size += sizeof(uint32_t) + gauge.get().name().size();\n      gauge_block_size += align<uint64_t>(gauge_block_size + sizeof(uint64_t));\n    }\n  }\n\n  auto buffer = std::unique_ptr<char[]>(new char[counter_block_size + gauge_block_size]);\n  char* b = buffer.get();\n\n  memcpy(b, &counter_block_size, sizeof(uint32_t));\n  b += sizeof(uint32_t);\n  memcpy(b, &counter_type, sizeof(uint32_t));\n  b += sizeof(uint32_t);\n  memcpy(b, &num_counters, sizeof(uint32_t));\n  b += sizeof(uint32_t);\n\n  for (const auto& counter : snapshot.counters()) {\n    if (counter.counter_.get().used()) {\n      n = counter.counter_.get().name().size();\n      memcpy(b, &n, sizeof(uint32_t));\n      b += sizeof(uint32_t);\n      memcpy(b, counter.counter_.get().name().data(), counter.counter_.get().name().size());\n      b = align<uint64_t>(b + counter.counter_.get().name().size());\n      v = counter.counter_.get().value();\n      memcpy(b, &v, sizeof(uint64_t));\n      b += sizeof(uint64_t);\n      v = counter.delta_;\n      memcpy(b, 
&v, sizeof(uint64_t));\n      b += sizeof(uint64_t);\n    }\n  }\n\n  memcpy(b, &gauge_block_size, sizeof(uint32_t));\n  b += sizeof(uint32_t);\n  memcpy(b, &gauge_type, sizeof(uint32_t));\n  b += sizeof(uint32_t);\n  memcpy(b, &num_gauges, sizeof(uint32_t));\n  b += sizeof(uint32_t);\n\n  for (const auto& gauge : snapshot.gauges()) {\n    if (gauge.get().used()) {\n      n = gauge.get().name().size();\n      memcpy(b, &n, sizeof(uint32_t));\n      b += sizeof(uint32_t);\n      memcpy(b, gauge.get().name().data(), gauge.get().name().size());\n      b = align<uint64_t>(b + gauge.get().name().size());\n      v = gauge.get().value();\n      memcpy(b, &v, sizeof(uint64_t));\n      b += sizeof(uint64_t);\n    }\n  }\n  buffer_.set(std::move(buffer), counter_block_size + gauge_block_size);\n  wasm()->on_stats_update_(this, id_, counter_block_size + gauge_block_size);\n}\n\n// Native serializer carrying over bit representation from CEL value to the extension.\n// This implementation assumes that the value type is static and known to the consumer.\nWasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* result) {\n  using Filters::Common::Expr::CelValue;\n  int64_t out_int64;\n  uint64_t out_uint64;\n  double out_double;\n  bool out_bool;\n  const Protobuf::Message* out_message;\n  switch (value.type()) {\n  case CelValue::Type::kString:\n    result->assign(value.StringOrDie().value().data(), value.StringOrDie().value().size());\n    return WasmResult::Ok;\n  case CelValue::Type::kBytes:\n    result->assign(value.BytesOrDie().value().data(), value.BytesOrDie().value().size());\n    return WasmResult::Ok;\n  case CelValue::Type::kInt64:\n    out_int64 = value.Int64OrDie();\n    result->assign(reinterpret_cast<const char*>(&out_int64), sizeof(int64_t));\n    return WasmResult::Ok;\n  case CelValue::Type::kUint64:\n    out_uint64 = value.Uint64OrDie();\n    result->assign(reinterpret_cast<const char*>(&out_uint64), sizeof(uint64_t));\n    return 
WasmResult::Ok;\n  case CelValue::Type::kDouble:\n    out_double = value.DoubleOrDie();\n    result->assign(reinterpret_cast<const char*>(&out_double), sizeof(double));\n    return WasmResult::Ok;\n  case CelValue::Type::kBool:\n    out_bool = value.BoolOrDie();\n    result->assign(reinterpret_cast<const char*>(&out_bool), sizeof(bool));\n    return WasmResult::Ok;\n  case CelValue::Type::kDuration:\n    // Warning: loss of precision to nanoseconds\n    out_int64 = absl::ToInt64Nanoseconds(value.DurationOrDie());\n    result->assign(reinterpret_cast<const char*>(&out_int64), sizeof(int64_t));\n    return WasmResult::Ok;\n  case CelValue::Type::kTimestamp:\n    // Warning: loss of precision to nanoseconds\n    out_int64 = absl::ToUnixNanos(value.TimestampOrDie());\n    result->assign(reinterpret_cast<const char*>(&out_int64), sizeof(int64_t));\n    return WasmResult::Ok;\n  case CelValue::Type::kMessage:\n    out_message = value.MessageOrDie();\n    result->clear();\n    if (!out_message || out_message->SerializeToString(result)) {\n      return WasmResult::Ok;\n    }\n    return WasmResult::SerializationFailure;\n  case CelValue::Type::kMap: {\n    const auto& map = *value.MapOrDie();\n    const auto& keys = *map.ListKeys();\n    std::vector<std::pair<std::string, std::string>> pairs(map.size(), std::make_pair(\"\", \"\"));\n    for (auto i = 0; i < map.size(); i++) {\n      if (serializeValue(keys[i], &pairs[i].first) != WasmResult::Ok) {\n        return WasmResult::SerializationFailure;\n      }\n      if (serializeValue(map[keys[i]].value(), &pairs[i].second) != WasmResult::Ok) {\n        return WasmResult::SerializationFailure;\n      }\n    }\n    auto size = proxy_wasm::exports::pairsSize(pairs);\n    // prevent string inlining which violates byte alignment\n    result->resize(std::max(size, static_cast<size_t>(30)));\n    proxy_wasm::exports::marshalPairs(pairs, result->data());\n    result->resize(size);\n    return WasmResult::Ok;\n  }\n  case 
CelValue::Type::kList: {\n    const auto& list = *value.ListOrDie();\n    std::vector<std::pair<std::string, std::string>> pairs(list.size(), std::make_pair(\"\", \"\"));\n    for (auto i = 0; i < list.size(); i++) {\n      if (serializeValue(list[i], &pairs[i].first) != WasmResult::Ok) {\n        return WasmResult::SerializationFailure;\n      }\n    }\n    auto size = proxy_wasm::exports::pairsSize(pairs);\n    // prevent string inlining which violates byte alignment\n    if (size < 30) {\n      result->reserve(30);\n    }\n    result->resize(size);\n    proxy_wasm::exports::marshalPairs(pairs, result->data());\n    return WasmResult::Ok;\n  }\n  default:\n    break;\n  }\n  return WasmResult::SerializationFailure;\n}\n\n#define PROPERTY_TOKENS(_f)                                                                        \\\n  _f(METADATA) _f(REQUEST) _f(RESPONSE) _f(CONNECTION) _f(UPSTREAM) _f(NODE) _f(SOURCE)            \\\n      _f(DESTINATION) _f(LISTENER_DIRECTION) _f(LISTENER_METADATA) _f(CLUSTER_NAME)                \\\n          _f(CLUSTER_METADATA) _f(ROUTE_NAME) _f(ROUTE_METADATA) _f(PLUGIN_NAME)                   \\\n              _f(PLUGIN_ROOT_ID) _f(PLUGIN_VM_ID) _f(CONNECTION_ID) _f(FILTER_STATE)\n\nstatic inline std::string downCase(std::string s) {\n  std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return std::tolower(c); });\n  return s;\n}\n\n#define _DECLARE(_t) _t,\nenum class PropertyToken { PROPERTY_TOKENS(_DECLARE) };\n#undef _DECLARE\n\n#define _PAIR(_t) {downCase(#_t), PropertyToken::_t},\nstatic absl::flat_hash_map<std::string, PropertyToken> property_tokens = {PROPERTY_TOKENS(_PAIR)};\n#undef _PAIR\n\nabsl::optional<google::api::expr::runtime::CelValue>\nContext::findValue(absl::string_view name, Protobuf::Arena* arena, bool last) const {\n  using google::api::expr::runtime::CelValue;\n\n  const StreamInfo::StreamInfo* info = getConstRequestStreamInfo();\n\n  // Convert into a dense token to enable a jump table 
implementation.\n  auto part_token = property_tokens.find(name);\n  if (part_token == property_tokens.end()) {\n    if (info) {\n      std::string key;\n      absl::StrAppend(&key, WasmStateKeyPrefix, name);\n      const WasmState* state;\n      if (info->filterState().hasData<WasmState>(key)) {\n        state = &info->filterState().getDataReadOnly<WasmState>(key);\n      } else if (info->upstreamFilterState() &&\n                 info->upstreamFilterState()->hasData<WasmState>(key)) {\n        state = &info->upstreamFilterState()->getDataReadOnly<WasmState>(key);\n      } else {\n        return {};\n      }\n      return state->exprValue(arena, last);\n    }\n    return {};\n  }\n\n  switch (part_token->second) {\n  case PropertyToken::METADATA:\n    if (info) {\n      return CelValue::CreateMessage(&info->dynamicMetadata(), arena);\n    }\n    break;\n  case PropertyToken::REQUEST:\n    if (info) {\n      return CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::RequestWrapper>(\n          arena, *arena, request_headers_ ? request_headers_ : access_log_request_headers_, *info));\n    }\n    break;\n  case PropertyToken::RESPONSE:\n    if (info) {\n      return CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::ResponseWrapper>(\n          arena, *arena, response_headers_ ? response_headers_ : access_log_response_headers_,\n          response_trailers_ ? 
response_trailers_ : access_log_response_trailers_, *info));\n    }\n    break;\n  case PropertyToken::CONNECTION:\n    if (info) {\n      return CelValue::CreateMap(\n          Protobuf::Arena::Create<Filters::Common::Expr::ConnectionWrapper>(arena, *info));\n    }\n    break;\n  case PropertyToken::CONNECTION_ID: {\n    auto conn = getConnection();\n    if (conn) {\n      return CelValue::CreateUint64(conn->id());\n    }\n    break;\n  }\n  case PropertyToken::UPSTREAM:\n    if (info) {\n      return CelValue::CreateMap(\n          Protobuf::Arena::Create<Filters::Common::Expr::UpstreamWrapper>(arena, *info));\n    }\n    break;\n  case PropertyToken::NODE:\n    if (root_local_info_) {\n      return CelValue::CreateMessage(&root_local_info_->node(), arena);\n    } else if (plugin_) {\n      return CelValue::CreateMessage(&plugin()->local_info_.node(), arena);\n    }\n    break;\n  case PropertyToken::SOURCE:\n    if (info) {\n      return CelValue::CreateMap(\n          Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(arena, *info, false));\n    }\n    break;\n  case PropertyToken::DESTINATION:\n    if (info) {\n      return CelValue::CreateMap(\n          Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(arena, *info, true));\n    }\n    break;\n  case PropertyToken::LISTENER_DIRECTION:\n    if (plugin_) {\n      return CelValue::CreateInt64(plugin()->direction_);\n    }\n    break;\n  case PropertyToken::LISTENER_METADATA:\n    if (plugin_) {\n      return CelValue::CreateMessage(plugin()->listener_metadata_, arena);\n    }\n    break;\n  case PropertyToken::CLUSTER_NAME:\n    if (info && info->upstreamHost()) {\n      return CelValue::CreateString(&info->upstreamHost()->cluster().name());\n    } else if (info && info->routeEntry()) {\n      return CelValue::CreateString(&info->routeEntry()->clusterName());\n    } else if (info && info->upstreamClusterInfo().has_value() &&\n               info->upstreamClusterInfo().value()) {\n      return 
CelValue::CreateString(&info->upstreamClusterInfo().value()->name());\n    }\n    break;\n  case PropertyToken::CLUSTER_METADATA:\n    if (info && info->upstreamHost()) {\n      return CelValue::CreateMessage(&info->upstreamHost()->cluster().metadata(), arena);\n    }\n    break;\n  case PropertyToken::ROUTE_NAME:\n    if (info) {\n      return CelValue::CreateString(&info->getRouteName());\n    }\n    break;\n  case PropertyToken::ROUTE_METADATA:\n    if (info && info->routeEntry()) {\n      return CelValue::CreateMessage(&info->routeEntry()->metadata(), arena);\n    }\n    break;\n  case PropertyToken::PLUGIN_NAME:\n    if (plugin_) {\n      return CelValue::CreateStringView(plugin()->name_);\n    }\n    break;\n  case PropertyToken::PLUGIN_ROOT_ID:\n    return CelValue::CreateStringView(root_id());\n  case PropertyToken::PLUGIN_VM_ID:\n    return CelValue::CreateStringView(wasm()->vm_id());\n  case PropertyToken::FILTER_STATE:\n    return Protobuf::Arena::Create<Filters::Common::Expr::FilterStateWrapper>(arena,\n                                                                              info->filterState())\n        ->Produce(arena);\n  }\n  return {};\n}\n\nWasmResult Context::getProperty(absl::string_view path, std::string* result) {\n  using google::api::expr::runtime::CelValue;\n\n  bool first = true;\n  CelValue value;\n  Protobuf::Arena arena;\n\n  size_t start = 0;\n  while (true) {\n    if (start >= path.size()) {\n      break;\n    }\n\n    size_t end = path.find('\\0', start);\n    if (end == absl::string_view::npos) {\n      end = start + path.size();\n    }\n    auto part = path.substr(start, end - start);\n    start = end + 1;\n\n    if (first) {\n      // top-level identifier\n      first = false;\n      auto top_value = findValue(part, &arena, start >= path.size());\n      if (!top_value.has_value()) {\n        return WasmResult::NotFound;\n      }\n      value = top_value.value();\n    } else if (value.IsMap()) {\n      auto& map = 
*value.MapOrDie();\n      auto field = map[CelValue::CreateStringView(part)];\n      if (!field.has_value()) {\n        return WasmResult::NotFound;\n      }\n      value = field.value();\n    } else if (value.IsMessage()) {\n      auto msg = value.MessageOrDie();\n      if (msg == nullptr) {\n        return WasmResult::NotFound;\n      }\n      const Protobuf::Descriptor* desc = msg->GetDescriptor();\n      const Protobuf::FieldDescriptor* field_desc = desc->FindFieldByName(std::string(part));\n      if (field_desc == nullptr) {\n        return WasmResult::NotFound;\n      }\n      if (field_desc->is_map()) {\n        value = CelValue::CreateMap(\n            Protobuf::Arena::Create<google::api::expr::runtime::FieldBackedMapImpl>(\n                &arena, msg, field_desc, &arena));\n      } else if (field_desc->is_repeated()) {\n        value = CelValue::CreateList(\n            Protobuf::Arena::Create<google::api::expr::runtime::FieldBackedListImpl>(\n                &arena, msg, field_desc, &arena));\n      } else {\n        auto status =\n            google::api::expr::runtime::CreateValueFromSingleField(msg, field_desc, &arena, &value);\n        if (!status.ok()) {\n          return WasmResult::InternalFailure;\n        }\n      }\n    } else {\n      return WasmResult::NotFound;\n    }\n  }\n\n  return serializeValue(value, result);\n}\n\n// Header/Trailer/Metadata Maps.\nHttp::HeaderMap* Context::getMap(WasmHeaderMapType type) {\n  switch (type) {\n  case WasmHeaderMapType::RequestHeaders:\n    return request_headers_;\n  case WasmHeaderMapType::RequestTrailers:\n    return request_trailers_;\n  case WasmHeaderMapType::ResponseHeaders:\n    return response_headers_;\n  case WasmHeaderMapType::ResponseTrailers:\n    return response_trailers_;\n  default:\n    return nullptr;\n  }\n}\n\nconst Http::HeaderMap* Context::getConstMap(WasmHeaderMapType type) {\n  switch (type) {\n  case WasmHeaderMapType::RequestHeaders:\n    if (access_log_request_headers_) {\n    
  return access_log_request_headers_;\n    }\n    return request_headers_;\n  case WasmHeaderMapType::RequestTrailers:\n    return request_trailers_;\n  case WasmHeaderMapType::ResponseHeaders:\n    if (access_log_response_headers_) {\n      return access_log_response_headers_;\n    }\n    return response_headers_;\n  case WasmHeaderMapType::ResponseTrailers:\n    if (access_log_response_trailers_) {\n      return access_log_response_trailers_;\n    }\n    return response_trailers_;\n  case WasmHeaderMapType::GrpcReceiveInitialMetadata:\n    return rootContext()->grpc_receive_initial_metadata_.get();\n  case WasmHeaderMapType::GrpcReceiveTrailingMetadata:\n    return rootContext()->grpc_receive_trailing_metadata_.get();\n  case WasmHeaderMapType::HttpCallResponseHeaders: {\n    Envoy::Http::ResponseMessagePtr* response = rootContext()->http_call_response_;\n    if (response) {\n      return &(*response)->headers();\n    }\n    return nullptr;\n  }\n  case WasmHeaderMapType::HttpCallResponseTrailers: {\n    Envoy::Http::ResponseMessagePtr* response = rootContext()->http_call_response_;\n    if (response) {\n      return (*response)->trailers();\n    }\n    return nullptr;\n  }\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nWasmResult Context::addHeaderMapValue(WasmHeaderMapType type, absl::string_view key,\n                                      absl::string_view value) {\n  auto map = getMap(type);\n  if (!map) {\n    return WasmResult::BadArgument;\n  }\n  const Http::LowerCaseString lower_key{std::string(key)};\n  map->addCopy(lower_key, std::string(value));\n  return WasmResult::Ok;\n}\n\nWasmResult Context::getHeaderMapValue(WasmHeaderMapType type, absl::string_view key,\n                                      absl::string_view* value) {\n  auto map = getConstMap(type);\n  if (!map) {\n    return WasmResult::BadArgument;\n  }\n  const Http::LowerCaseString lower_key{std::string(key)};\n  auto entry = map->get(lower_key);\n  if (!entry) {\n    if (wasm()->abiVersion() 
== proxy_wasm::AbiVersion::ProxyWasm_0_1_0) {\n      *value = \"\";\n      return WasmResult::Ok;\n    } else {\n      return WasmResult::NotFound;\n    }\n  }\n  *value = entry->value().getStringView();\n  return WasmResult::Ok;\n}\n\nPairs headerMapToPairs(const Http::HeaderMap* map) {\n  if (!map) {\n    return {};\n  }\n  Pairs pairs;\n  pairs.reserve(map->size());\n  map->iterate([&pairs](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    pairs.push_back(std::make_pair(header.key().getStringView(), header.value().getStringView()));\n    return Http::HeaderMap::Iterate::Continue;\n  });\n  return pairs;\n}\n\nWasmResult Context::getHeaderMapPairs(WasmHeaderMapType type, Pairs* result) {\n  *result = headerMapToPairs(getConstMap(type));\n  return WasmResult::Ok;\n}\n\nWasmResult Context::setHeaderMapPairs(WasmHeaderMapType type, const Pairs& pairs) {\n  auto map = getMap(type);\n  if (!map) {\n    return WasmResult::BadArgument;\n  }\n  std::vector<std::string> keys;\n  map->iterate([&keys](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    keys.push_back(std::string(header.key().getStringView()));\n    return Http::HeaderMap::Iterate::Continue;\n  });\n  for (auto& k : keys) {\n    const Http::LowerCaseString lower_key{k};\n    map->remove(lower_key);\n  }\n  for (auto& p : pairs) {\n    const Http::LowerCaseString lower_key{std::string(p.first)};\n    map->addCopy(lower_key, std::string(p.second));\n  }\n  return WasmResult::Ok;\n}\n\nWasmResult Context::removeHeaderMapValue(WasmHeaderMapType type, absl::string_view key) {\n  auto map = getMap(type);\n  if (!map) {\n    return WasmResult::BadArgument;\n  }\n  const Http::LowerCaseString lower_key{std::string(key)};\n  map->remove(lower_key);\n  return WasmResult::Ok;\n}\n\nWasmResult Context::replaceHeaderMapValue(WasmHeaderMapType type, absl::string_view key,\n                                          absl::string_view value) {\n  auto map = getMap(type);\n  if (!map) {\n    
return WasmResult::BadArgument;\n  }\n  const Http::LowerCaseString lower_key{std::string(key)};\n  map->setCopy(lower_key, value);\n  return WasmResult::Ok;\n}\n\nWasmResult Context::getHeaderMapSize(WasmHeaderMapType type, uint32_t* result) {\n  auto map = getMap(type);\n  if (!map) {\n    return WasmResult::BadArgument;\n  }\n  *result = map->byteSize();\n  return WasmResult::Ok;\n}\n\n// Buffer\n\nBufferInterface* Context::getBuffer(WasmBufferType type) {\n  Envoy::Http::ResponseMessagePtr* response = nullptr;\n  switch (type) {\n  case WasmBufferType::CallData:\n    // Set before the call.\n    return &buffer_;\n  case WasmBufferType::VmConfiguration:\n    return buffer_.set(wasm()->vm_configuration());\n  case WasmBufferType::PluginConfiguration:\n    if (plugin_) {\n      return buffer_.set(plugin_->plugin_configuration_);\n    }\n    return nullptr;\n  case WasmBufferType::HttpRequestBody:\n    if (buffering_request_body_) {\n      // We need the mutable version, so capture it using a callback.\n      // TODO: consider adding a mutableDecodingBuffer() interface.\n      ::Envoy::Buffer::Instance* buffer_instance{};\n      decoder_callbacks_->modifyDecodingBuffer(\n          [&buffer_instance](::Envoy::Buffer::Instance& buffer) { buffer_instance = &buffer; });\n      return buffer_.set(buffer_instance);\n    }\n    return buffer_.set(request_body_buffer_);\n  case WasmBufferType::HttpResponseBody:\n    if (buffering_response_body_) {\n      // TODO: consider adding a mutableDecodingBuffer() interface.\n      ::Envoy::Buffer::Instance* buffer_instance{};\n      encoder_callbacks_->modifyEncodingBuffer(\n          [&buffer_instance](::Envoy::Buffer::Instance& buffer) { buffer_instance = &buffer; });\n      return buffer_.set(buffer_instance);\n    }\n    return buffer_.set(response_body_buffer_);\n  case WasmBufferType::NetworkDownstreamData:\n    return buffer_.set(network_downstream_data_buffer_);\n  case WasmBufferType::NetworkUpstreamData:\n    return 
buffer_.set(network_upstream_data_buffer_);\n  case WasmBufferType::HttpCallResponseBody:\n    response = rootContext()->http_call_response_;\n    if (response) {\n      auto& body = (*response)->body();\n      return buffer_.set(absl::string_view(static_cast<const char*>(body.linearize(body.length())),\n                                           body.length()));\n    }\n    return nullptr;\n  case WasmBufferType::GrpcReceiveBuffer:\n    return buffer_.set(rootContext()->grpc_receive_buffer_.get());\n  default:\n    return nullptr;\n  }\n}\n\nvoid Context::onDownstreamConnectionClose(CloseType close_type) {\n  ContextBase::onDownstreamConnectionClose(close_type);\n  downstream_closed_ = true;\n  // Call close on TCP connection, if upstream connection closed or there was a failure seen in\n  // this connection.\n  if (upstream_closed_ || getRequestStreamInfo()->hasAnyResponseFlag()) {\n    onCloseTCP();\n  }\n}\n\nvoid Context::onUpstreamConnectionClose(CloseType close_type) {\n  ContextBase::onUpstreamConnectionClose(close_type);\n  upstream_closed_ = true;\n  if (downstream_closed_) {\n    onCloseTCP();\n  }\n}\n\nuint32_t Context::nextHttpCallToken() {\n  uint32_t token = next_http_call_token_++;\n  // Handle rollover.\n  for (;;) {\n    if (token == 0) {\n      token = next_http_call_token_++;\n    }\n    if (!http_request_.count(token)) {\n      break;\n    }\n    token = next_http_call_token_++;\n  }\n  return token;\n}\n\n// Async call via HTTP\nWasmResult Context::httpCall(absl::string_view cluster, const Pairs& request_headers,\n                             absl::string_view request_body, const Pairs& request_trailers,\n                             int timeout_milliseconds, uint32_t* token_ptr) {\n  if (timeout_milliseconds < 0) {\n    return WasmResult::BadArgument;\n  }\n  auto cluster_string = std::string(cluster);\n  if (clusterManager().get(cluster_string) == nullptr) {\n    return WasmResult::BadArgument;\n  }\n\n  Http::RequestMessagePtr message(\n   
   new Http::RequestMessageImpl(buildRequestHeaderMapFromPairs(request_headers)));\n\n  // Check that we were provided certain headers.\n  if (message->headers().Path() == nullptr || message->headers().Method() == nullptr ||\n      message->headers().Host() == nullptr) {\n    return WasmResult::BadArgument;\n  }\n\n  if (!request_body.empty()) {\n    message->body().add(request_body);\n    message->headers().setContentLength(request_body.size());\n  }\n\n  if (!request_trailers.empty()) {\n    message->trailers(buildRequestTrailerMapFromPairs(request_trailers));\n  }\n\n  absl::optional<std::chrono::milliseconds> timeout;\n  if (timeout_milliseconds > 0) {\n    timeout = std::chrono::milliseconds(timeout_milliseconds);\n  }\n\n  uint32_t token = nextHttpCallToken();\n  auto& handler = http_request_[token];\n\n  // set default hash policy to be based on :authority to enable consistent hash\n  Http::AsyncClient::RequestOptions options;\n  options.setTimeout(timeout);\n  Protobuf::RepeatedPtrField<HashPolicy> hash_policy;\n  hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get());\n  options.setHashPolicy(hash_policy);\n  auto http_request = clusterManager()\n                          .httpAsyncClientForCluster(cluster_string)\n                          .send(std::move(message), handler, options);\n  if (!http_request) {\n    http_request_.erase(token);\n    return WasmResult::InternalFailure;\n  }\n  handler.context_ = this;\n  handler.token_ = token;\n  handler.request_ = http_request;\n  *token_ptr = token;\n  return WasmResult::Ok;\n}\n\nuint32_t Context::nextGrpcCallToken() {\n  uint32_t token = next_grpc_token_++;\n  if (isGrpcStreamToken(token)) {\n    token = next_grpc_token_++;\n  }\n  // Handle rollover. 
Note: token is always odd.\n  for (;;) {\n    if (!grpc_call_request_.count(token)) {\n      break;\n    }\n    next_grpc_token_++; // Skip stream token.\n    token = next_grpc_token_++;\n  }\n  return token;\n}\n\nWasmResult Context::grpcCall(absl::string_view grpc_service, absl::string_view service_name,\n                             absl::string_view method_name, const Pairs& initial_metadata,\n                             absl::string_view request, std::chrono::milliseconds timeout,\n                             uint32_t* token_ptr) {\n  GrpcService service_proto;\n  if (!service_proto.ParseFromArray(grpc_service.data(), grpc_service.size())) {\n    return WasmResult::ParseFailure;\n  }\n  uint32_t token = nextGrpcCallToken();\n  auto& handler = grpc_call_request_[token];\n  handler.context_ = this;\n  handler.token_ = token;\n  auto grpc_client =\n      clusterManager()\n          .grpcAsyncClientManager()\n          .factoryForGrpcService(service_proto, *wasm()->scope_, true /* skip_cluster_check */)\n          ->create();\n  grpc_initial_metadata_ = buildRequestHeaderMapFromPairs(initial_metadata);\n\n  // set default hash policy to be based on :authority to enable consistent hash\n  Http::AsyncClient::RequestOptions options;\n  options.setTimeout(timeout);\n  Protobuf::RepeatedPtrField<HashPolicy> hash_policy;\n  hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get());\n  options.setHashPolicy(hash_policy);\n\n  auto grpc_request = grpc_client->sendRaw(service_name, method_name,\n                                           std::make_unique<::Envoy::Buffer::OwnedImpl>(request),\n                                           handler, Tracing::NullSpan::instance(), options);\n  if (!grpc_request) {\n    grpc_call_request_.erase(token);\n    return WasmResult::InternalFailure;\n  }\n  handler.client_ = std::move(grpc_client);\n  handler.request_ = grpc_request;\n  *token_ptr = token;\n  return WasmResult::Ok;\n}\n\nuint32_t 
Context::nextGrpcStreamToken() {\n  uint32_t token = next_grpc_token_++;\n  if (isGrpcCallToken(token)) {\n    token = next_grpc_token_++;\n  }\n  // Handle rollover. Note: token is always even.\n  for (;;) {\n    if (token == 0) {\n      next_grpc_token_++; // Skip call token.\n      token = next_grpc_token_++;\n    }\n    if (!grpc_stream_.count(token)) {\n      break;\n    }\n    next_grpc_token_++; // Skip call token.\n    token = next_grpc_token_++;\n  }\n  return token;\n}\n\nWasmResult Context::grpcStream(absl::string_view grpc_service, absl::string_view service_name,\n                               absl::string_view method_name, const Pairs& initial_metadata,\n                               uint32_t* token_ptr) {\n  GrpcService service_proto;\n  if (!service_proto.ParseFromArray(grpc_service.data(), grpc_service.size())) {\n    return WasmResult::ParseFailure;\n  }\n  uint32_t token = nextGrpcStreamToken();\n  auto& handler = grpc_stream_[token];\n  handler.context_ = this;\n  handler.token_ = token;\n  auto grpc_client =\n      clusterManager()\n          .grpcAsyncClientManager()\n          .factoryForGrpcService(service_proto, *wasm()->scope_, true /* skip_cluster_check */)\n          ->create();\n  grpc_initial_metadata_ = buildRequestHeaderMapFromPairs(initial_metadata);\n\n  // set default hash policy to be based on :authority to enable consistent hash\n  Http::AsyncClient::StreamOptions options;\n  Protobuf::RepeatedPtrField<HashPolicy> hash_policy;\n  hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get());\n  options.setHashPolicy(hash_policy);\n\n  auto grpc_stream = grpc_client->startRaw(service_name, method_name, handler, options);\n  if (!grpc_stream) {\n    grpc_stream_.erase(token);\n    return WasmResult::InternalFailure;\n  }\n  handler.client_ = std::move(grpc_client);\n  handler.stream_ = grpc_stream;\n  *token_ptr = token;\n  return WasmResult::Ok;\n}\n\n// NB: this is currently called inline, so the token 
is known to be that of the currently\n// executing grpcCall or grpcStream.\nvoid Context::onGrpcCreateInitialMetadata(uint32_t /* token */,\n                                          Http::RequestHeaderMap& initial_metadata) {\n  if (grpc_initial_metadata_) {\n    initial_metadata = std::move(*grpc_initial_metadata_);\n    grpc_initial_metadata_.reset();\n  }\n}\n\n// StreamInfo\nconst StreamInfo::StreamInfo* Context::getConstRequestStreamInfo() const {\n  if (encoder_callbacks_) {\n    return &encoder_callbacks_->streamInfo();\n  } else if (decoder_callbacks_) {\n    return &decoder_callbacks_->streamInfo();\n  } else if (access_log_stream_info_) {\n    return access_log_stream_info_;\n  } else if (network_read_filter_callbacks_) {\n    return &network_read_filter_callbacks_->connection().streamInfo();\n  } else if (network_write_filter_callbacks_) {\n    return &network_write_filter_callbacks_->connection().streamInfo();\n  }\n  return nullptr;\n}\n\nStreamInfo::StreamInfo* Context::getRequestStreamInfo() const {\n  if (encoder_callbacks_) {\n    return &encoder_callbacks_->streamInfo();\n  } else if (decoder_callbacks_) {\n    return &decoder_callbacks_->streamInfo();\n  } else if (network_read_filter_callbacks_) {\n    return &network_read_filter_callbacks_->connection().streamInfo();\n  } else if (network_write_filter_callbacks_) {\n    return &network_write_filter_callbacks_->connection().streamInfo();\n  }\n  return nullptr;\n}\n\nconst Network::Connection* Context::getConnection() const {\n  if (encoder_callbacks_) {\n    return encoder_callbacks_->connection();\n  } else if (decoder_callbacks_) {\n    return decoder_callbacks_->connection();\n  } else if (network_read_filter_callbacks_) {\n    return &network_read_filter_callbacks_->connection();\n  } else if (network_write_filter_callbacks_) {\n    return &network_write_filter_callbacks_->connection();\n  }\n  return nullptr;\n}\n\nWasmResult Context::setProperty(absl::string_view path, absl::string_view 
value) {\n  auto* stream_info = getRequestStreamInfo();\n  if (!stream_info) {\n    return WasmResult::NotFound;\n  }\n  std::string key;\n  absl::StrAppend(&key, WasmStateKeyPrefix, path);\n  WasmState* state;\n  if (stream_info->filterState()->hasData<WasmState>(key)) {\n    state = &stream_info->filterState()->getDataMutable<WasmState>(key);\n  } else {\n    const auto& it = rootContext()->state_prototypes_.find(path);\n    const WasmStatePrototype& prototype = it == rootContext()->state_prototypes_.end()\n                                              ? DefaultWasmStatePrototype::get()\n                                              : *it->second.get(); // NOLINT\n    auto state_ptr = std::make_unique<WasmState>(prototype);\n    state = state_ptr.get();\n    stream_info->filterState()->setData(key, std::move(state_ptr),\n                                        StreamInfo::FilterState::StateType::Mutable,\n                                        prototype.life_span_);\n  }\n  if (!state->setValue(value)) {\n    return WasmResult::BadArgument;\n  }\n  return WasmResult::Ok;\n}\n\nWasmResult Context::declareProperty(absl::string_view path,\n                                    std::unique_ptr<const WasmStatePrototype> state_prototype) {\n  // Do not delete existing schema since it can be referenced by state objects.\n  if (state_prototypes_.find(path) == state_prototypes_.end()) {\n    state_prototypes_[path] = std::move(state_prototype);\n    return WasmResult::Ok;\n  }\n  return WasmResult::BadArgument;\n}\n\nWasmResult Context::log(uint32_t level, absl::string_view message) {\n  switch (static_cast<spdlog::level::level_enum>(level)) {\n  case spdlog::level::trace:\n    ENVOY_LOG(trace, \"wasm log{}: {}\", log_prefix(), message);\n    return WasmResult::Ok;\n  case spdlog::level::debug:\n    ENVOY_LOG(debug, \"wasm log{}: {}\", log_prefix(), message);\n    return WasmResult::Ok;\n  case spdlog::level::info:\n    ENVOY_LOG(info, \"wasm log{}: {}\", log_prefix(), 
message);\n    return WasmResult::Ok;\n  case spdlog::level::warn:\n    ENVOY_LOG(warn, \"wasm log{}: {}\", log_prefix(), message);\n    return WasmResult::Ok;\n  case spdlog::level::err:\n    ENVOY_LOG(error, \"wasm log{}: {}\", log_prefix(), message);\n    return WasmResult::Ok;\n  case spdlog::level::critical:\n    ENVOY_LOG(critical, \"wasm log{}: {}\", log_prefix(), message);\n    return WasmResult::Ok;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nuint32_t Context::getLogLevel() {\n  // Like the \"log\" call above, assume that spdlog level as an int\n  // matches the enum in the SDK\n  return static_cast<uint32_t>(ENVOY_LOGGER().level());\n}\n\n//\n// Calls into the Wasm code.\n//\nbool Context::validateConfiguration(absl::string_view configuration,\n                                    const std::shared_ptr<PluginBase>& plugin_base) {\n  auto plugin = std::static_pointer_cast<Plugin>(plugin_base);\n  if (!wasm()->validate_configuration_) {\n    return true;\n  }\n  plugin_ = plugin_base;\n  auto result =\n      wasm()\n          ->validate_configuration_(this, id_, static_cast<uint32_t>(configuration.size()))\n          .u64_ != 0;\n  plugin_.reset();\n  return result;\n}\n\nabsl::string_view Context::getConfiguration() {\n  if (plugin_) {\n    return plugin_->plugin_configuration_;\n  } else {\n    return wasm()->vm_configuration();\n  }\n};\n\nstd::pair<uint32_t, absl::string_view> Context::getStatus() {\n  return std::make_pair(status_code_, status_message_);\n}\n\nvoid Context::onGrpcReceiveInitialMetadataWrapper(uint32_t token, Http::HeaderMapPtr&& metadata) {\n  grpc_receive_initial_metadata_ = std::move(metadata);\n  onGrpcReceiveInitialMetadata(token, headerSize(grpc_receive_initial_metadata_));\n  grpc_receive_initial_metadata_ = nullptr;\n}\n\nvoid Context::onGrpcReceiveTrailingMetadataWrapper(uint32_t token, Http::HeaderMapPtr&& metadata) {\n  grpc_receive_trailing_metadata_ = std::move(metadata);\n  onGrpcReceiveTrailingMetadata(token, 
headerSize(grpc_receive_trailing_metadata_));\n  grpc_receive_trailing_metadata_ = nullptr;\n}\n\nWasmResult Context::defineMetric(uint32_t metric_type, absl::string_view name,\n                                 uint32_t* metric_id_ptr) {\n  if (metric_type > static_cast<uint32_t>(MetricType::Max)) {\n    return WasmResult::BadArgument;\n  }\n  auto type = static_cast<MetricType>(metric_type);\n  // TODO: Consider rethinking the scoping policy as it does not help in this case.\n  Stats::StatNameManagedStorage storage(name, wasm()->scope_->symbolTable());\n  Stats::StatName stat_name = storage.statName();\n  if (type == MetricType::Counter) {\n    auto id = wasm()->nextCounterMetricId();\n    auto c = &wasm()->scope_->counterFromStatName(stat_name);\n    wasm()->counters_.emplace(id, c);\n    *metric_id_ptr = id;\n    return WasmResult::Ok;\n  }\n  if (type == MetricType::Gauge) {\n    auto id = wasm()->nextGaugeMetricId();\n    auto g = &wasm()->scope_->gaugeFromStatName(stat_name, Stats::Gauge::ImportMode::Accumulate);\n    wasm()->gauges_.emplace(id, g);\n    *metric_id_ptr = id;\n    return WasmResult::Ok;\n  }\n  // (type == MetricType::Histogram) {\n  auto id = wasm()->nextHistogramMetricId();\n  auto h = &wasm()->scope_->histogramFromStatName(stat_name, Stats::Histogram::Unit::Unspecified);\n  wasm()->histograms_.emplace(id, h);\n  *metric_id_ptr = id;\n  return WasmResult::Ok;\n}\n\nWasmResult Context::incrementMetric(uint32_t metric_id, int64_t offset) {\n  auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask);\n  if (type == MetricType::Counter) {\n    auto it = wasm()->counters_.find(metric_id);\n    if (it != wasm()->counters_.end()) {\n      if (offset > 0) {\n        it->second->add(offset);\n        return WasmResult::Ok;\n      } else {\n        return WasmResult::BadArgument;\n      }\n    }\n    return WasmResult::NotFound;\n  } else if (type == MetricType::Gauge) {\n    auto it = wasm()->gauges_.find(metric_id);\n    if (it != 
wasm()->gauges_.end()) {\n      if (offset > 0) {\n        it->second->add(offset);\n        return WasmResult::Ok;\n      } else {\n        it->second->sub(-offset);\n        return WasmResult::Ok;\n      }\n    }\n    return WasmResult::NotFound;\n  }\n  return WasmResult::BadArgument;\n}\n\nWasmResult Context::recordMetric(uint32_t metric_id, uint64_t value) {\n  auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask);\n  if (type == MetricType::Counter) {\n    auto it = wasm()->counters_.find(metric_id);\n    if (it != wasm()->counters_.end()) {\n      it->second->add(value);\n      return WasmResult::Ok;\n    }\n  } else if (type == MetricType::Gauge) {\n    auto it = wasm()->gauges_.find(metric_id);\n    if (it != wasm()->gauges_.end()) {\n      it->second->set(value);\n      return WasmResult::Ok;\n    }\n  } else if (type == MetricType::Histogram) {\n    auto it = wasm()->histograms_.find(metric_id);\n    if (it != wasm()->histograms_.end()) {\n      it->second->recordValue(value);\n      return WasmResult::Ok;\n    }\n  }\n  return WasmResult::NotFound;\n}\n\nWasmResult Context::getMetric(uint32_t metric_id, uint64_t* result_uint64_ptr) {\n  auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask);\n  if (type == MetricType::Counter) {\n    auto it = wasm()->counters_.find(metric_id);\n    if (it != wasm()->counters_.end()) {\n      *result_uint64_ptr = it->second->value();\n      return WasmResult::Ok;\n    }\n    return WasmResult::NotFound;\n  } else if (type == MetricType::Gauge) {\n    auto it = wasm()->gauges_.find(metric_id);\n    if (it != wasm()->gauges_.end()) {\n      *result_uint64_ptr = it->second->value();\n      return WasmResult::Ok;\n    }\n    return WasmResult::NotFound;\n  }\n  return WasmResult::BadArgument;\n}\n\nContext::~Context() {\n  // Cancel any outstanding requests.\n  for (auto& p : http_request_) {\n    p.second.request_->cancel();\n  }\n  for (auto& p : grpc_call_request_) {\n    
p.second.request_->cancel();\n  }\n  for (auto& p : grpc_stream_) {\n    p.second.stream_->resetStream();\n  }\n}\n\nNetwork::FilterStatus convertNetworkFilterStatus(proxy_wasm::FilterStatus status) {\n  switch (status) {\n  default:\n  case proxy_wasm::FilterStatus::Continue:\n    return Network::FilterStatus::Continue;\n  case proxy_wasm::FilterStatus::StopIteration:\n    return Network::FilterStatus::StopIteration;\n  }\n};\n\nHttp::FilterHeadersStatus convertFilterHeadersStatus(proxy_wasm::FilterHeadersStatus status) {\n  switch (status) {\n  default:\n  case proxy_wasm::FilterHeadersStatus::Continue:\n    return Http::FilterHeadersStatus::Continue;\n  case proxy_wasm::FilterHeadersStatus::StopIteration:\n    return Http::FilterHeadersStatus::StopIteration;\n  case proxy_wasm::FilterHeadersStatus::StopAllIterationAndBuffer:\n    return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n  case proxy_wasm::FilterHeadersStatus::StopAllIterationAndWatermark:\n    return Http::FilterHeadersStatus::StopAllIterationAndWatermark;\n  }\n};\n\nHttp::FilterTrailersStatus convertFilterTrailersStatus(proxy_wasm::FilterTrailersStatus status) {\n  switch (status) {\n  default:\n  case proxy_wasm::FilterTrailersStatus::Continue:\n    return Http::FilterTrailersStatus::Continue;\n  case proxy_wasm::FilterTrailersStatus::StopIteration:\n    return Http::FilterTrailersStatus::StopIteration;\n  }\n};\n\nHttp::FilterMetadataStatus convertFilterMetadataStatus(proxy_wasm::FilterMetadataStatus status) {\n  switch (status) {\n  default:\n  case proxy_wasm::FilterMetadataStatus::Continue:\n    return Http::FilterMetadataStatus::Continue;\n  }\n};\n\nHttp::FilterDataStatus convertFilterDataStatus(proxy_wasm::FilterDataStatus status) {\n  switch (status) {\n  default:\n  case proxy_wasm::FilterDataStatus::Continue:\n    return Http::FilterDataStatus::Continue;\n  case proxy_wasm::FilterDataStatus::StopIterationAndBuffer:\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  
case proxy_wasm::FilterDataStatus::StopIterationAndWatermark:\n    return Http::FilterDataStatus::StopIterationAndWatermark;\n  case proxy_wasm::FilterDataStatus::StopIterationNoBuffer:\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n};\n\nNetwork::FilterStatus Context::onNewConnection() {\n  onCreate();\n  return convertNetworkFilterStatus(onNetworkNewConnection());\n};\n\nNetwork::FilterStatus Context::onData(::Envoy::Buffer::Instance& data, bool end_stream) {\n  if (!in_vm_context_created_) {\n    return Network::FilterStatus::Continue;\n  }\n  network_downstream_data_buffer_ = &data;\n  end_of_stream_ = end_stream;\n  auto result = convertNetworkFilterStatus(onDownstreamData(data.length(), end_stream));\n  if (result == Network::FilterStatus::Continue) {\n    network_downstream_data_buffer_ = nullptr;\n  }\n  return result;\n}\n\nNetwork::FilterStatus Context::onWrite(::Envoy::Buffer::Instance& data, bool end_stream) {\n  if (!in_vm_context_created_) {\n    return Network::FilterStatus::Continue;\n  }\n  network_upstream_data_buffer_ = &data;\n  end_of_stream_ = end_stream;\n  auto result = convertNetworkFilterStatus(onUpstreamData(data.length(), end_stream));\n  if (result == Network::FilterStatus::Continue) {\n    network_upstream_data_buffer_ = nullptr;\n  }\n  if (end_stream) {\n    // This is called when seeing end_stream=true and not on an upstream connection event,\n    // because registering for latter requires replicating the whole TCP proxy extension.\n    onUpstreamConnectionClose(CloseType::Unknown);\n  }\n  return result;\n}\n\nvoid Context::onEvent(Network::ConnectionEvent event) {\n  if (!in_vm_context_created_) {\n    return;\n  }\n  switch (event) {\n  case Network::ConnectionEvent::LocalClose:\n    onDownstreamConnectionClose(CloseType::Local);\n    break;\n  case Network::ConnectionEvent::RemoteClose:\n    onDownstreamConnectionClose(CloseType::Remote);\n    break;\n  default:\n    break;\n  }\n}\n\nvoid 
Context::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {\n  network_read_filter_callbacks_ = &callbacks;\n  network_read_filter_callbacks_->connection().addConnectionCallbacks(*this);\n}\n\nvoid Context::initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) {\n  network_write_filter_callbacks_ = &callbacks;\n}\n\nvoid Context::log(const Http::RequestHeaderMap* request_headers,\n                  const Http::ResponseHeaderMap* response_headers,\n                  const Http::ResponseTrailerMap* response_trailers,\n                  const StreamInfo::StreamInfo& stream_info) {\n  if (!in_vm_context_created_) {\n    // If the request is invalid then onRequestHeaders() will not be called and neither will\n    // onCreate() in cases like sendLocalReply who short-circuits envoy\n    // lifecycle. This is because Envoy does not have a well defined lifetime for the combined\n    // HTTP\n    // + AccessLog filter. Thus, to log these scenarios, we call onCreate() in log function below.\n    onCreate();\n  }\n\n  access_log_request_headers_ = request_headers;\n  // ? request_trailers  ?\n  access_log_response_headers_ = response_headers;\n  access_log_response_trailers_ = response_trailers;\n  access_log_stream_info_ = &stream_info;\n\n  onLog();\n\n  access_log_request_headers_ = nullptr;\n  // ? 
request_trailers  ?\n  access_log_response_headers_ = nullptr;\n  access_log_response_trailers_ = nullptr;\n  access_log_stream_info_ = nullptr;\n}\n\nvoid Context::onDestroy() {\n  if (destroyed_ || !in_vm_context_created_) {\n    return;\n  }\n  destroyed_ = true;\n  onDone();\n  onDelete();\n}\n\nWasmResult Context::continueStream(WasmStreamType stream_type) {\n  switch (stream_type) {\n  case WasmStreamType::Request:\n    if (decoder_callbacks_) {\n      decoder_callbacks_->continueDecoding();\n    }\n    break;\n  case WasmStreamType::Response:\n    if (encoder_callbacks_) {\n      encoder_callbacks_->continueEncoding();\n    }\n    break;\n  default:\n    return WasmResult::BadArgument;\n  }\n  request_headers_ = nullptr;\n  request_body_buffer_ = nullptr;\n  request_trailers_ = nullptr;\n  request_metadata_ = nullptr;\n  return WasmResult::Ok;\n}\n\nWasmResult Context::closeStream(WasmStreamType stream_type) {\n  switch (stream_type) {\n  case WasmStreamType::Request:\n    if (decoder_callbacks_) {\n      if (!decoder_callbacks_->streamInfo().responseCodeDetails().has_value()) {\n        decoder_callbacks_->streamInfo().setResponseCodeDetails(FailStreamResponseDetails);\n      }\n      decoder_callbacks_->resetStream();\n    }\n    return WasmResult::Ok;\n  case WasmStreamType::Response:\n    if (encoder_callbacks_) {\n      if (!encoder_callbacks_->streamInfo().responseCodeDetails().has_value()) {\n        encoder_callbacks_->streamInfo().setResponseCodeDetails(FailStreamResponseDetails);\n      }\n      encoder_callbacks_->resetStream();\n    }\n    return WasmResult::Ok;\n  case WasmStreamType::Downstream:\n    if (network_read_filter_callbacks_) {\n      network_read_filter_callbacks_->connection().close(\n          Envoy::Network::ConnectionCloseType::FlushWrite);\n    }\n    return WasmResult::Ok;\n  case WasmStreamType::Upstream:\n    network_write_filter_callbacks_->connection().close(\n        Envoy::Network::ConnectionCloseType::FlushWrite);\n    
return WasmResult::Ok;\n  }\n  return WasmResult::BadArgument;\n}\n\nWasmResult Context::sendLocalResponse(uint32_t response_code, absl::string_view body_text,\n                                      Pairs additional_headers, uint32_t grpc_status,\n                                      absl::string_view details) {\n  // \"additional_headers\" is a collection of string_views. These will no longer\n  // be valid when \"modify_headers\" is finally called below, so we must\n  // make copies of all the headers.\n  std::vector<std::pair<Http::LowerCaseString, std::string>> additional_headers_copy;\n  for (auto& p : additional_headers) {\n    const Http::LowerCaseString lower_key{std::string(p.first)};\n    additional_headers_copy.emplace_back(lower_key, std::string(p.second));\n  }\n\n  auto modify_headers = [additional_headers_copy](Http::HeaderMap& headers) {\n    for (auto& p : additional_headers_copy) {\n      headers.addCopy(p.first, p.second);\n    }\n  };\n\n  if (decoder_callbacks_) {\n    // This is a bit subtle because proxy_on_delete() does call DeferAfterCallActions(),\n    // so in theory it could call this and the Context in the VM would be invalid,\n    // but because it only gets called after the connections have drained, the call to\n    // sendLocalReply() will fail. 
Net net, this is safe.\n    wasm()->addAfterVmCallAction([this, response_code, body_text = std::string(body_text),\n                                  modify_headers = std::move(modify_headers), grpc_status,\n                                  details = std::string(details)] {\n      decoder_callbacks_->sendLocalReply(static_cast<Envoy::Http::Code>(response_code), body_text,\n                                         modify_headers, grpc_status, details);\n    });\n  }\n  return WasmResult::Ok;\n}\n\nHttp::FilterHeadersStatus Context::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) {\n  onCreate();\n  http_request_started_ = true;\n  request_headers_ = &headers;\n  end_of_stream_ = end_stream;\n  auto result = convertFilterHeadersStatus(onRequestHeaders(headerSize(&headers), end_stream));\n  if (result == Http::FilterHeadersStatus::Continue) {\n    request_headers_ = nullptr;\n  }\n  return result;\n}\n\nHttp::FilterDataStatus Context::decodeData(::Envoy::Buffer::Instance& data, bool end_stream) {\n  if (!http_request_started_) {\n    return Http::FilterDataStatus::Continue;\n  }\n  request_body_buffer_ = &data;\n  end_of_stream_ = end_stream;\n  const auto buffer = getBuffer(WasmBufferType::HttpRequestBody);\n  const auto buffer_size = (buffer == nullptr) ? 
0 : buffer->size();\n  auto result = convertFilterDataStatus(onRequestBody(buffer_size, end_stream));\n  buffering_request_body_ = false;\n  switch (result) {\n  case Http::FilterDataStatus::Continue:\n    request_body_buffer_ = nullptr;\n    break;\n  case Http::FilterDataStatus::StopIterationAndBuffer:\n    buffering_request_body_ = true;\n    break;\n  case Http::FilterDataStatus::StopIterationAndWatermark:\n  case Http::FilterDataStatus::StopIterationNoBuffer:\n    break;\n  }\n  return result;\n}\n\nHttp::FilterTrailersStatus Context::decodeTrailers(Http::RequestTrailerMap& trailers) {\n  if (!http_request_started_) {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  request_trailers_ = &trailers;\n  auto result = convertFilterTrailersStatus(onRequestTrailers(headerSize(&trailers)));\n  if (result == Http::FilterTrailersStatus::Continue) {\n    request_trailers_ = nullptr;\n  }\n  return result;\n}\n\nHttp::FilterMetadataStatus Context::decodeMetadata(Http::MetadataMap& request_metadata) {\n  if (!http_request_started_) {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  request_metadata_ = &request_metadata;\n  auto result = convertFilterMetadataStatus(onRequestMetadata(headerSize(&request_metadata)));\n  if (result == Http::FilterMetadataStatus::Continue) {\n    request_metadata_ = nullptr;\n  }\n  return result;\n}\n\nvoid Context::setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks& callbacks) {\n  decoder_callbacks_ = &callbacks;\n}\n\nHttp::FilterHeadersStatus Context::encode100ContinueHeaders(Http::ResponseHeaderMap&) {\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterHeadersStatus Context::encodeHeaders(Http::ResponseHeaderMap& headers,\n                                                 bool end_stream) {\n  if (!http_request_started_) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  response_headers_ = &headers;\n  end_of_stream_ = end_stream;\n  auto result = 
convertFilterHeadersStatus(onResponseHeaders(headerSize(&headers), end_stream));\n  if (result == Http::FilterHeadersStatus::Continue) {\n    response_headers_ = nullptr;\n  }\n  return result;\n}\n\nHttp::FilterDataStatus Context::encodeData(::Envoy::Buffer::Instance& data, bool end_stream) {\n  if (!http_request_started_) {\n    return Http::FilterDataStatus::Continue;\n  }\n  response_body_buffer_ = &data;\n  end_of_stream_ = end_stream;\n  const auto buffer = getBuffer(WasmBufferType::HttpResponseBody);\n  const auto buffer_size = (buffer == nullptr) ? 0 : buffer->size();\n  auto result = convertFilterDataStatus(onResponseBody(buffer_size, end_stream));\n  buffering_response_body_ = false;\n  switch (result) {\n  case Http::FilterDataStatus::Continue:\n    response_body_buffer_ = nullptr;\n    break;\n  case Http::FilterDataStatus::StopIterationAndBuffer:\n    buffering_response_body_ = true;\n    break;\n  case Http::FilterDataStatus::StopIterationAndWatermark:\n  case Http::FilterDataStatus::StopIterationNoBuffer:\n    break;\n  }\n  return result;\n}\n\nHttp::FilterTrailersStatus Context::encodeTrailers(Http::ResponseTrailerMap& trailers) {\n  if (!http_request_started_) {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  response_trailers_ = &trailers;\n  auto result = convertFilterTrailersStatus(onResponseTrailers(headerSize(&trailers)));\n  if (result == Http::FilterTrailersStatus::Continue) {\n    response_trailers_ = nullptr;\n  }\n  return result;\n}\n\nHttp::FilterMetadataStatus Context::encodeMetadata(Http::MetadataMap& response_metadata) {\n  if (!http_request_started_) {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  response_metadata_ = &response_metadata;\n  auto result = convertFilterMetadataStatus(onResponseMetadata(headerSize(&response_metadata)));\n  if (result == Http::FilterMetadataStatus::Continue) {\n    response_metadata_ = nullptr;\n  }\n  return result;\n}\n\n//  Http::FilterMetadataStatus::Continue;\n\nvoid 
Context::setEncoderFilterCallbacks(Envoy::Http::StreamEncoderFilterCallbacks& callbacks) {\n  encoder_callbacks_ = &callbacks;\n}\n\nvoid Context::onHttpCallSuccess(uint32_t token, Envoy::Http::ResponseMessagePtr&& response) {\n  // TODO: convert this into a function in proxy-wasm-cpp-host and use here.\n  if (proxy_wasm::current_context_ != nullptr) {\n    // We are in a reentrant call, so defer.\n    wasm()->addAfterVmCallAction([this, token, response = response.release()] {\n      onHttpCallSuccess(token, std::unique_ptr<Envoy::Http::ResponseMessage>(response));\n    });\n    return;\n  }\n  http_call_response_ = &response;\n  uint32_t body_size = response->body().length();\n  onHttpCallResponse(token, response->headers().size(), body_size,\n                     headerSize(response->trailers()));\n  http_call_response_ = nullptr;\n  http_request_.erase(token);\n}\n\nvoid Context::onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason reason) {\n  if (proxy_wasm::current_context_ != nullptr) {\n    // We are in a reentrant call, so defer.\n    wasm()->addAfterVmCallAction([this, token, reason] { onHttpCallFailure(token, reason); });\n    return;\n  }\n  status_code_ = static_cast<uint32_t>(WasmResult::BrokenConnection);\n  // This is the only value currently.\n  ASSERT(reason == Http::AsyncClient::FailureReason::Reset);\n  status_message_ = \"reset\";\n  onHttpCallResponse(token, 0, 0, 0);\n  status_message_ = \"\";\n  http_request_.erase(token);\n}\n\nvoid Context::onGrpcReceiveWrapper(uint32_t token, ::Envoy::Buffer::InstancePtr response) {\n  ASSERT(proxy_wasm::current_context_ == nullptr); // Non-reentrant.\n  if (wasm()->on_grpc_receive_) {\n    grpc_receive_buffer_ = std::move(response);\n    uint32_t response_size = grpc_receive_buffer_->length();\n    ContextBase::onGrpcReceive(token, response_size);\n    grpc_receive_buffer_.reset();\n  }\n  if (isGrpcCallToken(token)) {\n    grpc_call_request_.erase(token);\n  }\n}\n\nvoid 
Context::onGrpcCloseWrapper(uint32_t token, const Grpc::Status::GrpcStatus& status,\n                                 const absl::string_view message) {\n  if (proxy_wasm::current_context_ != nullptr) {\n    // We are in a reentrant call, so defer.\n    wasm()->addAfterVmCallAction([this, token, status, message = std::string(message)] {\n      onGrpcCloseWrapper(token, status, message);\n    });\n    return;\n  }\n  if (wasm()->on_grpc_close_) {\n    status_code_ = static_cast<uint32_t>(status);\n    status_message_ = message;\n    onGrpcClose(token, status_code_);\n    status_message_ = \"\";\n  }\n  if (isGrpcCallToken(token)) {\n    grpc_call_request_.erase(token);\n  } else {\n    auto it = grpc_stream_.find(token);\n    if (it != grpc_stream_.end()) {\n      if (it->second.local_closed_) {\n        grpc_stream_.erase(token);\n      }\n    }\n  }\n}\n\nWasmResult Context::grpcSend(uint32_t token, absl::string_view message, bool end_stream) {\n  if (isGrpcCallToken(token)) {\n    return WasmResult::BadArgument;\n  }\n  auto it = grpc_stream_.find(token);\n  if (it == grpc_stream_.end()) {\n    return WasmResult::NotFound;\n  }\n  if (it->second.stream_) {\n    it->second.stream_->sendMessageRaw(::Envoy::Buffer::InstancePtr(new ::Envoy::Buffer::OwnedImpl(\n                                           message.data(), message.size())),\n                                       end_stream);\n  }\n  return WasmResult::Ok;\n}\n\nWasmResult Context::grpcClose(uint32_t token) {\n  if (isGrpcCallToken(token)) {\n    auto it = grpc_call_request_.find(token);\n    if (it == grpc_call_request_.end()) {\n      return WasmResult::NotFound;\n    }\n    if (it->second.request_) {\n      it->second.request_->cancel();\n    }\n    grpc_call_request_.erase(token);\n  } else {\n    auto it = grpc_stream_.find(token);\n    if (it == grpc_stream_.end()) {\n      return WasmResult::NotFound;\n    }\n    if (it->second.stream_) {\n      it->second.stream_->closeStream();\n    }\n    if 
(it->second.remote_closed_) {\n      grpc_stream_.erase(token);\n    } else {\n      it->second.local_closed_ = true;\n    }\n  }\n  return WasmResult::Ok;\n}\n\nWasmResult Context::grpcCancel(uint32_t token) {\n  if (isGrpcCallToken(token)) {\n    auto it = grpc_call_request_.find(token);\n    if (it == grpc_call_request_.end()) {\n      return WasmResult::NotFound;\n    }\n    if (it->second.request_) {\n      it->second.request_->cancel();\n    }\n    grpc_call_request_.erase(token);\n  } else {\n    auto it = grpc_stream_.find(token);\n    if (it == grpc_stream_.end()) {\n      return WasmResult::NotFound;\n    }\n    if (it->second.stream_) {\n      it->second.stream_->resetStream();\n    }\n    grpc_stream_.erase(token);\n  }\n  return WasmResult::Ok;\n}\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/context.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <map>\n#include <memory>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/extensions/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/stats/sink.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/common/wasm/wasm_state.h\"\n#include \"extensions/filters/common/expr/evaluator.h\"\n\n#include \"eval/public/activation.h\"\n#include \"include/proxy-wasm/wasm.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\nusing proxy_wasm::BufferInterface;\nusing proxy_wasm::CloseType;\nusing proxy_wasm::ContextBase;\nusing proxy_wasm::Pairs;\nusing proxy_wasm::PairsWithStringValues;\nusing proxy_wasm::PluginBase;\nusing proxy_wasm::SharedQueueDequeueToken;\nusing proxy_wasm::SharedQueueEnqueueToken;\nusing proxy_wasm::WasmBase;\nusing proxy_wasm::WasmBufferType;\nusing proxy_wasm::WasmHandleBase;\nusing proxy_wasm::WasmHeaderMapType;\nusing proxy_wasm::WasmResult;\nusing proxy_wasm::WasmStreamType;\n\nusing VmConfig = envoy::extensions::wasm::v3::VmConfig;\nusing GrpcService = envoy::config::core::v3::GrpcService;\n\nclass Wasm;\n\nusing WasmHandleBaseSharedPtr = std::shared_ptr<WasmHandleBase>;\n\n// Opaque context object.\nclass StorageObject {\npublic:\n  virtual ~StorageObject() = default;\n};\n\nclass Buffer : public proxy_wasm::BufferBase {\npublic:\n  Buffer() = default;\n\n  // proxy_wasm::BufferInterface\n  size_t size() const override;\n  WasmResult copyTo(WasmBase* wasm, size_t start, size_t length, uint64_t ptr_ptr,\n                    uint64_t size_ptr) const override;\n  WasmResult copyFrom(size_t start, size_t length, absl::string_view data) override;\n\n  // proxy_wasm::BufferBase\n  void clear() override {\n    proxy_wasm::BufferBase::clear();\n    const_buffer_instance_ = nullptr;\n    
buffer_instance_ = nullptr;\n  }\n  Buffer* set(absl::string_view data) {\n    return static_cast<Buffer*>(proxy_wasm::BufferBase::set(data));\n  }\n  Buffer* set(std::unique_ptr<char[]> owned_data, uint32_t owned_data_size) {\n    return static_cast<Buffer*>(\n        proxy_wasm::BufferBase::set(std::move(owned_data), owned_data_size));\n  }\n\n  Buffer* set(::Envoy::Buffer::Instance* buffer_instance) {\n    clear();\n    buffer_instance_ = buffer_instance;\n    const_buffer_instance_ = buffer_instance;\n    return this;\n  }\n  Buffer* set(const ::Envoy::Buffer::Instance* buffer_instance) {\n    clear();\n    const_buffer_instance_ = buffer_instance;\n    return this;\n  }\n\nprivate:\n  const ::Envoy::Buffer::Instance* const_buffer_instance_{};\n  ::Envoy::Buffer::Instance* buffer_instance_{};\n};\n\n// Plugin contains the information for a filter/service.\nstruct Plugin : public PluginBase {\n  Plugin(absl::string_view name, absl::string_view root_id, absl::string_view vm_id,\n         absl::string_view runtime, absl::string_view plugin_configuration, bool fail_open,\n         envoy::config::core::v3::TrafficDirection direction,\n         const LocalInfo::LocalInfo& local_info,\n         const envoy::config::core::v3::Metadata* listener_metadata)\n      : PluginBase(name, root_id, vm_id, runtime, plugin_configuration, fail_open),\n        direction_(direction), local_info_(local_info), listener_metadata_(listener_metadata) {}\n\n  envoy::config::core::v3::TrafficDirection direction_;\n  const LocalInfo::LocalInfo& local_info_;\n  const envoy::config::core::v3::Metadata* listener_metadata_;\n};\nusing PluginSharedPtr = std::shared_ptr<Plugin>;\n\n// A context which will be the target of callbacks for a particular session\n// e.g. 
a handler of a stream.\nclass Context : public proxy_wasm::ContextBase,\n                public Logger::Loggable<Logger::Id::wasm>,\n                public AccessLog::Instance,\n                public Http::StreamFilter,\n                public Network::ConnectionCallbacks,\n                public Network::Filter,\n                public google::api::expr::runtime::BaseActivation,\n                public std::enable_shared_from_this<Context> {\npublic:\n  Context();                                                                    // Testing.\n  Context(Wasm* wasm);                                                          // Vm Context.\n  Context(Wasm* wasm, const PluginSharedPtr& plugin);                           // Root Context.\n  Context(Wasm* wasm, uint32_t root_context_id, const PluginSharedPtr& plugin); // Stream context.\n  ~Context() override;\n\n  Wasm* wasm() const;\n  Plugin* plugin() const;\n  Context* rootContext() const;\n  Upstream::ClusterManager& clusterManager() const;\n\n  // proxy_wasm::ContextBase\n  void error(absl::string_view message) override;\n\n  // Retrieves the stream info associated with the request (a.k.a active stream).\n  // It selects a value based on the following order: encoder callback, decoder\n  // callback, log callback, network read filter callback, network write filter\n  // callback. As long as any one of the callbacks is invoked, the value should be\n  // available.\n  const StreamInfo::StreamInfo* getConstRequestStreamInfo() const;\n  StreamInfo::StreamInfo* getRequestStreamInfo() const;\n\n  // Retrieves the connection object associated with the request (a.k.a active stream).\n  // It selects a value based on the following order: encoder callback, decoder\n  // callback. 
As long as any one of the callbacks is invoked, the value should be\n  // available.\n  const Network::Connection* getConnection() const;\n\n  //\n  // VM level down-calls into the Wasm code on Context(id == 0).\n  //\n  virtual bool validateConfiguration(absl::string_view configuration,\n                                     const std::shared_ptr<PluginBase>& plugin); // deprecated\n\n  // AccessLog::Instance\n  void log(const Http::RequestHeaderMap* request_headers,\n           const Http::ResponseHeaderMap* response_headers,\n           const Http::ResponseTrailerMap* response_trailers,\n           const StreamInfo::StreamInfo& stream_info) override;\n\n  uint32_t getLogLevel() override;\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n  // Network::ReadFilter\n  Network::FilterStatus onNewConnection() override;\n  Network::FilterStatus onData(::Envoy::Buffer::Instance& data, bool end_stream) override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override;\n\n  // Network::WriteFilter\n  Network::FilterStatus onWrite(::Envoy::Buffer::Instance& data, bool end_stream) override;\n  void initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) override;\n\n  // proxy_wasm::ContextBase\n  void onDownstreamConnectionClose(CloseType) override;\n  void onUpstreamConnectionClose(CloseType) override;\n\n  // Http::StreamFilterBase. 
Note: This calls onDone() in Wasm.\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(::Envoy::Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  Http::FilterMetadataStatus decodeMetadata(Http::MetadataMap& metadata_map) override;\n  void setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks& callbacks) override;\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override;\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(::Envoy::Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override;\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap& metadata_map) override;\n  void setEncoderFilterCallbacks(Envoy::Http::StreamEncoderFilterCallbacks& callbacks) override;\n\n  // VM calls out to host.\n  // proxy_wasm::ContextBase\n\n  // General\n  WasmResult log(uint32_t level, absl::string_view message) override;\n  uint64_t getCurrentTimeNanoseconds() override;\n  absl::string_view getConfiguration() override;\n  std::pair<uint32_t, absl::string_view> getStatus() override;\n\n  // State accessors\n  WasmResult getProperty(absl::string_view path, std::string* result) override;\n  WasmResult setProperty(absl::string_view path, absl::string_view value) override;\n  WasmResult declareProperty(absl::string_view path,\n                             std::unique_ptr<const WasmStatePrototype> state_prototype);\n\n  // Continue\n  WasmResult continueStream(WasmStreamType stream_type) override;\n  
WasmResult closeStream(WasmStreamType stream_type) override;\n  WasmResult sendLocalResponse(uint32_t response_code, absl::string_view body_text,\n                               Pairs additional_headers, uint32_t grpc_status,\n                               absl::string_view details) override;\n  void clearRouteCache() override {\n    if (decoder_callbacks_) {\n      decoder_callbacks_->clearRouteCache();\n    }\n  }\n\n  // Header/Trailer/Metadata Maps\n  WasmResult addHeaderMapValue(WasmHeaderMapType type, absl::string_view key,\n                               absl::string_view value) override;\n  WasmResult getHeaderMapValue(WasmHeaderMapType type, absl::string_view key,\n                               absl::string_view* value) override;\n  WasmResult getHeaderMapPairs(WasmHeaderMapType type, Pairs* result) override;\n  WasmResult setHeaderMapPairs(WasmHeaderMapType type, const Pairs& pairs) override;\n\n  WasmResult removeHeaderMapValue(WasmHeaderMapType type, absl::string_view key) override;\n  WasmResult replaceHeaderMapValue(WasmHeaderMapType type, absl::string_view key,\n                                   absl::string_view value) override;\n\n  WasmResult getHeaderMapSize(WasmHeaderMapType type, uint32_t* size) override;\n\n  // Buffer\n  BufferInterface* getBuffer(WasmBufferType type) override;\n  // TODO: use stream_type.\n  bool endOfStream(WasmStreamType /* stream_type */) override { return end_of_stream_; }\n\n  // HTTP\n  WasmResult httpCall(absl::string_view cluster, const Pairs& request_headers,\n                      absl::string_view request_body, const Pairs& request_trailers,\n                      int timeout_millisconds, uint32_t* token_ptr) override;\n\n  // Stats/Metrics\n  WasmResult defineMetric(uint32_t type, absl::string_view name, uint32_t* metric_id_ptr) override;\n  WasmResult incrementMetric(uint32_t metric_id, int64_t offset) override;\n  WasmResult recordMetric(uint32_t metric_id, uint64_t value) override;\n  WasmResult 
getMetric(uint32_t metric_id, uint64_t* value_ptr) override;\n\n  // gRPC\n  WasmResult grpcCall(absl::string_view grpc_service, absl::string_view service_name,\n                      absl::string_view method_name, const Pairs& initial_metadata,\n                      absl::string_view request, std::chrono::milliseconds timeout,\n                      uint32_t* token_ptr) override;\n  WasmResult grpcStream(absl::string_view grpc_service, absl::string_view service_name,\n                        absl::string_view method_name, const Pairs& initial_metadat,\n                        uint32_t* token_ptr) override;\n\n  WasmResult grpcClose(uint32_t token) override;\n  WasmResult grpcCancel(uint32_t token) override;\n  WasmResult grpcSend(uint32_t token, absl::string_view message, bool end_stream) override;\n\n  // Envoy specific ABI\n  void onResolveDns(uint32_t token, Envoy::Network::DnsResolver::ResolutionStatus status,\n                    std::list<Envoy::Network::DnsResponse>&& response);\n\n  void onStatsUpdate(Envoy::Stats::MetricSnapshot& snapshot);\n\n  // CEL evaluation\n  std::vector<const google::api::expr::runtime::CelFunction*>\n  FindFunctionOverloads(absl::string_view) const override {\n    return {};\n  }\n  absl::optional<google::api::expr::runtime::CelValue>\n  findValue(absl::string_view name, Protobuf::Arena* arena, bool last) const;\n  absl::optional<google::api::expr::runtime::CelValue>\n  FindValue(absl::string_view name, Protobuf::Arena* arena) const override {\n    return findValue(name, arena, false);\n  }\n  bool IsPathUnknown(absl::string_view) const override { return false; }\n  const std::vector<google::api::expr::runtime::CelAttributePattern>&\n  unknown_attribute_patterns() const override {\n    static const std::vector<google::api::expr::runtime::CelAttributePattern> empty;\n    return empty;\n  }\n  const Protobuf::FieldMask& unknown_paths() const override {\n    return Protobuf::FieldMask::default_instance();\n  }\n\n  // Foreign 
function state\n  virtual void setForeignData(absl::string_view data_name, std::unique_ptr<StorageObject> data) {\n    data_storage_[data_name] = std::move(data);\n  }\n  template <typename T> T* getForeignData(absl::string_view data_name) {\n    const auto& it = data_storage_.find(data_name);\n    if (it == data_storage_.end()) {\n      return nullptr;\n    }\n    return dynamic_cast<T*>(it->second.get());\n  }\n\n  uint32_t nextGrpcCallToken();\n  uint32_t nextGrpcStreamToken();\n  uint32_t nextHttpCallToken();\n  void setNextGrpcTokenForTesting(uint32_t token) { next_grpc_token_ = token; }\n  void setNextHttpCallTokenForTesting(uint32_t token) { next_http_call_token_ = token; }\n\nprotected:\n  friend class Wasm;\n\n  void addAfterVmCallAction(std::function<void()> f);\n  void onCloseTCP();\n\n  struct AsyncClientHandler : public Http::AsyncClient::Callbacks {\n    // Http::AsyncClient::Callbacks\n    void onSuccess(const Http::AsyncClient::Request&,\n                   Envoy::Http::ResponseMessagePtr&& response) override {\n      context_->onHttpCallSuccess(token_, std::move(response));\n    }\n    void onFailure(const Http::AsyncClient::Request&,\n                   Http::AsyncClient::FailureReason reason) override {\n      context_->onHttpCallFailure(token_, reason);\n    }\n    void\n    onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span& /* span */,\n                                 const Http::ResponseHeaderMap* /* response_headers */) override {}\n\n    Context* context_;\n    uint32_t token_;\n    Http::AsyncClient::Request* request_;\n  };\n\n  struct GrpcCallClientHandler : public Grpc::RawAsyncRequestCallbacks {\n    // Grpc::AsyncRequestCallbacks\n    void onCreateInitialMetadata(Http::RequestHeaderMap& initial_metadata) override {\n      context_->onGrpcCreateInitialMetadata(token_, initial_metadata);\n    }\n    void onSuccessRaw(::Envoy::Buffer::InstancePtr&& response, Tracing::Span& /* span */) override {\n      
context_->onGrpcReceiveWrapper(token_, std::move(response));\n    }\n    void onFailure(Grpc::Status::GrpcStatus status, const std::string& message,\n                   Tracing::Span& /* span */) override {\n      context_->onGrpcCloseWrapper(token_, status, message);\n    }\n\n    Context* context_;\n    uint32_t token_;\n    Grpc::RawAsyncClientPtr client_;\n    Grpc::AsyncRequest* request_;\n  };\n\n  struct GrpcStreamClientHandler : public Grpc::RawAsyncStreamCallbacks {\n    // Grpc::AsyncStreamCallbacks\n    void onCreateInitialMetadata(Http::RequestHeaderMap&) override {}\n    void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata) override {\n      context_->onGrpcReceiveInitialMetadataWrapper(token_, std::move(metadata));\n    }\n    bool onReceiveMessageRaw(::Envoy::Buffer::InstancePtr&& response) override {\n      context_->onGrpcReceiveWrapper(token_, std::move(response));\n      return true;\n    }\n    void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) override {\n      context_->onGrpcReceiveTrailingMetadataWrapper(token_, std::move(metadata));\n    }\n    void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override {\n      remote_closed_ = true;\n      context_->onGrpcCloseWrapper(token_, status, message);\n    }\n\n    Context* context_;\n    uint32_t token_;\n    Grpc::RawAsyncClientPtr client_;\n    Grpc::RawAsyncStream* stream_;\n    bool local_closed_ = false;\n    bool remote_closed_ = false;\n  };\n\n  void onHttpCallSuccess(uint32_t token, Envoy::Http::ResponseMessagePtr&& response);\n  void onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason reason);\n\n  void onGrpcCreateInitialMetadata(uint32_t token, Http::RequestHeaderMap& metadata);\n  void onGrpcReceiveInitialMetadataWrapper(uint32_t token, Http::HeaderMapPtr&& metadata);\n  void onGrpcReceiveWrapper(uint32_t token, ::Envoy::Buffer::InstancePtr response);\n  void onGrpcReceiveTrailingMetadataWrapper(uint32_t 
token, Http::HeaderMapPtr&& metadata);\n  void onGrpcCloseWrapper(uint32_t token, const Grpc::Status::GrpcStatus& status,\n                          const absl::string_view message);\n\n  bool isGrpcStreamToken(uint32_t token) { return (token & 1) == 0; }\n  bool isGrpcCallToken(uint32_t token) { return (token & 1) == 1; }\n\n  Http::HeaderMap* getMap(WasmHeaderMapType type);\n  const Http::HeaderMap* getConstMap(WasmHeaderMapType type);\n\n  const LocalInfo::LocalInfo* root_local_info_{nullptr}; // set only for root_context.\n\n  uint32_t next_http_call_token_ = 1;\n  uint32_t next_grpc_token_ = 1; // Odd tokens are for Calls even for Streams.\n\n  // Network callbacks.\n  Network::ReadFilterCallbacks* network_read_filter_callbacks_{};\n  Network::WriteFilterCallbacks* network_write_filter_callbacks_{};\n\n  // HTTP callbacks.\n  Envoy::Http::StreamDecoderFilterCallbacks* decoder_callbacks_{};\n  Envoy::Http::StreamEncoderFilterCallbacks* encoder_callbacks_{};\n\n  // Status.\n  uint32_t status_code_{0};\n  absl::string_view status_message_;\n\n  // Network filter state.\n  ::Envoy::Buffer::Instance* network_downstream_data_buffer_{};\n  ::Envoy::Buffer::Instance* network_upstream_data_buffer_{};\n\n  // HTTP filter state.\n  bool http_request_started_ = false; // When decodeHeaders() is called the request is \"started\".\n  Http::RequestHeaderMap* request_headers_{};\n  Http::ResponseHeaderMap* response_headers_{};\n  ::Envoy::Buffer::Instance* request_body_buffer_{};\n  ::Envoy::Buffer::Instance* response_body_buffer_{};\n  Http::RequestTrailerMap* request_trailers_{};\n  Http::ResponseTrailerMap* response_trailers_{};\n  Http::MetadataMap* request_metadata_{};\n  Http::MetadataMap* response_metadata_{};\n\n  // Only available during onHttpCallResponse.\n  Envoy::Http::ResponseMessagePtr* http_call_response_{};\n\n  Http::HeaderMapPtr grpc_receive_initial_metadata_{};\n  Http::HeaderMapPtr grpc_receive_trailing_metadata_{};\n\n  // Only available (non-nullptr) 
during onGrpcReceive.\n  ::Envoy::Buffer::InstancePtr grpc_receive_buffer_;\n\n  // Only available (non-nullptr) during grpcCall and grpcStream.\n  Http::RequestHeaderMapPtr grpc_initial_metadata_;\n\n  // Access log state.\n  const StreamInfo::StreamInfo* access_log_stream_info_{};\n  const Http::RequestHeaderMap* access_log_request_headers_{};\n  const Http::ResponseHeaderMap* access_log_response_headers_{};\n  const Http::ResponseTrailerMap* access_log_response_trailers_{};\n\n  // Temporary state.\n  ProtobufWkt::Struct temporary_metadata_;\n  bool end_of_stream_;\n  bool buffering_request_body_ = false;\n  bool buffering_response_body_ = false;\n  Buffer buffer_;\n\n  // MB: must be a node-type map as we take persistent references to the entries.\n  std::map<uint32_t, AsyncClientHandler> http_request_;\n  std::map<uint32_t, GrpcCallClientHandler> grpc_call_request_;\n  std::map<uint32_t, GrpcStreamClientHandler> grpc_stream_;\n\n  // Opaque state.\n  absl::flat_hash_map<std::string, std::unique_ptr<StorageObject>> data_storage_;\n\n  // TCP State.\n  bool upstream_closed_ = false;\n  bool downstream_closed_ = false;\n  bool tcp_connection_closed_ = false;\n\n  // Filter state prototype declaration.\n  absl::flat_hash_map<std::string, std::unique_ptr<const WasmStatePrototype>> state_prototypes_;\n};\nusing ContextSharedPtr = std::shared_ptr<Context>;\n\nWasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* result);\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/ext/BUILD",
    "content": "load(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\nload(\"@rules_cc//cc:defs.bzl\", \"cc_library\", \"cc_proto_library\")\nload(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"envoy_null_vm_wasm_api\",\n    hdrs = [\n        \"envoy_null_vm_wasm_api.h\",\n        \"envoy_proxy_wasm_api.h\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:api_lib\",\n        \"@proxy_wasm_cpp_sdk//:common_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_null_plugin\",\n    hdrs = [\n        \"envoy_null_plugin.h\",\n        \"envoy_proxy_wasm_api.h\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":declare_property_cc_proto\",\n        \"//source/common/grpc:async_client_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\n# NB: this target is compiled to Wasm. Hence the generic rule.\ncc_library(\n    name = \"envoy_proxy_wasm_api_lib\",\n    srcs = [\"envoy_proxy_wasm_api.cc\"],\n    hdrs = [\"envoy_proxy_wasm_api.h\"],\n    tags = [\"manual\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":declare_property_cc_proto\",\n        \":node_subset_cc_proto\",\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n    alwayslink = 1,\n)\n\n# NB: this target is compiled both to native code and to Wasm. Hence the generic rule.\nproto_library(\n    name = \"declare_property_proto\",\n    srcs = [\"declare_property.proto\"],\n    visibility = [\"//visibility:public\"],\n)\n\n# NB: this target is compiled both to native code and to Wasm. 
Hence the generic rule.\ncc_proto_library(\n    name = \"declare_property_cc_proto\",\n    visibility = [\"//visibility:public\"],\n    deps = [\":declare_property_proto\"],\n)\n\n# NB: this target is compiled both to native code and to Wasm. Hence the generic rule.\nproto_library(\n    name = \"node_subset_proto\",\n    srcs = [\"node_subset.proto\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"@com_google_protobuf//:struct_proto\",\n    ],\n)\n\n# NB: this target is compiled both to native code and to Wasm. Hence the generic rule.\ncc_proto_library(\n    name = \"node_subset_cc_proto\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":node_subset_proto\",\n        # \"//external:protobuf_clib\",\n    ],\n)\n\nfilegroup(\n    name = \"jslib\",\n    srcs = [\n        \"envoy_wasm_intrinsics.js\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "source/extensions/common/wasm/ext/README.md",
    "content": "# Envoy specific extensions to the proxy-wasm SDK\n"
  },
  {
    "path": "source/extensions/common/wasm/ext/declare_property.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.source.extensions.common.wasm;\n\nenum WasmType {\n  Bytes = 0;\n  String = 1;\n  FlatBuffers = 2;\n  Protobuf = 3;\n};\n\nenum LifeSpan {\n  FilterChain = 0;\n  DownstreamRequest = 1;\n  DownstreamConnection = 2;\n};\n\nmessage DeclarePropertyArguments {\n  string name = 1;\n  bool readonly = 2;\n  WasmType type = 3;\n  bytes schema = 4;\n  LifeSpan span = 5;\n};\n"
  },
  {
    "path": "source/extensions/common/wasm/ext/envoy_null_plugin.h",
    "content": "// NOLINT(namespace-envoy)\n#pragma once\n\n#define PROXY_WASM_PROTOBUF 1\n#define PROXY_WASM_PROTOBUF_FULL 1\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"source/extensions/common/wasm/ext/declare_property.pb.h\"\n\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\nproxy_wasm::Word resolve_dns(void* raw_context, proxy_wasm::Word dns_address,\n                             proxy_wasm::Word dns_address_size, proxy_wasm::Word token_ptr);\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n\nnamespace proxy_wasm {\nnamespace null_plugin {\n\n#include \"extensions/common/wasm/ext/envoy_proxy_wasm_api.h\"\nusing GrpcService = envoy::config::core::v3::GrpcService;\nusing namespace proxy_wasm::null_plugin;\n\n#define WS(_x) Word(static_cast<uint64_t>(_x))\n#define WR(_x) Word(reinterpret_cast<uint64_t>(_x))\n\ninline WasmResult envoy_resolve_dns(const char* dns_address, size_t dns_address_size,\n                                    uint32_t* token) {\n  return static_cast<WasmResult>(\n      ::Envoy::Extensions::Common::Wasm::resolve_dns(proxy_wasm::current_context_, WR(dns_address),\n                                                     WS(dns_address_size), WR(token))\n          .u64_);\n}\n\n#undef WS\n#undef WR\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "source/extensions/common/wasm/ext/envoy_null_vm_wasm_api.h",
    "content": "// NOLINT(namespace-envoy)\n#pragma once\n\nnamespace proxy_wasm {\nnamespace null_plugin {\n\n#include \"proxy_wasm_common.h\"\n#include \"proxy_wasm_enums.h\"\n#include \"proxy_wasm_externs.h\"\n\n/*\n * The following headers are used in two different environments, in the Null VM and in Wasm code\n * which require different headers to precede these  such that they can not include the above\n * headers directly. These macros prevent header reordering\n */\n#define _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_ 1\n#include \"proxy_wasm_api.h\"\n#undef _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_\n#define _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_ 1\n#include \"extensions/common/wasm/ext/envoy_proxy_wasm_api.h\"\n#undef _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "source/extensions/common/wasm/ext/envoy_proxy_wasm_api.cc",
    "content": "// NOLINT(namespace-envoy)\n\n#include \"proxy_wasm_intrinsics.h\"\n\n/*\n * These headers span repositories and therefor the following header can not include the above\n * header to enforce the required order. This macros prevent header reordering.\n */\n#define _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_ 1\n#include \"source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h\"\n#undef _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_\n\nEnvoyContextBase* getEnvoyContextBase(uint32_t context_id) {\n  auto context_base = getContextBase(context_id);\n  if (auto root = context_base->asRoot()) {\n    return static_cast<EnvoyContextBase*>(static_cast<EnvoyRootContext*>(root));\n  } else {\n    return static_cast<EnvoyContextBase*>(static_cast<EnvoyContext*>(context_base->asContext()));\n  }\n}\n\nEnvoyContext* getEnvoyContext(uint32_t context_id) {\n  auto context_base = getContextBase(context_id);\n  return static_cast<EnvoyContext*>(context_base->asContext());\n}\n\nEnvoyRootContext* getEnvoyRootContext(uint32_t context_id) {\n  auto context_base = getContextBase(context_id);\n  return static_cast<EnvoyRootContext*>(context_base->asRoot());\n}\n\nextern \"C\" PROXY_WASM_KEEPALIVE void envoy_on_resolve_dns(uint32_t context_id, uint32_t token,\n                                                          uint32_t data_size) {\n  getEnvoyRootContext(context_id)->onResolveDns(token, data_size);\n}\n\nextern \"C\" PROXY_WASM_KEEPALIVE void envoy_on_stats_update(uint32_t context_id,\n                                                           uint32_t data_size) {\n  getEnvoyRootContext(context_id)->onStatsUpdate(data_size);\n}\n"
  },
  {
    "path": "source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h",
    "content": "// NOLINT(namespace-envoy)\n#pragma once\n\n// Note that this file is included in emscripten and NullVM environments and thus depends on\n// the context in which it is included, hence we need to disable clang-tidy warnings.\n\nextern \"C\" WasmResult envoy_resolve_dns(const char* dns_address, size_t dns_address_size,\n                                        uint32_t* token);\n\nclass EnvoyContextBase {\npublic:\n  virtual ~EnvoyContextBase() = default;\n};\n\nclass EnvoyRootContext : public RootContext, public EnvoyContextBase {\npublic:\n  EnvoyRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {}\n  ~EnvoyRootContext() override = default;\n\n  virtual void onResolveDns(uint32_t /* token */, uint32_t /* result_size */) {}\n  virtual void onStatsUpdate(uint32_t /* result_size */) {}\n};\n\nclass EnvoyContext : public Context, public EnvoyContextBase {\npublic:\n  EnvoyContext(uint32_t id, RootContext* root) : Context(id, root) {}\n  ~EnvoyContext() override = default;\n};\n\nstruct DnsResult {\n  uint32_t ttl_seconds;\n  std::string address;\n};\n\nstruct CounterResult {\n  uint64_t delta;\n  std::string_view name;\n  uint64_t value;\n};\n\nstruct GaugeResult {\n  uint64_t value;\n  std::string_view name;\n};\n\nstruct StatResult {\n  std::vector<CounterResult> counters;\n  std::vector<GaugeResult> gauges;\n};\n\nenum class StatType : uint32_t {\n  Counter = 1,\n  Gauge = 2,\n};\n\ninline std::vector<DnsResult> parseDnsResults(std::string_view data) {\n  if (data.size() < 4) {\n    return {};\n  }\n  const uint32_t* pn = reinterpret_cast<const uint32_t*>(data.data());\n  uint32_t n = *pn++;\n  std::vector<DnsResult> results;\n  results.resize(n);\n  const char* pa = data.data() + (1 + n) * sizeof(uint32_t); // skip n + n TTLs\n  for (uint32_t i = 0; i < n; i++) {\n    auto& e = results[i];\n    e.ttl_seconds = *pn++;\n    auto alen = strlen(pa);\n    e.address.assign(pa, alen);\n    pa += alen + 1;\n  }\n  return 
results;\n}\n\ntemplate <typename I> inline uint32_t align(uint32_t i) {\n  return (i + sizeof(I) - 1) & ~(sizeof(I) - 1);\n}\n\ninline StatResult parseStatResults(std::string_view data) {\n  StatResult results;\n  uint32_t data_len = 0;\n  while (data_len < data.length()) {\n    const uint32_t* n = reinterpret_cast<const uint32_t*>(data.data() + data_len);\n    uint32_t block_size = *n++;\n    uint32_t block_type = *n++;\n    uint32_t num_stats = *n++;\n    if (static_cast<StatType>(block_type) == StatType::Counter) { // counter\n      std::vector<CounterResult> counters(num_stats);\n      uint32_t stat_index = data_len + 3 * sizeof(uint32_t);\n      for (uint32_t i = 0; i < num_stats; i++) {\n        const uint32_t* stat_name = reinterpret_cast<const uint32_t*>(data.data() + stat_index);\n        uint32_t name_len = *stat_name;\n        stat_index += sizeof(uint32_t);\n\n        auto& e = counters[i];\n        e.name = {data.data() + stat_index, name_len};\n        stat_index = align<uint64_t>(stat_index + name_len);\n\n        const uint64_t* stat_vals = reinterpret_cast<const uint64_t*>(data.data() + stat_index);\n        e.value = *stat_vals++;\n        e.delta = *stat_vals++;\n\n        stat_index += 2 * sizeof(uint64_t);\n      }\n      results.counters = counters;\n    } else if (static_cast<StatType>(block_type) == StatType::Gauge) { // gauge\n      std::vector<GaugeResult> gauges(num_stats);\n      uint32_t stat_index = data_len + 3 * sizeof(uint32_t);\n      for (uint32_t i = 0; i < num_stats; i++) {\n        const uint32_t* stat_name = reinterpret_cast<const uint32_t*>(data.data() + stat_index);\n        uint32_t name_len = *stat_name;\n        stat_index += sizeof(uint32_t);\n\n        auto& e = gauges[i];\n        e.name = {data.data() + stat_index, name_len};\n        stat_index = align<uint64_t>(stat_index + name_len);\n\n        const uint64_t* stat_vals = reinterpret_cast<const uint64_t*>(data.data() + stat_index);\n        e.value = 
*stat_vals++;\n\n        stat_index += sizeof(uint64_t);\n      }\n      results.gauges = gauges;\n    }\n    data_len += block_size;\n  }\n\n  return results;\n}\n\nextern \"C\" WasmResult envoy_resolve_dns(const char* address, size_t address_size, uint32_t* token);\n"
  },
  {
    "path": "source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js",
    "content": "mergeInto(LibraryManager.library, {\n  envoy_resolve_dns: function() {},\n});\n"
  },
  {
    "path": "source/extensions/common/wasm/ext/node_subset.proto",
    "content": "syntax = \"proto3\";\n\nimport \"google/protobuf/struct.proto\";\n\npackage envoy.source.extensions.common.wasm;\n\n// A subset of message Node from api/envoy/config/core/v?/base.proto.\nmessage NodeSubset {\n  string id = 1;\n  google.protobuf.Struct metadata = 3;\n};\n"
  },
  {
    "path": "source/extensions/common/wasm/foreign.cc",
    "content": "#include \"common/common/logger.h\"\n\n#include \"source/extensions/common/wasm/ext/declare_property.pb.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n\n#if defined(WASM_USE_CEL_PARSER)\n#include \"eval/public/builtin_func_registrar.h\"\n#include \"eval/public/cel_expr_builder_factory.h\"\n#include \"parser/parser.h\"\n#endif\n#include \"zlib.h\"\n\nusing proxy_wasm::RegisterForeignFunction;\nusing proxy_wasm::WasmForeignFunction;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\ntemplate <typename T> WasmForeignFunction createFromClass() {\n  auto c = std::make_shared<T>();\n  return c->create(c);\n}\n\nRegisterForeignFunction registerCompressForeignFunction(\n    \"compress\",\n    [](WasmBase&, absl::string_view arguments,\n       const std::function<void*(size_t size)>& alloc_result) -> WasmResult {\n      unsigned long dest_len = compressBound(arguments.size());\n      std::unique_ptr<unsigned char[]> b(new unsigned char[dest_len]);\n      if (compress(b.get(), &dest_len, reinterpret_cast<const unsigned char*>(arguments.data()),\n                   arguments.size()) != Z_OK) {\n        return WasmResult::SerializationFailure;\n      }\n      auto result = alloc_result(dest_len);\n      memcpy(result, b.get(), dest_len);\n      return WasmResult::Ok;\n    });\n\nRegisterForeignFunction registerUncompressForeignFunction(\n    \"uncompress\",\n    [](WasmBase&, absl::string_view arguments,\n       const std::function<void*(size_t size)>& alloc_result) -> WasmResult {\n      unsigned long dest_len = arguments.size() * 2 + 2; // output estimate.\n      while (true) {\n        std::unique_ptr<unsigned char[]> b(new unsigned char[dest_len]);\n        auto r =\n            uncompress(b.get(), &dest_len, reinterpret_cast<const unsigned char*>(arguments.data()),\n                       arguments.size());\n        if (r == Z_OK) {\n          auto result = alloc_result(dest_len);\n          memcpy(result, b.get(), 
dest_len);\n          return WasmResult::Ok;\n        }\n        if (r != Z_BUF_ERROR) {\n          return WasmResult::SerializationFailure;\n        }\n        dest_len = dest_len * 2;\n      }\n    });\n\n#if defined(WASM_USE_CEL_PARSER)\nclass ExpressionFactory : public Logger::Loggable<Logger::Id::wasm> {\nprotected:\n  struct ExpressionData {\n    google::api::expr::v1alpha1::ParsedExpr parsed_expr_;\n    Filters::Common::Expr::ExpressionPtr compiled_expr_;\n  };\n\n  class ExpressionContext : public StorageObject {\n  public:\n    friend class ExpressionFactory;\n    ExpressionContext(Filters::Common::Expr::BuilderPtr builder) : builder_(std::move(builder)) {}\n    uint32_t createToken() {\n      uint32_t token = next_expr_token_++;\n      for (;;) {\n        if (!expr_.count(token)) {\n          break;\n        }\n        token = next_expr_token_++;\n      }\n      return token;\n    }\n    bool hasExpression(uint32_t token) { return expr_.contains(token); }\n    ExpressionData& getExpression(uint32_t token) { return expr_[token]; }\n    void deleteExpression(uint32_t token) { expr_.erase(token); }\n    Filters::Common::Expr::Builder* builder() { return builder_.get(); }\n\n  private:\n    Filters::Common::Expr::BuilderPtr builder_{};\n    uint32_t next_expr_token_ = 0;\n    absl::flat_hash_map<uint32_t, ExpressionData> expr_;\n  };\n\n  static ExpressionContext& getOrCreateContext(ContextBase* context_base) {\n    auto context = static_cast<Context*>(context_base);\n    std::string data_name = \"cel\";\n    auto expr_context = context->getForeignData<ExpressionContext>(data_name);\n    if (!expr_context) {\n      google::api::expr::runtime::InterpreterOptions options;\n      auto builder = google::api::expr::runtime::CreateCelExpressionBuilder(options);\n      auto status =\n          google::api::expr::runtime::RegisterBuiltinFunctions(builder->GetRegistry(), options);\n      if (!status.ok()) {\n        ENVOY_LOG(warn, \"failed to register built-in 
functions: {}\", status.message());\n      }\n      auto new_context = std::make_unique<ExpressionContext>(std::move(builder));\n      expr_context = new_context.get();\n      context->setForeignData(data_name, std::move(new_context));\n    }\n    return *expr_context;\n  }\n};\n\nclass CreateExpressionFactory : public ExpressionFactory {\npublic:\n  WasmForeignFunction create(std::shared_ptr<CreateExpressionFactory> self) const {\n    WasmForeignFunction f =\n        [self](WasmBase&, absl::string_view expr,\n               const std::function<void*(size_t size)>& alloc_result) -> WasmResult {\n      auto parse_status = google::api::expr::parser::Parse(std::string(expr));\n      if (!parse_status.ok()) {\n        ENVOY_LOG(info, \"expr_create parse error: {}\", parse_status.status().message());\n        return WasmResult::BadArgument;\n      }\n\n      auto& expr_context = getOrCreateContext(proxy_wasm::current_context_->root_context());\n      auto token = expr_context.createToken();\n      auto& handler = expr_context.getExpression(token);\n\n      handler.parsed_expr_ = parse_status.value();\n      auto cel_expression_status = expr_context.builder()->CreateExpression(\n          &handler.parsed_expr_.expr(), &handler.parsed_expr_.source_info());\n      if (!cel_expression_status.ok()) {\n        ENVOY_LOG(info, \"expr_create compile error: {}\", cel_expression_status.status().message());\n        expr_context.deleteExpression(token);\n        return WasmResult::BadArgument;\n      }\n\n      handler.compiled_expr_ = std::move(cel_expression_status.value());\n      auto result = reinterpret_cast<uint32_t*>(alloc_result(sizeof(uint32_t)));\n      *result = token;\n      return WasmResult::Ok;\n    };\n    return f;\n  }\n};\nRegisterForeignFunction\n    registerCreateExpressionForeignFunction(\"expr_create\",\n                                            createFromClass<CreateExpressionFactory>());\n\nclass EvaluateExpressionFactory : public ExpressionFactory 
{\npublic:\n  WasmForeignFunction create(std::shared_ptr<EvaluateExpressionFactory> self) const {\n    WasmForeignFunction f =\n        [self](WasmBase&, absl::string_view argument,\n               const std::function<void*(size_t size)>& alloc_result) -> WasmResult {\n      auto& expr_context = getOrCreateContext(proxy_wasm::current_context_->root_context());\n      if (argument.size() != sizeof(uint32_t)) {\n        return WasmResult::BadArgument;\n      }\n      uint32_t token = *reinterpret_cast<const uint32_t*>(argument.data());\n      if (!expr_context.hasExpression(token)) {\n        return WasmResult::NotFound;\n      }\n      Protobuf::Arena arena;\n      auto& handler = expr_context.getExpression(token);\n      auto context = static_cast<Context*>(proxy_wasm::current_context_);\n      auto eval_status = handler.compiled_expr_->Evaluate(*context, &arena);\n      if (!eval_status.ok()) {\n        ENVOY_LOG(debug, \"expr_evaluate error: {}\", eval_status.status().message());\n        return WasmResult::InternalFailure;\n      }\n      auto value = eval_status.value();\n      if (value.IsError()) {\n        ENVOY_LOG(debug, \"expr_evaluate value error: {}\", value.ErrorOrDie()->message());\n        return WasmResult::InternalFailure;\n      }\n      std::string result;\n      auto serialize_status = serializeValue(value, &result);\n      if (serialize_status != WasmResult::Ok) {\n        return serialize_status;\n      }\n      auto output = alloc_result(result.size());\n      memcpy(output, result.data(), result.size());\n      return WasmResult::Ok;\n    };\n    return f;\n  }\n};\nRegisterForeignFunction\n    registerEvaluateExpressionForeignFunction(\"expr_evaluate\",\n                                              createFromClass<EvaluateExpressionFactory>());\n\nclass DeleteExpressionFactory : public ExpressionFactory {\npublic:\n  WasmForeignFunction create(std::shared_ptr<DeleteExpressionFactory> self) const {\n    WasmForeignFunction f = 
[self](WasmBase&, absl::string_view argument,\n                                   const std::function<void*(size_t size)>&) -> WasmResult {\n      auto& expr_context = getOrCreateContext(proxy_wasm::current_context_->root_context());\n      if (argument.size() != sizeof(uint32_t)) {\n        return WasmResult::BadArgument;\n      }\n      uint32_t token = *reinterpret_cast<const uint32_t*>(argument.data());\n      expr_context.deleteExpression(token);\n      return WasmResult::Ok;\n    };\n    return f;\n  }\n};\nRegisterForeignFunction\n    registerDeleteExpressionForeignFunction(\"expr_delete\",\n                                            createFromClass<DeleteExpressionFactory>());\n#endif\n\n// TODO(kyessenov) The factories should be separated into individual compilation units.\n// TODO(kyessenov) Leverage the host argument marshaller instead of the protobuf argument list.\nclass DeclarePropertyFactory {\npublic:\n  WasmForeignFunction create(std::shared_ptr<DeclarePropertyFactory> self) const {\n    WasmForeignFunction f = [self](WasmBase&, absl::string_view arguments,\n                                   const std::function<void*(size_t size)>&) -> WasmResult {\n      envoy::source::extensions::common::wasm::DeclarePropertyArguments args;\n      if (args.ParseFromArray(arguments.data(), arguments.size())) {\n        WasmType type = WasmType::Bytes;\n        switch (args.type()) {\n        case envoy::source::extensions::common::wasm::WasmType::Bytes:\n          type = WasmType::Bytes;\n          break;\n        case envoy::source::extensions::common::wasm::WasmType::Protobuf:\n          type = WasmType::Protobuf;\n          break;\n        case envoy::source::extensions::common::wasm::WasmType::String:\n          type = WasmType::String;\n          break;\n        case envoy::source::extensions::common::wasm::WasmType::FlatBuffers:\n          type = WasmType::FlatBuffers;\n          break;\n        default:\n          // do nothing\n          break;\n        
}\n        StreamInfo::FilterState::LifeSpan span = StreamInfo::FilterState::LifeSpan::FilterChain;\n        switch (args.span()) {\n        case envoy::source::extensions::common::wasm::LifeSpan::FilterChain:\n          span = StreamInfo::FilterState::LifeSpan::FilterChain;\n          break;\n        case envoy::source::extensions::common::wasm::LifeSpan::DownstreamRequest:\n          span = StreamInfo::FilterState::LifeSpan::Request;\n          break;\n        case envoy::source::extensions::common::wasm::LifeSpan::DownstreamConnection:\n          span = StreamInfo::FilterState::LifeSpan::Connection;\n          break;\n        default:\n          // do nothing\n          break;\n        }\n        auto context = static_cast<Context*>(proxy_wasm::current_context_);\n        return context->declareProperty(\n            args.name(),\n            std::make_unique<const WasmStatePrototype>(args.readonly(), type, args.schema(), span));\n      }\n      return WasmResult::BadArgument;\n    };\n    return f;\n  }\n};\nRegisterForeignFunction\n    registerDeclarePropertyForeignFunction(\"declare_property\",\n                                           createFromClass<DeclarePropertyFactory>());\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/wasm.cc",
    "content": "#include \"extensions/common/wasm/wasm.h\"\n\n#include <algorithm>\n#include <chrono>\n\n#include \"envoy/event/deferred_deletable.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/common/wasm/wasm_extension.h\"\n\n#include \"absl/strings/str_cat.h\"\n\n#define WASM_CONTEXT(_c)                                                                           \\\n  static_cast<Context*>(proxy_wasm::exports::ContextOrEffectiveContext(                            \\\n      static_cast<proxy_wasm::ContextBase*>((void)_c, proxy_wasm::current_context_)))\n\nusing proxy_wasm::FailState;\nusing proxy_wasm::Word;\n\nnamespace Envoy {\n\nusing ScopeWeakPtr = std::weak_ptr<Stats::Scope>;\n\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\nnamespace {\n\nusing WasmEvent = EnvoyWasm::WasmEvent;\n\nstruct CodeCacheEntry {\n  std::string code;\n  bool in_progress;\n  MonotonicTime use_time;\n  MonotonicTime fetch_time;\n};\n\nclass RemoteDataFetcherAdapter : public Config::DataFetcher::RemoteDataFetcherCallback,\n                                 public Event::DeferredDeletable {\npublic:\n  RemoteDataFetcherAdapter(std::function<void(std::string cb)> cb) : cb_(cb) {}\n  ~RemoteDataFetcherAdapter() override = default;\n  void onSuccess(const std::string& data) override { cb_(data); }\n  void onFailure(Config::DataFetcher::FailureReason) override { cb_(\"\"); }\n  void setFetcher(std::unique_ptr<Config::DataFetcher::RemoteDataFetcher>&& fetcher) {\n    fetcher_ = std::move(fetcher);\n  }\n\nprivate:\n  std::function<void(std::string)> cb_;\n  std::unique_ptr<Config::DataFetcher::RemoteDataFetcher> fetcher_;\n};\n\nconst std::string INLINE_STRING = \"<inline>\";\nconst int CODE_CACHE_SECONDS_NEGATIVE_CACHING = 10;\nconst int CODE_CACHE_SECONDS_CACHING_TTL = 24 * 3600; // 24 hours.\nMonotonicTime::duration cache_time_offset_for_testing{};\n\nstd::atomic<int64_t> active_wasms;\nstd::mutex code_cache_mutex;\nabsl::flat_hash_map<std::string, 
CodeCacheEntry>* code_cache = nullptr;\n\n// Downcast WasmBase to the actual Wasm.\ninline Wasm* getWasm(WasmHandleSharedPtr& base_wasm_handle) {\n  return static_cast<Wasm*>(base_wasm_handle->wasm().get());\n}\n\n} // namespace\n\nstd::string anyToBytes(const ProtobufWkt::Any& any) {\n  if (any.Is<ProtobufWkt::StringValue>()) {\n    ProtobufWkt::StringValue s;\n    MessageUtil::unpackTo(any, s);\n    return s.value();\n  }\n  if (any.Is<ProtobufWkt::BytesValue>()) {\n    Protobuf::BytesValue b;\n    MessageUtil::unpackTo(any, b);\n    return b.value();\n  }\n  return any.value();\n}\n\nvoid Wasm::initializeStats() {\n  active_wasms++;\n  wasm_stats_.active_.set(active_wasms);\n  wasm_stats_.created_.inc();\n}\n\nvoid Wasm::initializeLifecycle(Server::ServerLifecycleNotifier& lifecycle_notifier) {\n  auto weak = std::weak_ptr<Wasm>(std::static_pointer_cast<Wasm>(shared_from_this()));\n  lifecycle_notifier.registerCallback(Server::ServerLifecycleNotifier::Stage::ShutdownExit,\n                                      [this, weak](Event::PostCb post_cb) {\n                                        auto lock = weak.lock();\n                                        if (lock) { // See if we are still alive.\n                                          server_shutdown_post_cb_ = post_cb;\n                                        }\n                                      });\n}\n\nWasm::Wasm(absl::string_view runtime, absl::string_view vm_id, absl::string_view vm_configuration,\n           absl::string_view vm_key, const Stats::ScopeSharedPtr& scope,\n           Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher)\n    : WasmBase(createWasmVm(runtime, scope), vm_id, vm_configuration, vm_key), scope_(scope),\n      cluster_manager_(cluster_manager), dispatcher_(dispatcher),\n      time_source_(dispatcher.timeSource()),\n      wasm_stats_(WasmStats{\n          ALL_WASM_STATS(POOL_COUNTER_PREFIX(*scope_, absl::StrCat(\"wasm.\", runtime, \".\")),\n                  
       POOL_GAUGE_PREFIX(*scope_, absl::StrCat(\"wasm.\", runtime, \".\")))}) {\n  initializeStats();\n  ENVOY_LOG(debug, \"Base Wasm created {} now active\", active_wasms);\n}\n\nWasm::Wasm(WasmHandleSharedPtr base_wasm_handle, Event::Dispatcher& dispatcher)\n    : WasmBase(base_wasm_handle,\n               [&base_wasm_handle]() {\n                 return createWasmVm(\n                     getEnvoyWasmIntegration(*base_wasm_handle->wasm()->wasm_vm()).runtime(),\n                     getWasm(base_wasm_handle)->scope_);\n               }),\n      scope_(getWasm(base_wasm_handle)->scope_),\n      cluster_manager_(getWasm(base_wasm_handle)->clusterManager()), dispatcher_(dispatcher),\n      time_source_(dispatcher.timeSource()), wasm_stats_(getWasm(base_wasm_handle)->wasm_stats_) {\n  initializeStats();\n  ENVOY_LOG(debug, \"Thread-Local Wasm created {} now active\", active_wasms);\n}\n\nvoid Wasm::error(absl::string_view message) { ENVOY_LOG(error, \"Wasm VM failed {}\", message); }\n\nvoid Wasm::setTimerPeriod(uint32_t context_id, std::chrono::milliseconds new_period) {\n  auto& period = timer_period_[context_id];\n  auto& timer = timer_[context_id];\n  bool was_running = timer && period.count() > 0;\n  period = new_period;\n  if (was_running) {\n    timer->disableTimer();\n  }\n  if (period.count() > 0) {\n    timer = dispatcher_.createTimer(\n        [weak = std::weak_ptr<Wasm>(std::static_pointer_cast<Wasm>(shared_from_this())),\n         context_id]() {\n          auto shared = weak.lock();\n          if (shared) {\n            shared->tickHandler(context_id);\n          }\n        });\n    timer->enableTimer(period);\n  }\n}\n\nvoid Wasm::tickHandler(uint32_t root_context_id) {\n  auto period = timer_period_.find(root_context_id);\n  auto timer = timer_.find(root_context_id);\n  if (period == timer_period_.end() || timer == timer_.end() || !on_tick_) {\n    return;\n  }\n  auto context = getContext(root_context_id);\n  if (context) {\n    context->onTick(0);\n 
 }\n  if (timer->second && period->second.count() > 0) {\n    timer->second->enableTimer(period->second);\n  }\n}\n\nWasm::~Wasm() {\n  active_wasms--;\n  wasm_stats_.active_.set(active_wasms);\n  ENVOY_LOG(debug, \"~Wasm {} remaining active\", active_wasms);\n  if (server_shutdown_post_cb_) {\n    dispatcher_.post(server_shutdown_post_cb_);\n  }\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nWord resolve_dns(void* raw_context, Word dns_address_ptr, Word dns_address_size, Word token_ptr) {\n  auto context = WASM_CONTEXT(raw_context);\n  auto root_context = context->isRootContext() ? context : context->rootContext();\n  auto address = context->wasmVm()->getMemory(dns_address_ptr, dns_address_size);\n  if (!address) {\n    return WasmResult::InvalidMemoryAccess;\n  }\n  // Verify set and verify token_ptr before initiating the async resolve.\n  uint32_t token = context->wasm()->nextDnsToken();\n  if (!context->wasm()->setDatatype(token_ptr, token)) {\n    return WasmResult::InvalidMemoryAccess;\n  }\n  auto callback = [weak_wasm = std::weak_ptr<Wasm>(context->wasm()->sharedThis()), root_context,\n                   context_id = context->id(),\n                   token](Envoy::Network::DnsResolver::ResolutionStatus status,\n                          std::list<Envoy::Network::DnsResponse>&& response) {\n    auto wasm = weak_wasm.lock();\n    if (!wasm) {\n      return;\n    }\n    root_context->onResolveDns(token, status, std::move(response));\n  };\n  if (!context->wasm()->dnsResolver()) {\n    context->wasm()->dnsResolver() = context->wasm()->dispatcher().createDnsResolver({}, false);\n  }\n  context->wasm()->dnsResolver()->resolve(std::string(address.value()),\n                                          Network::DnsLookupFamily::Auto, callback);\n  return WasmResult::Ok;\n}\n\nvoid Wasm::registerCallbacks() {\n  WasmBase::registerCallbacks();\n#define _REGISTER(_fn)                                                                             \\\n  
wasm_vm_->registerCallback(                                                                      \\\n      \"env\", \"envoy_\" #_fn, &_fn,                                                                  \\\n      &proxy_wasm::ConvertFunctionWordToUint32<decltype(_fn), _fn>::convertFunctionWordToUint32)\n  _REGISTER(resolve_dns);\n#undef _REGISTER\n}\n\nvoid Wasm::getFunctions() {\n  WasmBase::getFunctions();\n#define _GET(_fn) wasm_vm_->getFunction(\"envoy_\" #_fn, &_fn##_);\n  _GET(on_resolve_dns)\n  _GET(on_stats_update)\n#undef _GET\n}\n\nproxy_wasm::CallOnThreadFunction Wasm::callOnThreadFunction() {\n  auto& dispatcher = dispatcher_;\n  return [&dispatcher](const std::function<void()>& f) { return dispatcher.post(f); };\n}\n\nContextBase* Wasm::createContext(const std::shared_ptr<PluginBase>& plugin) {\n  if (create_context_for_testing_) {\n    return create_context_for_testing_(this, std::static_pointer_cast<Plugin>(plugin));\n  }\n  return new Context(this, std::static_pointer_cast<Plugin>(plugin));\n}\n\nContextBase* Wasm::createRootContext(const std::shared_ptr<PluginBase>& plugin) {\n  if (create_root_context_for_testing_) {\n    return create_root_context_for_testing_(this, std::static_pointer_cast<Plugin>(plugin));\n  }\n  return new Context(this, std::static_pointer_cast<Plugin>(plugin));\n}\n\nContextBase* Wasm::createVmContext() { return new Context(this); }\n\nvoid Wasm::log(absl::string_view root_id, const Http::RequestHeaderMap* request_headers,\n               const Http::ResponseHeaderMap* response_headers,\n               const Http::ResponseTrailerMap* response_trailers,\n               const StreamInfo::StreamInfo& stream_info) {\n  auto context = getRootContext(root_id);\n  context->log(request_headers, response_headers, response_trailers, stream_info);\n}\n\nvoid Wasm::onStatsUpdate(absl::string_view root_id, Envoy::Stats::MetricSnapshot& snapshot) {\n  auto context = getRootContext(root_id);\n  context->onStatsUpdate(snapshot);\n}\n\nvoid 
clearCodeCacheForTesting() {\n  std::lock_guard<std::mutex> guard(code_cache_mutex);\n  if (code_cache) {\n    delete code_cache;\n    code_cache = nullptr;\n  }\n  getWasmExtension()->resetStatsForTesting();\n}\n\n// TODO: remove this post #4160: Switch default to SimulatedTimeSystem.\nvoid setTimeOffsetForCodeCacheForTesting(MonotonicTime::duration d) {\n  cache_time_offset_for_testing = d;\n}\n\nstatic proxy_wasm::WasmHandleCloneFactory\ngetCloneFactory(WasmExtension* wasm_extension, Event::Dispatcher& dispatcher,\n                CreateContextFn create_root_context_for_testing) {\n  auto wasm_clone_factory = wasm_extension->wasmCloneFactory();\n  return [&dispatcher, create_root_context_for_testing, wasm_clone_factory](\n             WasmHandleBaseSharedPtr base_wasm) -> std::shared_ptr<WasmHandleBase> {\n    return wasm_clone_factory(std::static_pointer_cast<WasmHandle>(base_wasm), dispatcher,\n                              create_root_context_for_testing);\n  };\n}\n\nWasmEvent toWasmEvent(const std::shared_ptr<WasmHandleBase>& wasm) {\n  if (!wasm) {\n    return WasmEvent::UnableToCreateVM;\n  }\n  switch (wasm->wasm()->fail_state()) {\n  case FailState::Ok:\n    return WasmEvent::Ok;\n  case FailState::UnableToCreateVM:\n    return WasmEvent::UnableToCreateVM;\n  case FailState::UnableToCloneVM:\n    return WasmEvent::UnableToCloneVM;\n  case FailState::MissingFunction:\n    return WasmEvent::MissingFunction;\n  case FailState::UnableToInitializeCode:\n    return WasmEvent::UnableToInitializeCode;\n  case FailState::StartFailed:\n    return WasmEvent::StartFailed;\n  case FailState::ConfigureFailed:\n    return WasmEvent::ConfigureFailed;\n  case FailState::RuntimeError:\n    return WasmEvent::RuntimeError;\n  }\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\nstatic bool createWasmInternal(const VmConfig& vm_config, const PluginSharedPtr& plugin,\n                               const Stats::ScopeSharedPtr& scope,\n                               
Upstream::ClusterManager& cluster_manager,\n                               Init::Manager& init_manager, Event::Dispatcher& dispatcher,\n                               Api::Api& api, Server::ServerLifecycleNotifier& lifecycle_notifier,\n                               Config::DataSource::RemoteAsyncDataProviderPtr& remote_data_provider,\n                               CreateWasmCallback&& cb,\n                               CreateContextFn create_root_context_for_testing = nullptr) {\n  auto wasm_extension = getWasmExtension();\n  std::string source, code;\n  bool fetch = false;\n  if (vm_config.code().has_remote()) {\n    auto now = dispatcher.timeSource().monotonicTime() + cache_time_offset_for_testing;\n    source = vm_config.code().remote().http_uri().uri();\n    std::lock_guard<std::mutex> guard(code_cache_mutex);\n    if (!code_cache) {\n      code_cache = new std::remove_reference<decltype(*code_cache)>::type;\n    }\n    Stats::ScopeSharedPtr create_wasm_stats_scope =\n        wasm_extension->lockAndCreateStats(scope, plugin);\n    // Remove entries older than CODE_CACHE_SECONDS_CACHING_TTL except for our target.\n    for (auto it = code_cache->begin(); it != code_cache->end();) {\n      if (now - it->second.use_time > std::chrono::seconds(CODE_CACHE_SECONDS_CACHING_TTL) &&\n          it->first != vm_config.code().remote().sha256()) {\n        code_cache->erase(it++);\n      } else {\n        ++it;\n      }\n    }\n    wasm_extension->onRemoteCacheEntriesChanged(code_cache->size());\n    auto it = code_cache->find(vm_config.code().remote().sha256());\n    if (it != code_cache->end()) {\n      it->second.use_time = now;\n      if (it->second.in_progress) {\n        wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheMiss, plugin);\n        ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), warn,\n                            \"createWasm: failed to load (in progress) from {}\", source);\n        cb(nullptr);\n      }\n    
  code = it->second.code;\n      if (code.empty()) {\n        if (now - it->second.fetch_time <\n            std::chrono::seconds(CODE_CACHE_SECONDS_NEGATIVE_CACHING)) {\n          wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheNegativeHit, plugin);\n          ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), warn,\n                              \"createWasm: failed to load (cached) from {}\", source);\n          cb(nullptr);\n        }\n        fetch = true; // Fetch failed, retry.\n        it->second.in_progress = true;\n        it->second.fetch_time = now;\n      } else {\n        wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheHit, plugin);\n      }\n    } else {\n      fetch = true; // Not in cache, fetch.\n      auto& e = (*code_cache)[vm_config.code().remote().sha256()];\n      e.in_progress = true;\n      e.use_time = e.fetch_time = now;\n      wasm_extension->onRemoteCacheEntriesChanged(code_cache->size());\n      wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheMiss, plugin);\n    }\n  } else if (vm_config.code().has_local()) {\n    code = Config::DataSource::read(vm_config.code().local(), true, api);\n    source = Config::DataSource::getPath(vm_config.code().local())\n                 .value_or(code.empty() ? 
EMPTY_STRING : INLINE_STRING);\n  }\n\n  auto complete_cb = [cb, vm_config, plugin, scope, &cluster_manager, &dispatcher,\n                      &lifecycle_notifier, create_root_context_for_testing,\n                      wasm_extension](std::string code) -> bool {\n    if (code.empty()) {\n      cb(nullptr);\n      return false;\n    }\n    auto vm_key =\n        proxy_wasm::makeVmKey(vm_config.vm_id(), anyToBytes(vm_config.configuration()), code);\n    auto wasm_factory = wasm_extension->wasmFactory();\n    proxy_wasm::WasmHandleFactory proxy_wasm_factory =\n        [&vm_config, scope, &cluster_manager, &dispatcher, &lifecycle_notifier,\n         wasm_factory](absl::string_view vm_key) -> WasmHandleBaseSharedPtr {\n      return wasm_factory(vm_config, scope, cluster_manager, dispatcher, lifecycle_notifier,\n                          vm_key);\n    };\n    auto wasm = proxy_wasm::createWasm(\n        vm_key, code, plugin, proxy_wasm_factory,\n        getCloneFactory(wasm_extension, dispatcher, create_root_context_for_testing),\n        vm_config.allow_precompiled());\n    Stats::ScopeSharedPtr create_wasm_stats_scope =\n        wasm_extension->lockAndCreateStats(scope, plugin);\n    wasm_extension->onEvent(toWasmEvent(wasm), plugin);\n    if (!wasm || wasm->wasm()->isFailed()) {\n      ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), trace,\n                          \"Unable to create Wasm\");\n      cb(nullptr);\n      return false;\n    }\n    cb(std::static_pointer_cast<WasmHandle>(wasm));\n    return true;\n  };\n\n  if (fetch) {\n    auto holder = std::make_shared<std::unique_ptr<Event::DeferredDeletable>>();\n    auto fetch_callback = [vm_config, complete_cb, source, &dispatcher, scope, holder, plugin,\n                           wasm_extension](const std::string& code) {\n      {\n        std::lock_guard<std::mutex> guard(code_cache_mutex);\n        auto& e = (*code_cache)[vm_config.code().remote().sha256()];\n        
e.in_progress = false;\n        e.code = code;\n        Stats::ScopeSharedPtr create_wasm_stats_scope =\n            wasm_extension->lockAndCreateStats(scope, plugin);\n        if (code.empty()) {\n          wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheFetchFailure, plugin);\n        } else {\n          wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheFetchSuccess, plugin);\n        }\n        wasm_extension->onRemoteCacheEntriesChanged(code_cache->size());\n      }\n      // NB: xDS currently does not support failing asynchronously, so we fail immediately\n      // if remote Wasm code is not cached and do a background fill.\n      if (!vm_config.nack_on_code_cache_miss()) {\n        if (code.empty()) {\n          ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), trace,\n                              \"Failed to load Wasm code (fetch failed) from {}\", source);\n        }\n        complete_cb(code);\n      }\n      // NB: must be deleted explicitly.\n      if (*holder) {\n        dispatcher.deferredDelete(Envoy::Event::DeferredDeletablePtr{holder->release()});\n      }\n    };\n    if (vm_config.nack_on_code_cache_miss()) {\n      auto adapter = std::make_unique<RemoteDataFetcherAdapter>(fetch_callback);\n      auto fetcher = std::make_unique<Config::DataFetcher::RemoteDataFetcher>(\n          cluster_manager, vm_config.code().remote().http_uri(), vm_config.code().remote().sha256(),\n          *adapter);\n      auto fetcher_ptr = fetcher.get();\n      adapter->setFetcher(std::move(fetcher));\n      *holder = std::move(adapter);\n      fetcher_ptr->fetch();\n      ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), trace,\n                          fmt::format(\"Failed to load Wasm code (fetching) from {}\", source));\n      cb(nullptr);\n      return false;\n    } else {\n      remote_data_provider = std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n          
cluster_manager, init_manager, vm_config.code().remote(), dispatcher,\n          api.randomGenerator(), true, fetch_callback);\n    }\n  } else {\n    return complete_cb(code);\n  }\n  return true;\n}\n\nbool createWasm(const VmConfig& vm_config, const PluginSharedPtr& plugin,\n                const Stats::ScopeSharedPtr& scope, Upstream::ClusterManager& cluster_manager,\n                Init::Manager& init_manager, Event::Dispatcher& dispatcher, Api::Api& api,\n                Envoy::Server::ServerLifecycleNotifier& lifecycle_notifier,\n                Config::DataSource::RemoteAsyncDataProviderPtr& remote_data_provider,\n                CreateWasmCallback&& cb, CreateContextFn create_root_context_for_testing) {\n  return createWasmInternal(vm_config, plugin, scope, cluster_manager, init_manager, dispatcher,\n                            api, lifecycle_notifier, remote_data_provider, std::move(cb),\n                            create_root_context_for_testing);\n}\n\nWasmHandleSharedPtr getOrCreateThreadLocalWasm(const WasmHandleSharedPtr& base_wasm,\n                                               const PluginSharedPtr& plugin,\n                                               Event::Dispatcher& dispatcher,\n                                               CreateContextFn create_root_context_for_testing) {\n  return std::static_pointer_cast<WasmHandle>(proxy_wasm::getOrCreateThreadLocalWasm(\n      std::static_pointer_cast<WasmHandle>(base_wasm), plugin,\n      getCloneFactory(getWasmExtension(), dispatcher, create_root_context_for_testing)));\n}\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/wasm.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <chrono>\n#include <map>\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/server/lifecycle_notifier.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/datasource.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/version/version.h\"\n\n#include \"extensions/common/wasm/context.h\"\n#include \"extensions/common/wasm/wasm_extension.h\"\n#include \"extensions/common/wasm/wasm_vm.h\"\n#include \"extensions/common/wasm/well_known_names.h\"\n\n#include \"include/proxy-wasm/exports.h\"\n#include \"include/proxy-wasm/wasm.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\n#define ALL_WASM_STATS(COUNTER, GAUGE)                                                             \\\n  COUNTER(created)                                                                                 \\\n  GAUGE(active, NeverImport)\n\nclass WasmHandle;\n\nstruct WasmStats {\n  ALL_WASM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n// Wasm execution instance. 
Manages the Envoy side of the Wasm interface.\nclass Wasm : public WasmBase, Logger::Loggable<Logger::Id::wasm> {\npublic:\n  Wasm(absl::string_view runtime, absl::string_view vm_id, absl::string_view vm_configuration,\n       absl::string_view vm_key, const Stats::ScopeSharedPtr& scope,\n       Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher);\n  Wasm(std::shared_ptr<WasmHandle> other, Event::Dispatcher& dispatcher);\n  ~Wasm() override;\n\n  Upstream::ClusterManager& clusterManager() const { return cluster_manager_; }\n  Event::Dispatcher& dispatcher() { return dispatcher_; }\n  Context* getRootContext(absl::string_view root_id) {\n    return static_cast<Context*>(WasmBase::getRootContext(root_id));\n  }\n  void setTimerPeriod(uint32_t root_context_id, std::chrono::milliseconds period) override;\n  virtual void tickHandler(uint32_t root_context_id);\n  std::shared_ptr<Wasm> sharedThis() { return std::static_pointer_cast<Wasm>(shared_from_this()); }\n  Network::DnsResolverSharedPtr& dnsResolver() { return dns_resolver_; }\n\n  // WasmBase\n  void error(absl::string_view message) override;\n  proxy_wasm::CallOnThreadFunction callOnThreadFunction() override;\n  ContextBase* createContext(const std::shared_ptr<PluginBase>& plugin) override;\n  ContextBase* createRootContext(const std::shared_ptr<PluginBase>& plugin) override;\n  ContextBase* createVmContext() override;\n  void registerCallbacks() override;\n  void getFunctions() override;\n\n  // AccessLog::Instance\n  void log(absl::string_view root_id, const Http::RequestHeaderMap* request_headers,\n           const Http::ResponseHeaderMap* response_headers,\n           const Http::ResponseTrailerMap* response_trailers,\n           const StreamInfo::StreamInfo& stream_info);\n\n  void onStatsUpdate(absl::string_view root_id, Envoy::Stats::MetricSnapshot& snapshot);\n  virtual std::string buildVersion() { return BUILD_VERSION_NUMBER; }\n\n  void 
initializeLifecycle(Server::ServerLifecycleNotifier& lifecycle_notifier);\n  uint32_t nextDnsToken() {\n    do {\n      dns_token_++;\n    } while (!dns_token_);\n    return dns_token_;\n  }\n\n  void setCreateContextForTesting(CreateContextFn create_context,\n                                  CreateContextFn create_root_context) {\n    create_context_for_testing_ = create_context;\n    create_root_context_for_testing_ = create_root_context;\n  }\n  void setFailStateForTesting(proxy_wasm::FailState fail_state) { failed_ = fail_state; }\n\nprotected:\n  friend class Context;\n\n  void initializeStats();\n  // Calls into the VM.\n  proxy_wasm::WasmCallVoid<3> on_resolve_dns_;\n  proxy_wasm::WasmCallVoid<2> on_stats_update_;\n\n  Stats::ScopeSharedPtr scope_;\n  Upstream::ClusterManager& cluster_manager_;\n  Event::Dispatcher& dispatcher_;\n  Event::PostCb server_shutdown_post_cb_;\n  absl::flat_hash_map<uint32_t, Event::TimerPtr> timer_; // per root_id.\n  TimeSource& time_source_;\n\n  // Host Stats/Metrics\n  WasmStats wasm_stats_;\n\n  // Plugin Stats/Metrics\n  absl::flat_hash_map<uint32_t, Stats::Counter*> counters_;\n  absl::flat_hash_map<uint32_t, Stats::Gauge*> gauges_;\n  absl::flat_hash_map<uint32_t, Stats::Histogram*> histograms_;\n\n  CreateContextFn create_context_for_testing_;\n  CreateContextFn create_root_context_for_testing_;\n  Network::DnsResolverSharedPtr dns_resolver_;\n  uint32_t dns_token_ = 1;\n};\nusing WasmSharedPtr = std::shared_ptr<Wasm>;\n\nclass WasmHandle : public WasmHandleBase, public ThreadLocal::ThreadLocalObject {\npublic:\n  explicit WasmHandle(const WasmSharedPtr& wasm)\n      : WasmHandleBase(std::static_pointer_cast<WasmBase>(wasm)), wasm_(wasm) {}\n\n  WasmSharedPtr& wasm() { return wasm_; }\n\nprivate:\n  WasmSharedPtr wasm_;\n};\n\nusing CreateWasmCallback = std::function<void(WasmHandleSharedPtr)>;\n\n// Returns false if createWasm failed synchronously. 
This is necessary because xDS *MUST* report\n// all failures synchronously as it has no facility to report configuration update failures\n// asynchronously. Callers should throw an exception if they are part of a synchronous xDS update\n// because that is the mechanism for reporting configuration errors.\nbool createWasm(const VmConfig& vm_config, const PluginSharedPtr& plugin,\n                const Stats::ScopeSharedPtr& scope, Upstream::ClusterManager& cluster_manager,\n                Init::Manager& init_manager, Event::Dispatcher& dispatcher, Api::Api& api,\n                Envoy::Server::ServerLifecycleNotifier& lifecycle_notifier,\n                Config::DataSource::RemoteAsyncDataProviderPtr& remote_data_provider,\n                CreateWasmCallback&& callback,\n                CreateContextFn create_root_context_for_testing = nullptr);\n\nWasmHandleSharedPtr\ngetOrCreateThreadLocalWasm(const WasmHandleSharedPtr& base_wasm, const PluginSharedPtr& plugin,\n                           Event::Dispatcher& dispatcher,\n                           CreateContextFn create_root_context_for_testing = nullptr);\n\nvoid clearCodeCacheForTesting();\nstd::string anyToBytes(const ProtobufWkt::Any& any);\nvoid setTimeOffsetForCodeCacheForTesting(MonotonicTime::duration d);\nEnvoyWasm::WasmEvent toWasmEvent(const std::shared_ptr<WasmHandleBase>& wasm);\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/wasm_extension.cc",
    "content": "#include \"extensions/common/wasm/wasm_extension.h\"\n\n#include \"extensions/common/wasm/context.h\"\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/common/wasm/wasm_vm.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\nnamespace {\n\nWasmExtension* wasm_extension = nullptr;\n\n} // namespace\n\nStats::ScopeSharedPtr WasmExtension::lockAndCreateStats(const Stats::ScopeSharedPtr& scope,\n                                                        const PluginSharedPtr& plugin) {\n  absl::MutexLock l(&mutex_);\n  Stats::ScopeSharedPtr lock;\n  if (!(lock = scope_.lock())) {\n    resetStats();\n    createStats(scope, plugin);\n    scope_ = ScopeWeakPtr(scope);\n    return scope;\n  }\n  createStats(scope, plugin);\n  return lock;\n}\n\nvoid WasmExtension::resetStatsForTesting() {\n  absl::MutexLock l(&mutex_);\n  resetStats();\n}\n\n// Register a Wasm extension. Note: only one extension may be registered.\nRegisterWasmExtension::RegisterWasmExtension(WasmExtension* extension) {\n  RELEASE_ASSERT(!wasm_extension, \"Multiple Wasm extensions registered.\");\n  wasm_extension = extension;\n}\n\nstd::unique_ptr<EnvoyWasmVmIntegration>\nEnvoyWasm::createEnvoyWasmVmIntegration(const Stats::ScopeSharedPtr& scope,\n                                        absl::string_view runtime,\n                                        absl::string_view short_runtime) {\n  return std::make_unique<EnvoyWasmVmIntegration>(scope, runtime, short_runtime);\n}\n\nWasmHandleExtensionFactory EnvoyWasm::wasmFactory() {\n  return [](const VmConfig vm_config, const Stats::ScopeSharedPtr& scope,\n            Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher,\n            Server::ServerLifecycleNotifier& lifecycle_notifier,\n            absl::string_view vm_key) -> WasmHandleBaseSharedPtr {\n    auto wasm = std::make_shared<Wasm>(vm_config.runtime(), vm_config.vm_id(),\n                                       
anyToBytes(vm_config.configuration()), vm_key, scope,\n                                       cluster_manager, dispatcher);\n    wasm->initializeLifecycle(lifecycle_notifier);\n    return std::static_pointer_cast<WasmHandleBase>(std::make_shared<WasmHandle>(std::move(wasm)));\n  };\n}\n\nWasmHandleExtensionCloneFactory EnvoyWasm::wasmCloneFactory() {\n  return [](const WasmHandleSharedPtr& base_wasm, Event::Dispatcher& dispatcher,\n            CreateContextFn create_root_context_for_testing) -> WasmHandleBaseSharedPtr {\n    auto wasm = std::make_shared<Wasm>(base_wasm, dispatcher);\n    wasm->setCreateContextForTesting(nullptr, create_root_context_for_testing);\n    return std::static_pointer_cast<WasmHandleBase>(std::make_shared<WasmHandle>(std::move(wasm)));\n  };\n}\n\nvoid EnvoyWasm::onEvent(WasmEvent event, const PluginSharedPtr&) {\n  switch (event) {\n  case WasmEvent::RemoteLoadCacheHit:\n    create_wasm_stats_->remote_load_cache_hits_.inc();\n    break;\n  case WasmEvent::RemoteLoadCacheNegativeHit:\n    create_wasm_stats_->remote_load_cache_negative_hits_.inc();\n    break;\n  case WasmEvent::RemoteLoadCacheMiss:\n    create_wasm_stats_->remote_load_cache_misses_.inc();\n    break;\n  case WasmEvent::RemoteLoadCacheFetchSuccess:\n    create_wasm_stats_->remote_load_fetch_successes_.inc();\n    break;\n  case WasmEvent::RemoteLoadCacheFetchFailure:\n    create_wasm_stats_->remote_load_fetch_failures_.inc();\n    break;\n  default:\n    break;\n  }\n}\n\nvoid EnvoyWasm::onRemoteCacheEntriesChanged(int entries) {\n  create_wasm_stats_->remote_load_cache_entries_.set(entries);\n}\n\nvoid EnvoyWasm::createStats(const Stats::ScopeSharedPtr& scope, const PluginSharedPtr&) {\n  if (!create_wasm_stats_) {\n    create_wasm_stats_.reset(new CreateWasmStats{CREATE_WASM_STATS( // NOLINT\n        POOL_COUNTER_PREFIX(*scope, \"wasm.\"), POOL_GAUGE_PREFIX(*scope, \"wasm.\"))});\n  }\n}\n\nvoid EnvoyWasm::resetStats() { create_wasm_stats_.reset(); }\n\nWasmExtension* 
getWasmExtension() {\n  static WasmExtension* extension = wasm_extension ? wasm_extension : new EnvoyWasm();\n  return extension;\n}\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/wasm_extension.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/server/lifecycle_notifier.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/common/wasm/context.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\n#define CREATE_WASM_STATS(COUNTER, GAUGE)                                                          \\\n  COUNTER(remote_load_cache_hits)                                                                  \\\n  COUNTER(remote_load_cache_negative_hits)                                                         \\\n  COUNTER(remote_load_cache_misses)                                                                \\\n  COUNTER(remote_load_fetch_successes)                                                             \\\n  COUNTER(remote_load_fetch_failures)                                                              \\\n  GAUGE(remote_load_cache_entries, NeverImport)\n\nclass WasmHandle;\nclass EnvoyWasmVmIntegration;\n\nusing WasmHandleSharedPtr = std::shared_ptr<WasmHandle>;\nusing CreateContextFn =\n    std::function<ContextBase*(Wasm* wasm, const std::shared_ptr<Plugin>& plugin)>;\nusing WasmHandleExtensionFactory = std::function<WasmHandleBaseSharedPtr(\n    const VmConfig& vm_config, const Stats::ScopeSharedPtr& scope,\n    Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher,\n    Server::ServerLifecycleNotifier& lifecycle_notifier, absl::string_view vm_key)>;\nusing WasmHandleExtensionCloneFactory = std::function<WasmHandleBaseSharedPtr(\n    const WasmHandleSharedPtr& base_wasm, Event::Dispatcher& dispatcher,\n    CreateContextFn create_root_context_for_testing)>;\nusing ScopeWeakPtr = std::weak_ptr<Stats::Scope>;\n\nstruct CreateWasmStats {\n  CREATE_WASM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n// 
Extension point for Wasm clients in embedded Envoy.\nclass WasmExtension : Logger::Loggable<Logger::Id::wasm> {\npublic:\n  WasmExtension() = default;\n  virtual ~WasmExtension() = default;\n\n  virtual void initialize() = 0;\n  virtual std::unique_ptr<EnvoyWasmVmIntegration>\n  createEnvoyWasmVmIntegration(const Stats::ScopeSharedPtr& scope, absl::string_view runtime,\n                               absl::string_view short_runtime) = 0;\n  virtual WasmHandleExtensionFactory wasmFactory() = 0;\n  virtual WasmHandleExtensionCloneFactory wasmCloneFactory() = 0;\n  enum class WasmEvent : int {\n    Ok,\n    RemoteLoadCacheHit,\n    RemoteLoadCacheNegativeHit,\n    RemoteLoadCacheMiss,\n    RemoteLoadCacheFetchSuccess,\n    RemoteLoadCacheFetchFailure,\n    UnableToCreateVM,\n    UnableToCloneVM,\n    MissingFunction,\n    UnableToInitializeCode,\n    StartFailed,\n    ConfigureFailed,\n    RuntimeError,\n  };\n  virtual void onEvent(WasmEvent event, const PluginSharedPtr& plugin) = 0;\n  virtual void onRemoteCacheEntriesChanged(int remote_cache_entries) = 0;\n  virtual void createStats(const Stats::ScopeSharedPtr& scope, const PluginSharedPtr& plugin)\n      EXCLUSIVE_LOCKS_REQUIRED(mutex_) = 0;\n  virtual void resetStats() EXCLUSIVE_LOCKS_REQUIRED(mutex_) = 0; // Delete stats pointers\n\n  // NB: the Scope can become invalid if, for example, the owning FilterChain is deleted. When that\n  // happens the stats must be recreated. 
This hook verifies the Scope of any existing stats and if\n  // necessary recreates the stats with the newly provided scope.\n  // This call takes out the mutex_ and calls createStats and possibly resetStats().\n  Stats::ScopeSharedPtr lockAndCreateStats(const Stats::ScopeSharedPtr& scope,\n                                           const PluginSharedPtr& plugin);\n\n  void resetStatsForTesting();\n\nprotected:\n  absl::Mutex mutex_;\n  ScopeWeakPtr scope_;\n};\n\n// The default Envoy Wasm implementation.\nclass EnvoyWasm : public WasmExtension {\npublic:\n  EnvoyWasm() = default;\n  ~EnvoyWasm() override = default;\n  void initialize() override {}\n  std::unique_ptr<EnvoyWasmVmIntegration>\n  createEnvoyWasmVmIntegration(const Stats::ScopeSharedPtr& scope, absl::string_view runtime,\n                               absl::string_view short_runtime) override;\n  WasmHandleExtensionFactory wasmFactory() override;\n  WasmHandleExtensionCloneFactory wasmCloneFactory() override;\n  void onEvent(WasmEvent event, const PluginSharedPtr& plugin) override;\n  void onRemoteCacheEntriesChanged(int remote_cache_entries) override;\n  void createStats(const Stats::ScopeSharedPtr& scope, const PluginSharedPtr& plugin) override;\n  void resetStats() override;\n\nprivate:\n  std::unique_ptr<CreateWasmStats> create_wasm_stats_;\n};\n\n// Register a Wasm extension. Note: only one extension may be registered.\nstruct RegisterWasmExtension {\n  RegisterWasmExtension(WasmExtension* extension);\n};\n#define REGISTER_WASM_EXTENSION(_class)                                                            \\\n  ::Envoy::Extensions::Common::Wasm::RegisterWasmExtension register_wasm_extension(new _class());\n\nWasmExtension* getWasmExtension();\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/wasm_state.cc",
    "content": "#include \"extensions/common/wasm/wasm_state.h\"\n\n#include \"flatbuffers/reflection.h\"\n#include \"tools/flatbuffers_backed_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\nusing google::api::expr::runtime::CelValue;\n\nCelValue WasmState::exprValue(Protobuf::Arena* arena, bool last) const {\n  if (initialized_) {\n    switch (type_) {\n    case WasmType::String:\n      return CelValue::CreateString(&value_);\n    case WasmType::Bytes:\n      return CelValue::CreateBytes(&value_);\n    case WasmType::Protobuf: {\n      if (last) {\n        return CelValue::CreateBytes(&value_);\n      }\n      // Note that this is very expensive since it incurs a de-serialization\n      const auto any = serializeAsProto();\n      return CelValue::CreateMessage(any.get(), arena);\n    }\n    case WasmType::FlatBuffers:\n      if (last) {\n        return CelValue::CreateBytes(&value_);\n      }\n      return CelValue::CreateMap(google::api::expr::runtime::CreateFlatBuffersBackedObject(\n          reinterpret_cast<const uint8_t*>(value_.data()), *reflection::GetSchema(schema_.data()),\n          arena));\n    }\n  }\n  return CelValue::CreateNull();\n}\n\nProtobufTypes::MessagePtr WasmState::serializeAsProto() const {\n  auto any = std::make_unique<ProtobufWkt::Any>();\n\n  if (type_ != WasmType::Protobuf) {\n    ProtobufWkt::BytesValue value;\n    value.set_value(value_);\n    any->PackFrom(value);\n  } else {\n    // The Wasm extension serialized in its own type.\n    any->set_type_url(std::string(schema_));\n    any->set_value(value_);\n  }\n\n  return any;\n}\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/wasm_state.h",
    "content": "/*\n * Wasm State Class available to Wasm/Non-Wasm modules.\n */\n\n#pragma once\n\n#include <string>\n\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n#include \"eval/public/cel_value.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\n// FilterState prefix for WasmState values.\nconst absl::string_view WasmStateKeyPrefix = \"wasm.\";\n\n// WasmState content declaration.\nenum class WasmType {\n  Bytes,\n  String,\n  // Schema contains the reflection flatbuffer\n  FlatBuffers,\n  // Schema contains the type URL\n  Protobuf,\n};\n\n// WasmState type declaration.\nclass WasmStatePrototype {\npublic:\n  WasmStatePrototype(bool readonly, WasmType type, absl::string_view schema,\n                     StreamInfo::FilterState::LifeSpan life_span)\n      : readonly_(readonly), type_(type), schema_(schema), life_span_(life_span) {}\n  WasmStatePrototype() = default;\n  const bool readonly_{false};\n  const WasmType type_{WasmType::Bytes};\n  const std::string schema_{\"\"};\n  const StreamInfo::FilterState::LifeSpan life_span_{\n      StreamInfo::FilterState::LifeSpan::FilterChain};\n};\n\nusing DefaultWasmStatePrototype = ConstSingleton<WasmStatePrototype>;\n\n// A simple wrapper around generic values\nclass WasmState : public StreamInfo::FilterState::Object {\npublic:\n  explicit WasmState(const WasmStatePrototype& proto)\n      : readonly_(proto.readonly_), type_(proto.type_), schema_(proto.schema_) {}\n\n  const std::string& value() const { return value_; }\n\n  // Create a value from the state, given an arena. 
Last argument indicates whether the value\n  // is de-referenced.\n  google::api::expr::runtime::CelValue exprValue(Protobuf::Arena* arena, bool last) const;\n\n  bool setValue(absl::string_view value) {\n    if (initialized_ && readonly_) {\n      return false;\n    }\n    value_.assign(value.data(), value.size());\n    initialized_ = true;\n    return true;\n  }\n\n  ProtobufTypes::MessagePtr serializeAsProto() const override;\n  absl::optional<std::string> serializeAsString() const override { return value_; }\n\nprivate:\n  const bool readonly_;\n  const WasmType type_;\n  absl::string_view schema_;\n  std::string value_{};\n  bool initialized_{false};\n};\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/wasm_vm.cc",
    "content": "#include \"extensions/common/wasm/wasm_vm.h\"\n\n#include <algorithm>\n#include <memory>\n\n#include \"extensions/common/wasm/context.h\"\n#include \"extensions/common/wasm/ext/envoy_null_vm_wasm_api.h\"\n#include \"extensions/common/wasm/wasm_extension.h\"\n#include \"extensions/common/wasm/well_known_names.h\"\n\n#include \"include/proxy-wasm/null.h\"\n#include \"include/proxy-wasm/null_plugin.h\"\n\n#if defined(ENVOY_WASM_V8)\n#include \"include/proxy-wasm/v8.h\"\n#endif\n#if defined(ENVOY_WASM_WAVM)\n#include \"include/proxy-wasm/wavm.h\"\n#endif\n\nusing ContextBase = proxy_wasm::ContextBase;\nusing Word = proxy_wasm::Word;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\nvoid EnvoyWasmVmIntegration::error(absl::string_view message) { ENVOY_LOG(trace, message); }\n\nbool EnvoyWasmVmIntegration::getNullVmFunction(absl::string_view function_name, bool returns_word,\n                                               int number_of_arguments,\n                                               proxy_wasm::NullPlugin* plugin,\n                                               void* ptr_to_function_return) {\n  if (function_name == \"envoy_on_resolve_dns\" && returns_word == false &&\n      number_of_arguments == 3) {\n    *reinterpret_cast<proxy_wasm::WasmCallVoid<3>*>(ptr_to_function_return) =\n        [plugin](ContextBase* context, Word context_id, Word token, Word result_size) {\n          proxy_wasm::SaveRestoreContext saved_context(context);\n          // Need to add a new API header available to both .wasm and null vm targets.\n          auto context_base = plugin->getContextBase(context_id);\n          if (auto root = context_base->asRoot()) {\n            static_cast<proxy_wasm::null_plugin::EnvoyRootContext*>(root)->onResolveDns(\n                token, result_size);\n          }\n        };\n    return true;\n  } else if (function_name == \"envoy_on_stats_update\" && returns_word == false &&\n             
number_of_arguments == 2) {\n    *reinterpret_cast<proxy_wasm::WasmCallVoid<2>*>(\n        ptr_to_function_return) = [plugin](ContextBase* context, Word context_id,\n                                           Word result_size) {\n      proxy_wasm::SaveRestoreContext saved_context(context);\n      // Need to add a new API header available to both .wasm and null vm targets.\n      auto context_base = plugin->getContextBase(context_id);\n      if (auto root = context_base->asRoot()) {\n        static_cast<proxy_wasm::null_plugin::EnvoyRootContext*>(root)->onStatsUpdate(result_size);\n      }\n    };\n    return true;\n  }\n  return false;\n}\n\nWasmVmPtr createWasmVm(absl::string_view runtime, const Stats::ScopeSharedPtr& scope) {\n  if (runtime.empty()) {\n    ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), warn,\n                        \"Failed to create Wasm VM with unspecified runtime\");\n    return nullptr;\n  } else if (runtime == WasmRuntimeNames::get().Null) {\n    auto wasm = proxy_wasm::createNullVm();\n    wasm->integration() = getWasmExtension()->createEnvoyWasmVmIntegration(scope, runtime, \"null\");\n    return wasm;\n#if defined(ENVOY_WASM_V8)\n  } else if (runtime == WasmRuntimeNames::get().V8) {\n    auto wasm = proxy_wasm::createV8Vm();\n    wasm->integration() = getWasmExtension()->createEnvoyWasmVmIntegration(scope, runtime, \"v8\");\n    return wasm;\n#endif\n#if defined(ENVOY_WASM_WAVM)\n  } else if (runtime == WasmRuntimeNames::get().Wavm) {\n    auto wasm = proxy_wasm::createWavmVm();\n    wasm->integration() = getWasmExtension()->createEnvoyWasmVmIntegration(scope, runtime, \"wavm\");\n    return wasm;\n#endif\n  } else {\n    ENVOY_LOG_TO_LOGGER(\n        Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), warn,\n        \"Failed to create Wasm VM using {} runtime. 
Envoy was compiled without support for it\",\n        runtime);\n    return nullptr;\n  }\n}\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/wasm_vm.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"include/proxy-wasm/wasm_vm.h\"\n#include \"include/proxy-wasm/word.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\n/**\n * Wasm host stats.\n */\n#define ALL_VM_STATS(COUNTER, GAUGE)                                                               \\\n  COUNTER(created)                                                                                 \\\n  COUNTER(cloned)                                                                                  \\\n  GAUGE(active, NeverImport)\n\nstruct VmStats {\n  ALL_VM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n// Wasm VM data providing stats.\nclass EnvoyWasmVmIntegration : public proxy_wasm::WasmVmIntegration,\n                               Logger::Loggable<Logger::Id::wasm> {\npublic:\n  EnvoyWasmVmIntegration(const Stats::ScopeSharedPtr& scope, absl::string_view runtime,\n                         absl::string_view short_runtime)\n      : scope_(scope), runtime_(std::string(runtime)), short_runtime_(std::string(short_runtime)),\n        runtime_prefix_(absl::StrCat(\"wasm_vm.\", short_runtime, \".\")),\n        stats_(VmStats{ALL_VM_STATS(POOL_COUNTER_PREFIX(*scope_, runtime_prefix_),\n                                    POOL_GAUGE_PREFIX(*scope_, runtime_prefix_))}) {\n    stats_.created_.inc();\n    stats_.active_.inc();\n    ENVOY_LOG(debug, \"WasmVm created {} now active\", runtime_, stats_.active_.value());\n  }\n  ~EnvoyWasmVmIntegration() override {\n    stats_.active_.dec();\n    ENVOY_LOG(debug, \"~WasmVm {} {} remaining active\", runtime_, stats_.active_.value());\n  }\n\n  // proxy_wasm::WasmVmIntegration\n  proxy_wasm::WasmVmIntegration* clone() override {\n    
return new EnvoyWasmVmIntegration(scope_, runtime_, short_runtime_);\n  }\n  bool getNullVmFunction(absl::string_view function_name, bool returns_word,\n                         int number_of_arguments, proxy_wasm::NullPlugin* plugin,\n                         void* ptr_to_function_return) override;\n  void error(absl::string_view message) override;\n\n  const std::string& runtime() const { return runtime_; }\n\nprotected:\n  const Stats::ScopeSharedPtr scope_;\n  const std::string runtime_;\n  const std::string short_runtime_;\n  const std::string runtime_prefix_;\n  VmStats stats_;\n}; // namespace Wasm\n\ninline EnvoyWasmVmIntegration& getEnvoyWasmIntegration(proxy_wasm::WasmVm& wasm_vm) {\n  return *static_cast<EnvoyWasmVmIntegration*>(wasm_vm.integration().get());\n}\n\n// Exceptions for issues with the WebAssembly code.\nclass WasmException : public EnvoyException {\npublic:\n  using EnvoyException::EnvoyException;\n};\n\nusing WasmVmPtr = std::unique_ptr<proxy_wasm::WasmVm>;\n\n// Create a new low-level Wasm VM using runtime of the given type (e.g. \"envoy.wasm.runtime.wavm\").\nWasmVmPtr createWasmVm(absl::string_view runtime, const Stats::ScopeSharedPtr& scope);\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/wasm_vm_base.h",
    "content": "#pragma once\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"extensions/common/wasm/wasm_vm.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\n/**\n * Wasm host stats.\n */\n#define ALL_VM_STATS(COUNTER, GAUGE)                                                               \\\n  COUNTER(created)                                                                                 \\\n  COUNTER(cloned)                                                                                  \\\n  GAUGE(active, NeverImport)\n\nstruct VmStats {\n  ALL_VM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n// Wasm VM base instance. Provides common behavior (e.g. Stats).\nclass WasmVmBase : public WasmVm {\npublic:\n  WasmVmBase(const Stats::ScopeSharedPtr& scope, absl::string_view runtime)\n      : scope_(scope), runtime_prefix_(absl::StrCat(\"wasm_vm.\", runtime, \".\")),\n        runtime_(std::string(runtime)),\n        stats_(VmStats{ALL_VM_STATS(POOL_COUNTER_PREFIX(*scope_, runtime_prefix_),\n                                    POOL_GAUGE_PREFIX(*scope_, runtime_prefix_))}) {\n    stats_.created_.inc();\n    stats_.active_.inc();\n    ENVOY_LOG(debug, \"WasmVm created {} now active\", runtime_, stats_.active_.value());\n  }\n  ~WasmVmBase() override {\n    stats_.active_.dec();\n    ENVOY_LOG(debug, \"~WasmVm {} {} remaining active\", runtime_, stats_.active_.value());\n  }\n\nprotected:\n  const Stats::ScopeSharedPtr scope_;\n  const std::string runtime_prefix_;\n  const std::string runtime_; // The runtime e.g. \"v8\".\n  VmStats stats_;\n};\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/common/wasm/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\n/**\n * Well-known wasm runtime names.\n * NOTE: New wasm runtimes should use the well known name: envoy.wasm.runtime.name.\n */\nclass WasmRuntimeValues {\npublic:\n  // WAVM (https://github.com/WAVM/WAVM) Wasm VM.\n  const std::string Wavm = \"envoy.wasm.runtime.wavm\";\n  // Null sandbox: modules must be compiled into envoy and registered name is given in the\n  // DataSource.inline_string.\n  const std::string Null = \"envoy.wasm.runtime.null\";\n  // V8-based (https://v8.dev) WebAssembly runtime.\n  const std::string V8 = \"envoy.wasm.runtime.v8\";\n\n  // Filter state name\n  const std::string FilterState = \"envoy.wasm\";\n};\n\nusing WasmRuntimeNames = ConstSingleton<WasmRuntimeValues>;\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/common/compressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"compressor_factory_base_lib\",\n    hdrs = [\"factory_base.h\"],\n    deps = [\n        \"//include/envoy/compression/compressor:compressor_config_interface\",\n        \"//include/envoy/compression/compressor:compressor_factory_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/compression/common/compressor/factory_base.h",
    "content": "#pragma once\n\n#include \"envoy/compression/compressor/config.h\"\n#include \"envoy/compression/compressor/factory.h\"\n#include \"envoy/server/filter_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Common {\nnamespace Compressor {\n\ntemplate <class ConfigProto>\nclass CompressorLibraryFactoryBase\n    : public Envoy::Compression::Compressor::NamedCompressorLibraryConfigFactory {\npublic:\n  Envoy::Compression::Compressor::CompressorFactoryPtr\n  createCompressorFactoryFromProto(const Protobuf::Message& proto_config,\n                                   Server::Configuration::FactoryContext& context) override {\n    return createCompressorFactoryFromProtoTyped(\n        MessageUtil::downcastAndValidate<const ConfigProto&>(proto_config,\n                                                             context.messageValidationVisitor()));\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ConfigProto>();\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  CompressorLibraryFactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  virtual Envoy::Compression::Compressor::CompressorFactoryPtr\n  createCompressorFactoryFromProtoTyped(const ConfigProto&) PURE;\n\n  const std::string name_;\n};\n\n} // namespace Compressor\n} // namespace Common\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/common/decompressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"decompressor_factory_base_lib\",\n    hdrs = [\"factory_base.h\"],\n    deps = [\n        \"//include/envoy/compression/decompressor:decompressor_config_interface\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/compression/common/decompressor/factory_base.h",
    "content": "#pragma once\n\n#include \"envoy/compression/decompressor/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Common {\nnamespace Decompressor {\n\ntemplate <class ConfigProto>\nclass DecompressorLibraryFactoryBase\n    : public Envoy::Compression::Decompressor::NamedDecompressorLibraryConfigFactory {\npublic:\n  Envoy::Compression::Decompressor::DecompressorFactoryPtr\n  createDecompressorFactoryFromProto(const Protobuf::Message& proto_config,\n                                     Server::Configuration::FactoryContext& context) override {\n    return createDecompressorFactoryFromProtoTyped(\n        MessageUtil::downcastAndValidate<const ConfigProto&>(proto_config,\n                                                             context.messageValidationVisitor()),\n        context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ConfigProto>();\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  DecompressorLibraryFactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  virtual Envoy::Compression::Decompressor::DecompressorFactoryPtr\n  createDecompressorFactoryFromProtoTyped(const ConfigProto& proto_config,\n                                          Server::Configuration::FactoryContext& context) PURE;\n\n  const std::string name_;\n};\n\n} // namespace Decompressor\n} // namespace Common\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"zlib_base_lib\",\n    srcs = [\"base.cc\"],\n    hdrs = [\"base.h\"],\n    external_deps = [\"zlib\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/compression/gzip/common/base.cc",
    "content": "#include \"extensions/compression/gzip/common/base.h\"\n\nnamespace Envoy {\nnamespace Zlib {\n\nBase::Base(uint64_t chunk_size, std::function<void(z_stream*)> zstream_deleter)\n    : chunk_size_{chunk_size}, chunk_char_ptr_(new unsigned char[chunk_size]),\n      zstream_ptr_(new z_stream(), zstream_deleter) {}\n\nuint64_t Base::checksum() { return zstream_ptr_->adler; }\n\nvoid Base::updateOutput(Buffer::Instance& output_buffer) {\n  const uint64_t n_output = chunk_size_ - zstream_ptr_->avail_out;\n  if (n_output == 0) {\n    return;\n  }\n\n  output_buffer.add(static_cast<void*>(chunk_char_ptr_.get()), n_output);\n  zstream_ptr_->avail_out = chunk_size_;\n  zstream_ptr_->next_out = chunk_char_ptr_.get();\n}\n\n} // namespace Zlib\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/common/base.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"zlib.h\"\n\nnamespace Envoy {\nnamespace Zlib {\n\n/**\n * Shared code between the compressor and the decompressor.\n */\n// TODO(junr03): move to extensions tree once the compressor side is moved to extensions.\nclass Base {\npublic:\n  Base(uint64_t chunk_size, std::function<void(z_stream*)> zstream_deleter);\n\n  /**\n   * It returns the checksum of all output produced so far. Compressor's checksum at the end of\n   * the stream has to match decompressor's checksum produced at the end of the decompression.\n   * Likewise, the decompressor's checksum has to match the compressor's checksum at the end of\n   * compression.\n   * @return uint64_t CRC-32 if a gzip stream is being read or Adler-32 for other compression\n   * types.\n   */\n  uint64_t checksum();\n\nprotected:\n  void updateOutput(Buffer::Instance& output_buffer);\n\n  const uint64_t chunk_size_;\n  bool initialized_{false};\n\n  const std::unique_ptr<unsigned char[]> chunk_char_ptr_;\n  const std::unique_ptr<z_stream, std::function<void(z_stream*)>> zstream_ptr_;\n};\n\n} // namespace Zlib\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/compressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"compressor_lib\",\n    srcs = [\"zlib_compressor_impl.cc\"],\n    hdrs = [\"zlib_compressor_impl.h\"],\n    external_deps = [\"zlib\"],\n    deps = [\n        \"//include/envoy/compression/compressor:compressor_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/compression/gzip/common:zlib_base_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":compressor_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/compression/common/compressor:compressor_factory_base_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"@envoy_api//envoy/extensions/compression/gzip/compressor/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/compression/gzip/compressor/config.cc",
    "content": "#include \"extensions/compression/gzip/compressor/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Compressor {\n\nnamespace {\n// Default zlib memory level.\nconst uint64_t DefaultMemoryLevel = 5;\n\n// Default and maximum compression window size.\nconst uint64_t DefaultWindowBits = 12;\n\n// When logical OR'ed to window bits, this sets a gzip header and trailer around the compressed\n// data.\nconst uint64_t GzipHeaderValue = 16;\n\n// Default zlib chunk size.\nconst uint32_t DefaultChunkSize = 4096;\n} // namespace\n\nGzipCompressorFactory::GzipCompressorFactory(\n    const envoy::extensions::compression::gzip::compressor::v3::Gzip& gzip)\n    : compression_level_(compressionLevelEnum(gzip.compression_level())),\n      compression_strategy_(compressionStrategyEnum(gzip.compression_strategy())),\n      memory_level_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, memory_level, DefaultMemoryLevel)),\n      window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits) |\n                   GzipHeaderValue),\n      chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, DefaultChunkSize)) {}\n\nZlibCompressorImpl::CompressionLevel GzipCompressorFactory::compressionLevelEnum(\n    envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionLevel\n        compression_level) {\n  switch (compression_level) {\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::BEST_SPEED:\n    return ZlibCompressorImpl::CompressionLevel::Speed;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_2:\n    return ZlibCompressorImpl::CompressionLevel::Level2;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_3:\n    return ZlibCompressorImpl::CompressionLevel::Level3;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_4:\n    return 
ZlibCompressorImpl::CompressionLevel::Level4;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_5:\n    return ZlibCompressorImpl::CompressionLevel::Level5;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_6:\n    return ZlibCompressorImpl::CompressionLevel::Level6;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_7:\n    return ZlibCompressorImpl::CompressionLevel::Level7;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_8:\n    return ZlibCompressorImpl::CompressionLevel::Level8;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::BEST_COMPRESSION:\n    return ZlibCompressorImpl::CompressionLevel::Best;\n  default:\n    return ZlibCompressorImpl::CompressionLevel::Standard;\n  }\n}\n\nZlibCompressorImpl::CompressionStrategy GzipCompressorFactory::compressionStrategyEnum(\n    envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionStrategy\n        compression_strategy) {\n  switch (compression_strategy) {\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::FILTERED:\n    return ZlibCompressorImpl::CompressionStrategy::Filtered;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::FIXED:\n    return ZlibCompressorImpl::CompressionStrategy::Fixed;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::HUFFMAN_ONLY:\n    return ZlibCompressorImpl::CompressionStrategy::Huffman;\n  case envoy::extensions::compression::gzip::compressor::v3::Gzip::RLE:\n    return ZlibCompressorImpl::CompressionStrategy::Rle;\n  default:\n    return ZlibCompressorImpl::CompressionStrategy::Standard;\n  }\n}\n\nEnvoy::Compression::Compressor::CompressorPtr GzipCompressorFactory::createCompressor() {\n  auto compressor = std::make_unique<ZlibCompressorImpl>(chunk_size_);\n  compressor->init(compression_level_, compression_strategy_, window_bits_, memory_level_);\n  
return compressor;\n}\n\nEnvoy::Compression::Compressor::CompressorFactoryPtr\nGzipCompressorLibraryFactory::createCompressorFactoryFromProtoTyped(\n    const envoy::extensions::compression::gzip::compressor::v3::Gzip& proto_config) {\n  return std::make_unique<GzipCompressorFactory>(proto_config);\n}\n\n/**\n * Static registration for the gzip compressor library. @see NamedCompressorLibraryConfigFactory.\n */\nREGISTER_FACTORY(GzipCompressorLibraryFactory,\n                 Envoy::Compression::Compressor::NamedCompressorLibraryConfigFactory);\n\n} // namespace Compressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/compressor/config.h",
    "content": "#pragma once\n\n#include \"envoy/compression/compressor/factory.h\"\n#include \"envoy/extensions/compression/gzip/compressor/v3/gzip.pb.h\"\n#include \"envoy/extensions/compression/gzip/compressor/v3/gzip.pb.validate.h\"\n\n#include \"common/http/headers.h\"\n\n#include \"extensions/compression/common/compressor/factory_base.h\"\n#include \"extensions/compression/gzip/compressor/zlib_compressor_impl.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Compressor {\n\nnamespace {\n\nconst std::string& gzipStatsPrefix() { CONSTRUCT_ON_FIRST_USE(std::string, \"gzip.\"); }\nconst std::string& gzipExtensionName() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.compression.gzip.compressor\");\n}\n\n} // namespace\n\nclass GzipCompressorFactory : public Envoy::Compression::Compressor::CompressorFactory {\npublic:\n  GzipCompressorFactory(const envoy::extensions::compression::gzip::compressor::v3::Gzip& gzip);\n\n  // Envoy::Compression::Compressor::CompressorFactory\n  Envoy::Compression::Compressor::CompressorPtr createCompressor() override;\n  const std::string& statsPrefix() const override { return gzipStatsPrefix(); }\n  const std::string& contentEncoding() const override {\n    return Http::CustomHeaders::get().ContentEncodingValues.Gzip;\n  }\n\nprivate:\n  static ZlibCompressorImpl::CompressionLevel\n  compressionLevelEnum(envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionLevel\n                           compression_level);\n  static ZlibCompressorImpl::CompressionStrategy compressionStrategyEnum(\n      envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionStrategy\n          compression_strategy);\n\n  ZlibCompressorImpl::CompressionLevel compression_level_;\n  ZlibCompressorImpl::CompressionStrategy compression_strategy_;\n  const int32_t memory_level_;\n  const int32_t window_bits_;\n  const uint32_t 
chunk_size_;\n};\n\nclass GzipCompressorLibraryFactory\n    : public Compression::Common::Compressor::CompressorLibraryFactoryBase<\n          envoy::extensions::compression::gzip::compressor::v3::Gzip> {\npublic:\n  GzipCompressorLibraryFactory() : CompressorLibraryFactoryBase(gzipExtensionName()) {}\n\nprivate:\n  Envoy::Compression::Compressor::CompressorFactoryPtr createCompressorFactoryFromProtoTyped(\n      const envoy::extensions::compression::gzip::compressor::v3::Gzip& config) override;\n};\n\nDECLARE_FACTORY(GzipCompressorLibraryFactory);\n\n} // namespace Compressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/compressor/zlib_compressor_impl.cc",
    "content": "#include \"extensions/compression/gzip/compressor/zlib_compressor_impl.h\"\n\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Compressor {\n\nZlibCompressorImpl::ZlibCompressorImpl() : ZlibCompressorImpl(4096) {}\n\nZlibCompressorImpl::ZlibCompressorImpl(uint64_t chunk_size)\n    : Zlib::Base(chunk_size, [](z_stream* z) {\n        deflateEnd(z);\n        delete z;\n      }) {\n  zstream_ptr_->zalloc = Z_NULL;\n  zstream_ptr_->zfree = Z_NULL;\n  zstream_ptr_->opaque = Z_NULL;\n  zstream_ptr_->avail_out = chunk_size_;\n  zstream_ptr_->next_out = chunk_char_ptr_.get();\n}\n\nvoid ZlibCompressorImpl::init(CompressionLevel comp_level, CompressionStrategy comp_strategy,\n                              int64_t window_bits, uint64_t memory_level = 8) {\n  ASSERT(initialized_ == false);\n  const int result = deflateInit2(zstream_ptr_.get(), static_cast<int64_t>(comp_level), Z_DEFLATED,\n                                  window_bits, memory_level, static_cast<uint64_t>(comp_strategy));\n  RELEASE_ASSERT(result >= 0, \"\");\n  initialized_ = true;\n}\n\nvoid ZlibCompressorImpl::compress(Buffer::Instance& buffer,\n                                  Envoy::Compression::Compressor::State state) {\n  for (const Buffer::RawSlice& input_slice : buffer.getRawSlices()) {\n    zstream_ptr_->avail_in = input_slice.len_;\n    zstream_ptr_->next_in = static_cast<Bytef*>(input_slice.mem_);\n    // Z_NO_FLUSH tells the compressor to take the data in and compresses it as much as possible\n    // without flushing it out. However, if the data output is greater or equal to the allocated\n    // chunk size, process() outputs it to the end of the buffer. 
This is fine, since at the next\n    // step, the buffer is drained from the beginning of the buffer by the size of input.\n    process(buffer, Z_NO_FLUSH);\n    buffer.drain(input_slice.len_);\n  }\n\n  process(buffer, state == Envoy::Compression::Compressor::State::Finish ? Z_FINISH : Z_SYNC_FLUSH);\n}\n\nbool ZlibCompressorImpl::deflateNext(int64_t flush_state) {\n  const int result = deflate(zstream_ptr_.get(), flush_state);\n  switch (flush_state) {\n  case Z_FINISH:\n    if (result != Z_OK && result != Z_BUF_ERROR) {\n      RELEASE_ASSERT(result == Z_STREAM_END, \"\");\n      return false;\n    }\n    FALLTHRU;\n  default:\n    if (result == Z_BUF_ERROR && zstream_ptr_->avail_in == 0) {\n      return false; // This means that zlib needs more input, so stop here.\n    }\n    RELEASE_ASSERT(result == Z_OK, \"\");\n  }\n\n  return true;\n}\n\nvoid ZlibCompressorImpl::process(Buffer::Instance& output_buffer, int64_t flush_state) {\n  while (deflateNext(flush_state)) {\n    if (zstream_ptr_->avail_out == 0) {\n      updateOutput(output_buffer);\n    }\n  }\n\n  if (flush_state == Z_SYNC_FLUSH || flush_state == Z_FINISH) {\n    updateOutput(output_buffer);\n  }\n}\n\n} // namespace Compressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/compressor/zlib_compressor_impl.h",
    "content": "#pragma once\n\n#include \"envoy/compression/compressor/compressor.h\"\n\n#include \"extensions/compression/gzip/common/base.h\"\n\n#include \"zlib.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Compressor {\n\n/**\n * Implementation of compressor's interface.\n */\nclass ZlibCompressorImpl : public Zlib::Base, public Envoy::Compression::Compressor::Compressor {\npublic:\n  ZlibCompressorImpl();\n\n  /**\n   * Constructor that allows setting the size of compressor's output buffer. It\n   * should be called whenever a buffer size different than the 4096 bytes, normally set by the\n   * default constructor, is desired. If memory is available and it makes sense to output large\n   * chunks of compressed data, zlib documentation suggests buffers sizes on the order of 128K or\n   * 256K bytes. @see http://zlib.net/zlib_how.html\n   * @param chunk_size amount of memory reserved for the compressor output.\n   */\n  ZlibCompressorImpl(uint64_t chunk_size);\n\n  /**\n   * Enum values used to set compression level during initialization.\n   * best: gives best compression.\n   * speed: gives best performance.\n   * levelX: allows to adjust trad-offs more precisely - from level1 (best speed, but very\n   * low compression ratio) to level9 (best compression, but low speed).\n   * standard: requests a default compromise between speed and compression. (default) @see zlib\n   * manual.\n   */\n  enum class CompressionLevel : int64_t {\n    Best = Z_BEST_COMPRESSION,\n    Level1 = 1,\n    Level2 = 2,\n    Level3 = 3,\n    Level4 = 4,\n    Level5 = 5,\n    Level6 = 6,\n    Level7 = 7,\n    Level8 = 8,\n    Level9 = 9,\n    Speed = Z_BEST_SPEED,\n    Standard = Z_DEFAULT_COMPRESSION,\n  };\n\n  /**\n   * Enum values are used for setting the compression algorithm strategy.\n   * filtered: used for data produced by a filter. (or predictor) @see Z_FILTERED (zlib manual)\n   * fixed: disable dynamic Huffman codes. 
@see Z_FIXED (zlib manual)\n   * huffman: used to enforce Huffman encoding. @see RFC 1951\n   * rle: used to limit match distances to one. (Run-length encoding)\n   * standard: used for normal data. (default) @see Z_DEFAULT_STRATEGY in zlib manual.\n   */\n  enum class CompressionStrategy : uint64_t {\n    Filtered = Z_FILTERED,\n    Fixed = Z_FIXED,\n    Huffman = Z_HUFFMAN_ONLY,\n    Rle = Z_RLE,\n    Standard = Z_DEFAULT_STRATEGY,\n  };\n\n  /**\n   * Init must be called in order to initialize the compressor. Once compressor is initialized, it\n   * cannot be initialized again. Init should run before compressing any data.\n   * @param level @see CompressionLevel enum\n   * @param strategy @see CompressionStrategy enum\n   * @param window_bits sets the size of the history buffer. Larger values result in better\n   * compression, but will use more memory @see window_bits. (zlib manual)\n   * @param memory_level sets how much memory should be allocated for the internal compression, min\n   * 1 and max 9. @see memory_level (zlib manual)\n   */\n  void init(CompressionLevel level, CompressionStrategy strategy, int64_t window_bits,\n            uint64_t memory_level);\n\n  // Compression::Compressor::Compressor\n  void compress(Buffer::Instance& buffer, Envoy::Compression::Compressor::State state) override;\n\nprivate:\n  bool deflateNext(int64_t flush_state);\n  void process(Buffer::Instance& output_buffer, int64_t flush_state);\n};\n\n} // namespace Compressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/decompressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"zlib_decompressor_impl_lib\",\n    srcs = [\"zlib_decompressor_impl.cc\"],\n    hdrs = [\"zlib_decompressor_impl.h\"],\n    external_deps = [\"zlib\"],\n    deps = [\n        \"//include/envoy/compression/decompressor:decompressor_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/extensions/compression/gzip/common:zlib_base_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":zlib_decompressor_impl_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/compression/common/decompressor:decompressor_factory_base_lib\",\n        \"@envoy_api//envoy/extensions/compression/gzip/decompressor/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/compression/gzip/decompressor/config.cc",
    "content": "#include \"extensions/compression/gzip/decompressor/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Decompressor {\n\nnamespace {\nconst uint32_t DefaultWindowBits = 12;\nconst uint32_t DefaultChunkSize = 4096;\n// When logical OR'ed to window bits, this tells zlib library to decompress gzip data per:\n// inflateInit2 in https://www.zlib.net/manual.html\nconst uint32_t GzipHeaderValue = 16;\n} // namespace\n\nGzipDecompressorFactory::GzipDecompressorFactory(\n    const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip, Stats::Scope& scope)\n    : scope_(scope),\n      window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits) |\n                   GzipHeaderValue),\n      chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, DefaultChunkSize)) {}\n\nEnvoy::Compression::Decompressor::DecompressorPtr\nGzipDecompressorFactory::createDecompressor(const std::string& stats_prefix) {\n  auto decompressor = std::make_unique<ZlibDecompressorImpl>(scope_, stats_prefix, chunk_size_);\n  decompressor->init(window_bits_);\n  return decompressor;\n}\n\nEnvoy::Compression::Decompressor::DecompressorFactoryPtr\nGzipDecompressorLibraryFactory::createDecompressorFactoryFromProtoTyped(\n    const envoy::extensions::compression::gzip::decompressor::v3::Gzip& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  return std::make_unique<GzipDecompressorFactory>(proto_config, context.scope());\n}\n\n/**\n * Static registration for the gzip decompressor. @see NamedDecompressorLibraryConfigFactory.\n */\nREGISTER_FACTORY(GzipDecompressorLibraryFactory,\n                 Envoy::Compression::Decompressor::NamedDecompressorLibraryConfigFactory);\n} // namespace Decompressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/decompressor/config.h",
    "content": "#pragma once\n\n#include \"envoy/compression/decompressor/config.h\"\n#include \"envoy/extensions/compression/gzip/decompressor/v3/gzip.pb.h\"\n#include \"envoy/extensions/compression/gzip/decompressor/v3/gzip.pb.validate.h\"\n\n#include \"common/http/headers.h\"\n\n#include \"extensions/compression/common/decompressor/factory_base.h\"\n#include \"extensions/compression/gzip/decompressor/zlib_decompressor_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Decompressor {\n\nnamespace {\nconst std::string& gzipStatsPrefix() { CONSTRUCT_ON_FIRST_USE(std::string, \"gzip.\"); }\nconst std::string& gzipExtensionName() {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.compression.gzip.decompressor\");\n}\n\n} // namespace\n\nclass GzipDecompressorFactory : public Envoy::Compression::Decompressor::DecompressorFactory {\npublic:\n  GzipDecompressorFactory(const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip,\n                          Stats::Scope& scope);\n\n  // Envoy::Compression::Decompressor::DecompressorFactory\n  Envoy::Compression::Decompressor::DecompressorPtr\n  createDecompressor(const std::string& stats_prefix) override;\n  const std::string& statsPrefix() const override { return gzipStatsPrefix(); }\n  const std::string& contentEncoding() const override {\n    return Http::CustomHeaders::get().ContentEncodingValues.Gzip;\n  }\n\nprivate:\n  Stats::Scope& scope_;\n  const int32_t window_bits_;\n  const uint32_t chunk_size_;\n};\n\nclass GzipDecompressorLibraryFactory\n    : public Common::Decompressor::DecompressorLibraryFactoryBase<\n          envoy::extensions::compression::gzip::decompressor::v3::Gzip> {\npublic:\n  GzipDecompressorLibraryFactory() : DecompressorLibraryFactoryBase(gzipExtensionName()) {}\n\nprivate:\n  Envoy::Compression::Decompressor::DecompressorFactoryPtr createDecompressorFactoryFromProtoTyped(\n      const 
envoy::extensions::compression::gzip::decompressor::v3::Gzip& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\nDECLARE_FACTORY(GzipDecompressorLibraryFactory);\n\n} // namespace Decompressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc",
    "content": "#include \"extensions/compression/gzip/decompressor/zlib_decompressor_impl.h\"\n\n#include <zlib.h>\n\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Decompressor {\n\nZlibDecompressorImpl::ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix)\n    : ZlibDecompressorImpl(scope, stats_prefix, 4096) {}\n\nZlibDecompressorImpl::ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix,\n                                           uint64_t chunk_size)\n    : Zlib::Base(chunk_size,\n                 [](z_stream* z) {\n                   inflateEnd(z);\n                   delete z;\n                 }),\n      stats_(generateStats(stats_prefix, scope)) {\n  zstream_ptr_->zalloc = Z_NULL;\n  zstream_ptr_->zfree = Z_NULL;\n  zstream_ptr_->opaque = Z_NULL;\n  zstream_ptr_->avail_out = chunk_size_;\n  zstream_ptr_->next_out = chunk_char_ptr_.get();\n}\n\nvoid ZlibDecompressorImpl::init(int64_t window_bits) {\n  ASSERT(initialized_ == false);\n  const int result = inflateInit2(zstream_ptr_.get(), window_bits);\n  RELEASE_ASSERT(result >= 0, \"\");\n  initialized_ = true;\n}\n\nvoid ZlibDecompressorImpl::decompress(const Buffer::Instance& input_buffer,\n                                      Buffer::Instance& output_buffer) {\n  for (const Buffer::RawSlice& input_slice : input_buffer.getRawSlices()) {\n    zstream_ptr_->avail_in = input_slice.len_;\n    zstream_ptr_->next_in = static_cast<Bytef*>(input_slice.mem_);\n    while (inflateNext()) {\n      if (zstream_ptr_->avail_out == 0) {\n        updateOutput(output_buffer);\n      }\n    }\n  }\n\n  // Flush z_stream and reset its buffer. 
Otherwise the stale content of the buffer\n  // will pollute output upon the next call to decompress().\n  updateOutput(output_buffer);\n}\n\nbool ZlibDecompressorImpl::inflateNext() {\n  const int result = inflate(zstream_ptr_.get(), Z_NO_FLUSH);\n  if (result == Z_STREAM_END) {\n    // Z_FINISH informs inflate to not maintain a sliding window if the stream completes, which\n    // reduces inflate's memory footprint. Ref: https://www.zlib.net/manual.html.\n    inflate(zstream_ptr_.get(), Z_FINISH);\n    return false;\n  }\n\n  if (result == Z_BUF_ERROR && zstream_ptr_->avail_in == 0) {\n    return false; // This means that zlib needs more input, so stop here.\n  }\n\n  if (result < 0) {\n    decompression_error_ = result;\n    ENVOY_LOG(trace,\n              \"zlib decompression error: {}, msg: {}. Error codes are defined in \"\n              \"https://www.zlib.net/manual.html\",\n              result, zstream_ptr_->msg);\n    chargeErrorStats(result);\n    return false;\n  }\n\n  return true;\n}\n\nvoid ZlibDecompressorImpl::chargeErrorStats(const int result) {\n  switch (result) {\n  case Z_ERRNO:\n    stats_.zlib_errno_.inc();\n    break;\n  case Z_STREAM_ERROR:\n    stats_.zlib_stream_error_.inc();\n    break;\n  case Z_DATA_ERROR:\n    stats_.zlib_data_error_.inc();\n    break;\n  case Z_MEM_ERROR:\n    stats_.zlib_mem_error_.inc();\n    break;\n  case Z_BUF_ERROR:\n    stats_.zlib_buf_error_.inc();\n    break;\n  case Z_VERSION_ERROR:\n    stats_.zlib_version_error_.inc();\n    break;\n  }\n}\n\n} // namespace Decompressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h",
    "content": "#pragma once\n\n#include \"envoy/compression/decompressor/decompressor.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/compression/gzip/common/base.h\"\n\n#include \"zlib.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Decompressor {\n\n/**\n * All zlib decompressor stats. @see stats_macros.h\n */\n#define ALL_ZLIB_DECOMPRESSOR_STATS(COUNTER)                                                       \\\n  COUNTER(zlib_errno)                                                                              \\\n  COUNTER(zlib_stream_error)                                                                       \\\n  COUNTER(zlib_data_error)                                                                         \\\n  COUNTER(zlib_mem_error)                                                                          \\\n  COUNTER(zlib_buf_error)                                                                          \\\n  COUNTER(zlib_version_error)\n\n/**\n * Struct definition for zlib decompressor stats. @see stats_macros.h\n */\nstruct ZlibDecompressorStats {\n  ALL_ZLIB_DECOMPRESSOR_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Implementation of decompressor's interface.\n */\nclass ZlibDecompressorImpl : public Zlib::Base,\n                             public Envoy::Compression::Decompressor::Decompressor,\n                             public Logger::Loggable<Logger::Id::decompression> {\npublic:\n  ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix);\n\n  /**\n   * Constructor that allows setting the size of decompressor's output buffer. It\n   * should be called whenever a buffer size different than the 4096 bytes, normally set by the\n   * default constructor, is desired. 
If memory is available and it makes sense to output large\n   * chunks of compressed data, zlib documentation suggests buffers sizes on the order of 128K or\n   * 256K bytes. @see http://zlib.net/zlib_how.html\n   * @param chunk_size amount of memory reserved for the decompressor output.\n   */\n  ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix, uint64_t chunk_size);\n\n  /**\n   * Init must be called in order to initialize the decompressor. Once decompressor is initialized,\n   * it cannot be initialized again. Init should run before decompressing any data.\n   * @param window_bits sets the size of the history buffer. It must be greater than or equal to\n   * the window_bits value provided when data was compressed (zlib manual).\n   */\n  void init(int64_t window_bits);\n\n  // Compression::Decompressor::Decompressor\n  void decompress(const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) override;\n\n  // Flag to track whether error occurred during decompression.\n  // When an error occurs, the error code (a negative int) will be stored in this variable.\n  int decompression_error_{0};\n\nprivate:\n  // TODO: clean up friend class. This is here to allow coverage of chargeErrorStats as it isn't\n  // completely straightforward\n  // to cause zlib's inflate function to return all the error codes necessary to hit all the cases\n  // in the switch statement.\n  friend class ZlibDecompressorStatsTest;\n  static ZlibDecompressorStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return ZlibDecompressorStats{ALL_ZLIB_DECOMPRESSOR_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n  }\n\n  bool inflateNext();\n  void chargeErrorStats(const int result);\n\n  const ZlibDecompressorStats stats_;\n};\n\n} // namespace Decompressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/extensions_build_config.bzl",
    "content": "# See bazel/README.md for details on how this system works.\nEXTENSIONS = {\n    #\n    # Access loggers\n    #\n\n    \"envoy.access_loggers.file\":                        \"//source/extensions/access_loggers/file:config\",\n    \"envoy.access_loggers.http_grpc\":                   \"//source/extensions/access_loggers/grpc:http_config\",\n    \"envoy.access_loggers.tcp_grpc\":                    \"//source/extensions/access_loggers/grpc:tcp_config\",\n    \"envoy.access_loggers.wasm\":                        \"//source/extensions/access_loggers/wasm:config\",\n\n    #\n    # Clusters\n    #\n\n    \"envoy.clusters.aggregate\":                         \"//source/extensions/clusters/aggregate:cluster\",\n    \"envoy.clusters.dynamic_forward_proxy\":             \"//source/extensions/clusters/dynamic_forward_proxy:cluster\",\n    \"envoy.clusters.redis\":                             \"//source/extensions/clusters/redis:redis_cluster\",\n\n    #\n    # Compression\n    #\n\n    \"envoy.compression.gzip.compressor\":                \"//source/extensions/compression/gzip/compressor:config\",\n    \"envoy.compression.gzip.decompressor\":              \"//source/extensions/compression/gzip/decompressor:config\",\n\n    #\n    # gRPC Credentials Plugins\n    #\n\n    \"envoy.grpc_credentials.file_based_metadata\":       \"//source/extensions/grpc_credentials/file_based_metadata:config\",\n    \"envoy.grpc_credentials.aws_iam\":                   \"//source/extensions/grpc_credentials/aws_iam:config\",\n\n    #\n    # WASM\n    #\n    \"envoy.bootstrap.wasm\":                             \"//source/extensions/bootstrap/wasm:config\",\n\n    #\n    # Health checkers\n    #\n\n    \"envoy.health_checkers.redis\":                      \"//source/extensions/health_checkers/redis:config\",\n\n    #\n    # HTTP filters\n    #\n\n    \"envoy.filters.http.adaptive_concurrency\":          \"//source/extensions/filters/http/adaptive_concurrency:config\",\n    
\"envoy.filters.http.admission_control\":             \"//source/extensions/filters/http/admission_control:config\",\n    \"envoy.filters.http.aws_lambda\":                    \"//source/extensions/filters/http/aws_lambda:config\",\n    \"envoy.filters.http.aws_request_signing\":           \"//source/extensions/filters/http/aws_request_signing:config\",\n    \"envoy.filters.http.buffer\":                        \"//source/extensions/filters/http/buffer:config\",\n    \"envoy.filters.http.cache\":                         \"//source/extensions/filters/http/cache:config\",\n    \"envoy.filters.http.cdn_loop\":                      \"//source/extensions/filters/http/cdn_loop:config\",\n    \"envoy.filters.http.compressor\":                    \"//source/extensions/filters/http/compressor:config\",\n    \"envoy.filters.http.cors\":                          \"//source/extensions/filters/http/cors:config\",\n    \"envoy.filters.http.csrf\":                          \"//source/extensions/filters/http/csrf:config\",\n    \"envoy.filters.http.decompressor\":                  \"//source/extensions/filters/http/decompressor:config\",\n    \"envoy.filters.http.dynamic_forward_proxy\":         \"//source/extensions/filters/http/dynamic_forward_proxy:config\",\n    \"envoy.filters.http.dynamo\":                        \"//source/extensions/filters/http/dynamo:config\",\n    \"envoy.filters.http.ext_authz\":                     \"//source/extensions/filters/http/ext_authz:config\",\n    \"envoy.filters.http.fault\":                         \"//source/extensions/filters/http/fault:config\",\n    \"envoy.filters.http.grpc_http1_bridge\":             \"//source/extensions/filters/http/grpc_http1_bridge:config\",\n    \"envoy.filters.http.grpc_http1_reverse_bridge\":     \"//source/extensions/filters/http/grpc_http1_reverse_bridge:config\",\n    \"envoy.filters.http.grpc_json_transcoder\":          \"//source/extensions/filters/http/grpc_json_transcoder:config\",\n    
\"envoy.filters.http.grpc_stats\":                    \"//source/extensions/filters/http/grpc_stats:config\",\n    \"envoy.filters.http.grpc_web\":                      \"//source/extensions/filters/http/grpc_web:config\",\n    \"envoy.filters.http.gzip\":                          \"//source/extensions/filters/http/gzip:config\",\n    \"envoy.filters.http.header_to_metadata\":            \"//source/extensions/filters/http/header_to_metadata:config\",\n    \"envoy.filters.http.health_check\":                  \"//source/extensions/filters/http/health_check:config\",\n    \"envoy.filters.http.ip_tagging\":                    \"//source/extensions/filters/http/ip_tagging:config\",\n    \"envoy.filters.http.jwt_authn\":                     \"//source/extensions/filters/http/jwt_authn:config\",\n    \"envoy.filters.http.local_ratelimit\":               \"//source/extensions/filters/http/local_ratelimit:config\",\n    \"envoy.filters.http.lua\":                           \"//source/extensions/filters/http/lua:config\",\n    \"envoy.filters.http.oauth2\":                         \"//source/extensions/filters/http/oauth2:config\",\n    \"envoy.filters.http.on_demand\":                     \"//source/extensions/filters/http/on_demand:config\",\n    \"envoy.filters.http.original_src\":                  \"//source/extensions/filters/http/original_src:config\",\n    \"envoy.filters.http.ratelimit\":                     \"//source/extensions/filters/http/ratelimit:config\",\n    \"envoy.filters.http.rbac\":                          \"//source/extensions/filters/http/rbac:config\",\n    \"envoy.filters.http.router\":                        \"//source/extensions/filters/http/router:config\",\n    \"envoy.filters.http.squash\":                        \"//source/extensions/filters/http/squash:config\",\n    \"envoy.filters.http.tap\":                           \"//source/extensions/filters/http/tap:config\",\n    \"envoy.filters.http.wasm\":                          
\"//source/extensions/filters/http/wasm:config\",\n\n    #\n    # Listener filters\n    #\n\n    \"envoy.filters.listener.http_inspector\":            \"//source/extensions/filters/listener/http_inspector:config\",\n    # NOTE: The original_dst filter is implicitly loaded if original_dst functionality is\n    #       configured on the listener. Do not remove it in that case or configs will fail to load.\n    \"envoy.filters.listener.original_dst\":              \"//source/extensions/filters/listener/original_dst:config\",\n    \"envoy.filters.listener.original_src\":              \"//source/extensions/filters/listener/original_src:config\",\n    # NOTE: The proxy_protocol filter is implicitly loaded if proxy_protocol functionality is\n    #       configured on the listener. Do not remove it in that case or configs will fail to load.\n    \"envoy.filters.listener.proxy_protocol\":            \"//source/extensions/filters/listener/proxy_protocol:config\",\n    \"envoy.filters.listener.tls_inspector\":             \"//source/extensions/filters/listener/tls_inspector:config\",\n\n    #\n    # Network filters\n    #\n\n    \"envoy.filters.network.client_ssl_auth\":            \"//source/extensions/filters/network/client_ssl_auth:config\",\n    \"envoy.filters.network.direct_response\":            \"//source/extensions/filters/network/direct_response:config\",\n    \"envoy.filters.network.dubbo_proxy\":                \"//source/extensions/filters/network/dubbo_proxy:config\",\n    \"envoy.filters.network.echo\":                       \"//source/extensions/filters/network/echo:config\",\n    \"envoy.filters.network.ext_authz\":                  \"//source/extensions/filters/network/ext_authz:config\",\n    \"envoy.filters.network.http_connection_manager\":    \"//source/extensions/filters/network/http_connection_manager:config\",\n    # WiP\n    \"envoy.filters.network.kafka_broker\":               \"//source/extensions/filters/network/kafka:kafka_broker_config_lib\",\n  
  \"envoy.filters.network.local_ratelimit\":            \"//source/extensions/filters/network/local_ratelimit:config\",\n    \"envoy.filters.network.mongo_proxy\":                \"//source/extensions/filters/network/mongo_proxy:config\",\n    \"envoy.filters.network.mysql_proxy\":                \"//source/extensions/filters/network/mysql_proxy:config\",\n    \"envoy.filters.network.postgres_proxy\":             \"//source/extensions/filters/network/postgres_proxy:config\",\n    \"envoy.filters.network.ratelimit\":                  \"//source/extensions/filters/network/ratelimit:config\",\n    \"envoy.filters.network.rbac\":                       \"//source/extensions/filters/network/rbac:config\",\n    \"envoy.filters.network.redis_proxy\":                \"//source/extensions/filters/network/redis_proxy:config\",\n    \"envoy.filters.network.rocketmq_proxy\":             \"//source/extensions/filters/network/rocketmq_proxy:config\",\n    \"envoy.filters.network.tcp_proxy\":                  \"//source/extensions/filters/network/tcp_proxy:config\",\n    \"envoy.filters.network.thrift_proxy\":               \"//source/extensions/filters/network/thrift_proxy:config\",\n    \"envoy.filters.network.sni_cluster\":                \"//source/extensions/filters/network/sni_cluster:config\",\n    \"envoy.filters.network.sni_dynamic_forward_proxy\":  \"//source/extensions/filters/network/sni_dynamic_forward_proxy:config\",\n    \"envoy.filters.network.wasm\":                       \"//source/extensions/filters/network/wasm:config\",\n    \"envoy.filters.network.zookeeper_proxy\":            \"//source/extensions/filters/network/zookeeper_proxy:config\",\n\n    #\n    # UDP filters\n    #\n\n    \"envoy.filters.udp_listener.dns_filter\":            \"//source/extensions/filters/udp/dns_filter:config\",\n    \"envoy.filters.udp_listener.udp_proxy\":             \"//source/extensions/filters/udp/udp_proxy:config\",\n\n    #\n    # Resource monitors\n    #\n\n    
\"envoy.resource_monitors.fixed_heap\":               \"//source/extensions/resource_monitors/fixed_heap:config\",\n    \"envoy.resource_monitors.injected_resource\":        \"//source/extensions/resource_monitors/injected_resource:config\",\n\n    #\n    # Stat sinks\n    #\n\n    \"envoy.stat_sinks.dog_statsd\":                      \"//source/extensions/stat_sinks/dog_statsd:config\",\n    \"envoy.stat_sinks.hystrix\":                         \"//source/extensions/stat_sinks/hystrix:config\",\n    \"envoy.stat_sinks.metrics_service\":                 \"//source/extensions/stat_sinks/metrics_service:config\",\n    \"envoy.stat_sinks.statsd\":                          \"//source/extensions/stat_sinks/statsd:config\",\n    \"envoy.stat_sinks.wasm\":                            \"//source/extensions/stat_sinks/wasm:config\",\n\n    #\n    # Thrift filters\n    #\n\n    \"envoy.filters.thrift.router\":                      \"//source/extensions/filters/network/thrift_proxy/router:config\",\n    \"envoy.filters.thrift.ratelimit\":                   \"//source/extensions/filters/network/thrift_proxy/filters/ratelimit:config\",\n\n    #\n    # Tracers\n    #\n\n    \"envoy.tracers.dynamic_ot\":                         \"//source/extensions/tracers/dynamic_ot:config\",\n    \"envoy.tracers.lightstep\":                          \"//source/extensions/tracers/lightstep:config\",\n    \"envoy.tracers.datadog\":                            \"//source/extensions/tracers/datadog:config\",\n    \"envoy.tracers.zipkin\":                             \"//source/extensions/tracers/zipkin:config\",\n    \"envoy.tracers.opencensus\":                         \"//source/extensions/tracers/opencensus:config\",\n    # WiP\n    \"envoy.tracers.xray\":                               \"//source/extensions/tracers/xray:config\",\n\n    #\n    # Transport sockets\n    #\n\n    \"envoy.transport_sockets.alts\":                     \"//source/extensions/transport_sockets/alts:config\",\n    
\"envoy.transport_sockets.upstream_proxy_protocol\":  \"//source/extensions/transport_sockets/proxy_protocol:upstream_config\",\n    \"envoy.transport_sockets.raw_buffer\":               \"//source/extensions/transport_sockets/raw_buffer:config\",\n    \"envoy.transport_sockets.tap\":                      \"//source/extensions/transport_sockets/tap:config\",\n    \"envoy.transport_sockets.quic\":                     \"//source/extensions/quic_listeners/quiche:quic_factory_lib\",\n\n    #\n    # Retry host predicates\n    #\n\n    \"envoy.retry_host_predicates.previous_hosts\":       \"//source/extensions/retry/host/previous_hosts:config\",\n    \"envoy.retry_host_predicates.omit_canary_hosts\":    \"//source/extensions/retry/host/omit_canary_hosts:config\",\n    \"envoy.retry_host_predicates.omit_host_metadata\":   \"//source/extensions/retry/host/omit_host_metadata:config\",\n\n    #\n    # Retry priorities\n    #\n\n    \"envoy.retry_priorities.previous_priorities\":       \"//source/extensions/retry/priority/previous_priorities:config\",\n\n    #\n    # CacheFilter plugins\n    #\n\n    \"envoy.filters.http.cache.simple_http_cache\":       \"//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib\",\n\n    #\n    # Internal redirect predicates\n    #\n    \"envoy.internal_redirect_predicates.allow_listed_routes\": \"//source/extensions/internal_redirect/allow_listed_routes:config\",\n    \"envoy.internal_redirect_predicates.previous_routes\":     \"//source/extensions/internal_redirect/previous_routes:config\",\n    \"envoy.internal_redirect_predicates.safe_cross_scheme\":   \"//source/extensions/internal_redirect/safe_cross_scheme:config\",\n\n    #\n    # Http Upstreams (excepting envoy.upstreams.http.generic which is hard-coded into the build so not registered here)\n    #\n    \"envoy.upstreams.http.http\":                        \"//source/extensions/upstreams/http/http:config\",\n    \"envoy.upstreams.http.tcp\":                      
   \"//source/extensions/upstreams/http/tcp:config\",\n\n    #\n    # Watchdog actions\n    #\n    \"envoy.watchdog.profile_action\":                    \"//source/extensions/watchdog/profile_action:config\",\n    \"envoy.watchdog.abort_action\":                      \"//source/extensions/watchdog/abort_action:config\",\n\n}\n\n# These can be changed to [\"//visibility:public\"], for  downstream builds which\n# need to directly reference Envoy extensions.\nEXTENSION_CONFIG_VISIBILITY = [\"//:extension_config\"]\nEXTENSION_PACKAGE_VISIBILITY = [\"//:extension_library\"]\n"
  },
  {
    "path": "source/extensions/filters/common/expr/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"evaluator_lib\",\n    srcs = [\"evaluator.cc\"],\n    hdrs = [\"evaluator.h\"],\n    deps = [\n        \":context_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf\",\n        \"@com_google_cel_cpp//eval/public:builtin_func_registrar\",\n        \"@com_google_cel_cpp//eval/public:cel_expr_builder_factory\",\n        \"@com_google_cel_cpp//eval/public:cel_expression\",\n        \"@com_google_cel_cpp//eval/public:cel_value\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"context_lib\",\n    srcs = [\"context.cc\"],\n    hdrs = [\"context.h\"],\n    deps = [\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/stream_info:utility_lib\",\n        \"@com_google_cel_cpp//eval/public:cel_value\",\n        \"@com_google_cel_cpp//eval/public:cel_value_producer\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/common/expr/context.cc",
    "content": "#include \"extensions/filters/common/expr/context.h\"\n\n#include \"common/grpc/common.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/utility.h\"\n\n#include \"absl/strings/numbers.h\"\n#include \"absl/time/time.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Expr {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    referer_handle(Http::CustomHeaders::get().Referer);\n\nabsl::optional<CelValue> convertHeaderEntry(const Http::HeaderEntry* header) {\n  if (header == nullptr) {\n    return {};\n  }\n  return CelValue::CreateStringView(header->value().getStringView());\n}\n\nabsl::optional<CelValue>\nconvertHeaderEntry(Protobuf::Arena& arena,\n                   Http::HeaderUtility::GetAllOfHeaderAsStringResult&& result) {\n  if (!result.result().has_value()) {\n    return {};\n  } else if (!result.backingString().empty()) {\n    return CelValue::CreateString(\n        Protobuf::Arena::Create<std::string>(&arena, result.backingString()));\n  } else {\n    return CelValue::CreateStringView(result.result().value());\n  }\n}\n\nnamespace {\n\nabsl::optional<CelValue> extractSslInfo(const Ssl::ConnectionInfo& ssl_info,\n                                        absl::string_view value) {\n  if (value == TLSVersion) {\n    return CelValue::CreateString(&ssl_info.tlsVersion());\n  } else if (value == SubjectLocalCertificate) {\n    return CelValue::CreateString(&ssl_info.subjectLocalCertificate());\n  } else if (value == SubjectPeerCertificate) {\n    return CelValue::CreateString(&ssl_info.subjectPeerCertificate());\n  } else if (value == URISanLocalCertificate) {\n    if (!ssl_info.uriSanLocalCertificate().empty()) {\n      return CelValue::CreateString(&ssl_info.uriSanLocalCertificate()[0]);\n    }\n  } else if (value == URISanPeerCertificate) {\n    if (!ssl_info.uriSanPeerCertificate().empty()) {\n      return 
CelValue::CreateString(&ssl_info.uriSanPeerCertificate()[0]);\n    }\n  } else if (value == DNSSanLocalCertificate) {\n    if (!ssl_info.dnsSansLocalCertificate().empty()) {\n      return CelValue::CreateString(&ssl_info.dnsSansLocalCertificate()[0]);\n    }\n  } else if (value == DNSSanPeerCertificate) {\n    if (!ssl_info.dnsSansPeerCertificate().empty()) {\n      return CelValue::CreateString(&ssl_info.dnsSansPeerCertificate()[0]);\n    }\n  }\n  return {};\n}\n\n} // namespace\n\nabsl::optional<CelValue> RequestWrapper::operator[](CelValue key) const {\n  if (!key.IsString()) {\n    return {};\n  }\n  auto value = key.StringOrDie().value();\n\n  if (value == Headers) {\n    return CelValue::CreateMap(&headers_);\n  } else if (value == Time) {\n    return CelValue::CreateTimestamp(absl::FromChrono(info_.startTime()));\n  } else if (value == Size) {\n    // it is important to make a choice whether to rely on content-length vs stream info\n    // (which is not available at the time of the request headers)\n    if (headers_.value_ != nullptr && headers_.value_->ContentLength() != nullptr) {\n      int64_t length;\n      if (absl::SimpleAtoi(headers_.value_->getContentLengthValue(), &length)) {\n        return CelValue::CreateInt64(length);\n      }\n    } else {\n      return CelValue::CreateInt64(info_.bytesReceived());\n    }\n  } else if (value == TotalSize) {\n    return CelValue::CreateInt64(info_.bytesReceived() +\n                                 (headers_.value_ ? 
headers_.value_->byteSize() : 0));\n  } else if (value == Duration) {\n    auto duration = info_.requestComplete();\n    if (duration.has_value()) {\n      return CelValue::CreateDuration(absl::FromChrono(duration.value()));\n    }\n  } else if (value == Protocol) {\n    if (info_.protocol().has_value()) {\n      return CelValue::CreateString(&Http::Utility::getProtocolString(info_.protocol().value()));\n    } else {\n      return {};\n    }\n  }\n\n  if (headers_.value_ != nullptr) {\n    if (value == Path) {\n      return convertHeaderEntry(headers_.value_->Path());\n    } else if (value == UrlPath) {\n      absl::string_view path = headers_.value_->getPathValue();\n      size_t query_offset = path.find('?');\n      if (query_offset == absl::string_view::npos) {\n        return CelValue::CreateStringView(path);\n      }\n      return CelValue::CreateStringView(path.substr(0, query_offset));\n    } else if (value == Host) {\n      return convertHeaderEntry(headers_.value_->Host());\n    } else if (value == Scheme) {\n      return convertHeaderEntry(headers_.value_->Scheme());\n    } else if (value == Method) {\n      return convertHeaderEntry(headers_.value_->Method());\n    } else if (value == Referer) {\n      return convertHeaderEntry(headers_.value_->getInline(referer_handle.handle()));\n    } else if (value == ID) {\n      return convertHeaderEntry(headers_.value_->RequestId());\n    } else if (value == UserAgent) {\n      return convertHeaderEntry(headers_.value_->UserAgent());\n    }\n  }\n  return {};\n}\n\nabsl::optional<CelValue> ResponseWrapper::operator[](CelValue key) const {\n  if (!key.IsString()) {\n    return {};\n  }\n  auto value = key.StringOrDie().value();\n  if (value == Code) {\n    auto code = info_.responseCode();\n    if (code.has_value()) {\n      return CelValue::CreateInt64(code.value());\n    }\n    return {};\n  } else if (value == Size) {\n    return CelValue::CreateInt64(info_.bytesSent());\n  } else if (value == Headers) {\n    
return CelValue::CreateMap(&headers_);\n  } else if (value == Trailers) {\n    return CelValue::CreateMap(&trailers_);\n  } else if (value == Flags) {\n    return CelValue::CreateInt64(info_.responseFlags());\n  } else if (value == GrpcStatus) {\n    auto const& optional_status = Grpc::Common::getGrpcStatus(\n        trailers_.value_ ? *trailers_.value_ : *Http::StaticEmptyHeaders::get().response_trailers,\n        headers_.value_ ? *headers_.value_ : *Http::StaticEmptyHeaders::get().response_headers,\n        info_);\n    if (optional_status.has_value()) {\n      return CelValue::CreateInt64(optional_status.value());\n    }\n    return {};\n  } else if (value == TotalSize) {\n    return CelValue::CreateInt64(info_.bytesSent() +\n                                 (headers_.value_ ? headers_.value_->byteSize() : 0) +\n                                 (trailers_.value_ ? trailers_.value_->byteSize() : 0));\n  } else if (value == CodeDetails) {\n    const absl::optional<std::string>& details = info_.responseCodeDetails();\n    if (details.has_value()) {\n      return CelValue::CreateString(&details.value());\n    }\n    return {};\n  }\n  return {};\n}\n\nabsl::optional<CelValue> ConnectionWrapper::operator[](CelValue key) const {\n  if (!key.IsString()) {\n    return {};\n  }\n  auto value = key.StringOrDie().value();\n  if (value == MTLS) {\n    return CelValue::CreateBool(info_.downstreamSslConnection() != nullptr &&\n                                info_.downstreamSslConnection()->peerCertificatePresented());\n  } else if (value == RequestedServerName) {\n    return CelValue::CreateString(&info_.requestedServerName());\n  } else if (value == ID) {\n    auto id = info_.connectionID();\n    if (id.has_value()) {\n      return CelValue::CreateUint64(id.value());\n    }\n    return {};\n  }\n\n  auto ssl_info = info_.downstreamSslConnection();\n  if (ssl_info != nullptr) {\n    return extractSslInfo(*ssl_info, value);\n  }\n\n  return {};\n}\n\nabsl::optional<CelValue> 
UpstreamWrapper::operator[](CelValue key) const {\n  if (!key.IsString()) {\n    return {};\n  }\n  auto value = key.StringOrDie().value();\n  if (value == Address) {\n    auto upstream_host = info_.upstreamHost();\n    if (upstream_host != nullptr && upstream_host->address() != nullptr) {\n      return CelValue::CreateStringView(upstream_host->address()->asStringView());\n    }\n  } else if (value == Port) {\n    auto upstream_host = info_.upstreamHost();\n    if (upstream_host != nullptr && upstream_host->address() != nullptr &&\n        upstream_host->address()->ip() != nullptr) {\n      return CelValue::CreateInt64(upstream_host->address()->ip()->port());\n    }\n  } else if (value == UpstreamLocalAddress) {\n    auto upstream_local_address = info_.upstreamLocalAddress();\n    if (upstream_local_address != nullptr) {\n      return CelValue::CreateStringView(upstream_local_address->asStringView());\n    }\n  } else if (value == UpstreamTransportFailureReason) {\n    return CelValue::CreateStringView(info_.upstreamTransportFailureReason());\n  }\n\n  auto ssl_info = info_.upstreamSslConnection();\n  if (ssl_info != nullptr) {\n    return extractSslInfo(*ssl_info, value);\n  }\n\n  return {};\n}\n\nabsl::optional<CelValue> PeerWrapper::operator[](CelValue key) const {\n  if (!key.IsString()) {\n    return {};\n  }\n  auto value = key.StringOrDie().value();\n  if (value == Address) {\n    if (local_) {\n      return CelValue::CreateStringView(info_.downstreamLocalAddress()->asStringView());\n    } else {\n      return CelValue::CreateStringView(info_.downstreamRemoteAddress()->asStringView());\n    }\n  } else if (value == Port) {\n    if (local_) {\n      if (info_.downstreamLocalAddress()->ip() != nullptr) {\n        return CelValue::CreateInt64(info_.downstreamLocalAddress()->ip()->port());\n      }\n    } else {\n      if (info_.downstreamRemoteAddress()->ip() != nullptr) {\n        return CelValue::CreateInt64(info_.downstreamRemoteAddress()->ip()->port());\n  
    }\n    }\n  }\n\n  return {};\n}\n\nabsl::optional<CelValue> FilterStateWrapper::operator[](CelValue key) const {\n  if (!key.IsString()) {\n    return {};\n  }\n  auto value = key.StringOrDie().value();\n  if (filter_state_.hasDataWithName(value)) {\n    const StreamInfo::FilterState::Object* object = filter_state_.getDataReadOnlyGeneric(value);\n    absl::optional<std::string> serialized = object->serializeAsString();\n    if (serialized.has_value()) {\n      std::string* out = ProtobufWkt::Arena::Create<std::string>(arena_, serialized.value());\n      return CelValue::CreateBytes(out);\n    }\n  }\n  return {};\n}\n\n} // namespace Expr\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/expr/context.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"common/grpc/status.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n\n#include \"eval/public/cel_value.h\"\n#include \"eval/public/cel_value_producer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Expr {\n\nusing CelValue = google::api::expr::runtime::CelValue;\n\n// Symbols for traversing the request properties\nconstexpr absl::string_view Request = \"request\";\nconstexpr absl::string_view Path = \"path\";\nconstexpr absl::string_view UrlPath = \"url_path\";\nconstexpr absl::string_view Host = \"host\";\nconstexpr absl::string_view Scheme = \"scheme\";\nconstexpr absl::string_view Method = \"method\";\nconstexpr absl::string_view Referer = \"referer\";\nconstexpr absl::string_view Headers = \"headers\";\nconstexpr absl::string_view Time = \"time\";\nconstexpr absl::string_view ID = \"id\";\nconstexpr absl::string_view UserAgent = \"useragent\";\nconstexpr absl::string_view Size = \"size\";\nconstexpr absl::string_view TotalSize = \"total_size\";\nconstexpr absl::string_view Duration = \"duration\";\nconstexpr absl::string_view Protocol = \"protocol\";\n\n// Symbols for traversing the response properties\nconstexpr absl::string_view Response = \"response\";\nconstexpr absl::string_view Code = \"code\";\nconstexpr absl::string_view CodeDetails = \"code_details\";\nconstexpr absl::string_view Trailers = \"trailers\";\nconstexpr absl::string_view Flags = \"flags\";\nconstexpr absl::string_view GrpcStatus = \"grpc_status\";\n\n// Per-request or per-connection metadata\nconstexpr absl::string_view Metadata = \"metadata\";\n\n// Per-request or per-connection filter state\nconstexpr absl::string_view FilterState = \"filter_state\";\n\n// Connection properties\nconstexpr absl::string_view Connection = \"connection\";\nconstexpr absl::string_view MTLS 
= \"mtls\";\nconstexpr absl::string_view RequestedServerName = \"requested_server_name\";\nconstexpr absl::string_view TLSVersion = \"tls_version\";\nconstexpr absl::string_view SubjectLocalCertificate = \"subject_local_certificate\";\nconstexpr absl::string_view SubjectPeerCertificate = \"subject_peer_certificate\";\nconstexpr absl::string_view URISanLocalCertificate = \"uri_san_local_certificate\";\nconstexpr absl::string_view URISanPeerCertificate = \"uri_san_peer_certificate\";\nconstexpr absl::string_view DNSSanLocalCertificate = \"dns_san_local_certificate\";\nconstexpr absl::string_view DNSSanPeerCertificate = \"dns_san_peer_certificate\";\n\n// Source properties\nconstexpr absl::string_view Source = \"source\";\nconstexpr absl::string_view Address = \"address\";\nconstexpr absl::string_view Port = \"port\";\n\n// Destination properties\nconstexpr absl::string_view Destination = \"destination\";\n\n// Upstream properties\nconstexpr absl::string_view Upstream = \"upstream\";\nconstexpr absl::string_view UpstreamLocalAddress = \"local_address\";\nconstexpr absl::string_view UpstreamTransportFailureReason = \"transport_failure_reason\";\n\nclass RequestWrapper;\n\nabsl::optional<CelValue> convertHeaderEntry(const Http::HeaderEntry* header);\nabsl::optional<CelValue>\nconvertHeaderEntry(Protobuf::Arena& arena,\n                   Http::HeaderUtility::GetAllOfHeaderAsStringResult&& result);\n\ntemplate <class T> class HeadersWrapper : public google::api::expr::runtime::CelMap {\npublic:\n  HeadersWrapper(Protobuf::Arena& arena, const T* value) : arena_(arena), value_(value) {}\n  absl::optional<CelValue> operator[](CelValue key) const override {\n    if (value_ == nullptr || !key.IsString()) {\n      return {};\n    }\n    auto str = std::string(key.StringOrDie().value());\n    if (!Http::validHeaderString(str)) {\n      // Reject key if it is an invalid header string\n      return {};\n    }\n    return convertHeaderEntry(\n        arena_, 
Http::HeaderUtility::getAllOfHeaderAsString(*value_, Http::LowerCaseString(str)));\n  }\n  int size() const override { return value_ == nullptr ? 0 : value_->size(); }\n  bool empty() const override { return value_ == nullptr ? true : value_->empty(); }\n  const google::api::expr::runtime::CelList* ListKeys() const override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\nprivate:\n  friend class RequestWrapper;\n  friend class ResponseWrapper;\n  Protobuf::Arena& arena_;\n  const T* value_;\n};\n\n// Wrapper for accessing properties from internal data structures.\n// Note that CEL assumes no ownership of the underlying data, so temporary\n// data must be arena-allocated.\nclass BaseWrapper : public google::api::expr::runtime::CelMap,\n                    public google::api::expr::runtime::CelValueProducer {\npublic:\n  int size() const override { return 0; }\n  bool empty() const override { return false; }\n  const google::api::expr::runtime::CelList* ListKeys() const override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  CelValue Produce(ProtobufWkt::Arena* arena) override {\n    // Producer is unique per evaluation arena since activation is re-created.\n    arena_ = arena;\n    return CelValue::CreateMap(this);\n  }\n\nprotected:\n  ProtobufWkt::Arena* arena_;\n};\n\nclass RequestWrapper : public BaseWrapper {\npublic:\n  RequestWrapper(Protobuf::Arena& arena, const Http::RequestHeaderMap* headers,\n                 const StreamInfo::StreamInfo& info)\n      : headers_(arena, headers), info_(info) {}\n  absl::optional<CelValue> operator[](CelValue key) const override;\n\nprivate:\n  const HeadersWrapper<Http::RequestHeaderMap> headers_;\n  const StreamInfo::StreamInfo& info_;\n};\n\nclass ResponseWrapper : public BaseWrapper {\npublic:\n  ResponseWrapper(Protobuf::Arena& arena, const Http::ResponseHeaderMap* headers,\n                  const Http::ResponseTrailerMap* trailers, const StreamInfo::StreamInfo& info)\n      : headers_(arena, headers), 
trailers_(arena, trailers), info_(info) {}\n  absl::optional<CelValue> operator[](CelValue key) const override;\n\nprivate:\n  const HeadersWrapper<Http::ResponseHeaderMap> headers_;\n  const HeadersWrapper<Http::ResponseTrailerMap> trailers_;\n  const StreamInfo::StreamInfo& info_;\n};\n\nclass ConnectionWrapper : public BaseWrapper {\npublic:\n  ConnectionWrapper(const StreamInfo::StreamInfo& info) : info_(info) {}\n  absl::optional<CelValue> operator[](CelValue key) const override;\n\nprivate:\n  const StreamInfo::StreamInfo& info_;\n};\n\nclass UpstreamWrapper : public BaseWrapper {\npublic:\n  UpstreamWrapper(const StreamInfo::StreamInfo& info) : info_(info) {}\n  absl::optional<CelValue> operator[](CelValue key) const override;\n\nprivate:\n  const StreamInfo::StreamInfo& info_;\n};\n\nclass PeerWrapper : public BaseWrapper {\npublic:\n  PeerWrapper(const StreamInfo::StreamInfo& info, bool local) : info_(info), local_(local) {}\n  absl::optional<CelValue> operator[](CelValue key) const override;\n\nprivate:\n  const StreamInfo::StreamInfo& info_;\n  const bool local_;\n};\n\nclass MetadataProducer : public google::api::expr::runtime::CelValueProducer {\npublic:\n  MetadataProducer(const envoy::config::core::v3::Metadata& metadata) : metadata_(metadata) {}\n  CelValue Produce(ProtobufWkt::Arena* arena) override {\n    return CelValue::CreateMessage(&metadata_, arena);\n  }\n\nprivate:\n  const envoy::config::core::v3::Metadata& metadata_;\n};\n\nclass FilterStateWrapper : public BaseWrapper {\npublic:\n  FilterStateWrapper(const StreamInfo::FilterState& filter_state) : filter_state_(filter_state) {}\n  absl::optional<CelValue> operator[](CelValue key) const override;\n\nprivate:\n  const StreamInfo::FilterState& filter_state_;\n};\n\n} // namespace Expr\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/expr/evaluator.cc",
    "content": "#include \"extensions/filters/common/expr/evaluator.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"eval/public/builtin_func_registrar.h\"\n#include \"eval/public/cel_expr_builder_factory.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Expr {\n\nActivationPtr createActivation(Protobuf::Arena& arena, const StreamInfo::StreamInfo& info,\n                               const Http::RequestHeaderMap* request_headers,\n                               const Http::ResponseHeaderMap* response_headers,\n                               const Http::ResponseTrailerMap* response_trailers) {\n  auto activation = std::make_unique<Activation>();\n  activation->InsertValueProducer(Request,\n                                  std::make_unique<RequestWrapper>(arena, request_headers, info));\n  activation->InsertValueProducer(Response, std::make_unique<ResponseWrapper>(\n                                                arena, response_headers, response_trailers, info));\n  activation->InsertValueProducer(Connection, std::make_unique<ConnectionWrapper>(info));\n  activation->InsertValueProducer(Upstream, std::make_unique<UpstreamWrapper>(info));\n  activation->InsertValueProducer(Source, std::make_unique<PeerWrapper>(info, false));\n  activation->InsertValueProducer(Destination, std::make_unique<PeerWrapper>(info, true));\n  activation->InsertValueProducer(Metadata,\n                                  std::make_unique<MetadataProducer>(info.dynamicMetadata()));\n  activation->InsertValueProducer(FilterState,\n                                  std::make_unique<FilterStateWrapper>(info.filterState()));\n  return activation;\n}\n\nBuilderPtr createBuilder(Protobuf::Arena* arena) {\n  google::api::expr::runtime::InterpreterOptions options;\n\n  // Security-oriented defaults\n  options.enable_comprehension = false;\n  options.enable_regex = true;\n  options.regex_max_program_size = 100;\n  
options.enable_string_conversion = false;\n  options.enable_string_concat = false;\n  options.enable_list_concat = false;\n\n  // Enable constant folding (performance optimization)\n  if (arena != nullptr) {\n    options.constant_folding = true;\n    options.constant_arena = arena;\n  }\n\n  auto builder = google::api::expr::runtime::CreateCelExpressionBuilder(options);\n  auto register_status =\n      google::api::expr::runtime::RegisterBuiltinFunctions(builder->GetRegistry(), options);\n  if (!register_status.ok()) {\n    throw CelException(\n        absl::StrCat(\"failed to register built-in functions: \", register_status.message()));\n  }\n  return builder;\n}\n\nExpressionPtr createExpression(Builder& builder, const google::api::expr::v1alpha1::Expr& expr) {\n  google::api::expr::v1alpha1::SourceInfo source_info;\n  auto cel_expression_status = builder.CreateExpression(&expr, &source_info);\n  if (!cel_expression_status.ok()) {\n    throw CelException(\n        absl::StrCat(\"failed to create an expression: \", cel_expression_status.status().message()));\n  }\n  return std::move(cel_expression_status.value());\n}\n\nabsl::optional<CelValue> evaluate(const Expression& expr, Protobuf::Arena& arena,\n                                  const StreamInfo::StreamInfo& info,\n                                  const Http::RequestHeaderMap* request_headers,\n                                  const Http::ResponseHeaderMap* response_headers,\n                                  const Http::ResponseTrailerMap* response_trailers) {\n  auto activation =\n      createActivation(arena, info, request_headers, response_headers, response_trailers);\n  auto eval_status = expr.Evaluate(*activation, &arena);\n  if (!eval_status.ok()) {\n    return {};\n  }\n\n  return eval_status.value();\n}\n\nbool matches(const Expression& expr, const StreamInfo::StreamInfo& info,\n             const Http::RequestHeaderMap& headers) {\n  Protobuf::Arena arena;\n  auto eval_status = 
Expr::evaluate(expr, arena, info, &headers, nullptr, nullptr);\n  if (!eval_status.has_value()) {\n    return false;\n  }\n  auto result = eval_status.value();\n  return result.IsBool() ? result.BoolOrDie() : false;\n}\n\n} // namespace Expr\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/expr/evaluator.h",
    "content": "#pragma once\n\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/common/expr/context.h\"\n\n#include \"eval/public/cel_expression.h\"\n#include \"eval/public/cel_value.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Expr {\n\nusing Activation = google::api::expr::runtime::Activation;\nusing ActivationPtr = std::unique_ptr<Activation>;\nusing Builder = google::api::expr::runtime::CelExpressionBuilder;\nusing BuilderPtr = std::unique_ptr<Builder>;\nusing Expression = google::api::expr::runtime::CelExpression;\nusing ExpressionPtr = std::unique_ptr<Expression>;\n\n// Creates an activation providing the common context attributes.\n// The activation lazily creates wrappers during an evaluation using the evaluation arena.\nActivationPtr createActivation(Protobuf::Arena& arena, const StreamInfo::StreamInfo& info,\n                               const Http::RequestHeaderMap* request_headers,\n                               const Http::ResponseHeaderMap* response_headers,\n                               const Http::ResponseTrailerMap* response_trailers);\n\n// Creates an expression builder. The optional arena is used to enable constant folding\n// for intermediate evaluation results.\n// Throws an exception if fails to construct an expression builder.\nBuilderPtr createBuilder(Protobuf::Arena* arena);\n\n// Creates an interpretable expression from a protobuf representation.\n// Throws an exception if fails to construct a runtime expression.\nExpressionPtr createExpression(Builder& builder, const google::api::expr::v1alpha1::Expr& expr);\n\n// Evaluates an expression for a request. 
The arena is used to hold intermediate computational\n// results and potentially the final value.\nabsl::optional<CelValue> evaluate(const Expression& expr, Protobuf::Arena& arena,\n                                  const StreamInfo::StreamInfo& info,\n                                  const Http::RequestHeaderMap* request_headers,\n                                  const Http::ResponseHeaderMap* response_headers,\n                                  const Http::ResponseTrailerMap* response_trailers);\n\n// Evaluates an expression and returns true if the expression evaluates to \"true\".\n// Returns false if the expression fails to evaluate.\nbool matches(const Expression& expr, const StreamInfo::StreamInfo& info,\n             const Http::RequestHeaderMap& headers);\n\n// Thrown when there is an CEL library error.\nclass CelException : public EnvoyException {\npublic:\n  CelException(const std::string& what) : EnvoyException(what) {}\n};\n\n} // namespace Expr\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ext_authz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"ext_authz_interface\",\n    hdrs = [\"ext_authz.h\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"ext_authz_grpc_lib\",\n    srcs = [\"ext_authz_grpc_impl.cc\"],\n    hdrs = [\"ext_authz_grpc_impl.h\"],\n    deps = [\n        \":check_request_utils_lib\",\n        \":ext_authz_interface\",\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//include/envoy/grpc:async_client_manager_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/http:protocol_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/grpc:async_client_lib\",\n        \"//source/common/grpc:typed_async_client_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n 
   ],\n)\n\nenvoy_cc_library(\n    name = \"ext_authz_http_lib\",\n    srcs = [\"ext_authz_http_impl.cc\"],\n    hdrs = [\"ext_authz_http_impl.h\"],\n    deps = [\n        \":check_request_utils_lib\",\n        \":ext_authz_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/http:async_client_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"check_request_utils_lib\",\n    srcs = [\"check_request_utils.cc\"],\n    hdrs = [\"check_request_utils.h\"],\n    deps = [\n        \":ext_authz_interface\",\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//include/envoy/grpc:async_client_manager_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/grpc:async_client_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/common/ext_authz/check_request_utils.cc",
    "content": "#include \"extensions/filters/common/ext_authz/check_request_utils.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/service/auth/v3/attribute_context.pb.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/ssl/connection.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/grpc/async_client_impl.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nvoid CheckRequestUtils::setAttrContextPeer(envoy::service::auth::v3::AttributeContext::Peer& peer,\n                                           const Network::Connection& connection,\n                                           const std::string& service, const bool local,\n                                           bool include_certificate) {\n\n  // Set the address\n  auto addr = peer.mutable_address();\n  if (local) {\n    Envoy::Network::Utility::addressToProtobufAddress(*connection.localAddress(), *addr);\n  } else {\n    Envoy::Network::Utility::addressToProtobufAddress(*connection.remoteAddress(), *addr);\n  }\n\n  // Set the principal. Preferably the URI SAN, DNS SAN or Subject in that order from the peer's\n  // cert. 
Include the X.509 certificate of the source peer, if configured to do so.\n  auto ssl = connection.ssl();\n  if (ssl != nullptr) {\n    if (local) {\n      const auto uri_sans = ssl->uriSanLocalCertificate();\n      if (uri_sans.empty()) {\n        const auto dns_sans = ssl->dnsSansLocalCertificate();\n        if (dns_sans.empty()) {\n          peer.set_principal(ssl->subjectLocalCertificate());\n        } else {\n          peer.set_principal(dns_sans[0]);\n        }\n      } else {\n        peer.set_principal(uri_sans[0]);\n      }\n    } else {\n      const auto uri_sans = ssl->uriSanPeerCertificate();\n      if (uri_sans.empty()) {\n        const auto dns_sans = ssl->dnsSansPeerCertificate();\n        if (dns_sans.empty()) {\n          peer.set_principal(ssl->subjectPeerCertificate());\n        } else {\n          peer.set_principal(dns_sans[0]);\n        }\n      } else {\n        peer.set_principal(uri_sans[0]);\n      }\n      if (include_certificate) {\n        peer.set_certificate(ssl->urlEncodedPemEncodedPeerCertificate());\n      }\n    }\n  }\n\n  if (!service.empty()) {\n    peer.set_service(service);\n  }\n}\n\nstd::string CheckRequestUtils::getHeaderStr(const Envoy::Http::HeaderEntry* entry) {\n  if (entry) {\n    // TODO(jmarantz): plumb absl::string_view further here; there's no need\n    // to allocate a temp string in the local uses.\n    return std::string(entry->value().getStringView());\n  }\n  return EMPTY_STRING;\n}\n\nvoid CheckRequestUtils::setRequestTime(envoy::service::auth::v3::AttributeContext::Request& req,\n                                       const StreamInfo::StreamInfo& stream_info) {\n  // Set the timestamp when the proxy receives the first byte of the request.\n  req.mutable_time()->MergeFrom(Protobuf::util::TimeUtil::NanosecondsToTimestamp(\n      std::chrono::duration_cast<std::chrono::nanoseconds>(\n          stream_info.startTime().time_since_epoch())\n          .count()));\n}\n\nvoid CheckRequestUtils::setHttpRequest(\n    
envoy::service::auth::v3::AttributeContext::HttpRequest& httpreq, uint64_t stream_id,\n    const StreamInfo::StreamInfo& stream_info, const Buffer::Instance* decoding_buffer,\n    const Envoy::Http::RequestHeaderMap& headers, uint64_t max_request_bytes, bool pack_as_bytes) {\n  httpreq.set_id(std::to_string(stream_id));\n  httpreq.set_method(getHeaderStr(headers.Method()));\n  httpreq.set_path(getHeaderStr(headers.Path()));\n  httpreq.set_host(getHeaderStr(headers.Host()));\n  httpreq.set_scheme(getHeaderStr(headers.Scheme()));\n  httpreq.set_size(stream_info.bytesReceived());\n\n  if (stream_info.protocol()) {\n    httpreq.set_protocol(Envoy::Http::Utility::getProtocolString(stream_info.protocol().value()));\n  }\n\n  // Fill in the headers.\n  auto* mutable_headers = httpreq.mutable_headers();\n  headers.iterate([mutable_headers](const Envoy::Http::HeaderEntry& e) {\n    // Skip any client EnvoyAuthPartialBody header, which could interfere with internal use.\n    if (e.key().getStringView() != Headers::get().EnvoyAuthPartialBody.get()) {\n      (*mutable_headers)[std::string(e.key().getStringView())] =\n          std::string(e.value().getStringView());\n    }\n    return Envoy::Http::HeaderMap::Iterate::Continue;\n  });\n\n  // Set request body.\n  if (max_request_bytes > 0 && decoding_buffer != nullptr) {\n    const uint64_t length = std::min(decoding_buffer->length(), max_request_bytes);\n    std::string data(length, 0);\n    decoding_buffer->copyOut(0, length, &data[0]);\n\n    // This pack_as_bytes flag allows us to switch the content type (bytes or string) of \"body\" to\n    // be sent to the external authorization server without doing string encoding check (in this\n    // case UTF-8 check).\n    if (pack_as_bytes) {\n      httpreq.set_raw_body(std::move(data));\n    } else {\n      httpreq.set_body(std::move(data));\n    }\n\n    // Add in a header to detect when a partial body is used.\n    (*mutable_headers)[Headers::get().EnvoyAuthPartialBody.get()] 
=\n        length != decoding_buffer->length() ? \"true\" : \"false\";\n  }\n}\n\nvoid CheckRequestUtils::setAttrContextRequest(\n    envoy::service::auth::v3::AttributeContext::Request& req, const uint64_t stream_id,\n    const StreamInfo::StreamInfo& stream_info, const Buffer::Instance* decoding_buffer,\n    const Envoy::Http::RequestHeaderMap& headers, uint64_t max_request_bytes, bool pack_as_bytes) {\n  setRequestTime(req, stream_info);\n  setHttpRequest(*req.mutable_http(), stream_id, stream_info, decoding_buffer, headers,\n                 max_request_bytes, pack_as_bytes);\n}\n\nvoid CheckRequestUtils::createHttpCheck(\n    const Envoy::Http::StreamDecoderFilterCallbacks* callbacks,\n    const Envoy::Http::RequestHeaderMap& headers,\n    Protobuf::Map<std::string, std::string>&& context_extensions,\n    envoy::config::core::v3::Metadata&& metadata_context,\n    envoy::service::auth::v3::CheckRequest& request, uint64_t max_request_bytes, bool pack_as_bytes,\n    bool include_peer_certificate) {\n\n  auto attrs = request.mutable_attributes();\n  const std::string service = getHeaderStr(headers.EnvoyDownstreamServiceCluster());\n\n  // *cb->connection(), callbacks->streamInfo() and callbacks->decodingBuffer() are not qualified as\n  // const.\n  auto* cb = const_cast<Envoy::Http::StreamDecoderFilterCallbacks*>(callbacks);\n  setAttrContextPeer(*attrs->mutable_source(), *cb->connection(), service, false,\n                     include_peer_certificate);\n  setAttrContextPeer(*attrs->mutable_destination(), *cb->connection(), EMPTY_STRING, true,\n                     include_peer_certificate);\n  setAttrContextRequest(*attrs->mutable_request(), cb->streamId(), cb->streamInfo(),\n                        cb->decodingBuffer(), headers, max_request_bytes, pack_as_bytes);\n\n  // Fill in the context extensions and metadata context.\n  (*attrs->mutable_context_extensions()) = std::move(context_extensions);\n  (*attrs->mutable_metadata_context()) = 
std::move(metadata_context);\n}\n\nvoid CheckRequestUtils::createTcpCheck(const Network::ReadFilterCallbacks* callbacks,\n                                       envoy::service::auth::v3::CheckRequest& request,\n                                       bool include_peer_certificate) {\n\n  auto attrs = request.mutable_attributes();\n\n  auto* cb = const_cast<Network::ReadFilterCallbacks*>(callbacks);\n  setAttrContextPeer(*attrs->mutable_source(), cb->connection(), \"\", false,\n                     include_peer_certificate);\n  setAttrContextPeer(*attrs->mutable_destination(), cb->connection(), \"\", true,\n                     include_peer_certificate);\n}\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ext_authz/check_request_utils.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/grpc/async_client_manager.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/protocol.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/service/auth/v3/attribute_context.pb.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/http/async_client_impl.h\"\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\n/**\n * For creating ext_authz.proto (authorization) request.\n * CheckRequestUtils is used to extract attributes from the TCP/HTTP request\n * and fill out the details in the authorization protobuf that is sent to authorization\n * service.\n * The specific information in the request is as per the specification in the\n * data plane API.\n */\nclass CheckRequestUtils {\npublic:\n  /**\n   * createHttpCheck is used to extract the attributes from the stream and the http headers\n   * and fill them up in the CheckRequest proto message.\n   * @param callbacks supplies the Http stream context from which data can be extracted.\n   * @param headers supplies the header map with http headers that will be used to create the\n   *        check request.\n   * @param request is the reference to the check request that will be filled up.\n   * @param with_request_body when true, will add the request body to the check request.\n   * @param pack_as_bytes when true, will set the check request body as bytes.\n   * @param include_peer_certificate whether to include the peer certificate in the check request.\n   */\n  
static void createHttpCheck(const Envoy::Http::StreamDecoderFilterCallbacks* callbacks,\n                              const Envoy::Http::RequestHeaderMap& headers,\n                              Protobuf::Map<std::string, std::string>&& context_extensions,\n                              envoy::config::core::v3::Metadata&& metadata_context,\n                              envoy::service::auth::v3::CheckRequest& request,\n                              uint64_t max_request_bytes, bool pack_as_bytes,\n                              bool include_peer_certificate);\n\n  /**\n   * createTcpCheck is used to extract the attributes from the network layer and fill them up\n   * in the CheckRequest proto message.\n   * @param callbacks supplies the network layer context from which data can be extracted.\n   * @param request is the reference to the check request that will be filled up.\n   * @param include_peer_certificate whether to include the peer certificate in the check request.\n   */\n  static void createTcpCheck(const Network::ReadFilterCallbacks* callbacks,\n                             envoy::service::auth::v3::CheckRequest& request,\n                             bool include_peer_certificate);\n\nprivate:\n  static void setAttrContextPeer(envoy::service::auth::v3::AttributeContext::Peer& peer,\n                                 const Network::Connection& connection, const std::string& service,\n                                 const bool local, bool include_certificate);\n  static void setRequestTime(envoy::service::auth::v3::AttributeContext::Request& req,\n                             const StreamInfo::StreamInfo& stream_info);\n  static void setHttpRequest(envoy::service::auth::v3::AttributeContext::HttpRequest& httpreq,\n                             const uint64_t stream_id, const StreamInfo::StreamInfo& stream_info,\n                             const Buffer::Instance* decoding_buffer,\n                             const Envoy::Http::RequestHeaderMap& headers,\n   
                          uint64_t max_request_bytes, bool pack_as_bytes);\n  static void setAttrContextRequest(envoy::service::auth::v3::AttributeContext::Request& req,\n                                    const uint64_t stream_id,\n                                    const StreamInfo::StreamInfo& stream_info,\n                                    const Buffer::Instance* decoding_buffer,\n                                    const Envoy::Http::RequestHeaderMap& headers,\n                                    uint64_t max_request_bytes, bool pack_as_bytes);\n  static std::string getHeaderStr(const Envoy::Http::HeaderEntry* entry);\n  static Envoy::Http::HeaderMap::Iterate fillHttpHeaders(const Envoy::Http::HeaderEntry&, void*);\n};\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ext_authz/ext_authz.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/stream_info/stream_info.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\n/**\n * Constant values used for tracing metadata.\n */\nstruct TracingConstantValues {\n  const std::string TraceStatus = \"ext_authz_status\";\n  const std::string TraceUnauthz = \"ext_authz_unauthorized\";\n  const std::string TraceOk = \"ext_authz_ok\";\n  const std::string HttpStatus = \"ext_authz_http_status\";\n};\n\nusing TracingConstants = ConstSingleton<TracingConstantValues>;\n\n/**\n * Constant auth related HTTP headers. All lower case. This group of headers can\n * contain prefix override headers.\n */\nclass HeaderValues {\npublic:\n  const char* prefix() const { return ThreadSafeSingleton<Http::PrefixValue>::get().prefix(); }\n\n  const Http::LowerCaseString EnvoyAuthPartialBody{absl::StrCat(prefix(), \"-auth-partial-body\")};\n  const Http::LowerCaseString EnvoyAuthHeadersToRemove{\n      absl::StrCat(prefix(), \"-auth-headers-to-remove\")};\n};\n\nusing Headers = ConstSingleton<HeaderValues>;\n\n/**\n * Possible async results for a check call.\n */\nenum class CheckStatus {\n  // The request is authorized.\n  OK,\n  // The authz service could not be queried.\n  Error,\n  // The request is denied.\n  Denied\n};\n\n/**\n * Possible error kind for Error status..\n */\nenum class ErrorKind {\n  // Other error.\n  Other,\n  // The request timed out. 
This will only be set if the timeout is measure when the check request\n  // was created.\n  Timedout,\n};\n\n/**\n * Authorization response object for a RequestCallback.\n */\nstruct Response {\n  // Call status.\n  CheckStatus status;\n\n  // In case status is Error, this will contain the kind of error that occurred.\n  ErrorKind error_kind{ErrorKind::Other};\n\n  // A set of HTTP headers returned by the authorization server, that will be optionally appended\n  // to the request to the upstream server.\n  Http::HeaderVector headers_to_append;\n  // A set of HTTP headers returned by the authorization server, will be optionally set\n  // (using \"setCopy\") to the request to the upstream server.\n  Http::HeaderVector headers_to_set;\n  // A set of HTTP headers returned by the authorization server, will be optionally added\n  // (using \"addCopy\") to the request to the upstream server.\n  Http::HeaderVector headers_to_add;\n  // A set of HTTP headers consumed by the authorization server, will be removed\n  // from the request to the upstream server.\n  std::vector<Envoy::Http::LowerCaseString> headers_to_remove;\n  // Optional http body used only on denied response.\n  std::string body;\n  // Optional http status used only on denied response.\n  Http::Code status_code{};\n\n  // A set of metadata returned by the authorization server, that will be emitted as filter's\n  // dynamic metadata that other filters can leverage.\n  ProtobufWkt::Struct dynamic_metadata;\n};\n\nusing ResponsePtr = std::unique_ptr<Response>;\n\n/**\n * Async callbacks used during check() calls.\n */\nclass RequestCallbacks {\npublic:\n  virtual ~RequestCallbacks() = default;\n\n  /**\n   * Called when a check request is complete. 
The resulting ResponsePtr is supplied.\n   */\n  virtual void onComplete(ResponsePtr&& response) PURE;\n};\n\nclass Client {\npublic:\n  // Destructor\n  virtual ~Client() = default;\n\n  /**\n   * Cancel an inflight Check request.\n   */\n  virtual void cancel() PURE;\n\n  /**\n   * Request a check call to an external authorization service which can use the\n   * passed request parameters to make a permit/deny decision.\n   * @param callback supplies the completion callbacks.\n   *        NOTE: The callback may happen within the calling stack.\n   * @param dispatcher is the dispatcher of the current thread.\n   * @param request is the proto message with the attributes of the specific payload.\n   * @param parent_span source for generating an egress child span as part of the trace.\n   * @param stream_info supplies the client's stream info.\n   */\n  virtual void check(RequestCallbacks& callback, Event::Dispatcher& dispatcher,\n                     const envoy::service::auth::v3::CheckRequest& request,\n                     Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) PURE;\n\nprotected:\n  /**\n   * @return should we start the request time out when the check request is created.\n   */\n  static bool timeoutStartsAtCheckCreation() {\n    return Runtime::runtimeFeatureEnabled(\n        \"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\");\n  }\n};\n\nusing ClientPtr = std::unique_ptr<Client>;\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc",
    "content": "#include \"extensions/filters/common/ext_authz/ext_authz_grpc_impl.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/service/auth/v2alpha/external_auth.pb.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/grpc/async_client_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nGrpcClientImpl::GrpcClientImpl(Grpc::RawAsyncClientSharedPtr async_client,\n                               const absl::optional<std::chrono::milliseconds>& timeout,\n                               envoy::config::core::v3::ApiVersion transport_api_version,\n                               bool use_alpha)\n    : async_client_(async_client), timeout_(timeout),\n      service_method_(Grpc::VersionedMethods(\"envoy.service.auth.v3.Authorization.Check\",\n                                             \"envoy.service.auth.v2.Authorization.Check\",\n                                             \"envoy.service.auth.v2alpha.Authorization.Check\")\n                          .getMethodDescriptorForVersion(transport_api_version, use_alpha)),\n      transport_api_version_(transport_api_version) {}\n\nGrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); }\n\nvoid GrpcClientImpl::cancel() {\n  ASSERT(callbacks_ != nullptr);\n  request_->cancel();\n  callbacks_ = nullptr;\n  timeout_timer_.reset();\n}\n\nvoid GrpcClientImpl::check(RequestCallbacks& callbacks, Event::Dispatcher& dispatcher,\n                           const envoy::service::auth::v3::CheckRequest& request,\n                           Tracing::Span& parent_span, const StreamInfo::StreamInfo&) {\n  ASSERT(callbacks_ == nullptr);\n  callbacks_ = &callbacks;\n\n  Http::AsyncClient::RequestOptions options;\n  if (timeout_.has_value()) 
{\n    if (timeoutStartsAtCheckCreation()) {\n      // TODO(yuval-k): We currently use dispatcher based timeout even if the underlying client is\n      // google gRPC client, which has it's own timeout mechanism. We may want to change that in\n      // the future if the implementations converge.\n      timeout_timer_ = dispatcher.createTimer([this]() -> void { onTimeout(); });\n      timeout_timer_->enableTimer(timeout_.value());\n    } else {\n      // not starting timer on check creation, set the timeout on the request.\n      options.setTimeout(timeout_);\n    }\n  }\n\n  ENVOY_LOG(trace, \"Sending CheckRequest: {}\", request.DebugString());\n  request_ = async_client_->send(service_method_, request, *this, parent_span, options,\n                                 transport_api_version_);\n}\n\nvoid GrpcClientImpl::onSuccess(std::unique_ptr<envoy::service::auth::v3::CheckResponse>&& response,\n                               Tracing::Span& span) {\n  ENVOY_LOG(trace, \"Received CheckResponse: {}\", response->DebugString());\n  ResponsePtr authz_response = std::make_unique<Response>(Response{});\n  if (response->status().code() == Grpc::Status::WellKnownGrpcStatus::Ok) {\n    span.setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceOk);\n    authz_response->status = CheckStatus::OK;\n    if (response->has_ok_response()) {\n      toAuthzResponseHeader(authz_response, response->ok_response().headers());\n      if (response->ok_response().headers_to_remove_size() > 0) {\n        for (const auto& header : response->ok_response().headers_to_remove()) {\n          authz_response->headers_to_remove.push_back(Http::LowerCaseString(header));\n        }\n      }\n    }\n  } else {\n    span.setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceUnauthz);\n    authz_response->status = CheckStatus::Denied;\n    if (response->has_denied_response()) {\n      toAuthzResponseHeader(authz_response, response->denied_response().headers());\n      
authz_response->status_code =\n          static_cast<Http::Code>(response->denied_response().status().code());\n      authz_response->body = response->denied_response().body();\n    } else {\n      authz_response->status_code = Http::Code::Forbidden;\n    }\n  }\n\n  // OkHttpResponse.dynamic_metadata is deprecated. Until OkHttpResponse.dynamic_metadata is\n  // removed, it overrides dynamic_metadata field of the outer check response.\n  if (response->has_ok_response() && response->ok_response().has_dynamic_metadata()) {\n    authz_response->dynamic_metadata = response->ok_response().dynamic_metadata();\n  } else {\n    authz_response->dynamic_metadata = response->dynamic_metadata();\n  }\n\n  callbacks_->onComplete(std::move(authz_response));\n  callbacks_ = nullptr;\n  timeout_timer_.reset();\n}\n\nvoid GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string&,\n                               Tracing::Span&) {\n  ENVOY_LOG(trace, \"CheckRequest call failed with status: {}\",\n            Grpc::Utility::grpcStatusToString(status));\n  ASSERT(status != Grpc::Status::WellKnownGrpcStatus::Ok);\n  timeout_timer_.reset();\n  respondFailure(ErrorKind::Other);\n}\n\nvoid GrpcClientImpl::onTimeout() {\n  ENVOY_LOG(trace, \"CheckRequest timed-out\");\n  ASSERT(request_ != nullptr);\n  request_->cancel();\n  // let the client know of failure:\n  respondFailure(ErrorKind::Timedout);\n}\n\nvoid GrpcClientImpl::respondFailure(ErrorKind kind) {\n  Response response{};\n  response.status = CheckStatus::Error;\n  response.status_code = Http::Code::Forbidden;\n  response.error_kind = kind;\n  callbacks_->onComplete(std::make_unique<Response>(response));\n  callbacks_ = nullptr;\n}\n\nvoid GrpcClientImpl::toAuthzResponseHeader(\n    ResponsePtr& response,\n    const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValueOption>& headers) {\n  for (const auto& header : headers) {\n    if (header.append().value()) {\n      
response->headers_to_append.emplace_back(Http::LowerCaseString(header.header().key()),\n                                               header.header().value());\n    } else {\n      response->headers_to_set.emplace_back(Http::LowerCaseString(header.header().key()),\n                                            header.header().value());\n    }\n  }\n}\n\nconst Grpc::RawAsyncClientSharedPtr AsyncClientCache::getOrCreateAsyncClient(\n    const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& proto_config) {\n  // The cache stores Google gRPC client, so channel is not created for each request.\n  ASSERT(proto_config.has_grpc_service() && proto_config.grpc_service().has_google_grpc());\n  auto& cache = tls_slot_->getTyped<ThreadLocalCache>();\n  const std::size_t cache_key = MessageUtil::hash(proto_config.grpc_service().google_grpc());\n  const auto it = cache.async_clients_.find(cache_key);\n  if (it != cache.async_clients_.end()) {\n    return it->second;\n  }\n  const Grpc::AsyncClientFactoryPtr factory =\n      async_client_manager_.factoryForGrpcService(proto_config.grpc_service(), scope_, true);\n  const Grpc::RawAsyncClientSharedPtr async_client = factory->create();\n  cache.async_clients_.emplace(cache_key, async_client);\n  return async_client;\n}\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/grpc/async_client_manager.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/protocol.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/grpc/typed_async_client.h\"\n\n#include \"extensions/filters/common/ext_authz/check_request_utils.h\"\n#include \"extensions/filters/common/ext_authz/ext_authz.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nusing ExtAuthzAsyncCallbacks = Grpc::AsyncRequestCallbacks<envoy::service::auth::v3::CheckResponse>;\n\n/*\n * This client implementation is used when the Ext_Authz filter needs to communicate with an gRPC\n * authorization server. Unlike the HTTP client, the gRPC allows the server to define response\n * objects which contain the HTTP attributes to be sent to the upstream or to the downstream client.\n * The gRPC client does not rewrite path. NOTE: We create gRPC client for each filter stack instead\n * of a client per thread. 
That is ok since this is unary RPC and the cost of doing this is minimal.\n */\nclass GrpcClientImpl : public Client,\n                       public ExtAuthzAsyncCallbacks,\n                       public Logger::Loggable<Logger::Id::ext_authz> {\npublic:\n  // TODO(gsagula): remove `use_alpha` param when V2Alpha gets deprecated.\n  GrpcClientImpl(Grpc::RawAsyncClientSharedPtr async_client,\n                 const absl::optional<std::chrono::milliseconds>& timeout,\n                 envoy::config::core::v3::ApiVersion transport_api_version, bool use_alpha);\n  ~GrpcClientImpl() override;\n\n  // ExtAuthz::Client\n  void cancel() override;\n  void check(RequestCallbacks& callbacks, Event::Dispatcher& dispatcher,\n             const envoy::service::auth::v3::CheckRequest& request, Tracing::Span& parent_span,\n             const StreamInfo::StreamInfo& stream_info) override;\n\n  // Grpc::AsyncRequestCallbacks\n  void onCreateInitialMetadata(Http::RequestHeaderMap&) override {}\n  void onSuccess(std::unique_ptr<envoy::service::auth::v3::CheckResponse>&& response,\n                 Tracing::Span& span) override;\n  void onFailure(Grpc::Status::GrpcStatus status, const std::string& message,\n                 Tracing::Span& span) override;\n\nprivate:\n  void onTimeout();\n  void respondFailure(Filters::Common::ExtAuthz::ErrorKind kind);\n  void toAuthzResponseHeader(\n      ResponsePtr& response,\n      const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValueOption>& headers);\n\n  Grpc::AsyncClient<envoy::service::auth::v3::CheckRequest, envoy::service::auth::v3::CheckResponse>\n      async_client_;\n  Grpc::AsyncRequest* request_{};\n  absl::optional<std::chrono::milliseconds> timeout_;\n  RequestCallbacks* callbacks_{};\n  const Protobuf::MethodDescriptor& service_method_;\n  const envoy::config::core::v3::ApiVersion transport_api_version_;\n  Event::TimerPtr timeout_timer_;\n};\n\nusing GrpcClientImplPtr = std::unique_ptr<GrpcClientImpl>;\n\n// The 
client cache for RawAsyncClient for Google grpc so channel is not created for each request.\n// TODO(fpliu233): The cache will cause resource leak that a new channel is created every time a new\n// config is pushed. Improve gRPC channel cache with better solution.\nclass AsyncClientCache : public Singleton::Instance {\npublic:\n  AsyncClientCache(Grpc::AsyncClientManager& async_client_manager, Stats::Scope& scope,\n                   ThreadLocal::SlotAllocator& tls)\n      : async_client_manager_(async_client_manager), scope_(scope), tls_slot_(tls.allocateSlot()) {\n    tls_slot_->set([](Event::Dispatcher&) { return std::make_shared<ThreadLocalCache>(); });\n  }\n\n  const Grpc::RawAsyncClientSharedPtr getOrCreateAsyncClient(\n      const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& proto_config);\n\nprivate:\n  /**\n   * Per-thread cache.\n   */\n  struct ThreadLocalCache : public ThreadLocal::ThreadLocalObject {\n    ThreadLocalCache() = default;\n    // The client cache stored with key as hash of\n    // envoy::config::core::v3::GrpcService::GoogleGrpc config.\n    // TODO(fpliu233): Remove when the cleaner and generic solution for gRPC is live.\n    absl::flat_hash_map<std::size_t, Grpc::RawAsyncClientSharedPtr> async_clients_;\n  };\n\n  Grpc::AsyncClientManager& async_client_manager_;\n  Stats::Scope& scope_;\n  ThreadLocal::SlotPtr tls_slot_;\n};\n\nusing AsyncClientCacheSharedPtr = std::shared_ptr<AsyncClientCache>;\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc",
    "content": "#include \"extensions/filters/common/ext_authz/ext_authz_http_impl.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/matchers.h\"\n#include \"common/http/async_client_impl.h\"\n#include \"common/http/codes.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nnamespace {\n\n// Static header map used for creating authorization requests.\nconst Http::HeaderMap& lengthZeroHeader() {\n  static const auto headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>(\n      {{Http::Headers::get().ContentLength, std::to_string(0)}});\n  return *headers;\n}\n\n// Static response used for creating authorization ERROR responses.\nconst Response& errorResponse() {\n  CONSTRUCT_ON_FIRST_USE(Response, Response{CheckStatus::Error,\n                                            ErrorKind::Other,\n                                            Http::HeaderVector{},\n                                            Http::HeaderVector{},\n                                            Http::HeaderVector{},\n                                            {{}},\n                                            EMPTY_STRING,\n                                            Http::Code::Forbidden,\n                                            ProtobufWkt::Struct{}});\n}\n\n// SuccessResponse used for creating either DENIED or OK authorization responses.\nstruct SuccessResponse {\n  SuccessResponse(const Http::HeaderMap& headers, const MatcherSharedPtr& matchers,\n                  const MatcherSharedPtr& append_matchers, Response&& response)\n      : headers_(headers), 
matchers_(matchers), append_matchers_(append_matchers),\n        response_(std::make_unique<Response>(response)) {\n    headers_.iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n      // UpstreamHeaderMatcher\n      if (matchers_->matches(header.key().getStringView())) {\n        response_->headers_to_set.emplace_back(\n            Http::LowerCaseString{std::string(header.key().getStringView())},\n            std::string(header.value().getStringView()));\n      }\n      if (append_matchers_->matches(header.key().getStringView())) {\n        // If there is an existing matching key in the current headers, the new entry will be\n        // appended with the same key. For example, given {\"key\": \"value1\"} headers, if there is\n        // a matching \"key\" from the authorization response headers {\"key\": \"value2\"}, the\n        // request to upstream server will have two entries for \"key\": {\"key\": \"value1\", \"key\":\n        // \"value2\"}.\n        response_->headers_to_add.emplace_back(\n            Http::LowerCaseString{std::string(header.key().getStringView())},\n            std::string(header.value().getStringView()));\n      }\n      return Http::HeaderMap::Iterate::Continue;\n    });\n  }\n\n  const Http::HeaderMap& headers_;\n  const MatcherSharedPtr& matchers_;\n  const MatcherSharedPtr& append_matchers_;\n  ResponsePtr response_;\n};\n\nenvoy::type::matcher::v3::StringMatcher\nignoreCaseStringMatcher(const envoy::type::matcher::v3::StringMatcher& matcher) {\n  const auto& match_pattern_case = matcher.match_pattern_case();\n  if (match_pattern_case == envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSafeRegex ||\n      match_pattern_case ==\n          envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex) {\n    return matcher;\n  }\n\n  envoy::type::matcher::v3::StringMatcher ignore_case;\n  ignore_case.set_ignore_case(true);\n  switch (matcher.match_pattern_case()) {\n  
case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact:\n    ignore_case.set_exact(matcher.exact());\n    break;\n  case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kPrefix:\n    ignore_case.set_prefix(matcher.prefix());\n    break;\n  case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSuffix:\n    ignore_case.set_suffix(matcher.suffix());\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  return ignore_case;\n}\n\nstd::vector<Matchers::StringMatcherPtr>\ncreateStringMatchers(const envoy::type::matcher::v3::ListStringMatcher& list,\n                     const bool disable_lowercase_string_matcher) {\n  std::vector<Matchers::StringMatcherPtr> matchers;\n  for (const auto& matcher : list.patterns()) {\n    matchers.push_back(std::make_unique<Matchers::StringMatcherImpl>(\n        disable_lowercase_string_matcher ? matcher : ignoreCaseStringMatcher(matcher)));\n  }\n  return matchers;\n}\n\n} // namespace\n\n// Matchers\nHeaderKeyMatcher::HeaderKeyMatcher(std::vector<Matchers::StringMatcherPtr>&& list)\n    : matchers_(std::move(list)) {}\n\nbool HeaderKeyMatcher::matches(absl::string_view key) const {\n  return std::any_of(matchers_.begin(), matchers_.end(),\n                     [&key](auto& matcher) { return matcher->match(key); });\n}\n\nNotHeaderKeyMatcher::NotHeaderKeyMatcher(std::vector<Matchers::StringMatcherPtr>&& list)\n    : matcher_(std::move(list)) {}\n\nbool NotHeaderKeyMatcher::matches(absl::string_view key) const { return !matcher_.matches(key); }\n\n// Config\nClientConfig::ClientConfig(const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& config,\n                           uint32_t timeout, absl::string_view path_prefix)\n    : enable_case_sensitive_string_matcher_(Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher\")),\n      request_header_matchers_(\n          
toRequestMatchers(config.http_service().authorization_request().allowed_headers(),\n                            enable_case_sensitive_string_matcher_)),\n      client_header_matchers_(\n          toClientMatchers(config.http_service().authorization_response().allowed_client_headers(),\n                           enable_case_sensitive_string_matcher_)),\n      upstream_header_matchers_(toUpstreamMatchers(\n          config.http_service().authorization_response().allowed_upstream_headers(),\n          enable_case_sensitive_string_matcher_)),\n      upstream_header_to_append_matchers_(toUpstreamMatchers(\n          config.http_service().authorization_response().allowed_upstream_headers_to_append(),\n          enable_case_sensitive_string_matcher_)),\n      cluster_name_(config.http_service().server_uri().cluster()), timeout_(timeout),\n      path_prefix_(path_prefix),\n      tracing_name_(fmt::format(\"async {} egress\", config.http_service().server_uri().cluster())),\n      request_headers_parser_(Router::HeaderParser::configure(\n          config.http_service().authorization_request().headers_to_add(), false)) {}\n\nMatcherSharedPtr\nClientConfig::toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& list,\n                                const bool disable_lowercase_string_matcher) {\n  const std::vector<Http::LowerCaseString> keys{\n      {Http::CustomHeaders::get().Authorization, Http::Headers::get().Method,\n       Http::Headers::get().Path, Http::Headers::get().Host}};\n\n  std::vector<Matchers::StringMatcherPtr> matchers(\n      createStringMatchers(list, disable_lowercase_string_matcher));\n  for (const auto& key : keys) {\n    envoy::type::matcher::v3::StringMatcher matcher;\n    matcher.set_exact(key.get());\n    matchers.push_back(std::make_unique<Matchers::StringMatcherImpl>(matcher));\n  }\n\n  return std::make_shared<HeaderKeyMatcher>(std::move(matchers));\n}\n\nMatcherSharedPtr\nClientConfig::toClientMatchers(const 
envoy::type::matcher::v3::ListStringMatcher& list,\n                               const bool disable_lowercase_string_matcher) {\n  std::vector<Matchers::StringMatcherPtr> matchers(\n      createStringMatchers(list, disable_lowercase_string_matcher));\n\n  // If list is empty, all authorization response headers, except Host, should be added to\n  // the client response.\n  if (matchers.empty()) {\n    envoy::type::matcher::v3::StringMatcher matcher;\n    matcher.set_exact(Http::Headers::get().Host.get());\n    matchers.push_back(std::make_unique<Matchers::StringMatcherImpl>(matcher));\n\n    return std::make_shared<NotHeaderKeyMatcher>(std::move(matchers));\n  }\n\n  // If not empty, all user defined matchers and default matcher's list will\n  // be used instead.\n  std::vector<Http::LowerCaseString> keys{\n      {Http::Headers::get().Status, Http::Headers::get().ContentLength,\n       Http::Headers::get().WWWAuthenticate, Http::Headers::get().Location}};\n\n  for (const auto& key : keys) {\n    envoy::type::matcher::v3::StringMatcher matcher;\n    matcher.set_exact(key.get());\n    matchers.push_back(std::make_unique<Matchers::StringMatcherImpl>(matcher));\n  }\n\n  return std::make_shared<HeaderKeyMatcher>(std::move(matchers));\n}\n\nMatcherSharedPtr\nClientConfig::toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& list,\n                                 const bool disable_lowercase_string_matcher) {\n  return std::make_unique<HeaderKeyMatcher>(\n      createStringMatchers(list, disable_lowercase_string_matcher));\n}\n\nRawHttpClientImpl::RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config)\n    : cm_(cm), config_(config) {}\n\nRawHttpClientImpl::~RawHttpClientImpl() { ASSERT(callbacks_ == nullptr); }\n\nvoid RawHttpClientImpl::cancel() {\n  ASSERT(callbacks_ != nullptr);\n  request_->cancel();\n  callbacks_ = nullptr;\n  timeout_timer_.reset();\n}\n\n// Client\nvoid RawHttpClientImpl::check(RequestCallbacks& 
callbacks, Event::Dispatcher& dispatcher,\n                              const envoy::service::auth::v3::CheckRequest& request,\n                              Tracing::Span& parent_span,\n                              const StreamInfo::StreamInfo& stream_info) {\n  ASSERT(callbacks_ == nullptr);\n  callbacks_ = &callbacks;\n\n  Http::RequestHeaderMapPtr headers;\n  const uint64_t request_length = request.attributes().request().http().body().size();\n  if (request_length > 0) {\n    headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>(\n        {{Http::Headers::get().ContentLength, std::to_string(request_length)}});\n  } else {\n    headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>(lengthZeroHeader());\n  }\n\n  for (const auto& header : request.attributes().request().http().headers()) {\n    const Http::LowerCaseString key{header.first};\n    // Skip setting content-length header since it is already configured at initialization.\n    if (key == Http::Headers::get().ContentLength) {\n      continue;\n    }\n\n    if (config_->requestHeaderMatchers()->matches(key.get())) {\n      if (key == Http::Headers::get().Path && !config_->pathPrefix().empty()) {\n        headers->addCopy(key, absl::StrCat(config_->pathPrefix(), header.second));\n      } else {\n        headers->addCopy(key, header.second);\n      }\n    }\n  }\n\n  config_->requestHeaderParser().evaluateHeaders(*headers, stream_info);\n\n  Http::RequestMessagePtr message =\n      std::make_unique<Envoy::Http::RequestMessageImpl>(std::move(headers));\n  if (request_length > 0) {\n    message->body().add(request.attributes().request().http().body());\n  }\n\n  const std::string& cluster = config_->cluster();\n\n  // It's possible that the cluster specified in the filter configuration no longer exists due to a\n  // CDS removal.\n  if (cm_.get(cluster) == nullptr) {\n    // TODO(dio): Add stats related to this.\n    ENVOY_LOG(debug, \"ext_authz cluster '{}' does not exist\", cluster);\n    
callbacks_->onComplete(std::make_unique<Response>(errorResponse()));\n    callbacks_ = nullptr;\n  } else {\n    auto options = Http::AsyncClient::RequestOptions()\n                       .setParentSpan(parent_span)\n                       .setChildSpanName(config_->tracingName());\n\n    if (timeoutStartsAtCheckCreation()) {\n      timeout_timer_ = dispatcher.createTimer([this]() -> void { onTimeout(); });\n      timeout_timer_->enableTimer(config_->timeout());\n    } else {\n      options.setTimeout(config_->timeout());\n    }\n\n    request_ = cm_.httpAsyncClientForCluster(cluster).send(std::move(message), *this, options);\n  }\n}\n\nvoid RawHttpClientImpl::onSuccess(const Http::AsyncClient::Request&,\n                                  Http::ResponseMessagePtr&& message) {\n  timeout_timer_.reset();\n  callbacks_->onComplete(toResponse(std::move(message)));\n  callbacks_ = nullptr;\n}\n\nvoid RawHttpClientImpl::onFailure(const Http::AsyncClient::Request&,\n                                  Http::AsyncClient::FailureReason reason) {\n  ASSERT(reason == Http::AsyncClient::FailureReason::Reset);\n  timeout_timer_.reset();\n  callbacks_->onComplete(std::make_unique<Response>(errorResponse()));\n  callbacks_ = nullptr;\n}\n\nvoid RawHttpClientImpl::onBeforeFinalizeUpstreamSpan(\n    Tracing::Span& span, const Http::ResponseHeaderMap* response_headers) {\n  if (response_headers != nullptr) {\n    const uint64_t status_code = Http::Utility::getResponseStatus(*response_headers);\n    span.setTag(TracingConstants::get().HttpStatus,\n                Http::CodeUtility::toString(static_cast<Http::Code>(status_code)));\n    span.setTag(TracingConstants::get().TraceStatus, status_code == enumToInt(Http::Code::OK)\n                                                         ? 
TracingConstants::get().TraceOk\n                                                         : TracingConstants::get().TraceUnauthz);\n  }\n}\n\nvoid RawHttpClientImpl::onTimeout() {\n  ENVOY_LOG(trace, \"CheckRequest timed-out\");\n  ASSERT(request_ != nullptr);\n  request_->cancel();\n  // let the client know of failure:\n  ASSERT(callbacks_ != nullptr);\n  Response response = errorResponse();\n  response.error_kind = ErrorKind::Timedout;\n  callbacks_->onComplete(std::make_unique<Response>(response));\n  callbacks_ = nullptr;\n}\n\nResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) {\n  const uint64_t status_code = Http::Utility::getResponseStatus(message->headers());\n\n  // Set an error status if the call to the authorization server returns any of the 5xx HTTP error\n  // codes. A Forbidden response is sent to the client if the filter has not been configured with\n  // failure_mode_allow.\n  if (Http::CodeUtility::is5xx(status_code)) {\n    return std::make_unique<Response>(errorResponse());\n  }\n\n  // Extract headers-to-remove from the storage header coming from the\n  // authorization server.\n  const auto& storage_header_name = Headers::get().EnvoyAuthHeadersToRemove;\n  // If we are going to construct an Ok response we need to save the\n  // headers_to_remove in a variable first.\n  std::vector<Http::LowerCaseString> headers_to_remove;\n  if (status_code == enumToInt(Http::Code::OK)) {\n    const auto& get_result = message->headers().getAll(storage_header_name);\n    for (size_t i = 0; i < get_result.size(); ++i) {\n      const Http::HeaderEntry* entry = get_result[i];\n      if (entry != nullptr) {\n        absl::string_view storage_header_value = entry->value().getStringView();\n        std::vector<absl::string_view> header_names = StringUtil::splitToken(\n            storage_header_value, \",\", /*keep_empty_string=*/false, /*trim_whitespace=*/true);\n        headers_to_remove.reserve(headers_to_remove.size() + 
header_names.size());\n        for (const auto& header_name : header_names) {\n          headers_to_remove.push_back(Http::LowerCaseString(std::string(header_name)));\n        }\n      }\n    }\n  }\n  // Now remove the storage header from the authz server response headers before\n  // we reuse them to construct an Ok/Denied authorization response below.\n  message->headers().remove(storage_header_name);\n\n  // Create an Ok authorization response.\n  if (status_code == enumToInt(Http::Code::OK)) {\n    SuccessResponse ok{message->headers(), config_->upstreamHeaderMatchers(),\n                       config_->upstreamHeaderToAppendMatchers(),\n                       Response{CheckStatus::OK, ErrorKind::Other, Http::HeaderVector{},\n                                Http::HeaderVector{}, Http::HeaderVector{},\n                                std::move(headers_to_remove), EMPTY_STRING, Http::Code::OK,\n                                ProtobufWkt::Struct{}}};\n    return std::move(ok.response_);\n  }\n\n  // Create a Denied authorization response.\n  SuccessResponse denied{message->headers(), config_->clientHeaderMatchers(),\n                         config_->upstreamHeaderToAppendMatchers(),\n                         Response{CheckStatus::Denied,\n                                  ErrorKind::Other,\n                                  Http::HeaderVector{},\n                                  Http::HeaderVector{},\n                                  Http::HeaderVector{},\n                                  {{}},\n                                  message->bodyAsString(),\n                                  static_cast<Http::Code>(status_code),\n                                  ProtobufWkt::Struct{}}};\n  return std::move(denied.response_);\n}\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ext_authz/ext_authz_http_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/matchers.h\"\n#include \"common/router/header_parser.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nclass Matcher;\nusing MatcherSharedPtr = std::shared_ptr<Matcher>;\n\n/**\n *  Matchers describe the rules for matching authorization request and response headers.\n */\nclass Matcher {\npublic:\n  virtual ~Matcher() = default;\n\n  /**\n   * Returns whether or not the header key matches the rules of the matcher.\n   *\n   * @param key supplies the header key to be evaluated.\n   */\n  virtual bool matches(absl::string_view key) const PURE;\n};\n\nclass HeaderKeyMatcher : public Matcher {\npublic:\n  HeaderKeyMatcher(std::vector<Matchers::StringMatcherPtr>&& list);\n\n  bool matches(absl::string_view key) const override;\n\nprivate:\n  const std::vector<Matchers::StringMatcherPtr> matchers_;\n};\n\nclass NotHeaderKeyMatcher : public Matcher {\npublic:\n  NotHeaderKeyMatcher(std::vector<Matchers::StringMatcherPtr>&& list);\n\n  bool matches(absl::string_view key) const override;\n\nprivate:\n  const HeaderKeyMatcher matcher_;\n};\n\n/**\n * HTTP client configuration for the HTTP authorization (ext_authz) filter.\n */\nclass ClientConfig {\npublic:\n  ClientConfig(const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& config,\n               uint32_t timeout, absl::string_view path_prefix);\n\n  /**\n   * Returns the name of the authorization cluster.\n   */\n  const std::string& cluster() { return cluster_name_; }\n\n  /**\n   * 
Returns the authorization request path prefix.\n   */\n  const std::string& pathPrefix() { return path_prefix_; }\n\n  /**\n   * Returns authorization request timeout.\n   */\n  const std::chrono::milliseconds& timeout() const { return timeout_; }\n\n  /**\n   * Returns a list of matchers used for selecting the request headers that should be sent to the\n   * authorization server.\n   */\n  const MatcherSharedPtr& requestHeaderMatchers() const { return request_header_matchers_; }\n\n  /**\n   * Returns a list of matchers used for selecting the authorization response headers that\n   * should be send back to the client.\n   */\n  const MatcherSharedPtr& clientHeaderMatchers() const { return client_header_matchers_; }\n\n  /**\n   * Returns a list of matchers used for selecting the authorization response headers that\n   * should be send to an the upstream server.\n   */\n  const MatcherSharedPtr& upstreamHeaderMatchers() const { return upstream_header_matchers_; }\n\n  /**\n   * Returns a list of matchers used for selecting the authorization response headers that\n   * should be sent to the upstream server. 
The same header keys will be appended, instead of\n   * be replaced.\n   */\n  const MatcherSharedPtr& upstreamHeaderToAppendMatchers() const {\n    return upstream_header_to_append_matchers_;\n  }\n\n  /**\n   * Returns the name used for tracing.\n   */\n  const std::string& tracingName() { return tracing_name_; }\n\n  /**\n   * Returns the configured request header parser.\n   */\n  const Router::HeaderParser& requestHeaderParser() const { return *request_headers_parser_; }\n\nprivate:\n  static MatcherSharedPtr\n  toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher,\n                    bool enable_case_sensitive_string_matcher);\n  static MatcherSharedPtr\n  toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher,\n                   bool enable_case_sensitive_string_matcher);\n  static MatcherSharedPtr\n  toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher,\n                     bool enable_case_sensitive_string_matcher);\n\n  const bool enable_case_sensitive_string_matcher_;\n  const MatcherSharedPtr request_header_matchers_;\n  const MatcherSharedPtr client_header_matchers_;\n  const MatcherSharedPtr upstream_header_matchers_;\n  const MatcherSharedPtr upstream_header_to_append_matchers_;\n  const Http::LowerCaseStrPairVector authorization_headers_to_add_;\n  const std::string cluster_name_;\n  const std::chrono::milliseconds timeout_;\n  const std::string path_prefix_;\n  const std::string tracing_name_;\n  Router::HeaderParserPtr request_headers_parser_;\n};\n\nusing ClientConfigSharedPtr = std::shared_ptr<ClientConfig>;\n\n/**\n * This client implementation is used when the Ext_Authz filter needs to communicate with an\n * HTTP authorization server. Unlike the gRPC client that allows the server to define the\n * response object, in the HTTP client, all headers and body provided in the response are\n * dispatched to the downstream, and some headers to the upstream. 
The HTTP client also allows\n * setting a path prefix witch is not available for gRPC.\n */\nclass RawHttpClientImpl : public Client,\n                          public Http::AsyncClient::Callbacks,\n                          Logger::Loggable<Logger::Id::config> {\npublic:\n  explicit RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config);\n  ~RawHttpClientImpl() override;\n\n  // ExtAuthz::Client\n  void cancel() override;\n  void check(RequestCallbacks& callbacks, Event::Dispatcher& dispatcher,\n             const envoy::service::auth::v3::CheckRequest& request, Tracing::Span& parent_span,\n             const StreamInfo::StreamInfo& stream_info) override;\n\n  // Http::AsyncClient::Callbacks\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& message) override;\n  void onFailure(const Http::AsyncClient::Request&,\n                 Http::AsyncClient::FailureReason reason) override;\n  void onBeforeFinalizeUpstreamSpan(Tracing::Span& span,\n                                    const Http::ResponseHeaderMap* response_headers) override;\n\nprivate:\n  void onTimeout();\n  ResponsePtr toResponse(Http::ResponseMessagePtr message);\n\n  Upstream::ClusterManager& cm_;\n  ClientConfigSharedPtr config_;\n  Http::AsyncClient::Request* request_{};\n  RequestCallbacks* callbacks_{};\n  Event::TimerPtr timeout_timer_;\n};\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/fault/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"fault_config_lib\",\n    srcs = [\"fault_config.cc\"],\n    hdrs = [\"fault_config.h\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/common/fault/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/common/fault/fault_config.cc",
    "content": "#include \"extensions/filters/common/fault/fault_config.h\"\n\n#include \"envoy/extensions/filters/common/fault/v3/fault.pb.h\"\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Fault {\n\nenvoy::type::v3::FractionalPercent\nHeaderPercentageProvider::percentage(const Http::RequestHeaderMap* request_headers) const {\n  if (request_headers == nullptr) {\n    // If request_headers is nullptr, return the default percentage.\n    return percentage_;\n  }\n  const auto header = request_headers->get(header_name_);\n  if (header == nullptr) {\n    return percentage_;\n  }\n\n  uint32_t header_numerator;\n  if (!absl::SimpleAtoi(header->value().getStringView(), &header_numerator)) {\n    return percentage_;\n  }\n\n  envoy::type::v3::FractionalPercent result;\n  result.set_numerator(std::min(header_numerator, percentage_.numerator()));\n  result.set_denominator(percentage_.denominator());\n  return result;\n}\n\nFaultAbortConfig::FaultAbortConfig(\n    const envoy::extensions::filters::http::fault::v3::FaultAbort& abort_config) {\n  switch (abort_config.error_type_case()) {\n  case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::kHttpStatus:\n    provider_ =\n        std::make_unique<FixedAbortProvider>(static_cast<Http::Code>(abort_config.http_status()),\n                                             absl::nullopt, abort_config.percentage());\n    break;\n  case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::kGrpcStatus:\n    provider_ = std::make_unique<FixedAbortProvider>(\n        absl::nullopt, static_cast<Grpc::Status::GrpcStatus>(abort_config.grpc_status()),\n        abort_config.percentage());\n    break;\n  case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::kHeaderAbort:\n    provider_ = 
std::make_unique<HeaderAbortProvider>(abort_config.percentage());\n    break;\n  case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::ERROR_TYPE_NOT_SET:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nabsl::optional<Http::Code> FaultAbortConfig::HeaderAbortProvider::httpStatusCode(\n    const Http::RequestHeaderMap* request_headers) const {\n  absl::optional<Http::Code> ret = absl::nullopt;\n  auto header = request_headers->get(Filters::Common::Fault::HeaderNames::get().AbortRequest);\n  if (header == nullptr) {\n    return ret;\n  }\n\n  uint64_t code;\n  if (!absl::SimpleAtoi(header->value().getStringView(), &code)) {\n    return ret;\n  }\n\n  if (code >= 200 && code < 600) {\n    ret = static_cast<Http::Code>(code);\n  }\n\n  return ret;\n}\n\nabsl::optional<Grpc::Status::GrpcStatus> FaultAbortConfig::HeaderAbortProvider::grpcStatusCode(\n    const Http::RequestHeaderMap* request_headers) const {\n  auto header = request_headers->get(Filters::Common::Fault::HeaderNames::get().AbortGrpcRequest);\n  if (header == nullptr) {\n    return absl::nullopt;\n  }\n\n  uint64_t code;\n  if (!absl::SimpleAtoi(header->value().getStringView(), &code)) {\n    return absl::nullopt;\n  }\n\n  return static_cast<Grpc::Status::GrpcStatus>(code);\n}\n\nFaultDelayConfig::FaultDelayConfig(\n    const envoy::extensions::filters::common::fault::v3::FaultDelay& delay_config) {\n  switch (delay_config.fault_delay_secifier_case()) {\n  case envoy::extensions::filters::common::fault::v3::FaultDelay::FaultDelaySecifierCase::\n      kFixedDelay:\n    provider_ = std::make_unique<FixedDelayProvider>(\n        std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(delay_config, fixed_delay)),\n        delay_config.percentage());\n    break;\n  case envoy::extensions::filters::common::fault::v3::FaultDelay::FaultDelaySecifierCase::\n      kHeaderDelay:\n    provider_ = std::make_unique<HeaderDelayProvider>(delay_config.percentage());\n    break;\n  case 
envoy::extensions::filters::common::fault::v3::FaultDelay::FaultDelaySecifierCase::\n      FAULT_DELAY_SECIFIER_NOT_SET:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nabsl::optional<std::chrono::milliseconds> FaultDelayConfig::HeaderDelayProvider::duration(\n    const Http::RequestHeaderMap* request_headers) const {\n  const auto header = request_headers->get(HeaderNames::get().DelayRequest);\n  if (header == nullptr) {\n    return absl::nullopt;\n  }\n\n  uint64_t value;\n  if (!absl::SimpleAtoi(header->value().getStringView(), &value)) {\n    return absl::nullopt;\n  }\n\n  return std::chrono::milliseconds(value);\n}\n\nFaultRateLimitConfig::FaultRateLimitConfig(\n    const envoy::extensions::filters::common::fault::v3::FaultRateLimit& rate_limit_config) {\n  switch (rate_limit_config.limit_type_case()) {\n  case envoy::extensions::filters::common::fault::v3::FaultRateLimit::LimitTypeCase::kFixedLimit:\n    provider_ = std::make_unique<FixedRateLimitProvider>(\n        rate_limit_config.fixed_limit().limit_kbps(), rate_limit_config.percentage());\n    break;\n  case envoy::extensions::filters::common::fault::v3::FaultRateLimit::LimitTypeCase::kHeaderLimit:\n    provider_ = std::make_unique<HeaderRateLimitProvider>(rate_limit_config.percentage());\n    break;\n  case envoy::extensions::filters::common::fault::v3::FaultRateLimit::LimitTypeCase::\n      LIMIT_TYPE_NOT_SET:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nabsl::optional<uint64_t> FaultRateLimitConfig::HeaderRateLimitProvider::rateKbps(\n    const Http::RequestHeaderMap* request_headers) const {\n  const auto header = request_headers->get(HeaderNames::get().ThroughputResponse);\n  if (header == nullptr) {\n    return absl::nullopt;\n  }\n\n  uint64_t value;\n  if (!absl::SimpleAtoi(header->value().getStringView(), &value)) {\n    return absl::nullopt;\n  }\n\n  if (value == 0) {\n    return absl::nullopt;\n  }\n\n  return value;\n}\n\n} // namespace Fault\n} // namespace Common\n} // namespace Filters\n} 
// namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/fault/fault_config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/common/fault/v3/fault.pb.h\"\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/http/codes.h\"\n#include \"common/http/headers.h\"\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Fault {\n\nclass HeaderNameValues {\npublic:\n  const char* prefix() const { return ThreadSafeSingleton<Http::PrefixValue>::get().prefix(); }\n\n  const Http::LowerCaseString AbortRequest{absl::StrCat(prefix(), \"-fault-abort-request\")};\n  const Http::LowerCaseString AbortRequestPercentage{\n      absl::StrCat(prefix(), \"-fault-abort-request-percentage\")};\n  const Http::LowerCaseString AbortGrpcRequest{absl::StrCat(prefix(), \"-fault-abort-grpc-request\")};\n  const Http::LowerCaseString DelayRequest{absl::StrCat(prefix(), \"-fault-delay-request\")};\n  const Http::LowerCaseString DelayRequestPercentage{\n      absl::StrCat(prefix(), \"-fault-delay-request-percentage\")};\n  const Http::LowerCaseString ThroughputResponse{\n      absl::StrCat(prefix(), \"-fault-throughput-response\")};\n  const Http::LowerCaseString ThroughputResponsePercentage{\n      absl::StrCat(prefix(), \"-fault-throughput-response-percentage\")};\n};\n\nusing HeaderNames = ConstSingleton<HeaderNameValues>;\n\nclass HeaderPercentageProvider {\npublic:\n  HeaderPercentageProvider(const Http::LowerCaseString& header_name,\n                           const envoy::type::v3::FractionalPercent& percentage)\n      : header_name_(header_name), percentage_(percentage) {}\n\n  // Return the percentage. 
Optionally passed HTTP headers that may contain the percentage number,\n  // otherwise the percentage passed at the initialized time is returned.\n  envoy::type::v3::FractionalPercent\n  percentage(const Http::RequestHeaderMap* request_headers) const;\n\nprivate:\n  const Http::LowerCaseString header_name_;\n  const envoy::type::v3::FractionalPercent percentage_;\n};\n\nclass FaultAbortConfig {\npublic:\n  FaultAbortConfig(const envoy::extensions::filters::http::fault::v3::FaultAbort& abort_config);\n\n  absl::optional<Http::Code> httpStatusCode(const Http::RequestHeaderMap* request_headers) const {\n    return provider_->httpStatusCode(request_headers);\n  }\n  absl::optional<Grpc::Status::GrpcStatus>\n  grpcStatusCode(const Http::RequestHeaderMap* request_headers) const {\n    return provider_->grpcStatusCode(request_headers);\n  }\n\n  envoy::type::v3::FractionalPercent\n  percentage(const Http::RequestHeaderMap* request_headers) const {\n    return provider_->percentage(request_headers);\n  }\n\nprivate:\n  // Abstract abort provider.\n  class AbortProvider {\n  public:\n    virtual ~AbortProvider() = default;\n\n    // Return the HTTP status code to use. Optionally passed HTTP headers that may contain the\n    // HTTP status code depending on the provider implementation.\n    virtual absl::optional<Http::Code>\n    httpStatusCode(const Http::RequestHeaderMap* request_headers) const PURE;\n\n    // Return the gRPC status code to use. Optionally passed an HTTP header that may contain the\n    // gRPC status code depending on the provider implementation.\n    virtual absl::optional<Grpc::Status::GrpcStatus>\n    grpcStatusCode(const Http::RequestHeaderMap* request_headers) const PURE;\n\n    // Return what percentage of requests abort faults should be applied to. 
Optionally passed\n    // HTTP headers that may contain the percentage depending on the provider implementation.\n    virtual envoy::type::v3::FractionalPercent\n    percentage(const Http::RequestHeaderMap* request_headers) const PURE;\n  };\n\n  // Abort provider that uses a fixed abort status code.\n  class FixedAbortProvider : public AbortProvider {\n  public:\n    FixedAbortProvider(absl::optional<Http::Code> http_status_code,\n                       absl::optional<Grpc::Status::GrpcStatus> grpc_status_code,\n                       const envoy::type::v3::FractionalPercent& percentage)\n        : http_status_code_(http_status_code), grpc_status_code_(grpc_status_code),\n          percentage_(percentage) {}\n\n    absl::optional<Http::Code> httpStatusCode(const Http::RequestHeaderMap*) const override {\n      return http_status_code_;\n    }\n\n    absl::optional<Grpc::Status::GrpcStatus>\n    grpcStatusCode(const Http::RequestHeaderMap*) const override {\n      return grpc_status_code_;\n    }\n\n    envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap*) const override {\n      return percentage_;\n    }\n\n  private:\n    const absl::optional<Http::Code> http_status_code_;\n    const absl::optional<Grpc::Status::GrpcStatus> grpc_status_code_;\n    const envoy::type::v3::FractionalPercent percentage_;\n  };\n\n  // Abort provider the reads a status code from an HTTP header.\n  class HeaderAbortProvider : public AbortProvider {\n  public:\n    HeaderAbortProvider(const envoy::type::v3::FractionalPercent& percentage)\n        : header_percentage_provider_(HeaderNames::get().AbortRequestPercentage, percentage) {}\n\n    absl::optional<Http::Code>\n    httpStatusCode(const Http::RequestHeaderMap* request_headers) const override;\n\n    absl::optional<Grpc::Status::GrpcStatus>\n    grpcStatusCode(const Http::RequestHeaderMap* request_headers) const override;\n\n    envoy::type::v3::FractionalPercent\n    percentage(const Http::RequestHeaderMap* 
request_headers) const override {\n      return header_percentage_provider_.percentage(request_headers);\n    }\n\n  private:\n    HeaderPercentageProvider header_percentage_provider_;\n  };\n\n  using AbortProviderPtr = std::unique_ptr<AbortProvider>;\n\n  AbortProviderPtr provider_;\n};\n\nusing FaultAbortConfigPtr = std::unique_ptr<FaultAbortConfig>;\n\n/**\n * Generic configuration for a delay fault.\n */\nclass FaultDelayConfig {\npublic:\n  FaultDelayConfig(const envoy::extensions::filters::common::fault::v3::FaultDelay& delay_config);\n\n  absl::optional<std::chrono::milliseconds>\n  duration(const Http::RequestHeaderMap* request_headers) const {\n    return provider_->duration(request_headers);\n  }\n\n  envoy::type::v3::FractionalPercent\n  percentage(const Http::RequestHeaderMap* request_headers) const {\n    return provider_->percentage(request_headers);\n  }\n\nprivate:\n  // Abstract delay provider.\n  class DelayProvider {\n  public:\n    virtual ~DelayProvider() = default;\n\n    // Return the duration to use. Optionally passed HTTP headers that may contain the delay\n    // depending on the provider implementation.\n    virtual absl::optional<std::chrono::milliseconds>\n    duration(const Http::RequestHeaderMap* request_headers) const PURE;\n    // Return what percentage of requests request faults should be applied to. 
Optionally passed\n    // HTTP headers that may contain the percentage depending on the provider implementation.\n    virtual envoy::type::v3::FractionalPercent\n    percentage(const Http::RequestHeaderMap* request_headers) const PURE;\n  };\n\n  // Delay provider that uses a fixed delay.\n  class FixedDelayProvider : public DelayProvider {\n  public:\n    FixedDelayProvider(std::chrono::milliseconds delay,\n                       const envoy::type::v3::FractionalPercent& percentage)\n        : delay_(delay), percentage_(percentage) {}\n\n    // DelayProvider\n    absl::optional<std::chrono::milliseconds>\n    duration(const Http::RequestHeaderMap*) const override {\n      return delay_;\n    }\n\n    envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap*) const override {\n      return percentage_;\n    }\n\n  private:\n    const std::chrono::milliseconds delay_;\n    const envoy::type::v3::FractionalPercent percentage_;\n  };\n\n  // Delay provider the reads a delay from an HTTP header.\n  class HeaderDelayProvider : public DelayProvider {\n  public:\n    HeaderDelayProvider(const envoy::type::v3::FractionalPercent& percentage)\n        : header_percentage_provider_(HeaderNames::get().DelayRequestPercentage, percentage) {}\n\n    // DelayProvider\n    absl::optional<std::chrono::milliseconds>\n    duration(const Http::RequestHeaderMap* request_headers) const override;\n\n    envoy::type::v3::FractionalPercent\n    percentage(const Http::RequestHeaderMap* request_headers) const override {\n      return header_percentage_provider_.percentage(request_headers);\n    }\n\n  private:\n    HeaderPercentageProvider header_percentage_provider_;\n  };\n\n  using DelayProviderPtr = std::unique_ptr<DelayProvider>;\n\n  DelayProviderPtr provider_;\n};\n\nusing FaultDelayConfigPtr = std::unique_ptr<FaultDelayConfig>;\nusing FaultDelayConfigSharedPtr = std::shared_ptr<FaultDelayConfig>;\n\n/**\n * Generic configuration for a rate limit fault.\n */\nclass 
FaultRateLimitConfig {\npublic:\n  FaultRateLimitConfig(\n      const envoy::extensions::filters::common::fault::v3::FaultRateLimit& rate_limit_config);\n\n  absl::optional<uint64_t> rateKbps(const Http::RequestHeaderMap* request_headers) const {\n    return provider_->rateKbps(request_headers);\n  }\n\n  envoy::type::v3::FractionalPercent\n  percentage(const Http::RequestHeaderMap* request_headers) const {\n    return provider_->percentage(request_headers);\n  }\n\nprivate:\n  // Abstract rate limit provider.\n  class RateLimitProvider {\n  public:\n    virtual ~RateLimitProvider() = default;\n\n    // Return the rate limit to use in KiB/s. Optionally passed HTTP headers that may contain the\n    // rate limit depending on the provider implementation.\n    virtual absl::optional<uint64_t>\n    rateKbps(const Http::RequestHeaderMap* request_headers) const PURE;\n    // Return what percentage of requests response rate limit faults should be applied to.\n    // Optionally passed HTTP headers that may contain the percentage depending on the provider\n    // implementation.\n    virtual envoy::type::v3::FractionalPercent\n    percentage(const Http::RequestHeaderMap* request_headers) const PURE;\n  };\n\n  // Rate limit provider that uses a fixed rate limit.\n  class FixedRateLimitProvider : public RateLimitProvider {\n  public:\n    FixedRateLimitProvider(uint64_t fixed_rate_kbps,\n                           const envoy::type::v3::FractionalPercent& percentage)\n        : fixed_rate_kbps_(fixed_rate_kbps), percentage_(percentage) {}\n    absl::optional<uint64_t> rateKbps(const Http::RequestHeaderMap*) const override {\n      return fixed_rate_kbps_;\n    }\n\n    envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap*) const override {\n      return percentage_;\n    }\n\n  private:\n    const uint64_t fixed_rate_kbps_;\n    const envoy::type::v3::FractionalPercent percentage_;\n  };\n\n  // Rate limit provider that reads the rate limit from an HTTP 
header.\n  class HeaderRateLimitProvider : public RateLimitProvider {\n  public:\n    HeaderRateLimitProvider(const envoy::type::v3::FractionalPercent& percentage)\n        : header_percentage_provider_(HeaderNames::get().ThroughputResponsePercentage, percentage) {\n    }\n    // RateLimitProvider\n    absl::optional<uint64_t> rateKbps(const Http::RequestHeaderMap* request_headers) const override;\n    envoy::type::v3::FractionalPercent\n    percentage(const Http::RequestHeaderMap* request_headers) const override {\n      return header_percentage_provider_.percentage(request_headers);\n    }\n\n  private:\n    HeaderPercentageProvider header_percentage_provider_;\n  };\n\n  using RateLimitProviderPtr = std::unique_ptr<RateLimitProvider>;\n\n  RateLimitProviderPtr provider_;\n};\n\nusing FaultRateLimitConfigPtr = std::unique_ptr<FaultRateLimitConfig>;\n\n} // namespace Fault\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/local_ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"local_ratelimit_lib\",\n    srcs = [\"local_ratelimit_impl.cc\"],\n    hdrs = [\"local_ratelimit_impl.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//source/common/common:thread_synchronizer_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/common/local_ratelimit/local_ratelimit_impl.cc",
    "content": "#include \"extensions/filters/common/local_ratelimit/local_ratelimit_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace LocalRateLimit {\n\nLocalRateLimiterImpl::LocalRateLimiterImpl(const std::chrono::milliseconds fill_interval,\n                                           const uint32_t max_tokens,\n                                           const uint32_t tokens_per_fill,\n                                           Event::Dispatcher& dispatcher)\n    : fill_interval_(fill_interval), max_tokens_(max_tokens), tokens_per_fill_(tokens_per_fill),\n      fill_timer_(fill_interval_ > std::chrono::milliseconds(0)\n                      ? dispatcher.createTimer([this] { onFillTimer(); })\n                      : nullptr) {\n  if (fill_timer_ && fill_interval_ < std::chrono::milliseconds(50)) {\n    throw EnvoyException(\"local rate limit token bucket fill timer must be >= 50ms\");\n  }\n\n  tokens_ = max_tokens;\n\n  if (fill_timer_) {\n    fill_timer_->enableTimer(fill_interval_);\n  }\n}\n\nLocalRateLimiterImpl::~LocalRateLimiterImpl() {\n  if (fill_timer_ != nullptr) {\n    fill_timer_->disableTimer();\n  }\n}\n\nvoid LocalRateLimiterImpl::onFillTimer() {\n  // Relaxed consistency is used for all operations because we don't care about ordering, just the\n  // final atomic correctness.\n  uint32_t expected_tokens = tokens_.load(std::memory_order_relaxed);\n  uint32_t new_tokens_value;\n  do {\n    // expected_tokens is either initialized above or reloaded during the CAS failure below.\n    new_tokens_value = std::min(max_tokens_, expected_tokens + tokens_per_fill_);\n\n    // Testing hook.\n    synchronizer_.syncPoint(\"on_fill_timer_pre_cas\");\n\n    // Loop while the weak CAS fails trying to update the tokens value.\n  } while (\n      !tokens_.compare_exchange_weak(expected_tokens, new_tokens_value, std::memory_order_relaxed));\n\n  fill_timer_->enableTimer(fill_interval_);\n}\n\nbool 
LocalRateLimiterImpl::requestAllowed() const {\n  // Relaxed consistency is used for all operations because we don't care about ordering, just the\n  // final atomic correctness.\n  uint32_t expected_tokens = tokens_.load(std::memory_order_relaxed);\n  do {\n    // expected_tokens is either initialized above or reloaded during the CAS failure below.\n    if (expected_tokens == 0) {\n      return false;\n    }\n\n    // Testing hook.\n    synchronizer_.syncPoint(\"allowed_pre_cas\");\n\n    // Loop while the weak CAS fails trying to subtract 1 from expected.\n  } while (!tokens_.compare_exchange_weak(expected_tokens, expected_tokens - 1,\n                                          std::memory_order_relaxed));\n\n  // We successfully decremented the counter by 1.\n  return true;\n}\n\n} // namespace LocalRateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/local_ratelimit/local_ratelimit_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/thread_synchronizer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace LocalRateLimit {\n\nclass LocalRateLimiterImpl {\npublic:\n  LocalRateLimiterImpl(const std::chrono::milliseconds fill_interval, const uint32_t max_tokens,\n                       const uint32_t tokens_per_fill, Event::Dispatcher& dispatcher);\n  ~LocalRateLimiterImpl();\n\n  bool requestAllowed() const;\n\nprivate:\n  void onFillTimer();\n\n  const std::chrono::milliseconds fill_interval_;\n  const uint32_t max_tokens_;\n  const uint32_t tokens_per_fill_;\n  const Event::TimerPtr fill_timer_;\n  mutable std::atomic<uint32_t> tokens_;\n  mutable Thread::ThreadSynchronizer synchronizer_; // Used for testing only.\n\n  friend class LocalRateLimiterImplTest;\n};\n\n} // namespace LocalRateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/lua/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\nload(\"//bazel:envoy_internal.bzl\", \"envoy_external_dep_path\")\nload(\"@bazel_skylib//rules:common_settings.bzl\", \"bool_flag\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nbool_flag(\n    name = \"moonjit\",\n    build_setting_default = False,\n)\n\nconfig_setting(\n    name = \"with_moonjit\",\n    flag_values = {\n        \":moonjit\": \"True\",\n    },\n)\n\nenvoy_cc_library(\n    name = \"lua_lib\",\n    srcs = [\"lua.cc\"],\n    hdrs = [\"lua.h\"],\n    deps = [\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:lock_guard_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n        \"//source/common/protobuf\",\n    ] + select({\n        \":with_moonjit\": [envoy_external_dep_path(\"moonjit\")],\n        \"//conditions:default\": [envoy_external_dep_path(\"luajit\")],\n    }),\n)\n\nenvoy_cc_library(\n    name = \"wrappers_lib\",\n    srcs = [\"wrappers.cc\"],\n    hdrs = [\"wrappers.h\"],\n    deps = [\n        \":lua_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/common/lua/lua.cc",
    "content": "#include \"extensions/filters/common/lua/lua.h\"\n\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Lua {\n\nCoroutine::Coroutine(const std::pair<lua_State*, lua_State*>& new_thread_state)\n    : coroutine_state_(new_thread_state, false) {}\n\nvoid Coroutine::start(int function_ref, int num_args, const std::function<void()>& yield_callback) {\n  ASSERT(state_ == State::NotStarted);\n\n  state_ = State::Yielded;\n  lua_rawgeti(coroutine_state_.get(), LUA_REGISTRYINDEX, function_ref);\n  ASSERT(lua_isfunction(coroutine_state_.get(), -1));\n\n  // The function needs to come before the arguments but the arguments are already on the stack,\n  // so we need to move it into position.\n  lua_insert(coroutine_state_.get(), -(num_args + 1));\n  resume(num_args, yield_callback);\n}\n\nvoid Coroutine::resume(int num_args, const std::function<void()>& yield_callback) {\n  ASSERT(state_ == State::Yielded);\n  int rc = lua_resume(coroutine_state_.get(), num_args);\n\n  if (0 == rc) {\n    state_ = State::Finished;\n    ENVOY_LOG(debug, \"coroutine finished\");\n  } else if (LUA_YIELD == rc) {\n    state_ = State::Yielded;\n    ENVOY_LOG(debug, \"coroutine yielded\");\n    yield_callback();\n  } else {\n    state_ = State::Finished;\n    const char* error = lua_tostring(coroutine_state_.get(), -1);\n    throw LuaException(error);\n  }\n}\n\nThreadLocalState::ThreadLocalState(const std::string& code, ThreadLocal::SlotAllocator& tls)\n    : tls_slot_(tls.allocateSlot()) {\n\n  // First verify that the supplied code can be parsed.\n  CSmartPtr<lua_State, lua_close> state(lua_open());\n  RELEASE_ASSERT(state.get() != nullptr, \"unable to create new Lua state object\");\n  luaL_openlibs(state.get());\n\n  if (0 != luaL_dostring(state.get(), 
code.c_str())) {\n    throw LuaException(fmt::format(\"script load error: {}\", lua_tostring(state.get(), -1)));\n  }\n\n  // Now initialize on all threads.\n  tls_slot_->set([code](Event::Dispatcher&) {\n    return ThreadLocal::ThreadLocalObjectSharedPtr{new LuaThreadLocal(code)};\n  });\n}\n\nint ThreadLocalState::getGlobalRef(uint64_t slot) {\n  LuaThreadLocal& tls = tls_slot_->getTyped<LuaThreadLocal>();\n  ASSERT(tls.global_slots_.size() > slot);\n  return tls.global_slots_[slot];\n}\n\nuint64_t ThreadLocalState::registerGlobal(const std::string& global) {\n  tls_slot_->runOnAllThreads([global](ThreadLocal::ThreadLocalObjectSharedPtr previous) {\n    LuaThreadLocal& tls = *std::dynamic_pointer_cast<LuaThreadLocal>(previous);\n    lua_getglobal(tls.state_.get(), global.c_str());\n    if (lua_isfunction(tls.state_.get(), -1)) {\n      tls.global_slots_.push_back(luaL_ref(tls.state_.get(), LUA_REGISTRYINDEX));\n    } else {\n      ENVOY_LOG(debug, \"definition for '{}' not found in script\", global);\n      lua_pop(tls.state_.get(), 1);\n      tls.global_slots_.push_back(LUA_REFNIL);\n    }\n    return previous;\n  });\n\n  return current_global_slot_++;\n}\n\nCoroutinePtr ThreadLocalState::createCoroutine() {\n  lua_State* state = tls_slot_->getTyped<LuaThreadLocal>().state_.get();\n  return std::make_unique<Coroutine>(std::make_pair(lua_newthread(state), state));\n}\n\nThreadLocalState::LuaThreadLocal::LuaThreadLocal(const std::string& code) : state_(lua_open()) {\n  RELEASE_ASSERT(state_.get() != nullptr, \"unable to create new Lua state object\");\n  luaL_openlibs(state_.get());\n  int rc = luaL_dostring(state_.get(), code.c_str());\n  ASSERT(rc == 0);\n}\n\n} // namespace Lua\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/lua/lua.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/c_smart_ptr.h\"\n#include \"common/common/logger.h\"\n\n#include \"lua.hpp\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Lua {\n\n/**\n * Some general notes on everything in this file. Lua/C bindings are functional, but not the most\n * beautiful interfaces. For more general overview information see the following:\n * 1) https://www.lua.org/manual/5.1/manual.html#3\n * 2) https://doc.lagout.org/programmation/Lua/Programming%20in%20Lua%20Second%20Edition.pdf\n * 3) http://luajit.org/extensions.html\n *\n * Instead of delving into crazy template metaprogramming in all cases, I've tried to use a mix\n * of templates and macros to try to hide the majority of the pain. I.e., most Lua/C pain should\n * be in this file. We do still expose basic Lua/C programming (manipulating the stack, etc.) out\n * to callers which avoids the messy C++ template programming I mentioned above.\n */\n\n/**\n * Base macro for declaring a Lua/C function. Any function declared will need to be exported via\n * the exportedFunctions() function in BaseLuaObject. See BaseLuaObject below for more\n * information. This macro declares a static \"thunk\" which checks the user data, optionally checks\n * for object death (again see BaseLuaObject below for more info), and then invokes a normal\n * object method. 
The actual object method needs to be implemented by the class.\n * @param Class supplies the owning class name.\n * @param Name supplies the function name.\n * @param Index supplies the stack index where \"this\" (Lua/C userdata) is found.\n */\n#define DECLARE_LUA_FUNCTION_EX(Class, Name, Index)                                                \\\n  static int static_##Name(lua_State* state) {                                                     \\\n    Class* object = ::Envoy::Extensions::Filters::Common::Lua::alignAndCast<Class>(                \\\n        luaL_checkudata(state, Index, typeid(Class).name()));                                      \\\n    object->checkDead(state);                                                                      \\\n    return object->Name(state);                                                                    \\\n  }                                                                                                \\\n  int Name(lua_State* state);\n\n/**\n * Declare a Lua function in which userdata is in stack slot 1. See DECLARE_LUA_FUNCTION_EX()\n */\n#define DECLARE_LUA_FUNCTION(Class, Name) DECLARE_LUA_FUNCTION_EX(Class, Name, 1)\n\n/**\n * Declare a Lua function in which userdata is in upvalue slot 1. See DECLARE_LUA_FUNCTION_EX()\n */\n#define DECLARE_LUA_CLOSURE(Class, Name) DECLARE_LUA_FUNCTION_EX(Class, Name, lua_upvalueindex(1))\n\n/**\n * Calculate the maximum space needed to be aligned.\n */\ntemplate <typename T> constexpr size_t maximumSpaceNeededToAlign() {\n  // The allocated memory can be misaligned up to `alignof(T) - 1` bytes. 
Adding it to the size to\n  // allocate.\n  return sizeof(T) + alignof(T) - 1;\n}\n\ntemplate <typename T> inline T* alignAndCast(void* mem) {\n  size_t size = maximumSpaceNeededToAlign<T>();\n  return static_cast<T*>(std::align(alignof(T), sizeof(T), mem, size));\n}\n\n/**\n * Create a new user data and assign its metatable.\n */\ntemplate <typename T> inline T* allocateLuaUserData(lua_State* state) {\n  void* mem = lua_newuserdata(state, maximumSpaceNeededToAlign<T>());\n  luaL_getmetatable(state, typeid(T).name());\n  ASSERT(lua_istable(state, -1));\n  lua_setmetatable(state, -2);\n\n  return alignAndCast<T>(mem);\n}\n\n/**\n * This is the base class for all C++ objects that we expose out to Lua. The goal is to hide as\n * much ugliness as possible. In general, to use this, do the following:\n * 1) Make your class derive from BaseLuaObject<YourClass>\n * 2) Define methods using DECLARE_LUA_FUNCTION* macros\n * 3) Export your functions by declaring a static exportedFunctions() method in your class.\n * 4) Optionally manage \"death\" status on your object. (See checkDead() and markDead() below).\n * 5) Generally you will want to hold your objects inside a LuaRef or a LuaDeathRef. See below\n *    for more information on those containers.\n *\n * It's very important to understand the Lua memory model: Once an object is created, *it is\n * owned by Lua*. Lua can GC it at any time. If you want to make sure that does not happen, you\n * must hold a ref to it in C++, generally via LuaRef or LuaDeathRef.\n */\ntemplate <class T> class BaseLuaObject : protected Logger::Loggable<Logger::Id::lua> {\npublic:\n  using ExportedFunctions = std::vector<std::pair<const char*, lua_CFunction>>;\n\n  virtual ~BaseLuaObject() = default;\n\n  /**\n   * Create a new object of this type, owned by Lua. 
This type must have previously been registered\n   * via the registerType() routine below.\n   * @param state supplies the owning Lua state.\n   * @param args supplies the variadic constructor arguments for the object.\n   * @return a pair containing a pointer to the new object and the state it was created with. (This\n   *         is done for convenience when passing a created object to a LuaRef or a LuaDeathRef.\n   */\n  template <typename... ConstructorArgs>\n  static std::pair<T*, lua_State*> create(lua_State* state, ConstructorArgs&&... args) {\n    // Memory is allocated via Lua and it is raw. We use placement new to run the constructor.\n    T* mem = allocateLuaUserData<T>(state);\n    ENVOY_LOG(trace, \"creating {} at {}\", typeid(T).name(), static_cast<void*>(mem));\n    return {new (mem) T(std::forward<ConstructorArgs>(args)...), state};\n  }\n\n  /**\n   * Register a type with Lua.\n   * @param state supplies the state to register with.\n   */\n  static void registerType(lua_State* state) {\n    std::vector<luaL_Reg> to_register;\n\n    // Fetch all of the functions to be exported to Lua so that we can register them in the\n    // metatable.\n    ExportedFunctions functions = T::exportedFunctions();\n    for (auto function : functions) {\n      to_register.push_back({function.first, function.second});\n    }\n\n    // Always register a __gc method so that we can run the object's destructor. 
We do this\n    // manually because the memory is raw and was allocated by Lua.\n    to_register.push_back(\n        {\"__gc\", [](lua_State* state) {\n           T* object = alignAndCast<T>(luaL_checkudata(state, 1, typeid(T).name()));\n           ENVOY_LOG(trace, \"destroying {} at {}\", typeid(T).name(), static_cast<void*>(object));\n           object->~T();\n           return 0;\n         }});\n\n    // Add the sentinel.\n    to_register.push_back({nullptr, nullptr});\n\n    // Register the type by creating a new metatable, setting __index to itself, and then\n    // performing the register.\n    ENVOY_LOG(debug, \"registering new type: {}\", typeid(T).name());\n    int rc = luaL_newmetatable(state, typeid(T).name());\n    ASSERT(rc == 1);\n\n    lua_pushvalue(state, -1);\n    lua_setfield(state, -2, \"__index\");\n    luaL_register(state, nullptr, to_register.data());\n  }\n\n  /**\n   * This function is called as part of the DECLARE_LUA_FUNCTION* macros. The idea here is that\n   * we cannot control when Lua destroys things. However, we may expose wrappers to a script that\n   * should not be used after some event. This allows us to mark objects as dead so that if they\n   * are used again they will throw a Lua error and not reach our code.\n   * @param state supplies the calling LuaState.\n   */\n  int checkDead(lua_State* state) {\n    if (dead_) {\n      return luaL_error(state, \"object used outside of proper scope\");\n    }\n    return 0;\n  }\n\n  /**\n   * Mark an object as dead so that a checkDead() call will throw an error. See checkDead().\n   */\n  void markDead() {\n    dead_ = true;\n    ENVOY_LOG(trace, \"marking dead {} at {}\", typeid(T).name(), static_cast<void*>(this));\n    onMarkDead();\n  }\n\n  /**\n   * Mark an object as live so that a checkDead() call will not throw an error. 
See checkDead().\n   */\n  void markLive() {\n    dead_ = false;\n    ENVOY_LOG(trace, \"marking live {} at {}\", typeid(T).name(), static_cast<void*>(this));\n    onMarkLive();\n  }\n\nprotected:\n  /**\n   * Called from markDead() when an object is marked dead. This is effectively a C++ destructor for\n   * Lua/C objects. Objects can perform inline cleanup or mark other objects as dead if needed. It\n   * can also be used to protect objects from use if they get assigned to a global variable and\n   * used across coroutines.\n   */\n  virtual void onMarkDead() {}\n\n  /**\n   * Called from markLive() when an object is marked live. This is a companion to onMarkDead(). See\n   * the comments there.\n   */\n  virtual void onMarkLive() {}\n\nprivate:\n  bool dead_{};\n};\n\n/**\n * This is basically a Lua smart pointer. The idea is that given a Lua object, if we want to\n * guarantee that Lua won't destroy it, we need to reference it. This wraps the reference\n * functionality. While a LuaRef owns an object it's guaranteed that Lua will not GC it.\n * TODO(mattklein123): Add dedicated unit tests. This will require mocking a Lua state.\n */\ntemplate <typename T> class LuaRef {\npublic:\n  /**\n   * Create an empty LuaRef.\n   */\n  LuaRef() { reset(); }\n\n  /**\n   * Create a LuaRef from an object.\n   * @param object supplies the object. Generally this is the return value from a Object::create()\n   *        call. 
The object must be at the top of the Lua stack.\n   * @param leave_on_stack supplies whether to leave the object on the stack or not when the ref\n   *        is constructed.\n   */\n  LuaRef(const std::pair<T*, lua_State*>& object, bool leave_on_stack) {\n    reset(object, leave_on_stack);\n  }\n\n  ~LuaRef() { unref(); }\n  T* get() { return object_.first; }\n\n  /**\n   * Same as the LuaRef non-default constructor, but post-construction.\n   */\n  void reset(const std::pair<T*, lua_State*>& object, bool leave_on_stack) {\n    unref();\n\n    if (leave_on_stack) {\n      lua_pushvalue(object.second, -1);\n    }\n\n    object_ = object;\n    ref_ = luaL_ref(object_.second, LUA_REGISTRYINDEX);\n    ASSERT(ref_ != LUA_REFNIL);\n  }\n\n  /**\n   * Return a LuaRef to its default/empty state.\n   */\n  void reset() {\n    unref();\n    object_ = std::pair<T*, lua_State*>{};\n    ref_ = LUA_NOREF;\n  }\n\n  /**\n   * Push the referenced object back onto the stack.\n   */\n  void pushStack() {\n    ASSERT(object_.first);\n    lua_rawgeti(object_.second, LUA_REGISTRYINDEX, ref_);\n  }\n\nprotected:\n  void unref() {\n    if (object_.second != nullptr) {\n      luaL_unref(object_.second, LUA_REGISTRYINDEX, ref_);\n    }\n  }\n\n  std::pair<T*, lua_State*> object_;\n  int ref_;\n};\n\n/**\n * This is a variant of LuaRef which also marks an object as dead during destruction. This is\n * useful if an object should not be used after the scope of the pcall() or resume().\n * TODO(mattklein123): Add dedicated unit tests. 
This will require mocking a Lua state.\n */\ntemplate <typename T> class LuaDeathRef : public LuaRef<T> {\npublic:\n  using LuaRef<T>::LuaRef;\n\n  ~LuaDeathRef() { markDead(); }\n\n  void markDead() {\n    if (this->object_.first) {\n      this->object_.first->markDead();\n    }\n  }\n\n  void markLive() {\n    if (this->object_.first) {\n      this->object_.first->markLive();\n    }\n  }\n\n  void reset(const std::pair<T*, lua_State*>& object, bool leave_on_stack) {\n    markDead();\n    LuaRef<T>::reset(object, leave_on_stack);\n  }\n\n  void reset() {\n    markDead();\n    LuaRef<T>::reset();\n  }\n};\n\n/**\n * This is a wrapper for a Lua coroutine. Lua intermixes coroutine and \"thread.\" Lua does not have\n * real threads, only cooperatively scheduled coroutines.\n */\nclass Coroutine : Logger::Loggable<Logger::Id::lua> {\npublic:\n  enum class State { NotStarted, Yielded, Finished };\n\n  Coroutine(const std::pair<lua_State*, lua_State*>& new_thread_state);\n  lua_State* luaState() { return coroutine_state_.get(); }\n  State state() { return state_; }\n\n  /**\n   * Start a coroutine.\n   * @param function_ref supplies the previously registered function to call. Registered with\n   *        ThreadLocalState::registerGlobal().\n   * @param num_args supplies the number of arguments to start the coroutine with. They should be\n   *        on the stack already.\n   * @param yield_callback supplies a callback that will be invoked if the coroutine yields.\n   */\n  void start(int function_ref, int num_args, const std::function<void()>& yield_callback);\n\n  /**\n   * Resume a previously yielded coroutine.\n   * @param num_args supplies the number of arguments to resume the coroutine with. 
They should be\n   *        on the stack already.\n   * @param yield_callback supplies a callback that will be invoked if the coroutine yields.\n   */\n  void resume(int num_args, const std::function<void()>& yield_callback);\n\nprivate:\n  LuaRef<lua_State> coroutine_state_;\n  State state_{State::NotStarted};\n};\n\nusing CoroutinePtr = std::unique_ptr<Coroutine>;\n\n/**\n * This class wraps a Lua state that can be used safely across threads. The model is that every\n * worker gets its own independent state. There is no truly global state that a script can access.\n * This is something that might be provided in the future via an API (not via Lua itself).\n */\nclass ThreadLocalState : Logger::Loggable<Logger::Id::lua> {\npublic:\n  ThreadLocalState(const std::string& code, ThreadLocal::SlotAllocator& tls);\n\n  /**\n   * @return CoroutinePtr a new coroutine.\n   */\n  CoroutinePtr createCoroutine();\n\n  /**\n   * @return a global reference previously registered via registerGlobal(). This may return\n   *         LUA_REFNIL if there was no such global.\n   * @param slot supplies the global slot/index to lookup.\n   */\n  int getGlobalRef(uint64_t slot);\n\n  /**\n   * Register a global for later use.\n   * @param global supplies the name of the global.\n   * @return a slot/index for later use with getGlobalRef().\n   */\n  uint64_t registerGlobal(const std::string& global);\n\n  /**\n   * Register a type with the thread local state. 
After this call the type will be available on\n   * all threaded workers.\n   */\n  template <class T> void registerType() {\n    tls_slot_->runOnAllThreads([](ThreadLocal::ThreadLocalObjectSharedPtr previous) {\n      LuaThreadLocal& tls = *std::dynamic_pointer_cast<LuaThreadLocal>(previous);\n      T::registerType(tls.state_.get());\n      return previous;\n    });\n  }\n\n  /**\n   * Return the number of bytes used by the runtime.\n   */\n  uint64_t runtimeBytesUsed() {\n    uint64_t bytes_used =\n        lua_gc(tls_slot_->getTyped<LuaThreadLocal>().state_.get(), LUA_GCCOUNT, 0) * 1024;\n    bytes_used += lua_gc(tls_slot_->getTyped<LuaThreadLocal>().state_.get(), LUA_GCCOUNTB, 0);\n    return bytes_used;\n  }\n\n  /**\n   * Force a full runtime GC.\n   */\n  void runtimeGC() { lua_gc(tls_slot_->getTyped<LuaThreadLocal>().state_.get(), LUA_GCCOLLECT, 0); }\n\nprivate:\n  struct LuaThreadLocal : public ThreadLocal::ThreadLocalObject {\n    LuaThreadLocal(const std::string& code);\n\n    CSmartPtr<lua_State, lua_close> state_;\n    std::vector<int> global_slots_;\n  };\n\n  ThreadLocal::SlotPtr tls_slot_;\n  uint64_t current_global_slot_{};\n};\n\nusing ThreadLocalStatePtr = std::unique_ptr<ThreadLocalState>;\n\n/**\n * An exception specific to Lua errors.\n */\nclass LuaException : public EnvoyException {\npublic:\n  using EnvoyException::EnvoyException;\n};\n} // namespace Lua\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/lua/wrappers.cc",
    "content": "#include \"extensions/filters/common/lua/wrappers.h\"\n\n#include <lua.h>\n\n#include <cstdint>\n\n#include \"common/common/assert.h\"\n#include \"common/common/hex.h\"\n\n#include \"absl/time/time.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Lua {\n\nnamespace {\n\n// Builds a Lua table from a list of strings.\ntemplate <typename StringList>\nvoid createLuaTableFromStringList(lua_State* state, const StringList& list) {\n  lua_createtable(state, list.size(), 0);\n  for (size_t i = 0; i < list.size(); i++) {\n    lua_pushstring(state, list[i].c_str());\n    // After the list[i].c_str() is pushed to the stack, we need to set the \"current element\" with\n    // that value. The lua_rawseti(state, t, i) helps us to set the value of table t with key i.\n    // Given the index of the current element/table in the stack is below the pushed value i.e. -2\n    // and the key (refers to where the element is in the table) is i + 1 (note that in Lua index\n    // starts from 1), hence we have:\n    lua_rawseti(state, -2, i + 1);\n  }\n}\n\n// By default, LUA_INTEGER is https://en.cppreference.com/w/cpp/types/ptrdiff_t\n// (https://github.com/LuaJIT/LuaJIT/blob/8271c643c21d1b2f344e339f559f2de6f3663191/src/luaconf.h#L104),\n// which is large enough to hold timestamp-since-epoch in seconds. Note: In Lua, we usually use\n// os.time(os.date(\"!*t\")) to get current timestamp-since-epoch in seconds.\nint64_t timestampInSeconds(const absl::optional<SystemTime>& system_time) {\n  return system_time.has_value() ? 
std::chrono::duration_cast<std::chrono::seconds>(\n                                       system_time.value().time_since_epoch())\n                                       .count()\n                                 : 0;\n}\n\n} // namespace\n\nint BufferWrapper::luaLength(lua_State* state) {\n  lua_pushnumber(state, data_.length());\n  return 1;\n}\n\nint BufferWrapper::luaGetBytes(lua_State* state) {\n  const int index = luaL_checkint(state, 2);\n  const int length = luaL_checkint(state, 3);\n  if (index < 0 || length < 0 ||\n      static_cast<uint64_t>(index) + static_cast<uint64_t>(length) > data_.length()) {\n    luaL_error(state, \"index/length must be >= 0 and (index + length) must be <= buffer size\");\n  }\n\n  // TODO(mattklein123): Reduce copies here by using Lua direct buffer builds.\n  std::unique_ptr<char[]> data(new char[length]);\n  data_.copyOut(index, length, data.get());\n  lua_pushlstring(state, data.get(), length);\n  return 1;\n}\n\nint BufferWrapper::luaSetBytes(lua_State* state) {\n  data_.drain(data_.length());\n  absl::string_view bytes = luaL_checkstring(state, 2);\n  data_.add(bytes);\n  lua_pushnumber(state, data_.length());\n  return 1;\n}\n\nvoid MetadataMapHelper::setValue(lua_State* state, const ProtobufWkt::Value& value) {\n  ProtobufWkt::Value::KindCase kind = value.kind_case();\n\n  switch (kind) {\n  case ProtobufWkt::Value::kNullValue:\n    return lua_pushnil(state);\n  case ProtobufWkt::Value::kNumberValue:\n    return lua_pushnumber(state, value.number_value());\n  case ProtobufWkt::Value::kBoolValue:\n    return lua_pushboolean(state, value.bool_value());\n  case ProtobufWkt::Value::kStructValue:\n    return createTable(state, value.struct_value().fields());\n  case ProtobufWkt::Value::kStringValue: {\n    const auto& string_value = value.string_value();\n    return lua_pushstring(state, string_value.c_str());\n  }\n  case ProtobufWkt::Value::kListValue: {\n    const auto& list = value.list_value();\n    const int values_size = 
list.values_size();\n\n    lua_createtable(state, values_size, 0);\n    for (int i = 0; i < values_size; i++) {\n      // Here we want to build an array (or a list). Array in lua is just a name for table used in a\n      // specific way. Basically we want to have: 'elements' table. Where elements[i] is an entry\n      // in that table, where key = i and value = list.values[i].\n      //\n      // Firstly, we need to push the value to the stack.\n      setValue(state, list.values(i));\n\n      // Secondly, after the list.value(i) is pushed to the stack, we need to set the 'current\n      // element' with that value. The lua_rawseti(state, t, i) helps us to set the value of table t\n      // with key i. Given the index of the current element/table in the stack is below the pushed\n      // value i.e. -2 and the key (refers to where the element is in the table) is i + 1 (note that\n      // in lua index starts from 1), hence we have:\n      lua_rawseti(state, -2, i + 1);\n    }\n    return;\n  }\n\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid MetadataMapHelper::createTable(lua_State* state,\n                                    const Protobuf::Map<std::string, ProtobufWkt::Value>& fields) {\n  lua_createtable(state, 0, fields.size());\n  for (const auto& field : fields) {\n    int top = lua_gettop(state);\n    lua_pushstring(state, field.first.c_str());\n    setValue(state, field.second);\n    lua_settable(state, top);\n  }\n}\n\n/**\n * Converts the value on top of the Lua stack into a ProtobufWkt::Value.\n * Any Lua types that cannot be directly mapped to Value types will\n * yield an error.\n */\nProtobufWkt::Value MetadataMapHelper::loadValue(lua_State* state) {\n  ProtobufWkt::Value value;\n  int type = lua_type(state, -1);\n\n  switch (type) {\n  case LUA_TNIL:\n    value.set_null_value(ProtobufWkt::NullValue());\n    break;\n  case LUA_TNUMBER:\n    value.set_number_value(static_cast<double>(lua_tonumber(state, -1)));\n    break;\n  case 
LUA_TBOOLEAN:\n    value.set_bool_value(lua_toboolean(state, -1) != 0);\n    break;\n  case LUA_TTABLE: {\n    int length = MetadataMapHelper::tableLength(state);\n    if (length > 0) {\n      *value.mutable_list_value() = MetadataMapHelper::loadList(state, length);\n    } else {\n      *value.mutable_struct_value() = MetadataMapHelper::loadStruct(state);\n    }\n    break;\n  }\n  case LUA_TSTRING:\n    value.set_string_value(lua_tostring(state, -1));\n    break;\n  default:\n    luaL_error(state, \"unexpected type '%s' in dynamicMetadata\", lua_typename(state, type));\n  }\n\n  return value;\n}\n\n/**\n * Returns the length of a Lua table if it's actually shaped like a List,\n * i.e. if all the keys are consecutive number values. Otherwise, returns -1.\n */\nint MetadataMapHelper::tableLength(lua_State* state) {\n  double max = 0;\n\n  lua_pushnil(state);\n  while (lua_next(state, -2) != 0) {\n    if (lua_type(state, -2) == LUA_TNUMBER) {\n      double k = lua_tonumber(state, -2);\n      if (floor(k) == k && k >= 1) {\n        if (k > max) {\n          max = k;\n        }\n        lua_pop(state, 1);\n        continue;\n      }\n    }\n    lua_pop(state, 2);\n    return -1;\n  }\n  return static_cast<int>(max);\n}\n\nProtobufWkt::ListValue MetadataMapHelper::loadList(lua_State* state, int length) {\n  ProtobufWkt::ListValue list;\n\n  for (int i = 1; i <= length; i++) {\n    lua_rawgeti(state, -1, i);\n    *list.add_values() = MetadataMapHelper::loadValue(state);\n    lua_pop(state, 1);\n  }\n\n  return list;\n}\n\nProtobufWkt::Struct MetadataMapHelper::loadStruct(lua_State* state) {\n  ProtobufWkt::Struct struct_obj;\n\n  lua_pushnil(state);\n  while (lua_next(state, -2) != 0) {\n    int key_type = lua_type(state, -2);\n    if (key_type != LUA_TSTRING) {\n      luaL_error(state, \"unexpected type %s in table key (only string keys are supported)\",\n                 lua_typename(state, key_type));\n    }\n    const char* key = lua_tostring(state, -2);\n    
(*struct_obj.mutable_fields())[key] = MetadataMapHelper::loadValue(state);\n    lua_pop(state, 1);\n  }\n\n  return struct_obj;\n}\n\nMetadataMapIterator::MetadataMapIterator(MetadataMapWrapper& parent)\n    : parent_{parent}, current_{parent.metadata_.fields().begin()} {}\n\nint MetadataMapIterator::luaPairsIterator(lua_State* state) {\n  if (current_ == parent_.metadata_.fields().end()) {\n    parent_.iterator_.reset();\n    return 0;\n  }\n\n  lua_pushstring(state, current_->first.c_str());\n  MetadataMapHelper::setValue(state, current_->second);\n\n  current_++;\n  return 2;\n}\n\nint MetadataMapWrapper::luaGet(lua_State* state) {\n  const char* key = luaL_checkstring(state, 2);\n  const auto filter_it = metadata_.fields().find(key);\n  if (filter_it == metadata_.fields().end()) {\n    return 0;\n  }\n\n  MetadataMapHelper::setValue(state, filter_it->second);\n  return 1;\n}\n\nint MetadataMapWrapper::luaPairs(lua_State* state) {\n  if (iterator_.get() != nullptr) {\n    luaL_error(state, \"cannot create a second iterator before completing the first\");\n  }\n\n  iterator_.reset(MetadataMapIterator::create(state, *this), true);\n  lua_pushcclosure(state, MetadataMapIterator::static_luaPairsIterator, 1);\n  return 1;\n}\n\nint SslConnectionWrapper::luaPeerCertificatePresented(lua_State* state) {\n  lua_pushboolean(state, connection_info_.peerCertificatePresented());\n  return 1;\n}\n\nint SslConnectionWrapper::luaPeerCertificateValidated(lua_State* state) {\n  lua_pushboolean(state, connection_info_.peerCertificateValidated());\n  return 1;\n}\n\nint SslConnectionWrapper::luaUriSanLocalCertificate(lua_State* state) {\n  createLuaTableFromStringList(state, connection_info_.uriSanLocalCertificate());\n  return 1;\n}\n\nint SslConnectionWrapper::luaSha256PeerCertificateDigest(lua_State* state) {\n  lua_pushstring(state, connection_info_.sha256PeerCertificateDigest().c_str());\n  return 1;\n}\n\nint SslConnectionWrapper::luaSerialNumberPeerCertificate(lua_State* 
state) {\n  lua_pushstring(state, connection_info_.serialNumberPeerCertificate().c_str());\n  return 1;\n}\n\nint SslConnectionWrapper::luaIssuerPeerCertificate(lua_State* state) {\n  lua_pushstring(state, connection_info_.issuerPeerCertificate().c_str());\n  return 1;\n}\n\nint SslConnectionWrapper::luaSubjectPeerCertificate(lua_State* state) {\n  lua_pushstring(state, connection_info_.subjectPeerCertificate().c_str());\n  return 1;\n}\n\nint SslConnectionWrapper::luaUriSanPeerCertificate(lua_State* state) {\n  createLuaTableFromStringList(state, connection_info_.uriSanPeerCertificate());\n  return 1;\n}\n\nint SslConnectionWrapper::luaSubjectLocalCertificate(lua_State* state) {\n  lua_pushstring(state, connection_info_.subjectLocalCertificate().c_str());\n  return 1;\n}\n\nint SslConnectionWrapper::luaDnsSansPeerCertificate(lua_State* state) {\n  createLuaTableFromStringList(state, connection_info_.dnsSansPeerCertificate());\n  return 1;\n}\n\nint SslConnectionWrapper::luaDnsSansLocalCertificate(lua_State* state) {\n  createLuaTableFromStringList(state, connection_info_.dnsSansLocalCertificate());\n  return 1;\n}\n\nint SslConnectionWrapper::luaValidFromPeerCertificate(lua_State* state) {\n  lua_pushinteger(state, timestampInSeconds(connection_info_.validFromPeerCertificate()));\n  return 1;\n}\n\nint SslConnectionWrapper::luaExpirationPeerCertificate(lua_State* state) {\n  lua_pushinteger(state, timestampInSeconds(connection_info_.expirationPeerCertificate()));\n  return 1;\n}\n\nint SslConnectionWrapper::luaSessionId(lua_State* state) {\n  lua_pushstring(state, connection_info_.sessionId().c_str());\n  return 1;\n}\n\nint SslConnectionWrapper::luaCiphersuiteId(lua_State* state) {\n  lua_pushstring(state,\n                 absl::StrCat(\"0x\", Hex::uint16ToHex(connection_info_.ciphersuiteId())).c_str());\n  return 1;\n}\n\nint SslConnectionWrapper::luaCiphersuiteString(lua_State* state) {\n  lua_pushstring(state, connection_info_.ciphersuiteString().c_str());\n  
return 1;\n}\n\nint SslConnectionWrapper::luaUrlEncodedPemEncodedPeerCertificate(lua_State* state) {\n  lua_pushstring(state, connection_info_.urlEncodedPemEncodedPeerCertificate().c_str());\n  return 1;\n}\n\nint SslConnectionWrapper::luaUrlEncodedPemEncodedPeerCertificateChain(lua_State* state) {\n  lua_pushstring(state, connection_info_.urlEncodedPemEncodedPeerCertificateChain().c_str());\n  return 1;\n}\n\nint SslConnectionWrapper::luaTlsVersion(lua_State* state) {\n  lua_pushstring(state, connection_info_.tlsVersion().c_str());\n  return 1;\n}\n\nint ConnectionWrapper::luaSsl(lua_State* state) {\n  const auto& ssl = connection_->ssl();\n  if (ssl != nullptr) {\n    if (ssl_connection_wrapper_.get() != nullptr) {\n      ssl_connection_wrapper_.pushStack();\n    } else {\n      ssl_connection_wrapper_.reset(SslConnectionWrapper::create(state, *ssl), true);\n    }\n  } else {\n    lua_pushnil(state);\n  }\n  return 1;\n}\n\n} // namespace Lua\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/lua/wrappers.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/common/lua/lua.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Lua {\n\n/**\n * A wrapper for a buffer.\n */\nclass BufferWrapper : public BaseLuaObject<BufferWrapper> {\npublic:\n  BufferWrapper(Buffer::Instance& data) : data_(data) {}\n\n  static ExportedFunctions exportedFunctions() {\n    return {{\"length\", static_luaLength},\n            {\"getBytes\", static_luaGetBytes},\n            {\"setBytes\", static_luaSetBytes}};\n  }\n\nprivate:\n  /**\n   * @return int the size in bytes of the buffer.\n   */\n  DECLARE_LUA_FUNCTION(BufferWrapper, luaLength);\n\n  /**\n   * Get bytes out of a buffer for inspection in Lua.\n   * @param 1 (int) starting index of bytes to extract.\n   * @param 2 (int) length of bytes to extract.\n   * @return string the extracted bytes. Throws an error if the index/length are out of range.\n   */\n  DECLARE_LUA_FUNCTION(BufferWrapper, luaGetBytes);\n\n  /**\n   * Set the wrapped data with the input string.\n   * @param 1 (string) input string.\n   * @return int the length of the input string.\n   */\n  DECLARE_LUA_FUNCTION(BufferWrapper, luaSetBytes);\n\n  Buffer::Instance& data_;\n};\n\nclass MetadataMapWrapper;\n\nstruct MetadataMapHelper {\n  static void setValue(lua_State* state, const ProtobufWkt::Value& value);\n  static void createTable(lua_State* state,\n                          const Protobuf::Map<std::string, ProtobufWkt::Value>& fields);\n  static ProtobufWkt::Value loadValue(lua_State* state);\n\nprivate:\n  static ProtobufWkt::Struct loadStruct(lua_State* state);\n  static ProtobufWkt::ListValue loadList(lua_State* state, int length);\n  static int tableLength(lua_State* state);\n};\n\n/**\n * Iterator over a metadata map.\n */\nclass MetadataMapIterator : public BaseLuaObject<MetadataMapIterator> {\npublic:\n  
MetadataMapIterator(MetadataMapWrapper& parent);\n\n  static ExportedFunctions exportedFunctions() { return {}; }\n\n  DECLARE_LUA_CLOSURE(MetadataMapIterator, luaPairsIterator);\n\nprivate:\n  MetadataMapWrapper& parent_;\n  Protobuf::Map<std::string, ProtobufWkt::Value>::const_iterator current_;\n};\n\n/**\n * Lua wrapper for a metadata map.\n */\nclass MetadataMapWrapper : public BaseLuaObject<MetadataMapWrapper> {\npublic:\n  MetadataMapWrapper(const ProtobufWkt::Struct& metadata) : metadata_{metadata} {}\n\n  static ExportedFunctions exportedFunctions() {\n    return {{\"get\", static_luaGet}, {\"__pairs\", static_luaPairs}};\n  }\n\nprivate:\n  /**\n   * Get a metadata value from the map.\n   * @param 1 (string): filter.\n   * @return string value if found or nil.\n   */\n  DECLARE_LUA_FUNCTION(MetadataMapWrapper, luaGet);\n\n  /**\n   * Implementation of the __pairs metamethod so a metadata wrapper can be iterated over using\n   * pairs().\n   */\n  DECLARE_LUA_FUNCTION(MetadataMapWrapper, luaPairs);\n\n  // Envoy::Lua::BaseLuaObject\n  void onMarkDead() override {\n    // Iterators do not survive yields.\n    iterator_.reset();\n  }\n\n  const ProtobufWkt::Struct metadata_;\n  LuaDeathRef<MetadataMapIterator> iterator_;\n\n  friend class MetadataMapIterator;\n};\n\n/**\n * Lua wrapper for Ssl::ConnectionInfo.\n */\nclass SslConnectionWrapper : public BaseLuaObject<SslConnectionWrapper> {\npublic:\n  explicit SslConnectionWrapper(const Ssl::ConnectionInfo& info) : connection_info_{info} {}\n  static ExportedFunctions exportedFunctions() {\n    return {{\"peerCertificatePresented\", static_luaPeerCertificatePresented},\n            {\"peerCertificateValidated\", static_luaPeerCertificateValidated},\n            {\"uriSanLocalCertificate\", static_luaUriSanLocalCertificate},\n            {\"sha256PeerCertificateDigest\", static_luaSha256PeerCertificateDigest},\n            {\"serialNumberPeerCertificate\", static_luaSerialNumberPeerCertificate},\n            
{\"issuerPeerCertificate\", static_luaIssuerPeerCertificate},\n            {\"subjectPeerCertificate\", static_luaSubjectPeerCertificate},\n            {\"uriSanPeerCertificate\", static_luaUriSanPeerCertificate},\n            {\"subjectLocalCertificate\", static_luaSubjectLocalCertificate},\n            {\"dnsSansPeerCertificate\", static_luaDnsSansPeerCertificate},\n            {\"dnsSansLocalCertificate\", static_luaDnsSansLocalCertificate},\n            {\"validFromPeerCertificate\", static_luaValidFromPeerCertificate},\n            {\"expirationPeerCertificate\", static_luaExpirationPeerCertificate},\n            {\"sessionId\", static_luaSessionId},\n            {\"ciphersuiteId\", static_luaCiphersuiteId},\n            {\"ciphersuiteString\", static_luaCiphersuiteString},\n            {\"urlEncodedPemEncodedPeerCertificate\", static_luaUrlEncodedPemEncodedPeerCertificate},\n            {\"urlEncodedPemEncodedPeerCertificateChain\",\n             static_luaUrlEncodedPemEncodedPeerCertificateChain},\n            {\"tlsVersion\", static_luaTlsVersion}};\n  }\n\nprivate:\n  /**\n   * Returns bool whether the peer certificate is presented.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaPeerCertificatePresented);\n\n  /**\n   * Returns bool whether the peer certificate is validated.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaPeerCertificateValidated);\n\n  /**\n   * Returns the URIs in the SAN field of the local certificate. Returns empty table if there is no\n   * local certificate, or no SAN field, or no URI in SAN.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUriSanLocalCertificate);\n\n  /**\n   * Returns the subject field of the local certificate in RFC 2253 format. Returns empty string if\n   * there is no local certificate, or no subject.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSubjectLocalCertificate);\n\n  /**\n   * Returns the SHA256 digest of the peer certificate. 
Returns empty string if there is no peer\n   * certificate which can happen in TLS (non mTLS) connections.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSha256PeerCertificateDigest);\n\n  /**\n   * Returns the serial number field of the peer certificate. Returns empty string if there is no\n   * peer certificate, or no serial number.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSerialNumberPeerCertificate);\n\n  /**\n   * Returns the issuer field of the peer certificate in RFC 2253 format. Returns empty string if\n   * there is no peer certificate, or no issuer.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaIssuerPeerCertificate);\n\n  /**\n   * Returns the subject field of the peer certificate in RFC 2253 format. Returns empty string if\n   * there is no peer certificate, or no subject.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSubjectPeerCertificate);\n\n  /**\n   * Returns the URIs in the SAN field of the peer certificate. Returns empty table if there is no\n   * peer certificate, or no SAN field, or no URI.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUriSanPeerCertificate);\n\n  /**\n   * Return string the URL-encoded PEM-encoded representation of the peer certificate. Returns empty\n   * string if there is no peer certificate or encoding fails.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUrlEncodedPemEncodedPeerCertificate);\n\n  /**\n   * Returns the URL-encoded PEM-encoded representation of the full peer certificate chain including\n   * the leaf certificate. Returns empty string if there is no peer certificate or encoding fails.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUrlEncodedPemEncodedPeerCertificateChain);\n\n  /**\n   * Returns the DNS entries in the SAN field of the peer certificate. 
Returns an empty table if\n   * there is no peer certificate, or no SAN field, or no DNS entries in SAN.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaDnsSansPeerCertificate);\n\n  /**\n   * Returns the DNS entries in the SAN field of the local certificate. Returns an empty table if\n   * there is no local certificate, or no SAN field, or no DNS entries in SAN.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaDnsSansLocalCertificate);\n\n  /**\n   * Returns the timestamp-since-epoch (in seconds) that the peer certificate was issued and should\n   * be considered valid from. Returns empty string if there is no peer certificate.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaValidFromPeerCertificate);\n\n  /**\n   * Returns the timestamp-since-epoch (in seconds) that the peer certificate expires and should not\n   * be considered valid after. Returns empty string if there is no peer certificate.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaExpirationPeerCertificate);\n\n  /**\n   * Returns the hex-encoded TLS session ID as defined in RFC 5246.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSessionId);\n\n  /**\n   * Returns the standard ID for the ciphers used in the established TLS connection. Returns 0xffff\n   * if there is no current negotiated ciphersuite.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaCiphersuiteId);\n\n  /**\n   * Returns the OpenSSL name for the set of ciphers used in the established TLS connection. Returns\n   * empty string if there is no current negotiated ciphersuite.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaCiphersuiteString);\n\n  /**\n   * Returns the TLS version (e.g. TLSv1.2, TLSv1.3) used in the established TLS connection. 
Returns\n   * string if secured and nil if not.\n   */\n  DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaTlsVersion);\n\n  // TODO(dio): Add luaX509Extension if required, since currently it is used out of tree.\n\n  const Ssl::ConnectionInfo& connection_info_;\n};\n\n/**\n * Lua wrapper for Network::Connection.\n */\nclass ConnectionWrapper : public BaseLuaObject<ConnectionWrapper> {\npublic:\n  ConnectionWrapper(const Network::Connection* connection) : connection_{connection} {}\n\n  // TODO(dio): Remove this in favor of StreamInfo::downstreamSslConnection wrapper since ssl() in\n  // envoy/network/connection.h is subject to removal.\n  static ExportedFunctions exportedFunctions() { return {{\"ssl\", static_luaSsl}}; }\n\nprivate:\n  /**\n   * Get the Ssl::Connection wrapper\n   * @return object if secured and nil if not.\n   */\n  DECLARE_LUA_FUNCTION(ConnectionWrapper, luaSsl);\n\n  // Envoy::Lua::BaseLuaObject\n  void onMarkDead() override { ssl_connection_wrapper_.reset(); }\n\n  const Network::Connection* connection_;\n  LuaDeathRef<SslConnectionWrapper> ssl_connection_wrapper_;\n};\n\n} // namespace Lua\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/original_src/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Helpers for filters for mirroring the downstream remote address on the upstream's source.\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"original_src_socket_option_lib\",\n    srcs = [\"original_src_socket_option.cc\"],\n    hdrs = [\"original_src_socket_option.h\"],\n    deps = [\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"socket_option_factory_lib\",\n    srcs = [\"socket_option_factory.cc\"],\n    hdrs = [\"socket_option_factory.h\"],\n    deps = [\n        \":original_src_socket_option_lib\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/common/original_src/original_src_socket_option.cc",
    "content": "#include \"extensions/filters/common/original_src/original_src_socket_option.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace OriginalSrc {\n\nOriginalSrcSocketOption::OriginalSrcSocketOption(\n    Network::Address::InstanceConstSharedPtr src_address)\n    : src_address_(std::move(src_address)) {\n  // Source transparency only works on IP connections.\n  ASSERT(src_address_->type() == Network::Address::Type::Ip);\n}\n\nbool OriginalSrcSocketOption::setOption(\n    Network::Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const {\n\n  if (state == envoy::config::core::v3::SocketOption::STATE_PREBIND) {\n    socket.setLocalAddress(src_address_);\n  }\n\n  return true;\n}\n\n/**\n * Inserts an address, already in network order, to a byte array.\n */\ntemplate <typename T> void addressIntoVector(std::vector<uint8_t>& vec, const T& address) {\n  const uint8_t* byte_array = reinterpret_cast<const uint8_t*>(&address);\n  vec.insert(vec.end(), byte_array, byte_array + sizeof(T));\n}\n\nvoid OriginalSrcSocketOption::hashKey(std::vector<uint8_t>& key) const {\n\n  // Note: we're assuming that there cannot be a conflict between IPv6 addresses here. 
If an IPv4\n  // address is mapped into an IPv6 address using an IPv4-Mapped IPv6 Address (RFC4291), then it's\n  // possible the hashes will be different despite the IP address used by the connection being\n  // the same.\n  if (src_address_->ip()->version() == Network::Address::IpVersion::v4) {\n    // note raw_address is already in network order\n    uint32_t raw_address = src_address_->ip()->ipv4()->address();\n    addressIntoVector(key, raw_address);\n  } else if (src_address_->ip()->version() == Network::Address::IpVersion::v6) {\n    // note raw_address is already in network order\n    absl::uint128 raw_address = src_address_->ip()->ipv6()->address();\n    addressIntoVector(key, raw_address);\n  }\n}\n\nabsl::optional<Network::Socket::Option::Details> OriginalSrcSocketOption::getOptionDetails(\n    const Network::Socket&, envoy::config::core::v3::SocketOption::SocketState) const {\n  // no details for this option.\n  return absl::nullopt;\n}\n\n} // namespace OriginalSrc\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/original_src/original_src_socket_option.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/listen_socket.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace OriginalSrc {\n/**\n * A socket option implementation which allows a connection to spoof its source IP/port using\n * a provided IP address (and maybe port).\n */\nclass OriginalSrcSocketOption : public Network::Socket::Option {\npublic:\n  /**\n   * Constructs a socket option which will set the socket to use source @c src_address\n   */\n  OriginalSrcSocketOption(Network::Address::InstanceConstSharedPtr src_address);\n  ~OriginalSrcSocketOption() override = default;\n\n  /**\n   * Updates the source address of the socket to match `src_address_`.\n   * Adds socket options to the socket to allow this to work.\n   */\n  bool setOption(Network::Socket& socket,\n                 envoy::config::core::v3::SocketOption::SocketState state) const override;\n\n  /**\n   * Appends a key which uniquely identifies the address being tracked.\n   */\n  void hashKey(std::vector<uint8_t>& key) const override;\n\n  absl::optional<Details>\n  getOptionDetails(const Network::Socket& socket,\n                   envoy::config::core::v3::SocketOption::SocketState state) const override;\n\nprivate:\n  Network::Address::InstanceConstSharedPtr src_address_;\n};\n\n} // namespace OriginalSrc\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/original_src/socket_option_factory.cc",
    "content": "#include \"extensions/filters/common/original_src/socket_option_factory.h\"\n\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/common/original_src/original_src_socket_option.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace OriginalSrc {\n\nNetwork::Socket::OptionsSharedPtr\nbuildOriginalSrcOptions(Network::Address::InstanceConstSharedPtr source, uint32_t mark) {\n  const auto address_without_port = Network::Utility::getAddressWithPort(*source, 0);\n\n  // Note: we don't expect this to change the behaviour of the socket. We expect it to be copied\n  // into the upstream connection later.\n  auto options_to_add = std::make_shared<Network::Socket::Options>();\n  options_to_add->emplace_back(\n      std::make_shared<Filters::Common::OriginalSrc::OriginalSrcSocketOption>(\n          std::move(address_without_port)));\n\n  if (mark != 0) {\n    const auto mark_options = Network::SocketOptionFactory::buildSocketMarkOptions(mark);\n    options_to_add->insert(options_to_add->end(), mark_options->begin(), mark_options->end());\n  }\n\n  const auto transparent_options = Network::SocketOptionFactory::buildIpTransparentOptions();\n  options_to_add->insert(options_to_add->end(), transparent_options->begin(),\n                         transparent_options->end());\n  return options_to_add;\n}\n\n} // namespace OriginalSrc\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/original_src/socket_option_factory.h",
    "content": "#pragma once\n\n#include \"envoy/network/address.h\"\n#include \"envoy/network/listen_socket.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace OriginalSrc {\n\nNetwork::Socket::OptionsSharedPtr\nbuildOriginalSrcOptions(Network::Address::InstanceConstSharedPtr source, uint32_t mark);\n\n} // namespace OriginalSrc\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"ratelimit_lib\",\n    srcs = [\"ratelimit_impl.cc\"],\n    hdrs = [\"ratelimit_impl.h\"],\n    deps = [\n        \":ratelimit_client_interface\",\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//include/envoy/grpc:async_client_manager_interface\",\n        \"//include/envoy/ratelimit:ratelimit_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/grpc:typed_async_client_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/common/ratelimit/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"ratelimit_client_interface\",\n    hdrs = [\"ratelimit.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/ratelimit:ratelimit_interface\",\n        \"//include/envoy/singleton:manager_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stat_names_lib\",\n    hdrs = [\"stat_names.h\"],\n    deps = [\n        \"//source/common/stats:symbol_table_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/common/ratelimit/ratelimit.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/ratelimit/ratelimit.h\"\n#include \"envoy/service/ratelimit/v3/rls.pb.h\"\n#include \"envoy/singleton/manager.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RateLimit {\n\n/**\n * Possible async results for a limit call.\n */\nenum class LimitStatus {\n  // The request is not over limit.\n  OK,\n  // The rate limit service could not be queried.\n  Error,\n  // The request is over limit.\n  OverLimit\n};\n\nusing DescriptorStatusList =\n    std::vector<envoy::service::ratelimit::v3::RateLimitResponse_DescriptorStatus>;\nusing DescriptorStatusListPtr = std::unique_ptr<DescriptorStatusList>;\n\n/**\n * Async callbacks used during limit() calls.\n */\nclass RequestCallbacks {\npublic:\n  virtual ~RequestCallbacks() = default;\n\n  /**\n   * Called when a limit request is complete. The resulting status,\n   * response headers and request headers to be forwarded to the upstream are supplied.\n   */\n  virtual void complete(LimitStatus status, DescriptorStatusListPtr&& descriptor_statuses,\n                        Http::ResponseHeaderMapPtr&& response_headers_to_add,\n                        Http::RequestHeaderMapPtr&& request_headers_to_add) PURE;\n};\n\n/**\n * A client used to query a centralized rate limit service.\n */\nclass Client {\npublic:\n  virtual ~Client() = default;\n\n  /**\n   * Cancel an inflight limit request.\n   */\n  virtual void cancel() PURE;\n\n  /**\n   * Request a limit check. Note that this abstract API matches the design of Lyft's GRPC based\n   * rate limit service. See ratelimit.proto for details. 
Any other rate limit implementations\n   * plugged in at this layer should support the same high level API.\n   * NOTE: It is possible for the completion callback to be called immediately on the same stack\n   *       frame. Calling code should account for this.\n   * @param callbacks supplies the completion callbacks.\n   * @param domain specifies the rate limit domain.\n   * @param descriptors specifies a list of descriptors to query.\n   * @param parent_span source for generating an egress child span as part of the trace.\n   *\n   */\n  virtual void limit(RequestCallbacks& callbacks, const std::string& domain,\n                     const std::vector<Envoy::RateLimit::Descriptor>& descriptors,\n                     Tracing::Span& parent_span) PURE;\n};\n\nusing ClientPtr = std::unique_ptr<Client>;\n\n} // namespace RateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ratelimit/ratelimit_impl.cc",
    "content": "#include \"extensions/filters/common/ratelimit/ratelimit_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/common/ratelimit/v3/ratelimit.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RateLimit {\n\nGrpcClientImpl::GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client,\n                               const absl::optional<std::chrono::milliseconds>& timeout,\n                               envoy::config::core::v3::ApiVersion transport_api_version)\n    : async_client_(std::move(async_client)), timeout_(timeout),\n      service_method_(\n          Grpc::VersionedMethods(\"envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit\",\n                                 \"envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit\")\n              .getMethodDescriptorForVersion(transport_api_version)),\n      transport_api_version_(transport_api_version) {}\n\nGrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); }\n\nvoid GrpcClientImpl::cancel() {\n  ASSERT(callbacks_ != nullptr);\n  request_->cancel();\n  callbacks_ = nullptr;\n}\n\nvoid GrpcClientImpl::createRequest(envoy::service::ratelimit::v3::RateLimitRequest& request,\n                                   const std::string& domain,\n                                   const std::vector<Envoy::RateLimit::Descriptor>& descriptors) {\n  request.set_domain(domain);\n  for (const Envoy::RateLimit::Descriptor& descriptor : descriptors) {\n    envoy::extensions::common::ratelimit::v3::RateLimitDescriptor* new_descriptor =\n        request.add_descriptors();\n    for (const Envoy::RateLimit::DescriptorEntry& entry : descriptor.entries_) {\n      
envoy::extensions::common::ratelimit::v3::RateLimitDescriptor::Entry* new_entry =\n          new_descriptor->add_entries();\n      new_entry->set_key(entry.key_);\n      new_entry->set_value(entry.value_);\n    }\n    if (descriptor.limit_) {\n      envoy::extensions::common::ratelimit::v3::RateLimitDescriptor_RateLimitOverride* new_limit =\n          new_descriptor->mutable_limit();\n      new_limit->set_requests_per_unit(descriptor.limit_.value().requests_per_unit_);\n      new_limit->set_unit(descriptor.limit_.value().unit_);\n    }\n  }\n}\n\nvoid GrpcClientImpl::limit(RequestCallbacks& callbacks, const std::string& domain,\n                           const std::vector<Envoy::RateLimit::Descriptor>& descriptors,\n                           Tracing::Span& parent_span) {\n  ASSERT(callbacks_ == nullptr);\n  callbacks_ = &callbacks;\n\n  envoy::service::ratelimit::v3::RateLimitRequest request;\n  createRequest(request, domain, descriptors);\n\n  request_ = async_client_->send(service_method_, request, *this, parent_span,\n                                 Http::AsyncClient::RequestOptions().setTimeout(timeout_),\n                                 transport_api_version_);\n}\n\nvoid GrpcClientImpl::onSuccess(\n    std::unique_ptr<envoy::service::ratelimit::v3::RateLimitResponse>&& response,\n    Tracing::Span& span) {\n  LimitStatus status = LimitStatus::OK;\n  ASSERT(response->overall_code() != envoy::service::ratelimit::v3::RateLimitResponse::UNKNOWN);\n  if (response->overall_code() == envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT) {\n    status = LimitStatus::OverLimit;\n    span.setTag(Constants::get().TraceStatus, Constants::get().TraceOverLimit);\n  } else {\n    span.setTag(Constants::get().TraceStatus, Constants::get().TraceOk);\n  }\n\n  Http::ResponseHeaderMapPtr response_headers_to_add;\n  Http::RequestHeaderMapPtr request_headers_to_add;\n  if (!response->response_headers_to_add().empty()) {\n    response_headers_to_add = 
Http::ResponseHeaderMapImpl::create();\n    for (const auto& h : response->response_headers_to_add()) {\n      response_headers_to_add->addCopy(Http::LowerCaseString(h.key()), h.value());\n    }\n  }\n\n  if (!response->request_headers_to_add().empty()) {\n    request_headers_to_add = Http::RequestHeaderMapImpl::create();\n    for (const auto& h : response->request_headers_to_add()) {\n      request_headers_to_add->addCopy(Http::LowerCaseString(h.key()), h.value());\n    }\n  }\n\n  DescriptorStatusListPtr descriptor_statuses = std::make_unique<DescriptorStatusList>(\n      response->statuses().begin(), response->statuses().end());\n  callbacks_->complete(status, std::move(descriptor_statuses), std::move(response_headers_to_add),\n                       std::move(request_headers_to_add));\n  callbacks_ = nullptr;\n}\n\nvoid GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string&,\n                               Tracing::Span&) {\n  ASSERT(status != Grpc::Status::WellKnownGrpcStatus::Ok);\n  callbacks_->complete(LimitStatus::Error, nullptr, nullptr, nullptr);\n  callbacks_ = nullptr;\n}\n\nClientPtr rateLimitClient(Server::Configuration::FactoryContext& context,\n                          const envoy::config::core::v3::GrpcService& grpc_service,\n                          const std::chrono::milliseconds timeout,\n                          envoy::config::core::v3::ApiVersion transport_api_version) {\n  // TODO(ramaraochavali): register client to singleton when GrpcClientImpl supports concurrent\n  // requests.\n  const auto async_client_factory =\n      context.clusterManager().grpcAsyncClientManager().factoryForGrpcService(\n          grpc_service, context.scope(), true);\n  return std::make_unique<Filters::Common::RateLimit::GrpcClientImpl>(\n      async_client_factory->create(), timeout, transport_api_version);\n}\n\n} // namespace RateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ratelimit/ratelimit_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/grpc/async_client_manager.h\"\n#include \"envoy/ratelimit/ratelimit.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/service/ratelimit/v3/rls.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/grpc/typed_async_client.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RateLimit {\n\nusing RateLimitAsyncCallbacks =\n    Grpc::AsyncRequestCallbacks<envoy::service::ratelimit::v3::RateLimitResponse>;\n\nstruct ConstantValues {\n  const std::string TraceStatus = \"ratelimit_status\";\n  const std::string TraceOverLimit = \"over_limit\";\n  const std::string TraceOk = \"ok\";\n};\n\nusing Constants = ConstSingleton<ConstantValues>;\n\n// TODO(htuch): We should have only one client per thread, but today we create one per filter stack.\n// This will require support for more than one outstanding request per client (limit() assumes only\n// one today).\nclass GrpcClientImpl : public Client,\n                       public RateLimitAsyncCallbacks,\n                       public Logger::Loggable<Logger::Id::config> {\npublic:\n  GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client,\n                 const absl::optional<std::chrono::milliseconds>& timeout,\n                 envoy::config::core::v3::ApiVersion transport_api_version);\n  ~GrpcClientImpl() override;\n\n  static void createRequest(envoy::service::ratelimit::v3::RateLimitRequest& request,\n                            const std::string& domain,\n                            const 
std::vector<Envoy::RateLimit::Descriptor>& descriptors);\n\n  // Filters::Common::RateLimit::Client\n  void cancel() override;\n  void limit(RequestCallbacks& callbacks, const std::string& domain,\n             const std::vector<Envoy::RateLimit::Descriptor>& descriptors,\n             Tracing::Span& parent_span) override;\n\n  // Grpc::AsyncRequestCallbacks\n  void onCreateInitialMetadata(Http::RequestHeaderMap&) override {}\n  void onSuccess(std::unique_ptr<envoy::service::ratelimit::v3::RateLimitResponse>&& response,\n                 Tracing::Span& span) override;\n  void onFailure(Grpc::Status::GrpcStatus status, const std::string& message,\n                 Tracing::Span& span) override;\n\nprivate:\n  Grpc::AsyncClient<envoy::service::ratelimit::v3::RateLimitRequest,\n                    envoy::service::ratelimit::v3::RateLimitResponse>\n      async_client_;\n  Grpc::AsyncRequest* request_{};\n  absl::optional<std::chrono::milliseconds> timeout_;\n  RequestCallbacks* callbacks_{};\n  const Protobuf::MethodDescriptor& service_method_;\n  const envoy::config::core::v3::ApiVersion transport_api_version_;\n};\n\n/**\n * Builds the rate limit client.\n */\nClientPtr rateLimitClient(Server::Configuration::FactoryContext& context,\n                          const envoy::config::core::v3::GrpcService& grpc_service,\n                          const std::chrono::milliseconds timeout,\n                          envoy::config::core::v3::ApiVersion transport_api_version);\n\n} // namespace RateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/ratelimit/stat_names.h",
    "content": "#pragma once\n\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RateLimit {\n\n// Captures a set of stat-names needed for recording during rate-limit\n// filters. These should generally be initialized once per process, and\n// not per-request, to avoid lock contention.\nstruct StatNames {\n  explicit StatNames(Stats::SymbolTable& symbol_table)\n      : pool_(symbol_table), ok_(pool_.add(\"ratelimit.ok\")), error_(pool_.add(\"ratelimit.error\")),\n        failure_mode_allowed_(pool_.add(\"ratelimit.failure_mode_allowed\")),\n        over_limit_(pool_.add(\"ratelimit.over_limit\")) {}\n  Stats::StatNamePool pool_;\n  Stats::StatName ok_;\n  Stats::StatName error_;\n  Stats::StatName failure_mode_allowed_;\n  Stats::StatName over_limit_;\n};\n\n} // namespace RateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/rbac/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    deps = [\n        \":engine_lib\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"matchers_lib\",\n    srcs = [\"matchers.cc\"],\n    hdrs = [\"matchers.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/network:cidr_range_lib\",\n        \"//source/extensions/filters/common/expr:evaluator_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"engine_interface\",\n    hdrs = [\"engine.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/network:connection_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"engine_lib\",\n    srcs = [\"engine_impl.cc\"],\n    hdrs = [\"engine_impl.h\"],\n    deps = [\n        \"//source/extensions/filters/common/rbac:engine_interface\",\n        \"//source/extensions/filters/common/rbac:matchers_lib\",\n        \"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/common/rbac/engine.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\n\n/**\n * Shared logic for evaluating RBAC policies.\n */\nclass RoleBasedAccessControlEngine {\npublic:\n  virtual ~RoleBasedAccessControlEngine() = default;\n\n  /**\n   * Handles action-specific operations and returns whether or not the request is permitted.\n   *\n   * @param connection the downstream connection used to identify the action/principal.\n   * @param headers    the headers of the incoming request used to identify the action/principal. An\n   *                   empty map should be used if there are no headers available.\n   * @param info       the per-request or per-connection stream info with additional information\n   *                   about the action/principal. Can be modified by the LOG Action.\n   * @param effective_policy_id  it will be filled by the matching policy's ID,\n   *                   which is used to identity the source of the allow/deny.\n   */\n  virtual bool handleAction(const Network::Connection& connection,\n                            const Envoy::Http::RequestHeaderMap& headers,\n                            StreamInfo::StreamInfo& info,\n                            std::string* effective_policy_id) const PURE;\n\n  /**\n   * Handles action-specific operations and returns whether or not the request is permitted.\n   *\n   * @param connection the downstream connection used to identify the action/principal.\n   * @param info       the per-request or per-connection stream info with additional information\n   *                   about the action/principal. 
Can be modified by the LOG Action.\n   * @param effective_policy_id  it will be filled by the matching policy's ID,\n   *                   which is used to identity the source of the allow/deny.\n   */\n  virtual bool handleAction(const Network::Connection& connection, StreamInfo::StreamInfo& info,\n                            std::string* effective_policy_id) const PURE;\n};\n\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/rbac/engine_impl.cc",
    "content": "#include \"extensions/filters/common/rbac/engine_impl.h\"\n\n#include \"envoy/config/rbac/v3/rbac.pb.h\"\n\n#include \"common/http/header_map_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\n\nRoleBasedAccessControlEngineImpl::RoleBasedAccessControlEngineImpl(\n    const envoy::config::rbac::v3::RBAC& rules, const EnforcementMode mode)\n    : action_(rules.action()), mode_(mode) {\n  // guard expression builder by presence of a condition in policies\n  for (const auto& policy : rules.policies()) {\n    if (policy.second.has_condition()) {\n      builder_ = Expr::createBuilder(&constant_arena_);\n      break;\n    }\n  }\n\n  for (const auto& policy : rules.policies()) {\n    policies_.emplace(policy.first, std::make_unique<PolicyMatcher>(policy.second, builder_.get()));\n  }\n}\n\nbool RoleBasedAccessControlEngineImpl::handleAction(const Network::Connection& connection,\n                                                    StreamInfo::StreamInfo& info,\n                                                    std::string* effective_policy_id) const {\n  return handleAction(connection, *Http::StaticEmptyHeaders::get().request_headers, info,\n                      effective_policy_id);\n}\n\nbool RoleBasedAccessControlEngineImpl::handleAction(const Network::Connection& connection,\n                                                    const Envoy::Http::RequestHeaderMap& headers,\n                                                    StreamInfo::StreamInfo& info,\n                                                    std::string* effective_policy_id) const {\n  bool matched = checkPolicyMatch(connection, info, headers, effective_policy_id);\n\n  switch (action_) {\n  case envoy::config::rbac::v3::RBAC::ALLOW:\n    return matched;\n  case envoy::config::rbac::v3::RBAC::DENY:\n    return !matched;\n  case envoy::config::rbac::v3::RBAC::LOG: {\n    // If not shadow enforcement, set shared log 
metadata\n    if (mode_ != EnforcementMode::Shadow) {\n      ProtobufWkt::Struct log_metadata;\n      auto& log_fields = *log_metadata.mutable_fields();\n      log_fields[DynamicMetadataKeysSingleton::get().AccessLogKey].set_bool_value(matched);\n      info.setDynamicMetadata(DynamicMetadataKeysSingleton::get().CommonNamespace, log_metadata);\n    }\n\n    return true;\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nbool RoleBasedAccessControlEngineImpl::checkPolicyMatch(\n    const Network::Connection& connection, const StreamInfo::StreamInfo& info,\n    const Envoy::Http::RequestHeaderMap& headers, std::string* effective_policy_id) const {\n  bool matched = false;\n\n  for (const auto& policy : policies_) {\n    if (policy.second->matches(connection, headers, info)) {\n      matched = true;\n      if (effective_policy_id != nullptr) {\n        *effective_policy_id = policy.first;\n      }\n      break;\n    }\n  }\n\n  return matched;\n}\n\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/rbac/engine_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/rbac/v3/rbac.pb.h\"\n\n#include \"extensions/filters/common/rbac/engine.h\"\n#include \"extensions/filters/common/rbac/matchers.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\n\nclass DynamicMetadataKeys {\npublic:\n  const std::string ShadowEffectivePolicyIdField{\"shadow_effective_policy_id\"};\n  const std::string ShadowEngineResultField{\"shadow_engine_result\"};\n  const std::string EngineResultAllowed{\"allowed\"};\n  const std::string EngineResultDenied{\"denied\"};\n  const std::string AccessLogKey{\"access_log_hint\"};\n  const std::string CommonNamespace{\"envoy.common\"};\n};\n\nusing DynamicMetadataKeysSingleton = ConstSingleton<DynamicMetadataKeys>;\n\nenum class EnforcementMode { Enforced, Shadow };\n\nclass RoleBasedAccessControlEngineImpl : public RoleBasedAccessControlEngine, NonCopyable {\npublic:\n  RoleBasedAccessControlEngineImpl(const envoy::config::rbac::v3::RBAC& rules,\n                                   const EnforcementMode mode = EnforcementMode::Enforced);\n\n  bool handleAction(const Network::Connection& connection,\n                    const Envoy::Http::RequestHeaderMap& headers, StreamInfo::StreamInfo& info,\n                    std::string* effective_policy_id) const override;\n\n  bool handleAction(const Network::Connection& connection, StreamInfo::StreamInfo& info,\n                    std::string* effective_policy_id) const override;\n\nprivate:\n  // Checks whether the request matches any policies\n  bool checkPolicyMatch(const Network::Connection& connection, const StreamInfo::StreamInfo& info,\n                        const Envoy::Http::RequestHeaderMap& headers,\n                        std::string* effective_policy_id) const;\n\n  const envoy::config::rbac::v3::RBAC::Action action_;\n  const EnforcementMode mode_;\n\n  std::map<std::string, std::unique_ptr<PolicyMatcher>> policies_;\n\n  Protobuf::Arena 
constant_arena_;\n  Expr::BuilderPtr builder_;\n};\n\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/rbac/matchers.cc",
    "content": "#include \"extensions/filters/common/rbac/matchers.h\"\n\n#include \"envoy/config/rbac/v3/rbac.pb.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\n\nMatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v3::Permission& permission) {\n  switch (permission.rule_case()) {\n  case envoy::config::rbac::v3::Permission::RuleCase::kAndRules:\n    return std::make_shared<const AndMatcher>(permission.and_rules());\n  case envoy::config::rbac::v3::Permission::RuleCase::kOrRules:\n    return std::make_shared<const OrMatcher>(permission.or_rules());\n  case envoy::config::rbac::v3::Permission::RuleCase::kHeader:\n    return std::make_shared<const HeaderMatcher>(permission.header());\n  case envoy::config::rbac::v3::Permission::RuleCase::kDestinationIp:\n    return std::make_shared<const IPMatcher>(permission.destination_ip(),\n                                             IPMatcher::Type::DownstreamLocal);\n  case envoy::config::rbac::v3::Permission::RuleCase::kDestinationPort:\n    return std::make_shared<const PortMatcher>(permission.destination_port());\n  case envoy::config::rbac::v3::Permission::RuleCase::kAny:\n    return std::make_shared<const AlwaysMatcher>();\n  case envoy::config::rbac::v3::Permission::RuleCase::kMetadata:\n    return std::make_shared<const MetadataMatcher>(permission.metadata());\n  case envoy::config::rbac::v3::Permission::RuleCase::kNotRule:\n    return std::make_shared<const NotMatcher>(permission.not_rule());\n  case envoy::config::rbac::v3::Permission::RuleCase::kRequestedServerName:\n    return std::make_shared<const RequestedServerNameMatcher>(permission.requested_server_name());\n  case envoy::config::rbac::v3::Permission::RuleCase::kUrlPath:\n    return std::make_shared<const PathMatcher>(permission.url_path());\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nMatcherConstSharedPtr Matcher::create(const 
envoy::config::rbac::v3::Principal& principal) {\n  switch (principal.identifier_case()) {\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kAndIds:\n    return std::make_shared<const AndMatcher>(principal.and_ids());\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kOrIds:\n    return std::make_shared<const OrMatcher>(principal.or_ids());\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kAuthenticated:\n    return std::make_shared<const AuthenticatedMatcher>(principal.authenticated());\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kSourceIp:\n    return std::make_shared<const IPMatcher>(principal.source_ip(),\n                                             IPMatcher::Type::ConnectionRemote);\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kDirectRemoteIp:\n    return std::make_shared<const IPMatcher>(principal.direct_remote_ip(),\n                                             IPMatcher::Type::DownstreamDirectRemote);\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kRemoteIp:\n    return std::make_shared<const IPMatcher>(principal.remote_ip(),\n                                             IPMatcher::Type::DownstreamRemote);\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kHeader:\n    return std::make_shared<const HeaderMatcher>(principal.header());\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kAny:\n    return std::make_shared<const AlwaysMatcher>();\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kMetadata:\n    return std::make_shared<const MetadataMatcher>(principal.metadata());\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kNotId:\n    return std::make_shared<const NotMatcher>(principal.not_id());\n  case envoy::config::rbac::v3::Principal::IdentifierCase::kUrlPath:\n    return std::make_shared<const PathMatcher>(principal.url_path());\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nAndMatcher::AndMatcher(const 
envoy::config::rbac::v3::Permission::Set& set) {\n  for (const auto& rule : set.rules()) {\n    matchers_.push_back(Matcher::create(rule));\n  }\n}\n\nAndMatcher::AndMatcher(const envoy::config::rbac::v3::Principal::Set& set) {\n  for (const auto& id : set.ids()) {\n    matchers_.push_back(Matcher::create(id));\n  }\n}\n\nbool AndMatcher::matches(const Network::Connection& connection,\n                         const Envoy::Http::RequestHeaderMap& headers,\n                         const StreamInfo::StreamInfo& info) const {\n  for (const auto& matcher : matchers_) {\n    if (!matcher->matches(connection, headers, info)) {\n      return false;\n    }\n  }\n\n  return true;\n}\n\nOrMatcher::OrMatcher(const Protobuf::RepeatedPtrField<envoy::config::rbac::v3::Permission>& rules) {\n  for (const auto& rule : rules) {\n    matchers_.push_back(Matcher::create(rule));\n  }\n}\n\nOrMatcher::OrMatcher(const Protobuf::RepeatedPtrField<envoy::config::rbac::v3::Principal>& ids) {\n  for (const auto& id : ids) {\n    matchers_.push_back(Matcher::create(id));\n  }\n}\n\nbool OrMatcher::matches(const Network::Connection& connection,\n                        const Envoy::Http::RequestHeaderMap& headers,\n                        const StreamInfo::StreamInfo& info) const {\n  for (const auto& matcher : matchers_) {\n    if (matcher->matches(connection, headers, info)) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\nbool NotMatcher::matches(const Network::Connection& connection,\n                         const Envoy::Http::RequestHeaderMap& headers,\n                         const StreamInfo::StreamInfo& info) const {\n  return !matcher_->matches(connection, headers, info);\n}\n\nbool HeaderMatcher::matches(const Network::Connection&,\n                            const Envoy::Http::RequestHeaderMap& headers,\n                            const StreamInfo::StreamInfo&) const {\n  return Envoy::Http::HeaderUtility::matchHeaders(headers, header_);\n}\n\nbool 
IPMatcher::matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap&,\n                        const StreamInfo::StreamInfo& info) const {\n  Envoy::Network::Address::InstanceConstSharedPtr ip;\n  switch (type_) {\n  case ConnectionRemote:\n    ip = connection.remoteAddress();\n    break;\n  case DownstreamLocal:\n    ip = info.downstreamLocalAddress();\n    break;\n  case DownstreamDirectRemote:\n    ip = info.downstreamDirectRemoteAddress();\n    break;\n  case DownstreamRemote:\n    ip = info.downstreamRemoteAddress();\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  return range_.isInRange(*ip.get());\n}\n\nbool PortMatcher::matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap&,\n                          const StreamInfo::StreamInfo& info) const {\n  const Envoy::Network::Address::Ip* ip = info.downstreamLocalAddress().get()->ip();\n  return ip && ip->port() == port_;\n}\n\nbool AuthenticatedMatcher::matches(const Network::Connection& connection,\n                                   const Envoy::Http::RequestHeaderMap&,\n                                   const StreamInfo::StreamInfo&) const {\n  const auto& ssl = connection.ssl();\n  if (!ssl) { // connection was not authenticated\n    return false;\n  } else if (!matcher_.has_value()) { // matcher allows any subject\n    return true;\n  }\n\n  // If set, The URI SAN  or DNS SAN in that order is used as Principal, otherwise the subject field\n  // is used.\n  if (!ssl->uriSanPeerCertificate().empty()) {\n    for (const std::string& uri : ssl->uriSanPeerCertificate()) {\n      if (matcher_.value().match(uri)) {\n        return true;\n      }\n    }\n  }\n  if (!ssl->dnsSansPeerCertificate().empty()) {\n    for (const std::string& dns : ssl->dnsSansPeerCertificate()) {\n      if (matcher_.value().match(dns)) {\n        return true;\n      }\n    }\n  }\n  return matcher_.value().match(ssl->subjectPeerCertificate());\n}\n\nbool 
MetadataMatcher::matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap&,\n                              const StreamInfo::StreamInfo& info) const {\n  return matcher_.match(info.dynamicMetadata());\n}\n\nbool PolicyMatcher::matches(const Network::Connection& connection,\n                            const Envoy::Http::RequestHeaderMap& headers,\n                            const StreamInfo::StreamInfo& info) const {\n  return permissions_.matches(connection, headers, info) &&\n         principals_.matches(connection, headers, info) &&\n         (expr_ == nullptr ? true : Expr::matches(*expr_, info, headers));\n}\n\nbool RequestedServerNameMatcher::matches(const Network::Connection& connection,\n                                         const Envoy::Http::RequestHeaderMap&,\n                                         const StreamInfo::StreamInfo&) const {\n  return match(connection.requestedServerName());\n}\n\nbool PathMatcher::matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap& headers,\n                          const StreamInfo::StreamInfo&) const {\n  if (headers.Path() == nullptr) {\n    return false;\n  }\n  return path_matcher_.match(headers.getPathValue());\n}\n\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/rbac/matchers.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/rbac/v3/rbac.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/type/matcher/v3/path.pb.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/network/cidr_range.h\"\n\n#include \"extensions/filters/common/expr/evaluator.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\n\nclass Matcher;\nusing MatcherConstSharedPtr = std::shared_ptr<const Matcher>;\n\n/**\n *  Matchers describe the rules for matching either a permission action or principal.\n */\nclass Matcher {\npublic:\n  virtual ~Matcher() = default;\n\n  /**\n   * Returns whether or not the permission/principal matches the rules of the matcher.\n   *\n   * @param connection the downstream connection used to match against.\n   * @param headers    the request headers used to match against. 
An empty map should be used if\n   *                   there are none headers available.\n   * @param info       the additional information about the action/principal.\n   */\n  virtual bool matches(const Network::Connection& connection,\n                       const Envoy::Http::RequestHeaderMap& headers,\n                       const StreamInfo::StreamInfo& info) const PURE;\n\n  /**\n   * Creates a shared instance of a matcher based off the rules defined in the Permission config\n   * proto message.\n   */\n  static MatcherConstSharedPtr create(const envoy::config::rbac::v3::Permission& permission);\n\n  /**\n   * Creates a shared instance of a matcher based off the rules defined in the Principal config\n   * proto message.\n   */\n  static MatcherConstSharedPtr create(const envoy::config::rbac::v3::Principal& principal);\n};\n\n/**\n * Always matches, returning true for any input.\n */\nclass AlwaysMatcher : public Matcher {\npublic:\n  bool matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap&,\n               const StreamInfo::StreamInfo&) const override {\n    return true;\n  }\n};\n\n/**\n * A composite matcher where all sub-matchers must match for this to return true. Evaluation\n * short-circuits on the first non-match.\n */\nclass AndMatcher : public Matcher {\npublic:\n  AndMatcher(const envoy::config::rbac::v3::Permission::Set& rules);\n  AndMatcher(const envoy::config::rbac::v3::Principal::Set& ids);\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo&) const override;\n\nprivate:\n  std::vector<MatcherConstSharedPtr> matchers_;\n};\n\n/**\n * A composite matcher where only one sub-matcher must match for this to return true. 
Evaluation\n * short-circuits on the first match.\n */\nclass OrMatcher : public Matcher {\npublic:\n  OrMatcher(const envoy::config::rbac::v3::Permission::Set& set) : OrMatcher(set.rules()) {}\n  OrMatcher(const envoy::config::rbac::v3::Principal::Set& set) : OrMatcher(set.ids()) {}\n  OrMatcher(const Protobuf::RepeatedPtrField<envoy::config::rbac::v3::Permission>& rules);\n  OrMatcher(const Protobuf::RepeatedPtrField<envoy::config::rbac::v3::Principal>& ids);\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo&) const override;\n\nprivate:\n  std::vector<MatcherConstSharedPtr> matchers_;\n};\n\nclass NotMatcher : public Matcher {\npublic:\n  NotMatcher(const envoy::config::rbac::v3::Permission& permission)\n      : matcher_(Matcher::create(permission)) {}\n  NotMatcher(const envoy::config::rbac::v3::Principal& principal)\n      : matcher_(Matcher::create(principal)) {}\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo&) const override;\n\nprivate:\n  MatcherConstSharedPtr matcher_;\n};\n\n/**\n * Perform a match against any HTTP header (or pseudo-header, such as `:path` or `:authority`). Will\n * always fail to match on any non-HTTP connection.\n */\nclass HeaderMatcher : public Matcher {\npublic:\n  HeaderMatcher(const envoy::config::route::v3::HeaderMatcher& matcher) : header_(matcher) {}\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo&) const override;\n\nprivate:\n  const Envoy::Http::HeaderUtility::HeaderData header_;\n};\n\n/**\n * Perform a match against an IP CIDR range. 
This rule can be applied to connection remote,\n * downstream local address, downstream direct remote address or downstream remote address.\n */\nclass IPMatcher : public Matcher {\npublic:\n  enum Type { ConnectionRemote = 0, DownstreamLocal, DownstreamDirectRemote, DownstreamRemote };\n\n  IPMatcher(const envoy::config::core::v3::CidrRange& range, Type type)\n      : range_(Network::Address::CidrRange::create(range)), type_(type) {}\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo& info) const override;\n\nprivate:\n  const Network::Address::CidrRange range_;\n  const Type type_;\n};\n\n/**\n * Matches the port number of the destination (local) address.\n */\nclass PortMatcher : public Matcher {\npublic:\n  PortMatcher(const uint32_t port) : port_(port) {}\n\n  bool matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap&,\n               const StreamInfo::StreamInfo& info) const override;\n\nprivate:\n  const uint32_t port_;\n};\n\n/**\n * Matches the principal name as described in the peer certificate. Uses the URI SAN first. If that\n * field is not present, uses the subject instead.\n */\nclass AuthenticatedMatcher : public Matcher {\npublic:\n  AuthenticatedMatcher(const envoy::config::rbac::v3::Principal::Authenticated& auth)\n      : matcher_(auth.has_principal_name()\n                     ? absl::make_optional<Matchers::StringMatcherImpl>(auth.principal_name())\n                     : absl::nullopt) {}\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo&) const override;\n\nprivate:\n  const absl::optional<Matchers::StringMatcherImpl> matcher_;\n};\n\n/**\n * Matches a Policy which is a collection of permission and principal matchers. 
If any action\n * matches a permission, the principals are then checked for a match.\n * The condition is a conjunction clause.\n */\nclass PolicyMatcher : public Matcher, NonCopyable {\npublic:\n  PolicyMatcher(const envoy::config::rbac::v3::Policy& policy, Expr::Builder* builder)\n      : permissions_(policy.permissions()), principals_(policy.principals()),\n        condition_(policy.condition()) {\n    if (policy.has_condition()) {\n      expr_ = Expr::createExpression(*builder, condition_);\n    }\n  }\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo&) const override;\n\nprivate:\n  const OrMatcher permissions_;\n  const OrMatcher principals_;\n\n  const google::api::expr::v1alpha1::Expr condition_;\n  Expr::ExpressionPtr expr_;\n};\n\nclass MetadataMatcher : public Matcher {\npublic:\n  MetadataMatcher(const Envoy::Matchers::MetadataMatcher& matcher) : matcher_(matcher) {}\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo& info) const override;\n\nprivate:\n  const Envoy::Matchers::MetadataMatcher matcher_;\n};\n\n/**\n * Perform a match against the request server from the client's connection\n * request. This is typically TLS SNI.\n */\nclass RequestedServerNameMatcher : public Matcher, Envoy::Matchers::StringMatcherImpl {\npublic:\n  RequestedServerNameMatcher(const envoy::type::matcher::v3::StringMatcher& requested_server_name)\n      : Envoy::Matchers::StringMatcherImpl(requested_server_name) {}\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo&) const override;\n};\n\n/**\n * Perform a match against the path header on the HTTP request. 
The query and fragment string are\n * removed from the path header before matching.\n */\nclass PathMatcher : public Matcher {\npublic:\n  PathMatcher(const envoy::type::matcher::v3::PathMatcher& path_matcher)\n      : path_matcher_(path_matcher) {}\n\n  bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers,\n               const StreamInfo::StreamInfo&) const override;\n\nprivate:\n  const Matchers::PathMatcher path_matcher_;\n};\n\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/rbac/utility.cc",
    "content": "#include \"extensions/filters/common/rbac/utility.h\"\n\n#include <string>\n\n#include \"absl/strings/str_replace.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\n\nRoleBasedAccessControlFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n  const std::string final_prefix = prefix + \"rbac.\";\n  return {ALL_RBAC_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))};\n}\n\nstd::string responseDetail(const std::string& policy_id) {\n  // TODO(alyssawilk): put this as a StreamInfo utility and apply to all response details.\n  // Replace whitespaces in policy_id with '_' to avoid breaking the access log (inconsistent number\n  // of segments between log entries when the separator is whitespace).\n  const absl::flat_hash_map<std::string, std::string> replacement{\n      {\" \", \"_\"}, {\"\\t\", \"_\"}, {\"\\f\", \"_\"}, {\"\\v\", \"_\"}, {\"\\n\", \"_\"}, {\"\\r\", \"_\"}};\n  std::string sanitized = absl::StrReplaceAll(policy_id, replacement);\n  return fmt::format(\"rbac_access_denied_matched_policy[{}]\", sanitized);\n}\n\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/common/rbac/utility.h",
    "content": "#pragma once\n\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/common/rbac/engine_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\n\n/**\n * All stats for the RBAC filter. @see stats_macros.h\n */\n#define ALL_RBAC_FILTER_STATS(COUNTER)                                                             \\\n  COUNTER(allowed)                                                                                 \\\n  COUNTER(denied)                                                                                  \\\n  COUNTER(shadow_allowed)                                                                          \\\n  COUNTER(shadow_denied)\n\n/**\n * Wrapper struct for RBAC filter stats. @see stats_macros.h\n */\nstruct RoleBasedAccessControlFilterStats {\n  ALL_RBAC_FILTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nRoleBasedAccessControlFilterStats generateStats(const std::string& prefix, Stats::Scope& scope);\n\ntemplate <class ConfigType>\nstd::unique_ptr<RoleBasedAccessControlEngineImpl> createEngine(const ConfigType& config) {\n  return config.has_rules() ? std::make_unique<RoleBasedAccessControlEngineImpl>(\n                                  config.rules(), EnforcementMode::Enforced)\n                            : nullptr;\n}\n\ntemplate <class ConfigType>\nstd::unique_ptr<RoleBasedAccessControlEngineImpl> createShadowEngine(const ConfigType& config) {\n  return config.has_shadow_rules() ? std::make_unique<RoleBasedAccessControlEngineImpl>(\n                                         config.shadow_rules(), EnforcementMode::Shadow)\n                                   : nullptr;\n}\n\nstd::string responseDetail(const std::string& policy_id);\n\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # Well known names are public.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/config:well_known_names\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/adaptive_concurrency/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that dynamically adjusts the number of allowed concurrent\n# requests based on sampled latencies.\n# Public docs: docs/root/configuration/http_filters/adaptive_concurrency_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"adaptive_concurrency_filter_lib\",\n    srcs = [\"adaptive_concurrency_filter.cc\"],\n    hdrs = [\"adaptive_concurrency_filter.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/adaptive_concurrency/controller:controller_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/adaptive_concurrency:adaptive_concurrency_filter_lib\",\n        \"//source/extensions/filters/http/adaptive_concurrency/controller:controller_lib\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc",
    "content": "#include \"extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/adaptive_concurrency/controller/controller.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdaptiveConcurrency {\n\nAdaptiveConcurrencyFilterConfig::AdaptiveConcurrencyFilterConfig(\n    const envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency&\n        proto_config,\n    Runtime::Loader& runtime, std::string stats_prefix, Stats::Scope&, TimeSource& time_source)\n    : stats_prefix_(std::move(stats_prefix)), time_source_(time_source),\n      adaptive_concurrency_feature_(proto_config.enabled(), runtime) {}\n\nAdaptiveConcurrencyFilter::AdaptiveConcurrencyFilter(\n    AdaptiveConcurrencyFilterConfigSharedPtr config, ConcurrencyControllerSharedPtr controller)\n    : config_(std::move(config)), controller_(std::move(controller)) {}\n\nHttp::FilterHeadersStatus AdaptiveConcurrencyFilter::decodeHeaders(Http::RequestHeaderMap&, bool) {\n  // In addition to not sampling if the filter is disabled, health checks should also not be sampled\n  // by the concurrency controller since they may potentially bias the sample aggregate to lower\n  // latency measurements.\n  if (!config_->filterEnabled() || decoder_callbacks_->streamInfo().healthCheck()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (controller_->forwardingDecision() == Controller::RequestForwardingAction::Block) {\n    decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, \"reached concurrency limit\",\n                                       nullptr, absl::nullopt, 
\"reached_concurrency_limit\");\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  // When the deferred_sample_task_ object is destroyed, the request start time is sampled. This\n  // occurs either when encoding is complete or during destruction of this filter object.\n  const auto now = config_->timeSource().monotonicTime();\n  deferred_sample_task_ =\n      std::make_unique<Cleanup>([this, now]() { controller_->recordLatencySample(now); });\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nvoid AdaptiveConcurrencyFilter::encodeComplete() { deferred_sample_task_.reset(); }\n\nvoid AdaptiveConcurrencyFilter::onDestroy() {\n  if (deferred_sample_task_) {\n    // The sampling task hasn't been destroyed yet, so this implies we did not complete encoding.\n    // Let's stop the sampling from happening and perform request cleanup inside the controller.\n    //\n    // TODO (tonya11en): Return some RAII handle from the concurrency controller that performs this\n    // logic as part of its lifecycle.\n    deferred_sample_task_->cancel();\n    controller_->cancelLatencySample();\n  }\n}\n\n} // namespace AdaptiveConcurrency\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/runtime/runtime_protos.h\"\n\n#include \"extensions/filters/http/adaptive_concurrency/controller/controller.h\"\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdaptiveConcurrency {\n\n/**\n * Configuration for the adaptive concurrency limit filter.\n */\nclass AdaptiveConcurrencyFilterConfig {\npublic:\n  AdaptiveConcurrencyFilterConfig(\n      const envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency&\n          proto_config,\n      Runtime::Loader& runtime, std::string stats_prefix, Stats::Scope& scope,\n      TimeSource& time_source);\n\n  bool filterEnabled() const { return adaptive_concurrency_feature_.enabled(); }\n  TimeSource& timeSource() const { return time_source_; }\n\nprivate:\n  const std::string stats_prefix_;\n  TimeSource& time_source_;\n  Runtime::FeatureFlag adaptive_concurrency_feature_;\n};\n\nusing AdaptiveConcurrencyFilterConfigSharedPtr =\n    std::shared_ptr<const AdaptiveConcurrencyFilterConfig>;\nusing ConcurrencyControllerSharedPtr = std::shared_ptr<Controller::ConcurrencyController>;\n\n/**\n * A filter that samples request latencies and dynamically adjusts the request\n * concurrency window.\n */\nclass AdaptiveConcurrencyFilter : public Http::PassThroughFilter,\n                                  Logger::Loggable<Logger::Id::filter> {\npublic:\n  AdaptiveConcurrencyFilter(AdaptiveConcurrencyFilterConfigSharedPtr config,\n                            ConcurrencyControllerSharedPtr 
controller);\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override;\n\n  // Http::StreamEncoderFilter\n  void encodeComplete() override;\n  void onDestroy() override;\n\nprivate:\n  AdaptiveConcurrencyFilterConfigSharedPtr config_;\n  const ConcurrencyControllerSharedPtr controller_;\n  std::unique_ptr<Cleanup> deferred_sample_task_;\n};\n\n} // namespace AdaptiveConcurrency\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/adaptive_concurrency/config.cc",
    "content": "#include \"extensions/filters/http/adaptive_concurrency/config.h\"\n\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h\"\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h\"\n#include \"extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdaptiveConcurrency {\n\nHttp::FilterFactoryCb AdaptiveConcurrencyFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency& config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n\n  auto acc_stats_prefix = stats_prefix + \"adaptive_concurrency.\";\n\n  std::shared_ptr<Controller::ConcurrencyController> controller;\n  using Proto = envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency;\n  ASSERT(config.concurrency_controller_config_case() ==\n         Proto::ConcurrencyControllerConfigCase::kGradientControllerConfig);\n  auto gradient_controller_config =\n      Controller::GradientControllerConfig(config.gradient_controller_config(), context.runtime());\n  controller = std::make_shared<Controller::GradientController>(\n      std::move(gradient_controller_config), context.dispatcher(), context.runtime(),\n      acc_stats_prefix + \"gradient_controller.\", context.scope(), context.api().randomGenerator(),\n      context.timeSource());\n\n  AdaptiveConcurrencyFilterConfigSharedPtr filter_config(\n      new AdaptiveConcurrencyFilterConfig(config, context.runtime(), std::move(acc_stats_prefix),\n                                          context.scope(), context.timeSource()));\n\n  return [filter_config, 
controller](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(\n        std::make_shared<AdaptiveConcurrencyFilter>(filter_config, controller));\n  };\n}\n\n/**\n * Static registration for the adaptive_concurrency filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(AdaptiveConcurrencyFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace AdaptiveConcurrency\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/adaptive_concurrency/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h\"\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdaptiveConcurrency {\n\n/**\n * Config registration for the adaptive concurrency limit filter. @see NamedHttpFilterConfigFactory.\n */\nclass AdaptiveConcurrencyFilterFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency> {\npublic:\n  AdaptiveConcurrencyFilterFactory() : FactoryBase(HttpFilterNames::get().AdaptiveConcurrency) {}\n\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency&\n          proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace AdaptiveConcurrency\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/adaptive_concurrency/controller/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that dynamically adjusts the number of allowed concurrent\n# requests based on sampled latencies.\n# Public docs: TODO (tonya11en)\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"controller_lib\",\n    srcs = [\"gradient_controller.cc\"],\n    hdrs = [\n        \"controller.h\",\n        \"gradient_controller.h\",\n    ],\n    external_deps = [\n        \"libcircllhist\",\n    ],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/adaptive_concurrency/controller/controller.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdaptiveConcurrency {\nnamespace Controller {\n\n/**\n * The controller's decision on whether a request will be forwarded.\n */\nenum class RequestForwardingAction {\n  // The concurrency limit is exceeded, so the request cannot be forwarded.\n  Block,\n\n  // The controller has allowed the request through and changed its internal\n  // state. The request must be forwarded.\n  Forward\n};\n\n/**\n * Adaptive concurrency controller interface. All implementations of this\n * interface must be thread-safe.\n */\nclass ConcurrencyController {\npublic:\n  virtual ~ConcurrencyController() = default;\n\n  /**\n   * Called during decoding when the adaptive concurrency filter is attempting\n   * to forward a request. Returns its decision on whether to forward a request.\n   */\n  virtual RequestForwardingAction forwardingDecision() PURE;\n\n  /**\n   * Called during encoding when the request latency is known. Records the\n   * request latency to update the internal state of the controller for\n   * concurrency limit calculations.\n   *\n   * @param rq_send_time the time point which the sampled request was sent\n   */\n  virtual void recordLatencySample(MonotonicTime rq_send_time) PURE;\n\n  /**\n   * Omit sampling an outstanding request and update the internal state of the controller to reflect\n   * request completion.\n   */\n  virtual void cancelLatencySample() PURE;\n\n  /**\n   * Returns the current concurrency limit.\n   */\n  virtual uint32_t concurrencyLimit() const PURE;\n};\n\n} // namespace Controller\n} // namespace AdaptiveConcurrency\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc",
    "content": "#include \"extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h\"\n\n#include <atomic>\n#include <chrono>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/adaptive_concurrency/controller/controller.h\"\n\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdaptiveConcurrency {\nnamespace Controller {\n\nGradientControllerConfig::GradientControllerConfig(\n    const envoy::extensions::filters::http::adaptive_concurrency::v3::GradientControllerConfig&\n        proto_config,\n    Runtime::Loader& runtime)\n    : runtime_(runtime),\n      min_rtt_calc_interval_(std::chrono::milliseconds(\n          DurationUtil::durationToMilliseconds(proto_config.min_rtt_calc_params().interval()))),\n      sample_rtt_calc_interval_(std::chrono::milliseconds(DurationUtil::durationToMilliseconds(\n          proto_config.concurrency_limit_params().concurrency_update_interval()))),\n      jitter_pct_(\n          PROTOBUF_PERCENT_TO_DOUBLE_OR_DEFAULT(proto_config.min_rtt_calc_params(), jitter, 15)),\n      max_concurrency_limit_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          proto_config.concurrency_limit_params(), max_concurrency_limit, 1000)),\n      min_rtt_aggregate_request_count_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config.min_rtt_calc_params(), request_count, 50)),\n      sample_aggregate_percentile_(\n          PROTOBUF_PERCENT_TO_DOUBLE_OR_DEFAULT(proto_config, sample_aggregate_percentile, 50)),\n      min_concurrency_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config.min_rtt_calc_params(), 
min_concurrency, 3)),\n      min_rtt_buffer_pct_(\n          PROTOBUF_PERCENT_TO_DOUBLE_OR_DEFAULT(proto_config.min_rtt_calc_params(), buffer, 25)) {}\nGradientController::GradientController(GradientControllerConfig config,\n                                       Event::Dispatcher& dispatcher, Runtime::Loader&,\n                                       const std::string& stats_prefix, Stats::Scope& scope,\n                                       Random::RandomGenerator& random, TimeSource& time_source)\n    : config_(std::move(config)), dispatcher_(dispatcher), scope_(scope),\n      stats_(generateStats(scope_, stats_prefix)), random_(random), time_source_(time_source),\n      deferred_limit_value_(0), num_rq_outstanding_(0),\n      concurrency_limit_(config_.minConcurrency()),\n      latency_sample_hist_(hist_fast_alloc(), hist_free) {\n  min_rtt_calc_timer_ = dispatcher_.createTimer([this]() -> void { enterMinRTTSamplingWindow(); });\n\n  sample_reset_timer_ = dispatcher_.createTimer([this]() -> void {\n    if (inMinRTTSamplingWindow()) {\n      // The minRTT sampling window started since the sample reset timer was enabled last. 
Since the\n      // minRTT value is being calculated, let's give up on this timer to avoid blocking the\n      // dispatcher thread and rely on it being enabled again as part of the minRTT calculation.\n      return;\n    }\n\n    {\n      absl::MutexLock ml(&sample_mutation_mtx_);\n      resetSampleWindow();\n    }\n\n    sample_reset_timer_->enableTimer(config_.sampleRTTCalcInterval());\n  });\n\n  enterMinRTTSamplingWindow();\n  sample_reset_timer_->enableTimer(config_.sampleRTTCalcInterval());\n  stats_.concurrency_limit_.set(concurrency_limit_.load());\n}\n\nGradientControllerStats GradientController::generateStats(Stats::Scope& scope,\n                                                          const std::string& stats_prefix) {\n  return {ALL_GRADIENT_CONTROLLER_STATS(POOL_COUNTER_PREFIX(scope, stats_prefix),\n                                        POOL_GAUGE_PREFIX(scope, stats_prefix))};\n}\n\nvoid GradientController::enterMinRTTSamplingWindow() {\n  // There a potential race condition where setting the minimum concurrency multiple times in a row\n  // resets the minRTT sampling timer and triggers the calculation immediately. This could occur\n  // after the minRTT sampling window has already been entered, so we can simply return here knowing\n  // the desired action is already being performed.\n  if (inMinRTTSamplingWindow()) {\n    return;\n  }\n\n  absl::MutexLock ml(&sample_mutation_mtx_);\n\n  stats_.min_rtt_calculation_active_.set(1);\n\n  // Set the minRTT flag to indicate we're gathering samples to update the value. 
This will\n  // prevent the sample window from resetting until enough requests are gathered to complete the\n  // recalculation.\n  deferred_limit_value_.store(GradientController::concurrencyLimit());\n  updateConcurrencyLimit(config_.minConcurrency());\n\n  // Throw away any latency samples from before the recalculation window as it may not represent\n  // the minRTT.\n  hist_clear(latency_sample_hist_.get());\n\n  min_rtt_epoch_ = time_source_.monotonicTime();\n}\n\nvoid GradientController::updateMinRTT() {\n  ASSERT(inMinRTTSamplingWindow());\n\n  {\n    absl::MutexLock ml(&sample_mutation_mtx_);\n    min_rtt_ = processLatencySamplesAndClear();\n    stats_.min_rtt_msecs_.set(\n        std::chrono::duration_cast<std::chrono::milliseconds>(min_rtt_).count());\n    updateConcurrencyLimit(deferred_limit_value_.load());\n    deferred_limit_value_.store(0);\n    stats_.min_rtt_calculation_active_.set(0);\n  }\n\n  min_rtt_calc_timer_->enableTimer(\n      applyJitter(config_.minRTTCalcInterval(), config_.jitterPercent()));\n  sample_reset_timer_->enableTimer(config_.sampleRTTCalcInterval());\n}\n\nstd::chrono::milliseconds GradientController::applyJitter(std::chrono::milliseconds interval,\n                                                          double jitter_pct) const {\n  if (jitter_pct == 0) {\n    return interval;\n  }\n\n  const uint32_t jitter_range_ms = interval.count() * jitter_pct;\n  return std::chrono::milliseconds(interval.count() + (random_.random() % jitter_range_ms));\n}\n\nvoid GradientController::resetSampleWindow() {\n  // The sampling window must not be reset while sampling for the new minRTT value.\n  ASSERT(!inMinRTTSamplingWindow());\n\n  if (hist_sample_count(latency_sample_hist_.get()) == 0) {\n    return;\n  }\n\n  sample_rtt_ = processLatencySamplesAndClear();\n  stats_.sample_rtt_msecs_.set(\n      std::chrono::duration_cast<std::chrono::milliseconds>(sample_rtt_).count());\n  
updateConcurrencyLimit(calculateNewLimit());\n}\n\nstd::chrono::microseconds GradientController::processLatencySamplesAndClear() {\n  const std::array<double, 1> quantile{config_.sampleAggregatePercentile()};\n  std::array<double, 1> calculated_quantile;\n  hist_approx_quantile(latency_sample_hist_.get(), quantile.data(), 1, calculated_quantile.data());\n  hist_clear(latency_sample_hist_.get());\n  return std::chrono::microseconds(static_cast<int>(calculated_quantile[0]));\n}\n\nuint32_t GradientController::calculateNewLimit() {\n  ASSERT(sample_rtt_.count() > 0);\n\n  // Calculate the gradient value, ensuring it's clamped between 0.5 and 2.0.\n  // This prevents extreme changes in the concurrency limit between each sample\n  // window.\n  const auto buffered_min_rtt = min_rtt_.count() + min_rtt_.count() * config_.minRTTBufferPercent();\n  const double raw_gradient = static_cast<double>(buffered_min_rtt) / sample_rtt_.count();\n  const double gradient = std::max<double>(0.5, std::min<double>(2.0, raw_gradient));\n  stats_.gradient_.set(gradient);\n\n  const double limit = concurrencyLimit() * gradient;\n  const double burst_headroom = sqrt(limit);\n  stats_.burst_queue_size_.set(burst_headroom);\n\n  // The final concurrency value factors in the burst headroom and must be clamped to keep the value\n  // in the range [configured_min, configured_max].\n  const uint32_t new_limit = limit + burst_headroom;\n  return std::max<uint32_t>(config_.minConcurrency(),\n                            std::min<uint32_t>(config_.maxConcurrencyLimit(), new_limit));\n}\n\nRequestForwardingAction GradientController::forwardingDecision() {\n  // Note that a race condition exists here which would allow more outstanding requests than the\n  // concurrency limit bounded by the number of worker threads. 
After loading num_rq_outstanding_\n  // and before loading concurrency_limit_, another thread could potentially swoop in and modify\n  // num_rq_outstanding_, causing us to move forward with stale values and increment\n  // num_rq_outstanding_.\n  //\n  // TODO (tonya11en): Reconsider using a CAS loop here.\n  if (num_rq_outstanding_.load() < concurrencyLimit()) {\n    ++num_rq_outstanding_;\n    return RequestForwardingAction::Forward;\n  }\n  stats_.rq_blocked_.inc();\n  return RequestForwardingAction::Block;\n}\n\nvoid GradientController::recordLatencySample(MonotonicTime rq_send_time) {\n  ASSERT(num_rq_outstanding_.load() > 0);\n  --num_rq_outstanding_;\n\n  if (rq_send_time < min_rtt_epoch_) {\n    // Disregard samples from requests started in the previous minRTT window.\n    return;\n  }\n\n  const std::chrono::microseconds rq_latency =\n      std::chrono::duration_cast<std::chrono::microseconds>(time_source_.monotonicTime() -\n                                                            rq_send_time);\n  uint32_t sample_count;\n  {\n    absl::MutexLock ml(&sample_mutation_mtx_);\n    hist_insert(latency_sample_hist_.get(), rq_latency.count(), 1);\n    sample_count = hist_sample_count(latency_sample_hist_.get());\n  }\n\n  if (inMinRTTSamplingWindow() && sample_count >= config_.minRTTAggregateRequestCount()) {\n    // This sample has pushed the request count over the request count requirement for the minRTT\n    // recalculation. 
It must now be finished.\n    updateMinRTT();\n  }\n}\n\nvoid GradientController::cancelLatencySample() {\n  ASSERT(num_rq_outstanding_.load() > 0);\n  --num_rq_outstanding_;\n}\n\nvoid GradientController::updateConcurrencyLimit(const uint32_t new_limit) {\n  const auto old_limit = concurrency_limit_.load();\n  concurrency_limit_.store(new_limit);\n  stats_.concurrency_limit_.set(concurrency_limit_.load());\n\n  if (!inMinRTTSamplingWindow() && old_limit == config_.minConcurrency() &&\n      new_limit == config_.minConcurrency()) {\n    ++consecutive_min_concurrency_set_;\n  } else {\n    consecutive_min_concurrency_set_ = 0;\n  }\n\n  // If the concurrency limit is being set to the minimum value for the 5th consecutive sample\n  // window while not in the middle of a minRTT measurement, this might be indicative of an\n  // inaccurate minRTT measurement. Since the limit is already where it needs to be for a minRTT\n  // measurement, we should measure it again.\n  //\n  // There is a possibility that the minRTT measurement begins before we are able to\n  // cancel/re-enable the timer below and triggers overlapping minRTT windows. To protect against\n  // this, there is an explicit check when entering the minRTT measurement that ensures there is\n  // only a single minRTT measurement active at a time.\n  if (consecutive_min_concurrency_set_ >= 5) {\n    min_rtt_calc_timer_->enableTimer(std::chrono::milliseconds(0));\n  }\n}\n\n} // namespace Controller\n} // namespace AdaptiveConcurrency\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <vector>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"extensions/filters/http/adaptive_concurrency/controller/controller.h\"\n\n#include \"absl/base/thread_annotations.h\"\n#include \"absl/strings/numbers.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"circllhist.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdaptiveConcurrency {\nnamespace Controller {\n\n/**\n * All stats for the gradient controller.\n */\n#define ALL_GRADIENT_CONTROLLER_STATS(COUNTER, GAUGE)                                              \\\n  COUNTER(rq_blocked)                                                                              \\\n  GAUGE(burst_queue_size, NeverImport)                                                             \\\n  GAUGE(concurrency_limit, NeverImport)                                                            \\\n  GAUGE(gradient, NeverImport)                                                                     \\\n  GAUGE(min_rtt_calculation_active, Accumulate)                                                    \\\n  GAUGE(min_rtt_msecs, NeverImport)                                                                \\\n  GAUGE(sample_rtt_msecs, NeverImport)\n\n/**\n * Wrapper struct for gradient controller stats. 
@see stats_macros.h\n */\nstruct GradientControllerStats {\n  ALL_GRADIENT_CONTROLLER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\nclass GradientControllerConfig : public Logger::Loggable<Logger::Id::filter> {\npublic:\n  GradientControllerConfig(\n      const envoy::extensions::filters::http::adaptive_concurrency::v3::GradientControllerConfig&\n          proto_config,\n      Runtime::Loader& runtime);\n\n  std::chrono::milliseconds minRTTCalcInterval() const {\n    const auto ms = runtime_.snapshot().getInteger(RuntimeKeys::get().MinRTTCalcIntervalKey,\n                                                   min_rtt_calc_interval_.count());\n    return std::chrono::milliseconds(ms);\n  }\n\n  std::chrono::milliseconds sampleRTTCalcInterval() const {\n    const auto ms = runtime_.snapshot().getInteger(RuntimeKeys::get().SampleRTTCalcIntervalKey,\n                                                   sample_rtt_calc_interval_.count());\n    return std::chrono::milliseconds(ms);\n  }\n\n  uint32_t maxConcurrencyLimit() const {\n    return runtime_.snapshot().getInteger(RuntimeKeys::get().MaxConcurrencyLimitKey,\n                                          max_concurrency_limit_);\n  }\n\n  uint32_t minRTTAggregateRequestCount() const {\n    return runtime_.snapshot().getInteger(RuntimeKeys::get().MinRTTAggregateRequestCountKey,\n                                          min_rtt_aggregate_request_count_);\n  }\n\n  // The percentage is normalized to the range [0.0, 1.0].\n  double sampleAggregatePercentile() const {\n    const double val = runtime_.snapshot().getDouble(\n        RuntimeKeys::get().SampleAggregatePercentileKey, sample_aggregate_percentile_);\n    return std::max(0.0, std::min(val, 100.0)) / 100.0;\n  }\n\n  // The percentage is normalized to the range [0.0, 1.0].\n  double jitterPercent() const {\n    const double val =\n        runtime_.snapshot().getDouble(RuntimeKeys::get().JitterPercentKey, jitter_pct_);\n    return std::max(0.0, 
std::min(val, 100.0)) / 100.0;\n  }\n\n  uint32_t minConcurrency() const {\n    return runtime_.snapshot().getInteger(RuntimeKeys::get().MinConcurrencyKey, min_concurrency_);\n  }\n\n  // The percentage is normalized to the range [0.0, 1.0].\n  double minRTTBufferPercent() const {\n    const double val = runtime_.snapshot().getDouble(RuntimeKeys::get().MinRTTBufferPercentKey,\n                                                     min_rtt_buffer_pct_);\n    return std::max(0.0, std::min(val, 100.0)) / 100.0;\n  }\n\nprivate:\n  class RuntimeKeyValues {\n  public:\n    const std::string MinRTTCalcIntervalKey =\n        \"adaptive_concurrency.gradient_controller.min_rtt_calc_interval_ms\";\n    const std::string SampleRTTCalcIntervalKey =\n        \"adaptive_concurrency.gradient_controller.sample_rtt_calc_interval_ms\";\n    const std::string MaxConcurrencyLimitKey =\n        \"adaptive_concurrency.gradient_controller.max_concurrency_limit\";\n    const std::string MinRTTAggregateRequestCountKey =\n        \"adaptive_concurrency.gradient_controller.min_rtt_aggregate_request_count\";\n    const std::string SampleAggregatePercentileKey =\n        \"adaptive_concurrency.gradient_controller.sample_aggregate_percentile\";\n    const std::string JitterPercentKey = \"adaptive_concurrency.gradient_controller.jitter\";\n    const std::string MinConcurrencyKey =\n        \"adaptive_concurrency.gradient_controller.min_concurrency\";\n    const std::string MinRTTBufferPercentKey =\n        \"adaptive_concurrency.gradient_controller.min_rtt_buffer\";\n  };\n\n  using RuntimeKeys = ConstSingleton<RuntimeKeyValues>;\n\n  Runtime::Loader& runtime_;\n\n  // The measured request round-trip time under ideal conditions.\n  const std::chrono::milliseconds min_rtt_calc_interval_;\n\n  // The measured sample round-trip milliseconds from the previous time window.\n  const std::chrono::milliseconds sample_rtt_calc_interval_;\n\n  // Randomized time delta added to the start of the minRTT 
calculation window.\n  const double jitter_pct_;\n\n  // The maximum allowed concurrency value.\n  const uint32_t max_concurrency_limit_;\n\n  // The number of requests to aggregate/sample during the minRTT recalculation.\n  const uint32_t min_rtt_aggregate_request_count_;\n\n  // The percentile value considered when processing samples.\n  const double sample_aggregate_percentile_;\n\n  // The concurrency limit set while measuring the minRTT.\n  const uint32_t min_concurrency_;\n\n  // The amount added to the measured minRTT as a hedge against natural variability in latency.\n  const double min_rtt_buffer_pct_;\n};\nusing GradientControllerConfigSharedPtr = std::shared_ptr<GradientControllerConfig>;\n\n/**\n * A concurrency controller that implements a variation of the Gradient algorithm described in:\n *\n * https://medium.com/@NetflixTechBlog/performance-under-load-3e6fa9a60581\n *\n * This is used to control the allowed request concurrency limit in the adaptive concurrency control\n * filter.\n *\n * The algorithm:\n * ==============\n * An ideal round-trip time (minRTT) is measured periodically by only allowing a small number of\n * outstanding requests at a time and measuring the round-trip time to the upstream. This\n * information is then used in the calculation of a number called the gradient, using time-sampled\n * latencies (sampleRTT):\n *\n *     gradient = minRTT / sampleRTT\n *\n * This gradient value has a useful property, such that it decreases as the sampled latencies\n * increase. The value is then used to periodically update the concurrency limit via:\n *\n *     limit = old_limit * gradient\n *     new_limit = limit + headroom\n *\n * The headroom value allows for request bursts and is also the driving factor behind increasing the\n * concurrency limit when the sampleRTT is in the same ballpark as the minRTT. 
This value must be\n * present in the calculation, since it forces the concurrency limit to increase until there is a\n * deviation from the minRTT latency. In its absence, the concurrency limit could remain stagnant at\n * an unnecessarily small value if sampleRTT ~= minRTT. Therefore, the headroom value is\n * unconfigurable and is set to the square-root of the new limit.\n *\n * Sampling:\n * =========\n * The controller makes use of latency samples to either determine the minRTT or the sampleRTT which\n * is used to periodically update the concurrency limit. Each calculation occurs at separate\n * configurable frequencies and they may not occur at the same time. To prevent this, there exists a\n * concept of mutually exclusive sampling windows.\n *\n * When the gradient controller is instantiated, it starts inside of a minRTT calculation window\n * (indicated by inMinRTTSamplingWindow() returning true) and the concurrency limit is pinned to the\n * configured min_concurrency. This window lasts until the configured number of requests is\n * received, the minRTT value is updated, and the minRTT value is set by a single worker thread. To\n * prevent sampleRTT calculations from triggering during this window, the update window mutex is\n * held. Since it's necessary for a worker thread to know which update window the mutex is\n * held for, they check the state of inMinRTTSamplingWindow() after each sample. When the minRTT\n * calculation is complete, a timer is set to trigger the next minRTT sampling window by the worker\n * thread who updates the minRTT value.\n *\n * If the controller is not in a minRTT sampling window, it's possible that the controller is in a\n * sampleRTT calculation window. In this, all of the latency samples are consolidated into a\n * configurable quantile value to represent the measured latencies. 
This quantile value sets\n * sampleRTT and the concurrency limit is updated as described in the algorithm section above.\n *\n * When not in a sampling window, the controller is simply servicing the adaptive concurrency filter\n * via the public functions.\n *\n * Locking:\n * ========\n * There are 2 mutually exclusive calculation windows, so the sample mutation mutex is held to\n * prevent the overlap of these windows. It is necessary for a worker thread to know specifically if\n * the controller is inside of a minRTT recalculation window during the recording of a latency\n * sample, so this extra bit of information is stored in inMinRTTSamplingWindow().\n */\nclass GradientController : public ConcurrencyController {\npublic:\n  GradientController(GradientControllerConfig config, Event::Dispatcher& dispatcher,\n                     Runtime::Loader& runtime, const std::string& stats_prefix, Stats::Scope& scope,\n                     Random::RandomGenerator& random, TimeSource& time_source);\n\n  // ConcurrencyController.\n  RequestForwardingAction forwardingDecision() override;\n  void recordLatencySample(MonotonicTime rq_send_time) override;\n  void cancelLatencySample() override;\n  uint32_t concurrencyLimit() const override { return concurrency_limit_.load(); }\n\nprivate:\n  static GradientControllerStats generateStats(Stats::Scope& scope,\n                                               const std::string& stats_prefix);\n  void updateMinRTT();\n  std::chrono::microseconds processLatencySamplesAndClear()\n      ABSL_EXCLUSIVE_LOCKS_REQUIRED(sample_mutation_mtx_);\n  uint32_t calculateNewLimit() ABSL_EXCLUSIVE_LOCKS_REQUIRED(sample_mutation_mtx_);\n  void enterMinRTTSamplingWindow();\n  bool inMinRTTSamplingWindow() const { return deferred_limit_value_.load() > 0; }\n  void resetSampleWindow() ABSL_EXCLUSIVE_LOCKS_REQUIRED(sample_mutation_mtx_);\n  void updateConcurrencyLimit(const uint32_t new_limit)\n      ABSL_EXCLUSIVE_LOCKS_REQUIRED(sample_mutation_mtx_);\n 
 std::chrono::milliseconds applyJitter(std::chrono::milliseconds interval,\n                                        double jitter_pct) const;\n\n  const GradientControllerConfig config_;\n  Event::Dispatcher& dispatcher_;\n  Stats::Scope& scope_;\n  GradientControllerStats stats_;\n  Random::RandomGenerator& random_;\n  TimeSource& time_source_;\n\n  // Protects data related to latency sampling and RTT values. In addition to protecting the latency\n  // sample histogram, the mutex ensures that the minRTT calculation window and the sample window\n  // (where the new concurrency limit is determined) do not overlap.\n  absl::Mutex sample_mutation_mtx_;\n\n  // Stores the value of the concurrency limit prior to entering the minRTT update window. If this\n  // is non-zero, then we are actively in the minRTT sampling window.\n  std::atomic<uint32_t> deferred_limit_value_;\n\n  // Stores the expected upstream latency value under ideal conditions with the added buffer to\n  // account for variable latencies. This is the numerator in the gradient value.\n  std::chrono::nanoseconds min_rtt_;\n\n  // Stores the aggregated sampled latencies for use in the gradient calculation.\n  std::chrono::nanoseconds sample_rtt_ ABSL_GUARDED_BY(sample_mutation_mtx_);\n\n  // Tracks the count of requests that have been forwarded whose replies have\n  // not been sampled yet. Atomicity is required because this variable is used to make the\n  // forwarding decision without locking.\n  std::atomic<uint32_t> num_rq_outstanding_;\n\n  // Stores the current concurrency limit. 
Atomicity is required because this variable is used to\n  // make the forwarding decision without locking.\n  std::atomic<uint32_t> concurrency_limit_;\n\n  // Stores all sampled latencies and provides percentile estimations when using the sampled data to\n  // calculate a new concurrency limit.\n  std::unique_ptr<histogram_t, decltype(&hist_free)>\n      latency_sample_hist_ ABSL_GUARDED_BY(sample_mutation_mtx_);\n\n  // Tracks the number of consecutive times that the concurrency limit is set to the minimum. This\n  // is used to determine whether the controller should trigger an additional minRTT measurement\n  // after remaining at the minimum limit for too long.\n  uint32_t consecutive_min_concurrency_set_ ABSL_GUARDED_BY(sample_mutation_mtx_);\n\n  // We will disregard sampling any requests admitted before this timestamp to prevent sampling\n  // requests admitted before the start of a minRTT window and potentially skewing the minRTT.\n  MonotonicTime min_rtt_epoch_;\n\n  Event::TimerPtr min_rtt_calc_timer_;\n  Event::TimerPtr sample_reset_timer_;\n};\nusing GradientControllerSharedPtr = std::shared_ptr<GradientController>;\n\n} // namespace Controller\n} // namespace AdaptiveConcurrency\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that probabilistically rejects requests based on upstream success-rate.\n# Public docs: docs/root/configuration/http_filters/admission_control.rst\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"admission_control_filter_lib\",\n    srcs = [\n        \"admission_control.cc\",\n        \"thread_local_controller.cc\",\n    ],\n    hdrs = [\n        \"admission_control.h\",\n        \"thread_local_controller.h\",\n    ],\n    security_posture = \"unknown\",\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/admission_control:admission_control_filter_lib\",\n        \"//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/admission_control.cc",
    "content": "#include \"extensions/filters/http/admission_control/admission_control.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\n\nusing GrpcStatus = Grpc::Status::GrpcStatus;\n\nstatic constexpr double defaultAggression = 1.0;\nstatic constexpr double defaultSuccessRateThreshold = 95.0;\n\nAdmissionControlFilterConfig::AdmissionControlFilterConfig(\n    const AdmissionControlProto& proto_config, Runtime::Loader& runtime,\n    Random::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls,\n    std::shared_ptr<ResponseEvaluator> response_evaluator)\n    : random_(random), scope_(scope), tls_(std::move(tls)),\n      admission_control_feature_(proto_config.enabled(), runtime),\n      aggression_(proto_config.has_aggression()\n                      ? std::make_unique<Runtime::Double>(proto_config.aggression(), runtime)\n                      : nullptr),\n      sr_threshold_(proto_config.has_sr_threshold() ? 
std::make_unique<Runtime::Percentage>(\n                                                          proto_config.sr_threshold(), runtime)\n                                                    : nullptr),\n      response_evaluator_(std::move(response_evaluator)) {}\n\ndouble AdmissionControlFilterConfig::aggression() const {\n  return std::max<double>(1.0, aggression_ ? aggression_->value() : defaultAggression);\n}\n\ndouble AdmissionControlFilterConfig::successRateThreshold() const {\n  const double pct = sr_threshold_ ? sr_threshold_->value() : defaultSuccessRateThreshold;\n  return std::min<double>(pct, 100.0) / 100.0;\n}\n\nAdmissionControlFilter::AdmissionControlFilter(AdmissionControlFilterConfigSharedPtr config,\n                                               const std::string& stats_prefix)\n    : config_(std::move(config)), stats_(generateStats(config_->scope(), stats_prefix)),\n      record_request_(true) {}\n\nHttp::FilterHeadersStatus AdmissionControlFilter::decodeHeaders(Http::RequestHeaderMap&, bool) {\n  if (!config_->filterEnabled() || decoder_callbacks_->streamInfo().healthCheck()) {\n    // We must forego recording the success/failure of this request during encoding.\n    record_request_ = false;\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (shouldRejectRequest()) {\n    // We do not want to sample requests that we are rejecting, since this taints the measurements\n    // that should be describing the upstreams. 
In addition, if we were to record the requests\n    // rejected, the rejection probabilities would not converge back to 0 even if the upstream\n    // success rate returns to 100%.\n    record_request_ = false;\n\n    stats_.rq_rejected_.inc();\n    decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, \"\", nullptr, absl::nullopt,\n                                       \"denied by admission control\");\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterHeadersStatus AdmissionControlFilter::encodeHeaders(Http::ResponseHeaderMap& headers,\n                                                                bool end_stream) {\n  // TODO(tonya11en): It's not possible for an HTTP filter to understand why a stream is reset, so\n  // we are not currently accounting for resets when recording requests.\n\n  if (!record_request_) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  bool successful_response = false;\n  if (Grpc::Common::isGrpcResponseHeaders(headers, end_stream)) {\n    absl::optional<GrpcStatus> grpc_status = Grpc::Common::getGrpcStatus(headers);\n\n    // If the GRPC status isn't found in the headers, it must be found in the trailers.\n    expect_grpc_status_in_trailer_ = !grpc_status.has_value();\n    if (expect_grpc_status_in_trailer_) {\n      return Http::FilterHeadersStatus::Continue;\n    }\n\n    const uint32_t status = enumToInt(grpc_status.value());\n    successful_response = config_->responseEvaluator().isGrpcSuccess(status);\n  } else {\n    // HTTP response.\n    const uint64_t http_status = Http::Utility::getResponseStatus(headers);\n    successful_response = config_->responseEvaluator().isHttpSuccess(http_status);\n  }\n\n  if (successful_response) {\n    recordSuccess();\n  } else {\n    recordFailure();\n  }\n\n  return 
Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterTrailersStatus\nAdmissionControlFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) {\n  if (expect_grpc_status_in_trailer_) {\n    absl::optional<GrpcStatus> grpc_status = Grpc::Common::getGrpcStatus(trailers, false);\n\n    if (grpc_status.has_value() &&\n        config_->responseEvaluator().isGrpcSuccess(grpc_status.value())) {\n      recordSuccess();\n    } else {\n      recordFailure();\n    }\n  }\n\n  return Http::FilterTrailersStatus::Continue;\n}\n\nbool AdmissionControlFilter::shouldRejectRequest() const {\n  // This formula is documented in the admission control filter documentation:\n  // https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/admission_control_filter.html\n  const auto request_counts = config_->getController().requestCounts();\n  const double total_requests = request_counts.requests;\n  const double successful_requests = request_counts.successes;\n  double probability = total_requests - successful_requests / config_->successRateThreshold();\n  probability = probability / (total_requests + 1);\n  const auto aggression = config_->aggression();\n  if (aggression != 1.0) {\n    probability = std::pow(probability, 1.0 / aggression);\n  }\n\n  // Choosing an accuracy of 4 significant figures for the probability.\n  static constexpr uint64_t accuracy = 1e4;\n  auto r = config_->random().random();\n  return (accuracy * std::max(probability, 0.0)) > (r % accuracy);\n}\n\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/admission_control.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/common/logger.h\"\n#include \"common/grpc/common.h\"\n#include \"common/grpc/status.h\"\n#include \"common/http/codes.h\"\n#include \"common/runtime/runtime_protos.h\"\n\n#include \"extensions/filters/http/admission_control/evaluators/response_evaluator.h\"\n#include \"extensions/filters/http/admission_control/thread_local_controller.h\"\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\n\n/**\n * All stats for the admission control filter.\n */\n#define ALL_ADMISSION_CONTROL_STATS(COUNTER)                                                       \\\n  COUNTER(rq_rejected)                                                                             \\\n  COUNTER(rq_success)                                                                              \\\n  COUNTER(rq_failure)\n\n/**\n * Wrapper struct for admission control filter stats. 
@see stats_macros.h\n */\nstruct AdmissionControlStats {\n  ALL_ADMISSION_CONTROL_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nusing AdmissionControlProto =\n    envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl;\n\n/**\n * Configuration for the admission control filter.\n */\nclass AdmissionControlFilterConfig {\npublic:\n  AdmissionControlFilterConfig(const AdmissionControlProto& proto_config, Runtime::Loader& runtime,\n                               Random::RandomGenerator& random, Stats::Scope& scope,\n                               ThreadLocal::SlotPtr&& tls,\n                               std::shared_ptr<ResponseEvaluator> response_evaluator);\n  virtual ~AdmissionControlFilterConfig() = default;\n\n  virtual ThreadLocalController& getController() const {\n    return tls_->getTyped<ThreadLocalControllerImpl>();\n  }\n\n  Random::RandomGenerator& random() const { return random_; }\n  bool filterEnabled() const { return admission_control_feature_.enabled(); }\n  Stats::Scope& scope() const { return scope_; }\n  double aggression() const;\n  double successRateThreshold() const;\n  ResponseEvaluator& responseEvaluator() const { return *response_evaluator_; }\n\nprivate:\n  Random::RandomGenerator& random_;\n  Stats::Scope& scope_;\n  const ThreadLocal::SlotPtr tls_;\n  Runtime::FeatureFlag admission_control_feature_;\n  std::unique_ptr<Runtime::Double> aggression_;\n  std::unique_ptr<Runtime::Percentage> sr_threshold_;\n  std::shared_ptr<ResponseEvaluator> response_evaluator_;\n};\n\nusing AdmissionControlFilterConfigSharedPtr = std::shared_ptr<const AdmissionControlFilterConfig>;\n\n/**\n * A filter that probabilistically rejects requests based on upstream success-rate.\n */\nclass AdmissionControlFilter : public Http::PassThroughFilter,\n                               protected Logger::Loggable<Logger::Id::filter> {\npublic:\n  AdmissionControlFilter(AdmissionControlFilterConfigSharedPtr config,\n                         const 
std::string& stats_prefix);\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override;\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override;\n\nprivate:\n  static AdmissionControlStats generateStats(Stats::Scope& scope, const std::string& prefix) {\n    return {ALL_ADMISSION_CONTROL_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n  }\n\n  bool shouldRejectRequest() const;\n\n  void recordSuccess() {\n    stats_.rq_success_.inc();\n    config_->getController().recordSuccess();\n  }\n\n  void recordFailure() {\n    stats_.rq_failure_.inc();\n    config_->getController().recordFailure();\n  }\n\n  const AdmissionControlFilterConfigSharedPtr config_;\n  AdmissionControlStats stats_;\n  bool expect_grpc_status_in_trailer_{false};\n\n  // If false, the filter will forego recording a request success or failure during encoding.\n  bool record_request_;\n};\n\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/config.cc",
    "content": "#include \"extensions/filters/http/admission_control/config.h\"\n\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h\"\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/enum_to_int.h\"\n\n#include \"extensions/filters/http/admission_control/admission_control.h\"\n#include \"extensions/filters/http/admission_control/evaluators/response_evaluator.h\"\n#include \"extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\n\nstatic constexpr std::chrono::seconds defaultSamplingWindow{30};\n\nHttp::FilterFactoryCb AdmissionControlFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n\n  const std::string prefix = stats_prefix + \"admission_control.\";\n\n  // Create the thread-local controller.\n  auto tls = context.threadLocal().allocateSlot();\n  auto sampling_window = std::chrono::seconds(\n      PROTOBUF_GET_MS_OR_DEFAULT(config, sampling_window, 1000 * defaultSamplingWindow.count()) /\n      1000);\n  tls->set(\n      [sampling_window, &context](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n        return std::make_shared<ThreadLocalControllerImpl>(context.timeSource(), sampling_window);\n      });\n\n  std::unique_ptr<ResponseEvaluator> response_evaluator;\n  switch (config.evaluation_criteria_case()) {\n  case AdmissionControlProto::EvaluationCriteriaCase::kSuccessCriteria:\n    response_evaluator = std::make_unique<SuccessCriteriaEvaluator>(config.success_criteria());\n    break;\n  case AdmissionControlProto::EvaluationCriteriaCase::EVALUATION_CRITERIA_NOT_SET:\n    
NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  AdmissionControlFilterConfigSharedPtr filter_config =\n      std::make_shared<AdmissionControlFilterConfig>(\n          config, context.runtime(), context.api().randomGenerator(), context.scope(),\n          std::move(tls), std::move(response_evaluator));\n\n  return [filter_config, prefix](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<AdmissionControlFilter>(filter_config, prefix));\n  };\n}\n\n/**\n * Static registration for the admission_control filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(AdmissionControlFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h\"\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\n\n/**\n * Config registration for the adaptive concurrency limit filter. @see NamedHttpFilterConfigFactory.\n */\nclass AdmissionControlFilterFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl> {\npublic:\n  AdmissionControlFilterFactory() : FactoryBase(HttpFilterNames::get().AdmissionControl) {}\n\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl&\n          proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/evaluators/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that probabilistically rejects requests based on upstream success-rate.\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"response_evaluator_lib\",\n    srcs = [\"success_criteria_evaluator.cc\"],\n    hdrs = [\n        \"response_evaluator.h\",\n        \"success_criteria_evaluator.h\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/grpc:status\",\n        \"//source/common/common:enum_to_int\",\n        \"@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/evaluators/response_evaluator.h",
    "content": "#pragma once\n\n#include <cstdint>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\n\n/**\n * Determines of a request was successful based on response headers.\n */\nclass ResponseEvaluator {\npublic:\n  virtual ~ResponseEvaluator() = default;\n\n  /**\n   * Returns true if the provided HTTP code constitutes a success.\n   */\n  virtual bool isHttpSuccess(uint64_t code) const PURE;\n\n  /**\n   * Returns true if the provided gRPC status counts constitutes a success.\n   */\n  virtual bool isGrpcSuccess(uint32_t status) const PURE;\n};\n\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.cc",
    "content": "#include \"extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h\"\n\n#include <algorithm>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/grpc/status.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\n\nSuccessCriteriaEvaluator::SuccessCriteriaEvaluator(const SuccessCriteria& success_criteria) {\n  // HTTP status.\n  if (success_criteria.has_http_criteria()) {\n    for (const auto& range : success_criteria.http_criteria().http_success_status()) {\n      if (!validHttpRange(range.start(), range.end())) {\n        throw EnvoyException(\n            fmt::format(\"invalid HTTP range: [{}, {})\", range.start(), range.end()));\n      }\n\n      const auto start = static_cast<uint64_t>(range.start());\n      const auto end = static_cast<uint64_t>(range.end());\n      http_success_fns_.emplace_back(\n          [start, end](uint64_t status) { return (start <= status) && (status < end); });\n    }\n  } else {\n    // We default to all non-5xx codes as successes.\n    http_success_fns_.emplace_back([](uint64_t status) { return status < 500; });\n  }\n\n  // GRPC status.\n  if (success_criteria.has_grpc_criteria()) {\n    for (const auto& status : success_criteria.grpc_criteria().grpc_success_status()) {\n      if (status > 16) {\n        throw EnvoyException(fmt::format(\"invalid gRPC code {}\", status));\n      }\n\n      grpc_success_codes_.emplace_back(status);\n    }\n  } else {\n    grpc_success_codes_ = {\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::AlreadyExists),\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::Canceled),\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::FailedPrecondition),\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::InvalidArgument),\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::NotFound),\n        
enumToInt(Grpc::Status::WellKnownGrpcStatus::Ok),\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::OutOfRange),\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::PermissionDenied),\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::Unauthenticated),\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::Unimplemented),\n        enumToInt(Grpc::Status::WellKnownGrpcStatus::Unknown),\n    };\n  }\n}\n\nbool SuccessCriteriaEvaluator::isGrpcSuccess(uint32_t status) const {\n  return std::count(grpc_success_codes_.begin(), grpc_success_codes_.end(), status) > 0;\n}\n\nbool SuccessCriteriaEvaluator::isHttpSuccess(uint64_t code) const {\n  return std::any_of(http_success_fns_.begin(), http_success_fns_.end(),\n                     [code](auto fn) { return fn(code); });\n}\n\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h\"\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h\"\n\n#include \"extensions/filters/http/admission_control/evaluators/response_evaluator.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\n\nclass SuccessCriteriaEvaluator : public ResponseEvaluator {\npublic:\n  using SuccessCriteria = envoy::extensions::filters::http::admission_control::v3alpha::\n      AdmissionControl::SuccessCriteria;\n  SuccessCriteriaEvaluator(const SuccessCriteria& evaluation_criteria);\n  // ResponseEvaluator\n  bool isHttpSuccess(uint64_t code) const override;\n  bool isGrpcSuccess(uint32_t status) const override;\n\nprivate:\n  bool validHttpRange(const int32_t start, const int32_t end) const {\n    return start <= end && start < 600 && start >= 100 && end <= 600 && end >= 100;\n  }\n\n  std::vector<std::function<bool(uint64_t)>> http_success_fns_;\n  std::vector<uint32_t> grpc_success_codes_;\n};\n\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/thread_local_controller.cc",
    "content": "#include \"extensions/filters/http/admission_control/thread_local_controller.h\"\n\n#include <cstdint>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/http/codes.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\n\nstatic constexpr std::chrono::seconds defaultHistoryGranularity{1};\n\nThreadLocalControllerImpl::ThreadLocalControllerImpl(TimeSource& time_source,\n                                                     std::chrono::seconds sampling_window)\n    : time_source_(time_source), sampling_window_(sampling_window) {}\n\nvoid ThreadLocalControllerImpl::maybeUpdateHistoricalData() {\n  // Purge stale samples.\n  while (!historical_data_.empty() && ageOfOldestSample() >= sampling_window_) {\n    removeOldestSample();\n  }\n\n  // It's possible we purged stale samples from the history and are left with nothing, so it's\n  // necessary to add an empty entry. We will also need to roll over into a new entry in the\n  // historical data if we've exceeded the time specified by the granularity.\n  if (historical_data_.empty() || ageOfNewestSample() >= defaultHistoryGranularity) {\n    historical_data_.emplace_back(time_source_.monotonicTime(), RequestData());\n  }\n}\n\nvoid ThreadLocalControllerImpl::recordRequest(bool success) {\n  maybeUpdateHistoricalData();\n\n  // The back of the deque will be the most recent samples.\n  ++historical_data_.back().second.requests;\n  ++global_data_.requests;\n  if (success) {\n    ++historical_data_.back().second.successes;\n    ++global_data_.successes;\n  }\n}\n\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/admission_control/thread_local_controller.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\n\n/*\n * Thread-local admission controller interface.\n */\nclass ThreadLocalController {\npublic:\n  struct RequestData {\n    RequestData(uint32_t request_count, uint32_t success_count)\n        : requests(request_count), successes(success_count) {}\n    RequestData() = default;\n\n    inline bool operator==(const RequestData& rhs) const {\n      return (requests == rhs.requests) && (successes == rhs.successes);\n    }\n\n    uint32_t requests{0};\n    uint32_t successes{0};\n  };\n\n  virtual ~ThreadLocalController() = default;\n\n  // Record success/failure of a request and update the internal state of the controller to reflect\n  // this.\n  virtual void recordSuccess() PURE;\n  virtual void recordFailure() PURE;\n\n  // Returns the current number of requests and how many of them are successful.\n  virtual RequestData requestCounts() PURE;\n};\n\n/**\n * Thread-local object to track request counts and successes over a rolling time window. 
Request\n * data for the time window is kept recent via a circular buffer that phases out old request/success\n * counts when recording new samples.\n *\n * This controller is thread-local so that we do not need to take any locks on the sample histories\n * to update them, at the cost of decreasing the number of samples.\n *\n * The look-back window for request samples is accurate up to a hard-coded 1-second granularity.\n * TODO (tonya11en): Allow the granularity to be configurable.\n */\nclass ThreadLocalControllerImpl : public ThreadLocalController,\n                                  public ThreadLocal::ThreadLocalObject {\npublic:\n  ThreadLocalControllerImpl(TimeSource& time_source, std::chrono::seconds sampling_window);\n  ~ThreadLocalControllerImpl() override = default;\n  void recordSuccess() override { recordRequest(true); }\n  void recordFailure() override { recordRequest(false); }\n\n  RequestData requestCounts() override {\n    maybeUpdateHistoricalData();\n    return global_data_;\n  }\n\nprivate:\n  void recordRequest(bool success);\n\n  // Potentially remove any stale samples and record sample aggregates to the historical data.\n  void maybeUpdateHistoricalData();\n\n  // Returns the age of the oldest sample in the historical data.\n  std::chrono::microseconds ageOfOldestSample() const {\n    ASSERT(!historical_data_.empty());\n    using namespace std::chrono;\n    return duration_cast<microseconds>(time_source_.monotonicTime() -\n                                       historical_data_.front().first);\n  }\n\n  // Returns the age of the newest sample in the historical data.\n  std::chrono::microseconds ageOfNewestSample() const {\n    ASSERT(!historical_data_.empty());\n    using namespace std::chrono;\n    return duration_cast<microseconds>(time_source_.monotonicTime() -\n                                       historical_data_.back().first);\n  }\n\n  // Removes the oldest sample in the historical data and reconciles the global data.\n  void 
removeOldestSample() {\n    ASSERT(!historical_data_.empty());\n    global_data_.successes -= historical_data_.front().second.successes;\n    global_data_.requests -= historical_data_.front().second.requests;\n    historical_data_.pop_front();\n  }\n\n  TimeSource& time_source_;\n\n  // Stores samples from oldest (front) to newest (back). Since there is no need to read/modify\n  // entries that are not the oldest or newest (front/back), we can get away with using a deque\n  // which allocates memory in chunks and keeps most elements contiguous and cache-friendly.\n  std::deque<std::pair<MonotonicTime, RequestData>> historical_data_;\n\n  // Request data aggregated for the whole look-back window.\n  RequestData global_data_;\n\n  // The rolling time window size.\n  const std::chrono::seconds sampling_window_;\n};\n\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/aws_lambda/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP AWS Lambda filter\n# Public docs: docs/root/configuration/http_filters/aws_lambda_filter.rst\n\nenvoy_extension_package()\n\nenvoy_proto_library(\n    name = \"request_response\",\n    srcs = [\"request_response.proto\"],\n)\n\nenvoy_cc_library(\n    name = \"aws_lambda_filter_lib\",\n    srcs = [\"aws_lambda_filter.cc\"],\n    hdrs = [\"aws_lambda_filter.h\"],\n    deps = [\n        \":request_response_cc_proto\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/common:base64_lib\",\n        \"//source/extensions/common/aws:credentials_provider_impl_lib\",\n        \"//source/extensions/common/aws:signer_impl_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"alpha\",\n    deps = [\n        \":aws_lambda_filter_lib\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/aws_lambda/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc",
    "content": "#include \"extensions/filters/http/aws_lambda/aws_lambda_filter.h\"\n\n#include <string>\n#include <vector>\n\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/base64.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/hex.h\"\n#include \"common/crypto/utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"source/extensions/filters/http/aws_lambda/request_response.pb.validate.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"absl/strings/numbers.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsLambdaFilter {\n\nclass LambdaFilterNameValues {\npublic:\n  Http::LowerCaseString InvocationTypeHeader{std::string{\"x-amz-invocation-type\"}};\n  Http::LowerCaseString FunctionErrorHeader{std::string{\"x-amz-function-error\"}};\n};\n\nusing LambdaFilterNames = ConstSingleton<LambdaFilterNameValues>;\n\nnamespace {\n\nconstexpr auto filter_metadata_key = \"com.amazonaws.lambda\";\nconstexpr auto egress_gateway_metadata_key = \"egress_gateway\";\n\nvoid setLambdaHeaders(Http::RequestHeaderMap& headers, absl::string_view function_name,\n                      InvocationMode mode) {\n  headers.setMethod(Http::Headers::get().MethodValues.Post);\n  headers.setPath(fmt::format(\"/2015-03-31/functions/{}/invocations\", function_name));\n  if (mode == InvocationMode::Synchronous) {\n    headers.setReference(LambdaFilterNames::get().InvocationTypeHeader, \"RequestResponse\");\n  } else {\n    headers.setReference(LambdaFilterNames::get().InvocationTypeHeader, \"Event\");\n  }\n}\n\n/**\n * Determines if the 
target cluster has the AWS Lambda metadata on it.\n */\nbool isTargetClusterLambdaGateway(Upstream::ClusterInfo const& cluster_info) {\n  using ProtobufWkt::Value;\n  const auto& filter_metadata_map = cluster_info.metadata().filter_metadata();\n  auto metadata_it = filter_metadata_map.find(filter_metadata_key);\n  if (metadata_it == filter_metadata_map.end()) {\n    return false;\n  }\n\n  auto egress_gateway_it = metadata_it->second.fields().find(egress_gateway_metadata_key);\n  if (egress_gateway_it == metadata_it->second.fields().end()) {\n    return false;\n  }\n\n  if (egress_gateway_it->second.kind_case() != Value::KindCase::kBoolValue) {\n    return false;\n  }\n\n  return egress_gateway_it->second.bool_value();\n}\n\nbool isContentTypeTextual(const Http::RequestOrResponseHeaderMap& headers) {\n  // If transfer-encoding is anything other than 'identity' (i.e. chunked, compress, deflate or\n  // gzip) then we want to base64-encode the response body regardless of the content-type value.\n  if (auto encoding_header = headers.TransferEncoding()) {\n    if (!absl::EqualsIgnoreCase(encoding_header->value().getStringView(),\n                                Http::Headers::get().TransferEncodingValues.Identity)) {\n      return false;\n    }\n  }\n\n  // If we don't know the content-type, then we can't make any assumptions.\n  if (!headers.ContentType()) {\n    return false;\n  }\n\n  const Http::LowerCaseString content_type_value{std::string(headers.getContentTypeValue())};\n  if (content_type_value.get() == Http::Headers::get().ContentTypeValues.Json) {\n    return true;\n  }\n\n  if (content_type_value.get() == \"application/javascript\") {\n    return true;\n  }\n\n  if (content_type_value.get() == \"application/xml\") {\n    return true;\n  }\n\n  if (absl::StartsWith(content_type_value.get(), \"text/\")) {\n    return true;\n  }\n\n  return false;\n}\n\n} // namespace\n\nFilter::Filter(const FilterSettings& settings, const FilterStats& stats,\n               
const std::shared_ptr<Extensions::Common::Aws::Signer>& sigv4_signer)\n    : settings_(settings), stats_(stats), sigv4_signer_(sigv4_signer) {}\n\nabsl::optional<FilterSettings> Filter::getRouteSpecificSettings() const {\n  if (!decoder_callbacks_->route() || !decoder_callbacks_->route()->routeEntry()) {\n    return absl::nullopt;\n  }\n  const auto* route_entry = decoder_callbacks_->route()->routeEntry();\n  const auto* settings = route_entry->mostSpecificPerFilterConfigTyped<FilterSettings>(\n      HttpFilterNames::get().AwsLambda);\n  if (!settings) {\n    return absl::nullopt;\n  }\n\n  return *settings;\n}\n\nvoid Filter::resolveSettings() {\n  if (auto route_settings = getRouteSpecificSettings()) {\n    payload_passthrough_ = route_settings->payloadPassthrough();\n    invocation_mode_ = route_settings->invocationMode();\n    arn_ = std::move(route_settings)->arn();\n  } else {\n    payload_passthrough_ = settings_.payloadPassthrough();\n    invocation_mode_ = settings_.invocationMode();\n  }\n}\n\nHttp::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) {\n  auto cluster_info_ptr = decoder_callbacks_->clusterInfo();\n  if (!cluster_info_ptr || !isTargetClusterLambdaGateway(*cluster_info_ptr)) {\n    skip_ = true;\n    ENVOY_LOG(trace, \"Target cluster does not have the Lambda metadata. Moving on.\");\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  resolveSettings();\n\n  if (!arn_) {\n    arn_ = settings_.arn();\n  }\n\n  if (!end_stream) {\n    request_headers_ = &headers;\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  if (payload_passthrough_) {\n    setLambdaHeaders(headers, arn_->functionName(), invocation_mode_);\n    sigv4_signer_->sign(headers);\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Buffer::OwnedImpl json_buf;\n  jsonizeRequest(headers, nullptr, json_buf);\n  // We must call setLambdaHeaders *after* the JSON transformation of the request. 
That way we\n  // reflect the actual incoming request headers instead of the overwritten ones.\n  setLambdaHeaders(headers, arn_->functionName(), invocation_mode_);\n  headers.setContentLength(json_buf.length());\n  headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  auto& hashing_util = Envoy::Common::Crypto::UtilitySingleton::get();\n  const auto hash = Hex::encode(hashing_util.getSha256Digest(json_buf));\n  sigv4_signer_->sign(headers, hash);\n  decoder_callbacks_->addDecodedData(json_buf, false);\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) {\n  if (skip_ || end_stream) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // Check for errors returned by Lambda.\n  // If we detect an error, we skip the encodeData step to hand the error back to the user as is.\n  // Errors can be in the form of HTTP status code or x-amz-function-error header\n  const auto http_status = Http::Utility::getResponseStatus(headers);\n  if (http_status >= 300) {\n    skip_ = true;\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // Just the existence of this header means we have an error, so skip.\n  if (headers.get(LambdaFilterNames::get().FunctionErrorHeader)) {\n    skip_ = true;\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  response_headers_ = &headers;\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\nHttp::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_stream) {\n  if (skip_) {\n    return Http::FilterDataStatus::Continue;\n  }\n\n  if (!end_stream) {\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n\n  auto& hashing_util = Envoy::Common::Crypto::UtilitySingleton::get();\n  decoder_callbacks_->addDecodedData(data, false);\n\n  const Buffer::Instance& decoding_buffer = *decoder_callbacks_->decodingBuffer();\n\n  if (!payload_passthrough_) {\n    
decoder_callbacks_->modifyDecodingBuffer([this](Buffer::Instance& dec_buf) {\n      Buffer::OwnedImpl json_buf;\n      jsonizeRequest(*request_headers_, &dec_buf, json_buf);\n      // effectively swap(data, json_buf)\n      dec_buf.drain(dec_buf.length());\n      dec_buf.move(json_buf);\n    });\n    request_headers_->setContentLength(decoding_buffer.length());\n    request_headers_->setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  }\n\n  setLambdaHeaders(*request_headers_, arn_->functionName(), invocation_mode_);\n  const auto hash = Hex::encode(hashing_util.getSha256Digest(decoding_buffer));\n  sigv4_signer_->sign(*request_headers_, hash);\n  stats().upstream_rq_payload_size_.recordValue(decoding_buffer.length());\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterDataStatus Filter::encodeData(Buffer::Instance& data, bool end_stream) {\n  if (skip_ || payload_passthrough_ || invocation_mode_ == InvocationMode::Asynchronous) {\n    return Http::FilterDataStatus::Continue;\n  }\n\n  if (!end_stream) {\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n\n  ENVOY_LOG(trace, \"Tranforming JSON payload to HTTP response.\");\n  encoder_callbacks_->addEncodedData(data, false);\n  const Buffer::Instance& encoding_buffer = *encoder_callbacks_->encodingBuffer();\n  encoder_callbacks_->modifyEncodingBuffer([this](Buffer::Instance& enc_buf) {\n    Buffer::OwnedImpl body;\n    dejsonizeResponse(*response_headers_, enc_buf, body);\n    enc_buf.drain(enc_buf.length());\n    enc_buf.move(body);\n  });\n  response_headers_->setContentLength(encoding_buffer.length());\n  return Http::FilterDataStatus::Continue;\n}\n\nvoid Filter::jsonizeRequest(Http::RequestHeaderMap const& headers, const Buffer::Instance* body,\n                            Buffer::Instance& out) const {\n  using source::extensions::filters::http::aws_lambda::Request;\n  Request json_req;\n  if (headers.Path()) {\n    
json_req.set_raw_path(std::string(headers.getPathValue()));\n  }\n\n  if (headers.Method()) {\n    json_req.set_method(std::string(headers.getMethodValue()));\n  }\n\n  // Wrap the headers\n  headers.iterate([&json_req](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate {\n    // ignore H2 pseudo-headers\n    if (absl::StartsWith(entry.key().getStringView(), \":\")) {\n      return Http::HeaderMap::Iterate::Continue;\n    }\n    std::string name = std::string(entry.key().getStringView());\n    auto it = json_req.mutable_headers()->find(name);\n    if (it == json_req.headers().end()) {\n      json_req.mutable_headers()->insert({name, std::string(entry.value().getStringView())});\n    } else {\n      // Coalesce headers with multiple values\n      it->second += fmt::format(\",{}\", entry.value().getStringView());\n    }\n    return Http::HeaderMap::Iterate::Continue;\n  });\n\n  // Wrap the Query String\n  if (headers.Path()) {\n    for (auto&& kv_pair : Http::Utility::parseQueryString(headers.getPathValue())) {\n      json_req.mutable_query_string_parameters()->insert({kv_pair.first, kv_pair.second});\n    }\n  }\n\n  // Wrap the body\n  if (body) {\n    if (isContentTypeTextual(headers)) {\n      json_req.set_body(body->toString());\n      json_req.set_is_base64_encoded(false);\n    } else {\n      json_req.set_body(Base64::encode(*body, body->length()));\n      json_req.set_is_base64_encoded(true);\n    }\n  }\n\n  MessageUtil::validate(json_req, ProtobufMessage::getStrictValidationVisitor());\n  const std::string json_data = MessageUtil::getJsonStringFromMessage(\n      json_req, false /* pretty_print  */, true /* always_print_primitive_fields */);\n  out.add(json_data);\n}\n\nvoid Filter::dejsonizeResponse(Http::ResponseHeaderMap& headers, const Buffer::Instance& json_buf,\n                               Buffer::Instance& body) {\n  using source::extensions::filters::http::aws_lambda::Response;\n  Response json_resp;\n  try {\n    
MessageUtil::loadFromJson(json_buf.toString(), json_resp,\n                              ProtobufMessage::getNullValidationVisitor());\n  } catch (EnvoyException& ex) {\n    // We would only get here if all of the following are true:\n    // 1- Passthrough is set to false\n    // 2- Lambda returned a 200 OK\n    // 3- There was no x-amz-function-error header\n    // 4- The body contains invalid JSON\n    headers.setStatus(static_cast<int>(Http::Code::InternalServerError));\n    stats().server_error_.inc();\n    return;\n  }\n\n  for (auto&& kv : json_resp.headers()) {\n    // ignore H2 pseudo-headers (if any)\n    if (kv.first[0] == ':') {\n      continue;\n    }\n    headers.setCopy(Http::LowerCaseString(kv.first), kv.second);\n  }\n\n  for (auto&& cookie : json_resp.cookies()) {\n    headers.addReferenceKey(Http::Headers::get().SetCookie, cookie);\n  }\n\n  if (json_resp.status_code() != 0) {\n    headers.setStatus(json_resp.status_code());\n  }\n  headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  if (!json_resp.body().empty()) {\n    if (json_resp.is_base64_encoded()) {\n      body.add(Base64::decode(json_resp.body()));\n    } else {\n      body.add(json_resp.body());\n    }\n  }\n}\n\nabsl::optional<Arn> parseArn(absl::string_view arn) {\n  const std::vector<absl::string_view> parts = absl::StrSplit(arn, ':');\n  constexpr auto min_arn_size = 7;\n  if (parts.size() < min_arn_size) {\n    return absl::nullopt;\n  }\n\n  if (parts[0] != \"arn\") {\n    return absl::nullopt;\n  }\n\n  auto partition = parts[1];\n  auto service = parts[2];\n  auto region = parts[3];\n  auto account_id = parts[4];\n  auto resource_type = parts[5];\n  auto function_name = parts[6];\n\n  // If the ARN contains a function version/alias, then we want it to be part of the function name.\n  // For example:\n  // arn:aws:lambda:us-west-2:987654321:function:hello_envoy:v1\n  if (parts.size() > min_arn_size) {\n    std::string versioned_function_name = 
std::string(function_name);\n    versioned_function_name.push_back(':');\n    versioned_function_name += std::string(parts[7]);\n    return Arn{partition, service, region, account_id, resource_type, versioned_function_name};\n  }\n\n  return Arn{partition, service, region, account_id, resource_type, function_name};\n}\n\nFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n  const std::string final_prefix = prefix + \"aws_lambda.\";\n  return {ALL_AWS_LAMBDA_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),\n                                      POOL_HISTOGRAM_PREFIX(scope, final_prefix))};\n}\n\n} // namespace AwsLambdaFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/aws_lambda/aws_lambda_filter.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/common/aws/signer.h\"\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsLambdaFilter {\n\nclass Arn {\npublic:\n  Arn(absl::string_view partition, absl::string_view service, absl::string_view region,\n      absl::string_view account_id, absl::string_view resource_type,\n      absl::string_view function_name)\n      : partition_(partition), service_(service), region_(region), account_id_(account_id),\n        resource_type_(resource_type), function_name_(function_name) {}\n\n  const std::string& partition() const { return partition_; }\n  const std::string& service() const { return service_; }\n  const std::string& region() const { return region_; }\n  const std::string& accountId() const { return account_id_; }\n  const std::string& resourceType() const { return resource_type_; }\n  const std::string& functionName() const { return function_name_; }\n\nprivate:\n  std::string partition_;\n  std::string service_;\n  std::string region_;\n  std::string account_id_;\n  std::string resource_type_;\n  std::string function_name_; // resource_id\n};\n\n/**\n * Parses the input string into a structured ARN.\n *\n * The format is expected to be as such:\n * arn:partition:service:region:account-id:resource-type:resource-id\n *\n * Lambda ARN Example:\n * arn:aws:lambda:us-west-2:987654321:function:hello_envoy\n */\nabsl::optional<Arn> parseArn(absl::string_view arn);\n\n/**\n * All stats for the AWS Lambda filter. 
@see stats_macros.h\n */\n#define ALL_AWS_LAMBDA_FILTER_STATS(COUNTER, HISTOGRAM)                                            \\\n  COUNTER(server_error)                                                                            \\\n  HISTOGRAM(upstream_rq_payload_size, Bytes)\n\n/**\n * Wrapper struct filter stats. @see stats_macros.h\n */\nstruct FilterStats {\n  ALL_AWS_LAMBDA_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\nFilterStats generateStats(const std::string& prefix, Stats::Scope& scope);\n\n/**\n * Lambda invocation mode.\n * Synchronous mode is analogous to a blocking call; Lambda responds when it's completed processing.\n * In the Asynchronous mode, Lambda responds immediately acknowledging it received the request.\n */\nenum class InvocationMode { Synchronous, Asynchronous };\n\nclass FilterSettings : public Router::RouteSpecificFilterConfig {\npublic:\n  FilterSettings(const Arn& arn, InvocationMode mode, bool payload_passthrough)\n      : arn_(arn), invocation_mode_(mode), payload_passthrough_(payload_passthrough) {}\n\n  const Arn& arn() const& { return arn_; }\n  bool payloadPassthrough() const { return payload_passthrough_; }\n  InvocationMode invocationMode() const { return invocation_mode_; }\n\nprivate:\n  Arn arn_;\n  InvocationMode invocation_mode_;\n  bool payload_passthrough_;\n};\n\nclass Filter : public Http::PassThroughFilter, Logger::Loggable<Logger::Id::filter> {\n\npublic:\n  Filter(const FilterSettings& config, const FilterStats& stats,\n         const std::shared_ptr<Extensions::Common::Aws::Signer>& sigv4_signer);\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override;\n\n  /**\n   * Calculates 
the function ARN, value of pass-through, etc. by checking per-filter configurations\n   * and general filter configuration. Ultimately, the most specific configuration wins.\n   * @return error message if settings are invalid. Otherwise, empty string.\n   */\n  void resolveSettings();\n  FilterStats& stats() { return stats_; }\n\n  /**\n   * Used for unit testing only\n   */\n  const FilterSettings& settingsForTest() const { return settings_; }\n\nprivate:\n  absl::optional<FilterSettings> getRouteSpecificSettings() const;\n  // Convert the HTTP request to JSON request.\n  void jsonizeRequest(const Http::RequestHeaderMap& headers, const Buffer::Instance* body,\n                      Buffer::Instance& out) const;\n  // Convert the JSON response to a standard HTTP response.\n  void dejsonizeResponse(Http::ResponseHeaderMap& headers, const Buffer::Instance& body,\n                         Buffer::Instance& out);\n  const FilterSettings settings_;\n  FilterStats stats_;\n  Http::RequestHeaderMap* request_headers_ = nullptr;\n  Http::ResponseHeaderMap* response_headers_ = nullptr;\n  std::shared_ptr<Extensions::Common::Aws::Signer> sigv4_signer_;\n  absl::optional<Arn> arn_;\n  InvocationMode invocation_mode_ = InvocationMode::Synchronous;\n  bool payload_passthrough_ = false;\n  bool skip_ = false;\n};\n\n} // namespace AwsLambdaFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/aws_lambda/config.cc",
    "content": "#include \"extensions/filters/http/aws_lambda/config.h\"\n\n#include \"envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/fmt.h\"\n\n#include \"extensions/common/aws/credentials_provider_impl.h\"\n#include \"extensions/common/aws/signer_impl.h\"\n#include \"extensions/common/aws/utility.h\"\n#include \"extensions/filters/http/aws_lambda/aws_lambda_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsLambdaFilter {\nconstexpr auto service_name = \"lambda\";\nnamespace {\n\nInvocationMode\ngetInvocationMode(const envoy::extensions::filters::http::aws_lambda::v3::Config& proto_config) {\n  using namespace envoy::extensions::filters::http::aws_lambda::v3;\n  switch (proto_config.invocation_mode()) {\n  case Config_InvocationMode_ASYNCHRONOUS:\n    return InvocationMode::Asynchronous;\n  case Config_InvocationMode_SYNCHRONOUS:\n    return InvocationMode::Synchronous;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace\n\nHttp::FilterFactoryCb AwsLambdaFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::aws_lambda::v3::Config& proto_config,\n    const std::string& stat_prefix, Server::Configuration::FactoryContext& context) {\n\n  auto credentials_provider =\n      std::make_shared<Extensions::Common::Aws::DefaultCredentialsProviderChain>(\n          context.api(), Extensions::Common::Aws::Utility::metadataFetcher);\n\n  const auto arn = parseArn(proto_config.arn());\n  if (!arn) {\n    throw EnvoyException(fmt::format(\"aws_lambda_filter: Invalid ARN: {}\", proto_config.arn()));\n  }\n  const std::string region = arn->region();\n  auto signer = std::make_shared<Extensions::Common::Aws::SignerImpl>(\n      service_name, region, std::move(credentials_provider), 
context.dispatcher().timeSource());\n\n  FilterSettings filter_settings{*arn, getInvocationMode(proto_config),\n                                 proto_config.payload_passthrough()};\n\n  FilterStats stats = generateStats(stat_prefix, context.scope());\n  return [stats, signer, filter_settings](Http::FilterChainFactoryCallbacks& cb) {\n    auto filter = std::make_shared<Filter>(filter_settings, stats, signer);\n    cb.addStreamFilter(filter);\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nAwsLambdaFilterFactory::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::aws_lambda::v3::PerRouteConfig& proto_config,\n    Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) {\n\n  const auto arn = parseArn(proto_config.invoke_config().arn());\n  if (!arn) {\n    throw EnvoyException(\n        fmt::format(\"aws_lambda_filter: Invalid ARN: {}\", proto_config.invoke_config().arn()));\n  }\n  return std::make_shared<const FilterSettings>(\n      FilterSettings{*arn, getInvocationMode(proto_config.invoke_config()),\n                     proto_config.invoke_config().payload_passthrough()});\n}\n\n/*\n * Static registration for the AWS Lambda filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(AwsLambdaFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace AwsLambdaFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/aws_lambda/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.pb.h\"\n#include \"envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsLambdaFilter {\n\nclass AwsLambdaFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::aws_lambda::v3::Config,\n                                 envoy::extensions::filters::http::aws_lambda::v3::PerRouteConfig> {\npublic:\n  AwsLambdaFilterFactory() : FactoryBase(HttpFilterNames::get().AwsLambda) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::aws_lambda::v3::Config& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::aws_lambda::v3::PerRouteConfig&,\n      Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) override;\n};\n\n} // namespace AwsLambdaFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/aws_lambda/request_response.proto",
    "content": "syntax = \"proto3\";\n\n// The structures are used for the purpose of JSON (de)serialization.\npackage source.extensions.filters.http.aws_lambda;\n\nimport \"validate/validate.proto\";\n\nmessage Request {\n  string raw_path = 1 [(validate.rules).string = {min_len: 1}];\n\n  string method = 2 [(validate.rules).string = {min_len: 1}];\n  // HTTP headers with the same name are coalesced into a single comma-separated value.\n  map<string, string> headers = 3;\n\n  // multi-value keys are overwritten. Last one wins.\n  map<string, string> query_string_parameters = 4;\n\n  string body = 5;\n\n  bool is_base64_encoded = 6;\n}\n\nmessage Response {\n  uint32 status_code = 1;\n  map<string, string> headers = 2;\n  // cookies are split from headers in the response because the headers are coalesced while the HTTP RFC prohibits\n  // coalescing multiple cookie values in the Set-Cookie header.\n  repeated string cookies = 3;\n  string body = 4;\n  bool is_base64_encoded = 5;\n}\n"
  },
  {
    "path": "source/extensions/filters/http/aws_request_signing/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP AWS request signing filter\n# Public docs: docs/root/configuration/http_filters/aws_request_signing_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"aws_request_signing_filter_lib\",\n    srcs = [\"aws_request_signing_filter.cc\"],\n    hdrs = [\"aws_request_signing_filter.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//source/extensions/common/aws:credentials_provider_impl_lib\",\n        \"//source/extensions/common/aws:signer_impl_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/aws_request_signing/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"alpha\",\n    deps = [\n        \":aws_request_signing_filter_lib\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/common/aws:credentials_provider_impl_lib\",\n        \"//source/extensions/common/aws:signer_impl_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/aws_request_signing/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/aws_request_signing/aws_request_signing_filter.cc",
    "content": "#include \"extensions/filters/http/aws_request_signing/aws_request_signing_filter.h\"\n\n#include \"envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsRequestSigningFilter {\n\nFilterConfigImpl::FilterConfigImpl(Extensions::Common::Aws::SignerPtr&& signer,\n                                   const std::string& stats_prefix, Stats::Scope& scope,\n                                   const std::string& host_rewrite)\n    : signer_(std::move(signer)), stats_(Filter::generateStats(stats_prefix, scope)),\n      host_rewrite_(host_rewrite) {}\n\nFilter::Filter(const std::shared_ptr<FilterConfig>& config) : config_(config) {}\n\nExtensions::Common::Aws::Signer& FilterConfigImpl::signer() { return *signer_; }\n\nFilterStats& FilterConfigImpl::stats() { return stats_; }\n\nconst std::string& FilterConfigImpl::hostRewrite() const { return host_rewrite_; }\n\nFilterStats Filter::generateStats(const std::string& prefix, Stats::Scope& scope) {\n  const std::string final_prefix = prefix + \"aws_request_signing.\";\n  return {ALL_AWS_REQUEST_SIGNING_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))};\n}\n\nHttp::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  const auto& host_rewrite = config_->hostRewrite();\n  if (!host_rewrite.empty()) {\n    headers.setHost(host_rewrite);\n  }\n\n  try {\n    config_->signer().sign(headers);\n    config_->stats().signing_added_.inc();\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG(debug, \"signing failed: {}\", e.what());\n    config_->stats().signing_failed_.inc();\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\n} // namespace AwsRequestSigningFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/aws_request_signing/aws_request_signing_filter.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"extensions/common/aws/signer.h\"\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsRequestSigningFilter {\n\n/**\n * All stats for the AWS request signing filter. @see stats_macros.h\n */\n// clang-format off\n#define ALL_AWS_REQUEST_SIGNING_FILTER_STATS(COUNTER)                                                           \\\n  COUNTER(signing_added)                                                                        \\\n  COUNTER(signing_failed)\n// clang-format on\n\n/**\n * Wrapper struct filter stats. @see stats_macros.h\n */\nstruct FilterStats {\n  ALL_AWS_REQUEST_SIGNING_FILTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Abstract filter configuration.\n */\nclass FilterConfig {\npublic:\n  virtual ~FilterConfig() = default;\n\n  /**\n   * @return the config's signer.\n   */\n  virtual Extensions::Common::Aws::Signer& signer() PURE;\n\n  /**\n   * @return the filter stats.\n   */\n  virtual FilterStats& stats() PURE;\n\n  /**\n   * @return the host rewrite value.\n   */\n  virtual const std::string& hostRewrite() const PURE;\n};\n\nusing FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;\n\n/**\n * Configuration for the AWS request signing filter.\n */\nclass FilterConfigImpl : public FilterConfig {\npublic:\n  FilterConfigImpl(Extensions::Common::Aws::SignerPtr&& signer, const std::string& stats_prefix,\n                   Stats::Scope& scope, const std::string& host_rewrite);\n\n  Extensions::Common::Aws::Signer& signer() override;\n  FilterStats& stats() override;\n  const std::string& hostRewrite() const override;\n\nprivate:\n  Extensions::Common::Aws::SignerPtr signer_;\n  FilterStats stats_;\n  
std::string host_rewrite_;\n};\n\n/**\n * HTTP AWS request signing auth filter.\n */\nclass Filter : public Http::PassThroughDecoderFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  Filter(const std::shared_ptr<FilterConfig>& config);\n\n  static FilterStats generateStats(const std::string& prefix, Stats::Scope& scope);\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n\nprivate:\n  std::shared_ptr<FilterConfig> config_;\n};\n\n} // namespace AwsRequestSigningFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/aws_request_signing/config.cc",
    "content": "#include \"extensions/filters/http/aws_request_signing/config.h\"\n\n#include \"envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.pb.h\"\n#include \"envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/common/aws/credentials_provider_impl.h\"\n#include \"extensions/common/aws/signer_impl.h\"\n#include \"extensions/common/aws/utility.h\"\n#include \"extensions/filters/http/aws_request_signing/aws_request_signing_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsRequestSigningFilter {\n\nHttp::FilterFactoryCb AwsRequestSigningFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::aws_request_signing::v3::AwsRequestSigning& config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n\n  auto credentials_provider =\n      std::make_shared<Extensions::Common::Aws::DefaultCredentialsProviderChain>(\n          context.api(), Extensions::Common::Aws::Utility::metadataFetcher);\n  auto signer = std::make_unique<Extensions::Common::Aws::SignerImpl>(\n      config.service_name(), config.region(), credentials_provider,\n      context.dispatcher().timeSource());\n\n  auto filter_config = std::make_shared<FilterConfigImpl>(std::move(signer), stats_prefix,\n                                                          context.scope(), config.host_rewrite());\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    auto filter = std::make_shared<Filter>(filter_config);\n    callbacks.addStreamDecoderFilter(filter);\n  };\n}\n\n/**\n * Static registration for the AWS request signing filter. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(AwsRequestSigningFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace AwsRequestSigningFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/aws_request_signing/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.pb.h\"\n#include \"envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsRequestSigningFilter {\n\n/**\n * Config registration for the AWS request signing filter.\n */\nclass AwsRequestSigningFilterFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::aws_request_signing::v3::AwsRequestSigning> {\npublic:\n  AwsRequestSigningFilterFactory() : FactoryBase(HttpFilterNames::get().AwsRequestSigning) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::aws_request_signing::v3::AwsRequestSigning&\n          proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace AwsRequestSigningFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/buffer/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Request buffering and timeout L7 HTTP filter\n# Public docs: docs/root/configuration/http_filters/buffer_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"buffer_filter_lib\",\n    srcs = [\"buffer_filter.cc\"],\n    hdrs = [\"buffer_filter.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    # Legacy test use. TODO(#9953) clean up.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/buffer:buffer_filter_lib\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/buffer/buffer_filter.cc",
    "content": "#include \"extensions/filters/http/buffer/buffer_filter.h\"\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.h\"\n#include \"envoy/http/codes.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace BufferFilter {\n\nBufferFilterSettings::BufferFilterSettings(\n    const envoy::extensions::filters::http::buffer::v3::Buffer& proto_config)\n    : disabled_(false),\n      max_request_bytes_(static_cast<uint64_t>(proto_config.max_request_bytes().value())) {}\n\nBufferFilterSettings::BufferFilterSettings(\n    const envoy::extensions::filters::http::buffer::v3::BufferPerRoute& proto_config)\n    : disabled_(proto_config.disabled()),\n      max_request_bytes_(\n          proto_config.has_buffer()\n              ? 
static_cast<uint64_t>(proto_config.buffer().max_request_bytes().value())\n              : 0) {}\n\nBufferFilterConfig::BufferFilterConfig(\n    const envoy::extensions::filters::http::buffer::v3::Buffer& proto_config)\n    : settings_(proto_config) {}\n\nBufferFilter::BufferFilter(BufferFilterConfigSharedPtr config)\n    : config_(config), settings_(config->settings()) {}\n\nvoid BufferFilter::initConfig() {\n  ASSERT(!config_initialized_);\n  config_initialized_ = true;\n\n  settings_ = config_->settings();\n\n  if (!callbacks_->route() || !callbacks_->route()->routeEntry()) {\n    return;\n  }\n\n  const std::string& name = HttpFilterNames::get().Buffer;\n  const auto* entry = callbacks_->route()->routeEntry();\n  const auto* route_local = entry->mostSpecificPerFilterConfigTyped<BufferFilterSettings>(name);\n\n  settings_ = route_local ? route_local : settings_;\n}\n\nHttp::FilterHeadersStatus BufferFilter::decodeHeaders(Http::RequestHeaderMap& headers,\n                                                      bool end_stream) {\n  if (end_stream) {\n    // If this is a header-only request, we don't need to do any buffering.\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  initConfig();\n  if (settings_->disabled()) {\n    // The filter has been disabled for this route.\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  callbacks_->setDecoderBufferLimit(settings_->maxRequestBytes());\n  request_headers_ = &headers;\n\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\nHttp::FilterDataStatus BufferFilter::decodeData(Buffer::Instance& data, bool end_stream) {\n  content_length_ += data.length();\n  if (end_stream || settings_->disabled()) {\n    maybeAddContentLength();\n\n    return Http::FilterDataStatus::Continue;\n  }\n\n  // Buffer until the complete request has been processed or the ConnectionManagerImpl sends a 413.\n  return Http::FilterDataStatus::StopIterationAndBuffer;\n}\n\nHttp::FilterTrailersStatus 
BufferFilter::decodeTrailers(Http::RequestTrailerMap&) {\n  maybeAddContentLength();\n\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid BufferFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n}\n\nvoid BufferFilter::maybeAddContentLength() {\n  // request_headers_ is initialized iff plugin is enabled.\n  if (request_headers_ != nullptr && request_headers_->ContentLength() == nullptr) {\n    ASSERT(!settings_->disabled());\n    request_headers_->setContentLength(content_length_);\n  }\n}\n\n} // namespace BufferFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/buffer/buffer_filter.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.h\"\n#include \"envoy/http/filter.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace BufferFilter {\n\nclass BufferFilterSettings : public Router::RouteSpecificFilterConfig {\npublic:\n  BufferFilterSettings(const envoy::extensions::filters::http::buffer::v3::Buffer&);\n  BufferFilterSettings(const envoy::extensions::filters::http::buffer::v3::BufferPerRoute&);\n\n  bool disabled() const { return disabled_; }\n  uint64_t maxRequestBytes() const { return max_request_bytes_; }\n\nprivate:\n  bool disabled_;\n  uint64_t max_request_bytes_;\n};\n\n/**\n * Configuration for the buffer filter.\n */\nclass BufferFilterConfig {\npublic:\n  BufferFilterConfig(const envoy::extensions::filters::http::buffer::v3::Buffer& proto_config);\n\n  const BufferFilterSettings* settings() const { return &settings_; }\n\nprivate:\n  const BufferFilterSettings settings_;\n};\n\nusing BufferFilterConfigSharedPtr = std::shared_ptr<BufferFilterConfig>;\n\n/**\n * A filter that is capable of buffering an entire request before dispatching it upstream.\n */\nclass BufferFilter : public Http::StreamDecoderFilter {\npublic:\n  BufferFilter(BufferFilterConfigSharedPtr config);\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\nprivate:\n  void initConfig();\n  void maybeAddContentLength();\n\n  
BufferFilterConfigSharedPtr config_;\n  const BufferFilterSettings* settings_;\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n  Http::RequestHeaderMap* request_headers_{};\n  uint64_t content_length_{};\n  bool config_initialized_{};\n};\n\n} // namespace BufferFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/buffer/config.cc",
    "content": "#include \"extensions/filters/http/buffer/config.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.h\"\n#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/buffer/buffer_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace BufferFilter {\n\nHttp::FilterFactoryCb BufferFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::buffer::v3::Buffer& proto_config, const std::string&,\n    Server::Configuration::FactoryContext&) {\n  ASSERT(proto_config.has_max_request_bytes());\n\n  BufferFilterConfigSharedPtr filter_config(new BufferFilterConfig(proto_config));\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(std::make_shared<BufferFilter>(filter_config));\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nBufferFilterFactory::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::buffer::v3::BufferPerRoute& proto_config,\n    Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<const BufferFilterSettings>(proto_config);\n}\n\n/**\n * Static registration for the buffer filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(BufferFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.buffer\"};\n\n} // namespace BufferFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/buffer/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.h\"\n#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace BufferFilter {\n\n/**\n * Config registration for the buffer filter.\n */\nclass BufferFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::buffer::v3::Buffer,\n                                 envoy::extensions::filters::http::buffer::v3::BufferPerRoute> {\npublic:\n  BufferFilterFactory() : FactoryBase(HttpFilterNames::get().Buffer) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::buffer::v3::Buffer& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::buffer::v3::BufferPerRoute&,\n      Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) override;\n};\n\nDECLARE_FACTORY(BufferFilterFactory);\n\n} // namespace BufferFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n## Pluggable HTTP cache filter\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"cache_filter_lib\",\n    srcs = [\"cache_filter.cc\"],\n    hdrs = [\"cache_filter.h\"],\n    deps = [\n        \":cache_headers_utils_lib\",\n        \":cacheability_utils_lib\",\n        \":http_cache_lib\",\n        \":inline_headers_handles\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"cacheability_utils_lib\",\n    srcs = [\"cacheability_utils.cc\"],\n    hdrs = [\"cacheability_utils.h\"],\n    deps = [\n        \":cache_headers_utils_lib\",\n        \":inline_headers_handles\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http:headers_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"key\",\n    srcs = [\"key.proto\"],\n)\n\nenvoy_cc_library(\n    name = \"http_cache_lib\",\n    srcs = [\"http_cache.cc\"],\n    hdrs = [\"http_cache.h\"],\n    deps = [\n        \":cache_headers_utils_lib\",\n        \":inline_headers_handles\",\n        \":key_cc_proto\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        
\"//source/common/common:assert_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"cache_headers_utils_lib\",\n    srcs = [\"cache_headers_utils.cc\"],\n    hdrs = [\"cache_headers_utils.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":inline_headers_handles\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"inline_headers_handles\",\n    hdrs = [\"inline_headers_handles.h\"],\n    deps = [\n        \"//source/common/http:headers_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream_and_upstream\",\n    status = \"wip\",\n    deps = [\n        \":cache_filter_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/cache/cache_filter.cc",
    "content": "#include \"extensions/filters/http/cache/cache_filter.h\"\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/filters/http/cache/cacheability_utils.h\"\n#include \"extensions/filters/http/cache/inline_headers_handles.h\"\n\n#include \"absl/memory/memory.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\nnamespace {\ninline bool isResponseNotModified(const Http::ResponseHeaderMap& response_headers) {\n  return Http::Utility::getResponseStatus(response_headers) == enumToInt(Http::Code::NotModified);\n}\n} // namespace\n\nstruct CacheResponseCodeDetailValues {\n  const absl::string_view ResponseFromCacheFilter = \"cache.response_from_cache_filter\";\n};\n\nusing CacheResponseCodeDetails = ConstSingleton<CacheResponseCodeDetailValues>;\n\nCacheFilter::CacheFilter(\n    const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, const std::string&,\n    Stats::Scope&, TimeSource& time_source, HttpCache& http_cache)\n    : time_source_(time_source), cache_(http_cache),\n      vary_allow_list_(config.allowed_vary_headers()) {}\n\nvoid CacheFilter::onDestroy() {\n  filter_state_ = FilterState::Destroyed;\n  if (lookup_) {\n    lookup_->onDestroy();\n  }\n  if (insert_) {\n    insert_->onDestroy();\n  }\n}\n\nHttp::FilterHeadersStatus CacheFilter::decodeHeaders(Http::RequestHeaderMap& headers,\n                                                     bool end_stream) {\n  ENVOY_STREAM_LOG(debug, \"CacheFilter::decodeHeaders: {}\", *decoder_callbacks_, headers);\n  if (!end_stream) {\n    ENVOY_STREAM_LOG(\n        debug,\n        \"CacheFilter::decodeHeaders ignoring request because it has body and/or trailers: {}\",\n        *decoder_callbacks_, headers);\n    return 
Http::FilterHeadersStatus::Continue;\n  }\n  if (!CacheabilityUtils::isCacheableRequest(headers)) {\n    ENVOY_STREAM_LOG(debug, \"CacheFilter::decodeHeaders ignoring uncacheable request: {}\",\n                     *decoder_callbacks_, headers);\n    return Http::FilterHeadersStatus::Continue;\n  }\n  ASSERT(decoder_callbacks_);\n\n  LookupRequest lookup_request(headers, time_source_.systemTime(), vary_allow_list_);\n  request_allows_inserts_ = !lookup_request.requestCacheControl().no_store_;\n  lookup_ = cache_.makeLookupContext(std::move(lookup_request));\n\n  ASSERT(lookup_);\n  getHeaders(headers);\n  ENVOY_STREAM_LOG(debug, \"CacheFilter::decodeHeaders starting lookup\", *decoder_callbacks_);\n\n  // Stop the decoding stream until the cache lookup result is ready.\n  return Http::FilterHeadersStatus::StopAllIterationAndWatermark;\n}\n\nHttp::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& headers,\n                                                     bool end_stream) {\n  if (filter_state_ == FilterState::DecodeServingFromCache) {\n    // This call was invoked during decoding by decoder_callbacks_->encodeHeaders because a fresh\n    // cached response was found and is being added to the encoding stream -- ignore it.\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // If lookup_ is null, the request wasn't cacheable, so the response isn't either.\n  if (!lookup_) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (filter_state_ == FilterState::ValidatingCachedResponse && isResponseNotModified(headers)) {\n    processSuccessfulValidation(headers);\n    // Stop the encoding stream until the cached response is fetched & added to the encoding stream.\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  // Either a cache miss or a cache entry that is no longer valid.\n  // Check if the new response can be cached.\n  if (request_allows_inserts_ &&\n      CacheabilityUtils::isCacheableResponse(headers, 
vary_allow_list_)) {\n    ENVOY_STREAM_LOG(debug, \"CacheFilter::encodeHeaders inserting headers\", *encoder_callbacks_);\n    insert_ = cache_.makeInsertContext(std::move(lookup_));\n    // Add metadata associated with the cached response. Right now this is only response_time;\n    const ResponseMetadata metadata = {time_source_.systemTime()};\n    insert_->insertHeaders(headers, metadata, end_stream);\n  }\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus CacheFilter::encodeData(Buffer::Instance& data, bool end_stream) {\n  if (filter_state_ == FilterState::DecodeServingFromCache) {\n    // This call was invoked during decoding by decoder_callbacks_->encodeData because a fresh\n    // cached response was found and is being added to the encoding stream -- ignore it.\n    return Http::FilterDataStatus::Continue;\n  }\n  if (filter_state_ == FilterState::EncodeServingFromCache) {\n    // Stop the encoding stream until the cached response is fetched & added to the encoding stream.\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n  if (insert_) {\n    ENVOY_STREAM_LOG(debug, \"CacheFilter::encodeData inserting body\", *encoder_callbacks_);\n    // TODO(toddmgreer): Wait for the cache if necessary.\n    insert_->insertBody(\n        data, [](bool) {}, end_stream);\n  }\n  return Http::FilterDataStatus::Continue;\n}\n\nvoid CacheFilter::getHeaders(Http::RequestHeaderMap& request_headers) {\n  ASSERT(lookup_, \"CacheFilter is trying to call getHeaders with no LookupContext\");\n\n  // If the cache posts a callback to the dispatcher then the CacheFilter is destroyed for any\n  // reason (e.g client disconnected and HTTP stream terminated), then there is no guarantee that\n  // the posted callback will run before the filter is deleted. 
Hence, a weak_ptr to the CacheFilter\n  // is captured and used to make sure the CacheFilter is still alive before accessing it in the\n  // posted callback.\n  // TODO(yosrym93): Look into other options for handling this (also in getBody and getTrailers) as\n  // they arise, e.g. cancellable posts, guaranteed ordering of posted callbacks and deletions, etc.\n  CacheFilterWeakPtr self = weak_from_this();\n\n  // The dispatcher needs to be captured because there's no guarantee that\n  // decoder_callbacks_->dispatcher() is thread-safe.\n  lookup_->getHeaders([self, &request_headers,\n                       &dispatcher = decoder_callbacks_->dispatcher()](LookupResult&& result) {\n    // The callback is posted to the dispatcher to make sure it is called on the worker thread.\n    // The lambda passed to dispatcher.post() needs to be copyable as it will be used to\n    // initialize a std::function. Therefore, it cannot capture anything non-copyable.\n    // LookupResult is non-copyable as LookupResult::headers_ is a unique_ptr, which is\n    // non-copyable. 
Hence, \"result\" is decomposed when captured, and re-instantiated inside the\n    // lambda so that \"result.headers_\" can be captured as a raw pointer, then wrapped in a\n    // unique_ptr when the result is re-instantiated.\n    dispatcher.post([self, &request_headers, status = result.cache_entry_status_,\n                     headers_raw_ptr = result.headers_.release(),\n                     response_ranges = std::move(result.response_ranges_),\n                     content_length = result.content_length_,\n                     has_trailers = result.has_trailers_]() mutable {\n      // Wrap the raw pointer in a unique_ptr before checking to avoid memory leaks.\n      Http::ResponseHeaderMapPtr headers = absl::WrapUnique(headers_raw_ptr);\n      if (CacheFilterSharedPtr cache_filter = self.lock()) {\n        cache_filter->onHeaders(\n            LookupResult{status, std::move(headers), content_length, response_ranges, has_trailers},\n            request_headers);\n      }\n    });\n  });\n}\n\nvoid CacheFilter::getBody() {\n  ASSERT(lookup_, \"CacheFilter is trying to call getBody with no LookupContext\");\n  ASSERT(!remaining_ranges_.empty(), \"No reason to call getBody when there's no body to get.\");\n  // If the cache posts a callback to the dispatcher then the CacheFilter is destroyed for any\n  // reason (e.g client disconnected and HTTP stream terminated), then there is no guarantee that\n  // the posted callback will run before the filter is deleted. 
Hence, a weak_ptr to the CacheFilter\n  // is captured and used to make sure the CacheFilter is still alive before accessing it in the\n  // posted callback.\n  CacheFilterWeakPtr self = weak_from_this();\n\n  // The dispatcher needs to be captured because there's no guarantee that\n  // decoder_callbacks_->dispatcher() is thread-safe.\n  lookup_->getBody(remaining_ranges_[0], [self, &dispatcher = decoder_callbacks_->dispatcher()](\n                                             Buffer::InstancePtr&& body) {\n    // The callback is posted to the dispatcher to make sure it is called on the worker thread.\n    // The lambda passed to dispatcher.post() needs to be copyable as it will be used to\n    // initialize a std::function. Therefore, it cannot capture anything non-copyable.\n    // \"body\" is a unique_ptr, which is non-copyable. Hence, it is captured as a raw pointer then\n    // wrapped in a unique_ptr inside the lambda.\n    dispatcher.post([self, body_raw_ptr = body.release()] {\n      // Wrap the raw pointer in a unique_ptr before checking to avoid memory leaks.\n      Buffer::InstancePtr body = absl::WrapUnique(body_raw_ptr);\n      if (CacheFilterSharedPtr cache_filter = self.lock()) {\n        cache_filter->onBody(std::move(body));\n      }\n    });\n  });\n}\n\nvoid CacheFilter::getTrailers() {\n  ASSERT(lookup_, \"CacheFilter is trying to call getTrailers with no LookupContext\");\n  ASSERT(response_has_trailers_, \"No reason to call getTrailers when there's no trailers to get.\");\n\n  // If the cache posts a callback to the dispatcher then the CacheFilter is destroyed for any\n  // reason (e.g client disconnected and HTTP stream terminated), then there is no guarantee that\n  // the posted callback will run before the filter is deleted. 
Hence, a weak_ptr to the CacheFilter\n  // is captured and used to make sure the CacheFilter is still alive before accessing it in the\n  // posted callback.\n  CacheFilterWeakPtr self = weak_from_this();\n\n  // The dispatcher needs to be captured because there's no guarantee that\n  // decoder_callbacks_->dispatcher() is thread-safe.\n  lookup_->getTrailers([self, &dispatcher = decoder_callbacks_->dispatcher()](\n                           Http::ResponseTrailerMapPtr&& trailers) {\n    // The callback is posted to the dispatcher to make sure it is called on the worker thread.\n    // The lambda passed to dispatcher.post() needs to be copyable as it will be used to\n    // initialize a std::function. Therefore, it cannot capture anything non-copyable.\n    // \"trailers\" is a unique_ptr, which is non-copyable. Hence, it is captured as a raw\n    // pointer then wrapped in a unique_ptr inside the lambda.\n    dispatcher.post([self, trailers_raw_ptr = trailers.release()] {\n      // Wrap the raw pointer in a unique_ptr before checking to avoid memory leaks.\n      Http::ResponseTrailerMapPtr trailers = absl::WrapUnique(trailers_raw_ptr);\n      if (CacheFilterSharedPtr cache_filter = self.lock()) {\n        cache_filter->onTrailers(std::move(trailers));\n      }\n    });\n  });\n}\n\nvoid CacheFilter::onHeaders(LookupResult&& result, Http::RequestHeaderMap& request_headers) {\n  if (filter_state_ == FilterState::Destroyed) {\n    // The filter is being destroyed, any callbacks should be ignored.\n    return;\n  }\n  // TODO(yosrym93): Handle request only-if-cached directive\n  switch (result.cache_entry_status_) {\n  case CacheEntryStatus::FoundNotModified:\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // We don't yet return or support these codes.\n  case CacheEntryStatus::RequiresValidation:\n    // If a cache entry requires validation, inject validation headers in the request and let it\n    // pass through as if no cache entry was found.\n    // If the cache entry was 
valid, the response status should be 304 (unmodified) and the cache\n    // entry will be injected in the response body.\n    lookup_result_ = std::make_unique<LookupResult>(std::move(result));\n    filter_state_ = FilterState::ValidatingCachedResponse;\n    injectValidationHeaders(request_headers);\n    break;\n  case CacheEntryStatus::Unusable:\n    break;\n  case CacheEntryStatus::NotSatisfiableRange:\n    lookup_result_ = std::make_unique<LookupResult>(std::move(result));\n    filter_state_ = FilterState::DecodeServingFromCache;\n    lookup_result_->headers_->setStatus(static_cast<uint64_t>(Http::Code::RangeNotSatisfiable));\n    lookup_result_->headers_->addCopy(Http::Headers::get().ContentRange,\n                                      absl::StrCat(\"bytes */\", lookup_result_->content_length_));\n    // We shouldn't serve any of the body, so the response content length is 0.\n    lookup_result_->setContentLength(0);\n    encodeCachedResponse();\n    break;\n  case CacheEntryStatus::SatisfiableRange:\n    if (result.response_ranges_.size() == 1) {\n      lookup_result_ = std::make_unique<LookupResult>(std::move(result));\n      filter_state_ = FilterState::DecodeServingFromCache;\n      lookup_result_->headers_->setStatus(static_cast<uint64_t>(Http::Code::PartialContent));\n      lookup_result_->headers_->addCopy(\n          Http::Headers::get().ContentRange,\n          absl::StrCat(\"bytes \", lookup_result_->response_ranges_[0].begin(), \"-\",\n                       lookup_result_->response_ranges_[0].end() - 1, \"/\",\n                       lookup_result_->content_length_));\n      // We serve only the desired range, so adjust the length accordingly.\n      lookup_result_->setContentLength(lookup_result_->response_ranges_[0].length());\n      remaining_ranges_ = std::move(lookup_result_->response_ranges_);\n      encodeCachedResponse();\n      break;\n    }\n    // Multi-part responses are not supported, and they will be treated as a usual 200 response 
on\n    // ::Ok case below. A possible way to achieve that would be to move all ranges to\n    // remaining_ranges_, and add logic inside '::onBody' to interleave the body bytes with\n    // sub-headers and separator string for each part. Would need to keep track if the current range\n    // is over or not to know when to insert the separator, and calculate the length based on length\n    // of ranges + extra headers and separators.\n    ABSL_FALLTHROUGH_INTENDED;\n  case CacheEntryStatus::Ok:\n    lookup_result_ = std::make_unique<LookupResult>(std::move(result));\n    filter_state_ = FilterState::DecodeServingFromCache;\n    encodeCachedResponse();\n    // Return here so that continueDecoding is not called.\n    // No need to continue the decoding stream as a cached response is already being served.\n    return;\n  }\n  // decodeHeaders returned StopIteration waiting for this callback -- continue decoding\n  decoder_callbacks_->continueDecoding();\n}\n\n// TODO(toddmgreer): Handle downstream backpressure.\nvoid CacheFilter::onBody(Buffer::InstancePtr&& body) {\n  // Can be called during decoding if a valid cache hit is found,\n  // or during encoding if a cache entry was being validated.\n  if (filter_state_ == FilterState::Destroyed) {\n    // The filter is being destroyed, any callbacks should be ignored.\n    return;\n  }\n  ASSERT(!remaining_ranges_.empty(),\n         \"CacheFilter doesn't call getBody unless there's more body to get, so this is a \"\n         \"bogus callback.\");\n  ASSERT(body, \"Cache said it had a body, but isn't giving it to us.\");\n\n  const uint64_t bytes_from_cache = body->length();\n  if (bytes_from_cache < remaining_ranges_[0].length()) {\n    remaining_ranges_[0].trimFront(bytes_from_cache);\n  } else if (bytes_from_cache == remaining_ranges_[0].length()) {\n    remaining_ranges_.erase(remaining_ranges_.begin());\n  } else {\n    ASSERT(false, \"Received oversized body from cache.\");\n    filter_state_ == 
FilterState::DecodeServingFromCache ? decoder_callbacks_->resetStream()\n                                                         : encoder_callbacks_->resetStream();\n    return;\n  }\n\n  const bool end_stream = remaining_ranges_.empty() && !response_has_trailers_;\n\n  filter_state_ == FilterState::DecodeServingFromCache\n      ? decoder_callbacks_->encodeData(*body, end_stream)\n      : encoder_callbacks_->addEncodedData(*body, true);\n\n  if (!remaining_ranges_.empty()) {\n    getBody();\n  } else if (response_has_trailers_) {\n    getTrailers();\n  } else {\n    finalizeEncodingCachedResponse();\n  }\n}\n\nvoid CacheFilter::onTrailers(Http::ResponseTrailerMapPtr&& trailers) {\n  // Can be called during decoding if a valid cache hit is found,\n  // or during encoding if a cache entry was being validated.\n  if (filter_state_ == FilterState::Destroyed) {\n    // The filter is being destroyed, any callbacks should be ignored.\n    return;\n  }\n  if (filter_state_ == FilterState::DecodeServingFromCache) {\n    decoder_callbacks_->encodeTrailers(std::move(trailers));\n  } else {\n    Http::ResponseTrailerMap& response_trailers = encoder_callbacks_->addEncodedTrailers();\n    response_trailers = std::move(*trailers);\n  }\n  finalizeEncodingCachedResponse();\n}\n\nvoid CacheFilter::processSuccessfulValidation(Http::ResponseHeaderMap& response_headers) {\n  ASSERT(lookup_result_, \"CacheFilter trying to validate a non-existent lookup result\");\n  ASSERT(\n      filter_state_ == FilterState::ValidatingCachedResponse,\n      \"processSuccessfulValidation must only be called when a cached response is being validated\");\n  ASSERT(isResponseNotModified(response_headers),\n         \"processSuccessfulValidation must only be called with 304 responses\");\n\n  // Check whether the cached entry should be updated before modifying the 304 response.\n  const bool should_update_cached_entry = shouldUpdateCachedEntry(response_headers);\n\n  filter_state_ = 
FilterState::EncodeServingFromCache;\n\n  // Update the 304 response status code and content-length\n  response_headers.setStatus(lookup_result_->headers_->getStatusValue());\n  response_headers.setContentLength(lookup_result_->headers_->getContentLengthValue());\n\n  // A response that has been validated should not contain an Age header as it is equivalent to a\n  // freshly served response from the origin, unless the 304 response has an Age header, which\n  // means it was served by an upstream cache.\n  // Remove any existing Age header in the cached response.\n  lookup_result_->headers_->removeInline(age_handle.handle());\n\n  // Add any missing headers from the cached response to the 304 response.\n  lookup_result_->headers_->iterate([&response_headers](const Http::HeaderEntry& cached_header) {\n    // TODO(yosrym93): Try to avoid copying the header key twice.\n    Http::LowerCaseString key(std::string(cached_header.key().getStringView()));\n    absl::string_view value = cached_header.value().getStringView();\n    if (!response_headers.get(key)) {\n      response_headers.setCopy(key, value);\n    }\n    return Http::HeaderMap::Iterate::Continue;\n  });\n\n  if (should_update_cached_entry) {\n    // TODO(yosrym93): else the cached entry should be deleted.\n    // Update metadata associated with the cached response. 
Right now this is only response_time;\n    const ResponseMetadata metadata = {time_source_.systemTime()};\n    cache_.updateHeaders(*lookup_, response_headers, metadata);\n  }\n\n  // A cache entry was successfully validated -> encode cached body and trailers.\n  encodeCachedResponse();\n}\n\n// TODO(yosrym93): Write a test that exercises this when SimpleHttpCache implements updateHeaders\nbool CacheFilter::shouldUpdateCachedEntry(const Http::ResponseHeaderMap& response_headers) const {\n  ASSERT(isResponseNotModified(response_headers),\n         \"shouldUpdateCachedEntry must only be called with 304 responses\");\n  ASSERT(lookup_result_, \"shouldUpdateCachedEntry precondition unsatisfied: lookup_result_ \"\n                         \"does not point to a cache lookup result\");\n  ASSERT(filter_state_ == FilterState::ValidatingCachedResponse,\n         \"shouldUpdateCachedEntry precondition unsatisfied: the \"\n         \"CacheFilter is not validating a cache lookup result\");\n\n  // According to: https://httpwg.org/specs/rfc7234.html#freshening.responses,\n  // and assuming a single cached response per key:\n  // If the 304 response contains a strong validator (etag) that does not match the cached response,\n  // the cached response should not be updated.\n  const Http::HeaderEntry* response_etag = response_headers.getInline(etag_handle.handle());\n  const Http::HeaderEntry* cached_etag = lookup_result_->headers_->getInline(etag_handle.handle());\n  return !response_etag || (cached_etag && cached_etag->value().getStringView() ==\n                                               response_etag->value().getStringView());\n}\n\nvoid CacheFilter::injectValidationHeaders(Http::RequestHeaderMap& request_headers) {\n  ASSERT(lookup_result_, \"injectValidationHeaders precondition unsatisfied: lookup_result_ \"\n                         \"does not point to a cache lookup result\");\n  ASSERT(filter_state_ == FilterState::ValidatingCachedResponse,\n         
\"injectValidationHeaders precondition unsatisfied: the \"\n         \"CacheFilter is not validating a cache lookup result\");\n\n  const Http::HeaderEntry* etag_header = lookup_result_->headers_->getInline(etag_handle.handle());\n  const Http::HeaderEntry* last_modified_header =\n      lookup_result_->headers_->getInline(last_modified_handle.handle());\n\n  if (etag_header) {\n    absl::string_view etag = etag_header->value().getStringView();\n    request_headers.setInline(if_none_match_handle.handle(), etag);\n  }\n  if (DateUtil::timePointValid(CacheHeadersUtils::httpTime(last_modified_header))) {\n    // Valid Last-Modified header exists.\n    absl::string_view last_modified = last_modified_header->value().getStringView();\n    request_headers.setInline(if_modified_since_handle.handle(), last_modified);\n  } else {\n    // Either Last-Modified is missing or invalid, fallback to Date.\n    // A correct behaviour according to:\n    // https://httpwg.org/specs/rfc7232.html#header.if-modified-since\n    absl::string_view date = lookup_result_->headers_->getDateValue();\n    request_headers.setInline(if_modified_since_handle.handle(), date);\n  }\n}\n\nvoid CacheFilter::encodeCachedResponse() {\n  ASSERT(lookup_result_, \"encodeCachedResponse precondition unsatisfied: lookup_result_ \"\n                         \"does not point to a cache lookup result\");\n\n  response_has_trailers_ = lookup_result_->has_trailers_;\n  const bool end_stream = (lookup_result_->content_length_ == 0 && !response_has_trailers_);\n\n  // Set appropriate response flags and codes.\n  Http::StreamFilterCallbacks* callbacks =\n      filter_state_ == FilterState::DecodeServingFromCache\n          ? 
static_cast<Http::StreamFilterCallbacks*>(decoder_callbacks_)\n          : static_cast<Http::StreamFilterCallbacks*>(encoder_callbacks_);\n\n  callbacks->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter);\n  callbacks->streamInfo().setResponseCodeDetails(\n      CacheResponseCodeDetails::get().ResponseFromCacheFilter);\n\n  // If the filter is encoding, 304 response headers and cached headers are merged in encodeHeaders.\n  // If the filter is decoding, we need to serve response headers from cache directly.\n  if (filter_state_ == FilterState::DecodeServingFromCache) {\n    decoder_callbacks_->encodeHeaders(std::move(lookup_result_->headers_), end_stream,\n                                      CacheResponseCodeDetails::get().ResponseFromCacheFilter);\n  }\n\n  if (lookup_result_->content_length_ > 0) {\n    // No range has been added, so we add entire body to the response.\n    if (remaining_ranges_.empty()) {\n      remaining_ranges_.emplace_back(0, lookup_result_->content_length_);\n    }\n    getBody();\n  } else if (response_has_trailers_) {\n    getTrailers();\n  }\n}\n\nvoid CacheFilter::finalizeEncodingCachedResponse() {\n  if (filter_state_ == FilterState::EncodeServingFromCache) {\n    // encodeHeaders returned StopIteration waiting for finishing encoding the cached response --\n    // continue encoding.\n    encoder_callbacks_->continueEncoding();\n  }\n  filter_state_ = FilterState::ResponseServedFromCache;\n}\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/cache_filter.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/http/cache/v3alpha/cache.pb.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/http/cache/cache_headers_utils.h\"\n#include \"extensions/filters/http/cache/http_cache.h\"\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\n/**\n * A filter that caches responses and attempts to satisfy requests from cache.\n */\nclass CacheFilter : public Http::PassThroughFilter,\n                    public Logger::Loggable<Logger::Id::cache_filter>,\n                    public std::enable_shared_from_this<CacheFilter> {\npublic:\n  CacheFilter(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config,\n              const std::string& stats_prefix, Stats::Scope& scope, TimeSource& time_source,\n              HttpCache& http_cache);\n  // Http::StreamFilterBase\n  void onDestroy() override;\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance& buffer, bool end_stream) override;\n\nprivate:\n  // Utility functions; make any necessary checks and call the corresponding lookup_ functions\n  void getHeaders(Http::RequestHeaderMap& request_headers);\n  void getBody();\n  void getTrailers();\n\n  // Callbacks for HttpCache to call when headers/body/trailers are ready.\n  void onHeaders(LookupResult&& result, Http::RequestHeaderMap& request_headers);\n  void onBody(Buffer::InstancePtr&& body);\n  void onTrailers(Http::ResponseTrailerMapPtr&& 
trailers);\n\n  // Precondition: lookup_result_ points to a cache lookup result that requires validation.\n  //               filter_state_ is ValidatingCachedResponse.\n  // Serves a validated cached response after updating it with a 304 response.\n  void processSuccessfulValidation(Http::ResponseHeaderMap& response_headers);\n\n  // Precondition: lookup_result_ points to a cache lookup result that requires validation.\n  //               filter_state_ is ValidatingCachedResponse.\n  // Checks if a cached entry should be updated with a 304 response.\n  bool shouldUpdateCachedEntry(const Http::ResponseHeaderMap& response_headers) const;\n\n  // Precondition: lookup_result_ points to a cache lookup result that requires validation.\n  // Should only be called during onHeaders as it modifies RequestHeaderMap.\n  // Adds required conditional headers for cache validation to the request headers\n  // according to the present cache lookup result headers.\n  void injectValidationHeaders(Http::RequestHeaderMap& request_headers);\n\n  // Precondition: lookup_result_ points to a fresh or validated cache look up result.\n  // Adds a cache lookup result to the response encoding stream.\n  // Can be called during decoding if a valid cache hit is found,\n  // or during encoding if a cache entry was validated successfully.\n  void encodeCachedResponse();\n\n  // Precondition: finished adding a response from cache to the response encoding stream.\n  // Updates filter_state_ and continues the encoding stream if necessary.\n  void finalizeEncodingCachedResponse();\n\n  TimeSource& time_source_;\n  HttpCache& cache_;\n  LookupContextPtr lookup_;\n  InsertContextPtr insert_;\n  LookupResultPtr lookup_result_;\n\n  // Tracks what body bytes still need to be read from the cache. This is\n  // currently only one Range, but will expand when full range support is added. 
Initialized by\n  // onHeaders for Range Responses, otherwise initialized by encodeCachedResponse.\n  std::vector<AdjustedByteRange> remaining_ranges_;\n\n  // TODO(#12901): The allow list could be constructed only once directly from the config, instead\n  // of doing it per-request. A good example of such config is found in the gzip filter:\n  // source/extensions/filters/http/gzip/gzip_filter.h.\n  // Stores the allow list rules that decide if a header can be varied upon.\n  VaryHeader vary_allow_list_;\n\n  // True if the response has trailers.\n  // TODO(toddmgreer): cache trailers.\n  bool response_has_trailers_ = false;\n\n  // True if a request allows cache inserts according to:\n  // https://httpwg.org/specs/rfc7234.html#response.cacheability\n  bool request_allows_inserts_ = false;\n\n  enum class FilterState {\n    Initial,\n\n    // Cache lookup found a cached response that requires validation\n    ValidatingCachedResponse,\n\n    // Cache lookup found a fresh cached response and it is being added to the encoding stream.\n    DecodeServingFromCache,\n\n    // A cached response was successfully validated and it is being added to the encoding stream\n    EncodeServingFromCache,\n\n    // The cached response was successfully added to the encoding stream (either during decoding or\n    // encoding).\n    ResponseServedFromCache,\n\n    // CacheFilter::onDestroy has been called, the filter will be destroyed soon. Any triggered\n    // callbacks should be ignored.\n    Destroyed\n  };\n\n  FilterState filter_state_ = FilterState::Initial;\n};\n\nusing CacheFilterSharedPtr = std::shared_ptr<CacheFilter>;\nusing CacheFilterWeakPtr = std::weak_ptr<CacheFilter>;\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/cache_headers_utils.cc",
    "content": "#include \"extensions/filters/http/cache/cache_headers_utils.h\"\n\n#include <array>\n#include <chrono>\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n\n#include \"extensions/filters/http/cache/inline_headers_handles.h\"\n\n#include \"absl/algorithm/container.h\"\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/numbers.h\"\n#include \"absl/strings/str_split.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\n// Utility functions used in RequestCacheControl & ResponseCacheControl.\nnamespace {\n// A directive with an invalid duration is ignored, the RFC does not specify a behavior:\n// https://httpwg.org/specs/rfc7234.html#delta-seconds\nOptionalDuration parseDuration(absl::string_view s) {\n  OptionalDuration duration;\n  // Strip quotation marks if any.\n  if (s.size() > 1 && s.front() == '\"' && s.back() == '\"') {\n    s = s.substr(1, s.size() - 2);\n  }\n  long num;\n  if (absl::SimpleAtoi(s, &num) && num >= 0) {\n    // s is a valid string of digits representing a positive number.\n    duration = Seconds(num);\n  }\n  return duration;\n}\n\ninline std::pair<absl::string_view, absl::string_view>\nseparateDirectiveAndArgument(absl::string_view full_directive) {\n  return absl::StrSplit(absl::StripAsciiWhitespace(full_directive), absl::MaxSplits('=', 1));\n}\n} // namespace\n\n// The grammar for This Cache-Control header value should be:\n// Cache-Control   = 1#cache-directive\n// cache-directive = token [ \"=\" ( token / quoted-string ) ]\n// token           = 1*tchar\n// tchar           = \"!\" / \"#\" / \"$\" / \"%\" / \"&\" / \"'\" / \"*\" / \"+\"\n//                 / \"-\" / \".\" / \"^\" / \"_\" / \"`\" / \"|\" / \"~\" / DIGIT / ALPHA\n// quoted-string   = DQUOTE *( qdtext / quoted-pair ) DQUOTE\n// qdtext          = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text\n// obs-text        = 
%x80-FF\n// quoted-pair     = \"\\\" ( HTAB / SP / VCHAR / obs-text )\n// VCHAR           =  %x21-7E  ; visible (printing) characters\n\n// Multiple directives are comma separated according to:\n// https://httpwg.org/specs/rfc7234.html#collected.abnf\n\nRequestCacheControl::RequestCacheControl(absl::string_view cache_control_header) {\n  const std::vector<absl::string_view> directives = absl::StrSplit(cache_control_header, ',');\n\n  for (auto full_directive : directives) {\n    absl::string_view directive, argument;\n    std::tie(directive, argument) = separateDirectiveAndArgument(full_directive);\n\n    if (directive == \"no-cache\") {\n      must_validate_ = true;\n    } else if (directive == \"no-store\") {\n      no_store_ = true;\n    } else if (directive == \"no-transform\") {\n      no_transform_ = true;\n    } else if (directive == \"only-if-cached\") {\n      only_if_cached_ = true;\n    } else if (directive == \"max-age\") {\n      max_age_ = parseDuration(argument);\n    } else if (directive == \"min-fresh\") {\n      min_fresh_ = parseDuration(argument);\n    } else if (directive == \"max-stale\") {\n      max_stale_ = argument.empty() ? 
SystemTime::duration::max() : parseDuration(argument);\n    }\n  }\n}\n\nResponseCacheControl::ResponseCacheControl(absl::string_view cache_control_header) {\n  const std::vector<absl::string_view> directives = absl::StrSplit(cache_control_header, ',');\n\n  for (auto full_directive : directives) {\n    absl::string_view directive, argument;\n    std::tie(directive, argument) = separateDirectiveAndArgument(full_directive);\n\n    if (directive == \"no-cache\") {\n      // If no-cache directive has arguments they are ignored - not handled.\n      must_validate_ = true;\n    } else if (directive == \"must-revalidate\" || directive == \"proxy-revalidate\") {\n      no_stale_ = true;\n    } else if (directive == \"no-store\" || directive == \"private\") {\n      // If private directive has arguments they are ignored - not handled.\n      no_store_ = true;\n    } else if (directive == \"no-transform\") {\n      no_transform_ = true;\n    } else if (directive == \"public\") {\n      is_public_ = true;\n    } else if (directive == \"s-maxage\") {\n      max_age_ = parseDuration(argument);\n    } else if (!max_age_.has_value() && directive == \"max-age\") {\n      max_age_ = parseDuration(argument);\n    }\n  }\n}\n\nbool operator==(const RequestCacheControl& lhs, const RequestCacheControl& rhs) {\n  return (lhs.must_validate_ == rhs.must_validate_) && (lhs.no_store_ == rhs.no_store_) &&\n         (lhs.no_transform_ == rhs.no_transform_) && (lhs.only_if_cached_ == rhs.only_if_cached_) &&\n         (lhs.max_age_ == rhs.max_age_) && (lhs.min_fresh_ == rhs.min_fresh_) &&\n         (lhs.max_stale_ == rhs.max_stale_);\n}\n\nbool operator==(const ResponseCacheControl& lhs, const ResponseCacheControl& rhs) {\n  return (lhs.must_validate_ == rhs.must_validate_) && (lhs.no_store_ == rhs.no_store_) &&\n         (lhs.no_transform_ == rhs.no_transform_) && (lhs.no_stale_ == rhs.no_stale_) &&\n         (lhs.is_public_ == rhs.is_public_) && (lhs.max_age_ == 
rhs.max_age_);\n}\n\nSystemTime CacheHeadersUtils::httpTime(const Http::HeaderEntry* header_entry) {\n  if (!header_entry) {\n    return {};\n  }\n  absl::Time time;\n  const std::string input(header_entry->value().getStringView());\n\n  // Acceptable Date/Time Formats per:\n  // https://tools.ietf.org/html/rfc7231#section-7.1.1.1\n  //\n  // Sun, 06 Nov 1994 08:49:37 GMT    ; IMF-fixdate.\n  // Sunday, 06-Nov-94 08:49:37 GMT   ; obsolete RFC 850 format.\n  // Sun Nov  6 08:49:37 1994         ; ANSI C's asctime() format.\n  static const char* rfc7231_date_formats[] = {\"%a, %d %b %Y %H:%M:%S GMT\",\n                                               \"%A, %d-%b-%y %H:%M:%S GMT\", \"%a %b %e %H:%M:%S %Y\"};\n\n  for (const std::string& format : rfc7231_date_formats) {\n    if (absl::ParseTime(format, input, &time, nullptr)) {\n      return ToChronoTime(time);\n    }\n  }\n  return {};\n}\n\nSeconds CacheHeadersUtils::calculateAge(const Http::ResponseHeaderMap& response_headers,\n                                        const SystemTime response_time, const SystemTime now) {\n  // Age headers calculations follow: https://httpwg.org/specs/rfc7234.html#age.calculations\n  const SystemTime date_value = CacheHeadersUtils::httpTime(response_headers.Date());\n\n  long age_value;\n  const absl::string_view age_header = response_headers.getInlineValue(age_handle.handle());\n  if (!absl::SimpleAtoi(age_header, &age_value)) {\n    age_value = 0;\n  }\n\n  const SystemTime::duration apparent_age =\n      std::max(SystemTime::duration(0), response_time - date_value);\n\n  // Assumption: response_delay is negligible -> corrected_age_value = age_value.\n  const SystemTime::duration corrected_age_value = Seconds(age_value);\n  const SystemTime::duration corrected_initial_age = std::max(apparent_age, corrected_age_value);\n\n  // Calculate current_age:\n  const SystemTime::duration resident_time = now - response_time;\n  const SystemTime::duration current_age = corrected_initial_age + 
resident_time;\n\n  return std::chrono::duration_cast<Seconds>(current_age);\n}\n\nabsl::optional<uint64_t> CacheHeadersUtils::readAndRemoveLeadingDigits(absl::string_view& str) {\n  uint64_t val = 0;\n  uint32_t bytes_consumed = 0;\n\n  for (const char cur : str) {\n    if (!absl::ascii_isdigit(cur)) {\n      break;\n    }\n    uint64_t new_val = (val * 10) + (cur - '0');\n    if (new_val / 8 < val) {\n      // Overflow occurred\n      return absl::nullopt;\n    }\n    val = new_val;\n    ++bytes_consumed;\n  }\n\n  if (bytes_consumed) {\n    // Consume some digits\n    str.remove_prefix(bytes_consumed);\n    return val;\n  }\n  return absl::nullopt;\n}\n\nvoid CacheHeadersUtils::getAllMatchingHeaderNames(\n    const Http::HeaderMap& headers, const std::vector<Matchers::StringMatcherPtr>& ruleset,\n    absl::flat_hash_set<absl::string_view>& out) {\n  headers.iterate([&ruleset, &out](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    absl::string_view header_name = header.key().getStringView();\n    for (const auto& rule : ruleset) {\n      if (rule->match(header_name)) {\n        out.emplace(header_name);\n        break;\n      }\n    }\n    return Http::HeaderMap::Iterate::Continue;\n  });\n}\n\nstd::vector<std::string>\nCacheHeadersUtils::parseCommaDelimitedList(const Http::HeaderEntry* entry) {\n  if (!entry) {\n    return {};\n  }\n\n  std::vector<std::string> header_values = absl::StrSplit(entry->value().getStringView(), ',');\n  for (std::string& value : header_values) {\n    // TODO(cbdm): Might be able to improve the performance here by using StringUtil::trim to\n    // remove whitespace.\n    absl::StripAsciiWhitespace(&value);\n  }\n\n  return header_values;\n}\n\nVaryHeader::VaryHeader(\n    const Protobuf::RepeatedPtrField<envoy::type::matcher::v3::StringMatcher>& allow_list) {\n\n  for (const auto& rule : allow_list) {\n    allow_list_.emplace_back(std::make_unique<Matchers::StringMatcherImpl>(rule));\n  }\n}\n\nbool 
VaryHeader::isAllowed(const Http::ResponseHeaderMap& headers) const {\n  if (!VaryHeader::hasVary(headers)) {\n    return true;\n  }\n\n  std::vector<std::string> varied_headers =\n      CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary));\n\n  for (const std::string& header : varied_headers) {\n    bool valid = false;\n\n    // \"Vary: *\" should never be cached per:\n    // https://tools.ietf.org/html/rfc7231#section-7.1.4\n    if (header == \"*\") {\n      return false;\n    }\n\n    for (const auto& rule : allow_list_) {\n      if (rule->match(header)) {\n        valid = true;\n        break;\n      }\n    }\n\n    if (!valid) {\n      return false;\n    }\n  }\n\n  return true;\n}\n\nbool VaryHeader::hasVary(const Http::ResponseHeaderMap& headers) {\n  const Http::HeaderEntry* vary_header = headers.get(Http::Headers::get().Vary);\n  return vary_header != nullptr && !vary_header->value().empty();\n}\n\nnamespace {\n// The separator characters are used to create the vary-key, and must be characters that are\n// invalid to be inside values and header names. The chosen characters are invalid per:\n// https://tools.ietf.org/html/rfc2616#section-4.2.\n\n// Used to separate the values of different headers.\nconstexpr absl::string_view header_separator = \"\\n\";\n// Used to separate multiple values of a same header.\nconstexpr absl::string_view in_value_separator = \"\\r\";\n}; // namespace\n\nstd::string VaryHeader::createVaryKey(const Http::HeaderEntry* vary_header,\n                                      const Http::RequestHeaderMap& entry_headers) {\n  if (vary_header == nullptr) {\n    return \"\";\n  }\n\n  ASSERT(vary_header->key() == \"vary\");\n\n  std::string vary_key = \"vary-key\\n\";\n\n  for (const std::string& header : CacheHeadersUtils::parseCommaDelimitedList(vary_header)) {\n    // TODO(cbdm): Can add some bucketing logic here based on header. 
For example, we could\n    // normalize the values for accept-language by making all of {en-CA, en-GB, en-US} into\n    // \"en\". This way we would not need to store multiple versions of the same payload, and any\n    // of those values would find the payload in the requested language. Another example would be to\n    // bucket UserAgent values into android/ios/desktop; UserAgent::initializeFromHeaders tries to\n    // do that normalization and could be used as an inspiration for some bucketing configuration.\n    // The config should enable and control the bucketing wanted.\n    std::vector<absl::string_view> header_values;\n    Http::HeaderUtility::getAllOfHeader(entry_headers, header, header_values);\n    absl::StrAppend(&vary_key, header, in_value_separator,\n                    absl::StrJoin(header_values, in_value_separator), header_separator);\n  }\n\n  return vary_key;\n}\n\nHttp::RequestHeaderMapPtr\nVaryHeader::possibleVariedHeaders(const Http::RequestHeaderMap& request_headers) const {\n  Http::RequestHeaderMapPtr possible_headers =\n      Http::createHeaderMap<Http::RequestHeaderMapImpl>({});\n\n  absl::flat_hash_set<absl::string_view> header_names;\n  CacheHeadersUtils::getAllMatchingHeaderNames(request_headers, allow_list_, header_names);\n\n  for (const absl::string_view& header : header_names) {\n    std::vector<absl::string_view> values;\n    Http::HeaderUtility::getAllOfHeader(request_headers, header, values);\n    for (const absl::string_view& value : values) {\n      possible_headers->addCopy(Http::LowerCaseString(std::string{header}), value);\n    }\n  }\n\n  return possible_headers;\n}\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/cache_headers_utils.h",
    "content": "#pragma once\n\n#include \"envoy/common/time.h\"\n#include \"envoy/extensions/filters/http/cache/v3alpha/cache.pb.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/time.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\nusing OptionalDuration = absl::optional<SystemTime::duration>;\n\n// According to: https://httpwg.org/specs/rfc7234.html#cache-request-directive\nstruct RequestCacheControl {\n  RequestCacheControl() = default;\n  explicit RequestCacheControl(absl::string_view cache_control_header);\n\n  // must_validate is true if 'no-cache' directive is present\n  // A cached response must not be served without successful validation with the origin\n  bool must_validate_ = false;\n\n  // The response to this request must not be cached (stored)\n  bool no_store_ = false;\n\n  // 'no-transform' directive is not used now\n  // No transformations should be done to the response of this request, as defined by:\n  // https://httpwg.org/specs/rfc7230.html#message.transformations\n  bool no_transform_ = false;\n\n  // 'only-if-cached' directive is not used now\n  // The request should be satisfied using a cached response, or respond with 504 (Gateway Error)\n  bool only_if_cached_ = false;\n\n  // The client is unwilling to receive a cached response whose age exceeds the max-age\n  OptionalDuration max_age_;\n\n  // The client is unwilling to received a cached response that satisfies:\n  //   expiration_time - now < min-fresh\n  OptionalDuration min_fresh_;\n\n  // The client is willing to receive a stale response that satisfies:\n  //   now - expiration_time < max-stale\n  // If max-stale has no value then the client 
is willing to receive any stale response\n  OptionalDuration max_stale_;\n};\n\n// According to: https://httpwg.org/specs/rfc7234.html#cache-response-directive\nstruct ResponseCacheControl {\n  ResponseCacheControl() = default;\n  explicit ResponseCacheControl(absl::string_view cache_control_header);\n\n  // must_validate is true if 'no-cache' directive is present; arguments are ignored for now\n  // This response must not be used to satisfy subsequent requests without successful validation\n  // with the origin\n  bool must_validate_ = false;\n\n  // no_store is true if any of 'no-store' or 'private' directives is present.\n  // 'private' arguments are ignored for now so it is equivalent to 'no-store'\n  // This response must not be cached (stored)\n  bool no_store_ = false;\n\n  // 'no-transform' directive is not used now\n  // No transformations should be done to this response , as defined by:\n  // https://httpwg.org/specs/rfc7230.html#message.transformations\n  bool no_transform_ = false;\n\n  // no_stale is true if any of 'must-revalidate' or 'proxy-revalidate' directives is present\n  // This response must not be served stale without successful validation with the origin\n  bool no_stale_ = false;\n\n  // 'public' directive is not used now\n  // This response may be stored, even if the response would normally be non-cacheable or cacheable\n  // only within a private cache, see:\n  // https://httpwg.org/specs/rfc7234.html#cache-response-directive.public\n  bool is_public_ = false;\n\n  // max_age is set if to 's-maxage' if present, if not it is set to 'max-age' if present.\n  // Indicates the maximum time after which this response will be considered stale\n  OptionalDuration max_age_;\n};\n\nbool operator==(const RequestCacheControl& lhs, const RequestCacheControl& rhs);\nbool operator==(const ResponseCacheControl& lhs, const ResponseCacheControl& rhs);\n\nclass CacheHeadersUtils {\npublic:\n  // Parses header_entry as an HTTP time. 
Returns SystemTime() if\n  // header_entry is null or malformed.\n  static SystemTime httpTime(const Http::HeaderEntry* header_entry);\n\n  // Calculates the age of a cached response\n  static Seconds calculateAge(const Http::ResponseHeaderMap& response_headers,\n                              SystemTime response_time, SystemTime now);\n\n  /**\n   * Read a leading positive decimal integer value and advance \"*str\" past the\n   * digits read. If overflow occurs, or no digits exist, return\n   * absl::nullopt without advancing \"*str\".\n   */\n  static absl::optional<uint64_t> readAndRemoveLeadingDigits(absl::string_view& str);\n\n  // Add to out all header names from the given map that match any of the given rules.\n  static void getAllMatchingHeaderNames(const Http::HeaderMap& headers,\n                                        const std::vector<Matchers::StringMatcherPtr>& ruleset,\n                                        absl::flat_hash_set<absl::string_view>& out);\n\n  // Parses the values of a comma-delimited list as defined per\n  // https://tools.ietf.org/html/rfc7230#section-7.\n  static std::vector<std::string> parseCommaDelimitedList(const Http::HeaderEntry* entry);\n};\n\nclass VaryHeader {\npublic:\n  // Checks if the headers contain a non-empty value in the Vary header.\n  static bool hasVary(const Http::ResponseHeaderMap& headers);\n\n  // Creates a single string combining the values of the varied headers from entry_headers.\n  static std::string createVaryKey(const Http::HeaderEntry* vary_header,\n                                   const Http::RequestHeaderMap& entry_headers);\n\n  // Parses the allow list from the Cache Config into the object's private allow_list_.\n  VaryHeader(const Protobuf::RepeatedPtrField<envoy::type::matcher::v3::StringMatcher>& allow_list);\n\n  // Checks if the headers contain an allowed value in the Vary header.\n  bool isAllowed(const Http::ResponseHeaderMap& headers) const;\n\n  // Returns a header map containing the 
subset of the original headers that can be varied from the\n  // request.\n  Http::RequestHeaderMapPtr\n  possibleVariedHeaders(const Http::RequestHeaderMap& request_headers) const;\n\nprivate:\n  // Stores the matching rules that define whether a header is allowed to be varied.\n  std::vector<Matchers::StringMatcherPtr> allow_list_;\n};\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/cacheability_utils.cc",
    "content": "#include \"extensions/filters/http/cache/cacheability_utils.h\"\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n\n#include \"extensions/filters/http/cache/inline_headers_handles.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\nnamespace {\nconst absl::flat_hash_set<absl::string_view>& cacheableStatusCodes() {\n  // As defined by:\n  // https://tools.ietf.org/html/rfc7231#section-6.1,\n  // https://tools.ietf.org/html/rfc7538#section-3,\n  // https://tools.ietf.org/html/rfc7725#section-3\n  // TODO(yosrym93): the list of cacheable status codes should be configurable.\n  CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set<absl::string_view>, \"200\", \"203\", \"204\", \"206\", \"300\",\n                         \"301\", \"308\", \"404\", \"405\", \"410\", \"414\", \"451\", \"501\");\n}\n\nconst std::vector<const Http::LowerCaseString*>& conditionalHeaders() {\n  // As defined by: https://httpwg.org/specs/rfc7232.html#preconditions.\n  CONSTRUCT_ON_FIRST_USE(\n      std::vector<const Http::LowerCaseString*>, &Http::CustomHeaders::get().IfMatch,\n      &Http::CustomHeaders::get().IfNoneMatch, &Http::CustomHeaders::get().IfModifiedSince,\n      &Http::CustomHeaders::get().IfUnmodifiedSince, &Http::CustomHeaders::get().IfRange);\n}\n} // namespace\n\nbool CacheabilityUtils::isCacheableRequest(const Http::RequestHeaderMap& headers) {\n  const absl::string_view method = headers.getMethodValue();\n  const absl::string_view forwarded_proto = headers.getForwardedProtoValue();\n  const Http::HeaderValues& header_values = Http::Headers::get();\n\n  // Check if the request contains any conditional headers.\n  // For now, requests with conditional headers bypass the CacheFilter.\n  // This behavior does not cause any incorrect results, but may reduce the cache effectiveness.\n  // If needed to be handled properly refer to:\n  // 
https://httpwg.org/specs/rfc7234.html#validation.received\n  for (auto conditional_header : conditionalHeaders()) {\n    if (headers.get(*conditional_header)) {\n      return false;\n    }\n  }\n\n  // TODO(toddmgreer): Also serve HEAD requests from cache.\n  // Cache-related headers are checked in HttpCache::LookupRequest.\n  return headers.Path() && headers.Host() && !headers.getInline(authorization_handle.handle()) &&\n         (method == header_values.MethodValues.Get) &&\n         (forwarded_proto == header_values.SchemeValues.Http ||\n          forwarded_proto == header_values.SchemeValues.Https);\n}\n\nbool CacheabilityUtils::isCacheableResponse(const Http::ResponseHeaderMap& headers,\n                                            const VaryHeader& vary_allow_list) {\n  absl::string_view cache_control = headers.getInlineValue(response_cache_control_handle.handle());\n  ResponseCacheControl response_cache_control(cache_control);\n\n  // Only cache responses with enough data to calculate freshness lifetime as per:\n  // https://httpwg.org/specs/rfc7234.html#calculating.freshness.lifetime.\n  // Either:\n  //    \"no-cache\" cache-control directive (requires revalidation anyway).\n  //    \"max-age\" or \"s-maxage\" cache-control directives.\n  //    Both \"Expires\" and \"Date\" headers.\n  const bool has_validation_data = response_cache_control.must_validate_ ||\n                                   response_cache_control.max_age_.has_value() ||\n                                   (headers.Date() && headers.getInline(expires_handle.handle()));\n\n  return !response_cache_control.no_store_ &&\n         cacheableStatusCodes().contains((headers.getStatusValue())) && has_validation_data &&\n         vary_allow_list.isAllowed(headers);\n}\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/cacheability_utils.h",
    "content": "#pragma once\n\n#include \"common/common/utility.h\"\n#include \"common/http/headers.h\"\n\n#include \"extensions/filters/http/cache/cache_headers_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\nclass CacheabilityUtils {\npublic:\n  // Checks if a request can be served from cache.\n  // This does not depend on cache-control headers as\n  // request cache-control headers only decide whether\n  // validation is required and whether the response can be cached.\n  static bool isCacheableRequest(const Http::RequestHeaderMap& headers);\n\n  // Checks if a response can be stored in cache.\n  // Note that if a request is not cacheable according to 'isCacheableRequest'\n  // then its response is also not cacheable.\n  // Therefore, isCacheableRequest, isCacheableResponse and CacheFilter::request_allows_inserts_\n  // together should cover https://httpwg.org/specs/rfc7234.html#response.cacheability.\n  static bool isCacheableResponse(const Http::ResponseHeaderMap& headers,\n                                  const VaryHeader& vary_allow_list);\n};\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/config.cc",
    "content": "#include \"extensions/filters/http/cache/config.h\"\n\n#include \"extensions/filters/http/cache/cache_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\nHttp::FilterFactoryCb CacheFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n  const std::string type{TypeUtil::typeUrlToDescriptorFullName(config.typed_config().type_url())};\n  HttpCacheFactory* const http_cache_factory =\n      Registry::FactoryRegistry<HttpCacheFactory>::getFactoryByType(type);\n  if (http_cache_factory == nullptr) {\n    throw EnvoyException(\n        fmt::format(\"Didn't find a registered implementation for type: '{}'\", type));\n  }\n\n  return [config, stats_prefix, &context,\n          http_cache_factory](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<CacheFilter>(config, stats_prefix, context.scope(),\n                                                            context.timeSource(),\n                                                            http_cache_factory->getCache(config)));\n  };\n}\n\nREGISTER_FACTORY(CacheFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/cache/v3alpha/cache.pb.h\"\n#include \"envoy/extensions/filters/http/cache/v3alpha/cache.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\nclass CacheFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::cache::v3alpha::CacheConfig> {\npublic:\n  CacheFilterFactory() : FactoryBase(HttpFilterNames::get().Cache) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/http_cache.cc",
    "content": "#include \"extensions/filters/http/cache/http_cache.h\"\n\n#include <algorithm>\n#include <ostream>\n#include <vector>\n\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/cache/cache_headers_utils.h\"\n#include \"extensions/filters/http/cache/inline_headers_handles.h\"\n\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/time.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\nLookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp,\n                             const VaryHeader& vary_allow_list)\n    : timestamp_(timestamp) {\n  // These ASSERTs check prerequisites. A request without these headers can't be looked up in cache;\n  // CacheFilter doesn't create LookupRequests for such requests.\n  ASSERT(request_headers.Path(), \"Can't form cache lookup key for malformed Http::RequestHeaderMap \"\n                                 \"with null Path.\");\n  ASSERT(\n      request_headers.ForwardedProto(),\n      \"Can't form cache lookup key for malformed Http::RequestHeaderMap with null ForwardedProto.\");\n  ASSERT(request_headers.Host(), \"Can't form cache lookup key for malformed Http::RequestHeaderMap \"\n                                 \"with null Host.\");\n  const Http::HeaderString& forwarded_proto = request_headers.ForwardedProto()->value();\n  const auto& scheme_values = Http::Headers::get().SchemeValues;\n  ASSERT(forwarded_proto == scheme_values.Http || forwarded_proto == scheme_values.Https);\n\n  initializeRequestCacheControl(request_headers);\n  // TODO(toddmgreer): Let config determine whether to include forwarded_proto, host, and\n  // query params.\n  // TODO(toddmgreer): get cluster name.\n  if 
(request_headers.getMethodValue() == Http::Headers::get().MethodValues.Get) {\n    // TODO(cbdm): using a constant limit of 1 range since we don't support multi-part responses nor\n    // coalesce multiple overlapping ranges. Could make this into a parameter based on config.\n    const int RangeSpecifierLimit = 1;\n    request_range_spec_ = RangeRequests::parseRanges(request_headers, RangeSpecifierLimit);\n  }\n  key_.set_cluster_name(\"cluster_name_goes_here\");\n  key_.set_host(std::string(request_headers.getHostValue()));\n  key_.set_path(std::string(request_headers.getPathValue()));\n  key_.set_clear_http(forwarded_proto == scheme_values.Http);\n\n  vary_headers_ = vary_allow_list.possibleVariedHeaders(request_headers);\n}\n\n// Unless this API is still alpha, calls to stableHashKey() must always return\n// the same result, or a way must be provided to deal with a complete cache\n// flush. localHashKey however, can be changed at will.\nsize_t stableHashKey(const Key& key) { return MessageUtil::hash(key); }\nsize_t localHashKey(const Key& key) { return stableHashKey(key); }\n\nvoid LookupRequest::initializeRequestCacheControl(const Http::RequestHeaderMap& request_headers) {\n  const absl::string_view cache_control =\n      request_headers.getInlineValue(request_cache_control_handle.handle());\n  const absl::string_view pragma = request_headers.getInlineValue(pragma_handle.handle());\n\n  if (!cache_control.empty()) {\n    request_cache_control_ = RequestCacheControl(cache_control);\n  } else {\n    // According to: https://httpwg.org/specs/rfc7234.html#header.pragma,\n    // when Cache-Control header is missing, \"Pragma:no-cache\" is equivalent to\n    // \"Cache-Control:no-cache\". 
Any other directives are ignored.\n    request_cache_control_.must_validate_ = RequestCacheControl(pragma).must_validate_;\n  }\n}\n\nbool LookupRequest::requiresValidation(const Http::ResponseHeaderMap& response_headers,\n                                       SystemTime::duration response_age) const {\n  // TODO(yosrym93): Store parsed response cache-control in cache instead of parsing it on every\n  // lookup.\n  const absl::string_view cache_control =\n      response_headers.getInlineValue(response_cache_control_handle.handle());\n  const ResponseCacheControl response_cache_control(cache_control);\n\n  const bool request_max_age_exceeded = request_cache_control_.max_age_.has_value() &&\n                                        request_cache_control_.max_age_.value() < response_age;\n  if (response_cache_control.must_validate_ || request_cache_control_.must_validate_ ||\n      request_max_age_exceeded) {\n    // Either the request or response explicitly require validation, or a request max-age\n    // requirement is not satisfied.\n    return true;\n  }\n\n  // CacheabilityUtils::isCacheableResponse(..) 
guarantees that any cached response satisfies this.\n  ASSERT(response_cache_control.max_age_.has_value() ||\n             (response_headers.getInline(expires_handle.handle()) && response_headers.Date()),\n         \"Cache entry does not have valid expiration data.\");\n\n  SystemTime::duration freshness_lifetime;\n  if (response_cache_control.max_age_.has_value()) {\n    freshness_lifetime = response_cache_control.max_age_.value();\n  } else {\n    const SystemTime expires_value =\n        CacheHeadersUtils::httpTime(response_headers.getInline(expires_handle.handle()));\n    const SystemTime date_value = CacheHeadersUtils::httpTime(response_headers.Date());\n    freshness_lifetime = expires_value - date_value;\n  }\n\n  if (response_age > freshness_lifetime) {\n    // Response is stale, requires validation if\n    // the response does not allow being served stale,\n    // or the request max-stale directive does not allow it.\n    const bool allowed_by_max_stale =\n        request_cache_control_.max_stale_.has_value() &&\n        request_cache_control_.max_stale_.value() > response_age - freshness_lifetime;\n    return response_cache_control.no_stale_ || !allowed_by_max_stale;\n  } else {\n    // Response is fresh, requires validation only if there is an unsatisfied min-fresh requirement.\n    const bool min_fresh_unsatisfied =\n        request_cache_control_.min_fresh_.has_value() &&\n        request_cache_control_.min_fresh_.value() > freshness_lifetime - response_age;\n    return min_fresh_unsatisfied;\n  }\n}\n\nLookupResult LookupRequest::makeLookupResult(Http::ResponseHeaderMapPtr&& response_headers,\n                                             ResponseMetadata&& metadata,\n                                             uint64_t content_length) const {\n  // TODO(toddmgreer): Implement all HTTP caching semantics.\n  ASSERT(response_headers);\n  LookupResult result;\n\n  // Assumption: Cache lookup time is negligible. 
Therefore, now == timestamp_\n  const Seconds age =\n      CacheHeadersUtils::calculateAge(*response_headers, metadata.response_time_, timestamp_);\n  response_headers->setInline(age_handle.handle(), std::to_string(age.count()));\n\n  result.cache_entry_status_ = requiresValidation(*response_headers, age)\n                                   ? CacheEntryStatus::RequiresValidation\n                                   : CacheEntryStatus::Ok;\n  result.headers_ = std::move(response_headers);\n  result.content_length_ = content_length;\n  if (!adjustByteRangeSet(result.response_ranges_, request_range_spec_, content_length)) {\n    result.cache_entry_status_ = CacheEntryStatus::NotSatisfiableRange;\n  } else if (!result.response_ranges_.empty()) {\n    result.cache_entry_status_ = CacheEntryStatus::SatisfiableRange;\n  }\n  result.has_trailers_ = false;\n  return result;\n}\n\nbool adjustByteRangeSet(std::vector<AdjustedByteRange>& response_ranges,\n                        const std::vector<RawByteRange>& request_range_spec,\n                        uint64_t content_length) {\n  if (request_range_spec.empty()) {\n    // No range header, so the request can proceed.\n    return true;\n  }\n\n  if (content_length == 0) {\n    // There is a range header, but it's unsatisfiable.\n    return false;\n  }\n\n  for (const RawByteRange& spec : request_range_spec) {\n    if (spec.isSuffix()) {\n      // spec is a suffix-byte-range-spec.\n      if (spec.suffixLength() == 0) {\n        // This range is unsatisfiable, so skip it.\n        continue;\n      }\n      if (spec.suffixLength() >= content_length) {\n        // All bytes are being requested, so we may as well send a '200\n        // OK' response.\n        response_ranges.clear();\n        return true;\n      }\n      response_ranges.emplace_back(content_length - spec.suffixLength(), content_length);\n    } else {\n      // spec is a byte-range-spec\n      if (spec.firstBytePos() >= content_length) {\n        // This range is 
unsatisfiable, so skip it.\n        continue;\n      }\n      if (spec.lastBytePos() >= content_length - 1) {\n        if (spec.firstBytePos() == 0) {\n          // All bytes are being requested, so we may as well send a '200\n          // OK' response.\n          response_ranges.clear();\n          return true;\n        }\n        response_ranges.emplace_back(spec.firstBytePos(), content_length);\n      } else {\n        response_ranges.emplace_back(spec.firstBytePos(), spec.lastBytePos() + 1);\n      }\n    }\n  }\n  if (response_ranges.empty()) {\n    // All ranges were unsatisfiable.\n    return false;\n  }\n  return true;\n}\n\nstd::vector<RawByteRange> RangeRequests::parseRanges(const Http::RequestHeaderMap& request_headers,\n                                                     uint64_t max_byte_range_specs) {\n  // Makes sure we have a GET request, as Range headers are only valid with this type of request.\n  const absl::string_view method = request_headers.getMethodValue();\n  ASSERT(method == Http::Headers::get().MethodValues.Get);\n\n  // Multiple instances of range headers are invalid.\n  // https://tools.ietf.org/html/rfc7230#section-3.2.2\n  std::vector<absl::string_view> range_headers;\n  Http::HeaderUtility::getAllOfHeader(request_headers, Http::Headers::get().Range.get(),\n                                      range_headers);\n\n  absl::string_view header_value;\n  if (range_headers.size() == 1) {\n    header_value = range_headers.front();\n  } else {\n    if (range_headers.size() > 1) {\n      ENVOY_LOG(debug, \"Multiple range headers provided in request. Ignoring all range headers.\");\n    }\n    return {};\n  }\n\n  if (!absl::ConsumePrefix(&header_value, \"bytes=\")) {\n    ENVOY_LOG(debug, \"Invalid range header. range-unit not correctly specified, only 'bytes' \"\n                     \"supported. 
Ignoring range header.\");\n    return {};\n  }\n\n  std::vector<absl::string_view> ranges =\n      absl::StrSplit(header_value, absl::MaxSplits(',', max_byte_range_specs));\n  if (ranges.size() > max_byte_range_specs) {\n    ENVOY_LOG(debug,\n              \"There are more ranges than allowed by the byte range parse limit ({}). Ignoring \"\n              \"range header.\",\n              max_byte_range_specs);\n    return {};\n  }\n\n  std::vector<RawByteRange> parsed_ranges;\n  for (absl::string_view cur_range : ranges) {\n    absl::optional<uint64_t> first = CacheHeadersUtils::readAndRemoveLeadingDigits(cur_range);\n\n    if (!absl::ConsumePrefix(&cur_range, \"-\")) {\n      ENVOY_LOG(debug,\n                \"Invalid format for range header: missing range-end. Ignoring range header.\");\n      return {};\n    }\n\n    absl::optional<uint64_t> last = CacheHeadersUtils::readAndRemoveLeadingDigits(cur_range);\n\n    if (!cur_range.empty()) {\n      ENVOY_LOG(debug,\n                \"Unexpected characters after byte range in range header. Ignoring range header.\");\n      return {};\n    }\n\n    if (!first && !last) {\n      ENVOY_LOG(debug, \"Invalid format for range header: missing first-byte-pos AND last-byte-pos; \"\n                       \"at least one of them is required. Ignoring range header.\");\n      return {};\n    }\n\n    // Handle suffix range (e.g., -123).\n    if (!first) {\n      first = std::numeric_limits<uint64_t>::max();\n    }\n\n    // Handle optional range-end (e.g., 123-).\n    if (!last) {\n      last = std::numeric_limits<uint64_t>::max();\n    }\n\n    if (first != std::numeric_limits<uint64_t>::max() && first > last) {\n      ENVOY_LOG(debug, \"Invalid format for range header: range-start and range-end out of order. 
\"\n                       \"Ignoring range header.\");\n      return {};\n    }\n\n    parsed_ranges.push_back(RawByteRange(first.value(), last.value()));\n  }\n\n  return parsed_ranges;\n}\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/http_cache.h",
    "content": "#pragma once\n\n#include <iosfwd>\n#include <string>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/extensions/filters/http/cache/v3alpha/cache.pb.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"source/extensions/filters/http/cache/key.pb.h\"\n\n#include \"extensions/filters/http/cache/cache_headers_utils.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n// Whether a given cache entry is good for the current request.\nenum class CacheEntryStatus {\n  // This entry is fresh, and an appropriate response to the request.\n  Ok,\n  // No usable entry was found. If this was generated for a cache entry, the\n  // cache should delete that entry.\n  Unusable,\n  // This entry is stale, but appropriate for validating\n  RequiresValidation,\n  // This entry is fresh, and an appropriate basis for a 304 Not Modified\n  // response.\n  FoundNotModified,\n  // This entry is fresh, but cannot satisfy the requested range(s).\n  NotSatisfiableRange,\n  // This entry is fresh, and can satisfy the requested range(s).\n  SatisfiableRange,\n};\n\n// Byte range from an HTTP request.\nclass RawByteRange {\npublic:\n  // - If first==UINT64_MAX, construct a RawByteRange requesting the final last body bytes.\n  // - Otherwise, construct a RawByteRange requesting the [first,last] body bytes.\n  // Prereq: first == UINT64_MAX || first <= last\n  // Invariant: isSuffix() || firstBytePos() <= lastBytePos\n  // Examples: RawByteRange(0,4) requests the first 5 bytes.\n  //           RawByteRange(UINT64_MAX,4) requests the last 4 bytes.\n  RawByteRange(uint64_t first, uint64_t last) : first_byte_pos_(first), last_byte_pos_(last) {\n    ASSERT(isSuffix() || first <= last, \"Illegal byte range.\");\n  }\n  bool 
isSuffix() const { return first_byte_pos_ == UINT64_MAX; }\n  uint64_t firstBytePos() const {\n    ASSERT(!isSuffix());\n    return first_byte_pos_;\n  }\n  uint64_t lastBytePos() const {\n    ASSERT(!isSuffix());\n    return last_byte_pos_;\n  }\n  uint64_t suffixLength() const {\n    ASSERT(isSuffix());\n    return last_byte_pos_;\n  }\n\nprivate:\n  const uint64_t first_byte_pos_;\n  const uint64_t last_byte_pos_;\n};\n\nclass RangeRequests : Logger::Loggable<Logger::Id::cache_filter> {\npublic:\n  // Parses the ranges from the request headers into a vector<RawByteRange>.\n  // max_byte_range_specs defines how many byte ranges can be parsed from the header value.\n  // If there is no range header, multiple range headers, the header value is malformed, or there\n  // are more ranges than max_byte_range_specs, returns an empty vector.\n  static std::vector<RawByteRange> parseRanges(const Http::RequestHeaderMap& request_headers,\n                                               uint64_t max_byte_range_specs);\n};\n\n// Byte range from an HTTP request, adjusted for a known response body size, and converted from an\n// HTTP-style closed interval to a C++ style half-open interval.\nclass AdjustedByteRange {\npublic:\n  // Construct an AdjustedByteRange representing the [first,last) bytes in the\n  // response body. 
Prereq: first <= last Invariant: begin() <= end()\n  // Example: AdjustedByteRange(0,4) represents the first 4 bytes.\n  AdjustedByteRange(uint64_t first, uint64_t last) : first_(first), last_(last) {\n    ASSERT(first < last, \"Illegal byte range.\");\n  }\n  uint64_t begin() const { return first_; }\n  // Unlike RawByteRange, end() is one past the index of the last offset.\n  uint64_t end() const { return last_; }\n  uint64_t length() const { return last_ - first_; }\n  void trimFront(uint64_t n) {\n    ASSERT(n <= length(), \"Attempt to trim too much from range.\");\n    first_ += n;\n  }\n\nprivate:\n  uint64_t first_;\n  uint64_t last_;\n};\n\ninline bool operator==(const AdjustedByteRange& lhs, const AdjustedByteRange& rhs) {\n  return lhs.begin() == rhs.begin() && lhs.end() == rhs.end();\n}\n\n// Adjusts request_range_spec to fit a cached response of size content_length, putting the results\n// in response_ranges. Returns true if response_ranges is satisfiable (empty is considered\n// satisfiable, as it denotes the entire body).\n// TODO(toddmgreer): Merge/reorder ranges where appropriate.\nbool adjustByteRangeSet(std::vector<AdjustedByteRange>& response_ranges,\n                        const std::vector<RawByteRange>& request_range_spec,\n                        uint64_t content_length);\n\n// Result of a lookup operation, including cached headers and information needed\n// to serve a response based on it, or to attempt to validate.\nstruct LookupResult {\n  // If cache_entry_status_ == Unusable, none of the other members are\n  // meaningful.\n  CacheEntryStatus cache_entry_status_ = CacheEntryStatus::Unusable;\n\n  // Headers of the cached response.\n  Http::ResponseHeaderMapPtr headers_;\n\n  // Size of the full response body. 
Cache filter will generate a content-length\n  // header with this value, replacing any preexisting content-length header.\n  // (This lets us dechunk responses as we insert them, then later serve them\n  // with a content-length header.)\n  uint64_t content_length_;\n\n  // Represents the subset of the cached response body that should be served to\n  // the client. If response_ranges.empty(), the entire body should be served.\n  // Otherwise, each Range in response_ranges specifies an exact set of bytes to\n  // serve from the cached response's body. All byte positions in\n  // response_ranges must be in the range [0,content_length). Caches should\n  // ensure that they can efficiently serve these ranges, and may merge and/or\n  // reorder ranges as appropriate, or may clear() response_ranges entirely.\n  std::vector<AdjustedByteRange> response_ranges_;\n\n  // TODO(toddmgreer): Implement trailer support.\n  // True if the cached response has trailers.\n  bool has_trailers_ = false;\n\n  // Update the content length of the object and its response headers.\n  void setContentLength(uint64_t new_length) {\n    content_length_ = new_length;\n    headers_->setContentLength(new_length);\n  }\n};\nusing LookupResultPtr = std::unique_ptr<LookupResult>;\n\n// Produces a hash of key that is consistent across restarts, architectures,\n// builds, and configurations. Caches that store persistent entries based on a\n// 64-bit hash should (but are not required to) use stableHashKey. 
Once this API\n// leaves alpha, any improvements to stableHashKey that would change its output\n// for existing callers is a breaking change.\n//\n// For non-persistent storage, use MessageUtil, which has no long-term stability\n// guarantees.\n//\n// When providing a cached response, Caches must ensure that the keys (and not\n// just their hashes) match.\n//\n// TODO(toddmgreer): Ensure that stability guarantees above are accurate.\nsize_t stableHashKey(const Key& key);\n\n// The metadata associated with a cached response.\n// TODO(yosrym93): This could be changed to a proto if a need arises.\n// If a cache was created with the current interface, then it was changed to a proto, all the cache\n// entries will need to be invalidated.\nstruct ResponseMetadata {\n  // The time at which a response was was most recently inserted, updated, or validated in this\n  // cache. This represents \"response_time\" in the age header calculations at:\n  // https://httpwg.org/specs/rfc7234.html#age.calculations\n  SystemTime response_time_;\n};\n\n// LookupRequest holds everything about a request that's needed to look for a\n// response in a cache, to evaluate whether an entry from a cache is usable, and\n// to determine what ranges are needed.\nclass LookupRequest {\npublic:\n  // Prereq: request_headers's Path(), Scheme(), and Host() are non-null.\n  LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp,\n                const VaryHeader& vary_allow_list);\n\n  const RequestCacheControl& requestCacheControl() const { return request_cache_control_; }\n\n  // Caches may modify the key according to local needs, though care must be\n  // taken to ensure that meaningfully distinct responses have distinct keys.\n  const Key& key() const { return key_; }\n\n  // WARNING: Incomplete--do not use in production (yet).\n  // Returns a LookupResult suitable for sending to the cache filter's\n  // LookupHeadersCallback. 
Specifically,\n  // - LookupResult::cache_entry_status_ is set according to HTTP cache\n  // validation logic.\n  // - LookupResult::headers_ takes ownership of response_headers.\n  // - LookupResult::content_length_ == content_length.\n  // - LookupResult::response_ranges_ entries are satisfiable (as documented\n  // there).\n  LookupResult makeLookupResult(Http::ResponseHeaderMapPtr&& response_headers,\n                                ResponseMetadata&& metadata, uint64_t content_length) const;\n\n  // Warning: this should not be accessed out-of-thread!\n  const Http::RequestHeaderMap& getVaryHeaders() const { return *vary_headers_; }\n\nprivate:\n  void initializeRequestCacheControl(const Http::RequestHeaderMap& request_headers);\n  bool requiresValidation(const Http::ResponseHeaderMap& response_headers,\n                          SystemTime::duration age) const;\n\n  Key key_;\n  std::vector<RawByteRange> request_range_spec_;\n  // Time when this LookupRequest was created (in response to an HTTP request).\n  SystemTime timestamp_;\n  // The subset of this request's headers that match one of the rules in\n  // envoy::extensions::filters::http::cache::v3alpha::CacheConfig::allowed_vary_headers. If a cache\n  // storage implementation forwards lookup requests to a remote cache server that supports *vary*\n  // headers, that server may need to see these headers. 
For local implementations, it may be\n  // simpler to instead call makeLookupResult with each potential response.\n  Http::RequestHeaderMapPtr vary_headers_;\n\n  RequestCacheControl request_cache_control_;\n};\n\n// Statically known information about a cache.\nstruct CacheInfo {\n  absl::string_view name_;\n  bool supports_range_requests_ = false;\n};\n\nusing LookupBodyCallback = std::function<void(Buffer::InstancePtr&&)>;\nusing LookupHeadersCallback = std::function<void(LookupResult&&)>;\nusing LookupTrailersCallback = std::function<void(Http::ResponseTrailerMapPtr&&)>;\nusing InsertCallback = std::function<void(bool success_ready_for_more)>;\n\n// Manages the lifetime of an insertion.\nclass InsertContext {\npublic:\n  // Accepts response_headers for caching. Only called once.\n  virtual void insertHeaders(const Http::ResponseHeaderMap& response_headers,\n                             const ResponseMetadata& metadata, bool end_stream) PURE;\n\n  // The insertion is streamed into the cache in chunks whose size is determined\n  // by the client, but with a pace determined by the cache. To avoid streaming\n  // data into cache too fast for the cache to handle, clients should wait for\n  // the cache to call readyForNextChunk() before streaming the next chunk.\n  //\n  // The client can abort the streaming insertion by dropping the\n  // InsertContextPtr. A cache can abort the insertion by passing 'false' into\n  // ready_for_next_chunk.\n  virtual void insertBody(const Buffer::Instance& chunk, InsertCallback ready_for_next_chunk,\n                          bool end_stream) PURE;\n\n  // Inserts trailers into the cache.\n  virtual void insertTrailers(const Http::ResponseTrailerMap& trailers) PURE;\n\n  // This routine is called prior to an InsertContext being destroyed. InsertContext is responsible\n  // for making sure that any async activities are cleaned up before returning from onDestroy().\n  // This includes timers, network calls, etc. 
The reason there is an onDestroy() method vs. doing\n  // this type of cleanup in the destructor is to avoid potential data races between an async\n  // callback and the destructor in case the connection terminates abruptly.\n  // Example scenario with a hypothetical cache that uses RPC:\n  // 1. [Filter's thread] CacheFilter calls InsertContext::insertBody.\n  // 2. [Filter's thread] RPCInsertContext sends RPC and returns.\n  // 3. [Filter's thread] Client disconnects; Destroying stream; CacheFilter destructor begins.\n  // 4. [Filter's thread] RPCInsertContext destructor begins.\n  // 5. [Other thread] RPC completes and calls RPCInsertContext::onRPCDone.\n  // --> RPCInsertContext's destructor and onRpcDone cause a data race in RpcInsertContext.\n  // onDestroy() should cancel any outstanding async operations and, if necessary,\n  // it should block on that cancellation to avoid data races. InsertContext must not invoke any\n  // callbacks to the CacheFilter after having onDestroy() invoked.\n  virtual void onDestroy() PURE;\n\n  virtual ~InsertContext() = default;\n};\nusing InsertContextPtr = std::unique_ptr<InsertContext>;\n\n// Lookup context manages the lifetime of a lookup, helping clients to pull data\n// from the cache at a pace that works for them. At any time a client can abort\n// an in-progress lookup by simply dropping the LookupContextPtr.\nclass LookupContext {\npublic:\n  // Get the headers from the cache. It is a programming error to call this\n  // twice.\n  virtual void getHeaders(LookupHeadersCallback&& cb) PURE;\n\n  // Reads the next chunk from the cache, calling cb when the chunk is ready.\n  // The Buffer::InstancePtr passed to cb must not be null.\n  //\n  // The cache must call cb with a range of bytes starting at range.start() and\n  // ending at or before range.end(). Caller is responsible for tracking what\n  // ranges have been received, what to request next, and when to stop. 
A cache\n  // can report an error, and cause the response to be aborted, by calling cb\n  // with nullptr.\n  //\n  // If a cache happens to load data in chunks of a set size, it may be\n  // efficient to respond with fewer than the requested number of bytes. For\n  // example, assuming a 23 byte full-bodied response from a cache that reads in\n  // absurdly small 10 byte chunks:\n  //\n  // getBody requests bytes  0-23 .......... callback with bytes 0-9\n  // getBody requests bytes 10-23 .......... callback with bytes 10-19\n  // getBody requests bytes 20-23 .......... callback with bytes 20-23\n  virtual void getBody(const AdjustedByteRange& range, LookupBodyCallback&& cb) PURE;\n\n  // Get the trailers from the cache. Only called if LookupResult::has_trailers == true. The\n  // Http::ResponseTrailerMapPtr passed to cb must not be null.\n  virtual void getTrailers(LookupTrailersCallback&& cb) PURE;\n\n  // This routine is called prior to a LookupContext being destroyed. LookupContext is responsible\n  // for making sure that any async activities are cleaned up before returning from onDestroy().\n  // This includes timers, network calls, etc. The reason there is an onDestroy() method vs. doing\n  // this type of cleanup in the destructor is to avoid potential data races between an async\n  // callback and the destructor in case the connection terminates abruptly.\n  // Example scenario with a hypothetical cache that uses RPC:\n  // 1. [Filter's thread] CacheFilter calls LookupContext::getHeaders.\n  // 2. [Filter's thread] RPCLookupContext sends RPC and returns.\n  // 3. [Filter's thread] Client disconnects; Destroying stream; CacheFilter destructor begins.\n  // 4. [Filter's thread] RPCLookupContext destructor begins.\n  // 5. 
[Other thread] RPC completes and calls RPCLookupContext::onRPCDone.\n  // --> RPCLookupContext's destructor and onRpcDone cause a data race in RPCLookupContext.\n  // onDestroy() should cancel any outstanding async operations and, if necessary,\n  // it should block on that cancellation to avoid data races. InsertContext must not invoke any\n  // callbacks to the CacheFilter after having onDestroy() invoked.\n  virtual void onDestroy() PURE;\n\n  virtual ~LookupContext() = default;\n};\nusing LookupContextPtr = std::unique_ptr<LookupContext>;\n\n// Implement this interface to provide a cache implementation for use by\n// CacheFilter.\nclass HttpCache {\npublic:\n  // Returns a LookupContextPtr to manage the state of a cache lookup. On a cache\n  // miss, the returned LookupContext will be given to the insert call (if any).\n  virtual LookupContextPtr makeLookupContext(LookupRequest&& request) PURE;\n\n  // Returns an InsertContextPtr to manage the state of a cache insertion.\n  // Responses with a chunked transfer-encoding must be dechunked before\n  // insertion.\n  virtual InsertContextPtr makeInsertContext(LookupContextPtr&& lookup_context) PURE;\n\n  // Precondition: lookup_context represents a prior cache lookup that required\n  // validation.\n  //\n  // Update the headers of that cache entry to match response_headers. 
The cache\n  // entry's body and trailers (if any) will not be modified.\n  //\n  // This is called when an expired cache entry is successfully validated, to\n  // update the cache entry.\n  virtual void updateHeaders(const LookupContext& lookup_context,\n                             const Http::ResponseHeaderMap& response_headers,\n                             const ResponseMetadata& metadata) PURE;\n\n  // Returns statically known information about a cache.\n  virtual CacheInfo cacheInfo() const PURE;\n\n  virtual ~HttpCache() = default;\n};\n\n// Factory interface for cache implementations to implement and register.\nclass HttpCacheFactory : public Config::TypedFactory {\npublic:\n  // From UntypedFactory\n  std::string category() const override { return \"envoy.http.cache\"; }\n\n  // Returns an HttpCache that will remain valid indefinitely (at least as long\n  // as the calling CacheFilter).\n  virtual HttpCache&\n  getCache(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config) PURE;\n  ~HttpCacheFactory() override = default;\n\nprivate:\n  const std::string name_;\n};\nusing HttpCacheFactoryPtr = std::unique_ptr<HttpCacheFactory>;\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/inline_headers_handles.h",
    "content": "#pragma once\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/http/headers.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\n// Request headers inline handles\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    authorization_handle(Http::CustomHeaders::get().Authorization);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    pragma_handle(Http::CustomHeaders::get().Pragma);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    request_cache_control_handle(Http::CustomHeaders::get().CacheControl);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    if_match_handle(Http::CustomHeaders::get().IfMatch);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    if_none_match_handle(Http::CustomHeaders::get().IfNoneMatch);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    if_modified_since_handle(Http::CustomHeaders::get().IfModifiedSince);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    if_unmodified_since_handle(Http::CustomHeaders::get().IfUnmodifiedSince);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    if_range_handle(Http::CustomHeaders::get().IfRange);\n\n// Response headers inline handles\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    response_cache_control_handle(Http::CustomHeaders::get().CacheControl);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    last_modified_handle(Http::CustomHeaders::get().LastModified);\n\ninline 
Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    etag_handle(Http::CustomHeaders::get().Etag);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    age_handle(Http::Headers::get().Age);\n\ninline Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    expires_handle(Http::Headers::get().Expires);\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/key.proto",
    "content": "syntax = \"proto3\";\n\npackage Envoy.Extensions.HttpFilters.Cache;\n\n// Cache key for lookups and inserts.\nmessage Key {\n  string cluster_name = 1;\n  string host = 2;\n  string path = 3;\n  string query = 4;\n  // True for http://, false for https://.\n  bool clear_http = 5;\n  // Cache implementations can store arbitrary content in these fields; never set by cache filter.\n  repeated bytes custom_fields = 6;\n  repeated int64 custom_ints = 7;\n};\n"
  },
  {
    "path": "source/extensions/filters/http/cache/simple_http_cache/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n## WIP: Simple in-memory cache storage plugin. Not ready for deployment.\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"simple_http_cache_lib\",\n    srcs = [\"simple_http_cache.cc\"],\n    hdrs = [\"simple_http_cache.h\"],\n    security_posture = \"robust_to_untrusted_downstream_and_upstream\",\n    status = \"wip\",\n    deps = [\n        \":config_cc_proto\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/filters/http/cache:http_cache_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"config\",\n    srcs = [\"config.proto\"],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/cache/simple_http_cache/config.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.source.extensions.filters.http.cache;\n\n// [#protodoc-title: SimpleHttpCache CacheFilter storage plugin]\n// [#extension: envoy.extensions.http.cache]\n\nmessage SimpleHttpCacheConfig {\n}\n"
  },
  {
    "path": "source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc",
    "content": "#include \"extensions/filters/http/cache/simple_http_cache/simple_http_cache.h\"\n\n#include \"envoy/registry/registry.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"source/extensions/filters/http/cache/simple_http_cache/config.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\nnamespace {\n\nclass SimpleLookupContext : public LookupContext {\npublic:\n  SimpleLookupContext(SimpleHttpCache& cache, LookupRequest&& request)\n      : cache_(cache), request_(std::move(request)) {}\n\n  void getHeaders(LookupHeadersCallback&& cb) override {\n    auto entry = cache_.lookup(request_);\n    body_ = std::move(entry.body_);\n    cb(entry.response_headers_ ? request_.makeLookupResult(std::move(entry.response_headers_),\n                                                           std::move(entry.metadata_), body_.size())\n                               : LookupResult{});\n  }\n\n  void getBody(const AdjustedByteRange& range, LookupBodyCallback&& cb) override {\n    ASSERT(range.end() <= body_.length(), \"Attempt to read past end of body.\");\n    cb(std::make_unique<Buffer::OwnedImpl>(&body_[range.begin()], range.length()));\n  }\n\n  void getTrailers(LookupTrailersCallback&&) override {\n    // TODO(toddmgreer): Support trailers.\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  const LookupRequest& request() const { return request_; }\n  void onDestroy() override {}\n\nprivate:\n  SimpleHttpCache& cache_;\n  const LookupRequest request_;\n  std::string body_;\n};\n\nclass SimpleInsertContext : public InsertContext {\npublic:\n  SimpleInsertContext(LookupContext& lookup_context, SimpleHttpCache& cache)\n      : key_(dynamic_cast<SimpleLookupContext&>(lookup_context).request().key()),\n        entry_vary_headers_(\n            dynamic_cast<SimpleLookupContext&>(lookup_context).request().getVaryHeaders()),\n        cache_(cache) {}\n\n  void insertHeaders(const 
Http::ResponseHeaderMap& response_headers,\n                     const ResponseMetadata& metadata, bool end_stream) override {\n    ASSERT(!committed_);\n    response_headers_ = Http::createHeaderMap<Http::ResponseHeaderMapImpl>(response_headers);\n    metadata_ = metadata;\n    if (end_stream) {\n      commit();\n    }\n  }\n\n  void insertBody(const Buffer::Instance& chunk, InsertCallback ready_for_next_chunk,\n                  bool end_stream) override {\n    ASSERT(!committed_);\n    ASSERT(ready_for_next_chunk || end_stream);\n\n    body_.add(chunk);\n    if (end_stream) {\n      commit();\n    } else {\n      ready_for_next_chunk(true);\n    }\n  }\n\n  void insertTrailers(const Http::ResponseTrailerMap&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // TODO(toddmgreer): support trailers\n  }\n\n  void onDestroy() override {}\n\nprivate:\n  void commit() {\n    committed_ = true;\n    if (VaryHeader::hasVary(*response_headers_)) {\n      cache_.varyInsert(key_, std::move(response_headers_), std::move(metadata_), body_.toString(),\n                        entry_vary_headers_);\n    } else {\n      cache_.insert(key_, std::move(response_headers_), std::move(metadata_), body_.toString());\n    }\n  }\n\n  Key key_;\n  Http::ResponseHeaderMapPtr response_headers_;\n  ResponseMetadata metadata_;\n  const Http::RequestHeaderMap& entry_vary_headers_;\n  SimpleHttpCache& cache_;\n  Buffer::OwnedImpl body_;\n  bool committed_ = false;\n};\n} // namespace\n\nLookupContextPtr SimpleHttpCache::makeLookupContext(LookupRequest&& request) {\n  return std::make_unique<SimpleLookupContext>(*this, std::move(request));\n}\n\nvoid SimpleHttpCache::updateHeaders(const LookupContext&, const Http::ResponseHeaderMap&,\n                                    const ResponseMetadata&) {\n  // TODO(toddmgreer): Support updating headers.\n  // Not implemented yet, however this is called during tests\n  // NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\nSimpleHttpCache::Entry 
SimpleHttpCache::lookup(const LookupRequest& request) {\n  absl::ReaderMutexLock lock(&mutex_);\n  auto iter = map_.find(request.key());\n  if (iter == map_.end()) {\n    return Entry{};\n  }\n  ASSERT(iter->second.response_headers_);\n\n  if (VaryHeader::hasVary(*iter->second.response_headers_)) {\n    return varyLookup(request, iter->second.response_headers_);\n  } else {\n    return SimpleHttpCache::Entry{\n        Http::createHeaderMap<Http::ResponseHeaderMapImpl>(*iter->second.response_headers_),\n        iter->second.metadata_, iter->second.body_};\n  }\n}\n\nvoid SimpleHttpCache::insert(const Key& key, Http::ResponseHeaderMapPtr&& response_headers,\n                             ResponseMetadata&& metadata, std::string&& body) {\n  absl::WriterMutexLock lock(&mutex_);\n  map_[key] =\n      SimpleHttpCache::Entry{std::move(response_headers), std::move(metadata), std::move(body)};\n}\n\nSimpleHttpCache::Entry\nSimpleHttpCache::varyLookup(const LookupRequest& request,\n                            const Http::ResponseHeaderMapPtr& response_headers) {\n  // This method should be called from lookup, which holds the mutex for reading.\n  mutex_.AssertReaderHeld();\n\n  const Http::HeaderEntry* vary_header = response_headers->get(Http::Headers::get().Vary);\n  ASSERT(vary_header);\n\n  Key varied_request_key = request.key();\n  const std::string vary_key = VaryHeader::createVaryKey(vary_header, request.getVaryHeaders());\n  varied_request_key.add_custom_fields(vary_key);\n\n  auto iter = map_.find(varied_request_key);\n  if (iter == map_.end()) {\n    return SimpleHttpCache::Entry{};\n  }\n  ASSERT(iter->second.response_headers_);\n\n  return SimpleHttpCache::Entry{\n      Http::createHeaderMap<Http::ResponseHeaderMapImpl>(*iter->second.response_headers_),\n      iter->second.metadata_, iter->second.body_};\n}\n\nvoid SimpleHttpCache::varyInsert(const Key& request_key,\n                                 Http::ResponseHeaderMapPtr&& response_headers,\n                  
               ResponseMetadata&& metadata, std::string&& body,\n                                 const Http::RequestHeaderMap& request_vary_headers) {\n  absl::WriterMutexLock lock(&mutex_);\n\n  const Http::HeaderEntry* vary_header = response_headers->get(Http::Headers::get().Vary);\n  ASSERT(vary_header);\n\n  // Insert the varied response.\n  Key varied_request_key = request_key;\n  const std::string vary_key = VaryHeader::createVaryKey(vary_header, request_vary_headers);\n  varied_request_key.add_custom_fields(vary_key);\n  map_[varied_request_key] =\n      SimpleHttpCache::Entry{std::move(response_headers), std::move(metadata), std::move(body)};\n\n  // Add a special entry to flag that this request generates varied responses.\n  auto iter = map_.find(request_key);\n  if (iter == map_.end()) {\n    Http::ResponseHeaderMapPtr vary_only_map =\n        Http::createHeaderMap<Http::ResponseHeaderMapImpl>({});\n    vary_only_map->setCopy(Http::Headers::get().Vary, vary_header->value().getStringView());\n    // TODO(cbdm): In a cache that evicts entries, we could maintain a list of the \"varykey\"s that\n    // we have inserted as the body for this first lookup. This way, we would know which keys we\n    // have inserted for that resource. 
For the first entry simply use vary_key as the entry_list,\n    // for future entries append vary_key to existing list.\n    std::string entry_list;\n    map_[request_key] = SimpleHttpCache::Entry{std::move(vary_only_map), {}, std::move(entry_list)};\n  }\n}\n\nInsertContextPtr SimpleHttpCache::makeInsertContext(LookupContextPtr&& lookup_context) {\n  ASSERT(lookup_context != nullptr);\n  return std::make_unique<SimpleInsertContext>(*lookup_context, *this);\n}\n\nconstexpr absl::string_view Name = \"envoy.extensions.http.cache.simple\";\n\nCacheInfo SimpleHttpCache::cacheInfo() const {\n  CacheInfo cache_info;\n  cache_info.name_ = Name;\n  return cache_info;\n}\n\nclass SimpleHttpCacheFactory : public HttpCacheFactory {\npublic:\n  // From UntypedFactory\n  std::string name() const override { return std::string(Name); }\n  // From TypedFactory\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::source::extensions::filters::http::cache::SimpleHttpCacheConfig>();\n  }\n  // From HttpCacheFactory\n  HttpCache&\n  getCache(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig&) override {\n    return cache_;\n  }\n\nprivate:\n  SimpleHttpCache cache_;\n};\n\nstatic Registry::RegisterFactory<SimpleHttpCacheFactory, HttpCacheFactory> register_;\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h",
    "content": "#pragma once\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/cache/http_cache.h\"\n\n#include \"absl/base/thread_annotations.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\n// Example cache backend that never evicts. Not suitable for production use.\nclass SimpleHttpCache : public HttpCache {\nprivate:\n  struct Entry {\n    Http::ResponseHeaderMapPtr response_headers_;\n    ResponseMetadata metadata_;\n    std::string body_;\n  };\n\n  // Looks for a response that has been varied. Only called from lookup.\n  Entry varyLookup(const LookupRequest& request,\n                   const Http::ResponseHeaderMapPtr& response_headers);\n\npublic:\n  // HttpCache\n  LookupContextPtr makeLookupContext(LookupRequest&& request) override;\n  InsertContextPtr makeInsertContext(LookupContextPtr&& lookup_context) override;\n  void updateHeaders(const LookupContext& lookup_context,\n                     const Http::ResponseHeaderMap& response_headers,\n                     const ResponseMetadata& metadata) override;\n  CacheInfo cacheInfo() const override;\n\n  Entry lookup(const LookupRequest& request);\n  void insert(const Key& key, Http::ResponseHeaderMapPtr&& response_headers,\n              ResponseMetadata&& metadata, std::string&& body);\n\n  // Inserts a response that has been varied on certain headers.\n  void varyInsert(const Key& request_key, Http::ResponseHeaderMapPtr&& response_headers,\n                  ResponseMetadata&& metadata, std::string&& body,\n                  const Http::RequestHeaderMap& request_vary_headers);\n\n  absl::Mutex mutex_;\n  absl::flat_hash_map<Key, Entry, MessageUtil, MessageUtil> map_ ABSL_GUARDED_BY(mutex_);\n};\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cdn_loop/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"filter_lib\",\n    srcs = [\"filter.cc\"],\n    hdrs = [\"filter.h\"],\n    deps = [\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/common:statusor_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"parser_lib\",\n    srcs = [\"parser.cc\"],\n    hdrs = [\"parser.h\"],\n    deps = [\"//source/common/common:statusor_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"utils_lib\",\n    srcs = [\"utils.cc\"],\n    hdrs = [\"utils.h\"],\n    deps = [\n        \":parser_lib\",\n        \"//source/common/common:statusor_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \":filter_lib\",\n        \":parser_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:factory_context_interface\",\n        \"//source/common/common:statusor_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/cdn_loop/config.cc",
    "content": "#include \"extensions/filters/http/cdn_loop/config.h\"\n\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/factory_context.h\"\n\n#include \"common/common/statusor.h\"\n\n#include \"extensions/filters/http/cdn_loop/filter.h\"\n#include \"extensions/filters/http/cdn_loop/parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\n\nusing ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::parseCdnId;\nusing ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParseContext;\nusing ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParsedCdnId;\n\nHttp::FilterFactoryCb CdnLoopFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig& config,\n    const std::string& /*stats_prefix*/, Server::Configuration::FactoryContext& /*context*/) {\n  StatusOr<ParsedCdnId> context = parseCdnId(ParseContext(config.cdn_id()));\n  if (!context.ok()) {\n    throw EnvoyException(fmt::format(\"Provided cdn_id \\\"{}\\\" is not a valid CDN identifier: {}\",\n                                     config.cdn_id(), context.status()));\n  }\n  return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(\n        std::make_shared<CdnLoopFilter>(config.cdn_id(), config.max_allowed_occurrences()));\n  };\n}\n\nREGISTER_FACTORY(CdnLoopFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cdn_loop/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h\"\n#include \"envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.validate.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/server/factory_context.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\n\nclass CdnLoopFilterFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig> {\npublic:\n  CdnLoopFilterFactory() : FactoryBase(HttpFilterNames::get().CdnLoop) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig& config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cdn_loop/filter.cc",
    "content": "#include \"extensions/filters/http/cdn_loop/filter.h\"\n\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/statusor.h\"\n#include \"common/http/headers.h\"\n\n#include \"extensions/filters/http/cdn_loop/utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\n\nnamespace {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    cdn_loop_handle(Http::CustomHeaders::get().CdnLoop);\n\nconstexpr absl::string_view ParseErrorMessage = \"Invalid CDN-Loop header in request.\";\nconstexpr absl::string_view ParseErrorDetails = \"invalid_cdn_loop_header\";\nconstexpr absl::string_view LoopDetectedMessage = \"The server has detected a loop between CDNs.\";\nconstexpr absl::string_view LoopDetectedDetails = \"cdn_loop_detected\";\n\n} // namespace\n\nHttp::FilterHeadersStatus CdnLoopFilter::decodeHeaders(Http::RequestHeaderMap& headers,\n                                                       bool /*end_stream*/) {\n\n  if (const Http::HeaderEntry* header_entry = headers.getInline(cdn_loop_handle.handle());\n      header_entry != nullptr) {\n    if (StatusOr<int> count =\n            countCdnLoopOccurrences(header_entry->value().getStringView(), cdn_id_);\n        !count) {\n      decoder_callbacks_->sendLocalReply(Http::Code::BadRequest, ParseErrorMessage, nullptr,\n                                         absl::nullopt, ParseErrorDetails);\n      return Http::FilterHeadersStatus::StopIteration;\n    } else if (*count > max_allowed_occurrences_) {\n      decoder_callbacks_->sendLocalReply(Http::Code::BadGateway, LoopDetectedMessage, nullptr,\n                                         absl::nullopt, LoopDetectedDetails);\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n  }\n\n  headers.appendCopy(Http::CustomHeaders::get().CdnLoop, cdn_id_);\n  return 
Http::FilterHeadersStatus::Continue;\n}\n\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cdn_loop/filter.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\n\nclass CdnLoopFilter : public Http::PassThroughDecoderFilter {\npublic:\n  CdnLoopFilter(std::string cdn_id, int max_allowed_occurrences)\n      : cdn_id_(std::move(cdn_id)), max_allowed_occurrences_(max_allowed_occurrences) {}\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n\nprivate:\n  const std::string cdn_id_;\n  const int max_allowed_occurrences_;\n};\n\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cdn_loop/parser.cc",
    "content": "#include \"extensions/filters/http/cdn_loop/parser.h\"\n\n#include \"common/common/statusor.h\"\n\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\nnamespace Parser {\n\nnamespace {\n\n// RFC 5234 Appendix B.1 says:\n//\n// ALPHA          =  %x41-5A / %x61-7A   ; A-Z / a-z\nconstexpr bool isAlpha(char c) {\n  return ('\\x41' <= c && c <= '\\x5a') || ('\\x61' <= c && c <= '\\x7a');\n}\n\n// RFC 5234 Appendix B.1 says:\n//\n// DIGIT          =  %x30-39 ; 0-9\nconstexpr bool isDigit(char c) { return '\\x30' <= c && c <= '\\x39'; }\n\n// RFC 2234 Section 6.1 defines HEXDIG as:\n//\n// HEXDIG         =  DIGIT / \"A\" / \"B\" / \"C\" / \"D\" / \"E\" / \"F\"\n//\n// This rule allows lower case letters too in violation of the RFC since IPv6\n// addresses commonly contain lower-case hex digits.\nconstexpr bool isHexDigitCaseInsensitive(char c) {\n  return isDigit(c) || ('A' <= c && c <= 'F') || ('a' <= c && c <= 'f');\n}\n\n// RFC 7230 Section 3.2.6 defines obs-text as:\n//\n// obs-text       = %x80-FF\nconstexpr bool isObsText(char c) { return 0x80 & c; }\n\n// RFC 7230 Section 3.2.6 defines qdtext as:\n//\n// qdtext         = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text\nconstexpr bool isQdText(char c) {\n  return c == '\\t' || c == ' ' || c == '\\x21' || ('\\x23' <= c && c <= '\\x5B') ||\n         ('\\x5D' <= c && c <= '\\x7E') || isObsText(c);\n}\n\n// RFC 5234 Appendix B.1 says:\n//\n// VCHAR          =  %x21-7E\n//                        ; visible (printing) characters\nconstexpr bool isVChar(char c) { return '\\x21' <= c && c <= '\\x7e'; }\n\n} // namespace\n\nParseContext skipOptionalWhitespace(const ParseContext& input) {\n  ParseContext context = input;\n  while (!context.atEnd()) {\n    const char c = context.peek();\n    if (!(c == ' ' || c == '\\t')) {\n      break;\n    }\n    
context.increment();\n  }\n  return context;\n}\n\nStatusOr<ParseContext> parseQuotedPair(const ParseContext& input) {\n  ParseContext context = input;\n  if (context.atEnd()) {\n    return absl::InvalidArgumentError(\n        absl::StrFormat(\"expected backslash at position %d; found end-of-input\", context.next()));\n  }\n\n  if (context.peek() != '\\\\') {\n    return absl::InvalidArgumentError(absl::StrFormat(\n        \"expected backslash at position %d; found '%c'\", input.next(), context.peek()));\n  }\n  context.increment();\n\n  if (context.atEnd()) {\n    return absl::InvalidArgumentError(absl::StrFormat(\n        \"expected escaped character at position %d; found end-of-input\", context.next()));\n  }\n\n  const char c = context.peek();\n  if (!(c == '\\t' || c == ' ' || isVChar(c) || isObsText(c))) {\n    return absl::InvalidArgumentError(\n        absl::StrFormat(\"expected escapable character at position %d; found '\\\\x%x'\", input.next(),\n                        context.peek()));\n  }\n  context.increment();\n\n  return context;\n}\n\nStatusOr<ParseContext> parseQuotedString(const ParseContext& input) {\n  ParseContext context = input;\n\n  if (context.atEnd()) {\n    return absl::InvalidArgumentError(absl::StrFormat(\n        \"expected opening '\\\"' at position %d; found end-of-input\", context.next()));\n  }\n\n  if (context.peek() != '\"') {\n    return absl::InvalidArgumentError(absl::StrFormat(\n        \"expected opening quote at position %d; found '%c'\", context.next(), context.peek()));\n  }\n  context.increment();\n\n  while (!context.atEnd() && context.peek() != '\"') {\n    if (isQdText(context.peek())) {\n      context.increment();\n      continue;\n    } else if (context.peek() == '\\\\') {\n      if (StatusOr<ParseContext> quoted_pair_context = parseQuotedPair(context);\n          !quoted_pair_context) {\n        return quoted_pair_context.status();\n      } else {\n        context.setNext(*quoted_pair_context);\n        
continue;\n      }\n    } else {\n      break;\n    }\n  }\n\n  if (context.atEnd()) {\n    return absl::InvalidArgumentError(absl::StrFormat(\n        \"expected closing quote at position %d; found end-of-input\", context.next()));\n  }\n\n  if (context.peek() != '\"') {\n    return absl::InvalidArgumentError(absl::StrFormat(\n        \"expected closing quote at position %d; found '%c'\", input.next(), context.peek()));\n  }\n  context.increment();\n\n  return context;\n}\n\nStatusOr<ParseContext> parseToken(const ParseContext& input) {\n  ParseContext context = input;\n  while (!context.atEnd()) {\n    const char c = context.peek();\n    // Put alphanumeric, -, and _ characters at the head of the list since\n    // they're likely to be used most often.\n    if (isAlpha(c) || isDigit(c) || c == '-' || c == '_' || c == '!' || c == '#' || c == '$' ||\n        c == '%' || c == '&' || c == '\\'' || c == '*' || c == '+' || c == '.' || c == '^' ||\n        c == '`' || c == '|' || c == '~') {\n      context.increment();\n    } else {\n      break;\n    }\n  }\n  if (context.next() == input.next()) {\n    if (context.atEnd()) {\n      return absl::InvalidArgumentError(absl::StrFormat(\n          \"expected token starting at position %d; found end of input\", input.next()));\n    } else {\n      return absl::InvalidArgumentError(absl::StrFormat(\n          \"expected token starting at position %d; found '%c'\", input.next(), context.peek()));\n    }\n  }\n\n  return context;\n}\n\nStatusOr<ParseContext> parsePlausibleIpV6(const ParseContext& input) {\n  ParseContext context = input;\n  if (context.atEnd()) {\n    return absl::InvalidArgumentError(absl::StrFormat(\n        \"expected IPv6 literal at position %d; found end-of-input\", context.next()));\n  }\n\n  if (context.peek() != '[') {\n    return absl::InvalidArgumentError(absl::StrFormat(\"expected opening '[' of IPv6 literal at \"\n                                                      \"position %d; found '%c'\",\n   
                                                   context.next(), context.peek()));\n  }\n  context.increment();\n\n  while (true) {\n    if (context.atEnd()) {\n      break;\n    }\n    const char c = context.peek();\n    if (!(isHexDigitCaseInsensitive(c) || c == ':' || c == '.')) {\n      break;\n    }\n    context.increment();\n  }\n\n  if (context.atEnd()) {\n    return absl::InvalidArgumentError(\n        absl::StrFormat(\"expected closing ']' of IPv6 literal at position %d \"\n                        \"found end-of-input\",\n                        context.next()));\n  }\n  if (context.peek() != ']') {\n    return absl::InvalidArgumentError(absl::StrFormat(\"expected closing ']' of IPv6 literal at \"\n                                                      \"position %d; found '%c'\",\n                                                      context.next(), context.peek()));\n  }\n  context.increment();\n\n  return context;\n}\n\nStatusOr<ParsedCdnId> parseCdnId(const ParseContext& input) {\n  ParseContext context = input;\n\n  if (context.atEnd()) {\n    return absl::InvalidArgumentError(\n        absl::StrFormat(\"expected cdn-id at position %d; found end-of-input\", context.next()));\n  }\n\n  // Optimization: dispatch on the next character to avoid the StrFormat in the\n  // error path of an IPv6 parser when the value has a token (and vice versa).\n  if (context.peek() == '[') {\n    if (StatusOr<ParseContext> ipv6 = parsePlausibleIpV6(context); !ipv6) {\n      return ipv6.status();\n    } else {\n      context.setNext(*ipv6);\n    }\n  } else {\n    if (StatusOr<ParseContext> token = parseToken(context); !token) {\n      return token.status();\n    } else {\n      context.setNext(*token);\n    }\n  }\n\n  if (context.atEnd()) {\n    return ParsedCdnId(context,\n                       context.value().substr(input.next(), context.next() - input.next()));\n  }\n\n  if (context.peek() != ':') {\n    return ParsedCdnId(context,\n                       
context.value().substr(input.next(), context.next() - input.next()));\n  }\n  context.increment();\n\n  while (!context.atEnd()) {\n    if (isDigit(context.value()[context.next()])) {\n      context.increment();\n    } else {\n      break;\n    }\n  }\n\n  return ParsedCdnId(context, context.value().substr(input.next(), context.next() - input.next()));\n}\n\nStatusOr<ParseContext> parseParameter(const ParseContext& input) {\n  ParseContext context = input;\n\n  if (StatusOr<ParseContext> parsed_token = parseToken(context); !parsed_token) {\n    return parsed_token.status();\n  } else {\n    context.setNext(*parsed_token);\n  }\n\n  if (context.atEnd()) {\n    return absl::InvalidArgumentError(\n        absl::StrFormat(\"expected '=' at position %d; found end-of-input\", context.next()));\n  }\n\n  if (context.peek() != '=') {\n    return absl::InvalidArgumentError(\n        absl::StrFormat(\"expected '=' at position %d; found '%c'\", context.next(), context.peek()));\n  }\n  context.increment();\n\n  if (context.atEnd()) {\n    return absl::InvalidArgumentError(absl::StrCat(\n        \"expected token or quoted-string at position %d; found end-of-input\", context.next()));\n  }\n\n  // Optimization: dispatch on the next character to avoid the StrFormat in the\n  // error path of an quoted string parser when the next item is a token (and\n  // vice versa).\n  if (context.peek() == '\"') {\n    if (StatusOr<ParseContext> value_quote = parseQuotedString(context); !value_quote) {\n      return value_quote.status();\n    } else {\n      return *value_quote;\n    }\n  } else {\n    if (StatusOr<ParseContext> value_token = parseToken(context); !value_token) {\n      return value_token.status();\n    } else {\n      return *value_token;\n    }\n  }\n}\n\nStatusOr<ParsedCdnInfo> parseCdnInfo(const ParseContext& input) {\n  absl::string_view cdn_id;\n  ParseContext context = input;\n  if (StatusOr<ParsedCdnId> parsed_id = parseCdnId(input); !parsed_id) {\n    return 
parsed_id.status();\n  } else {\n    context.setNext(parsed_id->context());\n    cdn_id = parsed_id->cdnId();\n  }\n\n  context.setNext(skipOptionalWhitespace(context));\n\n  while (!context.atEnd()) {\n    if (context.peek() != ';') {\n      break;\n    }\n    context.increment();\n\n    context.setNext(skipOptionalWhitespace(context));\n\n    if (StatusOr<ParseContext> parameter = parseParameter(context); !parameter) {\n      return parameter.status();\n    } else {\n      context.setNext(*parameter);\n    }\n\n    context.setNext(skipOptionalWhitespace(context));\n  }\n\n  return ParsedCdnInfo(context, cdn_id);\n}\n\nStatusOr<ParsedCdnInfoList> parseCdnInfoList(const ParseContext& input) {\n  std::vector<absl::string_view> cdn_infos;\n  ParseContext context = input;\n\n  context.setNext(skipOptionalWhitespace(context));\n\n  while (!context.atEnd()) {\n    // Loop invariant: we're always at the beginning of a new element.\n\n    if (context.peek() == ',') {\n      // Empty element case\n      context.increment();\n      context.setNext(skipOptionalWhitespace(context));\n      continue;\n    }\n\n    if (StatusOr<ParsedCdnInfo> parsed_cdn_info = parseCdnInfo(context); !parsed_cdn_info) {\n      return parsed_cdn_info.status();\n    } else {\n      cdn_infos.push_back(parsed_cdn_info->cdnId());\n      context.setNext(parsed_cdn_info->context());\n    }\n\n    context.setNext(skipOptionalWhitespace(context));\n\n    if (context.atEnd()) {\n      break;\n    }\n\n    if (context.peek() != ',') {\n      return absl::InvalidArgumentError(absl::StrFormat(\"expected ',' at position %d; found '%c'\",\n                                                        context.next(), context.peek()));\n    } else {\n      context.increment();\n    }\n\n    context.setNext(skipOptionalWhitespace(context));\n  }\n\n  return ParsedCdnInfoList(context, std::move(cdn_infos));\n}\n\n} // namespace Parser\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // 
namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cdn_loop/parser.h",
    "content": "#pragma once\n\n#include <iostream>\n\n#include \"common/common/statusor.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n\n// This file defines a parser for the CDN-Loop header value.\n//\n// RFC 8586 Section 2 defined the CDN-Loop header as:\n//\n//   CDN-Loop  = #cdn-info\n//   cdn-info  = cdn-id *( OWS \";\" OWS parameter )\n//   cdn-id    = ( uri-host [ \":\" port ] ) / pseudonym\n//   pseudonym = token\n//\n// Each of those productions rely on definitions in RFC 3986, RFC 5234, RFC\n// 7230, and RFC 7231. Their use is noted in the individual parse functions.\n//\n// The parser is a top-down combined parser and lexer that implements just\n// enough of the RFC spec to make it possible count the number of times a\n// particular CDN value appears. The main differences between the RFC's grammar\n// and the parser defined here are:\n//\n// 1. the parser has a more lax interpretation of what's a valid uri-host. See\n//     ParseCdnId for details.\n//\n// 2. the parser allows leading and trailing whitespace around the header\n//     value. See ParseCdnInfoList for details.\n//\n// Each parse function takes as input a ParseContext that tells the\n// function where to start. Parse functions that just need to parse a portion\n// of the CDN-Loop header, but don't need to return a value, should return a\n// ParseContext pointing to the next character to parse. Parse functions that\n// need to return a value should return something that contains a ParseContext.\n//\n// Parse functions that can fail (most of them!) wrap their return value in an\n// Envoy::StatusOr.\n//\n// In the interest of performance, this parser works with string_views and\n// references instead of copying std::strings. 
The string_view passed into the\n// ParseContext of a parse function must outlive the return value of the\n// function.\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\nnamespace Parser {\n\n// A ParseContext contains the state of the recursive descent parser and some\n// helper methods.\nclass ParseContext {\npublic:\n  ParseContext(absl::string_view value) : value_(value), next_(0) {}\n  ParseContext(absl::string_view value, absl::string_view::size_type next)\n      : value_(value), next_(next) {}\n\n  // Returns true if we have reached the end of value.\n  constexpr bool atEnd() const { return value_.length() <= next_; }\n\n  // Returns the value we're parsing\n  constexpr absl::string_view value() const { return value_; }\n\n  // Returns the position of the next character to process.\n  constexpr absl::string_view::size_type next() const { return next_; }\n\n  // Returns the character at next.\n  //\n  // REQUIRES: !at_end()\n  constexpr char peek() const { return value_[next_]; }\n\n  // Moves to the next character.\n  constexpr void increment() { ++next_; }\n\n  // Sets next from another context.\n  constexpr void setNext(const ParseContext& other) { next_ = other.next_; }\n\n  constexpr bool operator==(const ParseContext& other) const {\n    return value_ == other.value_ && next_ == other.next_;\n  }\n  constexpr bool operator!=(const ParseContext& other) const { return !(*this == other); }\n\n  friend std::ostream& operator<<(std::ostream& os, ParseContext arg) {\n    return os << \"ParseContext{next=\" << arg.next_ << \"}\";\n  }\n\nprivate:\n  // The item we're parsing.\n  const absl::string_view value_;\n\n  // A pointer to the next value we should parse.\n  absl::string_view::size_type next_;\n};\n\n// A ParsedCdnId holds an extracted CDN-Loop cdn-id.\nclass ParsedCdnId {\npublic:\n  ParsedCdnId(ParseContext context, absl::string_view cdn_id)\n      : context_(context), cdn_id_(cdn_id) {}\n\n  ParseContext 
context() const { return context_; }\n\n  absl::string_view cdnId() const { return cdn_id_; }\n\n  constexpr bool operator==(const ParsedCdnId& other) const {\n    return context_ == other.context_ && cdn_id_ == other.cdn_id_;\n  }\n  constexpr bool operator!=(const ParsedCdnId& other) const { return !(*this == other); }\n\n  friend std::ostream& operator<<(std::ostream& os, ParsedCdnId arg) {\n    return os << \"ParsedCdnId{context=\" << arg.context_ << \", cdn_id=\" << arg.cdn_id_ << \"}\";\n  }\n\nprivate:\n  ParseContext context_;\n  absl::string_view cdn_id_;\n};\n\n// A ParsedCdnInfo holds the extracted cdn-id after parsing an entire cdn-info.\nstruct ParsedCdnInfo {\n  ParsedCdnInfo(ParseContext context, absl::string_view cdn_id)\n      : context_(context), cdn_id_(cdn_id) {}\n\n  ParseContext context() const { return context_; }\n\n  absl::string_view cdnId() const { return cdn_id_; }\n\n  constexpr bool operator==(const ParsedCdnInfo& other) const {\n    return context_ == other.context_ && cdn_id_ == other.cdn_id_;\n  }\n  constexpr bool operator!=(const ParsedCdnInfo& other) const { return !(*this == other); }\n\n  friend std::ostream& operator<<(std::ostream& os, ParsedCdnInfo arg) {\n    return os << \"ParsedCdnInfo{context=\" << arg.context_ << \", cdn_id=\" << arg.cdn_id_ << \"}\";\n  }\n\nprivate:\n  ParseContext context_;\n  absl::string_view cdn_id_;\n};\n\n// A ParsedCdnInfoList contains list of cdn-ids after parsing the entire\n// CDN-Loop production.\nstruct ParsedCdnInfoList {\n  ParsedCdnInfoList(ParseContext context, std::vector<absl::string_view> cdn_ids)\n      : context_(context), cdn_ids_(std::move(cdn_ids)) {}\n\n  constexpr const std::vector<absl::string_view>& cdnIds() { return cdn_ids_; }\n\n  constexpr bool operator==(const ParsedCdnInfoList& other) const {\n    return context_ == other.context_ && cdn_ids_ == other.cdn_ids_;\n  }\n  constexpr bool operator!=(const ParsedCdnInfoList& other) const { return !(*this == other); }\n\n  
friend std::ostream& operator<<(std::ostream& os, ParsedCdnInfoList arg) {\n    return os << \"ParsedCdnInfoList{context=\" << arg.context_ << \", cdn_ids=[\"\n              << absl::StrJoin(arg.cdn_ids_, \", \") << \"]}\";\n  }\n\nprivate:\n  ParseContext context_;\n  std::vector<absl::string_view> cdn_ids_;\n};\n\n// Skips optional whitespace according to RFC 7230 Section 3.2.3.\n//\n// OWS  = *( SP / HTAB )\n//\n// Since this is completely optional, there's no way this call can fail.\nParseContext skipOptionalWhitespace(const ParseContext& input);\n\n// Parses a quoted-pair according to RFC 7230 Section 3.2.6.\n//\n// quoted-pair    = \"\\\" ( HTAB / SP / VCHAR / obs-text )\nStatusOr<ParseContext> parseQuotedPair(const ParseContext& input);\n\n// Parses a quoted-string according to RFC 7230 Section 3.2.6.\n//\n// quoted-string  = DQUOTE *( qdtext / quoted-pair ) DQUOTE\n// qdtext         = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text\n// obs-text       = %x80-FF\n//\n// quoted-pair    = \"\\\" ( HTAB / SP / VCHAR / obs-text )\nStatusOr<ParseContext> parseQuotedString(const ParseContext& input);\n\n// Parses a token according to RFC 7320 Section 3.2.6.\n//\n// token          = 1*tchar\n//\n// tchar          = \"!\" / \"#\" / \"$\" / \"%\" / \"&\" / \"'\" / \"*\"\n//                / \"+\" / \"-\" / \".\" / \"^\" / \"_\" / \"`\" / \"|\" / \"~\"\n//                / DIGIT / ALPHA\n//                ; any VCHAR, except delimiters\n//\n// According to RFC 5234 Appendix B.1:\n//\n// ALPHA          =  %x41-5A / %x61-7A   ; A-Z / a-z\n//\n// DIGIT          =  %x30-39\nStatusOr<ParseContext> parseToken(const ParseContext& input);\n\n// Parses something that looks like an IPv6 address literal.\n//\n// A proper IPv6 address literal is defined in RFC 3986, Section 3.2.2 as part\n// of the host rule. 
We're going to allow something simpler:\n//\n// plausible-ipv6 = \"[\" *( HEXDIGIT | \".\" | \":\" ) \"]\n// HEXDIGIT = DIGIT | %x41-46 | %x61-66 ; 0-9 | A-F | a-f\n//\n// Compared to the real rule, our rule:\n//\n// - allows lower-case hex digits\n// - allows address sections with more than 4 hex digits in a row\n// - allows embedded IPv4 addresses multiple times rather than just at the end.\nStatusOr<ParseContext> parsePlausibleIpV6(const ParseContext& input);\n\n// Parses a cdn-id in a lax way.\n//\n// According to to RFC 8586 Section 2, the cdn-id is:\n//\n// cdn-id    = ( uri-host [ \":\" port ] ) / pseudonym\n// pseudonym = token\n//\n// The uri-host portion of the cdn-id is the \"host\" rule from RFC 3986 Section\n// 3.2.2. Parsing the host rule is remarkably difficult because the host rule\n// tries to parse exactly valid IP addresses (e.g., disallowing values greater\n// than 255 in an IPv4 address or only allowing one instance of \"::\" in IPv6\n// addresses) and needs to deal with % escaping in names.\n//\n// Worse, the uri-host reg-name rule admits ',' and ';' as members of sub-delim\n// rule, making parsing ambiguous in some cases! RFC 3986 does this in order to\n// be \"future-proof\" for naming schemes we haven't dreamed up yet. RFC 8586\n// says that if a CDN uses a uri-host as its cdn-id, the uri-host must be a\n// \"hostname under its control\". The only global naming system we have is DNS,\n// so the only really valid reg-name an Internet-facing Envoy should see is a\n// DNS name.\n//\n// Luckily, the token rule more or less covers the uri-host rule for DNS names\n// and for IPv4 addresses. We just a new rule to parse IPv6 addresses. 
See\n// ParsePlausibleIpV6 for the rule we'll follow.\n//\n// The definition of port comes from RFC 3986 Section\n// 3.2.3 as:\n//\n// port        = *DIGIT\n//\n// In other words, any number of digits is allowed.\n//\n// In all, this function will parse cdn-id as:\n//\n// cdn-id = ( plausible-ipv6-address / token ) [ \":\" *DIGIT ]\nStatusOr<ParsedCdnId> parseCdnId(const ParseContext& input);\n\n// Parses a parameter according RFC 7231 Appendix D.\n//\n// parameter = token \"=\" ( token / quoted-string )\nStatusOr<ParseContext> parseParameter(const ParseContext& input);\n\n// Parses a cdn-info according to RFC 8586 Section 2.\n//\n// cdn-info  = cdn-id *( OWS \";\" OWS parameter )\nStatusOr<ParsedCdnInfo> parseCdnInfo(const ParseContext& input);\n\n// Parses the top-level cdn-info according to RFC 8586 Section 2.\n//\n// CDN-Loop  = #cdn-info\n//\n// The # rule is defined by RFC 7230 Section 7. The # is different for senders\n// and recipients. We're a recipient, so:\n//\n//   For compatibility with legacy list rules, a recipient MUST parse and\n//   ignore a reasonable number of empty list elements: enough to handle\n//   common mistakes by senders that merge values, but not so much that\n//   they could be used as a denial-of-service mechanism. In other words,\n//   a recipient MUST accept lists that satisfy the following syntax:\n//\n//     #element => [ ( \",\" / element ) *( OWS \",\" [ OWS element ] ) ]\n//\n//     1#element => *( \",\" OWS ) element *( OWS \",\" [ OWS element ] )\n//\n//   Empty elements do not contribute to the count of elements present.\n//\n// Since #cdn-info uses the #element form, we have to parse (but not count)\n// blank entries.\n//\n// In a divergence with the RFC's grammar, this function will also ignore\n// leading and trailing OWS. 
This function expects to consume the entire input\n// and will return an error if there is something it cannot parse.\nStatusOr<ParsedCdnInfoList> parseCdnInfoList(const ParseContext& input);\n\n} // namespace Parser\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cdn_loop/utils.cc",
    "content": "#include \"extensions/filters/http/cdn_loop/utils.h\"\n\n#include <algorithm>\n\n#include \"common/common/statusor.h\"\n\n#include \"extensions/filters/http/cdn_loop/parser.h\"\n\n#include \"absl/status/status.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\n\nStatusOr<int> countCdnLoopOccurrences(absl::string_view header, absl::string_view cdn_id) {\n  if (cdn_id.empty()) {\n    return absl::InvalidArgumentError(\"cdn_id cannot be empty\");\n  }\n\n  if (absl::StatusOr<Parser::ParsedCdnInfoList> parsed = Parser::parseCdnInfoList(header); parsed) {\n    return std::count(parsed->cdnIds().begin(), parsed->cdnIds().end(), cdn_id);\n  } else {\n    return parsed.status();\n  }\n}\n\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cdn_loop/utils.h",
    "content": "#pragma once\n\n#include \"common/common/statusor.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\n\n// Count the number of times cdn_id appears as a cdn-id element in header.\n//\n// According to RFC 8586, a cdn-id is either a uri-host[:port] or a pseudonym.\n// In either case, cdn_id must be at least one character long.\n//\n// If the header is unparseable or if cdn_id is the empty string, this function\n// will return an InvalidArgument status.\nStatusOr<int> countCdnLoopOccurrences(absl::string_view header, absl::string_view cdn_id);\n\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"pass_through_filter_lib\",\n    hdrs = [\"pass_through_filter.h\"],\n    # A thin shim used by test and prod filters.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/server:filter_config_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"factory_base_lib\",\n    hdrs = [\"factory_base.h\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/server:filter_config_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"jwks_fetcher_lib\",\n    srcs = [\"jwks_fetcher.cc\"],\n    hdrs = [\"jwks_fetcher.h\"],\n    external_deps = [\n        \"jwt_verify_lib\",\n    ],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/http:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    # Used by the router filter.  TODO(#9953) clean up.\n    visibility = [\n        \"//source:__subpackages__\",\n        \"//test:__subpackages__\",\n    ],\n    deps = [\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/common:macros\",\n        \"//source/extensions/common:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/common/compressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\n# TODO(rojkov): move this library to source/extensions/filters/http/compressor/.\nenvoy_cc_library(\n    name = \"compressor_lib\",\n    srcs = [\"compressor.cc\"],\n    hdrs = [\"compressor.h\"],\n    deps = [\n        \"//include/envoy/compression/compressor:compressor_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/common/compressor/compressor.cc",
    "content": "#include \"extensions/filters/http/common/compressor/compressor.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/header_map_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\nnamespace Compressors {\n\nnamespace {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    accept_encoding_handle(Http::CustomHeaders::get().AcceptEncoding);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    cache_control_handle(Http::CustomHeaders::get().CacheControl);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    content_encoding_handle(Http::CustomHeaders::get().ContentEncoding);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    etag_handle(Http::CustomHeaders::get().Etag);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    vary_handle(Http::CustomHeaders::get().Vary);\n\n// Default minimum length of an upstream response that allows compression.\nconst uint64_t DefaultMinimumContentLength = 30;\n\n// Default content types will be used if any is provided by the user.\nconst std::vector<std::string>& defaultContentEncoding() {\n  CONSTRUCT_ON_FIRST_USE(\n      std::vector<std::string>,\n      {\"text/html\", \"text/plain\", \"text/css\", \"application/javascript\", \"application/x-javascript\",\n       \"text/javascript\", \"text/x-javascript\", \"text/ecmascript\", \"text/js\", \"text/jscript\",\n       \"text/x-js\", \"application/ecmascript\", \"application/x-json\", \"application/xml\",\n       \"application/json\", \"image/svg+xml\", \"text/xml\", \"application/xhtml+xml\"});\n}\n\n// List of CompressorFilterConfig objects registered for a stream.\nstruct CompressorRegistry : public StreamInfo::FilterState::Object {\n  std::list<CompressorFilterConfigSharedPtr> 
filter_configs_;\n};\n\n// Key to per stream CompressorRegistry objects.\nconst std::string& compressorRegistryKey() { CONSTRUCT_ON_FIRST_USE(std::string, \"compressors\"); }\n\n} // namespace\n\nCompressorFilterConfig::CompressorFilterConfig(\n    const envoy::extensions::filters::http::compressor::v3::Compressor& compressor,\n    const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime,\n    const std::string& content_encoding)\n    : content_length_(contentLengthUint(compressor.content_length().value())),\n      content_type_values_(contentTypeSet(compressor.content_type())),\n      disable_on_etag_header_(compressor.disable_on_etag_header()),\n      remove_accept_encoding_header_(compressor.remove_accept_encoding_header()),\n      stats_(generateStats(stats_prefix, scope)), enabled_(compressor.runtime_enabled(), runtime),\n      content_encoding_(content_encoding) {}\n\nStringUtil::CaseUnorderedSet\nCompressorFilterConfig::contentTypeSet(const Protobuf::RepeatedPtrField<std::string>& types) {\n  const auto& default_content_encodings = defaultContentEncoding();\n  return types.empty() ? StringUtil::CaseUnorderedSet(default_content_encodings.begin(),\n                                                      default_content_encodings.end())\n                       : StringUtil::CaseUnorderedSet(types.cbegin(), types.cend());\n}\n\nuint32_t CompressorFilterConfig::contentLengthUint(Protobuf::uint32 length) {\n  return length > 0 ? 
length : DefaultMinimumContentLength;\n}\n\nCompressorFilter::CompressorFilter(const CompressorFilterConfigSharedPtr config)\n    : skip_compression_{true}, config_(std::move(config)) {}\n\nHttp::FilterHeadersStatus CompressorFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  const Http::HeaderEntry* accept_encoding = headers.getInline(accept_encoding_handle.handle());\n  if (accept_encoding != nullptr) {\n    // Capture the value of the \"Accept-Encoding\" request header to use it later when making\n    // decision on compressing the corresponding HTTP response.\n    accept_encoding_ = std::make_unique<std::string>(accept_encoding->value().getStringView());\n  }\n\n  if (config_->enabled() && config_->removeAcceptEncodingHeader()) {\n    headers.removeInline(accept_encoding_handle.handle());\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nvoid CompressorFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  decoder_callbacks_ = &callbacks;\n\n  absl::string_view key = compressorRegistryKey();\n  // To properly handle the cases where the decision on instantiating a compressor depends on\n  // the presence of other compression filters in the chain the filters need to be aware of each\n  // other. This is achieved by exploiting per-request data objects StreamInfo::FilterState: upon\n  // setting up a CompressorFilter, the new instance registers itself in the filter state. Then in\n  // the method isAcceptEncodingAllowed() the first filter is making a decision which encoder needs\n  // to be used for a request, with e.g. \"Accept-Encoding: deflate;q=0.75, gzip;q=0.5\", and caches\n  // it in the state. 
All other compression filters in the sequence use the cached decision.\n  const StreamInfo::FilterStateSharedPtr& filter_state = callbacks.streamInfo().filterState();\n  if (filter_state->hasData<CompressorRegistry>(key)) {\n    CompressorRegistry& registry = filter_state->getDataMutable<CompressorRegistry>(key);\n    registry.filter_configs_.push_back(config_);\n  } else {\n    auto registry_ptr = std::make_unique<CompressorRegistry>();\n    registry_ptr->filter_configs_.push_back(config_);\n    filter_state->setData(key, std::move(registry_ptr),\n                          StreamInfo::FilterState::StateType::Mutable);\n  }\n}\n\nHttp::FilterHeadersStatus CompressorFilter::encodeHeaders(Http::ResponseHeaderMap& headers,\n                                                          bool end_stream) {\n  const bool isEnabledAndContentLengthBigEnough =\n      config_->enabled() && isMinimumContentLength(headers);\n  const bool isCompressible = isEnabledAndContentLengthBigEnough && isContentTypeAllowed(headers) &&\n                              !hasCacheControlNoTransform(headers) && isEtagAllowed(headers) &&\n                              !headers.getInline(content_encoding_handle.handle());\n  if (!end_stream && isEnabledAndContentLengthBigEnough && isAcceptEncodingAllowed(headers) &&\n      isCompressible && isTransferEncodingAllowed(headers)) {\n    skip_compression_ = false;\n    sanitizeEtagHeader(headers);\n    headers.removeContentLength();\n    headers.setInline(content_encoding_handle.handle(), config_->contentEncoding());\n    config_->stats().compressed_.inc();\n    // Finally instantiate the compressor.\n    compressor_ = config_->makeCompressor();\n  } else {\n    config_->stats().not_compressed_.inc();\n  }\n\n  // Even if we decided not to compress due to incompatible Accept-Encoding value,\n  // the Vary header would need to be inserted to let a caching proxy in front of Envoy\n  // know that the requested resource still can be served with compression 
applied.\n  if (isCompressible) {\n    insertVaryHeader(headers);\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus CompressorFilter::encodeData(Buffer::Instance& data, bool end_stream) {\n  if (!skip_compression_) {\n    config_->stats().total_uncompressed_bytes_.add(data.length());\n    compressor_->compress(data, end_stream ? Envoy::Compression::Compressor::State::Finish\n                                           : Envoy::Compression::Compressor::State::Flush);\n    config_->stats().total_compressed_bytes_.add(data.length());\n  }\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus CompressorFilter::encodeTrailers(Http::ResponseTrailerMap&) {\n  if (!skip_compression_) {\n    Buffer::OwnedImpl empty_buffer;\n    compressor_->compress(empty_buffer, Envoy::Compression::Compressor::State::Finish);\n    config_->stats().total_compressed_bytes_.add(empty_buffer.length());\n    encoder_callbacks_->addEncodedData(empty_buffer, true);\n  }\n  return Http::FilterTrailersStatus::Continue;\n}\n\nbool CompressorFilter::hasCacheControlNoTransform(Http::ResponseHeaderMap& headers) const {\n  const Http::HeaderEntry* cache_control = headers.getInline(cache_control_handle.handle());\n  if (cache_control) {\n    return StringUtil::caseFindToken(cache_control->value().getStringView(), \",\",\n                                     Http::CustomHeaders::get().CacheControlValues.NoTransform);\n  }\n\n  return false;\n}\n\n// This function makes decision on which encoding to use for the response body and is\n// supposed to be called only once per request even if there are multiple compressor\n// filters in the chain. 
To make a decision the function needs to know what's the\n// request's Accept-Encoding, the response's Content-Type and the list of compressor\n// filters in the current chain.\n// TODO(rojkov): add an explicit fuzzer for chooseEncoding().\nstd::unique_ptr<CompressorFilter::EncodingDecision>\nCompressorFilter::chooseEncoding(const Http::ResponseHeaderMap& headers) const {\n  using EncPair = std::pair<absl::string_view, float>; // pair of {encoding, q_value}\n  std::vector<EncPair> pairs;\n  absl::string_view content_type_value;\n\n  const Http::HeaderEntry* content_type = headers.ContentType();\n  if (content_type != nullptr) {\n    content_type_value =\n        StringUtil::trim(StringUtil::cropRight(content_type->value().getStringView(), \";\"));\n  }\n\n  // Find all compressors enabled for the filter chain.\n  std::map<std::string, uint32_t> allowed_compressors;\n  uint32_t registration_count{0};\n  for (const auto& filter_config :\n       decoder_callbacks_->streamInfo()\n           .filterState()\n           ->getDataReadOnly<CompressorRegistry>(compressorRegistryKey())\n           .filter_configs_) {\n    // A compressor filter may be limited to compress certain Content-Types. If the response's\n    // content type doesn't match the list of content types this filter is enabled for then\n    // it must be excluded from the decision process.\n    // For example, there are two compressor filters in the chain e.g. \"gzip\" and \"deflate\".\n    // \"gzip\" is configured to compress only \"text/html\" and \"deflate\" is configured to compress\n    // only \"application/javascript\". Then comes a request with Accept-Encoding header\n    // \"gzip;q=1,deflate;q=.5\". 
The corresponding response content type is \"application/javascript\".\n    // If \"gzip\" is not excluded from the decision process then it will take precedence over\n    // \"deflate\" and the resulting response won't be compressed at all.\n    if (!content_type_value.empty() && !filter_config->contentTypeValues().empty()) {\n      auto iter = filter_config->contentTypeValues().find(content_type_value);\n      if (iter == filter_config->contentTypeValues().end()) {\n        // Skip adding this filter to the list of allowed compressors.\n        continue;\n      }\n    }\n\n    // There could be many compressors registered for the same content encoding, e.g. consider a\n    // case when there are two gzip filters using different compression levels for different content\n    // sizes. In such case we ignore duplicates (or different filters for the same encoding)\n    // registered last.\n    auto enc = allowed_compressors.find(filter_config->contentEncoding());\n    if (enc == allowed_compressors.end()) {\n      allowed_compressors.insert({filter_config->contentEncoding(), registration_count});\n      ++registration_count;\n    }\n  }\n\n  // Find all encodings accepted by the user agent and adjust the list of allowed compressors.\n  for (const auto& token : StringUtil::splitToken(*accept_encoding_, \",\", false /* keep_empty */)) {\n    EncPair pair =\n        std::make_pair(StringUtil::trim(StringUtil::cropRight(token, \";\")), static_cast<float>(1));\n    const auto params = StringUtil::cropLeft(token, \";\");\n    if (params != token) {\n      const auto q_value = StringUtil::cropLeft(params, \"=\");\n      if (q_value != params &&\n          absl::EqualsIgnoreCase(\"q\", StringUtil::trim(StringUtil::cropRight(params, \"=\")))) {\n        auto result = absl::SimpleAtof(StringUtil::trim(q_value), &pair.second);\n        if (!result) {\n          // Skip not parseable q-value.\n          continue;\n        }\n      }\n    }\n\n    pairs.push_back(pair);\n\n    if 
(!pair.second) {\n      // Disallow compressors with \"q=0\".\n      // The reason why we add encodings to \"pairs\" even with \"q=0\" is that \"pairs\" contains\n      // client's expectations and \"allowed_compressors\" is what Envoy can handle. Consider\n      // the cases of \"Accept-Encoding: gzip;q=0, deflate, *\" and \"Accept-Encoding: deflate, *\"\n      // whereas the proxy has only \"gzip\" configured. If we just exclude the encodings with \"q=0\"\n      // from \"pairs\" then upon noticing \"*\" we don't know if \"gzip\" is acceptable by the client.\n      allowed_compressors.erase(std::string(pair.first));\n    }\n  }\n\n  if (pairs.empty() || allowed_compressors.empty()) {\n    // If there's no intersection between accepted encodings and the ones provided by the allowed\n    // compressors, then only the \"identity\" encoding is acceptable.\n    return std::make_unique<CompressorFilter::EncodingDecision>(\n        Http::CustomHeaders::get().AcceptEncodingValues.Identity,\n        CompressorFilter::EncodingDecision::HeaderStat::NotValid);\n  }\n\n  // Find intersection of encodings accepted by the user agent and provided\n  // by the allowed compressors and choose the one with the highest q-value.\n  EncPair choice{Http::CustomHeaders::get().AcceptEncodingValues.Identity, static_cast<float>(0)};\n  for (const auto& pair : pairs) {\n    if ((pair.second > choice.second) &&\n        (allowed_compressors.count(std::string(pair.first)) ||\n         pair.first == Http::CustomHeaders::get().AcceptEncodingValues.Identity ||\n         pair.first == Http::CustomHeaders::get().AcceptEncodingValues.Wildcard)) {\n      choice = pair;\n    }\n  }\n\n  if (!choice.second) {\n    // The value of \"Accept-Encoding\" must be invalid as we ended up with zero q-value.\n    return std::make_unique<CompressorFilter::EncodingDecision>(\n        Http::CustomHeaders::get().AcceptEncodingValues.Identity,\n        CompressorFilter::EncodingDecision::HeaderStat::NotValid);\n  
}\n\n  // The \"identity\" encoding (no compression) is always available.\n  if (choice.first == Http::CustomHeaders::get().AcceptEncodingValues.Identity) {\n    return std::make_unique<CompressorFilter::EncodingDecision>(\n        Http::CustomHeaders::get().AcceptEncodingValues.Identity,\n        CompressorFilter::EncodingDecision::HeaderStat::Identity);\n  }\n\n  // If wildcard is given then use which ever compressor is registered first.\n  if (choice.first == Http::CustomHeaders::get().AcceptEncodingValues.Wildcard) {\n    auto first_registered = std::min_element(\n        allowed_compressors.begin(), allowed_compressors.end(),\n        [](const std::pair<std::string, uint32_t>& a,\n           const std::pair<std::string, uint32_t>& b) -> bool { return a.second < b.second; });\n    return std::make_unique<CompressorFilter::EncodingDecision>(\n        first_registered->first, CompressorFilter::EncodingDecision::HeaderStat::Wildcard);\n  }\n\n  return std::make_unique<CompressorFilter::EncodingDecision>(\n      std::string(choice.first), CompressorFilter::EncodingDecision::HeaderStat::ValidCompressor);\n}\n\n// Check if this filter was chosen to compress. 
Also update the filter's stat counters related to\n// the Accept-Encoding header.\nbool CompressorFilter::shouldCompress(const CompressorFilter::EncodingDecision& decision) const {\n  const bool should_compress =\n      absl::EqualsIgnoreCase(config_->contentEncoding(), decision.encoding());\n\n  switch (decision.stat()) {\n  case CompressorFilter::EncodingDecision::HeaderStat::ValidCompressor:\n    if (should_compress) {\n      config_->stats().header_compressor_used_.inc();\n      // TODO(rojkov): Remove this increment when the gzip-specific stat is gone.\n      if (absl::EqualsIgnoreCase(\"gzip\", config_->contentEncoding())) {\n        config_->stats().header_gzip_.inc();\n      }\n    } else {\n      // Some other compressor filter in the same chain compressed the response body,\n      // but not this filter.\n      config_->stats().header_compressor_overshadowed_.inc();\n    }\n    break;\n  case CompressorFilter::EncodingDecision::HeaderStat::Identity:\n    config_->stats().header_identity_.inc();\n    break;\n  case CompressorFilter::EncodingDecision::HeaderStat::Wildcard:\n    config_->stats().header_wildcard_.inc();\n    break;\n  default:\n    config_->stats().header_not_valid_.inc();\n    break;\n  }\n\n  return should_compress;\n}\n\nbool CompressorFilter::isAcceptEncodingAllowed(const Http::ResponseHeaderMap& headers) const {\n  if (accept_encoding_ == nullptr) {\n    config_->stats().no_accept_header_.inc();\n    return false;\n  }\n\n  const absl::string_view encoding_decision_key{\"encoding_decision\"};\n\n  // Check if we have already cached our decision on encoding.\n  const StreamInfo::FilterStateSharedPtr& filter_state =\n      decoder_callbacks_->streamInfo().filterState();\n  if (filter_state->hasData<CompressorFilter::EncodingDecision>(encoding_decision_key)) {\n    const CompressorFilter::EncodingDecision& decision =\n        filter_state->getDataReadOnly<CompressorFilter::EncodingDecision>(encoding_decision_key);\n    return 
shouldCompress(decision);\n  }\n\n  // No cached decision found, so decide now.\n  std::unique_ptr<CompressorFilter::EncodingDecision> decision = chooseEncoding(headers);\n  bool result = shouldCompress(*decision);\n  filter_state->setData(encoding_decision_key, std::move(decision),\n                        StreamInfo::FilterState::StateType::ReadOnly);\n  return result;\n}\n\nbool CompressorFilter::isContentTypeAllowed(Http::ResponseHeaderMap& headers) const {\n  const Http::HeaderEntry* content_type = headers.ContentType();\n  if (content_type != nullptr && !config_->contentTypeValues().empty()) {\n    const absl::string_view value =\n        StringUtil::trim(StringUtil::cropRight(content_type->value().getStringView(), \";\"));\n    return config_->contentTypeValues().find(value) != config_->contentTypeValues().end();\n  }\n\n  return true;\n}\n\nbool CompressorFilter::isEtagAllowed(Http::ResponseHeaderMap& headers) const {\n  const bool is_etag_allowed =\n      !(config_->disableOnEtagHeader() && headers.getInline(etag_handle.handle()));\n  if (!is_etag_allowed) {\n    config_->stats().not_compressed_etag_.inc();\n  }\n  return is_etag_allowed;\n}\n\nbool CompressorFilter::isMinimumContentLength(Http::ResponseHeaderMap& headers) const {\n  const Http::HeaderEntry* content_length = headers.ContentLength();\n  if (content_length != nullptr) {\n    uint64_t length;\n    const bool is_minimum_content_length =\n        absl::SimpleAtoi(content_length->value().getStringView(), &length) &&\n        length >= config_->minimumLength();\n    if (!is_minimum_content_length) {\n      config_->stats().content_length_too_small_.inc();\n    }\n    return is_minimum_content_length;\n  }\n\n  return StringUtil::caseFindToken(headers.getTransferEncodingValue(), \",\",\n                                   Http::Headers::get().TransferEncodingValues.Chunked);\n}\n\nbool CompressorFilter::isTransferEncodingAllowed(Http::ResponseHeaderMap& headers) const {\n  const Http::HeaderEntry* 
transfer_encoding = headers.TransferEncoding();\n  if (transfer_encoding != nullptr) {\n    for (absl::string_view header_value :\n         StringUtil::splitToken(transfer_encoding->value().getStringView(), \",\", true)) {\n      const auto trimmed_value = StringUtil::trim(header_value);\n      if (absl::EqualsIgnoreCase(trimmed_value, config_->contentEncoding()) ||\n          // or any other compression type known to Envoy\n          absl::EqualsIgnoreCase(trimmed_value, Http::Headers::get().TransferEncodingValues.Gzip) ||\n          absl::EqualsIgnoreCase(trimmed_value,\n                                 Http::Headers::get().TransferEncodingValues.Deflate)) {\n        return false;\n      }\n    }\n  }\n\n  return true;\n}\n\nvoid CompressorFilter::insertVaryHeader(Http::ResponseHeaderMap& headers) {\n  const Http::HeaderEntry* vary = headers.getInline(vary_handle.handle());\n  if (vary != nullptr) {\n    if (!StringUtil::findToken(vary->value().getStringView(), \",\",\n                               Http::CustomHeaders::get().VaryValues.AcceptEncoding, true)) {\n      std::string new_header;\n      absl::StrAppend(&new_header, vary->value().getStringView(), \", \",\n                      Http::CustomHeaders::get().VaryValues.AcceptEncoding);\n      headers.setInline(vary_handle.handle(), new_header);\n    }\n  } else {\n    headers.setReferenceInline(vary_handle.handle(),\n                               Http::CustomHeaders::get().VaryValues.AcceptEncoding);\n  }\n}\n\n// TODO(gsagula): It seems that every proxy has a different opinion how to handle Etag. Some\n// discussions around this topic have been going on for over a decade, e.g.,\n// https://bz.apache.org/bugzilla/show_bug.cgi?id=45023\n// This design attempts to stay more on the safe side by preserving weak etags and removing\n// the strong ones when disable_on_etag_header is false. 
Envoy does NOT re-write entity tags.\nvoid CompressorFilter::sanitizeEtagHeader(Http::ResponseHeaderMap& headers) {\n  const Http::HeaderEntry* etag = headers.getInline(etag_handle.handle());\n  if (etag != nullptr) {\n    absl::string_view value(etag->value().getStringView());\n    if (value.length() > 2 && !((value[0] == 'w' || value[0] == 'W') && value[1] == '/')) {\n      headers.removeInline(etag_handle.handle());\n    }\n  }\n}\n\n} // namespace Compressors\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/common/compressor/compressor.h",
    "content": "#pragma once\n\n#include \"envoy/compression/compressor/compressor.h\"\n#include \"envoy/extensions/filters/http/compressor/v3/compressor.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/runtime/runtime_protos.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\nnamespace Compressors {\n\n/**\n * All compressor filter stats. @see stats_macros.h\n * \"total_uncompressed_bytes\" only includes bytes from requests that were marked for compression.\n * If the request was not marked for compression, the filter increments \"not_compressed\", but does\n * not add to \"total_uncompressed_bytes\". This way, the user can measure the memory performance of\n * the compression.\n *\n * \"header_compressor_used\" is a number of requests whose Accept-Encoding header explicitly stated\n * that the response body should be compressed with the encoding provided by this filter instance.\n *\n * \"header_compressor_overshadowed\" is a number of requests skipped by this filter instance because\n * they were handled by another filter in the same filter chain.\n *\n * \"header_gzip\" is specific to the gzip filter and is deprecated since it duplicates\n * \"header_compressor_used\".\n */\n#define ALL_COMPRESSOR_STATS(COUNTER)                                                              \\\n  COUNTER(compressed)                                                                              \\\n  COUNTER(not_compressed)                                                                          \\\n  COUNTER(no_accept_header)                                                                        \\\n  COUNTER(header_identity)                                                                         \\\n  COUNTER(header_gzip)           
                                                                  \\\n  COUNTER(header_compressor_used)                                                                  \\\n  COUNTER(header_compressor_overshadowed)                                                          \\\n  COUNTER(header_wildcard)                                                                         \\\n  COUNTER(header_not_valid)                                                                        \\\n  COUNTER(total_uncompressed_bytes)                                                                \\\n  COUNTER(total_compressed_bytes)                                                                  \\\n  COUNTER(content_length_too_small)                                                                \\\n  COUNTER(not_compressed_etag)\n\n/**\n * Struct definition for compressor stats. @see stats_macros.h\n */\nstruct CompressorStats {\n  ALL_COMPRESSOR_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n// TODO(rojkov): merge this class with Compressor::CompressorFilterConfig when the filter\n// `envoy.filters.http.gzip` is fully deprecated and dropped.\nclass CompressorFilterConfig {\npublic:\n  CompressorFilterConfig() = delete;\n  virtual ~CompressorFilterConfig() = default;\n\n  virtual Envoy::Compression::Compressor::CompressorPtr makeCompressor() PURE;\n\n  bool enabled() const { return enabled_.enabled(); }\n  const CompressorStats& stats() { return stats_; }\n  const StringUtil::CaseUnorderedSet& contentTypeValues() const { return content_type_values_; }\n  bool disableOnEtagHeader() const { return disable_on_etag_header_; }\n  bool removeAcceptEncodingHeader() const { return remove_accept_encoding_header_; }\n  uint32_t minimumLength() const { return content_length_; }\n  const std::string contentEncoding() const { return content_encoding_; };\n\nprotected:\n  CompressorFilterConfig(\n      const envoy::extensions::filters::http::compressor::v3::Compressor& compressor,\n      const std::string& 
stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime,\n      const std::string& content_encoding);\n\nprivate:\n  static StringUtil::CaseUnorderedSet\n  contentTypeSet(const Protobuf::RepeatedPtrField<std::string>& types);\n\n  static uint32_t contentLengthUint(Protobuf::uint32 length);\n\n  static CompressorStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return CompressorStats{ALL_COMPRESSOR_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n  }\n\n  const uint32_t content_length_;\n  const StringUtil::CaseUnorderedSet content_type_values_;\n  const bool disable_on_etag_header_;\n  const bool remove_accept_encoding_header_;\n\n  const CompressorStats stats_;\n  Runtime::FeatureFlag enabled_;\n  const std::string content_encoding_;\n};\nusing CompressorFilterConfigSharedPtr = std::shared_ptr<CompressorFilterConfig>;\n\n/**\n * A filter that compresses data dispatched from the upstream upon client request.\n */\nclass CompressorFilter : public Http::PassThroughFilter {\npublic:\n  explicit CompressorFilter(const CompressorFilterConfigSharedPtr config);\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance& buffer, bool end_stream) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override;\n\nprivate:\n  bool hasCacheControlNoTransform(Http::ResponseHeaderMap& headers) const;\n  bool isAcceptEncodingAllowed(const Http::ResponseHeaderMap& headers) const;\n  bool isContentTypeAllowed(Http::ResponseHeaderMap& headers) const;\n  bool 
isEtagAllowed(Http::ResponseHeaderMap& headers) const;\n  bool isMinimumContentLength(Http::ResponseHeaderMap& headers) const;\n  bool isTransferEncodingAllowed(Http::ResponseHeaderMap& headers) const;\n\n  void sanitizeEtagHeader(Http::ResponseHeaderMap& headers);\n  void insertVaryHeader(Http::ResponseHeaderMap& headers);\n\n  class EncodingDecision : public StreamInfo::FilterState::Object {\n  public:\n    enum class HeaderStat { NotValid, Identity, Wildcard, ValidCompressor };\n    EncodingDecision(const std::string& encoding, const HeaderStat stat)\n        : encoding_(encoding), stat_(stat) {}\n    const std::string& encoding() const { return encoding_; }\n    HeaderStat stat() const { return stat_; }\n\n  private:\n    const std::string encoding_;\n    const HeaderStat stat_;\n  };\n\n  std::unique_ptr<EncodingDecision> chooseEncoding(const Http::ResponseHeaderMap& headers) const;\n  bool shouldCompress(const EncodingDecision& decision) const;\n\n  bool skip_compression_;\n  Envoy::Compression::Compressor::CompressorPtr compressor_;\n  const CompressorFilterConfigSharedPtr config_;\n  std::unique_ptr<std::string> accept_encoding_;\n};\n\n} // namespace Compressors\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/common/factory_base.h",
    "content": "#pragma once\n\n#include \"envoy/server/filter_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\n\n/**\n * Common base class for HTTP filter factory registrations. Removes a substantial amount of\n * boilerplate.\n */\ntemplate <class ConfigProto, class RouteConfigProto = ConfigProto>\nclass FactoryBase : public Server::Configuration::NamedHttpFilterConfigFactory {\npublic:\n  Http::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& proto_config,\n                               const std::string& stats_prefix,\n                               Server::Configuration::FactoryContext& context) override {\n    return createFilterFactoryFromProtoTyped(MessageUtil::downcastAndValidate<const ConfigProto&>(\n                                                 proto_config, context.messageValidationVisitor()),\n                                             stats_prefix, context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ConfigProto>();\n  }\n\n  ProtobufTypes::MessagePtr createEmptyRouteConfigProto() override {\n    return std::make_unique<RouteConfigProto>();\n  }\n\n  Router::RouteSpecificFilterConfigConstSharedPtr\n  createRouteSpecificFilterConfig(const Protobuf::Message& proto_config,\n                                  Server::Configuration::ServerFactoryContext& context,\n                                  ProtobufMessage::ValidationVisitor& validator) override {\n    return createRouteSpecificFilterConfigTyped(\n        MessageUtil::downcastAndValidate<const RouteConfigProto&>(proto_config, validator), context,\n        validator);\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  FactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  virtual Http::FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const ConfigProto& proto_config,\n                                    const 
std::string& stats_prefix,\n                                    Server::Configuration::FactoryContext& context) PURE;\n\n  virtual Router::RouteSpecificFilterConfigConstSharedPtr\n  createRouteSpecificFilterConfigTyped(const RouteConfigProto&,\n                                       Server::Configuration::ServerFactoryContext&,\n                                       ProtobufMessage::ValidationVisitor&) {\n    return nullptr;\n  }\n\n  const std::string name_;\n};\n\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/common/jwks_fetcher.cc",
    "content": "#include \"extensions/filters/http/common/jwks_fetcher.h\"\n\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"jwt_verify_lib/status.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\nnamespace {\n\nclass JwksFetcherImpl : public JwksFetcher,\n                        public Logger::Loggable<Logger::Id::filter>,\n                        public Http::AsyncClient::Callbacks {\npublic:\n  JwksFetcherImpl(Upstream::ClusterManager& cm) : cm_(cm) { ENVOY_LOG(trace, \"{}\", __func__); }\n\n  ~JwksFetcherImpl() override { cancel(); }\n\n  void cancel() final {\n    if (request_ && !complete_) {\n      request_->cancel();\n      ENVOY_LOG(debug, \"fetch pubkey [uri = {}]: canceled\", uri_->uri());\n    }\n    reset();\n  }\n\n  void fetch(const envoy::config::core::v3::HttpUri& uri, Tracing::Span& parent_span,\n             JwksFetcher::JwksReceiver& receiver) override {\n    ENVOY_LOG(trace, \"{}\", __func__);\n    ASSERT(!receiver_);\n\n    complete_ = false;\n    receiver_ = &receiver;\n    uri_ = &uri;\n\n    // Check if cluster is configured, fail the request if not.\n    // Otherwise cm_.httpAsyncClientForCluster will throw exception.\n    if (cm_.get(uri.cluster()) == nullptr) {\n      ENVOY_LOG(error, \"{}: fetch pubkey [uri = {}] failed: [cluster = {}] is not configured\",\n                __func__, uri.uri(), uri.cluster());\n      complete_ = true;\n      receiver_->onJwksError(JwksFetcher::JwksReceiver::Failure::Network);\n      reset();\n      return;\n    }\n\n    Http::RequestMessagePtr message = Http::Utility::prepareHeaders(uri);\n    message->headers().setReferenceMethod(Http::Headers::get().MethodValues.Get);\n    ENVOY_LOG(debug, \"fetch pubkey from [uri = {}]: start\", uri_->uri());\n    auto options = Http::AsyncClient::RequestOptions()\n                       
.setTimeout(std::chrono::milliseconds(\n                           DurationUtil::durationToMilliseconds(uri.timeout())))\n                       .setParentSpan(parent_span)\n                       .setChildSpanName(\"JWT Remote PubKey Fetch\");\n    request_ =\n        cm_.httpAsyncClientForCluster(uri.cluster()).send(std::move(message), *this, options);\n  }\n\n  // HTTP async receive methods\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override {\n    ENVOY_LOG(trace, \"{}\", __func__);\n    complete_ = true;\n    const uint64_t status_code = Http::Utility::getResponseStatus(response->headers());\n    if (status_code == enumToInt(Http::Code::OK)) {\n      ENVOY_LOG(debug, \"{}: fetch pubkey [uri = {}]: success\", __func__, uri_->uri());\n      if (response->body().length() != 0) {\n        const auto body = response->bodyAsString();\n        auto jwks =\n            google::jwt_verify::Jwks::createFrom(body, google::jwt_verify::Jwks::Type::JWKS);\n        if (jwks->getStatus() == google::jwt_verify::Status::Ok) {\n          ENVOY_LOG(debug, \"{}: fetch pubkey [uri = {}]: succeeded\", __func__, uri_->uri());\n          receiver_->onJwksSuccess(std::move(jwks));\n        } else {\n          ENVOY_LOG(debug, \"{}: fetch pubkey [uri = {}]: invalid jwks\", __func__, uri_->uri());\n          receiver_->onJwksError(JwksFetcher::JwksReceiver::Failure::InvalidJwks);\n        }\n      } else {\n        ENVOY_LOG(debug, \"{}: fetch pubkey [uri = {}]: body is empty\", __func__, uri_->uri());\n        receiver_->onJwksError(JwksFetcher::JwksReceiver::Failure::Network);\n      }\n    } else {\n      ENVOY_LOG(debug, \"{}: fetch pubkey [uri = {}]: response status code {}\", __func__,\n                uri_->uri(), status_code);\n      receiver_->onJwksError(JwksFetcher::JwksReceiver::Failure::Network);\n    }\n    reset();\n  }\n\n  void onFailure(const Http::AsyncClient::Request&,\n                 Http::AsyncClient::FailureReason 
reason) override {\n    ENVOY_LOG(debug, \"{}: fetch pubkey [uri = {}]: network error {}\", __func__, uri_->uri(),\n              enumToInt(reason));\n    complete_ = true;\n    receiver_->onJwksError(JwksFetcher::JwksReceiver::Failure::Network);\n    reset();\n  }\n\n  void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {}\n\nprivate:\n  Upstream::ClusterManager& cm_;\n  bool complete_{};\n  JwksFetcher::JwksReceiver* receiver_{};\n  const envoy::config::core::v3::HttpUri* uri_{};\n  Http::AsyncClient::Request* request_{};\n\n  void reset() {\n    request_ = nullptr;\n    receiver_ = nullptr;\n    uri_ = nullptr;\n  }\n};\n} // namespace\n\nJwksFetcherPtr JwksFetcher::create(Upstream::ClusterManager& cm) {\n  return std::make_unique<JwksFetcherImpl>(cm);\n}\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/common/jwks_fetcher.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"jwt_verify_lib/jwks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\n\nclass JwksFetcher;\nusing JwksFetcherPtr = std::unique_ptr<JwksFetcher>;\n/**\n * JwksFetcher interface can be used to retrieve remote JWKS\n * (https://tools.ietf.org/html/rfc7517) data structures returning a concrete,\n * type-safe representation. An instance of this interface is designed to\n * retrieve one JWKS at a time.\n */\nclass JwksFetcher {\npublic:\n  class JwksReceiver {\n  public:\n    enum class Failure {\n      /* A network error occurred causing JWKS retrieval failure. */\n      Network,\n      /* A failure occurred when trying to parse the retrieved JWKS data. */\n      InvalidJwks,\n    };\n\n    virtual ~JwksReceiver() = default;\n    /*\n     * Successful retrieval callback.\n     * of the returned JWKS object.\n     * @param jwks the JWKS object retrieved.\n     */\n    virtual void onJwksSuccess(google::jwt_verify::JwksPtr&& jwks) PURE;\n    /*\n     * Retrieval error callback.\n     * * @param reason the failure reason.\n     */\n    virtual void onJwksError(Failure reason) PURE;\n  };\n\n  virtual ~JwksFetcher() = default;\n\n  /*\n   * Cancel any in-flight request.\n   */\n  virtual void cancel() PURE;\n\n  /*\n   * Retrieve a JWKS resource from a remote HTTP host.\n   * At most one outstanding request may be in-flight,\n   * i.e. 
from the invocation of `fetch()` until either\n   * a callback or `cancel()` is invoked, no\n   * additional `fetch()` may be issued.\n   * @param uri the uri to retrieve the jwks from.\n   * @param parent_span the active span to create children under\n   * @param receiver the receiver of the fetched JWKS or error.\n   */\n  virtual void fetch(const envoy::config::core::v3::HttpUri& uri, Tracing::Span& parent_span,\n                     JwksReceiver& receiver) PURE;\n\n  /*\n   * Factory method for creating a JwksFetcher.\n   * @param cm the cluster manager to use during Jwks retrieval\n   * @return a JwksFetcher instance\n   */\n  static JwksFetcherPtr create(Upstream::ClusterManager& cm);\n};\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/common/pass_through_filter.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n// A decoder filter which passes all data through with Continue status.\nclass PassThroughDecoderFilter : public virtual StreamDecoderFilter {\npublic:\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  }\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    decoder_callbacks_ = &callbacks;\n  }\n\nprotected:\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_{};\n};\n\n// An encoder filter which passes all data through with Continue status.\nclass PassThroughEncoderFilter : public virtual StreamEncoderFilter {\npublic:\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  }\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override {\n    
encoder_callbacks_ = &callbacks;\n  }\n\nprotected:\n  Http::StreamEncoderFilterCallbacks* encoder_callbacks_{};\n};\n\n// A filter which passes all data through with Continue status.\nclass PassThroughFilter : public StreamFilter,\n                          public PassThroughDecoderFilter,\n                          public PassThroughEncoderFilter {};\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/common/utility.h",
    "content": "#pragma once\n\n#include \"common/common/macros.h\"\n\n#include \"extensions/common/utility.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\n\n/**\n * FilterNameUtil provides utilities for handling HTTP filter\n * extension names (e.g., \"envoy.filters.http.buffer\").\n */\nclass FilterNameUtil {\npublic:\n  /**\n   * Given a deprecated HTTP filter extension name, return the\n   * canonical name. Any name not defined in the deprecated map is\n   * returned without modification. If deprecated extension names are\n   * disabled, throws EnvoyException.\n   *\n   * @return const std::string& canonical filter name\n   * @throw EnvoyException if deprecated names are disabled\n   */\n  static const std::string&\n  canonicalFilterName(const std::string& name,\n                      Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting()) {\n    const auto& it = deprecatedNameMap().find(name);\n    if (it == deprecatedNameMap().end()) {\n      return name;\n    }\n\n    Extensions::Common::Utility::ExtensionNameUtil::checkDeprecatedExtensionName(\n        \"http filter\", name, it->second, runtime);\n\n    return it->second;\n  }\n\nprivate:\n  using DeprecatedNameMap = absl::flat_hash_map<std::string, std::string>;\n\n  static const DeprecatedNameMap& deprecatedNameMap() {\n    CONSTRUCT_ON_FIRST_USE(\n        DeprecatedNameMap,\n        {\n            {\"envoy.buffer\", HttpFilters::HttpFilterNames::get().Buffer},\n            {\"envoy.cors\", HttpFilters::HttpFilterNames::get().Cors},\n            {\"envoy.csrf\", HttpFilters::HttpFilterNames::get().Csrf},\n            {\"envoy.ext_authz\", HttpFilters::HttpFilterNames::get().ExtAuthorization},\n            {\"envoy.fault\", HttpFilters::HttpFilterNames::get().Fault},\n            {\"envoy.grpc_http1_bridge\", HttpFilters::HttpFilterNames::get().GrpcHttp1Bridge},\n            
{\"envoy.grpc_json_transcoder\", HttpFilters::HttpFilterNames::get().GrpcJsonTranscoder},\n            {\"envoy.grpc_web\", HttpFilters::HttpFilterNames::get().GrpcWeb},\n            {\"envoy.gzip\", HttpFilters::HttpFilterNames::get().EnvoyGzip},\n            {\"envoy.health_check\", HttpFilters::HttpFilterNames::get().HealthCheck},\n            {\"envoy.http_dynamic_filter\", HttpFilters::HttpFilterNames::get().Dynamo},\n            {\"envoy.ip_tagging\", HttpFilters::HttpFilterNames::get().IpTagging},\n            {\"envoy.lua\", HttpFilters::HttpFilterNames::get().Lua},\n            {\"envoy.rate_limit\", HttpFilters::HttpFilterNames::get().RateLimit},\n            {\"envoy.router\", HttpFilters::HttpFilterNames::get().Router},\n            {\"envoy.squash\", HttpFilters::HttpFilterNames::get().Squash},\n        });\n  }\n};\n\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/compressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that performs compression with configurable compression libraries\n# Public docs: docs/root/configuration/http_filters/compressor_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"compressor_filter_lib\",\n    srcs = [\"compressor_filter.cc\"],\n    hdrs = [\"compressor_filter.h\"],\n    deps = [\n        \"//include/envoy/compression/compressor:compressor_factory_interface\",\n        \"//source/extensions/filters/http/common/compressor:compressor_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":compressor_filter_lib\",\n        \"//include/envoy/compression/compressor:compressor_config_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/compressor/compressor_filter.cc",
    "content": "#include \"extensions/filters/http/compressor/compressor_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Compressor {\n\nCompressorFilterConfig::CompressorFilterConfig(\n    const envoy::extensions::filters::http::compressor::v3::Compressor& generic_compressor,\n    const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime,\n    Compression::Compressor::CompressorFactoryPtr compressor_factory)\n    : Common::Compressors::CompressorFilterConfig(\n          generic_compressor,\n          stats_prefix + \"compressor.\" + generic_compressor.compressor_library().name() + \".\" +\n              compressor_factory->statsPrefix(),\n          scope, runtime, compressor_factory->contentEncoding()),\n      compressor_factory_(std::move(compressor_factory)) {}\n\nEnvoy::Compression::Compressor::CompressorPtr CompressorFilterConfig::makeCompressor() {\n  return compressor_factory_->createCompressor();\n}\n\n} // namespace Compressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/compressor/compressor_filter.h",
    "content": "#pragma once\n\n#include \"envoy/compression/compressor/factory.h\"\n#include \"envoy/extensions/filters/http/compressor/v3/compressor.pb.h\"\n\n#include \"extensions/filters/http/common/compressor/compressor.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Compressor {\n\n/**\n * Configuration for the compressor filter.\n */\nclass CompressorFilterConfig : public Common::Compressors::CompressorFilterConfig {\n  // TODO(rojkov): move functionality of Common::Compressors::CompressorFilterConfig\n  // to this class when `envoy.filters.http.gzip` is fully deprecated and dropped.\npublic:\n  CompressorFilterConfig() = delete;\n  CompressorFilterConfig(\n      const envoy::extensions::filters::http::compressor::v3::Compressor& genereic_compressor,\n      const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime,\n      Envoy::Compression::Compressor::CompressorFactoryPtr compressor_factory);\n\n  Envoy::Compression::Compressor::CompressorPtr makeCompressor() override;\n\nprivate:\n  const Envoy::Compression::Compressor::CompressorFactoryPtr compressor_factory_;\n};\n\n} // namespace Compressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/compressor/config.cc",
    "content": "#include \"extensions/filters/http/compressor/config.h\"\n\n#include \"envoy/compression/compressor/config.h\"\n\n#include \"common/config/utility.h\"\n\n#include \"extensions/filters/http/compressor/compressor_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Compressor {\n\nHttp::FilterFactoryCb CompressorFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::compressor::v3::Compressor& proto_config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n  // TODO(rojkov): instead of throwing an exception make the Compressor.compressor_library field\n  // required when the Gzip HTTP-filter is fully deprecated and removed.\n  if (!proto_config.has_compressor_library()) {\n    throw EnvoyException(\"Compressor filter doesn't have compressor_library defined\");\n  }\n  const std::string type{TypeUtil::typeUrlToDescriptorFullName(\n      proto_config.compressor_library().typed_config().type_url())};\n  Compression::Compressor::NamedCompressorLibraryConfigFactory* const config_factory =\n      Registry::FactoryRegistry<\n          Compression::Compressor::NamedCompressorLibraryConfigFactory>::getFactoryByType(type);\n  if (config_factory == nullptr) {\n    throw EnvoyException(\n        fmt::format(\"Didn't find a registered implementation for type: '{}'\", type));\n  }\n  ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig(\n      proto_config.compressor_library().typed_config(), context.messageValidationVisitor(),\n      *config_factory);\n  Compression::Compressor::CompressorFactoryPtr compressor_factory =\n      config_factory->createCompressorFactoryFromProto(*message, context);\n  Common::Compressors::CompressorFilterConfigSharedPtr config =\n      std::make_shared<CompressorFilterConfig>(proto_config, stats_prefix, context.scope(),\n                                               context.runtime(), 
std::move(compressor_factory));\n  return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<Common::Compressors::CompressorFilter>(config));\n  };\n}\n\n/**\n * Static registration for the compressor filter. @see NamedHttpFilterConfigFactory.\n */\nREGISTER_FACTORY(CompressorFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace Compressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/compressor/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/compressor/v3/compressor.pb.h\"\n#include \"envoy/extensions/filters/http/compressor/v3/compressor.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Compressor {\n\n/**\n * Config registration for the compressor filter. @see NamedHttpFilterConfigFactory.\n */\nclass CompressorFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::compressor::v3::Compressor> {\npublic:\n  CompressorFilterFactory() : FactoryBase(HttpFilterNames::get().Compressor) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::compressor::v3::Compressor& config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\nDECLARE_FACTORY(CompressorFilterFactory);\n\n} // namespace Compressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cors/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP filter which implements CORS processing (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing)\n# Public docs: docs/root/configuration/http_filters/cors_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"cors_filter_lib\",\n    srcs = [\"cors_filter.cc\"],\n    hdrs = [\"cors_filter.h\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/cors:cors_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/cors/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/cors/config.cc",
    "content": "#include \"extensions/filters/http/cors/config.h\"\n\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/cors/cors_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cors {\n\nHttp::FilterFactoryCb CorsFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::cors::v3::Cors&, const std::string& stats_prefix,\n    Server::Configuration::FactoryContext& context) {\n  CorsFilterConfigSharedPtr config =\n      std::make_shared<CorsFilterConfig>(stats_prefix, context.scope());\n  return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<CorsFilter>(config));\n  };\n}\n\n/**\n * Static registration for the cors filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(CorsFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.cors\"};\n\n} // namespace Cors\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cors/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/cors/v3/cors.pb.h\"\n#include \"envoy/extensions/filters/http/cors/v3/cors.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cors {\n\n/**\n * Config registration for the cors filter. @see NamedHttpFilterConfigFactory.\n */\nclass CorsFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::cors::v3::Cors> {\npublic:\n  CorsFilterFactory() : FactoryBase(HttpFilterNames::get().Cors) {}\n\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::cors::v3::Cors& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace Cors\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cors/cors_filter.cc",
    "content": "#include \"extensions/filters/http/cors/cors_filter.h\"\n\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cors {\n\nstruct HttpResponseCodeDetailValues {\n  const absl::string_view CorsResponse = \"cors_response\";\n};\nusing HttpResponseCodeDetails = ConstSingleton<HttpResponseCodeDetailValues>;\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    access_control_request_method_handle(Http::CustomHeaders::get().AccessControlRequestMethod);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    origin_handle(Http::CustomHeaders::get().Origin);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    access_control_allow_origin_handle(Http::CustomHeaders::get().AccessControlAllowOrigin);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    access_control_allow_credentials_handle(\n        Http::CustomHeaders::get().AccessControlAllowCredentials);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    access_control_allow_methods_handle(Http::CustomHeaders::get().AccessControlAllowMethods);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    access_control_allow_headers_handle(Http::CustomHeaders::get().AccessControlAllowHeaders);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    access_control_max_age_handle(Http::CustomHeaders::get().AccessControlMaxAge);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    
access_control_expose_headers_handle(Http::CustomHeaders::get().AccessControlExposeHeaders);\n\nCorsFilterConfig::CorsFilterConfig(const std::string& stats_prefix, Stats::Scope& scope)\n    : stats_(generateStats(stats_prefix + \"cors.\", scope)) {}\n\nCorsFilter::CorsFilter(CorsFilterConfigSharedPtr config)\n    : policies_({{nullptr, nullptr}}), config_(std::move(config)) {}\n\n// This handles the CORS preflight request as described in\n// https://www.w3.org/TR/cors/#resource-preflight-requests\nHttp::FilterHeadersStatus CorsFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  if (decoder_callbacks_->route() == nullptr ||\n      decoder_callbacks_->route()->routeEntry() == nullptr) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  policies_ = {{\n      decoder_callbacks_->route()->routeEntry()->corsPolicy(),\n      decoder_callbacks_->route()->routeEntry()->virtualHost().corsPolicy(),\n  }};\n\n  if (!enabled() && !shadowEnabled()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  origin_ = headers.getInline(origin_handle.handle());\n  if (origin_ == nullptr || origin_->value().empty()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (!isOriginAllowed(origin_->value())) {\n    config_->stats().origin_invalid_.inc();\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  config_->stats().origin_valid_.inc();\n  if (shadowEnabled() && !enabled()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  is_cors_request_ = true;\n\n  const absl::string_view method = headers.getMethodValue();\n  if (method != Http::Headers::get().MethodValues.Options) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (headers.getInlineValue(access_control_request_method_handle.handle()).empty()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  auto response_headers{Http::createHeaderMap<Http::ResponseHeaderMapImpl>(\n      {{Http::Headers::get().Status, 
std::to_string(enumToInt(Http::Code::OK))}})};\n\n  response_headers->setInline(access_control_allow_origin_handle.handle(),\n                              origin_->value().getStringView());\n\n  if (allowCredentials()) {\n    response_headers->setReferenceInline(access_control_allow_credentials_handle.handle(),\n                                         Http::CustomHeaders::get().CORSValues.True);\n  }\n\n  if (!allowMethods().empty()) {\n    response_headers->setInline(access_control_allow_methods_handle.handle(), allowMethods());\n  }\n\n  if (!allowHeaders().empty()) {\n    response_headers->setInline(access_control_allow_headers_handle.handle(), allowHeaders());\n  }\n\n  if (!maxAge().empty()) {\n    response_headers->setInline(access_control_max_age_handle.handle(), maxAge());\n  }\n\n  decoder_callbacks_->encodeHeaders(std::move(response_headers), true,\n                                    HttpResponseCodeDetails::get().CorsResponse);\n\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\n// This handles simple CORS requests as described in\n// https://www.w3.org/TR/cors/#resource-requests\nHttp::FilterHeadersStatus CorsFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) {\n  if (!is_cors_request_) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  headers.setInline(access_control_allow_origin_handle.handle(), origin_->value().getStringView());\n  if (allowCredentials()) {\n    headers.setReferenceInline(access_control_allow_credentials_handle.handle(),\n                               Http::CustomHeaders::get().CORSValues.True);\n  }\n\n  if (!exposeHeaders().empty()) {\n    headers.setInline(access_control_expose_headers_handle.handle(), exposeHeaders());\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nvoid CorsFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  decoder_callbacks_ = &callbacks;\n}\n\nbool CorsFilter::isOriginAllowed(const Http::HeaderString& origin) {\n  const auto 
allow_origins = allowOrigins();\n  if (allow_origins == nullptr) {\n    return false;\n  }\n  for (const auto& allow_origin : *allow_origins) {\n    if (allow_origin->match(\"*\") || allow_origin->match(origin.getStringView())) {\n      return true;\n    }\n  }\n  return false;\n}\n\nconst std::vector<Matchers::StringMatcherPtr>* CorsFilter::allowOrigins() {\n  for (const auto policy : policies_) {\n    if (policy && !policy->allowOrigins().empty()) {\n      return &policy->allowOrigins();\n    }\n  }\n  return nullptr;\n}\n\nconst std::string& CorsFilter::allowMethods() {\n  for (const auto policy : policies_) {\n    if (policy && !policy->allowMethods().empty()) {\n      return policy->allowMethods();\n    }\n  }\n  return EMPTY_STRING;\n}\n\nconst std::string& CorsFilter::allowHeaders() {\n  for (const auto policy : policies_) {\n    if (policy && !policy->allowHeaders().empty()) {\n      return policy->allowHeaders();\n    }\n  }\n  return EMPTY_STRING;\n}\n\nconst std::string& CorsFilter::exposeHeaders() {\n  for (const auto policy : policies_) {\n    if (policy && !policy->exposeHeaders().empty()) {\n      return policy->exposeHeaders();\n    }\n  }\n  return EMPTY_STRING;\n}\n\nconst std::string& CorsFilter::maxAge() {\n  for (const auto policy : policies_) {\n    if (policy && !policy->maxAge().empty()) {\n      return policy->maxAge();\n    }\n  }\n  return EMPTY_STRING;\n}\n\nbool CorsFilter::allowCredentials() {\n  for (const auto policy : policies_) {\n    if (policy && policy->allowCredentials()) {\n      return policy->allowCredentials().value();\n    }\n  }\n  return false;\n}\n\nbool CorsFilter::shadowEnabled() {\n  for (const auto policy : policies_) {\n    if (policy) {\n      return policy->shadowEnabled();\n    }\n  }\n  return false;\n}\n\nbool CorsFilter::enabled() {\n  for (const auto policy : policies_) {\n    if (policy) {\n      return policy->enabled();\n    }\n  }\n  return false;\n}\n\n} // namespace Cors\n} // namespace HttpFilters\n} 
// namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/cors/cors_filter.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cors {\n\n/**\n * All CORS filter stats. @see stats_macros.h\n */\n#define ALL_CORS_STATS(COUNTER)                                                                    \\\n  COUNTER(origin_valid)                                                                            \\\n  COUNTER(origin_invalid)\n\n/**\n * Struct definition for CORS stats. @see stats_macros.h\n */\nstruct CorsStats {\n  ALL_CORS_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration for the CORS filter.\n */\nclass CorsFilterConfig {\npublic:\n  CorsFilterConfig(const std::string& stats_prefix, Stats::Scope& scope);\n  CorsStats& stats() { return stats_; }\n\nprivate:\n  static CorsStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return CorsStats{ALL_CORS_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n  }\n\n  CorsStats stats_;\n};\nusing CorsFilterConfigSharedPtr = std::shared_ptr<CorsFilterConfig>;\n\nclass CorsFilter : public Http::StreamFilter {\npublic:\n  CorsFilter(CorsFilterConfigSharedPtr config);\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  };\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  };\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) 
override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  };\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  };\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override {\n    encoder_callbacks_ = &callbacks;\n  };\n\nprivate:\n  friend class CorsFilterTest;\n\n  const std::vector<Matchers::StringMatcherPtr>* allowOrigins();\n  const std::string& allowMethods();\n  const std::string& allowHeaders();\n  const std::string& exposeHeaders();\n  const std::string& maxAge();\n  bool allowCredentials();\n  bool shadowEnabled();\n  bool enabled();\n  bool isOriginAllowed(const Http::HeaderString& origin);\n\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_{};\n  Http::StreamEncoderFilterCallbacks* encoder_callbacks_{};\n  std::array<const Envoy::Router::CorsPolicy*, 2> policies_;\n  bool is_cors_request_{};\n  const Http::HeaderEntry* origin_{};\n\n  CorsFilterConfigSharedPtr config_;\n};\n\n} // namespace Cors\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/csrf/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP filter which implements CSRF processing (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF))\n# Public docs: docs/root/configuration/http_filters/csrf_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"csrf_filter_lib\",\n    srcs = [\"csrf_filter.cc\"],\n    hdrs = [\"csrf_filter.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/csrf/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/csrf:csrf_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/csrf/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/csrf/config.cc",
    "content": "#include \"extensions/filters/http/csrf/config.h\"\n\n#include \"envoy/extensions/filters/http/csrf/v3/csrf.pb.h\"\n#include \"envoy/extensions/filters/http/csrf/v3/csrf.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/csrf/csrf_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Csrf {\n\nHttp::FilterFactoryCb CsrfFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::csrf::v3::CsrfPolicy& policy,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n  CsrfFilterConfigSharedPtr config =\n      std::make_shared<CsrfFilterConfig>(policy, stats_prefix, context.scope(), context.runtime());\n  return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(std::make_shared<CsrfFilter>(config));\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nCsrfFilterFactory::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::csrf::v3::CsrfPolicy& policy,\n    Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<const Csrf::CsrfPolicy>(policy, context.runtime());\n}\n\n/**\n * Static registration for the CSRF filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(CsrfFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.csrf\"};\n\n} // namespace Csrf\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/csrf/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/csrf/v3/csrf.pb.h\"\n#include \"envoy/extensions/filters/http/csrf/v3/csrf.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Csrf {\n\n/**\n * Config registration for the CSRF filter. @see NamedHttpFilterConfigFactory.\n */\nclass CsrfFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::csrf::v3::CsrfPolicy> {\npublic:\n  CsrfFilterFactory() : FactoryBase(HttpFilterNames::get().Csrf) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::csrf::v3::CsrfPolicy& policy,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::csrf::v3::CsrfPolicy& policy,\n      Server::Configuration::ServerFactoryContext& context,\n      ProtobufMessage::ValidationVisitor& validator) override;\n};\n\n} // namespace Csrf\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/csrf/csrf_filter.cc",
    "content": "#include \"extensions/filters/http/csrf/csrf_filter.h\"\n\n#include \"envoy/extensions/filters/http/csrf/v3/csrf.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Csrf {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    origin_handle(Http::CustomHeaders::get().Origin);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    referer_handle(Http::CustomHeaders::get().Referer);\n\nstruct RcDetailsValues {\n  const std::string OriginMismatch = \"csrf_origin_mismatch\";\n};\nusing RcDetails = ConstSingleton<RcDetailsValues>;\n\nnamespace {\nbool isModifyMethod(const Http::RequestHeaderMap& headers) {\n  const absl::string_view method_type = headers.getMethodValue();\n  if (method_type.empty()) {\n    return false;\n  }\n  const auto& method_values = Http::Headers::get().MethodValues;\n  return (method_type == method_values.Post || method_type == method_values.Put ||\n          method_type == method_values.Delete || method_type == method_values.Patch);\n}\n\nstd::string hostAndPort(const absl::string_view absolute_url) {\n  Http::Utility::Url url;\n  if (!absolute_url.empty()) {\n    if (url.initialize(absolute_url, /*is_connect=*/false)) {\n      return std::string(url.hostAndPort());\n    }\n    return std::string(absolute_url);\n  }\n  return EMPTY_STRING;\n}\n\n// Note: per https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin,\n//       the Origin header must include the scheme (and hostAndPort expects\n//       an absolute URL).\nstd::string sourceOriginValue(const Http::RequestHeaderMap& headers) {\n  const auto origin = 
hostAndPort(headers.getInlineValue(origin_handle.handle()));\n  if (!origin.empty()) {\n    return origin;\n  }\n  return hostAndPort(headers.getInlineValue(referer_handle.handle()));\n}\n\nstd::string targetOriginValue(const Http::RequestHeaderMap& headers) {\n  const auto host_value = headers.getHostValue();\n\n  // Don't even bother if there's not Host header.\n  if (host_value.empty()) {\n    return EMPTY_STRING;\n  }\n\n  const auto absolute_url = fmt::format(\n      \"{}://{}\", headers.Scheme() != nullptr ? headers.getSchemeValue() : \"http\", host_value);\n  return hostAndPort(absolute_url);\n}\n\nstatic CsrfStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n  const std::string final_prefix = prefix + \"csrf.\";\n  return CsrfStats{ALL_CSRF_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))};\n}\n\nstatic CsrfPolicyPtr\ngeneratePolicy(const envoy::extensions::filters::http::csrf::v3::CsrfPolicy& policy,\n               Runtime::Loader& runtime) {\n  return std::make_unique<CsrfPolicy>(policy, runtime);\n}\n} // namespace\n\nCsrfFilterConfig::CsrfFilterConfig(\n    const envoy::extensions::filters::http::csrf::v3::CsrfPolicy& policy,\n    const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime)\n    : stats_(generateStats(stats_prefix, scope)), policy_(generatePolicy(policy, runtime)) {}\n\nCsrfFilter::CsrfFilter(const CsrfFilterConfigSharedPtr config) : config_(config) {}\n\nHttp::FilterHeadersStatus CsrfFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  determinePolicy();\n\n  if (!policy_->enabled() && !policy_->shadowEnabled()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (!isModifyMethod(headers)) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  bool is_valid = true;\n  const auto source_origin = sourceOriginValue(headers);\n  if (source_origin.empty()) {\n    is_valid = false;\n    config_->stats().missing_source_origin_.inc();\n  }\n\n  if (!isValid(source_origin, 
headers)) {\n    is_valid = false;\n    config_->stats().request_invalid_.inc();\n  }\n\n  if (is_valid == true) {\n    config_->stats().request_valid_.inc();\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (policy_->shadowEnabled() && !policy_->enabled()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  callbacks_->sendLocalReply(Http::Code::Forbidden, \"Invalid origin\", nullptr, absl::nullopt,\n                             RcDetails::get().OriginMismatch);\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\nvoid CsrfFilter::determinePolicy() {\n  const std::string& name = Extensions::HttpFilters::HttpFilterNames::get().Csrf;\n  const CsrfPolicy* policy =\n      Http::Utility::resolveMostSpecificPerFilterConfig<CsrfPolicy>(name, callbacks_->route());\n  if (policy != nullptr) {\n    policy_ = policy;\n  } else {\n    policy_ = config_->policy();\n  }\n}\n\nbool CsrfFilter::isValid(const absl::string_view source_origin, Http::RequestHeaderMap& headers) {\n  const auto target_origin = targetOriginValue(headers);\n  if (source_origin == target_origin) {\n    return true;\n  }\n\n  for (const auto& additional_origin : policy_->additionalOrigins()) {\n    if (additional_origin->match(source_origin)) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\n} // namespace Csrf\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/csrf/csrf_filter.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/http/csrf/v3/csrf.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/matchers.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Csrf {\n\n/**\n * All CSRF filter stats. @see stats_macros.h\n */\n#define ALL_CSRF_STATS(COUNTER)                                                                    \\\n  COUNTER(missing_source_origin)                                                                   \\\n  COUNTER(request_invalid)                                                                         \\\n  COUNTER(request_valid)\n\n/**\n * Struct definition for CSRF stats. @see stats_macros.h\n */\nstruct CsrfStats {\n  ALL_CSRF_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration for CSRF policy.\n */\nclass CsrfPolicy : public Router::RouteSpecificFilterConfig {\npublic:\n  CsrfPolicy(const envoy::extensions::filters::http::csrf::v3::CsrfPolicy& policy,\n             Runtime::Loader& runtime)\n      : policy_(policy), runtime_(runtime) {\n    for (const auto& additional_origin : policy.additional_origins()) {\n      additional_origins_.emplace_back(\n          std::make_unique<Matchers::StringMatcherImpl>(additional_origin));\n    }\n  }\n\n  bool enabled() const {\n    const envoy::config::core::v3::RuntimeFractionalPercent& filter_enabled =\n        policy_.filter_enabled();\n    return runtime_.snapshot().featureEnabled(filter_enabled.runtime_key(),\n                                              filter_enabled.default_value());\n  }\n\n  bool shadowEnabled() const {\n    if (!policy_.has_shadow_enabled()) {\n      return false;\n    }\n    const envoy::config::core::v3::RuntimeFractionalPercent& shadow_enabled =\n        policy_.shadow_enabled();\n    return 
runtime_.snapshot().featureEnabled(shadow_enabled.runtime_key(),\n                                              shadow_enabled.default_value());\n  }\n\n  const std::vector<Matchers::StringMatcherPtr>& additionalOrigins() const {\n    return additional_origins_;\n  };\n\nprivate:\n  const envoy::extensions::filters::http::csrf::v3::CsrfPolicy policy_;\n  std::vector<Matchers::StringMatcherPtr> additional_origins_;\n  Runtime::Loader& runtime_;\n};\nusing CsrfPolicyPtr = std::unique_ptr<CsrfPolicy>;\n\n/**\n * Configuration for the CSRF filter.\n */\nclass CsrfFilterConfig {\npublic:\n  CsrfFilterConfig(const envoy::extensions::filters::http::csrf::v3::CsrfPolicy& policy,\n                   const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime);\n\n  CsrfStats& stats() { return stats_; }\n  const CsrfPolicy* policy() { return policy_.get(); }\n\nprivate:\n  CsrfStats stats_;\n  const CsrfPolicyPtr policy_;\n};\nusing CsrfFilterConfigSharedPtr = std::shared_ptr<CsrfFilterConfig>;\n\nclass CsrfFilter : public Http::StreamDecoderFilter {\npublic:\n  CsrfFilter(CsrfFilterConfigSharedPtr config);\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  }\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    callbacks_ = &callbacks;\n  }\n\nprivate:\n  void determinePolicy();\n  bool isValid(const absl::string_view source_origin, Http::RequestHeaderMap& headers);\n\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n  CsrfFilterConfigSharedPtr config_;\n  const 
CsrfPolicy* policy_;\n};\n\n} // namespace Csrf\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/decompressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that performs decompression with configurable decompression libraries\n# Public docs: docs/root/configuration/http_filters/decompressor_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"decompressor_filter_lib\",\n    srcs = [\"decompressor_filter.cc\"],\n    hdrs = [\"decompressor_filter.h\"],\n    deps = [\n        \"//include/envoy/compression/decompressor:decompressor_config_interface\",\n        \"//include/envoy/compression/decompressor:decompressor_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/decompressor/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream_and_upstream\",\n    deps = [\n        \":decompressor_filter_lib\",\n        \"//include/envoy/compression/decompressor:decompressor_config_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/decompressor/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/decompressor/config.cc",
    "content": "#include \"extensions/filters/http/decompressor/config.h\"\n\n#include \"envoy/compression/decompressor/config.h\"\n\n#include \"common/config/utility.h\"\n\n#include \"extensions/filters/http/decompressor/decompressor_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Decompressor {\n\nHttp::FilterFactoryCb DecompressorFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n  const std::string decompressor_library_type{TypeUtil::typeUrlToDescriptorFullName(\n      proto_config.decompressor_library().typed_config().type_url())};\n  Compression::Decompressor::NamedDecompressorLibraryConfigFactory* const\n      decompressor_library_factory = Registry::FactoryRegistry<\n          Compression::Decompressor::NamedDecompressorLibraryConfigFactory>::\n          getFactoryByType(decompressor_library_type);\n  if (decompressor_library_factory == nullptr) {\n    throw EnvoyException(fmt::format(\"Didn't find a registered implementation for type: '{}'\",\n                                     decompressor_library_type));\n  }\n  ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig(\n      proto_config.decompressor_library().typed_config(), context.messageValidationVisitor(),\n      *decompressor_library_factory);\n  Compression::Decompressor::DecompressorFactoryPtr decompressor_factory =\n      decompressor_library_factory->createDecompressorFactoryFromProto(*message, context);\n  DecompressorFilterConfigSharedPtr filter_config = std::make_shared<DecompressorFilterConfig>(\n      proto_config, stats_prefix, context.scope(), context.runtime(),\n      std::move(decompressor_factory));\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    
callbacks.addStreamFilter(std::make_shared<DecompressorFilter>(filter_config));\n  };\n}\n\n/**\n * Static registration for the decompressor filter. @see NamedHttpFilterConfigFactory.\n */\nREGISTER_FACTORY(DecompressorFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace Decompressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/http/decompressor/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/decompressor/v3/decompressor.pb.h\"\n#include \"envoy/extensions/filters/http/decompressor/v3/decompressor.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Decompressor {\n\n/**\n * Config registration for the decompressor filter. @see NamedHttpFilterConfigFactory.\n */\nclass DecompressorFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::decompressor::v3::Decompressor> {\npublic:\n  DecompressorFilterFactory() : FactoryBase(HttpFilterNames::get().Decompressor) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::decompressor::v3::Decompressor& config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\nDECLARE_FACTORY(DecompressorFilterFactory);\n\n} // namespace Decompressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/http/decompressor/decompressor_filter.cc",
    "content": "#include \"extensions/filters/http/decompressor/decompressor_filter.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/macros.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Decompressor {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    accept_encoding_handle(Http::CustomHeaders::get().AcceptEncoding);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    cache_control_request_handle(Http::CustomHeaders::get().CacheControl);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    content_encoding_request_handle(Http::CustomHeaders::get().ContentEncoding);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    cache_control_response_handle(Http::CustomHeaders::get().CacheControl);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    content_encoding_response_handle(Http::CustomHeaders::get().ContentEncoding);\n\nDecompressorFilterConfig::DecompressorFilterConfig(\n    const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config,\n    const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime,\n    Compression::Decompressor::DecompressorFactoryPtr decompressor_factory)\n    : stats_prefix_(fmt::format(\"{}decompressor.{}.{}\", stats_prefix,\n                                proto_config.decompressor_library().name(),\n                                decompressor_factory->statsPrefix())),\n      trailers_prefix_(fmt::format(\"{}-decompressor-{}\",\n                                   ThreadSafeSingleton<Http::PrefixValue>::get().prefix(),\n                                   proto_config.decompressor_library().name())),\n      decompressor_stats_prefix_(stats_prefix_ + \"decompressor_library\"),\n      
decompressor_factory_(std::move(decompressor_factory)),\n      request_direction_config_(proto_config.request_direction_config(), stats_prefix_, scope,\n                                runtime),\n      response_direction_config_(proto_config.response_direction_config(), stats_prefix_, scope,\n                                 runtime) {}\n\nDecompressorFilterConfig::DirectionConfig::DirectionConfig(\n    const envoy::extensions::filters::http::decompressor::v3::Decompressor::CommonDirectionConfig&\n        proto_config,\n    const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime)\n    : stats_(generateStats(stats_prefix, scope)),\n      decompression_enabled_(proto_config.enabled(), runtime) {}\n\nDecompressorFilterConfig::RequestDirectionConfig::RequestDirectionConfig(\n    const envoy::extensions::filters::http::decompressor::v3::Decompressor::RequestDirectionConfig&\n        proto_config,\n    const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime)\n    : DirectionConfig(proto_config.common_config(), stats_prefix + \"request.\", scope, runtime),\n      advertise_accept_encoding_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config, advertise_accept_encoding, true)) {}\n\nDecompressorFilterConfig::ResponseDirectionConfig::ResponseDirectionConfig(\n    const envoy::extensions::filters::http::decompressor::v3::Decompressor::ResponseDirectionConfig&\n        proto_config,\n    const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime)\n    : DirectionConfig(proto_config.common_config(), stats_prefix + \"response.\", scope, runtime) {}\n\nDecompressorFilter::DecompressorFilter(DecompressorFilterConfigSharedPtr config)\n    : config_(std::move(config)), request_byte_tracker_(config_->trailersCompressedBytesString(),\n                                                        config_->trailersUncompressedBytesString()),\n      response_byte_tracker_(config_->trailersCompressedBytesString(),\n          
                   config_->trailersUncompressedBytesString()) {}\n\nHttp::FilterHeadersStatus DecompressorFilter::decodeHeaders(Http::RequestHeaderMap& headers,\n                                                            bool end_stream) {\n  // Two responsibilities on the request side:\n  //   1. If response decompression is enabled (and advertisement is enabled), then advertise to\n  //      the upstream that this hop is able to decompress responses via the Accept-Encoding header.\n  if (config_->responseDirectionConfig().decompressionEnabled() &&\n      config_->requestDirectionConfig().advertiseAcceptEncoding()) {\n    headers.appendInline(accept_encoding_handle.handle(), config_->contentEncoding(), \",\");\n    ENVOY_STREAM_LOG(debug,\n                     \"DecompressorFilter::decodeHeaders advertise Accept-Encoding with value '{}'\",\n                     *decoder_callbacks_, headers.getInlineValue(accept_encoding_handle.handle()));\n  }\n\n  // Headers-only requests do not, by definition, get decompressed.\n  if (end_stream) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  ENVOY_STREAM_LOG(debug, \"DecompressorFilter::decodeHeaders: {}\", *decoder_callbacks_, headers);\n\n  //   2. 
Setup request decompression if all checks comply.\n  return maybeInitDecompress(config_->requestDirectionConfig(), request_decompressor_,\n                             *decoder_callbacks_, headers);\n};\n\nHttp::FilterDataStatus DecompressorFilter::decodeData(Buffer::Instance& data, bool end_stream) {\n  if (request_decompressor_) {\n    HeaderMapOptRef trailers;\n    if (end_stream) {\n      trailers = HeaderMapOptRef(std::ref(decoder_callbacks_->addDecodedTrailers()));\n    }\n    decompress(config_->requestDirectionConfig(), request_decompressor_, *decoder_callbacks_, data,\n               request_byte_tracker_, trailers);\n  }\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus DecompressorFilter::decodeTrailers(Http::RequestTrailerMap& trailers) {\n  // Only report if the filter has actually decompressed.\n  if (request_decompressor_) {\n    request_byte_tracker_.reportTotalBytes(trailers);\n  }\n  return Http::FilterTrailersStatus::Continue;\n}\n\nHttp::FilterHeadersStatus DecompressorFilter::encodeHeaders(Http::ResponseHeaderMap& headers,\n                                                            bool end_stream) {\n  // Headers only response, continue.\n  if (end_stream) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  ENVOY_STREAM_LOG(debug, \"DecompressorFilter::encodeHeaders: {}\", *encoder_callbacks_, headers);\n\n  return maybeInitDecompress(config_->responseDirectionConfig(), response_decompressor_,\n                             *encoder_callbacks_, headers);\n}\n\nHttp::FilterDataStatus DecompressorFilter::encodeData(Buffer::Instance& data, bool end_stream) {\n  if (response_decompressor_) {\n    HeaderMapOptRef trailers;\n    if (end_stream) {\n      trailers = HeaderMapOptRef(std::ref(encoder_callbacks_->addEncodedTrailers()));\n    }\n    decompress(config_->responseDirectionConfig(), response_decompressor_, *encoder_callbacks_,\n               data, response_byte_tracker_, trailers);\n  }\n  return 
Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus DecompressorFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) {\n  // Only report if the filter has actually decompressed.\n  if (response_decompressor_) {\n    response_byte_tracker_.reportTotalBytes(trailers);\n  }\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid DecompressorFilter::decompress(\n    const DecompressorFilterConfig::DirectionConfig& direction_config,\n    const Compression::Decompressor::DecompressorPtr& decompressor,\n    Http::StreamFilterCallbacks& callbacks, Buffer::Instance& input_buffer,\n    ByteTracker& byte_tracker, HeaderMapOptRef trailers) const {\n  ASSERT(decompressor);\n  Buffer::OwnedImpl output_buffer;\n  decompressor->decompress(input_buffer, output_buffer);\n\n  // Report decompression via stats and logging before modifying the input buffer.\n  byte_tracker.chargeBytes(input_buffer.length(), output_buffer.length());\n  direction_config.stats().total_compressed_bytes_.add(input_buffer.length());\n  direction_config.stats().total_uncompressed_bytes_.add(output_buffer.length());\n  ENVOY_STREAM_LOG(debug, \"{} data decompressed from {} bytes to {} bytes\", callbacks,\n                   direction_config.logString(), input_buffer.length(), output_buffer.length());\n\n  input_buffer.drain(input_buffer.length());\n  input_buffer.add(output_buffer);\n\n  if (trailers.has_value()) {\n    byte_tracker.reportTotalBytes(trailers.value().get());\n  }\n}\n\ntemplate <>\nHttp::CustomInlineHeaderRegistry::Handle<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\nDecompressorFilter::getCacheControlHandle() {\n  return cache_control_request_handle.handle();\n}\n\ntemplate <>\nHttp::CustomInlineHeaderRegistry::Handle<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\nDecompressorFilter::getCacheControlHandle() {\n  return cache_control_response_handle.handle();\n}\n\ntemplate 
<>\nHttp::CustomInlineHeaderRegistry::Handle<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\nDecompressorFilter::getContentEncodingHandle() {\n  return content_encoding_request_handle.handle();\n}\n\ntemplate <>\nHttp::CustomInlineHeaderRegistry::Handle<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\nDecompressorFilter::getContentEncodingHandle() {\n  return content_encoding_response_handle.handle();\n}\n\n} // namespace Decompressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/http/decompressor/decompressor_filter.h",
    "content": "#pragma once\n\n#include \"envoy/compression/decompressor/config.h\"\n#include \"envoy/compression/decompressor/decompressor.h\"\n#include \"envoy/extensions/filters/http/decompressor/v3/decompressor.pb.h\"\n#include \"envoy/http/filter.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/http/headers.h\"\n#include \"common/runtime/runtime_protos.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Decompressor {\n\n/**\n * All decompressor filter stats. @see stats_macros.h\n */\n#define ALL_DECOMPRESSOR_STATS(COUNTER)                                                            \\\n  COUNTER(decompressed)                                                                            \\\n  COUNTER(not_decompressed)                                                                        \\\n  COUNTER(total_uncompressed_bytes)                                                                \\\n  COUNTER(total_compressed_bytes)\n\n/**\n * Struct definition for decompressor stats. 
@see stats_macros.h\n */\nstruct DecompressorStats {\n  ALL_DECOMPRESSOR_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration for the decompressor filter.\n */\nclass DecompressorFilterConfig {\npublic:\n  class DirectionConfig {\n  public:\n    DirectionConfig(const envoy::extensions::filters::http::decompressor::v3::Decompressor::\n                        CommonDirectionConfig& proto_config,\n                    const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime);\n\n    virtual ~DirectionConfig() = default;\n\n    virtual const std::string& logString() const PURE;\n    const DecompressorStats& stats() const { return stats_; }\n    bool decompressionEnabled() const { return decompression_enabled_.enabled(); }\n\n  private:\n    static DecompressorStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n      return DecompressorStats{ALL_DECOMPRESSOR_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n    }\n\n    const DecompressorStats stats_;\n    const Runtime::FeatureFlag decompression_enabled_;\n  };\n\n  class RequestDirectionConfig : public DirectionConfig {\n  public:\n    RequestDirectionConfig(const envoy::extensions::filters::http::decompressor::v3::Decompressor::\n                               RequestDirectionConfig& proto_config,\n                           const std::string& stats_prefix, Stats::Scope& scope,\n                           Runtime::Loader& runtime);\n\n    // DirectionConfig\n    const std::string& logString() const override {\n      CONSTRUCT_ON_FIRST_USE(std::string, \"request\");\n    }\n\n    bool advertiseAcceptEncoding() const { return advertise_accept_encoding_; }\n\n  private:\n    const bool advertise_accept_encoding_;\n  };\n\n  class ResponseDirectionConfig : public DirectionConfig {\n  public:\n    ResponseDirectionConfig(const envoy::extensions::filters::http::decompressor::v3::Decompressor::\n                                ResponseDirectionConfig& proto_config,\n                 
           const std::string& stats_prefix, Stats::Scope& scope,\n                            Runtime::Loader& runtime);\n\n    // DirectionConfig\n    const std::string& logString() const override {\n      CONSTRUCT_ON_FIRST_USE(std::string, \"response\");\n    }\n  };\n\n  DecompressorFilterConfig(\n      const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config,\n      const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime,\n      Compression::Decompressor::DecompressorFactoryPtr decompressor_factory);\n\n  Compression::Decompressor::DecompressorPtr makeDecompressor() {\n    return decompressor_factory_->createDecompressor(decompressor_stats_prefix_);\n  }\n  const std::string& contentEncoding() { return decompressor_factory_->contentEncoding(); }\n  const RequestDirectionConfig& requestDirectionConfig() { return request_direction_config_; }\n  const ResponseDirectionConfig& responseDirectionConfig() { return response_direction_config_; }\n  const Http::LowerCaseString& trailersCompressedBytesString() const {\n    CONSTRUCT_ON_FIRST_USE(Http::LowerCaseString, Http::LowerCaseString(fmt::format(\n                                                      \"{}-compressed-bytes\", trailers_prefix_)));\n  }\n  const Http::LowerCaseString& trailersUncompressedBytesString() const {\n    CONSTRUCT_ON_FIRST_USE(Http::LowerCaseString, Http::LowerCaseString(fmt::format(\n                                                      \"{}-uncompressed-bytes\", trailers_prefix_)));\n  }\n\nprivate:\n  const std::string stats_prefix_;\n  const std::string trailers_prefix_;\n  const std::string decompressor_stats_prefix_;\n  const Compression::Decompressor::DecompressorFactoryPtr decompressor_factory_;\n  const RequestDirectionConfig request_direction_config_;\n  const ResponseDirectionConfig response_direction_config_;\n};\n\nusing DecompressorFilterConfigSharedPtr = std::shared_ptr<DecompressorFilterConfig>;\n\n/**\n * A filter that 
decompresses data bidirectionally.\n */\nclass DecompressorFilter : public Http::PassThroughFilter,\n                           public Logger::Loggable<Logger::Id::filter> {\npublic:\n  DecompressorFilter(DecompressorFilterConfigSharedPtr config);\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override;\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override;\n\nprivate:\n  struct ByteTracker {\n    ByteTracker(const Http::LowerCaseString& compressed_bytes_trailer,\n                const Http::LowerCaseString& uncompressed_bytes_trailer)\n        : compressed_bytes_trailer_(compressed_bytes_trailer),\n          uncompressed_bytes_trailer_(uncompressed_bytes_trailer) {}\n    void chargeBytes(uint64_t compressed_bytes, uint64_t uncompressed_bytes) {\n      total_compressed_bytes_ += compressed_bytes;\n      total_uncompressed_bytes_ += uncompressed_bytes;\n    }\n    void reportTotalBytes(Http::HeaderMap& trailers) const {\n      trailers.addReferenceKey(compressed_bytes_trailer_, total_compressed_bytes_);\n      trailers.addReferenceKey(uncompressed_bytes_trailer_, total_uncompressed_bytes_);\n    }\n\n  private:\n    const Http::LowerCaseString& compressed_bytes_trailer_;\n    const Http::LowerCaseString& uncompressed_bytes_trailer_;\n    uint64_t total_compressed_bytes_{};\n    uint64_t total_uncompressed_bytes_{};\n  };\n  using ByteTrackerOptConstRef = absl::optional<std::reference_wrapper<const ByteTracker>>;\n\n  template <class HeaderType>\n  Http::FilterHeadersStatus\n  maybeInitDecompress(const 
DecompressorFilterConfig::DirectionConfig& direction_config,\n                      Compression::Decompressor::DecompressorPtr& decompressor,\n                      Http::StreamFilterCallbacks& callbacks, HeaderType& headers) {\n    if (direction_config.decompressionEnabled() && !hasCacheControlNoTransform(headers) &&\n        contentEncodingMatches(headers)) {\n      direction_config.stats().decompressed_.inc();\n      decompressor = config_->makeDecompressor();\n\n      // Update headers.\n      headers.removeContentLength();\n      modifyContentEncoding(headers);\n\n      ENVOY_STREAM_LOG(debug, \"do decompress {}: {}\", callbacks, direction_config.logString(),\n                       headers);\n    } else {\n      direction_config.stats().not_decompressed_.inc();\n      ENVOY_STREAM_LOG(debug, \"do not decompress {}: {}\", callbacks, direction_config.logString(),\n                       headers);\n    }\n\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  using HeaderMapOptRef = absl::optional<std::reference_wrapper<Http::HeaderMap>>;\n  void decompress(const DecompressorFilterConfig::DirectionConfig& direction_config,\n                  const Compression::Decompressor::DecompressorPtr& decompressor,\n                  Http::StreamFilterCallbacks& callbacks, Buffer::Instance& input_buffer,\n                  ByteTracker& byte_tracker, HeaderMapOptRef trailers) const;\n\n  // TODO(junr03): These can be shared between compressor and decompressor.\n  template <Http::CustomInlineHeaderRegistry::Type Type>\n  static Http::CustomInlineHeaderRegistry::Handle<Type> getCacheControlHandle();\n  template <class HeaderType> static bool hasCacheControlNoTransform(HeaderType& headers) {\n    const auto handle = getCacheControlHandle<HeaderType::header_map_type>();\n    return headers.getInline(handle)\n               ? 
StringUtil::caseFindToken(\n                     headers.getInlineValue(handle), \",\",\n                     Http::CustomHeaders::get().CacheControlValues.NoTransform)\n               : false;\n  }\n\n  /**\n   * Content-Encoding matches if the configured encoding is the first value in the comma-delimited\n   * Content-Encoding header, regardless of spacing and casing.\n   */\n  template <Http::CustomInlineHeaderRegistry::Type Type>\n  static Http::CustomInlineHeaderRegistry::Handle<Type> getContentEncodingHandle();\n  template <class HeaderType> bool contentEncodingMatches(HeaderType& headers) const {\n    const auto handle = getContentEncodingHandle<HeaderType::header_map_type>();\n    if (headers.getInline(handle)) {\n      absl::string_view coding =\n          StringUtil::trim(StringUtil::cropRight(headers.getInlineValue(handle), \",\"));\n      return StringUtil::CaseInsensitiveCompare()(config_->contentEncoding(), coding);\n    }\n    return false;\n  }\n\n  template <class HeaderType> static void modifyContentEncoding(HeaderType& headers) {\n    const auto handle = getContentEncodingHandle<HeaderType::header_map_type>();\n    const auto all_codings = StringUtil::trim(headers.getInlineValue(handle));\n    const auto remaining_codings = StringUtil::trim(StringUtil::cropLeft(all_codings, \",\"));\n\n    if (remaining_codings != all_codings) {\n      headers.setInline(handle, remaining_codings);\n    } else {\n      headers.removeInline(handle);\n    }\n  }\n\n  DecompressorFilterConfigSharedPtr config_;\n  Compression::Decompressor::DecompressorPtr request_decompressor_{};\n  Compression::Decompressor::DecompressorPtr response_decompressor_{};\n  ByteTracker request_byte_tracker_;\n  ByteTracker response_byte_tracker_;\n};\n\n} // namespace Decompressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/http/dynamic_forward_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"proxy_filter_lib\",\n    srcs = [\"proxy_filter.cc\"],\n    hdrs = [\"proxy_filter.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/runtime:runtime_features_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_interface\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_manager_impl\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/dynamic_forward_proxy:proxy_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/dynamic_forward_proxy/config.cc",
    "content": "#include \"extensions/filters/http/dynamic_forward_proxy/config.h\"\n\n#include \"envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h\"\n#include \"envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.validate.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h\"\n#include \"extensions/filters/http/dynamic_forward_proxy/proxy_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace DynamicForwardProxy {\n\nHttp::FilterFactoryCb DynamicForwardProxyFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig& proto_config,\n    const std::string&, Server::Configuration::FactoryContext& context) {\n  Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory(\n      context.singletonManager(), context.dispatcher(), context.threadLocal(),\n      context.api().randomGenerator(), context.runtime(), context.scope());\n  ProxyFilterConfigSharedPtr filter_config(std::make_shared<ProxyFilterConfig>(\n      proto_config, cache_manager_factory, context.clusterManager()));\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(std::make_shared<ProxyFilter>(filter_config));\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nDynamicForwardProxyFilterFactory::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig& config,\n    Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<const ProxyPerRouteConfig>(config);\n}\n\n/**\n * Static registration for the dynamic forward proxy filter. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(DynamicForwardProxyFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace DynamicForwardProxy\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamic_forward_proxy/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h\"\n#include \"envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace DynamicForwardProxy {\n\n/**\n * Config registration for the dynamic forward proxy filter.\n */\nclass DynamicForwardProxyFilterFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig,\n          envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig> {\npublic:\n  DynamicForwardProxyFilterFactory() : FactoryBase(HttpFilterNames::get().DynamicForwardProxy) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig& config,\n      Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) override;\n};\n\nDECLARE_FACTORY(DynamicForwardProxyFilterFactory);\n\n} // namespace DynamicForwardProxy\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc",
    "content": "#include \"extensions/filters/http/dynamic_forward_proxy/proxy_filter.h\"\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h\"\n\n#include \"common/runtime/runtime_features.h\"\n\n#include \"extensions/clusters/well_known_names.h\"\n#include \"extensions/common/dynamic_forward_proxy/dns_cache.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace DynamicForwardProxy {\n\nstruct ResponseStringValues {\n  const std::string DnsCacheOverflow = \"DNS cache overflow\";\n  const std::string PendingRequestOverflow = \"Dynamic forward proxy pending request overflow\";\n};\n\nusing CustomClusterType = envoy::config::cluster::v3::Cluster::CustomClusterType;\n\nusing ResponseStrings = ConstSingleton<ResponseStringValues>;\n\nusing LoadDnsCacheEntryStatus = Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryStatus;\n\nProxyFilterConfig::ProxyFilterConfig(\n    const envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig& proto_config,\n    Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory& cache_manager_factory,\n    Upstream::ClusterManager& cluster_manager)\n    : dns_cache_manager_(cache_manager_factory.get()),\n      dns_cache_(dns_cache_manager_->getCache(proto_config.dns_cache_config())),\n      cluster_manager_(cluster_manager) {}\n\nProxyPerRouteConfig::ProxyPerRouteConfig(\n    const envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig& config)\n    : host_rewrite_(config.host_rewrite_literal()),\n      host_rewrite_header_(Http::LowerCaseString(config.host_rewrite_header())) {}\n\nvoid ProxyFilter::onDestroy() {\n  // Make sure we destroy any active cache load handle in case we are getting reset and deferred\n  // deleted.\n  cache_load_handle_.reset();\n  
circuit_breaker_.reset();\n}\n\nHttp::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  Router::RouteConstSharedPtr route = decoder_callbacks_->route();\n  const Router::RouteEntry* route_entry;\n  if (!route || !(route_entry = route->routeEntry())) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Upstream::ThreadLocalCluster* cluster = config_->clusterManager().get(route_entry->clusterName());\n  if (!cluster) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  cluster_info_ = cluster->info();\n\n  // We only need to do DNS lookups for hosts in dynamic forward proxy clusters,\n  // since the other cluster types do their own DNS management.\n  const absl::optional<CustomClusterType>& cluster_type = cluster_info_->clusterType();\n  if (!cluster_type) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  if (cluster_type->name() !=\n      Envoy::Extensions::Clusters::ClusterTypes::get().DynamicForwardProxy) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  const bool should_use_dns_cache_circuit_breakers =\n      Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.enable_dns_cache_circuit_breakers\");\n\n  circuit_breaker_ = config_->cache().canCreateDnsRequest(\n      !should_use_dns_cache_circuit_breakers\n          ? 
absl::make_optional(std::reference_wrapper<ResourceLimit>(\n                cluster_info_->resourceManager(route_entry->priority()).pendingRequests()))\n          : absl::nullopt);\n\n  if (circuit_breaker_ == nullptr) {\n    if (!should_use_dns_cache_circuit_breakers) {\n      cluster_info_->stats().upstream_rq_pending_overflow_.inc();\n    }\n    ENVOY_STREAM_LOG(debug, \"pending request overflow\", *this->decoder_callbacks_);\n    this->decoder_callbacks_->sendLocalReply(\n        Http::Code::ServiceUnavailable, ResponseStrings::get().PendingRequestOverflow, nullptr,\n        absl::nullopt, ResponseStrings::get().PendingRequestOverflow);\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  uint16_t default_port = 80;\n  if (cluster_info_->transportSocketMatcher()\n          .resolve(nullptr)\n          .factory_.implementsSecureTransport()) {\n    default_port = 443;\n  }\n\n  // Check for per route filter config.\n  const auto* config = route_entry->mostSpecificPerFilterConfigTyped<ProxyPerRouteConfig>(\n      HttpFilterNames::get().DynamicForwardProxy);\n  if (config != nullptr) {\n    const auto& host_rewrite = config->hostRewrite();\n    if (!host_rewrite.empty()) {\n      headers.setHost(host_rewrite);\n    }\n\n    const auto& host_rewrite_header = config->hostRewriteHeader();\n    if (!host_rewrite_header.get().empty()) {\n      const auto* header = headers.get(host_rewrite_header);\n      if (header != nullptr) {\n        const auto& header_value = header->value().getStringView();\n        headers.setHost(header_value);\n      }\n    }\n  }\n\n  // See the comments in dns_cache.h for how loadDnsCacheEntry() handles hosts with embedded ports.\n  // TODO(mattklein123): Because the filter and cluster have independent configuration, it is\n  //                     not obvious to the user if something is misconfigured. 
We should see if\n  //                     we can do better here, perhaps by checking the cache to see if anything\n  //                     else is attached to it or something else?\n  auto result = config_->cache().loadDnsCacheEntry(headers.Host()->value().getStringView(),\n                                                   default_port, *this);\n  cache_load_handle_ = std::move(result.handle_);\n  if (cache_load_handle_ == nullptr) {\n    circuit_breaker_.reset();\n  }\n\n  switch (result.status_) {\n  case LoadDnsCacheEntryStatus::InCache: {\n    ASSERT(cache_load_handle_ == nullptr);\n    ENVOY_STREAM_LOG(debug, \"DNS cache entry already loaded, continuing\", *decoder_callbacks_);\n    return Http::FilterHeadersStatus::Continue;\n  }\n  case LoadDnsCacheEntryStatus::Loading: {\n    ASSERT(cache_load_handle_ != nullptr);\n    ENVOY_STREAM_LOG(debug, \"waiting to load DNS cache entry\", *decoder_callbacks_);\n    return Http::FilterHeadersStatus::StopAllIterationAndWatermark;\n  }\n  case LoadDnsCacheEntryStatus::Overflow: {\n    ASSERT(cache_load_handle_ == nullptr);\n    ENVOY_STREAM_LOG(debug, \"DNS cache overflow\", *decoder_callbacks_);\n    decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable,\n                                       ResponseStrings::get().DnsCacheOverflow, nullptr,\n                                       absl::nullopt, ResponseStrings::get().DnsCacheOverflow);\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid ProxyFilter::onLoadDnsCacheComplete() {\n  ENVOY_STREAM_LOG(debug, \"load DNS cache complete, continuing\", *decoder_callbacks_);\n  ASSERT(circuit_breaker_ != nullptr);\n  circuit_breaker_.reset();\n  decoder_callbacks_->continueDecoding();\n}\n\n} // namespace DynamicForwardProxy\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache.h\"\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace DynamicForwardProxy {\n\nclass ProxyFilterConfig {\npublic:\n  ProxyFilterConfig(\n      const envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig& proto_config,\n      Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory& cache_manager_factory,\n      Upstream::ClusterManager& cluster_manager);\n\n  Extensions::Common::DynamicForwardProxy::DnsCache& cache() { return *dns_cache_; }\n  Upstream::ClusterManager& clusterManager() { return cluster_manager_; }\n\nprivate:\n  const Extensions::Common::DynamicForwardProxy::DnsCacheManagerSharedPtr dns_cache_manager_;\n  const Extensions::Common::DynamicForwardProxy::DnsCacheSharedPtr dns_cache_;\n  Upstream::ClusterManager& cluster_manager_;\n};\n\nusing ProxyFilterConfigSharedPtr = std::shared_ptr<ProxyFilterConfig>;\n\nclass ProxyPerRouteConfig : public ::Envoy::Router::RouteSpecificFilterConfig {\npublic:\n  ProxyPerRouteConfig(\n      const envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig& config);\n\n  const std::string& hostRewrite() const { return host_rewrite_; }\n  const Http::LowerCaseString& hostRewriteHeader() const { return host_rewrite_header_; }\n\nprivate:\n  const std::string host_rewrite_;\n  const Http::LowerCaseString host_rewrite_header_;\n};\n\nclass ProxyFilter\n    : public Http::PassThroughDecoderFilter,\n      public Extensions::Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryCallbacks,\n      Logger::Loggable<Logger::Id::forward_proxy> {\npublic:\n  ProxyFilter(const ProxyFilterConfigSharedPtr& config) : config_(config) {}\n\n  // 
Http::PassThroughDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  void onDestroy() override;\n\n  // Extensions::Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryCallbacks\n  void onLoadDnsCacheComplete() override;\n\nprivate:\n  const ProxyFilterConfigSharedPtr config_;\n  Upstream::ClusterInfoConstSharedPtr cluster_info_;\n  Upstream::ResourceAutoIncDecPtr circuit_breaker_;\n  Extensions::Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryHandlePtr cache_load_handle_;\n};\n\n} // namespace DynamicForwardProxy\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamo/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# AWS DynamoDB L7 HTTP filter (observability): https://aws.amazon.com/dynamodb/\n# Public docs: docs/root/configuration/http_filters/dynamodb_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"dynamo_filter_lib\",\n    srcs = [\"dynamo_filter.cc\"],\n    hdrs = [\"dynamo_filter.h\"],\n    deps = [\n        \":dynamo_request_parser_lib\",\n        \":dynamo_stats_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:exception_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dynamo_request_parser_lib\",\n    srcs = [\"dynamo_request_parser.cc\"],\n    hdrs = [\"dynamo_request_parser.h\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/json:json_loader_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \":dynamo_filter_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/dynamo/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dynamo_stats_lib\",\n    srcs = [\"dynamo_stats.cc\"],\n    hdrs = [\"dynamo_stats.h\"],\n    deps = [\n        \":dynamo_request_parser_lib\",\n        \"//include/envoy/stats:stats_interface\",\n        
\"//source/common/stats:symbol_table_lib\",\n        \"//source/common/stats:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/dynamo/config.cc",
    "content": "#include \"extensions/filters/http/dynamo/config.h\"\n\n#include <string>\n\n#include \"envoy/extensions/filters/http/dynamo/v3/dynamo.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/dynamo/dynamo_filter.h\"\n#include \"extensions/filters/http/dynamo/dynamo_stats.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\n\nHttp::FilterFactoryCb DynamoFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::dynamo::v3::Dynamo&, const std::string& stats_prefix,\n    Server::Configuration::FactoryContext& context) {\n  auto stats = std::make_shared<DynamoStats>(context.scope(), stats_prefix);\n  return [&context, stats](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<Dynamo::DynamoFilter>(\n        context.runtime(), stats, context.dispatcher().timeSource()));\n  };\n}\n\n/**\n * Static registration for the http dynamodb filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(DynamoFilterConfig,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.http_dynamo_filter\"};\n\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamo/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/extensions/filters/http/dynamo/v3/dynamo.pb.h\"\n#include \"envoy/extensions/filters/http/dynamo/v3/dynamo.pb.validate.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\n\n/**\n * Config registration for http dynamodb filter.\n */\nclass DynamoFilterConfig\n    : public Common::FactoryBase<envoy::extensions::filters::http::dynamo::v3::Dynamo> {\npublic:\n  DynamoFilterConfig() : FactoryBase(HttpFilterNames::get().Dynamo) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::dynamo::v3::Dynamo& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamo/dynamo_filter.cc",
    "content": "#include \"extensions/filters/http/dynamo/dynamo_filter.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/utility.h\"\n#include \"common/json/json_loader.h\"\n\n#include \"extensions/filters/http/dynamo/dynamo_request_parser.h\"\n#include \"extensions/filters/http/dynamo/dynamo_stats.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\n\nHttp::FilterHeadersStatus DynamoFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  if (enabled_) {\n    start_decode_ = time_source_.monotonicTime();\n    operation_ = RequestParser::parseOperation(headers);\n    return Http::FilterHeadersStatus::StopIteration;\n  } else {\n    return Http::FilterHeadersStatus::Continue;\n  }\n}\n\nHttp::FilterDataStatus DynamoFilter::decodeData(Buffer::Instance& data, bool end_stream) {\n  if (enabled_ && end_stream) {\n    onDecodeComplete(data);\n  }\n\n  if (!enabled_ || end_stream) {\n    return Http::FilterDataStatus::Continue;\n  } else {\n    // Buffer until the complete request has been processed.\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n}\n\nHttp::FilterTrailersStatus DynamoFilter::decodeTrailers(Http::RequestTrailerMap&) {\n  if (enabled_) {\n    Buffer::OwnedImpl empty;\n    onDecodeComplete(empty);\n  }\n\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid DynamoFilter::onDecodeComplete(const Buffer::Instance& data) {\n  std::string body = buildBody(decoder_callbacks_->decodingBuffer(), data);\n  if (!body.empty()) {\n    try {\n      Json::ObjectSharedPtr json_body = Json::Factory::loadFromString(body);\n      table_descriptor_ = RequestParser::parseTable(operation_, *json_body);\n    } catch 
(const Json::Exception& jsonEx) {\n      // Body parsing failed. This should not happen, just put a stat for that.\n      stats_->incCounter({stats_->invalid_req_body_});\n    }\n  }\n}\n\nvoid DynamoFilter::onEncodeComplete(const Buffer::Instance& data) {\n  ASSERT(enabled_);\n  uint64_t status = Http::Utility::getResponseStatus(*response_headers_);\n  chargeBasicStats(status);\n\n  std::string body = buildBody(encoder_callbacks_->encodingBuffer(), data);\n  if (!body.empty()) {\n    try {\n      Json::ObjectSharedPtr json_body = Json::Factory::loadFromString(body);\n      chargeTablePartitionIdStats(*json_body);\n\n      if (Http::CodeUtility::is4xx(status)) {\n        chargeFailureSpecificStats(*json_body);\n      }\n      // Batch Operations will always return status 200 for a partial or full success. Check\n      // unprocessed keys to determine partial success.\n      // http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.BatchOperations\n      if (RequestParser::isBatchOperation(operation_)) {\n        chargeUnProcessedKeysStats(*json_body);\n      }\n    } catch (const Json::Exception&) {\n      // Body parsing failed. 
This should not happen, just put a stat for that.\n      stats_->incCounter({stats_->invalid_resp_body_});\n    }\n  }\n}\n\nHttp::FilterHeadersStatus DynamoFilter::encodeHeaders(Http::ResponseHeaderMap& headers,\n                                                      bool end_stream) {\n  Http::FilterHeadersStatus status = Http::FilterHeadersStatus::Continue;\n  if (enabled_) {\n    response_headers_ = &headers;\n\n    if (end_stream) {\n      Buffer::OwnedImpl empty;\n      onEncodeComplete(empty);\n    } else {\n      status = Http::FilterHeadersStatus::StopIteration;\n    }\n  }\n\n  return status;\n}\n\nHttp::FilterDataStatus DynamoFilter::encodeData(Buffer::Instance& data, bool end_stream) {\n  if (enabled_ && end_stream) {\n    onEncodeComplete(data);\n  }\n\n  if (!enabled_ || end_stream) {\n    return Http::FilterDataStatus::Continue;\n  } else {\n    // Buffer until the complete response has been processed.\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n}\n\nHttp::FilterTrailersStatus DynamoFilter::encodeTrailers(Http::ResponseTrailerMap&) {\n  if (enabled_) {\n    Buffer::OwnedImpl empty;\n    onEncodeComplete(empty);\n  }\n\n  return Http::FilterTrailersStatus::Continue;\n}\n\nstd::string DynamoFilter::buildBody(const Buffer::Instance* buffered,\n                                    const Buffer::Instance& last) {\n  std::string body;\n  body.reserve((buffered ? 
buffered->length() : 0) + last.length());\n  if (buffered) {\n    for (const Buffer::RawSlice& slice : buffered->getRawSlices()) {\n      body.append(static_cast<const char*>(slice.mem_), slice.len_);\n    }\n  }\n\n  for (const Buffer::RawSlice& slice : last.getRawSlices()) {\n    body.append(static_cast<const char*>(slice.mem_), slice.len_);\n  }\n\n  return body;\n}\n\nvoid DynamoFilter::chargeBasicStats(uint64_t status) {\n  if (!operation_.empty()) {\n    chargeStatsPerEntity(operation_, \"operation\", status);\n  } else {\n    stats_->incCounter({stats_->operation_missing_});\n  }\n\n  if (!table_descriptor_.table_name.empty()) {\n    chargeStatsPerEntity(table_descriptor_.table_name, \"table\", status);\n  } else if (table_descriptor_.is_single_table) {\n    stats_->incCounter({stats_->table_missing_});\n  } else {\n    stats_->incCounter({stats_->multiple_tables_});\n  }\n}\n\nvoid DynamoFilter::chargeStatsPerEntity(const std::string& entity, const std::string& entity_type,\n                                        uint64_t status) {\n  std::chrono::milliseconds latency = std::chrono::duration_cast<std::chrono::milliseconds>(\n      time_source_.monotonicTime() - start_decode_);\n\n  size_t group_index = DynamoStats::groupIndex(status);\n  Stats::StatNameDynamicPool dynamic(stats_->symbolTable());\n\n  const Stats::StatName entity_type_name =\n      stats_->getBuiltin(entity_type, stats_->unknown_entity_type_);\n  const Stats::StatName entity_name = dynamic.add(entity);\n\n  // TODO(jmarantz): Consider using a similar mechanism to common/http/codes.cc\n  // to avoid creating dynamic stat-names for common statuses.\n  const Stats::StatName total_name = dynamic.add(absl::StrCat(\"upstream_rq_total_\", status));\n  const Stats::StatName time_name = dynamic.add(absl::StrCat(\"upstream_rq_time_\", status));\n\n  stats_->incCounter({entity_type_name, entity_name, stats_->upstream_rq_total_});\n  const Stats::StatName total_group = 
stats_->upstream_rq_total_groups_[group_index];\n  stats_->incCounter({entity_type_name, entity_name, total_group});\n  stats_->incCounter({entity_type_name, entity_name, total_name});\n\n  stats_->recordHistogram({entity_type_name, entity_name, stats_->upstream_rq_time_},\n                          Stats::Histogram::Unit::Milliseconds, latency.count());\n  const Stats::StatName time_group = stats_->upstream_rq_time_groups_[group_index];\n  stats_->recordHistogram({entity_type_name, entity_name, time_group},\n                          Stats::Histogram::Unit::Milliseconds, latency.count());\n  stats_->recordHistogram({entity_type_name, entity_name, time_name},\n                          Stats::Histogram::Unit::Milliseconds, latency.count());\n}\n\nvoid DynamoFilter::chargeUnProcessedKeysStats(const Json::Object& json_body) {\n  // The unprocessed keys block contains a list of tables and keys for that table that did not\n  // complete apart of the batch operation. Only the table names will be logged for errors.\n  std::vector<std::string> unprocessed_tables = RequestParser::parseBatchUnProcessedKeys(json_body);\n  for (const std::string& unprocessed_table : unprocessed_tables) {\n    Stats::StatNameDynamicStorage storage(unprocessed_table, stats_->symbolTable());\n    stats_->incCounter(\n        {stats_->error_, storage.statName(), stats_->batch_failure_unprocessed_keys_});\n  }\n}\n\nvoid DynamoFilter::chargeFailureSpecificStats(const Json::Object& json_body) {\n  std::string error_type = RequestParser::parseErrorType(json_body);\n\n  if (!error_type.empty()) {\n    Stats::StatNameDynamicPool dynamic(stats_->symbolTable());\n    if (table_descriptor_.table_name.empty()) {\n      stats_->incCounter({stats_->error_, stats_->no_table_, dynamic.add(error_type)});\n    } else {\n      stats_->incCounter(\n          {stats_->error_, dynamic.add(table_descriptor_.table_name), dynamic.add(error_type)});\n    }\n  } else {\n    
stats_->incCounter({stats_->empty_response_body_});\n  }\n}\n\nvoid DynamoFilter::chargeTablePartitionIdStats(const Json::Object& json_body) {\n  if (table_descriptor_.table_name.empty() || operation_.empty()) {\n    return;\n  }\n\n  std::vector<RequestParser::PartitionDescriptor> partitions =\n      RequestParser::parsePartitions(json_body);\n  for (const RequestParser::PartitionDescriptor& partition : partitions) {\n    stats_\n        ->buildPartitionStatCounter(table_descriptor_.table_name, operation_,\n                                    partition.partition_id_)\n        .add(partition.capacity_);\n  }\n}\n\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamo/dynamo_filter.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/json/json_loader.h\"\n\n#include \"extensions/filters/http/dynamo/dynamo_request_parser.h\"\n#include \"extensions/filters/http/dynamo/dynamo_stats.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\n\n/**\n * DynamoDb filter to process egress request to dynamo and capture comprehensive stats\n * It captures RPS/latencies:\n *  1) Per table per response code (and group of response codes, e.g., 2xx/3xx/etc)\n *  2) Per operation per response code (and group of response codes, e.g., 2xx/3xx/etc)\n */\nclass DynamoFilter : public Http::StreamFilter {\npublic:\n  DynamoFilter(Runtime::Loader& runtime, const DynamoStatsSharedPtr& stats, TimeSource& time_source)\n      : runtime_(runtime), stats_(stats), time_source_(time_source) {\n    enabled_ = runtime_.snapshot().featureEnabled(\"dynamodb.filter_enabled\", 100);\n  }\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    decoder_callbacks_ = &callbacks;\n  }\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override;\n  Http::FilterTrailersStatus 
encodeTrailers(Http::ResponseTrailerMap&) override;\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override {\n    encoder_callbacks_ = &callbacks;\n  }\n\nprivate:\n  void onDecodeComplete(const Buffer::Instance& data);\n  void onEncodeComplete(const Buffer::Instance& data);\n  std::string buildBody(const Buffer::Instance* buffered, const Buffer::Instance& last);\n  void chargeBasicStats(uint64_t status);\n  void chargeStatsPerEntity(const std::string& entity, const std::string& entity_type,\n                            uint64_t status);\n  void chargeFailureSpecificStats(const Json::Object& json_body);\n  void chargeUnProcessedKeysStats(const Json::Object& json_body);\n  void chargeTablePartitionIdStats(const Json::Object& json_body);\n\n  Runtime::Loader& runtime_;\n  const DynamoStatsSharedPtr stats_;\n\n  bool enabled_{};\n  std::string operation_{};\n  RequestParser::TableDescriptor table_descriptor_{\"\", true};\n  std::string error_type_{};\n  MonotonicTime start_decode_;\n  Http::ResponseHeaderMap* response_headers_;\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_{};\n  Http::StreamEncoderFilterCallbacks* encoder_callbacks_{};\n  TimeSource& time_source_;\n};\n\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamo/dynamo_request_parser.cc",
    "content": "#include \"extensions/filters/http/dynamo/dynamo_request_parser.h\"\n\n#include <cmath>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"common/common/utility.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\n\n/**\n * Basic json request/response format:\n * https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Operations_Amazon_DynamoDB.html\n */\nconst Http::LowerCaseString RequestParser::X_AMZ_TARGET(\"X-AMZ-TARGET\");\n\n// clang-format off\nconst std::vector<std::string> RequestParser::SINGLE_TABLE_OPERATIONS{\n    \"CreateTable\",\n    \"DeleteItem\",\n    \"DeleteTable\",\n    \"DescribeTable\",\n    \"GetItem\",\n    \"PutItem\",\n    \"Query\",\n    \"Scan\",\n    \"UpdateItem\",\n    \"UpdateTable\"};\n\nconst std::vector<std::string> RequestParser::SUPPORTED_ERROR_TYPES{\n    // 4xx\n    \"AccessDeniedException\",\n    \"ConditionalCheckFailedException\",\n    \"IdempotentParameterMismatchException\",\n    \"IncompleteSignatureException\",\n    \"ItemCollectionSizeLimitExceededException\",\n    \"LimitExceededException\",\n    \"MissingAuthenticationTokenException\",\n    \"ProvisionedThroughputExceededException\",\n    \"ResourceInUseException\",\n    \"ResourceNotFoundException\",\n    \"ThrottlingException\",\n    \"TransactionCanceledException\",\n    \"TransactionInProgressException\",\n    \"UnrecognizedClientException\",\n    \"ValidationException\",\n    // Errors not listed in the error handling section of DynamoDB developer guide, but observed in runtime\n    \"InvalidSignatureException\", // https://github.com/aws/aws-sdk-go/issues/2598#issuecomment-526398896\n};\n// clang-format on\n\nconst std::vector<std::string> RequestParser::BATCH_OPERATIONS{\"BatchGetItem\", \"BatchWriteItem\"};\n\nconst std::vector<std::string> RequestParser::TRANSACT_OPERATIONS{\"TransactGetItems\",\n                                     
                             \"TransactWriteItems\"};\nconst std::vector<std::string> RequestParser::TRANSACT_ITEM_OPERATIONS{\"ConditionCheck\", \"Delete\",\n                                                                       \"Get\", \"Put\", \"Update\"};\n\nstd::string RequestParser::parseOperation(const Http::HeaderMap& header_map) {\n  std::string operation;\n\n  const Http::HeaderEntry* x_amz_target = header_map.get(X_AMZ_TARGET);\n  if (x_amz_target) {\n    // Normally x-amz-target contains Version.Operation, e.g., DynamoDB_20160101.GetItem\n    auto version_and_operation = StringUtil::splitToken(x_amz_target->value().getStringView(), \".\");\n    if (version_and_operation.size() == 2) {\n      operation = std::string{version_and_operation[1]};\n    }\n  }\n\n  return operation;\n}\n\nRequestParser::TableDescriptor RequestParser::parseTable(const std::string& operation,\n                                                         const Json::Object& json_data) {\n  TableDescriptor table{\"\", true};\n\n  // Simple operations on a single table, have \"TableName\" explicitly specified.\n  if (find(SINGLE_TABLE_OPERATIONS.begin(), SINGLE_TABLE_OPERATIONS.end(), operation) !=\n      SINGLE_TABLE_OPERATIONS.end()) {\n    table.table_name = json_data.getString(\"TableName\", \"\");\n  } else if (find(BATCH_OPERATIONS.begin(), BATCH_OPERATIONS.end(), operation) !=\n             BATCH_OPERATIONS.end()) {\n    Json::ObjectSharedPtr tables = json_data.getObject(\"RequestItems\", true);\n    tables->iterate([&table](const std::string& key, const Json::Object&) {\n      if (table.table_name.empty()) {\n        table.table_name = key;\n      } else {\n        if (table.table_name != key) {\n          table.table_name = \"\";\n          table.is_single_table = false;\n          return false;\n        }\n      }\n      return true;\n    });\n  } else if (find(TRANSACT_OPERATIONS.begin(), TRANSACT_OPERATIONS.end(), operation) !=\n             TRANSACT_OPERATIONS.end()) {\n  
  std::vector<Json::ObjectSharedPtr> transact_items =\n        json_data.getObjectArray(\"TransactItems\", true);\n    for (const Json::ObjectSharedPtr& transact_item : transact_items) {\n      const auto next_table_name = getTableNameFromTransactItem(*transact_item);\n      if (!next_table_name.has_value()) {\n        // if an operation is missing a table name, we want to throw the normal set of errors\n        table.table_name = \"\";\n        table.is_single_table = true;\n        break;\n      }\n      if (table.table_name.empty()) {\n        table.table_name = next_table_name.value();\n      } else if (table.table_name != next_table_name.value()) {\n        table.table_name = \"\";\n        table.is_single_table = false;\n        break;\n      }\n    }\n  }\n  return table;\n}\n\nabsl::optional<std::string>\nRequestParser::getTableNameFromTransactItem(const Json::Object& transact_item) {\n  for (const std::string& operation : TRANSACT_ITEM_OPERATIONS) {\n    Json::ObjectSharedPtr item = transact_item.getObject(operation, true);\n    std::string table_name = item->getString(\"TableName\", \"\");\n    if (!table_name.empty()) {\n      return absl::make_optional(table_name);\n    }\n  }\n  return absl::nullopt;\n}\n\nstd::vector<std::string> RequestParser::parseBatchUnProcessedKeys(const Json::Object& json_data) {\n  std::vector<std::string> unprocessed_tables;\n  Json::ObjectSharedPtr tables = json_data.getObject(\"UnprocessedKeys\", true);\n  tables->iterate([&unprocessed_tables](const std::string& key, const Json::Object&) {\n    unprocessed_tables.emplace_back(key);\n    return true;\n  });\n\n  return unprocessed_tables;\n}\n\nstd::string RequestParser::parseErrorType(const Json::Object& json_data) {\n  std::string error_type = json_data.getString(\"__type\", \"\");\n  if (error_type.empty()) {\n    return \"\";\n  }\n\n  for (const std::string& supported_error_type : SUPPORTED_ERROR_TYPES) {\n    if (absl::EndsWith(error_type, supported_error_type)) {\n     
 return supported_error_type;\n    }\n  }\n\n  return \"\";\n}\n\nbool RequestParser::isBatchOperation(const std::string& operation) {\n  return find(BATCH_OPERATIONS.begin(), BATCH_OPERATIONS.end(), operation) !=\n         BATCH_OPERATIONS.end();\n}\n\nstd::vector<RequestParser::PartitionDescriptor>\nRequestParser::parsePartitions(const Json::Object& json_data) {\n  std::vector<RequestParser::PartitionDescriptor> partition_descriptors;\n\n  Json::ObjectSharedPtr partitions =\n      json_data.getObject(\"ConsumedCapacity\", true)->getObject(\"Partitions\", true);\n  partitions->iterate([&partition_descriptors, &partitions](const std::string& key,\n                                                            const Json::Object&) {\n    // For a given partition id, the amount of capacity used is returned in the body as a double.\n    // A stat will be created to track the capacity consumed for the operation, table and partition.\n    // Stats counter only increments by whole numbers, capacity is round up to the nearest integer\n    // to account for this.\n    uint64_t capacity_integer = static_cast<uint64_t>(std::ceil(partitions->getDouble(key, 0.0)));\n    partition_descriptors.emplace_back(key, capacity_integer);\n    return true;\n  });\n\n  return partition_descriptors;\n}\n\nvoid RequestParser::forEachStatString(const StringFn& fn) {\n  for (const std::string& str : SINGLE_TABLE_OPERATIONS) {\n    fn(str);\n  }\n  for (const std::string& str : SUPPORTED_ERROR_TYPES) {\n    fn(str);\n  }\n  for (const std::string& str : BATCH_OPERATIONS) {\n    fn(str);\n  }\n  for (const std::string& str : TRANSACT_OPERATIONS) {\n    fn(str);\n  }\n  for (const std::string& str : TRANSACT_ITEM_OPERATIONS) {\n    fn(str);\n  }\n}\n\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamo/dynamo_request_parser.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/json/json_loader.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\n\n/**\n * Request parser for dynamodb request/response.\n *\n * Basic dynamodb json request/response format:\n * http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Appendix.CurrentAPI.html\n */\nclass RequestParser {\npublic:\n  struct TableDescriptor {\n  public:\n    std::string table_name;\n    bool is_single_table;\n  };\n\n  struct PartitionDescriptor {\n    PartitionDescriptor(const std::string& partition, uint64_t capacity)\n        : partition_id_(partition), capacity_(capacity) {}\n    std::string partition_id_;\n    uint64_t capacity_;\n  };\n\n  /**\n   * Parse operation out of x-amz-target header.\n   * @return empty string if operation cannot be parsed.\n   */\n  static std::string parseOperation(const Http::HeaderMap& header_map);\n\n  /**\n   * Parse table name out of data, based on the operation.\n   * @return empty string as TableDescriptor.table_name if table name cannot be parsed out of valid\n   *json data\n   * or if operation is not in the list of operations that we support.\n   *\n   * For simple operations on single table, e.g., GetItem, PutItem, Query etc @return table\n   * name in TableDescriptor.table_name.\n   *\n   * For batch operations, e.g. 
BatchGetItem/BatchWriteItem, @return table name in\n   *TableDescriptor.table_name if it's only one\n   * table used in all operations, @return empty string in TableDescriptor.table_name and\n   *TableDescriptor.is_single_table=false in case of multiple.\n   *\n   * @throw Json::Exception if data is not in valid Json format.\n   */\n  static TableDescriptor parseTable(const std::string& operation, const Json::Object& json_data);\n\n  /**\n   * @return string name of table in transaction object, or empty string if none\n   */\n  static absl::optional<std::string>\n  getTableNameFromTransactItem(const Json::Object& transact_item);\n\n  /**\n   * Parse error details which might be provided for a given response code.\n   * @return empty string if cannot get error details.\n   * For the full list of errors, see\n   * http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/CommonErrors.html\n   * Operation specific errors, for example, error section of\n   * http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html\n   *\n   * @throw Json::Exception if data is not in valid Json format.\n   */\n  static std::string parseErrorType(const Json::Object& json_data);\n\n  /**\n   * Parse unprocessed keys for batch operation results.\n   * @return empty set if there are no unprocessed keys or a set of table names that did not get\n   * processed in the batch operation.\n   */\n  static std::vector<std::string> parseBatchUnProcessedKeys(const Json::Object& json_data);\n\n  /**\n   * @return true if the operation is in the set of supported BATCH_OPERATIONS\n   */\n  static bool isBatchOperation(const std::string& operation);\n\n  /**\n   * Parse the Partition ids and the consumed capacity from the body.\n   * @return empty set if there is no partition data or a set of partition data containing\n   * the partition id as a string and the capacity consumed as an integer.\n   *\n   * @throw Json::Exception if data is not in valid Json format.\n   */\n  
static std::vector<PartitionDescriptor> parsePartitions(const Json::Object& json_data);\n\n  using StringFn = std::function<void(const std::string&)>;\n\n  /**\n   * Calls a function for every string that is likely to be included as a token\n   * in a stat. This is not functionally necessary, but can reduce potentially\n   * contented access to create entries in the symbol table in the hot path.\n   *\n   * @param fn the function to call for every potential stat name.\n   */\n  static void forEachStatString(const StringFn& fn);\n\nprivate:\n  static const Http::LowerCaseString X_AMZ_TARGET;\n  static const std::vector<std::string> SINGLE_TABLE_OPERATIONS;\n  static const std::vector<std::string> BATCH_OPERATIONS;\n  static const std::vector<std::string> TRANSACT_OPERATIONS;\n  static const std::vector<std::string> TRANSACT_ITEM_OPERATIONS;\n\n  // http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html\n  static const std::vector<std::string> SUPPORTED_ERROR_TYPES;\n\n  RequestParser() = default;\n};\n\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamo/dynamo_stats.cc",
    "content": "#include \"extensions/filters/http/dynamo/dynamo_stats.h\"\n\n#include <memory>\n#include <string>\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/filters/http/dynamo/dynamo_request_parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\n\nDynamoStats::DynamoStats(Stats::Scope& scope, const std::string& prefix)\n    : scope_(scope), stat_name_set_(scope.symbolTable().makeSet(\"Dynamo\")),\n      prefix_(stat_name_set_->add(prefix + \"dynamodb\")),\n      batch_failure_unprocessed_keys_(stat_name_set_->add(\"BatchFailureUnprocessedKeys\")),\n      capacity_(stat_name_set_->add(\"capacity\")),\n      empty_response_body_(stat_name_set_->add(\"empty_response_body\")),\n      error_(stat_name_set_->add(\"error\")),\n      invalid_req_body_(stat_name_set_->add(\"invalid_req_body\")),\n      invalid_resp_body_(stat_name_set_->add(\"invalid_resp_body\")),\n      multiple_tables_(stat_name_set_->add(\"multiple_tables\")),\n      no_table_(stat_name_set_->add(\"no_table\")),\n      operation_missing_(stat_name_set_->add(\"operation_missing\")),\n      table_(stat_name_set_->add(\"table\")), table_missing_(stat_name_set_->add(\"table_missing\")),\n      upstream_rq_time_(stat_name_set_->add(\"upstream_rq_time\")),\n      upstream_rq_total_(stat_name_set_->add(\"upstream_rq_total\")),\n      unknown_entity_type_(stat_name_set_->add(\"unknown_entity_type\")),\n      unknown_operation_(stat_name_set_->add(\"unknown_operation\")) {\n  upstream_rq_total_groups_[0] = stat_name_set_->add(\"upstream_rq_total_unknown\");\n  upstream_rq_time_groups_[0] = stat_name_set_->add(\"upstream_rq_time_unknown\");\n  for (size_t i = 1; i < DynamoStats::NumGroupEntries; ++i) {\n    upstream_rq_total_groups_[i] = stat_name_set_->add(fmt::format(\"upstream_rq_total_{}xx\", i));\n    upstream_rq_time_groups_[i] = stat_name_set_->add(fmt::format(\"upstream_rq_time_{}xx\", 
i));\n  }\n  RequestParser::forEachStatString(\n      [this](const std::string& str) { stat_name_set_->rememberBuiltin(str); });\n  for (uint32_t status_code : {200, 400, 403, 502}) {\n    stat_name_set_->rememberBuiltin(absl::StrCat(\"upstream_rq_time_\", status_code));\n    stat_name_set_->rememberBuiltin(absl::StrCat(\"upstream_rq_total_\", status_code));\n  }\n  stat_name_set_->rememberBuiltins({\"operation\", \"table\"});\n}\n\nStats::ElementVec DynamoStats::addPrefix(const Stats::ElementVec& names) {\n  Stats::ElementVec names_with_prefix;\n  names_with_prefix.reserve(1 + names.size());\n  names_with_prefix.push_back(prefix_);\n  names_with_prefix.insert(names_with_prefix.end(), names.begin(), names.end());\n  return names_with_prefix;\n}\n\nvoid DynamoStats::incCounter(const Stats::ElementVec& names) {\n  Stats::Utility::counterFromElements(scope_, addPrefix(names)).inc();\n}\n\nvoid DynamoStats::recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit,\n                                  uint64_t value) {\n  Stats::Utility::histogramFromElements(scope_, addPrefix(names), unit).recordValue(value);\n}\n\nStats::Counter& DynamoStats::buildPartitionStatCounter(const std::string& table_name,\n                                                       const std::string& operation,\n                                                       const std::string& partition_id) {\n  // Use the last 7 characters of the partition id.\n  absl::string_view id_last_7 = absl::string_view(partition_id).substr(partition_id.size() - 7);\n  std::string partition = absl::StrCat(\"__partition_id=\", id_last_7);\n  return Stats::Utility::counterFromElements(\n      scope_,\n      addPrefix({table_, Stats::DynamicName(table_name), capacity_,\n                 getBuiltin(operation, unknown_operation_), Stats::DynamicName(partition)}));\n}\n\nsize_t DynamoStats::groupIndex(uint64_t status) {\n  size_t index = status / 100;\n  if (index >= NumGroupEntries) {\n    index = 0; 
// status-code 600 or higher is unknown.\n  }\n  return index;\n}\n\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/dynamo/dynamo_stats.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\n\nclass DynamoStats {\npublic:\n  DynamoStats(Stats::Scope& scope, const std::string& prefix);\n\n  void incCounter(const Stats::ElementVec& names);\n  void recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit, uint64_t value);\n\n  /**\n   * Creates the partition id stats string. The stats format is\n   * \"<stat_prefix>table.<table_name>.capacity.<operation>.__partition_id=<partition_id>\".\n   * Partition ids and dynamodb table names can be long. To satisfy the string\n   * length, we truncate, taking only the last 7 characters of the partition id.\n   */\n  Stats::Counter& buildPartitionStatCounter(const std::string& table_name,\n                                            const std::string& operation,\n                                            const std::string& partition_id);\n\n  static size_t groupIndex(uint64_t status);\n\n  /**\n   * Finds a StatName by string.\n   */\n  Stats::StatName getBuiltin(const std::string& str, Stats::StatName fallback) {\n    return stat_name_set_->getBuiltin(str, fallback);\n  }\n\n  Stats::SymbolTable& symbolTable() { return scope_.symbolTable(); }\n\nprivate:\n  Stats::ElementVec addPrefix(const Stats::ElementVec& names);\n\n  Stats::Scope& scope_;\n  Stats::StatNameSetPtr stat_name_set_;\n  const Stats::StatName prefix_;\n\npublic:\n  const Stats::StatName batch_failure_unprocessed_keys_;\n  const Stats::StatName capacity_;\n  const Stats::StatName empty_response_body_;\n  const Stats::StatName error_;\n  const Stats::StatName invalid_req_body_;\n  const Stats::StatName invalid_resp_body_;\n  const Stats::StatName multiple_tables_;\n  const Stats::StatName no_table_;\n  const Stats::StatName operation_missing_;\n  const 
Stats::StatName table_;\n  const Stats::StatName table_missing_;\n  const Stats::StatName upstream_rq_time_;\n  const Stats::StatName upstream_rq_total_;\n  const Stats::StatName upstream_rq_unknown_;\n  const Stats::StatName unknown_entity_type_;\n  const Stats::StatName unknown_operation_;\n\n  // Keep group codes for HTTP status codes through the 500s.\n  static constexpr size_t NumGroupEntries = 6;\n  Stats::StatName upstream_rq_total_groups_[NumGroupEntries];\n  Stats::StatName upstream_rq_time_groups_[NumGroupEntries];\n};\nusing DynamoStatsSharedPtr = std::shared_ptr<DynamoStats>;\n\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ext_authz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# External authorization L7 HTTP filter\n# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/http_filters\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"ext_authz\",\n    srcs = [\"ext_authz.cc\"],\n    hdrs = [\"ext_authz.h\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/router:config_lib\",\n        \"//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib\",\n        \"//source/extensions/filters/common/ext_authz:ext_authz_http_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":ext_authz\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/common/ext_authz:ext_authz_http_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/ext_authz/config.cc",
    "content": "#include \"extensions/filters/http/ext_authz/config.h\"\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz_grpc_impl.h\"\n#include \"extensions/filters/common/ext_authz/ext_authz_http_impl.h\"\n#include \"extensions/filters/http/ext_authz/ext_authz.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace ExtAuthz {\n\nHttp::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& proto_config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n  const auto filter_config = std::make_shared<FilterConfig>(\n      proto_config, context.scope(), context.runtime(), context.httpContext(), stats_prefix);\n  Http::FilterFactoryCb callback;\n\n  if (proto_config.has_http_service()) {\n    // Raw HTTP client.\n    const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.http_service().server_uri(),\n                                                           timeout, DefaultTimeout);\n    const auto client_config =\n        std::make_shared<Extensions::Filters::Common::ExtAuthz::ClientConfig>(\n            proto_config, timeout_ms, proto_config.http_service().path_prefix());\n    callback = [filter_config, client_config,\n                &context](Http::FilterChainFactoryCallbacks& callbacks) {\n      auto client = std::make_unique<Extensions::Filters::Common::ExtAuthz::RawHttpClientImpl>(\n          context.clusterManager(), client_config);\n      callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{\n     
     std::make_shared<Filter>(filter_config, std::move(client))});\n    };\n  } else if (proto_config.grpc_service().has_google_grpc()) {\n    // Google gRPC client.\n    const uint32_t timeout_ms =\n        PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout);\n    auto async_client_cache = getAsyncClientCacheSingleton(context);\n    callback = [async_client_cache, filter_config, timeout_ms, proto_config,\n                transport_api_version = proto_config.transport_api_version(),\n                use_alpha = proto_config.hidden_envoy_deprecated_use_alpha()](\n                   Http::FilterChainFactoryCallbacks& callbacks) {\n      auto client = std::make_unique<Filters::Common::ExtAuthz::GrpcClientImpl>(\n          async_client_cache->getOrCreateAsyncClient(proto_config),\n          std::chrono::milliseconds(timeout_ms), transport_api_version, use_alpha);\n      callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{\n          std::make_shared<Filter>(filter_config, std::move(client))});\n    };\n  } else {\n    // Envoy gRPC client.\n    const uint32_t timeout_ms =\n        PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout);\n    callback = [grpc_service = proto_config.grpc_service(), &context, filter_config, timeout_ms,\n                transport_api_version = proto_config.transport_api_version(),\n                use_alpha = proto_config.hidden_envoy_deprecated_use_alpha()](\n                   Http::FilterChainFactoryCallbacks& callbacks) {\n      const auto async_client_factory =\n          context.clusterManager().grpcAsyncClientManager().factoryForGrpcService(\n              grpc_service, context.scope(), true);\n      auto client = std::make_unique<Filters::Common::ExtAuthz::GrpcClientImpl>(\n          async_client_factory->create(), std::chrono::milliseconds(timeout_ms),\n          transport_api_version, use_alpha);\n      
callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{\n          std::make_shared<Filter>(filter_config, std::move(client))});\n    };\n  }\n\n  return callback;\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nExtAuthzFilterConfig::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute& proto_config,\n    Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<FilterConfigPerRoute>(proto_config);\n}\n\n/**\n * Static registration for the external authorization filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(ExtAuthzFilterConfig,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.ext_authz\"};\n\nSINGLETON_MANAGER_REGISTRATION(google_grpc_async_client_cache);\n\nFilters::Common::ExtAuthz::AsyncClientCacheSharedPtr\ngetAsyncClientCacheSingleton(Server::Configuration::FactoryContext& context) {\n  return context.singletonManager().getTyped<Filters::Common::ExtAuthz::AsyncClientCache>(\n      SINGLETON_MANAGER_REGISTERED_NAME(google_grpc_async_client_cache), [&context] {\n        return std::make_shared<Filters::Common::ExtAuthz::AsyncClientCache>(\n            context.clusterManager().grpcAsyncClientManager(), context.scope(),\n            context.threadLocal());\n      });\n}\n\n} // namespace ExtAuthz\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ext_authz/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.validate.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz_grpc_impl.h\"\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace ExtAuthz {\n\n/**\n * Config registration for the external authorization filter. @see NamedHttpFilterConfigFactory.\n */\nclass ExtAuthzFilterConfig\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::ext_authz::v3::ExtAuthz,\n          envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute> {\npublic:\n  ExtAuthzFilterConfig() : FactoryBase(HttpFilterNames::get().ExtAuthorization) {}\n\nprivate:\n  static constexpr uint64_t DefaultTimeout = 200;\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute& proto_config,\n      Server::Configuration::ServerFactoryContext& context,\n      ProtobufMessage::ValidationVisitor& validator) override;\n};\n\nFilters::Common::ExtAuthz::AsyncClientCacheSharedPtr\ngetAsyncClientCacheSingleton(Server::Configuration::FactoryContext& context);\n\n} // namespace ExtAuthz\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ext_authz/ext_authz.cc",
    "content": "#include \"extensions/filters/http/ext_authz/ext_authz.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/matchers.h\"\n#include \"common/http/utility.h\"\n#include \"common/router/config_impl.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace ExtAuthz {\n\nstruct RcDetailsValues {\n  // The ext_authz filter denied the downstream request.\n  const std::string AuthzDenied = \"ext_authz_denied\";\n  // The ext_authz filter encountered a failure, and was configured to fail-closed.\n  const std::string AuthzError = \"ext_authz_error\";\n};\nusing RcDetails = ConstSingleton<RcDetailsValues>;\n\nvoid FilterConfigPerRoute::merge(const FilterConfigPerRoute& other) {\n  // We only merge context extensions here, and leave boolean flags untouched since those flags are\n  // not used from the merged config.\n  auto begin_it = other.context_extensions_.begin();\n  auto end_it = other.context_extensions_.end();\n  for (auto it = begin_it; it != end_it; ++it) {\n    context_extensions_[it->first] = it->second;\n  }\n}\n\nvoid Filter::initiateCall(const Http::RequestHeaderMap& headers,\n                          const Router::RouteConstSharedPtr& route) {\n  if (filter_return_ == FilterReturn::StopDecoding) {\n    return;\n  }\n\n  auto&& maybe_merged_per_route_config =\n      Http::Utility::getMergedPerFilterConfig<FilterConfigPerRoute>(\n          HttpFilterNames::get().ExtAuthorization, route,\n          [](FilterConfigPerRoute& cfg_base, const FilterConfigPerRoute& cfg) {\n            cfg_base.merge(cfg);\n          });\n\n  Protobuf::Map<std::string, std::string> context_extensions;\n  if (maybe_merged_per_route_config) {\n    context_extensions = maybe_merged_per_route_config.value().takeContextExtensions();\n  }\n\n  // If metadata_context_namespaces is 
specified, pass matching metadata to the ext_authz service.\n  envoy::config::core::v3::Metadata metadata_context;\n  const auto& request_metadata = callbacks_->streamInfo().dynamicMetadata().filter_metadata();\n  for (const auto& context_key : config_->metadataContextNamespaces()) {\n    const auto& metadata_it = request_metadata.find(context_key);\n    if (metadata_it != request_metadata.end()) {\n      (*metadata_context.mutable_filter_metadata())[metadata_it->first] = metadata_it->second;\n    }\n  }\n\n  Filters::Common::ExtAuthz::CheckRequestUtils::createHttpCheck(\n      callbacks_, headers, std::move(context_extensions), std::move(metadata_context),\n      check_request_, config_->maxRequestBytes(), config_->packAsBytes(),\n      config_->includePeerCertificate());\n\n  ENVOY_STREAM_LOG(trace, \"ext_authz filter calling authorization server\", *callbacks_);\n  state_ = State::Calling;\n  filter_return_ = FilterReturn::StopDecoding; // Don't let the filter chain continue as we are\n                                               // going to invoke check call.\n  cluster_ = callbacks_->clusterInfo();\n  initiating_call_ = true;\n  client_->check(*this, callbacks_->dispatcher(), check_request_, callbacks_->activeSpan(),\n                 callbacks_->streamInfo());\n  initiating_call_ = false;\n}\n\nHttp::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) {\n  Router::RouteConstSharedPtr route = callbacks_->route();\n  const auto per_route_flags = getPerRouteFlags(route);\n  skip_check_ = per_route_flags.skip_check_;\n  if (skip_check_) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (!config_->filterEnabled(callbacks_->streamInfo().dynamicMetadata())) {\n    stats_.disabled_.inc();\n    if (config_->denyAtDisable()) {\n      ENVOY_STREAM_LOG(trace, \"ext_authz filter is disabled. 
Deny the request.\", *callbacks_);\n      callbacks_->streamInfo().setResponseFlag(\n          StreamInfo::ResponseFlag::UnauthorizedExternalService);\n      callbacks_->sendLocalReply(config_->statusOnError(), EMPTY_STRING, nullptr, absl::nullopt,\n                                 RcDetails::get().AuthzError);\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  request_headers_ = &headers;\n  buffer_data_ = config_->withRequestBody() && !per_route_flags.skip_request_body_buffering_ &&\n                 !(end_stream || Http::Utility::isWebSocketUpgradeRequest(headers) ||\n                   Http::Utility::isH2UpgradeRequest(headers));\n\n  if (buffer_data_) {\n    ENVOY_STREAM_LOG(debug, \"ext_authz filter is buffering the request\", *callbacks_);\n    if (!config_->allowPartialMessage()) {\n      callbacks_->setDecoderBufferLimit(config_->maxRequestBytes());\n    }\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  // Initiate a call to the authorization server since we are not disabled.\n  initiateCall(headers, route);\n  return filter_return_ == FilterReturn::StopDecoding\n             ? Http::FilterHeadersStatus::StopAllIterationAndWatermark\n             : Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_stream) {\n  if (buffer_data_ && !skip_check_) {\n    const bool buffer_is_full = isBufferFull();\n    if (end_stream || buffer_is_full) {\n      ENVOY_STREAM_LOG(debug, \"ext_authz filter finished buffering the request since {}\",\n                       *callbacks_, buffer_is_full ? \"buffer is full\" : \"stream is ended\");\n      if (!buffer_is_full) {\n        // Make sure data is available in initiateCall.\n        callbacks_->addDecodedData(data, true);\n      }\n      initiateCall(*request_headers_, callbacks_->route());\n      return filter_return_ == FilterReturn::StopDecoding\n                 ? 
Http::FilterDataStatus::StopIterationAndWatermark\n                 : Http::FilterDataStatus::Continue;\n    } else {\n      return Http::FilterDataStatus::StopIterationAndBuffer;\n    }\n  }\n\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus Filter::decodeTrailers(Http::RequestTrailerMap&) {\n  if (buffer_data_ && !skip_check_) {\n    if (filter_return_ != FilterReturn::StopDecoding) {\n      ENVOY_STREAM_LOG(debug, \"ext_authz filter finished buffering the request\", *callbacks_);\n      initiateCall(*request_headers_, callbacks_->route());\n    }\n    return filter_return_ == FilterReturn::StopDecoding ? Http::FilterTrailersStatus::StopIteration\n                                                        : Http::FilterTrailersStatus::Continue;\n  }\n\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n}\n\nvoid Filter::onDestroy() {\n  if (state_ == State::Calling) {\n    state_ = State::Complete;\n    client_->cancel();\n  }\n}\n\nvoid Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) {\n  state_ = State::Complete;\n  using Filters::Common::ExtAuthz::CheckStatus;\n  Stats::StatName empty_stat_name;\n\n  switch (response->status) {\n  case CheckStatus::OK: {\n    // Any changes to request headers can affect how the request is going to be\n    // routed. 
If we are changing the headers we also need to clear the route\n    // cache.\n    if (config_->clearRouteCache() &&\n        (!response->headers_to_set.empty() || !response->headers_to_append.empty() ||\n         !response->headers_to_remove.empty())) {\n      ENVOY_STREAM_LOG(debug, \"ext_authz is clearing route cache\", *callbacks_);\n      callbacks_->clearRouteCache();\n    }\n\n    ENVOY_STREAM_LOG(trace, \"ext_authz filter added header(s) to the request:\", *callbacks_);\n    for (const auto& header : response->headers_to_set) {\n      ENVOY_STREAM_LOG(trace, \"'{}':'{}'\", *callbacks_, header.first.get(), header.second);\n      request_headers_->setCopy(header.first, header.second);\n    }\n    for (const auto& header : response->headers_to_add) {\n      ENVOY_STREAM_LOG(trace, \"'{}':'{}'\", *callbacks_, header.first.get(), header.second);\n      request_headers_->addCopy(header.first, header.second);\n    }\n    for (const auto& header : response->headers_to_append) {\n      const Http::HeaderEntry* header_to_modify = request_headers_->get(header.first);\n      // TODO(dio): Add a flag to allow appending non-existent headers, without setting it first\n      // (via `headers_to_add`). For example, given:\n      // 1. Original headers {\"original\": \"true\"}\n      // 2. Response headers from the authorization servers {{\"append\": \"1\"}, {\"append\": \"2\"}}\n      //\n      // Currently it is not possible to add {{\"append\": \"1\"}, {\"append\": \"2\"}} (the intended\n      // combined headers: {{\"original\": \"true\"}, {\"append\": \"1\"}, {\"append\": \"2\"}}) to the request\n      // to upstream server by only sets `headers_to_append`.\n      if (header_to_modify != nullptr) {\n        ENVOY_STREAM_LOG(trace, \"'{}':'{}'\", *callbacks_, header.first.get(), header.second);\n        // The current behavior of appending is by combining entries with the same key, into one\n        // entry. 
The value of that combined entry is separated by \",\".\n        // TODO(dio): Consider to use addCopy instead.\n        request_headers_->appendCopy(header.first, header.second);\n      }\n    }\n\n    ENVOY_STREAM_LOG(trace, \"ext_authz filter removed header(s) from the request:\", *callbacks_);\n    for (const auto& header : response->headers_to_remove) {\n      // We don't allow removing any :-prefixed headers, nor Host, as removing\n      // them would make the request malformed.\n      if (absl::StartsWithIgnoreCase(absl::string_view(header.get()), \":\") ||\n          header == Http::Headers::get().HostLegacy) {\n        continue;\n      }\n      ENVOY_STREAM_LOG(trace, \"'{}'\", *callbacks_, header.get());\n      request_headers_->remove(header);\n    }\n\n    if (!response->dynamic_metadata.fields().empty()) {\n      callbacks_->streamInfo().setDynamicMetadata(HttpFilterNames::get().ExtAuthorization,\n                                                  response->dynamic_metadata);\n    }\n\n    if (cluster_) {\n      config_->incCounter(cluster_->statsScope(), config_->ext_authz_ok_);\n    }\n    stats_.ok_.inc();\n    continueDecoding();\n    break;\n  }\n\n  case CheckStatus::Denied: {\n    ENVOY_STREAM_LOG(trace, \"ext_authz filter rejected the request. 
Response status code: '{}\",\n                     *callbacks_, enumToInt(response->status_code));\n    stats_.denied_.inc();\n\n    if (cluster_) {\n      config_->incCounter(cluster_->statsScope(), config_->ext_authz_denied_);\n\n      Http::CodeStats::ResponseStatInfo info{config_->scope(),\n                                             cluster_->statsScope(),\n                                             empty_stat_name,\n                                             enumToInt(response->status_code),\n                                             true,\n                                             empty_stat_name,\n                                             empty_stat_name,\n                                             empty_stat_name,\n                                             empty_stat_name,\n                                             false};\n      config_->httpContext().codeStats().chargeResponseStat(info);\n    }\n\n    callbacks_->sendLocalReply(\n        response->status_code, response->body,\n        [&headers = response->headers_to_set,\n         &callbacks = *callbacks_](Http::HeaderMap& response_headers) -> void {\n          ENVOY_STREAM_LOG(trace,\n                           \"ext_authz filter added header(s) to the local response:\", callbacks);\n          // Firstly, remove all headers requested by the ext_authz filter, to ensure that they will\n          // override existing headers.\n          for (const auto& header : headers) {\n            response_headers.remove(header.first);\n          }\n          // Then set all of the requested headers, allowing the same header to be set multiple\n          // times, e.g. 
`Set-Cookie`.\n          for (const auto& header : headers) {\n            ENVOY_STREAM_LOG(trace, \" '{}':'{}'\", callbacks, header.first.get(), header.second);\n            response_headers.addCopy(header.first, header.second);\n          }\n        },\n        absl::nullopt, RcDetails::get().AuthzDenied);\n    callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UnauthorizedExternalService);\n    break;\n  }\n\n  case CheckStatus::Error: {\n    if (cluster_) {\n      config_->incCounter(cluster_->statsScope(), config_->ext_authz_error_);\n      if (response->error_kind == Filters::Common::ExtAuthz::ErrorKind::Timedout) {\n        config_->incCounter(cluster_->statsScope(), config_->ext_authz_timeout_);\n      }\n    }\n    stats_.error_.inc();\n    if (response->error_kind == Filters::Common::ExtAuthz::ErrorKind::Timedout) {\n      stats_.timeout_.inc();\n    }\n    if (config_->failureModeAllow()) {\n      ENVOY_STREAM_LOG(trace, \"ext_authz filter allowed the request with error\", *callbacks_);\n      stats_.failure_mode_allowed_.inc();\n      if (cluster_) {\n        config_->incCounter(cluster_->statsScope(), config_->ext_authz_failure_mode_allowed_);\n      }\n      continueDecoding();\n    } else {\n      ENVOY_STREAM_LOG(\n          trace, \"ext_authz filter rejected the request with an error. 
Response status code: {}\",\n          *callbacks_, enumToInt(config_->statusOnError()));\n      callbacks_->streamInfo().setResponseFlag(\n          StreamInfo::ResponseFlag::UnauthorizedExternalService);\n      callbacks_->sendLocalReply(config_->statusOnError(), EMPTY_STRING, nullptr, absl::nullopt,\n                                 RcDetails::get().AuthzError);\n    }\n    break;\n  }\n\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n    break;\n  }\n}\n\nbool Filter::isBufferFull() const {\n  const auto* buffer = callbacks_->decodingBuffer();\n  if (config_->allowPartialMessage() && buffer != nullptr) {\n    return buffer->length() >= config_->maxRequestBytes();\n  }\n  return false;\n}\n\nvoid Filter::continueDecoding() {\n  filter_return_ = FilterReturn::ContinueDecoding;\n  if (!initiating_call_) {\n    callbacks_->continueDecoding();\n  }\n}\n\nFilter::PerRouteFlags Filter::getPerRouteFlags(const Router::RouteConstSharedPtr& route) const {\n  if (route == nullptr || route->routeEntry() == nullptr) {\n    return PerRouteFlags{true /*skip_check_*/, false /*skip_request_body_buffering_*/};\n  }\n\n  const auto* specific_per_route_config =\n      Http::Utility::resolveMostSpecificPerFilterConfig<FilterConfigPerRoute>(\n          HttpFilterNames::get().ExtAuthorization, route);\n  if (specific_per_route_config != nullptr) {\n    return PerRouteFlags{specific_per_route_config->disabled(),\n                         specific_per_route_config->disableRequestBodyBuffering()};\n  }\n\n  return PerRouteFlags{false /*skip_check_*/, false /*skip_request_body_buffering_*/};\n}\n\n} // namespace ExtAuthz\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ext_authz/ext_authz.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/matchers.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/runtime/runtime_protos.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz.h\"\n#include \"extensions/filters/common/ext_authz/ext_authz_grpc_impl.h\"\n#include \"extensions/filters/common/ext_authz/ext_authz_http_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace ExtAuthz {\n\n/**\n * All stats for the Ext Authz filter. @see stats_macros.h\n */\n\n#define ALL_EXT_AUTHZ_FILTER_STATS(COUNTER)                                                        \\\n  COUNTER(ok)                                                                                      \\\n  COUNTER(denied)                                                                                  \\\n  COUNTER(error)                                                                                   \\\n  COUNTER(timeout)                                                                                 \\\n  COUNTER(disabled)                                                                                \\\n  COUNTER(failure_mode_allowed)\n\n/**\n * Wrapper struct for ext_authz filter stats. 
@see stats_macros.h\n */\nstruct ExtAuthzFilterStats {\n  ALL_EXT_AUTHZ_FILTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration for the External Authorization (ext_authz) filter.\n */\nclass FilterConfig {\npublic:\n  FilterConfig(const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& config,\n               Stats::Scope& scope, Runtime::Loader& runtime, Http::Context& http_context,\n               const std::string& stats_prefix)\n      : allow_partial_message_(config.with_request_body().allow_partial_message()),\n        failure_mode_allow_(config.failure_mode_allow()),\n        clear_route_cache_(config.clear_route_cache()),\n        max_request_bytes_(config.with_request_body().max_request_bytes()),\n        pack_as_bytes_(config.with_request_body().pack_as_bytes()),\n        status_on_error_(toErrorCode(config.status_on_error().code())), scope_(scope),\n        runtime_(runtime), http_context_(http_context),\n        filter_enabled_(config.has_filter_enabled()\n                            ? absl::optional<Runtime::FractionalPercent>(\n                                  Runtime::FractionalPercent(config.filter_enabled(), runtime_))\n                            : absl::nullopt),\n        filter_enabled_metadata_(\n            config.has_filter_enabled_metadata()\n                ? absl::optional<Matchers::MetadataMatcher>(config.filter_enabled_metadata())\n                : absl::nullopt),\n        deny_at_disable_(config.has_deny_at_disable()\n                             ? 
absl::optional<Runtime::FeatureFlag>(\n                                   Runtime::FeatureFlag(config.deny_at_disable(), runtime_))\n                             : absl::nullopt),\n        pool_(scope_.symbolTable()),\n        metadata_context_namespaces_(config.metadata_context_namespaces().begin(),\n                                     config.metadata_context_namespaces().end()),\n        include_peer_certificate_(config.include_peer_certificate()),\n        stats_(generateStats(stats_prefix, config.stat_prefix(), scope)),\n        ext_authz_ok_(pool_.add(createPoolStatName(config.stat_prefix(), \"ok\"))),\n        ext_authz_denied_(pool_.add(createPoolStatName(config.stat_prefix(), \"denied\"))),\n        ext_authz_error_(pool_.add(createPoolStatName(config.stat_prefix(), \"error\"))),\n        ext_authz_timeout_(pool_.add(createPoolStatName(config.stat_prefix(), \"timeout\"))),\n        ext_authz_failure_mode_allowed_(\n            pool_.add(createPoolStatName(config.stat_prefix(), \"failure_mode_allowed\"))) {}\n\n  bool allowPartialMessage() const { return allow_partial_message_; }\n\n  bool withRequestBody() const { return max_request_bytes_ > 0; }\n\n  bool failureModeAllow() const { return failure_mode_allow_; }\n\n  bool clearRouteCache() const { return clear_route_cache_; }\n\n  uint32_t maxRequestBytes() const { return max_request_bytes_; }\n\n  bool packAsBytes() const { return pack_as_bytes_; }\n\n  Http::Code statusOnError() const { return status_on_error_; }\n\n  bool filterEnabled(const envoy::config::core::v3::Metadata& metadata) {\n    const bool enabled = filter_enabled_.has_value() ? filter_enabled_->enabled() : true;\n    const bool enabled_metadata =\n        filter_enabled_metadata_.has_value() ? filter_enabled_metadata_->match(metadata) : true;\n    return enabled && enabled_metadata;\n  }\n\n  bool denyAtDisable() {\n    return deny_at_disable_.has_value() ? 
deny_at_disable_->enabled() : false;\n  }\n\n  Stats::Scope& scope() { return scope_; }\n\n  Http::Context& httpContext() { return http_context_; }\n\n  const std::vector<std::string>& metadataContextNamespaces() {\n    return metadata_context_namespaces_;\n  }\n\n  const ExtAuthzFilterStats& stats() const { return stats_; }\n\n  void incCounter(Stats::Scope& scope, Stats::StatName name) {\n    scope.counterFromStatName(name).inc();\n  }\n\n  bool includePeerCertificate() const { return include_peer_certificate_; }\n\nprivate:\n  static Http::Code toErrorCode(uint64_t status) {\n    const auto code = static_cast<Http::Code>(status);\n    if (code >= Http::Code::Continue && code <= Http::Code::NetworkAuthenticationRequired) {\n      return code;\n    }\n    return Http::Code::Forbidden;\n  }\n\n  ExtAuthzFilterStats generateStats(const std::string& prefix,\n                                    const std::string& filter_stats_prefix, Stats::Scope& scope) {\n    const std::string final_prefix = absl::StrCat(prefix, \"ext_authz.\", filter_stats_prefix);\n    return {ALL_EXT_AUTHZ_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))};\n  }\n\n  // This generates ext_authz.<optional filter_stats_prefix>.name, for example: ext_authz.waf.ok\n  // when filter_stats_prefix is \"waf\", and ext_authz.ok when filter_stats_prefix is empty.\n  const std::string createPoolStatName(const std::string& filter_stats_prefix,\n                                       const std::string& name) {\n    return absl::StrCat(\"ext_authz\",\n                        filter_stats_prefix.empty() ? 
EMPTY_STRING\n                                                    : absl::StrCat(\".\", filter_stats_prefix),\n                        \".\", name);\n  }\n\n  const bool allow_partial_message_;\n  const bool failure_mode_allow_;\n  const bool clear_route_cache_;\n  const uint32_t max_request_bytes_;\n  const bool pack_as_bytes_;\n  const Http::Code status_on_error_;\n  Stats::Scope& scope_;\n  Runtime::Loader& runtime_;\n  Http::Context& http_context_;\n\n  const absl::optional<Runtime::FractionalPercent> filter_enabled_;\n  const absl::optional<Matchers::MetadataMatcher> filter_enabled_metadata_;\n  const absl::optional<Runtime::FeatureFlag> deny_at_disable_;\n\n  // TODO(nezdolik): stop using pool as part of deprecating cluster scope stats.\n  Stats::StatNamePool pool_;\n\n  const std::vector<std::string> metadata_context_namespaces_;\n\n  const bool include_peer_certificate_;\n\n  // The stats for the filter.\n  ExtAuthzFilterStats stats_;\n\npublic:\n  // TODO(nezdolik): deprecate cluster scope stats counters in favor of filter scope stats\n  // (ExtAuthzFilterStats stats_).\n  const Stats::StatName ext_authz_ok_;\n  const Stats::StatName ext_authz_denied_;\n  const Stats::StatName ext_authz_error_;\n  const Stats::StatName ext_authz_timeout_;\n  const Stats::StatName ext_authz_failure_mode_allowed_;\n};\n\nusing FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;\n\n/**\n * Per route settings for ExtAuth. Allows customizing the CheckRequest on a\n * virtualhost\\route\\weighted cluster level.\n */\nclass FilterConfigPerRoute : public Router::RouteSpecificFilterConfig {\npublic:\n  using ContextExtensionsMap = Protobuf::Map<std::string, std::string>;\n\n  FilterConfigPerRoute(\n      const envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute& config)\n      : context_extensions_(config.has_check_settings()\n                                ? 
config.check_settings().context_extensions()\n                                : ContextExtensionsMap()),\n        disable_request_body_buffering_(config.has_check_settings() &&\n                                        config.check_settings().disable_request_body_buffering()),\n        disabled_(config.disabled()) {}\n\n  void merge(const FilterConfigPerRoute& other);\n\n  /**\n   * @return Context extensions to add to the CheckRequest.\n   */\n  const ContextExtensionsMap& contextExtensions() const { return context_extensions_; }\n  // Allow moving the context extensions out of this object.\n  ContextExtensionsMap&& takeContextExtensions() { return std::move(context_extensions_); }\n\n  bool disabled() const { return disabled_; }\n\n  bool disableRequestBodyBuffering() const { return disable_request_body_buffering_; }\n\nprivate:\n  // We save the context extensions as a protobuf map instead of an std::map as this allows us to\n  // move it to the CheckRequest, thus avoiding a copy that would incur by converting it.\n  ContextExtensionsMap context_extensions_;\n  bool disable_request_body_buffering_;\n  bool disabled_;\n};\n\n/**\n * HTTP ext_authz filter. 
Depending on the route configuration, this filter calls the global\n * ext_authz service before allowing further filter iteration.\n */\nclass Filter : public Logger::Loggable<Logger::Id::filter>,\n               public Http::StreamDecoderFilter,\n               public Filters::Common::ExtAuthz::RequestCallbacks {\npublic:\n  Filter(const FilterConfigSharedPtr& config, Filters::Common::ExtAuthz::ClientPtr&& client)\n      : config_(config), client_(std::move(client)), stats_(config->stats()) {}\n\n  // Http::StreamFilterBase\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\n  // ExtAuthz::RequestCallbacks\n  void onComplete(Filters::Common::ExtAuthz::ResponsePtr&&) override;\n\nprivate:\n  void addResponseHeaders(Http::HeaderMap& header_map, const Http::HeaderVector& headers);\n  void initiateCall(const Http::RequestHeaderMap& headers,\n                    const Router::RouteConstSharedPtr& route);\n  void continueDecoding();\n  bool isBufferFull() const;\n\n  // This holds a set of flags defined in per-route configuration.\n  struct PerRouteFlags {\n    const bool skip_check_;\n    const bool skip_request_body_buffering_;\n  };\n  PerRouteFlags getPerRouteFlags(const Router::RouteConstSharedPtr& route) const;\n\n  // State of this filter's communication with the external authorization service.\n  // The filter has either not started calling the external service, in the middle of calling\n  // it or has completed.\n  enum class State { NotStarted, Calling, Complete };\n\n  // FilterReturn is used to capture what the return code should be to 
the filter chain.\n  // if this filter is either in the middle of calling the service or the result is denied then\n  // the filter chain should stop. Otherwise the filter chain can continue to the next filter.\n  enum class FilterReturn { ContinueDecoding, StopDecoding };\n\n  Http::HeaderMapPtr getHeaderMap(const Filters::Common::ExtAuthz::ResponsePtr& response);\n  FilterConfigSharedPtr config_;\n  Filters::Common::ExtAuthz::ClientPtr client_;\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n  Http::RequestHeaderMap* request_headers_;\n  State state_{State::NotStarted};\n  FilterReturn filter_return_{FilterReturn::ContinueDecoding};\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n  // The stats for the filter.\n  ExtAuthzFilterStats stats_;\n\n  // Used to identify if the callback to onComplete() is synchronous (on the stack) or asynchronous.\n  bool initiating_call_{};\n  bool buffer_data_{};\n  bool skip_check_{false};\n  envoy::service::auth::v3::CheckRequest check_request_{};\n};\n\n} // namespace ExtAuthz\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/fault/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that injects faults into the request flow\n# Public docs: docs/root/configuration/http_filters/fault_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"fault_filter_lib\",\n    srcs = [\"fault_filter.cc\"],\n    hdrs = [\"fault_filter.h\"],\n    deps = [\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/buffer:watermark_buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:token_bucket_impl_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:utility_lib\",\n        \"//source/extensions/filters/common/fault:fault_config_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        
\"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/fault:fault_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/fault/config.cc",
    "content": "#include \"extensions/filters/http/fault/config.h\"\n\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.h\"\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/fault/fault_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Fault {\n\nHttp::FilterFactoryCb FaultFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::fault::v3::HTTPFault& config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n  FaultFilterConfigSharedPtr filter_config(new FaultFilterConfig(\n      config, context.runtime(), stats_prefix, context.scope(), context.timeSource()));\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<FaultFilter>(filter_config));\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nFaultFilterFactory::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::fault::v3::HTTPFault& config,\n    Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<const Fault::FaultSettings>(config);\n}\n\n/**\n * Static registration for the fault filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(FaultFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.fault\"};\n\n} // namespace Fault\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/fault/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.h\"\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Fault {\n\n/**\n * Config registration for the fault injection filter. @see NamedHttpFilterConfigFactory.\n */\nclass FaultFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::fault::v3::HTTPFault> {\npublic:\n  FaultFilterFactory() : FactoryBase(HttpFilterNames::get().Fault) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::fault::v3::HTTPFault& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::fault::v3::HTTPFault& proto_config,\n      Server::Configuration::ServerFactoryContext& context,\n      ProtobufMessage::ValidationVisitor& validator) override;\n};\n\n} // namespace Fault\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/fault/fault_filter.cc",
    "content": "#include \"extensions/filters/http/fault/fault_filter.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/event/timer.h\"\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/utility.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Fault {\n\nstruct RcDetailsValues {\n  // The fault filter injected an abort for this request.\n  const std::string FaultAbort = \"fault_filter_abort\";\n};\nusing RcDetails = ConstSingleton<RcDetailsValues>;\n\nFaultSettings::FaultSettings(const envoy::extensions::filters::http::fault::v3::HTTPFault& fault)\n    : fault_filter_headers_(Http::HeaderUtility::buildHeaderDataVector(fault.headers())),\n      delay_percent_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT(fault, delay_percent_runtime,\n                                                            RuntimeKeys::get().DelayPercentKey)),\n      abort_percent_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT(fault, abort_percent_runtime,\n                                                            RuntimeKeys::get().AbortPercentKey)),\n      delay_duration_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT(fault, delay_duration_runtime,\n                                                             RuntimeKeys::get().DelayDurationKey)),\n      abort_http_status_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT(\n          fault, abort_http_status_runtime, 
RuntimeKeys::get().AbortHttpStatusKey)),\n      abort_grpc_status_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT(\n          fault, abort_grpc_status_runtime, RuntimeKeys::get().AbortGrpcStatusKey)),\n      max_active_faults_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT(\n          fault, max_active_faults_runtime, RuntimeKeys::get().MaxActiveFaultsKey)),\n      response_rate_limit_percent_runtime_(\n          PROTOBUF_GET_STRING_OR_DEFAULT(fault, response_rate_limit_percent_runtime,\n                                         RuntimeKeys::get().ResponseRateLimitPercentKey)) {\n  if (fault.has_abort()) {\n    request_abort_config_ =\n        std::make_unique<Filters::Common::Fault::FaultAbortConfig>(fault.abort());\n  }\n\n  if (fault.has_delay()) {\n    request_delay_config_ =\n        std::make_unique<Filters::Common::Fault::FaultDelayConfig>(fault.delay());\n  }\n\n  upstream_cluster_ = fault.upstream_cluster();\n\n  for (const auto& node : fault.downstream_nodes()) {\n    downstream_nodes_.insert(node);\n  }\n\n  if (fault.has_max_active_faults()) {\n    max_active_faults_ = fault.max_active_faults().value();\n  }\n\n  if (fault.has_response_rate_limit()) {\n    response_rate_limit_ =\n        std::make_unique<Filters::Common::Fault::FaultRateLimitConfig>(fault.response_rate_limit());\n  }\n}\n\nFaultFilterConfig::FaultFilterConfig(\n    const envoy::extensions::filters::http::fault::v3::HTTPFault& fault, Runtime::Loader& runtime,\n    const std::string& stats_prefix, Stats::Scope& scope, TimeSource& time_source)\n    : settings_(fault), runtime_(runtime), stats_(generateStats(stats_prefix, scope)),\n      scope_(scope), time_source_(time_source),\n      stat_name_set_(scope.symbolTable().makeSet(\"Fault\")),\n      aborts_injected_(stat_name_set_->add(\"aborts_injected\")),\n      delays_injected_(stat_name_set_->add(\"delays_injected\")),\n      stats_prefix_(stat_name_set_->add(absl::StrCat(stats_prefix, \"fault\"))) {}\n\nvoid FaultFilterConfig::incCounter(Stats::StatName 
downstream_cluster, Stats::StatName stat_name) {\n  Stats::Utility::counterFromStatNames(scope_, {stats_prefix_, downstream_cluster, stat_name})\n      .inc();\n}\n\nFaultFilter::FaultFilter(FaultFilterConfigSharedPtr config) : config_(config) {}\n\nFaultFilter::~FaultFilter() {\n  ASSERT(delay_timer_ == nullptr);\n  ASSERT(response_limiter_ == nullptr || response_limiter_->destroyed());\n}\n\n// Delays and aborts are independent events. One can inject a delay\n// followed by an abort or inject just a delay or abort. In this callback,\n// if we inject a delay, then we will inject the abort in the delay timer\n// callback.\nHttp::FilterHeadersStatus FaultFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  // Route-level configuration overrides filter-level configuration\n  // NOTE: We should not use runtime when reading from route-level\n  // faults. In other words, runtime is supported only when faults are\n  // configured at the filter level.\n  fault_settings_ = config_->settings();\n  if (decoder_callbacks_->route() && decoder_callbacks_->route()->routeEntry()) {\n    const std::string& name = Extensions::HttpFilters::HttpFilterNames::get().Fault;\n    const auto* route_entry = decoder_callbacks_->route()->routeEntry();\n\n    const auto* per_route_settings =\n        route_entry->mostSpecificPerFilterConfigTyped<FaultSettings>(name);\n    fault_settings_ = per_route_settings ? 
per_route_settings : fault_settings_;\n  }\n\n  if (!matchesTargetUpstreamCluster()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (!matchesDownstreamNodes(headers)) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // Check for header matches\n  if (!Http::HeaderUtility::matchHeaders(headers, fault_settings_->filterHeaders())) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (headers.EnvoyDownstreamServiceCluster()) {\n    downstream_cluster_ = std::string(headers.getEnvoyDownstreamServiceClusterValue());\n    if (!downstream_cluster_.empty()) {\n      downstream_cluster_storage_ = std::make_unique<Stats::StatNameDynamicStorage>(\n          downstream_cluster_, config_->scope().symbolTable());\n    }\n\n    downstream_cluster_delay_percent_key_ =\n        fmt::format(\"fault.http.{}.delay.fixed_delay_percent\", downstream_cluster_);\n    downstream_cluster_abort_percent_key_ =\n        fmt::format(\"fault.http.{}.abort.abort_percent\", downstream_cluster_);\n    downstream_cluster_delay_duration_key_ =\n        fmt::format(\"fault.http.{}.delay.fixed_duration_ms\", downstream_cluster_);\n    downstream_cluster_abort_http_status_key_ =\n        fmt::format(\"fault.http.{}.abort.http_status\", downstream_cluster_);\n    downstream_cluster_abort_grpc_status_key_ =\n        fmt::format(\"fault.http.{}.abort.grpc_status\", downstream_cluster_);\n  }\n\n  maybeSetupResponseRateLimit(headers);\n\n  if (maybeSetupDelay(headers)) {\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  if (maybeDoAbort(headers)) {\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nbool FaultFilter::maybeSetupDelay(const Http::RequestHeaderMap& request_headers) {\n  absl::optional<std::chrono::milliseconds> duration = delayDuration(request_headers);\n  if (duration.has_value() && tryIncActiveFaults()) {\n    delay_timer_ = decoder_callbacks_->dispatcher().createTimer(\n        
[this, &request_headers]() -> void { postDelayInjection(request_headers); });\n    ENVOY_LOG(debug, \"fault: delaying request {}ms\", duration.value().count());\n    delay_timer_->enableTimer(duration.value(), &decoder_callbacks_->scope());\n    recordDelaysInjectedStats();\n    decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::DelayInjected);\n    return true;\n  }\n  return false;\n}\n\nbool FaultFilter::maybeDoAbort(const Http::RequestHeaderMap& request_headers) {\n  absl::optional<Http::Code> http_status;\n  absl::optional<Grpc::Status::GrpcStatus> grpc_status;\n  std::tie(http_status, grpc_status) = abortStatus(request_headers);\n\n  if (http_status.has_value() && tryIncActiveFaults()) {\n    abortWithStatus(http_status.value(), grpc_status);\n    return true;\n  }\n\n  return false;\n}\n\nvoid FaultFilter::maybeSetupResponseRateLimit(const Http::RequestHeaderMap& request_headers) {\n  if (!isResponseRateLimitEnabled(request_headers)) {\n    return;\n  }\n\n  absl::optional<uint64_t> rate_kbps =\n      fault_settings_->responseRateLimit()->rateKbps(&request_headers);\n  if (!rate_kbps.has_value()) {\n    return;\n  }\n\n  if (!tryIncActiveFaults()) {\n    return;\n  }\n\n  config_->stats().response_rl_injected_.inc();\n\n  response_limiter_ = std::make_unique<StreamRateLimiter>(\n      rate_kbps.value(), encoder_callbacks_->encoderBufferLimit(),\n      [this] { encoder_callbacks_->onEncoderFilterAboveWriteBufferHighWatermark(); },\n      [this] { encoder_callbacks_->onEncoderFilterBelowWriteBufferLowWatermark(); },\n      [this](Buffer::Instance& data, bool end_stream) {\n        encoder_callbacks_->injectEncodedDataToFilterChain(data, end_stream);\n      },\n      [this] { encoder_callbacks_->continueEncoding(); }, config_->timeSource(),\n      decoder_callbacks_->dispatcher(), decoder_callbacks_->scope());\n}\n\nbool FaultFilter::faultOverflow() {\n  const uint64_t max_faults = config_->runtime().snapshot().getInteger(\n      
fault_settings_->maxActiveFaultsRuntime(), fault_settings_->maxActiveFaults().has_value()\n                                                     ? fault_settings_->maxActiveFaults().value()\n                                                     : std::numeric_limits<uint64_t>::max());\n  // Note: Since we don't compare/swap here this is a fuzzy limit which is similar to how the\n  // other circuit breakers work.\n  if (config_->stats().active_faults_.value() >= max_faults) {\n    config_->stats().faults_overflow_.inc();\n    return true;\n  }\n\n  return false;\n}\n\nbool FaultFilter::isDelayEnabled(const Http::RequestHeaderMap& request_headers) {\n  const auto request_delay = fault_settings_->requestDelay();\n  if (request_delay == nullptr) {\n    return false;\n  }\n\n  if (!downstream_cluster_delay_percent_key_.empty()) {\n    return config_->runtime().snapshot().featureEnabled(\n        downstream_cluster_delay_percent_key_, request_delay->percentage(&request_headers));\n  }\n  return config_->runtime().snapshot().featureEnabled(fault_settings_->delayPercentRuntime(),\n                                                      request_delay->percentage(&request_headers));\n}\n\nbool FaultFilter::isAbortEnabled(const Http::RequestHeaderMap& request_headers) {\n  const auto request_abort = fault_settings_->requestAbort();\n  if (request_abort == nullptr) {\n    return false;\n  }\n\n  if (!downstream_cluster_abort_percent_key_.empty()) {\n    return config_->runtime().snapshot().featureEnabled(\n        downstream_cluster_abort_percent_key_, request_abort->percentage(&request_headers));\n  }\n  return config_->runtime().snapshot().featureEnabled(fault_settings_->abortPercentRuntime(),\n                                                      request_abort->percentage(&request_headers));\n}\n\nbool FaultFilter::isResponseRateLimitEnabled(const Http::RequestHeaderMap& request_headers) {\n  if (fault_settings_->responseRateLimit() == nullptr) {\n    return false;\n  }\n\n  // 
TODO(mattklein123): Allow runtime override via downstream cluster similar to the other keys.\n  return config_->runtime().snapshot().featureEnabled(\n      fault_settings_->responseRateLimitPercentRuntime(),\n      fault_settings_->responseRateLimit()->percentage(&request_headers));\n}\n\nabsl::optional<std::chrono::milliseconds>\nFaultFilter::delayDuration(const Http::RequestHeaderMap& request_headers) {\n  absl::optional<std::chrono::milliseconds> ret;\n\n  if (!isDelayEnabled(request_headers)) {\n    return ret;\n  }\n\n  // See if the configured delay provider has a default delay, if not there is no delay (e.g.,\n  // header configuration and no/invalid header).\n  auto config_duration = fault_settings_->requestDelay()->duration(&request_headers);\n  if (!config_duration.has_value()) {\n    return ret;\n  }\n\n  std::chrono::milliseconds duration =\n      std::chrono::milliseconds(config_->runtime().snapshot().getInteger(\n          fault_settings_->delayDurationRuntime(), config_duration.value().count()));\n  if (!downstream_cluster_delay_duration_key_.empty()) {\n    duration = std::chrono::milliseconds(config_->runtime().snapshot().getInteger(\n        downstream_cluster_delay_duration_key_, duration.count()));\n  }\n\n  // Delay only if the duration is >0ms\n  if (duration.count() > 0) {\n    ret = duration;\n  }\n\n  return ret;\n}\n\nAbortHttpAndGrpcStatus FaultFilter::abortStatus(const Http::RequestHeaderMap& request_headers) {\n  if (!isAbortEnabled(request_headers)) {\n    return AbortHttpAndGrpcStatus{absl::nullopt, absl::nullopt};\n  }\n\n  auto http_status = abortHttpStatus(request_headers);\n  // If http status code is set, then gRPC status won't be used.\n  if (http_status.has_value()) {\n    return AbortHttpAndGrpcStatus{http_status, absl::nullopt};\n  }\n\n  auto grpc_status = abortGrpcStatus(request_headers);\n  // If gRPC status code is set, then http status will be set to Http::Code::OK (200)\n  if (grpc_status.has_value()) {\n    return 
AbortHttpAndGrpcStatus{Http::Code::OK, grpc_status};\n  }\n\n  return AbortHttpAndGrpcStatus{absl::nullopt, absl::nullopt};\n}\n\nabsl::optional<Http::Code>\nFaultFilter::abortHttpStatus(const Http::RequestHeaderMap& request_headers) {\n  // See if the configured abort provider has a default status code, if not there is no abort status\n  // code (e.g., header configuration and no/invalid header).\n  auto http_status = fault_settings_->requestAbort()->httpStatusCode(&request_headers);\n  if (!http_status.has_value()) {\n    return absl::nullopt;\n  }\n\n  auto default_http_status_code = static_cast<uint64_t>(http_status.value());\n  auto runtime_http_status_code = config_->runtime().snapshot().getInteger(\n      fault_settings_->abortHttpStatusRuntime(), default_http_status_code);\n\n  if (!downstream_cluster_abort_http_status_key_.empty()) {\n    runtime_http_status_code = config_->runtime().snapshot().getInteger(\n        downstream_cluster_abort_http_status_key_, default_http_status_code);\n  }\n\n  return static_cast<Http::Code>(runtime_http_status_code);\n}\n\nabsl::optional<Grpc::Status::GrpcStatus>\nFaultFilter::abortGrpcStatus(const Http::RequestHeaderMap& request_headers) {\n  auto grpc_status = fault_settings_->requestAbort()->grpcStatusCode(&request_headers);\n  if (!grpc_status.has_value()) {\n    return absl::nullopt;\n  }\n\n  auto default_grpc_status_code = static_cast<uint64_t>(grpc_status.value());\n  auto runtime_grpc_status_code = config_->runtime().snapshot().getInteger(\n      fault_settings_->abortGrpcStatusRuntime(), default_grpc_status_code);\n\n  if (!downstream_cluster_abort_grpc_status_key_.empty()) {\n    runtime_grpc_status_code = config_->runtime().snapshot().getInteger(\n        downstream_cluster_abort_grpc_status_key_, default_grpc_status_code);\n  }\n\n  return static_cast<Grpc::Status::GrpcStatus>(runtime_grpc_status_code);\n}\n\nvoid FaultFilter::recordDelaysInjectedStats() {\n  // Downstream specific stats.\n  if 
(!downstream_cluster_.empty()) {\n    config_->incDelays(downstream_cluster_storage_->statName());\n  }\n\n  config_->stats().delays_injected_.inc();\n}\n\nvoid FaultFilter::recordAbortsInjectedStats() {\n  // Downstream specific stats.\n  if (!downstream_cluster_.empty()) {\n    config_->incAborts(downstream_cluster_storage_->statName());\n  }\n\n  config_->stats().aborts_injected_.inc();\n}\n\nHttp::FilterDataStatus FaultFilter::decodeData(Buffer::Instance&, bool) {\n  if (delay_timer_ == nullptr) {\n    return Http::FilterDataStatus::Continue;\n  }\n  // If the request is too large, stop reading new data until the buffer drains.\n  return Http::FilterDataStatus::StopIterationAndWatermark;\n}\n\nHttp::FilterTrailersStatus FaultFilter::decodeTrailers(Http::RequestTrailerMap&) {\n  return delay_timer_ == nullptr ? Http::FilterTrailersStatus::Continue\n                                 : Http::FilterTrailersStatus::StopIteration;\n}\n\nFaultFilterStats FaultFilterConfig::generateStats(const std::string& prefix, Stats::Scope& scope) {\n  const std::string final_prefix = prefix + \"fault.\";\n  return {ALL_FAULT_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),\n                                 POOL_GAUGE_PREFIX(scope, final_prefix))};\n}\n\nbool FaultFilter::tryIncActiveFaults() {\n  // Only charge 1 active fault per filter in case we are injecting multiple faults.\n  // Since we count at most one active fault per filter, we also allow multiple faults\n  // per filter without checking for overflow.\n  if (fault_active_) {\n    return true;\n  }\n\n  // We only check for overflow when attempting to perform a fault. 
Note that this means that a\n  // single request might increment the counter more than once if it tries to apply multiple faults,\n  // and it is also possible for it to fail the first check then succeed on the second (should\n  // another thread decrement the active fault gauge).\n  if (faultOverflow()) {\n    return false;\n  }\n\n  // TODO(mattklein123): Consider per-fault type active fault gauges.\n  config_->stats().active_faults_.inc();\n  fault_active_ = true;\n\n  return true;\n}\n\nvoid FaultFilter::onDestroy() {\n  resetTimerState();\n  if (response_limiter_ != nullptr) {\n    response_limiter_->destroy();\n  }\n  if (fault_active_) {\n    config_->stats().active_faults_.dec();\n  }\n}\n\nvoid FaultFilter::postDelayInjection(const Http::RequestHeaderMap& request_headers) {\n  resetTimerState();\n\n  // Delays can be followed by aborts\n  absl::optional<Http::Code> http_status;\n  absl::optional<Grpc::Status::GrpcStatus> grpc_status;\n  std::tie(http_status, grpc_status) = abortStatus(request_headers);\n\n  if (http_status.has_value()) {\n    abortWithStatus(http_status.value(), grpc_status);\n  } else {\n    // Continue request processing.\n    decoder_callbacks_->continueDecoding();\n  }\n}\n\nvoid FaultFilter::abortWithStatus(Http::Code http_status_code,\n                                  absl::optional<Grpc::Status::GrpcStatus> grpc_status) {\n  recordAbortsInjectedStats();\n  decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::FaultInjected);\n  decoder_callbacks_->sendLocalReply(http_status_code, \"fault filter abort\", nullptr, grpc_status,\n                                     RcDetails::get().FaultAbort);\n}\n\nbool FaultFilter::matchesTargetUpstreamCluster() {\n  bool matches = true;\n\n  if (!fault_settings_->upstreamCluster().empty()) {\n    Router::RouteConstSharedPtr route = decoder_callbacks_->route();\n    matches = route && route->routeEntry() &&\n              (route->routeEntry()->clusterName() == 
fault_settings_->upstreamCluster());\n  }\n\n  return matches;\n}\n\nbool FaultFilter::matchesDownstreamNodes(const Http::RequestHeaderMap& headers) {\n  if (fault_settings_->downstreamNodes().empty()) {\n    return true;\n  }\n\n  if (!headers.EnvoyDownstreamServiceNode()) {\n    return false;\n  }\n\n  const absl::string_view downstream_node = headers.getEnvoyDownstreamServiceNodeValue();\n  return fault_settings_->downstreamNodes().find(downstream_node) !=\n         fault_settings_->downstreamNodes().end();\n}\n\nvoid FaultFilter::resetTimerState() {\n  if (delay_timer_) {\n    delay_timer_->disableTimer();\n    delay_timer_.reset();\n  }\n}\n\nHttp::FilterDataStatus FaultFilter::encodeData(Buffer::Instance& data, bool end_stream) {\n  if (response_limiter_ != nullptr) {\n    response_limiter_->writeData(data, end_stream);\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus FaultFilter::encodeTrailers(Http::ResponseTrailerMap&) {\n  if (response_limiter_ != nullptr) {\n    return response_limiter_->onTrailers();\n  }\n\n  return Http::FilterTrailersStatus::Continue;\n}\n\nStreamRateLimiter::StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_data,\n                                     std::function<void()> pause_data_cb,\n                                     std::function<void()> resume_data_cb,\n                                     std::function<void(Buffer::Instance&, bool)> write_data_cb,\n                                     std::function<void()> continue_cb, TimeSource& time_source,\n                                     Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope)\n    : // bytes_per_time_slice is KiB converted to bytes divided by the number of ticks per second.\n      bytes_per_time_slice_((max_kbps * 1024) / SecondDivisor), write_data_cb_(write_data_cb),\n      continue_cb_(continue_cb), scope_(scope),\n      // The token bucket is 
configured with a max token count of the number of ticks per second,\n      // and refills at the same rate, so that we have a per second limit which refills gradually in\n      // ~63ms intervals.\n      token_bucket_(SecondDivisor, time_source, SecondDivisor),\n      token_timer_(dispatcher.createTimer([this] { onTokenTimer(); })),\n      buffer_(resume_data_cb, pause_data_cb,\n              []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }) {\n  ASSERT(bytes_per_time_slice_ > 0);\n  ASSERT(max_buffered_data > 0);\n  buffer_.setWatermarks(max_buffered_data);\n}\n\nvoid StreamRateLimiter::onTokenTimer() {\n  ENVOY_LOG(trace, \"limiter: timer wakeup: buffered={}\", buffer_.length());\n  Buffer::OwnedImpl data_to_write;\n\n  if (!saw_data_) {\n    // The first time we see any data on this stream (via writeData()), reset the number of tokens\n    // to 1. This will ensure that we start pacing the data at the desired rate (and don't send a\n    // full 1s of data right away which might not introduce enough delay for a stream that doesn't\n    // have enough data to span more than 1s of rate allowance). 
Once we reset, we will subsequently\n    // allow for bursting within the second to account for our data provider being bursty.\n    token_bucket_.reset(1);\n    saw_data_ = true;\n  }\n\n  // Compute the number of tokens needed (rounded up), try to obtain that many tickets, and then\n  // figure out how many bytes to write given the number of tokens we actually got.\n  const uint64_t tokens_needed =\n      (buffer_.length() + bytes_per_time_slice_ - 1) / bytes_per_time_slice_;\n  const uint64_t tokens_obtained = token_bucket_.consume(tokens_needed, true);\n  const uint64_t bytes_to_write =\n      std::min(tokens_obtained * bytes_per_time_slice_, buffer_.length());\n  ENVOY_LOG(trace, \"limiter: tokens_needed={} tokens_obtained={} to_write={}\", tokens_needed,\n            tokens_obtained, bytes_to_write);\n\n  // Move the data to write into the output buffer with as little copying as possible.\n  // NOTE: This might be moving zero bytes, but that should work fine.\n  data_to_write.move(buffer_, bytes_to_write);\n\n  // If the buffer still contains data in it, we couldn't get enough tokens, so schedule the next\n  // token available time.\n  if (buffer_.length() > 0) {\n    const std::chrono::milliseconds ms = token_bucket_.nextTokenAvailable();\n    if (ms.count() > 0) {\n      ENVOY_LOG(trace, \"limiter: scheduling wakeup for {}ms\", ms.count());\n      token_timer_->enableTimer(ms, &scope_);\n    }\n  }\n\n  // Write the data out, indicating end stream if we saw end stream, there is no further data to\n  // send, and there are no trailers.\n  write_data_cb_(data_to_write, saw_end_stream_ && buffer_.length() == 0 && !saw_trailers_);\n\n  // If there is no more data to send and we saw trailers, we need to continue iteration to release\n  // the trailers to further filters.\n  if (buffer_.length() == 0 && saw_trailers_) {\n    continue_cb_();\n  }\n}\n\nvoid StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream) {\n  ENVOY_LOG(trace, 
\"limiter: incoming data length={} buffered={}\", incoming_buffer.length(),\n            buffer_.length());\n  buffer_.move(incoming_buffer);\n  saw_end_stream_ = end_stream;\n  if (!token_timer_->enabled()) {\n    // TODO(mattklein123): In an optimal world we would be able to continue iteration with the data\n    // we want in the buffer, but have a way to clear end_stream in case we can't send it all.\n    // The filter API does not currently support that and it will not be a trivial change to add.\n    // Instead we cheat here by scheduling the token timer to run immediately after the stack is\n    // unwound, at which point we can directly called encode/decodeData.\n    token_timer_->enableTimer(std::chrono::milliseconds(0), &scope_);\n  }\n}\n\nHttp::FilterTrailersStatus StreamRateLimiter::onTrailers() {\n  saw_end_stream_ = true;\n  saw_trailers_ = true;\n  return buffer_.length() > 0 ? Http::FilterTrailersStatus::StopIteration\n                              : Http::FilterTrailersStatus::Continue;\n}\n\n} // namespace Fault\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/fault/fault_filter.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/token_bucket_impl.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/filters/common/fault/fault_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Fault {\n\n/**\n * All stats for the fault filter. @see stats_macros.h\n */\n#define ALL_FAULT_FILTER_STATS(COUNTER, GAUGE)                                                     \\\n  COUNTER(aborts_injected)                                                                         \\\n  COUNTER(delays_injected)                                                                         \\\n  COUNTER(faults_overflow)                                                                         \\\n  COUNTER(response_rl_injected)                                                                    \\\n  GAUGE(active_faults, Accumulate)\n\n/**\n * Wrapper struct for connection manager stats. 
@see stats_macros.h\n */\nstruct FaultFilterStats {\n  ALL_FAULT_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Configuration for fault injection.\n */\nclass FaultSettings : public Router::RouteSpecificFilterConfig {\npublic:\n  FaultSettings(const envoy::extensions::filters::http::fault::v3::HTTPFault& fault);\n\n  const std::vector<Http::HeaderUtility::HeaderDataPtr>& filterHeaders() const {\n    return fault_filter_headers_;\n  }\n  const Filters::Common::Fault::FaultAbortConfig* requestAbort() const {\n    return request_abort_config_.get();\n  }\n  const Filters::Common::Fault::FaultDelayConfig* requestDelay() const {\n    return request_delay_config_.get();\n  }\n  const std::string& upstreamCluster() const { return upstream_cluster_; }\n  const absl::flat_hash_set<std::string>& downstreamNodes() const { return downstream_nodes_; }\n  absl::optional<uint64_t> maxActiveFaults() const { return max_active_faults_; }\n  const Filters::Common::Fault::FaultRateLimitConfig* responseRateLimit() const {\n    return response_rate_limit_.get();\n  }\n  const std::string& abortPercentRuntime() const { return abort_percent_runtime_; }\n  const std::string& delayPercentRuntime() const { return delay_percent_runtime_; }\n  const std::string& abortHttpStatusRuntime() const { return abort_http_status_runtime_; }\n  const std::string& abortGrpcStatusRuntime() const { return abort_grpc_status_runtime_; }\n  const std::string& delayDurationRuntime() const { return delay_duration_runtime_; }\n  const std::string& maxActiveFaultsRuntime() const { return max_active_faults_runtime_; }\n  const std::string& responseRateLimitPercentRuntime() const {\n    return response_rate_limit_percent_runtime_;\n  }\n\nprivate:\n  class RuntimeKeyValues {\n  public:\n    const std::string DelayPercentKey = \"fault.http.delay.fixed_delay_percent\";\n    const std::string AbortPercentKey = \"fault.http.abort.abort_percent\";\n    const std::string DelayDurationKey = 
\"fault.http.delay.fixed_duration_ms\";\n    const std::string AbortHttpStatusKey = \"fault.http.abort.http_status\";\n    const std::string AbortGrpcStatusKey = \"fault.http.abort.grpc_status\";\n    const std::string MaxActiveFaultsKey = \"fault.http.max_active_faults\";\n    const std::string ResponseRateLimitPercentKey = \"fault.http.rate_limit.response_percent\";\n  };\n\n  using RuntimeKeys = ConstSingleton<RuntimeKeyValues>;\n\n  envoy::type::v3::FractionalPercent abort_percentage_;\n  Filters::Common::Fault::FaultDelayConfigPtr request_delay_config_;\n  Filters::Common::Fault::FaultAbortConfigPtr request_abort_config_;\n  std::string upstream_cluster_; // restrict faults to specific upstream cluster\n  const std::vector<Http::HeaderUtility::HeaderDataPtr> fault_filter_headers_;\n  absl::flat_hash_set<std::string> downstream_nodes_{}; // Inject failures for specific downstream\n  absl::optional<uint64_t> max_active_faults_;\n  Filters::Common::Fault::FaultRateLimitConfigPtr response_rate_limit_;\n  const std::string delay_percent_runtime_;\n  const std::string abort_percent_runtime_;\n  const std::string delay_duration_runtime_;\n  const std::string abort_http_status_runtime_;\n  const std::string abort_grpc_status_runtime_;\n  const std::string max_active_faults_runtime_;\n  const std::string response_rate_limit_percent_runtime_;\n};\n\n/**\n * Configuration for the fault filter.\n */\nclass FaultFilterConfig {\npublic:\n  FaultFilterConfig(const envoy::extensions::filters::http::fault::v3::HTTPFault& fault,\n                    Runtime::Loader& runtime, const std::string& stats_prefix, Stats::Scope& scope,\n                    TimeSource& time_source);\n\n  Runtime::Loader& runtime() { return runtime_; }\n  FaultFilterStats& stats() { return stats_; }\n  Stats::Scope& scope() { return scope_; }\n  const FaultSettings* settings() { return &settings_; }\n  TimeSource& timeSource() { return time_source_; }\n\n  void incDelays(Stats::StatName 
downstream_cluster) {\n    incCounter(downstream_cluster, delays_injected_);\n  }\n\n  void incAborts(Stats::StatName downstream_cluster) {\n    incCounter(downstream_cluster, aborts_injected_);\n  }\n\nprivate:\n  static FaultFilterStats generateStats(const std::string& prefix, Stats::Scope& scope);\n  void incCounter(Stats::StatName downstream_cluster, Stats::StatName stat_name);\n\n  const FaultSettings settings_;\n  Runtime::Loader& runtime_;\n  FaultFilterStats stats_;\n  Stats::Scope& scope_;\n  TimeSource& time_source_;\n  Stats::StatNameSetPtr stat_name_set_;\n  const Stats::StatName aborts_injected_;\n  const Stats::StatName delays_injected_;\n  const Stats::StatName stats_prefix_; // Includes \".fault\".\n};\n\nusing FaultFilterConfigSharedPtr = std::shared_ptr<FaultFilterConfig>;\n\n/**\n * An HTTP stream rate limiter. Split out for ease of testing and potential code reuse elsewhere.\n */\nclass StreamRateLimiter : Logger::Loggable<Logger::Id::filter> {\npublic:\n  /**\n   * @param max_kbps maximum rate in KiB/s.\n   * @param max_buffered_data maximum data to buffer before invoking the pause callback.\n   * @param pause_data_cb callback invoked when the limiter has buffered too much data.\n   * @param resume_data_cb callback invoked when the limiter has gone under the buffer limit.\n   * @param write_data_cb callback invoked to write data to the stream.\n   * @param continue_cb callback invoked to continue the stream. 
This is only used to continue\n   *                    trailers that have been paused during body flush.\n   * @param time_source the time source to run the token bucket with.\n   * @param dispatcher the stream's dispatcher to use for creating timers.\n   * @param scope the stream's scope\n   */\n  StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_data,\n                    std::function<void()> pause_data_cb, std::function<void()> resume_data_cb,\n                    std::function<void(Buffer::Instance&, bool)> write_data_cb,\n                    std::function<void()> continue_cb, TimeSource& time_source,\n                    Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope);\n\n  /**\n   * Called by the stream to write data. All data writes happen asynchronously, the stream should\n   * be stopped after this call (all data will be drained from incoming_buffer).\n   */\n  void writeData(Buffer::Instance& incoming_buffer, bool end_stream);\n\n  /**\n   * Called if the stream receives trailers.\n   */\n  Http::FilterTrailersStatus onTrailers();\n\n  /**\n   * Like the owning filter, we must handle inline destruction, so we have a destroy() method which\n   * kills any callbacks.\n   */\n  void destroy() { token_timer_.reset(); }\n  bool destroyed() { return token_timer_ == nullptr; }\n\nprivate:\n  void onTokenTimer();\n\n  // We currently divide each second into 16 segments for the token bucket. Thus, the rate limit is\n  // KiB per second, divided into 16 segments, ~63ms apart. 
16 is used because it divides into 1024\n  // evenly.\n  static constexpr uint64_t SecondDivisor = 16;\n\n  const uint64_t bytes_per_time_slice_;\n  const std::function<void(Buffer::Instance&, bool)> write_data_cb_;\n  const std::function<void()> continue_cb_;\n  const ScopeTrackedObject& scope_;\n  TokenBucketImpl token_bucket_;\n  Event::TimerPtr token_timer_;\n  bool saw_data_{};\n  bool saw_end_stream_{};\n  bool saw_trailers_{};\n  Buffer::WatermarkBuffer buffer_;\n};\n\nusing AbortHttpAndGrpcStatus =\n    std::pair<absl::optional<Http::Code>, absl::optional<Grpc::Status::GrpcStatus>>;\n/**\n * A filter that is capable of faulting an entire request before dispatching it upstream.\n */\nclass FaultFilter : public Http::StreamFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  FaultFilter(FaultFilterConfigSharedPtr config);\n  ~FaultFilter() override;\n\n  // Http::StreamFilterBase\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    decoder_callbacks_ = &callbacks;\n  }\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override;\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    
return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override {\n    encoder_callbacks_ = &callbacks;\n  }\n\nprivate:\n  bool faultOverflow();\n  void recordAbortsInjectedStats();\n  void recordDelaysInjectedStats();\n  void resetTimerState();\n  void postDelayInjection(const Http::RequestHeaderMap& request_headers);\n  void abortWithStatus(Http::Code http_status_code,\n                       absl::optional<Grpc::Status::GrpcStatus> grpc_status_code);\n  bool matchesTargetUpstreamCluster();\n  bool matchesDownstreamNodes(const Http::RequestHeaderMap& headers);\n  bool isAbortEnabled(const Http::RequestHeaderMap& request_headers);\n  bool isDelayEnabled(const Http::RequestHeaderMap& request_headers);\n  bool isResponseRateLimitEnabled(const Http::RequestHeaderMap& request_headers);\n  absl::optional<std::chrono::milliseconds>\n  delayDuration(const Http::RequestHeaderMap& request_headers);\n  AbortHttpAndGrpcStatus abortStatus(const Http::RequestHeaderMap& request_headers);\n  absl::optional<Http::Code> abortHttpStatus(const Http::RequestHeaderMap& request_headers);\n  absl::optional<Grpc::Status::GrpcStatus>\n  abortGrpcStatus(const Http::RequestHeaderMap& request_headers);\n  // Attempts to increase the number of active faults. 
Returns false if we've reached the maximum\n  // number of allowed faults, in which case no fault should be performed.\n  bool tryIncActiveFaults();\n  bool maybeDoAbort(const Http::RequestHeaderMap& request_headers);\n  bool maybeSetupDelay(const Http::RequestHeaderMap& request_headers);\n  void maybeSetupResponseRateLimit(const Http::RequestHeaderMap& request_headers);\n\n  FaultFilterConfigSharedPtr config_;\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_{};\n  Http::StreamEncoderFilterCallbacks* encoder_callbacks_{};\n  Event::TimerPtr delay_timer_;\n  std::string downstream_cluster_{};\n  std::unique_ptr<Stats::StatNameDynamicStorage> downstream_cluster_storage_;\n  const FaultSettings* fault_settings_;\n  bool fault_active_{};\n  std::unique_ptr<StreamRateLimiter> response_limiter_;\n  std::string downstream_cluster_delay_percent_key_{};\n  std::string downstream_cluster_abort_percent_key_{};\n  std::string downstream_cluster_delay_duration_key_{};\n  std::string downstream_cluster_abort_http_status_key_{};\n  std::string downstream_cluster_abort_grpc_status_key_{};\n};\n\n} // namespace Fault\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_bridge/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP filter that bridges HTTP/1.1 unary \"gRPC\" to compliant HTTP/2 gRPC.\n# Public docs: docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"http1_bridge_filter_lib\",\n    srcs = [\"http1_bridge_filter.cc\"],\n    hdrs = [\"http1_bridge_filter.h\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/grpc:context_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http/http1:codec_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # Legacy test use. TODO(#9953) clean up.\n    extra_visibility = [\n        \"//source/exe:__pkg__\",\n        \"//test/integration:__subpackages__\",\n        \"//test/server:__subpackages__\",\n    ],\n    security_posture = \"unknown\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/grpc_http1_bridge:http1_bridge_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_bridge/config.cc",
    "content": "#include \"extensions/filters/http/grpc_http1_bridge/config.h\"\n\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1Bridge {\n\nHttp::FilterFactoryCb GrpcHttp1BridgeFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::grpc_http1_bridge::v3::Config&, const std::string&,\n    Server::Configuration::FactoryContext& factory_context) {\n  return [&factory_context](Http::FilterChainFactoryCallbacks& callbacks) {\n    callbacks.addStreamFilter(std::make_shared<Http1BridgeFilter>(factory_context.grpcContext()));\n  };\n}\n\n/**\n * Static registration for the grpc HTTP1 bridge filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(GrpcHttp1BridgeFilterConfig,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.grpc_http1_bridge\"};\n\n} // namespace GrpcHttp1Bridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_bridge/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/grpc_http1_bridge/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_http1_bridge/v3/config.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1Bridge {\n\n/**\n * Config registration for the grpc HTTP1 bridge filter. @see NamedHttpFilterConfigFactory.\n */\nclass GrpcHttp1BridgeFilterConfig\n    : public Common::FactoryBase<envoy::extensions::filters::http::grpc_http1_bridge::v3::Config> {\npublic:\n  GrpcHttp1BridgeFilterConfig() : FactoryBase(HttpFilterNames::get().GrpcHttp1Bridge) {}\n\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::grpc_http1_bridge::v3::Config& proto_config,\n      const std::string& stats_prefix,\n      Server::Configuration::FactoryContext& factory_context) override;\n};\n\n} // namespace GrpcHttp1Bridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc",
    "content": "#include \"extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.h\"\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/http/codes.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/grpc/context_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/http1/codec_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1Bridge {\n\nvoid Http1BridgeFilter::chargeStat(const Http::ResponseHeaderOrTrailerMap& headers) {\n  context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, *request_stat_names_,\n                      headers.GrpcStatus());\n}\n\nHttp::FilterHeadersStatus Http1BridgeFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  const bool grpc_request = Grpc::Common::isGrpcRequestHeaders(headers);\n  if (grpc_request) {\n    setupStatTracking(headers);\n  }\n\n  const absl::optional<Http::Protocol>& protocol = decoder_callbacks_->streamInfo().protocol();\n  ASSERT(protocol);\n  if (protocol.value() < Http::Protocol::Http2 && grpc_request) {\n    do_bridging_ = true;\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterHeadersStatus Http1BridgeFilter::encodeHeaders(Http::ResponseHeaderMap& headers,\n                                                           bool end_stream) {\n  if (doStatTracking()) {\n    chargeStat(headers);\n  }\n\n  if (!do_bridging_ || end_stream) {\n    return Http::FilterHeadersStatus::Continue;\n  } else {\n    response_headers_ = &headers;\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n}\n\nHttp::FilterDataStatus Http1BridgeFilter::encodeData(Buffer::Instance&, bool end_stream) {\n  if (!do_bridging_ || end_stream) {\n    return Http::FilterDataStatus::Continue;\n  } else {\n    // Buffer until the complete request has been processed.\n    return 
Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n}\n\nHttp::FilterTrailersStatus Http1BridgeFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) {\n  if (doStatTracking()) {\n    chargeStat(trailers);\n  }\n\n  if (do_bridging_) {\n    // Here we check for grpc-status. If it's not zero, we change the response code. We assume\n    // that if a reset comes in and we disconnect the HTTP/1.1 client it will raise some type\n    // of exception/error that the response was not complete.\n    const Http::HeaderEntry* grpc_status_header = trailers.GrpcStatus();\n    if (grpc_status_header) {\n      uint64_t grpc_status_code;\n      if (!absl::SimpleAtoi(grpc_status_header->value().getStringView(), &grpc_status_code) ||\n          grpc_status_code != 0) {\n        response_headers_->setStatus(enumToInt(Http::Code::ServiceUnavailable));\n      }\n      response_headers_->setGrpcStatus(grpc_status_header->value().getStringView());\n    }\n\n    const Http::HeaderEntry* grpc_message_header = trailers.GrpcMessage();\n    if (grpc_message_header) {\n      response_headers_->setGrpcMessage(grpc_message_header->value().getStringView());\n    }\n\n    // Since we are buffering, set content-length so that HTTP/1.1 callers can better determine\n    // if this is a complete response.\n    response_headers_->setContentLength(\n        encoder_callbacks_->encodingBuffer() ? 
encoder_callbacks_->encodingBuffer()->length() : 0);\n  }\n\n  // NOTE: We will still write the trailers, but the HTTP/1.1 codec will just eat them and end\n  //       the chunk encoded response which is what we want.\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid Http1BridgeFilter::setupStatTracking(const Http::RequestHeaderMap& headers) {\n  cluster_ = decoder_callbacks_->clusterInfo();\n  if (!cluster_) {\n    return;\n  }\n  request_stat_names_ = context_.resolveDynamicServiceAndMethod(headers.Path());\n}\n\n} // namespace GrpcHttp1Bridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/grpc/context_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1Bridge {\n/**\n * See docs/configuration/http_filters/grpc_http1_bridge_filter.rst\n */\nclass Http1BridgeFilter : public Http::StreamFilter {\npublic:\n  explicit Http1BridgeFilter(Grpc::Context& context) : context_(context) {}\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  }\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    decoder_callbacks_ = &callbacks;\n  }\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override;\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override {\n    encoder_callbacks_ = &callbacks;\n  }\n\n  bool doStatTracking() const { return request_stat_names_.has_value(); }\n\nprivate:\n  void chargeStat(const 
Http::ResponseHeaderOrTrailerMap& headers);\n  void setupStatTracking(const Http::RequestHeaderMap& headers);\n\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_{};\n  Http::StreamEncoderFilterCallbacks* encoder_callbacks_{};\n  Http::ResponseHeaderMap* response_headers_{};\n  bool do_bridging_{};\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n  absl::optional<Grpc::Context::RequestStatNames> request_stat_names_;\n  Grpc::Context& context_;\n};\n\n} // namespace GrpcHttp1Bridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"filter_lib\",\n    srcs = [\"filter.cc\"],\n    hdrs = [\"filter.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/grpc:status_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \":filter_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc",
    "content": "#include \"extensions/filters/http/grpc_http1_reverse_bridge/config.h\"\n\n#include \"envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/grpc_http1_reverse_bridge/filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1ReverseBridge {\n\nHttp::FilterFactoryCb Config::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfig& config,\n    const std::string&, Server::Configuration::FactoryContext&) {\n  return [config](Envoy::Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(\n        std::make_shared<Filter>(config.content_type(), config.withhold_grpc_frames()));\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr Config::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute&\n        proto_config,\n    Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<FilterConfigPerRoute>(proto_config);\n}\n\n/**\n * Static registration for the grpc http1 reverse bridge filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(Config, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace GrpcHttp1ReverseBridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_reverse_bridge/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.validate.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1ReverseBridge {\n\nclass Config\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfig,\n          envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute> {\npublic:\n  Config() : FactoryBase(HttpFilterNames::get().GrpcHttp1ReverseBridge) {}\n\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfig& config,\n      const std::string& stat_prefix,\n      Envoy::Server::Configuration::FactoryContext& context) override;\n\nprivate:\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute&\n          proto_config,\n      Server::Configuration::ServerFactoryContext& context,\n      ProtobufMessage::ValidationVisitor& validator) override;\n};\n} // namespace GrpcHttp1ReverseBridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc",
    "content": "#include \"extensions/filters/http/grpc_http1_reverse_bridge/filter.h\"\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/common.h\"\n#include \"common/grpc/status.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1ReverseBridge {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    accept_handle(Http::CustomHeaders::get().Accept);\n\nstruct RcDetailsValues {\n  // The gRPC HTTP/1 reverse bridge failed because the body payload was too\n  // small to be a gRPC frame.\n  const std::string GrpcBridgeFailedTooSmall = \"grpc_bridge_data_too_small\";\n  // The gRPC HTTP/1 bridge encountered an unsupported content type.\n  const std::string GrpcBridgeFailedContentType = \"grpc_bridge_content_type_wrong\";\n};\nusing RcDetails = ConstSingleton<RcDetailsValues>;\n\nnamespace {\nGrpc::Status::GrpcStatus grpcStatusFromHeaders(Http::ResponseHeaderMap& headers) {\n  const auto http_response_status = Http::Utility::getResponseStatus(headers);\n\n  // Notably, we treat an upstream 200 as a successful response. 
This differs\n  // from the standard but is key in being able to transform a successful\n  // upstream HTTP response into a gRPC response.\n  if (http_response_status == 200) {\n    return Grpc::Status::WellKnownGrpcStatus::Ok;\n  } else {\n    return Grpc::Utility::httpToGrpcStatus(http_response_status);\n  }\n}\n\nstd::string badContentTypeMessage(const Http::ResponseHeaderMap& headers) {\n  if (headers.ContentType() != nullptr) {\n    return fmt::format(\n        \"envoy reverse bridge: upstream responded with unsupported content-type {}, status code {}\",\n        headers.getContentTypeValue(), headers.getStatusValue());\n  } else {\n    return fmt::format(\n        \"envoy reverse bridge: upstream responded with no content-type header, status code {}\",\n        headers.getStatusValue());\n  }\n}\n\nvoid adjustContentLength(Http::RequestOrResponseHeaderMap& headers,\n                         const std::function<uint64_t(uint64_t value)>& adjustment) {\n  auto length_header = headers.getContentLengthValue();\n  if (!length_header.empty()) {\n    uint64_t length;\n    if (absl::SimpleAtoi(length_header, &length)) {\n      if (length != 0) {\n        headers.setContentLength(adjustment(length));\n      }\n    }\n  }\n}\n} // namespace\n\nHttp::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) {\n  // Short circuit if header only.\n  if (end_stream) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // Disable filter per route config if applies\n  if (decoder_callbacks_->route() != nullptr) {\n    const auto* per_route_config =\n        Http::Utility::resolveMostSpecificPerFilterConfig<FilterConfigPerRoute>(\n            Extensions::HttpFilters::HttpFilterNames::get().GrpcHttp1ReverseBridge,\n            decoder_callbacks_->route());\n    if (per_route_config != nullptr && per_route_config->disabled()) {\n      enabled_ = false;\n      return Http::FilterHeadersStatus::Continue;\n    }\n  }\n\n  // If this is a 
gRPC request we:\n  //  - mark this request as being gRPC\n  //  - change the content-type to application/x-protobuf\n  if (Envoy::Grpc::Common::isGrpcRequestHeaders(headers)) {\n    enabled_ = true;\n\n    // We keep track of the original content-type to ensure that we handle\n    // gRPC content type variations such as application/grpc+proto.\n    content_type_ = std::string(headers.getContentTypeValue());\n    headers.setContentType(upstream_content_type_);\n    headers.setInline(accept_handle.handle(), upstream_content_type_);\n\n    if (withhold_grpc_frames_) {\n      // Adjust the content-length header to account for us removing the gRPC frame header.\n      adjustContentLength(headers, [](auto size) { return size - Grpc::GRPC_FRAME_HEADER_SIZE; });\n    }\n\n    // Clear the route cache to recompute the cache. This provides additional\n    // flexibility around request modification through the route table.\n    decoder_callbacks_->clearRouteCache();\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus Filter::decodeData(Buffer::Instance& buffer, bool) {\n  if (enabled_ && withhold_grpc_frames_ && !prefix_stripped_) {\n    // Fail the request if the body is too small to possibly contain a gRPC frame.\n    if (buffer.length() < Grpc::GRPC_FRAME_HEADER_SIZE) {\n      decoder_callbacks_->sendLocalReply(Http::Code::OK, \"invalid request body\", nullptr,\n                                         Grpc::Status::WellKnownGrpcStatus::Unknown,\n                                         RcDetails::get().GrpcBridgeFailedTooSmall);\n      return Http::FilterDataStatus::StopIterationNoBuffer;\n    }\n\n    // Remove the gRPC frame header.\n    buffer.drain(Grpc::GRPC_FRAME_HEADER_SIZE);\n    prefix_stripped_ = true;\n  }\n\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) {\n  if (enabled_) {\n    absl::string_view content_type = 
headers.getContentTypeValue();\n\n    // If the response from upstream does not have the correct content-type,\n    // perform an early return with a useful error message in grpc-message.\n    if (content_type != upstream_content_type_) {\n      decoder_callbacks_->sendLocalReply(Http::Code::OK, badContentTypeMessage(headers), nullptr,\n                                         Grpc::Status::WellKnownGrpcStatus::Unknown,\n                                         RcDetails::get().GrpcBridgeFailedContentType);\n\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n\n    // Restore the content-type to match what the downstream sent.\n    headers.setContentType(content_type_);\n\n    if (withhold_grpc_frames_) {\n      // Adjust content-length to account for the frame header that's added.\n      adjustContentLength(headers,\n                          [](auto length) { return length + Grpc::GRPC_FRAME_HEADER_SIZE; });\n    }\n    // We can only insert trailers at the end of data, so keep track of this value\n    // until then.\n    grpc_status_ = grpcStatusFromHeaders(headers);\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus Filter::encodeData(Buffer::Instance& buffer, bool end_stream) {\n  if (!enabled_) {\n    return Http::FilterDataStatus::Continue;\n  }\n\n  if (end_stream) {\n    // Insert grpc-status trailers to communicate the error code.\n    auto& trailers = encoder_callbacks_->addEncodedTrailers();\n    trailers.setGrpcStatus(grpc_status_);\n\n    if (withhold_grpc_frames_) {\n      buffer.prepend(buffer_);\n      buildGrpcFrameHeader(buffer);\n    }\n\n    return Http::FilterDataStatus::Continue;\n  }\n\n  // We only need to buffer if we're responsible for injecting the gRPC frame header.\n  if (withhold_grpc_frames_) {\n    // Buffer the response in a mutable buffer: we need to determine the size of the response\n    // and modify it later on.\n    buffer_.move(buffer);\n    return 
Http::FilterDataStatus::StopIterationAndBuffer;\n  } else {\n    return Http::FilterDataStatus::Continue;\n  }\n}\n\nHttp::FilterTrailersStatus Filter::encodeTrailers(Http::ResponseTrailerMap& trailers) {\n  if (!enabled_) {\n    return Http::FilterTrailersStatus::Continue;\n  }\n\n  trailers.setGrpcStatus(grpc_status_);\n\n  if (withhold_grpc_frames_) {\n    buildGrpcFrameHeader(buffer_);\n    encoder_callbacks_->addEncodedData(buffer_, false);\n  }\n\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid Filter::buildGrpcFrameHeader(Buffer::Instance& buffer) {\n  // We do this even if the upstream failed: If the response returned non-200,\n  // we'll respond with a grpc-status with an error, so clients will know that the request\n  // was unsuccessful. Since we're guaranteed at this point to have a valid response\n  // (unless upstream lied in content-type) we attempt to return a well-formed gRPC\n  // response body.\n  Grpc::Encoder().prependFrameHeader(Grpc::GRPC_FH_DEFAULT, buffer);\n}\n\n} // namespace GrpcHttp1ReverseBridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h\"\n#include \"envoy/http/filter.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/grpc/status.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1ReverseBridge {\n\n// When enabled, will downgrade an incoming gRPC http request into a h/1.1 request.\nclass Filter : public Envoy::Http::PassThroughFilter {\npublic:\n  Filter(std::string upstream_content_type, bool withhold_grpc_frames)\n      : upstream_content_type_(std::move(upstream_content_type)),\n        withhold_grpc_frames_(withhold_grpc_frames) {}\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& buffer, bool end_stream) override;\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance& buffer, bool end_stream) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override;\n\nprivate:\n  // Prepend the grpc frame into the buffer\n  void buildGrpcFrameHeader(Buffer::Instance& buffer);\n\n  const std::string upstream_content_type_;\n  const bool withhold_grpc_frames_;\n\n  bool enabled_{};\n  bool prefix_stripped_{};\n  std::string content_type_{};\n  Grpc::Status::GrpcStatus grpc_status_{};\n  // Normally we'd use the encoding buffer, but since we need to mutate the\n  // buffer we instead maintain our own.\n  Buffer::OwnedImpl buffer_{};\n};\n\nusing FilterPtr = std::unique_ptr<Filter>;\n\nclass FilterConfigPerRoute : public 
Router::RouteSpecificFilterConfig {\npublic:\n  FilterConfigPerRoute(\n      const envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute&\n          config)\n      : disabled_(config.disabled()) {}\n  bool disabled() const { return disabled_; }\n\nprivate:\n  bool disabled_;\n};\n\n} // namespace GrpcHttp1ReverseBridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_json_transcoder/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP filter that implements binary gRPC to JSON transcoding\n# Public docs: docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"json_transcoder_filter_lib\",\n    srcs = [\"json_transcoder_filter.cc\"],\n    hdrs = [\"json_transcoder_filter.h\"],\n    external_deps = [\n        \"path_matcher\",\n        \"grpc_transcoding\",\n        \"http_api_protos\",\n        \"api_httpbody_protos\",\n    ],\n    deps = [\n        \":http_body_utils_lib\",\n        \":transcoder_input_stream_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http_body_utils_lib\",\n    srcs = [\"http_body_utils.cc\"],\n    hdrs = [\"http_body_utils.h\"],\n    external_deps = [\n        \"api_httpbody_protos\",\n    ],\n    deps = [\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"transcoder_input_stream_lib\",\n    srcs = [\"transcoder_input_stream_impl.cc\"],\n    hdrs = [\"transcoder_input_stream_impl.h\"],\n    external_deps = [\"grpc_transcoding\"],\n    deps = [\"//source/common/buffer:zero_copy_input_stream_lib\"],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        
\"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/grpc_json_transcoder:json_transcoder_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_json_transcoder/config.cc",
    "content": "#include \"extensions/filters/http/grpc_json_transcoder/config.h\"\n\n#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\n\nHttp::FilterFactoryCb GrpcJsonTranscoderFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder&\n        proto_config,\n    const std::string&, Server::Configuration::FactoryContext& context) {\n  JsonTranscoderConfigSharedPtr filter_config =\n      std::make_shared<JsonTranscoderConfig>(proto_config, context.api());\n\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<JsonTranscoderFilter>(*filter_config));\n  };\n}\n\n/**\n * Static registration for the grpc transcoding filter. @see RegisterNamedHttpFilterConfigFactory.\n */\nREGISTER_FACTORY(GrpcJsonTranscoderFilterConfig,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.grpc_json_transcoder\"};\n\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_json_transcoder/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\n\n/**\n * Config registration for the gRPC JSON transcoder filter. @see NamedHttpFilterConfigFactory.\n */\nclass GrpcJsonTranscoderFilterConfig\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder> {\npublic:\n  GrpcJsonTranscoderFilterConfig() : FactoryBase(HttpFilterNames::get().GrpcJsonTranscoder) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder&\n          proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc",
    "content": "#include \"extensions/filters/http/grpc_json_transcoder/http_body_utils.h\"\n\n#include \"google/api/httpbody.pb.h\"\n\nusing Envoy::Protobuf::io::CodedInputStream;\nusing Envoy::Protobuf::io::CodedOutputStream;\nusing Envoy::Protobuf::io::StringOutputStream;\nusing Envoy::Protobuf::io::ZeroCopyInputStream;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\n\nnamespace {\n\n// Embedded messages are treated the same way as strings (wire type 2).\nconstexpr uint32_t ProtobufLengthDelimitedField = 2;\n\nbool parseMessageByFieldPath(CodedInputStream* input,\n                             absl::Span<const Protobuf::Field* const> field_path,\n                             Protobuf::Message* message) {\n  if (field_path.empty()) {\n    return message->MergeFromCodedStream(input);\n  }\n\n  const uint32_t expected_tag = (field_path.front()->number() << 3) | ProtobufLengthDelimitedField;\n  for (;;) {\n    const uint32_t tag = input->ReadTag();\n    if (tag == expected_tag) {\n      uint32_t length = 0;\n      if (!input->ReadVarint32(&length)) {\n        return false;\n      }\n      auto limit = input->IncrementRecursionDepthAndPushLimit(length);\n      if (!parseMessageByFieldPath(input, field_path.subspan(1), message)) {\n        return false;\n      }\n      if (!input->DecrementRecursionDepthAndPopLimit(limit.first)) {\n        return false;\n      }\n    } else if (tag == 0) {\n      return true;\n    } else {\n      if (!Protobuf::internal::WireFormatLite::SkipField(input, tag)) {\n        return false;\n      }\n    }\n  }\n}\n} // namespace\n\nbool HttpBodyUtils::parseMessageByFieldPath(ZeroCopyInputStream* stream,\n                                            const std::vector<const Protobuf::Field*>& field_path,\n                                            Protobuf::Message* message) {\n  CodedInputStream input(stream);\n  input.SetRecursionLimit(field_path.size());\n\n  return 
GrpcJsonTranscoder::parseMessageByFieldPath(&input, absl::MakeConstSpan(field_path),\n                                                     message);\n}\n\nvoid HttpBodyUtils::appendHttpBodyEnvelope(\n    Buffer::Instance& output, const std::vector<const Protobuf::Field*>& request_body_field_path,\n    std::string content_type, uint64_t content_length) {\n  // Manually encode the protobuf envelope for the body.\n  // See https://developers.google.com/protocol-buffers/docs/encoding#embedded for wire format.\n\n  std::string proto_envelope;\n  {\n    // For memory safety, the StringOutputStream needs to be destroyed before\n    // we read the string.\n\n    const uint32_t http_body_field_number =\n        (google::api::HttpBody::kDataFieldNumber << 3) | ProtobufLengthDelimitedField;\n\n    ::google::api::HttpBody body;\n    body.set_content_type(std::move(content_type));\n\n    uint64_t envelope_size = body.ByteSizeLong() +\n                             CodedOutputStream::VarintSize32(http_body_field_number) +\n                             CodedOutputStream::VarintSize64(content_length);\n    std::vector<uint32_t> message_sizes;\n    message_sizes.reserve(request_body_field_path.size());\n    for (auto it = request_body_field_path.rbegin(); it != request_body_field_path.rend(); ++it) {\n      const Protobuf::Field* field = *it;\n      const uint64_t message_size = envelope_size + content_length;\n      const uint32_t field_number = (field->number() << 3) | ProtobufLengthDelimitedField;\n      const uint64_t field_size = CodedOutputStream::VarintSize32(field_number) +\n                                  CodedOutputStream::VarintSize64(message_size);\n      message_sizes.push_back(message_size);\n      envelope_size += field_size;\n    }\n    std::reverse(message_sizes.begin(), message_sizes.end());\n\n    proto_envelope.reserve(envelope_size);\n\n    Envoy::Protobuf::io::StringOutputStream string_stream(&proto_envelope);\n    Envoy::Protobuf::io::CodedOutputStream 
coded_stream(&string_stream);\n\n    // Serialize body field definition manually to avoid the copy of the body.\n    for (size_t i = 0; i < request_body_field_path.size(); ++i) {\n      const Protobuf::Field* field = request_body_field_path[i];\n      const uint32_t field_number = (field->number() << 3) | ProtobufLengthDelimitedField;\n      const uint64_t message_size = message_sizes[i];\n      coded_stream.WriteTag(field_number);\n      coded_stream.WriteVarint64(message_size);\n    }\n    body.SerializeToCodedStream(&coded_stream);\n    coded_stream.WriteTag(http_body_field_number);\n    coded_stream.WriteVarint64(content_length);\n  }\n\n  output.add(proto_envelope);\n}\n\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\n\nclass HttpBodyUtils {\npublic:\n  static bool parseMessageByFieldPath(Protobuf::io::ZeroCopyInputStream* stream,\n                                      const std::vector<const Protobuf::Field*>& field_path,\n                                      Protobuf::Message* message);\n  static void\n  appendHttpBodyEnvelope(Buffer::Instance& output,\n                         const std::vector<const Protobuf::Field*>& request_body_field_path,\n                         std::string content_type, uint64_t content_length);\n};\n\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc",
    "content": "#include \"extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h\"\n\n#include <memory>\n#include <unordered_set>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h\"\n#include \"envoy/http/filter.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/grpc_json_transcoder/http_body_utils.h\"\n\n#include \"google/api/annotations.pb.h\"\n#include \"google/api/http.pb.h\"\n#include \"google/api/httpbody.pb.h\"\n#include \"grpc_transcoding/json_request_translator.h\"\n#include \"grpc_transcoding/path_matcher_utility.h\"\n#include \"grpc_transcoding/response_to_json_translator.h\"\n\nusing Envoy::Protobuf::FileDescriptorSet;\nusing Envoy::Protobuf::io::ZeroCopyInputStream;\nusing Envoy::ProtobufUtil::Status;\nusing Envoy::ProtobufUtil::error::Code;\nusing google::api::HttpRule;\nusing google::grpc::transcoding::JsonRequestTranslator;\nusing JsonRequestTranslatorPtr = std::unique_ptr<JsonRequestTranslator>;\nusing google::grpc::transcoding::MessageStream;\nusing google::grpc::transcoding::PathMatcherBuilder;\nusing google::grpc::transcoding::PathMatcherUtility;\nusing google::grpc::transcoding::RequestInfo;\nusing google::grpc::transcoding::RequestMessageTranslator;\nusing RequestMessageTranslatorPtr = std::unique_ptr<RequestMessageTranslator>;\nusing google::grpc::transcoding::ResponseToJsonTranslator;\nusing ResponseToJsonTranslatorPtr = std::unique_ptr<ResponseToJsonTranslator>;\nusing google::grpc::transcoding::Transcoder;\nusing TranscoderPtr = std::unique_ptr<Transcoder>;\nusing google::grpc::transcoding::TranscoderInputStream;\nusing TranscoderInputStreamPtr = 
std::unique_ptr<TranscoderInputStream>;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\n\nstruct RcDetailsValues {\n  // The gRPC json transcoder filter failed to transcode when processing request headers.\n  // This will generally be accompanied by details about the transcoder failure.\n  const std::string GrpcTranscodeFailedEarly = \"early_grpc_json_transcode_failure\";\n  // The gRPC json transcoder filter failed to transcode when processing the request body.\n  // This will generally be accompanied by details about the transcoder failure.\n  const std::string GrpcTranscodeFailed = \"grpc_json_transcode_failure\";\n};\nusing RcDetails = ConstSingleton<RcDetailsValues>;\n\nnamespace {\n\nconst Http::LowerCaseString& trailerHeader() {\n  CONSTRUCT_ON_FIRST_USE(Http::LowerCaseString, \"trailer\");\n}\n\n// Transcoder:\n// https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/blob/master/src/include/grpc_transcoding/transcoder.h\n// implementation based on JsonRequestTranslator & ResponseToJsonTranslator\nclass TranscoderImpl : public Transcoder {\npublic:\n  /**\n   * Construct a transcoder implementation\n   * @param request_translator a JsonRequestTranslator that does the request translation\n   * @param response_translator a ResponseToJsonTranslator that does the response translation\n   */\n  TranscoderImpl(RequestMessageTranslatorPtr request_translator,\n                 JsonRequestTranslatorPtr json_request_translator,\n                 ResponseToJsonTranslatorPtr response_translator)\n      : request_translator_(std::move(request_translator)),\n        json_request_translator_(std::move(json_request_translator)),\n        request_message_stream_(request_translator_ ? 
*request_translator_\n                                                    : json_request_translator_->Output()),\n        response_translator_(std::move(response_translator)),\n        request_stream_(request_message_stream_.CreateInputStream()),\n        response_stream_(response_translator_->CreateInputStream()) {}\n\n  // Transcoder\n  ::google::grpc::transcoding::TranscoderInputStream* RequestOutput() override {\n    return request_stream_.get();\n  }\n  ProtobufUtil::Status RequestStatus() override { return request_message_stream_.Status(); }\n\n  ZeroCopyInputStream* ResponseOutput() override { return response_stream_.get(); }\n  ProtobufUtil::Status ResponseStatus() override { return response_translator_->Status(); }\n\nprivate:\n  RequestMessageTranslatorPtr request_translator_;\n  JsonRequestTranslatorPtr json_request_translator_;\n  MessageStream& request_message_stream_;\n  ResponseToJsonTranslatorPtr response_translator_;\n  TranscoderInputStreamPtr request_stream_;\n  TranscoderInputStreamPtr response_stream_;\n};\n\n} // namespace\n\nJsonTranscoderConfig::JsonTranscoderConfig(\n    const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder&\n        proto_config,\n    Api::Api& api) {\n  FileDescriptorSet descriptor_set;\n\n  switch (proto_config.descriptor_set_case()) {\n  case envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder::\n      DescriptorSetCase::kProtoDescriptor:\n    if (!descriptor_set.ParseFromString(\n            api.fileSystem().fileReadToEnd(proto_config.proto_descriptor()))) {\n      throw EnvoyException(\"transcoding_filter: Unable to parse proto descriptor\");\n    }\n    break;\n  case envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder::\n      DescriptorSetCase::kProtoDescriptorBin:\n    if (!descriptor_set.ParseFromString(proto_config.proto_descriptor_bin())) {\n      throw EnvoyException(\"transcoding_filter: Unable to parse proto descriptor\");\n 
   }\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  for (const auto& file : descriptor_set.file()) {\n    addFileDescriptor(file);\n  }\n\n  convert_grpc_status_ = proto_config.convert_grpc_status();\n  if (convert_grpc_status_) {\n    addBuiltinSymbolDescriptor(\"google.protobuf.Any\");\n    addBuiltinSymbolDescriptor(\"google.rpc.Status\");\n  }\n\n  type_helper_ = std::make_unique<google::grpc::transcoding::TypeHelper>(\n      Protobuf::util::NewTypeResolverForDescriptorPool(Grpc::Common::typeUrlPrefix(),\n                                                       &descriptor_pool_));\n\n  PathMatcherBuilder<MethodInfoSharedPtr> pmb;\n  // clang-format off\n  // We cannot convert this to a absl hash set as PathMatcherUtility::RegisterByHttpRule takes a\n  // std::unordered_set as an argument\n  std::unordered_set<std::string> ignored_query_parameters;\n  // clang-format on\n  for (const auto& query_param : proto_config.ignored_query_parameters()) {\n    ignored_query_parameters.insert(query_param);\n  }\n\n  for (const auto& service_name : proto_config.services()) {\n    auto service = descriptor_pool_.FindServiceByName(service_name);\n    if (service == nullptr) {\n      throw EnvoyException(\"transcoding_filter: Could not find '\" + service_name +\n                           \"' in the proto descriptor\");\n    }\n    for (int i = 0; i < service->method_count(); ++i) {\n      auto method = service->method(i);\n\n      HttpRule http_rule;\n      if (method->options().HasExtension(google::api::http)) {\n        http_rule = method->options().GetExtension(google::api::http);\n      } else if (proto_config.auto_mapping()) {\n        auto post = \"/\" + service->full_name() + \"/\" + method->name();\n        http_rule.set_post(post);\n        http_rule.set_body(\"*\");\n      }\n\n      MethodInfoSharedPtr method_info;\n      Status status = createMethodInfo(method, http_rule, method_info);\n      if (!status.ok()) {\n        throw 
EnvoyException(\"transcoding_filter: Cannot register '\" + method->full_name() +\n                             \"': \" + status.message().ToString());\n      }\n\n      if (!PathMatcherUtility::RegisterByHttpRule(pmb, http_rule, ignored_query_parameters,\n                                                  method_info)) {\n        throw EnvoyException(\"transcoding_filter: Cannot register '\" + method->full_name() +\n                             \"' to path matcher\");\n      }\n    }\n  }\n\n  path_matcher_ = pmb.Build();\n\n  const auto& print_config = proto_config.print_options();\n  print_options_.add_whitespace = print_config.add_whitespace();\n  print_options_.always_print_primitive_fields = print_config.always_print_primitive_fields();\n  print_options_.always_print_enums_as_ints = print_config.always_print_enums_as_ints();\n  print_options_.preserve_proto_field_names = print_config.preserve_proto_field_names();\n\n  match_incoming_request_route_ = proto_config.match_incoming_request_route();\n  ignore_unknown_query_parameters_ = proto_config.ignore_unknown_query_parameters();\n}\n\nvoid JsonTranscoderConfig::addFileDescriptor(const Protobuf::FileDescriptorProto& file) {\n  if (descriptor_pool_.BuildFile(file) == nullptr) {\n    throw EnvoyException(\"transcoding_filter: Unable to build proto descriptor pool\");\n  }\n}\n\nvoid JsonTranscoderConfig::addBuiltinSymbolDescriptor(const std::string& symbol_name) {\n  if (descriptor_pool_.FindFileContainingSymbol(symbol_name) != nullptr) {\n    return;\n  }\n\n  auto* builtin_pool = Protobuf::DescriptorPool::generated_pool();\n  if (!builtin_pool) {\n    return;\n  }\n\n  Protobuf::DescriptorPoolDatabase pool_database(*builtin_pool);\n  Protobuf::FileDescriptorProto file_proto;\n  pool_database.FindFileContainingSymbol(symbol_name, &file_proto);\n  addFileDescriptor(file_proto);\n}\n\nStatus JsonTranscoderConfig::resolveField(const Protobuf::Descriptor* descriptor,\n                                          const 
std::string& field_path_str,\n                                          std::vector<const Protobuf::Field*>* field_path,\n                                          bool* is_http_body) {\n  const Protobuf::Type* message_type =\n      type_helper_->Info()->GetTypeByTypeUrl(Grpc::Common::typeUrl(descriptor->full_name()));\n  if (message_type == nullptr) {\n    return ProtobufUtil::Status(Code::NOT_FOUND,\n                                \"Could not resolve type: \" + descriptor->full_name());\n  }\n\n  Status status = type_helper_->ResolveFieldPath(\n      *message_type, field_path_str == \"*\" ? \"\" : field_path_str, field_path);\n  if (!status.ok()) {\n    return status;\n  }\n\n  if (field_path->empty()) {\n    *is_http_body = descriptor->full_name() == google::api::HttpBody::descriptor()->full_name();\n  } else {\n    const Protobuf::Type* body_type =\n        type_helper_->Info()->GetTypeByTypeUrl(field_path->back()->type_url());\n    *is_http_body = body_type != nullptr &&\n                    body_type->name() == google::api::HttpBody::descriptor()->full_name();\n  }\n  return Status::OK;\n}\n\nStatus JsonTranscoderConfig::createMethodInfo(const Protobuf::MethodDescriptor* descriptor,\n                                              const HttpRule& http_rule,\n                                              MethodInfoSharedPtr& method_info) {\n  method_info = std::make_shared<MethodInfo>();\n  method_info->descriptor_ = descriptor;\n\n  Status status =\n      resolveField(descriptor->input_type(), http_rule.body(),\n                   &method_info->request_body_field_path, &method_info->request_type_is_http_body_);\n  if (!status.ok()) {\n    return status;\n  }\n\n  status = resolveField(descriptor->output_type(), http_rule.response_body(),\n                        &method_info->response_body_field_path,\n                        &method_info->response_type_is_http_body_);\n  if (!status.ok()) {\n    return status;\n  }\n\n  if 
(!method_info->response_body_field_path.empty() && !method_info->response_type_is_http_body_) {\n    // TODO(euroelessar): Implement https://github.com/envoyproxy/envoy/issues/11136.\n    return Status(Code::UNIMPLEMENTED,\n                  \"Setting \\\"response_body\\\" is not supported yet for non-HttpBody fields: \" +\n                      descriptor->full_name());\n  }\n\n  return Status::OK;\n}\n\nbool JsonTranscoderConfig::matchIncomingRequestInfo() const {\n  return match_incoming_request_route_;\n}\n\nbool JsonTranscoderConfig::convertGrpcStatus() const { return convert_grpc_status_; }\n\nProtobufUtil::Status JsonTranscoderConfig::createTranscoder(\n    const Http::RequestHeaderMap& headers, ZeroCopyInputStream& request_input,\n    google::grpc::transcoding::TranscoderInputStream& response_input, TranscoderPtr& transcoder,\n    MethodInfoSharedPtr& method_info) {\n  if (Grpc::Common::isGrpcRequestHeaders(headers)) {\n    return ProtobufUtil::Status(Code::INVALID_ARGUMENT,\n                                \"Request headers has application/grpc content-type\");\n  }\n  const std::string method(headers.getMethodValue());\n  std::string path(headers.getPathValue());\n  std::string args;\n\n  const size_t pos = path.find('?');\n  if (pos != std::string::npos) {\n    args = path.substr(pos + 1);\n    path = path.substr(0, pos);\n  }\n\n  struct RequestInfo request_info;\n  std::vector<VariableBinding> variable_bindings;\n  method_info =\n      path_matcher_->Lookup(method, path, args, &variable_bindings, &request_info.body_field_path);\n  if (!method_info) {\n    return ProtobufUtil::Status(Code::NOT_FOUND, \"Could not resolve \" + path + \" to a method\");\n  }\n\n  auto status = methodToRequestInfo(method_info, &request_info);\n  if (!status.ok()) {\n    return status;\n  }\n\n  for (const auto& binding : variable_bindings) {\n    google::grpc::transcoding::RequestWeaver::BindingInfo resolved_binding;\n    status = 
type_helper_->ResolveFieldPath(*request_info.message_type, binding.field_path,\n                                            &resolved_binding.field_path);\n    if (!status.ok()) {\n      if (ignore_unknown_query_parameters_) {\n        continue;\n      }\n      return status;\n    }\n\n    resolved_binding.value = binding.value;\n\n    request_info.variable_bindings.emplace_back(std::move(resolved_binding));\n  }\n\n  RequestMessageTranslatorPtr request_translator;\n  JsonRequestTranslatorPtr json_request_translator;\n  if (method_info->request_type_is_http_body_) {\n    request_translator = std::make_unique<RequestMessageTranslator>(*type_helper_->Resolver(),\n                                                                    false, std::move(request_info));\n    request_translator->Input().StartObject(nullptr)->EndObject();\n  } else {\n    json_request_translator = std::make_unique<JsonRequestTranslator>(\n        type_helper_->Resolver(), &request_input, std::move(request_info),\n        method_info->descriptor_->client_streaming(), true);\n  }\n\n  const auto response_type_url =\n      Grpc::Common::typeUrl(method_info->descriptor_->output_type()->full_name());\n  ResponseToJsonTranslatorPtr response_translator{new ResponseToJsonTranslator(\n      type_helper_->Resolver(), response_type_url, method_info->descriptor_->server_streaming(),\n      &response_input, print_options_)};\n\n  transcoder = std::make_unique<TranscoderImpl>(std::move(request_translator),\n                                                std::move(json_request_translator),\n                                                std::move(response_translator));\n  return ProtobufUtil::Status();\n}\n\nProtobufUtil::Status\nJsonTranscoderConfig::methodToRequestInfo(const MethodInfoSharedPtr& method_info,\n                                          google::grpc::transcoding::RequestInfo* info) {\n  const std::string& request_type_full_name = method_info->descriptor_->input_type()->full_name();\n  auto 
request_type_url = Grpc::Common::typeUrl(request_type_full_name);\n  info->message_type = type_helper_->Info()->GetTypeByTypeUrl(request_type_url);\n  if (info->message_type == nullptr) {\n    ENVOY_LOG(debug, \"Cannot resolve input-type: {}\", request_type_full_name);\n    return ProtobufUtil::Status(Code::NOT_FOUND,\n                                \"Could not resolve type: \" + request_type_full_name);\n  }\n\n  return ProtobufUtil::Status();\n}\n\nProtobufUtil::Status\nJsonTranscoderConfig::translateProtoMessageToJson(const Protobuf::Message& message,\n                                                  std::string* json_out) {\n  return ProtobufUtil::BinaryToJsonString(\n      type_helper_->Resolver(), Grpc::Common::typeUrl(message.GetDescriptor()->full_name()),\n      message.SerializeAsString(), json_out, print_options_);\n}\n\nJsonTranscoderFilter::JsonTranscoderFilter(JsonTranscoderConfig& config) : config_(config) {}\n\nHttp::FilterHeadersStatus JsonTranscoderFilter::decodeHeaders(Http::RequestHeaderMap& headers,\n                                                              bool end_stream) {\n  const auto status =\n      config_.createTranscoder(headers, request_in_, response_in_, transcoder_, method_);\n\n  if (!status.ok()) {\n    // If transcoder couldn't be created, it might be a normal gRPC request, so the filter will\n    // just pass-through the request to upstream.\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (method_->request_type_is_http_body_) {\n    if (headers.ContentType() != nullptr) {\n      absl::string_view content_type = headers.getContentTypeValue();\n      content_type_.assign(content_type.begin(), content_type.end());\n    }\n\n    bool done = !readToBuffer(*transcoder_->RequestOutput(), initial_request_data_);\n    if (!done) {\n      ENVOY_LOG(\n          debug,\n          \"Transcoding of query arguments of HttpBody request is not done (unexpected state)\");\n      error_ = true;\n      
decoder_callbacks_->sendLocalReply(\n          Http::Code::BadRequest, \"Bad request\", nullptr, absl::nullopt,\n          absl::StrCat(RcDetails::get().GrpcTranscodeFailedEarly, \"{BAD_REQUEST}\"));\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n    if (checkIfTranscoderFailed(RcDetails::get().GrpcTranscodeFailed)) {\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n  }\n\n  headers.removeContentLength();\n  headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc);\n  headers.setEnvoyOriginalPath(headers.getPathValue());\n  headers.addReferenceKey(Http::Headers::get().EnvoyOriginalMethod, headers.getMethodValue());\n  headers.setPath(\"/\" + method_->descriptor_->service()->full_name() + \"/\" +\n                  method_->descriptor_->name());\n  headers.setReferenceMethod(Http::Headers::get().MethodValues.Post);\n  headers.setReferenceTE(Http::Headers::get().TEValues.Trailers);\n\n  if (!config_.matchIncomingRequestInfo()) {\n    decoder_callbacks_->clearRouteCache();\n  }\n\n  if (end_stream && method_->request_type_is_http_body_) {\n    maybeSendHttpBodyRequestMessage();\n  } else if (end_stream) {\n    request_in_.finish();\n\n    if (checkIfTranscoderFailed(RcDetails::get().GrpcTranscodeFailedEarly)) {\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n\n    Buffer::OwnedImpl data;\n    readToBuffer(*transcoder_->RequestOutput(), data);\n\n    if (data.length() > 0) {\n      decoder_callbacks_->addDecodedData(data, true);\n    }\n  }\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus JsonTranscoderFilter::decodeData(Buffer::Instance& data, bool end_stream) {\n  ASSERT(!error_);\n\n  if (!transcoder_) {\n    return Http::FilterDataStatus::Continue;\n  }\n\n  if (method_->request_type_is_http_body_) {\n    request_data_.move(data);\n    // TODO(euroelessar): Upper bound message size for streaming case.\n    if (end_stream || method_->descriptor_->client_streaming()) {\n    
  maybeSendHttpBodyRequestMessage();\n    } else {\n      // TODO(euroelessar): Avoid buffering if content length is already known.\n      return Http::FilterDataStatus::StopIterationAndBuffer;\n    }\n  } else {\n    request_in_.move(data);\n\n    if (end_stream) {\n      request_in_.finish();\n    }\n\n    readToBuffer(*transcoder_->RequestOutput(), data);\n  }\n\n  if (checkIfTranscoderFailed(RcDetails::get().GrpcTranscodeFailed)) {\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus JsonTranscoderFilter::decodeTrailers(Http::RequestTrailerMap&) {\n  ASSERT(!error_);\n\n  if (!transcoder_) {\n    return Http::FilterTrailersStatus::Continue;\n  }\n\n  if (method_->request_type_is_http_body_) {\n    maybeSendHttpBodyRequestMessage();\n  } else {\n    request_in_.finish();\n\n    Buffer::OwnedImpl data;\n    readToBuffer(*transcoder_->RequestOutput(), data);\n\n    if (data.length()) {\n      decoder_callbacks_->addDecodedData(data, true);\n    }\n  }\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid JsonTranscoderFilter::setDecoderFilterCallbacks(\n    Http::StreamDecoderFilterCallbacks& callbacks) {\n  decoder_callbacks_ = &callbacks;\n}\n\nHttp::FilterHeadersStatus JsonTranscoderFilter::encodeHeaders(Http::ResponseHeaderMap& headers,\n                                                              bool end_stream) {\n  if (!Grpc::Common::isGrpcResponseHeaders(headers, end_stream)) {\n    error_ = true;\n  }\n\n  if (error_ || !transcoder_) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  response_headers_ = &headers;\n\n  if (end_stream) {\n    if (method_->descriptor_->server_streaming()) {\n      // When there is no body in a streaming response, a empty JSON array is\n      // returned by default. 
Set the content type correctly.\n      headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n    }\n\n    // In gRPC wire protocol, headers frame with end_stream is a trailers-only response.\n    // The return value from encodeTrailers is ignored since it is always continue.\n    doTrailers(headers);\n\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n\n  // In case of HttpBody in response - content type is unknown at this moment.\n  // So \"Continue\" only for regular streaming use case and StopIteration for\n  // all other cases (non streaming, streaming + httpBody)\n  if (method_->descriptor_->server_streaming() && !method_->response_type_is_http_body_) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\nHttp::FilterDataStatus JsonTranscoderFilter::encodeData(Buffer::Instance& data, bool end_stream) {\n  if (error_ || !transcoder_) {\n    return Http::FilterDataStatus::Continue;\n  }\n\n  has_body_ = true;\n\n  if (method_->response_type_is_http_body_) {\n    bool frame_processed = buildResponseFromHttpBodyOutput(*response_headers_, data);\n    if (!method_->descriptor_->server_streaming()) {\n      return Http::FilterDataStatus::StopIterationAndBuffer;\n    }\n    if (!http_body_response_headers_set_ && !frame_processed) {\n      return Http::FilterDataStatus::StopIterationAndBuffer;\n    }\n    return Http::FilterDataStatus::Continue;\n  }\n\n  response_in_.move(data);\n\n  if (end_stream) {\n    response_in_.finish();\n  }\n\n  readToBuffer(*transcoder_->ResponseOutput(), data);\n\n  if (!method_->descriptor_->server_streaming() && !end_stream) {\n    // Buffer until the response is complete.\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n  // TODO(lizan): Check ResponseStatus\n\n  return 
Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus\nJsonTranscoderFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) {\n  doTrailers(trailers);\n\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid JsonTranscoderFilter::doTrailers(Http::ResponseHeaderOrTrailerMap& headers_or_trailers) {\n  if (error_ || !transcoder_) {\n    return;\n  }\n\n  response_in_.finish();\n\n  const absl::optional<Grpc::Status::GrpcStatus> grpc_status =\n      Grpc::Common::getGrpcStatus(headers_or_trailers, true);\n  if (grpc_status && maybeConvertGrpcStatus(*grpc_status, headers_or_trailers)) {\n    return;\n  }\n\n  if (!method_->response_type_is_http_body_) {\n    Buffer::OwnedImpl data;\n    readToBuffer(*transcoder_->ResponseOutput(), data);\n    if (data.length()) {\n      encoder_callbacks_->addEncodedData(data, true);\n    }\n  }\n\n  // If there was no previous headers frame, this |trailers| map is our |response_headers_|,\n  // so there is no need to copy headers from one to the other.\n  const bool is_trailers_only_response = response_headers_ == &headers_or_trailers;\n  const bool is_server_streaming = method_->descriptor_->server_streaming();\n\n  if (is_server_streaming && !is_trailers_only_response) {\n    // Continue if headers were sent already.\n    return;\n  }\n\n  if (!grpc_status || grpc_status.value() == Grpc::Status::WellKnownGrpcStatus::InvalidCode) {\n    response_headers_->setStatus(enumToInt(Http::Code::ServiceUnavailable));\n  } else {\n    response_headers_->setStatus(Grpc::Utility::grpcToHttpStatus(grpc_status.value()));\n    if (!is_trailers_only_response) {\n      response_headers_->setGrpcStatus(grpc_status.value());\n    }\n  }\n\n  if (!is_trailers_only_response) {\n    // Copy the grpc-message header if it exists.\n    const Http::HeaderEntry* grpc_message_header = headers_or_trailers.GrpcMessage();\n    if (grpc_message_header) {\n      response_headers_->setGrpcMessage(grpc_message_header->value().getStringView());\n    
}\n  }\n\n  // remove Trailer headers if the client connection was http/1\n  if (encoder_callbacks_->streamInfo().protocol() < Http::Protocol::Http2) {\n    response_headers_->remove(trailerHeader());\n  }\n\n  if (!method_->descriptor_->server_streaming()) {\n    // Set content-length for non-streaming responses.\n    response_headers_->setContentLength(\n        encoder_callbacks_->encodingBuffer() ? encoder_callbacks_->encodingBuffer()->length() : 0);\n  }\n}\n\nvoid JsonTranscoderFilter::setEncoderFilterCallbacks(\n    Http::StreamEncoderFilterCallbacks& callbacks) {\n  encoder_callbacks_ = &callbacks;\n}\n\nbool JsonTranscoderFilter::checkIfTranscoderFailed(const std::string& details) {\n  const auto& request_status = transcoder_->RequestStatus();\n  if (!request_status.ok()) {\n    ENVOY_LOG(debug, \"Transcoding request error {}\", request_status.ToString());\n    error_ = true;\n    decoder_callbacks_->sendLocalReply(\n        Http::Code::BadRequest,\n        absl::string_view(request_status.error_message().data(),\n                          request_status.error_message().size()),\n        nullptr, absl::nullopt,\n        absl::StrCat(details, \"{\", MessageUtil::CodeEnumToString(request_status.code()), \"}\"));\n\n    return true;\n  }\n  return false;\n}\n\n// TODO(lizan): Incorporate watermarks to bound buffer sizes\nbool JsonTranscoderFilter::readToBuffer(Protobuf::io::ZeroCopyInputStream& stream,\n                                        Buffer::Instance& data) {\n  const void* out;\n  int size;\n  while (stream.Next(&out, &size)) {\n    data.add(out, size);\n\n    if (size == 0) {\n      return true;\n    }\n  }\n  return false;\n}\n\nvoid JsonTranscoderFilter::maybeSendHttpBodyRequestMessage() {\n  if (first_request_sent_ && request_data_.length() == 0) {\n    return;\n  }\n\n  Buffer::OwnedImpl message_payload;\n  message_payload.move(initial_request_data_);\n  HttpBodyUtils::appendHttpBodyEnvelope(message_payload, method_->request_body_field_path,\n  
                                      std::move(content_type_), request_data_.length());\n  content_type_.clear();\n  message_payload.move(request_data_);\n\n  Envoy::Grpc::Encoder().prependFrameHeader(Envoy::Grpc::GRPC_FH_DEFAULT, message_payload);\n\n  decoder_callbacks_->addDecodedData(message_payload, true);\n\n  first_request_sent_ = true;\n}\n\nbool JsonTranscoderFilter::buildResponseFromHttpBodyOutput(\n    Http::ResponseHeaderMap& response_headers, Buffer::Instance& data) {\n  std::vector<Grpc::Frame> frames;\n  decoder_.decode(data, frames);\n  if (frames.empty()) {\n    return false;\n  }\n\n  google::api::HttpBody http_body;\n  for (auto& frame : frames) {\n    if (frame.length_ > 0) {\n      http_body.Clear();\n      Buffer::ZeroCopyInputStreamImpl stream(std::move(frame.data_));\n      if (!HttpBodyUtils::parseMessageByFieldPath(&stream, method_->response_body_field_path,\n                                                  &http_body)) {\n        // TODO(euroelessar): Return error to client.\n        encoder_callbacks_->resetStream();\n        return true;\n      }\n      const auto& body = http_body.data();\n\n      data.add(body);\n\n      if (!method_->descriptor_->server_streaming()) {\n        // Non streaming case: single message with content type / length\n        response_headers.setContentType(http_body.content_type());\n        response_headers.setContentLength(body.size());\n        return true;\n      } else if (!http_body_response_headers_set_) {\n        // Streaming case: set content type only once from first HttpBody message\n        response_headers.setContentType(http_body.content_type());\n        http_body_response_headers_set_ = true;\n      }\n    }\n  }\n\n  return true;\n}\n\nbool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_status,\n                                                  Http::ResponseHeaderOrTrailerMap& trailers) {\n  if (!config_.convertGrpcStatus()) {\n    return false;\n  }\n\n  // 
Send a serialized status only if there was no body.\n  if (has_body_) {\n    return false;\n  }\n\n  if (grpc_status == Grpc::Status::WellKnownGrpcStatus::Ok ||\n      grpc_status == Grpc::Status::WellKnownGrpcStatus::InvalidCode) {\n    return false;\n  }\n\n  // TODO(mattklein123): The dynamic cast here is needed because ResponseHeaderOrTrailerMap is not\n  // a header map. This can likely be cleaned up.\n  auto status_details =\n      Grpc::Common::getGrpcStatusDetailsBin(dynamic_cast<Http::HeaderMap&>(trailers));\n  if (!status_details) {\n    // If no rpc.Status object was sent in the grpc-status-details-bin header,\n    // construct it from the grpc-status and grpc-message headers.\n    status_details.emplace();\n    status_details->set_code(grpc_status);\n\n    auto grpc_message_header = trailers.GrpcMessage();\n    if (grpc_message_header) {\n      auto message = grpc_message_header->value().getStringView();\n      status_details->set_message(message.data(), message.size());\n    }\n  }\n\n  std::string json_status;\n  auto translate_status = config_.translateProtoMessageToJson(*status_details, &json_status);\n  if (!translate_status.ok()) {\n    ENVOY_LOG(debug, \"Transcoding status error {}\", translate_status.ToString());\n    return false;\n  }\n\n  response_headers_->setStatus(Grpc::Utility::grpcToHttpStatus(grpc_status));\n\n  bool is_trailers_only_response = response_headers_ == &trailers;\n  if (is_trailers_only_response) {\n    // Drop the gRPC status headers, we already have them in the JSON body.\n    response_headers_->removeGrpcStatus();\n    response_headers_->removeGrpcMessage();\n    response_headers_->remove(Http::Headers::get().GrpcStatusDetailsBin);\n  }\n\n  // remove Trailer headers if the client connection was http/1\n  if (encoder_callbacks_->streamInfo().protocol() < Http::Protocol::Http2) {\n    response_headers_->remove(trailerHeader());\n  }\n\n  
response_headers_->setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n\n  response_headers_->setContentLength(json_status.length());\n\n  Buffer::OwnedImpl status_data(json_status);\n  encoder_callbacks_->addEncodedData(status_data, false);\n  return true;\n}\n\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/http/grpc_json_transcoder/transcoder_input_stream_impl.h\"\n\n#include \"grpc_transcoding/path_matcher.h\"\n#include \"grpc_transcoding/request_message_translator.h\"\n#include \"grpc_transcoding/transcoder.h\"\n#include \"grpc_transcoding/type_helper.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\n\n/**\n * VariableBinding specifies a value for a single field in the request message.\n * When transcoding HTTP/REST/JSON to gRPC/proto the request message is\n * constructed using the HTTP body and the variable bindings (specified through\n * request url).\n * See https://github.com/googleapis/googleapis/blob/master/google/api/http.proto\n * for details of variable binding.\n */\nstruct VariableBinding {\n  // The location of the field in the protobuf message, where the value\n  // needs to be inserted, e.g. 
\"shelf.theme\" would mean the \"theme\" field\n  // of the nested \"shelf\" message of the request protobuf message.\n  std::vector<std::string> field_path;\n  // The value to be inserted.\n  std::string value;\n};\n\nstruct MethodInfo {\n  const Protobuf::MethodDescriptor* descriptor_ = nullptr;\n  std::vector<const Protobuf::Field*> request_body_field_path;\n  std::vector<const Protobuf::Field*> response_body_field_path;\n  bool request_type_is_http_body_ = false;\n  bool response_type_is_http_body_ = false;\n};\nusing MethodInfoSharedPtr = std::shared_ptr<MethodInfo>;\n\nvoid createHttpBodyEnvelope(Buffer::Instance& output,\n                            const std::vector<const Protobuf::Field*>& request_body_field_path,\n                            std::string content_type, uint64_t content_length);\n\n/**\n * Global configuration for the gRPC JSON transcoder filter. Factory for the Transcoder interface.\n */\nclass JsonTranscoderConfig : public Logger::Loggable<Logger::Id::config> {\npublic:\n  /**\n   * constructor that loads protobuf descriptors from the file specified in the JSON config.\n   * and construct a path matcher for HTTP path bindings.\n   */\n  JsonTranscoderConfig(\n      const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder&\n          proto_config,\n      Api::Api& api);\n\n  /**\n   * Create an instance of Transcoder interface based on incoming request\n   * @param headers headers received from decoder\n   * @param request_input a ZeroCopyInputStream reading from downstream request body\n   * @param response_input a TranscoderInputStream reading from upstream response body\n   * @param transcoder output parameter for the instance of Transcoder interface\n   * @param method_descriptor output parameter for the method looked up from config\n   * @return status whether the Transcoder instance are successfully created or not\n   */\n  ProtobufUtil::Status\n  createTranscoder(const Http::RequestHeaderMap& headers,\n    
               Protobuf::io::ZeroCopyInputStream& request_input,\n                   google::grpc::transcoding::TranscoderInputStream& response_input,\n                   std::unique_ptr<google::grpc::transcoding::Transcoder>& transcoder,\n                   MethodInfoSharedPtr& method_info);\n\n  /**\n   * Converts an arbitrary protobuf message to JSON.\n   */\n  ProtobufUtil::Status translateProtoMessageToJson(const Protobuf::Message& message,\n                                                   std::string* json_out);\n\n  /**\n   * If true, skip clearing the route cache after the incoming request has been modified.\n   * This allows Envoy to select the upstream cluster based on the incoming request\n   * rather than the outgoing.\n   */\n  bool matchIncomingRequestInfo() const;\n\n  /**\n   * If true, when trailer indicates a gRPC error and there was no HTTP body,\n   * make google.rpc.Status out of gRPC status headers and use it as JSON body.\n   */\n  bool convertGrpcStatus() const;\n\nprivate:\n  /**\n   * Convert method descriptor to RequestInfo that needed for transcoding library\n   */\n  ProtobufUtil::Status methodToRequestInfo(const MethodInfoSharedPtr& method_info,\n                                           google::grpc::transcoding::RequestInfo* info);\n\nprivate:\n  void addFileDescriptor(const Protobuf::FileDescriptorProto& file);\n  void addBuiltinSymbolDescriptor(const std::string& symbol_name);\n  ProtobufUtil::Status resolveField(const Protobuf::Descriptor* descriptor,\n                                    const std::string& field_path_str,\n                                    std::vector<const Protobuf::Field*>* field_path,\n                                    bool* is_http_body);\n  ProtobufUtil::Status createMethodInfo(const Protobuf::MethodDescriptor* descriptor,\n                                        const google::api::HttpRule& http_rule,\n                                        MethodInfoSharedPtr& method_info);\n\n  
Protobuf::DescriptorPool descriptor_pool_;\n  google::grpc::transcoding::PathMatcherPtr<MethodInfoSharedPtr> path_matcher_;\n  std::unique_ptr<google::grpc::transcoding::TypeHelper> type_helper_;\n  Protobuf::util::JsonPrintOptions print_options_;\n\n  bool match_incoming_request_route_{false};\n  bool ignore_unknown_query_parameters_{false};\n  bool convert_grpc_status_{false};\n};\n\nusing JsonTranscoderConfigSharedPtr = std::shared_ptr<JsonTranscoderConfig>;\n\n/**\n * The filter instance for gRPC JSON transcoder.\n */\nclass JsonTranscoderFilter : public Http::StreamFilter, public Logger::Loggable<Logger::Id::http2> {\npublic:\n  JsonTranscoderFilter(JsonTranscoderConfig& config);\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override;\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override;\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\nprivate:\n  bool checkIfTranscoderFailed(const std::string& 
details);\n  bool readToBuffer(Protobuf::io::ZeroCopyInputStream& stream, Buffer::Instance& data);\n  void maybeSendHttpBodyRequestMessage();\n  /**\n   * Builds response from HttpBody protobuf.\n   * Returns true if at least one gRPC frame has processed.\n   */\n  bool buildResponseFromHttpBodyOutput(Http::ResponseHeaderMap& response_headers,\n                                       Buffer::Instance& data);\n  bool maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_status,\n                              Http::ResponseHeaderOrTrailerMap& trailers);\n  bool hasHttpBodyAsOutputType();\n  void doTrailers(Http::ResponseHeaderOrTrailerMap& headers_or_trailers);\n\n  JsonTranscoderConfig& config_;\n  std::unique_ptr<google::grpc::transcoding::Transcoder> transcoder_;\n  TranscoderInputStreamImpl request_in_;\n  TranscoderInputStreamImpl response_in_;\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_{nullptr};\n  Http::StreamEncoderFilterCallbacks* encoder_callbacks_{nullptr};\n  MethodInfoSharedPtr method_;\n  Http::ResponseHeaderMap* response_headers_{nullptr};\n  Grpc::Decoder decoder_;\n\n  // Data of the initial request message, initialized from query arguments, path, etc.\n  Buffer::OwnedImpl initial_request_data_;\n  Buffer::OwnedImpl request_data_;\n  bool first_request_sent_{false};\n  std::string content_type_;\n\n  bool error_{false};\n  bool has_body_{false};\n  bool http_body_response_headers_set_{false};\n};\n\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_json_transcoder/transcoder_input_stream_impl.cc",
    "content": "#include \"extensions/filters/http/grpc_json_transcoder/transcoder_input_stream_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\n\nint64_t TranscoderInputStreamImpl::BytesAvailable() const { return buffer_->length() - position_; }\n\nbool TranscoderInputStreamImpl::Finished() const { return finished_; }\n\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_json_transcoder/transcoder_input_stream_impl.h",
    "content": "#pragma once\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n\n#include \"grpc_transcoding/transcoder_input_stream.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\n\nclass TranscoderInputStreamImpl : public Buffer::ZeroCopyInputStreamImpl,\n                                  public google::grpc::transcoding::TranscoderInputStream {\npublic:\n  // TranscoderInputStream\n  int64_t BytesAvailable() const override;\n  bool Finished() const override;\n};\n\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_stats/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP filter that implements gRPC telemetry\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"grpc_stats_filter.cc\"],\n    hdrs = [\"grpc_stats_filter.h\"],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/grpc:context_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_stats/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc",
    "content": "#include \"extensions/filters/http/grpc_stats/grpc_stats_filter.h\"\n\n#include \"envoy/extensions/filters/http/grpc_stats/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_stats/v3/config.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/common.h\"\n#include \"common/grpc/context_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcStats {\n\nnamespace {\n\n// A map from gRPC service/method name to symbolized stat names for the service/method.\n//\n// The expected usage pattern is that the map is populated once, and can then be queried lock-free\n// as long as it isn't being modified.\nclass GrpcServiceMethodToRequestNamesMap {\npublic:\npublic:\n  // Construct a map populated with the services/methods in method_list.\n  GrpcServiceMethodToRequestNamesMap(Stats::SymbolTable& symbol_table,\n                                     const envoy::config::core::v3::GrpcMethodList& method_list)\n      : stat_name_pool_(symbol_table), map_(populate(method_list)) {}\n\n  absl::optional<Grpc::Context::RequestStatNames>\n  lookup(const Grpc::Common::RequestNames& request_names) const {\n    auto it = map_.find(request_names);\n    if (it != map_.end()) {\n      return it->second;\n    }\n\n    return {};\n  }\n\nprivate:\n  using OwningKey = std::tuple<std::string, std::string>;\n  using ViewKey = Grpc::Common::RequestNames;\n\n  class MapHash {\n  private:\n    // Use the same type for hashing all variations to ensure the same hash value from all source\n    // types.\n    using ViewTuple = std::tuple<absl::string_view, absl::string_view>;\n    static uint64_t hash(const ViewTuple& key) { return absl::Hash<ViewTuple>()(key); }\n\n  public:\n    using is_transparent = void;\n\n    
uint64_t operator()(const OwningKey& key) const { return hash(key); }\n    uint64_t operator()(const ViewKey& key) const {\n      return hash(ViewTuple(key.service_, key.method_));\n    }\n  };\n\n  struct MapEq {\n    using is_transparent = void;\n    bool operator()(const OwningKey& left, const OwningKey& right) const { return left == right; }\n    bool operator()(const OwningKey& left, const ViewKey& right) const {\n      return left == std::make_tuple(right.service_, right.method_);\n    }\n  };\n  using MapType = absl::flat_hash_map<OwningKey, Grpc::Context::RequestStatNames, MapHash, MapEq>;\n\n  // Helper for generating a populated MapType so that `map_` can be const.\n  MapType populate(const envoy::config::core::v3::GrpcMethodList& method_list) {\n    MapType map;\n    for (const auto& service : method_list.services()) {\n      Stats::StatName stat_name_service = stat_name_pool_.add(service.name());\n\n      for (const auto& method_name : service.method_names()) {\n        Stats::StatName stat_name_method = stat_name_pool_.add(method_name);\n        map[OwningKey(service.name(), method_name)] =\n            Grpc::Context::RequestStatNames{stat_name_service, stat_name_method};\n      }\n    }\n    return map;\n  }\n\n  Stats::StatNamePool stat_name_pool_;\n  const MapType map_;\n};\n\nstruct Config {\n  Config(const envoy::extensions::filters::http::grpc_stats::v3::FilterConfig& proto_config,\n         Server::Configuration::FactoryContext& context)\n      : context_(context.grpcContext()), emit_filter_state_(proto_config.emit_filter_state()),\n        enable_upstream_stats_(proto_config.enable_upstream_stats()) {\n\n    switch (proto_config.per_method_stat_specifier_case()) {\n    case envoy::extensions::filters::http::grpc_stats::v3::FilterConfig::\n        PER_METHOD_STAT_SPECIFIER_NOT_SET:\n    case envoy::extensions::filters::http::grpc_stats::v3::FilterConfig::kStatsForAllMethods:\n      if (proto_config.has_stats_for_all_methods()) {\n        
stats_for_all_methods_ = proto_config.stats_for_all_methods().value();\n      } else {\n        // Default for when \"grpc_stats_filter_enable_stats_for_all_methods_by_default\" isn't\n        // set.\n        //\n        // This will flip to false after one release.\n        const bool runtime_feature_default = true;\n\n        const char* runtime_key = \"envoy.deprecated_features.grpc_stats_filter_enable_\"\n                                  \"stats_for_all_methods_by_default\";\n\n        stats_for_all_methods_ = context.runtime().snapshot().deprecatedFeatureEnabled(\n            runtime_key, runtime_feature_default);\n\n        if (stats_for_all_methods_) {\n          ENVOY_LOG_MISC(warn,\n                         \"Using deprecated default value for \"\n                         \"'envoy.extensions.filters.http.grpc_stats.v3.FilterConfig.stats_for_all_\"\n                         \"methods'. The default for this field will become false in a future \"\n                         \"release. To retain this behavior, set this field to true in your \"\n                         \"configuration. 
A short-term workaround of setting runtime configuration \"\n                         \"{} to true can be used if the configuration cannot be changed.\",\n                         runtime_key);\n        }\n      }\n      break;\n\n    case envoy::extensions::filters::http::grpc_stats::v3::FilterConfig::\n        kIndividualMethodStatsAllowlist:\n      allowlist_.emplace(context.scope().symbolTable(),\n                         proto_config.individual_method_stats_allowlist());\n      break;\n    }\n  }\n  Grpc::Context& context_;\n  const bool emit_filter_state_;\n  const bool enable_upstream_stats_;\n  bool stats_for_all_methods_{false};\n  absl::optional<GrpcServiceMethodToRequestNamesMap> allowlist_;\n};\nusing ConfigConstSharedPtr = std::shared_ptr<const Config>;\n\nclass GrpcStatsFilter : public Http::PassThroughFilter {\npublic:\n  GrpcStatsFilter(ConfigConstSharedPtr config) : config_(config) {}\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override {\n    grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers);\n    if (grpc_request_) {\n      cluster_ = decoder_callbacks_->clusterInfo();\n      if (cluster_) {\n        if (config_->stats_for_all_methods_) {\n          // Get dynamically-allocated Context::RequestStatNames from the context.\n          request_names_ = config_->context_.resolveDynamicServiceAndMethod(headers.Path());\n          do_stat_tracking_ = request_names_.has_value();\n        } else {\n          // This case handles both proto_config.stats_for_all_methods() == false,\n          // and proto_config.has_individual_method_stats_allowlist(). 
This works\n          // because proto_config.stats_for_all_methods() == false results in\n          // an empty allowlist, which exactly matches the behavior specified for\n          // this configuration.\n          //\n          // Resolve the service and method to a string_view, then get\n          // the Context::RequestStatNames out of the pre-allocated list that\n          // can be produced with the allowlist being present.\n          absl::optional<Grpc::Common::RequestNames> request_names =\n              Grpc::Common::resolveServiceAndMethod(headers.Path());\n\n          if (request_names) {\n            // Do stat tracking as long as this looks like a grpc service/method,\n            // even if it isn't in the allowlist. Things not in the allowlist\n            // are counted with a stat with no service/method in the name.\n            do_stat_tracking_ = true;\n\n            // If the entry is not found in the allowlist, this will return\n            // an empty optional; each of the `charge` functions on the context\n            // will interpret an empty optional for this value to mean that the\n            // service.method prefix on the stat should be omitted.\n            if (config_->allowlist_) {\n              request_names_ = config_->allowlist_->lookup(*request_names);\n            }\n          }\n        }\n      }\n    }\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool) override {\n    if (grpc_request_) {\n      uint64_t delta = request_counter_.inspect(data);\n      if (delta > 0) {\n        maybeWriteFilterState();\n        if (doStatTracking()) {\n          config_->context_.chargeRequestMessageStat(*cluster_, request_names_, delta);\n        }\n      }\n    }\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override {\n    
grpc_response_ = Grpc::Common::isGrpcResponseHeaders(headers, end_stream);\n    if (doStatTracking()) {\n      config_->context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, request_names_,\n                                   headers.GrpcStatus());\n      if (end_stream) {\n        maybeChargeUpstreamStat();\n      }\n    }\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool) override {\n    if (grpc_response_) {\n      uint64_t delta = response_counter_.inspect(data);\n      if (delta > 0) {\n        maybeWriteFilterState();\n        if (doStatTracking()) {\n          config_->context_.chargeResponseMessageStat(*cluster_, request_names_, delta);\n        }\n      }\n    }\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override {\n    if (doStatTracking()) {\n      config_->context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, request_names_,\n                                   trailers.GrpcStatus());\n      maybeChargeUpstreamStat();\n    }\n    return Http::FilterTrailersStatus::Continue;\n  }\n\n  bool doStatTracking() const { return do_stat_tracking_; }\n\n  void maybeWriteFilterState() {\n    if (!config_->emit_filter_state_) {\n      return;\n    }\n    if (filter_object_ == nullptr) {\n      auto state = std::make_unique<GrpcStatsObject>();\n      filter_object_ = state.get();\n      decoder_callbacks_->streamInfo().filterState()->setData(\n          HttpFilterNames::get().GrpcStats, std::move(state),\n          StreamInfo::FilterState::StateType::Mutable,\n          StreamInfo::FilterState::LifeSpan::FilterChain);\n    }\n    filter_object_->request_message_count = request_counter_.frameCount();\n    filter_object_->response_message_count = response_counter_.frameCount();\n  }\n\n  void maybeChargeUpstreamStat() {\n    if (config_->enable_upstream_stats_ &&\n        
decoder_callbacks_->streamInfo().lastUpstreamTxByteSent().has_value() &&\n        decoder_callbacks_->streamInfo().lastUpstreamRxByteReceived().has_value()) {\n      std::chrono::milliseconds chrono_duration =\n          std::chrono::duration_cast<std::chrono::milliseconds>(\n              decoder_callbacks_->streamInfo().lastUpstreamRxByteReceived().value() -\n              decoder_callbacks_->streamInfo().lastUpstreamTxByteSent().value());\n      config_->context_.chargeUpstreamStat(*cluster_, request_names_, chrono_duration);\n    }\n  }\n\nprivate:\n  ConfigConstSharedPtr config_;\n  GrpcStatsObject* filter_object_{};\n  bool do_stat_tracking_{false};\n  bool grpc_request_{false};\n  bool grpc_response_{false};\n  Grpc::FrameInspector request_counter_;\n  Grpc::FrameInspector response_counter_;\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n  absl::optional<Grpc::Context::RequestStatNames> request_names_;\n}; // class GrpcStatsFilter\n\n} // namespace\n\nHttp::FilterFactoryCb GrpcStatsFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::grpc_stats::v3::FilterConfig& proto_config,\n    const std::string&, Server::Configuration::FactoryContext& factory_context) {\n\n  ConfigConstSharedPtr config = std::make_shared<const Config>(proto_config, factory_context);\n\n  return [config](Http::FilterChainFactoryCallbacks& callbacks) {\n    callbacks.addStreamFilter(std::make_shared<GrpcStatsFilter>(config));\n  };\n}\n\n/**\n * Static registration for the gRPC stats filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(GrpcStatsFilterConfigFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace GrpcStats\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_stats/grpc_stats_filter.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/grpc_stats/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_stats/v3/config.pb.validate.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcStats {\n\n// Filter state exposing the gRPC message counts.\nstruct GrpcStatsObject : public StreamInfo::FilterState::Object {\n  uint64_t request_message_count = 0;\n  uint64_t response_message_count = 0;\n\n  ProtobufTypes::MessagePtr serializeAsProto() const override {\n    auto msg = std::make_unique<envoy::extensions::filters::http::grpc_stats::v3::FilterObject>();\n    msg->set_request_message_count(request_message_count);\n    msg->set_response_message_count(response_message_count);\n    return msg;\n  }\n\n  absl::optional<std::string> serializeAsString() const override {\n    return absl::StrCat(request_message_count, \",\", response_message_count);\n  }\n};\n\nclass GrpcStatsFilterConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::grpc_stats::v3::FilterConfig> {\npublic:\n  GrpcStatsFilterConfigFactory() : FactoryBase(HttpFilterNames::get().GrpcStats) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::grpc_stats::v3::FilterConfig& proto_config,\n      const std::string&, Server::Configuration::FactoryContext&) override;\n};\n\n} // namespace GrpcStats\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_web/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP filter that implements the grpc-web protocol (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md)\n# Public docs: docs/root/configuration/http_filters/grpc_web_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"grpc_web_filter_lib\",\n    srcs = [\"grpc_web_filter.cc\"],\n    hdrs = [\"grpc_web_filter.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:base64_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:context_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/grpc_web:grpc_web_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_web/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_web/config.cc",
    "content": "#include \"extensions/filters/http/grpc_web/config.h\"\n\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/grpc_web/grpc_web_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcWeb {\n\nHttp::FilterFactoryCb GrpcWebFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::grpc_web::v3::GrpcWeb&, const std::string&,\n    Server::Configuration::FactoryContext& factory_context) {\n  return [&factory_context](Http::FilterChainFactoryCallbacks& callbacks) {\n    callbacks.addStreamFilter(std::make_shared<GrpcWebFilter>(factory_context.grpcContext()));\n  };\n}\n\n/**\n * Static registration for the gRPC-Web filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(GrpcWebFilterConfig,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.grpc_web\"};\n\n} // namespace GrpcWeb\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_web/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/grpc_web/v3/grpc_web.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_web/v3/grpc_web.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcWeb {\n\nclass GrpcWebFilterConfig\n    : public Common::FactoryBase<envoy::extensions::filters::http::grpc_web::v3::GrpcWeb> {\npublic:\n  GrpcWebFilterConfig() : FactoryBase(HttpFilterNames::get().GrpcWeb) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::grpc_web::v3::GrpcWeb& proto_config,\n      const std::string& stats_prefix,\n      Server::Configuration::FactoryContext& factory_context) override;\n};\n\n} // namespace GrpcWeb\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_web/grpc_web_filter.cc",
    "content": "#include \"extensions/filters/http/grpc_web/grpc_web_filter.h\"\n\n#ifndef WIN32\n#include <arpa/inet.h>\n#endif\n\n#include \"common/common/assert.h\"\n#include \"common/common/base64.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/context_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcWeb {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    accept_handle(Http::CustomHeaders::get().Accept);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    grpc_accept_encoding_handle(Http::CustomHeaders::get().GrpcAcceptEncoding);\n\nstruct RcDetailsValues {\n  // The grpc web filter couldn't decode the data as the size wasn't a multiple of 4.\n  const std::string GrpcDecodeFailedDueToSize = \"grpc_base_64_decode_failed_bad_size\";\n  // The grpc web filter couldn't decode the data provided.\n  const std::string GrpcDecodeFailedDueToData = \"grpc_base_64_decode_failed\";\n};\nusing RcDetails = ConstSingleton<RcDetailsValues>;\n\n// Bit mask denotes a trailers frame of gRPC-Web.\nconst uint8_t GrpcWebFilter::GRPC_WEB_TRAILER = 0b10000000;\n\n// Supported gRPC-Web content-types.\nconst absl::flat_hash_set<std::string>& GrpcWebFilter::gRpcWebContentTypes() const {\n  static const absl::flat_hash_set<std::string>* types = new absl::flat_hash_set<std::string>(\n      {Http::Headers::get().ContentTypeValues.GrpcWeb,\n       Http::Headers::get().ContentTypeValues.GrpcWebProto,\n       Http::Headers::get().ContentTypeValues.GrpcWebText,\n       Http::Headers::get().ContentTypeValues.GrpcWebTextProto});\n  return *types;\n}\n\nbool GrpcWebFilter::isGrpcWebRequest(const Http::RequestHeaderMap& headers) {\n  if (!headers.Path()) {\n    return false;\n  }\n  const Http::HeaderEntry* content_type = 
headers.ContentType();\n  if (content_type != nullptr) {\n    return gRpcWebContentTypes().count(content_type->value().getStringView()) > 0;\n  }\n  return false;\n}\n\n// Implements StreamDecoderFilter.\n// TODO(fengli): Implements the subtypes of gRPC-Web content-type other than proto, like +json, etc.\nHttp::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  if (!isGrpcWebRequest(headers)) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  is_grpc_web_request_ = true;\n\n  // Remove content-length header since it represents http1.1 payload size, not the sum of the h2\n  // DATA frame payload lengths. https://http2.github.io/http2-spec/#malformed This effectively\n  // switches to chunked encoding which is the default for h2\n  headers.removeContentLength();\n  setupStatTracking(headers);\n\n  const absl::string_view content_type = headers.getContentTypeValue();\n  if (content_type == Http::Headers::get().ContentTypeValues.GrpcWebText ||\n      content_type == Http::Headers::get().ContentTypeValues.GrpcWebTextProto) {\n    // Checks whether gRPC-Web client is sending base64 encoded request.\n    is_text_request_ = true;\n  }\n  headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc);\n\n  const absl::string_view accept = headers.getInlineValue(accept_handle.handle());\n  if (accept == Http::Headers::get().ContentTypeValues.GrpcWebText ||\n      accept == Http::Headers::get().ContentTypeValues.GrpcWebTextProto) {\n    // Checks whether gRPC-Web client is asking for base64 encoded response.\n    is_text_response_ = true;\n  }\n\n  // Adds te:trailers to upstream HTTP2 request. It's required for gRPC.\n  headers.setReferenceTE(Http::Headers::get().TEValues.Trailers);\n  // Adds grpc-accept-encoding:identity,deflate,gzip. 
It's required for gRPC.\n  headers.setReferenceInline(grpc_accept_encoding_handle.handle(),\n                             Http::CustomHeaders::get().GrpcAcceptEncodingValues.Default);\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus GrpcWebFilter::decodeData(Buffer::Instance& data, bool end_stream) {\n  if (!is_grpc_web_request_) {\n    return Http::FilterDataStatus::Continue;\n  }\n\n  if (!is_text_request_) {\n    // No additional transcoding required if gRPC client is sending binary request.\n    return Http::FilterDataStatus::Continue;\n  }\n\n  // Parse application/grpc-web-text format.\n  const uint64_t available = data.length() + decoding_buffer_.length();\n  if (end_stream) {\n    if (available == 0) {\n      return Http::FilterDataStatus::Continue;\n    }\n    if (available % 4 != 0) {\n      // Client end stream with invalid base64. Note, base64 padding is mandatory.\n      decoder_callbacks_->sendLocalReply(Http::Code::BadRequest,\n                                         \"Bad gRPC-web request, invalid base64 data.\", nullptr,\n                                         absl::nullopt, RcDetails::get().GrpcDecodeFailedDueToSize);\n      return Http::FilterDataStatus::StopIterationNoBuffer;\n    }\n  } else if (available < 4) {\n    decoding_buffer_.move(data);\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n\n  const uint64_t needed = available / 4 * 4 - decoding_buffer_.length();\n  decoding_buffer_.move(data, needed);\n  const std::string decoded = Base64::decode(\n      std::string(static_cast<const char*>(decoding_buffer_.linearize(decoding_buffer_.length())),\n                  decoding_buffer_.length()));\n  if (decoded.empty()) {\n    // Error happened when decoding base64.\n    decoder_callbacks_->sendLocalReply(Http::Code::BadRequest,\n                                       \"Bad gRPC-web request, invalid base64 data.\", nullptr,\n                                       absl::nullopt, 
RcDetails::get().GrpcDecodeFailedDueToData);\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n\n  decoding_buffer_.drain(decoding_buffer_.length());\n  decoding_buffer_.move(data);\n  data.add(decoded);\n  // Any block of 4 bytes or more should have been decoded and passed through.\n  ASSERT(decoding_buffer_.length() < 4);\n  return Http::FilterDataStatus::Continue;\n}\n\n// Implements StreamEncoderFilter.\nHttp::FilterHeadersStatus GrpcWebFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) {\n  if (!is_grpc_web_request_) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  if (doStatTracking()) {\n    chargeStat(headers);\n  }\n  if (is_text_response_) {\n    headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.GrpcWebTextProto);\n  } else {\n    headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.GrpcWebProto);\n  }\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus GrpcWebFilter::encodeData(Buffer::Instance& data, bool) {\n  if (!is_grpc_web_request_) {\n    return Http::FilterDataStatus::Continue;\n  }\n\n  if (!is_text_response_) {\n    // No additional transcoding required if gRPC-Web client asked for binary response.\n    return Http::FilterDataStatus::Continue;\n  }\n\n  // The decoder always consumes and drains the given buffer. 
Incomplete data frame is buffered\n  // inside the decoder.\n  std::vector<Grpc::Frame> frames;\n  decoder_.decode(data, frames);\n  if (frames.empty()) {\n    // We don't have enough data to decode for one single frame, stop iteration until more data\n    // comes in.\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n\n  // Encodes the decoded gRPC frames with base64.\n  for (auto& frame : frames) {\n    Buffer::OwnedImpl temp;\n    temp.add(&frame.flags_, 1);\n    const uint32_t length = htonl(frame.length_);\n    temp.add(&length, 4);\n    if (frame.length_ > 0) {\n      temp.add(*frame.data_);\n    }\n    data.add(Base64::encode(temp, temp.length()));\n  }\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus GrpcWebFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) {\n  if (!is_grpc_web_request_) {\n    return Http::FilterTrailersStatus::Continue;\n  }\n\n  if (doStatTracking()) {\n    chargeStat(trailers);\n  }\n\n  // Trailers are expected to come all in once, and will be encoded into one single trailers frame.\n  // Trailers in the trailers frame are separated by CRLFs.\n  Buffer::OwnedImpl temp;\n  trailers.iterate([&temp](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    temp.add(header.key().getStringView().data(), header.key().size());\n    temp.add(\":\");\n    temp.add(header.value().getStringView().data(), header.value().size());\n    temp.add(\"\\r\\n\");\n    return Http::HeaderMap::Iterate::Continue;\n  });\n\n  // Clears out the trailers so they don't get added since it is now in the body.\n  trailers.clear();\n  Buffer::OwnedImpl buffer;\n  // Adds the trailers frame head.\n  buffer.add(&GRPC_WEB_TRAILER, 1);\n  // Adds the trailers frame length.\n  const uint32_t length = htonl(temp.length());\n  buffer.add(&length, 4);\n  buffer.move(temp);\n  if (is_text_response_) {\n    Buffer::OwnedImpl encoded(Base64::encode(buffer, buffer.length()));\n    
encoder_callbacks_->addEncodedData(encoded, true);\n  } else {\n    encoder_callbacks_->addEncodedData(buffer, true);\n  }\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid GrpcWebFilter::setupStatTracking(const Http::RequestHeaderMap& headers) {\n  cluster_ = decoder_callbacks_->clusterInfo();\n  if (!cluster_) {\n    return;\n  }\n  request_stat_names_ = context_.resolveDynamicServiceAndMethod(headers.Path());\n}\n\nvoid GrpcWebFilter::chargeStat(const Http::ResponseHeaderOrTrailerMap& headers) {\n  context_.chargeStat(*cluster_, Grpc::Context::Protocol::GrpcWeb, *request_stat_names_,\n                      headers.GrpcStatus());\n}\n\n} // namespace GrpcWeb\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/grpc_web/grpc_web_filter.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/non_copyable.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/context_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcWeb {\n\n/**\n * See docs/configuration/http_filters/grpc_web_filter.rst\n */\nclass GrpcWebFilter : public Http::StreamFilter, NonCopyable {\npublic:\n  explicit GrpcWebFilter(Grpc::Context& context) : context_(context) {}\n  ~GrpcWebFilter() override = default;\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Implements StreamDecoderFilter.\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    decoder_callbacks_ = &callbacks;\n  }\n\n  // Implements StreamEncoderFilter.\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override;\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override {\n    encoder_callbacks_ = &callbacks;\n  }\n\n  bool doStatTracking() const { return request_stat_names_.has_value(); }\n\nprivate:\n  friend class GrpcWebFilterTest;\n\n  void 
chargeStat(const Http::ResponseHeaderOrTrailerMap& headers);\n  void setupStatTracking(const Http::RequestHeaderMap& headers);\n  bool isGrpcWebRequest(const Http::RequestHeaderMap& headers);\n\n  static const uint8_t GRPC_WEB_TRAILER;\n  const absl::flat_hash_set<std::string>& gRpcWebContentTypes() const;\n\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_{};\n  Http::StreamEncoderFilterCallbacks* encoder_callbacks_{};\n  bool is_text_request_{};\n  bool is_text_response_{};\n  Buffer::OwnedImpl decoding_buffer_;\n  Grpc::Decoder decoder_;\n  absl::optional<Grpc::Context::RequestStatNames> request_stat_names_;\n  bool is_grpc_web_request_{};\n  Grpc::Context& context_;\n};\n\n} // namespace GrpcWeb\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/gzip/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that performs gzip compression\n# Public docs: docs/root/configuration/http_filters/gzip_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"gzip_filter_lib\",\n    srcs = [\"gzip_filter.cc\"],\n    hdrs = [\"gzip_filter.h\"],\n    deps = [\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/compression/gzip/compressor:compressor_lib\",\n        \"//source/extensions/filters/http/common/compressor:compressor_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/gzip/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/gzip:gzip_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/gzip/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/gzip/config.cc",
    "content": "#include \"extensions/filters/http/gzip/config.h\"\n\n#include \"extensions/filters/http/gzip/gzip_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Gzip {\n\nHttp::FilterFactoryCb GzipFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::gzip::v3::Gzip& proto_config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n  // This will flip to false eventually.\n  const bool runtime_feature_default = true;\n  const char runtime_key[] = \"envoy.deprecated_features.allow_deprecated_gzip_http_filter\";\n  const std::string warn_message =\n      \"Using deprecated extension 'envoy.extensions.filters.http.gzip'. This \"\n      \"extension will be removed from Envoy soon. Please use \"\n      \"'envoy.extensions.filters.http.compressor' instead.\";\n\n  if (context.runtime().snapshot().deprecatedFeatureEnabled(runtime_key, runtime_feature_default)) {\n    ENVOY_LOG_MISC(warn, \"{}\", warn_message);\n  } else {\n    throw EnvoyException(\n        warn_message +\n        \" If continued use of this extension is absolutely necessary, see \"\n        \"https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime\"\n        \"#using-runtime-overrides-for-deprecated-features for how to apply a temporary and \"\n        \"highly discouraged override.\");\n  }\n\n  Common::Compressors::CompressorFilterConfigSharedPtr config = std::make_shared<GzipFilterConfig>(\n      proto_config, stats_prefix, context.scope(), context.runtime());\n  return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<Common::Compressors::CompressorFilter>(config));\n  };\n}\n\n/**\n * Static registration for the gzip filter. 
@see NamedHttpFilterConfigFactory.\n */\nREGISTER_FACTORY(GzipFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.gzip\"};\n\n} // namespace Gzip\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/gzip/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/gzip/v3/gzip.pb.h\"\n#include \"envoy/extensions/filters/http/gzip/v3/gzip.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Gzip {\n\n/**\n * Config registration for the gzip filter. @see NamedHttpFilterConfigFactory.\n */\nclass GzipFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::gzip::v3::Gzip> {\npublic:\n  GzipFilterFactory() : FactoryBase(HttpFilterNames::get().EnvoyGzip) {}\n\nprivate:\n  Http::FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const envoy::extensions::filters::http::gzip::v3::Gzip& config,\n                                    const std::string& stats_prefix,\n                                    Server::Configuration::FactoryContext& context) override;\n};\n\nDECLARE_FACTORY(GzipFilterFactory);\n\n} // namespace Gzip\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/gzip/gzip_filter.cc",
    "content": "#include \"extensions/filters/http/gzip/gzip_filter.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Gzip {\n\nnamespace {\n// Default zlib memory level.\nconst uint64_t DefaultMemoryLevel = 5;\n\n// Default and maximum compression window size.\nconst uint64_t DefaultWindowBits = 12;\n\n// When logical OR'ed to window bits, this sets a gzip header and trailer around the compressed\n// data.\nconst uint64_t GzipHeaderValue = 16;\n\n} // namespace\n\nGzipFilterConfig::GzipFilterConfig(const envoy::extensions::filters::http::gzip::v3::Gzip& gzip,\n                                   const std::string& stats_prefix, Stats::Scope& scope,\n                                   Runtime::Loader& runtime)\n    : CompressorFilterConfig(compressorConfig(gzip), stats_prefix + \"gzip.\", scope, runtime,\n                             Http::CustomHeaders::get().ContentEncodingValues.Gzip),\n      compression_level_(compressionLevelEnum(gzip.compression_level())),\n      compression_strategy_(compressionStrategyEnum(gzip.compression_strategy())),\n      memory_level_(memoryLevelUint(gzip.memory_level().value())),\n      window_bits_(windowBitsUint(gzip.window_bits().value())),\n      chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, 4096)) {}\n\nEnvoy::Compression::Compressor::CompressorPtr GzipFilterConfig::makeCompressor() {\n  auto compressor =\n      std::make_unique<Compression::Gzip::Compressor::ZlibCompressorImpl>(chunk_size_);\n  compressor->init(compressionLevel(), compressionStrategy(), windowBits(), memoryLevel());\n  return compressor;\n}\n\nCompression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel\nGzipFilterConfig::compressionLevelEnum(\n    envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::Enum 
compression_level) {\n  switch (compression_level) {\n  case envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::BEST:\n    return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best;\n  case envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::SPEED:\n    return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed;\n  default:\n    return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard;\n  }\n}\n\nCompression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy\nGzipFilterConfig::compressionStrategyEnum(\n    envoy::extensions::filters::http::gzip::v3::Gzip::CompressionStrategy compression_strategy) {\n  switch (compression_strategy) {\n  case envoy::extensions::filters::http::gzip::v3::Gzip::RLE:\n    return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle;\n  case envoy::extensions::filters::http::gzip::v3::Gzip::FILTERED:\n    return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered;\n  case envoy::extensions::filters::http::gzip::v3::Gzip::HUFFMAN:\n    return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman;\n  default:\n    return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard;\n  }\n}\n\nuint64_t GzipFilterConfig::memoryLevelUint(Protobuf::uint32 level) {\n  return level > 0 ? level : DefaultMemoryLevel;\n}\n\nuint64_t GzipFilterConfig::windowBitsUint(Protobuf::uint32 window_bits) {\n  return (window_bits > 0 ? 
window_bits : DefaultWindowBits) | GzipHeaderValue;\n}\n\nconst envoy::extensions::filters::http::compressor::v3::Compressor\nGzipFilterConfig::compressorConfig(const envoy::extensions::filters::http::gzip::v3::Gzip& gzip) {\n  if (gzip.has_compressor()) {\n    return gzip.compressor();\n  }\n  envoy::extensions::filters::http::compressor::v3::Compressor compressor = {};\n  if (gzip.has_hidden_envoy_deprecated_content_length()) {\n    compressor.set_allocated_content_length(\n        // According to\n        // https://developers.google.com/protocol-buffers/docs/reference/cpp-generated#embeddedmessage\n        // the message Compressor takes ownership of the allocated Protobuf::Uint32Value object.\n        new Protobuf::UInt32Value(gzip.hidden_envoy_deprecated_content_length()));\n  }\n  // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks)\n  for (const std::string& ctype : gzip.hidden_envoy_deprecated_content_type()) {\n    compressor.add_content_type(ctype);\n  }\n  compressor.set_disable_on_etag_header(gzip.hidden_envoy_deprecated_disable_on_etag_header());\n  compressor.set_remove_accept_encoding_header(\n      gzip.hidden_envoy_deprecated_remove_accept_encoding_header());\n  return compressor;\n}\n\n} // namespace Gzip\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/gzip/gzip_filter.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/gzip/v3/gzip.pb.h\"\n\n#include \"extensions/compression/gzip/compressor/zlib_compressor_impl.h\"\n#include \"extensions/filters/http/common/compressor/compressor.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Gzip {\n\n/**\n * Configuration for the gzip filter.\n */\nclass GzipFilterConfig : public Common::Compressors::CompressorFilterConfig {\n\npublic:\n  GzipFilterConfig(const envoy::extensions::filters::http::gzip::v3::Gzip& gzip,\n                   const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime);\n\n  Envoy::Compression::Compressor::CompressorPtr makeCompressor() override;\n\n  Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel compressionLevel() const {\n    return compression_level_;\n  }\n  Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy\n  compressionStrategy() const {\n    return compression_strategy_;\n  }\n\n  uint64_t memoryLevel() const { return memory_level_; }\n  uint64_t windowBits() const { return window_bits_; }\n  uint32_t chunkSize() const { return chunk_size_; }\n\nprivate:\n  static Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel compressionLevelEnum(\n      envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::Enum compression_level);\n  static Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy\n  compressionStrategyEnum(\n      envoy::extensions::filters::http::gzip::v3::Gzip::CompressionStrategy compression_strategy);\n\n  static uint64_t memoryLevelUint(Protobuf::uint32 level);\n  static uint64_t windowBitsUint(Protobuf::uint32 window_bits);\n\n  // TODO(rojkov): this is going to be deprecated when the old configuration fields are dropped.\n  static const envoy::extensions::filters::http::compressor::v3::Compressor\n  compressorConfig(const envoy::extensions::filters::http::gzip::v3::Gzip& gzip);\n\n  
Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel compression_level_;\n  Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy compression_strategy_;\n\n  const int32_t memory_level_;\n  const int32_t window_bits_;\n  const uint32_t chunk_size_;\n};\n\n} // namespace Gzip\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/header_to_metadata/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that transforms request data into dynamic metadata\n# Public docs: docs/root/configuration/http_filters/header_to_metadata_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"header_to_metadata_filter_lib\",\n    srcs = [\"header_to_metadata_filter.cc\"],\n    hdrs = [\"header_to_metadata_filter.h\"],\n    deps = [\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/common:base64_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/header_to_metadata:header_to_metadata_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/header_to_metadata/config.cc",
    "content": "#include \"extensions/filters/http/header_to_metadata/config.h\"\n\n#include <string>\n\n#include \"envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.h\"\n#include \"envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/header_to_metadata/header_to_metadata_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HeaderToMetadataFilter {\n\nHttp::FilterFactoryCb HeaderToMetadataConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::header_to_metadata::v3::Config& proto_config,\n    const std::string&, Server::Configuration::FactoryContext&) {\n  ConfigSharedPtr filter_config(std::make_shared<Config>(proto_config));\n\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(\n        Http::StreamFilterSharedPtr{new HeaderToMetadataFilter(filter_config)});\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nHeaderToMetadataConfig::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::header_to_metadata::v3::Config& config,\n    Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<const Config>(config, true);\n}\n\n/**\n * Static registration for the header-to-metadata filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(HeaderToMetadataConfig, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace HeaderToMetadataFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/header_to_metadata/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.h\"\n#include \"envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HeaderToMetadataFilter {\n\n/**\n * Config registration for the header-to-metadata filter. @see NamedHttpFilterConfigFactory.\n */\nclass HeaderToMetadataConfig\n    : public Common::FactoryBase<envoy::extensions::filters::http::header_to_metadata::v3::Config> {\npublic:\n  HeaderToMetadataConfig() : FactoryBase(HttpFilterNames::get().HeaderToMetadata) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::header_to_metadata::v3::Config& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::header_to_metadata::v3::Config& config,\n      Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) override;\n};\n\n} // namespace HeaderToMetadataFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc",
    "content": "#include \"extensions/filters/http/header_to_metadata/header_to_metadata_filter.h\"\n\n#include \"envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/common/regex.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"absl/strings/numbers.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HeaderToMetadataFilter {\n\n// Extract the value of the header.\nabsl::optional<std::string> HeaderValueSelector::extract(Http::HeaderMap& map) const {\n  const auto header_value = Http::HeaderUtility::getAllOfHeaderAsString(map, header_);\n  if (!header_value.result().has_value()) {\n    return absl::nullopt;\n  }\n  // Catch the value in the header before removing.\n  absl::optional<std::string> value = std::string(header_value.result().value());\n  if (remove_) {\n    map.remove(header_);\n  }\n  return value;\n}\n\n// Extract the value of the key from the cookie header.\nabsl::optional<std::string> CookieValueSelector::extract(Http::HeaderMap& map) const {\n  std::string value = Envoy::Http::Utility::parseCookieValue(map, cookie_);\n  if (!value.empty()) {\n    return absl::optional<std::string>(std::move(value));\n  }\n  return absl::nullopt;\n}\n\nRule::Rule(const ProtoRule& rule) : rule_(rule) {\n  // Ensure only one of header and cookie is specified.\n  // TODO(radha13): remove this once we are on v4 and these fields are folded into a oneof.\n  if (!rule.cookie().empty() && !rule.header().empty()) {\n    throw EnvoyException(\"Cannot specify both header and cookie\");\n  }\n\n  // Initialize the shared pointer.\n  if (!rule.header().empty()) {\n    selector_ =\n        
std::make_shared<HeaderValueSelector>(Http::LowerCaseString(rule.header()), rule.remove());\n  } else if (!rule.cookie().empty()) {\n    selector_ = std::make_shared<CookieValueSelector>(rule.cookie());\n  } else {\n    throw EnvoyException(\"One of Cookie or Header option needs to be specified\");\n  }\n\n  // Rule must have at least one of the `on_header_*` fields set.\n  if (!rule.has_on_header_present() && !rule.has_on_header_missing()) {\n    const auto& error = fmt::format(\"header to metadata filter: rule for {} has neither \"\n                                    \"`on_header_present` nor `on_header_missing` set\",\n                                    selector_->toString());\n    throw EnvoyException(error);\n  }\n\n  // Ensure value and regex_value_rewrite are not mixed.\n  // TODO(rgs1): remove this once we are on v4 and these fields are folded into a oneof.\n  if (!rule.on_header_present().value().empty() &&\n      rule.on_header_present().has_regex_value_rewrite()) {\n    throw EnvoyException(\"Cannot specify both value and regex_value_rewrite\");\n  }\n\n  // Remove field is un-supported for cookie.\n  if (!rule.cookie().empty() && rule.remove()) {\n    throw EnvoyException(\"Cannot specify remove for cookie\");\n  }\n\n  if (rule.has_on_header_missing() && rule.on_header_missing().value().empty()) {\n    throw EnvoyException(\"Cannot specify on_header_missing rule with an empty value\");\n  }\n\n  if (rule.on_header_present().has_regex_value_rewrite()) {\n    const auto& rewrite_spec = rule.on_header_present().regex_value_rewrite();\n    regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern());\n    regex_rewrite_substitution_ = rewrite_spec.substitution();\n  }\n}\n\nConfig::Config(const envoy::extensions::filters::http::header_to_metadata::v3::Config config,\n               const bool per_route) {\n  request_set_ = Config::configToVector(config.request_rules(), request_rules_);\n  response_set_ = 
Config::configToVector(config.response_rules(), response_rules_);\n\n  // Note: empty configs are fine for the global config, which would be the case for enabling\n  //       the filter globally without rules and then applying them at the virtual host or\n  //       route level. At the virtual or route level, it makes no sense to have an empty\n  //       config so we throw an error.\n  if (per_route && !response_set_ && !request_set_) {\n    throw EnvoyException(\"header_to_metadata_filter: Per filter configs must at least specify \"\n                         \"either request or response rules\");\n  }\n}\n\nbool Config::configToVector(const ProtobufRepeatedRule& proto_rules,\n                            HeaderToMetadataRules& vector) {\n  if (proto_rules.empty()) {\n    ENVOY_LOG(debug, \"no rules provided\");\n    return false;\n  }\n\n  for (const auto& entry : proto_rules) {\n    vector.emplace_back(entry);\n  }\n\n  return true;\n}\n\nHeaderToMetadataFilter::HeaderToMetadataFilter(const ConfigSharedPtr config) : config_(config) {}\n\nHeaderToMetadataFilter::~HeaderToMetadataFilter() = default;\n\nHttp::FilterHeadersStatus HeaderToMetadataFilter::decodeHeaders(Http::RequestHeaderMap& headers,\n                                                                bool) {\n  const auto* config = getConfig();\n  if (config->doRequest()) {\n    writeHeaderToMetadata(headers, config->requestRules(), *decoder_callbacks_);\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nvoid HeaderToMetadataFilter::setDecoderFilterCallbacks(\n    Http::StreamDecoderFilterCallbacks& callbacks) {\n  decoder_callbacks_ = &callbacks;\n}\n\nHttp::FilterHeadersStatus HeaderToMetadataFilter::encodeHeaders(Http::ResponseHeaderMap& headers,\n                                                                bool) {\n  const auto* config = getConfig();\n  if (config->doResponse()) {\n    writeHeaderToMetadata(headers, config->responseRules(), *encoder_callbacks_);\n  }\n  return 
Http::FilterHeadersStatus::Continue;\n}\n\nvoid HeaderToMetadataFilter::setEncoderFilterCallbacks(\n    Http::StreamEncoderFilterCallbacks& callbacks) {\n  encoder_callbacks_ = &callbacks;\n}\n\nbool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta_namespace,\n                                         const std::string& key, std::string value, ValueType type,\n                                         ValueEncode encode) const {\n  ProtobufWkt::Value val;\n\n  ASSERT(!value.empty());\n\n  if (value.size() >= MAX_HEADER_VALUE_LEN) {\n    // Too long, go away.\n    ENVOY_LOG(debug, \"metadata value is too long\");\n    return false;\n  }\n\n  if (encode == envoy::extensions::filters::http::header_to_metadata::v3::Config::BASE64) {\n    value = Base64::decodeWithoutPadding(value);\n    if (value.empty()) {\n      ENVOY_LOG(debug, \"Base64 decode failed\");\n      return false;\n    }\n  }\n\n  // Sane enough, add the key/value.\n  switch (type) {\n  case envoy::extensions::filters::http::header_to_metadata::v3::Config::STRING:\n    val.set_string_value(std::move(value));\n    break;\n  case envoy::extensions::filters::http::header_to_metadata::v3::Config::NUMBER: {\n    double dval;\n    if (absl::SimpleAtod(StringUtil::trim(value), &dval)) {\n      val.set_number_value(dval);\n    } else {\n      ENVOY_LOG(debug, \"value to number conversion failed\");\n      return false;\n    }\n    break;\n  }\n  case envoy::extensions::filters::http::header_to_metadata::v3::Config::PROTOBUF_VALUE: {\n    if (!val.ParseFromString(value)) {\n      ENVOY_LOG(debug, \"parse from decoded string failed\");\n      return false;\n    }\n    break;\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  // Have we seen this namespace before?\n  auto namespace_iter = map.find(meta_namespace);\n  if (namespace_iter == map.end()) {\n    map[meta_namespace] = ProtobufWkt::Struct();\n    namespace_iter = map.find(meta_namespace);\n  }\n\n  auto& keyval = 
namespace_iter->second;\n  (*keyval.mutable_fields())[key] = val;\n\n  return true;\n}\n\nconst std::string& HeaderToMetadataFilter::decideNamespace(const std::string& nspace) const {\n  return nspace.empty() ? HttpFilterNames::get().HeaderToMetadata : nspace;\n}\n\n// add metadata['key']= value depending on header present or missing case\nvoid HeaderToMetadataFilter::applyKeyValue(std::string&& value, const Rule& rule,\n                                           const KeyValuePair& keyval, StructMap& np) {\n  if (!keyval.value().empty()) {\n    value = keyval.value();\n  } else {\n    const auto& matcher = rule.regexRewrite();\n    if (matcher != nullptr) {\n      value = matcher->replaceAll(value, rule.regexSubstitution());\n    }\n  }\n  if (!value.empty()) {\n    const auto& nspace = decideNamespace(keyval.metadata_namespace());\n    addMetadata(np, nspace, keyval.key(), value, keyval.type(), keyval.encode());\n  } else {\n    ENVOY_LOG(debug, \"value is empty, not adding metadata\");\n  }\n}\n\nvoid HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers,\n                                                   const HeaderToMetadataRules& rules,\n                                                   Http::StreamFilterCallbacks& callbacks) {\n  StructMap structs_by_namespace;\n\n  for (const auto& rule : rules) {\n    const auto& proto_rule = rule.rule();\n    absl::optional<std::string> value = rule.selector_->extract(headers);\n\n    if (value && proto_rule.has_on_header_present()) {\n      applyKeyValue(std::move(value).value_or(\"\"), rule, proto_rule.on_header_present(),\n                    structs_by_namespace);\n    } else if (!value && proto_rule.has_on_header_missing()) {\n      applyKeyValue(std::move(value).value_or(\"\"), rule, proto_rule.on_header_missing(),\n                    structs_by_namespace);\n    }\n  }\n  // Any matching rules?\n  if (!structs_by_namespace.empty()) {\n    for (auto const& entry : structs_by_namespace) {\n      
callbacks.streamInfo().setDynamicMetadata(entry.first, entry.second);\n    }\n  }\n}\n\n// TODO(rgs1): this belongs in one of the filter interfaces, see issue #10164.\nconst Config* HeaderToMetadataFilter::getConfig() const {\n  // Cached config pointer.\n  if (effective_config_) {\n    return effective_config_;\n  }\n\n  effective_config_ = Http::Utility::resolveMostSpecificPerFilterConfig<Config>(\n      HttpFilterNames::get().HeaderToMetadata, decoder_callbacks_->route());\n  if (effective_config_) {\n    return effective_config_;\n  }\n\n  effective_config_ = config_.get();\n  return effective_config_;\n}\n\n} // namespace HeaderToMetadataFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h",
    "content": "#pragma once\n\n#include <string>\n#include <tuple>\n#include <vector>\n\n#include \"envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/matchers.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HeaderToMetadataFilter {\n\nusing ProtoRule = envoy::extensions::filters::http::header_to_metadata::v3::Config::Rule;\nusing ValueType = envoy::extensions::filters::http::header_to_metadata::v3::Config::ValueType;\nusing ValueEncode = envoy::extensions::filters::http::header_to_metadata::v3::Config::ValueEncode;\nusing KeyValuePair = envoy::extensions::filters::http::header_to_metadata::v3::Config::KeyValuePair;\n\n// Interface for getting values from a cookie or a header.\nclass ValueSelector {\npublic:\n  virtual ~ValueSelector() = default;\n\n  /**\n   * Called to extract the value of a given header or cookie.\n   * @param http header map.\n   * @return absl::optional<std::string> the extracted header or cookie.\n   */\n  virtual absl::optional<std::string> extract(Http::HeaderMap& map) const PURE;\n\n  /**\n   * @return a string representation of either a cookie or a header passed in the request.\n   */\n  virtual std::string toString() const PURE;\n};\n\n// Get value from a header.\nclass HeaderValueSelector : public ValueSelector {\npublic:\n  // ValueSelector.\n  explicit HeaderValueSelector(Http::LowerCaseString header, bool remove)\n      : header_(std::move(header)), remove_(std::move(remove)) {}\n  absl::optional<std::string> extract(Http::HeaderMap& map) const override;\n  std::string toString() const override { return fmt::format(\"header '{}'\", header_.get()); }\n  ~HeaderValueSelector() override = default;\n\nprivate:\n  const Http::LowerCaseString header_;\n  const bool remove_;\n};\n\n// Get value from a cookie.\nclass 
CookieValueSelector : public ValueSelector {\npublic:\n  // ValueSelector.\n  explicit CookieValueSelector(std::string cookie) : cookie_(std::move(cookie)) {}\n  absl::optional<std::string> extract(Http::HeaderMap& map) const override;\n  std::string toString() const override { return fmt::format(\"cookie '{}'\", cookie_); }\n  ~CookieValueSelector() override = default;\n\nprivate:\n  const std::string cookie_;\n};\n\nclass Rule {\npublic:\n  Rule(const ProtoRule& rule);\n  const ProtoRule& rule() const { return rule_; }\n  const Regex::CompiledMatcherPtr& regexRewrite() const { return regex_rewrite_; }\n  const std::string& regexSubstitution() const { return regex_rewrite_substitution_; }\n  std::shared_ptr<const ValueSelector> selector_;\n\nprivate:\n  const ProtoRule rule_;\n  Regex::CompiledMatcherPtr regex_rewrite_{};\n  std::string regex_rewrite_substitution_{};\n};\n\nusing HeaderToMetadataRules = std::vector<Rule>;\n\n// TODO(yangminzhu): Make MAX_HEADER_VALUE_LEN configurable.\nconst uint32_t MAX_HEADER_VALUE_LEN = 8 * 1024;\n\n/**\n *  Encapsulates the filter configuration with STL containers and provides an area for any custom\n *  configuration logic.\n */\nclass Config : public ::Envoy::Router::RouteSpecificFilterConfig,\n               public Logger::Loggable<Logger::Id::config> {\npublic:\n  Config(const envoy::extensions::filters::http::header_to_metadata::v3::Config config,\n         bool per_route = false);\n\n  const HeaderToMetadataRules& requestRules() const { return request_rules_; }\n  const HeaderToMetadataRules& responseRules() const { return response_rules_; }\n  bool doResponse() const { return response_set_; }\n  bool doRequest() const { return request_set_; }\n\nprivate:\n  using ProtobufRepeatedRule = Protobuf::RepeatedPtrField<ProtoRule>;\n\n  /**\n   *  configToVector is a helper function for converting from configuration (protobuf types) into\n   *  STL containers for usage elsewhere.\n   *\n   *  @param config A protobuf repeated 
field of metadata that specifies what headers to convert to\n   *         metadata\n   *  @param vector A vector that will be populated with the configuration data from config\n   *  @return true if any configuration data was added to the vector, false otherwise. Can be used\n   *          to validate whether the configuration was empty.\n   */\n  static bool configToVector(const ProtobufRepeatedRule&, HeaderToMetadataRules&);\n\n  const std::string& decideNamespace(const std::string& nspace) const;\n\n  HeaderToMetadataRules request_rules_;\n  HeaderToMetadataRules response_rules_;\n  bool response_set_;\n  bool request_set_;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\n/**\n * Header-To-Metadata examines request/response headers and either copies or\n * moves the values into request metadata based on configuration information.\n */\nclass HeaderToMetadataFilter : public Http::StreamFilter,\n                               public Logger::Loggable<Logger::Id::filter> {\npublic:\n  HeaderToMetadataFilter(const ConfigSharedPtr config);\n  ~HeaderToMetadataFilter() override;\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  }\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\n  // StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override {\n    return 
Http::FilterDataStatus::Continue;\n  }\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override;\n\nprivate:\n  friend class HeaderToMetadataTest;\n\n  using StructMap = std::map<std::string, ProtobufWkt::Struct>;\n\n  const ConfigSharedPtr config_;\n  mutable const Config* effective_config_{nullptr};\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_{};\n  Http::StreamEncoderFilterCallbacks* encoder_callbacks_{};\n\n  /**\n   *  writeHeaderToMetadata encapsulates (1) searching for the header and (2) writing it to the\n   *  request metadata.\n   *  @param headers the map of key-value headers to look through. These could be response or\n   *                 request headers depending on whether this is called from the encode state or\n   *                 decode state.\n   *  @param rules the header-to-metadata mapping set in configuration.\n   *  @param callbacks the callback used to fetch the StreamInfo (which is then used to get\n   *                   metadata). Callable with both encoder_callbacks_ and decoder_callbacks_.\n   */\n  void writeHeaderToMetadata(Http::HeaderMap& headers, const HeaderToMetadataRules& rules,\n                             Http::StreamFilterCallbacks& callbacks);\n  bool addMetadata(StructMap&, const std::string&, const std::string&, std::string, ValueType,\n                   ValueEncode) const;\n  void applyKeyValue(std::string&&, const Rule&, const KeyValuePair&, StructMap&);\n  const std::string& decideNamespace(const std::string& nspace) const;\n  const Config* getConfig() const;\n};\n\n} // namespace HeaderToMetadataFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/health_check/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP filter that implements health check responses\n# Public docs: docs/root/configuration/http_filters/health_check_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"health_check_lib\",\n    srcs = [\"health_check.cc\"],\n    hdrs = [\"health_check.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # Legacy test use. TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/common/filter/http:__subpackages__\",\n        \"//test/integration:__subpackages__\",\n        \"//test/server:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/health_check:health_check_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/health_check/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/health_check/config.cc",
    "content": "#include \"extensions/filters/http/health_check/config.h\"\n\n#include <memory>\n\n#include \"envoy/extensions/filters/http/health_check/v3/health_check.pb.h\"\n#include \"envoy/extensions/filters/http/health_check/v3/health_check.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n\n#include \"extensions/filters/http/health_check/health_check.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HealthCheck {\n\nHttp::FilterFactoryCb HealthCheckFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::health_check::v3::HealthCheck& proto_config,\n    const std::string&, Server::Configuration::FactoryContext& context) {\n  ASSERT(proto_config.has_pass_through_mode());\n\n  const bool pass_through_mode = proto_config.pass_through_mode().value();\n  const int64_t cache_time_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config, cache_time, 0);\n\n  auto header_match_data = std::make_shared<std::vector<Http::HeaderUtility::HeaderDataPtr>>();\n  *header_match_data = Http::HeaderUtility::buildHeaderDataVector(proto_config.headers());\n\n  if (!pass_through_mode && cache_time_ms) {\n    throw EnvoyException(\"cache_time_ms must not be set when pass_through_mode is disabled\");\n  }\n\n  HealthCheckCacheManagerSharedPtr cache_manager;\n  if (cache_time_ms > 0) {\n    cache_manager = std::make_shared<HealthCheckCacheManager>(\n        context.dispatcher(), std::chrono::milliseconds(cache_time_ms));\n  }\n\n  ClusterMinHealthyPercentagesConstSharedPtr cluster_min_healthy_percentages;\n  if (!pass_through_mode && !proto_config.cluster_min_healthy_percentages().empty()) {\n    auto cluster_to_percentage = std::make_unique<ClusterMinHealthyPercentages>();\n    for (const auto& item : proto_config.cluster_min_healthy_percentages()) {\n      cluster_to_percentage->emplace(std::make_pair(item.first, 
item.second.value()));\n    }\n    cluster_min_healthy_percentages = std::move(cluster_to_percentage);\n  }\n\n  return [&context, pass_through_mode, cache_manager, header_match_data,\n          cluster_min_healthy_percentages](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<HealthCheckFilter>(context, pass_through_mode,\n                                                                  cache_manager, header_match_data,\n                                                                  cluster_min_healthy_percentages));\n  };\n}\n\n/**\n * Static registration for the health check filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(HealthCheckFilterConfig,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.health_check\"};\n\n} // namespace HealthCheck\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/health_check/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/health_check/v3/health_check.pb.h\"\n#include \"envoy/extensions/filters/http/health_check/v3/health_check.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HealthCheck {\n\nclass HealthCheckFilterConfig\n    : public Common::FactoryBase<envoy::extensions::filters::http::health_check::v3::HealthCheck> {\npublic:\n  HealthCheckFilterConfig() : FactoryBase(HttpFilterNames::get().HealthCheck) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::health_check::v3::HealthCheck& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace HealthCheck\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/health_check/health_check.cc",
    "content": "#include \"extensions/filters/http/health_check/health_check.h\"\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HealthCheck {\n\nstruct RcDetailsValues {\n  // The health check filter returned healthy to a health check.\n  const std::string HealthCheckOk = \"health_check_ok\";\n  // The health check filter responded with a failed health check.\n  const std::string HealthCheckFailed = \"health_check_failed\";\n  // The health check filter returned a cached health value.\n  const std::string HealthCheckCached = \"health_check_cached\";\n  // The health check filter failed due to health checking a nonexistent cluster.\n  const std::string HealthCheckNoCluster = \"health_check_failed_no_cluster_found\";\n  // The health check filter failed due to checking min_degraded against an empty cluster.\n  const std::string HealthCheckClusterEmpty = \"health_check_failed_cluster_empty\";\n  // The health check filter succeeded given the cluster health was sufficient.\n  const std::string HealthCheckClusterHealthy = \"health_check_ok_cluster_healthy\";\n  // The health check filter failed given the cluster health was not sufficient.\n  const std::string HealthCheckClusterUnhealthy = \"health_check_failed_cluster_unhealthy\";\n};\nusing RcDetails = ConstSingleton<RcDetailsValues>;\n\nHealthCheckCacheManager::HealthCheckCacheManager(Event::Dispatcher& dispatcher,\n                                                 std::chrono::milliseconds timeout)\n    : clear_cache_timer_(dispatcher.createTimer([this]() -> void { 
onTimer(); })),\n      timeout_(timeout) {\n  onTimer();\n}\n\nvoid HealthCheckCacheManager::onTimer() {\n  use_cached_response_ = false;\n  clear_cache_timer_->enableTimer(timeout_);\n}\n\nHttp::FilterHeadersStatus HealthCheckFilter::decodeHeaders(Http::RequestHeaderMap& headers,\n                                                           bool end_stream) {\n  if (Http::HeaderUtility::matchHeaders(headers, *header_match_data_)) {\n    health_check_request_ = true;\n    callbacks_->streamInfo().healthCheck(true);\n\n    // Set the 'sampled' status for the span to false. This overrides\n    // any previous sampling decision associated with the trace instance,\n    // resulting in this span (and any subsequent child spans) not being\n    // reported to the backend tracing system.\n    callbacks_->activeSpan().setSampled(false);\n\n    // If we are not in pass through mode, we always handle. Otherwise, we handle if the server is\n    // in the failed state or if we are using caching and we should use the cached response.\n    if (!pass_through_mode_ || context_.healthCheckFailed() ||\n        (cache_manager_ && cache_manager_->useCachedResponse())) {\n      handling_ = true;\n    }\n  }\n\n  if (end_stream && handling_) {\n    onComplete();\n  }\n\n  return handling_ ? Http::FilterHeadersStatus::StopIteration : Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus HealthCheckFilter::decodeData(Buffer::Instance&, bool end_stream) {\n  if (end_stream && handling_) {\n    onComplete();\n  }\n\n  return handling_ ? Http::FilterDataStatus::StopIterationNoBuffer\n                   : Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus HealthCheckFilter::decodeTrailers(Http::RequestTrailerMap&) {\n  if (handling_) {\n    onComplete();\n  }\n\n  return handling_ ? 
Http::FilterTrailersStatus::StopIteration\n                   : Http::FilterTrailersStatus::Continue;\n}\n\nHttp::FilterHeadersStatus HealthCheckFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) {\n  if (health_check_request_) {\n    if (cache_manager_) {\n      cache_manager_->setCachedResponse(\n          static_cast<Http::Code>(Http::Utility::getResponseStatus(headers)),\n          headers.EnvoyDegraded() != nullptr);\n    }\n\n    headers.setEnvoyUpstreamHealthCheckedCluster(context_.localInfo().clusterName());\n  } else if (context_.healthCheckFailed()) {\n    headers.setReferenceEnvoyImmediateHealthCheckFail(\n        Http::Headers::get().EnvoyImmediateHealthCheckFailValues.True);\n  }\n\n  return Http::FilterHeadersStatus::Continue;\n}\n\nvoid HealthCheckFilter::onComplete() {\n  ASSERT(handling_);\n  Http::Code final_status = Http::Code::OK;\n  const std::string* details = &RcDetails::get().HealthCheckOk;\n  bool degraded = false;\n  if (context_.healthCheckFailed()) {\n    callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck);\n    final_status = Http::Code::ServiceUnavailable;\n    details = &RcDetails::get().HealthCheckFailed;\n  } else {\n    if (cache_manager_) {\n      const auto status_and_degraded = cache_manager_->getCachedResponse();\n      final_status = status_and_degraded.first;\n      details = &RcDetails::get().HealthCheckCached;\n      degraded = status_and_degraded.second;\n    } else if (cluster_min_healthy_percentages_ != nullptr &&\n               !cluster_min_healthy_percentages_->empty()) {\n      // Check the status of the specified upstream cluster(s) to determine the right response.\n      auto& clusterManager = context_.clusterManager();\n      for (const auto& item : *cluster_min_healthy_percentages_) {\n        details = &RcDetails::get().HealthCheckClusterHealthy;\n        const std::string& cluster_name = item.first;\n        const uint64_t min_healthy_percentage = 
static_cast<uint64_t>(item.second);\n        auto* cluster = clusterManager.get(cluster_name);\n        if (cluster == nullptr) {\n          // If the cluster does not exist at all, consider the service unhealthy.\n          final_status = Http::Code::ServiceUnavailable;\n          details = &RcDetails::get().HealthCheckNoCluster;\n\n          break;\n        }\n        const auto& stats = cluster->info()->stats();\n        const uint64_t membership_total = stats.membership_total_.value();\n        if (membership_total == 0) {\n          // If the cluster exists but is empty, consider the service unhealthy unless\n          // the specified minimum percent healthy for the cluster happens to be zero.\n          if (min_healthy_percentage == 0UL) {\n            continue;\n          } else {\n            final_status = Http::Code::ServiceUnavailable;\n            details = &RcDetails::get().HealthCheckClusterEmpty;\n            break;\n          }\n        }\n        // In the general case, consider the service unhealthy if fewer than the\n        // specified percentage of the servers in the cluster are available (healthy + degraded).\n        if ((100UL * (stats.membership_healthy_.value() + stats.membership_degraded_.value())) <\n            membership_total * min_healthy_percentage) {\n          final_status = Http::Code::ServiceUnavailable;\n          details = &RcDetails::get().HealthCheckClusterUnhealthy;\n          break;\n        }\n      }\n    }\n\n    if (!Http::CodeUtility::is2xx(enumToInt(final_status))) {\n      callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck);\n    }\n  }\n\n  callbacks_->sendLocalReply(\n      final_status, \"\",\n      [degraded](auto& headers) {\n        if (degraded) {\n          headers.setEnvoyDegraded(\"\");\n        }\n      },\n      absl::nullopt, *details);\n}\n\n} // namespace HealthCheck\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/health_check/health_check.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/http/header_utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HealthCheck {\n\n/**\n * Shared cache manager used by all instances of a health check filter configuration as well as\n * all threads. This sets up a timer that will invalidate the cached response code and allow some\n * requests to go through to the backend. No attempt is made to allow only a single request to go\n * through to the backend, so during the invalidation window some number of requests will get\n * through.\n */\nclass HealthCheckCacheManager {\npublic:\n  HealthCheckCacheManager(Event::Dispatcher& dispatcher, std::chrono::milliseconds timeout);\n\n  std::pair<Http::Code, bool> getCachedResponse() {\n    return {last_response_code_, last_response_degraded_};\n  }\n  void setCachedResponse(Http::Code code, bool degraded) {\n    last_response_code_ = code;\n    last_response_degraded_ = degraded;\n    use_cached_response_ = true;\n  }\n  bool useCachedResponse() { return use_cached_response_; }\n\nprivate:\n  void onTimer();\n\n  Event::TimerPtr clear_cache_timer_;\n  const std::chrono::milliseconds timeout_;\n  std::atomic<bool> use_cached_response_{};\n  std::atomic<Http::Code> last_response_code_{};\n  std::atomic<bool> last_response_degraded_{};\n};\n\nusing HealthCheckCacheManagerSharedPtr = std::shared_ptr<HealthCheckCacheManager>;\n\nusing ClusterMinHealthyPercentages = std::map<std::string, double>;\nusing ClusterMinHealthyPercentagesConstSharedPtr =\n    std::shared_ptr<const ClusterMinHealthyPercentages>;\n\nusing HeaderDataVectorSharedPtr = std::shared_ptr<std::vector<Http::HeaderUtility::HeaderDataPtr>>;\n\n/**\n * Health check responder filter.\n */\nclass HealthCheckFilter : public Http::StreamFilter 
{\npublic:\n  HealthCheckFilter(Server::Configuration::FactoryContext& context, bool pass_through_mode,\n                    HealthCheckCacheManagerSharedPtr cache_manager,\n                    HeaderDataVectorSharedPtr header_match_data,\n                    ClusterMinHealthyPercentagesConstSharedPtr cluster_min_healthy_percentages)\n      : context_(context), pass_through_mode_(pass_through_mode), cache_manager_(cache_manager),\n        header_match_data_(std::move(header_match_data)),\n        cluster_min_healthy_percentages_(cluster_min_healthy_percentages) {}\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    callbacks_ = &callbacks;\n  }\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  }\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks&) override {}\n\nprivate:\n  void onComplete();\n\n  Server::Configuration::FactoryContext& 
context_;\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n  bool handling_{};\n  bool health_check_request_{};\n  bool pass_through_mode_{};\n  HealthCheckCacheManagerSharedPtr cache_manager_;\n  const HeaderDataVectorSharedPtr header_match_data_;\n  ClusterMinHealthyPercentagesConstSharedPtr cluster_min_healthy_percentages_;\n};\n\n} // namespace HealthCheck\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ip_tagging/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter that writes an IP tagging header based on IP trie data\n# Public docs: docs/root/configuration/http_filters/ip_tagging_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"ip_tagging_filter_lib\",\n    srcs = [\"ip_tagging_filter.cc\"],\n    hdrs = [\"ip_tagging_filter.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/network:lc_trie_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ip_tagging/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/ip_tagging:ip_tagging_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/ip_tagging/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/ip_tagging/config.cc",
    "content": "#include \"extensions/filters/http/ip_tagging/config.h\"\n\n#include \"envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h\"\n#include \"envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/ip_tagging/ip_tagging_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace IpTagging {\n\nHttp::FilterFactoryCb IpTaggingFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::ip_tagging::v3::IPTagging& proto_config,\n    const std::string& stat_prefix, Server::Configuration::FactoryContext& context) {\n\n  IpTaggingFilterConfigSharedPtr config(\n      new IpTaggingFilterConfig(proto_config, stat_prefix, context.scope(), context.runtime()));\n\n  return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(std::make_shared<IpTaggingFilter>(config));\n  };\n}\n\n/**\n * Static registration for the ip tagging filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(IpTaggingFilterFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.ip_tagging\"};\n\n} // namespace IpTagging\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ip_tagging/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h\"\n#include \"envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace IpTagging {\n\n/**\n * Config registration for the router filter. @see NamedHttpFilterConfigFactory.\n */\nclass IpTaggingFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::ip_tagging::v3::IPTagging> {\npublic:\n  IpTaggingFilterFactory() : FactoryBase(HttpFilterNames::get().IpTagging) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::ip_tagging::v3::IPTagging& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace IpTagging\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc",
    "content": "#include \"extensions/filters/http/ip_tagging/ip_tagging_filter.h\"\n\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace IpTagging {\n\nIpTaggingFilterConfig::IpTaggingFilterConfig(\n    const envoy::extensions::filters::http::ip_tagging::v3::IPTagging& config,\n    const std::string& stat_prefix, Stats::Scope& scope, Runtime::Loader& runtime)\n    : request_type_(requestTypeEnum(config.request_type())), scope_(scope), runtime_(runtime),\n      stat_name_set_(scope.symbolTable().makeSet(\"IpTagging\")),\n      stats_prefix_(stat_name_set_->add(stat_prefix + \"ip_tagging\")),\n      no_hit_(stat_name_set_->add(\"no_hit\")), total_(stat_name_set_->add(\"total\")),\n      unknown_tag_(stat_name_set_->add(\"unknown_tag.hit\")) {\n\n  // Once loading IP tags from a file system is supported, the restriction on the size\n  // of the set should be removed and observability into what tags are loaded needs\n  // to be implemented.\n  // TODO(ccaraman): Remove size check once file system support is implemented.\n  // Work is tracked by issue https://github.com/envoyproxy/envoy/issues/2695.\n  if (config.ip_tags().empty()) {\n    throw EnvoyException(\"HTTP IP Tagging Filter requires ip_tags to be specified.\");\n  }\n\n  std::vector<std::pair<std::string, std::vector<Network::Address::CidrRange>>> tag_data;\n  tag_data.reserve(config.ip_tags().size());\n  for (const auto& ip_tag : config.ip_tags()) {\n    std::vector<Network::Address::CidrRange> cidr_set;\n    cidr_set.reserve(ip_tag.ip_list().size());\n    for (const envoy::config::core::v3::CidrRange& entry : ip_tag.ip_list()) {\n\n      // Currently, CidrRange::create doesn't guarantee that the CidrRanges are valid.\n      
Network::Address::CidrRange cidr_entry = Network::Address::CidrRange::create(entry);\n      if (cidr_entry.isValid()) {\n        cidr_set.emplace_back(std::move(cidr_entry));\n      } else {\n        throw EnvoyException(\n            fmt::format(\"invalid ip/mask combo '{}/{}' (format is <ip>/<# mask bits>)\",\n                        entry.address_prefix(), entry.prefix_len().value()));\n      }\n    }\n\n    tag_data.emplace_back(ip_tag.ip_tag_name(), cidr_set);\n    stat_name_set_->rememberBuiltin(absl::StrCat(ip_tag.ip_tag_name(), \".hit\"));\n  }\n  trie_ = std::make_unique<Network::LcTrie::LcTrie<std::string>>(tag_data);\n}\n\nvoid IpTaggingFilterConfig::incCounter(Stats::StatName name) {\n  Stats::SymbolTable::StoragePtr storage = scope_.symbolTable().join({stats_prefix_, name});\n  scope_.counterFromStatName(Stats::StatName(storage.get())).inc();\n}\n\nIpTaggingFilter::IpTaggingFilter(IpTaggingFilterConfigSharedPtr config) : config_(config) {}\n\nIpTaggingFilter::~IpTaggingFilter() = default;\n\nvoid IpTaggingFilter::onDestroy() {}\n\nHttp::FilterHeadersStatus IpTaggingFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  const bool is_internal_request = headers.EnvoyInternalRequest() &&\n                                   (headers.EnvoyInternalRequest()->value() ==\n                                    Http::Headers::get().EnvoyInternalRequestValues.True.c_str());\n\n  if ((is_internal_request && config_->requestType() == FilterRequestType::EXTERNAL) ||\n      (!is_internal_request && config_->requestType() == FilterRequestType::INTERNAL) ||\n      !config_->runtime().snapshot().featureEnabled(\"ip_tagging.http_filter_enabled\", 100)) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  std::vector<std::string> tags =\n      config_->trie().getData(callbacks_->streamInfo().downstreamRemoteAddress());\n\n  if (!tags.empty()) {\n    const std::string tags_join = absl::StrJoin(tags, \",\");\n    headers.appendEnvoyIpTags(tags_join, 
\",\");\n\n    // We must clear the route cache or else we can't match on x-envoy-ip-tags.\n    callbacks_->clearRouteCache();\n\n    // For a large number(ex > 1000) of tags, stats cardinality will be an issue.\n    // If there are use cases with a large set of tags, a way to opt into these stats\n    // should be exposed and other observability options like logging tags need to be implemented.\n    for (const std::string& tag : tags) {\n      config_->incHit(tag);\n    }\n  } else {\n    config_->incNoHit();\n  }\n  config_->incTotal();\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus IpTaggingFilter::decodeData(Buffer::Instance&, bool) {\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus IpTaggingFilter::decodeTrailers(Http::RequestTrailerMap&) {\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid IpTaggingFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n}\n\n} // namespace IpTagging\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ip_tagging/ip_tagging_filter.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/network/cidr_range.h\"\n#include \"common/network/lc_trie.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace IpTagging {\n\n/**\n * Type of requests the filter should apply to.\n */\nenum class FilterRequestType { INTERNAL, EXTERNAL, BOTH };\n\n/**\n * Configuration for the HTTP IP Tagging filter.\n */\nclass IpTaggingFilterConfig {\npublic:\n  IpTaggingFilterConfig(const envoy::extensions::filters::http::ip_tagging::v3::IPTagging& config,\n                        const std::string& stat_prefix, Stats::Scope& scope,\n                        Runtime::Loader& runtime);\n\n  Runtime::Loader& runtime() { return runtime_; }\n  FilterRequestType requestType() const { return request_type_; }\n  const Network::LcTrie::LcTrie<std::string>& trie() const { return *trie_; }\n\n  void incHit(absl::string_view tag) {\n    incCounter(stat_name_set_->getBuiltin(absl::StrCat(tag, \".hit\"), unknown_tag_));\n  }\n  void incNoHit() { incCounter(no_hit_); }\n  void incTotal() { incCounter(total_); }\n\nprivate:\n  static FilterRequestType requestTypeEnum(\n      envoy::extensions::filters::http::ip_tagging::v3::IPTagging::RequestType request_type) {\n    switch (request_type) {\n    case envoy::extensions::filters::http::ip_tagging::v3::IPTagging::BOTH:\n      return FilterRequestType::BOTH;\n    case envoy::extensions::filters::http::ip_tagging::v3::IPTagging::INTERNAL:\n      return FilterRequestType::INTERNAL;\n    case envoy::extensions::filters::http::ip_tagging::v3::IPTagging::EXTERNAL:\n      return FilterRequestType::EXTERNAL;\n    
default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  void incCounter(Stats::StatName name);\n\n  const FilterRequestType request_type_;\n  Stats::Scope& scope_;\n  Runtime::Loader& runtime_;\n  Stats::StatNameSetPtr stat_name_set_;\n  const Stats::StatName stats_prefix_;\n  const Stats::StatName no_hit_;\n  const Stats::StatName total_;\n  const Stats::StatName unknown_tag_;\n  std::unique_ptr<Network::LcTrie::LcTrie<std::string>> trie_;\n};\n\nusing IpTaggingFilterConfigSharedPtr = std::shared_ptr<IpTaggingFilterConfig>;\n\n/**\n * A filter that gets all tags associated with a request's downstream remote address and\n * sets a header `x-envoy-ip-tags` with those values.\n */\nclass IpTaggingFilter : public Http::StreamDecoderFilter {\npublic:\n  IpTaggingFilter(IpTaggingFilterConfigSharedPtr config);\n  ~IpTaggingFilter() override;\n\n  // Http::StreamFilterBase\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\nprivate:\n  IpTaggingFilterConfigSharedPtr config_;\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n};\n\n} // namespace IpTagging\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"extractor_lib\",\n    srcs = [\"extractor.cc\"],\n    hdrs = [\"extractor.h\"],\n    deps = [\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"jwks_cache_lib\",\n    srcs = [\"jwks_cache.cc\"],\n    hdrs = [\"jwks_cache.h\"],\n    external_deps = [\n        \"jwt_verify_lib\",\n    ],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"authenticator_lib\",\n    srcs = [\"authenticator.cc\"],\n    hdrs = [\"authenticator.h\"],\n    deps = [\n        \":extractor_lib\",\n        \":jwks_cache_lib\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/extensions/filters/http/common:jwks_fetcher_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_lib\",\n    srcs = [\"filter.cc\"],\n    hdrs = [\"filter.h\"],\n    external_deps = [\n        \"jwt_verify_lib\",\n    ],\n    deps = [\n        \":filter_config_interface\",\n        \":matchers_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"filter_factory.cc\"],\n    hdrs = 
[\"filter_factory.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    status = \"alpha\",\n    deps = [\n        \":filter_lib\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"matchers_lib\",\n    srcs = [\"matcher.cc\"],\n    hdrs = [\"matcher.h\"],\n    deps = [\n        \":verifier_lib\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/router:config_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"verifier_lib\",\n    srcs = [\"verifier.cc\"],\n    hdrs = [\"verifier.h\"],\n    deps = [\n        \":authenticator_lib\",\n        \":extractor_lib\",\n        \"//include/envoy/http:header_map_interface\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_config_interface\",\n    srcs = [\"filter_config.cc\"],\n    hdrs = [\"filter_config.h\"],\n    deps = [\n        \":jwks_cache_lib\",\n        \":matchers_lib\",\n        \"//include/envoy/router:string_accessor_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/authenticator.cc",
    "content": "#include \"extensions/filters/http/jwt_authn/authenticator.h\"\n\n#include \"envoy/http/async_client.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"jwt_verify_lib/jwt.h\"\n#include \"jwt_verify_lib/verify.h\"\n\nusing ::google::jwt_verify::CheckAudience;\nusing ::google::jwt_verify::Status;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\n/**\n * Object to implement Authenticator interface.\n */\nclass AuthenticatorImpl : public Logger::Loggable<Logger::Id::jwt>,\n                          public Authenticator,\n                          public Common::JwksFetcher::JwksReceiver {\npublic:\n  AuthenticatorImpl(const CheckAudience* check_audience,\n                    const absl::optional<std::string>& provider, bool allow_failed,\n                    bool allow_missing, JwksCache& jwks_cache,\n                    Upstream::ClusterManager& cluster_manager,\n                    CreateJwksFetcherCb create_jwks_fetcher_cb, TimeSource& time_source)\n      : jwks_cache_(jwks_cache), cm_(cluster_manager),\n        create_jwks_fetcher_cb_(create_jwks_fetcher_cb), check_audience_(check_audience),\n        provider_(provider), is_allow_failed_(allow_failed), is_allow_missing_(allow_missing),\n        time_source_(time_source) {}\n\n  // Following functions are for JwksFetcher::JwksReceiver interface\n  void onJwksSuccess(google::jwt_verify::JwksPtr&& jwks) override;\n  void onJwksError(Failure reason) override;\n  // Following functions are for Authenticator interface\n  void verify(Http::HeaderMap& headers, Tracing::Span& parent_span,\n              std::vector<JwtLocationConstPtr>&& tokens, SetPayloadCallback set_payload_cb,\n              
AuthenticatorCallback callback) override;\n  void onDestroy() override;\n\n  TimeSource& timeSource() { return time_source_; }\n\nprivate:\n  // Returns the name of the authenticator. For debug logging only.\n  std::string name() const;\n\n  // Verify with a specific public key.\n  void verifyKey();\n\n  // Calls the callback with status.\n  void doneWithStatus(const Status& status);\n\n  // Start verification process. It will continue to eliminate tokens with invalid claims until it\n  // finds one to verify with key.\n  void startVerify();\n\n  // The jwks cache object.\n  JwksCache& jwks_cache_;\n  // the cluster manager object.\n  Upstream::ClusterManager& cm_;\n\n  // The callback used to create a JwksFetcher instance.\n  CreateJwksFetcherCb create_jwks_fetcher_cb_;\n\n  // The Jwks fetcher object\n  Common::JwksFetcherPtr fetcher_;\n\n  // The token data\n  std::vector<JwtLocationConstPtr> tokens_;\n  JwtLocationConstPtr curr_token_;\n  // The JWT object.\n  std::unique_ptr<::google::jwt_verify::Jwt> jwt_;\n  // The JWKS data object\n  JwksCache::JwksData* jwks_data_{};\n\n  // The HTTP request headers\n  Http::HeaderMap* headers_{};\n  // The active span for the request\n  Tracing::Span* parent_span_{&Tracing::NullSpan::instance()};\n  // the callback function to set payload\n  SetPayloadCallback set_payload_cb_;\n  // The on_done function.\n  AuthenticatorCallback callback_;\n  // check audience object.\n  const CheckAudience* check_audience_;\n  // specific provider or not when it is allow missing or failed.\n  const absl::optional<std::string> provider_;\n  const bool is_allow_failed_;\n  const bool is_allow_missing_;\n  TimeSource& time_source_;\n};\n\nstd::string AuthenticatorImpl::name() const {\n  if (provider_) {\n    return provider_.value() + (is_allow_missing_ ? 
\"-OPTIONAL\" : \"\");\n  }\n  if (is_allow_failed_) {\n    return \"_IS_ALLOW_FAILED_\";\n  }\n  if (is_allow_missing_) {\n    return \"_IS_ALLOW_MISSING_\";\n  }\n  return \"_UNKNOWN_\";\n}\n\nvoid AuthenticatorImpl::verify(Http::HeaderMap& headers, Tracing::Span& parent_span,\n                               std::vector<JwtLocationConstPtr>&& tokens,\n                               SetPayloadCallback set_payload_cb, AuthenticatorCallback callback) {\n  ASSERT(!callback_);\n  headers_ = &headers;\n  parent_span_ = &parent_span;\n  tokens_ = std::move(tokens);\n  set_payload_cb_ = std::move(set_payload_cb);\n  callback_ = std::move(callback);\n\n  ENVOY_LOG(debug, \"{}: JWT authentication starts (allow_failed={}), tokens size={}\", name(),\n            is_allow_failed_, tokens_.size());\n  if (tokens_.empty()) {\n    doneWithStatus(Status::JwtMissed);\n    return;\n  }\n\n  startVerify();\n}\n\nvoid AuthenticatorImpl::startVerify() {\n  ASSERT(!tokens_.empty());\n  ENVOY_LOG(debug, \"{}: startVerify: tokens size {}\", name(), tokens_.size());\n  curr_token_ = std::move(tokens_.back());\n  tokens_.pop_back();\n\n  jwt_ = std::make_unique<::google::jwt_verify::Jwt>();\n  const Status status = jwt_->parseFromString(curr_token_->token());\n  if (status != Status::Ok) {\n    doneWithStatus(status);\n    return;\n  }\n\n  ENVOY_LOG(debug, \"{}: Verifying JWT token of issuer {}\", name(), jwt_->iss_);\n  if (!jwt_->iss_.empty()) {\n    // Check if token extracted from the location contains the issuer specified by config.\n    if (!curr_token_->isIssuerSpecified(jwt_->iss_)) {\n      doneWithStatus(Status::JwtUnknownIssuer);\n      return;\n    }\n  } else {\n    // If provider is not specified, in allow_missing_or_failed or allow_missing case,\n    // the issuer specified in \"iss\" payload is required in order to lookup provider.\n    if (!provider_) {\n      doneWithStatus(Status::JwtUnknownIssuer);\n      return;\n    }\n  }\n\n  // TODO(qiwzhang): Cross-platform-wise 
the below unix_timestamp code is wrong as the\n  // epoch is not guaranteed to be defined as the unix epoch. We should use\n  // the abseil time functionality instead or use the jwt_verify_lib to check\n  // the validity of a JWT.\n  // Check \"exp\" claim.\n  const uint64_t unix_timestamp =\n      std::chrono::duration_cast<std::chrono::seconds>(timeSource().systemTime().time_since_epoch())\n          .count();\n  // If the nbf claim does *not* appear in the JWT, then the nbf field is defaulted\n  // to 0.\n  if (jwt_->nbf_ > unix_timestamp) {\n    doneWithStatus(Status::JwtNotYetValid);\n    return;\n  }\n  // If the exp claim does *not* appear in the JWT then the exp field is defaulted\n  // to 0.\n  if (jwt_->exp_ > 0 && jwt_->exp_ < unix_timestamp) {\n    doneWithStatus(Status::JwtExpired);\n    return;\n  }\n\n  // Check the issuer is configured or not.\n  jwks_data_ = provider_ ? jwks_cache_.findByProvider(provider_.value())\n                         : jwks_cache_.findByIssuer(jwt_->iss_);\n  // isIssuerSpecified() check already make sure the issuer is in the cache.\n  ASSERT(jwks_data_ != nullptr);\n\n  // Check if audience is allowed\n  bool is_allowed = check_audience_ ? check_audience_->areAudiencesAllowed(jwt_->audiences_)\n                                    : jwks_data_->areAudiencesAllowed(jwt_->audiences_);\n  if (!is_allowed) {\n    doneWithStatus(Status::JwtAudienceNotAllowed);\n    return;\n  }\n\n  auto jwks_obj = jwks_data_->getJwksObj();\n  if (jwks_obj != nullptr && !jwks_data_->isExpired()) {\n    // TODO(qiwzhang): It would seem there's a window of error whereby if the JWT issuer\n    // has started signing with a new key that's not in our cache, then the\n    // verification will fail even though the JWT is valid. 
A simple fix\n    // would be to check the JWS kid header field; if present check we have\n    // the key cached, if we do proceed to verify else try a new JWKS retrieval.\n    // JWTs without a kid header field in the JWS we might be best to get each\n    // time? This all only matters for remote JWKS.\n    verifyKey();\n    return;\n  }\n\n  // TODO(potatop): potential optimization.\n  // Only one remote jwks will be fetched, verify will not continue util it is completed. This is\n  // fine for provider name requirements, as each provider has only one issuer, but for allow\n  // missing or failed there can be more than one issuers. This can be optimized; the same remote\n  // jwks fetching can be shared by two requests.\n  if (jwks_data_->getJwtProvider().has_remote_jwks()) {\n    if (!fetcher_) {\n      fetcher_ = create_jwks_fetcher_cb_(cm_);\n    }\n    fetcher_->fetch(jwks_data_->getJwtProvider().remote_jwks().http_uri(), *parent_span_, *this);\n    return;\n  }\n  // No valid keys for this issuer. 
This may happen as a result of incorrect local\n  // JWKS configuration.\n  doneWithStatus(Status::JwksNoValidKeys);\n}\n\nvoid AuthenticatorImpl::onJwksSuccess(google::jwt_verify::JwksPtr&& jwks) {\n  const Status status = jwks_data_->setRemoteJwks(std::move(jwks))->getStatus();\n  if (status != Status::Ok) {\n    doneWithStatus(status);\n  } else {\n    verifyKey();\n  }\n}\n\nvoid AuthenticatorImpl::onJwksError(Failure) { doneWithStatus(Status::JwksFetchFail); }\n\nvoid AuthenticatorImpl::onDestroy() {\n  if (fetcher_) {\n    fetcher_->cancel();\n  }\n}\n\n// Verify with a specific public key.\nvoid AuthenticatorImpl::verifyKey() {\n  const Status status = ::google::jwt_verify::verifyJwt(*jwt_, *jwks_data_->getJwksObj());\n  if (status != Status::Ok) {\n    doneWithStatus(status);\n    return;\n  }\n\n  // Forward the payload\n  const auto& provider = jwks_data_->getJwtProvider();\n  if (!provider.forward_payload_header().empty()) {\n    headers_->addCopy(Http::LowerCaseString(provider.forward_payload_header()),\n                      jwt_->payload_str_base64url_);\n  }\n\n  if (!provider.forward()) {\n    // TODO(potatop) remove JWT from queries.\n    // Remove JWT from headers.\n    curr_token_->removeJwt(*headers_);\n  }\n  if (set_payload_cb_ && !provider.payload_in_metadata().empty()) {\n    set_payload_cb_(provider.payload_in_metadata(), jwt_->payload_pb_);\n  }\n\n  doneWithStatus(Status::Ok);\n}\n\nvoid AuthenticatorImpl::doneWithStatus(const Status& status) {\n  ENVOY_LOG(debug, \"{}: JWT token verification completed with: {}\", name(),\n            ::google::jwt_verify::getStatusString(status));\n\n  // If a request has multiple tokens, all of them must be valid. 
Otherwise it may have\n  // following security hole: a request has a good token and a bad one, it will pass\n  // verification, forwarded to the backend, and the backend may mistakenly use the bad\n  // token as the good one that passed the verification.\n\n  // Unless allowing failed or missing, all tokens must be verified successfully.\n  if ((Status::Ok != status && !is_allow_failed_ && !is_allow_missing_) || tokens_.empty()) {\n    tokens_.clear();\n    if (is_allow_failed_) {\n      callback_(Status::Ok);\n    } else if (is_allow_missing_ && status == Status::JwtMissed) {\n      callback_(Status::Ok);\n    } else {\n      callback_(status);\n    }\n\n    callback_ = nullptr;\n    return;\n  }\n  startVerify();\n}\n\n} // namespace\n\nAuthenticatorPtr Authenticator::create(const CheckAudience* check_audience,\n                                       const absl::optional<std::string>& provider,\n                                       bool allow_failed, bool allow_missing, JwksCache& jwks_cache,\n                                       Upstream::ClusterManager& cluster_manager,\n                                       CreateJwksFetcherCb create_jwks_fetcher_cb,\n                                       TimeSource& time_source) {\n  return std::make_unique<AuthenticatorImpl>(check_audience, provider, allow_failed, allow_missing,\n                                             jwks_cache, cluster_manager, create_jwks_fetcher_cb,\n                                             time_source);\n}\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/authenticator.h",
    "content": "#pragma once\n\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/jwks_fetcher.h\"\n#include \"extensions/filters/http/jwt_authn/extractor.h\"\n#include \"extensions/filters/http/jwt_authn/jwks_cache.h\"\n\n#include \"jwt_verify_lib/check_audience.h\"\n#include \"jwt_verify_lib/status.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\nclass Authenticator;\nusing AuthenticatorPtr = std::unique_ptr<Authenticator>;\n\nusing AuthenticatorCallback = std::function<void(const ::google::jwt_verify::Status& status)>;\n\nusing SetPayloadCallback = std::function<void(const std::string&, const ProtobufWkt::Struct&)>;\n\n/**\n *  CreateJwksFetcherCb is a callback interface for creating a JwksFetcher instance.\n */\nusing CreateJwksFetcherCb = std::function<Common::JwksFetcherPtr(Upstream::ClusterManager&)>;\n\n/**\n *  Authenticator object to handle all JWT authentication flow.\n */\nclass Authenticator {\npublic:\n  virtual ~Authenticator() = default;\n\n  // Verify if headers satisfies the JWT requirements. 
Can be limited to single provider with\n  // extract_param.\n  virtual void verify(Http::HeaderMap& headers, Tracing::Span& parent_span,\n                      std::vector<JwtLocationConstPtr>&& tokens, SetPayloadCallback set_payload_cb,\n                      AuthenticatorCallback callback) PURE;\n\n  // Called when the object is about to be destroyed.\n  virtual void onDestroy() PURE;\n\n  // Authenticator factory function.\n  static AuthenticatorPtr create(const ::google::jwt_verify::CheckAudience* check_audience,\n                                 const absl::optional<std::string>& provider, bool allow_failed,\n                                 bool allow_missing, JwksCache& jwks_cache,\n                                 Upstream::ClusterManager& cluster_manager,\n                                 CreateJwksFetcherCb create_jwks_fetcher_cb,\n                                 TimeSource& time_source);\n};\n\n/**\n * Interface for authenticator factory.\n */\nclass AuthFactory {\npublic:\n  virtual ~AuthFactory() = default;\n\n  // Factory method for creating authenticator, and populate it with provider config.\n  virtual AuthenticatorPtr create(const ::google::jwt_verify::CheckAudience* check_audience,\n                                  const absl::optional<std::string>& provider, bool allow_failed,\n                                  bool allow_missing) const PURE;\n};\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/extractor.cc",
    "content": "#include \"extensions/filters/http/jwt_authn/extractor.h\"\n\n#include <memory>\n\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/strings/match.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtProvider;\nusing Envoy::Http::LowerCaseString;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\n/**\n * Constant values\n */\nstruct JwtConstValueStruct {\n  // The header value prefix for Authorization.\n  const std::string BearerPrefix{\"Bearer \"};\n\n  // The default query parameter name to extract JWT token\n  const std::string AccessTokenParam{\"access_token\"};\n};\nusing JwtConstValues = ConstSingleton<JwtConstValueStruct>;\n\n// A base JwtLocation object to store token and specified_issuers.\nclass JwtLocationBase : public JwtLocation {\npublic:\n  JwtLocationBase(const std::string& token, const absl::node_hash_set<std::string>& issuers)\n      : token_(token), specified_issuers_(issuers) {}\n\n  // Get the token string\n  const std::string& token() const override { return token_; }\n\n  // Check if an issuer has specified the location.\n  bool isIssuerSpecified(const std::string& issuer) const override {\n    return specified_issuers_.find(issuer) != specified_issuers_.end();\n  }\n\nprivate:\n  // Extracted token.\n  const std::string token_;\n  // Stored issuers specified the location.\n  const absl::node_hash_set<std::string>& specified_issuers_;\n};\n\n// The JwtLocation for header extraction.\nclass JwtHeaderLocation : public JwtLocationBase {\npublic:\n  JwtHeaderLocation(const std::string& token, const absl::node_hash_set<std::string>& issuers,\n                    const 
LowerCaseString& header)\n      : JwtLocationBase(token, issuers), header_(header) {}\n\n  void removeJwt(Http::HeaderMap& headers) const override { headers.remove(header_); }\n\nprivate:\n  // the header name the JWT is extracted from.\n  const LowerCaseString& header_;\n};\n\n// The JwtLocation for param extraction.\nclass JwtParamLocation : public JwtLocationBase {\npublic:\n  JwtParamLocation(const std::string& token, const absl::node_hash_set<std::string>& issuers,\n                   const std::string&)\n      : JwtLocationBase(token, issuers) {}\n\n  void removeJwt(Http::HeaderMap&) const override {\n    // TODO(qiwzhang): remove JWT from parameter.\n  }\n};\n\n/**\n * The class implements Extractor interface\n *\n */\nclass ExtractorImpl : public Logger::Loggable<Logger::Id::jwt>, public Extractor {\npublic:\n  ExtractorImpl(const JwtProvider& provider);\n\n  ExtractorImpl(\n      const std::vector<const envoy::extensions::filters::http::jwt_authn::v3::JwtProvider*>&\n          providers);\n\n  std::vector<JwtLocationConstPtr> extract(const Http::RequestHeaderMap& headers) const override;\n\n  void sanitizePayloadHeaders(Http::HeaderMap& headers) const override;\n\nprivate:\n  // add a header config\n  void addHeaderConfig(const std::string& issuer, const Http::LowerCaseString& header_name,\n                       const std::string& value_prefix);\n  // add a query param config\n  void addQueryParamConfig(const std::string& issuer, const std::string& param);\n  // ctor helper for a jwt provider config\n  void addProvider(const JwtProvider& provider);\n\n  // @return what should be the 3-part base64url-encoded substring; see RFC-7519\n  absl::string_view extractJWT(absl::string_view value_str,\n                               absl::string_view::size_type after) const;\n\n  // HeaderMap value type to store prefix and issuers that specified this\n  // header.\n  struct HeaderLocationSpec {\n    HeaderLocationSpec(const Http::LowerCaseString& header, const 
std::string& value_prefix)\n        : header_(header), value_prefix_(value_prefix) {}\n    // The header name.\n    Http::LowerCaseString header_;\n    // The value prefix. e.g. for \"Bearer <token>\", the value_prefix is \"Bearer \".\n    std::string value_prefix_;\n    // Issuers that specified this header.\n    absl::node_hash_set<std::string> specified_issuers_;\n  };\n  using HeaderLocationSpecPtr = std::unique_ptr<HeaderLocationSpec>;\n  // The map of (header + value_prefix) to HeaderLocationSpecPtr\n  std::map<std::string, HeaderLocationSpecPtr> header_locations_;\n\n  // ParamMap value type to store issuers that specified this header.\n  struct ParamLocationSpec {\n    // Issuers that specified this param.\n    absl::node_hash_set<std::string> specified_issuers_;\n  };\n  // The map of a parameter key to set of issuers specified the parameter\n  std::map<std::string, ParamLocationSpec> param_locations_;\n\n  std::vector<LowerCaseString> forward_payload_headers_;\n};\n\nExtractorImpl::ExtractorImpl(const JwtProvider& provider) { addProvider(provider); }\n\nExtractorImpl::ExtractorImpl(const JwtProviderList& providers) {\n  for (const auto& provider : providers) {\n    ASSERT(provider);\n    addProvider(*provider);\n  }\n}\n\nvoid ExtractorImpl::addProvider(const JwtProvider& provider) {\n  for (const auto& header : provider.from_headers()) {\n    addHeaderConfig(provider.issuer(), LowerCaseString(header.name()), header.value_prefix());\n  }\n  for (const std::string& param : provider.from_params()) {\n    addQueryParamConfig(provider.issuer(), param);\n  }\n  // If not specified, use default locations.\n  if (provider.from_headers().empty() && provider.from_params().empty()) {\n    addHeaderConfig(provider.issuer(), Http::CustomHeaders::get().Authorization,\n                    JwtConstValues::get().BearerPrefix);\n    addQueryParamConfig(provider.issuer(), JwtConstValues::get().AccessTokenParam);\n  }\n  if (!provider.forward_payload_header().empty()) {\n   
 forward_payload_headers_.emplace_back(provider.forward_payload_header());\n  }\n}\n\nvoid ExtractorImpl::addHeaderConfig(const std::string& issuer, const LowerCaseString& header_name,\n                                    const std::string& value_prefix) {\n  ENVOY_LOG(debug, \"addHeaderConfig for issuer {} at {}\", issuer, header_name.get());\n  const std::string map_key = header_name.get() + value_prefix;\n  auto& header_location_spec = header_locations_[map_key];\n  if (!header_location_spec) {\n    header_location_spec = std::make_unique<HeaderLocationSpec>(header_name, value_prefix);\n  }\n  header_location_spec->specified_issuers_.insert(issuer);\n}\n\nvoid ExtractorImpl::addQueryParamConfig(const std::string& issuer, const std::string& param) {\n  auto& param_location_spec = param_locations_[param];\n  param_location_spec.specified_issuers_.insert(issuer);\n}\n\nstd::vector<JwtLocationConstPtr>\nExtractorImpl::extract(const Http::RequestHeaderMap& headers) const {\n  std::vector<JwtLocationConstPtr> tokens;\n\n  // Check header locations first\n  for (const auto& location_it : header_locations_) {\n    const auto& location_spec = location_it.second;\n    ENVOY_LOG(debug, \"extract {}\", location_it.first);\n    const auto result =\n        Http::HeaderUtility::getAllOfHeaderAsString(headers, location_spec->header_);\n    if (result.result().has_value()) {\n      auto value_str = result.result().value();\n      if (!location_spec->value_prefix_.empty()) {\n        const auto pos = value_str.find(location_spec->value_prefix_);\n        if (pos == absl::string_view::npos) {\n          // value_prefix not found anywhere in value_str, so skip\n          continue;\n        }\n        value_str = extractJWT(value_str, pos + location_spec->value_prefix_.length());\n      }\n      tokens.push_back(std::make_unique<const JwtHeaderLocation>(\n          std::string(value_str), location_spec->specified_issuers_, location_spec->header_));\n    }\n  }\n\n  // If no query 
parameter locations specified, or Path() is null, bail out\n  if (param_locations_.empty() || headers.Path() == nullptr) {\n    return tokens;\n  }\n\n  // Check query parameter locations.\n  const auto& params = Http::Utility::parseAndDecodeQueryString(headers.getPathValue());\n  for (const auto& location_it : param_locations_) {\n    const auto& param_key = location_it.first;\n    const auto& location_spec = location_it.second;\n    const auto& it = params.find(param_key);\n    if (it != params.end()) {\n      tokens.push_back(std::make_unique<const JwtParamLocation>(\n          it->second, location_spec.specified_issuers_, param_key));\n    }\n  }\n  return tokens;\n}\n\n// as specified in RFC-4648 § 5, plus dot (period, 0x2e), of which two are required in the JWT\nconstexpr absl::string_view ConstantBase64UrlEncodingCharsPlusDot =\n    \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.\";\n\n// Returns a token, not a URL: skips non-Base64Url-legal (or dot) characters, collects following\n// Base64Url+dot string until first non-Base64Url char.\n//\n// The input parameters:\n//    \"value_str\" - the header value string, perhaps \"Bearer string....\", and\n//    \"after\" - the offset into that string after which to begin looking for JWT-legal characters\n//\n// For backwards compatibility, if it finds no suitable string, it returns value_str as-is.\n//\n// It is forgiving w.r.t. 
dots/periods, as the exact syntax will be verified after extraction.\n//\n// See RFC-7519 § 2, RFC-7515 § 2, and RFC-4648 \"Base-N Encodings\" § 5.\nabsl::string_view ExtractorImpl::extractJWT(absl::string_view value_str,\n                                            absl::string_view::size_type after) const {\n  const auto starting = value_str.find_first_of(ConstantBase64UrlEncodingCharsPlusDot, after);\n  if (starting == value_str.npos) {\n    return value_str;\n  }\n  // There should be two dots (periods; 0x2e) inside the string, but we don't verify that here\n  auto ending = value_str.find_first_not_of(ConstantBase64UrlEncodingCharsPlusDot, starting);\n  if (ending == value_str.npos) { // Base64Url-encoded string occupies the rest of the line\n    return value_str.substr(starting);\n  }\n  return value_str.substr(starting, ending - starting);\n}\n\nvoid ExtractorImpl::sanitizePayloadHeaders(Http::HeaderMap& headers) const {\n  for (const auto& header : forward_payload_headers_) {\n    headers.remove(header);\n  }\n}\n\n} // namespace\n\nExtractorConstPtr Extractor::create(const JwtProvider& provider) {\n  return std::make_unique<ExtractorImpl>(provider);\n}\n\nExtractorConstPtr Extractor::create(const JwtProviderList& providers) {\n  return std::make_unique<ExtractorImpl>(providers);\n}\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/extractor.h",
    "content": "#pragma once\n\n#include <map>\n#include <string>\n\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\n/**\n * JwtLocation stores following token information:\n *\n * * extracted token string,\n * * the location where the JWT is extracted from,\n * * list of issuers specified the location.\n *\n */\nclass JwtLocation {\npublic:\n  virtual ~JwtLocation() = default;\n\n  // Get the token string\n  virtual const std::string& token() const PURE;\n\n  // Check if an issuer has specified the location.\n  virtual bool isIssuerSpecified(const std::string& issuer) const PURE;\n\n  // Remove the token from the headers\n  virtual void removeJwt(Http::HeaderMap& headers) const PURE;\n};\n\nusing JwtLocationConstPtr = std::unique_ptr<const JwtLocation>;\nusing JwtProviderList =\n    std::vector<const envoy::extensions::filters::http::jwt_authn::v3::JwtProvider*>;\n\nclass Extractor;\nusing ExtractorConstPtr = std::unique_ptr<const Extractor>;\n\n/**\n * Extracts JWT from locations specified in the config.\n *\n * Usage example:\n *\n *  auto extractor = Extractor::create(config);\n *  auto tokens = extractor->extract(headers);\n *  for (token : tokens) {\n *     Jwt jwt;\n *     if (jwt.parseFromString(token->token()) != Status::Ok) {\n *       // Handle JWT parsing failure.\n *     }\n *\n *     if (need_to_remove) {\n *        // remove the JWT\n *        token->removeJwt(headers);\n *     }\n *  }\n *\n */\nclass Extractor {\npublic:\n  virtual ~Extractor() = default;\n\n  /**\n   * Extract all JWT tokens from the headers. 
If set of header_keys or param_keys\n   * is not empty only those in the matching locations will be returned.\n   *\n   * @param headers is the HTTP request headers.\n   * @return list of extracted Jwt location info.\n   */\n  virtual std::vector<JwtLocationConstPtr>\n  extract(const Http::RequestHeaderMap& headers) const PURE;\n\n  /**\n   * Remove headers that configured to send JWT payloads.\n   *\n   * @param headers is the HTTP request headers.\n   */\n  virtual void sanitizePayloadHeaders(Http::HeaderMap& headers) const PURE;\n\n  /**\n   * Create an instance of Extractor for a given config.\n   * @param from_headers header location config.\n   * @param from_params query param location config.\n   * @return the extractor object.\n   */\n  static ExtractorConstPtr\n  create(const envoy::extensions::filters::http::jwt_authn::v3::JwtProvider& provider);\n\n  /**\n   * Create an instance of Extractor for a list of provider config.\n   * @param the list of JwtProvider configs.\n   * @return the extractor object.\n   */\n  static ExtractorConstPtr create(const JwtProviderList& providers);\n};\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/filter.cc",
    "content": "#include \"extensions/filters/http/jwt_authn/filter.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"jwt_verify_lib/status.h\"\n\nusing ::google::jwt_verify::Status;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\nnamespace {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    access_control_request_method_handle(Http::CustomHeaders::get().AccessControlRequestMethod);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    origin_handle(Http::CustomHeaders::get().Origin);\n\nbool isCorsPreflightRequest(const Http::RequestHeaderMap& headers) {\n  return headers.getMethodValue() == Http::Headers::get().MethodValues.Options &&\n         !headers.getInlineValue(origin_handle.handle()).empty() &&\n         !headers.getInlineValue(access_control_request_method_handle.handle()).empty();\n}\n\n} // namespace\n\nstruct RcDetailsValues {\n  // The jwt_authn filter rejected the request\n  const std::string JwtAuthnAccessDenied = \"jwt_authn_access_denied\";\n};\nusing RcDetails = ConstSingleton<RcDetailsValues>;\n\nFilter::Filter(FilterConfigSharedPtr config)\n    : stats_(config->stats()), config_(std::move(config)) {}\n\nvoid Filter::onDestroy() {\n  ENVOY_LOG(debug, \"Called Filter : {}\", __func__);\n  if (context_) {\n    context_->cancel();\n  }\n}\n\nHttp::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  ENVOY_LOG(debug, \"Called Filter : {}\", __func__);\n\n  state_ = Calling;\n  stopped_ = false;\n\n  if (config_->bypassCorsPreflightRequest() && isCorsPreflightRequest(headers)) {\n    // The CORS preflight doesn't include user credentials, bypass regardless of JWT requirements.\n    // See http://www.w3.org/TR/cors/#cross-origin-request-with-preflight.\n    ENVOY_LOG(debug, \"CORS 
preflight request bypassed regardless of JWT requirements\");\n    stats_.cors_preflight_bypassed_.inc();\n    onComplete(Status::Ok);\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // Verify the JWT token, onComplete() will be called when completed.\n  const auto* verifier =\n      config_->findVerifier(headers, *decoder_callbacks_->streamInfo().filterState());\n  if (!verifier) {\n    onComplete(Status::Ok);\n  } else {\n    context_ = Verifier::createContext(headers, decoder_callbacks_->activeSpan(), this);\n    verifier->verify(context_);\n  }\n\n  if (state_ == Complete) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  ENVOY_LOG(debug, \"Called Filter : {} Stop\", __func__);\n  stopped_ = true;\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\nvoid Filter::setPayload(const ProtobufWkt::Struct& payload) {\n  decoder_callbacks_->streamInfo().setDynamicMetadata(HttpFilterNames::get().JwtAuthn, payload);\n}\n\nvoid Filter::onComplete(const Status& status) {\n  ENVOY_LOG(debug, \"Called Filter : check complete {}\",\n            ::google::jwt_verify::getStatusString(status));\n  // This stream has been reset, abort the callback.\n  if (state_ == Responded) {\n    return;\n  }\n  if (Status::Ok != status) {\n    stats_.denied_.inc();\n    state_ = Responded;\n    // verification failed\n    Http::Code code =\n        status == Status::JwtAudienceNotAllowed ? 
Http::Code::Forbidden : Http::Code::Unauthorized;\n    // return failure reason as message body\n    decoder_callbacks_->sendLocalReply(code, ::google::jwt_verify::getStatusString(status), nullptr,\n                                       absl::nullopt, RcDetails::get().JwtAuthnAccessDenied);\n    return;\n  }\n  stats_.allowed_.inc();\n  state_ = Complete;\n  if (stopped_) {\n    decoder_callbacks_->continueDecoding();\n  }\n}\n\nHttp::FilterDataStatus Filter::decodeData(Buffer::Instance&, bool) {\n  ENVOY_LOG(debug, \"Called Filter : {}\", __func__);\n  if (state_ == Calling) {\n    return Http::FilterDataStatus::StopIterationAndWatermark;\n  }\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus Filter::decodeTrailers(Http::RequestTrailerMap&) {\n  ENVOY_LOG(debug, \"Called Filter : {}\", __func__);\n  if (state_ == Calling) {\n    return Http::FilterTrailersStatus::StopIteration;\n  }\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  ENVOY_LOG(debug, \"Called Filter : {}\", __func__);\n  decoder_callbacks_ = &callbacks;\n}\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/filter.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n\n#include \"extensions/filters/http/jwt_authn/filter_config.h\"\n#include \"extensions/filters/http/jwt_authn/matcher.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\n// The Envoy filter to process JWT auth.\nclass Filter : public Http::StreamDecoderFilter,\n               public Verifier::Callbacks,\n               public Logger::Loggable<Logger::Id::jwt> {\npublic:\n  Filter(FilterConfigSharedPtr config);\n\n  // Http::StreamFilterBase\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\nprivate:\n  // Following two functions are for Verifier::Callbacks interface.\n  // Pass the payload as Struct.\n  void setPayload(const ProtobufWkt::Struct& payload) override;\n  // It will be called when its verify() call is completed.\n  void onComplete(const ::google::jwt_verify::Status& status) override;\n\n  // The callback function.\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_;\n  // The stats object.\n  JwtAuthnFilterStats& stats_;\n  // The state of the request\n  enum State { Init, Calling, Responded, Complete };\n  State state_ = Init;\n  // Mark if request has been stopped.\n  bool stopped_ = false;\n  // Filter config object.\n  FilterConfigSharedPtr config_;\n  // Verify context for current request.\n  ContextSharedPtr context_;\n};\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/filter_config.cc",
    "content": "#include \"extensions/filters/http/jwt_authn/filter_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\nvoid FilterConfigImpl::init() {\n  ENVOY_LOG(debug, \"Loaded JwtAuthConfig: {}\", proto_config_.DebugString());\n\n  // Note: `this` and `context` have a lifetime of the listener.\n  // That may be shorter than the tls callback if the listener is torn down shortly after creation.\n  // We use a shared pointer to make sure this object outlives the tls callbacks.\n  auto shared_this = shared_from_this();\n  tls_->set([shared_this](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<ThreadLocalCache>(shared_this->proto_config_, shared_this->time_source_,\n                                              shared_this->api_);\n  });\n\n  for (const auto& rule : proto_config_.rules()) {\n    rule_pairs_.emplace_back(Matcher::create(rule),\n                             Verifier::create(rule.requires(), proto_config_.providers(), *this));\n  }\n\n  if (proto_config_.has_filter_state_rules()) {\n    filter_state_name_ = proto_config_.filter_state_rules().name();\n    for (const auto& it : proto_config_.filter_state_rules().requires()) {\n      filter_state_verifiers_.emplace(\n          it.first, Verifier::create(it.second, proto_config_.providers(), *this));\n    }\n  }\n}\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/filter_config.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n#include \"envoy/router/string_accessor.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"extensions/filters/http/jwt_authn/matcher.h\"\n#include \"extensions/filters/http/jwt_authn/verifier.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\n/**\n * Making cache as a thread local object, its read/write operations don't need to be protected.\n * Now it only has jwks_cache, but in the future, it will have token cache: to cache the tokens\n * with their verification results.\n */\nclass ThreadLocalCache : public ThreadLocal::ThreadLocalObject {\npublic:\n  // Load the config from envoy config.\n  ThreadLocalCache(const envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication& config,\n                   TimeSource& time_source, Api::Api& api) {\n    jwks_cache_ = JwksCache::create(config, time_source, api);\n  }\n\n  // Get the JwksCache object.\n  JwksCache& getJwksCache() { return *jwks_cache_; }\n\nprivate:\n  // The JwksCache object.\n  JwksCachePtr jwks_cache_;\n};\n\n/**\n * All stats for the Jwt Authn filter. @see stats_macros.h\n */\n#define ALL_JWT_AUTHN_FILTER_STATS(COUNTER)                                                        \\\n  COUNTER(allowed)                                                                                 \\\n  COUNTER(cors_preflight_bypassed)                                                                 \\\n  COUNTER(denied)\n\n/**\n * Wrapper struct for jwt_authn filter stats. @see stats_macros.h\n */\nstruct JwtAuthnFilterStats {\n  ALL_JWT_AUTHN_FILTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * The filter config interface. 
It is an interface so that we can mock it in tests.\n */\nclass FilterConfig {\npublic:\n  virtual ~FilterConfig() = default;\n\n  virtual JwtAuthnFilterStats& stats() PURE;\n\n  virtual bool bypassCorsPreflightRequest() const PURE;\n\n  // Finds the matcher that matched the header\n  virtual const Verifier* findVerifier(const Http::RequestHeaderMap& headers,\n                                       const StreamInfo::FilterState& filter_state) const PURE;\n};\nusing FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;\n\n/**\n * The filter config object to hold config and relevant objects.\n */\nclass FilterConfigImpl : public Logger::Loggable<Logger::Id::jwt>,\n                         public FilterConfig,\n                         public AuthFactory,\n                         public std::enable_shared_from_this<FilterConfigImpl> {\npublic:\n  ~FilterConfigImpl() override = default;\n\n  // Finds the matcher that matched the header\n  static std::shared_ptr<FilterConfigImpl>\n  create(envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication proto_config,\n         const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n    // We can't use make_shared here because the constructor of this class is private.\n    std::shared_ptr<FilterConfigImpl> ptr(\n        new FilterConfigImpl(proto_config, stats_prefix, context));\n    ptr->init();\n    return ptr;\n  }\n\n  // Get per-thread cache object.\n  ThreadLocalCache& getCache() const { return tls_->getTyped<ThreadLocalCache>(); }\n\n  Upstream::ClusterManager& cm() const { return cm_; }\n  TimeSource& timeSource() const { return time_source_; }\n\n  // FilterConfig\n\n  JwtAuthnFilterStats& stats() override { return stats_; }\n\n  bool bypassCorsPreflightRequest() const override { return proto_config_.bypass_cors_preflight(); }\n\n  const Verifier* findVerifier(const Http::RequestHeaderMap& headers,\n                               const StreamInfo::FilterState& filter_state) const 
override {\n    for (const auto& pair : rule_pairs_) {\n      if (pair.matcher_->matches(headers)) {\n        return pair.verifier_.get();\n      }\n    }\n    if (!filter_state_name_.empty() && !filter_state_verifiers_.empty() &&\n        filter_state.hasData<Router::StringAccessor>(filter_state_name_)) {\n      const auto& state = filter_state.getDataReadOnly<Router::StringAccessor>(filter_state_name_);\n      ENVOY_LOG(debug, \"use filter state value {} to find verifier.\", state.asString());\n      const auto& it = filter_state_verifiers_.find(state.asString());\n      if (it != filter_state_verifiers_.end()) {\n        return it->second.get();\n      }\n    }\n    return nullptr;\n  }\n\n  // methods for AuthFactory interface. Factory method to help create authenticators.\n  AuthenticatorPtr create(const ::google::jwt_verify::CheckAudience* check_audience,\n                          const absl::optional<std::string>& provider, bool allow_failed,\n                          bool allow_missing) const override {\n    return Authenticator::create(check_audience, provider, allow_failed, allow_missing,\n                                 getCache().getJwksCache(), cm(), Common::JwksFetcher::create,\n                                 timeSource());\n  }\n\nprivate:\n  FilterConfigImpl(envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication proto_config,\n                   const std::string& stats_prefix, Server::Configuration::FactoryContext& context)\n      : proto_config_(std::move(proto_config)),\n        stats_(generateStats(stats_prefix, context.scope())),\n        tls_(context.threadLocal().allocateSlot()), cm_(context.clusterManager()),\n        time_source_(context.dispatcher().timeSource()), api_(context.api()) {}\n\n  void init();\n\n  JwtAuthnFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    const std::string final_prefix = prefix + \"jwt_authn.\";\n    return {ALL_JWT_AUTHN_FILTER_STATS(POOL_COUNTER_PREFIX(scope, 
final_prefix))};\n  }\n\n  struct MatcherVerifierPair {\n    MatcherVerifierPair(MatcherConstPtr matcher, VerifierConstPtr verifier)\n        : matcher_(std::move(matcher)), verifier_(std::move(verifier)) {}\n    MatcherConstPtr matcher_;\n    VerifierConstPtr verifier_;\n  };\n\n  // The proto config.\n  envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication proto_config_;\n  // The stats for the filter.\n  JwtAuthnFilterStats stats_;\n  // Thread local slot to store per-thread auth store\n  ThreadLocal::SlotPtr tls_;\n  // the cluster manager object.\n  Upstream::ClusterManager& cm_;\n  // The list of rule matchers.\n  std::vector<MatcherVerifierPair> rule_pairs_;\n  // The filter state name to lookup filter_state_rules.\n  std::string filter_state_name_;\n  // The filter state verifier map from filter_state_rules.\n  absl::flat_hash_map<std::string, VerifierConstPtr> filter_state_verifiers_;\n  TimeSource& time_source_;\n  Api::Api& api_;\n};\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/filter_factory.cc",
    "content": "#include \"extensions/filters/http/jwt_authn/filter_factory.h\"\n\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/datasource.h\"\n\n#include \"extensions/filters/http/jwt_authn/filter.h\"\n\n#include \"jwt_verify_lib/jwks.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing ::google::jwt_verify::Jwks;\nusing ::google::jwt_verify::Status;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\n/**\n * Validate inline jwks, make sure they are the valid\n */\nvoid validateJwtConfig(const JwtAuthentication& proto_config, Api::Api& api) {\n  for (const auto& it : proto_config.providers()) {\n    const auto& provider = it.second;\n    const auto inline_jwks = Config::DataSource::read(provider.local_jwks(), true, api);\n    if (!inline_jwks.empty()) {\n      auto jwks_obj = Jwks::createFrom(inline_jwks, Jwks::JWKS);\n      if (jwks_obj->getStatus() != Status::Ok) {\n        throw EnvoyException(fmt::format(\n            \"Issuer '{}' in jwt_authn config has invalid local jwks: {}\", provider.issuer(),\n            ::google::jwt_verify::getStatusString(jwks_obj->getStatus())));\n      }\n    }\n  }\n}\n\n} // namespace\n\nHttp::FilterFactoryCb\nFilterFactory::createFilterFactoryFromProtoTyped(const JwtAuthentication& proto_config,\n                                                 const std::string& prefix,\n                                                 Server::Configuration::FactoryContext& context) {\n  validateJwtConfig(proto_config, context.api());\n  auto filter_config = FilterConfigImpl::create(proto_config, prefix, context);\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(std::make_shared<Filter>(filter_config));\n  
};\n}\n\n/**\n * Static registration for this jwt_authn filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(FilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/filter_factory.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.validate.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\n/**\n * Config registration for jwt_authn filter.\n */\nclass FilterFactory : public Common::FactoryBase<\n                          envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication> {\npublic:\n  FilterFactory() : FactoryBase(HttpFilterNames::get().JwtAuthn) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/jwks_cache.cc",
    "content": "#include \"extensions/filters/http/jwt_authn/jwks_cache.h\"\n\n#include <chrono>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/config/datasource.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"jwt_verify_lib/check_audience.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtProvider;\nusing ::google::jwt_verify::Jwks;\nusing ::google::jwt_verify::Status;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\n// Default cache expiration time is 10 minutes.\nconstexpr int PubkeyCacheExpirationSec = 600;\n\nclass JwksDataImpl : public JwksCache::JwksData, public Logger::Loggable<Logger::Id::jwt> {\npublic:\n  JwksDataImpl(const JwtProvider& jwt_provider, TimeSource& time_source, Api::Api& api)\n      : jwt_provider_(jwt_provider), time_source_(time_source) {\n    std::vector<std::string> audiences;\n    for (const auto& aud : jwt_provider_.audiences()) {\n      audiences.push_back(aud);\n    }\n    audiences_ = std::make_unique<::google::jwt_verify::CheckAudience>(audiences);\n\n    const auto inline_jwks = Config::DataSource::read(jwt_provider_.local_jwks(), true, api);\n    if (!inline_jwks.empty()) {\n      auto ptr = setKey(\n          ::google::jwt_verify::Jwks::createFrom(inline_jwks, ::google::jwt_verify::Jwks::JWKS),\n          std::chrono::steady_clock::time_point::max());\n      if (ptr->getStatus() != Status::Ok) {\n        ENVOY_LOG(warn, \"Invalid inline jwks for issuer: {}, jwks: {}\", jwt_provider_.issuer(),\n                  inline_jwks);\n        jwks_obj_.reset(nullptr);\n      }\n    }\n  }\n\n  const JwtProvider& getJwtProvider() const override { return jwt_provider_; }\n\n  bool areAudiencesAllowed(const 
std::vector<std::string>& jwt_audiences) const override {\n    return audiences_->areAudiencesAllowed(jwt_audiences);\n  }\n\n  const Jwks* getJwksObj() const override { return jwks_obj_.get(); }\n\n  bool isExpired() const override { return time_source_.monotonicTime() >= expiration_time_; }\n\n  const ::google::jwt_verify::Jwks* setRemoteJwks(::google::jwt_verify::JwksPtr&& jwks) override {\n    return setKey(std::move(jwks), getRemoteJwksExpirationTime());\n  }\n\nprivate:\n  // Get the expiration time for a remote Jwks\n  std::chrono::steady_clock::time_point getRemoteJwksExpirationTime() const {\n    auto expire = time_source_.monotonicTime();\n    if (jwt_provider_.has_remote_jwks() && jwt_provider_.remote_jwks().has_cache_duration()) {\n      expire += std::chrono::milliseconds(\n          DurationUtil::durationToMilliseconds(jwt_provider_.remote_jwks().cache_duration()));\n    } else {\n      expire += std::chrono::seconds(PubkeyCacheExpirationSec);\n    }\n    return expire;\n  }\n\n  const ::google::jwt_verify::Jwks* setKey(::google::jwt_verify::JwksPtr&& jwks,\n                                           MonotonicTime expire) {\n    jwks_obj_ = std::move(jwks);\n    expiration_time_ = expire;\n    return jwks_obj_.get();\n  }\n\n  // The jwt provider config.\n  const JwtProvider& jwt_provider_;\n  // Check audience object\n  ::google::jwt_verify::CheckAudiencePtr audiences_;\n  // The generated jwks object.\n  ::google::jwt_verify::JwksPtr jwks_obj_;\n  TimeSource& time_source_;\n  // The pubkey expiration time.\n  MonotonicTime expiration_time_;\n};\n\nclass JwksCacheImpl : public JwksCache {\npublic:\n  // Load the config from envoy config.\n  JwksCacheImpl(const JwtAuthentication& config, TimeSource& time_source, Api::Api& api) {\n    for (const auto& it : config.providers()) {\n      const auto& provider = it.second;\n      jwks_data_map_.emplace(it.first, JwksDataImpl(provider, time_source, api));\n      if (issuer_ptr_map_.find(provider.issuer()) == 
issuer_ptr_map_.end()) {\n        issuer_ptr_map_.emplace(provider.issuer(), findByProvider(it.first));\n      }\n    }\n  }\n\n  JwksData* findByIssuer(const std::string& issuer) override {\n    const auto it = issuer_ptr_map_.find(issuer);\n    if (it == issuer_ptr_map_.end()) {\n      return nullptr;\n    }\n    return it->second;\n  }\n\n  JwksData* findByProvider(const std::string& provider) final {\n    const auto it = jwks_data_map_.find(provider);\n    if (it == jwks_data_map_.end()) {\n      return nullptr;\n    }\n    return &it->second;\n  }\n\nprivate:\n  // The Jwks data map indexed by provider.\n  absl::node_hash_map<std::string, JwksDataImpl> jwks_data_map_;\n  // The Jwks data pointer map indexed by issuer.\n  absl::node_hash_map<std::string, JwksData*> issuer_ptr_map_;\n};\n\n} // namespace\n\nJwksCachePtr\nJwksCache::create(const envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication& config,\n                  TimeSource& time_source, Api::Api& api) {\n  return JwksCachePtr(new JwksCacheImpl(config, time_source, api));\n}\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/jwks_cache.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"jwt_verify_lib/jwks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\nclass JwksCache;\nusing JwksCachePtr = std::unique_ptr<JwksCache>;\n\n/**\n * Interface to access all configured Jwt rules and their cached Jwks objects.\n * It only caches Jwks specified in the config.\n * Its usage:\n *     auto jwks_cache = JwksCache::create(Config);\n *\n *     // for a given jwt\n *     auto jwks_data = jwks_cache->findByIssuer(jwt->getIssuer());\n *     if (!jwks_data->areAudiencesAllowed(jwt->getAudiences())) reject;\n *\n *     if (jwks_data->getJwksObj() == nullptr || jwks_data->isExpired()) {\n *        // Fetch remote Jwks.\n *        jwks_data->setRemoteJwks(remote_jwks_str);\n *     }\n *\n *     verifyJwt(jwks_data->getJwksObj(), jwt);\n */\n\nclass JwksCache {\npublic:\n  virtual ~JwksCache() = default;\n\n  // Interface to access a Jwks config rule and its cached Jwks object.\n  class JwksData {\n  public:\n    virtual ~JwksData() = default;\n\n    // Check if a list of audiences are allowed.\n    virtual bool areAudiencesAllowed(const std::vector<std::string>& audiences) const PURE;\n\n    // Get the cached config: JWT rule.\n    virtual const envoy::extensions::filters::http::jwt_authn::v3::JwtProvider&\n    getJwtProvider() const PURE;\n\n    // Get the Jwks object.\n    virtual const ::google::jwt_verify::Jwks* getJwksObj() const PURE;\n\n    // Return true if jwks object is expired.\n    virtual bool isExpired() const PURE;\n\n    // Set a remote Jwks.\n    virtual const ::google::jwt_verify::Jwks*\n    setRemoteJwks(::google::jwt_verify::JwksPtr&& jwks) PURE;\n  };\n\n  // Lookup issuer cache map. 
The cache only stores Jwks specified in the config.\n  virtual JwksData* findByIssuer(const std::string& issuer) PURE;\n\n  virtual JwksData* findByProvider(const std::string& provider) PURE;\n\n  // Factory function to create an instance.\n  static JwksCachePtr\n  create(const envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication& config,\n         TimeSource& time_source, Api::Api& api);\n};\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/matcher.cc",
    "content": "#include \"extensions/filters/http/jwt_authn/matcher.h\"\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/matchers.h\"\n#include \"common/common/regex.h\"\n#include \"common/router/config_impl.h\"\n\n#include \"absl/strings/match.h\"\n\nusing envoy::config::route::v3::RouteMatch;\nusing envoy::extensions::filters::http::jwt_authn::v3::RequirementRule;\nusing Envoy::Router::ConfigUtility;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\n/**\n * Perform a match against any HTTP header or pseudo-header.\n */\nclass BaseMatcherImpl : public Matcher, public Logger::Loggable<Logger::Id::jwt> {\npublic:\n  BaseMatcherImpl(const RequirementRule& rule)\n      : case_sensitive_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(rule.match(), case_sensitive, true)),\n        config_headers_(Http::HeaderUtility::buildHeaderDataVector(rule.match().headers())) {\n    for (const auto& query_parameter : rule.match().query_parameters()) {\n      config_query_parameters_.push_back(\n          std::make_unique<Router::ConfigUtility::QueryParameterMatcher>(query_parameter));\n    }\n  }\n\n  // Check match for HeaderMatcher and QueryParameterMatcher\n  bool matchRoute(const Http::RequestHeaderMap& headers) const {\n    bool matches = true;\n    // TODO(potatop): matching on RouteMatch runtime is not implemented.\n\n    matches &= Http::HeaderUtility::matchHeaders(headers, config_headers_);\n    if (!config_query_parameters_.empty()) {\n      Http::Utility::QueryParams query_parameters =\n          Http::Utility::parseQueryString(headers.getPathValue());\n      matches &= ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_);\n    }\n    return matches;\n  }\n\nprotected:\n  const bool case_sensitive_;\n\nprivate:\n  std::vector<Http::HeaderUtility::HeaderDataPtr> 
config_headers_;\n  std::vector<Router::ConfigUtility::QueryParameterMatcherPtr> config_query_parameters_;\n};\n\n/**\n * Perform a match against any path with prefix rule.\n */\nclass PrefixMatcherImpl : public BaseMatcherImpl {\npublic:\n  PrefixMatcherImpl(const RequirementRule& rule)\n      : BaseMatcherImpl(rule), prefix_(rule.match().prefix()),\n        path_matcher_(Matchers::PathMatcher::createPrefix(prefix_, !case_sensitive_)) {}\n\n  bool matches(const Http::RequestHeaderMap& headers) const override {\n    if (BaseMatcherImpl::matchRoute(headers) && path_matcher_->match(headers.getPathValue())) {\n      ENVOY_LOG(debug, \"Prefix requirement '{}' matched.\", prefix_);\n      return true;\n    }\n    return false;\n  }\n\nprivate:\n  // prefix string\n  const std::string prefix_;\n  const Matchers::PathMatcherConstSharedPtr path_matcher_;\n};\n\n/**\n * Perform a match against any path with a specific path rule.\n */\nclass PathMatcherImpl : public BaseMatcherImpl {\npublic:\n  PathMatcherImpl(const RequirementRule& rule)\n      : BaseMatcherImpl(rule), path_(rule.match().path()),\n        path_matcher_(Matchers::PathMatcher::createExact(path_, !case_sensitive_)) {}\n\n  bool matches(const Http::RequestHeaderMap& headers) const override {\n    if (BaseMatcherImpl::matchRoute(headers) && path_matcher_->match(headers.getPathValue())) {\n      ENVOY_LOG(debug, \"Path requirement '{}' matched.\", path_);\n      return true;\n    }\n    return false;\n  }\n\nprivate:\n  // path string.\n  const std::string path_;\n  const Matchers::PathMatcherConstSharedPtr path_matcher_;\n};\n\n/**\n * Perform a match against any path with a regex rule.\n * TODO(mattklein123): This code needs dedup with RegexRouteEntryImpl.\n */\nclass RegexMatcherImpl : public BaseMatcherImpl {\npublic:\n  RegexMatcherImpl(const RequirementRule& rule) : BaseMatcherImpl(rule) {\n    // TODO(yangminzhu): Use PathMatcher once hidden_envoy_deprecated_regex is removed.\n    if 
(rule.match().path_specifier_case() ==\n        envoy::config::route::v3::RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex) {\n      regex_ = Regex::Utility::parseStdRegexAsCompiledMatcher(\n          rule.match().hidden_envoy_deprecated_regex());\n      regex_str_ = rule.match().hidden_envoy_deprecated_regex();\n    } else {\n      ASSERT(rule.match().path_specifier_case() ==\n             envoy::config::route::v3::RouteMatch::PathSpecifierCase::kSafeRegex);\n      regex_ = Regex::Utility::parseRegex(rule.match().safe_regex());\n      regex_str_ = rule.match().safe_regex().regex();\n    }\n  }\n\n  bool matches(const Http::RequestHeaderMap& headers) const override {\n    if (BaseMatcherImpl::matchRoute(headers)) {\n      const Http::HeaderString& path = headers.Path()->value();\n      const absl::string_view query_string = Http::Utility::findQueryStringStart(path);\n      absl::string_view path_view = path.getStringView();\n      path_view.remove_suffix(query_string.length());\n      if (regex_->match(path_view)) {\n        ENVOY_LOG(debug, \"Regex requirement '{}' matched.\", regex_str_);\n        return true;\n      }\n    }\n    return false;\n  }\n\nprivate:\n  Regex::CompiledMatcherPtr regex_;\n  // raw regex string, for logging.\n  std::string regex_str_;\n};\n\n/**\n * Perform a match against an HTTP CONNECT request.\n */\nclass ConnectMatcherImpl : public BaseMatcherImpl {\npublic:\n  ConnectMatcherImpl(const RequirementRule& rule) : BaseMatcherImpl(rule) {}\n\n  bool matches(const Http::RequestHeaderMap& headers) const override {\n    if (Http::HeaderUtility::isConnect(headers) && BaseMatcherImpl::matchRoute(headers)) {\n      ENVOY_LOG(debug, \"CONNECT requirement matched.\");\n      return true;\n    }\n\n    return false;\n  }\n};\n} // namespace\n\nMatcherConstPtr Matcher::create(const RequirementRule& rule) {\n  switch (rule.match().path_specifier_case()) {\n  case RouteMatch::PathSpecifierCase::kPrefix:\n    return 
std::make_unique<PrefixMatcherImpl>(rule);\n  case RouteMatch::PathSpecifierCase::kPath:\n    return std::make_unique<PathMatcherImpl>(rule);\n  case RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex:\n  case RouteMatch::PathSpecifierCase::kSafeRegex:\n    return std::make_unique<RegexMatcherImpl>(rule);\n  case RouteMatch::PathSpecifierCase::kConnectMatcher:\n    return std::make_unique<ConnectMatcherImpl>(rule);\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/matcher.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n#include \"envoy/http/header_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\nclass Matcher;\nusing MatcherConstPtr = std::unique_ptr<const Matcher>;\n\n/**\n * Supports matching a HTTP requests with JWT requirements.\n */\nclass Matcher {\npublic:\n  virtual ~Matcher() = default;\n\n  /**\n   * Returns if a HTTP request matches with the rules of the matcher.\n   *\n   * @param headers    the request headers used to match against. An empty map should be used if\n   *                   there are none headers available.\n   * @return  true if request is a match, false otherwise.\n   */\n  virtual bool matches(const Http::RequestHeaderMap& headers) const PURE;\n\n  /**\n   * Factory method to create a shared instance of a matcher based on the rule defined.\n   *\n   * @param rule  the proto rule match message.\n   * @return the matcher instance.\n   */\n  static MatcherConstPtr\n  create(const envoy::extensions::filters::http::jwt_authn::v3::RequirementRule& rule);\n};\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/verifier.cc",
    "content": "#include \"extensions/filters/http/jwt_authn/verifier.h\"\n\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"jwt_verify_lib/check_audience.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtProvider;\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtRequirement;\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtRequirementAndList;\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtRequirementOrList;\nusing ::google::jwt_verify::CheckAudience;\nusing ::google::jwt_verify::Status;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\n/**\n * Struct to keep track of verifier completed and responded state for a request.\n */\nstruct CompletionState {\n  // if verifier node has responded to a request or not.\n  bool is_completed_{false};\n  // number of completed inner verifier for an any/all verifier.\n  std::size_t number_completed_children_{0};\n  // A valid error for a RequireAny\n  Status any_valid_error_{Status::Ok};\n};\n\nclass ContextImpl : public Verifier::Context {\npublic:\n  ContextImpl(Http::RequestHeaderMap& headers, Tracing::Span& parent_span,\n              Verifier::Callbacks* callback)\n      : headers_(headers), parent_span_(parent_span), callback_(callback) {}\n\n  Http::RequestHeaderMap& headers() const override { return headers_; }\n\n  Tracing::Span& parentSpan() const override { return parent_span_; }\n\n  Verifier::Callbacks* callback() const override { return callback_; }\n\n  void cancel() override {\n    for (const auto& it : auths_) {\n      it->onDestroy();\n    }\n  }\n\n  // Get Response data which can be used to check if a verifier node has responded or not.\n  CompletionState& getCompletionState(const Verifier* verifier) {\n    return completion_states_[verifier];\n  }\n\n  // Stores an authenticator object for this request.\n  void storeAuth(AuthenticatorPtr&& auth) { 
auths_.emplace_back(std::move(auth)); }\n\n  // Add a pair of (name, payload), called by Authenticator\n  void addPayload(const std::string& name, const ProtobufWkt::Struct& payload) {\n    *(*payload_.mutable_fields())[name].mutable_struct_value() = payload;\n  }\n\n  void setPayload() {\n    if (!payload_.fields().empty()) {\n      callback_->setPayload(payload_);\n    }\n  }\n\nprivate:\n  Http::RequestHeaderMap& headers_;\n  Tracing::Span& parent_span_;\n  Verifier::Callbacks* callback_;\n  absl::node_hash_map<const Verifier*, CompletionState> completion_states_;\n  std::vector<AuthenticatorPtr> auths_;\n  ProtobufWkt::Struct payload_;\n};\n\n// base verifier for provider_name, provider_and_audiences, and allow_missing_or_failed.\nclass BaseVerifierImpl : public Logger::Loggable<Logger::Id::jwt>, public Verifier {\npublic:\n  BaseVerifierImpl(const BaseVerifierImpl* parent) : parent_(parent) {}\n\n  void completeWithStatus(Status status, ContextImpl& context) const {\n    if (parent_ != nullptr) {\n      return parent_->onComplete(status, context);\n    }\n\n    if (Status::Ok == status) {\n      context.setPayload();\n    }\n    context.callback()->onComplete(status);\n    context.cancel();\n  }\n\n  // Check if next verifier should be notified of status, or if no next verifier exists signal\n  // callback in context.\n  virtual void onComplete(const Status& status, ContextImpl& context) const {\n    auto& completion_state = context.getCompletionState(this);\n    if (!completion_state.is_completed_) {\n      completion_state.is_completed_ = true;\n      completeWithStatus(status, context);\n    }\n  }\n\nprotected:\n  // The parent group verifier.\n  const BaseVerifierImpl* const parent_;\n};\n\n// Provider specific verifier\nclass ProviderVerifierImpl : public BaseVerifierImpl {\npublic:\n  ProviderVerifierImpl(const std::string& provider_name, const AuthFactory& factory,\n                       const JwtProvider& provider, const BaseVerifierImpl* parent)\n   
   : BaseVerifierImpl(parent), auth_factory_(factory), extractor_(Extractor::create(provider)),\n        provider_name_(absl::make_optional<std::string>(provider_name)) {}\n\n  void verify(ContextSharedPtr context) const override {\n    auto& ctximpl = static_cast<ContextImpl&>(*context);\n    auto auth = auth_factory_.create(getAudienceChecker(), provider_name_, false, false);\n    extractor_->sanitizePayloadHeaders(ctximpl.headers());\n    auth->verify(\n        ctximpl.headers(), ctximpl.parentSpan(), extractor_->extract(ctximpl.headers()),\n        [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) {\n          ctximpl.addPayload(name, payload);\n        },\n        [this, context](const Status& status) {\n          onComplete(status, static_cast<ContextImpl&>(*context));\n        });\n    if (!ctximpl.getCompletionState(this).is_completed_) {\n      ctximpl.storeAuth(std::move(auth));\n    } else {\n      auth->onDestroy();\n    }\n  }\n\nprotected:\n  virtual const CheckAudience* getAudienceChecker() const { return nullptr; }\n\nprivate:\n  const AuthFactory& auth_factory_;\n  const ExtractorConstPtr extractor_;\n  const absl::optional<std::string> provider_name_;\n};\n\nclass ProviderAndAudienceVerifierImpl : public ProviderVerifierImpl {\npublic:\n  ProviderAndAudienceVerifierImpl(const std::string& provider_name, const AuthFactory& factory,\n                                  const JwtProvider& provider, const BaseVerifierImpl* parent,\n                                  const std::vector<std::string>& config_audiences)\n      : ProviderVerifierImpl(provider_name, factory, provider, parent),\n        check_audience_(std::make_unique<CheckAudience>(config_audiences)) {}\n\nprivate:\n  const CheckAudience* getAudienceChecker() const override { return check_audience_.get(); }\n\n  // Check audience object\n  ::google::jwt_verify::CheckAudiencePtr check_audience_;\n};\n\n// Allow missing or failed verifier\nclass AllowFailedVerifierImpl : 
public BaseVerifierImpl {\npublic:\n  AllowFailedVerifierImpl(const AuthFactory& factory, const JwtProviderList& providers,\n                          const BaseVerifierImpl* parent)\n      : BaseVerifierImpl(parent), auth_factory_(factory), extractor_(Extractor::create(providers)) {\n  }\n\n  void verify(ContextSharedPtr context) const override {\n    auto& ctximpl = static_cast<ContextImpl&>(*context);\n    auto auth = auth_factory_.create(nullptr, absl::nullopt, true, true);\n    extractor_->sanitizePayloadHeaders(ctximpl.headers());\n    auth->verify(\n        ctximpl.headers(), ctximpl.parentSpan(), extractor_->extract(ctximpl.headers()),\n        [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) {\n          ctximpl.addPayload(name, payload);\n        },\n        [this, context](const Status& status) {\n          onComplete(status, static_cast<ContextImpl&>(*context));\n        });\n    if (!ctximpl.getCompletionState(this).is_completed_) {\n      ctximpl.storeAuth(std::move(auth));\n    } else {\n      auth->onDestroy();\n    }\n  }\n\nprivate:\n  const AuthFactory& auth_factory_;\n  // const Extractor& extractor_;\n  const ExtractorConstPtr extractor_;\n};\n\nclass AllowMissingVerifierImpl : public BaseVerifierImpl {\npublic:\n  AllowMissingVerifierImpl(const AuthFactory& factory, const JwtProviderList& providers,\n                           const BaseVerifierImpl* parent)\n      : BaseVerifierImpl(parent), auth_factory_(factory), extractor_(Extractor::create(providers)) {\n  }\n\n  void verify(ContextSharedPtr context) const override {\n    ENVOY_LOG(debug, \"Called AllowMissingVerifierImpl.verify : {}\", __func__);\n\n    auto& ctximpl = static_cast<ContextImpl&>(*context);\n    auto auth = auth_factory_.create(nullptr, absl::nullopt, false /* allow failed */,\n                                     true /* allow missing */);\n    extractor_->sanitizePayloadHeaders(ctximpl.headers());\n    auth->verify(\n        ctximpl.headers(), 
ctximpl.parentSpan(), extractor_->extract(ctximpl.headers()),\n        [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) {\n          ctximpl.addPayload(name, payload);\n        },\n        [this, context](const Status& status) {\n          onComplete(status, static_cast<ContextImpl&>(*context));\n        });\n    if (!ctximpl.getCompletionState(this).is_completed_) {\n      ctximpl.storeAuth(std::move(auth));\n    } else {\n      auth->onDestroy();\n    }\n  }\n\nprivate:\n  const AuthFactory& auth_factory_;\n  const ExtractorConstPtr extractor_;\n};\n\nVerifierConstPtr innerCreate(const JwtRequirement& requirement,\n                             const Protobuf::Map<std::string, JwtProvider>& providers,\n                             const AuthFactory& factory,\n                             const std::vector<std::string> parent_provider_names,\n                             const BaseVerifierImpl* parent);\n\n// Base verifier for requires all or any.\nclass BaseGroupVerifierImpl : public BaseVerifierImpl {\npublic:\n  BaseGroupVerifierImpl(const BaseVerifierImpl* parent) : BaseVerifierImpl(parent) {}\n\n  void verify(ContextSharedPtr context) const override {\n    auto& ctximpl = static_cast<ContextImpl&>(*context);\n    for (const auto& it : verifiers_) {\n      if (ctximpl.getCompletionState(this).is_completed_) {\n        return;\n      }\n      it->verify(context);\n    }\n  }\n\nprotected:\n  // The list of requirement verifiers\n  std::vector<VerifierConstPtr> verifiers_;\n};\n\n// Requires any verifier.\nclass AnyVerifierImpl : public BaseGroupVerifierImpl {\npublic:\n  AnyVerifierImpl(const JwtRequirementOrList& or_list, const AuthFactory& factory,\n                  const Protobuf::Map<std::string, JwtProvider>& providers,\n                  const BaseVerifierImpl* parent)\n      : BaseGroupVerifierImpl(parent) {\n    const JwtRequirement* by_pass_type_requirement = nullptr;\n    std::vector<std::string> used_providers;\n    for (const 
auto& it : or_list.requirements()) {\n      bool is_regular_requirement = true;\n      switch (it.requires_type_case()) {\n      case JwtRequirement::RequiresTypeCase::kProviderName:\n        used_providers.emplace_back(it.provider_name());\n        break;\n      case JwtRequirement::RequiresTypeCase::kProviderAndAudiences:\n        used_providers.emplace_back(it.provider_and_audiences().provider_name());\n        break;\n      case JwtRequirement::RequiresTypeCase::kAllowMissingOrFailed:\n      case JwtRequirement::RequiresTypeCase::kAllowMissing:\n        is_regular_requirement = false;\n        if (by_pass_type_requirement == nullptr ||\n            by_pass_type_requirement->requires_type_case() ==\n                JwtRequirement::RequiresTypeCase::kAllowMissing) {\n          // We need to keep only one by_pass_type_requirement. If both\n          // kAllowMissing and kAllowMissingOrFailed are set, use\n          // kAllowMissingOrFailed.\n          by_pass_type_requirement = &it;\n        }\n      default:\n        break;\n      }\n      if (is_regular_requirement) {\n        verifiers_.emplace_back(\n            innerCreate(it, providers, factory, std::vector<std::string>{}, this));\n      }\n    }\n    if (by_pass_type_requirement) {\n      verifiers_.emplace_back(\n          innerCreate(*by_pass_type_requirement, providers, factory, used_providers, this));\n    }\n  }\n\n  void onComplete(const Status& status, ContextImpl& context) const override {\n    auto& completion_state = context.getCompletionState(this);\n    if (completion_state.is_completed_) {\n      return;\n    }\n    // For RequireAny: usually it returns the error from the last provider.\n    // But if a Jwt is not for a provider, its auth returns JwtMissed or JwtUnknownIssuer.\n    // Such error should not be used as the final error if there are other valid errors.\n    if (status != Status::Ok && status != Status::JwtMissed && status != Status::JwtUnknownIssuer) {\n      
completion_state.any_valid_error_ = status;\n    }\n    if (++completion_state.number_completed_children_ == verifiers_.size() ||\n        Status::Ok == status) {\n      completion_state.is_completed_ = true;\n      Status final_status = status;\n      if (status != Status::Ok && completion_state.any_valid_error_ != Status::Ok) {\n        final_status = completion_state.any_valid_error_;\n      }\n      completeWithStatus(final_status, context);\n    }\n  }\n};\n\n// Requires all verifier\nclass AllVerifierImpl : public BaseGroupVerifierImpl {\npublic:\n  AllVerifierImpl(const JwtRequirementAndList& and_list, const AuthFactory& factory,\n                  const Protobuf::Map<std::string, JwtProvider>& providers,\n                  // const Extractor& extractor_for_allow_fail,\n                  const BaseVerifierImpl* parent)\n      : BaseGroupVerifierImpl(parent) {\n    for (const auto& it : and_list.requirements()) {\n      verifiers_.emplace_back(\n          innerCreate(it, providers, factory, std::vector<std::string>{}, this));\n    }\n  }\n\n  void onComplete(const Status& status, ContextImpl& context) const override {\n    auto& completion_state = context.getCompletionState(this);\n    if (completion_state.is_completed_) {\n      return;\n    }\n    if (++completion_state.number_completed_children_ == verifiers_.size() ||\n        Status::Ok != status) {\n      completion_state.is_completed_ = true;\n      completeWithStatus(status, context);\n    }\n  }\n};\n\n// Match all, for requirement not set\nclass AllowAllVerifierImpl : public BaseVerifierImpl {\npublic:\n  AllowAllVerifierImpl(const BaseVerifierImpl* parent) : BaseVerifierImpl(parent) {}\n\n  void verify(ContextSharedPtr context) const override {\n    completeWithStatus(Status::Ok, static_cast<ContextImpl&>(*context));\n  }\n};\n\nVerifierConstPtr innerCreate(const JwtRequirement& requirement,\n                             const Protobuf::Map<std::string, JwtProvider>& providers,\n                    
         const AuthFactory& factory,\n                             const std::vector<std::string> parent_provider_names,\n                             const BaseVerifierImpl* parent) {\n  std::string provider_name;\n  std::vector<std::string> audiences;\n  JwtProviderList parent_providers;\n  for (const auto& name : parent_provider_names) {\n    const auto& it = providers.find(name);\n    if (it == providers.end()) {\n      throw EnvoyException(fmt::format(\"Required provider ['{}'] is not configured.\", name));\n    }\n    parent_providers.emplace_back(&it->second);\n  }\n  switch (requirement.requires_type_case()) {\n  case JwtRequirement::RequiresTypeCase::kProviderName:\n    provider_name = requirement.provider_name();\n    break;\n  case JwtRequirement::RequiresTypeCase::kProviderAndAudiences:\n    for (const auto& it : requirement.provider_and_audiences().audiences()) {\n      audiences.emplace_back(it);\n    }\n    provider_name = requirement.provider_and_audiences().provider_name();\n    break;\n  case JwtRequirement::RequiresTypeCase::kRequiresAny:\n    return std::make_unique<AnyVerifierImpl>(requirement.requires_any(), factory, providers,\n                                             parent);\n  case JwtRequirement::RequiresTypeCase::kRequiresAll:\n    return std::make_unique<AllVerifierImpl>(requirement.requires_all(), factory, providers,\n                                             parent);\n  case JwtRequirement::RequiresTypeCase::kAllowMissingOrFailed:\n    return std::make_unique<AllowFailedVerifierImpl>(factory, parent_providers, parent);\n  case JwtRequirement::RequiresTypeCase::kAllowMissing:\n    return std::make_unique<AllowMissingVerifierImpl>(factory, parent_providers, parent);\n  case JwtRequirement::RequiresTypeCase::REQUIRES_TYPE_NOT_SET:\n    return std::make_unique<AllowAllVerifierImpl>(parent);\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  const auto& it = providers.find(provider_name);\n  if (it == providers.end()) {\n    
throw EnvoyException(fmt::format(\"Required provider ['{}'] is not configured.\", provider_name));\n  }\n  if (audiences.empty()) {\n    return std::make_unique<ProviderVerifierImpl>(provider_name, factory, it->second, parent);\n  }\n  return std::make_unique<ProviderAndAudienceVerifierImpl>(provider_name, factory, it->second,\n                                                           parent, audiences);\n}\n\n} // namespace\n\nContextSharedPtr Verifier::createContext(Http::RequestHeaderMap& headers,\n                                         Tracing::Span& parent_span, Callbacks* callback) {\n  return std::make_shared<ContextImpl>(headers, parent_span, callback);\n}\n\nVerifierConstPtr Verifier::create(const JwtRequirement& requirement,\n                                  const Protobuf::Map<std::string, JwtProvider>& providers,\n                                  const AuthFactory& factory) {\n  return innerCreate(requirement, providers, factory, std::vector<std::string>{}, nullptr);\n}\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/jwt_authn/verifier.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"extensions/filters/http/jwt_authn/authenticator.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\nclass Verifier;\nusing VerifierConstPtr = std::unique_ptr<const Verifier>;\n\n/**\n * Supports verification of JWTs with configured requirements.\n */\nclass Verifier {\npublic:\n  virtual ~Verifier() = default;\n\n  /**\n   * Handle for notifying Verifier callers of request completion.\n   */\n  class Callbacks {\n  public:\n    virtual ~Callbacks() = default;\n\n    /**\n     * Successfully verified JWT payload are stored in the struct with its\n     * *fields* containing **issuer** as keys and **payload** as string values\n     * This function is called before onComplete() function.\n     * It will not be called if no payload to write.\n     */\n    virtual void setPayload(const ProtobufWkt::Struct& payload) PURE;\n\n    /**\n     * Called on completion of request.\n     *\n     * @param status the status of the request.\n     */\n    virtual void onComplete(const ::google::jwt_verify::Status& status) PURE;\n  };\n\n  // Context object to hold data needed for verifier.\n  class Context {\n  public:\n    virtual ~Context() = default;\n\n    /**\n     * Returns the request headers wrapped in this context.\n     *\n     * @return the request headers.\n     */\n    virtual Http::HeaderMap& headers() const PURE;\n\n    /**\n     * Returns the active span wrapped in this context.\n     *\n     * @return the active span.\n     */\n    virtual Tracing::Span& parentSpan() const PURE;\n\n    /**\n     * Returns the request callback wrapped in this context.\n     *\n     * @returns the request callback.\n     */\n    virtual Callbacks* callback() const PURE;\n\n    /**\n     * Cancel any pending requests for this context.\n     */\n    virtual void cancel() PURE;\n  };\n\n  using ContextSharedPtr = 
std::shared_ptr<Context>;\n\n  // Verify all tokens on headers, and signal the caller with callback.\n  virtual void verify(ContextSharedPtr context) const PURE;\n\n  // Factory method for creating verifiers.\n  static VerifierConstPtr create(\n      const envoy::extensions::filters::http::jwt_authn::v3::JwtRequirement& requirement,\n      const Protobuf::Map<std::string,\n                          envoy::extensions::filters::http::jwt_authn::v3::JwtProvider>& providers,\n      const AuthFactory& factory);\n\n  // Factory method for creating verifier contexts.\n  static ContextSharedPtr createContext(Http::RequestHeaderMap& headers, Tracing::Span& parent_span,\n                                        Callbacks* callback);\n};\n\nusing ContextSharedPtr = std::shared_ptr<Verifier::Context>;\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/local_ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Local Ratelimit L7 HTTP filter\n# Public docs: docs/root/configuration/http_filters/local_rate_limit_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"local_ratelimit_lib\",\n    srcs = [\"local_ratelimit.cc\"],\n    hdrs = [\"local_ratelimit.h\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/router:header_parser_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/filters/common/local_ratelimit:local_ratelimit_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/local_ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    deps = [\n        \":local_ratelimit_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/local_ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/local_ratelimit/config.cc",
    "content": "#include \"extensions/filters/http/local_ratelimit/config.h\"\n\n#include <string>\n\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/local_ratelimit/local_ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace LocalRateLimitFilter {\n\nHttp::FilterFactoryCb LocalRateLimitFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config,\n    const std::string&, Server::Configuration::FactoryContext& context) {\n  FilterConfigSharedPtr filter_config = std::make_shared<FilterConfig>(\n      proto_config, context.dispatcher(), context.scope(), context.runtime());\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<Filter>(filter_config));\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nLocalRateLimitFilterConfig::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config,\n    Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<const FilterConfig>(proto_config, context.dispatcher(), context.scope(),\n                                              context.runtime(), true);\n}\n\n/**\n * Static registration for the rate limit filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(LocalRateLimitFilterConfig,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.local_rate_limit\"};\n\n} // namespace LocalRateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/local_ratelimit/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.pb.h\"\n#include \"envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace LocalRateLimitFilter {\n\n/**\n * Config registration for the local rate limit filter. @see NamedHttpFilterConfigFactory.\n */\nclass LocalRateLimitFilterConfig\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit> {\npublic:\n  LocalRateLimitFilterConfig() : FactoryBase(\"envoy.filters.http.local_ratelimit\") {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config,\n      Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) override;\n};\n\n} // namespace LocalRateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/local_ratelimit/local_ratelimit.cc",
    "content": "#include \"extensions/filters/http/local_ratelimit/local_ratelimit.h\"\n\n#include <string>\n#include <vector>\n\n#include \"envoy/http/codes.h\"\n\n#include \"common/http/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace LocalRateLimitFilter {\n\nFilterConfig::FilterConfig(\n    const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& config,\n    Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::Loader& runtime,\n    const bool per_route)\n    : status_(toErrorCode(config.status().code())),\n      stats_(generateStats(config.stat_prefix(), scope)),\n      rate_limiter_(Filters::Common::LocalRateLimit::LocalRateLimiterImpl(\n          std::chrono::milliseconds(\n              PROTOBUF_GET_MS_OR_DEFAULT(config.token_bucket(), fill_interval, 0)),\n          config.token_bucket().max_tokens(),\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.token_bucket(), tokens_per_fill, 1), dispatcher)),\n      runtime_(runtime),\n      filter_enabled_(\n          config.has_filter_enabled()\n              ? absl::optional<Envoy::Runtime::FractionalPercent>(\n                    Envoy::Runtime::FractionalPercent(config.filter_enabled(), runtime_))\n              : absl::nullopt),\n      filter_enforced_(\n          config.has_filter_enforced()\n              ? absl::optional<Envoy::Runtime::FractionalPercent>(\n                    Envoy::Runtime::FractionalPercent(config.filter_enforced(), runtime_))\n              : absl::nullopt),\n      response_headers_parser_(\n          Envoy::Router::HeaderParser::configure(config.response_headers_to_add())) {\n  // Note: no token bucket is fine for the global config, which would be the case for enabling\n  //       the filter globally but disabled and then applying limits at the virtual host or\n  //       route level. At the virtual or route level, it makes no sense to have no token\n  //       bucket so we throw an error. 
If there's no token bucket configured globally or\n  //       at the vhost/route level, no rate limiting is applied.\n  if (per_route && !config.has_token_bucket()) {\n    throw EnvoyException(\"local rate limit token bucket must be set for per filter configs\");\n  }\n}\n\nbool FilterConfig::requestAllowed() const { return rate_limiter_.requestAllowed(); }\n\nLocalRateLimitStats FilterConfig::generateStats(const std::string& prefix, Stats::Scope& scope) {\n  const std::string final_prefix = prefix + \".http_local_rate_limit\";\n  return {ALL_LOCAL_RATE_LIMIT_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))};\n}\n\nbool FilterConfig::enabled() const {\n  return filter_enabled_.has_value() ? filter_enabled_->enabled() : false;\n}\n\nbool FilterConfig::enforced() const {\n  return filter_enforced_.has_value() ? filter_enforced_->enabled() : false;\n}\n\nHttp::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap&, bool) {\n  const auto* config = getConfig();\n\n  if (!config->enabled()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  config->stats().enabled_.inc();\n\n  if (config->requestAllowed()) {\n    config->stats().ok_.inc();\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  config->stats().rate_limited_.inc();\n\n  if (!config->enforced()) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  config->stats().enforced_.inc();\n\n  decoder_callbacks_->sendLocalReply(\n      config->status(), \"local_rate_limited\",\n      [this, config](Http::HeaderMap& headers) {\n        config->responseHeadersParser().evaluateHeaders(headers, decoder_callbacks_->streamInfo());\n      },\n      absl::nullopt, \"local_rate_limited\");\n  decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimited);\n\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\nconst FilterConfig* Filter::getConfig() const {\n  const auto* config = Http::Utility::resolveMostSpecificPerFilterConfig<FilterConfig>(\n      
\"envoy.filters.http.local_ratelimit\", decoder_callbacks_->route());\n  if (config) {\n    return config;\n  }\n\n  return config_.get();\n}\n\n} // namespace LocalRateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/local_ratelimit/local_ratelimit.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/router/header_parser.h\"\n#include \"common/runtime/runtime_protos.h\"\n\n#include \"extensions/filters/common/local_ratelimit/local_ratelimit_impl.h\"\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace LocalRateLimitFilter {\n\n/**\n * All local rate limit stats. @see stats_macros.h\n */\n#define ALL_LOCAL_RATE_LIMIT_STATS(COUNTER)                                                        \\\n  COUNTER(enabled)                                                                                 \\\n  COUNTER(enforced)                                                                                \\\n  COUNTER(rate_limited)                                                                            \\\n  COUNTER(ok)\n\n/**\n * Struct definition for all local rate limit stats. 
@see stats_macros.h\n */\nstruct LocalRateLimitStats {\n  ALL_LOCAL_RATE_LIMIT_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Global configuration for the HTTP local rate limit filter.\n */\nclass FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig {\npublic:\n  FilterConfig(const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& config,\n               Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::Loader& runtime,\n               bool per_route = false);\n  ~FilterConfig() override = default;\n  Runtime::Loader& runtime() { return runtime_; }\n  bool requestAllowed() const;\n  bool enabled() const;\n  bool enforced() const;\n  LocalRateLimitStats& stats() const { return stats_; }\n  const Router::HeaderParser& responseHeadersParser() const { return *response_headers_parser_; }\n  Http::Code status() const { return status_; }\n\nprivate:\n  friend class FilterTest;\n\n  static LocalRateLimitStats generateStats(const std::string& prefix, Stats::Scope& scope);\n\n  static Http::Code toErrorCode(uint64_t status) {\n    const auto code = static_cast<Http::Code>(status);\n    if (code >= Http::Code::BadRequest) {\n      return code;\n    }\n    return Http::Code::TooManyRequests;\n  }\n\n  const Http::Code status_;\n  mutable LocalRateLimitStats stats_;\n  Filters::Common::LocalRateLimit::LocalRateLimiterImpl rate_limiter_;\n  Runtime::Loader& runtime_;\n  const absl::optional<Envoy::Runtime::FractionalPercent> filter_enabled_;\n  const absl::optional<Envoy::Runtime::FractionalPercent> filter_enforced_;\n  Router::HeaderParserPtr response_headers_parser_;\n};\n\nusing FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;\n\n/**\n * HTTP local rate limit filter. 
Depending on the route configuration, this filter consults\n * the local token bucket before allowing further filter iteration.\n */\nclass Filter : public Http::PassThroughFilter {\npublic:\n  Filter(FilterConfigSharedPtr config) : config_(config) {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n\nprivate:\n  friend class FilterTest;\n\n  const FilterConfig* getConfig() const;\n\n  FilterConfigSharedPtr config_;\n};\n\n} // namespace LocalRateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/lua/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Lua scripting L7 HTTP filter (https://www.lua.org/, http://luajit.org/)\n# Public docs: docs/root/configuration/http_filters/lua_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"lua_filter_lib\",\n    srcs = [\"lua_filter.cc\"],\n    hdrs = [\"lua_filter.h\"],\n    deps = [\n        \":wrappers_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/crypto:utility_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/extensions/common:utility_lib\",\n        \"//source/extensions/filters/common/lua:lua_lib\",\n        \"//source/extensions/filters/common/lua:wrappers_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/lua/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"wrappers_lib\",\n    srcs = [\"wrappers.cc\"],\n    hdrs = [\"wrappers.h\"],\n    deps = [\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/common/crypto:utility_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/common/crypto:utility_lib\",\n        \"//source/extensions/filters/common/lua:lua_lib\",\n        \"//source/extensions/filters/common/lua:wrappers_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = 
[\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/lua:lua_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/lua/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/lua/config.cc",
    "content": "#include \"extensions/filters/http/lua/config.h\"\n\n#include \"envoy/extensions/filters/http/lua/v3/lua.pb.h\"\n#include \"envoy/extensions/filters/http/lua/v3/lua.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/lua/lua_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Lua {\n\nHttp::FilterFactoryCb LuaFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::lua::v3::Lua& proto_config, const std::string&,\n    Server::Configuration::FactoryContext& context) {\n  FilterConfigConstSharedPtr filter_config(new FilterConfig{\n      proto_config, context.threadLocal(), context.clusterManager(), context.api()});\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<Filter>(filter_config));\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nLuaFilterConfig::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::lua::v3::LuaPerRoute& proto_config,\n    Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<FilterConfigPerRoute>(proto_config, context);\n}\n\n/**\n * Static registration for the Lua filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(LuaFilterConfig, Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.lua\"};\n\n} // namespace Lua\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/lua/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/lua/v3/lua.pb.h\"\n#include \"envoy/extensions/filters/http/lua/v3/lua.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Lua {\n\n/**\n * Config registration for the Lua filter. @see NamedHttpFilterConfigFactory.\n */\nclass LuaFilterConfig\n    : public Common::FactoryBase<envoy::extensions::filters::http::lua::v3::Lua,\n                                 envoy::extensions::filters::http::lua::v3::LuaPerRoute> {\npublic:\n  LuaFilterConfig() : FactoryBase(HttpFilterNames::get().Lua) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::lua::v3::Lua& proto_config, const std::string&,\n      Server::Configuration::FactoryContext& context) override;\n\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::lua::v3::LuaPerRoute& proto_config,\n      Server::Configuration::ServerFactoryContext& context,\n      ProtobufMessage::ValidationVisitor& validator) override;\n};\n\n} // namespace Lua\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/lua/lua_filter.cc",
    "content": "#include \"extensions/filters/http/lua/lua_filter.h\"\n\n#include <atomic>\n#include <memory>\n\n#include \"envoy/http/codes.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/config/datasource.h\"\n#include \"common/crypto/utility.h\"\n#include \"common/http/message_impl.h\"\n\n#include \"absl/strings/escaping.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Lua {\n\nnamespace {\n\nstruct HttpResponseCodeDetailValues {\n  const absl::string_view LuaResponse = \"lua_response\";\n};\nusing HttpResponseCodeDetails = ConstSingleton<HttpResponseCodeDetailValues>;\n\nconst std::string DEPRECATED_LUA_NAME = \"envoy.lua\";\n\nstd::atomic<bool>& deprecatedNameLogged() {\n  MUTABLE_CONSTRUCT_ON_FIRST_USE(std::atomic<bool>, false);\n}\n\n// Checks if deprecated metadata names are allowed. On the first check only it will log either\n// a warning (indicating the name should be updated) or an error (the feature is off and the\n// name is not allowed). When warning, the deprecated feature stat is incremented. 
Subsequent\n// checks do not log since this check is done in potentially high-volume request paths.\nbool allowDeprecatedMetadataName() {\n  if (!deprecatedNameLogged().exchange(true)) {\n    // Have not logged yet, so use the logging test.\n    return Extensions::Common::Utility::ExtensionNameUtil::allowDeprecatedExtensionName(\n        \"http filter\", DEPRECATED_LUA_NAME, Extensions::HttpFilters::HttpFilterNames::get().Lua);\n  }\n\n  // We have logged (or another thread will do so momentarily), so just check whether the\n  // deprecated name is allowed.\n  auto status = Extensions::Common::Utility::ExtensionNameUtil::deprecatedExtensionNameStatus();\n  return status == Extensions::Common::Utility::ExtensionNameUtil::Status::Warn;\n}\n\nconst ProtobufWkt::Struct& getMetadata(Http::StreamFilterCallbacks* callbacks) {\n  if (callbacks->route() == nullptr || callbacks->route()->routeEntry() == nullptr) {\n    return ProtobufWkt::Struct::default_instance();\n  }\n  const auto& metadata = callbacks->route()->routeEntry()->metadata();\n\n  {\n    const auto& filter_it = metadata.filter_metadata().find(HttpFilterNames::get().Lua);\n    if (filter_it != metadata.filter_metadata().end()) {\n      return filter_it->second;\n    }\n  }\n\n  // TODO(zuercher): Remove this block when deprecated filter names are removed.\n  {\n    const auto& filter_it = metadata.filter_metadata().find(DEPRECATED_LUA_NAME);\n    if (filter_it != metadata.filter_metadata().end()) {\n      // Use the non-throwing check here because this happens at request time.\n      if (allowDeprecatedMetadataName()) {\n        return filter_it->second;\n      }\n    }\n  }\n\n  return ProtobufWkt::Struct::default_instance();\n}\n\n// Okay to return non-const reference because this doesn't ever get changed.\nNoopCallbacks& noopCallbacks() {\n  static NoopCallbacks* callbacks = new NoopCallbacks();\n  return *callbacks;\n}\n\nvoid buildHeadersFromTable(Http::HeaderMap& headers, lua_State* state, int 
table_index) {\n  // Build a header map to make the request. We iterate through the provided table to do this and\n  // check that we are getting strings.\n  lua_pushnil(state);\n  while (lua_next(state, table_index) != 0) {\n    // Uses 'key' (at index -2) and 'value' (at index -1).\n    const char* key = luaL_checkstring(state, -2);\n    // Check if the current value is a table, we iterate through the table and add each element of\n    // it as a header entry value for the current key.\n    if (lua_istable(state, -1)) {\n      lua_pushnil(state);\n      while (lua_next(state, -2) != 0) {\n        const char* value = luaL_checkstring(state, -1);\n        headers.addCopy(Http::LowerCaseString(key), value);\n        lua_pop(state, 1);\n      }\n    } else {\n      const char* value = luaL_checkstring(state, -1);\n      headers.addCopy(Http::LowerCaseString(key), value);\n    }\n    // Removes 'value'; keeps 'key' for next iteration. This is the input for lua_next() so that\n    // it can push the next key/value pair onto the stack.\n    lua_pop(state, 1);\n  }\n}\n\nHttp::AsyncClient::Request* makeHttpCall(lua_State* state, Filter& filter,\n                                         Tracing::Span& parent_span,\n                                         Http::AsyncClient::Callbacks& callbacks) {\n  const std::string cluster = luaL_checkstring(state, 2);\n  luaL_checktype(state, 3, LUA_TTABLE);\n  size_t body_size;\n  const char* body = luaL_optlstring(state, 4, nullptr, &body_size);\n  int timeout_ms = luaL_checkint(state, 5);\n  if (timeout_ms < 0) {\n    luaL_error(state, \"http call timeout must be >= 0\");\n  }\n\n  if (filter.clusterManager().get(cluster) == nullptr) {\n    luaL_error(state, \"http call cluster invalid. 
Must be configured\");\n  }\n\n  auto headers = Http::RequestHeaderMapImpl::create();\n  buildHeadersFromTable(*headers, state, 3);\n  Http::RequestMessagePtr message(new Http::RequestMessageImpl(std::move(headers)));\n\n  // Check that we were provided certain headers.\n  if (message->headers().Path() == nullptr || message->headers().Method() == nullptr ||\n      message->headers().Host() == nullptr) {\n    luaL_error(state, \"http call headers must include ':path', ':method', and ':authority'\");\n  }\n\n  if (body != nullptr) {\n    message->body().add(body, body_size);\n    message->headers().setContentLength(body_size);\n  }\n\n  absl::optional<std::chrono::milliseconds> timeout;\n  if (timeout_ms > 0) {\n    timeout = std::chrono::milliseconds(timeout_ms);\n  }\n\n  auto options = Http::AsyncClient::RequestOptions().setTimeout(timeout).setParentSpan(parent_span);\n  return filter.clusterManager().httpAsyncClientForCluster(cluster).send(std::move(message),\n                                                                         callbacks, options);\n}\n} // namespace\n\nPerLuaCodeSetup::PerLuaCodeSetup(const std::string& lua_code, ThreadLocal::SlotAllocator& tls)\n    : lua_state_(lua_code, tls) {\n  lua_state_.registerType<Filters::Common::Lua::BufferWrapper>();\n  lua_state_.registerType<Filters::Common::Lua::MetadataMapWrapper>();\n  lua_state_.registerType<Filters::Common::Lua::MetadataMapIterator>();\n  lua_state_.registerType<Filters::Common::Lua::ConnectionWrapper>();\n  lua_state_.registerType<Filters::Common::Lua::SslConnectionWrapper>();\n  lua_state_.registerType<HeaderMapWrapper>();\n  lua_state_.registerType<HeaderMapIterator>();\n  lua_state_.registerType<StreamInfoWrapper>();\n  lua_state_.registerType<DynamicMetadataMapWrapper>();\n  lua_state_.registerType<DynamicMetadataMapIterator>();\n  lua_state_.registerType<StreamHandleWrapper>();\n  lua_state_.registerType<PublicKeyWrapper>();\n\n  request_function_slot_ = 
lua_state_.registerGlobal(\"envoy_on_request\");\n  if (lua_state_.getGlobalRef(request_function_slot_) == LUA_REFNIL) {\n    ENVOY_LOG(info, \"envoy_on_request() function not found. Lua filter will not hook requests.\");\n  }\n\n  response_function_slot_ = lua_state_.registerGlobal(\"envoy_on_response\");\n  if (lua_state_.getGlobalRef(response_function_slot_) == LUA_REFNIL) {\n    ENVOY_LOG(info, \"envoy_on_response() function not found. Lua filter will not hook responses.\");\n  }\n}\n\nStreamHandleWrapper::StreamHandleWrapper(Filters::Common::Lua::Coroutine& coroutine,\n                                         Http::HeaderMap& headers, bool end_stream, Filter& filter,\n                                         FilterCallbacks& callbacks)\n    : coroutine_(coroutine), headers_(headers), end_stream_(end_stream), filter_(filter),\n      callbacks_(callbacks), yield_callback_([this]() {\n        if (state_ == State::Running) {\n          throw Filters::Common::Lua::LuaException(\"script performed an unexpected yield\");\n        }\n      }) {}\n\nHttp::FilterHeadersStatus StreamHandleWrapper::start(int function_ref) {\n  // We are on the top of the stack.\n  coroutine_.start(function_ref, 1, yield_callback_);\n  Http::FilterHeadersStatus status =\n      (state_ == State::WaitForBody || state_ == State::HttpCall || state_ == State::Responded)\n          ? 
Http::FilterHeadersStatus::StopIteration\n          : Http::FilterHeadersStatus::Continue;\n\n  if (status == Http::FilterHeadersStatus::Continue) {\n    headers_continued_ = true;\n  }\n\n  return status;\n}\n\nHttp::FilterDataStatus StreamHandleWrapper::onData(Buffer::Instance& data, bool end_stream) {\n  ASSERT(!end_stream_);\n  end_stream_ = end_stream;\n  saw_body_ = true;\n\n  if (state_ == State::WaitForBodyChunk) {\n    ENVOY_LOG(trace, \"resuming for next body chunk\");\n    Filters::Common::Lua::LuaDeathRef<Filters::Common::Lua::BufferWrapper> wrapper(\n        Filters::Common::Lua::BufferWrapper::create(coroutine_.luaState(), data), true);\n    state_ = State::Running;\n    coroutine_.resume(1, yield_callback_);\n  } else if (state_ == State::WaitForBody && end_stream_) {\n    ENVOY_LOG(debug, \"resuming body due to end stream\");\n    callbacks_.addData(data);\n    state_ = State::Running;\n    coroutine_.resume(luaBody(coroutine_.luaState()), yield_callback_);\n  } else if (state_ == State::WaitForTrailers && end_stream_) {\n    ENVOY_LOG(debug, \"resuming nil trailers due to end stream\");\n    state_ = State::Running;\n    coroutine_.resume(0, yield_callback_);\n  }\n\n  if (state_ == State::HttpCall || state_ == State::WaitForBody) {\n    ENVOY_LOG(trace, \"buffering body\");\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  } else if (state_ == State::Responded) {\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  } else {\n    headers_continued_ = true;\n    return Http::FilterDataStatus::Continue;\n  }\n}\n\nHttp::FilterTrailersStatus StreamHandleWrapper::onTrailers(Http::HeaderMap& trailers) {\n  ASSERT(!end_stream_);\n  end_stream_ = true;\n  trailers_ = &trailers;\n\n  if (state_ == State::WaitForBodyChunk) {\n    ENVOY_LOG(debug, \"resuming nil body chunk due to trailers\");\n    state_ = State::Running;\n    coroutine_.resume(0, yield_callback_);\n  } else if (state_ == State::WaitForBody) {\n    ENVOY_LOG(debug, 
\"resuming body due to trailers\");\n    state_ = State::Running;\n    coroutine_.resume(luaBody(coroutine_.luaState()), yield_callback_);\n  }\n\n  if (state_ == State::WaitForTrailers) {\n    // Mimic a call to trailers which will push the trailers onto the stack and then resume.\n    state_ = State::Running;\n    coroutine_.resume(luaTrailers(coroutine_.luaState()), yield_callback_);\n  }\n\n  Http::FilterTrailersStatus status = (state_ == State::HttpCall || state_ == State::Responded)\n                                          ? Http::FilterTrailersStatus::StopIteration\n                                          : Http::FilterTrailersStatus::Continue;\n\n  if (status == Http::FilterTrailersStatus::Continue) {\n    headers_continued_ = true;\n  }\n\n  return status;\n}\n\nint StreamHandleWrapper::luaRespond(lua_State* state) {\n  ASSERT(state_ == State::Running);\n\n  if (headers_continued_) {\n    luaL_error(state, \"respond() cannot be called if headers have been continued\");\n  }\n\n  luaL_checktype(state, 2, LUA_TTABLE);\n  size_t body_size;\n  const char* raw_body = luaL_optlstring(state, 3, nullptr, &body_size);\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  buildHeadersFromTable(*headers, state, 2);\n\n  uint64_t status;\n  if (!absl::SimpleAtoi(headers->getStatusValue(), &status) || status < 200 || status >= 600) {\n    luaL_error(state, \":status must be between 200-599\");\n  }\n\n  Buffer::InstancePtr body;\n  if (raw_body != nullptr) {\n    body = std::make_unique<Buffer::OwnedImpl>(raw_body, body_size);\n    headers->setContentLength(body_size);\n  }\n\n  // Once we respond we treat that as the end of the script even if there is more code. 
Thus we\n  // yield.\n  callbacks_.respond(std::move(headers), body.get(), state);\n  state_ = State::Responded;\n  return lua_yield(state, 0);\n}\n\nint StreamHandleWrapper::luaHttpCall(lua_State* state) {\n  ASSERT(state_ == State::Running);\n\n  const int async_flag_index = 6;\n  if (!lua_isnone(state, async_flag_index) && !lua_isboolean(state, async_flag_index)) {\n    luaL_error(state, \"http call asynchronous flag must be 'true', 'false', or empty\");\n  }\n\n  if (lua_toboolean(state, async_flag_index)) {\n    return doAsynchronousHttpCall(state, callbacks_.activeSpan());\n  } else {\n    return doSynchronousHttpCall(state, callbacks_.activeSpan());\n  }\n}\n\nint StreamHandleWrapper::doSynchronousHttpCall(lua_State* state, Tracing::Span& span) {\n  http_request_ = makeHttpCall(state, filter_, span, *this);\n  if (http_request_) {\n    state_ = State::HttpCall;\n    return lua_yield(state, 0);\n  } else {\n    // Immediate failure case. The return arguments are already on the stack.\n    ASSERT(lua_gettop(state) >= 2);\n    return 2;\n  }\n}\n\nint StreamHandleWrapper::doAsynchronousHttpCall(lua_State* state, Tracing::Span& span) {\n  makeHttpCall(state, filter_, span, noopCallbacks());\n  return 0;\n}\n\nvoid StreamHandleWrapper::onSuccess(const Http::AsyncClient::Request&,\n                                    Http::ResponseMessagePtr&& response) {\n  ASSERT(state_ == State::HttpCall || state_ == State::Running);\n  ENVOY_LOG(debug, \"async HTTP response complete\");\n  http_request_ = nullptr;\n\n  // We need to build a table with the headers as return param 1. 
The body will be return param 2.\n  lua_newtable(coroutine_.luaState());\n  response->headers().iterate([lua_State = coroutine_.luaState()](\n                                  const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    lua_pushlstring(lua_State, header.key().getStringView().data(),\n                    header.key().getStringView().length());\n    lua_pushlstring(lua_State, header.value().getStringView().data(),\n                    header.value().getStringView().length());\n    lua_settable(lua_State, -3);\n    return Http::HeaderMap::Iterate::Continue;\n  });\n\n  // TODO(mattklein123): Avoid double copy here.\n  if (response->body().length() > 0) {\n    lua_pushlstring(coroutine_.luaState(), response->bodyAsString().data(),\n                    response->body().length());\n  } else {\n    lua_pushnil(coroutine_.luaState());\n  }\n\n  // In the immediate failure case, we are just going to immediately return to the script. We\n  // have already pushed the return arguments onto the stack.\n  if (state_ == State::HttpCall) {\n    state_ = State::Running;\n    markLive();\n\n    try {\n      coroutine_.resume(2, yield_callback_);\n      markDead();\n    } catch (const Filters::Common::Lua::LuaException& e) {\n      filter_.scriptError(e);\n    }\n\n    if (state_ == State::Running) {\n      headers_continued_ = true;\n      callbacks_.continueIteration();\n    }\n  }\n}\n\nvoid StreamHandleWrapper::onFailure(const Http::AsyncClient::Request& request,\n                                    Http::AsyncClient::FailureReason) {\n  ASSERT(state_ == State::HttpCall || state_ == State::Running);\n  ENVOY_LOG(debug, \"async HTTP failure\");\n\n  // Just fake a basic 503 response.\n  Http::ResponseMessagePtr response_message(\n      new Http::ResponseMessageImpl(Http::createHeaderMap<Http::ResponseHeaderMapImpl>(\n          {{Http::Headers::get().Status,\n            std::to_string(enumToInt(Http::Code::ServiceUnavailable))}})));\n  
response_message->body().add(\"upstream failure\");\n  onSuccess(request, std::move(response_message));\n}\n\nint StreamHandleWrapper::luaHeaders(lua_State* state) {\n  ASSERT(state_ == State::Running);\n\n  if (headers_wrapper_.get() != nullptr) {\n    headers_wrapper_.pushStack();\n  } else {\n    headers_wrapper_.reset(HeaderMapWrapper::create(state, headers_,\n                                                    [this] {\n                                                      // If we are about to do a modifiable header\n                                                      // operation, blow away the route cache. We\n                                                      // could be a little more intelligent about\n                                                      // when we do this so the performance would be\n                                                      // higher, but this is simple and will get the\n                                                      // job done for now. This is a NOP on the\n                                                      // encoder path.\n                                                      if (!headers_continued_) {\n                                                        callbacks_.onHeadersModified();\n                                                      }\n\n                                                      return !headers_continued_;\n                                                    }),\n                           true);\n  }\n  return 1;\n}\n\nint StreamHandleWrapper::luaBody(lua_State* state) {\n  ASSERT(state_ == State::Running);\n\n  if (end_stream_) {\n    if (!buffered_body_ && saw_body_) {\n      return luaL_error(state, \"cannot call body() after body has been streamed\");\n    } else if (callbacks_.bufferedBody() == nullptr) {\n      ENVOY_LOG(debug, \"end stream. 
no body\");\n      return 0;\n    } else {\n      if (body_wrapper_.get() != nullptr) {\n        body_wrapper_.pushStack();\n      } else {\n        body_wrapper_.reset(Filters::Common::Lua::BufferWrapper::create(\n                                state, const_cast<Buffer::Instance&>(*callbacks_.bufferedBody())),\n                            true);\n      }\n      return 1;\n    }\n  } else if (saw_body_) {\n    return luaL_error(state, \"cannot call body() after body streaming has started\");\n  } else {\n    ENVOY_LOG(debug, \"yielding for full body\");\n    state_ = State::WaitForBody;\n    buffered_body_ = true;\n    return lua_yield(state, 0);\n  }\n}\n\nint StreamHandleWrapper::luaBodyChunks(lua_State* state) {\n  ASSERT(state_ == State::Running);\n\n  if (saw_body_) {\n    luaL_error(state, \"cannot call bodyChunks after body processing has begun\");\n  }\n\n  // We are currently at the top of the stack. Push a closure that has us as the upvalue.\n  lua_pushcclosure(state, static_luaBodyIterator, 1);\n  return 1;\n}\n\nint StreamHandleWrapper::luaBodyIterator(lua_State* state) {\n  ASSERT(state_ == State::Running);\n\n  if (end_stream_) {\n    ENVOY_LOG(debug, \"body complete. no more body chunks\");\n    return 0;\n  } else {\n    ENVOY_LOG(debug, \"yielding for next body chunk\");\n    state_ = State::WaitForBodyChunk;\n    return lua_yield(state, 0);\n  }\n}\n\nint StreamHandleWrapper::luaTrailers(lua_State* state) {\n  ASSERT(state_ == State::Running);\n\n  if (end_stream_ && trailers_ == nullptr) {\n    ENVOY_LOG(debug, \"end stream. 
no trailers\");\n    return 0;\n  } else if (trailers_ != nullptr) {\n    if (trailers_wrapper_.get() != nullptr) {\n      trailers_wrapper_.pushStack();\n    } else {\n      trailers_wrapper_.reset(HeaderMapWrapper::create(state, *trailers_, []() { return true; }),\n                              true);\n    }\n    return 1;\n  } else {\n    ENVOY_LOG(debug, \"yielding for trailers\");\n    state_ = State::WaitForTrailers;\n    return lua_yield(state, 0);\n  }\n}\n\nint StreamHandleWrapper::luaMetadata(lua_State* state) {\n  ASSERT(state_ == State::Running);\n  if (metadata_wrapper_.get() != nullptr) {\n    metadata_wrapper_.pushStack();\n  } else {\n    metadata_wrapper_.reset(\n        Filters::Common::Lua::MetadataMapWrapper::create(state, callbacks_.metadata()), true);\n  }\n  return 1;\n}\n\nint StreamHandleWrapper::luaStreamInfo(lua_State* state) {\n  ASSERT(state_ == State::Running);\n  if (stream_info_wrapper_.get() != nullptr) {\n    stream_info_wrapper_.pushStack();\n  } else {\n    stream_info_wrapper_.reset(StreamInfoWrapper::create(state, callbacks_.streamInfo()), true);\n  }\n  return 1;\n}\n\nint StreamHandleWrapper::luaConnection(lua_State* state) {\n  ASSERT(state_ == State::Running);\n  if (connection_wrapper_.get() != nullptr) {\n    connection_wrapper_.pushStack();\n  } else {\n    connection_wrapper_.reset(\n        Filters::Common::Lua::ConnectionWrapper::create(state, callbacks_.connection()), true);\n  }\n  return 1;\n}\n\nint StreamHandleWrapper::luaLogTrace(lua_State* state) {\n  const char* message = luaL_checkstring(state, 2);\n  filter_.scriptLog(spdlog::level::trace, message);\n  return 0;\n}\n\nint StreamHandleWrapper::luaLogDebug(lua_State* state) {\n  const char* message = luaL_checkstring(state, 2);\n  filter_.scriptLog(spdlog::level::debug, message);\n  return 0;\n}\n\nint StreamHandleWrapper::luaLogInfo(lua_State* state) {\n  const char* message = luaL_checkstring(state, 2);\n  filter_.scriptLog(spdlog::level::info, message);\n  
return 0;\n}\n\nint StreamHandleWrapper::luaLogWarn(lua_State* state) {\n  const char* message = luaL_checkstring(state, 2);\n  filter_.scriptLog(spdlog::level::warn, message);\n  return 0;\n}\n\nint StreamHandleWrapper::luaLogErr(lua_State* state) {\n  const char* message = luaL_checkstring(state, 2);\n  filter_.scriptLog(spdlog::level::err, message);\n  return 0;\n}\n\nint StreamHandleWrapper::luaLogCritical(lua_State* state) {\n  const char* message = luaL_checkstring(state, 2);\n  filter_.scriptLog(spdlog::level::critical, message);\n  return 0;\n}\n\nint StreamHandleWrapper::luaVerifySignature(lua_State* state) {\n  // Step 1: Get hash function.\n  absl::string_view hash = luaL_checkstring(state, 2);\n\n  // Step 2: Get the key pointer.\n  auto key = luaL_checkstring(state, 3);\n  auto ptr = public_key_storage_.find(key);\n  if (ptr == public_key_storage_.end()) {\n    luaL_error(state, \"invalid public key\");\n    return 0;\n  }\n\n  // Step 3: Get signature from args.\n  const char* signature = luaL_checkstring(state, 4);\n  int sig_len = luaL_checknumber(state, 5);\n  const std::vector<uint8_t> sig_vec(signature, signature + sig_len);\n\n  // Step 4: Get clear text from args.\n  const char* clear_text = luaL_checkstring(state, 6);\n  int text_len = luaL_checknumber(state, 7);\n  const std::vector<uint8_t> text_vec(clear_text, clear_text + text_len);\n\n  // Step 5: Verify signature.\n  auto& crypto_util = Envoy::Common::Crypto::UtilitySingleton::get();\n  auto output = crypto_util.verifySignature(hash, *ptr->second, sig_vec, text_vec);\n  lua_pushboolean(state, output.result_);\n  if (output.result_) {\n    lua_pushnil(state);\n  } else {\n    lua_pushlstring(state, output.error_message_.data(), output.error_message_.length());\n  }\n  return 2;\n}\n\nint StreamHandleWrapper::luaImportPublicKey(lua_State* state) {\n  // Get byte array and the length.\n  const char* str = luaL_checkstring(state, 2);\n  int n = luaL_checknumber(state, 3);\n  
std::vector<uint8_t> key(str, str + n);\n  if (public_key_wrapper_.get() != nullptr) {\n    public_key_wrapper_.pushStack();\n  } else {\n    auto& crypto_util = Envoy::Common::Crypto::UtilitySingleton::get();\n    Envoy::Common::Crypto::CryptoObjectPtr crypto_ptr = crypto_util.importPublicKey(key);\n    auto wrapper = Envoy::Common::Crypto::Access::getTyped<Envoy::Common::Crypto::PublicKeyObject>(\n        *crypto_ptr);\n    EVP_PKEY* pkey = wrapper->getEVP_PKEY();\n    if (pkey == nullptr) {\n      // TODO(dio): Call luaL_error here instead of failing silently. However, the current behavior\n      // is to return nil (when calling get() to the wrapped object, hence we create a wrapper\n      // initialized by an empty string here) when importing a public key is failed.\n      public_key_wrapper_.reset(PublicKeyWrapper::create(state, EMPTY_STRING), true);\n    }\n\n    public_key_storage_.insert({std::string(str).substr(0, n), std::move(crypto_ptr)});\n    public_key_wrapper_.reset(PublicKeyWrapper::create(state, str), true);\n  }\n\n  return 1;\n}\n\nint StreamHandleWrapper::luaBase64Escape(lua_State* state) {\n  size_t input_size;\n  const char* input = luaL_checklstring(state, 2, &input_size);\n  auto output = absl::Base64Escape(absl::string_view(input, input_size));\n  lua_pushlstring(state, output.data(), output.length());\n\n  return 1;\n}\n\nFilterConfig::FilterConfig(const envoy::extensions::filters::http::lua::v3::Lua& proto_config,\n                           ThreadLocal::SlotAllocator& tls,\n                           Upstream::ClusterManager& cluster_manager, Api::Api& api)\n    : cluster_manager_(cluster_manager) {\n  auto global_setup_ptr = std::make_unique<PerLuaCodeSetup>(proto_config.inline_code(), tls);\n  if (global_setup_ptr) {\n    per_lua_code_setups_map_[GLOBAL_SCRIPT_NAME] = std::move(global_setup_ptr);\n  }\n\n  for (const auto& source : proto_config.source_codes()) {\n    const std::string code = Config::DataSource::read(source.second, 
true, api);\n    auto per_lua_code_setup_ptr = std::make_unique<PerLuaCodeSetup>(code, tls);\n    if (!per_lua_code_setup_ptr) {\n      continue;\n    }\n    per_lua_code_setups_map_[source.first] = std::move(per_lua_code_setup_ptr);\n  }\n}\n\nFilterConfigPerRoute::FilterConfigPerRoute(\n    const envoy::extensions::filters::http::lua::v3::LuaPerRoute& config,\n    Server::Configuration::ServerFactoryContext& context)\n    : main_thread_dispatcher_(context.dispatcher()), disabled_(config.disabled()),\n      name_(config.name()) {\n  if (disabled_ || !name_.empty()) {\n    return;\n  }\n  // Read and parse the inline Lua code defined in the route configuration.\n  const std::string code_str = Config::DataSource::read(config.source_code(), true, context.api());\n  per_lua_code_setup_ptr_ = std::make_unique<PerLuaCodeSetup>(code_str, context.threadLocal());\n}\n\nvoid Filter::onDestroy() {\n  destroyed_ = true;\n  if (request_stream_wrapper_.get()) {\n    request_stream_wrapper_.get()->onReset();\n  }\n  if (response_stream_wrapper_.get()) {\n    response_stream_wrapper_.get()->onReset();\n  }\n}\n\nHttp::FilterHeadersStatus Filter::doHeaders(StreamHandleRef& handle,\n                                            Filters::Common::Lua::CoroutinePtr& coroutine,\n                                            FilterCallbacks& callbacks, int function_ref,\n                                            PerLuaCodeSetup* setup, Http::HeaderMap& headers,\n                                            bool end_stream) {\n  if (function_ref == LUA_REFNIL) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  ASSERT(setup);\n  coroutine = setup->createCoroutine();\n\n  handle.reset(StreamHandleWrapper::create(coroutine->luaState(), *coroutine, headers, end_stream,\n                                           *this, callbacks),\n               true);\n\n  Http::FilterHeadersStatus status = Http::FilterHeadersStatus::Continue;\n  try {\n    status = 
handle.get()->start(function_ref);\n    handle.markDead();\n  } catch (const Filters::Common::Lua::LuaException& e) {\n    scriptError(e);\n  }\n\n  return status;\n}\n\nHttp::FilterDataStatus Filter::doData(StreamHandleRef& handle, Buffer::Instance& data,\n                                      bool end_stream) {\n  Http::FilterDataStatus status = Http::FilterDataStatus::Continue;\n  if (handle.get() != nullptr) {\n    try {\n      handle.markLive();\n      status = handle.get()->onData(data, end_stream);\n      handle.markDead();\n    } catch (const Filters::Common::Lua::LuaException& e) {\n      scriptError(e);\n    }\n  }\n\n  return status;\n}\n\nHttp::FilterTrailersStatus Filter::doTrailers(StreamHandleRef& handle, Http::HeaderMap& trailers) {\n  Http::FilterTrailersStatus status = Http::FilterTrailersStatus::Continue;\n  if (handle.get() != nullptr) {\n    try {\n      handle.markLive();\n      status = handle.get()->onTrailers(trailers);\n      handle.markDead();\n    } catch (const Filters::Common::Lua::LuaException& e) {\n      scriptError(e);\n    }\n  }\n\n  return status;\n}\n\nvoid Filter::scriptError(const Filters::Common::Lua::LuaException& e) {\n  scriptLog(spdlog::level::err, e.what());\n  request_stream_wrapper_.reset();\n  response_stream_wrapper_.reset();\n}\n\nvoid Filter::scriptLog(spdlog::level::level_enum level, const char* message) {\n  switch (level) {\n  case spdlog::level::trace:\n    ENVOY_LOG(trace, \"script log: {}\", message);\n    return;\n  case spdlog::level::debug:\n    ENVOY_LOG(debug, \"script log: {}\", message);\n    return;\n  case spdlog::level::info:\n    ENVOY_LOG(info, \"script log: {}\", message);\n    return;\n  case spdlog::level::warn:\n    ENVOY_LOG(warn, \"script log: {}\", message);\n    return;\n  case spdlog::level::err:\n    ENVOY_LOG(error, \"script log: {}\", message);\n    return;\n  case spdlog::level::critical:\n    ENVOY_LOG(critical, \"script log: {}\", message);\n    return;\n  case 
spdlog::level::off:\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    return;\n  case spdlog::level::n_levels:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid Filter::DecoderCallbacks::respond(Http::ResponseHeaderMapPtr&& headers, Buffer::Instance* body,\n                                       lua_State*) {\n  callbacks_->encodeHeaders(std::move(headers), body == nullptr,\n                            HttpResponseCodeDetails::get().LuaResponse);\n  if (body && !parent_.destroyed_) {\n    callbacks_->encodeData(*body, true);\n  }\n}\n\nconst ProtobufWkt::Struct& Filter::DecoderCallbacks::metadata() const {\n  return getMetadata(callbacks_);\n}\n\nvoid Filter::EncoderCallbacks::respond(Http::ResponseHeaderMapPtr&&, Buffer::Instance*,\n                                       lua_State* state) {\n  // TODO(mattklein123): Support response in response path if nothing has been continued\n  // yet.\n  luaL_error(state, \"respond not currently supported in the response path\");\n}\n\nconst ProtobufWkt::Struct& Filter::EncoderCallbacks::metadata() const {\n  return getMetadata(callbacks_);\n}\n\n} // namespace Lua\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/lua/lua_filter.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/lua/v3/lua.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/crypto/utility.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/common/utility.h\"\n#include \"extensions/filters/common/lua/wrappers.h\"\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/lua/wrappers.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Lua {\n\nconstexpr char GLOBAL_SCRIPT_NAME[] = \"GLOBAL\";\n\nclass PerLuaCodeSetup : Logger::Loggable<Logger::Id::lua> {\npublic:\n  PerLuaCodeSetup(const std::string& lua_code, ThreadLocal::SlotAllocator& tls);\n\n  Extensions::Filters::Common::Lua::CoroutinePtr createCoroutine() {\n    return lua_state_.createCoroutine();\n  }\n\n  int requestFunctionRef() { return lua_state_.getGlobalRef(request_function_slot_); }\n  int responseFunctionRef() { return lua_state_.getGlobalRef(response_function_slot_); }\n\n  uint64_t runtimeBytesUsed() { return lua_state_.runtimeBytesUsed(); }\n  void runtimeGC() { return lua_state_.runtimeGC(); }\n\nprivate:\n  uint64_t request_function_slot_{};\n  uint64_t response_function_slot_{};\n\n  Filters::Common::Lua::ThreadLocalState lua_state_;\n};\n\nusing PerLuaCodeSetupPtr = std::unique_ptr<PerLuaCodeSetup>;\n\n/**\n * Callbacks used by a stream handler to access the filter.\n */\nclass FilterCallbacks {\npublic:\n  virtual ~FilterCallbacks() = default;\n\n  /**\n   * Add data to the connection manager buffer.\n   * @param data supplies the data to add.\n   */\n  virtual void addData(Buffer::Instance& data) PURE;\n\n  /**\n   * @return const Buffer::Instance* the currently buffered body.\n   */\n  virtual const Buffer::Instance* bufferedBody() PURE;\n\n  /**\n   * Continue filter iteration if iteration has been paused due to an async call.\n   
*/\n  virtual void continueIteration() PURE;\n\n  /**\n   * Called when headers have been modified by a script. This can only happen prior to headers\n   * being continued.\n   */\n  virtual void onHeadersModified() PURE;\n\n  /**\n   * Perform an immediate response.\n   * @param headers supplies the response headers.\n   * @param body supplies the optional response body.\n   * @param state supplies the active Lua state.\n   */\n  virtual void respond(Http::ResponseHeaderMapPtr&& headers, Buffer::Instance* body,\n                       lua_State* state) PURE;\n\n  /**\n   * @return const ProtobufWkt::Struct& the value of metadata inside the lua filter scope of current\n   * route entry.\n   */\n  virtual const ProtobufWkt::Struct& metadata() const PURE;\n\n  /**\n   * @return StreamInfo::StreamInfo& the current stream info handle. This handle is mutable to\n   * accommodate write API e.g. setDynamicMetadata().\n   */\n  virtual StreamInfo::StreamInfo& streamInfo() PURE;\n\n  /**\n   * @return const Network::Connection* the current network connection handle.\n   */\n  virtual const Network::Connection* connection() const PURE;\n\n  /**\n   * @return const Tracing::Span& the current tracing active span.\n   */\n  virtual Tracing::Span& activeSpan() PURE;\n};\n\nclass Filter;\n\n/**\n * A wrapper for a currently running request/response. This is the primary handle passed to Lua.\n * The script interacts with Envoy entirely through this handle.\n */\nclass StreamHandleWrapper : public Filters::Common::Lua::BaseLuaObject<StreamHandleWrapper>,\n                            public Http::AsyncClient::Callbacks {\npublic:\n  /**\n   * The state machine for a stream handler. In the current implementation everything the filter\n   * does is a discrete state. 
This may become sub-optimal as we add other things that might\n   * cause the filter to block.\n   * TODO(mattklein123): Consider whether we should split the state machine into an overall state\n   * and a blocking reason type.\n   */\n  enum class State {\n    // Lua code is currently running or the script has finished.\n    Running,\n    // Lua script is blocked waiting for the next body chunk.\n    WaitForBodyChunk,\n    // Lua script is blocked waiting for the full body.\n    WaitForBody,\n    // Lua script is blocked waiting for trailers.\n    WaitForTrailers,\n    // Lua script is blocked waiting for the result of an HTTP call.\n    HttpCall,\n    // Lua script has done a direct response.\n    Responded\n  };\n\n  StreamHandleWrapper(Filters::Common::Lua::Coroutine& coroutine, Http::HeaderMap& headers,\n                      bool end_stream, Filter& filter, FilterCallbacks& callbacks);\n\n  Http::FilterHeadersStatus start(int function_ref);\n  Http::FilterDataStatus onData(Buffer::Instance& data, bool end_stream);\n  Http::FilterTrailersStatus onTrailers(Http::HeaderMap& trailers);\n\n  void onReset() {\n    if (http_request_) {\n      http_request_->cancel();\n      http_request_ = nullptr;\n    }\n  }\n\n  static ExportedFunctions exportedFunctions() {\n    return {{\"headers\", static_luaHeaders},\n            {\"body\", static_luaBody},\n            {\"bodyChunks\", static_luaBodyChunks},\n            {\"trailers\", static_luaTrailers},\n            {\"metadata\", static_luaMetadata},\n            {\"logTrace\", static_luaLogTrace},\n            {\"logDebug\", static_luaLogDebug},\n            {\"logInfo\", static_luaLogInfo},\n            {\"logWarn\", static_luaLogWarn},\n            {\"logErr\", static_luaLogErr},\n            {\"logCritical\", static_luaLogCritical},\n            {\"httpCall\", static_luaHttpCall},\n            {\"respond\", static_luaRespond},\n            {\"streamInfo\", static_luaStreamInfo},\n            {\"connection\", 
static_luaConnection},\n            {\"importPublicKey\", static_luaImportPublicKey},\n            {\"verifySignature\", static_luaVerifySignature},\n            {\"base64Escape\", static_luaBase64Escape}};\n  }\n\nprivate:\n  /**\n   * Perform an HTTP call to an upstream host.\n   * @param 1 (string): The name of the upstream cluster to call. This cluster must be configured.\n   * @param 2 (table): A table of HTTP headers. :method, :path, and :authority must be defined.\n   * @param 3 (string): Body. Can be nil.\n   * @param 4 (int): Timeout in milliseconds for the call.\n   * @param 5 (bool): Optional flag. If true, filter continues without waiting for HTTP response\n   * from upstream service. False/synchronous by default.\n   * @return headers (table), body (string/nil)\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaHttpCall);\n\n  /**\n   * Perform an inline response. This call is currently only valid on the request path. Further\n   * filter iteration will stop. No further script code will run after this call.\n   * @param 1 (table): A table of HTTP headers. :status must be defined.\n   * @param 2 (string): Body. Can be nil.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaRespond);\n\n  /**\n   * @return a handle to the headers.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaHeaders);\n\n  /**\n   * @return a handle to the full body or nil if there is no body. This call will cause the script\n   *         to yield until the entire body is received (or if there is no body will return nil\n   *         right away).\n   *         NOTE: This call causes Envoy to buffer the body. The max buffer size is configured\n   *         based on the currently active flow control settings.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaBody);\n\n  /**\n   * @return an iterator that allows the script to iterate through all body chunks as they are\n   *         received. The iterator will yield between body chunks. 
Envoy *will not* buffer\n   *         the body chunks in this case, but the script can look at them as they go by.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaBodyChunks);\n\n  /**\n   * @return a handle to the trailers or nil if there are no trailers. This call will cause the\n   *         script to yield if Envoy does not yet know if there are trailers or not.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaTrailers);\n\n  /**\n   * @return a handle to the metadata.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaMetadata);\n\n  /**\n   * @return a handle to the stream info.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaStreamInfo);\n\n  /**\n   * @return a handle to the network connection.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaConnection);\n\n  /**\n   * Log a message to the Envoy log.\n   * @param 1 (string): The log message.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaLogTrace);\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaLogDebug);\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaLogInfo);\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaLogWarn);\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaLogErr);\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaLogCritical);\n\n  /**\n   * Verify cryptographic signatures.\n   * @param 1 (string) hash function(including SHA1, SHA224, SHA256, SHA384, SHA512)\n   * @param 2 (void*)  pointer to public key\n   * @param 3 (string) signature\n   * @param 4 (int)    length of signature\n   * @param 5 (string) clear text\n   * @param 6 (int)    length of clear text\n   * @return (bool, string) If the first element is true, the second element is empty; otherwise,\n   * the second element stores the error message\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaVerifySignature);\n\n  /**\n   * Import public key.\n   * @param 1 (string) keyder string\n   * @param 2 (int)    length of keyder string\n   * @return pointer to public key\n   */\n  
DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaImportPublicKey);\n\n  /**\n   * This is the closure/iterator returned by luaBodyChunks() above.\n   */\n  DECLARE_LUA_CLOSURE(StreamHandleWrapper, luaBodyIterator);\n\n  /**\n   * Base64 escape a string.\n   * @param1 (string) string to be base64 escaped.\n   * @return (string) base64 escaped string.\n   */\n  DECLARE_LUA_FUNCTION(StreamHandleWrapper, luaBase64Escape);\n\n  int doSynchronousHttpCall(lua_State* state, Tracing::Span& span);\n  int doAsynchronousHttpCall(lua_State* state, Tracing::Span& span);\n\n  // Filters::Common::Lua::BaseLuaObject\n  void onMarkDead() override {\n    // Headers/body/trailers wrappers do not survive any yields. The user can request them\n    // again across yields if needed.\n    headers_wrapper_.reset();\n    body_wrapper_.reset();\n    trailers_wrapper_.reset();\n    metadata_wrapper_.reset();\n    stream_info_wrapper_.reset();\n    connection_wrapper_.reset();\n    public_key_wrapper_.reset();\n  }\n\n  // Http::AsyncClient::Callbacks\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override;\n  void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override;\n  void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {}\n\n  Filters::Common::Lua::Coroutine& coroutine_;\n  Http::HeaderMap& headers_;\n  bool end_stream_;\n  bool headers_continued_{};\n  bool buffered_body_{};\n  bool saw_body_{};\n  Filter& filter_;\n  FilterCallbacks& callbacks_;\n  Http::HeaderMap* trailers_{};\n  Filters::Common::Lua::LuaDeathRef<HeaderMapWrapper> headers_wrapper_;\n  Filters::Common::Lua::LuaDeathRef<Filters::Common::Lua::BufferWrapper> body_wrapper_;\n  Filters::Common::Lua::LuaDeathRef<HeaderMapWrapper> trailers_wrapper_;\n  Filters::Common::Lua::LuaDeathRef<Filters::Common::Lua::MetadataMapWrapper> metadata_wrapper_;\n  Filters::Common::Lua::LuaDeathRef<StreamInfoWrapper> stream_info_wrapper_;\n  
Filters::Common::Lua::LuaDeathRef<Filters::Common::Lua::ConnectionWrapper> connection_wrapper_;\n  Filters::Common::Lua::LuaDeathRef<PublicKeyWrapper> public_key_wrapper_;\n  State state_{State::Running};\n  std::function<void()> yield_callback_;\n  Http::AsyncClient::Request* http_request_{};\n\n  // The inserted crypto object pointers will not be removed from this map.\n  absl::flat_hash_map<std::string, Envoy::Common::Crypto::CryptoObjectPtr> public_key_storage_;\n};\n\n/**\n * An empty Callbacks client. It will ignore everything, including successes and failures.\n */\nclass NoopCallbacks : public Http::AsyncClient::Callbacks {\npublic:\n  // Http::AsyncClient::Callbacks\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override {}\n  void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override {}\n  void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {}\n};\n\n/**\n * Global configuration for the filter.\n */\nclass FilterConfig : Logger::Loggable<Logger::Id::lua> {\npublic:\n  FilterConfig(const envoy::extensions::filters::http::lua::v3::Lua& proto_config,\n               ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cluster_manager,\n               Api::Api& api);\n\n  PerLuaCodeSetup* perLuaCodeSetup(const std::string& name) const {\n    const auto iter = per_lua_code_setups_map_.find(name);\n    if (iter != per_lua_code_setups_map_.end()) {\n      return iter->second.get();\n    }\n    return nullptr;\n  }\n\n  Upstream::ClusterManager& cluster_manager_;\n\nprivate:\n  absl::flat_hash_map<std::string, PerLuaCodeSetupPtr> per_lua_code_setups_map_;\n};\n\nusing FilterConfigConstSharedPtr = std::shared_ptr<FilterConfig>;\n\n/**\n * Route configuration for the filter.\n */\nclass FilterConfigPerRoute : public Router::RouteSpecificFilterConfig {\npublic:\n  FilterConfigPerRoute(const envoy::extensions::filters::http::lua::v3::LuaPerRoute& config,\n 
                      Server::Configuration::ServerFactoryContext& context);\n\n  ~FilterConfigPerRoute() override {\n    // The design of the TLS system does not allow TLS state to be modified in worker threads.\n    // However, when the route configuration is dynamically updated via RDS, the old\n    // FilterConfigPerRoute object may be destructed in a random worker thread. Therefore, to\n    // ensure thread safety, ownership of per_lua_code_setup_ptr_ must be transferred to the main\n    // thread and destroyed when the FilterConfigPerRoute object is not destructed in the main\n    // thread.\n    if (per_lua_code_setup_ptr_ && !main_thread_dispatcher_.isThreadSafe()) {\n      auto shared_ptr_wrapper =\n          std::make_shared<PerLuaCodeSetupPtr>(std::move(per_lua_code_setup_ptr_));\n      main_thread_dispatcher_.post([shared_ptr_wrapper] { shared_ptr_wrapper->reset(); });\n    }\n  }\n\n  bool disabled() const { return disabled_; }\n  const std::string& name() const { return name_; }\n  PerLuaCodeSetup* perLuaCodeSetup() const { return per_lua_code_setup_ptr_.get(); }\n\nprivate:\n  Event::Dispatcher& main_thread_dispatcher_;\n\n  const bool disabled_;\n  const std::string name_;\n  PerLuaCodeSetupPtr per_lua_code_setup_ptr_;\n};\n\nnamespace {\n\nPerLuaCodeSetup* getPerLuaCodeSetup(const FilterConfig* filter_config,\n                                    Http::StreamFilterCallbacks* callbacks) {\n  const FilterConfigPerRoute* config_per_route = nullptr;\n  if (callbacks && callbacks->route()) {\n    config_per_route = Http::Utility::resolveMostSpecificPerFilterConfig<FilterConfigPerRoute>(\n        HttpFilterNames::get().Lua, callbacks->route());\n  }\n\n  if (config_per_route != nullptr) {\n    if (config_per_route->disabled()) {\n      return nullptr;\n    }\n    if (!config_per_route->name().empty()) {\n      ASSERT(filter_config);\n      return filter_config->perLuaCodeSetup(config_per_route->name());\n    }\n    return 
config_per_route->perLuaCodeSetup();\n  }\n  ASSERT(filter_config);\n  return filter_config->perLuaCodeSetup(GLOBAL_SCRIPT_NAME);\n}\n\n} // namespace\n\n// TODO(mattklein123): Filter stats.\n\n/**\n * The HTTP Lua filter. Allows scripts to run in both the request an response flow.\n */\nclass Filter : public Http::StreamFilter, Logger::Loggable<Logger::Id::lua> {\npublic:\n  Filter(FilterConfigConstSharedPtr config) : config_(config) {}\n\n  Upstream::ClusterManager& clusterManager() { return config_->cluster_manager_; }\n  void scriptError(const Filters::Common::Lua::LuaException& e);\n  virtual void scriptLog(spdlog::level::level_enum level, const char* message);\n\n  // Http::StreamFilterBase\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override {\n    PerLuaCodeSetup* setup = getPerLuaCodeSetup(config_.get(), decoder_callbacks_.callbacks_);\n    const int function_ref = setup ? 
setup->requestFunctionRef() : LUA_REFNIL;\n    return doHeaders(request_stream_wrapper_, request_coroutine_, decoder_callbacks_, function_ref,\n                     setup, headers, end_stream);\n  }\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override {\n    return doData(request_stream_wrapper_, data, end_stream);\n  }\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override {\n    return doTrailers(request_stream_wrapper_, trailers);\n  }\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    decoder_callbacks_.callbacks_ = &callbacks;\n  }\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override {\n    PerLuaCodeSetup* setup = getPerLuaCodeSetup(config_.get(), decoder_callbacks_.callbacks_);\n    const int function_ref = setup ? 
setup->responseFunctionRef() : LUA_REFNIL;\n    return doHeaders(response_stream_wrapper_, response_coroutine_, encoder_callbacks_,\n                     function_ref, setup, headers, end_stream);\n  }\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override {\n    return doData(response_stream_wrapper_, data, end_stream);\n  };\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override {\n    return doTrailers(response_stream_wrapper_, trailers);\n  };\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override {\n    encoder_callbacks_.callbacks_ = &callbacks;\n  };\n\nprivate:\n  struct DecoderCallbacks : public FilterCallbacks {\n    DecoderCallbacks(Filter& parent) : parent_(parent) {}\n\n    // FilterCallbacks\n    void addData(Buffer::Instance& data) override {\n      return callbacks_->addDecodedData(data, false);\n    }\n    const Buffer::Instance* bufferedBody() override { return callbacks_->decodingBuffer(); }\n    void continueIteration() override { return callbacks_->continueDecoding(); }\n    void onHeadersModified() override { callbacks_->clearRouteCache(); }\n    void respond(Http::ResponseHeaderMapPtr&& headers, Buffer::Instance* body,\n                 lua_State* state) override;\n\n    const ProtobufWkt::Struct& metadata() const override;\n    StreamInfo::StreamInfo& streamInfo() override { return callbacks_->streamInfo(); }\n    const Network::Connection* connection() const override { return callbacks_->connection(); }\n    Tracing::Span& activeSpan() override { return callbacks_->activeSpan(); }\n\n    Filter& parent_;\n    Http::StreamDecoderFilterCallbacks* callbacks_{};\n  };\n\n  struct EncoderCallbacks : public FilterCallbacks {\n    EncoderCallbacks(Filter& parent) : parent_(parent) {}\n\n    // FilterCallbacks\n    void 
addData(Buffer::Instance& data) override {\n      return callbacks_->addEncodedData(data, false);\n    }\n    const Buffer::Instance* bufferedBody() override { return callbacks_->encodingBuffer(); }\n    void continueIteration() override { return callbacks_->continueEncoding(); }\n    void onHeadersModified() override {}\n    void respond(Http::ResponseHeaderMapPtr&& headers, Buffer::Instance* body,\n                 lua_State* state) override;\n\n    const ProtobufWkt::Struct& metadata() const override;\n    StreamInfo::StreamInfo& streamInfo() override { return callbacks_->streamInfo(); }\n    const Network::Connection* connection() const override { return callbacks_->connection(); }\n    Tracing::Span& activeSpan() override { return callbacks_->activeSpan(); }\n\n    Filter& parent_;\n    Http::StreamEncoderFilterCallbacks* callbacks_{};\n  };\n\n  using StreamHandleRef = Filters::Common::Lua::LuaDeathRef<StreamHandleWrapper>;\n\n  Http::FilterHeadersStatus doHeaders(StreamHandleRef& handle,\n                                      Filters::Common::Lua::CoroutinePtr& coroutine,\n                                      FilterCallbacks& callbacks, int function_ref,\n                                      PerLuaCodeSetup* setup, Http::HeaderMap& headers,\n                                      bool end_stream);\n  Http::FilterDataStatus doData(StreamHandleRef& handle, Buffer::Instance& data, bool end_stream);\n  Http::FilterTrailersStatus doTrailers(StreamHandleRef& handle, Http::HeaderMap& trailers);\n\n  FilterConfigConstSharedPtr config_;\n  DecoderCallbacks decoder_callbacks_{*this};\n  EncoderCallbacks encoder_callbacks_{*this};\n  StreamHandleRef request_stream_wrapper_;\n  StreamHandleRef response_stream_wrapper_;\n  bool destroyed_{};\n\n  // These coroutines used to be owned by the stream handles. After investigating #3570, it\n  // became clear that there is a circular memory reference when a coroutine yields. 
Basically,\n  // the coroutine holds a reference to the stream wrapper. I'm not completely sure why this is,\n  // but I think it is because the yield happens via a stream handle method, so the runtime must\n  // hold a reference so that it can return out of the yield through the object. So now we hold\n  // the coroutine references at the same level as the stream handles so that when the filter is\n  // destroyed the circular reference is broken and both objects are cleaned up.\n  //\n  // Note that the above explanation probably means that we don't need to hold a reference to the\n  // coroutine at all and it would be taken care of automatically via a runtime internal reference\n  // when a yield happens. However, given that I don't fully understand the runtime internals, this\n  // seems like a safer fix for now.\n  Filters::Common::Lua::CoroutinePtr request_coroutine_;\n  Filters::Common::Lua::CoroutinePtr response_coroutine_;\n};\n\n} // namespace Lua\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/lua/wrappers.cc",
    "content": "#include \"extensions/filters/http/lua/wrappers.h\"\n\n#include \"common/http/header_utility.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/filters/common/lua/wrappers.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Lua {\n\nHeaderMapIterator::HeaderMapIterator(HeaderMapWrapper& parent) : parent_(parent) {\n  entries_.reserve(parent_.headers_.size());\n  parent_.headers_.iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    entries_.push_back(&header);\n    return Http::HeaderMap::Iterate::Continue;\n  });\n}\n\nint HeaderMapIterator::luaPairsIterator(lua_State* state) {\n  if (current_ == entries_.size()) {\n    parent_.iterator_.reset();\n    return 0;\n  } else {\n    const absl::string_view key_view(entries_[current_]->key().getStringView());\n    lua_pushlstring(state, key_view.data(), key_view.length());\n    const absl::string_view value_view(entries_[current_]->value().getStringView());\n    lua_pushlstring(state, value_view.data(), value_view.length());\n    current_++;\n    return 2;\n  }\n}\n\nint HeaderMapWrapper::luaAdd(lua_State* state) {\n  checkModifiable(state);\n\n  const char* key = luaL_checkstring(state, 2);\n  const char* value = luaL_checkstring(state, 3);\n  headers_.addCopy(Http::LowerCaseString(key), value);\n  return 0;\n}\n\nint HeaderMapWrapper::luaGet(lua_State* state) {\n  const char* key = luaL_checkstring(state, 2);\n  const auto value =\n      Http::HeaderUtility::getAllOfHeaderAsString(headers_, Http::LowerCaseString(key));\n  if (value.result().has_value()) {\n    lua_pushlstring(state, value.result().value().data(), value.result().value().length());\n    return 1;\n  } else {\n    return 0;\n  }\n}\n\nint HeaderMapWrapper::luaPairs(lua_State* state) {\n  if (iterator_.get() != nullptr) {\n    luaL_error(state, \"cannot create a second iterator before completing the first\");\n  }\n\n  // The way iteration works is we create an 
iteration wrapper that snaps pointers to all of\n  // the headers. We don't allow modification while an iterator is active. This means that\n  // currently if a script breaks out of iteration, further modifications will not be possible\n  // because we don't know if they may resume iteration in the future and it isn't safe. There\n  // are potentially better ways of handling this but due to GC of the iterator it's very\n  // difficult to control safety without tracking every allocated iterator and invalidating them\n  // if the map is modified.\n  iterator_.reset(HeaderMapIterator::create(state, *this), true);\n  lua_pushcclosure(state, HeaderMapIterator::static_luaPairsIterator, 1);\n  return 1;\n}\n\nint HeaderMapWrapper::luaReplace(lua_State* state) {\n  checkModifiable(state);\n\n  const char* key = luaL_checkstring(state, 2);\n  const char* value = luaL_checkstring(state, 3);\n  const Http::LowerCaseString lower_key(key);\n\n  headers_.setCopy(lower_key, value);\n\n  return 0;\n}\n\nint HeaderMapWrapper::luaRemove(lua_State* state) {\n  checkModifiable(state);\n\n  const char* key = luaL_checkstring(state, 2);\n  headers_.remove(Http::LowerCaseString(key));\n  return 0;\n}\n\nvoid HeaderMapWrapper::checkModifiable(lua_State* state) {\n  if (iterator_.get() != nullptr) {\n    luaL_error(state, \"header map cannot be modified while iterating\");\n  }\n\n  if (!cb_()) {\n    luaL_error(state, \"header map can no longer be modified\");\n  }\n}\n\nint StreamInfoWrapper::luaProtocol(lua_State* state) {\n  lua_pushstring(state, Http::Utility::getProtocolString(stream_info_.protocol().value()).c_str());\n  return 1;\n}\n\nint StreamInfoWrapper::luaDynamicMetadata(lua_State* state) {\n  if (dynamic_metadata_wrapper_.get() != nullptr) {\n    dynamic_metadata_wrapper_.pushStack();\n  } else {\n    dynamic_metadata_wrapper_.reset(DynamicMetadataMapWrapper::create(state, *this), true);\n  }\n  return 1;\n}\n\nint StreamInfoWrapper::luaDownstreamSslConnection(lua_State* 
state) {\n  const auto& ssl = stream_info_.downstreamSslConnection();\n  if (ssl != nullptr) {\n    if (downstream_ssl_connection_.get() != nullptr) {\n      downstream_ssl_connection_.pushStack();\n    } else {\n      downstream_ssl_connection_.reset(\n          Filters::Common::Lua::SslConnectionWrapper::create(state, *ssl), true);\n    }\n  } else {\n    lua_pushnil(state);\n  }\n  return 1;\n}\n\nDynamicMetadataMapIterator::DynamicMetadataMapIterator(DynamicMetadataMapWrapper& parent)\n    : parent_{parent}, current_{parent_.streamInfo().dynamicMetadata().filter_metadata().begin()} {}\n\nStreamInfo::StreamInfo& DynamicMetadataMapWrapper::streamInfo() { return parent_.stream_info_; }\n\nint DynamicMetadataMapIterator::luaPairsIterator(lua_State* state) {\n  if (current_ == parent_.streamInfo().dynamicMetadata().filter_metadata().end()) {\n    parent_.iterator_.reset();\n    return 0;\n  }\n\n  lua_pushstring(state, current_->first.c_str());\n  Filters::Common::Lua::MetadataMapHelper::createTable(state, current_->second.fields());\n\n  current_++;\n  return 2;\n}\n\nint DynamicMetadataMapWrapper::luaGet(lua_State* state) {\n  const char* filter_name = luaL_checkstring(state, 2);\n  const auto& metadata = streamInfo().dynamicMetadata().filter_metadata();\n  const auto filter_it = metadata.find(filter_name);\n  if (filter_it == metadata.end()) {\n    return 0;\n  }\n\n  Filters::Common::Lua::MetadataMapHelper::createTable(state, filter_it->second.fields());\n  return 1;\n}\n\nint DynamicMetadataMapWrapper::luaSet(lua_State* state) {\n  if (iterator_.get() != nullptr) {\n    luaL_error(state, \"dynamic metadata map cannot be modified while iterating\");\n  }\n\n  const char* filter_name = luaL_checkstring(state, 2);\n  const char* key = luaL_checkstring(state, 3);\n\n  // MetadataMapHelper::loadValue will convert the value on top of the Lua stack,\n  // so push a copy of the 3rd arg (\"value\") to the top.\n  lua_pushvalue(state, 4);\n\n  ProtobufWkt::Struct 
value;\n  (*value.mutable_fields())[key] = Filters::Common::Lua::MetadataMapHelper::loadValue(state);\n  streamInfo().setDynamicMetadata(filter_name, value);\n\n  // Pop the copy of the metadata value from the stack.\n  lua_pop(state, 1);\n  return 0;\n}\n\nint DynamicMetadataMapWrapper::luaPairs(lua_State* state) {\n  if (iterator_.get() != nullptr) {\n    luaL_error(state, \"cannot create a second iterator before completing the first\");\n  }\n\n  iterator_.reset(DynamicMetadataMapIterator::create(state, *this), true);\n  lua_pushcclosure(state, DynamicMetadataMapIterator::static_luaPairsIterator, 1);\n  return 1;\n}\n\nint PublicKeyWrapper::luaGet(lua_State* state) {\n  if (public_key_.empty()) {\n    lua_pushnil(state);\n  } else {\n    lua_pushstring(state, public_key_.c_str());\n  }\n  return 1;\n}\n\n} // namespace Lua\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/lua/wrappers.h",
    "content": "#pragma once\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"common/crypto/utility.h\"\n\n#include \"extensions/common/crypto/crypto_impl.h\"\n#include \"extensions/filters/common/lua/lua.h\"\n#include \"extensions/filters/common/lua/wrappers.h\"\n\n#include \"openssl/evp.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Lua {\n\nclass HeaderMapWrapper;\n\n/**\n * Iterator over a header map.\n */\nclass HeaderMapIterator : public Filters::Common::Lua::BaseLuaObject<HeaderMapIterator> {\npublic:\n  HeaderMapIterator(HeaderMapWrapper& parent);\n\n  static ExportedFunctions exportedFunctions() { return {}; }\n\n  DECLARE_LUA_CLOSURE(HeaderMapIterator, luaPairsIterator);\n\nprivate:\n  HeaderMapWrapper& parent_;\n  std::vector<const Http::HeaderEntry*> entries_;\n  uint64_t current_{};\n};\n\n/**\n * Lua wrapper for a header map. Methods that will modify the map will call a check function\n * to see if modification is allowed.\n */\nclass HeaderMapWrapper : public Filters::Common::Lua::BaseLuaObject<HeaderMapWrapper> {\npublic:\n  using CheckModifiableCb = std::function<bool()>;\n\n  HeaderMapWrapper(Http::HeaderMap& headers, CheckModifiableCb cb) : headers_(headers), cb_(cb) {}\n\n  static ExportedFunctions exportedFunctions() {\n    return {{\"add\", static_luaAdd},\n            {\"get\", static_luaGet},\n            {\"remove\", static_luaRemove},\n            {\"replace\", static_luaReplace},\n            {\"__pairs\", static_luaPairs}};\n  }\n\nprivate:\n  /**\n   * Add a header to the map.\n   * @param 1 (string): header name.\n   * @param 2 (string): header value.\n   * @return nothing.\n   */\n  DECLARE_LUA_FUNCTION(HeaderMapWrapper, luaAdd);\n\n  /**\n   * Get a header value from the map.\n   * @param 1 (string): header name.\n   * @return string value if found or nil.\n   */\n  DECLARE_LUA_FUNCTION(HeaderMapWrapper, luaGet);\n\n  /**\n   * Implementation of 
the __pairs metamethod so a headers wrapper can be iterated over using\n   * pairs().\n   */\n  DECLARE_LUA_FUNCTION(HeaderMapWrapper, luaPairs);\n\n  /**\n   * Remove a header from the map.\n   * @param 1 (string): header name.\n   * @return nothing.\n   */\n  DECLARE_LUA_FUNCTION(HeaderMapWrapper, luaRemove);\n\n  /**\n   * Replace a header in the map. If the header does not exist, it will be added.\n   * @param 1 (string): header name.\n   * @param 2 (string): header value.\n   * @return nothing.\n   */\n  DECLARE_LUA_FUNCTION(HeaderMapWrapper, luaReplace);\n\n  void checkModifiable(lua_State* state);\n\n  // Envoy::Lua::BaseLuaObject\n  void onMarkDead() override {\n    // Iterators do not survive yields.\n    iterator_.reset();\n  }\n\n  Http::HeaderMap& headers_;\n  CheckModifiableCb cb_;\n  Filters::Common::Lua::LuaDeathRef<HeaderMapIterator> iterator_;\n\n  friend class HeaderMapIterator;\n};\n\nclass DynamicMetadataMapWrapper;\nclass StreamInfoWrapper;\n\n/**\n * Iterator over a dynamic metadata map.\n */\nclass DynamicMetadataMapIterator\n    : public Filters::Common::Lua::BaseLuaObject<DynamicMetadataMapIterator> {\npublic:\n  DynamicMetadataMapIterator(DynamicMetadataMapWrapper& parent);\n\n  static ExportedFunctions exportedFunctions() { return {}; }\n\n  DECLARE_LUA_CLOSURE(DynamicMetadataMapIterator, luaPairsIterator);\n\nprivate:\n  DynamicMetadataMapWrapper& parent_;\n  Protobuf::Map<std::string, ProtobufWkt::Struct>::const_iterator current_;\n};\n\n/**\n * Lua wrapper for a dynamic metadata.\n */\nclass DynamicMetadataMapWrapper\n    : public Filters::Common::Lua::BaseLuaObject<DynamicMetadataMapWrapper> {\npublic:\n  DynamicMetadataMapWrapper(StreamInfoWrapper& parent) : parent_{parent} {}\n\n  static ExportedFunctions exportedFunctions() {\n    return {{\"get\", static_luaGet}, {\"set\", static_luaSet}, {\"__pairs\", static_luaPairs}};\n  }\n\nprivate:\n  /**\n   * Get a metadata value from the map.\n   * @param 1 (string): filter name.\n   * 
@return value if found or nil.\n   */\n  DECLARE_LUA_FUNCTION(DynamicMetadataMapWrapper, luaGet);\n\n  /**\n   * Get a metadata value from the map.\n   * @param 1 (string): filter name.\n   * @param 2 (string or table): key.\n   * @param 3 (string or table): value.\n   * @return nil.\n   */\n  DECLARE_LUA_FUNCTION(DynamicMetadataMapWrapper, luaSet);\n\n  /**\n   * Implementation of the __pairs metamethod so a dynamic metadata wrapper can be iterated over\n   * using pairs().\n   */\n  DECLARE_LUA_FUNCTION(DynamicMetadataMapWrapper, luaPairs);\n\n  // Envoy::Lua::BaseLuaObject\n  void onMarkDead() override {\n    // Iterators do not survive yields.\n    iterator_.reset();\n  }\n\n  // To get reference to parent's (StreamInfoWrapper) stream info member.\n  StreamInfo::StreamInfo& streamInfo();\n\n  StreamInfoWrapper& parent_;\n  Filters::Common::Lua::LuaDeathRef<DynamicMetadataMapIterator> iterator_;\n\n  friend class DynamicMetadataMapIterator;\n};\n\n/**\n * Lua wrapper for a stream info.\n */\nclass StreamInfoWrapper : public Filters::Common::Lua::BaseLuaObject<StreamInfoWrapper> {\npublic:\n  StreamInfoWrapper(StreamInfo::StreamInfo& stream_info) : stream_info_{stream_info} {}\n  static ExportedFunctions exportedFunctions() {\n    return {{\"protocol\", static_luaProtocol},\n            {\"dynamicMetadata\", static_luaDynamicMetadata},\n            {\"downstreamSslConnection\", static_luaDownstreamSslConnection}};\n  }\n\nprivate:\n  /**\n   * Get current protocol being used.\n   * @return string representation of Http::Protocol.\n   */\n  DECLARE_LUA_FUNCTION(StreamInfoWrapper, luaProtocol);\n\n  /**\n   * Get reference to stream info dynamic metadata object.\n   * @return DynamicMetadataMapWrapper representation of StreamInfo dynamic metadata.\n   */\n  DECLARE_LUA_FUNCTION(StreamInfoWrapper, luaDynamicMetadata);\n\n  /**\n   * Get reference to stream info downstreamSslConnection.\n   * @return SslConnectionWrapper representation of StreamInfo downstream SSL 
connection.\n   */\n  DECLARE_LUA_FUNCTION(StreamInfoWrapper, luaDownstreamSslConnection);\n\n  // Envoy::Lua::BaseLuaObject\n  void onMarkDead() override { dynamic_metadata_wrapper_.reset(); }\n\n  StreamInfo::StreamInfo& stream_info_;\n  Filters::Common::Lua::LuaDeathRef<DynamicMetadataMapWrapper> dynamic_metadata_wrapper_;\n  Filters::Common::Lua::LuaDeathRef<Filters::Common::Lua::SslConnectionWrapper>\n      downstream_ssl_connection_;\n\n  friend class DynamicMetadataMapWrapper;\n};\n\n/**\n * Lua wrapper for key for accessing the imported public keys.\n */\nclass PublicKeyWrapper : public Filters::Common::Lua::BaseLuaObject<PublicKeyWrapper> {\npublic:\n  explicit PublicKeyWrapper(absl::string_view key) : public_key_(key) {}\n  static ExportedFunctions exportedFunctions() { return {{\"get\", static_luaGet}}; }\n\nprivate:\n  /**\n   * Get public key value.\n   * @return public key value or nil if key is empty.\n   */\n  DECLARE_LUA_FUNCTION(PublicKeyWrapper, luaGet);\n\n  const std::string public_key_;\n};\n\n} // namespace Lua\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/oauth2/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\n# HTTP L7 filter that performs oauth.\n# Public docs: docs/root/configuration/http_filters/oauth_filter.rst\n\nenvoy_cc_library(\n    name = \"oauth_callback_interface\",\n    hdrs = [\"oauth.h\"],\n)\n\nenvoy_proto_library(\n    name = \"oauth_response\",\n    srcs = [\"oauth_response.proto\"],\n)\n\nenvoy_cc_library(\n    name = \"oauth_client\",\n    srcs = [\"oauth_client.cc\"],\n    hdrs = [\"oauth_client.h\"],\n    deps = [\n        \":oauth_response_cc_proto\",\n        \"//include/envoy/http:async_client_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//source/extensions/filters/http/oauth2:oauth_callback_interface\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"oauth_lib\",\n    srcs = [\"filter.cc\"],\n    hdrs = [\"filter.h\"],\n    deps = [\n        \":oauth_client\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/formatter:substitution_formatter_lib\",\n        \"//source/common/http:rest_api_fetcher_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/common/crypto:utility_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto\",\n    
],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    status = \"alpha\",\n    deps = [\n        \":oauth_lib\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/api/v2/auth:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/oauth2/config.cc",
    "content": "#include \"extensions/filters/http/oauth2/config.h\"\n\n#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/api/v2/auth/secret.pb.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/secret/secret_provider.h\"\n#include \"envoy/ssl/private_key/private_key.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/oauth2/filter.h\"\n#include \"extensions/filters/http/oauth2/oauth.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\nnamespace {\nSecret::GenericSecretConfigProviderSharedPtr\nsecretsProvider(const envoy::extensions::transport_sockets::tls::v3::SdsSecretConfig& config,\n                Secret::SecretManager& secret_manager,\n                Server::Configuration::TransportSocketFactoryContext& transport_socket_factory) {\n  if (config.has_sds_config()) {\n    return secret_manager.findOrCreateGenericSecretProvider(config.sds_config(), config.name(),\n                                                            transport_socket_factory);\n  } else {\n    return secret_manager.findStaticGenericSecretProvider(config.name());\n  }\n}\n} // namespace\n\nHttp::FilterFactoryCb OAuth2Config::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2& proto,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n  if (!proto.has_config()) {\n    throw EnvoyException(\"config must be present for global config\");\n  }\n\n  const auto& proto_config = proto.config();\n  const auto& credentials = proto_config.credentials();\n\n  const auto& token_secret = credentials.token_secret();\n  const auto& hmac_secret = 
credentials.hmac_secret();\n\n  auto& secret_manager = context.clusterManager().clusterManagerFactory().secretManager();\n  auto& transport_socket_factory = context.getTransportSocketFactoryContext();\n  auto secret_provider_token_secret =\n      secretsProvider(token_secret, secret_manager, transport_socket_factory);\n  auto secret_provider_hmac_secret =\n      secretsProvider(hmac_secret, secret_manager, transport_socket_factory);\n\n  auto secret_reader = std::make_shared<SDSSecretReader>(\n      secret_provider_token_secret, secret_provider_hmac_secret, context.api());\n  auto config = std::make_shared<FilterConfig>(proto_config, context.clusterManager(),\n                                               secret_reader, context.scope(), stats_prefix);\n\n  return [&context, config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    std::unique_ptr<OAuth2Client> oauth_client =\n        std::make_unique<OAuth2ClientImpl>(context.clusterManager(), config->oauthTokenEndpoint());\n    callbacks.addStreamDecoderFilter(\n        std::make_shared<OAuth2Filter>(config, std::move(oauth_client), context.timeSource()));\n  };\n}\n\n/*\n * Static registration for the OAuth2 filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(OAuth2Config, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/oauth2/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h\"\n#include \"envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\nclass OAuth2Config : public Extensions::HttpFilters::Common::FactoryBase<\n                         envoy::extensions::filters::http::oauth2::v3alpha::OAuth2> {\npublic:\n  OAuth2Config() : FactoryBase(HttpFilterNames::get().OAuth) {}\n\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2&, const std::string&,\n      Server::Configuration::FactoryContext&) override;\n};\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/oauth2/filter.cc",
    "content": "#include \"extensions/filters/http/oauth2/filter.h\"\n\n#include <algorithm>\n#include <chrono>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/hex.h\"\n#include \"common/common/matchers.h\"\n#include \"common/crypto/utility.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/str_replace.h\"\n#include \"absl/strings/str_split.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\nnamespace {\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    authorization_handle(Http::CustomHeaders::get().Authorization);\n\nconstexpr absl::string_view SignoutCookieValue =\n    \"OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\";\n\nconstexpr absl::string_view SignoutBearerTokenValue =\n    \"BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\";\n\nconstexpr const char* CookieTailFormatString = \";version=1;path=/;Max-Age={};secure\";\n\nconstexpr const char* CookieTailHttpOnlyFormatString =\n    \";version=1;path=/;Max-Age={};secure;HttpOnly\";\n\nconst char* AuthorizationEndpointFormat =\n    \"{}?client_id={}&scope=user&response_type=code&redirect_uri={}&state={}\";\n\nconstexpr absl::string_view UnauthorizedBodyMessage = \"OAuth flow failed.\";\n\nconst std::string& queryParamsError() { CONSTRUCT_ON_FIRST_USE(std::string, \"error\"); }\nconst std::string& queryParamsCode() { 
CONSTRUCT_ON_FIRST_USE(std::string, \"code\"); }\nconst std::string& queryParamsState() { CONSTRUCT_ON_FIRST_USE(std::string, \"state\"); }\n\nconstexpr absl::string_view REDIRECT_RACE = \"oauth.race_redirect\";\nconstexpr absl::string_view REDIRECT_LOGGED_IN = \"oauth.logged_in\";\nconstexpr absl::string_view REDIRECT_FOR_CREDENTIALS = \"oauth.missing_credentials\";\nconstexpr absl::string_view SIGN_OUT = \"oauth.sign_out\";\n\ntemplate <class T>\nstd::vector<Http::HeaderUtility::HeaderData> headerMatchers(const T& matcher_protos) {\n  std::vector<Http::HeaderUtility::HeaderData> matchers;\n  matchers.reserve(matcher_protos.size());\n\n  for (const auto& proto : matcher_protos) {\n    matchers.emplace_back(proto);\n  }\n\n  return matchers;\n}\n\n// Sets the auth token as the Bearer token in the authorization header.\nvoid setBearerToken(Http::RequestHeaderMap& headers, const std::string& token) {\n  headers.setInline(authorization_handle.handle(), absl::StrCat(\"Bearer \", token));\n}\n} // namespace\n\nFilterConfig::FilterConfig(\n    const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config& proto_config,\n    Upstream::ClusterManager& cluster_manager, std::shared_ptr<SecretReader> secret_reader,\n    Stats::Scope& scope, const std::string& stats_prefix)\n    : oauth_token_endpoint_(proto_config.token_endpoint()),\n      authorization_endpoint_(proto_config.authorization_endpoint()),\n      client_id_(proto_config.credentials().client_id()),\n      redirect_uri_(proto_config.redirect_uri()),\n      redirect_matcher_(proto_config.redirect_path_matcher()),\n      signout_path_(proto_config.signout_path()), secret_reader_(secret_reader),\n      stats_(FilterConfig::generateStats(stats_prefix, scope)),\n      forward_bearer_token_(proto_config.forward_bearer_token()),\n      pass_through_header_matchers_(headerMatchers(proto_config.pass_through_matcher())) {\n  if (!cluster_manager.get(oauth_token_endpoint_.cluster())) {\n    throw 
EnvoyException(fmt::format(\"OAuth2 filter: unknown cluster '{}' in config. Please \"\n                                     \"specify which cluster to direct OAuth requests to.\",\n                                     oauth_token_endpoint_.cluster()));\n  }\n}\n\nFilterStats FilterConfig::generateStats(const std::string& prefix, Stats::Scope& scope) {\n  return {ALL_OAUTH_FILTER_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n}\n\nvoid OAuth2CookieValidator::setParams(const Http::RequestHeaderMap& headers,\n                                      const std::string& secret) {\n  expires_ = Http::Utility::parseCookieValue(headers, \"OauthExpires\");\n  token_ = Http::Utility::parseCookieValue(headers, \"BearerToken\");\n  hmac_ = Http::Utility::parseCookieValue(headers, \"OauthHMAC\");\n  host_ = headers.Host()->value().getStringView();\n\n  secret_.assign(secret.begin(), secret.end());\n}\n\nbool OAuth2CookieValidator::hmacIsValid() const {\n  auto& crypto_util = Envoy::Common::Crypto::UtilitySingleton::get();\n  const auto hmac_payload = absl::StrCat(host_, expires_, token_);\n  const auto pre_encoded_hmac = Hex::encode(crypto_util.getSha256Hmac(secret_, hmac_payload));\n  std::string encoded_hmac;\n  absl::Base64Escape(pre_encoded_hmac, &encoded_hmac);\n\n  return encoded_hmac == hmac_;\n}\n\nbool OAuth2CookieValidator::timestampIsValid() const {\n  uint64_t expires;\n  if (!absl::SimpleAtoi(expires_, &expires)) {\n    return false;\n  }\n\n  const auto current_epoch = time_source_.systemTime().time_since_epoch();\n  return std::chrono::seconds(expires) > current_epoch;\n}\n\nbool OAuth2CookieValidator::isValid() const { return hmacIsValid() && timestampIsValid(); }\n\nOAuth2Filter::OAuth2Filter(FilterConfigSharedPtr config,\n                           std::unique_ptr<OAuth2Client>&& oauth_client, TimeSource& time_source)\n    : validator_(std::make_shared<OAuth2CookieValidator>(time_source)),\n      oauth_client_(std::move(oauth_client)), config_(std::move(config)),\n 
     time_source_(time_source) {\n\n  oauth_client_->setCallbacks(*this);\n}\n\nconst std::string& OAuth2Filter::bearerPrefix() const {\n  CONSTRUCT_ON_FIRST_USE(std::string, \"bearer \");\n}\n\nstd::string OAuth2Filter::extractAccessToken(const Http::RequestHeaderMap& headers) const {\n  ASSERT(headers.Path() != nullptr);\n\n  // Start by looking for a bearer token in the Authorization header.\n  const Http::HeaderEntry* authorization = headers.getInline(authorization_handle.handle());\n  if (authorization != nullptr) {\n    const auto value = StringUtil::trim(authorization->value().getStringView());\n    const auto& bearer_prefix = bearerPrefix();\n    if (absl::StartsWithIgnoreCase(value, bearer_prefix)) {\n      const size_t start = bearer_prefix.length();\n      return std::string(StringUtil::ltrim(value.substr(start)));\n    }\n  }\n\n  // Check for the named query string parameter.\n  const auto path = headers.Path()->value().getStringView();\n  const auto params = Http::Utility::parseQueryString(path);\n  const auto param = params.find(\"token\");\n  if (param != params.end()) {\n    return param->second;\n  }\n\n  return EMPTY_STRING;\n}\n\n/**\n * primary cases:\n * 1) user is signing out\n * 2) /_oauth redirect\n * 3) user is authorized\n * 4) user is unauthorized\n */\nHttp::FilterHeadersStatus OAuth2Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n\n  // The following 2 headers are guaranteed for regular requests. 
The asserts are helpful when\n  // writing test code to not forget these important variables in mock requests\n  const Http::HeaderEntry* host_header = headers.Host();\n  ASSERT(host_header != nullptr);\n  host_ = host_header->value().getStringView();\n\n  const Http::HeaderEntry* path_header = headers.Path();\n  ASSERT(path_header != nullptr);\n  const absl::string_view path_str = path_header->value().getStringView();\n\n  // We should check if this is a sign out request.\n  if (config_->signoutPath().match(path_header->value().getStringView())) {\n    return signOutUser(headers);\n  }\n\n  if (canSkipOAuth(headers)) {\n    // Update the path header with the query string parameters after a successful OAuth login.\n    // This is necessary if a website requests multiple resources which get redirected to the\n    // auth server. A cached login on the authorization server side will set cookies\n    // correctly but cause a race condition on future requests that have their location set\n    // to the callback path.\n\n    if (config_->redirectPathMatcher().match(path_str)) {\n      Http::Utility::QueryParams query_parameters = Http::Utility::parseQueryString(path_str);\n\n      const auto state =\n          Http::Utility::PercentEncoding::decode(query_parameters.at(queryParamsState()));\n      Http::Utility::Url state_url;\n      if (!state_url.initialize(state, false)) {\n        sendUnauthorizedResponse();\n        return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n      }\n      // Avoid infinite redirect storm\n      if (config_->redirectPathMatcher().match(state_url.pathAndQueryParams())) {\n        sendUnauthorizedResponse();\n        return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n      }\n      Http::ResponseHeaderMapPtr response_headers{\n          Http::createHeaderMap<Http::ResponseHeaderMapImpl>(\n              {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::Found))},\n               
{Http::Headers::get().Location, state}})};\n      decoder_callbacks_->encodeHeaders(std::move(response_headers), true, REDIRECT_RACE);\n    }\n\n    // Continue on with the filter stack.\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // If a bearer token is supplied as a header or param, we ingest it here and kick off the\n  // user resolution immediately. Note this comes after HMAC validation, so technically this\n  // header is sanitized in a way, as the validation check forces the correct Bearer Cookie value.\n  access_token_ = extractAccessToken(headers);\n  if (!access_token_.empty()) {\n    found_bearer_token_ = true;\n    request_headers_ = &headers;\n    finishFlow();\n\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // If no access token and this isn't the callback URI, redirect to acquire credentials.\n  //\n  // The following conditional could be replaced with a regex pattern-match,\n  // if we're concerned about strict matching against the callback path.\n  if (!config_->redirectPathMatcher().match(path_str)) {\n    Http::ResponseHeaderMapPtr response_headers{Http::createHeaderMap<Http::ResponseHeaderMapImpl>(\n        {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::Found))}})};\n\n    // Construct the correct scheme. We default to https since this is a requirement for OAuth to\n    // succeed. 
However, if a downstream client explicitly declares the \"http\" scheme for whatever\n    // reason, we also use \"http\" when constructing our redirect uri to the authorization server.\n    auto scheme = Http::Headers::get().SchemeValues.Https;\n\n    const auto* scheme_header = headers.Scheme();\n    if ((scheme_header != nullptr &&\n         scheme_header->value().getStringView() == Http::Headers::get().SchemeValues.Http)) {\n      scheme = Http::Headers::get().SchemeValues.Http;\n    }\n\n    const std::string base_path = absl::StrCat(scheme, \"://\", host_);\n    const std::string state_path = absl::StrCat(base_path, headers.Path()->value().getStringView());\n    const std::string escaped_state = Http::Utility::PercentEncoding::encode(state_path, \":/=&?\");\n\n    Formatter::FormatterImpl formatter(config_->redirectUri());\n    const auto redirect_uri = formatter.format(headers, *Http::ResponseHeaderMapImpl::create(),\n                                               *Http::ResponseTrailerMapImpl::create(),\n                                               decoder_callbacks_->streamInfo(), \"\");\n    const std::string escaped_redirect_uri =\n        Http::Utility::PercentEncoding::encode(redirect_uri, \":/=&?\");\n\n    const std::string new_url =\n        fmt::format(AuthorizationEndpointFormat, config_->authorizationEndpoint(),\n                    config_->clientId(), escaped_redirect_uri, escaped_state);\n    response_headers->setLocation(new_url);\n    decoder_callbacks_->encodeHeaders(std::move(response_headers), true, REDIRECT_FOR_CREDENTIALS);\n\n    config_->stats().oauth_unauthorized_rq_.inc();\n\n    return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n  }\n\n  // At this point, we *are* on /_oauth. 
We believe this request comes from the authorization\n  // server and we expect the query strings to contain the information required to get the access\n  // token\n  const auto query_parameters = Http::Utility::parseQueryString(path_str);\n  if (query_parameters.find(queryParamsError()) != query_parameters.end()) {\n    sendUnauthorizedResponse();\n    return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n  }\n\n  // if the data we need is not present on the URL, stop execution\n  if (query_parameters.find(queryParamsCode()) == query_parameters.end() ||\n      query_parameters.find(queryParamsState()) == query_parameters.end()) {\n    sendUnauthorizedResponse();\n    return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n  }\n\n  auth_code_ = query_parameters.at(queryParamsCode());\n  state_ = Http::Utility::PercentEncoding::decode(query_parameters.at(queryParamsState()));\n\n  Http::Utility::Url state_url;\n  if (!state_url.initialize(state_, false)) {\n    sendUnauthorizedResponse();\n    return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n  }\n\n  Formatter::FormatterImpl formatter(config_->redirectUri());\n  const auto redirect_uri = formatter.format(headers, *Http::ResponseHeaderMapImpl::create(),\n                                             *Http::ResponseTrailerMapImpl::create(),\n                                             decoder_callbacks_->streamInfo(), \"\");\n  oauth_client_->asyncGetAccessToken(auth_code_, config_->clientId(), config_->clientSecret(),\n                                     redirect_uri);\n\n  // pause while we await the next step from the OAuth server\n  return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n}\n\n// Defines a sequence of checks determining whether we should initiate a new OAuth flow or skip to\n// the next filter in the chain.\nbool OAuth2Filter::canSkipOAuth(Http::RequestHeaderMap& headers) const {\n  // We can skip OAuth if the supplied HMAC cookie is valid. 
Apply the OAuth details as headers\n  // if we successfully validate the cookie.\n  validator_->setParams(headers, config_->tokenSecret());\n  if (validator_->isValid()) {\n    config_->stats().oauth_success_.inc();\n    setBearerToken(headers, validator_->token());\n    return true;\n  }\n\n  for (const auto& matcher : config_->passThroughMatchers()) {\n    if (matcher.matchesHeaders(headers)) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\n/**\n * Modifies the state of the filter by adding response headers to the decoder_callbacks\n */\nHttp::FilterHeadersStatus OAuth2Filter::signOutUser(const Http::RequestHeaderMap& headers) {\n  Http::ResponseHeaderMapPtr response_headers{Http::createHeaderMap<Http::ResponseHeaderMapImpl>(\n      {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::Found))}})};\n\n  const std::string new_path =\n      absl::StrCat(headers.ForwardedProto()->value().getStringView(), \"://\", host_, \"/\");\n  response_headers->addReference(Http::Headers::get().SetCookie, SignoutCookieValue);\n  response_headers->addReference(Http::Headers::get().SetCookie, SignoutBearerTokenValue);\n  response_headers->setLocation(new_path);\n  decoder_callbacks_->encodeHeaders(std::move(response_headers), true, SIGN_OUT);\n\n  return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n}\n\nvoid OAuth2Filter::onGetAccessTokenSuccess(const std::string& access_code,\n                                           std::chrono::seconds expires_in) {\n  access_token_ = access_code;\n\n  const auto new_epoch = time_source_.systemTime() + expires_in;\n  new_expires_ = std::to_string(\n      std::chrono::duration_cast<std::chrono::seconds>(new_epoch.time_since_epoch()).count());\n\n  finishFlow();\n}\n\nvoid OAuth2Filter::finishFlow() {\n\n  // We have fully completed the entire OAuth flow, whether through Authorization header or from\n  // user redirection to the auth server.\n  if (found_bearer_token_) {\n    setBearerToken(*request_headers_, 
access_token_);\n    config_->stats().oauth_success_.inc();\n    decoder_callbacks_->continueDecoding();\n    return;\n  }\n\n  std::string token_payload;\n  if (config_->forwardBearerToken()) {\n    token_payload = absl::StrCat(host_, new_expires_, access_token_);\n  } else {\n    token_payload = absl::StrCat(host_, new_expires_);\n  }\n\n  auto& crypto_util = Envoy::Common::Crypto::UtilitySingleton::get();\n\n  auto token_secret = config_->tokenSecret();\n  std::vector<uint8_t> token_secret_vec(token_secret.begin(), token_secret.end());\n  const std::string pre_encoded_token =\n      Hex::encode(crypto_util.getSha256Hmac(token_secret_vec, token_payload));\n  std::string encoded_token;\n  absl::Base64Escape(pre_encoded_token, &encoded_token);\n\n  // We use HTTP Only cookies for the HMAC and Expiry.\n  const std::string cookie_tail = fmt::format(CookieTailFormatString, new_expires_);\n  const std::string cookie_tail_http_only =\n      fmt::format(CookieTailHttpOnlyFormatString, new_expires_);\n\n  // At this point we have all of the pieces needed to authorize a user that did not originally\n  // have a bearer access token. 
Now, we construct a redirect request to return the user to their\n  // previous state and additionally set the OAuth cookies in browser.\n  // The redirection should result in successfully passing this filter.\n  Http::ResponseHeaderMapPtr response_headers{Http::createHeaderMap<Http::ResponseHeaderMapImpl>(\n      {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::Found))}})};\n\n  response_headers->addReferenceKey(\n      Http::Headers::get().SetCookie,\n      absl::StrCat(\"OauthHMAC=\", encoded_token, cookie_tail_http_only));\n  response_headers->addReferenceKey(\n      Http::Headers::get().SetCookie,\n      absl::StrCat(\"OauthExpires=\", new_expires_, cookie_tail_http_only));\n\n  // If opted-in, we also create a new Bearer cookie for the authorization token provided by the\n  // auth server.\n  if (config_->forwardBearerToken()) {\n    response_headers->addReferenceKey(Http::Headers::get().SetCookie,\n                                      absl::StrCat(\"BearerToken=\", access_token_, cookie_tail));\n  }\n\n  response_headers->setLocation(state_);\n\n  decoder_callbacks_->encodeHeaders(std::move(response_headers), true, REDIRECT_LOGGED_IN);\n  config_->stats().oauth_success_.inc();\n  decoder_callbacks_->continueDecoding();\n}\n\nvoid OAuth2Filter::sendUnauthorizedResponse() {\n  config_->stats().oauth_failure_.inc();\n  decoder_callbacks_->sendLocalReply(Http::Code::Unauthorized, UnauthorizedBodyMessage, nullptr,\n                                     absl::nullopt, EMPTY_STRING);\n}\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/oauth2/filter.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/matchers.h\"\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n#include \"envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stream_info/stream_info.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/matchers.h\"\n#include \"common/config/datasource.h\"\n#include \"common/formatter/substitution_formatter.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/rest_api_fetcher.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n#include \"extensions/filters/http/oauth2/oauth.h\"\n#include \"extensions/filters/http/oauth2/oauth_client.h\"\n\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\nclass OAuth2Client;\n\n// Helper class used to fetch secrets (usually from SDS).\nclass SecretReader {\npublic:\n  virtual ~SecretReader() = default;\n  virtual const std::string& clientSecret() const PURE;\n  virtual const std::string& tokenSecret() const PURE;\n};\n\nclass SDSSecretReader : public SecretReader {\npublic:\n  SDSSecretReader(Secret::GenericSecretConfigProviderSharedPtr client_secret_provider,\n                  Secret::GenericSecretConfigProviderSharedPtr token_secret_provider, Api::Api& api)\n      : api_(api), client_secret_provider_(std::move(client_secret_provider)),\n        token_secret_provider_(std::move(token_secret_provider)) {\n    readAndWatchSecret(client_secret_, *client_secret_provider_);\n    readAndWatchSecret(token_secret_, *token_secret_provider_);\n  }\n\n  const std::string& 
clientSecret() const override { return client_secret_; }\n\n  const std::string& tokenSecret() const override { return token_secret_; }\n\nprivate:\n  void readAndWatchSecret(std::string& value,\n                          Secret::GenericSecretConfigProvider& secret_provider) {\n    const auto* secret = secret_provider.secret();\n    if (secret != nullptr) {\n      value = Config::DataSource::read(secret->secret(), true, api_);\n    }\n\n    secret_provider.addUpdateCallback([&secret_provider, this, &value]() {\n      const auto* secret = secret_provider.secret();\n      if (secret != nullptr) {\n        value = Config::DataSource::read(secret->secret(), true, api_);\n      }\n    });\n  }\n  std::string client_secret_;\n  std::string token_secret_;\n  Api::Api& api_;\n\n  Secret::GenericSecretConfigProviderSharedPtr client_secret_provider_;\n  Secret::GenericSecretConfigProviderSharedPtr token_secret_provider_;\n};\n\n/**\n * All stats for the OAuth filter. @see stats_macros.h\n */\n#define ALL_OAUTH_FILTER_STATS(COUNTER)                                                            \\\n  COUNTER(oauth_unauthorized_rq)                                                                   \\\n  COUNTER(oauth_failure)                                                                           \\\n  COUNTER(oauth_success)\n\n/**\n * Wrapper struct filter stats. 
@see stats_macros.h\n */\nstruct FilterStats {\n  ALL_OAUTH_FILTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * This class encapsulates all data needed for the filter to operate so that we don't pass around\n * raw protobufs and other arbitrary data.\n */\nclass FilterConfig {\npublic:\n  FilterConfig(const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config& proto_config,\n               Upstream::ClusterManager& cluster_manager,\n               std::shared_ptr<SecretReader> secret_reader, Stats::Scope& scope,\n               const std::string& stats_prefix);\n  const std::string& clusterName() const { return oauth_token_endpoint_.cluster(); }\n  const std::string& clientId() const { return client_id_; }\n  bool forwardBearerToken() const { return forward_bearer_token_; }\n  const std::vector<Http::HeaderUtility::HeaderData>& passThroughMatchers() const {\n    return pass_through_header_matchers_;\n  }\n\n  const envoy::config::core::v3::HttpUri& oauthTokenEndpoint() const {\n    return oauth_token_endpoint_;\n  }\n  const std::string& authorizationEndpoint() const { return authorization_endpoint_; }\n  const std::string& redirectUri() const { return redirect_uri_; }\n  const Matchers::PathMatcher& redirectPathMatcher() const { return redirect_matcher_; }\n  const Matchers::PathMatcher& signoutPath() const { return signout_path_; }\n  std::string clientSecret() const { return secret_reader_->clientSecret(); }\n  std::string tokenSecret() const { return secret_reader_->tokenSecret(); }\n  FilterStats& stats() { return stats_; }\n\nprivate:\n  static FilterStats generateStats(const std::string& prefix, Stats::Scope& scope);\n\n  const envoy::config::core::v3::HttpUri oauth_token_endpoint_;\n  const std::string authorization_endpoint_;\n  const std::string client_id_;\n  const std::string redirect_uri_;\n  const Matchers::PathMatcher redirect_matcher_;\n  const Matchers::PathMatcher signout_path_;\n  std::shared_ptr<SecretReader> secret_reader_;\n  
FilterStats stats_;\n  const bool forward_bearer_token_ : 1;\n  const std::vector<Http::HeaderUtility::HeaderData> pass_through_header_matchers_;\n};\n\nusing FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;\n\n/**\n * An OAuth cookie validator:\n * 1. extracts cookies from a request\n * 2. HMAC/encodes the values\n * 3. Compares the result to the cookie HMAC\n * 4. Checks that the `expires` value is valid relative to current time\n *\n * Required components:\n * - header map\n * - secret\n */\nclass CookieValidator {\npublic:\n  virtual ~CookieValidator() = default;\n  virtual const std::string& token() const PURE;\n  virtual void setParams(const Http::RequestHeaderMap& headers, const std::string& secret) PURE;\n  virtual bool isValid() const PURE;\n};\n\nclass OAuth2CookieValidator : public CookieValidator {\npublic:\n  explicit OAuth2CookieValidator(TimeSource& time_source) : time_source_(time_source) {}\n\n  const std::string& token() const override { return token_; }\n  void setParams(const Http::RequestHeaderMap& headers, const std::string& secret) override;\n  bool isValid() const override;\n  bool hmacIsValid() const;\n  bool timestampIsValid() const;\n\nprivate:\n  std::string token_;\n  std::string expires_;\n  std::string hmac_;\n  std::vector<uint8_t> secret_;\n  absl::string_view host_;\n  TimeSource& time_source_;\n};\n\n/**\n * The filter is the primary entry point for the OAuth workflow. Its responsibilities are to\n * receive incoming requests and decide at what state of the OAuth workflow they are in. 
Logic\n * beyond that is broken into component classes.\n */\nclass OAuth2Filter : public Http::PassThroughDecoderFilter, public FilterCallbacks {\npublic:\n  OAuth2Filter(FilterConfigSharedPtr config, std::unique_ptr<OAuth2Client>&& oauth_client,\n               TimeSource& time_source);\n\n  // Http::PassThroughDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override;\n\n  // FilterCallbacks\n  void onGetAccessTokenSuccess(const std::string& access_code,\n                               std::chrono::seconds expires_in) override;\n  // a catch-all function used for request failures. we don't retry, as a user can simply refresh\n  // the page in the case of a network blip.\n  void sendUnauthorizedResponse() override;\n\n  void finishFlow();\n\nprivate:\n  friend class OAuth2Test;\n\n  std::shared_ptr<CookieValidator> validator_;\n\n  // wrap up some of these in a UserData struct or something...\n  std::string auth_code_;\n  std::string access_token_; // TODO - see if we can avoid this being a member variable\n  std::string new_expires_;\n  absl::string_view host_;\n  std::string state_;\n  bool found_bearer_token_{false};\n  Http::RequestHeaderMap* request_headers_{nullptr};\n\n  std::unique_ptr<OAuth2Client> oauth_client_;\n  FilterConfigSharedPtr config_;\n  TimeSource& time_source_;\n\n  // Determines whether or not the current request can skip the entire OAuth flow (HMAC is valid,\n  // connection is mTLS, etc.)\n  bool canSkipOAuth(Http::RequestHeaderMap& headers) const;\n\n  Http::FilterHeadersStatus signOutUser(const Http::RequestHeaderMap& headers);\n\n  const std::string& bearerPrefix() const;\n  std::string extractAccessToken(const Http::RequestHeaderMap& headers) const;\n};\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/oauth2/oauth.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\n/**\n * Callback interface to enable the OAuth client to trigger actions upon completion of an\n * asynchronous HTTP request/response.\n */\nclass FilterCallbacks {\npublic:\n  virtual ~FilterCallbacks() = default;\n\n  virtual void onGetAccessTokenSuccess(const std::string& access_token,\n                                       std::chrono::seconds expires_in) PURE;\n\n  virtual void sendUnauthorizedResponse() PURE;\n};\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/oauth2/oauth_client.cc",
    "content": "#include \"extensions/filters/http/oauth2/oauth_client.h\"\n\n#include <chrono>\n\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/message.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"source/extensions/filters/http/oauth2/oauth_response.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\nnamespace {\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    authorization_handle(Http::CustomHeaders::get().Authorization);\n\nconstexpr const char* GetAccessTokenBodyFormatString =\n    \"grant_type=authorization_code&code={0}&client_id={1}&client_secret={2}&redirect_uri={3}\";\n\n} // namespace\n\nvoid OAuth2ClientImpl::asyncGetAccessToken(const std::string& auth_code,\n                                           const std::string& client_id, const std::string& secret,\n                                           const std::string& cb_url) {\n  const auto encoded_client_id = Http::Utility::PercentEncoding::encode(client_id, \":/=&?\");\n  const auto encoded_secret = Http::Utility::PercentEncoding::encode(secret, \":/=&?\");\n  const auto encoded_cb_url = Http::Utility::PercentEncoding::encode(cb_url, \":/=&?\");\n\n  Http::RequestMessagePtr request = createPostRequest();\n  const std::string body = fmt::format(GetAccessTokenBodyFormatString, auth_code, encoded_client_id,\n                                       encoded_secret, encoded_cb_url);\n  ENVOY_LOG(debug, \"Dispatching OAuth request for access token.\");\n  dispatchRequest(std::move(request));\n\n  ASSERT(state_ == OAuthState::Idle);\n  state_ = OAuthState::PendingAccessToken;\n}\n\nvoid 
OAuth2ClientImpl::dispatchRequest(Http::RequestMessagePtr&& msg) {\n  in_flight_request_ =\n      cm_.httpAsyncClientForCluster(uri_.cluster())\n          .send(std::move(msg), *this,\n                Http::AsyncClient::RequestOptions().setTimeout(\n                    std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(uri_, timeout))));\n}\n\nvoid OAuth2ClientImpl::onSuccess(const Http::AsyncClient::Request&,\n                                 Http::ResponseMessagePtr&& message) {\n  in_flight_request_ = nullptr;\n\n  ASSERT(state_ == OAuthState::PendingAccessToken);\n  state_ = OAuthState::Idle;\n\n  // Check that the auth cluster returned a happy response.\n  const auto response_code = message->headers().Status()->value().getStringView();\n  if (response_code != \"200\") {\n    ENVOY_LOG(debug, \"Oauth response code: {}\", response_code);\n    parent_->sendUnauthorizedResponse();\n    return;\n  }\n\n  const std::string response_body = message->bodyAsString();\n\n  envoy::extensions::http_filters::oauth2::OAuthResponse response;\n  try {\n    MessageUtil::loadFromJson(response_body, response, ProtobufMessage::getNullValidationVisitor());\n  } catch (EnvoyException& e) {\n    ENVOY_LOG(debug, \"Error parsing response body, received exception: {}\", e.what());\n    ENVOY_LOG(debug, \"Response body: {}\", response_body);\n    parent_->sendUnauthorizedResponse();\n    return;\n  }\n\n  // TODO(snowp): Should this be a pgv validation instead? 
A more readable log\n  // message might be good enough reason to do this manually?\n  if (!response.has_access_token() || !response.has_expires_in()) {\n    ENVOY_LOG(debug, \"No access token or expiration after asyncGetAccessToken\");\n    parent_->sendUnauthorizedResponse();\n    return;\n  }\n\n  const std::string access_token{PROTOBUF_GET_WRAPPED_REQUIRED(response, access_token)};\n  const std::chrono::seconds expires_in{PROTOBUF_GET_WRAPPED_REQUIRED(response, expires_in)};\n  parent_->onGetAccessTokenSuccess(access_token, expires_in);\n}\n\nvoid OAuth2ClientImpl::onFailure(const Http::AsyncClient::Request&,\n                                 Http::AsyncClient::FailureReason) {\n  ENVOY_LOG(debug, \"OAuth request failed.\");\n  in_flight_request_ = nullptr;\n  parent_->sendUnauthorizedResponse();\n}\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/oauth2/oauth_client.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/message.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/filters/http/oauth2/oauth.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\n/**\n * An OAuth client abstracts away everything regarding how to communicate with\n * the OAuth server. The filter should only need to invoke the functions here,\n * and then wait in a `StopIteration` mode until a callback is triggered.\n */\nclass OAuth2Client : public Http::AsyncClient::Callbacks {\npublic:\n  virtual void asyncGetAccessToken(const std::string& auth_code, const std::string& client_id,\n                                   const std::string& secret, const std::string& cb_url) PURE;\n  virtual void setCallbacks(FilterCallbacks& callbacks) PURE;\n\n  // Http::AsyncClient::Callbacks\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& m) override PURE;\n  void onFailure(const Http::AsyncClient::Request&,\n                 Http::AsyncClient::FailureReason f) override PURE;\n};\n\nclass OAuth2ClientImpl : public OAuth2Client, Logger::Loggable<Logger::Id::upstream> {\npublic:\n  OAuth2ClientImpl(Upstream::ClusterManager& cm, const envoy::config::core::v3::HttpUri& uri)\n      : cm_(cm), uri_(uri) {}\n\n  ~OAuth2ClientImpl() override {\n    if (in_flight_request_ != nullptr) {\n      in_flight_request_->cancel();\n    }\n  }\n\n  // OAuth2Client\n  /**\n   * Request the access token from the OAuth server. 
Calls the `onSuccess` on `onFailure` callbacks.\n   */\n  void asyncGetAccessToken(const std::string& auth_code, const std::string& client_id,\n                           const std::string& secret, const std::string& cb_url) override;\n\n  void setCallbacks(FilterCallbacks& callbacks) override { parent_ = &callbacks; }\n\n  // AsyncClient::Callbacks\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& m) override;\n  void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason f) override;\n  void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&,\n                                    const Http::ResponseHeaderMap*) override {}\n\nprivate:\n  friend class OAuth2ClientTest;\n\n  FilterCallbacks* parent_{nullptr};\n\n  Upstream::ClusterManager& cm_;\n  const envoy::config::core::v3::HttpUri uri_;\n\n  // Tracks any outstanding in-flight requests, allowing us to cancel the request\n  // if the filter ends before the request completes.\n  Http::AsyncClient::Request* in_flight_request_{nullptr};\n\n  enum class OAuthState { Idle, PendingAccessToken };\n\n  // Due to the asynchronous nature of this functionality, it is helpful to have managed state which\n  // is tracked here.\n  OAuthState state_{OAuthState::Idle};\n\n  /**\n   * Begins execution of an asynchronous request.\n   *\n   * @param request the HTTP request to be executed.\n   */\n  void dispatchRequest(Http::RequestMessagePtr&& request);\n\n  Http::RequestMessagePtr createPostRequest() {\n    auto request = Http::Utility::prepareHeaders(uri_);\n    request->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post);\n    request->headers().setContentType(Http::Headers::get().ContentTypeValues.FormUrlEncoded);\n    return request;\n  }\n};\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/oauth2/oauth_response.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.extensions.http_filters.oauth2;\n\nimport \"google/protobuf/wrappers.proto\";\n\nmessage OAuthResponse {\n  google.protobuf.StringValue access_token = 1;\n  google.protobuf.UInt64Value expires_in = 2;\n}"
  },
  {
    "path": "source/extensions/filters/http/on_demand/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# On-demand RDS update HTTP filter\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"on_demand_update_lib\",\n    srcs = [\"on_demand_update.cc\"],\n    hdrs = [\"on_demand_update.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) classify and clean up.\n    extra_visibility = [\n        \"//test/common/access_log:__subpackages__\",\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/on_demand:on_demand_update_lib\",\n        \"@envoy_api//envoy/config/filter/http/on_demand/v2:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/on_demand/config.cc",
    "content": "#include \"extensions/filters/http/on_demand/config.h\"\n\n#include \"envoy/config/filter/http/on_demand/v2/on_demand.pb.validate.h\"\n\n#include \"extensions/filters/http/on_demand/on_demand_update.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OnDemand {\n\nHttp::FilterFactoryCb OnDemandFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::config::filter::http::on_demand::v2::OnDemand&, const std::string&,\n    Server::Configuration::FactoryContext&) {\n  return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(\n        std::make_shared<Extensions::HttpFilters::OnDemand::OnDemandRouteUpdate>());\n  };\n}\n\n/**\n * Static registration for the on-demand filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(OnDemandFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace OnDemand\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/on_demand/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/filter/http/on_demand/v2/on_demand.pb.h\"\n#include \"envoy/config/filter/http/on_demand/v2/on_demand.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OnDemand {\n\n/**\n * Config registration for the OnDemand filter. @see NamedHttpFilterConfigFactory.\n */\nclass OnDemandFilterFactory\n    : public Common::FactoryBase<envoy::config::filter::http::on_demand::v2::OnDemand> {\npublic:\n  OnDemandFilterFactory() : FactoryBase(HttpFilterNames::get().OnDemand) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::config::filter::http::on_demand::v2::OnDemand& proto_config, const std::string&,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace OnDemand\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/on_demand/on_demand_update.cc",
    "content": "#include \"extensions/filters/http/on_demand/on_demand_update.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/codes.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OnDemand {\n\nHttp::FilterHeadersStatus OnDemandRouteUpdate::decodeHeaders(Http::RequestHeaderMap&, bool) {\n\n  if (callbacks_->route() != nullptr) {\n    filter_iteration_state_ = Http::FilterHeadersStatus::Continue;\n    return filter_iteration_state_;\n  }\n  // decodeHeaders() is interrupted.\n  decode_headers_active_ = true;\n  route_config_updated_callback_ =\n      std::make_shared<Http::RouteConfigUpdatedCallback>(Http::RouteConfigUpdatedCallback(\n          [this](bool route_exists) -> void { onRouteConfigUpdateCompletion(route_exists); }));\n  filter_iteration_state_ = Http::FilterHeadersStatus::StopIteration;\n  callbacks_->requestRouteConfigUpdate(route_config_updated_callback_);\n  // decodeHeaders() is completed.\n  decode_headers_active_ = false;\n  return filter_iteration_state_;\n}\n\nHttp::FilterDataStatus OnDemandRouteUpdate::decodeData(Buffer::Instance&, bool) {\n  return filter_iteration_state_ == Http::FilterHeadersStatus::StopIteration\n             ? Http::FilterDataStatus::StopIterationAndWatermark\n             : Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus OnDemandRouteUpdate::decodeTrailers(Http::RequestTrailerMap&) {\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid OnDemandRouteUpdate::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n}\n\n// A weak_ptr copy of the route_config_updated_callback_ is kept by RdsRouteConfigProviderImpl\n// in config_update_callbacks_. 
By resetting the pointer in onDestroy() callback we ensure\n// that this filter/filter-chain will not be resumed if the corresponding has been closed\nvoid OnDemandRouteUpdate::onDestroy() { route_config_updated_callback_.reset(); }\n\n// This is the callback which is called when an update requested in requestRouteConfigUpdate()\n// has been propagated to workers, at which point the request processing is restarted from the\n// beginning.\nvoid OnDemandRouteUpdate::onRouteConfigUpdateCompletion(bool route_exists) {\n  filter_iteration_state_ = Http::FilterHeadersStatus::Continue;\n\n  // Don't call continueDecoding in the middle of decodeHeaders()\n  if (decode_headers_active_) {\n    return;\n  }\n\n  if (route_exists &&                  // route can be resolved after an on-demand\n                                       // VHDS update\n      !callbacks_->decodingBuffer() && // Redirects with body not yet supported.\n      callbacks_->recreateStream()) {\n    return;\n  }\n\n  // route cannot be resolved after an on-demand VHDS update or\n  // recreating stream failed, continue the filter-chain\n  callbacks_->continueDecoding();\n}\n\n} // namespace OnDemand\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/on_demand/on_demand_update.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OnDemand {\n\nclass OnDemandRouteUpdate : public Http::StreamDecoderFilter {\npublic:\n  OnDemandRouteUpdate() = default;\n\n  void onRouteConfigUpdateCompletion(bool route_exists);\n\n  void setFilterIterationState(Envoy::Http::FilterHeadersStatus status) {\n    filter_iteration_state_ = status;\n  }\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\n  void onDestroy() override;\n\nprivate:\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n  Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_callback_;\n  Envoy::Http::FilterHeadersStatus filter_iteration_state_{Http::FilterHeadersStatus::Continue};\n  bool decode_headers_active_{false};\n};\n\n} // namespace OnDemand\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/original_src/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# A filter for mirroring the downstream remote address on the upstream's source.\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"config_lib\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    deps = [\"@envoy_api//envoy/extensions/filters/http/original_src/v3:pkg_cc_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"original_src_lib\",\n    srcs = [\"original_src.cc\"],\n    hdrs = [\"original_src.h\"],\n    deps = [\n        \":config_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/extensions/filters/common/original_src:socket_option_factory_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",  # The extension build system requires a library named config\n    srcs = [\"original_src_config_factory.cc\"],\n    hdrs = [\"original_src_config_factory.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    status = \"alpha\",\n    deps = [\n        \":config_lib\",\n        \":original_src_lib\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/original_src/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/original_src/config.cc",
    "content": "#include \"extensions/filters/http/original_src/config.h\"\n\n#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OriginalSrc {\n\nConfig::Config(const envoy::extensions::filters::http::original_src::v3::OriginalSrc& config)\n    : mark_(config.mark()) {}\n\n} // namespace OriginalSrc\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/original_src/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OriginalSrc {\nclass Config {\npublic:\n  Config() = default;\n  explicit Config(const envoy::extensions::filters::http::original_src::v3::OriginalSrc& config);\n\n  uint32_t mark() const { return mark_; }\n\nprivate:\n  uint32_t mark_ = 0;\n};\n} // namespace OriginalSrc\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/original_src/original_src.cc",
    "content": "#include \"extensions/filters/http/original_src/original_src.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/filters/common/original_src/socket_option_factory.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OriginalSrc {\n\nOriginalSrcFilter::OriginalSrcFilter(const Config& config) : config_(config) {}\n\nvoid OriginalSrcFilter::onDestroy() {}\n\nHttp::FilterHeadersStatus OriginalSrcFilter::decodeHeaders(Http::RequestHeaderMap&, bool) {\n  const auto downstream_address = callbacks_->streamInfo().downstreamRemoteAddress();\n  ASSERT(downstream_address);\n\n  if (downstream_address->type() != Network::Address::Type::Ip) {\n    // Nothing we can do with this.\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  ENVOY_LOG(debug,\n            \"Got a new connection in the original_src filter for address {}. Marking with {}\",\n            downstream_address->asString(), config_.mark());\n\n  const auto options_to_add = Filters::Common::OriginalSrc::buildOriginalSrcOptions(\n      std::move(downstream_address), config_.mark());\n  callbacks_->addUpstreamSocketOptions(options_to_add);\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus OriginalSrcFilter::decodeData(Buffer::Instance&, bool) {\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus OriginalSrcFilter::decodeTrailers(Http::RequestTrailerMap&) {\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid OriginalSrcFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n}\n\n} // namespace OriginalSrc\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/original_src/original_src.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/network/address.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/http/original_src/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OriginalSrc {\n\n/**\n * Implements the Original Src http filter. This filter places the downstream source address of the\n * request, as determined by the stream's `downstreamRemoteAddress()`, into an option which will  be\n * used to partition upstream connections. This does not support non-ip (e.g. AF_UNIX) connections;\n * they will use the same address they would have had this filter not been installed.\n */\nclass OriginalSrcFilter : public Http::StreamDecoderFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  explicit OriginalSrcFilter(const Config& config);\n\n  // Http::StreamFilterBase\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\nprivate:\n  Config config_;\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n};\n\n} // namespace OriginalSrc\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/original_src/original_src_config_factory.cc",
    "content": "#include \"extensions/filters/http/original_src/original_src_config_factory.h\"\n\n#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.h\"\n#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/original_src/config.h\"\n#include \"extensions/filters/http/original_src/original_src.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OriginalSrc {\n\nHttp::FilterFactoryCb OriginalSrcConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::original_src::v3::OriginalSrc& proto_config,\n    const std::string&, Server::Configuration::FactoryContext&) {\n  Config config(proto_config);\n  return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(std::make_shared<OriginalSrcFilter>(config));\n  };\n}\n\n/**\n * Static registration for the original_src filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(OriginalSrcConfigFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace OriginalSrc\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/original_src/original_src_config_factory.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.h\"\n#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OriginalSrc {\n/**\n * Config registration for the original_src filter.\n */\nclass OriginalSrcConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::original_src::v3::OriginalSrc> {\npublic:\n  OriginalSrcConfigFactory() : FactoryBase(HttpFilterNames::get().OriginalSrc) {}\n\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::original_src::v3::OriginalSrc& proto_config,\n      const std::string& stat_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace OriginalSrc\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Ratelimit L7 HTTP filter\n# Public docs: docs/root/configuration/http_filters/rate_limit_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"ratelimit_lib\",\n    srcs = [\"ratelimit.cc\"],\n    hdrs = [\"ratelimit.h\"],\n    deps = [\n        \":ratelimit_headers_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/ratelimit:ratelimit_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/router:config_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_client_interface\",\n        \"//source/extensions/filters/common/ratelimit:stat_names_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"ratelimit_headers_lib\",\n    srcs = [\"ratelimit_headers.cc\"],\n    hdrs = [\"ratelimit_headers.h\"],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_client_interface\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":ratelimit_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_client_interface\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        
\"@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/ratelimit/config.cc",
    "content": "#include \"extensions/filters/http/ratelimit/config.h\"\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit_impl.h\"\n#include \"extensions/filters/http/ratelimit/ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RateLimitFilter {\n\nHttp::FilterFactoryCb RateLimitFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::ratelimit::v3::RateLimit& proto_config,\n    const std::string&, Server::Configuration::FactoryContext& context) {\n  ASSERT(!proto_config.domain().empty());\n  FilterConfigSharedPtr filter_config(new FilterConfig(proto_config, context.localInfo(),\n                                                       context.scope(), context.runtime(),\n                                                       context.httpContext()));\n  const std::chrono::milliseconds timeout =\n      std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20));\n\n  return [proto_config, &context, timeout,\n          filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamFilter(std::make_shared<Filter>(\n        filter_config, Filters::Common::RateLimit::rateLimitClient(\n                           context, proto_config.rate_limit_service().grpc_service(), timeout,\n                           proto_config.rate_limit_service().transport_api_version())));\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nRateLimitFilterConfig::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute& proto_config,\n    Server::Configuration::ServerFactoryContext&, 
ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<FilterConfigPerRoute>(proto_config);\n}\n\n/**\n * Static registration for the rate limit filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(RateLimitFilterConfig,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.rate_limit\"};\n\n} // namespace RateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ratelimit/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.validate.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit.h\"\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RateLimitFilter {\n\n/**\n * Config registration for the rate limit filter. @see NamedHttpFilterConfigFactory.\n */\nclass RateLimitFilterConfig\n    : public Common::FactoryBase<\n          envoy::extensions::filters::http::ratelimit::v3::RateLimit,\n          envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute> {\npublic:\n  RateLimitFilterConfig() : FactoryBase(HttpFilterNames::get().RateLimit) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::ratelimit::v3::RateLimit& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute& proto_config,\n      Server::Configuration::ServerFactoryContext& context,\n      ProtobufMessage::ValidationVisitor& validator) override;\n};\n\n} // namespace RateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ratelimit/ratelimit.cc",
    "content": "#include \"extensions/filters/http/ratelimit/ratelimit.h\"\n\n#include <string>\n#include <vector>\n\n#include \"envoy/http/codes.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/router/config_impl.h\"\n\n#include \"extensions/filters/http/ratelimit/ratelimit_headers.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RateLimitFilter {\n\nstruct RcDetailsValues {\n  // This request went above the configured limits for the rate limit filter.\n  const std::string RateLimited = \"request_rate_limited\";\n  // The rate limiter encountered a failure, and was configured to fail-closed.\n  const std::string RateLimitError = \"rate_limiter_error\";\n};\nusing RcDetails = ConstSingleton<RcDetailsValues>;\n\nvoid Filter::initiateCall(const Http::RequestHeaderMap& headers) {\n  const bool is_internal_request = Http::HeaderUtility::isEnvoyInternalRequest(headers);\n  if ((is_internal_request && config_->requestType() == FilterRequestType::External) ||\n      (!is_internal_request && config_->requestType() == FilterRequestType::Internal)) {\n    return;\n  }\n\n  Router::RouteConstSharedPtr route = callbacks_->route();\n  if (!route || !route->routeEntry()) {\n    return;\n  }\n\n  cluster_ = callbacks_->clusterInfo();\n  if (!cluster_) {\n    return;\n  }\n\n  std::vector<Envoy::RateLimit::Descriptor> descriptors;\n\n  const Router::RouteEntry* route_entry = route->routeEntry();\n  // Get all applicable rate limit policy entries for the route.\n  populateRateLimitDescriptors(route_entry->rateLimitPolicy(), descriptors, route_entry, headers);\n\n  VhRateLimitOptions vh_rate_limit_option = getVirtualHostRateLimitOption(route);\n\n  switch (vh_rate_limit_option) {\n  case VhRateLimitOptions::Ignore:\n    break;\n 
 case VhRateLimitOptions::Include:\n    populateRateLimitDescriptors(route_entry->virtualHost().rateLimitPolicy(), descriptors,\n                                 route_entry, headers);\n    break;\n  case VhRateLimitOptions::Override:\n    if (route_entry->rateLimitPolicy().empty()) {\n      populateRateLimitDescriptors(route_entry->virtualHost().rateLimitPolicy(), descriptors,\n                                   route_entry, headers);\n    }\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  if (!descriptors.empty()) {\n    state_ = State::Calling;\n    initiating_call_ = true;\n    client_->limit(*this, config_->domain(), descriptors, callbacks_->activeSpan());\n    initiating_call_ = false;\n  }\n}\n\nHttp::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  if (!config_->runtime().snapshot().featureEnabled(\"ratelimit.http_filter_enabled\", 100)) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  request_headers_ = &headers;\n  initiateCall(headers);\n  return (state_ == State::Calling || state_ == State::Responded)\n             ? Http::FilterHeadersStatus::StopIteration\n             : Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus Filter::decodeData(Buffer::Instance&, bool) {\n  ASSERT(state_ != State::Responded);\n  if (state_ != State::Calling) {\n    return Http::FilterDataStatus::Continue;\n  }\n  // If the request is too large, stop reading new data until the buffer drains.\n  return Http::FilterDataStatus::StopIterationAndWatermark;\n}\n\nHttp::FilterTrailersStatus Filter::decodeTrailers(Http::RequestTrailerMap&) {\n  ASSERT(state_ != State::Responded);\n  return state_ == State::Calling ? 
Http::FilterTrailersStatus::StopIteration\n                                  : Http::FilterTrailersStatus::Continue;\n}\n\nvoid Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n}\n\nHttp::FilterHeadersStatus Filter::encode100ContinueHeaders(Http::ResponseHeaderMap&) {\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) {\n  populateResponseHeaders(headers);\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus Filter::encodeData(Buffer::Instance&, bool) {\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus Filter::encodeTrailers(Http::ResponseTrailerMap&) {\n  return Http::FilterTrailersStatus::Continue;\n}\n\nHttp::FilterMetadataStatus Filter::encodeMetadata(Http::MetadataMap&) {\n  return Http::FilterMetadataStatus::Continue;\n}\n\nvoid Filter::setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks&) {}\n\nvoid Filter::onDestroy() {\n  if (state_ == State::Calling) {\n    state_ = State::Complete;\n    client_->cancel();\n  }\n}\n\nvoid Filter::complete(Filters::Common::RateLimit::LimitStatus status,\n                      Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses,\n                      Http::ResponseHeaderMapPtr&& response_headers_to_add,\n                      Http::RequestHeaderMapPtr&& request_headers_to_add) {\n  state_ = State::Complete;\n  response_headers_to_add_ = std::move(response_headers_to_add);\n  Http::HeaderMapPtr req_headers_to_add = std::move(request_headers_to_add);\n  Stats::StatName empty_stat_name;\n  Filters::Common::RateLimit::StatNames& stat_names = config_->statNames();\n\n  switch (status) {\n  case Filters::Common::RateLimit::LimitStatus::OK:\n    cluster_->statsScope().counterFromStatName(stat_names.ok_).inc();\n    break;\n  case Filters::Common::RateLimit::LimitStatus::Error:\n    
cluster_->statsScope().counterFromStatName(stat_names.error_).inc();\n    break;\n  case Filters::Common::RateLimit::LimitStatus::OverLimit:\n    cluster_->statsScope().counterFromStatName(stat_names.over_limit_).inc();\n    Http::CodeStats::ResponseStatInfo info{config_->scope(),\n                                           cluster_->statsScope(),\n                                           empty_stat_name,\n                                           enumToInt(Http::Code::TooManyRequests),\n                                           true,\n                                           empty_stat_name,\n                                           empty_stat_name,\n                                           empty_stat_name,\n                                           empty_stat_name,\n                                           false};\n    httpContext().codeStats().chargeResponseStat(info);\n    if (response_headers_to_add_ == nullptr) {\n      response_headers_to_add_ = Http::ResponseHeaderMapImpl::create();\n    }\n    response_headers_to_add_->setReferenceEnvoyRateLimited(\n        Http::Headers::get().EnvoyRateLimitedValues.True);\n    break;\n  }\n\n  if (config_->enableXRateLimitHeaders()) {\n    Http::ResponseHeaderMapPtr rate_limit_headers =\n        XRateLimitHeaderUtils::create(std::move(descriptor_statuses));\n    if (response_headers_to_add_ == nullptr) {\n      response_headers_to_add_ = Http::ResponseHeaderMapImpl::create();\n    }\n    Http::HeaderUtility::addHeaders(*response_headers_to_add_, *rate_limit_headers);\n  } else {\n    descriptor_statuses = nullptr;\n  }\n\n  if (status == Filters::Common::RateLimit::LimitStatus::OverLimit &&\n      config_->runtime().snapshot().featureEnabled(\"ratelimit.http_filter_enforcing\", 100)) {\n    state_ = State::Responded;\n    callbacks_->sendLocalReply(\n        Http::Code::TooManyRequests, \"\",\n        [this](Http::HeaderMap& headers) { populateResponseHeaders(headers); },\n        
config_->rateLimitedGrpcStatus(), RcDetails::get().RateLimited);\n    callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimited);\n  } else if (status == Filters::Common::RateLimit::LimitStatus::Error) {\n    if (config_->failureModeAllow()) {\n      cluster_->statsScope().counterFromStatName(stat_names.failure_mode_allowed_).inc();\n      if (!initiating_call_) {\n        appendRequestHeaders(req_headers_to_add);\n        callbacks_->continueDecoding();\n      }\n    } else {\n      state_ = State::Responded;\n      callbacks_->sendLocalReply(Http::Code::InternalServerError, \"\", nullptr, absl::nullopt,\n                                 RcDetails::get().RateLimitError);\n      callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError);\n    }\n  } else if (!initiating_call_) {\n    appendRequestHeaders(req_headers_to_add);\n    callbacks_->continueDecoding();\n  }\n}\n\nvoid Filter::populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_limit_policy,\n                                          std::vector<RateLimit::Descriptor>& descriptors,\n                                          const Router::RouteEntry* route_entry,\n                                          const Http::HeaderMap& headers) const {\n  for (const Router::RateLimitPolicyEntry& rate_limit :\n       rate_limit_policy.getApplicableRateLimit(config_->stage())) {\n    const std::string& disable_key = rate_limit.disableKey();\n    if (!disable_key.empty() &&\n        !config_->runtime().snapshot().featureEnabled(\n            fmt::format(\"ratelimit.{}.http_filter_enabled\", disable_key), 100)) {\n      continue;\n    }\n    rate_limit.populateDescriptors(*route_entry, descriptors, config_->localInfo().clusterName(),\n                                   headers, *callbacks_->streamInfo().downstreamRemoteAddress(),\n                                   &callbacks_->streamInfo().dynamicMetadata());\n  }\n}\n\nvoid 
Filter::populateResponseHeaders(Http::HeaderMap& response_headers) {\n  if (response_headers_to_add_) {\n    Http::HeaderUtility::addHeaders(response_headers, *response_headers_to_add_);\n    response_headers_to_add_ = nullptr;\n  }\n}\n\nvoid Filter::appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add) {\n  if (request_headers_to_add && request_headers_) {\n    Http::HeaderUtility::addHeaders(*request_headers_, *request_headers_to_add);\n    request_headers_to_add = nullptr;\n  }\n}\n\nVhRateLimitOptions Filter::getVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route) {\n  if (route->routeEntry()->includeVirtualHostRateLimits()) {\n    vh_rate_limits_ = VhRateLimitOptions::Include;\n  } else {\n    const auto* specific_per_route_config =\n        Http::Utility::resolveMostSpecificPerFilterConfig<FilterConfigPerRoute>(\n            HttpFilterNames::get().RateLimit, route);\n    if (specific_per_route_config != nullptr) {\n      switch (specific_per_route_config->virtualHostRateLimits()) {\n      case envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::INCLUDE:\n        vh_rate_limits_ = VhRateLimitOptions::Include;\n        break;\n      case envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::IGNORE:\n        vh_rate_limits_ = VhRateLimitOptions::Ignore;\n        break;\n      case envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::OVERRIDE:\n      default:\n        vh_rate_limits_ = VhRateLimitOptions::Override;\n      }\n    } else {\n      vh_rate_limits_ = VhRateLimitOptions::Override;\n    }\n  }\n  return vh_rate_limits_;\n}\n\n} // namespace RateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ratelimit/ratelimit.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/http/context.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/ratelimit/ratelimit.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit.h\"\n#include \"extensions/filters/common/ratelimit/stat_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RateLimitFilter {\n\n/**\n * Type of requests the filter should apply to.\n */\nenum class FilterRequestType { Internal, External, Both };\n\n/**\n * Type of virtual host rate limit options\n */\nenum class VhRateLimitOptions { Override, Include, Ignore };\n\n/**\n * Global configuration for the HTTP rate limit filter.\n */\nclass FilterConfig {\npublic:\n  FilterConfig(const envoy::extensions::filters::http::ratelimit::v3::RateLimit& config,\n               const LocalInfo::LocalInfo& local_info, Stats::Scope& scope,\n               Runtime::Loader& runtime, Http::Context& http_context)\n      : domain_(config.domain()), stage_(static_cast<uint64_t>(config.stage())),\n        request_type_(config.request_type().empty() ? 
stringToType(\"both\")\n                                                    : stringToType(config.request_type())),\n        local_info_(local_info), scope_(scope), runtime_(runtime),\n        failure_mode_deny_(config.failure_mode_deny()),\n        enable_x_ratelimit_headers_(\n            config.enable_x_ratelimit_headers() ==\n            envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_03),\n        rate_limited_grpc_status_(\n            config.rate_limited_as_resource_exhausted()\n                ? absl::make_optional(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted)\n                : absl::nullopt),\n        http_context_(http_context), stat_names_(scope.symbolTable()) {}\n  const std::string& domain() const { return domain_; }\n  const LocalInfo::LocalInfo& localInfo() const { return local_info_; }\n  uint64_t stage() const { return stage_; }\n  Runtime::Loader& runtime() { return runtime_; }\n  Stats::Scope& scope() { return scope_; }\n  FilterRequestType requestType() const { return request_type_; }\n  bool failureModeAllow() const { return !failure_mode_deny_; }\n  bool enableXRateLimitHeaders() const { return enable_x_ratelimit_headers_; }\n  const absl::optional<Grpc::Status::GrpcStatus> rateLimitedGrpcStatus() const {\n    return rate_limited_grpc_status_;\n  }\n  Http::Context& httpContext() { return http_context_; }\n  Filters::Common::RateLimit::StatNames& statNames() { return stat_names_; }\n\nprivate:\n  static FilterRequestType stringToType(const std::string& request_type) {\n    if (request_type == \"internal\") {\n      return FilterRequestType::Internal;\n    } else if (request_type == \"external\") {\n      return FilterRequestType::External;\n    } else {\n      ASSERT(request_type == \"both\");\n      return FilterRequestType::Both;\n    }\n  }\n\n  const std::string domain_;\n  const uint64_t stage_;\n  const FilterRequestType request_type_;\n  const LocalInfo::LocalInfo& local_info_;\n  Stats::Scope& 
scope_;\n  Runtime::Loader& runtime_;\n  const bool failure_mode_deny_;\n  const bool enable_x_ratelimit_headers_;\n  const absl::optional<Grpc::Status::GrpcStatus> rate_limited_grpc_status_;\n  Http::Context& http_context_;\n  Filters::Common::RateLimit::StatNames stat_names_;\n};\n\nusing FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;\n\nclass FilterConfigPerRoute : public Router::RouteSpecificFilterConfig {\npublic:\n  FilterConfigPerRoute(\n      const envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute& config)\n      : vh_rate_limits_(config.vh_rate_limits()) {}\n\n  envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::VhRateLimitsOptions\n  virtualHostRateLimits() const {\n    return vh_rate_limits_;\n  }\n\nprivate:\n  const envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::VhRateLimitsOptions\n      vh_rate_limits_;\n};\n\n/**\n * HTTP rate limit filter. Depending on the route configuration, this filter calls the global\n * rate limiting service before allowing further filter iteration.\n */\nclass Filter : public Http::StreamFilter, public Filters::Common::RateLimit::RequestCallbacks {\npublic:\n  Filter(FilterConfigSharedPtr config, Filters::Common::RateLimit::ClientPtr&& client)\n      : config_(config), client_(std::move(client)) {}\n\n  // Http::StreamFilterBase\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap& headers) override;\n  Http::FilterHeadersStatus 
encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override;\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override;\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override;\n\n  // RateLimit::RequestCallbacks\n  void complete(Filters::Common::RateLimit::LimitStatus status,\n                Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses,\n                Http::ResponseHeaderMapPtr&& response_headers_to_add,\n                Http::RequestHeaderMapPtr&& request_headers_to_add) override;\n\nprivate:\n  void initiateCall(const Http::RequestHeaderMap& headers);\n  void populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_limit_policy,\n                                    std::vector<Envoy::RateLimit::Descriptor>& descriptors,\n                                    const Router::RouteEntry* route_entry,\n                                    const Http::HeaderMap& headers) const;\n  void populateResponseHeaders(Http::HeaderMap& response_headers);\n  void appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add);\n  VhRateLimitOptions getVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route);\n\n  Http::Context& httpContext() { return config_->httpContext(); }\n\n  enum class State { NotStarted, Calling, Complete, Responded };\n\n  FilterConfigSharedPtr config_;\n  Filters::Common::RateLimit::ClientPtr client_;\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n  State state_{State::NotStarted};\n  VhRateLimitOptions vh_rate_limits_;\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n  bool initiating_call_{};\n  Http::ResponseHeaderMapPtr response_headers_to_add_;\n  Http::RequestHeaderMap* request_headers_{};\n};\n\n} // 
namespace RateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ratelimit/ratelimit_headers.cc",
    "content": "#include \"extensions/filters/http/ratelimit/ratelimit_headers.h\"\n\n#include \"common/http/header_map_impl.h\"\n\n#include \"absl/strings/substitute.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RateLimitFilter {\n\nHttp::ResponseHeaderMapPtr XRateLimitHeaderUtils::create(\n    Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses) {\n  Http::ResponseHeaderMapPtr result = Http::ResponseHeaderMapImpl::create();\n  if (!descriptor_statuses || descriptor_statuses->empty()) {\n    descriptor_statuses = nullptr;\n    return result;\n  }\n\n  absl::optional<envoy::service::ratelimit::v3::RateLimitResponse_DescriptorStatus>\n      min_remaining_limit_status;\n  std::string quota_policy;\n  for (auto&& status : *descriptor_statuses) {\n    if (!status.has_current_limit()) {\n      continue;\n    }\n    if (!min_remaining_limit_status ||\n        status.limit_remaining() < min_remaining_limit_status.value().limit_remaining()) {\n      min_remaining_limit_status.emplace(status);\n    }\n    const uint32_t window = convertRateLimitUnit(status.current_limit().unit());\n    // Constructing the quota-policy per RFC\n    // https://tools.ietf.org/id/draft-polli-ratelimit-headers-02.html#name-ratelimit-limit\n    // Example of the result: `, 10;w=1;name=\"per-ip\", 1000;w=3600`\n    if (window) {\n      // For each descriptor status append `<LIMIT>;w=<WINDOW_IN_SECONDS>`\n      absl::SubstituteAndAppend(&quota_policy, \", $0;$1=$2\",\n                                status.current_limit().requests_per_unit(),\n                                XRateLimitHeaders::get().QuotaPolicyKeys.Window, window);\n      if (!status.current_limit().name().empty()) {\n        // If the descriptor has a name, append `;name=\"<DESCRIPTOR_NAME>\"`\n        absl::SubstituteAndAppend(&quota_policy, \";$0=\\\"$1\\\"\",\n                                  XRateLimitHeaders::get().QuotaPolicyKeys.Name,\n                            
      status.current_limit().name());\n      }\n    }\n  }\n\n  if (min_remaining_limit_status) {\n    const std::string rate_limit_limit = absl::StrCat(\n        min_remaining_limit_status.value().current_limit().requests_per_unit(), quota_policy);\n    result->addReferenceKey(XRateLimitHeaders::get().XRateLimitLimit, rate_limit_limit);\n    result->addReferenceKey(XRateLimitHeaders::get().XRateLimitRemaining,\n                            min_remaining_limit_status.value().limit_remaining());\n    result->addReferenceKey(XRateLimitHeaders::get().XRateLimitReset,\n                            min_remaining_limit_status.value().duration_until_reset().seconds());\n  }\n  descriptor_statuses = nullptr;\n  return result;\n}\n\nuint32_t XRateLimitHeaderUtils::convertRateLimitUnit(\n    const envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::Unit unit) {\n  switch (unit) {\n  case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::SECOND:\n    return 1;\n  case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE:\n    return 60;\n  case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR:\n    return 60 * 60;\n  case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::DAY:\n    return 24 * 60 * 60;\n  case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::UNKNOWN:\n  default:\n    return 0;\n  }\n}\n\n} // namespace RateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/ratelimit/ratelimit_headers.h",
    "content": "#pragma once\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RateLimitFilter {\n\nclass XRateLimitHeaderValues {\npublic:\n  const Http::LowerCaseString XRateLimitLimit{\"x-ratelimit-limit\"};\n  const Http::LowerCaseString XRateLimitRemaining{\"x-ratelimit-remaining\"};\n  const Http::LowerCaseString XRateLimitReset{\"x-ratelimit-reset\"};\n\n  struct {\n    const std::string Window{\"w\"};\n    const std::string Name{\"name\"};\n  } QuotaPolicyKeys;\n};\nusing XRateLimitHeaders = ConstSingleton<XRateLimitHeaderValues>;\n\nclass XRateLimitHeaderUtils {\npublic:\n  static Http::ResponseHeaderMapPtr\n  create(Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses);\n\nprivate:\n  static uint32_t\n  convertRateLimitUnit(envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::Unit unit);\n};\n\n} // namespace RateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/rbac/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/rbac:rbac_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"rbac_filter_lib\",\n    srcs = [\"rbac_filter.cc\"],\n    hdrs = [\"rbac_filter.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/filters/common/rbac:engine_lib\",\n        \"//source/extensions/filters/common/rbac:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/rbac/config.cc",
    "content": "#include \"extensions/filters/http/rbac/config.h\"\n\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/rbac/rbac_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RBACFilter {\n\nHttp::FilterFactoryCb RoleBasedAccessControlFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::rbac::v3::RBAC& proto_config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n\n  auto config = std::make_shared<RoleBasedAccessControlFilterConfig>(proto_config, stats_prefix,\n                                                                     context.scope());\n\n  return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(std::make_shared<RoleBasedAccessControlFilter>(config));\n  };\n}\n\nRouter::RouteSpecificFilterConfigConstSharedPtr\nRoleBasedAccessControlFilterConfigFactory::createRouteSpecificFilterConfigTyped(\n    const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& proto_config,\n    Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) {\n  return std::make_shared<const RoleBasedAccessControlRouteSpecificFilterConfig>(proto_config);\n}\n\n/**\n * Static registration for the RBAC filter. @see RegisterFactory\n */\nREGISTER_FACTORY(RoleBasedAccessControlFilterConfigFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace RBACFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/rbac/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RBACFilter {\n\n/**\n * Config registration for the RBAC filter. @see NamedHttpFilterConfigFactory.\n */\nclass RoleBasedAccessControlFilterConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::rbac::v3::RBAC,\n                                 envoy::extensions::filters::http::rbac::v3::RBACPerRoute> {\npublic:\n  RoleBasedAccessControlFilterConfigFactory() : FactoryBase(HttpFilterNames::get().Rbac) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::rbac::v3::RBAC& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n\n  Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped(\n      const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& proto_config,\n      Server::Configuration::ServerFactoryContext& context,\n      ProtobufMessage::ValidationVisitor& validator) override;\n};\n\n} // namespace RBACFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/rbac/rbac_filter.cc",
    "content": "#include \"extensions/filters/http/rbac/rbac_filter.h\"\n\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/http/utility.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RBACFilter {\n\nRoleBasedAccessControlFilterConfig::RoleBasedAccessControlFilterConfig(\n    const envoy::extensions::filters::http::rbac::v3::RBAC& proto_config,\n    const std::string& stats_prefix, Stats::Scope& scope)\n    : stats_(Filters::Common::RBAC::generateStats(stats_prefix, scope)),\n      engine_(Filters::Common::RBAC::createEngine(proto_config)),\n      shadow_engine_(Filters::Common::RBAC::createShadowEngine(proto_config)) {}\n\nconst Filters::Common::RBAC::RoleBasedAccessControlEngineImpl*\nRoleBasedAccessControlFilterConfig::engine(const Router::RouteConstSharedPtr route,\n                                           Filters::Common::RBAC::EnforcementMode mode) const {\n  if (!route || !route->routeEntry()) {\n    return engine(mode);\n  }\n\n  const std::string& name = HttpFilterNames::get().Rbac;\n  const auto* entry = route->routeEntry();\n  const auto* route_local =\n      entry->mostSpecificPerFilterConfigTyped<RoleBasedAccessControlRouteSpecificFilterConfig>(\n          name);\n\n  if (route_local) {\n    return route_local->engine(mode);\n  }\n\n  return engine(mode);\n}\n\nRoleBasedAccessControlRouteSpecificFilterConfig::RoleBasedAccessControlRouteSpecificFilterConfig(\n    const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& per_route_config)\n    : engine_(Filters::Common::RBAC::createEngine(per_route_config.rbac())),\n      shadow_engine_(Filters::Common::RBAC::createShadowEngine(per_route_config.rbac())) {}\n\nHttp::FilterHeadersStatus\nRoleBasedAccessControlFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  ENVOY_LOG(\n      
debug,\n      \"checking request: requestedServerName: {}, sourceIP: {}, directRemoteIP: {}, remoteIP: {},\"\n      \"localAddress: {}, ssl: {}, headers: {}, dynamicMetadata: {}\",\n      callbacks_->connection()->requestedServerName(),\n      callbacks_->connection()->remoteAddress()->asString(),\n      callbacks_->streamInfo().downstreamDirectRemoteAddress()->asString(),\n      callbacks_->streamInfo().downstreamRemoteAddress()->asString(),\n      callbacks_->streamInfo().downstreamLocalAddress()->asString(),\n      callbacks_->connection()->ssl()\n          ? \"uriSanPeerCertificate: \" +\n                absl::StrJoin(callbacks_->connection()->ssl()->uriSanPeerCertificate(), \",\") +\n                \", dnsSanPeerCertificate: \" +\n                absl::StrJoin(callbacks_->connection()->ssl()->dnsSansPeerCertificate(), \",\") +\n                \", subjectPeerCertificate: \" +\n                callbacks_->connection()->ssl()->subjectPeerCertificate()\n          : \"none\",\n      headers, callbacks_->streamInfo().dynamicMetadata().DebugString());\n\n  std::string effective_policy_id;\n  const auto shadow_engine =\n      config_->engine(callbacks_->route(), Filters::Common::RBAC::EnforcementMode::Shadow);\n\n  if (shadow_engine != nullptr) {\n    std::string shadow_resp_code =\n        Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().EngineResultAllowed;\n    if (shadow_engine->handleAction(*callbacks_->connection(), headers, callbacks_->streamInfo(),\n                                    &effective_policy_id)) {\n      ENVOY_LOG(debug, \"shadow allowed, matched policy {}\",\n                effective_policy_id.empty() ? \"none\" : effective_policy_id);\n      config_->stats().shadow_allowed_.inc();\n    } else {\n      ENVOY_LOG(debug, \"shadow denied, matched policy {}\",\n                effective_policy_id.empty() ? 
\"none\" : effective_policy_id);\n      config_->stats().shadow_denied_.inc();\n      shadow_resp_code =\n          Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().EngineResultDenied;\n    }\n\n    ProtobufWkt::Struct metrics;\n\n    auto& fields = *metrics.mutable_fields();\n    if (!effective_policy_id.empty()) {\n      *fields[Filters::Common::RBAC::DynamicMetadataKeysSingleton::get()\n                  .ShadowEffectivePolicyIdField]\n           .mutable_string_value() = effective_policy_id;\n    }\n\n    *fields[Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().ShadowEngineResultField]\n         .mutable_string_value() = shadow_resp_code;\n\n    callbacks_->streamInfo().setDynamicMetadata(HttpFilterNames::get().Rbac, metrics);\n  }\n\n  const auto engine =\n      config_->engine(callbacks_->route(), Filters::Common::RBAC::EnforcementMode::Enforced);\n  if (engine != nullptr) {\n    std::string effective_policy_id;\n    bool allowed = engine->handleAction(*callbacks_->connection(), headers,\n                                        callbacks_->streamInfo(), &effective_policy_id);\n    const std::string log_policy_id = effective_policy_id.empty() ? 
\"none\" : effective_policy_id;\n    if (allowed) {\n      ENVOY_LOG(debug, \"enforced allowed, matched policy {}\", log_policy_id);\n      config_->stats().allowed_.inc();\n      return Http::FilterHeadersStatus::Continue;\n    } else {\n      ENVOY_LOG(debug, \"enforced denied, matched policy {}\", log_policy_id);\n      callbacks_->sendLocalReply(Http::Code::Forbidden, \"RBAC: access denied\", nullptr,\n                                 absl::nullopt,\n                                 Filters::Common::RBAC::responseDetail(log_policy_id));\n      config_->stats().denied_.inc();\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n  }\n\n  ENVOY_LOG(debug, \"no engine, allowed by default\");\n  return Http::FilterHeadersStatus::Continue;\n}\n\n} // namespace RBACFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/rbac/rbac_filter.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/common/rbac/engine_impl.h\"\n#include \"extensions/filters/common/rbac/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RBACFilter {\n\nclass RoleBasedAccessControlRouteSpecificFilterConfig : public Router::RouteSpecificFilterConfig {\npublic:\n  RoleBasedAccessControlRouteSpecificFilterConfig(\n      const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& per_route_config);\n\n  const Filters::Common::RBAC::RoleBasedAccessControlEngineImpl*\n  engine(Filters::Common::RBAC::EnforcementMode mode) const {\n    return mode == Filters::Common::RBAC::EnforcementMode::Enforced ? engine_.get()\n                                                                    : shadow_engine_.get();\n  }\n\nprivate:\n  std::unique_ptr<Filters::Common::RBAC::RoleBasedAccessControlEngineImpl> engine_;\n  std::unique_ptr<Filters::Common::RBAC::RoleBasedAccessControlEngineImpl> shadow_engine_;\n};\n\n/**\n * Configuration for the RBAC filter.\n */\nclass RoleBasedAccessControlFilterConfig {\npublic:\n  RoleBasedAccessControlFilterConfig(\n      const envoy::extensions::filters::http::rbac::v3::RBAC& proto_config,\n      const std::string& stats_prefix, Stats::Scope& scope);\n\n  Filters::Common::RBAC::RoleBasedAccessControlFilterStats& stats() { return stats_; }\n\n  const Filters::Common::RBAC::RoleBasedAccessControlEngineImpl*\n  engine(const Router::RouteConstSharedPtr route,\n         Filters::Common::RBAC::EnforcementMode mode) const;\n\nprivate:\n  const Filters::Common::RBAC::RoleBasedAccessControlEngineImpl*\n  engine(Filters::Common::RBAC::EnforcementMode mode) const {\n    return mode == Filters::Common::RBAC::EnforcementMode::Enforced ? 
engine_.get()\n                                                                    : shadow_engine_.get();\n  }\n\n  Filters::Common::RBAC::RoleBasedAccessControlFilterStats stats_;\n\n  std::unique_ptr<const Filters::Common::RBAC::RoleBasedAccessControlEngineImpl> engine_;\n  std::unique_ptr<const Filters::Common::RBAC::RoleBasedAccessControlEngineImpl> shadow_engine_;\n};\n\nusing RoleBasedAccessControlFilterConfigSharedPtr =\n    std::shared_ptr<RoleBasedAccessControlFilterConfig>;\n\n/**\n * A filter that provides role-based access control authorization for HTTP requests.\n */\nclass RoleBasedAccessControlFilter : public Http::StreamDecoderFilter,\n                                     public Logger::Loggable<Logger::Id::rbac> {\npublic:\n  RoleBasedAccessControlFilter(RoleBasedAccessControlFilterConfigSharedPtr config)\n      : config_(config) {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    callbacks_ = &callbacks;\n  }\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\nprivate:\n  RoleBasedAccessControlFilterConfigSharedPtr config_;\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n};\n\n} // namespace RBACFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/router/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP L7 filter responsible for routing to upstream connection pools\n# Public docs: docs/root/configuration/http_filters/router_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    # This is core Envoy config.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/router:router_lib\",\n        \"//source/common/router:shadow_writer_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/on_demand:on_demand_update_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/router/config.cc",
    "content": "#include \"extensions/filters/http/router/config.h\"\n\n#include \"envoy/extensions/filters/http/router/v3/router.pb.h\"\n#include \"envoy/extensions/filters/http/router/v3/router.pb.validate.h\"\n\n#include \"common/router/router.h\"\n#include \"common/router/shadow_writer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RouterFilter {\n\nHttp::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::router::v3::Router& proto_config,\n    const std::string& stat_prefix, Server::Configuration::FactoryContext& context) {\n  Router::FilterConfigSharedPtr filter_config(new Router::FilterConfig(\n      stat_prefix, context, std::make_unique<Router::ShadowWriterImpl>(context.clusterManager()),\n      proto_config));\n\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(std::make_shared<Router::ProdFilter>(*filter_config));\n  };\n}\n\n/**\n * Static registration for the router filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(RouterFilterConfig,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.router\"};\n\n} // namespace RouterFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/router/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/router/v3/router.pb.h\"\n#include \"envoy/extensions/filters/http/router/v3/router.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RouterFilter {\n\n/**\n * Config registration for the router filter. @see NamedHttpFilterConfigFactory.\n */\nclass RouterFilterConfig\n    : public Common::FactoryBase<envoy::extensions::filters::http::router::v3::Router> {\npublic:\n  RouterFilterConfig() : FactoryBase(HttpFilterNames::get().Router) {}\n\n  bool isTerminalFilter() override { return true; }\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::router::v3::Router& proto_config,\n      const std::string& stat_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\nDECLARE_FACTORY(RouterFilterConfig);\n\n} // namespace RouterFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/squash/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP filter that implements the Squash microservice debugger\n# Public docs: docs/root/configuration/http_filters/squash_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"squash_filter_lib\",\n    srcs = [\"squash_filter.cc\"],\n    hdrs = [\"squash_filter.h\"],\n    deps = [\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/squash:squash_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/squash/config.cc",
    "content": "#include \"extensions/filters/http/squash/config.h\"\n\n#include \"envoy/extensions/filters/http/squash/v3/squash.pb.h\"\n#include \"envoy/extensions/filters/http/squash/v3/squash.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/squash/squash_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Squash {\n\nHttp::FilterFactoryCb SquashFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::squash::v3::Squash& proto_config, const std::string&,\n    Server::Configuration::FactoryContext& context) {\n\n  SquashFilterConfigSharedPtr config = std::make_shared<SquashFilterConfig>(\n      SquashFilterConfig(proto_config, context.clusterManager()));\n\n  return [&context, config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addStreamDecoderFilter(\n        std::make_shared<SquashFilter>(config, context.clusterManager()));\n  };\n}\n\n/**\n * Static registration for the squash filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(SquashFilterConfigFactory,\n                 Server::Configuration::NamedHttpFilterConfigFactory){\"envoy.squash\"};\n\n} // namespace Squash\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/squash/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/squash/v3/squash.pb.h\"\n#include \"envoy/extensions/filters/http/squash/v3/squash.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Squash {\n\n/**\n * Config registration for the squash filter. @see NamedHttpFilterConfigFactory.\n */\nclass SquashFilterConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::squash::v3::Squash> {\npublic:\n  SquashFilterConfigFactory() : FactoryBase(HttpFilterNames::get().Squash) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::squash::v3::Squash& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace Squash\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/squash/squash_filter.cc",
    "content": "#include \"extensions/filters/http/squash/squash_filter.h\"\n\n#include <memory>\n\n#include \"envoy/extensions/filters/http/squash/v3/squash.pb.h\"\n#include \"envoy/http/codes.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Squash {\n\nusing std::placeholders::_1;\n\nconst std::regex SquashFilterConfig::ENV_REGEX(\"\\\\{\\\\{ (\\\\w+) \\\\}\\\\}\");\n\nconst std::string SquashFilter::POST_ATTACHMENT_PATH = \"/api/v2/debugattachment/\";\nconst std::string SquashFilter::SERVER_AUTHORITY = \"squash-server\";\nconst std::string SquashFilter::ATTACHED_STATE = \"attached\";\nconst std::string SquashFilter::ERROR_STATE = \"error\";\n\nSquashFilterConfig::SquashFilterConfig(\n    const envoy::extensions::filters::http::squash::v3::Squash& proto_config,\n    Upstream::ClusterManager& cluster_manager)\n    : cluster_name_(proto_config.cluster()),\n      attachment_json_(getAttachment(proto_config.attachment_template())),\n      attachment_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, attachment_timeout, 60000)),\n      attachment_poll_period_(\n          PROTOBUF_GET_MS_OR_DEFAULT(proto_config, attachment_poll_period, 1000)),\n      request_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, request_timeout, 1000)) {\n\n  if (!cluster_manager.get(cluster_name_)) {\n    throw EnvoyException(\n        fmt::format(\"squash filter: unknown cluster '{}' in squash config\", cluster_name_));\n  }\n}\n\nstd::string SquashFilterConfig::getAttachment(const ProtobufWkt::Struct& attachment_template) {\n  ProtobufWkt::Struct 
attachment_json(attachment_template);\n  updateTemplateInStruct(attachment_json);\n  return MessageUtil::getJsonStringFromMessage(attachment_json);\n}\n\nvoid SquashFilterConfig::updateTemplateInStruct(ProtobufWkt::Struct& attachment_template) {\n  for (auto& value_it : *attachment_template.mutable_fields()) {\n    auto& curvalue = value_it.second;\n    updateTemplateInValue(curvalue);\n  }\n}\n\nvoid SquashFilterConfig::updateTemplateInValue(ProtobufWkt::Value& curvalue) {\n  switch (curvalue.kind_case()) {\n  case ProtobufWkt::Value::kStructValue: {\n    updateTemplateInStruct(*curvalue.mutable_struct_value());\n    break;\n  }\n  case ProtobufWkt::Value::kListValue: {\n    ProtobufWkt::ListValue& values = *curvalue.mutable_list_value();\n    for (int i = 0; i < values.values_size(); i++) {\n      updateTemplateInValue(*values.mutable_values(i));\n    }\n    break;\n  }\n  case ProtobufWkt::Value::kStringValue: {\n    curvalue.set_string_value(replaceEnv(curvalue.string_value()));\n    break;\n  }\n  case ProtobufWkt::Value::KIND_NOT_SET:\n  case ProtobufWkt::Value::kNullValue:\n  case ProtobufWkt::Value::kBoolValue:\n  case ProtobufWkt::Value::kNumberValue: {\n    // Nothing here... 
we only need to transform strings\n  }\n  }\n}\n\n/*\n This function interpolates environment variables in a string template.\n To interpolate an environment variable named ENV, add '{{ ENV }}' (without the\n  quotes, with the spaces) to the template string.\n\n  See api/envoy/config/filter/http/squash/v2/squash.proto for the motivation on why this is needed.\n*/\nstd::string SquashFilterConfig::replaceEnv(const std::string& attachment_template) {\n  std::string s;\n\n  auto end_last_match = attachment_template.begin();\n\n  auto replaceEnvVarInTemplateCallback =\n      [&s, &attachment_template,\n       &end_last_match](const std::match_results<std::string::const_iterator>& match) {\n        auto start_match = attachment_template.begin() + match.position(0);\n\n        s.append(end_last_match, start_match);\n\n        std::string envar_name = match[1].str();\n        const char* envar_value = std::getenv(envar_name.c_str());\n        if (envar_value == nullptr) {\n          ENVOY_LOG(warn, \"Squash: no environment variable named {}.\", envar_name);\n        } else {\n          s.append(envar_value);\n        }\n        end_last_match = start_match + match.length(0);\n      };\n\n  std::sregex_iterator begin(attachment_template.begin(), attachment_template.end(), ENV_REGEX),\n      end;\n  std::for_each(begin, end, replaceEnvVarInTemplateCallback);\n  s.append(end_last_match, attachment_template.end());\n\n  return s;\n}\n\nSquashFilter::SquashFilter(SquashFilterConfigSharedPtr config, Upstream::ClusterManager& cm)\n    : config_(config), is_squashing_(false), attachment_poll_period_timer_(nullptr),\n      attachment_timeout_timer_(nullptr), in_flight_request_(nullptr),\n      create_attachment_callback_(std::bind(&SquashFilter::onCreateAttachmentSuccess, this, _1),\n                                  std::bind(&SquashFilter::onCreateAttachmentFailure, this, _1)),\n      check_attachment_callback_(std::bind(&SquashFilter::onGetAttachmentSuccess, this, _1),\n         
                        std::bind(&SquashFilter::onGetAttachmentFailure, this, _1)),\n      cm_(cm), decoder_callbacks_(nullptr) {}\n\nSquashFilter::~SquashFilter() = default;\n\nvoid SquashFilter::onDestroy() { cleanup(); }\n\nHttp::FilterHeadersStatus SquashFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  // Check for squash header\n  if (!headers.get(Http::Headers::get().XSquashDebug)) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  ENVOY_LOG(debug, \"Squash: Holding request and requesting debug attachment\");\n\n  Http::RequestMessagePtr request(new Http::RequestMessageImpl());\n  request->headers().setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  request->headers().setReferencePath(POST_ATTACHMENT_PATH);\n  request->headers().setReferenceHost(SERVER_AUTHORITY);\n  request->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post);\n  request->body().add(config_->attachmentJson());\n\n  is_squashing_ = true;\n  in_flight_request_ =\n      cm_.httpAsyncClientForCluster(config_->clusterName())\n          .send(std::move(request), create_attachment_callback_,\n                Http::AsyncClient::RequestOptions().setTimeout(config_->requestTimeout()));\n\n  if (in_flight_request_ == nullptr) {\n    ENVOY_LOG(debug, \"Squash: can't create request for squash server\");\n    is_squashing_ = false;\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  attachment_timeout_timer_ =\n      decoder_callbacks_->dispatcher().createTimer([this]() -> void { doneSquashing(); });\n  attachment_timeout_timer_->enableTimer(config_->attachmentTimeout(),\n                                         &decoder_callbacks_->scope());\n  // Check if the timer expired inline.\n  if (!is_squashing_) {\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\nHttp::FilterDataStatus SquashFilter::decodeData(Buffer::Instance&, bool) {\n  if (is_squashing_) {\n    return 
Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus SquashFilter::decodeTrailers(Http::RequestTrailerMap&) {\n  if (is_squashing_) {\n    return Http::FilterTrailersStatus::StopIteration;\n  }\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid SquashFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n  decoder_callbacks_ = &callbacks;\n}\n\nvoid SquashFilter::onCreateAttachmentSuccess(Http::ResponseMessagePtr&& m) {\n  in_flight_request_ = nullptr;\n\n  // Get the config object that was created\n  if (Http::Utility::getResponseStatus(m->headers()) != enumToInt(Http::Code::Created)) {\n    ENVOY_LOG(debug, \"Squash: can't create attachment object. status {} - not squashing\",\n              m->headers().getStatusValue());\n    doneSquashing();\n  } else {\n    std::string debug_attachment_id;\n    try {\n      Json::ObjectSharedPtr json_config = getJsonBody(std::move(m));\n      debug_attachment_id =\n          json_config->getObject(\"metadata\", true)->getString(\"name\", EMPTY_STRING);\n    } catch (Json::Exception&) {\n      debug_attachment_id = EMPTY_STRING;\n    }\n\n    if (debug_attachment_id.empty()) {\n      ENVOY_LOG(debug, \"Squash: failed to parse debug attachment object - check server settings.\");\n      doneSquashing();\n    } else {\n      debug_attachment_path_ = POST_ATTACHMENT_PATH + debug_attachment_id;\n      pollForAttachment();\n    }\n  }\n}\n\nvoid SquashFilter::onCreateAttachmentFailure(Http::AsyncClient::FailureReason) {\n  // in_flight_request_ will be null if we are called inline of async client send()\n  bool request_created = in_flight_request_ != nullptr;\n  in_flight_request_ = nullptr;\n\n  // No retries here, as we couldn't create the attachment object.\n  if (request_created) {\n    // Cleanup not needed if onFailure called inline in async client send, as this means that\n    // decodeHeaders is down the stack 
and will return Continue.\n    doneSquashing();\n  }\n}\n\nvoid SquashFilter::onGetAttachmentSuccess(Http::ResponseMessagePtr&& m) {\n  in_flight_request_ = nullptr;\n\n  std::string attachmentstate;\n  try {\n    Json::ObjectSharedPtr json_config = getJsonBody(std::move(m));\n    attachmentstate = json_config->getObject(\"status\", true)->getString(\"state\", EMPTY_STRING);\n  } catch (Json::Exception&) {\n    // No state yet.. leave it empty for the retry logic.\n  }\n\n  if (attachmentstate == ATTACHED_STATE || attachmentstate == ERROR_STATE) {\n    doneSquashing();\n  } else {\n    // Always schedule a retry. The attachment_timeout_timer_ will stop the retry loop when it\n    // expires.\n    scheduleRetry();\n  }\n}\n\nvoid SquashFilter::onGetAttachmentFailure(Http::AsyncClient::FailureReason) {\n  in_flight_request_ = nullptr;\n  scheduleRetry();\n}\n\nvoid SquashFilter::scheduleRetry() {\n  if (attachment_poll_period_timer_.get() == nullptr) {\n    attachment_poll_period_timer_ =\n        decoder_callbacks_->dispatcher().createTimer([this]() -> void { pollForAttachment(); });\n  }\n  attachment_poll_period_timer_->enableTimer(config_->attachmentPollPeriod(),\n                                             &decoder_callbacks_->scope());\n}\n\nvoid SquashFilter::pollForAttachment() {\n  Http::RequestMessagePtr request(new Http::RequestMessageImpl());\n  request->headers().setReferenceMethod(Http::Headers::get().MethodValues.Get);\n  request->headers().setReferencePath(debug_attachment_path_);\n  request->headers().setReferenceHost(SERVER_AUTHORITY);\n\n  in_flight_request_ =\n      cm_.httpAsyncClientForCluster(config_->clusterName())\n          .send(std::move(request), check_attachment_callback_,\n                Http::AsyncClient::RequestOptions().setTimeout(config_->requestTimeout()));\n  // No need to check if in_flight_request_ is null as onFailure will take care of\n  // cleanup.\n}\n\nvoid SquashFilter::doneSquashing() {\n  cleanup();\n  
decoder_callbacks_->continueDecoding();\n}\n\nvoid SquashFilter::cleanup() {\n  is_squashing_ = false;\n\n  if (attachment_poll_period_timer_) {\n    attachment_poll_period_timer_->disableTimer();\n    attachment_poll_period_timer_.reset();\n  }\n\n  if (attachment_timeout_timer_) {\n    attachment_timeout_timer_->disableTimer();\n    attachment_timeout_timer_.reset();\n  }\n\n  if (in_flight_request_ != nullptr) {\n    in_flight_request_->cancel();\n    in_flight_request_ = nullptr;\n  }\n\n  debug_attachment_path_ = EMPTY_STRING;\n}\n\nJson::ObjectSharedPtr SquashFilter::getJsonBody(Http::ResponseMessagePtr&& m) {\n  return Json::Factory::loadFromString(m->bodyAsString());\n}\n\n} // namespace Squash\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/squash/squash_filter.h",
    "content": "#pragma once\n\n#include <regex>\n\n#include \"envoy/extensions/filters/http/squash/v3/squash.pb.h\"\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/json/json_object.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Squash {\n\nclass SquashFilterConfig : protected Logger::Loggable<Logger::Id::config> {\npublic:\n  SquashFilterConfig(const envoy::extensions::filters::http::squash::v3::Squash& proto_config,\n                     Upstream::ClusterManager& cluster_manager);\n  const std::string& clusterName() const { return cluster_name_; }\n  const std::string& attachmentJson() const { return attachment_json_; }\n  const std::chrono::milliseconds& attachmentTimeout() const { return attachment_timeout_; }\n  const std::chrono::milliseconds& attachmentPollPeriod() const { return attachment_poll_period_; }\n  const std::chrono::milliseconds& requestTimeout() const { return request_timeout_; }\n\nprivate:\n  // Get the attachment body, and returns a JSON representations with environment variables\n  // interpolated.\n  static std::string getAttachment(const ProtobufWkt::Struct& attachment_template);\n  // Recursively interpolates environment variables inline in the struct.\n  static void updateTemplateInStruct(ProtobufWkt::Struct& attachment_template);\n  // Recursively interpolates environment variables inline in the value.\n  static void updateTemplateInValue(ProtobufWkt::Value& curvalue);\n  // Interpolates environment variables in a string, and returns the new interpolated string.\n  static std::string replaceEnv(const std::string& attachment_template);\n\n  // The name of the squash server cluster.\n  const std::string cluster_name_;\n  // The attachment body sent to squash server on create attachment.\n  const 
std::string attachment_json_;\n  // The total amount of time for an attachment to reach a final state (attached or error).\n  const std::chrono::milliseconds attachment_timeout_;\n  // How frequently should we poll the attachment state with the squash server.\n  const std::chrono::milliseconds attachment_poll_period_;\n  // The timeout for individual requests to the squash server.\n  const std::chrono::milliseconds request_timeout_;\n\n  // Defines the pattern for interpolating environment variables in to the attachment.\n  const static std::regex ENV_REGEX;\n};\n\nusing SquashFilterConfigSharedPtr = std::shared_ptr<SquashFilterConfig>;\n\nclass AsyncClientCallbackShim : public Http::AsyncClient::Callbacks {\npublic:\n  AsyncClientCallbackShim(std::function<void(Http::ResponseMessagePtr&&)>&& on_success,\n                          std::function<void(Http::AsyncClient::FailureReason)>&& on_fail)\n      : on_success_(on_success), on_fail_(on_fail) {}\n  // Http::AsyncClient::Callbacks\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& m) override {\n    on_success_(std::forward<Http::ResponseMessagePtr>(m));\n  }\n  void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason f) override {\n    on_fail_(f);\n  }\n  void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {}\n\nprivate:\n  const std::function<void(Http::ResponseMessagePtr&&)> on_success_;\n  const std::function<void(Http::AsyncClient::FailureReason)> on_fail_;\n};\n\nclass SquashFilter : public Http::StreamDecoderFilter,\n                     protected Logger::Loggable<Logger::Id::filter> {\npublic:\n  SquashFilter(SquashFilterConfigSharedPtr config, Upstream::ClusterManager& cm);\n  ~SquashFilter() override;\n\n  // Http::StreamFilterBase\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override;\n  Http::FilterDataStatus 
decodeData(Buffer::Instance&, bool) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override;\n\nprivate:\n  // AsyncClient callbacks for create attachment request\n  void onCreateAttachmentSuccess(Http::ResponseMessagePtr&&);\n  void onCreateAttachmentFailure(Http::AsyncClient::FailureReason);\n  // AsyncClient callbacks for get attachment request\n  void onGetAttachmentSuccess(Http::ResponseMessagePtr&&);\n  void onGetAttachmentFailure(Http::AsyncClient::FailureReason);\n\n  // Schedules a pollForAttachment\n  void scheduleRetry();\n  // Contacts Squash server to get the latest version of a debug attachment.\n  void pollForAttachment();\n  // Cleanup and continue the filter chain.\n  void doneSquashing();\n  void cleanup();\n  // Creates a JSON from the message body.\n  Json::ObjectSharedPtr getJsonBody(Http::ResponseMessagePtr&& m);\n\n  const SquashFilterConfigSharedPtr config_;\n\n  // Current state of the squash filter. If is_squashing_ is true, Hold the request while we\n  // communicate with the squash server to attach a debugger. 
If it is false, let the request\n  // pass-through.\n  bool is_squashing_;\n  // The API path of the created debug attachment (used for polling its state).\n  std::string debug_attachment_path_;\n  // A timer for polling the state of a debug attachment until it reaches a final state.\n  Event::TimerPtr attachment_poll_period_timer_;\n  // A timeout timer - after this timer expires we abort polling the debug attachment, and continue\n  // filter iteration\n  Event::TimerPtr attachment_timeout_timer_;\n  // The current inflight request to the squash server.\n  Http::AsyncClient::Request* in_flight_request_;\n  // Shims to get AsyncClient callbacks to specific methods, per API method.\n  AsyncClientCallbackShim create_attachment_callback_;\n  AsyncClientCallbackShim check_attachment_callback_;\n\n  // ClusterManager to send requests to squash server\n  Upstream::ClusterManager& cm_;\n  // Callbacks used to continue filter iteration.\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_;\n\n  // Create debug attachment URL path.\n  const static std::string POST_ATTACHMENT_PATH;\n  // Authority header for squash server.\n  const static std::string SERVER_AUTHORITY;\n  // The state of a debug attachment object when a debugger is successfully attached.\n  const static std::string ATTACHED_STATE;\n  // The state of a debug attachment object when an error has occurred.\n  const static std::string ERROR_STATE;\n};\n\n} // namespace Squash\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/tap/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L7 HTTP Tap filter\n# Public docs: docs/root/configuration/http_filters/tap_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"tap_config_interface\",\n    hdrs = [\"tap_config.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/extensions/common/tap:tap_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tap_config_impl\",\n    srcs = [\"tap_config_impl.cc\"],\n    hdrs = [\"tap_config_impl.h\"],\n    deps = [\n        \":tap_config_interface\",\n        \"//source/extensions/common/tap:tap_config_base\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tap_filter_lib\",\n    srcs = [\"tap_filter.cc\"],\n    hdrs = [\"tap_filter.h\"],\n    deps = [\n        \":tap_config_interface\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/extensions/common/tap:extension_config_base\",\n        \"@envoy_api//envoy/extensions/filters/http/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"alpha\",\n    deps = [\n        \":tap_config_impl\",\n        \":tap_filter_lib\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        
\"@envoy_api//envoy/extensions/filters/http/tap/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/tap/config.cc",
    "content": "#include \"extensions/filters/http/tap/config.h\"\n\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/extensions/filters/http/tap/v3/tap.pb.h\"\n#include \"envoy/extensions/filters/http/tap/v3/tap.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/tap/tap_config_impl.h\"\n#include \"extensions/filters/http/tap/tap_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\n\nclass HttpTapConfigFactoryImpl : public Extensions::Common::Tap::TapConfigFactory {\npublic:\n  // TapConfigFactory\n  Extensions::Common::Tap::TapConfigSharedPtr\n  createConfigFromProto(envoy::config::tap::v3::TapConfig&& proto_config,\n                        Extensions::Common::Tap::Sink* admin_streamer) override {\n    return std::make_shared<HttpTapConfigImpl>(std::move(proto_config), admin_streamer);\n  }\n};\n\nHttp::FilterFactoryCb TapFilterFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::tap::v3::Tap& proto_config,\n    const std::string& stats_prefix, Server::Configuration::FactoryContext& context) {\n  FilterConfigSharedPtr filter_config(new FilterConfigImpl(\n      proto_config, stats_prefix, std::make_unique<HttpTapConfigFactoryImpl>(), context.scope(),\n      context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher()));\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    auto filter = std::make_shared<Filter>(filter_config);\n    callbacks.addStreamFilter(filter);\n    callbacks.addAccessLogHandler(filter);\n  };\n}\n\n/**\n * Static registration for the tap filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(TapFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/tap/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/tap/v3/tap.pb.h\"\n#include \"envoy/extensions/filters/http/tap/v3/tap.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\n\n/**\n * Config registration for the tap filter.\n */\nclass TapFilterFactory\n    : public Common::FactoryBase<envoy::extensions::filters::http::tap::v3::Tap> {\npublic:\n  TapFilterFactory() : FactoryBase(HttpFilterNames::get().Tap) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::tap::v3::Tap& proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/tap/tap_config.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"extensions/common/tap/tap.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\n\n/**\n * Per-request/stream HTTP tap implementation. Abstractly handles all request lifecycle events in\n * order to tap if the configuration matches.\n */\nclass HttpPerRequestTapper {\npublic:\n  virtual ~HttpPerRequestTapper() = default;\n\n  /**\n   * Called when request headers are received.\n   */\n  virtual void onRequestHeaders(const Http::RequestHeaderMap& headers) PURE;\n\n  /**\n   * Called when request body is received.\n   */\n  virtual void onRequestBody(const Buffer::Instance& data) PURE;\n\n  /**\n   * Called when request trailers are received.\n   */\n  virtual void onRequestTrailers(const Http::RequestTrailerMap& trailers) PURE;\n\n  /**\n   * Called when response headers are received.\n   */\n  virtual void onResponseHeaders(const Http::ResponseHeaderMap& headers) PURE;\n\n  /**\n   * Called when response body is received.\n   */\n  virtual void onResponseBody(const Buffer::Instance& data) PURE;\n\n  /**\n   * Called when response trailers are received.\n   */\n  virtual void onResponseTrailers(const Http::ResponseTrailerMap& headers) PURE;\n\n  /**\n   * Called when the request is being destroyed and is being logged.\n   * @return whether the request was tapped or not.\n   */\n  virtual bool onDestroyLog() PURE;\n};\n\nusing HttpPerRequestTapperPtr = std::unique_ptr<HttpPerRequestTapper>;\n\n/**\n * Abstract HTTP tap configuration.\n */\nclass HttpTapConfig : public virtual Extensions::Common::Tap::TapConfig {\npublic:\n  /**\n   * @return a new per-request HTTP tapper which is used to handle tapping of a discrete request.\n   * @param stream_id supplies the owning HTTP stream ID.\n   */\n  virtual HttpPerRequestTapperPtr createPerRequestTapper(uint64_t stream_id) 
PURE;\n};\n\nusing HttpTapConfigSharedPtr = std::shared_ptr<HttpTapConfig>;\n\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/tap/tap_config_impl.cc",
    "content": "#include \"extensions/filters/http/tap/tap_config_impl.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/http.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\n\nnamespace TapCommon = Extensions::Common::Tap;\n\nnamespace {\nHttp::HeaderMap::ConstIterateCb\nfillHeaderList(Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValue>* output) {\n  return [output](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    auto& new_header = *output->Add();\n    new_header.set_key(std::string(header.key().getStringView()));\n    new_header.set_value(std::string(header.value().getStringView()));\n    return Http::HeaderMap::Iterate::Continue;\n  };\n}\n} // namespace\n\nHttpTapConfigImpl::HttpTapConfigImpl(envoy::config::tap::v3::TapConfig&& proto_config,\n                                     Common::Tap::Sink* admin_streamer)\n    : TapCommon::TapConfigBaseImpl(std::move(proto_config), admin_streamer) {}\n\nHttpPerRequestTapperPtr HttpTapConfigImpl::createPerRequestTapper(uint64_t stream_id) {\n  return std::make_unique<HttpPerRequestTapperImpl>(shared_from_this(), stream_id);\n}\n\nvoid HttpPerRequestTapperImpl::streamRequestHeaders() {\n  TapCommon::TraceWrapperPtr trace = makeTraceSegment();\n  request_headers_->iterate(fillHeaderList(\n      trace->mutable_http_streamed_trace_segment()->mutable_request_headers()->mutable_headers()));\n  sink_handle_->submitTrace(std::move(trace));\n}\n\nvoid HttpPerRequestTapperImpl::onRequestHeaders(const Http::RequestHeaderMap& headers) {\n  request_headers_ = &headers;\n  config_->rootMatcher().onHttpRequestHeaders(headers, statuses_);\n  if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) {\n    ASSERT(!started_streaming_trace_);\n    started_streaming_trace_ = 
true;\n    streamRequestHeaders();\n  }\n}\n\nvoid HttpPerRequestTapperImpl::streamBufferedRequestBody() {\n  if (buffered_streamed_request_body_ != nullptr) {\n    sink_handle_->submitTrace(std::move(buffered_streamed_request_body_));\n    buffered_streamed_request_body_.reset();\n  }\n}\n\nvoid HttpPerRequestTapperImpl::onRequestBody(const Buffer::Instance& data) {\n  onBody(data, buffered_streamed_request_body_, config_->maxBufferedRxBytes(),\n         &envoy::data::tap::v3::HttpStreamedTraceSegment::mutable_request_body_chunk,\n         &envoy::data::tap::v3::HttpBufferedTrace::mutable_request, true);\n}\n\nvoid HttpPerRequestTapperImpl::streamRequestTrailers() {\n  if (request_trailers_ != nullptr) {\n    TapCommon::TraceWrapperPtr trace = makeTraceSegment();\n    request_trailers_->iterate(fillHeaderList(trace->mutable_http_streamed_trace_segment()\n                                                  ->mutable_request_trailers()\n                                                  ->mutable_headers()));\n    sink_handle_->submitTrace(std::move(trace));\n  }\n}\n\nvoid HttpPerRequestTapperImpl::onRequestTrailers(const Http::RequestTrailerMap& trailers) {\n  request_trailers_ = &trailers;\n  config_->rootMatcher().onHttpRequestTrailers(trailers, statuses_);\n  if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) {\n    if (!started_streaming_trace_) {\n      started_streaming_trace_ = true;\n      // Flush anything that we already buffered.\n      streamRequestHeaders();\n      streamBufferedRequestBody();\n    }\n\n    streamRequestTrailers();\n  }\n}\n\nvoid HttpPerRequestTapperImpl::streamResponseHeaders() {\n  TapCommon::TraceWrapperPtr trace = makeTraceSegment();\n  response_headers_->iterate(fillHeaderList(\n      trace->mutable_http_streamed_trace_segment()->mutable_response_headers()->mutable_headers()));\n  sink_handle_->submitTrace(std::move(trace));\n}\n\nvoid HttpPerRequestTapperImpl::onResponseHeaders(const 
Http::ResponseHeaderMap& headers) {\n  response_headers_ = &headers;\n  config_->rootMatcher().onHttpResponseHeaders(headers, statuses_);\n  if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) {\n    if (!started_streaming_trace_) {\n      started_streaming_trace_ = true;\n      // Flush anything that we already buffered.\n      streamRequestHeaders();\n      streamBufferedRequestBody();\n      streamRequestTrailers();\n    }\n\n    streamResponseHeaders();\n  }\n}\n\nvoid HttpPerRequestTapperImpl::streamBufferedResponseBody() {\n  if (buffered_streamed_response_body_ != nullptr) {\n    sink_handle_->submitTrace(std::move(buffered_streamed_response_body_));\n    buffered_streamed_response_body_.reset();\n  }\n}\n\nvoid HttpPerRequestTapperImpl::onResponseBody(const Buffer::Instance& data) {\n  onBody(data, buffered_streamed_response_body_, config_->maxBufferedTxBytes(),\n         &envoy::data::tap::v3::HttpStreamedTraceSegment::mutable_response_body_chunk,\n         &envoy::data::tap::v3::HttpBufferedTrace::mutable_response, false);\n}\n\nvoid HttpPerRequestTapperImpl::onResponseTrailers(const Http::ResponseTrailerMap& trailers) {\n  response_trailers_ = &trailers;\n  config_->rootMatcher().onHttpResponseTrailers(trailers, statuses_);\n  if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) {\n    if (!started_streaming_trace_) {\n      started_streaming_trace_ = true;\n      // Flush anything that we already buffered.\n      streamRequestHeaders();\n      streamBufferedRequestBody();\n      streamRequestTrailers();\n      streamResponseHeaders();\n      streamBufferedResponseBody();\n    }\n\n    TapCommon::TraceWrapperPtr trace = makeTraceSegment();\n    trailers.iterate(fillHeaderList(trace->mutable_http_streamed_trace_segment()\n                                        ->mutable_response_trailers()\n                                        ->mutable_headers()));\n    
sink_handle_->submitTrace(std::move(trace));\n  }\n}\n\nbool HttpPerRequestTapperImpl::onDestroyLog() {\n  if (config_->streaming() || !config_->rootMatcher().matchStatus(statuses_).matches_) {\n    return config_->rootMatcher().matchStatus(statuses_).matches_;\n  }\n\n  makeBufferedFullTraceIfNeeded();\n  auto& http_trace = *buffered_full_trace_->mutable_http_buffered_trace();\n  if (request_headers_ != nullptr) {\n    request_headers_->iterate(fillHeaderList(http_trace.mutable_request()->mutable_headers()));\n  }\n  if (request_trailers_ != nullptr) {\n    request_trailers_->iterate(fillHeaderList(http_trace.mutable_request()->mutable_trailers()));\n  }\n  if (response_headers_ != nullptr) {\n    response_headers_->iterate(fillHeaderList(http_trace.mutable_response()->mutable_headers()));\n  }\n  if (response_trailers_ != nullptr) {\n    response_trailers_->iterate(fillHeaderList(http_trace.mutable_response()->mutable_trailers()));\n  }\n\n  ENVOY_LOG(debug, \"submitting buffered trace sink\");\n  // move is safe as onDestroyLog is the last method called.\n  sink_handle_->submitTrace(std::move(buffered_full_trace_));\n  return true;\n}\n\nvoid HttpPerRequestTapperImpl::onBody(\n    const Buffer::Instance& data, Extensions::Common::Tap::TraceWrapperPtr& buffered_streamed_body,\n    uint32_t max_buffered_bytes, MutableBodyChunk mutable_body_chunk,\n    MutableMessage mutable_message, bool request) {\n  // Invoke body matcher.\n  request ? 
config_->rootMatcher().onRequestBody(data, statuses_)\n          : config_->rootMatcher().onResponseBody(data, statuses_);\n  if (config_->streaming()) {\n    const auto& match_status = config_->rootMatcher().matchStatus(statuses_);\n    // Without body matching, we must have already started tracing or have not yet matched.\n    ASSERT(started_streaming_trace_ || !match_status.matches_);\n\n    if (started_streaming_trace_) {\n      // If we have already started streaming, flush a body segment now.\n      TapCommon::TraceWrapperPtr trace = makeTraceSegment();\n      TapCommon::Utility::addBufferToProtoBytes(\n          *(trace->mutable_http_streamed_trace_segment()->*mutable_body_chunk)(),\n          max_buffered_bytes, data, 0, data.length());\n      sink_handle_->submitTrace(std::move(trace));\n    } else if (match_status.might_change_status_) {\n      // If we might still match, start buffering the body up to our limit.\n      if (buffered_streamed_body == nullptr) {\n        buffered_streamed_body = makeTraceSegment();\n      }\n      auto& body =\n          *(buffered_streamed_body->mutable_http_streamed_trace_segment()->*mutable_body_chunk)();\n      ASSERT(body.as_bytes().size() <= max_buffered_bytes);\n      TapCommon::Utility::addBufferToProtoBytes(body, max_buffered_bytes - body.as_bytes().size(),\n                                                data, 0, data.length());\n    }\n  } else {\n    // If we are not streaming, buffer the body up to our limit.\n    makeBufferedFullTraceIfNeeded();\n    auto& body =\n        *(buffered_full_trace_->mutable_http_buffered_trace()->*mutable_message)()->mutable_body();\n    ASSERT(body.as_bytes().size() <= max_buffered_bytes);\n    TapCommon::Utility::addBufferToProtoBytes(body, max_buffered_bytes - body.as_bytes().size(),\n                                              data, 0, data.length());\n  }\n}\n\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/tap/tap_config_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/http.pb.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/common/tap/tap_config_base.h\"\n#include \"extensions/filters/http/tap/tap_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\n\nclass HttpTapConfigImpl : public Extensions::Common::Tap::TapConfigBaseImpl,\n                          public HttpTapConfig,\n                          public std::enable_shared_from_this<HttpTapConfigImpl> {\npublic:\n  HttpTapConfigImpl(envoy::config::tap::v3::TapConfig&& proto_config,\n                    Extensions::Common::Tap::Sink* admin_streamer);\n\n  // TapFilter::HttpTapConfig\n  HttpPerRequestTapperPtr createPerRequestTapper(uint64_t stream_id) override;\n};\n\nclass HttpPerRequestTapperImpl : public HttpPerRequestTapper, Logger::Loggable<Logger::Id::tap> {\npublic:\n  HttpPerRequestTapperImpl(HttpTapConfigSharedPtr config, uint64_t stream_id)\n      : config_(std::move(config)), stream_id_(stream_id),\n        sink_handle_(config_->createPerTapSinkHandleManager(stream_id)),\n        statuses_(config_->createMatchStatusVector()) {\n    config_->rootMatcher().onNewStream(statuses_);\n  }\n\n  // TapFilter::HttpPerRequestTapper\n  void onRequestHeaders(const Http::RequestHeaderMap& headers) override;\n  void onRequestBody(const Buffer::Instance& data) override;\n  void onRequestTrailers(const Http::RequestTrailerMap& headers) override;\n  void onResponseHeaders(const Http::ResponseHeaderMap& headers) override;\n  void onResponseBody(const Buffer::Instance& data) override;\n  void onResponseTrailers(const Http::ResponseTrailerMap& headers) override;\n  bool onDestroyLog() override;\n\nprivate:\n  using HttpStreamedTraceSegment = envoy::data::tap::v3::HttpStreamedTraceSegment;\n  using MutableBodyChunk = 
envoy::data::tap::v3::Body* (HttpStreamedTraceSegment::*)();\n  using HttpBufferedTrace = envoy::data::tap::v3::HttpBufferedTrace;\n  using MutableMessage = envoy::data::tap::v3::HttpBufferedTrace::Message* (HttpBufferedTrace::*)();\n\n  void onBody(const Buffer::Instance& data,\n              Extensions::Common::Tap::TraceWrapperPtr& buffered_streamed_body,\n              uint32_t max_buffered_bytes, MutableBodyChunk mutable_body_chunk,\n              MutableMessage mutable_message, bool request);\n\n  void makeBufferedFullTraceIfNeeded() {\n    if (buffered_full_trace_ == nullptr) {\n      buffered_full_trace_ = Extensions::Common::Tap::makeTraceWrapper();\n    }\n  }\n\n  Extensions::Common::Tap::TraceWrapperPtr makeTraceSegment() {\n    Extensions::Common::Tap::TraceWrapperPtr segment = Extensions::Common::Tap::makeTraceWrapper();\n    segment->mutable_http_streamed_trace_segment()->set_trace_id(stream_id_);\n    return segment;\n  }\n\n  void streamRequestHeaders();\n  void streamBufferedRequestBody();\n  void streamRequestTrailers();\n  void streamResponseHeaders();\n  void streamBufferedResponseBody();\n\n  HttpTapConfigSharedPtr config_;\n  const uint64_t stream_id_;\n  Extensions::Common::Tap::PerTapSinkHandleManagerPtr sink_handle_;\n  Extensions::Common::Tap::Matcher::MatchStatusVector statuses_;\n  bool started_streaming_trace_{};\n  const Http::RequestHeaderMap* request_headers_{};\n  const Http::HeaderMap* request_trailers_{};\n  const Http::ResponseHeaderMap* response_headers_{};\n  const Http::ResponseTrailerMap* response_trailers_{};\n  // Must be a shared_ptr because of submitTrace().\n  Extensions::Common::Tap::TraceWrapperPtr buffered_streamed_request_body_;\n  Extensions::Common::Tap::TraceWrapperPtr buffered_streamed_response_body_;\n  Extensions::Common::Tap::TraceWrapperPtr buffered_full_trace_;\n};\n\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/tap/tap_filter.cc",
    "content": "#include \"extensions/filters/http/tap/tap_filter.h\"\n\n#include \"envoy/extensions/filters/http/tap/v3/tap.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\n\nFilterConfigImpl::FilterConfigImpl(\n    const envoy::extensions::filters::http::tap::v3::Tap& proto_config,\n    const std::string& stats_prefix, Common::Tap::TapConfigFactoryPtr&& config_factory,\n    Stats::Scope& scope, Server::Admin& admin, Singleton::Manager& singleton_manager,\n    ThreadLocal::SlotAllocator& tls, Event::Dispatcher& main_thread_dispatcher)\n    : ExtensionConfigBase(proto_config.common_config(), std::move(config_factory), admin,\n                          singleton_manager, tls, main_thread_dispatcher),\n      stats_(Filter::generateStats(stats_prefix, scope)) {}\n\nHttpTapConfigSharedPtr FilterConfigImpl::currentConfig() {\n  return currentConfigHelper<HttpTapConfig>();\n}\n\nFilterStats Filter::generateStats(const std::string& prefix, Stats::Scope& scope) {\n  // TODO(mattklein123): Consider whether we want to additionally namespace the stats on the\n  // filter's configured opaque ID.\n  std::string final_prefix = prefix + \"tap.\";\n  return {ALL_TAP_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))};\n}\n\nHttp::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {\n  if (tapper_ != nullptr) {\n    tapper_->onRequestHeaders(headers);\n  }\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool) {\n  if ((tapper_ != nullptr) && (0 != data.length())) {\n    tapper_->onRequestBody(data);\n  }\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus Filter::decodeTrailers(Http::RequestTrailerMap& trailers) {\n  if (tapper_ != nullptr) {\n    tapper_->onRequestTrailers(trailers);\n  }\n  return Http::FilterTrailersStatus::Continue;\n}\n\nHttp::FilterHeadersStatus 
Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) {\n  if (tapper_ != nullptr) {\n    tapper_->onResponseHeaders(headers);\n  }\n  return Http::FilterHeadersStatus::Continue;\n}\n\nHttp::FilterDataStatus Filter::encodeData(Buffer::Instance& data, bool) {\n  if ((tapper_ != nullptr) && (0 != data.length())) {\n    tapper_->onResponseBody(data);\n  }\n  return Http::FilterDataStatus::Continue;\n}\n\nHttp::FilterTrailersStatus Filter::encodeTrailers(Http::ResponseTrailerMap& trailers) {\n  if (tapper_ != nullptr) {\n    tapper_->onResponseTrailers(trailers);\n  }\n  return Http::FilterTrailersStatus::Continue;\n}\n\nvoid Filter::log(const Http::RequestHeaderMap*, const Http::ResponseHeaderMap*,\n                 const Http::ResponseTrailerMap*, const StreamInfo::StreamInfo&) {\n  if (tapper_ != nullptr && tapper_->onDestroyLog()) {\n    config_->stats().rq_tapped_.inc();\n  }\n}\n\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/tap/tap_filter.h",
    "content": "#pragma once\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/extensions/filters/http/tap/v3/tap.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"extensions/common/tap/extension_config_base.h\"\n#include \"extensions/filters/http/tap/tap_config.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\n\n/**\n * All stats for the tap filter. @see stats_macros.h\n */\n// clang-format off\n#define ALL_TAP_FILTER_STATS(COUNTER)                                                           \\\n  COUNTER(rq_tapped)\n// clang-format on\n\n/**\n * Wrapper struct for tap filter stats. @see stats_macros.h\n */\nstruct FilterStats {\n  ALL_TAP_FILTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Abstract filter configuration.\n */\nclass FilterConfig {\npublic:\n  virtual ~FilterConfig() = default;\n\n  /**\n   * @return the current tap configuration if there is one.\n   */\n  virtual HttpTapConfigSharedPtr currentConfig() PURE;\n\n  /**\n   * @return the filter stats.\n   */\n  virtual FilterStats& stats() PURE;\n};\n\nusing FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;\n\n/**\n * Configuration for the tap filter.\n */\nclass FilterConfigImpl : public FilterConfig, public Extensions::Common::Tap::ExtensionConfigBase {\npublic:\n  FilterConfigImpl(const envoy::extensions::filters::http::tap::v3::Tap& proto_config,\n                   const std::string& stats_prefix,\n                   Extensions::Common::Tap::TapConfigFactoryPtr&& config_factory,\n                   Stats::Scope& scope, Server::Admin& admin, Singleton::Manager& singleton_manager,\n                   ThreadLocal::SlotAllocator& tls, Event::Dispatcher& main_thread_dispatcher);\n\n  // FilterConfig\n  HttpTapConfigSharedPtr currentConfig() override;\n  FilterStats& stats() override { return stats_; }\n\nprivate:\n  
FilterStats stats_;\n};\n\n/**\n * HTTP tap filter.\n */\nclass Filter : public Http::StreamFilter, public AccessLog::Instance {\npublic:\n  Filter(FilterConfigSharedPtr config) : config_(std::move(config)) {}\n\n  static FilterStats generateStats(const std::string& prefix, Stats::Scope& scope);\n\n  // Http::StreamFilterBase\n  void onDestroy() override {}\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    HttpTapConfigSharedPtr config = config_->currentConfig();\n    tapper_ = config ? config->createPerRequestTapper(callbacks.streamId()) : nullptr;\n  }\n\n  // Http::StreamEncoderFilter\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    return Http::FilterHeadersStatus::Continue;\n  }\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override;\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override {\n    return Http::FilterMetadataStatus::Continue;\n  }\n  void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks&) override {}\n\n  // AccessLog::Instance\n  void log(const Http::RequestHeaderMap* request_headers,\n           const Http::ResponseHeaderMap* response_headers,\n           const Http::ResponseTrailerMap* response_trailers,\n           const StreamInfo::StreamInfo& stream_info) override;\n\nprivate:\n  FilterConfigSharedPtr 
config_;\n  HttpPerRequestTapperPtr tapper_;\n};\n\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\n# Public docs: docs/root/configuration/http_filters/wasm_filter.rst\n\nenvoy_cc_library(\n    name = \"wasm_filter_lib\",\n    srcs = [\"wasm_filter.cc\"],\n    hdrs = [\"wasm_filter.h\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/http/wasm/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \":wasm_filter_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/wasm/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/http/wasm/config.cc",
    "content": "#include \"extensions/filters/http/wasm/config.h\"\n\n#include \"envoy/extensions/filters/http/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/config/datasource.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/filters/http/wasm/wasm_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Wasm {\n\nHttp::FilterFactoryCb WasmFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::http::wasm::v3::Wasm& proto_config, const std::string&,\n    Server::Configuration::FactoryContext& context) {\n  auto filter_config = std::make_shared<FilterConfig>(proto_config, context);\n  return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n    auto filter = filter_config->createFilter();\n    if (!filter) { // Fail open\n      return;\n    }\n    callbacks.addStreamFilter(filter);\n    callbacks.addAccessLogHandler(filter);\n  };\n}\n\n/**\n * Static registration for the Wasm filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(WasmFilterConfig, Server::Configuration::NamedHttpFilterConfigFactory);\n\n} // namespace Wasm\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/wasm/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/wasm/v3/wasm.pb.h\"\n#include \"envoy/extensions/filters/http/wasm/v3/wasm.pb.validate.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Wasm {\n\n/**\n * Config registration for the Wasm filter. @see NamedHttpFilterConfigFactory.\n */\nclass WasmFilterConfig\n    : public Common::FactoryBase<envoy::extensions::filters::http::wasm::v3::Wasm> {\npublic:\n  WasmFilterConfig() : FactoryBase(HttpFilterNames::get().Wasm) {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::http::wasm::v3::Wasm& proto_config, const std::string&,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace Wasm\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/wasm/wasm_filter.cc",
    "content": "#include \"extensions/filters/http/wasm/wasm_filter.h\"\n\n#include \"envoy/http/codes.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Wasm {\n\nFilterConfig::FilterConfig(const envoy::extensions::filters::http::wasm::v3::Wasm& config,\n                           Server::Configuration::FactoryContext& context)\n    : tls_slot_(context.threadLocal().allocateSlot()) {\n  plugin_ = std::make_shared<Common::Wasm::Plugin>(\n      config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(),\n      config.config().vm_config().runtime(),\n      Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(),\n      context.direction(), context.localInfo(), &context.listenerMetadata());\n\n  auto plugin = plugin_;\n  auto callback = [plugin, this](const Common::Wasm::WasmHandleSharedPtr& base_wasm) {\n    // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call.\n    tls_slot_->set(\n        [base_wasm,\n         plugin](Event::Dispatcher& dispatcher) -> std::shared_ptr<ThreadLocal::ThreadLocalObject> {\n          if (!base_wasm) {\n            return nullptr;\n          }\n          return std::static_pointer_cast<ThreadLocal::ThreadLocalObject>(\n              Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, dispatcher));\n        });\n  };\n\n  if (!Common::Wasm::createWasm(\n          config.config().vm_config(), plugin_, context.scope().createScope(\"\"),\n          context.clusterManager(), context.initManager(), context.dispatcher(), context.api(),\n          context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) {\n    throw Common::Wasm::WasmException(\n        
fmt::format(\"Unable to create Wasm HTTP filter {}\", plugin->name_));\n  }\n}\n\n} // namespace Wasm\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/wasm/wasm_filter.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/extensions/filters/http/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Wasm {\n\nusing Envoy::Extensions::Common::Wasm::Context;\nusing Envoy::Extensions::Common::Wasm::Wasm;\nusing Envoy::Extensions::Common::Wasm::WasmHandle;\n\nclass FilterConfig : Logger::Loggable<Logger::Id::wasm> {\npublic:\n  FilterConfig(const envoy::extensions::filters::http::wasm::v3::Wasm& proto_config,\n               Server::Configuration::FactoryContext& context);\n\n  std::shared_ptr<Context> createFilter() {\n    Wasm* wasm = nullptr;\n    if (tls_slot_->get()) {\n      wasm = tls_slot_->getTyped<WasmHandle>().wasm().get();\n    }\n    if (plugin_->fail_open_ && (!wasm || wasm->isFailed())) {\n      return nullptr;\n    }\n    if (wasm && !root_context_id_) {\n      root_context_id_ = wasm->getRootContext(plugin_->root_id_)->id();\n    }\n    return std::make_shared<Context>(wasm, root_context_id_, plugin_);\n  }\n\nprivate:\n  uint32_t root_context_id_{0};\n  Envoy::Extensions::Common::Wasm::PluginSharedPtr plugin_;\n  ThreadLocal::SlotPtr tls_slot_;\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_;\n};\n\ntypedef std::shared_ptr<FilterConfig> FilterConfigSharedPtr;\n\n} // namespace Wasm\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/http/well_known_names.h",
    "content": "#pragma once\n\n#include \"common/config/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\n\n/**\n * Well-known http filter names.\n * NOTE: New filters should use the well known name: envoy.filters.http.name.\n */\nclass HttpFilterNameValues {\npublic:\n  // Buffer filter\n  const std::string Buffer = \"envoy.filters.http.buffer\";\n  // Cache filter\n  const std::string Cache = \"envoy.filters.http.cache\";\n  // CDN Loop filter\n  const std::string CdnLoop = \"envoy.filters.http.cdn_loop\";\n  // Compressor filter\n  const std::string Compressor = \"envoy.filters.http.compressor\";\n  // CORS filter\n  const std::string Cors = \"envoy.filters.http.cors\";\n  // CSRF filter\n  const std::string Csrf = \"envoy.filters.http.csrf\";\n  // Decompressor filter\n  const std::string Decompressor = \"envoy.filters.http.decompressor\";\n  // Dynamo filter\n  const std::string Dynamo = \"envoy.filters.http.dynamo\";\n  // Fault filter\n  const std::string Fault = \"envoy.filters.http.fault\";\n  // GRPC http1 bridge filter\n  const std::string GrpcHttp1Bridge = \"envoy.filters.http.grpc_http1_bridge\";\n  // GRPC json transcoder filter\n  const std::string GrpcJsonTranscoder = \"envoy.filters.http.grpc_json_transcoder\";\n  // GRPC web filter\n  const std::string GrpcWeb = \"envoy.filters.http.grpc_web\";\n  // GRPC http1 reverse bridge filter\n  const std::string GrpcHttp1ReverseBridge = \"envoy.filters.http.grpc_http1_reverse_bridge\";\n  // GRPC telemetry\n  const std::string GrpcStats = \"envoy.filters.http.grpc_stats\";\n  // Gzip filter\n  const std::string EnvoyGzip = \"envoy.filters.http.gzip\";\n  // IP tagging filter\n  const std::string IpTagging = \"envoy.filters.http.ip_tagging\";\n  // Rate limit filter\n  const std::string RateLimit = \"envoy.filters.http.ratelimit\";\n  // Router filter\n  const std::string Router = \"envoy.filters.http.router\";\n  // Health checking filter\n  const std::string 
HealthCheck = \"envoy.filters.http.health_check\";\n  // Lua filter\n  const std::string Lua = \"envoy.filters.http.lua\";\n  // On-demand RDS updates filter\n  const std::string OnDemand = \"envoy.filters.http.on_demand\";\n  // Squash filter\n  const std::string Squash = \"envoy.filters.http.squash\";\n  // External Authorization filter\n  const std::string ExtAuthorization = \"envoy.filters.http.ext_authz\";\n  // RBAC HTTP Authorization filter\n  const std::string Rbac = \"envoy.filters.http.rbac\";\n  // JWT authentication filter\n  const std::string JwtAuthn = \"envoy.filters.http.jwt_authn\";\n  // Header to metadata filter\n  const std::string HeaderToMetadata = \"envoy.filters.http.header_to_metadata\";\n  // Tap filter\n  const std::string Tap = \"envoy.filters.http.tap\";\n  // Adaptive concurrency limit filter\n  const std::string AdaptiveConcurrency = \"envoy.filters.http.adaptive_concurrency\";\n  // Admission control filter\n  const std::string AdmissionControl = \"envoy.filters.http.admission_control\";\n  // Original Src Filter\n  const std::string OriginalSrc = \"envoy.filters.http.original_src\";\n  // Dynamic forward proxy filter\n  const std::string DynamicForwardProxy = \"envoy.filters.http.dynamic_forward_proxy\";\n  // WebAssembly filter\n  const std::string Wasm = \"envoy.filters.http.wasm\";\n  // AWS request signing filter\n  const std::string AwsRequestSigning = \"envoy.filters.http.aws_request_signing\";\n  // AWS Lambda filter\n  const std::string AwsLambda = \"envoy.filters.http.aws_lambda\";\n  // OAuth filter\n  const std::string OAuth = \"envoy.filters.http.oauth\";\n};\n\nusing HttpFilterNames = ConstSingleton<HttpFilterNameValues>;\n\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # Well known names are public.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/listener/http_inspector/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# HTTP inspector filter for sniffing HTTP protocol and setting HTTP version to a FilterChain.\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"http_inspector_lib\",\n    srcs = [\"http_inspector.cc\"],\n    hdrs = [\"http_inspector.h\"],\n    external_deps = [\"http_parser\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \":http_inspector_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/listener:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/listener/http_inspector/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/listener/http_inspector/config.cc",
    "content": "#include \"envoy/extensions/filters/listener/http_inspector/v3/http_inspector.pb.h\"\n#include \"envoy/extensions/filters/listener/http_inspector/v3/http_inspector.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/listener/http_inspector/http_inspector.h\"\n#include \"extensions/filters/listener/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace HttpInspector {\n\n/**\n * Config registration for the Http inspector filter. @see NamedNetworkFilterConfigFactory.\n */\nclass HttpInspectorConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory {\npublic:\n  // NamedListenerFilterConfigFactory\n  Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto(\n      const Protobuf::Message&,\n      const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n      Server::Configuration::ListenerFactoryContext& context) override {\n    ConfigSharedPtr config(std::make_shared<Config>(context.scope()));\n    return\n        [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void {\n          filter_manager.addAcceptFilter(listener_filter_matcher, std::make_unique<Filter>(config));\n        };\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::extensions::filters::listener::http_inspector::v3::HttpInspector>();\n  }\n\n  std::string name() const override { return ListenerFilterNames::get().HttpInspector; }\n};\n\n/**\n * Static registration for the http inspector filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(HttpInspectorConfigFactory,\n                 Server::Configuration::NamedListenerFilterConfigFactory){\n    \"envoy.listener.http_inspector\"};\n\n} // namespace HttpInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/http_inspector/http_inspector.cc",
    "content": "#include \"extensions/filters/listener/http_inspector/http_inspector.h\"\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_split.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace HttpInspector {\n\nConfig::Config(Stats::Scope& scope)\n    : stats_{ALL_HTTP_INSPECTOR_STATS(POOL_COUNTER_PREFIX(scope, \"http_inspector.\"))} {}\n\nconst absl::string_view Filter::HTTP2_CONNECTION_PREFACE = \"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n\";\nthread_local uint8_t Filter::buf_[Config::MAX_INSPECT_SIZE];\n\nFilter::Filter(const ConfigSharedPtr config) : config_(config) {\n  http_parser_init(&parser_, HTTP_REQUEST);\n}\n\nhttp_parser_settings Filter::settings_{\n    nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,\n};\n\nNetwork::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) {\n  ENVOY_LOG(debug, \"http inspector: new connection accepted\");\n\n  const Network::ConnectionSocket& socket = cb.socket();\n\n  const absl::string_view transport_protocol = socket.detectedTransportProtocol();\n  if (!transport_protocol.empty() &&\n      transport_protocol != TransportSockets::TransportProtocolNames::get().RawBuffer) {\n    ENVOY_LOG(trace, \"http inspector: cannot inspect http protocol with transport socket {}\",\n              transport_protocol);\n    return Network::FilterStatus::Continue;\n  }\n\n  cb_ = &cb;\n  const ParseState parse_state = onRead();\n  switch (parse_state) {\n  case ParseState::Error:\n    // As per discussion in https://github.com/envoyproxy/envoy/issues/7864\n   
 // we don't add new enum in FilterStatus so we have to signal the caller\n    // the new condition.\n    cb.socket().close();\n    return Network::FilterStatus::StopIteration;\n  case ParseState::Done:\n    return Network::FilterStatus::Continue;\n  case ParseState::Continue:\n    // do nothing but create the event\n    ASSERT(file_event_ == nullptr);\n    file_event_ = cb.socket().ioHandle().createFileEvent(\n        cb.dispatcher(),\n        [this](uint32_t events) {\n          ENVOY_LOG(trace, \"http inspector event: {}\", events);\n          // inspector is always peeking and can never determine EOF.\n          // Use this event type to avoid listener timeout on the OS supporting\n          // FileReadyType::Closed.\n          bool end_stream = events & Event::FileReadyType::Closed;\n\n          const ParseState parse_state = onRead();\n          switch (parse_state) {\n          case ParseState::Error:\n            file_event_.reset();\n            cb_->continueFilterChain(false);\n            break;\n          case ParseState::Done:\n            file_event_.reset();\n            // Do not skip following listener filters.\n            cb_->continueFilterChain(true);\n            break;\n          case ParseState::Continue:\n            if (end_stream) {\n              // Parser fails to determine http but the end of stream is reached. 
Fallback to\n              // non-http.\n              done(false);\n              file_event_.reset();\n              cb_->continueFilterChain(true);\n            }\n            // do nothing but wait for the next event\n            break;\n          }\n        },\n        Event::PlatformDefaultTriggerType,\n        Event::FileReadyType::Read | Event::FileReadyType::Closed);\n    return Network::FilterStatus::StopIteration;\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nParseState Filter::onRead() {\n  auto result = cb_->socket().ioHandle().recv(buf_, Config::MAX_INSPECT_SIZE, MSG_PEEK);\n  ENVOY_LOG(trace, \"http inspector: recv: {}\", result.rc_);\n  if (!result.ok()) {\n    if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) {\n      return ParseState::Continue;\n    }\n    config_->stats().read_error_.inc();\n    return ParseState::Error;\n  }\n\n  const auto parse_state =\n      parseHttpHeader(absl::string_view(reinterpret_cast<const char*>(buf_), result.rc_));\n  switch (parse_state) {\n  case ParseState::Continue:\n    // do nothing but wait for the next event\n    return ParseState::Continue;\n  case ParseState::Error:\n    done(false);\n    return ParseState::Done;\n  case ParseState::Done:\n    done(true);\n    return ParseState::Done;\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nParseState Filter::parseHttpHeader(absl::string_view data) {\n  const size_t len = std::min(data.length(), Filter::HTTP2_CONNECTION_PREFACE.length());\n  if (Filter::HTTP2_CONNECTION_PREFACE.compare(0, len, data, 0, len) == 0) {\n    if (data.length() < Filter::HTTP2_CONNECTION_PREFACE.length()) {\n      return ParseState::Continue;\n    }\n    ENVOY_LOG(trace, \"http inspector: http2 connection preface found\");\n    protocol_ = \"HTTP/2\";\n    return ParseState::Done;\n  } else {\n    absl::string_view new_data = data.substr(parser_.nread);\n    const size_t pos = new_data.find_first_of(\"\\r\\n\");\n\n    if (pos != absl::string_view::npos) {\n      // Include 
\\r or \\n\n      new_data = new_data.substr(0, pos + 1);\n      ssize_t rc = http_parser_execute(&parser_, &settings_, new_data.data(), new_data.length());\n      ENVOY_LOG(trace, \"http inspector: http_parser parsed {} chars, error code: {}\", rc,\n                HTTP_PARSER_ERRNO(&parser_));\n\n      // Errors in parsing HTTP.\n      if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) {\n        return ParseState::Error;\n      }\n\n      if (parser_.http_major == 1 && parser_.http_minor == 1) {\n        protocol_ = Http::Headers::get().ProtocolStrings.Http11String;\n      } else {\n        // Set other HTTP protocols to HTTP/1.0\n        protocol_ = Http::Headers::get().ProtocolStrings.Http10String;\n      }\n      return ParseState::Done;\n    } else {\n      ssize_t rc = http_parser_execute(&parser_, &settings_, new_data.data(), new_data.length());\n      ENVOY_LOG(trace, \"http inspector: http_parser parsed {} chars, error code: {}\", rc,\n                HTTP_PARSER_ERRNO(&parser_));\n\n      // Errors in parsing HTTP.\n      if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) {\n        return ParseState::Error;\n      } else {\n        return ParseState::Continue;\n      }\n    }\n  }\n}\n\nvoid Filter::done(bool success) {\n  ENVOY_LOG(trace, \"http inspector: done: {}\", success);\n\n  if (success) {\n    absl::string_view protocol;\n    if (protocol_ == Http::Headers::get().ProtocolStrings.Http10String) {\n      config_->stats().http10_found_.inc();\n      protocol = Http::Utility::AlpnNames::get().Http10;\n    } else if (protocol_ == Http::Headers::get().ProtocolStrings.Http11String) {\n      config_->stats().http11_found_.inc();\n      protocol = Http::Utility::AlpnNames::get().Http11;\n    } else {\n      ASSERT(protocol_ == \"HTTP/2\");\n      config_->stats().http2_found_.inc();\n      // h2 HTTP/2 over TLS, h2c HTTP/2 over TCP\n      // TODO(yxue): use detected protocol from 
http inspector and support h2c token in HCM\n      protocol = Http::Utility::AlpnNames::get().Http2c;\n    }\n\n    cb_->socket().setRequestedApplicationProtocols({protocol});\n  } else {\n    config_->stats().http_not_found_.inc();\n  }\n}\n\n} // namespace HttpInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/http_inspector/http_inspector.h",
    "content": "#pragma once\n\n#include <http_parser.h>\n\n#include \"envoy/event/file_event.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace HttpInspector {\n\n/**\n * All stats for the http inspector. @see stats_macros.h\n */\n#define ALL_HTTP_INSPECTOR_STATS(COUNTER)                                                          \\\n  COUNTER(read_error)                                                                              \\\n  COUNTER(http10_found)                                                                            \\\n  COUNTER(http11_found)                                                                            \\\n  COUNTER(http2_found)                                                                             \\\n  COUNTER(http_not_found)\n\n/**\n * Definition of all stats for the Http inspector. @see stats_macros.h\n */\nstruct HttpInspectorStats {\n  ALL_HTTP_INSPECTOR_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nenum class ParseState {\n  // Parse result is out. 
It could be http family or empty.\n  Done,\n  // Parser expects more data.\n  Continue,\n  // Parser reports unrecoverable error.\n  Error\n};\n\n/**\n * Global configuration for http inspector.\n */\nclass Config {\npublic:\n  Config(Stats::Scope& scope);\n\n  const HttpInspectorStats& stats() const { return stats_; }\n\n  static constexpr uint32_t MAX_INSPECT_SIZE = 8192;\n\nprivate:\n  HttpInspectorStats stats_;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\n/**\n * Http inspector listener filter.\n */\nclass Filter : public Network::ListenerFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  Filter(const ConfigSharedPtr config);\n\n  // Network::ListenerFilter\n  Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override;\n\nprivate:\n  static const absl::string_view HTTP2_CONNECTION_PREFACE;\n\n  ParseState onRead();\n  void done(bool success);\n  ParseState parseHttpHeader(absl::string_view data);\n\n  const absl::flat_hash_set<std::string>& httpProtocols() const;\n  const absl::flat_hash_set<std::string>& http1xMethods() const;\n\n  ConfigSharedPtr config_;\n  Network::ListenerFilterCallbacks* cb_{nullptr};\n  Event::FileEventPtr file_event_;\n  absl::string_view protocol_;\n  http_parser parser_;\n  static http_parser_settings settings_;\n\n  // Use static thread_local to avoid allocating buffer over and over again.\n  static thread_local uint8_t buf_[Config::MAX_INSPECT_SIZE];\n};\n\n} // namespace HttpInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/original_dst/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# ORIGINAL_DST iptables redirection listener filter\n# Public docs: docs/root/configuration/listener_filters/original_dst_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"original_dst_lib\",\n    srcs = [\"original_dst.cc\"],\n    hdrs = [\"original_dst.h\"],\n    deps = [\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/network:utility_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":original_dst_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/listener:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/listener/original_dst/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/listener/original_dst/config.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/listener/original_dst/v3/original_dst.pb.h\"\n#include \"envoy/extensions/filters/listener/original_dst/v3/original_dst.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/listener/original_dst/original_dst.h\"\n#include \"extensions/filters/listener/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalDst {\n\n/**\n * Config registration for the original dst filter. @see NamedNetworkFilterConfigFactory.\n */\nclass OriginalDstConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory {\npublic:\n  // NamedListenerFilterConfigFactory\n  Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto(\n      const Protobuf::Message&,\n      const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n      Server::Configuration::ListenerFactoryContext&) override {\n    return [listener_filter_matcher](Network::ListenerFilterManager& filter_manager) -> void {\n      filter_manager.addAcceptFilter(listener_filter_matcher,\n                                     std::make_unique<OriginalDstFilter>());\n    };\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<envoy::extensions::filters::listener::original_dst::v3::OriginalDst>();\n  }\n\n  std::string name() const override { return ListenerFilterNames::get().OriginalDst; }\n};\n\n/**\n * Static registration for the original dst filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(OriginalDstConfigFactory, Server::Configuration::NamedListenerFilterConfigFactory){\n    \"envoy.listener.original_dst\"};\n\n} // namespace OriginalDst\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/original_dst/original_dst.cc",
    "content": "#include \"extensions/filters/listener/original_dst/original_dst.h\"\n\n#include \"envoy/network/listen_socket.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalDst {\n\nNetwork::Address::InstanceConstSharedPtr OriginalDstFilter::getOriginalDst(Network::Socket& sock) {\n  return Network::Utility::getOriginalDst(sock);\n}\n\nNetwork::FilterStatus OriginalDstFilter::onAccept(Network::ListenerFilterCallbacks& cb) {\n  ENVOY_LOG(debug, \"original_dst: New connection accepted\");\n  Network::ConnectionSocket& socket = cb.socket();\n\n  if (socket.addressType() == Network::Address::Type::Ip) {\n    Network::Address::InstanceConstSharedPtr original_local_address = getOriginalDst(socket);\n\n    // A listener that has the use_original_dst flag set to true can still receive\n    // connections that are NOT redirected using iptables. If a connection was not redirected,\n    // the address returned by getOriginalDst() matches the local address of the new socket.\n    // In this case the listener handles the connection directly and does not hand it off.\n    if (original_local_address) {\n      // Restore the local address to the original one.\n      socket.restoreLocalAddress(original_local_address);\n    }\n  }\n\n  return Network::FilterStatus::Continue;\n}\n\n} // namespace OriginalDst\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/original_dst/original_dst.h",
    "content": "#pragma once\n\n#include \"envoy/network/filter.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalDst {\n\n/**\n * Implementation of an original destination listener filter.\n */\nclass OriginalDstFilter : public Network::ListenerFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  virtual Network::Address::InstanceConstSharedPtr getOriginalDst(Network::Socket& sock);\n\n  // Network::ListenerFilter\n  Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override;\n};\n\n} // namespace OriginalDst\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/original_src/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# A filter for mirroring the downstream remote address on the upstream's source.\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"config_lib\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    deps = [\"@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg_cc_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"original_src_lib\",\n    srcs = [\"original_src.cc\"],\n    hdrs = [\"original_src.h\"],\n    deps = [\n        \":config_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/extensions/filters/common/original_src:socket_option_factory_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",  # The extension build system requires a library named config\n    srcs = [\"original_src_config_factory.cc\"],\n    hdrs = [\"original_src_config_factory.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    status = \"alpha\",\n    deps = [\n        \":config_lib\",\n        \":original_src_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/listener:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/listener/original_src/config.cc",
    "content": "#include \"extensions/filters/listener/original_src/config.h\"\n\n#include \"envoy/extensions/filters/listener/original_src/v3/original_src.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\n\nConfig::Config(const envoy::extensions::filters::listener::original_src::v3::OriginalSrc& config)\n    : use_port_(config.bind_port()), mark_(config.mark()) {}\n\n} // namespace OriginalSrc\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/original_src/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/listener/original_src/v3/original_src.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\nclass Config {\npublic:\n  Config() = default;\n  Config(const envoy::extensions::filters::listener::original_src::v3::OriginalSrc& config);\n\n  bool usePort() const { return use_port_; }\n  uint32_t mark() const { return mark_; }\n\nprivate:\n  bool use_port_ = false;\n  uint32_t mark_ = 0;\n};\n} // namespace OriginalSrc\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/original_src/original_src.cc",
    "content": "#include \"extensions/filters/listener/original_src/original_src.h\"\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/filters/common/original_src/socket_option_factory.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\n\nOriginalSrcFilter::OriginalSrcFilter(const Config& config) : config_(config) {}\n\nNetwork::FilterStatus OriginalSrcFilter::onAccept(Network::ListenerFilterCallbacks& cb) {\n  auto& socket = cb.socket();\n  auto address = socket.remoteAddress();\n  ASSERT(address);\n\n  ENVOY_LOG(debug,\n            \"Got a new connection in the original_src filter for address {}. Marking with {}\",\n            address->asString(), config_.mark());\n\n  if (address->type() != Network::Address::Type::Ip) {\n    // nothing we can do with this.\n    return Network::FilterStatus::Continue;\n  }\n  auto options_to_add =\n      Filters::Common::OriginalSrc::buildOriginalSrcOptions(std::move(address), config_.mark());\n  socket.addOptions(std::move(options_to_add));\n  return Network::FilterStatus::Continue;\n}\n\n} // namespace OriginalSrc\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/original_src/original_src.h",
    "content": "#pragma once\n\n#include \"envoy/network/address.h\"\n#include \"envoy/network/filter.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/listener/original_src/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\n\n/**\n * Implements the Original Src network filter. This filter places the source address of the socket\n * into an option which will alter be used to partition upstream connections.\n * This does not support non-ip (e.g. AF_UNIX) connections, which will be failed and counted.\n */\nclass OriginalSrcFilter : public Network::ListenerFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  OriginalSrcFilter(const Config& config);\n\n  // Network::ListenerFilter\n  Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override;\n\nprivate:\n  Config config_;\n};\n\n} // namespace OriginalSrc\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/original_src/original_src_config_factory.cc",
    "content": "#include \"extensions/filters/listener/original_src/original_src_config_factory.h\"\n\n#include \"envoy/extensions/filters/listener/original_src/v3/original_src.pb.h\"\n#include \"envoy/extensions/filters/listener/original_src/v3/original_src.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/listener/original_src/config.h\"\n#include \"extensions/filters/listener/original_src/original_src.h\"\n#include \"extensions/filters/listener/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\n\nNetwork::ListenerFilterFactoryCb OriginalSrcConfigFactory::createListenerFilterFactoryFromProto(\n    const Protobuf::Message& message,\n    const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n    Server::Configuration::ListenerFactoryContext& context) {\n  auto proto_config = MessageUtil::downcastAndValidate<\n      const envoy::extensions::filters::listener::original_src::v3::OriginalSrc&>(\n      message, context.messageValidationVisitor());\n  Config config(proto_config);\n  return [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void {\n    filter_manager.addAcceptFilter(listener_filter_matcher,\n                                   std::make_unique<OriginalSrcFilter>(config));\n  };\n}\n\nProtobufTypes::MessagePtr OriginalSrcConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::filters::listener::original_src::v3::OriginalSrc>();\n}\n/**\n * Static registration for the original_src filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(OriginalSrcConfigFactory, Server::Configuration::NamedListenerFilterConfigFactory){\n    \"envoy.listener.original_src\"};\n\n} // namespace OriginalSrc\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/original_src/original_src_config_factory.h",
    "content": "#pragma once\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/listener/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\n/**\n * Config registration for the original_src filter.\n */\nclass OriginalSrcConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory {\npublic:\n  // NamedListenerFilterConfigFactory\n  Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto(\n      const Protobuf::Message& message,\n      const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n      Server::Configuration::ListenerFactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override { return ListenerFilterNames::get().OriginalSrc; }\n};\n\n} // namespace OriginalSrc\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/proxy_protocol/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Proxy protocol listener filter: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"proxy_protocol_lib\",\n    srcs = [\"proxy_protocol.cc\"],\n    hdrs = [\n        \"proxy_protocol.h\",\n        \"proxy_protocol_header.h\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/common/proxy_protocol:proxy_protocol_header_lib\",\n        \"//source/extensions/filters/listener:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/listener/proxy_protocol/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/listener:well_known_names\",\n        \"//source/extensions/filters/listener/proxy_protocol:proxy_protocol_lib\",\n        \"@envoy_api//envoy/extensions/filters/listener/proxy_protocol/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/listener/proxy_protocol/config.cc",
    "content": "#include <memory>\n\n#include \"envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h\"\n#include \"envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/listener/proxy_protocol/proxy_protocol.h\"\n#include \"extensions/filters/listener/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace ProxyProtocol {\n\n/**\n * Config registration for the proxy protocol filter. @see NamedNetworkFilterConfigFactory.\n */\nclass ProxyProtocolConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory {\npublic:\n  // NamedListenerFilterConfigFactory\n  Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto(\n      const Protobuf::Message& message,\n      const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n      Server::Configuration::ListenerFactoryContext& context) override {\n\n    // downcast it to the proxy protocol config\n    const auto& proto_config = MessageUtil::downcastAndValidate<\n        const envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol&>(\n        message, context.messageValidationVisitor());\n\n    ConfigSharedPtr config = std::make_shared<Config>(context.scope(), proto_config);\n    return\n        [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void {\n          filter_manager.addAcceptFilter(listener_filter_matcher, std::make_unique<Filter>(config));\n        };\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol>();\n  }\n\n  std::string name() const override { return ListenerFilterNames::get().ProxyProtocol; }\n};\n\n/**\n * Static registration for the proxy protocol filter. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(ProxyProtocolConfigFactory,\n                 Server::Configuration::NamedListenerFilterConfigFactory){\n    \"envoy.listener.proxy_protocol\"};\n\n} // namespace ProxyProtocol\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc",
    "content": "#include \"extensions/filters/listener/proxy_protocol/proxy_protocol.h\"\n\n#include <algorithm>\n#include <cstddef>\n#include <cstdint>\n#include <cstring>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n#include \"extensions/filters/listener/well_known_names.h\"\n\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V1_SIGNATURE;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V1_SIGNATURE_LEN;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ADDR_LEN_INET;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ADDR_LEN_INET6;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_AF_INET;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_AF_INET6;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_HEADER_LEN;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_LOCAL;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ONBEHALF_OF;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_SIGNATURE;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_SIGNATURE_LEN;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_TRANSPORT_DGRAM;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_TRANSPORT_STREAM;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_VERSION;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace ProxyProtocol 
{\n\nConfig::Config(\n    Stats::Scope& scope,\n    const envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol& proto_config)\n    : stats_{ALL_PROXY_PROTOCOL_STATS(POOL_COUNTER(scope))} {\n  for (const auto& rule : proto_config.rules()) {\n    tlv_types_[0xFF & rule.tlv_type()] = rule.on_tlv_present();\n  }\n}\n\nconst KeyValuePair* Config::isTlvTypeNeeded(uint8_t type) const {\n  auto tlv_type = tlv_types_.find(type);\n  if (tlv_types_.end() != tlv_type) {\n    return &tlv_type->second;\n  }\n\n  return nullptr;\n}\n\nsize_t Config::numberOfNeededTlvTypes() const { return tlv_types_.size(); }\n\nNetwork::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) {\n  ENVOY_LOG(debug, \"proxy_protocol: New connection accepted\");\n  Network::ConnectionSocket& socket = cb.socket();\n  ASSERT(file_event_.get() == nullptr);\n  file_event_ = socket.ioHandle().createFileEvent(\n      cb.dispatcher(),\n      [this](uint32_t events) {\n        ASSERT(events == Event::FileReadyType::Read);\n        onRead();\n      },\n      Event::PlatformDefaultTriggerType, Event::FileReadyType::Read);\n  cb_ = &cb;\n  return Network::FilterStatus::StopIteration;\n}\n\nvoid Filter::onRead() {\n  try {\n    onReadWorker();\n  } catch (const EnvoyException& ee) {\n    config_->stats_.downstream_cx_proxy_proto_error_.inc();\n    cb_->continueFilterChain(false);\n  }\n}\n\nvoid Filter::onReadWorker() {\n  Network::ConnectionSocket& socket = cb_->socket();\n\n  if ((!proxy_protocol_header_.has_value() && !readProxyHeader(socket.ioHandle())) ||\n      (proxy_protocol_header_.has_value() && !readExtensions(socket.ioHandle()))) {\n    // We return if a) we do not yet have the header, or b) we have the header but not yet all\n    // the extension data. 
In both cases we'll be called again when the socket is ready to read\n    // and pick up where we left off.\n    return;\n  }\n\n  if (proxy_protocol_header_.has_value() && !proxy_protocol_header_.value().local_command_) {\n    // If this is a local_command, we are not to override address\n    // Error check the source and destination fields. Most errors are caught by the address\n    // parsing above, but a malformed IPv6 address may combine with a malformed port and parse as\n    // an IPv6 address when parsing for an IPv4 address(for v1 mode). Remote address refers to the\n    // source address.\n    const auto remote_version = proxy_protocol_header_.value().remote_address_->ip()->version();\n    const auto local_version = proxy_protocol_header_.value().local_address_->ip()->version();\n    if (remote_version != proxy_protocol_header_.value().protocol_version_ ||\n        local_version != proxy_protocol_header_.value().protocol_version_) {\n      throw EnvoyException(\"failed to read proxy protocol\");\n    }\n    // Check that both addresses are valid unicast addresses, as required for TCP\n    if (!proxy_protocol_header_.value().remote_address_->ip()->isUnicastAddress() ||\n        !proxy_protocol_header_.value().local_address_->ip()->isUnicastAddress()) {\n      throw EnvoyException(\"failed to read proxy protocol\");\n    }\n\n    // Only set the local address if it really changed, and mark it as address being restored.\n    if (*proxy_protocol_header_.value().local_address_ != *socket.localAddress()) {\n      socket.restoreLocalAddress(proxy_protocol_header_.value().local_address_);\n    }\n    socket.setRemoteAddress(proxy_protocol_header_.value().remote_address_);\n  }\n\n  // Release the file event so that we do not interfere with the connection read events.\n  file_event_.reset();\n  cb_->continueFilterChain(true);\n}\n\nsize_t Filter::lenV2Address(char* buf) {\n  const uint8_t proto_family = buf[PROXY_PROTO_V2_SIGNATURE_LEN + 1];\n  const int ver_cmd = 
buf[PROXY_PROTO_V2_SIGNATURE_LEN];\n  size_t len;\n\n  if ((ver_cmd & 0xf) == PROXY_PROTO_V2_LOCAL) {\n    // According to the spec there is no address encoded, len=0, and we must ignore\n    return 0;\n  }\n\n  switch ((proto_family & 0xf0) >> 4) {\n  case PROXY_PROTO_V2_AF_INET:\n    len = PROXY_PROTO_V2_ADDR_LEN_INET;\n    break;\n  case PROXY_PROTO_V2_AF_INET6:\n    len = PROXY_PROTO_V2_ADDR_LEN_INET6;\n    break;\n  default:\n    throw EnvoyException(\"Unsupported V2 proxy protocol address family\");\n  }\n  return len;\n}\n\nvoid Filter::parseV2Header(char* buf) {\n  const int ver_cmd = buf[PROXY_PROTO_V2_SIGNATURE_LEN];\n  uint8_t upper_byte = buf[PROXY_PROTO_V2_HEADER_LEN - 2];\n  uint8_t lower_byte = buf[PROXY_PROTO_V2_HEADER_LEN - 1];\n  size_t hdr_addr_len = (upper_byte << 8) + lower_byte;\n\n  if ((ver_cmd & 0xf) == PROXY_PROTO_V2_LOCAL) {\n    // This is locally-initiated, e.g. health-check, and should not override remote address\n    proxy_protocol_header_.emplace(WireHeader{hdr_addr_len});\n    return;\n  }\n\n  // Only do connections on behalf of another user, not internally-driven health-checks. 
If\n  // its not on behalf of someone, or its not AF_INET{6} / STREAM/DGRAM, ignore and\n  // use the real-remote info\n  if ((ver_cmd & 0xf) == PROXY_PROTO_V2_ONBEHALF_OF) {\n    uint8_t proto_family = buf[PROXY_PROTO_V2_SIGNATURE_LEN + 1];\n    if (((proto_family & 0x0f) == PROXY_PROTO_V2_TRANSPORT_STREAM) ||\n        ((proto_family & 0x0f) == PROXY_PROTO_V2_TRANSPORT_DGRAM)) {\n      if (((proto_family & 0xf0) >> 4) == PROXY_PROTO_V2_AF_INET) {\n        PACKED_STRUCT(struct pp_ipv4_addr {\n          uint32_t src_addr;\n          uint32_t dst_addr;\n          uint16_t src_port;\n          uint16_t dst_port;\n        });\n        pp_ipv4_addr* v4;\n        v4 = reinterpret_cast<pp_ipv4_addr*>(&buf[PROXY_PROTO_V2_HEADER_LEN]);\n        sockaddr_in ra4, la4;\n        memset(&ra4, 0, sizeof(ra4));\n        memset(&la4, 0, sizeof(la4));\n        ra4.sin_family = AF_INET;\n        ra4.sin_port = v4->src_port;\n        ra4.sin_addr.s_addr = v4->src_addr;\n\n        la4.sin_family = AF_INET;\n        la4.sin_port = v4->dst_port;\n        la4.sin_addr.s_addr = v4->dst_addr;\n        proxy_protocol_header_.emplace(\n            WireHeader{hdr_addr_len - PROXY_PROTO_V2_ADDR_LEN_INET, Network::Address::IpVersion::v4,\n                       std::make_shared<Network::Address::Ipv4Instance>(&ra4),\n                       std::make_shared<Network::Address::Ipv4Instance>(&la4)});\n        return;\n      } else if (((proto_family & 0xf0) >> 4) == PROXY_PROTO_V2_AF_INET6) {\n        PACKED_STRUCT(struct pp_ipv6_addr {\n          uint8_t src_addr[16];\n          uint8_t dst_addr[16];\n          uint16_t src_port;\n          uint16_t dst_port;\n        });\n        pp_ipv6_addr* v6;\n        v6 = reinterpret_cast<pp_ipv6_addr*>(&buf[PROXY_PROTO_V2_HEADER_LEN]);\n        sockaddr_in6 ra6, la6;\n        memset(&ra6, 0, sizeof(ra6));\n        memset(&la6, 0, sizeof(la6));\n        ra6.sin6_family = AF_INET6;\n        ra6.sin6_port = v6->src_port;\n        memcpy(ra6.sin6_addr.s6_addr, 
v6->src_addr, sizeof(ra6.sin6_addr.s6_addr));\n\n        la6.sin6_family = AF_INET6;\n        la6.sin6_port = v6->dst_port;\n        memcpy(la6.sin6_addr.s6_addr, v6->dst_addr, sizeof(la6.sin6_addr.s6_addr));\n\n        proxy_protocol_header_.emplace(WireHeader{\n            hdr_addr_len - PROXY_PROTO_V2_ADDR_LEN_INET6, Network::Address::IpVersion::v6,\n            std::make_shared<Network::Address::Ipv6Instance>(ra6),\n            std::make_shared<Network::Address::Ipv6Instance>(la6)});\n        return;\n      }\n    }\n  }\n  throw EnvoyException(\"Unsupported command or address family or transport\");\n}\n\nvoid Filter::parseV1Header(char* buf, size_t len) {\n  std::string proxy_line;\n  proxy_line.assign(buf, len);\n  const auto trimmed_proxy_line = StringUtil::rtrim(proxy_line);\n\n  // Parse proxy protocol line with format: PROXY TCP4/TCP6/UNKNOWN SOURCE_ADDRESS\n  // DESTINATION_ADDRESS SOURCE_PORT DESTINATION_PORT.\n  const auto line_parts = StringUtil::splitToken(trimmed_proxy_line, \" \", true);\n  if (line_parts.size() < 2 || line_parts[0] != \"PROXY\") {\n    throw EnvoyException(\"failed to read proxy protocol\");\n  }\n\n  // If the line starts with UNKNOWN we know it's a proxy protocol line, so we can remove it from\n  // the socket and continue. 
According to spec \"real connection's parameters\" should be used, so\n  // we should NOT restore the addresses in this case.\n  if (line_parts[1] != \"UNKNOWN\") {\n    // If protocol not UNKNOWN, src and dst addresses have to be present.\n    if (line_parts.size() != 6) {\n      throw EnvoyException(\"failed to read proxy protocol\");\n    }\n\n    // TODO(gsagula): parseInternetAddressAndPort() could be modified to take two string_view\n    // arguments, so we can eliminate allocation here.\n    if (line_parts[1] == \"TCP4\") {\n      proxy_protocol_header_.emplace(\n          WireHeader{0, Network::Address::IpVersion::v4,\n                     Network::Utility::parseInternetAddressAndPort(\n                         std::string{line_parts[2]} + \":\" + std::string{line_parts[4]}),\n                     Network::Utility::parseInternetAddressAndPort(\n                         std::string{line_parts[3]} + \":\" + std::string{line_parts[5]})});\n    } else if (line_parts[1] == \"TCP6\") {\n      proxy_protocol_header_.emplace(\n          WireHeader{0, Network::Address::IpVersion::v6,\n                     Network::Utility::parseInternetAddressAndPort(\n                         \"[\" + std::string{line_parts[2]} + \"]:\" + std::string{line_parts[4]}),\n                     Network::Utility::parseInternetAddressAndPort(\n                         \"[\" + std::string{line_parts[3]} + \"]:\" + std::string{line_parts[5]})});\n    } else {\n      throw EnvoyException(\"failed to read proxy protocol\");\n    }\n  }\n}\n\nbool Filter::parseExtensions(Network::IoHandle& io_handle, uint8_t* buf, size_t buf_size,\n                             size_t* buf_off) {\n  // If we ever implement extensions elsewhere, be sure to\n  // continue to skip and ignore those for LOCAL.\n  while (proxy_protocol_header_.value().extensions_length_) {\n    int to_read = std::min(buf_size, proxy_protocol_header_.value().extensions_length_);\n    buf += (nullptr != buf_off) ? 
*buf_off : 0;\n    const auto recv_result = io_handle.recv(buf, to_read, 0);\n    if (!recv_result.ok()) {\n      if (recv_result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) {\n        return false;\n      }\n      throw EnvoyException(\"failed to read proxy protocol (no bytes avail)\");\n    }\n\n    proxy_protocol_header_.value().extensions_length_ -= recv_result.rc_;\n\n    if (nullptr != buf_off) {\n      *buf_off += recv_result.rc_;\n    }\n  }\n\n  return true;\n}\n\n/**\n * @note  A TLV is arranged in the following format:\n *        struct pp2_tlv {\n *          uint8_t type;\n *          uint8_t length_hi;\n *          uint8_t length_lo;\n *          uint8_t value[0];\n *        };\n *        See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details\n */\nvoid Filter::parseTlvs(const std::vector<uint8_t>& tlvs) {\n  size_t idx{0};\n  while (idx < tlvs.size()) {\n    const uint8_t tlv_type = tlvs[idx];\n    idx++;\n\n    if ((idx + 1) >= tlvs.size()) {\n      throw EnvoyException(\n          fmt::format(\"failed to read proxy protocol extension. No bytes for TLV length. \"\n                      \"Extension length is {}, current index is {}, current type is {}.\",\n                      tlvs.size(), idx, tlv_type));\n    }\n\n    const uint8_t tlv_length_upper = tlvs[idx];\n    const uint8_t tlv_length_lower = tlvs[idx + 1];\n    const size_t tlv_value_length = (tlv_length_upper << 8) + tlv_length_lower;\n    idx += 2;\n\n    // Get the value.\n    if ((idx + tlv_value_length - 1) >= tlvs.size()) {\n      throw EnvoyException(\n          fmt::format(\"failed to read proxy protocol extension. No bytes for TLV value. 
\"\n                      \"Extension length is {}, current index is {}, current type is {}, current \"\n                      \"value length is {}.\",\n                      tlvs.size(), idx, tlv_type, tlv_length_upper));\n    }\n\n    // Only save to dynamic metadata if this type of TLV is needed.\n    auto key_value_pair = config_->isTlvTypeNeeded(tlv_type);\n    if (nullptr != key_value_pair) {\n      ProtobufWkt::Value metadata_value;\n      metadata_value.set_string_value(reinterpret_cast<char const*>(tlvs.data() + idx),\n                                      tlv_value_length);\n\n      std::string metadata_key = key_value_pair->metadata_namespace().empty()\n                                     ? ListenerFilterNames::get().ProxyProtocol\n                                     : key_value_pair->metadata_namespace();\n\n      ProtobufWkt::Struct metadata(\n          (*cb_->dynamicMetadata().mutable_filter_metadata())[metadata_key]);\n      metadata.mutable_fields()->insert({key_value_pair->key(), metadata_value});\n      cb_->setDynamicMetadata(metadata_key, metadata);\n    } else {\n      ENVOY_LOG(trace, \"proxy_protocol: Skip TLV of type {} since it's not needed\", tlv_type);\n    }\n\n    idx += tlv_value_length;\n    ASSERT(idx <= tlvs.size());\n  }\n}\n\nbool Filter::readExtensions(Network::IoHandle& io_handle) {\n  // Parse and discard the extensions if this is a local command or there's no TLV needs to be saved\n  // to metadata.\n  if (proxy_protocol_header_.value().local_command_ || 0 == config_->numberOfNeededTlvTypes()) {\n    // buf_ is no longer in use so we re-use it to read/discard.\n    return parseExtensions(io_handle, reinterpret_cast<uint8_t*>(buf_), sizeof(buf_), nullptr);\n  }\n\n  // Initialize the buf_tlv_ only when we need to read the TLVs.\n  if (buf_tlv_.empty()) {\n    buf_tlv_.resize(proxy_protocol_header_.value().extensions_length_);\n  }\n\n  // Parse until we have all the TLVs in buf_tlv.\n  if (!parseExtensions(io_handle, 
buf_tlv_.data(), buf_tlv_.size(), &buf_tlv_off_)) {\n    return false;\n  }\n\n  parseTlvs(buf_tlv_);\n\n  return true;\n}\n\nbool Filter::readProxyHeader(Network::IoHandle& io_handle) {\n  while (buf_off_ < MAX_PROXY_PROTO_LEN_V2) {\n    const auto result =\n        io_handle.recv(buf_ + buf_off_, MAX_PROXY_PROTO_LEN_V2 - buf_off_, MSG_PEEK);\n\n    if (!result.ok()) {\n      if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) {\n        return false;\n      }\n      throw EnvoyException(\"failed to read proxy protocol (no bytes read)\");\n    }\n    ssize_t nread = result.rc_;\n\n    if (nread < 1) {\n      throw EnvoyException(\"failed to read proxy protocol (no bytes read)\");\n    }\n\n    if (buf_off_ + nread >= PROXY_PROTO_V2_HEADER_LEN) {\n      const char* sig = PROXY_PROTO_V2_SIGNATURE;\n      if (!memcmp(buf_, sig, PROXY_PROTO_V2_SIGNATURE_LEN)) {\n        header_version_ = V2;\n      } else if (memcmp(buf_, PROXY_PROTO_V1_SIGNATURE, PROXY_PROTO_V1_SIGNATURE_LEN)) {\n        // It is not v2, and can't be v1, so no sense hanging around: it is invalid\n        throw EnvoyException(\"failed to read proxy protocol (exceed max v1 header len)\");\n      }\n    }\n\n    if (header_version_ == V2) {\n      const int ver_cmd = buf_[PROXY_PROTO_V2_SIGNATURE_LEN];\n      if (((ver_cmd & 0xf0) >> 4) != PROXY_PROTO_V2_VERSION) {\n        throw EnvoyException(\"Unsupported V2 proxy protocol version\");\n      }\n      if (buf_off_ < PROXY_PROTO_V2_HEADER_LEN) {\n        ssize_t exp = PROXY_PROTO_V2_HEADER_LEN - buf_off_;\n        const auto read_result = io_handle.recv(buf_ + buf_off_, exp, 0);\n        if (!result.ok() || read_result.rc_ != uint64_t(exp)) {\n          throw EnvoyException(\"failed to read proxy protocol (remote closed)\");\n        }\n        buf_off_ += read_result.rc_;\n        nread -= read_result.rc_;\n      }\n      ssize_t addr_len = lenV2Address(buf_);\n      uint8_t upper_byte = buf_[PROXY_PROTO_V2_HEADER_LEN - 2];\n      
uint8_t lower_byte = buf_[PROXY_PROTO_V2_HEADER_LEN - 1];\n      ssize_t hdr_addr_len = (upper_byte << 8) + lower_byte;\n      if (hdr_addr_len < addr_len) {\n        throw EnvoyException(\"failed to read proxy protocol (insufficient data)\");\n      }\n      if (ssize_t(buf_off_) + nread >= PROXY_PROTO_V2_HEADER_LEN + addr_len) {\n        ssize_t missing = (PROXY_PROTO_V2_HEADER_LEN + addr_len) - buf_off_;\n        const auto read_result = io_handle.recv(buf_ + buf_off_, missing, 0);\n        if (!result.ok() || read_result.rc_ != uint64_t(missing)) {\n          throw EnvoyException(\"failed to read proxy protocol (remote closed)\");\n        }\n        buf_off_ += read_result.rc_;\n        parseV2Header(buf_);\n        // The TLV remain, they are read/discard in parseExtensions() which is called from the\n        // parent (if needed).\n        return true;\n      } else {\n        const auto result = io_handle.recv(buf_ + buf_off_, nread, 0);\n        nread = result.rc_;\n        if (!result.ok()) {\n          throw EnvoyException(\"failed to read proxy protocol (remote closed)\");\n        }\n        buf_off_ += nread;\n      }\n    } else {\n      // continue searching buf_ from where we left off\n      for (; search_index_ < buf_off_ + nread; search_index_++) {\n        if (buf_[search_index_] == '\\n' && buf_[search_index_ - 1] == '\\r') {\n          if (search_index_ == 1) {\n            // This could be the binary protocol. It cannot be the ascii protocol\n            header_version_ = InProgress;\n          } else {\n            header_version_ = V1;\n            search_index_++;\n          }\n          break;\n        }\n      }\n\n      // If we bailed on the first char, we might be v2, but are for sure not v1. Thus we\n      // can read up to min(PROXY_PROTO_V2_HEADER_LEN, bytes_avail). If we bailed after first\n      // char, but before we hit \\r\\n, read up to search_index_. 
We're asking only for\n      // bytes we've already seen so there should be no block or fail\n      size_t ntoread;\n      if (header_version_ == InProgress) {\n        ntoread = nread;\n      } else {\n        ntoread = search_index_ - buf_off_;\n      }\n\n      const auto result = io_handle.recv(buf_ + buf_off_, ntoread, 0);\n      nread = result.rc_;\n      ASSERT(result.ok() && size_t(nread) == ntoread);\n\n      buf_off_ += nread;\n\n      if (header_version_ == V1) {\n        parseV1Header(buf_, buf_off_);\n        return true;\n      }\n    }\n  }\n\n  throw EnvoyException(\"failed to read proxy protocol (exceed max v2 header len)\");\n}\n\n} // namespace ProxyProtocol\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/proxy_protocol/proxy_protocol.h",
    "content": "#pragma once\n\n#include \"envoy/event/file_event.h\"\n#include \"envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"proxy_protocol_header.h\"\n\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ADDR_LEN_UNIX;\nusing Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_HEADER_LEN;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace ProxyProtocol {\n\nusing KeyValuePair =\n    envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol::KeyValuePair;\n\n/**\n * All stats for the proxy protocol. @see stats_macros.h\n */\n// clang-format off\n#define ALL_PROXY_PROTOCOL_STATS(COUNTER)                                                          \\\n  COUNTER(downstream_cx_proxy_proto_error)\n// clang-format on\n\n/**\n * Definition of all stats for the proxy protocol. 
@see stats_macros.h\n */\nstruct ProxyProtocolStats {\n  ALL_PROXY_PROTOCOL_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Global configuration for Proxy Protocol listener filter.\n */\nclass Config : public Logger::Loggable<Logger::Id::filter> {\npublic:\n  Config(\n      Stats::Scope& scope,\n      const envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol& proto_config);\n\n  ProxyProtocolStats stats_;\n\n  /**\n   * Return null if the type of TLV is not needed otherwise a pointer to the KeyValuePair for\n   * emitting to dynamic metadata.\n   */\n  const KeyValuePair* isTlvTypeNeeded(uint8_t type) const;\n\n  /**\n   * Number of TLV types that need to be parsed and saved to dynamic metadata.\n   */\n  size_t numberOfNeededTlvTypes() const;\n\nprivate:\n  absl::flat_hash_map<uint8_t, KeyValuePair> tlv_types_;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\nenum ProxyProtocolVersion { Unknown = -1, InProgress = -2, V1 = 1, V2 = 2 };\n\n/**\n * Implementation the PROXY Protocol listener filter\n * (https://github.com/haproxy/haproxy/blob/master/doc/proxy-protocol.txt)\n *\n * This implementation supports Proxy Protocol v1 (TCP/UDP, v4/v6),\n * and Proxy Protocol v2 (TCP/UDP, v4/v6).\n *\n * Non INET (AF_UNIX) address family in v2 is not supported, will throw an error.\n * Extensions (TLV) in v2 are skipped over.\n */\nclass Filter : public Network::ListenerFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  Filter(const ConfigSharedPtr& config) : config_(config) {}\n\n  // Network::ListenerFilter\n  Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override;\n\nprivate:\n  static const size_t MAX_PROXY_PROTO_LEN_V2 =\n      PROXY_PROTO_V2_HEADER_LEN + PROXY_PROTO_V2_ADDR_LEN_UNIX;\n  static const size_t MAX_PROXY_PROTO_LEN_V1 = 108;\n\n  void onRead();\n  void onReadWorker();\n\n  /**\n   * Helper function that attempts to read the proxy header\n   * (delimited by \\r\\n if V1 format, or with length if V2)\n   
* throws EnvoyException on any socket errors.\n   * @return bool true valid header, false if more data is needed.\n   */\n  bool readProxyHeader(Network::IoHandle& io_handle);\n\n  /**\n   * Parse (and discard unknown) header extensions (until hdr.extensions_length == 0)\n   */\n  bool parseExtensions(Network::IoHandle& io_handle, uint8_t* buf, size_t buf_size,\n                       size_t* buf_off = nullptr);\n  void parseTlvs(const std::vector<uint8_t>& tlvs);\n  bool readExtensions(Network::IoHandle& io_handle);\n\n  /**\n   * Given a char * & len, parse the header as per spec\n   */\n  void parseV1Header(char* buf, size_t len);\n  void parseV2Header(char* buf);\n  size_t lenV2Address(char* buf);\n\n  Network::ListenerFilterCallbacks* cb_{};\n  Event::FileEventPtr file_event_;\n\n  // The offset in buf_ that has been fully read\n  size_t buf_off_{};\n\n  // The index in buf_ where the search for '\\r\\n' should continue from\n  size_t search_index_{1};\n\n  ProxyProtocolVersion header_version_{Unknown};\n\n  // Stores the portion of the first line that has been read so far.\n  char buf_[MAX_PROXY_PROTO_LEN_V2];\n\n  /**\n   * Store the extension TLVs if they need to be read.\n   */\n  std::vector<uint8_t> buf_tlv_;\n\n  /**\n   * The index in buf_tlv_ that has been fully read.\n   */\n  size_t buf_tlv_off_{};\n\n  ConfigSharedPtr config_;\n\n  absl::optional<WireHeader> proxy_protocol_header_;\n};\n\n} // namespace ProxyProtocol\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h",
    "content": "#pragma once\n\n#include \"envoy/network/address.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace ProxyProtocol {\n\nstruct WireHeader {\n  WireHeader(size_t extensions_length)\n      : extensions_length_(extensions_length), protocol_version_(Network::Address::IpVersion::v4),\n        remote_address_(nullptr), local_address_(nullptr), local_command_(true) {}\n  WireHeader(size_t extensions_length, Network::Address::IpVersion protocol_version,\n             Network::Address::InstanceConstSharedPtr remote_address,\n             Network::Address::InstanceConstSharedPtr local_address)\n      : extensions_length_(extensions_length), protocol_version_(protocol_version),\n        remote_address_(remote_address), local_address_(local_address), local_command_(false) {\n\n    ASSERT(extensions_length_ <= 65535);\n  }\n  size_t extensions_length_;\n  const Network::Address::IpVersion protocol_version_;\n  const Network::Address::InstanceConstSharedPtr remote_address_;\n  const Network::Address::InstanceConstSharedPtr local_address_;\n  const bool local_command_;\n};\n\n} // namespace ProxyProtocol\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/tls_inspector/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# TLS inspector filter for examining various TLS parameters before routing to a FilterChain.\n# Public docs: docs/root/configuration/listener_filters/tls_inspector.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"tls_inspector_lib\",\n    srcs = [\"tls_inspector.cc\"],\n    hdrs = [\"tls_inspector.h\"],\n    external_deps = [\"ssl\"],\n    # TODO(#9953) clean up.\n    visibility = [\n        \"//visibility:public\",\n    ],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/listener:well_known_names\",\n        \"//source/extensions/filters/listener/tls_inspector:tls_inspector_lib\",\n        \"@envoy_api//envoy/extensions/filters/listener/tls_inspector/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/listener/tls_inspector/config.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.pb.h\"\n#include \"envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/listener/tls_inspector/tls_inspector.h\"\n#include \"extensions/filters/listener/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace TlsInspector {\n\n/**\n * Config registration for the TLS inspector filter. @see NamedNetworkFilterConfigFactory.\n */\nclass TlsInspectorConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory {\npublic:\n  // NamedListenerFilterConfigFactory\n  Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto(\n      const Protobuf::Message&,\n      const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n      Server::Configuration::ListenerFactoryContext& context) override {\n    ConfigSharedPtr config(new Config(context.scope()));\n    return\n        [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void {\n          filter_manager.addAcceptFilter(listener_filter_matcher, std::make_unique<Filter>(config));\n        };\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::extensions::filters::listener::tls_inspector::v3::TlsInspector>();\n  }\n\n  std::string name() const override { return ListenerFilterNames::get().TlsInspector; }\n};\n\n/**\n * Static registration for the TLS inspector filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(TlsInspectorConfigFactory,\n                 Server::Configuration::NamedListenerFilterConfigFactory){\n    \"envoy.listener.tls_inspector\"};\n\n} // namespace TlsInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/tls_inspector/tls_inspector.cc",
    "content": "#include \"extensions/filters/listener/tls_inspector/tls_inspector.h\"\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace TlsInspector {\n\n// Min/max TLS version recognized by the underlying TLS/SSL library.\nconst unsigned Config::TLS_MIN_SUPPORTED_VERSION = TLS1_VERSION;\nconst unsigned Config::TLS_MAX_SUPPORTED_VERSION = TLS1_3_VERSION;\n\nConfig::Config(Stats::Scope& scope, uint32_t max_client_hello_size)\n    : stats_{ALL_TLS_INSPECTOR_STATS(POOL_COUNTER_PREFIX(scope, \"tls_inspector.\"))},\n      ssl_ctx_(SSL_CTX_new(TLS_with_buffers_method())),\n      max_client_hello_size_(max_client_hello_size) {\n\n  if (max_client_hello_size_ > TLS_MAX_CLIENT_HELLO) {\n    throw EnvoyException(fmt::format(\"max_client_hello_size of {} is greater than maximum of {}.\",\n                                     max_client_hello_size_, size_t(TLS_MAX_CLIENT_HELLO)));\n  }\n\n  SSL_CTX_set_min_proto_version(ssl_ctx_.get(), TLS_MIN_SUPPORTED_VERSION);\n  SSL_CTX_set_max_proto_version(ssl_ctx_.get(), TLS_MAX_SUPPORTED_VERSION);\n  SSL_CTX_set_options(ssl_ctx_.get(), SSL_OP_NO_TICKET);\n  SSL_CTX_set_session_cache_mode(ssl_ctx_.get(), SSL_SESS_CACHE_OFF);\n  SSL_CTX_set_select_certificate_cb(\n      ssl_ctx_.get(), [](const SSL_CLIENT_HELLO* client_hello) -> ssl_select_cert_result_t {\n        const uint8_t* data;\n        size_t len;\n        if (SSL_early_callback_ctx_extension_get(\n                client_hello, TLSEXT_TYPE_application_layer_protocol_negotiation, &data, &len)) {\n          Filter* 
filter = static_cast<Filter*>(SSL_get_app_data(client_hello->ssl));\n          filter->onALPN(data, len);\n        }\n        return ssl_select_cert_success;\n      });\n  SSL_CTX_set_tlsext_servername_callback(\n      ssl_ctx_.get(), [](SSL* ssl, int* out_alert, void*) -> int {\n        Filter* filter = static_cast<Filter*>(SSL_get_app_data(ssl));\n        filter->onServername(\n            absl::NullSafeStringView(SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name)));\n\n        // Return an error to stop the handshake; we have what we wanted already.\n        *out_alert = SSL_AD_USER_CANCELLED;\n        return SSL_TLSEXT_ERR_ALERT_FATAL;\n      });\n}\n\nbssl::UniquePtr<SSL> Config::newSsl() { return bssl::UniquePtr<SSL>{SSL_new(ssl_ctx_.get())}; }\n\nthread_local uint8_t Filter::buf_[Config::TLS_MAX_CLIENT_HELLO];\n\nFilter::Filter(const ConfigSharedPtr config) : config_(config), ssl_(config_->newSsl()) {\n  RELEASE_ASSERT(sizeof(buf_) >= config_->maxClientHelloSize(), \"\");\n\n  SSL_set_app_data(ssl_.get(), this);\n  SSL_set_accept_state(ssl_.get());\n}\n\nNetwork::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) {\n  ENVOY_LOG(debug, \"tls inspector: new connection accepted\");\n  Network::ConnectionSocket& socket = cb.socket();\n  ASSERT(file_event_ == nullptr);\n  cb_ = &cb;\n\n  ParseState parse_state = onRead();\n  switch (parse_state) {\n  case ParseState::Error:\n    // As per discussion in https://github.com/envoyproxy/envoy/issues/7864\n    // we don't add new enum in FilterStatus so we have to signal the caller\n    // the new condition.\n    cb.socket().close();\n    return Network::FilterStatus::StopIteration;\n  case ParseState::Done:\n    return Network::FilterStatus::Continue;\n  case ParseState::Continue:\n    // do nothing but create the event\n    file_event_ = socket.ioHandle().createFileEvent(\n        cb.dispatcher(),\n        [this](uint32_t events) {\n          if (events & Event::FileReadyType::Closed) {\n            
config_->stats().connection_closed_.inc();\n            done(false);\n            return;\n          }\n\n          ASSERT(events == Event::FileReadyType::Read);\n          ParseState parse_state = onRead();\n          switch (parse_state) {\n          case ParseState::Error:\n            done(false);\n            break;\n          case ParseState::Done:\n            done(true);\n            break;\n          case ParseState::Continue:\n            // do nothing but wait for the next event\n            break;\n          }\n        },\n        Event::PlatformDefaultTriggerType,\n        Event::FileReadyType::Read | Event::FileReadyType::Closed);\n    return Network::FilterStatus::StopIteration;\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid Filter::onALPN(const unsigned char* data, unsigned int len) {\n  CBS wire, list;\n  CBS_init(&wire, reinterpret_cast<const uint8_t*>(data), static_cast<size_t>(len));\n  if (!CBS_get_u16_length_prefixed(&wire, &list) || CBS_len(&wire) != 0 || CBS_len(&list) < 2) {\n    // Don't produce errors, let the real TLS stack do it.\n    return;\n  }\n  CBS name;\n  std::vector<absl::string_view> protocols;\n  while (CBS_len(&list) > 0) {\n    if (!CBS_get_u8_length_prefixed(&list, &name) || CBS_len(&name) == 0) {\n      // Don't produce errors, let the real TLS stack do it.\n      return;\n    }\n    protocols.emplace_back(reinterpret_cast<const char*>(CBS_data(&name)), CBS_len(&name));\n  }\n  cb_->socket().setRequestedApplicationProtocols(protocols);\n  alpn_found_ = true;\n}\n\nvoid Filter::onServername(absl::string_view name) {\n  if (!name.empty()) {\n    config_->stats().sni_found_.inc();\n    cb_->socket().setRequestedServerName(name);\n    ENVOY_LOG(debug, \"tls:onServerName(), requestedServerName: {}\", name);\n  } else {\n    config_->stats().sni_not_found_.inc();\n  }\n  clienthello_success_ = true;\n}\n\nParseState Filter::onRead() {\n  // This receive code is somewhat complicated, because it must be done as a MSG_PEEK 
because\n  // there is no way for a listener-filter to pass payload data to the ConnectionImpl and filters\n  // that get created later.\n  //\n  // The file_event_ in this class gets events every time new data is available on the socket,\n  // even if previous data has not been read, which is always the case due to MSG_PEEK. When\n  // the TlsInspector completes and passes the socket along, a new FileEvent is created for the\n  // socket, so that new event is immediately signaled as readable because it is new and the socket\n  // is readable, even though no new events have occurred.\n  //\n  // TODO(ggreenway): write an integration test to ensure the events work as expected on all\n  // platforms.\n  const auto result = cb_->socket().ioHandle().recv(buf_, config_->maxClientHelloSize(), MSG_PEEK);\n  ENVOY_LOG(trace, \"tls inspector: recv: {}\", result.rc_);\n\n  if (!result.ok()) {\n    if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) {\n      return ParseState::Continue;\n    }\n    config_->stats().read_error_.inc();\n    return ParseState::Error;\n  }\n\n  // Because we're doing a MSG_PEEK, data we've seen before gets returned every time, so\n  // skip over what we've already processed.\n  if (static_cast<uint64_t>(result.rc_) > read_) {\n    const uint8_t* data = buf_ + read_;\n    const size_t len = result.rc_ - read_;\n    read_ = result.rc_;\n    return parseClientHello(data, len);\n  }\n  return ParseState::Continue;\n}\n\nvoid Filter::done(bool success) {\n  ENVOY_LOG(trace, \"tls inspector: done: {}\", success);\n  file_event_.reset();\n  cb_->continueFilterChain(success);\n}\n\nParseState Filter::parseClientHello(const void* data, size_t len) {\n  // Ownership is passed to ssl_ in SSL_set_bio()\n  bssl::UniquePtr<BIO> bio(BIO_new_mem_buf(data, len));\n\n  // Make the mem-BIO return that there is more data\n  // available beyond it's end\n  BIO_set_mem_eof_return(bio.get(), -1);\n\n  SSL_set_bio(ssl_.get(), bio.get(), bio.get());\n  
bio.release();\n\n  int ret = SSL_do_handshake(ssl_.get());\n\n  // This should never succeed because an error is always returned from the SNI callback.\n  ASSERT(ret <= 0);\n  switch (SSL_get_error(ssl_.get(), ret)) {\n  case SSL_ERROR_WANT_READ:\n    if (read_ == config_->maxClientHelloSize()) {\n      // We've hit the specified size limit. This is an unreasonably large ClientHello;\n      // indicate failure.\n      config_->stats().client_hello_too_large_.inc();\n      return ParseState::Error;\n    }\n    return ParseState::Continue;\n  case SSL_ERROR_SSL:\n    if (clienthello_success_) {\n      config_->stats().tls_found_.inc();\n      if (alpn_found_) {\n        config_->stats().alpn_found_.inc();\n      } else {\n        config_->stats().alpn_not_found_.inc();\n      }\n      cb_->socket().setDetectedTransportProtocol(\n          TransportSockets::TransportProtocolNames::get().Tls);\n    } else {\n      config_->stats().tls_not_found_.inc();\n    }\n    return ParseState::Done;\n  default:\n    return ParseState::Error;\n  }\n}\n\n} // namespace TlsInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/tls_inspector/tls_inspector.h",
    "content": "#pragma once\n\n#include \"envoy/event/file_event.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace TlsInspector {\n\n/**\n * All stats for the TLS inspector. @see stats_macros.h\n */\n#define ALL_TLS_INSPECTOR_STATS(COUNTER)                                                           \\\n  COUNTER(connection_closed)                                                                       \\\n  COUNTER(client_hello_too_large)                                                                  \\\n  COUNTER(read_error)                                                                              \\\n  COUNTER(tls_found)                                                                               \\\n  COUNTER(tls_not_found)                                                                           \\\n  COUNTER(alpn_found)                                                                              \\\n  COUNTER(alpn_not_found)                                                                          \\\n  COUNTER(sni_found)                                                                               \\\n  COUNTER(sni_not_found)\n\n/**\n * Definition of all stats for the TLS inspector. @see stats_macros.h\n */\nstruct TlsInspectorStats {\n  ALL_TLS_INSPECTOR_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nenum class ParseState {\n  // Parse result is out. 
It could be tls or not.\n  Done,\n  // Parser expects more data.\n  Continue,\n  // Parser reports unrecoverable error.\n  Error\n};\n/**\n * Global configuration for TLS inspector.\n */\nclass Config {\npublic:\n  Config(Stats::Scope& scope, uint32_t max_client_hello_size = TLS_MAX_CLIENT_HELLO);\n\n  const TlsInspectorStats& stats() const { return stats_; }\n  bssl::UniquePtr<SSL> newSsl();\n  uint32_t maxClientHelloSize() const { return max_client_hello_size_; }\n\n  static constexpr size_t TLS_MAX_CLIENT_HELLO = 64 * 1024;\n  static const unsigned TLS_MIN_SUPPORTED_VERSION;\n  static const unsigned TLS_MAX_SUPPORTED_VERSION;\n\nprivate:\n  TlsInspectorStats stats_;\n  bssl::UniquePtr<SSL_CTX> ssl_ctx_;\n  const uint32_t max_client_hello_size_;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\n/**\n * TLS inspector listener filter.\n */\nclass Filter : public Network::ListenerFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  Filter(const ConfigSharedPtr config);\n\n  // Network::ListenerFilter\n  Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override;\n\nprivate:\n  ParseState parseClientHello(const void* data, size_t len);\n  ParseState onRead();\n  void done(bool success);\n  void onALPN(const unsigned char* data, unsigned int len);\n  void onServername(absl::string_view name);\n\n  ConfigSharedPtr config_;\n  Network::ListenerFilterCallbacks* cb_;\n  Event::FileEventPtr file_event_;\n\n  bssl::UniquePtr<SSL> ssl_;\n  uint64_t read_{0};\n  bool alpn_found_{false};\n  bool clienthello_success_{false};\n\n  static thread_local uint8_t buf_[Config::TLS_MAX_CLIENT_HELLO];\n\n  // Allows callbacks on the SSL_CTX to set fields in this class.\n  friend class Config;\n};\n\n} // namespace TlsInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/listener/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\n\n/**\n * Well-known listener filter names.\n * NOTE: New filters should use the well known name: envoy.filters.listener.name.\n */\nclass ListenerFilterNameValues {\npublic:\n  // HTTP Inspector listener filter\n  const std::string HttpInspector = \"envoy.filters.listener.http_inspector\";\n  // Original destination listener filter\n  const std::string OriginalDst = \"envoy.filters.listener.original_dst\";\n  // Original source listener filter\n  const std::string OriginalSrc = \"envoy.filters.listener.original_src\";\n  // Proxy Protocol listener filter\n  const std::string ProxyProtocol = \"envoy.filters.listener.proxy_protocol\";\n  // TLS Inspector listener filter\n  const std::string TlsInspector = \"envoy.filters.listener.tls_inspector\";\n};\n\nusing ListenerFilterNames = ConstSingleton<ListenerFilterNameValues>;\n\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # Well known names are public.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/config:well_known_names\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/client_ssl_auth/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Client SSL authorization L4 network filter\n# Public docs: docs/root/configuration/network_filters/client_ssl_auth_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"client_ssl_auth\",\n    srcs = [\"client_ssl_auth.cc\"],\n    hdrs = [\"client_ssl_auth.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:rest_api_fetcher_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:cidr_range_lib\",\n        \"//source/common/network:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/client_ssl_auth/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":client_ssl_auth\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/client_ssl_auth/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc",
    "content": "#include \"extensions/filters/network/client_ssl_auth/client_ssl_auth.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ClientSslAuth {\n\nClientSslAuthConfig::ClientSslAuthConfig(\n    const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config,\n    ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher,\n    Stats::Scope& scope, Random::RandomGenerator& random)\n    : RestApiFetcher(\n          cm, config.auth_api_cluster(), dispatcher, random,\n          std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, refresh_delay, 60000)),\n          std::chrono::milliseconds(1000)),\n      tls_(tls.allocateSlot()), ip_allowlist_(config.ip_white_list()),\n      stats_(generateStats(scope, config.stat_prefix())) {\n\n  if (!cm.get(remote_cluster_name_)) {\n    throw EnvoyException(\n        fmt::format(\"unknown cluster '{}' in client ssl auth config\", remote_cluster_name_));\n  }\n\n  AllowedPrincipalsSharedPtr empty(new AllowedPrincipals());\n  tls_->set(\n      [empty](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return empty; });\n}\n\nClientSslAuthConfigSharedPtr ClientSslAuthConfig::create(\n    const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config,\n    ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher,\n    Stats::Scope& 
scope, Random::RandomGenerator& random) {\n  ClientSslAuthConfigSharedPtr new_config(\n      new ClientSslAuthConfig(config, tls, cm, dispatcher, scope, random));\n  new_config->initialize();\n  return new_config;\n}\n\nconst AllowedPrincipals& ClientSslAuthConfig::allowedPrincipals() {\n  return tls_->getTyped<AllowedPrincipals>();\n}\n\nGlobalStats ClientSslAuthConfig::generateStats(Stats::Scope& scope, const std::string& prefix) {\n  std::string final_prefix = fmt::format(\"auth.clientssl.{}.\", prefix);\n  GlobalStats stats{ALL_CLIENT_SSL_AUTH_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),\n                                              POOL_GAUGE_PREFIX(scope, final_prefix))};\n  return stats;\n}\n\nvoid ClientSslAuthConfig::parseResponse(const Http::ResponseMessage& message) {\n  AllowedPrincipalsSharedPtr new_principals(new AllowedPrincipals());\n  Json::ObjectSharedPtr loader = Json::Factory::loadFromString(message.bodyAsString());\n  for (const Json::ObjectSharedPtr& certificate : loader->getObjectArray(\"certificates\")) {\n    new_principals->add(certificate->getString(\"fingerprint_sha256\"));\n  }\n\n  tls_->set([new_principals](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return new_principals;\n  });\n\n  stats_.update_success_.inc();\n  stats_.total_principals_.set(new_principals->size());\n}\n\nvoid ClientSslAuthConfig::onFetchFailure(Config::ConfigUpdateFailureReason, const EnvoyException*) {\n  stats_.update_failure_.inc();\n}\n\nstatic const std::string Path = \"/v1/certs/list/approved\";\n\nvoid ClientSslAuthConfig::createRequest(Http::RequestMessage& request) {\n  request.headers().setReferenceMethod(Http::Headers::get().MethodValues.Get);\n  request.headers().setPath(Path);\n}\n\nNetwork::FilterStatus ClientSslAuthFilter::onData(Buffer::Instance&, bool) {\n  return Network::FilterStatus::Continue;\n}\n\nNetwork::FilterStatus ClientSslAuthFilter::onNewConnection() {\n  // If this is not an SSL connection, do no further 
checking. High layers should redirect, etc.\n  // if SSL is required.\n  if (!read_callbacks_->connection().ssl()) {\n    config_->stats().auth_no_ssl_.inc();\n    return Network::FilterStatus::Continue;\n  } else {\n    // Otherwise we need to wait for handshake to be complete before proceeding.\n    return Network::FilterStatus::StopIteration;\n  }\n}\n\nvoid ClientSslAuthFilter::onEvent(Network::ConnectionEvent event) {\n  if (event != Network::ConnectionEvent::Connected) {\n    return;\n  }\n\n  ASSERT(read_callbacks_->connection().ssl());\n  if (config_->ipAllowlist().contains(*read_callbacks_->connection().remoteAddress())) {\n    config_->stats().auth_ip_allowlist_.inc();\n    read_callbacks_->continueReading();\n    return;\n  }\n\n  if (!config_->allowedPrincipals().allowed(\n          read_callbacks_->connection().ssl()->sha256PeerCertificateDigest())) {\n    config_->stats().auth_digest_no_match_.inc();\n    read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n    return;\n  }\n\n  config_->stats().auth_digest_match_.inc();\n  read_callbacks_->continueReading();\n}\n\n} // namespace ClientSslAuth\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/http/rest_api_fetcher.h\"\n#include \"common/network/cidr_range.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/container/node_hash_set.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ClientSslAuth {\n\n/**\n * All client SSL auth stats. @see stats_macros.h\n */\n#define ALL_CLIENT_SSL_AUTH_STATS(COUNTER, GAUGE)                                                  \\\n  COUNTER(auth_digest_match)                                                                       \\\n  COUNTER(auth_digest_no_match)                                                                    \\\n  COUNTER(auth_ip_allowlist)                                                                       \\\n  COUNTER(auth_no_ssl)                                                                             \\\n  COUNTER(update_failure)                                                                          \\\n  COUNTER(update_success)                                                                          \\\n  GAUGE(total_principals, NeverImport)\n\n/**\n * Struct definition for all client SSL auth stats. 
@see stats_macros.h\n */\nstruct GlobalStats {\n  ALL_CLIENT_SSL_AUTH_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Wraps the principals currently allowed to authenticate.\n */\nclass AllowedPrincipals : public ThreadLocal::ThreadLocalObject {\npublic:\n  void add(const std::string& sha256_digest) {\n    if (!sha256_digest.empty()) {\n      allowed_sha256_digests_.emplace(sha256_digest);\n    }\n  }\n  bool allowed(const std::string& sha256_digest) const {\n    return allowed_sha256_digests_.count(sha256_digest) != 0;\n  }\n  size_t size() const { return allowed_sha256_digests_.size(); }\n\nprivate:\n  absl::node_hash_set<std::string> allowed_sha256_digests_;\n};\n\nusing AllowedPrincipalsSharedPtr = std::shared_ptr<AllowedPrincipals>;\n\nclass ClientSslAuthConfig;\nusing ClientSslAuthConfigSharedPtr = std::shared_ptr<ClientSslAuthConfig>;\n\n/**\n * Global configuration for client SSL authentication. The config contacts a JSON API to fetch the\n * list of allowed principals, caches it, then makes auth decisions on it and any associated IP\n * allowlist.\n */\nclass ClientSslAuthConfig : public Http::RestApiFetcher {\npublic:\n  static ClientSslAuthConfigSharedPtr\n  create(const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config,\n         ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm,\n         Event::Dispatcher& dispatcher, Stats::Scope& scope, Random::RandomGenerator& random);\n\n  const AllowedPrincipals& allowedPrincipals();\n  const Network::Address::IpList& ipAllowlist() { return ip_allowlist_; }\n  GlobalStats& stats() { return stats_; }\n\nprivate:\n  ClientSslAuthConfig(\n      const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config,\n      ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher,\n      Stats::Scope& scope, Random::RandomGenerator& random);\n\n  static GlobalStats generateStats(Stats::Scope& scope, const 
std::string& prefix);\n\n  // Http::RestApiFetcher\n  void createRequest(Http::RequestMessage& request) override;\n  void parseResponse(const Http::ResponseMessage& response) override;\n  void onFetchComplete() override {}\n  void onFetchFailure(Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override;\n\n  ThreadLocal::SlotPtr tls_;\n  Network::Address::IpList ip_allowlist_;\n  GlobalStats stats_;\n};\n\n/**\n * A client SSL auth filter instance. One per connection.\n */\nclass ClientSslAuthFilter : public Network::ReadFilter, public Network::ConnectionCallbacks {\npublic:\n  ClientSslAuthFilter(ClientSslAuthConfigSharedPtr config) : config_(config) {}\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n    read_callbacks_->connection().addConnectionCallbacks(*this);\n  }\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\nprivate:\n  ClientSslAuthConfigSharedPtr config_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n};\n\n} // namespace ClientSslAuth\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/client_ssl_auth/config.cc",
    "content": "#include \"extensions/filters/network/client_ssl_auth/config.h\"\n\n#include \"envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.h\"\n#include \"envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.validate.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/network/client_ssl_auth/client_ssl_auth.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ClientSslAuth {\n\nNetwork::FilterFactoryCb ClientSslAuthConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  ASSERT(!proto_config.auth_api_cluster().empty());\n  ASSERT(!proto_config.stat_prefix().empty());\n\n  ClientSslAuthConfigSharedPtr filter_config(ClientSslAuthConfig::create(\n      proto_config, context.threadLocal(), context.clusterManager(), context.dispatcher(),\n      context.scope(), context.api().randomGenerator()));\n  return [filter_config](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(std::make_shared<ClientSslAuthFilter>(filter_config));\n  };\n}\n\n/**\n * Static registration for the client SSL auth filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(ClientSslAuthConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory){\"envoy.client_ssl_auth\"};\n\n} // namespace ClientSslAuth\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/client_ssl_auth/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.h\"\n#include \"envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ClientSslAuth {\n\n/**\n * Config registration for the client SSL auth filter. @see NamedNetworkFilterConfigFactory.\n */\nclass ClientSslAuthConfigFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth> {\npublic:\n  ClientSslAuthConfigFactory() : FactoryBase(NetworkFilterNames::get().ClientSslAuth) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace ClientSslAuth\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"factory_base_lib\",\n    hdrs = [\"factory_base.h\"],\n    # Used by core.  TODO(#9953) clean up.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    # Used by core.  TODO(#9953) clean up.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/common:macros\",\n        \"//source/extensions/common:utility_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/common/factory_base.h",
    "content": "#pragma once\n\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\n\n/**\n * Common base class for network filter factory registrations. Removes a substantial amount of\n * boilerplate.\n */\ntemplate <class ConfigProto, class ProtocolOptionsProto = ConfigProto>\nclass FactoryBase : public Server::Configuration::NamedNetworkFilterConfigFactory {\npublic:\n  Network::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& proto_config,\n                               Server::Configuration::FactoryContext& context) override {\n    return createFilterFactoryFromProtoTyped(MessageUtil::downcastAndValidate<const ConfigProto&>(\n                                                 proto_config, context.messageValidationVisitor()),\n                                             context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ConfigProto>();\n  }\n\n  ProtobufTypes::MessagePtr createEmptyProtocolOptionsProto() override {\n    return std::make_unique<ProtocolOptionsProto>();\n  }\n\n  Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(\n      const Protobuf::Message& proto_config,\n      Server::Configuration::ProtocolOptionsFactoryContext& factory_context) override {\n    return createProtocolOptionsTyped(MessageUtil::downcastAndValidate<const ProtocolOptionsProto&>(\n                                          proto_config, factory_context.messageValidationVisitor()),\n                                      factory_context);\n  }\n\n  std::string name() const override { return name_; }\n\n  bool isTerminalFilter() override { return is_terminal_filter_; }\n\nprotected:\n  FactoryBase(const std::string& name, bool is_terminal = false)\n      : 
name_(name), is_terminal_filter_(is_terminal) {}\n\nprivate:\n  virtual Network::FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const ConfigProto& proto_config,\n                                    Server::Configuration::FactoryContext& context) PURE;\n\n  virtual Upstream::ProtocolOptionsConfigConstSharedPtr\n  createProtocolOptionsTyped(const ProtocolOptionsProto&,\n                             Server::Configuration::ProtocolOptionsFactoryContext&) {\n    ExceptionUtil::throwEnvoyException(\n        fmt::format(\"filter {} does not support protocol options\", name_));\n  }\n\n  const std::string name_;\n  const bool is_terminal_filter_;\n};\n\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"codec_interface\",\n    hdrs = [\"codec.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_interface\",\n    hdrs = [\"utility.h\"],\n    deps = [\":codec_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"codec_lib\",\n    srcs = [\"codec_impl.cc\"],\n    hdrs = [\"codec_impl.h\"],\n    deps = [\n        \":codec_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"supported_commands_lib\",\n    hdrs = [\"supported_commands.h\"],\n    deps = [\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"client_interface\",\n    hdrs = [\"client.h\"],\n    deps = [\n        \":codec_lib\",\n        \":redis_command_stats_lib\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"client_lib\",\n    srcs = [\"client_impl.cc\"],\n    hdrs = [\"client_impl.h\"],\n    deps = [\n        \":client_interface\",\n        \":codec_lib\",\n        \":utility_lib\",\n        \"//include/envoy/router:router_interface\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        
\"//source/common/upstream:upstream_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    deps = [\n        \":codec_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"redis_command_stats_lib\",\n    srcs = [\"redis_command_stats.cc\"],\n    hdrs = [\"redis_command_stats.h\"],\n    deps = [\n        \":codec_interface\",\n        \":supported_commands_lib\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/common/stats:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"fault_interface\",\n    hdrs = [\"fault.h\"],\n    deps = [\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"fault_lib\",\n    srcs = [\"fault_impl.cc\"],\n    hdrs = [\"fault_impl.h\"],\n    deps = [\n        \":codec_lib\",\n        \":fault_interface\",\n        \"//include/envoy/common:random_generator_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/client.h",
    "content": "#pragma once\n\n#include <cstdint>\n\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"extensions/filters/network/common/redis/codec_impl.h\"\n#include \"extensions/filters/network/common/redis/redis_command_stats.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\nnamespace Client {\n\n/**\n * A handle to an outbound request.\n */\nclass PoolRequest {\npublic:\n  virtual ~PoolRequest() = default;\n\n  /**\n   * Cancel the request. No further request callbacks will be called.\n   */\n  virtual void cancel() PURE;\n};\n\n/**\n * Outbound request callbacks.\n */\nclass ClientCallbacks {\npublic:\n  virtual ~ClientCallbacks() = default;\n\n  /**\n   * Called when a pipelined response is received.\n   * @param value supplies the response which is now owned by the callee.\n   */\n  virtual void onResponse(RespValuePtr&& value) PURE;\n\n  /**\n   * Called when a network/protocol error occurs and there is no response.\n   */\n  virtual void onFailure() PURE;\n\n  /**\n   * Called when a MOVED or ASK redirection error is received, and the request must be retried.\n   * @param value supplies the MOVED error response\n   * @param host_address supplies the redirection host address and port\n   * @param ask_redirection indicates if this is a ASK redirection\n   * @return bool true if the request is successfully redirected, false otherwise\n   */\n  virtual bool onRedirection(RespValuePtr&& value, const std::string& host_address,\n                             bool ask_redirection) PURE;\n};\n\n/**\n * DoNothingPoolCallbacks is used for internally generated commands whose response is\n * transparently filtered, and redirection never occurs (e.g., \"asking\", \"auth\", etc.).\n */\nclass DoNothingPoolCallbacks : public ClientCallbacks {\npublic:\n  // ClientCallbacks\n  void onResponse(Common::Redis::RespValuePtr&&) override {}\n  void onFailure() override {}\n  bool 
onRedirection(Common::Redis::RespValuePtr&&, const std::string&, bool) override {\n    return false;\n  }\n};\n\n/**\n * A single redis client connection.\n */\nclass Client : public Event::DeferredDeletable {\npublic:\n  ~Client() override = default;\n\n  /**\n   * Adds network connection callbacks to the underlying network connection.\n   */\n  virtual void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) PURE;\n\n  /**\n   * Called to determine if the client has pending requests.\n   * @return bool true if the client is processing requests or false if it is currently idle.\n   */\n  virtual bool active() PURE;\n\n  /**\n   * Closes the underlying network connection.\n   */\n  virtual void close() PURE;\n\n  /**\n   * Make a pipelined request to the remote redis server.\n   * @param request supplies the RESP request to make.\n   * @param callbacks supplies the request callbacks.\n   * @return PoolRequest* a handle to the active request or nullptr if the request could not be made\n   *         for some reason.\n   */\n  virtual PoolRequest* makeRequest(const RespValue& request, ClientCallbacks& callbacks) PURE;\n\n  /**\n   * Initialize the connection. Issue the auth command and readonly command as needed.\n   * @param auth password for upstream host.\n   */\n  virtual void initialize(const std::string& auth_username, const std::string& auth_password) PURE;\n};\n\nusing ClientPtr = std::unique_ptr<Client>;\n\n/**\n * Read policy to use for Redis cluster.\n */\nenum class ReadPolicy { Primary, PreferPrimary, Replica, PreferReplica, Any };\n\n/**\n * Configuration for a redis connection pool.\n */\nclass Config {\npublic:\n  virtual ~Config() = default;\n\n  /**\n   * @return std::chrono::milliseconds the timeout for an individual redis operation. 
Currently,\n   *         all operations use the same timeout.\n   */\n  virtual std::chrono::milliseconds opTimeout() const PURE;\n\n  /**\n   * @return bool disable outlier events even if the cluster has it enabled. This is used by the\n   * healthchecker's connection pool to avoid double counting active healthcheck operations as\n   * passive healthcheck operations.\n   */\n  virtual bool disableOutlierEvents() const PURE;\n\n  /**\n   * @return when enabled, a hash tagging function will be used to guarantee that keys with the\n   * same hash tag will be forwarded to the same upstream.\n   */\n  virtual bool enableHashtagging() const PURE;\n\n  /**\n   * @return when enabled, moved/ask redirection errors from upstream redis servers will be\n   * processed.\n   */\n  virtual bool enableRedirection() const PURE;\n\n  /**\n   * @return buffer size for batching commands for a single upstream host.\n   */\n  virtual uint32_t maxBufferSizeBeforeFlush() const PURE;\n\n  /**\n   * @return timeout for batching commands for a single upstream host.\n   */\n  virtual std::chrono::milliseconds bufferFlushTimeoutInMs() const PURE;\n\n  /**\n   * @return the maximum number of upstream connections to unknown hosts when enableRedirection() is\n   * true.\n   *\n   * This value acts as an upper bound on the number of servers in a cluster if only a subset\n   * of the cluster's servers are known via configuration (cluster size - number of servers in\n   * cluster known to cluster manager <= maxUpstreamUnknownConnections() for proper operation).\n   * Redirection errors are processed if enableRedirection() is true, and a new upstream connection\n   * to a previously unknown server will be made as a result of redirection if the number of unknown\n   * server connections is currently less than maxUpstreamUnknownConnections(). If a connection\n   * cannot be made, then the original redirection error will be passed through unchanged to the\n   * downstream client. 
If a cluster is using the Redis cluster protocol (RedisCluster), then the\n   * cluster logic will periodically discover all of the servers in the cluster; this should\n   * minimize the need for a large maxUpstreamUnknownConnections() value.\n   */\n  virtual uint32_t maxUpstreamUnknownConnections() const PURE;\n\n  /**\n   * @return when enabled, upstream cluster per-command statistics will be recorded.\n   */\n  virtual bool enableCommandStats() const PURE;\n\n  /**\n   * @return the read policy the proxy should use.\n   */\n  virtual ReadPolicy readPolicy() const PURE;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\n/**\n * A factory for individual redis client connections.\n */\nclass ClientFactory {\npublic:\n  virtual ~ClientFactory() = default;\n\n  /**\n   * Create a client given an upstream host.\n   * @param host supplies the upstream host.\n   * @param dispatcher supplies the owning thread's dispatcher.\n   * @param config supplies the connection pool configuration.\n   * @param redis_command_stats supplies the redis command stats.\n   * @param scope supplies the stats scope.\n   * @param auth password for upstream host.\n   * @return ClientPtr a new connection pool client.\n   */\n  virtual ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher,\n                           const Config& config,\n                           const RedisCommandStatsSharedPtr& redis_command_stats,\n                           Stats::Scope& scope, const std::string& auth_username,\n                           const std::string& auth_password) PURE;\n};\n\n} // namespace Client\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/client_impl.cc",
    "content": "#include \"extensions/filters/network/common/redis/client_impl.h\"\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\nnamespace Client {\nnamespace {\n// null_pool_callbacks is used for requests that must be filtered and not redirected such as\n// \"asking\".\nCommon::Redis::Client::DoNothingPoolCallbacks null_pool_callbacks;\n} // namespace\n\nConfigImpl::ConfigImpl(\n    const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings&\n        config)\n    : op_timeout_(PROTOBUF_GET_MS_REQUIRED(config, op_timeout)),\n      enable_hashtagging_(config.enable_hashtagging()),\n      enable_redirection_(config.enable_redirection()),\n      max_buffer_size_before_flush_(\n          config.max_buffer_size_before_flush()), // This is a scalar, so default is zero.\n      buffer_flush_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(\n          config, buffer_flush_timeout,\n          3)), // Default timeout is 3ms. 
If max_buffer_size_before_flush is zero, this is not used\n               // as the buffer is flushed on each request immediately.\n      max_upstream_unknown_connections_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_upstream_unknown_connections, 100)),\n      enable_command_stats_(config.enable_command_stats()) {\n  switch (config.read_policy()) {\n  case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::MASTER:\n    read_policy_ = ReadPolicy::Primary;\n    break;\n  case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::\n      PREFER_MASTER:\n    read_policy_ = ReadPolicy::PreferPrimary;\n    break;\n  case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::REPLICA:\n    read_policy_ = ReadPolicy::Replica;\n    break;\n  case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::\n      PREFER_REPLICA:\n    read_policy_ = ReadPolicy::PreferReplica;\n    break;\n  case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ANY:\n    read_policy_ = ReadPolicy::Any;\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n    break;\n  }\n}\n\nClientPtr ClientImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher,\n                             EncoderPtr&& encoder, DecoderFactory& decoder_factory,\n                             const Config& config,\n                             const RedisCommandStatsSharedPtr& redis_command_stats,\n                             Stats::Scope& scope) {\n  auto client = std::make_unique<ClientImpl>(host, dispatcher, std::move(encoder), decoder_factory,\n                                             config, redis_command_stats, scope);\n  client->connection_ = host->createConnection(dispatcher, nullptr, nullptr).connection_;\n  client->connection_->addConnectionCallbacks(*client);\n  
client->connection_->addReadFilter(Network::ReadFilterSharedPtr{new UpstreamReadFilter(*client)});\n  client->connection_->connect();\n  client->connection_->noDelay(true);\n  return client;\n}\n\nClientImpl::ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher,\n                       EncoderPtr&& encoder, DecoderFactory& decoder_factory, const Config& config,\n                       const RedisCommandStatsSharedPtr& redis_command_stats, Stats::Scope& scope)\n    : host_(host), encoder_(std::move(encoder)), decoder_(decoder_factory.create(*this)),\n      config_(config),\n      connect_or_op_timer_(dispatcher.createTimer([this]() { onConnectOrOpTimeout(); })),\n      flush_timer_(dispatcher.createTimer([this]() { flushBufferAndResetTimer(); })),\n      time_source_(dispatcher.timeSource()), redis_command_stats_(redis_command_stats),\n      scope_(scope) {\n  host->cluster().stats().upstream_cx_total_.inc();\n  host->stats().cx_total_.inc();\n  host->cluster().stats().upstream_cx_active_.inc();\n  host->stats().cx_active_.inc();\n  connect_or_op_timer_->enableTimer(host->cluster().connectTimeout());\n}\n\nClientImpl::~ClientImpl() {\n  ASSERT(pending_requests_.empty());\n  ASSERT(connection_->state() == Network::Connection::State::Closed);\n  host_->cluster().stats().upstream_cx_active_.dec();\n  host_->stats().cx_active_.dec();\n}\n\nvoid ClientImpl::close() { connection_->close(Network::ConnectionCloseType::NoFlush); }\n\nvoid ClientImpl::flushBufferAndResetTimer() {\n  if (flush_timer_->enabled()) {\n    flush_timer_->disableTimer();\n  }\n  connection_->write(encoder_buffer_, false);\n}\n\nPoolRequest* ClientImpl::makeRequest(const RespValue& request, ClientCallbacks& callbacks) {\n  ASSERT(connection_->state() == Network::Connection::State::Open);\n\n  const bool empty_buffer = encoder_buffer_.length() == 0;\n\n  Stats::StatName command;\n  if (config_.enableCommandStats()) {\n    // Only lowercase command and get StatName if we enable 
command stats\n    command = redis_command_stats_->getCommandFromRequest(request);\n    redis_command_stats_->updateStatsTotal(scope_, command);\n  } else {\n    // If disabled, we use a placeholder stat name \"unused\" that is not used\n    command = redis_command_stats_->getUnusedStatName();\n  }\n\n  pending_requests_.emplace_back(*this, callbacks, command);\n  encoder_->encode(request, encoder_buffer_);\n\n  // If buffer is full, flush. If the buffer was empty before the request, start the timer.\n  if (encoder_buffer_.length() >= config_.maxBufferSizeBeforeFlush()) {\n    flushBufferAndResetTimer();\n  } else if (empty_buffer) {\n    flush_timer_->enableTimer(std::chrono::milliseconds(config_.bufferFlushTimeoutInMs()));\n  }\n\n  // Only boost the op timeout if:\n  // - We are not already connected. Otherwise, we are governed by the connect timeout and the timer\n  //   will be reset when/if connection occurs. This allows a relatively long connection spin up\n  //   time for example if TLS is being used.\n  // - This is the first request on the pipeline. 
Otherwise the timeout would effectively start on\n  //   the last operation.\n  if (connected_ && pending_requests_.size() == 1) {\n    connect_or_op_timer_->enableTimer(config_.opTimeout());\n  }\n\n  return &pending_requests_.back();\n}\n\nvoid ClientImpl::onConnectOrOpTimeout() {\n  putOutlierEvent(Upstream::Outlier::Result::LocalOriginTimeout);\n  if (connected_) {\n    host_->cluster().stats().upstream_rq_timeout_.inc();\n    host_->stats().rq_timeout_.inc();\n  } else {\n    host_->cluster().stats().upstream_cx_connect_timeout_.inc();\n    host_->stats().cx_connect_fail_.inc();\n  }\n\n  connection_->close(Network::ConnectionCloseType::NoFlush);\n}\n\nvoid ClientImpl::onData(Buffer::Instance& data) {\n  try {\n    decoder_->decode(data);\n  } catch (ProtocolError&) {\n    putOutlierEvent(Upstream::Outlier::Result::ExtOriginRequestFailed);\n    host_->cluster().stats().upstream_cx_protocol_error_.inc();\n    host_->stats().rq_error_.inc();\n    connection_->close(Network::ConnectionCloseType::NoFlush);\n  }\n}\n\nvoid ClientImpl::putOutlierEvent(Upstream::Outlier::Result result) {\n  if (!config_.disableOutlierEvents()) {\n    host_->outlierDetector().putResult(result);\n  }\n}\n\nvoid ClientImpl::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n\n    Upstream::reportUpstreamCxDestroy(host_, event);\n    if (!pending_requests_.empty()) {\n      Upstream::reportUpstreamCxDestroyActiveRequest(host_, event);\n      if (event == Network::ConnectionEvent::RemoteClose) {\n        putOutlierEvent(Upstream::Outlier::Result::LocalOriginConnectFailed);\n      }\n    }\n\n    while (!pending_requests_.empty()) {\n      PendingRequest& request = pending_requests_.front();\n      if (!request.canceled_) {\n        request.callbacks_.onFailure();\n      } else {\n        host_->cluster().stats().upstream_rq_cancelled_.inc();\n      }\n      
pending_requests_.pop_front();\n    }\n\n    connect_or_op_timer_->disableTimer();\n  } else if (event == Network::ConnectionEvent::Connected) {\n    connected_ = true;\n    ASSERT(!pending_requests_.empty());\n    connect_or_op_timer_->enableTimer(config_.opTimeout());\n  }\n\n  if (event == Network::ConnectionEvent::RemoteClose && !connected_) {\n    host_->cluster().stats().upstream_cx_connect_fail_.inc();\n    host_->stats().cx_connect_fail_.inc();\n  }\n}\n\nvoid ClientImpl::onRespValue(RespValuePtr&& value) {\n  ASSERT(!pending_requests_.empty());\n  PendingRequest& request = pending_requests_.front();\n  const bool canceled = request.canceled_;\n\n  if (config_.enableCommandStats()) {\n    bool success = !canceled && (value->type() != Common::Redis::RespType::Error);\n    redis_command_stats_->updateStats(scope_, request.command_, success);\n    request.command_request_timer_->complete();\n  }\n  request.aggregate_request_timer_->complete();\n\n  ClientCallbacks& callbacks = request.callbacks_;\n\n  // We need to ensure the request is popped before calling the callback, since the callback might\n  // result in closing the connection.\n  pending_requests_.pop_front();\n  if (canceled) {\n    host_->cluster().stats().upstream_rq_cancelled_.inc();\n  } else if (config_.enableRedirection() && (value->type() == Common::Redis::RespType::Error)) {\n    std::vector<absl::string_view> err = StringUtil::splitToken(value->asString(), \" \", false);\n    bool redirected = false;\n    if (err.size() == 3) {\n      // MOVED and ASK redirection errors have the following substrings: MOVED or ASK (err[0]), hash\n      // key slot (err[1]), and IP address and TCP port separated by a colon (err[2])\n      if (err[0] == RedirectionResponse::get().MOVED || err[0] == RedirectionResponse::get().ASK) {\n        redirected = true;\n        bool redirect_succeeded = callbacks.onRedirection(std::move(value), std::string(err[2]),\n                                                        
  err[0] == RedirectionResponse::get().ASK);\n        if (redirect_succeeded) {\n          host_->cluster().stats().upstream_internal_redirect_succeeded_total_.inc();\n        } else {\n          host_->cluster().stats().upstream_internal_redirect_failed_total_.inc();\n        }\n      }\n    }\n    if (!redirected) {\n      if (err[0] == RedirectionResponse::get().CLUSTER_DOWN) {\n        callbacks.onFailure();\n      } else {\n        callbacks.onResponse(std::move(value));\n      }\n    }\n  } else {\n    callbacks.onResponse(std::move(value));\n  }\n\n  // If there are no remaining ops in the pipeline we need to disable the timer.\n  // Otherwise we boost the timer since we are receiving responses and there are more to flush\n  // out.\n  if (pending_requests_.empty()) {\n    connect_or_op_timer_->disableTimer();\n  } else {\n    connect_or_op_timer_->enableTimer(config_.opTimeout());\n  }\n\n  putOutlierEvent(Upstream::Outlier::Result::ExtOriginRequestSuccess);\n}\n\nClientImpl::PendingRequest::PendingRequest(ClientImpl& parent, ClientCallbacks& callbacks,\n                                           Stats::StatName command)\n    : parent_(parent), callbacks_(callbacks), command_{command},\n      aggregate_request_timer_(parent_.redis_command_stats_->createAggregateTimer(\n          parent_.scope_, parent_.time_source_)) {\n  if (parent_.config_.enableCommandStats()) {\n    command_request_timer_ = parent_.redis_command_stats_->createCommandTimer(\n        parent_.scope_, command_, parent_.time_source_);\n  }\n  parent.host_->cluster().stats().upstream_rq_total_.inc();\n  parent.host_->stats().rq_total_.inc();\n  parent.host_->cluster().stats().upstream_rq_active_.inc();\n  parent.host_->stats().rq_active_.inc();\n}\n\nClientImpl::PendingRequest::~PendingRequest() {\n  parent_.host_->cluster().stats().upstream_rq_active_.dec();\n  parent_.host_->stats().rq_active_.dec();\n}\n\nvoid ClientImpl::PendingRequest::cancel() {\n  // If we get a cancellation, we just 
mark the pending request as cancelled, and then we drop\n  // the response as it comes through. There is no reason to blow away the connection when the\n  // remote is already responding as fast as possible.\n  canceled_ = true;\n}\n\nvoid ClientImpl::initialize(const std::string& auth_username, const std::string& auth_password) {\n  if (!auth_username.empty()) {\n    // Send an AUTH command to the upstream server with username and password.\n    Utility::AuthRequest auth_request(auth_username, auth_password);\n    makeRequest(auth_request, null_pool_callbacks);\n  } else if (!auth_password.empty()) {\n    // Send an AUTH command to the upstream server.\n    Utility::AuthRequest auth_request(auth_password);\n    makeRequest(auth_request, null_pool_callbacks);\n  }\n  // Any connection to replica requires the READONLY command in order to perform read.\n  // Also the READONLY command is a no-op for the primary.\n  // We only need to send the READONLY command iff it's possible that the host is a replica.\n  if (config_.readPolicy() != Common::Redis::Client::ReadPolicy::Primary) {\n    makeRequest(Utility::ReadOnlyRequest::instance(), null_pool_callbacks);\n  }\n}\n\nClientFactoryImpl ClientFactoryImpl::instance_;\n\nClientPtr ClientFactoryImpl::create(Upstream::HostConstSharedPtr host,\n                                    Event::Dispatcher& dispatcher, const Config& config,\n                                    const RedisCommandStatsSharedPtr& redis_command_stats,\n                                    Stats::Scope& scope, const std::string& auth_username,\n                                    const std::string& auth_password) {\n  ClientPtr client = ClientImpl::create(host, dispatcher, EncoderPtr{new EncoderImpl()},\n                                        decoder_factory_, config, redis_command_stats, scope);\n  client->initialize(auth_username, auth_password);\n  return client;\n}\n\n} // namespace Client\n} // namespace Redis\n} // namespace Common\n} // namespace 
NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/client_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/stats/timespan.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/hash.h\"\n#include \"common/network/filter_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/filters/network/common/redis/client.h\"\n#include \"extensions/filters/network/common/redis/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\nnamespace Client {\n\n// TODO(mattklein123): Circuit breaking\n// TODO(rshriram): Fault injection\n\nstruct RedirectionValues {\n  const std::string ASK = \"ASK\";\n  const std::string MOVED = \"MOVED\";\n  const std::string CLUSTER_DOWN = \"CLUSTERDOWN\";\n};\n\nusing RedirectionResponse = ConstSingleton<RedirectionValues>;\n\nclass ConfigImpl : public Config {\npublic:\n  ConfigImpl(\n      const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings&\n          config);\n\n  bool disableOutlierEvents() const override { return false; }\n  std::chrono::milliseconds opTimeout() const override { return op_timeout_; }\n  bool enableHashtagging() const override { return enable_hashtagging_; }\n  bool enableRedirection() const override { return enable_redirection_; }\n  uint32_t maxBufferSizeBeforeFlush() const override { return max_buffer_size_before_flush_; }\n  std::chrono::milliseconds bufferFlushTimeoutInMs() const override {\n    return buffer_flush_timeout_;\n  }\n  uint32_t maxUpstreamUnknownConnections() const override {\n    return max_upstream_unknown_connections_;\n  }\n  bool enableCommandStats() const override { return 
enable_command_stats_; }\n  ReadPolicy readPolicy() const override { return read_policy_; }\n\nprivate:\n  const std::chrono::milliseconds op_timeout_;\n  const bool enable_hashtagging_;\n  const bool enable_redirection_;\n  const uint32_t max_buffer_size_before_flush_;\n  const std::chrono::milliseconds buffer_flush_timeout_;\n  const uint32_t max_upstream_unknown_connections_;\n  const bool enable_command_stats_;\n  ReadPolicy read_policy_;\n};\n\nclass ClientImpl : public Client, public DecoderCallbacks, public Network::ConnectionCallbacks {\npublic:\n  static ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher,\n                          EncoderPtr&& encoder, DecoderFactory& decoder_factory,\n                          const Config& config,\n                          const RedisCommandStatsSharedPtr& redis_command_stats,\n                          Stats::Scope& scope);\n\n  ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, EncoderPtr&& encoder,\n             DecoderFactory& decoder_factory, const Config& config,\n             const RedisCommandStatsSharedPtr& redis_command_stats, Stats::Scope& scope);\n  ~ClientImpl() override;\n\n  // Client\n  void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) override {\n    connection_->addConnectionCallbacks(callbacks);\n  }\n  void close() override;\n  PoolRequest* makeRequest(const RespValue& request, ClientCallbacks& callbacks) override;\n  bool active() override { return !pending_requests_.empty(); }\n  void flushBufferAndResetTimer();\n  void initialize(const std::string& auth_username, const std::string& auth_password) override;\n\nprivate:\n  friend class RedisClientImplTest;\n\n  struct UpstreamReadFilter : public Network::ReadFilterBaseImpl {\n    UpstreamReadFilter(ClientImpl& parent) : parent_(parent) {}\n\n    // Network::ReadFilter\n    Network::FilterStatus onData(Buffer::Instance& data, bool) override {\n      parent_.onData(data);\n   
   return Network::FilterStatus::Continue;\n    }\n\n    ClientImpl& parent_;\n  };\n\n  struct PendingRequest : public PoolRequest {\n    PendingRequest(ClientImpl& parent, ClientCallbacks& callbacks, Stats::StatName stat_name);\n    ~PendingRequest() override;\n\n    // PoolRequest\n    void cancel() override;\n\n    ClientImpl& parent_;\n    ClientCallbacks& callbacks_;\n    Stats::StatName command_;\n    bool canceled_{};\n    Stats::TimespanPtr aggregate_request_timer_;\n    Stats::TimespanPtr command_request_timer_;\n  };\n\n  void onConnectOrOpTimeout();\n  void onData(Buffer::Instance& data);\n  void putOutlierEvent(Upstream::Outlier::Result result);\n\n  // DecoderCallbacks\n  void onRespValue(RespValuePtr&& value) override;\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n  Upstream::HostConstSharedPtr host_;\n  Network::ClientConnectionPtr connection_;\n  EncoderPtr encoder_;\n  Buffer::OwnedImpl encoder_buffer_;\n  DecoderPtr decoder_;\n  const Config& config_;\n  std::list<PendingRequest> pending_requests_;\n  Event::TimerPtr connect_or_op_timer_;\n  bool connected_{};\n  Event::TimerPtr flush_timer_;\n  Envoy::TimeSource& time_source_;\n  const RedisCommandStatsSharedPtr redis_command_stats_;\n  Stats::Scope& scope_;\n};\n\nclass ClientFactoryImpl : public ClientFactory {\npublic:\n  // RedisProxy::ConnPool::ClientFactoryImpl\n  ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher,\n                   const Config& config, const RedisCommandStatsSharedPtr& redis_command_stats,\n                   Stats::Scope& scope, const std::string& auth_username,\n                   const std::string& auth_password) override;\n\n  static ClientFactoryImpl instance_;\n\nprivate:\n  DecoderFactoryImpl decoder_factory_;\n};\n\n} // namespace Client\n} // namespace Redis\n} // namespace 
Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/codec.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\n/**\n * All RESP types as defined here: https://redis.io/topics/protocol with the exception of\n * CompositeArray. CompositeArray is an internal type that behaves like an Array type. Its first\n * element is a SimpleString or BulkString and the rest of the elements are portion of another\n * Array. This is created for performance.\n */\nenum class RespType { Null, SimpleString, BulkString, Integer, Error, Array, CompositeArray };\n\n/**\n * A variant implementation of a RESP value optimized for performance. A C++11 union is used for\n * the underlying type so that no unnecessary allocations/constructions are needed.\n */\nclass RespValue {\npublic:\n  RespValue() : type_(RespType::Null) {}\n\n  RespValue(std::shared_ptr<RespValue> base_array, const RespValue& command, const uint64_t start,\n            const uint64_t end)\n      : type_(RespType::CompositeArray) {\n    new (&composite_array_) CompositeArray(std::move(base_array), command, start, end);\n  }\n  virtual ~RespValue() { cleanup(); }\n\n  RespValue(const RespValue& other);                // copy constructor\n  RespValue(RespValue&& other) noexcept;            // move constructor\n  RespValue& operator=(const RespValue& other);     // copy assignment\n  RespValue& operator=(RespValue&& other) noexcept; // move assignment\n  bool operator==(const RespValue& other) const;    // test for equality, unit tests\n  bool operator!=(const RespValue& other) const { return !(*this == other); }\n\n  /**\n   * Convert a RESP value to a string for debugging purposes.\n   */\n  std::string toString() const;\n\n  /**\n   * Holds the data for CompositeArray RespType\n   
*/\n  class CompositeArray {\n  public:\n    CompositeArray() = default;\n    CompositeArray(std::shared_ptr<RespValue> base_array, const RespValue& command,\n                   const uint64_t start, const uint64_t end)\n        : base_array_(std::move(base_array)), command_(&command), start_(start), end_(end) {\n      ASSERT(command.type() == RespType::BulkString || command.type() == RespType::SimpleString);\n      ASSERT(base_array_ != nullptr);\n      ASSERT(base_array_->type() == RespType::Array);\n      ASSERT(start <= end);\n      ASSERT(end < base_array_->asArray().size());\n    }\n\n    const RespValue* command() const { return command_; }\n    const std::shared_ptr<RespValue>& baseArray() const { return base_array_; }\n\n    bool operator==(const CompositeArray& other) const;\n\n    uint64_t size() const;\n\n    /**\n     * Forward const iterator for CompositeArray.\n     * @note this implementation currently supports the minimum functionality needed to support\n     *       the `for (const RespValue& value : array)` idiom.\n     */\n    struct CompositeArrayConstIterator {\n      CompositeArrayConstIterator(const RespValue* command, const std::vector<RespValue>& array,\n                                  uint64_t index, bool first)\n          : command_(command), array_(array), index_(index), first_(first) {}\n      const RespValue& operator*();\n      CompositeArrayConstIterator& operator++();\n      bool operator!=(const CompositeArrayConstIterator& rhs) const;\n      static const CompositeArrayConstIterator& empty();\n\n      const RespValue* command_;\n      const std::vector<RespValue>& array_;\n      uint64_t index_;\n      bool first_;\n    };\n\n    CompositeArrayConstIterator begin() const noexcept {\n      return (command_ && base_array_)\n                 ? 
CompositeArrayConstIterator{command_, base_array_->asArray(), start_, true}\n                 : CompositeArrayConstIterator::empty();\n    }\n\n    CompositeArrayConstIterator end() const noexcept {\n      return (command_ && base_array_)\n                 ? CompositeArrayConstIterator{command_, base_array_->asArray(), end_ + 1, false}\n                 : CompositeArrayConstIterator::empty();\n    }\n\n  private:\n    std::shared_ptr<RespValue> base_array_;\n    const RespValue* command_;\n    uint64_t start_;\n    uint64_t end_;\n  };\n\n  /**\n   * The following are getters and setters for the internal value. A RespValue starts as null,\n   * and must change type via type() before the following methods can be used.\n   */\n  std::vector<RespValue>& asArray();\n  const std::vector<RespValue>& asArray() const;\n  std::string& asString();\n  const std::string& asString() const;\n  int64_t& asInteger();\n  int64_t asInteger() const;\n  CompositeArray& asCompositeArray();\n  const CompositeArray& asCompositeArray() const;\n\n  /**\n   * Get/set the type of the RespValue. A RespValue can only be a single type at a time. Each time\n   * type() is called the type is changed and then the type specific as* methods can be used.\n   */\n  RespType type() const { return type_; }\n  void type(RespType type);\n\nprivate:\n  union {\n    std::vector<RespValue> array_;\n    std::string string_;\n    int64_t integer_;\n    CompositeArray composite_array_;\n  };\n\n  void cleanup();\n\n  RespType type_{};\n};\n\nusing RespValuePtr = std::unique_ptr<RespValue>;\nusing RespValueSharedPtr = std::shared_ptr<RespValue>;\nusing RespValueConstSharedPtr = std::shared_ptr<const RespValue>;\n\n/**\n * Callbacks that the decoder fires.\n */\nclass DecoderCallbacks {\npublic:\n  virtual ~DecoderCallbacks() = default;\n\n  /**\n   * Called when a new top level RESP value has been decoded. 
This value may include multiple\n   * sub-values in the case of arrays or nested arrays.\n   * @param value supplies the decoded value that is now owned by the callee.\n   */\n  virtual void onRespValue(RespValuePtr&& value) PURE;\n};\n\n/**\n * A redis byte decoder for https://redis.io/topics/protocol\n */\nclass Decoder {\npublic:\n  virtual ~Decoder() = default;\n\n  /**\n   * Decode redis protocol bytes.\n   * @param data supplies the data to decode. All bytes will be consumed by the decoder or a\n   *        ProtocolError will be thrown.\n   */\n  virtual void decode(Buffer::Instance& data) PURE;\n};\n\nusing DecoderPtr = std::unique_ptr<Decoder>;\n\n/**\n * A factory for a redis decoder.\n */\nclass DecoderFactory {\npublic:\n  virtual ~DecoderFactory() = default;\n\n  /**\n   * Create a decoder given a set of decoder callbacks.\n   */\n  virtual DecoderPtr create(DecoderCallbacks& callbacks) PURE;\n};\n\n/**\n * A redis byte encoder for https://redis.io/topics/protocol\n */\nclass Encoder {\npublic:\n  virtual ~Encoder() = default;\n\n  /**\n   * Encode a RESP value to a buffer.\n   * @param value supplies the value to encode.\n   * @param out supplies the buffer to encode to.\n   */\n  virtual void encode(const RespValue& value, Buffer::Instance& out) PURE;\n};\n\nusing EncoderPtr = std::unique_ptr<Encoder>;\n\n/**\n * A redis protocol error.\n */\nclass ProtocolError : public EnvoyException {\npublic:\n  ProtocolError(const std::string& error) : EnvoyException(error) {}\n};\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/codec_impl.cc",
    "content": "#include \"extensions/filters/network/common/redis/codec_impl.h\"\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\nstd::string RespValue::toString() const {\n  switch (type_) {\n  case RespType::Array: {\n    std::string ret = \"[\";\n    for (uint64_t i = 0; i < asArray().size(); i++) {\n      ret += asArray()[i].toString();\n      if (i != asArray().size() - 1) {\n        ret += \", \";\n      }\n    }\n    return ret + \"]\";\n  }\n  case RespType::CompositeArray: {\n    std::string ret = \"[\";\n    uint64_t i = 0;\n    for (const RespValue& value : asCompositeArray()) {\n      ret += value.toString();\n      if (++i != asCompositeArray().size()) {\n        ret += \", \";\n      }\n    }\n    return ret + \"]\";\n  }\n  case RespType::SimpleString:\n  case RespType::BulkString:\n  case RespType::Error:\n    return fmt::format(\"\\\"{}\\\"\", asString());\n  case RespType::Null:\n    return \"null\";\n  case RespType::Integer:\n    return std::to_string(asInteger());\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nstd::vector<RespValue>& RespValue::asArray() {\n  ASSERT(type_ == RespType::Array);\n  return array_;\n}\n\nconst std::vector<RespValue>& RespValue::asArray() const {\n  ASSERT(type_ == RespType::Array);\n  return array_;\n}\n\nstd::string& RespValue::asString() {\n  ASSERT(type_ == RespType::BulkString || type_ == RespType::Error ||\n         type_ == RespType::SimpleString);\n  return string_;\n}\n\nconst std::string& RespValue::asString() const {\n  ASSERT(type_ == RespType::BulkString || type_ == RespType::Error ||\n         type_ == RespType::SimpleString);\n  return string_;\n}\n\nint64_t& 
RespValue::asInteger() {\n  ASSERT(type_ == RespType::Integer);\n  return integer_;\n}\n\nint64_t RespValue::asInteger() const {\n  ASSERT(type_ == RespType::Integer);\n  return integer_;\n}\n\nRespValue::CompositeArray& RespValue::asCompositeArray() {\n  ASSERT(type_ == RespType::CompositeArray);\n  return composite_array_;\n}\n\nconst RespValue::CompositeArray& RespValue::asCompositeArray() const {\n  ASSERT(type_ == RespType::CompositeArray);\n  return composite_array_;\n}\n\nvoid RespValue::cleanup() {\n  // Need to manually delete because of the union.\n  switch (type_) {\n  case RespType::Array: {\n    array_.~vector<RespValue>();\n    break;\n  }\n  case RespType::CompositeArray: {\n    composite_array_.~CompositeArray();\n    break;\n  }\n  case RespType::SimpleString:\n  case RespType::BulkString:\n  case RespType::Error: {\n    string_.~basic_string<char>();\n    break;\n  }\n  case RespType::Null:\n  case RespType::Integer: {\n    break;\n  }\n  }\n}\n\nvoid RespValue::type(RespType type) {\n  cleanup();\n\n  // Need to use placement new because of the union.\n  type_ = type;\n  switch (type) {\n  case RespType::Array: {\n    new (&array_) std::vector<RespValue>();\n    break;\n  }\n  case RespType::CompositeArray: {\n    new (&composite_array_) CompositeArray();\n    break;\n  }\n  case RespType::SimpleString:\n  case RespType::BulkString:\n  case RespType::Error: {\n    new (&string_) std::string();\n    break;\n  }\n  case RespType::Null:\n  case RespType::Integer: {\n    break;\n  }\n  }\n}\n\nRespValue::RespValue(const RespValue& other) : type_(RespType::Null) {\n  type(other.type());\n  switch (type_) {\n  case RespType::Array: {\n    asArray() = other.asArray();\n    break;\n  }\n  case RespType::CompositeArray: {\n    asCompositeArray() = other.asCompositeArray();\n    break;\n  }\n  case RespType::SimpleString:\n  case RespType::BulkString:\n  case RespType::Error: {\n    asString() = other.asString();\n    break;\n  }\n  case RespType::Integer: 
{\n    asInteger() = other.asInteger();\n    break;\n  }\n  case RespType::Null:\n    break;\n  }\n}\n\nRespValue::RespValue(RespValue&& other) noexcept : type_(other.type_) {\n  switch (type_) {\n  case RespType::Array: {\n    new (&array_) std::vector<RespValue>(std::move(other.array_));\n    break;\n  }\n  case RespType::CompositeArray: {\n    new (&composite_array_) CompositeArray(std::move(other.composite_array_));\n    break;\n  }\n  case RespType::SimpleString:\n  case RespType::BulkString:\n  case RespType::Error: {\n    new (&string_) std::string(std::move(other.string_));\n    break;\n  }\n  case RespType::Integer: {\n    integer_ = other.integer_;\n    break;\n  }\n  case RespType::Null:\n    break;\n  }\n}\n\nRespValue& RespValue::operator=(const RespValue& other) {\n  if (&other == this) {\n    return *this;\n  }\n  type(other.type());\n  switch (type_) {\n  case RespType::Array: {\n    asArray() = other.asArray();\n    break;\n  }\n  case RespType::CompositeArray: {\n    asCompositeArray() = other.asCompositeArray();\n    break;\n  }\n  case RespType::SimpleString:\n  case RespType::BulkString:\n  case RespType::Error: {\n    asString() = other.asString();\n    break;\n  }\n  case RespType::Integer: {\n    asInteger() = other.asInteger();\n    break;\n  }\n  case RespType::Null:\n    break;\n  }\n  return *this;\n}\n\nRespValue& RespValue::operator=(RespValue&& other) noexcept {\n  if (&other == this) {\n    return *this;\n  }\n\n  type(other.type());\n  switch (type_) {\n  case RespType::Array: {\n    array_ = std::move(other.array_);\n    break;\n  }\n  case RespType::CompositeArray: {\n    composite_array_ = std::move(other.composite_array_);\n    break;\n  }\n  case RespType::SimpleString:\n  case RespType::BulkString:\n  case RespType::Error: {\n    string_ = std::move(other.string_);\n    break;\n  }\n  case RespType::Integer: {\n    integer_ = other.integer_;\n    break;\n  }\n  case RespType::Null:\n    break;\n  }\n  return *this;\n}\n\nbool 
RespValue::operator==(const RespValue& other) const {\n  bool result = false;\n  if (type_ != other.type()) {\n    return result;\n  }\n\n  switch (type_) {\n  case RespType::Array: {\n    result = (asArray() == other.asArray());\n    break;\n  }\n  case RespType::CompositeArray: {\n    result = (asCompositeArray() == other.asCompositeArray());\n    break;\n  }\n  case RespType::SimpleString:\n  case RespType::BulkString:\n  case RespType::Error: {\n    result = (asString() == other.asString());\n    break;\n  }\n  case RespType::Integer: {\n    result = (asInteger() == other.asInteger());\n    break;\n  }\n  case RespType::Null: {\n    result = true;\n    break;\n  }\n  }\n  return result;\n}\n\nuint64_t RespValue::CompositeArray::size() const {\n  return (command_ && base_array_) ? end_ - start_ + 2 : 0;\n}\n\nbool RespValue::CompositeArray::operator==(const RespValue::CompositeArray& other) const {\n  return base_array_ == other.base_array_ && command_ == other.command_ && start_ == other.start_ &&\n         end_ == other.end_;\n}\n\nconst RespValue& RespValue::CompositeArray::CompositeArrayConstIterator::operator*() {\n  return first_ ? 
*command_ : array_[index_];\n}\n\nRespValue::CompositeArray::CompositeArrayConstIterator&\nRespValue::CompositeArray::CompositeArrayConstIterator::operator++() {\n  if (first_) {\n    first_ = false;\n  } else {\n    ++index_;\n  }\n  return *this;\n}\n\nbool RespValue::CompositeArray::CompositeArrayConstIterator::operator!=(\n    const CompositeArrayConstIterator& rhs) const {\n  return command_ != (rhs.command_) || &array_ != &(rhs.array_) || index_ != rhs.index_ ||\n         first_ != rhs.first_;\n}\n\nconst RespValue::CompositeArray::CompositeArrayConstIterator&\nRespValue::CompositeArray::CompositeArrayConstIterator::empty() {\n  static const RespValue::CompositeArray::CompositeArrayConstIterator* instance =\n      new RespValue::CompositeArray::CompositeArrayConstIterator(nullptr, {}, 0, false);\n  return *instance;\n}\n\nvoid DecoderImpl::decode(Buffer::Instance& data) {\n  for (const Buffer::RawSlice& slice : data.getRawSlices()) {\n    parseSlice(slice);\n  }\n\n  data.drain(data.length());\n}\n\nvoid DecoderImpl::parseSlice(const Buffer::RawSlice& slice) {\n  const char* buffer = reinterpret_cast<const char*>(slice.mem_);\n  uint64_t remaining = slice.len_;\n\n  while (remaining || state_ == State::ValueComplete) {\n    ENVOY_LOG(trace, \"parse slice: {} remaining\", remaining);\n    switch (state_) {\n    case State::ValueRootStart: {\n      ENVOY_LOG(trace, \"parse slice: ValueRootStart\");\n      pending_value_root_ = std::make_unique<RespValue>();\n      pending_value_stack_.push_front({pending_value_root_.get(), 0});\n      state_ = State::ValueStart;\n      break;\n    }\n\n    case State::ValueStart: {\n      ENVOY_LOG(trace, \"parse slice: ValueStart: {}\", buffer[0]);\n      pending_integer_.reset();\n      switch (buffer[0]) {\n      case '*': {\n        state_ = State::IntegerStart;\n        pending_value_stack_.front().value_->type(RespType::Array);\n        break;\n      }\n      case '$': {\n        state_ = State::IntegerStart;\n        
pending_value_stack_.front().value_->type(RespType::BulkString);\n        break;\n      }\n      case '-': {\n        state_ = State::SimpleString;\n        pending_value_stack_.front().value_->type(RespType::Error);\n        break;\n      }\n      case '+': {\n        state_ = State::SimpleString;\n        pending_value_stack_.front().value_->type(RespType::SimpleString);\n        break;\n      }\n      case ':': {\n        state_ = State::IntegerStart;\n        pending_value_stack_.front().value_->type(RespType::Integer);\n        break;\n      }\n      default: {\n        throw ProtocolError(\"invalid value type\");\n      }\n      }\n\n      remaining--;\n      buffer++;\n      break;\n    }\n\n    case State::IntegerStart: {\n      ENVOY_LOG(trace, \"parse slice: IntegerStart: {}\", buffer[0]);\n      if (buffer[0] == '-') {\n        pending_integer_.negative_ = true;\n        remaining--;\n        buffer++;\n      }\n\n      state_ = State::Integer;\n      break;\n    }\n\n    case State::Integer: {\n      ENVOY_LOG(trace, \"parse slice: Integer: {}\", buffer[0]);\n      char c = buffer[0];\n      if (buffer[0] == '\\r') {\n        state_ = State::IntegerLF;\n      } else {\n        if (c < '0' || c > '9') {\n          throw ProtocolError(\"invalid integer character\");\n        } else {\n          pending_integer_.integer_ = (pending_integer_.integer_ * 10) + (c - '0');\n        }\n      }\n\n      remaining--;\n      buffer++;\n      break;\n    }\n\n    case State::IntegerLF: {\n      if (buffer[0] != '\\n') {\n        throw ProtocolError(\"expected new line\");\n      }\n\n      ENVOY_LOG(trace, \"parse slice: IntegerLF: {}\", pending_integer_.integer_);\n      remaining--;\n      buffer++;\n\n      PendingValue& current_value = pending_value_stack_.front();\n      if (current_value.value_->type() == RespType::Array) {\n        if (pending_integer_.negative_) {\n          // Null array. 
Convert to null.\n          current_value.value_->type(RespType::Null);\n          state_ = State::ValueComplete;\n        } else if (pending_integer_.integer_ == 0) {\n          state_ = State::ValueComplete;\n        } else {\n          std::vector<RespValue> values(pending_integer_.integer_);\n          current_value.value_->asArray().swap(values);\n          pending_value_stack_.push_front({&current_value.value_->asArray()[0], 0});\n          state_ = State::ValueStart;\n        }\n      } else if (current_value.value_->type() == RespType::Integer) {\n        if (pending_integer_.integer_ == 0 || !pending_integer_.negative_) {\n          current_value.value_->asInteger() = pending_integer_.integer_;\n        } else {\n          // By subtracting 1 (and later correcting) we ensure that we remain within the int64_t\n          // range to allow a valid static_cast. This is an issue when we have a value of -2^63,\n          // which cannot be represented as 2^63 in the intermediate int64_t.\n          current_value.value_->asInteger() =\n              static_cast<int64_t>(pending_integer_.integer_ - 1) * -1 - 1;\n        }\n        state_ = State::ValueComplete;\n      } else {\n        ASSERT(current_value.value_->type() == RespType::BulkString);\n        if (!pending_integer_.negative_) {\n          // TODO(mattklein123): reserve and define max length since we don't stream currently.\n          state_ = State::BulkStringBody;\n        } else {\n          // Null bulk string. 
Switch type to null and move to value complete.\n          current_value.value_->type(RespType::Null);\n          state_ = State::ValueComplete;\n        }\n      }\n\n      break;\n    }\n\n    case State::BulkStringBody: {\n      ASSERT(!pending_integer_.negative_);\n      uint64_t length_to_copy =\n          std::min(static_cast<uint64_t>(pending_integer_.integer_), remaining);\n      pending_value_stack_.front().value_->asString().append(buffer, length_to_copy);\n      pending_integer_.integer_ -= length_to_copy;\n      remaining -= length_to_copy;\n      buffer += length_to_copy;\n\n      if (pending_integer_.integer_ == 0) {\n        ENVOY_LOG(trace, \"parse slice: BulkStringBody complete: {}\",\n                  pending_value_stack_.front().value_->asString());\n        state_ = State::CR;\n      }\n\n      break;\n    }\n\n    case State::CR: {\n      ENVOY_LOG(trace, \"parse slice: CR\");\n      if (buffer[0] != '\\r') {\n        throw ProtocolError(\"expected carriage return\");\n      }\n\n      remaining--;\n      buffer++;\n      state_ = State::LF;\n      break;\n    }\n\n    case State::LF: {\n      ENVOY_LOG(trace, \"parse slice: LF\");\n      if (buffer[0] != '\\n') {\n        throw ProtocolError(\"expected new line\");\n      }\n\n      remaining--;\n      buffer++;\n      state_ = State::ValueComplete;\n      break;\n    }\n\n    case State::SimpleString: {\n      ENVOY_LOG(trace, \"parse slice: SimpleString: {}\", buffer[0]);\n      if (buffer[0] == '\\r') {\n        state_ = State::LF;\n      } else {\n        pending_value_stack_.front().value_->asString().push_back(buffer[0]);\n      }\n\n      remaining--;\n      buffer++;\n      break;\n    }\n\n    case State::ValueComplete: {\n      ENVOY_LOG(trace, \"parse slice: ValueComplete\");\n      ASSERT(!pending_value_stack_.empty());\n      pending_value_stack_.pop_front();\n      if (pending_value_stack_.empty()) {\n        callbacks_.onRespValue(std::move(pending_value_root_));\n        
state_ = State::ValueRootStart;\n      } else {\n        PendingValue& current_value = pending_value_stack_.front();\n        ASSERT(current_value.value_->type() == RespType::Array);\n        if (current_value.current_array_element_ < current_value.value_->asArray().size() - 1) {\n          current_value.current_array_element_++;\n          pending_value_stack_.push_front(\n              {&current_value.value_->asArray()[current_value.current_array_element_], 0});\n          state_ = State::ValueStart;\n        }\n      }\n\n      break;\n    }\n    }\n  }\n}\n\nvoid EncoderImpl::encode(const RespValue& value, Buffer::Instance& out) {\n  switch (value.type()) {\n  case RespType::Array: {\n    encodeArray(value.asArray(), out);\n    break;\n  }\n  case RespType::CompositeArray: {\n    encodeCompositeArray(value.asCompositeArray(), out);\n    break;\n  }\n  case RespType::SimpleString: {\n    encodeSimpleString(value.asString(), out);\n    break;\n  }\n  case RespType::BulkString: {\n    encodeBulkString(value.asString(), out);\n    break;\n  }\n  case RespType::Error: {\n    encodeError(value.asString(), out);\n    break;\n  }\n  case RespType::Null: {\n    out.add(\"$-1\\r\\n\", 5);\n    break;\n  }\n  case RespType::Integer:\n    encodeInteger(value.asInteger(), out);\n    break;\n  }\n}\n\nvoid EncoderImpl::encodeArray(const std::vector<RespValue>& array, Buffer::Instance& out) {\n  char buffer[32];\n  char* current = buffer;\n  *current++ = '*';\n  current += StringUtil::itoa(current, 21, array.size());\n  *current++ = '\\r';\n  *current++ = '\\n';\n  out.add(buffer, current - buffer);\n\n  for (const RespValue& value : array) {\n    encode(value, out);\n  }\n}\n\nvoid EncoderImpl::encodeCompositeArray(const RespValue::CompositeArray& composite_array,\n                                       Buffer::Instance& out) {\n  char buffer[32];\n  char* current = buffer;\n  *current++ = '*';\n  current += StringUtil::itoa(current, 21, composite_array.size());\n  
*current++ = '\\r';\n  *current++ = '\\n';\n  out.add(buffer, current - buffer);\n  for (const RespValue& value : composite_array) {\n    encode(value, out);\n  }\n}\n\nvoid EncoderImpl::encodeBulkString(const std::string& string, Buffer::Instance& out) {\n  char buffer[32];\n  char* current = buffer;\n  *current++ = '$';\n  current += StringUtil::itoa(current, 21, string.size());\n  *current++ = '\\r';\n  *current++ = '\\n';\n  out.add(buffer, current - buffer);\n  out.add(string);\n  out.add(\"\\r\\n\", 2);\n}\n\nvoid EncoderImpl::encodeError(const std::string& string, Buffer::Instance& out) {\n  out.add(\"-\", 1);\n  out.add(string);\n  out.add(\"\\r\\n\", 2);\n}\n\nvoid EncoderImpl::encodeInteger(int64_t integer, Buffer::Instance& out) {\n  char buffer[32];\n  char* current = buffer;\n  *current++ = ':';\n  if (integer >= 0) {\n    current += StringUtil::itoa(current, 21, integer);\n  } else {\n    *current++ = '-';\n    // By adding 1 (and later correcting) we ensure that we remain within the int64_t\n    // range prior to the static_cast. This is an issue when we have a value of -2^63,\n    // which cannot be represented as 2^63 in the intermediate int64_t.\n    current += StringUtil::itoa(current, 30, static_cast<uint64_t>((integer + 1) * -1) + 1ULL);\n  }\n\n  *current++ = '\\r';\n  *current++ = '\\n';\n  out.add(buffer, current - buffer);\n}\n\nvoid EncoderImpl::encodeSimpleString(const std::string& string, Buffer::Instance& out) {\n  out.add(\"+\", 1);\n  out.add(string);\n  out.add(\"\\r\\n\", 2);\n}\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/codec_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <forward_list>\n#include <string>\n#include <vector>\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/common/redis/codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\n/**\n * Decoder implementation of https://redis.io/topics/protocol\n *\n * This implementation buffers when needed and will always consume all bytes passed for decoding.\n */\nclass DecoderImpl : public Decoder, Logger::Loggable<Logger::Id::redis> {\npublic:\n  DecoderImpl(DecoderCallbacks& callbacks) : callbacks_(callbacks) {}\n\n  // RedisProxy::Decoder\n  void decode(Buffer::Instance& data) override;\n\nprivate:\n  enum class State {\n    ValueRootStart,\n    ValueStart,\n    IntegerStart,\n    Integer,\n    IntegerLF,\n    BulkStringBody,\n    CR,\n    LF,\n    SimpleString,\n    ValueComplete\n  };\n\n  struct PendingInteger {\n    void reset() {\n      integer_ = 0;\n      negative_ = false;\n    }\n\n    uint64_t integer_;\n    bool negative_;\n  };\n\n  struct PendingValue {\n    RespValue* value_;\n    uint64_t current_array_element_;\n  };\n\n  void parseSlice(const Buffer::RawSlice& slice);\n\n  DecoderCallbacks& callbacks_;\n  State state_{State::ValueRootStart};\n  PendingInteger pending_integer_;\n  RespValuePtr pending_value_root_;\n  std::forward_list<PendingValue> pending_value_stack_;\n};\n\n/**\n * A factory implementation that returns a real decoder.\n */\nclass DecoderFactoryImpl : public DecoderFactory {\npublic:\n  // RedisProxy::DecoderFactory\n  DecoderPtr create(DecoderCallbacks& callbacks) override {\n    return DecoderPtr{new DecoderImpl(callbacks)};\n  }\n};\n\n/**\n * Encoder implementation of https://redis.io/topics/protocol\n */\nclass EncoderImpl : public Encoder {\npublic:\n  // RedisProxy::Encoder\n  void encode(const RespValue& value, Buffer::Instance& out) override;\n\nprivate:\n  void encodeArray(const 
std::vector<RespValue>& array, Buffer::Instance& out);\n  void encodeCompositeArray(const RespValue::CompositeArray& array, Buffer::Instance& out);\n  void encodeBulkString(const std::string& string, Buffer::Instance& out);\n  void encodeError(const std::string& string, Buffer::Instance& out);\n  void encodeInteger(int64_t integer, Buffer::Instance& out);\n  void encodeSimpleString(const std::string& string, Buffer::Instance& out);\n};\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/fault.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\n/**\n * Fault Type.\n */\nenum class FaultType { Delay, Error };\n\nclass Fault {\npublic:\n  virtual ~Fault() = default;\n\n  virtual FaultType faultType() const PURE;\n  virtual std::chrono::milliseconds delayMs() const PURE;\n  virtual const std::vector<std::string> commands() const PURE;\n  virtual envoy::type::v3::FractionalPercent defaultValue() const PURE;\n  virtual absl::optional<std::string> runtimeKey() const PURE;\n};\n\nusing FaultSharedPtr = std::shared_ptr<const Fault>;\n\nclass FaultManager {\npublic:\n  virtual ~FaultManager() = default;\n\n  /**\n   * Get fault type and delay given a Redis command.\n   * @param command supplies the Redis command string.\n   */\n  virtual const Fault* getFaultForCommand(const std::string& command) const PURE;\n};\n\nusing FaultManagerPtr = std::unique_ptr<FaultManager>;\n\nusing FaultManagerSharedPtr = std::shared_ptr<FaultManager>;\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/common/redis/fault_impl.cc",
    "content": "#include \"extensions/filters/network/common/redis/fault_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\nstruct FaultManagerKeyNamesValues {\n  // The rbac filter rejected the request\n  const std::string AllKey = \"ALL_KEY\";\n};\nusing FaultManagerKeyNames = ConstSingleton<FaultManagerKeyNamesValues>;\n\nFaultManagerImpl::FaultImpl::FaultImpl(\n    envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault)\n    : commands_(buildCommands(base_fault)) {\n  delay_ms_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(base_fault, delay, 0));\n\n  switch (base_fault.fault_type()) {\n  case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::RedisFault::DELAY:\n    fault_type_ = FaultType::Delay;\n    break;\n  case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::RedisFault::ERROR:\n    fault_type_ = FaultType::Error;\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n    break;\n  }\n\n  default_value_ = base_fault.fault_enabled().default_value();\n  runtime_key_ = base_fault.fault_enabled().runtime_key();\n};\n\nstd::vector<std::string> FaultManagerImpl::FaultImpl::buildCommands(\n    envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault) {\n  std::vector<std::string> commands;\n  for (const std::string& command : base_fault.commands()) {\n    commands.emplace_back(absl::AsciiStrToLower(command));\n  }\n  return commands;\n}\n\nFaultManagerImpl::FaultManagerImpl(\n    Random::RandomGenerator& random, Runtime::Loader& runtime,\n    const Protobuf::RepeatedPtrField<\n        ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault>\n        faults)\n    : fault_map_(buildFaultMap(faults)), random_(random), runtime_(runtime) {}\n\nFaultMap FaultManagerImpl::buildFaultMap(\n    const Protobuf::RepeatedPtrField<\n        
::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault>\n        faults) {\n  // Next, create the fault map that maps commands to pointers to Fault objects.\n  // Group faults by command\n  FaultMap fault_map;\n  for (auto const& base_fault : faults) {\n    auto fault_ptr = std::make_shared<FaultImpl>(base_fault);\n    if (!fault_ptr->commands().empty()) {\n      for (const std::string& command : fault_ptr->commands()) {\n        fault_map[command].emplace_back(fault_ptr);\n      }\n    } else {\n      // Generic \"ALL\" entry in map for faults that map to all keys; also add to each command\n      fault_map[FaultManagerKeyNames::get().AllKey].emplace_back(fault_ptr);\n    }\n  }\n\n  // Add the ALL keys faults to each command too so that we can just query faults by command.\n  // Get all ALL_KEY faults.\n  FaultMap::iterator it_outer = fault_map.find(FaultManagerKeyNames::get().AllKey);\n  if (it_outer != fault_map.end()) {\n    for (const FaultSharedPtr& fault_ptr : it_outer->second) {\n      FaultMap::iterator it_inner;\n      for (it_inner = fault_map.begin(); it_inner != fault_map.end(); it_inner++) {\n        std::string command = it_inner->first;\n        if (command != FaultManagerKeyNames::get().AllKey) {\n          fault_map[command].push_back(fault_ptr);\n        }\n      }\n    }\n  }\n  return fault_map;\n}\n\nuint64_t FaultManagerImpl::getIntegerNumeratorOfFractionalPercent(\n    absl::string_view key, const envoy::type::v3::FractionalPercent& default_value) const {\n  uint64_t numerator;\n  if (default_value.denominator() == envoy::type::v3::FractionalPercent::HUNDRED) {\n    numerator = default_value.numerator();\n  } else {\n    int denominator =\n        ProtobufPercentHelper::fractionalPercentDenominatorToInt(default_value.denominator());\n    numerator = (default_value.numerator() * 100) / denominator;\n  }\n  return runtime_.snapshot().getInteger(key, numerator);\n}\n\n// Fault checking algorithm:\n//\n// For example, if 
we have an ERROR fault at 5% for all commands, and a DELAY fault at 10% for GET,\n// if we receive a GET, we want 5% of GETs to get DELAY, and 10% to get ERROR. Thus, we need to\n// amortize the percentages.\n//\n// 0. Get random number.\n// 1. Get faults for given command.\n// 2. For each fault, calculate the amortized fault injection percentage.\n//\n// Note that we do not check to make sure the probabilities of faults are <= 100%!\nconst Fault* FaultManagerImpl::getFaultForCommandInternal(const std::string& command) const {\n  FaultMap::const_iterator it_outer = fault_map_.find(command);\n  if (it_outer != fault_map_.end()) {\n    auto random_number = random_.random() % 100;\n    int amortized_fault = 0;\n\n    for (const FaultSharedPtr& fault_ptr : it_outer->second) {\n      uint64_t fault_injection_percentage = getIntegerNumeratorOfFractionalPercent(\n          fault_ptr->runtimeKey().value(), fault_ptr->defaultValue());\n      if (random_number < (fault_injection_percentage + amortized_fault)) {\n        return fault_ptr.get();\n      } else {\n        amortized_fault += fault_injection_percentage;\n      }\n    }\n  }\n\n  return nullptr;\n}\n\nconst Fault* FaultManagerImpl::getFaultForCommand(const std::string& command) const {\n  if (!fault_map_.empty()) {\n    if (fault_map_.count(command) > 0) {\n      return getFaultForCommandInternal(command);\n    } else {\n      return getFaultForCommandInternal(FaultManagerKeyNames::get().AllKey);\n    }\n  }\n\n  return nullptr;\n}\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/common/redis/fault_impl.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/protobuf/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/network/common/redis/fault.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\nusing FaultMap = absl::flat_hash_map<std::string, std::vector<FaultSharedPtr>>;\n\n/**\n * Message returned for particular types of faults.\n */\nstruct FaultMessagesValues {\n  const std::string Error = \"Fault Injected: Error\";\n};\nusing FaultMessages = ConstSingleton<FaultMessagesValues>;\n\n/**\n * Fault management- creation, storage and retrieval. Faults are queried for by command,\n * so they are stored in an unordered map using the command as key. For faults that apply to\n * all commands, we use a special ALL_KEYS entry in the map.\n */\nclass FaultManagerImpl : public FaultManager {\npublic:\n  FaultManagerImpl(\n      Random::RandomGenerator& random, Runtime::Loader& runtime,\n      const Protobuf::RepeatedPtrField<\n          ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault>\n          base_faults);\n\n  const Fault* getFaultForCommand(const std::string& command) const override;\n\n  static FaultSharedPtr makeFaultForTest(Common::Redis::FaultType fault_type,\n                                         std::chrono::milliseconds delay_ms) {\n    envoy::type::v3::FractionalPercent default_value;\n    default_value.set_numerator(100);\n    default_value.set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n    FaultImpl fault =\n        FaultImpl(fault_type, delay_ms, std::vector<std::string>(), default_value, \"foo\");\n    return std::make_shared<FaultImpl>(fault);\n  }\n\n  // Allow the unit 
test to have access to private members.\n  friend class FaultTest;\n\nprivate:\n  class FaultImpl : public Fault {\n  public:\n    FaultImpl(\n        envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault);\n    FaultImpl(FaultType fault_type, std::chrono::milliseconds delay_ms,\n              const std::vector<std::string> commands,\n              envoy::type::v3::FractionalPercent default_value,\n              absl::optional<std::string> runtime_key)\n        : fault_type_(fault_type), delay_ms_(delay_ms), commands_(commands),\n          default_value_(default_value), runtime_key_(runtime_key) {} // For testing only\n\n    FaultType faultType() const override { return fault_type_; };\n    std::chrono::milliseconds delayMs() const override { return delay_ms_; };\n    const std::vector<std::string> commands() const override { return commands_; };\n    envoy::type::v3::FractionalPercent defaultValue() const override { return default_value_; };\n    absl::optional<std::string> runtimeKey() const override { return runtime_key_; };\n\n  private:\n    static std::vector<std::string> buildCommands(\n        envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault);\n\n    FaultType fault_type_;\n    std::chrono::milliseconds delay_ms_;\n    const std::vector<std::string> commands_;\n    envoy::type::v3::FractionalPercent default_value_;\n    absl::optional<std::string> runtime_key_;\n  };\n\n  static FaultMap\n  buildFaultMap(const Protobuf::RepeatedPtrField<\n                ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault>\n                    faults);\n\n  uint64_t getIntegerNumeratorOfFractionalPercent(\n      absl::string_view key, const envoy::type::v3::FractionalPercent& default_value) const;\n  const Fault* getFaultForCommandInternal(const std::string& command) const;\n  const FaultMap fault_map_;\n\nprotected:\n  Random::RandomGenerator& random_;\n  Runtime::Loader& 
runtime_;\n};\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/common/redis/redis_command_stats.cc",
    "content": "#include \"extensions/filters/network/common/redis/redis_command_stats.h\"\n\n#include \"common/stats/timespan_impl.h\"\n#include \"common/stats/utility.h\"\n\n#include \"extensions/filters/network/common/redis/supported_commands.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\nRedisCommandStats::RedisCommandStats(Stats::SymbolTable& symbol_table, const std::string& prefix)\n    : symbol_table_(symbol_table), stat_name_set_(symbol_table_.makeSet(\"Redis\")),\n      prefix_(stat_name_set_->add(prefix)),\n      upstream_rq_time_(stat_name_set_->add(\"upstream_rq_time\")),\n      latency_(stat_name_set_->add(\"latency\")), total_(stat_name_set_->add(\"total\")),\n      success_(stat_name_set_->add(\"success\")), failure_(stat_name_set_->add(\"failure\")),\n      unused_metric_(stat_name_set_->add(\"unused\")), null_metric_(stat_name_set_->add(\"null\")),\n      unknown_metric_(stat_name_set_->add(\"unknown\")) {\n  // Note: Even if this is disabled, we track the upstream_rq_time.\n  // Create StatName for each Redis command. 
Note that we don't include Auth or Ping.\n  stat_name_set_->rememberBuiltins(\n      Extensions::NetworkFilters::Common::Redis::SupportedCommands::simpleCommands());\n  stat_name_set_->rememberBuiltins(\n      Extensions::NetworkFilters::Common::Redis::SupportedCommands::evalCommands());\n  stat_name_set_->rememberBuiltins(Extensions::NetworkFilters::Common::Redis::SupportedCommands::\n                                       hashMultipleSumResultCommands());\n  stat_name_set_->rememberBuiltin(\n      Extensions::NetworkFilters::Common::Redis::SupportedCommands::mget());\n  stat_name_set_->rememberBuiltin(\n      Extensions::NetworkFilters::Common::Redis::SupportedCommands::mset());\n}\n\nStats::TimespanPtr RedisCommandStats::createCommandTimer(Stats::Scope& scope,\n                                                         Stats::StatName command,\n                                                         Envoy::TimeSource& time_source) {\n  return std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n      Stats::Utility::histogramFromStatNames(scope, {prefix_, command, latency_},\n                                             Stats::Histogram::Unit::Microseconds),\n      time_source);\n}\n\nStats::TimespanPtr RedisCommandStats::createAggregateTimer(Stats::Scope& scope,\n                                                           Envoy::TimeSource& time_source) {\n  return std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n      Stats::Utility::histogramFromStatNames(scope, {prefix_, upstream_rq_time_},\n                                             Stats::Histogram::Unit::Microseconds),\n      time_source);\n}\n\nStats::StatName RedisCommandStats::getCommandFromRequest(const RespValue& request) {\n  // Get command from RespValue\n  switch (request.type()) {\n  case RespType::Array:\n    return getCommandFromRequest(request.asArray().front());\n  case RespType::CompositeArray:\n    return getCommandFromRequest(*request.asCompositeArray().command());\n  
case RespType::Null:\n    return null_metric_;\n  case RespType::BulkString:\n  case RespType::SimpleString: {\n    std::string to_lower_command = absl::AsciiStrToLower(request.asString());\n    return stat_name_set_->getBuiltin(to_lower_command, unknown_metric_);\n  }\n  case RespType::Integer:\n  case RespType::Error:\n  default:\n    return unknown_metric_;\n  }\n}\n\nvoid RedisCommandStats::updateStatsTotal(Stats::Scope& scope, Stats::StatName command) {\n  Stats::Utility::counterFromStatNames(scope, {prefix_, command, total_}).inc();\n}\n\nvoid RedisCommandStats::updateStats(Stats::Scope& scope, Stats::StatName command,\n                                    const bool success) {\n  Stats::StatName status = success ? success_ : failure_;\n  Stats::Utility::counterFromStatNames(scope, {prefix_, command, status}).inc();\n}\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/redis_command_stats.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/filters/network/common/redis/codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\nclass RedisCommandStats {\npublic:\n  RedisCommandStats(Stats::SymbolTable& symbol_table, const std::string& prefix);\n\n  // TODO (@FAYiEKcbD0XFqF2QK2E4viAHg8rMm2VbjYKdjTg): Use Singleton to manage a single\n  // RedisCommandStats on the client factory so that it can be used for proxy filter, discovery and\n  // health check.\n  static std::shared_ptr<RedisCommandStats>\n  createRedisCommandStats(Stats::SymbolTable& symbol_table) {\n    return std::make_shared<Common::Redis::RedisCommandStats>(symbol_table, \"upstream_commands\");\n  }\n\n  Stats::TimespanPtr createCommandTimer(Stats::Scope& scope, Stats::StatName command,\n                                        Envoy::TimeSource& time_source);\n  Stats::TimespanPtr createAggregateTimer(Stats::Scope& scope, Envoy::TimeSource& time_source);\n  Stats::StatName getCommandFromRequest(const RespValue& request);\n  void updateStatsTotal(Stats::Scope& scope, Stats::StatName command);\n  void updateStats(Stats::Scope& scope, Stats::StatName command, const bool success);\n  Stats::StatName getUnusedStatName() { return unused_metric_; }\n\nprivate:\n  Stats::SymbolTable& symbol_table_;\n  Stats::StatNameSetPtr stat_name_set_;\n  const Stats::StatName prefix_;\n  const Stats::StatName upstream_rq_time_;\n  const Stats::StatName latency_;\n  const Stats::StatName total_;\n  const Stats::StatName success_;\n  const Stats::StatName failure_;\n  const Stats::StatName unused_metric_;\n  const Stats::StatName null_metric_;\n  const Stats::StatName unknown_metric_;\n};\nusing RedisCommandStatsSharedPtr = std::shared_ptr<RedisCommandStats>;\n\n} // namespace Redis\n} 
// namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/supported_commands.h",
    "content": "#pragma once\n\n#include <set>\n#include <string>\n#include <vector>\n\n#include \"common/common/macros.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\nstruct SupportedCommands {\n  /**\n   * @return commands which hash to a single server\n   */\n  static const absl::flat_hash_set<std::string>& simpleCommands() {\n    CONSTRUCT_ON_FIRST_USE(\n        absl::flat_hash_set<std::string>, \"append\", \"bitcount\", \"bitfield\", \"bitpos\", \"decr\",\n        \"decrby\", \"dump\", \"expire\", \"expireat\", \"geoadd\", \"geodist\", \"geohash\", \"geopos\",\n        \"georadius_ro\", \"georadiusbymember_ro\", \"get\", \"getbit\", \"getrange\", \"getset\", \"hdel\",\n        \"hexists\", \"hget\", \"hgetall\", \"hincrby\", \"hincrbyfloat\", \"hkeys\", \"hlen\", \"hmget\", \"hmset\",\n        \"hscan\", \"hset\", \"hsetnx\", \"hstrlen\", \"hvals\", \"incr\", \"incrby\", \"incrbyfloat\", \"lindex\",\n        \"linsert\", \"llen\", \"lpop\", \"lpush\", \"lpushx\", \"lrange\", \"lrem\", \"lset\", \"ltrim\", \"persist\",\n        \"pexpire\", \"pexpireat\", \"pfadd\", \"pfcount\", \"psetex\", \"pttl\", \"restore\", \"rpop\", \"rpush\",\n        \"rpushx\", \"sadd\", \"scard\", \"set\", \"setbit\", \"setex\", \"setnx\", \"setrange\", \"sismember\",\n        \"smembers\", \"spop\", \"srandmember\", \"srem\", \"sscan\", \"strlen\", \"ttl\", \"type\", \"zadd\",\n        \"zcard\", \"zcount\", \"zincrby\", \"zlexcount\", \"zpopmin\", \"zpopmax\", \"zrange\", \"zrangebylex\",\n        \"zrangebyscore\", \"zrank\", \"zrem\", \"zremrangebylex\", \"zremrangebyrank\", \"zremrangebyscore\",\n        \"zrevrange\", \"zrevrangebylex\", \"zrevrangebyscore\", \"zrevrank\", \"zscan\", \"zscore\");\n  }\n\n  /**\n   * @return commands which hash on the fourth argument\n   */\n  static const absl::flat_hash_set<std::string>& evalCommands() {\n    
CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set<std::string>, \"eval\", \"evalsha\");\n  }\n\n  /**\n   * @return commands which are sent to multiple servers and coalesced by summing the responses\n   */\n  static const absl::flat_hash_set<std::string>& hashMultipleSumResultCommands() {\n    CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set<std::string>, \"del\", \"exists\", \"touch\", \"unlink\");\n  }\n\n  /**\n   * @return auth command\n   */\n  static const std::string& auth() { CONSTRUCT_ON_FIRST_USE(std::string, \"auth\"); }\n\n  /**\n   * @return mget command\n   */\n  static const std::string& mget() { CONSTRUCT_ON_FIRST_USE(std::string, \"mget\"); }\n\n  /**\n   * @return mset command\n   */\n  static const std::string& mset() { CONSTRUCT_ON_FIRST_USE(std::string, \"mset\"); }\n\n  /**\n   * @return ping command\n   */\n  static const std::string& ping() { CONSTRUCT_ON_FIRST_USE(std::string, \"ping\"); }\n\n  /**\n   * @return commands which alters the state of redis\n   */\n  static const absl::flat_hash_set<std::string>& writeCommands() {\n    CONSTRUCT_ON_FIRST_USE(\n        absl::flat_hash_set<std::string>, \"append\", \"bitfield\", \"decr\", \"decrby\", \"del\", \"expire\",\n        \"expireat\", \"eval\", \"evalsha\", \"geoadd\", \"hdel\", \"hincrby\", \"hincrbyfloat\", \"hmset\", \"hset\",\n        \"hsetnx\", \"incr\", \"incrby\", \"incrbyfloat\", \"linsert\", \"lpop\", \"lpush\", \"lpushx\", \"lrem\",\n        \"lset\", \"ltrim\", \"mset\", \"persist\", \"pexpire\", \"pexpireat\", \"pfadd\", \"psetex\", \"restore\",\n        \"rpop\", \"rpush\", \"rpushx\", \"sadd\", \"set\", \"setbit\", \"setex\", \"setnx\", \"setrange\", \"spop\",\n        \"srem\", \"zadd\", \"zincrby\", \"touch\", \"zpopmin\", \"zpopmax\", \"zrem\", \"zremrangebylex\",\n        \"zremrangebyrank\", \"zremrangebyscore\", \"unlink\");\n  }\n\n  static bool isReadCommand(const std::string& command) {\n    return !writeCommands().contains(command);\n  }\n};\n\n} // namespace Redis\n} // 
namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/utility.cc",
    "content": "#include \"extensions/filters/network/common/redis/utility.h\"\n\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\nnamespace Utility {\n\nAuthRequest::AuthRequest(const std::string& password) {\n  std::vector<RespValue> values(2);\n  values[0].type(RespType::BulkString);\n  values[0].asString() = \"auth\";\n  values[1].type(RespType::BulkString);\n  values[1].asString() = password;\n  type(RespType::Array);\n  asArray().swap(values);\n}\n\nAuthRequest::AuthRequest(const std::string& username, const std::string& password) {\n  std::vector<RespValue> values(3);\n  values[0].type(RespType::BulkString);\n  values[0].asString() = \"auth\";\n  values[1].type(RespType::BulkString);\n  values[1].asString() = username;\n  values[2].type(RespType::BulkString);\n  values[2].asString() = password;\n  type(RespType::Array);\n  asArray().swap(values);\n}\n\nRespValuePtr makeError(const std::string& error) {\n  Common::Redis::RespValuePtr response(new RespValue());\n  response->type(Common::Redis::RespType::Error);\n  response->asString() = error;\n  return response;\n}\n\nReadOnlyRequest::ReadOnlyRequest() {\n  std::vector<RespValue> values(1);\n  values[0].type(RespType::BulkString);\n  values[0].asString() = \"readonly\";\n  type(RespType::Array);\n  asArray().swap(values);\n}\n\nconst ReadOnlyRequest& ReadOnlyRequest::instance() {\n  static const ReadOnlyRequest* instance = new ReadOnlyRequest{};\n  return *instance;\n}\n\nAskingRequest::AskingRequest() {\n  std::vector<RespValue> values(1);\n  values[0].type(RespType::BulkString);\n  values[0].asString() = \"asking\";\n  type(RespType::Array);\n  asArray().swap(values);\n}\n\nconst AskingRequest& AskingRequest::instance() {\n  static const AskingRequest* instance = new AskingRequest{};\n  return *instance;\n}\n\nGetRequest::GetRequest() {\n  type(RespType::BulkString);\n  asString() = \"get\";\n}\n\nconst 
GetRequest& GetRequest::instance() {\n  static const GetRequest* instance = new GetRequest{};\n  return *instance;\n}\n\nSetRequest::SetRequest() {\n  type(RespType::BulkString);\n  asString() = \"set\";\n}\n\nconst SetRequest& SetRequest::instance() {\n  static const SetRequest* instance = new SetRequest{};\n  return *instance;\n}\n} // namespace Utility\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/redis/utility.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"extensions/filters/network/common/redis/codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\nnamespace Utility {\n\nclass AuthRequest : public Redis::RespValue {\npublic:\n  AuthRequest(const std::string& username, const std::string& password);\n  AuthRequest(const std::string& password);\n};\n\nRespValuePtr makeError(const std::string& error);\n\nclass ReadOnlyRequest : public Redis::RespValue {\npublic:\n  ReadOnlyRequest();\n  static const ReadOnlyRequest& instance();\n};\n\nclass AskingRequest : public Redis::RespValue {\npublic:\n  AskingRequest();\n  static const AskingRequest& instance();\n};\n\nclass GetRequest : public Redis::RespValue {\npublic:\n  GetRequest();\n  static const GetRequest& instance();\n};\n\nclass SetRequest : public Redis::RespValue {\npublic:\n  SetRequest();\n  static const SetRequest& instance();\n};\n\n} // namespace Utility\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/common/utility.h",
    "content": "#pragma once\n\n#include \"common/common/macros.h\"\n\n#include \"extensions/common/utility.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\n\n/**\n * FilterNameUtil provides utilities for handling Network filter\n * extension names (e.g., \"envoy.filters.network.redis_proxy\").\n */\nclass FilterNameUtil {\npublic:\n  /**\n   * Given a deprecated network filter extension name, return the\n   * canonical name. Any name not defined in the deprecated map is\n   * returned without modification. If deprecated extension names are\n   * disabled, throws EnvoyException.\n   *\n   * @return const std::string& canonical filter name\n   * @throw EnvoyException if deprecated names are disabled\n   */\n  static const std::string&\n  canonicalFilterName(const std::string& name,\n                      Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting()) {\n    const auto& it = deprecatedNameMap().find(name);\n    if (it == deprecatedNameMap().end()) {\n      return name;\n    }\n\n    Extensions::Common::Utility::ExtensionNameUtil::checkDeprecatedExtensionName(\n        \"network filter\", name, it->second, runtime);\n\n    return it->second;\n  }\n\nprivate:\n  using DeprecatedNameMap = absl::flat_hash_map<std::string, std::string>;\n\n  static const DeprecatedNameMap& deprecatedNameMap() {\n    CONSTRUCT_ON_FIRST_USE(\n        DeprecatedNameMap,\n        {\n            {\"envoy.redis_proxy\", NetworkFilters::NetworkFilterNames::get().RedisProxy},\n        });\n  }\n};\n\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/direct_response/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Direct response L4 network filter.\n# Public docs: docs/root/configuration/network_filters/direct_response_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"filter\",\n    srcs = [\"filter.cc\"],\n    hdrs = [\"filter.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    security_posture = \"unknown\",\n    deps = [\n        \":filter\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/direct_response/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/direct_response/config.cc",
    "content": "#include \"envoy/extensions/filters/network/direct_response/v3/config.pb.h\"\n#include \"envoy/extensions/filters/network/direct_response/v3/config.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/config/datasource.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/direct_response/filter.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DirectResponse {\n\n/**\n * Config registration for the direct response filter. @see NamedNetworkFilterConfigFactory.\n */\nclass DirectResponseConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::network::direct_response::v3::Config> {\npublic:\n  DirectResponseConfigFactory() : FactoryBase(NetworkFilterNames::get().DirectResponse) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::direct_response::v3::Config& config,\n      Server::Configuration::FactoryContext& context) override {\n    return [config, &context](Network::FilterManager& filter_manager) -> void {\n      auto content = Config::DataSource::read(config.response(), true, context.api());\n      filter_manager.addReadFilter(std::make_shared<DirectResponseFilter>(content));\n    };\n  }\n\n  bool isTerminalFilter() override { return true; }\n};\n\n/**\n * Static registration for the direct response filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(DirectResponseConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace DirectResponse\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/direct_response/filter.cc",
    "content": "#include \"extensions/filters/network/direct_response/filter.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DirectResponse {\n\nNetwork::FilterStatus DirectResponseFilter::onNewConnection() {\n  auto& connection = read_callbacks_->connection();\n  ENVOY_CONN_LOG(trace, \"direct_response: new connection\", connection);\n  if (!response_.empty()) {\n    Buffer::OwnedImpl data(response_);\n    connection.write(data, true);\n    ASSERT(0 == data.length());\n  }\n  connection.streamInfo().setResponseCodeDetails(\n      StreamInfo::ResponseCodeDetails::get().DirectResponse);\n  connection.close(Network::ConnectionCloseType::FlushWrite);\n  return Network::FilterStatus::StopIteration;\n}\n\n} // namespace DirectResponse\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/direct_response/filter.h",
    "content": "#pragma once\n\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DirectResponse {\n\n/**\n * Implementation of a basic direct response filter.\n */\nclass DirectResponseFilter : public Network::ReadFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  DirectResponseFilter(const std::string& response) : response_(response) {}\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance&, bool) override {\n    return Network::FilterStatus::Continue;\n  }\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n    read_callbacks_->connection().enableHalfClose(true);\n  }\n\nprivate:\n  const std::string response_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n};\n\n} // namespace DirectResponse\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"buffer_helper_lib\",\n    srcs = [\"buffer_helper.cc\"],\n    hdrs = [\"buffer_helper.h\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:byte_order_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hessian_utils_lib\",\n    srcs = [\"hessian_utils.cc\"],\n    hdrs = [\"hessian_utils.h\"],\n    deps = [\n        \":buffer_helper_lib\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"protocol_interface\",\n    hdrs = [\"protocol.h\"],\n    deps = [\n        \":buffer_helper_lib\",\n        \":message_lib\",\n        \":metadata_lib\",\n        \":serializer_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dubbo_protocol_impl_lib\",\n    srcs = [\"dubbo_protocol_impl.cc\"],\n    hdrs = [\"dubbo_protocol_impl.h\"],\n    deps = [\n        \":protocol_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"serializer_interface\",\n    srcs = [\"serializer_impl.cc\"],\n    hdrs = [\n        \"protocol_constants.h\",\n        \"serializer.h\",\n        \"serializer_impl.h\",\n    ],\n    deps = [\n        \":message_lib\",\n        \":metadata_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//source/common/common:assert_lib\",\n        
\"//source/common/config:utility_lib\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dubbo_hessian2_serializer_impl_lib\",\n    srcs = [\"dubbo_hessian2_serializer_impl.cc\"],\n    hdrs = [\n        \"dubbo_hessian2_serializer_impl.h\",\n    ],\n    deps = [\n        \":buffer_helper_lib\",\n        \":hessian_utils_lib\",\n        \":serializer_interface\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"decoder_lib\",\n    srcs = [\"decoder.cc\"],\n    hdrs = [\"decoder.h\"],\n    deps = [\n        \":decoder_events_lib\",\n        \":dubbo_hessian2_serializer_impl_lib\",\n        \":dubbo_protocol_impl_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:logger_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"alpha\",\n    deps = [\n        \":conn_manager_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:factory_base_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:well_known_names\",\n        \"//source/extensions/filters/network/dubbo_proxy/router:config\",\n        \"//source/extensions/filters/network/dubbo_proxy/router:route_matcher\",\n        \"//source/extensions/filters/network/dubbo_proxy/router:router_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    
name = \"metadata_lib\",\n    hdrs = [\"metadata.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":message_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"message_lib\",\n    hdrs = [\n        \"message.h\",\n        \"message_impl.h\",\n    ],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"decoder_events_lib\",\n    hdrs = [\"decoder_event_handler.h\"],\n    deps = [\n        \":metadata_lib\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stats_lib\",\n    hdrs = [\"stats.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"app_exception_lib\",\n    srcs = [\"app_exception.cc\"],\n    hdrs = [\"app_exception.h\"],\n    deps = [\n        \":message_lib\",\n        \":metadata_lib\",\n        \":protocol_interface\",\n        \":serializer_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:filter_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"heartbeat_response_lib\",\n    srcs = [\"heartbeat_response.cc\"],\n    hdrs = [\"heartbeat_response.h\"],\n    deps = [\n        \":metadata_lib\",\n        \":protocol_interface\",\n        \":serializer_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:filter_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_manager_lib\",\n    srcs = [\n        \"active_message.cc\",\n        \"conn_manager.cc\",\n    ],\n    hdrs = [\n        
\"active_message.h\",\n        \"conn_manager.h\",\n    ],\n    deps = [\n        \":app_exception_lib\",\n        \":decoder_events_lib\",\n        \":decoder_lib\",\n        \":dubbo_hessian2_serializer_impl_lib\",\n        \":dubbo_protocol_impl_lib\",\n        \":heartbeat_response_lib\",\n        \":stats_lib\",\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/buffer:watermark_buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:filter_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy/router:router_interface\",\n        \"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/active_message.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/active_message.h\"\n\n#include \"common/stats/timespan_impl.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/app_exception.h\"\n#include \"extensions/filters/network/dubbo_proxy/conn_manager.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n// class ActiveResponseDecoder\nActiveResponseDecoder::ActiveResponseDecoder(ActiveMessage& parent, DubboFilterStats& stats,\n                                             Network::Connection& connection,\n                                             ProtocolPtr&& protocol)\n    : parent_(parent), stats_(stats), response_connection_(connection),\n      protocol_(std::move(protocol)),\n      decoder_(std::make_unique<ResponseDecoder>(*protocol_, *this)), complete_(false),\n      response_status_(DubboFilters::UpstreamResponseStatus::MoreData) {}\n\nDubboFilters::UpstreamResponseStatus ActiveResponseDecoder::onData(Buffer::Instance& data) {\n  ENVOY_LOG(debug, \"dubbo response: the received reply data length is {}\", data.length());\n\n  bool underflow = false;\n  decoder_->onData(data, underflow);\n  ASSERT(complete_ || underflow);\n\n  return response_status_;\n}\n\nvoid ActiveResponseDecoder::onStreamDecoded(MessageMetadataSharedPtr metadata,\n                                            ContextSharedPtr ctx) {\n  ASSERT(metadata->messageType() == MessageType::Response ||\n         metadata->messageType() == MessageType::Exception);\n  ASSERT(metadata->hasResponseStatus());\n\n  metadata_ = metadata;\n  if (applyMessageEncodedFilters(metadata, ctx) != FilterStatus::Continue) {\n    response_status_ = DubboFilters::UpstreamResponseStatus::Complete;\n    return;\n  }\n\n  if (response_connection_.state() != Network::Connection::State::Open) {\n    throw DownstreamConnectionCloseException(\"Downstream has closed or closing\");\n  }\n\n  response_connection_.write(ctx->messageOriginData(), false);\n  
ENVOY_LOG(debug,\n            \"dubbo response: the upstream response message has been forwarded to the downstream\");\n\n  stats_.response_.inc();\n  stats_.response_decoding_success_.inc();\n  if (metadata->messageType() == MessageType::Exception) {\n    stats_.response_business_exception_.inc();\n  }\n\n  switch (metadata->responseStatus()) {\n  case ResponseStatus::Ok:\n    stats_.response_success_.inc();\n    break;\n  default:\n    stats_.response_error_.inc();\n    ENVOY_LOG(error, \"dubbo response status: {}\", static_cast<uint8_t>(metadata->responseStatus()));\n    break;\n  }\n\n  complete_ = true;\n  response_status_ = DubboFilters::UpstreamResponseStatus::Complete;\n\n  ENVOY_LOG(debug, \"dubbo response: complete processing of upstream response messages, id is {}\",\n            metadata->requestId());\n}\n\nFilterStatus ActiveResponseDecoder::applyMessageEncodedFilters(MessageMetadataSharedPtr metadata,\n                                                               ContextSharedPtr ctx) {\n  parent_.encoder_filter_action_ = [metadata,\n                                    ctx](DubboFilters::EncoderFilter* filter) -> FilterStatus {\n    return filter->onMessageEncoded(metadata, ctx);\n  };\n\n  auto status = parent_.applyEncoderFilters(\n      nullptr, ActiveMessage::FilterIterationStartState::CanStartFromCurrent);\n  switch (status) {\n  case FilterStatus::StopIteration:\n    break;\n  case FilterStatus::Retry:\n    response_status_ = DubboFilters::UpstreamResponseStatus::Retry;\n    decoder_->reset();\n    break;\n  default:\n    ASSERT(FilterStatus::Continue == status);\n    break;\n  }\n\n  return status;\n}\n\n// class ActiveMessageFilterBase\nuint64_t ActiveMessageFilterBase::requestId() const { return parent_.requestId(); }\n\nuint64_t ActiveMessageFilterBase::streamId() const { return parent_.streamId(); }\n\nconst Network::Connection* ActiveMessageFilterBase::connection() const {\n  return parent_.connection();\n}\n\nRouter::RouteConstSharedPtr 
ActiveMessageFilterBase::route() { return parent_.route(); }\n\nSerializationType ActiveMessageFilterBase::serializationType() const {\n  return parent_.serializationType();\n}\n\nProtocolType ActiveMessageFilterBase::protocolType() const { return parent_.protocolType(); }\n\nEvent::Dispatcher& ActiveMessageFilterBase::dispatcher() { return parent_.dispatcher(); }\n\nvoid ActiveMessageFilterBase::resetStream() { parent_.resetStream(); }\n\nStreamInfo::StreamInfo& ActiveMessageFilterBase::streamInfo() { return parent_.streamInfo(); }\n\n// class ActiveMessageDecoderFilter\nActiveMessageDecoderFilter::ActiveMessageDecoderFilter(ActiveMessage& parent,\n                                                       DubboFilters::DecoderFilterSharedPtr filter,\n                                                       bool dual_filter)\n    : ActiveMessageFilterBase(parent, dual_filter), handle_(filter) {}\n\nvoid ActiveMessageDecoderFilter::continueDecoding() {\n  ASSERT(parent_.context());\n  auto state = ActiveMessage::FilterIterationStartState::AlwaysStartFromNext;\n  if (0 != parent_.context()->messageOriginData().length()) {\n    state = ActiveMessage::FilterIterationStartState::CanStartFromCurrent;\n    ENVOY_LOG(warn, \"The original message data is not consumed, triggering the decoder filter from \"\n                    \"the current location\");\n  }\n  const FilterStatus status = parent_.applyDecoderFilters(this, state);\n  if (status == FilterStatus::Continue) {\n    ENVOY_LOG(debug, \"dubbo response: start upstream\");\n    // All filters have been executed for the current decoder state.\n    if (parent_.pendingStreamDecoded()) {\n      // If the filter stack was paused during messageEnd, handle end-of-request details.\n      parent_.finalizeRequest();\n    }\n    parent_.continueDecoding();\n  }\n}\n\nvoid ActiveMessageDecoderFilter::sendLocalReply(const DubboFilters::DirectResponse& response,\n                                                bool end_stream) {\n  
parent_.sendLocalReply(response, end_stream);\n}\n\nvoid ActiveMessageDecoderFilter::startUpstreamResponse() { parent_.startUpstreamResponse(); }\n\nDubboFilters::UpstreamResponseStatus\nActiveMessageDecoderFilter::upstreamData(Buffer::Instance& buffer) {\n  return parent_.upstreamData(buffer);\n}\n\nvoid ActiveMessageDecoderFilter::resetDownstreamConnection() {\n  parent_.resetDownstreamConnection();\n}\n\n// class ActiveMessageEncoderFilter\nActiveMessageEncoderFilter::ActiveMessageEncoderFilter(ActiveMessage& parent,\n                                                       DubboFilters::EncoderFilterSharedPtr filter,\n                                                       bool dual_filter)\n    : ActiveMessageFilterBase(parent, dual_filter), handle_(filter) {}\n\nvoid ActiveMessageEncoderFilter::continueEncoding() {\n  ASSERT(parent_.context());\n  auto state = ActiveMessage::FilterIterationStartState::AlwaysStartFromNext;\n  if (0 != parent_.context()->messageOriginData().length()) {\n    state = ActiveMessage::FilterIterationStartState::CanStartFromCurrent;\n    ENVOY_LOG(warn, \"The original message data is not consumed, triggering the encoder filter from \"\n                    \"the current location\");\n  }\n  const FilterStatus status = parent_.applyEncoderFilters(this, state);\n  if (FilterStatus::Continue == status) {\n    ENVOY_LOG(debug, \"All encoding filters have been executed\");\n  }\n}\n\n// class ActiveMessage\nActiveMessage::ActiveMessage(ConnectionManager& parent)\n    : parent_(parent), request_timer_(std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n                           parent_.stats().request_time_ms_, parent.timeSystem())),\n      request_id_(-1), stream_id_(parent.randomGenerator().random()),\n      stream_info_(parent.timeSystem()), pending_stream_decoded_(false),\n      local_response_sent_(false) {\n  parent_.stats().request_active_.inc();\n  stream_info_.setDownstreamLocalAddress(parent_.connection().localAddress());\n  
stream_info_.setDownstreamRemoteAddress(parent_.connection().remoteAddress());\n  stream_info_.setDownstreamDirectRemoteAddress(parent_.connection().directRemoteAddress());\n}\n\nActiveMessage::~ActiveMessage() {\n  parent_.stats().request_active_.dec();\n  request_timer_->complete();\n  for (auto& filter : decoder_filters_) {\n    ENVOY_LOG(debug, \"destroy decoder filter\");\n    filter->handler()->onDestroy();\n  }\n\n  for (auto& filter : encoder_filters_) {\n    // Do not call on destroy twice for dual registered filters.\n    if (!filter->dual_filter_) {\n      ENVOY_LOG(debug, \"destroy encoder filter\");\n      filter->handler()->onDestroy();\n    }\n  }\n}\n\nstd::list<ActiveMessageEncoderFilterPtr>::iterator\nActiveMessage::commonEncodePrefix(ActiveMessageEncoderFilter* filter,\n                                  FilterIterationStartState state) {\n  // Only do base state setting on the initial call. Subsequent calls for filtering do not touch\n  // the base state.\n  if (filter == nullptr) {\n    // ASSERT(!state_.local_complete_);\n    // state_.local_complete_ = end_stream;\n    return encoder_filters_.begin();\n  }\n\n  if (state == FilterIterationStartState::CanStartFromCurrent) {\n    // The filter iteration has been stopped for all frame types, and now the iteration continues.\n    // The current filter's encoding callback has not be called. Call it now.\n    return filter->entry();\n  }\n  return std::next(filter->entry());\n}\n\nstd::list<ActiveMessageDecoderFilterPtr>::iterator\nActiveMessage::commonDecodePrefix(ActiveMessageDecoderFilter* filter,\n                                  FilterIterationStartState state) {\n  if (!filter) {\n    return decoder_filters_.begin();\n  }\n  if (state == FilterIterationStartState::CanStartFromCurrent) {\n    // The filter iteration has been stopped for all frame types, and now the iteration continues.\n    // The current filter's callback function has not been called. 
Call it now.\n    return filter->entry();\n  }\n  return std::next(filter->entry());\n}\n\nvoid ActiveMessage::onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) {\n  parent_.stats().request_decoding_success_.inc();\n\n  metadata_ = metadata;\n  context_ = ctx;\n  filter_action_ = [metadata, ctx](DubboFilters::DecoderFilter* filter) -> FilterStatus {\n    return filter->onMessageDecoded(metadata, ctx);\n  };\n\n  auto status = applyDecoderFilters(nullptr, FilterIterationStartState::CanStartFromCurrent);\n  if (status == FilterStatus::StopIteration) {\n    ENVOY_LOG(debug, \"dubbo request: stop calling decoder filter, id is {}\", metadata->requestId());\n    pending_stream_decoded_ = true;\n    return;\n  }\n\n  finalizeRequest();\n\n  ENVOY_LOG(debug, \"dubbo request: complete processing of downstream request messages, id is {}\",\n            metadata->requestId());\n}\n\nvoid ActiveMessage::finalizeRequest() {\n  pending_stream_decoded_ = false;\n  parent_.stats().request_.inc();\n  bool is_one_way = false;\n  switch (metadata_->messageType()) {\n  case MessageType::Request:\n    parent_.stats().request_twoway_.inc();\n    break;\n  case MessageType::Oneway:\n    parent_.stats().request_oneway_.inc();\n    is_one_way = true;\n    break;\n  default:\n    break;\n  }\n\n  if (local_response_sent_ || is_one_way) {\n    parent_.deferredMessage(*this);\n  }\n}\n\nvoid ActiveMessage::createFilterChain() {\n  parent_.config().filterFactory().createFilterChain(*this);\n}\n\nDubboProxy::Router::RouteConstSharedPtr ActiveMessage::route() {\n  if (cached_route_) {\n    return cached_route_.value();\n  }\n\n  if (metadata_ != nullptr) {\n    DubboProxy::Router::RouteConstSharedPtr route =\n        parent_.config().routerConfig().route(*metadata_, stream_id_);\n    cached_route_ = route;\n    return cached_route_.value();\n  }\n\n  return nullptr;\n}\n\nFilterStatus ActiveMessage::applyDecoderFilters(ActiveMessageDecoderFilter* filter,\n                  
                              FilterIterationStartState state) {\n  ASSERT(filter_action_ != nullptr);\n  if (!local_response_sent_) {\n    for (auto entry = commonDecodePrefix(filter, state); entry != decoder_filters_.end(); entry++) {\n      const FilterStatus status = filter_action_((*entry)->handler().get());\n      if (local_response_sent_) {\n        break;\n      }\n\n      if (status != FilterStatus::Continue) {\n        return status;\n      }\n    }\n  }\n\n  filter_action_ = nullptr;\n\n  return FilterStatus::Continue;\n}\n\nFilterStatus ActiveMessage::applyEncoderFilters(ActiveMessageEncoderFilter* filter,\n                                                FilterIterationStartState state) {\n  ASSERT(encoder_filter_action_ != nullptr);\n\n  if (!local_response_sent_) {\n    for (auto entry = commonEncodePrefix(filter, state); entry != encoder_filters_.end(); entry++) {\n      const FilterStatus status = encoder_filter_action_((*entry)->handler().get());\n      if (local_response_sent_) {\n        break;\n      }\n\n      if (status != FilterStatus::Continue) {\n        return status;\n      }\n    }\n  }\n\n  encoder_filter_action_ = nullptr;\n\n  return FilterStatus::Continue;\n}\n\nvoid ActiveMessage::sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) {\n  ASSERT(metadata_);\n  metadata_->setRequestId(request_id_);\n  parent_.sendLocalReply(*metadata_, response, end_stream);\n\n  if (end_stream) {\n    return;\n  }\n\n  local_response_sent_ = true;\n}\n\nvoid ActiveMessage::startUpstreamResponse() {\n  ENVOY_LOG(debug, \"dubbo response: start upstream\");\n\n  ASSERT(response_decoder_ == nullptr);\n\n  auto protocol =\n      NamedProtocolConfigFactory::getFactory(protocolType()).createProtocol(serializationType());\n\n  // Create a response message decoder.\n  response_decoder_ = std::make_unique<ActiveResponseDecoder>(\n      *this, parent_.stats(), parent_.connection(), 
std::move(protocol));\n}\n\nDubboFilters::UpstreamResponseStatus ActiveMessage::upstreamData(Buffer::Instance& buffer) {\n  ASSERT(response_decoder_ != nullptr);\n\n  try {\n    auto status = response_decoder_->onData(buffer);\n    if (status == DubboFilters::UpstreamResponseStatus::Complete) {\n      if (requestId() != response_decoder_->requestId()) {\n        throw EnvoyException(fmt::format(\"dubbo response: request ID is not equal, {}:{}\",\n                                         requestId(), response_decoder_->requestId()));\n      }\n\n      // Completed upstream response.\n      parent_.deferredMessage(*this);\n    } else if (status == DubboFilters::UpstreamResponseStatus::Retry) {\n      response_decoder_.reset();\n    }\n\n    return status;\n  } catch (const DownstreamConnectionCloseException& ex) {\n    ENVOY_CONN_LOG(error, \"dubbo response: exception ({})\", parent_.connection(), ex.what());\n    onReset();\n    parent_.stats().response_error_caused_connection_close_.inc();\n    return DubboFilters::UpstreamResponseStatus::Reset;\n  } catch (const EnvoyException& ex) {\n    ENVOY_CONN_LOG(error, \"dubbo response: exception ({})\", parent_.connection(), ex.what());\n    parent_.stats().response_decoding_error_.inc();\n\n    onError(ex.what());\n    return DubboFilters::UpstreamResponseStatus::Reset;\n  }\n}\n\nvoid ActiveMessage::resetDownstreamConnection() {\n  parent_.connection().close(Network::ConnectionCloseType::NoFlush);\n}\n\nvoid ActiveMessage::resetStream() { parent_.deferredMessage(*this); }\n\nuint64_t ActiveMessage::requestId() const {\n  return metadata_ != nullptr ? 
metadata_->requestId() : 0;\n}\n\nuint64_t ActiveMessage::streamId() const { return stream_id_; }\n\nvoid ActiveMessage::continueDecoding() { parent_.continueDecoding(); }\n\nSerializationType ActiveMessage::serializationType() const {\n  return parent_.downstreamSerializationType();\n}\n\nProtocolType ActiveMessage::protocolType() const { return parent_.downstreamProtocolType(); }\n\nStreamInfo::StreamInfo& ActiveMessage::streamInfo() { return stream_info_; }\n\nEvent::Dispatcher& ActiveMessage::dispatcher() { return parent_.connection().dispatcher(); }\n\nconst Network::Connection* ActiveMessage::connection() const { return &parent_.connection(); }\n\nvoid ActiveMessage::addDecoderFilter(DubboFilters::DecoderFilterSharedPtr filter) {\n  addDecoderFilterWorker(filter, false);\n}\n\nvoid ActiveMessage::addEncoderFilter(DubboFilters::EncoderFilterSharedPtr filter) {\n  addEncoderFilterWorker(filter, false);\n}\n\nvoid ActiveMessage::addFilter(DubboFilters::CodecFilterSharedPtr filter) {\n  addDecoderFilterWorker(filter, true);\n  addEncoderFilterWorker(filter, true);\n}\n\nvoid ActiveMessage::addDecoderFilterWorker(DubboFilters::DecoderFilterSharedPtr filter,\n                                           bool dual_filter) {\n  ActiveMessageDecoderFilterPtr wrapper =\n      std::make_unique<ActiveMessageDecoderFilter>(*this, filter, dual_filter);\n  filter->setDecoderFilterCallbacks(*wrapper);\n  LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_);\n}\nvoid ActiveMessage::addEncoderFilterWorker(DubboFilters::EncoderFilterSharedPtr filter,\n                                           bool dual_filter) {\n  ActiveMessageEncoderFilterPtr wrapper =\n      std::make_unique<ActiveMessageEncoderFilter>(*this, filter, dual_filter);\n  filter->setEncoderFilterCallbacks(*wrapper);\n  LinkedList::moveIntoListBack(std::move(wrapper), encoder_filters_);\n}\n\nvoid ActiveMessage::onReset() { parent_.deferredMessage(*this); }\n\nvoid ActiveMessage::onError(const 
std::string& what) {\n  if (!metadata_) {\n    // It's possible that an error occurred before the decoder generated metadata,\n    // and a metadata object needs to be created in order to generate a local reply.\n    metadata_ = std::make_shared<MessageMetadata>();\n  }\n\n  ASSERT(metadata_);\n  ENVOY_LOG(error, \"Bad response: {}\", what);\n  sendLocalReply(AppException(ResponseStatus::BadResponse, what), false);\n  parent_.deferredMessage(*this);\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/active_message.h",
    "content": "#pragma once\n\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/decoder.h\"\n#include \"extensions/filters/network/dubbo_proxy/decoder_event_handler.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/filter.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/router.h\"\n#include \"extensions/filters/network/dubbo_proxy/stats.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nclass ConnectionManager;\nclass ActiveMessage;\n\nclass ActiveResponseDecoder : public ResponseDecoderCallbacks,\n                              public StreamHandler,\n                              Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  ActiveResponseDecoder(ActiveMessage& parent, DubboFilterStats& stats,\n                        Network::Connection& connection, ProtocolPtr&& protocol);\n  ~ActiveResponseDecoder() override = default;\n\n  DubboFilters::UpstreamResponseStatus onData(Buffer::Instance& data);\n\n  // StreamHandler\n  void onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) override;\n\n  // ResponseDecoderCallbacks\n  StreamHandler& newStream() override { return *this; }\n  void onHeartbeat(MessageMetadataSharedPtr) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  uint64_t requestId() const { return metadata_ ? 
metadata_->requestId() : 0; }\n\nprivate:\n  FilterStatus applyMessageEncodedFilters(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx);\n\n  ActiveMessage& parent_;\n  DubboFilterStats& stats_;\n  Network::Connection& response_connection_;\n  ProtocolPtr protocol_;\n  ResponseDecoderPtr decoder_;\n  MessageMetadataSharedPtr metadata_;\n  bool complete_ : 1;\n  DubboFilters::UpstreamResponseStatus response_status_;\n};\n\nusing ActiveResponseDecoderPtr = std::unique_ptr<ActiveResponseDecoder>;\n\nclass ActiveMessageFilterBase : public virtual DubboFilters::FilterCallbacksBase {\npublic:\n  ActiveMessageFilterBase(ActiveMessage& parent, bool dual_filter)\n      : parent_(parent), dual_filter_(dual_filter) {}\n  ~ActiveMessageFilterBase() override = default;\n\n  // DubboFilters::FilterCallbacksBase\n  uint64_t requestId() const override;\n  uint64_t streamId() const override;\n  const Network::Connection* connection() const override;\n  DubboProxy::Router::RouteConstSharedPtr route() override;\n  SerializationType serializationType() const override;\n  ProtocolType protocolType() const override;\n  StreamInfo::StreamInfo& streamInfo() override;\n  Event::Dispatcher& dispatcher() override;\n  void resetStream() override;\n\nprotected:\n  ActiveMessage& parent_;\n  const bool dual_filter_ : 1;\n};\n\n// Wraps a DecoderFilter and acts as the DecoderFilterCallbacks for the filter, enabling filter\n// chain continuation.\nclass ActiveMessageDecoderFilter : public DubboFilters::DecoderFilterCallbacks,\n                                   public ActiveMessageFilterBase,\n                                   public LinkedObject<ActiveMessageDecoderFilter>,\n                                   Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  ActiveMessageDecoderFilter(ActiveMessage& parent, DubboFilters::DecoderFilterSharedPtr filter,\n                             bool dual_filter);\n  ~ActiveMessageDecoderFilter() override = default;\n\n  void continueDecoding() 
override;\n  void sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) override;\n  void startUpstreamResponse() override;\n  DubboFilters::UpstreamResponseStatus upstreamData(Buffer::Instance& buffer) override;\n  void resetDownstreamConnection() override;\n\n  DubboFilters::DecoderFilterSharedPtr handler() { return handle_; }\n\nprivate:\n  DubboFilters::DecoderFilterSharedPtr handle_;\n};\n\nusing ActiveMessageDecoderFilterPtr = std::unique_ptr<ActiveMessageDecoderFilter>;\n\n// Wraps a EncoderFilter and acts as the EncoderFilterCallbacks for the filter, enabling filter\n// chain continuation.\nclass ActiveMessageEncoderFilter : public ActiveMessageFilterBase,\n                                   public DubboFilters::EncoderFilterCallbacks,\n                                   public LinkedObject<ActiveMessageEncoderFilter>,\n                                   Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  ActiveMessageEncoderFilter(ActiveMessage& parent, DubboFilters::EncoderFilterSharedPtr filter,\n                             bool dual_filter);\n  ~ActiveMessageEncoderFilter() override = default;\n\n  void continueEncoding() override;\n  DubboFilters::EncoderFilterSharedPtr handler() { return handle_; }\n\nprivate:\n  DubboFilters::EncoderFilterSharedPtr handle_;\n\n  friend class ActiveMessage;\n};\n\nusing ActiveMessageEncoderFilterPtr = std::unique_ptr<ActiveMessageEncoderFilter>;\n\n// ActiveMessage tracks downstream requests for which no response has been received.\nclass ActiveMessage : public LinkedObject<ActiveMessage>,\n                      public Event::DeferredDeletable,\n                      public StreamHandler,\n                      public DubboFilters::DecoderFilterCallbacks,\n                      public DubboFilters::FilterChainFactoryCallbacks,\n                      Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  ActiveMessage(ConnectionManager& parent);\n  ~ActiveMessage() override;\n\n  // Indicates which 
filter to start the iteration with.\n  enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent };\n\n  // Returns the encoder filter to start iteration with.\n  std::list<ActiveMessageEncoderFilterPtr>::iterator\n  commonEncodePrefix(ActiveMessageEncoderFilter* filter, FilterIterationStartState state);\n  // Returns the decoder filter to start iteration with.\n  std::list<ActiveMessageDecoderFilterPtr>::iterator\n  commonDecodePrefix(ActiveMessageDecoderFilter* filter, FilterIterationStartState state);\n\n  // Dubbo::FilterChainFactoryCallbacks\n  void addDecoderFilter(DubboFilters::DecoderFilterSharedPtr filter) override;\n  void addEncoderFilter(DubboFilters::EncoderFilterSharedPtr filter) override;\n  void addFilter(DubboFilters::CodecFilterSharedPtr filter) override;\n\n  // StreamHandler\n  void onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) override;\n\n  // DubboFilters::DecoderFilterCallbacks\n  uint64_t requestId() const override;\n  uint64_t streamId() const override;\n  const Network::Connection* connection() const override;\n  void continueDecoding() override;\n  SerializationType serializationType() const override;\n  ProtocolType protocolType() const override;\n  StreamInfo::StreamInfo& streamInfo() override;\n  Router::RouteConstSharedPtr route() override;\n  void sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) override;\n  void startUpstreamResponse() override;\n  DubboFilters::UpstreamResponseStatus upstreamData(Buffer::Instance& buffer) override;\n  void resetDownstreamConnection() override;\n  Event::Dispatcher& dispatcher() override;\n  void resetStream() override;\n\n  void createFilterChain();\n  FilterStatus applyDecoderFilters(ActiveMessageDecoderFilter* filter,\n                                   FilterIterationStartState state);\n  FilterStatus applyEncoderFilters(ActiveMessageEncoderFilter* filter,\n                                   FilterIterationStartState 
state);\n  void finalizeRequest();\n  void onReset();\n  void onError(const std::string& what);\n  MessageMetadataSharedPtr metadata() const { return metadata_; }\n  ContextSharedPtr context() const { return context_; }\n  bool pendingStreamDecoded() const { return pending_stream_decoded_; }\n\nprivate:\n  void addDecoderFilterWorker(DubboFilters::DecoderFilterSharedPtr filter, bool dual_filter);\n  void addEncoderFilterWorker(DubboFilters::EncoderFilterSharedPtr, bool dual_filter);\n\n  ConnectionManager& parent_;\n\n  ContextSharedPtr context_;\n  MessageMetadataSharedPtr metadata_;\n  Stats::TimespanPtr request_timer_;\n  ActiveResponseDecoderPtr response_decoder_;\n\n  absl::optional<Router::RouteConstSharedPtr> cached_route_;\n\n  std::list<ActiveMessageDecoderFilterPtr> decoder_filters_;\n  std::function<FilterStatus(DubboFilters::DecoderFilter*)> filter_action_;\n\n  std::list<ActiveMessageEncoderFilterPtr> encoder_filters_;\n  std::function<FilterStatus(DubboFilters::EncoderFilter*)> encoder_filter_action_;\n\n  int32_t request_id_;\n\n  // This value is used in the calculation of the weighted cluster.\n  uint64_t stream_id_;\n  StreamInfo::StreamInfoImpl stream_info_;\n\n  Buffer::OwnedImpl response_buffer_;\n\n  bool pending_stream_decoded_ : 1;\n  bool local_response_sent_ : 1;\n\n  friend class ActiveResponseDecoder;\n};\n\nusing ActiveMessagePtr = std::unique_ptr<ActiveMessage>;\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/app_exception.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/app_exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/message.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nDownstreamConnectionCloseException::DownstreamConnectionCloseException(const std::string& what)\n    : EnvoyException(what) {}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/app_exception.h",
    "content": "#pragma once\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/utility.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/filters/filter.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/protocol.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nusing ResponseType = DubboFilters::DirectResponse::ResponseType;\n\ntemplate <typename T = ResponseStatus>\nstruct AppExceptionBase : public EnvoyException,\n                          public DubboFilters::DirectResponse,\n                          Logger::Loggable<Logger::Id::dubbo> {\n  AppExceptionBase(const AppExceptionBase& ex) = default;\n  AppExceptionBase(T status, const std::string& what)\n      : EnvoyException(what), status_(status),\n        response_type_(RpcResponseType::ResponseWithException) {}\n\n  ResponseType encode(MessageMetadata& metadata, DubboProxy::Protocol& protocol,\n                      Buffer::Instance& buffer) const override {\n    ASSERT(buffer.length() == 0);\n\n    ENVOY_LOG(debug, \"Exception information: {}\", what());\n\n    metadata.setResponseStatus<T>(status_);\n    metadata.setMessageType(MessageType::Response);\n    if (!protocol.encode(buffer, metadata, what(), response_type_)) {\n      ExceptionUtil::throwEnvoyException(\"Failed to encode local reply message\");\n    }\n\n    return ResponseType::Exception;\n  }\n\n  const T status_;\n  const RpcResponseType response_type_;\n};\n\nusing AppException = AppExceptionBase<>;\n\nstruct DownstreamConnectionCloseException : public EnvoyException {\n  DownstreamConnectionCloseException(const std::string& what);\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/buffer_helper.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/buffer_helper.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\ndouble BufferHelper::peekDouble(Buffer::Instance& buffer, uint64_t offset) {\n  if (buffer.length() < offset + 8) {\n    throw EnvoyException(\"buffer underflow\");\n  }\n  double i;\n  uint64_t j = buffer.peekBEInt<uint64_t>(offset);\n  std::memcpy(&i, &j, 8);\n  return i;\n}\n\nfloat BufferHelper::peekFloat(Buffer::Instance& buffer, uint64_t offset) {\n  if (buffer.length() < offset + 4) {\n    throw EnvoyException(\"buffer underflow\");\n  }\n  float i;\n  uint32_t j = buffer.peekBEInt<uint32_t>(offset);\n  std::memcpy(&i, &j, 4);\n  return i;\n}\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/buffer_helper.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n/**\n * BufferHelper provides buffer operations for reading bytes and numbers in the various encodings\n * used by protocols.\n */\nclass BufferHelper {\npublic:\n  /**\n   * Reads an double from the buffer at the given offset.\n   * @param buffer Buffer::Instance containing data to decode\n   * @param offset offset into buffer to peek at\n   * @return the double at offset in buffer\n   */\n  static double peekDouble(Buffer::Instance& buffer, uint64_t offset = 0);\n\n  /**\n   * Reads an float from the buffer at the given offset.\n   * @param buffer Buffer::Instance containing data to decode\n   * @param offset offset into buffer to peek at\n   * @return the float at offset in buffer\n   */\n  static float peekFloat(Buffer::Instance& buffer, uint64_t offset = 0);\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/config.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/config.h\"\n\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/utility.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/conn_manager.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/factory_base.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/well_known_names.h\"\n#include \"extensions/filters/network/dubbo_proxy/stats.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nNetwork::FilterFactoryCb DubboProxyFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  std::shared_ptr<Config> filter_config(std::make_shared<ConfigImpl>(proto_config, context));\n\n  return [filter_config, &context](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(std::make_shared<ConnectionManager>(\n        *filter_config, context.api().randomGenerator(), context.dispatcher().timeSource()));\n  };\n}\n\n/**\n * Static registration for the dubbo filter. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(DubboProxyFilterConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory);\n\nclass ProtocolTypeMapper {\npublic:\n  using ConfigProtocolType = envoy::extensions::filters::network::dubbo_proxy::v3::ProtocolType;\n  using ProtocolTypeMap = absl::flat_hash_map<ConfigProtocolType, ProtocolType>;\n\n  static ProtocolType lookupProtocolType(ConfigProtocolType config_type) {\n    const auto& iter = protocolTypeMap().find(config_type);\n    ASSERT(iter != protocolTypeMap().end());\n    return iter->second;\n  }\n\nprivate:\n  static const ProtocolTypeMap& protocolTypeMap() {\n    CONSTRUCT_ON_FIRST_USE(ProtocolTypeMap, {\n                                                {ConfigProtocolType::Dubbo, ProtocolType::Dubbo},\n                                            });\n  }\n};\n\nclass SerializationTypeMapper {\npublic:\n  using ConfigSerializationType =\n      envoy::extensions::filters::network::dubbo_proxy::v3::SerializationType;\n  using SerializationTypeMap = absl::flat_hash_map<ConfigSerializationType, SerializationType>;\n\n  static SerializationType lookupSerializationType(ConfigSerializationType type) {\n    const auto& iter = serializationTypeMap().find(type);\n    ASSERT(iter != serializationTypeMap().end());\n    return iter->second;\n  }\n\nprivate:\n  static const SerializationTypeMap& serializationTypeMap() {\n    CONSTRUCT_ON_FIRST_USE(SerializationTypeMap,\n                           {\n                               {ConfigSerializationType::Hessian2, SerializationType::Hessian2},\n                           });\n  }\n};\n\nclass RouteMatcherTypeMapper {\npublic:\n  using ConfigProtocolType = envoy::extensions::filters::network::dubbo_proxy::v3::ProtocolType;\n  using RouteMatcherTypeMap = absl::flat_hash_map<ConfigProtocolType, Router::RouteMatcherType>;\n\n  static Router::RouteMatcherType lookupRouteMatcherType(ConfigProtocolType type) {\n    const auto& iter = 
routeMatcherTypeMap().find(type);\n    ASSERT(iter != routeMatcherTypeMap().end());\n    return iter->second;\n  }\n\nprivate:\n  static const RouteMatcherTypeMap& routeMatcherTypeMap() {\n    CONSTRUCT_ON_FIRST_USE(RouteMatcherTypeMap,\n                           {\n                               {ConfigProtocolType::Dubbo, Router::RouteMatcherType::Default},\n                           });\n  }\n};\n\n// class ConfigImpl.\nConfigImpl::ConfigImpl(const DubboProxyConfig& config,\n                       Server::Configuration::FactoryContext& context)\n    : context_(context), stats_prefix_(fmt::format(\"dubbo.{}.\", config.stat_prefix())),\n      stats_(DubboFilterStats::generateStats(stats_prefix_, context_.scope())),\n      serialization_type_(\n          SerializationTypeMapper::lookupSerializationType(config.serialization_type())),\n      protocol_type_(ProtocolTypeMapper::lookupProtocolType(config.protocol_type())) {\n  auto type = RouteMatcherTypeMapper::lookupRouteMatcherType(config.protocol_type());\n  route_matcher_ = Router::NamedRouteMatcherConfigFactory::getFactory(type).createRouteMatcher(\n      config.route_config(), context);\n  if (config.dubbo_filters().empty()) {\n    ENVOY_LOG(debug, \"using default router filter\");\n\n    envoy::extensions::filters::network::dubbo_proxy::v3::DubboFilter router_config;\n    router_config.set_name(DubboFilters::DubboFilterNames::get().ROUTER);\n    registerFilter(router_config);\n  } else {\n    for (const auto& filter_config : config.dubbo_filters()) {\n      registerFilter(filter_config);\n    }\n  }\n}\n\nvoid ConfigImpl::createFilterChain(DubboFilters::FilterChainFactoryCallbacks& callbacks) {\n  for (const DubboFilters::FilterFactoryCb& factory : filter_factories_) {\n    factory(callbacks);\n  }\n}\n\nRouter::RouteConstSharedPtr ConfigImpl::route(const MessageMetadata& metadata,\n                                              uint64_t random_value) const {\n  return route_matcher_->route(metadata, 
random_value);\n}\n\nProtocolPtr ConfigImpl::createProtocol() {\n  return NamedProtocolConfigFactory::getFactory(protocol_type_).createProtocol(serialization_type_);\n}\n\nvoid ConfigImpl::registerFilter(const DubboFilterConfig& proto_config) {\n  const auto& string_name = proto_config.name();\n  ENVOY_LOG(debug, \"    dubbo filter #{}\", filter_factories_.size());\n  ENVOY_LOG(debug, \"      name: {}\", string_name);\n  ENVOY_LOG(debug, \"    config: {}\",\n            MessageUtil::getJsonStringFromMessage(proto_config.config(), true));\n\n  auto& factory =\n      Envoy::Config::Utility::getAndCheckFactoryByName<DubboFilters::NamedDubboFilterConfigFactory>(\n          string_name);\n  ProtobufTypes::MessagePtr message = factory.createEmptyConfigProto();\n  Envoy::Config::Utility::translateOpaqueConfig(proto_config.config(),\n                                                ProtobufWkt::Struct::default_instance(),\n                                                context_.messageValidationVisitor(), *message);\n  DubboFilters::FilterFactoryCb callback =\n      factory.createFilterFactoryFromProto(*message, stats_prefix_, context_);\n\n  filter_factories_.push_back(callback);\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/dubbo_proxy/conn_manager.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/filter.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/route_matcher.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/router_impl.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n/**\n * Config registration for the dubbo proxy filter. @see NamedNetworkFilterConfigFactory.\n */\nclass DubboProxyFilterConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy> {\npublic:\n  DubboProxyFilterConfigFactory() : FactoryBase(NetworkFilterNames::get().DubboProxy, true) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\nclass ConfigImpl : public Config,\n                   public Router::Config,\n                   public DubboFilters::FilterChainFactory,\n                   Logger::Loggable<Logger::Id::config> {\npublic:\n  using DubboProxyConfig = envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy;\n  using DubboFilterConfig = envoy::extensions::filters::network::dubbo_proxy::v3::DubboFilter;\n\n  ConfigImpl(const DubboProxyConfig& config, Server::Configuration::FactoryContext& context);\n  ~ConfigImpl() override = default;\n\n  // DubboFilters::FilterChainFactory\n  void createFilterChain(DubboFilters::FilterChainFactoryCallbacks& callbacks) override;\n\n  // 
Router::Config\n  Router::RouteConstSharedPtr route(const MessageMetadata& metadata,\n                                    uint64_t random_value) const override;\n\n  // Config\n  DubboFilterStats& stats() override { return stats_; }\n  DubboFilters::FilterChainFactory& filterFactory() override { return *this; }\n  Router::Config& routerConfig() override { return *this; }\n  ProtocolPtr createProtocol() override;\n\nprivate:\n  void registerFilter(const DubboFilterConfig& proto_config);\n\n  Server::Configuration::FactoryContext& context_;\n  const std::string stats_prefix_;\n  DubboFilterStats stats_;\n  const SerializationType serialization_type_;\n  const ProtocolType protocol_type_;\n  Router::RouteMatcherPtr route_matcher_;\n\n  std::list<DubboFilters::FilterFactoryCb> filter_factories_;\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/conn_manager.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/conn_manager.h\"\n\n#include <cstdint>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/fmt.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/app_exception.h\"\n#include \"extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/heartbeat_response.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nconstexpr uint32_t BufferLimit = UINT32_MAX;\n\nConnectionManager::ConnectionManager(Config& config, Random::RandomGenerator& random_generator,\n                                     TimeSource& time_system)\n    : config_(config), time_system_(time_system), stats_(config_.stats()),\n      random_generator_(random_generator), protocol_(config.createProtocol()),\n      decoder_(std::make_unique<RequestDecoder>(*protocol_, *this)) {}\n\nNetwork::FilterStatus ConnectionManager::onData(Buffer::Instance& data, bool end_stream) {\n  ENVOY_LOG(trace, \"dubbo: read {} bytes\", data.length());\n  request_buffer_.move(data);\n  dispatch();\n\n  if (end_stream) {\n    ENVOY_CONN_LOG(trace, \"downstream half-closed\", read_callbacks_->connection());\n\n    // Downstream has closed. Unless we're waiting for an upstream connection to complete a oneway\n    // request, close. 
The special case for oneway requests allows them to complete before the\n    // ConnectionManager is destroyed.\n    if (stopped_) {\n      ASSERT(!active_message_list_.empty());\n      auto metadata = (*active_message_list_.begin())->metadata();\n      if (metadata && metadata->messageType() == MessageType::Oneway) {\n        ENVOY_CONN_LOG(trace, \"waiting for one-way completion\", read_callbacks_->connection());\n        half_closed_ = true;\n        return Network::FilterStatus::StopIteration;\n      }\n    }\n\n    ENVOY_LOG(debug, \"dubbo: end data processing\");\n    resetAllMessages(false);\n    read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n  }\n\n  return Network::FilterStatus::StopIteration;\n}\n\nNetwork::FilterStatus ConnectionManager::onNewConnection() {\n  return Network::FilterStatus::Continue;\n}\n\nvoid ConnectionManager::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {\n  read_callbacks_ = &callbacks;\n  read_callbacks_->connection().addConnectionCallbacks(*this);\n  read_callbacks_->connection().enableHalfClose(true);\n  read_callbacks_->connection().setBufferLimits(BufferLimit);\n}\n\nvoid ConnectionManager::onEvent(Network::ConnectionEvent event) {\n  resetAllMessages(event == Network::ConnectionEvent::LocalClose);\n}\n\nvoid ConnectionManager::onAboveWriteBufferHighWatermark() {\n  ENVOY_CONN_LOG(debug, \"onAboveWriteBufferHighWatermark\", read_callbacks_->connection());\n  read_callbacks_->connection().readDisable(true);\n}\n\nvoid ConnectionManager::onBelowWriteBufferLowWatermark() {\n  ENVOY_CONN_LOG(debug, \"onBelowWriteBufferLowWatermark\", read_callbacks_->connection());\n  read_callbacks_->connection().readDisable(false);\n}\n\nStreamHandler& ConnectionManager::newStream() {\n  ENVOY_LOG(debug, \"dubbo: create the new decoder event handler\");\n\n  ActiveMessagePtr new_message(std::make_unique<ActiveMessage>(*this));\n  new_message->createFilterChain();\n  
LinkedList::moveIntoList(std::move(new_message), active_message_list_);\n  return **active_message_list_.begin();\n}\n\nvoid ConnectionManager::onHeartbeat(MessageMetadataSharedPtr metadata) {\n  stats_.request_event_.inc();\n\n  if (read_callbacks_->connection().state() != Network::Connection::State::Open) {\n    ENVOY_LOG(warn, \"dubbo: downstream connection is closed or closing\");\n    return;\n  }\n\n  metadata->setResponseStatus(ResponseStatus::Ok);\n  metadata->setMessageType(MessageType::HeartbeatResponse);\n\n  HeartbeatResponse heartbeat;\n  Buffer::OwnedImpl response_buffer;\n  heartbeat.encode(*metadata, *protocol_, response_buffer);\n\n  read_callbacks_->connection().write(response_buffer, false);\n}\n\nvoid ConnectionManager::dispatch() {\n  if (0 == request_buffer_.length()) {\n    ENVOY_LOG(warn, \"dubbo: it's empty data\");\n    return;\n  }\n\n  if (stopped_) {\n    ENVOY_CONN_LOG(debug, \"dubbo: dubbo filter stopped\", read_callbacks_->connection());\n    return;\n  }\n\n  try {\n    bool underflow = false;\n    while (!underflow) {\n      decoder_->onData(request_buffer_, underflow);\n    }\n    return;\n  } catch (const EnvoyException& ex) {\n    ENVOY_CONN_LOG(error, \"dubbo error: {}\", read_callbacks_->connection(), ex.what());\n    read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n    stats_.request_decoding_error_.inc();\n  }\n  resetAllMessages(true);\n}\n\nvoid ConnectionManager::sendLocalReply(MessageMetadata& metadata,\n                                       const DubboFilters::DirectResponse& response,\n                                       bool end_stream) {\n  if (read_callbacks_->connection().state() != Network::Connection::State::Open) {\n    return;\n  }\n\n  DubboFilters::DirectResponse::ResponseType result =\n      DubboFilters::DirectResponse::ResponseType::ErrorReply;\n\n  try {\n    Buffer::OwnedImpl buffer;\n    result = response.encode(metadata, *protocol_, buffer);\n    
read_callbacks_->connection().write(buffer, end_stream);\n  } catch (const EnvoyException& ex) {\n    ENVOY_CONN_LOG(error, \"dubbo error: {}\", read_callbacks_->connection(), ex.what());\n  }\n\n  if (end_stream) {\n    read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n  }\n\n  switch (result) {\n  case DubboFilters::DirectResponse::ResponseType::SuccessReply:\n    stats_.local_response_success_.inc();\n    break;\n  case DubboFilters::DirectResponse::ResponseType::ErrorReply:\n    stats_.local_response_error_.inc();\n    break;\n  case DubboFilters::DirectResponse::ResponseType::Exception:\n    stats_.local_response_business_exception_.inc();\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid ConnectionManager::continueDecoding() {\n  ENVOY_CONN_LOG(debug, \"dubbo filter continued\", read_callbacks_->connection());\n  stopped_ = false;\n  dispatch();\n\n  if (!stopped_ && half_closed_) {\n    // If we're half closed, but not stopped waiting for an upstream,\n    // reset any pending rpcs and close the connection.\n    resetAllMessages(false);\n    read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n  }\n}\n\nvoid ConnectionManager::deferredMessage(ActiveMessage& message) {\n  if (!message.inserted()) {\n    return;\n  }\n  read_callbacks_->connection().dispatcher().deferredDelete(\n      message.removeFromList(active_message_list_));\n}\n\nvoid ConnectionManager::resetAllMessages(bool local_reset) {\n  while (!active_message_list_.empty()) {\n    if (local_reset) {\n      ENVOY_CONN_LOG(debug, \"local close with active request\", read_callbacks_->connection());\n      stats_.cx_destroy_local_with_active_rq_.inc();\n    } else {\n      ENVOY_CONN_LOG(debug, \"remote close with active request\", read_callbacks_->connection());\n      stats_.cx_destroy_remote_with_active_rq_.inc();\n    }\n\n    active_message_list_.front()->onReset();\n  }\n}\n\n} // namespace DubboProxy\n} // 
namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/conn_manager.h",
    "content": "#pragma once\n\n#include \"envoy/common/time.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/active_message.h\"\n#include \"extensions/filters/network/dubbo_proxy/decoder.h\"\n#include \"extensions/filters/network/dubbo_proxy/decoder_event_handler.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/filter.h\"\n#include \"extensions/filters/network/dubbo_proxy/protocol.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer.h\"\n#include \"extensions/filters/network/dubbo_proxy/stats.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n/**\n * Config is a configuration interface for ConnectionManager.\n */\nclass Config {\npublic:\n  virtual ~Config() = default;\n\n  virtual DubboFilters::FilterChainFactory& filterFactory() PURE;\n  virtual DubboFilterStats& stats() PURE;\n  virtual ProtocolPtr createProtocol() PURE;\n  virtual Router::Config& routerConfig() PURE;\n};\n\n// class ActiveMessagePtr;\nclass ConnectionManager : public Network::ReadFilter,\n                          public Network::ConnectionCallbacks,\n                          public RequestDecoderCallbacks,\n                          Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  using ConfigProtocolType = envoy::extensions::filters::network::dubbo_proxy::v3::ProtocolType;\n  using ConfigSerializationType =\n      envoy::extensions::filters::network::dubbo_proxy::v3::SerializationType;\n\n  ConnectionManager(Config& config, Random::RandomGenerator& random_generator,\n                    TimeSource& time_system);\n  ~ConnectionManager() override = default;\n\n  // 
Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override;\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent) override;\n  void onAboveWriteBufferHighWatermark() override;\n  void onBelowWriteBufferLowWatermark() override;\n\n  // RequestDecoderCallbacks\n  StreamHandler& newStream() override;\n  void onHeartbeat(MessageMetadataSharedPtr metadata) override;\n\n  DubboFilterStats& stats() const { return stats_; }\n  Network::Connection& connection() const { return read_callbacks_->connection(); }\n  TimeSource& timeSystem() const { return time_system_; }\n  Random::RandomGenerator& randomGenerator() const { return random_generator_; }\n  Config& config() const { return config_; }\n  SerializationType downstreamSerializationType() const { return protocol_->serializer()->type(); }\n  ProtocolType downstreamProtocolType() const { return protocol_->type(); }\n\n  void continueDecoding();\n  void deferredMessage(ActiveMessage& message);\n  void sendLocalReply(MessageMetadata& metadata, const DubboFilters::DirectResponse& response,\n                      bool end_stream);\n\n  // This function is for testing only.\n  std::list<ActiveMessagePtr>& getActiveMessagesForTest() { return active_message_list_; }\n\nprivate:\n  void dispatch();\n  void resetAllMessages(bool local_reset);\n\n  Buffer::OwnedImpl request_buffer_;\n  std::list<ActiveMessagePtr> active_message_list_;\n\n  bool stopped_{false};\n  bool half_closed_{false};\n\n  Config& config_;\n  TimeSource& time_system_;\n  DubboFilterStats& stats_;\n  Random::RandomGenerator& random_generator_;\n\n  SerializerPtr serializer_;\n  ProtocolPtr protocol_;\n  RequestDecoderPtr decoder_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/decoder.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/decoder.h\"\n\n#include \"common/common/macros.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nDecoderStateMachine::DecoderStatus\nDecoderStateMachine::onDecodeStreamHeader(Buffer::Instance& buffer) {\n  ASSERT(!active_stream_);\n\n  auto metadata = std::make_shared<MessageMetadata>();\n  auto ret = protocol_.decodeHeader(buffer, metadata);\n  if (!ret.second) {\n    ENVOY_LOG(debug, \"dubbo decoder: need more data for {} protocol\", protocol_.name());\n    return {ProtocolState::WaitForData};\n  }\n\n  auto context = ret.first;\n  if (metadata->messageType() == MessageType::HeartbeatRequest ||\n      metadata->messageType() == MessageType::HeartbeatResponse) {\n    if (buffer.length() < (context->headerSize() + context->bodySize())) {\n      ENVOY_LOG(debug, \"dubbo decoder: need more data for {} protocol heartbeat\", protocol_.name());\n      return {ProtocolState::WaitForData};\n    }\n\n    ENVOY_LOG(debug, \"dubbo decoder: this is the {} heartbeat message\", protocol_.name());\n    buffer.drain(context->headerSize() + context->bodySize());\n    delegate_.onHeartbeat(metadata);\n    return {ProtocolState::Done};\n  }\n\n  active_stream_ = delegate_.newStream(metadata, context);\n  ASSERT(active_stream_);\n  context->messageOriginData().move(buffer, context->headerSize());\n\n  return {ProtocolState::OnDecodeStreamData};\n}\n\nDecoderStateMachine::DecoderStatus\nDecoderStateMachine::onDecodeStreamData(Buffer::Instance& buffer) {\n  ASSERT(active_stream_);\n\n  if (!protocol_.decodeData(buffer, active_stream_->context_, active_stream_->metadata_)) {\n    ENVOY_LOG(debug, \"dubbo decoder: need more data for {} serialization, current size {}\",\n              protocol_.serializer()->name(), buffer.length());\n    return {ProtocolState::WaitForData};\n  }\n\n  active_stream_->context_->messageOriginData().move(buffer, 
active_stream_->context_->bodySize());\n  active_stream_->onStreamDecoded();\n  active_stream_ = nullptr;\n\n  ENVOY_LOG(debug, \"dubbo decoder: ends the deserialization of the message\");\n  return {ProtocolState::Done};\n}\n\nDecoderStateMachine::DecoderStatus DecoderStateMachine::handleState(Buffer::Instance& buffer) {\n  switch (state_) {\n  case ProtocolState::OnDecodeStreamHeader:\n    return onDecodeStreamHeader(buffer);\n  case ProtocolState::OnDecodeStreamData:\n    return onDecodeStreamData(buffer);\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nProtocolState DecoderStateMachine::run(Buffer::Instance& buffer) {\n  while (state_ != ProtocolState::Done) {\n    ENVOY_LOG(trace, \"dubbo decoder: state {}, {} bytes available\",\n              ProtocolStateNameValues::name(state_), buffer.length());\n\n    DecoderStatus s = handleState(buffer);\n    if (s.next_state_ == ProtocolState::WaitForData) {\n      return ProtocolState::WaitForData;\n    }\n\n    state_ = s.next_state_;\n  }\n\n  return state_;\n}\n\nusing DecoderStateMachinePtr = std::unique_ptr<DecoderStateMachine>;\n\nDecoderBase::DecoderBase(Protocol& protocol) : protocol_(protocol) {}\n\nDecoderBase::~DecoderBase() { complete(); }\n\nFilterStatus DecoderBase::onData(Buffer::Instance& data, bool& buffer_underflow) {\n  ENVOY_LOG(debug, \"dubbo decoder: {} bytes available\", data.length());\n  buffer_underflow = false;\n\n  if (!decode_started_) {\n    start();\n  }\n\n  ASSERT(state_machine_ != nullptr);\n\n  ENVOY_LOG(debug, \"dubbo decoder: protocol {}, state {}, {} bytes available\", protocol_.name(),\n            ProtocolStateNameValues::name(state_machine_->currentState()), data.length());\n\n  ProtocolState rv = state_machine_->run(data);\n  switch (rv) {\n  case ProtocolState::WaitForData:\n    ENVOY_LOG(debug, \"dubbo decoder: wait for data\");\n    buffer_underflow = true;\n    return FilterStatus::Continue;\n  default:\n    break;\n  }\n\n  ASSERT(rv == ProtocolState::Done);\n\n  
complete();\n  buffer_underflow = (data.length() == 0);\n  ENVOY_LOG(debug, \"dubbo decoder: data length {}\", data.length());\n  return FilterStatus::Continue;\n}\n\nvoid DecoderBase::start() {\n  state_machine_ = std::make_unique<DecoderStateMachine>(protocol_, *this);\n  decode_started_ = true;\n}\n\nvoid DecoderBase::complete() {\n  state_machine_.reset();\n  stream_.reset();\n  decode_started_ = false;\n}\n\nvoid DecoderBase::reset() { complete(); }\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/decoder.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/decoder_event_handler.h\"\n#include \"extensions/filters/network/dubbo_proxy/protocol.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n#define ALL_PROTOCOL_STATES(FUNCTION)                                                              \\\n  FUNCTION(StopIteration)                                                                          \\\n  FUNCTION(WaitForData)                                                                            \\\n  FUNCTION(OnDecodeStreamHeader)                                                                   \\\n  FUNCTION(OnDecodeStreamData)                                                                     \\\n  FUNCTION(Done)\n\n/**\n * ProtocolState represents a set of states used in a state machine to decode Dubbo requests\n * and responses.\n */\nenum class ProtocolState { ALL_PROTOCOL_STATES(GENERATE_ENUM) };\n\nclass ProtocolStateNameValues {\npublic:\n  static const std::string& name(ProtocolState state) {\n    size_t i = static_cast<size_t>(state);\n    ASSERT(i < names().size());\n    return names()[i];\n  }\n\nprivate:\n  static const std::vector<std::string>& names() {\n    CONSTRUCT_ON_FIRST_USE(std::vector<std::string>, {ALL_PROTOCOL_STATES(GENERATE_STRING)});\n  }\n};\n\nstruct ActiveStream {\n  ActiveStream(StreamHandler& handler, MessageMetadataSharedPtr metadata, ContextSharedPtr context)\n      : handler_(handler), metadata_(metadata), context_(context) {}\n  ~ActiveStream() {\n    metadata_.reset();\n    context_.reset();\n  }\n\n  void onStreamDecoded() {\n    ASSERT(metadata_ && context_);\n    handler_.onStreamDecoded(metadata_, context_);\n  }\n\n  StreamHandler& handler_;\n  
MessageMetadataSharedPtr metadata_;\n  ContextSharedPtr context_;\n};\n\nusing ActiveStreamPtr = std::unique_ptr<ActiveStream>;\n\nclass DecoderStateMachine : public Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  class Delegate {\n  public:\n    virtual ~Delegate() = default;\n    virtual ActiveStream* newStream(MessageMetadataSharedPtr metadata,\n                                    ContextSharedPtr context) PURE;\n    virtual void onHeartbeat(MessageMetadataSharedPtr metadata) PURE;\n  };\n\n  DecoderStateMachine(Protocol& protocol, Delegate& delegate)\n      : protocol_(protocol), delegate_(delegate), state_(ProtocolState::OnDecodeStreamHeader) {}\n\n  /**\n   * Consumes as much data from the configured Buffer as possible and executes the decoding state\n   * machine. Returns ProtocolState::WaitForData if more data is required to complete processing of\n   * a message. Returns ProtocolState::Done when the end of a message is successfully processed.\n   * Once the Done state is reached, further invocations of run return immediately with Done.\n   *\n   * @param buffer a buffer containing the remaining data to be processed\n   * @return ProtocolState returns with ProtocolState::WaitForData or ProtocolState::Done\n   * @throw Envoy Exception if thrown by the underlying Protocol\n   */\n  ProtocolState run(Buffer::Instance& buffer);\n\n  /**\n   * @return the current ProtocolState\n   */\n  ProtocolState currentState() const { return state_; }\n\nprivate:\n  struct DecoderStatus {\n    DecoderStatus() = default;\n    DecoderStatus(ProtocolState next_state) : next_state_(next_state){};\n    DecoderStatus(ProtocolState next_state, FilterStatus filter_status)\n        : next_state_(next_state), filter_status_(filter_status){};\n\n    ProtocolState next_state_;\n    absl::optional<FilterStatus> filter_status_;\n  };\n\n  // These functions map directly to the matching ProtocolState values. 
Each returns the next state\n  // or ProtocolState::WaitForData if more data is required.\n  DecoderStatus onDecodeStreamHeader(Buffer::Instance& buffer);\n  DecoderStatus onDecodeStreamData(Buffer::Instance& buffer);\n\n  // handleState delegates to the appropriate method based on state_.\n  DecoderStatus handleState(Buffer::Instance& buffer);\n\n  Protocol& protocol_;\n  Delegate& delegate_;\n\n  ProtocolState state_;\n  ActiveStream* active_stream_{nullptr};\n};\n\nusing DecoderStateMachinePtr = std::unique_ptr<DecoderStateMachine>;\n\nclass DecoderBase : public DecoderStateMachine::Delegate,\n                    public Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  DecoderBase(Protocol& protocol);\n  ~DecoderBase() override;\n\n  /**\n   * Drains data from the given buffer\n   *\n   * @param data a Buffer containing Dubbo protocol data\n   * @throw EnvoyException on Dubbo protocol errors\n   */\n  FilterStatus onData(Buffer::Instance& data, bool& buffer_underflow);\n\n  const Protocol& protocol() { return protocol_; }\n\n  // It is assumed that all of the protocol parsing are stateless,\n  // if there is a state of the need to provide the reset interface call here.\n  void reset();\n\nprotected:\n  void start();\n  void complete();\n\n  Protocol& protocol_;\n\n  ActiveStreamPtr stream_;\n  DecoderStateMachinePtr state_machine_;\n\n  bool decode_started_{false};\n};\n\n/**\n * Decoder encapsulates a configured and ProtocolPtr and SerializationPtr.\n */\ntemplate <typename T> class Decoder : public DecoderBase {\npublic:\n  Decoder(Protocol& protocol, T& callbacks) : DecoderBase(protocol), callbacks_(callbacks) {}\n\n  ActiveStream* newStream(MessageMetadataSharedPtr metadata, ContextSharedPtr context) override {\n    ASSERT(!stream_);\n    stream_ = std::make_unique<ActiveStream>(callbacks_.newStream(), metadata, context);\n    return stream_.get();\n  }\n\n  void onHeartbeat(MessageMetadataSharedPtr metadata) override { callbacks_.onHeartbeat(metadata); 
}\n\nprivate:\n  T& callbacks_;\n};\n\nclass RequestDecoder : public Decoder<RequestDecoderCallbacks> {\npublic:\n  RequestDecoder(Protocol& protocol, RequestDecoderCallbacks& callbacks)\n      : Decoder(protocol, callbacks) {}\n};\n\nusing RequestDecoderPtr = std::unique_ptr<RequestDecoder>;\n\nclass ResponseDecoder : public Decoder<ResponseDecoderCallbacks> {\npublic:\n  ResponseDecoder(Protocol& protocol, ResponseDecoderCallbacks& callbacks)\n      : Decoder(protocol, callbacks) {}\n};\n\nusing ResponseDecoderPtr = std::unique_ptr<ResponseDecoder>;\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/decoder_event_handler.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/network/filter.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/message.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nenum class FilterStatus : uint8_t {\n  // Continue filter chain iteration.\n  Continue,\n  // Do not iterate to any of the remaining filters in the chain. Returning\n  // FilterDataStatus::Continue from decodeData()/encodeData() or calling\n  // continueDecoding()/continueEncoding() MUST be called if continued filter iteration is desired.\n  StopIteration,\n  // Indicates that a retry is required for the reply message received.\n  Retry,\n};\n\nclass StreamDecoder {\npublic:\n  virtual ~StreamDecoder() = default;\n\n  /**\n   * Indicates that the message had been decoded.\n   * @param metadata MessageMetadataSharedPtr describing the message\n   * @param ctx the message context information\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus onMessageDecoded(MessageMetadataSharedPtr metadata,\n                                        ContextSharedPtr ctx) PURE;\n};\n\nusing StreamDecoderSharedPtr = std::shared_ptr<StreamDecoder>;\n\nclass StreamEncoder {\npublic:\n  virtual ~StreamEncoder() = default;\n\n  /**\n   * Indicates that the message had been encoded.\n   * @param metadata MessageMetadataSharedPtr describing the message\n   * @param ctx the message context information\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus onMessageEncoded(MessageMetadataSharedPtr metadata,\n                                        ContextSharedPtr ctx) PURE;\n};\n\nusing StreamEncoderSharedPtr = std::shared_ptr<StreamEncoder>;\n\nclass StreamHandler {\npublic:\n  virtual ~StreamHandler() = 
default;\n\n  /**\n   * Indicates that the message had been decoded.\n   * @param metadata MessageMetadataSharedPtr describing the message\n   * @param ctx the message context information\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual void onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) PURE;\n};\n\nusing StreamDecoderSharedPtr = std::shared_ptr<StreamDecoder>;\n\nclass DecoderCallbacksBase {\npublic:\n  virtual ~DecoderCallbacksBase() = default;\n\n  /**\n   * @return StreamDecoder* a new StreamDecoder for a message.\n   */\n  virtual StreamHandler& newStream() PURE;\n\n  /**\n   * Indicates that the message is a heartbeat.\n   */\n  virtual void onHeartbeat(MessageMetadataSharedPtr) PURE;\n};\n\nclass RequestDecoderCallbacks : public DecoderCallbacksBase {};\nclass ResponseDecoderCallbacks : public DecoderCallbacksBase {};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/hessian_utils.h\"\n#include \"extensions/filters/network/dubbo_proxy/message_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nstd::pair<RpcInvocationSharedPtr, bool>\nDubboHessian2SerializerImpl::deserializeRpcInvocation(Buffer::Instance& buffer,\n                                                      ContextSharedPtr context) {\n  size_t total_size = 0, size;\n  // TODO(zyfjeff): Add format checker\n  std::string dubbo_version = HessianUtils::peekString(buffer, &size);\n  total_size += size;\n  std::string service_name = HessianUtils::peekString(buffer, &size, total_size);\n  total_size += size;\n  std::string service_version = HessianUtils::peekString(buffer, &size, total_size);\n  total_size += size;\n  std::string method_name = HessianUtils::peekString(buffer, &size, total_size);\n  total_size += size;\n\n  if (static_cast<uint64_t>(context->bodySize()) < total_size) {\n    throw EnvoyException(fmt::format(\"RpcInvocation size({}) large than body size({})\", total_size,\n                                     context->bodySize()));\n  }\n\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  invo->setServiceName(service_name);\n  invo->setServiceVersion(service_version);\n  invo->setMethodName(method_name);\n\n  return std::pair<RpcInvocationSharedPtr, bool>(invo, true);\n}\n\nstd::pair<RpcResultSharedPtr, bool>\nDubboHessian2SerializerImpl::deserializeRpcResult(Buffer::Instance& buffer,\n                                                  ContextSharedPtr context) {\n  ASSERT(buffer.length() >= 
context->bodySize());\n  size_t total_size = 0;\n  bool has_value = true;\n\n  auto result = std::make_shared<RpcResultImpl>();\n  RpcResponseType type = static_cast<RpcResponseType>(HessianUtils::peekInt(buffer, &total_size));\n\n  switch (type) {\n  case RpcResponseType::ResponseWithException:\n  case RpcResponseType::ResponseWithExceptionWithAttachments:\n  case RpcResponseType::ResponseWithValue:\n    result->setException(true);\n    break;\n  case RpcResponseType::ResponseWithNullValue:\n    has_value = false;\n    FALLTHRU;\n  case RpcResponseType::ResponseValueWithAttachments:\n  case RpcResponseType::ResponseNullValueWithAttachments:\n    result->setException(false);\n    break;\n  default:\n    throw EnvoyException(fmt::format(\"not supported return type {}\", static_cast<uint8_t>(type)));\n  }\n\n  if (context->bodySize() < total_size) {\n    throw EnvoyException(fmt::format(\"RpcResult size({}) large than body size({})\", total_size,\n                                     context->bodySize()));\n  }\n\n  if (!has_value && context->bodySize() != total_size) {\n    throw EnvoyException(\n        fmt::format(\"RpcResult is no value, but the rest of the body size({}) not equal 0\",\n                    (context->bodySize() - total_size)));\n  }\n\n  return std::pair<RpcResultSharedPtr, bool>(result, true);\n}\n\nsize_t DubboHessian2SerializerImpl::serializeRpcResult(Buffer::Instance& output_buffer,\n                                                       const std::string& content,\n                                                       RpcResponseType type) {\n  size_t origin_length = output_buffer.length();\n\n  // The serialized response type is compact int.\n  size_t serialized_size = HessianUtils::writeInt(\n      output_buffer, static_cast<std::underlying_type<RpcResponseType>::type>(type));\n\n  // Serialized response content.\n  serialized_size += HessianUtils::writeString(output_buffer, content);\n\n  ASSERT((output_buffer.length() - origin_length) == 
serialized_size);\n\n  return serialized_size;\n}\n\nclass DubboHessian2SerializerConfigFactory\n    : public SerializerFactoryBase<DubboHessian2SerializerImpl> {\npublic:\n  DubboHessian2SerializerConfigFactory()\n      : SerializerFactoryBase(ProtocolType::Dubbo, SerializationType::Hessian2) {}\n};\n\n/**\n * Static registration for the Hessian protocol. @see RegisterFactory.\n */\nREGISTER_FACTORY(DubboHessian2SerializerConfigFactory, NamedSerializerConfigFactory);\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/dubbo_proxy/serializer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nclass DubboHessian2SerializerImpl : public Serializer {\npublic:\n  ~DubboHessian2SerializerImpl() override = default;\n  const std::string& name() const override {\n    return ProtocolSerializerNames::get().fromType(ProtocolType::Dubbo, type());\n  }\n  SerializationType type() const override { return SerializationType::Hessian2; }\n\n  std::pair<RpcInvocationSharedPtr, bool>\n  deserializeRpcInvocation(Buffer::Instance& buffer, ContextSharedPtr context) override;\n\n  std::pair<RpcResultSharedPtr, bool> deserializeRpcResult(Buffer::Instance& buffer,\n                                                           ContextSharedPtr context) override;\n\n  size_t serializeRpcResult(Buffer::Instance& output_buffer, const std::string& content,\n                            RpcResponseType type) override;\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h\"\n\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/message_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace {\n\nconstexpr uint16_t MagicNumber = 0xdabb;\nconstexpr uint8_t MessageTypeMask = 0x80;\nconstexpr uint8_t EventMask = 0x20;\nconstexpr uint8_t TwoWayMask = 0x40;\nconstexpr uint8_t SerializationTypeMask = 0x1f;\nconstexpr uint64_t FlagOffset = 2;\nconstexpr uint64_t StatusOffset = 3;\nconstexpr uint64_t RequestIDOffset = 4;\nconstexpr uint64_t BodySizeOffset = 12;\n\n} // namespace\n\n// Consistent with the SerializationType\nbool isValidSerializationType(SerializationType type) {\n  switch (type) {\n  case SerializationType::Hessian2:\n    break;\n  default:\n    return false;\n  }\n  return true;\n}\n\n// Consistent with the ResponseStatus\nbool isValidResponseStatus(ResponseStatus status) {\n  switch (status) {\n  case ResponseStatus::Ok:\n  case ResponseStatus::ClientTimeout:\n  case ResponseStatus::ServerTimeout:\n  case ResponseStatus::BadRequest:\n  case ResponseStatus::BadResponse:\n  case ResponseStatus::ServiceNotFound:\n  case ResponseStatus::ServiceError:\n  case ResponseStatus::ClientError:\n  case ResponseStatus::ServerThreadpoolExhaustedError:\n    break;\n  default:\n    return false;\n  }\n  return true;\n}\n\nvoid parseRequestInfoFromBuffer(Buffer::Instance& data, MessageMetadataSharedPtr metadata) {\n  ASSERT(data.length() >= DubboProtocolImpl::MessageSize);\n  uint8_t flag = data.peekInt<uint8_t>(FlagOffset);\n  bool is_two_way = (flag & TwoWayMask) == TwoWayMask ? 
true : false;\n  SerializationType type = static_cast<SerializationType>(flag & SerializationTypeMask);\n  if (!isValidSerializationType(type)) {\n    throw EnvoyException(\n        absl::StrCat(\"invalid dubbo message serialization type \",\n                     static_cast<std::underlying_type<SerializationType>::type>(type)));\n  }\n\n  if (!is_two_way && metadata->messageType() != MessageType::HeartbeatRequest) {\n    metadata->setMessageType(MessageType::Oneway);\n  }\n\n  metadata->setSerializationType(type);\n}\n\nvoid parseResponseInfoFromBuffer(Buffer::Instance& buffer, MessageMetadataSharedPtr metadata) {\n  ASSERT(buffer.length() >= DubboProtocolImpl::MessageSize);\n  ResponseStatus status = static_cast<ResponseStatus>(buffer.peekInt<uint8_t>(StatusOffset));\n  if (!isValidResponseStatus(status)) {\n    throw EnvoyException(\n        absl::StrCat(\"invalid dubbo message response status \",\n                     static_cast<std::underlying_type<ResponseStatus>::type>(status)));\n  }\n\n  metadata->setResponseStatus(status);\n}\n\nstd::pair<ContextSharedPtr, bool>\nDubboProtocolImpl::decodeHeader(Buffer::Instance& buffer, MessageMetadataSharedPtr metadata) {\n  if (!metadata) {\n    throw EnvoyException(\"invalid metadata parameter\");\n  }\n\n  if (buffer.length() < DubboProtocolImpl::MessageSize) {\n    return std::pair<ContextSharedPtr, bool>(nullptr, false);\n  }\n\n  uint16_t magic_number = buffer.peekBEInt<uint16_t>();\n  if (magic_number != MagicNumber) {\n    throw EnvoyException(absl::StrCat(\"invalid dubbo message magic number \", magic_number));\n  }\n\n  uint8_t flag = buffer.peekInt<uint8_t>(FlagOffset);\n  MessageType type =\n      (flag & MessageTypeMask) == MessageTypeMask ? MessageType::Request : MessageType::Response;\n  bool is_event = (flag & EventMask) == EventMask ? 
true : false;\n  int64_t request_id = buffer.peekBEInt<int64_t>(RequestIDOffset);\n  int32_t body_size = buffer.peekBEInt<int32_t>(BodySizeOffset);\n\n  // The body size of the heartbeat message is zero.\n  if (body_size > MaxBodySize || body_size < 0) {\n    throw EnvoyException(absl::StrCat(\"invalid dubbo message size \", body_size));\n  }\n\n  metadata->setRequestId(request_id);\n\n  if (type == MessageType::Request) {\n    if (is_event) {\n      type = MessageType::HeartbeatRequest;\n    }\n    metadata->setMessageType(type);\n    parseRequestInfoFromBuffer(buffer, metadata);\n  } else {\n    if (is_event) {\n      type = MessageType::HeartbeatResponse;\n    }\n    metadata->setMessageType(type);\n    parseResponseInfoFromBuffer(buffer, metadata);\n  }\n\n  auto context = std::make_shared<ContextImpl>();\n  context->setHeaderSize(DubboProtocolImpl::MessageSize);\n  context->setBodySize(body_size);\n  context->setHeartbeat(is_event);\n\n  return std::pair<ContextSharedPtr, bool>(context, true);\n}\n\nbool DubboProtocolImpl::decodeData(Buffer::Instance& buffer, ContextSharedPtr context,\n                                   MessageMetadataSharedPtr metadata) {\n  ASSERT(serializer_);\n\n  if ((buffer.length()) < static_cast<uint64_t>(context->bodySize())) {\n    return false;\n  }\n\n  switch (metadata->messageType()) {\n  case MessageType::Oneway:\n  case MessageType::Request: {\n    auto ret = serializer_->deserializeRpcInvocation(buffer, context);\n    if (!ret.second) {\n      return false;\n    }\n    metadata->setInvocationInfo(ret.first);\n    break;\n  }\n  case MessageType::Response: {\n    auto ret = serializer_->deserializeRpcResult(buffer, context);\n    if (!ret.second) {\n      return false;\n    }\n    if (ret.first->hasException()) {\n      metadata->setMessageType(MessageType::Exception);\n    }\n    break;\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  return true;\n}\n\nbool DubboProtocolImpl::encode(Buffer::Instance& buffer, const 
MessageMetadata& metadata,\n                               const std::string& content, RpcResponseType type) {\n  ASSERT(serializer_);\n\n  switch (metadata.messageType()) {\n  case MessageType::HeartbeatResponse: {\n    ASSERT(metadata.hasResponseStatus());\n    ASSERT(content.empty());\n    buffer.writeBEInt<uint16_t>(MagicNumber);\n    uint8_t flag = static_cast<uint8_t>(metadata.serializationType());\n    flag = flag ^ EventMask;\n    buffer.writeByte(flag);\n    buffer.writeByte(static_cast<uint8_t>(metadata.responseStatus()));\n    buffer.writeBEInt<uint64_t>(metadata.requestId());\n    buffer.writeBEInt<uint32_t>(0);\n    return true;\n  }\n  case MessageType::Response: {\n    ASSERT(metadata.hasResponseStatus());\n    ASSERT(!content.empty());\n    Buffer::OwnedImpl body_buffer;\n    size_t serialized_body_size = serializer_->serializeRpcResult(body_buffer, content, type);\n\n    buffer.writeBEInt<uint16_t>(MagicNumber);\n    buffer.writeByte(static_cast<uint8_t>(metadata.serializationType()));\n    buffer.writeByte(static_cast<uint8_t>(metadata.responseStatus()));\n    buffer.writeBEInt<uint64_t>(metadata.requestId());\n    buffer.writeBEInt<uint32_t>(serialized_body_size);\n\n    buffer.move(body_buffer, serialized_body_size);\n    return true;\n  }\n  case MessageType::Request:\n  case MessageType::Oneway:\n  case MessageType::Exception:\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nclass DubboProtocolConfigFactory : public ProtocolFactoryBase<DubboProtocolImpl> {\npublic:\n  DubboProtocolConfigFactory() : ProtocolFactoryBase(ProtocolType::Dubbo) {}\n};\n\n/**\n * Static registration for the Dubbo protocol. @see RegisterFactory.\n */\nREGISTER_FACTORY(DubboProtocolConfigFactory, NamedProtocolConfigFactory);\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/dubbo_proxy/protocol.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nclass DubboProtocolImpl : public Protocol {\npublic:\n  DubboProtocolImpl() = default;\n  ~DubboProtocolImpl() override = default;\n\n  const std::string& name() const override { return ProtocolNames::get().fromType(type()); }\n  ProtocolType type() const override { return ProtocolType::Dubbo; }\n\n  std::pair<ContextSharedPtr, bool> decodeHeader(Buffer::Instance& buffer,\n                                                 MessageMetadataSharedPtr metadata) override;\n  bool decodeData(Buffer::Instance& buffer, ContextSharedPtr context,\n                  MessageMetadataSharedPtr metadata) override;\n\n  bool encode(Buffer::Instance& buffer, const MessageMetadata& metadata, const std::string& content,\n              RpcResponseType type) override;\n\n  static constexpr uint8_t MessageSize = 16;\n  static constexpr int32_t MaxBodySize = 16 * 1024 * 1024;\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/filters/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"filter_interface\",\n    hdrs = [\"filter.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy:decoder_events_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:metadata_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:protocol_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy:serializer_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy/router:router_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_config_interface\",\n    hdrs = [\"filter_config.h\"],\n    deps = [\n        \":filter_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/common:macros\",\n        \"//source/common/protobuf:cc_wkt_protos\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"factory_base_lib\",\n    hdrs = [\"factory_base.h\"],\n    deps = [\n        \":filter_config_interface\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/filters/factory_base.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/filters/filter_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace DubboFilters {\n\ntemplate <class ConfigProto> class FactoryBase : public NamedDubboFilterConfigFactory {\npublic:\n  FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& proto_config,\n                               const std::string& stats_prefix,\n                               Server::Configuration::FactoryContext& context) override {\n    return createFilterFactoryFromProtoTyped(MessageUtil::downcastAndValidate<const ConfigProto&>(\n                                                 proto_config, context.messageValidationVisitor()),\n                                             stats_prefix, context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ConfigProto>();\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  FactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  virtual FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const ConfigProto& proto_config,\n                                    const std::string& stats_prefix,\n                                    Server::Configuration::FactoryContext& context) PURE;\n\n  const std::string name_;\n};\n\n} // namespace DubboFilters\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/filters/filter.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/decoder_event_handler.h\"\n#include \"extensions/filters/network/dubbo_proxy/message.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/protocol.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/router.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace DubboFilters {\n\nenum class UpstreamResponseStatus : uint8_t {\n  MoreData = 0, // The upstream response requires more data.\n  Complete = 1, // The upstream response is complete.\n  Reset = 2,    // The upstream response is invalid and its connection must be reset.\n  Retry = 3,    // The upstream response is failure need to retry.\n};\n\nclass DirectResponse {\npublic:\n  virtual ~DirectResponse() = default;\n\n  enum class ResponseType : uint8_t {\n    // DirectResponse encodes MessageType::Reply with success payload\n    SuccessReply,\n\n    // DirectResponse encodes MessageType::Reply with an exception payload\n    ErrorReply,\n\n    // DirectResponse encodes MessageType::Exception\n    Exception,\n  };\n\n  /**\n   * Encodes the response via the given Protocol.\n   * @param metadata the MessageMetadata for the request that generated this response\n   * @param proto the Protocol to be used for message encoding\n   * @param buffer the Buffer into which the message should be encoded\n   * @return ResponseType indicating whether the message is a successful or error reply or an\n   *         exception\n   */\n  virtual ResponseType encode(MessageMetadata& metadata, Protocol& protocol,\n                              Buffer::Instance& 
buffer) const PURE;\n};\n\nusing DirectResponsePtr = std::unique_ptr<DirectResponse>;\n\n/**\n * Decoder filter callbacks add additional callbacks.\n */\nclass FilterCallbacksBase {\npublic:\n  virtual ~FilterCallbacksBase() = default;\n\n  /**\n   * @return uint64_t the ID of the originating request for logging purposes.\n   */\n  virtual uint64_t requestId() const PURE;\n\n  /**\n   * @return uint64_t the ID of the originating stream for logging purposes.\n   */\n  virtual uint64_t streamId() const PURE;\n\n  /**\n   * @return const Network::Connection* the originating connection, or nullptr if there is none.\n   */\n  virtual const Network::Connection* connection() const PURE;\n\n  /**\n   * @return RouteConstSharedPtr the route for the current request.\n   */\n  virtual DubboProxy::Router::RouteConstSharedPtr route() PURE;\n\n  /**\n   * @return SerializationType the originating protocol.\n   */\n  virtual SerializationType serializationType() const PURE;\n\n  /**\n   * @return ProtocolType the originating protocol.\n   */\n  virtual ProtocolType protocolType() const PURE;\n\n  /**\n   * @return StreamInfo for logging purposes.\n   */\n  virtual StreamInfo::StreamInfo& streamInfo() PURE;\n\n  /**\n   * @return Event::Dispatcher& the thread local dispatcher for allocating timers, etc.\n   */\n  virtual Event::Dispatcher& dispatcher() PURE;\n\n  /**\n   * Reset the underlying stream.\n   */\n  virtual void resetStream() PURE;\n};\n\n/**\n * Decoder filter callbacks add additional callbacks.\n */\nclass DecoderFilterCallbacks : public virtual FilterCallbacksBase {\npublic:\n  ~DecoderFilterCallbacks() override = default;\n\n  /**\n   * Continue iterating through the filter chain with buffered data. This routine can only be\n   * called if the filter has previously returned StopIteration from one of the DecoderFilter\n   * methods. The connection manager will callbacks to the next filter in the chain. 
Further note\n   * that if the request is not complete, the calling filter may receive further callbacks and must\n   * return an appropriate status code depending on what the filter needs to do.\n   */\n  virtual void continueDecoding() PURE;\n\n  /**\n   * Create a locally generated response using the provided response object.\n   * @param response DirectResponsePtr the response to send to the downstream client\n   */\n  virtual void sendLocalReply(const DirectResponse& response, bool end_stream) PURE;\n\n  /**\n   * Indicates the start of an upstream response. May only be called once.\n   * @param transport_type TransportType the upstream is using\n   * @param protocol_type ProtocolType the upstream is using\n   */\n  virtual void startUpstreamResponse() PURE;\n\n  /**\n   * Called with upstream response data.\n   * @param data supplies the upstream's data\n   * @return UpstreamResponseStatus indicating if the upstream response requires more data, is\n   * complete, or if an error occurred requiring the upstream connection to be reset.\n   */\n  virtual UpstreamResponseStatus upstreamData(Buffer::Instance& data) PURE;\n\n  /**\n   * Reset the downstream connection.\n   */\n  virtual void resetDownstreamConnection() PURE;\n};\n\n/**\n * Encoder filter callbacks add additional callbacks.\n */\nclass EncoderFilterCallbacks : public virtual FilterCallbacksBase {\npublic:\n  ~EncoderFilterCallbacks() override = default;\n\n  /**\n   * Continue iterating through the filter chain with buffered data. This routine can only be\n   * called if the filter has previously returned StopIteration from one of the DecoderFilter\n   * methods. The connection manager will callbacks to the next filter in the chain. 
Further note\n   * that if the request is not complete, the calling filter may receive further callbacks and must\n   * return an appropriate status code depending on what the filter needs to do.\n   */\n  virtual void continueEncoding() PURE;\n};\n\n/**\n * Common base class for both decoder and encoder filters.\n */\nclass FilterBase {\npublic:\n  virtual ~FilterBase() = default;\n\n  /**\n   * This routine is called prior to a filter being destroyed. This may happen after normal stream\n   * finish (both downstream and upstream) or due to reset. Every filter is responsible for making\n   * sure that any async events are cleaned up in the context of this routine. This includes timers,\n   * network calls, etc. The reason there is an onDestroy() method vs. doing this type of cleanup\n   * in the destructor is due to the deferred deletion model that Envoy uses to avoid stack unwind\n   * complications. Filters must not invoke either encoder or decoder filter callbacks after having\n   * onDestroy() invoked.\n   */\n  virtual void onDestroy() PURE;\n};\n\n/**\n * Decoder filter interface.\n */\nclass DecoderFilter : public StreamDecoder, public FilterBase {\npublic:\n  ~DecoderFilter() override = default;\n\n  /**\n   * Called by the connection manager once to initialize the filter decoder callbacks that the\n   * filter should use. Callbacks will not be invoked by the filter after onDestroy() is called.\n   */\n  virtual void setDecoderFilterCallbacks(DecoderFilterCallbacks& callbacks) PURE;\n};\n\nusing DecoderFilterSharedPtr = std::shared_ptr<DecoderFilter>;\n\n/**\n * Encoder filter interface.\n */\nclass EncoderFilter : public StreamEncoder, public FilterBase {\npublic:\n  ~EncoderFilter() override = default;\n\n  /**\n   * Called by the connection manager once to initialize the filter encoder callbacks that the\n   * filter should use. 
Callbacks will not be invoked by the filter after onDestroy() is called.\n   */\n  virtual void setEncoderFilterCallbacks(EncoderFilterCallbacks& callbacks) PURE;\n};\n\nusing EncoderFilterSharedPtr = std::shared_ptr<EncoderFilter>;\n\n/**\n * A filter that handles both encoding and decoding.\n */\nclass CodecFilter : public virtual DecoderFilter, public virtual EncoderFilter {};\n\nusing CodecFilterSharedPtr = std::shared_ptr<CodecFilter>;\n\n/**\n * These callbacks are provided by the connection manager to the factory so that the factory can\n * build the filter chain in an application specific way.\n */\nclass FilterChainFactoryCallbacks {\npublic:\n  virtual ~FilterChainFactoryCallbacks() = default;\n\n  /**\n   * Add a decoder filter that is used when reading connection data.\n   * @param filter supplies the filter to add.\n   */\n  virtual void addDecoderFilter(DecoderFilterSharedPtr filter) PURE;\n\n  /**\n   * Add a encoder filter that is used when writing connection data.\n   * @param filter supplies the filter to add.\n   */\n  virtual void addEncoderFilter(EncoderFilterSharedPtr filter) PURE;\n\n  /**\n   * Add a decoder/encoder filter that is used both when reading and writing connection data.\n   * @param filter supplies the filter to add.\n   */\n  virtual void addFilter(CodecFilterSharedPtr filter) PURE;\n};\n\n/**\n * This function is used to wrap the creation of a Dubbo filter chain for new connections as they\n * come in. Filter factories create the function at configuration initialization time, and then\n * they are used at runtime.\n * @param callbacks supplies the callbacks for the stream to install filters to. 
Typically the\n * function will install a single filter, but it's technically possibly to install more than one\n * if desired.\n */\nusing FilterFactoryCb = std::function<void(FilterChainFactoryCallbacks& callbacks)>;\n\n/**\n * A FilterChainFactory is used by a connection manager to create a Dubbo level filter chain when\n * a new connection is created. Typically it would be implemented by a configuration engine that\n * would install a set of filters that are able to process an application scenario on top of a\n * stream of Dubbo requests.\n */\nclass FilterChainFactory {\npublic:\n  virtual ~FilterChainFactory() = default;\n\n  /**\n   * Called when a new Dubbo stream is created on the connection.\n   * @param callbacks supplies the \"sink\" that is used for actually creating the filter chain. @see\n   *                  FilterChainFactoryCallbacks.\n   */\n  virtual void createFilterChain(FilterChainFactoryCallbacks& callbacks) PURE;\n};\n\n} // namespace DubboFilters\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/filters/filter_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/filters/filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace DubboFilters {\n\n/**\n * Implemented by each Dubbo filter and registered via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass NamedDubboFilterConfigFactory : public Envoy::Config::TypedFactory {\npublic:\n  ~NamedDubboFilterConfigFactory() override = default;\n\n  /**\n   * Create a particular dubbo filter factory implementation. If the implementation is unable to\n   * produce a factory with the provided parameters, it should throw an EnvoyException in the case\n   * of general error. The returned callback should always be initialized.\n   * @param config supplies the configuration for the filter\n   * @param stat_prefix prefix for stat logging\n   * @param context supplies the filter's context.\n   * @return FilterFactoryCb the factory creation function.\n   */\n  virtual DubboFilters::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& config, const std::string& stat_prefix,\n                               Server::Configuration::FactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.dubbo_proxy.filters\"; }\n};\n\n} // namespace DubboFilters\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/filters/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace DubboFilters {\n\n/**\n * Well-known Dubbo filter names.\n * NOTE: New filters should use the well known name: envoy.filters.dubbo.name.\n */\nclass DubboFilterNameValues {\npublic:\n  // Router filter\n  const std::string ROUTER = \"envoy.filters.dubbo.router\";\n};\n\nusing DubboFilterNames = ConstSingleton<DubboFilterNameValues>;\n\n} // namespace DubboFilters\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/heartbeat_response.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nDubboFilters::DirectResponse::ResponseType\nHeartbeatResponse::encode(MessageMetadata& metadata, DubboProxy::Protocol& protocol,\n                          Buffer::Instance& buffer) const {\n  ASSERT(metadata.responseStatus() == ResponseStatus::Ok);\n  ASSERT(metadata.messageType() == MessageType::HeartbeatResponse);\n\n  if (!protocol.encode(buffer, metadata, \"\")) {\n    throw EnvoyException(\"failed to encode heartbeat message\");\n  }\n\n  ENVOY_LOG(debug, \"buffer length {}\", buffer.length());\n  return DirectResponse::ResponseType::SuccessReply;\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/heartbeat_response.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/dubbo_proxy/filters/filter.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/protocol.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nstruct HeartbeatResponse : public DubboFilters::DirectResponse,\n                           Logger::Loggable<Logger::Id::dubbo> {\n  HeartbeatResponse() = default;\n  ~HeartbeatResponse() override = default;\n\n  using ResponseType = DubboFilters::DirectResponse::ResponseType;\n  ResponseType encode(MessageMetadata& metadata, Protocol& protocol,\n                      Buffer::Instance& buffer) const override;\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/hessian_utils.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/hessian_utils.h\"\n\n#include <type_traits>\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/buffer_helper.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nnamespace {\n\ntemplate <typename T>\ntypename std::enable_if<std::is_signed<T>::value, T>::type leftShift(T left, uint16_t bit_number) {\n  if (left < 0) {\n    left = -left;\n    return -1 * (left << bit_number);\n  }\n\n  return left << bit_number;\n}\n\ninline void addByte(Buffer::Instance& buffer, const uint8_t value) { buffer.add(&value, 1); }\n\nvoid addSeq(Buffer::Instance& buffer, const std::initializer_list<uint8_t>& values) {\n  for (const uint8_t& value : values) {\n    buffer.add(&value, 1);\n  }\n}\n\nsize_t doWriteString(Buffer::Instance& instance, absl::string_view str_view) {\n  const size_t length = str_view.length();\n  constexpr size_t str_max_length = 0xffff;\n  constexpr size_t two_octet_max_length = 1024;\n\n  if (length < 32) {\n    addByte(instance, static_cast<uint8_t>(length));\n    instance.add(str_view.data(), str_view.length());\n    return length + sizeof(uint8_t);\n  }\n\n  if (length < two_octet_max_length) {\n    const uint8_t code = length >> 8; // 0x30 + length / 0x100 must be less than 0x34\n    const uint8_t remain = length & 0xff;\n    std::initializer_list<uint8_t> values{static_cast<uint8_t>(0x30 + code), remain};\n    addSeq(instance, values);\n    instance.add(str_view.data(), str_view.length());\n    return length + values.size();\n  }\n\n  if (length <= str_max_length) {\n    const uint8_t code = length >> 8;\n    const uint8_t remain = length & 0xff;\n    std::initializer_list<uint8_t> values{'S', code, remain};\n    addSeq(instance, values);\n    instance.add(str_view.data(), str_view.length());\n    return length + values.size();\n  
}\n\n  std::initializer_list<uint8_t> values{0x52, 0xff, 0xff};\n  addSeq(instance, values);\n  instance.add(str_view.data(), str_max_length);\n  const size_t size = str_max_length + values.size();\n  ASSERT(size == (str_max_length + values.size()));\n\n  const size_t child_size =\n      doWriteString(instance, str_view.substr(str_max_length, length - str_max_length));\n  return child_size + size;\n}\n\n} // namespace\n\n/*\n * Reference:\n * https://cs.chromium.org/chromium/src/base/strings/string_util.h?q=WriteInto&sq=package:chromium&dr=CSs&l=426\n */\nchar* allocStringBuffer(std::string* str, size_t length) {\n  str->reserve(length);\n  str->resize(length - 1);\n  return &((*str)[0]);\n}\n\nstd::string HessianUtils::peekString(Buffer::Instance& buffer, size_t* size, uint64_t offset) {\n  ASSERT(buffer.length() > offset);\n  const uint8_t code = buffer.peekInt<uint8_t>(offset);\n  size_t delta_length = 0;\n  std::string result;\n  switch (code) {\n  case 0x00:\n  case 0x01:\n  case 0x02:\n  case 0x03:\n  case 0x04:\n  case 0x05:\n  case 0x06:\n  case 0x07:\n  case 0x08:\n  case 0x09:\n  case 0x0a:\n  case 0x0b:\n  case 0x0c:\n  case 0x0d:\n  case 0x0e:\n  case 0x0f:\n  case 0x10:\n  case 0x11:\n  case 0x12:\n  case 0x13:\n  case 0x14:\n  case 0x15:\n  case 0x16:\n  case 0x17:\n  case 0x18:\n  case 0x19:\n  case 0x1a:\n  case 0x1b:\n  case 0x1c:\n  case 0x1d:\n  case 0x1e:\n  case 0x1f:\n    delta_length = code - 0x00;\n    if (delta_length + 1 + offset > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n    buffer.copyOut(offset + 1, delta_length, allocStringBuffer(&result, delta_length + 1));\n    *size = delta_length + 1;\n    return result;\n\n  case 0x30:\n  case 0x31:\n  case 0x32:\n  case 0x33:\n    if (offset + 2 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    delta_length = (code - 0x30) * 256 + buffer.peekInt<uint8_t>(offset + 1);\n    if (delta_length + 2 + offset > buffer.length()) {\n  
    throw EnvoyException(\"buffer underflow\");\n    }\n\n    buffer.copyOut(offset + 2, delta_length, allocStringBuffer(&result, delta_length + 1));\n    *size = delta_length + 2;\n    return result;\n\n  case 0x53:\n    if (offset + 3 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    delta_length = buffer.peekBEInt<uint16_t>(offset + 1);\n\n    if (delta_length + 3 + offset > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    buffer.copyOut(offset + 3, delta_length, allocStringBuffer(&result, delta_length + 1));\n    *size = delta_length + 3;\n    return result;\n\n  case 0x52:\n    if (offset + 3 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    delta_length = buffer.peekBEInt<uint16_t>(offset + 1);\n    buffer.copyOut(offset + 3, delta_length, allocStringBuffer(&result, delta_length + 1));\n    size_t next_size = 0;\n    result.append(peekString(buffer, &next_size, delta_length + 3 + offset));\n    *size = next_size + delta_length + 3;\n    return result;\n  }\n  throw EnvoyException(absl::StrCat(\"hessian type is not string \", code));\n}\n\nstd::string HessianUtils::readString(Buffer::Instance& buffer) {\n  size_t size;\n  std::string result(peekString(buffer, &size));\n  buffer.drain(size);\n  return result;\n}\n\nlong HessianUtils::peekLong(Buffer::Instance& buffer, size_t* size, uint64_t offset) {\n  ASSERT(buffer.length() > offset);\n  long result;\n  uint8_t code = buffer.peekInt<uint8_t>(offset);\n  switch (code) {\n  case 0xd8:\n  case 0xd9:\n  case 0xda:\n  case 0xdb:\n  case 0xdc:\n  case 0xdd:\n  case 0xde:\n  case 0xdf:\n  case 0xe0:\n  case 0xe1:\n  case 0xe2:\n  case 0xe3:\n  case 0xe4:\n  case 0xe5:\n  case 0xe6:\n  case 0xe7:\n  case 0xe8:\n  case 0xe9:\n  case 0xea:\n  case 0xeb:\n  case 0xec:\n  case 0xed:\n  case 0xee:\n  case 0xef:\n\n    result = code - 0xe0;\n    *size = 1;\n    return result;\n\n  case 0xf0:\n  case 0xf1:\n  case 0xf2:\n  case 0xf3:\n  case 0xf4:\n  case 
0xf5:\n  case 0xf6:\n  case 0xf7:\n  case 0xf8:\n  case 0xf9:\n  case 0xfa:\n  case 0xfb:\n  case 0xfc:\n  case 0xfd:\n  case 0xfe:\n  case 0xff:\n\n    if (offset + 2 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    result = leftShift<int16_t>(code - 0xf8, 8) + buffer.peekInt<uint8_t>(offset + 1);\n    *size = 2;\n    return result;\n\n  case 0x38:\n  case 0x39:\n  case 0x3a:\n  case 0x3b:\n  case 0x3c:\n  case 0x3d:\n  case 0x3e:\n  case 0x3f:\n\n    if (offset + 3 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    result = leftShift<int32_t>(code - 0x3c, 16) + (buffer.peekInt<uint8_t>(offset + 1) << 8) +\n             buffer.peekInt<uint8_t>(offset + 2);\n    *size = 3;\n    return result;\n\n  case 0x59:\n\n    if (offset + 5 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    result = buffer.peekBEInt<uint32_t>(offset + 1);\n    *size = 5;\n    return result;\n\n  case 0x4c:\n\n    if (offset + 9 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    result = buffer.peekBEInt<int64_t>(offset + 1);\n    *size = 9;\n    return result;\n  }\n\n  throw EnvoyException(absl::StrCat(\"hessian type is not long \", code));\n}\n\nlong HessianUtils::readLong(Buffer::Instance& buffer) {\n  size_t size;\n  const long result = peekLong(buffer, &size);\n  buffer.drain(size);\n  return result;\n}\n\nbool HessianUtils::peekBool(Buffer::Instance& buffer, size_t* size, uint64_t offset) {\n  ASSERT(buffer.length() > offset);\n  bool result;\n  const uint8_t code = buffer.peekInt<uint8_t>(offset);\n  if (code == 0x46) {\n    result = false;\n    *size = 1;\n    return result;\n  }\n\n  if (code == 0x54) {\n    result = true;\n    *size = 1;\n    return result;\n  }\n\n  throw EnvoyException(absl::StrCat(\"hessian type is not bool \", code));\n}\n\nbool HessianUtils::readBool(Buffer::Instance& buffer) {\n  size_t size;\n  bool result(peekBool(buffer, &size));\n  
buffer.drain(size);\n  return result;\n}\n\nint HessianUtils::peekInt(Buffer::Instance& buffer, size_t* size, uint64_t offset) {\n  ASSERT(buffer.length() > offset);\n  const uint8_t code = buffer.peekInt<uint8_t>(offset);\n  int result;\n\n  // Compact int\n  if (code >= 0x80 && code <= 0xbf) {\n    result = (code - 0x90);\n    *size = 1;\n    return result;\n  }\n\n  switch (code) {\n  case 0xc0:\n  case 0xc1:\n  case 0xc2:\n  case 0xc3:\n  case 0xc4:\n  case 0xc5:\n  case 0xc6:\n  case 0xc7:\n  case 0xc8:\n  case 0xc9:\n  case 0xca:\n  case 0xcb:\n  case 0xcc:\n  case 0xcd:\n  case 0xce:\n  case 0xcf:\n    if (offset + 2 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    result = leftShift<int16_t>(code - 0xc8, 8) + buffer.peekInt<uint8_t>(offset + 1);\n    *size = 2;\n    return result;\n\n  case 0xd0:\n  case 0xd1:\n  case 0xd2:\n  case 0xd3:\n  case 0xd4:\n  case 0xd5:\n  case 0xd6:\n  case 0xd7:\n    if (offset + 3 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n    result = leftShift<int32_t>(code - 0xd4, 16) + (buffer.peekInt<uint8_t>(offset + 1) << 8) +\n             buffer.peekInt<uint8_t>(offset + 2);\n    *size = 3;\n    return result;\n\n  case 0x49:\n    if (offset + 5 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n    result = buffer.peekBEInt<int32_t>(offset + 1);\n    *size = 5;\n    return result;\n  }\n\n  throw EnvoyException(absl::StrCat(\"hessian type is not int \", code));\n}\n\nint HessianUtils::readInt(Buffer::Instance& buffer) {\n  size_t size;\n  int result(peekInt(buffer, &size));\n  buffer.drain(size);\n  return result;\n}\n\ndouble HessianUtils::peekDouble(Buffer::Instance& buffer, size_t* size, uint64_t offset) {\n  ASSERT(buffer.length() > offset);\n  double result;\n  uint8_t code = buffer.peekInt<uint8_t>(offset);\n  switch (code) {\n  case 0x5b:\n    result = 0.0;\n    *size = 1;\n    return result;\n\n  case 0x5c:\n    result = 1.0;\n    *size 
= 1;\n    return result;\n\n  case 0x5d:\n    if (offset + 2 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n    result = static_cast<double>(buffer.peekInt<int8_t>(offset + 1));\n    *size = 2;\n    return result;\n\n  case 0x5e:\n    if (offset + 3 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n    result = static_cast<double>(256 * buffer.peekInt<int8_t>(offset + 1) +\n                                 buffer.peekInt<uint8_t>(offset + 2));\n    *size = 3;\n    return result;\n\n  case 0x5f:\n    if (offset + 5 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n    result = BufferHelper::peekFloat(buffer, offset + 1);\n    *size = 5;\n    return result;\n\n  case 0x44:\n    if (offset + 9 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n    result = BufferHelper::peekDouble(buffer, offset + 1);\n    *size = 9;\n    return result;\n  }\n\n  throw EnvoyException(absl::StrCat(\"hessian type is not double \", code));\n}\n\ndouble HessianUtils::readDouble(Buffer::Instance& buffer) {\n  size_t size;\n  double result(peekDouble(buffer, &size));\n  buffer.drain(size);\n  return result;\n}\n\nvoid HessianUtils::peekNull(Buffer::Instance& buffer, size_t* size, uint64_t offset) {\n  ASSERT(buffer.length() > offset);\n  uint8_t code = buffer.peekInt<uint8_t>(offset);\n  if (code == 0x4e) {\n    *size = 1;\n    return;\n  }\n\n  throw EnvoyException(absl::StrCat(\"hessian type is not null \", code));\n}\n\nvoid HessianUtils::readNull(Buffer::Instance& buffer) {\n  size_t size;\n  peekNull(buffer, &size);\n  buffer.drain(size);\n}\n\nstd::chrono::milliseconds HessianUtils::peekDate(Buffer::Instance& buffer, size_t* size,\n                                                 uint64_t offset) {\n  ASSERT(buffer.length() > offset);\n  std::chrono::milliseconds result;\n  uint8_t code = buffer.peekInt<uint8_t>(offset);\n  switch (code) {\n  case 0x4b:\n    if 
(offset + 5 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    result = std::chrono::minutes(buffer.peekBEInt<uint32_t>(offset + 1));\n    *size = 5;\n    return result;\n\n  case 0x4a:\n    if (offset + 9 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n    result = std::chrono::milliseconds(buffer.peekBEInt<uint64_t>(offset + 1));\n    *size = 9;\n    return result;\n  }\n\n  throw EnvoyException(absl::StrCat(\"hessian type is not date \", code));\n}\n\nstd::chrono::milliseconds HessianUtils::readDate(Buffer::Instance& buffer) {\n  size_t size;\n  std::chrono::milliseconds result;\n  result = peekDate(buffer, &size);\n  buffer.drain(size);\n  return result;\n}\n\nstd::string HessianUtils::peekByte(Buffer::Instance& buffer, size_t* size, uint64_t offset) {\n  ASSERT(buffer.length() > offset);\n  std::string result;\n  uint8_t code = buffer.peekInt<uint8_t>(offset);\n  size_t delta_length = 0;\n  switch (code) {\n  case 0x20:\n  case 0x21:\n  case 0x22:\n  case 0x23:\n  case 0x24:\n  case 0x25:\n  case 0x26:\n  case 0x27:\n  case 0x28:\n  case 0x29:\n  case 0x2a:\n  case 0x2b:\n  case 0x2c:\n  case 0x2d:\n  case 0x2e:\n  case 0x2f:\n    delta_length = code - 0x20;\n    if (delta_length + 1 + offset > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    buffer.copyOut(offset + 1, delta_length, allocStringBuffer(&result, delta_length + 1));\n    *size = delta_length + 1;\n    return result;\n\n  case 0x42:\n    if (offset + 3 > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    delta_length = buffer.peekBEInt<uint16_t>(offset + 1);\n    if (delta_length + 3 + offset > buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    buffer.copyOut(offset + 3, delta_length, allocStringBuffer(&result, delta_length + 1));\n    *size = delta_length + 3;\n    return result;\n\n  case 0x41:\n    if (offset + 3 > 
buffer.length()) {\n      throw EnvoyException(\"buffer underflow\");\n    }\n\n    delta_length = buffer.peekBEInt<uint16_t>(offset + 1);\n    buffer.copyOut(offset + 3, delta_length, allocStringBuffer(&result, delta_length + 1));\n    size_t next_size;\n    result.append(peekByte(buffer, &next_size, delta_length + 3 + offset));\n    *size = delta_length + 3 + next_size;\n    return result;\n  }\n\n  throw EnvoyException(absl::StrCat(\"hessian type is not byte \", code));\n}\n\nstd::string HessianUtils::readByte(Buffer::Instance& buffer) {\n  size_t size;\n  std::string result(peekByte(buffer, &size));\n  buffer.drain(size);\n  return result;\n}\n\nsize_t HessianUtils::writeString(Buffer::Instance& buffer, absl::string_view str) {\n  return doWriteString(buffer, str);\n}\n\nsize_t HessianUtils::writeInt(Buffer::Instance& buffer, uint8_t value) {\n  // Compact int\n  buffer.writeByte(0x90 + value);\n  return sizeof(uint8_t);\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/hessian_utils.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <map>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n/*\n * Hessian deserialization\n * See http://hessian.caucho.com/doc/hessian-serialization.html\n */\nclass HessianUtils {\npublic:\n  static std::string peekString(Buffer::Instance& buffer, size_t* size, uint64_t offset = 0);\n  static long peekLong(Buffer::Instance& buffer, size_t* size, uint64_t offset = 0);\n  static bool peekBool(Buffer::Instance& buffer, size_t* size, uint64_t offset = 0);\n  static int peekInt(Buffer::Instance& buffer, size_t* size, uint64_t offset = 0);\n  static double peekDouble(Buffer::Instance& buffer, size_t* size, uint64_t offset = 0);\n  static void peekNull(Buffer::Instance& buffer, size_t* size, uint64_t offset = 0);\n  static std::chrono::milliseconds peekDate(Buffer::Instance& buffer, size_t* size,\n                                            uint64_t offset = 0);\n  static std::string peekByte(Buffer::Instance& buffer, size_t* size, uint64_t offset = 0);\n\n  static std::string readString(Buffer::Instance& buffer);\n  static long readLong(Buffer::Instance& buffer);\n  static bool readBool(Buffer::Instance& buffer);\n  static int readInt(Buffer::Instance& buffer);\n  static double readDouble(Buffer::Instance& buffer);\n  static void readNull(Buffer::Instance& buffer);\n  static std::chrono::milliseconds readDate(Buffer::Instance& buffer);\n  static std::string readByte(Buffer::Instance& buffer);\n\n  static size_t writeString(Buffer::Instance& buffer, absl::string_view str);\n  static size_t writeInt(Buffer::Instance& buffer, uint8_t value);\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/message.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n/**\n * Stream reset reasons.\n */\nenum class StreamResetReason : uint8_t {\n  // If a local codec level reset was sent on the stream.\n  LocalReset,\n  // If a local codec level refused stream reset was sent on the stream (allowing for retry).\n  LocalRefusedStreamReset,\n  // If a remote codec level reset was received on the stream.\n  RemoteReset,\n  // If a remote codec level refused stream reset was received on the stream (allowing for retry).\n  RemoteRefusedStreamReset,\n  // If the stream was locally reset by a connection pool due to an initial connection failure.\n  ConnectionFailure,\n  // If the stream was locally reset due to connection termination.\n  ConnectionTermination,\n  // The stream was reset because of a resource overflow.\n  Overflow\n};\n\n// Supported protocol type\nenum class ProtocolType : uint8_t {\n  Dubbo = 1,\n\n  // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST PROTOCOL TYPE\n  LastProtocolType = Dubbo,\n};\n\n// Supported serialization type\nenum class SerializationType : uint8_t {\n  Hessian2 = 2,\n};\n\n// Message Type\nenum class MessageType : uint8_t {\n  Response = 0,\n  Request = 1,\n  Oneway = 2,\n  Exception = 3,\n  HeartbeatRequest = 4,\n  HeartbeatResponse = 5,\n\n  // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST MESSAGE TYPE\n  LastMessageType = HeartbeatResponse,\n};\n\n/**\n * Dubbo protocol response status types.\n * See org.apache.dubbo.remoting.exchange\n */\nenum class ResponseStatus : uint8_t {\n  Ok = 20,\n  ClientTimeout = 30,\n  ServerTimeout = 31,\n  BadRequest = 40,\n  BadResponse = 50,\n  ServiceNotFound = 60,\n  ServiceError = 70,\n  ServerError = 80,\n  ClientError = 
90,\n  ServerThreadpoolExhaustedError = 100,\n};\n\nenum class RpcResponseType : uint8_t {\n  ResponseWithException = 0,\n  ResponseWithValue = 1,\n  ResponseWithNullValue = 2,\n  ResponseWithExceptionWithAttachments = 3,\n  ResponseValueWithAttachments = 4,\n  ResponseNullValueWithAttachments = 5,\n};\n\nclass Context {\npublic:\n  using AttachmentMap = absl::node_hash_map<std::string, std::string>;\n\n  bool hasAttachments() const { return !attachments_.empty(); }\n  const AttachmentMap& attachments() const { return attachments_; }\n\n  Buffer::Instance& messageOriginData() { return message_origin_buffer_; }\n  size_t messageSize() const { return headerSize() + bodySize(); }\n\n  virtual size_t bodySize() const PURE;\n  virtual size_t headerSize() const PURE;\n\nprotected:\n  Context() = default;\n  virtual ~Context() { attachments_.clear(); }\n\n  AttachmentMap attachments_;\n  Buffer::OwnedImpl message_origin_buffer_;\n};\n\nusing ContextSharedPtr = std::shared_ptr<Context>;\n\n/**\n * RpcInvocation represent an rpc call\n * See\n * https://github.com/apache/incubator-dubbo/blob/master/dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/RpcInvocation.java\n */\nclass RpcInvocation {\npublic:\n  virtual ~RpcInvocation() = default;\n\n  virtual const std::string& serviceName() const PURE;\n  virtual const std::string& methodName() const PURE;\n  virtual const absl::optional<std::string>& serviceVersion() const PURE;\n  virtual const absl::optional<std::string>& serviceGroup() const PURE;\n};\n\nusing RpcInvocationSharedPtr = std::shared_ptr<RpcInvocation>;\n\n/**\n * RpcResult represent the result of an rpc call\n * See\n * https://github.com/apache/incubator-dubbo/blob/master/dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/RpcResult.java\n */\nclass RpcResult {\npublic:\n  virtual ~RpcResult() = default;\n  virtual bool hasException() const PURE;\n};\n\nusing RpcResultSharedPtr = std::shared_ptr<RpcResult>;\n\n} // namespace DubboProxy\n} // 
namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/message_impl.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/dubbo_proxy/message.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nclass ContextBase : public Context {\npublic:\n  ContextBase() = default;\n  ~ContextBase() override = default;\n\n  // Override from Context\n  size_t bodySize() const override { return body_size_; }\n  size_t headerSize() const override { return header_size_; }\n\n  void setBodySize(size_t size) { body_size_ = size; }\n  void setHeaderSize(size_t size) { header_size_ = size; }\n\nprotected:\n  size_t body_size_{0};\n  size_t header_size_{0};\n};\n\nclass ContextImpl : public ContextBase {\npublic:\n  ContextImpl() = default;\n  ~ContextImpl() override = default;\n\n  bool isHeartbeat() const { return is_heartbeat_; }\n  void setHeartbeat(bool is_heartbeat) { is_heartbeat_ = is_heartbeat; }\n\nprivate:\n  bool is_heartbeat_{false};\n};\n\nclass RpcInvocationBase : public RpcInvocation {\npublic:\n  ~RpcInvocationBase() override = default;\n\n  void setServiceName(const std::string& name) { service_name_ = name; }\n  const std::string& serviceName() const override { return service_name_; }\n\n  void setMethodName(const std::string& name) { method_name_ = name; }\n  const std::string& methodName() const override { return method_name_; }\n\n  void setServiceVersion(const std::string& version) { service_version_ = version; }\n  const absl::optional<std::string>& serviceVersion() const override { return service_version_; }\n\n  void setServiceGroup(const std::string& group) { group_ = group; }\n  const absl::optional<std::string>& serviceGroup() const override { return group_; }\n\nprotected:\n  std::string service_name_;\n  std::string method_name_;\n  absl::optional<std::string> service_version_;\n  absl::optional<std::string> group_;\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/metadata.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/message.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nclass MessageMetadata {\npublic:\n  void setInvocationInfo(RpcInvocationSharedPtr invocation_info) {\n    invocation_info_ = invocation_info;\n  }\n  bool hasInvocationInfo() const { return invocation_info_ != nullptr; }\n  const RpcInvocation& invocationInfo() const { return *invocation_info_; }\n\n  void setProtocolType(ProtocolType type) { proto_type_ = type; }\n  ProtocolType protocolType() const { return proto_type_; }\n\n  void setProtocolVersion(uint8_t version) { protocol_version_ = version; }\n  uint8_t protocolVersion() const { return protocol_version_; }\n\n  void setMessageType(MessageType type) { message_type_ = type; }\n  MessageType messageType() const { return message_type_; }\n\n  void setRequestId(int64_t id) { request_id_ = id; }\n  int64_t requestId() const { return request_id_; }\n\n  void setTimeout(uint32_t timeout) { timeout_ = timeout; }\n  absl::optional<uint32_t> timeout() const { return timeout_; }\n\n  void setTwoWayFlag(bool two_way) { is_two_way_ = two_way; }\n  bool isTwoWay() const { return is_two_way_; }\n\n  template <typename T = SerializationType> void setSerializationType(T type) {\n    ASSERT((std::is_same<uint8_t, typename std::underlying_type<T>::type>::value));\n    serialization_type_ = static_cast<uint8_t>(type);\n  }\n  template <typename T = SerializationType> T serializationType() const {\n    ASSERT((std::is_same<uint8_t, typename std::underlying_type<T>::type>::value));\n    return static_cast<T>(serialization_type_);\n  }\n\n  template <typename T = ResponseStatus> void setResponseStatus(T status) {\n    
ASSERT((std::is_same<uint8_t, typename std::underlying_type<T>::type>::value));\n    response_status_ = static_cast<uint8_t>(status);\n  }\n  template <typename T = ResponseStatus> T responseStatus() const {\n    ASSERT((std::is_same<uint8_t, typename std::underlying_type<T>::type>::value));\n    return static_cast<T>(response_status_.value());\n  }\n  bool hasResponseStatus() const { return response_status_.has_value(); }\n\nprivate:\n  bool is_two_way_{false};\n\n  MessageType message_type_{MessageType::Request};\n  ProtocolType proto_type_{ProtocolType::Dubbo};\n\n  absl::optional<uint8_t> response_status_;\n  absl::optional<uint32_t> timeout_;\n\n  RpcInvocationSharedPtr invocation_info_;\n\n  uint8_t serialization_type_{static_cast<uint8_t>(SerializationType::Hessian2)};\n  uint8_t protocol_version_{1};\n  int64_t request_id_ = 0;\n};\n\nusing MessageMetadataSharedPtr = std::shared_ptr<MessageMetadata>;\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/protocol.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/typed_config.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/message.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n/**\n * See https://dubbo.incubator.apache.org/en-us/docs/dev/implementation.html\n */\nclass Protocol {\npublic:\n  virtual ~Protocol() = default;\n  Protocol() = default;\n\n  /**\n   * @return Initializes the serializer used by the protocol codec\n   */\n  void initSerializer(SerializationType type) {\n    serializer_ = NamedSerializerConfigFactory::getFactory(this->type(), type).createSerializer();\n  }\n\n  /**\n   * @return Serializer the protocol Serializer\n   */\n  virtual Serializer* serializer() const { return serializer_.get(); }\n\n  virtual const std::string& name() const PURE;\n\n  /**\n   * @return ProtocolType the protocol type\n   */\n  virtual ProtocolType type() const PURE;\n\n  /*\n   * decodes the dubbo protocol message header.\n   *\n   * @param buffer the currently buffered dubbo data.\n   * @param metadata the meta data of current messages\n   * @return ContextSharedPtr save the context data of current messages,\n   *                 nullptr if more data is required.\n   *         bool true if a complete message was successfully consumed, false if more data\n   *                 is required.\n   * @throws EnvoyException if the data is not valid for this protocol.\n   */\n  virtual std::pair<ContextSharedPtr, bool> decodeHeader(Buffer::Instance& buffer,\n                                                         MessageMetadataSharedPtr metadata) PURE;\n\n  /*\n   * 
decodes the dubbo protocol message body, potentially invoking callbacks.\n   * If successful, the message is removed from the buffer.\n   *\n   * @param buffer the currently buffered dubbo data.\n   * @param context save the meta data of current messages.\n   * @param metadata the meta data of current messages\n   * @return bool true if a complete message was successfully consumed, false if more data\n   *                 is required.\n   * @throws EnvoyException if the data is not valid for this protocol.\n   */\n  virtual bool decodeData(Buffer::Instance& buffer, ContextSharedPtr context,\n                          MessageMetadataSharedPtr metadata) PURE;\n\n  /*\n   * encodes the dubbo protocol message.\n   *\n   * @param buffer save the currently buffered dubbo data.\n   * @param metadata the meta data of dubbo protocol\n   * @param content the body of dubbo protocol message\n   * @param type the type of dubbo protocol response message\n   * @return bool true if the protocol coding succeeds.\n   */\n  virtual bool encode(Buffer::Instance& buffer, const MessageMetadata& metadata,\n                      const std::string& content,\n                      RpcResponseType type = RpcResponseType::ResponseWithValue) PURE;\n\nprotected:\n  SerializerPtr serializer_;\n};\n\nusing ProtocolPtr = std::unique_ptr<Protocol>;\n\n/**\n * Implemented by each Dubbo protocol and registered via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass NamedProtocolConfigFactory : public Config::UntypedFactory {\npublic:\n  ~NamedProtocolConfigFactory() override = default;\n\n  /**\n   * Create a particular Dubbo protocol.\n   * @param serialization_type the serialization type of the protocol body.\n   * @return protocol instance pointer.\n   */\n  virtual ProtocolPtr createProtocol(SerializationType serialization_type) PURE;\n\n  std::string category() const override { return \"envoy.dubbo_proxy.protocols\"; }\n\n  /**\n   * Convenience method to lookup a 
factory by type.\n   * @param ProtocolType the protocol type.\n   * @return NamedProtocolConfigFactory& for the ProtocolType.\n   */\n  static NamedProtocolConfigFactory& getFactory(ProtocolType type) {\n    const std::string& name = ProtocolNames::get().fromType(type);\n    return Envoy::Config::Utility::getAndCheckFactoryByName<NamedProtocolConfigFactory>(name);\n  }\n};\n\n/**\n * ProtocolFactoryBase provides a template for a trivial NamedProtocolConfigFactory.\n */\ntemplate <class ProtocolImpl> class ProtocolFactoryBase : public NamedProtocolConfigFactory {\npublic:\n  ProtocolPtr createProtocol(SerializationType serialization_type) override {\n    auto protocol = std::make_unique<ProtocolImpl>();\n    protocol->initSerializer(serialization_type);\n    return protocol;\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  ProtocolFactoryBase(ProtocolType type) : name_(ProtocolNames::get().fromType(type)) {}\n\nprivate:\n  const std::string name_;\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/protocol_constants.h",
    "content": "#pragma once\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/message.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n/**\n * Names of available Protocol implementations.\n */\nclass ProtocolNameValues {\npublic:\n  struct ProtocolTypeHash {\n    template <typename T> std::size_t operator()(T t) const { return static_cast<std::size_t>(t); }\n  };\n\n  using ProtocolTypeNameMap = absl::node_hash_map<ProtocolType, std::string, ProtocolTypeHash>;\n\n  const ProtocolTypeNameMap protocolTypeNameMap = {\n      {ProtocolType::Dubbo, \"dubbo\"},\n  };\n\n  const std::string& fromType(ProtocolType type) const {\n    const auto& itor = protocolTypeNameMap.find(type);\n    ASSERT(itor != protocolTypeNameMap.end());\n    return itor->second;\n  }\n};\n\nusing ProtocolNames = ConstSingleton<ProtocolNameValues>;\n\n/**\n * Names of available serializer implementations.\n */\nclass SerializerNameValues {\npublic:\n  struct SerializationTypeHash {\n    template <typename T> std::size_t operator()(T t) const { return static_cast<std::size_t>(t); }\n  };\n\n  using SerializerTypeNameMap =\n      absl::node_hash_map<SerializationType, std::string, SerializationTypeHash>;\n\n  const SerializerTypeNameMap serializerTypeNameMap = {\n      {SerializationType::Hessian2, \"hessian2\"},\n  };\n\n  const std::string& fromType(SerializationType type) const {\n    const auto& itor = serializerTypeNameMap.find(type);\n    ASSERT(itor != serializerTypeNameMap.end());\n    return itor->second;\n  }\n};\n\nusing SerializerNames = ConstSingleton<SerializerNameValues>;\n\nclass ProtocolSerializerNameValues {\npublic:\n  inline uint8_t generateKey(ProtocolType protocol_type,\n                             SerializationType serialization_type) const {\n    
return static_cast<uint8_t>(serialization_type) ^ static_cast<uint8_t>(protocol_type);\n  }\n\n  inline std::string generateValue(ProtocolType protocol_type,\n                                   SerializationType serialization_type) const {\n    return fmt::format(\"{}.{}\", ProtocolNames::get().fromType(protocol_type),\n                       SerializerNames::get().fromType(serialization_type));\n  }\n\n#define GENERATE_PAIR(X, Y) generateKey(X, Y), generateValue(X, Y)\n\n  using ProtocolSerializerTypeNameMap = absl::node_hash_map<uint8_t, std::string>;\n\n  const ProtocolSerializerTypeNameMap protocolSerializerTypeNameMap = {\n      {GENERATE_PAIR(ProtocolType::Dubbo, SerializationType::Hessian2)},\n  };\n\n  const std::string& fromType(ProtocolType protocol_type, SerializationType type) const {\n    const auto& itor = protocolSerializerTypeNameMap.find(generateKey(protocol_type, type));\n    ASSERT(itor != protocolSerializerTypeNameMap.end());\n    return itor->second;\n  }\n};\n\nusing ProtocolSerializerNames = ConstSingleton<ProtocolSerializerNameValues>;\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/router/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"router_interface\",\n    hdrs = [\"router.h\"],\n    deps = [\n        \"//include/envoy/router:router_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy:metadata_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"route_matcher_interface\",\n    hdrs = [\"route.h\"],\n    deps = [\n        \":router_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/singleton:const_singleton\",\n        \"//source/extensions/filters/network/dubbo_proxy:metadata_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"route_matcher\",\n    srcs = [\"route_matcher.cc\"],\n    hdrs = [\"route_matcher.h\"],\n    deps = [\n        \":route_matcher_interface\",\n        \":router_interface\",\n        \"//include/envoy/router:router_interface\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:metadata_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:serializer_interface\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    deps = [\n        \":router_lib\",\n        \"//include/envoy/registry\",\n        
\"//source/extensions/filters/network/dubbo_proxy/filters:factory_base_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:filter_config_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_lib\",\n    srcs = [\"router_impl.cc\"],\n    hdrs = [\"router_impl.h\"],\n    deps = [\n        \":router_interface\",\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:load_balancer_interface\",\n        \"//include/envoy/upstream:thread_local_cluster_interface\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/router:metadatamatchcriteria_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:app_exception_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:protocol_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy:serializer_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:filter_interface\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/router/config.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/router/config.h\"\n\n#include \"envoy/extensions/filters/network/dubbo_proxy/router/v3/router.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/router/v3/router.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/router/router_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\nDubboFilters::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::dubbo_proxy::router::v3::Router&, const std::string&,\n    Server::Configuration::FactoryContext& context) {\n  return [&context](DubboFilters::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addDecoderFilter(std::make_shared<Router>(context.clusterManager()));\n  };\n}\n\n/**\n * Static registration for the router filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(RouterFilterConfig, DubboFilters::NamedDubboFilterConfigFactory);\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/router/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/dubbo_proxy/router/v3/router.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/router/v3/router.pb.validate.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/filters/factory_base.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\nclass RouterFilterConfig\n    : public DubboFilters::FactoryBase<\n          envoy::extensions::filters::network::dubbo_proxy::router::v3::Router> {\npublic:\n  RouterFilterConfig() : FactoryBase(DubboFilters::DubboFilterNames::get().ROUTER) {}\n\nprivate:\n  DubboFilters::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::dubbo_proxy::router::v3::Router& proto_config,\n      const std::string& stat_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/router/route.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/route.pb.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\nusing RouteConfigurations = Protobuf::RepeatedPtrField<\n    envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration>;\n\nenum class RouteMatcherType : uint8_t {\n  Default,\n};\n\n/**\n * Names of available Protocol implementations.\n */\nclass RouteMatcherNameValues {\npublic:\n  struct RouteMatcherTypeHash {\n    template <typename T> std::size_t operator()(T t) const { return static_cast<std::size_t>(t); }\n  };\n\n  using RouteMatcherNameMap =\n      absl::node_hash_map<RouteMatcherType, std::string, RouteMatcherTypeHash>;\n\n  const RouteMatcherNameMap routeMatcherNameMap = {\n      {RouteMatcherType::Default, \"default\"},\n  };\n\n  const std::string& fromType(RouteMatcherType type) const {\n    const auto& itor = routeMatcherNameMap.find(type);\n    ASSERT(itor != routeMatcherNameMap.end());\n    return itor->second;\n  }\n};\n\nusing RouteMatcherNames = ConstSingleton<RouteMatcherNameValues>;\n\nclass RouteMatcher {\npublic:\n  virtual ~RouteMatcher() = default;\n\n  virtual RouteConstSharedPtr route(const MessageMetadata& metadata,\n                                    uint64_t random_value) const PURE;\n};\n\nusing RouteMatcherPtr = std::unique_ptr<RouteMatcher>;\nusing RouteMatcherConstSharedPtr = std::shared_ptr<const RouteMatcher>;\n\n/**\n * Implemented by each Dubbo protocol and registered via Registry::registerFactory or the\n * 
convenience class RegisterFactory.\n */\nclass NamedRouteMatcherConfigFactory : public Envoy::Config::UntypedFactory {\npublic:\n  ~NamedRouteMatcherConfigFactory() override = default;\n\n  /**\n   * Create a particular Dubbo protocol.\n   * @param serialization_type the serialization type of the protocol body.\n   * @return protocol instance pointer.\n   */\n  virtual RouteMatcherPtr createRouteMatcher(const RouteConfigurations& route_configs,\n                                             Server::Configuration::FactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.dubbo_proxy.route_matchers\"; }\n\n  /**\n   * Convenience method to lookup a factory by type.\n   * @param RouteMatcherType the protocol type.\n   * @return NamedRouteMatcherConfigFactory& for the RouteMatcherType.\n   */\n  static NamedRouteMatcherConfigFactory& getFactory(RouteMatcherType type) {\n    const std::string& name = RouteMatcherNames::get().fromType(type);\n    return Envoy::Config::Utility::getAndCheckFactoryByName<NamedRouteMatcherConfigFactory>(name);\n  }\n};\n\n/**\n * RouteMatcherFactoryBase provides a template for a trivial NamedProtocolConfigFactory.\n */\ntemplate <class RouteMatcherImpl>\nclass RouteMatcherFactoryBase : public NamedRouteMatcherConfigFactory {\npublic:\n  RouteMatcherPtr createRouteMatcher(const RouteConfigurations& route_configs,\n                                     Server::Configuration::FactoryContext& context) override {\n    return std::make_unique<RouteMatcherImpl>(route_configs, context);\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  RouteMatcherFactoryBase(RouteMatcherType type) : name_(RouteMatcherNames::get().fromType(type)) {}\n\nprivate:\n  const std::string name_;\n};\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/router/route_matcher.h\"\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/route.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/serializer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\nRouteEntryImplBase::RouteEntryImplBase(\n    const envoy::extensions::filters::network::dubbo_proxy::v3::Route& route)\n    : cluster_name_(route.route().cluster()),\n      config_headers_(Http::HeaderUtility::buildHeaderDataVector(route.match().headers())) {\n  if (route.route().cluster_specifier_case() ==\n      envoy::extensions::filters::network::dubbo_proxy::v3::RouteAction::ClusterSpecifierCase::\n          kWeightedClusters) {\n    total_cluster_weight_ = 0UL;\n    for (const auto& cluster : route.route().weighted_clusters().clusters()) {\n      weighted_clusters_.emplace_back(std::make_shared<WeightedClusterEntry>(*this, cluster));\n      total_cluster_weight_ += weighted_clusters_.back()->clusterWeight();\n    }\n    ENVOY_LOG(debug, \"dubbo route matcher: weighted_clusters_size {}\", weighted_clusters_.size());\n  }\n}\n\nconst std::string& RouteEntryImplBase::clusterName() const { return cluster_name_; }\n\nconst RouteEntry* RouteEntryImplBase::routeEntry() const { return this; }\n\nRouteConstSharedPtr RouteEntryImplBase::clusterEntry(uint64_t random_value) const {\n  if (weighted_clusters_.empty()) {\n    ENVOY_LOG(debug, \"dubbo route matcher: weighted_clusters_size {}\", weighted_clusters_.size());\n    return shared_from_this();\n  }\n\n  return WeightedClusterUtil::pickCluster(weighted_clusters_, total_cluster_weight_, random_value,\n                                          false);\n}\n\nbool RouteEntryImplBase::headersMatch(const Http::HeaderMap& headers) const {\n  ENVOY_LOG(debug, \"dubbo route 
matcher: headers size {}, metadata headers size {}\",\n            config_headers_.size(), headers.size());\n  return Http::HeaderUtility::matchHeaders(headers, config_headers_);\n}\n\nRouteEntryImplBase::WeightedClusterEntry::WeightedClusterEntry(const RouteEntryImplBase& parent,\n                                                               const WeightedCluster& cluster)\n    : parent_(parent), cluster_name_(cluster.name()),\n      cluster_weight_(PROTOBUF_GET_WRAPPED_REQUIRED(cluster, weight)) {}\n\nParameterRouteEntryImpl::ParameterRouteEntryImpl(\n    const envoy::extensions::filters::network::dubbo_proxy::v3::Route& route)\n    : RouteEntryImplBase(route) {\n  for (auto& config : route.match().method().params_match()) {\n    parameter_data_list_.emplace_back(config.first, config.second);\n  }\n}\n\nParameterRouteEntryImpl::~ParameterRouteEntryImpl() = default;\n\nbool ParameterRouteEntryImpl::matchParameter(absl::string_view request_data,\n                                             const ParameterData& config_data) const {\n  switch (config_data.match_type_) {\n  case Http::HeaderUtility::HeaderMatchType::Value:\n    return config_data.value_.empty() || request_data == config_data.value_;\n  case Http::HeaderUtility::HeaderMatchType::Range: {\n    int64_t value = 0;\n    return absl::SimpleAtoi(request_data, &value) && value >= config_data.range_.start() &&\n           value < config_data.range_.end();\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nRouteConstSharedPtr ParameterRouteEntryImpl::matches(const MessageMetadata& metadata,\n                                                     uint64_t random_value) const {\n  ASSERT(metadata.hasInvocationInfo());\n  const auto invocation = dynamic_cast<const RpcInvocationImpl*>(&metadata.invocationInfo());\n  ASSERT(invocation);\n  if (!invocation->hasParameters()) {\n    return nullptr;\n  }\n\n  ENVOY_LOG(debug, \"dubbo route matcher: parameter name match\");\n  for (auto& config_data : 
parameter_data_list_) {\n    const std::string& data = invocation->getParameterValue(config_data.index_);\n    if (data.empty()) {\n      ENVOY_LOG(debug,\n                \"dubbo route matcher: parameter matching failed, there are no parameters in the \"\n                \"user request, index '{}'\",\n                config_data.index_);\n      return nullptr;\n    }\n\n    if (!matchParameter(data, config_data)) {\n      ENVOY_LOG(debug, \"dubbo route matcher: parameter matching failed, index '{}', value '{}'\",\n                config_data.index_, data);\n      return nullptr;\n    }\n  }\n\n  return clusterEntry(random_value);\n}\n\nParameterRouteEntryImpl::ParameterData::ParameterData(uint32_t index,\n                                                      const ParameterMatchSpecifier& config) {\n  index_ = index;\n  switch (config.parameter_match_specifier_case()) {\n  case ParameterMatchSpecifier::kExactMatch:\n    match_type_ = Http::HeaderUtility::HeaderMatchType::Value;\n    value_ = config.exact_match();\n    break;\n  case ParameterMatchSpecifier::kRangeMatch:\n    match_type_ = Http::HeaderUtility::HeaderMatchType::Range;\n    range_.set_start(config.range_match().start());\n    range_.set_end(config.range_match().end());\n    break;\n  default:\n    match_type_ = Http::HeaderUtility::HeaderMatchType::Value;\n    break;\n  }\n}\n\nMethodRouteEntryImpl::MethodRouteEntryImpl(\n    const envoy::extensions::filters::network::dubbo_proxy::v3::Route& route)\n    : RouteEntryImplBase(route), method_name_(route.match().method().name()) {\n  if (route.match().method().params_match_size() != 0) {\n    parameter_route_ = std::make_shared<ParameterRouteEntryImpl>(route);\n  }\n}\n\nMethodRouteEntryImpl::~MethodRouteEntryImpl() = default;\n\nRouteConstSharedPtr MethodRouteEntryImpl::matches(const MessageMetadata& metadata,\n                                                  uint64_t random_value) const {\n  ASSERT(metadata.hasInvocationInfo());\n  const auto 
invocation = dynamic_cast<const RpcInvocationImpl*>(&metadata.invocationInfo());\n  ASSERT(invocation);\n\n  if (invocation->hasHeaders() && !RouteEntryImplBase::headersMatch(invocation->headers())) {\n    ENVOY_LOG(error, \"dubbo route matcher: headers not match\");\n    return nullptr;\n  }\n\n  if (invocation->methodName().empty()) {\n    ENVOY_LOG(error, \"dubbo route matcher: there is no method name in the metadata\");\n    return nullptr;\n  }\n\n  if (!method_name_.match(invocation->methodName())) {\n    ENVOY_LOG(debug, \"dubbo route matcher: method matching failed, input method '{}'\",\n              invocation->methodName());\n    return nullptr;\n  }\n\n  if (parameter_route_) {\n    ENVOY_LOG(debug, \"dubbo route matcher: parameter matching is required\");\n    return parameter_route_->matches(metadata, random_value);\n  }\n\n  return clusterEntry(random_value);\n}\n\nSingleRouteMatcherImpl::SingleRouteMatcherImpl(const RouteConfig& config,\n                                               Server::Configuration::FactoryContext&)\n    : service_name_(config.interface()), group_(config.group()), version_(config.version()) {\n  using envoy::extensions::filters::network::dubbo_proxy::v3::RouteMatch;\n\n  for (const auto& route : config.routes()) {\n    routes_.emplace_back(std::make_shared<MethodRouteEntryImpl>(route));\n  }\n  ENVOY_LOG(debug, \"dubbo route matcher: routes list size {}\", routes_.size());\n}\n\nRouteConstSharedPtr SingleRouteMatcherImpl::route(const MessageMetadata& metadata,\n                                                  uint64_t random_value) const {\n  ASSERT(metadata.hasInvocationInfo());\n  const auto& invocation = metadata.invocationInfo();\n\n  if (service_name_ == invocation.serviceName() &&\n      (group_.value().empty() ||\n       (invocation.serviceGroup().has_value() && invocation.serviceGroup().value() == group_)) &&\n      (version_.value().empty() || (invocation.serviceVersion().has_value() &&\n                             
       invocation.serviceVersion().value() == version_))) {\n    for (const auto& route : routes_) {\n      RouteConstSharedPtr route_entry = route->matches(metadata, random_value);\n      if (nullptr != route_entry) {\n        return route_entry;\n      }\n    }\n  } else {\n    ENVOY_LOG(debug, \"dubbo route matcher: interface matching failed\");\n  }\n\n  return nullptr;\n}\n\nMultiRouteMatcher::MultiRouteMatcher(const RouteConfigList& route_config_list,\n                                     Server::Configuration::FactoryContext& context) {\n  for (const auto& route_config : route_config_list) {\n    route_matcher_list_.emplace_back(\n        std::make_unique<SingleRouteMatcherImpl>(route_config, context));\n  }\n  ENVOY_LOG(debug, \"route matcher list size {}\", route_matcher_list_.size());\n}\n\nRouteConstSharedPtr MultiRouteMatcher::route(const MessageMetadata& metadata,\n                                             uint64_t random_value) const {\n  for (const auto& route_matcher : route_matcher_list_) {\n    auto route = route_matcher->route(metadata, random_value);\n    if (nullptr != route) {\n      return route;\n    }\n  }\n\n  return nullptr;\n}\n\nclass DefaultRouteMatcherConfigFactory : public RouteMatcherFactoryBase<MultiRouteMatcher> {\npublic:\n  DefaultRouteMatcherConfigFactory() : RouteMatcherFactoryBase(RouteMatcherType::Default) {}\n};\n\n/**\n * Static registration for the Dubbo protocol. @see RegisterFactory.\n */\nREGISTER_FACTORY(DefaultRouteMatcherConfigFactory, NamedRouteMatcherConfigFactory);\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/router/route_matcher.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/route.pb.h\"\n#include \"envoy/type/v3/range.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/matchers.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/route.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/router.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\nclass RouteEntryImplBase : public RouteEntry,\n                           public Route,\n                           public std::enable_shared_from_this<RouteEntryImplBase>,\n                           public Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  RouteEntryImplBase(const envoy::extensions::filters::network::dubbo_proxy::v3::Route& route);\n  ~RouteEntryImplBase() override = default;\n\n  // Router::RouteEntry\n  const std::string& clusterName() const override;\n  const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override {\n    return metadata_match_criteria_.get();\n  }\n\n  // Router::Route\n  const RouteEntry* routeEntry() const override;\n\n  virtual RouteConstSharedPtr matches(const MessageMetadata& metadata,\n                                      uint64_t random_value) const PURE;\n\nprotected:\n  RouteConstSharedPtr clusterEntry(uint64_t random_value) const;\n  bool headersMatch(const Http::HeaderMap& headers) const;\n\nprivate:\n  class WeightedClusterEntry : public RouteEntry, public Route {\n  public:\n    using WeightedCluster = envoy::config::route::v3::WeightedCluster::ClusterWeight;\n    WeightedClusterEntry(const RouteEntryImplBase& parent, const 
WeightedCluster& cluster);\n\n    uint64_t clusterWeight() const { return cluster_weight_; }\n\n    // Router::RouteEntry\n    const std::string& clusterName() const override { return cluster_name_; }\n    const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override {\n      return metadata_match_criteria_ ? metadata_match_criteria_.get()\n                                      : parent_.metadataMatchCriteria();\n    }\n\n    // Router::Route\n    const RouteEntry* routeEntry() const override { return this; }\n\n  private:\n    const RouteEntryImplBase& parent_;\n    const std::string cluster_name_;\n    const uint64_t cluster_weight_;\n    Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_;\n  };\n\n  using WeightedClusterEntrySharedPtr = std::shared_ptr<WeightedClusterEntry>;\n\n  uint64_t total_cluster_weight_;\n  const std::string cluster_name_;\n  const std::vector<Http::HeaderUtility::HeaderDataPtr> config_headers_;\n  std::vector<WeightedClusterEntrySharedPtr> weighted_clusters_;\n\n  // TODO(gengleilei) Implement it.\n  Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_;\n};\n\nusing RouteEntryImplBaseConstSharedPtr = std::shared_ptr<const RouteEntryImplBase>;\n\nclass ParameterRouteEntryImpl : public RouteEntryImplBase {\npublic:\n  ParameterRouteEntryImpl(const envoy::extensions::filters::network::dubbo_proxy::v3::Route& route);\n  ~ParameterRouteEntryImpl() override;\n\n  struct ParameterData {\n    using ParameterMatchSpecifier =\n        envoy::extensions::filters::network::dubbo_proxy::v3::MethodMatch::ParameterMatchSpecifier;\n    ParameterData(uint32_t index, const ParameterMatchSpecifier& config);\n\n    Http::HeaderUtility::HeaderMatchType match_type_;\n    std::string value_;\n    envoy::type::v3::Int64Range range_;\n    uint32_t index_;\n  };\n\n  // RoutEntryImplBase\n  RouteConstSharedPtr matches(const MessageMetadata& metadata,\n                              uint64_t random_value) 
const override;\n\nprivate:\n  bool matchParameter(absl::string_view request_data, const ParameterData& config_data) const;\n\n  std::vector<ParameterData> parameter_data_list_;\n};\n\nclass MethodRouteEntryImpl : public RouteEntryImplBase {\npublic:\n  MethodRouteEntryImpl(const envoy::extensions::filters::network::dubbo_proxy::v3::Route& route);\n  ~MethodRouteEntryImpl() override;\n\n  // RoutEntryImplBase\n  RouteConstSharedPtr matches(const MessageMetadata& metadata,\n                              uint64_t random_value) const override;\n\nprivate:\n  const Matchers::StringMatcherImpl method_name_;\n  std::shared_ptr<ParameterRouteEntryImpl> parameter_route_;\n};\n\nclass SingleRouteMatcherImpl : public RouteMatcher, public Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  using RouteConfig = envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration;\n  SingleRouteMatcherImpl(const RouteConfig& config, Server::Configuration::FactoryContext& context);\n\n  RouteConstSharedPtr route(const MessageMetadata& metadata, uint64_t random_value) const override;\n\nprivate:\n  std::vector<RouteEntryImplBaseConstSharedPtr> routes_;\n  const std::string service_name_;\n  const absl::optional<std::string> group_;\n  const absl::optional<std::string> version_;\n};\n\nclass MultiRouteMatcher : public RouteMatcher, public Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  using RouteConfigList = Envoy::Protobuf::RepeatedPtrField<\n      envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration>;\n  MultiRouteMatcher(const RouteConfigList& route_config_list,\n                    Server::Configuration::FactoryContext& context);\n\n  RouteConstSharedPtr route(const MessageMetadata& metadata, uint64_t random_value) const override;\n\nprivate:\n  std::vector<RouteMatcherPtr> route_matcher_list_;\n};\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/router/router.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/router/router.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\n/**\n * RouteEntry is an individual resolved route entry.\n */\nclass RouteEntry {\npublic:\n  virtual ~RouteEntry() = default;\n\n  /**\n   * @return const std::string& the upstream cluster that owns the route.\n   */\n  virtual const std::string& clusterName() const PURE;\n\n  /**\n   * @return MetadataMatchCriteria* the metadata that a subset load balancer should match when\n   * selecting an upstream host\n   */\n  virtual const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const PURE;\n};\n\nusing RouteEntryPtr = std::shared_ptr<RouteEntry>;\n\n/**\n * Route holds the RouteEntry for a request.\n */\nclass Route {\npublic:\n  virtual ~Route() = default;\n\n  /**\n   * @return the route entry or nullptr if there is no matching route for the request.\n   */\n  virtual const RouteEntry* routeEntry() const PURE;\n};\n\nusing RouteConstSharedPtr = std::shared_ptr<const Route>;\nusing RouteSharedPtr = std::shared_ptr<Route>;\n\n/**\n * The router configuration.\n */\nclass Config {\npublic:\n  virtual ~Config() = default;\n\n  /**\n   * Based on the incoming Dubbo request transport and/or protocol data, determine the target\n   * route for the request.\n   * @param metadata MessageMetadata for the message to route\n   * @param random_value uint64_t used to select cluster affinity\n   * @return the route or nullptr if there is no matching route for the request.\n   */\n  virtual RouteConstSharedPtr route(const MessageMetadata& metadata,\n                                    uint64_t random_value) const PURE;\n};\n\nusing ConfigConstSharedPtr = std::shared_ptr<const Config>;\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // 
namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/router/router_impl.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/router/router_impl.h\"\n\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/thread_local_cluster.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/app_exception.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\nvoid Router::onDestroy() {\n  if (upstream_request_) {\n    upstream_request_->resetStream();\n  }\n  cleanup();\n}\n\nvoid Router::setDecoderFilterCallbacks(DubboFilters::DecoderFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n}\n\nFilterStatus Router::onMessageDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) {\n  ASSERT(metadata->hasInvocationInfo());\n  const auto& invocation = metadata->invocationInfo();\n\n  route_ = callbacks_->route();\n  if (!route_) {\n    ENVOY_STREAM_LOG(debug, \"dubbo router: no cluster match for interface '{}'\", *callbacks_,\n                     invocation.serviceName());\n    callbacks_->sendLocalReply(AppException(ResponseStatus::ServiceNotFound,\n                                            fmt::format(\"dubbo router: no route for interface '{}'\",\n                                                        invocation.serviceName())),\n                               false);\n    return FilterStatus::StopIteration;\n  }\n\n  route_entry_ = route_->routeEntry();\n\n  Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(route_entry_->clusterName());\n  if (!cluster) {\n    ENVOY_STREAM_LOG(debug, \"dubbo router: unknown cluster '{}'\", *callbacks_,\n                     route_entry_->clusterName());\n    callbacks_->sendLocalReply(\n        AppException(ResponseStatus::ServerError, fmt::format(\"dubbo router: unknown cluster '{}'\",\n                                                              route_entry_->clusterName())),\n        false);\n    return FilterStatus::StopIteration;\n  }\n\n  cluster_ = cluster->info();\n  
ENVOY_STREAM_LOG(debug, \"dubbo router: cluster '{}' match for interface '{}'\", *callbacks_,\n                   route_entry_->clusterName(), invocation.serviceName());\n\n  if (cluster_->maintenanceMode()) {\n    callbacks_->sendLocalReply(\n        AppException(ResponseStatus::ServerError,\n                     fmt::format(\"dubbo router: maintenance mode for cluster '{}'\",\n                                 route_entry_->clusterName())),\n        false);\n    return FilterStatus::StopIteration;\n  }\n\n  Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster(\n      route_entry_->clusterName(), Upstream::ResourcePriority::Default, this);\n  if (!conn_pool) {\n    callbacks_->sendLocalReply(\n        AppException(\n            ResponseStatus::ServerError,\n            fmt::format(\"dubbo router: no healthy upstream for '{}'\", route_entry_->clusterName())),\n        false);\n    return FilterStatus::StopIteration;\n  }\n\n  ENVOY_STREAM_LOG(debug, \"dubbo router: decoding request\", *callbacks_);\n  upstream_request_buffer_.move(ctx->messageOriginData(), ctx->messageSize());\n\n  upstream_request_ = std::make_unique<UpstreamRequest>(\n      *this, *conn_pool, metadata, callbacks_->serializationType(), callbacks_->protocolType());\n  return upstream_request_->start();\n}\n\nvoid Router::onUpstreamData(Buffer::Instance& data, bool end_stream) {\n  ASSERT(!upstream_request_->response_complete_);\n\n  ENVOY_STREAM_LOG(trace, \"dubbo router: reading response: {} bytes\", *callbacks_, data.length());\n\n  // Handle normal response.\n  if (!upstream_request_->response_started_) {\n    callbacks_->startUpstreamResponse();\n    upstream_request_->response_started_ = true;\n  }\n\n  DubboFilters::UpstreamResponseStatus status = callbacks_->upstreamData(data);\n  if (status == DubboFilters::UpstreamResponseStatus::Complete) {\n    ENVOY_STREAM_LOG(debug, \"dubbo router: response complete\", *callbacks_);\n    
upstream_request_->onResponseComplete();\n    cleanup();\n    return;\n  } else if (status == DubboFilters::UpstreamResponseStatus::Reset) {\n    ENVOY_STREAM_LOG(debug, \"dubbo router: upstream reset\", *callbacks_);\n    // When the upstreamData function returns Reset,\n    // the current stream is already released from the upper layer,\n    // so there is no need to call callbacks_->resetStream() to notify\n    // the upper layer to release the stream.\n    upstream_request_->resetStream();\n    return;\n  }\n\n  if (end_stream) {\n    // Response is incomplete, but no more data is coming.\n    ENVOY_STREAM_LOG(debug, \"dubbo router: response underflow\", *callbacks_);\n    upstream_request_->onResetStream(ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n    upstream_request_->onResponseComplete();\n    cleanup();\n  }\n}\n\nvoid Router::onEvent(Network::ConnectionEvent event) {\n  if (!upstream_request_ || upstream_request_->response_complete_) {\n    // Client closed connection after completing response.\n    ENVOY_LOG(debug, \"dubbo upstream request: the upstream request had completed\");\n    return;\n  }\n\n  if (upstream_request_->stream_reset_ && event == Network::ConnectionEvent::LocalClose) {\n    ENVOY_LOG(debug, \"dubbo upstream request: the stream reset\");\n    return;\n  }\n\n  switch (event) {\n  case Network::ConnectionEvent::RemoteClose:\n    upstream_request_->onResetStream(ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n    break;\n  case Network::ConnectionEvent::LocalClose:\n    upstream_request_->onResetStream(ConnectionPool::PoolFailureReason::LocalConnectionFailure);\n    break;\n  default:\n    // Connected is consumed by the connection pool.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nconst Network::Connection* Router::downstreamConnection() const {\n  return callbacks_ != nullptr ? 
callbacks_->connection() : nullptr;\n}\n\nvoid Router::cleanup() {\n  if (upstream_request_) {\n    upstream_request_.reset();\n  }\n}\n\nRouter::UpstreamRequest::UpstreamRequest(Router& parent, Tcp::ConnectionPool::Instance& pool,\n                                         MessageMetadataSharedPtr& metadata,\n                                         SerializationType serialization_type,\n                                         ProtocolType protocol_type)\n    : parent_(parent), conn_pool_(pool), metadata_(metadata),\n      protocol_(\n          NamedProtocolConfigFactory::getFactory(protocol_type).createProtocol(serialization_type)),\n      request_complete_(false), response_started_(false), response_complete_(false),\n      stream_reset_(false) {}\n\nRouter::UpstreamRequest::~UpstreamRequest() = default;\n\nFilterStatus Router::UpstreamRequest::start() {\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(*this);\n  if (handle) {\n    // Pause while we wait for a connection.\n    conn_pool_handle_ = handle;\n    return FilterStatus::StopIteration;\n  }\n\n  return FilterStatus::Continue;\n}\n\nvoid Router::UpstreamRequest::resetStream() {\n  stream_reset_ = true;\n\n  if (conn_pool_handle_) {\n    ASSERT(!conn_data_);\n    conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default);\n    conn_pool_handle_ = nullptr;\n    ENVOY_LOG(debug, \"dubbo upstream request: reset connection pool handler\");\n  }\n\n  if (conn_data_) {\n    ASSERT(!conn_pool_handle_);\n    conn_data_->connection().close(Network::ConnectionCloseType::NoFlush);\n    conn_data_.reset();\n    ENVOY_LOG(debug, \"dubbo upstream request: reset connection data\");\n  }\n}\n\nvoid Router::UpstreamRequest::encodeData(Buffer::Instance& data) {\n  ASSERT(conn_data_);\n  ASSERT(!conn_pool_handle_);\n\n  ENVOY_STREAM_LOG(trace, \"proxying {} bytes\", *parent_.callbacks_, data.length());\n  conn_data_->connection().write(data, false);\n}\n\nvoid 
Router::UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                                            Upstream::HostDescriptionConstSharedPtr host) {\n  conn_pool_handle_ = nullptr;\n\n  // Mimic an upstream reset.\n  onUpstreamHostSelected(host);\n  onResetStream(reason);\n\n  parent_.upstream_request_buffer_.drain(parent_.upstream_request_buffer_.length());\n\n  // If it is a connection error, it means that the connection pool returned\n  // the error asynchronously and the upper layer needs to be notified to continue decoding.\n  // If it is a non-connection error, it is returned synchronously from the connection pool\n  // and is still in the callback at the current Filter, nothing to do.\n  if (reason == ConnectionPool::PoolFailureReason::Timeout ||\n      reason == ConnectionPool::PoolFailureReason::LocalConnectionFailure ||\n      reason == ConnectionPool::PoolFailureReason::RemoteConnectionFailure) {\n    parent_.callbacks_->continueDecoding();\n  }\n}\n\nvoid Router::UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data,\n                                          Upstream::HostDescriptionConstSharedPtr host) {\n  ENVOY_LOG(debug, \"dubbo upstream request: tcp connection has ready\");\n\n  // Only invoke continueDecoding if we'd previously stopped the filter chain.\n  bool continue_decoding = conn_pool_handle_ != nullptr;\n\n  onUpstreamHostSelected(host);\n  conn_data_ = std::move(conn_data);\n  conn_data_->addUpstreamCallbacks(parent_);\n  conn_pool_handle_ = nullptr;\n\n  onRequestStart(continue_decoding);\n  encodeData(parent_.upstream_request_buffer_);\n}\n\nvoid Router::UpstreamRequest::onRequestStart(bool continue_decoding) {\n  ENVOY_LOG(debug, \"dubbo upstream request: start sending data to the server {}\",\n            upstream_host_->address()->asString());\n\n  if (continue_decoding) {\n    parent_.callbacks_->continueDecoding();\n  }\n  onRequestComplete();\n}\n\nvoid 
Router::UpstreamRequest::onRequestComplete() { request_complete_ = true; }\n\nvoid Router::UpstreamRequest::onResponseComplete() {\n  response_complete_ = true;\n  conn_data_.reset();\n}\n\nvoid Router::UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) {\n  ENVOY_LOG(debug, \"dubbo upstream request: selected upstream {}\", host->address()->asString());\n  upstream_host_ = host;\n}\n\nvoid Router::UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason reason) {\n  if (metadata_->messageType() == MessageType::Oneway) {\n    // For oneway requests, we should not attempt a response. Reset the downstream to signal\n    // an error.\n    ENVOY_LOG(debug, \"dubbo upstream request: the request is oneway, reset downstream stream\");\n    parent_.callbacks_->resetStream();\n    return;\n  }\n\n  // When the filter's callback does not end, the sendLocalReply function call\n  // triggers the release of the current stream at the end of the filter's callback.\n  switch (reason) {\n  case ConnectionPool::PoolFailureReason::Overflow:\n    parent_.callbacks_->sendLocalReply(\n        AppException(ResponseStatus::ServerError,\n                     fmt::format(\"dubbo upstream request: too many connections\")),\n        false);\n    break;\n  case ConnectionPool::PoolFailureReason::LocalConnectionFailure:\n    // Should only happen if we closed the connection, due to an error condition, in which case\n    // we've already handled any possible downstream response.\n    parent_.callbacks_->sendLocalReply(\n        AppException(ResponseStatus::ServerError,\n                     fmt::format(\"dubbo upstream request: local connection failure '{}'\",\n                                 upstream_host_->address()->asString())),\n        false);\n    break;\n  case ConnectionPool::PoolFailureReason::RemoteConnectionFailure:\n    parent_.callbacks_->sendLocalReply(\n        AppException(ResponseStatus::ServerError,\n                     
fmt::format(\"dubbo upstream request: remote connection failure '{}'\",\n                                 upstream_host_->address()->asString())),\n        false);\n    break;\n  case ConnectionPool::PoolFailureReason::Timeout:\n    parent_.callbacks_->sendLocalReply(\n        AppException(ResponseStatus::ServerError,\n                     fmt::format(\"dubbo upstream request: connection failure '{}' due to timeout\",\n                                 upstream_host_->address()->asString())),\n        false);\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  if (parent_.filter_complete_ && !response_complete_) {\n    // When the filter's callback has ended and the reply message has not been processed,\n    // call resetStream to release the current stream.\n    // the resetStream eventually triggers the onDestroy function call.\n    parent_.callbacks_->resetStream();\n  }\n}\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/router/router_impl.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/tcp/conn_pool.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/filters/filter.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\nclass Router : public Tcp::ConnectionPool::UpstreamCallbacks,\n               public Upstream::LoadBalancerContextBase,\n               public DubboFilters::DecoderFilter,\n               Logger::Loggable<Logger::Id::dubbo> {\npublic:\n  Router(Upstream::ClusterManager& cluster_manager) : cluster_manager_(cluster_manager) {}\n  ~Router() override = default;\n\n  // DubboFilters::DecoderFilter\n  void onDestroy() override;\n  void setDecoderFilterCallbacks(DubboFilters::DecoderFilterCallbacks& callbacks) override;\n\n  FilterStatus onMessageDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) override;\n\n  // Upstream::LoadBalancerContextBase\n  const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override { return nullptr; }\n  const Network::Connection* downstreamConnection() const override;\n\n  // Tcp::ConnectionPool::UpstreamCallbacks\n  void onUpstreamData(Buffer::Instance& data, bool end_stream) override;\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\nprivate:\n  struct UpstreamRequest : public Tcp::ConnectionPool::Callbacks {\n    UpstreamRequest(Router& parent, Tcp::ConnectionPool::Instance& pool,\n                    MessageMetadataSharedPtr& metadata, SerializationType serialization_type,\n                    ProtocolType protocol_type);\n    ~UpstreamRequest() override;\n\n    FilterStatus start();\n    void 
resetStream();\n    void encodeData(Buffer::Instance& data);\n\n    // Tcp::ConnectionPool::Callbacks\n    void onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                       Upstream::HostDescriptionConstSharedPtr host) override;\n    void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn,\n                     Upstream::HostDescriptionConstSharedPtr host) override;\n\n    void onRequestStart(bool continue_decoding);\n    void onRequestComplete();\n    void onResponseComplete();\n    void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host);\n    void onResetStream(ConnectionPool::PoolFailureReason reason);\n\n    Router& parent_;\n    Tcp::ConnectionPool::Instance& conn_pool_;\n    MessageMetadataSharedPtr metadata_;\n\n    Tcp::ConnectionPool::Cancellable* conn_pool_handle_{};\n    Tcp::ConnectionPool::ConnectionDataPtr conn_data_;\n    Upstream::HostDescriptionConstSharedPtr upstream_host_;\n    SerializerPtr serializer_;\n    ProtocolPtr protocol_;\n\n    bool request_complete_ : 1;\n    bool response_started_ : 1;\n    bool response_complete_ : 1;\n    bool stream_reset_ : 1;\n  };\n\n  void cleanup();\n\n  Upstream::ClusterManager& cluster_manager_;\n\n  DubboFilters::DecoderFilterCallbacks* callbacks_{};\n  RouteConstSharedPtr route_{};\n  const RouteEntry* route_entry_{};\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n\n  std::unique_ptr<UpstreamRequest> upstream_request_;\n  Envoy::Buffer::OwnedImpl upstream_request_buffer_;\n\n  bool filter_complete_{false};\n};\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/serializer.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/typed_config.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/config/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/message.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/protocol_constants.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nclass Serializer {\npublic:\n  virtual ~Serializer() = default;\n\n  /**\n   * Return this Serializer's name\n   *\n   * @return std::string containing the serialization name.\n   */\n  virtual const std::string& name() const PURE;\n\n  /**\n   * @return SerializationType the serializer type\n   */\n  virtual SerializationType type() const PURE;\n\n  /**\n   * deserialize an rpc call\n   * If successful, the RpcInvocation removed from the buffer\n   *\n   * @param buffer the currently buffered dubbo data\n   * @param context context information for RPC messages\n   * @return a pair containing the deserialized result of the message and the deserialized\n   *         invocation information.\n   * @throws EnvoyException if the data is not valid for this serialization\n   */\n  virtual std::pair<RpcInvocationSharedPtr, bool>\n  deserializeRpcInvocation(Buffer::Instance& buffer, ContextSharedPtr context) PURE;\n\n  /**\n   * deserialize result of an rpc call\n   *\n   * @param buffer the currently buffered dubbo data\n   * @param context context information for RPC messages\n   * @return a pair containing the deserialized result of the message and the deserialized\n   *         result information.\n   * @throws EnvoyException if the data is not valid for this serialization\n   */\n  virtual std::pair<RpcResultSharedPtr, bool> deserializeRpcResult(Buffer::Instance& buffer,\n                                                  
                 ContextSharedPtr context) PURE;\n\n  /**\n   * serialize result of an rpc call\n   * If successful, the output_buffer is written to the serialized data\n   *\n   * @param output_buffer store the serialized data\n   * @param content the rpc response content\n   * @param type the rpc response type\n   * @return size_t the length of the serialized content\n   */\n  virtual size_t serializeRpcResult(Buffer::Instance& output_buffer, const std::string& content,\n                                    RpcResponseType type) PURE;\n};\n\nusing SerializerPtr = std::unique_ptr<Serializer>;\n\n/**\n * Implemented by each Dubbo serialize and registered via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass NamedSerializerConfigFactory : public Config::UntypedFactory {\npublic:\n  ~NamedSerializerConfigFactory() override = default;\n\n  /**\n   * Create a particular Dubbo serializer.\n   * @return SerializerPtr the transport\n   */\n  virtual SerializerPtr createSerializer() PURE;\n\n  std::string category() const override { return \"envoy.dubbo_proxy.serializers\"; }\n\n  /**\n   * Convenience method to lookup a factory by type.\n   * @param TransportType the transport type\n   * @return NamedSerializerConfigFactory& for the TransportType\n   */\n  static NamedSerializerConfigFactory& getFactory(ProtocolType protocol_type,\n                                                  SerializationType type) {\n    const std::string& name = ProtocolSerializerNames::get().fromType(protocol_type, type);\n    return Envoy::Config::Utility::getAndCheckFactoryByName<NamedSerializerConfigFactory>(name);\n  }\n};\n\n/**\n * SerializerFactoryBase provides a template for a trivial NamedSerializerConfigFactory.\n */\ntemplate <class SerializerImpl> class SerializerFactoryBase : public NamedSerializerConfigFactory {\npublic:\n  SerializerPtr createSerializer() override { return std::make_unique<SerializerImpl>(); }\n\n  std::string name() const override 
{ return name_; }\n\nprotected:\n  SerializerFactoryBase(ProtocolType protocol_type, SerializationType type)\n      : name_(ProtocolSerializerNames::get().fromType(protocol_type, type)) {}\n\nprivate:\n  const std::string name_;\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/serializer_impl.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/serializer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nvoid RpcInvocationImpl::addParameterValue(uint32_t index, const std::string& value) {\n  assignParameterIfNeed();\n  parameter_map_->emplace(index, value);\n}\n\nconst std::string& RpcInvocationImpl::getParameterValue(uint32_t index) const {\n  if (parameter_map_) {\n    auto itor = parameter_map_->find(index);\n    if (itor != parameter_map_->end()) {\n      return itor->second;\n    }\n  }\n\n  return EMPTY_STRING;\n}\n\nconst RpcInvocationImpl::ParameterValueMap& RpcInvocationImpl::parameters() {\n  ASSERT(hasParameters());\n  return *parameter_map_;\n}\n\nconst Http::HeaderMap& RpcInvocationImpl::headers() const {\n  ASSERT(hasHeaders());\n  return *headers_;\n}\n\nvoid RpcInvocationImpl::addHeader(const std::string& key, const std::string& value) {\n  assignHeaderIfNeed();\n  headers_->addCopy(Http::LowerCaseString(key), value);\n}\n\nvoid RpcInvocationImpl::addHeaderReference(const Http::LowerCaseString& key,\n                                           const std::string& value) {\n  assignHeaderIfNeed();\n  headers_->addReference(key, value);\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/serializer_impl.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/dubbo_proxy/message_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nclass RpcInvocationImpl : public RpcInvocationBase {\npublic:\n  // TODO(gengleilei) Add parameter data types and implement Dubbo data type mapping.\n  using ParameterValueMap = absl::node_hash_map<uint32_t, std::string>;\n  using ParameterValueMapPtr = std::unique_ptr<ParameterValueMap>;\n\n  RpcInvocationImpl() = default;\n  ~RpcInvocationImpl() override = default;\n\n  void addParameterValue(uint32_t index, const std::string& value);\n  const ParameterValueMap& parameters();\n  const std::string& getParameterValue(uint32_t index) const;\n  bool hasParameters() const { return parameter_map_ != nullptr; }\n\n  void addHeader(const std::string& key, const std::string& value);\n  void addHeaderReference(const Http::LowerCaseString& key, const std::string& value);\n  const Http::HeaderMap& headers() const;\n  bool hasHeaders() const { return headers_ != nullptr; }\n\nprivate:\n  inline void assignHeaderIfNeed() {\n    if (!headers_) {\n      headers_ = Http::RequestHeaderMapImpl::create();\n    }\n  }\n\n  inline void assignParameterIfNeed() {\n    if (!parameter_map_) {\n      parameter_map_ = std::make_unique<ParameterValueMap>();\n    }\n  }\n\n  ParameterValueMapPtr parameter_map_;\n  Http::HeaderMapPtr headers_; // attachment\n};\n\nclass RpcResultImpl : public RpcResult {\npublic:\n  RpcResultImpl() = default;\n  ~RpcResultImpl() override = default;\n\n  bool hasException() const override { return has_exception_; }\n  void setException(bool has_exception) { has_exception_ = has_exception; }\n\nprivate:\n  bool has_exception_ = false;\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/dubbo_proxy/stats.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\n/**\n * All dubbo filter stats. @see stats_macros.h\n */\n#define ALL_DUBBO_FILTER_STATS(COUNTER, GAUGE, HISTOGRAM)                                          \\\n  COUNTER(cx_destroy_local_with_active_rq)                                                         \\\n  COUNTER(cx_destroy_remote_with_active_rq)                                                        \\\n  COUNTER(local_response_business_exception)                                                       \\\n  COUNTER(local_response_error)                                                                    \\\n  COUNTER(local_response_success)                                                                  \\\n  COUNTER(request)                                                                                 \\\n  COUNTER(request_decoding_error)                                                                  \\\n  COUNTER(request_decoding_success)                                                                \\\n  COUNTER(request_event)                                                                           \\\n  COUNTER(request_oneway)                                                                          \\\n  COUNTER(request_twoway)                                                                          \\\n  COUNTER(response)                                                                                \\\n  COUNTER(response_business_exception)                                                             \\\n  COUNTER(response_decoding_error)                                                                 \\\n  COUNTER(response_decoding_success)                                                               \\\n  COUNTER(response_error)                                          
                                \\\n  COUNTER(response_error_caused_connection_close)                                                  \\\n  COUNTER(response_success)                                                                        \\\n  GAUGE(request_active, Accumulate)                                                                \\\n  HISTOGRAM(request_time_ms, Milliseconds)\n\n/**\n * Struct definition for all dubbo proxy stats. @see stats_macros.h\n */\nstruct DubboFilterStats {\n  ALL_DUBBO_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n\n  static DubboFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return DubboFilterStats{ALL_DUBBO_FILTER_STATS(POOL_COUNTER_PREFIX(scope, prefix),\n                                                   POOL_GAUGE_PREFIX(scope, prefix),\n                                                   POOL_HISTOGRAM_PREFIX(scope, prefix))};\n  }\n};\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/echo/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Echo L4 network filter. This is primarily a simplistic example.\n# Public docs: docs/root/configuration/network_filters/echo_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"echo\",\n    srcs = [\"echo.cc\"],\n    hdrs = [\"echo.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    # TODO(#9953) move echo integration test to extensions.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"unknown\",\n    deps = [\n        \":echo\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/echo/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/echo/config.cc",
    "content": "#include \"envoy/extensions/filters/network/echo/v3/echo.pb.h\"\n#include \"envoy/extensions/filters/network/echo/v3/echo.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/echo/echo.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Echo {\n\n/**\n * Config registration for the echo filter. @see NamedNetworkFilterConfigFactory.\n */\nclass EchoConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::network::echo::v3::Echo> {\npublic:\n  EchoConfigFactory() : FactoryBase(NetworkFilterNames::get().Echo) {}\n\nprivate:\n  Network::FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const envoy::extensions::filters::network::echo::v3::Echo&,\n                                    Server::Configuration::FactoryContext&) override {\n    return [](Network::FilterManager& filter_manager) -> void {\n      filter_manager.addReadFilter(std::make_shared<EchoFilter>());\n    };\n  }\n\n  bool isTerminalFilter() override { return true; }\n};\n\n/**\n * Static registration for the echo filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(EchoConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory){\"envoy.echo\"};\n\n} // namespace Echo\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/echo/echo.cc",
    "content": "#include \"extensions/filters/network/echo/echo.h\"\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Echo {\n\nNetwork::FilterStatus EchoFilter::onData(Buffer::Instance& data, bool end_stream) {\n  ENVOY_CONN_LOG(trace, \"echo: got {} bytes\", read_callbacks_->connection(), data.length());\n  read_callbacks_->connection().write(data, end_stream);\n  ASSERT(0 == data.length());\n  return Network::FilterStatus::StopIteration;\n}\n\n} // namespace Echo\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/echo/echo.h",
    "content": "#pragma once\n\n#include \"envoy/network/filter.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Echo {\n\n/**\n * Implementation of a basic echo filter.\n */\nclass EchoFilter : public Network::ReadFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; }\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n  }\n\nprivate:\n  Network::ReadFilterCallbacks* read_callbacks_{};\n};\n\n} // namespace Echo\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/ext_authz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# External authorization L4 network filter\n# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/network_filters\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"ext_authz\",\n    srcs = [\"ext_authz.cc\"],\n    hdrs = [\"ext_authz.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib\",\n        \"//source/extensions/filters/common/ext_authz:ext_authz_interface\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"//source/extensions/filters/network/ext_authz\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/ext_authz/config.cc",
    "content": "#include \"extensions/filters/network/ext_authz/config.h\"\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.validate.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz.h\"\n#include \"extensions/filters/common/ext_authz/ext_authz_grpc_impl.h\"\n#include \"extensions/filters/network/ext_authz/ext_authz.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ExtAuthz {\n\nNetwork::FilterFactoryCb ExtAuthzConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::ext_authz::v3::ExtAuthz& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  ConfigSharedPtr ext_authz_config = std::make_shared<Config>(proto_config, context.scope());\n  const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, 200);\n\n  return [grpc_service = proto_config.grpc_service(), &context, ext_authz_config,\n          transport_api_version = proto_config.transport_api_version(),\n          timeout_ms](Network::FilterManager& filter_manager) -> void {\n    auto async_client_factory =\n        context.clusterManager().grpcAsyncClientManager().factoryForGrpcService(\n            grpc_service, context.scope(), true);\n\n    auto client = std::make_unique<Filters::Common::ExtAuthz::GrpcClientImpl>(\n        async_client_factory->create(), std::chrono::milliseconds(timeout_ms),\n        transport_api_version, false);\n    filter_manager.addReadFilter(Network::ReadFilterSharedPtr{\n        std::make_shared<Filter>(ext_authz_config, std::move(client))});\n  };\n}\n\n/**\n * Static registration for the external authorization filter. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(ExtAuthzConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory){\"envoy.ext_authz\"};\n\n} // namespace ExtAuthz\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/ext_authz/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ExtAuthz {\n\n/**\n * Config registration for the  external authorization filter. @see NamedNetworkFilterConfigFactory.\n */\nclass ExtAuthzConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::network::ext_authz::v3::ExtAuthz> {\npublic:\n  ExtAuthzConfigFactory() : FactoryBase(NetworkFilterNames::get().ExtAuthorization) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::ext_authz::v3::ExtAuthz& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace ExtAuthz\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/ext_authz/ext_authz.cc",
    "content": "#include \"extensions/filters/network/ext_authz/ext_authz.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ExtAuthz {\n\nInstanceStats Config::generateStats(const std::string& name, Stats::Scope& scope) {\n  const std::string final_prefix = fmt::format(\"ext_authz.{}.\", name);\n  return {ALL_TCP_EXT_AUTHZ_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),\n                                  POOL_GAUGE_PREFIX(scope, final_prefix))};\n}\n\nvoid Filter::callCheck() {\n  Filters::Common::ExtAuthz::CheckRequestUtils::createTcpCheck(filter_callbacks_, check_request_,\n                                                               config_->includePeerCertificate());\n\n  status_ = Status::Calling;\n  config_->stats().active_.inc();\n  config_->stats().total_.inc();\n\n  calling_check_ = true;\n  auto& connection = filter_callbacks_->connection();\n  client_->check(*this, connection.dispatcher(), check_request_, Tracing::NullSpan::instance(),\n                 connection.streamInfo());\n  calling_check_ = false;\n}\n\nNetwork::FilterStatus Filter::onData(Buffer::Instance&, bool /* end_stream */) {\n  if (!filterEnabled(filter_callbacks_->connection().streamInfo().dynamicMetadata())) {\n    config_->stats().disabled_.inc();\n    return Network::FilterStatus::Continue;\n  }\n\n  if (status_ == Status::NotStarted) {\n    // By waiting to invoke the check at onData() the call to authorization service will have\n    // sufficient information to fill out the checkRequest_.\n    callCheck();\n  }\n  return filter_return_ == FilterReturn::Stop ? 
Network::FilterStatus::StopIteration\n                                              : Network::FilterStatus::Continue;\n}\n\nNetwork::FilterStatus Filter::onNewConnection() {\n  // Wait till onData() happens.\n  return Network::FilterStatus::Continue;\n}\n\nvoid Filter::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    if (status_ == Status::Calling) {\n      // Make sure that any pending request in the client is cancelled. This will be NOP if the\n      // request already completed.\n      client_->cancel();\n      config_->stats().active_.dec();\n    }\n  }\n}\n\nvoid Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) {\n  status_ = Status::Complete;\n  config_->stats().active_.dec();\n\n  switch (response->status) {\n  case Filters::Common::ExtAuthz::CheckStatus::OK:\n    config_->stats().ok_.inc();\n    break;\n  case Filters::Common::ExtAuthz::CheckStatus::Error:\n    config_->stats().error_.inc();\n    if (response->error_kind == Filters::Common::ExtAuthz::ErrorKind::Timedout) {\n      config_->stats().timeout_.inc();\n    }\n    break;\n  case Filters::Common::ExtAuthz::CheckStatus::Denied:\n    config_->stats().denied_.inc();\n    break;\n  }\n\n  // Fail open only if configured to do so and if the check status was a error.\n  if (response->status == Filters::Common::ExtAuthz::CheckStatus::Denied ||\n      (response->status == Filters::Common::ExtAuthz::CheckStatus::Error &&\n       !config_->failureModeAllow())) {\n    config_->stats().cx_closed_.inc();\n    filter_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n  } else {\n    // Let the filter chain continue.\n    filter_return_ = FilterReturn::Continue;\n    if (config_->failureModeAllow() &&\n        response->status == Filters::Common::ExtAuthz::CheckStatus::Error) {\n      // Status is Error and yet we are configured to allow traffic. 
Click a counter.\n      config_->stats().failure_mode_allowed_.inc();\n    }\n\n    if (!response->dynamic_metadata.fields().empty()) {\n      filter_callbacks_->connection().streamInfo().setDynamicMetadata(\n          NetworkFilterNames::get().ExtAuthorization, response->dynamic_metadata);\n    }\n\n    // We can get completion inline, so only call continue if that isn't happening.\n    if (!calling_check_) {\n      filter_callbacks_->continueReading();\n    }\n  }\n}\n\n} // namespace ExtAuthz\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/ext_authz/ext_authz.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/matchers.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz.h\"\n#include \"extensions/filters/common/ext_authz/ext_authz_grpc_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ExtAuthz {\n\n/**\n * All tcp external authorization stats. @see stats_macros.h\n */\n#define ALL_TCP_EXT_AUTHZ_STATS(COUNTER, GAUGE)                                                    \\\n  COUNTER(cx_closed)                                                                               \\\n  COUNTER(denied)                                                                                  \\\n  COUNTER(error)                                                                                   \\\n  COUNTER(timeout)                                                                                 \\\n  COUNTER(failure_mode_allowed)                                                                    \\\n  COUNTER(ok)                                                                                      \\\n  COUNTER(total)                                                                                   \\\n  COUNTER(disabled)                                                                                \\\n  GAUGE(active, Accumulate)\n\n/**\n * Struct definition for all external authorization stats. 
@see stats_macros.h\n */\nstruct InstanceStats {\n  ALL_TCP_EXT_AUTHZ_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Global configuration for ExtAuthz filter.\n */\nclass Config {\npublic:\n  Config(const envoy::extensions::filters::network::ext_authz::v3::ExtAuthz& config,\n         Stats::Scope& scope)\n      : stats_(generateStats(config.stat_prefix(), scope)),\n        failure_mode_allow_(config.failure_mode_allow()),\n        include_peer_certificate_(config.include_peer_certificate()),\n        filter_enabled_metadata_(\n            config.has_filter_enabled_metadata()\n                ? absl::optional<Matchers::MetadataMatcher>(config.filter_enabled_metadata())\n                : absl::nullopt) {}\n\n  const InstanceStats& stats() { return stats_; }\n  bool failureModeAllow() const { return failure_mode_allow_; }\n  void setFailModeAllow(bool value) { failure_mode_allow_ = value; }\n  bool includePeerCertificate() const { return include_peer_certificate_; }\n  bool filterEnabledMetadata(const envoy::config::core::v3::Metadata& metadata) const {\n    return filter_enabled_metadata_.has_value() ? filter_enabled_metadata_->match(metadata) : true;\n  }\n\nprivate:\n  static InstanceStats generateStats(const std::string& name, Stats::Scope& scope);\n  const InstanceStats stats_;\n  bool failure_mode_allow_;\n  const bool include_peer_certificate_;\n  const absl::optional<Matchers::MetadataMatcher> filter_enabled_metadata_;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\n/**\n * ExtAuthz filter instance. This filter will call the Authorization service with the given\n * configuration parameters. If the authorization service returns an error or a deny the\n * connection will be closed without any further filters being called. 
Otherwise all buffered\n * data will be released to further filters.\n */\nclass Filter : public Network::ReadFilter,\n               public Network::ConnectionCallbacks,\n               public Filters::Common::ExtAuthz::RequestCallbacks {\npublic:\n  Filter(ConfigSharedPtr config, Filters::Common::ExtAuthz::ClientPtr&& client)\n      : config_(config), client_(std::move(client)) {}\n  ~Filter() override = default;\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    filter_callbacks_ = &callbacks;\n    filter_callbacks_->connection().addConnectionCallbacks(*this);\n  }\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n  // ExtAuthz::RequestCallbacks\n  void onComplete(Filters::Common::ExtAuthz::ResponsePtr&&) override;\n\nprivate:\n  // State of this filter's communication with the external authorization service.\n  // The filter has either not started calling the external service, in the middle of calling\n  // it or has completed.\n  enum class Status { NotStarted, Calling, Complete };\n  // FilterReturn is used to capture what the return code should be to the filter chain.\n  // if this filter is either in the middle of calling the external service or the result is denied\n  // then the filter chain should stop. 
Otherwise the filter chain can continue to the next filter.\n  enum class FilterReturn { Stop, Continue };\n  void callCheck();\n\n  bool filterEnabled(const envoy::config::core::v3::Metadata& metadata) {\n    return config_->filterEnabledMetadata(metadata);\n  }\n\n  ConfigSharedPtr config_;\n  Filters::Common::ExtAuthz::ClientPtr client_;\n  Network::ReadFilterCallbacks* filter_callbacks_{};\n  Status status_{Status::NotStarted};\n  FilterReturn filter_return_{FilterReturn::Stop};\n  // Used to identify if the callback to onComplete() is synchronous (on the stack) or asynchronous.\n  bool calling_check_{};\n  envoy::service::auth::v3::CheckRequest check_request_{};\n};\n} // namespace ExtAuthz\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/http_connection_manager/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# L4 network filter that implements HTTP protocol handling and filtering. This filter internally\n# drives all of the L7 HTTP filters.\n# Public docs: docs/root/configuration/http_conn_man/http_conn_man.rst\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    # This is core Envoy config.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/config:config_provider_manager_interface\",\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/router:route_config_provider_manager_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:options_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/access_log:access_log_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/filter/http:filter_config_discovery_lib\",\n        \"//source/common/http:conn_manager_lib\",\n        \"//source/common/http:default_server_string_lib\",\n        \"//source/common/http:request_id_extension_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/http/http1:codec_legacy_lib\",\n        \"//source/common/http/http1:codec_lib\",\n        \"//source/common/http/http2:codec_legacy_lib\",\n        \"//source/common/http/http2:codec_lib\",\n        \"//source/common/json:json_loader_lib\",\n        
\"//source/common/local_reply:local_reply_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/router:rds_lib\",\n        \"//source/common/router:scoped_rds_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/tracing:http_tracer_config_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/common/tracing:http_tracer_manager_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/tracing/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/http_connection_manager/config.cc",
    "content": "#include \"extensions/filters/network/http_connection_manager/config.h\"\n\n#include <chrono>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h\"\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/type/tracing/v3/custom_tag.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/access_log/access_log_impl.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/utility.h\"\n#include \"common/filter/http/filter_config_discovery_impl.h\"\n#include \"common/http/conn_manager_utility.h\"\n#include \"common/http/default_server_string.h\"\n#include \"common/http/http1/codec_impl.h\"\n#include \"common/http/http1/codec_impl_legacy.h\"\n#include \"common/http/http2/codec_impl.h\"\n#include \"common/http/http2/codec_impl_legacy.h\"\n#include \"common/http/http3/quic_codec_factory.h\"\n#include \"common/http/http3/well_known_names.h\"\n#include \"common/http/request_id_extension_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/local_reply/local_reply.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/rds_impl.h\"\n#include \"common/router/scoped_rds.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/tracing/http_tracer_config_impl.h\"\n#include \"common/tracing/http_tracer_manager_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace HttpConnectionManager {\nnamespace {\n\nusing FilterFactoriesList = std::list<Http::FilterFactoryCb>;\nusing FilterFactoryMap = std::map<std::string, 
HttpConnectionManagerConfig::FilterConfig>;\n\nHttpConnectionManagerConfig::UpgradeMap::const_iterator\nfindUpgradeBoolCaseInsensitive(const HttpConnectionManagerConfig::UpgradeMap& upgrade_map,\n                               absl::string_view upgrade_type) {\n  for (auto it = upgrade_map.begin(); it != upgrade_map.end(); ++it) {\n    if (StringUtil::CaseInsensitiveCompare()(it->first, upgrade_type)) {\n      return it;\n    }\n  }\n  return upgrade_map.end();\n}\n\nFilterFactoryMap::const_iterator findUpgradeCaseInsensitive(const FilterFactoryMap& upgrade_map,\n                                                            absl::string_view upgrade_type) {\n  for (auto it = upgrade_map.begin(); it != upgrade_map.end(); ++it) {\n    if (StringUtil::CaseInsensitiveCompare()(it->first, upgrade_type)) {\n      return it;\n    }\n  }\n  return upgrade_map.end();\n}\n\nstd::unique_ptr<Http::InternalAddressConfig> createInternalAddressConfig(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        config) {\n  if (config.has_internal_address_config()) {\n    return std::make_unique<InternalAddressConfig>(config.internal_address_config());\n  }\n\n  return std::make_unique<Http::DefaultInternalAddressConfig>();\n}\n\nclass MissingConfigFilter : public Http::PassThroughDecoderFilter {\npublic:\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override {\n    decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound);\n    decoder_callbacks_->sendLocalReply(Http::Code::InternalServerError, EMPTY_STRING, nullptr,\n                                       absl::nullopt, EMPTY_STRING);\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n};\n\n} // namespace\n\n// Singleton registration via macro defined in 
envoy/singleton/manager.h\nSINGLETON_MANAGER_REGISTRATION(date_provider);\nSINGLETON_MANAGER_REGISTRATION(route_config_provider_manager);\nSINGLETON_MANAGER_REGISTRATION(scoped_routes_config_provider_manager);\nSINGLETON_MANAGER_REGISTRATION(http_tracer_manager);\nSINGLETON_MANAGER_REGISTRATION(filter_config_provider_manager);\n\nUtility::Singletons Utility::createSingletons(Server::Configuration::FactoryContext& context) {\n  std::shared_ptr<Http::TlsCachingDateProviderImpl> date_provider =\n      context.singletonManager().getTyped<Http::TlsCachingDateProviderImpl>(\n          SINGLETON_MANAGER_REGISTERED_NAME(date_provider), [&context] {\n            return std::make_shared<Http::TlsCachingDateProviderImpl>(context.dispatcher(),\n                                                                      context.threadLocal());\n          });\n\n  Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager =\n      context.singletonManager().getTyped<Router::RouteConfigProviderManager>(\n          SINGLETON_MANAGER_REGISTERED_NAME(route_config_provider_manager), [&context] {\n            return std::make_shared<Router::RouteConfigProviderManagerImpl>(context.admin());\n          });\n\n  Router::ScopedRoutesConfigProviderManagerSharedPtr scoped_routes_config_provider_manager =\n      context.singletonManager().getTyped<Router::ScopedRoutesConfigProviderManager>(\n          SINGLETON_MANAGER_REGISTERED_NAME(scoped_routes_config_provider_manager),\n          [&context, route_config_provider_manager] {\n            return std::make_shared<Router::ScopedRoutesConfigProviderManager>(\n                context.admin(), *route_config_provider_manager);\n          });\n\n  auto http_tracer_manager = context.singletonManager().getTyped<Tracing::HttpTracerManagerImpl>(\n      SINGLETON_MANAGER_REGISTERED_NAME(http_tracer_manager), [&context] {\n        return std::make_shared<Tracing::HttpTracerManagerImpl>(\n            
std::make_unique<Tracing::TracerFactoryContextImpl>(\n                context.getServerFactoryContext(), context.messageValidationVisitor()));\n      });\n\n  std::shared_ptr<Filter::Http::FilterConfigProviderManager> filter_config_provider_manager =\n      context.singletonManager().getTyped<Filter::Http::FilterConfigProviderManager>(\n          SINGLETON_MANAGER_REGISTERED_NAME(filter_config_provider_manager),\n          [] { return std::make_shared<Filter::Http::FilterConfigProviderManagerImpl>(); });\n\n  return {date_provider, route_config_provider_manager, scoped_routes_config_provider_manager,\n          http_tracer_manager, filter_config_provider_manager};\n}\n\nstd::shared_ptr<HttpConnectionManagerConfig> Utility::createConfig(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        proto_config,\n    Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider,\n    Router::RouteConfigProviderManager& route_config_provider_manager,\n    Config::ConfigProviderManager& scoped_routes_config_provider_manager,\n    Tracing::HttpTracerManager& http_tracer_manager,\n    Filter::Http::FilterConfigProviderManager& filter_config_provider_manager) {\n  return std::make_shared<HttpConnectionManagerConfig>(\n      proto_config, context, date_provider, route_config_provider_manager,\n      scoped_routes_config_provider_manager, http_tracer_manager, filter_config_provider_manager);\n}\n\nNetwork::FilterFactoryCb\nHttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        proto_config,\n    Server::Configuration::FactoryContext& context) {\n  Utility::Singletons singletons = Utility::createSingletons(context);\n\n  auto filter_config = Utility::createConfig(\n      proto_config, context, *singletons.date_provider_, *singletons.route_config_provider_manager_,\n      
*singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_,\n      *singletons.filter_config_provider_manager_);\n\n  // This lambda captures the shared_ptrs created above, thus preserving the\n  // reference count.\n  // Keep in mind the lambda capture list **doesn't** determine the destruction order, but it's fine\n  // as these captured objects are also global singletons.\n  return [singletons, filter_config, &context](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl(\n        *filter_config, context.drainDecision(), context.api().randomGenerator(),\n        context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(),\n        context.overloadManager(), context.dispatcher().timeSource())});\n  };\n}\n\n/**\n * Static registration for the HTTP connection manager filter.\n */\nREGISTER_FACTORY(HttpConnectionManagerFilterConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory){\n    \"envoy.http_connection_manager\"};\n\nInternalAddressConfig::InternalAddressConfig(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n        InternalAddressConfig& config)\n    : unix_sockets_(config.unix_sockets()) {}\n\nHttpConnectionManagerConfig::HttpConnectionManagerConfig(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        config,\n    Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider,\n    Router::RouteConfigProviderManager& route_config_provider_manager,\n    Config::ConfigProviderManager& scoped_routes_config_provider_manager,\n    Tracing::HttpTracerManager& http_tracer_manager,\n    Filter::Http::FilterConfigProviderManager& filter_config_provider_manager)\n    : context_(context), stats_prefix_(fmt::format(\"http.{}.\", config.stat_prefix())),\n      
stats_(Http::ConnectionManagerImpl::generateStats(stats_prefix_, context_.scope())),\n      tracing_stats_(\n          Http::ConnectionManagerImpl::generateTracingStats(stats_prefix_, context_.scope())),\n      use_remote_address_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, use_remote_address, false)),\n      internal_address_config_(createInternalAddressConfig(config)),\n      xff_num_trusted_hops_(config.xff_num_trusted_hops()),\n      skip_xff_append_(config.skip_xff_append()), via_(config.via()),\n      route_config_provider_manager_(route_config_provider_manager),\n      scoped_routes_config_provider_manager_(scoped_routes_config_provider_manager),\n      filter_config_provider_manager_(filter_config_provider_manager),\n      http2_options_(Http2::Utility::initializeAndValidateOptions(\n          config.http2_protocol_options(), config.has_stream_error_on_invalid_http_message(),\n          config.stream_error_on_invalid_http_message())),\n      http1_settings_(Http::Utility::parseHttp1Settings(\n          config.http_protocol_options(), config.stream_error_on_invalid_http_message())),\n      max_request_headers_kb_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, max_request_headers_kb, Http::DEFAULT_MAX_REQUEST_HEADERS_KB)),\n      max_request_headers_count_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config.common_http_protocol_options(), max_headers_count,\n          context.runtime().snapshot().getInteger(Http::MaxRequestHeadersCountOverrideKey,\n                                                  Http::DEFAULT_MAX_HEADERS_COUNT))),\n      idle_timeout_(PROTOBUF_GET_OPTIONAL_MS(config.common_http_protocol_options(), idle_timeout)),\n      max_connection_duration_(\n          PROTOBUF_GET_OPTIONAL_MS(config.common_http_protocol_options(), max_connection_duration)),\n      max_stream_duration_(\n          PROTOBUF_GET_OPTIONAL_MS(config.common_http_protocol_options(), max_stream_duration)),\n      stream_idle_timeout_(\n          
PROTOBUF_GET_MS_OR_DEFAULT(config, stream_idle_timeout, StreamIdleTimeoutMs)),\n      request_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, request_timeout, RequestTimeoutMs)),\n      drain_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, drain_timeout, 5000)),\n      generate_request_id_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, generate_request_id, true)),\n      preserve_external_request_id_(config.preserve_external_request_id()),\n      always_set_request_id_in_response_(config.always_set_request_id_in_response()),\n      date_provider_(date_provider),\n      listener_stats_(Http::ConnectionManagerImpl::generateListenerStats(stats_prefix_,\n                                                                         context_.listenerScope())),\n      proxy_100_continue_(config.proxy_100_continue()),\n      stream_error_on_invalid_http_messaging_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, stream_error_on_invalid_http_message, false)),\n      delayed_close_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, delayed_close_timeout, 1000)),\n#ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT\n      normalize_path_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, normalize_path,\n          // TODO(htuch): we should have a boolean variant of featureEnabled() here.\n          context.runtime().snapshot().featureEnabled(\"http_connection_manager.normalize_path\",\n                                                      100))),\n#else\n      normalize_path_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n          config, normalize_path,\n          // TODO(htuch): we should have a boolean variant of featureEnabled() here.\n          context.runtime().snapshot().featureEnabled(\"http_connection_manager.normalize_path\",\n                                                      0))),\n#endif\n      merge_slashes_(config.merge_slashes()),\n      strip_matching_port_(config.strip_matching_host_port()),\n      headers_with_underscores_action_(\n          
config.common_http_protocol_options().headers_with_underscores_action()),\n      local_reply_(LocalReply::Factory::create(config.local_reply_config(), context)) {\n  // If idle_timeout_ was not configured in common_http_protocol_options, use value in deprecated\n  // idle_timeout field.\n  // TODO(asraa): Remove when idle_timeout is removed.\n  if (!idle_timeout_) {\n    idle_timeout_ = PROTOBUF_GET_OPTIONAL_MS(config, hidden_envoy_deprecated_idle_timeout);\n  }\n  if (!idle_timeout_) {\n    idle_timeout_ = std::chrono::hours(1);\n  } else if (idle_timeout_.value().count() == 0) {\n    idle_timeout_ = absl::nullopt;\n  }\n\n  // If we are provided a different request_id_extension implementation to use try and create a new\n  // instance of it, otherwise use default one.\n  if (config.request_id_extension().has_typed_config()) {\n    request_id_extension_ =\n        Http::RequestIDExtensionFactory::fromProto(config.request_id_extension(), context_);\n  } else {\n    request_id_extension_ =\n        Http::RequestIDExtensionFactory::defaultInstance(context_.api().randomGenerator());\n  }\n\n  // If scoped RDS is enabled, avoid creating a route config provider. 
Route config providers will\n  // be managed by the scoped routing logic instead.\n  switch (config.route_specifier_case()) {\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      RouteSpecifierCase::kRds:\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      RouteSpecifierCase::kRouteConfig:\n    route_config_provider_ = Router::RouteConfigProviderUtil::create(\n        config, context_.getServerFactoryContext(), context_.messageValidationVisitor(),\n        context_.initManager(), stats_prefix_, route_config_provider_manager_);\n    break;\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      RouteSpecifierCase::kScopedRoutes:\n    scoped_routes_config_provider_ = Router::ScopedRoutesConfigProviderUtil::create(\n        config, context_.getServerFactoryContext(), context_.initManager(), stats_prefix_,\n        scoped_routes_config_provider_manager_);\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  switch (config.forward_client_cert_details()) {\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SANITIZE:\n    forward_client_cert_ = Http::ForwardClientCertType::Sanitize;\n    break;\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      FORWARD_ONLY:\n    forward_client_cert_ = Http::ForwardClientCertType::ForwardOnly;\n    break;\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD:\n    forward_client_cert_ = Http::ForwardClientCertType::AppendForward;\n    break;\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SANITIZE_SET:\n    forward_client_cert_ = Http::ForwardClientCertType::SanitizeSet;\n    break;\n  case 
envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      ALWAYS_FORWARD_ONLY:\n    forward_client_cert_ = Http::ForwardClientCertType::AlwaysForwardOnly;\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  const auto& set_current_client_cert_details = config.set_current_client_cert_details();\n  if (set_current_client_cert_details.cert()) {\n    set_current_client_cert_details_.push_back(Http::ClientCertDetailsType::Cert);\n  }\n  if (set_current_client_cert_details.chain()) {\n    set_current_client_cert_details_.push_back(Http::ClientCertDetailsType::Chain);\n  }\n  if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(set_current_client_cert_details, subject, false)) {\n    set_current_client_cert_details_.push_back(Http::ClientCertDetailsType::Subject);\n  }\n  if (set_current_client_cert_details.uri()) {\n    set_current_client_cert_details_.push_back(Http::ClientCertDetailsType::URI);\n  }\n  if (set_current_client_cert_details.dns()) {\n    set_current_client_cert_details_.push_back(Http::ClientCertDetailsType::DNS);\n  }\n\n  if (config.has_add_user_agent() && config.add_user_agent().value()) {\n    user_agent_ = context_.localInfo().clusterName();\n  }\n\n  if (config.has_tracing()) {\n    http_tracer_ = http_tracer_manager.getOrCreateHttpTracer(getPerFilterTracerConfig(config));\n\n    const auto& tracing_config = config.tracing();\n\n    Tracing::OperationName tracing_operation_name;\n\n    // Listener level traffic direction overrides the operation name\n    switch (context.direction()) {\n    case envoy::config::core::v3::UNSPECIFIED: {\n      switch (tracing_config.hidden_envoy_deprecated_operation_name()) {\n      case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n          Tracing::INGRESS:\n        tracing_operation_name = Tracing::OperationName::Ingress;\n        break;\n      case 
envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n          Tracing::EGRESS:\n        tracing_operation_name = Tracing::OperationName::Egress;\n        break;\n      default:\n        NOT_REACHED_GCOVR_EXCL_LINE;\n      }\n      break;\n    }\n    case envoy::config::core::v3::INBOUND:\n      tracing_operation_name = Tracing::OperationName::Ingress;\n      break;\n    case envoy::config::core::v3::OUTBOUND:\n      tracing_operation_name = Tracing::OperationName::Egress;\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n\n    Tracing::CustomTagMap custom_tags;\n    for (const std::string& header :\n         tracing_config.hidden_envoy_deprecated_request_headers_for_tags()) {\n      envoy::type::tracing::v3::CustomTag::Header headerTag;\n      headerTag.set_name(header);\n      custom_tags.emplace(\n          header, std::make_shared<const Tracing::RequestHeaderCustomTag>(header, headerTag));\n    }\n    for (const auto& tag : tracing_config.custom_tags()) {\n      custom_tags.emplace(tag.tag(), Tracing::HttpTracerUtility::createCustomTag(tag));\n    }\n\n    envoy::type::v3::FractionalPercent client_sampling;\n    client_sampling.set_numerator(\n        tracing_config.has_client_sampling() ? tracing_config.client_sampling().value() : 100);\n    envoy::type::v3::FractionalPercent random_sampling;\n    // TODO: Random sampling historically was an integer and default to out of 10,000. We should\n    // deprecate that and move to a straight fractional percent config.\n    uint64_t random_sampling_numerator{PROTOBUF_PERCENT_TO_ROUNDED_INTEGER_OR_DEFAULT(\n        tracing_config, random_sampling, 10000, 10000)};\n    random_sampling.set_numerator(random_sampling_numerator);\n    random_sampling.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n    envoy::type::v3::FractionalPercent overall_sampling;\n    overall_sampling.set_numerator(\n        tracing_config.has_overall_sampling() ? 
tracing_config.overall_sampling().value() : 100);\n\n    const uint32_t max_path_tag_length = PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n        tracing_config, max_path_tag_length, Tracing::DefaultMaxPathTagLength);\n\n    tracing_config_ =\n        std::make_unique<Http::TracingConnectionManagerConfig>(Http::TracingConnectionManagerConfig{\n            tracing_operation_name, custom_tags, client_sampling, random_sampling, overall_sampling,\n            tracing_config.verbose(), max_path_tag_length});\n  }\n\n  for (const auto& access_log : config.access_log()) {\n    AccessLog::InstanceSharedPtr current_access_log =\n        AccessLog::AccessLogFactory::fromProto(access_log, context_);\n    access_logs_.push_back(current_access_log);\n  }\n\n  server_transformation_ = config.server_header_transformation();\n\n  if (!config.server_name().empty()) {\n    server_name_ = config.server_name();\n  } else {\n    server_name_ = Http::DefaultServerString::get();\n  }\n\n  switch (config.codec_type()) {\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      AUTO:\n    codec_type_ = CodecType::AUTO;\n    break;\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      HTTP1:\n    codec_type_ = CodecType::HTTP1;\n    break;\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      HTTP2:\n    codec_type_ = CodecType::HTTP2;\n    break;\n  case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      HTTP3:\n    codec_type_ = CodecType::HTTP3;\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  const auto& filters = config.http_filters();\n  for (int32_t i = 0; i < filters.size(); i++) {\n    processFilter(filters[i], i, \"http\", filter_factories_, \"http\", i == filters.size() - 1);\n  }\n\n  for (const auto& upgrade_config : config.upgrade_configs()) {\n    const std::string& name = 
upgrade_config.upgrade_type();\n    const bool enabled = upgrade_config.has_enabled() ? upgrade_config.enabled().value() : true;\n    if (findUpgradeCaseInsensitive(upgrade_filter_factories_, name) !=\n        upgrade_filter_factories_.end()) {\n      throw EnvoyException(\n          fmt::format(\"Error: multiple upgrade configs with the same name: '{}'\", name));\n    }\n    if (!upgrade_config.filters().empty()) {\n      std::unique_ptr<FilterFactoriesList> factories = std::make_unique<FilterFactoriesList>();\n      for (int32_t j = 0; j < upgrade_config.filters().size(); j++) {\n        processFilter(upgrade_config.filters(j), j, name, *factories, \"http upgrade\",\n                      j == upgrade_config.filters().size() - 1);\n      }\n      upgrade_filter_factories_.emplace(\n          std::make_pair(name, FilterConfig{std::move(factories), enabled}));\n    } else {\n      std::unique_ptr<FilterFactoriesList> factories(nullptr);\n      upgrade_filter_factories_.emplace(\n          std::make_pair(name, FilterConfig{std::move(factories), enabled}));\n    }\n  }\n}\n\nvoid HttpConnectionManagerConfig::processFilter(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter&\n        proto_config,\n    int i, absl::string_view prefix, FilterFactoriesList& filter_factories,\n    const char* filter_chain_type, bool last_filter_in_current_config) {\n  ENVOY_LOG(debug, \"    {} filter #{}\", prefix, i);\n  if (proto_config.config_type_case() ==\n      envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter::ConfigTypeCase::\n          kConfigDiscovery) {\n    processDynamicFilterConfig(proto_config.name(), proto_config.config_discovery(),\n                               filter_factories, filter_chain_type, last_filter_in_current_config);\n    return;\n  }\n\n  // Now see if there is a factory that will accept the config.\n  auto& factory =\n      
Config::Utility::getAndCheckFactory<Server::Configuration::NamedHttpFilterConfigFactory>(\n          proto_config);\n  ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(\n      proto_config, context_.messageValidationVisitor(), factory);\n  Http::FilterFactoryCb callback =\n      factory.createFilterFactoryFromProto(*message, stats_prefix_, context_);\n  bool is_terminal = factory.isTerminalFilter();\n  Config::Utility::validateTerminalFilters(proto_config.name(), factory.name(), filter_chain_type,\n                                           is_terminal, last_filter_in_current_config);\n  auto filter_config_provider = filter_config_provider_manager_.createStaticFilterConfigProvider(\n      callback, proto_config.name());\n  ENVOY_LOG(debug, \"      name: {}\", filter_config_provider->name());\n  ENVOY_LOG(debug, \"    config: {}\",\n            MessageUtil::getJsonStringFromMessage(\n                proto_config.has_typed_config()\n                    ? static_cast<const Protobuf::Message&>(proto_config.typed_config())\n                    : static_cast<const Protobuf::Message&>(\n                          proto_config.hidden_envoy_deprecated_config()),\n                true));\n  filter_factories.push_back(std::move(filter_config_provider));\n}\n\nvoid HttpConnectionManagerConfig::processDynamicFilterConfig(\n    const std::string& name, const envoy::config::core::v3::ExtensionConfigSource& config_discovery,\n    FilterFactoriesList& filter_factories, const char* filter_chain_type,\n    bool last_filter_in_current_config) {\n  ENVOY_LOG(debug, \"      dynamic filter name: {}\", name);\n  if (config_discovery.apply_default_config_without_warming() &&\n      !config_discovery.has_default_config()) {\n    throw EnvoyException(fmt::format(\n        \"Error: filter config {} applied without warming but has no default config.\", name));\n  }\n  std::set<std::string> require_type_urls;\n  for (const auto& type_url : 
config_discovery.type_urls()) {\n    auto factory_type_url = TypeUtil::typeUrlToDescriptorFullName(type_url);\n    require_type_urls.emplace(factory_type_url);\n    auto* factory = Registry::FactoryRegistry<\n        Server::Configuration::NamedHttpFilterConfigFactory>::getFactoryByType(factory_type_url);\n    if (factory == nullptr) {\n      throw EnvoyException(\n          fmt::format(\"Error: no factory found for a required type URL {}.\", factory_type_url));\n    }\n    Config::Utility::validateTerminalFilters(name, factory->name(), filter_chain_type,\n                                             factory->isTerminalFilter(),\n                                             last_filter_in_current_config);\n  }\n  auto filter_config_provider = filter_config_provider_manager_.createDynamicFilterConfigProvider(\n      config_discovery.config_source(), name, require_type_urls, context_, stats_prefix_,\n      config_discovery.apply_default_config_without_warming());\n  if (config_discovery.has_default_config()) {\n    auto* default_factory =\n        Config::Utility::getFactoryByType<Server::Configuration::NamedHttpFilterConfigFactory>(\n            config_discovery.default_config());\n    if (default_factory == nullptr) {\n      throw EnvoyException(fmt::format(\"Error: cannot find filter factory {} for default filter \"\n                                       \"configuration with type URL {}.\",\n                                       name, config_discovery.default_config().type_url()));\n    }\n    filter_config_provider->validateConfig(config_discovery.default_config(), *default_factory);\n    ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig(\n        config_discovery.default_config(), context_.messageValidationVisitor(), *default_factory);\n    Http::FilterFactoryCb default_config =\n        default_factory->createFilterFactoryFromProto(*message, stats_prefix_, context_);\n    filter_config_provider->onConfigUpdate(default_config, 
\"\", nullptr);\n  }\n  filter_factories.push_back(std::move(filter_config_provider));\n}\n\nHttp::ServerConnectionPtr\nHttpConnectionManagerConfig::createCodec(Network::Connection& connection,\n                                         const Buffer::Instance& data,\n                                         Http::ServerConnectionCallbacks& callbacks) {\n  switch (codec_type_) {\n  case CodecType::HTTP1: {\n    if (context_.runtime().snapshot().runtimeFeatureEnabled(\n            \"envoy.reloadable_features.new_codec_behavior\")) {\n      return std::make_unique<Http::Http1::ServerConnectionImpl>(\n          connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()),\n          callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(),\n          headersWithUnderscoresAction());\n    } else {\n      return std::make_unique<Http::Legacy::Http1::ServerConnectionImpl>(\n          connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()),\n          callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(),\n          headersWithUnderscoresAction());\n    }\n  }\n  case CodecType::HTTP2: {\n    if (context_.runtime().snapshot().runtimeFeatureEnabled(\n            \"envoy.reloadable_features.new_codec_behavior\")) {\n      return std::make_unique<Http::Http2::ServerConnectionImpl>(\n          connection, callbacks,\n          Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()),\n          context_.api().randomGenerator(), http2_options_, maxRequestHeadersKb(),\n          maxRequestHeadersCount(), headersWithUnderscoresAction());\n    } else {\n      return std::make_unique<Http::Legacy::Http2::ServerConnectionImpl>(\n          connection, callbacks,\n          Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()),\n          context_.api().randomGenerator(), http2_options_, maxRequestHeadersKb(),\n          maxRequestHeadersCount(), 
headersWithUnderscoresAction());\n    }\n  }\n  case CodecType::HTTP3:\n    // Hard code Quiche factory name here to instantiate a QUIC codec implemented.\n    // TODO(danzh) Add support to get the factory name from config, possibly\n    // from HttpConnectionManager protobuf. This is not essential till there are multiple\n    // implementations of QUIC.\n    return std::unique_ptr<Http::ServerConnection>(\n        Config::Utility::getAndCheckFactoryByName<Http::QuicHttpServerConnectionFactory>(\n            Http::QuicCodecNames::get().Quiche)\n            .createQuicServerConnection(connection, callbacks));\n  case CodecType::AUTO:\n    return Http::ConnectionManagerUtility::autoCreateCodec(\n        connection, data, callbacks, context_.scope(), context_.api().randomGenerator(),\n        http1_codec_stats_, http2_codec_stats_, http1_settings_, http2_options_,\n        maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction());\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid HttpConnectionManagerConfig::createFilterChainForFactories(\n    Http::FilterChainFactoryCallbacks& callbacks, const FilterFactoriesList& filter_factories) {\n  bool added_missing_config_filter = false;\n  for (const auto& filter_config_provider : filter_factories) {\n    auto config = filter_config_provider->config();\n    if (config.has_value()) {\n      config.value()(callbacks);\n      continue;\n    }\n\n    // If a filter config is missing after warming, inject a local reply with status 500.\n    if (!added_missing_config_filter) {\n      ENVOY_LOG(trace, \"Missing filter config for a provider {}\", filter_config_provider->name());\n      callbacks.addStreamDecoderFilter(\n          Http::StreamDecoderFilterSharedPtr{std::make_shared<MissingConfigFilter>()});\n      added_missing_config_filter = true;\n    } else {\n      ENVOY_LOG(trace, \"Provider {} missing a filter config\", filter_config_provider->name());\n    }\n  }\n}\n\nvoid 
HttpConnectionManagerConfig::createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) {\n  createFilterChainForFactories(callbacks, filter_factories_);\n}\n\nbool HttpConnectionManagerConfig::createUpgradeFilterChain(\n    absl::string_view upgrade_type,\n    const Http::FilterChainFactory::UpgradeMap* per_route_upgrade_map,\n    Http::FilterChainFactoryCallbacks& callbacks) {\n  bool route_enabled = false;\n  if (per_route_upgrade_map) {\n    auto route_it = findUpgradeBoolCaseInsensitive(*per_route_upgrade_map, upgrade_type);\n    if (route_it != per_route_upgrade_map->end()) {\n      // Upgrades explicitly not allowed on this route.\n      if (route_it->second == false) {\n        return false;\n      }\n      // Upgrades explicitly enabled on this route.\n      route_enabled = true;\n    }\n  }\n\n  auto it = findUpgradeCaseInsensitive(upgrade_filter_factories_, upgrade_type);\n  if ((it == upgrade_filter_factories_.end() || !it->second.allow_upgrade) && !route_enabled) {\n    // Either the HCM disables upgrades and the route-config does not override,\n    // or neither is configured for this upgrade.\n    return false;\n  }\n  FilterFactoriesList* filters_to_use = &filter_factories_;\n  if (it != upgrade_filter_factories_.end() && it->second.filter_factories != nullptr) {\n    filters_to_use = it->second.filter_factories.get();\n  }\n\n  createFilterChainForFactories(callbacks, *filters_to_use);\n  return true;\n}\n\nconst Network::Address::Instance& HttpConnectionManagerConfig::localAddress() {\n  return *context_.localInfo().address();\n}\n\n/**\n * Determines what tracing provider to use for a given\n * \"envoy.filters.network.http_connection_manager\" filter instance.\n */\nconst envoy::config::trace::v3::Tracing_Http* HttpConnectionManagerConfig::getPerFilterTracerConfig(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        config) {\n  // Give precedence to tracing provider configuration 
defined as part of\n  // \"envoy.filters.network.http_connection_manager\" filter config.\n  if (config.tracing().has_provider()) {\n    return &config.tracing().provider();\n  }\n  // Otherwise, for the sake of backwards compatibility, fallback to using tracing provider\n  // configuration defined in the bootstrap config.\n  if (context_.httpContext().defaultTracingConfig().has_http()) {\n    return &context_.httpContext().defaultTracingConfig().http();\n  }\n  return nullptr;\n}\n\nstd::function<Http::ApiListenerPtr()>\nHttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        proto_config,\n    Server::Configuration::FactoryContext& context, Network::ReadFilterCallbacks& read_callbacks) {\n\n  Utility::Singletons singletons = Utility::createSingletons(context);\n\n  auto filter_config = Utility::createConfig(\n      proto_config, context, *singletons.date_provider_, *singletons.route_config_provider_manager_,\n      *singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_,\n      *singletons.filter_config_provider_manager_);\n\n  // This lambda captures the shared_ptrs created above, thus preserving the\n  // reference count.\n  // Keep in mind the lambda capture list **doesn't** determine the destruction order, but it's fine\n  // as these captured objects are also global singletons.\n  return [singletons, filter_config, &context, &read_callbacks]() -> Http::ApiListenerPtr {\n    auto conn_manager = std::make_unique<Http::ConnectionManagerImpl>(\n        *filter_config, context.drainDecision(), context.api().randomGenerator(),\n        context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(),\n        context.overloadManager(), context.dispatcher().timeSource());\n\n    // This factory creates a new ConnectionManagerImpl in the absence of its usual environment as\n    // an L4 
filter, so this factory needs to take a few actions.\n\n    // When a new connection is creating its filter chain it hydrates the factory with a filter\n    // manager which provides the ConnectionManager with its \"read_callbacks\".\n    conn_manager->initializeReadFilterCallbacks(read_callbacks);\n\n    // When the connection first calls onData on the ConnectionManager, the ConnectionManager\n    // creates a codec. Here we force create a codec as onData will not be called.\n    Buffer::OwnedImpl dummy;\n    conn_manager->createCodec(dummy);\n\n    return conn_manager;\n  };\n}\n\n} // namespace HttpConnectionManager\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/http_connection_manager/config.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <map>\n#include <string>\n\n#include \"envoy/config/config_provider_manager.h\"\n#include \"envoy/config/core/v3/extension.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h\"\n#include \"envoy/filter/http/filter_config_provider.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/router/route_config_provider_manager.h\"\n#include \"envoy/tracing/http_tracer_manager.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/http/conn_manager_impl.h\"\n#include \"common/http/date_provider_impl.h\"\n#include \"common/http/http1/codec_stats.h\"\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/local_reply/local_reply.h\"\n#include \"common/router/rds_impl.h\"\n#include \"common/router/scoped_rds.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace HttpConnectionManager {\n\n/**\n * Config registration for the HTTP connection manager filter. 
@see NamedNetworkFilterConfigFactory.\n */\nclass HttpConnectionManagerFilterConfigFactory\n    : Logger::Loggable<Logger::Id::config>,\n      public Common::FactoryBase<\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager> {\npublic:\n  HttpConnectionManagerFilterConfigFactory()\n      : FactoryBase(NetworkFilterNames::get().HttpConnectionManager, true) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n          proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\nDECLARE_FACTORY(HttpConnectionManagerFilterConfigFactory);\n\n/**\n * Determines if an address is internal based on user provided config.\n */\nclass InternalAddressConfig : public Http::InternalAddressConfig {\npublic:\n  InternalAddressConfig(const envoy::extensions::filters::network::http_connection_manager::v3::\n                            HttpConnectionManager::InternalAddressConfig& config);\n\n  bool isInternalAddress(const Network::Address::Instance& address) const override {\n    if (address.type() == Network::Address::Type::Pipe) {\n      return unix_sockets_;\n    }\n\n    // TODO(snowp): Make internal subnets configurable.\n    return Network::Utility::isInternalAddress(address);\n  }\n\nprivate:\n  const bool unix_sockets_;\n};\n\n/**\n * Maps proto config to runtime config for an HTTP connection manager network filter.\n */\nclass HttpConnectionManagerConfig : Logger::Loggable<Logger::Id::config>,\n                                    public Http::FilterChainFactory,\n                                    public Http::ConnectionManagerConfig {\npublic:\n  HttpConnectionManagerConfig(\n      const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n          config,\n      Server::Configuration::FactoryContext& context, Http::DateProvider& 
date_provider,\n      Router::RouteConfigProviderManager& route_config_provider_manager,\n      Config::ConfigProviderManager& scoped_routes_config_provider_manager,\n      Tracing::HttpTracerManager& http_tracer_manager,\n      Filter::Http::FilterConfigProviderManager& filter_config_provider_manager);\n\n  // Http::FilterChainFactory\n  void createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) override;\n  using FilterFactoriesList = std::list<Filter::Http::FilterConfigProviderPtr>;\n  struct FilterConfig {\n    std::unique_ptr<FilterFactoriesList> filter_factories;\n    bool allow_upgrade;\n  };\n  bool createUpgradeFilterChain(absl::string_view upgrade_type,\n                                const Http::FilterChainFactory::UpgradeMap* per_route_upgrade_map,\n                                Http::FilterChainFactoryCallbacks& callbacks) override;\n\n  // Http::ConnectionManagerConfig\n  Http::RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; }\n  const std::list<AccessLog::InstanceSharedPtr>& accessLogs() override { return access_logs_; }\n  Http::ServerConnectionPtr createCodec(Network::Connection& connection,\n                                        const Buffer::Instance& data,\n                                        Http::ServerConnectionCallbacks& callbacks) override;\n  Http::DateProvider& dateProvider() override { return date_provider_; }\n  std::chrono::milliseconds drainTimeout() const override { return drain_timeout_; }\n  FilterChainFactory& filterFactory() override { return *this; }\n  bool generateRequestId() const override { return generate_request_id_; }\n  bool preserveExternalRequestId() const override { return preserve_external_request_id_; }\n  bool alwaysSetRequestIdInResponse() const override { return always_set_request_id_in_response_; }\n  uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; }\n  uint32_t maxRequestHeadersCount() const override { return 
max_request_headers_count_; }\n  absl::optional<std::chrono::milliseconds> idleTimeout() const override { return idle_timeout_; }\n  bool isRoutable() const override { return true; }\n  absl::optional<std::chrono::milliseconds> maxConnectionDuration() const override {\n    return max_connection_duration_;\n  }\n  std::chrono::milliseconds streamIdleTimeout() const override { return stream_idle_timeout_; }\n  std::chrono::milliseconds requestTimeout() const override { return request_timeout_; }\n  absl::optional<std::chrono::milliseconds> maxStreamDuration() const override {\n    return max_stream_duration_;\n  }\n  Router::RouteConfigProvider* routeConfigProvider() override {\n    return route_config_provider_.get();\n  }\n  Config::ConfigProvider* scopedRouteConfigProvider() override {\n    return scoped_routes_config_provider_.get();\n  }\n  const std::string& serverName() const override { return server_name_; }\n  HttpConnectionManagerProto::ServerHeaderTransformation\n  serverHeaderTransformation() const override {\n    return server_transformation_;\n  }\n  Http::ConnectionManagerStats& stats() override { return stats_; }\n  Http::ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; }\n  bool useRemoteAddress() const override { return use_remote_address_; }\n  const Http::InternalAddressConfig& internalAddressConfig() const override {\n    return *internal_address_config_;\n  }\n  uint32_t xffNumTrustedHops() const override { return xff_num_trusted_hops_; }\n  bool skipXffAppend() const override { return skip_xff_append_; }\n  const std::string& via() const override { return via_; }\n  Http::ForwardClientCertType forwardClientCert() const override { return forward_client_cert_; }\n  const std::vector<Http::ClientCertDetailsType>& setCurrentClientCertDetails() const override {\n    return set_current_client_cert_details_;\n  }\n  Tracing::HttpTracerSharedPtr tracer() override { return http_tracer_; }\n  const 
Http::TracingConnectionManagerConfig* tracingConfig() override {\n    return tracing_config_.get();\n  }\n  const Network::Address::Instance& localAddress() override;\n  const absl::optional<std::string>& userAgent() override { return user_agent_; }\n  Http::ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; }\n  bool proxy100Continue() const override { return proxy_100_continue_; }\n  bool streamErrorOnInvalidHttpMessaging() const override {\n    return stream_error_on_invalid_http_messaging_;\n  }\n  const Http::Http1Settings& http1Settings() const override { return http1_settings_; }\n  bool shouldNormalizePath() const override { return normalize_path_; }\n  bool shouldMergeSlashes() const override { return merge_slashes_; }\n  bool shouldStripMatchingPort() const override { return strip_matching_port_; }\n  envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n  headersWithUnderscoresAction() const override {\n    return headers_with_underscores_action_;\n  }\n  std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; }\n  const LocalReply::LocalReply& localReply() const override { return *local_reply_; }\n\nprivate:\n  enum class CodecType { HTTP1, HTTP2, HTTP3, AUTO };\n  void\n  processFilter(const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter&\n                    proto_config,\n                int i, absl::string_view prefix, FilterFactoriesList& filter_factories,\n                const char* filter_chain_type, bool last_filter_in_current_config);\n  void\n  processDynamicFilterConfig(const std::string& name,\n                             const envoy::config::core::v3::ExtensionConfigSource& config_discovery,\n                             FilterFactoriesList& filter_factories, const char* filter_chain_type,\n                             bool last_filter_in_current_config);\n  void 
createFilterChainForFactories(Http::FilterChainFactoryCallbacks& callbacks,\n                                     const FilterFactoriesList& filter_factories);\n\n  /**\n   * Determines what tracing provider to use for a given\n   * \"envoy.filters.network.http_connection_manager\" filter instance.\n   */\n  const envoy::config::trace::v3::Tracing_Http* getPerFilterTracerConfig(\n      const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n          filter_config);\n\n  Http::RequestIDExtensionSharedPtr request_id_extension_;\n  Server::Configuration::FactoryContext& context_;\n  FilterFactoriesList filter_factories_;\n  std::map<std::string, FilterConfig> upgrade_filter_factories_;\n  std::list<AccessLog::InstanceSharedPtr> access_logs_;\n  const std::string stats_prefix_;\n  Http::ConnectionManagerStats stats_;\n  mutable Http::Http1::CodecStats::AtomicPtr http1_codec_stats_;\n  mutable Http::Http2::CodecStats::AtomicPtr http2_codec_stats_;\n  Http::ConnectionManagerTracingStats tracing_stats_;\n  const bool use_remote_address_{};\n  const std::unique_ptr<Http::InternalAddressConfig> internal_address_config_;\n  const uint32_t xff_num_trusted_hops_;\n  const bool skip_xff_append_;\n  const std::string via_;\n  Http::ForwardClientCertType forward_client_cert_;\n  std::vector<Http::ClientCertDetailsType> set_current_client_cert_details_;\n  Router::RouteConfigProviderManager& route_config_provider_manager_;\n  Config::ConfigProviderManager& scoped_routes_config_provider_manager_;\n  Filter::Http::FilterConfigProviderManager& filter_config_provider_manager_;\n  CodecType codec_type_;\n  envoy::config::core::v3::Http2ProtocolOptions http2_options_;\n  const Http::Http1Settings http1_settings_;\n  HttpConnectionManagerProto::ServerHeaderTransformation server_transformation_{\n      HttpConnectionManagerProto::OVERWRITE};\n  std::string server_name_;\n  Tracing::HttpTracerSharedPtr 
http_tracer_{std::make_shared<Tracing::HttpNullTracer>()};\n  Http::TracingConnectionManagerConfigPtr tracing_config_;\n  absl::optional<std::string> user_agent_;\n  const uint32_t max_request_headers_kb_;\n  const uint32_t max_request_headers_count_;\n  absl::optional<std::chrono::milliseconds> idle_timeout_;\n  absl::optional<std::chrono::milliseconds> max_connection_duration_;\n  absl::optional<std::chrono::milliseconds> max_stream_duration_;\n  std::chrono::milliseconds stream_idle_timeout_;\n  std::chrono::milliseconds request_timeout_;\n  Router::RouteConfigProviderSharedPtr route_config_provider_;\n  Config::ConfigProviderPtr scoped_routes_config_provider_;\n  std::chrono::milliseconds drain_timeout_;\n  bool generate_request_id_;\n  const bool preserve_external_request_id_;\n  const bool always_set_request_id_in_response_;\n  Http::DateProvider& date_provider_;\n  Http::ConnectionManagerListenerStats listener_stats_;\n  const bool proxy_100_continue_;\n  const bool stream_error_on_invalid_http_messaging_;\n  std::chrono::milliseconds delayed_close_timeout_;\n  const bool normalize_path_;\n  const bool merge_slashes_;\n  const bool strip_matching_port_;\n  const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n      headers_with_underscores_action_;\n  const LocalReply::LocalReplyPtr local_reply_;\n\n  // Default idle timeout is 5 minutes if nothing is specified in the HCM config.\n  static const uint64_t StreamIdleTimeoutMs = 5 * 60 * 1000;\n  // request timeout is disabled by default\n  static const uint64_t RequestTimeoutMs = 0;\n};\n\n/**\n * Factory to create an HttpConnectionManager outside of a Network Filter Chain.\n */\nclass HttpConnectionManagerFactory {\npublic:\n  static std::function<Http::ApiListenerPtr()> createHttpConnectionManagerFactoryFromProto(\n      const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n          proto_config,\n      
Server::Configuration::FactoryContext& context, Network::ReadFilterCallbacks& read_callbacks);\n};\n\n/**\n * Utility class for shared logic between HTTP connection manager factories.\n */\nclass Utility {\npublic:\n  struct Singletons {\n    std::shared_ptr<Http::TlsCachingDateProviderImpl> date_provider_;\n    Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager_;\n    Router::ScopedRoutesConfigProviderManagerSharedPtr scoped_routes_config_provider_manager_;\n    Tracing::HttpTracerManagerSharedPtr http_tracer_manager_;\n    std::shared_ptr<Filter::Http::FilterConfigProviderManager> filter_config_provider_manager_;\n  };\n\n  /**\n   * Create/get singletons needed for config creation.\n   *\n   * @param context supplies the context used to create the singletons.\n   * @return Singletons struct containing all the singletons.\n   */\n  static Singletons createSingletons(Server::Configuration::FactoryContext& context);\n\n  /**\n   * Create the HttpConnectionManagerConfig.\n   *\n   * @param proto_config supplies the config to install.\n   * @param context supplies the context used to create the config.\n   * @param date_provider the singleton used in config creation.\n   * @param route_config_provider_manager the singleton used in config creation.\n   * @param scoped_routes_config_provider_manager the singleton used in config creation.\n   * @return a shared_ptr to the created config object.\n   */\n  static std::shared_ptr<HttpConnectionManagerConfig> createConfig(\n      const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n          proto_config,\n      Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider,\n      Router::RouteConfigProviderManager& route_config_provider_manager,\n      Config::ConfigProviderManager& scoped_routes_config_provider_manager,\n      Tracing::HttpTracerManager& http_tracer_manager,\n      Filter::Http::FilterConfigProviderManager& 
filter_config_provider_manager);\n};\n\n} // namespace HttpConnectionManager\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\nload(\"@rules_python//python:defs.bzl\", \"py_binary\", \"py_library\")\nload(\"@kafka_pip3//:requirements.bzl\", \"requirement\")\n\nlicenses([\"notice\"])  # Apache 2\n\n# Kafka network filter.\n# Broker filter public docs: docs/root/configuration/network_filters/kafka_broker_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"kafka_broker_config_lib\",\n    srcs = [\"broker/config.cc\"],\n    hdrs = [\"broker/config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"wip\",\n    deps = [\n        \":kafka_broker_filter_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/kafka_broker/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"kafka_broker_filter_lib\",\n    srcs = [\"broker/filter.cc\"],\n    hdrs = [\n        \"broker/filter.h\",\n        \"external/request_metrics.h\",\n        \"external/response_metrics.h\",\n    ],\n    deps = [\n        \":kafka_request_codec_lib\",\n        \":kafka_response_codec_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"abstract_codec_lib\",\n    srcs = [],\n    hdrs = [\n        \"codec.h\",\n    ],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"kafka_request_codec_lib\",\n    srcs = [\"request_codec.cc\"],\n    hdrs = [\n        \"request_codec.h\",\n    ],\n    deps = [\n        \":abstract_codec_lib\",\n     
   \":kafka_request_parser_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"kafka_request_parser_lib\",\n    srcs = [\n        \"external/kafka_request_resolver.cc\",\n        \"kafka_request_parser.cc\",\n    ],\n    hdrs = [\n        \"external/requests.h\",\n        \"kafka_request_parser.h\",\n    ],\n    deps = [\n        \":kafka_request_lib\",\n        \":parser_lib\",\n        \":tagged_fields_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"kafka_request_lib\",\n    srcs = [\n    ],\n    hdrs = [\n        \"kafka_request.h\",\n    ],\n    deps = [\n        \":serialization_lib\",\n        \":tagged_fields_lib\",\n    ],\n)\n\ngenrule(\n    name = \"kafka_request_generated_source\",\n    srcs = [\n        \"@kafka_source//:request_protocol_files\",\n    ],\n    outs = [\n        \"external/requests.h\",\n        \"external/kafka_request_resolver.cc\",\n        \"external/request_metrics.h\",\n    ],\n    cmd = \"\"\"\n      ./$(location :kafka_protocol_code_generator_bin) request \\\n        $(location external/requests.h) $(location external/kafka_request_resolver.cc) \\\n        $(location external/request_metrics.h) $(SRCS)\n    \"\"\",\n    tools = [\n        \":kafka_protocol_code_generator_bin\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"kafka_response_codec_lib\",\n    srcs = [\"response_codec.cc\"],\n    hdrs = [\n        \"response_codec.h\",\n    ],\n    deps = [\n        \":abstract_codec_lib\",\n        \":kafka_response_parser_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"kafka_response_parser_lib\",\n    srcs = [\n        \"external/kafka_response_resolver.cc\",\n        \"kafka_response_parser.cc\",\n    ],\n    hdrs = [\n        \"external/responses.h\",\n        \"kafka_response_parser.h\",\n    ],\n    deps = [\n        \":kafka_response_lib\",\n        \":parser_lib\",\n        \":tagged_fields_lib\",\n        
\"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"kafka_response_lib\",\n    srcs = [\n    ],\n    hdrs = [\n        \"kafka_response.h\",\n    ],\n    deps = [\n        \":serialization_lib\",\n        \":tagged_fields_lib\",\n    ],\n)\n\ngenrule(\n    name = \"kafka_response_generated_source\",\n    srcs = [\n        \"@kafka_source//:response_protocol_files\",\n    ],\n    outs = [\n        \"external/responses.h\",\n        \"external/kafka_response_resolver.cc\",\n        \"external/response_metrics.h\",\n    ],\n    cmd = \"\"\"\n      ./$(location :kafka_protocol_code_generator_bin) response \\\n        $(location external/responses.h) $(location external/kafka_response_resolver.cc) \\\n        $(location external/response_metrics.h) $(SRCS)\n    \"\"\",\n    tools = [\n        \":kafka_protocol_code_generator_bin\",\n    ],\n)\n\npy_binary(\n    name = \"kafka_protocol_code_generator_bin\",\n    srcs = [\"protocol/launcher.py\"],\n    data = glob([\"protocol/*.j2\"]),\n    main = \"protocol/launcher.py\",\n    deps = [\n        \":kafka_protocol_generator_lib\",\n        requirement(\"Jinja2\"),\n        requirement(\"MarkupSafe\"),\n    ],\n)\n\npy_library(\n    name = \"kafka_protocol_generator_lib\",\n    srcs = [\"protocol/generator.py\"],\n)\n\nenvoy_cc_library(\n    name = \"parser_lib\",\n    hdrs = [\"parser.h\"],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tagged_fields_lib\",\n    hdrs = [\"tagged_fields.h\"],\n    deps = [\":serialization_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"serialization_lib\",\n    srcs = [\n        \"serialization.cc\",\n    ],\n    hdrs = [\n        \"external/serialization_composite.h\",\n        \"serialization.h\",\n    ],\n    deps = [\n        \":kafka_types_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n        
\"//source/common/common:byte_order_lib\",\n    ],\n)\n\ngenrule(\n    name = \"serialization_composite_generated_source\",\n    srcs = [],\n    outs = [\n        \"external/serialization_composite.h\",\n    ],\n    cmd = \"\"\"\n      ./$(location :serialization_composite_code_generator_bin) \\\n      $(location external/serialization_composite.h)\n    \"\"\",\n    tools = [\n        \":serialization_composite_code_generator_bin\",\n    ],\n)\n\npy_binary(\n    name = \"serialization_composite_code_generator_bin\",\n    srcs = [\"serialization/launcher.py\"],\n    data = glob([\"serialization/*.j2\"]),\n    main = \"serialization/launcher.py\",\n    deps = [\n        \":serialization_composite_generator_lib\",\n        requirement(\"Jinja2\"),\n        requirement(\"MarkupSafe\"),\n    ],\n)\n\npy_library(\n    name = \"serialization_composite_generator_lib\",\n    srcs = [\"serialization/generator.py\"],\n)\n\nenvoy_cc_library(\n    name = \"kafka_types_lib\",\n    hdrs = [\n        \"kafka_types.h\",\n    ],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//source/common/common:macros\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/broker/config.cc",
    "content": "#include \"extensions/filters/network/kafka/broker/config.h\"\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"extensions/filters/network/kafka/broker/filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace Broker {\n\nNetwork::FilterFactoryCb KafkaConfigFactory::createFilterFactoryFromProtoTyped(\n    const KafkaBrokerProtoConfig& proto_config, Server::Configuration::FactoryContext& context) {\n\n  ASSERT(!proto_config.stat_prefix().empty());\n\n  const std::string& stat_prefix = proto_config.stat_prefix();\n\n  return [&context, stat_prefix](Network::FilterManager& filter_manager) -> void {\n    Network::FilterSharedPtr filter =\n        std::make_shared<KafkaBrokerFilter>(context.scope(), context.timeSource(), stat_prefix);\n    filter_manager.addFilter(filter);\n  };\n}\n\n/**\n * Static registration for the Kafka filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(KafkaConfigFactory, Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace Broker\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/broker/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.pb.h\"\n#include \"envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace Broker {\n\nusing KafkaBrokerProtoConfig = envoy::extensions::filters::network::kafka_broker::v3::KafkaBroker;\n\n/**\n * Config registration for the Kafka filter.\n */\nclass KafkaConfigFactory : public Common::FactoryBase<KafkaBrokerProtoConfig> {\npublic:\n  KafkaConfigFactory() : FactoryBase(NetworkFilterNames::get().KafkaBroker) {}\n\nprivate:\n  // Common::FactoryBase<KafkaBrokerProtoConfig>\n  Network::FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const KafkaBrokerProtoConfig& proto_config,\n                                    Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace Broker\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/broker/filter.cc",
    "content": "#include \"extensions/filters/network/kafka/broker/filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace Broker {\n\nvoid Forwarder::onMessage(AbstractRequestSharedPtr request) {\n  const RequestHeader& header = request->request_header_;\n  response_decoder_.expectResponse(header.correlation_id_, header.api_key_, header.api_version_);\n}\n\nvoid Forwarder::onFailedParse(RequestParseFailureSharedPtr parse_failure) {\n  const RequestHeader& header = parse_failure->request_header_;\n  response_decoder_.expectResponse(header.correlation_id_, header.api_key_, header.api_version_);\n}\n\n// Nothing fancy here, proper metrics registration is left to Rich...MetricsImpl constructors.\nKafkaMetricsFacadeImpl::KafkaMetricsFacadeImpl(Stats::Scope& scope, TimeSource& time_source,\n                                               const std::string& stat_prefix)\n    : KafkaMetricsFacadeImpl{time_source,\n                             std::make_shared<RichRequestMetricsImpl>(scope, stat_prefix),\n                             std::make_shared<RichResponseMetricsImpl>(scope, stat_prefix)} {};\n\nKafkaMetricsFacadeImpl::KafkaMetricsFacadeImpl(TimeSource& time_source,\n                                               RichRequestMetricsSharedPtr request_metrics,\n                                               RichResponseMetricsSharedPtr response_metrics)\n    : time_source_{time_source}, request_metrics_{request_metrics}, response_metrics_{\n                                                                        response_metrics} {};\n\n// When request is successfully parsed, increase type count and store its arrival timestamp.\nvoid KafkaMetricsFacadeImpl::onMessage(AbstractRequestSharedPtr request) {\n  const RequestHeader& header = request->request_header_;\n  request_metrics_->onRequest(header.api_key_);\n\n  const MonotonicTime request_arrival_ts = time_source_.monotonicTime();\n  
request_arrivals_[header.correlation_id_] = request_arrival_ts;\n}\n\nvoid KafkaMetricsFacadeImpl::onFailedParse(RequestParseFailureSharedPtr) {\n  request_metrics_->onUnknownRequest();\n}\n\nvoid KafkaMetricsFacadeImpl::onRequestException() { request_metrics_->onBrokenRequest(); }\n\n// When response is successfully parsed, compute processing time using its correlation id and\n// stored request arrival timestamp, then update metrics with the result.\nvoid KafkaMetricsFacadeImpl::onMessage(AbstractResponseSharedPtr response) {\n  const ResponseMetadata& metadata = response->metadata_;\n\n  const MonotonicTime response_arrival_ts = time_source_.monotonicTime();\n  const MonotonicTime request_arrival_ts = request_arrivals_[metadata.correlation_id_];\n  request_arrivals_.erase(metadata.correlation_id_);\n\n  const MonotonicTime::duration time_in_broker = response_arrival_ts - request_arrival_ts;\n  const auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(time_in_broker);\n\n  response_metrics_->onResponse(metadata.api_key_, ms.count());\n}\n\nvoid KafkaMetricsFacadeImpl::onFailedParse(ResponseMetadataSharedPtr) {\n  response_metrics_->onUnknownResponse();\n}\n\nvoid KafkaMetricsFacadeImpl::onResponseException() { response_metrics_->onBrokenResponse(); }\n\nabsl::flat_hash_map<int32_t, MonotonicTime>& KafkaMetricsFacadeImpl::getRequestArrivalsForTest() {\n  return request_arrivals_;\n}\n\nKafkaBrokerFilter::KafkaBrokerFilter(Stats::Scope& scope, TimeSource& time_source,\n                                     const std::string& stat_prefix)\n    : KafkaBrokerFilter{\n          std::make_shared<KafkaMetricsFacadeImpl>(scope, time_source, stat_prefix)} {};\n\nKafkaBrokerFilter::KafkaBrokerFilter(const KafkaMetricsFacadeSharedPtr& metrics)\n    : metrics_{metrics}, response_decoder_{new ResponseDecoder({metrics})},\n      request_decoder_{\n          new RequestDecoder({std::make_shared<Forwarder>(*response_decoder_), metrics})} 
{};\n\nKafkaBrokerFilter::KafkaBrokerFilter(KafkaMetricsFacadeSharedPtr metrics,\n                                     ResponseDecoderSharedPtr response_decoder,\n                                     RequestDecoderSharedPtr request_decoder)\n    : metrics_{metrics}, response_decoder_{response_decoder}, request_decoder_{request_decoder} {};\n\nNetwork::FilterStatus KafkaBrokerFilter::onNewConnection() {\n  return Network::FilterStatus::Continue;\n}\n\nvoid KafkaBrokerFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) {}\n\nNetwork::FilterStatus KafkaBrokerFilter::onData(Buffer::Instance& data, bool) {\n  ENVOY_LOG(trace, \"data from Kafka client [{} request bytes]\", data.length());\n  try {\n    request_decoder_->onData(data);\n    return Network::FilterStatus::Continue;\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG(debug, \"could not process data from Kafka client: {}\", e.what());\n    metrics_->onRequestException();\n    request_decoder_->reset();\n    return Network::FilterStatus::StopIteration;\n  }\n}\n\nNetwork::FilterStatus KafkaBrokerFilter::onWrite(Buffer::Instance& data, bool) {\n  ENVOY_LOG(trace, \"data from Kafka broker [{} response bytes]\", data.length());\n  try {\n    response_decoder_->onData(data);\n    return Network::FilterStatus::Continue;\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG(debug, \"could not process data from Kafka broker: {}\", e.what());\n    metrics_->onResponseException();\n    response_decoder_->reset();\n    return Network::FilterStatus::StopIteration;\n  }\n}\n\nRequestDecoderSharedPtr KafkaBrokerFilter::getRequestDecoderForTest() { return request_decoder_; }\n\nResponseDecoderSharedPtr KafkaBrokerFilter::getResponseDecoderForTest() {\n  return response_decoder_;\n}\n\n} // namespace Broker\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/broker/filter.h",
    "content": "#pragma once\n\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/kafka/external/request_metrics.h\"\n#include \"extensions/filters/network/kafka/external/response_metrics.h\"\n#include \"extensions/filters/network/kafka/parser.h\"\n#include \"extensions/filters/network/kafka/request_codec.h\"\n#include \"extensions/filters/network/kafka/response_codec.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace Broker {\n\n/**\n * Utility class that merges both request & response callbacks.\n */\nclass KafkaCallback : public RequestCallback, public ResponseCallback {};\n\nusing KafkaCallbackSharedPtr = std::shared_ptr<KafkaCallback>;\n\n/**\n * Request callback responsible for updating state of related response decoder.\n * When a request gets successfully parsed, the response decoder registers a new incoming request.\n * When request could not be recognized, we can extract the header, so we can register the\n * expected response (decoder will not be able to capable of decoding the response, but at least we\n * will not break the communication between client and broker).\n */\nclass Forwarder : public RequestCallback {\npublic:\n  /**\n   * Binds forwarder to given response decoder.\n   */\n  Forwarder(ResponseDecoder& response_decoder) : response_decoder_{response_decoder} {};\n\n  // RequestCallback\n  void onMessage(AbstractRequestSharedPtr request) override;\n  void onFailedParse(RequestParseFailureSharedPtr parse_failure) override;\n\nprivate:\n  ResponseDecoder& response_decoder_;\n};\n\n/**\n * Single access point for all Kafka-related metrics.\n * Implements Kafka message callback, so decoders can refer to it when parse results appear.\n * This interface was extracted to facilitate mock injection in unit tests.\n */\nclass KafkaMetricsFacade : public 
KafkaCallback {\npublic:\n  /**\n   * To be invoked when exceptions occur while processing a request.\n   */\n  virtual void onRequestException() PURE;\n\n  /**\n   * To be invoked when exceptions occur while processing a response.\n   */\n  virtual void onResponseException() PURE;\n};\n\nusing KafkaMetricsFacadeSharedPtr = std::shared_ptr<KafkaMetricsFacade>;\n\n/**\n * Metrics facade implementation that actually uses rich request/response metrics.\n * Keeps requests' arrival timestamps (by correlation id) and uses them calculate response\n * processing time.\n */\nclass KafkaMetricsFacadeImpl : public KafkaMetricsFacade {\npublic:\n  /**\n   * Creates facade that keeps prefixed metrics in given scope, and uses given time source to\n   * compute processing durations.\n   */\n  KafkaMetricsFacadeImpl(Stats::Scope& scope, TimeSource& time_source,\n                         const std::string& stat_prefix);\n\n  /**\n   * Visible for testing.\n   */\n  KafkaMetricsFacadeImpl(TimeSource& time_source, RichRequestMetricsSharedPtr request_metrics,\n                         RichResponseMetricsSharedPtr response_metrics);\n\n  // RequestCallback\n  void onMessage(AbstractRequestSharedPtr request) override;\n  void onFailedParse(RequestParseFailureSharedPtr parse_failure) override;\n\n  // ResponseCallback\n  void onMessage(AbstractResponseSharedPtr response) override;\n  void onFailedParse(ResponseMetadataSharedPtr parse_failure) override;\n\n  // KafkaMetricsFacade\n  void onRequestException() override;\n  void onResponseException() override;\n\n  absl::flat_hash_map<int32_t, MonotonicTime>& getRequestArrivalsForTest();\n\nprivate:\n  TimeSource& time_source_;\n  absl::flat_hash_map<int32_t, MonotonicTime> request_arrivals_;\n  RichRequestMetricsSharedPtr request_metrics_;\n  RichResponseMetricsSharedPtr response_metrics_;\n};\n\n/**\n * Implementation of Kafka broker-level filter.\n * Uses two decoders - request and response ones, that are connected using Forwarder 
instance.\n * There's also a KafkaMetricsFacade, that is listening on codec events.\n *\n *        +---------------------------------------------------+\n *        |                                                   |\n *        |               +--------------+                    |\n *        |   +---------->+RequestDecoder+----------------+   |\n *        |   |           +-------+------+                |   |\n *        |   |                   |                       |   |\n *        |   |                   |                       |   |\n *        |   |                   v                       v   v\n * +------+---+------+       +----+----+        +---------+---+----+\n * |KafkaBrokerFilter|       |Forwarder|        |KafkaMetricsFacade|\n * +----------+------+       +----+----+        +---------+--------+\n *            |                   |                       ^\n *            |                   |                       |\n *            |                   v                       |\n *            |           +-------+-------+               |\n *            +---------->+ResponseDecoder+---------------+\n *                        +---------------+\n */\nclass KafkaBrokerFilter : public Network::Filter, private Logger::Loggable<Logger::Id::kafka> {\npublic:\n  /**\n   * Main constructor.\n   * Creates decoders that eventually update prefixed metrics stored in scope, using time source for\n   * duration calculation.\n   */\n  KafkaBrokerFilter(Stats::Scope& scope, TimeSource& time_source, const std::string& stat_prefix);\n\n  /**\n   * Visible for testing.\n   */\n  KafkaBrokerFilter(KafkaMetricsFacadeSharedPtr metrics, ResponseDecoderSharedPtr response_decoder,\n                    RequestDecoderSharedPtr request_decoder);\n\n  // Network::ReadFilter\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override;\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) 
override;\n\n  // Network::WriteFilter\n  Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override;\n\n  RequestDecoderSharedPtr getRequestDecoderForTest();\n  ResponseDecoderSharedPtr getResponseDecoderForTest();\n\nprivate:\n  /**\n   * Helper delegate constructor.\n   * Passes metrics facade as argument to decoders.\n   */\n  KafkaBrokerFilter(const KafkaMetricsFacadeSharedPtr& metrics);\n\n  const KafkaMetricsFacadeSharedPtr metrics_;\n  const ResponseDecoderSharedPtr response_decoder_;\n  const RequestDecoderSharedPtr request_decoder_;\n};\n\n} // namespace Broker\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/codec.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"absl/container/fixed_array.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Kafka message decoder.\n */\nclass MessageDecoder {\npublic:\n  virtual ~MessageDecoder() = default;\n\n  /**\n   * Processes given buffer attempting to decode messages contained within.\n   * @param data buffer instance.\n   */\n  virtual void onData(Buffer::Instance& data) PURE;\n};\n\ntemplate <typename MessageType, typename ParseFailureType> class MessageCallback {\npublic:\n  virtual ~MessageCallback() = default;\n\n  /**\n   * Callback method invoked when message is successfully decoded.\n   * @param message message that has been decoded.\n   */\n  virtual void onMessage(MessageType message) PURE;\n\n  /**\n   * Callback method invoked when message could not be decoded.\n   * Invoked after all message's bytes have been consumed.\n   */\n  virtual void onFailedParse(ParseFailureType failure_data) PURE;\n};\n\n/**\n * Abstract message decoder, that resolves messages from Buffer instances provided.\n * When the message has been parsed, notify the callbacks.\n */\ntemplate <typename ParserType, typename CallbackType>\nclass AbstractMessageDecoder : public MessageDecoder {\npublic:\n  ~AbstractMessageDecoder() override = default;\n\n  /**\n   * Creates a decoder that will invoke given callbacks when a message has been parsed.\n   * @param callbacks callbacks to be invoked (in order).\n   */\n  AbstractMessageDecoder(const std::vector<CallbackType> callbacks) : callbacks_{callbacks} {};\n\n  /**\n   * Consumes all data present in a buffer.\n   * If a message can be successfully parsed, then callbacks get notified with parsed response.\n   * Updates decoder state.\n   * Can throw if codec's state does not permit usage, or there there were parse failures.\n   * Impl note: similar to redis codec, which 
also keeps state.\n   */\n  void onData(Buffer::Instance& data) override {\n    // Pass slices to `doParse`.\n    for (const Buffer::RawSlice& slice : data.getRawSlices()) {\n      doParse(slice);\n    }\n  }\n\n  /**\n   * Erases codec state.\n   */\n  virtual void reset() { current_parser_ = nullptr; }\n\n  ParserType getCurrentParserForTest() const { return current_parser_; }\n\nprotected:\n  /**\n   * Create a start parser for a new message.\n   */\n  virtual ParserType createStartParser() PURE;\n\nprivate:\n  /**\n   * Main parse loop.\n   *\n   * If there is data to process, and the current parser is not present,\n   * create a new one with `createStartParser`.\n   * Feed data to a current parser until it returns a parse result.\n   * If the parse result is a parsed message, notify callbacks and reset current parser.\n   * If the parse result is another parser, update current parser, and keep feeding.\n   */\n  void doParse(const Buffer::RawSlice& slice) {\n    const char* bytes = reinterpret_cast<const char*>(slice.mem_);\n    absl::string_view data = {bytes, slice.len_};\n\n    while (!data.empty()) {\n\n      // Re-initialize the parser.\n      if (!current_parser_) {\n        current_parser_ = createStartParser();\n      }\n\n      // Feed the data to the parser.\n      auto result = current_parser_->parse(data);\n      // This loop guarantees that parsers consuming 0 bytes also get processed in this invocation.\n      while (result.hasData()) {\n        if (!result.next_parser_) {\n\n          // Next parser is not present, so we have finished parsing a message.\n          // Depending on whether the parse was successful, invoke the correct callback.\n          if (result.message_) {\n            for (auto& callback : callbacks_) {\n              callback->onMessage(result.message_);\n            }\n          } else {\n            for (auto& callback : callbacks_) {\n              callback->onFailedParse(result.failure_data_);\n            }\n          
}\n\n          // As we finished parsing this response, return to outer loop.\n          // If there is more data, the parser will be re-initialized.\n          current_parser_ = nullptr;\n          break;\n        } else {\n\n          // The next parser that's supposed to consume the rest of payload was given.\n          current_parser_ = result.next_parser_;\n        }\n\n        // Keep parsing the data.\n        result = current_parser_->parse(data);\n      }\n    }\n  }\n\n  const std::vector<CallbackType> callbacks_;\n\n  ParserType current_parser_;\n};\n\n/**\n * Kafka message encoder.\n * @param MessageType encoded message type (request or response).\n */\ntemplate <typename MessageType> class MessageEncoder {\npublic:\n  virtual ~MessageEncoder() = default;\n\n  /**\n   * Encodes given message.\n   * @param message message to be encoded.\n   */\n  virtual void encode(const MessageType& message) PURE;\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/kafka_request.h",
    "content": "#pragma once\n\n#include \"envoy/common/exception.h\"\n\n#include \"extensions/filters/network/kafka/external/serialization_composite.h\"\n#include \"extensions/filters/network/kafka/serialization.h\"\n#include \"extensions/filters/network/kafka/tagged_fields.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Decides if request with given api key & version should have tagged fields in header.\n * This method gets implemented in generated code through 'kafka_request_resolver_cc.j2'.\n * @param api_key Kafka request key.\n * @param api_version Kafka request's version.\n * @return Whether tagged fields should be used for this request.\n */\nbool requestUsesTaggedFieldsInHeader(const uint16_t api_key, const uint16_t api_version);\n\n/**\n * Represents fields that are present in every Kafka request message.\n * @see http://kafka.apache.org/protocol.html#protocol_messages\n */\nstruct RequestHeader {\n  int16_t api_key_;\n  int16_t api_version_;\n  int32_t correlation_id_;\n  NullableString client_id_;\n  TaggedFields tagged_fields_;\n\n  RequestHeader(const int16_t api_key, const int16_t api_version, const int32_t correlation_id,\n                const NullableString& client_id)\n      : RequestHeader{api_key, api_version, correlation_id, client_id, TaggedFields{}} {};\n\n  RequestHeader(const int16_t api_key, const int16_t api_version, const int32_t correlation_id,\n                const NullableString& client_id, const TaggedFields& tagged_fields)\n      : api_key_{api_key}, api_version_{api_version}, correlation_id_{correlation_id},\n        client_id_{client_id}, tagged_fields_{tagged_fields} {};\n\n  uint32_t computeSize(const EncodingContext& context) const {\n    uint32_t result{0};\n    result += context.computeSize(api_key_);\n    result += context.computeSize(api_version_);\n    result += context.computeSize(correlation_id_);\n    result += context.computeSize(client_id_);\n    if 
(requestUsesTaggedFieldsInHeader(api_key_, api_version_)) {\n      result += context.computeCompactSize(tagged_fields_);\n    }\n    return result;\n  }\n\n  uint32_t encode(Buffer::Instance& dst, EncodingContext& context) const {\n    uint32_t written{0};\n    written += context.encode(api_key_, dst);\n    written += context.encode(api_version_, dst);\n    written += context.encode(correlation_id_, dst);\n    written += context.encode(client_id_, dst);\n    if (requestUsesTaggedFieldsInHeader(api_key_, api_version_)) {\n      written += context.encodeCompact(tagged_fields_, dst);\n    }\n    return written;\n  }\n\n  bool operator==(const RequestHeader& rhs) const {\n    return api_key_ == rhs.api_key_ && api_version_ == rhs.api_version_ &&\n           correlation_id_ == rhs.correlation_id_ && client_id_ == rhs.client_id_ &&\n           tagged_fields_ == rhs.tagged_fields_;\n  };\n};\n\n/**\n * Carries information that could be extracted during the failed parse.\n */\nclass RequestParseFailure {\npublic:\n  RequestParseFailure(const RequestHeader& request_header) : request_header_{request_header} {};\n\n  /**\n   * Request's header.\n   */\n  const RequestHeader request_header_;\n};\n\nusing RequestParseFailureSharedPtr = std::shared_ptr<RequestParseFailure>;\n\n/**\n * Abstract Kafka request.\n * Contains data present in every request (the header with request key, version, etc.).\n * @see http://kafka.apache.org/protocol.html#protocol_messages\n */\nclass AbstractRequest {\npublic:\n  virtual ~AbstractRequest() = default;\n\n  /**\n   * Constructs a request with given header data.\n   * @param request_header request's header.\n   */\n  AbstractRequest(const RequestHeader& request_header) : request_header_{request_header} {};\n\n  /**\n   * Computes the size of this request, if it were to be serialized.\n   * @return serialized size of request\n   */\n  virtual uint32_t computeSize() const PURE;\n\n  /**\n   * Encode the contents of this request into a given 
buffer.\n   * @param dst buffer instance to keep serialized message\n   */\n  virtual uint32_t encode(Buffer::Instance& dst) const PURE;\n\n  /**\n   * Request's header.\n   */\n  const RequestHeader request_header_;\n};\n\nusing AbstractRequestSharedPtr = std::shared_ptr<AbstractRequest>;\n\n/**\n * Concrete request that carries data particular to given request type.\n * @param Data concrete request data type.\n */\ntemplate <typename Data> class Request : public AbstractRequest {\npublic:\n  /**\n   * Request header fields need to be initialized by user in case of newly created requests.\n   */\n  Request(const RequestHeader& request_header, const Data& data)\n      : AbstractRequest{request_header}, data_{data} {};\n\n  /**\n   * Compute the size of request, which includes both the request header and its real data.\n   */\n  uint32_t computeSize() const override {\n    const EncodingContext context{request_header_.api_version_};\n    uint32_t result{0};\n    // Compute size of header.\n    result += context.computeSize(request_header_);\n    // Compute size of request data.\n    result += context.computeSize(data_);\n    return result;\n  }\n\n  /**\n   * Encodes given request into a buffer, with any extra configuration carried by the context.\n   */\n  uint32_t encode(Buffer::Instance& dst) const override {\n    EncodingContext context{request_header_.api_version_};\n    uint32_t written{0};\n    // Encode request header.\n    written += context.encode(request_header_, dst);\n    // Encode request-specific data.\n    written += context.encode(data_, dst);\n    return written;\n  }\n\n  bool operator==(const Request<Data>& rhs) const {\n    return request_header_ == rhs.request_header_ && data_ == rhs.data_;\n  };\n\nprivate:\n  const Data data_;\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/kafka_request_parser.cc",
    "content": "#include \"extensions/filters/network/kafka/kafka_request_parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nconst RequestParserResolver& RequestParserResolver::getDefaultInstance() {\n  CONSTRUCT_ON_FIRST_USE(RequestParserResolver);\n}\n\nRequestParseResponse RequestStartParser::parse(absl::string_view& data) {\n  request_length_.feed(data);\n  if (request_length_.ready()) {\n    context_->remaining_request_size_ = request_length_.get();\n    return RequestParseResponse::nextParser(\n        std::make_shared<RequestHeaderParser>(parser_resolver_, context_));\n  } else {\n    return RequestParseResponse::stillWaiting();\n  }\n}\n\nuint32_t RequestHeaderDeserializer::feed(absl::string_view& data) {\n  uint32_t consumed = 0;\n\n  consumed += common_part_deserializer_.feed(data);\n  if (common_part_deserializer_.ready()) {\n    const auto request_header = common_part_deserializer_.get();\n    if (requestUsesTaggedFieldsInHeader(request_header.api_key_, request_header.api_version_)) {\n      tagged_fields_present_ = true;\n      consumed += tagged_fields_deserializer_.feed(data);\n    }\n  }\n\n  return consumed;\n}\n\nbool RequestHeaderDeserializer::ready() const {\n  // Header is only fully parsed after we have processed everything, including tagged fields (if\n  // they are present).\n  return common_part_deserializer_.ready() &&\n         (tagged_fields_present_ ? 
tagged_fields_deserializer_.ready() : true);\n}\n\nRequestHeader RequestHeaderDeserializer::get() const {\n  auto result = common_part_deserializer_.get();\n  if (tagged_fields_present_) {\n    result.tagged_fields_ = tagged_fields_deserializer_.get();\n  }\n  return result;\n}\n\nRequestParseResponse RequestHeaderParser::parse(absl::string_view& data) {\n  context_->remaining_request_size_ -= deserializer_->feed(data);\n  // One of the two needs must have happened when feeding finishes:\n  // - deserializer has consumed all the bytes it needed,\n  // - or all the data has been consumed (but deserializer might still need data to be ready).\n  ASSERT(deserializer_->ready() || data.empty());\n  if (deserializer_->ready()) {\n    RequestHeader request_header = deserializer_->get();\n    context_->request_header_ = request_header;\n    RequestParserSharedPtr next_parser = parser_resolver_.createParser(\n        request_header.api_key_, request_header.api_version_, context_);\n    return RequestParseResponse::nextParser(next_parser);\n  } else {\n    return RequestParseResponse::stillWaiting();\n  }\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/kafka_request_parser.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/filters/network/kafka/kafka_request.h\"\n#include \"extensions/filters/network/kafka/parser.h\"\n#include \"extensions/filters/network/kafka/tagged_fields.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nusing RequestParseResponse = ParseResponse<AbstractRequestSharedPtr, RequestParseFailureSharedPtr>;\nusing RequestParser = Parser<AbstractRequestSharedPtr, RequestParseFailureSharedPtr>;\nusing RequestParserSharedPtr = std::shared_ptr<RequestParser>;\n\n/**\n * Context that is shared between parsers that are handling the same single message.\n */\nstruct RequestContext {\n\n  /**\n   * Bytes left to consume.\n   */\n  uint32_t remaining_request_size_{0};\n\n  /**\n   * Request header that gets filled in during the parse.\n   */\n  RequestHeader request_header_{-1, -1, -1, absl::nullopt};\n\n  /**\n   * Bytes left to consume.\n   */\n  uint32_t& remaining() { return remaining_request_size_; }\n\n  /**\n   * Returns data needed for construction of parse failure message.\n   */\n  const RequestHeader asFailureData() const { return request_header_; }\n};\n\nusing RequestContextSharedPtr = std::shared_ptr<RequestContext>;\n\n/**\n * Request decoder configuration object.\n * Resolves the parser that will be responsible for consuming the request-specific data.\n * In other words: provides (api_key, api_version) -> Parser function.\n */\nclass RequestParserResolver {\npublic:\n  virtual ~RequestParserResolver() = default;\n\n  /**\n   * Creates a parser that is going to process data specific for given api_key & api_version.\n   * @param api_key request type.\n   * @param api_version request version.\n   * @param context context to be used by parser.\n   * @return parser that is capable of processing data for given request type & version.\n   */\n  virtual 
RequestParserSharedPtr createParser(int16_t api_key, int16_t api_version,\n                                              RequestContextSharedPtr context) const;\n\n  /**\n   * Return default resolver, that uses request's api key and version to provide a matching parser.\n   */\n  static const RequestParserResolver& getDefaultInstance();\n};\n\n/**\n * Request parser responsible for consuming request length and setting up context with this data.\n * @see http://kafka.apache.org/protocol.html#protocol_common\n */\nclass RequestStartParser : public RequestParser {\npublic:\n  RequestStartParser(const RequestParserResolver& parser_resolver)\n      : parser_resolver_{parser_resolver}, context_{std::make_shared<RequestContext>()} {};\n\n  /**\n   * Consumes 4 bytes (INT32) as request length and updates the context with that value.\n   * @return RequestHeaderParser instance to process request header.\n   */\n  RequestParseResponse parse(absl::string_view& data) override;\n\n  const RequestContextSharedPtr contextForTest() const { return context_; }\n\nprivate:\n  const RequestParserResolver& parser_resolver_;\n  const RequestContextSharedPtr context_;\n  Int32Deserializer request_length_;\n};\n\n/**\n * Deserializer that extracts request header (4 fields).\n * Can throw, as one of the fields (client-id) can throw (nullable string with invalid length).\n * @see http://kafka.apache.org/protocol.html#protocol_messages\n */\nclass RequestHeaderDeserializer : public Deserializer<RequestHeader>,\n                                  private Logger::Loggable<Logger::Id::kafka> {\n\n  // Request header, no matter what, has at least 4 fields. 
They are extracted here.\n  using CommonPartDeserializer =\n      CompositeDeserializerWith4Delegates<RequestHeader, Int16Deserializer, Int16Deserializer,\n                                          Int32Deserializer, NullableStringDeserializer>;\n\npublic:\n  RequestHeaderDeserializer() = default;\n\n  uint32_t feed(absl::string_view& data) override;\n  bool ready() const override;\n  RequestHeader get() const override;\n\nprivate:\n  // Deserializer for the first 4 fields, that are present in every request header.\n  CommonPartDeserializer common_part_deserializer_;\n\n  // Tagged fields are used only in request header v2.\n  // This flag will be set depending on common part's result (api key & version), and will decide\n  // whether we want to feed data to tagged fields deserializer.\n  bool tagged_fields_present_;\n  TaggedFieldsDeserializer tagged_fields_deserializer_;\n};\n\nusing RequestHeaderDeserializerPtr = std::unique_ptr<RequestHeaderDeserializer>;\n\n/**\n * Parser responsible for extracting the request header and putting it into context.\n * On a successful parse the resolved data (api_key & api_version) is used to determine the next\n * parser.\n * @see http://kafka.apache.org/protocol.html#protocol_messages\n */\nclass RequestHeaderParser : public RequestParser {\npublic:\n  // Default constructor.\n  RequestHeaderParser(const RequestParserResolver& parser_resolver, RequestContextSharedPtr context)\n      : RequestHeaderParser{parser_resolver, context,\n                            std::make_unique<RequestHeaderDeserializer>()} {};\n\n  // Constructor visible for testing (allows for initial parser injection).\n  RequestHeaderParser(const RequestParserResolver& parser_resolver, RequestContextSharedPtr context,\n                      RequestHeaderDeserializerPtr deserializer)\n      : parser_resolver_{parser_resolver}, context_{context}, deserializer_{\n                                                                  std::move(deserializer)} {};\n\n  
/**\n   * Uses data provided to compute request header.\n   * @return Parser instance responsible for processing rest of the message\n   */\n  RequestParseResponse parse(absl::string_view& data) override;\n\n  const RequestContextSharedPtr contextForTest() const { return context_; }\n\nprivate:\n  const RequestParserResolver& parser_resolver_;\n  const RequestContextSharedPtr context_;\n  RequestHeaderDeserializerPtr deserializer_;\n};\n\n/**\n * Sentinel parser that is responsible for consuming message bytes for messages that had unsupported\n * api_key & api_version. It does not attempt to capture any data, just throws it away until end of\n * message.\n */\nclass SentinelParser : public AbstractSentinelParser<RequestContextSharedPtr, RequestParseResponse>,\n                       public RequestParser {\npublic:\n  SentinelParser(RequestContextSharedPtr context) : AbstractSentinelParser{context} {};\n\n  RequestParseResponse parse(absl::string_view& data) override {\n    return AbstractSentinelParser::parse(data);\n  }\n};\n\n/**\n * Request parser uses a single deserializer to construct a request object.\n * This parser is responsible for consuming request-specific data (e.g. 
topic names) and always\n * returns a parsed message.\n * @param RequestType request class.\n * @param DeserializerType deserializer type corresponding to request class (should be subclass of\n * Deserializer<RequestType>).\n */\ntemplate <typename RequestType, typename DeserializerType>\nclass RequestDataParser : public RequestParser {\npublic:\n  /**\n   * Create a parser with given context.\n   * @param context parse context containing request header.\n   */\n  RequestDataParser(RequestContextSharedPtr context) : context_{context} {};\n\n  /**\n   * Consume enough data to fill in deserializer and receive the parsed request.\n   * Fill in request's header with data stored in context.\n   */\n  RequestParseResponse parse(absl::string_view& data) override {\n    context_->remaining_request_size_ -= deserializer.feed(data);\n\n    if (deserializer.ready()) {\n      if (0 == context_->remaining_request_size_) {\n        // After a successful parse, there should be nothing left - we have consumed all the bytes.\n        AbstractRequestSharedPtr msg =\n            std::make_shared<Request<RequestType>>(context_->request_header_, deserializer.get());\n        return RequestParseResponse::parsedMessage(msg);\n      } else {\n        // The message makes no sense, the deserializer that matches the schema consumed all\n        // necessary data, but there are still bytes in this message.\n        return RequestParseResponse::nextParser(std::make_shared<SentinelParser>(context_));\n      }\n    } else {\n      return RequestParseResponse::stillWaiting();\n    }\n  }\n\n  const RequestContextSharedPtr contextForTest() const { return context_; }\n\nprotected:\n  RequestContextSharedPtr context_;\n  DeserializerType deserializer; // underlying request-specific deserializer\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/kafka_response.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/kafka/external/serialization_composite.h\"\n#include \"extensions/filters/network/kafka/serialization.h\"\n#include \"extensions/filters/network/kafka/tagged_fields.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Decides if response with given api key & version should have tagged fields in header.\n * Bear in mind, that ApiVersions responses DO NOT contain tagged fields in header (despite having\n * flexible versions) as per\n * https://github.com/apache/kafka/blob/2.4.0/clients/src/main/resources/common/message/ApiVersionsResponse.json#L24\n * This method gets implemented in generated code through 'kafka_response_resolver_cc.j2'.\n *\n * @param api_key Kafka request key.\n * @param api_version Kafka request's version.\n * @return Whether tagged fields should be used for this request.\n */\nbool responseUsesTaggedFieldsInHeader(const uint16_t api_key, const uint16_t api_version);\n\n/**\n * Represents Kafka response metadata: expected api key, version and correlation id.\n * @see http://kafka.apache.org/protocol.html#protocol_messages\n */\nstruct ResponseMetadata {\n  ResponseMetadata(const int16_t api_key, const int16_t api_version, const int32_t correlation_id)\n      : ResponseMetadata{api_key, api_version, correlation_id, TaggedFields{}} {};\n\n  ResponseMetadata(const int16_t api_key, const int16_t api_version, const int32_t correlation_id,\n                   const TaggedFields& tagged_fields)\n      : api_key_{api_key}, api_version_{api_version}, correlation_id_{correlation_id},\n        tagged_fields_{tagged_fields} {};\n\n  uint32_t computeSize(const EncodingContext& context) const {\n    uint32_t result{0};\n    result += context.computeSize(correlation_id_);\n    if (responseUsesTaggedFieldsInHeader(api_key_, api_version_)) {\n      result += context.computeCompactSize(tagged_fields_);\n    }\n    return result;\n  }\n\n  
uint32_t encode(Buffer::Instance& dst, EncodingContext& context) const {\n    uint32_t written{0};\n    // Encode correlation id (api key / version are not present in responses).\n    written += context.encode(correlation_id_, dst);\n    if (responseUsesTaggedFieldsInHeader(api_key_, api_version_)) {\n      written += context.encodeCompact(tagged_fields_, dst);\n    }\n    return written;\n  }\n\n  bool operator==(const ResponseMetadata& rhs) const {\n    return api_key_ == rhs.api_key_ && api_version_ == rhs.api_version_ &&\n           correlation_id_ == rhs.correlation_id_ && tagged_fields_ == rhs.tagged_fields_;\n  };\n\n  const int16_t api_key_;\n  const int16_t api_version_;\n  const int32_t correlation_id_;\n  const TaggedFields tagged_fields_;\n};\n\nusing ResponseMetadataSharedPtr = std::shared_ptr<ResponseMetadata>;\n\n/**\n * Abstract response object, carrying data related to every response.\n * @see http://kafka.apache.org/protocol.html#protocol_messages\n */\nclass AbstractResponse {\npublic:\n  virtual ~AbstractResponse() = default;\n\n  /**\n   * Constructs a request with given metadata.\n   * @param metadata response metadata.\n   */\n  AbstractResponse(const ResponseMetadata& metadata) : metadata_{metadata} {};\n\n  /**\n   * Computes the size of this response, if it were to be serialized.\n   * @return serialized size of response.\n   */\n  virtual uint32_t computeSize() const PURE;\n\n  /**\n   * Encode the contents of this response into a given buffer.\n   * @param dst buffer instance to keep serialized message.\n   */\n  virtual uint32_t encode(Buffer::Instance& dst) const PURE;\n\n  /**\n   * Response's metadata.\n   */\n  const ResponseMetadata metadata_;\n};\n\nusing AbstractResponseSharedPtr = std::shared_ptr<AbstractResponse>;\n\n/**\n * Concrete response that carries data particular to given response type.\n * @param Data concrete response data type.\n */\ntemplate <typename Data> class Response : public AbstractResponse {\npublic:\n  
Response(const ResponseMetadata& metadata, const Data& data)\n      : AbstractResponse{metadata}, data_{data} {};\n\n  /**\n   * Compute the size of response, which includes both the response header (correlation id) and\n   * real data.\n   */\n  uint32_t computeSize() const override {\n    const EncodingContext context{metadata_.api_version_};\n    uint32_t result{0};\n    // Compute size of header.\n    result += context.computeSize(metadata_);\n    // Compute size of response data.\n    result += context.computeSize(data_);\n    return result;\n  }\n\n  /**\n   * Encodes given response into a buffer, with any extra configuration carried by the context.\n   */\n  uint32_t encode(Buffer::Instance& dst) const override {\n    EncodingContext context{metadata_.api_version_};\n    uint32_t written{0};\n    // Encode response header.\n    written += context.encode(metadata_, dst);\n    // Encode response-specific data.\n    written += context.encode(data_, dst);\n    return written;\n  }\n\n  bool operator==(const Response<Data>& rhs) const {\n    return metadata_ == rhs.metadata_ && data_ == rhs.data_;\n  };\n\nprivate:\n  const Data data_;\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/kafka_response_parser.cc",
    "content": "#include \"extensions/filters/network/kafka/kafka_response_parser.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nconst ResponseParserResolver& ResponseParserResolver::getDefaultInstance() {\n  CONSTRUCT_ON_FIRST_USE(ResponseParserResolver);\n}\n\nResponseParseResponse ResponseHeaderParser::parse(absl::string_view& data) {\n  length_deserializer_.feed(data);\n  if (!length_deserializer_.ready()) {\n    return ResponseParseResponse::stillWaiting();\n  }\n\n  correlation_id_deserializer_.feed(data);\n  if (!correlation_id_deserializer_.ready()) {\n    return ResponseParseResponse::stillWaiting();\n  }\n\n  if (!context_->api_info_set_) {\n    // We have consumed first two response header fields: payload length and correlation id.\n    context_->remaining_response_size_ = length_deserializer_.get();\n    context_->remaining_response_size_ -= sizeof(context_->correlation_id_);\n    context_->correlation_id_ = correlation_id_deserializer_.get();\n\n    // We have correlation id now, so we can see what is the expected response api key & version.\n    const ExpectedResponseSpec spec = getResponseSpec(context_->correlation_id_);\n    context_->api_key_ = spec.first;\n    context_->api_version_ = spec.second;\n\n    // Mark that version data has been set, so we do not attempt to re-initialize again.\n    context_->api_info_set_ = true;\n  }\n\n  // Depending on response's api key & version, we might need to parse tagged fields element.\n  if (responseUsesTaggedFieldsInHeader(context_->api_key_, context_->api_version_)) {\n    context_->remaining_response_size_ -= tagged_fields_deserializer_.feed(data);\n    if (tagged_fields_deserializer_.ready()) {\n      context_->tagged_fields_ = tagged_fields_deserializer_.get();\n    } else {\n      return ResponseParseResponse::stillWaiting();\n    }\n  }\n\n  // At this stage, we have fully setup the context - we know the 
response's api key & version,\n  // so we can safely create the payload parser.\n  auto next_parser = parser_resolver_.createParser(context_);\n  return ResponseParseResponse::nextParser(next_parser);\n}\n\nExpectedResponseSpec ResponseHeaderParser::getResponseSpec(const int32_t correlation_id) {\n  const auto it = expected_responses_->find(correlation_id);\n  if (it != expected_responses_->end()) {\n    const auto spec = it->second;\n    expected_responses_->erase(it);\n    return spec;\n  } else {\n    // Response data should always be present in expected responses before response is to be parsed.\n    throw EnvoyException(\n        absl::StrCat(\"no response metadata registered for correlation_id \", correlation_id));\n  }\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/kafka_response_parser.h",
    "content": "#pragma once\n\n#include <map>\n#include <memory>\n\n#include \"extensions/filters/network/kafka/kafka_response.h\"\n#include \"extensions/filters/network/kafka/parser.h\"\n#include \"extensions/filters/network/kafka/tagged_fields.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nusing ResponseParseResponse = ParseResponse<AbstractResponseSharedPtr, ResponseMetadataSharedPtr>;\nusing ResponseParser = Parser<AbstractResponseSharedPtr, ResponseMetadataSharedPtr>;\nusing ResponseParserSharedPtr = std::shared_ptr<ResponseParser>;\n\n/**\n * Context that is shared between parsers that are handling the same single message.\n */\nstruct ResponseContext {\n\n  /**\n   * Whether the 'api_key_' & 'api_version_' fields have been initialized.\n   */\n  bool api_info_set_ = false;\n\n  /**\n   * Api key of response that's being parsed.\n   */\n  int16_t api_key_;\n\n  /**\n   * Api version of response that's being parsed.\n   */\n  int16_t api_version_;\n\n  /**\n   * Bytes left to process.\n   */\n  uint32_t remaining_response_size_;\n\n  /**\n   * Response's correlation id.\n   */\n  int32_t correlation_id_;\n\n  /**\n   * Response's tagged fields.\n   */\n  TaggedFields tagged_fields_;\n\n  /**\n   * Bytes left to consume.\n   */\n  uint32_t& remaining() { return remaining_response_size_; }\n\n  /**\n   * Returns data needed for construction of parse failure message.\n   */\n  const ResponseMetadata asFailureData() const {\n    return {api_key_, api_version_, correlation_id_, tagged_fields_};\n  }\n};\n\nusing ResponseContextSharedPtr = std::shared_ptr<ResponseContext>;\n\n// Helper container for response api key & version.\nusing ExpectedResponseSpec = std::pair<int16_t, int16_t>;\n// Response metadata store (maps from correlation id to api key & version).\nusing ExpectedResponses = std::map<int32_t, ExpectedResponseSpec>;\nusing ExpectedResponsesSharedPtr = std::shared_ptr<ExpectedResponses>;\n\n/**\n * 
Response decoder configuration object.\n * Resolves the parser that will be responsible for consuming the response.\n * In other words: provides (api_key, api_version) -> Parser function.\n */\nclass ResponseParserResolver {\npublic:\n  virtual ~ResponseParserResolver() = default;\n\n  /**\n   * Creates a parser that is going to process data specific for given response.\n   * @param metadata expected response metadata.\n   * @return parser that is capable of processing response.\n   */\n  virtual ResponseParserSharedPtr createParser(ResponseContextSharedPtr metadata) const;\n\n  /**\n   * Return default resolver, that uses response's api key and version to provide a matching parser.\n   */\n  static const ResponseParserResolver& getDefaultInstance();\n};\n\n/**\n * Response parser responsible for consuming response header (payload length and correlation id) and\n * setting up context with this data.\n * @see http://kafka.apache.org/protocol.html#protocol_common\n */\nclass ResponseHeaderParser : public ResponseParser {\npublic:\n  /**\n   * Creates a parser with necessary dependencies (store of expected responses & parser resolver).\n   * @param expected_responses store containing mapping from response correlation id to api key &\n   * version.\n   * @param parser_resolver factory used to create the following payload parser.\n   */\n  ResponseHeaderParser(ExpectedResponsesSharedPtr expected_responses,\n                       const ResponseParserResolver& parser_resolver)\n      : expected_responses_{expected_responses},\n        parser_resolver_{parser_resolver}, context_{std::make_shared<ResponseContext>()} {};\n\n  /**\n   * Consumes 8 bytes (2 x INT32) as response length and correlation id.\n   * Uses correlation id to resolve response's api version & key (throws if not possible).\n   * Updates the context with data resolved, and then creates the following payload parser using the\n   * parser resolver.\n   * @return ResponseParser instance to process the 
response payload.\n   */\n  ResponseParseResponse parse(absl::string_view& data) override;\n\n  const ResponseContextSharedPtr contextForTest() const { return context_; }\n\nprivate:\n  ExpectedResponseSpec getResponseSpec(int32_t correlation_id);\n\n  const ExpectedResponsesSharedPtr expected_responses_;\n  const ResponseParserResolver& parser_resolver_;\n  const ResponseContextSharedPtr context_;\n\n  Int32Deserializer length_deserializer_;\n  Int32Deserializer correlation_id_deserializer_;\n  TaggedFieldsDeserializer tagged_fields_deserializer_;\n};\n\n/**\n * Sentinel parser that is responsible for consuming message bytes for messages that had unsupported\n * api_key & api_version. It does not attempt to capture any data, just throws it away until end of\n * message.\n */\nclass SentinelResponseParser\n    : public AbstractSentinelParser<ResponseContextSharedPtr, ResponseParseResponse>,\n      public ResponseParser {\npublic:\n  SentinelResponseParser(ResponseContextSharedPtr context) : AbstractSentinelParser{context} {};\n\n  ResponseParseResponse parse(absl::string_view& data) override {\n    return AbstractSentinelParser::parse(data);\n  }\n};\n\n/**\n * Response parser uses a single deserializer to construct a response object.\n * This parser is responsible for consuming response-specific data (e.g. 
topic names) and always\n * returns a parsed message.\n * @param ResponseType response class.\n * @param DeserializerType deserializer type corresponding to response class (should be subclass of\n * Deserializer<ResponseType>).\n */\ntemplate <typename ResponseType, typename DeserializerType>\nclass ResponseDataParser : public ResponseParser {\npublic:\n  /**\n   * Create a parser for given response metadata.\n   * @param metadata expected message metadata.\n   */\n  ResponseDataParser(ResponseContextSharedPtr context) : context_{context} {};\n\n  /**\n   * Consume enough data to fill in deserializer and receive the parsed response.\n   * Fill in response's header with data stored in context.\n   * @param data data to process.\n   */\n  ResponseParseResponse parse(absl::string_view& data) override {\n    context_->remaining_response_size_ -= deserializer_.feed(data);\n\n    if (deserializer_.ready()) {\n      if (0 == context_->remaining_response_size_) {\n        // After a successful parse, there should be nothing left - we have consumed all the bytes.\n        const ResponseMetadata metadata = {context_->api_key_, context_->api_version_,\n                                           context_->correlation_id_, context_->tagged_fields_};\n        const AbstractResponseSharedPtr response =\n            std::make_shared<Response<ResponseType>>(metadata, deserializer_.get());\n        return ResponseParseResponse::parsedMessage(response);\n      } else {\n        // The message makes no sense, the deserializer that matches the schema consumed all\n        // necessary data, but there are still bytes in this message.\n        return ResponseParseResponse::nextParser(\n            std::make_shared<SentinelResponseParser>(context_));\n      }\n    } else {\n      return ResponseParseResponse::stillWaiting();\n    }\n  }\n\n  const ResponseContextSharedPtr contextForTest() const { return context_; }\n\nprivate:\n  ResponseContextSharedPtr context_;\n  DeserializerType 
deserializer_; // Underlying response-specific deserializer.\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/kafka_types.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Nullable string used by Kafka.\n */\nusing NullableString = absl::optional<std::string>;\n\n/**\n * Bytes array used by Kafka.\n */\nusing Bytes = std::vector<unsigned char>;\n\n/**\n * Nullable bytes array used by Kafka.\n */\nusing NullableBytes = absl::optional<Bytes>;\n\n/**\n * Kafka array of elements of type T.\n */\ntemplate <typename T> using NullableArray = absl::optional<std::vector<T>>;\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/parser.h",
    "content": "#pragma once\n\n#include <sstream>\n\n#include \"common/common/logger.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\ntemplate <typename MessageType, typename FailureDataType> class ParseResponse;\n\n/**\n * Parser is responsible for consuming data relevant to some part of a message, and then returning\n * the decision how the parsing should continue.\n */\ntemplate <typename MessageType, typename FailureDataType>\nclass Parser : public Logger::Loggable<Logger::Id::kafka> {\npublic:\n  virtual ~Parser() = default;\n\n  /**\n   * Submit data to be processed by parser, will consume as much data as it is necessary to reach\n   * the conclusion what should be the next parse step.\n   * @param data bytes to be processed, will be updated by parser if any have been consumed.\n   * @return parse status - decision what should be done with current parser (keep/replace).\n   */\n  virtual ParseResponse<MessageType, FailureDataType> parse(absl::string_view& data) PURE;\n};\n\ntemplate <typename MessageType, typename FailureDataType>\nusing ParserSharedPtr = std::shared_ptr<Parser<MessageType, FailureDataType>>;\n\n/**\n * Three-state holder representing one of:\n * - parser still needs data (`stillWaiting`),\n * - parser is finished, and following parser should be used to process the rest of data\n * (`nextParser`),\n * - parser is finished, and parse result is attached (`parsedMessage` or `parseFailure`).\n */\ntemplate <typename MessageType, typename FailureDataType> class ParseResponse {\npublic:\n  using FailureType = FailureDataType;\n\n  /**\n   * Constructs a response that states that parser still needs data and should not be replaced.\n   */\n  static ParseResponse stillWaiting() { return {nullptr, nullptr, nullptr}; }\n\n  /**\n   * Constructs a response that states that parser is finished and should be replaced by given\n   * parser.\n   */\n  static 
ParseResponse nextParser(ParserSharedPtr<MessageType, FailureDataType> next_parser) {\n    return {next_parser, nullptr, nullptr};\n  };\n\n  /**\n   * Constructs a response that states that parser is finished, the message is ready, and parsing\n   * can start anew for next message.\n   */\n  static ParseResponse parsedMessage(MessageType message) { return {nullptr, message, nullptr}; };\n\n  /**\n   * Constructs a response that states that parser is finished, the message could not be parsed\n   * properly, and parsing can start anew for next message.\n   */\n  static ParseResponse parseFailure(FailureDataType failure_data) {\n    return {nullptr, nullptr, failure_data};\n  };\n\n  /**\n   * If response contains a next parser or a parse result.\n   */\n  bool hasData() const {\n    return (next_parser_ != nullptr) || (message_ != nullptr) || (failure_data_ != nullptr);\n  }\n\nprivate:\n  ParseResponse(ParserSharedPtr<MessageType, FailureDataType> parser, MessageType message,\n                FailureDataType failure_data)\n      : next_parser_{parser}, message_{message}, failure_data_{failure_data} {};\n\npublic:\n  ParserSharedPtr<MessageType, FailureDataType> next_parser_;\n  MessageType message_;\n  FailureDataType failure_data_;\n};\n\ntemplate <typename ContextType, typename ResponseType> class AbstractSentinelParser {\npublic:\n  AbstractSentinelParser(ContextType context) : context_{context} {};\n\n  ResponseType parse(absl::string_view& data) {\n    const uint32_t min = std::min<uint32_t>(context_->remaining(), data.size());\n    data = {data.data() + min, data.size() - min};\n    context_->remaining() -= min;\n    if (0 == context_->remaining()) {\n      using FailureType = typename ResponseType::FailureType::element_type;\n      auto failure_data = std::make_shared<FailureType>(context_->asFailureData());\n      return ResponseType::parseFailure(failure_data);\n    } else {\n      return ResponseType::stillWaiting();\n    }\n  }\n\n  const ContextType 
contextForTest() const { return context_; }\n\nprivate:\n  ContextType context_;\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/complex_type_template.j2",
    "content": "{#\n  Template for structure representing a composite entity in Kafka protocol (request or response).\n  Rendered templates for each structure in Kafka protocol will be put into 'requests.h'\n  or 'responses.h'.\n\n  Each structure is capable of holding all versions of given entity (what means its fields are\n  actually a superset of union of all versions' fields). Each version has a dedicated deserializer\n  (named \"${name}V${version}Deserializer\" e.g. ProduceRequestV0Deserializer or\n  FetchResponseV1Deserializer), which calls the matching constructor.\n\n  To serialize, it is necessary to pass the encoding context (that contains the version that's\n  being serialized). Depending on the version, the fields will be written to the buffer.\n#}\nstruct {{ complex_type.name }} {\n\n  {#\n     Constructors invoked by deserializers.\n     Each constructor has a signature that matches the fields in at least one version (as sometimes\n     there are different Kafka versions that are actually composed of precisely the same fields).\n  #}\n  {% for field in complex_type.fields %}\n  const {{ field.field_declaration() }}_;{% endfor %}\n  {% for constructor in complex_type.compute_constructors() %}\n  // constructor used in versions: {{ constructor['versions'] }}\n  {{ constructor['full_declaration'] }}{% endfor %}\n\n  {# For every field that's used in version, just compute its size using an encoder. 
#}\n  uint32_t computeSize(const EncodingContext& encoder) const {\n    const int16_t api_version = encoder.apiVersion();\n    uint32_t written{0};\n\n    {% for spec in complex_type.compute_serialization_specs() %}\n    if (api_version >= {{ spec.versions[0] }} && api_version < {{ spec.versions[-1] + 1 }}) {\n      written += encoder.{{ spec.compute_size_method_name }}({{ spec.field.name }}_);\n    }\n    {% endfor %}\n\n    return written;\n  }\n\n  uint32_t computeCompactSize(const EncodingContext& encoder) const {\n    return computeSize(encoder);\n  }\n\n  {# For every field that's used in version, just serialize it. #}\n  uint32_t encode(Buffer::Instance& dst, EncodingContext& encoder) const {\n    const int16_t api_version = encoder.apiVersion();\n    uint32_t written{0};\n\n    {% for spec in complex_type.compute_serialization_specs() %}\n    if (api_version >= {{ spec.versions[0] }} && api_version < {{ spec.versions[-1] + 1 }}) {\n      written += encoder.{{ spec.encode_method_name }}({{ spec.field.name }}_, dst);\n    }\n    {% endfor %}\n\n    return written;\n  }\n\n  uint32_t encodeCompact(Buffer::Instance& dst, EncodingContext& encoder) const {\n    return encode(dst, encoder);\n  }\n\n  {% if complex_type.fields|length > 0 %}\n  bool operator==(const {{ complex_type.name }}& rhs) const {\n  {% else %}\n  bool operator==(const {{ complex_type.name }}&) const {\n  {% endif %}\n    return true{% for field in complex_type.fields %}\n    && {{ field.name }}_ == rhs.{{ field.name }}_{% endfor %};\n  };\n\n};\n\n{#\n  Each structure version has a deserializer that matches the structure's field list.\n#}\n{% for field_list in complex_type.compute_field_lists() %}\nclass {{ complex_type.name }}V{{ field_list.version }}Deserializer:\n  public CompositeDeserializerWith{{ field_list.field_count() }}Delegates<\n    {{ complex_type.name }}\n    {% for field in field_list.used_fields() %},\n      {{ field.deserializer_name_in_version(field_list.version, 
field_list.uses_compact_fields) }}\n    {% endfor %}>{};\n{% endfor %}\n\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/generator.py",
    "content": "#!/usr/bin/python\n\n# Main library file containing all the protocol generation logic.\n\n\ndef generate_main_code(type, main_header_file, resolver_cc_file, metrics_header_file, input_files):\n  \"\"\"\n  Main code generator.\n\n  Takes input files and processes them into structures representing a Kafka message (request or\n  response).\n\n  These responses are then used to create:\n  - main_header_file - contains definitions of Kafka structures and their deserializers\n  - resolver_cc_file - contains request api key & version mapping to deserializer (from header file)\n  - metrics_header_file - contains metrics with names corresponding to messages\n  \"\"\"\n  processor = StatefulProcessor()\n  # Parse provided input files.\n  messages = processor.parse_messages(input_files)\n\n  complex_type_template = RenderingHelper.get_template('complex_type_template.j2')\n  parsers_template = RenderingHelper.get_template(\"%s_parser.j2\" % type)\n\n  main_header_contents = ''\n\n  for message in messages:\n    # For each child structure that is used by request/response, render its matching C++ code.\n    dependencies = message.compute_declaration_chain()\n    for dependency in dependencies:\n      main_header_contents += complex_type_template.render(complex_type=dependency)\n    # Each top-level structure (e.g. 
FetchRequest/FetchResponse) needs corresponding parsers.\n    main_header_contents += parsers_template.render(complex_type=message)\n\n  # Full file with headers, namespace declaration etc.\n  template = RenderingHelper.get_template(\"%ss_h.j2\" % type)\n  contents = template.render(contents=main_header_contents)\n\n  # Generate main header file.\n  with open(main_header_file, 'w') as fd:\n    fd.write(contents)\n\n  # Generate ...resolver.cc file.\n  template = RenderingHelper.get_template(\"kafka_%s_resolver_cc.j2\" % type)\n  contents = template.render(message_types=messages)\n  with open(resolver_cc_file, 'w') as fd:\n    fd.write(contents)\n\n  # Generate ...metrics.h file.\n  template = RenderingHelper.get_template(\"%s_metrics_h.j2\" % type)\n  contents = template.render(message_types=messages)\n  with open(metrics_header_file, 'w') as fd:\n    fd.write(contents)\n\n\ndef generate_test_code(type, header_test_cc_file, codec_test_cc_file, utilities_cc_file,\n                       input_files):\n  \"\"\"\n  Test code generator.\n\n  Takes input files and processes them into structures representing a Kafka message (request or\n  response).\n\n  These responses are then used to create:\n  - header_test_cc_file - tests for basic message serialization deserialization,\n  - codec_test_cc_file - tests involving codec and Request/ResponseParserResolver,\n  - utilities_cc_file - utilities for creating sample messages.\n  \"\"\"\n  processor = StatefulProcessor()\n  # Parse provided input files.\n  messages = processor.parse_messages(input_files)\n\n  # Generate header-test file.\n  template = RenderingHelper.get_template(\"%ss_test_cc.j2\" % type)\n  contents = template.render(message_types=messages)\n  with open(header_test_cc_file, 'w') as fd:\n    fd.write(contents)\n\n  # Generate codec-test file.\n  template = RenderingHelper.get_template(\"%s_codec_%s_test_cc.j2\" % (type, type))\n  contents = template.render(message_types=messages)\n  with 
open(codec_test_cc_file, 'w') as fd:\n    fd.write(contents)\n\n  # Generate utilities file.\n  template = RenderingHelper.get_template(\"%s_utilities_cc.j2\" % type)\n  contents = template.render(message_types=messages)\n  with open(utilities_cc_file, 'w') as fd:\n    fd.write(contents)\n\n\nclass StatefulProcessor:\n  \"\"\"\n  Helper entity that keeps state during the processing.\n  Some state needs to be shared across multiple message types, as we need to handle identical\n  sub-type names (e.g. both AlterConfigsRequest & IncrementalAlterConfigsRequest have child\n  AlterConfigsResource, what would cause a compile-time error if we were to handle it trivially).\n  \"\"\"\n\n  def __init__(self):\n    # Complex types that have been encountered during processing.\n    self.known_types = set()\n    # Name of parent message type that's being processed right now.\n    self.currently_processed_message_type = None\n    # Common structs declared in this message type.\n    self.common_structs = {}\n\n  def parse_messages(self, input_files):\n    \"\"\"\n    Parse request/response structures from provided input files.\n    \"\"\"\n    import re\n    import json\n\n    messages = []\n    # Sort the input files, as the processing is stateful, as we want the same order every time.\n    input_files.sort()\n    # For each specification file, remove comments, and parse the remains.\n    for input_file in input_files:\n      try:\n        with open(input_file, 'r') as fd:\n          raw_contents = fd.read()\n          without_comments = re.sub(r'\\s*//.*\\n', '\\n', raw_contents)\n          without_empty_newlines = re.sub(r'^\\s*$', '', without_comments, flags=re.MULTILINE)\n          message_spec = json.loads(without_empty_newlines)\n          message = self.parse_top_level_element(message_spec)\n          messages.append(message)\n      except Exception as e:\n        print('could not process %s' % input_file)\n        raise\n\n    # Sort messages by api_key.\n    
messages.sort(key=lambda x: x.get_extra('api_key'))\n    return messages\n\n  def parse_top_level_element(self, spec):\n    \"\"\"\n    Parse a given structure into a request/response.\n    Request/response is just a complex type, that has name & version information kept in differently\n    named fields, compared to sub-structures in a message.\n    \"\"\"\n    self.currently_processed_message_type = spec['name']\n\n    # Figure out all versions of this message type.\n    versions = Statics.parse_version_string(spec['validVersions'], 2 << 16 - 1)\n\n    # Figure out the flexible versions.\n    flexible_versions_string = spec.get('flexibleVersions', 'none')\n    if 'none' != flexible_versions_string:\n      flexible_versions = Statics.parse_version_string(flexible_versions_string, versions[-1])\n    else:\n      flexible_versions = []\n\n    # Sanity check - all flexible versions need to be versioned.\n    if [x for x in flexible_versions if x not in versions]:\n      raise ValueError('invalid flexible versions')\n\n    try:\n      # In 2.4 some types are declared at top level, and only referenced inside.\n      # So let's parse them and store them in state.\n      common_structs = spec.get('commonStructs')\n      if common_structs is not None:\n        for common_struct in common_structs:\n          common_struct_name = common_struct['name']\n          common_struct_versions = Statics.parse_version_string(common_struct['versions'],\n                                                                versions[-1])\n          parsed_complex = self.parse_complex_type(common_struct_name, common_struct,\n                                                   common_struct_versions)\n          self.common_structs[parsed_complex.name] = parsed_complex\n\n      # Parse the type itself.\n      complex_type = self.parse_complex_type(self.currently_processed_message_type, spec, versions)\n      complex_type.register_flexible_versions(flexible_versions)\n\n      # Request / response 
types need to carry api key version.\n      result = complex_type.with_extra('api_key', spec['apiKey'])\n      return result\n\n    finally:\n      self.common_structs = {}\n      self.currently_processed_message_type = None\n\n  def parse_complex_type(self, type_name, field_spec, versions):\n    \"\"\"\n    Parse given complex type, returning a structure that holds its name, field specification and\n    allowed versions.\n    \"\"\"\n    fields_el = field_spec.get('fields')\n\n    if fields_el is not None:\n      fields = []\n      for child_field in field_spec['fields']:\n        child = self.parse_field(child_field, versions[-1])\n        if child is not None:\n          fields.append(child)\n\n      # Some of the types repeat multiple times (e.g. AlterableConfig).\n      # In such a case, every second or later occurrence of the same name is going to be prefixed\n      # with parent type, e.g. we have AlterableConfig (for AlterConfigsRequest) and then\n      # IncrementalAlterConfigsRequestAlterableConfig (for IncrementalAlterConfigsRequest).\n      # This keeps names unique, while keeping non-duplicate ones short.\n      if type_name not in self.known_types:\n        self.known_types.add(type_name)\n      else:\n        type_name = self.currently_processed_message_type + type_name\n        self.known_types.add(type_name)\n\n      return Complex(type_name, fields, versions)\n\n    else:\n      return self.common_structs[type_name]\n\n  def parse_field(self, field_spec, highest_possible_version):\n    \"\"\"\n    Parse given field, returning a structure holding the name, type, and versions when this field is\n    actually used (nullable or not). 
Obviously, field cannot be used in version higher than its\n    type's usage.\n    \"\"\"\n    if field_spec.get('tag') is not None:\n      return None\n\n    version_usage = Statics.parse_version_string(field_spec['versions'], highest_possible_version)\n    version_usage_as_nullable = Statics.parse_version_string(\n        field_spec['nullableVersions'],\n        highest_possible_version) if 'nullableVersions' in field_spec else range(-1)\n    parsed_type = self.parse_type(field_spec['type'], field_spec, highest_possible_version)\n    return FieldSpec(field_spec['name'], parsed_type, version_usage, version_usage_as_nullable)\n\n  def parse_type(self, type_name, field_spec, highest_possible_version):\n    \"\"\"\n    Parse a given type element - returns an array type, primitive (e.g. uint32_t) or complex one.\n    \"\"\"\n    if (type_name.startswith('[]')):\n      # In spec files, array types are defined as `[]underlying_type` instead of having its own\n      # element with type inside.\n      underlying_type = self.parse_type(type_name[2:], field_spec, highest_possible_version)\n      return Array(underlying_type)\n    else:\n      if (type_name in Primitive.USABLE_PRIMITIVE_TYPE_NAMES):\n        return Primitive(type_name, field_spec.get('default'))\n      else:\n        versions = Statics.parse_version_string(field_spec['versions'], highest_possible_version)\n        return self.parse_complex_type(type_name, field_spec, versions)\n\n\nclass Statics:\n\n  @staticmethod\n  def parse_version_string(raw_versions, highest_possible_version):\n    \"\"\"\n    Return integer range that corresponds to version string in spec file.\n    \"\"\"\n    if raw_versions.endswith('+'):\n      return range(int(raw_versions[:-1]), highest_possible_version + 1)\n    else:\n      if '-' in raw_versions:\n        tokens = raw_versions.split('-', 1)\n        return range(int(tokens[0]), int(tokens[1]) + 1)\n      else:\n        single_version = int(raw_versions)\n        return 
range(single_version, single_version + 1)\n\n\nclass FieldList:\n  \"\"\"\n  List of fields used by given entity (request or child structure) in given message version\n  (as fields get added or removed across versions and/or they change compaction level).\n  \"\"\"\n\n  def __init__(self, version, uses_compact_fields, fields):\n    self.version = version\n    self.uses_compact_fields = uses_compact_fields\n    self.fields = fields\n\n  def used_fields(self):\n    \"\"\"\n    Return list of fields that are actually used in this version of structure.\n    \"\"\"\n    return filter(lambda x: x.used_in_version(self.version), self.fields)\n\n  def constructor_signature(self):\n    \"\"\"\n    Return constructor signature.\n    Multiple versions of the same structure can have identical signatures (due to version bumps in\n    Kafka).\n    \"\"\"\n    parameter_spec = map(lambda x: x.parameter_declaration(self.version), self.used_fields())\n    return ', '.join(parameter_spec)\n\n  def constructor_init_list(self):\n    \"\"\"\n    Renders member initialization list in constructor.\n    Takes care of potential optional<T> conversions (as field could be T in V1, but optional<T>\n    in V2).\n    \"\"\"\n    init_list = []\n    for field in self.fields:\n      if field.used_in_version(self.version):\n        if field.is_nullable():\n          if field.is_nullable_in_version(self.version):\n            # Field is optional<T>, and the parameter is optional<T> in this version.\n            init_list_item = '%s_{%s}' % (field.name, field.name)\n            init_list.append(init_list_item)\n          else:\n            # Field is optional<T>, and the parameter is T in this version.\n            init_list_item = '%s_{absl::make_optional(%s)}' % (field.name, field.name)\n            init_list.append(init_list_item)\n        else:\n          # Field is T, so parameter cannot be optional<T>.\n          init_list_item = '%s_{%s}' % (field.name, field.name)\n          
init_list.append(init_list_item)\n      else:\n        # Field is not used in this version, so we need to put in default value.\n        init_list_item = '%s_{%s}' % (field.name, field.default_value())\n        init_list.append(init_list_item)\n      pass\n    return ', '.join(init_list)\n\n  def field_count(self):\n    return len(list(self.used_fields()))\n\n  def example_value(self):\n    return ', '.join(map(lambda x: x.example_value_for_test(self.version), self.used_fields()))\n\n\nclass FieldSpec:\n  \"\"\"\n  Represents a field present in a structure (request, or child structure thereof).\n  Contains name, type, and versions when it is used (nullable or not).\n  \"\"\"\n\n  def __init__(self, name, type, version_usage, version_usage_as_nullable):\n    import re\n    separated = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n    self.name = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', separated).lower()\n    self.type = type\n    self.version_usage = version_usage\n    self.version_usage_as_nullable = version_usage_as_nullable\n\n  def is_nullable(self):\n    return len(self.version_usage_as_nullable) > 0\n\n  def is_nullable_in_version(self, version):\n    \"\"\"\n    Whether the field is nullable in given version.\n    Fields can be non-nullable in earlier versions.\n    See https://github.com/apache/kafka/tree/2.2.0-rc0/clients/src/main/resources/common/message#nullable-fields\n    \"\"\"\n    return version in self.version_usage_as_nullable\n\n  def used_in_version(self, version):\n    return version in self.version_usage\n\n  def field_declaration(self):\n    if self.is_nullable():\n      return 'absl::optional<%s> %s' % (self.type.name, self.name)\n    else:\n      return '%s %s' % (self.type.name, self.name)\n\n  def parameter_declaration(self, version):\n    if self.is_nullable_in_version(version):\n      return 'absl::optional<%s> %s' % (self.type.name, self.name)\n    else:\n      return '%s %s' % (self.type.name, self.name)\n\n  def default_value(self):\n 
   if self.is_nullable():\n      type_default_value = self.type.default_value()\n      # For nullable fields, it's possible to have (Java) null as default value.\n      if type_default_value != 'null':\n        return '{%s}' % type_default_value\n      else:\n        return 'absl::nullopt'\n    else:\n      return str(self.type.default_value())\n\n  def example_value_for_test(self, version):\n    if self.is_nullable():\n      return 'absl::make_optional<%s>(%s)' % (self.type.name,\n                                              self.type.example_value_for_test(version))\n    else:\n      return str(self.type.example_value_for_test(version))\n\n  def deserializer_name_in_version(self, version, compact):\n    if self.is_nullable_in_version(version):\n      return 'Nullable%s' % self.type.deserializer_name_in_version(version, compact)\n    else:\n      return self.type.deserializer_name_in_version(version, compact)\n\n  def is_printable(self):\n    return self.type.is_printable()\n\n\nclass TypeSpecification:\n\n  def compute_declaration_chain(self):\n    \"\"\"\n    Computes types that need to be declared before this type can be declared, in C++ sense.\n    \"\"\"\n    raise NotImplementedError()\n\n  def deserializer_name_in_version(self, version, compact):\n    \"\"\"\n    Renders the deserializer name of given type, in message with given version.\n    \"\"\"\n    raise NotImplementedError()\n\n  def default_value(self):\n    \"\"\"\n    Returns a default value for given type.\n    \"\"\"\n    raise NotImplementedError()\n\n  def has_flexible_handling(self):\n    \"\"\"\n    Whether the given type has special encoding when carrying message is using flexible encoding.\n    \"\"\"\n    raise NotImplementedError()\n\n  def example_value_for_test(self, version):\n    raise NotImplementedError()\n\n  def is_printable(self):\n    raise NotImplementedError()\n\n\nclass Array(TypeSpecification):\n  \"\"\"\n  Represents array complex type.\n  To use instance of this type, it 
is necessary to declare structures required by self.underlying\n  (e.g. to use Array<Foo>, we need to have `struct Foo {...}`).\n  \"\"\"\n\n  def __init__(self, underlying):\n    self.underlying = underlying\n\n  @property\n  def name(self):\n    return 'std::vector<%s>' % self.underlying.name\n\n  def compute_declaration_chain(self):\n    # To use an array of type T, we just need to be capable of using type T.\n    return self.underlying.compute_declaration_chain()\n\n  def deserializer_name_in_version(self, version, compact):\n    # For arrays, deserializer name is (Compact)(Nullable)ArrayDeserializer<ElementDeserializer>.\n    element_deserializer_name = self.underlying.deserializer_name_in_version(version, compact)\n    return '%sArrayDeserializer<%s>' % (\"Compact\" if compact else \"\", element_deserializer_name)\n\n  def default_value(self):\n    return 'std::vector<%s>{}' % (self.underlying.name)\n\n  def has_flexible_handling(self):\n    return True\n\n  def example_value_for_test(self, version):\n    return 'std::vector<%s>{ %s }' % (self.underlying.name,\n                                      self.underlying.example_value_for_test(version))\n\n  def is_printable(self):\n    return self.underlying.is_printable()\n\n\nclass Primitive(TypeSpecification):\n  \"\"\"\n  Represents a Kafka primitive value.\n  \"\"\"\n\n  USABLE_PRIMITIVE_TYPE_NAMES = ['bool', 'int8', 'int16', 'int32', 'int64', 'string', 'bytes']\n\n  KAFKA_TYPE_TO_ENVOY_TYPE = {\n      'string': 'std::string',\n      'bool': 'bool',\n      'int8': 'int8_t',\n      'int16': 'int16_t',\n      'int32': 'int32_t',\n      'int64': 'int64_t',\n      'bytes': 'Bytes',\n      'tagged_fields': 'TaggedFields',\n  }\n\n  KAFKA_TYPE_TO_DESERIALIZER = {\n      'string': 'StringDeserializer',\n      'bool': 'BooleanDeserializer',\n      'int8': 'Int8Deserializer',\n      'int16': 'Int16Deserializer',\n      'int32': 'Int32Deserializer',\n      'int64': 'Int64Deserializer',\n      'bytes': 
'BytesDeserializer',\n      'tagged_fields': 'TaggedFieldsDeserializer',\n  }\n\n  KAFKA_TYPE_TO_COMPACT_DESERIALIZER = {\n      'string': 'CompactStringDeserializer',\n      'bytes': 'CompactBytesDeserializer'\n  }\n\n  # See https://github.com/apache/kafka/tree/trunk/clients/src/main/resources/common/message#deserializing-messages\n  KAFKA_TYPE_TO_DEFAULT_VALUE = {\n      'string': '\"\"',\n      'bool': 'false',\n      'int8': '0',\n      'int16': '0',\n      'int32': '0',\n      'int64': '0',\n      'bytes': '{}',\n      'tagged_fields': 'TaggedFields({})',\n  }\n\n  # Custom values that make test code more readable.\n  KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST = {\n      'string':\n          '\"string\"',\n      'bool':\n          'false',\n      'int8':\n          'static_cast<int8_t>(8)',\n      'int16':\n          'static_cast<int16_t>(16)',\n      'int32':\n          'static_cast<int32_t>(32)',\n      'int64':\n          'static_cast<int64_t>(64)',\n      'bytes':\n          'Bytes({0, 1, 2, 3})',\n      'tagged_fields':\n          'TaggedFields{std::vector<TaggedField>{{10, Bytes({1, 2, 3})}, {20, Bytes({4, 5, 6})}}}',\n  }\n\n  def __init__(self, name, custom_default_value):\n    self.original_name = name\n    self.name = Primitive.compute(name, Primitive.KAFKA_TYPE_TO_ENVOY_TYPE)\n    self.custom_default_value = custom_default_value\n\n  @staticmethod\n  def compute(name, map):\n    if name in map:\n      return map[name]\n    else:\n      raise ValueError(name)\n\n  def compute_declaration_chain(self):\n    # Primitives need no declarations.\n    return []\n\n  def deserializer_name_in_version(self, version, compact):\n    if compact and self.original_name in Primitive.KAFKA_TYPE_TO_COMPACT_DESERIALIZER.keys():\n      return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_COMPACT_DESERIALIZER)\n    else:\n      return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_DESERIALIZER)\n\n  def default_value(self):\n    if 
self.custom_default_value is not None:\n      return self.custom_default_value\n    else:\n      return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_DEFAULT_VALUE)\n\n  def has_flexible_handling(self):\n    return self.original_name in ['string', 'bytes', 'tagged_fields']\n\n  def example_value_for_test(self, version):\n    return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST)\n\n  def is_printable(self):\n    return self.name not in ['Bytes']\n\n\nclass FieldSerializationSpec():\n\n  def __init__(self, field, versions, compute_size_method_name, encode_method_name):\n    self.field = field\n    self.versions = versions\n    self.compute_size_method_name = compute_size_method_name\n    self.encode_method_name = encode_method_name\n\n\nclass Complex(TypeSpecification):\n  \"\"\"\n  Represents a complex type (multiple types aggregated into one).\n  This type gets mapped to a C++ struct.\n  \"\"\"\n\n  def __init__(self, name, fields, versions):\n    self.name = name\n    self.fields = fields\n    self.versions = versions\n    self.flexible_versions = None  # Will be set in 'register_flexible_versions'.\n    self.attributes = {}\n\n  def register_flexible_versions(self, flexible_versions):\n    # If flexible versions are present, so we need to add placeholder 'tagged_fields' field to\n    # *every* type that's used in by this message type.\n    for type in self.compute_declaration_chain():\n      type.flexible_versions = flexible_versions\n      if len(flexible_versions) > 0:\n        tagged_fields_field = FieldSpec('tagged_fields', Primitive('tagged_fields', None),\n                                        flexible_versions, [])\n        type.fields.append(tagged_fields_field)\n\n  def compute_declaration_chain(self):\n    \"\"\"\n    Computes all dependencies, what means all non-primitive types used by this type.\n    They need to be declared before this struct is declared.\n    \"\"\"\n    result = []\n    for 
field in self.fields:\n      field_dependencies = field.type.compute_declaration_chain()\n      for field_dependency in field_dependencies:\n        if field_dependency not in result:\n          result.append(field_dependency)\n    result.append(self)\n    return result\n\n  def with_extra(self, key, value):\n    self.attributes[key] = value\n    return self\n\n  def get_extra(self, key):\n    return self.attributes[key]\n\n  def compute_constructors(self):\n    \"\"\"\n    Field lists for different versions may not differ (as Kafka can bump version without any\n    changes). But constructors need to be unique, so we need to remove duplicates if the signatures\n    match.\n    \"\"\"\n    signature_to_constructor = {}\n    for field_list in self.compute_field_lists():\n      signature = field_list.constructor_signature()\n      constructor = signature_to_constructor.get(signature)\n      if constructor is None:\n        entry = {}\n        entry['versions'] = [field_list.version]\n        entry['signature'] = signature\n        if (len(signature) > 0):\n          entry['full_declaration'] = '%s(%s): %s {};' % (self.name, signature,\n                                                          field_list.constructor_init_list())\n        else:\n          entry['full_declaration'] = '%s() {};' % self.name\n        signature_to_constructor[signature] = entry\n      else:\n        constructor['versions'].append(field_list.version)\n    return sorted(signature_to_constructor.values(), key=lambda x: x['versions'][0])\n\n  def compute_field_lists(self):\n    \"\"\"\n    Return field lists representing each of structure versions.\n    \"\"\"\n    field_lists = []\n    for version in self.versions:\n      field_list = FieldList(version, version in self.flexible_versions, self.fields)\n      field_lists.append(field_list)\n    return field_lists\n\n  def compute_serialization_specs(self):\n    result = []\n    for field in self.fields:\n      if 
field.type.has_flexible_handling():\n        flexible = [x for x in field.version_usage if x in self.flexible_versions]\n        non_flexible = [x for x in field.version_usage if x not in flexible]\n        if non_flexible:\n          result.append(FieldSerializationSpec(field, non_flexible, 'computeSize', 'encode'))\n        if flexible:\n          result.append(\n              FieldSerializationSpec(field, flexible, 'computeCompactSize', 'encodeCompact'))\n      else:\n        result.append(FieldSerializationSpec(field, field.version_usage, 'computeSize', 'encode'))\n    return result\n\n  def deserializer_name_in_version(self, version, compact):\n    return '%sV%dDeserializer' % (self.name, version)\n\n  def name_in_c_case(self):\n    import re\n    s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', self.name)\n    return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n  def default_value(self):\n    raise NotImplementedError('unable to create default value of complex type')\n\n  def has_flexible_handling(self):\n    return False\n\n  def example_value_for_test(self, version):\n    field_list = next(fl for fl in self.compute_field_lists() if fl.version == version)\n    example_values = map(lambda x: x.example_value_for_test(version), field_list.used_fields())\n    return '%s(%s)' % (self.name, ', '.join(example_values))\n\n  def is_printable(self):\n    return True\n\n\nclass RenderingHelper:\n  \"\"\"\n  Helper for jinja templates.\n  \"\"\"\n\n  @staticmethod\n  def get_template(template):\n    import jinja2\n    import os\n    import sys\n    # Templates are resolved relatively to main start script, due to main & test templates being\n    # stored in different directories.\n    env = jinja2.Environment(loader=jinja2.FileSystemLoader(\n        searchpath=os.path.dirname(os.path.abspath(sys.argv[0]))))\n    return env.get_template(template)\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/kafka_request_resolver_cc.j2",
    "content": "{#\n  Template for 'kafka_request_resolver.cc'.\n  Defines default Kafka request resolver, that uses request parsers in (also generated)\n  'requests.h'.\n#}\n#include \"extensions/filters/network/kafka/external/requests.h\"\n#include \"extensions/filters/network/kafka/kafka_request_parser.h\"\n#include \"extensions/filters/network/kafka/parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n// Implements declaration from 'kafka_request.h'.\nbool requestUsesTaggedFieldsInHeader(const uint16_t api_key, const uint16_t api_version) {\n  switch (api_key) {\n    {% for message_type in message_types %}\n    case {{ message_type.get_extra('api_key') }}:\n      switch (api_version) {\n        {% for flexible_version in message_type.flexible_versions %}\n        case {{ flexible_version }}:\n          return true;\n        {% endfor %}\n        default:\n          return false;\n      }\n    {% endfor %}\n    default:\n      return false;\n  }\n}\n\n/**\n * Creates a parser that corresponds to provided key and version.\n * If corresponding parser cannot be found (what means a newer version of Kafka protocol),\n * a sentinel parser is returned.\n * @param api_key Kafka request key\n * @param api_version Kafka request's version\n * @param context parse context\n */\nRequestParserSharedPtr RequestParserResolver::createParser(int16_t api_key, int16_t api_version,\n                                                           RequestContextSharedPtr context) const {\n\n{% for message_type in message_types %}{% for field_list in message_type.compute_field_lists() %}\n  if ({{ message_type.get_extra('api_key') }} == api_key\n    && {{ field_list.version }} == api_version) {\n    return std::make_shared<{{ message_type.name }}V{{ field_list.version }}Parser>(context);\n  }{% endfor %}{% endfor %}\n  return std::make_shared<SentinelParser>(context);\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // 
namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/kafka_response_resolver_cc.j2",
    "content": "{#\n  Template for 'kafka_response_resolver.cc'.\n  Defines default Kafka response resolver, that uses response parsers in (also generated)\n  'responses.h'.\n#}\n#include \"extensions/filters/network/kafka/external/responses.h\"\n#include \"extensions/filters/network/kafka/kafka_response_parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n// Implements declaration from 'kafka_response.h'.\nbool responseUsesTaggedFieldsInHeader(const uint16_t api_key, const uint16_t api_version) {\n  switch (api_key) {\n    {% for message_type in message_types %}\n    case {{ message_type.get_extra('api_key') }}:\n      switch (api_version) {\n        {# ApiVersions responses require special handling. #}\n        {% if message_type.get_extra('api_key') != 18 %}\n        {% for flexible_version in message_type.flexible_versions %}\n        case {{ flexible_version }}:\n          return true;\n        {% endfor %}\n        {% endif %}\n        default:\n          return false;\n      }\n    {% endfor %}\n    default:\n      return false;\n  }\n}\n\n/**\n * Creates a parser that is going to process data specific for given response.\n * If corresponding parser cannot be found (what means a newer version of Kafka protocol),\n * a sentinel parser is returned.\n * @param context parse context (carries the expected message type information).\n * @return parser that is capable of properly consuming response bytes.\n */\nResponseParserSharedPtr ResponseParserResolver::createParser(\n  ResponseContextSharedPtr context) const {\n\n  const int16_t api_key = context->api_key_;\n  const int16_t api_version = context->api_version_;\n\n{% for message_type in message_types %}{% for field_list in message_type.compute_field_lists() %}\n  if ({{ message_type.get_extra('api_key') }} == api_key\n    && {{ field_list.version }} == api_version) {\n    return std::make_shared<{{ message_type.name }}V{{ field_list.version 
}}Parser>(context);\n  }{% endfor %}{% endfor %}\n  return std::make_shared<SentinelResponseParser>(context);\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/launcher.py",
    "content": "#!/usr/bin/python\n\n# Launcher for generating Kafka protocol code.\n\nimport source.extensions.filters.network.kafka.protocol.generator as generator\nimport sys\nimport os\n\n\ndef main():\n  \"\"\"\n  Kafka code generator script\n  ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n  Generates C++ code from Kafka protocol specification for Kafka codec.\n\n  Usage:\n    launcher.py MESSAGE_TYPE OUTPUT_FILES INPUT_FILES\n  where:\n  MESSAGE_TYPE : 'request' or 'response'\n  OUTPUT_FILES : location of 'requests.h'/'responses.h',\n                 'kafka_request_resolver.cc'/'kafka_response_resolver.cc' and\n                 'request_metrics.h'/'response_metrics.h'.\n  INPUT_FILES: Kafka protocol json files to be processed.\n\n  Kafka spec files are provided in Kafka clients jar file.\n\n  Files created are:\n    - ${MESSAGE_TYPE}s.h - definition of all the structures/deserializers/parsers related to Kafka\n      requests/responses,\n    - kafka_${MESSAGE_TYPE}_resolver.cc - resolver that is responsible for creation of parsers\n      defined in ${MESSAGE_TYPE}s.h (it maps request's api key & version to matching parser),\n    - ${MESSAGE_TYPE}_metrics.h - rich metrics wrappers for all possible message types.\n\n  Templates used are:\n  - to create '${MESSAGE_TYPE}.h': ${MESSAGE_TYPE}_h.j2, complex_type_template.j2,\n    request_parser.j2,\n  - to create 'kafka_${MESSAGE_TYPE}_resolver.cc': kafka_${MESSAGE_TYPE}_resolver_cc.j2,\n  - to create '${MESSAGE_TYPE}_metrics.h': ${MESSAGE_TYPE}_metrics_h.j2.\n  \"\"\"\n\n  type = sys.argv[1]\n  main_header_file = os.path.abspath(sys.argv[2])\n  resolver_cc_file = os.path.abspath(sys.argv[3])\n  metrics_h_file = os.path.abspath(sys.argv[4])\n  input_files = sys.argv[5:]\n  generator.generate_main_code(type, main_header_file, resolver_cc_file, metrics_h_file,\n                               input_files)\n\n\nif __name__ == \"__main__\":\n  main()\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/request_metrics_h.j2",
    "content": "{#\n  Template for 'request_metrics.h'.\n\n  Generates the request metric names from Kafka message types.\n  The metrics structure (KAFKA_REQUEST_METRICS) is wrapped by RichRequestMetrics instance, allowing\n  for easier access to metrics using message's api_key.\n\n  There is one metric for each of request types (e.g. produce) - number of responses received.\n  There is also a metric for counting requests that could not be recognised, and one for requests\n  that could caused deserialization errors.\n#}\n\n#pragma once\n\n#include <array>\n#include <functional>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Generated metrics, we have a counter for each request type.\n */\n#define KAFKA_REQUEST_METRICS(COUNTER)                                                             \\\n{% for message_type in message_types %}                                                            \\\n  COUNTER({{ message_type.name_in_c_case() }})                                                     \\\n{% endfor %}                                                                                       \\\n  COUNTER(unknown)                                                                                 \\\n  COUNTER(failure)\n\nstruct KafkaRequestMetrics {\n  KAFKA_REQUEST_METRICS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Abstraction layer over request-related metrics.\n * Pure interface so that it can be mocked easily.\n */\nclass RichRequestMetrics {\npublic:\n  virtual ~RichRequestMetrics() = default;\n\n  /**\n   * Invoked when properly-parsed message is received.\n   */\n  virtual void onRequest(const int16_t api_key) PURE;\n\n  /**\n   * Invoked when an unknown message is received.\n   */\n  virtual void onUnknownRequest() PURE;\n\n  /**\n   * Invoked when a deserialization error occurs.\n   */\n  virtual void onBrokenRequest() 
PURE;\n};\n\nusing RichRequestMetricsSharedPtr = std::shared_ptr<RichRequestMetrics>;\n\n/**\n * Metrics implementation that uses Envoy Scope to store metrics.\n */\nclass RichRequestMetricsImpl: public RichRequestMetrics {\npublic:\n  RichRequestMetricsImpl(Stats::Scope& scope, std::string stat_prefix): metrics_({\n    KAFKA_REQUEST_METRICS(POOL_COUNTER_PREFIX(scope, fmt::format(\"kafka.{}.request.\",\n      stat_prefix)))}) {};\n\n  void onRequest(const int16_t api_key) override {\n    // Both successful message parsing & metrics list depend on protocol-generated code, what means\n    // both do support the same api keys.\n    switch (api_key) {\n    {% for message_type in message_types %}\n    case {{ message_type.get_extra('api_key') }} :\n      metrics_.{{ message_type.name_in_c_case() }}_.inc();\n      return;\n    {% endfor %}\n    }\n  }\n\n  void onUnknownRequest() override { metrics_.unknown_.inc(); }\n\n  void onBrokenRequest() override { metrics_.failure_.inc(); }\n\nprivate:\n  KafkaRequestMetrics metrics_;\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/request_parser.j2",
    "content": "{#\n  Template for top-level structure representing a request in Kafka protocol (e.g. ProduceRequest).\n  Rendered templates for each request in Kafka protocol will be put into 'requests.h' file.\n\n  This template handles binding the top-level structure deserializer\n  (e.g. ProduceRequestV0Deserializer) with RequestDataParser. These parsers are then used by\n  RequestParserResolver instance depending on received Kafka api key & api version\n  (see 'kafka_request_resolver_cc.j2').\n#}\n\n{% for version in complex_type.versions %}class {{ complex_type.name }}V{{ version }}Parser:\n  public RequestDataParser<\n    {{ complex_type.name }}, {{ complex_type.name }}V{{ version }}Deserializer>\n{\npublic:\n  {{ complex_type.name }}V{{ version }}Parser(RequestContextSharedPtr ctx) :\n    RequestDataParser{ctx} {};\n};\n\n{% endfor %}"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/requests_h.j2",
    "content": "{#\n  Main template for 'requests.h' file.\n  Gets filled in (by 'contents') with Kafka request structures, deserializers, and parsers.\n\n  For each request we have the following:\n  - 1 top-level structure corresponding to the request (e.g. `struct FetchRequest`),\n  - N deserializers for top-level structure, one for each request version,\n  - N parsers binding each deserializer with parser,\n  - 0+ child structures (e.g. `struct FetchRequestTopic`, `FetchRequestPartition`) that are used by\n    the request's top-level structure,\n  - deserializers for each child structure.\n\n  So for example, for FetchRequest we have:\n  - struct FetchRequest,\n  - FetchRequestV0Deserializer, FetchRequestV1Deserializer, FetchRequestV2Deserializer, etc.,\n  - FetchRequestV0Parser, FetchRequestV1Parser, FetchRequestV2Parser, etc.,\n  - struct FetchRequestTopic,\n  - FetchRequestTopicV0Deserializer, FetchRequestTopicV1Deserializer, etc.\n    (because topic data is present in every FetchRequest version),\n  - struct FetchRequestPartition,\n  - FetchRequestPartitionV0Deserializer, FetchRequestPartitionV1Deserializer, etc.\n    (because partition data is present in every FetchRequestTopic version).\n#}\n#pragma once\n#include \"extensions/filters/network/kafka/kafka_request.h\"\n#include \"extensions/filters/network/kafka/kafka_request_parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n{{ contents }}\n\n}}}}\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/response_metrics_h.j2",
    "content": "{#\n  Template for 'response_metrics.h'.\n\n  Generates the response metric names from Kafka message types.\n  The metrics structure (KAFKA_RESPONSE_METRICS) is wrapped by RichResponseMetrics instance,\n  allowing for easier access to metrics using message's api_key.\n\n  There are two metrics for each of response types (e.g. produce):\n  - number of responses received,\n  - response processing time in milliseconds (time between receiving a request and receiving a\n    response with the same correlation id).\n  There is also a metric for counting responses that could not be recognised, and one for responses\n  that could caused deserialization errors.\n#}\n\n#pragma once\n\n#include <array>\n#include <functional>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Generated metrics, we have a counter and a histogram for each request type.\n */\n#define KAFKA_RESPONSE_METRICS(COUNTER, HISTOGRAM)                                                 \\\n{% for message_type in message_types %}                                                            \\\n  COUNTER({{ message_type.name_in_c_case() }})                                                     \\\n  HISTOGRAM({{ message_type.name_in_c_case() }}_duration, Milliseconds)                            \\\n{% endfor %}                                                                                       \\\n  COUNTER(unknown)                                                                                 \\\n  COUNTER(failure)\n\nstruct KafkaResponseMetrics {\n  KAFKA_RESPONSE_METRICS(GENERATE_COUNTER_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\n/**\n * Abstraction layer over response-related metrics.\n * Pure interface so that it can be mocked easily.\n */\nclass RichResponseMetrics {\npublic:\n  virtual ~RichResponseMetrics() = default;\n\n  /**\n   * Invoked when properly-parsed 
message is received.\n   */\n  virtual void onResponse(const int16_t api_key, const long long duration) PURE;\n\n  /**\n   * Invoked when an unknown message is received.\n   */\n  virtual void onUnknownResponse() PURE;\n\n  /**\n   * Invoked when a deserialization error occurs.\n   */\n  virtual void onBrokenResponse() PURE;\n};\n\nusing RichResponseMetricsSharedPtr = std::shared_ptr<RichResponseMetrics>;\n\n/**\n * Metrics implementation that uses Envoy Scope to store metrics.\n */\nclass RichResponseMetricsImpl: public RichResponseMetrics {\npublic:\n  RichResponseMetricsImpl(Stats::Scope& scope, std::string stat_prefix): metrics_({\n    KAFKA_RESPONSE_METRICS(POOL_COUNTER_PREFIX(scope, fmt::format(\"kafka.{}.response.\",\n      stat_prefix)), POOL_HISTOGRAM_PREFIX(scope, fmt::format(\"kafka.{}.response.\", stat_prefix)))})\n  {};\n\n  void onResponse(const int16_t api_key, const long long duration) override {\n    // Both successful message parsing & metrics list depend on protocol-generated code, what means\n    // both do support the same api keys.\n    switch (api_key) {\n    {% for message_type in message_types %}\n    case {{ message_type.get_extra('api_key') }} :\n      // Increase received message counter and update histogram with duration.\n      metrics_.{{ message_type.name_in_c_case() }}_.inc();\n      metrics_.{{ message_type.name_in_c_case() }}_duration_.recordValue(duration);\n      return;\n    {% endfor %}\n    }\n  }\n\n  void onUnknownResponse() override { metrics_.unknown_.inc(); }\n\n  void onBrokenResponse() override { metrics_.failure_.inc(); }\n\nprivate:\n  KafkaResponseMetrics metrics_;\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/response_parser.j2",
    "content": "{#\n  Template for top-level structure representing a response in Kafka protocol\n  (e.g. ProduceResponse).\n  Rendered templates for each response in Kafka protocol will be put into 'responses.h' file.\n\n  This template handles binding the top-level structure deserializer\n  (e.g. ProduceResponseV0Deserializer) with ResponseDataParser.\n  These parsers are then used by ResponseParserResolver instance depending on received Kafka\n  api key & api version (see 'kafka_response_resolver_cc.j2').\n#}\n\n{% for version in complex_type.versions %}class {{ complex_type.name }}V{{ version }}Parser:\n  public ResponseDataParser<\n    {{ complex_type.name }}, {{ complex_type.name }}V{{ version }}Deserializer>{\npublic:\n  {{ complex_type.name }}V{{ version }}Parser(ResponseContextSharedPtr context):\n    ResponseDataParser{context} {};\n};\n\n{% endfor %}"
  },
  {
    "path": "source/extensions/filters/network/kafka/protocol/responses_h.j2",
    "content": "{#\n  Main template for 'responses.h' file.\n  Gets filled in (by 'contents') with Kafka response structures, deserializers, and parsers.\n\n  For each response we have the following:\n  - 1 top-level structure corresponding to the response (e.g. `struct FetchResponse`),\n  - N deserializers for top-level structure, one for each response version,\n  - N parsers binding each deserializer with parser,\n  - 0+ child structures (e.g. `struct FetchableTopicResponse`) that are used by the response's\n    top-level structure,\n  - deserializers for each child structure.\n\n  So for example, for FetchResponse we have:\n  - struct FetchResponse,\n  - FetchResponseV0Deserializer, FetchResponseV1Deserializer, FetchResponseV2Deserializer, etc.,\n  - FetchResponseV0Parser, FetchResponseV1Parser, FetchResponseV2Parser, etc.,\n  - struct FetchableTopicResponse,\n  - FetchableTopicResponseV0Deserializer, FetchableTopicResponseV1Deserializer, etc.\n    (because topic data is present in every FetchResponse version),\n  - struct FetchablePartitionResponse,\n  - FetchablePartitionResponseV0Deserializer, FetchablePartitionResponseV1Deserializer, etc.\n    (because partition data is present in every FetchableTopicResponse version).\n  - AbortedTransaction & its Deserializers (starting with version 4).\n#}\n#pragma once\n#include \"extensions/filters/network/kafka/kafka_response.h\"\n#include \"extensions/filters/network/kafka/kafka_response_parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n{{ contents }}\n\n}}}}\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/request_codec.cc",
    "content": "#include \"extensions/filters/network/kafka/request_codec.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nclass RequestStartParserFactory : public InitialParserFactory {\n  RequestParserSharedPtr create(const RequestParserResolver& parser_resolver) const override {\n    return std::make_shared<RequestStartParser>(parser_resolver);\n  }\n};\n\nconst InitialParserFactory& InitialParserFactory::getDefaultInstance() {\n  CONSTRUCT_ON_FIRST_USE(RequestStartParserFactory);\n}\n\nRequestParserSharedPtr RequestDecoder::createStartParser() {\n  return factory_.create(parser_resolver_);\n}\n\nvoid RequestEncoder::encode(const AbstractRequest& message) {\n  const uint32_t size = htobe32(message.computeSize());\n  output_.add(&size, sizeof(size)); // Encode data length.\n  message.encode(output_);          // Encode data.\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/request_codec.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"extensions/filters/network/kafka/codec.h\"\n#include \"extensions/filters/network/kafka/kafka_request.h\"\n#include \"extensions/filters/network/kafka/kafka_request_parser.h\"\n#include \"extensions/filters/network/kafka/parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nusing RequestCallback = MessageCallback<AbstractRequestSharedPtr, RequestParseFailureSharedPtr>;\n\nusing RequestCallbackSharedPtr = std::shared_ptr<RequestCallback>;\n\n/**\n * Provides initial parser for messages (class extracted to allow injecting test factories).\n */\nclass InitialParserFactory {\npublic:\n  virtual ~InitialParserFactory() = default;\n\n  /**\n   * Creates default instance that returns RequestStartParser instances.\n   */\n  static const InitialParserFactory& getDefaultInstance();\n\n  /**\n   * Creates parser with given context.\n   */\n  virtual RequestParserSharedPtr create(const RequestParserResolver& parser_resolver) const PURE;\n};\n\n/**\n * Decoder that decodes Kafka requests.\n * When a request is decoded, the callbacks are notified, in order.\n *\n * This decoder uses chain of parsers to parse fragments of a request.\n * Each parser along the line returns the fully parsed message or the next parser.\n * Stores parse state (as large message's payload can be provided through multiple `onData` calls).\n */\nclass RequestDecoder\n    : public AbstractMessageDecoder<RequestParserSharedPtr, RequestCallbackSharedPtr> {\npublic:\n  /**\n   * Creates a decoder that will notify provided callbacks when a message is successfully parsed.\n   * @param callbacks callbacks to be invoked (in order).\n   */\n  RequestDecoder(const std::vector<RequestCallbackSharedPtr> callbacks)\n      : RequestDecoder(InitialParserFactory::getDefaultInstance(),\n                       RequestParserResolver::getDefaultInstance(), 
callbacks){};\n\n  /**\n   * Visible for testing.\n   * Allows injecting initial parser factory and parser resolver.\n   * @param factory parser factory to be used when new message is to be processed.\n   * @param parser_resolver supported parser resolver.\n   * @param callbacks callbacks to be invoked (in order).\n   */\n  RequestDecoder(const InitialParserFactory& factory, const RequestParserResolver& parser_resolver,\n                 const std::vector<RequestCallbackSharedPtr> callbacks)\n      : AbstractMessageDecoder{callbacks}, factory_{factory}, parser_resolver_{parser_resolver} {};\n\nprotected:\n  RequestParserSharedPtr createStartParser() override;\n\nprivate:\n  const InitialParserFactory& factory_;\n  const RequestParserResolver& parser_resolver_;\n};\n\nusing RequestDecoderSharedPtr = std::shared_ptr<RequestDecoder>;\n\n/**\n * Encodes requests into underlying buffer.\n */\nclass RequestEncoder : public MessageEncoder<AbstractRequest> {\npublic:\n  /**\n   * Wraps buffer with encoder.\n   */\n  RequestEncoder(Buffer::Instance& output) : output_(output) {}\n\n  /**\n   * Encodes request into wrapped buffer.\n   */\n  void encode(const AbstractRequest& message) override;\n\nprivate:\n  Buffer::Instance& output_;\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/requirements.txt",
    "content": "Jinja2==2.11.2 \\\n    --hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \\\n    --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035\nMarkupSafe==1.1.1 \\\n    --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \\\n    --hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \\\n    --hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \\\n    --hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \\\n    --hash=sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42 \\\n    --hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \\\n    --hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \\\n    --hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \\\n    --hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \\\n    --hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \\\n    --hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \\\n    --hash=sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b \\\n    --hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \\\n    --hash=sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15 \\\n    --hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \\\n    --hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \\\n    --hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \\\n    --hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \\\n    --hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \\\n    --hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \\\n    
--hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \\\n    --hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \\\n    --hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \\\n    --hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \\\n    --hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \\\n    --hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \\\n    --hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \\\n    --hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \\\n    --hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \\\n    --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \\\n    --hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \\\n    --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \\\n    --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/response_codec.cc",
    "content": "#include \"extensions/filters/network/kafka/response_codec.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n// Default implementation that just creates ResponseHeaderParser with dependencies provided.\nclass ResponseInitialParserFactoryImpl : public ResponseInitialParserFactory {\n  ResponseParserSharedPtr create(ExpectedResponsesSharedPtr expected_responses,\n                                 const ResponseParserResolver& parser_resolver) const override {\n    return std::make_shared<ResponseHeaderParser>(expected_responses, parser_resolver);\n  }\n};\n\nconst ResponseInitialParserFactory& ResponseInitialParserFactory::getDefaultInstance() {\n  CONSTRUCT_ON_FIRST_USE(ResponseInitialParserFactoryImpl);\n}\n\nvoid ResponseDecoder::expectResponse(const int32_t correlation_id, const int16_t api_key,\n                                     const int16_t api_version) {\n  (*expected_responses_)[correlation_id] = {api_key, api_version};\n};\n\nResponseParserSharedPtr ResponseDecoder::createStartParser() {\n  return factory_.create(expected_responses_, response_parser_resolver_);\n}\n\nvoid ResponseEncoder::encode(const AbstractResponse& message) {\n  const uint32_t size = htobe32(message.computeSize());\n  output_.add(&size, sizeof(size)); // Encode data length.\n  message.encode(output_);          // Encode data.\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/response_codec.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/kafka/codec.h\"\n#include \"extensions/filters/network/kafka/kafka_response_parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nusing ResponseCallback = MessageCallback<AbstractResponseSharedPtr, ResponseMetadataSharedPtr>;\n\nusing ResponseCallbackSharedPtr = std::shared_ptr<ResponseCallback>;\n\n/**\n * Provides initial parser for responses (class extracted to allow injecting test factories).\n */\nclass ResponseInitialParserFactory {\npublic:\n  virtual ~ResponseInitialParserFactory() = default;\n\n  /**\n   * Creates default instance that returns ResponseHeaderParser instances.\n   */\n  static const ResponseInitialParserFactory& getDefaultInstance();\n\n  /**\n   * Creates first parser in a chain with given dependencies (that will be used by parser further\n   * along the parse process).\n   */\n  virtual ResponseParserSharedPtr create(ExpectedResponsesSharedPtr expected_responses,\n                                         const ResponseParserResolver& parser_resolver) const PURE;\n};\n\n/**\n * Decoder that decodes Kafka responses.\n * When a response is decoded, the callbacks are notified, in order.\n *\n * This decoder uses chain of parsers to parse fragments of a response.\n * Each parser along the line returns the fully parsed message or the next parser.\n * Stores parse state (as large message's payload can be provided through multiple `onData` calls).\n *\n * As Kafka protocol does not carry response type data, it is necessary to register expected message\n * type beforehand with `expectResponse`.\n */\nclass ResponseDecoder\n    : public AbstractMessageDecoder<ResponseParserSharedPtr, ResponseCallbackSharedPtr>,\n      public Logger::Loggable<Logger::Id::kafka> {\npublic:\n  /**\n   * Creates a decoder that will notify provided callbacks when a message is successfully parsed.\n   * @param callbacks callbacks to be invoked (in 
order).\n   */\n  ResponseDecoder(const std::vector<ResponseCallbackSharedPtr> callbacks)\n      : ResponseDecoder{ResponseInitialParserFactory::getDefaultInstance(),\n                        ResponseParserResolver::getDefaultInstance(), callbacks} {};\n\n  /**\n   * Visible for testing.\n   * Allows injecting initial parser factory and parser resolver.\n   * @param factory parser factory to be used when new message is to be processed.\n   * @param parserResolver supported parser resolver.\n   * @param callbacks callbacks to be invoked (in order).\n   */\n  ResponseDecoder(const ResponseInitialParserFactory& factory,\n                  const ResponseParserResolver& response_parser_resolver,\n                  const std::vector<ResponseCallbackSharedPtr> callbacks)\n\n      : AbstractMessageDecoder{callbacks}, factory_{factory}, response_parser_resolver_{\n                                                                  response_parser_resolver} {};\n\n  /**\n   * Registers an expected message.\n   * The response's api key & version will be used to create corresponding payload parser when\n   * message with the same correlation id is received.\n   * @param correlation_id id of the response.\n   * @param api_key expected api key of response with given correlation id.\n   * @param api_version expected api version of response with given correlation id.\n   */\n  virtual void expectResponse(const int32_t correlation_id, const int16_t api_key,\n                              const int16_t api_version);\n\nprotected:\n  ResponseParserSharedPtr createStartParser() override;\n\nprivate:\n  const ResponseInitialParserFactory& factory_;\n  const ResponseParserResolver& response_parser_resolver_;\n\n  // Store containing expected response metadata (api key & version).\n  // Response data is stored in order, as per Kafka protocol.\n  const ExpectedResponsesSharedPtr expected_responses_ = std::make_shared<ExpectedResponses>();\n};\n\nusing ResponseDecoderSharedPtr = 
std::shared_ptr<ResponseDecoder>;\n\n/**\n * Encodes responses into underlying buffer.\n */\nclass ResponseEncoder : public MessageEncoder<AbstractResponse> {\npublic:\n  /**\n   * Wraps buffer with encoder.\n   */\n  ResponseEncoder(Buffer::Instance& output) : output_(output) {}\n\n  /**\n   * Encodes response into wrapped buffer.\n   */\n  void encode(const AbstractResponse& message) override;\n\nprivate:\n  Buffer::Instance& output_;\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/serialization/generator.py",
    "content": "#!/usr/bin/python\n\n# Main library file containing all the composite deserializer logic.\n\n\ndef generate_main_code(serialization_composite_h_file):\n  \"\"\"\n  Main code generator.\n  Renders the header file for serialization composites.\n  The location of output file is provided as argument.\n  \"\"\"\n  generate_code('serialization_composite_h.j2', serialization_composite_h_file)\n\n\ndef generate_test_code(serialization_composite_test_cc_file):\n  \"\"\"\n  Test code generator.\n  Renders the test file for serialization composites.\n  The location of output file is provided as argument.\n  \"\"\"\n  generate_code('serialization_composite_test_cc.j2', serialization_composite_test_cc_file)\n\n\ndef generate_code(template_name, output_file):\n  \"\"\"\n  Gets definition of structures to render.\n  Then renders these structures using template provided into provided output file.\n  \"\"\"\n  field_counts = get_field_counts()\n  template = RenderingHelper.get_template(template_name)\n  contents = template.render(counts=field_counts)\n  with open(output_file, 'w') as fd:\n    fd.write(contents)\n\n\ndef get_field_counts():\n  \"\"\"\n  Generate argument counts that should be processed by composite deserializers.\n  \"\"\"\n  return range(1, 12)\n\n\nclass RenderingHelper:\n  \"\"\"\n  Helper for jinja templates.\n  \"\"\"\n\n  @staticmethod\n  def get_template(template):\n    import jinja2\n    import os\n    import sys\n    # Templates are resolved relatively to main start script, due to main & test templates being\n    # stored in different directories.\n    env = jinja2.Environment(loader=jinja2.FileSystemLoader(\n        searchpath=os.path.dirname(os.path.abspath(sys.argv[0]))))\n    return env.get_template(template)\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/serialization/launcher.py",
    "content": "#!/usr/bin/python\n\n# Launcher for generating composite serializer code.\n\nimport source.extensions.filters.network.kafka.serialization.generator as generator\nimport sys\nimport os\n\n\ndef main():\n  \"\"\"\n  Serialization composite code generator\n  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n  Generates main source code files for composite deserializers.\n  The files are generated, as they are extremely repetitive (composite deserializer for 0..9\n  sub-deserializers).\n\n  Usage:\n    launcher.py LOCATION_OF_OUTPUT_FILE\n  where:\n  LOCATION_OF_OUTPUT_FILE : location of 'serialization_composite.h'.\n\n  Creates 'serialization_composite.h' - header with declarations of\n  CompositeDeserializerWith???Delegates classes.\n\n  Template used: 'serialization_composite_h.j2'.\n  \"\"\"\n  serialization_composite_h_file = os.path.abspath(sys.argv[1])\n  generator.generate_main_code(serialization_composite_h_file)\n\n\nif __name__ == \"__main__\":\n  main()\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/serialization/serialization_composite_h.j2",
    "content": "{#\n  Creates 'serialization_composite.h'.\n\n  Template for composite serializers (the CompositeDeserializerWith_N_Delegates classes).\n  Covers the corner case of 0 delegates, and then uses templating to create declarations for 1..N\n  variants.\n#}\n#pragma once\n\n#include <algorithm>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/byte_order.h\"\n#include \"common/common/fmt.h\"\n\n#include \"extensions/filters/network/kafka/kafka_types.h\"\n#include \"extensions/filters/network/kafka/serialization.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * This header contains only composite deserializers.\n * The basic design is composite deserializer creating delegates DeserializerType1..N.\n * Result of type ResponseType is constructed by getting results of each of delegates.\n * These deserializers can throw, if any of the delegate deserializers can.\n */\n\n/**\n * Composite deserializer that uses 0 deserializer(s) (corner case).\n * Does not consume any bytes, and is always ready to return the result.\n * Creates a result value using the no-arg ResponseType constructor.\n * @param ResponseType type of deserialized data.\n */\ntemplate <typename ResponseType>\nclass CompositeDeserializerWith0Delegates : public Deserializer<ResponseType> {\npublic:\n  CompositeDeserializerWith0Delegates(){};\n  uint32_t feed(absl::string_view&) override { return 0; }\n  bool ready() const override { return true; }\n  ResponseType get() const override { return {}; }\n};\n\n{% for field_count in counts %}\n/**\n * Composite deserializer that uses {{ field_count }} deserializer(s).\n * Passes data to each of the underlying deserializers (deserializers that are already ready do not\n * consume data, so it's safe).\n * The 
composite deserializer is ready when the last deserializer is ready (what means that all\n * deserializers before it are ready too).\n * Constructs the result of type ResponseType using { delegate1_.get(), delegate2_.get() ... }.\n *\n * @param ResponseType type of deserialized data{% for field in range(1, field_count + 1) %}.\n * @param DeserializerType{{ field }} deserializer {{ field }}.\n{% endfor %} */\ntemplate <\n  typename ResponseType{% for field in range(1, field_count + 1) %},\n  typename DeserializerType{{ field }}{% endfor %}\n>\nclass CompositeDeserializerWith{{ field_count }}Delegates : public Deserializer<ResponseType> {\npublic:\n  CompositeDeserializerWith{{ field_count }}Delegates(){};\n\n  uint32_t feed(absl::string_view& data) override {\n    uint32_t consumed = 0;\n    {% for field in range(1, field_count + 1) %}\n    consumed += delegate{{ field }}_.feed(data);\n    {% endfor %}\n    return consumed;\n  }\n\n  bool ready() const override { return delegate{{ field_count }}_.ready(); }\n\n  ResponseType get() const override {\n    return {\n      {% for field in range(1, field_count + 1) %}delegate{{ field }}_.get(),\n      {% endfor %}};\n  }\n\nprotected:\n  {% for field in range(1, field_count + 1) %}\n  DeserializerType{{ field }} delegate{{ field }}_;\n  {% endfor %}\n};\n{% endfor %}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/serialization.cc",
    "content": "#include \"extensions/filters/network/kafka/serialization.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nconstexpr static int16_t NULL_STRING_LENGTH = -1;\nconstexpr static uint32_t NULL_COMPACT_STRING_LENGTH = 0;\nconstexpr static int32_t NULL_BYTES_LENGTH = -1;\nconstexpr static uint32_t NULL_COMPACT_BYTES_LENGTH = 0;\n\n/**\n * Helper method for deserializers that get the length of data, and then copy the given bytes into a\n * local buffer. Templated as there are length and byte type differences.\n * Impl note: This method modifies (sets up) most of Deserializer's fields.\n * @param data bytes to deserialize.\n * @param length_deserializer payload length deserializer.\n * @param length_consumed_marker marker telling whether length has been extracted from\n * length_deserializer, and underlying buffer has been initialized.\n * @param required remaining bytes to consume.\n * @param data_buffer buffer with capacity for 'required' bytes.\n * @param ready marker telling whether this deserialized has finished processing.\n * @param null_value_length value marking null values.\n * @param allow_null_value whether null value if allowed.\n * @return number of bytes consumed.\n */\ntemplate <typename DeserializerType, typename LengthType, typename ByteType>\nuint32_t feedBytesIntoBuffers(absl::string_view& data, DeserializerType& length_deserializer,\n                              bool& length_consumed_marker, LengthType& required,\n                              std::vector<ByteType>& data_buffer, bool& ready,\n                              const LengthType null_value_length, const bool allow_null_value) {\n\n  const uint32_t length_consumed = length_deserializer.feed(data);\n  if (!length_deserializer.ready()) {\n    // Break early: we still need to fill in length buffer.\n    return length_consumed;\n  }\n\n  if (!length_consumed_marker) {\n    // Length buffer is ready, but we have not yet processed 
the result.\n    // We need to extract the real data length and initialize buffer for it.\n    required = length_deserializer.get();\n\n    if (required >= 0) {\n      data_buffer = std::vector<ByteType>(required);\n    }\n\n    if (required == null_value_length) {\n      if (allow_null_value) {\n        // We have received 'null' value in deserializer that allows it (e.g. NullableBytes), no\n        // more processing is necessary.\n        ready = true;\n      } else {\n        // Invalid payload: null length for non-null object.\n        throw EnvoyException(absl::StrCat(\"invalid length: \", required));\n      }\n    }\n\n    if (required < null_value_length) {\n      throw EnvoyException(absl::StrCat(\"invalid length: \", required));\n    }\n\n    length_consumed_marker = true;\n  }\n\n  if (ready) {\n    // Break early: we might not need to consume any bytes for nullable values OR in case of repeat\n    // invocation on already-ready buffer.\n    return length_consumed;\n  }\n\n  const uint32_t data_consumed = std::min<uint32_t>(required, data.size());\n  const uint32_t written = data_buffer.size() - required;\n  if (data_consumed > 0) {\n    memcpy(data_buffer.data() + written, data.data(), data_consumed);\n    required -= data_consumed;\n    data = {data.data() + data_consumed, data.size() - data_consumed};\n  }\n\n  // We have consumed all the bytes, mark the deserializer as ready.\n  if (required == 0) {\n    ready = true;\n  }\n\n  return length_consumed + data_consumed;\n}\n\nuint32_t StringDeserializer::feed(absl::string_view& data) {\n  return feedBytesIntoBuffers<Int16Deserializer, int16_t, char>(\n      data, length_buf_, length_consumed_, required_, data_buf_, ready_, NULL_STRING_LENGTH, false);\n}\n\nuint32_t NullableStringDeserializer::feed(absl::string_view& data) {\n  return feedBytesIntoBuffers<Int16Deserializer, int16_t, char>(\n      data, length_buf_, length_consumed_, required_, data_buf_, ready_, NULL_STRING_LENGTH, true);\n}\n\nuint32_t 
BytesDeserializer::feed(absl::string_view& data) {\n  return feedBytesIntoBuffers<Int32Deserializer, int32_t, unsigned char>(\n      data, length_buf_, length_consumed_, required_, data_buf_, ready_, NULL_BYTES_LENGTH, false);\n}\n\nuint32_t NullableBytesDeserializer::feed(absl::string_view& data) {\n  return feedBytesIntoBuffers<Int32Deserializer, int32_t, unsigned char>(\n      data, length_buf_, length_consumed_, required_, data_buf_, ready_, NULL_BYTES_LENGTH, true);\n}\n\n/**\n * Helper method for \"compact\" deserializers that get the length of data, and then copy the given\n * bytes into a local buffer. Compared to `feedBytesIntoBuffers` we only use template for data type,\n * as compact data types always use variable-length uint32 for data length.\n * Impl note: This method modifies (sets up) most of Deserializer's fields.\n * @param data bytes to deserialize.\n * @param length_deserializer payload length deserializer.\n * @param length_consumed_marker marker telling whether length has been extracted from\n * length_deserializer, and underlying buffer has been initialized.\n * @param required remaining bytes to consume.\n * @param data_buffer buffer with capacity for 'required' bytes.\n * @param ready marker telling whether this deserialized has finished processing.\n * @param null_value_length value marking null values.\n * @param allow_null_value whether null value if allowed.\n * @return number of bytes consumed.\n */\ntemplate <typename ByteType>\nuint32_t\nfeedCompactBytesIntoBuffers(absl::string_view& data, VarUInt32Deserializer& length_deserializer,\n                            bool& length_consumed_marker, uint32_t& required,\n                            std::vector<ByteType>& data_buffer, bool& ready,\n                            const uint32_t null_value_length, const bool allow_null_value) {\n\n  const uint32_t length_consumed = length_deserializer.feed(data);\n  if (!length_deserializer.ready()) {\n    // Break early: we still need to fill in 
length buffer.\n    return length_consumed;\n  }\n\n  if (!length_consumed_marker) {\n    // Length buffer is ready, but we have not yet processed the result.\n    // We need to extract the real data length and initialize buffer for it.\n    required = length_deserializer.get();\n\n    if (null_value_length == required) {\n      if (allow_null_value) {\n        // We have received 'null' value in deserializer that allows it (e.g. NullableCompactBytes),\n        // no more processing is necessary.\n        ready = true;\n      } else {\n        // Invalid payload: null length for non-null object.\n        throw EnvoyException(absl::StrCat(\"invalid length: \", required));\n      }\n    } else {\n      // Compact data types carry data length + 1 (0 is used to mark 'null' in nullable types).\n      required--;\n      data_buffer = std::vector<ByteType>(required);\n    }\n\n    length_consumed_marker = true;\n  }\n\n  if (ready) {\n    // Break early: we might not need to consume any bytes for nullable values OR in case of repeat\n    // invocation on already-ready buffer.\n    return length_consumed;\n  }\n\n  const uint32_t data_consumed = std::min<uint32_t>(required, data.size());\n  const uint32_t written = data_buffer.size() - required;\n  if (data_consumed > 0) {\n    memcpy(data_buffer.data() + written, data.data(), data_consumed);\n    required -= data_consumed;\n    data = {data.data() + data_consumed, data.size() - data_consumed};\n  }\n\n  // We have consumed all the bytes, mark the deserializer as ready.\n  if (required == 0) {\n    ready = true;\n  }\n\n  return length_consumed + data_consumed;\n}\n\nuint32_t CompactStringDeserializer::feed(absl::string_view& data) {\n  return feedCompactBytesIntoBuffers<char>(data, length_buf_, length_consumed_, required_,\n                                           data_buf_, ready_, NULL_COMPACT_STRING_LENGTH, false);\n}\n\nuint32_t NullableCompactStringDeserializer::feed(absl::string_view& data) {\n  return 
feedCompactBytesIntoBuffers<char>(data, length_buf_, length_consumed_, required_,\n                                           data_buf_, ready_, NULL_COMPACT_STRING_LENGTH, true);\n}\n\nNullableString NullableCompactStringDeserializer::get() const {\n  const uint32_t original_data_len = length_buf_.get();\n  if (NULL_COMPACT_STRING_LENGTH == original_data_len) {\n    return absl::nullopt;\n  } else {\n    return absl::make_optional(std::string(data_buf_.begin(), data_buf_.end()));\n  }\n}\n\nuint32_t CompactBytesDeserializer::feed(absl::string_view& data) {\n  return feedCompactBytesIntoBuffers<unsigned char>(data, length_buf_, length_consumed_, required_,\n                                                    data_buf_, ready_, NULL_COMPACT_BYTES_LENGTH,\n                                                    false);\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/serialization.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/byte_order.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n\n#include \"extensions/filters/network/kafka/kafka_types.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Deserializer is a stateful entity that constructs a result of type T from bytes provided.\n * It can be feed()-ed data until it is ready, filling the internal store.\n * When ready(), it is safe to call get() to transform the internally stored bytes into result.\n * Further feed()-ing should have no effect on a buffer (should return 0 and not move\n * provided pointer).\n * @param T type of deserialized data.\n */\ntemplate <typename T> class Deserializer {\npublic:\n  /**\n   * The type this deserializer is deserializing.\n   */\n  using result_type = T;\n\n  virtual ~Deserializer() = default;\n\n  /**\n   * Submit data to be processed, will consume as much data as it is necessary.\n   * If any bytes are consumed, then the provided string view is updated by stepping over consumed\n   * bytes. 
Invoking this method when deserializer is ready has no effect (consumes 0 bytes).\n   * @param data bytes to be processed, will be updated if any have been consumed.\n   * @return number of bytes consumed (equal to change in 'data').\n   */\n  virtual uint32_t feed(absl::string_view& data) PURE;\n\n  /**\n   * Whether deserializer has consumed enough data to return result.\n   */\n  virtual bool ready() const PURE;\n\n  /**\n   * Returns the entity that is represented by bytes stored in this deserializer.\n   * Should be only called when deserializer is ready.\n   */\n  virtual T get() const PURE;\n};\n\n/**\n * Generic integer deserializer (uses array of sizeof(T) bytes).\n * After all bytes are filled in, the value is converted from network byte-order and returned.\n */\ntemplate <typename T> class IntDeserializer : public Deserializer<T> {\npublic:\n  uint32_t feed(absl::string_view& data) override {\n    const uint32_t available = std::min<uint32_t>(sizeof(buf_) - written_, data.size());\n    memcpy(buf_ + written_, data.data(), available);\n    written_ += available;\n\n    if (written_ == sizeof(buf_)) {\n      ready_ = true;\n    }\n\n    data = {data.data() + available, data.size() - available};\n\n    return available;\n  }\n\n  bool ready() const override { return ready_; }\n\nprotected:\n  char buf_[sizeof(T) / sizeof(char)];\n  uint32_t written_{0};\n  bool ready_{false};\n};\n\n/**\n * Integer deserializer for int8_t.\n */\nclass Int8Deserializer : public IntDeserializer<int8_t> {\npublic:\n  int8_t get() const override {\n    int8_t result;\n    memcpy(&result, buf_, sizeof(result));\n    return result;\n  }\n};\n\n/**\n * Integer deserializer for int16_t.\n */\nclass Int16Deserializer : public IntDeserializer<int16_t> {\npublic:\n  int16_t get() const override {\n    int16_t result;\n    memcpy(&result, buf_, sizeof(result));\n    return be16toh(result);\n  }\n};\n\n/**\n * Integer deserializer for int32_t.\n */\nclass Int32Deserializer : public 
IntDeserializer<int32_t> {\npublic:\n  int32_t get() const override {\n    int32_t result;\n    memcpy(&result, buf_, sizeof(result));\n    return be32toh(result);\n  }\n};\n\n/**\n * Integer deserializer for uint32_t.\n */\nclass UInt32Deserializer : public IntDeserializer<uint32_t> {\npublic:\n  uint32_t get() const override {\n    uint32_t result;\n    memcpy(&result, buf_, sizeof(result));\n    return be32toh(result);\n  }\n};\n\n/**\n * Integer deserializer for uint64_t.\n */\nclass Int64Deserializer : public IntDeserializer<int64_t> {\npublic:\n  int64_t get() const override {\n    int64_t result;\n    memcpy(&result, buf_, sizeof(result));\n    return be64toh(result);\n  }\n};\n\n/**\n * Deserializer for boolean values.\n * Uses a single int8 deserializer, and checks whether the results equals 0.\n * When reading a boolean value, any non-zero value is considered true.\n * Impl note: could have been a subclass of IntDeserializer<int8_t> with a different get function,\n * but it makes it harder to understand.\n */\nclass BooleanDeserializer : public Deserializer<bool> {\npublic:\n  BooleanDeserializer() = default;\n\n  uint32_t feed(absl::string_view& data) override { return buffer_.feed(data); }\n\n  bool ready() const override { return buffer_.ready(); }\n\n  bool get() const override { return 0 != buffer_.get(); }\n\nprivate:\n  Int8Deserializer buffer_;\n};\n\n/**\n * Integer deserializer for uint32_t that was encoded as variable-length byte array.\n * Encoding documentation:\n * https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields#KIP-482:TheKafkaProtocolshouldSupportOptionalTaggedFields-UnsignedVarints\n *\n * Impl note:\n * This implementation is equivalent to the one present in Kafka 2.4.0, what means that for 5-byte\n * inputs, the data at bits 5-7 in 5th byte are *ignored* (as long as 8th bit is unset).\n */\nclass VarUInt32Deserializer : public Deserializer<uint32_t> {\npublic:\n  
VarUInt32Deserializer() = default;\n\n  uint32_t feed(absl::string_view& data) override {\n    uint32_t processed = 0;\n    while (!ready_ && !data.empty()) {\n\n      // Read next byte from input.\n      uint8_t el;\n      memcpy(&el, data.data(), sizeof(uint8_t));\n      data = {data.data() + 1, data.size() - 1};\n      processed++;\n\n      // Put the 7 bits where they should have been.\n      // Impl note: the cast is done to avoid undefined behaviour when offset_ >= 28 and some bits\n      // at positions 5-7 are set (we would have left shift of signed value that does not fit in\n      // data type).\n      result_ |= ((static_cast<uint32_t>(el) & 0x7f) << offset_);\n      if ((el & 0x80) == 0) {\n        // If this was the last byte to process (what is marked by unset highest bit), we are done.\n        ready_ = true;\n        break;\n      } else {\n        // Otherwise, we need to read next byte.\n        offset_ += 7;\n        // Valid input can have at most 5 bytes.\n        if (offset_ >= 5 * 7) {\n          ExceptionUtil::throwEnvoyException(\n              \"VarUInt32 is too long (5th byte has highest bit set)\");\n        }\n      }\n    }\n    return processed;\n  }\n\n  bool ready() const override { return ready_; }\n\n  uint32_t get() const override { return result_; }\n\nprivate:\n  uint32_t result_ = 0;\n  uint32_t offset_ = 0;\n  bool ready_ = false;\n};\n\n/**\n * Deserializer of string value.\n * First reads length (INT16) and then allocates the buffer of given length.\n *\n * From Kafka documentation:\n * First the length N is given as an INT16.\n * Then N bytes follow which are the UTF-8 encoding of the character sequence.\n * Length must not be negative.\n */\nclass StringDeserializer : public Deserializer<std::string> {\npublic:\n  /**\n   * Can throw EnvoyException if given string length is not valid.\n   */\n  uint32_t feed(absl::string_view& data) override;\n\n  bool ready() const override { return ready_; }\n\n  std::string get() const 
override { return std::string(data_buf_.begin(), data_buf_.end()); }\n\nprivate:\n  Int16Deserializer length_buf_;\n  bool length_consumed_{false};\n\n  int16_t required_;\n  std::vector<char> data_buf_;\n\n  bool ready_{false};\n};\n\n/**\n * Deserializer of compact string value.\n * First reads length (UNSIGNED_VARINT) and then allocates the buffer of given length.\n *\n * From Kafka documentation:\n * First the length N + 1 is given as an UNSIGNED_VARINT.\n * Then N bytes follow which are the UTF-8 encoding of the character sequence.\n */\nclass CompactStringDeserializer : public Deserializer<std::string> {\npublic:\n  uint32_t feed(absl::string_view& data) override;\n\n  bool ready() const override { return ready_; }\n\n  std::string get() const override { return std::string(data_buf_.begin(), data_buf_.end()); }\n\nprivate:\n  VarUInt32Deserializer length_buf_;\n  bool length_consumed_{false};\n\n  uint32_t required_;\n  std::vector<char> data_buf_;\n\n  bool ready_{false};\n};\n\n/**\n * Deserializer of nullable string value.\n * First reads length (INT16) and then allocates the buffer of given length.\n * If length was -1, buffer allocation is omitted and deserializer is immediately ready (returning\n * null value).\n *\n * From Kafka documentation:\n * For non-null strings, first the length N is given as an INT16.\n * Then N bytes follow which are the UTF-8 encoding of the character sequence.\n * A null value is encoded with length of -1 and there are no following bytes.\n */\nclass NullableStringDeserializer : public Deserializer<NullableString> {\npublic:\n  /**\n   * Can throw EnvoyException if given string length is not valid.\n   */\n  uint32_t feed(absl::string_view& data) override;\n\n  bool ready() const override { return ready_; }\n\n  NullableString get() const override {\n    return required_ >= 0 ? 
absl::make_optional(std::string(data_buf_.begin(), data_buf_.end()))\n                          : absl::nullopt;\n  }\n\nprivate:\n  Int16Deserializer length_buf_;\n  bool length_consumed_{false};\n\n  int16_t required_;\n  std::vector<char> data_buf_;\n\n  bool ready_{false};\n};\n\n/**\n * Deserializer of nullable compact string value.\n * First reads length (UNSIGNED_VARINT) and then allocates the buffer of given length.\n * If length was 0, buffer allocation is omitted and deserializer is immediately ready (returning\n * null value).\n *\n * From Kafka documentation:\n * First the length N + 1 is given as an UNSIGNED_VARINT.\n * Then N bytes follow which are the UTF-8 encoding of the character sequence.\n * A null string is represented with a length of 0.\n */\nclass NullableCompactStringDeserializer : public Deserializer<NullableString> {\npublic:\n  uint32_t feed(absl::string_view& data) override;\n\n  bool ready() const override { return ready_; }\n\n  NullableString get() const override;\n\nprivate:\n  VarUInt32Deserializer length_buf_;\n  bool length_consumed_{false};\n\n  uint32_t required_;\n  std::vector<char> data_buf_;\n\n  bool ready_{false};\n};\n\n/**\n * Deserializer of bytes value.\n * First reads length (INT32) and then allocates the buffer of given length.\n *\n * From Kafka documentation:\n * First the length N is given as an INT32. 
Then N bytes follow.\n */\nclass BytesDeserializer : public Deserializer<Bytes> {\npublic:\n  /**\n   * Can throw EnvoyException if given bytes length is not valid.\n   */\n  uint32_t feed(absl::string_view& data) override;\n\n  bool ready() const override { return ready_; }\n\n  Bytes get() const override { return data_buf_; }\n\nprivate:\n  Int32Deserializer length_buf_;\n  bool length_consumed_{false};\n  int32_t required_;\n\n  std::vector<unsigned char> data_buf_;\n  bool ready_{false};\n};\n\n/**\n * Deserializer of compact bytes value.\n * First reads length (UNSIGNED_VARINT) and then allocates the buffer of given length.\n *\n * From Kafka documentation:\n * First the length N+1 is given as an UNSIGNED_VARINT. Then N bytes follow.\n */\nclass CompactBytesDeserializer : public Deserializer<Bytes> {\npublic:\n  /**\n   * Can throw EnvoyException if given bytes length is not valid.\n   */\n  uint32_t feed(absl::string_view& data) override;\n\n  bool ready() const override { return ready_; }\n\n  Bytes get() const override { return data_buf_; }\n\nprivate:\n  VarUInt32Deserializer length_buf_;\n  bool length_consumed_{false};\n  uint32_t required_;\n\n  std::vector<unsigned char> data_buf_;\n  bool ready_{false};\n};\n\n/**\n * Deserializer of nullable bytes value.\n * First reads length (INT32) and then allocates the buffer of given length.\n * If length was -1, buffer allocation is omitted and deserializer is immediately ready (returning\n * null value).\n *\n * From Kafka documentation:\n * For non-null values, first the length N is given as an INT32. 
Then N bytes follow.\n * A null value is encoded with length of -1 and there are no following bytes.\n */\nclass NullableBytesDeserializer : public Deserializer<NullableBytes> {\npublic:\n  /**\n   * Can throw EnvoyException if given bytes length is not valid.\n   */\n  uint32_t feed(absl::string_view& data) override;\n\n  bool ready() const override { return ready_; }\n\n  NullableBytes get() const override {\n    return required_ >= 0 ? absl::make_optional(data_buf_) : absl::nullopt;\n  }\n\nprivate:\n  Int32Deserializer length_buf_;\n  bool length_consumed_{false};\n  int32_t required_;\n\n  std::vector<unsigned char> data_buf_;\n  bool ready_{false};\n};\n\n/**\n * Deserializer for array of objects of the same type.\n *\n * First reads the length of the array, then initializes N underlying deserializers of type\n * DeserializerType. After the last of N deserializers is ready, the results of each of them are\n * gathered and put in a vector.\n * @param DeserializerType underlying deserializer type.\n *\n * From Kafka documentation:\n * Represents a sequence of objects of a given type T. Type T can be either a primitive type (e.g.\n * STRING) or a structure. First, the length N is given as an int32_t. Then N instances of type T\n * follow. 
A null array is represented with a length of -1.\n */\ntemplate <typename DeserializerType>\nclass ArrayDeserializer : public Deserializer<std::vector<typename DeserializerType::result_type>> {\npublic:\n  using ResponseType = typename DeserializerType::result_type;\n\n  /**\n   * Can throw EnvoyException if array length is invalid or if underlying deserializer can throw.\n   */\n  uint32_t feed(absl::string_view& data) override {\n\n    const uint32_t length_consumed = length_buf_.feed(data);\n    if (!length_buf_.ready()) {\n      // Break early: we still need to fill in length buffer.\n      return length_consumed;\n    }\n\n    if (!length_consumed_) {\n      required_ = length_buf_.get();\n      if (required_ >= 0) {\n        children_ = std::vector<DeserializerType>(required_);\n      } else {\n        ExceptionUtil::throwEnvoyException(absl::StrCat(\"invalid ARRAY length: \", required_));\n      }\n      length_consumed_ = true;\n    }\n\n    if (ready_) {\n      return length_consumed;\n    }\n\n    uint32_t child_consumed{0};\n    for (DeserializerType& child : children_) {\n      child_consumed += child.feed(data);\n    }\n\n    bool children_ready_ = true;\n    for (DeserializerType& child : children_) {\n      children_ready_ &= child.ready();\n    }\n    ready_ = children_ready_;\n\n    return length_consumed + child_consumed;\n  }\n\n  bool ready() const override { return ready_; }\n\n  std::vector<ResponseType> get() const override {\n    std::vector<ResponseType> result{};\n    result.reserve(children_.size());\n    for (const DeserializerType& child : children_) {\n      const ResponseType child_result = child.get();\n      result.push_back(child_result);\n    }\n    return result;\n  }\n\nprivate:\n  Int32Deserializer length_buf_;\n  bool length_consumed_{false};\n  int32_t required_;\n  std::vector<DeserializerType> children_;\n  bool children_setup_{false};\n  bool ready_{false};\n};\n\n/**\n * Deserializer for compact array of objects of the 
same type.\n *\n * First reads the length of the array, then initializes N underlying deserializers of type\n * DeserializerType. After the last of N deserializers is ready, the results of each of them are\n * gathered and put in a vector.\n * @param DeserializerType underlying deserializer type.\n *\n * From Kafka documentation:\n * Represents a sequence of objects of a given type T. Type T can be either a primitive type (e.g.\n * STRING) or a structure. First, the length N + 1 is given as an UNSIGNED_VARINT. Then N instances\n * of type T follow. A null array is represented with a length of 0.\n */\ntemplate <typename DeserializerType>\nclass CompactArrayDeserializer\n    : public Deserializer<std::vector<typename DeserializerType::result_type>> {\npublic:\n  using ResponseType = typename DeserializerType::result_type;\n\n  /**\n   * Can throw EnvoyException if array length is invalid or if underlying deserializer can throw.\n   */\n  uint32_t feed(absl::string_view& data) override {\n\n    const uint32_t length_consumed = length_buf_.feed(data);\n    if (!length_buf_.ready()) {\n      // Break early: we still need to fill in length buffer.\n      return length_consumed;\n    }\n\n    if (!length_consumed_) {\n      const uint32_t required = length_buf_.get();\n      if (required >= 1) {\n        children_ = std::vector<DeserializerType>(required - 1);\n      } else {\n        ExceptionUtil::throwEnvoyException(\n            absl::StrCat(\"invalid COMPACT_ARRAY length: \", required));\n      }\n      length_consumed_ = true;\n    }\n\n    if (ready_) {\n      return length_consumed;\n    }\n\n    uint32_t child_consumed{0};\n    for (DeserializerType& child : children_) {\n      child_consumed += child.feed(data);\n    }\n\n    bool children_ready_ = true;\n    for (DeserializerType& child : children_) {\n      children_ready_ &= child.ready();\n    }\n    ready_ = children_ready_;\n\n    return length_consumed + child_consumed;\n  }\n\n  bool ready() const 
override { return ready_; }\n\n  std::vector<ResponseType> get() const override {\n    std::vector<ResponseType> result{};\n    result.reserve(children_.size());\n    for (const DeserializerType& child : children_) {\n      const ResponseType child_result = child.get();\n      result.push_back(child_result);\n    }\n    return result;\n  }\n\nprivate:\n  VarUInt32Deserializer length_buf_;\n  bool length_consumed_{false};\n  std::vector<DeserializerType> children_;\n  bool children_setup_{false};\n  bool ready_{false};\n};\n\n/**\n * Deserializer for nullable array of objects of the same type.\n *\n * First reads the length of the array, then initializes N underlying deserializers of type\n * DeserializerType. After the last of N deserializers is ready, the results of each of them are\n * gathered and put in a vector.\n * @param DeserializerType underlying deserializer type.\n *\n * From Kafka documentation:\n * Represents a sequence of objects of a given type T. Type T can be either a primitive type (e.g.\n * STRING) or a structure. First, the length N is given as an int32_t. Then N instances of type T\n * follow. 
A null array is represented with a length of -1.\n */\ntemplate <typename DeserializerType>\nclass NullableArrayDeserializer\n    : public Deserializer<NullableArray<typename DeserializerType::result_type>> {\npublic:\n  using ResponseType = typename DeserializerType::result_type;\n\n  /**\n   * Can throw EnvoyException if array length is invalid or if underlying deserializer can throw.\n   */\n  uint32_t feed(absl::string_view& data) override {\n\n    const uint32_t length_consumed = length_buf_.feed(data);\n    if (!length_buf_.ready()) {\n      // Break early: we still need to fill in length buffer.\n      return length_consumed;\n    }\n\n    if (!length_consumed_) {\n      required_ = length_buf_.get();\n\n      if (required_ >= 0) {\n        children_ = std::vector<DeserializerType>(required_);\n      }\n      if (required_ == NULL_ARRAY_LENGTH) {\n        ready_ = true;\n      }\n      if (required_ < NULL_ARRAY_LENGTH) {\n        ExceptionUtil::throwEnvoyException(\n            fmt::format(\"invalid NULLABLE_ARRAY length: {}\", required_));\n      }\n\n      length_consumed_ = true;\n    }\n\n    if (ready_) {\n      return length_consumed;\n    }\n\n    uint32_t child_consumed{0};\n    for (DeserializerType& child : children_) {\n      child_consumed += child.feed(data);\n    }\n\n    bool children_ready_ = true;\n    for (DeserializerType& child : children_) {\n      children_ready_ &= child.ready();\n    }\n    ready_ = children_ready_;\n\n    return length_consumed + child_consumed;\n  }\n\n  bool ready() const override { return ready_; }\n\n  NullableArray<ResponseType> get() const override {\n    if (NULL_ARRAY_LENGTH != required_) {\n      std::vector<ResponseType> result{};\n      result.reserve(children_.size());\n      for (const DeserializerType& child : children_) {\n        const ResponseType child_result = child.get();\n        result.push_back(child_result);\n      }\n      return result;\n    } else {\n      return absl::nullopt;\n    }\n  
}\n\nprivate:\n  constexpr static int32_t NULL_ARRAY_LENGTH{-1};\n\n  Int32Deserializer length_buf_;\n  bool length_consumed_{false};\n  int32_t required_;\n  std::vector<DeserializerType> children_;\n  bool children_setup_{false};\n  bool ready_{false};\n};\n\n/**\n * Deserializer for compact nullable array of objects of the same type.\n *\n * First reads the length of the array, then initializes N underlying deserializers of type\n * DeserializerType. After the last of N deserializers is ready, the results of each of them are\n * gathered and put in a vector.\n * @param DeserializerType underlying deserializer type.\n *\n * From Kafka documentation:\n * Represents a sequence of objects of a given type T. Type T can be either a primitive type (e.g.\n * STRING) or a structure. First, the length N + 1 is given as an UNSIGNED_VARINT. Then N instances\n * of type T follow. A null array is represented with a length of 0.\n */\ntemplate <typename DeserializerType>\nclass NullableCompactArrayDeserializer\n    : public Deserializer<NullableArray<typename DeserializerType::result_type>> {\npublic:\n  using ResponseType = typename DeserializerType::result_type;\n\n  /**\n   * Can throw EnvoyException if array length is invalid or if underlying deserializer can throw.\n   */\n  uint32_t feed(absl::string_view& data) override {\n\n    const uint32_t length_consumed = length_buf_.feed(data);\n    if (!length_buf_.ready()) {\n      // Break early: we still need to fill in length buffer.\n      return length_consumed;\n    }\n\n    if (!length_consumed_) {\n      const uint32_t required = length_buf_.get();\n\n      // Length is unsigned, so we never throw exceptions.\n      if (required >= 1) {\n        children_ = std::vector<DeserializerType>(required - 1);\n      } else {\n        ready_ = true;\n      }\n\n      length_consumed_ = true;\n    }\n\n    if (ready_) {\n      return length_consumed;\n    }\n\n    uint32_t child_consumed{0};\n    for (DeserializerType& child : 
children_) {\n      child_consumed += child.feed(data);\n    }\n\n    bool children_ready_ = true;\n    for (DeserializerType& child : children_) {\n      children_ready_ &= child.ready();\n    }\n    ready_ = children_ready_;\n\n    return length_consumed + child_consumed;\n  }\n\n  bool ready() const override { return ready_; }\n\n  NullableArray<ResponseType> get() const override {\n    if (NULL_ARRAY_LENGTH != length_buf_.get()) {\n      std::vector<ResponseType> result{};\n      result.reserve(children_.size());\n      for (const DeserializerType& child : children_) {\n        const ResponseType child_result = child.get();\n        result.push_back(child_result);\n      }\n      return result;\n    } else {\n      return absl::nullopt;\n    }\n  }\n\nprivate:\n  constexpr static int32_t NULL_ARRAY_LENGTH{0};\n\n  VarUInt32Deserializer length_buf_;\n  bool length_consumed_{false};\n  std::vector<DeserializerType> children_;\n  bool children_setup_{false};\n  bool ready_{false};\n};\n\n/**\n * Encodes provided argument in Kafka format.\n * In case of primitive types, this is done explicitly as per specification.\n * In case of composite types, this is done by calling 'encode' on provided argument.\n *\n * This object also carries extra information that is used while traversing the request\n * structure-tree during encoding (currently api_version, as different request versions serialize\n * differently).\n */\nclass EncodingContext {\npublic:\n  EncodingContext(int16_t api_version) : api_version_{api_version} {};\n\n  /**\n   * Compute size of given reference, if it were to be encoded.\n   * @return serialized size of argument.\n   */\n  template <typename T> uint32_t computeSize(const T& arg) const;\n\n  /**\n   * Compute size of given array, if it were to be encoded.\n   * @return serialized size of argument.\n   */\n  template <typename T> uint32_t computeSize(const std::vector<T>& arg) const;\n\n  /**\n   * Compute size of given nullable array, if it were to 
be encoded.\n   * @return serialized size of argument.\n   */\n  template <typename T> uint32_t computeSize(const NullableArray<T>& arg) const;\n\n  /**\n   * Compute size of given reference, if it were to be compactly encoded.\n   * @return serialized size of argument.\n   */\n  template <typename T> uint32_t computeCompactSize(const T& arg) const;\n\n  /**\n   * Compute size of given array, if it were to be compactly encoded.\n   * @return serialized size of argument.\n   */\n  template <typename T> uint32_t computeCompactSize(const std::vector<T>& arg) const;\n\n  /**\n   * Compute size of given nullable array, if it were to be encoded.\n   * @return serialized size of argument.\n   */\n  template <typename T> uint32_t computeCompactSize(const NullableArray<T>& arg) const;\n\n  /**\n   * Encode given reference in a buffer.\n   * @return bytes written\n   */\n  template <typename T> uint32_t encode(const T& arg, Buffer::Instance& dst);\n\n  /**\n   * Encode given array in a buffer.\n   * @return bytes written\n   */\n  template <typename T> uint32_t encode(const std::vector<T>& arg, Buffer::Instance& dst);\n\n  /**\n   * Encode given nullable array in a buffer.\n   * @return bytes written\n   */\n  template <typename T> uint32_t encode(const NullableArray<T>& arg, Buffer::Instance& dst);\n\n  /**\n   * Compactly encode given reference in a buffer.\n   * @return bytes written.\n   */\n  template <typename T> uint32_t encodeCompact(const T& arg, Buffer::Instance& dst);\n\n  /**\n   * Compactly encode given array in a buffer.\n   * @return bytes written.\n   */\n  template <typename T> uint32_t encodeCompact(const std::vector<T>& arg, Buffer::Instance& dst);\n\n  /**\n   * Compactly encode given nullable array in a buffer.\n   * @return bytes written.\n   */\n  template <typename T> uint32_t encodeCompact(const NullableArray<T>& arg, Buffer::Instance& dst);\n\n  int16_t apiVersion() const { return api_version_; }\n\nprivate:\n  const int16_t 
api_version_;\n};\n\n/**\n * For non-primitive types, call `computeSize` on them, to delegate the work to the entity itself.\n * The entity may use the information in context to decide which fields are included etc.\n */\ntemplate <typename T> inline uint32_t EncodingContext::computeSize(const T& arg) const {\n  return arg.computeSize(*this);\n}\n\n/**\n * For primitive types, Kafka size == sizeof(x).\n */\n#define COMPUTE_SIZE_OF_NUMERIC_TYPE(TYPE)                                                         \\\n  template <> constexpr uint32_t EncodingContext::computeSize(const TYPE&) const {                 \\\n    return sizeof(TYPE);                                                                           \\\n  }\n\nCOMPUTE_SIZE_OF_NUMERIC_TYPE(bool)\nCOMPUTE_SIZE_OF_NUMERIC_TYPE(int8_t)\nCOMPUTE_SIZE_OF_NUMERIC_TYPE(int16_t)\nCOMPUTE_SIZE_OF_NUMERIC_TYPE(int32_t)\nCOMPUTE_SIZE_OF_NUMERIC_TYPE(uint32_t)\nCOMPUTE_SIZE_OF_NUMERIC_TYPE(int64_t)\n\n/**\n * Template overload for string.\n * Kafka String's size is INT16 for header + N bytes.\n */\ntemplate <> inline uint32_t EncodingContext::computeSize(const std::string& arg) const {\n  return sizeof(int16_t) + arg.size();\n}\n\n/**\n * Template overload for nullable string.\n * Kafka NullableString's size is INT16 for header + N bytes (N >= 0).\n */\ntemplate <> inline uint32_t EncodingContext::computeSize(const NullableString& arg) const {\n  return sizeof(int16_t) + (arg ? arg->size() : 0);\n}\n\n/**\n * Template overload for byte array.\n * Kafka byte array size is INT32 for header + N bytes.\n */\ntemplate <> inline uint32_t EncodingContext::computeSize(const Bytes& arg) const {\n  return sizeof(int32_t) + arg.size();\n}\n\n/**\n * Template overload for nullable byte array.\n * Kafka nullable byte array size is INT32 for header + N bytes (N >= 0).\n */\ntemplate <> inline uint32_t EncodingContext::computeSize(const NullableBytes& arg) const {\n  return sizeof(int32_t) + (arg ? 
arg->size() : 0);\n}\n\n/**\n * Template overload for Array of T.\n * The size of array is size of header and all of its elements.\n */\ntemplate <typename T>\ninline uint32_t EncodingContext::computeSize(const std::vector<T>& arg) const {\n  uint32_t result = sizeof(int32_t);\n  for (const T& el : arg) {\n    result += computeSize(el);\n  }\n  return result;\n}\n\n/**\n * Template overload for NullableArray of T.\n * The size of array is size of header and all of its elements.\n */\ntemplate <typename T>\ninline uint32_t EncodingContext::computeSize(const NullableArray<T>& arg) const {\n  return arg ? computeSize(*arg) : sizeof(int32_t);\n}\n\n/**\n * For non-primitive types, call `computeCompactSize` on them, to delegate the work to the entity\n * itself. The entity may use the information in context to decide which fields are included etc.\n */\ntemplate <typename T> inline uint32_t EncodingContext::computeCompactSize(const T& arg) const {\n  return arg.computeCompactSize(*this);\n}\n\n/**\n * Template overload for int32_t.\n * This data type is not compacted, so we just point to non-compact implementation.\n */\ntemplate <> inline uint32_t EncodingContext::computeCompactSize(const int32_t& arg) const {\n  return computeSize(arg);\n}\n\n/**\n * Template overload for uint32_t.\n * For this data type, we notice that the result's length depends on whether there are any bits set\n * in groups (1-7, 8-14, 15-21, 22-28, 29-32).\n */\ntemplate <> inline uint32_t EncodingContext::computeCompactSize(const uint32_t& arg) const {\n  if (arg <= 0x7f) /* 2^7-1 */ {\n    return 1;\n  } else if (arg <= 0x3fff) /* 2^14-1 */ {\n    return 2;\n  } else if (arg <= 0x1fffff) /* 2^21-1 */ {\n    return 3;\n  } else if (arg <= 0xfffffff) /* 2^28-1 */ {\n    return 4;\n  } else {\n    return 5;\n  }\n}\n\n/**\n * Template overload for compact string.\n * Kafka CompactString's size is var-len encoding of N+1 + N bytes.\n */\ntemplate <> inline uint32_t 
EncodingContext::computeCompactSize(const std::string& arg) const {\n  return computeCompactSize(static_cast<uint32_t>(arg.size()) + 1) + arg.size();\n}\n\n/**\n * Template overload for compact nullable string.\n * Kafka CompactString's size is var-len encoding of N+1 + N bytes, or 1 otherwise (because we\n * var-length encode the length of 0).\n */\ntemplate <> inline uint32_t EncodingContext::computeCompactSize(const NullableString& arg) const {\n  return arg ? computeCompactSize(*arg) : 1;\n}\n\n/**\n * Template overload for compact byte array.\n * Kafka CompactBytes' size is var-len encoding of N+1 + N bytes.\n */\ntemplate <> inline uint32_t EncodingContext::computeCompactSize(const Bytes& arg) const {\n  return computeCompactSize(static_cast<uint32_t>(arg.size()) + 1) + arg.size();\n}\n\n/**\n * Template overload for CompactArray of T.\n * The size of array is compact size of header and all of its elements.\n */\ntemplate <typename T>\nuint32_t EncodingContext::computeCompactSize(const std::vector<T>& arg) const {\n  uint32_t result = computeCompactSize(static_cast<uint32_t>(arg.size()) + 1);\n  for (const T& el : arg) {\n    result += computeCompactSize(el);\n  }\n  return result;\n}\n\n/**\n * Template overload for CompactNullableArray of T.\n * The size of array is compact size of header and all of its elements; 1 otherwise (because we\n * var-length encode the length of 0).\n */\ntemplate <typename T>\nuint32_t EncodingContext::computeCompactSize(const NullableArray<T>& arg) const {\n  return arg ? 
computeCompactSize(*arg) : 1;\n}\n\n/**\n * For non-primitive types, call `encode` on them, to delegate the serialization to the entity\n * itself.\n */\ntemplate <typename T> inline uint32_t EncodingContext::encode(const T& arg, Buffer::Instance& dst) {\n  return arg.encode(dst, *this);\n}\n\n/**\n * Template overload for int8_t.\n * Encode a single byte.\n */\ntemplate <> inline uint32_t EncodingContext::encode(const int8_t& arg, Buffer::Instance& dst) {\n  dst.add(&arg, sizeof(int8_t));\n  return sizeof(int8_t);\n}\n\n/**\n * Template overload for int16_t, int32_t, uint32_t, int64_t.\n * Encode a N-byte integer, converting to network byte-order.\n */\n#define ENCODE_NUMERIC_TYPE(TYPE, CONVERTER)                                                       \\\n  template <> inline uint32_t EncodingContext::encode(const TYPE& arg, Buffer::Instance& dst) {    \\\n    const TYPE val = CONVERTER(arg);                                                               \\\n    dst.add(&val, sizeof(TYPE));                                                                   \\\n    return sizeof(TYPE);                                                                           \\\n  }\n\nENCODE_NUMERIC_TYPE(int16_t, htobe16);\nENCODE_NUMERIC_TYPE(int32_t, htobe32);\nENCODE_NUMERIC_TYPE(uint32_t, htobe32);\nENCODE_NUMERIC_TYPE(int64_t, htobe64);\n\n/**\n * Template overload for bool.\n * Encode boolean as a single byte.\n */\ntemplate <> inline uint32_t EncodingContext::encode(const bool& arg, Buffer::Instance& dst) {\n  int8_t val = arg;\n  dst.add(&val, sizeof(int8_t));\n  return sizeof(int8_t);\n}\n\n/**\n * Template overload for std::string.\n * Encode string as INT16 length + N bytes.\n */\ntemplate <> inline uint32_t EncodingContext::encode(const std::string& arg, Buffer::Instance& dst) {\n  int16_t string_length = arg.length();\n  uint32_t header_length = encode(string_length, dst);\n  dst.add(arg.c_str(), string_length);\n  return header_length + string_length;\n}\n\n/**\n * 
Template overload for NullableString.\n * Encode nullable string as INT16 length + N bytes (length = -1 for null).\n */\ntemplate <>\ninline uint32_t EncodingContext::encode(const NullableString& arg, Buffer::Instance& dst) {\n  if (arg.has_value()) {\n    return encode(*arg, dst);\n  } else {\n    const int16_t len = -1;\n    return encode(len, dst);\n  }\n}\n\n/**\n * Template overload for Bytes.\n * Encode byte array as INT32 length + N bytes.\n */\ntemplate <> inline uint32_t EncodingContext::encode(const Bytes& arg, Buffer::Instance& dst) {\n  const int32_t data_length = arg.size();\n  const uint32_t header_length = encode(data_length, dst);\n  dst.add(arg.data(), arg.size());\n  return header_length + data_length;\n}\n\n/**\n * Template overload for NullableBytes.\n * Encode nullable byte array as INT32 length + N bytes (length = -1 for null value).\n */\ntemplate <>\ninline uint32_t EncodingContext::encode(const NullableBytes& arg, Buffer::Instance& dst) {\n  if (arg.has_value()) {\n    return encode(*arg, dst);\n  } else {\n    const int32_t len = -1;\n    return encode(len, dst);\n  }\n}\n\n/**\n * Encode nullable object array to T as INT32 length + N elements.\n * Each element of type T then serializes itself on its own.\n */\ntemplate <typename T>\nuint32_t EncodingContext::encode(const std::vector<T>& arg, Buffer::Instance& dst) {\n  const NullableArray<T> wrapped = {arg};\n  return encode(wrapped, dst);\n}\n\n/**\n * Encode nullable object array to T as INT32 length + N elements (length = -1 for null value).\n * Each element of type T then serializes itself on its own.\n */\ntemplate <typename T>\nuint32_t EncodingContext::encode(const NullableArray<T>& arg, Buffer::Instance& dst) {\n  if (arg.has_value()) {\n    const int32_t len = arg->size();\n    const uint32_t header_length = encode(len, dst);\n    uint32_t written{0};\n    for (const T& el : *arg) {\n      // For each of array elements, resolve the correct method again.\n      // Elements could 
be primitives or complex types, so calling encode() on object won't work.\n      written += encode(el, dst);\n    }\n    return header_length + written;\n  } else {\n    const int32_t len = -1;\n    return encode(len, dst);\n  }\n}\n\n/**\n * For non-primitive types, call `encodeCompact` on them, to delegate the serialization to the\n * entity itself.\n */\ntemplate <typename T>\ninline uint32_t EncodingContext::encodeCompact(const T& arg, Buffer::Instance& dst) {\n  return arg.encodeCompact(dst, *this);\n}\n\n/**\n * int32_t is not encoded in compact fashion, so we just delegate to normal implementation.\n */\ntemplate <>\ninline uint32_t EncodingContext::encodeCompact(const int32_t& arg, Buffer::Instance& dst) {\n  return encode(arg, dst);\n}\n\n/**\n * Template overload for variable-length uint32_t (VAR_UINT).\n * Encode the value in 7-bit chunks + marker if field is the last one.\n * Details:\n * https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields#KIP-482:TheKafkaProtocolshouldSupportOptionalTaggedFields-UnsignedVarints\n */\ntemplate <>\ninline uint32_t EncodingContext::encodeCompact(const uint32_t& arg, Buffer::Instance& dst) {\n  uint32_t value = arg;\n\n  uint32_t elements_with_1 = 0;\n  // As long as there are bits set on indexes 8 or higher (counting from 1).\n  while ((value & ~(0x7f)) != 0) {\n    // Save next 7-bit batch with highest bit set.\n    const uint8_t el = (value & 0x7f) | 0x80;\n    dst.add(&el, sizeof(uint8_t));\n    value >>= 7;\n    elements_with_1++;\n  }\n\n  // After the loop has finished, we are certain that bit 8 = 0, so we can just add final element.\n  const uint8_t el = value;\n  dst.add(&el, sizeof(uint8_t));\n\n  return elements_with_1 + 1;\n}\n\n/**\n * Template overload for std::string.\n * Encode string as VAR_UINT + N bytes.\n */\ntemplate <>\ninline uint32_t EncodingContext::encodeCompact(const std::string& arg, Buffer::Instance& dst) {\n  const uint32_t 
string_length = arg.length();\n  const uint32_t header_length = encodeCompact(string_length + 1, dst);\n  dst.add(arg.c_str(), string_length);\n  return header_length + string_length;\n}\n\n/**\n * Template overload for NullableString.\n * Encode string as VAR_UINT + N bytes, or VAR_UINT 0 for null value.\n */\ntemplate <>\ninline uint32_t EncodingContext::encodeCompact(const NullableString& arg, Buffer::Instance& dst) {\n  if (arg.has_value()) {\n    return encodeCompact(*arg, dst);\n  } else {\n    const uint32_t len = 0;\n    return encodeCompact(len, dst);\n  }\n}\n\n/**\n * Template overload for Bytes.\n * Encode byte array as VAR_UINT + N bytes.\n */\ntemplate <>\ninline uint32_t EncodingContext::encodeCompact(const Bytes& arg, Buffer::Instance& dst) {\n  const uint32_t data_length = arg.size();\n  const uint32_t header_length = encodeCompact(data_length + 1, dst);\n  dst.add(arg.data(), data_length);\n  return header_length + data_length;\n}\n\n/**\n * Encode object array of T as VAR_UINT + N elements.\n * Each element of type T then serializes itself on its own.\n */\ntemplate <typename T>\nuint32_t EncodingContext::encodeCompact(const std::vector<T>& arg, Buffer::Instance& dst) {\n  const NullableArray<T> wrapped = {arg};\n  return encodeCompact(wrapped, dst);\n}\n\n/**\n * Encode nullable object array of T as VAR_UINT + N elements, or VAR_UINT 0 for null value.\n * Each element of type T then serializes itself on its own.\n */\ntemplate <typename T>\nuint32_t EncodingContext::encodeCompact(const NullableArray<T>& arg, Buffer::Instance& dst) {\n  if (arg.has_value()) {\n    const uint32_t len = arg->size() + 1;\n    const uint32_t header_length = encodeCompact(len, dst);\n    uint32_t written{0};\n    for (const T& el : *arg) {\n      written += encodeCompact(el, dst);\n    }\n    return header_length + written;\n  } else {\n    const uint32_t len = 0;\n    return encodeCompact(len, dst);\n  }\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // 
namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/kafka/tagged_fields.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"extensions/filters/network/kafka/serialization.h\"\n\n/**\n * This header file provides serialization support for tagged fields structure added in 2.4.\n * https://github.com/apache/kafka/blob/2.4.0/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java\n *\n * Impl note: contrary to other compact data structures, data in tagged field does not have +1 in\n * data length.\n */\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Simple data-holding structure.\n */\nstruct TaggedField {\n\n  uint32_t tag_;\n  std::vector<unsigned char> data_;\n\n  uint32_t computeCompactSize(const EncodingContext& encoder) const {\n    uint32_t result{0};\n    result += encoder.computeCompactSize(tag_);\n    result += encoder.computeCompactSize(static_cast<uint32_t>(data_.size()));\n    result += data_.size();\n    return result;\n  }\n\n  uint32_t encodeCompact(Buffer::Instance& dst, EncodingContext& encoder) const {\n    uint32_t written{0};\n    written += encoder.encodeCompact(tag_, dst);\n    written += encoder.encodeCompact(static_cast<uint32_t>(data_.size()), dst);\n    dst.add(data_.data(), data_.size());\n    written += data_.size();\n    return written;\n  }\n\n  bool operator==(const TaggedField& rhs) const { return tag_ == rhs.tag_ && data_ == rhs.data_; }\n};\n\n/**\n * Deserializer responsible for extracting a TaggedField from data provided.\n */\nclass TaggedFieldDeserializer : public Deserializer<TaggedField> {\npublic:\n  TaggedFieldDeserializer() = default;\n\n  uint32_t feed(absl::string_view& data) override {\n    uint32_t consumed = 0;\n    consumed += tag_deserializer_.feed(data);\n    consumed += length_deserializer_.feed(data);\n\n    if (!length_deserializer_.ready()) {\n      return consumed;\n    }\n\n    if (!length_consumed_) {\n      required_ = length_deserializer_.get();\n      data_buffer_ = std::vector<unsigned 
char>(required_);\n      length_consumed_ = true;\n    }\n\n    const uint32_t data_consumed = std::min<uint32_t>(required_, data.size());\n    const uint32_t written = data_buffer_.size() - required_;\n    if (data_consumed > 0) {\n      memcpy(data_buffer_.data() + written, data.data(), data_consumed);\n      required_ -= data_consumed;\n      data = {data.data() + data_consumed, data.size() - data_consumed};\n    }\n\n    if (required_ == 0) {\n      ready_ = true;\n    }\n\n    return consumed + data_consumed;\n  };\n\n  bool ready() const override { return ready_; };\n\n  TaggedField get() const override { return {tag_deserializer_.get(), data_buffer_}; };\n\nprivate:\n  VarUInt32Deserializer tag_deserializer_;\n  VarUInt32Deserializer length_deserializer_;\n  bool length_consumed_{false};\n  uint32_t required_;\n  std::vector<unsigned char> data_buffer_;\n  bool ready_{false};\n};\n\n/**\n * Aggregate of multiple TaggedField objects.\n */\nstruct TaggedFields {\n\n  std::vector<TaggedField> fields_;\n\n  uint32_t computeCompactSize(const EncodingContext& encoder) const {\n    uint32_t result{0};\n    result += encoder.computeCompactSize(static_cast<uint32_t>(fields_.size()));\n    for (const TaggedField& tagged_field : fields_) {\n      result += tagged_field.computeCompactSize(encoder);\n    }\n    return result;\n  }\n\n  uint32_t encodeCompact(Buffer::Instance& dst, EncodingContext& encoder) const {\n    uint32_t written{0};\n    written += encoder.encodeCompact(static_cast<uint32_t>(fields_.size()), dst);\n    for (const TaggedField& tagged_field : fields_) {\n      written += tagged_field.encodeCompact(dst, encoder);\n    }\n    return written;\n  }\n\n  bool operator==(const TaggedFields& rhs) const { return fields_ == rhs.fields_; }\n};\n\n/**\n * Deserializer responsible for extracting tagged fields from data provided.\n */\nclass TaggedFieldsDeserializer : public Deserializer<TaggedFields> {\npublic:\n  uint32_t feed(absl::string_view& data) override 
{\n\n    const uint32_t count_consumed = count_deserializer_.feed(data);\n    if (!count_deserializer_.ready()) {\n      return count_consumed;\n    }\n\n    if (!children_setup_) {\n      const uint32_t field_count = count_deserializer_.get();\n      children_ = std::vector<TaggedFieldDeserializer>(field_count);\n      children_setup_ = true;\n    }\n\n    if (ready_) {\n      return count_consumed;\n    }\n\n    uint32_t child_consumed{0};\n    for (TaggedFieldDeserializer& child : children_) {\n      child_consumed += child.feed(data);\n    }\n\n    bool children_ready_ = true;\n    for (TaggedFieldDeserializer& child : children_) {\n      children_ready_ &= child.ready();\n    }\n    ready_ = children_ready_;\n\n    return count_consumed + child_consumed;\n  };\n\n  bool ready() const override { return ready_; };\n\n  TaggedFields get() const override {\n    std::vector<TaggedField> fields{};\n    fields.reserve(children_.size());\n    for (const TaggedFieldDeserializer& child : children_) {\n      const TaggedField child_result = child.get();\n      fields.push_back(child_result);\n    }\n    return {fields};\n  };\n\nprivate:\n  VarUInt32Deserializer count_deserializer_;\n  std::vector<TaggedFieldDeserializer> children_;\n\n  bool children_setup_ = false;\n  bool ready_ = false;\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/local_ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Local ratelimit L4 network filter\n# Public docs: docs/root/configuration/network_filters/local_rate_limit_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"local_ratelimit_lib\",\n    srcs = [\"local_ratelimit.cc\"],\n    hdrs = [\"local_ratelimit.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/filters/common/local_ratelimit:local_ratelimit_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"//source/extensions/filters/network/local_ratelimit:local_ratelimit_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/local_ratelimit/config.cc",
    "content": "#include \"extensions/filters/network/local_ratelimit/config.h\"\n\n#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.validate.h\"\n\n#include \"extensions/filters/network/local_ratelimit/local_ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace LocalRateLimitFilter {\n\nNetwork::FilterFactoryCb LocalRateLimitConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  ConfigSharedPtr filter_config(\n      new Config(proto_config, context.dispatcher(), context.scope(), context.runtime()));\n  return [filter_config](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(std::make_shared<Filter>(filter_config));\n  };\n}\n\n/**\n * Static registration for the local rate limit filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(LocalRateLimitConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace LocalRateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/local_ratelimit/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace LocalRateLimitFilter {\n\n/**\n * Config registration for the local rate limit filter. @see NamedNetworkFilterConfigFactory.\n */\nclass LocalRateLimitConfigFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit> {\npublic:\n  LocalRateLimitConfigFactory() : FactoryBase(NetworkFilterNames::get().LocalRateLimit) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace LocalRateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/local_ratelimit/local_ratelimit.cc",
    "content": "#include \"extensions/filters/network/local_ratelimit/local_ratelimit.h\"\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace LocalRateLimitFilter {\n\nConfig::Config(\n    const envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& proto_config,\n    Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::Loader& runtime)\n    : rate_limiter_(Filters::Common::LocalRateLimit::LocalRateLimiterImpl(\n          std::chrono::milliseconds(\n              PROTOBUF_GET_MS_REQUIRED(proto_config.token_bucket(), fill_interval)),\n          proto_config.token_bucket().max_tokens(),\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config.token_bucket(), tokens_per_fill, 1),\n          dispatcher)),\n      enabled_(proto_config.runtime_enabled(), runtime),\n      stats_(generateStats(proto_config.stat_prefix(), scope)) {}\n\nLocalRateLimitStats Config::generateStats(const std::string& prefix, Stats::Scope& scope) {\n  const std::string final_prefix = \"local_rate_limit.\" + prefix;\n  return {ALL_LOCAL_RATE_LIMIT_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))};\n}\n\nbool Config::canCreateConnection() { return rate_limiter_.requestAllowed(); }\n\nNetwork::FilterStatus Filter::onNewConnection() {\n  if (!config_->enabled()) {\n    ENVOY_CONN_LOG(trace, \"local_rate_limit: runtime disabled\", read_callbacks_->connection());\n    return Network::FilterStatus::Continue;\n  }\n\n  if (!config_->canCreateConnection()) {\n    config_->stats().rate_limited_.inc();\n    ENVOY_CONN_LOG(trace, \"local_rate_limit: rate limiting connection\",\n                   read_callbacks_->connection());\n    read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n    return Network::FilterStatus::StopIteration;\n  }\n\n  return 
Network::FilterStatus::Continue;\n}\n\n} // namespace LocalRateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/local_ratelimit/local_ratelimit.h",
    "content": "#pragma once\n\n#include \"envoy/event/timer.h\"\n#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/thread_synchronizer.h\"\n#include \"common/runtime/runtime_protos.h\"\n\n#include \"extensions/filters/common/local_ratelimit/local_ratelimit_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace LocalRateLimitFilter {\n\n/**\n * All local rate limit stats. @see stats_macros.h\n */\n#define ALL_LOCAL_RATE_LIMIT_STATS(COUNTER) COUNTER(rate_limited)\n\n/**\n * Struct definition for all local rate limit stats. @see stats_macros.h\n */\nstruct LocalRateLimitStats {\n  ALL_LOCAL_RATE_LIMIT_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration shared across all connections. Must be thread safe.\n */\nclass Config : Logger::Loggable<Logger::Id::filter> {\npublic:\n  Config(\n      const envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& proto_config,\n      Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::Loader& runtime);\n\n  bool canCreateConnection();\n  bool enabled() { return enabled_.enabled(); }\n  LocalRateLimitStats& stats() { return stats_; }\n\nprivate:\n  static LocalRateLimitStats generateStats(const std::string& prefix, Stats::Scope& scope);\n  void onFillTimer();\n\n  Filters::Common::LocalRateLimit::LocalRateLimiterImpl rate_limiter_;\n  Runtime::FeatureFlag enabled_;\n  LocalRateLimitStats stats_;\n\n  friend class LocalRateLimitTestBase;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\n/**\n * Per-connection local rate limit filter.\n */\nclass Filter : public Network::ReadFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  Filter(const ConfigSharedPtr& config) : config_(config) {}\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance&, bool) 
override {\n    return Network::FilterStatus::Continue;\n  }\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& read_callbacks) override {\n    read_callbacks_ = &read_callbacks;\n  }\n\nprivate:\n  const ConfigSharedPtr config_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n};\n\n} // namespace LocalRateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Mongo proxy L4 network filter (observability and fault injection).\n# Public docs: docs/root/configuration/network_filters/mongo_proxy_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"bson_interface\",\n    hdrs = [\"bson.h\"],\n    deps = [\"//include/envoy/buffer:buffer_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"codec_interface\",\n    hdrs = [\"codec.h\"],\n    deps = [\":bson_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"bson_lib\",\n    srcs = [\"bson_impl.cc\"],\n    hdrs = [\"bson_impl.h\"],\n    deps = [\n        \":bson_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:byte_order_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"codec_lib\",\n    srcs = [\"codec_impl.cc\"],\n    hdrs = [\"codec_impl.h\"],\n    deps = [\n        \":bson_lib\",\n        \":codec_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"proxy_lib\",\n    srcs = [\"proxy.cc\"],\n    hdrs = [\"proxy.h\"],\n    deps = [\n        \":codec_interface\",\n        \":codec_lib\",\n        \":mongo_stats_lib\",\n        \":utility_lib\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:drain_decision_interface\",\n        
\"//include/envoy/network:filter_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/singleton:const_singleton\",\n        \"//source/extensions/filters/common/fault:fault_config_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"mongo_stats_lib\",\n    srcs = [\"mongo_stats.cc\"],\n    hdrs = [\"mongo_stats.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/common/stats:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    deps = [\n        \":codec_interface\",\n        \"//source/common/json:json_loader_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \":proxy_lib\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/mongo_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/bson.h",
    "content": "#pragma once\n\n#include <array>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\nnamespace Bson {\n\n/**\n * Implementation of http://bsonspec.org/spec.html\n */\nclass Document;\nusing DocumentSharedPtr = std::shared_ptr<Document>;\n\n/**\n * A BSON document field. This is essentially a variably typed parameter that can be \"cast\" to\n * the correct type via the as*() functions.\n */\nclass Field {\npublic:\n  /**\n   * Raw field type.\n   */\n  enum class Type : uint8_t {\n    Double = 0x01,\n    String = 0x02,\n    Document = 0x03,\n    Array = 0x04,\n    Binary = 0x05,\n    ObjectId = 0x07,\n    Boolean = 0x08,\n    Datetime = 0x09,\n    NullValue = 0x0A,\n    Regex = 0x0B,\n    Symbol = 0x0E,\n    Int32 = 0x10,\n    Timestamp = 0x11,\n    Int64 = 0x12\n  };\n\n  /**\n   * 12 byte ObjectId type.\n   */\n  using ObjectId = std::array<uint8_t, 12>;\n\n  /**\n   * Regex type.\n   */\n  struct Regex {\n    bool operator==(const Regex& rhs) const {\n      return pattern_ == rhs.pattern_ && options_ == rhs.options_;\n    }\n\n    std::string pattern_;\n    std::string options_;\n  };\n\n  virtual ~Field() = default;\n\n  virtual double asDouble() const PURE;\n  virtual const std::string& asString() const PURE;\n  virtual const std::string& asSymbol() const PURE;\n  virtual const Document& asDocument() const PURE;\n  virtual const Document& asArray() const PURE;\n  virtual const std::string& asBinary() const PURE;\n  virtual const ObjectId& asObjectId() const PURE;\n  virtual bool asBoolean() const PURE;\n  virtual int64_t asDatetime() const PURE;\n  virtual const Regex& asRegex() const PURE;\n  virtual int32_t asInt32() const PURE;\n  virtual int64_t asTimestamp() const PURE;\n  virtual int64_t asInt64() const PURE;\n\n  virtual int32_t byteSize() const PURE;\n  virtual void 
encode(Buffer::Instance& output) const PURE;\n  virtual const std::string& key() const PURE;\n  virtual bool operator==(const Field& rhs) const PURE;\n  virtual std::string toString() const PURE;\n  virtual Type type() const PURE;\n};\n\nusing FieldPtr = std::unique_ptr<Field>;\n\n/**\n * A BSON document. add*() is used to add strongly typed fields.\n */\nclass Document {\npublic:\n  virtual ~Document() = default;\n\n  virtual DocumentSharedPtr addDouble(const std::string& key, double value) PURE;\n  virtual DocumentSharedPtr addString(const std::string& key, std::string&& value) PURE;\n  virtual DocumentSharedPtr addSymbol(const std::string& key, std::string&& value) PURE;\n  virtual DocumentSharedPtr addDocument(const std::string& key, DocumentSharedPtr value) PURE;\n  virtual DocumentSharedPtr addArray(const std::string& key, DocumentSharedPtr value) PURE;\n  virtual DocumentSharedPtr addBinary(const std::string& key, std::string&& value) PURE;\n  virtual DocumentSharedPtr addObjectId(const std::string& key, Field::ObjectId&& value) PURE;\n  virtual DocumentSharedPtr addBoolean(const std::string& key, bool value) PURE;\n  virtual DocumentSharedPtr addDatetime(const std::string& key, int64_t value) PURE;\n  virtual DocumentSharedPtr addNull(const std::string& key) PURE;\n  virtual DocumentSharedPtr addRegex(const std::string& key, Field::Regex&& value) PURE;\n  virtual DocumentSharedPtr addInt32(const std::string& key, int32_t value) PURE;\n  virtual DocumentSharedPtr addTimestamp(const std::string& key, int64_t value) PURE;\n  virtual DocumentSharedPtr addInt64(const std::string& key, int64_t value) PURE;\n\n  virtual bool operator==(const Document& rhs) const PURE;\n  virtual int32_t byteSize() const PURE;\n  virtual void encode(Buffer::Instance& output) const PURE;\n  virtual const Field* find(const std::string& name) const PURE;\n  virtual const Field* find(const std::string& name, Field::Type type) const PURE;\n  virtual std::string toString() const PURE;\n  
virtual const std::list<FieldPtr>& values() const PURE;\n};\n\n} // namespace Bson\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/bson_impl.cc",
    "content": "#include \"extensions/filters/network/mongo_proxy/bson_impl.h\"\n\n#include <cstdint>\n#include <sstream>\n#include <string>\n\n#include \"common/common/assert.h\"\n#include \"common/common/byte_order.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/hex.h\"\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\nnamespace Bson {\n\nint32_t BufferHelper::peekInt32(Buffer::Instance& data) {\n  if (data.length() < sizeof(int32_t)) {\n    throw EnvoyException(\"invalid buffer size\");\n  }\n\n  int32_t val;\n  void* mem = data.linearize(sizeof(int32_t));\n  std::memcpy(reinterpret_cast<void*>(&val), mem, sizeof(int32_t));\n  return le32toh(val);\n}\n\nuint8_t BufferHelper::removeByte(Buffer::Instance& data) {\n  if (data.length() == 0) {\n    throw EnvoyException(\"invalid buffer size\");\n  }\n\n  void* mem = data.linearize(sizeof(uint8_t));\n  uint8_t ret = *reinterpret_cast<uint8_t*>(mem);\n  data.drain(sizeof(uint8_t));\n  return ret;\n}\n\nvoid BufferHelper::removeBytes(Buffer::Instance& data, uint8_t* out, size_t out_len) {\n  if (data.length() < out_len) {\n    throw EnvoyException(\"invalid buffer size\");\n  }\n\n  void* mem = data.linearize(out_len);\n  std::memcpy(out, mem, out_len);\n  data.drain(out_len);\n}\n\nstd::string BufferHelper::removeCString(Buffer::Instance& data) {\n  char end = '\\0';\n  ssize_t index = data.search(&end, sizeof(end), 0);\n  if (index == -1) {\n    throw EnvoyException(\"invalid CString\");\n  }\n\n  char* start = reinterpret_cast<char*>(data.linearize(index + 1));\n  std::string ret(start);\n  data.drain(index + 1);\n  return ret;\n}\n\ndouble BufferHelper::removeDouble(Buffer::Instance& data) {\n  ASSERT(sizeof(double) == 8);\n\n  // There is not really official endian support for floating point so we unpack an 8 byte integer\n  // into a union with a double.\n  union {\n    int64_t i;\n    double d;\n  } memory;\n\n  
static_assert(sizeof(memory.i) == sizeof(memory.d), \"invalid type size\");\n  memory.i = removeInt64(data);\n  return memory.d;\n}\n\nint32_t BufferHelper::removeInt32(Buffer::Instance& data) {\n  int32_t ret = peekInt32(data);\n  data.drain(sizeof(int32_t));\n  return ret;\n}\n\nint64_t BufferHelper::removeInt64(Buffer::Instance& data) {\n  if (data.length() < sizeof(int64_t)) {\n    throw EnvoyException(\"invalid buffer size\");\n  }\n\n  int64_t val;\n  void* mem = data.linearize(sizeof(int64_t));\n  std::memcpy(reinterpret_cast<void*>(&val), mem, sizeof(int64_t));\n  data.drain(sizeof(int64_t));\n  return le64toh(val);\n}\n\nstd::string BufferHelper::removeString(Buffer::Instance& data) {\n  int32_t length = removeInt32(data);\n  if (static_cast<uint32_t>(length) > data.length()) {\n    throw EnvoyException(\"invalid buffer size\");\n  }\n\n  char* start = reinterpret_cast<char*>(data.linearize(length));\n  std::string ret(start);\n  data.drain(length);\n  return ret;\n}\n\nstd::string BufferHelper::removeBinary(Buffer::Instance& data) {\n  // Read out the subtype but do not store it for now.\n  int32_t length = removeInt32(data);\n  removeByte(data);\n  if (static_cast<uint32_t>(length) > data.length()) {\n    throw EnvoyException(\"invalid buffer size\");\n  }\n\n  char* start = reinterpret_cast<char*>(data.linearize(length));\n  std::string ret(start, length);\n  data.drain(length);\n  return ret;\n}\n\nvoid BufferHelper::writeCString(Buffer::Instance& data, const std::string& value) {\n  data.add(value.c_str(), value.size() + 1);\n}\n\nvoid BufferHelper::writeDouble(Buffer::Instance& data, double value) {\n  // We need to hack converting a double into little endian.\n  int64_t* to_write = reinterpret_cast<int64_t*>(&value);\n  writeInt64(data, *to_write);\n}\n\nvoid BufferHelper::writeInt32(Buffer::Instance& data, int32_t value) {\n  value = htole32(value);\n  data.add(&value, sizeof(value));\n}\n\nvoid BufferHelper::writeInt64(Buffer::Instance& data, 
int64_t value) {\n  value = htole64(value);\n  data.add(&value, sizeof(value));\n}\n\nvoid BufferHelper::writeString(Buffer::Instance& data, const std::string& value) {\n  writeInt32(data, value.size() + 1);\n  data.add(value.c_str(), value.size() + 1);\n}\n\nvoid BufferHelper::writeBinary(Buffer::Instance& data, const std::string& value) {\n  // Right now we do not actually store the binary subtype and always use zero.\n  writeInt32(data, value.size());\n  uint8_t subtype = 0;\n  data.add(&subtype, sizeof(subtype));\n  data.add(value.c_str(), value.size());\n}\n\nint32_t FieldImpl::byteSize() const {\n  // 1 byte type, cstring key, field.\n  int32_t total = 1 + key_.size() + 1;\n\n  switch (type_) {\n  case Type::Double:\n  case Type::Datetime:\n  case Type::Timestamp:\n  case Type::Int64: {\n    return total + 8;\n  }\n\n  case Type::String:\n  case Type::Symbol: {\n    return total + 4 + value_.string_value_.size() + 1;\n  }\n\n  case Type::Document:\n  case Type::Array: {\n    return total + value_.document_value_->byteSize();\n  }\n\n  case Type::Binary: {\n    return total + 5 + value_.string_value_.size();\n  }\n\n  case Type::ObjectId: {\n    return total + sizeof(ObjectId);\n  }\n\n  case Type::Boolean: {\n    return total + 1;\n  }\n\n  case Type::NullValue: {\n    return total;\n  }\n\n  case Type::Regex: {\n    return total + value_.regex_value_.pattern_.size() + value_.regex_value_.options_.size() + 2;\n  }\n\n  case Type::Int32: {\n    return total + 4;\n  }\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid FieldImpl::encode(Buffer::Instance& output) const {\n  output.add(&type_, sizeof(type_));\n  BufferHelper::writeCString(output, key_);\n\n  switch (type_) {\n  case Type::Double: {\n    return BufferHelper::writeDouble(output, value_.double_value_);\n  }\n\n  case Type::String:\n  case Type::Symbol: {\n    return BufferHelper::writeString(output, value_.string_value_);\n  }\n\n  case Type::Document:\n  case Type::Array: {\n    return 
value_.document_value_->encode(output);\n  }\n\n  case Type::Binary: {\n    return BufferHelper::writeBinary(output, value_.string_value_);\n  }\n\n  case Type::ObjectId: {\n    return output.add(&value_.object_id_value_[0], value_.object_id_value_.size());\n  }\n\n  case Type::Boolean: {\n    uint8_t to_write = value_.bool_value_ ? 1 : 0;\n    return output.add(&to_write, sizeof(to_write));\n  }\n\n  case Type::Datetime:\n  case Type::Timestamp:\n  case Type::Int64: {\n    return BufferHelper::writeInt64(output, value_.int64_value_);\n  }\n\n  case Type::NullValue: {\n    return;\n  }\n\n  case Type::Regex: {\n    BufferHelper::writeCString(output, value_.regex_value_.pattern_);\n    return BufferHelper::writeCString(output, value_.regex_value_.options_);\n  }\n\n  case Type::Int32:\n    return BufferHelper::writeInt32(output, value_.int32_value_);\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nbool FieldImpl::operator==(const Field& rhs) const {\n  if (type() != rhs.type()) {\n    return false;\n  }\n\n  switch (type_) {\n  case Type::Double: {\n    return asDouble() == rhs.asDouble();\n  }\n\n  case Type::String: {\n    return asString() == rhs.asString();\n  }\n\n  case Type::Symbol: {\n    return asSymbol() == rhs.asSymbol();\n  }\n\n  case Type::Document: {\n    return asDocument() == rhs.asDocument();\n  }\n\n  case Type::Array: {\n    return asArray() == rhs.asArray();\n  }\n\n  case Type::Binary: {\n    return asBinary() == rhs.asBinary();\n  }\n\n  case Type::ObjectId: {\n    return asObjectId() == rhs.asObjectId();\n  }\n\n  case Type::Boolean: {\n    return asBoolean() == rhs.asBoolean();\n  }\n\n  case Type::NullValue: {\n    return true;\n  }\n\n  case Type::Regex: {\n    return asRegex() == rhs.asRegex();\n  }\n\n  case Type::Int32: {\n    return asInt32() == rhs.asInt32();\n  }\n\n  case Type::Datetime: {\n    return asDatetime() == rhs.asDatetime();\n  }\n\n  case Type::Timestamp: {\n    return asTimestamp() == rhs.asTimestamp();\n  }\n\n  case 
Type::Int64: {\n    return asInt64() == rhs.asInt64();\n  }\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nstd::string FieldImpl::toString() const {\n  switch (type_) {\n  case Type::Double: {\n    return std::to_string(value_.double_value_);\n  }\n\n  case Type::String:\n  case Type::Symbol:\n  case Type::Binary: {\n    return fmt::format(\"\\\"{}\\\"\", StringUtil::escape(value_.string_value_));\n  }\n\n  case Type::Document:\n  case Type::Array: {\n    return value_.document_value_->toString();\n  }\n\n  case Type::ObjectId: {\n    return fmt::format(\"\\\"{}\\\"\",\n                       Hex::encode(&value_.object_id_value_[0], value_.object_id_value_.size()));\n  }\n\n  case Type::Boolean: {\n    return value_.bool_value_ ? \"true\" : \"false\";\n  }\n\n  case Type::NullValue: {\n    return \"null\";\n  }\n\n  case Type::Regex: {\n    return fmt::format(\"[\\\"{}\\\", \\\"{}\\\"]\", value_.regex_value_.pattern_,\n                       value_.regex_value_.options_);\n  }\n\n  case Type::Int32: {\n    return std::to_string(value_.int32_value_);\n  }\n\n  case Type::Datetime:\n  case Type::Timestamp:\n  case Type::Int64: {\n    return std::to_string(value_.int64_value_);\n  }\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid DocumentImpl::fromBuffer(Buffer::Instance& data) {\n  uint64_t original_buffer_length = data.length();\n  int32_t message_length = BufferHelper::removeInt32(data);\n  if (static_cast<uint64_t>(message_length) > original_buffer_length) {\n    throw EnvoyException(\"invalid BSON message length\");\n  }\n\n  ENVOY_LOG(trace, \"BSON document length: {} data length: {}\", message_length,\n            original_buffer_length);\n\n  while (true) {\n    uint64_t document_bytes_remaining = data.length() - (original_buffer_length - message_length);\n    ENVOY_LOG(trace, \"BSON document bytes remaining: {}\", document_bytes_remaining);\n    if (document_bytes_remaining == 1) {\n      uint8_t last_byte = BufferHelper::removeByte(data);\n      if 
(last_byte != 0) {\n        throw EnvoyException(\"invalid document\");\n      }\n\n      return;\n    }\n\n    uint8_t element_type = BufferHelper::removeByte(data);\n    std::string key = BufferHelper::removeCString(data);\n    ENVOY_LOG(trace, \"BSON element type: {:#x} key: {}\", element_type, key);\n    switch (static_cast<Field::Type>(element_type)) {\n    case Field::Type::Double: {\n      double value = BufferHelper::removeDouble(data);\n      ENVOY_LOG(trace, \"BSON double: {}\", value);\n      addDouble(key, value);\n      break;\n    }\n\n    case Field::Type::String: {\n      std::string value = BufferHelper::removeString(data);\n      ENVOY_LOG(trace, \"BSON string: {}\", value);\n      addString(key, std::move(value));\n      break;\n    }\n\n    case Field::Type::Symbol: {\n      std::string value = BufferHelper::removeString(data);\n      ENVOY_LOG(trace, \"BSON symbol: {}\", value);\n      addSymbol(key, std::move(value));\n      break;\n    }\n\n    case Field::Type::Document: {\n      ENVOY_LOG(trace, \"BSON document\");\n      addDocument(key, DocumentImpl::create(data));\n      break;\n    }\n\n    case Field::Type::Array: {\n      ENVOY_LOG(trace, \"BSON array\");\n      addArray(key, DocumentImpl::create(data));\n      break;\n    }\n\n    case Field::Type::Binary: {\n      std::string value = BufferHelper::removeBinary(data);\n      ENVOY_LOG(trace, \"BSON binary: {}\", value);\n      addBinary(key, std::move(value));\n      break;\n    }\n\n    case Field::Type::ObjectId: {\n      Field::ObjectId value;\n      BufferHelper::removeBytes(data, &value[0], value.size());\n      addObjectId(key, std::move(value));\n      break;\n    }\n\n    case Field::Type::Boolean: {\n      const bool value = BufferHelper::removeByte(data) != 0;\n      ENVOY_LOG(trace, \"BSON boolean: {}\", value);\n      addBoolean(key, value);\n      break;\n    }\n\n    case Field::Type::Datetime: {\n      const int64_t value = BufferHelper::removeInt64(data);\n      
ENVOY_LOG(trace, \"BSON datetime: {}\", value);\n      addDatetime(key, value);\n      break;\n    }\n\n    case Field::Type::NullValue: {\n      ENVOY_LOG(trace, \"BSON null value\");\n      addNull(key);\n      break;\n    }\n\n    case Field::Type::Regex: {\n      Field::Regex value;\n      value.pattern_ = BufferHelper::removeCString(data);\n      value.options_ = BufferHelper::removeCString(data);\n      ENVOY_LOG(trace, \"BSON regex pattern: {} options: {}\", value.pattern_, value.options_);\n      addRegex(key, std::move(value));\n      break;\n    }\n\n    case Field::Type::Int32: {\n      int32_t value = BufferHelper::removeInt32(data);\n      ENVOY_LOG(trace, \"BSON int32: {}\", value);\n      addInt32(key, value);\n      break;\n    }\n\n    case Field::Type::Timestamp: {\n      int64_t value = BufferHelper::removeInt64(data);\n      ENVOY_LOG(trace, \"BSON timestamp: {}\", value);\n      addTimestamp(key, value);\n      break;\n    }\n\n    case Field::Type::Int64: {\n      int64_t value = BufferHelper::removeInt64(data);\n      ENVOY_LOG(trace, \"BSON int64: {}\", value);\n      addInt64(key, value);\n      break;\n    }\n\n    default:\n      throw EnvoyException(\n          fmt::format(\"invalid BSON element type: {:#x} key: {}\", element_type, key));\n    }\n  }\n}\n\nint32_t DocumentImpl::byteSize() const {\n  // Minimum size is 5.\n  int32_t total_size = sizeof(int32_t) + 1;\n  for (const FieldPtr& field : fields_) {\n    total_size += field->byteSize();\n  }\n\n  return total_size;\n}\n\nvoid DocumentImpl::encode(Buffer::Instance& output) const {\n  BufferHelper::writeInt32(output, byteSize());\n  for (const FieldPtr& field : fields_) {\n    field->encode(output);\n  }\n\n  uint8_t done = 0;\n  output.add(&done, sizeof(done));\n}\n\nbool DocumentImpl::operator==(const Document& rhs) const {\n  if (values().size() != rhs.values().size()) {\n    return false;\n  }\n\n  for (auto i1 = values().begin(), i2 = rhs.values().begin(); i1 != 
values().end(); i1++, i2++) {\n    if (**i1 == **i2) {\n      continue;\n    }\n\n    return false;\n  }\n\n  return true;\n}\n\nstd::string DocumentImpl::toString() const {\n  std::stringstream out;\n  out << \"{\";\n\n  bool first = true;\n  for (const FieldPtr& field : fields_) {\n    if (!first) {\n      out << \", \";\n    }\n\n    out << fmt::format(\"\\\"{}\\\": {}\", field->key(), field->toString());\n    first = false;\n  }\n\n  out << \"}\";\n  return out.str();\n}\n\nconst Field* DocumentImpl::find(const std::string& name) const {\n  for (const FieldPtr& field : fields_) {\n    if (field->key() == name) {\n      return field.get();\n    }\n  }\n\n  return nullptr;\n}\n\nconst Field* DocumentImpl::find(const std::string& name, Field::Type type) const {\n  for (const FieldPtr& field : fields_) {\n    if (field->key() == name && field->type() == type) {\n      return field.get();\n    }\n  }\n\n  return nullptr;\n}\n\n} // namespace Bson\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/bson_impl.h",
    "content": "#pragma once\n\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n\n#include \"extensions/filters/network/mongo_proxy/bson.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\nnamespace Bson {\n\n/**\n * IO helpers for reading/writing BSON data from/to a buffer.\n */\nclass BufferHelper {\npublic:\n  static int32_t peekInt32(Buffer::Instance& data);\n  static uint8_t removeByte(Buffer::Instance& data);\n  static void removeBytes(Buffer::Instance& data, uint8_t* out, size_t out_len);\n  static std::string removeCString(Buffer::Instance& data);\n  static double removeDouble(Buffer::Instance& data);\n  static int32_t removeInt32(Buffer::Instance& data);\n  static int64_t removeInt64(Buffer::Instance& data);\n  static std::string removeString(Buffer::Instance& data);\n  static std::string removeBinary(Buffer::Instance& data);\n  static void writeCString(Buffer::Instance& data, const std::string& value);\n  static void writeInt32(Buffer::Instance& data, int32_t value);\n  static void writeInt64(Buffer::Instance& data, int64_t value);\n  static void writeDouble(Buffer::Instance& data, double value);\n  static void writeString(Buffer::Instance& data, const std::string& value);\n  static void writeBinary(Buffer::Instance& data, const std::string& value);\n};\n\nclass FieldImpl : public Field {\npublic:\n  explicit FieldImpl(const std::string& key, double value) : type_(Type::Double), key_(key) {\n    value_.double_value_ = value;\n  }\n\n  explicit FieldImpl(Type type, const std::string& key, std::string&& value)\n      : type_(type), key_(key) {\n    value_.string_value_ = std::move(value);\n  }\n\n  explicit FieldImpl(Type type, const std::string& key, DocumentSharedPtr value)\n      : type_(type), key_(key) {\n    value_.document_value_ = value;\n  
}\n\n  explicit FieldImpl(const std::string& key, ObjectId&& value) : type_(Type::ObjectId), key_(key) {\n    value_.object_id_value_ = std::move(value);\n  }\n\n  explicit FieldImpl(const std::string& key, bool value) : type_(Type::Boolean), key_(key) {\n    value_.bool_value_ = value;\n  }\n\n  explicit FieldImpl(Type type, const std::string& key, int64_t value) : type_(type), key_(key) {\n    value_.int64_value_ = value;\n  }\n\n  explicit FieldImpl(const std::string& key) : type_(Type::NullValue), key_(key) {}\n\n  explicit FieldImpl(const std::string& key, Regex&& value) : type_(Type::Regex), key_(key) {\n    value_.regex_value_ = std::move(value);\n  }\n\n  explicit FieldImpl(const std::string& key, int32_t value) : type_(Type::Int32), key_(key) {\n    value_.int32_value_ = value;\n  }\n\n  // Bson::Field\n  double asDouble() const override {\n    checkType(Type::Double);\n    return value_.double_value_;\n  }\n\n  const std::string& asString() const override {\n    checkType(Type::String);\n    return value_.string_value_;\n  }\n\n  const std::string& asSymbol() const override {\n    checkType(Type::Symbol);\n    return value_.string_value_;\n  }\n\n  const Document& asDocument() const override {\n    checkType(Type::Document);\n    return *value_.document_value_;\n  }\n\n  const Document& asArray() const override {\n    checkType(Type::Array);\n    return *value_.document_value_;\n  }\n\n  const std::string& asBinary() const override {\n    checkType(Type::Binary);\n    return value_.string_value_;\n  }\n\n  const ObjectId& asObjectId() const override {\n    checkType(Type::ObjectId);\n    return value_.object_id_value_;\n  }\n\n  bool asBoolean() const override {\n    checkType(Type::Boolean);\n    return value_.bool_value_;\n  }\n\n  int64_t asDatetime() const override {\n    checkType(Type::Datetime);\n    return value_.int64_value_;\n  }\n\n  const Regex& asRegex() const override {\n    checkType(Type::Regex);\n    return value_.regex_value_;\n  }\n\n  
int32_t asInt32() const override {\n    checkType(Type::Int32);\n    return value_.int32_value_;\n  }\n\n  int64_t asTimestamp() const override {\n    checkType(Type::Timestamp);\n    return value_.int64_value_;\n  }\n\n  int64_t asInt64() const override {\n    checkType(Type::Int64);\n    return value_.int64_value_;\n  }\n\n  int32_t byteSize() const override;\n  void encode(Buffer::Instance& output) const override;\n  const std::string& key() const override { return key_; }\n  bool operator==(const Field& rhs) const override;\n  std::string toString() const override;\n  Type type() const override { return type_; }\n\nprivate:\n  void checkType(Type type) const {\n    if (type_ != type) {\n      ExceptionUtil::throwEnvoyException(\"invalid BSON field type cast\");\n    }\n  }\n\n  /**\n   * All of the possible variadic values that a field can be.\n   * TODO(mattklein123): Make this a C++11 union to save a little space and time.\n   */\n  struct Value {\n    double double_value_;\n    std::string string_value_;\n    DocumentSharedPtr document_value_;\n    Field::ObjectId object_id_value_;\n    bool bool_value_;\n    int32_t int32_value_;\n    int64_t int64_value_;\n    Regex regex_value_;\n  };\n\n  Field::Type type_;\n  std::string key_;\n  Value value_;\n};\n\nclass DocumentImpl : public Document,\n                     Logger::Loggable<Logger::Id::mongo>,\n                     public std::enable_shared_from_this<DocumentImpl> {\npublic:\n  static DocumentSharedPtr create() { return DocumentSharedPtr{new DocumentImpl()}; }\n  static DocumentSharedPtr create(Buffer::Instance& data) {\n    std::shared_ptr<DocumentImpl> new_doc{new DocumentImpl()};\n    new_doc->fromBuffer(data);\n    return new_doc;\n  }\n\n  // Mongo::Document\n  DocumentSharedPtr addDouble(const std::string& key, double value) override {\n    fields_.emplace_back(new FieldImpl(key, value));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addString(const std::string& key, std::string&& 
value) override {\n    fields_.emplace_back(new FieldImpl(Field::Type::String, key, std::move(value)));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addSymbol(const std::string& key, std::string&& value) override {\n    fields_.emplace_back(new FieldImpl(Field::Type::Symbol, key, std::move(value)));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addDocument(const std::string& key, DocumentSharedPtr value) override {\n    fields_.emplace_back(new FieldImpl(Field::Type::Document, key, value));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addArray(const std::string& key, DocumentSharedPtr value) override {\n    fields_.emplace_back(new FieldImpl(Field::Type::Array, key, value));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addBinary(const std::string& key, std::string&& value) override {\n    fields_.emplace_back(new FieldImpl(Field::Type::Binary, key, std::move(value)));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addObjectId(const std::string& key, Field::ObjectId&& value) override {\n    fields_.emplace_back(new FieldImpl(key, std::move(value)));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addBoolean(const std::string& key, bool value) override {\n    fields_.emplace_back(new FieldImpl(key, value));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addDatetime(const std::string& key, int64_t value) override {\n    fields_.emplace_back(new FieldImpl(Field::Type::Datetime, key, value));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addNull(const std::string& key) override {\n    fields_.emplace_back(new FieldImpl(key));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addRegex(const std::string& key, Field::Regex&& value) override {\n    fields_.emplace_back(new FieldImpl(key, std::move(value)));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addInt32(const std::string& key, int32_t value) override {\n    fields_.emplace_back(new 
FieldImpl(key, value));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addTimestamp(const std::string& key, int64_t value) override {\n    fields_.emplace_back(new FieldImpl(Field::Type::Timestamp, key, value));\n    return shared_from_this();\n  }\n\n  DocumentSharedPtr addInt64(const std::string& key, int64_t value) override {\n    fields_.emplace_back(new FieldImpl(Field::Type::Int64, key, value));\n    return shared_from_this();\n  }\n\n  bool operator==(const Document& rhs) const override;\n  int32_t byteSize() const override;\n  void encode(Buffer::Instance& output) const override;\n  const Field* find(const std::string& name) const override;\n  const Field* find(const std::string& name, Field::Type type) const override;\n  std::string toString() const override;\n  const std::list<FieldPtr>& values() const override { return fields_; }\n\nprivate:\n  DocumentImpl() = default;\n\n  void fromBuffer(Buffer::Instance& data);\n\n  std::list<FieldPtr> fields_;\n};\n\n} // namespace Bson\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/codec.h",
    "content": "#pragma once\n\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"extensions/filters/network/mongo_proxy/bson.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\n/**\n * General implementation of https://docs.mongodb.org/manual/reference/mongodb-wire-protocol/\n */\n\n/**\n * Base class for all mongo messages.\n */\nclass Message {\npublic:\n  enum class OpCode {\n    Reply = 1,\n    Msg = 1000,\n    Update = 2001,\n    Insert = 2002,\n    Query = 2004,\n    GetMore = 2005,\n    Delete = 2006,\n    KillCursors = 2007,\n    Command = 2010,\n    CommandReply = 2011\n  };\n\n  virtual ~Message() = default;\n\n  virtual int32_t requestId() const PURE;\n  virtual int32_t responseTo() const PURE;\n  virtual std::string toString(bool full) const PURE;\n\n  // Define some constants used in mongo messages encoding\n  constexpr static uint32_t MessageHeaderSize = 16;\n  constexpr static uint32_t Int32Length = 4;\n  constexpr static uint32_t Int64Length = 8;\n  constexpr static uint32_t StringPaddingLength = 1;\n};\n\n/**\n * Mongo OP_GET_MORE message.\n */\nclass GetMoreMessage : public virtual Message {\npublic:\n  virtual bool operator==(const GetMoreMessage& rhs) const PURE;\n\n  virtual const std::string& fullCollectionName() const PURE;\n  virtual void fullCollectionName(const std::string& name) PURE;\n  virtual int32_t numberToReturn() const PURE;\n  virtual void numberToReturn(int32_t to_return) PURE;\n  virtual int64_t cursorId() const PURE;\n  virtual void cursorId(int64_t cursor_id) PURE;\n};\n\nusing GetMoreMessagePtr = std::unique_ptr<GetMoreMessage>;\n\n/**\n * Mongo OP_INSERT message.\n */\nclass InsertMessage : public virtual Message {\npublic:\n  virtual bool operator==(const InsertMessage& rhs) const PURE;\n\n  virtual int32_t flags() const PURE;\n  virtual void flags(int32_t flags) PURE;\n  virtual const std::string& fullCollectionName() const PURE;\n  
virtual void fullCollectionName(const std::string& name) PURE;\n  virtual const std::list<Bson::DocumentSharedPtr>& documents() const PURE;\n  virtual std::list<Bson::DocumentSharedPtr>& documents() PURE;\n};\n\nusing InsertMessagePtr = std::unique_ptr<InsertMessage>;\n\n/**\n * Mongo OP_KILL_CURSORS message.\n */\nclass KillCursorsMessage : public virtual Message {\npublic:\n  virtual bool operator==(const KillCursorsMessage& rhs) const PURE;\n\n  virtual int32_t numberOfCursorIds() const PURE;\n  virtual void numberOfCursorIds(int32_t number_of_cursors_ids) PURE;\n  virtual const std::vector<int64_t>& cursorIds() const PURE;\n  virtual void cursorIds(std::vector<int64_t>&& cursors_ids) PURE;\n};\n\nusing KillCursorsMessagePtr = std::unique_ptr<KillCursorsMessage>;\n\n/**\n * Mongo OP_QUERY message.\n */\nclass QueryMessage : public virtual Message {\npublic:\n  struct Flags {\n    // clang-format off\n    static const int32_t TailableCursor  = 0x1 << 1;\n    static const int32_t NoCursorTimeout = 0x1 << 4;\n    static const int32_t AwaitData       = 0x1 << 5;\n    static const int32_t Exhaust         = 0x1 << 6;\n    // clang-format on\n  };\n\n  virtual bool operator==(const QueryMessage& rhs) const PURE;\n\n  virtual int32_t flags() const PURE;\n  virtual void flags(int32_t flags) PURE;\n  virtual const std::string& fullCollectionName() const PURE;\n  virtual void fullCollectionName(const std::string& name) PURE;\n  virtual int32_t numberToSkip() const PURE;\n  virtual void numberToSkip(int32_t skip) PURE;\n  virtual int32_t numberToReturn() const PURE;\n  virtual void numberToReturn(int32_t to_return) PURE;\n  virtual const Bson::Document* query() const PURE;\n  virtual void query(Bson::DocumentSharedPtr&& query) PURE;\n  virtual const Bson::Document* returnFieldsSelector() const PURE;\n  virtual void returnFieldsSelector(Bson::DocumentSharedPtr&& fields) PURE;\n};\n\nusing QueryMessagePtr = std::unique_ptr<QueryMessage>;\n\n/**\n * Mongo OP_REPLY\n */\nclass 
ReplyMessage : public virtual Message {\npublic:\n  struct Flags {\n    // clang-format off\n    static const int32_t CursorNotFound = 0x1 << 0;\n    static const int32_t QueryFailure   = 0x1 << 1;\n    // clang-format on\n  };\n\n  virtual bool operator==(const ReplyMessage& rhs) const PURE;\n\n  virtual int32_t flags() const PURE;\n  virtual void flags(int32_t flags) PURE;\n  virtual int64_t cursorId() const PURE;\n  virtual void cursorId(int64_t cursor_id) PURE;\n  virtual int32_t startingFrom() const PURE;\n  virtual void startingFrom(int32_t starting_from) PURE;\n  virtual int32_t numberReturned() const PURE;\n  virtual void numberReturned(int32_t number_returned) PURE;\n  virtual const std::list<Bson::DocumentSharedPtr>& documents() const PURE;\n  virtual std::list<Bson::DocumentSharedPtr>& documents() PURE;\n};\n\nusing ReplyMessagePtr = std::unique_ptr<ReplyMessage>;\n\nclass CommandMessage : public virtual Message {\npublic:\n  // CommandMessage accessors.\n  virtual bool operator==(const CommandMessage& rhs) const PURE;\n  virtual std::string database() const PURE;\n  virtual void database(std::string database) PURE;\n  virtual std::string commandName() const PURE;\n  virtual void commandName(std::string command_name) PURE;\n  virtual const Bson::Document* metadata() const PURE;\n  virtual void metadata(Bson::DocumentSharedPtr&& metadata) PURE;\n  virtual const Bson::Document* commandArgs() const PURE;\n  virtual void commandArgs(Bson::DocumentSharedPtr&& command_args) PURE;\n  virtual const std::list<Bson::DocumentSharedPtr>& inputDocs() const PURE;\n  virtual std::list<Bson::DocumentSharedPtr>& inputDocs() PURE;\n};\n\nusing CommandMessagePtr = std::unique_ptr<CommandMessage>;\n\nclass CommandReplyMessage : public virtual Message {\npublic:\n  virtual bool operator==(const CommandReplyMessage& rhs) const PURE;\n  virtual const Bson::Document* metadata() const PURE;\n  virtual void metadata(Bson::DocumentSharedPtr&& metadata) PURE;\n  virtual const 
Bson::Document* commandReply() const PURE;\n  virtual void commandReply(Bson::DocumentSharedPtr&& command_reply) PURE;\n  virtual const std::list<Bson::DocumentSharedPtr>& outputDocs() const PURE;\n  virtual std::list<Bson::DocumentSharedPtr>& outputDocs() PURE;\n};\n\nusing CommandReplyMessagePtr = std::unique_ptr<CommandReplyMessage>;\n\n/**\n * General callbacks for dispatching decoded mongo messages to a sink.\n */\nclass DecoderCallbacks {\npublic:\n  virtual ~DecoderCallbacks() = default;\n\n  virtual void decodeGetMore(GetMoreMessagePtr&& message) PURE;\n  virtual void decodeInsert(InsertMessagePtr&& message) PURE;\n  virtual void decodeKillCursors(KillCursorsMessagePtr&& message) PURE;\n  virtual void decodeQuery(QueryMessagePtr&& message) PURE;\n  virtual void decodeReply(ReplyMessagePtr&& message) PURE;\n  virtual void decodeCommand(CommandMessagePtr&& message) PURE;\n  virtual void decodeCommandReply(CommandReplyMessagePtr&& message) PURE;\n};\n\n/**\n * Mongo message decoder.\n */\nclass Decoder {\npublic:\n  virtual ~Decoder() = default;\n\n  virtual void onData(Buffer::Instance& data) PURE;\n};\n\nusing DecoderPtr = std::unique_ptr<Decoder>;\n\n/**\n * Mongo message encoder.\n */\nclass Encoder {\npublic:\n  virtual ~Encoder() = default;\n\n  virtual void encodeGetMore(const GetMoreMessage& message) PURE;\n  virtual void encodeInsert(const InsertMessage& message) PURE;\n  virtual void encodeKillCursors(const KillCursorsMessage& message) PURE;\n  virtual void encodeQuery(const QueryMessage& message) PURE;\n  virtual void encodeReply(const ReplyMessage& message) PURE;\n  virtual void encodeCommand(const CommandMessage& message) PURE;\n  virtual void encodeCommandReply(const CommandReplyMessage& message) PURE;\n};\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/codec_impl.cc",
    "content": "#include \"extensions/filters/network/mongo_proxy/codec_impl.h\"\n\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <sstream>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n\n#include \"extensions/filters/network/mongo_proxy/bson_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nstd::string\nMessageImpl::documentListToString(const std::list<Bson::DocumentSharedPtr>& documents) const {\n  std::stringstream out;\n  out << \"[\";\n\n  bool first = true;\n  for (const Bson::DocumentSharedPtr& document : documents) {\n    if (!first) {\n      out << \", \";\n    }\n\n    out << document->toString();\n    first = false;\n  }\n\n  out << \"]\";\n  return out.str();\n}\n\nvoid GetMoreMessageImpl::fromBuffer(uint32_t, Buffer::Instance& data) {\n  ENVOY_LOG(trace, \"decoding get more message\");\n  Bson::BufferHelper::removeInt32(data); // \"zero\" (unused)\n  full_collection_name_ = Bson::BufferHelper::removeCString(data);\n  number_to_return_ = Bson::BufferHelper::removeInt32(data);\n  cursor_id_ = Bson::BufferHelper::removeInt64(data);\n  ENVOY_LOG(trace, \"{}\", toString(true));\n}\n\nbool GetMoreMessageImpl::operator==(const GetMoreMessage& rhs) const {\n  return requestId() == rhs.requestId() && responseTo() == rhs.responseTo() &&\n         fullCollectionName() == rhs.fullCollectionName() &&\n         numberToReturn() == rhs.numberToReturn() && cursorId() == rhs.cursorId();\n}\n\nstd::string GetMoreMessageImpl::toString(bool) const {\n  return fmt::format(\n      R\"EOF({{\"opcode\": \"OP_GET_MORE\", \"id\": {}, \"response_to\": {}, \"collection\": \"{}\", \"return\": {}, )EOF\"\n      R\"EOF(\"cursor\": {}}})EOF\",\n      request_id_, response_to_, full_collection_name_, number_to_return_, cursor_id_);\n}\n\nvoid InsertMessageImpl::fromBuffer(uint32_t 
message_length, Buffer::Instance& data) {\n  ENVOY_LOG(trace, \"decoding insert message\");\n  uint64_t original_buffer_length = data.length();\n  ASSERT(message_length <= original_buffer_length);\n\n  flags_ = Bson::BufferHelper::removeInt32(data);\n  full_collection_name_ = Bson::BufferHelper::removeCString(data);\n  while (data.length() - (original_buffer_length - message_length) > 0) {\n    documents_.emplace_back(Bson::DocumentImpl::create(data));\n  }\n\n  ENVOY_LOG(trace, \"{}\", toString(true));\n}\n\nbool InsertMessageImpl::operator==(const InsertMessage& rhs) const {\n  if (!(requestId() == rhs.requestId() && responseTo() == rhs.responseTo() &&\n        flags() == rhs.flags() && fullCollectionName() == rhs.fullCollectionName() &&\n        documents().size() == rhs.documents().size())) {\n    return false;\n  }\n\n  for (auto i = documents().begin(), j = rhs.documents().begin(); i != documents().end();\n       i++, j++) {\n    if (!(**i == **j)) {\n      return false;\n    }\n  }\n\n  return true;\n}\n\nstd::string InsertMessageImpl::toString(bool full) const {\n  return fmt::format(\n      R\"EOF({{\"opcode\": \"OP_INSERT\", \"id\": {}, \"response_to\": {}, \"flags\": \"{:#x}\", \"collection\": \"{}\", )EOF\"\n      R\"EOF(\"documents\": {}}})EOF\",\n      request_id_, response_to_, flags_, full_collection_name_,\n      full ? 
documentListToString(documents_) : std::to_string(documents_.size()));\n}\n\nvoid KillCursorsMessageImpl::fromBuffer(uint32_t, Buffer::Instance& data) {\n  ENVOY_LOG(trace, \"decoding kill cursors message\");\n  Bson::BufferHelper::removeInt32(data); // zero\n  number_of_cursor_ids_ = Bson::BufferHelper::removeInt32(data);\n  for (int32_t i = 0; i < number_of_cursor_ids_; i++) {\n    cursor_ids_.push_back(Bson::BufferHelper::removeInt64(data));\n  }\n\n  ENVOY_LOG(trace, \"{}\", toString(true));\n}\n\nbool KillCursorsMessageImpl::operator==(const KillCursorsMessage& rhs) const {\n  return requestId() == rhs.requestId() && responseTo() == rhs.responseTo() &&\n         numberOfCursorIds() == rhs.numberOfCursorIds() && cursorIds() == rhs.cursorIds();\n}\n\nstd::string KillCursorsMessageImpl::toString(bool) const {\n  std::stringstream cursors;\n  cursors << \"[\";\n  for (size_t i = 0; i < cursor_ids_.size(); i++) {\n    if (i > 0) {\n      cursors << \", \";\n    }\n\n    cursors << cursor_ids_[i];\n  }\n  cursors << \"]\";\n\n  return fmt::format(\n      R\"EOF({{\"opcode\": \"KILL_CURSORS\", \"id\": {}, \"response_to\": \"{:#x}\", \"num_cursors\": \"{}\", )EOF\"\n      R\"EOF(\"cursors\": {}}})EOF\",\n      request_id_, response_to_, number_of_cursor_ids_, cursors.str());\n}\n\nvoid QueryMessageImpl::fromBuffer(uint32_t message_length, Buffer::Instance& data) {\n  ENVOY_LOG(trace, \"decoding query message\");\n  uint64_t original_buffer_length = data.length();\n  ASSERT(message_length <= original_buffer_length);\n\n  flags_ = Bson::BufferHelper::removeInt32(data);\n  full_collection_name_ = Bson::BufferHelper::removeCString(data);\n  number_to_skip_ = Bson::BufferHelper::removeInt32(data);\n  number_to_return_ = Bson::BufferHelper::removeInt32(data);\n  query_ = Bson::DocumentImpl::create(data);\n\n  if (data.length() - (original_buffer_length - message_length) > 0) {\n    return_fields_selector_ = Bson::DocumentImpl::create(data);\n  }\n\n  ENVOY_LOG(trace, 
\"{}\", toString(true));\n}\n\nbool QueryMessageImpl::operator==(const QueryMessage& rhs) const {\n  if (!(requestId() == rhs.requestId() && responseTo() == rhs.responseTo() &&\n        flags() == rhs.flags() && fullCollectionName() == rhs.fullCollectionName() &&\n        numberToSkip() == rhs.numberToSkip() && numberToReturn() == rhs.numberToReturn() &&\n        !query() == !rhs.query() && !returnFieldsSelector() == !rhs.returnFieldsSelector())) {\n    return false;\n  }\n\n  if (query()) {\n    if (!(*query() == *rhs.query())) {\n      return false;\n    }\n  }\n\n  if (returnFieldsSelector()) {\n    if (!(*returnFieldsSelector() == *rhs.returnFieldsSelector())) {\n      return false;\n    }\n  }\n\n  return true;\n}\n\nstd::string QueryMessageImpl::toString(bool full) const {\n  return fmt::format(\n      R\"EOF({{\"opcode\": \"OP_QUERY\", \"id\": {}, \"response_to\": {}, \"flags\": \"{:#x}\", \"collection\": \"{}\", )EOF\"\n      R\"EOF(\"skip\": {}, \"return\": {}, \"query\": {}, \"fields\": {}}})EOF\",\n      request_id_, response_to_, flags_, full_collection_name_, number_to_skip_, number_to_return_,\n      full ? query_->toString() : \"\\\"{...}\\\"\",\n      return_fields_selector_ ? 
return_fields_selector_->toString() : \"{}\");\n}\n\nvoid ReplyMessageImpl::fromBuffer(uint32_t, Buffer::Instance& data) {\n  ENVOY_LOG(trace, \"decoding reply message\");\n  flags_ = Bson::BufferHelper::removeInt32(data);\n  cursor_id_ = Bson::BufferHelper::removeInt64(data);\n  starting_from_ = Bson::BufferHelper::removeInt32(data);\n  number_returned_ = Bson::BufferHelper::removeInt32(data);\n  for (int32_t i = 0; i < number_returned_; i++) {\n    documents_.emplace_back(Bson::DocumentImpl::create(data));\n  }\n\n  ENVOY_LOG(trace, \"{}\", toString(true));\n}\n\nbool ReplyMessageImpl::operator==(const ReplyMessage& rhs) const {\n  if (!(requestId() == rhs.requestId() && responseTo() == rhs.responseTo() &&\n        flags() == rhs.flags() && cursorId() == rhs.cursorId() &&\n        startingFrom() == rhs.startingFrom() && numberReturned() == rhs.numberReturned())) {\n\n    return false;\n  }\n\n  for (auto i = documents().begin(), j = rhs.documents().begin(); i != documents().end();\n       i++, j++) {\n    if (!(**i == **j)) {\n      return false;\n    }\n  }\n\n  return true;\n}\n\nstd::string ReplyMessageImpl::toString(bool full) const {\n  return fmt::format(\n      R\"EOF({{\"opcode\": \"OP_REPLY\", \"id\": {}, \"response_to\": {}, \"flags\": \"{:#x}\", \"cursor\": \"{}\", )EOF\"\n      R\"EOF(\"from\": {}, \"returned\": {}, \"documents\": {}}})EOF\",\n      request_id_, response_to_, flags_, cursor_id_, starting_from_, number_returned_,\n      full ? 
documentListToString(documents_) : std::to_string(documents_.size()));\n}\n\n/*\n * OP_COMMAND mongo message implementation.\n */\nvoid CommandMessageImpl::fromBuffer(uint32_t message_length, Buffer::Instance& data) {\n  ENVOY_LOG(trace, \"decoding COMMAND message\");\n  const uint64_t original_data_length = data.length();\n  ASSERT(data.length() >= message_length); // See comment below about relationship.\n\n  database_ = Bson::BufferHelper::removeCString(data);\n  command_name_ = Bson::BufferHelper::removeCString(data);\n  metadata_ = Bson::DocumentImpl::create(data);\n  command_args_ = Bson::DocumentImpl::create(data);\n\n  // There may be additional docs.\n  // message_length is mongo message length. original_data_length contains\n  // mongo message and possibly first few bytes of next message.\n  while (data.length() - (original_data_length - message_length) > 0) {\n    input_docs_.emplace_back(Bson::DocumentImpl::create(data));\n  }\n\n  ENVOY_LOG(trace, \"{}\", toString(true));\n}\n\nstd::string CommandMessageImpl::toString(bool full) const {\n  return fmt::format(\n      R\"EOF({{\"opcode\": \"OP_COMMAND\", \"id\": {}, \"response_to\": {}, \"database\": \"{}\", )EOF\"\n      R\"EOF(\"commandName\": \"{}\", \"metadata\": {}, )EOF\"\n      R\"EOF(\"commandArgs\": {}, \"inputDocs\": {}}})EOF\",\n      request_id_, response_to_, database_.c_str(), command_name_.c_str(), metadata_->toString(),\n      command_args_->toString(),\n      full ? 
documentListToString(input_docs_) : std::to_string(input_docs_.size()));\n}\n\nbool CommandMessageImpl::operator==(const CommandMessage& rhs) const {\n  if (!(requestId() == rhs.requestId() && responseTo() == rhs.responseTo() &&\n        database() == rhs.database() && commandName() == rhs.commandName() &&\n        !metadata() == !rhs.metadata() && !commandArgs() == !rhs.commandArgs() &&\n        inputDocs().size() == rhs.inputDocs().size())) {\n    return false;\n  }\n\n  // Compare documents now.\n  if (metadata()) {\n    if (!(*metadata() == *rhs.metadata())) {\n      return false;\n    }\n  }\n\n  if (commandArgs()) {\n    if (!(*commandArgs() == *rhs.commandArgs())) {\n      return false;\n    }\n  }\n\n  for (auto i = inputDocs().begin(), j = rhs.inputDocs().begin(); i != inputDocs().end();\n       i++, j++) {\n    if (!(**i == **j)) {\n      return false;\n    }\n  }\n\n  return true;\n}\n\n// OP_COMMANDREPLY implementation.\nvoid CommandReplyMessageImpl::fromBuffer(uint32_t message_length, Buffer::Instance& data) {\n  ENVOY_LOG(trace, \"decoding COMMAND REPLY message\");\n  const uint64_t original_data_length = data.length();\n  ASSERT(data.length() >= message_length); // See comment below about relationship.\n\n  metadata_ = Bson::DocumentImpl::create(data);\n  command_reply_ = Bson::DocumentImpl::create(data);\n\n  // There may be additional docs.\n  // message_length is mongo message length. 
original_data_length contains\n  // mongo message and possibly first few bytes of next message.\n  while (data.length() - (original_data_length - message_length) > 0) {\n    output_docs_.emplace_back(Bson::DocumentImpl::create(data));\n  }\n\n  ENVOY_LOG(trace, \"{}\", toString(true));\n}\n\nstd::string CommandReplyMessageImpl::toString(bool full) const {\n  return fmt::format(R\"EOF({{\"opcode\": \"OP_COMMANDREPLY\", \"id\": {}, \"response_to\": {}, )EOF\"\n                     R\"EOF(\"metadata\": {}, \"commandReply\": {}, \"outputDocs\":{}}} )EOF\",\n                     request_id_, response_to_, metadata_->toString(), command_reply_->toString(),\n                     full ? documentListToString(output_docs_)\n                          : std::to_string(output_docs_.size()));\n}\n\nbool CommandReplyMessageImpl::operator==(const CommandReplyMessage& rhs) const {\n  if (!(requestId() == rhs.requestId() && responseTo() == rhs.responseTo() &&\n        !metadata() == !rhs.metadata() && !commandReply() == !rhs.commandReply() &&\n        outputDocs().size() == rhs.outputDocs().size())) {\n    return false;\n  }\n\n  // Compare documents now.\n  if (metadata()) {\n    if (!(*metadata() == *rhs.metadata())) {\n      return false;\n    }\n  }\n\n  if (commandReply()) {\n    if (!(*commandReply() == *rhs.commandReply())) {\n      return false;\n    }\n  }\n\n  for (auto i = outputDocs().begin(), j = rhs.outputDocs().begin(); i != outputDocs().end();\n       i++, j++) {\n    if (!(**i == **j)) {\n      return false;\n    }\n  }\n\n  return true;\n}\nbool DecoderImpl::decode(Buffer::Instance& data) {\n  // See if we have enough data for the message length.\n  ENVOY_LOG(trace, \"decoding {} bytes\", data.length());\n  if (data.length() < sizeof(int32_t)) {\n    return false;\n  }\n\n  uint32_t message_length = Bson::BufferHelper::peekInt32(data);\n  ENVOY_LOG(trace, \"message is {} bytes\", message_length);\n  if (data.length() < message_length) {\n    return false;\n  }\n\n  
data.drain(sizeof(int32_t));\n  int32_t request_id = Bson::BufferHelper::removeInt32(data);\n  int32_t response_to = Bson::BufferHelper::removeInt32(data);\n  Message::OpCode op_code = static_cast<Message::OpCode>(Bson::BufferHelper::removeInt32(data));\n  ENVOY_LOG(trace, \"message op: {}\", static_cast<int32_t>(op_code));\n\n  // Some messages need to know how long they are to parse. Subtract the header that we have already\n  // parsed off before passing the final value.\n  message_length -= Message::MessageHeaderSize;\n\n  switch (op_code) {\n  case Message::OpCode::Reply: {\n    std::unique_ptr<ReplyMessageImpl> message(new ReplyMessageImpl(request_id, response_to));\n    message->fromBuffer(message_length, data);\n    callbacks_.decodeReply(std::move(message));\n    break;\n  }\n\n  case Message::OpCode::Query: {\n    std::unique_ptr<QueryMessageImpl> message(new QueryMessageImpl(request_id, response_to));\n    message->fromBuffer(message_length, data);\n    callbacks_.decodeQuery(std::move(message));\n    break;\n  }\n\n  case Message::OpCode::GetMore: {\n    std::unique_ptr<GetMoreMessageImpl> message(new GetMoreMessageImpl(request_id, response_to));\n    message->fromBuffer(message_length, data);\n    callbacks_.decodeGetMore(std::move(message));\n    break;\n  }\n\n  case Message::OpCode::Insert: {\n    std::unique_ptr<InsertMessageImpl> message(new InsertMessageImpl(request_id, response_to));\n    message->fromBuffer(message_length, data);\n    callbacks_.decodeInsert(std::move(message));\n    break;\n  }\n\n  case Message::OpCode::KillCursors: {\n    std::unique_ptr<KillCursorsMessageImpl> message(\n        new KillCursorsMessageImpl(request_id, response_to));\n    message->fromBuffer(message_length, data);\n    callbacks_.decodeKillCursors(std::move(message));\n    break;\n  }\n\n  case Message::OpCode::Command: {\n    std::unique_ptr<CommandMessageImpl> message(new CommandMessageImpl(request_id, response_to));\n    message->fromBuffer(message_length, 
data);\n    callbacks_.decodeCommand(std::move(message));\n    break;\n  }\n\n  case Message::OpCode::CommandReply: {\n    std::unique_ptr<CommandReplyMessageImpl> message(\n        new CommandReplyMessageImpl(request_id, response_to));\n    message->fromBuffer(message_length, data);\n    callbacks_.decodeCommandReply(std::move(message));\n    break;\n  }\n\n  default:\n    throw EnvoyException(fmt::format(\"invalid mongo op {}\", static_cast<int32_t>(op_code)));\n  }\n\n  ENVOY_LOG(trace, \"{} bytes remaining after decoding\", data.length());\n  return true;\n}\n\nvoid DecoderImpl::onData(Buffer::Instance& data) {\n  while (data.length() > 0 && decode(data)) {\n  }\n}\n\nvoid EncoderImpl::encodeCommonHeader(int32_t total_size, const Message& message,\n                                     Message::OpCode op) {\n  Bson::BufferHelper::writeInt32(output_, total_size);\n  Bson::BufferHelper::writeInt32(output_, message.requestId());\n  Bson::BufferHelper::writeInt32(output_, message.responseTo());\n  Bson::BufferHelper::writeInt32(output_, static_cast<int32_t>(op));\n}\n\nvoid EncoderImpl::encodeGetMore(const GetMoreMessage& message) {\n  if (message.fullCollectionName().empty() || message.cursorId() == 0) {\n    throw EnvoyException(\"invalid get more message\");\n  }\n\n  // https://docs.mongodb.org/manual/reference/mongodb-wire-protocol/#op-get-more\n  int32_t total_size = Message::MessageHeaderSize + Message::Int32Length +\n                       message.fullCollectionName().size() + Message::StringPaddingLength +\n                       Message::Int32Length + Message::Int64Length;\n\n  encodeCommonHeader(total_size, message, Message::OpCode::GetMore);\n  Bson::BufferHelper::writeInt32(output_, 0);\n  Bson::BufferHelper::writeCString(output_, message.fullCollectionName());\n  Bson::BufferHelper::writeInt32(output_, message.numberToReturn());\n  Bson::BufferHelper::writeInt64(output_, message.cursorId());\n}\n\nvoid EncoderImpl::encodeInsert(const InsertMessage& 
message) {\n  if (message.fullCollectionName().empty() || message.documents().empty()) {\n    throw EnvoyException(\"invalid insert message\");\n  }\n\n  // https://docs.mongodb.org/manual/reference/mongodb-wire-protocol/#op-insert\n  int32_t total_size = Message::MessageHeaderSize + Message::Int32Length +\n                       message.fullCollectionName().size() + Message::StringPaddingLength;\n  for (const Bson::DocumentSharedPtr& document : message.documents()) {\n    total_size += document->byteSize();\n  }\n\n  encodeCommonHeader(total_size, message, Message::OpCode::Insert);\n  Bson::BufferHelper::writeInt32(output_, message.flags());\n  Bson::BufferHelper::writeCString(output_, message.fullCollectionName());\n  for (const Bson::DocumentSharedPtr& document : message.documents()) {\n    document->encode(output_);\n  }\n}\n\nvoid EncoderImpl::encodeKillCursors(const KillCursorsMessage& message) {\n  if (message.numberOfCursorIds() == 0 ||\n      message.numberOfCursorIds() != static_cast<int32_t>(message.cursorIds().size())) {\n    throw EnvoyException(\"invalid kill cursors message\");\n  }\n\n  // https://docs.mongodb.org/manual/reference/mongodb-wire-protocol/#op-kill-cursors\n  int32_t total_size =\n      Message::MessageHeaderSize + 2 * Message::Int32Length + (message.numberOfCursorIds() * 8);\n\n  encodeCommonHeader(total_size, message, Message::OpCode::KillCursors);\n  Bson::BufferHelper::writeInt32(output_, 0);\n  Bson::BufferHelper::writeInt32(output_, message.numberOfCursorIds());\n  for (int64_t cursor : message.cursorIds()) {\n    Bson::BufferHelper::writeInt64(output_, cursor);\n  }\n}\n\nvoid EncoderImpl::encodeQuery(const QueryMessage& message) {\n  if (message.fullCollectionName().empty() || !message.query()) {\n    throw EnvoyException(\"invalid query message\");\n  }\n\n  // https://docs.mongodb.org/manual/reference/mongodb-wire-protocol/#op-query\n  int32_t total_size = Message::MessageHeaderSize + 3 * Message::Int32Length +\n               
        message.fullCollectionName().size() + Message::StringPaddingLength +\n                       message.query()->byteSize();\n  if (message.returnFieldsSelector()) {\n    total_size += message.returnFieldsSelector()->byteSize();\n  }\n\n  encodeCommonHeader(total_size, message, Message::OpCode::Query);\n  Bson::BufferHelper::writeInt32(output_, message.flags());\n  Bson::BufferHelper::writeCString(output_, message.fullCollectionName());\n  Bson::BufferHelper::writeInt32(output_, message.numberToSkip());\n  Bson::BufferHelper::writeInt32(output_, message.numberToReturn());\n\n  message.query()->encode(output_);\n  if (message.returnFieldsSelector()) {\n    message.returnFieldsSelector()->encode(output_);\n  }\n}\n\nvoid EncoderImpl::encodeReply(const ReplyMessage& message) {\n  // https://docs.mongodb.org/manual/reference/mongodb-wire-protocol/#op-reply\n  int32_t total_size = Message::MessageHeaderSize + 3 * Message::Int32Length + Message::Int64Length;\n  for (const Bson::DocumentSharedPtr& document : message.documents()) {\n    total_size += document->byteSize();\n  }\n\n  encodeCommonHeader(total_size, message, Message::OpCode::Reply);\n  Bson::BufferHelper::writeInt32(output_, message.flags());\n  Bson::BufferHelper::writeInt64(output_, message.cursorId());\n  Bson::BufferHelper::writeInt32(output_, message.startingFrom());\n  Bson::BufferHelper::writeInt32(output_, message.numberReturned());\n  for (const Bson::DocumentSharedPtr& document : message.documents()) {\n    document->encode(output_);\n  }\n}\n\nvoid EncoderImpl::encodeCommand(const CommandMessage& message) {\n  int32_t total_size = Message::MessageHeaderSize;\n  total_size += message.database().size() + Message::StringPaddingLength;\n  total_size += message.commandName().size() + Message::StringPaddingLength;\n  total_size += message.metadata()->byteSize();\n  total_size += message.commandArgs()->byteSize();\n  for (const Bson::DocumentSharedPtr& document : message.inputDocs()) {\n    total_size 
+= document->byteSize();\n  }\n\n  // Now encode.\n  encodeCommonHeader(total_size, message, Message::OpCode::Command);\n  Bson::BufferHelper::writeCString(output_, message.database());\n  Bson::BufferHelper::writeCString(output_, message.commandName());\n  message.metadata()->encode(output_);\n  message.commandArgs()->encode(output_);\n  for (const Bson::DocumentSharedPtr& document : message.inputDocs()) {\n    document->encode(output_);\n  }\n}\n\nvoid EncoderImpl::encodeCommandReply(const CommandReplyMessage& message) {\n  int32_t total_size = Message::MessageHeaderSize;\n  total_size += message.metadata()->byteSize();\n  total_size += message.commandReply()->byteSize();\n  for (const Bson::DocumentSharedPtr& document : message.outputDocs()) {\n    total_size += document->byteSize();\n  }\n\n  // Now encode.\n  encodeCommonHeader(total_size, message, Message::OpCode::CommandReply);\n  message.metadata()->encode(output_);\n  message.commandReply()->encode(output_);\n  for (const Bson::DocumentSharedPtr& document : message.outputDocs()) {\n    document->encode(output_);\n  }\n}\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/codec_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <string>\n#include <vector>\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/mongo_proxy/codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nclass MessageImpl : public virtual Message {\npublic:\n  MessageImpl(int32_t request_id, uint32_t response_to)\n      : request_id_(request_id), response_to_(response_to) {}\n\n  virtual void fromBuffer(uint32_t message_length, Buffer::Instance& data) PURE;\n\n  // Mongo::Message\n  int32_t requestId() const override { return request_id_; }\n  int32_t responseTo() const override { return response_to_; }\n\nprotected:\n  std::string documentListToString(const std::list<Bson::DocumentSharedPtr>& documents) const;\n\n  const int32_t request_id_;\n  const int32_t response_to_;\n};\n\nclass GetMoreMessageImpl : public MessageImpl,\n                           public GetMoreMessage,\n                           Logger::Loggable<Logger::Id::mongo> {\npublic:\n  using MessageImpl::MessageImpl;\n\n  // MessageImpl\n  void fromBuffer(uint32_t message_length, Buffer::Instance& data) override;\n\n  // Mongo::Message\n  std::string toString(bool full) const override;\n\n  // Mongo::GetMoreMessage\n  bool operator==(const GetMoreMessage& rhs) const override;\n  const std::string& fullCollectionName() const override { return full_collection_name_; }\n  void fullCollectionName(const std::string& name) override { full_collection_name_ = name; }\n  int32_t numberToReturn() const override { return number_to_return_; }\n  void numberToReturn(int32_t to_return) override { number_to_return_ = to_return; }\n  int64_t cursorId() const override { return cursor_id_; }\n  void cursorId(int64_t cursor_id) override { cursor_id_ = cursor_id; }\n\nprivate:\n  std::string full_collection_name_;\n  int32_t number_to_return_{};\n  int64_t cursor_id_{};\n};\n\nclass InsertMessageImpl : public 
MessageImpl,\n                          public InsertMessage,\n                          Logger::Loggable<Logger::Id::mongo> {\npublic:\n  using MessageImpl::MessageImpl;\n\n  // MessageImpl\n  void fromBuffer(uint32_t message_length, Buffer::Instance& data) override;\n\n  // Mongo::Message\n  std::string toString(bool full) const override;\n\n  // Mongo::InsertMessage\n  bool operator==(const InsertMessage& rhs) const override;\n  int32_t flags() const override { return flags_; }\n  void flags(int32_t flags) override { flags_ = flags; }\n  const std::string& fullCollectionName() const override { return full_collection_name_; }\n  void fullCollectionName(const std::string& name) override { full_collection_name_ = name; }\n  const std::list<Bson::DocumentSharedPtr>& documents() const override { return documents_; }\n  std::list<Bson::DocumentSharedPtr>& documents() override { return documents_; }\n\nprivate:\n  int32_t flags_{};\n  std::string full_collection_name_;\n  std::list<Bson::DocumentSharedPtr> documents_;\n};\n\nclass KillCursorsMessageImpl : public MessageImpl,\n                               public KillCursorsMessage,\n                               Logger::Loggable<Logger::Id::mongo> {\npublic:\n  using MessageImpl::MessageImpl;\n\n  // MessageImpl\n  void fromBuffer(uint32_t message_length, Buffer::Instance& data) override;\n\n  // Mongo::Message\n  std::string toString(bool full) const override;\n\n  // Mongo::KillCursorsMessage\n  bool operator==(const KillCursorsMessage& rhs) const override;\n  int32_t numberOfCursorIds() const override { return number_of_cursor_ids_; }\n  void numberOfCursorIds(int32_t number_of_cursor_ids) override {\n    number_of_cursor_ids_ = number_of_cursor_ids;\n  }\n  const std::vector<int64_t>& cursorIds() const override { return cursor_ids_; }\n  void cursorIds(std::vector<int64_t>&& cursor_ids) override {\n    cursor_ids_ = std::move(cursor_ids);\n  }\n\nprivate:\n  int32_t number_of_cursor_ids_{};\n  
std::vector<int64_t> cursor_ids_;\n};\n\nclass QueryMessageImpl : public MessageImpl,\n                         public QueryMessage,\n                         Logger::Loggable<Logger::Id::mongo> {\npublic:\n  using MessageImpl::MessageImpl;\n\n  // MessageImpl\n  void fromBuffer(uint32_t message_length, Buffer::Instance& data) override;\n\n  // Mongo::Message\n  std::string toString(bool full) const override;\n\n  // Mongo::QueryMessage\n  bool operator==(const QueryMessage& rhs) const override;\n  int32_t flags() const override { return flags_; }\n  void flags(int32_t flags) override { flags_ = flags; }\n  const std::string& fullCollectionName() const override { return full_collection_name_; }\n  void fullCollectionName(const std::string& name) override { full_collection_name_ = name; }\n  int32_t numberToSkip() const override { return number_to_skip_; }\n  void numberToSkip(int32_t skip) override { number_to_skip_ = skip; }\n  int32_t numberToReturn() const override { return number_to_return_; }\n  void numberToReturn(int32_t to_return) override { number_to_return_ = to_return; }\n  const Bson::Document* query() const override { return query_.get(); }\n  void query(Bson::DocumentSharedPtr&& query) override { query_ = std::move(query); }\n  const Bson::Document* returnFieldsSelector() const override {\n    return return_fields_selector_.get();\n  }\n  void returnFieldsSelector(Bson::DocumentSharedPtr&& fields) override {\n    return_fields_selector_ = std::move(fields);\n  }\n\nprivate:\n  int32_t flags_{};\n  std::string full_collection_name_;\n  int32_t number_to_skip_{};\n  int32_t number_to_return_{};\n  Bson::DocumentSharedPtr query_;\n  Bson::DocumentSharedPtr return_fields_selector_;\n};\n\nclass ReplyMessageImpl : public MessageImpl,\n                         public ReplyMessage,\n                         Logger::Loggable<Logger::Id::mongo> {\npublic:\n  using MessageImpl::MessageImpl;\n\n  // MessageImpl\n  void fromBuffer(uint32_t message_length, 
Buffer::Instance& data) override;\n\n  // Mongo::Message\n  std::string toString(bool full) const override;\n\n  // Mongo::ReplyMessage\n  bool operator==(const ReplyMessage& rhs) const override;\n  int32_t flags() const override { return flags_; }\n  void flags(int32_t flags) override { flags_ = flags; }\n  int64_t cursorId() const override { return cursor_id_; }\n  void cursorId(int64_t cursor_id) override { cursor_id_ = cursor_id; }\n  int32_t startingFrom() const override { return starting_from_; }\n  void startingFrom(int32_t starting_from) override { starting_from_ = starting_from; }\n  int32_t numberReturned() const override { return number_returned_; }\n  void numberReturned(int32_t number_returned) override { number_returned_ = number_returned; }\n  const std::list<Bson::DocumentSharedPtr>& documents() const override { return documents_; }\n  std::list<Bson::DocumentSharedPtr>& documents() override { return documents_; }\n\nprivate:\n  int32_t flags_{};\n  int64_t cursor_id_{};\n  int32_t starting_from_{};\n  int32_t number_returned_{};\n  std::list<Bson::DocumentSharedPtr> documents_;\n};\n\n// OP_COMMAND message.\nclass CommandMessageImpl : public MessageImpl,\n                           public CommandMessage,\n                           Logger::Loggable<Logger::Id::mongo> {\npublic:\n  using MessageImpl::MessageImpl;\n\n  // MessageImpl.\n  void fromBuffer(uint32_t message_length, Buffer::Instance& data) override;\n  std::string toString(bool full) const override;\n\n  // CommandMessageImpl accessors.\n  bool operator==(const CommandMessage& rhs) const override;\n  std::string database() const override { return database_; }\n  void database(std::string database) override { database_ = database; }\n  std::string commandName() const override { return command_name_; }\n  void commandName(std::string command_name) override { command_name_ = command_name; }\n  const Bson::Document* metadata() const override { return metadata_.get(); }\n  void 
metadata(Bson::DocumentSharedPtr&& metadata) override { metadata_ = std::move(metadata); }\n  const Bson::Document* commandArgs() const override { return command_args_.get(); }\n  void commandArgs(Bson::DocumentSharedPtr&& command_args) override {\n    command_args_ = std::move(command_args);\n  }\n  const std::list<Bson::DocumentSharedPtr>& inputDocs() const override { return input_docs_; }\n  std::list<Bson::DocumentSharedPtr>& inputDocs() override { return input_docs_; }\n\nprivate:\n  std::string database_;\n  std::string command_name_;\n  Bson::DocumentSharedPtr metadata_;\n  Bson::DocumentSharedPtr command_args_;\n  std::list<Bson::DocumentSharedPtr> input_docs_;\n};\n\n// OP_COMMANDREPLY message.\nclass CommandReplyMessageImpl : public MessageImpl,\n                                public CommandReplyMessage,\n                                Logger::Loggable<Logger::Id::mongo> {\npublic:\n  using MessageImpl::MessageImpl;\n\n  // MessageImpl.\n  void fromBuffer(uint32_t message_length, Buffer::Instance& data) override;\n  std::string toString(bool full) const override;\n\n  // CommandMessageReplyImpl accessors.\n  bool operator==(const CommandReplyMessage& rhs) const override;\n  const Bson::Document* metadata() const override { return metadata_.get(); }\n  void metadata(Bson::DocumentSharedPtr&& metadata) override { metadata_ = std::move(metadata); }\n  const Bson::Document* commandReply() const override { return command_reply_.get(); }\n  void commandReply(Bson::DocumentSharedPtr&& command_reply) override {\n    command_reply_ = std::move(command_reply);\n  }\n  const std::list<Bson::DocumentSharedPtr>& outputDocs() const override { return output_docs_; }\n  std::list<Bson::DocumentSharedPtr>& outputDocs() override { return output_docs_; }\n\nprivate:\n  Bson::DocumentSharedPtr metadata_;\n  Bson::DocumentSharedPtr command_reply_;\n  std::list<Bson::DocumentSharedPtr> output_docs_;\n};\n\nclass DecoderImpl : public Decoder, 
Logger::Loggable<Logger::Id::mongo> {\npublic:\n  DecoderImpl(DecoderCallbacks& callbacks) : callbacks_(callbacks) {}\n\n  // Mongo::Decoder\n  void onData(Buffer::Instance& data) override;\n\nprivate:\n  bool decode(Buffer::Instance& data);\n\n  DecoderCallbacks& callbacks_;\n};\n\nclass EncoderImpl : public Encoder, Logger::Loggable<Logger::Id::mongo> {\npublic:\n  EncoderImpl(Buffer::Instance& output) : output_(output) {}\n\n  // Mongo::Encoder\n  void encodeGetMore(const GetMoreMessage& message) override;\n  void encodeInsert(const InsertMessage& message) override;\n  void encodeKillCursors(const KillCursorsMessage& message) override;\n  void encodeQuery(const QueryMessage& message) override;\n  void encodeReply(const ReplyMessage& message) override;\n  void encodeCommand(const CommandMessage& message) override;\n  void encodeCommandReply(const CommandReplyMessage& message) override;\n\nprivate:\n  void encodeCommonHeader(int32_t total_size, const Message& message, Message::OpCode op);\n\n  Buffer::Instance& output_;\n};\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/config.cc",
    "content": "#include \"extensions/filters/network/mongo_proxy/config.h\"\n\n#include <memory>\n\n#include \"envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.validate.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/fmt.h\"\n\n#include \"extensions/filters/network/mongo_proxy/proxy.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nNetwork::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy& proto_config,\n    Server::Configuration::FactoryContext& context) {\n\n  ASSERT(!proto_config.stat_prefix().empty());\n\n  const std::string stat_prefix = fmt::format(\"mongo.{}\", proto_config.stat_prefix());\n  AccessLogSharedPtr access_log;\n  if (!proto_config.access_log().empty()) {\n    access_log = std::make_shared<AccessLog>(proto_config.access_log(), context.accessLogManager(),\n                                             context.dispatcher().timeSource());\n  }\n\n  Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config;\n  if (proto_config.has_delay()) {\n    fault_config = std::make_shared<Filters::Common::Fault::FaultDelayConfig>(proto_config.delay());\n  }\n\n  auto stats = std::make_shared<MongoStats>(context.scope(), stat_prefix);\n  const bool emit_dynamic_metadata = proto_config.emit_dynamic_metadata();\n  return [stat_prefix, &context, access_log, fault_config, emit_dynamic_metadata,\n          stats](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addFilter(std::make_shared<ProdProxyFilter>(\n        stat_prefix, context.scope(), context.runtime(), access_log, fault_config,\n        context.drainDecision(), context.dispatcher().timeSource(), emit_dynamic_metadata, stats));\n  };\n}\n\n/**\n * Static registration 
for the mongo filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(MongoProxyFilterConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory){\"envoy.mongo_proxy\"};\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\n/**\n * Config registration for the mongo proxy filter. @see NamedNetworkFilterConfigFactory.\n */\nclass MongoProxyFilterConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy> {\npublic:\n  MongoProxyFilterConfigFactory() : FactoryBase(NetworkFilterNames::get().MongoProxy) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/mongo_stats.cc",
    "content": "#include \"extensions/filters/network/mongo_proxy/mongo_stats.h\"\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nMongoStats::MongoStats(Stats::Scope& scope, absl::string_view prefix)\n    : scope_(scope), stat_name_set_(scope.symbolTable().makeSet(\"Mongo\")),\n      prefix_(stat_name_set_->add(prefix)), callsite_(stat_name_set_->add(\"callsite\")),\n      cmd_(stat_name_set_->add(\"cmd\")), collection_(stat_name_set_->add(\"collection\")),\n      multi_get_(stat_name_set_->add(\"multi_get\")),\n      reply_num_docs_(stat_name_set_->add(\"reply_num_docs\")),\n      reply_size_(stat_name_set_->add(\"reply_size\")),\n      reply_time_ms_(stat_name_set_->add(\"reply_time_ms\")),\n      time_ms_(stat_name_set_->add(\"time_ms\")), query_(stat_name_set_->add(\"query\")),\n      scatter_get_(stat_name_set_->add(\"scatter_get\")), total_(stat_name_set_->add(\"total\")),\n      unknown_command_(stat_name_set_->add(\"unknown_command\")) {\n\n  // TODO(jmarantz): is this the right set of mongo commands to use as builtins?\n  // Should we also have builtins for callsites or collections, or do those need\n  // to be dynamic?\n  stat_name_set_->rememberBuiltins({\"insert\", \"query\", \"update\", \"delete\"});\n}\n\nStats::ElementVec MongoStats::addPrefix(const Stats::ElementVec& names) {\n  Stats::ElementVec names_with_prefix;\n  names_with_prefix.reserve(1 + names.size());\n  names_with_prefix.push_back(prefix_);\n  names_with_prefix.insert(names_with_prefix.end(), names.begin(), names.end());\n  return names_with_prefix;\n}\n\nvoid MongoStats::incCounter(const Stats::ElementVec& names) {\n  Stats::Utility::counterFromElements(scope_, addPrefix(names)).inc();\n}\n\nvoid MongoStats::recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit,\n          
                       uint64_t sample) {\n  Stats::Utility::histogramFromElements(scope_, addPrefix(names), unit).recordValue(sample);\n}\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/mongo_stats.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nclass MongoStats {\npublic:\n  MongoStats(Stats::Scope& scope, absl::string_view prefix);\n\n  void incCounter(const Stats::ElementVec& names);\n  void recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit,\n                       uint64_t sample);\n\n  /**\n   * Finds or creates a StatName by string, taking a global lock if needed.\n   *\n   * TODO(jmarantz): Potential perf issue here with mutex contention for names\n   * that have not been remembered as builtins in the constructor.\n   */\n  Stats::StatName getBuiltin(const std::string& str, Stats::StatName fallback) {\n    return stat_name_set_->getBuiltin(str, fallback);\n  }\n\nprivate:\n  Stats::ElementVec addPrefix(const Stats::ElementVec& names);\n\n  Stats::Scope& scope_;\n  Stats::StatNameSetPtr stat_name_set_;\n\npublic:\n  const Stats::StatName prefix_;\n  const Stats::StatName callsite_;\n  const Stats::StatName cmd_;\n  const Stats::StatName collection_;\n  const Stats::StatName multi_get_;\n  const Stats::StatName reply_num_docs_;\n  const Stats::StatName reply_size_;\n  const Stats::StatName reply_time_ms_;\n  const Stats::StatName time_ms_;\n  const Stats::StatName query_;\n  const Stats::StatName scatter_get_;\n  const Stats::StatName total_;\n  const Stats::StatName unknown_command_;\n};\nusing MongoStatsSharedPtr = std::shared_ptr<MongoStats>;\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/proxy.cc",
    "content": "#include \"extensions/filters/network/mongo_proxy/proxy.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n\n#include \"extensions/filters/network/mongo_proxy/codec_impl.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"absl/strings/str_split.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nclass DynamicMetadataKeys {\npublic:\n  const std::string OperationInsert{\"insert\"};\n  const std::string OperationQuery{\"query\"};\n  // TODO: Parse out the delete/update operation from the commands\n  const std::string OperationUpdate{\"update\"};\n  const std::string OperationDelete{\"delete\"};\n};\n\nusing DynamicMetadataKeysSingleton = ConstSingleton<DynamicMetadataKeys>;\n\nAccessLog::AccessLog(const std::string& file_name, Envoy::AccessLog::AccessLogManager& log_manager,\n                     TimeSource& time_source)\n    : time_source_(time_source) {\n  file_ = log_manager.createAccessLog(file_name);\n}\n\nvoid AccessLog::logMessage(const Message& message, bool full,\n                           const Upstream::HostDescription* upstream_host) {\n  static const std::string log_format =\n      \"{{\\\"time\\\": \\\"{}\\\", \\\"message\\\": {}, \\\"upstream_host\\\": \\\"{}\\\"}}\\n\";\n\n  SystemTime now = time_source_.systemTime();\n  std::string log_line =\n      fmt::format(log_format, AccessLogDateTimeFormatter::fromTime(now), message.toString(full),\n                  upstream_host ? 
upstream_host->address()->asString() : \"-\");\n\n  file_->write(log_line);\n}\n\nProxyFilter::ProxyFilter(const std::string& stat_prefix, Stats::Scope& scope,\n                         Runtime::Loader& runtime, AccessLogSharedPtr access_log,\n                         const Filters::Common::Fault::FaultDelayConfigSharedPtr& fault_config,\n                         const Network::DrainDecision& drain_decision, TimeSource& time_source,\n                         bool emit_dynamic_metadata, const MongoStatsSharedPtr& mongo_stats)\n    : stats_(generateStats(stat_prefix, scope)), runtime_(runtime), drain_decision_(drain_decision),\n      access_log_(access_log), fault_config_(fault_config), time_source_(time_source),\n      emit_dynamic_metadata_(emit_dynamic_metadata), mongo_stats_(mongo_stats) {\n  if (!runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().ConnectionLoggingEnabled,\n                                          100)) {\n    // If we are not logging at the connection level, just release the shared pointer so that we\n    // don't ever log.\n    access_log_.reset();\n  }\n}\n\nProxyFilter::~ProxyFilter() { ASSERT(!delay_timer_); }\n\nvoid ProxyFilter::setDynamicMetadata(std::string operation, std::string resource) {\n  ProtobufWkt::Struct metadata(\n      (*read_callbacks_->connection()\n            .streamInfo()\n            .dynamicMetadata()\n            .mutable_filter_metadata())[NetworkFilterNames::get().MongoProxy]);\n  auto& fields = *metadata.mutable_fields();\n  // TODO(rshriram): reverse the resource string (table.db)\n  auto& operations = *fields[resource].mutable_list_value();\n  operations.add_values()->set_string_value(operation);\n\n  read_callbacks_->connection().streamInfo().setDynamicMetadata(\n      NetworkFilterNames::get().MongoProxy, metadata);\n}\n\nvoid ProxyFilter::decodeGetMore(GetMoreMessagePtr&& message) {\n  tryInjectDelay();\n\n  stats_.op_get_more_.inc();\n  logMessage(*message, true);\n  ENVOY_LOG(debug, \"decoded 
GET_MORE: {}\", message->toString(true));\n}\n\nvoid ProxyFilter::decodeInsert(InsertMessagePtr&& message) {\n  tryInjectDelay();\n\n  if (emit_dynamic_metadata_) {\n    setDynamicMetadata(DynamicMetadataKeysSingleton::get().OperationInsert,\n                       message->fullCollectionName());\n  }\n\n  stats_.op_insert_.inc();\n  logMessage(*message, true);\n  ENVOY_LOG(debug, \"decoded INSERT: {}\", message->toString(true));\n}\n\nvoid ProxyFilter::decodeKillCursors(KillCursorsMessagePtr&& message) {\n  tryInjectDelay();\n\n  stats_.op_kill_cursors_.inc();\n  logMessage(*message, true);\n  ENVOY_LOG(debug, \"decoded KILL_CURSORS: {}\", message->toString(true));\n}\n\nvoid ProxyFilter::decodeQuery(QueryMessagePtr&& message) {\n  tryInjectDelay();\n\n  if (emit_dynamic_metadata_) {\n    setDynamicMetadata(DynamicMetadataKeysSingleton::get().OperationQuery,\n                       message->fullCollectionName());\n  }\n\n  stats_.op_query_.inc();\n  logMessage(*message, true);\n  ENVOY_LOG(debug, \"decoded QUERY: {}\", message->toString(true));\n\n  if (message->flags() & QueryMessage::Flags::TailableCursor) {\n    stats_.op_query_tailable_cursor_.inc();\n  }\n  if (message->flags() & QueryMessage::Flags::NoCursorTimeout) {\n    stats_.op_query_no_cursor_timeout_.inc();\n  }\n  if (message->flags() & QueryMessage::Flags::AwaitData) {\n    stats_.op_query_await_data_.inc();\n  }\n  if (message->flags() & QueryMessage::Flags::Exhaust) {\n    stats_.op_query_exhaust_.inc();\n  }\n\n  ActiveQueryPtr active_query(new ActiveQuery(*this, *message));\n  if (!active_query->query_info_.command().empty()) {\n    // First field key is the operation.\n    mongo_stats_->incCounter({mongo_stats_->cmd_,\n                              mongo_stats_->getBuiltin(active_query->query_info_.command(),\n                                                       mongo_stats_->unknown_command_),\n                              mongo_stats_->total_});\n  } else {\n    // Normal query, get stats 
on a per collection basis first.\n    QueryMessageInfo::QueryType query_type = active_query->query_info_.type();\n    Stats::ElementVec names;\n    names.reserve(6); // 2 entries are added by chargeQueryStats().\n    names.push_back(mongo_stats_->collection_);\n    names.push_back(Stats::DynamicName(active_query->query_info_.collection()));\n    chargeQueryStats(names, query_type);\n\n    // Callsite stats if we have it.\n    if (!active_query->query_info_.callsite().empty()) {\n      names.push_back(mongo_stats_->callsite_);\n      names.push_back(Stats::DynamicName(active_query->query_info_.callsite()));\n      chargeQueryStats(names, query_type);\n    }\n\n    // Global stats.\n    if (active_query->query_info_.maxTime() < 1) {\n      stats_.op_query_no_max_time_.inc();\n    }\n    if (query_type == QueryMessageInfo::QueryType::ScatterGet) {\n      stats_.op_query_scatter_get_.inc();\n    } else if (query_type == QueryMessageInfo::QueryType::MultiGet) {\n      stats_.op_query_multi_get_.inc();\n    }\n  }\n\n  active_query_list_.emplace_back(std::move(active_query));\n}\n\nvoid ProxyFilter::chargeQueryStats(Stats::ElementVec& names,\n                                   QueryMessageInfo::QueryType query_type) {\n  // names come in containing {\"collection\", collection}. Report stats for 1 or\n  // 2 variations on this array, and then return with the array in the same\n  // state it had on entry. 
Both of these variations by appending {\"query\", \"total\"}.\n  size_t orig_size = names.size();\n  ASSERT(names.capacity() - orig_size >= 2); // Ensures the caller has reserved() enough memory.\n  names.push_back(mongo_stats_->query_);\n  names.push_back(mongo_stats_->total_);\n  mongo_stats_->incCounter(names);\n\n  // And now replace \"total\" with either \"scatter_get\" or \"multi_get\" if depending on query_type.\n  if (query_type == QueryMessageInfo::QueryType::ScatterGet) {\n    names.back() = mongo_stats_->scatter_get_;\n    mongo_stats_->incCounter(names);\n  } else if (query_type == QueryMessageInfo::QueryType::MultiGet) {\n    names.back() = mongo_stats_->multi_get_;\n    mongo_stats_->incCounter(names);\n  }\n  names.resize(orig_size);\n}\n\nvoid ProxyFilter::decodeReply(ReplyMessagePtr&& message) {\n  stats_.op_reply_.inc();\n  logMessage(*message, false);\n  ENVOY_LOG(debug, \"decoded REPLY: {}\", message->toString(true));\n\n  if (message->cursorId() != 0) {\n    stats_.op_reply_valid_cursor_.inc();\n  }\n  if (message->flags() & ReplyMessage::Flags::CursorNotFound) {\n    stats_.op_reply_cursor_not_found_.inc();\n  }\n  if (message->flags() & ReplyMessage::Flags::QueryFailure) {\n    stats_.op_reply_query_failure_.inc();\n  }\n\n  for (auto i = active_query_list_.begin(); i != active_query_list_.end(); i++) {\n    ActiveQuery& active_query = **i;\n    if (active_query.query_info_.requestId() != message->responseTo()) {\n      continue;\n    }\n\n    if (!active_query.query_info_.command().empty()) {\n      Stats::ElementVec names{mongo_stats_->cmd_,\n                              mongo_stats_->getBuiltin(active_query.query_info_.command(),\n                                                       mongo_stats_->unknown_command_)};\n      chargeReplyStats(active_query, names, *message);\n    } else {\n      // Collection stats first.\n      Stats::ElementVec names{mongo_stats_->collection_,\n                              
Stats::DynamicName(active_query.query_info_.collection()),\n                              mongo_stats_->query_};\n      chargeReplyStats(active_query, names, *message);\n\n      // Callsite stats if we have it.\n      if (!active_query.query_info_.callsite().empty()) {\n        // Currently, names == {\"collection\", collection, \"query\"} and we are going\n        // to mutate the array to {\"collection\", collection, \"callsite\", callsite, \"query\"}.\n        ASSERT(names.size() == 3);\n        names.back() = mongo_stats_->callsite_; // Replaces \"query\".\n        names.push_back(Stats::DynamicName(active_query.query_info_.callsite()));\n        names.push_back(mongo_stats_->query_);\n        chargeReplyStats(active_query, names, *message);\n      }\n    }\n\n    active_query_list_.erase(i);\n    break;\n  }\n\n  if (active_query_list_.empty() && drain_decision_.drainClose() &&\n      runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().DrainCloseEnabled, 100)) {\n    ENVOY_LOG(debug, \"drain closing mongo connection\");\n    stats_.cx_drain_close_.inc();\n\n    // We are currently in the write path, so we need to let the write flush out before we close.\n    // We do this by creating a timer and firing it with a zero timeout. This will cause it to run\n    // in the next event loop iteration. This is really a hack. A better solution would be to\n    // introduce the concept of a write complete callback so we can get notified when the write goes\n    // out (e.g., flow control, further filters, etc.). 
This is a much larger project so we can\n    // start with this since it will get the job done.\n    // TODO(mattklein123): Investigate a better solution for write complete callbacks.\n    if (drain_close_timer_ == nullptr) {\n      drain_close_timer_ =\n          read_callbacks_->connection().dispatcher().createTimer([this] { onDrainClose(); });\n      drain_close_timer_->enableTimer(std::chrono::milliseconds(0));\n    }\n  }\n}\n\nvoid ProxyFilter::decodeCommand(CommandMessagePtr&& message) {\n  tryInjectDelay();\n\n  stats_.op_command_.inc();\n  logMessage(*message, true);\n  ENVOY_LOG(debug, \"decoded COMMAND: {}\", message->toString(true));\n}\n\nvoid ProxyFilter::decodeCommandReply(CommandReplyMessagePtr&& message) {\n  tryInjectDelay();\n\n  stats_.op_command_reply_.inc();\n  logMessage(*message, true);\n  ENVOY_LOG(debug, \"decoded COMMANDREPLY: {}\", message->toString(true));\n}\n\nvoid ProxyFilter::onDrainClose() {\n  read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n}\n\nvoid ProxyFilter::chargeReplyStats(ActiveQuery& active_query, Stats::ElementVec& names,\n                                   const ReplyMessage& message) {\n  uint64_t reply_documents_byte_size = 0;\n  for (const Bson::DocumentSharedPtr& document : message.documents()) {\n    reply_documents_byte_size += document->byteSize();\n  }\n\n  // Write 3 different histograms; appending 3 different suffixes to the name\n  // that was passed in. 
Here we overwrite the passed-in names, but we restore\n  // names to its original state upon return.\n  const size_t orig_size = names.size();\n  names.push_back(mongo_stats_->reply_num_docs_);\n  mongo_stats_->recordHistogram(names, Stats::Histogram::Unit::Unspecified,\n                                message.documents().size());\n  names[orig_size] = mongo_stats_->reply_size_;\n  mongo_stats_->recordHistogram(names, Stats::Histogram::Unit::Bytes, reply_documents_byte_size);\n  names[orig_size] = mongo_stats_->reply_time_ms_;\n  mongo_stats_->recordHistogram(names, Stats::Histogram::Unit::Milliseconds,\n                                std::chrono::duration_cast<std::chrono::milliseconds>(\n                                    time_source_.monotonicTime() - active_query.start_time_)\n                                    .count());\n  names.resize(orig_size);\n}\n\nvoid ProxyFilter::doDecode(Buffer::Instance& buffer) {\n  if (!sniffing_ ||\n      !runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().ProxyEnabled, 100)) {\n    // Safety measure just to make sure that if we have a decoding error we keep going and lose\n    // stats. 
This can be removed once we are more confident of this code.\n    buffer.drain(buffer.length());\n    return;\n  }\n\n  // Clear dynamic metadata\n  if (emit_dynamic_metadata_) {\n    auto& metadata = (*read_callbacks_->connection()\n                           .streamInfo()\n                           .dynamicMetadata()\n                           .mutable_filter_metadata())[NetworkFilterNames::get().MongoProxy];\n    metadata.mutable_fields()->clear();\n  }\n\n  if (!decoder_) {\n    decoder_ = createDecoder(*this);\n  }\n\n  try {\n    decoder_->onData(buffer);\n  } catch (EnvoyException& e) {\n    ENVOY_LOG(info, \"mongo decoding error: {}\", e.what());\n    stats_.decoding_error_.inc();\n    sniffing_ = false;\n  }\n}\n\nvoid ProxyFilter::logMessage(Message& message, bool full) {\n  if (access_log_ &&\n      runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().LoggingEnabled, 100)) {\n    access_log_->logMessage(message, full, read_callbacks_->upstreamHost().get());\n  }\n}\n\nvoid ProxyFilter::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    if (delay_timer_) {\n      delay_timer_->disableTimer();\n      delay_timer_.reset();\n    }\n\n    if (drain_close_timer_) {\n      drain_close_timer_->disableTimer();\n      drain_close_timer_.reset();\n    }\n  }\n\n  if (event == Network::ConnectionEvent::RemoteClose && !active_query_list_.empty()) {\n    stats_.cx_destroy_local_with_active_rq_.inc();\n  }\n\n  if (event == Network::ConnectionEvent::LocalClose && !active_query_list_.empty()) {\n    stats_.cx_destroy_remote_with_active_rq_.inc();\n  }\n}\n\nNetwork::FilterStatus ProxyFilter::onData(Buffer::Instance& data, bool) {\n  read_buffer_.add(data);\n  doDecode(read_buffer_);\n\n  return delay_timer_ ? 
Network::FilterStatus::StopIteration : Network::FilterStatus::Continue;\n}\n\nNetwork::FilterStatus ProxyFilter::onWrite(Buffer::Instance& data, bool) {\n  write_buffer_.add(data);\n  doDecode(write_buffer_);\n  return Network::FilterStatus::Continue;\n}\n\nDecoderPtr ProdProxyFilter::createDecoder(DecoderCallbacks& callbacks) {\n  return DecoderPtr{new DecoderImpl(callbacks)};\n}\n\nabsl::optional<std::chrono::milliseconds> ProxyFilter::delayDuration() {\n  absl::optional<std::chrono::milliseconds> result;\n\n  if (!fault_config_) {\n    return result;\n  }\n\n  // Use a default percentage\n  const auto percentage = fault_config_->percentage(nullptr);\n  if (!runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().FixedDelayPercent,\n                                          percentage)) {\n    return result;\n  }\n\n  // See if the delay provider has a default delay, if not there is no delay.\n  auto config_duration = fault_config_->duration(nullptr);\n  if (!config_duration.has_value()) {\n    return result;\n  }\n\n  const std::chrono::milliseconds duration =\n      std::chrono::milliseconds(runtime_.snapshot().getInteger(\n          MongoRuntimeConfig::get().FixedDelayDurationMs, config_duration.value().count()));\n\n  // Delay only if the duration is > 0ms.\n  if (duration.count() > 0) {\n    result = duration;\n  }\n\n  return result;\n}\n\nvoid ProxyFilter::delayInjectionTimerCallback() {\n  delay_timer_.reset();\n\n  // Continue request processing.\n  read_callbacks_->continueReading();\n}\n\nvoid ProxyFilter::tryInjectDelay() {\n  // Do not try to inject delays if there is an active delay.\n  // Make sure to capture stats for the request otherwise.\n  if (delay_timer_) {\n    return;\n  }\n\n  const absl::optional<std::chrono::milliseconds> delay = delayDuration();\n\n  if (delay) {\n    delay_timer_ = read_callbacks_->connection().dispatcher().createTimer(\n        [this]() -> void { delayInjectionTimerCallback(); });\n    
delay_timer_->enableTimer(delay.value());\n    stats_.delays_injected_.inc();\n  }\n}\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/proxy.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/drain_decision.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/network/filter_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/common/fault/fault_config.h\"\n#include \"extensions/filters/network/mongo_proxy/codec.h\"\n#include \"extensions/filters/network/mongo_proxy/mongo_stats.h\"\n#include \"extensions/filters/network/mongo_proxy/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nclass MongoRuntimeConfigKeys {\npublic:\n  const std::string FixedDelayPercent{\"mongo.fault.fixed_delay.percent\"};\n  const std::string FixedDelayDurationMs{\"mongo.fault.fixed_delay.duration_ms\"};\n  const std::string LoggingEnabled{\"mongo.logging_enabled\"};\n  const std::string ProxyEnabled{\"mongo.proxy_enabled\"};\n  const std::string ConnectionLoggingEnabled{\"mongo.connection_logging_enabled\"};\n  const std::string DrainCloseEnabled{\"mongo.drain_close_enabled\"};\n};\n\nusing MongoRuntimeConfig = ConstSingleton<MongoRuntimeConfigKeys>;\n\n/**\n * All mongo proxy stats. 
@see stats_macros.h\n */\n#define ALL_MONGO_PROXY_STATS(COUNTER, GAUGE, HISTOGRAM)                                           \\\n  COUNTER(cx_destroy_local_with_active_rq)                                                         \\\n  COUNTER(cx_destroy_remote_with_active_rq)                                                        \\\n  COUNTER(cx_drain_close)                                                                          \\\n  COUNTER(decoding_error)                                                                          \\\n  COUNTER(delays_injected)                                                                         \\\n  COUNTER(op_command)                                                                              \\\n  COUNTER(op_command_reply)                                                                        \\\n  COUNTER(op_get_more)                                                                             \\\n  COUNTER(op_insert)                                                                               \\\n  COUNTER(op_kill_cursors)                                                                         \\\n  COUNTER(op_query)                                                                                \\\n  COUNTER(op_query_await_data)                                                                     \\\n  COUNTER(op_query_exhaust)                                                                        \\\n  COUNTER(op_query_multi_get)                                                                      \\\n  COUNTER(op_query_no_cursor_timeout)                                                              \\\n  COUNTER(op_query_no_max_time)                                                                    \\\n  COUNTER(op_query_scatter_get)                                                                    \\\n  COUNTER(op_query_tailable_cursor)                                                                \\\n  
COUNTER(op_reply)                                                                                \\\n  COUNTER(op_reply_cursor_not_found)                                                               \\\n  COUNTER(op_reply_query_failure)                                                                  \\\n  COUNTER(op_reply_valid_cursor)                                                                   \\\n  GAUGE(op_query_active, Accumulate)\n\n/**\n * Struct definition for all mongo proxy stats. @see stats_macros.h\n */\nstruct MongoProxyStats {\n  ALL_MONGO_PROXY_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\n/**\n * Access logger for mongo messages.\n */\nclass AccessLog {\npublic:\n  AccessLog(const std::string& file_name, Envoy::AccessLog::AccessLogManager& log_manager,\n            TimeSource& time_source);\n\n  void logMessage(const Message& message, bool full,\n                  const Upstream::HostDescription* upstream_host);\n\nprivate:\n  TimeSource& time_source_;\n  Envoy::AccessLog::AccessLogFileSharedPtr file_;\n};\n\nusing AccessLogSharedPtr = std::shared_ptr<AccessLog>;\n\n/**\n * A sniffing filter for mongo traffic. 
The current implementation makes a copy of read/written\n * data, decodes it, and generates stats.\n */\nclass ProxyFilter : public Network::Filter,\n                    public DecoderCallbacks,\n                    public Network::ConnectionCallbacks,\n                    Logger::Loggable<Logger::Id::mongo> {\npublic:\n  ProxyFilter(const std::string& stat_prefix, Stats::Scope& scope, Runtime::Loader& runtime,\n              AccessLogSharedPtr access_log,\n              const Filters::Common::Fault::FaultDelayConfigSharedPtr& fault_config,\n              const Network::DrainDecision& drain_decision, TimeSource& time_system,\n              bool emit_dynamic_metadata, const MongoStatsSharedPtr& stats);\n  ~ProxyFilter() override;\n\n  virtual DecoderPtr createDecoder(DecoderCallbacks& callbacks) PURE;\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; }\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n    read_callbacks_->connection().addConnectionCallbacks(*this);\n  }\n\n  // Network::WriteFilter\n  Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override;\n\n  // Mongo::DecoderCallback\n  void decodeGetMore(GetMoreMessagePtr&& message) override;\n  void decodeInsert(InsertMessagePtr&& message) override;\n  void decodeKillCursors(KillCursorsMessagePtr&& message) override;\n  void decodeQuery(QueryMessagePtr&& message) override;\n  void decodeReply(ReplyMessagePtr&& message) override;\n  void decodeCommand(CommandMessagePtr&& message) override;\n  void decodeCommandReply(CommandReplyMessagePtr&& message) override;\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n 
 void setDynamicMetadata(std::string operation, std::string resource);\n\nprivate:\n  struct ActiveQuery {\n    ActiveQuery(ProxyFilter& parent, const QueryMessage& query)\n        : parent_(parent), query_info_(query), start_time_(parent_.time_source_.monotonicTime()) {\n      parent_.stats_.op_query_active_.inc();\n    }\n\n    ~ActiveQuery() { parent_.stats_.op_query_active_.dec(); }\n\n    ProxyFilter& parent_;\n    QueryMessageInfo query_info_;\n    MonotonicTime start_time_;\n  };\n\n  using ActiveQueryPtr = std::unique_ptr<ActiveQuery>;\n\n  MongoProxyStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return MongoProxyStats{ALL_MONGO_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix),\n                                                 POOL_GAUGE_PREFIX(scope, prefix),\n                                                 POOL_HISTOGRAM_PREFIX(scope, prefix))};\n  }\n\n  // Increment counters related to queries. 'names' is passed by non-const\n  // reference so the implementation can mutate it without copying, though it\n  // always restores it to its prior state prior to return.\n  void chargeQueryStats(Stats::ElementVec& names, QueryMessageInfo::QueryType query_type);\n\n  // Add samples to histograms related to replies. 
'names' is passed by\n  // non-const reference so the implementation can mutate it without copying,\n  // though it always restores it to its prior state prior to return.\n  void chargeReplyStats(ActiveQuery& active_query, Stats::ElementVec& names,\n                        const ReplyMessage& message);\n\n  void doDecode(Buffer::Instance& buffer);\n  void logMessage(Message& message, bool full);\n  void onDrainClose();\n  absl::optional<std::chrono::milliseconds> delayDuration();\n  void delayInjectionTimerCallback();\n  void tryInjectDelay();\n\n  std::unique_ptr<Decoder> decoder_;\n  MongoProxyStats stats_;\n  Runtime::Loader& runtime_;\n  const Network::DrainDecision& drain_decision_;\n  Buffer::OwnedImpl read_buffer_;\n  Buffer::OwnedImpl write_buffer_;\n  bool sniffing_{true};\n  std::list<ActiveQueryPtr> active_query_list_;\n  AccessLogSharedPtr access_log_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  const Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config_;\n  Event::TimerPtr delay_timer_;\n  Event::TimerPtr drain_close_timer_;\n  TimeSource& time_source_;\n  const bool emit_dynamic_metadata_;\n  MongoStatsSharedPtr mongo_stats_;\n};\n\nclass ProdProxyFilter : public ProxyFilter {\npublic:\n  using ProxyFilter::ProxyFilter;\n\n  // ProxyFilter\n  DecoderPtr createDecoder(DecoderCallbacks& callbacks) override;\n};\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/utility.cc",
    "content": "#include \"extensions/filters/network/mongo_proxy/utility.h\"\n\n#include <string>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/json/json_loader.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nQueryMessageInfo::QueryMessageInfo(const QueryMessage& query)\n    : request_id_{query.requestId()}, max_time_{0} {\n  // First see if this is a command, if so we are done.\n  const Bson::Document* command = parseCommand(query);\n  if (command) {\n    command_ = command->values().front()->key();\n\n    // Special case the 3.2 'find' command since it is a query.\n    if (command_ == \"find\") {\n      command_ = \"\";\n      parseFindCommand(*command);\n    }\n\n    return;\n  }\n\n  // Standard query.\n  collection_ = parseCollection(query.fullCollectionName());\n  callsite_ = parseCallingFunction(query);\n  max_time_ = parseMaxTime(query);\n  type_ = parseType(query);\n}\n\nstd::string QueryMessageInfo::parseCollection(const std::string& full_collection_name) {\n  size_t collection_index = full_collection_name.find('.');\n  if (collection_index == std::string::npos) {\n    throw EnvoyException(\"invalid full collection name\");\n  }\n\n  return full_collection_name.substr(collection_index + 1);\n}\n\nint32_t QueryMessageInfo::parseMaxTime(const QueryMessage& query) {\n  const Bson::Field* field = query.query()->find(\"$maxTimeMS\");\n  if (!field) {\n    field = query.query()->find(\"maxTimeMS\");\n    if (!field) {\n      return 0;\n    }\n  }\n\n  if (field->type() == Bson::Field::Type::Int32) {\n    return field->asInt32();\n  } else if (field->type() == Bson::Field::Type::Int64) {\n    return static_cast<int32_t>(field->asInt64());\n  } else {\n    return 0;\n  }\n}\n\nconst Bson::Document* QueryMessageInfo::parseCommand(const QueryMessage& query) {\n  if (query.fullCollectionName().find(\"$cmd\") == std::string::npos) {\n    return nullptr;\n  }\n\n  // See if there is a $query 
document, and use that to find the command if so.\n  const Bson::Document* doc_to_use = query.query();\n  const Bson::Field* field = query.query()->find(\"$query\", Bson::Field::Type::Document);\n  if (field) {\n    doc_to_use = &field->asDocument();\n  }\n\n  if (doc_to_use->values().empty()) {\n    throw EnvoyException(\"invalid query command\");\n  }\n\n  return doc_to_use;\n}\n\nstd::string QueryMessageInfo::parseCallingFunction(const QueryMessage& query) {\n  const Bson::Field* field = query.query()->find(\"$comment\", Bson::Field::Type::String);\n  if (!field) {\n    return \"\";\n  }\n\n  return parseCallingFunctionJson(field->asString());\n}\n\nstd::string QueryMessageInfo::parseCallingFunctionJson(const std::string& json_string) {\n  try {\n    Json::ObjectSharedPtr json = Json::Factory::loadFromString(json_string);\n    return json->getString(\"callingFunction\");\n  } catch (Json::Exception&) {\n    return \"\";\n  }\n}\n\nQueryMessageInfo::QueryType QueryMessageInfo::parseType(const QueryMessage& query) {\n  // First check the top level for _id.\n  QueryType type = parseTypeFromDocument(*query.query());\n  if (type == QueryType::ScatterGet) {\n    // If we didn't find it in the top level, see if we have a top level $query element and look\n    // there.\n    const Bson::Field* field = query.query()->find(\"$query\", Bson::Field::Type::Document);\n    if (field) {\n      type = parseTypeFromDocument(field->asDocument());\n    }\n  }\n\n  return type;\n}\n\nQueryMessageInfo::QueryType\nQueryMessageInfo::parseTypeFromDocument(const Bson::Document& document) {\n  const Bson::Field* field = document.find(\"_id\");\n  if (!field) {\n    return QueryType::ScatterGet;\n  }\n\n  // For now we call any query where _id is equal to a non-scalar value a multi get.\n  if (field->type() == Bson::Field::Type::Document || field->type() == Bson::Field::Type::Array) {\n    return QueryType::MultiGet;\n  }\n\n  return QueryType::PrimaryKey;\n}\n\nvoid 
QueryMessageInfo::parseFindCommand(const Bson::Document& command) {\n  collection_ = command.values().front()->asString();\n  const Bson::Field* comment = command.find(\"comment\", Bson::Field::Type::String);\n  if (comment) {\n    callsite_ = parseCallingFunctionJson(comment->asString());\n  }\n\n  const Bson::Field* filter = command.find(\"filter\", Bson::Field::Type::Document);\n  if (filter) {\n    type_ = parseTypeFromDocument(filter->asDocument());\n  }\n}\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mongo_proxy/utility.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"extensions/filters/network/mongo_proxy/codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\n/**\n * Parses a query into information that can be used for stat gathering.\n */\nclass QueryMessageInfo {\npublic:\n  enum class QueryType { PrimaryKey, MultiGet, ScatterGet };\n\n  QueryMessageInfo(const QueryMessage& query);\n\n  /**\n   * @return the query's request ID.\n   */\n  int32_t requestId() { return request_id_; }\n\n  /**\n   * @return the collection name with the database name removed, or \"\" if a command.\n   */\n  const std::string& collection() { return collection_; }\n\n  /**\n   * @return calling function if it can be found in the query. The calling function is found by:\n   *         1) Looking for a top level query field name $comment\n   *         2) Parsing $comment as a JSON string\n   *         3) Accessing the 'callingFunction' field in the JSON.\n   *         \"\" is returned if any of the above fails.\n   */\n  const std::string& callsite() { return callsite_; }\n\n  /**\n   * @return the value of maxTimeMS or 0 if not given.\n   */\n  int32_t maxTime() { return max_time_; }\n\n  /**\n   * @return the type of a query message.\n   */\n  QueryType type() { return type_; }\n\n  /**\n   * @return the name of the command if the query is a command, otherwise \"\".\n   */\n  const std::string& command() { return command_; }\n\nprivate:\n  std::string parseCallingFunction(const QueryMessage& query);\n  std::string parseCallingFunctionJson(const std::string& json_string);\n  std::string parseCollection(const std::string& full_collection_name);\n  int32_t parseMaxTime(const QueryMessage& query);\n  const Bson::Document* parseCommand(const QueryMessage& query);\n  void parseFindCommand(const Bson::Document& command);\n  QueryType parseType(const QueryMessage& query);\n  QueryType parseTypeFromDocument(const Bson::Document& 
document);\n\n  int32_t request_id_;\n  std::string collection_;\n  std::string callsite_;\n  int32_t max_time_;\n  QueryType type_{QueryType::ScatterGet};\n  std::string command_;\n};\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# MySQL proxy L7 network filter.\n# Public docs: docs/root/configuration/network_filters/mysql_proxy_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"proxy_lib\",\n    srcs = [\n        \"mysql_codec_clogin.cc\",\n        \"mysql_codec_clogin_resp.cc\",\n        \"mysql_codec_command.cc\",\n        \"mysql_codec_greeting.cc\",\n        \"mysql_codec_switch_resp.cc\",\n        \"mysql_decoder.cc\",\n        \"mysql_filter.cc\",\n        \"mysql_utils.cc\",\n    ],\n    hdrs = [\n        \"mysql_codec.h\",\n        \"mysql_codec_clogin.h\",\n        \"mysql_codec_clogin_resp.h\",\n        \"mysql_codec_command.h\",\n        \"mysql_codec_greeting.h\",\n        \"mysql_codec_switch_resp.h\",\n        \"mysql_decoder.h\",\n        \"mysql_filter.h\",\n        \"mysql_session.h\",\n        \"mysql_utils.h\",\n    ],\n    deps = [\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/extensions/common/sqlutils:sqlutils_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"mysql_config.cc\"],\n    hdrs = [\"mysql_config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"alpha\",\n    deps = [\n        \":proxy_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        
\"@envoy_api//envoy/extensions/filters/network/mysql_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec.h",
    "content": "#pragma once\n#include <cstdint>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nconstexpr uint16_t MYSQL_MAX_STR_SIZE = 256;\nconstexpr uint16_t MYSQL_PKT_SIZE = 1500;\nconstexpr uint8_t MYSQL_HDR_SIZE = 4;\nconstexpr uint8_t MYSQL_PROTOCOL_9 = 9;\nconstexpr uint8_t MYSQL_PROTOCOL_10 = 10;\nconstexpr uint8_t MYSQL_PKT_0 = 0;\nconstexpr uint8_t MYSQL_UNAME_PKT_NUM = 1;\nconstexpr uint32_t MYSQL_HDR_PKT_SIZE_MASK = 0x00FFFFFF;\nconstexpr uint32_t MYSQL_HDR_SEQ_MASK = 0x000000FF;\nconstexpr uint8_t MYSQL_LOGIN_RESP_PKT_NUM = 2;\nconstexpr uint8_t MYSQL_REQUEST_PKT_NUM = 0;\nconstexpr uint8_t MYSQL_RESPONSE_PKT_NUM = 1;\nconstexpr uint16_t MAX_MYSQL_QUERY_STRING = 256;\nconstexpr uint16_t MAX_MYSQL_USER_STRING = 256;\nconstexpr uint8_t MIN_RESPONSE_PAYLOAD = 5;\nconstexpr uint8_t MYSQL_MAX_USER_LEN = 32;\nconstexpr uint8_t MYSQL_MAX_PASSWD_LEN = 32;\nconstexpr uint8_t MYSQL_RESP_OK = 0x00;\nconstexpr uint8_t MYSQL_RESP_MORE = 0x01;\nconstexpr uint8_t MYSQL_RESP_AUTH_SWITCH = 0xfe;\nconstexpr uint8_t MYSQL_RESP_ERR = 0xff;\n\nconstexpr uint8_t EOF_MARKER = 0xfe;\nconstexpr uint8_t ERR_MARKER = 0xff;\n\nconstexpr uint8_t CLIENT_CAP_FLD = 2;\nconstexpr uint8_t EXT_CLIENT_CAP_FLD = 2;\nconstexpr uint8_t MAX_PKT_FLD = 4;\nconstexpr uint8_t CHARSET_FLD = 1;\nconstexpr uint8_t UNAME_RSVD_STR = 23;\n\nconstexpr uint8_t FILLER_1_SIZE = 1;\nconstexpr uint8_t FILLER_2_SIZE = 2;\nconstexpr uint8_t FILLER_3_SIZE = 3;\nconstexpr uint8_t MYSQL_DEFAULT = 4;\nconstexpr uint8_t CHARACTER_SET_SIZE = 2;\n\nconstexpr uint8_t MAX_TABLE_COLUMNS = 64;\nconstexpr uint8_t MAX_TABLE_ROWS = 128;\n\nconstexpr uint8_t LAYOUT_CTLG = 0;\nconstexpr uint8_t LAYOUT_DB = 1;\nconstexpr uint8_t LAYOUT_TBL = 2;\nconstexpr uint8_t LAYOUT_ORG_TBL = 3;\nconstexpr uint8_t LAYOUT_NAME = 4;\nconstexpr uint8_t LAYOUT_ORG_NAME = 
5;\nconstexpr uint8_t MYSQL_CATALOG_LAYOUT = 6;\nconstexpr uint8_t MULTI_CLIENT = 10;\nconstexpr uint8_t LOGIN_OK_SEQ = 2;\nconstexpr uint8_t GREETING_SEQ_NUM = 0;\nconstexpr uint8_t CHALLENGE_SEQ_NUM = 1;\nconstexpr uint8_t CHALLENGE_RESP_SEQ_NUM = 2;\nconstexpr uint8_t AUTH_SWITH_RESP_SEQ = 3;\nconstexpr uint32_t MYSQL_THREAD_ID = 0x5e;\nconstexpr uint16_t MYSQL_SERVER_CAPAB = 0x0101;\nconstexpr uint8_t MYSQL_SERVER_LANGUAGE = 0x21;\nconstexpr uint16_t MYSQL_SERVER_STATUS = 0x0200;\nconstexpr uint16_t MYSQL_SERVER_EXT_CAPAB = 0x0200;\nconstexpr uint8_t MYSQL_AUTHPLGIN = 0x00;\nconstexpr uint8_t MYSQL_UNSET = 0x00;\nconstexpr uint8_t MYSQL_UNSET_SIZE = 10;\nconstexpr uint16_t MYSQL_CLIENT_CONNECT_WITH_DB = 0x0008;\nconstexpr uint16_t MYSQL_CLIENT_CAPAB_41VS320 = 0x0200;\nconstexpr uint16_t MYSQL_CLIENT_CAPAB_SSL = 0x0800;\nconstexpr uint16_t MYSQL_EXT_CLIENT_CAPAB = 0x0300;\nconstexpr uint16_t MYSQL_EXT_CL_PLG_AUTH_CL_DATA = 0x0020;\nconstexpr uint16_t MYSQL_EXT_CL_SECURE_CONNECTION = 0x8000;\nconstexpr uint32_t MYSQL_MAX_PACKET = 0x00000001;\nconstexpr uint8_t MYSQL_CHARSET = 0x21;\n\nconstexpr uint8_t LENENCODINT_1BYTE = 0xfb;\nconstexpr uint8_t LENENCODINT_2BYTES = 0xfc;\nconstexpr uint8_t LENENCODINT_3BYTES = 0xfd;\nconstexpr uint8_t LENENCODINT_8BYTES = 0xfe;\n\nconstexpr int MYSQL_SUCCESS = 0;\nconstexpr int MYSQL_FAILURE = -1;\nconstexpr char MYSQL_STR_END = '\\0';\n\nclass MySQLCodec : public Logger::Loggable<Logger::Id::filter> {\npublic:\n  enum class PktType {\n    MysqlRequest = 0,\n    MysqlResponse = 1,\n  };\n\n  virtual ~MySQLCodec() = default;\n\n  int decode(Buffer::Instance& data, uint8_t seq, uint32_t len) {\n    seq_ = seq;\n    return parseMessage(data, len);\n  }\n\n  virtual std::string encode() PURE;\n\nprotected:\n  virtual int parseMessage(Buffer::Instance& data, uint32_t len) PURE;\n\n  uint8_t seq_;\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nvoid ClientLogin::setClientCap(int client_cap) { client_cap_ = client_cap; }\n\nvoid ClientLogin::setExtendedClientCap(int extended_client_cap) {\n  extended_client_cap_ = extended_client_cap;\n}\n\nvoid ClientLogin::setMaxPacket(int max_packet) { max_packet_ = max_packet; }\n\nvoid ClientLogin::setCharset(int charset) { charset_ = charset; }\n\nvoid ClientLogin::setUsername(std::string& username) {\n  if (username.length() <= MYSQL_MAX_USER_LEN) {\n    username_.assign(username);\n  }\n}\n\nvoid ClientLogin::setDb(std::string& db) { db_ = db; }\n\nvoid ClientLogin::setAuthResp(std::string& auth_resp) { auth_resp_.assign(auth_resp); }\n\nbool ClientLogin::isResponse41() const { return client_cap_ & MYSQL_CLIENT_CAPAB_41VS320; }\n\nbool ClientLogin::isResponse320() const { return !(client_cap_ & MYSQL_CLIENT_CAPAB_41VS320); }\n\nbool ClientLogin::isSSLRequest() const { return client_cap_ & MYSQL_CLIENT_CAPAB_SSL; }\n\nbool ClientLogin::isConnectWithDb() const { return client_cap_ & MYSQL_CLIENT_CONNECT_WITH_DB; }\n\nbool ClientLogin::isClientAuthLenClData() const {\n  return extended_client_cap_ & MYSQL_EXT_CL_PLG_AUTH_CL_DATA;\n}\n\nbool ClientLogin::isClientSecureConnection() const {\n  return extended_client_cap_ & MYSQL_EXT_CL_SECURE_CONNECTION;\n}\n\nint ClientLogin::parseMessage(Buffer::Instance& buffer, uint32_t) {\n  uint16_t client_cap = 0;\n  if (BufferHelper::readUint16(buffer, client_cap) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing client_cap in mysql ClientLogin msg\");\n    return MYSQL_FAILURE;\n  }\n  setClientCap(client_cap);\n  uint16_t extended_client_cap = 0;\n  if (BufferHelper::readUint16(buffer, 
extended_client_cap) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing extended_client_cap in mysql ClientLogin msg\");\n    return MYSQL_FAILURE;\n  }\n  setExtendedClientCap(extended_client_cap);\n  uint32_t max_packet = 0;\n  if (BufferHelper::readUint32(buffer, max_packet) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing max_packet in mysql ClientLogin msg\");\n    return MYSQL_FAILURE;\n  }\n  setMaxPacket(max_packet);\n  if (isSSLRequest()) {\n    // Stop Parsing if CLIENT_SSL flag is set\n    return MYSQL_SUCCESS;\n  }\n  uint8_t charset = 0;\n  if (BufferHelper::readUint8(buffer, charset) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing charset in mysql ClientLogin msg\");\n    return MYSQL_FAILURE;\n  }\n  setCharset(charset);\n  if (BufferHelper::readBytes(buffer, UNSET_BYTES) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error skipping unset bytes in mysql ClientLogin msg\");\n    return MYSQL_FAILURE;\n  }\n  std::string username;\n  if (BufferHelper::readString(buffer, username) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing username in mysql ClientLogin msg\");\n    return MYSQL_FAILURE;\n  }\n  setUsername(username);\n  std::string auth_resp;\n  if (isClientAuthLenClData()) {\n    uint64_t auth_resp_len = 0;\n    if (BufferHelper::readLengthEncodedInteger(buffer, auth_resp_len) != MYSQL_SUCCESS) {\n      ENVOY_LOG(info, \"error parsing LengthEncodedInteger in mysql ClientLogin msg\");\n      return MYSQL_FAILURE;\n    }\n    if (BufferHelper::readStringBySize(buffer, auth_resp_len, auth_resp) != MYSQL_SUCCESS) {\n      ENVOY_LOG(info, \"error parsing auth_resp in mysql ClientLogin msg\");\n      return MYSQL_FAILURE;\n    }\n  } else if (isClientSecureConnection()) {\n    uint8_t auth_resp_len = 0;\n    if (BufferHelper::readUint8(buffer, auth_resp_len) != MYSQL_SUCCESS) {\n      ENVOY_LOG(info, \"error parsing auth_resp_len in mysql ClientLogin msg\");\n      return MYSQL_FAILURE;\n    }\n    if 
(BufferHelper::readStringBySize(buffer, auth_resp_len, auth_resp) != MYSQL_SUCCESS) {\n      ENVOY_LOG(info, \"error parsing auth_resp in mysql ClientLogin msg\");\n      return MYSQL_FAILURE;\n    }\n  } else {\n    if (BufferHelper::readString(buffer, auth_resp) != MYSQL_SUCCESS) {\n      ENVOY_LOG(info, \"error parsing auth_resp in mysql ClientLogin msg\");\n      return MYSQL_FAILURE;\n    }\n  }\n  setAuthResp(auth_resp);\n  if (isConnectWithDb()) {\n    std::string db;\n    if (BufferHelper::readString(buffer, db) != MYSQL_SUCCESS) {\n      ENVOY_LOG(info, \"error parsing auth_resp in mysql ClientLogin msg\");\n      return MYSQL_FAILURE;\n    }\n    setDb(db);\n  }\n  return MYSQL_SUCCESS;\n}\n\nstd::string ClientLogin::encode() {\n  uint8_t enc_end_string = 0;\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  BufferHelper::addUint16(*buffer, client_cap_);\n  BufferHelper::addUint16(*buffer, extended_client_cap_);\n  BufferHelper::addUint32(*buffer, max_packet_);\n  BufferHelper::addUint8(*buffer, charset_);\n  for (int idx = 0; idx < UNSET_BYTES; idx++) {\n    BufferHelper::addUint8(*buffer, 0);\n  }\n  BufferHelper::addString(*buffer, username_);\n  BufferHelper::addUint8(*buffer, enc_end_string);\n  if ((extended_client_cap_ & MYSQL_EXT_CL_PLG_AUTH_CL_DATA) ||\n      (extended_client_cap_ & MYSQL_EXT_CL_SECURE_CONNECTION)) {\n    BufferHelper::addUint8(*buffer, auth_resp_.length());\n    BufferHelper::addString(*buffer, auth_resp_);\n  } else {\n    BufferHelper::addString(*buffer, auth_resp_);\n    BufferHelper::addUint8(*buffer, enc_end_string);\n  }\n  if (client_cap_ & MYSQL_CLIENT_CONNECT_WITH_DB) {\n    BufferHelper::addString(*buffer, db_);\n    BufferHelper::addUint8(*buffer, enc_end_string);\n  }\n\n  return buffer->toString();\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h",
    "content": "#pragma once\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nconstexpr int UNSET_BYTES = 23;\nclass ClientLogin : public MySQLCodec {\npublic:\n  // MySQLCodec\n  int parseMessage(Buffer::Instance& buffer, uint32_t len) override;\n  std::string encode() override;\n\n  int getClientCap() const { return client_cap_; }\n  int getExtendedClientCap() const { return extended_client_cap_; }\n  int getMaxPacket() const { return max_packet_; }\n  int getCharset() const { return charset_; }\n  const std::string& getUsername() const { return username_; }\n  const std::string& getAuthResp() const { return auth_resp_; }\n  const std::string& getDb() const { return db_; }\n  bool isResponse41() const;\n  bool isResponse320() const;\n  bool isSSLRequest() const;\n  bool isConnectWithDb() const;\n  bool isClientAuthLenClData() const;\n  bool isClientSecureConnection() const;\n  void setClientCap(int client_cap);\n  void setExtendedClientCap(int extended_client_cap);\n  void setMaxPacket(int max_packet);\n  void setCharset(int charset);\n  void setUsername(std::string& username);\n  void setAuthResp(std::string& auth_resp);\n  void setDb(std::string& db);\n\nprivate:\n  int client_cap_;\n  int extended_client_cap_;\n  int max_packet_;\n  int charset_;\n  std::string username_;\n  std::string auth_resp_;\n  std::string db_;\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nvoid ClientLoginResponse::setRespCode(uint8_t resp_code) { resp_code_ = resp_code; }\n\nvoid ClientLoginResponse::setAffectedRows(uint8_t affected_rows) { affected_rows_ = affected_rows; }\n\nvoid ClientLoginResponse::setLastInsertId(uint8_t last_insert_id) {\n  last_insert_id_ = last_insert_id;\n}\n\nvoid ClientLoginResponse::setServerStatus(uint16_t status) { server_status_ = status; }\n\nvoid ClientLoginResponse::setWarnings(uint16_t warnings) { warnings_ = warnings; }\n\nint ClientLoginResponse::parseMessage(Buffer::Instance& buffer, uint32_t) {\n  uint8_t resp_code = 0;\n  if (BufferHelper::readUint8(buffer, resp_code) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing response code in mysql Login Ok msg\");\n    return MYSQL_FAILURE;\n  }\n  setRespCode(resp_code);\n  if ((resp_code == MYSQL_RESP_AUTH_SWITCH) && BufferHelper::endOfBuffer(buffer)) {\n    // OldAuthSwitchRequest\n    return MYSQL_SUCCESS;\n  }\n  uint8_t affected_rows = 0;\n  if (BufferHelper::readUint8(buffer, affected_rows) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing affected_rows in mysql Login Ok msg\");\n    return MYSQL_FAILURE;\n  }\n  setAffectedRows(affected_rows);\n  uint8_t last_insert_id = 0;\n  if (BufferHelper::readUint8(buffer, last_insert_id) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing last_insert_id in mysql Login Ok msg\");\n    return MYSQL_FAILURE;\n  }\n  setLastInsertId(last_insert_id);\n  uint16_t server_status = 0;\n  if (BufferHelper::readUint16(buffer, server_status) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing server_status in mysql Login Ok msg\");\n    return MYSQL_FAILURE;\n  }\n  
setServerStatus(server_status);\n  uint16_t warnings = 0;\n  if (BufferHelper::readUint16(buffer, warnings) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing warnings in mysql Login Ok msg\");\n    return MYSQL_FAILURE;\n  }\n  setWarnings(warnings);\n  return MYSQL_SUCCESS;\n}\n\nstd::string ClientLoginResponse::encode() {\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  BufferHelper::addUint8(*buffer, resp_code_);\n  BufferHelper::addUint8(*buffer, affected_rows_);\n  BufferHelper::addUint8(*buffer, last_insert_id_);\n  BufferHelper::addUint16(*buffer, server_status_);\n  BufferHelper::addUint16(*buffer, warnings_);\n\n  std::string e_string = buffer->toString();\n  return e_string;\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h",
    "content": "#pragma once\n#include <cstdint>\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nclass ClientLoginResponse : public MySQLCodec {\npublic:\n  // MySQLCodec\n  int parseMessage(Buffer::Instance& buffer, uint32_t len) override;\n  std::string encode() override;\n\n  uint8_t getRespCode() const { return resp_code_; }\n  uint8_t getAffectedRows() const { return affected_rows_; }\n  uint8_t getLastInsertId() const { return last_insert_id_; }\n  uint16_t getServerStatus() const { return server_status_; }\n  uint16_t getWarnings() const { return warnings_; }\n  void setRespCode(uint8_t resp_code);\n  void setAffectedRows(uint8_t affected_rows);\n  void setLastInsertId(uint8_t last_insert_id);\n  void setServerStatus(uint16_t status);\n  void setWarnings(uint16_t warnings);\n\nprivate:\n  uint8_t resp_code_;\n  uint8_t affected_rows_;\n  uint8_t last_insert_id_;\n  uint16_t server_status_;\n  uint16_t warnings_;\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_command.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_codec_command.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nCommand::Cmd Command::parseCmd(Buffer::Instance& data) {\n  uint8_t cmd;\n  if (BufferHelper::readUint8(data, cmd) != MYSQL_SUCCESS) {\n    return Command::Cmd::Null;\n  }\n  return static_cast<Command::Cmd>(cmd);\n}\n\nvoid Command::setCmd(Command::Cmd cmd) { cmd_ = cmd; }\n\nvoid Command::setDb(std::string db) { db_ = db; }\n\nint Command::parseMessage(Buffer::Instance& buffer, uint32_t len) {\n  Command::Cmd cmd = parseCmd(buffer);\n  setCmd(cmd);\n  if (cmd == Command::Cmd::Null) {\n    return MYSQL_FAILURE;\n  }\n\n  switch (cmd) {\n  case Command::Cmd::InitDb:\n  case Command::Cmd::CreateDb:\n  case Command::Cmd::DropDb: {\n    std::string db = \"\";\n    BufferHelper::readStringBySize(buffer, len - 1, db);\n    setDb(db);\n    break;\n  }\n\n  case Command::Cmd::Query:\n    is_query_ = true;\n    // query string starts after one byte for comm type\n    BufferHelper::readStringBySize(buffer, len - 1, data_);\n    setDb(\"\");\n    break;\n\n  default:\n    setDb(\"\");\n    break;\n  }\n\n  return MYSQL_SUCCESS;\n}\n\nvoid Command::setData(std::string& data) { data_.assign(data); }\n\nstd::string Command::encode() {\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n\n  BufferHelper::addUint8(*buffer, static_cast<int>(cmd_));\n  BufferHelper::addString(*buffer, data_);\n  std::string e_string = buffer->toString();\n  return e_string;\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_command.h",
    "content": "#pragma once\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nclass Command : public MySQLCodec {\npublic:\n  enum class Cmd {\n    Null = -1,\n    Sleep = 0,\n    Quit = 1,\n    InitDb = 2,\n    Query = 3,\n    FieldList = 4,\n    CreateDb = 5,\n    DropDb = 6,\n    Refresh = 7,\n    Shutdown = 8,\n    Statistics = 9,\n    ProcessInfo = 10,\n    Connect = 11,\n    ProcessKill = 12,\n    Debug = 13,\n    Ping = 14,\n    Time = 15,\n    DelayedInsert = 16,\n    ChangeUser = 17,\n    Daemon = 29,\n    ResetConnection = 31,\n  };\n\n  // MySQLCodec\n  int parseMessage(Buffer::Instance&, uint32_t len) override;\n  std::string encode() override;\n\n  Cmd parseCmd(Buffer::Instance& data);\n  Cmd getCmd() const { return cmd_; }\n  const std::string& getData() const { return data_; }\n  std::string& getDb() { return db_; }\n  void setCmd(Cmd cmd);\n  void setData(std::string& data);\n  void setDb(std::string db);\n  bool isQuery() { return is_query_; }\n\nprivate:\n  Cmd cmd_;\n  std::string data_;\n  std::string db_;\n  bool is_query_;\n};\n\nclass CommandResponse : public MySQLCodec {\npublic:\n  // MySQLCodec\n  int parseMessage(Buffer::Instance&, uint32_t) override { return MYSQL_SUCCESS; }\n  std::string encode() override { return \"\"; }\n\n  void setServerStatus(uint16_t status);\n  void setWarnings(uint16_t warnings);\n\nprivate:\n  uint16_t server_status_;\n  uint16_t warnings_;\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_codec_greeting.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nvoid ServerGreeting::setProtocol(int protocol) { protocol_ = protocol; }\n\nvoid ServerGreeting::setVersion(std::string& version) { version_.assign(version); }\n\nvoid ServerGreeting::setThreadId(int thread_id) { thread_id_ = thread_id; }\n\nvoid ServerGreeting::setSalt(std::string& salt) { salt_ = salt; }\n\nvoid ServerGreeting::setServerCap(int server_cap) { server_cap_ = server_cap; }\n\nvoid ServerGreeting::setServerLanguage(int server_language) { server_language_ = server_language; }\n\nvoid ServerGreeting::setServerStatus(int server_status) { server_status_ = server_status; }\n\nvoid ServerGreeting::setExtServerCap(int ext_server_cap) { ext_server_cap_ = ext_server_cap; }\n\nint ServerGreeting::parseMessage(Buffer::Instance& buffer, uint32_t) {\n  uint8_t protocol = 0;\n  if (BufferHelper::readUint8(buffer, protocol) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing protocol in mysql Greeting msg\");\n    return MYSQL_FAILURE;\n  }\n  setProtocol(protocol);\n  std::string version;\n  if (BufferHelper::readString(buffer, version) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing version in mysql Greeting msg\");\n    return MYSQL_FAILURE;\n  }\n  setVersion(version);\n  uint32_t thread_id = 0;\n  if (BufferHelper::readUint32(buffer, thread_id) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing thread_id in mysql Greeting msg\");\n    return MYSQL_FAILURE;\n  }\n  setThreadId(thread_id);\n  std::string salt;\n  if (BufferHelper::readString(buffer, salt) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing salt in mysql Greeting msg\");\n    return MYSQL_FAILURE;\n  }\n  setSalt(salt);\n  if (protocol_ == 
MYSQL_PROTOCOL_9) {\n    // End of HandshakeV9 greeting\n    return MYSQL_SUCCESS;\n  }\n  uint16_t server_cap = 0;\n  if (BufferHelper::readUint16(buffer, server_cap) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing server_cap in mysql Greeting msg\");\n    return MYSQL_FAILURE;\n  }\n  setServerCap(server_cap);\n  if (BufferHelper::endOfBuffer(buffer)) {\n    // HandshakeV10 can terminate after Server Capabilities\n    return MYSQL_SUCCESS;\n  }\n  uint8_t server_language = 0;\n  if (BufferHelper::readUint8(buffer, server_language) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing server_language in mysql Greeting msg\");\n    return MYSQL_FAILURE;\n  }\n  setServerLanguage(server_language);\n  uint16_t server_status = 0;\n  if (BufferHelper::readUint16(buffer, server_status) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing server_status in mysql Greeting msg\");\n    return MYSQL_FAILURE;\n  }\n  setServerStatus(server_status);\n  uint16_t ext_server_cap = 0;\n  if (BufferHelper::readUint16(buffer, ext_server_cap) != MYSQL_SUCCESS) {\n    ENVOY_LOG(info, \"error parsing ext_server_cap in mysql Greeting msg\");\n    return MYSQL_FAILURE;\n  }\n  setExtServerCap(ext_server_cap);\n  return MYSQL_SUCCESS;\n}\n\nstd::string ServerGreeting::encode() {\n  uint8_t enc_end_string = 0;\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  BufferHelper::addUint8(*buffer, protocol_);\n  BufferHelper::addString(*buffer, version_);\n  BufferHelper::addUint8(*buffer, enc_end_string);\n  BufferHelper::addUint32(*buffer, thread_id_);\n  BufferHelper::addString(*buffer, salt_);\n  BufferHelper::addUint8(*buffer, enc_end_string);\n  BufferHelper::addUint16(*buffer, server_cap_);\n  BufferHelper::addUint8(*buffer, server_language_);\n  BufferHelper::addUint16(*buffer, server_status_);\n  BufferHelper::addUint16(*buffer, ext_server_cap_);\n\n  return buffer->toString();\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h",
    "content": "#pragma once\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nclass ServerGreeting : public MySQLCodec {\npublic:\n  // MySQLCodec\n  int parseMessage(Buffer::Instance& buffer, uint32_t len) override;\n  std::string encode() override;\n\n  int getProtocol() const { return protocol_; }\n  const std::string& getVersion() const { return version_; }\n  int getThreadId() const { return thread_id_; }\n  const std::string& getSalt() const { return salt_; };\n  int getServerCap() const { return server_cap_; }\n  int getServerLanguage() const { return server_language_; }\n  int getServerStatus() const { return server_status_; }\n  int getExtServerCap() const { return ext_server_cap_; }\n  void setProtocol(int protocol);\n  void setVersion(std::string& version);\n  void setThreadId(int thread_id);\n  void setSalt(std::string& salt);\n  void setServerCap(int server_cap);\n  void setServerLanguage(int server_language);\n  void setServerStatus(int server_status);\n  void setExtServerCap(int ext_server_cap);\n\nprivate:\n  int protocol_;\n  std::string version_;\n  int thread_id_;\n  std::string salt_;\n  int server_cap_;\n  int server_language_;\n  int server_status_;\n  int ext_server_cap_;\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nvoid ClientSwitchResponse::setAuthPluginResp(std::string& auth_plugin_resp) {\n  auth_plugin_resp_.assign(auth_plugin_resp);\n}\n\nint ClientSwitchResponse::parseMessage(Buffer::Instance&, uint32_t) { return MYSQL_SUCCESS; }\n\nstd::string ClientSwitchResponse::encode() {\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n\n  BufferHelper::addString(*buffer, auth_plugin_resp_);\n  return buffer->toString();\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h",
    "content": "#pragma once\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nclass ClientSwitchResponse : public MySQLCodec {\npublic:\n  // MySQLCodec\n  int parseMessage(Buffer::Instance& buffer, uint32_t len) override;\n  std::string encode() override;\n\n  void setAuthPluginResp(std::string& auth_swith_resp);\n\nprivate:\n  std::string auth_plugin_resp_;\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_config.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_config.h\"\n\n#include <string>\n\n#include \"envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\n/**\n * Config registration for the MySQL proxy filter. @see NamedNetworkFilterConfigFactory.\n */\nNetwork::FilterFactoryCb\nNetworkFilters::MySQLProxy::MySQLConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::mysql_proxy::v3::MySQLProxy& proto_config,\n    Server::Configuration::FactoryContext& context) {\n\n  ASSERT(!proto_config.stat_prefix().empty());\n\n  const std::string stat_prefix = fmt::format(\"mysql.{}\", proto_config.stat_prefix());\n\n  MySQLFilterConfigSharedPtr filter_config(\n      std::make_shared<MySQLFilterConfig>(stat_prefix, context.scope()));\n  return [filter_config](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addFilter(std::make_shared<MySQLFilter>(filter_config));\n  };\n}\n\n/**\n * Static registration for the MySQL proxy filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(MySQLConfigFactory, Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_filter.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\n/**\n * Config registration for the MySQL proxy filter.\n */\nclass MySQLConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::network::mysql_proxy::v3::MySQLProxy> {\npublic:\n  MySQLConfigFactory() : FactoryBase(NetworkFilterNames::get().MySQLProxy) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::mysql_proxy::v3::MySQLProxy& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_decoder.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_decoder.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nvoid DecoderImpl::parseMessage(Buffer::Instance& message, uint8_t seq, uint32_t len) {\n  ENVOY_LOG(trace, \"mysql_proxy: parsing message, seq {}, len {}\", seq, len);\n\n  // Run the MySQL state machine\n  switch (session_.getState()) {\n\n  // Expect Server Challenge packet\n  case MySQLSession::State::Init: {\n    ServerGreeting greeting;\n    greeting.decode(message, seq, len);\n    callbacks_.onServerGreeting(greeting);\n\n    session_.setState(MySQLSession::State::ChallengeReq);\n    break;\n  }\n\n  // Process Client Handshake Response\n  case MySQLSession::State::ChallengeReq: {\n    ClientLogin client_login{};\n    client_login.decode(message, seq, len);\n    callbacks_.onClientLogin(client_login);\n\n    if (client_login.isSSLRequest()) {\n      session_.setState(MySQLSession::State::SslPt);\n    } else if (client_login.isResponse41()) {\n      session_.setState(MySQLSession::State::ChallengeResp41);\n    } else {\n      session_.setState(MySQLSession::State::ChallengeResp320);\n    }\n    break;\n  }\n\n  case MySQLSession::State::SslPt:\n    break;\n\n  case MySQLSession::State::ChallengeResp41:\n  case MySQLSession::State::ChallengeResp320: {\n    ClientLoginResponse client_login_resp{};\n    client_login_resp.decode(message, seq, len);\n    callbacks_.onClientLoginResponse(client_login_resp);\n\n    if (client_login_resp.getRespCode() == MYSQL_RESP_OK) {\n      session_.setState(MySQLSession::State::Req);\n      // reset seq# when entering the REQ state\n      session_.setExpectedSeq(MYSQL_REQUEST_PKT_NUM);\n    } else if (client_login_resp.getRespCode() == MYSQL_RESP_AUTH_SWITCH) {\n      session_.setState(MySQLSession::State::AuthSwitchResp);\n    } else if (client_login_resp.getRespCode() == 
MYSQL_RESP_ERR) {\n      // client/server should close the connection:\n      // https://dev.mysql.com/doc/internals/en/connection-phase.html\n      session_.setState(MySQLSession::State::Error);\n    } else {\n      session_.setState(MySQLSession::State::NotHandled);\n    }\n    break;\n  }\n\n  case MySQLSession::State::AuthSwitchResp: {\n    ClientSwitchResponse client_switch_resp{};\n    client_switch_resp.decode(message, seq, len);\n    callbacks_.onClientSwitchResponse(client_switch_resp);\n\n    session_.setState(MySQLSession::State::AuthSwitchMore);\n    break;\n  }\n\n  case MySQLSession::State::AuthSwitchMore: {\n    ClientLoginResponse client_login_resp{};\n    client_login_resp.decode(message, seq, len);\n    callbacks_.onMoreClientLoginResponse(client_login_resp);\n\n    if (client_login_resp.getRespCode() == MYSQL_RESP_OK) {\n      session_.setState(MySQLSession::State::Req);\n    } else if (client_login_resp.getRespCode() == MYSQL_RESP_MORE) {\n      session_.setState(MySQLSession::State::AuthSwitchResp);\n    } else if (client_login_resp.getRespCode() == MYSQL_RESP_ERR) {\n      // stop parsing auth req/response, attempt to resync in command state\n      session_.setState(MySQLSession::State::Resync);\n      session_.setExpectedSeq(MYSQL_REQUEST_PKT_NUM);\n    } else {\n      session_.setState(MySQLSession::State::NotHandled);\n    }\n    break;\n  }\n\n  case MySQLSession::State::Resync: {\n    // re-sync to MYSQL_REQ state\n    // expected seq check succeeded, no need to verify\n    session_.setState(MySQLSession::State::Req);\n    FALLTHRU;\n  }\n\n  // Process Command\n  case MySQLSession::State::Req: {\n    Command command{};\n    command.decode(message, seq, len);\n    callbacks_.onCommand(command);\n\n    session_.setState(MySQLSession::State::ReqResp);\n    break;\n  }\n\n  // Process Command Response\n  case MySQLSession::State::ReqResp: {\n    CommandResponse command_resp{};\n    command_resp.decode(message, seq, len);\n    
callbacks_.onCommandResponse(command_resp);\n\n    session_.setState(MySQLSession::State::Req);\n    session_.setExpectedSeq(MYSQL_REQUEST_PKT_NUM);\n    break;\n  }\n\n  case MySQLSession::State::Error:\n  case MySQLSession::State::NotHandled:\n  default:\n    break;\n  }\n\n  ENVOY_LOG(trace, \"mysql_proxy: msg parsed, session in state {}\",\n            static_cast<int>(session_.getState()));\n}\n\nbool DecoderImpl::decode(Buffer::Instance& data) {\n  ENVOY_LOG(trace, \"mysql_proxy: decoding {} bytes\", data.length());\n\n  uint32_t len = 0;\n  uint8_t seq = 0;\n  if (BufferHelper::peekHdr(data, len, seq) != MYSQL_SUCCESS) {\n    throw EnvoyException(\"error parsing mysql packet header\");\n  }\n\n  // If message is split over multiple packets, hold off until the entire message is available.\n  // Consider the size of the header here as it's not consumed yet.\n  if (sizeof(uint32_t) + len > data.length()) {\n    return false;\n  }\n\n  BufferHelper::consumeHdr(data); // Consume the header once the message is fully available.\n  callbacks_.onNewMessage(session_.getState());\n\n  // Ignore duplicate and out-of-sync packets.\n  if (seq != session_.getExpectedSeq()) {\n    callbacks_.onProtocolError();\n    ENVOY_LOG(info, \"mysql_proxy: ignoring out-of-sync packet\");\n    data.drain(len); // Ensure that the whole message was consumed\n    return true;\n  }\n\n  session_.setExpectedSeq(seq + 1);\n\n  const ssize_t data_len = data.length();\n  parseMessage(data, seq, len);\n  const ssize_t consumed_len = data_len - data.length();\n  data.drain(len - consumed_len); // Ensure that the whole message was consumed\n\n  ENVOY_LOG(trace, \"mysql_proxy: {} bytes remaining in buffer\", data.length());\n  return true;\n}\n\nvoid DecoderImpl::onData(Buffer::Instance& data) {\n  // TODO(venilnoronha): handle messages over 16 mb. 
See\n  // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_basic_packets.html#sect_protocol_basic_packets_sending_mt_16mb.\n  while (!BufferHelper::endOfBuffer(data) && decode(data)) {\n  }\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_decoder.h",
    "content": "#pragma once\n#include <cstdint>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/common/sqlutils/sqlutils.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_command.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_greeting.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_session.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\n/**\n * General callbacks for dispatching decoded MySQL messages to a sink.\n */\nclass DecoderCallbacks {\npublic:\n  virtual ~DecoderCallbacks() = default;\n\n  virtual void onProtocolError() PURE;\n  virtual void onNewMessage(MySQLSession::State) PURE;\n  virtual void onServerGreeting(ServerGreeting&) PURE;\n  virtual void onClientLogin(ClientLogin&) PURE;\n  virtual void onClientLoginResponse(ClientLoginResponse&) PURE;\n  virtual void onClientSwitchResponse(ClientSwitchResponse&) PURE;\n  virtual void onMoreClientLoginResponse(ClientLoginResponse&) PURE;\n  virtual void onCommand(Command&) PURE;\n  virtual void onCommandResponse(CommandResponse&) PURE;\n};\n\n/**\n * MySQL message decoder.\n */\nclass Decoder {\npublic:\n  virtual ~Decoder() = default;\n\n  virtual void onData(Buffer::Instance& data) PURE;\n  virtual MySQLSession& getSession() PURE;\n\n  const Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes& getAttributes() const {\n    return attributes_;\n  }\n\nprotected:\n  // Decoder attributes.\n  Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes attributes_;\n};\n\nusing DecoderPtr = std::unique_ptr<Decoder>;\n\nclass DecoderImpl : public Decoder, 
Logger::Loggable<Logger::Id::filter> {\npublic:\n  DecoderImpl(DecoderCallbacks& callbacks) : callbacks_(callbacks) {}\n\n  // MySQLProxy::Decoder\n  void onData(Buffer::Instance& data) override;\n  MySQLSession& getSession() override { return session_; }\n\nprivate:\n  bool decode(Buffer::Instance& data);\n  void parseMessage(Buffer::Instance& message, uint8_t seq, uint32_t len);\n\n  DecoderCallbacks& callbacks_;\n  MySQLSession session_;\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_filter.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_filter.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nMySQLFilterConfig::MySQLFilterConfig(const std::string& stat_prefix, Stats::Scope& scope)\n    : scope_(scope), stats_(generateStats(stat_prefix, scope)) {}\n\nMySQLFilter::MySQLFilter(MySQLFilterConfigSharedPtr config) : config_(std::move(config)) {}\n\nvoid MySQLFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {\n  read_callbacks_ = &callbacks;\n}\n\nNetwork::FilterStatus MySQLFilter::onData(Buffer::Instance& data, bool) {\n  // Safety measure just to make sure that if we have a decoding error we keep going and lose stats.\n  // This can be removed once we are more confident of this code.\n  if (sniffing_) {\n    read_buffer_.add(data);\n    doDecode(read_buffer_);\n  }\n  return Network::FilterStatus::Continue;\n}\n\nNetwork::FilterStatus MySQLFilter::onWrite(Buffer::Instance& data, bool) {\n  // Safety measure just to make sure that if we have a decoding error we keep going and lose stats.\n  // This can be removed once we are more confident of this code.\n  if (sniffing_) {\n    write_buffer_.add(data);\n    doDecode(write_buffer_);\n  }\n  return Network::FilterStatus::Continue;\n}\n\nvoid MySQLFilter::doDecode(Buffer::Instance& buffer) {\n  // Clear dynamic metadata.\n  envoy::config::core::v3::Metadata& dynamic_metadata =\n      read_callbacks_->connection().streamInfo().dynamicMetadata();\n  auto& metadata =\n      (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().MySQLProxy];\n  metadata.mutable_fields()->clear();\n\n  if (!decoder_) {\n    decoder_ = createDecoder(*this);\n  }\n\n  try {\n    
decoder_->onData(buffer);\n  } catch (EnvoyException& e) {\n    ENVOY_LOG(info, \"mysql_proxy: decoding error: {}\", e.what());\n    config_->stats_.decoder_errors_.inc();\n    sniffing_ = false;\n    read_buffer_.drain(read_buffer_.length());\n    write_buffer_.drain(write_buffer_.length());\n  }\n}\n\nDecoderPtr MySQLFilter::createDecoder(DecoderCallbacks& callbacks) {\n  return std::make_unique<DecoderImpl>(callbacks);\n}\n\nvoid MySQLFilter::onProtocolError() { config_->stats_.protocol_errors_.inc(); }\n\nvoid MySQLFilter::onNewMessage(MySQLSession::State state) {\n  if (state == MySQLSession::State::ChallengeReq) {\n    config_->stats_.login_attempts_.inc();\n  }\n}\n\nvoid MySQLFilter::onClientLogin(ClientLogin& client_login) {\n  if (client_login.isSSLRequest()) {\n    config_->stats_.upgraded_to_ssl_.inc();\n  }\n}\n\nvoid MySQLFilter::onClientLoginResponse(ClientLoginResponse& client_login_resp) {\n  if (client_login_resp.getRespCode() == MYSQL_RESP_AUTH_SWITCH) {\n    config_->stats_.auth_switch_request_.inc();\n  } else if (client_login_resp.getRespCode() == MYSQL_RESP_ERR) {\n    config_->stats_.login_failures_.inc();\n  }\n}\n\nvoid MySQLFilter::onMoreClientLoginResponse(ClientLoginResponse& client_login_resp) {\n  if (client_login_resp.getRespCode() == MYSQL_RESP_ERR) {\n    config_->stats_.login_failures_.inc();\n  }\n}\n\nvoid MySQLFilter::onCommand(Command& command) {\n  if (!command.isQuery()) {\n    return;\n  }\n\n  // Parse a given query\n  envoy::config::core::v3::Metadata& dynamic_metadata =\n      read_callbacks_->connection().streamInfo().dynamicMetadata();\n  ProtobufWkt::Struct metadata(\n      (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().MySQLProxy]);\n\n  auto result = Common::SQLUtils::SQLUtils::setMetadata(command.getData(),\n                                                        decoder_->getAttributes(), metadata);\n\n  ENVOY_CONN_LOG(trace, \"mysql_proxy: query processed {}\", 
read_callbacks_->connection(),\n                 command.getData());\n\n  if (!result) {\n    config_->stats_.queries_parse_error_.inc();\n    return;\n  }\n  config_->stats_.queries_parsed_.inc();\n\n  read_callbacks_->connection().streamInfo().setDynamicMetadata(\n      NetworkFilterNames::get().MySQLProxy, metadata);\n}\n\nNetwork::FilterStatus MySQLFilter::onNewConnection() {\n  config_->stats_.sessions_.inc();\n  return Network::FilterStatus::Continue;\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_filter.h",
    "content": "#pragma once\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_command.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_greeting.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_decoder.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_session.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\n/**\n * All MySQL proxy stats. 
@see stats_macros.h\n */\n#define ALL_MYSQL_PROXY_STATS(COUNTER)                                                             \\\n  COUNTER(sessions)                                                                                \\\n  COUNTER(login_attempts)                                                                          \\\n  COUNTER(login_failures)                                                                          \\\n  COUNTER(decoder_errors)                                                                          \\\n  COUNTER(protocol_errors)                                                                         \\\n  COUNTER(upgraded_to_ssl)                                                                         \\\n  COUNTER(auth_switch_request)                                                                     \\\n  COUNTER(queries_parsed)                                                                          \\\n  COUNTER(queries_parse_error)\n\n/**\n * Struct definition for all MySQL proxy stats. 
@see stats_macros.h\n */\nstruct MySQLProxyStats {\n  ALL_MYSQL_PROXY_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration for the MySQL proxy filter.\n */\nclass MySQLFilterConfig {\npublic:\n  MySQLFilterConfig(const std::string& stat_prefix, Stats::Scope& scope);\n\n  const MySQLProxyStats& stats() { return stats_; }\n\n  Stats::Scope& scope_;\n  MySQLProxyStats stats_;\n\nprivate:\n  MySQLProxyStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return MySQLProxyStats{ALL_MYSQL_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n  }\n};\n\nusing MySQLFilterConfigSharedPtr = std::shared_ptr<MySQLFilterConfig>;\n\n/**\n * Implementation of MySQL proxy filter.\n */\nclass MySQLFilter : public Network::Filter, DecoderCallbacks, Logger::Loggable<Logger::Id::filter> {\npublic:\n  MySQLFilter(MySQLFilterConfigSharedPtr config);\n  ~MySQLFilter() override = default;\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override;\n\n  // Network::WriteFilter\n  Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override;\n\n  // MySQLProxy::DecoderCallback\n  void onProtocolError() override;\n  void onNewMessage(MySQLSession::State state) override;\n  void onServerGreeting(ServerGreeting&) override{};\n  void onClientLogin(ClientLogin& message) override;\n  void onClientLoginResponse(ClientLoginResponse& message) override;\n  void onClientSwitchResponse(ClientSwitchResponse&) override{};\n  void onMoreClientLoginResponse(ClientLoginResponse& message) override;\n  void onCommand(Command& message) override;\n  void onCommandResponse(CommandResponse&) override{};\n\n  void doDecode(Buffer::Instance& buffer);\n  DecoderPtr createDecoder(DecoderCallbacks& callbacks);\n  MySQLSession& getSession() { return decoder_->getSession(); 
}\n\nprivate:\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  MySQLFilterConfigSharedPtr config_;\n  Buffer::OwnedImpl read_buffer_;\n  Buffer::OwnedImpl write_buffer_;\n  std::unique_ptr<Decoder> decoder_;\n  bool sniffing_{true};\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_session.h",
    "content": "#pragma once\n#include <cstdint>\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nclass MySQLSession : Logger::Loggable<Logger::Id::filter> {\npublic:\n  enum class State {\n    Init = 0,\n    ChallengeReq = 1,\n    ChallengeResp41 = 2,\n    ChallengeResp320 = 3,\n    SslPt = 4,\n    AuthSwitchReq = 5,\n    AuthSwitchReqOld = 6,\n    AuthSwitchResp = 7,\n    AuthSwitchMore = 8,\n    ReqResp = 9,\n    Req = 10,\n    Resync = 11,\n    NotHandled = 12,\n    Error = 13,\n  };\n\n  void setState(MySQLSession::State state) { state_ = state; }\n  MySQLSession::State getState() { return state_; }\n  uint8_t getExpectedSeq() { return expected_seq_; }\n  void setExpectedSeq(uint8_t seq) { expected_seq_ = seq; }\n\nprivate:\n  MySQLSession::State state_{State::Init};\n  uint8_t expected_seq_{0};\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_utils.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nvoid BufferHelper::addUint8(Buffer::Instance& buffer, uint8_t val) {\n  buffer.writeLEInt<uint8_t>(val);\n}\n\nvoid BufferHelper::addUint16(Buffer::Instance& buffer, uint16_t val) {\n  buffer.writeLEInt<uint16_t>(val);\n}\n\nvoid BufferHelper::addUint32(Buffer::Instance& buffer, uint32_t val) {\n  buffer.writeLEInt<uint32_t>(val);\n}\n\nvoid BufferHelper::addString(Buffer::Instance& buffer, const std::string& str) { buffer.add(str); }\n\nstd::string BufferHelper::encodeHdr(const std::string& cmd_str, uint8_t seq) {\n  Buffer::OwnedImpl buffer;\n  // First byte contains sequence number, next 3 bytes contain cmd string size\n  uint32_t header = (seq << 24) | (cmd_str.length() & MYSQL_HDR_PKT_SIZE_MASK);\n  addUint32(buffer, header);\n\n  std::string e_string = buffer.toString();\n  e_string.append(cmd_str);\n  return e_string;\n}\n\nbool BufferHelper::endOfBuffer(Buffer::Instance& buffer) { return buffer.length() == 0; }\n\nint BufferHelper::readUint8(Buffer::Instance& buffer, uint8_t& val) {\n  try {\n    val = buffer.peekLEInt<uint8_t>(0);\n    buffer.drain(sizeof(uint8_t));\n    return MYSQL_SUCCESS;\n  } catch (EnvoyException& e) {\n    // buffer underflow\n    return MYSQL_FAILURE;\n  }\n}\n\nint BufferHelper::readUint16(Buffer::Instance& buffer, uint16_t& val) {\n  try {\n    val = buffer.peekLEInt<uint16_t>(0);\n    buffer.drain(sizeof(uint16_t));\n    return MYSQL_SUCCESS;\n  } catch (EnvoyException& e) {\n    // buffer underflow\n    return MYSQL_FAILURE;\n  }\n}\n\nint BufferHelper::readUint32(Buffer::Instance& buffer, uint32_t& val) {\n  try {\n    val = buffer.peekLEInt<uint32_t>(0);\n    buffer.drain(sizeof(uint32_t));\n    return MYSQL_SUCCESS;\n  } catch (EnvoyException& e) {\n    // buffer underflow\n    return MYSQL_FAILURE;\n  }\n}\n\n// Implementation of MySQL lenenc 
encoder based on\n// https://dev.mysql.com/doc/internals/en/integer.html#packet-Protocol::LengthEncodedInteger\nint BufferHelper::readLengthEncodedInteger(Buffer::Instance& buffer, uint64_t& val) {\n  uint8_t byte_val = 0;\n  if (readUint8(buffer, byte_val) == MYSQL_FAILURE) {\n    return MYSQL_FAILURE;\n  }\n  if (byte_val < LENENCODINT_1BYTE) {\n    val = byte_val;\n    return MYSQL_SUCCESS;\n  }\n\n  try {\n    if (byte_val == LENENCODINT_2BYTES) {\n      val = buffer.peekLEInt<uint64_t, sizeof(uint16_t)>(0);\n      buffer.drain(sizeof(uint16_t));\n    } else if (byte_val == LENENCODINT_3BYTES) {\n      val = buffer.peekLEInt<uint64_t, sizeof(uint8_t) * 3>(0);\n      buffer.drain(sizeof(uint8_t) * 3);\n    } else if (byte_val == LENENCODINT_8BYTES) {\n      val = buffer.peekLEInt<uint64_t>(0);\n      buffer.drain(sizeof(uint64_t));\n    } else {\n      return MYSQL_FAILURE;\n    }\n  } catch (EnvoyException& e) {\n    // buffer underflow\n    return MYSQL_FAILURE;\n  }\n\n  return MYSQL_SUCCESS;\n}\n\nint BufferHelper::readBytes(Buffer::Instance& buffer, size_t skip_bytes) {\n  if (buffer.length() < skip_bytes) {\n    return MYSQL_FAILURE;\n  }\n  buffer.drain(skip_bytes);\n  return MYSQL_SUCCESS;\n}\n\nint BufferHelper::readString(Buffer::Instance& buffer, std::string& str) {\n  char end = MYSQL_STR_END;\n  ssize_t index = buffer.search(&end, sizeof(end), 0);\n  if (index == -1) {\n    return MYSQL_FAILURE;\n  }\n  if (static_cast<int>(buffer.length()) < (index + 1)) {\n    return MYSQL_FAILURE;\n  }\n  str.assign(std::string(static_cast<char*>(buffer.linearize(index)), index));\n  str = str.substr(0);\n  buffer.drain(index + 1);\n  return MYSQL_SUCCESS;\n}\n\nint BufferHelper::readStringBySize(Buffer::Instance& buffer, size_t len, std::string& str) {\n  if (buffer.length() < len) {\n    return MYSQL_FAILURE;\n  }\n  str.assign(std::string(static_cast<char*>(buffer.linearize(len)), len));\n  str = str.substr(0);\n  buffer.drain(len);\n  return 
MYSQL_SUCCESS;\n}\n\nint BufferHelper::peekUint32(Buffer::Instance& buffer, uint32_t& val) {\n  try {\n    val = buffer.peekLEInt<uint32_t>(0);\n    return MYSQL_SUCCESS;\n  } catch (EnvoyException& e) {\n    // buffer underflow\n    return MYSQL_FAILURE;\n  }\n}\n\nvoid BufferHelper::consumeHdr(Buffer::Instance& buffer) { buffer.drain(sizeof(uint32_t)); }\n\nint BufferHelper::peekHdr(Buffer::Instance& buffer, uint32_t& len, uint8_t& seq) {\n  uint32_t val = 0;\n  if (peekUint32(buffer, val) != MYSQL_SUCCESS) {\n    return MYSQL_FAILURE;\n  }\n  seq = htobe32(val) & MYSQL_HDR_SEQ_MASK;\n  len = val & MYSQL_HDR_PKT_SIZE_MASK;\n  ENVOY_LOG(trace, \"mysql_proxy: MYSQL-hdrseq {}, len {}\", seq, len);\n  return MYSQL_SUCCESS;\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/mysql_proxy/mysql_utils.h",
    "content": "#pragma once\n#include <cstdint>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/byte_order.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_command.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_greeting.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_session.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\n/**\n * IO helpers for reading/writing MySQL data from/to a buffer.\n * MySQL uses unsigned integer values in Little Endian format only.\n */\nclass BufferHelper : public Logger::Loggable<Logger::Id::filter> {\npublic:\n  static void addUint8(Buffer::Instance& buffer, uint8_t val);\n  static void addUint16(Buffer::Instance& buffer, uint16_t val);\n  static void addUint32(Buffer::Instance& buffer, uint32_t val);\n  static void addString(Buffer::Instance& buffer, const std::string& str);\n  static std::string encodeHdr(const std::string& cmd_str, uint8_t seq);\n  static bool endOfBuffer(Buffer::Instance& buffer);\n  static int readUint8(Buffer::Instance& buffer, uint8_t& val);\n  static int readUint16(Buffer::Instance& buffer, uint16_t& val);\n  static int readUint32(Buffer::Instance& buffer, uint32_t& val);\n  static int readLengthEncodedInteger(Buffer::Instance& buffer, uint64_t& val);\n  static int readBytes(Buffer::Instance& buffer, size_t skip_bytes);\n  static int readString(Buffer::Instance& buffer, std::string& str);\n  static int readStringBySize(Buffer::Instance& buffer, size_t len, std::string& str);\n  static int peekUint32(Buffer::Instance& buffer, uint32_t& val);\n  static void 
consumeHdr(Buffer::Instance& buffer);\n  static int peekHdr(Buffer::Instance& buffer, uint32_t& len, uint8_t& seq);\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n#package(default_visibility = [\"//visibility:public\"])\n\n# PostgresSQL proxy L7 network filter.\n# Public docs: docs/root/configuration/network_filters/postgres_proxy_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"filter\",\n    srcs = [\n        \"postgres_decoder.cc\",\n        \"postgres_filter.cc\",\n        \"postgres_message.cc\",\n    ],\n    hdrs = [\n        \"postgres_decoder.h\",\n        \"postgres_filter.h\",\n        \"postgres_message.h\",\n        \"postgres_session.h\",\n    ],\n    repository = \"@envoy\",\n    deps = [\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/extensions/common/sqlutils:sqlutils_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    repository = \"@envoy\",\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \":filter\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/config.cc",
    "content": "#include \"extensions/filters/network/postgres_proxy/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n/**\n * Config registration for the Postgres proxy filter. @see NamedNetworkFilterConfigFactory.\n */\nNetwork::FilterFactoryCb\nNetworkFilters::PostgresProxy::PostgresConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::postgres_proxy::v3alpha::PostgresProxy& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  ASSERT(!proto_config.stat_prefix().empty());\n\n  const std::string stat_prefix = fmt::format(\"postgres.{}\", proto_config.stat_prefix());\n  const bool enable_sql = PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config, enable_sql_parsing, true);\n\n  PostgresFilterConfigSharedPtr filter_config(\n      std::make_shared<PostgresFilterConfig>(stat_prefix, enable_sql, context.scope()));\n  return [filter_config](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addFilter(std::make_shared<PostgresFilter>(filter_config));\n  };\n}\n\n/**\n * Static registration for the Postgres proxy filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(PostgresConfigFactory, Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/postgres_proxy/postgres_filter.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n/**\n * Config registration for the Postgres proxy filter.\n */\nclass PostgresConfigFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::network::postgres_proxy::v3alpha::PostgresProxy> {\npublic:\n  PostgresConfigFactory() : FactoryBase{NetworkFilterNames::get().PostgresProxy} {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::postgres_proxy::v3alpha::PostgresProxy&\n          proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/postgres_decoder.cc",
    "content": "#include \"extensions/filters/network/postgres_proxy/postgres_decoder.h\"\n\n#include <vector>\n\n#include \"absl/strings/str_split.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n#define BODY_FORMAT(...)                                                                           \\\n  []() -> std::unique_ptr<Message> { return createMsgBodyReader<__VA_ARGS__>(); }\n#define NO_BODY BODY_FORMAT()\n\nvoid DecoderImpl::initialize() {\n  // Special handler for first message of the transaction.\n  first_ =\n      MessageProcessor{\"Startup\", BODY_FORMAT(Int32, Repeated<String>), {&DecoderImpl::onStartup}};\n\n  // Frontend messages.\n  FE_messages_.direction_ = \"Frontend\";\n\n  // Setup handlers for known messages.\n  absl::flat_hash_map<char, MessageProcessor>& FE_known_msgs = FE_messages_.messages_;\n\n  // Handler for known Frontend messages.\n  FE_known_msgs['B'] = MessageProcessor{\n      \"Bind\", BODY_FORMAT(String, String, Array<Int16>, Array<VarByteN>, Array<Int16>), {}};\n  FE_known_msgs['C'] = MessageProcessor{\"Close\", BODY_FORMAT(Byte1, String), {}};\n  FE_known_msgs['d'] = MessageProcessor{\"CopyData\", BODY_FORMAT(ByteN), {}};\n  FE_known_msgs['c'] = MessageProcessor{\"CopyDone\", NO_BODY, {}};\n  FE_known_msgs['f'] = MessageProcessor{\"CopyFail\", BODY_FORMAT(String), {}};\n  FE_known_msgs['D'] = MessageProcessor{\"Describe\", BODY_FORMAT(Byte1, String), {}};\n  FE_known_msgs['E'] = MessageProcessor{\"Execute\", BODY_FORMAT(String, Int32), {}};\n  FE_known_msgs['H'] = MessageProcessor{\"Flush\", NO_BODY, {}};\n  FE_known_msgs['F'] = MessageProcessor{\n      \"FunctionCall\", BODY_FORMAT(Int32, Array<Int16>, Array<VarByteN>, Int16), {}};\n  FE_known_msgs['p'] =\n      MessageProcessor{\"PasswordMessage/GSSResponse/SASLInitialResponse/SASLResponse\",\n                       BODY_FORMAT(Int32, ByteN),\n                       {}};\n  FE_known_msgs['P'] =\n      
MessageProcessor{\"Parse\", BODY_FORMAT(String, String, Array<Int32>), {&DecoderImpl::onParse}};\n  FE_known_msgs['Q'] = MessageProcessor{\"Query\", BODY_FORMAT(String), {&DecoderImpl::onQuery}};\n  FE_known_msgs['S'] = MessageProcessor{\"Sync\", NO_BODY, {}};\n  FE_known_msgs['X'] =\n      MessageProcessor{\"Terminate\", NO_BODY, {&DecoderImpl::decodeFrontendTerminate}};\n\n  // Handler for unknown Frontend messages.\n  FE_messages_.unknown_ =\n      MessageProcessor{\"Other\", BODY_FORMAT(ByteN), {&DecoderImpl::incMessagesUnknown}};\n\n  // Backend messages.\n  BE_messages_.direction_ = \"Backend\";\n\n  // Setup handlers for known messages.\n  absl::flat_hash_map<char, MessageProcessor>& BE_known_msgs = BE_messages_.messages_;\n\n  // Handler for known Backend messages.\n  BE_known_msgs['R'] =\n      MessageProcessor{\"Authentication\", BODY_FORMAT(ByteN), {&DecoderImpl::decodeAuthentication}};\n  BE_known_msgs['K'] = MessageProcessor{\"BackendKeyData\", BODY_FORMAT(Int32, Int32), {}};\n  BE_known_msgs['2'] = MessageProcessor{\"BindComplete\", NO_BODY, {}};\n  BE_known_msgs['3'] = MessageProcessor{\"CloseComplete\", NO_BODY, {}};\n  BE_known_msgs['C'] = MessageProcessor{\n      \"CommandComplete\", BODY_FORMAT(String), {&DecoderImpl::decodeBackendStatements}};\n  BE_known_msgs['d'] = MessageProcessor{\"CopyData\", BODY_FORMAT(ByteN), {}};\n  BE_known_msgs['c'] = MessageProcessor{\"CopyDone\", NO_BODY, {}};\n  BE_known_msgs['G'] = MessageProcessor{\"CopyInResponse\", BODY_FORMAT(Int8, Array<Int16>), {}};\n  BE_known_msgs['H'] = MessageProcessor{\"CopyOutResponse\", BODY_FORMAT(Int8, Array<Int16>), {}};\n  BE_known_msgs['W'] = MessageProcessor{\"CopyBothResponse\", BODY_FORMAT(Int8, Array<Int16>), {}};\n  BE_known_msgs['D'] = MessageProcessor{\"DataRow\", BODY_FORMAT(Array<VarByteN>), {}};\n  BE_known_msgs['I'] = MessageProcessor{\"EmptyQueryResponse\", NO_BODY, {}};\n  BE_known_msgs['E'] = MessageProcessor{\n      \"ErrorResponse\", BODY_FORMAT(Byte1, String), 
{&DecoderImpl::decodeBackendErrorResponse}};\n  BE_known_msgs['V'] = MessageProcessor{\"FunctionCallResponse\", BODY_FORMAT(VarByteN), {}};\n  BE_known_msgs['v'] = MessageProcessor{\"NegotiateProtocolVersion\", BODY_FORMAT(ByteN), {}};\n  BE_known_msgs['n'] = MessageProcessor{\"NoData\", NO_BODY, {}};\n  BE_known_msgs['N'] = MessageProcessor{\n      \"NoticeResponse\", BODY_FORMAT(ByteN), {&DecoderImpl::decodeBackendNoticeResponse}};\n  BE_known_msgs['A'] =\n      MessageProcessor{\"NotificationResponse\", BODY_FORMAT(Int32, String, String), {}};\n  BE_known_msgs['t'] = MessageProcessor{\"ParameterDescription\", BODY_FORMAT(Array<Int32>), {}};\n  BE_known_msgs['S'] = MessageProcessor{\"ParameterStatus\", BODY_FORMAT(String, String), {}};\n  BE_known_msgs['1'] = MessageProcessor{\"ParseComplete\", NO_BODY, {}};\n  BE_known_msgs['s'] = MessageProcessor{\"PortalSuspend\", NO_BODY, {}};\n  BE_known_msgs['Z'] = MessageProcessor{\"ReadyForQuery\", BODY_FORMAT(Byte1), {}};\n  BE_known_msgs['T'] = MessageProcessor{\n      \"RowDescription\",\n      BODY_FORMAT(Array<Sequence<String, Int32, Int16, Int32, Int16, Int32, Int16>>),\n      {}};\n\n  // Handler for unknown Backend messages.\n  BE_messages_.unknown_ =\n      MessageProcessor{\"Other\", BODY_FORMAT(ByteN), {&DecoderImpl::incMessagesUnknown}};\n\n  // Setup hash map for handling backend statements.\n  BE_statements_[\"BEGIN\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incStatements(DecoderCallbacks::StatementType::Other);\n    callbacks_->incTransactions();\n    session_.setInTransaction(true);\n  };\n  BE_statements_[\"ROLLBACK\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incStatements(DecoderCallbacks::StatementType::Noop);\n    callbacks_->incTransactionsRollback();\n    session_.setInTransaction(false);\n  };\n  BE_statements_[\"START\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incStatements(DecoderCallbacks::StatementType::Other);\n    callbacks_->incTransactions();\n    
session_.setInTransaction(true);\n  };\n  BE_statements_[\"COMMIT\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incStatements(DecoderCallbacks::StatementType::Noop);\n    session_.setInTransaction(false);\n    callbacks_->incTransactionsCommit();\n  };\n  BE_statements_[\"SELECT\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incStatements(DecoderCallbacks::StatementType::Select);\n    callbacks_->incTransactions();\n    callbacks_->incTransactionsCommit();\n  };\n  BE_statements_[\"INSERT\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incStatements(DecoderCallbacks::StatementType::Insert);\n    callbacks_->incTransactions();\n    callbacks_->incTransactionsCommit();\n  };\n  BE_statements_[\"UPDATE\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incStatements(DecoderCallbacks::StatementType::Update);\n    callbacks_->incTransactions();\n    callbacks_->incTransactionsCommit();\n  };\n  BE_statements_[\"DELETE\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incStatements(DecoderCallbacks::StatementType::Delete);\n    callbacks_->incTransactions();\n    callbacks_->incTransactionsCommit();\n  };\n\n  // Setup hash map for handling backend ErrorResponse messages.\n  BE_errors_.keywords_[\"ERROR\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incErrors(DecoderCallbacks::ErrorType::Error);\n  };\n  BE_errors_.keywords_[\"FATAL\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incErrors(DecoderCallbacks::ErrorType::Fatal);\n  };\n  BE_errors_.keywords_[\"PANIC\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incErrors(DecoderCallbacks::ErrorType::Panic);\n  };\n  // Setup handler which is called when decoder cannot decode the message and treats it as Unknown\n  // Error message.\n  BE_errors_.unknown_ = [this](DecoderImpl*) -> void {\n    callbacks_->incErrors(DecoderCallbacks::ErrorType::Unknown);\n  };\n\n  // Setup hash map for handling backend NoticeResponse messages.\n  BE_notices_.keywords_[\"WARNING\"] = 
[this](DecoderImpl*) -> void {\n    callbacks_->incNotices(DecoderCallbacks::NoticeType::Warning);\n  };\n  BE_notices_.keywords_[\"NOTICE\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incNotices(DecoderCallbacks::NoticeType::Notice);\n  };\n  BE_notices_.keywords_[\"DEBUG\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incNotices(DecoderCallbacks::NoticeType::Debug);\n  };\n  BE_notices_.keywords_[\"INFO\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incNotices(DecoderCallbacks::NoticeType::Info);\n  };\n  BE_notices_.keywords_[\"LOG\"] = [this](DecoderImpl*) -> void {\n    callbacks_->incNotices(DecoderCallbacks::NoticeType::Log);\n  };\n  // Setup handler which is called when decoder cannot decode the message and treats it as Unknown\n  // Notice message.\n  BE_notices_.unknown_ = [this](DecoderImpl*) -> void {\n    callbacks_->incNotices(DecoderCallbacks::NoticeType::Unknown);\n  };\n}\n\nbool DecoderImpl::parseMessage(Buffer::Instance& data) {\n  ENVOY_LOG(trace, \"postgres_proxy: parsing message, len {}\", data.length());\n\n  // The minimum size of the message sufficient for parsing is 5 bytes.\n  if (data.length() < 5) {\n    // not enough data in the buffer.\n    return false;\n  }\n\n  if (!startup_) {\n    data.copyOut(0, 1, &command_);\n    ENVOY_LOG(trace, \"postgres_proxy: command is {}\", command_);\n  }\n\n  // The 1 byte message type and message length should be in the buffer\n  // Check if the entire message has been read.\n  std::string message;\n  message_len_ = data.peekBEInt<uint32_t>(startup_ ? 0 : 1);\n  if (data.length() < (message_len_ + (startup_ ? 0 : 1))) {\n    ENVOY_LOG(trace, \"postgres_proxy: cannot parse message. Need {} bytes in buffer\",\n              message_len_ + (startup_ ? 
0 : 1));\n    // Not enough data in the buffer.\n    return false;\n  }\n\n  if (startup_) {\n    uint32_t code = data.peekBEInt<uint32_t>(4);\n    // Startup message with 1234 in the most significant 16 bits\n    // indicate request to encrypt.\n    if (code >= 0x04d20000) {\n      ENVOY_LOG(trace, \"postgres_proxy: detected encrypted traffic.\");\n      encrypted_ = true;\n      startup_ = false;\n      incSessionsEncrypted();\n      data.drain(data.length());\n      return false;\n    } else {\n      ENVOY_LOG(debug, \"Detected version {}.{} of Postgres\", code >> 16, code & 0x0000FFFF);\n      // 4 bytes of length and 4 bytes of version code.\n    }\n  }\n\n  data.drain(startup_ ? 4 : 5); // Length plus optional 1st byte.\n\n  uint32_t bytes_to_read = message_len_ - 4;\n  message.assign(std::string(static_cast<char*>(data.linearize(bytes_to_read)), bytes_to_read));\n  setMessage(message);\n\n  ENVOY_LOG(trace, \"postgres_proxy: msg parsed\");\n  return true;\n}\n\nbool DecoderImpl::onData(Buffer::Instance& data, bool frontend) {\n  // If encrypted, just drain the traffic.\n  if (encrypted_) {\n    ENVOY_LOG(trace, \"postgres_proxy: ignoring {} bytes of encrypted data\", data.length());\n    data.drain(data.length());\n    return true;\n  }\n\n  ENVOY_LOG(trace, \"postgres_proxy: decoding {} bytes\", data.length());\n\n  if (!parseMessage(data)) {\n    return false;\n  }\n\n  MsgGroup& msg_processor = std::ref(frontend ? FE_messages_ : BE_messages_);\n  frontend ? 
callbacks_->incMessagesFrontend() : callbacks_->incMessagesBackend();\n\n  // Set processing to the handler of unknown messages.\n  // If message is found, the processing will be updated.\n  std::reference_wrapper<MessageProcessor> msg = msg_processor.unknown_;\n\n  if (startup_) {\n    msg = std::ref(first_);\n    startup_ = false;\n  } else {\n    auto it = msg_processor.messages_.find(command_);\n    if (it != msg_processor.messages_.end()) {\n      msg = std::ref((*it).second);\n    }\n  }\n\n  std::vector<MsgAction>& actions = std::get<2>(msg.get());\n  for (const auto& action : actions) {\n    action(this);\n  }\n\n  // message_len_ specifies total message length including 4 bytes long\n  // \"length\" field. The length of message body is total length minus size\n  // of \"length\" field (4 bytes).\n  uint32_t bytes_to_read = message_len_ - 4;\n\n  ENVOY_LOG(debug, \"({}) command = {} ({})\", msg_processor.direction_, command_,\n            std::get<0>(msg.get()));\n  ENVOY_LOG(debug, \"({}) length = {}\", msg_processor.direction_, message_len_);\n  ENVOY_LOG(debug, \"({}) message = {}\", msg_processor.direction_,\n            genDebugMessage(msg, data, bytes_to_read));\n\n  data.drain(bytes_to_read);\n  ENVOY_LOG(trace, \"postgres_proxy: {} bytes remaining in buffer\", data.length());\n\n  return true;\n}\n\n// Method is called when C (CommandComplete) message has been\n// decoded. It extracts the keyword from message's payload\n// and updates stats associated with that keyword.\nvoid DecoderImpl::decodeBackendStatements() {\n  // The message_ contains the statement. Find space character\n  // and the statement is the first word. 
If space cannot be found\n  // take the whole message.\n  std::string statement = message_.substr(0, message_.find(' '));\n\n  auto it = BE_statements_.find(statement);\n  if (it != BE_statements_.end()) {\n    (*it).second(this);\n  } else {\n    callbacks_->incStatements(DecoderCallbacks::StatementType::Other);\n    callbacks_->incTransactions();\n    callbacks_->incTransactionsCommit();\n  }\n}\n\n// Method is called when X (Terminate) message\n// is encountered by the decoder.\nvoid DecoderImpl::decodeFrontendTerminate() {\n  if (session_.inTransaction()) {\n    session_.setInTransaction(false);\n    callbacks_->incTransactionsRollback();\n  }\n}\n\n// Method does deep inspection of Authentication message.\n// It looks for 4 bytes of zeros, which means that login to\n// database was successful.\nvoid DecoderImpl::decodeAuthentication() {\n  // Check if auth message indicates successful authentication.\n  // Length must be 8 and payload must be 0.\n  if ((8 == message_len_) && (0 == message_.data()[0]) && (0 == message_.data()[1]) &&\n      (0 == message_.data()[2]) && (0 == message_.data()[3])) {\n    incSessionsUnencrypted();\n  }\n}\n\n// Method is used to parse Error and Notice messages. Their syntax is the same, but they\n// use different keywords inside the message and statistics fields are different.\nvoid DecoderImpl::decodeErrorNotice(MsgParserDict& types) {\n  // Error/Notice message should start with character \"S\".\n  if (message_[0] != 'S') {\n    types.unknown_(this);\n    return;\n  }\n\n  for (const auto& it : types.keywords_) {\n    // Try to find a keyword with S prefix or V prefix.\n    // Postgres versions prior to 9.6 use only S prefix while\n    // versions higher than 9.6 use S and V prefixes.\n    if ((message_.find(\"S\" + it.first) != std::string::npos) ||\n        (message_.find(\"V\" + it.first) != std::string::npos)) {\n      it.second(this);\n      return;\n    }\n  }\n\n  // Keyword was not found in the message. 
Count is as Unknown.\n  types.unknown_(this);\n}\n\n// Method parses E (Error) message and looks for string\n// indicating that error happened.\nvoid DecoderImpl::decodeBackendErrorResponse() { decodeErrorNotice(BE_errors_); }\n\n// Method parses N (Notice) message and looks for string\n// indicating its meaning. It can be warning, notice, info, debug or log.\nvoid DecoderImpl::decodeBackendNoticeResponse() { decodeErrorNotice(BE_notices_); }\n\n// Method parses Parse message of the following format:\n// String: The name of the destination prepared statement (an empty string selects the unnamed\n// prepared statement).\n//\n// String: The query string to be parsed.\n//\n// Int16: The number of parameter data\n// types specified (can be zero). Note that this is not an indication of the number of parameters\n// that might appear in the query string, only the number that the frontend wants to pre-specify\n// types for. Then, for each parameter, there is the following:\n//\n// Int32: Specifies the object ID of\n// the parameter data type. Placing a zero here is equivalent to leaving the type unspecified.\nvoid DecoderImpl::onParse() {\n  // The first two strings are separated by \\0.\n  // The first string is optional. If no \\0 is found it means\n  // that the message contains query string only.\n  std::vector<std::string> query_parts = absl::StrSplit(message_, absl::ByChar('\\0'));\n  callbacks_->processQuery(query_parts[1]);\n}\n\nvoid DecoderImpl::onQuery() { callbacks_->processQuery(message_); }\n\n// Method is invoked on clear-text Startup message.\n// The message format is continuous string of the following format:\n// user<username>database<database-name>application_name<application>encoding<encoding-type>\nvoid DecoderImpl::onStartup() {\n  // First 4 bytes of startup message contains version code.\n  // It is skipped. 
After that message contains attributes.\n  attributes_ = absl::StrSplit(message_.substr(4), absl::ByChar('\\0'), absl::SkipEmpty());\n\n  // If \"database\" attribute is not found, default it to \"user\" attribute.\n  if ((attributes_.find(\"database\") == attributes_.end()) &&\n      (attributes_.find(\"user\") != attributes_.end())) {\n    attributes_[\"database\"] = attributes_[\"user\"];\n  }\n}\n\n// Method generates displayable format of currently processed message.\nconst std::string DecoderImpl::genDebugMessage(const MessageProcessor& msg, Buffer::Instance& data,\n                                               uint32_t message_len) {\n  const MsgBodyReader& f = std::get<1>(msg);\n  std::string message = \"Unrecognized\";\n  if (f != nullptr) {\n    const auto msgParser = f();\n    msgParser->read(data, message_len);\n    message = msgParser->toString();\n  }\n  return message;\n}\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/postgres_decoder.h",
    "content": "#pragma once\n#include <cstdint>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/common/sqlutils/sqlutils.h\"\n#include \"extensions/filters/network/postgres_proxy/postgres_message.h\"\n#include \"extensions/filters/network/postgres_proxy/postgres_session.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n// General callbacks for dispatching decoded Postgres messages to a sink.\nclass DecoderCallbacks {\npublic:\n  virtual ~DecoderCallbacks() = default;\n\n  virtual void incMessagesBackend() PURE;\n  virtual void incMessagesFrontend() PURE;\n  virtual void incMessagesUnknown() PURE;\n\n  virtual void incSessionsEncrypted() PURE;\n  virtual void incSessionsUnencrypted() PURE;\n\n  enum class StatementType { Insert, Delete, Select, Update, Other, Noop };\n  virtual void incStatements(StatementType) PURE;\n\n  virtual void incTransactions() PURE;\n  virtual void incTransactionsCommit() PURE;\n  virtual void incTransactionsRollback() PURE;\n\n  enum class NoticeType { Warning, Notice, Debug, Info, Log, Unknown };\n  virtual void incNotices(NoticeType) PURE;\n\n  enum class ErrorType { Error, Fatal, Panic, Unknown };\n  virtual void incErrors(ErrorType) PURE;\n\n  virtual void processQuery(const std::string&) PURE;\n};\n\n// Postgres message decoder.\nclass Decoder {\npublic:\n  virtual ~Decoder() = default;\n\n  virtual bool onData(Buffer::Instance& data, bool frontend) PURE;\n  virtual PostgresSession& getSession() PURE;\n\n  const Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes& getAttributes() const {\n    return attributes_;\n  }\n\nprotected:\n  // Decoder attributes extracted from Startup message.\n  // It can be username, database name, client app type, etc.\n  Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes 
attributes_;\n};\n\nusing DecoderPtr = std::unique_ptr<Decoder>;\n\nclass DecoderImpl : public Decoder, Logger::Loggable<Logger::Id::filter> {\npublic:\n  DecoderImpl(DecoderCallbacks* callbacks) : callbacks_(callbacks) { initialize(); }\n\n  bool onData(Buffer::Instance& data, bool frontend) override;\n  PostgresSession& getSession() override { return session_; }\n\n  void setMessage(std::string message) { message_ = message; }\n  std::string getMessage() { return message_; }\n\n  void setStartup(bool startup) { startup_ = startup; }\n  void initialize();\n\n  bool encrypted() const { return encrypted_; }\n\nprotected:\n  // MsgAction defines the Decoder's method which will be invoked\n  // when a specific message has been decoded.\n  using MsgAction = std::function<void(DecoderImpl*)>;\n\n  // MsgBodyReader is a function which returns a pointer to a Message\n  // class which is able to read the Postgres message body.\n  // The Postgres message body structure depends on the message type.\n  using MsgBodyReader = std::function<std::unique_ptr<Message>()>;\n\n  // MessageProcessor has the following fields:\n  // first - string with message description\n  // second - function which instantiates a Message object of specific type\n  // which is capable of parsing the message's body.\n  // third - vector of Decoder's methods which are invoked when the message\n  // is processed.\n  using MessageProcessor = std::tuple<std::string, MsgBodyReader, std::vector<MsgAction>>;\n\n  // Frontend and Backend messages.\n  using MsgGroup = struct {\n    // String describing direction (Frontend or Backend).\n    std::string direction_;\n    // Hash map indexed by messages' 1st byte points to handlers used for processing messages.\n    absl::flat_hash_map<char, MessageProcessor> messages_;\n    // Handler used for processing messages not found in hash map.\n    MessageProcessor unknown_;\n  };\n\n  // Hash map binding keyword found in a message to an\n  // action to be executed when 
the keyword is found.\n  using KeywordProcessor = absl::flat_hash_map<std::string, MsgAction>;\n\n  // Structure is used for grouping keywords found in a specific message.\n  // Known keywords are dispatched via hash map and unknown keywords\n  // are handled by unknown_.\n  using MsgParserDict = struct {\n    // Handler for known keywords.\n    KeywordProcessor keywords_;\n    // Handler invoked when a keyword is not found in hash map.\n    MsgAction unknown_;\n  };\n\n  bool parseMessage(Buffer::Instance& data);\n  void decode(Buffer::Instance& data);\n  void decodeAuthentication();\n  void decodeBackendStatements();\n  void decodeBackendErrorResponse();\n  void decodeBackendNoticeResponse();\n  void decodeFrontendTerminate();\n  void decodeErrorNotice(MsgParserDict& types);\n  void onQuery();\n  void onParse();\n  void onStartup();\n\n  void incMessagesUnknown() { callbacks_->incMessagesUnknown(); }\n  void incSessionsEncrypted() { callbacks_->incSessionsEncrypted(); }\n  void incSessionsUnencrypted() { callbacks_->incSessionsUnencrypted(); }\n\n  // Helper method generating currently processed message in\n  // displayable format.\n  const std::string genDebugMessage(const MessageProcessor& msg, Buffer::Instance& data,\n                                    uint32_t message_len);\n\n  DecoderCallbacks* callbacks_{};\n  PostgresSession session_{};\n\n  // The following fields store result of message parsing.\n  char command_{};\n  std::string message_;\n  uint32_t message_len_{};\n\n  bool startup_{true};    // startup stage does not have 1st byte command\n  bool encrypted_{false}; // tells if exchange is encrypted\n\n  // Dispatchers for Backend (BE) and Frontend (FE) messages.\n  MsgGroup FE_messages_;\n  MsgGroup BE_messages_;\n\n  // Handler for startup postgres message.\n  // Startup message message which does not start with 1 byte TYPE.\n  // It starts with message length and must be therefore handled\n  // differently.\n  MessageProcessor first_;\n\n  // 
hash map for dispatching backend transaction messages\n  KeywordProcessor BE_statements_;\n\n  MsgParserDict BE_errors_;\n  MsgParserDict BE_notices_;\n};\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/postgres_filter.cc",
    "content": "#include \"extensions/filters/network/postgres_proxy/postgres_filter.h\"\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"extensions/filters/network/postgres_proxy/postgres_decoder.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\nPostgresFilterConfig::PostgresFilterConfig(const std::string& stat_prefix, bool enable_sql_parsing,\n                                           Stats::Scope& scope)\n    : enable_sql_parsing_(enable_sql_parsing), scope_{scope}, stats_{generateStats(stat_prefix,\n                                                                                   scope)} {}\n\nPostgresFilter::PostgresFilter(PostgresFilterConfigSharedPtr config) : config_{config} {\n  if (!decoder_) {\n    decoder_ = createDecoder(this);\n  }\n}\n\n// Network::ReadFilter\nNetwork::FilterStatus PostgresFilter::onData(Buffer::Instance& data, bool) {\n  ENVOY_CONN_LOG(trace, \"postgres_proxy: got {} bytes\", read_callbacks_->connection(),\n                 data.length());\n\n  // Frontend Buffer\n  frontend_buffer_.add(data);\n  doDecode(frontend_buffer_, true);\n\n  return Network::FilterStatus::Continue;\n}\n\nNetwork::FilterStatus PostgresFilter::onNewConnection() { return Network::FilterStatus::Continue; }\n\nvoid PostgresFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {\n  read_callbacks_ = &callbacks;\n}\n\n// Network::WriteFilter\nNetwork::FilterStatus PostgresFilter::onWrite(Buffer::Instance& data, bool) {\n\n  // Backend Buffer\n  backend_buffer_.add(data);\n  doDecode(backend_buffer_, false);\n\n  return Network::FilterStatus::Continue;\n}\n\nDecoderPtr PostgresFilter::createDecoder(DecoderCallbacks* callbacks) {\n  return std::make_unique<DecoderImpl>(callbacks);\n}\n\nvoid PostgresFilter::incMessagesBackend() {\n  config_->stats_.messages_.inc();\n  
config_->stats_.messages_backend_.inc();\n}\n\nvoid PostgresFilter::incMessagesFrontend() {\n  config_->stats_.messages_.inc();\n  config_->stats_.messages_frontend_.inc();\n}\n\nvoid PostgresFilter::incMessagesUnknown() {\n  config_->stats_.messages_.inc();\n  config_->stats_.messages_unknown_.inc();\n}\n\nvoid PostgresFilter::incSessionsEncrypted() {\n  config_->stats_.sessions_.inc();\n  config_->stats_.sessions_encrypted_.inc();\n}\n\nvoid PostgresFilter::incSessionsUnencrypted() {\n  config_->stats_.sessions_.inc();\n  config_->stats_.sessions_unencrypted_.inc();\n}\n\nvoid PostgresFilter::incTransactions() {\n  if (!decoder_->getSession().inTransaction()) {\n    config_->stats_.transactions_.inc();\n  }\n}\n\nvoid PostgresFilter::incTransactionsCommit() {\n  if (!decoder_->getSession().inTransaction()) {\n    config_->stats_.transactions_commit_.inc();\n  }\n}\n\nvoid PostgresFilter::incTransactionsRollback() {\n  if (!decoder_->getSession().inTransaction()) {\n    config_->stats_.transactions_rollback_.inc();\n  }\n}\n\nvoid PostgresFilter::incNotices(NoticeType type) {\n  config_->stats_.notices_.inc();\n  switch (type) {\n  case DecoderCallbacks::NoticeType::Warning:\n    config_->stats_.notices_warning_.inc();\n    break;\n  case DecoderCallbacks::NoticeType::Notice:\n    config_->stats_.notices_notice_.inc();\n    break;\n  case DecoderCallbacks::NoticeType::Debug:\n    config_->stats_.notices_debug_.inc();\n    break;\n  case DecoderCallbacks::NoticeType::Info:\n    config_->stats_.notices_info_.inc();\n    break;\n  case DecoderCallbacks::NoticeType::Log:\n    config_->stats_.notices_log_.inc();\n    break;\n  case DecoderCallbacks::NoticeType::Unknown:\n    config_->stats_.notices_unknown_.inc();\n    break;\n  }\n}\n\nvoid PostgresFilter::incErrors(ErrorType type) {\n  config_->stats_.errors_.inc();\n  switch (type) {\n  case DecoderCallbacks::ErrorType::Error:\n    config_->stats_.errors_error_.inc();\n    break;\n  case 
DecoderCallbacks::ErrorType::Fatal:\n    config_->stats_.errors_fatal_.inc();\n    break;\n  case DecoderCallbacks::ErrorType::Panic:\n    config_->stats_.errors_panic_.inc();\n    break;\n  case DecoderCallbacks::ErrorType::Unknown:\n    config_->stats_.errors_unknown_.inc();\n    break;\n  }\n}\n\nvoid PostgresFilter::incStatements(StatementType type) {\n  config_->stats_.statements_.inc();\n\n  switch (type) {\n  case DecoderCallbacks::StatementType::Insert:\n    config_->stats_.statements_insert_.inc();\n    break;\n  case DecoderCallbacks::StatementType::Delete:\n    config_->stats_.statements_delete_.inc();\n    break;\n  case DecoderCallbacks::StatementType::Select:\n    config_->stats_.statements_select_.inc();\n    break;\n  case DecoderCallbacks::StatementType::Update:\n    config_->stats_.statements_update_.inc();\n    break;\n  case DecoderCallbacks::StatementType::Other:\n    config_->stats_.statements_other_.inc();\n    break;\n  case DecoderCallbacks::StatementType::Noop:\n    break;\n  }\n}\n\nvoid PostgresFilter::processQuery(const std::string& sql) {\n  if (config_->enable_sql_parsing_) {\n    ProtobufWkt::Struct metadata;\n\n    auto result = Common::SQLUtils::SQLUtils::setMetadata(sql, decoder_->getAttributes(), metadata);\n\n    if (!result) {\n      config_->stats_.statements_parse_error_.inc();\n      ENVOY_CONN_LOG(trace, \"postgres_proxy: cannot parse SQL: {}\", read_callbacks_->connection(),\n                     sql.c_str());\n      return;\n    }\n\n    config_->stats_.statements_parsed_.inc();\n    ENVOY_CONN_LOG(trace, \"postgres_proxy: query processed {}\", read_callbacks_->connection(),\n                   sql.c_str());\n\n    // Set dynamic metadata\n    read_callbacks_->connection().streamInfo().setDynamicMetadata(\n        NetworkFilterNames::get().PostgresProxy, metadata);\n  }\n}\n\nvoid PostgresFilter::doDecode(Buffer::Instance& data, bool frontend) {\n  // Keep processing data until buffer is empty or decoder says\n  // that 
it cannot process data in the buffer.\n  while ((0 < data.length()) && (decoder_->onData(data, frontend))) {\n    ;\n  }\n}\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/postgres_filter.h",
    "content": "#pragma once\n\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/postgres_proxy/postgres_decoder.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n/**\n * All Postgres proxy stats. @see stats_macros.h\n */\n#define ALL_POSTGRES_PROXY_STATS(COUNTER)                                                          \\\n  COUNTER(errors)                                                                                  \\\n  COUNTER(errors_error)                                                                            \\\n  COUNTER(errors_fatal)                                                                            \\\n  COUNTER(errors_panic)                                                                            \\\n  COUNTER(errors_unknown)                                                                          \\\n  COUNTER(messages)                                                                                \\\n  COUNTER(messages_backend)                                                                        \\\n  COUNTER(messages_frontend)                                                                       \\\n  COUNTER(messages_unknown)                                                                        \\\n  COUNTER(sessions)                                                                                \\\n  COUNTER(sessions_encrypted)                                                                      \\\n  COUNTER(sessions_unencrypted)                                                                    \\\n  COUNTER(statements)                                                                              \\\n  COUNTER(statements_insert)                                
                                       \\\n  COUNTER(statements_delete)                                                                       \\\n  COUNTER(statements_update)                                                                       \\\n  COUNTER(statements_select)                                                                       \\\n  COUNTER(statements_other)                                                                        \\\n  COUNTER(transactions)                                                                            \\\n  COUNTER(transactions_commit)                                                                     \\\n  COUNTER(transactions_rollback)                                                                   \\\n  COUNTER(statements_parsed)                                                                       \\\n  COUNTER(statements_parse_error)                                                                  \\\n  COUNTER(notices)                                                                                 \\\n  COUNTER(notices_notice)                                                                          \\\n  COUNTER(notices_warning)                                                                         \\\n  COUNTER(notices_debug)                                                                           \\\n  COUNTER(notices_info)                                                                            \\\n  COUNTER(notices_log)                                                                             \\\n  COUNTER(notices_unknown)\n\n/**\n * Struct definition for all Postgres proxy stats. 
@see stats_macros.h\n */\nstruct PostgresProxyStats {\n  ALL_POSTGRES_PROXY_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration for the Postgres proxy filter.\n */\nclass PostgresFilterConfig {\npublic:\n  PostgresFilterConfig(const std::string& stat_prefix, bool enable_sql_parsing,\n                       Stats::Scope& scope);\n\n  bool enable_sql_parsing_{true};\n  Stats::Scope& scope_;\n  PostgresProxyStats stats_;\n\nprivate:\n  PostgresProxyStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return PostgresProxyStats{ALL_POSTGRES_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n  }\n};\n\nusing PostgresFilterConfigSharedPtr = std::shared_ptr<PostgresFilterConfig>;\n\nclass PostgresFilter : public Network::Filter,\n                       DecoderCallbacks,\n                       Logger::Loggable<Logger::Id::filter> {\npublic:\n  PostgresFilter(PostgresFilterConfigSharedPtr config);\n  ~PostgresFilter() override = default;\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override;\n\n  // Network::WriteFilter\n  Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override;\n\n  // PostgresProxy::DecoderCallback\n  void incErrors(ErrorType) override;\n  void incMessagesBackend() override;\n  void incMessagesFrontend() override;\n  void incMessagesUnknown() override;\n  void incNotices(NoticeType) override;\n  void incSessionsEncrypted() override;\n  void incSessionsUnencrypted() override;\n  void incStatements(StatementType) override;\n  void incTransactions() override;\n  void incTransactionsCommit() override;\n  void incTransactionsRollback() override;\n  void processQuery(const std::string&) override;\n\n  void doDecode(Buffer::Instance& data, bool);\n  DecoderPtr createDecoder(DecoderCallbacks* callbacks);\n  void 
setDecoder(std::unique_ptr<Decoder> decoder) { decoder_ = std::move(decoder); }\n  Decoder* getDecoder() const { return decoder_.get(); }\n\n  // Routines used during integration and unit tests\n  uint32_t getFrontendBufLength() const { return frontend_buffer_.length(); }\n  uint32_t getBackendBufLength() const { return backend_buffer_.length(); }\n  const PostgresProxyStats& getStats() const { return config_->stats_; }\n  Network::Connection& connection() const { return read_callbacks_->connection(); }\n  const PostgresFilterConfigSharedPtr& getConfig() const { return config_; }\n\nprivate:\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  PostgresFilterConfigSharedPtr config_;\n  Buffer::OwnedImpl frontend_buffer_;\n  Buffer::OwnedImpl backend_buffer_;\n  std::unique_ptr<Decoder> decoder_;\n};\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/postgres_message.cc",
    "content": "#include \"extensions/filters/network/postgres_proxy/postgres_message.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n// String type methods.\nbool String::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) {\n  // First find the terminating zero.\n  const char zero = 0;\n  const ssize_t index = data.search(&zero, 1, pos);\n  if (index == -1) {\n    return false;\n  }\n\n  // Reserve that many bytes in the string.\n  const uint64_t size = index - pos;\n  value_.resize(size);\n  // Now copy from buffer to string.\n  data.copyOut(pos, index - pos, value_.data());\n  pos += (size + 1);\n  left -= (size + 1);\n\n  return true;\n}\n\nstd::string String::toString() const { return absl::StrCat(\"[\", value_, \"]\"); }\n\n// ByteN type methods.\nbool ByteN::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) {\n  if (left > (data.length() - pos)) {\n    return false;\n  }\n  value_.resize(left);\n  data.copyOut(pos, left, value_.data());\n  pos += left;\n  left = 0;\n  return true;\n}\n\nstd::string ByteN::toString() const {\n  std::string out = \"[\";\n  absl::StrAppend(&out, absl::StrJoin(value_, \" \"));\n  absl::StrAppend(&out, \"]\");\n  return out;\n}\n\n// VarByteN type methods.\nbool VarByteN::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) {\n  if ((left < sizeof(int32_t)) || ((data.length() - pos) < sizeof(int32_t))) {\n    return false;\n  }\n  len_ = data.peekBEInt<int32_t>(pos);\n  pos += sizeof(int32_t);\n  left -= sizeof(int32_t);\n  if (len_ < 1) {\n    // There is no payload if length is not positive.\n    value_.clear();\n    return true;\n  }\n  if ((left < static_cast<uint64_t>(len_)) ||\n      ((data.length() - pos) < static_cast<uint64_t>(len_))) {\n    return false;\n  }\n  value_.resize(len_);\n  data.copyOut(pos, len_, value_.data());\n  pos += len_;\n  left -= len_;\n  return true;\n}\n\nstd::string VarByteN::toString() const 
{\n  std::string out;\n  out = fmt::format(\"[({} bytes):\", len_);\n  absl::StrAppend(&out, absl::StrJoin(value_, \" \"));\n  absl::StrAppend(&out, \"]\");\n  return out;\n}\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/postgres_message.h",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"fmt/printf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n/**\n * Postgres messages are described in official Postgres documentation:\n * https://www.postgresql.org/docs/12/protocol-message-formats.html\n *\n * Most of messages start with 1-byte message identifier followed by 4-bytes length field. Few\n * messages are defined without starting 1-byte character and are used during well-defined initial\n * stage of connection process.\n *\n * Messages are composed from various fields: 8, 16, 32-bit integers, String, Arrays, etc.\n *\n * Structures defined below have the same naming as types used in official Postgres documentation.\n *\n * Each structure has the following methods:\n * read - to read number of bytes from received buffer. The number of bytes depends on structure\n * type. toString - method returns displayable representation of the structure value.\n *\n */\n\n// Template for integer types.\n// Size of integer types is fixed and depends on the type of integer.\ntemplate <typename T> class Int {\npublic:\n  /**\n   * Read integer value from data buffer.\n   * @param data reference to a buffer containing data to read.\n   * @param pos offset in the buffer where data to read is located. Successful read will advance\n   * this parameter.\n   * @param left number of bytes to be read to reach the end of Postgres message.\n   * Successful read will adjust this parameter.\n   * @return boolean value indicating whether read was successful. If read returns\n   * false \"pos\" and \"left\" params are not updated. 
When read is not successful,\n   * the caller should not continue reading next values from the data buffer\n   * for the current message.\n   */\n  bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) {\n    if ((data.length() - pos) < sizeof(T)) {\n      return false;\n    }\n    value_ = data.peekBEInt<T>(pos);\n    pos += sizeof(T);\n    left -= sizeof(T);\n    return true;\n  }\n\n  std::string toString() const { return fmt::format(\"[{}]\", value_); }\n\n  T get() const { return value_; }\n\nprivate:\n  T value_{};\n};\n\nusing Int32 = Int<uint32_t>;\nusing Int16 = Int<uint16_t>;\nusing Int8 = Int<uint8_t>;\n\n// 8-bits character value.\nusing Byte1 = Int<char>;\n\n// String type requires byte with zero value to indicate end of string.\nclass String {\npublic:\n  /**\n   * See above for parameter and return value description.\n   */\n  bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left);\n  std::string toString() const;\n\nprivate:\n  std::string value_;\n};\n\n// ByteN type is used as the last type in the Postgres message and contains\n// sequence of bytes. The length must be deduced from message length.\nclass ByteN {\npublic:\n  /**\n   * See above for parameter and return value description.\n   */\n  bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left);\n  std::string toString() const;\n\nprivate:\n  std::vector<uint8_t> value_;\n};\n\n// VarByteN represents the structure consisting of 4 bytes of length\n// indicating how many bytes follow.\n// In Postgres documentation it is described as:\n// - Int32\n//   The number of bytes in the structure (this count does not include itself). Can be\n//   zero. As a special case, -1 indicates a NULL (no result). No value bytes follow in the NULL\n// case.\n//\n// - ByteN\n// The sequence of bytes representing the value. 
Bytes are present only when length has a positive\n// value.\nclass VarByteN {\npublic:\n  /**\n   * See above for parameter and return value description.\n   */\n  bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left);\n  std::string toString() const;\n\nprivate:\n  int32_t len_;\n  std::vector<uint8_t> value_;\n};\n\n// Array contains one or more values of the same type.\ntemplate <typename T> class Array {\npublic:\n  /**\n   * See above for parameter and return value description.\n   */\n  bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) {\n    // First read the 16 bits value which indicates how many\n    // elements there are in the array.\n    if (((data.length() - pos) < sizeof(uint16_t)) || (left < sizeof(uint16_t))) {\n      return false;\n    }\n    const uint16_t num = data.peekBEInt<uint16_t>(pos);\n    pos += sizeof(uint16_t);\n    left -= sizeof(uint16_t);\n    if (num != 0) {\n      for (uint16_t i = 0; i < num; i++) {\n        auto item = std::make_unique<T>();\n        if (!item->read(data, pos, left)) {\n          return false;\n        }\n        value_.push_back(std::move(item));\n      }\n    }\n    return true;\n  }\n\n  std::string toString() const {\n    std::string out = fmt::format(\"[Array of {}:{{\", value_.size());\n\n    // Iterate through all elements in the array.\n    // No delimiter is required between elements, as each\n    // element is wrapped in \"[]\" or \"{}\".\n    for (const auto& i : value_) {\n      absl::StrAppend(&out, i->toString());\n    }\n    absl::StrAppend(&out, \"}]\");\n\n    return out;\n  }\n\nprivate:\n  std::vector<std::unique_ptr<T>> value_;\n};\n\n// Repeated is a composite type used at the end of the message.\n// It indicates to read the value of the same type until the end\n// of the Postgres message.\ntemplate <typename T> class Repeated {\npublic:\n  /**\n   * See above for parameter and return value description.\n   */\n  bool read(const Buffer::Instance& data, 
uint64_t& pos, uint64_t& left) {\n    if ((data.length() - pos) < left) {\n      return false;\n    }\n    // Read until nothing is left.\n    while (left != 0) {\n      auto item = std::make_unique<T>();\n      if (!item->read(data, pos, left)) {\n        return false;\n      }\n      value_.push_back(std::move(item));\n    }\n    return true;\n  }\n\n  std::string toString() const {\n    std::string out;\n\n    // Iterate through all repeated elements.\n    // No delimiter is required between elements, as each\n    // element is wrapped in \"[]\" or \"{}\".\n    for (const auto& i : value_) {\n      absl::StrAppend(&out, i->toString());\n    }\n    return out;\n  }\n\nprivate:\n  std::vector<std::unique_ptr<T>> value_;\n};\n\n// Interface to Postgres message class.\nclass Message {\npublic:\n  virtual ~Message() = default;\n\n  // read method should read only as many bytes from data\n  // buffer as it is indicated in message's length field.\n  // \"length\" parameter indicates how many bytes were indicated in Postgres message's\n  // length field. \"data\" buffer may contain more bytes than \"length\".\n  virtual bool read(const Buffer::Instance& data, const uint64_t length) PURE;\n\n  // toString method provides displayable representation of\n  // the Postgres message.\n  virtual std::string toString() const PURE;\n};\n\n// Sequence is tuple like structure, which binds together\n// set of several fields of different types.\ntemplate <typename... Types> class Sequence;\n\ntemplate <typename FirstField, typename... 
Remaining>\nclass Sequence<FirstField, Remaining...> : public Message {\n  FirstField first_;\n  Sequence<Remaining...> remaining_;\n\npublic:\n  Sequence() = default;\n  std::string toString() const override {\n    return absl::StrCat(first_.toString(), remaining_.toString());\n  }\n\n  bool read(const Buffer::Instance& data, const uint64_t length) override {\n    uint64_t pos = 0;\n    uint64_t left = length;\n    return read(data, pos, left);\n  }\n\n  /**\n   * Implementation of \"read\" method for variadic template.\n   * It reads data for the current type and invokes read operation\n   * for remaining types.\n   * See above for parameter and return value description for individual types.\n   */\n  bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) {\n    bool result = first_.read(data, pos, left);\n    if (!result) {\n      return false;\n    }\n    return remaining_.read(data, pos, left);\n  }\n};\n\n// Terminal template definition for variadic Sequence template.\ntemplate <> class Sequence<> : public Message {\npublic:\n  Sequence<>() = default;\n  std::string toString() const override { return \"\"; }\n  bool read(const Buffer::Instance&, uint64_t&, uint64_t&) { return true; }\n  bool read(const Buffer::Instance&, const uint64_t) override { return true; }\n};\n\n// Helper function to create pointer to a Sequence structure and is used by Postgres\n// decoder after learning the type of Postgres message.\ntemplate <typename... Types> std::unique_ptr<Message> createMsgBodyReader() {\n  return std::make_unique<Sequence<Types...>>();\n}\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/postgres_proxy/postgres_session.h",
    "content": "#pragma once\n#include <cstdint>\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n// Class stores data about the current state of a transaction between postgres client and server.\nclass PostgresSession {\npublic:\n  bool inTransaction() { return in_transaction_; };\n  void setInTransaction(bool in_transaction) { in_transaction_ = in_transaction; };\n\nprivate:\n  bool in_transaction_{false};\n};\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Ratelimit L4 network filter\n# Public docs: docs/root/configuration/network_filters/rate_limit_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"ratelimit_lib\",\n    srcs = [\"ratelimit.cc\"],\n    hdrs = [\"ratelimit.h\"],\n    # Legacy test use. TODO(#9953) clean up.\n    visibility = [\n        \"//source/extensions:__subpackages__\",\n        \"//test/common/network:__pkg__\",\n        \"//test/extensions:__subpackages__\",\n    ],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/ratelimit:ratelimit_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_client_interface\",\n        \"@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_client_interface\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"//source/extensions/filters/network/ratelimit:ratelimit_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/ratelimit/config.cc",
    "content": "#include \"extensions/filters/network/ratelimit/config.h\"\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit_impl.h\"\n#include \"extensions/filters/network/ratelimit/ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RateLimitFilter {\n\nNetwork::FilterFactoryCb RateLimitConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::ratelimit::v3::RateLimit& proto_config,\n    Server::Configuration::FactoryContext& context) {\n\n  ASSERT(!proto_config.stat_prefix().empty());\n  ASSERT(!proto_config.domain().empty());\n  ASSERT(proto_config.descriptors_size() > 0);\n\n  ConfigSharedPtr filter_config(new Config(proto_config, context.scope(), context.runtime()));\n  const std::chrono::milliseconds timeout =\n      std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20));\n\n  return [proto_config, &context, timeout,\n          filter_config](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(std::make_shared<Filter>(\n        filter_config,\n\n        Filters::Common::RateLimit::rateLimitClient(\n            context, proto_config.rate_limit_service().grpc_service(), timeout,\n            proto_config.rate_limit_service().transport_api_version())));\n  };\n}\n\n/**\n * Static registration for the rate limit filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(RateLimitConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory){\"envoy.ratelimit\"};\n\n} // namespace RateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/ratelimit/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.validate.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit.h\"\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RateLimitFilter {\n\n/**\n * Config registration for the rate limit filter. @see NamedNetworkFilterConfigFactory.\n */\nclass RateLimitConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::network::ratelimit::v3::RateLimit> {\npublic:\n  RateLimitConfigFactory() : FactoryBase(NetworkFilterNames::get().RateLimit) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::ratelimit::v3::RateLimit& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace RateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/ratelimit/ratelimit.cc",
    "content": "#include \"extensions/filters/network/ratelimit/ratelimit.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RateLimitFilter {\n\nConfig::Config(const envoy::extensions::filters::network::ratelimit::v3::RateLimit& config,\n               Stats::Scope& scope, Runtime::Loader& runtime)\n    : domain_(config.domain()), stats_(generateStats(config.stat_prefix(), scope)),\n      runtime_(runtime), failure_mode_deny_(config.failure_mode_deny()) {\n  for (const auto& descriptor : config.descriptors()) {\n    RateLimit::Descriptor new_descriptor;\n    for (const auto& entry : descriptor.entries()) {\n      new_descriptor.entries_.push_back({entry.key(), entry.value()});\n    }\n    descriptors_.push_back(new_descriptor);\n  }\n}\n\nInstanceStats Config::generateStats(const std::string& name, Stats::Scope& scope) {\n  std::string final_prefix = fmt::format(\"ratelimit.{}.\", name);\n  return {ALL_TCP_RATE_LIMIT_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),\n                                   POOL_GAUGE_PREFIX(scope, final_prefix))};\n}\n\nNetwork::FilterStatus Filter::onData(Buffer::Instance&, bool) {\n  return status_ == Status::Calling ? 
Network::FilterStatus::StopIteration\n                                    : Network::FilterStatus::Continue;\n}\n\nNetwork::FilterStatus Filter::onNewConnection() {\n  if (status_ == Status::NotStarted &&\n      !config_->runtime().snapshot().featureEnabled(\"ratelimit.tcp_filter_enabled\", 100)) {\n    status_ = Status::Complete;\n  }\n\n  if (status_ == Status::NotStarted) {\n    status_ = Status::Calling;\n    config_->stats().active_.inc();\n    config_->stats().total_.inc();\n    calling_limit_ = true;\n    client_->limit(*this, config_->domain(), config_->descriptors(), Tracing::NullSpan::instance());\n    calling_limit_ = false;\n  }\n\n  return status_ == Status::Calling ? Network::FilterStatus::StopIteration\n                                    : Network::FilterStatus::Continue;\n}\n\nvoid Filter::onEvent(Network::ConnectionEvent event) {\n  // Make sure that any pending request in the client is cancelled. This will be NOP if the\n  // request already completed.\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    if (status_ == Status::Calling) {\n      client_->cancel();\n      config_->stats().active_.dec();\n    }\n  }\n}\n\nvoid Filter::complete(Filters::Common::RateLimit::LimitStatus status,\n                      Filters::Common::RateLimit::DescriptorStatusListPtr&&,\n                      Http::ResponseHeaderMapPtr&&, Http::RequestHeaderMapPtr&&) {\n  status_ = Status::Complete;\n  config_->stats().active_.dec();\n\n  switch (status) {\n  case Filters::Common::RateLimit::LimitStatus::OK:\n    config_->stats().ok_.inc();\n    break;\n  case Filters::Common::RateLimit::LimitStatus::Error:\n    config_->stats().error_.inc();\n    break;\n  case Filters::Common::RateLimit::LimitStatus::OverLimit:\n    config_->stats().over_limit_.inc();\n    break;\n  }\n\n  if (status == Filters::Common::RateLimit::LimitStatus::OverLimit &&\n      
config_->runtime().snapshot().featureEnabled(\"ratelimit.tcp_filter_enforcing\", 100)) {\n    config_->stats().cx_closed_.inc();\n    filter_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n  } else if (status == Filters::Common::RateLimit::LimitStatus::Error) {\n    if (config_->failureModeAllow()) {\n      config_->stats().failure_mode_allowed_.inc();\n      if (!calling_limit_) {\n        filter_callbacks_->continueReading();\n      }\n    } else {\n      config_->stats().cx_closed_.inc();\n      filter_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n    }\n  } else {\n    // We can get completion inline, so only call continue if that isn't happening.\n    if (!calling_limit_) {\n      filter_callbacks_->continueReading();\n    }\n  }\n}\n\n} // namespace RateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/ratelimit/ratelimit.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/ratelimit/ratelimit.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RateLimitFilter {\n\n/**\n * All tcp rate limit stats. @see stats_macros.h\n */\n#define ALL_TCP_RATE_LIMIT_STATS(COUNTER, GAUGE)                                                   \\\n  COUNTER(cx_closed)                                                                               \\\n  COUNTER(error)                                                                                   \\\n  COUNTER(failure_mode_allowed)                                                                    \\\n  COUNTER(ok)                                                                                      \\\n  COUNTER(over_limit)                                                                              \\\n  COUNTER(total)                                                                                   \\\n  GAUGE(active, Accumulate)\n\n/**\n * Struct definition for all tcp rate limit stats. 
@see stats_macros.h\n */\nstruct InstanceStats {\n  ALL_TCP_RATE_LIMIT_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Global configuration for TCP rate limit filter.\n */\nclass Config {\npublic:\n  Config(const envoy::extensions::filters::network::ratelimit::v3::RateLimit& config,\n         Stats::Scope& scope, Runtime::Loader& runtime);\n  const std::string& domain() { return domain_; }\n  const std::vector<RateLimit::Descriptor>& descriptors() { return descriptors_; }\n  Runtime::Loader& runtime() { return runtime_; }\n  const InstanceStats& stats() { return stats_; }\n  bool failureModeAllow() const { return !failure_mode_deny_; };\n\nprivate:\n  static InstanceStats generateStats(const std::string& name, Stats::Scope& scope);\n\n  std::string domain_;\n  std::vector<RateLimit::Descriptor> descriptors_;\n  const InstanceStats stats_;\n  Runtime::Loader& runtime_;\n  const bool failure_mode_deny_;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\n/**\n * TCP rate limit filter instance. This filter will call the rate limit service with the given\n * configuration parameters. If the rate limit service returns an error or an over limit the\n * connection will be closed without any further filters being called. 
Otherwise all buffered\n * data will be released to further filters.\n */\nclass Filter : public Network::ReadFilter,\n               public Network::ConnectionCallbacks,\n               public Filters::Common::RateLimit::RequestCallbacks {\npublic:\n  Filter(ConfigSharedPtr config, Filters::Common::RateLimit::ClientPtr&& client)\n      : config_(config), client_(std::move(client)) {}\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    filter_callbacks_ = &callbacks;\n    filter_callbacks_->connection().addConnectionCallbacks(*this);\n  }\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n  // RateLimit::RequestCallbacks\n  void complete(Filters::Common::RateLimit::LimitStatus status,\n                Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses,\n                Http::ResponseHeaderMapPtr&& response_headers_to_add,\n                Http::RequestHeaderMapPtr&& request_headers_to_add) override;\n\nprivate:\n  enum class Status { NotStarted, Calling, Complete };\n\n  ConfigSharedPtr config_;\n  Filters::Common::RateLimit::ClientPtr client_;\n  Network::ReadFilterCallbacks* filter_callbacks_{};\n  Status status_{Status::NotStarted};\n  bool calling_limit_{};\n};\n} // namespace RateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/rbac/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":rbac_filter\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/rbac/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"rbac_filter\",\n    srcs = [\"rbac_filter.cc\"],\n    hdrs = [\"rbac_filter.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/extensions/filters/common/rbac:engine_lib\",\n        \"//source/extensions/filters/common/rbac:utility_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/network/rbac/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/rbac/config.cc",
    "content": "#include \"extensions/filters/network/rbac/config.h\"\n\n#include \"envoy/config/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.validate.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/network/rbac/rbac_filter.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RBACFilter {\n\nstatic void validateFail(const std::string& header) {\n  throw EnvoyException(fmt::format(\"Found header({}) rule,\"\n                                   \"not supported by RBAC network filter\",\n                                   header));\n}\n\nstatic void validatePermission(const envoy::config::rbac::v3::Permission& permission) {\n  if (permission.has_header()) {\n    validateFail(permission.header().DebugString());\n  }\n  if (permission.has_and_rules()) {\n    for (const auto& r : permission.and_rules().rules()) {\n      validatePermission(r);\n    }\n  }\n  if (permission.has_or_rules()) {\n    for (const auto& r : permission.or_rules().rules()) {\n      validatePermission(r);\n    }\n  }\n  if (permission.has_not_rule()) {\n    validatePermission(permission.not_rule());\n  }\n}\n\nstatic void validatePrincipal(const envoy::config::rbac::v3::Principal& principal) {\n  if (principal.has_header()) {\n    validateFail(principal.header().DebugString());\n  }\n  if (principal.has_and_ids()) {\n    for (const auto& r : principal.and_ids().ids()) {\n      validatePrincipal(r);\n    }\n  }\n  if (principal.has_or_ids()) {\n    for (const auto& r : principal.or_ids().ids()) {\n      validatePrincipal(r);\n    }\n  }\n  if (principal.has_not_id()) {\n    validatePrincipal(principal.not_id());\n  }\n}\n\n/**\n * Validate the RBAC rules doesn't include any header or metadata rule.\n */\nstatic void validateRbacRules(const 
envoy::config::rbac::v3::RBAC& rules) {\n  for (const auto& policy : rules.policies()) {\n    for (const auto& permission : policy.second.permissions()) {\n      validatePermission(permission);\n    }\n    for (const auto& principal : policy.second.principals()) {\n      validatePrincipal(principal);\n    }\n  }\n}\n\nNetwork::FilterFactoryCb\nRoleBasedAccessControlNetworkFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::rbac::v3::RBAC& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  validateRbacRules(proto_config.rules());\n  validateRbacRules(proto_config.shadow_rules());\n  RoleBasedAccessControlFilterConfigSharedPtr config(\n      std::make_shared<RoleBasedAccessControlFilterConfig>(proto_config, context.scope()));\n  return [config](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(std::make_shared<RoleBasedAccessControlFilter>(config));\n  };\n}\n\n/**\n * Static registration for the RBAC network filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(RoleBasedAccessControlNetworkFilterConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace RBACFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/rbac/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RBACFilter {\n\n/**\n * Config registration for the RBAC network filter. @see NamedNetworkFilterConfigFactory.\n */\nclass RoleBasedAccessControlNetworkFilterConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::network::rbac::v3::RBAC> {\n\npublic:\n  RoleBasedAccessControlNetworkFilterConfigFactory()\n      : FactoryBase(NetworkFilterNames::get().Rbac) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::rbac::v3::RBAC& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace RBACFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/rbac/rbac_filter.cc",
    "content": "#include \"extensions/filters/network/rbac/rbac_filter.h\"\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RBACFilter {\n\nRoleBasedAccessControlFilterConfig::RoleBasedAccessControlFilterConfig(\n    const envoy::extensions::filters::network::rbac::v3::RBAC& proto_config, Stats::Scope& scope)\n    : stats_(Filters::Common::RBAC::generateStats(proto_config.stat_prefix(), scope)),\n      engine_(Filters::Common::RBAC::createEngine(proto_config)),\n      shadow_engine_(Filters::Common::RBAC::createShadowEngine(proto_config)),\n      enforcement_type_(proto_config.enforcement_type()) {}\n\nNetwork::FilterStatus RoleBasedAccessControlFilter::onData(Buffer::Instance&, bool) {\n  ENVOY_LOG(debug,\n            \"checking connection: requestedServerName: {}, sourceIP: {}, directRemoteIP: {},\"\n            \"remoteIP: {}, localAddress: {}, ssl: {}, dynamicMetadata: {}\",\n            callbacks_->connection().requestedServerName(),\n            callbacks_->connection().remoteAddress()->asString(),\n            callbacks_->connection().streamInfo().downstreamDirectRemoteAddress()->asString(),\n            callbacks_->connection().streamInfo().downstreamRemoteAddress()->asString(),\n            callbacks_->connection().streamInfo().downstreamLocalAddress()->asString(),\n            callbacks_->connection().ssl()\n                ? 
\"uriSanPeerCertificate: \" +\n                      absl::StrJoin(callbacks_->connection().ssl()->uriSanPeerCertificate(), \",\") +\n                      \", dnsSanPeerCertificate: \" +\n                      absl::StrJoin(callbacks_->connection().ssl()->dnsSansPeerCertificate(), \",\") +\n                      \", subjectPeerCertificate: \" +\n                      callbacks_->connection().ssl()->subjectPeerCertificate()\n                : \"none\",\n            callbacks_->connection().streamInfo().dynamicMetadata().DebugString());\n\n  std::string log_policy_id = \"none\";\n  // When the enforcement type is continuous always do the RBAC checks. If it is a one time check,\n  // run the check once and skip it for subsequent onData calls.\n  if (config_->enforcementType() ==\n      envoy::extensions::filters::network::rbac::v3::RBAC::CONTINUOUS) {\n    shadow_engine_result_ =\n        checkEngine(Filters::Common::RBAC::EnforcementMode::Shadow).engine_result_;\n    auto result = checkEngine(Filters::Common::RBAC::EnforcementMode::Enforced);\n    engine_result_ = result.engine_result_;\n    log_policy_id = result.connection_termination_details_;\n  } else {\n    if (shadow_engine_result_ == Unknown) {\n      // TODO(quanlin): Pass the shadow engine results to other filters.\n      shadow_engine_result_ =\n          checkEngine(Filters::Common::RBAC::EnforcementMode::Shadow).engine_result_;\n    }\n\n    if (engine_result_ == Unknown) {\n      auto result = checkEngine(Filters::Common::RBAC::EnforcementMode::Enforced);\n      engine_result_ = result.engine_result_;\n      log_policy_id = result.connection_termination_details_;\n    }\n  }\n\n  if (engine_result_ == Allow) {\n    return Network::FilterStatus::Continue;\n  } else if (engine_result_ == Deny) {\n    callbacks_->connection().streamInfo().setConnectionTerminationDetails(\n        Filters::Common::RBAC::responseDetail(log_policy_id));\n    
callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n    return Network::FilterStatus::StopIteration;\n  }\n\n  ENVOY_LOG(debug, \"no engine, allowed by default\");\n  return Network::FilterStatus::Continue;\n}\n\nvoid RoleBasedAccessControlFilter::setDynamicMetadata(std::string shadow_engine_result,\n                                                      std::string shadow_policy_id) {\n  ProtobufWkt::Struct metrics;\n  auto& fields = *metrics.mutable_fields();\n  if (!shadow_policy_id.empty()) {\n    *fields[Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().ShadowEffectivePolicyIdField]\n         .mutable_string_value() = shadow_policy_id;\n  }\n  *fields[Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().ShadowEngineResultField]\n       .mutable_string_value() = shadow_engine_result;\n  callbacks_->connection().streamInfo().setDynamicMetadata(NetworkFilterNames::get().Rbac, metrics);\n}\n\nResult RoleBasedAccessControlFilter::checkEngine(Filters::Common::RBAC::EnforcementMode mode) {\n  const auto engine = config_->engine(mode);\n  std::string effective_policy_id;\n  if (engine != nullptr) {\n    // Check authorization decision and do Action operations\n    bool allowed = engine->handleAction(\n        callbacks_->connection(), callbacks_->connection().streamInfo(), &effective_policy_id);\n    const std::string log_policy_id = effective_policy_id.empty() ? 
\"none\" : effective_policy_id;\n    if (allowed) {\n      if (mode == Filters::Common::RBAC::EnforcementMode::Shadow) {\n        ENVOY_LOG(debug, \"shadow allowed, matched policy {}\", log_policy_id);\n        config_->stats().shadow_allowed_.inc();\n        setDynamicMetadata(\n            Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().EngineResultAllowed,\n            effective_policy_id);\n      } else if (mode == Filters::Common::RBAC::EnforcementMode::Enforced) {\n        ENVOY_LOG(debug, \"enforced allowed, matched policy {}\", log_policy_id);\n        config_->stats().allowed_.inc();\n      }\n      return Result{Allow, effective_policy_id};\n    } else {\n      if (mode == Filters::Common::RBAC::EnforcementMode::Shadow) {\n        ENVOY_LOG(debug, \"shadow denied, matched policy {}\", log_policy_id);\n        config_->stats().shadow_denied_.inc();\n        setDynamicMetadata(\n            Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().EngineResultDenied,\n            effective_policy_id);\n      } else if (mode == Filters::Common::RBAC::EnforcementMode::Enforced) {\n        ENVOY_LOG(debug, \"enforced denied, matched policy {}\", log_policy_id);\n        config_->stats().denied_.inc();\n      }\n      return Result{Deny, log_policy_id};\n    }\n  }\n  return Result{None, \"none\"};\n}\n\n} // namespace RBACFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/rbac/rbac_filter.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/common/rbac/engine_impl.h\"\n#include \"extensions/filters/common/rbac/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RBACFilter {\n\nenum EngineResult { Unknown, None, Allow, Deny };\n\nstruct Result {\n  EngineResult engine_result_;\n  std::string connection_termination_details_;\n};\n\n/**\n * Configuration for the RBAC network filter.\n */\nclass RoleBasedAccessControlFilterConfig {\npublic:\n  RoleBasedAccessControlFilterConfig(\n      const envoy::extensions::filters::network::rbac::v3::RBAC& proto_config, Stats::Scope& scope);\n\n  Filters::Common::RBAC::RoleBasedAccessControlFilterStats& stats() { return stats_; }\n\n  const Filters::Common::RBAC::RoleBasedAccessControlEngineImpl*\n  engine(Filters::Common::RBAC::EnforcementMode mode) const {\n    return mode == Filters::Common::RBAC::EnforcementMode::Enforced ? 
engine_.get()\n                                                                    : shadow_engine_.get();\n  }\n\n  envoy::extensions::filters::network::rbac::v3::RBAC::EnforcementType enforcementType() const {\n    return enforcement_type_;\n  }\n\nprivate:\n  Filters::Common::RBAC::RoleBasedAccessControlFilterStats stats_;\n\n  std::unique_ptr<Filters::Common::RBAC::RoleBasedAccessControlEngineImpl> engine_;\n  std::unique_ptr<Filters::Common::RBAC::RoleBasedAccessControlEngineImpl> shadow_engine_;\n  const envoy::extensions::filters::network::rbac::v3::RBAC::EnforcementType enforcement_type_;\n};\n\nusing RoleBasedAccessControlFilterConfigSharedPtr =\n    std::shared_ptr<RoleBasedAccessControlFilterConfig>;\n\n/**\n * Implementation of a basic RBAC network filter.\n */\nclass RoleBasedAccessControlFilter : public Network::ReadFilter,\n                                     public Logger::Loggable<Logger::Id::rbac> {\n\npublic:\n  RoleBasedAccessControlFilter(RoleBasedAccessControlFilterConfigSharedPtr config)\n      : config_(config) {}\n  ~RoleBasedAccessControlFilter() override = default;\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; };\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    callbacks_ = &callbacks;\n  }\n\n  void setDynamicMetadata(std::string shadow_engine_result, std::string shadow_policy_id);\n\nprivate:\n  RoleBasedAccessControlFilterConfigSharedPtr config_;\n  Network::ReadFilterCallbacks* callbacks_{};\n  EngineResult engine_result_{Unknown};\n  EngineResult shadow_engine_result_{Unknown};\n\n  Result checkEngine(Filters::Common::RBAC::EnforcementMode mode);\n};\n\n} // namespace RBACFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Redis proxy L4 network filter. Implements consistent hashing and observability for large redis\n# clusters.\n# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"command_splitter_interface\",\n    hdrs = [\"command_splitter.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/extensions/filters/network/common/redis:codec_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"config_interface\",\n    hdrs = [\"config.h\"],\n    deps = [\n        \"//source/common/config:datasource_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_pool_interface\",\n    hdrs = [\"conn_pool.h\"],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/extensions/filters/network/common/redis:client_interface\",\n        \"//source/extensions/filters/network/common/redis:codec_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_interface\",\n    hdrs = [\"router.h\"],\n    deps = [\":conn_pool_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"command_splitter_lib\",\n    srcs = [\"command_splitter_impl.cc\"],\n    hdrs = [\"command_splitter_impl.h\"],\n    deps = [\n        \":command_splitter_interface\",\n        \":conn_pool_lib\",\n        \":router_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/stats:timespan_interface\",\n        
\"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/extensions/filters/network/common/redis:client_lib\",\n        \"//source/extensions/filters/network/common/redis:fault_lib\",\n        \"//source/extensions/filters/network/common/redis:supported_commands_lib\",\n        \"//source/extensions/filters/network/common/redis:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_pool_lib\",\n    srcs = [\"conn_pool_impl.cc\"],\n    hdrs = [\"conn_pool_impl.h\"],\n    deps = [\n        \":config_interface\",\n        \":conn_pool_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/clusters/redis:redis_cluster_lb\",\n        \"//source/extensions/common/redis:cluster_refresh_manager_interface\",\n        \"//source/extensions/filters/network/common/redis:client_lib\",\n        \"//source/extensions/filters/network/common/redis:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"proxy_filter_lib\",\n    srcs = [\"proxy_filter.cc\"],\n    hdrs = [\"proxy_filter.h\"],\n    deps = [\n        \":command_splitter_interface\",\n        
\"//include/envoy/network:drain_decision_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/filters/network/common/redis:codec_interface\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/extensions/common/redis:cluster_refresh_manager_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"//source/extensions/filters/network/common/redis:codec_lib\",\n        \"//source/extensions/filters/network/common/redis:fault_lib\",\n        \"//source/extensions/filters/network/common/redis:redis_command_stats_lib\",\n        \"//source/extensions/filters/network/redis_proxy:command_splitter_lib\",\n        \"//source/extensions/filters/network/redis_proxy:conn_pool_lib\",\n        \"//source/extensions/filters/network/redis_proxy:proxy_filter_lib\",\n        \"//source/extensions/filters/network/redis_proxy:router_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_lib\",\n    srcs = [\"router_impl.cc\"],\n    hdrs = [\"router_impl.h\"],\n    deps = [\n        \":router_interface\",\n        
\"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/extensions/filters/network/common/redis:codec_lib\",\n        \"//source/extensions/filters/network/common/redis:supported_commands_lib\",\n        \"//source/extensions/filters/network/common/redis:utility_lib\",\n        \"//source/extensions/filters/network/redis_proxy:conn_pool_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/command_splitter.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/event/dispatcher.h\"\n\n#include \"extensions/filters/network/common/redis/codec.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\nnamespace CommandSplitter {\n\n/**\n * A handle to a split request.\n */\nclass SplitRequest {\npublic:\n  virtual ~SplitRequest() = default;\n\n  /**\n   * Cancel the request. No further request callbacks will be called.\n   */\n  virtual void cancel() PURE;\n};\n\nusing SplitRequestPtr = std::unique_ptr<SplitRequest>;\n\n/**\n * Split request callbacks.\n */\nclass SplitCallbacks {\npublic:\n  virtual ~SplitCallbacks() = default;\n\n  /**\n   * Called to verify that commands should be processed.\n   * @return bool true if commands from this client connection can be processed, false if not.\n   */\n  virtual bool connectionAllowed() PURE;\n\n  /**\n   * Called when an authentication command has been received with a password.\n   * @param password supplies the AUTH password provided by the downstream client.\n   */\n  virtual void onAuth(const std::string& password) PURE;\n\n  /**\n   * Called when an authentication command has been received with a username and password.\n   * @param username supplies the AUTH username provided by the downstream client.\n   * @param password supplies the AUTH password provided by the downstream client.\n   */\n  virtual void onAuth(const std::string& username, const std::string& password) PURE;\n\n  /**\n   * Called when the response is ready.\n   * @param value supplies the response which is now owned by the callee.\n   */\n  virtual void onResponse(Common::Redis::RespValuePtr&& value) PURE;\n};\n\n/**\n * A command splitter that takes incoming redis commands and splits them as appropriate to a\n * backend connection pool.\n */\nclass Instance {\npublic:\n  virtual ~Instance() = default;\n\n  /**\n   * Make a split redis request capable of being 
retried/redirected.\n   * @param request supplies the split request to make (ownership transferred to call).\n   * @param callbacks supplies the split request completion callbacks.\n   * @param dispatcher supplies dispatcher used for delay fault timer.\n   * @return SplitRequestPtr a handle to the active request or nullptr if the request has already\n   *         been satisfied (via onResponse() being called). The splitter ALWAYS calls\n   *         onResponse() for a given request.\n   */\n  virtual SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request,\n                                      SplitCallbacks& callbacks,\n                                      Event::Dispatcher& dispatcher) PURE;\n};\n\n} // namespace CommandSplitter\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/command_splitter_impl.cc",
    "content": "#include \"extensions/filters/network/redis_proxy/command_splitter_impl.h\"\n\n#include \"extensions/filters/network/common/redis/supported_commands.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\nnamespace CommandSplitter {\nnamespace {\n\n// null_pool_callbacks is used for requests that must be filtered and not redirected such as\n// \"asking\".\nConnPool::DoNothingPoolCallbacks null_pool_callbacks;\n\n/**\n * Make request and maybe mirror the request based on the mirror policies of the route.\n * @param route supplies the route matched with the request.\n * @param command supplies the command of the request.\n * @param key supplies the key of the request.\n * @param incoming_request supplies the request.\n * @param callbacks supplies the request completion callbacks.\n * @return PoolRequest* a handle to the active request or nullptr if the request could not be made\n *         for some reason.\n */\nCommon::Redis::Client::PoolRequest* makeSingleServerRequest(\n    const RouteSharedPtr& route, const std::string& command, const std::string& key,\n    Common::Redis::RespValueConstSharedPtr incoming_request, ConnPool::PoolCallbacks& callbacks) {\n  auto handler =\n      route->upstream()->makeRequest(key, ConnPool::RespVariant(incoming_request), callbacks);\n  if (handler) {\n    for (auto& mirror_policy : route->mirrorPolicies()) {\n      if (mirror_policy->shouldMirror(command)) {\n        mirror_policy->upstream()->makeRequest(key, ConnPool::RespVariant(incoming_request),\n                                               null_pool_callbacks);\n      }\n    }\n  }\n  return handler;\n}\n\n/**\n * Make request and maybe mirror the request based on the mirror policies of the route.\n * @param route supplies the route matched with the request.\n * @param command supplies the command of the request.\n * @param key supplies the key of the request.\n * @param incoming_request supplies the request.\n * 
@param callbacks supplies the request completion callbacks.\n * @return PoolRequest* a handle to the active request or nullptr if the request could not be made\n *         for some reason.\n */\nCommon::Redis::Client::PoolRequest*\nmakeFragmentedRequest(const RouteSharedPtr& route, const std::string& command,\n                      const std::string& key, const Common::Redis::RespValue& incoming_request,\n                      ConnPool::PoolCallbacks& callbacks) {\n  auto handler =\n      route->upstream()->makeRequest(key, ConnPool::RespVariant(incoming_request), callbacks);\n  if (handler) {\n    for (auto& mirror_policy : route->mirrorPolicies()) {\n      if (mirror_policy->shouldMirror(command)) {\n        mirror_policy->upstream()->makeRequest(key, ConnPool::RespVariant(incoming_request),\n                                               null_pool_callbacks);\n      }\n    }\n  }\n  return handler;\n}\n} // namespace\n\nvoid SplitRequestBase::onWrongNumberOfArguments(SplitCallbacks& callbacks,\n                                                const Common::Redis::RespValue& request) {\n  callbacks.onResponse(Common::Redis::Utility::makeError(\n      fmt::format(\"wrong number of arguments for '{}' command\", request.asArray()[0].asString())));\n}\n\nvoid SplitRequestBase::updateStats(const bool success) {\n  if (success) {\n    command_stats_.success_.inc();\n  } else {\n    command_stats_.error_.inc();\n  }\n  if (command_latency_ != nullptr) {\n    command_latency_->complete();\n  }\n}\n\nSingleServerRequest::~SingleServerRequest() { ASSERT(!handle_); }\n\nvoid SingleServerRequest::onResponse(Common::Redis::RespValuePtr&& response) {\n  handle_ = nullptr;\n  updateStats(true);\n  callbacks_.onResponse(std::move(response));\n}\n\nvoid SingleServerRequest::onFailure() { onFailure(Response::get().UpstreamFailure); }\n\nvoid SingleServerRequest::onFailure(std::string error_msg) {\n  handle_ = nullptr;\n  updateStats(false);\n  
callbacks_.onResponse(Common::Redis::Utility::makeError(error_msg));\n}\n\nvoid SingleServerRequest::cancel() {\n  handle_->cancel();\n  handle_ = nullptr;\n}\n\nSplitRequestPtr ErrorFaultRequest::create(SplitCallbacks& callbacks, CommandStats& command_stats,\n                                          TimeSource& time_source, bool delay_command_latency) {\n  std::unique_ptr<ErrorFaultRequest> request_ptr{\n      new ErrorFaultRequest(callbacks, command_stats, time_source, delay_command_latency)};\n\n  request_ptr->onFailure(Common::Redis::FaultMessages::get().Error);\n  command_stats.error_fault_.inc();\n  return nullptr;\n}\n\nstd::unique_ptr<DelayFaultRequest> DelayFaultRequest::create(SplitCallbacks& callbacks,\n                                                             CommandStats& command_stats,\n                                                             TimeSource& time_source,\n                                                             Event::Dispatcher& dispatcher,\n                                                             std::chrono::milliseconds delay) {\n  return std::make_unique<DelayFaultRequest>(callbacks, command_stats, time_source, dispatcher,\n                                             delay);\n}\n\nvoid DelayFaultRequest::onResponse(Common::Redis::RespValuePtr&& response) {\n  response_ = std::move(response);\n  delay_timer_->enableTimer(delay_);\n}\n\nvoid DelayFaultRequest::onDelayResponse() {\n  command_stats_.delay_fault_.inc();\n  command_latency_->complete(); // Complete latency of the command stats of the wrapped request\n  callbacks_.onResponse(std::move(response_));\n}\n\nvoid DelayFaultRequest::cancel() { delay_timer_->disableTimer(); }\n\nSplitRequestPtr SimpleRequest::create(Router& router,\n                                      Common::Redis::RespValuePtr&& incoming_request,\n                                      SplitCallbacks& callbacks, CommandStats& command_stats,\n                                      TimeSource& 
time_source, bool delay_command_latency) {\n  std::unique_ptr<SimpleRequest> request_ptr{\n      new SimpleRequest(callbacks, command_stats, time_source, delay_command_latency)};\n  const auto route = router.upstreamPool(incoming_request->asArray()[1].asString());\n  if (route) {\n    Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request);\n    request_ptr->handle_ =\n        makeSingleServerRequest(route, base_request->asArray()[0].asString(),\n                                base_request->asArray()[1].asString(), base_request, *request_ptr);\n  }\n\n  if (!request_ptr->handle_) {\n    command_stats.error_.inc();\n    callbacks.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));\n    return nullptr;\n  }\n\n  return request_ptr;\n}\n\nSplitRequestPtr EvalRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request,\n                                    SplitCallbacks& callbacks, CommandStats& command_stats,\n                                    TimeSource& time_source, bool delay_command_latency) {\n  // EVAL looks like: EVAL script numkeys key [key ...] 
arg [arg ...]\n  // Ensure there are at least three args to the command or it cannot be hashed.\n  if (incoming_request->asArray().size() < 4) {\n    onWrongNumberOfArguments(callbacks, *incoming_request);\n    command_stats.error_.inc();\n    return nullptr;\n  }\n\n  std::unique_ptr<EvalRequest> request_ptr{\n      new EvalRequest(callbacks, command_stats, time_source, delay_command_latency)};\n\n  const auto route = router.upstreamPool(incoming_request->asArray()[3].asString());\n  if (route) {\n    Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request);\n    request_ptr->handle_ =\n        makeSingleServerRequest(route, base_request->asArray()[0].asString(),\n                                base_request->asArray()[3].asString(), base_request, *request_ptr);\n  }\n\n  if (!request_ptr->handle_) {\n    command_stats.error_.inc();\n    callbacks.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));\n    return nullptr;\n  }\n\n  return request_ptr;\n}\n\nFragmentedRequest::~FragmentedRequest() {\n#ifndef NDEBUG\n  for (const PendingRequest& request : pending_requests_) {\n    ASSERT(!request.handle_);\n  }\n#endif\n}\n\nvoid FragmentedRequest::cancel() {\n  for (PendingRequest& request : pending_requests_) {\n    if (request.handle_) {\n      request.handle_->cancel();\n      request.handle_ = nullptr;\n    }\n  }\n}\n\nvoid FragmentedRequest::onChildFailure(uint32_t index) {\n  onChildResponse(Common::Redis::Utility::makeError(Response::get().UpstreamFailure), index);\n}\n\nSplitRequestPtr MGETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request,\n                                    SplitCallbacks& callbacks, CommandStats& command_stats,\n                                    TimeSource& time_source, bool delay_command_latency) {\n  std::unique_ptr<MGETRequest> request_ptr{\n      new MGETRequest(callbacks, command_stats, time_source, delay_command_latency)};\n\n  
request_ptr->num_pending_responses_ = incoming_request->asArray().size() - 1;\n  request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_);\n\n  request_ptr->pending_response_ = std::make_unique<Common::Redis::RespValue>();\n  request_ptr->pending_response_->type(Common::Redis::RespType::Array);\n  std::vector<Common::Redis::RespValue> responses(request_ptr->num_pending_responses_);\n  request_ptr->pending_response_->asArray().swap(responses);\n\n  Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request);\n  for (uint32_t i = 1; i < base_request->asArray().size(); i++) {\n    request_ptr->pending_requests_.emplace_back(*request_ptr, i - 1);\n    PendingRequest& pending_request = request_ptr->pending_requests_.back();\n\n    const auto route = router.upstreamPool(base_request->asArray()[i].asString());\n    if (route) {\n      // Create composite array for a single get.\n      const Common::Redis::RespValue single_mget(\n          base_request, Common::Redis::Utility::GetRequest::instance(), i, i);\n      pending_request.handle_ = makeFragmentedRequest(\n          route, \"get\", base_request->asArray()[i].asString(), single_mget, pending_request);\n    }\n\n    if (!pending_request.handle_) {\n      pending_request.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));\n    }\n  }\n\n  if (request_ptr->num_pending_responses_ > 0) {\n    return request_ptr;\n  }\n\n  return nullptr;\n}\n\nvoid MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) {\n  pending_requests_[index].handle_ = nullptr;\n\n  pending_response_->asArray()[index].type(value->type());\n  switch (value->type()) {\n  case Common::Redis::RespType::Array:\n  case Common::Redis::RespType::Integer:\n  case Common::Redis::RespType::SimpleString:\n  case Common::Redis::RespType::CompositeArray: {\n    pending_response_->asArray()[index].type(Common::Redis::RespType::Error);\n    
pending_response_->asArray()[index].asString() = Response::get().UpstreamProtocolError;\n    error_count_++;\n    break;\n  }\n  case Common::Redis::RespType::Error: {\n    error_count_++;\n    FALLTHRU;\n  }\n  case Common::Redis::RespType::BulkString: {\n    pending_response_->asArray()[index].asString().swap(value->asString());\n    break;\n  }\n  case Common::Redis::RespType::Null:\n    break;\n  }\n\n  ASSERT(num_pending_responses_ > 0);\n  if (--num_pending_responses_ == 0) {\n    updateStats(error_count_ == 0);\n    ENVOY_LOG(debug, \"redis: response: '{}'\", pending_response_->toString());\n    callbacks_.onResponse(std::move(pending_response_));\n  }\n}\n\nSplitRequestPtr MSETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request,\n                                    SplitCallbacks& callbacks, CommandStats& command_stats,\n                                    TimeSource& time_source, bool delay_command_latency) {\n  if ((incoming_request->asArray().size() - 1) % 2 != 0) {\n    onWrongNumberOfArguments(callbacks, *incoming_request);\n    command_stats.error_.inc();\n    return nullptr;\n  }\n  std::unique_ptr<MSETRequest> request_ptr{\n      new MSETRequest(callbacks, command_stats, time_source, delay_command_latency)};\n\n  request_ptr->num_pending_responses_ = (incoming_request->asArray().size() - 1) / 2;\n  request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_);\n\n  request_ptr->pending_response_ = std::make_unique<Common::Redis::RespValue>();\n  request_ptr->pending_response_->type(Common::Redis::RespType::SimpleString);\n\n  Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request);\n  uint32_t fragment_index = 0;\n  for (uint32_t i = 1; i < base_request->asArray().size(); i += 2) {\n    request_ptr->pending_requests_.emplace_back(*request_ptr, fragment_index++);\n    PendingRequest& pending_request = request_ptr->pending_requests_.back();\n\n    const auto route = 
router.upstreamPool(base_request->asArray()[i].asString());\n    if (route) {\n      // Create composite array for a single set command.\n      const Common::Redis::RespValue single_set(\n          base_request, Common::Redis::Utility::SetRequest::instance(), i, i + 1);\n      ENVOY_LOG(debug, \"redis: parallel set: '{}'\", single_set.toString());\n      pending_request.handle_ = makeFragmentedRequest(\n          route, \"set\", base_request->asArray()[i].asString(), single_set, pending_request);\n    }\n\n    if (!pending_request.handle_) {\n      pending_request.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));\n    }\n  }\n\n  if (request_ptr->num_pending_responses_ > 0) {\n    return request_ptr;\n  }\n\n  return nullptr;\n}\n\nvoid MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) {\n  pending_requests_[index].handle_ = nullptr;\n\n  switch (value->type()) {\n  case Common::Redis::RespType::SimpleString: {\n    if (value->asString() == Response::get().OK) {\n      break;\n    }\n    FALLTHRU;\n  }\n  default: {\n    error_count_++;\n    break;\n  }\n  }\n\n  ASSERT(num_pending_responses_ > 0);\n  if (--num_pending_responses_ == 0) {\n    updateStats(error_count_ == 0);\n    if (error_count_ == 0) {\n      pending_response_->asString() = Response::get().OK;\n      callbacks_.onResponse(std::move(pending_response_));\n    } else {\n      callbacks_.onResponse(Common::Redis::Utility::makeError(\n          fmt::format(\"finished with {} error(s)\", error_count_)));\n    }\n  }\n}\n\nSplitRequestPtr\nSplitKeysSumResultRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request,\n                                  SplitCallbacks& callbacks, CommandStats& command_stats,\n                                  TimeSource& time_source, bool delay_command_latency) {\n  std::unique_ptr<SplitKeysSumResultRequest> request_ptr{\n      new SplitKeysSumResultRequest(callbacks, command_stats, 
time_source, delay_command_latency)};\n\n  request_ptr->num_pending_responses_ = incoming_request->asArray().size() - 1;\n  request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_);\n\n  request_ptr->pending_response_ = std::make_unique<Common::Redis::RespValue>();\n  request_ptr->pending_response_->type(Common::Redis::RespType::Integer);\n\n  Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request);\n  for (uint32_t i = 1; i < base_request->asArray().size(); i++) {\n    request_ptr->pending_requests_.emplace_back(*request_ptr, i - 1);\n    PendingRequest& pending_request = request_ptr->pending_requests_.back();\n\n    // Create the composite array for a single fragment.\n    const Common::Redis::RespValue single_fragment(base_request, base_request->asArray()[0], i, i);\n    ENVOY_LOG(debug, \"redis: parallel {}: '{}'\", base_request->asArray()[0].asString(),\n              single_fragment.toString());\n    const auto route = router.upstreamPool(base_request->asArray()[i].asString());\n    if (route) {\n      pending_request.handle_ = makeFragmentedRequest(route, base_request->asArray()[0].asString(),\n                                                      base_request->asArray()[i].asString(),\n                                                      single_fragment, pending_request);\n    }\n\n    if (!pending_request.handle_) {\n      pending_request.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));\n    }\n  }\n\n  if (request_ptr->num_pending_responses_ > 0) {\n    return request_ptr;\n  }\n\n  return nullptr;\n}\n\nvoid SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& value,\n                                                uint32_t index) {\n  pending_requests_[index].handle_ = nullptr;\n\n  switch (value->type()) {\n  case Common::Redis::RespType::Integer: {\n    total_ += value->asInteger();\n    break;\n  }\n  default: {\n    error_count_++;\n    break;\n  }\n  }\n\n  
ASSERT(num_pending_responses_ > 0);\n  if (--num_pending_responses_ == 0) {\n    updateStats(error_count_ == 0);\n    if (error_count_ == 0) {\n      pending_response_->asInteger() = total_;\n      callbacks_.onResponse(std::move(pending_response_));\n    } else {\n      callbacks_.onResponse(Common::Redis::Utility::makeError(\n          fmt::format(\"finished with {} error(s)\", error_count_)));\n    }\n  }\n}\n\nInstanceImpl::InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix,\n                           TimeSource& time_source, bool latency_in_micros,\n                           Common::Redis::FaultManagerPtr&& fault_manager)\n    : router_(std::move(router)), simple_command_handler_(*router_),\n      eval_command_handler_(*router_), mget_handler_(*router_), mset_handler_(*router_),\n      split_keys_sum_result_handler_(*router_),\n      stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix + \"splitter.\"))},\n      time_source_(time_source), fault_manager_(std::move(fault_manager)) {\n  for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) {\n    addHandler(scope, stat_prefix, command, latency_in_micros, simple_command_handler_);\n  }\n\n  for (const std::string& command : Common::Redis::SupportedCommands::evalCommands()) {\n    addHandler(scope, stat_prefix, command, latency_in_micros, eval_command_handler_);\n  }\n\n  for (const std::string& command :\n       Common::Redis::SupportedCommands::hashMultipleSumResultCommands()) {\n    addHandler(scope, stat_prefix, command, latency_in_micros, split_keys_sum_result_handler_);\n  }\n\n  addHandler(scope, stat_prefix, Common::Redis::SupportedCommands::mget(), latency_in_micros,\n             mget_handler_);\n\n  addHandler(scope, stat_prefix, Common::Redis::SupportedCommands::mset(), latency_in_micros,\n             mset_handler_);\n}\n\nSplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request,\n             
                             SplitCallbacks& callbacks,\n                                          Event::Dispatcher& dispatcher) {\n  if ((request->type() != Common::Redis::RespType::Array) || request->asArray().empty()) {\n    onInvalidRequest(callbacks);\n    return nullptr;\n  }\n\n  for (const Common::Redis::RespValue& value : request->asArray()) {\n    if (value.type() != Common::Redis::RespType::BulkString) {\n      onInvalidRequest(callbacks);\n      return nullptr;\n    }\n  }\n\n  std::string to_lower_string = absl::AsciiStrToLower(request->asArray()[0].asString());\n\n  if (to_lower_string == Common::Redis::SupportedCommands::auth()) {\n    if (request->asArray().size() < 2) {\n      onInvalidRequest(callbacks);\n      return nullptr;\n    }\n    if (request->asArray().size() == 3) {\n      callbacks.onAuth(request->asArray()[1].asString(), request->asArray()[2].asString());\n    } else {\n      callbacks.onAuth(request->asArray()[1].asString());\n    }\n\n    return nullptr;\n  }\n\n  if (!callbacks.connectionAllowed()) {\n    callbacks.onResponse(Common::Redis::Utility::makeError(Response::get().AuthRequiredError));\n    return nullptr;\n  }\n\n  if (to_lower_string == Common::Redis::SupportedCommands::ping()) {\n    // Respond to PING locally.\n    Common::Redis::RespValuePtr pong(new Common::Redis::RespValue());\n    pong->type(Common::Redis::RespType::SimpleString);\n    pong->asString() = \"PONG\";\n    callbacks.onResponse(std::move(pong));\n    return nullptr;\n  }\n\n  if (request->asArray().size() < 2) {\n    // Commands other than PING all have at least two arguments.\n    onInvalidRequest(callbacks);\n    return nullptr;\n  }\n\n  // Get the handler for the downstream request\n  auto handler = handler_lookup_table_.find(to_lower_string.c_str());\n  if (handler == nullptr) {\n    stats_.unsupported_command_.inc();\n    callbacks.onResponse(Common::Redis::Utility::makeError(\n        fmt::format(\"unsupported command '{}'\", 
request->asArray()[0].asString())));\n    return nullptr;\n  }\n\n  // Fault Injection Check\n  const Common::Redis::Fault* fault_ptr = fault_manager_->getFaultForCommand(to_lower_string);\n\n  // Check if delay, which determines which callbacks to use. If a delay fault is enabled,\n  // the delay fault itself wraps the request (or other fault) and the delay fault itself\n  // implements the callbacks functions, and in turn calls the real callbacks after injecting\n  // delay on the result of the wrapped request or fault.\n  const bool has_delay_fault =\n      fault_ptr != nullptr && fault_ptr->delayMs() > std::chrono::milliseconds(0);\n  std::unique_ptr<DelayFaultRequest> delay_fault_ptr;\n  if (has_delay_fault) {\n    delay_fault_ptr = DelayFaultRequest::create(callbacks, handler->command_stats_, time_source_,\n                                                dispatcher, fault_ptr->delayMs());\n  }\n\n  // Note that the command_stats_ object of the original request is used for faults, so that our\n  // downstream metrics reflect any faults added (with special fault metrics) or extra latency from\n  // a delay. 2) we use a ternary operator for the callback parameter- we want to use the\n  // delay_fault as callback if there is a delay per the earlier comment.\n  ENVOY_LOG(debug, \"redis: splitting '{}'\", request->toString());\n  handler->command_stats_.total_.inc();\n\n  SplitRequestPtr request_ptr;\n  if (fault_ptr != nullptr && fault_ptr->faultType() == Common::Redis::FaultType::Error) {\n    request_ptr = ErrorFaultRequest::create(has_delay_fault ? *delay_fault_ptr : callbacks,\n                                            handler->command_stats_, time_source_, has_delay_fault);\n  } else {\n    request_ptr = handler->handler_.get().startRequest(\n        std::move(request), has_delay_fault ? *delay_fault_ptr : callbacks, handler->command_stats_,\n        time_source_, has_delay_fault);\n  }\n\n  // Complete delay, if any. 
The delay fault takes ownership of the wrapped request.\n  if (has_delay_fault) {\n    delay_fault_ptr->wrapped_request_ptr_ = std::move(request_ptr);\n    return delay_fault_ptr;\n  } else {\n    return request_ptr;\n  }\n}\n\nvoid InstanceImpl::onInvalidRequest(SplitCallbacks& callbacks) {\n  stats_.invalid_request_.inc();\n  callbacks.onResponse(Common::Redis::Utility::makeError(Response::get().InvalidRequest));\n}\n\nvoid InstanceImpl::addHandler(Stats::Scope& scope, const std::string& stat_prefix,\n                              const std::string& name, bool latency_in_micros,\n                              CommandHandler& handler) {\n  std::string to_lower_name = absl::AsciiStrToLower(name);\n  const std::string command_stat_prefix = fmt::format(\"{}command.{}.\", stat_prefix, to_lower_name);\n  Stats::StatNameManagedStorage storage{command_stat_prefix + std::string(\"latency\"),\n                                        scope.symbolTable()};\n  handler_lookup_table_.add(\n      to_lower_name.c_str(),\n      std::make_shared<HandlerData>(HandlerData{\n          CommandStats{ALL_COMMAND_STATS(POOL_COUNTER_PREFIX(scope, command_stat_prefix))\n                           scope.histogramFromStatName(storage.statName(),\n                                                       latency_in_micros\n                                                           ? Stats::Histogram::Unit::Microseconds\n                                                           : Stats::Histogram::Unit::Milliseconds)},\n          handler}));\n}\n\n} // namespace CommandSplitter\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/command_splitter_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/stats/timespan_impl.h\"\n\n#include \"extensions/filters/network/common/redis/client_impl.h\"\n#include \"extensions/filters/network/common/redis/fault_impl.h\"\n#include \"extensions/filters/network/common/redis/utility.h\"\n#include \"extensions/filters/network/redis_proxy/command_splitter.h\"\n#include \"extensions/filters/network/redis_proxy/conn_pool_impl.h\"\n#include \"extensions/filters/network/redis_proxy/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\nnamespace CommandSplitter {\n\nstruct ResponseValues {\n  const std::string OK = \"OK\";\n  const std::string InvalidRequest = \"invalid request\";\n  const std::string NoUpstreamHost = \"no upstream host\";\n  const std::string UpstreamFailure = \"upstream failure\";\n  const std::string UpstreamProtocolError = \"upstream protocol error\";\n  const std::string AuthRequiredError = \"NOAUTH Authentication required.\";\n};\n\nusing Response = ConstSingleton<ResponseValues>;\n\n/**\n * All command level stats. @see stats_macros.h\n */\n#define ALL_COMMAND_STATS(COUNTER)                                                                 \\\n  COUNTER(total)                                                                                   \\\n  COUNTER(success)                                                                                 \\\n  COUNTER(error)                                                                                   \\\n  COUNTER(error_fault)                                                                             \\\n  COUNTER(delay_fault)\n\n/**\n * Struct definition for all command stats. 
@see stats_macros.h\n */\nstruct CommandStats {\n  ALL_COMMAND_STATS(GENERATE_COUNTER_STRUCT)\n  Envoy::Stats::Histogram& latency_;\n};\n\nclass CommandHandler {\npublic:\n  virtual ~CommandHandler() = default;\n\n  virtual SplitRequestPtr startRequest(Common::Redis::RespValuePtr&& request,\n                                       SplitCallbacks& callbacks, CommandStats& command_stats,\n                                       TimeSource& time_source, bool delay_command_latency) PURE;\n};\n\nclass CommandHandlerBase {\nprotected:\n  CommandHandlerBase(Router& router) : router_(router) {}\n\n  Router& router_;\n};\n\nclass SplitRequestBase : public SplitRequest {\nprotected:\n  static void onWrongNumberOfArguments(SplitCallbacks& callbacks,\n                                       const Common::Redis::RespValue& request);\n  void updateStats(const bool success);\n\n  SplitRequestBase(CommandStats& command_stats, TimeSource& time_source, bool delay_command_latency)\n      : command_stats_(command_stats) {\n    if (!delay_command_latency) {\n      command_latency_ = std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n          command_stats_.latency_, time_source);\n    } else {\n      command_latency_ = nullptr;\n    }\n  }\n  CommandStats& command_stats_;\n  Stats::TimespanPtr command_latency_;\n};\n\n/**\n * SingleServerRequest is a base class for commands that hash to a single backend.\n */\nclass SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallbacks {\npublic:\n  ~SingleServerRequest() override;\n\n  // ConnPool::PoolCallbacks\n  void onResponse(Common::Redis::RespValuePtr&& response) override;\n  void onFailure() override;\n  void onFailure(std::string error_msg);\n\n  // RedisProxy::CommandSplitter::SplitRequest\n  void cancel() override;\n\nprotected:\n  SingleServerRequest(SplitCallbacks& callbacks, CommandStats& command_stats,\n                      TimeSource& time_source, bool delay_command_latency)\n      : 
SplitRequestBase(command_stats, time_source, delay_command_latency), callbacks_(callbacks) {\n  }\n\n  SplitCallbacks& callbacks_;\n  ConnPool::InstanceSharedPtr conn_pool_;\n  Common::Redis::Client::PoolRequest* handle_{};\n  Common::Redis::RespValuePtr incoming_request_;\n};\n\n/**\n * ErrorFaultRequest returns an error.\n */\nclass ErrorFaultRequest : public SingleServerRequest {\npublic:\n  static SplitRequestPtr create(SplitCallbacks& callbacks, CommandStats& command_stats,\n                                TimeSource& time_source, bool has_delaydelay_command_latency_fault);\n\nprivate:\n  ErrorFaultRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source,\n                    bool delay_command_latency)\n      : SingleServerRequest(callbacks, command_stats, time_source, delay_command_latency) {}\n};\n\n/**\n * DelayFaultRequest wraps a request- either a normal request or a fault- and delays it.\n */\nclass DelayFaultRequest : public SplitRequestBase, public SplitCallbacks {\npublic:\n  static std::unique_ptr<DelayFaultRequest>\n  create(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source,\n         Event::Dispatcher& dispatcher, std::chrono::milliseconds delay);\n\n  DelayFaultRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source,\n                    Event::Dispatcher& dispatcher, std::chrono::milliseconds delay)\n      : SplitRequestBase(command_stats, time_source, false), callbacks_(callbacks), delay_(delay) {\n    delay_timer_ = dispatcher.createTimer([this]() -> void { onDelayResponse(); });\n  }\n\n  // SplitCallbacks\n  bool connectionAllowed() override { return callbacks_.connectionAllowed(); }\n  void onAuth(const std::string& password) override { callbacks_.onAuth(password); }\n  void onAuth(const std::string& username, const std::string& password) override {\n    callbacks_.onAuth(username, password);\n  }\n  void onResponse(Common::Redis::RespValuePtr&& 
response) override;\n\n  // RedisProxy::CommandSplitter::SplitRequest\n  void cancel() override;\n\n  SplitRequestPtr wrapped_request_ptr_;\n\nprivate:\n  void onDelayResponse();\n\n  SplitCallbacks& callbacks_;\n  std::chrono::milliseconds delay_;\n  Event::TimerPtr delay_timer_;\n  Common::Redis::RespValuePtr response_;\n};\n\n/**\n * SimpleRequest hashes the first argument as the key.\n */\nclass SimpleRequest : public SingleServerRequest {\npublic:\n  static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request,\n                                SplitCallbacks& callbacks, CommandStats& command_stats,\n                                TimeSource& time_source, bool delay_command_latency);\n\nprivate:\n  SimpleRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source,\n                bool delay_command_latency)\n      : SingleServerRequest(callbacks, command_stats, time_source, delay_command_latency) {}\n};\n\n/**\n * EvalRequest hashes the fourth argument as the key.\n */\nclass EvalRequest : public SingleServerRequest {\npublic:\n  static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request,\n                                SplitCallbacks& callbacks, CommandStats& command_stats,\n                                TimeSource& time_source, bool delay_command_latency);\n\nprivate:\n  EvalRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source,\n              bool delay_command_latency)\n      : SingleServerRequest(callbacks, command_stats, time_source, delay_command_latency) {}\n};\n\n/**\n * FragmentedRequest is a base class for requests that contains multiple keys. An individual request\n * is sent to the appropriate server for each key. 
The responses from all servers are combined and\n * returned to the client.\n */\nclass FragmentedRequest : public SplitRequestBase {\npublic:\n  ~FragmentedRequest() override;\n\n  // RedisProxy::CommandSplitter::SplitRequest\n  void cancel() override;\n\nprotected:\n  FragmentedRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source,\n                    bool delay_command_latency)\n      : SplitRequestBase(command_stats, time_source, delay_command_latency), callbacks_(callbacks) {\n  }\n\n  struct PendingRequest : public ConnPool::PoolCallbacks {\n    PendingRequest(FragmentedRequest& parent, uint32_t index) : parent_(parent), index_(index) {}\n\n    // ConnPool::PoolCallbacks\n    void onResponse(Common::Redis::RespValuePtr&& value) override {\n      parent_.onChildResponse(std::move(value), index_);\n    }\n    void onFailure() override { parent_.onChildFailure(index_); }\n\n    FragmentedRequest& parent_;\n    const uint32_t index_;\n    Common::Redis::Client::PoolRequest* handle_{};\n  };\n\n  virtual void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) PURE;\n  void onChildFailure(uint32_t index);\n\n  SplitCallbacks& callbacks_;\n\n  Common::Redis::RespValuePtr pending_response_;\n  std::vector<PendingRequest> pending_requests_;\n  uint32_t num_pending_responses_;\n  uint32_t error_count_{0};\n};\n\n/**\n * MGETRequest takes each key from the command and sends a GET for each to the appropriate Redis\n * server. 
The response contains the result from each command.\n */\nclass MGETRequest : public FragmentedRequest, Logger::Loggable<Logger::Id::redis> {\npublic:\n  static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request,\n                                SplitCallbacks& callbacks, CommandStats& command_stats,\n                                TimeSource& time_source, bool delay_command_latency);\n\nprivate:\n  MGETRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source,\n              bool delay_command_latency)\n      : FragmentedRequest(callbacks, command_stats, time_source, delay_command_latency) {}\n\n  // RedisProxy::CommandSplitter::FragmentedRequest\n  void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) override;\n};\n\n/**\n * SplitKeysSumResultRequest takes each key from the command and sends the same incoming command\n * with each key to the appropriate Redis server. The response from each Redis (which must be an\n * integer) is summed and returned to the user. 
If there is any error or failure in processing the\n * fragmented commands, an error will be returned.\n */\nclass SplitKeysSumResultRequest : public FragmentedRequest, Logger::Loggable<Logger::Id::redis> {\npublic:\n  static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request,\n                                SplitCallbacks& callbacks, CommandStats& command_stats,\n                                TimeSource& time_source, bool delay_command_latency);\n\nprivate:\n  SplitKeysSumResultRequest(SplitCallbacks& callbacks, CommandStats& command_stats,\n                            TimeSource& time_source, bool delay_command_latency)\n      : FragmentedRequest(callbacks, command_stats, time_source, delay_command_latency) {}\n\n  // RedisProxy::CommandSplitter::FragmentedRequest\n  void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) override;\n\n  int64_t total_{0};\n};\n\n/**\n * MSETRequest takes each key and value pair from the command and sends a SET for each to the\n * appropriate Redis server. 
The response is an OK if all commands succeeded or an ERR if any\n * failed.\n */\nclass MSETRequest : public FragmentedRequest, Logger::Loggable<Logger::Id::redis> {\npublic:\n  static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request,\n                                SplitCallbacks& callbacks, CommandStats& command_stats,\n                                TimeSource& time_source, bool delay_command_latency);\n\nprivate:\n  MSETRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source,\n              bool delay_command_latency)\n      : FragmentedRequest(callbacks, command_stats, time_source, delay_command_latency) {}\n\n  // RedisProxy::CommandSplitter::FragmentedRequest\n  void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) override;\n};\n\n/**\n * CommandHandlerFactory is placed in the command lookup map for each supported command and is used\n * to create Request objects.\n */\ntemplate <class RequestClass>\nclass CommandHandlerFactory : public CommandHandler, CommandHandlerBase {\npublic:\n  CommandHandlerFactory(Router& router) : CommandHandlerBase(router) {}\n  SplitRequestPtr startRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks,\n                               CommandStats& command_stats, TimeSource& time_source,\n                               bool delay_command_latency) override {\n    return RequestClass::create(router_, std::move(request), callbacks, command_stats, time_source,\n                                delay_command_latency);\n  }\n};\n\n/**\n * All splitter stats. @see stats_macros.h\n */\n#define ALL_COMMAND_SPLITTER_STATS(COUNTER)                                                        \\\n  COUNTER(invalid_request)                                                                         \\\n  COUNTER(unsupported_command)\n\n/**\n * Struct definition for all splitter stats. 
@see stats_macros.h\n */\nstruct InstanceStats {\n  ALL_COMMAND_SPLITTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nclass InstanceImpl : public Instance, Logger::Loggable<Logger::Id::redis> {\npublic:\n  InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix,\n               TimeSource& time_source, bool latency_in_micros,\n               Common::Redis::FaultManagerPtr&& fault_manager);\n\n  // RedisProxy::CommandSplitter::Instance\n  SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks,\n                              Event::Dispatcher& dispatcher) override;\n\nprivate:\n  friend class RedisCommandSplitterImplTest;\n\n  struct HandlerData {\n    CommandStats command_stats_;\n    std::reference_wrapper<CommandHandler> handler_;\n  };\n\n  using HandlerDataPtr = std::shared_ptr<HandlerData>;\n\n  void addHandler(Stats::Scope& scope, const std::string& stat_prefix, const std::string& name,\n                  bool latency_in_micros, CommandHandler& handler);\n  void onInvalidRequest(SplitCallbacks& callbacks);\n\n  RouterPtr router_;\n  CommandHandlerFactory<SimpleRequest> simple_command_handler_;\n  CommandHandlerFactory<EvalRequest> eval_command_handler_;\n  CommandHandlerFactory<MGETRequest> mget_handler_;\n  CommandHandlerFactory<MSETRequest> mset_handler_;\n  CommandHandlerFactory<SplitKeysSumResultRequest> split_keys_sum_result_handler_;\n  TrieLookupTable<HandlerDataPtr> handler_lookup_table_;\n  InstanceStats stats_;\n  TimeSource& time_source_;\n  Common::Redis::FaultManagerPtr fault_manager_;\n};\n\n} // namespace CommandSplitter\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/config.cc",
    "content": "#include \"extensions/filters/network/redis_proxy/config.h\"\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\n#include \"extensions/common/redis/cluster_refresh_manager_impl.h\"\n#include \"extensions/filters/network/common/redis/client_impl.h\"\n#include \"extensions/filters/network/common/redis/fault_impl.h\"\n#include \"extensions/filters/network/redis_proxy/command_splitter_impl.h\"\n#include \"extensions/filters/network/redis_proxy/proxy_filter.h\"\n#include \"extensions/filters/network/redis_proxy/router_impl.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nnamespace {\ninline void addUniqueClusters(\n    absl::flat_hash_set<std::string>& clusters,\n    const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes::Route&\n        route) {\n  clusters.emplace(route.cluster());\n  for (auto& mirror : route.request_mirror_policy()) {\n    clusters.emplace(mirror.cluster());\n  }\n}\n} // namespace\n\nNetwork::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy& proto_config,\n    Server::Configuration::FactoryContext& context) {\n\n  ASSERT(!proto_config.stat_prefix().empty());\n  ASSERT(proto_config.has_settings());\n\n  Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager =\n      Extensions::Common::Redis::getClusterRefreshManager(\n          context.singletonManager(), context.dispatcher(), context.clusterManager(),\n          context.timeSource());\n\n  ProxyFilterConfigSharedPtr filter_config(std::make_shared<ProxyFilterConfig>(\n      proto_config, context.scope(), context.drainDecision(), context.runtime(), context.api()));\n\n  
envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes prefix_routes(\n      proto_config.prefix_routes());\n\n  // Set the catch-all route from the deprecated cluster and settings parameters.\n  if (prefix_routes.hidden_envoy_deprecated_catch_all_cluster().empty() &&\n      prefix_routes.routes_size() == 0 && !prefix_routes.has_catch_all_route()) {\n    if (proto_config.hidden_envoy_deprecated_cluster().empty()) {\n      throw EnvoyException(\"cannot configure a redis-proxy without any upstream\");\n    }\n\n    prefix_routes.mutable_catch_all_route()->set_cluster(\n        proto_config.hidden_envoy_deprecated_cluster());\n  } else if (!prefix_routes.hidden_envoy_deprecated_catch_all_cluster().empty() &&\n             !prefix_routes.has_catch_all_route()) {\n    // Set the catch-all route from the deprecated catch-all cluster.\n    prefix_routes.mutable_catch_all_route()->set_cluster(\n        prefix_routes.hidden_envoy_deprecated_catch_all_cluster());\n  }\n\n  absl::flat_hash_set<std::string> unique_clusters;\n  for (auto& route : prefix_routes.routes()) {\n    addUniqueClusters(unique_clusters, route);\n  }\n  addUniqueClusters(unique_clusters, prefix_routes.catch_all_route());\n\n  auto redis_command_stats =\n      Common::Redis::RedisCommandStats::createRedisCommandStats(context.scope().symbolTable());\n\n  Upstreams upstreams;\n  for (auto& cluster : unique_clusters) {\n    Stats::ScopePtr stats_scope =\n        context.scope().createScope(fmt::format(\"cluster.{}.redis_cluster\", cluster));\n    auto conn_pool_ptr = std::make_shared<ConnPool::InstanceImpl>(\n        cluster, context.clusterManager(), Common::Redis::Client::ClientFactoryImpl::instance_,\n        context.threadLocal(), proto_config.settings(), context.api(), std::move(stats_scope),\n        redis_command_stats, refresh_manager);\n    conn_pool_ptr->init();\n    upstreams.emplace(cluster, conn_pool_ptr);\n  }\n\n  auto router =\n      
std::make_unique<PrefixRoutes>(prefix_routes, std::move(upstreams), context.runtime());\n\n  auto fault_manager = std::make_unique<Common::Redis::FaultManagerImpl>(\n      context.api().randomGenerator(), context.runtime(), proto_config.faults());\n\n  std::shared_ptr<CommandSplitter::Instance> splitter =\n      std::make_shared<CommandSplitter::InstanceImpl>(\n          std::move(router), context.scope(), filter_config->stat_prefix_, context.timeSource(),\n          proto_config.latency_in_micros(), std::move(fault_manager));\n  return [splitter, filter_config](Network::FilterManager& filter_manager) -> void {\n    Common::Redis::DecoderFactoryImpl factory;\n    filter_manager.addReadFilter(std::make_shared<ProxyFilter>(\n        factory, Common::Redis::EncoderPtr{new Common::Redis::EncoderImpl()}, *splitter,\n        filter_config));\n  };\n}\n\n/**\n * Static registration for the redis filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(RedisProxyFilterConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory){\"envoy.redis_proxy\"};\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/config/datasource.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nclass ProtocolOptionsConfigImpl : public Upstream::ProtocolOptionsConfig {\npublic:\n  ProtocolOptionsConfigImpl(\n      const envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions&\n          proto_config)\n      : auth_username_(proto_config.auth_username()), auth_password_(proto_config.auth_password()) {\n  }\n\n  std::string authUsername(Api::Api& api) const {\n    return Config::DataSource::read(auth_username_, true, api);\n  }\n\n  std::string authPassword(Api::Api& api) const {\n    return Config::DataSource::read(auth_password_, true, api);\n  }\n\n  static const std::string authUsername(const Upstream::ClusterInfoConstSharedPtr info,\n                                        Api::Api& api) {\n    auto options = info->extensionProtocolOptionsTyped<ProtocolOptionsConfigImpl>(\n        NetworkFilterNames::get().RedisProxy);\n    if (options) {\n      return options->authUsername(api);\n    }\n    return EMPTY_STRING;\n  }\n\n  static const std::string authPassword(const Upstream::ClusterInfoConstSharedPtr info,\n                                        Api::Api& api) {\n    auto options = info->extensionProtocolOptionsTyped<ProtocolOptionsConfigImpl>(\n        NetworkFilterNames::get().RedisProxy);\n    if (options) {\n      return options->authPassword(api);\n    }\n    return EMPTY_STRING;\n  
}\n\nprivate:\n  envoy::config::core::v3::DataSource auth_username_;\n  envoy::config::core::v3::DataSource auth_password_;\n};\n\n/**\n * Config registration for the redis proxy filter. @see NamedNetworkFilterConfigFactory.\n */\nclass RedisProxyFilterConfigFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::network::redis_proxy::v3::RedisProxy,\n          envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions> {\npublic:\n  RedisProxyFilterConfigFactory() : FactoryBase(NetworkFilterNames::get().RedisProxy, true) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n\n  Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsTyped(\n      const envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions&\n          proto_config,\n      Server::Configuration::ProtocolOptionsFactoryContext&) override {\n    return std::make_shared<ProtocolOptionsConfigImpl>(proto_config);\n  }\n};\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/conn_pool.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"extensions/filters/network/common/redis/client.h\"\n#include \"extensions/filters/network/common/redis/codec.h\"\n\n#include \"absl/types/variant.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\nnamespace ConnPool {\n\n/**\n * Outbound request callbacks.\n */\nclass PoolCallbacks {\npublic:\n  virtual ~PoolCallbacks() = default;\n\n  /**\n   * Called when a pipelined response is received.\n   * @param value supplies the response which is now owned by the callee.\n   */\n  virtual void onResponse(Common::Redis::RespValuePtr&& value) PURE;\n\n  /**\n   * Called when a network/protocol error occurs and there is no response.\n   */\n  virtual void onFailure() PURE;\n};\n\n/**\n * A variant that either holds a shared pointer to a single server request or a composite array\n * resp value. This is for performance reason to avoid creating RespValueSharedPtr for each\n * composite arrays.\n */\nusing RespVariant =\n    absl::variant<const Common::Redis::RespValue, Common::Redis::RespValueConstSharedPtr>;\n\n/**\n * A redis connection pool. 
Wraps M connections to N upstream hosts, consistent hashing,\n * pipelining, failure handling, etc.\n */\nclass Instance {\npublic:\n  virtual ~Instance() = default;\n\n  /**\n   * Makes a redis request.\n   * @param hash_key supplies the key to use for consistent hashing.\n   * @param request supplies the request to make.\n   * @param callbacks supplies the request completion callbacks.\n   * @return PoolRequest* a handle to the active request or nullptr if the request could not be made\n   *         for some reason.\n   */\n  virtual Common::Redis::Client::PoolRequest*\n  makeRequest(const std::string& hash_key, RespVariant&& request, PoolCallbacks& callbacks) PURE;\n};\n\nusing InstanceSharedPtr = std::shared_ptr<Instance>;\n\n} // namespace ConnPool\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/conn_pool_impl.cc",
    "content": "#include \"extensions/filters/network/redis_proxy/conn_pool_impl.h\"\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/stats/utility.h\"\n\n#include \"extensions/filters/network/redis_proxy/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\nnamespace ConnPool {\nnamespace {\n// null_pool_callbacks is used for requests that must be filtered and not redirected such as\n// \"asking\".\nCommon::Redis::Client::DoNothingPoolCallbacks null_client_callbacks;\n\nconst Common::Redis::RespValue& getRequest(const RespVariant& request) {\n  if (request.index() == 0) {\n    return absl::get<const Common::Redis::RespValue>(request);\n  } else {\n    return *(absl::get<Common::Redis::RespValueConstSharedPtr>(request));\n  }\n}\n} // namespace\n\nInstanceImpl::InstanceImpl(\n    const std::string& cluster_name, Upstream::ClusterManager& cm,\n    Common::Redis::Client::ClientFactory& client_factory, ThreadLocal::SlotAllocator& tls,\n    const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings&\n        config,\n    Api::Api& api, Stats::ScopePtr&& stats_scope,\n    const Common::Redis::RedisCommandStatsSharedPtr& redis_command_stats,\n    Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager)\n    : cluster_name_(cluster_name), cm_(cm), client_factory_(client_factory),\n      tls_(tls.allocateSlot()), config_(new Common::Redis::Client::ConfigImpl(config)), api_(api),\n      stats_scope_(std::move(stats_scope)),\n      
redis_command_stats_(redis_command_stats), redis_cluster_stats_{REDIS_CLUSTER_STATS(\n                                                     POOL_COUNTER(*stats_scope_))},\n      refresh_manager_(std::move(refresh_manager)) {}\n\nvoid InstanceImpl::init() {\n  // Note: `this` and `cluster_name` have a a lifetime of the filter.\n  // That may be shorter than the tls callback if the listener is torn down shortly after it is\n  // created. We use a weak pointer to make sure this object outlives the tls callbacks.\n  std::weak_ptr<InstanceImpl> this_weak_ptr = this->shared_from_this();\n  tls_->set(\n      [this_weak_ptr](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n        if (auto this_shared_ptr = this_weak_ptr.lock()) {\n          return std::make_shared<ThreadLocalPool>(this_shared_ptr, dispatcher,\n                                                   this_shared_ptr->cluster_name_);\n        }\n        return nullptr;\n      });\n}\n\n// This method is always called from a InstanceSharedPtr we don't have to worry about tls_->getTyped\n// failing due to InstanceImpl going away.\nCommon::Redis::Client::PoolRequest*\nInstanceImpl::makeRequest(const std::string& key, RespVariant&& request, PoolCallbacks& callbacks) {\n  return tls_->getTyped<ThreadLocalPool>().makeRequest(key, std::move(request), callbacks);\n}\n\n// This method is always called from a InstanceSharedPtr we don't have to worry about tls_->getTyped\n// failing due to InstanceImpl going away.\nCommon::Redis::Client::PoolRequest*\nInstanceImpl::makeRequestToHost(const std::string& host_address,\n                                const Common::Redis::RespValue& request,\n                                Common::Redis::Client::ClientCallbacks& callbacks) {\n  return tls_->getTyped<ThreadLocalPool>().makeRequestToHost(host_address, request, callbacks);\n}\n\nInstanceImpl::ThreadLocalPool::ThreadLocalPool(std::shared_ptr<InstanceImpl> parent,\n                                         
      Event::Dispatcher& dispatcher,\n                                               std::string cluster_name)\n    : parent_(parent), dispatcher_(dispatcher), cluster_name_(std::move(cluster_name)),\n      drain_timer_(dispatcher.createTimer([this]() -> void { drainClients(); })),\n      is_redis_cluster_(false), client_factory_(parent->client_factory_), config_(parent->config_),\n      stats_scope_(parent->stats_scope_), redis_command_stats_(parent->redis_command_stats_),\n      redis_cluster_stats_(parent->redis_cluster_stats_),\n      refresh_manager_(parent->refresh_manager_) {\n  cluster_update_handle_ = parent->cm_.addThreadLocalClusterUpdateCallbacks(*this);\n  Upstream::ThreadLocalCluster* cluster = parent->cm_.get(cluster_name_);\n  if (cluster != nullptr) {\n    auth_username_ = ProtocolOptionsConfigImpl::authUsername(cluster->info(), parent->api_);\n    auth_password_ = ProtocolOptionsConfigImpl::authPassword(cluster->info(), parent->api_);\n    onClusterAddOrUpdateNonVirtual(*cluster);\n  }\n}\n\nInstanceImpl::ThreadLocalPool::~ThreadLocalPool() {\n  if (host_set_member_update_cb_handle_ != nullptr) {\n    host_set_member_update_cb_handle_->remove();\n  }\n  while (!pending_requests_.empty()) {\n    pending_requests_.pop_front();\n  }\n  while (!client_map_.empty()) {\n    client_map_.begin()->second->redis_client_->close();\n  }\n  while (!clients_to_drain_.empty()) {\n    (*clients_to_drain_.begin())->redis_client_->close();\n  }\n}\n\nvoid InstanceImpl::ThreadLocalPool::onClusterAddOrUpdateNonVirtual(\n    Upstream::ThreadLocalCluster& cluster) {\n  if (cluster.info()->name() != cluster_name_) {\n    return;\n  }\n  // Ensure the filter is not deleted in the main thread during this method.\n  auto shared_parent = parent_.lock();\n  if (!shared_parent) {\n    return;\n  }\n\n  if (cluster_ != nullptr) {\n    // Treat an update as a removal followed by an add.\n    ThreadLocalPool::onClusterRemoval(cluster_name_);\n  }\n\n  ASSERT(cluster_ == 
nullptr);\n  cluster_ = &cluster;\n  ASSERT(host_set_member_update_cb_handle_ == nullptr);\n  host_set_member_update_cb_handle_ = cluster_->prioritySet().addMemberUpdateCb(\n      [this](const std::vector<Upstream::HostSharedPtr>& hosts_added,\n             const std::vector<Upstream::HostSharedPtr>& hosts_removed) -> void {\n        onHostsAdded(hosts_added);\n        onHostsRemoved(hosts_removed);\n      });\n\n  ASSERT(host_address_map_.empty());\n  for (const auto& i : cluster_->prioritySet().hostSetsPerPriority()) {\n    for (auto& host : i->hosts()) {\n      host_address_map_[host->address()->asString()] = host;\n    }\n  }\n\n  // Figure out if the cluster associated with this ConnPool is a Redis cluster\n  // with its own hash slot sharding scheme and ability to dynamically discover\n  // its members. This is done once to minimize overhead in the data path, makeRequest() in\n  // particular.\n  Upstream::ClusterInfoConstSharedPtr info = cluster_->info();\n  const auto& cluster_type = info->clusterType();\n  is_redis_cluster_ = info->lbType() == Upstream::LoadBalancerType::ClusterProvided &&\n                      cluster_type.has_value() &&\n                      cluster_type->name() == Extensions::Clusters::ClusterTypes::get().Redis;\n}\n\nvoid InstanceImpl::ThreadLocalPool::onClusterRemoval(const std::string& cluster_name) {\n  if (cluster_name != cluster_name_) {\n    return;\n  }\n\n  // Treat cluster removal as a removal of all hosts. 
Close all connections and fail all pending\n  // requests.\n  if (host_set_member_update_cb_handle_ != nullptr) {\n    host_set_member_update_cb_handle_->remove();\n    host_set_member_update_cb_handle_ = nullptr;\n  }\n  while (!client_map_.empty()) {\n    client_map_.begin()->second->redis_client_->close();\n  }\n  while (!clients_to_drain_.empty()) {\n    (*clients_to_drain_.begin())->redis_client_->close();\n  }\n\n  cluster_ = nullptr;\n  host_address_map_.clear();\n}\n\nvoid InstanceImpl::ThreadLocalPool::onHostsAdded(\n    const std::vector<Upstream::HostSharedPtr>& hosts_added) {\n  for (const auto& host : hosts_added) {\n    std::string host_address = host->address()->asString();\n    // Insert new host into address map, possibly overwriting a previous host's entry.\n    host_address_map_[host_address] = host;\n    for (const auto& created_host : created_via_redirect_hosts_) {\n      if (created_host->address()->asString() == host_address) {\n        // Remove our \"temporary\" host created in makeRequestToHost().\n        onHostsRemoved({created_host});\n        created_via_redirect_hosts_.remove(created_host);\n        break;\n      }\n    }\n  }\n}\n\nvoid InstanceImpl::ThreadLocalPool::onHostsRemoved(\n    const std::vector<Upstream::HostSharedPtr>& hosts_removed) {\n  for (const auto& host : hosts_removed) {\n    auto it = client_map_.find(host);\n    if (it != client_map_.end()) {\n      if (it->second->redis_client_->active()) {\n        // Put the ThreadLocalActiveClient to the side to drain.\n        clients_to_drain_.push_back(std::move(it->second));\n        client_map_.erase(it);\n        if (!drain_timer_->enabled()) {\n          drain_timer_->enableTimer(std::chrono::seconds(1));\n        }\n      } else {\n        // There are no pending requests so close the connection.\n        it->second->redis_client_->close();\n      }\n    }\n    // There is the possibility that multiple hosts with the same address\n    // are registered in 
host_address_map_ given that hosts may be created\n    // upon redirection or supplied as part of the cluster's definition.\n    auto it2 = host_address_map_.find(host->address()->asString());\n    if ((it2 != host_address_map_.end()) && (it2->second == host)) {\n      host_address_map_.erase(it2);\n    }\n  }\n}\n\nvoid InstanceImpl::ThreadLocalPool::drainClients() {\n  while (!clients_to_drain_.empty() && !(*clients_to_drain_.begin())->redis_client_->active()) {\n    (*clients_to_drain_.begin())->redis_client_->close();\n  }\n  if (!clients_to_drain_.empty()) {\n    drain_timer_->enableTimer(std::chrono::seconds(1));\n  }\n}\n\nInstanceImpl::ThreadLocalActiveClientPtr&\nInstanceImpl::ThreadLocalPool::threadLocalActiveClient(Upstream::HostConstSharedPtr host) {\n  ThreadLocalActiveClientPtr& client = client_map_[host];\n  if (!client) {\n    client = std::make_unique<ThreadLocalActiveClient>(*this);\n    client->host_ = host;\n    client->redis_client_ =\n        client_factory_.create(host, dispatcher_, *config_, redis_command_stats_, *(stats_scope_),\n                               auth_username_, auth_password_);\n    client->redis_client_->addConnectionCallbacks(*client);\n  }\n  return client;\n}\n\nCommon::Redis::Client::PoolRequest*\nInstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, RespVariant&& request,\n                                           PoolCallbacks& callbacks) {\n  if (cluster_ == nullptr) {\n    ASSERT(client_map_.empty());\n    ASSERT(host_set_member_update_cb_handle_ == nullptr);\n    return nullptr;\n  }\n\n  Clusters::Redis::RedisLoadBalancerContextImpl lb_context(key, config_->enableHashtagging(),\n                                                           is_redis_cluster_, getRequest(request),\n                                                           config_->readPolicy());\n  Upstream::HostConstSharedPtr host = cluster_->loadBalancer().chooseHost(&lb_context);\n  if (!host) {\n    return nullptr;\n  }\n  
pending_requests_.emplace_back(*this, std::move(request), callbacks);\n  PendingRequest& pending_request = pending_requests_.back();\n  ThreadLocalActiveClientPtr& client = this->threadLocalActiveClient(host);\n  pending_request.request_handler_ = client->redis_client_->makeRequest(\n      getRequest(pending_request.incoming_request_), pending_request);\n  if (pending_request.request_handler_) {\n    return &pending_request;\n  } else {\n    onRequestCompleted();\n    return nullptr;\n  }\n}\n\nCommon::Redis::Client::PoolRequest* InstanceImpl::ThreadLocalPool::makeRequestToHost(\n    const std::string& host_address, const Common::Redis::RespValue& request,\n    Common::Redis::Client::ClientCallbacks& callbacks) {\n  if (cluster_ == nullptr) {\n    ASSERT(client_map_.empty());\n    ASSERT(host_set_member_update_cb_handle_ == nullptr);\n    return nullptr;\n  }\n\n  auto colon_pos = host_address.rfind(':');\n  if ((colon_pos == std::string::npos) || (colon_pos == (host_address.size() - 1))) {\n    return nullptr;\n  }\n\n  const std::string ip_address = host_address.substr(0, colon_pos);\n  const bool ipv6 = (ip_address.find(':') != std::string::npos);\n  std::string host_address_map_key;\n  Network::Address::InstanceConstSharedPtr address_ptr;\n\n  if (!ipv6) {\n    host_address_map_key = host_address;\n  } else {\n    const auto ip_port = absl::string_view(host_address).substr(colon_pos + 1);\n    uint32_t ip_port_number;\n    if (!absl::SimpleAtoi(ip_port, &ip_port_number) || (ip_port_number > 65535)) {\n      return nullptr;\n    }\n    try {\n      address_ptr = std::make_shared<Network::Address::Ipv6Instance>(ip_address, ip_port_number);\n    } catch (const EnvoyException&) {\n      return nullptr;\n    }\n    host_address_map_key = address_ptr->asString();\n  }\n\n  auto it = host_address_map_.find(host_address_map_key);\n  if (it == host_address_map_.end()) {\n    // This host is not known to the cluster manager. 
Create a new host and insert it into the map.\n    if (created_via_redirect_hosts_.size() == config_->maxUpstreamUnknownConnections()) {\n      // Too many upstream connections to unknown hosts have been created.\n      redis_cluster_stats_.max_upstream_unknown_connections_reached_.inc();\n      return nullptr;\n    }\n    if (!ipv6) {\n      // Only create an IPv4 address instance if we need a new Upstream::HostImpl.\n      const auto ip_port = absl::string_view(host_address).substr(colon_pos + 1);\n      uint32_t ip_port_number;\n      if (!absl::SimpleAtoi(ip_port, &ip_port_number) || (ip_port_number > 65535)) {\n        return nullptr;\n      }\n      try {\n        address_ptr = std::make_shared<Network::Address::Ipv4Instance>(ip_address, ip_port_number);\n      } catch (const EnvoyException&) {\n        return nullptr;\n      }\n    }\n    Upstream::HostSharedPtr new_host{new Upstream::HostImpl(\n        cluster_->info(), \"\", address_ptr, nullptr, 1, envoy::config::core::v3::Locality(),\n        envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0,\n        envoy::config::core::v3::UNKNOWN)};\n    host_address_map_[host_address_map_key] = new_host;\n    created_via_redirect_hosts_.push_back(new_host);\n    it = host_address_map_.find(host_address_map_key);\n  }\n\n  ThreadLocalActiveClientPtr& client = threadLocalActiveClient(it->second);\n\n  return client->redis_client_->makeRequest(request, callbacks);\n}\n\nvoid InstanceImpl::ThreadLocalPool::onRequestCompleted() {\n  ASSERT(!pending_requests_.empty());\n\n  // The response we got might not be in order, so flush out what we can. 
(A new response may\n  // unlock several out of order responses).\n  while (!pending_requests_.empty() && !pending_requests_.front().request_handler_) {\n    pending_requests_.pop_front();\n  }\n}\n\nvoid InstanceImpl::ThreadLocalActiveClient::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    auto client_to_delete = parent_.client_map_.find(host_);\n    if (client_to_delete != parent_.client_map_.end()) {\n      parent_.dispatcher_.deferredDelete(std::move(redis_client_));\n      parent_.client_map_.erase(client_to_delete);\n    } else {\n      for (auto it = parent_.clients_to_drain_.begin(); it != parent_.clients_to_drain_.end();\n           it++) {\n        if ((*it).get() == this) {\n          if (!redis_client_->active()) {\n            parent_.redis_cluster_stats_.upstream_cx_drained_.inc();\n          }\n          parent_.dispatcher_.deferredDelete(std::move(redis_client_));\n          parent_.clients_to_drain_.erase(it);\n          break;\n        }\n      }\n    }\n  }\n}\n\nInstanceImpl::PendingRequest::PendingRequest(InstanceImpl::ThreadLocalPool& parent,\n                                             RespVariant&& incoming_request,\n                                             PoolCallbacks& pool_callbacks)\n    : parent_(parent), incoming_request_(std::move(incoming_request)),\n      pool_callbacks_(pool_callbacks) {}\n\nInstanceImpl::PendingRequest::~PendingRequest() {\n  if (request_handler_) {\n    request_handler_->cancel();\n    request_handler_ = nullptr;\n    // If we have to cancel the request on the client, then we'll treat this as failure for pool\n    // callback\n    pool_callbacks_.onFailure();\n  }\n}\n\nvoid InstanceImpl::PendingRequest::onResponse(Common::Redis::RespValuePtr&& response) {\n  request_handler_ = nullptr;\n  pool_callbacks_.onResponse(std::move(response));\n  parent_.onRequestCompleted();\n}\n\nvoid 
InstanceImpl::PendingRequest::onFailure() {\n  request_handler_ = nullptr;\n  pool_callbacks_.onFailure();\n  parent_.refresh_manager_->onFailure(parent_.cluster_name_);\n  parent_.onRequestCompleted();\n}\n\nbool InstanceImpl::PendingRequest::onRedirection(Common::Redis::RespValuePtr&& value,\n                                                 const std::string& host_address,\n                                                 bool ask_redirection) {\n  // Prepend request with an asking command if redirected via an ASK error. The returned handle is\n  // not important since there is no point in being able to cancel the request. The use of\n  // null_pool_callbacks ensures the transparent filtering of the Redis server's response to the\n  // \"asking\" command; this is fine since the server either responds with an OK or an error message\n  // if cluster support is not enabled (in which case we should not get an ASK redirection error).\n  if (ask_redirection &&\n      !parent_.makeRequestToHost(host_address, Common::Redis::Utility::AskingRequest::instance(),\n                                 null_client_callbacks)) {\n    onResponse(std::move(value));\n    return false;\n  }\n  request_handler_ = parent_.makeRequestToHost(host_address, getRequest(incoming_request_), *this);\n  if (!request_handler_) {\n    onResponse(std::move(value));\n    return false;\n  } else {\n    parent_.refresh_manager_->onRedirection(parent_.cluster_name_);\n    return true;\n  }\n}\n\nvoid InstanceImpl::PendingRequest::cancel() {\n  request_handler_->cancel();\n  request_handler_ = nullptr;\n  parent_.onRequestCompleted();\n}\n\n} // namespace ConnPool\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/conn_pool_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/filter_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"source/extensions/clusters/redis/redis_cluster_lb.h\"\n\n#include \"extensions/common/redis/cluster_refresh_manager.h\"\n#include \"extensions/filters/network/common/redis/client.h\"\n#include \"extensions/filters/network/common/redis/client_impl.h\"\n#include \"extensions/filters/network/common/redis/codec_impl.h\"\n#include \"extensions/filters/network/common/redis/utility.h\"\n#include \"extensions/filters/network/redis_proxy/conn_pool.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\nnamespace ConnPool {\n\n// TODO(mattklein123): Circuit breaking\n// TODO(rshriram): Fault injection\n\n#define REDIS_CLUSTER_STATS(COUNTER)                                                               \\\n  COUNTER(upstream_cx_drained)                                                                     \\\n  COUNTER(max_upstream_unknown_connections_reached)\n\nstruct RedisClusterStats {\n  REDIS_CLUSTER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nclass DoNothingPoolCallbacks : public PoolCallbacks {\npublic:\n  void onResponse(Common::Redis::RespValuePtr&&) override{};\n  void onFailure() override{};\n};\n\nclass InstanceImpl : public Instance, public std::enable_shared_from_this<InstanceImpl> {\npublic:\n  
InstanceImpl(\n      const std::string& cluster_name, Upstream::ClusterManager& cm,\n      Common::Redis::Client::ClientFactory& client_factory, ThreadLocal::SlotAllocator& tls,\n      const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings&\n          config,\n      Api::Api& api, Stats::ScopePtr&& stats_scope,\n      const Common::Redis::RedisCommandStatsSharedPtr& redis_command_stats,\n      Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager);\n  // RedisProxy::ConnPool::Instance\n  Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, RespVariant&& request,\n                                                  PoolCallbacks& callbacks) override;\n  /**\n   * Makes a redis request based on IP address and TCP port of the upstream host (e.g.,\n   * moved/ask cluster redirection). This is now only kept mostly for testing.\n   * @param host_address supplies the IP address and TCP port of the upstream host to receive\n   * the request.\n   * @param request supplies the Redis request to make.\n   * @param callbacks supplies the request completion callbacks.\n   * @return PoolRequest* a handle to the active request or nullptr if the request could not be\n   * made for some reason.\n   */\n  Common::Redis::Client::PoolRequest*\n  makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request,\n                    Common::Redis::Client::ClientCallbacks& callbacks);\n\n  void init();\n\n  // Allow the unit test to have access to private members.\n  friend class RedisConnPoolImplTest;\n\nprivate:\n  struct ThreadLocalPool;\n\n  struct ThreadLocalActiveClient : public Network::ConnectionCallbacks {\n    ThreadLocalActiveClient(ThreadLocalPool& parent) : parent_(parent) {}\n\n    // Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override;\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override 
{}\n\n    ThreadLocalPool& parent_;\n    Upstream::HostConstSharedPtr host_;\n    Common::Redis::Client::ClientPtr redis_client_;\n  };\n\n  using ThreadLocalActiveClientPtr = std::unique_ptr<ThreadLocalActiveClient>;\n\n  struct PendingRequest : public Common::Redis::Client::ClientCallbacks,\n                          public Common::Redis::Client::PoolRequest {\n    PendingRequest(ThreadLocalPool& parent, RespVariant&& incoming_request,\n                   PoolCallbacks& pool_callbacks);\n    ~PendingRequest() override;\n\n    // Common::Redis::Client::ClientCallbacks\n    void onResponse(Common::Redis::RespValuePtr&& response) override;\n    void onFailure() override;\n    bool onRedirection(Common::Redis::RespValuePtr&& value, const std::string& host_address,\n                       bool ask_redirection) override;\n\n    // PoolRequest\n    void cancel() override;\n\n    ThreadLocalPool& parent_;\n    const RespVariant incoming_request_;\n    Common::Redis::Client::PoolRequest* request_handler_;\n    PoolCallbacks& pool_callbacks_;\n  };\n\n  struct ThreadLocalPool : public ThreadLocal::ThreadLocalObject,\n                           public Upstream::ClusterUpdateCallbacks {\n    ThreadLocalPool(std::shared_ptr<InstanceImpl> parent, Event::Dispatcher& dispatcher,\n                    std::string cluster_name);\n    ~ThreadLocalPool() override;\n    ThreadLocalActiveClientPtr& threadLocalActiveClient(Upstream::HostConstSharedPtr host);\n    Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, RespVariant&& request,\n                                                    PoolCallbacks& callbacks);\n    Common::Redis::Client::PoolRequest*\n    makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request,\n                      Common::Redis::Client::ClientCallbacks& callbacks);\n\n    void onClusterAddOrUpdateNonVirtual(Upstream::ThreadLocalCluster& cluster);\n    void onHostsAdded(const 
std::vector<Upstream::HostSharedPtr>& hosts_added);\n    void onHostsRemoved(const std::vector<Upstream::HostSharedPtr>& hosts_removed);\n    void drainClients();\n\n    // Upstream::ClusterUpdateCallbacks\n    void onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) override {\n      onClusterAddOrUpdateNonVirtual(cluster);\n    }\n    void onClusterRemoval(const std::string& cluster_name) override;\n\n    void onRequestCompleted();\n\n    std::weak_ptr<InstanceImpl> parent_;\n    Event::Dispatcher& dispatcher_;\n    const std::string cluster_name_;\n    Upstream::ClusterUpdateCallbacksHandlePtr cluster_update_handle_;\n    Upstream::ThreadLocalCluster* cluster_{};\n    absl::node_hash_map<Upstream::HostConstSharedPtr, ThreadLocalActiveClientPtr> client_map_;\n    Envoy::Common::CallbackHandle* host_set_member_update_cb_handle_{};\n    absl::node_hash_map<std::string, Upstream::HostConstSharedPtr> host_address_map_;\n    std::string auth_username_;\n    std::string auth_password_;\n    std::list<Upstream::HostSharedPtr> created_via_redirect_hosts_;\n    std::list<ThreadLocalActiveClientPtr> clients_to_drain_;\n    std::list<PendingRequest> pending_requests_;\n\n    /* This timer is used to poll the active clients in clients_to_drain_ to determine whether they\n     * have been drained (have no active requests) or not. It is only enabled after a client has\n     * been added to clients_to_drain_, and is only re-enabled as long as that list is not empty. 
A\n     * timer is being used as opposed to using a callback to avoid adding a check of\n     * clients_to_drain_ to the main data code path as this should only rarely be not empty.\n     */\n    Event::TimerPtr drain_timer_;\n    bool is_redis_cluster_;\n    Common::Redis::Client::ClientFactory& client_factory_;\n    Common::Redis::Client::ConfigSharedPtr config_;\n    Stats::ScopeSharedPtr stats_scope_;\n    Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_;\n    RedisClusterStats redis_cluster_stats_;\n    const Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_;\n  };\n\n  const std::string cluster_name_;\n  Upstream::ClusterManager& cm_;\n  Common::Redis::Client::ClientFactory& client_factory_;\n  ThreadLocal::SlotPtr tls_;\n  Common::Redis::Client::ConfigSharedPtr config_;\n  Api::Api& api_;\n  Stats::ScopeSharedPtr stats_scope_;\n  Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_;\n  RedisClusterStats redis_cluster_stats_;\n  const Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_;\n};\n\n} // namespace ConnPool\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/proxy_filter.cc",
    "content": "#include \"extensions/filters/network/redis_proxy/proxy_filter.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/datasource.h\"\n#include \"common/config/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nProxyFilterConfig::ProxyFilterConfig(\n    const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy& config,\n    Stats::Scope& scope, const Network::DrainDecision& drain_decision, Runtime::Loader& runtime,\n    Api::Api& api)\n    : drain_decision_(drain_decision), runtime_(runtime),\n      stat_prefix_(fmt::format(\"redis.{}.\", config.stat_prefix())),\n      stats_(generateStats(stat_prefix_, scope)),\n      downstream_auth_username_(\n          Config::DataSource::read(config.downstream_auth_username(), true, api)),\n      downstream_auth_password_(\n          Config::DataSource::read(config.downstream_auth_password(), true, api)) {}\n\nProxyStats ProxyFilterConfig::generateStats(const std::string& prefix, Stats::Scope& scope) {\n  return {\n      ALL_REDIS_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix), POOL_GAUGE_PREFIX(scope, prefix))};\n}\n\nProxyFilter::ProxyFilter(Common::Redis::DecoderFactory& factory,\n                         Common::Redis::EncoderPtr&& encoder, CommandSplitter::Instance& splitter,\n                         ProxyFilterConfigSharedPtr config)\n    : decoder_(factory.create(*this)), encoder_(std::move(encoder)), splitter_(splitter),\n      config_(config) {\n  config_->stats_.downstream_cx_total_.inc();\n  config_->stats_.downstream_cx_active_.inc();\n  connection_allowed_ =\n      config_->downstream_auth_username_.empty() && config_->downstream_auth_password_.empty();\n}\n\nProxyFilter::~ProxyFilter() {\n  
ASSERT(pending_requests_.empty());\n  config_->stats_.downstream_cx_active_.dec();\n}\n\nvoid ProxyFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n  callbacks_->connection().addConnectionCallbacks(*this);\n  callbacks_->connection().setConnectionStats({config_->stats_.downstream_cx_rx_bytes_total_,\n                                               config_->stats_.downstream_cx_rx_bytes_buffered_,\n                                               config_->stats_.downstream_cx_tx_bytes_total_,\n                                               config_->stats_.downstream_cx_tx_bytes_buffered_,\n                                               nullptr, nullptr});\n}\n\nvoid ProxyFilter::onRespValue(Common::Redis::RespValuePtr&& value) {\n  pending_requests_.emplace_back(*this);\n  PendingRequest& request = pending_requests_.back();\n  CommandSplitter::SplitRequestPtr split =\n      splitter_.makeRequest(std::move(value), request, callbacks_->connection().dispatcher());\n  if (split) {\n    // The splitter can immediately respond and destroy the pending request. 
Only store the handle\n    // if the request is still alive.\n    request.request_handle_ = std::move(split);\n  }\n}\n\nvoid ProxyFilter::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    while (!pending_requests_.empty()) {\n      if (pending_requests_.front().request_handle_ != nullptr) {\n        pending_requests_.front().request_handle_->cancel();\n      }\n      pending_requests_.pop_front();\n    }\n  }\n}\n\nvoid ProxyFilter::onAuth(PendingRequest& request, const std::string& password) {\n  Common::Redis::RespValuePtr response{new Common::Redis::RespValue()};\n  if (config_->downstream_auth_password_.empty()) {\n    response->type(Common::Redis::RespType::Error);\n    response->asString() = \"ERR Client sent AUTH, but no password is set\";\n  } else if (password == config_->downstream_auth_password_) {\n    response->type(Common::Redis::RespType::SimpleString);\n    response->asString() = \"OK\";\n    connection_allowed_ = true;\n  } else {\n    response->type(Common::Redis::RespType::Error);\n    response->asString() = \"ERR invalid password\";\n    connection_allowed_ = false;\n  }\n  request.onResponse(std::move(response));\n}\n\nvoid ProxyFilter::onAuth(PendingRequest& request, const std::string& username,\n                         const std::string& password) {\n  Common::Redis::RespValuePtr response{new Common::Redis::RespValue()};\n  if (config_->downstream_auth_username_.empty() && config_->downstream_auth_password_.empty()) {\n    response->type(Common::Redis::RespType::Error);\n    response->asString() = \"ERR Client sent AUTH, but no username-password pair is set\";\n  } else if (config_->downstream_auth_username_.empty() && username == \"default\" &&\n             password == config_->downstream_auth_password_) {\n    // empty username and \"default\" are synonymous in Redis 6 ACLs\n    
response->type(Common::Redis::RespType::SimpleString);\n    response->asString() = \"OK\";\n    connection_allowed_ = true;\n  } else if (username == config_->downstream_auth_username_ &&\n             password == config_->downstream_auth_password_) {\n    response->type(Common::Redis::RespType::SimpleString);\n    response->asString() = \"OK\";\n    connection_allowed_ = true;\n  } else {\n    response->type(Common::Redis::RespType::Error);\n    response->asString() = \"WRONGPASS invalid username-password pair\";\n    connection_allowed_ = false;\n  }\n  request.onResponse(std::move(response));\n}\n\nvoid ProxyFilter::onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value) {\n  ASSERT(!pending_requests_.empty());\n  request.pending_response_ = std::move(value);\n  request.request_handle_ = nullptr;\n\n  // The response we got might not be in order, so flush out what we can. (A new response may\n  // unlock several out of order responses).\n  while (!pending_requests_.empty() && pending_requests_.front().pending_response_) {\n    encoder_->encode(*pending_requests_.front().pending_response_, encoder_buffer_);\n    pending_requests_.pop_front();\n  }\n\n  if (encoder_buffer_.length() > 0) {\n    callbacks_->connection().write(encoder_buffer_, false);\n  }\n\n  // Check for drain close only if there are no pending responses.\n  if (pending_requests_.empty() && config_->drain_decision_.drainClose() &&\n      config_->runtime_.snapshot().featureEnabled(config_->redis_drain_close_runtime_key_, 100)) {\n    config_->stats_.downstream_cx_drain_close_.inc();\n    callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n  }\n}\n\nNetwork::FilterStatus ProxyFilter::onData(Buffer::Instance& data, bool) {\n  try {\n    decoder_->decode(data);\n    return Network::FilterStatus::Continue;\n  } catch (Common::Redis::ProtocolError&) {\n    config_->stats_.downstream_cx_protocol_error_.inc();\n    Common::Redis::RespValue error;\n    
error.type(Common::Redis::RespType::Error);\n    error.asString() = \"downstream protocol error\";\n    encoder_->encode(error, encoder_buffer_);\n    callbacks_->connection().write(encoder_buffer_, false);\n    callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n    return Network::FilterStatus::StopIteration;\n  }\n}\n\nProxyFilter::PendingRequest::PendingRequest(ProxyFilter& parent) : parent_(parent) {\n  parent.config_->stats_.downstream_rq_total_.inc();\n  parent.config_->stats_.downstream_rq_active_.inc();\n}\n\nProxyFilter::PendingRequest::~PendingRequest() {\n  parent_.config_->stats_.downstream_rq_active_.dec();\n}\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/proxy_filter.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/network/drain_decision.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/common/redis/codec.h\"\n#include \"extensions/filters/network/redis_proxy/command_splitter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\n/**\n * All redis proxy stats. @see stats_macros.h\n */\n#define ALL_REDIS_PROXY_STATS(COUNTER, GAUGE)                                                      \\\n  COUNTER(downstream_cx_drain_close)                                                               \\\n  COUNTER(downstream_cx_protocol_error)                                                            \\\n  COUNTER(downstream_cx_rx_bytes_total)                                                            \\\n  COUNTER(downstream_cx_total)                                                                     \\\n  COUNTER(downstream_cx_tx_bytes_total)                                                            \\\n  COUNTER(downstream_rq_total)                                                                     \\\n  GAUGE(downstream_cx_active, Accumulate)                                                          \\\n  GAUGE(downstream_cx_rx_bytes_buffered, Accumulate)                                               \\\n  GAUGE(downstream_cx_tx_bytes_buffered, Accumulate)                                               \\\n  GAUGE(downstream_rq_active, Accumulate)\n\n/**\n * Struct definition for all redis proxy stats. 
@see stats_macros.h\n */\nstruct ProxyStats {\n  ALL_REDIS_PROXY_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Configuration for the redis proxy filter.\n */\nclass ProxyFilterConfig {\npublic:\n  ProxyFilterConfig(const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy& config,\n                    Stats::Scope& scope, const Network::DrainDecision& drain_decision,\n                    Runtime::Loader& runtime, Api::Api& api);\n\n  const Network::DrainDecision& drain_decision_;\n  Runtime::Loader& runtime_;\n  const std::string stat_prefix_;\n  const std::string redis_drain_close_runtime_key_{\"redis.drain_close_enabled\"};\n  ProxyStats stats_;\n  const std::string downstream_auth_username_;\n  const std::string downstream_auth_password_;\n\nprivate:\n  static ProxyStats generateStats(const std::string& prefix, Stats::Scope& scope);\n};\n\nusing ProxyFilterConfigSharedPtr = std::shared_ptr<ProxyFilterConfig>;\n\n/**\n * A redis multiplexing proxy filter. 
This filter will take incoming redis pipelined commands, and\n * multiplex them onto a consistently hashed connection pool of backend servers.\n */\nclass ProxyFilter : public Network::ReadFilter,\n                    public Common::Redis::DecoderCallbacks,\n                    public Network::ConnectionCallbacks {\npublic:\n  ProxyFilter(Common::Redis::DecoderFactory& factory, Common::Redis::EncoderPtr&& encoder,\n              CommandSplitter::Instance& splitter, ProxyFilterConfigSharedPtr config);\n  ~ProxyFilter() override;\n\n  // Network::ReadFilter\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override;\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; }\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n  // Common::Redis::DecoderCallbacks\n  void onRespValue(Common::Redis::RespValuePtr&& value) override;\n\n  bool connectionAllowed() { return connection_allowed_; }\n\nprivate:\n  friend class RedisProxyFilterTest;\n\n  struct PendingRequest : public CommandSplitter::SplitCallbacks {\n    PendingRequest(ProxyFilter& parent);\n    ~PendingRequest() override;\n\n    // RedisProxy::CommandSplitter::SplitCallbacks\n    bool connectionAllowed() override { return parent_.connectionAllowed(); }\n    void onAuth(const std::string& password) override { parent_.onAuth(*this, password); }\n    void onAuth(const std::string& username, const std::string& password) override {\n      parent_.onAuth(*this, username, password);\n    }\n    void onResponse(Common::Redis::RespValuePtr&& value) override {\n      parent_.onResponse(*this, std::move(value));\n    }\n\n    ProxyFilter& parent_;\n    Common::Redis::RespValuePtr pending_response_;\n    
CommandSplitter::SplitRequestPtr request_handle_;\n  };\n\n  void onAuth(PendingRequest& request, const std::string& password);\n  void onAuth(PendingRequest& request, const std::string& username, const std::string& password);\n  void onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value);\n\n  Common::Redis::DecoderPtr decoder_;\n  Common::Redis::EncoderPtr encoder_;\n  CommandSplitter::Instance& splitter_;\n  ProxyFilterConfigSharedPtr config_;\n  Buffer::OwnedImpl encoder_buffer_;\n  Network::ReadFilterCallbacks* callbacks_{};\n  std::list<PendingRequest> pending_requests_;\n  bool connection_allowed_;\n};\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/router.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n\n#include \"extensions/filters/network/redis_proxy/conn_pool.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\n/**\n * Per route policy for request mirroring.\n */\nclass MirrorPolicy {\npublic:\n  virtual ~MirrorPolicy() = default;\n\n  /**\n   * @return the upstream connection pool that a matching request should be mirrored to. Returns\n   * null if no mirroring should take place.\n   */\n  virtual ConnPool::InstanceSharedPtr upstream() const PURE;\n\n  /**\n   * Determine whether a request should be mirrored.\n   * @param command the redis command being requested\n   * @return TRUE if mirroring should take place.\n   */\n  virtual bool shouldMirror(const std::string& command) const PURE;\n};\n\nusing MirrorPolicyConstSharedPtr = std::shared_ptr<const MirrorPolicy>;\n\nusing MirrorPolicies = std::vector<MirrorPolicyConstSharedPtr>;\n\n/**\n * An resolved route that wraps an upstream connection pool and list of mirror policies\n */\nclass Route {\npublic:\n  virtual ~Route() = default;\n\n  virtual ConnPool::InstanceSharedPtr upstream() const PURE;\n\n  virtual const MirrorPolicies& mirrorPolicies() const PURE;\n};\n\nusing RouteSharedPtr = std::shared_ptr<Route>;\n\n/*\n * Decorator of a connection pool in order to enable key based routing.\n */\nclass Router {\npublic:\n  virtual ~Router() = default;\n\n  /**\n   * Returns a connection pool that matches a given route. When no match is found, the catch all\n   * pool is used. 
When remove prefix is set to true, the prefix will be removed from the key.\n   * @param key mutable reference to the key of the current command.\n   * @return a handle to the connection pool.\n   */\n  virtual RouteSharedPtr upstreamPool(std::string& key) PURE;\n};\n\nusing RouterPtr = std::unique_ptr<Router>;\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/router_impl.cc",
    "content": "#include \"extensions/filters/network/redis_proxy/router_impl.h\"\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nMirrorPolicyImpl::MirrorPolicyImpl(const envoy::extensions::filters::network::redis_proxy::v3::\n                                       RedisProxy::PrefixRoutes::Route::RequestMirrorPolicy& config,\n                                   const ConnPool::InstanceSharedPtr upstream,\n                                   Runtime::Loader& runtime)\n    : runtime_key_(config.runtime_fraction().runtime_key()),\n      default_value_(config.has_runtime_fraction()\n                         ? absl::optional<envoy::type::v3::FractionalPercent>(\n                               config.runtime_fraction().default_value())\n                         : absl::nullopt),\n      exclude_read_commands_(config.exclude_read_commands()), upstream_(upstream),\n      runtime_(runtime) {}\n\nbool MirrorPolicyImpl::shouldMirror(const std::string& command) const {\n  if (!upstream_) {\n    return false;\n  }\n\n  std::string to_lower_string = absl::AsciiStrToLower(command);\n\n  if (exclude_read_commands_ && Common::Redis::SupportedCommands::isReadCommand(to_lower_string)) {\n    return false;\n  }\n\n  if (default_value_.has_value()) {\n    return runtime_.snapshot().featureEnabled(runtime_key_, default_value_.value());\n  }\n\n  return true;\n}\n\nPrefix::Prefix(\n    const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes::Route\n        route,\n    Upstreams& upstreams, Runtime::Loader& runtime)\n    : prefix_(route.prefix()), remove_prefix_(route.remove_prefix()),\n      upstream_(upstreams.at(route.cluster())) {\n  for (auto const& mirror_policy : route.request_mirror_policy()) {\n    mirror_policies_.emplace_back(std::make_shared<MirrorPolicyImpl>(\n        
mirror_policy, upstreams.at(mirror_policy.cluster()), runtime));\n  }\n}\n\nPrefixRoutes::PrefixRoutes(\n    const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes& config,\n    Upstreams&& upstreams, Runtime::Loader& runtime)\n    : case_insensitive_(config.case_insensitive()), upstreams_(std::move(upstreams)),\n      catch_all_route_(config.has_catch_all_route()\n                           ? std::make_shared<Prefix>(config.catch_all_route(), upstreams_, runtime)\n                           : nullptr) {\n\n  for (auto const& route : config.routes()) {\n    std::string copy(route.prefix());\n\n    if (case_insensitive_) {\n      absl::AsciiStrToLower(&copy);\n    }\n\n    auto success = prefix_lookup_table_.add(\n        copy.c_str(), std::make_shared<Prefix>(route, upstreams_, runtime), false);\n    if (!success) {\n      throw EnvoyException(fmt::format(\"prefix `{}` already exists.\", route.prefix()));\n    }\n  }\n}\n\nRouteSharedPtr PrefixRoutes::upstreamPool(std::string& key) {\n  PrefixSharedPtr value = nullptr;\n  if (case_insensitive_) {\n    std::string copy = absl::AsciiStrToLower(key);\n    value = prefix_lookup_table_.findLongestPrefix(copy.c_str());\n  } else {\n    value = prefix_lookup_table_.findLongestPrefix(key.c_str());\n  }\n\n  if (value != nullptr) {\n    if (value->removePrefix()) {\n      key.erase(0, value->prefix().length());\n    }\n    return value;\n  }\n\n  return catch_all_route_;\n}\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/redis_proxy/router_impl.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <functional>\n#include <map>\n#include <memory>\n#include <set>\n#include <string>\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"extensions/filters/network/common/redis/supported_commands.h\"\n#include \"extensions/filters/network/redis_proxy/conn_pool_impl.h\"\n#include \"extensions/filters/network/redis_proxy/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nusing Upstreams = std::map<std::string, ConnPool::InstanceSharedPtr>;\n\nclass MirrorPolicyImpl : public MirrorPolicy {\npublic:\n  MirrorPolicyImpl(const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::\n                       PrefixRoutes::Route::RequestMirrorPolicy&,\n                   const ConnPool::InstanceSharedPtr, Runtime::Loader& runtime);\n\n  ConnPool::InstanceSharedPtr upstream() const override { return upstream_; };\n\n  bool shouldMirror(const std::string& command) const override;\n\nprivate:\n  const std::string runtime_key_;\n  const absl::optional<envoy::type::v3::FractionalPercent> default_value_;\n  const bool exclude_read_commands_;\n  ConnPool::InstanceSharedPtr upstream_;\n  Runtime::Loader& runtime_;\n};\n\nclass Prefix : public Route {\npublic:\n  Prefix(const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes::Route\n             route,\n         Upstreams& upstreams, Runtime::Loader& runtime);\n\n  ConnPool::InstanceSharedPtr upstream() const override { return upstream_; }\n  const MirrorPolicies& mirrorPolicies() const override { return mirror_policies_; };\n  const std::string& prefix() const { return prefix_; }\n  bool removePrefix() const { return remove_prefix_; }\n\nprivate:\n  const 
std::string prefix_;\n  const bool remove_prefix_;\n  const ConnPool::InstanceSharedPtr upstream_;\n  MirrorPolicies mirror_policies_;\n};\n\nusing PrefixSharedPtr = std::shared_ptr<Prefix>;\n\nclass PrefixRoutes : public Router {\npublic:\n  PrefixRoutes(const envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes&\n                   prefix_routes,\n               Upstreams&& upstreams, Runtime::Loader& runtime);\n\n  RouteSharedPtr upstreamPool(std::string& key) override;\n\nprivate:\n  TrieLookupTable<PrefixSharedPtr> prefix_lookup_table_;\n  const bool case_insensitive_;\n  Upstreams upstreams_;\n  RouteSharedPtr catch_all_route_;\n};\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    deps = [\"//source/common/singleton:const_singleton\"],\n)\n\nenvoy_cc_library(\n    name = \"stats_interface\",\n    hdrs = [\"stats.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"rocketmq_interface\",\n    hdrs = [\n        \"topic_route.h\",\n    ],\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"rocketmq_lib\",\n    srcs = [\n        \"topic_route.cc\",\n    ],\n    deps = [\n        \":rocketmq_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"protocol_interface\",\n    hdrs = [\"protocol.h\"],\n    deps = [\n        \":metadata_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"protocol_lib\",\n    srcs = [\"protocol.cc\"],\n    deps = [\n        \":protocol_interface\",\n        \":well_known_names\",\n        \"//source/common/common:enum_to_int\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"codec_lib\",\n    srcs = [\n        \"codec.cc\",\n    ],\n    hdrs = [\n        \"codec.h\",\n    ],\n    deps = [\n        \":protocol_lib\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_manager_lib\",\n    srcs = [\n        \"active_message.cc\",\n        \"conn_manager.cc\",\n    ],\n    hdrs = [\n        \"active_message.h\",\n        \"conn_manager.h\",\n    ],\n    deps = [\n        \":codec_lib\",\n        \":protocol_lib\",\n        
\":rocketmq_lib\",\n        \":stats_interface\",\n        \":well_known_names\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/rocketmq_proxy/router:router_interface\",\n        \"@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\n        \"config.cc\",\n    ],\n    hdrs = [\n        \"config.h\",\n    ],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"alpha\",\n    deps = [\n        \":conn_manager_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"//source/extensions/filters/network/rocketmq_proxy/router:route_matcher\",\n        \"//source/extensions/filters/network/rocketmq_proxy/router:router_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"metadata_lib\",\n    hdrs = [\"metadata.h\"],\n    
external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/active_message.cc",
    "content": "#include \"extensions/filters/network/rocketmq_proxy/active_message.h\"\n\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n#include \"extensions/filters/network/rocketmq_proxy/topic_route.h\"\n#include \"extensions/filters/network/rocketmq_proxy/well_known_names.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"absl/strings/match.h\"\n\nusing Envoy::Tcp::ConnectionPool::ConnectionDataPtr;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nActiveMessage::ActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request)\n    : connection_manager_(conn_manager), request_(std::move(request)) {\n  metadata_ = std::make_shared<MessageMetadata>();\n  MetadataHelper::parseRequest(request_, metadata_);\n  updateActiveRequestStats();\n}\n\nActiveMessage::~ActiveMessage() { updateActiveRequestStats(false); }\n\nvoid ActiveMessage::createFilterChain() { router_ = connection_manager_.config().createRouter(); }\n\nvoid ActiveMessage::sendRequestToUpstream() {\n  if (!router_) {\n    createFilterChain();\n  }\n  router_->sendRequestToUpstream(*this);\n}\n\nRouter::RouteConstSharedPtr ActiveMessage::route() {\n  if (cached_route_) {\n    return cached_route_.value();\n  }\n  const std::string& topic_name = metadata_->topicName();\n  ENVOY_LOG(trace, \"fetch route for topic: {}\", topic_name);\n  Router::RouteConstSharedPtr route = connection_manager_.config().routerConfig().route(*metadata_);\n  cached_route_ = route;\n  return cached_route_.value();\n}\n\nvoid ActiveMessage::onError(absl::string_view error_message) {\n  connection_manager_.onError(request_, error_message);\n}\n\nconst RemotingCommandPtr& ActiveMessage::downstreamRequest() const { return request_; }\n\nvoid 
ActiveMessage::fillAckMessageDirective(Buffer::Instance& buffer, const std::string& group,\n                                            const std::string& topic,\n                                            const AckMessageDirective& directive) {\n  int32_t cursor = 0;\n  const int32_t buffer_length = buffer.length();\n  while (cursor < buffer_length) {\n    auto frame_length = buffer.peekBEInt<int32_t>(cursor);\n    std::string decoded_topic = Decoder::decodeTopic(buffer, cursor);\n    ENVOY_LOG(trace, \"Process a message: consumer group: {}, topic: {}, messageId: {}\",\n              group, decoded_topic, Decoder::decodeMsgId(buffer, cursor));\n    if (!absl::StartsWith(decoded_topic, RetryTopicPrefix) && decoded_topic != topic) {\n      ENVOY_LOG(warn,\n                \"Decoded topic from pop-response does not equal to request. Decoded topic: \"\n                \"{}, request topic: {}, message ID: {}\",\n                decoded_topic, topic, Decoder::decodeMsgId(buffer, cursor));\n    }\n\n    /*\n     * Sometimes, client SDK may use -1 for queue-id in the pop request so that broker servers\n     * are allowed to lookup all queues it serves. So we need to use the actual queue Id from\n     * response body.\n     */\n    int32_t queue_id = Decoder::decodeQueueId(buffer, cursor);\n    int64_t queue_offset = Decoder::decodeQueueOffset(buffer, cursor);\n\n    std::string key = fmt::format(\"{}-{}-{}-{}\", group, decoded_topic, queue_id, queue_offset);\n    connection_manager_.insertAckDirective(key, directive);\n    ENVOY_LOG(\n        debug,\n        \"Insert an ack directive. 
Consumer group: {}, topic: {}, queue Id: {}, queue offset: {}\",\n        group, topic, queue_id, queue_offset);\n    cursor += frame_length;\n  }\n}\n\nvoid ActiveMessage::sendResponseToDownstream() {\n  if (request_->code() == enumToSignedInt(RequestCode::PopMessage)) {\n    // Fill ack message directive\n    auto pop_header = request_->typedCustomHeader<PopMessageRequestHeader>();\n    AckMessageDirective directive(pop_header->targetBrokerName(), pop_header->targetBrokerId(),\n                                  connection_manager_.timeSource().monotonicTime());\n    ENVOY_LOG(trace, \"Receive pop response from broker name: {}, broker ID: {}\",\n              pop_header->targetBrokerName(), pop_header->targetBrokerId());\n    fillAckMessageDirective(response_->body(), pop_header->consumerGroup(), pop_header->topic(),\n                            directive);\n  }\n\n  // If acknowledgment of the message is successful, we need to erase the ack directive from\n  // manager.\n  if (request_->code() == enumToSignedInt(RequestCode::AckMessage) &&\n      response_->code() == enumToSignedInt(ResponseCode::Success)) {\n    auto ack_header = request_->typedCustomHeader<AckMessageRequestHeader>();\n    connection_manager_.eraseAckDirective(ack_header->directiveKey());\n  }\n\n  if (response_) {\n    response_->opaque(request_->opaque());\n    connection_manager_.sendResponseToDownstream(response_);\n  }\n}\n\nvoid ActiveMessage::fillBrokerData(std::vector<BrokerData>& list, const std::string& cluster,\n                                   const std::string& broker_name, int64_t broker_id,\n                                   const std::string& address) {\n  bool found = false;\n  for (auto& entry : list) {\n    if (entry.cluster() == cluster && entry.brokerName() == broker_name) {\n      found = true;\n      if (entry.brokerAddresses().find(broker_id) != entry.brokerAddresses().end()) {\n        ENVOY_LOG(warn, \"Duplicate broker_id found. 
Broker ID: {}, address: {}\", broker_id,\n                  address);\n        continue;\n      } else {\n        entry.brokerAddresses()[broker_id] = address;\n      }\n    }\n  }\n\n  if (!found) {\n    absl::node_hash_map<int64_t, std::string> addresses;\n    addresses.emplace(broker_id, address);\n\n    list.emplace_back(BrokerData(cluster, broker_name, std::move(addresses)));\n  }\n}\n\nvoid ActiveMessage::onQueryTopicRoute() {\n  std::string cluster_name;\n  ASSERT(metadata_->hasTopicName());\n  const std::string& topic_name = metadata_->topicName();\n  Upstream::ThreadLocalCluster* cluster = nullptr;\n  Router::RouteConstSharedPtr route = this->route();\n  if (route) {\n    cluster_name = route->routeEntry()->clusterName();\n    Upstream::ClusterManager& cluster_manager = connection_manager_.config().clusterManager();\n    cluster = cluster_manager.get(cluster_name);\n  }\n  if (cluster) {\n    ENVOY_LOG(trace, \"Envoy has an operating cluster {} for topic {}\", cluster_name, topic_name);\n    std::vector<QueueData> queue_data_list;\n    std::vector<BrokerData> broker_data_list;\n    for (auto& host_set : cluster->prioritySet().hostSetsPerPriority()) {\n      if (host_set->hosts().empty()) {\n        continue;\n      }\n      for (const auto& host : host_set->hosts()) {\n        std::string broker_address = host->address()->asString();\n        auto& filter_metadata = host->metadata()->filter_metadata();\n        const auto filter_it = filter_metadata.find(NetworkFilterNames::get().RocketmqProxy);\n        ASSERT(filter_it != filter_metadata.end());\n        const auto& metadata_fields = filter_it->second.fields();\n        ASSERT(metadata_fields.contains(RocketmqConstants::get().BrokerName));\n        std::string broker_name =\n            metadata_fields.at(RocketmqConstants::get().BrokerName).string_value();\n        ASSERT(metadata_fields.contains(RocketmqConstants::get().ClusterName));\n        std::string broker_cluster_name =\n            
metadata_fields.at(RocketmqConstants::get().ClusterName).string_value();\n        // Proto3 will ignore the field if the value is zero.\n        int32_t read_queue_num = 0;\n        if (metadata_fields.contains(RocketmqConstants::get().ReadQueueNum)) {\n          read_queue_num = static_cast<int32_t>(\n              metadata_fields.at(RocketmqConstants::get().ReadQueueNum).number_value());\n        }\n        int32_t write_queue_num = 0;\n        if (metadata_fields.contains(RocketmqConstants::get().WriteQueueNum)) {\n          write_queue_num = static_cast<int32_t>(\n              metadata_fields.at(RocketmqConstants::get().WriteQueueNum).number_value());\n        }\n        int32_t perm = 0;\n        if (metadata_fields.contains(RocketmqConstants::get().Perm)) {\n          perm = static_cast<int32_t>(\n              metadata_fields.at(RocketmqConstants::get().Perm).number_value());\n        }\n        int32_t broker_id = 0;\n        if (metadata_fields.contains(RocketmqConstants::get().BrokerId)) {\n          broker_id = static_cast<int32_t>(\n              metadata_fields.at(RocketmqConstants::get().BrokerId).number_value());\n        }\n        queue_data_list.emplace_back(QueueData(broker_name, read_queue_num, write_queue_num, perm));\n        if (connection_manager_.config().developMode()) {\n          ENVOY_LOG(trace, \"Develop mode, return proxy address to replace all broker addresses so \"\n                           \"that L4 network rewrite is not required\");\n          fillBrokerData(broker_data_list, broker_cluster_name, broker_name, broker_id,\n                         connection_manager_.config().proxyAddress());\n        } else {\n          fillBrokerData(broker_data_list, broker_cluster_name, broker_name, broker_id,\n                         broker_address);\n        }\n      }\n    }\n    ENVOY_LOG(trace, \"Prepare TopicRouteData for {} OK\", topic_name);\n    TopicRouteData topic_route_data(std::move(queue_data_list), 
std::move(broker_data_list));\n    ProtobufWkt::Struct data_struct;\n    topic_route_data.encode(data_struct);\n    std::string json = MessageUtil::getJsonStringFromMessage(data_struct);\n    ENVOY_LOG(trace, \"Serialize TopicRouteData for {} OK:\\n{}\", cluster_name, json);\n    RemotingCommandPtr response = std::make_unique<RemotingCommand>(\n        static_cast<int>(ResponseCode::Success), downstreamRequest()->version(),\n        downstreamRequest()->opaque());\n    response->markAsResponse();\n    response->body().add(json);\n    connection_manager_.sendResponseToDownstream(response);\n  } else {\n    onError(\"Cluster is not available\");\n    ENVOY_LOG(warn, \"Cluster for topic {} is not available\", topic_name);\n  }\n  onReset();\n}\n\nvoid ActiveMessage::onReset() { connection_manager_.deferredDelete(*this); }\n\nbool ActiveMessage::onUpstreamData(Envoy::Buffer::Instance& data, bool end_stream,\n                                   ConnectionDataPtr& conn_data) {\n  bool underflow = false;\n  bool has_error = false;\n  response_ = Decoder::decode(data, underflow, has_error, downstreamRequest()->code());\n  if (underflow && !end_stream) {\n    ENVOY_LOG(trace, \"Wait for more data from upstream\");\n    return false;\n  }\n\n  if (enumToSignedInt(RequestCode::PopMessage) == request_->code() && router_ != nullptr) {\n    recordPopRouteInfo(router_->upstreamHost());\n  }\n\n  connection_manager_.stats().response_.inc();\n  if (!has_error) {\n    connection_manager_.stats().response_decoding_success_.inc();\n    // Relay response to downstream\n    sendResponseToDownstream();\n  } else {\n    ENVOY_LOG(error, \"Failed to decode response for opaque: {}, close immediately.\",\n              downstreamRequest()->opaque());\n    onError(\"Failed to decode response from upstream\");\n    connection_manager_.stats().response_decoding_error_.inc();\n    conn_data->connection().close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  if (end_stream) {\n    
conn_data->connection().close(Network::ConnectionCloseType::NoFlush);\n  }\n  return true;\n}\n\nvoid ActiveMessage::recordPopRouteInfo(Upstream::HostDescriptionConstSharedPtr host_description) {\n  if (host_description) {\n    auto host_metadata = host_description->metadata();\n    auto filter_metadata = host_metadata->filter_metadata();\n    const auto filter_it = filter_metadata.find(NetworkFilterNames::get().RocketmqProxy);\n    ASSERT(filter_it != filter_metadata.end());\n    const auto& metadata_fields = filter_it->second.fields();\n    ASSERT(metadata_fields.contains(RocketmqConstants::get().BrokerName));\n    std::string broker_name =\n        metadata_fields.at(RocketmqConstants::get().BrokerName).string_value();\n    // Proto3 will ignore the field if the value is zero.\n    int32_t broker_id = 0;\n    if (metadata_fields.contains(RocketmqConstants::get().BrokerId)) {\n      broker_id = static_cast<int32_t>(\n          metadata_fields.at(RocketmqConstants::get().BrokerId).number_value());\n    }\n    // Tag the request with upstream host metadata: broker-name, broker-id\n    auto custom_header = request_->typedCustomHeader<CommandCustomHeader>();\n    custom_header->targetBrokerName(broker_name);\n    custom_header->targetBrokerId(broker_id);\n  }\n}\n\nvoid ActiveMessage::updateActiveRequestStats(bool is_inc) {\n  if (is_inc) {\n    connection_manager_.stats().request_active_.inc();\n  } else {\n    connection_manager_.stats().request_active_.dec();\n  }\n  auto code = static_cast<RequestCode>(request_->code());\n  switch (code) {\n  case RequestCode::PopMessage: {\n    if (is_inc) {\n      connection_manager_.stats().pop_message_active_.inc();\n    } else {\n      connection_manager_.stats().pop_message_active_.dec();\n    }\n    break;\n  }\n  case RequestCode::SendMessage: {\n    if (is_inc) {\n      connection_manager_.stats().send_message_v1_active_.inc();\n    } else {\n      connection_manager_.stats().send_message_v1_active_.dec();\n    }\n    
break;\n  }\n  case RequestCode::SendMessageV2: {\n    if (is_inc) {\n      connection_manager_.stats().send_message_v2_active_.inc();\n    } else {\n      connection_manager_.stats().send_message_v2_active_.dec();\n    }\n    break;\n  }\n  case RequestCode::GetRouteInfoByTopic: {\n    if (is_inc) {\n      connection_manager_.stats().get_topic_route_active_.inc();\n    } else {\n      connection_manager_.stats().get_topic_route_active_.dec();\n    }\n    break;\n  }\n  default:\n    break;\n  }\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/active_message.h",
    "content": "#pragma once\n\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/codec.h\"\n#include \"extensions/filters/network/rocketmq_proxy/protocol.h\"\n#include \"extensions/filters/network/rocketmq_proxy/router/router.h\"\n#include \"extensions/filters/network/rocketmq_proxy/topic_route.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass ConnectionManager;\n\n/**\n * ActiveMessage represents an in-flight request from downstream that has not yet received response\n * from upstream.\n */\nclass ActiveMessage : public LinkedObject<ActiveMessage>,\n                      public Event::DeferredDeletable,\n                      Logger::Loggable<Logger::Id::rocketmq> {\npublic:\n  ActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request);\n\n  ~ActiveMessage() override;\n\n  /**\n   * Set up filter-chain according to configuration from bootstrap config file and dynamic\n   * configuration items from Pilot.\n   */\n  void createFilterChain();\n\n  /**\n   * Relay requests from downstream to upstream cluster. 
If the target cluster is absent at the\n   * moment, it triggers cluster discovery service request and mark awaitCluster as true.\n   * ClusterUpdateCallback will process requests marked await-cluster once the target cluster is\n   * in place.\n   */\n  void sendRequestToUpstream();\n\n  const RemotingCommandPtr& downstreamRequest() const;\n\n  /**\n   * Parse pop response and insert ack route directive such that ack requests will be forwarded to\n   * the same broker host from which messages are popped.\n   * @param buffer Pop response body.\n   * @param group Consumer group name.\n   * @param topic Topic from which messages are popped\n   * @param directive ack route directive\n   */\n  virtual void fillAckMessageDirective(Buffer::Instance& buffer, const std::string& group,\n                                       const std::string& topic,\n                                       const AckMessageDirective& directive);\n\n  virtual void sendResponseToDownstream();\n\n  void onQueryTopicRoute();\n\n  virtual void onError(absl::string_view error_message);\n\n  ConnectionManager& connectionManager() { return connection_manager_; }\n\n  virtual void onReset();\n\n  bool onUpstreamData(Buffer::Instance& data, bool end_stream,\n                      Tcp::ConnectionPool::ConnectionDataPtr& conn_data);\n\n  virtual MessageMetadataSharedPtr metadata() const { return metadata_; }\n\n  virtual Router::RouteConstSharedPtr route();\n\n  void recordPopRouteInfo(Upstream::HostDescriptionConstSharedPtr host_description);\n\n  static void fillBrokerData(std::vector<BrokerData>& list, const std::string& cluster,\n                             const std::string& broker_name, int64_t broker_id,\n                             const std::string& address);\n\nprivate:\n  ConnectionManager& connection_manager_;\n  RemotingCommandPtr request_;\n  RemotingCommandPtr response_;\n  MessageMetadataSharedPtr metadata_;\n  Router::RouterPtr router_;\n  absl::optional<Router::RouteConstSharedPtr> 
cached_route_;\n\n  void updateActiveRequestStats(bool is_inc = true);\n};\n\nusing ActiveMessagePtr = std::unique_ptr<ActiveMessage>;\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/codec.cc",
    "content": "#include \"extensions/filters/network/rocketmq_proxy/codec.h\"\n\n#include <string>\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/protocol.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nRemotingCommandPtr Decoder::decode(Buffer::Instance& buffer, bool& underflow, bool& has_error,\n                                   int request_code) {\n  // Verify there is at least some bits, which stores frame length and header length\n  if (buffer.length() <= MIN_FRAME_SIZE) {\n    underflow = true;\n    return nullptr;\n  }\n\n  auto frame_length = buffer.peekBEInt<uint32_t>();\n\n  if (frame_length > MAX_FRAME_SIZE) {\n    has_error = true;\n    return nullptr;\n  }\n\n  if (buffer.length() < frame_length) {\n    underflow = true;\n    return nullptr;\n  }\n  buffer.drain(FRAME_LENGTH_FIELD_SIZE);\n\n  auto mark = buffer.peekBEInt<uint32_t>();\n  uint32_t header_length = adjustHeaderLength(mark);\n  if (frame_length < header_length + FRAME_HEADER_LENGTH_FIELD_SIZE) {\n    // There is an error in frame_length.\n    // Make sure body_length is non-negative.\n    has_error = true;\n    return nullptr;\n  }\n  buffer.drain(FRAME_HEADER_LENGTH_FIELD_SIZE);\n\n  uint32_t body_length = frame_length - FRAME_HEADER_LENGTH_FIELD_SIZE - header_length;\n\n  ENVOY_LOG(debug,\n            \"Request/Response Frame Meta: Frame Length = {}, Header Length = {}, Body Length = {}\",\n            frame_length, header_length, body_length);\n\n  Buffer::OwnedImpl header_buffer;\n  header_buffer.move(buffer, header_length);\n  std::string header_json = header_buffer.toString();\n  ENVOY_LOG(trace, \"Request/Response Header JSON: {}\", header_json);\n\n  int32_t code, version, opaque;\n  uint32_t flag;\n  if (isJsonHeader(mark)) {\n    ProtobufWkt::Struct 
header_struct;\n\n    // Parse header JSON text\n    try {\n      MessageUtil::loadFromJson(header_json, header_struct);\n    } catch (std::exception& e) {\n      has_error = true;\n      ENVOY_LOG(error, \"Failed to parse header JSON: {}. Error message: {}\", header_json, e.what());\n      return nullptr;\n    }\n\n    const auto& filed_value_pair = header_struct.fields();\n    if (!filed_value_pair.contains(\"code\")) {\n      ENVOY_LOG(error, \"Malformed frame: 'code' field is missing. Header JSON: {}\", header_json);\n      has_error = true;\n      return nullptr;\n    }\n    code = filed_value_pair.at(\"code\").number_value();\n    if (!filed_value_pair.contains(\"version\")) {\n      ENVOY_LOG(error, \"Malformed frame: 'version' field is missing. Header JSON: {}\", header_json);\n      has_error = true;\n      return nullptr;\n    }\n    version = filed_value_pair.at(\"version\").number_value();\n    if (!filed_value_pair.contains(\"opaque\")) {\n      ENVOY_LOG(error, \"Malformed frame: 'opaque' field is missing. Header JSON: {}\", header_json);\n      has_error = true;\n      return nullptr;\n    }\n    opaque = filed_value_pair.at(\"opaque\").number_value();\n    if (!filed_value_pair.contains(\"flag\")) {\n      ENVOY_LOG(error, \"Malformed frame: 'flag' field is missing. 
Header JSON: {}\", header_json);\n      has_error = true;\n      return nullptr;\n    }\n    flag = filed_value_pair.at(\"flag\").number_value();\n    RemotingCommandPtr cmd = std::make_unique<RemotingCommand>(code, version, opaque);\n    cmd->flag(flag);\n    if (filed_value_pair.contains(\"language\")) {\n      cmd->language(filed_value_pair.at(\"language\").string_value());\n    }\n\n    if (filed_value_pair.contains(\"serializeTypeCurrentRPC\")) {\n      cmd->serializeTypeCurrentRPC(filed_value_pair.at(\"serializeTypeCurrentRPC\").string_value());\n    }\n\n    cmd->body_.move(buffer, body_length);\n\n    if (RemotingCommand::isResponse(flag)) {\n      if (filed_value_pair.contains(\"remark\")) {\n        cmd->remark(filed_value_pair.at(\"remark\").string_value());\n      }\n      cmd->custom_header_ = decodeResponseExtHeader(static_cast<ResponseCode>(code), header_struct,\n                                                    static_cast<RequestCode>(request_code));\n    } else {\n      cmd->custom_header_ = decodeExtHeader(static_cast<RequestCode>(code), header_struct);\n    }\n    return cmd;\n  } else {\n    ENVOY_LOG(warn, \"Unsupported header serialization type\");\n    has_error = true;\n    return nullptr;\n  }\n}\n\nbool Decoder::isComplete(Buffer::Instance& buffer, int32_t cursor) {\n  if (buffer.length() - cursor < 4) {\n    // buffer is definitely incomplete.\n    return false;\n  }\n\n  auto total_size = buffer.peekBEInt<int32_t>(cursor);\n  return buffer.length() - cursor >= static_cast<uint32_t>(total_size);\n}\n\nstd::string Decoder::decodeTopic(Buffer::Instance& buffer, int32_t cursor) {\n  if (!isComplete(buffer, cursor)) {\n    return EMPTY_STRING;\n  }\n\n  auto magic_code = buffer.peekBEInt<int32_t>(cursor + 4);\n\n  MessageVersion message_version = V1;\n  if (enumToSignedInt(MessageVersion::V1) == magic_code) {\n    message_version = V1;\n  } else if (enumToSignedInt(MessageVersion::V2) == magic_code) {\n    message_version = V2;\n  }\n\n  
int32_t offset = 4   /* total size */\n                   + 4 /* magic code */\n                   + 4 /* body CRC */\n                   + 4 /* queue Id */\n                   + 4 /* flag */\n                   + 8 /* queue offset */\n                   + 8 /* physical offset */\n                   + 4 /* sys flag */\n                   + 8 /* born timestamp */\n                   + 4 /* born host */\n                   + 4 /* born host port */\n                   + 8 /* store timestamp */\n                   + 4 /* store host */\n                   + 4 /* store host port */\n                   + 4 /* re-consume times */\n                   + 8 /* transaction offset */\n      ;\n  auto body_size = buffer.peekBEInt<int32_t>(cursor + offset);\n  offset += 4 /* body size */\n            + body_size /* body */;\n  int32_t topic_length;\n  std::string topic;\n  switch (message_version) {\n  case V1: {\n    topic_length = buffer.peekBEInt<int8_t>(cursor + offset);\n    topic.reserve(topic_length);\n    topic.resize(topic_length);\n    buffer.copyOut(cursor + offset + sizeof(int8_t), topic_length, &topic[0]);\n    break;\n  }\n  case V2: {\n    topic_length = buffer.peekBEInt<int16_t>(cursor + offset);\n    topic.reserve(topic_length);\n    topic.resize(topic_length);\n    buffer.copyOut(cursor + offset + sizeof(int16_t), topic_length, &topic[0]);\n    break;\n  }\n  }\n  return topic;\n}\n\nint32_t Decoder::decodeQueueId(Buffer::Instance& buffer, int32_t cursor) {\n  if (!isComplete(buffer, cursor)) {\n    return -1;\n  }\n\n  int32_t offset = 4   /* total size */\n                   + 4 /* magic code */\n                   + 4 /* body CRC */;\n\n  return buffer.peekBEInt<int32_t>(cursor + offset);\n}\n\nint64_t Decoder::decodeQueueOffset(Buffer::Instance& buffer, int32_t cursor) {\n  if (!isComplete(buffer, cursor)) {\n    return -1;\n  }\n\n  int32_t offset = 4   /* total size */\n                   + 4 /* magic code */\n                   + 4 /* body CRC */\n         
          + 4 /* queue Id */\n                   + 4 /* flag */;\n  return buffer.peekBEInt<int64_t>(cursor + offset);\n}\n\nstd::string Decoder::decodeMsgId(Buffer::Instance& buffer, int32_t cursor) {\n  if (!isComplete(buffer, cursor)) {\n    return EMPTY_STRING;\n  }\n\n  int32_t offset = 4   /* total size */\n                   + 4 /* magic code */\n                   + 4 /* body CRC */\n                   + 4 /* queue Id */\n                   + 4 /* flag */\n                   + 8 /* queue offset */;\n  auto physical_offset = buffer.peekBEInt<int64_t>(cursor + offset);\n  offset += 8   /* physical offset */\n            + 4 /* sys flag */\n            + 8 /* born timestamp */\n            + 4 /* born host */\n            + 4 /* born host port */\n            + 8 /* store timestamp */\n      ;\n\n  Buffer::OwnedImpl msg_id_buffer;\n  msg_id_buffer.writeBEInt<int64_t>(buffer.peekBEInt<int64_t>(cursor + offset));\n  msg_id_buffer.writeBEInt<int64_t>(physical_offset);\n  std::string msg_id;\n  msg_id.reserve(32);\n  for (uint64_t i = 0; i < msg_id_buffer.length(); i++) {\n    auto c = msg_id_buffer.peekBEInt<uint8_t>();\n    msg_id.append(1, static_cast<char>(c >> 4U));\n    msg_id.append(1, static_cast<char>(c & 0xFU));\n  }\n  return msg_id;\n}\n\nCommandCustomHeaderPtr Decoder::decodeExtHeader(RequestCode code,\n                                                ProtobufWkt::Struct& header_struct) {\n  const auto& filed_value_pair = header_struct.fields();\n  switch (code) {\n  case RequestCode::SendMessage: {\n    ASSERT(filed_value_pair.contains(\"extFields\"));\n    const auto& ext_fields = filed_value_pair.at(\"extFields\");\n    auto send_msg_ext_header = new SendMessageRequestHeader();\n    send_msg_ext_header->version_ = SendMessageRequestVersion::V1;\n    send_msg_ext_header->decode(ext_fields);\n    return send_msg_ext_header;\n  }\n  case RequestCode::SendMessageV2: {\n    ASSERT(filed_value_pair.contains(\"extFields\"));\n    const auto& ext_fields = 
filed_value_pair.at(\"extFields\");\n    auto send_msg_ext_header = new SendMessageRequestHeader();\n    send_msg_ext_header->version_ = SendMessageRequestVersion::V2;\n    send_msg_ext_header->decode(ext_fields);\n    return send_msg_ext_header;\n  }\n\n  case RequestCode::GetRouteInfoByTopic: {\n    ASSERT(filed_value_pair.contains(\"extFields\"));\n    const auto& ext_fields = filed_value_pair.at(\"extFields\");\n    auto get_route_info_request_header = new GetRouteInfoRequestHeader();\n    get_route_info_request_header->decode(ext_fields);\n    return get_route_info_request_header;\n  }\n\n  case RequestCode::UnregisterClient: {\n    ASSERT(filed_value_pair.contains(\"extFields\"));\n    const auto& ext_fields = filed_value_pair.at(\"extFields\");\n    auto unregister_client_request_header = new UnregisterClientRequestHeader();\n    unregister_client_request_header->decode(ext_fields);\n    return unregister_client_request_header;\n  }\n\n  case RequestCode::GetConsumerListByGroup: {\n    ASSERT(filed_value_pair.contains(\"extFields\"));\n    const auto& ext_fields = filed_value_pair.at(\"extFields\");\n    auto get_consumer_list_by_group_request_header = new GetConsumerListByGroupRequestHeader();\n    get_consumer_list_by_group_request_header->decode(ext_fields);\n    return get_consumer_list_by_group_request_header;\n  }\n\n  case RequestCode::PopMessage: {\n    ASSERT(filed_value_pair.contains(\"extFields\"));\n    const auto& ext_fields = filed_value_pair.at(\"extFields\");\n    auto pop_message_request_header = new PopMessageRequestHeader();\n    pop_message_request_header->decode(ext_fields);\n    return pop_message_request_header;\n  }\n\n  case RequestCode::AckMessage: {\n    ASSERT(filed_value_pair.contains(\"extFields\"));\n    const auto& ext_fields = filed_value_pair.at(\"extFields\");\n    auto ack_message_request_header = new AckMessageRequestHeader();\n    ack_message_request_header->decode(ext_fields);\n    return ack_message_request_header;\n  
}\n\n  case RequestCode::HeartBeat: {\n    // Heartbeat does not have an extended header.\n    return nullptr;\n  }\n\n  default:\n    ENVOY_LOG(warn, \"Unsupported request code: {}\", static_cast<int>(code));\n    return nullptr;\n  }\n}\n\nCommandCustomHeaderPtr Decoder::decodeResponseExtHeader(ResponseCode response_code,\n                                                        ProtobufWkt::Struct& header_struct,\n                                                        RequestCode request_code) {\n  // No need to decode a failed response.\n  if (response_code != ResponseCode::Success &&\n      response_code != ResponseCode::ReplicaNotAvailable) {\n    return nullptr;\n  }\n  const auto& filed_value_pair = header_struct.fields();\n  switch (request_code) {\n  case RequestCode::SendMessage:\n  case RequestCode::SendMessageV2: {\n    auto send_message_response_header = new SendMessageResponseHeader();\n    ASSERT(filed_value_pair.contains(\"extFields\"));\n    auto& ext_fields = filed_value_pair.at(\"extFields\");\n    send_message_response_header->decode(ext_fields);\n    return send_message_response_header;\n  }\n\n  case RequestCode::PopMessage: {\n    auto pop_message_response_header = new PopMessageResponseHeader();\n    ASSERT(filed_value_pair.contains(\"extFields\"));\n    const auto& ext_fields = filed_value_pair.at(\"extFields\");\n    pop_message_response_header->decode(ext_fields);\n    return pop_message_response_header;\n  }\n  default:\n    return nullptr;\n  }\n}\n\nvoid Encoder::encode(const RemotingCommandPtr& command, Buffer::Instance& data) {\n\n  ProtobufWkt::Struct command_struct;\n  auto* fields = command_struct.mutable_fields();\n\n  ProtobufWkt::Value code_v;\n  code_v.set_number_value(command->code_);\n  (*fields)[\"code\"] = code_v;\n\n  ProtobufWkt::Value language_v;\n  language_v.set_string_value(command->language());\n  (*fields)[\"language\"] = language_v;\n\n  ProtobufWkt::Value version_v;\n  
version_v.set_number_value(command->version_);\n  (*fields)[\"version\"] = version_v;\n\n  ProtobufWkt::Value opaque_v;\n  opaque_v.set_number_value(command->opaque_);\n  (*fields)[\"opaque\"] = opaque_v;\n\n  ProtobufWkt::Value flag_v;\n  flag_v.set_number_value(command->flag_);\n  (*fields)[\"flag\"] = flag_v;\n\n  if (!command->remark_.empty()) {\n    ProtobufWkt::Value remark_v;\n    remark_v.set_string_value(command->remark_);\n    (*fields)[\"remark\"] = remark_v;\n  }\n\n  ProtobufWkt::Value serialization_type_v;\n  serialization_type_v.set_string_value(command->serializeTypeCurrentRPC());\n  (*fields)[\"serializeTypeCurrentRPC\"] = serialization_type_v;\n\n  if (command->custom_header_) {\n    ProtobufWkt::Value ext_fields_v;\n    command->custom_header_->encode(ext_fields_v);\n    (*fields)[\"extFields\"] = ext_fields_v;\n  }\n\n  std::string json = MessageUtil::getJsonStringFromMessage(command_struct);\n\n  int32_t frame_length = 4;\n  int32_t header_length = json.size();\n  frame_length += header_length;\n  frame_length += command->bodyLength();\n\n  data.writeBEInt<int32_t>(frame_length);\n  data.writeBEInt<int32_t>(header_length);\n  data.add(json);\n\n  // add body\n  if (command->bodyLength() > 0) {\n    data.add(command->body());\n  }\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/codec.h",
    "content": "#pragma once\n\n#include <map>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/network/filter.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/protocol.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nenum MessageVersion : uint32_t {\n  V1 = (0xAABBCCDDU ^ 1880681586U) + 8U,\n  V2 = (0xAABBCCDDU ^ 1880681586U) + 4U\n};\n\nclass Decoder : Logger::Loggable<Logger::Id::rocketmq> {\npublic:\n  Decoder() = default;\n\n  ~Decoder() = default;\n\n  /**\n   * @param buffer Data buffer to decode.\n   * @param underflow Indicate if buffer contains enough data in terms of protocol frame.\n   * @param has_error Indicate if the decoding is successful or not.\n   * @param request_code Corresponding request code if applies.\n   * @return Decoded remote command.\n   */\n  static RemotingCommandPtr decode(Buffer::Instance& buffer, bool& underflow, bool& has_error,\n                                   int request_code = 0);\n\n  static std::string decodeTopic(Buffer::Instance& buffer, int32_t cursor);\n\n  static int32_t decodeQueueId(Buffer::Instance& buffer, int32_t cursor);\n\n  static int64_t decodeQueueOffset(Buffer::Instance& buffer, int32_t cursor);\n\n  static std::string decodeMsgId(Buffer::Instance& buffer, int32_t cursor);\n\n  static constexpr uint32_t MIN_FRAME_SIZE = 8;\n\n  static constexpr uint32_t MAX_FRAME_SIZE = 4 * 1024 * 1024;\n\n  static constexpr uint32_t FRAME_LENGTH_FIELD_SIZE = 4;\n\n  static constexpr uint32_t FRAME_HEADER_LENGTH_FIELD_SIZE = 4;\n\nprivate:\n  static uint32_t adjustHeaderLength(uint32_t len) { return len & 0xFFFFFFu; }\n\n  static bool isJsonHeader(uint32_t len) { return (len >> 24u) == 0; }\n\n  static CommandCustomHeaderPtr decodeExtHeader(RequestCode code,\n                                
                ProtobufWkt::Struct& header_struct);\n\n  static CommandCustomHeaderPtr decodeResponseExtHeader(ResponseCode response_code,\n                                                        ProtobufWkt::Struct& header_struct,\n                                                        RequestCode request_code);\n\n  static bool isComplete(Buffer::Instance& buffer, int32_t cursor);\n};\n\nclass Encoder {\npublic:\n  static void encode(const RemotingCommandPtr& command, Buffer::Instance& buffer);\n};\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/config.cc",
    "content": "#include \"extensions/filters/network/rocketmq_proxy/config.h\"\n\n#include <cstdlib>\n\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n#include \"extensions/filters/network/rocketmq_proxy/stats.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nnamespace rocketmq_config = envoy::extensions::filters::network::rocketmq_proxy::v3;\n\nNetwork::FilterFactoryCb RocketmqProxyFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const rocketmq_config::RocketmqProxy& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  std::shared_ptr<ConfigImpl> filter_config = std::make_shared<ConfigImpl>(proto_config, context);\n  return [filter_config, &context](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(\n        std::make_shared<ConnectionManager>(*filter_config, context.dispatcher().timeSource()));\n  };\n}\n\nREGISTER_FACTORY(RocketmqProxyFilterConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory);\n\nConfigImpl::ConfigImpl(const RocketmqProxyConfig& config,\n                       Server::Configuration::FactoryContext& context)\n    : context_(context), stats_prefix_(fmt::format(\"rocketmq.{}.\", config.stat_prefix())),\n      stats_(RocketmqFilterStats::generateStats(stats_prefix_, context_.scope())),\n      route_matcher_(new Router::RouteMatcher(config.route_config())),\n      develop_mode_(config.develop_mode()),\n      transient_object_life_span_(PROTOBUF_GET_MS_OR_DEFAULT(config, transient_object_life_span,\n                                                             TransientObjectLifeSpan)) {}\n\nstd::string ConfigImpl::proxyAddress() {\n  const 
LocalInfo::LocalInfo& localInfo = context_.getServerFactoryContext().localInfo();\n  Network::Address::InstanceConstSharedPtr address = localInfo.address();\n  if (address->type() == Network::Address::Type::Ip) {\n    const std::string& ip = address->ip()->addressAsString();\n    std::string proxyAddr{ip};\n    if (address->ip()->port()) {\n      return proxyAddr.append(\":\").append(std::to_string(address->ip()->port()));\n    } else {\n      ENVOY_LOG(trace, \"Local info does not have port specified, defaulting to 10000\");\n      return proxyAddr.append(\":10000\");\n    }\n  }\n  return address->asString();\n}\n\nRouter::RouteConstSharedPtr ConfigImpl::route(const MessageMetadata& metadata) const {\n  return route_matcher_->route(metadata);\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/config.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n#include \"extensions/filters/network/rocketmq_proxy/router/route_matcher.h\"\n#include \"extensions/filters/network/rocketmq_proxy/router/router_impl.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass RocketmqProxyFilterConfigFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy> {\npublic:\n  RocketmqProxyFilterConfigFactory() : FactoryBase(NetworkFilterNames::get().RocketmqProxy, true) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\nclass ConfigImpl : public Config, public Router::Config, Logger::Loggable<Logger::Id::config> {\npublic:\n  using RocketmqProxyConfig =\n      envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy;\n\n  ConfigImpl(const RocketmqProxyConfig& config, Server::Configuration::FactoryContext& context);\n  ~ConfigImpl() override = default;\n\n  // Config\n  RocketmqFilterStats& stats() override { return stats_; }\n  Upstream::ClusterManager& clusterManager() override { return context_.clusterManager(); }\n  Router::RouterPtr createRouter() override {\n    return std::make_unique<Router::RouterImpl>(context_.clusterManager());\n  }\n  bool developMode() const override { return develop_mode_; }\n\n  std::chrono::milliseconds transientObjectLifeSpan() const 
override {\n    return transient_object_life_span_;\n  }\n\n  std::string proxyAddress() override;\n  Router::Config& routerConfig() override { return *this; }\n\n  // Router::Config\n  Router::RouteConstSharedPtr route(const MessageMetadata& metadata) const override;\n\nprivate:\n  Server::Configuration::FactoryContext& context_;\n  const std::string stats_prefix_;\n  RocketmqFilterStats stats_;\n  Router::RouteMatcherPtr route_matcher_;\n  const bool develop_mode_;\n  std::chrono::milliseconds transient_object_life_span_;\n\n  static constexpr uint64_t TransientObjectLifeSpan = 30 * 1000;\n};\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/conn_manager.cc",
    "content": "#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nConsumerGroupMember::ConsumerGroupMember(absl::string_view client_id,\n                                         ConnectionManager& conn_manager)\n    : client_id_(client_id.data(), client_id.size()), connection_manager_(&conn_manager),\n      last_(connection_manager_->time_source_.monotonicTime()) {}\n\nvoid ConsumerGroupMember::refresh() { last_ = connection_manager_->time_source_.monotonicTime(); }\n\nbool ConsumerGroupMember::expired() const {\n  auto duration = connection_manager_->time_source_.monotonicTime() - last_;\n  return std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() >\n         connection_manager_->config().transientObjectLifeSpan().count();\n}\n\nConnectionManager::ConnectionManager(Config& config, TimeSource& time_source)\n    : config_(config), time_source_(time_source), stats_(config.stats()) {}\n\nEnvoy::Network::FilterStatus ConnectionManager::onData(Envoy::Buffer::Instance& data,\n                                                       bool end_stream) {\n  ENVOY_CONN_LOG(trace, \"rocketmq_proxy: received {} bytes.\", read_callbacks_->connection(),\n                 data.length());\n  request_buffer_.move(data);\n  dispatch();\n  if (end_stream) {\n    resetAllActiveMessages(\"Connection to downstream is closed\");\n    read_callbacks_->connection().close(Envoy::Network::ConnectionCloseType::FlushWrite);\n  }\n  return Network::FilterStatus::StopIteration;\n}\n\nvoid ConnectionManager::dispatch() {\n  if (request_buffer_.length() < Decoder::MIN_FRAME_SIZE) {\n    ENVOY_CONN_LOG(warn, \"rocketmq_proxy: request buffer length is less than min frame size: {}\",\n                  
 read_callbacks_->connection(), request_buffer_.length());\n    return;\n  }\n\n  bool underflow = false;\n  bool has_decode_error = false;\n  while (!underflow) {\n    RemotingCommandPtr request = Decoder::decode(request_buffer_, underflow, has_decode_error);\n    if (underflow) {\n      // Wait for more data\n      break;\n    }\n    stats_.request_.inc();\n\n    // Decode error, we need to close connection immediately.\n    if (has_decode_error) {\n      ENVOY_CONN_LOG(error, \"Failed to decode request, close connection immediately\",\n                     read_callbacks_->connection());\n      stats_.request_decoding_error_.inc();\n      resetAllActiveMessages(\"Failed to decode data from downstream. Close connection immediately\");\n      read_callbacks_->connection().close(Envoy::Network::ConnectionCloseType::FlushWrite);\n      return;\n    } else {\n      stats_.request_decoding_success_.inc();\n    }\n\n    switch (static_cast<RequestCode>(request->code())) {\n    case RequestCode::GetRouteInfoByTopic: {\n      ENVOY_CONN_LOG(trace, \"GetTopicRoute request, code: {}, opaque: {}\",\n                     read_callbacks_->connection(), request->code(), request->opaque());\n      onGetTopicRoute(std::move(request));\n    } break;\n\n    case RequestCode::UnregisterClient: {\n      ENVOY_CONN_LOG(trace, \"process unregister client request, code: {}, opaque: {}\",\n                     read_callbacks_->connection(), request->code(), request->opaque());\n      onUnregisterClient(std::move(request));\n    } break;\n\n    case RequestCode::SendMessage: {\n      ENVOY_CONN_LOG(trace, \"SendMessage request, code: {}, opaque: {}\",\n                     read_callbacks_->connection(), request->code(), request->opaque());\n      onSendMessage(std::move(request));\n      stats_.send_message_v1_.inc();\n    } break;\n\n    case RequestCode::SendMessageV2: {\n      ENVOY_CONN_LOG(trace, \"SendMessage request, code: {}, opaque: {}\",\n                     
read_callbacks_->connection(), request->code(), request->opaque());\n      onSendMessage(std::move(request));\n      stats_.send_message_v2_.inc();\n    } break;\n\n    case RequestCode::GetConsumerListByGroup: {\n      ENVOY_CONN_LOG(trace, \"GetConsumerListByGroup request, code: {}, opaque: {}\",\n                     read_callbacks_->connection(), request->code(), request->opaque());\n      onGetConsumerListByGroup(std::move(request));\n    } break;\n\n    case RequestCode::PopMessage: {\n      ENVOY_CONN_LOG(trace, \"PopMessage request, code: {}, opaque: {}\",\n                     read_callbacks_->connection(), request->code(), request->opaque());\n      onPopMessage(std::move(request));\n      stats_.pop_message_.inc();\n    } break;\n\n    case RequestCode::AckMessage: {\n      ENVOY_CONN_LOG(trace, \"AckMessage request, code: {}, opaque: {}\",\n                     read_callbacks_->connection(), request->code(), request->opaque());\n      onAckMessage(std::move(request));\n      stats_.ack_message_.inc();\n    } break;\n\n    case RequestCode::HeartBeat: {\n      ENVOY_CONN_LOG(trace, \"Heartbeat request, opaque: {}\", read_callbacks_->connection(),\n                     request->opaque());\n      onHeartbeat(std::move(request));\n    } break;\n\n    default: {\n      ENVOY_CONN_LOG(warn, \"Request code {} not supported yet\", read_callbacks_->connection(),\n                     request->code());\n      std::string error_msg(\"Request not supported\");\n      onError(request, error_msg);\n    } break;\n    }\n  }\n}\n\nvoid ConnectionManager::purgeDirectiveTable() {\n  auto current = time_source_.monotonicTime();\n  for (auto it = ack_directive_table_.begin(); it != ack_directive_table_.end();) {\n    auto duration = current - it->second.creation_time_;\n    if (std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() >\n        config_.transientObjectLifeSpan().count()) {\n      ack_directive_table_.erase(it++);\n    } else {\n      it++;\n  
  }\n  }\n}\n\nvoid ConnectionManager::sendResponseToDownstream(RemotingCommandPtr& response) {\n  Buffer::OwnedImpl buffer;\n  Encoder::encode(response, buffer);\n  if (read_callbacks_->connection().state() == Network::Connection::State::Open) {\n    ENVOY_CONN_LOG(trace, \"Write response to downstream. Opaque: {}\", read_callbacks_->connection(),\n                   response->opaque());\n    read_callbacks_->connection().write(buffer, false);\n  } else {\n    ENVOY_CONN_LOG(error, \"Send response to downstream failed as connection is no longer open\",\n                   read_callbacks_->connection());\n  }\n}\n\nvoid ConnectionManager::onGetTopicRoute(RemotingCommandPtr request) {\n  createActiveMessage(request).onQueryTopicRoute();\n  stats_.get_topic_route_.inc();\n}\n\nvoid ConnectionManager::onHeartbeat(RemotingCommandPtr request) {\n  const std::string& body = request->body().toString();\n\n  purgeDirectiveTable();\n\n  ProtobufWkt::Struct body_struct;\n  try {\n    MessageUtil::loadFromJson(body, body_struct);\n  } catch (std::exception& e) {\n    ENVOY_LOG(warn, \"Failed to decode heartbeat body. Error message: {}\", e.what());\n    return;\n  }\n\n  HeartbeatData heartbeatData;\n  if (!heartbeatData.decode(body_struct)) {\n    ENVOY_LOG(warn, \"Failed to decode heartbeat data\");\n    return;\n  }\n\n  for (const auto& group : heartbeatData.consumerGroups()) {\n    addOrUpdateGroupMember(group, heartbeatData.clientId());\n  }\n\n  RemotingCommandPtr response = std::make_unique<RemotingCommand>();\n  response->code(enumToSignedInt(ResponseCode::Success));\n  response->opaque(request->opaque());\n  response->remark(\"Heartbeat OK\");\n  response->markAsResponse();\n  sendResponseToDownstream(response);\n  stats_.heartbeat_.inc();\n}\n\nvoid ConnectionManager::addOrUpdateGroupMember(absl::string_view group,\n                                               absl::string_view client_id) {\n  ENVOY_LOG(trace, \"#addOrUpdateGroupMember. 
Group: {}, client ID: {}\", group, client_id);\n  auto search = group_members_.find(std::string(group.data(), group.length()));\n  if (search == group_members_.end()) {\n    std::vector<ConsumerGroupMember> members;\n    members.emplace_back(ConsumerGroupMember(client_id, *this));\n    group_members_.emplace(std::string(group.data(), group.size()), members);\n  } else {\n    std::vector<ConsumerGroupMember>& members = search->second;\n    for (auto it = members.begin(); it != members.end();) {\n      if (it->clientId() == client_id) {\n        it->refresh();\n        ++it;\n      } else if (it->expired()) {\n        it = members.erase(it);\n      } else {\n        ++it;\n      }\n    }\n    if (members.empty()) {\n      group_members_.erase(search);\n    }\n  }\n}\n\nvoid ConnectionManager::onUnregisterClient(RemotingCommandPtr request) {\n  auto header = request->typedCustomHeader<UnregisterClientRequestHeader>();\n  ASSERT(header != nullptr);\n  ASSERT(!header->clientId().empty());\n  ENVOY_LOG(trace, \"Unregister client ID: {}, producer group: {}, consumer group: {}\",\n            header->clientId(), header->producerGroup(), header->consumerGroup());\n\n  if (!header->consumerGroup().empty()) {\n    auto search = group_members_.find(header->consumerGroup());\n    if (search != group_members_.end()) {\n      std::vector<ConsumerGroupMember>& members = search->second;\n      for (auto it = members.begin(); it != members.end();) {\n        if (it->clientId() == header->clientId()) {\n          it = members.erase(it);\n        } else if (it->expired()) {\n          it = members.erase(it);\n        } else {\n          ++it;\n        }\n      }\n      if (members.empty()) {\n        group_members_.erase(search);\n      }\n    }\n  }\n\n  RemotingCommandPtr response = std::make_unique<RemotingCommand>(\n      enumToSignedInt(ResponseCode::Success), request->version(), request->opaque());\n  response->markAsResponse();\n  response->remark(\"Envoy unregister client 
OK.\");\n  sendResponseToDownstream(response);\n  stats_.unregister_.inc();\n}\n\nvoid ConnectionManager::onError(RemotingCommandPtr& request, absl::string_view error_msg) {\n  Buffer::OwnedImpl buffer;\n  RemotingCommandPtr response = std::make_unique<RemotingCommand>();\n  response->markAsResponse();\n  response->opaque(request->opaque());\n  response->code(enumToSignedInt(ResponseCode::SystemError));\n  response->remark(error_msg);\n  sendResponseToDownstream(response);\n}\n\nvoid ConnectionManager::onSendMessage(RemotingCommandPtr request) {\n  ENVOY_CONN_LOG(trace, \"#onSendMessage, opaque: {}\", read_callbacks_->connection(),\n                 request->opaque());\n  auto header = request->typedCustomHeader<SendMessageRequestHeader>();\n  header->queueId(-1);\n  createActiveMessage(request).sendRequestToUpstream();\n}\n\nvoid ConnectionManager::onGetConsumerListByGroup(RemotingCommandPtr request) {\n  auto requestExtHeader = request->typedCustomHeader<GetConsumerListByGroupRequestHeader>();\n\n  ASSERT(requestExtHeader != nullptr);\n  ASSERT(!requestExtHeader->consumerGroup().empty());\n\n  ENVOY_LOG(trace, \"#onGetConsumerListByGroup, consumer group: {}\",\n            requestExtHeader->consumerGroup());\n\n  auto search = group_members_.find(requestExtHeader->consumerGroup());\n  GetConsumerListByGroupResponseBody getConsumerListByGroupResponseBody;\n  if (search != group_members_.end()) {\n    std::vector<ConsumerGroupMember>& members = search->second;\n    std::sort(members.begin(), members.end());\n    for (const auto& member : members) {\n      getConsumerListByGroupResponseBody.add(member.clientId());\n    }\n  } else {\n    ENVOY_LOG(warn, \"There is no consumer belongs to consumer_group: {}\",\n              requestExtHeader->consumerGroup());\n  }\n  ProtobufWkt::Struct body_struct;\n\n  getConsumerListByGroupResponseBody.encode(body_struct);\n\n  RemotingCommandPtr response = std::make_unique<RemotingCommand>(\n      
enumToSignedInt(ResponseCode::Success), request->version(), request->opaque());\n  response->markAsResponse();\n  std::string json = MessageUtil::getJsonStringFromMessage(body_struct);\n  response->body().add(json);\n  ENVOY_LOG(trace, \"GetConsumerListByGroup respond with body: {}\", json);\n\n  sendResponseToDownstream(response);\n  stats_.get_consumer_list_.inc();\n}\n\nvoid ConnectionManager::onPopMessage(RemotingCommandPtr request) {\n  auto header = request->typedCustomHeader<PopMessageRequestHeader>();\n  ASSERT(header != nullptr);\n  ENVOY_LOG(trace, \"#onPopMessage. Consumer group: {}, topic: {}\", header->consumerGroup(),\n            header->topic());\n  createActiveMessage(request).sendRequestToUpstream();\n}\n\nvoid ConnectionManager::onAckMessage(RemotingCommandPtr request) {\n  auto header = request->typedCustomHeader<AckMessageRequestHeader>();\n  ASSERT(header != nullptr);\n  ENVOY_LOG(\n      trace,\n      \"#onAckMessage. Consumer group: {}, topic: {}, queue Id: {}, offset: {}, extra-info: {}\",\n      header->consumerGroup(), header->topic(), header->queueId(), header->offset(),\n      header->extraInfo());\n\n  // Fill the target broker_name and broker_id routing directive\n  auto it = ack_directive_table_.find(header->directiveKey());\n  if (it == ack_directive_table_.end()) {\n    ENVOY_LOG(warn, \"There was no previous ack directive available, which is unexpected\");\n    onError(request, \"No ack directive is found\");\n    return;\n  }\n  header->targetBrokerName(it->second.broker_name_);\n  header->targetBrokerId(it->second.broker_id_);\n\n  createActiveMessage(request).sendRequestToUpstream();\n}\n\nActiveMessage& ConnectionManager::createActiveMessage(RemotingCommandPtr& request) {\n  ENVOY_CONN_LOG(trace, \"ConnectionManager#createActiveMessage. 
Code: {}, opaque: {}\",\n                 read_callbacks_->connection(), request->code(), request->opaque());\n  ActiveMessagePtr active_message = std::make_unique<ActiveMessage>(*this, std::move(request));\n  LinkedList::moveIntoList(std::move(active_message), active_message_list_);\n  return **active_message_list_.begin();\n}\n\nvoid ConnectionManager::deferredDelete(ActiveMessage& active_message) {\n  read_callbacks_->connection().dispatcher().deferredDelete(\n      active_message.removeFromList(active_message_list_));\n}\n\nvoid ConnectionManager::resetAllActiveMessages(absl::string_view error_msg) {\n  while (!active_message_list_.empty()) {\n    ENVOY_CONN_LOG(warn, \"Reset pending request {} due to error: {}\", read_callbacks_->connection(),\n                   active_message_list_.front()->downstreamRequest()->opaque(), error_msg);\n    active_message_list_.front()->onReset();\n    stats_.response_error_.inc();\n  }\n}\n\nEnvoy::Network::FilterStatus ConnectionManager::onNewConnection() {\n  return Network::FilterStatus::Continue;\n}\n\nvoid ConnectionManager::initializeReadFilterCallbacks(\n    Envoy::Network::ReadFilterCallbacks& callbacks) {\n  read_callbacks_ = &callbacks;\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/conn_manager.h",
    "content": "#pragma once\n\n#include <list>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stats/timespan.h\"\n#include \"envoy/upstream/thread_local_cluster.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/active_message.h\"\n#include \"extensions/filters/network/rocketmq_proxy/codec.h\"\n#include \"extensions/filters/network/rocketmq_proxy/stats.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass Config {\npublic:\n  virtual ~Config() = default;\n\n  virtual RocketmqFilterStats& stats() PURE;\n\n  virtual Upstream::ClusterManager& clusterManager() PURE;\n\n  virtual Router::RouterPtr createRouter() PURE;\n\n  /**\n   * Indicate whether this proxy is running in development mode. If true, this proxy plugin may\n   * work without dedicated traffic intercepting facility without considering backward\n   * compatibility.\n   * @return true when in development mode; false otherwise.\n   */\n  virtual bool developMode() const PURE;\n\n  virtual std::string proxyAddress() PURE;\n\n  virtual Router::Config& routerConfig() PURE;\n\n  virtual std::chrono::milliseconds transientObjectLifeSpan() const PURE;\n};\n\nclass ConnectionManager;\n\n/**\n * This class is to ensure legacy RocketMQ SDK works. 
Heartbeat between client SDK and envoy is not\n * necessary any more and should be removed once the lite SDK is in-place.\n */\nclass ConsumerGroupMember {\npublic:\n  ConsumerGroupMember(absl::string_view client_id, ConnectionManager& conn_manager);\n\n  bool operator==(const ConsumerGroupMember& other) const { return client_id_ == other.client_id_; }\n\n  bool operator<(const ConsumerGroupMember& other) const { return client_id_ < other.client_id_; }\n\n  void refresh();\n\n  bool expired() const;\n\n  absl::string_view clientId() const { return client_id_; }\n\n  void setLastForTest(MonotonicTime tp) { last_ = tp; }\n\nprivate:\n  std::string client_id_;\n  ConnectionManager* connection_manager_;\n  MonotonicTime last_;\n};\n\nclass ConnectionManager : public Network::ReadFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  ConnectionManager(Config& config, TimeSource& time_source);\n\n  ~ConnectionManager() override = default;\n\n  /**\n   * Called when data is read on the connection.\n   * @param data supplies the read data which may be modified.\n   * @param end_stream supplies whether this is the last byte on the connection. This will only\n   *        be set if the connection has half-close semantics enabled.\n   * @return status used by the filter manager to manage further filter iteration.\n   */\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n\n  /**\n   * Called when a connection is first established. Filters should do one time long term processing\n   * that needs to be done when a connection is established. Filter chain iteration can be stopped\n   * if needed.\n   * @return status used by the filter manager to manage further filter iteration.\n   */\n  Network::FilterStatus onNewConnection() override;\n\n  /**\n   * Initializes the read filter callbacks used to interact with the filter manager. It will be\n   * called by the filter manager a single time when the filter is first registered. 
Thus, any\n   * construction that requires the backing connection should take place in the context of this\n   * function.\n   *\n   * IMPORTANT: No outbound networking or complex processing should be done in this function.\n   *            That should be done in the context of onNewConnection() if needed.\n   *\n   * @param callbacks supplies the callbacks.\n   */\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override;\n\n  /**\n   * Send response to downstream either when envoy proxy has received result from upstream hosts or\n   * the proxy itself may serve the request.\n   * @param response Response to write to downstream with identical opaque number.\n   */\n  void sendResponseToDownstream(RemotingCommandPtr& response);\n\n  void onGetTopicRoute(RemotingCommandPtr request);\n\n  /**\n   * Called when downstream sends heartbeat requests.\n   * @param request heartbeat request from downstream\n   */\n  void onHeartbeat(RemotingCommandPtr request);\n\n  void addOrUpdateGroupMember(absl::string_view group, absl::string_view client_id);\n\n  void onUnregisterClient(RemotingCommandPtr request);\n\n  void onError(RemotingCommandPtr& request, absl::string_view error_msg);\n\n  void onSendMessage(RemotingCommandPtr request);\n\n  void onGetConsumerListByGroup(RemotingCommandPtr request);\n\n  void onPopMessage(RemotingCommandPtr request);\n\n  void onAckMessage(RemotingCommandPtr request);\n\n  ActiveMessage& createActiveMessage(RemotingCommandPtr& request);\n\n  void deferredDelete(ActiveMessage& active_message);\n\n  void resetAllActiveMessages(absl::string_view error_msg);\n\n  Config& config() { return config_; }\n\n  RocketmqFilterStats& stats() { return stats_; }\n\n  absl::flat_hash_map<std::string, std::vector<ConsumerGroupMember>>& groupMembersForTest() {\n    return group_members_;\n  }\n\n  std::list<ActiveMessagePtr>& activeMessageList() { return active_message_list_; }\n\n  void insertAckDirective(const std::string& key, const 
AckMessageDirective& directive) {\n    ack_directive_table_.insert(std::make_pair(key, directive));\n  }\n\n  void eraseAckDirective(const std::string& key) {\n    auto it = ack_directive_table_.find(key);\n    if (it != ack_directive_table_.end()) {\n      ack_directive_table_.erase(it);\n    }\n  }\n\n  TimeSource& timeSource() const { return time_source_; }\n\n  const absl::flat_hash_map<std::string, AckMessageDirective>& getAckDirectiveTableForTest() const {\n    return ack_directive_table_;\n  }\n\n  friend class ConsumerGroupMember;\n\nprivate:\n  /**\n   * Dispatch incoming requests from downstream to run through filter chains.\n   */\n  void dispatch();\n\n  /**\n   * Invoked by heartbeat to purge deprecated ack_directive entries.\n   */\n  void purgeDirectiveTable();\n\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  Buffer::OwnedImpl request_buffer_;\n\n  Config& config_;\n  TimeSource& time_source_;\n  RocketmqFilterStats& stats_;\n\n  std::list<ActiveMessagePtr> active_message_list_;\n\n  absl::flat_hash_map<std::string, std::vector<ConsumerGroupMember>> group_members_;\n\n  /**\n   * Message unique key to message acknowledge directive mapping.\n   * Acknowledge requests first consult this table to determine which host in the cluster to go.\n   */\n  absl::flat_hash_map<std::string, AckMessageDirective> ack_directive_table_;\n};\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/metadata.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/http/header_map_impl.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass MessageMetadata {\npublic:\n  MessageMetadata() = default;\n\n  void setOneWay(bool oneway) { is_oneway_ = oneway; }\n  bool isOneWay() const { return is_oneway_; }\n\n  bool hasTopicName() const { return topic_name_.has_value(); }\n  const std::string& topicName() const { return topic_name_.value(); }\n  void setTopicName(const std::string& topic_name) { topic_name_ = topic_name; }\n\n  /**\n   * @return HeaderMap of current headers\n   */\n  const Http::HeaderMap& headers() const { return *headers_; }\n  Http::HeaderMap& headers() { return *headers_; }\n\nprivate:\n  bool is_oneway_{false};\n  absl::optional<std::string> topic_name_{};\n\n  Http::HeaderMapPtr headers_{Http::RequestHeaderMapImpl::create()};\n};\n\nusing MessageMetadataSharedPtr = std::shared_ptr<MessageMetadata>;\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/protocol.cc",
    "content": "#include \"extensions/filters/network/rocketmq_proxy/protocol.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nvoid SendMessageRequestHeader::encode(ProtobufWkt::Value& root) {\n  auto& members = *(root.mutable_struct_value()->mutable_fields());\n\n  switch (version_) {\n  case SendMessageRequestVersion::V1: {\n    ProtobufWkt::Value producer_group_v;\n    producer_group_v.set_string_value(producer_group_);\n    members[\"producerGroup\"] = producer_group_v;\n\n    ProtobufWkt::Value topic_v;\n    topic_v.set_string_value(topic_.c_str(), topic_.length());\n    members[\"topic\"] = topic_v;\n\n    ProtobufWkt::Value default_topic_v;\n    default_topic_v.set_string_value(default_topic_);\n    members[\"defaultTopic\"] = default_topic_v;\n\n    ProtobufWkt::Value default_topic_queue_number_v;\n    default_topic_queue_number_v.set_number_value(default_topic_queue_number_);\n    members[\"defaultTopicQueueNums\"] = default_topic_queue_number_v;\n\n    ProtobufWkt::Value queue_id_v;\n    queue_id_v.set_number_value(queue_id_);\n    members[\"queueId\"] = queue_id_v;\n\n    ProtobufWkt::Value sys_flag_v;\n    sys_flag_v.set_number_value(sys_flag_);\n    members[\"sysFlag\"] = sys_flag_v;\n\n    ProtobufWkt::Value born_timestamp_v;\n    born_timestamp_v.set_number_value(born_timestamp_);\n    members[\"bornTimestamp\"] = born_timestamp_v;\n\n    ProtobufWkt::Value flag_v;\n    flag_v.set_number_value(flag_);\n    members[\"flag\"] = flag_v;\n\n    if (!properties_.empty()) {\n      ProtobufWkt::Value properties_v;\n      properties_v.set_string_value(properties_.c_str(), properties_.length());\n      members[\"properties\"] = properties_v;\n    }\n\n    if (reconsume_time_ > 0) {\n      ProtobufWkt::Value reconsume_times_v;\n      
reconsume_times_v.set_number_value(reconsume_time_);\n      members[\"reconsumeTimes\"] = reconsume_times_v;\n    }\n\n    if (unit_mode_) {\n      ProtobufWkt::Value unit_mode_v;\n      unit_mode_v.set_bool_value(unit_mode_);\n      members[\"unitMode\"] = unit_mode_v;\n    }\n\n    if (batch_) {\n      ProtobufWkt::Value batch_v;\n      batch_v.set_bool_value(batch_);\n      members[\"batch\"] = batch_v;\n    }\n\n    if (max_reconsume_time_ > 0) {\n      ProtobufWkt::Value max_reconsume_time_v;\n      max_reconsume_time_v.set_number_value(max_reconsume_time_);\n      members[\"maxReconsumeTimes\"] = max_reconsume_time_v;\n    }\n    break;\n  }\n  case SendMessageRequestVersion::V2: {\n    ProtobufWkt::Value producer_group_v;\n    producer_group_v.set_string_value(producer_group_.c_str(), producer_group_.length());\n    members[\"a\"] = producer_group_v;\n\n    ProtobufWkt::Value topic_v;\n    topic_v.set_string_value(topic_.c_str(), topic_.length());\n    members[\"b\"] = topic_v;\n\n    ProtobufWkt::Value default_topic_v;\n    default_topic_v.set_string_value(default_topic_.c_str(), default_topic_.length());\n    members[\"c\"] = default_topic_v;\n\n    ProtobufWkt::Value default_topic_queue_number_v;\n    default_topic_queue_number_v.set_number_value(default_topic_queue_number_);\n    members[\"d\"] = default_topic_queue_number_v;\n\n    ProtobufWkt::Value queue_id_v;\n    queue_id_v.set_number_value(queue_id_);\n    members[\"e\"] = queue_id_v;\n\n    ProtobufWkt::Value sys_flag_v;\n    sys_flag_v.set_number_value(sys_flag_);\n    members[\"f\"] = sys_flag_v;\n\n    ProtobufWkt::Value born_timestamp_v;\n    born_timestamp_v.set_number_value(born_timestamp_);\n    members[\"g\"] = born_timestamp_v;\n\n    ProtobufWkt::Value flag_v;\n    flag_v.set_number_value(flag_);\n    members[\"h\"] = flag_v;\n\n    if (!properties_.empty()) {\n      ProtobufWkt::Value properties_v;\n      properties_v.set_string_value(properties_.c_str(), properties_.length());\n      
members[\"i\"] = properties_v;\n    }\n\n    if (reconsume_time_ > 0) {\n      ProtobufWkt::Value reconsume_times_v;\n      reconsume_times_v.set_number_value(reconsume_time_);\n      members[\"j\"] = reconsume_times_v;\n    }\n\n    if (unit_mode_) {\n      ProtobufWkt::Value unit_mode_v;\n      unit_mode_v.set_bool_value(unit_mode_);\n      members[\"k\"] = unit_mode_v;\n    }\n\n    if (batch_) {\n      ProtobufWkt::Value batch_v;\n      batch_v.set_bool_value(batch_);\n      members[\"m\"] = batch_v;\n    }\n\n    if (max_reconsume_time_ > 0) {\n      ProtobufWkt::Value max_reconsume_time_v;\n      max_reconsume_time_v.set_number_value(max_reconsume_time_);\n      members[\"l\"] = max_reconsume_time_v;\n    }\n    break;\n  }\n  default:\n    break;\n  }\n}\n\nvoid SendMessageRequestHeader::decode(const ProtobufWkt::Value& ext_fields) {\n  const auto& members = ext_fields.struct_value().fields();\n  switch (version_) {\n  case SendMessageRequestVersion::V1: {\n    ASSERT(members.contains(\"producerGroup\"));\n    ASSERT(members.contains(\"topic\"));\n    ASSERT(members.contains(\"defaultTopic\"));\n    ASSERT(members.contains(\"defaultTopicQueueNums\"));\n    ASSERT(members.contains(\"queueId\"));\n    ASSERT(members.contains(\"sysFlag\"));\n    ASSERT(members.contains(\"bornTimestamp\"));\n    ASSERT(members.contains(\"flag\"));\n\n    producer_group_ = members.at(\"producerGroup\").string_value();\n    topic_ = members.at(\"topic\").string_value();\n    default_topic_ = members.at(\"defaultTopic\").string_value();\n\n    if (members.at(\"defaultTopicQueueNums\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      default_topic_queue_number_ = members.at(\"defaultTopicQueueNums\").number_value();\n    } else {\n      default_topic_queue_number_ = std::stoi(members.at(\"defaultTopicQueueNums\").string_value());\n    }\n\n    if (members.at(\"queueId\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      queue_id_ = 
members.at(\"queueId\").number_value();\n    } else {\n      queue_id_ = std::stoi(members.at(\"queueId\").string_value());\n    }\n\n    if (members.at(\"sysFlag\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      sys_flag_ = static_cast<int32_t>(members.at(\"sysFlag\").number_value());\n    } else {\n      sys_flag_ = std::stoi(members.at(\"sysFlag\").string_value());\n    }\n\n    if (members.at(\"bornTimestamp\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      born_timestamp_ = static_cast<int64_t>(members.at(\"bornTimestamp\").number_value());\n    } else {\n      born_timestamp_ = std::stoll(members.at(\"bornTimestamp\").string_value());\n    }\n\n    if (members.at(\"flag\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      flag_ = static_cast<int32_t>(members.at(\"flag\").number_value());\n    } else {\n      flag_ = std::stoi(members.at(\"flag\").string_value());\n    }\n\n    if (members.contains(\"properties\")) {\n      properties_ = members.at(\"properties\").string_value();\n    }\n\n    if (members.contains(\"reconsumeTimes\")) {\n      if (members.at(\"reconsumeTimes\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n        reconsume_time_ = members.at(\"reconsumeTimes\").number_value();\n      } else {\n        reconsume_time_ = std::stoi(members.at(\"reconsumeTimes\").string_value());\n      }\n    }\n\n    if (members.contains(\"unitMode\")) {\n      if (members.at(\"unitMode\").kind_case() == ProtobufWkt::Value::kBoolValue) {\n        unit_mode_ = members.at(\"unitMode\").bool_value();\n      } else {\n        unit_mode_ = (members.at(\"unitMode\").string_value() == std::string(\"true\"));\n      }\n    }\n\n    if (members.contains(\"batch\")) {\n      if (members.at(\"batch\").kind_case() == ProtobufWkt::Value::kBoolValue) {\n        batch_ = members.at(\"batch\").bool_value();\n      } else {\n        batch_ = (members.at(\"batch\").string_value() == std::string(\"true\"));\n      }\n    }\n\n    if 
(members.contains(\"maxReconsumeTimes\")) {\n      if (members.at(\"maxReconsumeTimes\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n        max_reconsume_time_ = static_cast<int32_t>(members.at(\"maxReconsumeTimes\").number_value());\n      } else {\n        max_reconsume_time_ = std::stoi(members.at(\"maxReconsumeTimes\").string_value());\n      }\n    }\n    break;\n  }\n\n  case SendMessageRequestVersion::V2: {\n    ASSERT(members.contains(\"a\"));\n    ASSERT(members.contains(\"b\"));\n    ASSERT(members.contains(\"c\"));\n    ASSERT(members.contains(\"d\"));\n    ASSERT(members.contains(\"e\"));\n    ASSERT(members.contains(\"f\"));\n    ASSERT(members.contains(\"g\"));\n    ASSERT(members.contains(\"h\"));\n\n    producer_group_ = members.at(\"a\").string_value();\n    topic_ = members.at(\"b\").string_value();\n    default_topic_ = members.at(\"c\").string_value();\n\n    if (members.at(\"d\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      default_topic_queue_number_ = members.at(\"d\").number_value();\n    } else {\n      default_topic_queue_number_ = std::stoi(members.at(\"d\").string_value());\n    }\n\n    if (members.at(\"e\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      queue_id_ = members.at(\"e\").number_value();\n    } else {\n      queue_id_ = std::stoi(members.at(\"e\").string_value());\n    }\n\n    if (members.at(\"f\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      sys_flag_ = static_cast<int32_t>(members.at(\"f\").number_value());\n    } else {\n      sys_flag_ = std::stoi(members.at(\"f\").string_value());\n    }\n\n    if (members.at(\"g\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      born_timestamp_ = static_cast<int64_t>(members.at(\"g\").number_value());\n    } else {\n      born_timestamp_ = std::stoll(members.at(\"g\").string_value());\n    }\n\n    if (members.at(\"h\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n      flag_ = 
static_cast<int32_t>(members.at(\"h\").number_value());\n    } else {\n      flag_ = std::stoi(members.at(\"h\").string_value());\n    }\n\n    if (members.contains(\"i\")) {\n      properties_ = members.at(\"i\").string_value();\n    }\n\n    if (members.contains(\"j\")) {\n      if (members.at(\"j\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n        reconsume_time_ = members.at(\"j\").number_value();\n      } else {\n        reconsume_time_ = std::stoi(members.at(\"j\").string_value());\n      }\n    }\n\n    if (members.contains(\"k\")) {\n      if (members.at(\"k\").kind_case() == ProtobufWkt::Value::kBoolValue) {\n        unit_mode_ = members.at(\"k\").bool_value();\n      } else {\n        unit_mode_ = (members.at(\"k\").string_value() == std::string(\"true\"));\n      }\n    }\n\n    if (members.contains(\"m\")) {\n      if (members.at(\"m\").kind_case() == ProtobufWkt::Value::kBoolValue) {\n        batch_ = members.at(\"m\").bool_value();\n      } else {\n        batch_ = (members.at(\"m\").string_value() == std::string(\"true\"));\n      }\n    }\n\n    if (members.contains(\"l\")) {\n      if (members.at(\"l\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n        max_reconsume_time_ = members.at(\"l\").number_value();\n      } else {\n        max_reconsume_time_ = std::stoi(members.at(\"l\").string_value());\n      }\n    }\n    break;\n  }\n  default:\n    ENVOY_LOG(error, \"Unknown SendMessageRequestVersion: {}\", static_cast<int>(version_));\n    break;\n  }\n}\n\nvoid SendMessageResponseHeader::encode(ProtobufWkt::Value& root) {\n  auto& members = *(root.mutable_struct_value()->mutable_fields());\n\n  ASSERT(!msg_id_.empty());\n  ProtobufWkt::Value msg_id_v;\n  msg_id_v.set_string_value(msg_id_.c_str(), msg_id_.length());\n  members[\"msgId\"] = msg_id_v;\n\n  ASSERT(queue_id_ >= 0);\n  ProtobufWkt::Value queue_id_v;\n  queue_id_v.set_number_value(queue_id_);\n  members[\"queueId\"] = queue_id_v;\n\n  ASSERT(queue_offset_ >= 0);\n  
ProtobufWkt::Value queue_offset_v;\n  queue_offset_v.set_number_value(queue_offset_);\n  members[\"queueOffset\"] = queue_offset_v;\n\n  if (!transaction_id_.empty()) {\n    ProtobufWkt::Value transaction_id_v;\n    transaction_id_v.set_string_value(transaction_id_.c_str(), transaction_id_.length());\n    members[\"transactionId\"] = transaction_id_v;\n  }\n}\n\nvoid SendMessageResponseHeader::decode(const ProtobufWkt::Value& ext_fields) {\n  const auto& members = ext_fields.struct_value().fields();\n  ASSERT(members.contains(\"msgId\"));\n  ASSERT(members.contains(\"queueId\"));\n  ASSERT(members.contains(\"queueOffset\"));\n\n  msg_id_ = members.at(\"msgId\").string_value();\n\n  if (members.at(\"queueId\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    queue_id_ = members.at(\"queueId\").number_value();\n  } else {\n    queue_id_ = std::stoi(members.at(\"queueId\").string_value());\n  }\n\n  if (members.at(\"queueOffset\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    queue_offset_ = members.at(\"queueOffset\").number_value();\n  } else {\n    queue_offset_ = std::stoll(members.at(\"queueOffset\").string_value());\n  }\n\n  if (members.contains(\"transactionId\")) {\n    transaction_id_ = members.at(\"transactionId\").string_value();\n  }\n}\n\nvoid GetRouteInfoRequestHeader::encode(ProtobufWkt::Value& root) {\n  auto& members = *(root.mutable_struct_value()->mutable_fields());\n\n  ProtobufWkt::Value topic_v;\n  topic_v.set_string_value(topic_.c_str(), topic_.length());\n  members[\"topic\"] = topic_v;\n}\n\nvoid GetRouteInfoRequestHeader::decode(const ProtobufWkt::Value& ext_fields) {\n  const auto& members = ext_fields.struct_value().fields();\n  ASSERT(members.contains(\"topic\"));\n  topic_ = members.at(\"topic\").string_value();\n}\n\nvoid PopMessageRequestHeader::encode(ProtobufWkt::Value& root) {\n  auto& members = *(root.mutable_struct_value()->mutable_fields());\n\n  ASSERT(!consumer_group_.empty());\n  ProtobufWkt::Value 
consumer_group_v;\n  consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size());\n  members[\"consumerGroup\"] = consumer_group_v;\n\n  ASSERT(!topic_.empty());\n  ProtobufWkt::Value topicNode;\n  topicNode.set_string_value(topic_.c_str(), topic_.length());\n  members[\"topic\"] = topicNode;\n\n  ProtobufWkt::Value queue_id_v;\n  queue_id_v.set_number_value(queue_id_);\n  members[\"queueId\"] = queue_id_v;\n\n  ProtobufWkt::Value max_msg_nums_v;\n  max_msg_nums_v.set_number_value(max_msg_nums_);\n  members[\"maxMsgNums\"] = max_msg_nums_v;\n\n  ProtobufWkt::Value invisible_time_v;\n  invisible_time_v.set_number_value(invisible_time_);\n  members[\"invisibleTime\"] = invisible_time_v;\n\n  ProtobufWkt::Value poll_time_v;\n  poll_time_v.set_number_value(poll_time_);\n  members[\"pollTime\"] = poll_time_v;\n\n  ProtobufWkt::Value born_time_v;\n  born_time_v.set_number_value(born_time_);\n  members[\"bornTime\"] = born_time_v;\n\n  ProtobufWkt::Value init_mode_v;\n  init_mode_v.set_number_value(init_mode_);\n  members[\"initMode\"] = init_mode_v;\n\n  if (!exp_type_.empty()) {\n    ProtobufWkt::Value exp_type_v;\n    exp_type_v.set_string_value(exp_type_.c_str(), exp_type_.size());\n    members[\"expType\"] = exp_type_v;\n  }\n\n  if (!exp_.empty()) {\n    ProtobufWkt::Value exp_v;\n    exp_v.set_string_value(exp_.c_str(), exp_.size());\n    members[\"exp\"] = exp_v;\n  }\n}\n\nvoid PopMessageRequestHeader::decode(const ProtobufWkt::Value& ext_fields) {\n  const auto& members = ext_fields.struct_value().fields();\n  ASSERT(members.contains(\"consumerGroup\"));\n  ASSERT(members.contains(\"topic\"));\n  ASSERT(members.contains(\"queueId\"));\n  ASSERT(members.contains(\"maxMsgNums\"));\n  ASSERT(members.contains(\"invisibleTime\"));\n  ASSERT(members.contains(\"pollTime\"));\n  ASSERT(members.contains(\"bornTime\"));\n  ASSERT(members.contains(\"initMode\"));\n\n  consumer_group_ = members.at(\"consumerGroup\").string_value();\n  topic_ = 
members.at(\"topic\").string_value();\n\n  if (members.at(\"queueId\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    queue_id_ = members.at(\"queueId\").number_value();\n  } else {\n    queue_id_ = std::stoi(members.at(\"queueId\").string_value());\n  }\n\n  if (members.at(\"maxMsgNums\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    max_msg_nums_ = members.at(\"maxMsgNums\").number_value();\n  } else {\n    max_msg_nums_ = std::stoi(members.at(\"maxMsgNums\").string_value());\n  }\n\n  if (members.at(\"invisibleTime\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    invisible_time_ = members.at(\"invisibleTime\").number_value();\n  } else {\n    invisible_time_ = std::stoll(members.at(\"invisibleTime\").string_value());\n  }\n\n  if (members.at(\"pollTime\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    poll_time_ = members.at(\"pollTime\").number_value();\n  } else {\n    poll_time_ = std::stoll(members.at(\"pollTime\").string_value());\n  }\n\n  if (members.at(\"bornTime\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    born_time_ = members.at(\"bornTime\").number_value();\n  } else {\n    born_time_ = std::stoll(members.at(\"bornTime\").string_value());\n  }\n\n  if (members.at(\"initMode\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    init_mode_ = members.at(\"initMode\").number_value();\n  } else {\n    init_mode_ = std::stol(members.at(\"initMode\").string_value());\n  }\n\n  if (members.contains(\"expType\")) {\n    exp_type_ = members.at(\"expType\").string_value();\n  }\n\n  if (members.contains(\"exp\")) {\n    exp_ = members.at(\"exp\").string_value();\n  }\n}\n\nvoid PopMessageResponseHeader::encode(ProtobufWkt::Value& root) {\n  auto& members = *(root.mutable_struct_value()->mutable_fields());\n\n  ProtobufWkt::Value pop_time_v;\n  pop_time_v.set_number_value(pop_time_);\n  members[\"popTime\"] = pop_time_v;\n\n  ProtobufWkt::Value invisible_time_v;\n  
invisible_time_v.set_number_value(invisible_time_);\n  members[\"invisibleTime\"] = invisible_time_v;\n\n  ProtobufWkt::Value revive_qid_v;\n  revive_qid_v.set_number_value(revive_qid_);\n  members[\"reviveQid\"] = revive_qid_v;\n\n  ProtobufWkt::Value rest_num_v;\n  rest_num_v.set_number_value(rest_num_);\n  members[\"restNum\"] = rest_num_v;\n\n  if (!start_offset_info_.empty()) {\n    ProtobufWkt::Value start_offset_info_v;\n    start_offset_info_v.set_string_value(start_offset_info_.c_str(), start_offset_info_.size());\n    members[\"startOffsetInfo\"] = start_offset_info_v;\n  }\n\n  if (!msg_off_set_info_.empty()) {\n    ProtobufWkt::Value msg_offset_info_v;\n    msg_offset_info_v.set_string_value(msg_off_set_info_.c_str(), msg_off_set_info_.size());\n    members[\"msgOffsetInfo\"] = msg_offset_info_v;\n  }\n\n  if (!order_count_info_.empty()) {\n    ProtobufWkt::Value order_count_info_v;\n    order_count_info_v.set_string_value(order_count_info_.c_str(), order_count_info_.size());\n    members[\"orderCountInfo\"] = order_count_info_v;\n  }\n}\n\nvoid PopMessageResponseHeader::decode(const ProtobufWkt::Value& ext_fields) {\n  const auto& members = ext_fields.struct_value().fields();\n  ASSERT(members.contains(\"popTime\"));\n  ASSERT(members.contains(\"invisibleTime\"));\n  ASSERT(members.contains(\"reviveQid\"));\n  ASSERT(members.contains(\"restNum\"));\n\n  if (members.at(\"popTime\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    pop_time_ = members.at(\"popTime\").number_value();\n  } else {\n    pop_time_ = std::stoull(members.at(\"popTime\").string_value());\n  }\n\n  if (members.at(\"invisibleTime\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    invisible_time_ = members.at(\"invisibleTime\").number_value();\n  } else {\n    invisible_time_ = std::stoull(members.at(\"invisibleTime\").string_value());\n  }\n\n  if (members.at(\"reviveQid\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    revive_qid_ = 
members.at(\"reviveQid\").number_value();\n  } else {\n    revive_qid_ = std::stoul(members.at(\"reviveQid\").string_value());\n  }\n\n  if (members.at(\"restNum\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    rest_num_ = members.at(\"restNum\").number_value();\n  } else {\n    rest_num_ = std::stoull(members.at(\"restNum\").string_value());\n  }\n\n  if (members.contains(\"startOffsetInfo\")) {\n    start_offset_info_ = members.at(\"startOffsetInfo\").string_value();\n  }\n\n  if (members.contains(\"msgOffsetInfo\")) {\n    msg_off_set_info_ = members.at(\"msgOffsetInfo\").string_value();\n  }\n\n  if (members.contains(\"orderCountInfo\")) {\n    order_count_info_ = members.at(\"orderCountInfo\").string_value();\n  }\n}\n\nvoid AckMessageRequestHeader::encode(ProtobufWkt::Value& root) {\n  auto& members = *(root.mutable_struct_value()->mutable_fields());\n\n  ASSERT(!consumer_group_.empty());\n  ProtobufWkt::Value consumer_group_v;\n  consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size());\n  members[\"consumerGroup\"] = consumer_group_v;\n\n  ASSERT(!topic_.empty());\n  ProtobufWkt::Value topic_v;\n  topic_v.set_string_value(topic_.c_str(), topic_.size());\n  members[\"topic\"] = topic_v;\n\n  ASSERT(queue_id_ >= 0);\n  ProtobufWkt::Value queue_id_v;\n  queue_id_v.set_number_value(queue_id_);\n  members[\"queueId\"] = queue_id_v;\n\n  ASSERT(!extra_info_.empty());\n  ProtobufWkt::Value extra_info_v;\n  extra_info_v.set_string_value(extra_info_.c_str(), extra_info_.size());\n  members[\"extraInfo\"] = extra_info_v;\n\n  ASSERT(offset_ >= 0);\n  ProtobufWkt::Value offset_v;\n  offset_v.set_number_value(offset_);\n  members[\"offset\"] = offset_v;\n}\n\nvoid AckMessageRequestHeader::decode(const ProtobufWkt::Value& ext_fields) {\n  const auto& members = ext_fields.struct_value().fields();\n  ASSERT(members.contains(\"consumerGroup\"));\n  ASSERT(members.contains(\"topic\"));\n  ASSERT(members.contains(\"queueId\"));\n  
ASSERT(members.contains(\"extraInfo\"));\n  ASSERT(members.contains(\"offset\"));\n\n  consumer_group_ = members.at(\"consumerGroup\").string_value();\n\n  topic_ = members.at(\"topic\").string_value();\n\n  if (members.at(\"queueId\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    queue_id_ = members.at(\"queueId\").number_value();\n  } else {\n    queue_id_ = std::stoi(members.at(\"queueId\").string_value());\n  }\n\n  extra_info_ = members.at(\"extraInfo\").string_value();\n\n  if (members.at(\"offset\").kind_case() == ProtobufWkt::Value::kNumberValue) {\n    offset_ = members.at(\"offset\").number_value();\n  } else {\n    offset_ = std::stoll(members.at(\"offset\").string_value());\n  }\n}\n\nvoid UnregisterClientRequestHeader::encode(ProtobufWkt::Value& root) {\n  auto& members = *(root.mutable_struct_value()->mutable_fields());\n\n  ASSERT(!client_id_.empty());\n  ProtobufWkt::Value client_id_v;\n  client_id_v.set_string_value(client_id_.c_str(), client_id_.size());\n  members[\"clientID\"] = client_id_v;\n\n  ASSERT(!producer_group_.empty() || !consumer_group_.empty());\n  if (!producer_group_.empty()) {\n    ProtobufWkt::Value producer_group_v;\n    producer_group_v.set_string_value(producer_group_.c_str(), producer_group_.size());\n    members[\"producerGroup\"] = producer_group_v;\n  }\n\n  if (!consumer_group_.empty()) {\n    ProtobufWkt::Value consumer_group_v;\n    consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size());\n    members[\"consumerGroup\"] = consumer_group_v;\n  }\n}\n\nvoid UnregisterClientRequestHeader::decode(const ProtobufWkt::Value& ext_fields) {\n  const auto& members = ext_fields.struct_value().fields();\n  ASSERT(members.contains(\"clientID\"));\n  ASSERT(members.contains(\"producerGroup\") || members.contains(\"consumerGroup\"));\n\n  client_id_ = members.at(\"clientID\").string_value();\n\n  if (members.contains(\"consumerGroup\")) {\n    consumer_group_ = 
members.at(\"consumerGroup\").string_value();\n  }\n\n  if (members.contains(\"producerGroup\")) {\n    producer_group_ = members.at(\"producerGroup\").string_value();\n  }\n}\n\nvoid GetConsumerListByGroupResponseBody::encode(ProtobufWkt::Struct& root) {\n  auto& members = *(root.mutable_fields());\n\n  ProtobufWkt::Value consumer_id_list_v;\n  auto member_list = consumer_id_list_v.mutable_list_value();\n  for (const auto& consumerId : consumer_id_list_) {\n    auto consumer_id_v = new ProtobufWkt::Value;\n    consumer_id_v->set_string_value(consumerId.c_str(), consumerId.size());\n    member_list->mutable_values()->AddAllocated(consumer_id_v);\n  }\n  members[\"consumerIdList\"] = consumer_id_list_v;\n}\n\nbool HeartbeatData::decode(ProtobufWkt::Struct& doc) {\n  const auto& members = doc.fields();\n  if (!members.contains(\"clientID\")) {\n    return false;\n  }\n\n  client_id_ = members.at(\"clientID\").string_value();\n\n  if (members.contains(\"consumerDataSet\")) {\n    auto& consumer_data_list = members.at(\"consumerDataSet\").list_value().values();\n    for (const auto& it : consumer_data_list) {\n      if (it.struct_value().fields().contains(\"groupName\")) {\n        consumer_groups_.push_back(it.struct_value().fields().at(\"groupName\").string_value());\n      }\n    }\n  }\n  return true;\n}\n\nvoid HeartbeatData::encode(ProtobufWkt::Struct& root) {\n  auto& members = *(root.mutable_fields());\n\n  ProtobufWkt::Value client_id_v;\n  client_id_v.set_string_value(client_id_.c_str(), client_id_.size());\n  members[\"clientID\"] = client_id_v;\n}\n\nvoid GetConsumerListByGroupRequestHeader::encode(ProtobufWkt::Value& root) {\n  auto& members = *(root.mutable_struct_value()->mutable_fields());\n\n  ProtobufWkt::Value consumer_group_v;\n  consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size());\n  members[\"consumerGroup\"] = consumer_group_v;\n}\n\nvoid GetConsumerListByGroupRequestHeader::decode(const ProtobufWkt::Value& 
ext_fields) {\n  const auto& members = ext_fields.struct_value().fields();\n  ASSERT(members.contains(\"consumerGroup\"));\n\n  consumer_group_ = members.at(\"consumerGroup\").string_value();\n}\n\nvoid MetadataHelper::parseRequest(RemotingCommandPtr& request, MessageMetadataSharedPtr metadata) {\n  metadata->setOneWay(request->isOneWay());\n  CommandCustomHeader* custom_header = request->customHeader();\n\n  auto route_command_custom_header = request->typedCustomHeader<RoutingCommandCustomHeader>();\n  if (route_command_custom_header != nullptr) {\n    metadata->setTopicName(route_command_custom_header->topic());\n  }\n\n  const uint64_t code = request->code();\n  metadata->headers().addCopy(Http::LowerCaseString(\"code\"), code);\n\n  if (enumToInt(RequestCode::AckMessage) == code) {\n    metadata->headers().addCopy(Http::LowerCaseString(RocketmqConstants::get().BrokerName),\n                                custom_header->targetBrokerName());\n    metadata->headers().addCopy(Http::LowerCaseString(RocketmqConstants::get().BrokerId),\n                                custom_header->targetBrokerId());\n  }\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/protocol.h",
    "content": "#pragma once\n\n#include <map>\n#include <utility>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/metadata.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\n/**\n * Retry topic prefix\n */\nconstexpr absl::string_view RetryTopicPrefix = \"%RETRY%\";\n\n/**\n * RocketMQ supports two versions of sending message protocol. These two versions are identical in\n * terms of functionality. But they do differ in encoding scheme. See SendMessageRequestHeader\n * encode/decode functions for specific differences.\n */\nenum class SendMessageRequestVersion : uint32_t {\n  V1 = 0,\n  V2 = 1,\n  // Only for test purpose\n  V3 = 2,\n};\n\n/**\n * Command custom header are used in combination with RemotingCommand::code, to provide further\n * instructions and data for the operation defined by the protocol.\n * In addition to the shared encode/decode functions, this class also defines target-broker-name and\n * target-broker-id fields, which are helpful if the associated remoting command should be delivered\n * to specific host according to the semantics of the previous command.\n */\nclass CommandCustomHeader {\npublic:\n  CommandCustomHeader() = default;\n\n  virtual ~CommandCustomHeader() = default;\n\n  virtual void encode(ProtobufWkt::Value& root) PURE;\n\n  virtual void decode(const ProtobufWkt::Value& ext_fields) PURE;\n\n  const std::string& targetBrokerName() const { return target_broker_name_; }\n\n  void targetBrokerName(absl::string_view broker_name) {\n    target_broker_name_ = std::string(broker_name.data(), broker_name.length());\n  }\n\n  int32_t targetBrokerId() const { return target_broker_id_; }\n\n  void targetBrokerId(int32_t broker_id) { 
target_broker_id_ = broker_id; }\n\nprotected:\n  /**\n   * If this field is not empty, RDS will employ this field and target-broker-id to direct the\n   * associated request to a subset of the chosen cluster.\n   */\n  std::string target_broker_name_;\n\n  /**\n   * Used along with target-broker-name field.\n   */\n  int32_t target_broker_id_;\n};\n\nusing CommandCustomHeaderPtr = CommandCustomHeader*;\n\n/**\n * This class extends from CommandCustomHeader, adding a commonly used field by various custom\n * command headers which participate the process of request routing.\n */\nclass RoutingCommandCustomHeader : public CommandCustomHeader {\npublic:\n  virtual const std::string& topic() const { return topic_; }\n\n  virtual void topic(absl::string_view t) { topic_ = std::string(t.data(), t.size()); }\n\nprotected:\n  std::string topic_;\n};\n\n/**\n * This class defines basic request/response forms used by RocketMQ among all its components.\n */\nclass RemotingCommand {\npublic:\n  RemotingCommand() : RemotingCommand(0, 0, 0) {}\n\n  RemotingCommand(int code, int version, int opaque)\n      : code_(code), version_(version), opaque_(opaque), flag_(0) {}\n\n  ~RemotingCommand() { delete custom_header_; }\n\n  int32_t code() const { return code_; }\n\n  void code(int code) { code_ = code; }\n\n  const std::string& language() const { return language_; }\n\n  void language(absl::string_view lang) { language_ = std::string(lang.data(), lang.size()); }\n\n  int32_t version() const { return version_; }\n\n  void opaque(int opaque) { opaque_ = opaque; }\n\n  int32_t opaque() const { return opaque_; }\n\n  uint32_t flag() const { return flag_; }\n\n  void flag(uint32_t f) { flag_ = f; }\n\n  void customHeader(CommandCustomHeaderPtr custom_header) { custom_header_ = custom_header; }\n\n  CommandCustomHeaderPtr customHeader() const { return custom_header_; }\n\n  template <typename T> T* typedCustomHeader() {\n    if (!custom_header_) {\n      return nullptr;\n    }\n\n    
return dynamic_cast<T*>(custom_header_);\n  }\n\n  uint32_t bodyLength() const { return body_.length(); }\n\n  Buffer::Instance& body() { return body_; }\n\n  const std::string& remark() const { return remark_; }\n\n  void remark(absl::string_view remark) { remark_ = std::string(remark.data(), remark.length()); }\n\n  const std::string& serializeTypeCurrentRPC() const { return serialize_type_current_rpc_; }\n\n  void serializeTypeCurrentRPC(absl::string_view serialization_type) {\n    serialize_type_current_rpc_ = std::string(serialization_type.data(), serialization_type.size());\n  }\n\n  bool isOneWay() const {\n    uint32_t marker = 1u << SHIFT_ONEWAY;\n    return (flag_ & marker) == marker;\n  }\n\n  void markAsResponse() { flag_ |= (1u << SHIFT_RPC); }\n\n  void markAsOneway() { flag_ |= (1u << SHIFT_ONEWAY); }\n\n  static bool isResponse(uint32_t flag) { return (flag & (1u << SHIFT_RPC)) == (1u << SHIFT_RPC); }\n\nprivate:\n  /**\n   * Action code of this command. Possible values are defined in RequestCode enumeration.\n   */\n  int32_t code_;\n\n  /**\n   * Language used by the client.\n   */\n  std::string language_{\"CPP\"};\n\n  /**\n   * Version of the client SDK.\n   */\n  int32_t version_;\n\n  /**\n   * Request ID. If the RPC is request-response form, this field is used to establish the\n   * association.\n   */\n  int32_t opaque_;\n\n  /**\n   * Bit-wise flag indicating RPC type, including whether it is one-way or request-response;\n   * a request or response command.\n   */\n  uint32_t flag_;\n\n  /**\n   * Remark is used to deliver text message in addition to code. 
Urgent scenarios may use this field\n   * to transfer diagnostic message to the counterparts when a full-fledged response is impossible.\n   */\n  std::string remark_;\n\n  /**\n   * Indicate how the custom command header is serialized.\n   */\n  std::string serialize_type_current_rpc_{\"JSON\"};\n\n  /**\n   * The custom command header works with command code to provide additional protocol\n   * implementation.\n   * Generally speaking, each code has pair of request/response custom command header.\n   */\n  CommandCustomHeaderPtr custom_header_{nullptr};\n\n  /**\n   * The command body, in form of binary.\n   */\n  Buffer::OwnedImpl body_;\n\n  static constexpr uint32_t SHIFT_RPC = 0;\n\n  static constexpr uint32_t SHIFT_ONEWAY = 1;\n\n  friend class Encoder;\n  friend class Decoder;\n};\n\nusing RemotingCommandPtr = std::unique_ptr<RemotingCommand>;\n\n/**\n * Command codes used when sending requests. Meaning of each field is self-explanatory.\n */\nenum class RequestCode : uint32_t {\n  SendMessage = 10,\n  HeartBeat = 34,\n  UnregisterClient = 35,\n  GetConsumerListByGroup = 38,\n  PopMessage = 50,\n  AckMessage = 51,\n  GetRouteInfoByTopic = 105,\n  SendMessageV2 = 310,\n  // Only for test purpose\n  Unsupported = 999,\n};\n\n/**\n * Command code used when sending responses. 
Meaning of each enum is self-explanatory.\n */\nenum class ResponseCode : uint32_t {\n  Success = 0,\n  SystemError = 1,\n  SystemBusy = 2,\n  RequestCodeNotSupported = 3,\n  ReplicaNotAvailable = 11,\n};\n\n/**\n * Custom command header for sending messages.\n */\nclass SendMessageRequestHeader : public RoutingCommandCustomHeader,\n                                 Logger::Loggable<Logger::Id::rocketmq> {\npublic:\n  ~SendMessageRequestHeader() override = default;\n\n  int32_t queueId() const { return queue_id_; }\n\n  /**\n   * TODO(lizhanhui): Remove this write API after adding queue-id-aware route logic\n   * @param queue_id target queue Id.\n   */\n  void queueId(int32_t queue_id) { queue_id_ = queue_id; }\n\n  void producerGroup(std::string producer_group) { producer_group_ = std::move(producer_group); }\n\n  void encode(ProtobufWkt::Value& root) override;\n\n  void decode(const ProtobufWkt::Value& ext_fields) override;\n\n  const std::string& producerGroup() const { return producer_group_; }\n\n  const std::string& defaultTopic() const { return default_topic_; }\n\n  int32_t defaultTopicQueueNumber() const { return default_topic_queue_number_; }\n\n  int32_t sysFlag() const { return sys_flag_; }\n\n  int32_t flag() const { return flag_; }\n\n  int64_t bornTimestamp() const { return born_timestamp_; }\n\n  const std::string& properties() const { return properties_; }\n\n  int32_t reconsumeTimes() const { return reconsume_time_; }\n\n  bool unitMode() const { return unit_mode_; }\n\n  bool batch() const { return batch_; }\n\n  int32_t maxReconsumeTimes() const { return max_reconsume_time_; }\n\n  void properties(absl::string_view props) {\n    properties_ = std::string(props.data(), props.size());\n  }\n\n  void reconsumeTimes(int32_t reconsume_times) { reconsume_time_ = reconsume_times; }\n\n  void unitMode(bool unit_mode) { unit_mode_ = unit_mode; }\n\n  void batch(bool batch) { batch_ = batch; }\n\n  void maxReconsumeTimes(int32_t max_reconsume_times) { 
max_reconsume_time_ = max_reconsume_times; }\n\n  void version(SendMessageRequestVersion version) { version_ = version; }\n\n  SendMessageRequestVersion version() const { return version_; }\n\nprivate:\n  std::string producer_group_;\n  std::string default_topic_;\n  int32_t default_topic_queue_number_{0};\n  int32_t queue_id_{-1};\n  int32_t sys_flag_{0};\n  int64_t born_timestamp_{0};\n  int32_t flag_{0};\n  std::string properties_;\n  int32_t reconsume_time_{0};\n  bool unit_mode_{false};\n  bool batch_{false};\n  int32_t max_reconsume_time_{0};\n  SendMessageRequestVersion version_{SendMessageRequestVersion::V1};\n\n  friend class Decoder;\n};\n\n/**\n * Custom command header to respond to a send-message-request.\n */\nclass SendMessageResponseHeader : public CommandCustomHeader {\npublic:\n  SendMessageResponseHeader() = default;\n\n  SendMessageResponseHeader(std::string msg_id, int32_t queue_id, int64_t queue_offset,\n                            std::string transaction_id)\n      : msg_id_(std::move(msg_id)), queue_id_(queue_id), queue_offset_(queue_offset),\n        transaction_id_(std::move(transaction_id)) {}\n\n  void encode(ProtobufWkt::Value& root) override;\n\n  void decode(const ProtobufWkt::Value& ext_fields) override;\n\n  const std::string& msgId() const { return msg_id_; }\n\n  int32_t queueId() const { return queue_id_; }\n\n  int64_t queueOffset() const { return queue_offset_; }\n\n  const std::string& transactionId() const { return transaction_id_; }\n\n  // This function is for testing only.\n  void msgIdForTest(absl::string_view msg_id) {\n    msg_id_ = std::string(msg_id.data(), msg_id.size());\n  }\n\n  void queueId(int32_t queue_id) { queue_id_ = queue_id; }\n\n  void queueOffset(int64_t queue_offset) { queue_offset_ = queue_offset; }\n\n  void transactionId(absl::string_view transaction_id) {\n    transaction_id_ = std::string(transaction_id.data(), transaction_id.size());\n  }\n\nprivate:\n  std::string msg_id_;\n  int32_t 
queue_id_{0};\n  int64_t queue_offset_{0};\n  std::string transaction_id_;\n};\n\n/**\n * Classic RocketMQ needs to known addresses of each broker to work with. To resolve the addresses,\n * client SDK uses this command header to query name servers.\n *\n * This header is kept for compatible purpose only.\n */\nclass GetRouteInfoRequestHeader : public RoutingCommandCustomHeader {\npublic:\n  void encode(ProtobufWkt::Value& root) override;\n\n  void decode(const ProtobufWkt::Value& ext_fields) override;\n};\n\n/**\n * When a client wishes to consume messages stored in brokers, it sends a pop command to brokers.\n * Brokers would send a batch of messages to the client. At the same time, the broker keeps the\n * batch invisible for a configured period of time, waiting for acknowledgments from the client.\n *\n * If the client manages to consume the messages within promised time interval and sends ack command\n * back to the broker, the broker will mark the acknowledged ones as consumed. Otherwise, the\n * previously sent messages are visible again and would be consumable for other client instances.\n *\n * Through this approach, we achieves stateless message-pulling, comparing to classic offset-based\n * consuming progress management. 
This model brings about some extra workload to the broker side
See pop request header for how-things-work explanation.\n */\nclass PopMessageResponseHeader : public CommandCustomHeader {\npublic:\n  void decode(const ProtobufWkt::Value& ext_fields) override;\n\n  void encode(ProtobufWkt::Value& root) override;\n\n  // This function is for testing only.\n  int64_t popTimeForTest() const { return pop_time_; }\n\n  void popTime(int64_t pop_time) { pop_time_ = pop_time; }\n\n  int64_t invisibleTime() const { return invisible_time_; }\n\n  void invisibleTime(int64_t invisible_time) { invisible_time_ = invisible_time; }\n\n  int32_t reviveQid() const { return revive_qid_; }\n\n  void reviveQid(int32_t revive_qid) { revive_qid_ = revive_qid; }\n\n  int64_t restNum() const { return rest_num_; }\n\n  void restNum(int64_t rest_num) { rest_num_ = rest_num; }\n\n  const std::string& startOffsetInfo() const { return start_offset_info_; }\n\n  void startOffsetInfo(absl::string_view start_offset_info) {\n    start_offset_info_ = std::string(start_offset_info.data(), start_offset_info.size());\n  }\n\n  const std::string& msgOffsetInfo() const { return msg_off_set_info_; }\n\n  void msgOffsetInfo(absl::string_view msg_offset_info) {\n    msg_off_set_info_ = std::string(msg_offset_info.data(), msg_offset_info.size());\n  }\n\n  const std::string& orderCountInfo() const { return order_count_info_; }\n\n  void orderCountInfo(absl::string_view order_count_info) {\n    order_count_info_ = std::string(order_count_info.data(), order_count_info.size());\n  }\n\nprivate:\n  int64_t pop_time_{0};\n  int64_t invisible_time_{0};\n  int32_t revive_qid_{0};\n  int64_t rest_num_{0};\n  std::string start_offset_info_;\n  std::string msg_off_set_info_;\n  std::string order_count_info_;\n};\n\n/**\n * This command is used by the client to acknowledge message(s) that has been successfully consumed.\n * Once the broker received this request, the associated message will formally marked as consumed.\n *\n * Note: the ack request has to be sent the exactly same 
broker where messages are popped from.\n */\nclass AckMessageRequestHeader : public RoutingCommandCustomHeader {\npublic:\n  void decode(const ProtobufWkt::Value& ext_fields) override;\n\n  void encode(ProtobufWkt::Value& root) override;\n\n  absl::string_view consumerGroup() const { return consumer_group_; }\n\n  int64_t offset() const { return offset_; }\n\n  void consumerGroup(absl::string_view consumer_group) {\n    consumer_group_ = std::string(consumer_group.data(), consumer_group.size());\n  }\n\n  int32_t queueId() const { return queue_id_; }\n  void queueId(int32_t queue_id) { queue_id_ = queue_id; }\n\n  absl::string_view extraInfo() const { return extra_info_; }\n  void extraInfo(absl::string_view extra_info) {\n    extra_info_ = std::string(extra_info.data(), extra_info.size());\n  }\n\n  void offset(int64_t offset) { offset_ = offset; }\n\n  const std::string& directiveKey() {\n    if (key_.empty()) {\n      key_ = fmt::format(\"{}-{}-{}-{}\", consumer_group_, topic_, queue_id_, offset_);\n    }\n    return key_;\n  }\n\nprivate:\n  std::string consumer_group_;\n  int32_t queue_id_{0};\n  std::string extra_info_;\n  int64_t offset_{0};\n  std::string key_;\n};\n\n/**\n * When a client shuts down gracefully, it notifies broker(now envoy) this event.\n */\nclass UnregisterClientRequestHeader : public CommandCustomHeader {\npublic:\n  void encode(ProtobufWkt::Value& root) override;\n\n  void decode(const ProtobufWkt::Value& ext_fields) override;\n\n  void clientId(absl::string_view client_id) {\n    client_id_ = std::string(client_id.data(), client_id.length());\n  }\n\n  const std::string& clientId() const { return client_id_; }\n\n  void producerGroup(absl::string_view producer_group) {\n    producer_group_ = std::string(producer_group.data(), producer_group.length());\n  }\n\n  const std::string& producerGroup() const { return producer_group_; }\n\n  void consumerGroup(absl::string_view consumer_group) {\n    consumer_group_ = 
std::string(consumer_group.data(), consumer_group.length());\n  }\n\n  const std::string& consumerGroup() const { return consumer_group_; }\n\nprivate:\n  std::string client_id_;\n  std::string producer_group_;\n  std::string consumer_group_;\n};\n\n/**\n * Classic SDK clients use client-side load balancing. This header is kept for compatibility.\n */\nclass GetConsumerListByGroupRequestHeader : public CommandCustomHeader {\npublic:\n  void encode(ProtobufWkt::Value& root) override;\n\n  void decode(const ProtobufWkt::Value& ext_fields) override;\n\n  void consumerGroup(absl::string_view consumer_group) {\n    consumer_group_ = std::string(consumer_group.data(), consumer_group.length());\n  }\n\n  const std::string& consumerGroup() const { return consumer_group_; }\n\nprivate:\n  std::string consumer_group_;\n};\n\n/**\n * The response body.\n */\nclass GetConsumerListByGroupResponseBody {\npublic:\n  void encode(ProtobufWkt::Struct& root);\n\n  void add(absl::string_view consumer_id) {\n    consumer_id_list_.emplace_back(std::string(consumer_id.data(), consumer_id.length()));\n  }\n\nprivate:\n  std::vector<std::string> consumer_id_list_;\n};\n\n/**\n * Client periodically sends heartbeat to servers to maintain alive status.\n */\nclass HeartbeatData : public Logger::Loggable<Logger::Id::rocketmq> {\npublic:\n  bool decode(ProtobufWkt::Struct& doc);\n\n  const std::string& clientId() const { return client_id_; }\n\n  const std::vector<std::string>& consumerGroups() const { return consumer_groups_; }\n\n  void encode(ProtobufWkt::Struct& root);\n\n  void clientId(absl::string_view client_id) {\n    client_id_ = std::string(client_id.data(), client_id.size());\n  }\n\nprivate:\n  std::string client_id_;\n  std::vector<std::string> consumer_groups_;\n};\n\nclass MetadataHelper {\npublic:\n  MetadataHelper() = delete;\n\n  static void parseRequest(RemotingCommandPtr& request, MessageMetadataSharedPtr metadata);\n};\n\n/**\n * Directive to ensure entailing ack requests 
are routed to the same broker host where pop\n * requests are made.\n */\nstruct AckMessageDirective {\n\n  AckMessageDirective(absl::string_view broker_name, int32_t broker_id, MonotonicTime create_time)\n      : broker_name_(broker_name.data(), broker_name.length()), broker_id_(broker_id),\n        creation_time_(create_time) {}\n\n  std::string broker_name_;\n  int32_t broker_id_;\n  MonotonicTime creation_time_;\n};\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/router/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"router_interface\",\n    hdrs = [\"router.h\"],\n    deps = [\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//include/envoy/upstream:load_balancer_interface\",\n        \"//source/common/upstream:load_balancer_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_lib\",\n    srcs = [\"router_impl.cc\"],\n    hdrs = [\"router_impl.h\"],\n    deps = [\n        \":router_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:thread_local_cluster_interface\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/rocketmq_proxy:conn_manager_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"route_matcher\",\n    srcs = [\"route_matcher.cc\"],\n    hdrs = [\"route_matcher.h\"],\n    deps = [\n        \":router_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/router:metadatamatchcriteria_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/rocketmq_proxy:metadata_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc",
    "content": "#include \"extensions/filters/network/rocketmq_proxy/router/route_matcher.h\"\n\n#include \"common/router/metadatamatchcriteria_impl.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/metadata.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\nnamespace Router {\n\nRouteEntryImpl::RouteEntryImpl(\n    const envoy::extensions::filters::network::rocketmq_proxy::v3::Route& route)\n    : topic_name_(route.match().topic()), cluster_name_(route.route().cluster()),\n      config_headers_(Http::HeaderUtility::buildHeaderDataVector(route.match().headers())) {\n\n  if (route.route().has_metadata_match()) {\n    const auto filter_it = route.route().metadata_match().filter_metadata().find(\n        Envoy::Config::MetadataFilters::get().ENVOY_LB);\n    if (filter_it != route.route().metadata_match().filter_metadata().end()) {\n      metadata_match_criteria_ =\n          std::make_unique<Envoy::Router::MetadataMatchCriteriaImpl>(filter_it->second);\n    }\n  }\n}\n\nconst std::string& RouteEntryImpl::clusterName() const { return cluster_name_; }\n\nconst RouteEntry* RouteEntryImpl::routeEntry() const { return this; }\n\nRouteConstSharedPtr RouteEntryImpl::matches(const MessageMetadata& metadata) const {\n  if (headersMatch(metadata.headers())) {\n    const std::string& topic_name = metadata.topicName();\n    if (topic_name_.match(topic_name)) {\n      return shared_from_this();\n    }\n  }\n  return nullptr;\n}\n\nbool RouteEntryImpl::headersMatch(const Http::HeaderMap& headers) const {\n  ENVOY_LOG(debug, \"rocketmq route matcher: headers size {}, metadata headers size {}\",\n            config_headers_.size(), headers.size());\n  return Http::HeaderUtility::matchHeaders(headers, config_headers_);\n}\n\nRouteMatcher::RouteMatcher(const RouteConfig& config) {\n  for (const auto& route : config.routes()) {\n    
routes_.emplace_back(std::make_shared<RouteEntryImpl>(route));\n  }\n  ENVOY_LOG(debug, \"rocketmq route matcher: routes list size {}\", routes_.size());\n}\n\nRouteConstSharedPtr RouteMatcher::route(const MessageMetadata& metadata) const {\n  const std::string& topic_name = metadata.topicName();\n  for (const auto& route : routes_) {\n    RouteConstSharedPtr route_entry = route->matches(metadata);\n    if (nullptr != route_entry) {\n      ENVOY_LOG(debug, \"rocketmq route matcher: find cluster success for topic: {}\", topic_name);\n      return route_entry;\n    }\n  }\n  ENVOY_LOG(debug, \"rocketmq route matcher: find cluster failed for topic: {}\", topic_name);\n  return nullptr;\n}\n\n} // namespace Router\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/matchers.h\"\n#include \"common/http/header_utility.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/router/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass MessageMetadata;\n\nnamespace Router {\n\nclass RouteEntryImpl : public RouteEntry,\n                       public Route,\n                       public std::enable_shared_from_this<RouteEntryImpl>,\n                       public Logger::Loggable<Logger::Id::rocketmq> {\npublic:\n  RouteEntryImpl(const envoy::extensions::filters::network::rocketmq_proxy::v3::Route& route);\n  ~RouteEntryImpl() override = default;\n\n  // Router::RouteEntry\n  const std::string& clusterName() const override;\n  const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override {\n    return metadata_match_criteria_.get();\n  }\n\n  // Router::Route\n  const RouteEntry* routeEntry() const override;\n\n  RouteConstSharedPtr matches(const MessageMetadata& metadata) const;\n\nprivate:\n  bool headersMatch(const Http::HeaderMap& headers) const;\n\n  const Matchers::StringMatcherImpl topic_name_;\n  const std::string cluster_name_;\n  const std::vector<Http::HeaderUtility::HeaderDataPtr> config_headers_;\n  Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_;\n};\n\nusing RouteEntryImplConstSharedPtr = std::shared_ptr<const RouteEntryImpl>;\n\nclass RouteMatcher : public Logger::Loggable<Logger::Id::rocketmq> {\npublic:\n  using RouteConfig = envoy::extensions::filters::network::rocketmq_proxy::v3::RouteConfiguration;\n  RouteMatcher(const RouteConfig& config);\n\n  RouteConstSharedPtr route(const MessageMetadata& metadata) const;\n\nprivate:\n  
std::vector<RouteEntryImplConstSharedPtr> routes_;\n};\n\nusing RouteMatcherPtr = std::unique_ptr<RouteMatcher>;\n\n} // namespace Router\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/router/router.h",
    "content": "#pragma once\n\n#include \"envoy/tcp/conn_pool.h\"\n\n#include \"common/upstream/load_balancer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass ActiveMessage;\nclass MessageMetadata;\n\nnamespace Router {\n\n/**\n * RouteEntry is an individual resolved route entry.\n */\nclass RouteEntry {\npublic:\n  virtual ~RouteEntry() = default;\n\n  /**\n   * @return const std::string& the upstream cluster that owns the route.\n   */\n  virtual const std::string& clusterName() const PURE;\n\n  /**\n   * @return MetadataMatchCriteria* the metadata that a subset load balancer should match when\n   * selecting an upstream host\n   */\n  virtual const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const PURE;\n};\n\n/**\n * Route holds the RouteEntry for a request.\n */\nclass Route {\npublic:\n  virtual ~Route() = default;\n\n  /**\n   * @return the route entry or nullptr if there is no matching route for the request.\n   */\n  virtual const RouteEntry* routeEntry() const PURE;\n};\n\nusing RouteConstSharedPtr = std::shared_ptr<const Route>;\nusing RouteSharedPtr = std::shared_ptr<Route>;\n\n/**\n * The router configuration.\n */\nclass Config {\npublic:\n  virtual ~Config() = default;\n\n  virtual RouteConstSharedPtr route(const MessageMetadata& metadata) const PURE;\n};\n\nclass Router : public Tcp::ConnectionPool::UpstreamCallbacks,\n               public Upstream::LoadBalancerContextBase {\n\npublic:\n  virtual void sendRequestToUpstream(ActiveMessage& active_message) PURE;\n\n  /**\n   * Release resources associated with this router.\n   */\n  virtual void reset() PURE;\n\n  /**\n   * Return host description that is eventually connected.\n   * @return upstream host if a connection has been established; nullptr otherwise.\n   */\n  virtual Upstream::HostDescriptionConstSharedPtr upstreamHost() PURE;\n};\n\nusing RouterPtr = std::unique_ptr<Router>;\n} // namespace Router\n} // 
namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc",
    "content": "#include \"extensions/filters/network/rocketmq_proxy/router/router_impl.h\"\n\n#include \"common/common/enum_to_int.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/active_message.h\"\n#include \"extensions/filters/network/rocketmq_proxy/codec.h\"\n#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n#include \"extensions/filters/network/rocketmq_proxy/protocol.h\"\n#include \"extensions/filters/network/rocketmq_proxy/well_known_names.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\nnamespace Router {\n\nRouterImpl::RouterImpl(Envoy::Upstream::ClusterManager& cluster_manager)\n    : cluster_manager_(cluster_manager), handle_(nullptr), active_message_(nullptr) {}\n\nRouterImpl::~RouterImpl() {\n  if (handle_) {\n    handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default);\n  }\n}\n\nUpstream::HostDescriptionConstSharedPtr RouterImpl::upstreamHost() { return upstream_host_; }\n\nvoid RouterImpl::onAboveWriteBufferHighWatermark() {\n  ENVOY_LOG(trace, \"Above write buffer high watermark\");\n}\n\nvoid RouterImpl::onBelowWriteBufferLowWatermark() {\n  ENVOY_LOG(trace, \"Below write buffer low watermark\");\n}\n\nvoid RouterImpl::onEvent(Network::ConnectionEvent event) {\n  switch (event) {\n  case Network::ConnectionEvent::RemoteClose: {\n    ENVOY_LOG(error, \"Connection to upstream: {} is closed by remote peer\",\n              upstream_host_->address()->asString());\n    // Send local reply to downstream\n    active_message_->onError(\"Connection to upstream is closed by remote peer\");\n    break;\n  }\n  case Network::ConnectionEvent::LocalClose: {\n    ENVOY_LOG(error, \"Connection to upstream: {} has been closed\",\n              upstream_host_->address()->asString());\n    // Send local reply to downstream\n    active_message_->onError(\"Connection to upstream has been closed\");\n    break;\n  }\n  
default:\n    // Ignore other events for now\n    ENVOY_LOG(trace, \"Ignore event type\");\n    return;\n  }\n  active_message_->onReset();\n}\n\nconst Envoy::Router::MetadataMatchCriteria* RouterImpl::metadataMatchCriteria() {\n  if (route_entry_) {\n    return route_entry_->metadataMatchCriteria();\n  }\n  return nullptr;\n}\n\nvoid RouterImpl::onUpstreamData(Buffer::Instance& data, bool end_stream) {\n  ENVOY_LOG(trace, \"Received some data from upstream: {} bytes, end_stream: {}\", data.length(),\n            end_stream);\n  if (active_message_->onUpstreamData(data, end_stream, connection_data_)) {\n    reset();\n  }\n}\n\nvoid RouterImpl::sendRequestToUpstream(ActiveMessage& active_message) {\n  active_message_ = &active_message;\n  int opaque = active_message_->downstreamRequest()->opaque();\n  ASSERT(active_message_->metadata()->hasTopicName());\n  std::string topic_name = active_message_->metadata()->topicName();\n\n  RouteConstSharedPtr route = active_message.route();\n  if (!route) {\n    active_message.onError(\"No route for current request.\");\n    ENVOY_LOG(warn, \"Can not find route for topic {}\", topic_name);\n    reset();\n    return;\n  }\n\n  route_entry_ = route->routeEntry();\n  const std::string cluster_name = route_entry_->clusterName();\n  Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name);\n  if (!cluster) {\n    active_message.onError(\"Cluster does not exist.\");\n    ENVOY_LOG(warn, \"Cluster for {} is not available\", cluster_name);\n    reset();\n    return;\n  }\n\n  cluster_info_ = cluster->info();\n  if (cluster_info_->maintenanceMode()) {\n    ENVOY_LOG(warn, \"Cluster {} is under maintenance. 
Opaque: {}\", cluster_name, opaque);\n    active_message.onError(\"Cluster under maintenance.\");\n    active_message.connectionManager().stats().maintenance_failure_.inc();\n    reset();\n    return;\n  }\n\n  Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster(\n      cluster_name, Upstream::ResourcePriority::Default, this);\n  if (!conn_pool) {\n    ENVOY_LOG(warn, \"No host available for cluster {}. Opaque: {}\", cluster_name, opaque);\n    active_message.onError(\"No host available\");\n    reset();\n    return;\n  }\n\n  upstream_request_ = std::make_unique<UpstreamRequest>(*this);\n  Tcp::ConnectionPool::Cancellable* cancellable = conn_pool->newConnection(*upstream_request_);\n  if (cancellable) {\n    handle_ = cancellable;\n    ENVOY_LOG(trace, \"No connection is available for now. Create a cancellable handle. Opaque: {}\",\n              opaque);\n  } else {\n    /*\n     * UpstreamRequest#onPoolReady or #onPoolFailure should have been invoked.\n     */\n    ENVOY_LOG(trace,\n              \"One connection is picked up from connection pool, callback should have been \"\n              \"executed. 
Opaque: {}\",\n              opaque);\n  }\n}\n\nRouterImpl::UpstreamRequest::UpstreamRequest(RouterImpl& router) : router_(router) {}\n\nvoid RouterImpl::UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn,\n                                              Upstream::HostDescriptionConstSharedPtr host) {\n  router_.connection_data_ = std::move(conn);\n  router_.upstream_host_ = host;\n  router_.connection_data_->addUpstreamCallbacks(router_);\n  if (router_.handle_) {\n    ENVOY_LOG(trace, \"#onPoolReady, reset cancellable handle to nullptr\");\n    router_.handle_ = nullptr;\n  }\n  ENVOY_LOG(debug, \"Current chosen host address: {}\", host->address()->asString());\n  // TODO(lizhanhui): we may optimize out encoding in case we there is no protocol translation.\n  Buffer::OwnedImpl buffer;\n  Encoder::encode(router_.active_message_->downstreamRequest(), buffer);\n  router_.connection_data_->connection().write(buffer, false);\n  ENVOY_LOG(trace, \"Write data to upstream OK. Opaque: {}\",\n            router_.active_message_->downstreamRequest()->opaque());\n\n  if (router_.active_message_->metadata()->isOneWay()) {\n    ENVOY_LOG(trace,\n              \"Reset ActiveMessage since data is written and the downstream request is one-way. 
\"\n              \"Opaque: {}\",\n              router_.active_message_->downstreamRequest()->opaque());\n\n    // For one-way ack-message requests, we need erase previously stored ack-directive.\n    if (enumToSignedInt(RequestCode::AckMessage) ==\n        router_.active_message_->downstreamRequest()->code()) {\n      auto ack_header = router_.active_message_->downstreamRequest()\n                            ->typedCustomHeader<AckMessageRequestHeader>();\n      router_.active_message_->connectionManager().eraseAckDirective(ack_header->directiveKey());\n    }\n\n    router_.reset();\n  }\n}\n\nvoid RouterImpl::UpstreamRequest::onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason,\n                                                Upstream::HostDescriptionConstSharedPtr host) {\n  if (router_.handle_) {\n    ENVOY_LOG(trace, \"#onPoolFailure, reset cancellable handle to nullptr\");\n    router_.handle_ = nullptr;\n  }\n  switch (reason) {\n  case Tcp::ConnectionPool::PoolFailureReason::Overflow: {\n    ENVOY_LOG(error, \"Unable to acquire a connection to send request to upstream\");\n    router_.active_message_->onError(\"overflow\");\n  } break;\n\n  case Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure: {\n    ENVOY_LOG(error, \"Failed to make request to upstream due to remote connection error. Host {}\",\n              host->address()->asString());\n    router_.active_message_->onError(\"remote connection failure\");\n  } break;\n\n  case Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure: {\n    ENVOY_LOG(error, \"Failed to make request to upstream due to local connection error. Host: {}\",\n              host->address()->asString());\n    router_.active_message_->onError(\"local connection failure\");\n  } break;\n\n  case Tcp::ConnectionPool::PoolFailureReason::Timeout: {\n    ENVOY_LOG(error, \"Failed to make request to upstream due to timeout. 
Host: {}\",\n              host->address()->asString());\n    router_.active_message_->onError(\"timeout\");\n  } break;\n  }\n\n  // Release resources allocated to this request.\n  router_.reset();\n}\n\nvoid RouterImpl::reset() {\n  active_message_->onReset();\n  if (connection_data_) {\n    connection_data_.reset(nullptr);\n  }\n}\n\n} // namespace Router\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/router/router_impl.h",
    "content": "#pragma once\n\n#include \"envoy/tcp/conn_pool.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/thread_local_cluster.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/router/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\nnamespace Router {\n\nclass RouterImpl : public Router, public Logger::Loggable<Logger::Id::rocketmq> {\npublic:\n  explicit RouterImpl(Upstream::ClusterManager& cluster_manager);\n\n  ~RouterImpl() override;\n\n  // Tcp::ConnectionPool::UpstreamCallbacks\n  void onUpstreamData(Buffer::Instance& data, bool end_stream) override;\n  void onAboveWriteBufferHighWatermark() override;\n  void onBelowWriteBufferLowWatermark() override;\n  void onEvent(Network::ConnectionEvent event) override;\n\n  // Upstream::LoadBalancerContextBase\n  const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override;\n\n  void sendRequestToUpstream(ActiveMessage& active_message) override;\n\n  void reset() override;\n\n  Upstream::HostDescriptionConstSharedPtr upstreamHost() override;\n\nprivate:\n  class UpstreamRequest : public Tcp::ConnectionPool::Callbacks {\n  public:\n    UpstreamRequest(RouterImpl& router);\n\n    void onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason,\n                       Upstream::HostDescriptionConstSharedPtr host) override;\n\n    void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn,\n                     Upstream::HostDescriptionConstSharedPtr host) override;\n\n  private:\n    RouterImpl& router_;\n  };\n  using UpstreamRequestPtr = std::unique_ptr<UpstreamRequest>;\n\n  Upstream::ClusterManager& cluster_manager_;\n  Tcp::ConnectionPool::ConnectionDataPtr connection_data_;\n\n  /**\n   * On requesting connection from upstream connection pool, this handle may be assigned when no\n   * connection is 
readily available at the moment. We may cancel the request through this handle.\n   *\n   * If there are connections which can be returned immediately, this handle is assigned as nullptr.\n   */\n  Tcp::ConnectionPool::Cancellable* handle_;\n  Upstream::HostDescriptionConstSharedPtr upstream_host_;\n  ActiveMessage* active_message_;\n  Upstream::ClusterInfoConstSharedPtr cluster_info_;\n  UpstreamRequestPtr upstream_request_;\n  const RouteEntry* route_entry_{};\n};\n} // namespace Router\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/stats.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\n/**\n * All rocketmq filter stats. @see stats_macros.h\n */\n#define ALL_ROCKETMQ_FILTER_STATS(COUNTER, GAUGE, HISTOGRAM)                                       \\\n  COUNTER(request)                                                                                 \\\n  COUNTER(request_decoding_error)                                                                  \\\n  COUNTER(request_decoding_success)                                                                \\\n  COUNTER(response)                                                                                \\\n  COUNTER(response_decoding_error)                                                                 \\\n  COUNTER(response_decoding_success)                                                               \\\n  COUNTER(response_error)                                                                          \\\n  COUNTER(response_success)                                                                        \\\n  COUNTER(heartbeat)                                                                               \\\n  COUNTER(unregister)                                                                              \\\n  COUNTER(get_topic_route)                                                                         \\\n  COUNTER(send_message_v1)                                                                         \\\n  COUNTER(send_message_v2)                                                                         \\\n  COUNTER(pop_message)                                                                             \\\n  COUNTER(ack_message)                                                                             \\\n  COUNTER(get_consumer_list)                                 
                                      \\\n  COUNTER(maintenance_failure)                                                                     \\\n  GAUGE(request_active, Accumulate)                                                                \\\n  GAUGE(send_message_v1_active, Accumulate)                                                        \\\n  GAUGE(send_message_v2_active, Accumulate)                                                        \\\n  GAUGE(pop_message_active, Accumulate)                                                            \\\n  GAUGE(get_topic_route_active, Accumulate)                                                        \\\n  GAUGE(send_message_pending, Accumulate)                                                          \\\n  GAUGE(pop_message_pending, Accumulate)                                                           \\\n  GAUGE(get_topic_route_pending, Accumulate)                                                       \\\n  GAUGE(total_pending, Accumulate)                                                                 \\\n  HISTOGRAM(request_time_ms, Milliseconds)\n\n/**\n * Struct definition for all rocketmq proxy stats. @see stats_macros.h\n */\nstruct RocketmqFilterStats {\n  ALL_ROCKETMQ_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT,\n                            GENERATE_HISTOGRAM_STRUCT)\n\n  static RocketmqFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return RocketmqFilterStats{ALL_ROCKETMQ_FILTER_STATS(POOL_COUNTER_PREFIX(scope, prefix),\n                                                         POOL_GAUGE_PREFIX(scope, prefix),\n                                                         POOL_HISTOGRAM_PREFIX(scope, prefix))};\n  }\n};\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/topic_route.cc",
    "content": "#include \"extensions/filters/network/rocketmq_proxy/topic_route.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nvoid QueueData::encode(ProtobufWkt::Struct& data_struct) {\n  auto* fields = data_struct.mutable_fields();\n\n  ProtobufWkt::Value broker_name_v;\n  broker_name_v.set_string_value(broker_name_);\n  (*fields)[\"brokerName\"] = broker_name_v;\n\n  ProtobufWkt::Value read_queue_num_v;\n  read_queue_num_v.set_number_value(read_queue_nums_);\n  (*fields)[\"readQueueNums\"] = read_queue_num_v;\n\n  ProtobufWkt::Value write_queue_num_v;\n  write_queue_num_v.set_number_value(write_queue_nums_);\n  (*fields)[\"writeQueueNums\"] = write_queue_num_v;\n\n  ProtobufWkt::Value perm_v;\n  perm_v.set_number_value(perm_);\n  (*fields)[\"perm\"] = perm_v;\n}\n\nvoid BrokerData::encode(ProtobufWkt::Struct& data_struct) {\n  auto& members = *(data_struct.mutable_fields());\n\n  ProtobufWkt::Value cluster_v;\n  cluster_v.set_string_value(cluster_);\n  members[\"cluster\"] = cluster_v;\n\n  ProtobufWkt::Value broker_name_v;\n  broker_name_v.set_string_value(broker_name_);\n  members[\"brokerName\"] = broker_name_v;\n\n  if (!broker_addrs_.empty()) {\n    ProtobufWkt::Value brokerAddrsNode;\n    auto& brokerAddrsMembers = *(brokerAddrsNode.mutable_struct_value()->mutable_fields());\n    for (auto& entry : broker_addrs_) {\n      ProtobufWkt::Value address_v;\n      address_v.set_string_value(entry.second);\n      brokerAddrsMembers[std::to_string(entry.first)] = address_v;\n    }\n    members[\"brokerAddrs\"] = brokerAddrsNode;\n  }\n}\n\nvoid TopicRouteData::encode(ProtobufWkt::Struct& data_struct) {\n  auto* fields = data_struct.mutable_fields();\n\n  if (!queue_data_.empty()) {\n    ProtobufWkt::ListValue queue_data_list_v;\n    for (auto& queueData : queue_data_) {\n      queueData.encode(data_struct);\n      queue_data_list_v.add_values()->mutable_struct_value()->CopyFrom(data_struct);\n    }\n    
(*fields)[\"queueDatas\"].mutable_list_value()->CopyFrom(queue_data_list_v);\n  }\n\n  if (!broker_data_.empty()) {\n    ProtobufWkt::ListValue broker_data_list_v;\n    for (auto& brokerData : broker_data_) {\n      brokerData.encode(data_struct);\n      broker_data_list_v.add_values()->mutable_struct_value()->CopyFrom(data_struct);\n    }\n    (*fields)[\"brokerDatas\"].mutable_list_value()->CopyFrom(broker_data_list_v);\n  }\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/topic_route.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\nclass QueueData {\npublic:\n  QueueData(const std::string& broker_name, int32_t read_queue_num, int32_t write_queue_num,\n            int32_t perm)\n      : broker_name_(broker_name), read_queue_nums_(read_queue_num),\n        write_queue_nums_(write_queue_num), perm_(perm) {}\n\n  void encode(ProtobufWkt::Struct& data_struct);\n\n  const std::string& brokerName() const { return broker_name_; }\n\n  int32_t readQueueNum() const { return read_queue_nums_; }\n\n  int32_t writeQueueNum() const { return write_queue_nums_; }\n\n  int32_t perm() const { return perm_; }\n\nprivate:\n  std::string broker_name_;\n  int32_t read_queue_nums_;\n  int32_t write_queue_nums_;\n  int32_t perm_;\n};\n\nclass BrokerData {\npublic:\n  BrokerData(const std::string& cluster, const std::string& broker_name,\n             absl::node_hash_map<int64_t, std::string>&& broker_addrs)\n      : cluster_(cluster), broker_name_(broker_name), broker_addrs_(broker_addrs) {}\n\n  void encode(ProtobufWkt::Struct& data_struct);\n\n  const std::string& cluster() const { return cluster_; }\n\n  const std::string& brokerName() const { return broker_name_; }\n\n  absl::node_hash_map<int64_t, std::string>& brokerAddresses() { return broker_addrs_; }\n\nprivate:\n  std::string cluster_;\n  std::string broker_name_;\n  absl::node_hash_map<int64_t, std::string> broker_addrs_;\n};\n\nclass TopicRouteData {\npublic:\n  void encode(ProtobufWkt::Struct& data_struct);\n\n  TopicRouteData() = default;\n\n  TopicRouteData(std::vector<QueueData>&& queue_data, std::vector<BrokerData>&& broker_data)\n      : queue_data_(queue_data), broker_data_(broker_data) {}\n\n  std::vector<QueueData>& queueData() { return queue_data_; }\n\n  std::vector<BrokerData>& 
brokerData() { return broker_data_; }\n\nprivate:\n  std::vector<QueueData> queue_data_;\n  std::vector<BrokerData> broker_data_;\n};\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/rocketmq_proxy/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nstruct RocketmqValues {\n  /**\n   * All the values below are the properties of single broker in filter_metadata.\n   */\n  const std::string ReadQueueNum = \"read_queue_num\";\n  const std::string WriteQueueNum = \"write_queue_num\";\n  const std::string ClusterName = \"cluster_name\";\n  const std::string BrokerName = \"broker_name\";\n  const std::string BrokerId = \"broker_id\";\n  const std::string Perm = \"perm\";\n};\n\nusing RocketmqConstants = ConstSingleton<RocketmqValues>;\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/filters/network/sni_cluster/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"sni_cluster\",\n    srcs = [\"sni_cluster.cc\"],\n    hdrs = [\"sni_cluster.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/tcp_proxy\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    deps = [\n        \":sni_cluster\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/network/sni_cluster/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/sni_cluster/config.cc",
    "content": "#include \"extensions/filters/network/sni_cluster/config.h\"\n\n#include \"envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.pb.h\"\n#include \"envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/network/sni_cluster/sni_cluster.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniCluster {\n\nNetwork::FilterFactoryCb SniClusterNetworkFilterConfigFactory::createFilterFactoryFromProto(\n    const Protobuf::Message&, Server::Configuration::FactoryContext&) {\n  return [](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(std::make_shared<SniClusterFilter>());\n  };\n}\n\nProtobufTypes::MessagePtr SniClusterNetworkFilterConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::filters::network::sni_cluster::v3::SniCluster>();\n}\n\n/**\n * Static registration for the sni_cluster filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(SniClusterNetworkFilterConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace SniCluster\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/sni_cluster/config.h",
    "content": "#pragma once\n\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniCluster {\n\n/**\n * Config registration for the sni_cluster filter. @see NamedNetworkFilterConfigFactory.\n */\nclass SniClusterNetworkFilterConfigFactory\n    : public Server::Configuration::NamedNetworkFilterConfigFactory {\npublic:\n  Network::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message&,\n                               Server::Configuration::FactoryContext&) override;\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n  std::string name() const override { return NetworkFilterNames::get().SniCluster; }\n};\n\n} // namespace SniCluster\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/sni_cluster/sni_cluster.cc",
    "content": "#include \"extensions/filters/network/sni_cluster/sni_cluster.h\"\n\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/tcp_proxy/tcp_proxy.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniCluster {\n\nNetwork::FilterStatus SniClusterFilter::onNewConnection() {\n  absl::string_view sni = read_callbacks_->connection().requestedServerName();\n  ENVOY_CONN_LOG(trace, \"sni_cluster: new connection with server name {}\",\n                 read_callbacks_->connection(), sni);\n\n  if (!sni.empty()) {\n    // Set the tcp_proxy cluster to the same value as SNI. The data is mutable to allow\n    // other filters to change it.\n    read_callbacks_->connection().streamInfo().filterState()->setData(\n        TcpProxy::PerConnectionCluster::key(),\n        std::make_unique<TcpProxy::PerConnectionCluster>(sni),\n        StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Connection);\n  }\n\n  return Network::FilterStatus::Continue;\n}\n\n} // namespace SniCluster\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/sni_cluster/sni_cluster.h",
    "content": "#pragma once\n\n#include \"envoy/network/filter.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniCluster {\n\n/**\n * Implementation of the sni_cluster filter that sets the upstream cluster name from\n * the SNI field in the TLS connection.\n */\nclass SniClusterFilter : public Network::ReadFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance&, bool) override {\n    return Network::FilterStatus::Continue;\n  }\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n  }\n\nprivate:\n  Network::ReadFilterCallbacks* read_callbacks_{};\n};\n\n} // namespace SniCluster\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"proxy_filter_lib\",\n    srcs = [\"proxy_filter.cc\"],\n    hdrs = [\"proxy_filter.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/tcp_proxy\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_interface\",\n        \"@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \":proxy_filter_lib\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_manager_impl\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc",
    "content": "#include \"extensions/filters/network/sni_dynamic_forward_proxy/config.h\"\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h\"\n#include \"extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniDynamicForwardProxy {\n\nSniDynamicForwardProxyNetworkFilterConfigFactory::SniDynamicForwardProxyNetworkFilterConfigFactory()\n    : FactoryBase(NetworkFilterNames::get().SniDynamicForwardProxy) {}\n\nNetwork::FilterFactoryCb\nSniDynamicForwardProxyNetworkFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const FilterConfig& proto_config, Server::Configuration::FactoryContext& context) {\n\n  Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory(\n      context.singletonManager(), context.dispatcher(), context.threadLocal(),\n      context.api().randomGenerator(), context.runtime(), context.scope());\n  ProxyFilterConfigSharedPtr filter_config(std::make_shared<ProxyFilterConfig>(\n      proto_config, cache_manager_factory, context.clusterManager()));\n\n  return [filter_config](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(std::make_shared<ProxyFilter>(filter_config));\n  };\n}\n\n/**\n * Static registration for the sni_dynamic_forward_proxy filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(SniDynamicForwardProxyNetworkFilterConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace SniDynamicForwardProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/sni_dynamic_forward_proxy/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniDynamicForwardProxy {\n\nusing FilterConfig =\n    envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3alpha::FilterConfig;\n\n/**\n * Config registration for the sni_dynamic_forward_proxy filter. @see\n * NamedNetworkFilterConfigFactory.\n */\nclass SniDynamicForwardProxyNetworkFilterConfigFactory : public Common::FactoryBase<FilterConfig> {\npublic:\n  SniDynamicForwardProxyNetworkFilterConfigFactory();\n\nprivate:\n  Network::FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const FilterConfig& proto_config,\n                                    Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace SniDynamicForwardProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc",
    "content": "#include \"extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h\"\n\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/upstream/thread_local_cluster.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/tcp_proxy/tcp_proxy.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniDynamicForwardProxy {\n\nProxyFilterConfig::ProxyFilterConfig(\n    const FilterConfig& proto_config,\n    Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory& cache_manager_factory,\n    Upstream::ClusterManager&)\n    : port_(static_cast<uint16_t>(proto_config.port_value())),\n      dns_cache_manager_(cache_manager_factory.get()),\n      dns_cache_(dns_cache_manager_->getCache(proto_config.dns_cache_config())) {}\n\nProxyFilter::ProxyFilter(ProxyFilterConfigSharedPtr config) : config_(std::move(config)) {}\n\nusing LoadDnsCacheEntryStatus = Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryStatus;\n\nNetwork::FilterStatus ProxyFilter::onNewConnection() {\n  absl::string_view sni = read_callbacks_->connection().requestedServerName();\n  ENVOY_CONN_LOG(trace, \"sni_dynamic_forward_proxy: new connection with server name '{}'\",\n                 read_callbacks_->connection(), sni);\n\n  if (sni.empty()) {\n    return Network::FilterStatus::Continue;\n  }\n\n  circuit_breaker_ = config_->cache().canCreateDnsRequest(absl::nullopt);\n\n  if (circuit_breaker_ == nullptr) {\n    ENVOY_CONN_LOG(debug, \"pending request overflow\", read_callbacks_->connection());\n    read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n    return Network::FilterStatus::StopIteration;\n  }\n\n  uint32_t default_port = config_->port();\n\n  auto result = config_->cache().loadDnsCacheEntry(sni, default_port, *this);\n\n  cache_load_handle_ = std::move(result.handle_);\n  if (cache_load_handle_ == nullptr) {\n    circuit_breaker_.reset();\n  }\n\n  switch 
(result.status_) {\n  case LoadDnsCacheEntryStatus::InCache: {\n    ASSERT(cache_load_handle_ == nullptr);\n    ENVOY_CONN_LOG(debug, \"DNS cache entry already loaded, continuing\",\n                   read_callbacks_->connection());\n    return Network::FilterStatus::Continue;\n  }\n  case LoadDnsCacheEntryStatus::Loading: {\n    ASSERT(cache_load_handle_ != nullptr);\n    ENVOY_CONN_LOG(debug, \"waiting to load DNS cache entry\", read_callbacks_->connection());\n    return Network::FilterStatus::StopIteration;\n  }\n  case LoadDnsCacheEntryStatus::Overflow: {\n    ASSERT(cache_load_handle_ == nullptr);\n    ENVOY_CONN_LOG(debug, \"DNS cache overflow\", read_callbacks_->connection());\n    read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n    return Network::FilterStatus::StopIteration;\n  }\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid ProxyFilter::onLoadDnsCacheComplete() {\n  ENVOY_CONN_LOG(debug, \"load DNS cache complete, continuing\", read_callbacks_->connection());\n  ASSERT(circuit_breaker_ != nullptr);\n  circuit_breaker_.reset();\n  read_callbacks_->continueReading();\n}\n\n} // namespace SniDynamicForwardProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniDynamicForwardProxy {\n\nusing FilterConfig =\n    envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3alpha::FilterConfig;\n\nclass ProxyFilterConfig {\npublic:\n  ProxyFilterConfig(\n      const FilterConfig& proto_config,\n      Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory& cache_manager_factory,\n      Upstream::ClusterManager& cluster_manager);\n\n  Extensions::Common::DynamicForwardProxy::DnsCache& cache() { return *dns_cache_; }\n  uint32_t port() { return port_; }\n\nprivate:\n  const uint32_t port_;\n  const Extensions::Common::DynamicForwardProxy::DnsCacheManagerSharedPtr dns_cache_manager_;\n  const Extensions::Common::DynamicForwardProxy::DnsCacheSharedPtr dns_cache_;\n};\n\nusing ProxyFilterConfigSharedPtr = std::shared_ptr<ProxyFilterConfig>;\n\nclass ProxyFilter\n    : public Network::ReadFilter,\n      public Extensions::Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryCallbacks,\n      Logger::Loggable<Logger::Id::forward_proxy> {\npublic:\n  ProxyFilter(ProxyFilterConfigSharedPtr config);\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance&, bool) override {\n    return Network::FilterStatus::Continue;\n  }\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n  }\n\n  // Extensions::Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryCallbacks\n  void onLoadDnsCacheComplete() override;\n\nprivate:\n  const ProxyFilterConfigSharedPtr config_;\n 
 Upstream::ResourceAutoIncDecPtr circuit_breaker_;\n  Extensions::Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryHandlePtr cache_load_handle_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n};\n\n} // namespace SniDynamicForwardProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/tcp_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# TCP proxy L4 network filter.\n# Public docs: docs/root/configuration/network_filters/tcp_proxy_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    # This is core Envoy config.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/tcp_proxy\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/tcp_proxy/config.cc",
    "content": "#include \"extensions/filters/network/tcp_proxy/config.h\"\n\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/tcp_proxy/tcp_proxy.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace TcpProxy {\n\nNetwork::FilterFactoryCb ConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  ASSERT(!proto_config.stat_prefix().empty());\n  if (proto_config.has_hidden_envoy_deprecated_deprecated_v1()) {\n    ASSERT(proto_config.hidden_envoy_deprecated_deprecated_v1().routes_size() > 0);\n  }\n\n  Envoy::TcpProxy::ConfigSharedPtr filter_config(\n      std::make_shared<Envoy::TcpProxy::Config>(proto_config, context));\n  return [filter_config, &context](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(\n        std::make_shared<Envoy::TcpProxy::Filter>(filter_config, context.clusterManager()));\n  };\n}\n\n/**\n * Static registration for the tcp_proxy filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(ConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory){\"envoy.tcp_proxy\"};\n\n} // namespace TcpProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/tcp_proxy/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace TcpProxy {\n\n/**\n * Config registration for the tcp proxy filter. @see NamedNetworkFilterConfigFactory.\n */\nclass ConfigFactory\n    : public Common::FactoryBase<envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy> {\npublic:\n  ConfigFactory() : FactoryBase(NetworkFilterNames::get().TcpProxy, true) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace TcpProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"app_exception_lib\",\n    srcs = [\"app_exception_impl.cc\"],\n    hdrs = [\"app_exception_impl.h\"],\n    deps = [\n        \":protocol_interface\",\n        \":thrift_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"buffer_helper_lib\",\n    srcs = [\"buffer_helper.cc\"],\n    hdrs = [\"buffer_helper.h\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:byte_order_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \":app_exception_lib\",\n        \":auto_protocol_lib\",\n        \":auto_transport_lib\",\n        \":binary_protocol_lib\",\n        \":compact_protocol_lib\",\n        \":conn_manager_lib\",\n        \":decoder_lib\",\n        \":framed_transport_lib\",\n        \":header_transport_lib\",\n        \":protocol_interface\",\n        \":twitter_protocol_lib\",\n        \":unframed_transport_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:filter_config_interface\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:well_known_names\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"conn_manager_lib\",\n    srcs = [\"conn_manager.cc\"],\n    hdrs = [\"conn_manager.h\"],\n    external_deps = [\"abseil_any\"],\n    deps = [\n        \":app_exception_lib\",\n        \":decoder_lib\",\n        \":protocol_converter_lib\",\n        \":protocol_interface\",\n        \":stats_lib\",\n        \":transport_interface\",\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"decoder_events_lib\",\n    hdrs = [\"decoder_events.h\"],\n    deps = [\n        \":metadata_lib\",\n        \":thrift_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"decoder_lib\",\n    srcs = [\"decoder.cc\"],\n    hdrs = [\"decoder.h\"],\n    deps = [\n        \":app_exception_lib\",\n        \":protocol_interface\",\n        \":stats_lib\",\n        \":transport_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:filter_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"metadata_lib\",\n    hdrs = [\"metadata.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":thrift_lib\",\n        \":tracing_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        
\"//source/common/common:macros\",\n        \"//source/common/http:header_map_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tracing_interface\",\n    hdrs = [\"tracing.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [],\n)\n\nenvoy_cc_library(\n    name = \"protocol_converter_lib\",\n    hdrs = [\n        \"protocol_converter.h\",\n    ],\n    deps = [\n        \":decoder_events_lib\",\n        \":protocol_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"protocol_interface\",\n    hdrs = [\n        \"protocol.h\",\n    ],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":conn_state_lib\",\n        \":decoder_events_lib\",\n        \":metadata_lib\",\n        \":thrift_lib\",\n        \":thrift_object_interface\",\n        \":transport_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"auto_protocol_lib\",\n    srcs = [\n        \"auto_protocol_impl.cc\",\n    ],\n    hdrs = [\n        \"auto_protocol_impl.h\",\n    ],\n    deps = [\n        \":binary_protocol_lib\",\n        \":buffer_helper_lib\",\n        \":compact_protocol_lib\",\n        \":protocol_interface\",\n        \":twitter_protocol_lib\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"binary_protocol_lib\",\n    srcs = [\n        \"binary_protocol_impl.cc\",\n    ],\n    hdrs = [\n        \"binary_protocol_impl.h\",\n    ],\n    deps = [\n        \":buffer_helper_lib\",\n        \":protocol_interface\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"compact_protocol_lib\",\n    srcs = [\n        
\"compact_protocol_impl.cc\",\n    ],\n    hdrs = [\n        \"compact_protocol_impl.h\",\n    ],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":buffer_helper_lib\",\n        \":protocol_interface\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"twitter_protocol_lib\",\n    srcs = [\n        \"twitter_protocol_impl.cc\",\n    ],\n    hdrs = [\n        \"twitter_protocol_impl.h\",\n    ],\n    deps = [\n        \":binary_protocol_lib\",\n        \":buffer_helper_lib\",\n        \":protocol_interface\",\n        \":thrift_object_lib\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stats_lib\",\n    hdrs = [\"stats.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"transport_interface\",\n    hdrs = [\"transport.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":buffer_helper_lib\",\n        \":metadata_lib\",\n        \":thrift_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"conn_state_lib\",\n    hdrs = [\"conn_state.h\"],\n    deps = [\n        \"//include/envoy/tcp:conn_pool_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"thrift_lib\",\n    hdrs = [\"thrift.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"thrift_object_interface\",\n    hdrs = [\"thrift_object.h\"],\n    deps = [\n        \":thrift_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"thrift_object_lib\",\n    srcs = [\"thrift_object_impl.cc\"],\n    hdrs = [\"thrift_object_impl.h\"],\n    deps = [\n        \":decoder_lib\",\n        \":thrift_lib\",\n        \":thrift_object_interface\",\n        \":unframed_transport_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:filter_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"auto_transport_lib\",\n    srcs = [\n        \"auto_transport_impl.cc\",\n    ],\n    hdrs = [\n        \"auto_transport_impl.h\",\n    ],\n    deps = [\n        \":binary_protocol_lib\",\n        \":buffer_helper_lib\",\n        \":compact_protocol_lib\",\n        \":framed_transport_lib\",\n        \":header_transport_lib\",\n        \":transport_interface\",\n        \":twitter_protocol_lib\",\n        \":unframed_transport_lib\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"framed_transport_lib\",\n    srcs = [\n        \"framed_transport_impl.cc\",\n    ],\n    hdrs = [\n        \"framed_transport_impl.h\",\n    ],\n    deps = [\n        \":buffer_helper_lib\",\n        \":transport_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"header_transport_lib\",\n    srcs = [\n        \"header_transport_impl.cc\",\n    ],\n    hdrs = [\n        \"header_transport_impl.h\",\n    ],\n    deps = [\n        \":app_exception_lib\",\n        \":buffer_helper_lib\",\n        \":transport_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"unframed_transport_lib\",\n    srcs = [\n        \"unframed_transport_impl.cc\",\n    ],\n    hdrs = [\n        \"unframed_transport_impl.h\",\n    ],\n    deps = [\n        \":buffer_helper_lib\",\n        \":transport_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/app_exception_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nstatic const std::string TApplicationException = \"TApplicationException\";\nstatic const std::string MessageField = \"message\";\nstatic const std::string TypeField = \"type\";\nstatic const std::string StopField = \"\";\n\nDirectResponse::ResponseType AppException::encode(MessageMetadata& metadata,\n                                                  ThriftProxy::Protocol& proto,\n                                                  Buffer::Instance& buffer) const {\n  // Handle cases where the exception occurs before the message name (e.g. some header transport\n  // errors).\n  if (!metadata.hasMethodName()) {\n    metadata.setMethodName(\"\");\n  }\n  if (!metadata.hasSequenceId()) {\n    metadata.setSequenceId(0);\n  }\n\n  metadata.setMessageType(MessageType::Exception);\n\n  proto.writeMessageBegin(buffer, metadata);\n  proto.writeStructBegin(buffer, TApplicationException);\n\n  proto.writeFieldBegin(buffer, MessageField, FieldType::String, 1);\n  proto.writeString(buffer, std::string(what()));\n  proto.writeFieldEnd(buffer);\n\n  proto.writeFieldBegin(buffer, TypeField, FieldType::I32, 2);\n  proto.writeInt32(buffer, static_cast<int32_t>(type_));\n  proto.writeFieldEnd(buffer);\n\n  proto.writeFieldBegin(buffer, StopField, FieldType::Stop, 0);\n\n  proto.writeStructEnd(buffer);\n  proto.writeMessageEnd(buffer);\n\n  return DirectResponse::ResponseType::Exception;\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/app_exception_impl.h",
    "content": "#pragma once\n\n#include \"envoy/common/exception.h\"\n\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n#include \"extensions/filters/network/thrift_proxy/thrift.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nstruct AppException : public EnvoyException, public DirectResponse {\n  AppException(AppExceptionType type, const std::string& what)\n      : EnvoyException(what), type_(type) {}\n  AppException(const AppException& ex) : EnvoyException(ex.what()), type_(ex.type_) {}\n\n  ResponseType encode(MessageMetadata& metadata, Protocol& proto,\n                      Buffer::Instance& buffer) const override;\n\n  const AppExceptionType type_;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/auto_protocol_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/auto_protocol_impl.h\"\n\n#include <algorithm>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/byte_order.h\"\n#include \"common/common/macros.h\"\n\n#include \"extensions/filters/network/thrift_proxy/binary_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n#include \"extensions/filters/network/thrift_proxy/compact_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/twitter_protocol_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nvoid AutoProtocolImpl::setType(ProtocolType type) {\n  if (!protocol_) {\n    switch (type) {\n    case ProtocolType::Binary:\n      setProtocol(std::make_unique<BinaryProtocolImpl>());\n      break;\n    case ProtocolType::Compact:\n      setProtocol(std::make_unique<CompactProtocolImpl>());\n      break;\n    case ProtocolType::Twitter:\n      setProtocol(std::make_unique<TwitterProtocolImpl>());\n      break;\n    default:\n      // Ignored: attempt protocol detection.\n      break;\n    }\n  }\n}\n\nbool AutoProtocolImpl::readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) {\n  if (protocol_ == nullptr) {\n    if (buffer.length() < 2) {\n      return false;\n    }\n\n    uint16_t version = buffer.peekBEInt<uint16_t>();\n    if (BinaryProtocolImpl::isMagic(version)) {\n      // 12 bytes is the minimum length for message-begin in the binary protocol.\n      if (buffer.length() < BinaryProtocolImpl::MinMessageBeginLength) {\n        return false;\n      }\n\n      // The first message in the twitter protocol is always an upgrade request, so we use as\n      // much of the buffer as possible to detect the upgrade message. 
If we guess wrong,\n      // TwitterProtocolImpl will still fall back to binary protocol.\n      if (TwitterProtocolImpl::isUpgradePrefix(buffer)) {\n        setType(ProtocolType::Twitter);\n      } else {\n        setType(ProtocolType::Binary);\n      }\n    } else if (CompactProtocolImpl::isMagic(version)) {\n      setType(ProtocolType::Compact);\n    }\n\n    if (!protocol_) {\n      throw EnvoyException(\n          fmt::format(\"unknown thrift auto protocol message start {:04x}\", version));\n    }\n  }\n\n  return protocol_->readMessageBegin(buffer, metadata);\n}\n\nbool AutoProtocolImpl::readMessageEnd(Buffer::Instance& buffer) {\n  RELEASE_ASSERT(protocol_ != nullptr, \"\");\n  return protocol_->readMessageEnd(buffer);\n}\n\nclass AutoProtocolConfigFactory : public ProtocolFactoryBase<AutoProtocolImpl> {\npublic:\n  AutoProtocolConfigFactory() : ProtocolFactoryBase(ProtocolNames::get().AUTO) {}\n};\n\n/**\n * Static registration for the auto protocol. @see RegisterFactory.\n */\nREGISTER_FACTORY(AutoProtocolConfigFactory, NamedProtocolConfigFactory);\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/auto_protocol_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/common/fmt.h\"\n\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * AutoProtocolImpl attempts to distinguish between the Thrift binary (strict mode only), compact,\n * and Twitter protocols and then delegates subsequent decoding operations to the appropriate\n * Protocol implementation.\n */\nclass AutoProtocolImpl : public Protocol {\npublic:\n  AutoProtocolImpl() : name_(ProtocolNames::get().AUTO) {}\n\n  // Protocol\n  const std::string& name() const override { return name_; }\n  ProtocolType type() const override {\n    if (protocol_ != nullptr) {\n      return protocol_->type();\n    }\n    return ProtocolType::Auto;\n  }\n  void setType(ProtocolType type) override;\n\n  bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override;\n  bool readMessageEnd(Buffer::Instance& buffer) override;\n  bool readStructBegin(Buffer::Instance& buffer, std::string& name) override {\n    return protocol_->readStructBegin(buffer, name);\n  }\n  bool readStructEnd(Buffer::Instance& buffer) override { return protocol_->readStructEnd(buffer); }\n  bool readFieldBegin(Buffer::Instance& buffer, std::string& name, FieldType& field_type,\n                      int16_t& field_id) override {\n    return protocol_->readFieldBegin(buffer, name, field_type, field_id);\n  }\n  bool readFieldEnd(Buffer::Instance& buffer) override { return protocol_->readFieldEnd(buffer); }\n  bool readMapBegin(Buffer::Instance& buffer, FieldType& key_type, FieldType& value_type,\n                    uint32_t& size) override {\n    return protocol_->readMapBegin(buffer, key_type, value_type, size);\n  }\n  bool readMapEnd(Buffer::Instance& buffer) override { return protocol_->readMapEnd(buffer); }\n  bool readListBegin(Buffer::Instance& buffer, FieldType& 
elem_type, uint32_t& size) override {\n    return protocol_->readListBegin(buffer, elem_type, size);\n  }\n  bool readListEnd(Buffer::Instance& buffer) override { return protocol_->readListEnd(buffer); }\n  bool readSetBegin(Buffer::Instance& buffer, FieldType& elem_type, uint32_t& size) override {\n    return protocol_->readSetBegin(buffer, elem_type, size);\n  }\n  bool readSetEnd(Buffer::Instance& buffer) override { return protocol_->readSetEnd(buffer); }\n  bool readBool(Buffer::Instance& buffer, bool& value) override {\n    return protocol_->readBool(buffer, value);\n  }\n  bool readByte(Buffer::Instance& buffer, uint8_t& value) override {\n    return protocol_->readByte(buffer, value);\n  }\n  bool readInt16(Buffer::Instance& buffer, int16_t& value) override {\n    return protocol_->readInt16(buffer, value);\n  }\n  bool readInt32(Buffer::Instance& buffer, int32_t& value) override {\n    return protocol_->readInt32(buffer, value);\n  }\n  bool readInt64(Buffer::Instance& buffer, int64_t& value) override {\n    return protocol_->readInt64(buffer, value);\n  }\n  bool readDouble(Buffer::Instance& buffer, double& value) override {\n    return protocol_->readDouble(buffer, value);\n  }\n  bool readString(Buffer::Instance& buffer, std::string& value) override {\n    return protocol_->readString(buffer, value);\n  }\n  bool readBinary(Buffer::Instance& buffer, std::string& value) override {\n    return protocol_->readBinary(buffer, value);\n  }\n  void writeMessageBegin(Buffer::Instance& buffer, const MessageMetadata& metadata) override {\n    protocol_->writeMessageBegin(buffer, metadata);\n  }\n  void writeMessageEnd(Buffer::Instance& buffer) override { protocol_->writeMessageEnd(buffer); }\n  void writeStructBegin(Buffer::Instance& buffer, const std::string& name) override {\n    protocol_->writeStructBegin(buffer, name);\n  }\n  void writeStructEnd(Buffer::Instance& buffer) override { protocol_->writeStructEnd(buffer); }\n  void 
writeFieldBegin(Buffer::Instance& buffer, const std::string& name, FieldType field_type,\n                       int16_t field_id) override {\n    protocol_->writeFieldBegin(buffer, name, field_type, field_id);\n  }\n  void writeFieldEnd(Buffer::Instance& buffer) override { protocol_->writeFieldEnd(buffer); }\n  void writeMapBegin(Buffer::Instance& buffer, FieldType key_type, FieldType value_type,\n                     uint32_t size) override {\n    protocol_->writeMapBegin(buffer, key_type, value_type, size);\n  }\n  void writeMapEnd(Buffer::Instance& buffer) override { protocol_->writeMapEnd(buffer); }\n  void writeListBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) override {\n    protocol_->writeListBegin(buffer, elem_type, size);\n  }\n  void writeListEnd(Buffer::Instance& buffer) override { protocol_->writeListEnd(buffer); }\n  void writeSetBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) override {\n    protocol_->writeSetBegin(buffer, elem_type, size);\n  }\n  void writeSetEnd(Buffer::Instance& buffer) override { protocol_->writeSetEnd(buffer); }\n  void writeBool(Buffer::Instance& buffer, bool value) override {\n    protocol_->writeBool(buffer, value);\n  }\n  void writeByte(Buffer::Instance& buffer, uint8_t value) override {\n    protocol_->writeByte(buffer, value);\n  }\n  void writeInt16(Buffer::Instance& buffer, int16_t value) override {\n    protocol_->writeInt16(buffer, value);\n  }\n  void writeInt32(Buffer::Instance& buffer, int32_t value) override {\n    protocol_->writeInt32(buffer, value);\n  }\n  void writeInt64(Buffer::Instance& buffer, int64_t value) override {\n    protocol_->writeInt64(buffer, value);\n  }\n  void writeDouble(Buffer::Instance& buffer, double value) override {\n    protocol_->writeDouble(buffer, value);\n  }\n  void writeString(Buffer::Instance& buffer, const std::string& value) override {\n    protocol_->writeString(buffer, value);\n  }\n  void writeBinary(Buffer::Instance& buffer, 
const std::string& value) override {\n    protocol_->writeBinary(buffer, value);\n  }\n  bool supportsUpgrade() override { return protocol_->supportsUpgrade(); }\n  DecoderEventHandlerSharedPtr upgradeRequestDecoder() override {\n    return protocol_->upgradeRequestDecoder();\n  }\n  DirectResponsePtr upgradeResponse(const DecoderEventHandler& decoder) override {\n    return protocol_->upgradeResponse(decoder);\n  }\n  ThriftObjectPtr attemptUpgrade(Transport& transport, ThriftConnectionState& state,\n                                 Buffer::Instance& buffer) override {\n    return protocol_->attemptUpgrade(transport, state, buffer);\n  }\n  void completeUpgrade(ThriftConnectionState& state, ThriftObject& response) override {\n    return protocol_->completeUpgrade(state, response);\n  }\n\n  /*\n   * Explicitly set the protocol. Public to simplify testing.\n   */\n  void setProtocol(ProtocolPtr&& proto) {\n    protocol_ = std::move(proto);\n    name_ = fmt::format(\"{}({})\", protocol_->name(), ProtocolNames::get().AUTO);\n  }\n\nprivate:\n  ProtocolPtr protocol_{};\n  std::string name_;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/auto_transport_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/auto_transport_impl.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/filters/network/thrift_proxy/binary_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n#include \"extensions/filters/network/thrift_proxy/compact_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/framed_transport_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/header_transport_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/unframed_transport_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nbool AutoTransportImpl::decodeFrameStart(Buffer::Instance& buffer, MessageMetadata& metadata) {\n  if (transport_ == nullptr) {\n    // Not enough data to select a transport.\n    if (buffer.length() < 8) {\n      return false;\n    }\n\n    int32_t size = buffer.peekBEInt<int32_t>();\n    uint16_t proto_start = buffer.peekBEInt<uint16_t>(4);\n\n    // Currently, transport detection depends on the following:\n    // 1. Protocol may only be binary or compact, which start with 0x8001 or 0x8201.\n    // 2. If unframed transport, size will appear negative due to leading protocol bytes.\n    // 3. If header transport, size is followed by 0x0FFF which is distinct from leading\n    //    protocol bytes.\n    // 4. For framed transport, size is followed by protocol bytes.\n    if (size > 0 && size <= HeaderTransportImpl::MaxFrameSize &&\n        HeaderTransportImpl::isMagic(proto_start)) {\n      setTransport(std::make_unique<HeaderTransportImpl>());\n    } else if (size > 0 && size <= FramedTransportImpl::MaxFrameSize) {\n      // TODO(zuercher): Spec says max size is 16,384,000 (0xFA0000). 
Apache C++ TFramedTransport\n      // is configurable, but defaults to 256 MB (0x1000000).\n      if (BinaryProtocolImpl::isMagic(proto_start) || CompactProtocolImpl::isMagic(proto_start)) {\n        setTransport(std::make_unique<FramedTransportImpl>());\n      }\n    } else {\n      // Check for sane unframed protocol.\n      proto_start = static_cast<uint16_t>((size >> 16) & 0xFFFF);\n      if (BinaryProtocolImpl::isMagic(proto_start) || CompactProtocolImpl::isMagic(proto_start)) {\n        setTransport(std::make_unique<UnframedTransportImpl>());\n      }\n    }\n\n    if (transport_ == nullptr) {\n      uint8_t start[9] = {0};\n      buffer.copyOut(0, 8, start);\n\n      throw EnvoyException(fmt::format(\"unknown thrift auto transport frame start \"\n                                       \"{:02x} {:02x} {:02x} {:02x} {:02x} {:02x} {:02x} {:02x}\",\n                                       start[0], start[1], start[2], start[3], start[4], start[5],\n                                       start[6], start[7]));\n    }\n  }\n\n  return transport_->decodeFrameStart(buffer, metadata);\n}\n\nbool AutoTransportImpl::decodeFrameEnd(Buffer::Instance& buffer) {\n  RELEASE_ASSERT(transport_ != nullptr, \"\");\n  return transport_->decodeFrameEnd(buffer);\n}\n\nvoid AutoTransportImpl::encodeFrame(Buffer::Instance& buffer, const MessageMetadata& metadata,\n                                    Buffer::Instance& message) {\n  RELEASE_ASSERT(transport_ != nullptr, \"auto transport cannot encode before transport detection\");\n  transport_->encodeFrame(buffer, metadata, message);\n}\n\nclass AutoTransportConfigFactory : public TransportFactoryBase<AutoTransportImpl> {\npublic:\n  AutoTransportConfigFactory() : TransportFactoryBase(TransportNames::get().AUTO) {}\n};\n\n/**\n * Static registration for the auto transport. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(AutoTransportConfigFactory, NamedTransportConfigFactory);\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/auto_transport_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/common/fmt.h\"\n\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * AutoTransportImpl implements Transport and attempts to distinguish between the Thrift framed and\n * unframed transports. Once the transport is detected, subsequent operations are delegated to the\n * appropriate implementation.\n */\nclass AutoTransportImpl : public Transport {\npublic:\n  AutoTransportImpl() : name_(TransportNames::get().AUTO){};\n\n  // Transport\n  const std::string& name() const override { return name_; }\n  TransportType type() const override {\n    if (transport_ != nullptr) {\n      return transport_->type();\n    }\n\n    return TransportType::Auto;\n  }\n  bool decodeFrameStart(Buffer::Instance& buffer, MessageMetadata& metadata) override;\n  bool decodeFrameEnd(Buffer::Instance& buffer) override;\n  void encodeFrame(Buffer::Instance& buffer, const MessageMetadata& metadata,\n                   Buffer::Instance& message) override;\n\n  /*\n   * Explicitly set the transport. Public to simplify testing.\n   */\n  void setTransport(TransportPtr&& transport) {\n    transport_ = std::move(transport);\n    name_ = fmt::format(\"{}({})\", transport_->name(), TransportNames::get().AUTO);\n  }\n\nprivate:\n  TransportPtr transport_{};\n  std::string name_;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/binary_protocol_impl.h\"\n\n#include <limits>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/macros.h\"\n\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nconst uint16_t BinaryProtocolImpl::Magic = 0x8001;\n\nbool BinaryProtocolImpl::readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) {\n  if (buffer.length() < MinMessageBeginLength) {\n    return false;\n  }\n\n  uint16_t version = buffer.peekBEInt<uint16_t>();\n  if (version != Magic) {\n    throw EnvoyException(\n        fmt::format(\"invalid binary protocol version 0x{:04x} != 0x{:04x}\", version, Magic));\n  }\n\n  // The byte at offset 2 is unused and ignored.\n\n  MessageType type = static_cast<MessageType>(buffer.peekInt<int8_t>(3));\n  if (type < MessageType::Call || type > MessageType::LastMessageType) {\n    throw EnvoyException(\n        fmt::format(\"invalid binary protocol message type {}\", static_cast<int8_t>(type)));\n  }\n\n  uint32_t name_len = buffer.peekBEInt<uint32_t>(4);\n  if (buffer.length() < name_len + MinMessageBeginLength) {\n    return false;\n  }\n\n  buffer.drain(8);\n\n  if (name_len > 0) {\n    metadata.setMethodName(\n        std::string(static_cast<const char*>(buffer.linearize(name_len)), name_len));\n    buffer.drain(name_len);\n  } else {\n    metadata.setMethodName(\"\");\n  }\n  metadata.setMessageType(type);\n  metadata.setSequenceId(buffer.drainBEInt<int32_t>());\n\n  return true;\n}\n\nbool BinaryProtocolImpl::readMessageEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n  return true;\n}\n\nbool BinaryProtocolImpl::readStructBegin(Buffer::Instance& buffer, std::string& name) {\n  UNREFERENCED_PARAMETER(buffer);\n  name.clear(); // binary protocol does not 
transmit struct names\n  return true;\n}\n\nbool BinaryProtocolImpl::readStructEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n  return true;\n}\n\nbool BinaryProtocolImpl::readFieldBegin(Buffer::Instance& buffer, std::string& name,\n                                        FieldType& field_type, int16_t& field_id) {\n  // FieldType::Stop is encoded as 1 byte.\n  if (buffer.length() < 1) {\n    return false;\n  }\n\n  FieldType type = static_cast<FieldType>(buffer.peekInt<int8_t>());\n  if (type == FieldType::Stop) {\n    field_id = 0;\n    buffer.drain(1);\n  } else {\n    // FieldType followed by 2 bytes of field id\n    if (buffer.length() < 3) {\n      return false;\n    }\n    int16_t id = buffer.peekBEInt<int16_t>(1);\n    if (id < 0) {\n      throw EnvoyException(absl::StrCat(\"invalid binary protocol field id \", id));\n    }\n    field_id = id;\n    buffer.drain(3);\n  }\n\n  name.clear(); // binary protocol does not transmit field names\n  field_type = type;\n\n  return true;\n}\n\nbool BinaryProtocolImpl::readFieldEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n  return true;\n}\n\nbool BinaryProtocolImpl::readMapBegin(Buffer::Instance& buffer, FieldType& key_type,\n                                      FieldType& value_type, uint32_t& size) {\n  // Minimum length:\n  //   key type: 1 byte +\n  //   value type: 1 byte +\n  //   map size: 4 bytes\n  if (buffer.length() < 6) {\n    return false;\n  }\n\n  FieldType ktype = static_cast<FieldType>(buffer.peekInt<int8_t>(0));\n  FieldType vtype = static_cast<FieldType>(buffer.peekInt<int8_t>(1));\n  int32_t s = buffer.peekBEInt<int32_t>(2);\n  if (s < 0) {\n    throw EnvoyException(absl::StrCat(\"negative binary protocol map size \", s));\n  }\n\n  buffer.drain(6);\n\n  key_type = ktype;\n  value_type = vtype;\n  size = static_cast<uint32_t>(s);\n\n  return true;\n}\n\nbool BinaryProtocolImpl::readMapEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n  
return true;\n}\n\nbool BinaryProtocolImpl::readListBegin(Buffer::Instance& buffer, FieldType& elem_type,\n                                       uint32_t& size) {\n  // Minimum length:\n  //   elem type: 1 byte +\n  //   map size: 4 bytes\n  if (buffer.length() < 5) {\n    return false;\n  }\n\n  FieldType type = static_cast<FieldType>(buffer.peekInt<int8_t>());\n  int32_t s = buffer.peekBEInt<int32_t>(1);\n  if (s < 0) {\n    throw EnvoyException(fmt::format(\"negative binary protocol list/set size {}\", s));\n  }\n  buffer.drain(5);\n\n  elem_type = type;\n  size = static_cast<uint32_t>(s);\n\n  return true;\n}\n\nbool BinaryProtocolImpl::readListEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n  return true;\n}\n\nbool BinaryProtocolImpl::readSetBegin(Buffer::Instance& buffer, FieldType& elem_type,\n                                      uint32_t& size) {\n  return readListBegin(buffer, elem_type, size);\n}\n\nbool BinaryProtocolImpl::readSetEnd(Buffer::Instance& buffer) { return readListEnd(buffer); }\n\nbool BinaryProtocolImpl::readBool(Buffer::Instance& buffer, bool& value) {\n  if (buffer.length() < 1) {\n    return false;\n  }\n\n  value = buffer.drainInt<int8_t>() != 0;\n  return true;\n}\n\nbool BinaryProtocolImpl::readByte(Buffer::Instance& buffer, uint8_t& value) {\n  if (buffer.length() < 1) {\n    return false;\n  }\n  value = buffer.drainInt<int8_t>();\n  return true;\n}\n\nbool BinaryProtocolImpl::readInt16(Buffer::Instance& buffer, int16_t& value) {\n  if (buffer.length() < 2) {\n    return false;\n  }\n  value = buffer.drainBEInt<int16_t>();\n  return true;\n}\n\nbool BinaryProtocolImpl::readInt32(Buffer::Instance& buffer, int32_t& value) {\n  if (buffer.length() < 4) {\n    return false;\n  }\n  value = buffer.drainBEInt<int32_t>();\n  return true;\n}\n\nbool BinaryProtocolImpl::readInt64(Buffer::Instance& buffer, int64_t& value) {\n  if (buffer.length() < 8) {\n    return false;\n  }\n  value = buffer.drainBEInt<int64_t>();\n  
return true;\n}\n\nbool BinaryProtocolImpl::readDouble(Buffer::Instance& buffer, double& value) {\n  static_assert(sizeof(double) == sizeof(uint64_t), \"sizeof(double) != size(uint64_t)\");\n\n  if (buffer.length() < 8) {\n    return false;\n  }\n\n  value = BufferHelper::drainBEDouble(buffer);\n  return true;\n}\n\nbool BinaryProtocolImpl::readString(Buffer::Instance& buffer, std::string& value) {\n  // Encoded as size (4 bytes) followed by string (0+ bytes).\n  if (buffer.length() < 4) {\n    return false;\n  }\n\n  int32_t str_len = buffer.peekBEInt<int32_t>();\n  if (str_len < 0) {\n    throw EnvoyException(fmt::format(\"negative binary protocol string/binary length {}\", str_len));\n  }\n\n  if (str_len == 0) {\n    buffer.drain(4);\n    value.clear();\n    return true;\n  }\n\n  if (buffer.length() < static_cast<uint64_t>(str_len) + 4) {\n    return false;\n  }\n\n  buffer.drain(4);\n  value.assign(static_cast<const char*>(buffer.linearize(str_len)), str_len);\n  buffer.drain(str_len);\n  return true;\n}\n\nbool BinaryProtocolImpl::readBinary(Buffer::Instance& buffer, std::string& value) {\n  return readString(buffer, value);\n}\n\nvoid BinaryProtocolImpl::writeMessageBegin(Buffer::Instance& buffer,\n                                           const MessageMetadata& metadata) {\n  buffer.writeBEInt<uint16_t>(Magic);\n  buffer.writeBEInt<uint16_t>(static_cast<uint16_t>(metadata.messageType()));\n  writeString(buffer, metadata.methodName());\n  buffer.writeBEInt<int32_t>(metadata.sequenceId());\n}\n\nvoid BinaryProtocolImpl::writeMessageEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n}\n\nvoid BinaryProtocolImpl::writeStructBegin(Buffer::Instance& buffer, const std::string& name) {\n  UNREFERENCED_PARAMETER(buffer);\n  UNREFERENCED_PARAMETER(name);\n}\n\nvoid BinaryProtocolImpl::writeStructEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n}\n\nvoid BinaryProtocolImpl::writeFieldBegin(Buffer::Instance& buffer, const 
std::string& name,\n                                         FieldType field_type, int16_t field_id) {\n  UNREFERENCED_PARAMETER(name);\n\n  buffer.writeByte(static_cast<uint8_t>(field_type));\n  if (field_type == FieldType::Stop) {\n    return;\n  }\n\n  buffer.writeBEInt<int16_t>(field_id);\n}\n\nvoid BinaryProtocolImpl::writeFieldEnd(Buffer::Instance& buffer) { UNREFERENCED_PARAMETER(buffer); }\n\nvoid BinaryProtocolImpl::writeMapBegin(Buffer::Instance& buffer, FieldType key_type,\n                                       FieldType value_type, uint32_t size) {\n  if (size > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {\n    throw EnvoyException(absl::StrCat(\"illegal binary protocol map size \", size));\n  }\n\n  buffer.writeByte(static_cast<int8_t>(key_type));\n  buffer.writeByte(static_cast<int8_t>(value_type));\n  buffer.writeBEInt<int32_t>(static_cast<int32_t>(size));\n}\n\nvoid BinaryProtocolImpl::writeMapEnd(Buffer::Instance& buffer) { UNREFERENCED_PARAMETER(buffer); }\n\nvoid BinaryProtocolImpl::writeListBegin(Buffer::Instance& buffer, FieldType elem_type,\n                                        uint32_t size) {\n  if (size > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {\n    throw EnvoyException(fmt::format(\"illegal binary protocol list/set size {}\", size));\n  }\n\n  buffer.writeByte(static_cast<int8_t>(elem_type));\n  buffer.writeBEInt<int32_t>(static_cast<int32_t>(size));\n}\n\nvoid BinaryProtocolImpl::writeListEnd(Buffer::Instance& buffer) { UNREFERENCED_PARAMETER(buffer); }\n\nvoid BinaryProtocolImpl::writeSetBegin(Buffer::Instance& buffer, FieldType elem_type,\n                                       uint32_t size) {\n  writeListBegin(buffer, elem_type, size);\n}\n\nvoid BinaryProtocolImpl::writeSetEnd(Buffer::Instance& buffer) { writeListEnd(buffer); }\n\nvoid BinaryProtocolImpl::writeBool(Buffer::Instance& buffer, bool value) {\n  buffer.writeByte(value ? 
1 : 0);\n}\n\nvoid BinaryProtocolImpl::writeByte(Buffer::Instance& buffer, uint8_t value) {\n  buffer.writeByte(value);\n}\n\nvoid BinaryProtocolImpl::writeInt16(Buffer::Instance& buffer, int16_t value) {\n  buffer.writeBEInt<int16_t>(value);\n}\n\nvoid BinaryProtocolImpl::writeInt32(Buffer::Instance& buffer, int32_t value) {\n  buffer.writeBEInt<int32_t>(value);\n}\n\nvoid BinaryProtocolImpl::writeInt64(Buffer::Instance& buffer, int64_t value) {\n  buffer.writeBEInt<int64_t>(value);\n}\n\nvoid BinaryProtocolImpl::writeDouble(Buffer::Instance& buffer, double value) {\n  BufferHelper::writeBEDouble(buffer, value);\n}\n\nvoid BinaryProtocolImpl::writeString(Buffer::Instance& buffer, const std::string& value) {\n  buffer.writeBEInt<uint32_t>(value.length());\n  buffer.add(value);\n}\n\nvoid BinaryProtocolImpl::writeBinary(Buffer::Instance& buffer, const std::string& value) {\n  writeString(buffer, value);\n}\n\nbool LaxBinaryProtocolImpl::readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) {\n  // Minimum message length:\n  //   name len: 4 bytes +\n  //   name: 0 bytes +\n  //   msg type: 1 byte +\n  //   seq id: 4 bytes\n  if (buffer.length() < 9) {\n    return false;\n  }\n\n  uint32_t name_len = buffer.peekBEInt<uint32_t>();\n\n  if (buffer.length() < 9 + name_len) {\n    return false;\n  }\n\n  MessageType type = static_cast<MessageType>(buffer.peekInt<int8_t>(name_len + 4));\n  if (type < MessageType::Call || type > MessageType::LastMessageType) {\n    throw EnvoyException(\n        fmt::format(\"invalid (lax) binary protocol message type {}\", static_cast<int8_t>(type)));\n  }\n\n  buffer.drain(4);\n  if (name_len > 0) {\n    metadata.setMethodName(\n        std::string(static_cast<const char*>(buffer.linearize(name_len)), name_len));\n    buffer.drain(name_len);\n  } else {\n    metadata.setMethodName(\"\");\n  }\n\n  metadata.setMessageType(type);\n  metadata.setSequenceId(buffer.peekBEInt<int32_t>(1));\n  buffer.drain(5);\n\n  return 
true;\n}\n\nvoid LaxBinaryProtocolImpl::writeMessageBegin(Buffer::Instance& buffer,\n                                              const MessageMetadata& metadata) {\n  writeString(buffer, metadata.methodName());\n  buffer.writeByte(static_cast<int8_t>(metadata.messageType()));\n  buffer.writeBEInt<int32_t>(metadata.sequenceId());\n}\n\nclass BinaryProtocolConfigFactory : public ProtocolFactoryBase<BinaryProtocolImpl> {\npublic:\n  BinaryProtocolConfigFactory() : ProtocolFactoryBase(ProtocolNames::get().BINARY) {}\n};\n\n/**\n * Static registration for the binary protocol. @see RegisterFactory.\n */\nREGISTER_FACTORY(BinaryProtocolConfigFactory, NamedProtocolConfigFactory);\n\nclass LaxBinaryProtocolConfigFactory : public ProtocolFactoryBase<LaxBinaryProtocolImpl> {\npublic:\n  LaxBinaryProtocolConfigFactory() : ProtocolFactoryBase(ProtocolNames::get().LAX_BINARY) {}\n};\n\n/**\n * Static registration for the auto protocol. @see RegisterFactory.\n */\nREGISTER_FACTORY(LaxBinaryProtocolConfigFactory, NamedProtocolConfigFactory);\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/binary_protocol_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * BinaryProtocolImpl implements the Thrift Binary protocol with strict message encoding.\n * See https://github.com/apache/thrift/blob/master/doc/specs/thrift-binary-protocol.md\n */\nclass BinaryProtocolImpl : public Protocol {\npublic:\n  BinaryProtocolImpl() = default;\n\n  // Protocol\n  const std::string& name() const override { return ProtocolNames::get().BINARY; }\n  ProtocolType type() const override { return ProtocolType::Binary; }\n  bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override;\n  bool readMessageEnd(Buffer::Instance& buffer) override;\n  bool readStructBegin(Buffer::Instance& buffer, std::string& name) override;\n  bool readStructEnd(Buffer::Instance& buffer) override;\n  bool readFieldBegin(Buffer::Instance& buffer, std::string& name, FieldType& field_type,\n                      int16_t& field_id) override;\n  bool readFieldEnd(Buffer::Instance& buffer) override;\n  bool readMapBegin(Buffer::Instance& buffer, FieldType& key_type, FieldType& value_type,\n                    uint32_t& size) override;\n  bool readMapEnd(Buffer::Instance& buffer) override;\n  bool readListBegin(Buffer::Instance& buffer, FieldType& elem_type, uint32_t& size) override;\n  bool readListEnd(Buffer::Instance& buffer) override;\n  bool readSetBegin(Buffer::Instance& buffer, FieldType& elem_type, uint32_t& size) override;\n  bool readSetEnd(Buffer::Instance& buffer) override;\n  bool readBool(Buffer::Instance& buffer, bool& value) override;\n  bool readByte(Buffer::Instance& buffer, uint8_t& value) override;\n  bool readInt16(Buffer::Instance& buffer, int16_t& value) override;\n  bool readInt32(Buffer::Instance& buffer, int32_t& value) override;\n  
bool readInt64(Buffer::Instance& buffer, int64_t& value) override;\n  bool readDouble(Buffer::Instance& buffer, double& value) override;\n  bool readString(Buffer::Instance& buffer, std::string& value) override;\n  bool readBinary(Buffer::Instance& buffer, std::string& value) override;\n  void writeMessageBegin(Buffer::Instance& buffer, const MessageMetadata& metadata) override;\n  void writeMessageEnd(Buffer::Instance& buffer) override;\n  void writeStructBegin(Buffer::Instance& buffer, const std::string& name) override;\n  void writeStructEnd(Buffer::Instance& buffer) override;\n  void writeFieldBegin(Buffer::Instance& buffer, const std::string& name, FieldType field_type,\n                       int16_t field_id) override;\n  void writeFieldEnd(Buffer::Instance& buffer) override;\n  void writeMapBegin(Buffer::Instance& buffer, FieldType key_type, FieldType value_type,\n                     uint32_t size) override;\n  void writeMapEnd(Buffer::Instance& buffer) override;\n  void writeListBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) override;\n  void writeListEnd(Buffer::Instance& buffer) override;\n  void writeSetBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) override;\n  void writeSetEnd(Buffer::Instance& buffer) override;\n  void writeBool(Buffer::Instance& buffer, bool value) override;\n  void writeByte(Buffer::Instance& buffer, uint8_t value) override;\n  void writeInt16(Buffer::Instance& buffer, int16_t value) override;\n  void writeInt32(Buffer::Instance& buffer, int32_t value) override;\n  void writeInt64(Buffer::Instance& buffer, int64_t value) override;\n  void writeDouble(Buffer::Instance& buffer, double value) override;\n  void writeString(Buffer::Instance& buffer, const std::string& value) override;\n  void writeBinary(Buffer::Instance& buffer, const std::string& value) override;\n\n  static bool isMagic(uint16_t word) { return word == Magic; }\n\n  // Minimum message length:\n  //   version: 2 bytes +\n  //  
 unused: 1 byte +\n  //   msg type: 1 byte +\n  //   name len: 4 bytes +\n  //   name: 0 bytes +\n  //   seq id: 4 bytes\n  static constexpr uint64_t MinMessageBeginLength = 12;\n\nprivate:\n  const static uint16_t Magic;\n};\n\n/**\n * LaxBinaryProtocolImpl implements the Thrift Binary protocol with non-strict (e.g. lax) message\n * encoding. See https://github.com/apache/thrift/blob/master/doc/specs/thrift-binary-protocol.md\n */\nclass LaxBinaryProtocolImpl : public BinaryProtocolImpl {\npublic:\n  LaxBinaryProtocolImpl() = default;\n\n  const std::string& name() const override { return ProtocolNames::get().LAX_BINARY; }\n\n  bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override;\n  void writeMessageBegin(Buffer::Instance& buffer, const MessageMetadata& metadata) override;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/buffer_helper.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n\n#include \"common/common/byte_order.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\ndouble BufferHelper::drainBEDouble(Buffer::Instance& buffer) {\n  static_assert(sizeof(double) == sizeof(uint64_t), \"sizeof(double) != sizeof(uint64_t)\");\n  static_assert(std::numeric_limits<double>::is_iec559, \"non-IEC559 (IEEE 754) double\");\n\n  // Implementation based on:\n  // https://github.com/CppCon/CppCon2017/raw/master/Presentations/Type%20Punning%20In%20C%2B%2B17%20-%20Avoiding%20Pun-defined%20Behavior/Type%20Punning%20In%20C%2B%2B17%20-%20Avoiding%20Pun-defined%20Behavior%20-%20Scott%20Schurr%20-%20CppCon%202017.pdf\n  // The short version:\n  // 1. Reinterpreting uint64_t* to double* falls astray of strict aliasing rules.\n  // 2. Using union {uint64_t i; double d;} is undefined behavior in C++ (but not C11).\n  // 3. Using memcpy may be undefined, but probably reliable, and can be optimized to the\n  //    same instructions as 1 and 2.\n  // 4. 
Implementation of last resort is to manually copy from i to d via unsigned char*.\n  uint64_t i = buffer.drainBEInt<uint64_t>();\n  double d;\n  std::memcpy(&d, &i, 8);\n  return d;\n}\n\n// Thrift's var int encoding is described in\n// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md\nuint64_t BufferHelper::peekVarInt(Buffer::Instance& buffer, uint64_t offset, int& size) {\n  // Need at least 1 byte for a var int.\n  if (buffer.length() <= offset) {\n    throw EnvoyException(\"buffer underflow\");\n  }\n\n  // Need at most 10 bytes for a 64-bit var int.\n  const uint64_t last = std::min(buffer.length() - offset, static_cast<uint64_t>(10));\n\n  uint8_t shift = 0;\n  uint64_t result = 0;\n  for (uint64_t i = 0; i < last; i++) {\n    uint8_t b = buffer.peekInt<uint8_t>(offset + i);\n\n    // Note: the compact protocol spec says these variable-length ints are encoded as big-endian,\n    // but the Apache C++, Java, and Python implementations read and write them little-endian.\n    result |= static_cast<uint64_t>(b & 0x7f) << shift;\n    shift += 7;\n\n    if ((b & 0x80) == 0) {\n      // End of encoded int.\n      size = i + 1;\n      return result;\n    }\n  }\n\n  // Ran out of bytes (or it's invalid).\n  size = -last;\n  return 0;\n}\n\nint32_t BufferHelper::peekVarIntI32(Buffer::Instance& buffer, uint64_t offset, int& size) {\n  int underlying_size;\n  uint64_t v64 = peekVarInt(buffer, offset, underlying_size);\n\n  if (underlying_size <= -5 || underlying_size > 5) {\n    throw EnvoyException(\"invalid compact protocol varint i32\");\n  }\n\n  size = underlying_size;\n  if (size < 0) {\n    return 0;\n  }\n\n  return static_cast<int32_t>(v64);\n}\n\n// Thrift's zig-zag int encoding is described in\n// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md\nint64_t BufferHelper::peekZigZagI64(Buffer::Instance& buffer, uint64_t offset, int& size) {\n  int underlying_size;\n  uint64_t zz64 = 
peekVarInt(buffer, offset, underlying_size);\n\n  if (underlying_size <= -10 || underlying_size > 10) {\n    // Max size is 10, so this must be an invalid encoding.\n    throw EnvoyException(\"invalid compact protocol zig-zag i64\");\n  }\n\n  size = underlying_size;\n  if (size < 0) {\n    // Still an underflow, but it might become valid with additional data.\n    return 0;\n  }\n\n  return (zz64 >> 1) ^ static_cast<uint64_t>(-static_cast<int64_t>(zz64 & 1));\n}\n\n// Thrift's zig-zag int encoding is described in\n// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md\nint32_t BufferHelper::peekZigZagI32(Buffer::Instance& buffer, uint64_t offset, int& size) {\n  int underlying_size;\n  uint64_t zz64 = peekVarInt(buffer, offset, underlying_size);\n\n  if (underlying_size <= -5 || underlying_size > 5) {\n    // Max size is 5, so this must be an invalid encoding.\n    throw EnvoyException(\"invalid compact protocol zig-zag i32\");\n  }\n\n  size = underlying_size;\n  if (size < 0) {\n    // Still an underflow, but it might become valid with additional data.\n    return 0;\n  }\n\n  uint32_t zz32 = static_cast<uint32_t>(zz64);\n  return (zz32 >> 1) ^ static_cast<uint32_t>(-static_cast<int32_t>(zz32 & 1));\n}\n\nvoid BufferHelper::writeBEDouble(Buffer::Instance& buffer, double value) {\n  static_assert(sizeof(double) == sizeof(uint64_t), \"sizeof(double) != sizeof(uint64_t)\");\n  static_assert(std::numeric_limits<double>::is_iec559, \"non-IEC559 (IEEE 754) double\");\n\n  // See drainDouble for implementation details.\n  uint64_t i;\n  std::memcpy(&i, &value, 8);\n  buffer.writeBEInt<uint64_t>(i);\n}\n\n// Thrift's var int encoding is described in\n// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md\nvoid BufferHelper::writeVarIntI32(Buffer::Instance& buffer, int32_t value) {\n  uint8_t bytes[5];\n  uint32_t v = static_cast<uint32_t>(value);\n  int pos = 0;\n  while (pos < 5) {\n    if ((v & ~0x7F) == 0) 
{\n      bytes[pos++] = static_cast<uint8_t>(v);\n      break;\n    }\n\n    bytes[pos++] = static_cast<uint8_t>(v & 0x7F) | 0x80;\n    v >>= 7;\n  }\n  ASSERT(v < 0x80);\n  ASSERT(pos <= 5);\n\n  buffer.add(bytes, pos);\n}\n\nvoid BufferHelper::writeVarIntI64(Buffer::Instance& buffer, int64_t value) {\n  uint8_t bytes[10];\n  uint64_t v = static_cast<uint64_t>(value);\n  int pos = 0;\n  while (pos < 10) {\n    if ((v & ~0x7F) == 0) {\n      bytes[pos++] = static_cast<uint8_t>(v);\n      break;\n    }\n\n    bytes[pos++] = static_cast<uint8_t>(v & 0x7F) | 0x80;\n    v >>= 7;\n  }\n\n  ASSERT(v < 0x80);\n  ASSERT(pos <= 10);\n\n  buffer.add(bytes, pos);\n}\n\nvoid BufferHelper::writeZigZagI32(Buffer::Instance& buffer, int32_t value) {\n  uint32_t zz32 = (static_cast<uint32_t>(value) << 1) ^ (value >> 31);\n  writeVarIntI32(buffer, zz32);\n}\n\nvoid BufferHelper::writeZigZagI64(Buffer::Instance& buffer, int64_t value) {\n  uint64_t zz64 = (static_cast<uint64_t>(value) << 1) ^ (value >> 63);\n  writeVarIntI64(buffer, zz64);\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/buffer_helper.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * BufferHelper provides buffer operations for reading bytes and numbers in the various encodings\n * used by Thrift protocols.\n */\nclass BufferHelper {\npublic:\n  /**\n   * Reads and drains a double from a buffer.\n   * @param buffer Buffer::Instance containing data to decode\n   * @return the double at the start of buffer\n   */\n  static double drainBEDouble(Buffer::Instance& buffer);\n\n  /**\n   * Peeks at a variable-length int32_t at offset. Updates size to the number of bytes used to\n   * encode the value. If insufficient bytes are available in the buffer to complete decoding, size\n   * is set to a negative number whose absolute value is the number of bytes examined. At least one\n   * byte must be available in the buffer.\n   *\n   * @param buffer Buffer::Instance containing data to decode\n   * @param offset offset into buffer to peek at\n   * @param size updated with number of bytes decoded for successful result (positive values of\n   *        size) or the number of bytes examined before underflowing (negative values of size).\n   * @return the decoded variable-length int32_t (if size > 0), otherwise 0.\n   * @throw EnvoyException if there is a buffer underflow, but more data would result in an integer\n   *                       larger than 32 bits.\n   */\n  static int32_t peekVarIntI32(Buffer::Instance& buffer, uint64_t offset, int& size);\n\n  /**\n   * Peeks at the zig-zag encoded int64_t at offset. 
Updates size with the same semantics as\n   * peekVarIntI32.\n   *\n   * @param buffer Buffer::Instance containing data to decode\n   * @param offset offset into buffer to peek at\n   * @param size updated with number of bytes decoded for successful result (positive values of\n   *        size) or the number of bytes examined before underflowing (negative values of size).\n   * @return the decoded variable-length zig-zag encoded int64_t (if size > 0), otherwise 0.\n   * @throw EnvoyException if there is a buffer underflow, but more data would result in an integer\n   *                       larger than 64 bits.\n   */\n  static int64_t peekZigZagI64(Buffer::Instance& buffer, uint64_t offset, int& size);\n\n  /**\n   * Peeks at the zig-zag encoded int32_t at offset with the same semantics for the size parameter\n   * as peekVarInt32.\n   *\n   * @param buffer Buffer::Instance containing data to decode\n   * @param offset offset into buffer to peek at\n   * @param size updated with number of bytes decoded for successful result (positive values of\n   *        size) or the number of bytes examined before underflowing (negative values of size).\n   * @return the decoded variable-length zig-zag encoded int32_t (if size > 0), otherwise 0.\n   * @throw EnvoyException if there is a buffer underflow, but more data would result in an integer\n   *                       larger than 32 bits.\n   */\n  static int32_t peekZigZagI32(Buffer::Instance& buffer, uint64_t offset, int& size);\n\n  /**\n   * Writes a double to the buffer.\n   * @param buffer Buffer::Instance written to\n   * @param value the double to write\n   */\n  static void writeBEDouble(Buffer::Instance& buffer, double value);\n\n  /**\n   * Writes a var-int encoded int32_t to the buffer.\n   * @param buffer Buffer::Instance written to\n   * @param value the int32_t to write\n   */\n  static void writeVarIntI32(Buffer::Instance& buffer, int32_t value);\n\n  /**\n   * Writes a var-int encoded int64_t to the 
buffer.\n   * @param buffer Buffer::Instance written to\n   * @param value the int64_t to write\n   */\n  static void writeVarIntI64(Buffer::Instance& buffer, int64_t value);\n\n  /**\n   * Writes a zig-zag encoded int32_t to the buffer.\n   * @param buffer Buffer::Instance written to\n   * @param value the int32_t to write\n   */\n  static void writeZigZagI32(Buffer::Instance& buffer, int32_t value);\n\n  /**\n   * Writes a zig-zag encoded int64_t to the buffer.\n   * @param buffer Buffer::Instance written to\n   * @param value the int64_t to write\n   */\n  static void writeZigZagI64(Buffer::Instance& buffer, int64_t value);\n\nprivate:\n  /**\n   * Peeks at a variable-length int of up to 64 bits at offset. Updates size to indicate how many\n   * bytes were examined.\n   *\n   * @param buffer Buffer::Instance containing data to decode\n   * @param offset offset into buffer to peek at\n   * @param size updated with number of bytes decoded for successful result (positive values of\n   *        size) or the number of bytes examined before underflowing (negative values of size).\n   * @return the decoded variable-length int64_t (if size > 0), otherwise 0.\n   */\n  static uint64_t peekVarInt(Buffer::Instance& buffer, uint64_t offset, int& size);\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/compact_protocol_impl.h\"\n\n#include <limits>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/macros.h\"\n\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nconst uint16_t CompactProtocolImpl::Magic = 0x8201;\nconst uint16_t CompactProtocolImpl::MagicMask = 0xFF1F;\n\nbool CompactProtocolImpl::readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) {\n  // Minimum message length:\n  //   protocol, message type, and version: 2 bytes +\n  //   seq id (var int): 1 byte +\n  //   name length (var int): 1 byte +\n  //   name: 0 bytes\n  if (buffer.length() < 4) {\n    return false;\n  }\n\n  uint16_t version = buffer.peekBEInt<uint16_t>();\n  if ((version & MagicMask) != Magic) {\n    throw EnvoyException(fmt::format(\"invalid compact protocol version 0x{:04x} != 0x{:04x}\",\n                                     version & MagicMask, Magic));\n  }\n\n  MessageType type = static_cast<MessageType>((version & ~MagicMask) >> 5);\n  if (type < MessageType::Call || type > MessageType::LastMessageType) {\n    throw EnvoyException(\n        fmt::format(\"invalid compact protocol message type {}\", static_cast<int8_t>(type)));\n  }\n\n  int id_size;\n  int32_t id = BufferHelper::peekVarIntI32(buffer, 2, id_size);\n  if (id_size < 0) {\n    return false;\n  }\n\n  int name_len_size;\n  int32_t name_len = BufferHelper::peekVarIntI32(buffer, id_size + 2, name_len_size);\n  if (name_len_size < 0) {\n    return false;\n  }\n\n  if (name_len < 0) {\n    throw EnvoyException(absl::StrCat(\"negative compact protocol message name length \", name_len));\n  }\n\n  if (buffer.length() < static_cast<uint64_t>(id_size + name_len_size + name_len + 2)) {\n    return false;\n  }\n\n  buffer.drain(id_size + 
name_len_size + 2);\n\n  if (name_len > 0) {\n    metadata.setMethodName(\n        std::string(static_cast<const char*>(buffer.linearize(name_len)), name_len));\n    buffer.drain(name_len);\n  } else {\n    metadata.setMethodName(\"\");\n  }\n  metadata.setMessageType(type);\n  metadata.setSequenceId(id);\n\n  return true;\n}\n\nbool CompactProtocolImpl::readMessageEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n  return true;\n}\n\nbool CompactProtocolImpl::readStructBegin(Buffer::Instance& buffer, std::string& name) {\n  UNREFERENCED_PARAMETER(buffer);\n  name.clear(); // compact protocol does not transmit struct names\n\n  // Field ids are encoded as deltas specific to the field's containing struct. Field ids are\n  // tracked in a stack to handle nested structs.\n  last_field_id_stack_.push(last_field_id_);\n  last_field_id_ = 0;\n\n  return true;\n}\n\nbool CompactProtocolImpl::readStructEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n\n  if (last_field_id_stack_.empty()) {\n    throw EnvoyException(\"invalid check for compact protocol struct end\");\n  }\n\n  last_field_id_ = last_field_id_stack_.top();\n  last_field_id_stack_.pop();\n\n  return true;\n}\n\nbool CompactProtocolImpl::readFieldBegin(Buffer::Instance& buffer, std::string& name,\n                                         FieldType& field_type, int16_t& field_id) {\n  // Minimum size: FieldType::Stop is encoded as 1 byte.\n  if (buffer.length() < 1) {\n    return false;\n  }\n\n  uint8_t delta_and_type = buffer.peekInt<int8_t>();\n  if ((delta_and_type & 0x0f) == 0) {\n    // Type is stop, no need to do further decoding.\n    name.clear();\n    field_id = 0;\n    field_type = FieldType::Stop;\n    buffer.drain(1);\n\n    return true;\n  }\n\n  int16_t compact_field_id;\n  CompactFieldType compact_field_type;\n  int id_size = 0;\n  if ((delta_and_type >> 4) == 0) {\n    // Field ID delta is zero: this is a long-form field header, followed by zig-zag field 
id.\n    if (buffer.length() < 2) {\n      return false;\n    }\n\n    int32_t id = BufferHelper::peekZigZagI32(buffer, 1, id_size);\n    if (id_size < 0) {\n      return false;\n    }\n\n    if (id < 0 || id > std::numeric_limits<int16_t>::max()) {\n      throw EnvoyException(absl::StrCat(\"invalid compact protocol field id \", id));\n    }\n\n    compact_field_type = static_cast<CompactFieldType>(delta_and_type);\n    compact_field_id = static_cast<int16_t>(id);\n  } else {\n    // Short form field header: 4 bits of field id delta, 4 bits of field type.\n    compact_field_type = static_cast<CompactFieldType>(delta_and_type & 0x0F);\n    compact_field_id = last_field_id_ + static_cast<int16_t>(delta_and_type >> 4);\n  }\n\n  field_type = convertCompactFieldType(compact_field_type);\n  // For simple fields, boolean values are transmitted as a type with no further data.\n  if (field_type == FieldType::Bool) {\n    bool_value_ = compact_field_type == CompactFieldType::BoolTrue;\n  }\n\n  name.clear(); // compact protocol does not transmit field names\n  field_id = compact_field_id;\n  last_field_id_ = compact_field_id;\n\n  buffer.drain(id_size + 1);\n\n  return true;\n}\n\nbool CompactProtocolImpl::readFieldEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n  bool_value_.reset();\n  return true;\n}\n\nbool CompactProtocolImpl::readMapBegin(Buffer::Instance& buffer, FieldType& key_type,\n                                       FieldType& value_type, uint32_t& size) {\n  int s_size;\n  int32_t s = BufferHelper::peekVarIntI32(buffer, 0, s_size);\n  if (s_size < 0) {\n    return false;\n  }\n\n  if (s < 0) {\n    throw EnvoyException(absl::StrCat(\"negative compact protocol map size \", s));\n  }\n\n  if (s == 0) {\n    // Empty map. 
Compact protocol provides no type information in this case.\n    key_type = value_type = FieldType::Stop;\n    size = 0;\n    buffer.drain(s_size);\n    return true;\n  }\n\n  if (buffer.length() < static_cast<uint64_t>(s_size + 1)) {\n    return false;\n  }\n\n  uint8_t types = buffer.peekInt<int8_t>(s_size);\n  FieldType ktype = convertCompactFieldType(static_cast<CompactFieldType>(types >> 4));\n  FieldType vtype = convertCompactFieldType(static_cast<CompactFieldType>(types & 0xF));\n\n  // Drain the size and the types byte.\n  buffer.drain(s_size + 1);\n\n  key_type = ktype;\n  value_type = vtype;\n  size = static_cast<uint32_t>(s);\n\n  return true;\n}\n\nbool CompactProtocolImpl::readMapEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n  return true;\n}\n\nbool CompactProtocolImpl::readListBegin(Buffer::Instance& buffer, FieldType& elem_type,\n                                        uint32_t& size) {\n  // Minimum length:\n  //   size and type: 1 byte\n  if (buffer.length() < 1) {\n    return false;\n  }\n\n  uint32_t sz = 0;\n  int s_size = 0;\n  uint8_t size_and_type = buffer.peekInt<int8_t>();\n  if ((size_and_type & 0xF0) != 0xF0) {\n    // Short form list header: size and type byte.\n    sz = static_cast<uint32_t>(size_and_type >> 4);\n  } else {\n    // Long form list header: type byte followed by var int size.\n    int32_t s = BufferHelper::peekVarIntI32(buffer, 1, s_size);\n    if (s_size < 0) {\n      return false;\n    }\n\n    if (s < 0) {\n      throw EnvoyException(fmt::format(\"negative compact protocol list/set size {}\", s));\n    }\n\n    sz = static_cast<uint32_t>(s);\n  }\n\n  elem_type = convertCompactFieldType(static_cast<CompactFieldType>(size_and_type & 0x0F));\n  size = sz;\n\n  buffer.drain(s_size + 1);\n  return true;\n}\n\nbool CompactProtocolImpl::readListEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n  return true;\n}\n\nbool CompactProtocolImpl::readSetBegin(Buffer::Instance& buffer, 
FieldType& elem_type,\n                                       uint32_t& size) {\n  return readListBegin(buffer, elem_type, size);\n}\n\nbool CompactProtocolImpl::readSetEnd(Buffer::Instance& buffer) { return readListEnd(buffer); }\n\nbool CompactProtocolImpl::readBool(Buffer::Instance& buffer, bool& value) {\n  // Boolean struct fields have their value encoded in the field type.\n  if (bool_value_.has_value()) {\n    value = bool_value_.value();\n    return true;\n  }\n\n  // All other boolean values (list, set, or map elements) are encoded as single bytes.\n  if (buffer.length() < 1) {\n    return false;\n  }\n\n  value = buffer.drainInt<int8_t>() != 0;\n  return true;\n}\n\nbool CompactProtocolImpl::readByte(Buffer::Instance& buffer, uint8_t& value) {\n  if (buffer.length() < 1) {\n    return false;\n  }\n  value = buffer.drainInt<int8_t>();\n  return true;\n}\n\nbool CompactProtocolImpl::readInt16(Buffer::Instance& buffer, int16_t& value) {\n  if (buffer.length() < 1) {\n    return false;\n  }\n\n  int size;\n  int32_t i = BufferHelper::peekZigZagI32(buffer, 0, size);\n  if (size < 0) {\n    return false;\n  }\n\n  if (i < std::numeric_limits<int16_t>::min() || i > std::numeric_limits<int16_t>::max()) {\n    throw EnvoyException(fmt::format(\"compact protocol i16 exceeds allowable range {}\", i));\n  }\n\n  buffer.drain(size);\n  value = static_cast<int16_t>(i);\n  return true;\n}\n\nbool CompactProtocolImpl::readInt32(Buffer::Instance& buffer, int32_t& value) {\n  if (buffer.length() < 1) {\n    return false;\n  }\n\n  int size;\n  int32_t i = BufferHelper::peekZigZagI32(buffer, 0, size);\n  if (size < 0) {\n    return false;\n  }\n\n  buffer.drain(size);\n  value = i;\n  return true;\n}\n\nbool CompactProtocolImpl::readInt64(Buffer::Instance& buffer, int64_t& value) {\n  if (buffer.length() < 1) {\n    return false;\n  }\n\n  int size;\n  int64_t i = BufferHelper::peekZigZagI64(buffer, 0, size);\n  if (size < 0) {\n    return false;\n  }\n\n  
buffer.drain(size);\n  value = i;\n  return true;\n}\n\nbool CompactProtocolImpl::readDouble(Buffer::Instance& buffer, double& value) {\n  static_assert(sizeof(double) == sizeof(uint64_t), \"sizeof(double) != size(uint64_t)\");\n\n  if (buffer.length() < 8) {\n    return false;\n  }\n\n  value = BufferHelper::drainBEDouble(buffer);\n  return true;\n}\n\nbool CompactProtocolImpl::readString(Buffer::Instance& buffer, std::string& value) {\n  if (buffer.length() < 1) {\n    return false;\n  }\n\n  int len_size;\n  int32_t str_len = BufferHelper::peekVarIntI32(buffer, 0, len_size);\n  if (len_size < 0) {\n    return false;\n  }\n\n  if (str_len < 0) {\n    throw EnvoyException(fmt::format(\"negative compact protocol string/binary length {}\", str_len));\n  }\n\n  if (str_len == 0) {\n    buffer.drain(len_size);\n    value.clear();\n    return true;\n  }\n\n  if (buffer.length() < static_cast<uint64_t>(str_len + len_size)) {\n    return false;\n  }\n\n  buffer.drain(len_size);\n  value.assign(static_cast<const char*>(buffer.linearize(str_len)), str_len);\n  buffer.drain(str_len);\n  return true;\n}\n\nbool CompactProtocolImpl::readBinary(Buffer::Instance& buffer, std::string& value) {\n  return readString(buffer, value);\n}\n\nvoid CompactProtocolImpl::writeMessageBegin(Buffer::Instance& buffer,\n                                            const MessageMetadata& metadata) {\n  MessageType msg_type = metadata.messageType();\n\n  uint16_t ptv = (Magic & MagicMask) | (static_cast<uint16_t>(msg_type) << 5);\n  ASSERT((ptv & MagicMask) == Magic);\n  ASSERT((ptv & ~MagicMask) >> 5 == static_cast<uint16_t>(msg_type));\n\n  buffer.writeBEInt<uint16_t>(ptv);\n  BufferHelper::writeVarIntI32(buffer, metadata.sequenceId());\n  writeString(buffer, metadata.methodName());\n}\n\nvoid CompactProtocolImpl::writeMessageEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n}\n\nvoid CompactProtocolImpl::writeStructBegin(Buffer::Instance& buffer, const std::string& name) {\n  
UNREFERENCED_PARAMETER(buffer);\n  UNREFERENCED_PARAMETER(name);\n\n  // Field ids are encoded as deltas specific to the field's containing struct. Field ids are\n  // tracked in a stack to handle nested structs.\n  last_field_id_stack_.push(last_field_id_);\n  last_field_id_ = 0;\n}\n\nvoid CompactProtocolImpl::writeStructEnd(Buffer::Instance& buffer) {\n  UNREFERENCED_PARAMETER(buffer);\n\n  if (last_field_id_stack_.empty()) {\n    throw EnvoyException(\"invalid write of compact protocol struct end\");\n  }\n\n  last_field_id_ = last_field_id_stack_.top();\n  last_field_id_stack_.pop();\n}\n\nvoid CompactProtocolImpl::writeFieldBegin(Buffer::Instance& buffer, const std::string& name,\n                                          FieldType field_type, int16_t field_id) {\n  UNREFERENCED_PARAMETER(name);\n\n  if (field_type == FieldType::Stop) {\n    buffer.writeByte(0);\n    return;\n  }\n\n  if (field_type == FieldType::Bool) {\n    bool_field_id_ = field_id;\n    return;\n  }\n\n  writeFieldBeginInternal(buffer, field_type, field_id, {});\n}\n\nvoid CompactProtocolImpl::writeFieldBeginInternal(\n    Buffer::Instance& buffer, FieldType field_type, int16_t field_id,\n    absl::optional<CompactFieldType> field_type_override) {\n  CompactFieldType compact_field_type;\n  if (field_type_override.has_value()) {\n    compact_field_type = field_type_override.value();\n  } else {\n    compact_field_type = convertFieldType(field_type);\n  }\n\n  if (field_id > last_field_id_ && field_id - last_field_id_ <= 15) {\n    // Encode short-form field header.\n    buffer.writeByte((static_cast<int8_t>(field_id - last_field_id_) << 4) |\n                     static_cast<int8_t>(compact_field_type));\n  } else {\n    buffer.writeByte(static_cast<int8_t>(compact_field_type));\n    BufferHelper::writeZigZagI32(buffer, static_cast<int32_t>(field_id));\n  }\n\n  last_field_id_ = field_id;\n}\n\nvoid CompactProtocolImpl::writeFieldEnd(Buffer::Instance& buffer) {\n  
UNREFERENCED_PARAMETER(buffer);\n\n  bool_field_id_.reset();\n}\n\nvoid CompactProtocolImpl::writeMapBegin(Buffer::Instance& buffer, FieldType key_type,\n                                        FieldType value_type, uint32_t size) {\n  if (size > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {\n    throw EnvoyException(absl::StrCat(\"illegal compact protocol map size \", size));\n  }\n\n  BufferHelper::writeVarIntI32(buffer, static_cast<int32_t>(size));\n  if (size == 0) {\n    return;\n  }\n\n  CompactFieldType compact_key_type = convertFieldType(key_type);\n  CompactFieldType compact_value_type = convertFieldType(value_type);\n  buffer.writeByte((static_cast<int8_t>(compact_key_type) << 4) |\n                   static_cast<int8_t>(compact_value_type));\n}\n\nvoid CompactProtocolImpl::writeMapEnd(Buffer::Instance& buffer) { UNREFERENCED_PARAMETER(buffer); }\n\nvoid CompactProtocolImpl::writeListBegin(Buffer::Instance& buffer, FieldType elem_type,\n                                         uint32_t size) {\n  if (size > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {\n    throw EnvoyException(fmt::format(\"illegal compact protocol list/set size {}\", size));\n  }\n\n  CompactFieldType compact_elem_type = convertFieldType(elem_type);\n\n  if (size < 0xF) {\n    // Short form list/set header\n    int8_t short_size = static_cast<int8_t>(size & 0xF);\n    buffer.writeByte((short_size << 4) | static_cast<int8_t>(compact_elem_type));\n  } else {\n    buffer.writeByte(0xF0 | static_cast<int8_t>(compact_elem_type));\n    BufferHelper::writeVarIntI32(buffer, static_cast<int32_t>(size));\n  }\n}\n\nvoid CompactProtocolImpl::writeListEnd(Buffer::Instance& buffer) { UNREFERENCED_PARAMETER(buffer); }\n\nvoid CompactProtocolImpl::writeSetBegin(Buffer::Instance& buffer, FieldType elem_type,\n                                        uint32_t size) {\n  writeListBegin(buffer, elem_type, size);\n}\n\nvoid CompactProtocolImpl::writeSetEnd(Buffer::Instance& 
buffer) { UNREFERENCED_PARAMETER(buffer); }\n\nvoid CompactProtocolImpl::writeBool(Buffer::Instance& buffer, bool value) {\n  if (bool_field_id_.has_value()) {\n    // Boolean fields have their value encoded by type.\n    CompactFieldType bool_field_type =\n        value ? CompactFieldType::BoolTrue : CompactFieldType::BoolFalse;\n    writeFieldBeginInternal(buffer, FieldType::Bool, bool_field_id_.value(), {bool_field_type});\n    return;\n  }\n\n  // Map/Set/List booleans are encoded as bytes.\n  buffer.writeByte(value ? 1 : 0);\n}\n\nvoid CompactProtocolImpl::writeByte(Buffer::Instance& buffer, uint8_t value) {\n  buffer.writeByte(value);\n}\n\nvoid CompactProtocolImpl::writeInt16(Buffer::Instance& buffer, int16_t value) {\n  int32_t extended = static_cast<int32_t>(value);\n  BufferHelper::writeZigZagI32(buffer, extended);\n}\n\nvoid CompactProtocolImpl::writeInt32(Buffer::Instance& buffer, int32_t value) {\n  BufferHelper::writeZigZagI32(buffer, value);\n}\n\nvoid CompactProtocolImpl::writeInt64(Buffer::Instance& buffer, int64_t value) {\n  BufferHelper::writeZigZagI64(buffer, value);\n}\n\nvoid CompactProtocolImpl::writeDouble(Buffer::Instance& buffer, double value) {\n  BufferHelper::writeBEDouble(buffer, value);\n}\n\nvoid CompactProtocolImpl::writeString(Buffer::Instance& buffer, const std::string& value) {\n  BufferHelper::writeVarIntI32(buffer, value.length());\n  buffer.add(value);\n}\n\nvoid CompactProtocolImpl::writeBinary(Buffer::Instance& buffer, const std::string& value) {\n  writeString(buffer, value);\n}\n\nFieldType CompactProtocolImpl::convertCompactFieldType(CompactFieldType compact_field_type) {\n  switch (compact_field_type) {\n  case CompactFieldType::BoolTrue:\n    return FieldType::Bool;\n  case CompactFieldType::BoolFalse:\n    return FieldType::Bool;\n  case CompactFieldType::Byte:\n    return FieldType::Byte;\n  case CompactFieldType::I16:\n    return FieldType::I16;\n  case CompactFieldType::I32:\n    return FieldType::I32;\n  case 
CompactFieldType::I64:\n    return FieldType::I64;\n  case CompactFieldType::Double:\n    return FieldType::Double;\n  case CompactFieldType::String:\n    return FieldType::String;\n  case CompactFieldType::List:\n    return FieldType::List;\n  case CompactFieldType::Set:\n    return FieldType::Set;\n  case CompactFieldType::Map:\n    return FieldType::Map;\n  case CompactFieldType::Struct:\n    return FieldType::Struct;\n  default:\n    throw EnvoyException(fmt::format(\"unknown compact protocol field type {}\",\n                                     static_cast<int8_t>(compact_field_type)));\n  }\n}\n\nCompactProtocolImpl::CompactFieldType CompactProtocolImpl::convertFieldType(FieldType field_type) {\n  switch (field_type) {\n  case FieldType::Bool:\n    // c.f. special handling in writeFieldBegin\n    return CompactFieldType::BoolTrue;\n  case FieldType::Byte:\n    return CompactFieldType::Byte;\n  case FieldType::I16:\n    return CompactFieldType::I16;\n  case FieldType::I32:\n    return CompactFieldType::I32;\n  case FieldType::I64:\n    return CompactFieldType::I64;\n  case FieldType::Double:\n    return CompactFieldType::Double;\n  case FieldType::String:\n    return CompactFieldType::String;\n  case FieldType::Struct:\n    return CompactFieldType::Struct;\n  case FieldType::Map:\n    return CompactFieldType::Map;\n  case FieldType::Set:\n    return CompactFieldType::Set;\n  case FieldType::List:\n    return CompactFieldType::List;\n  default:\n    throw EnvoyException(\n        fmt::format(\"unknown protocol field type {}\", static_cast<int8_t>(field_type)));\n  }\n}\n\nclass CompactProtocolConfigFactory : public ProtocolFactoryBase<CompactProtocolImpl> {\npublic:\n  CompactProtocolConfigFactory() : ProtocolFactoryBase(ProtocolNames::get().COMPACT) {}\n};\n\n/**\n * Static registration for the compact protocol. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(CompactProtocolConfigFactory, NamedProtocolConfigFactory);\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/compact_protocol_impl.h",
    "content": "#pragma once\n\n#include <stack>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * CompactProtocolImpl implements the Thrift Compact protocol.\n * See https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md\n */\nclass CompactProtocolImpl : public Protocol {\npublic:\n  CompactProtocolImpl() = default;\n\n  // Protocol\n  const std::string& name() const override { return ProtocolNames::get().COMPACT; }\n  ProtocolType type() const override { return ProtocolType::Compact; }\n  bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override;\n  bool readMessageEnd(Buffer::Instance& buffer) override;\n  bool readStructBegin(Buffer::Instance& buffer, std::string& name) override;\n  bool readStructEnd(Buffer::Instance& buffer) override;\n  bool readFieldBegin(Buffer::Instance& buffer, std::string& name, FieldType& field_type,\n                      int16_t& field_id) override;\n  bool readFieldEnd(Buffer::Instance& buffer) override;\n  bool readMapBegin(Buffer::Instance& buffer, FieldType& key_type, FieldType& value_type,\n                    uint32_t& size) override;\n  bool readMapEnd(Buffer::Instance& buffer) override;\n  bool readListBegin(Buffer::Instance& buffer, FieldType& elem_type, uint32_t& size) override;\n  bool readListEnd(Buffer::Instance& buffer) override;\n  bool readSetBegin(Buffer::Instance& buffer, FieldType& elem_type, uint32_t& size) override;\n  bool readSetEnd(Buffer::Instance& buffer) override;\n  bool readBool(Buffer::Instance& buffer, bool& value) override;\n  bool readByte(Buffer::Instance& buffer, uint8_t& value) override;\n  bool readInt16(Buffer::Instance& buffer, int16_t& value) override;\n  bool readInt32(Buffer::Instance& 
buffer, int32_t& value) override;\n  bool readInt64(Buffer::Instance& buffer, int64_t& value) override;\n  bool readDouble(Buffer::Instance& buffer, double& value) override;\n  bool readString(Buffer::Instance& buffer, std::string& value) override;\n  bool readBinary(Buffer::Instance& buffer, std::string& value) override;\n  void writeMessageBegin(Buffer::Instance& buffer, const MessageMetadata& metadata) override;\n  void writeMessageEnd(Buffer::Instance& buffer) override;\n  void writeStructBegin(Buffer::Instance& buffer, const std::string& name) override;\n  void writeStructEnd(Buffer::Instance& buffer) override;\n  void writeFieldBegin(Buffer::Instance& buffer, const std::string& name, FieldType field_type,\n                       int16_t field_id) override;\n  void writeFieldEnd(Buffer::Instance& buffer) override;\n  void writeMapBegin(Buffer::Instance& buffer, FieldType key_type, FieldType value_type,\n                     uint32_t size) override;\n  void writeMapEnd(Buffer::Instance& buffer) override;\n  void writeListBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) override;\n  void writeListEnd(Buffer::Instance& buffer) override;\n  void writeSetBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) override;\n  void writeSetEnd(Buffer::Instance& buffer) override;\n  void writeBool(Buffer::Instance& buffer, bool value) override;\n  void writeByte(Buffer::Instance& buffer, uint8_t value) override;\n  void writeInt16(Buffer::Instance& buffer, int16_t value) override;\n  void writeInt32(Buffer::Instance& buffer, int32_t value) override;\n  void writeInt64(Buffer::Instance& buffer, int64_t value) override;\n  void writeDouble(Buffer::Instance& buffer, double value) override;\n  void writeString(Buffer::Instance& buffer, const std::string& value) override;\n  void writeBinary(Buffer::Instance& buffer, const std::string& value) override;\n\n  static bool isMagic(uint16_t word) { return (word & MagicMask) == Magic; }\n\nprivate:\n  
enum class CompactFieldType {\n    Stop = 0,\n    BoolTrue = 1,\n    BoolFalse = 2,\n    Byte = 3,\n    I16 = 4,\n    I32 = 5,\n    I64 = 6,\n    Double = 7,\n    String = 8,\n    List = 9,\n    Set = 10,\n    Map = 11,\n    Struct = 12,\n  };\n\n  FieldType convertCompactFieldType(CompactFieldType compact_field_type);\n  CompactFieldType convertFieldType(FieldType field_type);\n\n  void writeFieldBeginInternal(Buffer::Instance& buffer, FieldType field_type, int16_t field_id,\n                               absl::optional<CompactFieldType> field_type_override);\n\n  std::stack<int16_t> last_field_id_stack_{};\n  int16_t last_field_id_{0};\n\n  // Compact protocol encodes boolean struct fields as true/false *types* with no data.\n  // This tracks the last boolean struct field's value for readBool.\n  absl::optional<bool> bool_value_{};\n\n  // Similarly, track the field id for writeBool.\n  absl::optional<int16_t> bool_field_id_{};\n\n  const static uint16_t Magic;\n  const static uint16_t MagicMask;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/config.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/config.h\"\n\n#include <map>\n#include <string>\n\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.validate.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/utility.h\"\n\n#include \"extensions/filters/network/thrift_proxy/auto_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/auto_transport_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/binary_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/compact_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/decoder.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/filter_config.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/well_known_names.h\"\n#include \"extensions/filters/network/thrift_proxy/framed_transport_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/stats.h\"\n#include \"extensions/filters/network/thrift_proxy/unframed_transport_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace {\n\nusing TransportTypeMap =\n    std::map<envoy::extensions::filters::network::thrift_proxy::v3::TransportType, TransportType>;\n\nstatic const TransportTypeMap& transportTypeMap() {\n  CONSTRUCT_ON_FIRST_USE(\n      TransportTypeMap,\n      {\n          {envoy::extensions::filters::network::thrift_proxy::v3::AUTO_TRANSPORT,\n           TransportType::Auto},\n          {envoy::extensions::filters::network::thrift_proxy::v3::FRAMED, TransportType::Framed},\n          {envoy::extensions::filters::network::thrift_proxy::v3::UNFRAMED,\n           TransportType::Unframed},\n          {envoy::extensions::filters::network::thrift_proxy::v3::HEADER, TransportType::Header},\n      });\n}\n\nusing ProtocolTypeMap =\n    
std::map<envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType, ProtocolType>;\n\nstatic const ProtocolTypeMap& protocolTypeMap() {\n  CONSTRUCT_ON_FIRST_USE(\n      ProtocolTypeMap,\n      {\n          {envoy::extensions::filters::network::thrift_proxy::v3::AUTO_PROTOCOL,\n           ProtocolType::Auto},\n          {envoy::extensions::filters::network::thrift_proxy::v3::BINARY, ProtocolType::Binary},\n          {envoy::extensions::filters::network::thrift_proxy::v3::LAX_BINARY,\n           ProtocolType::LaxBinary},\n          {envoy::extensions::filters::network::thrift_proxy::v3::COMPACT, ProtocolType::Compact},\n          {envoy::extensions::filters::network::thrift_proxy::v3::TWITTER, ProtocolType::Twitter},\n      });\n}\n\nTransportType\nlookupTransport(envoy::extensions::filters::network::thrift_proxy::v3::TransportType transport) {\n  const auto& transport_iter = transportTypeMap().find(transport);\n  if (transport_iter == transportTypeMap().end()) {\n    throw EnvoyException(fmt::format(\n        \"unknown transport {}\",\n        envoy::extensions::filters::network::thrift_proxy::v3::TransportType_Name(transport)));\n  }\n\n  return transport_iter->second;\n}\n\nProtocolType\nlookupProtocol(envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType protocol) {\n  const auto& protocol_iter = protocolTypeMap().find(protocol);\n  if (protocol_iter == protocolTypeMap().end()) {\n    throw EnvoyException(fmt::format(\n        \"unknown protocol {}\",\n        envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType_Name(protocol)));\n  }\n  return protocol_iter->second;\n}\n\n} // namespace\n\nProtocolOptionsConfigImpl::ProtocolOptionsConfigImpl(\n    const envoy::extensions::filters::network::thrift_proxy::v3::ThriftProtocolOptions& config)\n    : transport_(lookupTransport(config.transport())),\n      protocol_(lookupProtocol(config.protocol())) {}\n\nTransportType ProtocolOptionsConfigImpl::transport(TransportType 
downstream_transport) const {\n  return (transport_ == TransportType::Auto) ? downstream_transport : transport_;\n}\n\nProtocolType ProtocolOptionsConfigImpl::protocol(ProtocolType downstream_protocol) const {\n  return (protocol_ == ProtocolType::Auto) ? downstream_protocol : protocol_;\n}\n\nNetwork::FilterFactoryCb ThriftProxyFilterConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  std::shared_ptr<Config> filter_config(new ConfigImpl(proto_config, context));\n\n  return [filter_config, &context](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addReadFilter(std::make_shared<ConnectionManager>(\n        *filter_config, context.api().randomGenerator(), context.dispatcher().timeSource()));\n  };\n}\n\n/**\n * Static registration for the thrift filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(ThriftProxyFilterConfigFactory,\n                 Server::Configuration::NamedNetworkFilterConfigFactory);\n\nConfigImpl::ConfigImpl(\n    const envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy& config,\n    Server::Configuration::FactoryContext& context)\n    : context_(context), stats_prefix_(fmt::format(\"thrift.{}.\", config.stat_prefix())),\n      stats_(ThriftFilterStats::generateStats(stats_prefix_, context_.scope())),\n      transport_(lookupTransport(config.transport())), proto_(lookupProtocol(config.protocol())),\n      route_matcher_(new Router::RouteMatcher(config.route_config())) {\n\n  if (config.thrift_filters().empty()) {\n    ENVOY_LOG(debug, \"using default router filter\");\n\n    envoy::extensions::filters::network::thrift_proxy::v3::ThriftFilter router;\n    router.set_name(ThriftFilters::ThriftFilterNames::get().ROUTER);\n    processFilter(router);\n  } else {\n    for (const auto& filter : config.thrift_filters()) {\n      processFilter(filter);\n    }\n  
}\n}\n\nvoid ConfigImpl::createFilterChain(ThriftFilters::FilterChainFactoryCallbacks& callbacks) {\n  for (const ThriftFilters::FilterFactoryCb& factory : filter_factories_) {\n    factory(callbacks);\n  }\n}\n\nTransportPtr ConfigImpl::createTransport() {\n  return NamedTransportConfigFactory::getFactory(transport_).createTransport();\n}\n\nProtocolPtr ConfigImpl::createProtocol() {\n  return NamedProtocolConfigFactory::getFactory(proto_).createProtocol();\n}\n\nvoid ConfigImpl::processFilter(\n    const envoy::extensions::filters::network::thrift_proxy::v3::ThriftFilter& proto_config) {\n  const std::string& string_name = proto_config.name();\n\n  ENVOY_LOG(debug, \"    thrift filter #{}\", filter_factories_.size());\n  ENVOY_LOG(debug, \"      name: {}\", string_name);\n  ENVOY_LOG(debug, \"    config: {}\",\n            MessageUtil::getJsonStringFromMessage(\n                proto_config.has_typed_config()\n                    ? static_cast<const Protobuf::Message&>(proto_config.typed_config())\n                    : static_cast<const Protobuf::Message&>(\n                          proto_config.hidden_envoy_deprecated_config()),\n                true));\n  auto& factory =\n      Envoy::Config::Utility::getAndCheckFactory<ThriftFilters::NamedThriftFilterConfigFactory>(\n          proto_config);\n\n  ProtobufTypes::MessagePtr message = Envoy::Config::Utility::translateToFactoryConfig(\n      proto_config, context_.messageValidationVisitor(), factory);\n  ThriftFilters::FilterFactoryCb callback =\n      factory.createFilterFactoryFromProto(*message, stats_prefix_, context_);\n\n  filter_factories_.push_back(callback);\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/config.h",
    "content": "#pragma once\n\n#include <map>\n#include <string>\n\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/thrift_proxy/conn_manager.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/filter.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router_impl.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * Provides Thrift-specific cluster options.\n */\nclass ProtocolOptionsConfigImpl : public ProtocolOptionsConfig {\npublic:\n  ProtocolOptionsConfigImpl(\n      const envoy::extensions::filters::network::thrift_proxy::v3::ThriftProtocolOptions&\n          proto_config);\n\n  // ProtocolOptionsConfig\n  TransportType transport(TransportType downstream_transport) const override;\n  ProtocolType protocol(ProtocolType downstream_protocol) const override;\n\nprivate:\n  const TransportType transport_;\n  const ProtocolType protocol_;\n};\n\n/**\n * Config registration for the thrift proxy filter. 
@see NamedNetworkFilterConfigFactory.\n */\nclass ThriftProxyFilterConfigFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy,\n          envoy::extensions::filters::network::thrift_proxy::v3::ThriftProtocolOptions> {\npublic:\n  ThriftProxyFilterConfigFactory() : FactoryBase(NetworkFilterNames::get().ThriftProxy, true) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n\n  Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsTyped(\n      const envoy::extensions::filters::network::thrift_proxy::v3::ThriftProtocolOptions&\n          proto_config,\n      Server::Configuration::ProtocolOptionsFactoryContext&) override {\n    return std::make_shared<ProtocolOptionsConfigImpl>(proto_config);\n  }\n};\n\nclass ConfigImpl : public Config,\n                   public Router::Config,\n                   public ThriftFilters::FilterChainFactory,\n                   Logger::Loggable<Logger::Id::config> {\npublic:\n  ConfigImpl(const envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy& config,\n             Server::Configuration::FactoryContext& context);\n\n  // ThriftFilters::FilterChainFactory\n  void createFilterChain(ThriftFilters::FilterChainFactoryCallbacks& callbacks) override;\n\n  // Router::Config\n  Router::RouteConstSharedPtr route(const MessageMetadata& metadata,\n                                    uint64_t random_value) const override {\n    return route_matcher_->route(metadata, random_value);\n  }\n\n  // Config\n  ThriftFilterStats& stats() override { return stats_; }\n  ThriftFilters::FilterChainFactory& filterFactory() override { return *this; }\n  TransportPtr createTransport() override;\n  ProtocolPtr createProtocol() override;\n  Router::Config& routerConfig() override { 
return *this; }\n\nprivate:\n  void processFilter(\n      const envoy::extensions::filters::network::thrift_proxy::v3::ThriftFilter& proto_config);\n\n  Server::Configuration::FactoryContext& context_;\n  const std::string stats_prefix_;\n  ThriftFilterStats stats_;\n  const TransportType transport_;\n  const ProtocolType proto_;\n  std::unique_ptr<Router::RouteMatcher> route_matcher_;\n\n  std::list<ThriftFilters::FilterFactoryCb> filter_factories_;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/conn_manager.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/conn_manager.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/event/dispatcher.h\"\n\n#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nConnectionManager::ConnectionManager(Config& config, Random::RandomGenerator& random_generator,\n                                     TimeSource& time_source)\n    : config_(config), stats_(config_.stats()), transport_(config.createTransport()),\n      protocol_(config.createProtocol()),\n      decoder_(std::make_unique<Decoder>(*transport_, *protocol_, *this)),\n      random_generator_(random_generator), time_source_(time_source) {}\n\nConnectionManager::~ConnectionManager() = default;\n\nNetwork::FilterStatus ConnectionManager::onData(Buffer::Instance& data, bool end_stream) {\n  request_buffer_.move(data);\n  dispatch();\n\n  if (end_stream) {\n    ENVOY_CONN_LOG(trace, \"downstream half-closed\", read_callbacks_->connection());\n\n    // Downstream has closed. Unless we're waiting for an upstream connection to complete a oneway\n    // request, close. 
The special case for oneway requests allows them to complete before the\n    // ConnectionManager is destroyed.\n    if (stopped_) {\n      ASSERT(!rpcs_.empty());\n      MessageMetadata& metadata = *(*rpcs_.begin())->metadata_;\n      ASSERT(metadata.hasMessageType());\n      if (metadata.messageType() == MessageType::Oneway) {\n        ENVOY_CONN_LOG(trace, \"waiting for one-way completion\", read_callbacks_->connection());\n        half_closed_ = true;\n        return Network::FilterStatus::StopIteration;\n      }\n    }\n\n    resetAllRpcs(false);\n    read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n  }\n\n  return Network::FilterStatus::StopIteration;\n}\n\nvoid ConnectionManager::dispatch() {\n  if (stopped_) {\n    ENVOY_CONN_LOG(debug, \"thrift filter stopped\", read_callbacks_->connection());\n    return;\n  }\n\n  try {\n    bool underflow = false;\n    while (!underflow) {\n      FilterStatus status = decoder_->onData(request_buffer_, underflow);\n      if (status == FilterStatus::StopIteration) {\n        stopped_ = true;\n        break;\n      }\n    }\n\n    return;\n  } catch (const AppException& ex) {\n    ENVOY_LOG(error, \"thrift application exception: {}\", ex.what());\n    if (rpcs_.empty()) {\n      MessageMetadata metadata;\n      sendLocalReply(metadata, ex, true);\n    } else {\n      sendLocalReply(*(*rpcs_.begin())->metadata_, ex, true);\n    }\n  } catch (const EnvoyException& ex) {\n    ENVOY_CONN_LOG(error, \"thrift error: {}\", read_callbacks_->connection(), ex.what());\n\n    if (rpcs_.empty()) {\n      // Transport/protocol mismatch (including errors in automatic detection). 
Just hang up\n      // since we don't know how to encode a response.\n      read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n    } else {\n      // Use the current rpc's transport/protocol to send an error downstream.\n      rpcs_.front()->onError(ex.what());\n    }\n  }\n\n  stats_.request_decoding_error_.inc();\n  resetAllRpcs(true);\n}\n\nvoid ConnectionManager::sendLocalReply(MessageMetadata& metadata, const DirectResponse& response,\n                                       bool end_stream) {\n  if (read_callbacks_->connection().state() == Network::Connection::State::Closed) {\n    return;\n  }\n\n  Buffer::OwnedImpl buffer;\n  const DirectResponse::ResponseType result = response.encode(metadata, *protocol_, buffer);\n\n  Buffer::OwnedImpl response_buffer;\n  metadata.setProtocol(protocol_->type());\n  transport_->encodeFrame(response_buffer, metadata, buffer);\n\n  read_callbacks_->connection().write(response_buffer, end_stream);\n  if (end_stream) {\n    read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n  }\n\n  switch (result) {\n  case DirectResponse::ResponseType::SuccessReply:\n    stats_.response_success_.inc();\n    break;\n  case DirectResponse::ResponseType::ErrorReply:\n    stats_.response_error_.inc();\n    break;\n  case DirectResponse::ResponseType::Exception:\n    stats_.response_exception_.inc();\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid ConnectionManager::continueDecoding() {\n  ENVOY_CONN_LOG(debug, \"thrift filter continued\", read_callbacks_->connection());\n  stopped_ = false;\n  dispatch();\n\n  if (!stopped_ && half_closed_) {\n    // If we're half closed, but not stopped waiting for an upstream, reset any pending rpcs and\n    // close the connection.\n    resetAllRpcs(false);\n    read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n  }\n}\n\nvoid ConnectionManager::doDeferredRpcDestroy(ConnectionManager::ActiveRpc& 
rpc) {\n  read_callbacks_->connection().dispatcher().deferredDelete(rpc.removeFromList(rpcs_));\n}\n\nvoid ConnectionManager::resetAllRpcs(bool local_reset) {\n  while (!rpcs_.empty()) {\n    if (local_reset) {\n      ENVOY_CONN_LOG(debug, \"local close with active request\", read_callbacks_->connection());\n      stats_.cx_destroy_local_with_active_rq_.inc();\n    } else {\n      ENVOY_CONN_LOG(debug, \"remote close with active request\", read_callbacks_->connection());\n      stats_.cx_destroy_remote_with_active_rq_.inc();\n    }\n\n    rpcs_.front()->onReset();\n  }\n}\n\nvoid ConnectionManager::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {\n  read_callbacks_ = &callbacks;\n\n  read_callbacks_->connection().addConnectionCallbacks(*this);\n  read_callbacks_->connection().enableHalfClose(true);\n}\n\nvoid ConnectionManager::onEvent(Network::ConnectionEvent event) {\n  resetAllRpcs(event == Network::ConnectionEvent::LocalClose);\n}\n\nDecoderEventHandler& ConnectionManager::newDecoderEventHandler() {\n  ENVOY_LOG(trace, \"new decoder filter\");\n\n  ActiveRpcPtr new_rpc(new ActiveRpc(*this));\n  new_rpc->createFilterChain();\n  LinkedList::moveIntoList(std::move(new_rpc), rpcs_);\n\n  return **rpcs_.begin();\n}\n\nbool ConnectionManager::ResponseDecoder::onData(Buffer::Instance& data) {\n  upstream_buffer_.move(data);\n\n  bool underflow = false;\n  decoder_->onData(upstream_buffer_, underflow);\n  ASSERT(complete_ || underflow);\n  return complete_;\n}\n\nFilterStatus ConnectionManager::ResponseDecoder::messageBegin(MessageMetadataSharedPtr metadata) {\n  metadata_ = metadata;\n  metadata_->setSequenceId(parent_.original_sequence_id_);\n\n  first_reply_field_ =\n      (metadata->hasMessageType() && metadata->messageType() == MessageType::Reply);\n  return ProtocolConverter::messageBegin(metadata);\n}\n\nFilterStatus ConnectionManager::ResponseDecoder::fieldBegin(absl::string_view name,\n                                                    
        FieldType& field_type,\n                                                            int16_t& field_id) {\n  if (first_reply_field_) {\n    // Reply messages contain a struct where field 0 is the call result and fields 1+ are\n    // exceptions, if defined. At most one field may be set. Therefore, the very first field we\n    // encounter in a reply is either field 0 (success) or not (IDL exception returned).\n    // If first fieldType is FieldType::Stop then it is a void success and handled in messageEnd()\n    // because decoder state machine does not call decoder event callback fieldBegin on\n    // FieldType::Stop.\n    success_ = (field_id == 0);\n    first_reply_field_ = false;\n  }\n\n  return ProtocolConverter::fieldBegin(name, field_type, field_id);\n}\n\nFilterStatus ConnectionManager::ResponseDecoder::messageEnd() {\n  if (first_reply_field_) {\n    // When the response is thrift void type there is never a fieldBegin call on a success\n    // because the response struct has no fields and so the first field type is FieldType::Stop.\n    // The decoder state machine handles FieldType::Stop by going immediately to structEnd,\n    // skipping fieldBegin callback. 
Therefore if we are still waiting for the first reply field\n    // at end of message then it is a void success.\n    success_ = true;\n    first_reply_field_ = false;\n  }\n\n  return ProtocolConverter::messageEnd();\n}\n\nFilterStatus ConnectionManager::ResponseDecoder::transportEnd() {\n  ASSERT(metadata_ != nullptr);\n\n  ConnectionManager& cm = parent_.parent_;\n\n  if (cm.read_callbacks_->connection().state() == Network::Connection::State::Closed) {\n    complete_ = true;\n    throw EnvoyException(\"downstream connection is closed\");\n  }\n\n  Buffer::OwnedImpl buffer;\n\n  // Use the factory to get the concrete transport from the decoder transport (as opposed to\n  // potentially pre-detection auto transport).\n  TransportPtr transport =\n      NamedTransportConfigFactory::getFactory(parent_.parent_.decoder_->transportType())\n          .createTransport();\n\n  metadata_->setProtocol(parent_.parent_.decoder_->protocolType());\n  transport->encodeFrame(buffer, *metadata_, parent_.response_buffer_);\n  complete_ = true;\n\n  cm.read_callbacks_->connection().write(buffer, false);\n\n  cm.stats_.response_.inc();\n\n  switch (metadata_->messageType()) {\n  case MessageType::Reply:\n    cm.stats_.response_reply_.inc();\n    if (success_.value_or(false)) {\n      cm.stats_.response_success_.inc();\n    } else {\n      cm.stats_.response_error_.inc();\n    }\n\n    break;\n\n  case MessageType::Exception:\n    cm.stats_.response_exception_.inc();\n    break;\n\n  default:\n    cm.stats_.response_invalid_type_.inc();\n    break;\n  }\n\n  return FilterStatus::Continue;\n}\n\nvoid ConnectionManager::ActiveRpcDecoderFilter::continueDecoding() {\n  const FilterStatus status = parent_.applyDecoderFilters(this);\n  if (status == FilterStatus::Continue) {\n    // All filters have been executed for the current decoder state.\n    if (parent_.pending_transport_end_) {\n      // If the filter stack was paused during transportEnd, handle end-of-request details.\n      
parent_.finalizeRequest();\n    }\n\n    parent_.continueDecoding();\n  }\n}\n\nFilterStatus ConnectionManager::ActiveRpc::applyDecoderFilters(ActiveRpcDecoderFilter* filter) {\n  ASSERT(filter_action_ != nullptr);\n\n  if (!local_response_sent_) {\n    if (upgrade_handler_) {\n      // Divert events to the current protocol upgrade handler.\n      const FilterStatus status = filter_action_(upgrade_handler_.get());\n      filter_context_.reset();\n      return status;\n    }\n\n    std::list<ActiveRpcDecoderFilterPtr>::iterator entry;\n    if (!filter) {\n      entry = decoder_filters_.begin();\n    } else {\n      entry = std::next(filter->entry());\n    }\n\n    for (; entry != decoder_filters_.end(); entry++) {\n      const FilterStatus status = filter_action_((*entry)->handle_.get());\n      if (local_response_sent_) {\n        // The filter called sendLocalReply: stop processing filters and return\n        // FilterStatus::Continue irrespective of the current result.\n        break;\n      }\n\n      if (status != FilterStatus::Continue) {\n        return status;\n      }\n    }\n  }\n\n  filter_action_ = nullptr;\n  filter_context_.reset();\n\n  return FilterStatus::Continue;\n}\n\nFilterStatus ConnectionManager::ActiveRpc::transportBegin(MessageMetadataSharedPtr metadata) {\n  filter_context_ = metadata;\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    MessageMetadataSharedPtr metadata = absl::any_cast<MessageMetadataSharedPtr>(filter_context_);\n    return filter->transportBegin(metadata);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::transportEnd() {\n  ASSERT(metadata_ != nullptr);\n\n  FilterStatus status;\n  if (upgrade_handler_) {\n    status = upgrade_handler_->transportEnd();\n\n    if (metadata_->isProtocolUpgradeMessage()) {\n      ENVOY_CONN_LOG(error, \"thrift: sending protocol upgrade response\",\n                     parent_.read_callbacks_->connection());\n      
sendLocalReply(*parent_.protocol_->upgradeResponse(*upgrade_handler_), false);\n    }\n  } else {\n    filter_action_ = [](DecoderEventHandler* filter) -> FilterStatus {\n      return filter->transportEnd();\n    };\n\n    status = applyDecoderFilters(nullptr);\n    if (status == FilterStatus::StopIteration) {\n      pending_transport_end_ = true;\n      return status;\n    }\n  }\n\n  finalizeRequest();\n\n  return status;\n}\n\nvoid ConnectionManager::ActiveRpc::finalizeRequest() {\n  pending_transport_end_ = false;\n\n  parent_.stats_.request_.inc();\n\n  bool destroy_rpc = false;\n  switch (original_msg_type_) {\n  case MessageType::Call:\n    parent_.stats_.request_call_.inc();\n\n    // Local response or protocol upgrade mean we don't wait for an upstream response.\n    destroy_rpc = local_response_sent_ || (upgrade_handler_ != nullptr);\n    break;\n\n  case MessageType::Oneway:\n    parent_.stats_.request_oneway_.inc();\n\n    // No response forthcoming, we're done.\n    destroy_rpc = true;\n    break;\n\n  default:\n    parent_.stats_.request_invalid_type_.inc();\n\n    // Invalid request, implies no response.\n    destroy_rpc = true;\n    break;\n  }\n\n  if (destroy_rpc) {\n    parent_.doDeferredRpcDestroy(*this);\n  }\n}\n\nFilterStatus ConnectionManager::ActiveRpc::messageBegin(MessageMetadataSharedPtr metadata) {\n  ASSERT(metadata->hasSequenceId());\n  ASSERT(metadata->hasMessageType());\n\n  metadata_ = metadata;\n  original_sequence_id_ = metadata_->sequenceId();\n  original_msg_type_ = metadata_->messageType();\n\n  if (metadata_->isProtocolUpgradeMessage()) {\n    ASSERT(parent_.protocol_->supportsUpgrade());\n\n    ENVOY_CONN_LOG(debug, \"thrift: decoding protocol upgrade request\",\n                   parent_.read_callbacks_->connection());\n    upgrade_handler_ = parent_.protocol_->upgradeRequestDecoder();\n    ASSERT(upgrade_handler_ != nullptr);\n  }\n\n  filter_context_ = metadata;\n  filter_action_ = [this](DecoderEventHandler* filter) -> 
FilterStatus {\n    MessageMetadataSharedPtr metadata = absl::any_cast<MessageMetadataSharedPtr>(filter_context_);\n    return filter->messageBegin(metadata);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::messageEnd() {\n  filter_action_ = [](DecoderEventHandler* filter) -> FilterStatus { return filter->messageEnd(); };\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::structBegin(absl::string_view name) {\n  filter_context_ = std::string(name);\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    std::string& name = absl::any_cast<std::string&>(filter_context_);\n    return filter->structBegin(name);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::structEnd() {\n  filter_action_ = [](DecoderEventHandler* filter) -> FilterStatus { return filter->structEnd(); };\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::fieldBegin(absl::string_view name, FieldType& field_type,\n                                                      int16_t& field_id) {\n  filter_context_ =\n      std::tuple<std::string, FieldType, int16_t>(std::string(name), field_type, field_id);\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    std::tuple<std::string, FieldType, int16_t>& t =\n        absl::any_cast<std::tuple<std::string, FieldType, int16_t>&>(filter_context_);\n    std::string& name = std::get<0>(t);\n    FieldType& field_type = std::get<1>(t);\n    int16_t& field_id = std::get<2>(t);\n    return filter->fieldBegin(name, field_type, field_id);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::fieldEnd() {\n  filter_action_ = [](DecoderEventHandler* filter) -> FilterStatus { return filter->fieldEnd(); };\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::boolValue(bool& value) 
{\n  filter_context_ = value;\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    bool& value = absl::any_cast<bool&>(filter_context_);\n    return filter->boolValue(value);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::byteValue(uint8_t& value) {\n  filter_context_ = value;\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    uint8_t& value = absl::any_cast<uint8_t&>(filter_context_);\n    return filter->byteValue(value);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::int16Value(int16_t& value) {\n  filter_context_ = value;\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    int16_t& value = absl::any_cast<int16_t&>(filter_context_);\n    return filter->int16Value(value);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::int32Value(int32_t& value) {\n  filter_context_ = value;\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    int32_t& value = absl::any_cast<int32_t&>(filter_context_);\n    return filter->int32Value(value);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::int64Value(int64_t& value) {\n  filter_context_ = value;\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    int64_t& value = absl::any_cast<int64_t&>(filter_context_);\n    return filter->int64Value(value);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::doubleValue(double& value) {\n  filter_context_ = value;\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    double& value = absl::any_cast<double&>(filter_context_);\n    return filter->doubleValue(value);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::stringValue(absl::string_view value) {\n  
filter_context_ = std::string(value);\n\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    std::string& value = absl::any_cast<std::string&>(filter_context_);\n    return filter->stringValue(value);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::mapBegin(FieldType& key_type, FieldType& value_type,\n                                                    uint32_t& size) {\n  filter_context_ = std::tuple<FieldType, FieldType, uint32_t>(key_type, value_type, size);\n\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    std::tuple<FieldType, FieldType, uint32_t>& t =\n        absl::any_cast<std::tuple<FieldType, FieldType, uint32_t>&>(filter_context_);\n    FieldType& key_type = std::get<0>(t);\n    FieldType& value_type = std::get<1>(t);\n    uint32_t& size = std::get<2>(t);\n    return filter->mapBegin(key_type, value_type, size);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::mapEnd() {\n  filter_action_ = [](DecoderEventHandler* filter) -> FilterStatus { return filter->mapEnd(); };\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::listBegin(FieldType& value_type, uint32_t& size) {\n  filter_context_ = std::tuple<FieldType, uint32_t>(value_type, size);\n\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    std::tuple<FieldType, uint32_t>& t =\n        absl::any_cast<std::tuple<FieldType, uint32_t>&>(filter_context_);\n    FieldType& value_type = std::get<0>(t);\n    uint32_t& size = std::get<1>(t);\n    return filter->listBegin(value_type, size);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::listEnd() {\n  filter_action_ = [](DecoderEventHandler* filter) -> FilterStatus { return filter->listEnd(); };\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus 
ConnectionManager::ActiveRpc::setBegin(FieldType& value_type, uint32_t& size) {\n  filter_context_ = std::tuple<FieldType, uint32_t>(value_type, size);\n\n  filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus {\n    std::tuple<FieldType, uint32_t>& t =\n        absl::any_cast<std::tuple<FieldType, uint32_t>&>(filter_context_);\n    FieldType& value_type = std::get<0>(t);\n    uint32_t& size = std::get<1>(t);\n    return filter->setBegin(value_type, size);\n  };\n\n  return applyDecoderFilters(nullptr);\n}\n\nFilterStatus ConnectionManager::ActiveRpc::setEnd() {\n  filter_action_ = [](DecoderEventHandler* filter) -> FilterStatus { return filter->setEnd(); };\n  return applyDecoderFilters(nullptr);\n}\n\nvoid ConnectionManager::ActiveRpc::createFilterChain() {\n  parent_.config_.filterFactory().createFilterChain(*this);\n}\n\nvoid ConnectionManager::ActiveRpc::onReset() {\n  // TODO(zuercher): e.g., parent_.stats_.named_.downstream_rq_rx_reset_.inc();\n  parent_.doDeferredRpcDestroy(*this);\n}\n\nvoid ConnectionManager::ActiveRpc::onError(const std::string& what) {\n  if (metadata_) {\n    sendLocalReply(AppException(AppExceptionType::ProtocolError, what), true);\n    return;\n  }\n\n  // Transport or protocol error happened before (or during message begin) parsing. 
It's not\n  // possible to provide a valid response, so don't try.\n\n  parent_.doDeferredRpcDestroy(*this);\n  parent_.read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n}\n\nconst Network::Connection* ConnectionManager::ActiveRpc::connection() const {\n  return &parent_.read_callbacks_->connection();\n}\n\nRouter::RouteConstSharedPtr ConnectionManager::ActiveRpc::route() {\n  if (!cached_route_) {\n    if (metadata_ != nullptr) {\n      Router::RouteConstSharedPtr route =\n          parent_.config_.routerConfig().route(*metadata_, stream_id_);\n      cached_route_ = std::move(route);\n    } else {\n      cached_route_ = nullptr;\n    }\n  }\n\n  return cached_route_.value();\n}\n\nvoid ConnectionManager::ActiveRpc::sendLocalReply(const DirectResponse& response, bool end_stream) {\n  metadata_->setSequenceId(original_sequence_id_);\n  parent_.sendLocalReply(*metadata_, response, end_stream);\n\n  if (end_stream) {\n    return;\n  }\n\n  if (!upgrade_handler_) {\n    // Consume any remaining request data from the downstream.\n    local_response_sent_ = true;\n  }\n}\n\nvoid ConnectionManager::ActiveRpc::startUpstreamResponse(Transport& transport, Protocol& protocol) {\n  ASSERT(response_decoder_ == nullptr);\n\n  response_decoder_ = std::make_unique<ResponseDecoder>(*this, transport, protocol);\n}\n\nThriftFilters::ResponseStatus ConnectionManager::ActiveRpc::upstreamData(Buffer::Instance& buffer) {\n  ASSERT(response_decoder_ != nullptr);\n\n  try {\n    if (response_decoder_->onData(buffer)) {\n      // Completed upstream response.\n      parent_.doDeferredRpcDestroy(*this);\n      return ThriftFilters::ResponseStatus::Complete;\n    }\n    return ThriftFilters::ResponseStatus::MoreData;\n  } catch (const AppException& ex) {\n    ENVOY_LOG(error, \"thrift response application error: {}\", ex.what());\n    parent_.stats_.response_decoding_error_.inc();\n\n    sendLocalReply(ex, true);\n    return ThriftFilters::ResponseStatus::Reset;\n  } 
catch (const EnvoyException& ex) {\n    ENVOY_CONN_LOG(error, \"thrift response error: {}\", parent_.read_callbacks_->connection(),\n                   ex.what());\n    parent_.stats_.response_decoding_error_.inc();\n\n    onError(ex.what());\n    return ThriftFilters::ResponseStatus::Reset;\n  }\n}\n\nvoid ConnectionManager::ActiveRpc::resetDownstreamConnection() {\n  parent_.read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/conn_manager.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/logger.h\"\n#include \"common/stats/timespan_impl.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/decoder.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/filter.h\"\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n#include \"extensions/filters/network/thrift_proxy/protocol_converter.h\"\n#include \"extensions/filters/network/thrift_proxy/stats.h\"\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\n#include \"absl/types/any.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * Config is a configuration interface for ConnectionManager.\n */\nclass Config {\npublic:\n  virtual ~Config() = default;\n\n  virtual ThriftFilters::FilterChainFactory& filterFactory() PURE;\n  virtual ThriftFilterStats& stats() PURE;\n  virtual TransportPtr createTransport() PURE;\n  virtual ProtocolPtr createProtocol() PURE;\n  virtual Router::Config& routerConfig() PURE;\n};\n\n/**\n * Extends Upstream::ProtocolOptionsConfig with Thrift-specific cluster options.\n */\nclass ProtocolOptionsConfig : public Upstream::ProtocolOptionsConfig {\npublic:\n  ~ProtocolOptionsConfig() override = default;\n\n  virtual TransportType transport(TransportType downstream_transport) const PURE;\n  virtual ProtocolType protocol(ProtocolType downstream_protocol) const PURE;\n};\n\n/**\n * ConnectionManager is a Network::Filter that will perform Thrift request handling on a connection.\n */\nclass ConnectionManager : public Network::ReadFilter,\n                  
        public Network::ConnectionCallbacks,\n                          public DecoderCallbacks,\n                          Logger::Loggable<Logger::Id::thrift> {\npublic:\n  ConnectionManager(Config& config, Random::RandomGenerator& random_generator,\n                    TimeSource& time_system);\n  ~ConnectionManager() override;\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; }\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override;\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n  // DecoderCallbacks\n  DecoderEventHandler& newDecoderEventHandler() override;\n\nprivate:\n  struct ActiveRpc;\n\n  struct ResponseDecoder : public DecoderCallbacks, public ProtocolConverter {\n    ResponseDecoder(ActiveRpc& parent, Transport& transport, Protocol& protocol)\n        : parent_(parent), decoder_(std::make_unique<Decoder>(transport, protocol, *this)),\n          complete_(false), first_reply_field_(false) {\n      initProtocolConverter(*parent_.parent_.protocol_, parent_.response_buffer_);\n    }\n\n    bool onData(Buffer::Instance& data);\n\n    // ProtocolConverter\n    FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override;\n    FilterStatus messageEnd() override;\n    FilterStatus fieldBegin(absl::string_view name, FieldType& field_type,\n                            int16_t& field_id) override;\n    FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override {\n      UNREFERENCED_PARAMETER(metadata);\n      return FilterStatus::Continue;\n    }\n    FilterStatus transportEnd() override;\n\n    // DecoderCallbacks\n    DecoderEventHandler& newDecoderEventHandler() override { return *this; }\n\n    ActiveRpc& 
parent_;\n    DecoderPtr decoder_;\n    Buffer::OwnedImpl upstream_buffer_;\n    MessageMetadataSharedPtr metadata_;\n    absl::optional<bool> success_;\n    bool complete_ : 1;\n    bool first_reply_field_ : 1;\n  };\n  using ResponseDecoderPtr = std::unique_ptr<ResponseDecoder>;\n\n  // Wraps a DecoderFilter and acts as the DecoderFilterCallbacks for the filter, enabling filter\n  // chain continuation.\n  struct ActiveRpcDecoderFilter : public ThriftFilters::DecoderFilterCallbacks,\n                                  LinkedObject<ActiveRpcDecoderFilter> {\n    ActiveRpcDecoderFilter(ActiveRpc& parent, ThriftFilters::DecoderFilterSharedPtr filter)\n        : parent_(parent), handle_(filter) {}\n\n    // ThriftFilters::DecoderFilterCallbacks\n    uint64_t streamId() const override { return parent_.stream_id_; }\n    const Network::Connection* connection() const override { return parent_.connection(); }\n    void continueDecoding() override;\n    Router::RouteConstSharedPtr route() override { return parent_.route(); }\n    TransportType downstreamTransportType() const override {\n      return parent_.downstreamTransportType();\n    }\n    ProtocolType downstreamProtocolType() const override {\n      return parent_.downstreamProtocolType();\n    }\n    void sendLocalReply(const DirectResponse& response, bool end_stream) override {\n      parent_.sendLocalReply(response, end_stream);\n    }\n    void startUpstreamResponse(Transport& transport, Protocol& protocol) override {\n      parent_.startUpstreamResponse(transport, protocol);\n    }\n    ThriftFilters::ResponseStatus upstreamData(Buffer::Instance& buffer) override {\n      return parent_.upstreamData(buffer);\n    }\n    void resetDownstreamConnection() override { parent_.resetDownstreamConnection(); }\n    StreamInfo::StreamInfo& streamInfo() override { return parent_.streamInfo(); }\n\n    ActiveRpc& parent_;\n    ThriftFilters::DecoderFilterSharedPtr handle_;\n  };\n  using ActiveRpcDecoderFilterPtr = 
std::unique_ptr<ActiveRpcDecoderFilter>;\n\n  // ActiveRpc tracks request/response pairs.\n  struct ActiveRpc : LinkedObject<ActiveRpc>,\n                     public Event::DeferredDeletable,\n                     public DecoderEventHandler,\n                     public ThriftFilters::DecoderFilterCallbacks,\n                     public ThriftFilters::FilterChainFactoryCallbacks {\n    ActiveRpc(ConnectionManager& parent)\n        : parent_(parent), request_timer_(new Stats::HistogramCompletableTimespanImpl(\n                               parent_.stats_.request_time_ms_, parent_.time_source_)),\n          stream_id_(parent_.random_generator_.random()),\n          stream_info_(parent_.time_source_), local_response_sent_{false}, pending_transport_end_{\n                                                                               false} {\n      parent_.stats_.request_active_.inc();\n\n      stream_info_.setDownstreamLocalAddress(parent_.read_callbacks_->connection().localAddress());\n      stream_info_.setDownstreamRemoteAddress(\n          parent_.read_callbacks_->connection().remoteAddress());\n      stream_info_.setDownstreamDirectRemoteAddress(\n          parent_.read_callbacks_->connection().directRemoteAddress());\n    }\n    ~ActiveRpc() override {\n      request_timer_->complete();\n      parent_.stats_.request_active_.dec();\n\n      for (auto& filter : decoder_filters_) {\n        filter->handle_->onDestroy();\n      }\n    }\n\n    // DecoderEventHandler\n    FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override;\n    FilterStatus transportEnd() override;\n    FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override;\n    FilterStatus messageEnd() override;\n    FilterStatus structBegin(absl::string_view name) override;\n    FilterStatus structEnd() override;\n    FilterStatus fieldBegin(absl::string_view name, FieldType& field_type,\n                            int16_t& field_id) override;\n    FilterStatus fieldEnd() 
override;\n    FilterStatus boolValue(bool& value) override;\n    FilterStatus byteValue(uint8_t& value) override;\n    FilterStatus int16Value(int16_t& value) override;\n    FilterStatus int32Value(int32_t& value) override;\n    FilterStatus int64Value(int64_t& value) override;\n    FilterStatus doubleValue(double& value) override;\n    FilterStatus stringValue(absl::string_view value) override;\n    FilterStatus mapBegin(FieldType& key_type, FieldType& value_type, uint32_t& size) override;\n    FilterStatus mapEnd() override;\n    FilterStatus listBegin(FieldType& elem_type, uint32_t& size) override;\n    FilterStatus listEnd() override;\n    FilterStatus setBegin(FieldType& elem_type, uint32_t& size) override;\n    FilterStatus setEnd() override;\n\n    // ThriftFilters::DecoderFilterCallbacks\n    uint64_t streamId() const override { return stream_id_; }\n    const Network::Connection* connection() const override;\n    void continueDecoding() override { parent_.continueDecoding(); }\n    Router::RouteConstSharedPtr route() override;\n    TransportType downstreamTransportType() const override {\n      return parent_.decoder_->transportType();\n    }\n    ProtocolType downstreamProtocolType() const override {\n      return parent_.decoder_->protocolType();\n    }\n    void sendLocalReply(const DirectResponse& response, bool end_stream) override;\n    void startUpstreamResponse(Transport& transport, Protocol& protocol) override;\n    ThriftFilters::ResponseStatus upstreamData(Buffer::Instance& buffer) override;\n    void resetDownstreamConnection() override;\n    StreamInfo::StreamInfo& streamInfo() override { return stream_info_; }\n\n    // Thrift::FilterChainFactoryCallbacks\n    void addDecoderFilter(ThriftFilters::DecoderFilterSharedPtr filter) override {\n      ActiveRpcDecoderFilterPtr wrapper = std::make_unique<ActiveRpcDecoderFilter>(*this, filter);\n      filter->setDecoderFilterCallbacks(*wrapper);\n      LinkedList::moveIntoListBack(std::move(wrapper), 
decoder_filters_);\n    }\n\n    FilterStatus applyDecoderFilters(ActiveRpcDecoderFilter* filter);\n    void finalizeRequest();\n\n    void createFilterChain();\n    void onReset();\n    void onError(const std::string& what);\n\n    ConnectionManager& parent_;\n    Stats::TimespanPtr request_timer_;\n    uint64_t stream_id_;\n    StreamInfo::StreamInfoImpl stream_info_;\n    MessageMetadataSharedPtr metadata_;\n    std::list<ActiveRpcDecoderFilterPtr> decoder_filters_;\n    DecoderEventHandlerSharedPtr upgrade_handler_;\n    ResponseDecoderPtr response_decoder_;\n    absl::optional<Router::RouteConstSharedPtr> cached_route_;\n    Buffer::OwnedImpl response_buffer_;\n    int32_t original_sequence_id_{0};\n    MessageType original_msg_type_{MessageType::Call};\n    std::function<FilterStatus(DecoderEventHandler*)> filter_action_;\n    absl::any filter_context_;\n    bool local_response_sent_ : 1;\n    bool pending_transport_end_ : 1;\n  };\n\n  using ActiveRpcPtr = std::unique_ptr<ActiveRpc>;\n\n  void continueDecoding();\n  void dispatch();\n  void sendLocalReply(MessageMetadata& metadata, const DirectResponse& response, bool end_stream);\n  void doDeferredRpcDestroy(ActiveRpc& rpc);\n  void resetAllRpcs(bool local_reset);\n\n  Config& config_;\n  ThriftFilterStats& stats_;\n\n  Network::ReadFilterCallbacks* read_callbacks_{};\n\n  TransportPtr transport_;\n  ProtocolPtr protocol_;\n  DecoderPtr decoder_;\n  std::list<ActiveRpcPtr> rpcs_;\n  Buffer::OwnedImpl request_buffer_;\n  Random::RandomGenerator& random_generator_;\n  bool stopped_{false};\n  bool half_closed_{false};\n  TimeSource& time_source_;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/conn_state.h",
    "content": "#pragma once\n\n#include \"envoy/tcp/conn_pool.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * ThriftConnectionState tracks thrift-related connection state for pooled connections.\n */\nclass ThriftConnectionState : public Tcp::ConnectionPool::ConnectionState {\npublic:\n  ThriftConnectionState(int32_t initial_sequence_id = 0) : next_sequence_id_(initial_sequence_id) {}\n\n  /**\n   * @return int32_t the next Thrift sequence id to use for this connection.\n   */\n  int32_t nextSequenceId() {\n    if (next_sequence_id_ == std::numeric_limits<int32_t>::max()) {\n      next_sequence_id_ = 0;\n      return std::numeric_limits<int32_t>::max();\n    }\n\n    return next_sequence_id_++;\n  }\n\n  /**\n   * @return true if this upgrade has been attempted on this connection.\n   */\n  bool upgradeAttempted() const { return upgrade_attempted_; }\n  /**\n   * @return true if this connection has been upgraded\n   */\n  bool isUpgraded() const { return upgraded_; }\n\n  /**\n   * Marks the connection as successfully upgraded.\n   */\n  void markUpgraded() {\n    upgrade_attempted_ = true;\n    upgraded_ = true;\n  }\n\n  /**\n   * Marks the connection as not upgraded.\n   */\n  void markUpgradeFailed() {\n    upgrade_attempted_ = true;\n    upgraded_ = false;\n  }\n\nprivate:\n  int32_t next_sequence_id_;\n  bool upgrade_attempted_{false};\n  bool upgraded_{false};\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/decoder.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/decoder.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n\n#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n// MessageBegin -> StructBegin\nDecoderStateMachine::DecoderStatus DecoderStateMachine::messageBegin(Buffer::Instance& buffer) {\n  if (!proto_.readMessageBegin(buffer, *metadata_)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  stack_.clear();\n  stack_.emplace_back(Frame(ProtocolState::MessageEnd));\n\n  return {ProtocolState::StructBegin, handler_.messageBegin(metadata_)};\n}\n\n// MessageEnd -> Done\nDecoderStateMachine::DecoderStatus DecoderStateMachine::messageEnd(Buffer::Instance& buffer) {\n  if (!proto_.readMessageEnd(buffer)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  return {ProtocolState::Done, handler_.messageEnd()};\n}\n\n// StructBegin -> FieldBegin\nDecoderStateMachine::DecoderStatus DecoderStateMachine::structBegin(Buffer::Instance& buffer) {\n  std::string name;\n  if (!proto_.readStructBegin(buffer, name)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  return {ProtocolState::FieldBegin, handler_.structBegin(absl::string_view(name))};\n}\n\n// StructEnd -> stack's return state\nDecoderStateMachine::DecoderStatus DecoderStateMachine::structEnd(Buffer::Instance& buffer) {\n  if (!proto_.readStructEnd(buffer)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  ProtocolState next_state = popReturnState();\n  return {next_state, handler_.structEnd()};\n}\n\n// FieldBegin -> FieldValue, or\n// FieldBegin -> StructEnd (stop field)\nDecoderStateMachine::DecoderStatus DecoderStateMachine::fieldBegin(Buffer::Instance& buffer) {\n  std::string name;\n  FieldType field_type;\n  int16_t field_id;\n  if (!proto_.readFieldBegin(buffer, name, field_type, field_id)) {\n 
   return {ProtocolState::WaitForData};\n  }\n\n  if (field_type == FieldType::Stop) {\n    return {ProtocolState::StructEnd, FilterStatus::Continue};\n  }\n\n  stack_.emplace_back(Frame(ProtocolState::FieldEnd, field_type));\n\n  return {ProtocolState::FieldValue,\n          handler_.fieldBegin(absl::string_view(name), field_type, field_id)};\n}\n\n// FieldValue -> FieldEnd (via stack return state)\nDecoderStateMachine::DecoderStatus DecoderStateMachine::fieldValue(Buffer::Instance& buffer) {\n  ASSERT(!stack_.empty());\n\n  Frame& frame = stack_.back();\n  return handleValue(buffer, frame.elem_type_, frame.return_state_);\n}\n\n// FieldEnd -> FieldBegin\nDecoderStateMachine::DecoderStatus DecoderStateMachine::fieldEnd(Buffer::Instance& buffer) {\n  if (!proto_.readFieldEnd(buffer)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  popReturnState();\n\n  return {ProtocolState::FieldBegin, handler_.fieldEnd()};\n}\n\n// ListBegin -> ListValue\nDecoderStateMachine::DecoderStatus DecoderStateMachine::listBegin(Buffer::Instance& buffer) {\n  FieldType elem_type;\n  uint32_t size;\n  if (!proto_.readListBegin(buffer, elem_type, size)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  stack_.emplace_back(Frame(ProtocolState::ListEnd, elem_type, size));\n\n  return {ProtocolState::ListValue, handler_.listBegin(elem_type, size)};\n}\n\n// ListValue -> ListValue, ListBegin, MapBegin, SetBegin, StructBegin (depending on value type), or\n// ListValue -> ListEnd\nDecoderStateMachine::DecoderStatus DecoderStateMachine::listValue(Buffer::Instance& buffer) {\n  ASSERT(!stack_.empty());\n  const uint32_t index = stack_.size() - 1;\n  if (stack_[index].remaining_ == 0) {\n    return {popReturnState(), FilterStatus::Continue};\n  }\n  DecoderStatus status = handleValue(buffer, stack_[index].elem_type_, ProtocolState::ListValue);\n  if (status.next_state_ != ProtocolState::WaitForData) {\n    stack_[index].remaining_--;\n  }\n\n  return status;\n}\n\n// ListEnd -> stack's 
return state\nDecoderStateMachine::DecoderStatus DecoderStateMachine::listEnd(Buffer::Instance& buffer) {\n  if (!proto_.readListEnd(buffer)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  ProtocolState next_state = popReturnState();\n  return {next_state, handler_.listEnd()};\n}\n\n// MapBegin -> MapKey\nDecoderStateMachine::DecoderStatus DecoderStateMachine::mapBegin(Buffer::Instance& buffer) {\n  FieldType key_type, value_type;\n  uint32_t size;\n  if (!proto_.readMapBegin(buffer, key_type, value_type, size)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  stack_.emplace_back(Frame(ProtocolState::MapEnd, key_type, value_type, size));\n\n  return {ProtocolState::MapKey, handler_.mapBegin(key_type, value_type, size)};\n}\n\n// MapKey -> MapValue, ListBegin, MapBegin, SetBegin, StructBegin (depending on key type), or\n// MapKey -> MapEnd\nDecoderStateMachine::DecoderStatus DecoderStateMachine::mapKey(Buffer::Instance& buffer) {\n  ASSERT(!stack_.empty());\n  Frame& frame = stack_.back();\n  if (frame.remaining_ == 0) {\n    return {popReturnState(), FilterStatus::Continue};\n  }\n\n  return handleValue(buffer, frame.elem_type_, ProtocolState::MapValue);\n}\n\n// MapValue -> MapKey, ListBegin, MapBegin, SetBegin, StructBegin (depending on value type), or\n// MapValue -> MapKey\nDecoderStateMachine::DecoderStatus DecoderStateMachine::mapValue(Buffer::Instance& buffer) {\n  ASSERT(!stack_.empty());\n  const uint32_t index = stack_.size() - 1;\n  ASSERT(stack_[index].remaining_ != 0);\n  DecoderStatus status = handleValue(buffer, stack_[index].value_type_, ProtocolState::MapKey);\n  if (status.next_state_ != ProtocolState::WaitForData) {\n    stack_[index].remaining_--;\n  }\n\n  return status;\n}\n\n// MapEnd -> stack's return state\nDecoderStateMachine::DecoderStatus DecoderStateMachine::mapEnd(Buffer::Instance& buffer) {\n  if (!proto_.readMapEnd(buffer)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  ProtocolState next_state = 
popReturnState();\n  return {next_state, handler_.mapEnd()};\n}\n\n// SetBegin -> SetValue\nDecoderStateMachine::DecoderStatus DecoderStateMachine::setBegin(Buffer::Instance& buffer) {\n  FieldType elem_type;\n  uint32_t size;\n  if (!proto_.readSetBegin(buffer, elem_type, size)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  stack_.emplace_back(Frame(ProtocolState::SetEnd, elem_type, size));\n\n  return {ProtocolState::SetValue, handler_.setBegin(elem_type, size)};\n}\n\n// SetValue -> SetValue, ListBegin, MapBegin, SetBegin, StructBegin (depending on value type), or\n// SetValue -> SetEnd\nDecoderStateMachine::DecoderStatus DecoderStateMachine::setValue(Buffer::Instance& buffer) {\n  ASSERT(!stack_.empty());\n  const uint32_t index = stack_.size() - 1;\n  if (stack_[index].remaining_ == 0) {\n    return {popReturnState(), FilterStatus::Continue};\n  }\n  DecoderStatus status = handleValue(buffer, stack_[index].elem_type_, ProtocolState::SetValue);\n  if (status.next_state_ != ProtocolState::WaitForData) {\n    stack_[index].remaining_--;\n  }\n\n  return status;\n}\n\n// SetEnd -> stack's return state\nDecoderStateMachine::DecoderStatus DecoderStateMachine::setEnd(Buffer::Instance& buffer) {\n  if (!proto_.readSetEnd(buffer)) {\n    return {ProtocolState::WaitForData};\n  }\n\n  ProtocolState next_state = popReturnState();\n  return {next_state, handler_.setEnd()};\n}\n\nDecoderStateMachine::DecoderStatus DecoderStateMachine::handleValue(Buffer::Instance& buffer,\n                                                                    FieldType elem_type,\n                                                                    ProtocolState return_state) {\n  switch (elem_type) {\n  case FieldType::Bool: {\n    bool value{};\n    if (proto_.readBool(buffer, value)) {\n      return {return_state, handler_.boolValue(value)};\n    }\n    break;\n  }\n  case FieldType::Byte: {\n    uint8_t value{};\n    if (proto_.readByte(buffer, value)) {\n      return 
{return_state, handler_.byteValue(value)};\n    }\n    break;\n  }\n  case FieldType::I16: {\n    int16_t value{};\n    if (proto_.readInt16(buffer, value)) {\n      return {return_state, handler_.int16Value(value)};\n    }\n    break;\n  }\n  case FieldType::I32: {\n    int32_t value{};\n    if (proto_.readInt32(buffer, value)) {\n      return {return_state, handler_.int32Value(value)};\n    }\n    break;\n  }\n  case FieldType::I64: {\n    int64_t value{};\n    if (proto_.readInt64(buffer, value)) {\n      return {return_state, handler_.int64Value(value)};\n    }\n    break;\n  }\n  case FieldType::Double: {\n    double value{};\n    if (proto_.readDouble(buffer, value)) {\n      return {return_state, handler_.doubleValue(value)};\n    }\n    break;\n  }\n  case FieldType::String: {\n    std::string value;\n    if (proto_.readString(buffer, value)) {\n      return {return_state, handler_.stringValue(value)};\n    }\n    break;\n  }\n  case FieldType::Struct:\n    stack_.emplace_back(Frame(return_state));\n    return {ProtocolState::StructBegin, FilterStatus::Continue};\n  case FieldType::Map:\n    stack_.emplace_back(Frame(return_state));\n    return {ProtocolState::MapBegin, FilterStatus::Continue};\n  case FieldType::List:\n    stack_.emplace_back(Frame(return_state));\n    return {ProtocolState::ListBegin, FilterStatus::Continue};\n  case FieldType::Set:\n    stack_.emplace_back(Frame(return_state));\n    return {ProtocolState::SetBegin, FilterStatus::Continue};\n  default:\n    throw EnvoyException(fmt::format(\"unknown field type {}\", static_cast<int8_t>(elem_type)));\n  }\n\n  return {ProtocolState::WaitForData};\n}\n\nDecoderStateMachine::DecoderStatus DecoderStateMachine::handleState(Buffer::Instance& buffer) {\n  switch (state_) {\n  case ProtocolState::MessageBegin:\n    return messageBegin(buffer);\n  case ProtocolState::StructBegin:\n    return structBegin(buffer);\n  case ProtocolState::StructEnd:\n    return structEnd(buffer);\n  case 
ProtocolState::FieldBegin:\n    return fieldBegin(buffer);\n  case ProtocolState::FieldValue:\n    return fieldValue(buffer);\n  case ProtocolState::FieldEnd:\n    return fieldEnd(buffer);\n  case ProtocolState::ListBegin:\n    return listBegin(buffer);\n  case ProtocolState::ListValue:\n    return listValue(buffer);\n  case ProtocolState::ListEnd:\n    return listEnd(buffer);\n  case ProtocolState::MapBegin:\n    return mapBegin(buffer);\n  case ProtocolState::MapKey:\n    return mapKey(buffer);\n  case ProtocolState::MapValue:\n    return mapValue(buffer);\n  case ProtocolState::MapEnd:\n    return mapEnd(buffer);\n  case ProtocolState::SetBegin:\n    return setBegin(buffer);\n  case ProtocolState::SetValue:\n    return setValue(buffer);\n  case ProtocolState::SetEnd:\n    return setEnd(buffer);\n  case ProtocolState::MessageEnd:\n    return messageEnd(buffer);\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nProtocolState DecoderStateMachine::popReturnState() {\n  ASSERT(!stack_.empty());\n  ProtocolState return_state = stack_.back().return_state_;\n  stack_.pop_back();\n  return return_state;\n}\n\nProtocolState DecoderStateMachine::run(Buffer::Instance& buffer) {\n  while (state_ != ProtocolState::Done) {\n    ENVOY_LOG(trace, \"thrift: state {}, {} bytes available\", ProtocolStateNameValues::name(state_),\n              buffer.length());\n\n    DecoderStatus s = handleState(buffer);\n    if (s.next_state_ == ProtocolState::WaitForData) {\n      return ProtocolState::WaitForData;\n    }\n\n    state_ = s.next_state_;\n\n    ASSERT(s.filter_status_.has_value());\n    if (s.filter_status_.value() == FilterStatus::StopIteration) {\n      return ProtocolState::StopIteration;\n    }\n  }\n\n  return state_;\n}\n\nDecoder::Decoder(Transport& transport, Protocol& protocol, DecoderCallbacks& callbacks)\n    : transport_(transport), protocol_(protocol), callbacks_(callbacks) {}\n\nvoid Decoder::complete() {\n  request_.reset();\n  state_machine_ = nullptr;\n  
frame_started_ = false;\n  frame_ended_ = false;\n}\n\nFilterStatus Decoder::onData(Buffer::Instance& data, bool& buffer_underflow) {\n  ENVOY_LOG(debug, \"thrift: {} bytes available\", data.length());\n  buffer_underflow = false;\n\n  if (frame_ended_) {\n    // Continuation after filter stopped iteration on transportComplete callback.\n    complete();\n    buffer_underflow = (data.length() == 0);\n    return FilterStatus::Continue;\n  }\n\n  if (!frame_started_) {\n    // Look for start of next frame.\n    if (!metadata_) {\n      metadata_ = std::make_shared<MessageMetadata>();\n    }\n\n    if (!transport_.decodeFrameStart(data, *metadata_)) {\n      ENVOY_LOG(debug, \"thrift: need more data for {} transport start\", transport_.name());\n      buffer_underflow = true;\n      return FilterStatus::Continue;\n    }\n    ENVOY_LOG(debug, \"thrift: {} transport started\", transport_.name());\n\n    if (metadata_->hasProtocol()) {\n      if (protocol_.type() == ProtocolType::Auto) {\n        protocol_.setType(metadata_->protocol());\n        ENVOY_LOG(debug, \"thrift: {} transport forced {} protocol\", transport_.name(),\n                  protocol_.name());\n      } else if (metadata_->protocol() != protocol_.type()) {\n        throw EnvoyException(fmt::format(\"transport reports protocol {}, but configured for {}\",\n                                         ProtocolNames::get().fromType(metadata_->protocol()),\n                                         ProtocolNames::get().fromType(protocol_.type())));\n      }\n    }\n    if (metadata_->hasAppException()) {\n      AppExceptionType ex_type = metadata_->appExceptionType();\n      std::string ex_msg = metadata_->appExceptionMessage();\n      // Force new metadata if we get called again.\n      metadata_.reset();\n      throw AppException(ex_type, ex_msg);\n    }\n\n    request_ = std::make_unique<ActiveRequest>(callbacks_.newDecoderEventHandler());\n    frame_started_ = true;\n    state_machine_ =\n        
std::make_unique<DecoderStateMachine>(protocol_, metadata_, request_->handler_);\n\n    if (request_->handler_.transportBegin(metadata_) == FilterStatus::StopIteration) {\n      return FilterStatus::StopIteration;\n    }\n  }\n\n  ASSERT(state_machine_ != nullptr);\n\n  ENVOY_LOG(debug, \"thrift: protocol {}, state {}, {} bytes available\", protocol_.name(),\n            ProtocolStateNameValues::name(state_machine_->currentState()), data.length());\n\n  ProtocolState rv = state_machine_->run(data);\n  if (rv == ProtocolState::WaitForData) {\n    ENVOY_LOG(debug, \"thrift: wait for data\");\n    buffer_underflow = true;\n    return FilterStatus::Continue;\n  } else if (rv == ProtocolState::StopIteration) {\n    ENVOY_LOG(debug, \"thrift: wait for continuation\");\n    return FilterStatus::StopIteration;\n  }\n\n  ASSERT(rv == ProtocolState::Done);\n\n  // Message complete, decode end of frame.\n  if (!transport_.decodeFrameEnd(data)) {\n    ENVOY_LOG(debug, \"thrift: need more data for {} transport end\", transport_.name());\n    buffer_underflow = true;\n    return FilterStatus::Continue;\n  }\n\n  frame_ended_ = true;\n  metadata_.reset();\n\n  ENVOY_LOG(debug, \"thrift: {} transport ended\", transport_.name());\n  if (request_->handler_.transportEnd() == FilterStatus::StopIteration) {\n    return FilterStatus::StopIteration;\n  }\n\n  // Reset for next frame.\n  complete();\n  buffer_underflow = (data.length() == 0);\n  return FilterStatus::Continue;\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/decoder.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/thrift_proxy/filters/filter.h\"\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n#define ALL_PROTOCOL_STATES(FUNCTION)                                                              \\\n  FUNCTION(StopIteration)                                                                          \\\n  FUNCTION(WaitForData)                                                                            \\\n  FUNCTION(MessageBegin)                                                                           \\\n  FUNCTION(MessageEnd)                                                                             \\\n  FUNCTION(StructBegin)                                                                            \\\n  FUNCTION(StructEnd)                                                                              \\\n  FUNCTION(FieldBegin)                                                                             \\\n  FUNCTION(FieldValue)                                                                             \\\n  FUNCTION(FieldEnd)                                                                               \\\n  FUNCTION(MapBegin)                                                                               \\\n  FUNCTION(MapKey)                                                                                 \\\n  FUNCTION(MapValue)                                                                               \\\n  FUNCTION(MapEnd)                                                                                 \\\n  FUNCTION(ListBegin)                                                                              \\\n  
FUNCTION(ListValue)                                                                              \\\n  FUNCTION(ListEnd)                                                                                \\\n  FUNCTION(SetBegin)                                                                               \\\n  FUNCTION(SetValue)                                                                               \\\n  FUNCTION(SetEnd)                                                                                 \\\n  FUNCTION(Done)\n\n/**\n * ProtocolState represents a set of states used in a state machine to decode Thrift requests\n * and responses.\n */\nenum class ProtocolState { ALL_PROTOCOL_STATES(GENERATE_ENUM) };\n\nclass ProtocolStateNameValues {\npublic:\n  static const std::string& name(ProtocolState state) {\n    size_t i = static_cast<size_t>(state);\n    ASSERT(i < names().size());\n    return names()[i];\n  }\n\nprivate:\n  static const std::vector<std::string>& names() {\n    CONSTRUCT_ON_FIRST_USE(std::vector<std::string>, {ALL_PROTOCOL_STATES(GENERATE_STRING)});\n  }\n};\n\n/**\n * DecoderStateMachine is the Thrift message state machine as described in\n * source/extensions/filters/network/thrift_proxy/docs.\n */\nclass DecoderStateMachine : public Logger::Loggable<Logger::Id::thrift> {\npublic:\n  DecoderStateMachine(Protocol& proto, MessageMetadataSharedPtr& metadata,\n                      DecoderEventHandler& handler)\n      : proto_(proto), metadata_(metadata), handler_(handler), state_(ProtocolState::MessageBegin) {\n  }\n\n  /**\n   * Consumes as much data from the configured Buffer as possible and executes the decoding state\n   * machine. Returns ProtocolState::WaitForData if more data is required to complete processing of\n   * a message. 
Returns ProtocolState::Done when the end of a message is successfully processed.\n   * Once the Done state is reached, further invocations of run return immediately with Done.\n   *\n   * @param buffer a buffer containing the remaining data to be processed\n   * @return ProtocolState returns with ProtocolState::WaitForData or ProtocolState::Done\n   * @throw Envoy Exception if thrown by the underlying Protocol\n   */\n  ProtocolState run(Buffer::Instance& buffer);\n\n  /**\n   * @return the current ProtocolState\n   */\n  ProtocolState currentState() const { return state_; }\n\n  /**\n   * Set the current state. Used for testing only.\n   */\n  void setCurrentState(ProtocolState state) { state_ = state; }\n\nprivate:\n  /**\n   * Frame encodes information about the return state for nested elements, container element types,\n   * and the number of remaining container elements.\n   */\n  struct Frame {\n    Frame(ProtocolState state) : return_state_(state), elem_type_{}, value_type_{}, remaining_(0) {}\n    Frame(ProtocolState state, FieldType elem_type)\n        : return_state_(state), elem_type_(elem_type), value_type_{}, remaining_{} {}\n    Frame(ProtocolState state, FieldType elem_type, uint32_t remaining)\n        : return_state_(state), elem_type_(elem_type), value_type_{}, remaining_(remaining) {}\n    Frame(ProtocolState state, FieldType key_type, FieldType value_type, uint32_t remaining)\n        : return_state_(state), elem_type_(key_type), value_type_(value_type),\n          remaining_(remaining) {}\n\n    // Structs, lists, maps, and sets may be recursively nested in any combination. 
This field\n    // indicates which state to return to at the completion of each of those types.\n    const ProtocolState return_state_;\n\n    // Indicates the element type for lists and sets or the key type for a map.\n    const FieldType elem_type_;\n\n    // Indicates the value type for a map.\n    const FieldType value_type_;\n\n    // Indicates the number of elements (or key-value pairs) remaining in a list, map, or set.\n    uint32_t remaining_;\n  };\n\n  struct DecoderStatus {\n    DecoderStatus(ProtocolState next_state) : next_state_(next_state){};\n    DecoderStatus(ProtocolState next_state, FilterStatus filter_status)\n        : next_state_(next_state), filter_status_(filter_status){};\n\n    ProtocolState next_state_;\n    absl::optional<FilterStatus> filter_status_;\n  };\n\n  // These functions map directly to the matching ProtocolState values. Each returns the next state\n  // or ProtocolState::WaitForData if more data is required.\n  DecoderStatus messageBegin(Buffer::Instance& buffer);\n  DecoderStatus messageEnd(Buffer::Instance& buffer);\n  DecoderStatus structBegin(Buffer::Instance& buffer);\n  DecoderStatus structEnd(Buffer::Instance& buffer);\n  DecoderStatus fieldBegin(Buffer::Instance& buffer);\n  DecoderStatus fieldValue(Buffer::Instance& buffer);\n  DecoderStatus fieldEnd(Buffer::Instance& buffer);\n  DecoderStatus listBegin(Buffer::Instance& buffer);\n  DecoderStatus listValue(Buffer::Instance& buffer);\n  DecoderStatus listEnd(Buffer::Instance& buffer);\n  DecoderStatus mapBegin(Buffer::Instance& buffer);\n  DecoderStatus mapKey(Buffer::Instance& buffer);\n  DecoderStatus mapValue(Buffer::Instance& buffer);\n  DecoderStatus mapEnd(Buffer::Instance& buffer);\n  DecoderStatus setBegin(Buffer::Instance& buffer);\n  DecoderStatus setValue(Buffer::Instance& buffer);\n  DecoderStatus setEnd(Buffer::Instance& buffer);\n\n  // handleValue represents the generic Value state from the state machine documentation. 
It\n  // returns either ProtocolState::WaitForData if more data is required or the next state. For\n  // structs, lists, maps, or sets the return_state is pushed onto the stack and the next state is\n  // based on elem_type. For primitive value types, return_state is returned as the next state\n  // (unless WaitForData is returned).\n  DecoderStatus handleValue(Buffer::Instance& buffer, FieldType elem_type,\n                            ProtocolState return_state);\n\n  // handleState delegates to the appropriate method based on state_.\n  DecoderStatus handleState(Buffer::Instance& buffer);\n\n  // Helper method to retrieve the current frame's return state and remove the frame from the\n  // stack.\n  ProtocolState popReturnState();\n\n  Protocol& proto_;\n  MessageMetadataSharedPtr metadata_;\n  DecoderEventHandler& handler_;\n  ProtocolState state_;\n  std::vector<Frame> stack_;\n};\n\nusing DecoderStateMachinePtr = std::unique_ptr<DecoderStateMachine>;\n\nclass DecoderCallbacks {\npublic:\n  virtual ~DecoderCallbacks() = default;\n\n  /**\n   * @return DecoderEventHandler& a new DecoderEventHandler for a message.\n   */\n  virtual DecoderEventHandler& newDecoderEventHandler() PURE;\n};\n\n/**\n * Decoder encapsulates a configured Transport and Protocol and provides the ability to decode\n * Thrift messages.\n */\nclass Decoder : public Logger::Loggable<Logger::Id::thrift> {\npublic:\n  Decoder(Transport& transport, Protocol& protocol, DecoderCallbacks& callbacks);\n\n  /**\n   * Drains data from the given buffer while executing a state machine over the data.\n   *\n   * @param data a Buffer containing Thrift protocol data\n   * @param buffer_underflow bool set to true if more data is required to continue decoding\n   * @return FilterStatus::StopIteration when waiting for filter continuation,\n   *             Continue otherwise.\n   * @throw EnvoyException on Thrift protocol errors\n   */\n  FilterStatus onData(Buffer::Instance& data, bool& 
buffer_underflow);\n\n  TransportType transportType() { return transport_.type(); }\n  ProtocolType protocolType() { return protocol_.type(); }\n\nprivate:\n  struct ActiveRequest {\n    ActiveRequest(DecoderEventHandler& handler) : handler_(handler) {}\n\n    DecoderEventHandler& handler_;\n  };\n  using ActiveRequestPtr = std::unique_ptr<ActiveRequest>;\n\n  void complete();\n\n  Transport& transport_;\n  Protocol& protocol_;\n  DecoderCallbacks& callbacks_;\n  ActiveRequestPtr request_;\n  MessageMetadataSharedPtr metadata_;\n  DecoderStateMachinePtr state_machine_;\n  bool frame_started_{false};\n  bool frame_ended_{false};\n};\n\nusing DecoderPtr = std::unique_ptr<Decoder>;\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/decoder_events.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n#include \"extensions/filters/network/thrift_proxy/thrift.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nenum class FilterStatus {\n  // Continue filter chain iteration.\n  Continue,\n\n  // Stop iterating over filters in the filter chain. Iteration must be explicitly restarted via\n  // continueDecoding().\n  StopIteration\n};\n\nclass DecoderEventHandler {\npublic:\n  virtual ~DecoderEventHandler() = default;\n\n  /**\n   * Indicates the start of a Thrift transport frame was detected. Unframed transports generate\n   * simulated start messages.\n   * @param metadata MessageMetadataSharedPtr describing as much as is currently known about the\n   *                                          message\n   */\n  virtual FilterStatus transportBegin(MessageMetadataSharedPtr metadata) PURE;\n\n  /**\n   * Indicates the end of a Thrift transport frame was detected. 
Unframed transport generate\n   * simulated complete messages.\n   */\n  virtual FilterStatus transportEnd() PURE;\n\n  /**\n   * Indicates that the start of a Thrift protocol message was detected.\n   * @param metadata MessageMetadataSharedPtr describing the message\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus messageBegin(MessageMetadataSharedPtr metadata) PURE;\n\n  /**\n   * Indicates that the end of a Thrift protocol message was detected.\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus messageEnd() PURE;\n\n  /**\n   * Indicates that the start of a Thrift protocol struct was detected.\n   * @param name the name of the struct, if available\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus structBegin(absl::string_view name) PURE;\n\n  /**\n   * Indicates that the end of a Thrift protocol struct was detected.\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus structEnd() PURE;\n\n  /**\n   * Indicates that the start of Thrift protocol struct field was detected.\n   * @param name the name of the field, if available\n   * @param field_type the type of the field\n   * @param field_id the field id\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus fieldBegin(absl::string_view name, FieldType& field_type,\n                                  int16_t& field_id) PURE;\n\n  /**\n   * Indicates that the end of a Thrift protocol struct field was detected.\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus fieldEnd() PURE;\n\n  /**\n   * A struct field, map key, map value, list element or set element was detected.\n   * @param value type value of the field\n   * @return FilterStatus to indicate if filter 
chain iteration should continue\n   */\n  virtual FilterStatus boolValue(bool& value) PURE;\n  virtual FilterStatus byteValue(uint8_t& value) PURE;\n  virtual FilterStatus int16Value(int16_t& value) PURE;\n  virtual FilterStatus int32Value(int32_t& value) PURE;\n  virtual FilterStatus int64Value(int64_t& value) PURE;\n  virtual FilterStatus doubleValue(double& value) PURE;\n  virtual FilterStatus stringValue(absl::string_view value) PURE;\n\n  /**\n   * Indicates the start of a Thrift protocol map was detected.\n   * @param key_type the map key type\n   * @param value_type the map value type\n   * @param size the number of key-value pairs\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus mapBegin(FieldType& key_type, FieldType& value_type, uint32_t& size) PURE;\n\n  /**\n   * Indicates that the end of a Thrift protocol map was detected.\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus mapEnd() PURE;\n\n  /**\n   * Indicates the start of a Thrift protocol list was detected.\n   * @param elem_type the list value type\n   * @param size the number of values in the list\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus listBegin(FieldType& elem_type, uint32_t& size) PURE;\n\n  /**\n   * Indicates that the end of a Thrift protocol list was detected.\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus listEnd() PURE;\n\n  /**\n   * Indicates the start of a Thrift protocol set was detected.\n   * @param elem_type the set value type\n   * @param size the number of values in the set\n   * @return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus setBegin(FieldType& elem_type, uint32_t& size) PURE;\n\n  /**\n   * Indicates that the end of a Thrift protocol set was detected.\n   * 
@return FilterStatus to indicate if filter chain iteration should continue\n   */\n  virtual FilterStatus setEnd() PURE;\n};\n\nusing DecoderEventHandlerSharedPtr = std::shared_ptr<DecoderEventHandler>;\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/docs/thrift_state_machine.dot",
    "content": "/*\n  graphviz dot file that renders the Thrift Decoder state machine.\n\n  Rendered via: dot -Tsvg -o thrift_state_machine.svg thrift_state_machine.dot\n*/\ndigraph {\n  {\n    rank = same;\n    Start;\n    MessageBegin;\n    MessageEnd;\n    Done;\n  };\n\n  {\n    rank = same;\n    StructBegin;\n    StructEnd;\n  };\n\n  {\n    rank = same;\n    FieldBegin;\n    FieldEnd;\n  };\n\n  {\n    rank = same;\n    Value;\n  };\n\n  {\n    rank = same;\n    ListBegin;\n    ListEnd;\n    MapBegin;\n    MapEnd;\n    SetBegin;\n    SetEnd;\n    StructValueBegin;\n    StructValueEnd;\n  };\n\n  {\n    rank = same;\n    StructValueFieldBegin;\n    StructValueFieldEnd;\n  };\n\n  Start -> MessageBegin;\n\n  MessageBegin -> StructBegin;\n\n  StructBegin -> FieldBegin;\n\n  FieldBegin -> Value;\n  FieldBegin -> StructEnd;\n\n  Value -> FieldEnd;\n\n  FieldEnd -> FieldBegin;\n\n  StructEnd -> MessageEnd;\n\n  MessageEnd -> Done;\n\n  ListValue [label=\"Value\"];\n  Value -> ListBegin;\n  ListBegin -> ListValue;\n  ListValue -> ListValue [label=\"size\"];\n  ListValue -> ListEnd;\n  ListEnd -> Value;\n\n  MapKeyValue [label=\"Key (Value)\"];\n  MapValueValue [label=\"Value\"];\n  Value -> MapBegin;\n  MapBegin -> MapKeyValue;\n  MapKeyValue:sw -> MapValueValue:nw;\n  MapKeyValue -> MapEnd;\n  MapValueValue:ne -> MapKeyValue:se [label=\"size\"];\n  MapEnd -> Value;\n\n  SetValue [label=\"Value\"];\n  Value -> SetBegin;\n  SetBegin -> SetValue;\n  SetValue -> SetValue [label=\"size\"];\n  SetValue -> SetEnd;\n  SetEnd -> Value;\n\n  StructValueBegin [label=\"StructBegin\"];\n  StructValueEnd [label=\"StructEnd\"];\n  StructValueFieldBegin [label=\"FieldBegin\"];\n  StructValueFieldEnd [label=\"FieldEnd\"];\n  StructValueValue [label=\"Value\"];\n  Value -> StructValueBegin;\n  StructValueBegin -> StructValueFieldBegin;\n  StructValueFieldBegin -> StructValueValue;\n  StructValueFieldBegin -> StructValueEnd;\n  StructValueValue -> StructValueFieldEnd;\n  
StructValueFieldEnd -> StructValueFieldBegin;\n  StructValueEnd -> Value;\n\n  graph [label=\"Thrift Decoder State Machine.\\n\\nStates appear in multiple locations to simplify the graph of transitions.\"];\n\n  Start [style=filled, fillcolor=\"#cccccc\"];\n  Done [style=filled, fillcolor=\"#cccccc\"];\n\n  /* force ordering within ranks */\n  MessageBegin -> MessageEnd [style=invis];\n  StructBegin -> StructEnd [style=invis];\n  FieldBegin -> FieldEnd [style=invis];\n  ListBegin -> ListEnd -> MapBegin -> MapEnd -> SetBegin -> SetEnd -> StructValueBegin ->\n    StructValueEnd [style=invis];\n  StructValueFieldBegin -> StructValueFieldEnd [style=invis];\n}\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/docs/thrift_state_machine.md",
    "content": "Thrift Decoder State Machine\n============================\n\n![Thrift Decoder State Machine](thrift_state_machine.svg)\n\nDecoding Thrift messages can be visualized as a series of state\ntransitions. Each state corresponds to the invocation of a function on\nthe `ThriftProxy::Protocol` interface. Not pictured in the state\ndiagram is the transient `WaitForData` state which implies that\ninsufficient data was available to process a state and that the state\nmachine should be resumed when more data is available.\n\nThe `Value` states in the diagram are represented by multiple states\nin the actual state machine (`FieldValue`, `ListValue`, `MapKey`,\n`MapValue`, and `SetValue`). The various value states encode the\ntransition from, for example, a list value (`ListValue`) to the\n`ListEnd` state.\n\nThe state machine tracks \"frames\" which allow the state machine to\nrecord information about nested lists, maps, sets, and\nstructures. Thrift allows these data types to be nested in arbitrary\ncombinations and the frame records the state to return to at the end\nof each type. For lists, maps, and sets the frame also records the\nnumber of remaining elements.\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"filter_config_interface\",\n    hdrs = [\"filter_config.h\"],\n    deps = [\n        \":filter_interface\",\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/common:macros\",\n        \"//source/common/protobuf:cc_wkt_protos\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"factory_base_lib\",\n    hdrs = [\"factory_base.h\"],\n    deps = [\n        \":filter_config_interface\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_interface\",\n    hdrs = [\"filter.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/extensions/filters/network/thrift_proxy:decoder_events_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:protocol_interface\",\n        \"//source/extensions/filters/network/thrift_proxy:thrift_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"pass_through_filter_lib\",\n    hdrs = [\"pass_through_filter.h\"],\n    deps = [\n        \":filter_interface\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/factory_base.h",
    "content": "#pragma once\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/thrift_proxy/filters/filter_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace ThriftFilters {\n\ntemplate <class ConfigProto> class FactoryBase : public NamedThriftFilterConfigFactory {\npublic:\n  FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& proto_config,\n                               const std::string& stats_prefix,\n                               Server::Configuration::FactoryContext& context) override {\n    return createFilterFactoryFromProtoTyped(MessageUtil::downcastAndValidate<const ConfigProto&>(\n                                                 proto_config, context.messageValidationVisitor()),\n                                             stats_prefix, context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ConfigProto>();\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  FactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  virtual FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const ConfigProto& proto_config,\n                                    const std::string& stats_prefix,\n                                    Server::Configuration::FactoryContext& context) PURE;\n\n  const std::string name_;\n};\n\n} // namespace ThriftFilters\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/filter.h",
    "content": "#pragma once\n\n#include <list>\n#include <string>\n#include <utility>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"extensions/filters/network/thrift_proxy/decoder_events.h\"\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router.h\"\n#include \"extensions/filters/network/thrift_proxy/thrift.h\"\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace ThriftFilters {\n\nenum class ResponseStatus {\n  MoreData = 0, // The upstream response requires more data.\n  Complete = 1, // The upstream response is complete.\n  Reset = 2,    // The upstream response is invalid and its connection must be reset.\n};\n\n/**\n * Decoder filter callbacks add additional callbacks.\n */\nclass DecoderFilterCallbacks {\npublic:\n  virtual ~DecoderFilterCallbacks() = default;\n\n  /**\n   * @return uint64_t the ID of the originating stream for logging purposes.\n   */\n  virtual uint64_t streamId() const PURE;\n\n  /**\n   * @return const Network::Connection* the originating connection, or nullptr if there is none.\n   */\n  virtual const Network::Connection* connection() const PURE;\n\n  /**\n   * Continue iterating through the filter chain with buffered data. This routine can only be\n   * called if the filter has previously returned StopIteration from one of the DecoderFilter\n   * methods. The connection manager will callbacks to the next filter in the chain. 
Further note\n   * that if the request is not complete, the calling filter may receive further callbacks and must\n   * return an appropriate status code depending on what the filter needs to do.\n   */\n  virtual void continueDecoding() PURE;\n\n  /**\n   * @return RouteConstSharedPtr the route for the current request.\n   */\n  virtual Router::RouteConstSharedPtr route() PURE;\n\n  /**\n   * @return TransportType the originating transport.\n   */\n  virtual TransportType downstreamTransportType() const PURE;\n\n  /**\n   * @return ProtocolType the originating protocol.\n   */\n  virtual ProtocolType downstreamProtocolType() const PURE;\n\n  /**\n   * Create a locally generated response using the provided response object.\n   * @param response DirectResponse the response to send to the downstream client\n   * @param end_stream if true, the downstream connection should be closed after this response\n   */\n  virtual void sendLocalReply(const ThriftProxy::DirectResponse& response, bool end_stream) PURE;\n\n  /**\n   * Indicates the start of an upstream response. 
May only be called once.\n   * @param transport the transport used by the upstream response\n   * @param protocol the protocol used by the upstream response\n   */\n  virtual void startUpstreamResponse(Transport& transport, Protocol& protocol) PURE;\n\n  /**\n   * Called with upstream response data.\n   * @param data supplies the upstream's data\n   * @return ResponseStatus indicating if the upstream response requires more data, is complete,\n   *         or if an error occurred requiring the upstream connection to be reset.\n   */\n  virtual ResponseStatus upstreamData(Buffer::Instance& data) PURE;\n\n  /**\n   * Reset the downstream connection.\n   */\n  virtual void resetDownstreamConnection() PURE;\n\n  /**\n   * @return StreamInfo for logging purposes.\n   */\n  virtual StreamInfo::StreamInfo& streamInfo() PURE;\n};\n\n/**\n * Decoder filter interface.\n */\nclass DecoderFilter : public virtual DecoderEventHandler {\npublic:\n  ~DecoderFilter() override = default;\n\n  /**\n   * This routine is called prior to a filter being destroyed. This may happen after normal stream\n   * finish (both downstream and upstream) or due to reset. Every filter is responsible for making\n   * sure that any async events are cleaned up in the context of this routine. This includes timers,\n   * network calls, etc. The reason there is an onDestroy() method vs. doing this type of cleanup\n   * in the destructor is due to the deferred deletion model that Envoy uses to avoid stack unwind\n   * complications. Filters must not invoke either encoder or decoder filter callbacks after having\n   * onDestroy() invoked.\n   */\n  virtual void onDestroy() PURE;\n\n  /**\n   * Called by the connection manager once to initialize the filter decoder callbacks that the\n   * filter should use. 
Callbacks will not be invoked by the filter after onDestroy() is called.\n   */\n  virtual void setDecoderFilterCallbacks(DecoderFilterCallbacks& callbacks) PURE;\n};\n\nusing DecoderFilterSharedPtr = std::shared_ptr<DecoderFilter>;\n\n/**\n * These callbacks are provided by the connection manager to the factory so that the factory can\n * build the filter chain in an application specific way.\n */\nclass FilterChainFactoryCallbacks {\npublic:\n  virtual ~FilterChainFactoryCallbacks() = default;\n\n  /**\n   * Add a decoder filter that is used when reading connection data.\n   * @param filter supplies the filter to add.\n   */\n  virtual void addDecoderFilter(DecoderFilterSharedPtr filter) PURE;\n};\n\n/**\n * This function is used to wrap the creation of a Thrift filter chain for new connections as they\n * come in. Filter factories create the function at configuration initialization time, and then\n * they are used at runtime.\n * @param callbacks supplies the callbacks for the stream to install filters to. Typically the\n * function will install a single filter, but it's technically possibly to install more than one\n * if desired.\n */\nusing FilterFactoryCb = std::function<void(FilterChainFactoryCallbacks& callbacks)>;\n\n/**\n * A FilterChainFactory is used by a connection manager to create a Thrift level filter chain when\n * a new connection is created. Typically it would be implemented by a configuration engine that\n * would install a set of filters that are able to process an application scenario on top of a\n * stream of Thrift requests.\n */\nclass FilterChainFactory {\npublic:\n  virtual ~FilterChainFactory() = default;\n\n  /**\n   * Called when a new Thrift stream is created on the connection.\n   * @param callbacks supplies the \"sink\" that is used for actually creating the filter chain. 
@see\n   *                  FilterChainFactoryCallbacks.\n   */\n  virtual void createFilterChain(FilterChainFactoryCallbacks& callbacks) PURE;\n};\n\n} // namespace ThriftFilters\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/filter_config.h",
    "content": "#pragma once\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/network/thrift_proxy/filters/filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace ThriftFilters {\n\n/**\n * Implemented by each Thrift filter and registered via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass NamedThriftFilterConfigFactory : public Envoy::Config::TypedFactory {\npublic:\n  ~NamedThriftFilterConfigFactory() override = default;\n\n  /**\n   * Create a particular thrift filter factory implementation. If the implementation is unable to\n   * produce a factory with the provided parameters, it should throw an EnvoyException in the case\n   * of general error. The returned callback should always be initialized.\n   * @param config supplies the configuration for the filter\n   * @param stat_prefix prefix for stat logging\n   * @param context supplies the filter's context.\n   * @return FilterFactoryCb the factory creation function.\n   */\n  virtual FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& config, const std::string& stat_prefix,\n                               Server::Configuration::FactoryContext& context) PURE;\n\n  std::string category() const override { return \"envoy.thrift_proxy.filters\"; }\n};\n\n} // namespace ThriftFilters\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/pass_through_filter.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/thrift_proxy/filters/filter.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace ThriftFilters {\n\n/**\n * Pass through Thrift decoder filter. Continue at each decoding state within the series of\n * transitions.\n */\nclass PassThroughDecoderFilter : public DecoderFilter {\npublic:\n  // ThriftDecoderFilter\n  void onDestroy() override {}\n\n  void setDecoderFilterCallbacks(DecoderFilterCallbacks& callbacks) override {\n    decoder_callbacks_ = &callbacks;\n  };\n\n  // Thrift Decoder State Machine\n  ThriftProxy::FilterStatus transportBegin(ThriftProxy::MessageMetadataSharedPtr) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus transportEnd() override { return ThriftProxy::FilterStatus::Continue; }\n\n  ThriftProxy::FilterStatus messageBegin(ThriftProxy::MessageMetadataSharedPtr) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus messageEnd() override { return ThriftProxy::FilterStatus::Continue; }\n\n  ThriftProxy::FilterStatus structBegin(absl::string_view) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus structEnd() override { return ThriftProxy::FilterStatus::Continue; }\n\n  ThriftProxy::FilterStatus fieldBegin(absl::string_view, ThriftProxy::FieldType&,\n                                       int16_t&) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus fieldEnd() override { return ThriftProxy::FilterStatus::Continue; }\n\n  ThriftProxy::FilterStatus boolValue(bool&) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus byteValue(uint8_t&) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus int16Value(int16_t&) override {\n 
   return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus int32Value(int32_t&) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus int64Value(int64_t&) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus doubleValue(double&) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus stringValue(absl::string_view) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus mapBegin(ThriftProxy::FieldType&, ThriftProxy::FieldType&,\n                                     uint32_t&) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus mapEnd() override { return ThriftProxy::FilterStatus::Continue; }\n\n  ThriftProxy::FilterStatus listBegin(ThriftProxy::FieldType&, uint32_t&) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus listEnd() override { return ThriftProxy::FilterStatus::Continue; }\n\n  ThriftProxy::FilterStatus setBegin(ThriftProxy::FieldType&, uint32_t&) override {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  ThriftProxy::FilterStatus setEnd() override { return ThriftProxy::FilterStatus::Continue; }\n\nprotected:\n  DecoderFilterCallbacks* decoder_callbacks_{};\n};\n\n} // namespace ThriftFilters\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"ratelimit_lib\",\n    srcs = [\"ratelimit.cc\"],\n    hdrs = [\"ratelimit.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_client_interface\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_lib\",\n        \"//source/extensions/filters/common/ratelimit:stat_names_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:app_exception_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:pass_through_filter_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_ratelimit_interface\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"alpha\",\n    deps = [\n        \":ratelimit_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_client_interface\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:factory_base_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/filters/ratelimit/config.h\"\n\n#include <chrono>\n#include <string>\n\n#include \"envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ThriftFilters {\nnamespace RateLimitFilter {\n\nusing namespace Envoy::Extensions::NetworkFilters;\n\nThriftProxy::ThriftFilters::FilterFactoryCb\nRateLimitFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit&\n        proto_config,\n    const std::string&, Server::Configuration::FactoryContext& context) {\n  ASSERT(!proto_config.domain().empty());\n  ConfigSharedPtr config(new Config(proto_config, context.localInfo(), context.scope(),\n                                    context.runtime(), context.clusterManager()));\n  const std::chrono::milliseconds timeout =\n      std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20));\n\n  return [proto_config, &context, timeout,\n          config](ThriftProxy::ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addDecoderFilter(std::make_shared<Filter>(\n        config, Filters::Common::RateLimit::rateLimitClient(\n                    context, proto_config.rate_limit_service().grpc_service(), timeout,\n                    proto_config.rate_limit_service().transport_api_version())));\n  };\n}\n\n/**\n * Static registration for the rate limit filter. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(RateLimitFilterConfig, ThriftProxy::ThriftFilters::NamedThriftFilterConfigFactory);\n\n} // namespace RateLimitFilter\n} // namespace ThriftFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.pb.validate.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/factory_base.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ThriftFilters {\nnamespace RateLimitFilter {\n\nusing namespace Envoy::Extensions::NetworkFilters;\n\n/**\n * Config registration for the rate limit filter. @see NamedThriftFilterConfigFactory.\n */\nclass RateLimitFilterConfig\n    : public ThriftProxy::ThriftFilters::FactoryBase<\n          envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit> {\npublic:\n  RateLimitFilterConfig()\n      : FactoryBase(ThriftProxy::ThriftFilters::ThriftFilterNames::get().RATE_LIMIT) {}\n\nprivate:\n  ThriftProxy::ThriftFilters::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit&\n          proto_config,\n      const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace RateLimitFilter\n} // namespace ThriftFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h\"\n\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router_ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ThriftFilters {\nnamespace RateLimitFilter {\n\nusing namespace Envoy::Extensions::NetworkFilters;\n\nThriftProxy::FilterStatus Filter::messageBegin(ThriftProxy::MessageMetadataSharedPtr metadata) {\n  if (!config_->runtime().snapshot().featureEnabled(\"ratelimit.thrift_filter_enabled\", 100)) {\n    return ThriftProxy::FilterStatus::Continue;\n  }\n\n  initiateCall(*metadata);\n  return (state_ == State::Calling || state_ == State::Responded)\n             ? ThriftProxy::FilterStatus::StopIteration\n             : ThriftProxy::FilterStatus::Continue;\n}\n\nvoid Filter::initiateCall(const ThriftProxy::MessageMetadata& metadata) {\n  ThriftProxy::Router::RouteConstSharedPtr route = decoder_callbacks_->route();\n  if (!route || !route->routeEntry()) {\n    return;\n  }\n\n  const ThriftProxy::Router::RouteEntry* route_entry = route->routeEntry();\n  Upstream::ThreadLocalCluster* cluster = config_->cm().get(route_entry->clusterName());\n  if (!cluster) {\n    return;\n  }\n  cluster_ = cluster->info();\n\n  std::vector<RateLimit::Descriptor> descriptors;\n\n  // Get all applicable rate limit policy entries for the route.\n  populateRateLimitDescriptors(route_entry->rateLimitPolicy(), descriptors, route_entry, metadata);\n\n  if (!descriptors.empty()) {\n    state_ = State::Calling;\n    initiating_call_ = true;\n    client_->limit(*this, config_->domain(), descriptors, Tracing::NullSpan::instance());\n    initiating_call_ = false;\n  }\n}\n\nvoid Filter::onDestroy() {\n  if (state_ == State::Calling) {\n    state_ = State::Complete;\n    
client_->cancel();\n  }\n}\n\nvoid Filter::complete(Filters::Common::RateLimit::LimitStatus status,\n                      Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses,\n                      Http::ResponseHeaderMapPtr&& response_headers_to_add,\n                      Http::RequestHeaderMapPtr&& request_headers_to_add) {\n  // TODO(zuercher): Store headers to append to a response. Adding them to a local reply (over\n  // limit or error) is a matter of modifying the callbacks to allow it. Adding them to an upstream\n  // response requires either response (aka encoder) filters or some other mechanism.\n  UNREFERENCED_PARAMETER(descriptor_statuses);\n  UNREFERENCED_PARAMETER(response_headers_to_add);\n  UNREFERENCED_PARAMETER(request_headers_to_add);\n\n  state_ = State::Complete;\n  Filters::Common::RateLimit::StatNames& stat_names = config_->statNames();\n\n  switch (status) {\n  case Filters::Common::RateLimit::LimitStatus::OK:\n    cluster_->statsScope().counterFromStatName(stat_names.ok_).inc();\n    break;\n  case Filters::Common::RateLimit::LimitStatus::Error:\n    cluster_->statsScope().counterFromStatName(stat_names.error_).inc();\n    if (!config_->failureModeAllow()) {\n      state_ = State::Responded;\n      decoder_callbacks_->sendLocalReply(\n          ThriftProxy::AppException(ThriftProxy::AppExceptionType::InternalError, \"limiter error\"),\n          false);\n      decoder_callbacks_->streamInfo().setResponseFlag(\n          StreamInfo::ResponseFlag::RateLimitServiceError);\n      return;\n    }\n    cluster_->statsScope().counterFromStatName(stat_names.failure_mode_allowed_).inc();\n    break;\n  case Filters::Common::RateLimit::LimitStatus::OverLimit:\n    cluster_->statsScope().counterFromStatName(stat_names.over_limit_).inc();\n    if (config_->runtime().snapshot().featureEnabled(\"ratelimit.thrift_filter_enforcing\", 100)) {\n      state_ = State::Responded;\n      decoder_callbacks_->sendLocalReply(\n          
ThriftProxy::AppException(ThriftProxy::AppExceptionType::InternalError, \"over limit\"),\n          false);\n      decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimited);\n      return;\n    }\n    break;\n  }\n\n  if (!initiating_call_) {\n    decoder_callbacks_->continueDecoding();\n  }\n}\n\nvoid Filter::populateRateLimitDescriptors(\n    const ThriftProxy::Router::RateLimitPolicy& rate_limit_policy,\n    std::vector<RateLimit::Descriptor>& descriptors,\n    const ThriftProxy::Router::RouteEntry* route_entry,\n    const ThriftProxy::MessageMetadata& metadata) const {\n  for (const ThriftProxy::Router::RateLimitPolicyEntry& rate_limit :\n       rate_limit_policy.getApplicableRateLimit(config_->stage())) {\n    const std::string& disable_key = rate_limit.disableKey();\n    if (!disable_key.empty() &&\n        !config_->runtime().snapshot().featureEnabled(\n            fmt::format(\"ratelimit.{}.thrift_filter_enabled\", disable_key), 100)) {\n      continue;\n    }\n    rate_limit.populateDescriptors(*route_entry, descriptors, config_->localInfo().clusterName(),\n                                   metadata,\n                                   *decoder_callbacks_->streamInfo().downstreamRemoteAddress());\n  }\n}\n\n} // namespace RateLimitFilter\n} // namespace ThriftFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/ratelimit/ratelimit.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit.h\"\n#include \"extensions/filters/common/ratelimit/stat_names.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/pass_through_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ThriftFilters {\nnamespace RateLimitFilter {\n\nusing namespace Envoy::Extensions::NetworkFilters;\n\n/**\n * Global configuration for Thrift rate limit filter.\n */\nclass Config {\npublic:\n  Config(const envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit&\n             config,\n         const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, Runtime::Loader& runtime,\n         Upstream::ClusterManager& cm)\n      : domain_(config.domain()), stage_(config.stage()), local_info_(local_info), scope_(scope),\n        runtime_(runtime), cm_(cm), failure_mode_deny_(config.failure_mode_deny()),\n        stat_names_(scope_.symbolTable()) {}\n\n  const std::string& domain() const { return domain_; }\n  const LocalInfo::LocalInfo& localInfo() const { return local_info_; }\n  uint32_t stage() const { return stage_; }\n  Runtime::Loader& runtime() { return runtime_; }\n  Upstream::ClusterManager& cm() { return cm_; }\n  bool failureModeAllow() const { return !failure_mode_deny_; };\n  Filters::Common::RateLimit::StatNames& statNames() { return stat_names_; }\n\nprivate:\n  const std::string domain_;\n  const uint32_t stage_;\n  const LocalInfo::LocalInfo& local_info_;\n  Stats::Scope& scope_;\n  Runtime::Loader& runtime_;\n  Upstream::ClusterManager& cm_;\n  const bool failure_mode_deny_;\n  Filters::Common::RateLimit::StatNames 
stat_names_;\n};\n\nusing ConfigSharedPtr = std::shared_ptr<Config>;\n\n/**\n * Thrift rate limit filter instance. Calls the rate limit service with the given configuration\n * parameters. If the rate limit service returns an over limit response, an application exception\n * is returned, but the downstream connection is otherwise preserved. If the rate limit service\n * allows the request, no modifications are made and further filters progress as normal. If an\n * error is returned and the failure_mode_deny option is enabled, an application exception is\n * returned. By default, errors allow the request to continue.\n */\nclass Filter : public ThriftProxy::ThriftFilters::PassThroughDecoderFilter,\n               public Filters::Common::RateLimit::RequestCallbacks {\npublic:\n  Filter(ConfigSharedPtr config, Filters::Common::RateLimit::ClientPtr&& client)\n      : config_(std::move(config)), client_(std::move(client)) {}\n  ~Filter() override = default;\n\n  // ThriftFilters::PassThroughDecoderFilter\n  void onDestroy() override;\n  ThriftProxy::FilterStatus messageBegin(ThriftProxy::MessageMetadataSharedPtr) override;\n\n  // RateLimit::RequestCallbacks\n  void complete(Filters::Common::RateLimit::LimitStatus status,\n                Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses,\n                Http::ResponseHeaderMapPtr&& response_headers_to_add,\n                Http::RequestHeaderMapPtr&& request_headers_to_add) override;\n\nprivate:\n  void initiateCall(const ThriftProxy::MessageMetadata& metadata);\n  void populateRateLimitDescriptors(const ThriftProxy::Router::RateLimitPolicy& rate_limit_policy,\n                                    std::vector<RateLimit::Descriptor>& descriptors,\n                                    const ThriftProxy::Router::RouteEntry* route_entry,\n                                    const ThriftProxy::MessageMetadata& headers) const;\n\n  enum class State { NotStarted, Calling, Complete, Responded };\n\n  
ConfigSharedPtr config_;\n  Filters::Common::RateLimit::ClientPtr client_;\n  State state_{State::NotStarted};\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n  bool initiating_call_{false};\n};\n\n} // namespace RateLimitFilter\n} // namespace ThriftFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/filters/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace ThriftFilters {\n\n/**\n * Well-known http filter names.\n * NOTE: New filters should use the well known name: envoy.filters.thrift.name.\n */\nclass ThriftFilterNameValues {\npublic:\n  // Ratelimit filter\n  const std::string RATE_LIMIT = \"envoy.filters.thrift.rate_limit\";\n\n  // Router filter\n  const std::string ROUTER = \"envoy.filters.thrift.router\";\n};\n\nusing ThriftFilterNames = ConstSingleton<ThriftFilterNameValues>;\n\n} // namespace ThriftFilters\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/framed_transport_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/framed_transport_impl.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nbool FramedTransportImpl::decodeFrameStart(Buffer::Instance& buffer, MessageMetadata& metadata) {\n  UNREFERENCED_PARAMETER(metadata);\n\n  if (buffer.length() < 4) {\n    return false;\n  }\n\n  int32_t thrift_size = buffer.peekBEInt<int32_t>();\n\n  if (thrift_size <= 0 || thrift_size > MaxFrameSize) {\n    throw EnvoyException(absl::StrCat(\"invalid thrift framed transport frame size \", thrift_size));\n  }\n\n  buffer.drain(4);\n\n  metadata.setFrameSize(static_cast<uint32_t>(thrift_size));\n  return true;\n}\n\nbool FramedTransportImpl::decodeFrameEnd(Buffer::Instance&) { return true; }\n\nvoid FramedTransportImpl::encodeFrame(Buffer::Instance& buffer, const MessageMetadata& metadata,\n                                      Buffer::Instance& message) {\n  UNREFERENCED_PARAMETER(metadata);\n\n  uint64_t size = message.length();\n  if (size == 0 || size > MaxFrameSize) {\n    throw EnvoyException(absl::StrCat(\"invalid thrift framed transport frame size \", size));\n  }\n\n  int32_t thrift_size = static_cast<int32_t>(size);\n\n  buffer.writeBEInt<int32_t>(thrift_size);\n  buffer.move(message);\n}\n\nclass FramedTransportConfigFactory : public TransportFactoryBase<FramedTransportImpl> {\npublic:\n  FramedTransportConfigFactory() : TransportFactoryBase(TransportNames::get().FRAMED) {}\n};\n\n/**\n * Static registration for the framed transport. @see RegisterFactory.\n */\nREGISTER_FACTORY(FramedTransportConfigFactory, NamedTransportConfigFactory);\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/framed_transport_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * FramedTransportImpl implements the Thrift Framed transport.\n * See https://github.com/apache/thrift/blob/master/doc/specs/thrift-rpc.md\n */\nclass FramedTransportImpl : public Transport {\npublic:\n  FramedTransportImpl() = default;\n\n  // Transport\n  const std::string& name() const override { return TransportNames::get().FRAMED; }\n  TransportType type() const override { return TransportType::Framed; }\n  bool decodeFrameStart(Buffer::Instance& buffer, MessageMetadata& metadata) override;\n  bool decodeFrameEnd(Buffer::Instance& buffer) override;\n  void encodeFrame(Buffer::Instance& buffer, const MessageMetadata& metadata,\n                   Buffer::Instance& message) override;\n\n  static const int32_t MaxFrameSize = 0xFA0000;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/header_transport_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/header_transport_impl.h\"\n\n#include <limits>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n\n#include \"absl/strings/str_replace.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace {\n\n// c.f.\n// https://github.com/apache/thrift/blob/master/lib/cpp/src/thrift/protocol/TProtocolTypes.h#L27\nenum class HeaderProtocolType {\n  Binary = 0,\n  JSON = 1,\n  Compact = 2,\n\n  FirstHeaderProtocolType = Binary,\n  LastHeaderProtocolType = Compact,\n};\n\n// Fixed portion of frame header:\n//   Header magic: 2 bytes +\n//   Flags: 2 bytes +\n//   Sequence number: 4 bytes\n//   Header data size: 2 bytes\nconstexpr uint64_t MinFrameStartSizeNoHeaders = 10;\n\n// Minimum frame size: fixed portion of frame header + 4 bytes of header data (the minimum)\nconstexpr int32_t MinFrameStartSize = MinFrameStartSizeNoHeaders + 4;\n\n// Minimum to start decoding: 4 bytes of frame size + the fixed portion of the frame header\nconstexpr uint64_t MinDecodeBytes = MinFrameStartSizeNoHeaders + 4;\n\n// Maximum size for header data.\nconstexpr int32_t MaxHeadersSize = 65536;\n\n} // namespace\n\nbool HeaderTransportImpl::decodeFrameStart(Buffer::Instance& buffer, MessageMetadata& metadata) {\n  if (buffer.length() < MinDecodeBytes) {\n    return false;\n  }\n\n  // Size of frame, not including the length bytes.\n  const int32_t frame_size = buffer.peekBEInt<int32_t>();\n\n  // Minimum header frame size is 18 bytes (4 bytes of frame size + 10 bytes of fixed header +\n  // minimum 4 bytes of variable header data), so frame_size must be at least 14.\n  if (frame_size < MinFrameStartSize || frame_size > MaxFrameSize) {\n    throw EnvoyException(absl::StrCat(\"invalid thrift header transport frame size \", frame_size));\n  }\n\n  int16_t magic = 
buffer.peekBEInt<uint16_t>(4);\n  if (!isMagic(magic)) {\n    throw EnvoyException(fmt::format(\"invalid thrift header transport magic {:04x}\", magic));\n  }\n\n  // offset 6: 16 bit flags field, unused\n  // offset 8: 32 bit sequence number field\n  int32_t seq_id = buffer.peekBEInt<int32_t>(8);\n\n  // offset 12: 16 bit (remaining) header size / 4 (spec erroneously claims / 32).\n  int16_t raw_header_size = buffer.peekBEInt<int16_t>(12);\n  int32_t header_size = static_cast<int32_t>(raw_header_size) * 4;\n  if (header_size < 0 || header_size > MaxHeadersSize) {\n    throw EnvoyException(fmt::format(\"invalid thrift header transport header size {} ({:04x})\",\n                                     header_size, static_cast<uint16_t>(raw_header_size)));\n  }\n\n  if (header_size == 0) {\n    throw EnvoyException(\"no header data\");\n  }\n\n  if (buffer.length() < static_cast<uint64_t>(header_size) + MinDecodeBytes) {\n    // Need more header data.\n    return false;\n  }\n\n  // Header data starts at offset 14 (4 bytes of frame size followed by 10 bytes of fixed header).\n  buffer.drain(MinDecodeBytes);\n\n  // Remaining frame size is the original frame size (which does not count itself), less the 10\n  // fixed bytes of the header (magic, flags, etc), less the size of the variable header data\n  // (header_size).\n  metadata.setFrameSize(\n      static_cast<uint32_t>(frame_size - header_size - MinFrameStartSizeNoHeaders));\n  metadata.setSequenceId(seq_id);\n\n  ProtocolType proto = ProtocolType::Auto;\n  HeaderProtocolType header_proto =\n      static_cast<HeaderProtocolType>(drainVarIntI16(buffer, header_size, \"protocol id\"));\n  switch (header_proto) {\n  case HeaderProtocolType::Binary:\n    proto = ProtocolType::Binary;\n    break;\n  case HeaderProtocolType::Compact:\n    proto = ProtocolType::Compact;\n    break;\n  default:\n    throw EnvoyException(fmt::format(\"Unknown protocol {}\", static_cast<int>(header_proto)));\n  }\n  
metadata.setProtocol(proto);\n\n  int16_t num_xforms = drainVarIntI16(buffer, header_size, \"transform count\");\n  if (num_xforms < 0) {\n    throw EnvoyException(absl::StrCat(\"invalid header transport transform count \", num_xforms));\n  }\n\n  while (num_xforms-- > 0) {\n    int32_t xform_id = drainVarIntI32(buffer, header_size, \"transform id\");\n\n    // To date, no transforms have a data field. In the future, some transform IDs may require\n    // consuming another varint 32 at this point. The known transform IDs are:\n    // 1: zlib compression\n    // 2: hmac (appended to end of packet)\n    // 3: snappy compression\n    buffer.drain(header_size);\n    metadata.setAppException(AppExceptionType::MissingResult,\n                             absl::StrCat(\"Unknown transform \", xform_id));\n    return true;\n  }\n\n  while (header_size > 0) {\n    // Attempt to read info blocks\n    int32_t info_id = drainVarIntI32(buffer, header_size, \"info id\");\n    if (info_id != 1) {\n      // 0 indicates a padding byte, and the end of the info block.\n      // 1 indicates an info id header/value pair.\n      // Any other value is an unknown info id block, which we ignore.\n      break;\n    }\n\n    int32_t num_headers = drainVarIntI32(buffer, header_size, \"header count\");\n    if (num_headers < 0) {\n      throw EnvoyException(absl::StrCat(\"invalid header transport header count \", num_headers));\n    }\n\n    while (num_headers-- > 0) {\n      std::string key_string = drainVarString(buffer, header_size, \"header key\");\n      // LowerCaseString doesn't allow '\\0', '\\n', and '\\r'.\n      key_string =\n          absl::StrReplaceAll(key_string, {{std::string(1, '\\0'), \"\"}, {\"\\n\", \"\"}, {\"\\r\", \"\"}});\n      const Http::LowerCaseString key = Http::LowerCaseString(key_string);\n      const std::string value = drainVarString(buffer, header_size, \"header value\");\n      metadata.headers().addCopy(key, value);\n    }\n  }\n\n  // Remaining bytes are 
padding or ignored info blocks.\n  if (header_size > 0) {\n    buffer.drain(header_size);\n  }\n\n  return true;\n}\n\nbool HeaderTransportImpl::decodeFrameEnd(Buffer::Instance&) {\n  exception_.reset();\n  exception_reason_.clear();\n\n  return true;\n}\n\nvoid HeaderTransportImpl::encodeFrame(Buffer::Instance& buffer, const MessageMetadata& metadata,\n                                      Buffer::Instance& message) {\n  uint64_t msg_size = message.length();\n  if (msg_size == 0) {\n    throw EnvoyException(absl::StrCat(\"invalid thrift header transport message size \", msg_size));\n  }\n\n  const Http::HeaderMap& headers = metadata.headers();\n  if (headers.size() > MaxHeadersSize / 2) {\n    // Each header takes a minimum of 2 bytes, yielding this limit.\n    throw EnvoyException(\n        absl::StrCat(\"invalid thrift header transport too many headers \", headers.size()));\n  }\n\n  Buffer::OwnedImpl header_buffer;\n\n  if (!metadata.hasProtocol()) {\n    throw EnvoyException(\"missing header transport protocol\");\n  }\n\n  switch (metadata.protocol()) {\n  case ProtocolType::Binary:\n    BufferHelper::writeVarIntI32(header_buffer, static_cast<int32_t>(HeaderProtocolType::Binary));\n    break;\n  case ProtocolType::Compact:\n    BufferHelper::writeVarIntI32(header_buffer, static_cast<int32_t>(HeaderProtocolType::Compact));\n    break;\n  default:\n    throw EnvoyException(fmt::format(\"invalid header transport protocol {}\",\n                                     ProtocolNames::get().fromType(metadata.protocol())));\n  }\n\n  BufferHelper::writeVarIntI32(header_buffer, 0); // num transforms\n  if (!headers.empty()) {\n    // Info ID 1\n    header_buffer.writeByte(1);\n\n    // Num headers\n    BufferHelper::writeVarIntI32(header_buffer, static_cast<int32_t>(headers.size()));\n\n    headers.iterate([&header_buffer](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n      writeVarString(header_buffer, header.key().getStringView());\n      
writeVarString(header_buffer, header.value().getStringView());\n      return Http::HeaderMap::Iterate::Continue;\n    });\n  }\n\n  uint64_t header_size = header_buffer.length();\n\n  // Always pad (as the Apache implementation does).\n  const int padding = 4 - (header_size % 4);\n  header_buffer.add(\"\\0\\0\\0\\0\", padding);\n  header_size += padding;\n\n  if (header_size > MaxHeadersSize) {\n    throw EnvoyException(absl::StrCat(\"invalid thrift header transport header size \", header_size));\n  }\n\n  // Frame size does not include the frame length itself.\n  uint64_t size = header_size + msg_size + MinFrameStartSizeNoHeaders;\n  if (size > MaxFrameSize) {\n    throw EnvoyException(absl::StrCat(\"invalid thrift header transport frame size \", size));\n  }\n\n  int32_t seq_id = 0;\n  if (metadata.hasSequenceId()) {\n    seq_id = metadata.sequenceId();\n  }\n\n  buffer.writeBEInt<uint32_t>(static_cast<uint32_t>(size));\n  buffer.writeBEInt<uint16_t>(Magic);\n  buffer.writeBEInt<uint16_t>(0); // flags\n  buffer.writeBEInt<int32_t>(seq_id);\n  buffer.writeBEInt<uint16_t>(static_cast<uint16_t>(header_size / 4));\n\n  buffer.move(header_buffer);\n  buffer.move(message);\n}\n\nint16_t HeaderTransportImpl::drainVarIntI16(Buffer::Instance& buffer, int32_t& header_size,\n                                            const char* desc) {\n  int32_t value = drainVarIntI32(buffer, header_size, desc);\n  if (value > static_cast<int32_t>(std::numeric_limits<int16_t>::max())) {\n    throw EnvoyException(fmt::format(\"header transport {}: value {} exceeds max i16 ({})\", desc,\n                                     value, std::numeric_limits<int16_t>::max()));\n  }\n  return static_cast<int16_t>(value);\n}\n\nint32_t HeaderTransportImpl::drainVarIntI32(Buffer::Instance& buffer, int32_t& header_size,\n                                            const char* desc) {\n  if (header_size <= 0) {\n    throw EnvoyException(fmt::format(\"unable to read header transport {}: header too 
small\", desc));\n  }\n\n  int size;\n  int32_t value = BufferHelper::peekVarIntI32(buffer, 0, size);\n  if (size < 0 || (header_size - size) < 0) {\n    throw EnvoyException(fmt::format(\"unable to read header transport {}: header too small\", desc));\n  }\n  buffer.drain(size);\n  header_size -= size;\n  return value;\n}\n\nstd::string HeaderTransportImpl::drainVarString(Buffer::Instance& buffer, int32_t& header_size,\n                                                const char* desc) {\n  const int16_t str_len = drainVarIntI16(buffer, header_size, desc);\n  if (str_len == 0) {\n    return \"\";\n  }\n\n  if (header_size < static_cast<int32_t>(str_len)) {\n    throw EnvoyException(fmt::format(\"unable to read header transport {}: header too small\", desc));\n  }\n\n  const std::string value(static_cast<char*>(buffer.linearize(str_len)), str_len);\n  buffer.drain(str_len);\n  header_size -= str_len;\n  return value;\n}\n\nvoid HeaderTransportImpl::writeVarString(Buffer::Instance& buffer, const absl::string_view str) {\n  const std::string::size_type len = str.length();\n  if (len > static_cast<uint32_t>(std::numeric_limits<int16_t>::max())) {\n    throw EnvoyException(absl::StrCat(\"header string too long: \", len));\n  }\n\n  BufferHelper::writeVarIntI32(buffer, static_cast<int32_t>(len));\n  if (len == 0) {\n    return;\n  }\n  buffer.add(str.data(), len);\n}\n\nclass HeaderTransportConfigFactory : public TransportFactoryBase<HeaderTransportImpl> {\npublic:\n  HeaderTransportConfigFactory() : TransportFactoryBase(TransportNames::get().HEADER) {}\n};\n\n/**\n * Static registration for the header transport. @see RegisterFactory.\n */\nREGISTER_FACTORY(HeaderTransportConfigFactory, NamedTransportConfigFactory);\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/header_transport_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n#include \"extensions/filters/network/thrift_proxy/thrift.h\"\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * HeaderTransportImpl implements the Thrift Header transport.\n * See https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md and\n * https://github.com/apache/thrift/blob/master/lib/cpp/src/thrift/transport/THeaderTransport.h\n * (for constants not specified in the spec).\n */\nclass HeaderTransportImpl : public Transport {\npublic:\n  // Transport\n  const std::string& name() const override { return TransportNames::get().HEADER; }\n  TransportType type() const override { return TransportType::Header; }\n  bool decodeFrameStart(Buffer::Instance& buffer, MessageMetadata& metadata) override;\n  bool decodeFrameEnd(Buffer::Instance& buffer) override;\n  void encodeFrame(Buffer::Instance& buffer, const MessageMetadata& metadata,\n                   Buffer::Instance& message) override;\n\n  static bool isMagic(uint16_t word) { return word == Magic; }\n\n  static constexpr int32_t MaxFrameSize = 0x3FFFFFFF;\n\nprivate:\n  static constexpr uint16_t Magic = 0x0FFF;\n\n  static int16_t drainVarIntI16(Buffer::Instance& buffer, int32_t& header_size, const char* desc);\n  static int32_t drainVarIntI32(Buffer::Instance& buffer, int32_t& header_size, const char* desc);\n  static std::string drainVarString(Buffer::Instance& buffer, int32_t& header_size,\n                                    const char* desc);\n  static void writeVarString(Buffer::Instance& buffer, const absl::string_view str);\n\n  absl::optional<AppExceptionType> exception_;\n  std::string 
exception_reason_;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/metadata.h",
    "content": "#pragma once\n\n#include <algorithm>\n#include <cstring>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/thrift.h\"\n#include \"extensions/filters/network/thrift_proxy/tracing.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * MessageMetadata encapsulates metadata about Thrift messages. The various fields are considered\n * optional since they may come from either the transport or protocol in some cases. Unless\n * otherwise noted, accessor methods throw absl::bad_optional_access if the corresponding value has\n * not been set.\n */\nclass MessageMetadata {\npublic:\n  MessageMetadata() = default;\n\n  bool hasFrameSize() const { return frame_size_.has_value(); }\n  uint32_t frameSize() const { return frame_size_.value(); }\n  void setFrameSize(uint32_t size) { frame_size_ = size; }\n\n  bool hasProtocol() const { return proto_.has_value(); }\n  ProtocolType protocol() const { return proto_.value(); }\n  void setProtocol(ProtocolType proto) { proto_ = proto; }\n\n  bool hasMethodName() const { return method_name_.has_value(); }\n  const std::string& methodName() const { return method_name_.value(); }\n  void setMethodName(const std::string& method_name) { method_name_ = method_name; }\n\n  bool hasSequenceId() const { return seq_id_.has_value(); }\n  int32_t sequenceId() const { return seq_id_.value(); }\n  void setSequenceId(int32_t seq_id) { seq_id_ = seq_id; }\n\n  bool hasMessageType() const { return msg_type_.has_value(); }\n  MessageType messageType() const { return msg_type_.value(); }\n  void setMessageType(MessageType msg_type) { msg_type_ = msg_type; }\n\n  /**\n   * @return HeaderMap of current headers (never throws)\n   */\n  const Http::HeaderMap& headers() 
const { return *headers_; }\n  Http::HeaderMap& headers() { return *headers_; }\n\n  /**\n   * @return SpanList an immutable list of Spans\n   */\n  const SpanList& spans() const { return spans_; }\n\n  /**\n   * @return SpanList& a reference to a mutable list of Spans\n   */\n  SpanList& mutableSpans() { return spans_; }\n\n  bool hasAppException() const { return app_ex_type_.has_value(); }\n  void setAppException(AppExceptionType app_ex_type, const std::string& message) {\n    app_ex_type_ = app_ex_type;\n    app_ex_msg_ = message;\n  }\n  AppExceptionType appExceptionType() const { return app_ex_type_.value(); }\n  const std::string& appExceptionMessage() const { return app_ex_msg_.value(); }\n\n  bool isProtocolUpgradeMessage() const { return protocol_upgrade_message_; }\n  void setProtocolUpgradeMessage(bool upgrade_message) {\n    protocol_upgrade_message_ = upgrade_message;\n  }\n\n  absl::optional<int64_t> traceId() const { return trace_id_; }\n  void setTraceId(int64_t trace_id) { trace_id_ = trace_id; }\n\n  absl::optional<int64_t> traceIdHigh() const { return trace_id_high_; }\n  void setTraceIdHigh(int64_t trace_id_high) { trace_id_high_ = trace_id_high; }\n\n  absl::optional<int64_t> spanId() const { return span_id_; }\n  void setSpanId(int64_t span_id) { span_id_ = span_id; }\n\n  absl::optional<int64_t> parentSpanId() const { return parent_span_id_; }\n  void setParentSpanId(int64_t parent_span_id) { parent_span_id_ = parent_span_id; }\n\n  absl::optional<int64_t> flags() const { return flags_; }\n  void setFlags(int64_t flags) { flags_ = flags; }\n\n  absl::optional<bool> sampled() const { return sampled_; }\n  void setSampled(bool sampled) { sampled_ = sampled; }\n\nprivate:\n  absl::optional<uint32_t> frame_size_{};\n  absl::optional<ProtocolType> proto_{};\n  absl::optional<std::string> method_name_{};\n  absl::optional<int32_t> seq_id_{};\n  absl::optional<MessageType> msg_type_{};\n  Http::HeaderMapPtr 
headers_{Http::RequestHeaderMapImpl::create()};\n  absl::optional<AppExceptionType> app_ex_type_;\n  absl::optional<std::string> app_ex_msg_;\n  bool protocol_upgrade_message_{false};\n  SpanList spans_;\n  absl::optional<int64_t> trace_id_;\n  absl::optional<int64_t> trace_id_high_;\n  absl::optional<int64_t> span_id_;\n  absl::optional<int64_t> parent_span_id_;\n  absl::optional<int64_t> flags_;\n  absl::optional<bool> sampled_;\n};\n\nusing MessageMetadataSharedPtr = std::shared_ptr<MessageMetadata>;\n\n/**\n * Constant Thrift headers. All lower case.\n */\nclass HeaderValues {\npublic:\n  const Http::LowerCaseString ClientId{\":client-id\"};\n  const Http::LowerCaseString Dest{\":dest\"};\n  const Http::LowerCaseString MethodName{\":method-name\"};\n};\nusing Headers = ConstSingleton<HeaderValues>;\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/protocol.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/config/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/network/thrift_proxy/conn_state.h\"\n#include \"extensions/filters/network/thrift_proxy/decoder_events.h\"\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n#include \"extensions/filters/network/thrift_proxy/thrift.h\"\n#include \"extensions/filters/network/thrift_proxy/thrift_object.h\"\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass DirectResponse;\nusing DirectResponsePtr = std::unique_ptr<DirectResponse>;\n\n/**\n * Protocol represents the operations necessary to implement the a generic Thrift protocol.\n * See https://github.com/apache/thrift/blob/master/doc/specs/thrift-protocol-spec.md\n */\nclass Protocol {\npublic:\n  virtual ~Protocol() = default;\n\n  /**\n   * @return const std::string& the human-readable name of the protocol\n   */\n  virtual const std::string& name() const PURE;\n\n  /**\n   * @return ProtocolType the protocol type\n   */\n  virtual ProtocolType type() const PURE;\n\n  /**\n   * For protocol-detecting implementations, set the underlying type based on external\n   * (e.g. transport-level) information).\n   * @param type ProtocolType to explicitly set\n   */\n  virtual void setType(ProtocolType) { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  /**\n   * Reads the start of a Thrift protocol message from the buffer and updates the metadata\n   * parameter with values from the message header. 
If successful, the message header is removed\n   * from the buffer.\n   * @param buffer the buffer to read from\n   * @param metadata MessageMetadata to be updated with name, message type, and sequence id.\n   * @return true if a message header was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid message header\n   */\n  virtual bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) PURE;\n\n  /**\n   * Reads the end of a Thrift protocol message from the buffer. If successful, the message footer\n   * is removed from the buffer.\n   * @param buffer the buffer to read from\n   * @return true if a message footer was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid message footer\n   */\n  virtual bool readMessageEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Reads the start of a Thrift struct from the buffer and updates the name parameter with the\n   * value from the struct header. If successful, the struct header is removed from the buffer.\n   * @param buffer the buffer to read from\n   * @param name updated with the struct name on success only\n   * @return true if a struct header was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid struct header\n   */\n  virtual bool readStructBegin(Buffer::Instance& buffer, std::string& name) PURE;\n\n  /**\n   * Reads the end of a Thrift struct from the buffer. 
If successful, the struct footer is removed\n   * from the buffer.\n   * @param buffer the buffer to read from\n   * @return true if a struct footer was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid struct footer\n   */\n  virtual bool readStructEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Reads the start of a Thrift struct field from the buffer and updates the name, field_type, and\n   * field_id parameters with the values from the field header. If successful, the field header is\n   * removed from the buffer.\n   * @param buffer the buffer to read from\n   * @param name updated with the field name on success only\n   * @param field_type updated with the FieldType on success only\n   * @param field_id updated with the field ID on success only\n   * @return true if a field header was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid field header\n   */\n  virtual bool readFieldBegin(Buffer::Instance& buffer, std::string& name, FieldType& field_type,\n                              int16_t& field_id) PURE;\n\n  /**\n   * Reads the end of a Thrift struct field from the buffer. If successful, the field footer is\n   * removed from the buffer.\n   * @param buffer the buffer to read from\n   * @return true if a field footer was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid field footer\n   */\n  virtual bool readFieldEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Reads the start of a Thrift map from the buffer and updates the key_type, value_type, and size\n   * parameters with the values from the map header. 
If successful, the map header is removed from\n   * the buffer.\n   * @param buffer the buffer to read from\n   * @param key_type updated with map key FieldType on success only\n   * @param value_type updated with map value FieldType on success only\n   * @param size updated with the number of key-value pairs in the map on success only\n   * @return true if a map header was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid map header\n   */\n  virtual bool readMapBegin(Buffer::Instance& buffer, FieldType& key_type, FieldType& value_type,\n                            uint32_t& size) PURE;\n\n  /**\n   * Reads the end of a Thrift map from the buffer. If successful, the map footer is removed from\n   * the buffer.\n   * @param buffer the buffer to read from\n   * @return true if a map footer was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid map footer\n   */\n  virtual bool readMapEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Reads the start of a Thrift list from the buffer and updates the elem_type, and size\n   * parameters with the values from the list header. If successful, the list header is removed from\n   * the buffer.\n   * @param buffer the buffer to read from\n   * @param elem_type updated with list element FieldType on success only\n   * @param size updated with the number of list members on success only\n   * @return true if a list header was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid list header\n   */\n  virtual bool readListBegin(Buffer::Instance& buffer, FieldType& elem_type, uint32_t& size) PURE;\n\n  /**\n   * Reads the end of a Thrift list from the buffer. 
If successful, the list footer is removed from\n   * the buffer.\n   * @param buffer the buffer to read from\n   * @return true if a list footer was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid list footer\n   */\n  virtual bool readListEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Reads the start of a Thrift set from the buffer and updates the elem_type, and size\n   * parameters with the values from the set header. If successful, the set header is removed from\n   * the buffer.\n   * @param buffer the buffer to read from\n   * @param elem_type updated with set element FieldType on success only\n   * @param size updated with the number of set members on success only\n   * @return true if a set header was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set header\n   */\n  virtual bool readSetBegin(Buffer::Instance& buffer, FieldType& elem_type, uint32_t& size) PURE;\n\n  /**\n   * Reads the end of a Thrift set from the buffer. If successful, the set footer is removed from\n   * the buffer.\n   * @param buffer the buffer to read from\n   * @return true if a set footer was successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set footer\n   */\n  virtual bool readSetEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Reads a boolean value from the buffer and updates value. If successful, the value is removed\n   * from the buffer.\n   * @param buffer the buffer to read from\n   * @param value updated with the value read from the buffer\n   * @return true if a value successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set footer\n   */\n  virtual bool readBool(Buffer::Instance& buffer, bool& value) PURE;\n\n  /**\n   * Reads a byte value from the buffer and updates value. 
If successful, the value is removed from\n   * the buffer.\n   * @param buffer the buffer to read from\n   * @param value updated with the value read from the buffer\n   * @return true if a value successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set footer\n   */\n  virtual bool readByte(Buffer::Instance& buffer, uint8_t& value) PURE;\n\n  /**\n   * Reads a int16_t value from the buffer and updates value. If successful, the value is removed\n   * from the buffer.\n   * @param buffer the buffer to read from\n   * @param value updated with the value read from the buffer\n   * @return true if a value successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set footer\n   */\n  virtual bool readInt16(Buffer::Instance& buffer, int16_t& value) PURE;\n\n  /**\n   * Reads a int32_t value from the buffer and updates value. If successful, the value is removed\n   * from the buffer.\n   * @param buffer the buffer to read from\n   * @param value updated with the value read from the buffer\n   * @return true if a value successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set footer\n   */\n  virtual bool readInt32(Buffer::Instance& buffer, int32_t& value) PURE;\n\n  /**\n   * Reads a int64_t value from the buffer and updates value. If successful, the value is removed\n   * from the buffer.\n   * @param buffer the buffer to read from\n   * @param value updated with the value read from the buffer\n   * @return true if a value successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set footer\n   */\n  virtual bool readInt64(Buffer::Instance& buffer, int64_t& value) PURE;\n\n  /**\n   * Reads a double value from the buffer and updates value. 
If successful, the value is removed\n   * from the buffer.\n   * @param buffer the buffer to read from\n   * @param value updated with the value read from the buffer\n   * @return true if a value successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set footer\n   */\n  virtual bool readDouble(Buffer::Instance& buffer, double& value) PURE;\n\n  /**\n   * Reads a string value from the buffer and updates value. If successful, the value is removed\n   * from the buffer.\n   * @param buffer the buffer to read from\n   * @param value updated with the value read from the buffer\n   * @return true if a value successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set footer\n   */\n  virtual bool readString(Buffer::Instance& buffer, std::string& value) PURE;\n\n  /**\n   * Reads a binary value from the buffer and updates value. If successful, the value is removed\n   * from the buffer.\n   * @param buffer the buffer to read from\n   * @param value updated with the value read from the buffer\n   * @return true if a value successfully read, false if more data is required\n   * @throw EnvoyException if the data is not a valid set footer\n   */\n  virtual bool readBinary(Buffer::Instance& buffer, std::string& value) PURE;\n\n  /**\n   * Writes the start of a Thrift protocol message to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param metadata MessageMetadata for the message to write.\n   */\n  virtual void writeMessageBegin(Buffer::Instance& buffer, const MessageMetadata& metadata) PURE;\n\n  /**\n   * Writes the end of a Thrift protocol message to the buffer.\n   * @param buffer Buffer::Instance to modify\n   */\n  virtual void writeMessageEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Writes the start of a Thrift struct to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param name the struct name, if known\n   */\n  virtual void 
writeStructBegin(Buffer::Instance& buffer, const std::string& name) PURE;\n\n  /**\n   * Writes the end of a Thrift struct to the buffer.\n   * @param buffer Buffer::Instance to modify\n   */\n  virtual void writeStructEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Writes the start of a Thrift struct field to the buffer\n   * @param buffer Buffer::Instance to modify\n   * @param name the field name, if known\n   * @param field_type the field's FieldType\n   * @param field_id the field ID\n   */\n  virtual void writeFieldBegin(Buffer::Instance& buffer, const std::string& name,\n                               FieldType field_type, int16_t field_id) PURE;\n\n  /**\n   * Writes the end of a Thrift struct field to the buffer.\n   * @param buffer Buffer::Instance to modify\n   */\n  virtual void writeFieldEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Writes the start of a Thrift map to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param key_type the map key FieldType\n   * @param value_type the map value FieldType\n   * @param size the number of key-value pairs in the map\n   */\n  virtual void writeMapBegin(Buffer::Instance& buffer, FieldType key_type, FieldType value_type,\n                             uint32_t size) PURE;\n\n  /**\n   * Writes the end of a Thrift map to the buffer.\n   * @param buffer Buffer::Instance to modify\n   */\n  virtual void writeMapEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Writes the start of a Thrift list to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param elem_type the list element FieldType\n   * @param size the number of list members\n   */\n  virtual void writeListBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) PURE;\n\n  /**\n   * Writes the end of a Thrift list to the buffer.\n   * @param buffer Buffer::Instance to modify\n   */\n  virtual void writeListEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Writes the start of a Thrift set to the buffer.\n   
* @param buffer Buffer::Instance to modify\n   * @param elem_type the set element FieldType\n   * @param size the number of set members\n   */\n  virtual void writeSetBegin(Buffer::Instance& buffer, FieldType elem_type, uint32_t size) PURE;\n\n  /**\n   * Writes the end of a Thrift set to the buffer.\n   * @param buffer Buffer::Instance to modify\n   */\n  virtual void writeSetEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Writes a boolean value to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param value bool to write\n   */\n  virtual void writeBool(Buffer::Instance& buffer, bool value) PURE;\n\n  /**\n   * Writes a byte value to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param value uint8_t to write\n   */\n  virtual void writeByte(Buffer::Instance& buffer, uint8_t value) PURE;\n\n  /**\n   * Writes a int16_t value to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param value int16_t to write\n   */\n  virtual void writeInt16(Buffer::Instance& buffer, int16_t value) PURE;\n\n  /**\n   * Writes a int32_t value to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param value int32_t to write\n   */\n  virtual void writeInt32(Buffer::Instance& buffer, int32_t value) PURE;\n\n  /**\n   * Writes a int64_t value to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param value int64_t to write\n   */\n  virtual void writeInt64(Buffer::Instance& buffer, int64_t value) PURE;\n\n  /**\n   * Writes a double value to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param value double to write\n   */\n  virtual void writeDouble(Buffer::Instance& buffer, double value) PURE;\n\n  /**\n   * Writes a string value to the buffer.\n   * @param buffer Buffer::Instance to modify\n   * @param value std::string to write\n   */\n  virtual void writeString(Buffer::Instance& buffer, const std::string& value) PURE;\n\n  /**\n   * Writes a binary value to the buffer.\n   * 
@param buffer Buffer::Instance to modify\n   * @param value std::string to write\n   */\n  virtual void writeBinary(Buffer::Instance& buffer, const std::string& value) PURE;\n\n  /**\n   * Indicates whether a protocol uses start-of-connection messages to negotiate protocol options.\n   * If this method returns true, the Protocol must invoke setProtocolUpgradeMessage during\n   * readMessageBegin if it detects an upgrade request.\n   *\n   * @return true for protocols that exchange messages at the start of a connection to negotiate\n   *         protocol upgrade (or options)\n   */\n  virtual bool supportsUpgrade() { return false; }\n\n  /**\n   * Creates an opaque DecoderEventHandlerSharedPtr that can decode a downstream client's upgrade\n   * request. When the request is complete, the decoder is passed back to writeUpgradeResponse\n   * to allow the Protocol to update its internal state and generate a response to the request.\n   *\n   * @return a DecoderEventHandlerSharedPtr that decodes a downstream client's upgrade request\n   */\n  virtual DecoderEventHandlerSharedPtr upgradeRequestDecoder() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  /**\n   * Writes a response to a downstream client's upgrade request.\n   * @param decoder DecoderEventHandlerSharedPtr created by upgradeRequestDecoder\n   * @return DirectResponsePtr containing an upgrade response\n   */\n  virtual DirectResponsePtr upgradeResponse(const DecoderEventHandler& decoder) {\n    UNREFERENCED_PARAMETER(decoder);\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  /**\n   * Checks whether a given upstream connection can be upgraded and generates an upgrade request\n   * message. 
If this method returns a ThriftObject it will be used to decode the upstream's next\n   * response.\n   *\n   * @param transport the Transport to use for decoding the response\n   * @param state ThriftConnectionState tracking whether upgrade has already been performed\n   * @param buffer Buffer::Instance to modify with an upgrade request\n   * @return a ThriftObject capable of decoding an upgrade response or nullptr if upgrade was\n   *         already completed (successfully or not)\n   */\n  virtual ThriftObjectPtr attemptUpgrade(Transport& transport, ThriftConnectionState& state,\n                                         Buffer::Instance& buffer) {\n    UNREFERENCED_PARAMETER(transport);\n    UNREFERENCED_PARAMETER(state);\n    UNREFERENCED_PARAMETER(buffer);\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  /**\n   * Completes an upgrade previously started via attemptUpgrade.\n   * @param response ThriftObject created by attemptUpgrade, after the response has completed\n   *        decoding\n   */\n  virtual void completeUpgrade(ThriftConnectionState& state, ThriftObject& response) {\n    UNREFERENCED_PARAMETER(state);\n    UNREFERENCED_PARAMETER(response);\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n};\n\nusing ProtocolPtr = std::unique_ptr<Protocol>;\n\n/**\n * A DirectResponse manipulates a Protocol to directly create a Thrift response message.\n */\nclass DirectResponse {\npublic:\n  virtual ~DirectResponse() = default;\n\n  enum class ResponseType {\n    // DirectResponse encodes MessageType::Reply with success payload\n    SuccessReply,\n\n    // DirectResponse encodes MessageType::Reply with an exception payload\n    ErrorReply,\n\n    // DirectResponse encodes MessageType::Exception\n    Exception,\n  };\n\n  /**\n   * Encodes the response via the given Protocol.\n   * @param metadata the MessageMetadata for the request that generated this response\n   * @param proto the Protocol to be used for message encoding\n   * @param buffer the Buffer into which 
the message should be encoded\n   * @return ResponseType indicating whether the message is a successful or error reply or an\n   *         exception\n   */\n  virtual ResponseType encode(MessageMetadata& metadata, Protocol& proto,\n                              Buffer::Instance& buffer) const PURE;\n};\n\n/**\n * Implemented by each Thrift protocol and registered via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass NamedProtocolConfigFactory : public Config::UntypedFactory {\npublic:\n  ~NamedProtocolConfigFactory() override = default;\n\n  /**\n   * Create a particular Thrift protocol\n   * @return ProtocolFactoryCb the protocol\n   */\n  virtual ProtocolPtr createProtocol() PURE;\n\n  std::string category() const override { return \"envoy.thrift_proxy.protocols\"; }\n\n  /**\n   * Convenience method to lookup a factory by type.\n   * @param ProtocolType the protocol type\n   * @return NamedProtocolConfigFactory& for the ProtocolType\n   */\n  static NamedProtocolConfigFactory& getFactory(ProtocolType type) {\n    const std::string& name = ProtocolNames::get().fromType(type);\n    return Envoy::Config::Utility::getAndCheckFactoryByName<NamedProtocolConfigFactory>(name);\n  }\n};\n\n/**\n * ProtocolFactoryBase provides a template for a trivial NamedProtocolConfigFactory.\n */\ntemplate <class ProtocolImpl> class ProtocolFactoryBase : public NamedProtocolConfigFactory {\npublic:\n  ProtocolPtr createProtocol() override { return std::move(std::make_unique<ProtocolImpl>()); }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  ProtocolFactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  const std::string name_;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/protocol_converter.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"extensions/filters/network/thrift_proxy/decoder_events.h\"\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * ProtocolConverter is an abstract class that implements protocol-related methods on\n * DecoderEventHandler in terms of converting the decoded messages into a different protocol.\n */\nclass ProtocolConverter : public virtual DecoderEventHandler {\npublic:\n  ProtocolConverter() = default;\n  ~ProtocolConverter() override = default;\n\n  void initProtocolConverter(Protocol& proto, Buffer::Instance& buffer) {\n    proto_ = &proto;\n    buffer_ = &buffer;\n  }\n\n  // DecoderEventHandler\n  FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override {\n    proto_->writeMessageBegin(*buffer_, *metadata);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus messageEnd() override {\n    proto_->writeMessageEnd(*buffer_);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus structBegin(absl::string_view name) override {\n    proto_->writeStructBegin(*buffer_, std::string(name));\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus structEnd() override {\n    proto_->writeFieldBegin(*buffer_, \"\", FieldType::Stop, 0);\n    proto_->writeStructEnd(*buffer_);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus fieldBegin(absl::string_view name, FieldType& field_type,\n                          int16_t& field_id) override {\n    proto_->writeFieldBegin(*buffer_, std::string(name), field_type, field_id);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus fieldEnd() override {\n    proto_->writeFieldEnd(*buffer_);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus boolValue(bool& value) override {\n    proto_->writeBool(*buffer_, value);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus byteValue(uint8_t& 
value) override {\n    proto_->writeByte(*buffer_, value);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus int16Value(int16_t& value) override {\n    proto_->writeInt16(*buffer_, value);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus int32Value(int32_t& value) override {\n    proto_->writeInt32(*buffer_, value);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus int64Value(int64_t& value) override {\n    proto_->writeInt64(*buffer_, value);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus doubleValue(double& value) override {\n    proto_->writeDouble(*buffer_, value);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus stringValue(absl::string_view value) override {\n    proto_->writeString(*buffer_, std::string(value));\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus mapBegin(FieldType& key_type, FieldType& value_type, uint32_t& size) override {\n    proto_->writeMapBegin(*buffer_, key_type, value_type, size);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus mapEnd() override {\n    proto_->writeMapEnd(*buffer_);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus listBegin(FieldType& elem_type, uint32_t& size) override {\n    proto_->writeListBegin(*buffer_, elem_type, size);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus listEnd() override {\n    proto_->writeListEnd(*buffer_);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus setBegin(FieldType& elem_type, uint32_t& size) override {\n    proto_->writeSetBegin(*buffer_, elem_type, size);\n    return FilterStatus::Continue;\n  }\n\n  FilterStatus setEnd() override {\n    proto_->writeSetEnd(*buffer_);\n    return FilterStatus::Continue;\n  }\n\nprivate:\n  Protocol* proto_;\n  Buffer::Instance* buffer_{};\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/router/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \":router_lib\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:factory_base_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:filter_config_interface\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:well_known_names\",\n        \"@envoy_api//envoy/config/filter/thrift/router/v2alpha1:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_interface\",\n    hdrs = [\"router.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/router:router_interface\",\n        \"//source/extensions/filters/network/thrift_proxy:metadata_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_ratelimit_interface\",\n    hdrs = [\"router_ratelimit.h\"],\n    deps = [\n        \":router_interface\",\n        \"//include/envoy/router:router_ratelimit_interface\",\n        \"//source/extensions/filters/network/thrift_proxy:metadata_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_lib\",\n    srcs = [\"router_impl.cc\"],\n    hdrs = [\"router_impl.h\"],\n    deps = [\n        \":router_interface\",\n        \":router_ratelimit_lib\",\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:load_balancer_interface\",\n        \"//include/envoy/upstream:thread_local_cluster_interface\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/http:header_utility_lib\",\n        
\"//source/common/router:metadatamatchcriteria_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/thrift_proxy:app_exception_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:conn_manager_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:protocol_converter_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:protocol_interface\",\n        \"//source/extensions/filters/network/thrift_proxy:thrift_object_interface\",\n        \"//source/extensions/filters/network/thrift_proxy:transport_interface\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:filter_interface\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"router_ratelimit_lib\",\n    srcs = [\"router_ratelimit_impl.cc\"],\n    hdrs = [\"router_ratelimit_impl.h\"],\n    deps = [\n        \":router_interface\",\n        \":router_ratelimit_interface\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:metadata_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/router/config.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/router/config.h\"\n\n#include \"envoy/config/filter/thrift/router/v2alpha1/router.pb.h\"\n#include \"envoy/config/filter/thrift/router/v2alpha1/router.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/network/thrift_proxy/router/router_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\n\nThriftFilters::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::config::filter::thrift::router::v2alpha1::Router& proto_config,\n    const std::string& stat_prefix, Server::Configuration::FactoryContext& context) {\n  UNREFERENCED_PARAMETER(proto_config);\n\n  return [&context, stat_prefix](ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addDecoderFilter(\n        std::make_shared<Router>(context.clusterManager(), stat_prefix, context.scope()));\n  };\n}\n\n/**\n * Static registration for the router filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(RouterFilterConfig, ThriftFilters::NamedThriftFilterConfigFactory);\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/router/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/filter/thrift/router/v2alpha1/router.pb.h\"\n#include \"envoy/config/filter/thrift/router/v2alpha1/router.pb.validate.h\"\n\n#include \"extensions/filters/network/thrift_proxy/filters/factory_base.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\n\nclass RouterFilterConfig\n    : public ThriftFilters::FactoryBase<envoy::config::filter::thrift::router::v2alpha1::Router> {\npublic:\n  RouterFilterConfig() : FactoryBase(ThriftFilters::ThriftFilterNames::get().ROUTER) {}\n\nprivate:\n  ThriftFilters::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::config::filter::thrift::router::v2alpha1::Router& proto_config,\n      const std::string& stat_prefix, Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/router/router.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/router/router.h\"\n\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\n\nclass RateLimitPolicy;\n\n/**\n * RouteEntry is an individual resolved route entry.\n */\nclass RouteEntry {\npublic:\n  virtual ~RouteEntry() = default;\n\n  /**\n   * @return const std::string& the upstream cluster that owns the route.\n   */\n  virtual const std::string& clusterName() const PURE;\n\n  /**\n   * @return MetadataMatchCriteria* the metadata that a subset load balancer should match when\n   * selecting an upstream host\n   */\n  virtual const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const PURE;\n\n  /**\n   * @return const RateLimitPolicy& the rate limit policy for the route.\n   */\n  virtual const RateLimitPolicy& rateLimitPolicy() const PURE;\n\n  /**\n   * @return bool should the service name prefix be stripped from the method.\n   */\n  virtual bool stripServiceName() const PURE;\n\n  /**\n   * @return const Http::LowerCaseString& the header used to determine the cluster.\n   */\n  virtual const Http::LowerCaseString& clusterHeader() const PURE;\n};\n\n/**\n * Route holds the RouteEntry for a request.\n */\nclass Route {\npublic:\n  virtual ~Route() = default;\n\n  /**\n   * @return the route entry or nullptr if there is no matching route for the request.\n   */\n  virtual const RouteEntry* routeEntry() const PURE;\n};\n\nusing RouteConstSharedPtr = std::shared_ptr<const Route>;\n\n/**\n * The router configuration.\n */\nclass Config {\npublic:\n  virtual ~Config() = default;\n\n  /**\n   * Based on the incoming Thrift request transport and/or protocol data, determine the target\n   * route for the request.\n   * @param metadata MessageMetadata for the message to route\n   * @param random_value uint64_t used to select cluster 
affinity\n   * @return the route or nullptr if there is no matching route for the request.\n   */\n  virtual RouteConstSharedPtr route(const MessageMetadata& metadata,\n                                    uint64_t random_value) const PURE;\n};\n\nusing ConfigConstSharedPtr = std::shared_ptr<const Config>;\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/router/router_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/router/router_impl.h\"\n\n#include <memory>\n\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/route.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/thread_local_cluster.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/router/metadatamatchcriteria_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\n\nRouteEntryImplBase::RouteEntryImplBase(\n    const envoy::extensions::filters::network::thrift_proxy::v3::Route& route)\n    : cluster_name_(route.route().cluster()),\n      config_headers_(Http::HeaderUtility::buildHeaderDataVector(route.match().headers())),\n      rate_limit_policy_(route.route().rate_limits()),\n      strip_service_name_(route.route().strip_service_name()),\n      cluster_header_(route.route().cluster_header()) {\n  if (route.route().has_metadata_match()) {\n    const auto filter_it = route.route().metadata_match().filter_metadata().find(\n        Envoy::Config::MetadataFilters::get().ENVOY_LB);\n    if (filter_it != route.route().metadata_match().filter_metadata().end()) {\n      metadata_match_criteria_ =\n          std::make_unique<Envoy::Router::MetadataMatchCriteriaImpl>(filter_it->second);\n    }\n  }\n\n  if (route.route().cluster_specifier_case() ==\n      envoy::extensions::filters::network::thrift_proxy::v3::RouteAction::ClusterSpecifierCase::\n          kWeightedClusters) {\n\n    total_cluster_weight_ = 0UL;\n    for (const auto& cluster : route.route().weighted_clusters().clusters()) {\n      std::unique_ptr<WeightedClusterEntry> cluster_entry(new WeightedClusterEntry(*this, cluster));\n      weighted_clusters_.emplace_back(std::move(cluster_entry));\n      
total_cluster_weight_ += weighted_clusters_.back()->clusterWeight();\n    }\n  }\n}\n\nconst std::string& RouteEntryImplBase::clusterName() const { return cluster_name_; }\n\nconst RouteEntry* RouteEntryImplBase::routeEntry() const { return this; }\n\nRouteConstSharedPtr RouteEntryImplBase::clusterEntry(uint64_t random_value,\n                                                     const MessageMetadata& metadata) const {\n  if (!weighted_clusters_.empty()) {\n    return WeightedClusterUtil::pickCluster(weighted_clusters_, total_cluster_weight_, random_value,\n                                            false);\n  }\n\n  const auto& cluster_header = clusterHeader();\n  if (!cluster_header.get().empty()) {\n    const auto& headers = metadata.headers();\n    const auto* entry = headers.get(cluster_header);\n    if (entry != nullptr) {\n      return std::make_shared<DynamicRouteEntry>(*this, entry->value().getStringView());\n    }\n\n    return nullptr;\n  }\n\n  return shared_from_this();\n}\n\nbool RouteEntryImplBase::headersMatch(const Http::HeaderMap& headers) const {\n  return Http::HeaderUtility::matchHeaders(headers, config_headers_);\n}\n\nRouteEntryImplBase::WeightedClusterEntry::WeightedClusterEntry(\n    const RouteEntryImplBase& parent,\n    const envoy::extensions::filters::network::thrift_proxy::v3::WeightedCluster::ClusterWeight&\n        cluster)\n    : parent_(parent), cluster_name_(cluster.name()),\n      cluster_weight_(PROTOBUF_GET_WRAPPED_REQUIRED(cluster, weight)) {\n  if (cluster.has_metadata_match()) {\n    const auto filter_it = cluster.metadata_match().filter_metadata().find(\n        Envoy::Config::MetadataFilters::get().ENVOY_LB);\n    if (filter_it != cluster.metadata_match().filter_metadata().end()) {\n      if (parent.metadata_match_criteria_) {\n        metadata_match_criteria_ =\n            parent.metadata_match_criteria_->mergeMatchCriteria(filter_it->second);\n      } else {\n        metadata_match_criteria_ =\n            
std::make_unique<Envoy::Router::MetadataMatchCriteriaImpl>(filter_it->second);\n      }\n    }\n  }\n}\n\nMethodNameRouteEntryImpl::MethodNameRouteEntryImpl(\n    const envoy::extensions::filters::network::thrift_proxy::v3::Route& route)\n    : RouteEntryImplBase(route), method_name_(route.match().method_name()),\n      invert_(route.match().invert()) {\n  if (method_name_.empty() && invert_) {\n    throw EnvoyException(\"Cannot have an empty method name with inversion enabled\");\n  }\n}\n\nRouteConstSharedPtr MethodNameRouteEntryImpl::matches(const MessageMetadata& metadata,\n                                                      uint64_t random_value) const {\n  if (RouteEntryImplBase::headersMatch(metadata.headers())) {\n    bool matches =\n        method_name_.empty() || (metadata.hasMethodName() && metadata.methodName() == method_name_);\n\n    if (matches ^ invert_) {\n      return clusterEntry(random_value, metadata);\n    }\n  }\n\n  return nullptr;\n}\n\nServiceNameRouteEntryImpl::ServiceNameRouteEntryImpl(\n    const envoy::extensions::filters::network::thrift_proxy::v3::Route& route)\n    : RouteEntryImplBase(route), invert_(route.match().invert()) {\n  const std::string service_name = route.match().service_name();\n  if (service_name.empty() && invert_) {\n    throw EnvoyException(\"Cannot have an empty service name with inversion enabled\");\n  }\n\n  if (!service_name.empty() && !absl::EndsWith(service_name, \":\")) {\n    service_name_ = service_name + \":\";\n  } else {\n    service_name_ = service_name;\n  }\n}\n\nRouteConstSharedPtr ServiceNameRouteEntryImpl::matches(const MessageMetadata& metadata,\n                                                       uint64_t random_value) const {\n  if (RouteEntryImplBase::headersMatch(metadata.headers())) {\n    bool matches =\n        service_name_.empty() ||\n        (metadata.hasMethodName() && absl::StartsWith(metadata.methodName(), service_name_));\n\n    if (matches ^ invert_) {\n      return 
clusterEntry(random_value, metadata);\n    }\n  }\n\n  return nullptr;\n}\n\nRouteMatcher::RouteMatcher(\n    const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration& config) {\n  using envoy::extensions::filters::network::thrift_proxy::v3::RouteMatch;\n\n  for (const auto& route : config.routes()) {\n    switch (route.match().match_specifier_case()) {\n    case RouteMatch::MatchSpecifierCase::kMethodName:\n      routes_.emplace_back(new MethodNameRouteEntryImpl(route));\n      break;\n    case RouteMatch::MatchSpecifierCase::kServiceName:\n      routes_.emplace_back(new ServiceNameRouteEntryImpl(route));\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n}\n\nRouteConstSharedPtr RouteMatcher::route(const MessageMetadata& metadata,\n                                        uint64_t random_value) const {\n  for (const auto& route : routes_) {\n    RouteConstSharedPtr route_entry = route->matches(metadata, random_value);\n    if (nullptr != route_entry) {\n      return route_entry;\n    }\n  }\n\n  return nullptr;\n}\n\nvoid Router::onDestroy() {\n  if (upstream_request_ != nullptr) {\n    upstream_request_->resetStream();\n    cleanup();\n  }\n}\n\nvoid Router::setDecoderFilterCallbacks(ThriftFilters::DecoderFilterCallbacks& callbacks) {\n  callbacks_ = &callbacks;\n\n  // TODO(zuercher): handle buffer limits\n}\n\nFilterStatus Router::transportBegin(MessageMetadataSharedPtr metadata) {\n  UNREFERENCED_PARAMETER(metadata);\n  return FilterStatus::Continue;\n}\n\nFilterStatus Router::transportEnd() {\n  if (upstream_request_->metadata_->messageType() == MessageType::Oneway) {\n    // No response expected\n    upstream_request_->onResponseComplete();\n    cleanup();\n  }\n  return FilterStatus::Continue;\n}\n\nFilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) {\n  route_ = callbacks_->route();\n  if (!route_) {\n    ENVOY_STREAM_LOG(debug, \"no route match for method '{}'\", *callbacks_, 
metadata->methodName());\n    stats_.route_missing_.inc();\n    callbacks_->sendLocalReply(\n        AppException(AppExceptionType::UnknownMethod,\n                     fmt::format(\"no route for method '{}'\", metadata->methodName())),\n        true);\n    return FilterStatus::StopIteration;\n  }\n\n  route_entry_ = route_->routeEntry();\n  const std::string& cluster_name = route_entry_->clusterName();\n\n  Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name);\n  if (!cluster) {\n    ENVOY_STREAM_LOG(debug, \"unknown cluster '{}'\", *callbacks_, cluster_name);\n    stats_.unknown_cluster_.inc();\n    callbacks_->sendLocalReply(AppException(AppExceptionType::InternalError,\n                                            fmt::format(\"unknown cluster '{}'\", cluster_name)),\n                               true);\n    return FilterStatus::StopIteration;\n  }\n\n  cluster_ = cluster->info();\n  ENVOY_STREAM_LOG(debug, \"cluster '{}' match for method '{}'\", *callbacks_, cluster_name,\n                   metadata->methodName());\n\n  if (cluster_->maintenanceMode()) {\n    stats_.upstream_rq_maintenance_mode_.inc();\n    callbacks_->sendLocalReply(\n        AppException(AppExceptionType::InternalError,\n                     fmt::format(\"maintenance mode for cluster '{}'\", cluster_name)),\n        true);\n    return FilterStatus::StopIteration;\n  }\n\n  const std::shared_ptr<const ProtocolOptionsConfig> options =\n      cluster_->extensionProtocolOptionsTyped<ProtocolOptionsConfig>(\n          NetworkFilterNames::get().ThriftProxy);\n\n  const TransportType transport = options\n                                      ? options->transport(callbacks_->downstreamTransportType())\n                                      : callbacks_->downstreamTransportType();\n  ASSERT(transport != TransportType::Auto);\n\n  const ProtocolType protocol = options ? 
options->protocol(callbacks_->downstreamProtocolType())\n                                        : callbacks_->downstreamProtocolType();\n  ASSERT(protocol != ProtocolType::Auto);\n\n  Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster(\n      cluster_name, Upstream::ResourcePriority::Default, this);\n  if (!conn_pool) {\n    stats_.no_healthy_upstream_.inc();\n    callbacks_->sendLocalReply(\n        AppException(AppExceptionType::InternalError,\n                     fmt::format(\"no healthy upstream for '{}'\", cluster_name)),\n        true);\n    return FilterStatus::StopIteration;\n  }\n\n  ENVOY_STREAM_LOG(debug, \"router decoding request\", *callbacks_);\n\n  if (route_entry_->stripServiceName()) {\n    const auto& method = metadata->methodName();\n    const auto pos = method.find(':');\n    if (pos != std::string::npos) {\n      metadata->setMethodName(method.substr(pos + 1));\n    }\n  }\n\n  upstream_request_ =\n      std::make_unique<UpstreamRequest>(*this, *conn_pool, metadata, transport, protocol);\n  return upstream_request_->start();\n}\n\nFilterStatus Router::messageEnd() {\n  ProtocolConverter::messageEnd();\n\n  Buffer::OwnedImpl transport_buffer;\n\n  upstream_request_->metadata_->setProtocol(upstream_request_->protocol_->type());\n\n  upstream_request_->transport_->encodeFrame(transport_buffer, *upstream_request_->metadata_,\n                                             upstream_request_buffer_);\n  upstream_request_->conn_data_->connection().write(transport_buffer, false);\n  upstream_request_->onRequestComplete();\n  return FilterStatus::Continue;\n}\n\nvoid Router::onUpstreamData(Buffer::Instance& data, bool end_stream) {\n  ASSERT(!upstream_request_->response_complete_);\n\n  if (upstream_request_->upgrade_response_ != nullptr) {\n    ENVOY_STREAM_LOG(trace, \"reading upgrade response: {} bytes\", *callbacks_, data.length());\n    // Handle upgrade response.\n    if 
(!upstream_request_->upgrade_response_->onData(data)) {\n      // Wait for more data.\n      return;\n    }\n\n    ENVOY_STREAM_LOG(debug, \"upgrade response complete\", *callbacks_);\n    upstream_request_->protocol_->completeUpgrade(*upstream_request_->conn_state_,\n                                                  *upstream_request_->upgrade_response_);\n\n    upstream_request_->upgrade_response_.reset();\n    upstream_request_->onRequestStart(true);\n  } else {\n    ENVOY_STREAM_LOG(trace, \"reading response: {} bytes\", *callbacks_, data.length());\n\n    // Handle normal response.\n    if (!upstream_request_->response_started_) {\n      callbacks_->startUpstreamResponse(*upstream_request_->transport_,\n                                        *upstream_request_->protocol_);\n      upstream_request_->response_started_ = true;\n    }\n\n    ThriftFilters::ResponseStatus status = callbacks_->upstreamData(data);\n    if (status == ThriftFilters::ResponseStatus::Complete) {\n      ENVOY_STREAM_LOG(debug, \"response complete\", *callbacks_);\n      upstream_request_->onResponseComplete();\n      cleanup();\n      return;\n    } else if (status == ThriftFilters::ResponseStatus::Reset) {\n      ENVOY_STREAM_LOG(debug, \"upstream reset\", *callbacks_);\n      upstream_request_->resetStream();\n      return;\n    }\n  }\n\n  if (end_stream) {\n    // Response is incomplete, but no more data is coming.\n    ENVOY_STREAM_LOG(debug, \"response underflow\", *callbacks_);\n    upstream_request_->onResponseComplete();\n    upstream_request_->onResetStream(ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n    cleanup();\n  }\n}\n\nvoid Router::onEvent(Network::ConnectionEvent event) {\n  ASSERT(upstream_request_ && !upstream_request_->response_complete_);\n\n  switch (event) {\n  case Network::ConnectionEvent::RemoteClose:\n    ENVOY_STREAM_LOG(debug, \"upstream remote close\", *callbacks_);\n    
upstream_request_->onResetStream(ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n    break;\n  case Network::ConnectionEvent::LocalClose:\n    ENVOY_STREAM_LOG(debug, \"upstream local close\", *callbacks_);\n    upstream_request_->onResetStream(ConnectionPool::PoolFailureReason::LocalConnectionFailure);\n    break;\n  default:\n    // Connected is consumed by the connection pool.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  upstream_request_->releaseConnection(false);\n}\n\nconst Network::Connection* Router::downstreamConnection() const {\n  if (callbacks_ != nullptr) {\n    return callbacks_->connection();\n  }\n\n  return nullptr;\n}\n\nvoid Router::convertMessageBegin(MessageMetadataSharedPtr metadata) {\n  ProtocolConverter::messageBegin(metadata);\n}\n\nvoid Router::cleanup() { upstream_request_.reset(); }\n\nRouter::UpstreamRequest::UpstreamRequest(Router& parent, Tcp::ConnectionPool::Instance& pool,\n                                         MessageMetadataSharedPtr& metadata,\n                                         TransportType transport_type, ProtocolType protocol_type)\n    : parent_(parent), conn_pool_(pool), metadata_(metadata),\n      transport_(NamedTransportConfigFactory::getFactory(transport_type).createTransport()),\n      protocol_(NamedProtocolConfigFactory::getFactory(protocol_type).createProtocol()),\n      request_complete_(false), response_started_(false), response_complete_(false) {}\n\nRouter::UpstreamRequest::~UpstreamRequest() {\n  if (conn_pool_handle_) {\n    conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default);\n  }\n}\n\nFilterStatus Router::UpstreamRequest::start() {\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(*this);\n  if (handle) {\n    // Pause while we wait for a connection.\n    conn_pool_handle_ = handle;\n    return FilterStatus::StopIteration;\n  }\n\n  if (upgrade_response_ != nullptr) {\n    // Pause while we wait for an upgrade response.\n    return 
FilterStatus::StopIteration;\n  }\n\n  if (upstream_host_ == nullptr) {\n    return FilterStatus::StopIteration;\n  }\n\n  return FilterStatus::Continue;\n}\n\nvoid Router::UpstreamRequest::releaseConnection(const bool close) {\n  if (conn_pool_handle_) {\n    conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default);\n    conn_pool_handle_ = nullptr;\n  }\n\n  conn_state_ = nullptr;\n\n  // The event triggered by close will also release this connection so clear conn_data_ before\n  // closing.\n  auto conn_data = std::move(conn_data_);\n  if (close && conn_data != nullptr) {\n    conn_data->connection().close(Network::ConnectionCloseType::NoFlush);\n  }\n}\n\nvoid Router::UpstreamRequest::resetStream() { releaseConnection(true); }\n\nvoid Router::UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                                            Upstream::HostDescriptionConstSharedPtr host) {\n  conn_pool_handle_ = nullptr;\n\n  // Mimic an upstream reset.\n  onUpstreamHostSelected(host);\n  onResetStream(reason);\n}\n\nvoid Router::UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data,\n                                          Upstream::HostDescriptionConstSharedPtr host) {\n  // Only invoke continueDecoding if we'd previously stopped the filter chain.\n  bool continue_decoding = conn_pool_handle_ != nullptr;\n\n  onUpstreamHostSelected(host);\n  conn_data_ = std::move(conn_data);\n  conn_data_->addUpstreamCallbacks(parent_);\n  conn_pool_handle_ = nullptr;\n\n  conn_state_ = conn_data_->connectionStateTyped<ThriftConnectionState>();\n  if (conn_state_ == nullptr) {\n    conn_data_->setConnectionState(std::make_unique<ThriftConnectionState>());\n    conn_state_ = conn_data_->connectionStateTyped<ThriftConnectionState>();\n  }\n\n  if (protocol_->supportsUpgrade()) {\n    upgrade_response_ =\n        protocol_->attemptUpgrade(*transport_, *conn_state_, parent_.upstream_request_buffer_);\n    if 
(upgrade_response_ != nullptr) {\n      conn_data_->connection().write(parent_.upstream_request_buffer_, false);\n      return;\n    }\n  }\n\n  onRequestStart(continue_decoding);\n}\n\nvoid Router::UpstreamRequest::onRequestStart(bool continue_decoding) {\n  parent_.initProtocolConverter(*protocol_, parent_.upstream_request_buffer_);\n\n  metadata_->setSequenceId(conn_state_->nextSequenceId());\n  parent_.convertMessageBegin(metadata_);\n\n  if (continue_decoding) {\n    parent_.callbacks_->continueDecoding();\n  }\n}\n\nvoid Router::UpstreamRequest::onRequestComplete() { request_complete_ = true; }\n\nvoid Router::UpstreamRequest::onResponseComplete() {\n  response_complete_ = true;\n  conn_state_ = nullptr;\n  conn_data_.reset();\n}\n\nvoid Router::UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) {\n  upstream_host_ = host;\n}\n\nvoid Router::UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason reason) {\n  if (metadata_->messageType() == MessageType::Oneway) {\n    // For oneway requests, we should not attempt a response. 
Reset the downstream to signal\n    // an error.\n    parent_.callbacks_->resetDownstreamConnection();\n    return;\n  }\n\n  switch (reason) {\n  case ConnectionPool::PoolFailureReason::Overflow:\n    parent_.callbacks_->sendLocalReply(\n        AppException(AppExceptionType::InternalError,\n                     \"thrift upstream request: too many connections\"),\n        true);\n    break;\n  case ConnectionPool::PoolFailureReason::LocalConnectionFailure:\n    // Should only happen if we closed the connection, due to an error condition, in which case\n    // we've already handled any possible downstream response.\n    parent_.callbacks_->resetDownstreamConnection();\n    break;\n  case ConnectionPool::PoolFailureReason::RemoteConnectionFailure:\n  case ConnectionPool::PoolFailureReason::Timeout:\n    // TODO(zuercher): distinguish between these cases where appropriate (particularly timeout)\n    if (!response_started_) {\n      parent_.callbacks_->sendLocalReply(\n          AppException(\n              AppExceptionType::InternalError,\n              fmt::format(\"connection failure '{}'\", (upstream_host_ != nullptr)\n                                                         ? upstream_host_->address()->asString()\n                                                         : \"to upstream\")),\n          true);\n      return;\n    }\n\n    // Error occurred after a partial response, propagate the reset to the downstream.\n    parent_.callbacks_->resetDownstreamConnection();\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/router/router_impl.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/route.pb.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/tcp/conn_pool.h\"\n#include \"envoy/upstream/load_balancer.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/conn_manager.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/filter.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/thrift_object.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\n\nclass RouteEntryImplBase : public RouteEntry,\n                           public Route,\n                           public std::enable_shared_from_this<RouteEntryImplBase> {\npublic:\n  RouteEntryImplBase(const envoy::extensions::filters::network::thrift_proxy::v3::Route& route);\n\n  // Router::RouteEntry\n  const std::string& clusterName() const override;\n  const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override {\n    return metadata_match_criteria_.get();\n  }\n  const RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; }\n  bool stripServiceName() const override { return strip_service_name_; };\n  const Http::LowerCaseString& clusterHeader() const override { return cluster_header_; }\n\n  // Router::Route\n  const RouteEntry* routeEntry() const override;\n\n  virtual RouteConstSharedPtr matches(const MessageMetadata& metadata,\n                                      uint64_t random_value) const 
PURE;\n\nprotected:\n  RouteConstSharedPtr clusterEntry(uint64_t random_value, const MessageMetadata& metadata) const;\n  bool headersMatch(const Http::HeaderMap& headers) const;\n\nprivate:\n  class WeightedClusterEntry : public RouteEntry, public Route {\n  public:\n    WeightedClusterEntry(\n        const RouteEntryImplBase& parent,\n        const envoy::extensions::filters::network::thrift_proxy::v3::WeightedCluster::ClusterWeight&\n            cluster);\n\n    uint64_t clusterWeight() const { return cluster_weight_; }\n\n    // Router::RouteEntry\n    const std::string& clusterName() const override { return cluster_name_; }\n    const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override {\n      if (metadata_match_criteria_) {\n        return metadata_match_criteria_.get();\n      }\n\n      return parent_.metadataMatchCriteria();\n    }\n    const RateLimitPolicy& rateLimitPolicy() const override { return parent_.rateLimitPolicy(); }\n    bool stripServiceName() const override { return parent_.stripServiceName(); }\n    const Http::LowerCaseString& clusterHeader() const override { return parent_.clusterHeader(); }\n\n    // Router::Route\n    const RouteEntry* routeEntry() const override { return this; }\n\n  private:\n    const RouteEntryImplBase& parent_;\n    const std::string cluster_name_;\n    const uint64_t cluster_weight_;\n    Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_;\n  };\n  using WeightedClusterEntrySharedPtr = std::shared_ptr<WeightedClusterEntry>;\n\n  class DynamicRouteEntry : public RouteEntry, public Route {\n  public:\n    DynamicRouteEntry(const RouteEntryImplBase& parent, absl::string_view cluster_name)\n        : parent_(parent), cluster_name_(std::string(cluster_name)) {}\n\n    // Router::RouteEntry\n    const std::string& clusterName() const override { return cluster_name_; }\n    const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override {\n      return 
parent_.metadataMatchCriteria();\n    }\n    const RateLimitPolicy& rateLimitPolicy() const override { return parent_.rateLimitPolicy(); }\n    bool stripServiceName() const override { return parent_.stripServiceName(); }\n    const Http::LowerCaseString& clusterHeader() const override { return parent_.clusterHeader(); }\n\n    // Router::Route\n    const RouteEntry* routeEntry() const override { return this; }\n\n  private:\n    const RouteEntryImplBase& parent_;\n    const std::string cluster_name_;\n  };\n\n  const std::string cluster_name_;\n  const std::vector<Http::HeaderUtility::HeaderDataPtr> config_headers_;\n  std::vector<WeightedClusterEntrySharedPtr> weighted_clusters_;\n  uint64_t total_cluster_weight_;\n  Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_;\n  const RateLimitPolicyImpl rate_limit_policy_;\n  const bool strip_service_name_;\n  const Http::LowerCaseString cluster_header_;\n};\n\nusing RouteEntryImplBaseConstSharedPtr = std::shared_ptr<const RouteEntryImplBase>;\n\nclass MethodNameRouteEntryImpl : public RouteEntryImplBase {\npublic:\n  MethodNameRouteEntryImpl(\n      const envoy::extensions::filters::network::thrift_proxy::v3::Route& route);\n\n  // RouteEntryImplBase\n  RouteConstSharedPtr matches(const MessageMetadata& metadata,\n                              uint64_t random_value) const override;\n\nprivate:\n  const std::string method_name_;\n  const bool invert_;\n};\n\nclass ServiceNameRouteEntryImpl : public RouteEntryImplBase {\npublic:\n  ServiceNameRouteEntryImpl(\n      const envoy::extensions::filters::network::thrift_proxy::v3::Route& route);\n\n  // RouteEntryImplBase\n  RouteConstSharedPtr matches(const MessageMetadata& metadata,\n                              uint64_t random_value) const override;\n\nprivate:\n  std::string service_name_;\n  const bool invert_;\n};\n\nclass RouteMatcher {\npublic:\n  RouteMatcher(const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration&);\n\n  
RouteConstSharedPtr route(const MessageMetadata& metadata, uint64_t random_value) const;\n\nprivate:\n  std::vector<RouteEntryImplBaseConstSharedPtr> routes_;\n};\n\n#define ALL_THRIFT_ROUTER_STATS(COUNTER, GAUGE, HISTOGRAM)                                         \\\n  COUNTER(route_missing)                                                                           \\\n  COUNTER(unknown_cluster)                                                                         \\\n  COUNTER(upstream_rq_maintenance_mode)                                                            \\\n  COUNTER(no_healthy_upstream)\n\nstruct RouterStats {\n  ALL_THRIFT_ROUTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\nclass Router : public Tcp::ConnectionPool::UpstreamCallbacks,\n               public Upstream::LoadBalancerContextBase,\n               public ProtocolConverter,\n               public ThriftFilters::DecoderFilter,\n               Logger::Loggable<Logger::Id::thrift> {\npublic:\n  Router(Upstream::ClusterManager& cluster_manager, const std::string& stat_prefix,\n         Stats::Scope& scope)\n      : cluster_manager_(cluster_manager), stats_(generateStats(stat_prefix, scope)) {}\n\n  ~Router() override = default;\n\n  // ThriftFilters::DecoderFilter\n  void onDestroy() override;\n  void setDecoderFilterCallbacks(ThriftFilters::DecoderFilterCallbacks& callbacks) override;\n\n  // ProtocolConverter\n  FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override;\n  FilterStatus transportEnd() override;\n  FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override;\n  FilterStatus messageEnd() override;\n\n  // Upstream::LoadBalancerContext\n  const Network::Connection* downstreamConnection() const override;\n  const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override {\n    if (route_entry_) {\n      return route_entry_->metadataMatchCriteria();\n    }\n    return nullptr;\n  }\n\n  // 
Tcp::ConnectionPool::UpstreamCallbacks\n  void onUpstreamData(Buffer::Instance& data, bool end_stream) override;\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\nprivate:\n  struct UpstreamRequest : public Tcp::ConnectionPool::Callbacks {\n    UpstreamRequest(Router& parent, Tcp::ConnectionPool::Instance& pool,\n                    MessageMetadataSharedPtr& metadata, TransportType transport_type,\n                    ProtocolType protocol_type);\n    ~UpstreamRequest() override;\n\n    FilterStatus start();\n    void resetStream();\n    void releaseConnection(bool close);\n\n    // Tcp::ConnectionPool::Callbacks\n    void onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                       Upstream::HostDescriptionConstSharedPtr host) override;\n    void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn,\n                     Upstream::HostDescriptionConstSharedPtr host) override;\n\n    void onRequestStart(bool continue_decoding);\n    void onRequestComplete();\n    void onResponseComplete();\n    void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host);\n    void onResetStream(ConnectionPool::PoolFailureReason reason);\n\n    Router& parent_;\n    Tcp::ConnectionPool::Instance& conn_pool_;\n    MessageMetadataSharedPtr metadata_;\n\n    Tcp::ConnectionPool::Cancellable* conn_pool_handle_{};\n    Tcp::ConnectionPool::ConnectionDataPtr conn_data_;\n    Upstream::HostDescriptionConstSharedPtr upstream_host_;\n    ThriftConnectionState* conn_state_{};\n    TransportPtr transport_;\n    ProtocolPtr protocol_;\n    ThriftObjectPtr upgrade_response_;\n\n    bool request_complete_ : 1;\n    bool response_started_ : 1;\n    bool response_complete_ : 1;\n  };\n\n  void convertMessageBegin(MessageMetadataSharedPtr metadata);\n  void cleanup();\n  RouterStats generateStats(const std::string& prefix, Stats::Scope& scope) 
{\n    return RouterStats{ALL_THRIFT_ROUTER_STATS(POOL_COUNTER_PREFIX(scope, prefix),\n                                               POOL_GAUGE_PREFIX(scope, prefix),\n                                               POOL_HISTOGRAM_PREFIX(scope, prefix))};\n  }\n\n  Upstream::ClusterManager& cluster_manager_;\n  RouterStats stats_;\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks_{};\n  RouteConstSharedPtr route_{};\n  const RouteEntry* route_entry_{};\n  Upstream::ClusterInfoConstSharedPtr cluster_;\n\n  std::unique_ptr<UpstreamRequest> upstream_request_;\n  Buffer::OwnedImpl upstream_request_buffer_;\n};\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/router/router_ratelimit.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/ratelimit/ratelimit.h\"\n\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\n\n/**\n * Base interface for generic rate limit action.\n */\nclass RateLimitAction {\npublic:\n  virtual ~RateLimitAction() = default;\n\n  /**\n   * Potentially append a descriptor entry to the end of descriptor.\n   * @param route supplies the target route for the request.\n   * @param descriptor supplies the descriptor to optionally fill.\n   * @param local_service_cluster supplies the name of the local service cluster.\n   * @param metadata supplies the message metadata for the request.\n   * @param remote_address supplies the trusted downstream address for the connection.\n   * @return true if the RateLimitAction populated the descriptor.\n   */\n  virtual bool populateDescriptor(const RouteEntry& route, RateLimit::Descriptor& descriptor,\n                                  const std::string& local_service_cluster,\n                                  const MessageMetadata& metadata,\n                                  const Network::Address::Instance& remote_address) const PURE;\n};\n\nusing RateLimitActionPtr = std::unique_ptr<RateLimitAction>;\n\n/**\n * Rate limit configuration.\n */\nclass RateLimitPolicyEntry {\npublic:\n  virtual ~RateLimitPolicyEntry() = default;\n\n  /**\n   * @return the stage value that the configuration is applicable to.\n   */\n  virtual uint32_t stage() const PURE;\n\n  /**\n   * @return runtime key to be set to disable the configuration.\n   */\n  virtual const std::string& disableKey() const PURE;\n\n  /**\n   * Potentially populate the descriptor array with new descriptors to query.\n   * 
@param route supplies the target route for the request.\n   * @param descriptors supplies the descriptor array to optionally fill.\n   * @param local_service_cluster supplies the name of the local service cluster.\n   * @param metadata supplies the message metadata for the request.\n   * @param remote_address supplies the trusted downstream address for the connection.\n   */\n  virtual void populateDescriptors(const RouteEntry& route,\n                                   std::vector<RateLimit::Descriptor>& descriptors,\n                                   const std::string& local_service_cluster,\n                                   const MessageMetadata& metadata,\n                                   const Network::Address::Instance& remote_address) const PURE;\n};\n\n/**\n * Rate limiting policy.\n */\nclass RateLimitPolicy {\npublic:\n  virtual ~RateLimitPolicy() = default;\n\n  /**\n   * @return true if there is no rate limit policy for all stage settings.\n   */\n  virtual bool empty() const PURE;\n\n  /**\n   * @param stage the value for finding applicable rate limit configurations.\n   * @return set of RateLimitPolicyEntry that are applicable for a stage.\n   */\n  virtual const std::vector<std::reference_wrapper<const RateLimitPolicyEntry>>&\n  getApplicableRateLimit(uint32_t stage) const PURE;\n};\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/ratelimit/ratelimit.h\"\n\n#include \"extensions/filters/network/thrift_proxy/router/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\n\nbool SourceClusterAction::populateDescriptor(const RouteEntry&, RateLimit::Descriptor& descriptor,\n                                             const std::string& local_service_cluster,\n                                             const MessageMetadata&,\n                                             const Network::Address::Instance&) const {\n  descriptor.entries_.push_back({\"source_cluster\", local_service_cluster});\n  return true;\n}\n\nbool DestinationClusterAction::populateDescriptor(const RouteEntry& route,\n                                                  RateLimit::Descriptor& descriptor,\n                                                  const std::string&, const MessageMetadata&,\n                                                  const Network::Address::Instance&) const {\n  descriptor.entries_.push_back({\"destination_cluster\", route.clusterName()});\n  return true;\n}\n\nbool RequestHeadersAction::populateDescriptor(const RouteEntry&, RateLimit::Descriptor& descriptor,\n                                              const std::string&, const MessageMetadata& metadata,\n                                              const Network::Address::Instance&) const {\n  if (use_method_name_) {\n    if (!metadata.hasMethodName()) {\n      return false;\n    }\n\n    descriptor.entries_.push_back({descriptor_key_, metadata.methodName()});\n    return true;\n  }\n\n  const Http::HeaderEntry* header_value = metadata.headers().get(header_name_);\n  if (!header_value) {\n    return false;\n  }\n\n  descriptor.entries_.push_back(\n      
{descriptor_key_, std::string(header_value->value().getStringView())});\n  return true;\n}\n\nbool RemoteAddressAction::populateDescriptor(\n    const RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&,\n    const MessageMetadata&, const Network::Address::Instance& remote_address) const {\n  if (remote_address.type() != Network::Address::Type::Ip) {\n    return false;\n  }\n\n  descriptor.entries_.push_back({\"remote_address\", remote_address.ip()->addressAsString()});\n  return true;\n}\n\nbool GenericKeyAction::populateDescriptor(const RouteEntry&, RateLimit::Descriptor& descriptor,\n                                          const std::string&, const MessageMetadata&,\n                                          const Network::Address::Instance&) const {\n  descriptor.entries_.push_back({\"generic_key\", descriptor_value_});\n  return true;\n}\n\nHeaderValueMatchAction::HeaderValueMatchAction(\n    const envoy::config::route::v3::RateLimit::Action::HeaderValueMatch& action)\n    : descriptor_value_(action.descriptor_value()),\n      expect_match_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(action, expect_match, true)),\n      action_headers_(Http::HeaderUtility::buildHeaderDataVector(action.headers())) {}\n\nbool HeaderValueMatchAction::populateDescriptor(const RouteEntry&,\n                                                RateLimit::Descriptor& descriptor,\n                                                const std::string&, const MessageMetadata& metadata,\n                                                const Network::Address::Instance&) const {\n  if (expect_match_ == Http::HeaderUtility::matchHeaders(metadata.headers(), action_headers_)) {\n    descriptor.entries_.push_back({\"header_match\", descriptor_value_});\n    return true;\n  } else {\n    return false;\n  }\n}\n\nRateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl(\n    const envoy::config::route::v3::RateLimit& config)\n    : disable_key_(config.disable_key()),\n      
stage_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, stage, 0)) {\n  for (const auto& action : config.actions()) {\n    switch (action.action_specifier_case()) {\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kSourceCluster:\n      actions_.emplace_back(new SourceClusterAction());\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kDestinationCluster:\n      actions_.emplace_back(new DestinationClusterAction());\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kRequestHeaders:\n      actions_.emplace_back(new RequestHeadersAction(action.request_headers()));\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kRemoteAddress:\n      actions_.emplace_back(new RemoteAddressAction());\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kGenericKey:\n      actions_.emplace_back(new GenericKeyAction(action.generic_key()));\n      break;\n    case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kHeaderValueMatch:\n      actions_.emplace_back(new HeaderValueMatchAction(action.header_value_match()));\n      break;\n    default:\n      throw EnvoyException(\n          absl::StrCat(\"unsupported RateLimit Action \", action.action_specifier_case()));\n    }\n  }\n}\n\nvoid RateLimitPolicyEntryImpl::populateDescriptors(\n    const RouteEntry& route, std::vector<RateLimit::Descriptor>& descriptors,\n    const std::string& local_service_cluster, const MessageMetadata& metadata,\n    const Network::Address::Instance& remote_address) const {\n  RateLimit::Descriptor descriptor;\n  bool result = true;\n  for (const RateLimitActionPtr& action : actions_) {\n    result = result && action->populateDescriptor(route, descriptor, local_service_cluster,\n                                                  metadata, remote_address);\n    if (!result) {\n      break;\n    }\n  }\n\n  if (result) {\n   
 descriptors.emplace_back(descriptor);\n  }\n}\n\nRateLimitPolicyImpl::RateLimitPolicyImpl(\n    const Protobuf::RepeatedPtrField<envoy::config::route::v3::RateLimit>& rate_limits)\n    : rate_limit_entries_reference_(RateLimitPolicyImpl::MAX_STAGE_NUMBER + 1) {\n  for (const auto& rate_limit : rate_limits) {\n    std::unique_ptr<RateLimitPolicyEntry> rate_limit_policy_entry(\n        new RateLimitPolicyEntryImpl(rate_limit));\n    uint32_t stage = rate_limit_policy_entry->stage();\n    ASSERT(stage < rate_limit_entries_reference_.size());\n    rate_limit_entries_reference_[stage].emplace_back(*rate_limit_policy_entry);\n    rate_limit_entries_.emplace_back(std::move(rate_limit_policy_entry));\n  }\n}\n\nconst std::vector<std::reference_wrapper<const Router::RateLimitPolicyEntry>>&\nRateLimitPolicyImpl::getApplicableRateLimit(uint32_t stage) const {\n  ASSERT(stage < rate_limit_entries_reference_.size());\n  return rate_limit_entries_reference_[stage];\n}\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/http/header_utility.h\"\n\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router_ratelimit.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\n\n/**\n * Action for source cluster rate limiting.\n */\nclass SourceClusterAction : public RateLimitAction {\npublic:\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const MessageMetadata& metadata,\n                          const Network::Address::Instance& remote_address) const override;\n};\n\n/**\n * Action for destination cluster rate limiting.\n */\nclass DestinationClusterAction : public RateLimitAction {\npublic:\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const MessageMetadata& metadata,\n                          const Network::Address::Instance& remote_address) const override;\n};\n\n/**\n * Action for request headers rate limiting.\n */\nclass RequestHeadersAction : public RateLimitAction {\npublic:\n  RequestHeadersAction(const envoy::config::route::v3::RateLimit::Action::RequestHeaders& action)\n      : header_name_(action.header_name()), descriptor_key_(action.descriptor_key()),\n        use_method_name_(header_name_ == Headers::get().MethodName) {}\n\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& 
local_service_cluster, const MessageMetadata& metadata,\n                          const Network::Address::Instance& remote_address) const override;\n\nprivate:\n  const Http::LowerCaseString header_name_;\n  const std::string descriptor_key_;\n  const bool use_method_name_;\n};\n\n/**\n * Action for remote address rate limiting.\n */\nclass RemoteAddressAction : public RateLimitAction {\npublic:\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const MessageMetadata& metadata,\n                          const Network::Address::Instance& remote_address) const override;\n};\n\n/**\n * Action for generic key rate limiting.\n */\nclass GenericKeyAction : public RateLimitAction {\npublic:\n  GenericKeyAction(const envoy::config::route::v3::RateLimit::Action::GenericKey& action)\n      : descriptor_value_(action.descriptor_value()) {}\n\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const MessageMetadata& metadata,\n                          const Network::Address::Instance& remote_address) const override;\n\nprivate:\n  const std::string descriptor_value_;\n};\n\n/**\n * Action for header value match rate limiting.\n */\nclass HeaderValueMatchAction : public RateLimitAction {\npublic:\n  HeaderValueMatchAction(\n      const envoy::config::route::v3::RateLimit::Action::HeaderValueMatch& action);\n\n  // Router::RateLimitAction\n  bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor,\n                          const std::string& local_service_cluster, const MessageMetadata& metadata,\n                          const Network::Address::Instance& remote_address) const override;\n\nprivate:\n  const std::string descriptor_value_;\n  const bool 
expect_match_;\n  const std::vector<Http::HeaderUtility::HeaderDataPtr> action_headers_;\n};\n\n/*\n * Implementation of RateLimitPolicyEntry that holds the action for the configuration.\n */\nclass RateLimitPolicyEntryImpl : public RateLimitPolicyEntry {\npublic:\n  RateLimitPolicyEntryImpl(const envoy::config::route::v3::RateLimit& config);\n\n  // Router::RateLimitPolicyEntry\n  uint32_t stage() const override { return stage_; }\n  const std::string& disableKey() const override { return disable_key_; }\n  void populateDescriptors(const Router::RouteEntry& route,\n                           std::vector<Envoy::RateLimit::Descriptor>& descriptors,\n                           const std::string& local_service_cluster,\n                           const MessageMetadata& metadata,\n                           const Network::Address::Instance& remote_address) const override;\n\nprivate:\n  const std::string disable_key_;\n  uint32_t stage_;\n  std::vector<RateLimitActionPtr> actions_;\n};\n\n/**\n * Implementation of RateLimitPolicy that reads from the JSON route config.\n */\nclass RateLimitPolicyImpl : public RateLimitPolicy {\npublic:\n  RateLimitPolicyImpl(\n      const Protobuf::RepeatedPtrField<envoy::config::route::v3::RateLimit>& rate_limits);\n\n  // Router::RateLimitPolicy\n  const std::vector<std::reference_wrapper<const RateLimitPolicyEntry>>&\n  getApplicableRateLimit(uint32_t stage = 0) const override;\n  bool empty() const override { return rate_limit_entries_.empty(); }\n\n  static constexpr uint32_t MAX_STAGE_NUMBER = 10;\n\nprivate:\n  std::vector<std::unique_ptr<RateLimitPolicyEntry>> rate_limit_entries_;\n  std::vector<std::vector<std::reference_wrapper<const RateLimitPolicyEntry>>>\n      rate_limit_entries_reference_;\n};\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/stats.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * All thrift filter stats. @see stats_macros.h\n */\n#define ALL_THRIFT_FILTER_STATS(COUNTER, GAUGE, HISTOGRAM)                                         \\\n  COUNTER(cx_destroy_local_with_active_rq)                                                         \\\n  COUNTER(cx_destroy_remote_with_active_rq)                                                        \\\n  COUNTER(request)                                                                                 \\\n  COUNTER(request_call)                                                                            \\\n  COUNTER(request_decoding_error)                                                                  \\\n  COUNTER(request_invalid_type)                                                                    \\\n  COUNTER(request_oneway)                                                                          \\\n  COUNTER(response)                                                                                \\\n  COUNTER(response_decoding_error)                                                                 \\\n  COUNTER(response_error)                                                                          \\\n  COUNTER(response_exception)                                                                      \\\n  COUNTER(response_invalid_type)                                                                   \\\n  COUNTER(response_reply)                                                                          \\\n  COUNTER(response_success)                                                                        \\\n  GAUGE(request_active, Accumulate)                                                                \\\n  HISTOGRAM(request_time_ms, Milliseconds)\n\n/**\n * Struct 
definition for all thrift proxy stats. @see stats_macros.h\n */\nstruct ThriftFilterStats {\n  ALL_THRIFT_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n\n  static ThriftFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return ThriftFilterStats{ALL_THRIFT_FILTER_STATS(POOL_COUNTER_PREFIX(scope, prefix),\n                                                     POOL_GAUGE_PREFIX(scope, prefix),\n                                                     POOL_HISTOGRAM_PREFIX(scope, prefix))};\n  }\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/thrift.h",
    "content": "#pragma once\n\n#include \"common/common/assert.h\"\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nenum class TransportType {\n  Framed,\n  Header,\n  Unframed,\n  Auto,\n\n  // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST TRANSPORT TYPE\n  LastTransportType = Auto,\n};\n\n/**\n * Names of available Transport implementations.\n */\nclass TransportNameValues {\npublic:\n  // Framed transport\n  const std::string FRAMED = \"framed\";\n\n  // Header transport\n  const std::string HEADER = \"header\";\n\n  // Unframed transport\n  const std::string UNFRAMED = \"unframed\";\n\n  // Auto-detection transport\n  const std::string AUTO = \"auto\";\n\n  const std::string& fromType(TransportType type) const {\n    switch (type) {\n    case TransportType::Framed:\n      return FRAMED;\n    case TransportType::Header:\n      return HEADER;\n    case TransportType::Unframed:\n      return UNFRAMED;\n    case TransportType::Auto:\n      return AUTO;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n};\n\nusing TransportNames = ConstSingleton<TransportNameValues>;\n\nenum class ProtocolType {\n  Binary,\n  LaxBinary,\n  Compact,\n  Twitter,\n  Auto,\n\n  // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST PROTOCOL TYPE\n  LastProtocolType = Auto,\n};\n\n/**\n * Names of available Protocol implementations.\n */\nclass ProtocolNameValues {\npublic:\n  // Binary protocol\n  const std::string BINARY = \"binary\";\n\n  // Lax Binary protocol\n  const std::string LAX_BINARY = \"binary/non-strict\";\n\n  // Compact protocol\n  const std::string COMPACT = \"compact\";\n\n  // Twitter protocol\n  const std::string TWITTER = \"twitter\";\n\n  // Auto-detection protocol\n  const std::string AUTO = \"auto\";\n\n  const std::string& fromType(ProtocolType type) const {\n    switch (type) {\n    case ProtocolType::Binary:\n      return BINARY;\n    case 
ProtocolType::LaxBinary:\n      return LAX_BINARY;\n    case ProtocolType::Compact:\n      return COMPACT;\n    case ProtocolType::Twitter:\n      return TWITTER;\n    case ProtocolType::Auto:\n      return AUTO;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n};\n\nusing ProtocolNames = ConstSingleton<ProtocolNameValues>;\n\n/**\n * Thrift protocol message types.\n * See https://github.com/apache/thrift/blob/master/lib/cpp/src/thrift/protocol/TProtocol.h\n */\nenum class MessageType {\n  Call = 1,\n  Reply = 2,\n  Exception = 3,\n  Oneway = 4,\n\n  // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST MESSAGE TYPE\n  LastMessageType = Oneway,\n};\n\n/**\n * Thrift protocol struct field types.\n * See https://github.com/apache/thrift/blob/master/lib/cpp/src/thrift/protocol/TProtocol.h\n */\nenum class FieldType {\n  Stop = 0,\n  Void = 1,\n  Bool = 2,\n  Byte = 3,\n  Double = 4,\n  I16 = 6,\n  I32 = 8,\n  I64 = 10,\n  String = 11,\n  Struct = 12,\n  Map = 13,\n  Set = 14,\n  List = 15,\n\n  // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST FIELD TYPE\n  LastFieldType = List,\n};\n\n/**\n * Thrift Application Exception types.\n * See https://github.com/apache/thrift/blob/master/doc/specs/thrift-rpc.md\n */\nenum class AppExceptionType {\n  Unknown = 0,\n  UnknownMethod = 1,\n  InvalidMessageType = 2,\n  WrongMethodName = 3,\n  BadSequenceId = 4,\n  MissingResult = 5,\n  InternalError = 6,\n  ProtocolError = 7,\n  InvalidTransform = 8,\n  InvalidProtocol = 9,\n  // FBThrift values.\n  // See https://github.com/facebook/fbthrift/blob/master/thrift/lib/cpp/TApplicationException.h#L52\n  UnsupportedClientType = 10,\n  LoadShedding = 11,\n  Timeout = 12,\n  InjectedFailure = 13,\n  ChecksumMismatch = 14,\n  Interruption = 15,\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/thrift_object.h",
    "content": "#pragma once\n\n#include <list>\n#include <memory>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/utility.h\"\n\n#include \"extensions/filters/network/thrift_proxy/thrift.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass ThriftBase;\n\n/**\n * ThriftValue is a field or container (list, set, or map) element.\n */\nclass ThriftValue {\npublic:\n  virtual ~ThriftValue() = default;\n\n  /**\n   * @return FieldType the type of this value\n   */\n  virtual FieldType type() const PURE;\n\n  /**\n   * @return const T& pointer to the value, provided that it can be cast to the given type\n   * @throw EnvoyException if the type T does not match the type\n   */\n  template <typename T> const T& getValueTyped() const {\n    // Use the Traits template to determine what FieldType the value must have to be cast to T\n    // and throw if the value's type doesn't match.\n    FieldType expected_field_type = Traits<T>::getFieldType();\n    if (expected_field_type != type()) {\n      ExceptionUtil::throwEnvoyException(fmt::format(\"expected field type {}, got {}\",\n                                                     static_cast<int>(expected_field_type),\n                                                     static_cast<int>(type())));\n    }\n\n    return *static_cast<const T*>(getValue());\n  }\n\nprotected:\n  /**\n   * @return void* pointing to the underlying value, to be dynamically cast in getValueTyped\n   */\n  virtual const void* getValue() const PURE;\n\nprivate:\n  /**\n   * Traits allows getValueTyped() to enforce that the field type is being cast to the desired type.\n   */\n  template <typename T> class Traits {\n  public:\n    // Compilation failures where T does not have a member getFieldType typically mean that\n    // getValueTyped was called with a type T that is not used to encode Thrift values.\n    // The specializations below 
encode the valid types for Thrift primitive types.\n    static FieldType getFieldType() { return T::getFieldType(); }\n  };\n};\n\n// Explicit specializations of ThriftValue::Types for primitive types.\ntemplate <> class ThriftValue::Traits<bool> {\npublic:\n  static FieldType getFieldType() { return FieldType::Bool; }\n};\n\ntemplate <> class ThriftValue::Traits<uint8_t> {\npublic:\n  static FieldType getFieldType() { return FieldType::Byte; }\n};\n\ntemplate <> class ThriftValue::Traits<int16_t> {\npublic:\n  static FieldType getFieldType() { return FieldType::I16; }\n};\n\ntemplate <> class ThriftValue::Traits<int32_t> {\npublic:\n  static FieldType getFieldType() { return FieldType::I32; }\n};\n\ntemplate <> class ThriftValue::Traits<int64_t> {\npublic:\n  static FieldType getFieldType() { return FieldType::I64; }\n};\n\ntemplate <> class ThriftValue::Traits<double> {\npublic:\n  static FieldType getFieldType() { return FieldType::Double; }\n};\n\ntemplate <> class ThriftValue::Traits<std::string> {\npublic:\n  static FieldType getFieldType() { return FieldType::String; }\n};\n\nusing ThriftValuePtr = std::unique_ptr<ThriftValue>;\nusing ThriftValuePtrList = std::list<ThriftValuePtr>;\nusing ThriftValuePtrPairList = std::list<std::pair<ThriftValuePtr, ThriftValuePtr>>;\n\n/**\n * ThriftField is a field within a ThriftStruct.\n */\nclass ThriftField {\npublic:\n  virtual ~ThriftField() = default;\n\n  /**\n   * @return FieldType this field's type\n   */\n  virtual FieldType fieldType() const PURE;\n\n  /**\n   * @return int16_t the field's identifier\n   */\n  virtual int16_t fieldId() const PURE;\n\n  /**\n   * @return const ThriftValue& containing the field's value\n   */\n  virtual const ThriftValue& getValue() const PURE;\n};\n\nusing ThriftFieldPtr = std::unique_ptr<ThriftField>;\nusing ThriftFieldPtrList = std::list<ThriftFieldPtr>;\n\n/**\n * ThriftListValue is an ordered list of ThriftValues.\n */\nclass ThriftListValue {\npublic:\n  virtual 
~ThriftListValue() = default;\n\n  /**\n   * @return const ThriftValuePtrList& containing the ThriftValues that comprise the list\n   */\n  virtual const ThriftValuePtrList& elements() const PURE;\n\n  /**\n   * @return FieldType of the underlying elements\n   */\n  virtual FieldType elementType() const PURE;\n\n  /**\n   * Used by ThriftValue::Traits to enforce type safety.\n   */\n  static FieldType getFieldType() { return FieldType::List; }\n};\n\n/**\n * ThriftSetValue is a set of ThriftValues, maintained in their original order.\n */\nclass ThriftSetValue {\npublic:\n  virtual ~ThriftSetValue() = default;\n\n  /**\n   * @return const ThriftValuePtrList& containing the ThriftValues that comprise the set\n   */\n  virtual const ThriftValuePtrList& elements() const PURE;\n\n  /**\n   * @return FieldType of the underlying elements\n   */\n  virtual FieldType elementType() const PURE;\n\n  /**\n   * Used by ThriftValue::Traits to enforce type safety.\n   */\n  static FieldType getFieldType() { return FieldType::Set; }\n};\n\n/**\n * ThriftMapValue is a map of pairs of ThriftValues, maintained in their original order.\n */\nclass ThriftMapValue {\npublic:\n  virtual ~ThriftMapValue() = default;\n\n  /**\n   * @return const ThriftValuePtrPairList& containing the ThriftValue key-value pairs that comprise\n   *         the map.\n   */\n  virtual const ThriftValuePtrPairList& elements() const PURE;\n\n  /**\n   * @return FieldType of the underlying keys\n   */\n  virtual FieldType keyType() const PURE;\n\n  /**\n   * @return FieldType of the underlying values\n   */\n  virtual FieldType valueType() const PURE;\n\n  /**\n   * Used by ThriftValue::Traits to enforce type safety.\n   */\n  static FieldType getFieldType() { return FieldType::Map; }\n};\n\n/**\n * ThriftStructValue is a sequence of ThriftFields.\n */\nclass ThriftStructValue {\npublic:\n  virtual ~ThriftStructValue() = default;\n\n  /**\n   * @return const ThriftFieldPtrList& containing the ThriftFields that 
comprise the struct.\n   */\n  virtual const ThriftFieldPtrList& fields() const PURE;\n\n  /**\n   * Used by ThriftValue::Traits to enforce type safety.\n   */\n  static FieldType getFieldType() { return FieldType::Struct; }\n};\n\n/**\n * ThriftObject is a ThriftStructValue that can be read from a Buffer::Instance.\n */\nclass ThriftObject : public ThriftStructValue {\npublic:\n  ~ThriftObject() override = default;\n\n  /*\n   * Consumes bytes from the buffer until a single complete Thrift struct has been consumed.\n   * @param buffer starting with a Thrift struct\n   * @return true when a single complete struct has been consumed; false if more data is needed to\n   *         complete decoding\n   * @throw EnvoyException if the struct is invalid\n   */\n  virtual bool onData(Buffer::Instance& buffer) PURE;\n};\n\nusing ThriftObjectPtr = std::unique_ptr<ThriftObject>;\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/thrift_object_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/thrift_object_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace {\n\nstd::unique_ptr<ThriftValueBase> makeValue(ThriftBase* parent, FieldType type) {\n  switch (type) {\n  case FieldType::Stop:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n\n  case FieldType::List:\n    return std::make_unique<ThriftListValueImpl>(parent);\n\n  case FieldType::Set:\n    return std::make_unique<ThriftSetValueImpl>(parent);\n\n  case FieldType::Map:\n    return std::make_unique<ThriftMapValueImpl>(parent);\n\n  case FieldType::Struct:\n    return std::make_unique<ThriftStructValueImpl>(parent);\n\n  default:\n    return std::make_unique<ThriftValueImpl>(parent, type);\n  }\n}\n\n} // namespace\n\nThriftBase::ThriftBase(ThriftBase* parent) : parent_(parent) {}\n\nFilterStatus ThriftBase::structBegin(absl::string_view name) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->structBegin(name);\n}\n\nFilterStatus ThriftBase::structEnd() {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->structEnd();\n}\n\nFilterStatus ThriftBase::fieldBegin(absl::string_view name, FieldType& field_type,\n                                    int16_t& field_id) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->fieldBegin(name, field_type, field_id);\n}\n\nFilterStatus ThriftBase::fieldEnd() {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->fieldEnd();\n}\n\nFilterStatus ThriftBase::boolValue(bool& value) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->boolValue(value);\n}\n\nFilterStatus ThriftBase::byteValue(uint8_t& value) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->byteValue(value);\n}\n\nFilterStatus ThriftBase::int16Value(int16_t& value) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->int16Value(value);\n}\n\nFilterStatus ThriftBase::int32Value(int32_t& value) {\n  ASSERT(delegate_ != nullptr);\n  return 
delegate_->int32Value(value);\n}\n\nFilterStatus ThriftBase::int64Value(int64_t& value) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->int64Value(value);\n}\n\nFilterStatus ThriftBase::doubleValue(double& value) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->doubleValue(value);\n}\n\nFilterStatus ThriftBase::stringValue(absl::string_view value) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->stringValue(value);\n}\n\nFilterStatus ThriftBase::mapBegin(FieldType& key_type, FieldType& value_type, uint32_t& size) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->mapBegin(key_type, value_type, size);\n}\n\nFilterStatus ThriftBase::mapEnd() {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->mapEnd();\n}\n\nFilterStatus ThriftBase::listBegin(FieldType& elem_type, uint32_t& size) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->listBegin(elem_type, size);\n}\n\nFilterStatus ThriftBase::listEnd() {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->listEnd();\n}\n\nFilterStatus ThriftBase::setBegin(FieldType& elem_type, uint32_t& size) {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->setBegin(elem_type, size);\n}\n\nFilterStatus ThriftBase::setEnd() {\n  ASSERT(delegate_ != nullptr);\n  return delegate_->setEnd();\n}\n\nvoid ThriftBase::delegateComplete() {\n  ASSERT(delegate_ != nullptr);\n  delegate_ = nullptr;\n}\n\nThriftFieldImpl::ThriftFieldImpl(ThriftStructValueImpl* parent, absl::string_view name,\n                                 FieldType field_type, int16_t field_id)\n    : ThriftBase(parent), name_(name), field_type_(field_type), field_id_(field_id) {\n  auto value = makeValue(this, field_type_);\n  delegate_ = value.get();\n  value_ = std::move(value);\n}\n\nFilterStatus ThriftFieldImpl::fieldEnd() {\n  if (delegate_) {\n    return delegate_->fieldEnd();\n  }\n\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftListValueImpl::listBegin(FieldType& elem_type, uint32_t& 
size) {\n  if (delegate_) {\n    return delegate_->listBegin(elem_type, size);\n  }\n\n  elem_type_ = elem_type;\n  remaining_ = size;\n\n  delegateComplete();\n\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftListValueImpl::listEnd() {\n  if (delegate_) {\n    return delegate_->listEnd();\n  }\n\n  ASSERT(remaining_ == 0);\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nvoid ThriftListValueImpl::delegateComplete() {\n  delegate_ = nullptr;\n\n  if (remaining_ == 0) {\n    return;\n  }\n\n  auto elem = makeValue(this, elem_type_);\n  delegate_ = elem.get();\n  elements_.push_back(std::move(elem));\n  remaining_--;\n}\n\nFilterStatus ThriftSetValueImpl::setBegin(FieldType& elem_type, uint32_t& size) {\n  if (delegate_) {\n    return delegate_->setBegin(elem_type, size);\n  }\n\n  elem_type_ = elem_type;\n  remaining_ = size;\n\n  delegateComplete();\n\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftSetValueImpl::setEnd() {\n  if (delegate_) {\n    return delegate_->setEnd();\n  }\n\n  ASSERT(remaining_ == 0);\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nvoid ThriftSetValueImpl::delegateComplete() {\n  delegate_ = nullptr;\n\n  if (remaining_ == 0) {\n    return;\n  }\n\n  auto elem = makeValue(this, elem_type_);\n  delegate_ = elem.get();\n  elements_.push_back(std::move(elem));\n  remaining_--;\n}\n\nFilterStatus ThriftMapValueImpl::mapBegin(FieldType& key_type, FieldType& elem_type,\n                                          uint32_t& size) {\n  if (delegate_) {\n    return delegate_->mapBegin(key_type, elem_type, size);\n  }\n\n  key_type_ = key_type;\n  elem_type_ = elem_type;\n  remaining_ = size;\n\n  delegateComplete();\n\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftMapValueImpl::mapEnd() {\n  if (delegate_) {\n    return delegate_->mapEnd();\n  }\n\n  ASSERT(remaining_ == 0);\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nvoid 
ThriftMapValueImpl::delegateComplete() {\n  delegate_ = nullptr;\n\n  if (remaining_ == 0) {\n    return;\n  }\n\n  // Prepare for first element's key.\n  if (elements_.empty()) {\n    auto key = makeValue(this, key_type_);\n    delegate_ = key.get();\n    elements_.emplace_back(std::move(key), nullptr);\n    return;\n  }\n\n  // Prepare for any element's value.\n  auto& elem = elements_.back();\n  if (elem.second == nullptr) {\n    auto value = makeValue(this, elem_type_);\n    delegate_ = value.get();\n    elem.second = std::move(value);\n\n    remaining_--;\n    return;\n  }\n\n  // Key-value pair completed, prepare for next key.\n  auto key = makeValue(this, key_type_);\n  delegate_ = key.get();\n  elements_.emplace_back(std::move(key), nullptr);\n}\n\nFilterStatus ThriftValueImpl::boolValue(bool& value) {\n  ASSERT(value_type_ == FieldType::Bool);\n  bool_value_ = value;\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftValueImpl::byteValue(uint8_t& value) {\n  ASSERT(value_type_ == FieldType::Byte);\n  byte_value_ = value;\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftValueImpl::int16Value(int16_t& value) {\n  ASSERT(value_type_ == FieldType::I16);\n  int16_value_ = value;\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftValueImpl::int32Value(int32_t& value) {\n  ASSERT(value_type_ == FieldType::I32);\n  int32_value_ = value;\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftValueImpl::int64Value(int64_t& value) {\n  ASSERT(value_type_ == FieldType::I64);\n  int64_value_ = value;\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftValueImpl::doubleValue(double& value) {\n  ASSERT(value_type_ == FieldType::Double);\n  double_value_ = value;\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nFilterStatus 
ThriftValueImpl::stringValue(absl::string_view value) {\n  ASSERT(value_type_ == FieldType::String);\n  string_value_ = std::string(value);\n  parent_->delegateComplete();\n  return FilterStatus::Continue;\n}\n\nconst void* ThriftValueImpl::getValue() const {\n  switch (value_type_) {\n  case FieldType::Bool:\n    return &bool_value_;\n  case FieldType::Byte:\n    return &byte_value_;\n  case FieldType::I16:\n    return &int16_value_;\n  case FieldType::I32:\n    return &int32_value_;\n  case FieldType::I64:\n    return &int64_value_;\n  case FieldType::Double:\n    return &double_value_;\n  case FieldType::String:\n    return &string_value_;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nFilterStatus ThriftStructValueImpl::structBegin(absl::string_view name) {\n  if (delegate_) {\n    return delegate_->structBegin(name);\n  }\n\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftStructValueImpl::structEnd() {\n  if (delegate_) {\n    return delegate_->structEnd();\n  }\n\n  if (parent_) {\n    parent_->delegateComplete();\n  }\n\n  return FilterStatus::Continue;\n}\n\nFilterStatus ThriftStructValueImpl::fieldBegin(absl::string_view name, FieldType& field_type,\n                                               int16_t& field_id) {\n  if (delegate_) {\n    return delegate_->fieldBegin(name, field_type, field_id);\n  }\n\n  if (field_type != FieldType::Stop) {\n    auto field = std::make_unique<ThriftFieldImpl>(this, name, field_type, field_id);\n    delegate_ = field.get();\n    fields_.emplace_back(std::move(field));\n  }\n\n  return FilterStatus::Continue;\n}\n\nThriftObjectImpl::ThriftObjectImpl(Transport& transport, Protocol& protocol)\n    : ThriftStructValueImpl(nullptr),\n      decoder_(std::make_unique<Decoder>(transport, protocol, *this)) {}\n\nbool ThriftObjectImpl::onData(Buffer::Instance& buffer) {\n  bool underflow = false;\n  auto result = decoder_->onData(buffer, underflow);\n  ASSERT(result == FilterStatus::Continue);\n\n  if 
(complete_) {\n    decoder_.reset();\n  }\n  return complete_;\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/thrift_object_impl.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/thrift_proxy/decoder.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/filter.h\"\n#include \"extensions/filters/network/thrift_proxy/thrift_object.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * ThriftBase is a base class for decoding Thrift objects. It implements methods from\n * DecoderEventHandler to automatically delegate to an underlying ThriftBase so that, for example,\n * the fieldBegin call for a struct field nested within a list is automatically delegated down the\n * object hierarchy to the correct ThriftBase subclass.\n */\nclass ThriftBase : public DecoderEventHandler {\npublic:\n  ThriftBase(ThriftBase* parent);\n  ~ThriftBase() override = default;\n\n  // DecoderEventHandler\n  FilterStatus transportBegin(MessageMetadataSharedPtr) override { return FilterStatus::Continue; }\n  FilterStatus transportEnd() override { return FilterStatus::Continue; }\n  FilterStatus messageBegin(MessageMetadataSharedPtr) override { return FilterStatus::Continue; }\n  FilterStatus messageEnd() override { return FilterStatus::Continue; }\n  FilterStatus structBegin(absl::string_view name) override;\n  FilterStatus structEnd() override;\n  FilterStatus fieldBegin(absl::string_view name, FieldType& field_type,\n                          int16_t& field_id) override;\n  FilterStatus fieldEnd() override;\n  FilterStatus boolValue(bool& value) override;\n  FilterStatus byteValue(uint8_t& value) override;\n  FilterStatus int16Value(int16_t& value) override;\n  FilterStatus int32Value(int32_t& value) override;\n  FilterStatus int64Value(int64_t& value) override;\n  FilterStatus doubleValue(double& value) override;\n  FilterStatus stringValue(absl::string_view value) override;\n  FilterStatus mapBegin(FieldType& key_type, FieldType& value_type, uint32_t& size) override;\n  FilterStatus mapEnd() override;\n  FilterStatus 
listBegin(FieldType& elem_type, uint32_t& size) override;\n  FilterStatus listEnd() override;\n  FilterStatus setBegin(FieldType& elem_type, uint32_t& size) override;\n  FilterStatus setEnd() override;\n\n  // Invoked when the current delegate is complete. Completion implies that the delegate is fully\n  // specified (all list values processed, all struct fields processed, etc).\n  virtual void delegateComplete();\n\nprotected:\n  ThriftBase* parent_;\n  ThriftBase* delegate_{nullptr};\n};\n\n/**\n * ThriftValueBase is a base class for all struct field values, list values, set values, map keys,\n * and map values.\n */\nclass ThriftValueBase : public ThriftValue, public ThriftBase {\npublic:\n  ThriftValueBase(ThriftBase* parent, FieldType value_type)\n      : ThriftBase(parent), value_type_(value_type) {}\n  ~ThriftValueBase() override = default;\n\n  // ThriftValue\n  FieldType type() const override { return value_type_; }\n\nprotected:\n  const FieldType value_type_;\n};\n\nclass ThriftStructValueImpl;\n\n/**\n * ThriftField represents a field in a thrift Struct. 
It always delegates DecoderEventHandler\n * methods to a subclass of ThriftValueBase.\n */\nclass ThriftFieldImpl : public ThriftField, public ThriftBase {\npublic:\n  ThriftFieldImpl(ThriftStructValueImpl* parent, absl::string_view name, FieldType field_type,\n                  int16_t field_id);\n\n  // DecoderEventHandler\n  FilterStatus fieldEnd() override;\n\n  // ThriftField\n  FieldType fieldType() const override { return field_type_; }\n  int16_t fieldId() const override { return field_id_; }\n  const ThriftValue& getValue() const override { return *value_; }\n\nprivate:\n  std::string name_;\n  FieldType field_type_;\n  int16_t field_id_;\n  ThriftValuePtr value_;\n};\n\n/**\n * ThriftStructValueImpl implements ThriftStruct.\n */\nclass ThriftStructValueImpl : public ThriftStructValue, public ThriftValueBase {\npublic:\n  ThriftStructValueImpl(ThriftBase* parent) : ThriftValueBase(parent, FieldType::Struct) {}\n\n  // DecoderEventHandler\n  FilterStatus structBegin(absl::string_view name) override;\n  FilterStatus structEnd() override;\n  FilterStatus fieldBegin(absl::string_view name, FieldType& field_type,\n                          int16_t& field_id) override;\n\n  // ThriftStructValue\n  const ThriftFieldPtrList& fields() const override { return fields_; }\n\nprivate:\n  // ThriftValue\n  const void* getValue() const override { return this; };\n\n  ThriftFieldPtrList fields_;\n};\n\n/**\n * ThriftListValueImpl represents Thrift lists.\n */\nclass ThriftListValueImpl : public ThriftListValue, public ThriftValueBase {\npublic:\n  ThriftListValueImpl(ThriftBase* parent) : ThriftValueBase(parent, FieldType::List) {}\n\n  // DecoderEventHandler\n  FilterStatus listBegin(FieldType& elem_type, uint32_t& size) override;\n  FilterStatus listEnd() override;\n\n  // ThriftListValue\n  const ThriftValuePtrList& elements() const override { return elements_; }\n  FieldType elementType() const override { return elem_type_; }\n\n  void delegateComplete() 
override;\n\nprotected:\n  // ThriftValue\n  const void* getValue() const override { return this; };\n\n  FieldType elem_type_{FieldType::Stop};\n  uint32_t remaining_{0};\n  ThriftValuePtrList elements_;\n};\n\n/**\n * ThriftSetValueImpl represents Thrift sets.\n */\nclass ThriftSetValueImpl : public ThriftSetValue, public ThriftValueBase {\npublic:\n  ThriftSetValueImpl(ThriftBase* parent) : ThriftValueBase(parent, FieldType::Set) {}\n\n  // DecoderEventHandler\n  FilterStatus setBegin(FieldType& elem_type, uint32_t& size) override;\n  FilterStatus setEnd() override;\n\n  // ThriftSetValue\n  const ThriftValuePtrList& elements() const override { return elements_; }\n  FieldType elementType() const override { return elem_type_; }\n\n  void delegateComplete() override;\n\nprotected:\n  // ThriftValue\n  const void* getValue() const override { return this; };\n\n  FieldType elem_type_{FieldType::Stop};\n  uint32_t remaining_{0};\n  ThriftValuePtrList elements_; // maintain original order\n};\n\n/**\n * ThriftMapValueImpl represents Thrift maps.\n */\nclass ThriftMapValueImpl : public ThriftMapValue, public ThriftValueBase {\npublic:\n  ThriftMapValueImpl(ThriftBase* parent) : ThriftValueBase(parent, FieldType::Map) {}\n\n  // DecoderEventHandler\n  FilterStatus mapBegin(FieldType& key_type, FieldType& elem_type, uint32_t& size) override;\n  FilterStatus mapEnd() override;\n\n  // ThriftMapValue\n  const ThriftValuePtrPairList& elements() const override { return elements_; }\n  FieldType keyType() const override { return key_type_; }\n  FieldType valueType() const override { return elem_type_; }\n\n  void delegateComplete() override;\n\nprotected:\n  // ThriftValue\n  const void* getValue() const override { return this; };\n\n  FieldType key_type_{FieldType::Stop};\n  FieldType elem_type_{FieldType::Stop};\n  uint32_t remaining_{0};\n  ThriftValuePtrPairList elements_; // maintain original order\n};\n\n/**\n * ThriftValueImpl represents primitive Thrift types, 
including strings.\n */\nclass ThriftValueImpl : public ThriftValueBase {\npublic:\n  ThriftValueImpl(ThriftBase* parent, FieldType value_type) : ThriftValueBase(parent, value_type) {}\n\n  // DecoderEventHandler\n  FilterStatus boolValue(bool& value) override;\n  FilterStatus byteValue(uint8_t& value) override;\n  FilterStatus int16Value(int16_t& value) override;\n  FilterStatus int32Value(int32_t& value) override;\n  FilterStatus int64Value(int64_t& value) override;\n  FilterStatus doubleValue(double& value) override;\n  FilterStatus stringValue(absl::string_view value) override;\n\nprotected:\n  // ThriftValue\n  const void* getValue() const override;\n\nprivate:\n  union {\n    bool bool_value_;\n    uint8_t byte_value_;\n    int16_t int16_value_;\n    int32_t int32_value_;\n    int64_t int64_value_;\n    double double_value_;\n  };\n  std::string string_value_;\n};\n\n/**\n * ThriftObjectImpl is a generic representation of a Thrift struct.\n */\nclass ThriftObjectImpl : public ThriftObject,\n                         public ThriftStructValueImpl,\n                         public DecoderCallbacks {\npublic:\n  ThriftObjectImpl(Transport& transport, Protocol& protocol);\n\n  // DecoderCallbacks\n  DecoderEventHandler& newDecoderEventHandler() override { return *this; }\n  FilterStatus transportEnd() override {\n    complete_ = true;\n    return FilterStatus::Continue;\n  }\n\n  // ThriftObject\n  bool onData(Buffer::Instance& buffer) override;\n\n  // ThriftStruct\n  const ThriftFieldPtrList& fields() const override { return ThriftStructValueImpl::fields(); }\n\nprivate:\n  DecoderPtr decoder_;\n  bool complete_{false};\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/tracing.h",
    "content": "#pragma once\n\n#include <list>\n#include <string>\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * Endpoint is an endpoint attribution on an Annotation or BinaryAnnotation.\n */\nclass Endpoint {\npublic:\n  Endpoint(int32_t ipv4, int16_t port, const std::string& service_name)\n      : ipv4_(ipv4), port_(port), service_name_(service_name) {}\n  Endpoint() = default;\n\n  int32_t ipv4_{0};\n  int16_t port_{0};\n  std::string service_name_;\n};\n\n/**\n * Annotation is a span annotation.\n */\nclass Annotation {\npublic:\n  Annotation(int64_t timestamp, const std::string& value, absl::optional<Endpoint> host)\n      : timestamp_(timestamp), value_(value), host_(host) {}\n  Annotation() = default;\n\n  int64_t timestamp_{0};\n  std::string value_;\n  absl::optional<Endpoint> host_;\n};\nusing AnnotationList = std::list<Annotation>;\n\n/**\n * AnnotationType represents a BinaryAnnotation's type.\n */\nenum class AnnotationType {\n  Bool = 0,\n  Bytes = 1,\n  I16 = 2,\n  I32 = 3,\n  I64 = 4,\n  Double = 5,\n  String = 6,\n};\n\n/**\n * BinaryAnnotation is a binary span annotation.\n */\nclass BinaryAnnotation {\npublic:\n  BinaryAnnotation(const std::string& key, const std::string& value, AnnotationType annotation_type,\n                   absl::optional<Endpoint> host)\n      : key_(key), value_(value), annotation_type_(annotation_type), host_(host) {}\n  BinaryAnnotation() = default;\n\n  std::string key_;\n  std::string value_;\n  AnnotationType annotation_type_{AnnotationType::Bool};\n  absl::optional<Endpoint> host_;\n};\nusing BinaryAnnotationList = std::list<BinaryAnnotation>;\n\n/**\n * Span is a single, annotated span in a trace.\n */\nclass Span {\npublic:\n  Span(int64_t trace_id, const std::string& name, int64_t span_id,\n       absl::optional<int64_t> parent_span_id, AnnotationList&& annotations,\n       BinaryAnnotationList&& 
binary_annotations, bool debug)\n      : trace_id_(trace_id), name_(name), span_id_(span_id), parent_span_id_(parent_span_id),\n        annotations_(std::move(annotations)), binary_annotations_(std::move(binary_annotations)),\n        debug_(debug) {}\n  Span() = default;\n\n  int64_t trace_id_{0};\n  std::string name_;\n  int64_t span_id_{0};\n  absl::optional<int64_t> parent_span_id_;\n  AnnotationList annotations_;\n  BinaryAnnotationList binary_annotations_;\n  bool debug_{false};\n};\nusing SpanList = std::list<Span>;\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/transport.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/config/utility.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * Transport represents a Thrift transport. The Thrift transport is nominally a generic,\n * bi-directional byte stream. In Envoy we assume it always represents a network byte stream and\n * the Transport is just a mechanism for framing messages and passing message metadata.\n */\nclass Transport {\npublic:\n  virtual ~Transport() = default;\n\n  /*\n   * Returns this transport's name.\n   *\n   * @return std::string containing the transport name.\n   */\n  virtual const std::string& name() const PURE;\n\n  /**\n   * @return TransportType the transport type\n   */\n  virtual TransportType type() const PURE;\n\n  /*\n   * Decodes the start of a transport message. If successful, the start of the frame is removed\n   * from the buffer. Transports should not modify the buffer, headers, protocol type, or size if\n   * more data is required to decode the frame's start. If the full frame start can be decoded, the\n   * Transport must drain the frame start data from the buffer. 
The request metadata should be\n   * modified with any data available to the transport.\n   *\n   * @param buffer the currently buffered thrift data.\n   * @param metadata MessageMetadata to be modified if transport supports additional information\n   * @return bool true if a complete frame header was successfully consumed, false if more data\n   *                 is required.\n   * @throws EnvoyException if the data is not valid for this transport.\n   */\n  virtual bool decodeFrameStart(Buffer::Instance& buffer, MessageMetadata& metadata) PURE;\n\n  /*\n   * Decodes the end of a transport message. If successful, the end of the frame is removed from\n   * the buffer.\n   *\n   * @param buffer the currently buffered thrift data.\n   * @return bool true if a complete frame trailer was successfully consumed, false if more data\n   *                 is required.\n   * @throws EnvoyException if the data is not valid for this transport.\n   */\n  virtual bool decodeFrameEnd(Buffer::Instance& buffer) PURE;\n\n  /**\n   * Wraps the given message buffer with the transport's header and trailer (if any). 
After\n   * encoding, message will be empty.\n   * @param buffer is the output buffer\n   * @param metadata MessageMetadata for the message\n   * @param message a protocol-encoded message\n   * @throws EnvoyException if the message is too large for the transport\n   */\n  virtual void encodeFrame(Buffer::Instance& buffer, const MessageMetadata& metadata,\n                           Buffer::Instance& message) PURE;\n};\n\nusing TransportPtr = std::unique_ptr<Transport>;\n\n/**\n * Implemented by each Thrift transport and registered via Registry::registerFactory or the\n * convenience class RegisterFactory.\n */\nclass NamedTransportConfigFactory : public Envoy::Config::UntypedFactory {\npublic:\n  ~NamedTransportConfigFactory() override = default;\n\n  /**\n   * Create a particular Thrift transport.\n   * @return TransportPtr the transport\n   */\n  virtual TransportPtr createTransport() PURE;\n\n  std::string category() const override { return \"envoy.thrift_proxy.transports\"; }\n\n  /**\n   * Convenience method to lookup a factory by type.\n   * @param TransportType the transport type\n   * @return NamedTransportConfigFactory& for the TransportType\n   */\n  static NamedTransportConfigFactory& getFactory(TransportType type) {\n    const std::string& name = TransportNames::get().fromType(type);\n    return Envoy::Config::Utility::getAndCheckFactoryByName<NamedTransportConfigFactory>(name);\n  }\n};\n\n/**\n * TransportFactoryBase provides a template for a trivial NamedTransportConfigFactory.\n */\ntemplate <class TransportImpl> class TransportFactoryBase : public NamedTransportConfigFactory {\npublic:\n  TransportPtr createTransport() override { return std::move(std::make_unique<TransportImpl>()); }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  TransportFactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  const std::string name_;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/twitter_protocol_impl.h\"\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n#include \"extensions/filters/network/thrift_proxy/thrift_object_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/unframed_transport_impl.h\"\n\n#include \"absl/strings/str_replace.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace {\n\nstruct StructNameValues {\n  const std::string connectionOptionsStruct = \"ConnectionOptions\";\n  const std::string requestHeaderStruct = \"RequestHeader\";\n  const std::string clientIdStruct = \"ClientId\";\n  const std::string delegationStruct = \"Delegation\";\n  const std::string requestContextStruct = \"RequestContext\";\n  const std::string responseHeaderStruct = \"ResponseHeader\";\n  const std::string spanStruct = \"Span\";\n  const std::string annotationStruct = \"Annotation\";\n  const std::string binaryAnnotationStruct = \"BinaryAnnotation\";\n  const std::string endpointStruct = \"Endpoint\";\n  const std::string upgradeReplyStruct = \"UpgradeReply\";\n};\nusing StructNames = ConstSingleton<StructNameValues>;\n\nstruct RequestHeaderFieldNameValues {\n  const std::string traceIdField = \"trace_id\";\n  const std::string spanIdField = \"span_id\";\n  const std::string parentSpanIdField = \"parent_span_id\";\n  const std::string sampledField = \"sampled\";\n  const std::string clientIdField = \"client_id\";\n  const std::string flagsField = \"flags\";\n  const std::string contextsField = \"contexts\";\n  const std::string destField = \"dest\";\n  const std::string delegationsField = \"delegations\";\n  const std::string traceIdHighField = \"trace_id_high\";\n};\nusing RequestHeaderFieldNames = ConstSingleton<RequestHeaderFieldNameValues>;\n\nstruct ClientIdFieldNameValues {\n  const std::string 
nameField = \"name\";\n};\nusing ClientIdFieldNames = ConstSingleton<ClientIdFieldNameValues>;\n\nstruct DelegationFieldNameValues {\n  const std::string srcField = \"src\";\n  const std::string dstField = \"dst\";\n};\nusing DelegationFieldNames = ConstSingleton<DelegationFieldNameValues>;\n\nstruct RequestContextFieldNameValues {\n  const std::string keyField = \"key\";\n  const std::string valueField = \"value\";\n};\nusing RequestContextFieldNames = ConstSingleton<RequestContextFieldNameValues>;\n\nstruct ResponseHeaderFieldNameValues {\n  const std::string spansField = \"spans\";\n  const std::string contextsField = \"contexts\";\n};\nusing ResponseHeaderFieldNames = ConstSingleton<ResponseHeaderFieldNameValues>;\n\nstruct SpanFieldNameValues {\n  const std::string traceIdField = \"trace_id\";\n  const std::string nameField = \"name\";\n  const std::string idField = \"id\";\n  const std::string parentIdField = \"parent_id\";\n  const std::string annotationsField = \"annotations\";\n  const std::string binaryAnnotationsField = \"binary_annotations\";\n  const std::string debugField = \"debug\";\n};\nusing SpanFieldNames = ConstSingleton<SpanFieldNameValues>;\n\nstruct AnnotationFieldNameValues {\n  const std::string timestampField = \"timestamp\";\n  const std::string valueField = \"value\";\n  const std::string hostField = \"host\";\n};\nusing AnnotationFieldNames = ConstSingleton<AnnotationFieldNameValues>;\n\nstruct BinaryAnnotationFieldNameValues {\n  const std::string keyField = \"key\";\n  const std::string valueField = \"value\";\n  const std::string annotationTypeField = \"annotation_type\";\n  const std::string hostField = \"host\";\n};\nusing BinaryAnnotationFieldNames = ConstSingleton<BinaryAnnotationFieldNameValues>;\n\nstruct EndpointFieldNameValues {\n  const std::string ipv4Field = \"ipv4\";\n  const std::string portField = \"port\";\n  const std::string serviceNameField = \"service_name\";\n};\nusing EndpointFieldNames = 
ConstSingleton<EndpointFieldNameValues>;\n\nconst std::string& emptyString() { CONSTRUCT_ON_FIRST_USE(std::string, \"\"); }\n\n/**\n * HeaderObjectProtocol implements BinaryProtocolImpl for the specific purpose of decoding the\n * Twitter protocol RequestHeader and ResponseHeader thrift structs. These appear after any\n * transport data (e.g. frame size) and before the start of a Thrift message. Decoding them\n * via a Protocol implementation allows us to reuse the Decoder and its state machine.\n */\nclass HeaderObjectProtocol : public BinaryProtocolImpl {\npublic:\n  bool readMessageBegin(Buffer::Instance&, MessageMetadata&) override { return true; }\n  bool readMessageEnd(Buffer::Instance&) override { return true; }\n};\n\n// Not const because the interfaces do not allow it, but these objects do not maintain internal\n// state and are therefore not modifiable.\nTransport& headerObjectTransport() {\n  static UnframedTransportImpl* transport = new UnframedTransportImpl();\n  return *transport;\n}\n\nProtocol& headerObjectProtocol() {\n  static HeaderObjectProtocol* protocol = new HeaderObjectProtocol();\n  return *protocol;\n}\n\n/**\n * ClientId is a Twitter protocol client identifier.\n *\n * See https://github.com/twitter/finagle/blob/master/finagle-thrift/src/main/thrift/tracing.thrift\n */\nclass ClientId {\npublic:\n  ClientId(const std::string& name) : name_(name) {}\n  ClientId(const ThriftStructValue& value) {\n    for (const auto& field : value.fields()) {\n      // Unknown field id are ignored, to allow for future additional fields.\n      if (field->fieldId() == NameFieldId) {\n        name_ = field->getValue().getValueTyped<std::string>();\n      }\n    }\n  }\n\n  void write(Buffer::Instance& buffer) {\n    Protocol& protocol = headerObjectProtocol();\n    protocol.writeStructBegin(buffer, StructNames::get().clientIdStruct);\n\n    // name\n    protocol.writeFieldBegin(buffer, ClientIdFieldNames::get().nameField, FieldType::String,\n                  
           NameFieldId);\n    protocol.writeString(buffer, name_);\n    protocol.writeFieldEnd(buffer);\n\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n  }\n\n  static constexpr int16_t NameFieldId = 1;\n\n  std::string name_;\n};\n\n/**\n * UpgradeReply represents Twitter protocol upgrade responses.\n */\nclass UpgradeReply : public DirectResponse, public ThriftObject {\npublic:\n  UpgradeReply() = default;\n  UpgradeReply(Transport& transport)\n      : thrift_obj_(std::make_unique<ThriftObjectImpl>(transport, protocol_)) {}\n\n  // DirectResponse\n  DirectResponse::ResponseType encode(MessageMetadata& metadata, Protocol&,\n                                      Buffer::Instance& buffer) const override {\n    if (!metadata.hasSequenceId()) {\n      metadata.setSequenceId(0);\n    };\n\n    metadata.setMethodName(TwitterProtocolImpl::upgradeMethodName());\n    metadata.setMessageType(MessageType::Reply);\n\n    // The upgrade response cannot have Twitter protocol headers, so ignore the caller's Protocol.\n    BinaryProtocolImpl protocol;\n    protocol.writeMessageBegin(buffer, metadata);\n\n    // Per the Thrift standard, this is an invalid reply. We should start a reply struct with a\n    // single field of id 0 (0x0B 0x00 0x00) to indicate success, followed by an empty UpgradeReply\n    // struct (0x00), followed by a stop field for the reply struct (0x00). 
The finagle-twitter\n    // implementation, however, just emits a single stop field.\n    protocol.writeStructBegin(buffer, StructNames::get().upgradeReplyStruct);\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n\n    protocol.writeMessageEnd(buffer);\n\n    return DirectResponse::ResponseType::SuccessReply;\n  }\n\n  // ThriftObject\n  const ThriftFieldPtrList& fields() const override { return thrift_obj_->fields(); }\n  bool onData(Buffer::Instance& buffer) override { return thrift_obj_->onData(buffer); }\n\nprivate:\n  BinaryProtocolImpl protocol_;\n  ThriftObjectPtr thrift_obj_;\n};\n\n/**\n * ConnectionOptions is the Twitter protocol upgrade request. It is an empty struct.\n */\nclass ConnectionOptions : public ThriftStructValueImpl {\npublic:\n  ConnectionOptions() : ThriftStructValueImpl(nullptr) {}\n};\n\n/**\n * RequestContext is a Twitter protocol request context (key/value pair).\n *\n * See https://github.com/twitter/finagle/blob/master/finagle-thrift/src/main/thrift/tracing.thrift\n */\nclass RequestContext {\npublic:\n  RequestContext(const std::string& key, const std::string& value) : key_(key), value_(value) {}\n  RequestContext(const ThriftStructValue& value) {\n    for (const auto& field : value.fields()) {\n      // Unknown field id are ignored, to allow for future additional fields.\n      switch (field->fieldId()) {\n      case 1:\n        key_ = field->getValue().getValueTyped<std::string>();\n        break;\n      case 2:\n        value_ = field->getValue().getValueTyped<std::string>();\n        break;\n      }\n    }\n  }\n\n  void write(Buffer::Instance& buffer) const {\n    Protocol& protocol = headerObjectProtocol();\n    protocol.writeStructBegin(buffer, StructNames::get().requestContextStruct);\n\n    // key\n    protocol.writeFieldBegin(buffer, RequestContextFieldNames::get().keyField, FieldType::String,\n                             KeyFieldId);\n    
protocol.writeString(buffer, key_);\n    protocol.writeFieldEnd(buffer);\n\n    // value\n    protocol.writeFieldBegin(buffer, RequestContextFieldNames::get().valueField, FieldType::String,\n                             ValueFieldId);\n    protocol.writeString(buffer, value_);\n    protocol.writeFieldEnd(buffer);\n\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n  }\n\n  static constexpr int16_t KeyFieldId = 1;\n  static constexpr int16_t ValueFieldId = 2;\n\n  std::string key_;\n  std::string value_;\n};\nusing RequestContextList = std::list<RequestContext>;\n\n/**\n * Delegation is Twitter protocol delegation table entry.\n *\n * See https://github.com/twitter/finagle/blob/master/finagle-thrift/src/main/thrift/tracing.thrift\n */\nclass Delegation {\npublic:\n  Delegation(const std::string& src, const std::string& dst) : src_(src), dst_(dst) {}\n  Delegation(const ThriftStructValue& value) {\n    for (const auto& field : value.fields()) {\n      // Unknown field id are ignored, to allow for future additional fields.\n      switch (field->fieldId()) {\n      case SrcFieldId:\n        src_ = field->getValue().getValueTyped<std::string>();\n        break;\n      case DstFieldId:\n        dst_ = field->getValue().getValueTyped<std::string>();\n        break;\n      }\n    }\n  }\n\n  void write(Buffer::Instance& buffer) const {\n    Protocol& protocol = headerObjectProtocol();\n    protocol.writeStructBegin(buffer, StructNames::get().delegationStruct);\n\n    // src\n    protocol.writeFieldBegin(buffer, DelegationFieldNames::get().srcField, FieldType::String,\n                             SrcFieldId);\n    protocol.writeString(buffer, src_);\n    protocol.writeFieldEnd(buffer);\n\n    // dst\n    protocol.writeFieldBegin(buffer, DelegationFieldNames::get().dstField, FieldType::String,\n                             DstFieldId);\n    protocol.writeString(buffer, dst_);\n    
protocol.writeFieldEnd(buffer);\n\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n  }\n\n  static constexpr int16_t SrcFieldId = 1;\n  static constexpr int16_t DstFieldId = 2;\n\n  std::string src_;\n  std::string dst_;\n};\nusing DelegationList = std::list<Delegation>;\n\n/**\n * RequestHeader is a Twitter protocol request header, inserted between the transport start and\n * message begin.\n *\n * See https://github.com/twitter/finagle/blob/master/finagle-thrift/src/main/thrift/tracing.thrift\n */\nclass RequestHeader {\npublic:\n  RequestHeader(const ThriftObject& header) {\n    for (const auto& field : header.fields()) {\n      // Unknown field id are ignored, to allow for future additional fields.\n      switch (field->fieldId()) {\n      case TraceIdFieldId:\n        trace_id_ = field->getValue().getValueTyped<int64_t>();\n        break;\n      case SpanIdFieldId:\n        span_id_ = field->getValue().getValueTyped<int64_t>();\n        break;\n      case ParentSpanIdFieldId:\n        parent_span_id_ = field->getValue().getValueTyped<int64_t>();\n        break;\n      // unused: field 4\n      case SampledFieldId:\n        sampled_ = field->getValue().getValueTyped<bool>();\n        break;\n      case ClientIdFieldId:\n        client_id_ = ClientId(field->getValue().getValueTyped<ThriftStructValue>());\n        break;\n      case FlagsFieldId:\n        flags_ = field->getValue().getValueTyped<int64_t>();\n        break;\n      case ContextsFieldId:\n        readContexts(field->getValue().getValueTyped<ThriftListValue>());\n        break;\n      case DestFieldId:\n        dest_ = field->getValue().getValueTyped<std::string>();\n        break;\n      case DelegationsFieldId:\n        readDelegations(field->getValue().getValueTyped<ThriftListValue>());\n        break;\n      case TraceIdHighFieldId:\n        trace_id_high_ = field->getValue().getValueTyped<int64_t>();\n        break;\n      }\n    
}\n  }\n\n  RequestHeader(const MessageMetadata& metadata) {\n    if (metadata.traceId()) {\n      trace_id_ = *metadata.traceId();\n    }\n    if (metadata.traceIdHigh()) {\n      trace_id_high_ = *metadata.traceIdHigh();\n    }\n\n    if (metadata.spanId()) {\n      span_id_ = *metadata.spanId();\n    }\n    if (metadata.parentSpanId()) {\n      parent_span_id_ = *metadata.parentSpanId();\n    }\n\n    if (metadata.flags()) {\n      flags_ = *metadata.flags();\n    }\n\n    if (metadata.sampled().has_value()) {\n      sampled_ = metadata.sampled().value();\n    }\n\n    metadata.headers().iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n      absl::string_view key = header.key().getStringView();\n      if (key.empty()) {\n        return Http::HeaderMap::Iterate::Continue;\n      }\n\n      if (key == Headers::get().ClientId.get()) {\n        client_id_ = ClientId(std::string(header.value().getStringView()));\n      } else if (key == Headers::get().Dest.get()) {\n        dest_ = std::string(header.value().getStringView());\n      } else if (key.find(\":d:\") == 0 && key.size() > 3) {\n        delegations_.emplace_back(std::string(key.substr(3)),\n                                  std::string(header.value().getStringView()));\n      } else if (key[0] != ':') {\n        contexts_.emplace_back(std::string(key), std::string(header.value().getStringView()));\n      }\n      return Http::HeaderMap::Iterate::Continue;\n    });\n  }\n\n  void write(Buffer::Instance& buffer) {\n    Protocol& protocol = headerObjectProtocol();\n    protocol.writeStructBegin(buffer, StructNames::get().requestHeaderStruct);\n\n    // trace_id\n    protocol.writeFieldBegin(buffer, RequestHeaderFieldNames::get().traceIdField, FieldType::I64,\n                             TraceIdFieldId);\n    protocol.writeInt64(buffer, trace_id_);\n    protocol.writeFieldEnd(buffer);\n\n    // span_id\n    protocol.writeFieldBegin(buffer, 
RequestHeaderFieldNames::get().spanIdField, FieldType::I64,\n                             SpanIdFieldId);\n    protocol.writeInt64(buffer, span_id_);\n    protocol.writeFieldEnd(buffer);\n\n    // parent_span_id\n    if (parent_span_id_) {\n      protocol.writeFieldBegin(buffer, RequestHeaderFieldNames::get().parentSpanIdField,\n                               FieldType::I64, ParentSpanIdFieldId);\n      protocol.writeInt64(buffer, *parent_span_id_);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    // sampled\n    if (sampled_) {\n      protocol.writeFieldBegin(buffer, RequestHeaderFieldNames::get().sampledField, FieldType::Bool,\n                               SampledFieldId);\n      protocol.writeBool(buffer, *sampled_);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    // client_id\n    if (client_id_) {\n      protocol.writeFieldBegin(buffer, RequestHeaderFieldNames::get().clientIdField,\n                               FieldType::Struct, ClientIdFieldId);\n      client_id_->write(buffer);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    // flags\n    if (flags_) {\n      protocol.writeFieldBegin(buffer, RequestHeaderFieldNames::get().flagsField, FieldType::I64,\n                               FlagsFieldId);\n      protocol.writeInt64(buffer, *flags_);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    // contexts\n    if (!contexts_.empty()) {\n      protocol.writeFieldBegin(buffer, RequestHeaderFieldNames::get().contextsField,\n                               FieldType::List, ContextsFieldId);\n      protocol.writeListBegin(buffer, FieldType::Struct, contexts_.size());\n      for (const auto& context : contexts_) {\n        context.write(buffer);\n      }\n      protocol.writeListEnd(buffer);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    // dest\n    if (dest_) {\n      protocol.writeFieldBegin(buffer, RequestHeaderFieldNames::get().destField, FieldType::String,\n                               DestFieldId);\n      protocol.writeString(buffer, 
*dest_);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    // delegations\n    if (!delegations_.empty()) {\n      protocol.writeFieldBegin(buffer, RequestHeaderFieldNames::get().delegationsField,\n                               FieldType::List, DelegationsFieldId);\n      protocol.writeListBegin(buffer, FieldType::Struct, delegations_.size());\n      for (const auto& delegation : delegations_) {\n        delegation.write(buffer);\n      }\n      protocol.writeListEnd(buffer);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    // trace_id_high\n    if (trace_id_high_) {\n      protocol.writeFieldBegin(buffer, RequestHeaderFieldNames::get().traceIdHighField,\n                               FieldType::I64, TraceIdHighFieldId);\n      protocol.writeInt64(buffer, *trace_id_high_);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n  }\n\n  int64_t traceId() const { return trace_id_; }\n  int64_t spanId() const { return span_id_; }\n  absl::optional<int64_t> parentSpanId() const { return parent_span_id_; }\n  absl::optional<bool> sampled() const { return sampled_; }\n  absl::optional<ClientId> clientId() const { return client_id_; }\n  absl::optional<int64_t> flags() const { return flags_; }\n  const RequestContextList& contexts() const { return contexts_; }\n  RequestContextList* contexts() { return &contexts_; }\n  absl::optional<std::string> dest() { return dest_; }\n  const DelegationList& delegations() const { return delegations_; }\n  DelegationList* delegations() { return &delegations_; }\n  absl::optional<int64_t> traceIdHigh() const { return trace_id_high_; }\n\nprivate:\n  static constexpr int16_t TraceIdFieldId = 1;\n  static constexpr int16_t SpanIdFieldId = 2;\n  static constexpr int16_t ParentSpanIdFieldId = 3;\n  static constexpr int16_t SampledFieldId = 5;\n  static constexpr int16_t ClientIdFieldId = 6;\n  static constexpr int16_t 
FlagsFieldId = 7;\n  static constexpr int16_t ContextsFieldId = 8;\n  static constexpr int16_t DestFieldId = 9;\n  static constexpr int16_t DelegationsFieldId = 10;\n  static constexpr int16_t TraceIdHighFieldId = 11;\n\n  void readContexts(const ThriftListValue& ctxts_list) {\n    contexts_.clear();\n    for (const auto& elem : ctxts_list.elements()) {\n      const ThriftStructValue& ctxt_struct = elem->getValueTyped<ThriftStructValue>();\n      contexts_.emplace_back(ctxt_struct);\n    }\n  }\n\n  void readDelegations(const ThriftListValue& delegations_list) {\n    delegations_.clear();\n    for (const auto& elem : delegations_list.elements()) {\n      const ThriftStructValue& ctxt_struct = elem->getValueTyped<ThriftStructValue>();\n      delegations_.emplace_back(ctxt_struct);\n    }\n  }\n\n  int64_t trace_id_{0};\n  int64_t span_id_{0};\n  absl::optional<int64_t> parent_span_id_;\n  absl::optional<bool> sampled_;\n  absl::optional<ClientId> client_id_;\n  absl::optional<int64_t> flags_;\n  std::list<RequestContext> contexts_;\n  absl::optional<std::string> dest_;\n  DelegationList delegations_;\n  absl::optional<int64_t> trace_id_high_;\n};\n\n/**\n * ResponseHeader is a Twitter protocol response header, inserted between the transport start and\n * message begin.\n *\n * See https://github.com/twitter/finagle/blob/master/finagle-thrift/src/main/thrift/tracing.thrift\n */\nclass ResponseHeader {\npublic:\n  ResponseHeader(const ThriftObject& header) {\n    for (const auto& field : header.fields()) {\n      // Unknown field id are ignored, to allow for future additional fields.\n      switch (field->fieldId()) {\n      case SpansFieldId:\n        readSpans(field->getValue().getValueTyped<ThriftListValue>());\n        break;\n      case ContextsFieldId:\n        readContexts(field->getValue().getValueTyped<ThriftListValue>());\n        break;\n      }\n    }\n  }\n  ResponseHeader(const MessageMetadata& metadata) : spans_(metadata.spans()) {\n    
metadata.headers().iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n      absl::string_view key = header.key().getStringView();\n      if (!key.empty() && key[0] != ':') {\n        contexts_.emplace_back(std::string(key), std::string(header.value().getStringView()));\n      }\n      return Http::HeaderMap::Iterate::Continue;\n    });\n  }\n\n  void write(Buffer::Instance& buffer) {\n    Protocol& protocol = headerObjectProtocol();\n    protocol.writeStructBegin(buffer, StructNames::get().responseHeaderStruct);\n\n    // spans\n    if (!spans_.empty()) {\n      protocol.writeFieldBegin(buffer, ResponseHeaderFieldNames::get().spansField, FieldType::List,\n                               SpansFieldId);\n      protocol.writeListBegin(buffer, FieldType::Struct, spans_.size());\n      for (const auto& span : spans_) {\n        writeSpan(buffer, span);\n      }\n      protocol.writeListEnd(buffer);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    // contexts\n    if (!contexts_.empty()) {\n      protocol.writeFieldBegin(buffer, ResponseHeaderFieldNames::get().contextsField,\n                               FieldType::List, ContextsFieldId);\n      protocol.writeListBegin(buffer, FieldType::Struct, contexts_.size());\n      for (const auto& context : contexts_) {\n        context.write(buffer);\n      }\n      protocol.writeListEnd(buffer);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n  }\n\n  SpanList& spans() { return spans_; }\n  RequestContextList& contexts() { return contexts_; }\n\nprivate:\n  static constexpr int16_t SpansFieldId = 1;\n  static constexpr int16_t ContextsFieldId = 2;\n\n  static constexpr int16_t SpanTraceIdFieldId = 1;\n  static constexpr int16_t SpanNameFieldId = 3;\n  static constexpr int16_t SpanIdFieldId = 4;\n  static constexpr int16_t SpanParentIdFieldId = 5;\n  static constexpr int16_t 
SpanAnnotationsFieldId = 6;\n  static constexpr int16_t SpanBinaryAnnotationsFieldId = 8;\n  static constexpr int16_t SpanDebugFieldId = 9;\n\n  static constexpr int16_t AnnotationTimestampFieldId = 1;\n  static constexpr int16_t AnnotationValueFieldId = 2;\n  static constexpr int16_t AnnotationHostFieldId = 3;\n\n  static constexpr int16_t BinaryAnnotationKeyFieldId = 1;\n  static constexpr int16_t BinaryAnnotationValueFieldId = 2;\n  static constexpr int16_t BinaryAnnotationAnnotationTypeFieldId = 3;\n  static constexpr int16_t BinaryAnnotationHostFieldId = 4;\n\n  static constexpr int16_t EndpointIpv4FieldId = 1;\n  static constexpr int16_t EndpointPortFieldId = 2;\n  static constexpr int16_t EndpointServiceNameFieldId = 3;\n\n  void readSpans(const ThriftListValue& spans_list) {\n    spans_.clear();\n    for (const auto& elem : spans_list.elements()) {\n      spans_.emplace_back();\n      readSpan(spans_.back(), elem->getValueTyped<ThriftStructValue>());\n    }\n  }\n\n  void readSpan(Span& span, const ThriftStructValue& thrift_struct) {\n    for (const auto& field : thrift_struct.fields()) {\n      // Unknown field id are ignored, to allow for future additional fields.\n      switch (field->fieldId()) {\n      case SpanTraceIdFieldId:\n        span.trace_id_ = field->getValue().getValueTyped<int64_t>();\n        break;\n      // field 2: unused\n      case SpanNameFieldId:\n        span.name_ = field->getValue().getValueTyped<std::string>();\n        break;\n      case SpanIdFieldId:\n        span.span_id_ = field->getValue().getValueTyped<int64_t>();\n        break;\n      case SpanParentIdFieldId:\n        span.parent_span_id_ = field->getValue().getValueTyped<int64_t>();\n        break;\n      case SpanAnnotationsFieldId:\n        readAnnotations(span.annotations_, field->getValue().getValueTyped<ThriftListValue>());\n        break;\n      // field 7: unused\n      case SpanBinaryAnnotationsFieldId:\n        readBinaryAnnotations(span.binary_annotations_,\n 
                             field->getValue().getValueTyped<ThriftListValue>());\n        break;\n      case SpanDebugFieldId:\n        span.debug_ = field->getValue().getValueTyped<bool>();\n        break;\n      }\n    }\n  }\n\n  void writeSpan(Buffer::Instance& buffer, const Span& span) {\n    Protocol& protocol = headerObjectProtocol();\n\n    protocol.writeStructBegin(buffer, StructNames::get().spanStruct);\n    // trace_id\n    protocol.writeFieldBegin(buffer, SpanFieldNames::get().traceIdField, FieldType::I64,\n                             SpanTraceIdFieldId);\n    protocol.writeInt64(buffer, span.trace_id_);\n    protocol.writeFieldEnd(buffer);\n\n    // name\n    protocol.writeFieldBegin(buffer, SpanFieldNames::get().nameField, FieldType::String,\n                             SpanNameFieldId);\n    protocol.writeString(buffer, span.name_);\n    protocol.writeFieldEnd(buffer);\n\n    // id\n    protocol.writeFieldBegin(buffer, SpanFieldNames::get().idField, FieldType::I64, SpanIdFieldId);\n    protocol.writeInt64(buffer, span.span_id_);\n    protocol.writeFieldEnd(buffer);\n\n    // parent_id\n    if (span.parent_span_id_) {\n      protocol.writeFieldBegin(buffer, SpanFieldNames::get().parentIdField, FieldType::I64,\n                               SpanParentIdFieldId);\n      protocol.writeInt64(buffer, *span.parent_span_id_);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    // annotations\n    protocol.writeFieldBegin(buffer, SpanFieldNames::get().annotationsField, FieldType::List,\n                             SpanAnnotationsFieldId);\n    protocol.writeListBegin(buffer, FieldType::Struct, span.annotations_.size());\n    for (const auto& annotation : span.annotations_) {\n      writeAnnotation(buffer, annotation);\n    }\n    protocol.writeListEnd(buffer);\n    protocol.writeFieldEnd(buffer);\n\n    // binary_annotations\n    protocol.writeFieldBegin(buffer, SpanFieldNames::get().binaryAnnotationsField, FieldType::List,\n                             
SpanBinaryAnnotationsFieldId);\n    protocol.writeListBegin(buffer, FieldType::Struct, span.binary_annotations_.size());\n    for (const auto& annotation : span.binary_annotations_) {\n      writeBinaryAnnotation(buffer, annotation);\n    }\n    protocol.writeListEnd(buffer);\n    protocol.writeFieldEnd(buffer);\n\n    // debug\n    protocol.writeFieldBegin(buffer, SpanFieldNames::get().debugField, FieldType::Bool,\n                             SpanDebugFieldId);\n    protocol.writeBool(buffer, span.debug_);\n    protocol.writeFieldEnd(buffer);\n\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n  }\n\n  void readAnnotations(AnnotationList& annotations, const ThriftListValue& thrift_list) {\n    annotations.clear();\n    for (const auto& elem : thrift_list.elements()) {\n      annotations.emplace_back();\n      readAnnotation(annotations.back(), elem->getValueTyped<ThriftStructValue>());\n    }\n  }\n\n  void readAnnotation(Annotation& annotation, const ThriftStructValue& thrift_struct) {\n    for (const auto& field : thrift_struct.fields()) {\n      // Unknown field id are ignored, to allow for future additional fields.\n      switch (field->fieldId()) {\n      case AnnotationTimestampFieldId:\n        annotation.timestamp_ = field->getValue().getValueTyped<int64_t>();\n        break;\n      case AnnotationValueFieldId:\n        annotation.value_ = field->getValue().getValueTyped<std::string>();\n        break;\n      case AnnotationHostFieldId:\n        annotation.host_.emplace();\n        readEndpoint(annotation.host_.value(),\n                     field->getValue().getValueTyped<ThriftStructValue>());\n        break;\n      }\n    }\n  }\n\n  void writeAnnotation(Buffer::Instance& buffer, const Annotation& annotation) {\n    Protocol& protocol = headerObjectProtocol();\n\n    protocol.writeStructBegin(buffer, StructNames::get().annotationStruct);\n\n    // timestamp\n    
protocol.writeFieldBegin(buffer, AnnotationFieldNames::get().timestampField, FieldType::I64,\n                             AnnotationTimestampFieldId);\n    protocol.writeInt64(buffer, annotation.timestamp_);\n    protocol.writeFieldEnd(buffer);\n\n    // value\n    protocol.writeFieldBegin(buffer, AnnotationFieldNames::get().valueField, FieldType::String,\n                             AnnotationValueFieldId);\n    protocol.writeString(buffer, annotation.value_);\n    protocol.writeFieldEnd(buffer);\n\n    // endpoint\n    if (annotation.host_) {\n      protocol.writeFieldBegin(buffer, AnnotationFieldNames::get().hostField, FieldType::Struct,\n                               AnnotationHostFieldId);\n      writeEndpoint(buffer, *annotation.host_);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n  }\n\n  void readBinaryAnnotations(BinaryAnnotationList& annotations,\n                             const ThriftListValue& thrift_list) {\n    annotations.clear();\n    for (const auto& elem : thrift_list.elements()) {\n      annotations.emplace_back();\n      readBinaryAnnotation(annotations.back(), elem->getValueTyped<ThriftStructValue>());\n    }\n  }\n\n  void readBinaryAnnotation(BinaryAnnotation& annotation, const ThriftStructValue& thrift_struct) {\n    for (const auto& field : thrift_struct.fields()) {\n      // Unknown field id are ignored, to allow for future additional fields.\n      switch (field->fieldId()) {\n      case BinaryAnnotationKeyFieldId:\n        annotation.key_ = field->getValue().getValueTyped<std::string>();\n        break;\n      case BinaryAnnotationValueFieldId:\n        annotation.value_ = field->getValue().getValueTyped<std::string>();\n        break;\n      case BinaryAnnotationAnnotationTypeFieldId:\n        annotation.annotation_type_ =\n            static_cast<AnnotationType>(field->getValue().getValueTyped<int32_t>());\n        
break;\n      case BinaryAnnotationHostFieldId:\n        annotation.host_.emplace();\n        readEndpoint(annotation.host_.value(),\n                     field->getValue().getValueTyped<ThriftStructValue>());\n        break;\n      }\n    }\n  }\n\n  void writeBinaryAnnotation(Buffer::Instance& buffer, const BinaryAnnotation& annotation) {\n    Protocol& protocol = headerObjectProtocol();\n\n    protocol.writeStructBegin(buffer, StructNames::get().binaryAnnotationStruct);\n\n    // key\n    protocol.writeFieldBegin(buffer, BinaryAnnotationFieldNames::get().keyField, FieldType::String,\n                             BinaryAnnotationKeyFieldId);\n    protocol.writeString(buffer, annotation.key_);\n    protocol.writeFieldEnd(buffer);\n\n    // value\n    protocol.writeFieldBegin(buffer, BinaryAnnotationFieldNames::get().valueField,\n                             FieldType::String, BinaryAnnotationValueFieldId);\n    protocol.writeString(buffer, annotation.value_);\n    protocol.writeFieldEnd(buffer);\n\n    // annotation_type\n    protocol.writeFieldBegin(buffer, BinaryAnnotationFieldNames::get().annotationTypeField,\n                             FieldType::I32, BinaryAnnotationAnnotationTypeFieldId);\n    protocol.writeInt32(buffer, static_cast<int32_t>(annotation.annotation_type_));\n    protocol.writeFieldEnd(buffer);\n\n    // endpoint\n    if (annotation.host_) {\n      protocol.writeFieldBegin(buffer, BinaryAnnotationFieldNames::get().hostField,\n                               FieldType::Struct, BinaryAnnotationHostFieldId);\n      writeEndpoint(buffer, *annotation.host_);\n      protocol.writeFieldEnd(buffer);\n    }\n\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n  }\n\n  void readEndpoint(Endpoint& endpoint, const ThriftStructValue& thrift_struct) {\n    for (const auto& field : thrift_struct.fields()) {\n      // Unknown field id are ignored, to allow for future additional fields.\n      
switch (field->fieldId()) {\n      case 1:\n        endpoint.ipv4_ = field->getValue().getValueTyped<int32_t>();\n        break;\n      case 2:\n        endpoint.port_ = field->getValue().getValueTyped<int16_t>();\n        break;\n      case 3:\n        endpoint.service_name_ = field->getValue().getValueTyped<std::string>();\n        break;\n      }\n    }\n  }\n\n  void writeEndpoint(Buffer::Instance& buffer, const Endpoint& endpoint) {\n    Protocol& protocol = headerObjectProtocol();\n\n    protocol.writeStructBegin(buffer, StructNames::get().endpointStruct);\n\n    // ipv4\n    protocol.writeFieldBegin(buffer, EndpointFieldNames::get().ipv4Field, FieldType::I32,\n                             EndpointIpv4FieldId);\n    protocol.writeInt32(buffer, endpoint.ipv4_);\n    protocol.writeFieldEnd(buffer);\n\n    // port\n    protocol.writeFieldBegin(buffer, EndpointFieldNames::get().portField, FieldType::I16,\n                             EndpointPortFieldId);\n    protocol.writeInt16(buffer, endpoint.port_);\n    protocol.writeFieldEnd(buffer);\n\n    // service_name\n    protocol.writeFieldBegin(buffer, EndpointFieldNames::get().serviceNameField, FieldType::String,\n                             EndpointServiceNameFieldId);\n    protocol.writeString(buffer, endpoint.service_name_);\n    protocol.writeFieldEnd(buffer);\n\n    protocol.writeFieldBegin(buffer, emptyString(), FieldType::Stop, 0);\n    protocol.writeStructEnd(buffer);\n  }\n\n  void readContexts(const ThriftListValue& ctxts_list) {\n    contexts_.clear();\n    for (const auto& elem : ctxts_list.elements()) {\n      const ThriftStructValue& ctxt_struct = elem->getValueTyped<ThriftStructValue>();\n      contexts_.emplace_back(ctxt_struct);\n    }\n  }\n\n  std::list<Span> spans_;\n  std::list<RequestContext> contexts_;\n};\n\n} // namespace\n\nbool TwitterProtocolImpl::readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) {\n  // If we see a normal binary protocol message with the improbable 
name on the first request\n  // or response, we're upgrading to the TTwitter protocol.\n  if (!upgraded_.has_value()) {\n    if (!BinaryProtocolImpl::readMessageBegin(buffer, metadata)) {\n      // Need more data.\n      return false;\n    }\n\n    ASSERT(metadata.hasMethodName());\n    if (metadata.methodName() == upgradeMethodName()) {\n      metadata.setProtocolUpgradeMessage(true);\n      return true;\n    }\n\n    upgraded_ = false;\n    return true;\n  }\n\n  if (!upgraded_.value()) {\n    // Fall back to regular binary protocol with no header object.\n    return BinaryProtocolImpl::readMessageBegin(buffer, metadata);\n  }\n\n  // Upgraded protocol: consume RequestHeader or ResponseHeader.\n  if (!header_complete_) {\n    if (!header_) {\n      header_ = std::make_unique<ThriftObjectImpl>(headerObjectTransport(), headerObjectProtocol());\n    }\n    header_complete_ = header_->onData(buffer);\n    if (!header_complete_) {\n      // Need more data.\n      return false;\n    }\n  }\n\n  if (!BinaryProtocolImpl::readMessageBegin(buffer, metadata)) {\n    // Need more data.\n    return false;\n  }\n\n  // Now that we know whether this is a request or a response, handle the header.\n  ASSERT(metadata.hasMessageType());\n  switch (metadata.messageType()) {\n  case MessageType::Call:\n  case MessageType::Oneway:\n    updateMetadataWithRequestHeader(*header_, metadata);\n    break;\n  case MessageType::Reply:\n  case MessageType::Exception:\n    updateMetadataWithResponseHeader(*header_, metadata);\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  header_complete_ = false;\n  header_.reset();\n  return true;\n}\n\nvoid TwitterProtocolImpl::writeMessageBegin(Buffer::Instance& buffer,\n                                            const MessageMetadata& metadata) {\n  if (upgraded_.value_or(false)) {\n    switch (metadata.messageType()) {\n    case MessageType::Call:\n    case MessageType::Oneway:\n      writeRequestHeader(buffer, metadata);\n      
break;\n    case MessageType::Reply:\n    case MessageType::Exception:\n      writeResponseHeader(buffer, metadata);\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  BinaryProtocolImpl::writeMessageBegin(buffer, metadata);\n}\n\nvoid TwitterProtocolImpl::updateMetadataWithRequestHeader(const ThriftObject& header_object,\n                                                          MessageMetadata& metadata) {\n  RequestHeader req_header(header_object);\n\n  Http::HeaderMap& headers = metadata.headers();\n\n  metadata.setTraceId(req_header.traceId());\n  metadata.setSpanId(req_header.spanId());\n  if (req_header.parentSpanId()) {\n    metadata.setParentSpanId(*req_header.parentSpanId());\n  }\n  if (req_header.sampled()) {\n    metadata.setSampled(*req_header.sampled());\n  }\n  if (req_header.clientId()) {\n    headers.addReferenceKey(Headers::get().ClientId, req_header.clientId()->name_);\n  }\n  if (req_header.flags()) {\n    metadata.setFlags(*req_header.flags());\n  }\n  for (const auto& context : *req_header.contexts()) {\n    // LowerCaseString doesn't allow '\\0', '\\n', and '\\r'.\n    const std::string key =\n        absl::StrReplaceAll(context.key_, {{std::string(1, '\\0'), \"\"}, {\"\\n\", \"\"}, {\"\\r\", \"\"}});\n    headers.addCopy(Http::LowerCaseString{key}, context.value_);\n  }\n  if (req_header.dest()) {\n    headers.addReferenceKey(Headers::get().Dest, *req_header.dest());\n  }\n  // TODO(zuercher): Delegations are stored as headers for now. 
Consider passing them as simple\n  // objects\n  for (const auto& delegation : *req_header.delegations()) {\n    // LowerCaseString doesn't allow '\\0', '\\n', and '\\r'.\n    const std::string src =\n        absl::StrReplaceAll(delegation.src_, {{std::string(1, '\\0'), \"\"}, {\"\\n\", \"\"}, {\"\\r\", \"\"}});\n    const std::string key = fmt::format(\":d:{}\", src);\n    headers.addCopy(Http::LowerCaseString{key}, delegation.dst_);\n  }\n  if (req_header.traceIdHigh()) {\n    metadata.setTraceIdHigh(*req_header.traceIdHigh());\n  }\n}\n\nvoid TwitterProtocolImpl::writeRequestHeader(Buffer::Instance& buffer,\n                                             const MessageMetadata& metadata) {\n  RequestHeader req_header(metadata);\n  req_header.write(buffer);\n}\n\nvoid TwitterProtocolImpl::updateMetadataWithResponseHeader(const ThriftObject& header_object,\n                                                           MessageMetadata& metadata) {\n  ResponseHeader resp_header(header_object);\n\n  Http::HeaderMap& headers = metadata.headers();\n  for (const auto& context : resp_header.contexts()) {\n    // LowerCaseString doesn't allow '\\0', '\\n', and '\\r'.\n    const std::string key =\n        absl::StrReplaceAll(context.key_, {{std::string(1, '\\0'), \"\"}, {\"\\n\", \"\"}, {\"\\r\", \"\"}});\n    headers.addCopy(Http::LowerCaseString(key), context.value_);\n  }\n\n  SpanList& spans = resp_header.spans();\n  std::copy(spans.begin(), spans.end(), std::back_inserter(metadata.mutableSpans()));\n}\n\nvoid TwitterProtocolImpl::writeResponseHeader(Buffer::Instance& buffer,\n                                              const MessageMetadata& metadata) {\n  ResponseHeader resp_header(metadata);\n  resp_header.write(buffer);\n}\n\nThriftObjectPtr TwitterProtocolImpl::newHeader() {\n  return std::make_unique<ThriftObjectImpl>(headerObjectTransport(), headerObjectProtocol());\n}\n\nDecoderEventHandlerSharedPtr TwitterProtocolImpl::upgradeRequestDecoder() {\n  return 
std::make_shared<ConnectionOptions>();\n}\n\nDirectResponsePtr TwitterProtocolImpl::upgradeResponse(const DecoderEventHandler& decoder) {\n  ASSERT(dynamic_cast<const ConnectionOptions*>(&decoder) != nullptr);\n  upgraded_ = true;\n  return std::make_unique<UpgradeReply>();\n};\n\nThriftObjectPtr TwitterProtocolImpl::attemptUpgrade(Transport& transport,\n                                                    ThriftConnectionState& state,\n                                                    Buffer::Instance& buffer) {\n  // Check if we've already attempted to upgrade this connection.\n  if (state.upgradeAttempted()) {\n    upgraded_ = state.isUpgraded();\n    return nullptr;\n  }\n\n  // Write upgrade request to buffer and return an object that can decode the response.\n  MessageMetadata metadata;\n  metadata.setMethodName(upgradeMethodName());\n  metadata.setSequenceId(0);\n  metadata.setMessageType(MessageType::Call);\n\n  Buffer::OwnedImpl message;\n  BinaryProtocolImpl::writeMessageBegin(message, metadata);\n  writeStructBegin(message, StructNames::get().connectionOptionsStruct);\n  writeFieldBegin(message, emptyString(), FieldType::Stop, 0);\n  writeStructEnd(message);\n  writeMessageEnd(message);\n  transport.encodeFrame(buffer, metadata, message);\n\n  return std::make_unique<UpgradeReply>(transport);\n}\n\nvoid TwitterProtocolImpl::completeUpgrade(ThriftConnectionState& state, ThriftObject& response) {\n  UpgradeReply& upgrade_reply = dynamic_cast<UpgradeReply&>(response);\n\n  if (upgrade_reply.fields().empty()) {\n    state.markUpgraded();\n    upgraded_ = true;\n  } else {\n    state.markUpgradeFailed();\n    upgraded_ = false;\n  }\n}\n\nbool TwitterProtocolImpl::isUpgradePrefix(Buffer::Instance& buffer) {\n  // 12 bytes is the minimum length for the start of a binary protocol message.\n  ASSERT(buffer.length() >= 12);\n\n  // Must appear to be binary protocol.\n  if (!isMagic(buffer.peekBEInt<uint16_t>())) {\n    return false;\n  }\n\n  // Must have 
correct length message name length.\n  if (buffer.peekBEInt<uint32_t>(4) != upgradeMethodName().length()) {\n    return false;\n  }\n\n  // Given the fixed 8 bytes of message begin before the name, calculate how many bytes of message\n  // name are available in the buffer.\n  uint32_t available_len = static_cast<uint32_t>(\n      std::min(static_cast<uint64_t>(upgradeMethodName().length()), buffer.length() - 8));\n  ASSERT(available_len <= upgradeMethodName().length());\n  ASSERT(buffer.length() >= available_len + 8);\n\n  // Extract as much of the name as is available.\n  absl::string_view available_name(\n      static_cast<const char*>(buffer.linearize(available_len + 8)) + 8, available_len);\n\n  absl::string_view full_name(upgradeMethodName());\n\n  return full_name.compare(0, available_len, available_name) == 0;\n}\n\nclass TwitterProtocolConfigFactory : public ProtocolFactoryBase<TwitterProtocolImpl> {\npublic:\n  TwitterProtocolConfigFactory() : ProtocolFactoryBase(ProtocolNames::get().TWITTER) {}\n};\n\n/**\n * Static registration for the Twitter protocol. @see RegisterFactory.\n */\nREGISTER_FACTORY(TwitterProtocolConfigFactory, NamedProtocolConfigFactory);\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/pure.h\"\n\n#include \"common/common/macros.h\"\n\n#include \"extensions/filters/network/thrift_proxy/binary_protocol_impl.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * TwitterProtocolImpl implements the Twitter-upgraded (AKA \"TTwitter\") Thrift protocol.\n * See https://twitter.github.io/finagle/docs/com/twitter/finagle/Thrift$ and\n * https://github.com/twitter/finagle/blob/master/finagle-thrift/src/main/thrift/tracing.thrift\n */\nclass TwitterProtocolImpl : public BinaryProtocolImpl {\npublic:\n  // Protocol\n  const std::string& name() const override { return ProtocolNames::get().TWITTER; }\n  ProtocolType type() const override { return ProtocolType::Twitter; }\n  bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override;\n  void writeMessageBegin(Buffer::Instance& buffer, const MessageMetadata& metadata) override;\n  bool supportsUpgrade() override { return true; }\n  DecoderEventHandlerSharedPtr upgradeRequestDecoder() override;\n  DirectResponsePtr upgradeResponse(const DecoderEventHandler& decoder) override;\n  ThriftObjectPtr attemptUpgrade(Transport& transport, ThriftConnectionState& state,\n                                 Buffer::Instance& buffer) override;\n  void completeUpgrade(ThriftConnectionState& state, ThriftObject& response) override;\n\n  /**\n   * @return true if the protocol upgrade was success, false if not, no value if the result is not\n   *         yet known\n   */\n  absl::optional<bool> upgraded() { return upgraded_; }\n\n  /**\n   * @return std::string containing the \"improbably-named method\" used for Twitter protocol upgrade.\n   */\n  static const std::string& upgradeMethodName() {\n    // 
https://github.com/twitter/finagle/blob/master/finagle-thrift/src/main/scala/com/twitter/finagle/thrift/ThriftTracing.scala\n    CONSTRUCT_ON_FIRST_USE(std::string, \"__can__finagle__trace__v3__\");\n  }\n\n  /**\n   * @return true if the buffer (minimum 12 bytes) appears to start with a Twitter protocol\n   *         upgrade message, false otherwise\n   */\n  static bool isUpgradePrefix(Buffer::Instance& buffer);\n\nprotected:\n  static void updateMetadataWithRequestHeader(const ThriftObject& header_object,\n                                              MessageMetadata& metadata);\n  static void updateMetadataWithResponseHeader(const ThriftObject& header_object,\n                                               MessageMetadata& metadata);\n  static void writeRequestHeader(Buffer::Instance& buffer, const MessageMetadata& metadata);\n  static void writeResponseHeader(Buffer::Instance& buffer, const MessageMetadata& metadata);\n  static ThriftObjectPtr newHeader();\n\nprivate:\n  ThriftObjectPtr header_;\n  bool header_complete_{false};\n  absl::optional<bool> upgraded_;\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/unframed_transport_impl.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/unframed_transport_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass UnframedTransportConfigFactory : public TransportFactoryBase<UnframedTransportImpl> {\npublic:\n  UnframedTransportConfigFactory() : TransportFactoryBase(TransportNames::get().UNFRAMED) {}\n};\n\n/**\n * Static registration for the unframed transport. @see RegisterFactory.\n */\nREGISTER_FACTORY(UnframedTransportConfigFactory, NamedTransportConfigFactory);\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/thrift_proxy/unframed_transport_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * UnframedTransportImpl implements the Thrift Unframed transport.\n * See https://github.com/apache/thrift/blob/master/doc/specs/thrift-rpc.md\n */\nclass UnframedTransportImpl : public Transport {\npublic:\n  UnframedTransportImpl() = default;\n\n  // Transport\n  const std::string& name() const override { return TransportNames::get().UNFRAMED; }\n  TransportType type() const override { return TransportType::Unframed; }\n  bool decodeFrameStart(Buffer::Instance& buffer, MessageMetadata& metadata) override {\n    UNREFERENCED_PARAMETER(metadata);\n\n    // Don't start a frame if there's no data at all.\n    return buffer.length() > 0;\n  }\n  bool decodeFrameEnd(Buffer::Instance&) override { return true; }\n  void encodeFrame(Buffer::Instance& buffer, const MessageMetadata& metadata,\n                   Buffer::Instance& message) override {\n    UNREFERENCED_PARAMETER(metadata);\n    buffer.move(message);\n  }\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\n# Public docs: docs/root/configuration/network_filters/wasm_filter.rst\n\nenvoy_cc_library(\n    name = \"wasm_filter_lib\",\n    srcs = [\"wasm_filter.cc\"],\n    hdrs = [\"wasm_filter.h\"],\n    deps = [\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"@envoy_api//envoy/extensions/filters/network/wasm/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"unknown\",\n    status = \"alpha\",\n    deps = [\n        \":wasm_filter_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/wasm/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/wasm/config.cc",
    "content": "#include \"extensions/filters/network/wasm/config.h\"\n\n#include \"envoy/extensions/filters/network/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/config/datasource.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/filters/network/wasm/wasm_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Wasm {\n\nNetwork::FilterFactoryCb WasmFilterConfig::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::wasm::v3::Wasm& proto_config,\n    Server::Configuration::FactoryContext& context) {\n  auto filter_config = std::make_shared<FilterConfig>(proto_config, context);\n  return [filter_config](Network::FilterManager& filter_manager) -> void {\n    auto filter = filter_config->createFilter();\n    if (filter) {\n      filter_manager.addFilter(filter);\n    } // else fail open\n  };\n}\n\n/**\n * Static registration for the Wasm filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(WasmFilterConfig, Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace Wasm\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/wasm/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/wasm/v3/wasm.pb.h\"\n#include \"envoy/extensions/filters/network/wasm/v3/wasm.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Wasm {\n\n/**\n * Config registration for the Wasm filter. @see NamedNetworkFilterConfigFactory.\n */\nclass WasmFilterConfig\n    : public Common::FactoryBase<envoy::extensions::filters::network::wasm::v3::Wasm> {\npublic:\n  WasmFilterConfig() : FactoryBase(NetworkFilterNames::get().Wasm) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::wasm::v3::Wasm& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace Wasm\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/wasm/wasm_filter.cc",
    "content": "#include \"extensions/filters/network/wasm/wasm_filter.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Wasm {\n\nFilterConfig::FilterConfig(const envoy::extensions::filters::network::wasm::v3::Wasm& config,\n                           Server::Configuration::FactoryContext& context)\n    : tls_slot_(context.threadLocal().allocateSlot()) {\n  plugin_ = std::make_shared<Common::Wasm::Plugin>(\n      config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(),\n      config.config().vm_config().runtime(),\n      Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(),\n      context.direction(), context.localInfo(), &context.listenerMetadata());\n\n  auto plugin = plugin_;\n  auto callback = [plugin, this](Common::Wasm::WasmHandleSharedPtr base_wasm) {\n    // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call.\n    tls_slot_->set(\n        [base_wasm,\n         plugin](Event::Dispatcher& dispatcher) -> std::shared_ptr<ThreadLocal::ThreadLocalObject> {\n          if (!base_wasm) {\n            return nullptr;\n          }\n          return std::static_pointer_cast<ThreadLocal::ThreadLocalObject>(\n              Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, dispatcher));\n        });\n  };\n\n  if (!Common::Wasm::createWasm(\n          config.config().vm_config(), plugin_, context.scope().createScope(\"\"),\n          context.clusterManager(), context.initManager(), context.dispatcher(), context.api(),\n          context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) {\n    throw Common::Wasm::WasmException(\n        fmt::format(\"Unable to create Wasm network filter {}\", plugin->name_));\n  }\n}\n\n} // namespace Wasm\n} // namespace NetworkFilters\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/wasm/wasm_filter.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/extensions/filters/network/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Wasm {\n\nusing Envoy::Extensions::Common::Wasm::Context;\nusing Envoy::Extensions::Common::Wasm::Wasm;\nusing Envoy::Extensions::Common::Wasm::WasmHandle;\n\nclass FilterConfig : Logger::Loggable<Logger::Id::wasm> {\npublic:\n  FilterConfig(const envoy::extensions::filters::network::wasm::v3::Wasm& proto_config,\n               Server::Configuration::FactoryContext& context);\n\n  std::shared_ptr<Context> createFilter() {\n    Wasm* wasm = nullptr;\n    if (tls_slot_->get()) {\n      wasm = tls_slot_->getTyped<WasmHandle>().wasm().get();\n    }\n    if (plugin_->fail_open_ && (!wasm || wasm->isFailed())) {\n      return nullptr;\n    }\n    if (wasm && !root_context_id_) {\n      root_context_id_ = wasm->getRootContext(plugin_->root_id_)->id();\n    }\n    return std::make_shared<Context>(wasm, root_context_id_, plugin_);\n  }\n  Envoy::Extensions::Common::Wasm::Wasm* wasm() {\n    return tls_slot_->getTyped<WasmHandle>().wasm().get();\n  }\n\nprivate:\n  uint32_t root_context_id_{0};\n  Envoy::Extensions::Common::Wasm::PluginSharedPtr plugin_;\n  ThreadLocal::SlotPtr tls_slot_;\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_;\n};\n\ntypedef std::shared_ptr<FilterConfig> FilterConfigSharedPtr;\n\n} // namespace Wasm\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/well_known_names.h",
    "content": "#pragma once\n\n#include \"common/config/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\n\n/**\n * Well-known network filter names.\n * NOTE: New filters should use the well known name: envoy.filters.network.name.\n */\nclass NetworkFilterNameValues {\npublic:\n  // Client ssl auth filter\n  const std::string ClientSslAuth = \"envoy.filters.network.client_ssl_auth\";\n  // Echo filter\n  const std::string Echo = \"envoy.filters.network.echo\";\n  // Direct response filter\n  const std::string DirectResponse = \"envoy.filters.network.direct_response\";\n  // RocketMQ proxy filter\n  const std::string RocketmqProxy = \"envoy.filters.network.rocketmq_proxy\";\n  // Dubbo proxy filter\n  const std::string DubboProxy = \"envoy.filters.network.dubbo_proxy\";\n  // HTTP connection manager filter\n  const std::string HttpConnectionManager = \"envoy.filters.network.http_connection_manager\";\n  // Local rate limit filter\n  const std::string LocalRateLimit = \"envoy.filters.network.local_ratelimit\";\n  // Mongo proxy filter\n  const std::string MongoProxy = \"envoy.filters.network.mongo_proxy\";\n  // MySQL proxy filter\n  const std::string MySQLProxy = \"envoy.filters.network.mysql_proxy\";\n  // Postgres proxy filter\n  const std::string PostgresProxy = \"envoy.filters.network.postgres_proxy\";\n  // Rate limit filter\n  const std::string RateLimit = \"envoy.filters.network.ratelimit\";\n  // Redis proxy filter\n  const std::string RedisProxy = \"envoy.filters.network.redis_proxy\";\n  // TCP proxy filter\n  const std::string TcpProxy = \"envoy.filters.network.tcp_proxy\";\n  // Authorization filter\n  const std::string ExtAuthorization = \"envoy.filters.network.ext_authz\";\n  // Kafka Broker filter\n  const std::string KafkaBroker = \"envoy.filters.network.kafka_broker\";\n  // Thrift proxy filter\n  const std::string ThriftProxy = \"envoy.filters.network.thrift_proxy\";\n  // Role based access control 
filter\n  const std::string Rbac = \"envoy.filters.network.rbac\";\n  // SNI Cluster filter\n  const std::string SniCluster = \"envoy.filters.network.sni_cluster\";\n  // SNI Dynamic forward proxy filter\n  const std::string SniDynamicForwardProxy = \"envoy.filters.network.sni_dynamic_forward_proxy\";\n  // ZooKeeper proxy filter\n  const std::string ZooKeeperProxy = \"envoy.filters.network.zookeeper_proxy\";\n  // WebAssembly filter\n  const std::string Wasm = \"envoy.filters.network.wasm\";\n};\n\nusing NetworkFilterNames = ConstSingleton<NetworkFilterNameValues>;\n\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/zookeeper_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# ZooKeeper proxy L7 network filter.\n# Public docs: docs/root/configuration/network_filters/zookeeper_proxy_filter.rst\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"proxy_lib\",\n    srcs = [\n        \"decoder.cc\",\n        \"filter.cc\",\n        \"utils.cc\",\n    ],\n    hdrs = [\n        \"decoder.h\",\n        \"filter.h\",\n        \"utils.h\",\n    ],\n    deps = [\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/common/stats:utility_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"alpha\",\n    deps = [\n        \":proxy_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/network/zookeeper_proxy/config.cc",
    "content": "#include \"extensions/filters/network/zookeeper_proxy/config.h\"\n\n#include <string>\n\n#include \"envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/zookeeper_proxy/filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\n/**\n * Config registration for the ZooKeeper proxy filter. @see NamedNetworkFilterConfigFactory.\n */\nNetwork::FilterFactoryCb ZooKeeperConfigFactory::createFilterFactoryFromProtoTyped(\n    const envoy::extensions::filters::network::zookeeper_proxy::v3::ZooKeeperProxy& proto_config,\n    Server::Configuration::FactoryContext& context) {\n\n  ASSERT(!proto_config.stat_prefix().empty());\n\n  const std::string stat_prefix = fmt::format(\"{}.zookeeper\", proto_config.stat_prefix());\n  const uint32_t max_packet_bytes =\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config, max_packet_bytes, 1024 * 1024);\n\n  ZooKeeperFilterConfigSharedPtr filter_config(\n      std::make_shared<ZooKeeperFilterConfig>(stat_prefix, max_packet_bytes, context.scope()));\n  auto& time_source = context.dispatcher().timeSource();\n\n  return [filter_config, &time_source](Network::FilterManager& filter_manager) -> void {\n    filter_manager.addFilter(std::make_shared<ZooKeeperFilter>(filter_config, time_source));\n  };\n}\n\n/**\n * Static registration for the ZooKeeper proxy filter. @see RegisterFactory.\n */\nREGISTER_FACTORY(ZooKeeperConfigFactory, Server::Configuration::NamedNetworkFilterConfigFactory);\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/zookeeper_proxy/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n#include \"extensions/filters/network/zookeeper_proxy/filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\n/**\n * Config registration for the ZooKeeper proxy filter.\n */\nclass ZooKeeperConfigFactory\n    : public Common::FactoryBase<\n          envoy::extensions::filters::network::zookeeper_proxy::v3::ZooKeeperProxy> {\npublic:\n  ZooKeeperConfigFactory() : FactoryBase(NetworkFilterNames::get().ZooKeeperProxy) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::zookeeper_proxy::v3::ZooKeeperProxy& proto_config,\n      Server::Configuration::FactoryContext& context) override;\n};\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/zookeeper_proxy/decoder.cc",
    "content": "#include \"extensions/filters/network/zookeeper_proxy/decoder.h\"\n\n#include <string>\n\n#include \"common/common/enum_to_int.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\nconstexpr uint32_t BOOL_LENGTH = 1;\nconstexpr uint32_t INT_LENGTH = 4;\nconstexpr uint32_t LONG_LENGTH = 8;\nconstexpr uint32_t XID_LENGTH = 4;\nconstexpr uint32_t OPCODE_LENGTH = 4;\nconstexpr uint32_t ZXID_LENGTH = 8;\nconstexpr uint32_t TIMEOUT_LENGTH = 4;\nconstexpr uint32_t SESSION_LENGTH = 8;\nconstexpr uint32_t MULTI_HEADER_LENGTH = 9;\nconstexpr uint32_t PROTOCOL_VERSION_LENGTH = 4;\nconstexpr uint32_t SERVER_HEADER_LENGTH = 16;\n\nconst char* createFlagsToString(CreateFlags flags) {\n  switch (flags) {\n  case CreateFlags::Persistent:\n    return \"persistent\";\n  case CreateFlags::PersistentSequential:\n    return \"persistent_sequential\";\n  case CreateFlags::Ephemeral:\n    return \"ephemeral\";\n  case CreateFlags::EphemeralSequential:\n    return \"ephemeral_sequential\";\n  case CreateFlags::Container:\n    return \"container\";\n  case CreateFlags::PersistentWithTtl:\n    return \"persistent_with_ttl\";\n  case CreateFlags::PersistentSequentialWithTtl:\n    return \"persistent_sequential_with_ttl\";\n  }\n\n  return \"unknown\";\n}\n\nvoid DecoderImpl::decodeOnData(Buffer::Instance& data, uint64_t& offset) {\n  ENVOY_LOG(trace, \"zookeeper_proxy: decoding request with {} bytes at offset {}\", data.length(),\n            offset);\n\n  // Check message length.\n  const int32_t len = helper_.peekInt32(data, offset);\n  ensureMinLength(len, INT_LENGTH + XID_LENGTH);\n  ensureMaxLength(len);\n\n  auto start_time = time_source_.monotonicTime();\n\n  // Control requests, with XIDs <= 0.\n  //\n  // These are meant to control the state of a session:\n  // connect, keep-alive, authenticate and set initial watches.\n  //\n  // Note: setWatches is a command historically used to set watches\n  //       right 
after connecting, typically used when roaming from one\n  //       ZooKeeper server to the next. Thus, the special xid.\n  //       However, some client implementations might expose setWatches\n  //       as a regular data request, so we support that as well.\n  const int32_t xid = helper_.peekInt32(data, offset);\n  switch (static_cast<XidCodes>(xid)) {\n  case XidCodes::ConnectXid:\n    parseConnect(data, offset, len);\n    requests_by_xid_[xid] = {OpCodes::Connect, std::move(start_time)};\n    return;\n  case XidCodes::PingXid:\n    offset += OPCODE_LENGTH;\n    callbacks_.onPing();\n    requests_by_xid_[xid] = {OpCodes::Ping, std::move(start_time)};\n    return;\n  case XidCodes::AuthXid:\n    parseAuthRequest(data, offset, len);\n    requests_by_xid_[xid] = {OpCodes::SetAuth, std::move(start_time)};\n    return;\n  case XidCodes::SetWatchesXid:\n    offset += OPCODE_LENGTH;\n    parseSetWatchesRequest(data, offset, len);\n    requests_by_xid_[xid] = {OpCodes::SetWatches, std::move(start_time)};\n    return;\n  default:\n    // WATCH_XID is generated by the server, so that and everything\n    // else can be ignored here.\n    break;\n  }\n\n  // Data requests, with XIDs > 0.\n  //\n  // These are meant to happen after a successful control request, except\n  // for two cases: auth requests can happen at any time and ping requests\n  // must happen every 1/3 of the negotiated session timeout, to keep\n  // the session alive.\n  const auto opcode = static_cast<OpCodes>(helper_.peekInt32(data, offset));\n  switch (opcode) {\n  case OpCodes::GetData:\n    parseGetDataRequest(data, offset, len);\n    break;\n  case OpCodes::Create:\n  case OpCodes::Create2:\n  case OpCodes::CreateContainer:\n  case OpCodes::CreateTtl:\n    parseCreateRequest(data, offset, len, static_cast<OpCodes>(opcode));\n    break;\n  case OpCodes::SetData:\n    parseSetRequest(data, offset, len);\n    break;\n  case OpCodes::GetChildren:\n    parseGetChildrenRequest(data, offset, len, false);\n  
  break;\n  case OpCodes::GetChildren2:\n    parseGetChildrenRequest(data, offset, len, true);\n    break;\n  case OpCodes::Delete:\n    parseDeleteRequest(data, offset, len);\n    break;\n  case OpCodes::Exists:\n    parseExistsRequest(data, offset, len);\n    break;\n  case OpCodes::GetAcl:\n    parseGetAclRequest(data, offset, len);\n    break;\n  case OpCodes::SetAcl:\n    parseSetAclRequest(data, offset, len);\n    break;\n  case OpCodes::Sync:\n    callbacks_.onSyncRequest(pathOnlyRequest(data, offset, len));\n    break;\n  case OpCodes::Check:\n    parseCheckRequest(data, offset, len);\n    break;\n  case OpCodes::Multi:\n    parseMultiRequest(data, offset, len);\n    break;\n  case OpCodes::Reconfig:\n    parseReconfigRequest(data, offset, len);\n    break;\n  case OpCodes::SetWatches:\n    parseSetWatchesRequest(data, offset, len);\n    break;\n  case OpCodes::CheckWatches:\n    parseXWatchesRequest(data, offset, len, OpCodes::CheckWatches);\n    break;\n  case OpCodes::RemoveWatches:\n    parseXWatchesRequest(data, offset, len, OpCodes::RemoveWatches);\n    break;\n  case OpCodes::GetEphemerals:\n    callbacks_.onGetEphemeralsRequest(pathOnlyRequest(data, offset, len));\n    break;\n  case OpCodes::GetAllChildrenNumber:\n    callbacks_.onGetAllChildrenNumberRequest(pathOnlyRequest(data, offset, len));\n    break;\n  case OpCodes::Close:\n    callbacks_.onCloseRequest();\n    break;\n  default:\n    throw EnvoyException(fmt::format(\"Unknown opcode: {}\", enumToSignedInt(opcode)));\n  }\n\n  requests_by_xid_[xid] = {opcode, std::move(start_time)};\n}\n\nvoid DecoderImpl::decodeOnWrite(Buffer::Instance& data, uint64_t& offset) {\n  ENVOY_LOG(trace, \"zookeeper_proxy: decoding response with {} bytes at offset {}\", data.length(),\n            offset);\n\n  // Check message length.\n  const int32_t len = helper_.peekInt32(data, offset);\n  ensureMinLength(len, INT_LENGTH + XID_LENGTH);\n  ensureMaxLength(len);\n\n  const auto xid = helper_.peekInt32(data, 
offset);\n  const auto xid_code = static_cast<XidCodes>(xid);\n\n  std::chrono::milliseconds latency;\n  OpCodes opcode;\n\n  if (xid_code != XidCodes::WatchXid) {\n    // Find the corresponding request for this XID.\n    const auto it = requests_by_xid_.find(xid);\n\n    // If this fails, it's either a server-side bug or a malformed packet.\n    if (it == requests_by_xid_.end()) {\n      throw EnvoyException(\"xid not found\");\n    }\n\n    latency = std::chrono::duration_cast<std::chrono::milliseconds>(time_source_.monotonicTime() -\n                                                                    it->second.start_time);\n    opcode = it->second.opcode;\n    requests_by_xid_.erase(it);\n  }\n\n  // Connect responses are special, they have no full reply header\n  // but just an XID with no zxid nor error fields like the ones\n  // available for all other server generated messages.\n  if (xid_code == XidCodes::ConnectXid) {\n    parseConnectResponse(data, offset, len, latency);\n    return;\n  }\n\n  // Control responses that aren't connect, with XIDs <= 0.\n  const auto zxid = helper_.peekInt64(data, offset);\n  const auto error = helper_.peekInt32(data, offset);\n  switch (xid_code) {\n  case XidCodes::PingXid:\n    callbacks_.onResponse(OpCodes::Ping, xid, zxid, error, latency);\n    return;\n  case XidCodes::AuthXid:\n    callbacks_.onResponse(OpCodes::SetAuth, xid, zxid, error, latency);\n    return;\n  case XidCodes::SetWatchesXid:\n    callbacks_.onResponse(OpCodes::SetWatches, xid, zxid, error, latency);\n    return;\n  case XidCodes::WatchXid:\n    parseWatchEvent(data, offset, len, zxid, error);\n    return;\n  default:\n    break;\n  }\n\n  callbacks_.onResponse(opcode, xid, zxid, error, latency);\n  offset += (len - (XID_LENGTH + ZXID_LENGTH + INT_LENGTH));\n}\n\nvoid DecoderImpl::ensureMinLength(const int32_t len, const int32_t minlen) const {\n  if (len < minlen) {\n    throw EnvoyException(\"Packet is too small\");\n  }\n}\n\nvoid 
DecoderImpl::ensureMaxLength(const int32_t len) const {\n  if (static_cast<uint32_t>(len) > max_packet_bytes_) {\n    throw EnvoyException(\"Packet is too big\");\n  }\n}\n\nvoid DecoderImpl::parseConnect(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + ZXID_LENGTH + TIMEOUT_LENGTH + SESSION_LENGTH + INT_LENGTH);\n\n  // Skip zxid, timeout, and session id.\n  offset += ZXID_LENGTH + TIMEOUT_LENGTH + SESSION_LENGTH;\n\n  // Skip password.\n  skipString(data, offset);\n\n  const bool readonly = maybeReadBool(data, offset);\n\n  callbacks_.onConnect(readonly);\n}\n\nvoid DecoderImpl::parseAuthRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + INT_LENGTH + INT_LENGTH);\n\n  // Skip opcode + type.\n  offset += OPCODE_LENGTH + INT_LENGTH;\n  const std::string scheme = helper_.peekString(data, offset);\n  // Skip credential.\n  skipString(data, offset);\n\n  callbacks_.onAuthRequest(scheme);\n}\n\nvoid DecoderImpl::parseGetDataRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + BOOL_LENGTH);\n\n  const std::string path = helper_.peekString(data, offset);\n  const bool watch = helper_.peekBool(data, offset);\n\n  callbacks_.onGetDataRequest(path, watch);\n}\n\nvoid DecoderImpl::skipAcls(Buffer::Instance& data, uint64_t& offset) {\n  const int32_t count = helper_.peekInt32(data, offset);\n\n  for (int i = 0; i < count; ++i) {\n    // Perms.\n    helper_.peekInt32(data, offset);\n    // Skip scheme.\n    skipString(data, offset);\n    // Skip cred.\n    skipString(data, offset);\n  }\n}\n\nvoid DecoderImpl::parseCreateRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len,\n                                     OpCodes opcode) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH));\n\n  const std::string path = helper_.peekString(data, 
offset);\n\n  // Skip data.\n  skipString(data, offset);\n  skipAcls(data, offset);\n\n  const CreateFlags flags = static_cast<CreateFlags>(helper_.peekInt32(data, offset));\n  callbacks_.onCreateRequest(path, flags, opcode);\n}\n\nvoid DecoderImpl::parseSetRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH));\n\n  const std::string path = helper_.peekString(data, offset);\n  // Skip data.\n  skipString(data, offset);\n  // Ignore version.\n  helper_.peekInt32(data, offset);\n\n  callbacks_.onSetRequest(path);\n}\n\nvoid DecoderImpl::parseGetChildrenRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len,\n                                          const bool two) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + BOOL_LENGTH);\n\n  const std::string path = helper_.peekString(data, offset);\n  const bool watch = helper_.peekBool(data, offset);\n\n  callbacks_.onGetChildrenRequest(path, watch, two);\n}\n\nvoid DecoderImpl::parseDeleteRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (2 * INT_LENGTH));\n\n  const std::string path = helper_.peekString(data, offset);\n  const int32_t version = helper_.peekInt32(data, offset);\n\n  callbacks_.onDeleteRequest(path, version);\n}\n\nvoid DecoderImpl::parseExistsRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + BOOL_LENGTH);\n\n  const std::string path = helper_.peekString(data, offset);\n  const bool watch = helper_.peekBool(data, offset);\n\n  callbacks_.onExistsRequest(path, watch);\n}\n\nvoid DecoderImpl::parseGetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH);\n\n  const std::string path = helper_.peekString(data, offset);\n\n  callbacks_.onGetAclRequest(path);\n}\n\nvoid 
DecoderImpl::parseSetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (2 * INT_LENGTH));\n\n  const std::string path = helper_.peekString(data, offset);\n  skipAcls(data, offset);\n  const int32_t version = helper_.peekInt32(data, offset);\n\n  callbacks_.onSetAclRequest(path, version);\n}\n\nstd::string DecoderImpl::pathOnlyRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH);\n  return helper_.peekString(data, offset);\n}\n\nvoid DecoderImpl::parseCheckRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, (2 * INT_LENGTH));\n\n  const std::string path = helper_.peekString(data, offset);\n  const int32_t version = helper_.peekInt32(data, offset);\n\n  callbacks_.onCheckRequest(path, version);\n}\n\nvoid DecoderImpl::parseMultiRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  // Treat empty transactions as a decoding error, there should be at least 1 header.\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + MULTI_HEADER_LENGTH);\n\n  while (true) {\n    const int32_t opcode = helper_.peekInt32(data, offset);\n    const bool done = helper_.peekBool(data, offset);\n    // Ignore error field.\n    helper_.peekInt32(data, offset);\n\n    if (done) {\n      break;\n    }\n\n    switch (static_cast<OpCodes>(opcode)) {\n    case OpCodes::Create:\n      parseCreateRequest(data, offset, len, OpCodes::Create);\n      break;\n    case OpCodes::SetData:\n      parseSetRequest(data, offset, len);\n      break;\n    case OpCodes::Check:\n      parseCheckRequest(data, offset, len);\n      break;\n    default:\n      throw EnvoyException(fmt::format(\"Unknown opcode within a transaction: {}\", opcode));\n    }\n  }\n\n  callbacks_.onMultiRequest();\n}\n\nvoid DecoderImpl::parseReconfigRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, 
XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH) + LONG_LENGTH);\n\n  // Skip joining.\n  skipString(data, offset);\n  // Skip leaving.\n  skipString(data, offset);\n  // Skip new members.\n  skipString(data, offset);\n  // Read config id.\n  helper_.peekInt64(data, offset);\n\n  callbacks_.onReconfigRequest();\n}\n\nvoid DecoderImpl::parseSetWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH));\n\n  // Data watches.\n  skipStrings(data, offset);\n  // Exist watches.\n  skipStrings(data, offset);\n  // Child watches.\n  skipStrings(data, offset);\n\n  callbacks_.onSetWatchesRequest();\n}\n\nvoid DecoderImpl::parseXWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len,\n                                       OpCodes opcode) {\n  ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (2 * INT_LENGTH));\n\n  const std::string path = helper_.peekString(data, offset);\n  const int32_t type = helper_.peekInt32(data, offset);\n\n  if (opcode == OpCodes::CheckWatches) {\n    callbacks_.onCheckWatchesRequest(path, type);\n  } else {\n    callbacks_.onRemoveWatchesRequest(path, type);\n  }\n}\n\nvoid DecoderImpl::skipString(Buffer::Instance& data, uint64_t& offset) {\n  const int32_t slen = helper_.peekInt32(data, offset);\n  helper_.skip(slen, offset);\n}\n\nvoid DecoderImpl::skipStrings(Buffer::Instance& data, uint64_t& offset) {\n  const int32_t count = helper_.peekInt32(data, offset);\n\n  for (int i = 0; i < count; ++i) {\n    skipString(data, offset);\n  }\n}\n\nvoid DecoderImpl::onData(Buffer::Instance& data) { decode(data, DecodeType::READ); }\n\nvoid DecoderImpl::onWrite(Buffer::Instance& data) { decode(data, DecodeType::WRITE); }\n\nvoid DecoderImpl::decode(Buffer::Instance& data, DecodeType dtype) {\n  uint64_t offset = 0;\n\n  try {\n    while (offset < data.length()) {\n      // Reset the helper's cursor, to ensure the current message stays within the\n      // 
allowed max length, even when it's different than the declared length\n      // by the message.\n      //\n      // Note: we need to keep two cursors — offset and helper_'s internal one — because\n      //       a buffer may contain multiple messages, so offset is global while helper_'s\n      //       internal cursor gets reset for each individual message.\n      helper_.reset();\n\n      const uint64_t current = offset;\n      switch (dtype) {\n      case DecodeType::READ:\n        decodeOnData(data, offset);\n        callbacks_.onRequestBytes(offset - current);\n        break;\n      case DecodeType::WRITE:\n        decodeOnWrite(data, offset);\n        callbacks_.onResponseBytes(offset - current);\n        break;\n      }\n    }\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG(debug, \"zookeeper_proxy: decoding exception {}\", e.what());\n    callbacks_.onDecodeError();\n  }\n}\n\nvoid DecoderImpl::parseConnectResponse(Buffer::Instance& data, uint64_t& offset, uint32_t len,\n                                       const std::chrono::milliseconds& latency) {\n  ensureMinLength(len, PROTOCOL_VERSION_LENGTH + TIMEOUT_LENGTH + SESSION_LENGTH + INT_LENGTH);\n\n  const auto timeout = helper_.peekInt32(data, offset);\n\n  // Skip session id + password.\n  offset += SESSION_LENGTH;\n  skipString(data, offset);\n\n  const bool readonly = maybeReadBool(data, offset);\n\n  callbacks_.onConnectResponse(0, timeout, readonly, latency);\n}\n\nvoid DecoderImpl::parseWatchEvent(Buffer::Instance& data, uint64_t& offset, const uint32_t len,\n                                  const int64_t zxid, const int32_t error) {\n  ensureMinLength(len, SERVER_HEADER_LENGTH + (3 * INT_LENGTH));\n\n  const auto event_type = helper_.peekInt32(data, offset);\n  const auto client_state = helper_.peekInt32(data, offset);\n  const auto path = helper_.peekString(data, offset);\n\n  callbacks_.onWatchEvent(event_type, client_state, path, zxid, error);\n}\n\nbool 
DecoderImpl::maybeReadBool(Buffer::Instance& data, uint64_t& offset) {\n  if (data.length() >= offset + 1) {\n    return helper_.peekBool(data, offset);\n  }\n  return false;\n}\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/zookeeper_proxy/decoder.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/network/zookeeper_proxy/utils.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\nenum class XidCodes {\n  ConnectXid = 0,\n  WatchXid = -1,\n  PingXid = -2,\n  AuthXid = -4,\n  SetWatchesXid = -8\n};\n\nenum class OpCodes {\n  Connect = 0,\n  Create = 1,\n  Delete = 2,\n  Exists = 3,\n  GetData = 4,\n  SetData = 5,\n  GetAcl = 6,\n  SetAcl = 7,\n  GetChildren = 8,\n  Sync = 9,\n  Ping = 11,\n  GetChildren2 = 12,\n  Check = 13,\n  Multi = 14,\n  Create2 = 15,\n  Reconfig = 16,\n  CheckWatches = 17,\n  RemoveWatches = 18,\n  CreateContainer = 19,\n  CreateTtl = 21,\n  Close = -11,\n  SetAuth = 100,\n  SetWatches = 101,\n  GetEphemerals = 103,\n  GetAllChildrenNumber = 104\n};\n\nenum class WatcherType { Children = 1, Data = 2, Any = 3 };\n\nenum class CreateFlags {\n  Persistent,\n  PersistentSequential,\n  Ephemeral,\n  EphemeralSequential,\n  Container,\n  PersistentWithTtl,\n  PersistentSequentialWithTtl\n};\n\nconst char* createFlagsToString(CreateFlags flags);\n\n/**\n * General callbacks for dispatching decoded ZooKeeper messages to a sink.\n */\nclass DecoderCallbacks {\npublic:\n  virtual ~DecoderCallbacks() = default;\n\n  virtual void onDecodeError() PURE;\n  virtual void onRequestBytes(uint64_t bytes) PURE;\n  virtual void onConnect(bool readonly) PURE;\n  virtual void onPing() PURE;\n  virtual void onAuthRequest(const std::string& scheme) PURE;\n  virtual void onGetDataRequest(const std::string& path, bool watch) PURE;\n  virtual void onCreateRequest(const std::string& path, CreateFlags flags, OpCodes opcode) PURE;\n  virtual void onSetRequest(const std::string& path) PURE;\n  virtual void onGetChildrenRequest(const std::string& 
path, bool watch, bool v2) PURE;\n  virtual void onGetEphemeralsRequest(const std::string& path) PURE;\n  virtual void onGetAllChildrenNumberRequest(const std::string& path) PURE;\n  virtual void onDeleteRequest(const std::string& path, int32_t version) PURE;\n  virtual void onExistsRequest(const std::string& path, bool watch) PURE;\n  virtual void onGetAclRequest(const std::string& path) PURE;\n  virtual void onSetAclRequest(const std::string& path, int32_t version) PURE;\n  virtual void onSyncRequest(const std::string& path) PURE;\n  virtual void onCheckRequest(const std::string& path, int32_t version) PURE;\n  virtual void onMultiRequest() PURE;\n  virtual void onReconfigRequest() PURE;\n  virtual void onSetWatchesRequest() PURE;\n  virtual void onCheckWatchesRequest(const std::string& path, int32_t type) PURE;\n  virtual void onRemoveWatchesRequest(const std::string& path, int32_t type) PURE;\n  virtual void onCloseRequest() PURE;\n  virtual void onResponseBytes(uint64_t bytes) PURE;\n  virtual void onConnectResponse(int32_t proto_version, int32_t timeout, bool readonly,\n                                 const std::chrono::milliseconds& latency) PURE;\n  virtual void onResponse(OpCodes opcode, int32_t xid, int64_t zxid, int32_t error,\n                          const std::chrono::milliseconds& latency) PURE;\n  virtual void onWatchEvent(int32_t event_type, int32_t client_state, const std::string& path,\n                            int64_t zxid, int32_t error) PURE;\n};\n\n/**\n * ZooKeeper message decoder.\n */\nclass Decoder {\npublic:\n  virtual ~Decoder() = default;\n\n  virtual void onData(Buffer::Instance& data) PURE;\n  virtual void onWrite(Buffer::Instance& data) PURE;\n};\n\nusing DecoderPtr = std::unique_ptr<Decoder>;\n\nclass DecoderImpl : public Decoder, Logger::Loggable<Logger::Id::filter> {\npublic:\n  explicit DecoderImpl(DecoderCallbacks& callbacks, uint32_t max_packet_bytes,\n                       TimeSource& time_source)\n      : 
callbacks_(callbacks), max_packet_bytes_(max_packet_bytes), helper_(max_packet_bytes),\n        time_source_(time_source) {}\n\n  // ZooKeeperProxy::Decoder\n  void onData(Buffer::Instance& data) override;\n  void onWrite(Buffer::Instance& data) override;\n\nprivate:\n  enum class DecodeType { READ, WRITE };\n  struct RequestBegin {\n    OpCodes opcode;\n    MonotonicTime start_time;\n  };\n\n  void decode(Buffer::Instance& data, DecodeType dtype);\n  void decodeOnData(Buffer::Instance& data, uint64_t& offset);\n  void decodeOnWrite(Buffer::Instance& data, uint64_t& offset);\n  void parseConnect(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseAuthRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseGetDataRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseCreateRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, OpCodes opcode);\n  void skipAcls(Buffer::Instance& data, uint64_t& offset);\n  void parseSetRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseGetChildrenRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, bool two);\n  void parseDeleteRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseExistsRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseGetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseSetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseCheckRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseMultiRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseReconfigRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseSetWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseXWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, OpCodes opcode);\n  void skipString(Buffer::Instance& data, uint64_t& 
offset);\n  void skipStrings(Buffer::Instance& data, uint64_t& offset);\n  void ensureMinLength(int32_t len, int32_t minlen) const;\n  void ensureMaxLength(int32_t len) const;\n  std::string pathOnlyRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len);\n  void parseConnectResponse(Buffer::Instance& data, uint64_t& offset, uint32_t len,\n                            const std::chrono::milliseconds& latency);\n  void parseWatchEvent(Buffer::Instance& data, uint64_t& offset, uint32_t len, int64_t zxid,\n                       int32_t error);\n  bool maybeReadBool(Buffer::Instance& data, uint64_t& offset);\n\n  DecoderCallbacks& callbacks_;\n  const uint32_t max_packet_bytes_;\n  BufferHelper helper_;\n  TimeSource& time_source_;\n  absl::node_hash_map<int32_t, RequestBegin> requests_by_xid_;\n};\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/zookeeper_proxy/filter.cc",
    "content": "#include \"extensions/filters/network/zookeeper_proxy/filter.h\"\n\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n#include \"common/stats/utility.h\"\n\n#include \"extensions/filters/network/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\nZooKeeperFilterConfig::ZooKeeperFilterConfig(const std::string& stat_prefix,\n                                             const uint32_t max_packet_bytes, Stats::Scope& scope)\n    : scope_(scope), max_packet_bytes_(max_packet_bytes), stats_(generateStats(stat_prefix, scope)),\n      stat_name_set_(scope.symbolTable().makeSet(\"Zookeeper\")),\n      stat_prefix_(stat_name_set_->add(stat_prefix)), auth_(stat_name_set_->add(\"auth\")),\n      connect_latency_(stat_name_set_->add(\"connect_response_latency\")),\n      unknown_scheme_rq_(stat_name_set_->add(\"unknown_scheme_rq\")),\n      unknown_opcode_latency_(stat_name_set_->add(\"unknown_opcode_latency\")) {\n  // https://zookeeper.apache.org/doc/r3.5.4-beta/zookeeperProgrammers.html#sc_BuiltinACLSchemes\n  // lists commons schemes: \"world\", \"auth\", \"digest\", \"host\", \"x509\", and\n  // \"ip\". 
These are used in filter.cc by appending \"_rq\".\n  stat_name_set_->rememberBuiltins(\n      {\"auth_rq\", \"digest_rq\", \"host_rq\", \"ip_rq\", \"ping_response_rq\", \"world_rq\", \"x509_rq\"});\n\n  initOpCode(OpCodes::Ping, stats_.ping_resp_, \"ping_response\");\n  initOpCode(OpCodes::SetAuth, stats_.auth_resp_, \"auth_response\");\n  initOpCode(OpCodes::GetData, stats_.getdata_resp_, \"getdata_resp\");\n  initOpCode(OpCodes::Create, stats_.create_resp_, \"create_resp\");\n  initOpCode(OpCodes::Create2, stats_.create2_resp_, \"create2_resp\");\n  initOpCode(OpCodes::CreateContainer, stats_.createcontainer_resp_, \"createcontainer_resp\");\n  initOpCode(OpCodes::CreateTtl, stats_.createttl_resp_, \"createttl_resp\");\n  initOpCode(OpCodes::SetData, stats_.setdata_resp_, \"setdata_resp\");\n  initOpCode(OpCodes::GetChildren, stats_.getchildren_resp_, \"getchildren_resp\");\n  initOpCode(OpCodes::GetChildren2, stats_.getchildren2_resp_, \"getchildren2_resp\");\n  initOpCode(OpCodes::Delete, stats_.delete_resp_, \"delete_resp\");\n  initOpCode(OpCodes::Exists, stats_.exists_resp_, \"exists_resp\");\n  initOpCode(OpCodes::GetAcl, stats_.getacl_resp_, \"getacl_resp\");\n  initOpCode(OpCodes::SetAcl, stats_.setacl_resp_, \"setacl_resp\");\n  initOpCode(OpCodes::Sync, stats_.sync_resp_, \"sync_resp\");\n  initOpCode(OpCodes::Check, stats_.check_resp_, \"check_resp\");\n  initOpCode(OpCodes::Multi, stats_.multi_resp_, \"multi_resp\");\n  initOpCode(OpCodes::Reconfig, stats_.reconfig_resp_, \"reconfig_resp\");\n  initOpCode(OpCodes::SetWatches, stats_.setwatches_resp_, \"setwatches_resp\");\n  initOpCode(OpCodes::CheckWatches, stats_.checkwatches_resp_, \"checkwatches_resp\");\n  initOpCode(OpCodes::RemoveWatches, stats_.removewatches_resp_, \"removewatches_resp\");\n  initOpCode(OpCodes::GetEphemerals, stats_.getephemerals_resp_, \"getephemerals_resp\");\n  initOpCode(OpCodes::GetAllChildrenNumber, stats_.getallchildrennumber_resp_,\n             
\"getallchildrennumber_resp\");\n  initOpCode(OpCodes::Close, stats_.close_resp_, \"close_resp\");\n}\n\nvoid ZooKeeperFilterConfig::initOpCode(OpCodes opcode, Stats::Counter& counter,\n                                       absl::string_view name) {\n  OpCodeInfo& opcode_info = op_code_map_[opcode];\n  opcode_info.counter_ = &counter;\n  opcode_info.opname_ = std::string(name);\n  opcode_info.latency_name_ = stat_name_set_->add(absl::StrCat(name, \"_latency\"));\n}\n\nZooKeeperFilter::ZooKeeperFilter(ZooKeeperFilterConfigSharedPtr config, TimeSource& time_source)\n    : config_(std::move(config)), decoder_(createDecoder(*this, time_source)) {}\n\nvoid ZooKeeperFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {\n  read_callbacks_ = &callbacks;\n}\n\nNetwork::FilterStatus ZooKeeperFilter::onData(Buffer::Instance& data, bool) {\n  clearDynamicMetadata();\n  decoder_->onData(data);\n  return Network::FilterStatus::Continue;\n}\n\nNetwork::FilterStatus ZooKeeperFilter::onWrite(Buffer::Instance& data, bool) {\n  clearDynamicMetadata();\n  decoder_->onWrite(data);\n  return Network::FilterStatus::Continue;\n}\n\nNetwork::FilterStatus ZooKeeperFilter::onNewConnection() { return Network::FilterStatus::Continue; }\n\nDecoderPtr ZooKeeperFilter::createDecoder(DecoderCallbacks& callbacks, TimeSource& time_source) {\n  return std::make_unique<DecoderImpl>(callbacks, config_->maxPacketBytes(), time_source);\n}\n\nvoid ZooKeeperFilter::setDynamicMetadata(const std::string& key, const std::string& value) {\n  setDynamicMetadata({{key, value}});\n}\n\nvoid ZooKeeperFilter::clearDynamicMetadata() {\n  envoy::config::core::v3::Metadata& dynamic_metadata =\n      read_callbacks_->connection().streamInfo().dynamicMetadata();\n  auto& metadata =\n      (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().ZooKeeperProxy];\n  metadata.mutable_fields()->clear();\n}\n\nvoid ZooKeeperFilter::setDynamicMetadata(\n    const 
std::vector<std::pair<const std::string, const std::string>>& data) {\n  envoy::config::core::v3::Metadata& dynamic_metadata =\n      read_callbacks_->connection().streamInfo().dynamicMetadata();\n  ProtobufWkt::Struct metadata(\n      (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().ZooKeeperProxy]);\n  auto& fields = *metadata.mutable_fields();\n\n  for (const auto& pair : data) {\n    auto val = ProtobufWkt::Value();\n    val.set_string_value(pair.second);\n    fields.insert({pair.first, val});\n  }\n\n  read_callbacks_->connection().streamInfo().setDynamicMetadata(\n      NetworkFilterNames::get().ZooKeeperProxy, metadata);\n}\n\nvoid ZooKeeperFilter::onConnect(const bool readonly) {\n  if (readonly) {\n    config_->stats_.connect_readonly_rq_.inc();\n    setDynamicMetadata(\"opname\", \"connect_readonly\");\n  } else {\n    config_->stats_.connect_rq_.inc();\n    setDynamicMetadata(\"opname\", \"connect\");\n  }\n}\n\nvoid ZooKeeperFilter::onDecodeError() {\n  config_->stats_.decoder_error_.inc();\n  setDynamicMetadata(\"opname\", \"error\");\n}\n\nvoid ZooKeeperFilter::onRequestBytes(const uint64_t bytes) {\n  config_->stats_.request_bytes_.add(bytes);\n  setDynamicMetadata(\"bytes\", std::to_string(bytes));\n}\n\nvoid ZooKeeperFilter::onResponseBytes(const uint64_t bytes) {\n  config_->stats_.response_bytes_.add(bytes);\n  setDynamicMetadata(\"bytes\", std::to_string(bytes));\n}\n\nvoid ZooKeeperFilter::onPing() {\n  config_->stats_.ping_rq_.inc();\n  setDynamicMetadata(\"opname\", \"ping\");\n}\n\nvoid ZooKeeperFilter::onAuthRequest(const std::string& scheme) {\n  Stats::Counter& counter = Stats::Utility::counterFromStatNames(\n      config_->scope_, {config_->stat_prefix_, config_->auth_,\n                        config_->stat_name_set_->getBuiltin(absl::StrCat(scheme, \"_rq\"),\n                                                            config_->unknown_scheme_rq_)});\n  counter.inc();\n  setDynamicMetadata(\"opname\", 
\"auth\");\n}\n\nvoid ZooKeeperFilter::onGetDataRequest(const std::string& path, const bool watch) {\n  config_->stats_.getdata_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"getdata\"}, {\"path\", path}, {\"watch\", watch ? \"true\" : \"false\"}});\n}\n\nvoid ZooKeeperFilter::onCreateRequest(const std::string& path, const CreateFlags flags,\n                                      const OpCodes opcode) {\n  std::string opname;\n\n  switch (opcode) {\n  case OpCodes::Create:\n    opname = \"create\";\n    config_->stats_.create_rq_.inc();\n    break;\n  case OpCodes::Create2:\n    opname = \"create2\";\n    config_->stats_.create2_rq_.inc();\n    break;\n  case OpCodes::CreateContainer:\n    opname = \"createcontainer\";\n    config_->stats_.createcontainer_rq_.inc();\n    break;\n  case OpCodes::CreateTtl:\n    opname = \"createttl\";\n    config_->stats_.createttl_rq_.inc();\n    break;\n  default:\n    throw EnvoyException(fmt::format(\"Unknown opcode: {}\", enumToSignedInt(opcode)));\n    break;\n  }\n\n  setDynamicMetadata(\n      {{\"opname\", opname}, {\"path\", path}, {\"create_type\", createFlagsToString(flags)}});\n}\n\nvoid ZooKeeperFilter::onSetRequest(const std::string& path) {\n  config_->stats_.setdata_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"setdata\"}, {\"path\", path}});\n}\n\nvoid ZooKeeperFilter::onGetChildrenRequest(const std::string& path, const bool watch,\n                                           const bool v2) {\n  std::string opname = \"getchildren\";\n\n  if (v2) {\n    config_->stats_.getchildren2_rq_.inc();\n    opname = \"getchildren2\";\n  } else {\n    config_->stats_.getchildren_rq_.inc();\n  }\n\n  setDynamicMetadata({{\"opname\", opname}, {\"path\", path}, {\"watch\", watch ? 
\"true\" : \"false\"}});\n}\n\nvoid ZooKeeperFilter::onDeleteRequest(const std::string& path, const int32_t version) {\n  config_->stats_.delete_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"delete\"}, {\"path\", path}, {\"version\", std::to_string(version)}});\n}\n\nvoid ZooKeeperFilter::onExistsRequest(const std::string& path, const bool watch) {\n  config_->stats_.exists_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"exists\"}, {\"path\", path}, {\"watch\", watch ? \"true\" : \"false\"}});\n}\n\nvoid ZooKeeperFilter::onGetAclRequest(const std::string& path) {\n  config_->stats_.getacl_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"getacl\"}, {\"path\", path}});\n}\n\nvoid ZooKeeperFilter::onSetAclRequest(const std::string& path, const int32_t version) {\n  config_->stats_.setacl_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"setacl\"}, {\"path\", path}, {\"version\", std::to_string(version)}});\n}\n\nvoid ZooKeeperFilter::onSyncRequest(const std::string& path) {\n  config_->stats_.sync_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"sync\"}, {\"path\", path}});\n}\n\nvoid ZooKeeperFilter::onCheckRequest(const std::string&, const int32_t) {\n  config_->stats_.check_rq_.inc();\n}\n\nvoid ZooKeeperFilter::onCheckWatchesRequest(const std::string& path, const int32_t) {\n  config_->stats_.checkwatches_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"checkwatches\"}, {\"path\", path}});\n}\n\nvoid ZooKeeperFilter::onRemoveWatchesRequest(const std::string& path, const int32_t) {\n  config_->stats_.removewatches_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"removewatches\"}, {\"path\", path}});\n}\n\nvoid ZooKeeperFilter::onMultiRequest() {\n  config_->stats_.multi_rq_.inc();\n  setDynamicMetadata(\"opname\", \"multi\");\n}\n\nvoid ZooKeeperFilter::onReconfigRequest() {\n  config_->stats_.reconfig_rq_.inc();\n  setDynamicMetadata(\"opname\", \"reconfig\");\n}\n\nvoid ZooKeeperFilter::onSetWatchesRequest() {\n  config_->stats_.setwatches_rq_.inc();\n  
setDynamicMetadata(\"opname\", \"setwatches\");\n}\n\nvoid ZooKeeperFilter::onGetEphemeralsRequest(const std::string& path) {\n  config_->stats_.getephemerals_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"getephemerals\"}, {\"path\", path}});\n}\n\nvoid ZooKeeperFilter::onGetAllChildrenNumberRequest(const std::string& path) {\n  config_->stats_.getallchildrennumber_rq_.inc();\n  setDynamicMetadata({{\"opname\", \"getallchildrennumber\"}, {\"path\", path}});\n}\n\nvoid ZooKeeperFilter::onCloseRequest() {\n  config_->stats_.close_rq_.inc();\n  setDynamicMetadata(\"opname\", \"close\");\n}\n\nvoid ZooKeeperFilter::onConnectResponse(const int32_t proto_version, const int32_t timeout,\n                                        const bool readonly,\n                                        const std::chrono::milliseconds& latency) {\n  config_->stats_.connect_resp_.inc();\n\n  Stats::Histogram& histogram = Stats::Utility::histogramFromElements(\n      config_->scope_, {config_->stat_prefix_, config_->connect_latency_},\n      Stats::Histogram::Unit::Milliseconds);\n  histogram.recordValue(latency.count());\n\n  setDynamicMetadata({{\"opname\", \"connect_response\"},\n                      {\"protocol_version\", std::to_string(proto_version)},\n                      {\"timeout\", std::to_string(timeout)},\n                      {\"readonly\", std::to_string(readonly)}});\n}\n\nvoid ZooKeeperFilter::onResponse(const OpCodes opcode, const int32_t xid, const int64_t zxid,\n                                 const int32_t error, const std::chrono::milliseconds& latency) {\n  Stats::StatName opcode_latency = config_->unknown_opcode_latency_;\n  auto iter = config_->op_code_map_.find(opcode);\n  std::string opname = \"\";\n  if (iter != config_->op_code_map_.end()) {\n    const ZooKeeperFilterConfig::OpCodeInfo& opcode_info = iter->second;\n    opcode_info.counter_->inc();\n    opname = opcode_info.opname_;\n    opcode_latency = opcode_info.latency_name_;\n  }\n\n  
Stats::Histogram& histogram = Stats::Utility::histogramFromStatNames(\n      config_->scope_, {config_->stat_prefix_, opcode_latency},\n      Stats::Histogram::Unit::Milliseconds);\n  histogram.recordValue(latency.count());\n\n  setDynamicMetadata({{\"opname\", opname},\n                      {\"xid\", std::to_string(xid)},\n                      {\"zxid\", std::to_string(zxid)},\n                      {\"error\", std::to_string(error)}});\n}\n\nvoid ZooKeeperFilter::onWatchEvent(const int32_t event_type, const int32_t client_state,\n                                   const std::string& path, const int64_t zxid,\n                                   const int32_t error) {\n  config_->stats_.watch_event_.inc();\n  setDynamicMetadata({{\"opname\", \"watch_event\"},\n                      {\"event_type\", std::to_string(event_type)},\n                      {\"client_state\", std::to_string(client_state)},\n                      {\"path\", path},\n                      {\"zxid\", std::to_string(zxid)},\n                      {\"error\", std::to_string(error)}});\n}\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/zookeeper_proxy/filter.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/filters/network/zookeeper_proxy/decoder.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\n/**\n * All ZooKeeper proxy stats. @see stats_macros.h\n */\n#define ALL_ZOOKEEPER_PROXY_STATS(COUNTER)                                                         \\\n  COUNTER(decoder_error)                                                                           \\\n  COUNTER(request_bytes)                                                                           \\\n  COUNTER(connect_rq)                                                                              \\\n  COUNTER(connect_readonly_rq)                                                                     \\\n  COUNTER(getdata_rq)                                                                              \\\n  COUNTER(create_rq)                                                                               \\\n  COUNTER(create2_rq)                                                                              \\\n  COUNTER(createcontainer_rq)                                                                      \\\n  COUNTER(createttl_rq)                                                                            \\\n  COUNTER(setdata_rq)                                                                              \\\n  COUNTER(getchildren_rq)                                                                          \\\n  COUNTER(getchildren2_rq)                                                                         \\\n  
COUNTER(getephemerals_rq)                                                                        \\\n  COUNTER(getallchildrennumber_rq)                                                                 \\\n  COUNTER(delete_rq)                                                                               \\\n  COUNTER(exists_rq)                                                                               \\\n  COUNTER(getacl_rq)                                                                               \\\n  COUNTER(setacl_rq)                                                                               \\\n  COUNTER(sync_rq)                                                                                 \\\n  COUNTER(ping_rq)                                                                                 \\\n  COUNTER(multi_rq)                                                                                \\\n  COUNTER(reconfig_rq)                                                                             \\\n  COUNTER(close_rq)                                                                                \\\n  COUNTER(setauth_rq)                                                                              \\\n  COUNTER(setwatches_rq)                                                                           \\\n  COUNTER(checkwatches_rq)                                                                         \\\n  COUNTER(removewatches_rq)                                                                        \\\n  COUNTER(check_rq)                                                                                \\\n  COUNTER(response_bytes)                                                                          \\\n  COUNTER(connect_resp)                                                                            \\\n  COUNTER(ping_resp)                                                                               \\\n  COUNTER(auth_resp)                         
                                                      \\\n  COUNTER(getdata_resp)                                                                            \\\n  COUNTER(create_resp)                                                                             \\\n  COUNTER(create2_resp)                                                                            \\\n  COUNTER(createcontainer_resp)                                                                    \\\n  COUNTER(createttl_resp)                                                                          \\\n  COUNTER(setdata_resp)                                                                            \\\n  COUNTER(getchildren_resp)                                                                        \\\n  COUNTER(getchildren2_resp)                                                                       \\\n  COUNTER(getephemerals_resp)                                                                      \\\n  COUNTER(getallchildrennumber_resp)                                                               \\\n  COUNTER(delete_resp)                                                                             \\\n  COUNTER(exists_resp)                                                                             \\\n  COUNTER(getacl_resp)                                                                             \\\n  COUNTER(setacl_resp)                                                                             \\\n  COUNTER(sync_resp)                                                                               \\\n  COUNTER(multi_resp)                                                                              \\\n  COUNTER(reconfig_resp)                                                                           \\\n  COUNTER(close_resp)                                                                              \\\n  COUNTER(setauth_resp)                                                                 
           \\\n  COUNTER(setwatches_resp)                                                                         \\\n  COUNTER(checkwatches_resp)                                                                       \\\n  COUNTER(removewatches_resp)                                                                      \\\n  COUNTER(check_resp)                                                                              \\\n  COUNTER(watch_event)\n\n/**\n * Struct definition for all ZooKeeper proxy stats. @see stats_macros.h\n */\nstruct ZooKeeperProxyStats {\n  ALL_ZOOKEEPER_PROXY_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Configuration for the ZooKeeper proxy filter.\n */\nclass ZooKeeperFilterConfig {\npublic:\n  ZooKeeperFilterConfig(const std::string& stat_prefix, uint32_t max_packet_bytes,\n                        Stats::Scope& scope);\n\n  const ZooKeeperProxyStats& stats() { return stats_; }\n  uint32_t maxPacketBytes() const { return max_packet_bytes_; }\n\n  // Captures the counter used to track total op-code usage, as well as the\n  // StatName under which to collect the latency for that op-code. 
The\n  // latency-name will be joined with the stat_prefix_, which varies per filter\n  // instance.\n  struct OpCodeInfo {\n    Stats::Counter* counter_;\n    std::string opname_;\n    Stats::StatName latency_name_;\n  };\n\n  absl::flat_hash_map<OpCodes, OpCodeInfo> op_code_map_;\n  Stats::Scope& scope_;\n  const uint32_t max_packet_bytes_;\n  ZooKeeperProxyStats stats_;\n  Stats::StatNameSetPtr stat_name_set_;\n  const Stats::StatName stat_prefix_;\n  const Stats::StatName auth_;\n  const Stats::StatName connect_latency_;\n  const Stats::StatName unknown_scheme_rq_;\n  const Stats::StatName unknown_opcode_latency_;\n\nprivate:\n  void initOpCode(OpCodes opcode, Stats::Counter& counter, absl::string_view name);\n\n  ZooKeeperProxyStats generateStats(const std::string& prefix, Stats::Scope& scope) {\n    return ZooKeeperProxyStats{ALL_ZOOKEEPER_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))};\n  }\n};\n\nusing ZooKeeperFilterConfigSharedPtr = std::shared_ptr<ZooKeeperFilterConfig>;\n\n/**\n * Implementation of ZooKeeper proxy filter.\n */\nclass ZooKeeperFilter : public Network::Filter,\n                        DecoderCallbacks,\n                        Logger::Loggable<Logger::Id::filter> {\npublic:\n  ZooKeeperFilter(ZooKeeperFilterConfigSharedPtr config, TimeSource& time_source);\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override;\n\n  // Network::WriteFilter\n  Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override;\n\n  // ZooKeeperProxy::DecoderCallback\n  void onDecodeError() override;\n  void onRequestBytes(uint64_t bytes) override;\n  void onConnect(bool readonly) override;\n  void onPing() override;\n  void onAuthRequest(const std::string& scheme) override;\n  void onGetDataRequest(const std::string& path, bool watch) override;\n  
void onCreateRequest(const std::string& path, CreateFlags flags, OpCodes opcode) override;\n  void onSetRequest(const std::string& path) override;\n  void onGetChildrenRequest(const std::string& path, bool watch, bool v2) override;\n  void onDeleteRequest(const std::string& path, int32_t version) override;\n  void onExistsRequest(const std::string& path, bool watch) override;\n  void onGetAclRequest(const std::string& path) override;\n  void onSetAclRequest(const std::string& path, int32_t version) override;\n  void onSyncRequest(const std::string& path) override;\n  void onCheckRequest(const std::string& path, int32_t version) override;\n  void onMultiRequest() override;\n  void onReconfigRequest() override;\n  void onSetWatchesRequest() override;\n  void onCheckWatchesRequest(const std::string& path, int32_t type) override;\n  void onRemoveWatchesRequest(const std::string& path, int32_t type) override;\n  void onGetEphemeralsRequest(const std::string& path) override;\n  void onGetAllChildrenNumberRequest(const std::string& path) override;\n  void onCloseRequest() override;\n  void onResponseBytes(uint64_t bytes) override;\n  void onConnectResponse(int32_t proto_version, int32_t timeout, bool readonly,\n                         const std::chrono::milliseconds& latency) override;\n  void onResponse(OpCodes opcode, int32_t xid, int64_t zxid, int32_t error,\n                  const std::chrono::milliseconds& latency) override;\n  void onWatchEvent(int32_t event_type, int32_t client_state, const std::string& path, int64_t zxid,\n                    int32_t error) override;\n\n  DecoderPtr createDecoder(DecoderCallbacks& callbacks, TimeSource& time_source);\n  void setDynamicMetadata(const std::string& key, const std::string& value);\n  void setDynamicMetadata(const std::vector<std::pair<const std::string, const std::string>>& data);\n  void clearDynamicMetadata();\n\nprivate:\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  ZooKeeperFilterConfigSharedPtr 
config_;\n  std::unique_ptr<Decoder> decoder_;\n};\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/zookeeper_proxy/utils.cc",
    "content": "#include \"extensions/filters/network/zookeeper_proxy/utils.h\"\n\n#include <string>\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\nint32_t BufferHelper::peekInt32(Buffer::Instance& buffer, uint64_t& offset) {\n  ensureMaxLen(sizeof(int32_t));\n\n  const int32_t val = buffer.peekBEInt<int32_t>(offset);\n  offset += sizeof(int32_t);\n  return val;\n}\n\nint64_t BufferHelper::peekInt64(Buffer::Instance& buffer, uint64_t& offset) {\n  ensureMaxLen(sizeof(int64_t));\n\n  const int64_t val = buffer.peekBEInt<int64_t>(offset);\n  offset += sizeof(int64_t);\n  return val;\n}\n\nbool BufferHelper::peekBool(Buffer::Instance& buffer, uint64_t& offset) {\n  ensureMaxLen(1);\n\n  const char byte = buffer.peekInt<char, ByteOrder::Host, 1>(offset);\n  const bool val = static_cast<bool>(byte);\n  offset += 1;\n  return val;\n}\n\nstd::string BufferHelper::peekString(Buffer::Instance& buffer, uint64_t& offset) {\n  std::string val;\n  const uint32_t len = peekInt32(buffer, offset);\n\n  if (len == 0) {\n    return val;\n  }\n\n  if (buffer.length() < (offset + len)) {\n    throw EnvoyException(\"peekString: buffer is smaller than string length\");\n  }\n\n  ensureMaxLen(len);\n\n  std::unique_ptr<char[]> data(new char[len]);\n  buffer.copyOut(offset, len, data.get());\n  val.assign(data.get(), len);\n  offset += len;\n\n  return val;\n}\n\nvoid BufferHelper::skip(const uint32_t len, uint64_t& offset) {\n  offset += len;\n  current_ += len;\n}\n\nvoid BufferHelper::ensureMaxLen(const uint32_t size) {\n  current_ += size;\n\n  if (current_ > max_len_) {\n    throw EnvoyException(\"read beyond max length\");\n  }\n}\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/network/zookeeper_proxy/utils.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/byte_order.h\"\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\n/**\n * Helper for extracting ZooKeeper data from a buffer.\n *\n * If at any point a peek is tried beyond max_len, an EnvoyException\n * will be thrown. This is important to protect Envoy against malformed\n * requests (e.g.: when the declared and actual length don't match).\n *\n * Note: ZooKeeper's protocol uses network byte ordering (big-endian).\n */\nclass BufferHelper : public Logger::Loggable<Logger::Id::filter> {\npublic:\n  BufferHelper(uint32_t max_len) : max_len_(max_len) {}\n\n  int32_t peekInt32(Buffer::Instance& buffer, uint64_t& offset);\n  int64_t peekInt64(Buffer::Instance& buffer, uint64_t& offset);\n  std::string peekString(Buffer::Instance& buffer, uint64_t& offset);\n  bool peekBool(Buffer::Instance& buffer, uint64_t& offset);\n  void skip(uint32_t len, uint64_t& offset);\n  void reset() { current_ = 0; }\n\nprivate:\n  void ensureMaxLen(uint32_t size);\n\n  const uint32_t max_len_;\n  uint32_t current_{};\n};\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"dns_filter_lib\",\n    srcs = [\n        \"dns_filter.cc\",\n        \"dns_filter_resolver.cc\",\n        \"dns_filter_utils.cc\",\n        \"dns_parser.cc\",\n    ],\n    hdrs = [\n        \"dns_filter.h\",\n        \"dns_filter_constants.h\",\n        \"dns_filter_resolver.h\",\n        \"dns_filter_utils.h\",\n        \"dns_parser.h\",\n    ],\n    external_deps = [\"ares\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listener_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/config:config_provider_lib\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/upstream:cluster_manager_lib\",\n        \"@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    status = \"alpha\",\n    deps = [\n        \":dns_filter_lib\",\n        \"//include/envoy/registry\",\n        
\"//include/envoy/server:filter_config_interface\",\n        \"@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/config.cc",
    "content": "#include \"extensions/filters/udp/dns_filter/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\n\nNetwork::UdpListenerFilterFactoryCb DnsFilterConfigFactory::createFilterFactoryFromProto(\n    const Protobuf::Message& config, Server::Configuration::ListenerFactoryContext& context) {\n  auto shared_config = std::make_shared<DnsFilterEnvoyConfig>(\n      context, MessageUtil::downcastAndValidate<\n                   const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig&>(\n                   config, context.messageValidationVisitor()));\n\n  return [shared_config](Network::UdpListenerFilterManager& filter_manager,\n                         Network::UdpReadFilterCallbacks& callbacks) -> void {\n    filter_manager.addReadFilter(std::make_unique<DnsFilter>(callbacks, shared_config));\n  };\n}\n\nProtobufTypes::MessagePtr DnsFilterConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig>();\n}\n\nstd::string DnsFilterConfigFactory::name() const { return \"envoy.filters.udp.dns_filter\"; }\n\n/**\n * Static registration for the DNS Filter. @see RegisterFactory.\n */\nstatic Registry::RegisterFactory<DnsFilterConfigFactory,\n                                 Server::Configuration::NamedUdpListenerFilterConfigFactory>\n    register_;\n\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h\"\n#include \"envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\n\n/**\n * Config registration for the UDP proxy filter. @see NamedUdpListenerFilterConfigFactory.\n */\nclass DnsFilterConfigFactory : public Server::Configuration::NamedUdpListenerFilterConfigFactory {\npublic:\n  // NamedUdpListenerFilterConfigFactory\n  Network::UdpListenerFilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& config,\n                               Server::Configuration::ListenerFactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n  std::string name() const override;\n};\n\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/dns_filter.cc",
    "content": "#include \"extensions/filters/udp/dns_filter/dns_filter.h\"\n\n#include \"envoy/network/listener.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n\n#include \"common/config/datasource.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\n\nstatic constexpr std::chrono::milliseconds DEFAULT_RESOLVER_TIMEOUT{500};\nstatic constexpr std::chrono::seconds DEFAULT_RESOLVER_TTL{300};\n\nDnsFilterEnvoyConfig::DnsFilterEnvoyConfig(\n    Server::Configuration::ListenerFactoryContext& context,\n    const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config)\n    : root_scope_(context.scope()), cluster_manager_(context.clusterManager()), api_(context.api()),\n      stats_(generateStats(config.stat_prefix(), root_scope_)),\n      resolver_timeout_(DEFAULT_RESOLVER_TIMEOUT), random_(context.api().randomGenerator()) {\n  using envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig;\n\n  const auto& server_config = config.server_config();\n\n  envoy::data::dns::v3::DnsTable dns_table;\n  bool result = loadServerConfig(server_config, dns_table);\n  ENVOY_LOG(debug, \"Loading DNS table from external file: {}\", result ? 
\"Success\" : \"Failure\");\n\n  retry_count_ = dns_table.external_retry_count();\n\n  virtual_domains_.reserve(dns_table.virtual_domains().size());\n  for (const auto& virtual_domain : dns_table.virtual_domains()) {\n    AddressConstPtrVec addrs{};\n\n    const absl::string_view domain_name = virtual_domain.name();\n    ENVOY_LOG(trace, \"Loading configuration for domain: {}\", domain_name);\n\n    if (virtual_domain.endpoint().has_address_list()) {\n      const auto& address_list = virtual_domain.endpoint().address_list().address();\n      addrs.reserve(address_list.size());\n\n      // Shuffle the configured addresses. We store the addresses starting at a random\n      // list index so that we do not always return answers in the same order as the IPs\n      // are configured.\n      size_t i = random_.random();\n\n      // Creating the IP address will throw an exception if the address string is malformed\n      for (auto index = 0; index < address_list.size(); index++) {\n        const auto address_iter = std::next(address_list.begin(), (i++ % address_list.size()));\n        auto ipaddr = Network::Utility::parseInternetAddress(*address_iter, 0 /* port */);\n        addrs.push_back(std::move(ipaddr));\n      }\n\n      // If the domain already exists with a different endpoint config, update the address_list\n      // with the data from the config\n      if (virtual_domains_.contains(domain_name)) {\n        auto& addr_vec = virtual_domains_[domain_name].address_list.value();\n        addr_vec.reserve(addr_vec.size() + addrs.size());\n        std::move(addrs.begin(), addrs.end(), std::inserter(addr_vec, addr_vec.end()));\n      } else {\n        DnsEndpointConfig endpoint_config{};\n        endpoint_config.address_list = absl::make_optional<AddressConstPtrVec>(std::move(addrs));\n        virtual_domains_.emplace(std::string(domain_name), std::move(endpoint_config));\n      }\n    }\n\n    if (virtual_domain.endpoint().has_service_list()) {\n      const auto& 
dns_service_list = virtual_domain.endpoint().service_list();\n      for (const auto& dns_service : dns_service_list.services()) {\n\n        // Each service should be its own domain in the stored config. The filter will see\n        // the full service name in queries on the wire. The protocol string returned will be empty\n        // if a numeric protocol is configured and we cannot resolve its name\n        const std::string proto = Utils::getProtoName(dns_service.protocol());\n        if (proto.empty()) {\n          continue;\n        }\n        const std::chrono::seconds ttl = std::chrono::seconds(dns_service.ttl().seconds());\n\n        // Generate the full name for the DNS service. All input parameters are populated\n        // strings enforced by the message definition\n        const std::string full_service_name =\n            Utils::buildServiceName(dns_service.service_name(), proto, virtual_domain.name());\n\n        DnsSrvRecordPtr service_record_ptr =\n            std::make_unique<DnsSrvRecord>(full_service_name, proto, ttl);\n\n        // Store service targets. We require at least one target to be present. The target should\n        // be a fully qualified domain name. 
If the target name is not a fully qualified name, we\n        // will consider this name to be that of a cluster\n        for (const auto& target : dns_service.targets()) {\n          DnsSrvRecord::DnsTargetAttributes attributes{};\n          attributes.priority = target.priority();\n          attributes.weight = target.weight();\n          attributes.port = target.port();\n\n          absl::string_view target_name = target.host_name();\n          if (target_name.empty()) {\n            target_name = target.cluster_name();\n            attributes.is_cluster = true;\n          }\n\n          ENVOY_LOG(trace, \"Storing service {} target {}\", full_service_name, target_name);\n          service_record_ptr->addTarget(target_name, attributes);\n        }\n\n        DnsEndpointConfig endpoint_config{};\n        endpoint_config.service_list =\n            absl::make_optional<DnsSrvRecordPtr>(std::move(service_record_ptr));\n        virtual_domains_.emplace(full_service_name, std::move(endpoint_config));\n      }\n    }\n\n    // A DNS name can be redirected to only one cluster.\n    const absl::string_view cluster_name = virtual_domain.endpoint().cluster_name();\n    if (!cluster_name.empty()) {\n      DnsEndpointConfig endpoint_config{};\n      endpoint_config.cluster_name = absl::make_optional<std::string>(cluster_name);\n      virtual_domains_.emplace(domain_name, std::move(endpoint_config));\n    }\n\n    std::chrono::seconds ttl = virtual_domain.has_answer_ttl()\n                                   ? 
std::chrono::seconds(virtual_domain.answer_ttl().seconds())\n                                   : DEFAULT_RESOLVER_TTL;\n    domain_ttl_.emplace(virtual_domain.name(), ttl);\n  }\n\n  // Add known domain suffixes\n  known_suffixes_.reserve(dns_table.known_suffixes().size());\n  for (const auto& suffix : dns_table.known_suffixes()) {\n    auto matcher_ptr = std::make_unique<Matchers::StringMatcherImpl>(suffix);\n    known_suffixes_.push_back(std::move(matcher_ptr));\n  }\n\n  forward_queries_ = config.has_client_config();\n  if (forward_queries_) {\n    const auto& client_config = config.client_config();\n    const auto& upstream_resolvers = client_config.upstream_resolvers();\n    resolvers_.reserve(upstream_resolvers.size());\n    for (const auto& resolver : upstream_resolvers) {\n      auto ipaddr = Network::Utility::protobufAddressToAddress(resolver);\n      resolvers_.emplace_back(std::move(ipaddr));\n    }\n    resolver_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(\n        client_config, resolver_timeout, DEFAULT_RESOLVER_TIMEOUT.count()));\n\n    max_pending_lookups_ = client_config.max_pending_lookups();\n  }\n}\n\nbool DnsFilterEnvoyConfig::loadServerConfig(\n    const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig::\n        ServerContextConfig& config,\n    envoy::data::dns::v3::DnsTable& table) {\n  using envoy::data::dns::v3::DnsTable;\n\n  if (config.has_inline_dns_table()) {\n    table = config.inline_dns_table();\n    return true;\n  }\n\n  const auto& datasource = config.external_dns_table();\n  bool data_source_loaded = false;\n  try {\n    // Data structure is deduced from the file extension. If the data is not read an exception\n    // is thrown. 
If no table can be read, the filter will refer all queries to an external\n    // DNS server, if configured, otherwise all queries will be responded to with Name Error.\n    MessageUtil::loadFromFile(datasource.filename(), table,\n                              ProtobufMessage::getNullValidationVisitor(), api_,\n                              false /* do_boosting */);\n    data_source_loaded = true;\n  } catch (const ProtobufMessage::UnknownProtoFieldException& e) {\n    ENVOY_LOG(warn, \"Invalid field in DNS Filter datasource configuration: {}\", e.what());\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG(warn, \"Filesystem DNS Filter config update failure: {}\", e.what());\n  }\n  return data_source_loaded;\n}\n\nDnsFilter::DnsFilter(Network::UdpReadFilterCallbacks& callbacks,\n                     const DnsFilterEnvoyConfigSharedPtr& config)\n    : UdpListenerReadFilter(callbacks), config_(config), listener_(callbacks.udpListener()),\n      cluster_manager_(config_->clusterManager()),\n      message_parser_(config->forwardQueries(), listener_.dispatcher().timeSource(),\n                      config->retryCount(), config->random(),\n                      config_->stats().downstream_rx_query_latency_) {\n  // This callback is executed when the dns resolution completes. At that time of a response by\n  // the resolver, we build an answer record from each IP returned then send a response to the\n  // client\n  resolver_callback_ = [this](DnsQueryContextPtr context, const DnsQueryRecord* query,\n                              AddressConstPtrVec& iplist) -> void {\n    // We cannot retry the resolution if ares returns without a response. The ares context\n    // is still dirty and will result in a segfault when it is freed during a subsequent resolve\n    // call from here. 
We will retry resolutions for pending lookups only\n    if (context->resolution_status_ != Network::DnsResolver::ResolutionStatus::Success &&\n        !context->in_callback_ && context->retry_ > 0) {\n      --context->retry_;\n      ENVOY_LOG(debug, \"resolving name [{}] via external resolvers [retry {}]\", query->name_,\n                context->retry_);\n      resolver_->resolveExternalQuery(std::move(context), query);\n      return;\n    }\n\n    config_->stats().externally_resolved_queries_.inc();\n    if (iplist.empty()) {\n      config_->stats().unanswered_queries_.inc();\n    }\n\n    incrementExternalQueryTypeCount(query->type_);\n    for (const auto& ip : iplist) {\n      incrementExternalQueryTypeAnswerCount(query->type_);\n      const std::chrono::seconds ttl = getDomainTTL(query->name_);\n      message_parser_.storeDnsAnswerRecord(context, *query, ttl, std::move(ip));\n    }\n    sendDnsResponse(std::move(context));\n  };\n\n  resolver_ = std::make_unique<DnsFilterResolver>(resolver_callback_, config->resolvers(),\n                                                  config->resolverTimeout(), listener_.dispatcher(),\n                                                  config->maxPendingLookups());\n}\n\nvoid DnsFilter::onData(Network::UdpRecvData& client_request) {\n  config_->stats().downstream_rx_bytes_.recordValue(client_request.buffer_->length());\n  config_->stats().downstream_rx_queries_.inc();\n\n  // Setup counters for the parser\n  DnsParserCounters parser_counters(config_->stats().query_buffer_underflow_,\n                                    config_->stats().record_name_overflow_,\n                                    config_->stats().query_parsing_failure_);\n\n  // Parse the query, if it fails return an response to the client\n  DnsQueryContextPtr query_context =\n      message_parser_.createQueryContext(client_request, parser_counters);\n  incrementQueryTypeCount(query_context->queries_);\n  if (!query_context->parse_status_) {\n    
config_->stats().downstream_rx_invalid_queries_.inc();\n    sendDnsResponse(std::move(query_context));\n    return;\n  }\n\n  // Resolve the requested name and respond to the client. If the return code is\n  // External, we will respond to the client when the upstream resolver returns\n  if (getResponseForQuery(query_context) == DnsLookupResponseCode::External) {\n    return;\n  }\n\n  // We have an answer, it might be \"No Answer\". Send it to the client\n  sendDnsResponse(std::move(query_context));\n}\n\nvoid DnsFilter::sendDnsResponse(DnsQueryContextPtr query_context) {\n  Buffer::OwnedImpl response;\n\n  // Serializes the generated response to the parsed query from the client. If there is a\n  // parsing error or the incoming query is invalid, we will still generate a valid DNS response\n  message_parser_.buildResponseBuffer(query_context, response);\n  config_->stats().downstream_tx_responses_.inc();\n  config_->stats().downstream_tx_bytes_.recordValue(response.length());\n  Network::UdpSendData response_data{query_context->local_->ip(), *(query_context->peer_),\n                                     response};\n  listener_.send(response_data);\n}\n\nDnsLookupResponseCode DnsFilter::getResponseForQuery(DnsQueryContextPtr& context) {\n  /* It appears to be a rare case where we would have more than one query in a single request.\n   * It is allowed by the protocol but not widely supported:\n   *\n   * See: https://www.ietf.org/rfc/rfc1035.txt\n   *\n   * The question section is used to carry the \"question\" in most queries,\n   * i.e., the parameters that define what is being asked. The section\n   * contains QDCOUNT (usually 1) entries.\n   */\n  for (const auto& query : context->queries_) {\n    // Try to resolve the query locally. 
If forwarding the query externally is disabled we will\n    // always attempt to resolve with the configured domains\n    if (isKnownDomain(query->name_) || !config_->forwardQueries()) {\n      // Determine whether the name is a cluster. Move on to the next query if successful\n      if (resolveViaClusters(context, *query)) {\n        continue;\n      }\n\n      // Determine whether we an answer this query with the static configuration\n      if (resolveViaConfiguredHosts(context, *query)) {\n        continue;\n      }\n    }\n\n    ENVOY_LOG(debug, \"resolving name [{}] via external resolvers\", query->name_);\n    resolver_->resolveExternalQuery(std::move(context), query.get());\n\n    return DnsLookupResponseCode::External;\n  }\n\n  if (context->answers_.empty()) {\n    config_->stats().unanswered_queries_.inc();\n    return DnsLookupResponseCode::Failure;\n  }\n  return DnsLookupResponseCode::Success;\n}\n\nbool DnsFilter::resolveViaConfiguredHosts(DnsQueryContextPtr& context,\n                                          const DnsQueryRecord& query) {\n  switch (query.type_) {\n  case DNS_RECORD_TYPE_A:\n  case DNS_RECORD_TYPE_AAAA:\n    return resolveConfiguredDomain(context, query);\n  case DNS_RECORD_TYPE_SRV:\n    return resolveConfiguredService(context, query);\n  default:\n    return false;\n  }\n}\n\nstd::chrono::seconds DnsFilter::getDomainTTL(const absl::string_view domain) {\n  const auto& domain_ttl_config = config_->domainTtl();\n  const auto& iter = domain_ttl_config.find(domain);\n\n  if (iter == domain_ttl_config.end()) {\n    return DEFAULT_RESOLVER_TTL;\n  }\n  return iter->second;\n}\n\nbool DnsFilter::isKnownDomain(const absl::string_view domain_name) {\n  const auto& known_suffixes = config_->knownSuffixes();\n  // If we don't have a list of allowlisted domain suffixes, we will resolve the name with an\n  // external DNS server\n\n  // TODO(abaptiste): Use a trie to find a match instead of iterating through the list\n  for (auto& suffix : 
known_suffixes) {\n    if (suffix->match(domain_name)) {\n      config_->stats().known_domain_queries_.inc();\n      return true;\n    }\n  }\n  return false;\n}\n\nconst DnsEndpointConfig* DnsFilter::getEndpointConfigForDomain(const absl::string_view domain) {\n  const auto& domains = config_->domains();\n  const auto iter = domains.find(domain);\n  if (iter == domains.end()) {\n    ENVOY_LOG(debug, \"No endpoint configuration exists for [{}]\", domain);\n    return nullptr;\n  }\n  return &(iter->second);\n}\n\nconst DnsSrvRecord* DnsFilter::getServiceConfigForDomain(const absl::string_view domain) {\n  const DnsEndpointConfig* endpoint_config = getEndpointConfigForDomain(domain);\n  if (endpoint_config != nullptr && endpoint_config->service_list.has_value()) {\n    return endpoint_config->service_list.value().get();\n  }\n  return nullptr;\n}\n\nconst AddressConstPtrVec* DnsFilter::getAddressListForDomain(const absl::string_view domain) {\n  const DnsEndpointConfig* endpoint_config = getEndpointConfigForDomain(domain);\n  if (endpoint_config != nullptr && endpoint_config->address_list.has_value()) {\n    return &(endpoint_config->address_list.value());\n  }\n  return nullptr;\n}\n\nconst absl::string_view DnsFilter::getClusterNameForDomain(const absl::string_view domain) {\n  const DnsEndpointConfig* endpoint_config = getEndpointConfigForDomain(domain);\n  if (endpoint_config != nullptr && endpoint_config->cluster_name.has_value()) {\n    return endpoint_config->cluster_name.value();\n  }\n  return {};\n}\n\nbool DnsFilter::resolveClusterService(DnsQueryContextPtr& context, const DnsQueryRecord& query) {\n  size_t cluster_endpoints = 0;\n\n  // Get the service_list config for the domain\n  const auto* service_config = getServiceConfigForDomain(query.name_);\n  if (service_config != nullptr) {\n\n    // We can redirect to more than one cluster, but only one is supported\n    const auto& cluster_target = service_config->targets_.begin();\n    const auto& 
target_name = cluster_target->first;\n    const auto& attributes = cluster_target->second;\n\n    if (!attributes.is_cluster) {\n      ENVOY_LOG(trace, \"Service target [{}] is not a cluster\", target_name);\n      return false;\n    }\n\n    // Determine if there is a cluster\n    Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(target_name);\n    if (cluster == nullptr) {\n      ENVOY_LOG(trace, \"No cluster found for service target: {}\", target_name);\n      return false;\n    }\n\n    // Add a service record for each cluster endpoint using the cluster name\n    const std::chrono::seconds ttl = getDomainTTL(target_name);\n    for (const auto& hostsets : cluster->prioritySet().hostSetsPerPriority()) {\n      for (const auto& host : hostsets->hosts()) {\n\n        // If the target port is zero, use the port from the cluster host.\n        // If the cluster host port is zero also, then this is the value that will\n        // appear in the service record. Zero is a permitted value in the record\n        DnsSrvRecord::DnsTargetAttributes new_attributes = attributes;\n        if (!new_attributes.port) {\n          new_attributes.port = host->address()->ip()->port();\n        }\n\n        // Create the service record element and increment the SRV record answer count\n        auto config = std::make_unique<DnsSrvRecord>(service_config->name_, service_config->proto_,\n                                                     service_config->ttl_);\n\n        config->addTarget(target_name, new_attributes);\n        message_parser_.storeDnsSrvAnswerRecord(context, query, std::move(config));\n        incrementClusterQueryTypeAnswerCount(query.type_);\n\n        // Return the address for all discovered endpoints\n        ENVOY_LOG(debug, \"using host address {} for cluster [{}]\",\n                  host->address()->ip()->addressAsString(), target_name);\n\n        // We have to determine the address type here so that we increment the correct counter\n        const 
auto type = Utils::getAddressRecordType(host->address());\n        if (type.has_value() &&\n            message_parser_.storeDnsAdditionalRecord(context, target_name, type.value(),\n                                                     query.class_, ttl, host->address())) {\n          ++cluster_endpoints;\n          incrementClusterQueryTypeAnswerCount(type.value());\n        }\n      }\n    }\n  }\n  return (cluster_endpoints != 0);\n}\n\nbool DnsFilter::resolveClusterHost(DnsQueryContextPtr& context, const DnsQueryRecord& query) {\n  // Determine if the domain name is being redirected to a cluster\n  const auto cluster_name = getClusterNameForDomain(query.name_);\n  absl::string_view lookup_name;\n  if (!cluster_name.empty()) {\n    lookup_name = cluster_name;\n  } else {\n    lookup_name = query.name_;\n  }\n\n  // Return an address for all discovered endpoints. The address and query type must match\n  // for the host to be included in the response\n  size_t cluster_endpoints = 0;\n  Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(lookup_name);\n  if (cluster != nullptr) {\n    // TODO(abaptiste): consider using host weights when returning answer addresses\n    const std::chrono::seconds ttl = getDomainTTL(lookup_name);\n\n    for (const auto& hostsets : cluster->prioritySet().hostSetsPerPriority()) {\n      for (const auto& host : hostsets->hosts()) {\n        // Return the address for all discovered endpoints\n        ENVOY_LOG(debug, \"using cluster host address {} for domain [{}]\",\n                  host->address()->ip()->addressAsString(), lookup_name);\n        if (message_parser_.storeDnsAnswerRecord(context, query, ttl, host->address())) {\n          incrementClusterQueryTypeAnswerCount(query.type_);\n          ++cluster_endpoints;\n        }\n      }\n    }\n  }\n  return (cluster_endpoints != 0);\n}\n\nbool DnsFilter::resolveViaClusters(DnsQueryContextPtr& context, const DnsQueryRecord& query) {\n  switch (query.type_) {\n  case 
DNS_RECORD_TYPE_SRV:\n    return resolveClusterService(context, query);\n  case DNS_RECORD_TYPE_A:\n  case DNS_RECORD_TYPE_AAAA:\n    return resolveClusterHost(context, query);\n  default:\n    // unsupported query type\n    return false;\n  }\n}\n\nbool DnsFilter::resolveConfiguredDomain(DnsQueryContextPtr& context, const DnsQueryRecord& query) {\n  const auto* configured_address_list = getAddressListForDomain(query.name_);\n  uint64_t hosts_found = 0;\n  if (configured_address_list != nullptr) {\n    // Build an answer record from each configured IP address\n    for (const auto& configured_address : *configured_address_list) {\n      ASSERT(configured_address != nullptr);\n      ENVOY_LOG(trace, \"using local address {} for domain [{}]\",\n                configured_address->ip()->addressAsString(), query.name_);\n      ++hosts_found;\n      const std::chrono::seconds ttl = getDomainTTL(query.name_);\n      if (message_parser_.storeDnsAnswerRecord(context, query, ttl, configured_address)) {\n        incrementLocalQueryTypeAnswerCount(query.type_);\n      }\n    }\n  }\n  return (hosts_found != 0);\n}\n\nbool DnsFilter::resolveConfiguredService(DnsQueryContextPtr& context, const DnsQueryRecord& query) {\n  const auto* service_config = getServiceConfigForDomain(query.name_);\n\n  size_t targets_discovered = 0;\n  if (service_config != nullptr) {\n    // for each service target address, we must resolve the target's IP. The target record does not\n    // specify the address type, so we must deduce it when building the record. It is possible that\n    // the configured target's IP addresses are a mix of A and AAAA records.\n    for (const auto& [target_name, attributes] : service_config->targets_) {\n      const auto* configured_address_list = getAddressListForDomain(target_name);\n\n      if (configured_address_list != nullptr) {\n        // Build an SRV answer record for the service. 
We need a new SRV record for each target.\n        // Although the same class is used, the target storage is different than the way the service\n        // config is modeled. We store one SrvRecord per target so that we can enforce the response\n        // size limit when serializing the answers to the client\n        ENVOY_LOG(trace, \"Adding srv record for target [{}]\", target_name);\n\n        incrementLocalQueryTypeAnswerCount(query.type_);\n        auto config = std::make_unique<DnsSrvRecord>(service_config->name_, service_config->proto_,\n                                                     service_config->ttl_);\n        config->addTarget(target_name, attributes);\n        message_parser_.storeDnsSrvAnswerRecord(context, query, std::move(config));\n\n        for (const auto& configured_address : *configured_address_list) {\n          ASSERT(configured_address != nullptr);\n\n          // Since there is no type, only a name, we must determine the record type from its address\n          ENVOY_LOG(trace, \"using address {} for target [{}] in SRV record\",\n                    configured_address->ip()->addressAsString(), target_name);\n          const std::chrono::seconds ttl = getDomainTTL(target_name);\n\n          const auto type = Utils::getAddressRecordType(configured_address);\n          if (type.has_value()) {\n            incrementLocalQueryTypeAnswerCount(type.value());\n            message_parser_.storeDnsAdditionalRecord(context, target_name, type.value(),\n                                                     query.class_, ttl, configured_address);\n            ++targets_discovered;\n          }\n        }\n      }\n    }\n  }\n  return (targets_discovered != 0);\n}\n\nvoid DnsFilter::onReceiveError(Api::IoError::IoErrorCode error_code) {\n  config_->stats().downstream_rx_errors_.inc();\n  UNREFERENCED_PARAMETER(error_code);\n}\n\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/dns_filter.h",
    "content": "#pragma once\n\n#include \"envoy/event/file_event.h\"\n#include \"envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/network/filter.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/matchers.h\"\n#include \"common/config/config_provider_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter_resolver.h\"\n#include \"extensions/filters/udp/dns_filter/dns_parser.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\n\n/**\n * All DNS Filter stats. @see stats_macros.h\n */\n#define ALL_DNS_FILTER_STATS(COUNTER, HISTOGRAM)                                                   \\\n  COUNTER(a_record_queries)                                                                        \\\n  COUNTER(aaaa_record_queries)                                                                     \\\n  COUNTER(srv_record_queries)                                                                      \\\n  COUNTER(cluster_a_record_answers)                                                                \\\n  COUNTER(cluster_aaaa_record_answers)                                                             \\\n  COUNTER(cluster_srv_record_answers)                                                              \\\n  COUNTER(cluster_unsupported_answers)                                                             \\\n  COUNTER(downstream_rx_errors)                                                                    \\\n  COUNTER(downstream_rx_invalid_queries)                                                           \\\n  COUNTER(downstream_rx_queries)                                                                   \\\n  COUNTER(external_a_record_queries)                                                               \\\n  COUNTER(external_a_record_answers)     
                                                          \\\n  COUNTER(external_aaaa_record_answers)                                                            \\\n  COUNTER(external_aaaa_record_queries)                                                            \\\n  COUNTER(external_unsupported_answers)                                                            \\\n  COUNTER(external_unsupported_queries)                                                            \\\n  COUNTER(externally_resolved_queries)                                                             \\\n  COUNTER(known_domain_queries)                                                                    \\\n  COUNTER(local_a_record_answers)                                                                  \\\n  COUNTER(local_aaaa_record_answers)                                                               \\\n  COUNTER(local_srv_record_answers)                                                                \\\n  COUNTER(local_unsupported_answers)                                                               \\\n  COUNTER(unanswered_queries)                                                                      \\\n  COUNTER(unsupported_queries)                                                                     \\\n  COUNTER(downstream_tx_responses)                                                                 \\\n  COUNTER(query_buffer_underflow)                                                                  \\\n  COUNTER(query_parsing_failure)                                                                   \\\n  COUNTER(record_name_overflow)                                                                    \\\n  HISTOGRAM(downstream_rx_bytes, Bytes)                                                            \\\n  HISTOGRAM(downstream_rx_query_latency, Milliseconds)                                             \\\n  HISTOGRAM(downstream_tx_bytes, Bytes)\n\n/**\n * Struct definition for all DNS 
Filter stats. @see stats_macros.h\n */\nstruct DnsFilterStats {\n  ALL_DNS_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\nstruct DnsEndpointConfig {\n  absl::optional<AddressConstPtrVec> address_list;\n  absl::optional<std::string> cluster_name;\n  absl::optional<DnsSrvRecordPtr> service_list;\n};\n\nusing DnsVirtualDomainConfig = absl::flat_hash_map<std::string, DnsEndpointConfig>;\n\n/**\n * DnsFilter configuration class abstracting access to data necessary for the filter's operation\n */\nclass DnsFilterEnvoyConfig : public Logger::Loggable<Logger::Id::filter> {\npublic:\n  DnsFilterEnvoyConfig(\n      Server::Configuration::ListenerFactoryContext& context,\n      const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config);\n\n  DnsFilterStats& stats() const { return stats_; }\n  const DnsVirtualDomainConfig& domains() const { return virtual_domains_; }\n  const std::vector<Matchers::StringMatcherPtr>& knownSuffixes() const { return known_suffixes_; }\n  const absl::flat_hash_map<std::string, std::chrono::seconds>& domainTtl() const {\n    return domain_ttl_;\n  }\n  const AddressConstPtrVec& resolvers() const { return resolvers_; }\n  bool forwardQueries() const { return forward_queries_; }\n  const std::chrono::milliseconds resolverTimeout() const { return resolver_timeout_; }\n  Upstream::ClusterManager& clusterManager() const { return cluster_manager_; }\n  uint64_t retryCount() const { return retry_count_; }\n  Random::RandomGenerator& random() const { return random_; }\n  uint64_t maxPendingLookups() const { return max_pending_lookups_; }\n\nprivate:\n  static DnsFilterStats generateStats(const std::string& stat_prefix, Stats::Scope& scope) {\n    const auto final_prefix = absl::StrCat(\"dns_filter.\", stat_prefix);\n    return {ALL_DNS_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),\n                                 POOL_HISTOGRAM_PREFIX(scope, final_prefix))};\n  }\n\n  bool loadServerConfig(const 
envoy::extensions::filters::udp::dns_filter::v3alpha::\n                            DnsFilterConfig::ServerContextConfig& config,\n                        envoy::data::dns::v3::DnsTable& table);\n\n  Stats::Scope& root_scope_;\n  Upstream::ClusterManager& cluster_manager_;\n  Network::DnsResolverSharedPtr resolver_;\n  Api::Api& api_;\n\n  mutable DnsFilterStats stats_;\n  DnsVirtualDomainConfig virtual_domains_;\n  std::vector<Matchers::StringMatcherPtr> known_suffixes_;\n  absl::flat_hash_map<std::string, std::chrono::seconds> domain_ttl_;\n  bool forward_queries_;\n  uint64_t retry_count_;\n  AddressConstPtrVec resolvers_;\n  std::chrono::milliseconds resolver_timeout_;\n  Random::RandomGenerator& random_;\n  uint64_t max_pending_lookups_;\n};\n\nusing DnsFilterEnvoyConfigSharedPtr = std::shared_ptr<const DnsFilterEnvoyConfig>;\n\nenum class DnsLookupResponseCode { Success, Failure, External };\n\n/**\n * This class is responsible for handling incoming DNS datagrams and responding to the queries.\n * The filter will attempt to resolve the query via its configuration or direct to an external\n * resolver when necessary\n */\nclass DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  DnsFilter(Network::UdpReadFilterCallbacks& callbacks,\n            const DnsFilterEnvoyConfigSharedPtr& config);\n\n  // Network::UdpListenerReadFilter callbacks\n  void onData(Network::UdpRecvData& client_request) override;\n  void onReceiveError(Api::IoError::IoErrorCode error_code) override;\n\n  /**\n   * @return bool true if the domain_name is a known domain for which we respond to queries\n   */\n  bool isKnownDomain(const absl::string_view domain_name);\n\nprivate:\n  /**\n   * Prepare the response buffer and send it to the client\n   *\n   * @param context contains the data necessary to create a response and send it to a client\n   */\n  void sendDnsResponse(DnsQueryContextPtr context);\n\n  /**\n   * @brief Encapsulates all of 
the logic required to find an answer for a DNS query\n   *\n   * @return DnsLookupResponseCode indicating whether we were able to respond to the query or send\n   * the query to an external resolver\n   */\n  DnsLookupResponseCode getResponseForQuery(DnsQueryContextPtr& context);\n\n  /**\n   * @return std::chrono::seconds retrieves the configured per domain TTL to be inserted into answer\n   * records\n   */\n  std::chrono::seconds getDomainTTL(const absl::string_view domain);\n\n  /**\n   * @brief Resolves a hostname query from configured clusters\n   *\n   * @param context object containing the query context\n   * @param query query object containing the name to be resolved\n   * @return bool true if the requested name matched a cluster and an answer record was constructed\n   */\n  bool resolveClusterHost(DnsQueryContextPtr& context, const DnsQueryRecord& query);\n\n  /**\n   * @brief Resolves a service query from configured clusters\n   *\n   * @param context object containing the query context\n   * @param query query object containing the name to be resolved\n   * @return bool true if the requested name matched a cluster and an answer record was constructed\n   */\n  bool resolveClusterService(DnsQueryContextPtr& context, const DnsQueryRecord& query);\n\n  /**\n   * @brief Resolves the supplied query from configured clusters\n   *\n   * @param context object containing the query context\n   * @param query query object containing the name to be resolved\n   * @return bool true if the requested name matched a cluster and an answer record was constructed\n   */\n  bool resolveViaClusters(DnsQueryContextPtr& context, const DnsQueryRecord& query);\n\n  /**\n   * @brief Resolves the supplied query from the configured set of domains\n   *\n   * @param context object containing the query context\n   * @param query query object containing the name to be resolved\n   * @return bool true if the requested name matched a cluster and an answer record was constructed\n   
*/\n  bool resolveConfiguredDomain(DnsQueryContextPtr& context, const DnsQueryRecord& query);\n\n  /**\n   * @brief Resolves the supplied query from configured services\n   *\n   * @param context object containing the query context\n   * @param query query object containing the name to be resolved\n   * @return bool true if the requested name matched a cluster and an answer record was constructed\n   */\n  bool resolveConfiguredService(DnsQueryContextPtr& context, const DnsQueryRecord& query);\n\n  /**\n   * @brief Resolves the supplied query from configured hostnames or services\n   *\n   * @param context object containing the query context\n   * @param query query object containing the name to be resolved\n   * @return bool true if the requested name matches a configured domain and answer records can be\n   * constructed\n   */\n  bool resolveViaConfiguredHosts(DnsQueryContextPtr& context, const DnsQueryRecord& query);\n\n  /**\n   * @brief Increment the counter for the given query type for external queries\n   *\n   * @param query_type indicate the type of record being resolved (A, AAAA, or other).\n   */\n  void incrementExternalQueryTypeCount(const uint16_t query_type) {\n    switch (query_type) {\n    case DNS_RECORD_TYPE_A:\n      config_->stats().external_a_record_queries_.inc();\n      break;\n    case DNS_RECORD_TYPE_AAAA:\n      config_->stats().external_aaaa_record_queries_.inc();\n      break;\n    default:\n      config_->stats().external_unsupported_queries_.inc();\n      break;\n    }\n  }\n\n  /**\n   * @brief Increment the counter for the parsed query type\n   *\n   * @param queries a vector of all the incoming queries received from a client\n   */\n  void incrementQueryTypeCount(const DnsQueryPtrVec& queries) {\n    for (const auto& query : queries) {\n      incrementQueryTypeCount(query->type_);\n    }\n  }\n\n  /**\n   * @brief Increment the counter for the given query type.\n   *\n   * @param query_type indicate the type of record being 
resolved (A, AAAA, or other).\n   */\n  void incrementQueryTypeCount(const uint16_t query_type) {\n    switch (query_type) {\n    case DNS_RECORD_TYPE_A:\n      config_->stats().a_record_queries_.inc();\n      break;\n    case DNS_RECORD_TYPE_AAAA:\n      config_->stats().aaaa_record_queries_.inc();\n      break;\n    case DNS_RECORD_TYPE_SRV:\n      config_->stats().srv_record_queries_.inc();\n      break;\n    default:\n      config_->stats().unsupported_queries_.inc();\n      break;\n    }\n  }\n\n  /**\n   * @brief Increment the counter for answers for the given query type resolved via cluster names\n   *\n   * @param query_type indicate the type of answer record returned to the client\n   */\n  void incrementClusterQueryTypeAnswerCount(const uint16_t query_type) {\n    switch (query_type) {\n    case DNS_RECORD_TYPE_A:\n      config_->stats().cluster_a_record_answers_.inc();\n      break;\n    case DNS_RECORD_TYPE_AAAA:\n      config_->stats().cluster_aaaa_record_answers_.inc();\n      break;\n    case DNS_RECORD_TYPE_SRV:\n      config_->stats().cluster_srv_record_answers_.inc();\n      break;\n    default:\n      config_->stats().cluster_unsupported_answers_.inc();\n      break;\n    }\n  }\n\n  /**\n   * @brief Increment the counter for answers for the given query type resolved from the local\n   * configuration.\n   *\n   * @param query_type indicate the type of answer record returned to the client\n   */\n  void incrementLocalQueryTypeAnswerCount(const uint16_t query_type) {\n    switch (query_type) {\n    case DNS_RECORD_TYPE_A:\n      config_->stats().local_a_record_answers_.inc();\n      break;\n    case DNS_RECORD_TYPE_AAAA:\n      config_->stats().local_aaaa_record_answers_.inc();\n      break;\n    case DNS_RECORD_TYPE_SRV:\n      config_->stats().local_srv_record_answers_.inc();\n      break;\n    default:\n      config_->stats().local_unsupported_answers_.inc();\n      break;\n    }\n  }\n\n  /**\n   * @brief Increment the counter for answers for 
the given query type resolved via an external\n   * resolver\n   *\n   * @param query_type indicate the type of answer record returned to the client\n   */\n  void incrementExternalQueryTypeAnswerCount(const uint16_t query_type) {\n    switch (query_type) {\n    case DNS_RECORD_TYPE_A:\n      config_->stats().external_a_record_answers_.inc();\n      break;\n    case DNS_RECORD_TYPE_AAAA:\n      config_->stats().external_aaaa_record_answers_.inc();\n      break;\n    default:\n      config_->stats().external_unsupported_answers_.inc();\n      break;\n    }\n  }\n\n  /**\n   * @brief Helper function to retrieve the Endpoint configuration for a requested domain\n   */\n  const DnsEndpointConfig* getEndpointConfigForDomain(const absl::string_view domain);\n\n  /**\n   * @brief Helper function to retrieve the Service Config for a requested domain\n   */\n  const DnsSrvRecord* getServiceConfigForDomain(const absl::string_view domain);\n\n  /**\n   * @brief Helper function to retrieve the Address List for a requested domain\n   */\n  const AddressConstPtrVec* getAddressListForDomain(const absl::string_view domain);\n\n  /**\n   * @brief Helper function to retrieve a cluster name that a domain may be redirected towards\n   */\n  const absl::string_view getClusterNameForDomain(const absl::string_view domain);\n\n  const DnsFilterEnvoyConfigSharedPtr config_;\n  Network::UdpListener& listener_;\n  Upstream::ClusterManager& cluster_manager_;\n  DnsMessageParser message_parser_;\n  DnsFilterResolverPtr resolver_;\n  Network::Address::InstanceConstSharedPtr local_;\n  Network::Address::InstanceConstSharedPtr peer_;\n  DnsFilterResolverCallback resolver_callback_;\n};\n\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/dns_filter_constants.h",
    "content": "#pragma once\n\n#include \"envoy/common/platform.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\n\nconstexpr uint16_t DNS_RECORD_CLASS_IN = 1;\n\nconstexpr uint16_t DNS_RECORD_TYPE_A = 0x01;\nconstexpr uint16_t DNS_RECORD_TYPE_AAAA = 0x1C;\nconstexpr uint16_t DNS_RECORD_TYPE_SRV = 0x21;\nconstexpr uint16_t DNS_RECORD_TYPE_OPT = 0x29;\n\nconstexpr uint16_t DNS_RESPONSE_CODE_NO_ERROR = 0;\nconstexpr uint16_t DNS_RESPONSE_CODE_FORMAT_ERROR = 1;\nconstexpr uint16_t DNS_RESPONSE_CODE_NAME_ERROR = 3;\nconstexpr uint16_t DNS_RESPONSE_CODE_NOT_IMPLEMENTED = 4;\n\nconstexpr size_t MIN_QUERY_NAME_LENGTH = 3;\nconstexpr size_t MAX_LABEL_LENGTH = 63;\nconstexpr size_t MAX_NAME_LENGTH = 255;\n\n// Amazon Route53 will return up to 8 records in an answer\n// https://aws.amazon.com/route53/faqs/#associate_multiple_ip_with_single_record\nconstexpr size_t MAX_RETURNED_RECORDS = 8;\n\n// Ensure that responses stay below the 512 byte byte limit. If we are to exceed this we must\n// add DNS extension fields\n//\n// Note:  There is Network::MAX_UDP_PACKET_SIZE, which is defined as 1500 bytes. If we support\n// DNS extensions, which support up to 4096 bytes, we will have to keep this 1500 byte limit\n// in mind.\nconstexpr uint64_t MAX_DNS_RESPONSE_SIZE = 512;\n\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/dns_filter_resolver.cc",
    "content": "#include \"extensions/filters/udp/dns_filter/dns_filter_resolver.h\"\n\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\n\nvoid DnsFilterResolver::resolveExternalQuery(DnsQueryContextPtr context,\n                                             const DnsQueryRecord* domain_query) {\n  // Create an external resolution context for the query.\n  LookupContext ctx{};\n  ctx.query_rec = domain_query;\n  ctx.query_context = std::move(context);\n  ctx.query_context->in_callback_ = false;\n  ctx.expiry = std::chrono::duration_cast<std::chrono::seconds>(\n                   dispatcher_.timeSource().systemTime().time_since_epoch())\n                   .count() +\n               std::chrono::duration_cast<std::chrono::seconds>(timeout_).count();\n  ctx.resolver_status = DnsFilterResolverStatus::Pending;\n\n  Network::DnsLookupFamily lookup_family;\n  switch (domain_query->type_) {\n  case DNS_RECORD_TYPE_A:\n    lookup_family = Network::DnsLookupFamily::V4Only;\n    break;\n  case DNS_RECORD_TYPE_AAAA:\n    lookup_family = Network::DnsLookupFamily::V6Only;\n    break;\n  default:\n    // We don't support other lookups other than A and AAAA. 
Set success here so that we don't\n    // retry for something that we are certain will fail.\n    ENVOY_LOG(debug, \"Unknown query type [{}] for upstream lookup\", domain_query->type_);\n    ctx.query_context->resolution_status_ = Network::DnsResolver::ResolutionStatus::Success;\n    ctx.resolver_status = DnsFilterResolverStatus::Complete;\n    invokeCallback(ctx);\n    return;\n  }\n\n  const DnsQueryRecord* id = domain_query;\n\n  // If we have too many pending lookups, invoke the callback to retry the query.\n  if (lookups_.size() > max_pending_lookups_) {\n    ENVOY_LOG(\n        trace,\n        \"Retrying query for [{}] because there are too many pending lookups: [pending {}/max {}]\",\n        domain_query->name_, lookups_.size(), max_pending_lookups_);\n    ctx.resolver_status = DnsFilterResolverStatus::Complete;\n    invokeCallback(ctx);\n    return;\n  }\n\n  ctx.timeout_timer = dispatcher_.createTimer([this]() -> void { onResolveTimeout(); });\n  ctx.timeout_timer->enableTimer(timeout_);\n\n  lookups_.emplace(id, std::move(ctx));\n\n  ENVOY_LOG(trace, \"Pending queries: {}\", lookups_.size());\n\n  // Define the callback that is executed when resolution completes\n  // Resolve the address in the query and add to the resolved_hosts vector\n  resolver_->resolve(domain_query->name_, lookup_family,\n                     [this, id](Network::DnsResolver::ResolutionStatus status,\n                                std::list<Network::DnsResponse>&& response) -> void {\n                       auto ctx_iter = lookups_.find(id);\n\n                       // If the context is not in the map, the lookup has timed out and was removed\n                       // when the timer executed\n                       if (ctx_iter == lookups_.end()) {\n                         ENVOY_LOG(debug, \"Unable to find context for DNS query for ID [{}]\",\n                                   reinterpret_cast<intptr_t>(id));\n                         return;\n                       }\n\n      
                 auto ctx = std::move(ctx_iter->second);\n                       lookups_.erase(ctx_iter->first);\n\n                       // We are processing the response here, so we did not timeout. Cancel the\n                       // timer\n                       ctx.timeout_timer->disableTimer();\n\n                       ENVOY_LOG(trace, \"async query status returned. Entries {}\", response.size());\n                       ASSERT(ctx.resolver_status == DnsFilterResolverStatus::Pending);\n\n                       ctx.query_context->in_callback_ = true;\n                       ctx.query_context->resolution_status_ = status;\n                       ctx.resolver_status = DnsFilterResolverStatus::Complete;\n\n                       // C-ares doesn't expose the TTL in the data available here.\n                       if (status == Network::DnsResolver::ResolutionStatus::Success) {\n                         ctx.resolved_hosts.reserve(response.size());\n                         for (const auto& resp : response) {\n                           ASSERT(resp.address_ != nullptr);\n                           ENVOY_LOG(trace, \"Resolved address: {} for {}\",\n                                     resp.address_->ip()->addressAsString(), ctx.query_rec->name_);\n                           ctx.resolved_hosts.emplace_back(std::move(resp.address_));\n                         }\n                       }\n                       // Invoke the filter callback notifying it of resolved addresses\n                       invokeCallback(ctx);\n                     });\n}\n\nvoid DnsFilterResolver::onResolveTimeout() {\n  const uint64_t now = std::chrono::duration_cast<std::chrono::seconds>(\n                           dispatcher_.timeSource().systemTime().time_since_epoch())\n                           .count();\n  ENVOY_LOG(trace, \"Pending queries: {}\", lookups_.size());\n\n  // Find an outstanding pending query and purge it\n  for (auto& ctx_iter : lookups_) {\n    if 
(ctx_iter.second.expiry <= now &&\n        ctx_iter.second.resolver_status == DnsFilterResolverStatus::Pending) {\n      auto ctx = std::move(ctx_iter.second);\n\n      ENVOY_LOG(trace, \"Purging expired query: {}\", ctx_iter.first->name_);\n\n      ctx.query_context->resolution_status_ = Network::DnsResolver::ResolutionStatus::Failure;\n\n      lookups_.erase(ctx_iter.first);\n      callback_(std::move(ctx.query_context), ctx.query_rec, ctx.resolved_hosts);\n      return;\n    }\n  }\n}\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/dns_filter_resolver.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/dns.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_parser.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\n\nenum class DnsFilterResolverStatus { Pending, Complete, TimedOut };\n\n/*\n * This class encapsulates the logic of handling an asynchronous DNS request for the DNS filter.\n * External request timeouts are handled here.\n */\nclass DnsFilterResolver : Logger::Loggable<Logger::Id::filter> {\npublic:\n  DnsFilterResolver(DnsFilterResolverCallback& callback, AddressConstPtrVec resolvers,\n                    std::chrono::milliseconds timeout, Event::Dispatcher& dispatcher,\n                    uint64_t max_pending_lookups)\n      : timeout_(timeout), dispatcher_(dispatcher),\n        resolver_(dispatcher.createDnsResolver(resolvers, false /* use_tcp_for_dns_lookups */)),\n        callback_(callback), max_pending_lookups_(max_pending_lookups) {}\n  /**\n   * @brief entry point to resolve the name in a DnsQueryRecord\n   *\n   * This function uses the query object to determine whether it is requesting an A or AAAA record\n   * for the given name. 
When the resolver callback executes, this will execute a DNS Filter\n   * callback in order to build the answer object returned to the client.\n   *\n   * @param domain_query the query record object containing the name for which we are resolving\n   */\n  void resolveExternalQuery(DnsQueryContextPtr context, const DnsQueryRecord* domain_query);\n\nprivate:\n  struct LookupContext {\n    const DnsQueryRecord* query_rec;\n    DnsQueryContextPtr query_context;\n    uint64_t expiry;\n    AddressConstPtrVec resolved_hosts;\n    DnsFilterResolverStatus resolver_status;\n    Event::TimerPtr timeout_timer;\n  };\n  /**\n   * @brief invokes the DNS Filter callback only if our state indicates we have not timed out\n   * waiting for a response from the external resolver\n   */\n  void invokeCallback(LookupContext& context) {\n    // If we've timed out. Guard against sending a response\n    if (context.resolver_status == DnsFilterResolverStatus::Complete) {\n      callback_(std::move(context.query_context), context.query_rec, context.resolved_hosts);\n    }\n  }\n\n  /**\n   * @brief Invoke the DNS Filter callback to send a response to a client if the query has timed out\n   * DNS Filter will respond to the client appropriately.\n   */\n  void onResolveTimeout();\n\n  std::chrono::milliseconds timeout_;\n  Event::Dispatcher& dispatcher_;\n  const Network::DnsResolverSharedPtr resolver_;\n  DnsFilterResolverCallback& callback_;\n  absl::flat_hash_map<const DnsQueryRecord*, LookupContext> lookups_;\n  uint64_t max_pending_lookups_;\n};\n\nusing DnsFilterResolverPtr = std::unique_ptr<DnsFilterResolver>;\n\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/dns_filter_utils.cc",
    "content": "#include \"extensions/filters/udp/dns_filter/dns_filter_utils.h\"\n\n#include <algorithm>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/logger.h\"\n#include \"common/network/address_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\nnamespace Utils {\n\nstd::string getProtoName(const DnsTable::DnsServiceProtocol& protocol) {\n  std::string proto = protocol.name();\n  if (proto.empty()) {\n    switch (protocol.number()) {\n    case 6:\n      proto = \"tcp\";\n      break;\n    case 17:\n      proto = \"udp\";\n      break;\n    default:\n      // For Envoy to resolve a protocol to a name \"/etc/protocols\"\n      // should exist. This isn't guaranteed. Since most services are\n      // tcp or udp, if we get a different value, return an empty string.\n      proto = EMPTY_STRING;\n      break;\n    } // end switch\n  }\n  return proto;\n}\n\nabsl::string_view getServiceFromName(const absl::string_view name) {\n  const size_t offset = name.find_first_of('.');\n  if (offset != std::string::npos && offset < name.size()) {\n    size_t start = 0;\n    if (name[start] == '_') {\n      return name.substr(++start, offset - 1);\n    }\n  }\n  return EMPTY_STRING;\n}\n\nabsl::string_view getProtoFromName(const absl::string_view name) {\n  size_t start = name.find_first_of('.');\n  if (start != std::string::npos && ++start < name.size() - 1) {\n    if (name[start] == '_') {\n      const size_t offset = name.find_first_of('.', ++start);\n      if (start != std::string::npos && offset < name.size()) {\n        return name.substr(start, offset - start);\n      }\n    }\n  }\n  return EMPTY_STRING;\n}\n\nstd::string buildServiceName(const std::string& name, const std::string& proto,\n                             const std::string& domain) {\n  std::string result{};\n  if (name[0] != '_') {\n    result += \"_\";\n  }\n  result += name + \".\";\n  if 
(proto[0] != '_') {\n    result += \"_\";\n  }\n  result += proto + '.' + domain;\n  return result;\n}\n\nabsl::optional<uint16_t>\ngetAddressRecordType(const Network::Address::InstanceConstSharedPtr& ipaddr) {\n  if (ipaddr->type() == Network::Address::Type::Ip) {\n    if (ipaddr->ip()->ipv6() != nullptr) {\n      return absl::make_optional<uint16_t>(DNS_RECORD_TYPE_AAAA);\n    } else if (ipaddr->ip()->ipv4() != nullptr) {\n      return absl::make_optional<uint16_t>(DNS_RECORD_TYPE_A);\n    }\n  }\n  return absl::nullopt;\n}\n} // namespace Utils\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/dns_filter_utils.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h\"\n#include \"envoy/network/address.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter_constants.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\nnamespace Utils {\n\nusing envoy::data::dns::v3::DnsTable;\n\n/**\n * @brief Returns the protocol name string from the configured protobuf entity\n */\nstd::string getProtoName(const DnsTable::DnsServiceProtocol& protocol);\n\n/**\n * @brief Extracts the service name from the fully qualified service name. The leading underscore\n * is discarded from the output\n */\nabsl::string_view getServiceFromName(const absl::string_view name);\n\n/**\n * @brief Extracts the protocol name from the fully qualified service name. The leading underscore\n * is discarded from the output\n */\nabsl::string_view getProtoFromName(const absl::string_view name);\n\n/**\n * @brief Construct the full service name, including underscores, from the name, protocol and\n * domain fields.\n *\n * A DNS service record name must have the protocol and name labels begin with underscores. This\n * function validates the input fields and concatenates them to form the full name\n */\nstd::string buildServiceName(const std::string& name, const std::string& proto,\n                             const std::string& domain);\n\nabsl::optional<uint16_t>\ngetAddressRecordType(const Network::Address::InstanceConstSharedPtr& ipaddr);\n\n} // namespace Utils\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/dns_parser.cc",
    "content": "#include \"extensions/filters/udp/dns_filter/dns_parser.h\"\n\n#include \"envoy/network/address.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter_utils.h\"\n\n#include \"ares.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\n\nbool BaseDnsRecord::serializeSpecificName(Buffer::OwnedImpl& output, const absl::string_view name) {\n  // Iterate over a name e.g. \"www.domain.com\" once and produce a buffer containing each name\n  // segment prefixed by its length\n  static constexpr char SEPARATOR = '.';\n\n  // Names are restricted to 255 bytes per RFC\n  if (name.size() > MAX_NAME_LENGTH) {\n    return false;\n  }\n\n  size_t last = 0;\n  size_t count = name.find_first_of(SEPARATOR);\n  auto iter = name.begin();\n\n  while (count != std::string::npos) {\n    if ((count - last) > MAX_LABEL_LENGTH) {\n      return false;\n    }\n\n    count -= last;\n    output.writeBEInt<uint8_t>(count);\n    for (size_t i = 0; i < count; i++) {\n      output.writeByte(*iter);\n      ++iter;\n    }\n\n    // periods are not serialized. Skip to the next character\n    if (*iter == SEPARATOR) {\n      ++iter;\n    }\n\n    // Move our last marker to the first position after where we stopped. 
Search for the next name\n    // separator\n    last += count;\n    count = name.find_first_of(SEPARATOR, ++last);\n  }\n\n  // Write the remaining segment prepended by its length\n  count = name.size() - last;\n  output.writeBEInt<uint8_t>(count);\n  for (size_t i = 0; i < count; i++) {\n    output.writeByte(*iter++);\n  }\n\n  // Terminate the name record with a null byte\n  output.writeByte(0x00);\n  return true;\n}\n\nbool BaseDnsRecord::serializeName(Buffer::OwnedImpl& output) {\n  return serializeSpecificName(output, name_);\n}\n\n// Serialize a DNS Query Record\nbool DnsQueryRecord::serialize(Buffer::OwnedImpl& output) {\n  if (serializeName(output)) {\n    output.writeBEInt<uint16_t>(type_);\n    output.writeBEInt<uint16_t>(class_);\n  }\n  return (output.length() > 0);\n}\n\n// Serialize a single DNS Answer Record\nbool DnsAnswerRecord::serialize(Buffer::OwnedImpl& output) {\n  if (serializeName(output)) {\n    output.writeBEInt<uint16_t>(type_);\n    output.writeBEInt<uint16_t>(class_);\n    output.writeBEInt<uint32_t>(static_cast<uint32_t>(ttl_.count()));\n\n    ASSERT(ip_addr_ != nullptr);\n    const auto ip_address = ip_addr_->ip();\n\n    ASSERT(ip_address != nullptr);\n    if (ip_address->ipv6() != nullptr) {\n      // Store the 128bit address with 2 64 bit writes\n      const absl::uint128 addr6 = ip_address->ipv6()->address();\n      output.writeBEInt<uint16_t>(sizeof(addr6));\n      output.writeLEInt<uint64_t>(absl::Uint128Low64(addr6));\n      output.writeLEInt<uint64_t>(absl::Uint128High64(addr6));\n    } else if (ip_address->ipv4() != nullptr) {\n      output.writeBEInt<uint16_t>(4);\n      output.writeLEInt<uint32_t>(ip_address->ipv4()->address());\n    }\n  }\n  return (output.length() > 0);\n}\n\nbool DnsSrvRecord::serialize(Buffer::OwnedImpl& output) {\n  if (!targets_.empty()) {\n    // The Service Record being serialized should have only one target\n    const auto& target = targets_.begin();\n    Buffer::OwnedImpl target_buf{};\n    if 
(serializeSpecificName(target_buf, target->first) && serializeName(output)) {\n      output.writeBEInt<uint16_t>(type_);\n      output.writeBEInt<uint16_t>(class_);\n      output.writeBEInt<uint32_t>(static_cast<uint32_t>(ttl_.count()));\n\n      const uint16_t data_length = sizeof(target->second.priority) + sizeof(target->second.weight) +\n                                   sizeof(target->second.port) + target_buf.length();\n      output.writeBEInt<uint16_t>(data_length);\n      output.writeBEInt<uint16_t>(target->second.priority);\n      output.writeBEInt<uint16_t>(target->second.weight);\n      output.writeBEInt<uint16_t>(target->second.port);\n      output.move(target_buf);\n    }\n  }\n  return (output.length() > 0);\n}\n\nvoid DnsSrvRecord::addTarget(const absl::string_view target, const DnsTargetAttributes& attrs) {\n  targets_.emplace(std::make_pair(std::string(target), attrs));\n}\n\nDnsQueryContextPtr DnsMessageParser::createQueryContext(Network::UdpRecvData& client_request,\n                                                        DnsParserCounters& counters) {\n  DnsQueryContextPtr query_context = std::make_unique<DnsQueryContext>(\n      client_request.addresses_.local_, client_request.addresses_.peer_, counters, retry_count_);\n\n  query_context->parse_status_ = parseDnsObject(query_context, client_request.buffer_);\n  if (!query_context->parse_status_) {\n    query_context->response_code_ = DNS_RESPONSE_CODE_FORMAT_ERROR;\n    ENVOY_LOG(debug, \"Unable to parse query buffer from '{}' into a DNS object\",\n              client_request.addresses_.peer_->ip()->addressAsString());\n  }\n  return query_context;\n}\n\nbool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context,\n                                      const Buffer::InstancePtr& buffer) {\n  static constexpr uint64_t field_size = sizeof(uint16_t);\n  size_t available_bytes = buffer->length();\n  uint64_t offset = 0;\n  uint16_t data;\n  bool done = false;\n  DnsQueryParseState 
state{DnsQueryParseState::Init};\n\n  header_ = {};\n  do {\n    // Ensure that we have enough data remaining in the buffer to parse the query\n    if (available_bytes < field_size) {\n      context->counters_.underflow_counter.inc();\n      ENVOY_LOG(debug,\n                \"Exhausted available bytes in the buffer. Insufficient data to parse query field.\");\n      return false;\n    }\n\n    // Each aggregate DNS header field is 2 bytes wide.\n    data = buffer->peekBEInt<uint16_t>(offset);\n    offset += field_size;\n    available_bytes -= field_size;\n\n    switch (state) {\n    case DnsQueryParseState::Init:\n      header_.id = data;\n      state = DnsQueryParseState::Flags;\n      break;\n    case DnsQueryParseState::Flags:\n      ::memcpy(static_cast<void*>(&header_.flags), &data, field_size);\n      state = DnsQueryParseState::Questions;\n      break;\n    case DnsQueryParseState::Questions:\n      header_.questions = data;\n      state = DnsQueryParseState::Answers;\n      break;\n    case DnsQueryParseState::Answers:\n      header_.answers = data;\n      state = DnsQueryParseState::Authority;\n      break;\n    case DnsQueryParseState::Authority:\n      header_.authority_rrs = data;\n      state = DnsQueryParseState::Authority2;\n      break;\n    case DnsQueryParseState::Authority2:\n      header_.additional_rrs = data;\n      done = true;\n      break;\n    }\n  } while (!done);\n\n  if (!header_.flags.qr && header_.answers) {\n    ENVOY_LOG(debug, \"Answer records present in query\");\n    return false;\n  }\n\n  if (header_.questions != 1) {\n    context->response_code_ = DNS_RESPONSE_CODE_FORMAT_ERROR;\n    ENVOY_LOG(debug, \"Unexpected number [{}] of questions in DNS query\", header_.questions);\n    return false;\n  }\n\n  context->id_ = static_cast<uint16_t>(header_.id);\n  if (context->id_ == 0) {\n    ENVOY_LOG(debug, \"No ID in DNS query\");\n    return false;\n  }\n\n  // Almost always, we will have only one query here. 
Per the RFC, QDCOUNT is usually 1\n  context->queries_.reserve(header_.questions);\n  for (auto index = 0; index < header_.questions; index++) {\n    ENVOY_LOG(trace, \"Parsing [{}/{}] questions\", index, header_.questions);\n    auto rec = parseDnsQueryRecord(buffer, offset);\n    if (rec == nullptr) {\n      context->counters_.query_parsing_failure.inc();\n      ENVOY_LOG(debug, \"Couldn't parse query record from buffer\");\n      return false;\n    }\n    context->queries_.push_back(std::move(rec));\n  }\n\n  // From RFC 1035\n  // 4.1.3. Resource record format\n  //\n  // The answer, authority, and additional sections all share the same format: a variable number of\n  // resource records, where the number of records is specified in the corresponding count field in\n  // the header.\n\n  // Parse Answer Records and Additional Resource Records. This is primarily used for tests\n  // to validate the response generated by the filter\n  if (header_.answers && !parseAnswerRecords(context->answers_, header_.answers, buffer, offset)) {\n    return false;\n  }\n\n  if (header_.authority_rrs) {\n    // We are not generating these in the filter and don't have a use for them at the moment.\n    // If they exist, we will not parse them and return an error to the client since they appear\n    // between the answers and additional resource records in the buffer. We return true so that\n    // the proper status code is sent to the client\n    context->response_code_ = DNS_RESPONSE_CODE_NOT_IMPLEMENTED;\n    return true;\n  }\n\n  if (header_.additional_rrs) {\n    // We may encounter additional resource records that we do not support. 
Since the filter\n    // operates on queries, we can skip any additional records that we cannot parse since\n    // they will not affect responses.\n    parseAnswerRecords(context->additional_, header_.additional_rrs, buffer, offset);\n  }\n\n  return true;\n}\n\nbool DnsMessageParser::parseAnswerRecords(DnsAnswerMap& answers, const uint16_t answer_count,\n                                          const Buffer::InstancePtr& buffer, uint64_t& offset) {\n  answers.reserve(answer_count);\n  for (auto index = 0; index < answer_count; index++) {\n    ENVOY_LOG(trace, \"Parsing [{}/{}] answers\", index, answer_count);\n    auto rec = parseDnsAnswerRecord(buffer, offset);\n    if (rec == nullptr) {\n      ENVOY_LOG(debug, \"Couldn't parse answer record from buffer\");\n      return false;\n    }\n    const std::string name = rec->name_;\n    answers.emplace(name, std::move(rec));\n  }\n  return true;\n}\n\nconst std::string DnsMessageParser::parseDnsNameRecord(const Buffer::InstancePtr& buffer,\n                                                       uint64_t& available_bytes,\n                                                       uint64_t& name_offset) {\n  void* buf = buffer->linearize(static_cast<uint32_t>(buffer->length()));\n  const unsigned char* linearized_data = static_cast<const unsigned char*>(buf);\n  const unsigned char* record = linearized_data + name_offset;\n  long encoded_len;\n  char* output;\n\n  const int result =\n      ares_expand_name(record, linearized_data, buffer->length(), &output, &encoded_len);\n  if (result != ARES_SUCCESS) {\n    return EMPTY_STRING;\n  }\n\n  std::string name(output);\n  ares_free_string(output);\n  name_offset += encoded_len;\n  available_bytes -= encoded_len;\n\n  return name;\n}\n\nDnsAnswerRecordPtr DnsMessageParser::parseDnsARecord(DnsAnswerCtx& ctx) {\n  Network::Address::InstanceConstSharedPtr ip_addr = nullptr;\n\n  switch (ctx.record_type_) {\n  case DNS_RECORD_TYPE_A:\n    if (ctx.available_bytes_ >= 
sizeof(uint32_t)) {\n      sockaddr_in sa4;\n      sa4.sin_addr.s_addr = ctx.buffer_->peekLEInt<uint32_t>(ctx.offset_);\n      ip_addr = std::make_shared<Network::Address::Ipv4Instance>(&sa4);\n      ctx.offset_ += ctx.data_length_;\n    }\n    break;\n  case DNS_RECORD_TYPE_AAAA:\n    if (ctx.available_bytes_ >= sizeof(absl::uint128)) {\n      sockaddr_in6 sa6;\n      uint8_t* address6_bytes = reinterpret_cast<uint8_t*>(&sa6.sin6_addr.s6_addr);\n      static constexpr size_t count = sizeof(absl::uint128) / sizeof(uint8_t);\n      for (size_t index = 0; index < count; index++) {\n        *address6_bytes++ = ctx.buffer_->peekLEInt<uint8_t>(ctx.offset_++);\n      }\n      ip_addr = std::make_shared<Network::Address::Ipv6Instance>(sa6, true);\n    }\n    break;\n  }\n\n  if (ip_addr == nullptr) {\n    ENVOY_LOG(debug, \"No IP parsed from an A or AAAA record\");\n    return nullptr;\n  }\n\n  ENVOY_LOG(trace, \"Parsed address [{}] from record type [{}]: offset {}\",\n            ip_addr->ip()->addressAsString(), ctx.record_type_, ctx.offset_);\n\n  return std::make_unique<DnsAnswerRecord>(ctx.record_name_, ctx.record_type_, ctx.record_class_,\n                                           std::chrono::seconds(ctx.ttl_), std::move(ip_addr));\n}\n\nDnsSrvRecordPtr DnsMessageParser::parseDnsSrvRecord(DnsAnswerCtx& ctx) {\n  uint64_t data_length = ctx.data_length_;\n\n  if (data_length < 3 * sizeof(uint16_t)) {\n    ENVOY_LOG(debug, \"Insufficient data for reading a complete SRV answer record\");\n    return nullptr;\n  }\n\n  uint64_t available_bytes = ctx.buffer_->length() - ctx.offset_;\n  if (available_bytes < data_length) {\n    ENVOY_LOG(debug, \"No data left in buffer for reading SRV answer record\");\n    return nullptr;\n  }\n\n  DnsSrvRecord::DnsTargetAttributes attrs{};\n  attrs.priority = ctx.buffer_->peekBEInt<uint16_t>(ctx.offset_);\n  ctx.offset_ += sizeof(uint16_t);\n  available_bytes -= sizeof(uint16_t);\n\n  attrs.weight = 
ctx.buffer_->peekBEInt<uint16_t>(ctx.offset_);\n  ctx.offset_ += sizeof(uint16_t);\n  available_bytes -= sizeof(uint16_t);\n\n  attrs.port = ctx.buffer_->peekBEInt<uint16_t>(ctx.offset_);\n  ctx.offset_ += sizeof(uint16_t);\n  available_bytes -= sizeof(uint16_t);\n\n  const std::string target_name = parseDnsNameRecord(ctx.buffer_, available_bytes, ctx.offset_);\n  const absl::string_view proto = Utils::getProtoFromName(ctx.record_name_);\n\n  if (!proto.empty() && !target_name.empty()) {\n    auto srv_record =\n        std::make_unique<DnsSrvRecord>(ctx.record_name_, proto, std::chrono::seconds(ctx.ttl_));\n    srv_record->addTarget(target_name, attrs);\n    return srv_record;\n  }\n  return nullptr;\n}\n\nDnsAnswerRecordPtr DnsMessageParser::parseDnsAnswerRecord(const Buffer::InstancePtr& buffer,\n                                                          uint64_t& offset) {\n  uint64_t available_bytes = buffer->length() - offset;\n  const std::string record_name = parseDnsNameRecord(buffer, available_bytes, offset);\n  if (record_name.empty()) {\n    ENVOY_LOG(debug, \"Unable to parse name record from buffer\");\n    return nullptr;\n  }\n\n  if (available_bytes < (sizeof(uint32_t) + 3 * sizeof(uint16_t))) {\n    ENVOY_LOG(debug,\n              \"Insufficient data in buffer to read answer record data.\"\n              \"Available bytes: {}\",\n              available_bytes);\n    return nullptr;\n  }\n\n  // Parse the record type\n  uint16_t record_type;\n  record_type = buffer->peekBEInt<uint16_t>(offset);\n  offset += sizeof(uint16_t);\n  available_bytes -= sizeof(uint16_t);\n\n  // TODO(abaptiste): Support Extension Mechanisms for DNS (RFC2671)\n  //\n  // We may see optional records indicating DNS extension support. We need to skip\n  // these records until we have proper support. Encountering one of these records\n  // does not indicate a failure. 
We support A, AAAA and SRV record types\n  if (record_type != DNS_RECORD_TYPE_A && record_type != DNS_RECORD_TYPE_AAAA &&\n      record_type != DNS_RECORD_TYPE_SRV) {\n    ENVOY_LOG(debug, \"Unsupported record type [{}] found in answer\", record_type);\n    return nullptr;\n  }\n\n  // Parse the record class\n  uint16_t record_class;\n  record_class = buffer->peekBEInt<uint16_t>(offset);\n  offset += sizeof(uint16_t);\n  available_bytes -= sizeof(uint16_t);\n\n  // We support only IN record classes\n  if (record_class != DNS_RECORD_CLASS_IN) {\n    ENVOY_LOG(debug, \"Unsupported record class [{}] found in answer\", record_class);\n    return nullptr;\n  }\n\n  // Read the record's TTL\n  uint32_t ttl;\n  ttl = buffer->peekBEInt<uint32_t>(offset);\n  offset += sizeof(uint32_t);\n  available_bytes -= sizeof(uint32_t);\n\n  // Parse the Data Length and address data record\n  uint16_t data_length;\n  data_length = buffer->peekBEInt<uint16_t>(offset);\n  offset += sizeof(uint16_t);\n  available_bytes -= sizeof(uint16_t);\n\n  if (data_length == 0) {\n    ENVOY_LOG(debug, \"Read zero for data length when reading address from answer record\");\n    return nullptr;\n  }\n\n  auto ctx = DnsAnswerCtx(buffer, record_name, record_type, record_class, available_bytes,\n                          data_length, ttl, offset);\n\n  switch (record_type) {\n  case DNS_RECORD_TYPE_A:\n  case DNS_RECORD_TYPE_AAAA:\n    return parseDnsARecord(ctx);\n  case DNS_RECORD_TYPE_SRV:\n    return parseDnsSrvRecord(ctx);\n  default:\n    ENVOY_LOG(debug, \"Unsupported record type [{}] found in answer\", record_type);\n    return nullptr;\n  }\n}\n\nDnsQueryRecordPtr DnsMessageParser::parseDnsQueryRecord(const Buffer::InstancePtr& buffer,\n                                                        uint64_t& offset) {\n  uint64_t available_bytes = buffer->length() - offset;\n\n  // This is the minimum data length needed to parse a name [length, value, null byte]\n  if (available_bytes < 
MIN_QUERY_NAME_LENGTH) {\n    ENVOY_LOG(debug, \"No available data in buffer to parse a query record\");\n    return nullptr;\n  }\n\n  const std::string record_name = parseDnsNameRecord(buffer, available_bytes, offset);\n  if (record_name.empty()) {\n    ENVOY_LOG(debug, \"Unable to parse name record from buffer [length {}]\", buffer->length());\n    return nullptr;\n  }\n\n  // After reading the name we should have data for the record type and class\n  if (available_bytes < 2 * sizeof(uint16_t)) {\n    ENVOY_LOG(debug,\n              \"Insufficient data in buffer to read query record type and class. \"\n              \"Available bytes: {}\",\n              available_bytes);\n    return nullptr;\n  }\n\n  // Read the record type\n  uint16_t record_type;\n  record_type = buffer->peekBEInt<uint16_t>(offset);\n  offset += sizeof(record_type);\n\n  // Read the record class. This value is always 1 for internet address records\n  uint16_t record_class;\n  record_class = buffer->peekBEInt<uint16_t>(offset);\n  offset += sizeof(record_class);\n\n  if (record_class != DNS_RECORD_CLASS_IN) {\n    ENVOY_LOG(debug, \"Unsupported record class '{}' in address record\", record_class);\n    return nullptr;\n  }\n\n  auto rec = std::make_unique<DnsQueryRecord>(record_name, record_type, record_class);\n  rec->query_time_ms_ = std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n      query_latency_histogram_, timesource_);\n\n  ENVOY_LOG(trace, \"Extracted query record. 
Name: {} type: {} class: {}\", record_name, record_type,\n            record_class);\n\n  return rec;\n}\n\nvoid DnsMessageParser::setDnsResponseFlags(DnsQueryContextPtr& query_context,\n                                           const uint16_t questions, const uint16_t answers,\n                                           const uint16_t authority_rrs,\n                                           const uint16_t additional_rrs) {\n  // Copy the transaction ID\n  response_header_.id = header_.id;\n\n  // Signify that this is a response to a query\n  response_header_.flags.qr = 1;\n\n  response_header_.flags.opcode = header_.flags.opcode;\n  response_header_.flags.aa = 0;\n  response_header_.flags.tc = 0;\n\n  // Copy Recursion flags\n  response_header_.flags.rd = header_.flags.rd;\n\n  // Set the recursion flag based on whether Envoy is configured to forward queries\n  response_header_.flags.ra = recursion_available_;\n\n  // reserved flag is not set\n  response_header_.flags.z = 0;\n\n  // Set the authenticated flags to zero\n  response_header_.flags.ad = 0;\n\n  response_header_.flags.cd = 0;\n  response_header_.answers = answers;\n  response_header_.flags.rcode = query_context->response_code_;\n\n  // Set the number of questions from the incoming query\n  response_header_.questions = questions;\n\n  response_header_.authority_rrs = authority_rrs;\n  response_header_.additional_rrs = additional_rrs;\n}\n\nbool DnsMessageParser::createAndStoreDnsAnswerRecord(\n    const absl::string_view name, const uint16_t rec_type, const uint16_t rec_class,\n    const std::chrono::seconds ttl, Network::Address::InstanceConstSharedPtr ipaddr,\n    DnsAnswerMap& collection) {\n  // Verify that we have an address matching the query record type\n  switch (rec_type) {\n  case DNS_RECORD_TYPE_AAAA:\n    if (ipaddr->ip()->ipv6() == nullptr) {\n      ENVOY_LOG(debug, \"Unable to return IPV6 address for query\");\n      return false;\n    }\n    break;\n\n  case DNS_RECORD_TYPE_A:\n    if 
(ipaddr->ip()->ipv4() == nullptr) {\n      ENVOY_LOG(debug, \"Unable to return IPV4 address for query\");\n      return false;\n    }\n    break;\n  }\n\n  auto answer_record =\n      std::make_unique<DnsAnswerRecord>(name, rec_type, rec_class, ttl, std::move(ipaddr));\n  collection.emplace(std::string(name), std::move(answer_record));\n\n  return true;\n}\n\nbool DnsMessageParser::storeDnsAdditionalRecord(DnsQueryContextPtr& context,\n                                                const absl::string_view name,\n                                                const uint16_t rec_type, const uint16_t rec_class,\n                                                const std::chrono::seconds ttl,\n                                                Network::Address::InstanceConstSharedPtr ipaddr) {\n  return createAndStoreDnsAnswerRecord(name, rec_type, rec_class, ttl, std::move(ipaddr),\n                                       context->additional_);\n}\n\nbool DnsMessageParser::storeDnsAnswerRecord(DnsQueryContextPtr& context,\n                                            const DnsQueryRecord& query_rec,\n                                            const std::chrono::seconds ttl,\n                                            Network::Address::InstanceConstSharedPtr ipaddr) {\n  return createAndStoreDnsAnswerRecord(query_rec.name_, query_rec.type_, query_rec.class_, ttl,\n                                       std::move(ipaddr), context->answers_);\n}\n\nvoid DnsMessageParser::addNewDnsSrvAnswerRecord(DnsQueryContextPtr& context,\n                                                const DnsQueryRecord& query_rec,\n                                                DnsSrvRecordPtr service) {\n  RELEASE_ASSERT(query_rec.class_ == DNS_RECORD_CLASS_IN, \"Unsupported DNS Record Class in record\");\n  if (query_rec.type_ == DNS_RECORD_TYPE_SRV) {\n    context->answers_.emplace(query_rec.name_, std::move(service));\n  }\n}\n\nvoid DnsMessageParser::storeDnsSrvAnswerRecord(DnsQueryContextPtr& 
context,\n                                               const DnsQueryRecord& query_rec,\n                                               const DnsSrvRecordPtr& service) {\n  if (query_rec.type_ == DNS_RECORD_TYPE_SRV) {\n    ENVOY_LOG(trace, \"storing answer record type [{}] for {}\", query_rec.type_, query_rec.name_);\n\n    auto srv_record = std::make_unique<DnsSrvRecord>(*service);\n    addNewDnsSrvAnswerRecord(context, query_rec, std::move(srv_record));\n  }\n}\n\nvoid DnsMessageParser::setResponseCode(DnsQueryContextPtr& context,\n                                       const uint16_t serialized_queries,\n                                       const uint16_t serialized_answers) {\n  // Do not change the response returned to the client if the following errors have\n  // already been set\n  if (context->response_code_ == DNS_RESPONSE_CODE_FORMAT_ERROR ||\n      context->response_code_ == DNS_RESPONSE_CODE_NOT_IMPLEMENTED) {\n    return;\n  }\n\n  // Check for unsupported request types\n  for (const auto& query : context->queries_) {\n    switch (query->type_) {\n    case DNS_RECORD_TYPE_A:\n    case DNS_RECORD_TYPE_AAAA:\n    case DNS_RECORD_TYPE_SRV:\n      break;\n    default:\n      context->response_code_ = DNS_RESPONSE_CODE_NOT_IMPLEMENTED;\n      return;\n    }\n  }\n\n  // Output validation\n  if (serialized_queries == 0) {\n    context->response_code_ = DNS_RESPONSE_CODE_FORMAT_ERROR;\n    return;\n  }\n\n  if (serialized_answers == 0) {\n    context->response_code_ = DNS_RESPONSE_CODE_NAME_ERROR;\n    return;\n  }\n  context->response_code_ = DNS_RESPONSE_CODE_NO_ERROR;\n}\n\nvoid DnsMessageParser::buildResponseBuffer(DnsQueryContextPtr& query_context,\n                                           Buffer::OwnedImpl& buffer) {\n  // Each response must have DNS flags, which spans 4 bytes. 
Account for them immediately so\n  // that we can adjust the number of returned answers to remain under the limit\n  size_t total_buffer_size = sizeof(DnsHeaderFlags);\n  uint16_t touched_answers = 0;\n  uint16_t serialized_answers = 0;\n  uint16_t serialized_queries = 0;\n  uint16_t serialized_authority_rrs = 0;\n  uint16_t serialized_additional_rrs = 0;\n\n  Buffer::OwnedImpl query_buffer{};\n  Buffer::OwnedImpl answer_buffer{};\n  Buffer::OwnedImpl addl_rec_buffer{};\n\n  ENVOY_LOG(trace, \"Building response for query ID [{}]\", query_context->id_);\n\n  for (const auto& query : query_context->queries_) {\n    if (!query->serialize(query_buffer)) {\n      ENVOY_LOG(debug, \"Unable to serialize query record for {}\", query->name_);\n      continue;\n    }\n\n    // Serialize and account for each query's size. That said, there should be only one query.\n    ++serialized_queries;\n    total_buffer_size += query_buffer.length();\n\n    const auto& answers = query_context->answers_;\n    if (answers.empty()) {\n      continue;\n    }\n\n    // Serialize the additional records in parallel with the answers to ensure consistent\n    // records\n    const auto& additional_rrs = query_context->additional_;\n\n    const size_t num_answers = answers.size();\n\n    // Randomize the starting index if we have more than 8 records\n    size_t index = num_answers > MAX_RETURNED_RECORDS ? rng_.random() % num_answers : 0;\n    while (serialized_answers < num_answers && touched_answers < num_answers) {\n      const auto answer = std::next(answers.begin(), (index++ % num_answers));\n      ++touched_answers;\n\n      // Query names are limited to 255 characters. 
Since we are using c-ares to decode the\n      // encoded query names, we should not end up with a non-conforming name here.\n      //\n      // See Section 2.3.4 of https://tools.ietf.org/html/rfc1035\n      RELEASE_ASSERT(query->name_.size() < MAX_NAME_LENGTH,\n                     \"Query name is too large for serialization\");\n\n      // Serialize answer records whose names and types match the query\n      if (answer->first == query->name_ && answer->second->type_ == query->type_) {\n        // Ensure that we can serialize the answer and the corresponding SRV additional\n        // record together.\n\n        // It is still possible that there may be more additional records than those referenced\n        // by the answers. However, each serialized answer will have an accompanying additional\n        // record for the host.\n        if (query->type_ == DNS_RECORD_TYPE_SRV) {\n          const DnsSrvRecord* srv_rec = dynamic_cast<DnsSrvRecord*>(answer->second.get());\n          const auto& target = srv_rec->targets_.begin();\n          const auto& rr = additional_rrs.find(target->first);\n\n          if (rr != additional_rrs.end()) {\n            Buffer::OwnedImpl serialized_rr{};\n\n            // If serializing the additional record fails, skip serializing the answer record\n            if (!rr->second->serialize(serialized_rr)) {\n              ENVOY_LOG(debug, \"Unable to serialize answer record for {}\", query->name_);\n              continue;\n            }\n            total_buffer_size += serialized_rr.length();\n            addl_rec_buffer.add(serialized_rr);\n            ++serialized_additional_rrs;\n          }\n        }\n\n        // Now we serialize the answer record. 
We check the length of the serialized\n        // data to ensure we don't exceed the DNS response limit\n        Buffer::OwnedImpl serialized_answer;\n        if (!answer->second->serialize(serialized_answer)) {\n          ENVOY_LOG(debug, \"Unable to serialize answer record for {}\", query->name_);\n          continue;\n        }\n        total_buffer_size += serialized_answer.length();\n        if (total_buffer_size > MAX_DNS_RESPONSE_SIZE) {\n          break;\n        }\n        answer_buffer.add(serialized_answer);\n        if (++serialized_answers == MAX_RETURNED_RECORDS) {\n          break;\n        }\n      }\n    }\n  }\n\n  setResponseCode(query_context, serialized_queries, serialized_answers);\n  setDnsResponseFlags(query_context, serialized_queries, serialized_answers,\n                      serialized_authority_rrs, serialized_additional_rrs);\n\n  // Build the response buffer for transmission to the client\n  buffer.writeBEInt<uint16_t>(response_header_.id);\n\n  uint16_t flags;\n  ::memcpy(&flags, static_cast<void*>(&response_header_.flags), sizeof(uint16_t));\n  buffer.writeBEInt<uint16_t>(flags);\n\n  buffer.writeBEInt<uint16_t>(response_header_.questions);\n  buffer.writeBEInt<uint16_t>(response_header_.answers);\n  buffer.writeBEInt<uint16_t>(response_header_.authority_rrs);\n  buffer.writeBEInt<uint16_t>(response_header_.additional_rrs);\n\n  // write the queries and answers\n  buffer.move(query_buffer);\n  buffer.move(answer_buffer);\n  buffer.move(addl_rec_buffer);\n}\n\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/dns_filter/dns_parser.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/network/listener.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/stats/timespan_impl.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter_constants.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\n\n/**\n * BaseDnsRecord contains the fields and functions common to both query and answer records.\n */\nclass BaseDnsRecord {\npublic:\n  BaseDnsRecord(const absl::string_view rec_name, const uint16_t rec_type, const uint16_t rec_class)\n      : name_(rec_name), type_(rec_type), class_(rec_class) {}\n  virtual ~BaseDnsRecord() = default;\n  bool serializeName(Buffer::OwnedImpl& output);\n  virtual bool serialize(Buffer::OwnedImpl& output) PURE;\n\n  const std::string name_;\n  const uint16_t type_;\n  const uint16_t class_;\n\nprotected:\n  bool serializeSpecificName(Buffer::OwnedImpl& output, const absl::string_view name);\n};\n\n/**\n * DnsQueryRecord represents a query record parsed from a DNS request from a client. 
Each query\n * record contains the domain requested and the flags dictating the type of record that is sought.\n */\nclass DnsQueryRecord : public BaseDnsRecord {\npublic:\n  DnsQueryRecord(const absl::string_view rec_name, const uint16_t rec_type,\n                 const uint16_t rec_class)\n      : BaseDnsRecord(rec_name, rec_type, rec_class) {}\n  bool serialize(Buffer::OwnedImpl& output) override;\n\n  std::unique_ptr<Stats::HistogramCompletableTimespanImpl> query_time_ms_;\n};\n\nusing DnsQueryRecordPtr = std::unique_ptr<DnsQueryRecord>;\nusing DnsQueryPtrVec = std::vector<DnsQueryRecordPtr>;\nusing AddressConstPtrVec = std::vector<Network::Address::InstanceConstSharedPtr>;\n\n/**\n * DnsAnswerRecord represents a single answer record for a name that is to be serialized and sent to\n * a client. This class differs from the BaseDnsRecord and DnsQueryRecord because it contains\n * additional fields for the TTL and address.\n */\nclass DnsAnswerRecord : public BaseDnsRecord {\npublic:\n  DnsAnswerRecord(const absl::string_view query_name, const uint16_t rec_type,\n                  const uint16_t rec_class, const std::chrono::seconds ttl,\n                  Network::Address::InstanceConstSharedPtr ipaddr)\n      : BaseDnsRecord(query_name, rec_type, rec_class), ttl_(ttl), ip_addr_(ipaddr) {}\n  bool serialize(Buffer::OwnedImpl& output) override;\n\n  const std::chrono::seconds ttl_;\n  const Network::Address::InstanceConstSharedPtr ip_addr_;\n};\n\nusing DnsAnswerRecordPtr = std::unique_ptr<DnsAnswerRecord>;\nusing DnsAnswerMap = std::unordered_multimap<std::string, DnsAnswerRecordPtr>;\n\n/**\n * DnsSrvRecord represents a single answer record for a service which is to be serialized and\n * sent to a client. 
This class inherits core fields from DnsAnswerRecord and adds new fields\n * inherent to DNS service records\n */\nclass DnsSrvRecord : public DnsAnswerRecord {\npublic:\n  DnsSrvRecord(const absl::string_view service_name, const absl::string_view proto,\n               const std::chrono::seconds ttl, const uint16_t rec_type = DNS_RECORD_TYPE_SRV,\n               const uint16_t rec_class = DNS_RECORD_CLASS_IN)\n      : DnsAnswerRecord(service_name, rec_type, rec_class, ttl, nullptr), proto_(proto) {}\n\n  // Copy Constructor\n  DnsSrvRecord(const DnsSrvRecord& other)\n      : DnsAnswerRecord(other.name_, other.type_, other.class_, other.ttl_, nullptr),\n        proto_(other.proto_) {\n    for (const auto& entry : other.targets_) {\n      addTarget(entry.first, entry.second);\n    }\n  }\n\n  struct DnsTargetAttributes {\n    uint16_t priority;\n    uint16_t weight;\n    uint16_t port;\n    bool is_cluster;\n  };\n\n  bool serialize(Buffer::OwnedImpl& output) override;\n  void addTarget(const absl::string_view target, const DnsTargetAttributes& attrs);\n\n  std::string proto_;\n  absl::flat_hash_map<std::string, DnsTargetAttributes> targets_;\n};\n\nusing DnsSrvRecordPtr = std::unique_ptr<DnsSrvRecord>;\n\n// Store the server record with the full service name as the key to the service config. For\n// each service there can be multiple configs for a given service name. The service can be\n// weighted to distribute connections to multiple hosts, etc.\nusing DnsSrvRecordPtrVec = std::vector<DnsSrvRecordPtr>;\n\n/**\n * @brief This struct is used to hold pointers to the counters that are relevant to the\n * parser. 
This is done to prevent dependency loops between the parser and filter headers\n */\nstruct DnsParserCounters {\n  Stats::Counter& underflow_counter;\n  Stats::Counter& record_name_overflow;\n  Stats::Counter& query_parsing_failure;\n\n  DnsParserCounters(Stats::Counter& underflow, Stats::Counter& record_name,\n                    Stats::Counter& query_parsing)\n      : underflow_counter(underflow), record_name_overflow(record_name),\n        query_parsing_failure(query_parsing) {}\n};\n\n/**\n * DnsQueryContext contains all the data necessary for responding to a query from a given client.\n */\nclass DnsQueryContext {\npublic:\n  DnsQueryContext(Network::Address::InstanceConstSharedPtr local,\n                  Network::Address::InstanceConstSharedPtr peer, DnsParserCounters& counters,\n                  uint64_t retry_count)\n      : local_(std::move(local)), peer_(std::move(peer)), counters_(counters), parse_status_(false),\n        response_code_(DNS_RESPONSE_CODE_NO_ERROR), retry_(retry_count) {}\n\n  const Network::Address::InstanceConstSharedPtr local_;\n  const Network::Address::InstanceConstSharedPtr peer_;\n  DnsParserCounters& counters_;\n  bool parse_status_;\n  uint16_t response_code_;\n  uint64_t retry_;\n  uint16_t id_;\n  Network::DnsResolver::ResolutionStatus resolution_status_;\n  DnsQueryPtrVec queries_;\n  DnsAnswerMap answers_;\n  DnsAnswerMap additional_;\n  bool in_callback_;\n};\n\nusing DnsQueryContextPtr = std::unique_ptr<DnsQueryContext>;\nusing DnsFilterResolverCallback = std::function<void(\n    DnsQueryContextPtr context, const DnsQueryRecord* current_query, AddressConstPtrVec& ipaddr)>;\n\n/**\n * This class orchestrates parsing a DNS query and building the response to be sent to a client.\n */\nclass DnsMessageParser : public Logger::Loggable<Logger::Id::filter> {\npublic:\n  enum class DnsQueryParseState {\n    Init,\n    Flags,     // 2 bytes\n    Questions, // 2 bytes\n    Answers,   // 2 bytes\n    Authority, // 2 bytes\n    
Authority2 // 2 bytes\n  };\n\n  // The flags have been verified with dig and this structure should not be modified. The flag\n  // order here does not match the RFC, but takes byte ordering into account so that serialization\n  // does not bitwise operations.\n  PACKED_STRUCT(struct DnsHeaderFlags {\n    unsigned rcode : 4;  // return code\n    unsigned cd : 1;     // checking disabled\n    unsigned ad : 1;     // authenticated data\n    unsigned z : 1;      // z - bit (must be zero in queries per RFC1035)\n    unsigned ra : 1;     // recursion available\n    unsigned rd : 1;     // recursion desired\n    unsigned tc : 1;     // truncated response\n    unsigned aa : 1;     // authoritative answer\n    unsigned opcode : 4; // operation code\n    unsigned qr : 1;     // query or response\n  });\n\n  /**\n   * Structure representing the DNS header as it appears in a packet\n   * See https://www.ietf.org/rfc/rfc1035.txt for more details\n   */\n  PACKED_STRUCT(struct DnsHeader {\n    uint16_t id;\n    struct DnsHeaderFlags flags;\n    uint16_t questions;\n    uint16_t answers;\n    uint16_t authority_rrs;\n    uint16_t additional_rrs;\n  });\n\n  DnsMessageParser(bool recurse, TimeSource& timesource, uint64_t retry_count,\n                   Random::RandomGenerator& random, Stats::Histogram& latency_histogram)\n      : recursion_available_(recurse), timesource_(timesource), retry_count_(retry_count),\n        query_latency_histogram_(latency_histogram), rng_(random) {}\n\n  /**\n   * @brief Builds an Answer record for the active query. The active query transaction ID is at\n   * the top of a queue. 
This ID is sufficient enough to determine the answer records associated\n   * with the query\n   */\n  DnsAnswerRecordPtr getResponseForQuery();\n\n  /**\n   * @param buffer the buffer containing the constructed DNS response to be sent to a client\n   */\n  void buildResponseBuffer(DnsQueryContextPtr& query_context, Buffer::OwnedImpl& buffer);\n\n  /**\n   * @brief parse a single query record from a client request\n   *\n   * @param buffer a reference to the incoming request object received by the listener\n   * @param offset the buffer offset at which parsing is to begin. This parameter is updated when\n   * one record is parsed from the buffer and returned to the caller.\n   * @return DnsQueryRecordPtr a pointer to a DnsQueryRecord object containing all query data\n   * parsed from the buffer\n   */\n  DnsQueryRecordPtr parseDnsQueryRecord(const Buffer::InstancePtr& buffer, uint64_t& offset);\n\n  struct DnsAnswerCtx {\n    DnsAnswerCtx(const Buffer::InstancePtr& buffer, const absl::string_view record_name,\n                 const uint16_t record_type, const uint16_t record_class,\n                 const uint16_t available_bytes, const uint16_t data_length, const uint32_t ttl,\n                 uint64_t& offset)\n        : buffer_(buffer), record_name_(record_name), record_type_(record_type),\n          record_class_(record_class), available_bytes_(available_bytes), data_length_(data_length),\n          ttl_(ttl), offset_(offset) {}\n\n    const Buffer::InstancePtr& buffer_;\n    const std::string record_name_;\n    const uint16_t record_type_;\n    const uint16_t record_class_;\n    const uint16_t available_bytes_;\n    const uint16_t data_length_;\n    const uint32_t ttl_;\n    uint64_t& offset_;\n  };\n\n  /**\n   * @brief parse an A or AAAA DNS Record\n   *\n   * @param context the query context for which we are generating a response\n   * @return DnsAnswerRecordPtr a pointer to a DnsAnswerRecord object containing the parsed answer\n   * record\n   */\n  
DnsAnswerRecordPtr parseDnsARecord(DnsAnswerCtx& context);\n\n  /**\n   * @brief parse a Server Selection (SRV) DNS Record\n   *\n   * @param context the query context for which we are generating a response\n   * @return DnsSrvRecordPtr a pointer to a DnsSrvRecord object containing the parsed server record\n   */\n  DnsSrvRecordPtr parseDnsSrvRecord(DnsAnswerCtx& context);\n\n  /**\n   * @brief parse a single answer record from a client request or filter response\n   *\n   * @param buffer a reference to a buffer containing a DNS request or response\n   * @param offset the buffer offset at which parsing is to begin. This parameter is updated when\n   * one record is parsed from the buffer and returned to the caller.\n   * @return DnsQueryRecordPtr a pointer to a DnsAnswerRecord object containing all query and answer\n   * data parsed from the buffer\n   */\n  DnsAnswerRecordPtr parseDnsAnswerRecord(const Buffer::InstancePtr& buffer, uint64_t& offset);\n\n  /**\n   * @brief Parse answer records using a single function. 
Answer records follow a common format\n   * so one function will suffice for reading them.\n   *\n   * @param answers a reference to the map containing the parsed records\n   * @param answer_count the indicated number of records we expect parsed from the request header\n   * @param buffer a reference to a buffer containing a DNS request or response\n   * @param offset a reference to an index into the buffer indicating the position where reading may\n   * begin\n   */\n  bool parseAnswerRecords(DnsAnswerMap& answers, const uint16_t answer_count,\n                          const Buffer::InstancePtr& buffer, uint64_t& offset);\n\n  /**\n   * @brief store Answer Records in the supplied collection after validating the record type\n   *\n   * @param name the name of the record being stored\n   * @param rec_type the type of the record being stored\n   * @param rec_class the class of the record being stored\n   * @param ttl the Time-to-live of the record being stored\n   * @param ipaddr the ip of the record being stored. In the case of SRV records, this is\n   * @param collection the destination for the new answer record being created\n   */\n  bool createAndStoreDnsAnswerRecord(const absl::string_view name, const uint16_t rec_type,\n                                     const uint16_t rec_class, const std::chrono::seconds ttl,\n                                     Network::Address::InstanceConstSharedPtr ipaddr,\n                                     DnsAnswerMap& collection);\n  /**\n   * @brief store Additional Resource Records in a separate collection from DNS answers\n   *\n   * @param context the query context for which we are generating a response\n   * @param name the name of the record being stored\n   * @param rec_type the type of the record being stored\n   * @param rec_class the class of the record being stored\n   * @param ttl the Time-to-live of the record being stored\n   * @param ipaddr the ip of the record being stored. 
In the case of SRV records, this is\n   * the address of a target node referenced by an SRV record entry\n   */\n  bool storeDnsAdditionalRecord(DnsQueryContextPtr& context, const absl::string_view name,\n                                const uint16_t rec_type, const uint16_t rec_class,\n                                const std::chrono::seconds ttl,\n                                Network::Address::InstanceConstSharedPtr ipaddr);\n\n  /**\n   * @brief Constructs a DNS SRV Answer record for a given service and stores the object in a map\n   * where the response is associated with query name. This creates a new record and uses the\n   * add function above to insert it into the service map\n   *\n   * @param context the query context for which we are generating a response\n   * @param query_rec to which the answer is matched.\n   * @param service the service that is returned in the answer record\n   * @param move_rec move the passed in record to the underlying connection instead of creating a\n   * new object\n   */\n  void storeDnsSrvAnswerRecord(DnsQueryContextPtr& context, const DnsQueryRecord& query_rec,\n                               const DnsSrvRecordPtr& service);\n\n  /**\n   * @brief Constructs a DNS Answer record for a given IP Address and stores the object in a map\n   * where the response is associated with query name.\n   *\n   * @param context the query context for which we are generating a response\n   * @param query_rec to which the answer is matched.\n   * @param ttl the TTL specifying how long the returned answer is cached\n   * @param ipaddr the address that is returned in the answer record\n   * @return true if the answer record matches the query type\n   */\n  bool storeDnsAnswerRecord(DnsQueryContextPtr& context, const DnsQueryRecord& query_rec,\n                            const std::chrono::seconds ttl,\n                            Network::Address::InstanceConstSharedPtr ipaddr);\n\n  /**\n   * @return uint16_t the response code flag value 
from a parsed dns object\n   */\n  uint16_t getQueryResponseCode() { return static_cast<uint16_t>(header_.flags.rcode); }\n\n  /**\n   * @brief Parse the incoming query and create a context object for the filter\n   *\n   * @param client_request a structure containing addressing information and the buffer received\n   * from a client\n   */\n  DnsQueryContextPtr createQueryContext(Network::UdpRecvData& client_request,\n                                        DnsParserCounters& counters);\n  /**\n   * @param buffer a reference to the incoming request object received by the listener\n   * @return bool true if all DNS records and flags were successfully parsed from the buffer\n   */\n  bool parseDnsObject(DnsQueryContextPtr& context, const Buffer::InstancePtr& buffer);\n\nprivate:\n  /**\n   * @brief Adds a new DNS SRV Answer record for a given service to the service map\n   *\n   * @param context the query context for which we are generating a response\n   * @param query_rec to which the answer is matched.\n   * @param service the service that is returned in the answer record\n   * @param move_rec move the passed in record to the underlying connection instead of creating a\n   * new object\n   */\n  void addNewDnsSrvAnswerRecord(DnsQueryContextPtr& context, const DnsQueryRecord& query_rec,\n                                DnsSrvRecordPtr service);\n  /**\n   * @brief sets the response code returned to the client\n   *\n   * @param context the query context for which we are generating a response\n   * @param queries specify the number of query records contained in the response\n   * @param answers specify the number of answer records contained in the response\n   */\n  void setResponseCode(DnsQueryContextPtr& context, const uint16_t serialized_queries,\n                       const uint16_t serialized_answers);\n\n  /**\n   * @brief sets the flags in the DNS header of the response sent to a client\n   *\n   * @param context the query context for which we are 
generating a response\n   * @param queries specify the number of query records contained in the response\n   * @param answers specify the number of answer records contained in the response\n   * @param authority_rrs specify the number of authority records contained in the response\n   * @param additional_rrs specify the number of additional records contained in the response\n   */\n  void setDnsResponseFlags(DnsQueryContextPtr& context, const uint16_t questions,\n                           const uint16_t answers, const uint16_t authority_rrs,\n                           const uint16_t additional_rrs);\n\n  /**\n   * @brief Extracts a DNS query name from a buffer\n   *\n   * @param buffer the buffer from which the name is extracted\n   * @param available_bytes the size of the remaining bytes in the buffer on which we can operate\n   * @param name_offset the offset from which parsing begins and ends. The updated value is\n   * returned to the caller\n   */\n  const std::string parseDnsNameRecord(const Buffer::InstancePtr& buffer, uint64_t& available_bytes,\n                                       uint64_t& name_offset);\n\n  bool recursion_available_;\n  TimeSource& timesource_;\n  uint64_t retry_count_;\n  Stats::Histogram& query_latency_histogram_;\n  DnsHeader header_;\n  DnsHeader response_header_;\n  Random::RandomGenerator& rng_;\n};\n\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/udp_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"hash_policy_lib\",\n    srcs = [\"hash_policy_impl.cc\"],\n    hdrs = [\"hash_policy_impl.h\"],\n    deps = [\n        \"//include/envoy/udp:hash_policy_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hash_lib\",\n        \"@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"udp_proxy_filter_lib\",\n    srcs = [\"udp_proxy_filter.cc\"],\n    hdrs = [\"udp_proxy_filter.h\"],\n    deps = [\n        \":hash_policy_lib\",\n        \"//include/envoy/event:file_event_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listener_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/network:socket_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":udp_proxy_filter_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/filters/udp/udp_proxy/config.cc",
    "content": "#include \"extensions/filters/udp/udp_proxy/config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace UdpProxy {\n\nstatic Registry::RegisterFactory<UdpProxyFilterConfigFactory,\n                                 Server::Configuration::NamedUdpListenerFilterConfigFactory>\n    register_;\n\n}\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/udp_proxy/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h\"\n#include \"envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.validate.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/udp/udp_proxy/udp_proxy_filter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace UdpProxy {\n\n/**\n * Config registration for the UDP proxy filter. @see NamedUdpListenerFilterConfigFactory.\n */\nclass UdpProxyFilterConfigFactory\n    : public Server::Configuration::NamedUdpListenerFilterConfigFactory {\npublic:\n  // NamedUdpListenerFilterConfigFactory\n  Network::UdpListenerFilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& config,\n                               Server::Configuration::ListenerFactoryContext& context) override {\n    auto shared_config = std::make_shared<UdpProxyFilterConfig>(\n        context.clusterManager(), context.timeSource(), context.scope(),\n        MessageUtil::downcastAndValidate<\n            const envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig&>(\n            config, context.messageValidationVisitor()));\n    return [shared_config](Network::UdpListenerFilterManager& filter_manager,\n                           Network::UdpReadFilterCallbacks& callbacks) -> void {\n      filter_manager.addReadFilter(std::make_unique<UdpProxyFilter>(callbacks, shared_config));\n    };\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig>();\n  }\n\n  std::string name() const override { return \"envoy.filters.udp_listener.udp_proxy\"; }\n};\n\n} // namespace UdpProxy\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/udp_proxy/hash_policy_impl.cc",
    "content": "#include \"extensions/filters/udp/udp_proxy/hash_policy_impl.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace UdpProxy {\n\nclass SourceIpHashMethod : public HashPolicyImpl::HashMethod {\npublic:\n  absl::optional<uint64_t>\n  evaluate(const Network::Address::Instance& downstream_addr) const override {\n    if (downstream_addr.ip()) {\n      ASSERT(!downstream_addr.ip()->addressAsString().empty());\n      return HashUtil::xxHash64(downstream_addr.ip()->addressAsString());\n    }\n\n    return absl::nullopt;\n  }\n};\n\nHashPolicyImpl::HashPolicyImpl(\n    const absl::Span<const UdpProxyConfig::HashPolicy* const>& hash_policies) {\n  ASSERT(hash_policies.size() == 1);\n  switch (hash_policies[0]->policy_specifier_case()) {\n  case UdpProxyConfig::HashPolicy::PolicySpecifierCase::kSourceIp:\n    hash_impl_ = std::make_unique<SourceIpHashMethod>();\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nabsl::optional<uint64_t>\nHashPolicyImpl::generateHash(const Network::Address::Instance& downstream_addr) const {\n  return hash_impl_->evaluate(downstream_addr);\n}\n\n} // namespace UdpProxy\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/udp_proxy/hash_policy_impl.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h\"\n#include \"envoy/udp/hash_policy.h\"\n\n#include \"common/common/hash.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace UdpProxy {\n\nusing namespace envoy::extensions::filters::udp::udp_proxy::v3;\n\n/**\n * Implementation of HashPolicy that reads from the UDP proxy filter config.\n */\nclass HashPolicyImpl : public Udp::HashPolicy {\npublic:\n  explicit HashPolicyImpl(const absl::Span<const UdpProxyConfig::HashPolicy* const>& hash_policies);\n\n  // Udp::HashPolicy\n  absl::optional<uint64_t>\n  generateHash(const Network::Address::Instance& downstream_addr) const override;\n\n  class HashMethod {\n  public:\n    virtual ~HashMethod() = default;\n    virtual absl::optional<uint64_t>\n    evaluate(const Network::Address::Instance& downstream_addr) const PURE;\n  };\n\n  using HashMethodPtr = std::unique_ptr<HashMethod>;\n\nprivate:\n  HashMethodPtr hash_impl_;\n};\n\n} // namespace UdpProxy\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc",
    "content": "#include \"extensions/filters/udp/udp_proxy/udp_proxy_filter.h\"\n\n#include \"envoy/network/listener.h\"\n\n#include \"common/network/socket_option_factory.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace UdpProxy {\n\nUdpProxyFilter::UdpProxyFilter(Network::UdpReadFilterCallbacks& callbacks,\n                               const UdpProxyFilterConfigSharedPtr& config)\n    : UdpListenerReadFilter(callbacks), config_(config),\n      cluster_update_callbacks_(\n          config->clusterManager().addThreadLocalClusterUpdateCallbacks(*this)) {\n  Upstream::ThreadLocalCluster* cluster = config->clusterManager().get(config->cluster());\n  if (cluster != nullptr) {\n    onClusterAddOrUpdate(*cluster);\n  }\n}\n\nvoid UdpProxyFilter::onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) {\n  if (cluster.info()->name() != config_->cluster()) {\n    return;\n  }\n\n  ENVOY_LOG(debug, \"udp proxy: attaching to cluster {}\", cluster.info()->name());\n  ASSERT(cluster_info_ == absl::nullopt || &cluster_info_.value().cluster_ != &cluster);\n  cluster_info_.emplace(*this, cluster);\n}\n\nvoid UdpProxyFilter::onClusterRemoval(const std::string& cluster) {\n  if (cluster != config_->cluster()) {\n    return;\n  }\n\n  ENVOY_LOG(debug, \"udp proxy: detaching from cluster {}\", cluster);\n  cluster_info_.reset();\n}\n\nvoid UdpProxyFilter::onData(Network::UdpRecvData& data) {\n  if (!cluster_info_.has_value()) {\n    config_->stats().downstream_sess_no_route_.inc();\n    return;\n  }\n\n  cluster_info_.value().onData(data);\n}\n\nvoid UdpProxyFilter::onReceiveError(Api::IoError::IoErrorCode) {\n  config_->stats().downstream_sess_rx_errors_.inc();\n}\n\nUdpProxyFilter::ClusterInfo::ClusterInfo(UdpProxyFilter& filter,\n                                         Upstream::ThreadLocalCluster& cluster)\n    : filter_(filter), cluster_(cluster),\n      cluster_stats_(generateStats(cluster.info()->statsScope())),\n      
member_update_cb_handle_(cluster.prioritySet().addMemberUpdateCb(\n          [this](const Upstream::HostVector&, const Upstream::HostVector& hosts_removed) {\n            for (const auto& host : hosts_removed) {\n              // This is similar to removeSession() but slightly different due to removeSession()\n              // also handling deletion of the host to session map entry if there are no sessions\n              // left. It would be nice to unify the logic but that can be cleaned up later.\n              auto host_sessions_it = host_to_sessions_.find(host.get());\n              if (host_sessions_it != host_to_sessions_.end()) {\n                for (const auto& session : host_sessions_it->second) {\n                  ASSERT(sessions_.count(session) == 1);\n                  sessions_.erase(session);\n                }\n                host_to_sessions_.erase(host_sessions_it);\n              }\n            }\n          })) {}\n\nUdpProxyFilter::ClusterInfo::~ClusterInfo() {\n  member_update_cb_handle_->remove();\n  // Sanity check the session accounting. 
This is not as fast as a straight teardown, but this is\n  // not a performance critical path.\n  while (!sessions_.empty()) {\n    removeSession(sessions_.begin()->get());\n  }\n  ASSERT(host_to_sessions_.empty());\n}\n\nvoid UdpProxyFilter::ClusterInfo::onData(Network::UdpRecvData& data) {\n  const auto active_session_it = sessions_.find(data.addresses_);\n  ActiveSession* active_session;\n  if (active_session_it == sessions_.end()) {\n    if (!cluster_.info()\n             ->resourceManager(Upstream::ResourcePriority::Default)\n             .connections()\n             .canCreate()) {\n      cluster_.info()->stats().upstream_cx_overflow_.inc();\n      return;\n    }\n\n    UdpLoadBalancerContext context(filter_.config_->hashPolicy(), data.addresses_.peer_);\n    Upstream::HostConstSharedPtr host = cluster_.loadBalancer().chooseHost(&context);\n    if (host == nullptr) {\n      ENVOY_LOG(debug, \"cannot find any valid host. failed to create a session.\");\n      cluster_.info()->stats().upstream_cx_none_healthy_.inc();\n      return;\n    }\n\n    active_session = createSession(std::move(data.addresses_), host);\n  } else {\n    active_session = active_session_it->get();\n    if (active_session->host().health() == Upstream::Host::Health::Unhealthy) {\n      // If a host becomes unhealthy, we optimally would like to replace it with a new session\n      // to a healthy host. 
We may eventually want to make this behavior configurable, but for now\n      // this will be the universal behavior.\n\n      UdpLoadBalancerContext context(filter_.config_->hashPolicy(), data.addresses_.peer_);\n      Upstream::HostConstSharedPtr host = cluster_.loadBalancer().chooseHost(&context);\n      if (host != nullptr && host->health() != Upstream::Host::Health::Unhealthy &&\n          host.get() != &active_session->host()) {\n        ENVOY_LOG(debug, \"upstream session unhealthy, recreating the session\");\n        removeSession(active_session);\n        active_session = createSession(std::move(data.addresses_), host);\n      } else {\n        // In this case we could not get a better host, so just keep using the current session.\n        ENVOY_LOG(trace, \"upstream session unhealthy, but unable to get a better host\");\n      }\n    }\n  }\n\n  active_session->write(*data.buffer_);\n}\n\nUdpProxyFilter::ActiveSession*\nUdpProxyFilter::ClusterInfo::createSession(Network::UdpRecvData::LocalPeerAddresses&& addresses,\n                                           const Upstream::HostConstSharedPtr& host) {\n  auto new_session = std::make_unique<ActiveSession>(*this, std::move(addresses), host);\n  auto new_session_ptr = new_session.get();\n  sessions_.emplace(std::move(new_session));\n  host_to_sessions_[host.get()].emplace(new_session_ptr);\n  return new_session_ptr;\n}\n\nvoid UdpProxyFilter::ClusterInfo::removeSession(const ActiveSession* session) {\n  // First remove from the host to sessions map.\n  ASSERT(host_to_sessions_[&session->host()].count(session) == 1);\n  auto host_sessions_it = host_to_sessions_.find(&session->host());\n  host_sessions_it->second.erase(session);\n  if (host_sessions_it->second.empty()) {\n    host_to_sessions_.erase(host_sessions_it);\n  }\n\n  // Now remove it from the primary map.\n  ASSERT(sessions_.count(session) == 1);\n  sessions_.erase(session);\n}\n\nUdpProxyFilter::ActiveSession::ActiveSession(ClusterInfo& cluster,\n  
                                           Network::UdpRecvData::LocalPeerAddresses&& addresses,\n                                             const Upstream::HostConstSharedPtr& host)\n    : cluster_(cluster), use_original_src_ip_(cluster_.filter_.config_->usingOriginalSrcIp()),\n      addresses_(std::move(addresses)), host_(host),\n      idle_timer_(cluster.filter_.read_callbacks_->udpListener().dispatcher().createTimer(\n          [this] { onIdleTimer(); })),\n      // NOTE: The socket call can only fail due to memory/fd exhaustion. No local ephemeral port\n      //       is bound until the first packet is sent to the upstream host.\n      socket_(cluster.filter_.createSocket(host)),\n      socket_event_(socket_->ioHandle().createFileEvent(\n          cluster.filter_.read_callbacks_->udpListener().dispatcher(),\n          [this](uint32_t) { onReadReady(); }, Event::PlatformDefaultTriggerType,\n          Event::FileReadyType::Read)) {\n  ENVOY_LOG(debug, \"creating new session: downstream={} local={} upstream={}\",\n            addresses_.peer_->asStringView(), addresses_.local_->asStringView(),\n            host->address()->asStringView());\n  cluster_.filter_.config_->stats().downstream_sess_total_.inc();\n  cluster_.filter_.config_->stats().downstream_sess_active_.inc();\n  cluster_.cluster_.info()\n      ->resourceManager(Upstream::ResourcePriority::Default)\n      .connections()\n      .inc();\n\n  if (use_original_src_ip_) {\n    const Network::Socket::OptionsSharedPtr socket_options =\n        Network::SocketOptionFactory::buildIpTransparentOptions();\n    const bool ok = Network::Socket::applyOptions(\n        socket_options, *socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n\n    RELEASE_ASSERT(ok, \"Should never occur!\");\n    ENVOY_LOG(debug, \"The original src is enabled for address {}.\",\n              addresses_.peer_->asStringView());\n  }\n\n  // TODO(mattklein123): Enable dropped packets socket option. 
In general the Socket abstraction\n  // does not work well right now for client sockets. It's too heavy weight and is aimed at listener\n  // sockets. We need to figure out how to either refactor Socket into something that works better\n  // for this use case or allow the socket option abstractions to work directly against an IO\n  // handle.\n}\n\nUdpProxyFilter::ActiveSession::~ActiveSession() {\n  ENVOY_LOG(debug, \"deleting the session: downstream={} local={} upstream={}\",\n            addresses_.peer_->asStringView(), addresses_.local_->asStringView(),\n            host_->address()->asStringView());\n  cluster_.filter_.config_->stats().downstream_sess_active_.dec();\n  cluster_.cluster_.info()\n      ->resourceManager(Upstream::ResourcePriority::Default)\n      .connections()\n      .dec();\n}\n\nvoid UdpProxyFilter::ActiveSession::onIdleTimer() {\n  ENVOY_LOG(debug, \"session idle timeout: downstream={} local={}\", addresses_.peer_->asStringView(),\n            addresses_.local_->asStringView());\n  cluster_.filter_.config_->stats().idle_timeout_.inc();\n  cluster_.removeSession(this);\n}\n\nvoid UdpProxyFilter::ActiveSession::onReadReady() {\n  idle_timer_->enableTimer(cluster_.filter_.config_->sessionTimeout());\n\n  // TODO(mattklein123): We should not be passing *addresses_.local_ to this function as we are\n  //                     not trying to populate the local address for received packets.\n  uint32_t packets_dropped = 0;\n  const Api::IoErrorPtr result = Network::Utility::readPacketsFromSocket(\n      socket_->ioHandle(), *addresses_.local_, *this, cluster_.filter_.config_->timeSource(),\n      packets_dropped);\n  // TODO(mattklein123): Handle no error when we limit the number of packets read.\n  if (result->getErrorCode() != Api::IoError::IoErrorCode::Again) {\n    cluster_.cluster_stats_.sess_rx_errors_.inc();\n  }\n  // Flush out buffered data at the end of IO event.\n  cluster_.filter_.read_callbacks_->udpListener().flush();\n}\n\nvoid 
UdpProxyFilter::ActiveSession::write(const Buffer::Instance& buffer) {\n  ENVOY_LOG(trace, \"writing {} byte datagram upstream: downstream={} local={} upstream={}\",\n            buffer.length(), addresses_.peer_->asStringView(), addresses_.local_->asStringView(),\n            host_->address()->asStringView());\n  const uint64_t buffer_length = buffer.length();\n  cluster_.filter_.config_->stats().downstream_sess_rx_bytes_.add(buffer_length);\n  cluster_.filter_.config_->stats().downstream_sess_rx_datagrams_.inc();\n\n  idle_timer_->enableTimer(cluster_.filter_.config_->sessionTimeout());\n\n  // NOTE: On the first write, a local ephemeral port is bound, and thus this write can fail due to\n  //       port exhaustion.\n  // NOTE: We do not specify the local IP to use for the sendmsg call if use_original_src_ip_ is not\n  //       set. We allow the OS to select the right IP based on outbound routing rules if\n  //       use_original_src_ip_ is not set, else use downstream peer IP as local IP.\n  const Network::Address::Ip* local_ip = use_original_src_ip_ ? 
addresses_.peer_->ip() : nullptr;\n  Api::IoCallUint64Result rc =\n      Network::Utility::writeToSocket(socket_->ioHandle(), buffer, local_ip, *host_->address());\n  if (!rc.ok()) {\n    cluster_.cluster_stats_.sess_tx_errors_.inc();\n  } else {\n    cluster_.cluster_stats_.sess_tx_datagrams_.inc();\n    cluster_.cluster_.info()->stats().upstream_cx_tx_bytes_total_.add(buffer_length);\n  }\n}\n\nvoid UdpProxyFilter::ActiveSession::processPacket(Network::Address::InstanceConstSharedPtr,\n                                                  Network::Address::InstanceConstSharedPtr,\n                                                  Buffer::InstancePtr buffer, MonotonicTime) {\n  ENVOY_LOG(trace, \"writing {} byte datagram downstream: downstream={} local={} upstream={}\",\n            buffer->length(), addresses_.peer_->asStringView(), addresses_.local_->asStringView(),\n            host_->address()->asStringView());\n  const uint64_t buffer_length = buffer->length();\n\n  cluster_.cluster_stats_.sess_rx_datagrams_.inc();\n  cluster_.cluster_.info()->stats().upstream_cx_rx_bytes_total_.add(buffer_length);\n\n  Network::UdpSendData data{addresses_.local_->ip(), *addresses_.peer_, *buffer};\n  const Api::IoCallUint64Result rc = cluster_.filter_.read_callbacks_->udpListener().send(data);\n  if (!rc.ok()) {\n    cluster_.filter_.config_->stats().downstream_sess_tx_errors_.inc();\n  } else {\n    cluster_.filter_.config_->stats().downstream_sess_tx_bytes_.add(buffer_length);\n    cluster_.filter_.config_->stats().downstream_sess_tx_datagrams_.inc();\n  }\n}\n\n} // namespace UdpProxy\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h",
    "content": "#pragma once\n\n#include \"envoy/event/file_event.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/network/socket_impl.h\"\n#include \"common/network/socket_interface.h\"\n#include \"common/network/utility.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n\n#include \"extensions/filters/udp/udp_proxy/hash_policy_impl.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n\n// TODO(mattklein123): UDP session access logging.\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace UdpProxy {\n\n/**\n * All UDP proxy downstream stats. @see stats_macros.h\n */\n#define ALL_UDP_PROXY_DOWNSTREAM_STATS(COUNTER, GAUGE)                                             \\\n  COUNTER(downstream_sess_no_route)                                                                \\\n  COUNTER(downstream_sess_rx_bytes)                                                                \\\n  COUNTER(downstream_sess_rx_datagrams)                                                            \\\n  COUNTER(downstream_sess_rx_errors)                                                               \\\n  COUNTER(downstream_sess_total)                                                                   \\\n  COUNTER(downstream_sess_tx_bytes)                                                                \\\n  COUNTER(downstream_sess_tx_datagrams)                                                            \\\n  COUNTER(downstream_sess_tx_errors)                                                               \\\n  COUNTER(idle_timeout)                                                                            \\\n  GAUGE(downstream_sess_active, Accumulate)\n\n/**\n * Struct definition for all UDP proxy downstream stats. 
@see stats_macros.h\n */\nstruct UdpProxyDownstreamStats {\n  ALL_UDP_PROXY_DOWNSTREAM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * All UDP proxy upstream cluster stats. @see stats_macros.h\n */\n#define ALL_UDP_PROXY_UPSTREAM_STATS(COUNTER)                                                      \\\n  COUNTER(sess_rx_datagrams)                                                                       \\\n  COUNTER(sess_rx_errors)                                                                          \\\n  COUNTER(sess_tx_datagrams)                                                                       \\\n  COUNTER(sess_tx_errors)\n\n/**\n * Struct definition for all UDP proxy upstream stats. @see stats_macros.h\n */\nstruct UdpProxyUpstreamStats {\n  ALL_UDP_PROXY_UPSTREAM_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nclass UdpProxyFilterConfig {\npublic:\n  UdpProxyFilterConfig(Upstream::ClusterManager& cluster_manager, TimeSource& time_source,\n                       Stats::Scope& root_scope,\n                       const envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig& config)\n      : cluster_manager_(cluster_manager), time_source_(time_source), cluster_(config.cluster()),\n        session_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, idle_timeout, 60 * 1000)),\n        use_original_src_ip_(config.use_original_src_ip()),\n        stats_(generateStats(config.stat_prefix(), root_scope)) {\n    if (use_original_src_ip_ && !Api::OsSysCallsSingleton::get().supportsIpTransparent()) {\n      ExceptionUtil::throwEnvoyException(\n          \"The platform does not support either IP_TRANSPARENT or IPV6_TRANSPARENT. 
Or the envoy \"\n          \"is not running with the CAP_NET_ADMIN capability.\");\n    }\n    if (!config.hash_policies().empty()) {\n      hash_policy_ = std::make_unique<HashPolicyImpl>(config.hash_policies());\n    }\n  }\n\n  const std::string& cluster() const { return cluster_; }\n  Upstream::ClusterManager& clusterManager() const { return cluster_manager_; }\n  std::chrono::milliseconds sessionTimeout() const { return session_timeout_; }\n  bool usingOriginalSrcIp() const { return use_original_src_ip_; }\n  const Udp::HashPolicy* hashPolicy() const { return hash_policy_.get(); }\n  UdpProxyDownstreamStats& stats() const { return stats_; }\n  TimeSource& timeSource() const { return time_source_; }\n\nprivate:\n  static UdpProxyDownstreamStats generateStats(const std::string& stat_prefix,\n                                               Stats::Scope& scope) {\n    const auto final_prefix = absl::StrCat(\"udp.\", stat_prefix);\n    return {ALL_UDP_PROXY_DOWNSTREAM_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),\n                                           POOL_GAUGE_PREFIX(scope, final_prefix))};\n  }\n\n  Upstream::ClusterManager& cluster_manager_;\n  TimeSource& time_source_;\n  const std::string cluster_;\n  const std::chrono::milliseconds session_timeout_;\n  const bool use_original_src_ip_;\n  std::unique_ptr<const HashPolicyImpl> hash_policy_;\n  mutable UdpProxyDownstreamStats stats_;\n};\n\nusing UdpProxyFilterConfigSharedPtr = std::shared_ptr<const UdpProxyFilterConfig>;\n\n/**\n * Currently, it only implements the hash based routing.\n */\nclass UdpLoadBalancerContext : public Upstream::LoadBalancerContextBase {\npublic:\n  UdpLoadBalancerContext(const Udp::HashPolicy* hash_policy,\n                         const Network::Address::InstanceConstSharedPtr& peer_address) {\n    if (hash_policy) {\n      hash_ = hash_policy->generateHash(*peer_address);\n    }\n  }\n\n  absl::optional<uint64_t> computeHashKey() override { return hash_; }\n\nprivate:\n  
absl::optional<uint64_t> hash_;\n};\n\nclass UdpProxyFilter : public Network::UdpListenerReadFilter,\n                       public Upstream::ClusterUpdateCallbacks,\n                       Logger::Loggable<Logger::Id::filter> {\npublic:\n  UdpProxyFilter(Network::UdpReadFilterCallbacks& callbacks,\n                 const UdpProxyFilterConfigSharedPtr& config);\n\n  // Network::UdpListenerReadFilter\n  void onData(Network::UdpRecvData& data) override;\n  void onReceiveError(Api::IoError::IoErrorCode error_code) override;\n\nprivate:\n  class ClusterInfo;\n\n  /**\n   * An active session is similar to a TCP connection. It binds a 4-tuple (downstream IP/port, local\n   * IP/port) to a selected upstream host for the purpose of packet forwarding. Unlike a TCP\n   * connection, there is obviously no concept of session destruction beyond internally tracked data\n   * such as an idle timeout, maximum packets, etc. Once a session is created, downstream packets\n   * will be hashed to the same session and will be forwarded to the same upstream, using the same\n   * local ephemeral IP/port.\n   */\n  class ActiveSession : public Network::UdpPacketProcessor {\n  public:\n    ActiveSession(ClusterInfo& parent, Network::UdpRecvData::LocalPeerAddresses&& addresses,\n                  const Upstream::HostConstSharedPtr& host);\n    ~ActiveSession() override;\n    const Network::UdpRecvData::LocalPeerAddresses& addresses() const { return addresses_; }\n    const Upstream::Host& host() const { return *host_; }\n    void write(const Buffer::Instance& buffer);\n\n  private:\n    void onIdleTimer();\n    void onReadReady();\n\n    // Network::UdpPacketProcessor\n    void processPacket(Network::Address::InstanceConstSharedPtr local_address,\n                       Network::Address::InstanceConstSharedPtr peer_address,\n                       Buffer::InstancePtr buffer, MonotonicTime receive_time) override;\n    uint64_t maxPacketSize() const override {\n      // TODO(mattklein123): 
Support configurable/jumbo frames when proxying to upstream.\n      // Eventually we will want to support some type of PROXY header when doing L4 QUIC\n      // forwarding.\n      return Network::MAX_UDP_PACKET_SIZE;\n    }\n\n    ClusterInfo& cluster_;\n    const bool use_original_src_ip_;\n    const Network::UdpRecvData::LocalPeerAddresses addresses_;\n    const Upstream::HostConstSharedPtr host_;\n    // TODO(mattklein123): Consider replacing an idle timer for each session with a last used\n    // time stamp and a periodic scan of all sessions to look for timeouts. This solution is simple,\n    // though it might not perform well for high volume traffic. Note that this is how TCP proxy\n    // idle timeouts work so we should consider unifying the implementation if we move to a time\n    // stamp and scan approach.\n    const Event::TimerPtr idle_timer_;\n    // The socket is used for writing packets to the selected upstream host as well as receiving\n    // packets from the upstream host. Note that a a local ephemeral port is bound on the first\n    // write to the upstream host.\n    const Network::SocketPtr socket_;\n    const Event::FileEventPtr socket_event_;\n  };\n\n  using ActiveSessionPtr = std::unique_ptr<ActiveSession>;\n\n  struct HeterogeneousActiveSessionHash {\n    // Specifying is_transparent indicates to the library infrastructure that\n    // type-conversions should not be applied when calling find(), but instead\n    // pass the actual types of the contained and searched-for objects directly to\n    // these functors. 
See\n    // https://en.cppreference.com/w/cpp/utility/functional/less_void for an\n    // official reference, and https://abseil.io/tips/144 for a description of\n    // using it in the context of absl.\n    using is_transparent = void; // NOLINT(readability-identifier-naming)\n\n    size_t operator()(const Network::UdpRecvData::LocalPeerAddresses& value) const {\n      return absl::Hash<const Network::UdpRecvData::LocalPeerAddresses>()(value);\n    }\n    size_t operator()(const ActiveSessionPtr& value) const {\n      return absl::Hash<const Network::UdpRecvData::LocalPeerAddresses>()(value->addresses());\n    }\n    size_t operator()(const ActiveSession* value) const {\n      return absl::Hash<const Network::UdpRecvData::LocalPeerAddresses>()(value->addresses());\n    }\n  };\n\n  struct HeterogeneousActiveSessionEqual {\n    // See description for HeterogeneousActiveSessionHash::is_transparent.\n    using is_transparent = void; // NOLINT(readability-identifier-naming)\n\n    bool operator()(const ActiveSessionPtr& lhs,\n                    const Network::UdpRecvData::LocalPeerAddresses& rhs) const {\n      return lhs->addresses() == rhs;\n    }\n    bool operator()(const ActiveSessionPtr& lhs, const ActiveSessionPtr& rhs) const {\n      return lhs->addresses() == rhs->addresses();\n    }\n    bool operator()(const ActiveSessionPtr& lhs, const ActiveSession* rhs) const {\n      return lhs->addresses() == rhs->addresses();\n    }\n  };\n\n  /**\n   * Wraps all cluster specific UDP processing including session tracking, stats, etc. 
In the future\n   * we will very likely support different types of routing to multiple upstream clusters.\n   */\n  class ClusterInfo {\n  public:\n    ClusterInfo(UdpProxyFilter& filter, Upstream::ThreadLocalCluster& cluster);\n    ~ClusterInfo();\n    void onData(Network::UdpRecvData& data);\n    void removeSession(const ActiveSession* session);\n\n    UdpProxyFilter& filter_;\n    Upstream::ThreadLocalCluster& cluster_;\n    UdpProxyUpstreamStats cluster_stats_;\n\n  private:\n    ActiveSession* createSession(Network::UdpRecvData::LocalPeerAddresses&& addresses,\n                                 const Upstream::HostConstSharedPtr& host);\n    static UdpProxyUpstreamStats generateStats(Stats::Scope& scope) {\n      const auto final_prefix = \"udp\";\n      return {ALL_UDP_PROXY_UPSTREAM_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))};\n    }\n\n    Envoy::Common::CallbackHandle* member_update_cb_handle_;\n    absl::flat_hash_set<ActiveSessionPtr, HeterogeneousActiveSessionHash,\n                        HeterogeneousActiveSessionEqual>\n        sessions_;\n    absl::flat_hash_map<const Upstream::Host*, absl::flat_hash_set<const ActiveSession*>>\n        host_to_sessions_;\n  };\n\n  virtual Network::SocketPtr createSocket(const Upstream::HostConstSharedPtr& host) {\n    // Virtual so this can be overridden in unit tests.\n    return std::make_unique<Network::SocketImpl>(Network::Socket::Type::Datagram, host->address());\n  }\n\n  // Upstream::ClusterUpdateCallbacks\n  void onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) final;\n  void onClusterRemoval(const std::string& cluster_name) override;\n\n  const UdpProxyFilterConfigSharedPtr config_;\n  const Upstream::ClusterUpdateCallbacksHandlePtr cluster_update_callbacks_;\n  // Right now we support a single cluster to route to. 
It is highly likely in the future that\n  // we will support additional routing options either using filter chain matching, weighting,\n  // etc.\n  absl::optional<ClusterInfo> cluster_info_;\n};\n\n} // namespace UdpProxy\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/grpc_credentials/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # well known names files are public as long as they exist.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/grpc_credentials/aws_iam/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# AWS IAM gRPC Credentials\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    external_deps = [\"grpc\"],\n    security_posture = \"data_plane_agnostic\",\n    status = \"alpha\",\n    deps = [\n        \"//include/envoy/grpc:google_grpc_creds_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/grpc:google_grpc_creds_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/common/aws:credentials_provider_impl_lib\",\n        \"//source/extensions/common/aws:region_provider_impl_lib\",\n        \"//source/extensions/common/aws:signer_impl_lib\",\n        \"//source/extensions/common/aws:utility_lib\",\n        \"//source/extensions/grpc_credentials:well_known_names\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/grpc_credential/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/grpc_credentials/aws_iam/config.cc",
    "content": "#include \"extensions/grpc_credentials/aws_iam/config.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/grpc_credential/v3/aws_iam.pb.h\"\n#include \"envoy/config/grpc_credential/v3/aws_iam.pb.validate.h\"\n#include \"envoy/grpc/google_grpc_creds.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/grpc/google_grpc_creds_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n\n#include \"extensions/common/aws/credentials_provider_impl.h\"\n#include \"extensions/common/aws/region_provider_impl.h\"\n#include \"extensions/common/aws/signer_impl.h\"\n#include \"extensions/common/aws/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace GrpcCredentials {\nnamespace AwsIam {\n\nstd::shared_ptr<grpc::ChannelCredentials> AwsIamGrpcCredentialsFactory::getChannelCredentials(\n    const envoy::config::core::v3::GrpcService& grpc_service_config, Api::Api& api) {\n\n  const auto& google_grpc = grpc_service_config.google_grpc();\n  std::shared_ptr<grpc::ChannelCredentials> creds =\n      Grpc::CredsUtility::defaultSslChannelCredentials(grpc_service_config, api);\n\n  std::shared_ptr<grpc::CallCredentials> call_creds;\n  for (const auto& credential : google_grpc.call_credentials()) {\n    switch (credential.credential_specifier_case()) {\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::CallCredentials::\n        CredentialSpecifierCase::kFromPlugin: {\n      if (credential.from_plugin().name() == GrpcCredentialsNames::get().AwsIam) {\n        AwsIamGrpcCredentialsFactory credentials_factory;\n        // We don't deal with validation failures here at runtime today, see\n        // https://github.com/envoyproxy/envoy/issues/8010.\n        const Envoy::ProtobufTypes::MessagePtr config_message =\n            Envoy::Config::Utility::translateToFactoryConfig(\n                
credential.from_plugin(), ProtobufMessage::getNullValidationVisitor(),\n                credentials_factory);\n        const auto& config = Envoy::MessageUtil::downcastAndValidate<\n            const envoy::config::grpc_credential::v3::AwsIamConfig&>(\n            *config_message, ProtobufMessage::getNullValidationVisitor());\n        auto credentials_provider = std::make_shared<Common::Aws::DefaultCredentialsProviderChain>(\n            api, Common::Aws::Utility::metadataFetcher);\n        auto signer = std::make_unique<Common::Aws::SignerImpl>(\n            config.service_name(), getRegion(config), credentials_provider, api.timeSource());\n        std::shared_ptr<grpc::CallCredentials> new_call_creds = grpc::MetadataCredentialsFromPlugin(\n            std::make_unique<AwsIamHeaderAuthenticator>(std::move(signer)));\n        if (call_creds == nullptr) {\n          call_creds = new_call_creds;\n        } else {\n          call_creds = grpc::CompositeCallCredentials(call_creds, new_call_creds);\n        }\n      }\n      break;\n    }\n    default:\n      // unused credential types\n      continue;\n    }\n  }\n\n  if (call_creds != nullptr) {\n    return grpc::CompositeChannelCredentials(creds, call_creds);\n  }\n\n  return creds;\n}\n\nstd::string AwsIamGrpcCredentialsFactory::getRegion(\n    const envoy::config::grpc_credential::v3::AwsIamConfig& config) {\n  Common::Aws::RegionProviderPtr region_provider;\n  if (!config.region().empty()) {\n    region_provider = std::make_unique<Common::Aws::StaticRegionProvider>(config.region());\n  } else {\n    region_provider = std::make_unique<Common::Aws::EnvironmentRegionProvider>();\n  }\n\n  if (!region_provider->getRegion().has_value()) {\n    throw EnvoyException(\"Could not determine AWS region. 
\"\n                         \"If you are not running Envoy in EC2 or ECS, \"\n                         \"provide the region in the plugin configuration.\");\n  }\n\n  return *region_provider->getRegion();\n}\n\ngrpc::Status\nAwsIamHeaderAuthenticator::GetMetadata(grpc::string_ref service_url, grpc::string_ref method_name,\n                                       const grpc::AuthContext&,\n                                       std::multimap<grpc::string, grpc::string>* metadata) {\n\n  auto message = buildMessageToSign(absl::string_view(service_url.data(), service_url.length()),\n                                    absl::string_view(method_name.data(), method_name.length()));\n\n  try {\n    signer_->sign(message, false);\n  } catch (const EnvoyException& e) {\n    return grpc::Status(grpc::StatusCode::INTERNAL, e.what());\n  }\n\n  signedHeadersToMetadata(message.headers(), *metadata);\n\n  return grpc::Status::OK;\n}\n\nHttp::RequestMessageImpl\nAwsIamHeaderAuthenticator::buildMessageToSign(absl::string_view service_url,\n                                              absl::string_view method_name) {\n\n  const auto uri = fmt::format(\"{}/{}\", service_url, method_name);\n  absl::string_view host;\n  absl::string_view path;\n  Http::Utility::extractHostPathFromUri(uri, host, path);\n\n  Http::RequestMessageImpl message;\n  message.headers().setReferenceMethod(Http::Headers::get().MethodValues.Post);\n  message.headers().setHost(host);\n  message.headers().setPath(path);\n\n  return message;\n}\n\nvoid AwsIamHeaderAuthenticator::signedHeadersToMetadata(\n    const Http::HeaderMap& headers, std::multimap<grpc::string, grpc::string>& metadata) {\n\n  headers.iterate([&metadata](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate {\n    const auto& key = entry.key().getStringView();\n    // Skip pseudo-headers\n    if (key.empty() || key[0] == ':') {\n      return Http::HeaderMap::Iterate::Continue;\n    }\n    metadata.emplace(key, 
entry.value().getStringView());\n    return Http::HeaderMap::Iterate::Continue;\n  });\n}\n\nREGISTER_FACTORY(AwsIamGrpcCredentialsFactory, Grpc::GoogleGrpcCredentialsFactory);\n\n} // namespace AwsIam\n} // namespace GrpcCredentials\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/grpc_credentials/aws_iam/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/grpc_credential/v3/aws_iam.pb.h\"\n#include \"envoy/grpc/google_grpc_creds.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/http/message_impl.h\"\n\n#include \"extensions/common/aws/signer.h\"\n#include \"extensions/grpc_credentials/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace GrpcCredentials {\nnamespace AwsIam {\n\n/**\n * AWS IAM based gRPC channel credentials factory.\n */\nclass AwsIamGrpcCredentialsFactory : public Grpc::GoogleGrpcCredentialsFactory {\npublic:\n  std::shared_ptr<grpc::ChannelCredentials>\n  getChannelCredentials(const envoy::config::core::v3::GrpcService& grpc_service_config,\n                        Api::Api& api) override;\n\n  Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() {\n    return std::make_unique<envoy::config::grpc_credential::v3::AwsIamConfig>();\n  }\n\n  std::string name() const override { return GrpcCredentialsNames::get().AwsIam; }\n\nprivate:\n  static std::string getRegion(const envoy::config::grpc_credential::v3::AwsIamConfig& config);\n};\n\n/**\n * Produce AWS IAM signature metadata for a gRPC call.\n */\nclass AwsIamHeaderAuthenticator : public grpc::MetadataCredentialsPlugin {\npublic:\n  AwsIamHeaderAuthenticator(Common::Aws::SignerPtr signer) : signer_(std::move(signer)) {}\n\n  grpc::Status GetMetadata(grpc::string_ref, grpc::string_ref, const grpc::AuthContext&,\n                           std::multimap<grpc::string, grpc::string>* metadata) override;\n\n  bool IsBlocking() const override { return true; }\n\nprivate:\n  static Http::RequestMessageImpl buildMessageToSign(absl::string_view service_url,\n                                                     absl::string_view method_name);\n\n  static void signedHeadersToMetadata(const Http::HeaderMap& headers,\n                                      std::multimap<grpc::string, grpc::string>& metadata);\n\n  
const Common::Aws::SignerPtr signer_;\n};\n\n} // namespace AwsIam\n} // namespace GrpcCredentials\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/grpc_credentials/example/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Example gRPC Credentials\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    external_deps = [\"grpc\"],\n    # Legacy test use.\n    visibility = [\n        \"//source/extensions:__subpackages__\",\n        \"//test/common/grpc:__subpackages__\",\n        \"//test/extensions:__subpackages__\",\n    ],\n    deps = [\n        \"//include/envoy/grpc:google_grpc_creds_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/grpc:google_grpc_creds_lib\",\n        \"//source/extensions/grpc_credentials:well_known_names\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/grpc_credentials/example/config.cc",
    "content": "#include \"extensions/grpc_credentials/example/config.h\"\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/grpc/google_grpc_creds.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/grpc/google_grpc_creds_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace GrpcCredentials {\nnamespace Example {\n\nstd::shared_ptr<grpc::ChannelCredentials>\nAccessTokenExampleGrpcCredentialsFactory::getChannelCredentials(\n    const envoy::config::core::v3::GrpcService& grpc_service_config, Api::Api& api) {\n  const auto& google_grpc = grpc_service_config.google_grpc();\n  std::shared_ptr<grpc::ChannelCredentials> creds =\n      Grpc::CredsUtility::defaultSslChannelCredentials(grpc_service_config, api);\n  std::shared_ptr<grpc::CallCredentials> call_creds = nullptr;\n  for (const auto& credential : google_grpc.call_credentials()) {\n    switch (credential.credential_specifier_case()) {\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::CallCredentials::\n        CredentialSpecifierCase::kAccessToken: {\n      if (!credential.access_token().empty()) {\n        std::shared_ptr<grpc::CallCredentials> new_call_creds = grpc::MetadataCredentialsFromPlugin(\n            std::make_unique<StaticHeaderAuthenticator>(credential.access_token()));\n        if (call_creds == nullptr) {\n          call_creds = new_call_creds;\n        } else {\n          call_creds = grpc::CompositeCallCredentials(call_creds, new_call_creds);\n        }\n      }\n      break;\n    }\n    default:\n      // unused credential types\n      continue;\n    }\n  }\n  if (call_creds != nullptr) {\n    return grpc::CompositeChannelCredentials(creds, call_creds);\n  }\n  return creds;\n}\n\ngrpc::Status\nStaticHeaderAuthenticator::GetMetadata(grpc::string_ref, grpc::string_ref, const grpc::AuthContext&,\n                                       std::multimap<grpc::string, grpc::string>* metadata) {\n  // this function is run on a separate thread by the 
gRPC client library (independent of Envoy\n  // threading), so it can perform actions such as refreshing an access token without blocking\n  // the main thread. see:\n  // https://grpc.io/grpc/cpp/classgrpc_1_1_metadata_credentials_plugin.html#a6faf44f7c08d0311a38a868fdb8cbaf0\n  metadata->insert(std::make_pair(\"authorization\", \"Bearer \" + ticket_));\n  return grpc::Status::OK;\n}\n\n/**\n * Static registration for the static header Google gRPC credentials factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(AccessTokenExampleGrpcCredentialsFactory, Grpc::GoogleGrpcCredentialsFactory);\n\n} // namespace Example\n} // namespace GrpcCredentials\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/grpc_credentials/example/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/grpc/google_grpc_creds.h\"\n\n#include \"extensions/grpc_credentials/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace GrpcCredentials {\nnamespace Example {\n\n/**\n * Access token implementation of Google Grpc Credentials Factory\n * This implementation uses ssl creds for the grpc channel if available, similar to the default\n * implementation. Additionally, it uses MetadataCredentialsFromPlugin to add a static secret to a\n * header for call credentials. This implementation does the same thing as AccessTokenCredentials,\n * but it's implemented as a Google gRPC client library plugin to show how a custom implementation\n * would be created.\n *\n * This implementation uses the access_token field in the config to get the secret to add to the\n * header.\n *\n * This can be used as an example for how to implement a more complicated custom call credentials\n * implementation. 
Any blocking calls should be performed in the\n * MetadataCredentialsFromPlugin::GetMetadata to ensure that the main thread is not blocked while\n * initializing the channel.\n */\nclass AccessTokenExampleGrpcCredentialsFactory : public Grpc::GoogleGrpcCredentialsFactory {\npublic:\n  std::shared_ptr<grpc::ChannelCredentials>\n  getChannelCredentials(const envoy::config::core::v3::GrpcService& grpc_service_config,\n                        Api::Api& api) override;\n\n  std::string name() const override { return GrpcCredentialsNames::get().AccessTokenExample; }\n};\n\n/*\n * Reference:\n * https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms\n */\nclass StaticHeaderAuthenticator : public grpc::MetadataCredentialsPlugin {\npublic:\n  StaticHeaderAuthenticator(const grpc::string& ticket) : ticket_(ticket) {}\n\n  grpc::Status GetMetadata(grpc::string_ref, grpc::string_ref, const grpc::AuthContext&,\n                           std::multimap<grpc::string, grpc::string>* metadata) override;\n\nprivate:\n  grpc::string ticket_;\n};\n\n} // namespace Example\n} // namespace GrpcCredentials\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/grpc_credentials/file_based_metadata/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# File Based Metadata gRPC Credentials\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    external_deps = [\"grpc\"],\n    security_posture = \"data_plane_agnostic\",\n    status = \"alpha\",\n    deps = [\n        \"//include/envoy/grpc:google_grpc_creds_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/grpc:google_grpc_creds_lib\",\n        \"//source/extensions/grpc_credentials:well_known_names\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/grpc_credential/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/grpc_credentials/file_based_metadata/config.cc",
    "content": "#include \"extensions/grpc_credentials/file_based_metadata/config.h\"\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/grpc_credential/v3/file_based_metadata.pb.h\"\n#include \"envoy/config/grpc_credential/v3/file_based_metadata.pb.validate.h\"\n#include \"envoy/grpc/google_grpc_creds.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/datasource.h\"\n#include \"common/config/utility.h\"\n#include \"common/grpc/google_grpc_creds_impl.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace GrpcCredentials {\nnamespace FileBasedMetadata {\n\nstd::shared_ptr<grpc::ChannelCredentials>\nFileBasedMetadataGrpcCredentialsFactory::getChannelCredentials(\n    const envoy::config::core::v3::GrpcService& grpc_service_config, Api::Api& api) {\n  const auto& google_grpc = grpc_service_config.google_grpc();\n  std::shared_ptr<grpc::ChannelCredentials> creds =\n      Grpc::CredsUtility::defaultSslChannelCredentials(grpc_service_config, api);\n  std::shared_ptr<grpc::CallCredentials> call_creds = nullptr;\n  for (const auto& credential : google_grpc.call_credentials()) {\n    switch (credential.credential_specifier_case()) {\n    case envoy::config::core::v3::GrpcService::GoogleGrpc::CallCredentials::\n        CredentialSpecifierCase::kFromPlugin: {\n      if (credential.from_plugin().name() == GrpcCredentialsNames::get().FileBasedMetadata) {\n        FileBasedMetadataGrpcCredentialsFactory file_based_metadata_credentials_factory;\n        // We don't deal with validation failures here at runtime today, see\n        // https://github.com/envoyproxy/envoy/issues/8010.\n        const Envoy::ProtobufTypes::MessagePtr file_based_metadata_config_message =\n            Envoy::Config::Utility::translateToFactoryConfig(\n                credential.from_plugin(), ProtobufMessage::getNullValidationVisitor(),\n                
file_based_metadata_credentials_factory);\n        const auto& file_based_metadata_config = Envoy::MessageUtil::downcastAndValidate<\n            const envoy::config::grpc_credential::v3::FileBasedMetadataConfig&>(\n            *file_based_metadata_config_message, ProtobufMessage::getNullValidationVisitor());\n        std::shared_ptr<grpc::CallCredentials> new_call_creds = grpc::MetadataCredentialsFromPlugin(\n            std::make_unique<FileBasedMetadataAuthenticator>(file_based_metadata_config, api));\n        if (call_creds == nullptr) {\n          call_creds = new_call_creds;\n        } else {\n          call_creds = grpc::CompositeCallCredentials(call_creds, new_call_creds);\n        }\n      }\n      break;\n    }\n    default:\n      // unused credential types\n      continue;\n    }\n  }\n  if (call_creds != nullptr) {\n    return grpc::CompositeChannelCredentials(creds, call_creds);\n  }\n  return creds;\n}\n\ngrpc::Status\nFileBasedMetadataAuthenticator::GetMetadata(grpc::string_ref, grpc::string_ref,\n                                            const grpc::AuthContext&,\n                                            std::multimap<grpc::string, grpc::string>* metadata) {\n  std::string header_key = \"authorization\";\n  std::string header_prefix = config_.header_prefix();\n  if (!config_.header_key().empty()) {\n    header_key = config_.header_key();\n  }\n  try {\n    std::string header_value = Envoy::Config::DataSource::read(config_.secret_data(), true, api_);\n    metadata->insert(std::make_pair(header_key, header_prefix + header_value));\n  } catch (const EnvoyException& e) {\n    return grpc::Status(grpc::StatusCode::NOT_FOUND, e.what());\n  }\n  return grpc::Status::OK;\n}\n\n/**\n * Static registration for the file based metadata Google gRPC credentials factory. 
@see\n * RegisterFactory.\n */\nREGISTER_FACTORY(FileBasedMetadataGrpcCredentialsFactory, Grpc::GoogleGrpcCredentialsFactory);\n\n} // namespace FileBasedMetadata\n} // namespace GrpcCredentials\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/grpc_credentials/file_based_metadata/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/grpc_credential/v3/file_based_metadata.pb.h\"\n#include \"envoy/grpc/google_grpc_creds.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/grpc_credentials/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace GrpcCredentials {\nnamespace FileBasedMetadata {\n\n/**\n * File Based Metadata implementation of Google Grpc Credentials Factory\n * This implementation uses ssl creds for the grpc channel if available. Additionally, it uses\n * MetadataCredentialsFromPlugin to add a static secret that is loaded from a file. The header key\n * and header prefix are configurable.\n *\n * This implementation uses the from_plugin field in the call credentials config to get the filename\n * of where the secret is stored to add to the header.\n */\nclass FileBasedMetadataGrpcCredentialsFactory : public Grpc::GoogleGrpcCredentialsFactory {\npublic:\n  std::shared_ptr<grpc::ChannelCredentials>\n  getChannelCredentials(const envoy::config::core::v3::GrpcService& grpc_service_config,\n                        Api::Api& api) override;\n\n  Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() {\n    return std::make_unique<envoy::config::grpc_credential::v3::FileBasedMetadataConfig>();\n  }\n\n  std::string name() const override { return GrpcCredentialsNames::get().FileBasedMetadata; }\n};\n\nclass FileBasedMetadataAuthenticator : public grpc::MetadataCredentialsPlugin {\npublic:\n  FileBasedMetadataAuthenticator(\n      const envoy::config::grpc_credential::v3::FileBasedMetadataConfig& config, Api::Api& api)\n      : config_(config), api_(api) {}\n\n  grpc::Status GetMetadata(grpc::string_ref, grpc::string_ref, const grpc::AuthContext&,\n                           std::multimap<grpc::string, grpc::string>* metadata) override;\n\nprivate:\n  const envoy::config::grpc_credential::v3::FileBasedMetadataConfig config_;\n  Api::Api& 
api_;\n};\n\n} // namespace FileBasedMetadata\n} // namespace GrpcCredentials\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/grpc_credentials/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace GrpcCredentials {\n\n/**\n * Well-known gRPC Credentials names.\n * NOTE: New gRPC Credentials should use the well known name: envoy.grpc_credentials.name.\n */\nclass GrpcCredentialsNameValues {\npublic:\n  // Access Token Example.\n  const std::string AccessTokenExample = \"envoy.grpc_credentials.access_token_example\";\n  // File Based Metadata credentials\n  const std::string FileBasedMetadata = \"envoy.grpc_credentials.file_based_metadata\";\n  // AWS IAM\n  const std::string AwsIam = \"envoy.grpc_credentials.aws_iam\";\n};\n\nusing GrpcCredentialsNames = ConstSingleton<GrpcCredentialsNameValues>;\n\n} // namespace GrpcCredentials\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/health_checkers/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # well known names files are public as long as they exist.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/health_checkers/redis/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Redis custom health checker.\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"redis\",\n    srcs = [\"redis.cc\"],\n    hdrs = [\"redis.h\"],\n    deps = [\n        \"//source/common/upstream:health_checker_base_lib\",\n        \"//source/extensions/filters/network/common/redis:client_lib\",\n        \"//source/extensions/filters/network/redis_proxy:config\",\n        \"//source/extensions/filters/network/redis_proxy:conn_pool_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/health_checker/redis/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    deps = [\n        \":redis\",\n        \":utility\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:health_checker_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/health_checkers:well_known_names\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/health_checker/redis/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility\",\n    hdrs = [\"utility.h\"],\n    deps = [\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/health_checker/redis/v2:pkg_cc_proto\",\n  
  ],\n)\n"
  },
  {
    "path": "source/extensions/health_checkers/redis/config.cc",
    "content": "#include \"extensions/health_checkers/redis/config.h\"\n\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/utility.h\"\n\n#include \"extensions/health_checkers/redis/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HealthCheckers {\nnamespace RedisHealthChecker {\n\nUpstream::HealthCheckerSharedPtr RedisHealthCheckerFactory::createCustomHealthChecker(\n    const envoy::config::core::v3::HealthCheck& config,\n    Server::Configuration::HealthCheckerFactoryContext& context) {\n  return std::make_shared<RedisHealthChecker>(\n      context.cluster(), config,\n      getRedisHealthCheckConfig(config, context.messageValidationVisitor()), context.dispatcher(),\n      context.runtime(), context.eventLogger(), context.api(),\n      NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_);\n};\n\n/**\n * Static registration for the redis custom health checker. @see RegisterFactory.\n */\nREGISTER_FACTORY(RedisHealthCheckerFactory, Server::Configuration::CustomHealthCheckerFactory);\n\n} // namespace RedisHealthChecker\n} // namespace HealthCheckers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/health_checkers/redis/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/health_checker/redis/v2/redis.pb.h\"\n#include \"envoy/config/health_checker/redis/v2/redis.pb.validate.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n#include \"envoy/server/health_checker_config.h\"\n\n#include \"extensions/health_checkers/redis/redis.h\"\n#include \"extensions/health_checkers/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HealthCheckers {\nnamespace RedisHealthChecker {\n\n/**\n * Config registration for the redis health checker.\n */\nclass RedisHealthCheckerFactory : public Server::Configuration::CustomHealthCheckerFactory {\npublic:\n  Upstream::HealthCheckerSharedPtr\n  createCustomHealthChecker(const envoy::config::core::v3::HealthCheck& config,\n                            Server::Configuration::HealthCheckerFactoryContext& context) override;\n\n  std::string name() const override { return HealthCheckerNames::get().RedisHealthChecker; }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return ProtobufTypes::MessagePtr{new envoy::config::health_checker::redis::v2::Redis()};\n  }\n};\n\n} // namespace RedisHealthChecker\n} // namespace HealthCheckers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/health_checkers/redis/redis.cc",
    "content": "#include \"extensions/health_checkers/redis/redis.h\"\n\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/health_checker/redis/v2/redis.pb.h\"\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HealthCheckers {\nnamespace RedisHealthChecker {\n\nRedisHealthChecker::RedisHealthChecker(\n    const Upstream::Cluster& cluster, const envoy::config::core::v3::HealthCheck& config,\n    const envoy::config::health_checker::redis::v2::Redis& redis_config,\n    Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n    Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api,\n    Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory)\n    : HealthCheckerImplBase(cluster, config, dispatcher, runtime, api.randomGenerator(),\n                            std::move(event_logger)),\n      client_factory_(client_factory), key_(redis_config.key()),\n      auth_username_(\n          NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authUsername(cluster.info(), api)),\n      auth_password_(NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword(\n          cluster.info(), api)) {\n  if (!key_.empty()) {\n    type_ = Type::Exists;\n  } else {\n    type_ = Type::Ping;\n  }\n}\n\nRedisHealthChecker::RedisActiveHealthCheckSession::RedisActiveHealthCheckSession(\n    RedisHealthChecker& parent, const Upstream::HostSharedPtr& host)\n    : ActiveHealthCheckSession(parent, host), parent_(parent) {\n  redis_command_stats_ =\n      Extensions::NetworkFilters::Common::Redis::RedisCommandStats::createRedisCommandStats(\n          parent_.cluster_.info()->statsScope().symbolTable());\n}\n\nRedisHealthChecker::RedisActiveHealthCheckSession::~RedisActiveHealthCheckSession() 
{\n  ASSERT(current_request_ == nullptr);\n  ASSERT(client_ == nullptr);\n}\n\nvoid RedisHealthChecker::RedisActiveHealthCheckSession::onDeferredDelete() {\n  if (current_request_) {\n    current_request_->cancel();\n    current_request_ = nullptr;\n  }\n\n  if (client_) {\n    client_->close();\n  }\n}\n\nvoid RedisHealthChecker::RedisActiveHealthCheckSession::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    // This should only happen after any active requests have been failed/cancelled.\n    ASSERT(!current_request_);\n    parent_.dispatcher_.deferredDelete(std::move(client_));\n  }\n}\n\nvoid RedisHealthChecker::RedisActiveHealthCheckSession::onInterval() {\n  if (!client_) {\n    client_ = parent_.client_factory_.create(\n        host_, parent_.dispatcher_, *this, redis_command_stats_,\n        parent_.cluster_.info()->statsScope(), parent_.auth_username_, parent_.auth_password_);\n    client_->addConnectionCallbacks(*this);\n  }\n\n  ASSERT(!current_request_);\n\n  switch (parent_.type_) {\n  case Type::Exists:\n    current_request_ = client_->makeRequest(existsHealthCheckRequest(parent_.key_), *this);\n    break;\n  case Type::Ping:\n    current_request_ = client_->makeRequest(pingHealthCheckRequest(), *this);\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid RedisHealthChecker::RedisActiveHealthCheckSession::onResponse(\n    NetworkFilters::Common::Redis::RespValuePtr&& value) {\n  current_request_ = nullptr;\n\n  switch (parent_.type_) {\n  case Type::Exists:\n    if (value->type() == NetworkFilters::Common::Redis::RespType::Integer &&\n        value->asInteger() == 0) {\n      handleSuccess();\n    } else {\n      handleFailure(envoy::data::core::v3::ACTIVE);\n    }\n    break;\n  case Type::Ping:\n    if (value->type() == NetworkFilters::Common::Redis::RespType::SimpleString &&\n        value->asString() == \"PONG\") {\n   
   handleSuccess();\n    } else {\n      handleFailure(envoy::data::core::v3::ACTIVE);\n    }\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  if (!parent_.reuse_connection_) {\n    client_->close();\n  }\n}\n\nvoid RedisHealthChecker::RedisActiveHealthCheckSession::onFailure() {\n  current_request_ = nullptr;\n  handleFailure(envoy::data::core::v3::NETWORK);\n}\n\nbool RedisHealthChecker::RedisActiveHealthCheckSession::onRedirection(\n    NetworkFilters::Common::Redis::RespValuePtr&&, const std::string&, bool) {\n  // Treat any redirection error response from a Redis server as success.\n  current_request_ = nullptr;\n  handleSuccess();\n  return true;\n}\n\nvoid RedisHealthChecker::RedisActiveHealthCheckSession::onTimeout() {\n  current_request_->cancel();\n  current_request_ = nullptr;\n  client_->close();\n}\n\nRedisHealthChecker::HealthCheckRequest::HealthCheckRequest(const std::string& key) {\n  std::vector<NetworkFilters::Common::Redis::RespValue> values(2);\n  values[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  values[0].asString() = \"EXISTS\";\n  values[1].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  values[1].asString() = key;\n  request_.type(NetworkFilters::Common::Redis::RespType::Array);\n  request_.asArray().swap(values);\n}\n\nRedisHealthChecker::HealthCheckRequest::HealthCheckRequest() {\n  std::vector<NetworkFilters::Common::Redis::RespValue> values(1);\n  values[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  values[0].asString() = \"PING\";\n  request_.type(NetworkFilters::Common::Redis::RespType::Array);\n  request_.asArray().swap(values);\n}\n\n} // namespace RedisHealthChecker\n} // namespace HealthCheckers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/health_checkers/redis/redis.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/health_checker/redis/v2/redis.pb.h\"\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\n#include \"common/upstream/health_checker_base_impl.h\"\n\n#include \"extensions/filters/network/common/redis/client_impl.h\"\n#include \"extensions/filters/network/redis_proxy/config.h\"\n#include \"extensions/filters/network/redis_proxy/conn_pool_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HealthCheckers {\nnamespace RedisHealthChecker {\n\n/**\n * Redis health checker implementation. Sends PING and expects PONG.\n */\nclass RedisHealthChecker : public Upstream::HealthCheckerImplBase {\npublic:\n  RedisHealthChecker(\n      const Upstream::Cluster& cluster, const envoy::config::core::v3::HealthCheck& config,\n      const envoy::config::health_checker::redis::v2::Redis& redis_config,\n      Event::Dispatcher& dispatcher, Runtime::Loader& runtime,\n      Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api,\n      Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory);\n\n  static const NetworkFilters::Common::Redis::RespValue& pingHealthCheckRequest() {\n    static HealthCheckRequest* request = new HealthCheckRequest();\n    return request->request_;\n  }\n\n  static const NetworkFilters::Common::Redis::RespValue&\n  existsHealthCheckRequest(const std::string& key) {\n    static HealthCheckRequest* request = new HealthCheckRequest(key);\n    return request->request_;\n  }\n\nprotected:\n  envoy::data::core::v3::HealthCheckerType healthCheckerType() const override {\n    return envoy::data::core::v3::REDIS;\n  }\n\nprivate:\n  friend class RedisHealthCheckerTest;\n\n  struct 
RedisActiveHealthCheckSession\n      : public ActiveHealthCheckSession,\n        public Extensions::NetworkFilters::Common::Redis::Client::Config,\n        public Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks,\n        public Network::ConnectionCallbacks {\n    RedisActiveHealthCheckSession(RedisHealthChecker& parent, const Upstream::HostSharedPtr& host);\n    ~RedisActiveHealthCheckSession() override;\n\n    // ActiveHealthCheckSession\n    void onInterval() override;\n    void onTimeout() override;\n    void onDeferredDelete() final;\n\n    // Extensions::NetworkFilters::Common::Redis::Client::Config\n    bool disableOutlierEvents() const override { return true; }\n    std::chrono::milliseconds opTimeout() const override {\n      // Allow the main Health Check infra to control timeout.\n      return parent_.timeout_ * 2;\n    }\n    bool enableHashtagging() const override { return false; }\n    bool enableRedirection() const override {\n      return true;\n    } // Redirection errors are treated as check successes.\n    NetworkFilters::Common::Redis::Client::ReadPolicy readPolicy() const override {\n      return NetworkFilters::Common::Redis::Client::ReadPolicy::Primary;\n    }\n\n    // Batching\n    unsigned int maxBufferSizeBeforeFlush() const override {\n      return 0;\n    } // Forces an immediate flush\n    std::chrono::milliseconds bufferFlushTimeoutInMs() const override {\n      return std::chrono::milliseconds(1);\n    }\n\n    uint32_t maxUpstreamUnknownConnections() const override { return 0; }\n    bool enableCommandStats() const override { return false; }\n\n    // Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks\n    void onResponse(NetworkFilters::Common::Redis::RespValuePtr&& value) override;\n    void onFailure() override;\n    bool onRedirection(NetworkFilters::Common::Redis::RespValuePtr&&, const std::string&,\n                       bool) override;\n\n    // Network::ConnectionCallbacks\n    void 
onEvent(Network::ConnectionEvent event) override;\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    RedisHealthChecker& parent_;\n    Extensions::NetworkFilters::Common::Redis::Client::ClientPtr client_;\n    Extensions::NetworkFilters::Common::Redis::Client::PoolRequest* current_request_{};\n    Extensions::NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_;\n  };\n\n  enum class Type { Ping, Exists };\n\n  struct HealthCheckRequest {\n    HealthCheckRequest(const std::string& key);\n    HealthCheckRequest();\n\n    NetworkFilters::Common::Redis::RespValue request_;\n  };\n\n  using RedisActiveHealthCheckSessionPtr = std::unique_ptr<RedisActiveHealthCheckSession>;\n\n  // HealthCheckerImplBase\n  ActiveHealthCheckSessionPtr makeSession(Upstream::HostSharedPtr host) override {\n    return std::make_unique<RedisActiveHealthCheckSession>(*this, host);\n  }\n\n  Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory_;\n  Type type_;\n  const std::string key_;\n  const std::string auth_username_;\n  const std::string auth_password_;\n};\n\n} // namespace RedisHealthChecker\n} // namespace HealthCheckers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/health_checkers/redis/utility.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/health_checker/redis/v2/redis.pb.h\"\n#include \"envoy/config/health_checker/redis/v2/redis.pb.validate.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HealthCheckers {\nnamespace RedisHealthChecker {\n\nnamespace {\n\nstatic const envoy::config::health_checker::redis::v2::Redis\ngetRedisHealthCheckConfig(const envoy::config::core::v3::HealthCheck& health_check_config,\n                          ProtobufMessage::ValidationVisitor& validation_visitor) {\n  ProtobufTypes::MessagePtr config =\n      ProtobufTypes::MessagePtr{new envoy::config::health_checker::redis::v2::Redis()};\n  Envoy::Config::Utility::translateOpaqueConfig(\n      health_check_config.custom_health_check().typed_config(),\n      health_check_config.custom_health_check().hidden_envoy_deprecated_config(),\n      validation_visitor, *config);\n  return MessageUtil::downcastAndValidate<const envoy::config::health_checker::redis::v2::Redis&>(\n      *config, validation_visitor);\n}\n\n} // namespace\n} // namespace RedisHealthChecker\n} // namespace HealthCheckers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/health_checkers/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HealthCheckers {\n\n/**\n * Well-known health checker names.\n * NOTE: New health checkers should use the well known name: envoy.health_checkers.name.\n */\nclass HealthCheckerNameValues {\npublic:\n  // Redis health checker.\n  const std::string RedisHealthChecker = \"envoy.health_checkers.redis\";\n};\n\nusing HealthCheckerNames = ConstSingleton<HealthCheckerNameValues>;\n\n} // namespace HealthCheckers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # well known names files are public as long as they exist.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/internal_redirect/allow_listed_routes/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"allow_listed_routes_lib\",\n    hdrs = [\"allow_listed_routes.h\"],\n    deps = [\n        \"//include/envoy/router:internal_redirect_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/extensions/internal_redirect:well_known_names\",\n        \"@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) clean up by moving the redirect test to extensions.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream_and_upstream\",\n    deps = [\n        \":allow_listed_routes_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/router:internal_redirect_interface\",\n        \"//source/extensions/internal_redirect:well_known_names\",\n        \"@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h\"\n#include \"envoy/router/internal_redirect.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"extensions/internal_redirect/well_known_names.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nclass AllowListedRoutesPredicate : public Router::InternalRedirectPredicate {\npublic:\n  explicit AllowListedRoutesPredicate(\n      const envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig&\n          config)\n      : allowed_routes_(config.allowed_route_names().begin(), config.allowed_route_names().end()) {}\n\n  bool acceptTargetRoute(StreamInfo::FilterState&, absl::string_view route_name, bool,\n                         bool) override {\n    return allowed_routes_.contains(route_name);\n  }\n\n  absl::string_view name() const override {\n    return InternalRedirectPredicateValues::get().AllowListedRoutesPredicate;\n  }\n\n  const absl::flat_hash_set<std::string> allowed_routes_;\n};\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/allow_listed_routes/config.cc",
    "content": "#include \"extensions/internal_redirect/allow_listed_routes/config.h\"\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/router/internal_redirect.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nREGISTER_FACTORY(AllowListedRoutesPredicateFactory, Router::InternalRedirectPredicateFactory);\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/allow_listed_routes/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h\"\n#include \"envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.validate.h\"\n#include \"envoy/router/internal_redirect.h\"\n\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h\"\n#include \"extensions/internal_redirect/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nclass AllowListedRoutesPredicateFactory : public Router::InternalRedirectPredicateFactory {\npublic:\n  Router::InternalRedirectPredicateSharedPtr\n  createInternalRedirectPredicate(const Protobuf::Message& config, absl::string_view) override {\n    auto allow_listed_routes_config =\n        MessageUtil::downcastAndValidate<const envoy::extensions::internal_redirect::\n                                             allow_listed_routes::v3::AllowListedRoutesConfig&>(\n            config, ProtobufMessage::getStrictValidationVisitor());\n    return std::make_shared<AllowListedRoutesPredicate>(allow_listed_routes_config);\n  }\n\n  std::string name() const override {\n    return InternalRedirectPredicateValues::get().AllowListedRoutesPredicate;\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig>();\n  }\n};\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/previous_routes/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"previous_routes_lib\",\n    srcs = [\"previous_routes.cc\"],\n    hdrs = [\"previous_routes.h\"],\n    deps = [\n        \"//include/envoy/router:internal_redirect_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/extensions/internal_redirect:well_known_names\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) clean up by moving the redirect test to extensions.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream_and_upstream\",\n    deps = [\n        \":previous_routes_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/router:internal_redirect_interface\",\n        \"//source/extensions/internal_redirect:well_known_names\",\n        \"@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/internal_redirect/previous_routes/config.cc",
    "content": "#include \"extensions/internal_redirect/previous_routes/config.h\"\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/router/internal_redirect.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nREGISTER_FACTORY(PreviousRoutesPredicateFactory, Router::InternalRedirectPredicateFactory);\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/previous_routes/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h\"\n#include \"envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.validate.h\"\n#include \"envoy/router/internal_redirect.h\"\n\n#include \"extensions/internal_redirect/previous_routes/previous_routes.h\"\n#include \"extensions/internal_redirect/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nclass PreviousRoutesPredicateFactory : public Router::InternalRedirectPredicateFactory {\npublic:\n  Router::InternalRedirectPredicateSharedPtr\n  createInternalRedirectPredicate(const Protobuf::Message&,\n                                  absl::string_view current_route_name) override {\n    return std::make_shared<PreviousRoutesPredicate>(current_route_name);\n  }\n\n  std::string name() const override {\n    return InternalRedirectPredicateValues::get().PreviousRoutesPredicate;\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig>();\n  }\n};\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/previous_routes/previous_routes.cc",
    "content": "#include \"extensions/internal_redirect/previous_routes/previous_routes.h\"\n\n#include \"envoy/router/internal_redirect.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"absl/container/flat_hash_set.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nnamespace {\n\nconstexpr absl::string_view PreviousRoutesPredicateStateNamePrefix =\n    \"envoy.internal_redirect.previous_routes_predicate_state\";\n\nclass PreviousRoutesPredicateState : public StreamInfo::FilterState::Object {\npublic:\n  PreviousRoutesPredicateState() = default;\n  // Disallow copy so that we don't accidentally take a copy of the state\n  // through FilterState::getDataMutable, which will cause confusing bug that\n  // states are not updated in the original copy.\n  PreviousRoutesPredicateState(const PreviousRoutesPredicateState&) = delete;\n  PreviousRoutesPredicateState& operator=(const PreviousRoutesPredicateState&) = delete;\n\n  bool insertRouteIfNotPresent(absl::string_view route) {\n    return previous_routes_.insert(std::string(route)).second;\n  }\n\nprivate:\n  absl::flat_hash_set<std::string> previous_routes_;\n};\n\n} // namespace\n\nbool PreviousRoutesPredicate::acceptTargetRoute(StreamInfo::FilterState& filter_state,\n                                                absl::string_view route_name, bool, bool) {\n  auto filter_state_name =\n      absl::StrCat(PreviousRoutesPredicateStateNamePrefix, \".\", current_route_name_);\n  if (!filter_state.hasData<PreviousRoutesPredicateState>(filter_state_name)) {\n    filter_state.setData(filter_state_name, std::make_unique<PreviousRoutesPredicateState>(),\n                         StreamInfo::FilterState::StateType::Mutable,\n                         StreamInfo::FilterState::LifeSpan::Request);\n  }\n  auto& predicate_state =\n      filter_state.getDataMutable<PreviousRoutesPredicateState>(filter_state_name);\n  return predicate_state.insertRouteIfNotPresent(route_name);\n}\n\n} // 
namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/previous_routes/previous_routes.h",
    "content": "#pragma once\n\n#include \"envoy/router/internal_redirect.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"extensions/internal_redirect/well_known_names.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nclass PreviousRoutesPredicate : public Router::InternalRedirectPredicate {\npublic:\n  explicit PreviousRoutesPredicate(absl::string_view current_route_name)\n      : current_route_name_(current_route_name) {}\n\n  bool acceptTargetRoute(StreamInfo::FilterState& filter_state, absl::string_view route_name, bool,\n                         bool) override;\n\n  absl::string_view name() const override {\n    return InternalRedirectPredicateValues::get().PreviousRoutesPredicate;\n  }\n\nprivate:\n  const std::string current_route_name_;\n};\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/safe_cross_scheme/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"safe_cross_scheme_lib\",\n    hdrs = [\"safe_cross_scheme.h\"],\n    deps = [\n        \"//include/envoy/router:internal_redirect_interface\",\n        \"//include/envoy/stream_info:filter_state_interface\",\n        \"//source/extensions/internal_redirect:well_known_names\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) clean up by moving the redirect test to extensions.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream_and_upstream\",\n    deps = [\n        \":safe_cross_scheme_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/router:internal_redirect_interface\",\n        \"//source/extensions/internal_redirect:well_known_names\",\n        \"@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/internal_redirect/safe_cross_scheme/config.cc",
    "content": "#include \"extensions/internal_redirect/safe_cross_scheme/config.h\"\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/router/internal_redirect.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nREGISTER_FACTORY(SafeCrossSchemePredicateFactory, Router::InternalRedirectPredicateFactory);\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/safe_cross_scheme/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h\"\n#include \"envoy/router/internal_redirect.h\"\n\n#include \"extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h\"\n#include \"extensions/internal_redirect/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nclass SafeCrossSchemePredicateFactory : public Router::InternalRedirectPredicateFactory {\npublic:\n  Router::InternalRedirectPredicateSharedPtr\n  createInternalRedirectPredicate(const Protobuf::Message&, absl::string_view) override {\n    return std::make_shared<SafeCrossSchemePredicate>();\n  }\n\n  std::string name() const override {\n    return InternalRedirectPredicateValues::get().SafeCrossSchemePredicate;\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig>();\n  }\n};\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h",
    "content": "#pragma once\n\n#include \"envoy/router/internal_redirect.h\"\n#include \"envoy/stream_info/filter_state.h\"\n\n#include \"extensions/internal_redirect/well_known_names.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\nclass SafeCrossSchemePredicate : public Router::InternalRedirectPredicate {\npublic:\n  bool acceptTargetRoute(StreamInfo::FilterState&, absl::string_view, bool downstream_is_https,\n                         bool target_is_https) override {\n    return downstream_is_https || !target_is_https;\n  }\n\n  absl::string_view name() const override {\n    return InternalRedirectPredicateValues::get().SafeCrossSchemePredicate;\n  }\n};\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/internal_redirect/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\n\n/**\n * Well-known internal redirect predicate names.\n */\nclass InternalRedirectPredicatesNameValues {\npublic:\n  const std::string AllowListedRoutesPredicate =\n      \"envoy.internal_redirect_predicates.allow_listed_routes\";\n  const std::string PreviousRoutesPredicate = \"envoy.internal_redirect_predicates.previous_routes\";\n  const std::string SafeCrossSchemePredicate =\n      \"envoy.internal_redirect_predicates.safe_cross_scheme\";\n};\n\nusing InternalRedirectPredicateValues = ConstSingleton<InternalRedirectPredicatesNameValues>;\n\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"envoy_quic_alarm_lib\",\n    srcs = [\"envoy_quic_alarm.cc\"],\n    hdrs = [\"envoy_quic_alarm.h\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"@com_googlesource_quiche//:quic_core_alarm_interface_lib\",\n        \"@com_googlesource_quiche//:quic_core_clock_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_alarm_factory_lib\",\n    srcs = [\"envoy_quic_alarm_factory.cc\"],\n    hdrs = [\"envoy_quic_alarm_factory.h\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_alarm_lib\",\n        \"@com_googlesource_quiche//:quic_core_alarm_factory_interface_lib\",\n        \"@com_googlesource_quiche//:quic_core_arena_scoped_ptr_lib\",\n        \"@com_googlesource_quiche//:quic_core_one_block_arena_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_connection_helper_lib\",\n    hdrs = [\"envoy_quic_connection_helper.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche/platform:envoy_quic_clock_lib\",\n        \"@com_googlesource_quiche//:quic_core_buffer_allocator_lib\",\n        \"@com_googlesource_quiche//:quic_core_connection_lib\",\n        \"@com_googlesource_quiche//:quic_core_crypto_random_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_proof_source_base_lib\",\n    srcs = [\"envoy_quic_proof_source_base.cc\"],\n    hdrs = [\"envoy_quic_proof_source_base.h\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_utils_lib\",\n        
\"@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib\",\n        \"@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib\",\n        \"@com_googlesource_quiche//:quic_core_crypto_proof_source_interface_lib\",\n        \"@com_googlesource_quiche//:quic_core_data_lib\",\n        \"@com_googlesource_quiche//:quic_core_versions_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_proof_source_lib\",\n    srcs = [\"envoy_quic_proof_source.cc\"],\n    hdrs = [\"envoy_quic_proof_source.h\"],\n    external_deps = [\"ssl\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_proof_source_base_lib\",\n        \":envoy_quic_utils_lib\",\n        \":quic_io_handle_wrapper_lib\",\n        \":quic_transport_socket_factory_lib\",\n        \"//include/envoy/ssl:tls_certificate_config_interface\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"//source/server:connection_handler_lib\",\n        \"@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_proof_verifier_base_lib\",\n    srcs = [\"envoy_quic_proof_verifier_base.cc\"],\n    hdrs = [\"envoy_quic_proof_verifier_base.h\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_utils_lib\",\n        \"@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib\",\n        \"@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib\",\n        \"@com_googlesource_quiche//:quic_core_versions_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_proof_verifier_lib\",\n    srcs = [\"envoy_quic_proof_verifier.cc\"],\n    hdrs = [\"envoy_quic_proof_verifier.h\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_proof_verifier_base_lib\",\n        \":envoy_quic_utils_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"spdy_server_push_utils_for_envoy_lib\",\n    srcs = [\"spdy_server_push_utils_for_envoy.cc\"],\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"@com_googlesource_quiche//:quic_core_http_spdy_server_push_utils_header\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_stream_lib\",\n    hdrs = [\"envoy_quic_stream.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_simulated_watermark_buffer_lib\",\n        \":quic_filter_manager_connection_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//source/common/http:codec_helper_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"codec_lib\",\n    srcs = [\"codec_impl.cc\"],\n    hdrs = [\"codec_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_client_session_lib\",\n        \":envoy_quic_server_session_lib\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/http/http3:quic_codec_factory_lib\",\n        \"//source/common/http/http3:well_known_names\",\n        \"@com_googlesource_quiche//:quic_core_http_spdy_session_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_filter_manager_connection_lib\",\n    srcs = [\"quic_filter_manager_connection_impl.cc\"],\n    hdrs = [\"quic_filter_manager_connection_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_connection_lib\",\n        \":envoy_quic_simulated_watermark_buffer_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/http:header_map_lib\",\n        
\"//source/common/network:connection_base_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_server_session_lib\",\n    srcs = [\n        \"envoy_quic_server_session.cc\",\n        \"envoy_quic_server_stream.cc\",\n    ],\n    hdrs = [\n        \"envoy_quic_server_session.h\",\n        \"envoy_quic_server_stream.h\",\n    ],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_proof_source_lib\",\n        \":envoy_quic_stream_lib\",\n        \":envoy_quic_utils_lib\",\n        \":quic_filter_manager_connection_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/quic_listeners/quiche/platform:quic_platform_mem_slice_storage_impl_lib\",\n        \"@com_googlesource_quiche//:quic_core_http_spdy_session_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_client_session_lib\",\n    srcs = [\n        \"envoy_quic_client_session.cc\",\n        \"envoy_quic_client_stream.cc\",\n    ],\n    hdrs = [\n        \"envoy_quic_client_session.h\",\n        \"envoy_quic_client_stream.h\",\n    ],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_client_connection_lib\",\n        \":envoy_quic_stream_lib\",\n        \":envoy_quic_utils_lib\",\n        \":quic_filter_manager_connection_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/quic_listeners/quiche/platform:quic_platform_mem_slice_storage_impl_lib\",\n        \"@com_googlesource_quiche//:quic_core_http_client_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_io_handle_wrapper_lib\",\n    hdrs = [\"quic_io_handle_wrapper.h\"],\n    deps = [\n        \"//include/envoy/network:io_handle_interface\",\n        
\"//source/common/network:io_socket_error_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_connection_lib\",\n    srcs = [\"envoy_quic_connection.cc\"],\n    hdrs = [\"envoy_quic_connection.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_io_handle_wrapper_lib\",\n        \"//include/envoy/network:connection_interface\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_utils_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"@com_googlesource_quiche//:quic_core_connection_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_server_connection_lib\",\n    srcs = [\"envoy_quic_server_connection.cc\"],\n    hdrs = [\"envoy_quic_server_connection.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_connection_lib\",\n        \"//source/server:connection_handler_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_client_connection_lib\",\n    srcs = [\"envoy_quic_client_connection.cc\"],\n    hdrs = [\"envoy_quic_client_connection.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_connection_lib\",\n        \":envoy_quic_packet_writer_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:udp_packet_writer_handler_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_dispatcher_lib\",\n    srcs = [\"envoy_quic_dispatcher.cc\"],\n    hdrs = [\"envoy_quic_dispatcher.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_proof_source_lib\",\n        \":envoy_quic_server_connection_lib\",\n        \":envoy_quic_server_session_lib\",\n        \"//include/envoy/network:listener_interface\",\n        \"//source/server:connection_handler_lib\",\n        
\"@com_googlesource_quiche//:quic_core_server_lib\",\n        \"@com_googlesource_quiche//:quic_core_utils_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_simulated_watermark_buffer_lib\",\n    hdrs = [\"envoy_quic_simulated_watermark_buffer.h\"],\n    deps = [\"//source/common/common:assert_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"active_quic_listener_lib\",\n    srcs = [\"active_quic_listener.cc\"],\n    hdrs = [\"active_quic_listener.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_alarm_factory_lib\",\n        \":envoy_quic_connection_helper_lib\",\n        \":envoy_quic_dispatcher_lib\",\n        \":envoy_quic_packet_writer_lib\",\n        \":envoy_quic_proof_source_lib\",\n        \":envoy_quic_utils_lib\",\n        \"//include/envoy/network:listener_interface\",\n        \"//source/common/network:listener_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/server:connection_handler_lib\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"active_quic_listener_config_lib\",\n    srcs = [\"active_quic_listener_config.cc\"],\n    hdrs = [\"active_quic_listener_config.h\"],\n    tags = [\"nofips\"],\n    # TODO(#9953) this should be cleaned up\n    visibility = [\n        \"//source/extensions:__subpackages__\",\n        \"//test/extensions:__subpackages__\",\n        \"//test/server:__subpackages__\",\n    ],\n    deps = [\n        \":active_quic_listener_lib\",\n        \"//include/envoy/registry\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_utils_lib\",\n    srcs = [\"envoy_quic_utils.cc\"],\n    hdrs = [\"envoy_quic_utils.h\"],\n    external_deps = [\n        \"quiche_quic_platform\",\n        \"ssl\",\n    ],\n    tags = [\"nofips\"],\n    deps = [\n        \"//include/envoy/http:codec_interface\",\n        
\"//source/common/http:header_map_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"@com_googlesource_quiche//:quic_core_http_header_list_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_transport_socket_factory_lib\",\n    srcs = [\"quic_transport_socket_factory.cc\"],\n    hdrs = [\"quic_transport_socket_factory.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/ssl:context_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto\",\n    ],\n)\n\n# Create a single target that contains all the libraries that register factories.\n# All of these are needed for this extension to function.\nenvoy_cc_extension(\n    name = \"quic_factory_lib\",\n    security_posture = \"unknown\",\n    tags = [\"nofips\"],\n\n    # QUICHE can't build against FIPS BoringSSL until the FIPS build\n    # is on a new enough version to have QUIC support. Remove it from\n    # the build until then. 
Re-enable as part of #7433.\n    deps = select({\n        \"//bazel:boringssl_fips\": [],\n        \"//bazel:boringssl_disabled\": [],\n        \"//conditions:default\": [\n            \":active_quic_listener_config_lib\",\n            \":codec_lib\",\n            \":quic_transport_socket_factory_lib\",\n            \":udp_gso_batch_writer_config_lib\",\n        ],\n    }),\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_packet_writer_lib\",\n    srcs = [\"envoy_quic_packet_writer.cc\"],\n    hdrs = [\"envoy_quic_packet_writer.h\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":envoy_quic_utils_lib\",\n        \"@com_googlesource_quiche//:quic_core_packet_writer_interface_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"udp_gso_batch_writer_lib\",\n    srcs = select({\n        \"//bazel:linux\": [\"udp_gso_batch_writer.cc\"],\n        \"//conditions:default\": [],\n    }),\n    hdrs = [\"udp_gso_batch_writer.h\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    visibility = [\n        \"//test/common/network:__subpackages__\",\n        \"//test/extensions:__subpackages__\",\n    ],\n    deps = [\n        \":envoy_quic_utils_lib\",\n        \"//include/envoy/network:udp_packet_writer_handler_interface\",\n        \"//source/common/network:io_socket_error_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"@com_googlesource_quiche//:quic_core_batch_writer_gso_batch_writer_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"udp_gso_batch_writer_config_lib\",\n    srcs = [\"udp_gso_batch_writer_config.cc\"],\n    hdrs = [\"udp_gso_batch_writer_config.h\"],\n    tags = [\"nofips\"],\n    visibility = [\n        \"//test/server:__subpackages__\",\n    ],\n    deps = [\n        \":udp_gso_batch_writer_lib\",\n        \"//include/envoy/network:udp_packet_writer_config_interface\",\n        
\"//include/envoy/registry\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/active_quic_listener.cc",
    "content": "#include \"extensions/quic_listeners/quiche/active_quic_listener.h\"\n\n#include \"envoy/network/exception.h\"\n\n#if defined(__linux__)\n#include <linux/filter.h>\n#endif\n\n#include <vector>\n\n#include \"common/runtime/runtime_features.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection_helper.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_dispatcher.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_proof_source.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_packet_writer.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nActiveQuicListener::ActiveQuicListener(\n    uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher,\n    Network::ConnectionHandler& parent, Network::ListenerConfig& listener_config,\n    const quic::QuicConfig& quic_config, Network::Socket::OptionsSharedPtr options,\n    bool kernel_worker_routing, const envoy::config::core::v3::RuntimeFeatureFlag& enabled)\n    : ActiveQuicListener(worker_index, concurrency, dispatcher, parent,\n                         listener_config.listenSocketFactory().getListenSocket(), listener_config,\n                         quic_config, std::move(options), kernel_worker_routing, enabled) {}\n\nActiveQuicListener::ActiveQuicListener(\n    uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher,\n    Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket,\n    Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config,\n    Network::Socket::OptionsSharedPtr options, bool kernel_worker_routing,\n    const envoy::config::core::v3::RuntimeFeatureFlag& enabled)\n    : Server::ActiveUdpListenerBase(worker_index, concurrency, parent, *listen_socket,\n                                    dispatcher.createUdpListener(listen_socket, *this),\n      
                              &listener_config),\n      dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedVersions()),\n      kernel_worker_routing_(kernel_worker_routing),\n      enabled_(enabled, Runtime::LoaderSingleton::get()) {\n  if (options != nullptr) {\n    const bool ok = Network::Socket::applyOptions(\n        options, listen_socket_, envoy::config::core::v3::SocketOption::STATE_BOUND);\n    if (!ok) {\n      // TODO(fcoras): consider removing the fd from the log message\n      ENVOY_LOG(warn, \"Failed to apply socket options to socket {} on listener {} after binding\",\n                listen_socket_.ioHandle().fdDoNotUse(), listener_config.name());\n      throw Network::CreateListenerException(\"Failed to apply socket options.\");\n    }\n    listen_socket_.addOptions(options);\n  }\n\n  quic::QuicRandom* const random = quic::QuicRandom::GetInstance();\n  random->RandBytes(random_seed_, sizeof(random_seed_));\n  crypto_config_ = std::make_unique<quic::QuicCryptoServerConfig>(\n      quiche::QuicheStringPiece(reinterpret_cast<char*>(random_seed_), sizeof(random_seed_)),\n      quic::QuicRandom::GetInstance(),\n      std::make_unique<EnvoyQuicProofSource>(listen_socket_, listener_config.filterChainManager(),\n                                             stats_),\n      quic::KeyExchangeSource::Default());\n  auto connection_helper = std::make_unique<EnvoyQuicConnectionHelper>(dispatcher_);\n  crypto_config_->AddDefaultConfig(random, connection_helper->GetClock(),\n                                   quic::QuicCryptoServerConfig::ConfigOptions());\n  auto alarm_factory =\n      std::make_unique<EnvoyQuicAlarmFactory>(dispatcher_, *connection_helper->GetClock());\n  quic_dispatcher_ = std::make_unique<EnvoyQuicDispatcher>(\n      crypto_config_.get(), quic_config, &version_manager_, std::move(connection_helper),\n      std::move(alarm_factory), quic::kQuicDefaultConnectionIdLength, parent, *config_, stats_,\n      per_worker_stats_, 
dispatcher, listen_socket_);\n\n  // Create udp_packet_writer\n  Network::UdpPacketWriterPtr udp_packet_writer =\n      listener_config.udpPacketWriterFactory()->get().createUdpPacketWriter(\n          listen_socket_.ioHandle(), listener_config.listenerScope());\n  udp_packet_writer_ = udp_packet_writer.get();\n\n  // Some packet writers (like `UdpGsoBatchWriter`) already directly implement\n  // `quic::QuicPacketWriter` and can be used directly here. Other types need\n  // `EnvoyQuicPacketWriter` as an adapter.\n  auto* quic_packet_writer = dynamic_cast<quic::QuicPacketWriter*>(udp_packet_writer.get());\n  if (quic_packet_writer != nullptr) {\n    quic_dispatcher_->InitializeWithWriter(quic_packet_writer);\n    udp_packet_writer.release();\n  } else {\n    quic_dispatcher_->InitializeWithWriter(new EnvoyQuicPacketWriter(std::move(udp_packet_writer)));\n  }\n}\n\nActiveQuicListener::~ActiveQuicListener() { onListenerShutdown(); }\n\nvoid ActiveQuicListener::onListenerShutdown() {\n  ENVOY_LOG(info, \"Quic listener {} shutdown.\", config_->name());\n  quic_dispatcher_->Shutdown();\n  udp_listener_.reset();\n}\n\nvoid ActiveQuicListener::onDataWorker(Network::UdpRecvData&& data) {\n  if (!enabled_.enabled()) {\n    return;\n  }\n\n  quic::QuicSocketAddress peer_address(\n      envoyIpAddressToQuicSocketAddress(data.addresses_.peer_->ip()));\n  quic::QuicSocketAddress self_address(\n      envoyIpAddressToQuicSocketAddress(data.addresses_.local_->ip()));\n  quic::QuicTime timestamp =\n      quic::QuicTime::Zero() +\n      quic::QuicTime::Delta::FromMicroseconds(std::chrono::duration_cast<std::chrono::microseconds>(\n                                                  data.receive_time_.time_since_epoch())\n                                                  .count());\n  ASSERT(data.buffer_->getRawSlices().size() == 1);\n  Buffer::RawSliceVector slices = data.buffer_->getRawSlices(/*max_slices=*/1);\n  // TODO(danzh): pass in TTL and UDP header.\n  quic::QuicReceivedPacket 
packet(reinterpret_cast<char*>(slices[0].mem_), slices[0].len_,\n                                  timestamp, /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false,\n                                  /*packet_headers=*/nullptr, /*headers_length=*/0,\n                                  /*owns_header_buffer*/ false);\n  quic_dispatcher_->ProcessPacket(self_address, peer_address, packet);\n\n  if (quic_dispatcher_->HasChlosBuffered()) {\n    // If there are any buffered CHLOs, activate a read event for the next event loop to process\n    // them.\n    udp_listener_->activateRead();\n  }\n}\n\nvoid ActiveQuicListener::onReadReady() {\n  if (!enabled_.enabled()) {\n    ENVOY_LOG(trace, \"Quic listener {}: runtime disabled\", config_->name());\n    return;\n  }\n\n  if (quic_dispatcher_->HasChlosBuffered()) {\n    event_loops_with_buffered_chlo_for_test_++;\n  }\n\n  quic_dispatcher_->ProcessBufferedChlos(kNumSessionsToCreatePerLoop);\n\n  // If there were more buffered than the limit, schedule again for the next event loop.\n  if (quic_dispatcher_->HasChlosBuffered()) {\n    udp_listener_->activateRead();\n  }\n}\n\nvoid ActiveQuicListener::onWriteReady(const Network::Socket& /*socket*/) {\n  quic_dispatcher_->OnCanWrite();\n}\n\nvoid ActiveQuicListener::pauseListening() { quic_dispatcher_->StopAcceptingNewConnections(); }\n\nvoid ActiveQuicListener::resumeListening() { quic_dispatcher_->StartAcceptingNewConnections(); }\n\nvoid ActiveQuicListener::shutdownListener() {\n  // Same as pauseListening() because all we want is to stop accepting new\n  // connections.\n  quic_dispatcher_->StopAcceptingNewConnections();\n}\n\nuint32_t ActiveQuicListener::destination(const Network::UdpRecvData& data) const {\n  if (kernel_worker_routing_) {\n    // The kernel has already routed the packet correctly. Make it stay on the current worker.\n    return worker_index_;\n  }\n\n  // This implementation is not as performant as it could be. 
It will result in most packets being\n  // delivered by the kernel to the wrong worker, and then redirected to the correct worker.\n  //\n  // This could possibly be improved by keeping a global table of connection IDs, so that a new\n  // connection will add its connection ID to the table on the current worker, and so packets should\n  // be delivered to the correct worker by the kernel unless the client changes address.\n\n  // This is a re-implementation of the same algorithm written in BPF in\n  // ``ActiveQuicListenerFactory::createActiveUdpListener``\n  const uint64_t packet_length = data.buffer_->length();\n  if (packet_length < 9) {\n    return worker_index_;\n  }\n\n  uint8_t first_octet;\n  data.buffer_->copyOut(0, sizeof(first_octet), &first_octet);\n\n  uint32_t connection_id_snippet;\n  if (first_octet & 0x80) {\n    // IETF QUIC long header.\n    // The connection id starts from 7th byte.\n    // Minimum length of a long header packet is 14.\n    if (packet_length < 14) {\n      return worker_index_;\n    }\n\n    data.buffer_->copyOut(6, sizeof(connection_id_snippet), &connection_id_snippet);\n  } else {\n    // IETF QUIC short header, or gQUIC.\n    // The connection id starts from 2nd byte.\n    data.buffer_->copyOut(1, sizeof(connection_id_snippet), &connection_id_snippet);\n  }\n\n  connection_id_snippet = htonl(connection_id_snippet);\n  return connection_id_snippet % concurrency_;\n}\n\nActiveQuicListenerFactory::ActiveQuicListenerFactory(\n    const envoy::config::listener::v3::QuicProtocolOptions& config, uint32_t concurrency)\n    : concurrency_(concurrency), enabled_(config.enabled()) {\n  uint64_t idle_network_timeout_ms =\n      config.has_idle_timeout() ? 
DurationUtil::durationToMilliseconds(config.idle_timeout())\n                                : 300000;\n  quic_config_.SetIdleNetworkTimeout(\n      quic::QuicTime::Delta::FromMilliseconds(idle_network_timeout_ms));\n  int32_t max_time_before_crypto_handshake_ms =\n      config.has_crypto_handshake_timeout()\n          ? DurationUtil::durationToMilliseconds(config.crypto_handshake_timeout())\n          : 20000;\n  quic_config_.set_max_time_before_crypto_handshake(\n      quic::QuicTime::Delta::FromMilliseconds(max_time_before_crypto_handshake_ms));\n  int32_t max_streams = PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_concurrent_streams, 100);\n  quic_config_.SetMaxBidirectionalStreamsToSend(max_streams);\n  quic_config_.SetMaxUnidirectionalStreamsToSend(max_streams);\n}\n\nNetwork::ConnectionHandler::ActiveUdpListenerPtr ActiveQuicListenerFactory::createActiveUdpListener(\n    uint32_t worker_index, Network::ConnectionHandler& parent, Event::Dispatcher& disptacher,\n    Network::ListenerConfig& config) {\n  bool kernel_worker_routing = false;\n  std::unique_ptr<Network::Socket::Options> options = std::make_unique<Network::Socket::Options>();\n\n#if defined(SO_ATTACH_REUSEPORT_CBPF) && defined(__linux__)\n  // This BPF filter reads the 1st word of QUIC connection id in the UDP payload and mods it by the\n  // number of workers to get the socket index in the SO_REUSEPORT socket groups. QUIC packets\n  // should be at least 9 bytes, with the 1st byte indicating one of the below QUIC packet headers:\n  // 1) IETF QUIC long header: most significant bit is 1. The connection id starts from the 7th\n  // byte.\n  // 2) IETF QUIC short header: most significant bit is 0. The connection id starts from 2nd\n  // byte.\n  // 3) Google QUIC header: most significant bit is 0. 
The connection id starts from 2nd\n  // byte.\n  // Any packet that doesn't belong to any of the three packet header types are dispatched\n  // based on 5-tuple source/destination addresses.\n  // SPELLCHECKER(off)\n  std::vector<sock_filter> filter = {\n      {0x80, 0, 0, 0000000000}, //                   ld len\n      {0x35, 0, 9, 0x00000009}, //                   jlt #0x9, packet_too_short\n      {0x30, 0, 0, 0000000000}, //                   ldb [0]\n      {0x54, 0, 0, 0x00000080}, //                   and #0x80\n      {0x15, 0, 2, 0000000000}, //                   jne #0, ietf_long_header\n      {0x20, 0, 0, 0x00000001}, //                   ld [1]\n      {0x05, 0, 0, 0x00000005}, //                   ja return\n      {0x80, 0, 0, 0000000000}, // ietf_long_header: ld len\n      {0x35, 0, 2, 0x0000000e}, //                   jlt #0xe, packet_too_short\n      {0x20, 0, 0, 0x00000006}, //                   ld [6]\n      {0x05, 0, 0, 0x00000001}, //                   ja return\n      {0x20, 0, 0,              // packet_too_short: ld rxhash\n       static_cast<uint32_t>(SKF_AD_OFF + SKF_AD_RXHASH)},\n      {0x94, 0, 0, concurrency_}, // return:         mod #socket_count\n      {0x16, 0, 0, 0000000000},   //                 ret a\n  };\n  // SPELLCHECKER(on)\n  sock_fprog prog;\n  // This option only needs to be applied once to any one of the sockets in SO_REUSEPORT socket\n  // group. 
One of the listener will be created with this socket option.\n  if (Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing\")) {\n    absl::call_once(install_bpf_once_, [&]() {\n      if (concurrency_ > 1) {\n        prog.len = filter.size();\n        prog.filter = filter.data();\n        options->push_back(std::make_shared<Network::SocketOptionImpl>(\n            envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_ATTACH_REUSEPORT_CBPF,\n            absl::string_view(reinterpret_cast<char*>(&prog), sizeof(prog))));\n      }\n    });\n\n    kernel_worker_routing = true;\n  };\n\n#else\n  if (concurrency_ != 1) {\n    ENVOY_LOG(warn, \"Efficient routing of QUIC packets to the correct worker is not supported or \"\n                    \"not implemented by Envoy on this platform. QUIC performance may be degraded.\");\n  }\n#endif\n\n  return std::make_unique<ActiveQuicListener>(worker_index, concurrency_, disptacher, parent,\n                                              config, quic_config_, std::move(options),\n                                              kernel_worker_routing, enabled_);\n} // namespace Quic\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/active_quic_listener.h",
    "content": "#pragma once\n\n#include \"envoy/config/listener/v3/quic_config.pb.h\"\n#include \"envoy/network/connection_handler.h\"\n#include \"envoy/network/listener.h\"\n#include \"envoy/runtime/runtime.h\"\n\n#include \"common/network/socket_option_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_protos.h\"\n\n#include \"server/connection_handler_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_dispatcher.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// QUIC specific UdpListenerCallbacks implementation which delegates incoming\n// packets, write signals and listener errors to QuicDispatcher.\nclass ActiveQuicListener : public Envoy::Server::ActiveUdpListenerBase,\n                           Logger::Loggable<Logger::Id::quic> {\npublic:\n  // TODO(bencebeky): Tune this value.\n  static const size_t kNumSessionsToCreatePerLoop = 16;\n\n  ActiveQuicListener(uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher,\n                     Network::ConnectionHandler& parent, Network::ListenerConfig& listener_config,\n                     const quic::QuicConfig& quic_config, Network::Socket::OptionsSharedPtr options,\n                     bool kernel_worker_routing,\n                     const envoy::config::core::v3::RuntimeFeatureFlag& enabled);\n\n  ActiveQuicListener(uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher,\n                     Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket,\n                     Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config,\n                     Network::Socket::OptionsSharedPtr options, bool kernel_worker_routing,\n                     const envoy::config::core::v3::RuntimeFeatureFlag& enabled);\n\n  ~ActiveQuicListener() override;\n\n  void onListenerShutdown();\n  uint64_t eventLoopsWithBufferedChlosForTest() const {\n    return event_loops_with_buffered_chlo_for_test_;\n  
}\n\n  // Network::UdpListenerCallbacks\n  void onReadReady() override;\n  void onWriteReady(const Network::Socket& socket) override;\n  void onReceiveError(Api::IoError::IoErrorCode /*error_code*/) override {\n    // No-op. Quic can't do anything upon listener error.\n  }\n  Network::UdpPacketWriter& udpPacketWriter() override { return *udp_packet_writer_; }\n  void onDataWorker(Network::UdpRecvData&& data) override;\n  uint32_t destination(const Network::UdpRecvData& data) const override;\n\n  // ActiveListenerImplBase\n  void pauseListening() override;\n  void resumeListening() override;\n  void shutdownListener() override;\n\nprivate:\n  friend class ActiveQuicListenerPeer;\n\n  uint8_t random_seed_[16];\n  std::unique_ptr<quic::QuicCryptoServerConfig> crypto_config_;\n  Event::Dispatcher& dispatcher_;\n  quic::QuicVersionManager version_manager_;\n  std::unique_ptr<EnvoyQuicDispatcher> quic_dispatcher_;\n  const bool kernel_worker_routing_;\n  Runtime::FeatureFlag enabled_;\n  Network::UdpPacketWriter* udp_packet_writer_;\n\n  // The number of runs of the event loop in which at least one CHLO was buffered.\n  // TODO(ggreenway): Consider making this a published stat, or some variation of this information.\n  uint64_t event_loops_with_buffered_chlo_for_test_{0};\n};\n\nusing ActiveQuicListenerPtr = std::unique_ptr<ActiveQuicListener>;\n\n// A factory to create ActiveQuicListener based on given config.\nclass ActiveQuicListenerFactory : public Network::ActiveUdpListenerFactory,\n                                  Logger::Loggable<Logger::Id::quic> {\npublic:\n  ActiveQuicListenerFactory(const envoy::config::listener::v3::QuicProtocolOptions& config,\n                            uint32_t concurrency);\n\n  // Network::ActiveUdpListenerFactory.\n  Network::ConnectionHandler::ActiveUdpListenerPtr\n  createActiveUdpListener(uint32_t worker_index, Network::ConnectionHandler& parent,\n                          Event::Dispatcher& disptacher, Network::ListenerConfig& 
config) override;\n  bool isTransportConnectionless() const override { return false; }\n\nprivate:\n  friend class ActiveQuicListenerFactoryPeer;\n\n  quic::QuicConfig quic_config_;\n  const uint32_t concurrency_;\n  absl::once_flag install_bpf_once_;\n  envoy::config::core::v3::RuntimeFeatureFlag enabled_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/active_quic_listener_config.cc",
    "content": "#include \"extensions/quic_listeners/quiche/active_quic_listener_config.h\"\n\n#include \"envoy/config/listener/v3/quic_config.pb.h\"\n\n#include \"extensions/quic_listeners/quiche/active_quic_listener.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nProtobufTypes::MessagePtr ActiveQuicListenerConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::config::listener::v3::QuicProtocolOptions>();\n}\n\nNetwork::ActiveUdpListenerFactoryPtr\nActiveQuicListenerConfigFactory::createActiveUdpListenerFactory(const Protobuf::Message& message,\n                                                                uint32_t concurrency) {\n  auto& config = dynamic_cast<const envoy::config::listener::v3::QuicProtocolOptions&>(message);\n  return std::make_unique<ActiveQuicListenerFactory>(config, concurrency);\n}\n\nstd::string ActiveQuicListenerConfigFactory::name() const { return QuicListenerName; }\n\nREGISTER_FACTORY(ActiveQuicListenerConfigFactory, Server::ActiveUdpListenerConfigFactory);\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/active_quic_listener_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/active_udp_listener_config.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nconst std::string QuicListenerName{\"quiche_quic_listener\"};\n\n// A factory to create ActiveQuicListenerFactory based on given protobuf.\nclass ActiveQuicListenerConfigFactory : public Server::ActiveUdpListenerConfigFactory {\npublic:\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  Network::ActiveUdpListenerFactoryPtr\n  createActiveUdpListenerFactory(const Protobuf::Message&, uint32_t concurrency) override;\n\n  std::string name() const override;\n};\n\nDECLARE_FACTORY(ActiveQuicListenerConfigFactory);\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/codec_impl.cc",
    "content": "#include \"extensions/quic_listeners/quiche/codec_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_stream.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_stream.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Converts a QuicStream instance to EnvoyQuicStream instance. The current stream implementation\n// inherits from these two interfaces, with the former one providing Quic interface and the latter\n// providing Envoy interface.\nEnvoyQuicStream* quicStreamToEnvoyStream(quic::QuicStream* stream) {\n  return dynamic_cast<EnvoyQuicStream*>(stream);\n}\nEnvoyQuicClientStream* quicStreamToEnvoyClientStream(quic::QuicStream* stream) {\n  return dynamic_cast<EnvoyQuicClientStream*>(stream);\n}\n\nbool QuicHttpConnectionImplBase::wantsToWrite() { return quic_session_.bytesToSend() > 0; }\n\nvoid QuicHttpConnectionImplBase::runWatermarkCallbacksForEachStream(\n    quic::QuicSmallMap<quic::QuicStreamId, std::unique_ptr<quic::QuicStream>, 10>& stream_map,\n    bool high_watermark) {\n  for (auto& it : stream_map) {\n    if (!it.second->is_static()) {\n      // Only call watermark callbacks on non QUIC static streams which are\n      // crypto stream and Google QUIC headers stream.\n      auto stream = quicStreamToEnvoyStream(it.second.get());\n      if (high_watermark) {\n        ENVOY_LOG(debug, \"runHighWatermarkCallbacks on stream {}\", it.first);\n        stream->runHighWatermarkCallbacks();\n      } else {\n        ENVOY_LOG(debug, \"runLowWatermarkCallbacks on stream {}\", it.first);\n        stream->runLowWatermarkCallbacks();\n      }\n    }\n  }\n}\n\nQuicHttpServerConnectionImpl::QuicHttpServerConnectionImpl(\n    EnvoyQuicServerSession& quic_session, Http::ServerConnectionCallbacks& callbacks)\n    : QuicHttpConnectionImplBase(quic_session), quic_server_session_(quic_session) {\n  quic_session.setHttpConnectionCallbacks(callbacks);\n}\n\nvoid 
QuicHttpServerConnectionImpl::onUnderlyingConnectionAboveWriteBufferHighWatermark() {\n  runWatermarkCallbacksForEachStream(quic_server_session_.stream_map(), true);\n}\n\nvoid QuicHttpServerConnectionImpl::onUnderlyingConnectionBelowWriteBufferLowWatermark() {\n  runWatermarkCallbacksForEachStream(quic_server_session_.stream_map(), false);\n}\n\nvoid QuicHttpServerConnectionImpl::shutdownNotice() {\n  if (quic::VersionUsesHttp3(quic_server_session_.transport_version())) {\n    quic_server_session_.SendHttp3Shutdown();\n  } else {\n    ENVOY_CONN_LOG(debug, \"Shutdown notice is not propagated to QUIC.\", quic_server_session_);\n  }\n}\n\nvoid QuicHttpServerConnectionImpl::goAway() {\n  if (quic::VersionUsesHttp3(quic_server_session_.transport_version())) {\n    quic_server_session_.SendHttp3GoAway();\n  } else {\n    quic_server_session_.SendGoAway(quic::QUIC_PEER_GOING_AWAY, \"server shutdown imminent\");\n  }\n}\n\nQuicHttpClientConnectionImpl::QuicHttpClientConnectionImpl(EnvoyQuicClientSession& session,\n                                                           Http::ConnectionCallbacks& callbacks)\n    : QuicHttpConnectionImplBase(session), quic_client_session_(session) {\n  session.setHttpConnectionCallbacks(callbacks);\n}\n\nHttp::RequestEncoder&\nQuicHttpClientConnectionImpl::newStream(Http::ResponseDecoder& response_decoder) {\n  EnvoyQuicClientStream* stream =\n      quicStreamToEnvoyClientStream(quic_client_session_.CreateOutgoingBidirectionalStream());\n  // TODO(danzh) handle stream creation failure gracefully. This can happen when\n  // there are already 100 open streams. 
In such case, caller should hold back\n  // the stream creation till an existing stream is closed.\n  ASSERT(stream != nullptr, \"Fail to create QUIC stream.\");\n  stream->setResponseDecoder(response_decoder);\n  if (quic_client_session_.aboveHighWatermark()) {\n    stream->runHighWatermarkCallbacks();\n  }\n  return *stream;\n}\n\nvoid QuicHttpClientConnectionImpl::onUnderlyingConnectionAboveWriteBufferHighWatermark() {\n  runWatermarkCallbacksForEachStream(quic_client_session_.stream_map(), true);\n}\n\nvoid QuicHttpClientConnectionImpl::onUnderlyingConnectionBelowWriteBufferLowWatermark() {\n  runWatermarkCallbacksForEachStream(quic_client_session_.stream_map(), false);\n}\n\nstd::unique_ptr<Http::ClientConnection>\nQuicHttpClientConnectionFactoryImpl::createQuicClientConnection(\n    Network::Connection& connection, Http::ConnectionCallbacks& callbacks) {\n  return std::make_unique<Quic::QuicHttpClientConnectionImpl>(\n      dynamic_cast<Quic::EnvoyQuicClientSession&>(connection), callbacks);\n}\n\nstd::unique_ptr<Http::ServerConnection>\nQuicHttpServerConnectionFactoryImpl::createQuicServerConnection(\n    Network::Connection& connection, Http::ConnectionCallbacks& callbacks) {\n  return std::make_unique<Quic::QuicHttpServerConnectionImpl>(\n      dynamic_cast<Quic::EnvoyQuicServerSession&>(connection),\n      dynamic_cast<Http::ServerConnectionCallbacks&>(callbacks));\n}\n\nREGISTER_FACTORY(QuicHttpClientConnectionFactoryImpl, Http::QuicHttpClientConnectionFactory);\nREGISTER_FACTORY(QuicHttpServerConnectionFactoryImpl, Http::QuicHttpServerConnectionFactory);\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/codec_impl.h",
    "content": "#include \"envoy/http/codec.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/http3/quic_codec_factory.h\"\n#include \"common/http/http3/well_known_names.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_session.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_session.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// QuicHttpConnectionImplBase instance is a thin QUIC codec just providing quic interface to HCM.\n// Owned by HCM and created during onNewConnection() if the network connection\n// is a QUIC connection.\nclass QuicHttpConnectionImplBase : public virtual Http::Connection,\n                                   protected Logger::Loggable<Logger::Id::quic> {\npublic:\n  QuicHttpConnectionImplBase(QuicFilterManagerConnectionImpl& quic_session)\n      : quic_session_(quic_session) {}\n\n  // Http::Connection\n  Http::Status dispatch(Buffer::Instance& /*data*/) override {\n    // Bypassed. 
QUIC connection already hands all data to streams.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  Http::Protocol protocol() override { return Http::Protocol::Http3; }\n  // Returns true if the session has data to send but queued in connection or\n  // stream send buffer.\n  bool wantsToWrite() override;\n\n  void runWatermarkCallbacksForEachStream(\n      quic::QuicSmallMap<quic::QuicStreamId, std::unique_ptr<quic::QuicStream>, 10>& stream_map,\n      bool high_watermark);\n\nprotected:\n  QuicFilterManagerConnectionImpl& quic_session_;\n};\n\nclass QuicHttpServerConnectionImpl : public QuicHttpConnectionImplBase,\n                                     public Http::ServerConnection {\npublic:\n  QuicHttpServerConnectionImpl(EnvoyQuicServerSession& quic_session,\n                               Http::ServerConnectionCallbacks& callbacks);\n\n  // Http::Connection\n  void goAway() override;\n  void shutdownNotice() override;\n  void onUnderlyingConnectionAboveWriteBufferHighWatermark() override;\n  void onUnderlyingConnectionBelowWriteBufferLowWatermark() override;\n\nprivate:\n  EnvoyQuicServerSession& quic_server_session_;\n};\n\nclass QuicHttpClientConnectionImpl : public QuicHttpConnectionImplBase,\n                                     public Http::ClientConnection {\npublic:\n  QuicHttpClientConnectionImpl(EnvoyQuicClientSession& session,\n                               Http::ConnectionCallbacks& callbacks);\n\n  // Http::ClientConnection\n  Http::RequestEncoder& newStream(Http::ResponseDecoder& response_decoder) override;\n\n  // Http::Connection\n  void goAway() override { NOT_REACHED_GCOVR_EXCL_LINE; }\n  void shutdownNotice() override { NOT_REACHED_GCOVR_EXCL_LINE; }\n  void onUnderlyingConnectionAboveWriteBufferHighWatermark() override;\n  void onUnderlyingConnectionBelowWriteBufferLowWatermark() override;\n\nprivate:\n  EnvoyQuicClientSession& quic_client_session_;\n};\n\n// A factory to create QuicHttpClientConnection.\nclass QuicHttpClientConnectionFactoryImpl 
: public Http::QuicHttpClientConnectionFactory {\npublic:\n  std::unique_ptr<Http::ClientConnection>\n  createQuicClientConnection(Network::Connection& connection,\n                             Http::ConnectionCallbacks& callbacks) override;\n\n  std::string name() const override { return Http::QuicCodecNames::get().Quiche; }\n};\n\n// A factory to create QuicHttpServerConnection.\nclass QuicHttpServerConnectionFactoryImpl : public Http::QuicHttpServerConnectionFactory {\npublic:\n  std::unique_ptr<Http::ServerConnection>\n  createQuicServerConnection(Network::Connection& connection,\n                             Http::ConnectionCallbacks& callbacks) override;\n\n  std::string name() const override { return Http::QuicCodecNames::get().Quiche; }\n};\n\nDECLARE_FACTORY(QuicHttpClientConnectionFactoryImpl);\nDECLARE_FACTORY(QuicHttpServerConnectionFactoryImpl);\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_alarm.h\"\n\n#include <algorithm>\n\nnamespace Envoy {\nnamespace Quic {\n\nEnvoyQuicAlarm::EnvoyQuicAlarm(Event::Dispatcher& dispatcher, const quic::QuicClock& clock,\n                               quic::QuicArenaScopedPtr<quic::QuicAlarm::Delegate> delegate)\n    : QuicAlarm(std::move(delegate)), dispatcher_(dispatcher),\n      timer_(dispatcher_.createTimer([this]() { Fire(); })), clock_(clock) {}\n\nvoid EnvoyQuicAlarm::CancelImpl() { timer_->disableTimer(); }\n\nvoid EnvoyQuicAlarm::SetImpl() {\n  quic::QuicTime::Delta duration = getDurationBeforeDeadline();\n  // Round up the duration so that any duration < 1us will not be triggered within current event\n  // loop. QUICHE alarm is not expected to be scheduled in current event loop. This bit is a bummer\n  // in QUICHE, and we are working on the fix. Once QUICHE is fixed of expecting this behavior, we\n  // no longer need to round up the duration.\n  // TODO(antoniovicente) Remove the std::max(1, ...) when decommissioning the\n  // envoy.reloadable_features.activate_timers_next_event_loop runtime flag.\n  timer_->enableHRTimer(\n      std::chrono::microseconds(std::max(static_cast<int64_t>(1), duration.ToMicroseconds())));\n}\n\nvoid EnvoyQuicAlarm::UpdateImpl() {\n  // Since Timer::enableTimer() overrides its deadline from previous calls,\n  // there is no need to disable the timer before enabling it again.\n  SetImpl();\n}\n\nquic::QuicTime::Delta EnvoyQuicAlarm::getDurationBeforeDeadline() {\n  quic::QuicTime::Delta duration(quic::QuicTime::Delta::Zero());\n  quic::QuicTime now = clock_.ApproximateNow();\n  quic::QuicTime tmp = deadline();\n  if (tmp > now) {\n    duration = tmp - now;\n  }\n  return duration;\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_alarm.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"quiche/quic/core/quic_alarm.h\"\n#include \"quiche/quic/core/quic_clock.h\"\n#include \"quiche/quic/core/quic_time.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Implements QUIC interface\n// https://quiche.googlesource.com/quiche/+/refs/heads/master/quic/core/quic_alarm.h This class\n// wraps an Event::Timer object and provide interface for QUIC to interact with the timer.\nclass EnvoyQuicAlarm : public quic::QuicAlarm {\npublic:\n  EnvoyQuicAlarm(Event::Dispatcher& dispatcher, const quic::QuicClock& clock,\n                 quic::QuicArenaScopedPtr<quic::QuicAlarm::Delegate> delegate);\n\n  // TimerImpl destruction deletes in-flight alarm firing event.\n  ~EnvoyQuicAlarm() override = default;\n\n  // quic::QuicAlarm\n  void CancelImpl() override;\n  void SetImpl() override;\n  // Overridden to avoid cancel before set.\n  void UpdateImpl() override;\n\nprivate:\n  quic::QuicTime::Delta getDurationBeforeDeadline();\n\n  Event::Dispatcher& dispatcher_;\n  Event::TimerPtr timer_;\n  const quic::QuicClock& clock_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_alarm_factory.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nquic::QuicAlarm* EnvoyQuicAlarmFactory::CreateAlarm(quic::QuicAlarm::Delegate* delegate) {\n  return new EnvoyQuicAlarm(dispatcher_, clock_,\n                            quic::QuicArenaScopedPtr<quic::QuicAlarm::Delegate>(delegate));\n}\n\nquic::QuicArenaScopedPtr<quic::QuicAlarm>\nEnvoyQuicAlarmFactory::CreateAlarm(quic::QuicArenaScopedPtr<quic::QuicAlarm::Delegate> delegate,\n                                   quic::QuicConnectionArena* arena) {\n  if (arena != nullptr) {\n    return arena->New<EnvoyQuicAlarm>(dispatcher_, clock_, std::move(delegate));\n  }\n  return quic::QuicArenaScopedPtr<quic::QuicAlarm>(\n      new EnvoyQuicAlarm(dispatcher_, clock_, std::move(delegate)));\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h",
    "content": "#pragma once\n\n#include \"common/common/non_copyable.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_alarm.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#endif\n\n#include \"quiche/quic/core/quic_alarm_factory.h\"\n#include \"quiche/quic/core/quic_arena_scoped_ptr.h\"\n#include \"quiche/quic/core/quic_one_block_arena.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\nnamespace Envoy {\nnamespace Quic {\n\nclass EnvoyQuicAlarmFactory : public quic::QuicAlarmFactory, NonCopyable {\npublic:\n  EnvoyQuicAlarmFactory(Event::Dispatcher& dispatcher, const quic::QuicClock& clock)\n      : dispatcher_(dispatcher), clock_(clock) {}\n\n  ~EnvoyQuicAlarmFactory() override = default;\n\n  // QuicAlarmFactory\n  quic::QuicAlarm* CreateAlarm(quic::QuicAlarm::Delegate* delegate) override;\n  quic::QuicArenaScopedPtr<quic::QuicAlarm>\n  CreateAlarm(quic::QuicArenaScopedPtr<quic::QuicAlarm::Delegate> delegate,\n              quic::QuicConnectionArena* arena) override;\n\nprivate:\n  Event::Dispatcher& dispatcher_;\n  const quic::QuicClock& clock_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_client_connection.h\"\n\n#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/udp_packet_writer_handler_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_packet_writer.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nEnvoyQuicClientConnection::EnvoyQuicClientConnection(\n    const quic::QuicConnectionId& server_connection_id,\n    Network::Address::InstanceConstSharedPtr& initial_peer_address,\n    quic::QuicConnectionHelperInterface& helper, quic::QuicAlarmFactory& alarm_factory,\n    const quic::ParsedQuicVersionVector& supported_versions,\n    Network::Address::InstanceConstSharedPtr local_addr, Event::Dispatcher& dispatcher,\n    const Network::ConnectionSocket::OptionsSharedPtr& options)\n    : EnvoyQuicClientConnection(server_connection_id, helper, alarm_factory, supported_versions,\n                                dispatcher,\n                                createConnectionSocket(initial_peer_address, local_addr, options)) {\n}\n\nEnvoyQuicClientConnection::EnvoyQuicClientConnection(\n    const quic::QuicConnectionId& server_connection_id, quic::QuicConnectionHelperInterface& helper,\n    quic::QuicAlarmFactory& alarm_factory, const quic::ParsedQuicVersionVector& supported_versions,\n    Event::Dispatcher& dispatcher, Network::ConnectionSocketPtr&& connection_socket)\n    : EnvoyQuicClientConnection(\n          server_connection_id, helper, alarm_factory,\n          new EnvoyQuicPacketWriter(\n              std::make_unique<Network::UdpDefaultWriter>(connection_socket->ioHandle())),\n          true, supported_versions, dispatcher, std::move(connection_socket)) 
{}\n\nEnvoyQuicClientConnection::EnvoyQuicClientConnection(\n    const quic::QuicConnectionId& server_connection_id, quic::QuicConnectionHelperInterface& helper,\n    quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer, bool owns_writer,\n    const quic::ParsedQuicVersionVector& supported_versions, Event::Dispatcher& dispatcher,\n    Network::ConnectionSocketPtr&& connection_socket)\n    : EnvoyQuicConnection(\n          server_connection_id,\n          envoyIpAddressToQuicSocketAddress(connection_socket->remoteAddress()->ip()), helper,\n          alarm_factory, writer, owns_writer, quic::Perspective::IS_CLIENT, supported_versions,\n          std::move(connection_socket)),\n      dispatcher_(dispatcher) {}\n\nvoid EnvoyQuicClientConnection::processPacket(\n    Network::Address::InstanceConstSharedPtr local_address,\n    Network::Address::InstanceConstSharedPtr peer_address, Buffer::InstancePtr buffer,\n    MonotonicTime receive_time) {\n  if (!connected()) {\n    return;\n  }\n  quic::QuicTime timestamp =\n      quic::QuicTime::Zero() +\n      quic::QuicTime::Delta::FromMicroseconds(\n          std::chrono::duration_cast<std::chrono::microseconds>(receive_time.time_since_epoch())\n              .count());\n  ASSERT(buffer->getRawSlices().size() == 1);\n  Buffer::RawSliceVector slices = buffer->getRawSlices(/*max_slices=*/1);\n  quic::QuicReceivedPacket packet(reinterpret_cast<char*>(slices[0].mem_), slices[0].len_,\n                                  timestamp, /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false,\n                                  /*packet_headers=*/nullptr, /*headers_length=*/0,\n                                  /*owns_header_buffer*/ false);\n  ProcessUdpPacket(envoyIpAddressToQuicSocketAddress(local_address->ip()),\n                   envoyIpAddressToQuicSocketAddress(peer_address->ip()), packet);\n}\n\nuint64_t EnvoyQuicClientConnection::maxPacketSize() const {\n  // TODO(danzh) make this variable configurable to support 
jumbo frames.\n  return Network::MAX_UDP_PACKET_SIZE;\n}\n\nvoid EnvoyQuicClientConnection::setUpConnectionSocket() {\n  if (connectionSocket()->ioHandle().isOpen()) {\n    file_event_ = connectionSocket()->ioHandle().createFileEvent(\n        dispatcher_, [this](uint32_t events) -> void { onFileEvent(events); },\n        Event::PlatformDefaultTriggerType,\n        Event::FileReadyType::Read | Event::FileReadyType::Write);\n\n    if (!Network::Socket::applyOptions(connectionSocket()->options(), *connectionSocket(),\n                                       envoy::config::core::v3::SocketOption::STATE_LISTENING)) {\n      ENVOY_CONN_LOG(error, \"Fail to apply listening options\", *this);\n      connectionSocket()->close();\n    }\n  }\n  if (!connectionSocket()->ioHandle().isOpen()) {\n    CloseConnection(quic::QUIC_CONNECTION_CANCELLED, \"Fail to set up connection socket.\",\n                    quic::ConnectionCloseBehavior::SILENT_CLOSE);\n  }\n}\n\nvoid EnvoyQuicClientConnection::switchConnectionSocket(\n    Network::ConnectionSocketPtr&& connection_socket) {\n  auto writer = std::make_unique<EnvoyQuicPacketWriter>(\n      std::make_unique<Network::UdpDefaultWriter>(connection_socket->ioHandle()));\n  // Destroy the old file_event before closing the old socket. 
Otherwise the socket might be picked\n  // up by another socket() call while file_event is still operating on it.\n  file_event_.reset();\n  // The old socket is closed in this call.\n  setConnectionSocket(std::move(connection_socket));\n  setUpConnectionSocket();\n  SetQuicPacketWriter(writer.release(), true);\n}\n\nvoid EnvoyQuicClientConnection::onFileEvent(uint32_t events) {\n  ENVOY_CONN_LOG(trace, \"socket event: {}\", *this, events);\n  ASSERT(events & (Event::FileReadyType::Read | Event::FileReadyType::Write));\n\n  if (events & Event::FileReadyType::Write) {\n    OnCanWrite();\n  }\n\n  // It's possible for a write event callback to close the connection, in such case ignore read\n  // event processing.\n  if (connected() && (events & Event::FileReadyType::Read)) {\n    Api::IoErrorPtr err = Network::Utility::readPacketsFromSocket(\n        connectionSocket()->ioHandle(), *connectionSocket()->localAddress(), *this,\n        dispatcher_.timeSource(), packets_dropped_);\n    // TODO(danzh): Handle no error when we limit the number of packets read.\n    if (err->getErrorCode() != Api::IoError::IoErrorCode::Again) {\n      ENVOY_CONN_LOG(error, \"recvmsg result {}: {}\", *this, static_cast<int>(err->getErrorCode()),\n                     err->getErrorDetails());\n    }\n  }\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_client_connection.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n\n#include \"common/network/utility.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// A client QuicConnection instance managing its own file events.\nclass EnvoyQuicClientConnection : public EnvoyQuicConnection, public Network::UdpPacketProcessor {\npublic:\n  // A connection socket will be created with given |local_addr|. If binding\n  // port not provided in |local_addr|, pick up a random port.\n  EnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id,\n                            Network::Address::InstanceConstSharedPtr& initial_peer_address,\n                            quic::QuicConnectionHelperInterface& helper,\n                            quic::QuicAlarmFactory& alarm_factory,\n                            const quic::ParsedQuicVersionVector& supported_versions,\n                            Network::Address::InstanceConstSharedPtr local_addr,\n                            Event::Dispatcher& dispatcher,\n                            const Network::ConnectionSocket::OptionsSharedPtr& options);\n\n  EnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id,\n                            quic::QuicConnectionHelperInterface& helper,\n                            quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer,\n                            bool owns_writer,\n                            const quic::ParsedQuicVersionVector& supported_versions,\n                            Event::Dispatcher& dispatcher,\n                            Network::ConnectionSocketPtr&& connection_socket);\n\n  // Network::UdpPacketProcessor\n  void processPacket(Network::Address::InstanceConstSharedPtr local_address,\n                     Network::Address::InstanceConstSharedPtr peer_address,\n                     Buffer::InstancePtr buffer, MonotonicTime receive_time) override;\n  uint64_t 
maxPacketSize() const override;\n\n  // Register file event and apply socket options.\n  void setUpConnectionSocket();\n\n  // Switch underlying socket with the given one. This is used in connection migration.\n  void switchConnectionSocket(Network::ConnectionSocketPtr&& connection_socket);\n\nprivate:\n  EnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id,\n                            quic::QuicConnectionHelperInterface& helper,\n                            quic::QuicAlarmFactory& alarm_factory,\n                            const quic::ParsedQuicVersionVector& supported_versions,\n                            Event::Dispatcher& dispatcher,\n                            Network::ConnectionSocketPtr&& connection_socket);\n\n  void onFileEvent(uint32_t events);\n\n  uint32_t packets_dropped_{0};\n  Event::Dispatcher& dispatcher_;\n  Event::FileEventPtr file_event_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_client_session.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nEnvoyQuicClientSession::EnvoyQuicClientSession(\n    const quic::QuicConfig& config, const quic::ParsedQuicVersionVector& supported_versions,\n    std::unique_ptr<EnvoyQuicClientConnection> connection, const quic::QuicServerId& server_id,\n    quic::QuicCryptoClientConfig* crypto_config,\n    quic::QuicClientPushPromiseIndex* push_promise_index, Event::Dispatcher& dispatcher,\n    uint32_t send_buffer_limit)\n    : QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit),\n      quic::QuicSpdyClientSession(config, supported_versions, connection.release(), server_id,\n                                  crypto_config, push_promise_index) {}\n\nEnvoyQuicClientSession::~EnvoyQuicClientSession() {\n  ASSERT(!connection()->connected());\n  quic_connection_ = nullptr;\n}\n\nabsl::string_view EnvoyQuicClientSession::requestedServerName() const {\n  return {GetCryptoStream()->crypto_negotiated_params().sni};\n}\n\nvoid EnvoyQuicClientSession::connect() {\n  dynamic_cast<EnvoyQuicClientConnection*>(quic_connection_)->setUpConnectionSocket();\n  // Start version negotiation and crypto handshake during which the connection may fail if server\n  // doesn't support the one and only supported version.\n  CryptoConnect();\n  if (quic::VersionUsesHttp3(transport_version())) {\n    SetMaxPushId(0u);\n  }\n}\n\nvoid EnvoyQuicClientSession::OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame,\n                                                quic::ConnectionCloseSource source) {\n  quic::QuicSpdyClientSession::OnConnectionClosed(frame, source);\n  onConnectionCloseEvent(frame, source);\n}\n\nvoid EnvoyQuicClientSession::Initialize() {\n  quic::QuicSpdyClientSession::Initialize();\n  quic_connection_->setEnvoyConnection(*this);\n}\n\nvoid 
EnvoyQuicClientSession::OnCanWrite() {\n  const uint64_t headers_to_send_old =\n      quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes();\n  quic::QuicSpdyClientSession::OnCanWrite();\n  const uint64_t headers_to_send_new =\n      quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes();\n  adjustBytesToSend(headers_to_send_new - headers_to_send_old);\n  maybeApplyDelayClosePolicy();\n}\n\nvoid EnvoyQuicClientSession::OnGoAway(const quic::QuicGoAwayFrame& frame) {\n  ENVOY_CONN_LOG(debug, \"GOAWAY received with error {}: {}\", *this,\n                 quic::QuicErrorCodeToString(frame.error_code), frame.reason_phrase);\n  quic::QuicSpdyClientSession::OnGoAway(frame);\n  if (http_connection_callbacks_ != nullptr) {\n    http_connection_callbacks_->onGoAway(quicErrorCodeToEnvoyErrorCode(frame.error_code));\n  }\n}\n\nvoid EnvoyQuicClientSession::OnHttp3GoAway(uint64_t stream_id) {\n  ENVOY_CONN_LOG(debug, \"HTTP/3 GOAWAY received\", *this);\n  quic::QuicSpdyClientSession::OnHttp3GoAway(stream_id);\n  if (http_connection_callbacks_ != nullptr) {\n    // HTTP/3 GOAWAY doesn't have an error code field.\n    http_connection_callbacks_->onGoAway(Http::GoAwayErrorCode::NoError);\n  }\n}\n\nvoid EnvoyQuicClientSession::SetDefaultEncryptionLevel(quic::EncryptionLevel level) {\n  quic::QuicSpdyClientSession::SetDefaultEncryptionLevel(level);\n  if (level == quic::ENCRYPTION_FORWARD_SECURE) {\n    // This is only reached once, when handshake is done.\n    raiseConnectionEvent(Network::ConnectionEvent::Connected);\n  }\n}\n\nstd::unique_ptr<quic::QuicSpdyClientStream> EnvoyQuicClientSession::CreateClientStream() {\n  return std::make_unique<EnvoyQuicClientStream>(GetNextOutgoingBidirectionalStreamId(), this,\n                                                 quic::BIDIRECTIONAL);\n}\n\nquic::QuicSpdyStream* EnvoyQuicClientSession::CreateIncomingStream(quic::QuicStreamId /*id*/) {\n  // Disallow server 
initiated stream.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nquic::QuicSpdyStream*\nEnvoyQuicClientSession::CreateIncomingStream(quic::PendingStream* /*pending*/) {\n  // Disallow server initiated stream.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nbool EnvoyQuicClientSession::hasDataToWrite() { return HasDataToWrite(); }\n\nvoid EnvoyQuicClientSession::OnTlsHandshakeComplete() {\n  raiseConnectionEvent(Network::ConnectionEvent::Connected);\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_client_session.h",
    "content": "#pragma once\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#pragma GCC diagnostic ignored \"-Wtype-limits\"\n#endif\n\n#include \"quiche/quic/core/http/quic_spdy_client_session.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_stream.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_connection.h\"\n#include \"extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Act as a Network::ClientConnection to ClientCodec.\n// TODO(danzh) This class doesn't need to inherit Network::FilterManager\n// interface but need all other Network::Connection implementation in\n// QuicFilterManagerConnectionImpl. Refactor QuicFilterManagerConnectionImpl to\n// move FilterManager interface to EnvoyQuicServerSession.\nclass EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl,\n                               public quic::QuicSpdyClientSession,\n                               public Network::ClientConnection {\npublic:\n  EnvoyQuicClientSession(const quic::QuicConfig& config,\n                         const quic::ParsedQuicVersionVector& supported_versions,\n                         std::unique_ptr<EnvoyQuicClientConnection> connection,\n                         const quic::QuicServerId& server_id,\n                         quic::QuicCryptoClientConfig* crypto_config,\n                         quic::QuicClientPushPromiseIndex* push_promise_index,\n                         Event::Dispatcher& dispatcher, uint32_t send_buffer_limit);\n\n  ~EnvoyQuicClientSession() override;\n\n  // Called by QuicHttpClientConnectionImpl before creating data streams.\n  void setHttpConnectionCallbacks(Http::ConnectionCallbacks& callbacks) {\n    http_connection_callbacks_ = &callbacks;\n  }\n\n  // 
Network::Connection\n  absl::string_view requestedServerName() const override;\n\n  // Network::ClientConnection\n  // Set up socket and start handshake.\n  void connect() override;\n\n  // quic::QuicSession\n  void OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame,\n                          quic::ConnectionCloseSource source) override;\n  void Initialize() override;\n  void OnCanWrite() override;\n  void OnGoAway(const quic::QuicGoAwayFrame& frame) override;\n  void OnHttp3GoAway(uint64_t stream_id) override;\n  void OnTlsHandshakeComplete() override;\n  // quic::QuicSpdyClientSessionBase\n  void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override;\n\n  using quic::QuicSpdyClientSession::stream_map;\n\nprotected:\n  // quic::QuicSpdyClientSession\n  std::unique_ptr<quic::QuicSpdyClientStream> CreateClientStream() override;\n  // quic::QuicSpdySession\n  quic::QuicSpdyStream* CreateIncomingStream(quic::QuicStreamId id) override;\n  quic::QuicSpdyStream* CreateIncomingStream(quic::PendingStream* pending) override;\n\n  // QuicFilterManagerConnectionImpl\n  bool hasDataToWrite() override;\n\nprivate:\n  // These callbacks are owned by network filters and quic session should outlive\n  // them.\n  Http::ConnectionCallbacks* http_connection_callbacks_{nullptr};\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_client_stream.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/quic_session.h\"\n#include \"quiche/quic/core/http/quic_header_list.h\"\n#include \"quiche/spdy/core/spdy_header_block.h\"\n#include \"extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_session.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nEnvoyQuicClientStream::EnvoyQuicClientStream(quic::QuicStreamId id,\n                                             quic::QuicSpdyClientSession* client_session,\n                                             quic::StreamType type)\n    : quic::QuicSpdyClientStream(id, client_session, type),\n      EnvoyQuicStream(\n          // This should be larger than 8k to fully utilize congestion control\n          // window. 
And no larger than the max stream flow control window for\n          // the stream to buffer all the data.\n          // Ideally this limit should also correlate to peer's receive window\n          // but not fully depends on that.\n          16 * 1024, [this]() { runLowWatermarkCallbacks(); },\n          [this]() { runHighWatermarkCallbacks(); }) {}\n\nEnvoyQuicClientStream::EnvoyQuicClientStream(quic::PendingStream* pending,\n                                             quic::QuicSpdyClientSession* client_session,\n                                             quic::StreamType type)\n    : quic::QuicSpdyClientStream(pending, client_session, type),\n      EnvoyQuicStream(\n          16 * 1024, [this]() { runLowWatermarkCallbacks(); },\n          [this]() { runHighWatermarkCallbacks(); }) {}\n\nvoid EnvoyQuicClientStream::encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) {\n  ENVOY_STREAM_LOG(debug, \"encodeHeaders: (end_stream={}) {}.\", *this, end_stream, headers);\n  quic::QuicStream* writing_stream =\n      quic::VersionUsesHttp3(transport_version())\n          ? static_cast<quic::QuicStream*>(this)\n          : (dynamic_cast<quic::QuicSpdySession*>(session())->headers_stream());\n  const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes();\n  WriteHeaders(envoyHeadersToSpdyHeaderBlock(headers), end_stream, nullptr);\n  local_end_stream_ = end_stream;\n  const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes();\n  ASSERT(bytes_to_send_old <= bytes_to_send_new);\n  // IETF QUIC sends HEADER frame on current stream. 
After writing headers, the\n  // buffer may increase.\n  maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection());\n}\n\nvoid EnvoyQuicClientStream::encodeData(Buffer::Instance& data, bool end_stream) {\n  ENVOY_STREAM_LOG(debug, \"encodeData (end_stream={}) of {} bytes.\", *this, end_stream,\n                   data.length());\n  local_end_stream_ = end_stream;\n  // This is counting not serialized bytes in the send buffer.\n  const uint64_t bytes_to_send_old = BufferedDataBytes();\n  // QUIC stream must take all.\n  WriteBodySlices(quic::QuicMemSliceSpan(quic::QuicMemSliceSpanImpl(data)), end_stream);\n  if (data.length() > 0) {\n    // Send buffer didn't take all the data, threshold needs to be adjusted.\n    Reset(quic::QUIC_BAD_APPLICATION_PAYLOAD);\n    return;\n  }\n\n  const uint64_t bytes_to_send_new = BufferedDataBytes();\n  ASSERT(bytes_to_send_old <= bytes_to_send_new);\n  maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection());\n}\n\nvoid EnvoyQuicClientStream::encodeTrailers(const Http::RequestTrailerMap& trailers) {\n  ASSERT(!local_end_stream_);\n  local_end_stream_ = true;\n  ENVOY_STREAM_LOG(debug, \"encodeTrailers: {}.\", *this, trailers);\n  quic::QuicStream* writing_stream =\n      quic::VersionUsesHttp3(transport_version())\n          ? static_cast<quic::QuicStream*>(this)\n          : (dynamic_cast<quic::QuicSpdySession*>(session())->headers_stream());\n\n  const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes();\n  WriteTrailers(envoyHeadersToSpdyHeaderBlock(trailers), nullptr);\n  const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes();\n  ASSERT(bytes_to_send_old <= bytes_to_send_new);\n  // IETF QUIC sends HEADER frame on current stream. 
After writing trailers, the\n  // buffer may increase.\n  maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection());\n}\n\nvoid EnvoyQuicClientStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) {\n  // Metadata Frame is not supported in QUIC.\n  // TODO(danzh): add stats for metadata not supported error.\n}\n\nvoid EnvoyQuicClientStream::resetStream(Http::StreamResetReason reason) {\n  // Higher layers expect calling resetStream() to immediately raise reset callbacks.\n  runResetCallbacks(reason);\n  Reset(envoyResetReasonToQuicRstError(reason));\n}\n\nvoid EnvoyQuicClientStream::switchStreamBlockState(bool should_block) {\n  ASSERT(FinishedReadingHeaders(),\n         \"Upper stream buffer limit is reached before response body is delivered.\");\n  if (should_block) {\n    sequencer()->SetBlockedUntilFlush();\n  } else {\n    ASSERT(read_disable_counter_ == 0, \"readDisable called in between.\");\n    sequencer()->SetUnblocked();\n  }\n}\n\nvoid EnvoyQuicClientStream::OnInitialHeadersComplete(bool fin, size_t frame_len,\n                                                     const quic::QuicHeaderList& header_list) {\n  quic::QuicSpdyStream::OnInitialHeadersComplete(fin, frame_len, header_list);\n  if (rst_sent()) {\n    return;\n  }\n  ASSERT(headers_decompressed());\n  response_decoder_->decodeHeaders(\n      quicHeadersToEnvoyHeaders<Http::ResponseHeaderMapImpl>(header_list), /*end_stream=*/fin);\n  if (fin) {\n    end_stream_decoded_ = true;\n  }\n  ConsumeHeaderList();\n}\n\nvoid EnvoyQuicClientStream::OnBodyAvailable() {\n  ASSERT(FinishedReadingHeaders());\n  ASSERT(read_disable_counter_ == 0);\n  ASSERT(!in_decode_data_callstack_);\n  in_decode_data_callstack_ = true;\n\n  Buffer::InstancePtr buffer = std::make_unique<Buffer::OwnedImpl>();\n  // TODO(danzh): check Envoy per stream buffer limit.\n  // Currently read out all the data.\n  while (HasBytesToRead()) {\n    iovec iov;\n    int num_regions = 
GetReadableRegions(&iov, 1);\n    ASSERT(num_regions > 0);\n    size_t bytes_read = iov.iov_len;\n    Buffer::RawSlice slice;\n    buffer->reserve(bytes_read, &slice, 1);\n    ASSERT(slice.len_ >= bytes_read);\n    slice.len_ = bytes_read;\n    memcpy(slice.mem_, iov.iov_base, iov.iov_len);\n    buffer->commit(&slice, 1);\n    MarkConsumed(bytes_read);\n  }\n\n  // True if no trailer and FIN read.\n  bool finished_reading = IsDoneReading();\n  bool empty_payload_with_fin = buffer->length() == 0 && fin_received();\n  // If this call is triggered by an empty frame with FIN which is not from peer\n  // but synthesized by stream itself upon receiving HEADERS with FIN or\n  // TRAILERS, do not deliver end of stream here. Because either decodeHeaders\n  // already delivered it or decodeTrailers will be called.\n  bool skip_decoding = empty_payload_with_fin && (end_stream_decoded_ || !finished_reading);\n  if (!skip_decoding) {\n    response_decoder_->decodeData(*buffer, finished_reading);\n    if (finished_reading) {\n      end_stream_decoded_ = true;\n    }\n  }\n\n  if (!sequencer()->IsClosed()) {\n    in_decode_data_callstack_ = false;\n    if (read_disable_counter_ > 0) {\n      // If readDisable() was ever called during decodeData() and it meant to disable\n      // reading from downstream, the call must have been deferred. Call it now.\n      switchStreamBlockState(true);\n    }\n    return;\n  }\n\n  if (!quic::VersionUsesHttp3(transport_version()) && !FinishedReadingTrailers()) {\n    // For Google QUIC implementation, trailers may arrived earlier and wait to\n    // be consumed after reading all the body. 
Consume it here.\n    // IETF QUIC shouldn't reach here because trailers are sent on same stream.\n    response_decoder_->decodeTrailers(\n        spdyHeaderBlockToEnvoyHeaders<Http::ResponseTrailerMapImpl>(received_trailers()));\n    MarkTrailersConsumed();\n  }\n  OnFinRead();\n  in_decode_data_callstack_ = false;\n}\n\nvoid EnvoyQuicClientStream::OnTrailingHeadersComplete(bool fin, size_t frame_len,\n                                                      const quic::QuicHeaderList& header_list) {\n  quic::QuicSpdyStream::OnTrailingHeadersComplete(fin, frame_len, header_list);\n  ASSERT(trailers_decompressed());\n  if (session()->connection()->connected() &&\n      (quic::VersionUsesHttp3(transport_version()) || sequencer()->IsClosed()) &&\n      !FinishedReadingTrailers()) {\n    // Before QPack, trailers can arrive before body. Only decode trailers after finishing decoding\n    // body.\n    response_decoder_->decodeTrailers(\n        spdyHeaderBlockToEnvoyHeaders<Http::ResponseTrailerMapImpl>(received_trailers()));\n    MarkTrailersConsumed();\n  }\n}\n\nvoid EnvoyQuicClientStream::OnStreamReset(const quic::QuicRstStreamFrame& frame) {\n  quic::QuicSpdyClientStream::OnStreamReset(frame);\n  runResetCallbacks(quicRstErrorToEnvoyResetReason(frame.error_code));\n}\n\nvoid EnvoyQuicClientStream::OnConnectionClosed(quic::QuicErrorCode error,\n                                               quic::ConnectionCloseSource source) {\n  quic::QuicSpdyClientStream::OnConnectionClosed(error, source);\n  runResetCallbacks(quicErrorCodeToEnvoyResetReason(error));\n}\n\nvoid EnvoyQuicClientStream::OnClose() {\n  quic::QuicSpdyClientStream::OnClose();\n  if (BufferedDataBytes() > 0) {\n    // If the stream is closed without sending out all buffered data, regard\n    // them as sent now and adjust connection buffer book keeping.\n    filterManagerConnection()->adjustBytesToSend(0 - BufferedDataBytes());\n  }\n}\n\nvoid EnvoyQuicClientStream::OnCanWrite() {\n  uint64_t 
buffered_data_old = BufferedDataBytes();\n  quic::QuicSpdyClientStream::OnCanWrite();\n  uint64_t buffered_data_new = BufferedDataBytes();\n  // As long as OnCanWriteNewData() is no-op, data to sent in buffer shouldn't\n  // increase.\n  ASSERT(buffered_data_new <= buffered_data_old);\n  maybeCheckWatermark(buffered_data_old, buffered_data_new, *filterManagerConnection());\n}\n\nuint32_t EnvoyQuicClientStream::streamId() { return id(); }\n\nNetwork::Connection* EnvoyQuicClientStream::connection() { return filterManagerConnection(); }\n\nQuicFilterManagerConnectionImpl* EnvoyQuicClientStream::filterManagerConnection() {\n  return dynamic_cast<QuicFilterManagerConnectionImpl*>(session());\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h",
    "content": "#pragma once\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/http/quic_spdy_client_stream.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_stream.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// This class is a quic stream and also a request encoder.\nclass EnvoyQuicClientStream : public quic::QuicSpdyClientStream,\n                              public EnvoyQuicStream,\n                              public Http::RequestEncoder {\npublic:\n  EnvoyQuicClientStream(quic::QuicStreamId id, quic::QuicSpdyClientSession* client_session,\n                        quic::StreamType type);\n  EnvoyQuicClientStream(quic::PendingStream* pending, quic::QuicSpdyClientSession* client_session,\n                        quic::StreamType type);\n\n  void setResponseDecoder(Http::ResponseDecoder& decoder) { response_decoder_ = &decoder; }\n\n  // Http::StreamEncoder\n  void encodeData(Buffer::Instance& data, bool end_stream) override;\n  void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) override;\n  Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override {\n    return absl::nullopt;\n  }\n\n  // Http::RequestEncoder\n  void encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) override;\n  void encodeTrailers(const Http::RequestTrailerMap& trailers) override;\n\n  // Http::Stream\n  void resetStream(Http::StreamResetReason reason) override;\n  void setFlushTimeout(std::chrono::milliseconds) override {}\n  // quic::QuicSpdyStream\n  void OnBodyAvailable() override;\n  void OnStreamReset(const quic::QuicRstStreamFrame& frame) override;\n  void OnClose() override;\n  void OnCanWrite() override;\n  // quic::Stream\n  void OnConnectionClosed(quic::QuicErrorCode error, quic::ConnectionCloseSource 
source) override;\n\nprotected:\n  // EnvoyQuicStream\n  void switchStreamBlockState(bool should_block) override;\n  uint32_t streamId() override;\n  Network::Connection* connection() override;\n\n  // quic::QuicSpdyStream\n  // Overridden to pass headers to decoder.\n  void OnInitialHeadersComplete(bool fin, size_t frame_len,\n                                const quic::QuicHeaderList& header_list) override;\n  void OnTrailingHeadersComplete(bool fin, size_t frame_len,\n                                 const quic::QuicHeaderList& header_list) override;\n\nprivate:\n  QuicFilterManagerConnectionImpl* filterManagerConnection();\n\n  Http::ResponseDecoder* response_decoder_{nullptr};\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_connection.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_connection.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nEnvoyQuicConnection::EnvoyQuicConnection(const quic::QuicConnectionId& server_connection_id,\n                                         quic::QuicSocketAddress initial_peer_address,\n                                         quic::QuicConnectionHelperInterface& helper,\n                                         quic::QuicAlarmFactory& alarm_factory,\n                                         quic::QuicPacketWriter* writer, bool owns_writer,\n                                         quic::Perspective perspective,\n                                         const quic::ParsedQuicVersionVector& supported_versions,\n                                         Network::ConnectionSocketPtr&& connection_socket)\n    : quic::QuicConnection(server_connection_id, initial_peer_address, &helper, &alarm_factory,\n                           writer, owns_writer, perspective, supported_versions),\n      connection_socket_(std::move(connection_socket)) {}\n\nEnvoyQuicConnection::~EnvoyQuicConnection() { connection_socket_->close(); }\n\nuint64_t EnvoyQuicConnection::id() const { return envoy_connection_->id(); }\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_connection.h",
    "content": "#pragma once\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/quic_connection.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include <memory>\n\n#include \"common/common/logger.h\"\n#include \"envoy/network/connection.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Derived for network filter chain, stats and QoS. This is used on both client\n// and server side.\nclass EnvoyQuicConnection : public quic::QuicConnection,\n                            protected Logger::Loggable<Logger::Id::connection> {\npublic:\n  EnvoyQuicConnection(const quic::QuicConnectionId& server_connection_id,\n                      quic::QuicSocketAddress initial_peer_address,\n                      quic::QuicConnectionHelperInterface& helper,\n                      quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer,\n                      bool owns_writer, quic::Perspective perspective,\n                      const quic::ParsedQuicVersionVector& supported_versions,\n                      Network::ConnectionSocketPtr&& connection_socket);\n\n  ~EnvoyQuicConnection() override;\n\n  // Called by EnvoyQuicSession::setConnectionStats().\n  void setConnectionStats(const Network::Connection::ConnectionStats& stats) {\n    connection_stats_ = std::make_unique<Network::Connection::ConnectionStats>(stats);\n  }\n\n  // Called in session Initialize().\n  void setEnvoyConnection(Network::Connection& connection) { envoy_connection_ = &connection; }\n\n  const Network::ConnectionSocketPtr& connectionSocket() const { return connection_socket_; }\n\n  // Needed for ENVOY_CONN_LOG.\n  uint64_t id() const;\n\nprotected:\n  Network::Connection::ConnectionStats& connectionStats() const { return *connection_stats_; }\n\n  Network::Connection& envoyConnection() const {\n    ASSERT(envoy_connection_ != 
nullptr);\n    return *envoy_connection_;\n  }\n\n  void setConnectionSocket(Network::ConnectionSocketPtr&& connection_socket) {\n    connection_socket_ = std::move(connection_socket);\n  }\n\nprivate:\n  // TODO(danzh): populate stats.\n  std::unique_ptr<Network::Connection::ConnectionStats> connection_stats_;\n  // Assigned upon construction. Constructed with empty local address if unknown\n  // by then.\n  Network::ConnectionSocketPtr connection_socket_;\n  // Points to an instance of EnvoyQuicServerSession or EnvoyQuicClientSession.\n  Network::Connection* envoy_connection_{nullptr};\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_connection_helper.h",
    "content": "#pragma once\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#pragma GCC diagnostic ignored \"-Wtype-limits\"\n#endif\n\n#include \"quiche/quic/core/crypto/quic_random.h\"\n#include \"quiche/quic/core/quic_connection.h\"\n#include \"quiche/quic/core/quic_simple_buffer_allocator.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/platform/envoy_quic_clock.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Derived to provide EnvoyQuicClock and default random generator and buffer\n// allocator.\nclass EnvoyQuicConnectionHelper : public quic::QuicConnectionHelperInterface {\npublic:\n  EnvoyQuicConnectionHelper(Event::Dispatcher& dispatcher)\n      : clock_(dispatcher), random_generator_(quic::QuicRandom::GetInstance()) {}\n\n  ~EnvoyQuicConnectionHelper() override = default;\n\n  // QuicConnectionHelperInterface\n  const quic::QuicClock* GetClock() const override { return &clock_; }\n  quic::QuicRandom* GetRandomGenerator() override { return random_generator_; }\n  quic::QuicBufferAllocator* GetStreamSendBufferAllocator() override { return &buffer_allocator_; }\n\nprivate:\n  EnvoyQuicClock clock_;\n  quic::QuicRandom* random_generator_ = nullptr;\n  quic::SimpleBufferAllocator buffer_allocator_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_dispatcher.h\"\n\n#include \"common/http/utility.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_connection.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_session.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nEnvoyQuicDispatcher::EnvoyQuicDispatcher(\n    const quic::QuicCryptoServerConfig* crypto_config, const quic::QuicConfig& quic_config,\n    quic::QuicVersionManager* version_manager,\n    std::unique_ptr<quic::QuicConnectionHelperInterface> helper,\n    std::unique_ptr<quic::QuicAlarmFactory> alarm_factory,\n    uint8_t expected_server_connection_id_length, Network::ConnectionHandler& connection_handler,\n    Network::ListenerConfig& listener_config, Server::ListenerStats& listener_stats,\n    Server::PerHandlerListenerStats& per_worker_stats, Event::Dispatcher& dispatcher,\n    Network::Socket& listen_socket)\n    : quic::QuicDispatcher(&quic_config, crypto_config, version_manager, std::move(helper),\n                           std::make_unique<EnvoyQuicCryptoServerStreamHelper>(),\n                           std::move(alarm_factory), expected_server_connection_id_length),\n      connection_handler_(connection_handler), listener_config_(listener_config),\n      listener_stats_(listener_stats), per_worker_stats_(per_worker_stats), dispatcher_(dispatcher),\n      listen_socket_(listen_socket) {\n  // Set send buffer twice of max flow control window to ensure that stream send\n  // buffer always takes all the data.\n  // The max amount of data buffered is the per-stream high watermark + the max\n  // flow control window of upstream. The per-stream high watermark should be\n  // smaller than max flow control window to make sure upper stream can be flow\n  // control blocked early enough not to send more than the threshold allows.\n  // TODO(#8826) Ideally we should use the negotiated value from upstream which is not accessible\n  // for now. 
512MB is way to large, but the actual bytes buffered should be bound by the negotiated\n  // upstream flow control window.\n  SetQuicFlag(\n      FLAGS_quic_buffered_data_threshold,\n      2 * ::Envoy::Http2::Utility::OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE); // 512MB\n}\n\nvoid EnvoyQuicDispatcher::OnConnectionClosed(quic::QuicConnectionId connection_id,\n                                             quic::QuicErrorCode error,\n                                             const std::string& error_details,\n                                             quic::ConnectionCloseSource source) {\n  quic::QuicDispatcher::OnConnectionClosed(connection_id, error, error_details, source);\n  listener_stats_.downstream_cx_active_.dec();\n  per_worker_stats_.downstream_cx_active_.dec();\n  connection_handler_.decNumConnections();\n}\n\nstd::unique_ptr<quic::QuicSession> EnvoyQuicDispatcher::CreateQuicSession(\n    quic::QuicConnectionId server_connection_id, const quic::QuicSocketAddress& /*self_address*/,\n    const quic::QuicSocketAddress& peer_address, quiche::QuicheStringPiece /*alpn*/,\n    const quic::ParsedQuicVersion& version) {\n  auto quic_connection = std::make_unique<EnvoyQuicServerConnection>(\n      server_connection_id, peer_address, *helper(), *alarm_factory(), writer(),\n      /*owns_writer=*/false, quic::ParsedQuicVersionVector{version}, listen_socket_);\n  auto quic_session = std::make_unique<EnvoyQuicServerSession>(\n      config(), quic::ParsedQuicVersionVector{version}, std::move(quic_connection), this,\n      session_helper(), crypto_config(), compressed_certs_cache(), dispatcher_,\n      listener_config_.perConnectionBufferLimitBytes(), listener_config_);\n  quic_session->Initialize();\n  // Filter chain can't be retrieved here as self address is unknown at this\n  // point.\n  // TODO(danzh): change QUIC interface to pass in self address as it is already\n  // known. In this way, filter chain can be retrieved at this point. 
But one\n  // thing to pay attention is that if the retrieval fails, connection needs to\n  // be closed, and it should be added to time wait list instead of session map.\n  connection_handler_.incNumConnections();\n  listener_stats_.downstream_cx_active_.inc();\n  listener_stats_.downstream_cx_total_.inc();\n  per_worker_stats_.downstream_cx_active_.inc();\n  per_worker_stats_.downstream_cx_total_.inc();\n  return quic_session;\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h",
    "content": "#pragma once\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#pragma GCC diagnostic ignored \"-Wtype-limits\"\n#endif\n\n#include \"quiche/quic/core/quic_dispatcher.h\"\n#include \"quiche/quic/core/quic_utils.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include <string>\n\n#include \"envoy/network/listener.h\"\n#include \"server/connection_handler_impl.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Envoy specific provider of server connection id and decision maker of\n// accepting new connection or not.\nclass EnvoyQuicCryptoServerStreamHelper : public quic::QuicCryptoServerStreamBase::Helper {\npublic:\n  ~EnvoyQuicCryptoServerStreamHelper() override = default;\n\n  // quic::QuicCryptoServerStream::Helper\n  bool CanAcceptClientHello(const quic::CryptoHandshakeMessage& /*message*/,\n                            const quic::QuicSocketAddress& /*client_address*/,\n                            const quic::QuicSocketAddress& /*peer_address*/,\n                            const quic::QuicSocketAddress& /*self_address*/,\n                            std::string* /*error_details*/) const override {\n    // TODO(danzh): decide to accept or not based on information from given handshake message, i.e.\n    // user agent and SNI.\n    return true;\n  }\n};\n\nclass EnvoyQuicDispatcher : public quic::QuicDispatcher {\npublic:\n  EnvoyQuicDispatcher(const quic::QuicCryptoServerConfig* crypto_config,\n                      const quic::QuicConfig& quic_config,\n                      quic::QuicVersionManager* version_manager,\n                      std::unique_ptr<quic::QuicConnectionHelperInterface> helper,\n                      std::unique_ptr<quic::QuicAlarmFactory> alarm_factory,\n                      uint8_t expected_server_connection_id_length,\n                      Network::ConnectionHandler& connection_handler,\n    
                  Network::ListenerConfig& listener_config,\n                      Server::ListenerStats& listener_stats,\n                      Server::PerHandlerListenerStats& per_worker_stats,\n                      Event::Dispatcher& dispatcher, Network::Socket& listen_socket);\n\n  void OnConnectionClosed(quic::QuicConnectionId connection_id, quic::QuicErrorCode error,\n                          const std::string& error_details,\n                          quic::ConnectionCloseSource source) override;\n\nprotected:\n  std::unique_ptr<quic::QuicSession>\n  CreateQuicSession(quic::QuicConnectionId server_connection_id,\n                    const quic::QuicSocketAddress& self_address,\n                    const quic::QuicSocketAddress& peer_address, quiche::QuicheStringPiece alpn,\n                    const quic::ParsedQuicVersion& version) override;\n\nprivate:\n  Network::ConnectionHandler& connection_handler_;\n  Network::ListenerConfig& listener_config_;\n  Server::ListenerStats& listener_stats_;\n  Server::PerHandlerListenerStats& per_worker_stats_;\n  Event::Dispatcher& dispatcher_;\n  Network::Socket& listen_socket_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_packet_writer.h\"\n\n#include <memory>\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nnamespace {\n\nquic::WriteResult convertToQuicWriteResult(Api::IoCallUint64Result& result) {\n  if (result.ok()) {\n    return {quic::WRITE_STATUS_OK, static_cast<int>(result.rc_)};\n  }\n  quic::WriteStatus status = result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again\n                                 ? quic::WRITE_STATUS_BLOCKED\n                                 : quic::WRITE_STATUS_ERROR;\n  return {status, static_cast<int>(result.err_->getErrorCode())};\n}\n\n} // namespace\n\nEnvoyQuicPacketWriter::EnvoyQuicPacketWriter(Network::UdpPacketWriterPtr envoy_udp_packet_writer)\n    : envoy_udp_packet_writer_(std::move(envoy_udp_packet_writer)) {}\n\nquic::WriteResult EnvoyQuicPacketWriter::WritePacket(const char* buffer, size_t buffer_len,\n                                                     const quic::QuicIpAddress& self_ip,\n                                                     const quic::QuicSocketAddress& peer_address,\n                                                     quic::PerPacketOptions* options) {\n  ASSERT(options == nullptr, \"Per packet option is not supported yet.\");\n\n  Buffer::BufferFragmentImpl fragment(buffer, buffer_len, nullptr);\n  Buffer::OwnedImpl buf;\n  buf.addBufferFragment(fragment);\n\n  quic::QuicSocketAddress self_address(self_ip, /*port=*/0);\n  Network::Address::InstanceConstSharedPtr local_addr =\n      quicAddressToEnvoyAddressInstance(self_address);\n  Network::Address::InstanceConstSharedPtr remote_addr =\n      quicAddressToEnvoyAddressInstance(peer_address);\n\n  Api::IoCallUint64Result result = envoy_udp_packet_writer_->writePacket(\n      buf, local_addr == nullptr ? 
nullptr : local_addr->ip(), *remote_addr);\n\n  return convertToQuicWriteResult(result);\n}\n\nquic::QuicByteCount\nEnvoyQuicPacketWriter::GetMaxPacketSize(const quic::QuicSocketAddress& peer_address) const {\n  Network::Address::InstanceConstSharedPtr remote_addr =\n      quicAddressToEnvoyAddressInstance(peer_address);\n  return static_cast<quic::QuicByteCount>(envoy_udp_packet_writer_->getMaxPacketSize(*remote_addr));\n}\n\nquic::QuicPacketBuffer\nEnvoyQuicPacketWriter::GetNextWriteLocation(const quic::QuicIpAddress& self_ip,\n                                            const quic::QuicSocketAddress& peer_address) {\n  quic::QuicSocketAddress self_address(self_ip, /*port=*/0);\n  Network::Address::InstanceConstSharedPtr local_addr =\n      quicAddressToEnvoyAddressInstance(self_address);\n  Network::Address::InstanceConstSharedPtr remote_addr =\n      quicAddressToEnvoyAddressInstance(peer_address);\n  Network::UdpPacketWriterBuffer write_location = envoy_udp_packet_writer_->getNextWriteLocation(\n      local_addr == nullptr ? nullptr : local_addr->ip(), *remote_addr);\n  return quic::QuicPacketBuffer(reinterpret_cast<char*>(write_location.buffer_),\n                                write_location.release_buffer_);\n}\n\nquic::WriteResult EnvoyQuicPacketWriter::Flush() {\n  Api::IoCallUint64Result result = envoy_udp_packet_writer_->flush();\n  return convertToQuicWriteResult(result);\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h",
    "content": "#pragma once\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/quic_packet_writer.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"envoy/network/udp_packet_writer_handler.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nclass EnvoyQuicPacketWriter : public quic::QuicPacketWriter {\npublic:\n  EnvoyQuicPacketWriter(Network::UdpPacketWriterPtr envoy_udp_packet_writer);\n\n  quic::WriteResult WritePacket(const char* buffer, size_t buf_len,\n                                const quic::QuicIpAddress& self_address,\n                                const quic::QuicSocketAddress& peer_address,\n                                quic::PerPacketOptions* options) override;\n\n  // quic::QuicPacketWriter\n  bool IsWriteBlocked() const override { return envoy_udp_packet_writer_->isWriteBlocked(); }\n  void SetWritable() override { envoy_udp_packet_writer_->setWritable(); }\n  bool IsBatchMode() const override { return envoy_udp_packet_writer_->isBatchMode(); }\n  // Currently this writer doesn't support pacing offload.\n  bool SupportsReleaseTime() const override { return false; }\n\n  quic::QuicByteCount GetMaxPacketSize(const quic::QuicSocketAddress& peer_address) const override;\n  quic::QuicPacketBuffer GetNextWriteLocation(const quic::QuicIpAddress& self_address,\n                                              const quic::QuicSocketAddress& peer_address) override;\n  quic::WriteResult Flush() override;\n\nprivate:\n  Network::UdpPacketWriterPtr envoy_udp_packet_writer_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_proof_source.h\"\n\n#include <openssl/bio.h>\n\n#include \"envoy/ssl/tls_certificate_config.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/quic_listeners/quiche/quic_io_handle_wrapper.h\"\n#include \"extensions/transport_sockets/well_known_names.h\"\n\n#include \"openssl/bytestring.h\"\n#include \"quiche/quic/core/crypto/certificate_view.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nquic::QuicReferenceCountedPointer<quic::ProofSource::Chain>\nEnvoyQuicProofSource::GetCertChain(const quic::QuicSocketAddress& server_address,\n                                   const quic::QuicSocketAddress& client_address,\n                                   const std::string& hostname) {\n  CertConfigWithFilterChain res =\n      getTlsCertConfigAndFilterChain(server_address, client_address, hostname);\n  absl::optional<std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>> cert_config_ref =\n      res.cert_config_;\n  if (!cert_config_ref.has_value()) {\n    ENVOY_LOG(warn, \"No matching filter chain found for handshake.\");\n    return nullptr;\n  }\n  auto& cert_config = cert_config_ref.value().get();\n  const std::string& chain_str = cert_config.certificateChain();\n  std::stringstream pem_stream(chain_str);\n  std::vector<std::string> chain = quic::CertificateView::LoadPemFromStream(&pem_stream);\n  return quic::QuicReferenceCountedPointer<quic::ProofSource::Chain>(\n      new quic::ProofSource::Chain(chain));\n}\n\nvoid EnvoyQuicProofSource::signPayload(\n    const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address,\n    const std::string& hostname, uint16_t signature_algorithm, quiche::QuicheStringPiece in,\n    std::unique_ptr<quic::ProofSource::SignatureCallback> callback) {\n  CertConfigWithFilterChain res =\n      getTlsCertConfigAndFilterChain(server_address, client_address, hostname);\n  
absl::optional<std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>> cert_config_ref =\n      res.cert_config_;\n  if (!cert_config_ref.has_value()) {\n    ENVOY_LOG(warn, \"No matching filter chain found for handshake.\");\n    callback->Run(false, \"\", nullptr);\n    return;\n  }\n  auto& cert_config = cert_config_ref.value().get();\n  // Load private key.\n  const std::string& pkey = cert_config.privateKey();\n  std::stringstream pem_str(pkey);\n  std::unique_ptr<quic::CertificatePrivateKey> pem_key =\n      quic::CertificatePrivateKey::LoadPemFromStream(&pem_str);\n  if (pem_key == nullptr) {\n    ENVOY_LOG(warn, \"Failed to load private key.\");\n    callback->Run(false, \"\", nullptr);\n    return;\n  }\n  // Verify the signature algorithm is as expected.\n  std::string error_details;\n  int sign_alg = deduceSignatureAlgorithmFromPublicKey(pem_key->private_key(), &error_details);\n  if (sign_alg != signature_algorithm) {\n    ENVOY_LOG(warn,\n              fmt::format(\"The signature algorithm {} from the private key is not expected: {}\",\n                          sign_alg, error_details));\n    callback->Run(false, \"\", nullptr);\n    return;\n  }\n\n  // Sign.\n  std::string sig = pem_key->Sign(in, signature_algorithm);\n  bool success = !sig.empty();\n  ASSERT(res.filter_chain_.has_value());\n  callback->Run(success, sig,\n                std::make_unique<EnvoyQuicProofSourceDetails>(res.filter_chain_.value().get()));\n}\n\nEnvoyQuicProofSource::CertConfigWithFilterChain\nEnvoyQuicProofSource::getTlsCertConfigAndFilterChain(const quic::QuicSocketAddress& server_address,\n                                                     const quic::QuicSocketAddress& client_address,\n                                                     const std::string& hostname) {\n  ENVOY_LOG(trace, \"Getting cert chain for {}\", hostname);\n  Network::ConnectionSocketImpl connection_socket(\n      std::make_unique<QuicIoHandleWrapper>(listen_socket_.ioHandle()),\n      
quicAddressToEnvoyAddressInstance(server_address),\n      quicAddressToEnvoyAddressInstance(client_address));\n  connection_socket.setDetectedTransportProtocol(\n      Extensions::TransportSockets::TransportProtocolNames::get().Quic);\n  connection_socket.setRequestedServerName(hostname);\n  connection_socket.setRequestedApplicationProtocols({\"h2\"});\n  const Network::FilterChain* filter_chain =\n      filter_chain_manager_.findFilterChain(connection_socket);\n  if (filter_chain == nullptr) {\n    listener_stats_.no_filter_chain_match_.inc();\n    return {absl::nullopt, absl::nullopt};\n  }\n  const Network::TransportSocketFactory& transport_socket_factory =\n      filter_chain->transportSocketFactory();\n  std::vector<std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>> tls_cert_configs =\n      dynamic_cast<const QuicServerTransportSocketFactory&>(transport_socket_factory)\n          .serverContextConfig()\n          .tlsCertificates();\n\n  // Only return the first TLS cert config.\n  // TODO(danzh) Choose based on supported cipher suites in TLS1.3 CHLO and prefer EC\n  // certs if supported.\n  return {tls_cert_configs[0].get(), *filter_chain};\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h",
    "content": "#pragma once\n\n#include \"server/connection_handler_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h\"\n#include \"extensions/quic_listeners/quiche/quic_transport_socket_factory.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// A ProofSource implementation which supplies a proof instance with certs from filter chain.\nclass EnvoyQuicProofSource : public EnvoyQuicProofSourceBase {\npublic:\n  EnvoyQuicProofSource(Network::Socket& listen_socket,\n                       Network::FilterChainManager& filter_chain_manager,\n                       Server::ListenerStats& listener_stats)\n      : listen_socket_(listen_socket), filter_chain_manager_(filter_chain_manager),\n        listener_stats_(listener_stats) {}\n\n  ~EnvoyQuicProofSource() override = default;\n\n  // quic::ProofSource\n  quic::QuicReferenceCountedPointer<quic::ProofSource::Chain>\n  GetCertChain(const quic::QuicSocketAddress& server_address,\n               const quic::QuicSocketAddress& client_address, const std::string& hostname) override;\n\nprotected:\n  // quic::ProofSource\n  void signPayload(const quic::QuicSocketAddress& server_address,\n                   const quic::QuicSocketAddress& client_address, const std::string& hostname,\n                   uint16_t signature_algorithm, quiche::QuicheStringPiece in,\n                   std::unique_ptr<quic::ProofSource::SignatureCallback> callback) override;\n\nprivate:\n  struct CertConfigWithFilterChain {\n    absl::optional<std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>> cert_config_;\n    absl::optional<std::reference_wrapper<const Network::FilterChain>> filter_chain_;\n  };\n\n  CertConfigWithFilterChain\n  getTlsCertConfigAndFilterChain(const quic::QuicSocketAddress& server_address,\n                                 const quic::QuicSocketAddress& client_address,\n                                 const std::string& hostname);\n\n  Network::Socket& listen_socket_;\n  
Network::FilterChainManager& filter_chain_manager_;\n  Server::ListenerStats& listener_stats_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#endif\n\n#include \"quiche/quic/core/quic_data_writer.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nvoid EnvoyQuicProofSourceBase::GetProof(const quic::QuicSocketAddress& server_address,\n                                        const quic::QuicSocketAddress& client_address,\n                                        const std::string& hostname,\n                                        const std::string& server_config,\n                                        quic::QuicTransportVersion /*transport_version*/,\n                                        quiche::QuicheStringPiece chlo_hash,\n                                        std::unique_ptr<quic::ProofSource::Callback> callback) {\n  quic::QuicReferenceCountedPointer<quic::ProofSource::Chain> chain =\n      GetCertChain(server_address, client_address, hostname);\n\n  if (chain == nullptr || chain->certs.empty()) {\n    quic::QuicCryptoProof proof;\n    callback->Run(/*ok=*/false, nullptr, proof, nullptr);\n    return;\n  }\n  size_t payload_size = sizeof(quic::kProofSignatureLabel) + sizeof(uint32_t) + chlo_hash.size() +\n                        server_config.size();\n  auto payload = std::make_unique<char[]>(payload_size);\n  quic::QuicDataWriter payload_writer(payload_size, payload.get(),\n                                      quiche::Endianness::HOST_BYTE_ORDER);\n  bool success =\n      payload_writer.WriteBytes(quic::kProofSignatureLabel, sizeof(quic::kProofSignatureLabel)) &&\n      payload_writer.WriteUInt32(chlo_hash.size()) && payload_writer.WriteStringPiece(chlo_hash) &&\n      payload_writer.WriteStringPiece(server_config);\n  if (!success) {\n    quic::QuicCryptoProof proof;\n    
callback->Run(/*ok=*/false, nullptr, proof, nullptr);\n    return;\n  }\n\n  std::string error_details;\n  bssl::UniquePtr<X509> cert = parseDERCertificate(chain->certs[0], &error_details);\n  if (cert == nullptr) {\n    ENVOY_LOG(warn, absl::StrCat(\"Invalid leaf cert: \", error_details));\n    quic::QuicCryptoProof proof;\n    callback->Run(/*ok=*/false, nullptr, proof, nullptr);\n    return;\n  }\n\n  bssl::UniquePtr<EVP_PKEY> pub_key(X509_get_pubkey(cert.get()));\n  int sign_alg = deduceSignatureAlgorithmFromPublicKey(pub_key.get(), &error_details);\n  if (sign_alg == 0) {\n    ENVOY_LOG(warn, absl::StrCat(\"Failed to deduce signature algorithm from public key: \",\n                                 error_details));\n    quic::QuicCryptoProof proof;\n    callback->Run(/*ok=*/false, nullptr, proof, nullptr);\n    return;\n  }\n\n  auto signature_callback = std::make_unique<SignatureCallback>(std::move(callback), chain);\n\n  signPayload(server_address, client_address, hostname, sign_alg,\n              quiche::QuicheStringPiece(payload.get(), payload_size),\n              std::move(signature_callback));\n}\n\nvoid EnvoyQuicProofSourceBase::ComputeTlsSignature(\n    const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address,\n    const std::string& hostname, uint16_t signature_algorithm, quiche::QuicheStringPiece in,\n    std::unique_ptr<quic::ProofSource::SignatureCallback> callback) {\n  signPayload(server_address, client_address, hostname, signature_algorithm, in,\n              std::move(callback));\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/common/assert.h\"\n\n#include \"absl/strings/str_cat.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#endif\n\n#include \"quiche/quic/core/crypto/proof_source.h\"\n#include \"quiche/quic/core/quic_versions.h\"\n#include \"quiche/quic/core/crypto/crypto_protocol.h\"\n#include \"quiche/quic/platform/api/quic_reference_counted.h\"\n#include \"quiche/quic/platform/api/quic_socket_address.h\"\n#include \"quiche/common/platform/api/quiche_string_piece.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"openssl/ssl.h\"\n#include \"envoy/network/filter.h\"\n#include \"server/backtrace.h\"\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// A ProofSource::Detail implementation which retains filter chain.\nclass EnvoyQuicProofSourceDetails : public quic::ProofSource::Details {\npublic:\n  explicit EnvoyQuicProofSourceDetails(const Network::FilterChain& filter_chain)\n      : filter_chain_(filter_chain) {}\n  EnvoyQuicProofSourceDetails(const EnvoyQuicProofSourceDetails& other)\n      : filter_chain_(other.filter_chain_) {}\n\n  const Network::FilterChain& filterChain() const { return filter_chain_; }\n\nprivate:\n  const Network::FilterChain& filter_chain_;\n};\n\n// A partial implementation of quic::ProofSource which chooses a cipher suite according to the leaf\n// cert to sign in GetProof().\nclass EnvoyQuicProofSourceBase : public quic::ProofSource,\n                                 protected Logger::Loggable<Logger::Id::quic> {\npublic:\n  ~EnvoyQuicProofSourceBase() override = default;\n\n  // quic::ProofSource\n  // Returns a certs chain and its fake SCT \"Fake timestamp\" and TLS signature wrapped\n  // in QuicCryptoProof.\n  void GetProof(const quic::QuicSocketAddress& server_address,\n                const quic::QuicSocketAddress& client_address, const std::string& 
hostname,\n                const std::string& server_config, quic::QuicTransportVersion /*transport_version*/,\n                quiche::QuicheStringPiece chlo_hash,\n                std::unique_ptr<quic::ProofSource::Callback> callback) override;\n\n  TicketCrypter* GetTicketCrypter() override { return nullptr; }\n\n  void ComputeTlsSignature(const quic::QuicSocketAddress& server_address,\n                           const quic::QuicSocketAddress& client_address,\n                           const std::string& hostname, uint16_t signature_algorithm,\n                           quiche::QuicheStringPiece in,\n                           std::unique_ptr<quic::ProofSource::SignatureCallback> callback) override;\n\nprotected:\n  virtual void signPayload(const quic::QuicSocketAddress& server_address,\n                           const quic::QuicSocketAddress& client_address,\n                           const std::string& hostname, uint16_t signature_algorithm,\n                           quiche::QuicheStringPiece in,\n                           std::unique_ptr<quic::ProofSource::SignatureCallback> callback) PURE;\n\nprivate:\n  // Used by GetProof() to get signature.\n  class SignatureCallback : public quic::ProofSource::SignatureCallback {\n  public:\n    // TODO(danzh) Pass in Details to retain the certs chain, and quic::ProofSource::Callback to be\n    // triggered in Run().\n    SignatureCallback(std::unique_ptr<quic::ProofSource::Callback> callback,\n                      quic::QuicReferenceCountedPointer<quic::ProofSource::Chain> chain)\n        : callback_(std::move(callback)), chain_(chain) {}\n\n    // quic::ProofSource::SignatureCallback\n    void Run(bool ok, std::string signature, std::unique_ptr<Details> details) override {\n      quic::QuicCryptoProof proof;\n      if (!ok) {\n        callback_->Run(false, chain_, proof, nullptr);\n        return;\n      }\n      proof.signature = signature;\n      proof.leaf_cert_scts = \"Fake timestamp\";\n      
callback_->Run(true, chain_, proof, std::move(details));\n    }\n\n  private:\n    std::unique_ptr<quic::ProofSource::Callback> callback_;\n    quic::QuicReferenceCountedPointer<quic::ProofSource::Chain> chain_;\n  };\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\n#include \"quiche/quic/core/crypto/certificate_view.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nquic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain(\n    const std::string& hostname, const uint16_t /*port*/, const std::vector<std::string>& certs,\n    const std::string& /*ocsp_response*/, const std::string& /*cert_sct*/,\n    const quic::ProofVerifyContext* /*context*/, std::string* error_details,\n    std::unique_ptr<quic::ProofVerifyDetails>* /*details*/,\n    std::unique_ptr<quic::ProofVerifierCallback> /*callback*/) {\n  ASSERT(!certs.empty());\n  bssl::UniquePtr<STACK_OF(X509)> intermediates(sk_X509_new_null());\n  bssl::UniquePtr<X509> leaf;\n  for (size_t i = 0; i < certs.size(); i++) {\n    bssl::UniquePtr<X509> cert = parseDERCertificate(certs[i], error_details);\n    if (!cert) {\n      return quic::QUIC_FAILURE;\n    }\n    if (i == 0) {\n      leaf = std::move(cert);\n    } else {\n      sk_X509_push(intermediates.get(), cert.release());\n    }\n  }\n  bool success = context_impl_.verifyCertChain(*leaf, *intermediates, *error_details);\n  if (!success) {\n    return quic::QUIC_FAILURE;\n  }\n\n  std::unique_ptr<quic::CertificateView> cert_view =\n      quic::CertificateView::ParseSingleCertificate(certs[0]);\n  ASSERT(cert_view != nullptr);\n  for (const absl::string_view& config_san : cert_view->subject_alt_name_domains()) {\n    if (Extensions::TransportSockets::Tls::ContextImpl::dnsNameMatch(hostname, config_san)) {\n      return quic::QUIC_SUCCESS;\n    }\n  }\n  *error_details = absl::StrCat(\"Leaf certificate doesn't match hostname: \", hostname);\n  return quic::QUIC_FAILURE;\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h",
    "content": "#pragma once\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h\"\n#include \"extensions/transport_sockets/tls/context_impl.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// A quic::ProofVerifier implementation which verifies cert chain using SSL\n// client context config.\nclass EnvoyQuicProofVerifier : public EnvoyQuicProofVerifierBase {\npublic:\n  EnvoyQuicProofVerifier(Stats::Scope& scope, const Envoy::Ssl::ClientContextConfig& config,\n                         TimeSource& time_source)\n      : context_impl_(scope, config, time_source) {}\n\n  // EnvoyQuicProofVerifierBase\n  quic::QuicAsyncStatus\n  VerifyCertChain(const std::string& hostname, const uint16_t port,\n                  const std::vector<std::string>& certs, const std::string& ocsp_response,\n                  const std::string& cert_sct, const quic::ProofVerifyContext* context,\n                  std::string* error_details, std::unique_ptr<quic::ProofVerifyDetails>* details,\n                  std::unique_ptr<quic::ProofVerifierCallback> callback) override;\n\nprivate:\n  Extensions::TransportSockets::Tls::ClientContextImpl context_impl_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\n#include \"openssl/ssl.h\"\n#include \"quiche/quic/core/crypto/certificate_view.h\"\n#include \"quiche/quic/core/crypto/crypto_protocol.h\"\n#include \"quiche/quic/core/quic_data_writer.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nquic::QuicAsyncStatus EnvoyQuicProofVerifierBase::VerifyProof(\n    const std::string& hostname, const uint16_t port, const std::string& server_config,\n    quic::QuicTransportVersion /*quic_version*/, absl::string_view chlo_hash,\n    const std::vector<std::string>& certs, const std::string& cert_sct,\n    const std::string& signature, const quic::ProofVerifyContext* context,\n    std::string* error_details, std::unique_ptr<quic::ProofVerifyDetails>* details,\n    std::unique_ptr<quic::ProofVerifierCallback> callback) {\n  if (certs.empty()) {\n    *error_details = \"Received empty cert chain.\";\n    return quic::QUIC_FAILURE;\n  }\n  if (!verifySignature(server_config, chlo_hash, certs[0], signature, error_details)) {\n    return quic::QUIC_FAILURE;\n  }\n\n  return VerifyCertChain(hostname, port, certs, \"\", cert_sct, context, error_details, details,\n                         std::move(callback));\n}\n\nbool EnvoyQuicProofVerifierBase::verifySignature(const std::string& server_config,\n                                                 absl::string_view chlo_hash,\n                                                 const std::string& cert,\n                                                 const std::string& signature,\n                                                 std::string* error_details) {\n  std::unique_ptr<quic::CertificateView> cert_view =\n      quic::CertificateView::ParseSingleCertificate(cert);\n  if (cert_view == nullptr) {\n    *error_details = \"Invalid leaf cert.\";\n    return false;\n  }\n  int sign_alg = 
deduceSignatureAlgorithmFromPublicKey(cert_view->public_key(), error_details);\n  if (sign_alg == 0) {\n    return false;\n  }\n\n  size_t payload_size = sizeof(quic::kProofSignatureLabel) + sizeof(uint32_t) + chlo_hash.size() +\n                        server_config.size();\n  auto payload = std::make_unique<char[]>(payload_size);\n  quic::QuicDataWriter payload_writer(payload_size, payload.get(),\n                                      quiche::Endianness::HOST_BYTE_ORDER);\n  bool success =\n      payload_writer.WriteBytes(quic::kProofSignatureLabel, sizeof(quic::kProofSignatureLabel)) &&\n      payload_writer.WriteUInt32(chlo_hash.size()) && payload_writer.WriteStringPiece(chlo_hash) &&\n      payload_writer.WriteStringPiece(server_config);\n  if (!success) {\n    *error_details = \"QuicPacketWriter error.\";\n    return false;\n  }\n  bool valid = cert_view->VerifySignature(quiche::QuicheStringPiece(payload.get(), payload_size),\n                                          signature, sign_alg);\n  if (!valid) {\n    *error_details = \"Signature is not valid.\";\n  }\n  return valid;\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h",
    "content": "#pragma once\n\n#include \"absl/strings/str_cat.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#endif\n\n#include \"quiche/quic/core/crypto/proof_verifier.h\"\n#include \"quiche/quic/core/quic_versions.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// A partial implementation of quic::ProofVerifier which does signature\n// verification.\nclass EnvoyQuicProofVerifierBase : public quic::ProofVerifier,\n                                   protected Logger::Loggable<Logger::Id::quic> {\npublic:\n  ~EnvoyQuicProofVerifierBase() override = default;\n\n  // quic::ProofVerifier\n  // Return success if the certs chain is valid and signature of {\n  // server_config + chlo_hash} is valid. Otherwise failure.\n  quic::QuicAsyncStatus\n  VerifyProof(const std::string& hostname, const uint16_t port, const std::string& server_config,\n              quic::QuicTransportVersion /*quic_version*/, absl::string_view chlo_hash,\n              const std::vector<std::string>& certs, const std::string& cert_sct,\n              const std::string& signature, const quic::ProofVerifyContext* context,\n              std::string* error_details, std::unique_ptr<quic::ProofVerifyDetails>* details,\n              std::unique_ptr<quic::ProofVerifierCallback> callback) override;\n\n  std::unique_ptr<quic::ProofVerifyContext> CreateDefaultContext() override { return nullptr; }\n\nprotected:\n  virtual bool verifySignature(const std::string& server_config, absl::string_view chlo_hash,\n                               const std::string& cert, const std::string& signature,\n                               std::string* error_details);\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_server_connection.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_server_connection.h\"\n\n#include \"common/network/listen_socket_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/quic_listeners/quiche/quic_io_handle_wrapper.h\"\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nEnvoyQuicServerConnection::EnvoyQuicServerConnection(\n    const quic::QuicConnectionId& server_connection_id,\n    quic::QuicSocketAddress initial_peer_address, quic::QuicConnectionHelperInterface& helper,\n    quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer, bool owns_writer,\n    const quic::ParsedQuicVersionVector& supported_versions, Network::Socket& listen_socket)\n    : EnvoyQuicConnection(server_connection_id, initial_peer_address, helper, alarm_factory, writer,\n                          owns_writer, quic::Perspective::IS_SERVER, supported_versions,\n                          std::make_unique<Network::ConnectionSocketImpl>(\n                              // Wraps the real IoHandle instance so that if the connection socket\n                              // gets closed, the real IoHandle won't be affected.\n                              std::make_unique<QuicIoHandleWrapper>(listen_socket.ioHandle()),\n                              nullptr, quicAddressToEnvoyAddressInstance(initial_peer_address))) {}\n\nbool EnvoyQuicServerConnection::OnPacketHeader(const quic::QuicPacketHeader& header) {\n  if (!EnvoyQuicConnection::OnPacketHeader(header)) {\n    return false;\n  }\n  if (connectionSocket()->localAddress() != nullptr) {\n    return true;\n  }\n  ASSERT(self_address().IsInitialized());\n  // Self address should be initialized by now.\n  connectionSocket()->setLocalAddress(quicAddressToEnvoyAddressInstance(self_address()));\n  connectionSocket()->setDetectedTransportProtocol(\n      Extensions::TransportSockets::TransportProtocolNames::get().Quic);\n  
return true;\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_server_connection.h",
    "content": "#include \"envoy/network/listener.h\"\n\n#include \"server/connection_handler_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nclass EnvoyQuicServerConnection : public EnvoyQuicConnection {\npublic:\n  EnvoyQuicServerConnection(const quic::QuicConnectionId& server_connection_id,\n                            quic::QuicSocketAddress initial_peer_address,\n                            quic::QuicConnectionHelperInterface& helper,\n                            quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer,\n                            bool owns_writer,\n                            const quic::ParsedQuicVersionVector& supported_versions,\n                            Network::Socket& listen_socket);\n\n  // EnvoyQuicConnection\n  // Overridden to set connection_socket_ with initialized self address and retrieve filter chain.\n  bool OnPacketHeader(const quic::QuicPacketHeader& header) override;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_server_session.h\"\n\n#include <memory>\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_proof_source.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_stream.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nEnvoyQuicServerSession::EnvoyQuicServerSession(\n    const quic::QuicConfig& config, const quic::ParsedQuicVersionVector& supported_versions,\n    std::unique_ptr<EnvoyQuicConnection> connection, quic::QuicSession::Visitor* visitor,\n    quic::QuicCryptoServerStream::Helper* helper, const quic::QuicCryptoServerConfig* crypto_config,\n    quic::QuicCompressedCertsCache* compressed_certs_cache, Event::Dispatcher& dispatcher,\n    uint32_t send_buffer_limit, Network::ListenerConfig& listener_config)\n    : quic::QuicServerSessionBase(config, supported_versions, connection.get(), visitor, helper,\n                                  crypto_config, compressed_certs_cache),\n      QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit),\n      quic_connection_(std::move(connection)), listener_config_(listener_config) {}\n\nEnvoyQuicServerSession::~EnvoyQuicServerSession() {\n  ASSERT(!quic_connection_->connected());\n  QuicFilterManagerConnectionImpl::quic_connection_ = nullptr;\n}\n\nabsl::string_view EnvoyQuicServerSession::requestedServerName() const {\n  return {GetCryptoStream()->crypto_negotiated_params().sni};\n}\n\nstd::unique_ptr<quic::QuicCryptoServerStreamBase>\nEnvoyQuicServerSession::CreateQuicCryptoServerStream(\n    const quic::QuicCryptoServerConfig* crypto_config,\n    quic::QuicCompressedCertsCache* compressed_certs_cache) {\n  return CreateCryptoServerStream(crypto_config, compressed_certs_cache, this, stream_helper());\n}\n\nquic::QuicSpdyStream* EnvoyQuicServerSession::CreateIncomingStream(quic::QuicStreamId id) {\n  if (!ShouldCreateIncomingStream(id)) {\n    return nullptr;\n  }\n  auto stream = 
new EnvoyQuicServerStream(id, this, quic::BIDIRECTIONAL);\n  ActivateStream(absl::WrapUnique(stream));\n  setUpRequestDecoder(*stream);\n  if (aboveHighWatermark()) {\n    stream->runHighWatermarkCallbacks();\n  }\n  return stream;\n}\n\nquic::QuicSpdyStream*\nEnvoyQuicServerSession::CreateIncomingStream(quic::PendingStream* /*pending*/) {\n  // Only client side server push stream should trigger this call.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nquic::QuicSpdyStream* EnvoyQuicServerSession::CreateOutgoingBidirectionalStream() {\n  // Disallow server initiated stream.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nquic::QuicSpdyStream* EnvoyQuicServerSession::CreateOutgoingUnidirectionalStream() {\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid EnvoyQuicServerSession::setUpRequestDecoder(EnvoyQuicServerStream& stream) {\n  ASSERT(http_connection_callbacks_ != nullptr);\n  Http::RequestDecoder& decoder = http_connection_callbacks_->newStream(stream);\n  stream.setRequestDecoder(decoder);\n}\n\nvoid EnvoyQuicServerSession::OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame,\n                                                quic::ConnectionCloseSource source) {\n  quic::QuicServerSessionBase::OnConnectionClosed(frame, source);\n  onConnectionCloseEvent(frame, source);\n}\n\nvoid EnvoyQuicServerSession::Initialize() {\n  quic::QuicServerSessionBase::Initialize();\n  quic_connection_->setEnvoyConnection(*this);\n}\n\nvoid EnvoyQuicServerSession::OnCanWrite() {\n  const uint64_t headers_to_send_old =\n      quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes();\n\n  quic::QuicServerSessionBase::OnCanWrite();\n  const uint64_t headers_to_send_new =\n      quic::VersionUsesHttp3(transport_version()) ? 
0u : headers_stream()->BufferedDataBytes();\n  adjustBytesToSend(headers_to_send_new - headers_to_send_old);\n  // Do not update delay close state according to connection level packet egress because that is\n  // equivalent to TCP transport layer egress. But only do so if the session gets chance to write.\n  maybeApplyDelayClosePolicy();\n}\n\nvoid EnvoyQuicServerSession::SetDefaultEncryptionLevel(quic::EncryptionLevel level) {\n  quic::QuicServerSessionBase::SetDefaultEncryptionLevel(level);\n  if (level != quic::ENCRYPTION_FORWARD_SECURE) {\n    return;\n  }\n  maybeCreateNetworkFilters();\n  // This is only reached once, when handshake is done.\n  raiseConnectionEvent(Network::ConnectionEvent::Connected);\n}\n\nbool EnvoyQuicServerSession::hasDataToWrite() { return HasDataToWrite(); }\n\nvoid EnvoyQuicServerSession::OnTlsHandshakeComplete() {\n  quic::QuicServerSessionBase::OnTlsHandshakeComplete();\n  maybeCreateNetworkFilters();\n  raiseConnectionEvent(Network::ConnectionEvent::Connected);\n}\n\nvoid EnvoyQuicServerSession::maybeCreateNetworkFilters() {\n  auto proof_source_details =\n      dynamic_cast<const EnvoyQuicProofSourceDetails*>(GetCryptoStream()->ProofSourceDetails());\n  ASSERT(proof_source_details != nullptr,\n         \"ProofSource didn't provide ProofSource::Details. No filter chain will be installed.\");\n\n  const bool has_filter_initialized =\n      listener_config_.filterChainFactory().createNetworkFilterChain(\n          *this, proof_source_details->filterChain().networkFilterFactories());\n  ASSERT(has_filter_initialized);\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_server_session.h",
    "content": "#pragma once\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#pragma GCC diagnostic ignored \"-Wtype-limits\"\n#endif\n\n#include \"quiche/quic/core/http/quic_server_session_base.h\"\n#include \"quiche/quic/core/quic_crypto_server_stream.h\"\n#include \"quiche/quic/core/tls_server_handshaker.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include <memory>\n\n#include \"extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_stream.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Act as a Network::Connection to HCM and a FilterManager to FilterFactoryCb.\n// TODO(danzh) Lifetime of quic connection and filter manager connection can be\n// simplified by changing the inheritance to a member variable instantiated\n// before quic_connection_.\nclass EnvoyQuicServerSession : public quic::QuicServerSessionBase,\n                               public QuicFilterManagerConnectionImpl {\npublic:\n  EnvoyQuicServerSession(const quic::QuicConfig& config,\n                         const quic::ParsedQuicVersionVector& supported_versions,\n                         std::unique_ptr<EnvoyQuicConnection> connection,\n                         quic::QuicSession::Visitor* visitor,\n                         quic::QuicCryptoServerStreamBase::Helper* helper,\n                         const quic::QuicCryptoServerConfig* crypto_config,\n                         quic::QuicCompressedCertsCache* compressed_certs_cache,\n                         Event::Dispatcher& dispatcher, uint32_t send_buffer_limit,\n                         Network::ListenerConfig& listener_config);\n\n  ~EnvoyQuicServerSession() override;\n\n  // Network::Connection\n  absl::string_view requestedServerName() const override;\n\n  // Called by QuicHttpServerConnectionImpl before creating data 
streams.\n  void setHttpConnectionCallbacks(Http::ServerConnectionCallbacks& callbacks) {\n    http_connection_callbacks_ = &callbacks;\n  }\n\n  // quic::QuicSession\n  void OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame,\n                          quic::ConnectionCloseSource source) override;\n  void Initialize() override;\n  void OnCanWrite() override;\n  void OnTlsHandshakeComplete() override;\n  // quic::QuicSpdySession\n  void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override;\n\n  using quic::QuicSession::stream_map;\n\nprotected:\n  // quic::QuicServerSessionBase\n  std::unique_ptr<quic::QuicCryptoServerStreamBase>\n  CreateQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config,\n                               quic::QuicCompressedCertsCache* compressed_certs_cache) override;\n\n  // quic::QuicSession\n  // Overridden to create stream as encoder and associate it with an decoder.\n  quic::QuicSpdyStream* CreateIncomingStream(quic::QuicStreamId id) override;\n  quic::QuicSpdyStream* CreateIncomingStream(quic::PendingStream* pending) override;\n  quic::QuicSpdyStream* CreateOutgoingBidirectionalStream() override;\n  quic::QuicSpdyStream* CreateOutgoingUnidirectionalStream() override;\n\n  // QuicFilterManagerConnectionImpl\n  bool hasDataToWrite() override;\n\nprivate:\n  void setUpRequestDecoder(EnvoyQuicServerStream& stream);\n  void maybeCreateNetworkFilters();\n\n  std::unique_ptr<EnvoyQuicConnection> quic_connection_;\n  Network::ListenerConfig& listener_config_;\n  // These callbacks are owned by network filters and quic session should out live\n  // them.\n  Http::ServerConnectionCallbacks* http_connection_callbacks_{nullptr};\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_server_stream.h\"\n\n#include <openssl/bio.h>\n#include <openssl/evp.h>\n\n#include <memory>\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/http/quic_header_list.h\"\n#include \"quiche/quic/core/quic_session.h\"\n#include \"quiche/spdy/core/spdy_header_block.h\"\n#include \"extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_session.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nEnvoyQuicServerStream::EnvoyQuicServerStream(quic::QuicStreamId id, quic::QuicSpdySession* session,\n                                             quic::StreamType type)\n    : quic::QuicSpdyServerStreamBase(id, session, type),\n      EnvoyQuicStream(\n          // This should be larger than 8k to fully utilize congestion control\n          // window. 
And no larger than the max stream flow control window for\n          // the stream to buffer all the data.\n          // Ideally this limit should also correlate to peer's receive window\n          // but not fully depends on that.\n          16 * 1024, [this]() { runLowWatermarkCallbacks(); },\n          [this]() { runHighWatermarkCallbacks(); }) {}\n\nEnvoyQuicServerStream::EnvoyQuicServerStream(quic::PendingStream* pending,\n                                             quic::QuicSpdySession* session, quic::StreamType type)\n    : quic::QuicSpdyServerStreamBase(pending, session, type),\n      EnvoyQuicStream(\n          // This should be larger than 8k to fully utilize congestion control\n          // window. And no larger than the max stream flow control window for\n          // the stream to buffer all the data.\n          16 * 1024, [this]() { runLowWatermarkCallbacks(); },\n          [this]() { runHighWatermarkCallbacks(); }) {}\n\nvoid EnvoyQuicServerStream::encode100ContinueHeaders(const Http::ResponseHeaderMap& headers) {\n  ASSERT(headers.Status()->value() == \"100\");\n  encodeHeaders(headers, false);\n}\n\nvoid EnvoyQuicServerStream::encodeHeaders(const Http::ResponseHeaderMap& headers, bool end_stream) {\n  ENVOY_STREAM_LOG(debug, \"encodeHeaders (end_stream={}) {}.\", *this, end_stream, headers);\n  // QUICHE guarantees to take all the headers. This could cause infinite data to\n  // be buffered on headers stream in Google QUIC implementation because\n  // headers stream doesn't have upper bound for its send buffer. 
But in IETF\n  // QUIC implementation this is safe as headers are sent on data stream which\n  // is bounded by max concurrent streams limited.\n  // Same vulnerability exists in crypto stream which can infinitely buffer data\n  // if handshake implementation goes wrong.\n  // TODO(#8826) Modify QUICHE to have an upper bound for header stream send buffer.\n  // This is counting not serialized bytes in the send buffer.\n  quic::QuicStream* writing_stream =\n      quic::VersionUsesHttp3(transport_version())\n          ? static_cast<quic::QuicStream*>(this)\n          : (dynamic_cast<quic::QuicSpdySession*>(session())->headers_stream());\n  const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes();\n\n  WriteHeaders(envoyHeadersToSpdyHeaderBlock(headers), end_stream, nullptr);\n  local_end_stream_ = end_stream;\n  const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes();\n  ASSERT(bytes_to_send_old <= bytes_to_send_new);\n  maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection());\n}\n\nvoid EnvoyQuicServerStream::encodeData(Buffer::Instance& data, bool end_stream) {\n  ENVOY_STREAM_LOG(debug, \"encodeData (end_stream={}) of {} bytes.\", *this, end_stream,\n                   data.length());\n  local_end_stream_ = end_stream;\n  // This is counting not serialized bytes in the send buffer.\n  const uint64_t bytes_to_send_old = BufferedDataBytes();\n  // QUIC stream must take all.\n  WriteBodySlices(quic::QuicMemSliceSpan(quic::QuicMemSliceSpanImpl(data)), end_stream);\n  if (data.length() > 0) {\n    // Send buffer didn't take all the data, threshold needs to be adjusted.\n    Reset(quic::QUIC_BAD_APPLICATION_PAYLOAD);\n    return;\n  }\n\n  const uint64_t bytes_to_send_new = BufferedDataBytes();\n  ASSERT(bytes_to_send_old <= bytes_to_send_new);\n  maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection());\n}\n\nvoid EnvoyQuicServerStream::encodeTrailers(const Http::ResponseTrailerMap& 
trailers) {\n  ASSERT(!local_end_stream_);\n  local_end_stream_ = true;\n  ENVOY_STREAM_LOG(debug, \"encodeTrailers: {}.\", *this, trailers);\n  quic::QuicStream* writing_stream =\n      quic::VersionUsesHttp3(transport_version())\n          ? static_cast<quic::QuicStream*>(this)\n          : (dynamic_cast<quic::QuicSpdySession*>(session())->headers_stream());\n  const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes();\n  WriteTrailers(envoyHeadersToSpdyHeaderBlock(trailers), nullptr);\n  const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes();\n  ASSERT(bytes_to_send_old <= bytes_to_send_new);\n  maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection());\n}\n\nvoid EnvoyQuicServerStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) {\n  // Metadata Frame is not supported in QUIC.\n  // TODO(danzh): add stats for metadata not supported error.\n}\n\nvoid EnvoyQuicServerStream::resetStream(Http::StreamResetReason reason) {\n  // Upper layers expect calling resetStream() to immediately raise reset callbacks.\n  runResetCallbacks(reason);\n  if (local_end_stream_ && !reading_stopped()) {\n    // This is after 200 early response. Reset with QUIC_STREAM_NO_ERROR instead\n    // of propagating original reset reason. 
In QUICHE if a stream stops reading\n    // before FIN or RESET received, it resets the steam with QUIC_STREAM_NO_ERROR.\n    StopReading();\n  } else {\n    Reset(envoyResetReasonToQuicRstError(reason));\n  }\n}\n\nvoid EnvoyQuicServerStream::switchStreamBlockState(bool should_block) {\n  ASSERT(FinishedReadingHeaders(),\n         \"Upperstream buffer limit is reached before request body is delivered.\");\n  if (should_block) {\n    sequencer()->SetBlockedUntilFlush();\n  } else {\n    ASSERT(read_disable_counter_ == 0, \"readDisable called in between.\");\n    sequencer()->SetUnblocked();\n  }\n}\n\nvoid EnvoyQuicServerStream::OnInitialHeadersComplete(bool fin, size_t frame_len,\n                                                     const quic::QuicHeaderList& header_list) {\n  quic::QuicSpdyServerStreamBase::OnInitialHeadersComplete(fin, frame_len, header_list);\n  ASSERT(headers_decompressed());\n  request_decoder_->decodeHeaders(\n      quicHeadersToEnvoyHeaders<Http::RequestHeaderMapImpl>(header_list), /*end_stream=*/fin);\n  if (fin) {\n    end_stream_decoded_ = true;\n  }\n  ConsumeHeaderList();\n}\n\nvoid EnvoyQuicServerStream::OnBodyAvailable() {\n  ASSERT(FinishedReadingHeaders());\n  ASSERT(read_disable_counter_ == 0);\n  ASSERT(!in_decode_data_callstack_);\n  in_decode_data_callstack_ = true;\n\n  Buffer::InstancePtr buffer = std::make_unique<Buffer::OwnedImpl>();\n  // TODO(danzh): check Envoy per stream buffer limit.\n  // Currently read out all the data.\n  while (HasBytesToRead()) {\n    iovec iov;\n    int num_regions = GetReadableRegions(&iov, 1);\n    ASSERT(num_regions > 0);\n    size_t bytes_read = iov.iov_len;\n    Buffer::RawSlice slice;\n    buffer->reserve(bytes_read, &slice, 1);\n    ASSERT(slice.len_ >= bytes_read);\n    slice.len_ = bytes_read;\n    memcpy(slice.mem_, iov.iov_base, iov.iov_len);\n    buffer->commit(&slice, 1);\n    MarkConsumed(bytes_read);\n  }\n\n  // True if no trailer and FIN read.\n  bool finished_reading = 
IsDoneReading();\n  bool empty_payload_with_fin = buffer->length() == 0 && fin_received();\n  // If this call is triggered by an empty frame with FIN which is not from peer\n  // but synthesized by stream itself upon receiving HEADERS with FIN or\n  // TRAILERS, do not deliver end of stream here. Because either decodeHeaders\n  // already delivered it or decodeTrailers will be called.\n  bool skip_decoding = empty_payload_with_fin && (end_stream_decoded_ || !finished_reading);\n  if (!skip_decoding) {\n    request_decoder_->decodeData(*buffer, finished_reading);\n    if (finished_reading) {\n      end_stream_decoded_ = true;\n    }\n  }\n\n  if (!sequencer()->IsClosed()) {\n    in_decode_data_callstack_ = false;\n    if (read_disable_counter_ > 0) {\n      // If readDisable() was ever called during decodeData() and it meant to disable\n      // reading from downstream, the call must have been deferred. Call it now.\n      switchStreamBlockState(true);\n    }\n    return;\n  }\n\n  if (!quic::VersionUsesHttp3(transport_version()) && !FinishedReadingTrailers()) {\n    // For Google QUIC implementation, trailers may arrived earlier and wait to\n    // be consumed after reading all the body. 
Consume it here.\n    // IETF QUIC shouldn't reach here because trailers are sent on same stream.\n    request_decoder_->decodeTrailers(\n        spdyHeaderBlockToEnvoyHeaders<Http::RequestTrailerMapImpl>(received_trailers()));\n    MarkTrailersConsumed();\n  }\n  OnFinRead();\n  in_decode_data_callstack_ = false;\n}\n\nvoid EnvoyQuicServerStream::OnTrailingHeadersComplete(bool fin, size_t frame_len,\n                                                      const quic::QuicHeaderList& header_list) {\n  quic::QuicSpdyServerStreamBase::OnTrailingHeadersComplete(fin, frame_len, header_list);\n  if (session()->connection()->connected() &&\n      (quic::VersionUsesHttp3(transport_version()) || sequencer()->IsClosed()) &&\n      !FinishedReadingTrailers()) {\n    // Before QPack trailers can arrive before body. Only decode trailers after finishing decoding\n    // body.\n    request_decoder_->decodeTrailers(\n        spdyHeaderBlockToEnvoyHeaders<Http::RequestTrailerMapImpl>(received_trailers()));\n    MarkTrailersConsumed();\n  }\n}\n\nvoid EnvoyQuicServerStream::OnStreamReset(const quic::QuicRstStreamFrame& frame) {\n  quic::QuicSpdyServerStreamBase::OnStreamReset(frame);\n  runResetCallbacks(quicRstErrorToEnvoyResetReason(frame.error_code));\n}\n\nvoid EnvoyQuicServerStream::OnConnectionClosed(quic::QuicErrorCode error,\n                                               quic::ConnectionCloseSource source) {\n  quic::QuicSpdyServerStreamBase::OnConnectionClosed(error, source);\n  runResetCallbacks(quicErrorCodeToEnvoyResetReason(error));\n}\n\nvoid EnvoyQuicServerStream::OnClose() {\n  quic::QuicSpdyServerStreamBase::OnClose();\n  if (BufferedDataBytes() > 0) {\n    // If the stream is closed without sending out all buffered data, regard\n    // them as sent now and adjust connection buffer book keeping.\n    filterManagerConnection()->adjustBytesToSend(0 - BufferedDataBytes());\n  }\n}\n\nvoid EnvoyQuicServerStream::OnCanWrite() {\n  const uint64_t buffered_data_old = 
BufferedDataBytes();\n  quic::QuicSpdyServerStreamBase::OnCanWrite();\n  const uint64_t buffered_data_new = BufferedDataBytes();\n  // As long as OnCanWriteNewData() is no-op, data to sent in buffer shouldn't\n  // increase.\n  ASSERT(buffered_data_new <= buffered_data_old);\n  maybeCheckWatermark(buffered_data_old, buffered_data_new, *filterManagerConnection());\n}\n\nuint32_t EnvoyQuicServerStream::streamId() { return id(); }\n\nNetwork::Connection* EnvoyQuicServerStream::connection() { return filterManagerConnection(); }\n\nQuicFilterManagerConnectionImpl* EnvoyQuicServerStream::filterManagerConnection() {\n  return dynamic_cast<QuicFilterManagerConnectionImpl*>(session());\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h",
    "content": "#pragma once\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/http/quic_spdy_server_stream_base.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_stream.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// This class is a quic stream and also a response encoder.\nclass EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase,\n                              public EnvoyQuicStream,\n                              public Http::ResponseEncoder {\npublic:\n  EnvoyQuicServerStream(quic::QuicStreamId id, quic::QuicSpdySession* session,\n                        quic::StreamType type);\n\n  EnvoyQuicServerStream(quic::PendingStream* pending, quic::QuicSpdySession* session,\n                        quic::StreamType type);\n\n  void setRequestDecoder(Http::RequestDecoder& decoder) { request_decoder_ = &decoder; }\n\n  // Http::StreamEncoder\n  void encode100ContinueHeaders(const Http::ResponseHeaderMap& headers) override;\n  void encodeHeaders(const Http::ResponseHeaderMap& headers, bool end_stream) override;\n  void encodeData(Buffer::Instance& data, bool end_stream) override;\n  void encodeTrailers(const Http::ResponseTrailerMap& trailers) override;\n  void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) override;\n  Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override {\n    return absl::nullopt;\n  }\n  bool streamErrorOnInvalidHttpMessage() const override { return false; }\n\n  // Http::Stream\n  void resetStream(Http::StreamResetReason reason) override;\n  void setFlushTimeout(std::chrono::milliseconds) override {\n    // TODO(mattklein123): Actually implement this for HTTP/3 similar to HTTP/2.\n  }\n  // quic::QuicSpdyStream\n  void OnBodyAvailable() override;\n  void OnStreamReset(const 
quic::QuicRstStreamFrame& frame) override;\n  void OnClose() override;\n  void OnCanWrite() override;\n  // quic::QuicServerSessionBase\n  void OnConnectionClosed(quic::QuicErrorCode error, quic::ConnectionCloseSource source) override;\n\nprotected:\n  // EnvoyQuicStream\n  void switchStreamBlockState(bool should_block) override;\n  uint32_t streamId() override;\n  Network::Connection* connection() override;\n\n  // quic::QuicSpdyStream\n  void OnInitialHeadersComplete(bool fin, size_t frame_len,\n                                const quic::QuicHeaderList& header_list) override;\n  void OnTrailingHeadersComplete(bool fin, size_t frame_len,\n                                 const quic::QuicHeaderList& header_list) override;\n\nprivate:\n  QuicFilterManagerConnectionImpl* filterManagerConnection();\n\n  Http::RequestDecoder* request_decoder_{nullptr};\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_simulated_watermark_buffer.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"common/common/assert.h\"\n\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// A class, together with a stand alone buffer, used to achieve the purpose of WatermarkBuffer.\n// Itself doesn't have buffer or bookkeep buffered bytes. But provided with buffered_bytes,\n// it reacts upon crossing high/low watermarks.\n// It's no-op if provided low and high watermark are 0.\nclass EnvoyQuicSimulatedWatermarkBuffer {\npublic:\n  EnvoyQuicSimulatedWatermarkBuffer(uint32_t low_watermark, uint32_t high_watermark,\n                                    std::function<void()> below_low_watermark,\n                                    std::function<void()> above_high_watermark,\n                                    spdlog::logger& logger)\n      : low_watermark_(low_watermark), high_watermark_(high_watermark),\n        below_low_watermark_(std::move(below_low_watermark)),\n        above_high_watermark_(std::move(above_high_watermark)), logger_(logger) {\n    ASSERT((high_watermark == 0 && low_watermark == 0) || (high_watermark_ > low_watermark_));\n  }\n\n  uint32_t highWatermark() const { return high_watermark_; }\n\n  void checkHighWatermark(uint32_t bytes_buffered) {\n    if (high_watermark_ > 0 && !is_full_ && bytes_buffered > high_watermark_) {\n      // Transitioning from below low watermark to above high watermark.\n      ENVOY_LOG_TO_LOGGER(logger_, debug, \"Buffered {} bytes, cross high watermark {}\",\n                          bytes_buffered, high_watermark_);\n      is_full_ = true;\n      above_high_watermark_();\n    }\n  }\n\n  void checkLowWatermark(uint32_t bytes_buffered) {\n    if (low_watermark_ > 0 && is_full_ && bytes_buffered < low_watermark_) {\n      // Transitioning from above high watermark to below low watermark.\n      ENVOY_LOG_TO_LOGGER(logger_, debug, \"Buffered {} bytes, cross low watermark {}\",\n                          bytes_buffered, low_watermark_);\n     
 is_full_ = false;\n      below_low_watermark_();\n    }\n  }\n\n  // True after the buffer goes above high watermark and hasn't come down below low\n  // watermark yet, even though the buffered data might be between high and low\n  // watermarks.\n  bool isAboveHighWatermark() const { return is_full_; }\n\n  // True before the buffer crosses the high watermark for the first time and after the buffer goes\n  // below low watermark and hasn't come up above high watermark yet, even though the buffered data\n  // might be between high and low watermarks.\n  bool isBelowLowWatermark() const { return !is_full_; }\n\nprivate:\n  uint32_t low_watermark_{0};\n  uint32_t high_watermark_{0};\n  bool is_full_{false};\n  std::function<void()> below_low_watermark_;\n  std::function<void()> above_high_watermark_;\n  spdlog::logger& logger_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_stream.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/codec.h\"\n\n#include \"common/http/codec_helper.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_simulated_watermark_buffer.h\"\n#include \"extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Base class for EnvoyQuicServer|ClientStream.\nclass EnvoyQuicStream : public virtual Http::StreamEncoder,\n                        public Http::Stream,\n                        public Http::StreamCallbackHelper,\n                        protected Logger::Loggable<Logger::Id::quic_stream> {\npublic:\n  // |buffer_limit| is the high watermark of the stream send buffer, and the low\n  // watermark will be half of it.\n  EnvoyQuicStream(uint32_t buffer_limit, std::function<void()> below_low_watermark,\n                  std::function<void()> above_high_watermark)\n      : send_buffer_simulation_(buffer_limit / 2, buffer_limit, std::move(below_low_watermark),\n                                std::move(above_high_watermark), ENVOY_LOGGER()) {}\n\n  // Http::StreamEncoder\n  Stream& getStream() override { return *this; }\n\n  // Http::Stream\n  void readDisable(bool disable) override {\n    bool status_changed{false};\n    if (disable) {\n      ++read_disable_counter_;\n      if (read_disable_counter_ == 1) {\n        status_changed = true;\n      }\n    } else {\n      ASSERT(read_disable_counter_ > 0);\n      --read_disable_counter_;\n      if (read_disable_counter_ == 0) {\n        status_changed = true;\n      }\n    }\n\n    if (status_changed && !in_decode_data_callstack_) {\n      // Avoid calling this while decoding data because transient disabling and\n      // enabling reading may trigger another decoding data inside the\n      // callstack which messes up stream state.\n      if (disable) {\n        // Block QUIC stream right away. 
And if there are queued switching\n        // state callback, update the desired state as well.\n        switchStreamBlockState(true);\n        if (unblock_posted_) {\n          should_block_ = true;\n        }\n      } else {\n        should_block_ = false;\n        if (!unblock_posted_) {\n          // If this is the first time unblocking stream is desired, post a\n          // callback to do it in next loop. This is because unblocking QUIC\n          // stream can lead to immediate upstream encoding.\n          unblock_posted_ = true;\n          connection()->dispatcher().post([this] {\n            unblock_posted_ = false;\n            switchStreamBlockState(should_block_);\n          });\n        }\n      }\n    }\n  }\n\n  void addCallbacks(Http::StreamCallbacks& callbacks) override {\n    ASSERT(!local_end_stream_);\n    addCallbacksHelper(callbacks);\n  }\n  void removeCallbacks(Http::StreamCallbacks& callbacks) override {\n    removeCallbacksHelper(callbacks);\n  }\n  uint32_t bufferLimit() override { return send_buffer_simulation_.highWatermark(); }\n  const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override {\n    return connection()->localAddress();\n  }\n\n  void maybeCheckWatermark(uint64_t buffered_data_old, uint64_t buffered_data_new,\n                           QuicFilterManagerConnectionImpl& connection) {\n    if (buffered_data_new == buffered_data_old) {\n      return;\n    }\n    // If buffered bytes changed, update stream and session's watermark book\n    // keeping.\n    if (buffered_data_new > buffered_data_old) {\n      send_buffer_simulation_.checkHighWatermark(buffered_data_new);\n    } else {\n      send_buffer_simulation_.checkLowWatermark(buffered_data_new);\n    }\n    connection.adjustBytesToSend(buffered_data_new - buffered_data_old);\n  }\n\nprotected:\n  virtual void switchStreamBlockState(bool should_block) PURE;\n\n  // Needed for ENVOY_STREAM_LOG.\n  virtual uint32_t streamId() PURE;\n  virtual 
Network::Connection* connection() PURE;\n\n  // True once end of stream is propagated to Envoy. Envoy doesn't expect to be\n  // notified more than once about end of stream. So once this is true, no need\n  // to set it in the callback to Envoy stream any more.\n  bool end_stream_decoded_{false};\n  uint32_t read_disable_counter_{0u};\n  // If true, switchStreamBlockState() should be deferred till this variable\n  // becomes false.\n  bool in_decode_data_callstack_{false};\n\nprivate:\n  // Keeps track of bytes buffered in the stream send buffer in QUICHE and reacts\n  // upon crossing high and low watermarks.\n  // Its high watermark is also the buffer limit of stream read/write filters in\n  // HCM.\n  // There is no receive buffer simulation because Quic stream's\n  // OnBodyDataAvailable() hands all the ready-to-use request data from stream sequencer to HCM\n  // directly and buffers them in filters if needed. Itself doesn't buffer request data.\n  EnvoyQuicSimulatedWatermarkBuffer send_buffer_simulation_;\n\n  // True if there is posted unblocking QUIC stream callback. There should be\n  // only one such callback no matter how many times readDisable() is called.\n  bool unblock_posted_{false};\n  // The latest state an unblocking QUIC stream callback should look at. As\n  // more readDisable() calls may happen between the callback is posted and it's\n  // executed, the stream might be unblocked and blocked several times. Only the\n  // latest desired state should be considered by the callback.\n  bool should_block_{false};\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_utils.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/network/socket_option_factory.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// TODO(danzh): this is called on each write. Consider to return an address instance on the stack if\n// the heap allocation is too expensive.\nNetwork::Address::InstanceConstSharedPtr\nquicAddressToEnvoyAddressInstance(const quic::QuicSocketAddress& quic_address) {\n  return quic_address.IsInitialized()\n             ? Network::Address::addressFromSockAddr(quic_address.generic_address(),\n                                                     quic_address.host().address_family() ==\n                                                             quic::IpAddressFamily::IP_V4\n                                                         ? sizeof(sockaddr_in)\n                                                         : sizeof(sockaddr_in6),\n                                                     false)\n             : nullptr;\n}\n\nquic::QuicSocketAddress envoyIpAddressToQuicSocketAddress(const Network::Address::Ip* envoy_ip) {\n  if (envoy_ip == nullptr) {\n    // Return uninitialized socket addr\n    return quic::QuicSocketAddress();\n  }\n\n  uint32_t port = envoy_ip->port();\n  sockaddr_storage ss;\n\n  if (envoy_ip->version() == Network::Address::IpVersion::v4) {\n    // Create and return quic ipv4 address\n    auto ipv4_addr = reinterpret_cast<sockaddr_in*>(&ss);\n    memset(ipv4_addr, 0, sizeof(sockaddr_in));\n    ipv4_addr->sin_family = AF_INET;\n    ipv4_addr->sin_port = htons(port);\n    ipv4_addr->sin_addr.s_addr = envoy_ip->ipv4()->address();\n  } else {\n    // Create and return quic ipv6 address\n    auto ipv6_addr = reinterpret_cast<sockaddr_in6*>(&ss);\n    memset(ipv6_addr, 0, sizeof(sockaddr_in6));\n    ipv6_addr->sin6_family = AF_INET6;\n    ipv6_addr->sin6_port = htons(port);\n    
ASSERT(sizeof(ipv6_addr->sin6_addr.s6_addr) == 16u);\n    *reinterpret_cast<absl::uint128*>(ipv6_addr->sin6_addr.s6_addr) = envoy_ip->ipv6()->address();\n  }\n  return quic::QuicSocketAddress(ss);\n}\n\nspdy::SpdyHeaderBlock envoyHeadersToSpdyHeaderBlock(const Http::HeaderMap& headers) {\n  spdy::SpdyHeaderBlock header_block;\n  headers.iterate([&header_block](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    // The key-value pairs are copied.\n    header_block.insert({header.key().getStringView(), header.value().getStringView()});\n    return Http::HeaderMap::Iterate::Continue;\n  });\n  return header_block;\n}\n\nquic::QuicRstStreamErrorCode envoyResetReasonToQuicRstError(Http::StreamResetReason reason) {\n  switch (reason) {\n  case Http::StreamResetReason::LocalRefusedStreamReset:\n    return quic::QUIC_REFUSED_STREAM;\n  case Http::StreamResetReason::ConnectionFailure:\n    return quic::QUIC_STREAM_CONNECTION_ERROR;\n  case Http::StreamResetReason::LocalReset:\n    return quic::QUIC_STREAM_CANCELLED;\n  case Http::StreamResetReason::ConnectionTermination:\n    return quic::QUIC_STREAM_NO_ERROR;\n  default:\n    return quic::QUIC_BAD_APPLICATION_PAYLOAD;\n  }\n}\n\nHttp::StreamResetReason quicRstErrorToEnvoyResetReason(quic::QuicRstStreamErrorCode rst_err) {\n  switch (rst_err) {\n  case quic::QUIC_REFUSED_STREAM:\n    return Http::StreamResetReason::RemoteRefusedStreamReset;\n  default:\n    return Http::StreamResetReason::RemoteReset;\n  }\n}\n\nHttp::StreamResetReason quicErrorCodeToEnvoyResetReason(quic::QuicErrorCode error) {\n  if (error == quic::QUIC_NO_ERROR) {\n    return Http::StreamResetReason::ConnectionTermination;\n  } else {\n    return Http::StreamResetReason::ConnectionFailure;\n  }\n}\n\nHttp::GoAwayErrorCode quicErrorCodeToEnvoyErrorCode(quic::QuicErrorCode error) noexcept {\n  switch (error) {\n  case quic::QUIC_NO_ERROR:\n    return Http::GoAwayErrorCode::NoError;\n  default:\n    return Http::GoAwayErrorCode::Other;\n  
}\n}\n\nNetwork::ConnectionSocketPtr\ncreateConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr,\n                       Network::Address::InstanceConstSharedPtr& local_addr,\n                       const Network::ConnectionSocket::OptionsSharedPtr& options) {\n  auto connection_socket = std::make_unique<Network::ConnectionSocketImpl>(\n      Network::Socket::Type::Datagram, local_addr, peer_addr);\n  connection_socket->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions());\n  connection_socket->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions());\n  if (options != nullptr) {\n    connection_socket->addOptions(options);\n  }\n  if (!Network::Socket::applyOptions(connection_socket->options(), *connection_socket,\n                                     envoy::config::core::v3::SocketOption::STATE_PREBIND)) {\n    connection_socket->close();\n    ENVOY_LOG_MISC(error, \"Fail to apply pre-bind options\");\n    return connection_socket;\n  }\n  connection_socket->bind(local_addr);\n  ASSERT(local_addr->ip());\n  local_addr = connection_socket->localAddress();\n  if (!Network::Socket::applyOptions(connection_socket->options(), *connection_socket,\n                                     envoy::config::core::v3::SocketOption::STATE_BOUND)) {\n    ENVOY_LOG_MISC(error, \"Fail to apply post-bind options\");\n    connection_socket->close();\n  }\n  return connection_socket;\n}\n\nbssl::UniquePtr<X509> parseDERCertificate(const std::string& der_bytes,\n                                          std::string* error_details) {\n  const uint8_t* data;\n  const uint8_t* orig_data;\n  orig_data = data = reinterpret_cast<const uint8_t*>(der_bytes.data());\n  bssl::UniquePtr<X509> cert(d2i_X509(nullptr, &data, der_bytes.size()));\n  if (!cert.get()) {\n    *error_details = \"d2i_X509: fail to parse DER\";\n    return nullptr;\n  }\n  if (data < orig_data || static_cast<size_t>(data - orig_data) != der_bytes.size()) {\n    *error_details = 
\"There is trailing garbage in DER.\";\n    return nullptr;\n  }\n  return cert;\n}\n\nint deduceSignatureAlgorithmFromPublicKey(const EVP_PKEY* public_key, std::string* error_details) {\n  int sign_alg = 0;\n  const int pkey_id = EVP_PKEY_id(public_key);\n  switch (pkey_id) {\n  case EVP_PKEY_EC: {\n    // We only support P-256 ECDSA today.\n    const EC_KEY* ecdsa_public_key = EVP_PKEY_get0_EC_KEY(public_key);\n    // Since we checked the key type above, this should be valid.\n    ASSERT(ecdsa_public_key != nullptr);\n    const EC_GROUP* ecdsa_group = EC_KEY_get0_group(ecdsa_public_key);\n    if (ecdsa_group == nullptr || EC_GROUP_get_curve_name(ecdsa_group) != NID_X9_62_prime256v1) {\n      *error_details = \"Invalid leaf cert, only P-256 ECDSA certificates are supported\";\n      break;\n    }\n    // QUICHE uses SHA-256 as hash function in cert signature.\n    sign_alg = SSL_SIGN_ECDSA_SECP256R1_SHA256;\n  } break;\n  case EVP_PKEY_RSA: {\n    // We require RSA certificates with 2048-bit or larger keys.\n    const RSA* rsa_public_key = EVP_PKEY_get0_RSA(public_key);\n    // Since we checked the key type above, this should be valid.\n    ASSERT(rsa_public_key != nullptr);\n    const unsigned rsa_key_length = RSA_size(rsa_public_key);\n#ifdef BORINGSSL_FIPS\n    if (rsa_key_length != 2048 / 8 && rsa_key_length != 3072 / 8) {\n      *error_details = \"Invalid leaf cert, only RSA certificates with 2048-bit or 3072-bit keys \"\n                       \"are supported in FIPS mode\";\n      break;\n    }\n#else\n    if (rsa_key_length < 2048 / 8) {\n      *error_details =\n          \"Invalid leaf cert, only RSA certificates with 2048-bit or larger keys are supported\";\n      break;\n    }\n#endif\n    sign_alg = SSL_SIGN_RSA_PSS_RSAE_SHA256;\n  } break;\n  default:\n    *error_details = \"Invalid leaf cert, only RSA and ECDSA certificates are supported\";\n  }\n  return sign_alg;\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/envoy_quic_utils.h",
    "content": "#pragma once\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/http/codec.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/quic_types.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"quiche/quic/core/http/quic_header_list.h\"\n#include \"quiche/quic/core/quic_error_codes.h\"\n#include \"quiche/quic/platform/api/quic_ip_address.h\"\n#include \"quiche/quic/platform/api/quic_socket_address.h\"\n\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// TODO(danzh): this is called on each write. Consider to return an address instance on the stack if\n// the heap allocation is too expensive.\nNetwork::Address::InstanceConstSharedPtr\nquicAddressToEnvoyAddressInstance(const quic::QuicSocketAddress& quic_address);\n\nquic::QuicSocketAddress envoyIpAddressToQuicSocketAddress(const Network::Address::Ip* envoy_ip);\n\n// The returned header map has all keys in lower case.\ntemplate <class T>\nstd::unique_ptr<T> quicHeadersToEnvoyHeaders(const quic::QuicHeaderList& header_list) {\n  auto headers = T::create();\n  for (const auto& entry : header_list) {\n    // TODO(danzh): Avoid copy by referencing entry as header_list is already validated by QUIC.\n    headers->addCopy(Http::LowerCaseString(entry.first), entry.second);\n  }\n  return headers;\n}\n\ntemplate <class T>\nstd::unique_ptr<T> spdyHeaderBlockToEnvoyHeaders(const spdy::SpdyHeaderBlock& header_block) {\n  auto headers = T::create();\n  for (auto entry : header_block) {\n    // TODO(danzh): Avoid temporary strings and addCopy() with string_view.\n    std::string key(entry.first);\n    std::string value(entry.second);\n    
headers->addCopy(Http::LowerCaseString(key), value);\n  }\n  return headers;\n}\n\nspdy::SpdyHeaderBlock envoyHeadersToSpdyHeaderBlock(const Http::HeaderMap& headers);\n\n// Called when Envoy wants to reset the underlying QUIC stream.\nquic::QuicRstStreamErrorCode envoyResetReasonToQuicRstError(Http::StreamResetReason reason);\n\n// Called when a RST_STREAM frame is received.\nHttp::StreamResetReason quicRstErrorToEnvoyResetReason(quic::QuicRstStreamErrorCode rst_err);\n\n// Called when underlying QUIC connection is closed either locally or by peer.\nHttp::StreamResetReason quicErrorCodeToEnvoyResetReason(quic::QuicErrorCode error);\n\n// Called when a GOAWAY frame is received.\nABSL_MUST_USE_RESULT\nHttp::GoAwayErrorCode quicErrorCodeToEnvoyErrorCode(quic::QuicErrorCode error) noexcept;\n\n// Create a connection socket instance and apply given socket options to the\n// socket. IP_PKTINFO and SO_RXQ_OVFL is always set if supported.\nNetwork::ConnectionSocketPtr\ncreateConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr,\n                       Network::Address::InstanceConstSharedPtr& local_addr,\n                       const Network::ConnectionSocket::OptionsSharedPtr& options);\n\n// Convert a cert in string form to X509 object.\n// Return nullptr if the bytes passed cannot be parsed.\nbssl::UniquePtr<X509> parseDERCertificate(const std::string& der_bytes, std::string* error_details);\n\n// Deduce the suitable signature algorithm according to the public key.\n// Return the sign algorithm id works with the public key; If the public key is\n// not supported, return 0 with error_details populated correspondingly.\nint deduceSignatureAlgorithmFromPublicKey(const EVP_PKEY* public_key, std::string* error_details);\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\n# Build targets in this package are part of the QUICHE platform implementation.\n# These implementations are the infrastructure building block for QUIC. They are\n# used in 2 different ways:\n#\n# Most of them are not to be consumed or referenced directly by other Envoy code.\n# Their only consumers should be build rules under @com_googlesource_quiche//...,\n# and tests. In a monorepo, this would be enforced via visibility attribute, but\n# Bazel does not support limiting visibility to specific external dependencies.\n#\n# Very few of them are used by Envoy to interact with QUIC. They are used as a shim\n# to match a non-virtualized API required by the external Quiche implementation.\n#\n# See a detailed description of QUIC platform API dependency model at:\n# https://quiche.googlesource.com/quiche/+/refs/heads/master/quic/platform/api/README.md\n\n# These implementations are tested through their APIs with tests mostly brought in from\n# QUICHE, thus new unit tests for them are deliberately omitted in Envoy tree. These\n# tests are added to @com_googlesource_quiche//:quic_platform_api_test. 
And all tests\n# under @com_googlesource_quiche// are configured in test/coverage/gen_build.sh to run in\n# CI.\n# For some APIs which are not covered in QUICHE tests, their tests are added into\n# //test/extensions/quic_listeners/quiche/platform/.\n\n# TODO: add build target for quic_platform_impl_lib\n\nenvoy_cc_library(\n    name = \"flags_impl_lib\",\n    srcs = [\"flags_impl.cc\"],\n    hdrs = [\n        \"flags_impl.h\",\n        \"flags_list.h\",\n    ],\n    external_deps = [\n        \"abseil_base\",\n        \"abseil_synchronization\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\nenvoy_cc_library(\n    name = \"string_utils_lib\",\n    srcs = [\"string_utils.cc\"],\n    hdrs = [\"string_utils.h\"],\n    external_deps = [\"abseil_str_format\"],\n    visibility = [\"//visibility:private\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:base64_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"http2_platform_impl_lib\",\n    hdrs = [\n        \"http2_arraysize_impl.h\",\n        \"http2_bug_tracker_impl.h\",\n        \"http2_containers_impl.h\",\n        \"http2_estimate_memory_usage_impl.h\",\n        \"http2_flag_utils_impl.h\",\n        \"http2_flags_impl.h\",\n        \"http2_logging_impl.h\",\n        \"http2_macros_impl.h\",\n        \"http2_string_utils_impl.h\",\n    ],\n    external_deps = [\n        \"abseil_base\",\n        \"abseil_optional\",\n        \"abseil_str_format\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":flags_impl_lib\",\n        \":quic_platform_logging_impl_lib\",\n        \":string_utils_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_export_impl_lib\",\n    hdrs = [\"quic_export_impl.h\"],\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_logging_impl_lib\",\n    srcs = [\"quic_logging_impl.cc\"],\n    hdrs = [\n        
\"quic_bug_tracker_impl.h\",\n        \"quic_logging_impl.h\",\n    ],\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:stl_helpers\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_base_impl_lib\",\n    srcs = [\n        \"quic_mem_slice_impl.cc\",\n    ],\n    hdrs = [\n        \"quic_aligned_impl.h\",\n        \"quic_client_stats_impl.h\",\n        \"quic_containers_impl.h\",\n        \"quic_error_code_wrappers_impl.h\",\n        \"quic_estimate_memory_usage_impl.h\",\n        \"quic_fallthrough_impl.h\",\n        \"quic_flag_utils_impl.h\",\n        \"quic_flags_impl.h\",\n        \"quic_iovec_impl.h\",\n        \"quic_macros_impl.h\",\n        \"quic_map_util_impl.h\",\n        \"quic_mem_slice_impl.h\",\n        \"quic_prefetch_impl.h\",\n        \"quic_ptr_util_impl.h\",\n        \"quic_reference_counted_impl.h\",\n        \"quic_server_stats_impl.h\",\n        \"quic_stack_trace_impl.h\",\n        \"quic_stream_buffer_allocator_impl.h\",\n        \"quic_uint128_impl.h\",\n    ],\n    external_deps = [\n        \"abseil_base\",\n        \"abseil_hash\",\n        \"abseil_inlined_vector\",\n        \"abseil_memory\",\n        \"abseil_node_hash_map\",\n        \"abseil_node_hash_set\",\n        \"abseil_optional\",\n    ],\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":flags_impl_lib\",\n        \":string_utils_lib\",\n        \"//include/envoy/api:io_error_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/server:backtrace_lib\",\n        \"@com_googlesource_quiche//:quic_core_buffer_allocator_lib\",\n        \"@com_googlesource_quiche//:quic_platform_export\",\n        \"@com_googlesource_quiche//:quic_platform_ip_address_family\",\n        
\"@com_googlesource_quiche//:quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_impl_lib\",\n    srcs = [\n        \"quic_cert_utils_impl.cc\",\n        \"quic_file_utils_impl.cc\",\n        \"quic_hostname_utils_impl.cc\",\n    ],\n    hdrs = [\n        \"quic_cert_utils_impl.h\",\n        \"quic_file_utils_impl.h\",\n        \"quic_hostname_utils_impl.h\",\n        \"quic_mutex_impl.h\",\n        \"quic_pcc_sender_impl.h\",\n        \"quic_string_utils_impl.h\",\n    ],\n    external_deps = [\n        \"quiche_quic_platform_base\",\n        \"abseil_str_format\",\n        \"abseil_synchronization\",\n        \"abseil_time\",\n        \"ssl\",\n    ],\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/filesystem:directory_lib\",\n        \"//source/common/filesystem:filesystem_lib\",\n        \"//source/common/http:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_mem_slice_span_impl_lib\",\n    srcs = [\"quic_mem_slice_span_impl.cc\"],\n    hdrs = [\"quic_mem_slice_span_impl.h\"],\n    copts = select({\n        \"//bazel:windows_x86_64\": [],\n        \"//conditions:default\": [\"-Wno-unused-parameter\"],\n    }),\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"@com_googlesource_quiche//:quic_core_types_lib\",\n        \"@com_googlesource_quiche//:quic_platform_base\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_mem_slice_storage_impl_lib\",\n    srcs = [\"quic_mem_slice_storage_impl.cc\"],\n    hdrs = [\"quic_mem_slice_storage_impl.h\"],\n    copts = select({\n        \"//bazel:windows_x86_64\": [],\n        \"//conditions:default\": [\n            \"-Wno-error=invalid-offsetof\",\n            \"-Wno-unused-parameter\",\n        ],\n    }),\n    tags = [\"nofips\"],\n    
visibility = [\"//visibility:public\"],\n    deps = [\n        \"@com_googlesource_quiche//:quic_core_buffer_allocator_lib\",\n        \"@com_googlesource_quiche//:quic_core_utils_lib\",\n        \"@com_googlesource_quiche//:quic_platform_mem_slice_span\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quic_platform_udp_socket_impl_lib\",\n    hdrs = select({\n        \"//bazel:linux\": [\"quic_udp_socket_platform_impl.h\"],\n        \"//conditions:default\": [],\n    }),\n    repository = \"@envoy\",\n    tags = [\"nofips\"],\n)\n\nenvoy_cc_library(\n    name = \"envoy_quic_clock_lib\",\n    srcs = [\"envoy_quic_clock.cc\"],\n    hdrs = [\"envoy_quic_clock.h\"],\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"@com_googlesource_quiche//:quic_core_clock_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"quiche_common_platform_optional_impl_lib\",\n    hdrs = [\"quiche_optional_impl.h\"],\n    external_deps = [\n        \"abseil_node_hash_map\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\nenvoy_cc_library(\n    name = \"quiche_common_platform_impl_lib\",\n    srcs = [\"quiche_time_utils_impl.cc\"],\n    hdrs = [\n        \"quiche_arraysize_impl.h\",\n        \"quiche_logging_impl.h\",\n        \"quiche_map_util_impl.h\",\n        \"quiche_ptr_util_impl.h\",\n        \"quiche_str_cat_impl.h\",\n        \"quiche_string_piece_impl.h\",\n        \"quiche_text_utils_impl.h\",\n        \"quiche_time_utils_impl.h\",\n        \"quiche_unordered_containers_impl.h\",\n    ],\n    external_deps = [\n        \"abseil_hash\",\n        \"abseil_node_hash_map\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":quic_platform_logging_impl_lib\",\n        \":string_utils_lib\",\n        \"@com_googlesource_quiche//:quiche_common_platform_optional\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_platform_impl_lib\",\n    hdrs = [\n  
      \"spdy_arraysize_impl.h\",\n        \"spdy_bug_tracker_impl.h\",\n        \"spdy_containers_impl.h\",\n        \"spdy_endianness_util_impl.h\",\n        \"spdy_estimate_memory_usage_impl.h\",\n        \"spdy_flags_impl.h\",\n        \"spdy_logging_impl.h\",\n        \"spdy_macros_impl.h\",\n        \"spdy_mem_slice_impl.h\",\n        \"spdy_string_utils_impl.h\",\n        \"spdy_test_helpers_impl.h\",\n        \"spdy_test_utils_prod_impl.h\",\n    ],\n    external_deps = [\n        \"abseil_base\",\n        \"abseil_hash\",\n        \"abseil_inlined_vector\",\n        \"abseil_memory\",\n        \"abseil_str_format\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":flags_impl_lib\",\n        \":quic_platform_logging_impl_lib\",\n        \":string_utils_lib\",\n        \"//source/common/common:assert_lib\",\n        \"@com_googlesource_quiche//:quiche_common_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"spdy_platform_unsafe_arena_impl_lib\",\n    hdrs = [\"spdy_unsafe_arena_impl.h\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\"@com_googlesource_quiche//:spdy_simple_arena_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"quiche_common_platform_export_impl_lib\",\n    hdrs = [\"quiche_export_impl.h\"],\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n)\n\nenvoy_cc_library(\n    name = \"quiche_common_platform_endian_impl_lib\",\n    hdrs = [\"quiche_endian_impl.h\"],\n    tags = [\"nofips\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"quiche_common_platform_export_impl_lib\",\n        \"//source/common/common:byte_order_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/envoy_quic_clock.cc",
    "content": "#include \"extensions/quic_listeners/quiche/platform/envoy_quic_clock.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nquic::QuicTime EnvoyQuicClock::ApproximateNow() const {\n  return quic::QuicTime::Zero() + quic::QuicTime::Delta::FromMicroseconds(microsecondsSinceEpoch(\n                                      dispatcher_.approximateMonotonicTime()));\n}\n\nquic::QuicTime EnvoyQuicClock::Now() const {\n  // Since the expensive operation of obtaining time has to be performed anyway,\n  // make Dispatcher update approximate time. Without this, alarms might fire\n  // one event loop later. const_cast is necessary here because\n  // updateApproximateMonotonicTime() is a non-const operation, and Now() is\n  // conceptually const (even though this particular implementation has a\n  // visible side effect). Changing Now() to non-const would necessitate\n  // changing a number of other methods and members to non-const, which would\n  // not increase clarity.\n  const_cast<Event::Dispatcher&>(dispatcher_).updateApproximateMonotonicTime();\n  return ApproximateNow();\n}\n\nquic::QuicWallTime EnvoyQuicClock::WallNow() const {\n  return quic::QuicWallTime::FromUNIXMicroseconds(\n      microsecondsSinceEpoch(dispatcher_.timeSource().systemTime()));\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/envoy_quic_clock.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/event/dispatcher.h\"\n\n#include \"quiche/quic/core/quic_clock.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nclass EnvoyQuicClock : public quic::QuicClock {\npublic:\n  EnvoyQuicClock(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {}\n\n  // quic::QuicClock\n  quic::QuicTime ApproximateNow() const override;\n  quic::QuicTime Now() const override;\n  quic::QuicWallTime WallNow() const override;\n\nprivate:\n  template <typename T> int64_t microsecondsSinceEpoch(std::chrono::time_point<T> time) const {\n    return std::chrono::duration_cast<std::chrono::microseconds>(time.time_since_epoch()).count();\n  }\n\n  Event::Dispatcher& dispatcher_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/flags_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/flags_impl.h\"\n\n#include <set>\n\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/numbers.h\"\n\nnamespace quiche {\n\nnamespace {\n\nabsl::flat_hash_map<std::string, Flag*> MakeFlagMap() {\n  absl::flat_hash_map<std::string, Flag*> flags;\n\n#define QUICHE_FLAG(type, flag, value, help) flags.emplace(FLAGS_##flag->name(), FLAGS_##flag);\n#include \"extensions/quic_listeners/quiche/platform/flags_list.h\"\n#undef QUICHE_FLAG\n\n  return flags;\n}\n\n} // namespace\n\n// static\nFlagRegistry& FlagRegistry::GetInstance() {\n  static auto* instance = new FlagRegistry();\n  return *instance;\n}\n\nFlagRegistry::FlagRegistry() : flags_(MakeFlagMap()) {}\n\nvoid FlagRegistry::ResetFlags() const {\n  for (auto& kv : flags_) {\n    kv.second->ResetValue();\n  }\n}\n\nFlag* FlagRegistry::FindFlag(const std::string& name) const {\n  auto it = flags_.find(name);\n  return (it != flags_.end()) ? 
it->second : nullptr;\n}\n\ntemplate <> bool TypedFlag<bool>::SetValueFromString(const std::string& value_str) {\n  static const auto* kTrueValues = new std::set<std::string>({\"1\", \"t\", \"true\", \"y\", \"yes\"});\n  static const auto* kFalseValues = new std::set<std::string>({\"0\", \"f\", \"false\", \"n\", \"no\"});\n  auto lower = absl::AsciiStrToLower(value_str);\n  if (kTrueValues->find(lower) != kTrueValues->end()) {\n    SetValue(true);\n    return true;\n  }\n  if (kFalseValues->find(lower) != kFalseValues->end()) {\n    SetValue(false);\n    return true;\n  }\n  return false;\n}\n\ntemplate <> bool TypedFlag<int32_t>::SetValueFromString(const std::string& value_str) {\n  int32_t value;\n  if (absl::SimpleAtoi(value_str, &value)) {\n    SetValue(value);\n    return true;\n  }\n  return false;\n}\n\ntemplate <> bool TypedFlag<int64_t>::SetValueFromString(const std::string& value_str) {\n  int64_t value;\n  if (absl::SimpleAtoi(value_str, &value)) {\n    SetValue(value);\n    return true;\n  }\n  return false;\n}\n\ntemplate <> bool TypedFlag<double>::SetValueFromString(const std::string& value_str) {\n  double value;\n  if (absl::SimpleAtod(value_str, &value)) {\n    SetValue(value);\n    return true;\n  }\n  return false;\n}\n\ntemplate <> bool TypedFlag<std::string>::SetValueFromString(const std::string& value_str) {\n  SetValue(value_str);\n  return true;\n}\n\n// Flag definitions\n#define QUICHE_FLAG(type, flag, value, help)                                                       \\\n  TypedFlag<type>* FLAGS_##flag = new TypedFlag<type>(#flag, value, help);\n#include \"extensions/quic_listeners/quiche/platform/flags_list.h\"\n#undef QUICHE_FLAG\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/flags_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <string>\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/synchronization/mutex.h\"\n\nnamespace quiche {\n\nclass Flag;\n\n// TODO: modify flags implementation to be backed by\n// Runtime::runtimeFeatureEnabled(), which is the canonical Envoy way of\n// enabling and disabling features.\n\n// Registry of QUICHE flags. Can be used to reset all flags to default values,\n// and to look up and set flags by name.\nclass FlagRegistry {\npublic:\n  ~FlagRegistry() = default;\n\n  // Return singleton instance.\n  static FlagRegistry& GetInstance();\n\n  // Reset all registered flags to their default values.\n  void ResetFlags() const;\n\n  // Look up a flag by name.\n  Flag* FindFlag(const std::string& name) const;\n\nprivate:\n  FlagRegistry();\n\n  const absl::flat_hash_map<std::string, Flag*> flags_;\n};\n\n// Abstract class for QUICHE protocol and feature flags.\nclass Flag {\npublic:\n  // Construct Flag with the given name and help string.\n  Flag(const char* name, const char* help) : name_(name), help_(help) {}\n  virtual ~Flag() = default;\n\n  // Set flag value from given string, returning true iff successful.\n  virtual bool SetValueFromString(const std::string& value_str) = 0;\n\n  // Reset flag to default value.\n  virtual void ResetValue() = 0;\n\n  // Return flag name.\n  std::string name() const { return name_; }\n\n  // Return flag help string.\n  std::string help() const { return help_; }\n\nprivate:\n  std::string name_;\n  std::string help_;\n};\n\n// Concrete class for QUICHE protocol and feature flags, templated by flag type.\ntemplate <typename T> class TypedFlag : public Flag {\npublic:\n  TypedFlag(const char* name, T default_value, const char* help)\n      : Flag(name, help), 
value_(default_value), default_value_(default_value) {}\n\n  bool SetValueFromString(const std::string& value_str) override;\n\n  void ResetValue() override {\n    absl::MutexLock lock(&mutex_);\n    value_ = default_value_;\n  }\n\n  // Set flag value.\n  void SetValue(T value) {\n    absl::MutexLock lock(&mutex_);\n    value_ = value;\n  }\n\n  // Return flag value.\n  T value() const {\n    absl::MutexLock lock(&mutex_);\n    return value_;\n  }\n\nprivate:\n  mutable absl::Mutex mutex_;\n  T value_ ABSL_GUARDED_BY(mutex_);\n  T default_value_;\n};\n\n// SetValueFromString specializations\ntemplate <> bool TypedFlag<bool>::SetValueFromString(const std::string& value_str);\ntemplate <> bool TypedFlag<int32_t>::SetValueFromString(const std::string& value_str);\ntemplate <> bool TypedFlag<int64_t>::SetValueFromString(const std::string& value_str);\ntemplate <> bool TypedFlag<double>::SetValueFromString(const std::string& value_str);\ntemplate <> bool TypedFlag<std::string>::SetValueFromString(const std::string& value_str);\n\n// Flag declarations\n#define QUICHE_FLAG(type, flag, value, help) extern TypedFlag<type>* FLAGS_##flag;\n#include \"extensions/quic_listeners/quiche/platform/flags_list.h\"\n#undef QUICHE_FLAG\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/flags_list.h",
    "content": "// This file intentionally does not have header guards. It is intended to be\n// included multiple times, each time with a different definition of\n// QUICHE_FLAG.\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n// This file is generated by //third_party/quic/tools:quic_flags_list in\n// Google3.\n\n#if defined(QUICHE_FLAG)\n\nQUICHE_FLAG(\n    bool, http2_reloadable_flag_http2_backend_alpn_failure_error_code, false,\n    \"If true, the GFE will return a new ResponseCodeDetails error when ALPN to the backend fails.\")\n\nQUICHE_FLAG(bool, http2_reloadable_flag_http2_ip_based_cwnd_exp, true,\n            \"If true, enable IP address based CWND bootstrapping experiment with different \"\n            \"bandwidth models and priorities in HTTP2.\")\n\nQUICHE_FLAG(\n    bool, http2_reloadable_flag_http2_load_based_goaway_warning, false,\n    \"If true, load-based connection closures will send a warning GOAWAY before the actual GOAWAY.\")\n\nQUICHE_FLAG(bool, http2_reloadable_flag_http2_security_requirement_for_client3, false,\n            \"If true, check whether client meets security requirements during SSL handshake. If \"\n            \"flag is true and client does not meet security requirements, do not negotiate HTTP/2 \"\n            \"with client or terminate the session with SPDY_INADEQUATE_SECURITY if HTTP/2 is \"\n            \"already negotiated. 
The spec contains both cipher and TLS version requirements.\")\n\nQUICHE_FLAG(bool, http2_reloadable_flag_http2_websocket_detection, false,\n            \"If true, uses a HTTP/2-specific method of detecting websocket upgrade requests.\")\n\nQUICHE_FLAG(bool, http2_reloadable_flag_permissive_http2_switch, false,\n            \"If true, the GFE allows both HTTP/1.0 and HTTP/1.1 versions in HTTP/2 upgrade \"\n            \"requests/responses.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_debugips, false, \"\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_external_users, false, \"\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_gclb_quic_allow_alia, true,\n            \"If gfe2_reloadable_flag_gclb_use_alia is also true, use Alia for GCLB QUIC \"\n            \"handshakes. To be used as a big red button if there's a problem with Alia/QUIC.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_abort_qpack_on_stream_close, false,\n            \"If true, abort async QPACK header decompression in QuicSpdyStream::OnClose().\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_ack_delay_alarm_granularity, false,\n            \"When true, ensure the ACK delay is never less than the alarm granularity when ACK \"\n            \"decimation is enabled.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_add_missing_connected_checks, false,\n            \"If true, add missing connected checks.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_add_silent_idle_timeout, true,\n            \"If true, when server is silently closing connections due to idle timeout, serialize \"\n            \"the connection close packets which will be added to time wait list.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_add_stream_info_to_idle_close_detail, false,\n            \"If true, include stream information in idle timeout connection close detail.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_backend_set_stream_ttl, false,\n            
\"If true, check backend response header for X-Response-Ttl. If it is provided, the \"\n            \"stream TTL is set. A QUIC stream will be immediately canceled when tries to write \"\n            \"data if this TTL expired.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_client_enabled_bbr_v2, true,\n            \"If true, allow client to enable BBRv2 on server via connection option 'B2ON'.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_alpn_dispatch, false,\n            \"Support different QUIC sessions, as indicated by ALPN. Used for QBONE.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_avoid_too_low_probe_bw_cwnd, false,\n            \"If true, QUIC BBRv2's PROBE_BW mode will not reduce cwnd below BDP+ack_height.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_fewer_startup_round_trips, false,\n            \"When true, the 1RTT and 2RTT connection options decrease the number of round trips in \"\n            \"BBRv2 STARTUP without a 25% bandwidth increase to 1 or 2 round trips respectively.\")\n\nQUICHE_FLAG(\n    bool, quic_reloadable_flag_quic_bbr2_limit_inflight_hi, false,\n    \"When true, the B2HI connection option limits reduction of inflight_hi to (1-Beta)*CWND.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_use_post_inflight_to_detect_queuing, false,\n            \"If true, QUIC BBRv2 will use inflight byte after congestion event to detect queuing \"\n            \"during PROBE_UP.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_no_bytes_acked_in_startup_recovery, false,\n            \"When in STARTUP and recovery, do not add bytes_acked to QUIC BBR's CWND in \"\n            \"CalculateCongestionWindow()\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true,\n            \"If true, bootstrap initial QUIC cwnd by SPDY priorities.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_cap_large_client_initial_rtt, true,\n            \"If true, cap client suggested initial RTT to 1s 
if it is longer than 1s.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_clean_up_spdy_session_destructor, false,\n            \"If true, QuicSpdySession's destructor won't need to do cleanup.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_close_connection_in_on_can_write_with_blocked_writer,\n            false,\n            \"If true, close connection if writer is still blocked while OnCanWrite is called.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_close_connection_on_serialization_failure, false,\n            \"If true, close connection on packet serialization failures.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false,\n            \"If true, set burst token to 2 in cwnd bootstrapping experiment.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, false,\n            \"If true, uses conservative cwnd gain and pacing gain when cwnd gets bootstrapped.\")\n\nQUICHE_FLAG(\n    bool, quic_reloadable_flag_quic_copy_bbr_cwnd_to_bbr2, false,\n    \"If true, when switching from BBR to BBRv2, BBRv2 will use BBR's cwnd as its initial cwnd.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_default_enable_5rto_blackhole_detection2, true,\n            \"If true, default-enable 5RTO blackhole detection.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_default_on_pto, false,\n            \"If true, default on PTO which unifies TLP + RTO loss recovery.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr, true,\n            \"When true, defaults to BBR congestion control instead of Cubic.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr_v2, false,\n            \"If true, use BBRv2 as the default congestion controller. 
Takes precedence over \"\n            \"--quic_default_to_bbr.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_server_blackhole_detection, false,\n            \"If true, disable blackhole detection on server side.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_27, false,\n            \"If true, disable QUIC version h3-27.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_29, false,\n            \"If true, disable QUIC version h3-29.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q043, false,\n            \"If true, disable QUIC version Q043.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q046, false,\n            \"If true, disable QUIC version Q046.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q050, false,\n            \"If true, disable QUIC version Q050.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_t050, false,\n            \"If true, disable QUIC version h3-T050.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_t051, false,\n            \"If true, disable QUIC version h3-T051.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_discard_initial_packet_with_key_dropped, false,\n            \"If true, discard INITIAL packet if the key has been dropped.\")\n\nQUICHE_FLAG(\n    bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false,\n    \"In v44 and above, where STOP_WAITING is never sent, close the connection if it's received.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false,\n            \"If true, stop resetting ideal_next_packet_send_time_ in pacing sender.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_experiment_at_gfe, false,\n            \"If true, enable GFE-picked loss detection experiment.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_tuner, false,\n            \"If true, allow QUIC loss detection 
tuning to be enabled by connection option ELDT.\")\n\nQUICHE_FLAG(\n    bool, quic_reloadable_flag_quic_enable_mtu_discovery_at_server, false,\n    \"If true, QUIC will default enable MTU discovery at server, with a target of 1450 bytes.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, \"\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_arm_pto_for_application_data, false,\n            \"If true, do not arm PTO for application data until handshake confirmed.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bytes_left_for_batch_write, false,\n            \"If true, convert bytes_left_for_batch_write_ to unsigned int.\")\n\nQUICHE_FLAG(\n    bool, quic_reloadable_flag_quic_fix_http3_goaway_stream_id, false,\n    \"If true, send the lowest stream ID that can be retried by the client in a GOAWAY frame. If \"\n    \"false, send the highest received stream ID, which actually should not be retried.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_out_of_order_sending, false,\n            \"If true, fix a potential out of order sending caused by handshake gets confirmed \"\n            \"while the coalescer is not empty.\")\n\nQUICHE_FLAG(\n    bool, quic_reloadable_flag_quic_fix_pto_pending_timer_count, false,\n    \"If true, make sure there is pending timer credit when trying to PTO retransmit any packets.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_undecryptable_packets2, false,\n            \"If true, remove processed undecryptable packets.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_get_stream_information_from_stream_map, true,\n            \"If true, gQUIC will only consult stream_map in QuicSession::GetNumActiveStreams().\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_give_sent_packet_to_debug_visitor_after_sent, false,\n            \"If true, QUIC connection will pass sent packet information to the debug visitor after \"\n            \"a packet is recorded as sent in sent packet manager.\")\n\nQUICHE_FLAG(bool, 
quic_reloadable_flag_quic_http3_new_default_urgency_value, false,\n            \"If true, QuicStream::kDefaultUrgency is 3, otherwise 1.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_ip_based_cwnd_exp, true,\n            \"If true, enable IP address based CWND bootstrapping experiment with different \"\n            \"bandwidth models and priorities. \")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false,\n            \"If true, QuicListener::OnSocketIsWritable will always return false, which means there \"\n            \"will never be a fake EPOLLOUT event in the next epoll iteration.\")\n\nQUICHE_FLAG(bool,\n            quic_reloadable_flag_quic_neuter_initial_packet_in_coalescer_with_initial_key_discarded,\n            false, \"If true, neuter initial packet in the coalescer when discarding initial keys.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false,\n            \"If true, transport connection stats doesn't report duplicated experiments for same \"\n            \"connection.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_no_silent_close_for_idle_timeout, true,\n            \"If true, always send connection close for idle timeout if NSLC is received.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_only_set_uaid_in_tcs_visitor, false,\n            \"If true, QuicTransportConnectionStatsVisitor::PopulateTransportConnectionStats will \"\n            \"be the only place where TCS's uaid field is set.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_only_truncate_long_cids, true,\n            \"In IETF QUIC, only truncate long CIDs from the client's Initial, don't modify them.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_preferred_altsvc_version, false,\n            \"When true, we will send a preferred QUIC version at the start of our Alt-Svc list.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_write_packed_strings, false,\n            \"If true, QuicProxyDispatcher will write 
packed_client_address and packed_server_vip \"\n            \"in TcpProxyHeaderProto.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, true,\n            \"If true, for L1 GFE, as requests come in, record frontend service to VIP mapping \"\n            \"which is used to announce VIP in SHLO for proxied sessions. \")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_record_received_min_ack_delay, false,\n            \"If true, record the received min_ack_delay in transport parameters to QUIC config.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, \"\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_zombie_streams, true,\n            \"If true, QuicSession doesn't keep a separate zombie_streams. Instead, all streams are \"\n            \"stored in stream_map_.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, false,\n            \"If true, require handshake confirmation for QUIC connections, functionally disabling \"\n            \"0-rtt handshakes.\")\n\nQUICHE_FLAG(\n    bool, quic_reloadable_flag_quic_send_key_update_not_yet_supported, false,\n    \"When true, QUIC+TLS versions will send the key_update_not_yet_supported transport parameter.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_send_path_response, false,\n            \"If true, send PATH_RESPONSE upon receiving PATH_CHALLENGE regardless of perspective. 
\"\n            \"--gfe2_reloadable_flag_quic_start_peer_migration_earlier has to be true before turn \"\n            \"on this flag.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false,\n            \"When the STMP connection option is sent by the client, timestamps in the QUIC ACK \"\n            \"frame are sent and processed.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, false,\n            \"If true, enable server push feature on QUIC.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_set_resumed_ssl_session_early, false,\n            \"If true, set resumed_ssl_session if this is a 0-RTT connection.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_start_peer_migration_earlier, false,\n            \"If true, while reading an IETF quic packet, start peer migration immediately when \"\n            \"detecting the existence of any non-probing frame instead of at the end of the packet.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_stop_sending_uses_ietf_error_code, false,\n            \"If true, use IETF QUIC application error codes in STOP_SENDING frames. 
If false, use \"\n            \"QuicRstStreamErrorCodes.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false,\n            \"A testonly reloadable flag that will always default to false.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_true, true,\n            \"A testonly reloadable flag that will always default to true.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false,\n            \"When true, set the initial congestion control window from connection options in \"\n            \"QuicSentPacketManager rather than TcpCubicSenderBytes.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false,\n            \"If true, use header stage idle list for QUIC connections in GFE.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_quic_use_leto_key_exchange, false,\n            \"If true, QUIC will attempt to use the Leto key exchange service and only fall back to \"\n            \"local key exchange if that fails.\")\n\nQUICHE_FLAG(bool, quic_reloadable_flag_send_quic_fallback_server_config_on_leto_error, false,\n            \"If true and using Leto for QUIC shared-key calculations, GFE will react to a failure \"\n            \"to contact Leto by sending a REJ containing a fallback ServerConfig, allowing the \"\n            \"client to continue the handshake.\")\n\nQUICHE_FLAG(\n    bool, quic_restart_flag_dont_fetch_quic_private_keys_from_leto, false,\n    \"If true, GFE will not request private keys when fetching QUIC ServerConfigs from Leto.\")\n\nQUICHE_FLAG(bool, quic_restart_flag_quic_adjust_initial_cwnd_by_gws, true,\n            \"If true, GFE informs backend that a client request is the first one on the connection \"\n            \"via frontline header \\\"first_request=1\\\". 
Also, adjust initial cwnd based on \"\n            \"X-Google-Gws-Initial-Cwnd-Mode sent by GWS.\")\n\nQUICHE_FLAG(\n    bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false,\n    \"If true, inspects QUIC CHLOs for kLOAS and early creates sessions to allow multi-packet CHLOs\")\n\nQUICHE_FLAG(\n    bool, quic_restart_flag_quic_disable_gws_cwnd_experiment, false,\n    \"If true, X-Google-Gws-Initial-Cwnd-Mode related header sent by GWS becomes no-op for QUIC.\")\n\nQUICHE_FLAG(bool, quic_restart_flag_quic_enable_tls_resumption_v4, true,\n            \"If true, enables support for TLS resumption in QUIC.\")\n\nQUICHE_FLAG(bool, quic_restart_flag_quic_enable_zero_rtt_for_tls_v2, true,\n            \"If true, support for IETF QUIC 0-rtt is enabled.\")\n\nQUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false,\n            \"If true, QUIC offload pacing when using USPS as egress method.\")\n\nQUICHE_FLAG(bool, quic_restart_flag_quic_rx_ring_use_tpacket_v3, false,\n            \"If true, use TPACKET_V3 for QuicRxRing instead of TPACKET_V2.\")\n\nQUICHE_FLAG(bool, quic_restart_flag_quic_should_accept_new_connection, false,\n            \"If true, reject QUIC CHLO packets when dispatcher is asked to do so.\")\n\nQUICHE_FLAG(bool, quic_restart_flag_quic_support_release_time_for_gso, false,\n            \"If true, QuicGsoBatchWriter will support release time if it is available and the \"\n            \"process has the permission to do so.\")\n\nQUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_false, false,\n            \"A testonly restart flag that will always default to false.\")\n\nQUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_true, true,\n            \"A testonly restart flag that will always default to true.\")\n\nQUICHE_FLAG(\n    bool, quic_restart_flag_quic_use_leto_for_quic_configs, false,\n    \"If true, use Leto to fetch QUIC server configs instead of using the seeds from Memento.\")\n\nQUICHE_FLAG(bool, 
quic_restart_flag_quic_use_pigeon_socket_to_backend, false,\n            \"If true, create a shared pigeon socket for all quic to backend connections and switch \"\n            \"to use it after successful handshake.\")\n\nQUICHE_FLAG(bool, spdy_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true,\n            \"If true, bootstrap initial QUIC cwnd by SPDY priorities.\")\n\nQUICHE_FLAG(bool, spdy_reloadable_flag_quic_clean_up_spdy_session_destructor, false,\n            \"If true, QuicSpdySession's destructor won't need to do cleanup.\")\n\nQUICHE_FLAG(\n    bool, spdy_reloadable_flag_spdy_discard_response_body_if_disallowed, false,\n    \"If true, SPDY will discard all response body bytes when response code indicates no response \"\n    \"body should exist. Previously, we only discard partial bytes on the first response processing \"\n    \"and the rest of the response bytes would still be delivered even though the response code \"\n    \"said there should not be any body associated with the response code.\")\n\nQUICHE_FLAG(bool, quic_allow_chlo_buffering, true,\n            \"If true, allows packets to be buffered in anticipation of a \"\n            \"future CHLO, and allow CHLO packets to be buffered until next \"\n            \"iteration of the event loop.\")\n\nQUICHE_FLAG(bool, quic_disable_pacing_for_perf_tests, false, \"If true, disable pacing in QUIC\")\n\nQUICHE_FLAG(bool, quic_enforce_single_packet_chlo, true,\n            \"If true, enforce that QUIC CHLOs fit in one packet\")\n\nQUICHE_FLAG(int64_t, quic_time_wait_list_max_connections, 600000,\n            \"Maximum number of connections on the time-wait list. 
\"\n            \"A negative value implies no configured limit.\")\n\nQUICHE_FLAG(int64_t, quic_time_wait_list_seconds, 200,\n            \"Time period for which a given connection_id should live in \"\n            \"the time-wait state.\")\n\nQUICHE_FLAG(double, quic_bbr_cwnd_gain, 2.0f,\n            \"Congestion window gain for QUIC BBR during PROBE_BW phase.\")\n\nQUICHE_FLAG(int32_t, quic_buffered_data_threshold, 8 * 1024,\n            \"If buffered data in QUIC stream is less than this \"\n            \"threshold, buffers all provided data or asks upper layer for more data\")\n\nQUICHE_FLAG(int32_t, quic_send_buffer_max_data_slice_size, 4 * 1024,\n            \"Max size of data slice in bytes for QUIC stream send buffer.\")\n\nQUICHE_FLAG(int32_t, quic_lumpy_pacing_size, 2,\n            \"Number of packets that the pacing sender allows in bursts during \"\n            \"pacing. This flag is ignored if a flow's estimated bandwidth is \"\n            \"lower than 1200 kbps.\")\n\nQUICHE_FLAG(double, quic_lumpy_pacing_cwnd_fraction, 0.25f,\n            \"Congestion window fraction that the pacing sender allows in bursts \"\n            \"during pacing.\")\n\nQUICHE_FLAG(int32_t, quic_max_pace_time_into_future_ms, 10,\n            \"Max time that QUIC can pace packets into the future in ms.\")\n\nQUICHE_FLAG(double, quic_pace_time_into_future_srtt_fraction, 0.125f,\n            \"Smoothed RTT fraction that a connection can pace packets into the future.\")\n\nQUICHE_FLAG(bool, quic_export_server_num_packets_per_write_histogram, false,\n            \"If true, export number of packets written per write operation histogram.\")\n\nQUICHE_FLAG(bool, quic_disable_version_negotiation_grease_randomness, false,\n            \"If true, use predictable version negotiation versions.\")\n\nQUICHE_FLAG(bool, quic_enable_http3_grease_randomness, true,\n            \"If true, use random greased settings and frames.\")\n\nQUICHE_FLAG(int64_t, quic_max_tracked_packet_count, 10000, 
\"Maximum number of tracked packets.\")\n\nQUICHE_FLAG(bool, quic_prober_uses_length_prefixed_connection_ids, false,\n            \"If true, QuicFramer::WriteClientVersionNegotiationProbePacket uses \"\n            \"length-prefixed connection IDs.\")\n\nQUICHE_FLAG(bool, quic_client_convert_http_header_name_to_lowercase, true,\n            \"If true, HTTP request header names sent from QuicSpdyClientBase(and \"\n            \"descendants) will be automatically converted to lower case.\")\n\nQUICHE_FLAG(bool, quic_enable_http3_server_push, false,\n            \"If true, server push will be allowed in QUIC versions that use HTTP/3.\")\n\nQUICHE_FLAG(int32_t, quic_bbr2_default_probe_bw_base_duration_ms, 2000,\n            \"The default minimum duration for BBRv2-native probes, in milliseconds.\")\n\nQUICHE_FLAG(int32_t, quic_bbr2_default_probe_bw_max_rand_duration_ms, 1000,\n            \"The default upper bound of the random amount of BBRv2-native \"\n            \"probes, in milliseconds.\")\n\nQUICHE_FLAG(int32_t, quic_bbr2_default_probe_rtt_period_ms, 10000,\n            \"The default period for entering PROBE_RTT, in milliseconds.\")\n\nQUICHE_FLAG(double, quic_bbr2_default_loss_threshold, 0.02,\n            \"The default loss threshold for QUIC BBRv2, should be a value \"\n            \"between 0 and 1.\")\n\nQUICHE_FLAG(int32_t, quic_bbr2_default_startup_full_loss_count, 8,\n            \"The default minimum number of loss marking events to exit STARTUP.\")\n\nQUICHE_FLAG(int32_t, quic_bbr2_default_probe_bw_full_loss_count, 2,\n            \"The default minimum number of loss marking events to exit PROBE_UP phase.\")\n\nQUICHE_FLAG(double, quic_bbr2_default_inflight_hi_headroom, 0.01,\n            \"The default fraction of unutilized headroom to try to leave in path \"\n            \"upon high loss.\")\n\nQUICHE_FLAG(int32_t, quic_bbr2_default_initial_ack_height_filter_window, 10,\n            \"The default initial value of the max ack height filter's window 
length.\")\n\nQUICHE_FLAG(double, quic_ack_aggregation_bandwidth_threshold, 1.0,\n            \"If the bandwidth during ack aggregation is smaller than (estimated \"\n            \"bandwidth * this flag), consider the current aggregation completed \"\n            \"and starts a new one.\")\n\nQUICHE_FLAG(int32_t, quic_anti_amplification_factor, 5,\n            \"Anti-amplification factor. Before address validation, server will \"\n            \"send no more than factor times bytes received.\")\n\nQUICHE_FLAG(int32_t, quic_max_buffered_crypto_bytes, 16 * 1024,\n            \"The maximum amount of CRYPTO frame data that can be buffered.\")\n\nQUICHE_FLAG(int32_t, quic_max_aggressive_retransmittable_on_wire_ping_count, 0,\n            \"If set to non-zero, the maximum number of consecutive pings that \"\n            \"can be sent with aggressive initial retransmittable on wire timeout \"\n            \"if there is no new data received. After which, the timeout will be \"\n            \"exponentially back off until exceeds the default ping timeout.\")\n\nQUICHE_FLAG(int32_t, quic_max_congestion_window, 2000, \"The maximum congestion window in packets.\")\n\nQUICHE_FLAG(int32_t, quic_max_streams_window_divisor, 2,\n            \"The divisor that controls how often MAX_STREAMS frame is sent.\")\n\nQUICHE_FLAG(bool, http2_reloadable_flag_http2_testonly_default_false, false,\n            \"A testonly reloadable flag that will always default to false.\")\n\nQUICHE_FLAG(bool, http2_restart_flag_http2_testonly_default_false, false,\n            \"A testonly restart flag that will always default to false.\")\n\nQUICHE_FLAG(bool, spdy_reloadable_flag_spdy_testonly_default_false, false,\n            \"A testonly reloadable flag that will always default to false.\")\n\nQUICHE_FLAG(bool, spdy_restart_flag_spdy_testonly_default_false, false,\n            \"A testonly restart flag that will always default to false.\")\n\n#endif\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_arraysize_impl.h",
    "content": "#pragma once\n\n#include \"absl/base/macros.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define HTTP2_ARRAYSIZE_IMPL(x) ABSL_ARRAYSIZE(x)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_bug_tracker_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h\"\n\n#define HTTP2_BUG_IMPL QUIC_BUG_IMPL\n#define HTTP2_BUG_IF_IMPL QUIC_BUG_IF_IMPL\n#define FLAGS_http2_always_log_bugs_for_tests_IMPL true\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_containers_impl.h",
    "content": "#pragma once\n\n#include <deque>\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace http2 {\n\ntemplate <typename T> using Http2DequeImpl = std::deque<T>;\n\n} // namespace http2\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_estimate_memory_usage_impl.h",
    "content": "#pragma once\n\n#include <cstddef>\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace http2 {\n\ntemplate <class T> size_t Http2EstimateMemoryUsageImpl(const T& /*object*/) { return 0; }\n\n} // namespace http2\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_flag_utils_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define HTTP2_RELOADABLE_FLAG_COUNT_IMPL(flag)                                                     \\\n  do {                                                                                             \\\n  } while (0)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_flags_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/flags_impl.h\"\n\n#define GetHttp2ReloadableFlagImpl(flag) quiche::FLAGS_http2_reloadable_flag_##flag->value()\n\n#define SetHttp2ReloadableFlagImpl(flag, value)                                                    \\\n  quiche::FLAGS_http2_reloadable_flag_##flag->SetValue(value)\n\n#define HTTP2_CODE_COUNT_N_IMPL(flag, instance, total)                                             \\\n  do {                                                                                             \\\n  } while (0)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_logging_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_logging_impl.h\"\n\n#define HTTP2_LOG_IMPL(severity) QUICHE_LOG_IMPL(severity)\n\n#define HTTP2_VLOG_IMPL(verbose_level) QUICHE_VLOG_IMPL(verbose_level)\n\n#define HTTP2_DLOG_IMPL(severity) QUICHE_DLOG_IMPL(severity)\n\n#define HTTP2_DLOG_IF_IMPL(severity, condition) QUICHE_DLOG_IF_IMPL(severity, condition)\n\n#define HTTP2_DVLOG_IMPL(verbose_level) QUICHE_DVLOG_IMPL(verbose_level)\n\n#define HTTP2_DVLOG_IF_IMPL(verbose_level, condition) QUICHE_DVLOG_IF_IMPL(verbose_level, condition)\n\n#define HTTP2_DLOG_EVERY_N_IMPL(severity, n) QUICHE_DLOG_EVERY_N_IMPL(severity, n)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_macros_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <utility>\n\n#include \"extensions/quic_listeners/quiche/platform/quic_logging_impl.h\"\n\n#include \"absl/base/macros.h\"\n\n#define HTTP2_FALLTHROUGH_IMPL ABSL_FALLTHROUGH_INTENDED\n#define HTTP2_DIE_IF_NULL_IMPL(ptr) dieIfNull(ptr)\n#define HTTP2_UNREACHABLE_IMPL() DCHECK(false)\n\nnamespace http2 {\n\ntemplate <typename T> inline T dieIfNull(T&& ptr) {\n  CHECK((ptr) != nullptr);\n  return std::forward<T>(ptr);\n}\n\n} // namespace http2\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_string_piece_impl.h",
    "content": "#pragma once\n\n#include \"absl/strings/string_view.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace http2 {\n\nusing Http2StringPieceImpl = absl::string_view;\n\n} // namespace http2\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/http2_string_utils_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/string_utils.h\"\n\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"fmt/printf.h\"\n\nnamespace http2 {\n\ntemplate <typename... Args> inline std::string Http2StrCatImpl(const Args&... args) {\n  return absl::StrCat(std::forward<const Args&>(args)...);\n}\n\ntemplate <typename... Args>\ninline void Http2StrAppendImpl(std::string* output, const Args&... args) {\n  absl::StrAppend(output, std::forward<const Args&>(args)...);\n}\n\ntemplate <typename... Args> inline std::string Http2StringPrintfImpl(const Args&... args) {\n  return fmt::sprintf(std::forward<const Args&>(args)...);\n}\n\ninline std::string Http2HexEncodeImpl(const void* bytes, size_t size) {\n  return absl::BytesToHexString(absl::string_view(static_cast<const char*>(bytes), size));\n}\n\ninline std::string Http2HexDecodeImpl(absl::string_view data) {\n  return absl::HexStringToBytes(data);\n}\n\ninline std::string Http2HexDumpImpl(absl::string_view data) { return quiche::HexDump(data); }\n\ninline std::string Http2HexEscapeImpl(absl::string_view data) { return absl::CHexEscape(data); }\n\ntemplate <typename Number> inline std::string Http2HexImpl(Number number) {\n  return absl::StrCat(absl::Hex(number));\n}\n\n} // namespace http2\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_aligned_impl.h",
    "content": "#pragma once\n\n#include \"absl/base/optimization.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define QUIC_ALIGN_OF_IMPL alignof\n#ifdef _MSC_VER\n#define QUIC_ALIGNED_IMPL(X) __declspec(align(X))\n#else\n#define QUIC_ALIGNED_IMPL(X) __attribute__((aligned(X)))\n#endif\n#define QUIC_CACHELINE_ALIGNED_IMPL ABSL_CACHELINE_ALIGNED\n#define QUIC_CACHELINE_SIZE_IMPL ABSL_CACHELINE_SIZE\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_logging_impl.h\"\n\n// TODO(wub): Implement exponential back off to avoid performance problems due\n// to excessive QUIC_BUG.\n#define QUIC_BUG_IMPL QUICHE_LOG_IMPL(DFATAL)\n#define QUIC_BUG_IF_IMPL(condition) QUICHE_LOG_IF_IMPL(DFATAL, condition)\n#define QUIC_PEER_BUG_IMPL QUICHE_LOG_IMPL(ERROR)\n#define QUIC_PEER_BUG_IF_IMPL(condition) QUICHE_LOG_IF_IMPL(ERROR, condition)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.h\"\n\n#include \"openssl/bytestring.h\"\n\nnamespace quic {\n\n// static\nbool QuicCertUtilsImpl::ExtractSubjectNameFromDERCert(quiche::QuicheStringPiece cert,\n                                                      quiche::QuicheStringPiece* subject_out) {\n  CBS tbs_certificate;\n  if (!SeekToSubject(cert, &tbs_certificate)) {\n    return false;\n  }\n\n  CBS subject;\n  if (!CBS_get_asn1_element(&tbs_certificate, &subject, CBS_ASN1_SEQUENCE)) {\n    return false;\n  }\n  *subject_out =\n      absl::string_view(reinterpret_cast<const char*>(CBS_data(&subject)), CBS_len(&subject));\n  return true;\n}\n\n// static\nbool QuicCertUtilsImpl::SeekToSubject(quiche::QuicheStringPiece cert, CBS* tbs_certificate) {\n  CBS der;\n  CBS_init(&der, reinterpret_cast<const uint8_t*>(cert.data()), cert.size());\n  CBS certificate;\n  // From RFC 5280, section 4.1\n  //    Certificate  ::=  SEQUENCE  {\n  //      tbsCertificate       TBSCertificate,\n  //      signatureAlgorithm   AlgorithmIdentifier,\n  //      signatureValue       BIT STRING  }\n\n  // TBSCertificate  ::=  SEQUENCE  {\n  //      version         [0]  EXPLICIT Version DEFAULT v1,\n  //      serialNumber         CertificateSerialNumber,\n  //      signature            AlgorithmIdentifier,\n  //      issuer               Name,\n  //      validity             Validity,\n  //      subject              Name,\n  //      subjectPublicKeyInfo SubjectPublicKeyInfo,\n  if (!CBS_get_asn1(&der, &certificate, CBS_ASN1_SEQUENCE) ||\n      CBS_len(&der) != 0 || // We don't allow junk after the certificate.\n      !CBS_get_asn1(&certificate, tbs_certificate, CBS_ASN1_SEQUENCE) ||\n      // version.\n      
!CBS_get_optional_asn1(tbs_certificate, nullptr, nullptr,\n                             CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||\n      // Serial number.\n      !CBS_get_asn1(tbs_certificate, nullptr, CBS_ASN1_INTEGER) ||\n      // Signature.\n      !CBS_get_asn1(tbs_certificate, nullptr, CBS_ASN1_SEQUENCE) ||\n      // Issuer.\n      !CBS_get_asn1(tbs_certificate, nullptr, CBS_ASN1_SEQUENCE) ||\n      // Validity.\n      !CBS_get_asn1(tbs_certificate, nullptr, CBS_ASN1_SEQUENCE)) {\n    return false;\n  }\n  return true;\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_cert_utils_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"openssl/base.h\"\n#include \"quiche/common/platform/api/quiche_string_piece.h\"\n\nnamespace quic {\n\nclass QuicCertUtilsImpl {\npublic:\n  static bool ExtractSubjectNameFromDERCert(quiche::QuicheStringPiece cert,\n                                            quiche::QuicheStringPiece* subject_out);\n\nprivate:\n  static bool SeekToSubject(quiche::QuicheStringPiece cert, CBS* tbs_certificate);\n};\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_client_stats_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <string>\n\n// NOTE(wub): These macros are currently NOOP because they are supposed to be\n// used by client-side stats. They should be implemented when QUIC client code\n// is used by Envoy to connect to backends.\n\n#define QUIC_CLIENT_HISTOGRAM_ENUM_IMPL(name, sample, enum_size, docstring)                        \\\n  do {                                                                                             \\\n    (void)(sample);                                                                                \\\n  } while (0)\n#define QUIC_CLIENT_HISTOGRAM_BOOL_IMPL(name, sample, docstring)                                   \\\n  (void)(sample);                                                                                  \\\n  do {                                                                                             \\\n  } while (0)\n#define QUIC_CLIENT_HISTOGRAM_TIMES_IMPL(name, sample, min, max, num_buckets, docstring)           \\\n  do {                                                                                             \\\n    (void)(sample);                                                                                \\\n  } while (0)\n#define QUIC_CLIENT_HISTOGRAM_COUNTS_IMPL(name, sample, min, max, num_buckets, docstring)          \\\n  do {                                                                                             \\\n    (void)(sample);                                                                                \\\n  } while (0)\n\nnamespace quic {\n\ninline void QuicClientSparseHistogramImpl(const std::string& /*name*/, int /*sample*/) {}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h",
    "content": "#pragma once\n\n#include <deque>\n#include <memory>\n#include <ostream>\n#include <queue>\n#include <sstream>\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/container/node_hash_set.h\"\n#include \"quiche/common/simple_linked_hash_map.h\"\n#include \"quiche/quic/platform/api/quic_flags.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quic {\n\ntemplate <typename Key> using QuicDefaultHasherImpl = absl::Hash<Key>;\n\ntemplate <typename Key, typename Value, typename Hash>\nusing QuicUnorderedMapImpl = absl::node_hash_map<Key, Value, Hash>;\n\ntemplate <typename Key, typename Value, typename Hash>\nusing QuicHashMapImpl = absl::flat_hash_map<Key, Value, Hash>;\n\ntemplate <typename Key, typename Hash> using QuicHashSetImpl = absl::flat_hash_set<Key, Hash>;\n\ntemplate <typename Key, typename Hash> using QuicUnorderedSetImpl = absl::node_hash_set<Key, Hash>;\n\ntemplate <typename Key, typename Value, typename Hash>\nusing QuicLinkedHashMapImpl = quiche::SimpleLinkedHashMap<Key, Value, Hash>;\n\ntemplate <typename Key, typename Value, int Size>\nusing QuicSmallMapImpl = absl::flat_hash_map<Key, Value>;\n\ntemplate <typename T> using QuicQueueImpl = std::queue<T>;\n\ntemplate <typename T> using QuicDequeImpl = std::deque<T>;\n\ntemplate <typename T, size_t N, typename A = std::allocator<T>>\nusing QuicInlinedVectorImpl = absl::InlinedVector<T, N, A>;\n\ntemplate <typename T, size_t N, typename A>\ninline std::ostream& operator<<(std::ostream& os,\n                                const QuicInlinedVectorImpl<T, N, A> inlined_vector) {\n  std::stringstream debug_string;\n  debug_string << \"{\";\n  typename QuicInlinedVectorImpl<T, N, A>::const_iterator it = inlined_vector.cbegin();\n  debug_string << *it;\n  ++it;\n  while (it != inlined_vector.cend()) {\n    debug_string << \", \" << *it;\n    ++it;\n  }\n  debug_string << \"}\";\n  return os << debug_string.str();\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_error_code_wrappers_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <memory>\n\n#include \"envoy/api/io_error.h\"\n\n#define QUIC_EMSGSIZE_IMPL static_cast<int>(Envoy::Api::IoError::IoErrorCode::MessageTooBig)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_estimate_memory_usage_impl.h",
    "content": "#pragma once\n\n#include <cstddef>\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quic {\n\n// Dummy implementation.\ntemplate <class T> size_t QuicEstimateMemoryUsageImpl(const T& /*object*/) { return 0; }\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_export_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define QUIC_EXPORT\n#define QUIC_EXPORT_PRIVATE\n#define QUIC_NO_EXPORT\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_fallthrough_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"absl/base/macros.h\"\n\n#define QUIC_FALLTHROUGH_INTENDED_IMPL ABSL_FALLTHROUGH_INTENDED\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h\"\n\n#include \"common/filesystem/directory.h\"\n#include \"common/filesystem/filesystem_impl.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace quic {\nnamespace {\n\nvoid depthFirstTraverseDirectory(const std::string& dirname, std::vector<std::string>& files) {\n  Envoy::Filesystem::Directory directory(dirname);\n  for (const Envoy::Filesystem::DirectoryEntry& entry : directory) {\n    switch (entry.type_) {\n    case Envoy::Filesystem::FileType::Regular:\n      files.push_back(absl::StrCat(dirname, \"/\", entry.name_));\n      break;\n    case Envoy::Filesystem::FileType::Directory:\n      if (entry.name_ != \".\" && entry.name_ != \"..\") {\n        depthFirstTraverseDirectory(absl::StrCat(dirname, \"/\", entry.name_), files);\n      }\n      break;\n    default:\n      ASSERT(false,\n             absl::StrCat(\"Unknow file entry type \", entry.type_, \" under directory \", dirname));\n    }\n  }\n}\n\n} // namespace\n\n// Traverses the directory |dirname| and returns all of the files it contains.\nstd::vector<std::string> ReadFileContentsImpl(const std::string& dirname) {\n  std::vector<std::string> files;\n  depthFirstTraverseDirectory(dirname, files);\n  return files;\n}\n\n// Reads the contents of |filename| as a string into |contents|.\nvoid ReadFileContentsImpl(quiche::QuicheStringPiece filename, std::string* contents) {\n#ifdef WIN32\n  Envoy::Filesystem::InstanceImplWin32 fs;\n#else\n  Envoy::Filesystem::InstanceImplPosix fs;\n#endif\n  *contents = fs.fileReadToEnd(std::string(filename.data(), filename.size()));\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <vector>\n\n#include \"quiche/common/platform/api/quiche_string_piece.h\"\n\nnamespace quic {\n\n/**\n * Traverses the directory |dirname| and returns all of the files it contains.\n * @param dirname full path without trailing '/'.\n */\nstd::vector<std::string> ReadFileContentsImpl(const std::string& dirname);\n\n/**\n * Reads the contents of |filename| as a string into |contents|.\n *  @param filename the full path to the file.\n *  @param contents output location of the file content.\n */\nvoid ReadFileContentsImpl(quiche::QuicheStringPiece filename, std::string* contents);\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_flag_utils_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define QUIC_RELOADABLE_FLAG_COUNT_IMPL(flag)                                                      \\\n  do {                                                                                             \\\n  } while (0)\n\n#define QUIC_RELOADABLE_FLAG_COUNT_N_IMPL(flag, instance, total)                                   \\\n  do {                                                                                             \\\n  } while (0)\n\n#define QUIC_RESTART_FLAG_COUNT_IMPL(flag)                                                         \\\n  do {                                                                                             \\\n  } while (0)\n\n#define QUIC_RESTART_FLAG_COUNT_N_IMPL(flag, instance, total)                                      \\\n  do {                                                                                             \\\n  } while (0)\n\n#define QUIC_CODE_COUNT_IMPL(name)                                                                 \\\n  do {                                                                                             \\\n  } while (0)\n\n#define QUIC_CODE_COUNT_N_IMPL(name, instance, total)                                              \\\n  do {                                                                                             \\\n  } while (0)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_flags_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <string>\n#include <vector>\n\n#include \"extensions/quic_listeners/quiche/platform/flags_impl.h\"\n\n// |flag| is the global flag variable, which is a pointer to TypedFlag<type>.\n#define GetQuicFlagImpl(flag) (quiche::flag)->value()\n\n// |flag| is the global flag variable, which is a pointer to TypedFlag<type>.\n#define SetQuicFlagImpl(flag, value) (quiche::flag)->SetValue(value)\n\n#define GetQuicReloadableFlagImpl(flag) quiche::FLAGS_quic_reloadable_flag_##flag->value()\n\n#define SetQuicReloadableFlagImpl(flag, value)                                                     \\\n  quiche::FLAGS_quic_reloadable_flag_##flag->SetValue(value)\n\n#define GetQuicRestartFlagImpl(flag) quiche::FLAGS_quic_restart_flag_##flag->value()\n\n#define SetQuicRestartFlagImpl(flag, value) quiche::FLAGS_quic_restart_flag_##flag->SetValue(value)\n\n// Not wired into command-line parsing.\n#define DEFINE_QUIC_COMMAND_LINE_FLAG_IMPL(type, flag, value, help)                                \\\n  quiche::TypedFlag<type>* FLAGS_##flag = new TypedFlag<type>(#flag, value, help);\n\nnamespace quic {\n\n// TODO(mpwarres): implement. Lower priority since only used by QUIC command-line tools.\ninline std::vector<std::string> QuicParseCommandLineFlagsImpl(const char* /*usage*/, int /*argc*/,\n                                                              const char* const* /*argv*/) {\n  return std::vector<std::string>();\n}\n\n// TODO(mpwarres): implement. Lower priority since only used by QUIC command-line tools.\ninline void QuicPrintCommandLineFlagHelpImpl(const char* /*usage*/) {}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h\"\n\n#include <string>\n\n#include \"common/http/utility.h\"\n\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/str_cat.h\"\n\n// TODO(wub): Implement both functions on top of GoogleUrl, then enable\n// quiche/quic/platform/api/quic_hostname_utils_test.cc.\n\nnamespace quic {\n\n// static\nbool QuicHostnameUtilsImpl::IsValidSNI(quiche::QuicheStringPiece sni) {\n  // TODO(wub): Implement it on top of GoogleUrl, once it is available.\n\n  return sni.find_last_of('.') != std::string::npos &&\n         Envoy::Http::Utility::Url().initialize(absl::StrCat(\"http://\", sni), false);\n}\n\n// static\nstd::string QuicHostnameUtilsImpl::NormalizeHostname(quiche::QuicheStringPiece hostname) {\n  // TODO(wub): Implement it on top of GoogleUrl, once it is available.\n  std::string host = absl::AsciiStrToLower(hostname);\n\n  // Walk backwards over the string, stopping at the first trailing dot.\n  size_t host_end = host.length();\n  while (host_end != 0 && host[host_end - 1] == '.') {\n    host_end--;\n  }\n\n  // Erase the trailing dots.\n  if (host_end != host.length()) {\n    host.erase(host_end, host.length() - host_end);\n  }\n\n  return host;\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"quiche/common/platform/api/quiche_string_piece.h\"\n#include \"quiche/quic/platform/api/quic_export.h\"\n\nnamespace quic {\n\nclass QUIC_EXPORT_PRIVATE QuicHostnameUtilsImpl {\npublic:\n  // Returns true if the sni is valid, false otherwise.\n  //  (1) disallow IP addresses;\n  //  (2) check that the hostname contains valid characters only; and\n  //  (3) contains at least one dot.\n  // NOTE(wub): Only (3) is implemented for now.\n  static bool IsValidSNI(quiche::QuicheStringPiece sni);\n\n  // Normalize a hostname:\n  //  (1) Canonicalize it, similar to what Chromium does in\n  //  https://cs.chromium.org/chromium/src/net/base/url_util.h?q=net::CanonicalizeHost\n  //  (2) Convert it to lower case.\n  //  (3) Remove the trailing '.'.\n  // WARNING: May mutate |hostname| in place.\n  // NOTE(wub): Only (2) and (3) are implemented for now.\n  static std::string NormalizeHostname(quiche::QuicheStringPiece hostname);\n\nprivate:\n  QuicHostnameUtilsImpl() = delete;\n};\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_iovec_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"envoy/common/platform.h\"\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_logging_impl.h\"\n\n#include <atomic>\n\n#include \"common/common/utility.h\"\n\nnamespace quic {\n\nnamespace {\nstd::atomic<int> g_verbosity_threshold;\nstd::atomic<bool> g_dfatal_exit_disabled;\n\n// Pointer to the global log sink, usually it is nullptr.\n// If not nullptr, as in some tests, the sink will receive a copy of the log message right after the\n// message is emitted from the QUIC_LOG... macros.\nstd::atomic<QuicLogSink*> g_quic_log_sink;\nabsl::Mutex g_quic_log_sink_mutex;\n} // namespace\n\nQuicLogEmitter::QuicLogEmitter(QuicLogLevel level) : level_(level), saved_errno_(errno) {}\n\nQuicLogEmitter::~QuicLogEmitter() {\n  if (is_perror_) {\n    // TODO(wub): Change to a thread-safe version of errorDetails.\n    stream_ << \": \" << Envoy::errorDetails(saved_errno_) << \" [\" << saved_errno_ << \"]\";\n  }\n  std::string content = stream_.str();\n  if (!content.empty() && content.back() == '\\n') {\n    // strip the last trailing '\\n' because spdlog will add a trailing '\\n' to\n    // the output.\n    content.back() = '\\0';\n  }\n  GetLogger().log(level_, \"{}\", content.c_str());\n\n  // Normally there is no log sink and we can avoid acquiring the lock.\n  if (g_quic_log_sink.load(std::memory_order_relaxed) != nullptr) {\n    absl::MutexLock lock(&g_quic_log_sink_mutex);\n    QuicLogSink* sink = g_quic_log_sink.load(std::memory_order_relaxed);\n    if (sink != nullptr) {\n      sink->Log(level_, content);\n    }\n  }\n\n  if (level_ == FATAL) {\n    GetLogger().flush();\n#ifdef NDEBUG\n    // Release mode.\n    abort();\n#else\n    // Debug mode.\n    if (!g_dfatal_exit_disabled) {\n      abort();\n    }\n#endif\n  }\n}\n\nint GetVerbosityLogThreshold() { return g_verbosity_threshold.load(std::memory_order_relaxed); }\n\nvoid SetVerbosityLogThreshold(int new_verbosity) {\n  g_verbosity_threshold.store(new_verbosity, std::memory_order_relaxed);\n}\n\nbool IsDFatalExitDisabled() { return g_dfatal_exit_disabled.load(std::memory_order_relaxed); }\n\nvoid SetDFatalExitDisabled(bool is_disabled) {\n  g_dfatal_exit_disabled.store(is_disabled, std::memory_order_relaxed);\n}\n\nQuicLogSink* SetLogSink(QuicLogSink* new_sink) {\n  absl::MutexLock lock(&g_quic_log_sink_mutex);\n  QuicLogSink* old_sink = g_quic_log_sink.load(std::memory_order_relaxed);\n  g_quic_log_sink.store(new_sink, std::memory_order_relaxed);\n  return old_sink;\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <cerrno>\n#include <cstring>\n#include <iostream>\n#include <sstream>\n#include <string>\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/stl_helpers.h\"\n\n#include \"absl/base/optimization.h\"\n#include \"absl/synchronization/mutex.h\"\n\n// This implementation is only used by Quiche code, use macros provided by\n// assert.h and logger.h in Envoy code instead. See QUIC platform API\n// dependency model described in\n// https://quiche.googlesource.com/quiche/+/refs/heads/master/quic/platform/api/README.md\n//\n// The implementation is backed by Envoy::Logger.\n\n// If |condition| is true, use |logstream| to stream the log message and send it to spdlog.\n// If |condition| is false, |logstream| will not be instantiated.\n// The switch(0) is used to suppress a compiler warning on ambiguous \"else\".\n#define QUICHE_LOG_IMPL_INTERNAL(condition, logstream)                                             \\\n  switch (0)                                                                                       \\\n  default:                                                                                         \\\n    if (!(condition)) {                                                                            \\\n    } else                                                                                         \\\n      logstream\n\n#define QUICHE_LOG_IF_IMPL(severity, condition)                                                    \\\n  QUICHE_LOG_IMPL_INTERNAL((condition) && quic::IsLogLevelEnabled(quic::severity),                 \\\n                           quic::QuicLogEmitter(quic::severity).stream())\n\n#define QUICHE_LOG_IMPL(severity) QUICHE_LOG_IF_IMPL(severity, true)\n\n#define QUICHE_VLOG_IF_IMPL(verbosity, condition)                                                  \\\n  QUICHE_LOG_IMPL_INTERNAL((condition) && quic::IsVerboseLogEnabled(verbosity),                    \\\n                           quic::QuicLogEmitter(quic::INFO).stream())\n\n#define QUICHE_VLOG_IMPL(verbosity) QUICHE_VLOG_IF_IMPL(verbosity, true)\n\n// TODO(wub): Implement QUICHE_LOG_FIRST_N_IMPL.\n#define QUICHE_LOG_FIRST_N_IMPL(severity, n) QUICHE_LOG_IMPL(severity)\n\n// TODO(wub): Implement QUICHE_LOG_EVERY_N_IMPL.\n#define QUICHE_LOG_EVERY_N_IMPL(severity, n) QUICHE_LOG_IMPL(severity)\n\n// TODO(wub): Implement QUICHE_LOG_EVERY_N_SEC_IMPL.\n#define QUICHE_LOG_EVERY_N_SEC_IMPL(severity, seconds) QUICHE_LOG_IMPL(severity)\n\n#define QUICHE_PLOG_IMPL(severity)                                                                 \\\n  QUICHE_LOG_IMPL_INTERNAL(quic::IsLogLevelEnabled(quic::severity),                                \\\n                           quic::QuicLogEmitter(quic::severity).SetPerror().stream())\n\n#define QUICHE_LOG_INFO_IS_ON_IMPL() quic::IsLogLevelEnabled(quic::INFO)\n#define QUICHE_LOG_WARNING_IS_ON_IMPL() quic::IsLogLevelEnabled(quic::WARNING)\n#define QUICHE_LOG_ERROR_IS_ON_IMPL() quic::IsLogLevelEnabled(quic::ERROR)\n\n#define CHECK(condition)                                                                           \\\n  QUICHE_LOG_IF_IMPL(FATAL, ABSL_PREDICT_FALSE(!(condition))) << \"CHECK failed: \" #condition \".\"\n\n#define CHECK_GT(a, b) CHECK((a) > (b))\n#define CHECK_GE(a, b) CHECK((a) >= (b))\n#define CHECK_LT(a, b) CHECK((a) < (b))\n#define CHECK_LE(a, b) CHECK((a) <= (b))\n#define CHECK_NE(a, b) CHECK((a) != (b))\n#define CHECK_EQ(a, b) CHECK((a) == (b))\n\n#ifdef NDEBUG\n// Release build\n#define DCHECK(condition) QUICHE_COMPILED_OUT_LOG(condition)\n#define QUICHE_COMPILED_OUT_LOG(condition)                                                         \\\n  QUICHE_LOG_IMPL_INTERNAL(false && (condition), quic::NullLogStream().stream())\n#define QUICHE_DVLOG_IMPL(verbosity) QUICHE_COMPILED_OUT_LOG(false)\n#define QUICHE_DVLOG_IF_IMPL(verbosity, condition) QUICHE_COMPILED_OUT_LOG(condition)\n#define QUICHE_DLOG_IMPL(severity) QUICHE_COMPILED_OUT_LOG(false)\n#define QUICHE_DLOG_IF_IMPL(severity, condition) QUICHE_COMPILED_OUT_LOG(condition)\n#define QUICHE_DLOG_INFO_IS_ON_IMPL() 0\n#define QUICHE_DLOG_EVERY_N_IMPL(severity, n) QUICHE_COMPILED_OUT_LOG(false)\n#define QUICHE_NOTREACHED_IMPL()\n#else\n// Debug build\n#define DCHECK(condition) CHECK(condition)\n#define QUICHE_DVLOG_IMPL(verbosity) QUICHE_VLOG_IMPL(verbosity)\n#define QUICHE_DVLOG_IF_IMPL(verbosity, condition) QUICHE_VLOG_IF_IMPL(verbosity, condition)\n#define QUICHE_DLOG_IMPL(severity) QUICHE_LOG_IMPL(severity)\n#define QUICHE_DLOG_IF_IMPL(severity, condition) QUICHE_LOG_IF_IMPL(severity, condition)\n#define QUICHE_DLOG_INFO_IS_ON_IMPL() QUICHE_LOG_INFO_IS_ON_IMPL()\n#define QUICHE_DLOG_EVERY_N_IMPL(severity, n) QUICHE_LOG_EVERY_N_IMPL(severity, n)\n#define QUICHE_NOTREACHED_IMPL() NOT_REACHED_GCOVR_EXCL_LINE\n#endif\n\n#define DCHECK_GE(a, b) DCHECK((a) >= (b))\n#define DCHECK_GT(a, b) DCHECK((a) > (b))\n#define DCHECK_LT(a, b) DCHECK((a) < (b))\n#define DCHECK_LE(a, b) DCHECK((a) <= (b))\n#define DCHECK_NE(a, b) DCHECK((a) != (b))\n#define DCHECK_EQ(a, b) DCHECK((a) == (b))\n\n#define QUICHE_PREDICT_FALSE_IMPL(x) ABSL_PREDICT_FALSE(x)\n\nnamespace quic {\n\nusing QuicLogLevel = spdlog::level::level_enum;\n\nstatic const QuicLogLevel INFO = spdlog::level::info;\nstatic const QuicLogLevel WARNING = spdlog::level::warn;\nstatic const QuicLogLevel ERROR = spdlog::level::err;\nstatic const QuicLogLevel FATAL = spdlog::level::critical;\n\n// DFATAL is FATAL in debug mode, ERROR in release mode.\n#ifdef NDEBUG\nstatic const QuicLogLevel DFATAL = ERROR;\n#else\nstatic const QuicLogLevel DFATAL = FATAL;\n#endif\n\nclass QuicLogEmitter {\npublic:\n  explicit QuicLogEmitter(QuicLogLevel level);\n\n  ~QuicLogEmitter();\n\n  QuicLogEmitter& SetPerror() {\n    is_perror_ = true;\n    return *this;\n  }\n\n  std::ostringstream& stream() { return stream_; }\n\nprivate:\n  const QuicLogLevel level_;\n  const int saved_errno_;\n  bool is_perror_ = false;\n  std::ostringstream stream_;\n};\n\nclass NullLogStream : public std::ostream {\npublic:\n  NullLogStream() : std::ostream(nullptr) {}\n\n  NullLogStream& stream() { return *this; }\n};\n\ntemplate <typename T> inline NullLogStream& operator<<(NullLogStream& s, const T&) { return s; }\n\ninline spdlog::logger& GetLogger() {\n  return Envoy::Logger::Registry::getLog(Envoy::Logger::Id::quic);\n}\n\ninline bool IsLogLevelEnabled(QuicLogLevel level) { return level >= GetLogger().level(); }\n\nint GetVerbosityLogThreshold();\nvoid SetVerbosityLogThreshold(int new_verbosity);\n\ninline bool IsVerboseLogEnabled(int verbosity) {\n  return IsLogLevelEnabled(INFO) && verbosity <= GetVerbosityLogThreshold();\n}\n\nbool IsDFatalExitDisabled();\nvoid SetDFatalExitDisabled(bool is_disabled);\n\n// QuicLogSink is used to capture logs emitted from the QUICHE_LOG... macros.\nclass QuicLogSink {\npublic:\n  virtual ~QuicLogSink() = default;\n\n  // Called when |message| is emitted at |level|.\n  virtual void Log(QuicLogLevel level, const std::string& message) = 0;\n};\n\n// Only one QuicLogSink can capture log at a time. SetLogSink causes future logs\n// to be captured by the |new_sink|.\n// Return the previous sink.\nQuicLogSink* SetLogSink(QuicLogSink* new_sink);\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_macros_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"absl/base/attributes.h\"\n\n#define QUIC_MUST_USE_RESULT_IMPL ABSL_MUST_USE_RESULT\n#define QUIC_UNUSED_IMPL ABSL_ATTRIBUTE_UNUSED\n#define QUIC_CONST_INIT_IMPL ABSL_CONST_INIT\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_map_util_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <algorithm>\n\nnamespace quic {\n\ntemplate <class Collection, class Key>\nbool QuicContainsKeyImpl(const Collection& collection, const Key& key) {\n  return collection.find(key) != collection.end();\n}\n\ntemplate <typename Collection, typename Value>\nbool QuicContainsValueImpl(const Collection& collection, const Value& value) {\n  return std::find(collection.begin(), collection.end(), value) != collection.end();\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.h\"\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace quic {\n\nQuicMemSliceImpl::QuicMemSliceImpl(QuicUniqueBufferPtr buffer, size_t length)\n    : fragment_(std::make_unique<Envoy::Buffer::BufferFragmentImpl>(\n          buffer.release(), length,\n          [](const void* p, size_t, const Envoy::Buffer::BufferFragmentImpl*) {\n            delete[] static_cast<const char*>(p);\n          })) {\n  single_slice_buffer_.addBufferFragment(*fragment_);\n  ASSERT(this->length() == length);\n}\n\nQuicMemSliceImpl::QuicMemSliceImpl(Envoy::Buffer::Instance& buffer, size_t length) {\n  ASSERT(firstSliceLength(buffer) == length);\n  single_slice_buffer_.move(buffer, length);\n  ASSERT(single_slice_buffer_.getRawSlices().size() == 1);\n}\n\nconst char* QuicMemSliceImpl::data() const {\n  Envoy::Buffer::RawSliceVector slices = single_slice_buffer_.getRawSlices(/*max_slices=*/1);\n  ASSERT(slices.size() <= 1);\n  return !slices.empty() ? static_cast<const char*>(slices[0].mem_) : nullptr;\n}\n\nsize_t QuicMemSliceImpl::firstSliceLength(Envoy::Buffer::Instance& buffer) {\n  Envoy::Buffer::RawSliceVector slices = buffer.getRawSlices(/*max_slices=*/1);\n  ASSERT(slices.size() == 1);\n  return slices[0].len_;\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <cstddef>\n#include <iostream>\n#include <memory>\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"quiche/quic/core/quic_buffer_allocator.h\"\n\nnamespace quic {\n\n// Implements the interface required by\n// https://quiche.googlesource.com/quiche/+/refs/heads/master/quic/platform/api/quic_mem_slice.h\nclass QuicMemSliceImpl {\npublic:\n  // Constructs an empty QuicMemSliceImpl.\n  QuicMemSliceImpl() = default;\n\n  // Constructs a QuicMemSliceImpl by taking ownership of the memory in |buffer|.\n  QuicMemSliceImpl(QuicUniqueBufferPtr buffer, size_t length);\n\n  // Constructs a QuicMemSliceImpl from a Buffer::Instance with first |length| bytes in it.\n  // Data will be moved from |buffer| to this mem slice.\n  // Prerequisite: |buffer| has at least |length| bytes of data and not empty.\n  explicit QuicMemSliceImpl(Envoy::Buffer::Instance& buffer, size_t length);\n\n  QuicMemSliceImpl(const QuicMemSliceImpl& other) = delete;\n  // Move constructors. |other| will not hold a reference to the data buffer\n  // after this call completes.\n  QuicMemSliceImpl(QuicMemSliceImpl&& other) noexcept { *this = std::move(other); }\n\n  QuicMemSliceImpl& operator=(const QuicMemSliceImpl& other) = delete;\n  QuicMemSliceImpl& operator=(QuicMemSliceImpl&& other) noexcept {\n    if (this != &other) {\n      fragment_ = std::move(other.fragment_);\n      single_slice_buffer_.move(other.single_slice_buffer_);\n    }\n    return *this;\n  }\n\n  // Below methods implements interface needed by QuicMemSlice.\n  void Reset() { single_slice_buffer_.drain(length()); }\n\n  // Returns a char pointer to the one and only slice in buffer.\n  const char* data() const;\n\n  size_t length() const { return single_slice_buffer_.length(); }\n  bool empty() const { return length() == 0; }\n\n  Envoy::Buffer::OwnedImpl& single_slice_buffer() { return single_slice_buffer_; }\n\nprivate:\n  // Prerequisite: buffer has at least one slice.\n  size_t firstSliceLength(Envoy::Buffer::Instance& buffer);\n\n  std::unique_ptr<Envoy::Buffer::BufferFragmentImpl> fragment_;\n  Envoy::Buffer::OwnedImpl single_slice_buffer_;\n};\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h\"\n\n#include \"quiche/quic/platform/api/quic_mem_slice.h\"\n\nnamespace quic {\n\nquiche::QuicheStringPiece QuicMemSliceSpanImpl::GetData(size_t index) {\n  Envoy::Buffer::RawSliceVector slices = buffer_->getRawSlices(/*max_slices=*/index + 1);\n  ASSERT(slices.size() > index);\n  return {reinterpret_cast<char*>(slices[index].mem_), slices[index].len_};\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"quiche/common/platform/api/quiche_string_piece.h\"\n#include \"quiche/quic/core/quic_types.h\"\n#include \"quiche/quic/platform/api/quic_mem_slice.h\"\n\nnamespace quic {\n\n// Implements the interface required by\n// https://quiche.googlesource.com/quiche/+/refs/heads/master/quic/platform/api/quic_mem_slice_span.h\n// Wraps a Buffer::Instance and deliver its data with minimum number of copies.\nclass QuicMemSliceSpanImpl {\npublic:\n  QuicMemSliceSpanImpl() = default;\n  /**\n   * @param buffer has to outlive the life time of this class.\n   */\n  explicit QuicMemSliceSpanImpl(Envoy::Buffer::Instance& buffer) : buffer_(&buffer) {}\n  explicit QuicMemSliceSpanImpl(QuicMemSliceImpl* slice) : buffer_(&slice->single_slice_buffer()) {}\n\n  QuicMemSliceSpanImpl(const QuicMemSliceSpanImpl& other) = default;\n  QuicMemSliceSpanImpl& operator=(const QuicMemSliceSpanImpl& other) = default;\n\n  QuicMemSliceSpanImpl(QuicMemSliceSpanImpl&& other) noexcept : buffer_(other.buffer_) {\n    other.buffer_ = nullptr;\n  }\n\n  QuicMemSliceSpanImpl& operator=(QuicMemSliceSpanImpl&& other) noexcept {\n    if (this != &other) {\n      buffer_ = other.buffer_;\n      other.buffer_ = nullptr;\n    }\n    return *this;\n  }\n\n  // QuicMemSliceSpan\n  quiche::QuicheStringPiece GetData(size_t index);\n  QuicByteCount total_length() { return buffer_->length(); };\n  size_t NumSlices() { return buffer_->getRawSlices().size(); }\n  template <typename ConsumeFunction> QuicByteCount ConsumeAll(ConsumeFunction consume);\n  bool empty() const { return buffer_->length() == 0; }\n\nprivate:\n  Envoy::Buffer::Instance* buffer_{nullptr};\n};\n\ntemplate <typename ConsumeFunction>\nQuicByteCount QuicMemSliceSpanImpl::ConsumeAll(ConsumeFunction consume) {\n  size_t saved_length = 0;\n  for (auto& slice : buffer_->getRawSlices()) {\n    if (slice.len_ == 0) {\n      continue;\n    }\n    // Move each slice into a stand-alone buffer.\n    // TODO(danzh): investigate the cost of allocating one buffer per slice.\n    // If it turns out to be expensive, add a new function to free data in the middle in buffer\n    // interface and re-design QuicMemSliceImpl.\n    consume(QuicMemSlice(QuicMemSliceImpl(*buffer_, slice.len_)));\n    saved_length += slice.len_;\n  }\n  ASSERT(buffer_->length() == 0);\n  return saved_length;\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_mem_slice_storage_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_mem_slice_storage_impl.h\"\n\n#include <cstdint>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"quiche/quic/core/quic_utils.h\"\n\nnamespace quic {\n\n// TODO(danzh) Note that |allocator| is not used to allocate memory currently, instead,\n// Buffer::OwnedImpl allocates memory on its own. Investigate if a customized\n// QuicBufferAllocator can improve cache hit.\nQuicMemSliceStorageImpl::QuicMemSliceStorageImpl(const iovec* iov, int iov_count,\n                                                 QuicBufferAllocator* /*allocator*/,\n                                                 const QuicByteCount max_slice_len) {\n  if (iov == nullptr) {\n    return;\n  }\n  QuicByteCount write_len = 0;\n  for (int i = 0; i < iov_count; ++i) {\n    write_len += iov[i].iov_len;\n  }\n  size_t io_offset = 0;\n  while (io_offset < write_len) {\n    size_t slice_len = std::min(write_len - io_offset, max_slice_len);\n    Envoy::Buffer::RawSlice slice;\n    // Populate a temporary buffer instance and then move it to |buffer_|. This is necessary because\n    // consecutive reserve/commit can return addresses in same slice which violates the restriction\n    // of |max_slice_len| when ToSpan() is called.\n    Envoy::Buffer::OwnedImpl buffer;\n    uint16_t num_slice = buffer.reserve(slice_len, &slice, 1);\n    ASSERT(num_slice == 1);\n    QuicUtils::CopyToBuffer(iov, iov_count, io_offset, slice_len, static_cast<char*>(slice.mem_));\n    io_offset += slice_len;\n    // OwnedImpl may return a slice longer than needed, trim it to requested length.\n    slice.len_ = slice_len;\n    buffer.commit(&slice, num_slice);\n    buffer_.move(buffer);\n  }\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_mem_slice_storage_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"quiche/quic/core/quic_buffer_allocator.h\"\n#include \"quiche/quic/platform/api/quic_iovec.h\"\n#include \"quiche/quic/platform/api/quic_mem_slice_span.h\"\n\nnamespace quic {\n\n// QuicMemSliceStorageImpl wraps a MemSlice vector.\nclass QuicMemSliceStorageImpl {\npublic:\n  QuicMemSliceStorageImpl(const iovec* iov, int iov_count, QuicBufferAllocator* allocator,\n                          const QuicByteCount max_slice_len);\n\n  QuicMemSliceStorageImpl(const QuicMemSliceStorageImpl& other) { buffer_.add(other.buffer_); }\n\n  QuicMemSliceStorageImpl& operator=(const QuicMemSliceStorageImpl& other) {\n    if (this != &other) {\n      if (buffer_.length() > 0) {\n        buffer_.drain(buffer_.length());\n      }\n      buffer_.add(other.buffer_);\n    }\n    return *this;\n  }\n  QuicMemSliceStorageImpl(QuicMemSliceStorageImpl&& other) = default;\n  QuicMemSliceStorageImpl& operator=(QuicMemSliceStorageImpl&& other) = default;\n\n  QuicMemSliceSpan ToSpan() { return QuicMemSliceSpan(QuicMemSliceSpanImpl(buffer_)); }\n\n  void Append(QuicMemSliceImpl mem_slice) { buffer_.move(mem_slice.single_slice_buffer()); }\n\nprivate:\n  Envoy::Buffer::OwnedImpl buffer_;\n};\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_mutex_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"absl/synchronization/mutex.h\"\n#include \"absl/synchronization/notification.h\"\n#include \"quiche/quic/platform/api/quic_export.h\"\n\nnamespace quic {\n\n#define QUIC_EXCLUSIVE_LOCKS_REQUIRED_IMPL ABSL_EXCLUSIVE_LOCKS_REQUIRED\n#define QUIC_GUARDED_BY_IMPL ABSL_GUARDED_BY\n#define QUIC_LOCKABLE_IMPL ABSL_LOCKABLE\n#define QUIC_LOCKS_EXCLUDED_IMPL ABSL_LOCKS_EXCLUDED\n#define QUIC_SHARED_LOCKS_REQUIRED_IMPL ABSL_SHARED_LOCKS_REQUIRED\n#define QUIC_EXCLUSIVE_LOCK_FUNCTION_IMPL ABSL_EXCLUSIVE_LOCK_FUNCTION\n#define QUIC_UNLOCK_FUNCTION_IMPL ABSL_UNLOCK_FUNCTION\n#define QUIC_SHARED_LOCK_FUNCTION_IMPL ABSL_SHARED_LOCK_FUNCTION\n#define QUIC_SCOPED_LOCKABLE_IMPL ABSL_SCOPED_LOCKABLE\n#define QUIC_ASSERT_SHARED_LOCK_IMPL ABSL_ASSERT_SHARED_LOCK\n\n// A class wrapping a non-reentrant mutex.\nclass QUIC_LOCKABLE_IMPL QUIC_EXPORT_PRIVATE QuicLockImpl {\npublic:\n  QuicLockImpl() = default;\n  QuicLockImpl(const QuicLockImpl&) = delete;\n  QuicLockImpl& operator=(const QuicLockImpl&) = delete;\n\n  // Block until mu_ is free, then acquire it exclusively.\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  void WriterLock() QUIC_EXCLUSIVE_LOCK_FUNCTION_IMPL() { mu_.WriterLock(); }\n\n  // Release mu_. Caller must hold it exclusively.\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  void WriterUnlock() QUIC_UNLOCK_FUNCTION_IMPL() { mu_.WriterUnlock(); }\n\n  // Block until mu_ is free or shared, then acquire a share of it.\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  void ReaderLock() QUIC_SHARED_LOCK_FUNCTION_IMPL() { mu_.ReaderLock(); }\n\n  // Release mu_. 
Caller could hold it in shared mode.\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  void ReaderUnlock() QUIC_UNLOCK_FUNCTION_IMPL() { mu_.ReaderUnlock(); }\n\n  // Returns immediately if current thread holds mu_ in at least shared\n  // mode. Otherwise, reports an error by crashing with a diagnostic.\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  void AssertReaderHeld() const QUIC_ASSERT_SHARED_LOCK_IMPL() { mu_.AssertReaderHeld(); }\n\nprivate:\n  absl::Mutex mu_;\n};\n\n// A Notification allows threads to receive notification of a single occurrence\n// of a single event.\nclass QUIC_EXPORT_PRIVATE QuicNotificationImpl {\npublic:\n  QuicNotificationImpl() = default;\n  QuicNotificationImpl(const QuicNotificationImpl&) = delete;\n  QuicNotificationImpl& operator=(const QuicNotificationImpl&) = delete;\n\n  bool HasBeenNotified() { return notification_.HasBeenNotified(); }\n\n  void Notify() { notification_.Notify(); }\n\n  void WaitForNotification() { notification_.WaitForNotification(); }\n\nprivate:\n  absl::Notification notification_;\n};\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_pcc_sender_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"common/common/assert.h\"\n\n#include \"quiche/quic/core/quic_types.h\"\n\nnamespace quic {\n\nclass QuicClock;\nstruct QuicConnectionStats;\nclass QuicRandom;\nclass QuicUnackedPacketMap;\nclass RttStats;\nclass SendAlgorithmInterface;\n\n// Interface for creating a PCC SendAlgorithmInterface.\ninline SendAlgorithmInterface*\nCreatePccSenderImpl(const QuicClock* /*clock*/, const RttStats* /*rtt_stats*/,\n                    const QuicUnackedPacketMap* /*unacked_packets*/, QuicRandom* /*random*/,\n                    QuicConnectionStats* /*stats*/, QuicPacketCount /*initial_congestion_window*/,\n                    QuicPacketCount /*max_congestion_window*/) {\n  PANIC(\"PccSender is not supported.\");\n  return nullptr;\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_prefetch_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quic {\n\n#if defined(__GNUC__)\n// See __builtin_prefetch in:\n// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.\ninline void QuicPrefetchT0Impl(const void* addr) { __builtin_prefetch(addr, 0, 3); }\n#else\ninline void QuicPrefetchT0Impl(const void*) {}\n#endif\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_ptr_util_impl.h",
    "content": "#pragma once\n\n#include <memory>\n#include <utility>\n\n#include \"absl/memory/memory.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quic {\n\ntemplate <typename T, typename... Args> std::unique_ptr<T> QuicMakeUniqueImpl(Args&&... args) {\n  return std::make_unique<T>(std::forward<Args>(args)...);\n}\n\ntemplate <typename T> std::unique_ptr<T> QuicWrapUniqueImpl(T* ptr) {\n  return absl::WrapUnique<T>(ptr);\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_reference_counted_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <memory>\n\n#include \"quiche/quic/platform/api/quic_export.h\"\n\nnamespace quic {\n\n/** The implementation of reference counted object is merely wrapping\n * std::shared_ptr. So QuicReferenceCountedImpl class does not do anything\n * related to reference counting as shared_ptr already takes care of that. But\n * it customizes destruction to provide a interface for shared_ptr to destroy\n * the object, because according to the API declared in QuicReferenceCounted,\n * this class has to hide its destructor.\n */\nclass QuicReferenceCountedImpl {\npublic:\n  QuicReferenceCountedImpl() = default;\n\n  // Expose destructor through this method.\n  static void destroy(QuicReferenceCountedImpl* impl) { delete impl; }\n\nprotected:\n  // Non-public destructor to match API declared in QuicReferenceCounted.\n  virtual ~QuicReferenceCountedImpl() = default;\n};\n\ntemplate <typename T> class QuicReferenceCountedPointerImpl {\npublic:\n  QuicReferenceCountedPointerImpl() : refptr_(nullptr, T::destroy) {}\n  QuicReferenceCountedPointerImpl(T* p) : refptr_(p, T::destroy) {}\n  QuicReferenceCountedPointerImpl(std::nullptr_t) : refptr_(nullptr, T::destroy) {}\n  // Copy constructor.\n  template <typename U>\n  QuicReferenceCountedPointerImpl(const QuicReferenceCountedPointerImpl<U>& other)\n      : refptr_(other.refptr()) {}\n  QuicReferenceCountedPointerImpl(const QuicReferenceCountedPointerImpl& other)\n      : refptr_(other.refptr()) {}\n\n  // Move constructor.\n  template <typename U>\n  QuicReferenceCountedPointerImpl(QuicReferenceCountedPointerImpl<U>&& other) noexcept\n      : refptr_(std::move(other.refptr())) {}\n  QuicReferenceCountedPointerImpl(QuicReferenceCountedPointerImpl&& other) noexcept\n      : 
refptr_(std::move(other.refptr())) {}\n\n  ~QuicReferenceCountedPointerImpl() = default;\n\n  // Copy assignments.\n  QuicReferenceCountedPointerImpl& operator=(const QuicReferenceCountedPointerImpl& other) {\n    refptr_ = other.refptr();\n    return *this;\n  }\n  template <typename U>\n  QuicReferenceCountedPointerImpl& operator=(const QuicReferenceCountedPointerImpl<U>& other) {\n    refptr_ = other.refptr();\n    return *this;\n  }\n\n  // Move assignments.\n  QuicReferenceCountedPointerImpl& operator=(QuicReferenceCountedPointerImpl&& other) noexcept {\n    refptr_ = std::move(other.refptr());\n    return *this;\n  }\n  template <typename U>\n  QuicReferenceCountedPointerImpl& operator=(QuicReferenceCountedPointerImpl<U>&& other) noexcept {\n    refptr_ = std::move(other.refptr());\n    return *this;\n  }\n\n  QuicReferenceCountedPointerImpl<T>& operator=(T* p) {\n    refptr_.reset(p, T::destroy);\n    return *this;\n  }\n\n  // Returns the raw pointer with no change in reference.\n  T* get() const { return refptr_.get(); }\n\n  // Accessors for the referenced object.\n  // operator* and operator-> will assert() if there is no current object.\n  T& operator*() const {\n    assert(refptr_ != nullptr);\n    return *refptr_;\n  }\n  T* operator->() const {\n    assert(refptr_ != nullptr);\n    return refptr_.get();\n  }\n\n  explicit operator bool() const { return static_cast<bool>(refptr_); }\n\n  const std::shared_ptr<T>& refptr() const { return refptr_; }\n\n  std::shared_ptr<T>& refptr() { return refptr_; }\n\nprivate:\n  std::shared_ptr<T> refptr_;\n};\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_server_stats_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define QUIC_SERVER_HISTOGRAM_ENUM_IMPL(name, sample, enum_size, docstring)                        \\\n  do {                                                                                             \\\n  } while (0)\n\n#define QUIC_SERVER_HISTOGRAM_BOOL_IMPL(name, sample, docstring)                                   \\\n  do {                                                                                             \\\n  } while (0)\n\n#define QUIC_SERVER_HISTOGRAM_TIMES_IMPL(name, sample, min, max, bucket_count, docstring)          \\\n  do {                                                                                             \\\n  } while (0)\n\n#define QUIC_SERVER_HISTOGRAM_COUNTS_IMPL(name, sample, min, max, bucket_count, docstring)         \\\n  do {                                                                                             \\\n  } while (0)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_stack_trace_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <sstream>\n#include <string>\n\n#include \"server/backtrace.h\"\n\nnamespace quic {\n\ninline std::string QuicStackTraceImpl() {\n  Envoy::BackwardsTrace t;\n  t.capture();\n  std::ostringstream os;\n  t.printTrace(os);\n  return os.str();\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_stream_buffer_allocator_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"quiche/quic/core/quic_simple_buffer_allocator.h\"\n\nnamespace quic {\n\n// Implements the interface required by\n// https://quiche.googlesource.com/quiche/+/refs/heads/master/quic/platform/api/quic_stream_buffer_allocator.h\n// with the default implementation provided by QUICHE.\nusing QuicStreamBufferAllocatorImpl = SimpleBufferAllocator;\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_string_utils_impl.h",
    "content": "#pragma once\n\n#include \"absl/strings/str_cat.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quic {\n\ntemplate <typename... Args>\ninline void QuicStrAppendImpl(std::string* output, const Args&... args) {\n  absl::StrAppend(output, args...);\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_udp_socket_platform_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <sys/socket.h>\n\nnamespace quic {\n\nconst size_t kCmsgSpaceForGooglePacketHeaderImpl = 0;\n\n// NOLINTNEXTLINE(readability-identifier-naming)\ninline bool GetGooglePacketHeadersFromControlMessageImpl(struct ::cmsghdr* /*cmsg*/,\n                                                         char** /*packet_headers*/,\n                                                         size_t* /*packet_headers_len*/) {\n  return false;\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quic_uint128_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"absl/numeric/int128.h\"\n\nnamespace quic {\n\nusing QuicUint128Impl = absl::uint128;\n#define MakeQuicUint128Impl(hi, lo) absl::MakeUint128(hi, lo)\n#define QuicUint128Low64Impl(x) absl::Uint128Low64(x)\n#define QuicUint128High64Impl(x) absl::Uint128High64(x)\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_arraysize_impl.h",
    "content": "#pragma once\n\n#include \"absl/base/macros.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define QUICHE_ARRAYSIZE_IMPL(array) ABSL_ARRAYSIZE(array)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_endian_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <cstdint>\n\n#include \"common/common/byte_order.h\"\n\nnamespace quiche {\n\nclass QuicheEndianImpl {\npublic:\n  static uint16_t HostToNet16(uint16_t x) { return toEndianness<ByteOrder::BigEndian>(x); }\n  static uint32_t HostToNet32(uint32_t x) { return toEndianness<ByteOrder::BigEndian>(x); }\n  static uint64_t HostToNet64(uint64_t x) { return toEndianness<ByteOrder::BigEndian>(x); }\n\n  static uint16_t NetToHost16(uint16_t x) { return fromEndianness<ByteOrder::BigEndian>(x); }\n  static uint32_t NetToHost32(uint32_t x) { return fromEndianness<ByteOrder::BigEndian>(x); }\n  static uint64_t NetToHost64(uint64_t x) { return fromEndianness<ByteOrder::BigEndian>(x); }\n\n  static bool HostIsLittleEndian() { return NetToHost16(0x1234) != 0x1234; }\n};\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_export_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define QUICHE_EXPORT\n#define QUICHE_EXPORT_PRIVATE\n#define QUICHE_NO_EXPORT\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_logging_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_logging_impl.h\"\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_map_util_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quiche {\n\ntemplate <class Collection, class Key>\nbool QuicheContainsKeyImpl(const Collection& collection, const Key& key) {\n  return collection.find(key) != collection.end();\n}\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_optional_impl.h",
    "content": "#pragma once\n\n#include \"absl/types/optional.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quiche {\n\ntemplate <typename T> using QuicheOptionalImpl = absl::optional<T>;\n\n#define QUICHE_NULLOPT_IMPL absl::nullopt\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_ptr_util_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"absl/memory/memory.h\"\n\nnamespace quiche {\n\ntemplate <typename T> std::unique_ptr<T> QuicheWrapUniqueImpl(T* ptr) {\n  return absl::WrapUnique<T>(ptr);\n}\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_str_cat_impl.h",
    "content": "#pragma once\n\n#include \"absl/strings/str_cat.h\"\n#include \"fmt/printf.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quiche {\n\ntemplate <typename... Args> inline std::string QuicheStrCatImpl(const Args&... args) {\n  return absl::StrCat(args...);\n}\n\ntemplate <typename... Args> inline std::string QuicheStringPrintfImpl(const Args&... args) {\n  return fmt::sprintf(std::forward<const Args&>(args)...);\n}\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_string_piece_impl.h",
    "content": "#pragma once\n\n#include \"extensions/quic_listeners/quiche/platform/quic_logging_impl.h\"\n\n#include \"absl/hash/hash.h\"\n#include \"absl/strings/string_view.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quiche {\n\nusing QuicheStringPieceImpl = absl::string_view;\n\nusing QuicheStringPieceHashImpl = absl::Hash<QuicheStringPieceImpl>;\n\ninline size_t QuicheHashStringPairImpl(QuicheStringPieceImpl a, QuicheStringPieceImpl b) {\n  return absl::Hash<QuicheStringPieceImpl>()(a) ^ absl::Hash<QuicheStringPieceImpl>()(b);\n}\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_text_utils_impl.h",
    "content": "#pragma once\n\n#include \"common/common/base64.h\"\n\n#include \"extensions/quic_listeners/quiche/platform/quiche_optional_impl.h\"\n#include \"extensions/quic_listeners/quiche/platform/quiche_string_piece_impl.h\"\n#include \"extensions/quic_listeners/quiche/platform/string_utils.h\"\n\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/numbers.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/str_split.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quiche {\n\nclass QuicheTextUtilsImpl {\npublic:\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static bool StartsWith(QuicheStringPieceImpl data, QuicheStringPieceImpl prefix) {\n    return absl::StartsWith(data, prefix);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static bool EndsWith(QuicheStringPieceImpl data, QuicheStringPieceImpl suffix) {\n    return absl::EndsWith(data, suffix);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static bool EndsWithIgnoreCase(QuicheStringPieceImpl data, QuicheStringPieceImpl suffix) {\n    return absl::EndsWithIgnoreCase(data, suffix);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static std::string ToLower(QuicheStringPieceImpl data) { return absl::AsciiStrToLower(data); }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static void RemoveLeadingAndTrailingWhitespace(QuicheStringPieceImpl* data) {\n    *data = absl::StripAsciiWhitespace(*data);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static bool StringToUint64(QuicheStringPieceImpl in, uint64_t* out) {\n    return absl::SimpleAtoi(in, out);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static 
bool StringToInt(QuicheStringPieceImpl in, int* out) { return absl::SimpleAtoi(in, out); }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static bool StringToUint32(QuicheStringPieceImpl in, uint32_t* out) {\n    return absl::SimpleAtoi(in, out);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static bool StringToSizeT(QuicheStringPieceImpl in, size_t* out) {\n    return absl::SimpleAtoi(in, out);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static std::string Uint64ToString(uint64_t in) { return absl::StrCat(in); }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static std::string HexEncode(QuicheStringPieceImpl data) { return absl::BytesToHexString(data); }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static std::string Hex(uint32_t v) { return absl::StrCat(absl::Hex(v)); }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static std::string HexDecode(QuicheStringPieceImpl data) { return absl::HexStringToBytes(data); }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static void Base64Encode(const uint8_t* data, size_t data_len, std::string* output) {\n    *output =\n        Envoy::Base64::encode(reinterpret_cast<const char*>(data), data_len, /*add_padding=*/false);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static QuicheOptionalImpl<std::string> Base64Decode(QuicheStringPieceImpl input) {\n    return Envoy::Base64::decodeWithoutPadding(input);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static std::string HexDump(QuicheStringPieceImpl binary_data) {\n    return quiche::HexDump(binary_data);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static bool ContainsUpperCase(QuicheStringPieceImpl data) {\n    return std::any_of(data.begin(), data.end(), absl::ascii_isupper);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static bool IsAllDigits(QuicheStringPieceImpl data) {\n    return std::all_of(data.begin(), data.end(), 
absl::ascii_isdigit);\n  }\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  static std::vector<QuicheStringPieceImpl> Split(QuicheStringPieceImpl data, char delim) {\n    return absl::StrSplit(data, delim);\n  }\n};\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h\"\n\nnamespace quiche {\n\nnamespace {\nQuicheOptional<int64_t> quicheUtcDateTimeToUnixSecondsInner(int year, int month, int day, int hour,\n                                                            int minute, int second) {\n  const absl::CivilSecond civil_time(year, month, day, hour, minute, second);\n  if (second != 60 && (civil_time.year() != year || civil_time.month() != month ||\n                       civil_time.day() != day || civil_time.hour() != hour ||\n                       civil_time.minute() != minute || civil_time.second() != second)) {\n    return absl::nullopt;\n  }\n\n  const absl::Time time = absl::FromCivil(civil_time, absl::UTCTimeZone());\n  return absl::ToUnixSeconds(time);\n}\n} // namespace\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nQuicheOptional<int64_t> QuicheUtcDateTimeToUnixSecondsImpl(int year, int month, int day, int hour,\n                                                           int minute, int second) {\n  // Handle leap seconds without letting any other irregularities happen.\n  if (second == 60) {\n    auto previous_second =\n        quicheUtcDateTimeToUnixSecondsInner(year, month, day, hour, minute, second - 1);\n    if (!previous_second.has_value()) {\n      return absl::nullopt;\n    }\n    return *previous_second + 1;\n  }\n\n  return quicheUtcDateTimeToUnixSecondsInner(year, month, day, hour, minute, second);\n}\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#pragma once\n\n#include <cstdint>\n\n#include \"absl/time/civil_time.h\"\n#include \"absl/time/time.h\"\n#include \"quiche/common/platform/api/quiche_optional.h\"\n\nnamespace quiche {\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nQuicheOptional<int64_t> QuicheUtcDateTimeToUnixSecondsImpl(int year, int month, int day, int hour,\n                                                           int minute, int second);\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/quiche_unordered_containers_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/hash/hash.h\"\n\nnamespace quiche {\n\n// The default hasher used by hash tables.\ntemplate <typename Key> using QuicheDefaultHasherImpl = absl::Hash<Key>;\n\n// Similar to std::unordered_map, but with better performance and memory usage.\ntemplate <typename Key, typename Value, typename Hash, typename Eq>\nusing QuicheUnorderedMapImpl = absl::node_hash_map<Key, Value, Hash, Eq>;\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_arraysize_impl.h",
    "content": "#pragma once\n\n#include \"absl/base/macros.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define SPDY_ARRAYSIZE_IMPL(x) ABSL_ARRAYSIZE(x)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_bug_tracker_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h\"\n\n#define SPDY_BUG_IMPL QUIC_BUG_IMPL\n#define SPDY_BUG_IF_IMPL QUIC_BUG_IF_IMPL\n#define FLAGS_spdy_always_log_bugs_for_tests_impl true\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_containers_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/hash/hash.h\"\n#include \"quiche/common/simple_linked_hash_map.h\"\n\nnamespace spdy {\n\ntemplate <typename KeyType> using SpdyHashImpl = absl::Hash<KeyType>;\n\ntemplate <typename KeyType, typename ValueType, typename Hash = absl::Hash<KeyType>>\nusing SpdyHashMapImpl = absl::flat_hash_map<KeyType, ValueType, Hash>;\n\ntemplate <typename ElementType, typename Hasher, typename Eq>\nusing SpdyHashSetImpl = absl::flat_hash_set<ElementType, Hasher, Eq>;\n\ntemplate <typename Key, typename Value, typename Hash, typename Eq>\nusing SpdyLinkedHashMapImpl = quiche::SimpleLinkedHashMap<Key, Value, Hash, Eq>;\n\ntemplate <typename T, size_t N, typename A = std::allocator<T>>\nusing SpdyInlinedVectorImpl = absl::InlinedVector<T, N, A>;\n\ntemplate <typename Key, typename Value, int Size>\nusing SpdySmallMapImpl = absl::flat_hash_map<Key, Value>;\n} // namespace spdy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_endianness_util_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n\n#include \"envoy/common/platform.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace spdy {\n\ninline uint16_t SpdyNetToHost16Impl(uint16_t x) { return ntohs(x); }\n\ninline uint32_t SpdyNetToHost32Impl(uint32_t x) { return ntohl(x); }\n\n// TODO: implement\ninline uint64_t SpdyNetToHost64Impl(uint64_t /*x*/) { return 0; }\n\ninline uint16_t SpdyHostToNet16Impl(uint16_t x) { return htons(x); }\n\ninline uint32_t SpdyHostToNet32Impl(uint32_t x) { return htonl(x); }\n\n// TODO: implement\ninline uint64_t SpdyHostToNet64Impl(uint64_t /*x*/) { return 0; }\n\n} // namespace spdy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_estimate_memory_usage_impl.h",
    "content": "#pragma once\n\n#include <cstddef>\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace spdy {\n\n// Dummy implementation.\ntemplate <class T> size_t SpdyEstimateMemoryUsageImpl(const T& /*object*/) { return 0; }\n\n} // namespace spdy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_flags_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/flags_impl.h\"\n\n#define GetSpdyReloadableFlagImpl(flag) quiche::FLAGS_spdy_reloadable_flag_##flag->value()\n\n#define GetSpdyRestartFlagImpl(flag) quiche::FLAGS_spdy_restart_flag_##flag->value()\n\n#define SPDY_CODE_COUNT_N_IMPL(flag, instance, total)                                              \\\n  do {                                                                                             \\\n  } while (0)\n\n#define SPDY_CODE_COUNT_IMPL(name)                                                                 \\\n  do {                                                                                             \\\n  } while (0)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_logging_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_logging_impl.h\"\n\n#define SPDY_LOG_IMPL(severity) QUICHE_LOG_IMPL(severity)\n\n#define SPDY_VLOG_IMPL(verbose_level) QUICHE_VLOG_IMPL(verbose_level)\n\n#define SPDY_DLOG_IMPL(severity) QUICHE_DLOG_IMPL(severity)\n\n#define SPDY_DLOG_IF_IMPL(severity, condition) QUICHE_DLOG_IF_IMPL(severity, condition)\n\n#define SPDY_DVLOG_IMPL(verbose_level) QUICHE_DVLOG_IMPL(verbose_level)\n\n#define SPDY_DVLOG_IF_IMPL(verbose_level, condition) QUICHE_DVLOG_IF_IMPL(verbose_level, condition)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_macros_impl.h",
    "content": "#pragma once\n\n#include \"absl/base/attributes.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define SPDY_MUST_USE_RESULT_IMPL ABSL_MUST_USE_RESULT\n#define SPDY_UNUSED_IMPL ABSL_ATTRIBUTE_UNUSED\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_mem_slice_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <cstddef>\n\nnamespace spdy {\n\n// TODO(danzh): Fill out SpdyMemSliceImpl.\n//\n// SpdyMemSliceImpl wraps a reference counted MemSlice and only provides partial\n// interfaces of MemSlice.\nclass SpdyMemSliceImpl {\npublic:\n  // Constructs an empty SpdyMemSliceImpl that contains an empty MemSlice.\n  SpdyMemSliceImpl();\n\n  // Constructs a SpdyMemSlice with reference count 1 to a newly allocated data\n  // buffer of |length| bytes.\n  explicit SpdyMemSliceImpl(size_t length);\n\n  // Constructs a reference-counted MemSlice to |data|.\n  SpdyMemSliceImpl(const char* data, size_t length);\n\n  SpdyMemSliceImpl(const SpdyMemSliceImpl& other) = delete;\n  SpdyMemSliceImpl& operator=(const SpdyMemSliceImpl& other) = delete;\n\n  // Move constructors. |other| will not hold a reference to the data buffer\n  // after this call completes.\n  SpdyMemSliceImpl(SpdyMemSliceImpl&& other) = default;\n  SpdyMemSliceImpl& operator=(SpdyMemSliceImpl&& other) = default;\n\n  ~SpdyMemSliceImpl();\n\n  // Returns a char pointer to underlying data buffer.\n  const char* data() const;\n  // Returns the length of underlying data buffer.\n  size_t length() const;\n};\n\n} // namespace spdy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/string_utils.h\"\n\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"fmt/printf.h\"\n#include \"quiche/common/platform/api/quiche_string_piece.h\"\n\nnamespace spdy {\n\ntemplate <typename... Args>\n// NOLINTNEXTLINE(readability-identifier-naming)\ninline void SpdyStrAppendImpl(std::string* output, const Args&... args) {\n  absl::StrAppend(output, std::forward<const Args&>(args)...);\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\ninline char SpdyHexDigitToIntImpl(char c) { return quiche::HexDigitToInt(c); }\n\n// NOLINTNEXTLINE(readability-identifier-naming)\ninline std::string SpdyHexDecodeImpl(absl::string_view data) {\n  return absl::HexStringToBytes(data);\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\ninline bool SpdyHexDecodeToUInt32Impl(absl::string_view data, uint32_t* out) {\n  return quiche::HexDecodeToUInt32(data, out);\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\ninline std::string SpdyHexEncodeImpl(const void* bytes, size_t size) {\n  return absl::BytesToHexString(absl::string_view(static_cast<const char*>(bytes), size));\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\ninline std::string SpdyHexEncodeUInt32AndTrimImpl(uint32_t data) {\n  return absl::StrCat(absl::Hex(data));\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\ninline std::string SpdyHexDumpImpl(absl::string_view data) { return quiche::HexDump(data); }\n\nstruct SpdyStringPieceCaseHashImpl {\n  size_t operator()(quiche::QuicheStringPiece data) const {\n    std::string lower = absl::AsciiStrToLower(data);\n    return 
absl::Hash<std::string>()(lower);\n  }\n};\n\nstruct SpdyStringPieceCaseEqImpl {\n  bool operator()(absl::string_view piece1, absl::string_view piece2) const {\n    return absl::EqualsIgnoreCase(piece1, piece2);\n  }\n};\n\n} // namespace spdy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_test_helpers_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n// TODO: implement\n#define EXPECT_SPDY_BUG_IMPL 0\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_test_utils_prod_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n// TODO: implement\n#define SPDY_FRIEND_TEST_IMPL 0\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/spdy_unsafe_arena_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"quiche/spdy/core/spdy_simple_arena.h\"\n\nnamespace spdy {\n\nusing SpdyUnsafeArenaImpl = SpdySimpleArena;\n\n} // namespace spdy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/string_utils.cc",
    "content": "#include \"extensions/quic_listeners/quiche/platform/string_utils.h\"\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <cstring>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/str_format.h\"\n#include \"common/common/assert.h\"\n\nnamespace quiche {\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstd::string HexDump(absl::string_view data) {\n  const int kBytesPerLine = 16;\n  const char* buf = data.data();\n  int bytes_remaining = data.size();\n  int offset = 0;\n  std::string out;\n  const char* p = buf;\n  while (bytes_remaining > 0) {\n    const int line_bytes = std::min(bytes_remaining, kBytesPerLine);\n    absl::StrAppendFormat(&out, \"0x%04x:  \", offset); // Do the line header\n    for (int i = 0; i < kBytesPerLine; ++i) {\n      if (i < line_bytes) {\n        absl::StrAppendFormat(&out, \"%02x\", p[i]);\n      } else {\n        out += \"  \"; // two-space filler instead of two-space hex digits\n      }\n      if (i % 2) {\n        out += ' ';\n      }\n    }\n    out += ' ';\n    for (int i = 0; i < line_bytes; ++i) { // Do the ASCII dump\n      out += absl::ascii_isgraph(p[i]) ? 
p[i] : '.';\n    }\n\n    bytes_remaining -= line_bytes;\n    offset += line_bytes;\n    p += line_bytes;\n    out += '\\n';\n  }\n  return out;\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nchar HexDigitToInt(char c) {\n  ASSERT(std::isxdigit(c));\n\n  if (std::isdigit(c)) {\n    return c - '0';\n  }\n  if (c >= 'A' && c <= 'F') {\n    return c - 'A' + 10;\n  }\n  if (c >= 'a' && c <= 'f') {\n    return c - 'a' + 10;\n  }\n  return 0;\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nbool HexDecodeToUInt32(absl::string_view data, uint32_t* out) {\n  if (data.empty() || data.size() > 8u) {\n    return false;\n  }\n\n  for (char c : data) {\n    if (!absl::ascii_isxdigit(c)) {\n      return false;\n    }\n  }\n\n  // Pad with leading zeros.\n  std::string data_padded(data.data(), data.size());\n  data_padded.insert(0, 8u - data.size(), '0');\n\n  std::string byte_string = absl::HexStringToBytes(data_padded);\n\n  RELEASE_ASSERT(byte_string.size() == 4u, \"padded data is not 4 byte long.\");\n  uint32_t bytes;\n  memcpy(&bytes, byte_string.data(), byte_string.length());\n  *out = ntohl(bytes);\n  return true;\n}\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/platform/string_utils.h",
    "content": "// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <cstdint>\n#include <string>\n\n#include \"absl/strings/string_view.h\"\n\nnamespace quiche {\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstd::string HexDump(absl::string_view data);\n\n// '0' => 0,  '1' => 1, 'a' or 'A' => 10, etc.\n// NOLINTNEXTLINE(readability-identifier-naming)\nchar HexDigitToInt(char c);\n\n// Turns a 8-byte hex string into a uint32 in host byte order.\n// e.g. \"12345678\" => 0x12345678\n// NOLINTNEXTLINE(readability-identifier-naming)\nbool HexDecodeToUInt32(absl::string_view data, uint32_t* out);\n\n} // namespace quiche\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc",
    "content": "#include \"extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h\"\n\n#include <memory>\n\nnamespace Envoy {\nnamespace Quic {\n\nQuicFilterManagerConnectionImpl::QuicFilterManagerConnectionImpl(EnvoyQuicConnection& connection,\n                                                                 Event::Dispatcher& dispatcher,\n                                                                 uint32_t send_buffer_limit)\n    // Using this for purpose other than logging is not safe. Because QUIC connection id can be\n    // 18 bytes, so there might be collision when it's hashed to 8 bytes.\n    : Network::ConnectionImplBase(dispatcher, /*id=*/connection.connection_id().Hash()),\n      quic_connection_(&connection), filter_manager_(*this), stream_info_(dispatcher.timeSource()),\n      write_buffer_watermark_simulation_(\n          send_buffer_limit / 2, send_buffer_limit, [this]() { onSendBufferLowWatermark(); },\n          [this]() { onSendBufferHighWatermark(); }, ENVOY_LOGGER()) {\n  stream_info_.protocol(Http::Protocol::Http3);\n}\n\nvoid QuicFilterManagerConnectionImpl::addWriteFilter(Network::WriteFilterSharedPtr filter) {\n  filter_manager_.addWriteFilter(filter);\n}\n\nvoid QuicFilterManagerConnectionImpl::addFilter(Network::FilterSharedPtr filter) {\n  filter_manager_.addFilter(filter);\n}\n\nvoid QuicFilterManagerConnectionImpl::addReadFilter(Network::ReadFilterSharedPtr filter) {\n  filter_manager_.addReadFilter(filter);\n}\n\nbool QuicFilterManagerConnectionImpl::initializeReadFilters() {\n  return filter_manager_.initializeReadFilters();\n}\n\nvoid QuicFilterManagerConnectionImpl::enableHalfClose(bool enabled) {\n  RELEASE_ASSERT(!enabled, \"Quic connection doesn't support half close.\");\n}\n\nvoid QuicFilterManagerConnectionImpl::setBufferLimits(uint32_t /*limit*/) {\n  // Currently read buffer is capped by connection level flow control. And write buffer limit is set\n  // during construction. 
Changing the buffer limit during the life time of the connection is not\n  // supported.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nbool QuicFilterManagerConnectionImpl::aboveHighWatermark() const {\n  return write_buffer_watermark_simulation_.isAboveHighWatermark();\n}\n\nvoid QuicFilterManagerConnectionImpl::close(Network::ConnectionCloseType type) {\n  if (quic_connection_ == nullptr) {\n    // Already detached from quic connection.\n    return;\n  }\n  const bool delayed_close_timeout_configured = delayed_close_timeout_.count() > 0;\n  if (hasDataToWrite() && type != Network::ConnectionCloseType::NoFlush) {\n    if (delayed_close_timeout_configured) {\n      // QUIC connection has unsent data and caller wants to flush them. Wait for flushing or\n      // timeout.\n      if (!inDelayedClose()) {\n        // Only set alarm if not in delay close mode yet.\n        initializeDelayedCloseTimer();\n      }\n      // Update delay close state according to current call.\n      if (type == Network::ConnectionCloseType::FlushWriteAndDelay) {\n        delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait;\n      } else {\n        ASSERT(type == Network::ConnectionCloseType::FlushWrite);\n        delayed_close_state_ = DelayedCloseState::CloseAfterFlush;\n      }\n    } else {\n      delayed_close_state_ = DelayedCloseState::CloseAfterFlush;\n    }\n  } else if (hasDataToWrite()) {\n    // Quic connection has unsent data but caller wants to close right away.\n    ASSERT(type == Network::ConnectionCloseType::NoFlush);\n    quic_connection_->OnCanWrite();\n    closeConnectionImmediately();\n  } else {\n    // Quic connection doesn't have unsent data. 
It's up to the caller and\n    // the configuration whether to wait or not before closing.\n    if (delayed_close_timeout_configured &&\n        type == Network::ConnectionCloseType::FlushWriteAndDelay) {\n      if (!inDelayedClose()) {\n        initializeDelayedCloseTimer();\n      }\n      delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait;\n    } else {\n      closeConnectionImmediately();\n    }\n  }\n}\n\nconst Network::ConnectionSocket::OptionsSharedPtr&\nQuicFilterManagerConnectionImpl::socketOptions() const {\n  return quic_connection_->connectionSocket()->options();\n}\n\nconst Network::Address::InstanceConstSharedPtr&\nQuicFilterManagerConnectionImpl::remoteAddress() const {\n  ASSERT(quic_connection_->connectionSocket() != nullptr,\n         \"remoteAddress() should only be called after OnPacketHeader\");\n  return quic_connection_->connectionSocket()->remoteAddress();\n}\n\nconst Network::Address::InstanceConstSharedPtr&\nQuicFilterManagerConnectionImpl::directRemoteAddress() const {\n  ASSERT(quic_connection_->connectionSocket() != nullptr,\n         \"directRemoteAddress() should only be called after OnPacketHeader\");\n  return quic_connection_->connectionSocket()->directRemoteAddress();\n}\n\nconst Network::Address::InstanceConstSharedPtr&\nQuicFilterManagerConnectionImpl::localAddress() const {\n  ASSERT(quic_connection_->connectionSocket() != nullptr,\n         \"localAddress() should only be called after OnPacketHeader\");\n  return quic_connection_->connectionSocket()->localAddress();\n}\n\nSsl::ConnectionInfoConstSharedPtr QuicFilterManagerConnectionImpl::ssl() const {\n  // TODO(danzh): construct Ssl::ConnectionInfo from crypto stream\n  return nullptr;\n}\n\nvoid QuicFilterManagerConnectionImpl::rawWrite(Buffer::Instance& /*data*/, bool /*end_stream*/) {\n  // Network filter should stop iteration.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid QuicFilterManagerConnectionImpl::adjustBytesToSend(int64_t delta) {\n  const size_t 
bytes_to_send_old = bytes_to_send_;\n  bytes_to_send_ += delta;\n  if (delta < 0) {\n    ASSERT(bytes_to_send_old > bytes_to_send_);\n  } else {\n    ASSERT(bytes_to_send_old <= bytes_to_send_);\n  }\n  write_buffer_watermark_simulation_.checkHighWatermark(bytes_to_send_);\n  write_buffer_watermark_simulation_.checkLowWatermark(bytes_to_send_);\n}\n\nvoid QuicFilterManagerConnectionImpl::maybeApplyDelayClosePolicy() {\n  if (!inDelayedClose()) {\n    return;\n  }\n  if (hasDataToWrite() || delayed_close_state_ == DelayedCloseState::CloseAfterFlushAndWait) {\n    if (delayed_close_timer_ != nullptr) {\n      // Re-arm delay close timer on every write event if there are still data\n      // buffered or the connection close is supposed to be delayed.\n      delayed_close_timer_->enableTimer(delayed_close_timeout_);\n    }\n  } else {\n    closeConnectionImmediately();\n  }\n}\n\nvoid QuicFilterManagerConnectionImpl::onConnectionCloseEvent(\n    const quic::QuicConnectionCloseFrame& frame, quic::ConnectionCloseSource source) {\n  transport_failure_reason_ = absl::StrCat(quic::QuicErrorCodeToString(frame.quic_error_code),\n                                           \" with details: \", frame.error_details);\n  if (quic_connection_ != nullptr) {\n    // Tell network callbacks about connection close if not detached yet.\n    raiseConnectionEvent(source == quic::ConnectionCloseSource::FROM_PEER\n                             ? 
Network::ConnectionEvent::RemoteClose\n                             : Network::ConnectionEvent::LocalClose);\n  }\n}\n\nvoid QuicFilterManagerConnectionImpl::closeConnectionImmediately() {\n  if (quic_connection_ == nullptr) {\n    return;\n  }\n  quic_connection_->CloseConnection(quic::QUIC_NO_ERROR, \"Closed by application\",\n                                    quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);\n  quic_connection_ = nullptr;\n}\n\nvoid QuicFilterManagerConnectionImpl::onSendBufferHighWatermark() {\n  ENVOY_CONN_LOG(trace, \"onSendBufferHighWatermark\", *this);\n  for (auto callback : callbacks_) {\n    callback->onAboveWriteBufferHighWatermark();\n  }\n}\n\nvoid QuicFilterManagerConnectionImpl::onSendBufferLowWatermark() {\n  ENVOY_CONN_LOG(trace, \"onSendBufferLowWatermark\", *this);\n  for (auto callback : callbacks_) {\n    callback->onBelowWriteBufferLowWatermark();\n  }\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/logger.h\"\n#include \"common/network/connection_impl_base.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_simulated_watermark_buffer.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Act as a Network::Connection to HCM and a FilterManager to FilterFactoryCb.\nclass QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase {\npublic:\n  QuicFilterManagerConnectionImpl(EnvoyQuicConnection& connection, Event::Dispatcher& dispatcher,\n                                  uint32_t send_buffer_limit);\n\n  // Network::FilterManager\n  // Overridden to delegate calls to filter_manager_.\n  void addWriteFilter(Network::WriteFilterSharedPtr filter) override;\n  void addFilter(Network::FilterSharedPtr filter) override;\n  void addReadFilter(Network::ReadFilterSharedPtr filter) override;\n  bool initializeReadFilters() override;\n\n  // Network::Connection\n  void addBytesSentCallback(Network::Connection::BytesSentCb /*cb*/) override {\n    // TODO(danzh): implement to support proxy. This interface is only called from\n    // TCP proxy code.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  void enableHalfClose(bool enabled) override;\n  void close(Network::ConnectionCloseType type) override;\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n  std::string nextProtocol() const override { return EMPTY_STRING; }\n  void noDelay(bool /*enable*/) override {\n    // No-op. 
TCP_NODELAY doesn't apply to UDP.\n  }\n  void readDisable(bool /*disable*/) override { NOT_REACHED_GCOVR_EXCL_LINE; }\n  void detectEarlyCloseWhenReadDisabled(bool /*value*/) override { NOT_REACHED_GCOVR_EXCL_LINE; }\n  bool readEnabled() const override { return true; }\n  const Network::Address::InstanceConstSharedPtr& remoteAddress() const override;\n  const Network::Address::InstanceConstSharedPtr& directRemoteAddress() const override;\n  const Network::Address::InstanceConstSharedPtr& localAddress() const override;\n  absl::optional<Network::Connection::UnixDomainSocketPeerCredentials>\n  unixSocketPeerCredentials() const override {\n    // Unix domain socket is not supported.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  void setConnectionStats(const Network::Connection::ConnectionStats& stats) override {\n    // TODO(danzh): populate stats.\n    Network::ConnectionImplBase::setConnectionStats(stats);\n    quic_connection_->setConnectionStats(stats);\n  }\n  Ssl::ConnectionInfoConstSharedPtr ssl() const override;\n  Network::Connection::State state() const override {\n    if (quic_connection_ != nullptr && quic_connection_->connected()) {\n      return Network::Connection::State::Open;\n    }\n    return Network::Connection::State::Closed;\n  }\n  void write(Buffer::Instance& /*data*/, bool /*end_stream*/) override {\n    // All writes should be handled by Quic internally.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  void setBufferLimits(uint32_t limit) override;\n  uint32_t bufferLimit() const override {\n    // As quic connection is not HTTP1.1, this method shouldn't be called by HCM.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  bool localAddressRestored() const override {\n    // SO_ORIGINAL_DST not supported by QUIC.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  bool aboveHighWatermark() const override;\n\n  const Network::ConnectionSocket::OptionsSharedPtr& socketOptions() const override;\n  StreamInfo::StreamInfo& streamInfo() override { return stream_info_; }\n  
const StreamInfo::StreamInfo& streamInfo() const override { return stream_info_; }\n  absl::string_view transportFailureReason() const override { return transport_failure_reason_; }\n  absl::optional<std::chrono::milliseconds> lastRoundTripTime() const override { return {}; }\n\n  // Network::FilterManagerConnection\n  void rawWrite(Buffer::Instance& data, bool end_stream) override;\n\n  // Network::ReadBufferSource\n  Network::StreamBuffer getReadBuffer() override { return {empty_buffer_, false}; }\n  // Network::WriteBufferSource\n  Network::StreamBuffer getWriteBuffer() override { NOT_REACHED_GCOVR_EXCL_LINE; }\n\n  // Update the book keeping of the aggregated buffered bytes cross all the\n  // streams, and run watermark check.\n  void adjustBytesToSend(int64_t delta);\n\n  // Called after each write when a previous connection close call is postponed.\n  void maybeApplyDelayClosePolicy();\n\n  uint32_t bytesToSend() { return bytes_to_send_; }\n\nprotected:\n  // Propagate connection close to network_connection_callbacks_.\n  void onConnectionCloseEvent(const quic::QuicConnectionCloseFrame& frame,\n                              quic::ConnectionCloseSource source);\n\n  void closeConnectionImmediately() override;\n\n  virtual bool hasDataToWrite() PURE;\n\n  EnvoyQuicConnection* quic_connection_{nullptr};\n\nprivate:\n  // Called when aggregated buffered bytes across all the streams exceeds high watermark.\n  void onSendBufferHighWatermark();\n  // Called when aggregated buffered bytes across all the streams declines to low watermark.\n  void onSendBufferLowWatermark();\n\n  // Currently ConnectionManagerImpl is the one and only filter. 
If more network\n  // filters are added, ConnectionManagerImpl should always be the last one.\n  // Its onRead() is only called once to trigger ReadFilter::onNewConnection()\n  // and the rest incoming data bypasses these filters.\n  Network::FilterManagerImpl filter_manager_;\n\n  StreamInfo::StreamInfoImpl stream_info_;\n  std::string transport_failure_reason_;\n  uint32_t bytes_to_send_{0};\n  // Keeps the buffer state of the connection, and react upon the changes of how many bytes are\n  // buffered cross all streams' send buffer. The state is evaluated and may be changed upon each\n  // stream write. QUICHE doesn't buffer data in connection, all the data is buffered in stream's\n  // send buffer.\n  EnvoyQuicSimulatedWatermarkBuffer write_buffer_watermark_simulation_;\n  Buffer::OwnedImpl empty_buffer_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h",
    "content": "#include <chrono>\n#include <iostream>\n\n#include \"envoy/network/io_handle.h\"\n\n#include \"common/network/io_socket_error_impl.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// A wrapper class around IoHandle object which doesn't close() upon destruction. It is used to\n// create ConnectionSocket as the actual IoHandle instance should out live connection socket.\nclass QuicIoHandleWrapper : public Network::IoHandle {\npublic:\n  QuicIoHandleWrapper(Network::IoHandle& io_handle) : io_handle_(io_handle) {}\n\n  // Network::IoHandle\n  os_fd_t fdDoNotUse() const override { return io_handle_.fdDoNotUse(); }\n  Api::IoCallUint64Result close() override {\n    closed_ = true;\n    return Api::ioCallUint64ResultNoError();\n  }\n  bool isOpen() const override { return !closed_; }\n  Api::IoCallUint64Result readv(uint64_t max_length, Buffer::RawSlice* slices,\n                                uint64_t num_slice) override {\n    if (closed_) {\n      return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF),\n                                                        Network::IoSocketError::deleteIoError));\n    }\n    return io_handle_.readv(max_length, slices, num_slice);\n  }\n  Api::IoCallUint64Result read(Buffer::Instance& buffer, uint64_t max_length) override {\n    if (closed_) {\n      return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF),\n                                                        Network::IoSocketError::deleteIoError));\n    }\n    return io_handle_.read(buffer, max_length);\n  }\n  Api::IoCallUint64Result writev(const Buffer::RawSlice* slices, uint64_t num_slice) override {\n    if (closed_) {\n      return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF),\n                                                        Network::IoSocketError::deleteIoError));\n    }\n    return io_handle_.writev(slices, num_slice);\n  }\n  Api::IoCallUint64Result 
write(Buffer::Instance& buffer) override {\n    if (closed_) {\n      return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF),\n                                                        Network::IoSocketError::deleteIoError));\n    }\n    return io_handle_.write(buffer);\n  }\n  Api::IoCallUint64Result sendmsg(const Buffer::RawSlice* slices, uint64_t num_slice, int flags,\n                                  const Envoy::Network::Address::Ip* self_ip,\n                                  const Network::Address::Instance& peer_address) override {\n    if (closed_) {\n      return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF),\n                                                        Network::IoSocketError::deleteIoError));\n    }\n    return io_handle_.sendmsg(slices, num_slice, flags, self_ip, peer_address);\n  }\n  Api::IoCallUint64Result recvmsg(Buffer::RawSlice* slices, const uint64_t num_slice,\n                                  uint32_t self_port, RecvMsgOutput& output) override {\n    if (closed_) {\n      ASSERT(false, \"recvmmsg is called after close.\");\n      return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF),\n                                                        Network::IoSocketError::deleteIoError));\n    }\n    return io_handle_.recvmsg(slices, num_slice, self_port, output);\n  }\n  Api::IoCallUint64Result recvmmsg(RawSliceArrays& slices, uint32_t self_port,\n                                   RecvMsgOutput& output) override {\n    if (closed_) {\n      ASSERT(false, \"recvmmsg is called after close.\");\n      return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF),\n                                                        Network::IoSocketError::deleteIoError));\n    }\n    return io_handle_.recvmmsg(slices, self_port, output);\n  }\n  Api::IoCallUint64Result recv(void* buffer, size_t length, int flags) override {\n    if (closed_) {\n 
     ASSERT(false, \"recv called after close.\");\n      return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF),\n                                                        Network::IoSocketError::deleteIoError));\n    }\n    return io_handle_.recv(buffer, length, flags);\n  }\n  bool supportsMmsg() const override { return io_handle_.supportsMmsg(); }\n  bool supportsUdpGro() const override { return io_handle_.supportsUdpGro(); }\n  Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override {\n    return io_handle_.bind(address);\n  }\n  Api::SysCallIntResult listen(int backlog) override { return io_handle_.listen(backlog); }\n  Network::IoHandlePtr accept(struct sockaddr* addr, socklen_t* addrlen) override {\n    return io_handle_.accept(addr, addrlen);\n  }\n  Api::SysCallIntResult connect(Network::Address::InstanceConstSharedPtr address) override {\n    return io_handle_.connect(address);\n  }\n  Api::SysCallIntResult setOption(int level, int optname, const void* optval,\n                                  socklen_t optlen) override {\n    return io_handle_.setOption(level, optname, optval, optlen);\n  }\n  Api::SysCallIntResult getOption(int level, int optname, void* optval,\n                                  socklen_t* optlen) override {\n    return io_handle_.getOption(level, optname, optval, optlen);\n  }\n  Api::SysCallIntResult setBlocking(bool blocking) override {\n    return io_handle_.setBlocking(blocking);\n  }\n  absl::optional<int> domain() override { return io_handle_.domain(); }\n  Network::Address::InstanceConstSharedPtr localAddress() override {\n    return io_handle_.localAddress();\n  }\n  Network::Address::InstanceConstSharedPtr peerAddress() override {\n    return io_handle_.peerAddress();\n  }\n  Event::FileEventPtr createFileEvent(Event::Dispatcher& dispatcher, Event::FileReadyCb cb,\n                                      Event::FileTriggerType trigger, uint32_t events) override {\n    
return io_handle_.createFileEvent(dispatcher, cb, trigger, events);\n  }\n  Api::SysCallIntResult shutdown(int how) override { return io_handle_.shutdown(how); }\n  absl::optional<std::chrono::milliseconds> lastRoundTripTime() override { return {}; }\n\nprivate:\n  Network::IoHandle& io_handle_;\n  bool closed_{false};\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc",
    "content": "#include \"extensions/quic_listeners/quiche/quic_transport_socket_factory.h\"\n\n// #include \"envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h\"\n#include \"envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h\"\n#include \"envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.validate.h\"\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nNetwork::TransportSocketFactoryPtr\nQuicServerTransportSocketConfigFactory::createTransportSocketFactory(\n    const Protobuf::Message& config, Server::Configuration::TransportSocketFactoryContext& context,\n    const std::vector<std::string>& /*server_names*/) {\n  auto quic_transport = MessageUtil::downcastAndValidate<\n      const envoy::extensions::transport_sockets::quic::v3::QuicDownstreamTransport&>(\n      config, context.messageValidationVisitor());\n  auto server_config = std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n      quic_transport.downstream_tls_context(), context);\n  return std::make_unique<QuicServerTransportSocketFactory>(std::move(server_config));\n}\n\nProtobufTypes::MessagePtr QuicServerTransportSocketConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<\n      envoy::extensions::transport_sockets::quic::v3::QuicDownstreamTransport>();\n}\n\nNetwork::TransportSocketFactoryPtr\nQuicClientTransportSocketConfigFactory::createTransportSocketFactory(\n    const Protobuf::Message& config,\n    Server::Configuration::TransportSocketFactoryContext& context) {\n  auto quic_transport = MessageUtil::downcastAndValidate<\n      const envoy::extensions::transport_sockets::quic::v3::QuicUpstreamTransport&>(\n      config, context.messageValidationVisitor());\n  auto client_config = std::make_unique<Extensions::TransportSockets::Tls::ClientContextConfigImpl>(\n      quic_transport.upstream_tls_context(), context);\n  return 
std::make_unique<QuicClientTransportSocketFactory>(std::move(client_config));\n}\n\nProtobufTypes::MessagePtr QuicClientTransportSocketConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::transport_sockets::quic::v3::QuicUpstreamTransport>();\n}\n\nREGISTER_FACTORY(QuicServerTransportSocketConfigFactory,\n                 Server::Configuration::DownstreamTransportSocketConfigFactory);\n\nREGISTER_FACTORY(QuicClientTransportSocketConfigFactory,\n                 Server::Configuration::UpstreamTransportSocketConfigFactory);\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h",
    "content": "#pragma once\n\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/ssl/context_config.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// Base class for QUIC transport socket factory.\n// Because QUIC stack handles all L4 data, there is no need for a real transport\n// socket for QUIC in current implementation. This factory doesn't provide a\n// transport socket, instead, its derived class provides TLS context config for\n// server and client.\nclass QuicTransportSocketFactoryBase : public Network::TransportSocketFactory {\npublic:\n  // Network::TransportSocketFactory\n  Network::TransportSocketPtr\n  createTransportSocket(Network::TransportSocketOptionsSharedPtr /*options*/) const override {\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  bool implementsSecureTransport() const override { return true; }\n};\n\n// TODO(danzh): when implementing ProofSource, examine if it's necessary to\n// differentiate server and client side context config.\nclass QuicServerTransportSocketFactory : public QuicTransportSocketFactoryBase {\npublic:\n  QuicServerTransportSocketFactory(Ssl::ServerContextConfigPtr config)\n      : config_(std::move(config)) {}\n\n  const Ssl::ServerContextConfig& serverContextConfig() const { return *config_; }\n\nprivate:\n  std::unique_ptr<const Ssl::ServerContextConfig> config_;\n};\n\nclass QuicClientTransportSocketFactory : public QuicTransportSocketFactoryBase {\npublic:\n  QuicClientTransportSocketFactory(Envoy::Ssl::ClientContextConfigPtr config)\n      : config_(std::move(config)) {}\n\n  const Ssl::ClientContextConfig& clientContextConfig() const { return *config_; }\n\nprivate:\n  std::unique_ptr<const Ssl::ClientContextConfig> config_;\n};\n\n// Base class to create above QuicTransportSocketFactory for server and client\n// side.\nclass QuicTransportSocketConfigFactory\n 
   : public virtual Server::Configuration::TransportSocketConfigFactory {\npublic:\n  // Server::Configuration::TransportSocketConfigFactory\n  std::string name() const override {\n    return Extensions::TransportSockets::TransportSocketNames::get().Quic;\n  }\n};\n\nclass QuicServerTransportSocketConfigFactory\n    : public QuicTransportSocketConfigFactory,\n      public Server::Configuration::DownstreamTransportSocketConfigFactory {\npublic:\n  // Server::Configuration::DownstreamTransportSocketConfigFactory\n  Network::TransportSocketFactoryPtr\n  createTransportSocketFactory(const Protobuf::Message& config,\n                               Server::Configuration::TransportSocketFactoryContext& context,\n                               const std::vector<std::string>& server_names) override;\n\n  // Server::Configuration::TransportSocketConfigFactory\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  // Prevent double registration for the config proto\n  std::string configType() override { return \"\"; }\n};\n\nDECLARE_FACTORY(QuicServerTransportSocketConfigFactory);\n\nclass QuicClientTransportSocketConfigFactory\n    : public QuicTransportSocketConfigFactory,\n      public Server::Configuration::UpstreamTransportSocketConfigFactory {\npublic:\n  // Server::Configuration::UpstreamTransportSocketConfigFactory\n  Network::TransportSocketFactoryPtr createTransportSocketFactory(\n      const Protobuf::Message& config,\n      Server::Configuration::TransportSocketFactoryContext& context) override;\n\n  // Server::Configuration::TransportSocketConfigFactory\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  // Prevent double registration for the config proto\n  std::string configType() override { return \"\"; }\n};\n\nDECLARE_FACTORY(QuicClientTransportSocketConfigFactory);\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/spdy_server_push_utils_for_envoy.cc",
    "content": "#include \"quiche/quic/core/http/spdy_server_push_utils.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file has a substitute definition for\n// quiche/quic/core/http/spdy_server_push_utils.cc which depends on GURL.\n// Since Envoy doesn't support server push, these functions shouldn't be\n// executed at all.\n\nusing spdy::SpdyHeaderBlock;\n\nnamespace quic {\n\n// static\nstd::string SpdyServerPushUtils::GetPromisedUrlFromHeaders(const SpdyHeaderBlock& /*headers*/) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\n// static\nstd::string\nSpdyServerPushUtils::GetPromisedHostNameFromHeaders(const SpdyHeaderBlock& /*headers*/) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\n// static\nbool SpdyServerPushUtils::PromisedUrlIsValid(const SpdyHeaderBlock& /*headers*/) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\n// static\nstd::string SpdyServerPushUtils::GetPushPromiseUrl(quiche::QuicheStringPiece /*scheme*/,\n                                                   quiche::QuicheStringPiece /*authority*/,\n                                                   quiche::QuicheStringPiece /*path*/) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\n} // namespace quic\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc",
    "content": "#include \"extensions/quic_listeners/quiche/udp_gso_batch_writer.h\"\n\n#include \"common/network/io_socket_error_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\nnamespace Envoy {\nnamespace Quic {\nnamespace {\nApi::IoCallUint64Result convertQuicWriteResult(quic::WriteResult quic_result, size_t payload_len) {\n  switch (quic_result.status) {\n  case quic::WRITE_STATUS_OK: {\n    if (quic_result.bytes_written == 0) {\n      ENVOY_LOG_MISC(trace, \"sendmsg successful, message buffered to send\");\n    } else {\n      ENVOY_LOG_MISC(trace, \"sendmsg successful, flushed bytes {}\", quic_result.bytes_written);\n    }\n    // Return payload_len as rc & nullptr as error on success\n    return Api::IoCallUint64Result(\n        /*rc=*/payload_len,\n        /*err=*/Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError));\n  }\n  case quic::WRITE_STATUS_BLOCKED_DATA_BUFFERED: {\n    // Data was buffered, Return payload_len as rc & nullptr as error\n    ENVOY_LOG_MISC(trace, \"sendmsg blocked, message buffered to send\");\n    return Api::IoCallUint64Result(\n        /*rc=*/payload_len,\n        /*err=*/Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError));\n  }\n  case quic::WRITE_STATUS_BLOCKED: {\n    // Writer blocked, return error\n    ENVOY_LOG_MISC(trace, \"sendmsg blocked, message not buffered\");\n    return Api::IoCallUint64Result(\n        /*rc=*/0,\n        /*err=*/Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(),\n                                Network::IoSocketError::deleteIoError));\n  }\n  default: {\n    // Write Failed, return {0 and error_code}\n    ENVOY_LOG_MISC(trace, \"sendmsg failed with error code {}\",\n                   static_cast<int>(quic_result.error_code));\n    return Api::IoCallUint64Result(\n        /*rc=*/0,\n        /*err=*/Api::IoErrorPtr(new Network::IoSocketError(quic_result.error_code),\n                                
Network::IoSocketError::deleteIoError));\n  }\n  }\n}\n\n} // namespace\n\n// Initialize QuicGsoBatchWriter, set io_handle_ and stats_\nUdpGsoBatchWriter::UdpGsoBatchWriter(Network::IoHandle& io_handle, Stats::Scope& scope)\n    : quic::QuicGsoBatchWriter(io_handle.fdDoNotUse()), stats_(generateStats(scope)) {}\n\n// Do Nothing in the Destructor For now\nUdpGsoBatchWriter::~UdpGsoBatchWriter() = default;\n\nApi::IoCallUint64Result\nUdpGsoBatchWriter::writePacket(const Buffer::Instance& buffer, const Network::Address::Ip* local_ip,\n                               const Network::Address::Instance& peer_address) {\n  // Convert received parameters to relevant forms\n  quic::QuicSocketAddress peer_addr = envoyIpAddressToQuicSocketAddress(peer_address.ip());\n  quic::QuicSocketAddress self_addr = envoyIpAddressToQuicSocketAddress(local_ip);\n  size_t payload_len = static_cast<size_t>(buffer.length());\n\n  // TODO(yugant): Currently we do not use PerPacketOptions with Quic, we may want to\n  // specify this parameter here at a later stage.\n  quic::WriteResult quic_result =\n      WritePacket(buffer.toString().c_str(), payload_len, self_addr.host(), peer_addr,\n                  /*quic::PerPacketOptions=*/nullptr);\n  updateUdpGsoBatchWriterStats(quic_result);\n\n  return convertQuicWriteResult(quic_result, payload_len);\n}\n\nuint64_t UdpGsoBatchWriter::getMaxPacketSize(const Network::Address::Instance& peer_address) const {\n  quic::QuicSocketAddress peer_addr = envoyIpAddressToQuicSocketAddress(peer_address.ip());\n  return static_cast<uint64_t>(GetMaxPacketSize(peer_addr));\n}\n\nNetwork::UdpPacketWriterBuffer\nUdpGsoBatchWriter::getNextWriteLocation(const Network::Address::Ip* local_ip,\n                                        const Network::Address::Instance& peer_address) {\n  quic::QuicSocketAddress peer_addr = envoyIpAddressToQuicSocketAddress(peer_address.ip());\n  quic::QuicSocketAddress self_addr = envoyIpAddressToQuicSocketAddress(local_ip);\n  
quic::QuicPacketBuffer quic_buf = GetNextWriteLocation(self_addr.host(), peer_addr);\n  return Network::UdpPacketWriterBuffer(reinterpret_cast<uint8_t*>(quic_buf.buffer),\n                                        Network::UdpMaxOutgoingPacketSize, quic_buf.release_buffer);\n}\n\nApi::IoCallUint64Result UdpGsoBatchWriter::flush() {\n  quic::WriteResult quic_result = Flush();\n  updateUdpGsoBatchWriterStats(quic_result);\n\n  return convertQuicWriteResult(quic_result, /*payload_len=*/0);\n}\n\nvoid UdpGsoBatchWriter::updateUdpGsoBatchWriterStats(quic::WriteResult quic_result) {\n  if (quic_result.status == quic::WRITE_STATUS_OK && quic_result.bytes_written > 0) {\n    if (gso_size_ > 0u) {\n      uint64_t num_pkts_in_batch =\n          std::ceil(static_cast<float>(quic_result.bytes_written) / gso_size_);\n      stats_.pkts_sent_per_batch_.recordValue(num_pkts_in_batch);\n    }\n    stats_.total_bytes_sent_.add(quic_result.bytes_written);\n  }\n  stats_.internal_buffer_size_.set(batch_buffer().SizeInUse());\n  gso_size_ = buffered_writes().empty() ? 0u : buffered_writes().front().buf_len;\n}\n\nUdpGsoBatchWriterStats UdpGsoBatchWriter::generateStats(Stats::Scope& scope) {\n  return {\n      UDP_GSO_BATCH_WRITER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))};\n}\n\nUdpGsoBatchWriterFactory::UdpGsoBatchWriterFactory() = default;\n\nNetwork::UdpPacketWriterPtr\nUdpGsoBatchWriterFactory::createUdpPacketWriter(Network::IoHandle& io_handle, Stats::Scope& scope) {\n  return std::make_unique<UdpGsoBatchWriter>(io_handle, scope);\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h",
    "content": "#pragma once\n\n#if !defined(__linux__)\n#define UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT 0\n#else\n#define UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT 1\n\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#pragma GCC diagnostic ignored \"-Wignored-qualifiers\"\n\n// QUICHE doesn't mark override at QuicBatchWriterBase::SupportsReleaseTime()\n#ifdef __clang__\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Winconsistent-missing-override\"\n#elif defined(__GNUC__) && __GNUC__ >= 5\n#pragma GCC diagnostic ignored \"-Wsuggest-override\"\n#endif\n\n#include \"quiche/quic/core/batch_writer/quic_gso_batch_writer.h\"\n\n#ifdef __clang__\n#pragma clang diagnostic pop\n#endif\n\n#pragma GCC diagnostic pop\n\n#include \"envoy/network/udp_packet_writer_handler.h\"\n\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_protos.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n/**\n * @brief The following can be used to collect statistics\n * related to UdpGsoBatchWriter. The stats maintained are\n * as follows:\n *\n * @total_bytes_sent: Maintains the count of total bytes\n * sent via the UdpGsoBatchWriter on the current ioHandle\n * via both WritePacket() and Flush() functions.\n *\n * @internal_buffer_size: Gauge value to keep track of the\n * total bytes buffered to writer by UdpGsoBatchWriter.\n * Resets whenever the internal bytes are sent to the client.\n *\n * @pkts_sent_per_batch: Histogram to maintain stats of\n * total number of packets sent in each batch by UdpGsoBatchWriter.\n * Provides summary count of batch-sizes within bucketed range,\n * and also provides sum and count stats.\n *\n * TODO(danzh): Add writer stats to QUIC Documentation when it is\n * created for QUIC/HTTP3 docs. 
Also specify in the documentation\n * that user has to compile in QUICHE to use UdpGsoBatchWriter.\n */\n#define UDP_GSO_BATCH_WRITER_STATS(COUNTER, GAUGE, HISTOGRAM)                                      \\\n  COUNTER(total_bytes_sent)                                                                        \\\n  GAUGE(internal_buffer_size, NeverImport)                                                         \\\n  HISTOGRAM(pkts_sent_per_batch, Unspecified)\n\n/**\n * Wrapper struct for udp gso batch writer stats. @see stats_macros.h\n */\nstruct UdpGsoBatchWriterStats {\n  UDP_GSO_BATCH_WRITER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT,\n                             GENERATE_HISTOGRAM_STRUCT)\n};\n\n/**\n * UdpPacketWriter implementation based on quic::QuicGsoBatchWriter to send packets\n * in batches, using UDP socket's generic segmentation offload(GSO) capability.\n */\nclass UdpGsoBatchWriter : public quic::QuicGsoBatchWriter, public Network::UdpPacketWriter {\npublic:\n  UdpGsoBatchWriter(Network::IoHandle& io_handle, Stats::Scope& scope);\n\n  ~UdpGsoBatchWriter() override;\n\n  // writePacket perform batched sends based on QuicGsoBatchWriter::WritePacket\n  Api::IoCallUint64Result writePacket(const Buffer::Instance& buffer,\n                                      const Network::Address::Ip* local_ip,\n                                      const Network::Address::Instance& peer_address) override;\n\n  // UdpPacketWriter Implementations\n  bool isWriteBlocked() const override { return IsWriteBlocked(); }\n  void setWritable() override { return SetWritable(); }\n  bool isBatchMode() const override { return IsBatchMode(); }\n  uint64_t getMaxPacketSize(const Network::Address::Instance& peer_address) const override;\n  Network::UdpPacketWriterBuffer\n  getNextWriteLocation(const Network::Address::Ip* local_ip,\n                       const Network::Address::Instance& peer_address) override;\n  Api::IoCallUint64Result flush() override;\n\nprivate:\n  /**\n  
 * @brief Update stats_ field for the udp packet writer\n   * @param quic_result is the result from Flush/WritePacket\n   */\n  void updateUdpGsoBatchWriterStats(quic::WriteResult quic_result);\n\n  /**\n   * @brief Generate UdpGsoBatchWriterStats object from scope\n   * @param scope for stats\n   * @return UdpGsoBatchWriterStats for scope\n   */\n  UdpGsoBatchWriterStats generateStats(Stats::Scope& scope);\n  UdpGsoBatchWriterStats stats_;\n  uint64_t gso_size_;\n};\n\nclass UdpGsoBatchWriterFactory : public Network::UdpPacketWriterFactory {\npublic:\n  UdpGsoBatchWriterFactory();\n\n  Network::UdpPacketWriterPtr createUdpPacketWriter(Network::IoHandle& io_handle,\n                                                    Stats::Scope& scope) override;\n\nprivate:\n  envoy::config::core::v3::RuntimeFeatureFlag enabled_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n\n#endif // defined(__linux__)\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.cc",
    "content": "#include \"extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h\"\n\n#include \"envoy/config/listener/v3/udp_gso_batch_writer_config.pb.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n\n#if defined(__linux__)\n#include \"extensions/quic_listeners/quiche/udp_gso_batch_writer.h\"\n#endif\n\nnamespace Envoy {\nnamespace Quic {\n\nProtobufTypes::MessagePtr UdpGsoBatchWriterConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::config::listener::v3::UdpGsoBatchWriterOptions>();\n}\n\nNetwork::UdpPacketWriterFactoryPtr\nUdpGsoBatchWriterConfigFactory::createUdpPacketWriterFactory(const Protobuf::Message& /*message*/) {\n  if (!Api::OsSysCallsSingleton::get().supportsUdpGso()) {\n    throw EnvoyException(\"Error configuring batch writer on platform without support \"\n                         \"for UDP GSO. Reset udp_writer_config to default writer\");\n  }\n\n#if defined(__linux__)\n  return std::make_unique<UdpGsoBatchWriterFactory>();\n#else\n  // On non-linux, `supportsUdpGso()` always returns false.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n#endif\n}\n\nstd::string UdpGsoBatchWriterConfigFactory::name() const { return GsoBatchWriterName; }\n\nREGISTER_FACTORY(UdpGsoBatchWriterConfigFactory, Network::UdpPacketWriterConfigFactory);\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/network/udp_packet_writer_config.h\"\n#include \"envoy/registry/registry.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nconst std::string GsoBatchWriterName{\"udp_gso_batch_writer\"};\n\n// Network::UdpPacketWriterConfigFactory to create UdpGsoBatchWriterFactory based on given\n// protobuf.\nclass UdpGsoBatchWriterConfigFactory : public Network::UdpPacketWriterConfigFactory {\npublic:\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  Network::UdpPacketWriterFactoryPtr\n  createUdpPacketWriterFactory(const Protobuf::Message&) override;\n\n  std::string name() const override;\n};\n\nDECLARE_FACTORY(UdpGsoBatchWriterConfigFactory);\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # well known names files are public as long as they exist.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/resource_monitors/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"factory_base_lib\",\n    hdrs = [\"factory_base.h\"],\n    # This resource monitoring library is considered core code.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/server:resource_monitor_config_interface\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/resource_monitors/common/factory_base.h",
    "content": "#pragma once\n\n#include \"envoy/server/resource_monitor_config.h\"\n\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace Common {\n\ntemplate <class ConfigProto>\nclass FactoryBase : public Server::Configuration::ResourceMonitorFactory {\npublic:\n  Server::ResourceMonitorPtr\n  createResourceMonitor(const Protobuf::Message& config,\n                        Server::Configuration::ResourceMonitorFactoryContext& context) override {\n    return createResourceMonitorFromProtoTyped(MessageUtil::downcastAndValidate<const ConfigProto&>(\n                                                   config, context.messageValidationVisitor()),\n                                               context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ConfigProto>();\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  FactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  virtual Server::ResourceMonitorPtr createResourceMonitorFromProtoTyped(\n      const ConfigProto& config,\n      Server::Configuration::ResourceMonitorFactoryContext& context) PURE;\n\n  const std::string name_;\n};\n\n} // namespace Common\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/fixed_heap/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"fixed_heap_monitor\",\n    srcs = [\"fixed_heap_monitor.cc\"],\n    hdrs = [\"fixed_heap_monitor.h\"],\n    deps = [\n        \"//include/envoy/server:resource_monitor_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"@envoy_api//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"data_plane_agnostic\",\n    status = \"alpha\",\n    deps = [\n        \":fixed_heap_monitor\",\n        \"//include/envoy/registry\",\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/resource_monitors:well_known_names\",\n        \"//source/extensions/resource_monitors/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/resource_monitors/fixed_heap/config.cc",
    "content": "#include \"extensions/resource_monitors/fixed_heap/config.h\"\n\n#include \"envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.pb.h\"\n#include \"envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/resource_monitors/fixed_heap/fixed_heap_monitor.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace FixedHeapMonitor {\n\nServer::ResourceMonitorPtr FixedHeapMonitorFactory::createResourceMonitorFromProtoTyped(\n    const envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig& config,\n    Server::Configuration::ResourceMonitorFactoryContext& /*unused_context*/) {\n  return std::make_unique<FixedHeapMonitor>(config);\n}\n\n/**\n * Static registration for the fixed heap resource monitor factory. @see RegistryFactory.\n */\nREGISTER_FACTORY(FixedHeapMonitorFactory, Server::Configuration::ResourceMonitorFactory);\n\n} // namespace FixedHeapMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/fixed_heap/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.pb.h\"\n#include \"envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.pb.validate.h\"\n#include \"envoy/server/resource_monitor_config.h\"\n\n#include \"extensions/resource_monitors/common/factory_base.h\"\n#include \"extensions/resource_monitors/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace FixedHeapMonitor {\n\nclass FixedHeapMonitorFactory\n    : public Common::FactoryBase<\n          envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig> {\npublic:\n  FixedHeapMonitorFactory() : FactoryBase(ResourceMonitorNames::get().FixedHeap) {}\n\nprivate:\n  Server::ResourceMonitorPtr createResourceMonitorFromProtoTyped(\n      const envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig& config,\n      Server::Configuration::ResourceMonitorFactoryContext& context) override;\n};\n\n} // namespace FixedHeapMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/fixed_heap/fixed_heap_monitor.cc",
    "content": "#include \"extensions/resource_monitors/fixed_heap/fixed_heap_monitor.h\"\n\n#include \"envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/memory/stats.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace FixedHeapMonitor {\n\nuint64_t MemoryStatsReader::reservedHeapBytes() { return Memory::Stats::totalCurrentlyReserved(); }\n\nuint64_t MemoryStatsReader::unmappedHeapBytes() { return Memory::Stats::totalPageHeapUnmapped(); }\n\nFixedHeapMonitor::FixedHeapMonitor(\n    const envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig& config,\n    std::unique_ptr<MemoryStatsReader> stats)\n    : max_heap_(config.max_heap_size_bytes()), stats_(std::move(stats)) {\n  ASSERT(max_heap_ > 0);\n}\n\nvoid FixedHeapMonitor::updateResourceUsage(Server::ResourceMonitor::Callbacks& callbacks) {\n  const size_t physical = stats_->reservedHeapBytes();\n  const size_t unmapped = stats_->unmappedHeapBytes();\n  ASSERT(physical >= unmapped);\n  const size_t used = physical - unmapped;\n\n  Server::ResourceUsage usage;\n  usage.resource_pressure_ = used / static_cast<double>(max_heap_);\n\n  callbacks.onSuccess(usage);\n}\n\n} // namespace FixedHeapMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/fixed_heap/fixed_heap_monitor.h",
    "content": "#pragma once\n\n#include \"envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.pb.h\"\n#include \"envoy/server/resource_monitor.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace FixedHeapMonitor {\n\n/**\n * Helper class for getting memory heap stats.\n */\nclass MemoryStatsReader {\npublic:\n  MemoryStatsReader() = default;\n  virtual ~MemoryStatsReader() = default;\n\n  // Memory reserved for the process by the heap.\n  virtual uint64_t reservedHeapBytes();\n  // Memory in free, unmapped pages in the page heap.\n  virtual uint64_t unmappedHeapBytes();\n};\n\n/**\n * Heap memory monitor with a statically configured maximum.\n */\nclass FixedHeapMonitor : public Server::ResourceMonitor {\npublic:\n  FixedHeapMonitor(\n      const envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig& config,\n      std::unique_ptr<MemoryStatsReader> stats = std::make_unique<MemoryStatsReader>());\n\n  void updateResourceUsage(Server::ResourceMonitor::Callbacks& callbacks) override;\n\nprivate:\n  const uint64_t max_heap_;\n  std::unique_ptr<MemoryStatsReader> stats_;\n};\n\n} // namespace FixedHeapMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/injected_resource/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"injected_resource_monitor\",\n    srcs = [\"injected_resource_monitor.cc\"],\n    hdrs = [\"injected_resource_monitor.h\"],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//include/envoy/server:resource_monitor_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"@envoy_api//envoy/config/resource_monitor/injected_resource/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"data_plane_agnostic\",\n    status = \"alpha\",\n    deps = [\n        \":injected_resource_monitor\",\n        \"//include/envoy/registry\",\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/resource_monitors:well_known_names\",\n        \"//source/extensions/resource_monitors/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/resource_monitor/injected_resource/v2alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/resource_monitors/injected_resource/config.cc",
    "content": "#include \"extensions/resource_monitors/injected_resource/config.h\"\n\n#include \"envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.pb.h\"\n#include \"envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/resource_monitors/injected_resource/injected_resource_monitor.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace InjectedResourceMonitor {\n\nServer::ResourceMonitorPtr InjectedResourceMonitorFactory::createResourceMonitorFromProtoTyped(\n    const envoy::config::resource_monitor::injected_resource::v2alpha::InjectedResourceConfig&\n        config,\n    Server::Configuration::ResourceMonitorFactoryContext& context) {\n  return std::make_unique<InjectedResourceMonitor>(config, context);\n}\n\n/**\n * Static registration for the injected resource monitor factory. @see RegistryFactory.\n */\nREGISTER_FACTORY(InjectedResourceMonitorFactory, Server::Configuration::ResourceMonitorFactory);\n\n} // namespace InjectedResourceMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/injected_resource/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.pb.h\"\n#include \"envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.pb.validate.h\"\n#include \"envoy/server/resource_monitor_config.h\"\n\n#include \"extensions/resource_monitors/common/factory_base.h\"\n#include \"extensions/resource_monitors/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace InjectedResourceMonitor {\n\nclass InjectedResourceMonitorFactory\n    : public Common::FactoryBase<\n          envoy::config::resource_monitor::injected_resource::v2alpha::InjectedResourceConfig> {\npublic:\n  InjectedResourceMonitorFactory() : FactoryBase(ResourceMonitorNames::get().InjectedResource) {}\n\nprivate:\n  Server::ResourceMonitorPtr createResourceMonitorFromProtoTyped(\n      const envoy::config::resource_monitor::injected_resource::v2alpha::InjectedResourceConfig&\n          config,\n      Server::Configuration::ResourceMonitorFactoryContext& context) override;\n};\n\n} // namespace InjectedResourceMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/injected_resource/injected_resource_monitor.cc",
    "content": "#include \"extensions/resource_monitors/injected_resource/injected_resource_monitor.h\"\n\n#include \"envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.pb.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/strings/numbers.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace InjectedResourceMonitor {\n\nInjectedResourceMonitor::InjectedResourceMonitor(\n    const envoy::config::resource_monitor::injected_resource::v2alpha::InjectedResourceConfig&\n        config,\n    Server::Configuration::ResourceMonitorFactoryContext& context)\n    : filename_(config.filename()), file_changed_(true),\n      watcher_(context.dispatcher().createFilesystemWatcher()), api_(context.api()) {\n  watcher_->addWatch(filename_, Filesystem::Watcher::Events::MovedTo,\n                     [this](uint32_t) { onFileChanged(); });\n}\n\nvoid InjectedResourceMonitor::onFileChanged() { file_changed_ = true; }\n\nvoid InjectedResourceMonitor::updateResourceUsage(Server::ResourceMonitor::Callbacks& callbacks) {\n  if (file_changed_) {\n    file_changed_ = false;\n    try {\n      const std::string contents = api_.fileSystem().fileReadToEnd(filename_);\n      double pressure;\n      if (absl::SimpleAtod(contents, &pressure)) {\n        if (pressure < 0 || pressure > 1) {\n          throw EnvoyException(\"pressure out of range\");\n        }\n        pressure_ = pressure;\n        error_.reset();\n      } else {\n        throw EnvoyException(\"failed to parse injected resource pressure\");\n      }\n    } catch (const EnvoyException& error) {\n      error_ = error;\n      pressure_.reset();\n    }\n  }\n\n  ASSERT(pressure_.has_value() != error_.has_value());\n  if (pressure_.has_value()) {\n    callbacks.onSuccess({*pressure_});\n  } else {\n    callbacks.onFailure(*error_);\n  }\n}\n\n} // namespace InjectedResourceMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/injected_resource/injected_resource_monitor.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.pb.h\"\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/server/resource_monitor.h\"\n#include \"envoy/server/resource_monitor_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace InjectedResourceMonitor {\n\n/**\n * A monitor for an injected resource. The resource pressure is read from a text file\n * specified in the config, which must contain a floating-point number in the range\n * [0..1] and be updated atomically by a symbolic link swap.\n * This is intended primarily for integration tests to force Envoy into an overloaded state.\n */\nclass InjectedResourceMonitor : public Server::ResourceMonitor {\npublic:\n  InjectedResourceMonitor(\n      const envoy::config::resource_monitor::injected_resource::v2alpha::InjectedResourceConfig&\n          config,\n      Server::Configuration::ResourceMonitorFactoryContext& context);\n\n  // Server::ResourceMonitor\n  void updateResourceUsage(Server::ResourceMonitor::Callbacks& callbacks) override;\n\nprotected:\n  virtual void onFileChanged();\n\nprivate:\n  const std::string filename_;\n  bool file_changed_;\n  Filesystem::WatcherPtr watcher_;\n  absl::optional<double> pressure_;\n  absl::optional<EnvoyException> error_;\n  Api::Api& api_;\n};\n\n} // namespace InjectedResourceMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/resource_monitors/well_known_names.h",
    "content": "#pragma once\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\n\n/**\n * Well-known resource monitor names.\n * NOTE: New resource monitors should use the well known name: envoy.resource_monitors.name.\n */\nclass ResourceMonitorNameValues {\npublic:\n  // Heap monitor with statically configured max.\n  const std::string FixedHeap = \"envoy.resource_monitors.fixed_heap\";\n\n  // File-based injected resource monitor.\n  const std::string InjectedResource = \"envoy.resource_monitors.injected_resource\";\n};\n\nusing ResourceMonitorNames = ConstSingleton<ResourceMonitorNameValues>;\n\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/omit_canary_hosts/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"omit_canary_hosts_predicate_lib\",\n    hdrs = [\"omit_canary_hosts.h\"],\n    deps = [\n        \"//include/envoy/upstream:retry_interface\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":omit_canary_hosts_predicate_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/upstream:retry_interface\",\n        \"@envoy_api//envoy/config/retry/omit_canary_hosts/v2:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/retry/host/omit_canary_hosts/config.cc",
    "content": "#include \"extensions/retry/host/omit_canary_hosts/config.h\"\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/upstream/retry.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\n\nREGISTER_FACTORY(OmitCanaryHostsRetryPredicateFactory, Upstream::RetryHostPredicateFactory);\n\n}\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/omit_canary_hosts/config.h",
    "content": "#include \"envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.pb.validate.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"extensions/retry/host/omit_canary_hosts/omit_canary_hosts.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\n\nclass OmitCanaryHostsRetryPredicateFactory : public Upstream::RetryHostPredicateFactory {\n\npublic:\n  Upstream::RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message&,\n                                                            uint32_t) override {\n    return std::make_shared<OmitCanaryHostsRetryPredicate>();\n  }\n\n  std::string name() const override { return \"envoy.retry_host_predicates.omit_canary_hosts\"; }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::config::retry::omit_canary_hosts::v2::OmitCanaryHostsPredicate>();\n  }\n};\n\n} // namespace Host\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/omit_canary_hosts/omit_canary_hosts.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nclass OmitCanaryHostsRetryPredicate : public Upstream::RetryHostPredicate {\npublic:\n  bool shouldSelectAnotherHost(const Upstream::Host& candidate_host) override {\n    return candidate_host.canary();\n  }\n\n  void onHostAttempted(Upstream::HostDescriptionConstSharedPtr) override {}\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/omit_host_metadata/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"omit_host_metadata_predicate_lib\",\n    srcs = [\"omit_host_metadata.cc\"],\n    hdrs = [\"omit_host_metadata.h\"],\n    deps = [\n        \"//include/envoy/upstream:retry_interface\",\n        \"//source/common/config:well_known_names\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":omit_host_metadata_predicate_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/upstream:retry_interface\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/extensions/retry/host/omit_host_metadata/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/retry/host/omit_host_metadata/config.cc",
    "content": "#include \"extensions/retry/host/omit_host_metadata/config.h\"\n\n#include \"envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"common/protobuf/message_validator_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\n\nUpstream::RetryHostPredicateSharedPtr\nOmitHostsRetryPredicateFactory::createHostPredicate(const Protobuf::Message& config, uint32_t) {\n  return std::make_shared<OmitHostsRetryPredicate>(\n      MessageUtil::downcastAndValidate<\n          const envoy::extensions::retry::host::omit_host_metadata::v3::OmitHostMetadataConfig&>(\n          config, ProtobufMessage::getStrictValidationVisitor())\n          .metadata_match());\n}\n\nREGISTER_FACTORY(OmitHostsRetryPredicateFactory, Upstream::RetryHostPredicateFactory);\n\n} // namespace Host\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/omit_host_metadata/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.pb.h\"\n#include \"envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.pb.validate.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"extensions/retry/host/omit_host_metadata/omit_host_metadata.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\n\nclass OmitHostsRetryPredicateFactory : public Upstream::RetryHostPredicateFactory {\npublic:\n  Upstream::RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message& config,\n                                                            uint32_t retry_count) override;\n\n  std::string name() const override { return \"envoy.retry_host_predicates.omit_host_metadata\"; }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return ProtobufTypes::MessagePtr(\n        new envoy::extensions::retry::host::omit_host_metadata::v3::OmitHostMetadataConfig());\n  }\n};\n\n} // namespace Host\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/omit_host_metadata/omit_host_metadata.cc",
    "content": "#include \"extensions/retry/host/omit_host_metadata/omit_host_metadata.h\"\n\n#include \"common/config/metadata.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\n\nbool OmitHostsRetryPredicate::shouldSelectAnotherHost(const Upstream::Host& host) {\n  // Note: The additional check to verify if the labelSet is empty is performed since\n  // metadataLabelMatch returns true in case of an empty labelSet. However, for an empty labelSet,\n  // i.e. if there is no matching criteria defined, this method should return false.\n  return !labelSet_.empty() && Envoy::Config::Metadata::metadataLabelMatch(\n                                   labelSet_, host.metadata().get(),\n                                   Envoy::Config::MetadataFilters::get().ENVOY_LB, true);\n}\n\n} // namespace Host\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/omit_host_metadata/omit_host_metadata.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/config/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\n\nclass OmitHostsRetryPredicate : public Upstream::RetryHostPredicate {\npublic:\n  explicit OmitHostsRetryPredicate(const envoy::config::core::v3::Metadata& metadata_match_criteria)\n      : metadata_match_criteria_(metadata_match_criteria) {\n    const auto& filter_it = metadata_match_criteria_.filter_metadata().find(\n        Envoy::Config::MetadataFilters::get().ENVOY_LB);\n    if (filter_it != metadata_match_criteria_.filter_metadata().end()) {\n      for (auto const& it : filter_it->second.fields()) {\n        labelSet_.push_back(it);\n      }\n    }\n  }\n\n  bool shouldSelectAnotherHost(const Upstream::Host& host) override;\n\n  void onHostAttempted(Upstream::HostDescriptionConstSharedPtr) override {}\n\nprivate:\n  const envoy::config::core::v3::Metadata metadata_match_criteria_;\n  std::vector<std::pair<std::string, ProtobufWkt::Value>> labelSet_;\n};\n\n} // namespace Host\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/previous_hosts/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"previous_hosts_predicate_lib\",\n    hdrs = [\"previous_hosts.h\"],\n    deps = [\n        \"//include/envoy/upstream:retry_interface\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":previous_hosts_predicate_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/upstream:retry_interface\",\n        \"@envoy_api//envoy/config/retry/previous_hosts/v2:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/retry/host/previous_hosts/config.cc",
    "content": "#include \"extensions/retry/host/previous_hosts/config.h\"\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/upstream/retry.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\n\nREGISTER_FACTORY(PreviousHostsRetryPredicateFactory, Upstream::RetryHostPredicateFactory);\n\n}\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/previous_hosts/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/retry/previous_hosts/v2/previous_hosts.pb.validate.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"extensions/retry/host/previous_hosts/previous_hosts.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\n\nclass PreviousHostsRetryPredicateFactory : public Upstream::RetryHostPredicateFactory {\npublic:\n  Upstream::RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message&,\n                                                            uint32_t retry_count) override {\n    return std::make_shared<PreviousHostsRetryPredicate>(retry_count);\n  }\n\n  std::string name() const override { return \"envoy.retry_host_predicates.previous_hosts\"; }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<envoy::config::retry::previous_hosts::v2::PreviousHostsPredicate>();\n  }\n};\n\n} // namespace Host\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/host/previous_hosts/previous_hosts.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n#include \"envoy/upstream/upstream.h\"\n\nnamespace Envoy {\nclass PreviousHostsRetryPredicate : public Upstream::RetryHostPredicate {\npublic:\n  PreviousHostsRetryPredicate(uint32_t retry_count) : attempted_hosts_(retry_count) {}\n\n  bool shouldSelectAnotherHost(const Upstream::Host& candidate_host) override {\n    return std::find(attempted_hosts_.begin(), attempted_hosts_.end(), &candidate_host) !=\n           attempted_hosts_.end();\n  }\n  void onHostAttempted(Upstream::HostDescriptionConstSharedPtr attempted_host) override {\n    attempted_hosts_.emplace_back(attempted_host.get());\n  }\n\nprivate:\n  std::vector<Upstream::HostDescription const*> attempted_hosts_;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/priority/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/retry/priority/previous_priorities/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"previous_priorities_lib\",\n    srcs = [\"previous_priorities.cc\"],\n    hdrs = [\"previous_priorities.h\"],\n    deps = [\n        \"//include/envoy/upstream:retry_interface\",\n        \"//source/common/upstream:load_balancer_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":previous_priorities_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/upstream:retry_interface\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/retry/priority:well_known_names\",\n        \"@envoy_api//envoy/config/retry/previous_priorities:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/retry/priority/previous_priorities/config.cc",
    "content": "#include \"extensions/retry/priority/previous_priorities/config.h\"\n\n#include \"envoy/config/retry/previous_priorities/previous_priorities_config.pb.h\"\n#include \"envoy/config/retry/previous_priorities/previous_priorities_config.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/upstream/retry.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Priority {\n\nUpstream::RetryPrioritySharedPtr PreviousPrioritiesRetryPriorityFactory::createRetryPriority(\n    const Protobuf::Message& config, ProtobufMessage::ValidationVisitor& validation_visitor,\n\n    uint32_t max_retries) {\n  return std::make_shared<PreviousPrioritiesRetryPriority>(\n      MessageUtil::downcastAndValidate<\n          const envoy::config::retry::previous_priorities::PreviousPrioritiesConfig&>(\n          config, validation_visitor)\n          .update_frequency(),\n      max_retries);\n}\n\nREGISTER_FACTORY(PreviousPrioritiesRetryPriorityFactory, Upstream::RetryPriorityFactory);\n\n} // namespace Priority\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/priority/previous_priorities/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/retry/previous_priorities/previous_priorities_config.pb.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/retry/priority/previous_priorities/previous_priorities.h\"\n#include \"extensions/retry/priority/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Priority {\n\nclass PreviousPrioritiesRetryPriorityFactory : public Upstream::RetryPriorityFactory {\npublic:\n  Upstream::RetryPrioritySharedPtr\n  createRetryPriority(const Protobuf::Message& config,\n                      ProtobufMessage::ValidationVisitor& validation_visitor,\n                      uint32_t max_retries) override;\n\n  std::string name() const override {\n    return RetryPriorityValues::get().PreviousPrioritiesRetryPriority;\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return ProtobufTypes::MessagePtr(\n        new envoy::config::retry::previous_priorities::PreviousPrioritiesConfig());\n  }\n};\n\n} // namespace Priority\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/priority/previous_priorities/previous_priorities.cc",
    "content": "#include \"extensions/retry/priority/previous_priorities/previous_priorities.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Priority {\n\nconst Upstream::HealthyAndDegradedLoad& PreviousPrioritiesRetryPriority::determinePriorityLoad(\n    const Upstream::PrioritySet& priority_set,\n    const Upstream::HealthyAndDegradedLoad& original_priority_load,\n    const PriorityMappingFunc& priority_mapping_func) {\n  // If we've not seen enough retries to modify the priority load, just\n  // return the original.\n  // If this retry should trigger an update, recalculate the priority load by excluding attempted\n  // priorities.\n  if (attempted_hosts_.size() < update_frequency_) {\n    return original_priority_load;\n  } else if (attempted_hosts_.size() % update_frequency_ == 0) {\n    if (excluded_priorities_.size() < priority_set.hostSetsPerPriority().size()) {\n      excluded_priorities_.resize(priority_set.hostSetsPerPriority().size());\n    }\n\n    for (const auto& host : attempted_hosts_) {\n      absl::optional<uint32_t> mapped_host_priority = priority_mapping_func(*host);\n      if (mapped_host_priority.has_value()) {\n        excluded_priorities_[mapped_host_priority.value()] = true;\n      }\n    }\n\n    if (!adjustForAttemptedPriorities(priority_set)) {\n      return original_priority_load;\n    }\n  }\n\n  return per_priority_load_;\n}\n\nbool PreviousPrioritiesRetryPriority::adjustForAttemptedPriorities(\n    const Upstream::PrioritySet& priority_set) {\n  for (auto& host_set : priority_set.hostSetsPerPriority()) {\n    recalculatePerPriorityState(host_set->priority(), priority_set);\n  }\n\n  std::vector<uint32_t> adjusted_per_priority_health(per_priority_health_.get().size(), 0);\n  std::vector<uint32_t> adjusted_per_priority_degraded(per_priority_degraded_.get().size(), 0);\n  auto total_availability =\n      adjustedAvailability(adjusted_per_priority_health, adjusted_per_priority_degraded);\n\n  // If there 
are no available priorities left, we reset the attempted priorities and recompute the\n  // adjusted availability.\n  // This allows us to fall back to the unmodified priority load when we run out of priorities\n  // instead of failing to route requests.\n  if (total_availability == 0) {\n    for (auto excluded_priority : excluded_priorities_) {\n      excluded_priority = false;\n    }\n    attempted_hosts_.clear();\n    total_availability =\n        adjustedAvailability(adjusted_per_priority_health, adjusted_per_priority_degraded);\n  }\n\n  // If total availability is still zero at this point, it must mean that all clusters are\n  // completely unavailable. If so, fall back to using the original priority loads. This maintains\n  // whatever handling the default LB uses when all priorities are unavailable.\n  if (total_availability == 0) {\n    return false;\n  }\n\n  std::fill(per_priority_load_.healthy_priority_load_.get().begin(),\n            per_priority_load_.healthy_priority_load_.get().end(), 0);\n  std::fill(per_priority_load_.degraded_priority_load_.get().begin(),\n            per_priority_load_.degraded_priority_load_.get().end(), 0);\n\n  // TODO(snowp): This code is basically distributeLoad from load_balancer_impl.cc, should probably\n  // reuse that.\n\n  // We then adjust the load by rebalancing priorities with the adjusted availability values.\n  size_t total_load = 100;\n  // The outer loop is used to eliminate rounding errors: any remaining load will be assigned to the\n  // first availability priority.\n  while (total_load != 0) {\n    for (size_t i = 0; i < adjusted_per_priority_health.size(); ++i) {\n      // Now assign as much load as possible to the high priority levels and cease assigning load\n      // when total_load runs out.\n      const auto delta = std::min<uint32_t>(total_load, adjusted_per_priority_health[i] * 100 /\n                                                            total_availability);\n      
per_priority_load_.healthy_priority_load_.get()[i] += delta;\n      total_load -= delta;\n    }\n\n    for (size_t i = 0; i < adjusted_per_priority_degraded.size(); ++i) {\n      // Now assign as much load as possible to the high priority levels and cease assigning load\n      // when total_load runs out.\n      const auto delta = std::min<uint32_t>(total_load, adjusted_per_priority_degraded[i] * 100 /\n                                                            total_availability);\n      per_priority_load_.degraded_priority_load_.get()[i] += delta;\n      total_load -= delta;\n    }\n  }\n\n  return true;\n}\n\nuint32_t PreviousPrioritiesRetryPriority::adjustedAvailability(\n    std::vector<uint32_t>& adjusted_per_priority_health,\n    std::vector<uint32_t>& adjusted_per_priority_degraded) const {\n  // Create an adjusted view of the priorities, where attempted priorities are given a zero load.\n  // Create an adjusted health view of the priorities, where attempted priorities are\n  // given a zero weight.\n  uint32_t total_availability = 0;\n\n  ASSERT(per_priority_health_.get().size() == per_priority_degraded_.get().size());\n\n  for (size_t i = 0; i < per_priority_health_.get().size(); ++i) {\n    if (!excluded_priorities_[i]) {\n      adjusted_per_priority_health[i] = per_priority_health_.get()[i];\n      adjusted_per_priority_degraded[i] = per_priority_degraded_.get()[i];\n      total_availability += per_priority_health_.get()[i];\n      total_availability += per_priority_degraded_.get()[i];\n    } else {\n      adjusted_per_priority_health[i] = 0;\n      adjusted_per_priority_degraded[i] = 0;\n    }\n  }\n\n  return std::min(total_availability, 100u);\n}\n\n} // namespace Priority\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/priority/previous_priorities/previous_priorities.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n\n#include \"common/upstream/load_balancer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Priority {\n\nclass PreviousPrioritiesRetryPriority : public Upstream::RetryPriority {\npublic:\n  PreviousPrioritiesRetryPriority(uint32_t update_frequency, uint32_t max_retries)\n      : update_frequency_(update_frequency) {\n    attempted_hosts_.reserve(max_retries);\n  }\n\n  const Upstream::HealthyAndDegradedLoad&\n  determinePriorityLoad(const Upstream::PrioritySet& priority_set,\n                        const Upstream::HealthyAndDegradedLoad& original_priority_load,\n                        const PriorityMappingFunc& priority_mapping_func) override;\n\n  void onHostAttempted(Upstream::HostDescriptionConstSharedPtr attempted_host) override {\n    attempted_hosts_.emplace_back(attempted_host);\n  }\n\nprivate:\n  void recalculatePerPriorityState(uint32_t priority, const Upstream::PrioritySet& priority_set) {\n    // Recalculate health and priority the same way the load balancer does it.\n    Upstream::LoadBalancerBase::recalculatePerPriorityState(\n        priority, priority_set, per_priority_load_, per_priority_health_, per_priority_degraded_);\n  }\n\n  uint32_t adjustedAvailability(std::vector<uint32_t>& per_priority_health,\n                                std::vector<uint32_t>& per_priority_degraded) const;\n\n  // Distributes priority load between priorities that should be considered after\n  // excluding attempted priorities.\n  // @return whether the adjustment was successful. 
If not, the original priority load should be\n  // used.\n  bool adjustForAttemptedPriorities(const Upstream::PrioritySet& priority_set);\n\n  const uint32_t update_frequency_;\n  std::vector<Upstream::HostDescriptionConstSharedPtr> attempted_hosts_;\n  std::vector<bool> excluded_priorities_;\n  Upstream::HealthyAndDegradedLoad per_priority_load_;\n  Upstream::HealthyAvailability per_priority_health_;\n  Upstream::DegradedAvailability per_priority_degraded_;\n};\n\n} // namespace Priority\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/retry/priority/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Priority {\n\n/**\n * Well-known retry priority load names.\n */\nclass RetryPriorityNameValues {\npublic:\n  // Previous priority retry priority. Excludes previously attempted priorities during retries.\n  const std::string PreviousPrioritiesRetryPriority = \"envoy.retry_priorities.previous_priorities\";\n};\n\nusing RetryPriorityValues = ConstSingleton<RetryPriorityNameValues>;\n\n} // namespace Priority\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # well known names files are public as long as they exist.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/stat_sinks/common/statsd/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"statsd_lib\",\n    srcs = [\"statsd.cc\"],\n    hdrs = [\"statsd.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/network:address_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/stat_sinks/common/statsd/statsd.cc",
    "content": "#include \"extensions/stat_sinks/common/statsd/statsd.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/network/socket_interface.h\"\n#include \"common/network/utility.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Common {\nnamespace Statsd {\n\nUdpStatsdSink::WriterImpl::WriterImpl(UdpStatsdSink& parent)\n    : parent_(parent), io_handle_(Network::ioHandleForAddr(Network::Socket::Type::Datagram,\n                                                           parent_.server_address_)) {}\n\nvoid UdpStatsdSink::WriterImpl::write(const std::string& message) {\n  // TODO(mattklein123): We can avoid this const_cast pattern by having a constant variant of\n  // RawSlice. 
This can be fixed elsewhere as well.\n  Buffer::RawSlice slice{const_cast<char*>(message.c_str()), message.size()};\n  Network::Utility::writeToSocket(*io_handle_, &slice, 1, nullptr, *parent_.server_address_);\n}\n\nvoid UdpStatsdSink::WriterImpl::writeBuffer(Buffer::Instance& data) {\n  Network::Utility::writeToSocket(*io_handle_, data, nullptr, *parent_.server_address_);\n}\n\nUdpStatsdSink::UdpStatsdSink(ThreadLocal::SlotAllocator& tls,\n                             Network::Address::InstanceConstSharedPtr address, const bool use_tag,\n                             const std::string& prefix, absl::optional<uint64_t> buffer_size)\n    : tls_(tls.allocateSlot()), server_address_(std::move(address)), use_tag_(use_tag),\n      prefix_(prefix.empty() ? Statsd::getDefaultPrefix() : prefix),\n      buffer_size_(buffer_size.value_or(0)) {\n  tls_->set([this](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<WriterImpl>(*this);\n  });\n}\n\nvoid UdpStatsdSink::flush(Stats::MetricSnapshot& snapshot) {\n  Writer& writer = tls_->getTyped<Writer>();\n  Buffer::OwnedImpl buffer;\n\n  for (const auto& counter : snapshot.counters()) {\n    if (counter.counter_.get().used()) {\n      const std::string counter_str =\n          absl::StrCat(prefix_, \".\", getName(counter.counter_.get()), \":\", counter.delta_, \"|c\",\n                       buildTagStr(counter.counter_.get().tags()));\n      writeBuffer(buffer, writer, counter_str);\n    }\n  }\n\n  for (const auto& gauge : snapshot.gauges()) {\n    if (gauge.get().used()) {\n      const std::string gauge_str =\n          absl::StrCat(prefix_, \".\", getName(gauge.get()), \":\", gauge.get().value(), \"|g\",\n                       buildTagStr(gauge.get().tags()));\n      writeBuffer(buffer, writer, gauge_str);\n    }\n  }\n\n  flushBuffer(buffer, writer);\n  // TODO(efimki): Add support of text readouts stats.\n}\n\nvoid UdpStatsdSink::writeBuffer(Buffer::OwnedImpl& buffer, Writer& 
writer,\n                                const std::string& statsd_metric) const {\n  if (statsd_metric.length() >= buffer_size_) {\n    // Our statsd_metric is too large to fit into the buffer, skip buffering and write directly\n    writer.write(statsd_metric);\n  } else {\n    if ((buffer.length() + statsd_metric.length() + 1) > buffer_size_) {\n      // If we add the new statsd_metric, we'll overflow our buffer. Flush the buffer to make\n      // room for the new statsd_metric.\n      flushBuffer(buffer, writer);\n    } else if (buffer.length() > 0) {\n      // We have room and have metrics already in the buffer, add a newline to separate\n      // metric entries.\n      buffer.add(\"\\n\");\n    }\n    buffer.add(statsd_metric);\n  }\n}\n\nvoid UdpStatsdSink::flushBuffer(Buffer::OwnedImpl& buffer, Writer& writer) const {\n  if (buffer.length() == 0) {\n    return;\n  }\n  writer.writeBuffer(buffer);\n  buffer.drain(buffer.length());\n}\n\nvoid UdpStatsdSink::onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) {\n  // For statsd histograms are all timers in milliseconds, Envoy histograms are however\n  // not necessarily timers in milliseconds, for Envoy histograms suffixed with their corresponding\n  // SI unit symbol this is acceptable, but for histograms without a suffix, especially those which\n  // are timers but record in units other than milliseconds, it may make sense to scale the value to\n  // milliseconds here and potentially suffix the names accordingly (minus the pre-existing ones for\n  // backwards compatibility).\n  const std::string message(absl::StrCat(prefix_, \".\", getName(histogram), \":\",\n                                         std::chrono::milliseconds(value).count(), \"|ms\",\n                                         buildTagStr(histogram.tags())));\n  tls_->getTyped<Writer>().write(message);\n}\n\nconst std::string UdpStatsdSink::getName(const Stats::Metric& metric) const {\n  if (use_tag_) {\n    return 
metric.tagExtractedName();\n  } else {\n    return metric.name();\n  }\n}\n\nconst std::string UdpStatsdSink::buildTagStr(const std::vector<Stats::Tag>& tags) const {\n  if (!use_tag_ || tags.empty()) {\n    return \"\";\n  }\n\n  std::vector<std::string> tag_strings;\n  tag_strings.reserve(tags.size());\n  for (const Stats::Tag& tag : tags) {\n    tag_strings.emplace_back(tag.name_ + \":\" + tag.value_);\n  }\n  return \"|#\" + absl::StrJoin(tag_strings, \",\");\n}\n\nTcpStatsdSink::TcpStatsdSink(const LocalInfo::LocalInfo& local_info,\n                             const std::string& cluster_name, ThreadLocal::SlotAllocator& tls,\n                             Upstream::ClusterManager& cluster_manager, Stats::Scope& scope,\n                             const std::string& prefix)\n    : prefix_(prefix.empty() ? Statsd::getDefaultPrefix() : prefix), tls_(tls.allocateSlot()),\n      cluster_manager_(cluster_manager),\n      cx_overflow_stat_(scope.counterFromStatName(\n          Stats::StatNameManagedStorage(\"statsd.cx_overflow\", scope.symbolTable()).statName())) {\n  Config::Utility::checkClusterAndLocalInfo(\"tcp statsd\", cluster_name, cluster_manager,\n                                            local_info);\n  cluster_info_ = cluster_manager.get(cluster_name)->info();\n  tls_->set([this](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<TlsSink>(*this, dispatcher);\n  });\n}\n\nvoid TcpStatsdSink::flush(Stats::MetricSnapshot& snapshot) {\n  TlsSink& tls_sink = tls_->getTyped<TlsSink>();\n  tls_sink.beginFlush(true);\n  for (const auto& counter : snapshot.counters()) {\n    if (counter.counter_.get().used()) {\n      tls_sink.flushCounter(counter.counter_.get().name(), counter.delta_);\n    }\n  }\n\n  for (const auto& gauge : snapshot.gauges()) {\n    if (gauge.get().used()) {\n      tls_sink.flushGauge(gauge.get().name(), gauge.get().value());\n    }\n  }\n  // TODO(efimki): Add support of text readouts 
stats.\n  tls_sink.endFlush(true);\n}\n\nTcpStatsdSink::TlsSink::TlsSink(TcpStatsdSink& parent, Event::Dispatcher& dispatcher)\n    : parent_(parent), dispatcher_(dispatcher) {}\n\nTcpStatsdSink::TlsSink::~TlsSink() {\n  if (connection_) {\n    connection_->close(Network::ConnectionCloseType::NoFlush);\n  }\n}\n\nvoid TcpStatsdSink::TlsSink::beginFlush(bool expect_empty_buffer) {\n  ASSERT(!expect_empty_buffer || buffer_.length() == 0);\n  ASSERT(current_slice_mem_ == nullptr);\n\n  uint64_t num_iovecs = buffer_.reserve(FLUSH_SLICE_SIZE_BYTES, &current_buffer_slice_, 1);\n  ASSERT(num_iovecs == 1);\n\n  ASSERT(current_buffer_slice_.len_ >= FLUSH_SLICE_SIZE_BYTES);\n  current_slice_mem_ = reinterpret_cast<char*>(current_buffer_slice_.mem_);\n}\n\nvoid TcpStatsdSink::TlsSink::commonFlush(const std::string& name, uint64_t value, char stat_type) {\n  ASSERT(current_slice_mem_ != nullptr);\n  // 36 > 1 (\".\" after prefix) + 1 (\":\" after name) + 4 (postfix chars, e.g., \"|ms\\n\") + 30 for\n  // number (bigger than it will ever be)\n  const uint32_t max_size = name.size() + parent_.getPrefix().size() + 36;\n  if (current_buffer_slice_.len_ - usedBuffer() < max_size) {\n    endFlush(false);\n    beginFlush(false);\n  }\n\n  // Produces something like \"envoy.{}:{}|c\\n\"\n  // This written this way for maximum perf since with a large number of stats and at a high flush\n  // rate this can become expensive.\n  const char* snapped_current = current_slice_mem_;\n  memcpy(current_slice_mem_, parent_.getPrefix().c_str(), parent_.getPrefix().size());\n  current_slice_mem_ += parent_.getPrefix().size();\n  *current_slice_mem_++ = '.';\n  memcpy(current_slice_mem_, name.c_str(), name.size());\n  current_slice_mem_ += name.size();\n  *current_slice_mem_++ = ':';\n  current_slice_mem_ += StringUtil::itoa(current_slice_mem_, 30, value);\n  *current_slice_mem_++ = '|';\n  *current_slice_mem_++ = stat_type;\n  *current_slice_mem_++ = '\\n';\n\n  
ASSERT(static_cast<uint64_t>(current_slice_mem_ - snapped_current) < max_size);\n}\n\nvoid TcpStatsdSink::TlsSink::flushCounter(const std::string& name, uint64_t delta) {\n  commonFlush(name, delta, 'c');\n}\n\nvoid TcpStatsdSink::TlsSink::flushGauge(const std::string& name, uint64_t value) {\n  commonFlush(name, value, 'g');\n}\n\nvoid TcpStatsdSink::TlsSink::endFlush(bool do_write) {\n  ASSERT(current_slice_mem_ != nullptr);\n  current_buffer_slice_.len_ = usedBuffer();\n  buffer_.commit(&current_buffer_slice_, 1);\n  current_slice_mem_ = nullptr;\n  if (do_write) {\n    write(buffer_);\n    ASSERT(buffer_.length() == 0);\n  }\n}\n\nvoid TcpStatsdSink::TlsSink::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::LocalClose ||\n      event == Network::ConnectionEvent::RemoteClose) {\n    dispatcher_.deferredDelete(std::move(connection_));\n  }\n}\n\nvoid TcpStatsdSink::TlsSink::onTimespanComplete(const std::string& name,\n                                                std::chrono::milliseconds ms) {\n  // Ultimately it would be nice to perf optimize this path also, but it's not very frequent. It's\n  // also currently not possible that this interleaves with any counter/gauge flushing.\n  // See the comment at UdpStatsdSink::onHistogramComplete with respect to unit suffixes.\n  ASSERT(current_slice_mem_ == nullptr);\n  Buffer::OwnedImpl buffer(\n      fmt::format(\"{}.{}:{}|ms\\n\", parent_.getPrefix().c_str(), name, ms.count()));\n  write(buffer);\n}\n\nvoid TcpStatsdSink::TlsSink::write(Buffer::Instance& buffer) {\n  // Guard against the stats connection backing up. In this case we probably have no visibility\n  // into what is going on externally, but we also increment a stat that should be viewable\n  // locally.\n  // NOTE: In the current implementation, we write most stats on the main thread, but timers\n  //       get emitted on the worker threads. 
Since this is using global buffered data, it's\n  //       possible that we are about to kill the connection that is not actually backed up.\n  //       This is essentially a panic state, so it's not worth keeping per thread buffer stats,\n  //       since if we stay over, the other threads will eventually kill their connections too.\n  // TODO(mattklein123): The use of the stat is somewhat of a hack, and should be replaced with\n  // real flow control callbacks once they are available.\n  if (parent_.cluster_info_->stats().upstream_cx_tx_bytes_buffered_.value() >\n      MAX_BUFFERED_STATS_BYTES) {\n    if (connection_) {\n      connection_->close(Network::ConnectionCloseType::NoFlush);\n    }\n    parent_.cx_overflow_stat_.inc();\n    buffer.drain(buffer.length());\n    return;\n  }\n\n  if (!connection_) {\n    Upstream::Host::CreateConnectionData info =\n        parent_.cluster_manager_.tcpConnForCluster(parent_.cluster_info_->name(), nullptr);\n    if (!info.connection_) {\n      buffer.drain(buffer.length());\n      return;\n    }\n\n    connection_ = std::move(info.connection_);\n    connection_->addConnectionCallbacks(*this);\n    connection_->setConnectionStats({parent_.cluster_info_->stats().upstream_cx_rx_bytes_total_,\n                                     parent_.cluster_info_->stats().upstream_cx_rx_bytes_buffered_,\n                                     parent_.cluster_info_->stats().upstream_cx_tx_bytes_total_,\n                                     parent_.cluster_info_->stats().upstream_cx_tx_bytes_buffered_,\n                                     &parent_.cluster_info_->stats().bind_errors_, nullptr});\n    connection_->connect();\n  }\n\n  connection_->write(buffer, false);\n}\n\nuint64_t TcpStatsdSink::TlsSink::usedBuffer() const {\n  ASSERT(current_slice_mem_ != nullptr);\n  return current_slice_mem_ - reinterpret_cast<char*>(current_buffer_slice_.mem_);\n}\n\n} // namespace Statsd\n} // namespace Common\n} // namespace StatSinks\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/common/statsd/statsd.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/sink.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/tag.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/macros.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Common {\nnamespace Statsd {\n\nstatic const std::string& getDefaultPrefix() { CONSTRUCT_ON_FIRST_USE(std::string, \"envoy\"); }\n\n/**\n * Implementation of Sink that writes to a UDP statsd address.\n */\nclass UdpStatsdSink : public Stats::Sink {\npublic:\n  /**\n   * Base interface for writing UDP datagrams.\n   */\n  class Writer : public ThreadLocal::ThreadLocalObject {\n  public:\n    virtual void write(const std::string& message) PURE;\n    virtual void writeBuffer(Buffer::Instance& data) PURE;\n  };\n\n  UdpStatsdSink(ThreadLocal::SlotAllocator& tls, Network::Address::InstanceConstSharedPtr address,\n                const bool use_tag, const std::string& prefix = getDefaultPrefix(),\n                absl::optional<uint64_t> buffer_size = absl::nullopt);\n  // For testing.\n  UdpStatsdSink(ThreadLocal::SlotAllocator& tls, const std::shared_ptr<Writer>& writer,\n                const bool use_tag, const std::string& prefix = getDefaultPrefix(),\n                absl::optional<uint64_t> buffer_size = absl::nullopt)\n      : tls_(tls.allocateSlot()), use_tag_(use_tag),\n        prefix_(prefix.empty() ? 
getDefaultPrefix() : prefix),\n        buffer_size_(buffer_size.value_or(0)) {\n    tls_->set(\n        [writer](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return writer; });\n  }\n\n  // Stats::Sink\n  void flush(Stats::MetricSnapshot& snapshot) override;\n  void onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) override;\n\n  bool getUseTagForTest() { return use_tag_; }\n  uint64_t getBufferSizeForTest() { return buffer_size_; }\n  const std::string& getPrefix() { return prefix_; }\n\nprivate:\n  /**\n   * This is a simple UDP localhost writer for statsd messages.\n   */\n  class WriterImpl : public Writer {\n  public:\n    WriterImpl(UdpStatsdSink& parent);\n\n    // Writer\n    void write(const std::string& message) override;\n    void writeBuffer(Buffer::Instance& data) override;\n\n  private:\n    UdpStatsdSink& parent_;\n    const Network::IoHandlePtr io_handle_;\n  };\n\n  void flushBuffer(Buffer::OwnedImpl& buffer, Writer& writer) const;\n  void writeBuffer(Buffer::OwnedImpl& buffer, Writer& writer, const std::string& data) const;\n\n  const std::string getName(const Stats::Metric& metric) const;\n  const std::string buildTagStr(const std::vector<Stats::Tag>& tags) const;\n\n  const ThreadLocal::SlotPtr tls_;\n  const Network::Address::InstanceConstSharedPtr server_address_;\n  const bool use_tag_;\n  // Prefix for all flushed stats.\n  const std::string prefix_;\n  const uint64_t buffer_size_;\n};\n\n/**\n * Per thread implementation of a TCP stats flusher for statsd.\n */\nclass TcpStatsdSink : public Stats::Sink {\npublic:\n  TcpStatsdSink(const LocalInfo::LocalInfo& local_info, const std::string& cluster_name,\n                ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cluster_manager,\n                Stats::Scope& scope, const std::string& prefix = getDefaultPrefix());\n\n  // Stats::Sink\n  void flush(Stats::MetricSnapshot& snapshot) override;\n  void onHistogramComplete(const 
Stats::Histogram& histogram, uint64_t value) override {\n    // For statsd histograms are all timers.\n    tls_->getTyped<TlsSink>().onTimespanComplete(histogram.name(),\n                                                 std::chrono::milliseconds(value));\n  }\n\n  const std::string& getPrefix() { return prefix_; }\n\nprivate:\n  struct TlsSink : public ThreadLocal::ThreadLocalObject, public Network::ConnectionCallbacks {\n    TlsSink(TcpStatsdSink& parent, Event::Dispatcher& dispatcher);\n    ~TlsSink() override;\n\n    void beginFlush(bool expect_empty_buffer);\n    void commonFlush(const std::string& name, uint64_t value, char stat_type);\n    void flushCounter(const std::string& name, uint64_t delta);\n    void flushGauge(const std::string& name, uint64_t value);\n    void endFlush(bool do_write);\n    void onTimespanComplete(const std::string& name, std::chrono::milliseconds ms);\n    uint64_t usedBuffer() const;\n    void write(Buffer::Instance& buffer);\n\n    // Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override;\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    TcpStatsdSink& parent_;\n    Event::Dispatcher& dispatcher_;\n    Network::ClientConnectionPtr connection_;\n    Buffer::OwnedImpl buffer_;\n    Buffer::RawSlice current_buffer_slice_;\n    char* current_slice_mem_{};\n  };\n\n  // Somewhat arbitrary 16MiB limit for buffered stats.\n  static constexpr uint32_t MAX_BUFFERED_STATS_BYTES = (1024 * 1024 * 16);\n\n  // 16KiB intermediate buffer for flushing.\n  static constexpr uint32_t FLUSH_SLICE_SIZE_BYTES = (1024 * 16);\n\n  // Prefix for all flushed stats.\n  const std::string prefix_;\n\n  Upstream::ClusterInfoConstSharedPtr cluster_info_;\n  ThreadLocal::SlotPtr tls_;\n  Upstream::ClusterManager& cluster_manager_;\n  Stats::Counter& cx_overflow_stat_;\n};\n\n} // namespace Statsd\n} // namespace Common\n} // namespace StatSinks\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/dog_statsd/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Stats sink for the DataDog (https://www.datadoghq.com/) variant of the statsd protocol\n# (https://docs.datadoghq.com/developers/dogstatsd/).\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"data_plane_agnostic\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/extensions/stat_sinks:well_known_names\",\n        \"//source/extensions/stat_sinks/common/statsd:statsd_lib\",\n        \"//source/server:configuration_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/stat_sinks/dog_statsd/config.cc",
    "content": "#include \"extensions/stat_sinks/dog_statsd/config.h\"\n\n#include <memory>\n\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/config/metrics/v3/stats.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/network/resolver_impl.h\"\n\n#include \"extensions/stat_sinks/common/statsd/statsd.h\"\n#include \"extensions/stat_sinks/well_known_names.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace DogStatsd {\n\nStats::SinkPtr\nDogStatsdSinkFactory::createStatsSink(const Protobuf::Message& config,\n                                      Server::Configuration::ServerFactoryContext& server) {\n  const auto& sink_config =\n      MessageUtil::downcastAndValidate<const envoy::config::metrics::v3::DogStatsdSink&>(\n          config, server.messageValidationContext().staticValidationVisitor());\n  Network::Address::InstanceConstSharedPtr address =\n      Network::Address::resolveProtoAddress(sink_config.address());\n  ENVOY_LOG(debug, \"dog_statsd UDP ip address: {}\", address->asString());\n  absl::optional<uint64_t> max_bytes;\n  if (sink_config.has_max_bytes_per_datagram()) {\n    max_bytes = sink_config.max_bytes_per_datagram().value();\n  }\n  return std::make_unique<Common::Statsd::UdpStatsdSink>(server.threadLocal(), std::move(address),\n                                                         true, sink_config.prefix(), max_bytes);\n}\n\nProtobufTypes::MessagePtr DogStatsdSinkFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::config::metrics::v3::DogStatsdSink>();\n}\n\nstd::string DogStatsdSinkFactory::name() const { return StatsSinkNames::get().DogStatsd; }\n\n/**\n * Static registration for the this sink factory. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(DogStatsdSinkFactory, Server::Configuration::StatsSinkFactory){\"envoy.dog_statsd\"};\n\n} // namespace DogStatsd\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/dog_statsd/config.h",
    "content": "#pragma once\n\n#include \"envoy/server/instance.h\"\n\n#include \"server/configuration_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace DogStatsd {\n\n/**\n * Config registration for the DogStatsD compatible statsd sink. @see StatsSinkFactory.\n */\nclass DogStatsdSinkFactory : Logger::Loggable<Logger::Id::config>,\n                             public Server::Configuration::StatsSinkFactory {\npublic:\n  Stats::SinkPtr createStatsSink(const Protobuf::Message& config,\n                                 Server::Configuration::ServerFactoryContext& server) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override;\n};\n\n} // namespace DogStatsd\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/hystrix/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Stats sink for the basic version of the hystrix protocol (https://github.com/b/hystrix_spec).\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"data_plane_agnostic\",\n    deps = [\n        \":hystrix_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/extensions/stat_sinks:well_known_names\",\n        \"//source/server:configuration_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hystrix_lib\",\n    srcs = [\"hystrix.cc\"],\n    hdrs = [\"hystrix.h\"],\n    deps = [\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/config:well_known_names\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/common/stats:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/stat_sinks/hystrix/config.cc",
    "content": "#include \"extensions/stat_sinks/hystrix/config.h\"\n\n#include <memory>\n\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/config/metrics/v3/stats.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/network/resolver_impl.h\"\n\n#include \"extensions/stat_sinks/hystrix/hystrix.h\"\n#include \"extensions/stat_sinks/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Hystrix {\n\nStats::SinkPtr\nHystrixSinkFactory::createStatsSink(const Protobuf::Message& config,\n                                    Server::Configuration::ServerFactoryContext& server) {\n  const auto& hystrix_sink =\n      MessageUtil::downcastAndValidate<const envoy::config::metrics::v3::HystrixSink&>(\n          config, server.messageValidationContext().staticValidationVisitor());\n  return std::make_unique<Hystrix::HystrixSink>(server, hystrix_sink.num_buckets());\n}\n\nProtobufTypes::MessagePtr HystrixSinkFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::config::metrics::v3::HystrixSink>();\n}\n\nstd::string HystrixSinkFactory::name() const { return StatsSinkNames::get().Hystrix; }\n\n/**\n * Static registration for the statsd sink factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(HystrixSinkFactory, Server::Configuration::StatsSinkFactory);\n\n} // namespace Hystrix\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/hystrix/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/server/instance.h\"\n\n#include \"server/configuration_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Hystrix {\n\nclass HystrixSinkFactory : Logger::Loggable<Logger::Id::config>,\n                           public Server::Configuration::StatsSinkFactory {\npublic:\n  // StatsSinkFactory\n  Stats::SinkPtr createStatsSink(const Protobuf::Message& config,\n                                 Server::Configuration::ServerFactoryContext& server) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override;\n};\n\n} // namespace Hystrix\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/hystrix/hystrix.cc",
    "content": "#include \"extensions/stat_sinks/hystrix/hystrix.h\"\n\n#include <chrono>\n#include <ctime>\n#include <iostream>\n#include <sstream>\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/http/headers.h\"\n#include \"common/stats/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n#include \"fmt/printf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Hystrix {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    access_control_allow_origin_handle(Http::CustomHeaders::get().AccessControlAllowOrigin);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    access_control_allow_headers_handle(Http::CustomHeaders::get().AccessControlAllowHeaders);\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::ResponseHeaders>\n    cache_control_handle(Http::CustomHeaders::get().CacheControl);\n\nconst uint64_t HystrixSink::DEFAULT_NUM_BUCKETS;\nClusterStatsCache::ClusterStatsCache(const std::string& cluster_name)\n    : cluster_name_(cluster_name) {}\n\nvoid ClusterStatsCache::printToStream(std::stringstream& out_str) {\n  const std::string cluster_name_prefix = absl::StrCat(cluster_name_, \".\");\n\n  printRollingWindow(absl::StrCat(cluster_name_prefix, \"success\"), success_, out_str);\n  printRollingWindow(absl::StrCat(cluster_name_prefix, \"errors\"), errors_, out_str);\n  printRollingWindow(absl::StrCat(cluster_name_prefix, \"timeouts\"), timeouts_, out_str);\n  printRollingWindow(absl::StrCat(cluster_name_prefix, \"rejected\"), rejected_, out_str);\n  printRollingWindow(absl::StrCat(cluster_name_prefix, \"total\"), total_, out_str);\n}\n\nvoid ClusterStatsCache::printRollingWindow(absl::string_view name, RollingWindow rolling_window,\n                      
                     std::stringstream& out_str) {\n  out_str << name << \" | \";\n  for (uint64_t& specific_stat_vec_itr : rolling_window) {\n    out_str << specific_stat_vec_itr << \" | \";\n  }\n  out_str << std::endl;\n}\n\nvoid HystrixSink::addHistogramToStream(const QuantileLatencyMap& latency_map, absl::string_view key,\n                                       std::stringstream& ss) {\n  // TODO: Consider if we better use join here\n  ss << \", \\\"\" << key << \"\\\": {\";\n  bool is_first = true;\n  for (const auto& element : latency_map) {\n    const std::string quantile = fmt::sprintf(\"%g\", element.first * 100);\n    HystrixSink::addDoubleToStream(quantile, element.second, ss, is_first);\n    is_first = false;\n  }\n  ss << \"}\";\n}\n\n// Add new value to rolling window, in place of oldest one.\nvoid HystrixSink::pushNewValue(RollingWindow& rolling_window, uint64_t value) {\n  if (rolling_window.empty()) {\n    rolling_window.resize(window_size_, value);\n  } else {\n    rolling_window[current_index_] = value;\n  }\n}\n\nuint64_t HystrixSink::getRollingValue(RollingWindow rolling_window) {\n\n  if (rolling_window.empty()) {\n    return 0;\n  }\n  // If the counter was reset, the result is negative\n  // better return 0, will be back to normal once one rolling window passes.\n  if (rolling_window[current_index_] < rolling_window[(current_index_ + 1) % window_size_]) {\n    return 0;\n  } else {\n    return rolling_window[current_index_] - rolling_window[(current_index_ + 1) % window_size_];\n  }\n}\n\nvoid HystrixSink::updateRollingWindowMap(const Upstream::ClusterInfo& cluster_info,\n                                         ClusterStatsCache& cluster_stats_cache) {\n  Upstream::ClusterStats& cluster_stats = cluster_info.stats();\n  Stats::Scope& cluster_stats_scope = cluster_info.statsScope();\n\n  // Combining timeouts+retries - retries are counted  as separate requests\n  // (alternative: each request including the retries counted as 1).\n  uint64_t 
timeouts = cluster_stats.upstream_rq_timeout_.value() +\n                      cluster_stats.upstream_rq_per_try_timeout_.value();\n\n  pushNewValue(cluster_stats_cache.timeouts_, timeouts);\n\n  // Combining errors+retry errors - retries are counted as separate requests\n  // (alternative: each request including the retries counted as 1)\n  // since timeouts are 504 (or 408), deduce them from here (\"-\" sign).\n  // Timeout retries were not counted here anyway.\n  uint64_t errors = cluster_stats_scope.counterFromStatName(upstream_rq_5xx_).value() +\n                    cluster_stats_scope.counterFromStatName(retry_upstream_rq_5xx_).value() +\n                    cluster_stats_scope.counterFromStatName(upstream_rq_4xx_).value() +\n                    cluster_stats_scope.counterFromStatName(retry_upstream_rq_4xx_).value() -\n                    cluster_stats.upstream_rq_timeout_.value();\n\n  pushNewValue(cluster_stats_cache.errors_, errors);\n\n  uint64_t success = cluster_stats_scope.counterFromStatName(upstream_rq_2xx_).value();\n  pushNewValue(cluster_stats_cache.success_, success);\n\n  uint64_t rejected = cluster_stats.upstream_rq_pending_overflow_.value();\n  pushNewValue(cluster_stats_cache.rejected_, rejected);\n\n  // should not take from upstream_rq_total since it is updated before its components,\n  // leading to wrong results such as error percentage higher than 100%\n  uint64_t total = errors + timeouts + success + rejected;\n  pushNewValue(cluster_stats_cache.total_, total);\n\n  ENVOY_LOG(trace, \"{}\", printRollingWindows());\n}\n\nvoid HystrixSink::resetRollingWindow() { cluster_stats_cache_map_.clear(); }\n\nvoid HystrixSink::addStringToStream(absl::string_view key, absl::string_view value,\n                                    std::stringstream& info, bool is_first) {\n  std::string quoted_value = absl::StrCat(\"\\\"\", value, \"\\\"\");\n  addInfoToStream(key, quoted_value, info, is_first);\n}\n\nvoid 
HystrixSink::addIntToStream(absl::string_view key, uint64_t value, std::stringstream& info,\n                                 bool is_first) {\n  addInfoToStream(key, std::to_string(value), info, is_first);\n}\n\nvoid HystrixSink::addDoubleToStream(absl::string_view key, double value, std::stringstream& info,\n                                    bool is_first) {\n  addInfoToStream(key, std::to_string(value), info, is_first);\n}\n\nvoid HystrixSink::addInfoToStream(absl::string_view key, absl::string_view value,\n                                  std::stringstream& info, bool is_first) {\n  if (!is_first) {\n    info << \", \";\n  }\n  std::string added_info = absl::StrCat(\"\\\"\", key, \"\\\": \", value);\n  info << added_info;\n}\n\nvoid HystrixSink::addHystrixCommand(ClusterStatsCache& cluster_stats_cache,\n                                    absl::string_view cluster_name,\n                                    uint64_t max_concurrent_requests, uint64_t reporting_hosts,\n                                    std::chrono::milliseconds rolling_window_ms,\n                                    const QuantileLatencyMap& histogram, std::stringstream& ss) {\n\n  std::time_t currentTime = std::chrono::system_clock::to_time_t(server_.timeSource().systemTime());\n\n  ss << \"data: {\";\n  addStringToStream(\"type\", \"HystrixCommand\", ss, true);\n  addStringToStream(\"name\", cluster_name, ss);\n  addStringToStream(\"group\", \"NA\", ss);\n  addIntToStream(\"currentTime\", static_cast<uint64_t>(currentTime), ss);\n  addInfoToStream(\"isCircuitBreakerOpen\", \"false\", ss);\n\n  uint64_t errors = getRollingValue(cluster_stats_cache.errors_);\n  uint64_t timeouts = getRollingValue(cluster_stats_cache.timeouts_);\n  uint64_t rejected = getRollingValue(cluster_stats_cache.rejected_);\n  uint64_t total = getRollingValue(cluster_stats_cache.total_);\n\n  uint64_t error_rate = total == 0 ? 
0 : (100 * (errors + timeouts + rejected)) / total;\n\n  addIntToStream(\"errorPercentage\", error_rate, ss);\n  addIntToStream(\"errorCount\", errors, ss);\n  addIntToStream(\"requestCount\", total, ss);\n  addIntToStream(\"rollingCountCollapsedRequests\", 0, ss);\n  addIntToStream(\"rollingCountExceptionsThrown\", 0, ss);\n  addIntToStream(\"rollingCountFailure\", errors, ss);\n  addIntToStream(\"rollingCountFallbackFailure\", 0, ss);\n  addIntToStream(\"rollingCountFallbackRejection\", 0, ss);\n  addIntToStream(\"rollingCountFallbackSuccess\", 0, ss);\n  addIntToStream(\"rollingCountResponsesFromCache\", 0, ss);\n\n  // Envoy's \"circuit breaker\" has similar meaning to hystrix's isolation\n  // so we count upstream_rq_pending_overflow and present it as rollingCountSemaphoreRejected\n  addIntToStream(\"rollingCountSemaphoreRejected\", rejected, ss);\n\n  // Hystrix's short circuit is not similar to Envoy's since it is triggered by 503 responses\n  // there is no parallel counter in Envoy since as a result of errors (outlier detection)\n  // requests are not rejected, but rather the node is removed from load balancer healthy pool.\n  addIntToStream(\"rollingCountShortCircuited\", 0, ss);\n  addIntToStream(\"rollingCountSuccess\", getRollingValue(cluster_stats_cache.success_), ss);\n  addIntToStream(\"rollingCountThreadPoolRejected\", 0, ss);\n  addIntToStream(\"rollingCountTimeout\", timeouts, ss);\n  addIntToStream(\"rollingCountBadRequests\", 0, ss);\n  addIntToStream(\"currentConcurrentExecutionCount\", 0, ss);\n  addStringToStream(\"latencyExecute_mean\", \"null\", ss);\n  addHistogramToStream(histogram, \"latencyExecute\", ss);\n  addIntToStream(\"propertyValue_circuitBreakerRequestVolumeThreshold\", 0, ss);\n  addIntToStream(\"propertyValue_circuitBreakerSleepWindowInMilliseconds\", 0, ss);\n  addIntToStream(\"propertyValue_circuitBreakerErrorThresholdPercentage\", 0, ss);\n  addInfoToStream(\"propertyValue_circuitBreakerForceOpen\", \"false\", ss);\n  
addInfoToStream(\"propertyValue_circuitBreakerForceClosed\", \"true\", ss);\n  addStringToStream(\"propertyValue_executionIsolationStrategy\", \"SEMAPHORE\", ss);\n  addIntToStream(\"propertyValue_executionIsolationThreadTimeoutInMilliseconds\", 0, ss);\n  addInfoToStream(\"propertyValue_executionIsolationThreadInterruptOnTimeout\", \"false\", ss);\n  addIntToStream(\"propertyValue_executionIsolationSemaphoreMaxConcurrentRequests\",\n                 max_concurrent_requests, ss);\n  addIntToStream(\"propertyValue_fallbackIsolationSemaphoreMaxConcurrentRequests\", 0, ss);\n  addInfoToStream(\"propertyValue_requestCacheEnabled\", \"false\", ss);\n  addInfoToStream(\"propertyValue_requestLogEnabled\", \"true\", ss);\n  addIntToStream(\"reportingHosts\", reporting_hosts, ss);\n  addIntToStream(\"propertyValue_metricsRollingStatisticalWindowInMilliseconds\",\n                 rolling_window_ms.count(), ss);\n\n  ss << \"}\" << std::endl << std::endl;\n}\n\nvoid HystrixSink::addHystrixThreadPool(absl::string_view cluster_name, uint64_t queue_size,\n                                       uint64_t reporting_hosts,\n                                       std::chrono::milliseconds rolling_window_ms,\n                                       std::stringstream& ss) {\n\n  ss << \"data: {\";\n  addIntToStream(\"currentPoolSize\", 0, ss, true);\n  addIntToStream(\"rollingMaxActiveThreads\", 0, ss);\n  addIntToStream(\"currentActiveCount\", 0, ss);\n  addIntToStream(\"currentCompletedTaskCount\", 0, ss);\n  addIntToStream(\"propertyValue_queueSizeRejectionThreshold\", queue_size, ss);\n  addStringToStream(\"type\", \"HystrixThreadPool\", ss);\n  addIntToStream(\"reportingHosts\", reporting_hosts, ss);\n  addIntToStream(\"propertyValue_metricsRollingStatisticalWindowInMilliseconds\",\n                 rolling_window_ms.count(), ss);\n  addStringToStream(\"name\", cluster_name, ss);\n  addIntToStream(\"currentLargestPoolSize\", 0, ss);\n  addIntToStream(\"currentCorePoolSize\", 0, 
ss);\n  addIntToStream(\"currentQueueSize\", 0, ss);\n  addIntToStream(\"currentTaskCount\", 0, ss);\n  addIntToStream(\"rollingCountThreadsExecuted\", 0, ss);\n  addIntToStream(\"currentMaximumPoolSize\", 0, ss);\n\n  ss << \"}\" << std::endl << std::endl;\n}\n\nvoid HystrixSink::addClusterStatsToStream(ClusterStatsCache& cluster_stats_cache,\n                                          absl::string_view cluster_name,\n                                          uint64_t max_concurrent_requests,\n                                          uint64_t reporting_hosts,\n                                          std::chrono::milliseconds rolling_window_ms,\n                                          const QuantileLatencyMap& histogram,\n                                          std::stringstream& ss) {\n\n  addHystrixCommand(cluster_stats_cache, cluster_name, max_concurrent_requests, reporting_hosts,\n                    rolling_window_ms, histogram, ss);\n  addHystrixThreadPool(cluster_name, max_concurrent_requests, reporting_hosts, rolling_window_ms,\n                       ss);\n}\n\nconst std::string HystrixSink::printRollingWindows() {\n  std::stringstream out_str;\n\n  for (auto& itr : cluster_stats_cache_map_) {\n    ClusterStatsCache& cluster_stats_cache = *(itr.second);\n    cluster_stats_cache.printToStream(out_str);\n  }\n  return out_str.str();\n}\n\nHystrixSink::HystrixSink(Server::Configuration::ServerFactoryContext& server,\n                         const uint64_t num_buckets)\n    : server_(server), current_index_(num_buckets > 0 ? 
num_buckets : DEFAULT_NUM_BUCKETS),\n      window_size_(current_index_ + 1), stat_name_pool_(server.scope().symbolTable()),\n      cluster_name_(stat_name_pool_.add(Config::TagNames::get().CLUSTER_NAME)),\n      cluster_upstream_rq_time_(stat_name_pool_.add(\"cluster.upstream_rq_time\")),\n      membership_total_(stat_name_pool_.add(\"membership_total\")),\n      retry_upstream_rq_4xx_(stat_name_pool_.add(\"retry.upstream_rq_4xx\")),\n      retry_upstream_rq_5xx_(stat_name_pool_.add(\"retry.upstream_rq_5xx\")),\n      upstream_rq_2xx_(stat_name_pool_.add(\"upstream_rq_2xx\")),\n      upstream_rq_4xx_(stat_name_pool_.add(\"upstream_rq_4xx\")),\n      upstream_rq_5xx_(stat_name_pool_.add(\"upstream_rq_5xx\")) {\n  Server::Admin& admin = server_.admin();\n  ENVOY_LOG(debug,\n            \"adding hystrix_event_stream endpoint to enable connection to hystrix dashboard\");\n  admin.addHandler(\"/hystrix_event_stream\", \"send hystrix event stream\",\n                   MAKE_ADMIN_HANDLER(handlerHystrixEventStream), false, false);\n}\n\nHttp::Code HystrixSink::handlerHystrixEventStream(absl::string_view,\n                                                  Http::ResponseHeaderMap& response_headers,\n                                                  Buffer::Instance&,\n                                                  Server::AdminStream& admin_stream) {\n\n  response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.TextEventStream);\n  response_headers.setReferenceInline(cache_control_handle.handle(),\n                                      Http::CustomHeaders::get().CacheControlValues.NoCache);\n  response_headers.setReferenceConnection(Http::Headers::get().ConnectionValues.Close);\n  response_headers.setReferenceInline(access_control_allow_headers_handle.handle(),\n                                      AccessControlAllowHeadersValue.AllowHeadersHystrix);\n  response_headers.setReferenceInline(access_control_allow_origin_handle.handle(),\n              
                        Http::CustomHeaders::get().AccessControlAllowOriginValue.All);\n\n  Http::StreamDecoderFilterCallbacks& stream_decoder_filter_callbacks =\n      admin_stream.getDecoderFilterCallbacks();\n\n  // Disable chunk-encoding in HTTP/1.x.\n  if (stream_decoder_filter_callbacks.streamInfo().protocol() < Http::Protocol::Http2) {\n    admin_stream.http1StreamEncoderOptions().value().get().disableChunkEncoding();\n  }\n\n  registerConnection(&stream_decoder_filter_callbacks);\n\n  admin_stream.setEndStreamOnComplete(false); // set streaming\n\n  // Separated out just so it's easier to understand\n  auto on_destroy_callback = [this, &stream_decoder_filter_callbacks]() {\n    ENVOY_LOG(debug, \"stopped sending data to hystrix dashboard on port {}\",\n              stream_decoder_filter_callbacks.connection()->remoteAddress()->asString());\n\n    // Unregister the callbacks from the sink so data is no longer encoded through them.\n    unregisterConnection(&stream_decoder_filter_callbacks);\n  };\n\n  // Add the callback to the admin_filter list of callbacks\n  admin_stream.addOnDestroyCallback(std::move(on_destroy_callback));\n\n  ENVOY_LOG(debug, \"started sending data to hystrix dashboard on port {}\",\n            stream_decoder_filter_callbacks.connection()->remoteAddress()->asString());\n  return Http::Code::OK;\n}\n\nvoid HystrixSink::flush(Stats::MetricSnapshot& snapshot) {\n  if (callbacks_list_.empty()) {\n    return;\n  }\n  incCounter();\n  std::stringstream ss;\n  Upstream::ClusterManager::ClusterInfoMap clusters = server_.clusterManager().clusters();\n\n  // Save a map of the relevant histograms per cluster in a convenient format.\n  absl::node_hash_map<std::string, QuantileLatencyMap> time_histograms;\n  for (const auto& histogram : snapshot.histograms()) {\n    if (histogram.get().tagExtractedStatName() == cluster_upstream_rq_time_) {\n      absl::optional<Stats::StatName> value =\n          Stats::Utility::findTag(histogram.get(), 
cluster_name_);\n      // Make sure we found the cluster name tag\n      ASSERT(value);\n      std::string value_str = server_.scope().symbolTable().toString(*value);\n      auto it_bool_pair = time_histograms.emplace(std::make_pair(value_str, QuantileLatencyMap()));\n      // Make sure histogram with this name was not already added\n      ASSERT(it_bool_pair.second);\n      QuantileLatencyMap& hist_map = it_bool_pair.first->second;\n\n      const std::vector<double>& supported_quantiles =\n          histogram.get().intervalStatistics().supportedQuantiles();\n      for (size_t i = 0; i < supported_quantiles.size(); ++i) {\n        // binary-search here is likely not worth it, as hystrix_quantiles has <10 elements.\n        if (std::find(hystrix_quantiles.begin(), hystrix_quantiles.end(), supported_quantiles[i]) !=\n            hystrix_quantiles.end()) {\n          const double value = histogram.get().intervalStatistics().computedQuantiles()[i];\n          if (!std::isnan(value)) {\n            hist_map[supported_quantiles[i]] = value;\n          }\n        }\n      }\n    }\n  }\n\n  for (auto& cluster : clusters) {\n    Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.second.get().info();\n\n    std::unique_ptr<ClusterStatsCache>& cluster_stats_cache_ptr =\n        cluster_stats_cache_map_[cluster_info->name()];\n    if (cluster_stats_cache_ptr == nullptr) {\n      cluster_stats_cache_ptr = std::make_unique<ClusterStatsCache>(cluster_info->name());\n    }\n\n    // update rolling window with cluster stats\n    updateRollingWindowMap(*cluster_info, *cluster_stats_cache_ptr);\n\n    // append it to stream to be sent\n    addClusterStatsToStream(\n        *cluster_stats_cache_ptr, cluster_info->name(),\n        cluster_info->resourceManager(Upstream::ResourcePriority::Default).pendingRequests().max(),\n        cluster_info->statsScope()\n            .gaugeFromStatName(membership_total_, Stats::Gauge::ImportMode::NeverImport)\n            .value(),\n        
server_.statsFlushInterval(), time_histograms[cluster_info->name()], ss);\n  }\n\n  Buffer::OwnedImpl data;\n  for (auto callbacks : callbacks_list_) {\n    data.add(ss.str());\n    callbacks->encodeData(data, false);\n  }\n\n  // send keep alive ping\n  // TODO (@trabetti) : is it ok to send together with data?\n  Buffer::OwnedImpl ping_data;\n  for (auto callbacks : callbacks_list_) {\n    ping_data.add(\":\\n\\n\");\n    callbacks->encodeData(ping_data, false);\n  }\n\n  // check if any clusters were removed, and remove from cache\n  if (clusters.size() < cluster_stats_cache_map_.size()) {\n    for (auto it = cluster_stats_cache_map_.begin(); it != cluster_stats_cache_map_.end();) {\n      if (clusters.find(it->first) == clusters.end()) {\n        auto next_it = std::next(it);\n        cluster_stats_cache_map_.erase(it);\n        it = next_it;\n      } else {\n        ++it;\n      }\n    }\n  }\n}\n\nvoid HystrixSink::registerConnection(Http::StreamDecoderFilterCallbacks* callbacks_to_register) {\n  callbacks_list_.emplace_back(callbacks_to_register);\n}\n\nvoid HystrixSink::unregisterConnection(Http::StreamDecoderFilterCallbacks* callbacks_to_remove) {\n  for (auto it = callbacks_list_.begin(); it != callbacks_list_.end(); ++it) {\n    if ((*it)->streamId() == callbacks_to_remove->streamId()) {\n      callbacks_list_.erase(it);\n      break;\n    }\n  }\n  // If there are no callbacks, clear the map to avoid stale values or having to keep updating the\n  // map. When a new callback is assigned, the rollingWindow is initialized with current statistics\n  // and within RollingWindow time, the results showed in the dashboard will be reliable\n  if (callbacks_list_.empty()) {\n    resetRollingWindow();\n  }\n}\n\n} // namespace Hystrix\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/hystrix/hystrix.h",
    "content": "#pragma once\n\n#include <map>\n#include <memory>\n#include <vector>\n\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/sink.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Hystrix {\n\nusing RollingWindow = std::vector<uint64_t>;\nusing RollingStatsMap = std::map<const std::string, RollingWindow>;\n\nusing QuantileLatencyMap = absl::node_hash_map<double, double>;\nstatic const std::vector<double> hystrix_quantiles = {0,    0.25, 0.5,   0.75, 0.90,\n                                                      0.95, 0.99, 0.995, 1};\n\nstatic const struct {\n  absl::string_view AllowHeadersHystrix{\"Accept, Cache-Control, X-Requested-With, Last-Event-ID\"};\n} AccessControlAllowHeadersValue;\n\nstruct ClusterStatsCache {\n  ClusterStatsCache(const std::string& cluster_name);\n\n  void printToStream(std::stringstream& out_str);\n  void printRollingWindow(absl::string_view name, RollingWindow rolling_window,\n                          std::stringstream& out_str);\n  std::string cluster_name_;\n\n  // Rolling windows\n  RollingWindow errors_;\n  RollingWindow success_;\n  RollingWindow total_;\n  RollingWindow timeouts_;\n  RollingWindow rejected_;\n};\n\nusing ClusterStatsCachePtr = std::unique_ptr<ClusterStatsCache>;\n\nclass HystrixSink : public Stats::Sink, public Logger::Loggable<Logger::Id::hystrix> {\npublic:\n  HystrixSink(Server::Configuration::ServerFactoryContext& server, uint64_t num_buckets);\n  Http::Code handlerHystrixEventStream(absl::string_view, Http::ResponseHeaderMap& response_headers,\n                                       Buffer::Instance&, Server::AdminStream& admin_stream);\n  void flush(Stats::MetricSnapshot& snapshot) override;\n  void onHistogramComplete(const Stats::Histogram&, uint64_t) override{};\n\n  /**\n   * Register a new connection.\n   */\n  void 
registerConnection(Http::StreamDecoderFilterCallbacks* callbacks_to_register);\n\n  /**\n   * Remove registered connection.\n   */\n  void unregisterConnection(Http::StreamDecoderFilterCallbacks* callbacks_to_remove);\n\n  /**\n   * Add new value to top of rolling window, pushing out the oldest value.\n   */\n  void pushNewValue(RollingWindow& rolling_window, uint64_t value);\n\n  /**\n   * Increment pointer of next value to add to rolling window.\n   */\n  void incCounter() { current_index_ = (current_index_ + 1) % window_size_; }\n\n  /**\n   * Generate the streams to be sent to hystrix dashboard.\n   */\n  void addClusterStatsToStream(ClusterStatsCache& cluster_stats_cache,\n                               absl::string_view cluster_name, uint64_t max_concurrent_requests,\n                               uint64_t reporting_hosts,\n                               std::chrono::milliseconds rolling_window_ms,\n                               const QuantileLatencyMap& histogram, std::stringstream& ss);\n\n  /**\n   * Calculate values needed to create the stream and write into the map.\n   */\n  void updateRollingWindowMap(const Upstream::ClusterInfo& cluster_info,\n                              ClusterStatsCache& cluster_stats_cache);\n  /**\n   * Clear map.\n   */\n  void resetRollingWindow();\n\n  /**\n   * Return string representing current state of the map. 
for DEBUG.\n   */\n  const std::string printRollingWindows();\n\n  /**\n   * Get the statistic's value change over the rolling window time frame.\n   */\n  uint64_t getRollingValue(RollingWindow rolling_window);\n\n  /**\n   * Format the given key and value to \"key\"=value, and adding to the stringstream.\n   */\n  static void addInfoToStream(absl::string_view key, absl::string_view value,\n                              std::stringstream& info, bool is_first = false);\n\n  /**\n   * Format the given key and double value to \"key\"=<string of uint64_t>, and adding to the\n   * stringstream.\n   */\n  static void addDoubleToStream(absl::string_view key, double value, std::stringstream& info,\n                                bool is_first);\n\n  /**\n   * Format the given key and absl::string_view value to \"key\"=\"value\", and adding to the\n   * stringstream.\n   */\n  static void addStringToStream(absl::string_view key, absl::string_view value,\n                                std::stringstream& info, bool is_first = false);\n\n  /**\n   * Format the given key and uint64_t value to \"key\"=<string of uint64_t>, and adding to the\n   * stringstream.\n   */\n  static void addIntToStream(absl::string_view key, uint64_t value, std::stringstream& info,\n                             bool is_first = false);\n\n  static void addHistogramToStream(const QuantileLatencyMap& latency_map, absl::string_view key,\n                                   std::stringstream& ss);\n\nprivate:\n  /**\n   * Generate HystrixCommand event stream.\n   */\n  void addHystrixCommand(ClusterStatsCache& cluster_stats_cache, absl::string_view cluster_name,\n                         uint64_t max_concurrent_requests, uint64_t reporting_hosts,\n                         std::chrono::milliseconds rolling_window_ms,\n                         const QuantileLatencyMap& histogram, std::stringstream& ss);\n\n  /**\n   * Generate HystrixThreadPool event stream.\n   */\n  void 
addHystrixThreadPool(absl::string_view cluster_name, uint64_t queue_size,\n                            uint64_t reporting_hosts, std::chrono::milliseconds rolling_window_ms,\n                            std::stringstream& ss);\n\n  std::vector<Http::StreamDecoderFilterCallbacks*> callbacks_list_;\n  Server::Configuration::ServerFactoryContext& server_;\n  uint64_t current_index_;\n  const uint64_t window_size_;\n  static const uint64_t DEFAULT_NUM_BUCKETS = 10;\n\n  // Map from cluster names to a struct of all of that cluster's stat windows.\n  absl::node_hash_map<std::string, ClusterStatsCachePtr> cluster_stats_cache_map_;\n\n  // Saved StatNames for fast comparisons in loop.\n  // TODO(mattklein123): Many/all of these stats should just be pulled directly from the cluster\n  // stats directly. This needs some cleanup.\n  Stats::StatNamePool stat_name_pool_;\n  const Stats::StatName cluster_name_;\n  const Stats::StatName cluster_upstream_rq_time_;\n  const Stats::StatName membership_total_;\n  const Stats::StatName retry_upstream_rq_4xx_;\n  const Stats::StatName retry_upstream_rq_5xx_;\n  const Stats::StatName upstream_rq_2xx_;\n  const Stats::StatName upstream_rq_4xx_;\n  const Stats::StatName upstream_rq_5xx_;\n};\n\nusing HystrixSinkPtr = std::unique_ptr<HystrixSink>;\n\n} // namespace Hystrix\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/metrics_service/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Stats sink for the gRPC metrics service: api/envoy/service/metrics/v2/metrics_service.proto\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"metrics_service_grpc_lib\",\n    srcs = [\"grpc_metrics_service_impl.cc\"],\n    hdrs = [\"grpc_metrics_service_impl.h\"],\n    deps = [\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/singleton:instance_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/grpc:async_client_lib\",\n        \"@envoy_api//envoy/service/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"metrics_proto_descriptors_lib\",\n    srcs = [\"grpc_metrics_proto_descriptors.cc\"],\n    hdrs = [\"grpc_metrics_proto_descriptors.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/protobuf\",\n        \"@envoy_api//envoy/config/metrics/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/metrics/v2:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"data_plane_agnostic\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/stat_sinks:well_known_names\",\n        \"//source/extensions/stat_sinks/metrics_service:metrics_proto_descriptors_lib\",\n        \"//source/extensions/stat_sinks/metrics_service:metrics_service_grpc_lib\",\n        \"//source/server:configuration_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/stat_sinks/metrics_service/config.cc",
    "content": "#include \"extensions/stat_sinks/metrics_service/config.h\"\n\n#include \"envoy/config/metrics/v3/metrics_service.pb.h\"\n#include \"envoy/config/metrics/v3/metrics_service.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/grpc/async_client_impl.h\"\n#include \"common/network/resolver_impl.h\"\n\n#include \"extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h\"\n#include \"extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h\"\n#include \"extensions/stat_sinks/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace MetricsService {\n\nStats::SinkPtr\nMetricsServiceSinkFactory::createStatsSink(const Protobuf::Message& config,\n                                           Server::Configuration::ServerFactoryContext& server) {\n  validateProtoDescriptors();\n\n  const auto& sink_config =\n      MessageUtil::downcastAndValidate<const envoy::config::metrics::v3::MetricsServiceConfig&>(\n          config, server.messageValidationContext().staticValidationVisitor());\n  const auto& grpc_service = sink_config.grpc_service();\n  const auto& transport_api_version = sink_config.transport_api_version();\n  ENVOY_LOG(debug, \"Metrics Service gRPC service configuration: {}\", grpc_service.DebugString());\n\n  std::shared_ptr<GrpcMetricsStreamer> grpc_metrics_streamer =\n      std::make_shared<GrpcMetricsStreamerImpl>(\n          server.clusterManager().grpcAsyncClientManager().factoryForGrpcService(\n              grpc_service, server.scope(), false),\n          server.localInfo(), transport_api_version);\n\n  return std::make_unique<MetricsServiceSink>(\n      grpc_metrics_streamer, server.timeSource(),\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(sink_config, report_counters_as_deltas, false));\n}\n\nProtobufTypes::MessagePtr MetricsServiceSinkFactory::createEmptyConfigProto() {\n  return 
std::unique_ptr<envoy::config::metrics::v3::MetricsServiceConfig>(\n      std::make_unique<envoy::config::metrics::v3::MetricsServiceConfig>());\n}\n\nstd::string MetricsServiceSinkFactory::name() const { return StatsSinkNames::get().MetricsService; }\n\n/**\n * Static registration for the this sink factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(MetricsServiceSinkFactory,\n                 Server::Configuration::StatsSinkFactory){\"envoy.metrics_service\"};\n\n} // namespace MetricsService\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/metrics_service/config.h",
    "content": "#pragma once\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"server/configuration_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace MetricsService {\n\n/**\n * Config registration for the MetricsService stats sink. @see StatsSinkFactory.\n */\nclass MetricsServiceSinkFactory : Logger::Loggable<Logger::Id::config>,\n                                  public Server::Configuration::StatsSinkFactory {\npublic:\n  Stats::SinkPtr createStatsSink(const Protobuf::Message& config,\n                                 Server::Configuration::ServerFactoryContext& server) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override;\n};\n\nDECLARE_FACTORY(MetricsServiceSinkFactory);\n\n} // namespace MetricsService\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.cc",
    "content": "#include \"extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h\"\n\n#include \"envoy/config/metrics/v2/metrics_service.pb.h\"\n#include \"envoy/service/metrics/v2/metrics_service.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/api_version.h\"\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace MetricsService {\n\nvoid validateProtoDescriptors() {\n  // https://github.com/envoyproxy/envoy/issues/9639\n  const API_NO_BOOST(envoy::service::metrics::v2::StreamMetricsMessage) _dummy_service_v2;\n  // https://github.com/envoyproxy/envoy/pull/9618 made it necessary to register the previous\n  // version's config descriptor by calling ApiTypeOracle::getEarlierVersionDescriptor which has an\n  // assertion for nullptr types.\n  const API_NO_BOOST(envoy::config::metrics::v2::MetricsServiceConfig) _dummy_config_v2;\n\n  const auto method = \"envoy.service.metrics.v2.MetricsService.StreamMetrics\";\n\n  RELEASE_ASSERT(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) != nullptr,\n                 \"\");\n\n  const auto config = \"envoy.config.metrics.v2.MetricsServiceConfig\";\n\n  // Keeping this as an ASSERT because ApiTypeOracle::getEarlierVersionDescriptor also has an\n  // ASSERT.\n  ASSERT(Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(config) != nullptr, \"\");\n};\n} // namespace MetricsService\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h",
    "content": "#pragma once\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace MetricsService {\n\n// This function validates that the method descriptors for gRPC services and type descriptors that\n// are referenced in Any messages are available in the descriptor pool.\nvoid validateProtoDescriptors();\n} // namespace MetricsService\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc",
    "content": "#include \"extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/service/metrics/v3/metrics_service.pb.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace MetricsService {\n\nGrpcMetricsStreamerImpl::GrpcMetricsStreamerImpl(\n    Grpc::AsyncClientFactoryPtr&& factory, const LocalInfo::LocalInfo& local_info,\n    envoy::config::core::v3::ApiVersion transport_api_version)\n    : client_(factory->create()), local_info_(local_info),\n      service_method_(\n          Grpc::VersionedMethods(\"envoy.service.metrics.v3.MetricsService.StreamMetrics\",\n                                 \"envoy.service.metrics.v2.MetricsService.StreamMetrics\")\n              .getMethodDescriptorForVersion(transport_api_version)),\n      transport_api_version_(transport_api_version) {}\n\nvoid GrpcMetricsStreamerImpl::send(envoy::service::metrics::v3::StreamMetricsMessage& message) {\n  if (stream_ == nullptr) {\n    stream_ = client_->start(service_method_, *this, Http::AsyncClient::StreamOptions());\n    auto* identifier = message.mutable_identifier();\n    *identifier->mutable_node() = local_info_.node();\n  }\n  if (stream_ != nullptr) {\n    stream_->sendMessage(message, transport_api_version_, false);\n  }\n}\n\nMetricsServiceSink::MetricsServiceSink(const GrpcMetricsStreamerSharedPtr& grpc_metrics_streamer,\n                                       TimeSource& time_source,\n                                       const bool report_counters_as_deltas)\n    : grpc_metrics_streamer_(grpc_metrics_streamer), time_source_(time_source),\n      report_counters_as_deltas_(report_counters_as_deltas) {}\n\nvoid 
MetricsServiceSink::flushCounter(\n    const Stats::MetricSnapshot::CounterSnapshot& counter_snapshot) {\n  io::prometheus::client::MetricFamily* metrics_family = message_.add_envoy_metrics();\n  metrics_family->set_type(io::prometheus::client::MetricType::COUNTER);\n  metrics_family->set_name(counter_snapshot.counter_.get().name());\n  auto* metric = metrics_family->add_metric();\n  metric->set_timestamp_ms(std::chrono::duration_cast<std::chrono::milliseconds>(\n                               time_source_.systemTime().time_since_epoch())\n                               .count());\n  auto* counter_metric = metric->mutable_counter();\n  if (report_counters_as_deltas_) {\n    counter_metric->set_value(counter_snapshot.delta_);\n  } else {\n    counter_metric->set_value(counter_snapshot.counter_.get().value());\n  }\n}\n\nvoid MetricsServiceSink::flushGauge(const Stats::Gauge& gauge) {\n  io::prometheus::client::MetricFamily* metrics_family = message_.add_envoy_metrics();\n  metrics_family->set_type(io::prometheus::client::MetricType::GAUGE);\n  metrics_family->set_name(gauge.name());\n  auto* metric = metrics_family->add_metric();\n  metric->set_timestamp_ms(std::chrono::duration_cast<std::chrono::milliseconds>(\n                               time_source_.systemTime().time_since_epoch())\n                               .count());\n  auto* gauge_metric = metric->mutable_gauge();\n  gauge_metric->set_value(gauge.value());\n}\n\nvoid MetricsServiceSink::flushHistogram(const Stats::ParentHistogram& envoy_histogram) {\n  // TODO(ramaraochavali): Currently we are sending both quantile information and bucket\n  // information. 
We should make this configurable if it turns out that sending both affects\n  // performance.\n\n  // Add summary information for histograms.\n  io::prometheus::client::MetricFamily* summary_metrics_family = message_.add_envoy_metrics();\n  summary_metrics_family->set_type(io::prometheus::client::MetricType::SUMMARY);\n  summary_metrics_family->set_name(envoy_histogram.name());\n  auto* summary_metric = summary_metrics_family->add_metric();\n  summary_metric->set_timestamp_ms(std::chrono::duration_cast<std::chrono::milliseconds>(\n                                       time_source_.systemTime().time_since_epoch())\n                                       .count());\n  auto* summary = summary_metric->mutable_summary();\n  const Stats::HistogramStatistics& hist_stats = envoy_histogram.intervalStatistics();\n  for (size_t i = 0; i < hist_stats.supportedQuantiles().size(); i++) {\n    auto* quantile = summary->add_quantile();\n    quantile->set_quantile(hist_stats.supportedQuantiles()[i]);\n    quantile->set_value(hist_stats.computedQuantiles()[i]);\n  }\n\n  // Add bucket information for histograms.\n  io::prometheus::client::MetricFamily* histogram_metrics_family = message_.add_envoy_metrics();\n  histogram_metrics_family->set_type(io::prometheus::client::MetricType::HISTOGRAM);\n  histogram_metrics_family->set_name(envoy_histogram.name());\n  auto* histogram_metric = histogram_metrics_family->add_metric();\n  histogram_metric->set_timestamp_ms(std::chrono::duration_cast<std::chrono::milliseconds>(\n                                         time_source_.systemTime().time_since_epoch())\n                                         .count());\n  auto* histogram = histogram_metric->mutable_histogram();\n  histogram->set_sample_count(hist_stats.sampleCount());\n  histogram->set_sample_sum(hist_stats.sampleSum());\n  for (size_t i = 0; i < hist_stats.supportedBuckets().size(); i++) {\n    auto* bucket = histogram->add_bucket();\n    
bucket->set_upper_bound(hist_stats.supportedBuckets()[i]);\n    bucket->set_cumulative_count(hist_stats.computedBuckets()[i]);\n  }\n}\n\nvoid MetricsServiceSink::flush(Stats::MetricSnapshot& snapshot) {\n  message_.clear_envoy_metrics();\n\n  // TODO(mrice32): there's probably some more sophisticated preallocation we can do here where we\n  // actually preallocate the submessages and then pass ownership to the proto (rather than just\n  // preallocating the pointer array).\n  message_.mutable_envoy_metrics()->Reserve(snapshot.counters().size() + snapshot.gauges().size() +\n                                            snapshot.histograms().size());\n  for (const auto& counter : snapshot.counters()) {\n    if (counter.counter_.get().used()) {\n      flushCounter(counter);\n    }\n  }\n\n  for (const auto& gauge : snapshot.gauges()) {\n    if (gauge.get().used()) {\n      flushGauge(gauge.get());\n    }\n  }\n\n  for (const auto& histogram : snapshot.histograms()) {\n    if (histogram.get().used()) {\n      flushHistogram(histogram.get());\n    }\n  }\n\n  grpc_metrics_streamer_->send(message_);\n  // for perf reasons, clear the identifier after the first flush.\n  if (message_.has_identifier()) {\n    message_.clear_identifier();\n  }\n}\n\n} // namespace MetricsService\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/service/metrics/v3/metrics_service.pb.h\"\n#include \"envoy/singleton/instance.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/sink.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/grpc/typed_async_client.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace MetricsService {\n\n/**\n * Interface for metrics streamer.\n */\nclass GrpcMetricsStreamer\n    : public Grpc::AsyncStreamCallbacks<envoy::service::metrics::v3::StreamMetricsResponse> {\npublic:\n  ~GrpcMetricsStreamer() override = default;\n\n  /**\n   * Send Metrics Message.\n   * @param message supplies the metrics to send.\n   */\n  virtual void send(envoy::service::metrics::v3::StreamMetricsMessage& message) PURE;\n\n  // Grpc::AsyncStreamCallbacks\n  void onCreateInitialMetadata(Http::RequestHeaderMap&) override {}\n  void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&&) override {}\n  void\n  onReceiveMessage(std::unique_ptr<envoy::service::metrics::v3::StreamMetricsResponse>&&) override {\n  }\n  void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&&) override {}\n  void onRemoteClose(Grpc::Status::GrpcStatus, const std::string&) override{};\n};\n\nusing GrpcMetricsStreamerSharedPtr = std::shared_ptr<GrpcMetricsStreamer>;\n\n/**\n * Production implementation of GrpcMetricsStreamer\n */\nclass GrpcMetricsStreamerImpl : public Singleton::Instance, public GrpcMetricsStreamer {\npublic:\n  GrpcMetricsStreamerImpl(Grpc::AsyncClientFactoryPtr&& factory,\n                          const LocalInfo::LocalInfo& local_info,\n                          envoy::config::core::v3::ApiVersion transport_api_version);\n\n  // GrpcMetricsStreamer\n  void 
send(envoy::service::metrics::v3::StreamMetricsMessage& message) override;\n\n  // Grpc::AsyncStreamCallbacks\n  void onRemoteClose(Grpc::Status::GrpcStatus, const std::string&) override { stream_ = nullptr; }\n\nprivate:\n  Grpc::AsyncStream<envoy::service::metrics::v3::StreamMetricsMessage> stream_{};\n  Grpc::AsyncClient<envoy::service::metrics::v3::StreamMetricsMessage,\n                    envoy::service::metrics::v3::StreamMetricsResponse>\n      client_;\n  const LocalInfo::LocalInfo& local_info_;\n  const Protobuf::MethodDescriptor& service_method_;\n  const envoy::config::core::v3::ApiVersion transport_api_version_;\n};\n\nusing GrpcMetricsStreamerImplPtr = std::unique_ptr<GrpcMetricsStreamerImpl>;\n\n/**\n * Stat Sink implementation of Metrics Service.\n */\nclass MetricsServiceSink : public Stats::Sink {\npublic:\n  // MetricsService::Sink\n  MetricsServiceSink(const GrpcMetricsStreamerSharedPtr& grpc_metrics_streamer,\n                     TimeSource& time_system, const bool report_counters_as_deltas);\n  void flush(Stats::MetricSnapshot& snapshot) override;\n  void onHistogramComplete(const Stats::Histogram&, uint64_t) override {}\n\n  void flushCounter(const Stats::MetricSnapshot::CounterSnapshot& counter_snapshot);\n  void flushGauge(const Stats::Gauge& gauge);\n  void flushHistogram(const Stats::ParentHistogram& envoy_histogram);\n\nprivate:\n  GrpcMetricsStreamerSharedPtr grpc_metrics_streamer_;\n  envoy::service::metrics::v3::StreamMetricsMessage message_;\n  TimeSource& time_source_;\n  const bool report_counters_as_deltas_;\n};\n\n} // namespace MetricsService\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/statsd/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Stats sink for the basic version of the statsd protocol (https://github.com/b/statsd_spec).\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"data_plane_agnostic\",\n    # Legacy test use. TODO(#9953) clean up.\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/extensions/stat_sinks:well_known_names\",\n        \"//source/extensions/stat_sinks/common/statsd:statsd_lib\",\n        \"//source/server:configuration_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/stat_sinks/statsd/config.cc",
    "content": "#include \"extensions/stat_sinks/statsd/config.h\"\n\n#include <memory>\n\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/config/metrics/v3/stats.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/network/resolver_impl.h\"\n\n#include \"extensions/stat_sinks/common/statsd/statsd.h\"\n#include \"extensions/stat_sinks/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Statsd {\n\nStats::SinkPtr\nStatsdSinkFactory::createStatsSink(const Protobuf::Message& config,\n                                   Server::Configuration::ServerFactoryContext& server) {\n\n  const auto& statsd_sink =\n      MessageUtil::downcastAndValidate<const envoy::config::metrics::v3::StatsdSink&>(\n          config, server.messageValidationContext().staticValidationVisitor());\n  switch (statsd_sink.statsd_specifier_case()) {\n  case envoy::config::metrics::v3::StatsdSink::StatsdSpecifierCase::kAddress: {\n    Network::Address::InstanceConstSharedPtr address =\n        Network::Address::resolveProtoAddress(statsd_sink.address());\n    ENVOY_LOG(debug, \"statsd UDP ip address: {}\", address->asString());\n    return std::make_unique<Common::Statsd::UdpStatsdSink>(server.threadLocal(), std::move(address),\n                                                           false, statsd_sink.prefix());\n  }\n  case envoy::config::metrics::v3::StatsdSink::StatsdSpecifierCase::kTcpClusterName:\n    ENVOY_LOG(debug, \"statsd TCP cluster: {}\", statsd_sink.tcp_cluster_name());\n    return std::make_unique<Common::Statsd::TcpStatsdSink>(\n        server.localInfo(), statsd_sink.tcp_cluster_name(), server.threadLocal(),\n        server.clusterManager(), server.scope(), statsd_sink.prefix());\n  default:\n    // Verified by schema.\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nProtobufTypes::MessagePtr StatsdSinkFactory::createEmptyConfigProto() {\n  return 
std::make_unique<envoy::config::metrics::v3::StatsdSink>();\n}\n\nstd::string StatsdSinkFactory::name() const { return StatsSinkNames::get().Statsd; }\n\n/**\n * Static registration for the statsd sink factory. @see RegisterFactory.\n */\nREGISTER_FACTORY(StatsdSinkFactory, Server::Configuration::StatsSinkFactory){\"envoy.statsd\"};\n\n} // namespace Statsd\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/statsd/config.h",
    "content": "#pragma once\n\n#include \"envoy/server/instance.h\"\n\n#include \"server/configuration_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Statsd {\n\n/**\n * Config registration for the tcp statsd sink. @see StatsSinkFactory.\n */\nclass StatsdSinkFactory : Logger::Loggable<Logger::Id::config>,\n                          public Server::Configuration::StatsSinkFactory {\npublic:\n  // StatsSinkFactory\n  Stats::SinkPtr createStatsSink(const Protobuf::Message& config,\n                                 Server::Configuration::ServerFactoryContext& server) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override;\n};\n\nDECLARE_FACTORY(StatsdSinkFactory);\n\n} // namespace Statsd\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\n# Stats sink for wasm\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"data_plane_agnostic\",\n    status = \"alpha\",\n    deps = [\n        \":wasm_stat_sink_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:factory_context_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/stat_sinks:well_known_names\",\n        \"//source/server:configuration_lib\",\n        \"@envoy_api//envoy/extensions/stat_sinks/wasm/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"wasm_stat_sink_lib\",\n    hdrs = [\"wasm_stat_sink_impl.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/stat_sinks/wasm/config.cc",
    "content": "#include \"extensions/stat_sinks/wasm/config.h\"\n\n#include <memory>\n\n#include \"envoy/extensions/stat_sinks/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/factory_context.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/stat_sinks/wasm/wasm_stat_sink_impl.h\"\n#include \"extensions/stat_sinks/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Wasm {\n\nStats::SinkPtr\nWasmSinkFactory::createStatsSink(const Protobuf::Message& proto_config,\n                                 Server::Configuration::ServerFactoryContext& context) {\n  const auto& config =\n      MessageUtil::downcastAndValidate<const envoy::extensions::stat_sinks::wasm::v3::Wasm&>(\n          proto_config, context.messageValidationContext().staticValidationVisitor());\n\n  auto wasm_sink = std::make_unique<WasmStatSink>(config.config().root_id(), nullptr);\n\n  auto plugin = std::make_shared<Common::Wasm::Plugin>(\n      config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(),\n      config.config().vm_config().runtime(),\n      Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(),\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, context.localInfo(), nullptr);\n\n  auto callback = [&wasm_sink, &context, plugin](Common::Wasm::WasmHandleSharedPtr base_wasm) {\n    if (!base_wasm) {\n      if (plugin->fail_open_) {\n        ENVOY_LOG(error, \"Unable to create Wasm Stat Sink {}\", plugin->name_);\n      } else {\n        ENVOY_LOG(critical, \"Unable to create Wasm Stat Sink {}\", plugin->name_);\n      }\n      return;\n    }\n    wasm_sink->setSingleton(\n        Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, context.dispatcher()));\n  };\n\n  if (!Common::Wasm::createWasm(\n          config.config().vm_config(), plugin, context.scope().createScope(\"\"),\n          
context.clusterManager(), context.initManager(), context.dispatcher(), context.api(),\n          context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) {\n    throw Common::Wasm::WasmException(\n        fmt::format(\"Unable to create Wasm Stat Sink {}\", plugin->name_));\n  }\n\n  return wasm_sink;\n}\n\nProtobufTypes::MessagePtr WasmSinkFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::stat_sinks::wasm::v3::Wasm>();\n}\n\nstd::string WasmSinkFactory::name() const { return StatsSinkNames::get().Wasm; }\n\n/**\n * Static registration for the wasm stat sink. @see RegisterFactory.\n */\nREGISTER_FACTORY(WasmSinkFactory, Server::Configuration::StatsSinkFactory);\n\n} // namespace Wasm\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/wasm/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/server/factory_context.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"common/config/datasource.h\"\n\n#include \"server/configuration_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Wasm {\n\n/**\n * Config registration for the Wasm statsd sink. @see StatSinkFactory.\n */\nclass WasmSinkFactory : Logger::Loggable<Logger::Id::config>,\n                        public Server::Configuration::StatsSinkFactory {\npublic:\n  // StatsSinkFactory\n  Stats::SinkPtr createStatsSink(const Protobuf::Message& config,\n                                 Server::Configuration::ServerFactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  std::string name() const override;\n\nprivate:\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_;\n};\n\n} // namespace Wasm\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h",
    "content": "#pragma once\n\n#include \"envoy/stats/sink.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Wasm {\n\nusing Envoy::Extensions::Common::Wasm::WasmHandle;\n\nclass WasmStatSink : public Stats::Sink {\npublic:\n  WasmStatSink(absl::string_view root_id, Common::Wasm::WasmHandleSharedPtr singleton)\n      : root_id_(root_id), singleton_(std::move(singleton)) {}\n\n  void flush(Stats::MetricSnapshot& snapshot) override {\n    singleton_->wasm()->onStatsUpdate(root_id_, snapshot);\n  }\n\n  void setSingleton(Common::Wasm::WasmHandleSharedPtr singleton) {\n    ASSERT(singleton != nullptr);\n    singleton_ = std::move(singleton);\n  }\n\n  void onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) override {\n    (void)histogram;\n    (void)value;\n  }\n\nprivate:\n  std::string root_id_;\n  Common::Wasm::WasmHandleSharedPtr singleton_;\n};\n\n} // namespace Wasm\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/stat_sinks/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\n\n/**\n * Well-known stats sink names.\n * NOTE: New sinks should use the well known name: envoy.stat_sinks.name.\n */\nclass StatsSinkNameValues {\npublic:\n  // Statsd sink\n  const std::string Statsd = \"envoy.stat_sinks.statsd\";\n  // DogStatsD compatible statsd sink\n  const std::string DogStatsd = \"envoy.stat_sinks.dog_statsd\";\n  // MetricsService sink\n  const std::string MetricsService = \"envoy.stat_sinks.metrics_service\";\n  // Hystrix sink\n  const std::string Hystrix = \"envoy.stat_sinks.hystrix\";\n  // WebAssembly sink\n  const std::string Wasm = \"envoy.stat_sinks.wasm\";\n};\n\nusing StatsSinkNames = ConstSingleton<StatsSinkNameValues>;\n\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"factory_base_lib\",\n    hdrs = [\"factory_base.h\"],\n    deps = [\n        \"//include/envoy/server:tracer_config_interface\",\n        \"//source/common/config:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/tracers/common/factory_base.h",
    "content": "#pragma once\n\n#include \"envoy/server/tracer_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Common {\n\n/**\n * Common base class for tracer factory registrations. Removes a substantial amount of\n * boilerplate.\n */\ntemplate <class ConfigProto> class FactoryBase : public Server::Configuration::TracerFactory {\npublic:\n  // Server::Configuration::TracerFactory\n  Tracing::HttpTracerSharedPtr\n  createHttpTracer(const Protobuf::Message& config,\n                   Server::Configuration::TracerFactoryContext& context) override {\n    return createHttpTracerTyped(MessageUtil::downcastAndValidate<const ConfigProto&>(\n                                     config, context.messageValidationVisitor()),\n                                 context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ConfigProto>();\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  FactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  virtual Tracing::HttpTracerSharedPtr\n  createHttpTracerTyped(const ConfigProto& proto_config,\n                        Server::Configuration::TracerFactoryContext& context) PURE;\n\n  const std::string name_;\n};\n\n} // namespace Common\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/common/ot/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"opentracing_driver_lib\",\n    srcs = [\n        \"opentracing_driver_impl.cc\",\n    ],\n    hdrs = [\n        \"opentracing_driver_impl.h\",\n    ],\n    external_deps = [\"opentracing\"],\n    deps = [\n        \"//source/common/tracing:http_tracer_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/tracers/common/ot/opentracing_driver_impl.cc",
    "content": "#include \"extensions/tracers/common/ot/opentracing_driver_impl.h\"\n\n#include <sstream>\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/base64.h\"\n#include \"common/common/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Common {\nnamespace Ot {\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    ot_span_context_handle(Http::CustomHeaders::get().OtSpanContext);\n\nnamespace {\nclass OpenTracingHTTPHeadersWriter : public opentracing::HTTPHeadersWriter {\npublic:\n  explicit OpenTracingHTTPHeadersWriter(Http::HeaderMap& request_headers)\n      : request_headers_(request_headers) {}\n\n  // opentracing::HTTPHeadersWriter\n  opentracing::expected<void> Set(opentracing::string_view key,\n                                  opentracing::string_view value) const override {\n    Http::LowerCaseString lowercase_key{key};\n    request_headers_.remove(lowercase_key);\n    request_headers_.addCopy(std::move(lowercase_key), {value.data(), value.size()});\n    return {};\n  }\n\nprivate:\n  Http::HeaderMap& request_headers_;\n};\n\nclass OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader {\npublic:\n  explicit OpenTracingHTTPHeadersReader(const Http::RequestHeaderMap& request_headers)\n      : request_headers_(request_headers) {}\n\n  using OpenTracingCb = std::function<opentracing::expected<void>(opentracing::string_view,\n                                                                  opentracing::string_view)>;\n\n  // opentracing::HTTPHeadersReader\n  opentracing::expected<opentracing::string_view>\n  LookupKey(opentracing::string_view key) const override {\n    const Http::HeaderEntry* entry = request_headers_.get(Http::LowerCaseString{key});\n    if (entry != nullptr) {\n      return opentracing::string_view{entry->value().getStringView().data(),\n       
                               entry->value().getStringView().length()};\n    } else {\n      return opentracing::make_unexpected(opentracing::key_not_found_error);\n    }\n  }\n\n  opentracing::expected<void> ForeachKey(OpenTracingCb f) const override {\n    request_headers_.iterate(headerMapCallback(f));\n    return {};\n  }\n\nprivate:\n  const Http::RequestHeaderMap& request_headers_;\n\n  static Http::HeaderMap::ConstIterateCb headerMapCallback(OpenTracingCb callback) {\n    return [callback =\n                std::move(callback)](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n      opentracing::string_view key{header.key().getStringView().data(),\n                                   header.key().getStringView().length()};\n      opentracing::string_view value{header.value().getStringView().data(),\n                                     header.value().getStringView().length()};\n      if (callback(key, value)) {\n        return Http::HeaderMap::Iterate::Continue;\n      } else {\n        return Http::HeaderMap::Iterate::Break;\n      }\n    };\n  }\n};\n} // namespace\n\nOpenTracingSpan::OpenTracingSpan(OpenTracingDriver& driver,\n                                 std::unique_ptr<opentracing::Span>&& span)\n    : driver_{driver}, span_(std::move(span)) {}\n\nvoid OpenTracingSpan::finishSpan() { span_->FinishWithOptions(finish_options_); }\n\nvoid OpenTracingSpan::setOperation(absl::string_view operation) {\n  span_->SetOperationName({operation.data(), operation.length()});\n}\n\nvoid OpenTracingSpan::setTag(absl::string_view name, absl::string_view value) {\n  span_->SetTag({name.data(), name.length()},\n                opentracing::v2::string_view{value.data(), value.length()});\n}\n\nvoid OpenTracingSpan::log(SystemTime timestamp, const std::string& event) {\n  opentracing::LogRecord record{timestamp, {{Tracing::Logs::get().EventKey, event}}};\n  finish_options_.log_records.emplace_back(std::move(record));\n}\n\nvoid 
OpenTracingSpan::setBaggage(absl::string_view key, absl::string_view value) {\n  span_->SetBaggageItem({key.data(), key.length()}, {value.data(), value.length()});\n}\n\nstd::string OpenTracingSpan::getBaggage(absl::string_view key) {\n  return span_->BaggageItem({key.data(), key.length()});\n}\n\nvoid OpenTracingSpan::injectContext(Http::RequestHeaderMap& request_headers) {\n  if (driver_.propagationMode() == OpenTracingDriver::PropagationMode::SingleHeader) {\n    // Inject the span context using Envoy's single-header format.\n    std::ostringstream oss;\n    const opentracing::expected<void> was_successful =\n        span_->tracer().Inject(span_->context(), oss);\n    if (!was_successful) {\n      ENVOY_LOG(debug, \"Failed to inject span context: {}\", was_successful.error().message());\n      driver_.tracerStats().span_context_injection_error_.inc();\n      return;\n    }\n    const std::string current_span_context = oss.str();\n    request_headers.setInline(\n        ot_span_context_handle.handle(),\n        Base64::encode(current_span_context.c_str(), current_span_context.length()));\n  } else {\n    // Inject the context using the tracer's standard HTTP header format.\n    const OpenTracingHTTPHeadersWriter writer{request_headers};\n    const opentracing::expected<void> was_successful =\n        span_->tracer().Inject(span_->context(), writer);\n    if (!was_successful) {\n      ENVOY_LOG(debug, \"Failed to inject span context: {}\", was_successful.error().message());\n      driver_.tracerStats().span_context_injection_error_.inc();\n      return;\n    }\n  }\n}\n\nvoid OpenTracingSpan::setSampled(bool sampled) {\n  span_->SetTag(opentracing::ext::sampling_priority, sampled ? 
1 : 0);\n}\n\nTracing::SpanPtr OpenTracingSpan::spawnChild(const Tracing::Config&, const std::string& name,\n                                             SystemTime start_time) {\n  std::unique_ptr<opentracing::Span> ot_span = span_->tracer().StartSpan(\n      name, {opentracing::ChildOf(&span_->context()), opentracing::StartTimestamp(start_time)});\n  RELEASE_ASSERT(ot_span != nullptr, \"\");\n  return Tracing::SpanPtr{new OpenTracingSpan{driver_, std::move(ot_span)}};\n}\n\nOpenTracingDriver::OpenTracingDriver(Stats::Scope& scope)\n    : tracer_stats_{OPENTRACING_TRACER_STATS(POOL_COUNTER_PREFIX(scope, \"tracing.opentracing.\"))} {}\n\nTracing::SpanPtr OpenTracingDriver::startSpan(const Tracing::Config& config,\n                                              Http::RequestHeaderMap& request_headers,\n                                              const std::string& operation_name,\n                                              SystemTime start_time,\n                                              const Tracing::Decision tracing_decision) {\n  const PropagationMode propagation_mode = this->propagationMode();\n  const opentracing::Tracer& tracer = this->tracer();\n  std::unique_ptr<opentracing::Span> active_span;\n  std::unique_ptr<opentracing::SpanContext> parent_span_ctx;\n  if (propagation_mode == PropagationMode::SingleHeader &&\n      request_headers.getInline(ot_span_context_handle.handle())) {\n    opentracing::expected<std::unique_ptr<opentracing::SpanContext>> parent_span_ctx_maybe;\n    std::string parent_context = Base64::decode(\n        std::string(request_headers.getInlineValue(ot_span_context_handle.handle())));\n\n    if (!parent_context.empty()) {\n      InputConstMemoryStream istream{parent_context.data(), parent_context.size()};\n      parent_span_ctx_maybe = tracer.Extract(istream);\n    } else {\n      parent_span_ctx_maybe =\n          opentracing::make_unexpected(opentracing::span_context_corrupted_error);\n    }\n\n    if (parent_span_ctx_maybe) 
{\n      parent_span_ctx = std::move(*parent_span_ctx_maybe);\n    } else {\n      ENVOY_LOG(debug, \"Failed to extract span context: {}\",\n                parent_span_ctx_maybe.error().message());\n      tracerStats().span_context_extraction_error_.inc();\n    }\n  } else if (propagation_mode == PropagationMode::TracerNative) {\n    const OpenTracingHTTPHeadersReader reader{request_headers};\n    opentracing::expected<std::unique_ptr<opentracing::SpanContext>> parent_span_ctx_maybe =\n        tracer.Extract(reader);\n    if (parent_span_ctx_maybe) {\n      parent_span_ctx = std::move(*parent_span_ctx_maybe);\n    } else {\n      ENVOY_LOG(debug, \"Failed to extract span context: {}\",\n                parent_span_ctx_maybe.error().message());\n      tracerStats().span_context_extraction_error_.inc();\n    }\n  }\n  opentracing::StartSpanOptions options;\n  options.references.emplace_back(opentracing::SpanReferenceType::ChildOfRef,\n                                  parent_span_ctx.get());\n  options.start_system_timestamp = start_time;\n  if (!tracing_decision.traced) {\n    options.tags.emplace_back(opentracing::ext::sampling_priority, 0);\n  }\n  active_span = tracer.StartSpanWithOptions(operation_name, options);\n  RELEASE_ASSERT(active_span != nullptr, \"\");\n  active_span->SetTag(opentracing::ext::span_kind,\n                      config.operationName() == Tracing::OperationName::Egress\n                          ? opentracing::ext::span_kind_rpc_client\n                          : opentracing::ext::span_kind_rpc_server);\n  return Tracing::SpanPtr{new OpenTracingSpan{*this, std::move(active_span)}};\n}\n\n} // namespace Ot\n} // namespace Common\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/common/ot/opentracing_driver_impl.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/stats/scope.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/singleton/const_singleton.h\"\n\n#include \"opentracing/ext/tags.h\"\n#include \"opentracing/tracer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Common {\nnamespace Ot {\n\n#define OPENTRACING_TRACER_STATS(COUNTER)                                                          \\\n  COUNTER(span_context_extraction_error)                                                           \\\n  COUNTER(span_context_injection_error)\n\nstruct OpenTracingTracerStats {\n  OPENTRACING_TRACER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nclass OpenTracingDriver;\n\nclass OpenTracingSpan : public Tracing::Span, Logger::Loggable<Logger::Id::tracing> {\npublic:\n  OpenTracingSpan(OpenTracingDriver& driver, std::unique_ptr<opentracing::Span>&& span);\n\n  // Tracing::Span\n  void finishSpan() override;\n  void setOperation(absl::string_view operation) override;\n  void setTag(absl::string_view name, const absl::string_view) override;\n  void log(SystemTime timestamp, const std::string& event) override;\n  void injectContext(Http::RequestHeaderMap& request_headers) override;\n  Tracing::SpanPtr spawnChild(const Tracing::Config& config, const std::string& name,\n                              SystemTime start_time) override;\n  void setSampled(bool) override;\n  std::string getBaggage(absl::string_view key) override;\n  void setBaggage(absl::string_view key, absl::string_view value) override;\n\nprivate:\n  OpenTracingDriver& driver_;\n  opentracing::FinishSpanOptions finish_options_;\n  std::unique_ptr<opentracing::Span> span_;\n};\n\n/**\n * This driver can be used by tracing libraries implementing the OpenTracing API (see\n * https://github.com/opentracing/opentracing-cpp) to hook into Envoy's tracing functionality with a\n * minimal amount of effort. 
Libraries need only provide an opentracing::Tracer implementation; the\n * rest of span creation is taken care of by this class.\n */\nclass OpenTracingDriver : public Tracing::Driver, protected Logger::Loggable<Logger::Id::tracing> {\npublic:\n  explicit OpenTracingDriver(Stats::Scope& scope);\n\n  // Tracer::TracingDriver\n  Tracing::SpanPtr startSpan(const Tracing::Config& config, Http::RequestHeaderMap& request_headers,\n                             const std::string& operation_name, SystemTime start_time,\n                             const Tracing::Decision tracing_decision) override;\n\n  virtual opentracing::Tracer& tracer() PURE;\n\n  enum class PropagationMode { SingleHeader, TracerNative };\n\n  /**\n   * Controls how span context is propagated in HTTP headers. PropagationMode::SingleHeader will\n   * propagate span context as a single header within the inline header HeaderMap::OtSpanContext;\n   * otherwise, span context will be propagated using the native format of the tracing library.\n   */\n  virtual PropagationMode propagationMode() const { return PropagationMode::SingleHeader; }\n\n  OpenTracingTracerStats& tracerStats() { return tracer_stats_; }\n\nprivate:\n  OpenTracingTracerStats tracer_stats_;\n};\n} // namespace Ot\n} // namespace Common\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/datadog/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Trace driver for Datadog (https://datadoghq.com/)\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"datadog_tracer_lib\",\n    srcs = [\n        \"datadog_tracer_impl.cc\",\n    ],\n    hdrs = [\n        \"datadog_tracer_impl.h\",\n    ],\n    external_deps = [\"dd_opentracing_cpp\"],\n    deps = [\n        \"//source/common/config:utility_lib\",\n        \"//source/common/http:async_client_utility_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/common/upstream:cluster_update_tracker_lib\",\n        \"//source/common/version:version_lib\",\n        \"//source/extensions/tracers/common/ot:opentracing_driver_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":datadog_tracer_lib\",\n        \"//source/extensions/tracers/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/tracers/datadog/config.cc",
    "content": "#include \"extensions/tracers/datadog/config.h\"\n\n#include \"envoy/config/trace/v3/datadog.pb.h\"\n#include \"envoy/config/trace/v3/datadog.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/datadog/datadog_tracer_impl.h\"\n\n#include \"datadog/opentracing.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Datadog {\n\nDatadogTracerFactory::DatadogTracerFactory() : FactoryBase(\"envoy.tracers.datadog\") {}\n\nTracing::HttpTracerSharedPtr DatadogTracerFactory::createHttpTracerTyped(\n    const envoy::config::trace::v3::DatadogConfig& proto_config,\n    Server::Configuration::TracerFactoryContext& context) {\n  Tracing::DriverPtr datadog_driver = std::make_unique<Driver>(\n      proto_config, context.serverFactoryContext().clusterManager(),\n      context.serverFactoryContext().scope(), context.serverFactoryContext().threadLocal(),\n      context.serverFactoryContext().runtime());\n  return std::make_shared<Tracing::HttpTracerImpl>(std::move(datadog_driver),\n                                                   context.serverFactoryContext().localInfo());\n}\n\n/**\n * Static registration for the Datadog tracer. @see RegisterFactory.\n */\nREGISTER_FACTORY(DatadogTracerFactory, Server::Configuration::TracerFactory);\n\n} // namespace Datadog\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/datadog/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/trace/v3/datadog.pb.h\"\n#include \"envoy/config/trace/v3/datadog.pb.validate.h\"\n\n#include \"extensions/tracers/common/factory_base.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Datadog {\n\n/**\n * Config registration for the Datadog tracer. @see TracerFactory.\n */\nclass DatadogTracerFactory : public Common::FactoryBase<envoy::config::trace::v3::DatadogConfig> {\npublic:\n  DatadogTracerFactory();\n\nprivate:\n  // FactoryBase\n  Tracing::HttpTracerSharedPtr\n  createHttpTracerTyped(const envoy::config::trace::v3::DatadogConfig& proto_config,\n                        Server::Configuration::TracerFactoryContext& context) override;\n};\n\n} // namespace Datadog\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/datadog/datadog_tracer_impl.cc",
    "content": "#include \"extensions/tracers/datadog/datadog_tracer_impl.h\"\n\n#include \"envoy/config/trace/v3/datadog.pb.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n#include \"common/version/version.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Datadog {\n\nDriver::TlsTracer::TlsTracer(const std::shared_ptr<opentracing::Tracer>& tracer,\n                             TraceReporterPtr&& reporter, Driver& driver)\n    : tracer_(tracer), reporter_(std::move(reporter)), driver_(driver) {}\n\nDriver::Driver(const envoy::config::trace::v3::DatadogConfig& datadog_config,\n               Upstream::ClusterManager& cluster_manager, Stats::Scope& scope,\n               ThreadLocal::SlotAllocator& tls, Runtime::Loader&)\n    : OpenTracingDriver{scope},\n      cm_(cluster_manager), tracer_stats_{DATADOG_TRACER_STATS(\n                                POOL_COUNTER_PREFIX(scope, \"tracing.datadog.\"))},\n      tls_(tls.allocateSlot()) {\n\n  Config::Utility::checkCluster(\"envoy.tracers.datadog\", datadog_config.collector_cluster(), cm_,\n                                /* allow_added_via_api */ true);\n  cluster_ = datadog_config.collector_cluster();\n\n  // Default tracer options.\n  tracer_options_.version = absl::StrCat(\"envoy \", Envoy::VersionInfo::version());\n  tracer_options_.operation_name_override = \"envoy.proxy\";\n  tracer_options_.service = \"envoy\";\n  tracer_options_.inject = std::set<datadog::opentracing::PropagationStyle>{\n      datadog::opentracing::PropagationStyle::Datadog};\n  tracer_options_.extract = std::set<datadog::opentracing::PropagationStyle>{\n      datadog::opentracing::PropagationStyle::Datadog};\n\n  // Configuration overrides for tracer options.\n  if 
(!datadog_config.service_name().empty()) {\n    tracer_options_.service = datadog_config.service_name();\n  }\n\n  tls_->set([this](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    auto tp = datadog::opentracing::makeTracerAndEncoder(tracer_options_);\n    auto tracer = std::get<0>(tp);\n    auto encoder = std::get<1>(tp);\n    TraceReporterPtr reporter(new TraceReporter(encoder, *this, dispatcher));\n    return ThreadLocal::ThreadLocalObjectSharedPtr{\n        new TlsTracer(tracer, std::move(reporter), *this)};\n  });\n}\n\nopentracing::Tracer& Driver::tracer() { return *tls_->getTyped<TlsTracer>().tracer_; }\n\nTraceReporter::TraceReporter(TraceEncoderSharedPtr encoder, Driver& driver,\n                             Event::Dispatcher& dispatcher)\n    : driver_(driver), encoder_(encoder),\n      collector_cluster_(driver_.clusterManager(), driver_.cluster()) {\n  flush_timer_ = dispatcher.createTimer([this]() -> void {\n    for (auto& h : encoder_->headers()) {\n      lower_case_headers_.emplace(h.first, Http::LowerCaseString{h.first});\n    }\n    driver_.tracerStats().timer_flushed_.inc();\n    flushTraces();\n    enableTimer();\n  });\n\n  enableTimer();\n}\n\nvoid TraceReporter::enableTimer() {\n  // The duration for this timer should not be a factor of the\n  // datadog-agent's read timer of 5000ms.\n  // Further details in https://github.com/envoyproxy/envoy/pull/5358\n  flush_timer_->enableTimer(std::chrono::milliseconds(900U));\n}\n\nvoid TraceReporter::flushTraces() {\n  auto pendingTraces = encoder_->pendingTraces();\n  if (pendingTraces) {\n    ENVOY_LOG(debug, \"flushing traces: {} traces\", pendingTraces);\n    driver_.tracerStats().traces_sent_.add(pendingTraces);\n\n    Http::RequestMessagePtr message(new Http::RequestMessageImpl());\n    message->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post);\n    message->headers().setReferencePath(encoder_->path());\n    
message->headers().setReferenceHost(driver_.cluster());\n    for (auto& h : encoder_->headers()) {\n      message->headers().setReferenceKey(lower_case_headers_.at(h.first), h.second);\n    }\n\n    message->body().add(encoder_->payload());\n    ENVOY_LOG(debug, \"submitting {} trace(s) to {} with payload size {}\", pendingTraces,\n              encoder_->path(), encoder_->payload().size());\n\n    if (collector_cluster_.exists()) {\n      Http::AsyncClient::Request* request =\n          driver_.clusterManager()\n              .httpAsyncClientForCluster(collector_cluster_.info()->name())\n              .send(\n                  std::move(message), *this,\n                  Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(1000U)));\n      if (request) {\n        active_requests_.add(*request);\n      }\n    } else {\n      ENVOY_LOG(debug, \"collector cluster '{}' does not exist\", driver_.cluster());\n      driver_.tracerStats().reports_skipped_no_cluster_.inc();\n    }\n\n    encoder_->clearTraces();\n  }\n}\n\nvoid TraceReporter::onFailure(const Http::AsyncClient::Request& request,\n                              Http::AsyncClient::FailureReason) {\n  active_requests_.remove(request);\n  ENVOY_LOG(debug, \"failure submitting traces to datadog agent\");\n  driver_.tracerStats().reports_failed_.inc();\n}\n\nvoid TraceReporter::onSuccess(const Http::AsyncClient::Request& request,\n                              Http::ResponseMessagePtr&& http_response) {\n  active_requests_.remove(request);\n  uint64_t responseStatus = Http::Utility::getResponseStatus(http_response->headers());\n  if (responseStatus != enumToInt(Http::Code::OK)) {\n    // TODO: Consider adding retries for failed submissions.\n    ENVOY_LOG(debug, \"unexpected HTTP response code from datadog agent: {}\", responseStatus);\n    driver_.tracerStats().reports_dropped_.inc();\n  } else {\n    ENVOY_LOG(debug, \"traces successfully submitted to datadog agent\");\n    
driver_.tracerStats().reports_sent_.inc();\n    encoder_->handleResponse(http_response->bodyAsString());\n  }\n}\n\n} // namespace Datadog\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/datadog/datadog_tracer_impl.h",
    "content": "#pragma once\n\n#include <datadog/opentracing.h>\n\n#include \"envoy/config/trace/v3/datadog.pb.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/http/async_client_utility.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/upstream/cluster_update_tracker.h\"\n\n#include \"extensions/tracers/common/ot/opentracing_driver_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Datadog {\n\n#define DATADOG_TRACER_STATS(COUNTER)                                                              \\\n  COUNTER(traces_sent)                                                                             \\\n  COUNTER(timer_flushed)                                                                           \\\n  COUNTER(reports_skipped_no_cluster)                                                              \\\n  COUNTER(reports_sent)                                                                            \\\n  COUNTER(reports_dropped)                                                                         \\\n  COUNTER(reports_failed)\n\nstruct DatadogTracerStats {\n  DATADOG_TRACER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nclass TraceReporter;\nusing TraceReporterPtr = std::unique_ptr<TraceReporter>;\nusing TraceEncoderSharedPtr = std::shared_ptr<datadog::opentracing::TraceEncoder>;\n\n/**\n * Class for a Datadog-specific Driver.\n */\nclass Driver : public Common::Ot::OpenTracingDriver {\npublic:\n  /**\n   * Constructor. 
It adds itself and a newly-created Datadog::Tracer object to a thread-local store.\n   */\n  Driver(const envoy::config::trace::v3::DatadogConfig& datadog_config,\n         Upstream::ClusterManager& cluster_manager, Stats::Scope& scope,\n         ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime);\n\n  // Getters to return the DatadogDriver's key members.\n  Upstream::ClusterManager& clusterManager() { return cm_; }\n  const std::string& cluster() { return cluster_; }\n  DatadogTracerStats& tracerStats() { return tracer_stats_; }\n\n  // Tracer::OpenTracingDriver\n  opentracing::Tracer& tracer() override;\n  PropagationMode propagationMode() const override {\n    return Common::Ot::OpenTracingDriver::PropagationMode::TracerNative;\n  }\n\nprivate:\n  /**\n   * Thread-local store containing DatadogDriver and Datadog::Tracer objects.\n   */\n  struct TlsTracer : ThreadLocal::ThreadLocalObject {\n    TlsTracer(const std::shared_ptr<opentracing::Tracer>& tracer, TraceReporterPtr&& reporter,\n              Driver& driver);\n\n    std::shared_ptr<opentracing::Tracer> tracer_;\n    TraceReporterPtr reporter_;\n    Driver& driver_;\n  };\n\n  Upstream::ClusterManager& cm_;\n  std::string cluster_;\n  DatadogTracerStats tracer_stats_;\n  datadog::opentracing::TracerOptions tracer_options_;\n  ThreadLocal::SlotPtr tls_;\n};\n\n/**\n * This class wraps the encoder provided with the tracer at initialization\n * and uses Http::AsyncClient to send completed traces to the Datadog Agent.\n *\n * The cluster to use for submitting traces to the agent is controlled with\n * the setting tracing.datadog.collector_cluster, which is mandatory and must\n * refer to a cluster in the active configuration.\n *\n * An internal timer is used to control how often traces are submitted.\n * If zero traces have completed in the interval between timer events,\n * no action is taken.\n * The timer interval can be controlled with the setting\n * tracing.datadog.flush_interval_ms, and defaults 
to 2000ms.\n */\nclass TraceReporter : public Http::AsyncClient::Callbacks,\n                      protected Logger::Loggable<Logger::Id::tracing> {\npublic:\n  /**\n   * Constructor.\n   *\n   * @param encoder Provides methods to retrieve data for publishing traces.\n   * @param driver The driver to be associated with the reporter.\n   * @param dispatcher Controls the timer used to flush buffered traces.\n   */\n  TraceReporter(TraceEncoderSharedPtr encoder, Driver& driver, Event::Dispatcher& dispatcher);\n\n  // Http::AsyncClient::Callbacks.\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override;\n  void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override;\n  void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {}\n\nprivate:\n  /**\n   * Enables the trace-flushing timer.\n   */\n  void enableTimer();\n\n  /**\n   * Removes all traces from the trace buffer and sends them to a Datadog Agent using\n   * Http::AsyncClient.\n   */\n  void flushTraces();\n\n  Driver& driver_;\n  Event::TimerPtr flush_timer_;\n  TraceEncoderSharedPtr encoder_;\n\n  std::map<std::string, Http::LowerCaseString> lower_case_headers_;\n\n  Upstream::ClusterUpdateTracker collector_cluster_;\n  // Track active HTTP requests to be able to cancel them on destruction.\n  Http::AsyncClientRequestTracker active_requests_;\n};\n} // namespace Datadog\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/dynamic_ot/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Trace driver for dynamically loadable C++ OpenTracing drivers (http://opentracing.io/).\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"dynamic_opentracing_driver_lib\",\n    srcs = [\n        \"dynamic_opentracing_driver_impl.cc\",\n    ],\n    hdrs = [\n        \"dynamic_opentracing_driver_impl.h\",\n    ],\n    deps = [\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/extensions/tracers/common/ot:opentracing_driver_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":dynamic_opentracing_driver_lib\",\n        \"//source/extensions/tracers/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/tracers/dynamic_ot/config.cc",
    "content": "#include \"extensions/tracers/dynamic_ot/config.h\"\n\n#include \"envoy/config/trace/v3/dynamic_ot.pb.h\"\n#include \"envoy/config/trace/v3/dynamic_ot.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace DynamicOt {\n\nDynamicOpenTracingTracerFactory::DynamicOpenTracingTracerFactory()\n    : FactoryBase(\"envoy.tracers.dynamic_ot\") {}\n\nTracing::HttpTracerSharedPtr DynamicOpenTracingTracerFactory::createHttpTracerTyped(\n    const envoy::config::trace::v3::DynamicOtConfig& proto_config,\n    Server::Configuration::TracerFactoryContext& context) {\n  const std::string& library = proto_config.library();\n  const std::string config = MessageUtil::getJsonStringFromMessage(proto_config.config());\n  Tracing::DriverPtr dynamic_driver = std::make_unique<DynamicOpenTracingDriver>(\n      context.serverFactoryContext().scope(), library, config);\n  return std::make_shared<Tracing::HttpTracerImpl>(std::move(dynamic_driver),\n                                                   context.serverFactoryContext().localInfo());\n}\n\n/**\n * Static registration for the dynamic opentracing tracer. @see RegisterFactory.\n */\nREGISTER_FACTORY(DynamicOpenTracingTracerFactory,\n                 Server::Configuration::TracerFactory){\"envoy.dynamic.ot\"};\n\n} // namespace DynamicOt\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/dynamic_ot/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/trace/v3/dynamic_ot.pb.h\"\n#include \"envoy/config/trace/v3/dynamic_ot.pb.validate.h\"\n\n#include \"extensions/tracers/common/factory_base.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace DynamicOt {\n\n/**\n * Config registration for the dynamic opentracing tracer. @see TracerFactory.\n */\nclass DynamicOpenTracingTracerFactory\n    : public Common::FactoryBase<envoy::config::trace::v3::DynamicOtConfig> {\npublic:\n  DynamicOpenTracingTracerFactory();\n\nprivate:\n  // FactoryBase\n  Tracing::HttpTracerSharedPtr\n  createHttpTracerTyped(const envoy::config::trace::v3::DynamicOtConfig& configuration,\n                        Server::Configuration::TracerFactoryContext& context) override;\n};\n\n} // namespace DynamicOt\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl.cc",
    "content": "#include \"extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace DynamicOt {\n\nDynamicOpenTracingDriver::DynamicOpenTracingDriver(Stats::Scope& scope, const std::string& library,\n                                                   const std::string& tracer_config)\n    : OpenTracingDriver{scope} {\n  std::string error_message;\n  opentracing::expected<opentracing::DynamicTracingLibraryHandle> library_handle_maybe =\n      opentracing::DynamicallyLoadTracingLibrary(library.c_str(), error_message);\n  if (!library_handle_maybe) {\n    throw EnvoyException{formatErrorMessage(library_handle_maybe.error(), error_message)};\n  }\n  library_handle_ = std::move(*library_handle_maybe);\n\n  opentracing::expected<std::shared_ptr<opentracing::Tracer>> tracer_maybe =\n      library_handle_.tracer_factory().MakeTracer(tracer_config.c_str(), error_message);\n  if (!tracer_maybe) {\n    throw EnvoyException{formatErrorMessage(tracer_maybe.error(), error_message)};\n  }\n  tracer_ = std::move(*tracer_maybe);\n  RELEASE_ASSERT(tracer_ != nullptr, \"\");\n}\n\nstd::string DynamicOpenTracingDriver::formatErrorMessage(std::error_code error_code,\n                                                         const std::string& error_message) {\n  if (error_message.empty()) {\n    return absl::StrCat(\"\", error_code.message());\n  } else {\n    return fmt::format(\"{}: {}\", error_code.message(), error_message);\n  }\n}\n\n} // namespace DynamicOt\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl.h",
    "content": "#pragma once\n\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"extensions/tracers/common/ot/opentracing_driver_impl.h\"\n\n#include \"opentracing/dynamic_load.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace DynamicOt {\n\n/**\n * This driver provides support for dynamically loading tracing libraries into Envoy that provide an\n * implementation of the OpenTracing API (see https://github.com/opentracing/opentracing-cpp).\n * TODO(rnburn): Add an example showing how to use a tracer library with this driver.\n */\nclass DynamicOpenTracingDriver : public Common::Ot::OpenTracingDriver {\npublic:\n  DynamicOpenTracingDriver(Stats::Scope& scope, const std::string& library,\n                           const std::string& tracer_config);\n\n  static std::string formatErrorMessage(std::error_code error_code,\n                                        const std::string& error_message);\n\n  // Tracer::OpenTracingDriver\n  opentracing::Tracer& tracer() override { return *tracer_; }\n\n  PropagationMode propagationMode() const override {\n    return OpenTracingDriver::PropagationMode::TracerNative;\n  }\n\nprivate:\n  opentracing::DynamicTracingLibraryHandle library_handle_;\n  std::shared_ptr<opentracing::Tracer> tracer_;\n};\n\n} // namespace DynamicOt\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/lightstep/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Trace driver for LightStep (https://lightstep.com/)\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"lightstep_tracer_lib\",\n    srcs = [\n        \"lightstep_tracer_impl.cc\",\n    ],\n    hdrs = [\n        \"lightstep_tracer_impl.h\",\n    ],\n    external_deps = [\"lightstep\"],\n    deps = [\n        \"//source/common/config:utility_lib\",\n        \"//source/common/grpc:context_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/common/upstream:cluster_update_tracker_lib\",\n        \"//source/extensions/tracers/common/ot:opentracing_driver_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":lightstep_tracer_lib\",\n        \"//source/extensions/tracers/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/tracers/lightstep/config.cc",
    "content": "#include \"extensions/tracers/lightstep/config.h\"\n\n#include \"envoy/config/trace/v3/lightstep.pb.h\"\n#include \"envoy/config/trace/v3/lightstep.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/lightstep/lightstep_tracer_impl.h\"\n\n#include \"lightstep/tracer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Lightstep {\n\nLightstepTracerFactory::LightstepTracerFactory() : FactoryBase(\"envoy.tracers.lightstep\") {}\n\nTracing::HttpTracerSharedPtr LightstepTracerFactory::createHttpTracerTyped(\n    const envoy::config::trace::v3::LightstepConfig& proto_config,\n    Server::Configuration::TracerFactoryContext& context) {\n  auto opts = std::make_unique<lightstep::LightStepTracerOptions>();\n  const auto access_token_file = context.serverFactoryContext().api().fileSystem().fileReadToEnd(\n      proto_config.access_token_file());\n  const auto access_token_sv = StringUtil::rtrim(access_token_file);\n  opts->access_token.assign(access_token_sv.data(), access_token_sv.size());\n  opts->component_name = context.serverFactoryContext().localInfo().clusterName();\n\n  Tracing::DriverPtr lightstep_driver = std::make_unique<LightStepDriver>(\n      proto_config, context.serverFactoryContext().clusterManager(),\n      context.serverFactoryContext().scope(), context.serverFactoryContext().threadLocal(),\n      context.serverFactoryContext().runtime(), std::move(opts),\n      Common::Ot::OpenTracingDriver::PropagationMode::TracerNative,\n      context.serverFactoryContext().grpcContext());\n  return std::make_shared<Tracing::HttpTracerImpl>(std::move(lightstep_driver),\n                                                   context.serverFactoryContext().localInfo());\n}\n\n/**\n * Static registration for the lightstep tracer. 
@see RegisterFactory.\n */\nREGISTER_FACTORY(LightstepTracerFactory, Server::Configuration::TracerFactory){\"envoy.lightstep\"};\n\n} // namespace Lightstep\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/lightstep/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/trace/v3/lightstep.pb.h\"\n#include \"envoy/config/trace/v3/lightstep.pb.validate.h\"\n\n#include \"extensions/tracers/common/factory_base.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Lightstep {\n\n/**\n * Config registration for the lightstep tracer. @see TracerFactory.\n */\nclass LightstepTracerFactory\n    : public Common::FactoryBase<envoy::config::trace::v3::LightstepConfig> {\npublic:\n  LightstepTracerFactory();\n\nprivate:\n  // FactoryBase\n  Tracing::HttpTracerSharedPtr\n  createHttpTracerTyped(const envoy::config::trace::v3::LightstepConfig& proto_config,\n                        Server::Configuration::TracerFactoryContext& context) override;\n};\n\n} // namespace Lightstep\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/lightstep/lightstep_tracer_impl.cc",
    "content": "#include \"extensions/tracers/lightstep/lightstep_tracer_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/trace/v3/lightstep.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/common/base64.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Lightstep {\n\nstatic void serializeGrpcMessage(const lightstep::BufferChain& buffer_chain,\n                                 Buffer::Instance& body) {\n  auto size = buffer_chain.num_bytes();\n  Buffer::RawSlice iovec;\n  body.reserve(size, &iovec, 1);\n  ASSERT(iovec.len_ >= size);\n  iovec.len_ = size;\n  buffer_chain.CopyOut(static_cast<char*>(iovec.mem_), size);\n  body.commit(&iovec, 1);\n  Grpc::Common::prependGrpcFrameHeader(body);\n}\n\nstatic std::vector<lightstep::PropagationMode>\nMakePropagationModes(const envoy::config::trace::v3::LightstepConfig& lightstep_config) {\n  if (lightstep_config.propagation_modes().empty()) {\n    return {lightstep::PropagationMode::envoy};\n  }\n  std::vector<lightstep::PropagationMode> result;\n  result.reserve(lightstep_config.propagation_modes().size());\n  for (auto propagation_mode : lightstep_config.propagation_modes()) {\n    switch (propagation_mode) {\n    case envoy::config::trace::v3::LightstepConfig::ENVOY:\n      result.push_back(lightstep::PropagationMode::envoy);\n      break;\n    case envoy::config::trace::v3::LightstepConfig::LIGHTSTEP:\n      result.push_back(lightstep::PropagationMode::lightstep);\n      break;\n    case envoy::config::trace::v3::LightstepConfig::B3:\n      result.push_back(lightstep::PropagationMode::b3);\n      break;\n    case 
envoy::config::trace::v3::LightstepConfig::TRACE_CONTEXT:\n      result.push_back(lightstep::PropagationMode::trace_context);\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n  return result;\n}\n\nvoid LightStepLogger::operator()(lightstep::LogLevel level,\n                                 opentracing::string_view message) const {\n  const fmt::string_view fmt_message{message.data(), message.size()};\n  switch (level) {\n  case lightstep::LogLevel::debug:\n    ENVOY_LOG(debug, \"{}\", fmt_message);\n    break;\n  case lightstep::LogLevel::info:\n    ENVOY_LOG(info, \"{}\", fmt_message);\n    break;\n  default:\n    ENVOY_LOG(warn, \"{}\", fmt_message);\n    break;\n  }\n}\n\n// If the default min_flush_spans value is too small, the larger number of reports can overwhelm\n// LightStep's satellites. Hence, we need to choose a number that's large enough; though, it's\n// somewhat arbitrary.\n//\n// See https://github.com/lightstep/lightstep-tracer-cpp/issues/106\nconst size_t LightStepDriver::DefaultMinFlushSpans = 200U;\n\nLightStepDriver::LightStepTransporter::LightStepTransporter(LightStepDriver& driver)\n    : driver_(driver), collector_cluster_(driver_.clusterManager(), driver_.cluster()) {}\n\nLightStepDriver::LightStepTransporter::~LightStepTransporter() {\n  if (active_request_ != nullptr) {\n    active_request_->cancel();\n  }\n}\n\nvoid LightStepDriver::LightStepTransporter::onSuccess(const Http::AsyncClient::Request&,\n                                                      Http::ResponseMessagePtr&& /*response*/) {\n  driver_.grpc_context_.chargeStat(*active_cluster_, driver_.request_stat_names_, true);\n  active_callback_->OnSuccess(*active_report_);\n  reset();\n}\n\nvoid LightStepDriver::LightStepTransporter::onFailure(\n    const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason /*failure_reason*/) {\n  driver_.grpc_context_.chargeStat(*active_cluster_, driver_.request_stat_names_, false);\n  
active_callback_->OnFailure(*active_report_);\n  reset();\n}\n\nvoid LightStepDriver::LightStepTransporter::OnSpanBufferFull() noexcept {\n  if (active_report_ != nullptr) {\n    return;\n  }\n  driver_.flush();\n}\n\nvoid LightStepDriver::LightStepTransporter::Send(std::unique_ptr<lightstep::BufferChain>&& report,\n                                                 Callback& callback) noexcept {\n  if (active_report_ != nullptr) {\n    callback.OnFailure(*report);\n    return;\n  }\n\n  const uint64_t timeout =\n      driver_.runtime().snapshot().getInteger(\"tracing.lightstep.request_timeout\", 5000U);\n  Http::RequestMessagePtr message = Grpc::Common::prepareHeaders(\n      driver_.cluster(), lightstep::CollectorServiceFullName(), lightstep::CollectorMethodName(),\n      absl::optional<std::chrono::milliseconds>(timeout));\n  serializeGrpcMessage(*report, message->body());\n\n  if (collector_cluster_.exists()) {\n    active_report_ = std::move(report);\n    active_callback_ = &callback;\n    active_cluster_ = collector_cluster_.info();\n    active_request_ = driver_.clusterManager()\n                          .httpAsyncClientForCluster(collector_cluster_.info()->name())\n                          .send(std::move(message), *this,\n                                Http::AsyncClient::RequestOptions().setTimeout(\n                                    std::chrono::milliseconds(timeout)));\n  } else {\n    ENVOY_LOG(debug, \"collector cluster '{}' does not exist\", driver_.cluster());\n    driver_.tracerStats().reports_skipped_no_cluster_.inc();\n  }\n}\n\nvoid LightStepDriver::LightStepTransporter::reset() {\n  active_cluster_ = nullptr;\n  active_request_ = nullptr;\n  active_callback_ = nullptr;\n  active_report_ = nullptr;\n}\n\nLightStepDriver::LightStepMetricsObserver::LightStepMetricsObserver(LightStepDriver& driver)\n    : driver_(driver) {}\n\nvoid LightStepDriver::LightStepMetricsObserver::OnSpansSent(int num_spans) noexcept {\n  
driver_.tracerStats().spans_sent_.add(num_spans);\n}\n\nvoid LightStepDriver::LightStepMetricsObserver::OnSpansDropped(int num_spans) noexcept {\n  driver_.tracerStats().spans_dropped_.add(num_spans);\n}\n\nLightStepDriver::TlsLightStepTracer::TlsLightStepTracer(\n    const std::shared_ptr<lightstep::LightStepTracer>& tracer, LightStepDriver& driver,\n    Event::Dispatcher& dispatcher)\n    : tracer_{tracer}, driver_{driver} {\n  flush_timer_ = dispatcher.createTimer([this]() -> void {\n    driver_.tracerStats().timer_flushed_.inc();\n    tracer_->Flush();\n    enableTimer();\n  });\n\n  enableTimer();\n}\n\nlightstep::LightStepTracer& LightStepDriver::TlsLightStepTracer::tracer() { return *tracer_; }\n\nvoid LightStepDriver::TlsLightStepTracer::enableTimer() {\n  const uint64_t flush_interval =\n      driver_.runtime().snapshot().getInteger(\"tracing.lightstep.flush_interval_ms\", 1000U);\n  flush_timer_->enableTimer(std::chrono::milliseconds(flush_interval));\n}\n\nLightStepDriver::LightStepDriver(const envoy::config::trace::v3::LightstepConfig& lightstep_config,\n                                 Upstream::ClusterManager& cluster_manager, Stats::Scope& scope,\n                                 ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime,\n                                 std::unique_ptr<lightstep::LightStepTracerOptions>&& options,\n                                 PropagationMode propagation_mode, Grpc::Context& grpc_context)\n    : OpenTracingDriver{scope}, cm_{cluster_manager},\n      tracer_stats_{LIGHTSTEP_TRACER_STATS(POOL_COUNTER_PREFIX(scope, \"tracing.lightstep.\"))},\n      tls_{tls.allocateSlot()}, runtime_{runtime}, options_{std::move(options)},\n      propagation_mode_{propagation_mode}, grpc_context_(grpc_context),\n      pool_(scope.symbolTable()), request_stat_names_{\n                                      pool_.add(lightstep::CollectorServiceFullName()),\n                                      
pool_.add(lightstep::CollectorMethodName())} {\n\n  Config::Utility::checkCluster(\"envoy.tracers.lightstep\", lightstep_config.collector_cluster(),\n                                cm_, /* allow_added_via_api */ true);\n  cluster_ = lightstep_config.collector_cluster();\n\n  if (!(cm_.get(cluster_)->info()->features() & Upstream::ClusterInfo::Features::HTTP2)) {\n    throw EnvoyException(\n        fmt::format(\"{} collector cluster must support http2 for gRPC calls\", cluster_));\n  }\n\n  auto propagation_modes = MakePropagationModes(lightstep_config);\n\n  tls_->set([this, propagation_modes = std::move(propagation_modes)](\n                Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    lightstep::LightStepTracerOptions tls_options;\n    tls_options.access_token = options_->access_token;\n    tls_options.component_name = options_->component_name;\n    tls_options.use_thread = false;\n    tls_options.propagation_modes = propagation_modes;\n    tls_options.logger_sink = LightStepLogger{};\n\n    tls_options.max_buffered_spans = std::function<size_t()>{[this] {\n      return runtime_.snapshot().getInteger(\"tracing.lightstep.min_flush_spans\",\n                                            DefaultMinFlushSpans);\n    }};\n    tls_options.metrics_observer = std::make_unique<LightStepMetricsObserver>(*this);\n    tls_options.transporter = std::make_unique<LightStepTransporter>(*this);\n    std::shared_ptr<lightstep::LightStepTracer> tracer =\n        lightstep::MakeLightStepTracer(std::move(tls_options));\n\n    return ThreadLocal::ThreadLocalObjectSharedPtr{\n        new TlsLightStepTracer{tracer, *this, dispatcher}};\n  });\n}\n\nvoid LightStepDriver::flush() {\n  auto& tls_tracer = tls_->getTyped<TlsLightStepTracer>();\n  tls_tracer.tracer().Flush();\n  tls_tracer.enableTimer();\n}\n\nopentracing::Tracer& LightStepDriver::tracer() {\n  return tls_->getTyped<TlsLightStepTracer>().tracer();\n}\n\n} // namespace Lightstep\n} // 
namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/lightstep/lightstep_tracer_impl.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/trace/v3/lightstep.pb.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/grpc/context_impl.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/upstream/cluster_update_tracker.h\"\n\n#include \"extensions/tracers/common/ot/opentracing_driver_impl.h\"\n\n#include \"lightstep/tracer.h\"\n#include \"lightstep/transporter.h\"\n#include \"opentracing/noop.h\"\n#include \"opentracing/tracer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Lightstep {\n\n#define LIGHTSTEP_TRACER_STATS(COUNTER)                                                            \\\n  COUNTER(spans_sent)                                                                              \\\n  COUNTER(spans_dropped)                                                                           \\\n  COUNTER(timer_flushed)                                                                           \\\n  COUNTER(reports_skipped_no_cluster)\n\nstruct LightstepTracerStats {\n  LIGHTSTEP_TRACER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * LightStepLogger is used to translate logs generated from LightStep's tracer to Envoy logs.\n */\nclass LightStepLogger : Logger::Loggable<Logger::Id::tracing> {\npublic:\n  void operator()(lightstep::LogLevel level, opentracing::string_view message) const;\n};\n\n/**\n * LightStep (http://lightstep.com/) provides tracing capabilities, aggregation, visualization of\n * application trace data.\n *\n * LightStepSink is for flushing data to LightStep collectors.\n */\nclass 
LightStepDriver : public Common::Ot::OpenTracingDriver {\npublic:\n  LightStepDriver(const envoy::config::trace::v3::LightstepConfig& lightstep_config,\n                  Upstream::ClusterManager& cluster_manager, Stats::Scope& scope,\n                  ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime,\n                  std::unique_ptr<lightstep::LightStepTracerOptions>&& options,\n                  PropagationMode propagation_mode, Grpc::Context& grpc_context);\n\n  Upstream::ClusterManager& clusterManager() { return cm_; }\n  const std::string& cluster() { return cluster_; }\n  Runtime::Loader& runtime() { return runtime_; }\n  LightstepTracerStats& tracerStats() { return tracer_stats_; }\n\n  static const size_t DefaultMinFlushSpans;\n\n  void flush();\n\n  // Tracer::OpenTracingDriver\n  opentracing::Tracer& tracer() override;\n  PropagationMode propagationMode() const override { return propagation_mode_; }\n\nprivate:\n  class LightStepTransporter : Logger::Loggable<Logger::Id::tracing>,\n                               public lightstep::AsyncTransporter,\n                               public Http::AsyncClient::Callbacks {\n  public:\n    explicit LightStepTransporter(LightStepDriver& driver);\n\n    ~LightStepTransporter() override;\n\n    // lightstep::AsyncTransporter\n    void OnSpanBufferFull() noexcept override;\n\n    void Send(std::unique_ptr<lightstep::BufferChain>&& message,\n              Callback& callback) noexcept override;\n\n    // Http::AsyncClient::Callbacks\n    void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override;\n    void onFailure(const Http::AsyncClient::Request&,\n                   Http::AsyncClient::FailureReason failure_reason) override;\n    void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {}\n\n  private:\n    std::unique_ptr<lightstep::BufferChain> active_report_;\n    Callback* active_callback_ = nullptr;\n    
Upstream::ClusterInfoConstSharedPtr active_cluster_;\n    Http::AsyncClient::Request* active_request_ = nullptr;\n    LightStepDriver& driver_;\n    Upstream::ClusterUpdateTracker collector_cluster_;\n\n    void reset();\n  };\n\n  class LightStepMetricsObserver : public ::lightstep::MetricsObserver {\n  public:\n    explicit LightStepMetricsObserver(LightStepDriver& driver);\n\n    void OnSpansSent(int num_spans) noexcept override;\n\n    void OnSpansDropped(int num_spans) noexcept override;\n\n  private:\n    LightStepDriver& driver_;\n  };\n\n  class TlsLightStepTracer : public ThreadLocal::ThreadLocalObject {\n  public:\n    TlsLightStepTracer(const std::shared_ptr<lightstep::LightStepTracer>& tracer,\n                       LightStepDriver& driver, Event::Dispatcher& dispatcher);\n\n    lightstep::LightStepTracer& tracer();\n\n    void enableTimer();\n\n  private:\n    std::shared_ptr<lightstep::LightStepTracer> tracer_;\n    LightStepDriver& driver_;\n    Event::TimerPtr flush_timer_;\n  };\n\n  Upstream::ClusterManager& cm_;\n  std::string cluster_;\n  LightstepTracerStats tracer_stats_;\n  ThreadLocal::SlotPtr tls_;\n  Runtime::Loader& runtime_;\n  std::unique_ptr<lightstep::LightStepTracerOptions> options_;\n  const PropagationMode propagation_mode_;\n  Grpc::Context& grpc_context_;\n  Stats::StatNamePool pool_;\n  const Grpc::Context::RequestStatNames request_stat_names_;\n};\n} // namespace Lightstep\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/opencensus/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n    \"envoy_select_google_grpc\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Trace driver for OpenCensus: https://opencensus.io/\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":opencensus_tracer_impl\",\n        \"//source/extensions/tracers/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"opencensus_tracer_impl\",\n    srcs = [\"opencensus_tracer_impl.cc\"],\n    hdrs = [\"opencensus_tracer_impl.h\"],\n    copts = [\"-Wno-unused-parameter\"],\n    external_deps = [\n        \"opencensus_trace\",\n        \"opencensus_trace_b3\",\n        \"opencensus_trace_cloud_trace_context\",\n        \"opencensus_trace_grpc_trace_bin\",\n        \"opencensus_trace_trace_context\",\n        \"opencensus_exporter_ocagent\",\n        \"opencensus_exporter_stdout\",\n        \"opencensus_exporter_stackdriver\",\n        \"opencensus_exporter_zipkin\",\n    ],\n    deps = [\n        \"//source/common/config:utility_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ] + envoy_select_google_grpc([\"//source/common/grpc:google_async_client_lib\"]),\n)\n"
  },
  {
    "path": "source/extensions/tracers/opencensus/config.cc",
    "content": "#include \"extensions/tracers/opencensus/config.h\"\n\n#include \"envoy/config/trace/v3/opencensus.pb.h\"\n#include \"envoy/config/trace/v3/opencensus.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/opencensus/opencensus_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace OpenCensus {\n\nOpenCensusTracerFactory::OpenCensusTracerFactory() : FactoryBase(\"envoy.tracers.opencensus\") {}\n\nTracing::HttpTracerSharedPtr OpenCensusTracerFactory::createHttpTracerTyped(\n    const envoy::config::trace::v3::OpenCensusConfig& proto_config,\n    Server::Configuration::TracerFactoryContext& context) {\n  // Since OpenCensus can only support a single tracing configuration per entire process,\n  // we need to make sure that it is configured at most once.\n  if (tracer_) {\n    if (Envoy::Protobuf::util::MessageDifferencer::Equals(config_, proto_config)) {\n      return tracer_;\n    } else {\n      throw EnvoyException(\"Opencensus has already been configured with a different config.\");\n    }\n  }\n  Tracing::DriverPtr driver =\n      std::make_unique<Driver>(proto_config, context.serverFactoryContext().localInfo(),\n                               context.serverFactoryContext().api());\n  tracer_ = std::make_shared<Tracing::HttpTracerImpl>(std::move(driver),\n                                                      context.serverFactoryContext().localInfo());\n  config_ = proto_config;\n  return tracer_;\n}\n\n/**\n * Static registration for the OpenCensus tracer. @see RegisterFactory.\n */\nREGISTER_FACTORY(OpenCensusTracerFactory, Server::Configuration::TracerFactory);\n\n} // namespace OpenCensus\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/opencensus/config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/trace/v3/opencensus.pb.h\"\n#include \"envoy/config/trace/v3/opencensus.pb.validate.h\"\n\n#include \"extensions/tracers/common/factory_base.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace OpenCensus {\n\n/**\n * Config registration for the OpenCensus tracer. @see TracerFactory.\n */\nclass OpenCensusTracerFactory\n    : public Common::FactoryBase<envoy::config::trace::v3::OpenCensusConfig> {\npublic:\n  OpenCensusTracerFactory();\n\nprivate:\n  // FactoryBase\n  Tracing::HttpTracerSharedPtr\n  createHttpTracerTyped(const envoy::config::trace::v3::OpenCensusConfig& proto_config,\n                        Server::Configuration::TracerFactoryContext& context) override;\n\n  // Since OpenCensus can only support a single tracing configuration per entire process,\n  // we need to make sure that it is configured at most once.\n  Tracing::HttpTracerSharedPtr tracer_;\n  envoy::config::trace::v3::OpenCensusConfig config_;\n};\n\n} // namespace OpenCensus\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/opencensus/opencensus_tracer_impl.cc",
    "content": "#include \"extensions/tracers/opencensus/opencensus_tracer_impl.h\"\n\n#include <grpcpp/grpcpp.h>\n\n#include \"envoy/config/trace/v3/opencensus.pb.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/base64.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"google/devtools/cloudtrace/v2/tracing.grpc.pb.h\"\n#include \"opencensus/exporters/trace/ocagent/ocagent_exporter.h\"\n#include \"opencensus/exporters/trace/stackdriver/stackdriver_exporter.h\"\n#include \"opencensus/exporters/trace/stdout/stdout_exporter.h\"\n#include \"opencensus/exporters/trace/zipkin/zipkin_exporter.h\"\n#include \"opencensus/trace/propagation/b3.h\"\n#include \"opencensus/trace/propagation/cloud_trace_context.h\"\n#include \"opencensus/trace/propagation/grpc_trace_bin.h\"\n#include \"opencensus/trace/propagation/trace_context.h\"\n#include \"opencensus/trace/sampler.h\"\n#include \"opencensus/trace/span.h\"\n#include \"opencensus/trace/span_context.h\"\n#include \"opencensus/trace/trace_config.h\"\n#include \"opencensus/trace/trace_params.h\"\n\n#ifdef ENVOY_GOOGLE_GRPC\n#include \"common/grpc/google_grpc_utils.h\"\n#endif\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace OpenCensus {\n\n#ifdef ENVOY_GOOGLE_GRPC\nconstexpr char GoogleStackdriverTraceAddress[] = \"cloudtrace.googleapis.com\";\n#endif\n\nnamespace {\n\nclass ConstantValues {\npublic:\n  const Http::LowerCaseString TRACEPARENT{\"traceparent\"};\n  const Http::LowerCaseString GRPC_TRACE_BIN{\"grpc-trace-bin\"};\n  const Http::LowerCaseString X_CLOUD_TRACE_CONTEXT{\"x-cloud-trace-context\"};\n  const Http::LowerCaseString X_B3_TRACEID{\"x-b3-traceid\"};\n  const Http::LowerCaseString X_B3_SPANID{\"x-b3-spanid\"};\n  const Http::LowerCaseString X_B3_SAMPLED{\"x-b3-sampled\"};\n  const Http::LowerCaseString X_B3_FLAGS{\"x-b3-flags\"};\n};\n\nusing Constants = ConstSingleton<ConstantValues>;\n\n/**\n * OpenCensus tracing implementation of the Envoy Span object.\n 
*/\nclass Span : public Tracing::Span {\npublic:\n  Span(const Tracing::Config& config, const envoy::config::trace::v3::OpenCensusConfig& oc_config,\n       Http::RequestHeaderMap& request_headers, const std::string& operation_name,\n       SystemTime start_time, const Tracing::Decision tracing_decision);\n\n  // Used by spawnChild().\n  Span(const envoy::config::trace::v3::OpenCensusConfig& oc_config,\n       ::opencensus::trace::Span&& span);\n\n  void setOperation(absl::string_view operation) override;\n  void setTag(absl::string_view name, absl::string_view value) override;\n  void log(SystemTime timestamp, const std::string& event) override;\n  void finishSpan() override;\n  void injectContext(Http::RequestHeaderMap& request_headers) override;\n  Tracing::SpanPtr spawnChild(const Tracing::Config& config, const std::string& name,\n                              SystemTime start_time) override;\n  void setSampled(bool sampled) override;\n\n  // OpenCensus doesn't support baggage, so noop these OpenTracing functions.\n  void setBaggage(absl::string_view, absl::string_view) override{};\n  std::string getBaggage(absl::string_view) override { return std::string(); };\n\nprivate:\n  ::opencensus::trace::Span span_;\n  const envoy::config::trace::v3::OpenCensusConfig& oc_config_;\n};\n\n::opencensus::trace::Span\nstartSpanHelper(const std::string& name, bool traced, const Http::RequestHeaderMap& request_headers,\n                const envoy::config::trace::v3::OpenCensusConfig& oc_config) {\n  // Determine if there is a parent context.\n  using OpenCensusConfig = envoy::config::trace::v3::OpenCensusConfig;\n  ::opencensus::trace::SpanContext parent_ctx;\n  for (const auto& incoming : oc_config.incoming_trace_context()) {\n    bool found = false;\n    switch (incoming) {\n    case OpenCensusConfig::TRACE_CONTEXT: {\n      const Http::HeaderEntry* header = request_headers.get(Constants::get().TRACEPARENT);\n      if (header != nullptr) {\n        found = true;\n        
parent_ctx = ::opencensus::trace::propagation::FromTraceParentHeader(\n            header->value().getStringView());\n      }\n      break;\n    }\n\n    case OpenCensusConfig::GRPC_TRACE_BIN: {\n      const Http::HeaderEntry* header = request_headers.get(Constants::get().GRPC_TRACE_BIN);\n      if (header != nullptr) {\n        found = true;\n        parent_ctx = ::opencensus::trace::propagation::FromGrpcTraceBinHeader(\n            Base64::decodeWithoutPadding(header->value().getStringView()));\n      }\n      break;\n    }\n\n    case OpenCensusConfig::CLOUD_TRACE_CONTEXT: {\n      const Http::HeaderEntry* header = request_headers.get(Constants::get().X_CLOUD_TRACE_CONTEXT);\n      if (header != nullptr) {\n        found = true;\n        parent_ctx = ::opencensus::trace::propagation::FromCloudTraceContextHeader(\n            header->value().getStringView());\n      }\n      break;\n    }\n\n    case OpenCensusConfig::B3: {\n      absl::string_view b3_trace_id;\n      absl::string_view b3_span_id;\n      absl::string_view b3_sampled;\n      absl::string_view b3_flags;\n      const Http::HeaderEntry* h_b3_trace_id = request_headers.get(Constants::get().X_B3_TRACEID);\n      if (h_b3_trace_id != nullptr) {\n        b3_trace_id = h_b3_trace_id->value().getStringView();\n      }\n      const Http::HeaderEntry* h_b3_span_id = request_headers.get(Constants::get().X_B3_SPANID);\n      if (h_b3_span_id != nullptr) {\n        b3_span_id = h_b3_span_id->value().getStringView();\n      }\n      const Http::HeaderEntry* h_b3_sampled = request_headers.get(Constants::get().X_B3_SAMPLED);\n      if (h_b3_sampled != nullptr) {\n        b3_sampled = h_b3_sampled->value().getStringView();\n      }\n      const Http::HeaderEntry* h_b3_flags = request_headers.get(Constants::get().X_B3_FLAGS);\n      if (h_b3_flags != nullptr) {\n        b3_flags = h_b3_flags->value().getStringView();\n      }\n      if (h_b3_trace_id != nullptr && h_b3_span_id != nullptr) {\n        found = true;\n  
      parent_ctx = ::opencensus::trace::propagation::FromB3Headers(b3_trace_id, b3_span_id,\n                                                                     b3_sampled, b3_flags);\n      }\n      break;\n    }\n    }\n    // First header found wins.\n    if (found) {\n      break;\n    }\n  }\n\n  // Honor Envoy's tracing decision.\n  ::opencensus::trace::AlwaysSampler always_sampler;\n  ::opencensus::trace::NeverSampler never_sampler;\n  // This is safe because opts are not used after StartSpan.\n  ::opencensus::trace::StartSpanOptions opts{&never_sampler};\n  if (traced) {\n    opts.sampler = &always_sampler;\n  }\n\n  if (parent_ctx.IsValid()) {\n    return ::opencensus::trace::Span::StartSpanWithRemoteParent(name, parent_ctx, opts);\n  }\n  return ::opencensus::trace::Span::StartSpan(name, /*parent=*/nullptr, opts);\n}\n\nSpan::Span(const Tracing::Config& config,\n           const envoy::config::trace::v3::OpenCensusConfig& oc_config,\n           Http::RequestHeaderMap& request_headers, const std::string& operation_name,\n           SystemTime /*start_time*/, const Tracing::Decision tracing_decision)\n    : span_(startSpanHelper(operation_name, tracing_decision.traced, request_headers, oc_config)),\n      oc_config_(oc_config) {\n  span_.AddAttribute(\"OperationName\", config.operationName() == Tracing::OperationName::Ingress\n                                          ? 
\"Ingress\"\n                                          : \"Egress\");\n}\n\nSpan::Span(const envoy::config::trace::v3::OpenCensusConfig& oc_config,\n           ::opencensus::trace::Span&& span)\n    : span_(std::move(span)), oc_config_(oc_config) {}\n\nvoid Span::setOperation(absl::string_view operation) { span_.SetName(operation); }\n\nvoid Span::setTag(absl::string_view name, absl::string_view value) {\n  span_.AddAttribute(name, value);\n}\n\nvoid Span::log(SystemTime /*timestamp*/, const std::string& event) {\n  // timestamp is ignored.\n  span_.AddAnnotation(event);\n}\n\nvoid Span::finishSpan() { span_.End(); }\n\nvoid Span::injectContext(Http::RequestHeaderMap& request_headers) {\n  using OpenCensusConfig = envoy::config::trace::v3::OpenCensusConfig;\n  const auto& ctx = span_.context();\n  for (const auto& outgoing : oc_config_.outgoing_trace_context()) {\n    switch (outgoing) {\n    case OpenCensusConfig::TRACE_CONTEXT:\n      request_headers.setReferenceKey(Constants::get().TRACEPARENT,\n                                      ::opencensus::trace::propagation::ToTraceParentHeader(ctx));\n      break;\n\n    case OpenCensusConfig::GRPC_TRACE_BIN: {\n      std::string val = ::opencensus::trace::propagation::ToGrpcTraceBinHeader(ctx);\n      val = Base64::encode(val.data(), val.size(), /*add_padding=*/false);\n      request_headers.setReferenceKey(Constants::get().GRPC_TRACE_BIN, val);\n      break;\n    }\n\n    case OpenCensusConfig::CLOUD_TRACE_CONTEXT:\n      request_headers.setReferenceKey(\n          Constants::get().X_CLOUD_TRACE_CONTEXT,\n          ::opencensus::trace::propagation::ToCloudTraceContextHeader(ctx));\n      break;\n\n    case OpenCensusConfig::B3:\n      request_headers.setReferenceKey(Constants::get().X_B3_TRACEID,\n                                      ::opencensus::trace::propagation::ToB3TraceIdHeader(ctx));\n      request_headers.setReferenceKey(Constants::get().X_B3_SPANID,\n                                      
::opencensus::trace::propagation::ToB3SpanIdHeader(ctx));\n      request_headers.setReferenceKey(Constants::get().X_B3_SAMPLED,\n                                      ::opencensus::trace::propagation::ToB3SampledHeader(ctx));\n      // OpenCensus's trace context propagation doesn't produce the\n      // \"X-B3-Flags:\" header.\n      break;\n    }\n  }\n}\n\nTracing::SpanPtr Span::spawnChild(const Tracing::Config& /*config*/, const std::string& name,\n                                  SystemTime /*start_time*/) {\n  span_.AddAnnotation(\"spawnChild\");\n  return std::make_unique<Span>(oc_config_,\n                                ::opencensus::trace::Span::StartSpan(name, /*parent=*/&span_));\n}\n\nvoid Span::setSampled(bool sampled) { span_.AddAnnotation(\"setSampled\", {{\"sampled\", sampled}}); }\n\n} // namespace\n\nDriver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config,\n               const LocalInfo::LocalInfo& localinfo, Api::Api& api)\n    : oc_config_(oc_config), local_info_(localinfo) {\n  // To give user a chance to correct initially invalid configuration and try to apply it once again\n  // without a need to restart Envoy, validation checks must be done prior to any side effects.\n  if (oc_config.stackdriver_exporter_enabled() && oc_config.has_stackdriver_grpc_service() &&\n      !oc_config.stackdriver_grpc_service().has_google_grpc()) {\n    throw EnvoyException(\"Opencensus stackdriver tracer only support GoogleGrpc.\");\n  }\n  if (oc_config.ocagent_exporter_enabled() && oc_config.has_ocagent_grpc_service() &&\n      !oc_config.ocagent_grpc_service().has_google_grpc()) {\n    throw EnvoyException(\"Opencensus ocagent tracer only supports GoogleGrpc.\");\n  }\n  // Process-wide side effects.\n  if (oc_config.has_trace_config()) {\n    applyTraceConfig(oc_config.trace_config());\n  }\n  if (oc_config.stdout_exporter_enabled()) {\n    ::opencensus::exporters::trace::StdoutExporter::Register();\n  }\n  if 
(oc_config.stackdriver_exporter_enabled()) {\n    ::opencensus::exporters::trace::StackdriverOptions opts;\n    opts.project_id = oc_config.stackdriver_project_id();\n    if (!oc_config.stackdriver_address().empty()) {\n      auto channel =\n          grpc::CreateChannel(oc_config.stackdriver_address(), grpc::InsecureChannelCredentials());\n      opts.trace_service_stub = ::google::devtools::cloudtrace::v2::TraceService::NewStub(channel);\n    } else if (oc_config.has_stackdriver_grpc_service() &&\n               oc_config.stackdriver_grpc_service().has_google_grpc()) {\n#ifdef ENVOY_GOOGLE_GRPC\n      envoy::config::core::v3::GrpcService stackdriver_service =\n          oc_config.stackdriver_grpc_service();\n      if (stackdriver_service.google_grpc().target_uri().empty()) {\n        // If stackdriver server address is not provided, the default production stackdriver\n        // address will be used.\n        stackdriver_service.mutable_google_grpc()->set_target_uri(GoogleStackdriverTraceAddress);\n      }\n      auto channel = Envoy::Grpc::GoogleGrpcUtils::createChannel(stackdriver_service, api);\n      // TODO(bianpengyuan): add tests for trace_service_stub and initial_metadata options with mock\n      // stubs.\n      opts.trace_service_stub = ::google::devtools::cloudtrace::v2::TraceService::NewStub(channel);\n      const auto& initial_metadata = stackdriver_service.initial_metadata();\n      if (!initial_metadata.empty()) {\n        opts.prepare_client_context = [initial_metadata](grpc::ClientContext* ctx) {\n          for (const auto& metadata : initial_metadata) {\n            ctx->AddMetadata(metadata.key(), metadata.value());\n          }\n        };\n      }\n#else\n      throw EnvoyException(\"Opencensus tracer: cannot handle stackdriver google grpc service, \"\n                           \"google grpc is not built in.\");\n#endif\n    }\n    ::opencensus::exporters::trace::StackdriverExporter::Register(std::move(opts));\n  }\n  if 
(oc_config.zipkin_exporter_enabled()) {\n    ::opencensus::exporters::trace::ZipkinExporterOptions opts(oc_config.zipkin_url());\n    opts.service_name = local_info_.clusterName();\n    ::opencensus::exporters::trace::ZipkinExporter::Register(opts);\n  }\n  if (oc_config.ocagent_exporter_enabled()) {\n    ::opencensus::exporters::trace::OcAgentOptions opts;\n    if (!oc_config.ocagent_address().empty()) {\n      opts.address = oc_config.ocagent_address();\n    } else if (oc_config.has_ocagent_grpc_service() &&\n               oc_config.ocagent_grpc_service().has_google_grpc()) {\n#ifdef ENVOY_GOOGLE_GRPC\n      const envoy::config::core::v3::GrpcService& ocagent_service =\n          oc_config.ocagent_grpc_service();\n      auto channel = Envoy::Grpc::GoogleGrpcUtils::createChannel(ocagent_service, api);\n      opts.trace_service_stub =\n          ::opencensus::proto::agent::trace::v1::TraceService::NewStub(channel);\n#else\n      throw EnvoyException(\"Opencensus tracer: cannot handle ocagent google grpc service, google \"\n                           \"grpc is not built in.\");\n#endif\n    }\n    opts.service_name = local_info_.clusterName();\n    ::opencensus::exporters::trace::OcAgentExporter::Register(std::move(opts));\n  }\n}\n\nvoid Driver::applyTraceConfig(const opencensus::proto::trace::v1::TraceConfig& config) {\n  using SamplerCase = opencensus::proto::trace::v1::TraceConfig::SamplerCase;\n  using opencensus::proto::trace::v1::ConstantSampler;\n  constexpr double kDefaultSamplingProbability = 1e-4;\n  double probability = kDefaultSamplingProbability;\n\n  switch (config.sampler_case()) {\n  case SamplerCase::kProbabilitySampler:\n    probability = config.probability_sampler().samplingprobability();\n    break;\n  case SamplerCase::kConstantSampler:\n    switch (config.constant_sampler().decision()) {\n    case ConstantSampler::ALWAYS_OFF:\n      probability = 0.;\n      break;\n    case ConstantSampler::ALWAYS_ON:\n    case 
ConstantSampler::ALWAYS_PARENT:\n      probability = 1.;\n      break;\n    default:\n      break; /* Keep default probability. */\n    }\n    break;\n  case SamplerCase::kRateLimitingSampler:\n    ENVOY_LOG(error, \"RateLimitingSampler is not supported.\");\n    break;\n  case SamplerCase::SAMPLER_NOT_SET:\n    break; // Keep default.\n  default:\n    ENVOY_LOG(error, \"Unknown sampler type in TraceConfig.\");\n  }\n\n  ::opencensus::trace::TraceConfig::SetCurrentTraceParams(::opencensus::trace::TraceParams{\n      uint32_t(config.max_number_of_attributes()), uint32_t(config.max_number_of_annotations()),\n      uint32_t(config.max_number_of_message_events()), uint32_t(config.max_number_of_links()),\n      ::opencensus::trace::ProbabilitySampler(probability)});\n}\n\nTracing::SpanPtr Driver::startSpan(const Tracing::Config& config,\n                                   Http::RequestHeaderMap& request_headers,\n                                   const std::string& operation_name, SystemTime start_time,\n                                   const Tracing::Decision tracing_decision) {\n  return std::make_unique<Span>(config, oc_config_, request_headers, operation_name, start_time,\n                                tracing_decision);\n}\n\n} // namespace OpenCensus\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/opencensus/opencensus_tracer_impl.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/trace/v3/opencensus.pb.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace OpenCensus {\n\n/**\n * OpenCensus tracing driver.\n */\nclass Driver : public Tracing::Driver, Logger::Loggable<Logger::Id::tracing> {\npublic:\n  Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config,\n         const LocalInfo::LocalInfo& localinfo, Api::Api& api);\n\n  /**\n   * Implements the abstract Driver's startSpan operation.\n   */\n  Tracing::SpanPtr startSpan(const Tracing::Config& config, Http::RequestHeaderMap& request_headers,\n                             const std::string& operation_name, SystemTime start_time,\n                             const Tracing::Decision tracing_decision) override;\n\nprivate:\n  void applyTraceConfig(const opencensus::proto::trace::v1::TraceConfig& config);\n\n  const envoy::config::trace::v3::OpenCensusConfig oc_config_;\n  const LocalInfo::LocalInfo& local_info_;\n};\n\n} // namespace OpenCensus\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Trace driver for AWS X-Ray.\n\nenvoy_extension_package()\n\nenvoy_proto_library(\n    name = \"daemon\",\n    srcs = [\"daemon.proto\"],\n)\n\nenvoy_cc_library(\n    name = \"xray_lib\",\n    srcs = [\n        \"daemon_broker.cc\",\n        \"localized_sampling.cc\",\n        \"tracer.cc\",\n        \"util.cc\",\n        \"xray_tracer_impl.cc\",\n    ],\n    hdrs = [\n        \"daemon_broker.h\",\n        \"localized_sampling.h\",\n        \"reservoir.h\",\n        \"sampling_strategy.h\",\n        \"tracer.h\",\n        \"util.h\",\n        \"xray_configuration.h\",\n        \"xray_tracer_impl.h\",\n    ],\n    external_deps = [],\n    deps = [\n        \":daemon_cc_proto\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/server:tracer_config_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/common:random_generator_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream\",\n    status = \"wip\",\n    deps = [\n        \":xray_lib\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/extensions/tracers/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/tracers/xray/config.cc",
    "content": "#include \"extensions/tracers/xray/config.h\"\n\n#include <string>\n\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/trace/v3/xray.pb.h\"\n#include \"envoy/config/trace/v3/xray.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/config/datasource.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/xray/xray_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nXRayTracerFactory::XRayTracerFactory() : FactoryBase(\"envoy.tracers.xray\") {}\n\nTracing::HttpTracerSharedPtr\nXRayTracerFactory::createHttpTracerTyped(const envoy::config::trace::v3::XRayConfig& proto_config,\n                                         Server::Configuration::TracerFactoryContext& context) {\n  std::string sampling_rules_json;\n  try {\n    sampling_rules_json = Config::DataSource::read(proto_config.sampling_rule_manifest(), true,\n                                                   context.serverFactoryContext().api());\n  } catch (EnvoyException& e) {\n    ENVOY_LOG(error, \"Failed to read sampling rules manifest because of {}.\", e.what());\n  }\n\n  if (proto_config.daemon_endpoint().protocol() != envoy::config::core::v3::SocketAddress::UDP) {\n    throw EnvoyException(\"X-Ray daemon endpoint must be a UDP socket address\");\n  }\n\n  if (proto_config.daemon_endpoint().port_specifier_case() !=\n      envoy::config::core::v3::SocketAddress::PortSpecifierCase::kPortValue) {\n    throw EnvoyException(\"X-Ray daemon port must be specified as number. 
Not a named port.\");\n  }\n\n  const std::string endpoint = fmt::format(\"{}:{}\", proto_config.daemon_endpoint().address(),\n                                           proto_config.daemon_endpoint().port_value());\n\n  auto aws = absl::flat_hash_map<std::string, ProtobufWkt::Value>{};\n  for (const auto& field : proto_config.segment_fields().aws().fields()) {\n    aws.emplace(field.first, field.second);\n  }\n  const auto& origin = proto_config.segment_fields().origin();\n  XRayConfiguration xconfig{endpoint, proto_config.segment_name(), sampling_rules_json, origin,\n                            std::move(aws)};\n\n  auto xray_driver = std::make_unique<XRay::Driver>(xconfig, context);\n\n  return std::make_shared<Tracing::HttpTracerImpl>(std::move(xray_driver),\n                                                   context.serverFactoryContext().localInfo());\n}\n\n/**\n * Static registration for the XRay tracer. @see RegisterFactory.\n */\nREGISTER_FACTORY(XRayTracerFactory, Server::Configuration::TracerFactory);\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/trace/v3/xray.pb.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/tracers/common/factory_base.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\n/**\n * Config registration for the XRay tracer. @see TracerFactory.\n */\nclass XRayTracerFactory : public Common::FactoryBase<envoy::config::trace::v3::XRayConfig>,\n                          Logger::Loggable<Logger::Id::tracing> {\npublic:\n  XRayTracerFactory();\n\nprivate:\n  Tracing::HttpTracerSharedPtr\n  createHttpTracerTyped(const envoy::config::trace::v3::XRayConfig& proto_config,\n                        Server::Configuration::TracerFactoryContext& context) override;\n};\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/daemon.proto",
    "content": "syntax = \"proto3\";\n\n// The structures in this file are merely for JSON serialization to communicate with the X-Ray\n// daemon. The X-Ray daemon does not speak protobuf.\npackage source.extensions.tracers.xray.daemon;\n\nimport \"validate/validate.proto\";\nimport \"google/protobuf/struct.proto\";\n\n// see https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html\nmessage Segment {\n  string name = 1 [(validate.rules).string = {min_len: 1}];\n  string id = 2 [(validate.rules).string = {min_len: 1}];\n  string trace_id = 3 [(validate.rules).string = {len: 35}];\n  double start_time = 4 [(validate.rules).double = {gt: 0}];\n  double end_time = 5 [(validate.rules).double = {gt: 0}];\n  string origin = 9;\n  string parent_id = 6;\n  google.protobuf.Struct aws = 10;\n  http_annotations http = 7;\n  message http_annotations {\n    google.protobuf.Struct request = 1;\n    google.protobuf.Struct response = 2;\n  }\n  map<string, string> annotations = 8;\n}\n\nmessage Header {\n  string format = 1;\n  uint32 version = 2;\n}\n"
  },
  {
    "path": "source/extensions/tracers/xray/daemon_broker.cc",
    "content": "#include \"extensions/tracers/xray/daemon_broker.h\"\n\n#include \"envoy/network/address.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/socket_interface.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"source/extensions/tracers/xray/daemon.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nnamespace {\n// creates a header JSON for X-Ray daemon.\n// For example:\n// { \"format\": \"json\", \"version\": 1}\nstd::string createHeader(const std::string& format, uint32_t version) {\n  source::extensions::tracers::xray::daemon::Header header;\n  header.set_format(format);\n  header.set_version(version);\n  return MessageUtil::getJsonStringFromMessage(header, false /* pretty_print  */,\n                                               false /* always_print_primitive_fields */);\n}\n\n} // namespace\n\nDaemonBrokerImpl::DaemonBrokerImpl(const std::string& daemon_endpoint)\n    : address_(Network::Utility::parseInternetAddressAndPort(daemon_endpoint, false /*v6only*/)),\n      io_handle_(Network::ioHandleForAddr(Network::Socket::Type::Datagram, address_)) {}\n\nvoid DaemonBrokerImpl::send(const std::string& data) const {\n  auto& logger = Logger::Registry::getLog(Logger::Id::tracing);\n  constexpr auto version = 1;\n  constexpr auto format = \"json\";\n  const std::string payload = absl::StrCat(createHeader(format, version), \"\\n\", data);\n  Buffer::RawSlice buf;\n  buf.mem_ = const_cast<char*>(payload.data());\n  buf.len_ = payload.length();\n  const auto rc = Network::Utility::writeToSocket(*io_handle_, &buf, 1 /*num_slices*/,\n                                                  nullptr /*local_ip*/, *address_);\n\n  if (rc.rc_ != payload.length()) {\n    // TODO(marcomagdy): report this in stats\n    ENVOY_LOG_TO_LOGGER(logger, debug, \"Failed to send trace payload to the X-Ray daemon.\");\n  }\n}\n\n} // namespace XRay\n} // namespace 
Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/daemon_broker.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/network/address.h\"\n\n#include \"common/network/io_socket_handle_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\n/**\n * The broker is a way to isolate the network dependency required to communicate with the X-Ray\n * daemon.\n */\nclass DaemonBroker {\npublic:\n  /**\n   * Sends the input string as data to the X-Ray daemon.\n   * The input string is typically a JSON serialized Span.\n   * This method prefixes the data with a header necessary for the daemon to accept the input.\n   */\n  virtual void send(const std::string& data) const PURE;\n\n  virtual ~DaemonBroker() = default;\n};\n\nusing DaemonBrokerPtr = std::unique_ptr<DaemonBroker>;\n\nclass DaemonBrokerImpl : public DaemonBroker {\npublic:\n  /**\n   * Creates a new Broker instance.\n   *\n   * @param daemon_endpoint The ip and port on which the X-Ray daemon is listening.\n   */\n  explicit DaemonBrokerImpl(const std::string& daemon_endpoint);\n\n  void send(const std::string& data) const final;\n\nprivate:\n  const Network::Address::InstanceConstSharedPtr address_;\n  const Network::IoHandlePtr io_handle_;\n};\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/localized_sampling.cc",
    "content": "#include \"extensions/tracers/xray/localized_sampling.h\"\n\n#include \"common/http/exception.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/tracers/xray/util.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nconstexpr double DefaultRate = 0.5;\nconstexpr int DefaultFixedTarget = 1;\nconstexpr int SamplingFileVersion = 2;\nconstexpr auto VersionJsonKey = \"version\";\nconstexpr auto DefaultRuleJsonKey = \"default\";\nconstexpr auto FixedTargetJsonKey = \"fixed_target\";\nconstexpr auto RateJsonKey = \"rate\";\nconstexpr auto CustomRulesJsonKey = \"rules\";\nconstexpr auto HostJsonKey = \"host\";\nconstexpr auto HttpMethodJsonKey = \"http_method\";\nconstexpr auto UrlPathJsonKey = \"url_path\";\n\nnamespace {\nvoid fail(absl::string_view msg) {\n  auto& logger = Logger::Registry::getLog(Logger::Id::tracing);\n  ENVOY_LOG_TO_LOGGER(logger, error, \"Failed to parse sampling rules - {}\", msg);\n}\n\nbool is_valid_rate(double n) { return n >= 0 && n <= 1.0; }\nbool is_valid_fixed_target(double n) { return n >= 0 && static_cast<uint32_t>(n) == n; }\n\nbool validateRule(const ProtobufWkt::Struct& rule) {\n  using ProtobufWkt::Value;\n\n  const auto host_it = rule.fields().find(HostJsonKey);\n  if (host_it != rule.fields().end() &&\n      host_it->second.kind_case() != Value::KindCase::kStringValue) {\n    fail(\"host must be a string\");\n    return false;\n  }\n\n  const auto http_method_it = rule.fields().find(HttpMethodJsonKey);\n  if (http_method_it != rule.fields().end() &&\n      http_method_it->second.kind_case() != Value::KindCase::kStringValue) {\n    fail(\"HTTP method must be a string\");\n    return false;\n  }\n\n  const auto url_path_it = rule.fields().find(UrlPathJsonKey);\n  if (url_path_it != rule.fields().end() &&\n      url_path_it->second.kind_case() != Value::KindCase::kStringValue) {\n    fail(\"URL path must be a string\");\n    return false;\n  }\n\n  const auto 
fixed_target_it = rule.fields().find(FixedTargetJsonKey);\n  if (fixed_target_it == rule.fields().end() ||\n      fixed_target_it->second.kind_case() != Value::KindCase::kNumberValue ||\n      !is_valid_fixed_target(fixed_target_it->second.number_value())) {\n    fail(\"fixed target is missing or not a valid positive integer\");\n    return false;\n  }\n\n  const auto rate_it = rule.fields().find(RateJsonKey);\n  if (rate_it == rule.fields().end() ||\n      rate_it->second.kind_case() != Value::KindCase::kNumberValue ||\n      !is_valid_rate(rate_it->second.number_value())) {\n    fail(\"rate is missing or not a valid positive floating number\");\n    return false;\n  }\n  return true;\n}\n} // namespace\n\nLocalizedSamplingRule LocalizedSamplingRule::createDefault() {\n  return LocalizedSamplingRule(DefaultFixedTarget, DefaultRate);\n}\n\nbool LocalizedSamplingRule::appliesTo(const SamplingRequest& request) const {\n  return (request.host_.empty() || wildcardMatch(host_, request.host_)) &&\n         (request.http_method_.empty() || wildcardMatch(http_method_, request.http_method_)) &&\n         (request.http_url_.empty() || wildcardMatch(url_path_, request.http_url_));\n}\n\nLocalizedSamplingManifest::LocalizedSamplingManifest(const std::string& rule_json)\n    : default_rule_(LocalizedSamplingRule::createDefault()) {\n  if (rule_json.empty()) {\n    return;\n  }\n\n  ProtobufWkt::Struct document;\n  try {\n    MessageUtil::loadFromJson(rule_json, document);\n  } catch (EnvoyException& e) {\n    fail(\"invalid JSON format\");\n    return;\n  }\n\n  const auto version_it = document.fields().find(VersionJsonKey);\n  if (version_it == document.fields().end()) {\n    fail(\"missing version number\");\n    return;\n  }\n\n  if (version_it->second.kind_case() != ProtobufWkt::Value::KindCase::kNumberValue ||\n      version_it->second.number_value() != SamplingFileVersion) {\n    fail(\"wrong version number\");\n    return;\n  }\n\n  const auto default_rule_it = 
document.fields().find(DefaultRuleJsonKey);\n  if (default_rule_it == document.fields().end() ||\n      default_rule_it->second.kind_case() != ProtobufWkt::Value::KindCase::kStructValue) {\n    fail(\"missing default rule\");\n    return;\n  }\n\n  // extract default rule members\n  auto& default_rule_object = default_rule_it->second.struct_value();\n  if (!validateRule(default_rule_object)) {\n    return;\n  }\n\n  default_rule_.setRate(default_rule_object.fields().find(RateJsonKey)->second.number_value());\n  default_rule_.setFixedTarget(static_cast<uint32_t>(\n      default_rule_object.fields().find(FixedTargetJsonKey)->second.number_value()));\n\n  const auto custom_rules_it = document.fields().find(CustomRulesJsonKey);\n  if (custom_rules_it == document.fields().end()) {\n    return;\n  }\n\n  if (custom_rules_it->second.kind_case() != ProtobufWkt::Value::KindCase::kListValue) {\n    fail(\"rules must be JSON array\");\n    return;\n  }\n\n  for (auto& el : custom_rules_it->second.list_value().values()) {\n    if (el.kind_case() != ProtobufWkt::Value::KindCase::kStructValue) {\n      fail(\"rules array must be objects\");\n      return;\n    }\n\n    auto& rule_json = el.struct_value();\n    if (!validateRule(rule_json)) {\n      return;\n    }\n\n    LocalizedSamplingRule rule = LocalizedSamplingRule::createDefault();\n    const auto host_it = rule_json.fields().find(HostJsonKey);\n    if (host_it != rule_json.fields().end()) {\n      rule.setHost(host_it->second.string_value());\n    }\n\n    const auto http_method_it = rule_json.fields().find(HttpMethodJsonKey);\n    if (http_method_it != rule_json.fields().end()) {\n      rule.setHttpMethod(http_method_it->second.string_value());\n    }\n\n    const auto url_path_it = rule_json.fields().find(UrlPathJsonKey);\n    if (url_path_it != rule_json.fields().end()) {\n      rule.setUrlPath(url_path_it->second.string_value());\n    }\n\n    // rate and fixed_target must exist because we validated this rule\n    
rule.setRate(rule_json.fields().find(RateJsonKey)->second.number_value());\n    rule.setFixedTarget(\n        static_cast<uint32_t>(rule_json.fields().find(FixedTargetJsonKey)->second.number_value()));\n\n    custom_rules_.push_back(std::move(rule));\n  }\n}\n\nbool LocalizedSamplingStrategy::shouldTrace(const SamplingRequest& sampling_request) {\n  if (!custom_manifest_.hasCustomRules()) {\n    return shouldTrace(default_manifest_.defaultRule());\n  }\n\n  for (auto&& rule : custom_manifest_.customRules()) {\n    if (rule.appliesTo(sampling_request)) {\n      return shouldTrace(rule);\n    }\n  }\n  return shouldTrace(custom_manifest_.defaultRule());\n}\n\nbool LocalizedSamplingStrategy::shouldTrace(LocalizedSamplingRule& rule) {\n  const auto now = time_source_.monotonicTime();\n  if (rule.reservoir().take(now)) {\n    return true;\n  }\n\n  // rule.rate() is a rational number between 0 and 1\n  auto toss = random() % 100;\n  if (toss < (100 * rule.rate())) {\n    return true;\n  }\n\n  return false;\n}\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/localized_sampling.h",
    "content": "#pragma once\n\n#include <functional>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/random_generator.h\"\n\n#include \"extensions/tracers/xray/reservoir.h\"\n#include \"extensions/tracers/xray/sampling_strategy.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\n/**\n * This class encompasses the algorithm used when deciding whether to sample a given request.\n * The rule contains wildcard strings for matching a request based on its hostname, HTTP method or\n * URL path. A request must match on all 3 parts before this rule is applied.\n * If the rule applies, then |fixed_target| determines how many requests to sample per second.\n * While, rate determines the percentage of requests to sample after that within the same second.\n *\n * By default, this tracer records the first request each second, and five percent of\n * any additional requests.\n */\nclass LocalizedSamplingRule {\npublic:\n  /**\n   * Creates a default sampling rule that has the default |fixed_target| and default |rate| set.\n   */\n  static LocalizedSamplingRule createDefault();\n\n  LocalizedSamplingRule(uint32_t fixed_target, double rate)\n      : fixed_target_(fixed_target), rate_(rate), reservoir_(fixed_target_) {}\n\n  /**\n   * Determines whether Hostname, HTTP method and URL path match the given request.\n   */\n  bool appliesTo(const SamplingRequest& request) const;\n\n  /**\n   * Set the hostname to match against.\n   * This value can contain wildcard characters such as '*' or '?'.\n   */\n  void setHost(absl::string_view host) { host_ = std::string(host); }\n\n  /**\n   * Set the HTTP method to match against.\n   * This value can contain wildcard characters such as '*' or '?'.\n   */\n  void setHttpMethod(absl::string_view http_method) { http_method_ = std::string(http_method); }\n\n  /**\n   * Set 
the URL path to match against.\n   * This value can contain wildcard characters such as '*' or '?'.\n   */\n  void setUrlPath(absl::string_view url_path) { url_path_ = std::string(url_path); }\n\n  /**\n   * Set the minimum number of requests to sample per second.\n   */\n  void setFixedTarget(uint32_t fixed_target) {\n    fixed_target_ = fixed_target;\n    reservoir_ = Reservoir(fixed_target);\n  }\n\n  /**\n   * Set the percentage of requests to sample _after_ sampling |fixed_target| requests per second.\n   */\n  void setRate(double rate) { rate_ = rate; }\n  double rate() const { return rate_; }\n  Reservoir& reservoir() { return reservoir_; }\n\nprivate:\n  std::string host_;\n  std::string http_method_;\n  std::string url_path_;\n  uint32_t fixed_target_;\n  double rate_;\n  Reservoir reservoir_;\n};\n\n/**\n * The manifest represents the set of sampling rules (custom and default) used to match incoming\n * requests.\n */\nclass LocalizedSamplingManifest {\npublic:\n  /**\n   * Create a default manifest. The default manifest is used when a custom manifest does not exist\n   * or failed to parse. The default manifest, will have an empty set of custom rules.\n   */\n  static LocalizedSamplingManifest createDefault() {\n    return LocalizedSamplingManifest{LocalizedSamplingRule::createDefault()};\n  }\n\n  /**\n   * Create a manifest by de-serializing the input string as JSON representation of the sampling\n   * rules.\n   * @param sampling_rules_json JSON representation of X-Ray localized sampling rules.\n   */\n  explicit LocalizedSamplingManifest(const std::string& sampling_rules_json);\n\n  /**\n   * Create a manifest by assigning the argument rule as the default rule. 
The set of custom rules\n   * in this manifest will be empty.\n   * @param default_rule A localized sampling rule that will be assigned as the default rule.\n   */\n  explicit LocalizedSamplingManifest(const LocalizedSamplingRule& default_rule)\n      : default_rule_(default_rule) {}\n\n  /**\n   * @return default sampling rule\n   */\n  LocalizedSamplingRule& defaultRule() { return default_rule_; }\n\n  /**\n   * @return the user-defined sampling rules\n   */\n  std::vector<LocalizedSamplingRule>& customRules() { return custom_rules_; }\n\n  /**\n   * @return true if the this manifest has a set of custom rules; otherwise false.\n   */\n  bool hasCustomRules() const { return !custom_rules_.empty(); }\n\nprivate:\n  LocalizedSamplingRule default_rule_;\n  std::vector<LocalizedSamplingRule> custom_rules_;\n};\n\nclass LocalizedSamplingStrategy : public SamplingStrategy {\npublic:\n  LocalizedSamplingStrategy(const std::string& sampling_rules_json, Random::RandomGenerator& rng,\n                            TimeSource& time_source)\n      : SamplingStrategy(rng), default_manifest_(LocalizedSamplingManifest::createDefault()),\n        custom_manifest_(sampling_rules_json), time_source_(time_source),\n        use_default_(!custom_manifest_.hasCustomRules()) {}\n\n  /**\n   * Determines if an incoming request matches one of the sampling rules in the local manifests.\n   * If a match is found, then the request might be traced based on the sampling percentages etc.\n   * determined by the matching rule.\n   */\n  bool shouldTrace(const SamplingRequest& sampling_request) override;\n\n  /**\n   * Determines whether default rules are in effect. 
Mainly for unit testing purposes.\n   */\n  bool usingDefaultManifest() const { return use_default_; }\n\nprivate:\n  bool shouldTrace(LocalizedSamplingRule& rule);\n  LocalizedSamplingManifest default_manifest_;\n  LocalizedSamplingManifest custom_manifest_;\n  TimeSource& time_source_;\n  bool use_default_;\n};\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/reservoir.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/common/time.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\n/**\n * Simple token-bucket algorithm that enables counting samples/traces used per second.\n */\nclass Reservoir {\npublic:\n  /**\n   * Creates a new reservoir that allows up to |traces_per_second| samples.\n   */\n  explicit Reservoir(uint32_t traces_per_second)\n      : traces_per_second_(traces_per_second), used_(0) {}\n\n  Reservoir(const Reservoir& other)\n      : traces_per_second_(other.traces_per_second_), used_(other.used_),\n        time_point_(other.time_point_) {}\n\n  Reservoir& operator=(const Reservoir& other) {\n    if (this == &other) {\n      return *this;\n    }\n    traces_per_second_ = other.traces_per_second_;\n    used_ = other.used_;\n    time_point_ = other.time_point_;\n    return *this;\n  }\n\n  /**\n   * Determines whether all samples have been used up for this particular second.\n   * Every second, this reservoir starts over with a full bucket.\n   *\n   * @param now Used to compare against the last recorded time to determine if it's still within the\n   * same second.\n   */\n  bool take(Envoy::MonotonicTime now) {\n    Envoy::Thread::LockGuard lg(sync_);\n    const auto diff = now - time_point_;\n    if (diff > std::chrono::seconds(1)) {\n      used_ = 0;\n      time_point_ = now;\n    }\n\n    if (used_ >= traces_per_second_) {\n      return false;\n    }\n\n    ++used_;\n    return true;\n  }\n\nprivate:\n  uint32_t traces_per_second_;\n  uint32_t used_;\n  Envoy::MonotonicTime time_point_;\n  Envoy::Thread::MutexBasicLockable sync_;\n};\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/sampling_strategy.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/random_generator.h\"\n\n#include \"common/common/macros.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nstruct SamplingRequest {\n  std::string host_;\n  std::string http_method_;\n  std::string http_url_;\n};\n\n/**\n * Strategy provides an interface for implementing trace sampling strategies.\n */\nclass SamplingStrategy {\npublic:\n  explicit SamplingStrategy(Random::RandomGenerator& rng) : rng_(rng) {}\n  virtual ~SamplingStrategy() = default;\n\n  /**\n   * sampleRequest determines if the given request should be traced or not.\n   * Implementation _must_ be thread-safe.\n   */\n  virtual bool shouldTrace(const SamplingRequest& sampling_request) PURE;\n\nprotected:\n  uint64_t random() const { return rng_.random(); }\n\nprivate:\n  Random::RandomGenerator& rng_;\n};\n\nusing SamplingStrategyPtr = std::unique_ptr<SamplingStrategy>;\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/tracer.cc",
    "content": "#include \"extensions/tracers/xray/tracer.h\"\n\n#include <algorithm>\n#include <chrono>\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/listener.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"source/extensions/tracers/xray/daemon.pb.validate.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nnamespace {\nconstexpr auto XRaySerializationVersion = \"1\";\n\n// X-Ray Trace ID Format\n//\n// A trace_id consists of three parts separated by hyphens.\n// For example, 1-58406cf0-a006649127e371903a2de979.\n// This includes:\n//\n// - The version number, that is, 1.\n// - The time of the original request, in Unix epoch time, in 8 hexadecimal digits.\n// - A 96-bit unique identifier in 24 hexadecimal digits.\n//\n// For more details see:\n// https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-fields\nstd::string generateTraceId(SystemTime point_in_time, Random::RandomGenerator& random) {\n  using std::chrono::seconds;\n  using std::chrono::time_point_cast;\n  const auto epoch = time_point_cast<seconds>(point_in_time).time_since_epoch().count();\n  std::string out;\n  out.reserve(35);\n  out += XRaySerializationVersion;\n  out.push_back('-');\n  // epoch in seconds represented as 8 hexadecimal characters\n  out += Hex::uint32ToHex(epoch);\n  out.push_back('-');\n  std::string uuid = random.uuid();\n  // unique id represented as 24 hexadecimal digits and no dashes\n  uuid.erase(std::remove(uuid.begin(), uuid.end(), '-'), uuid.end());\n  ASSERT(uuid.length() >= 24);\n  out += uuid.substr(0, 24);\n  return out;\n}\n\n} // namespace\n\nvoid Span::finishSpan() {\n  using std::chrono::time_point_cast;\n  using namespace source::extensions::tracers::xray;\n  // X-Ray expects timestamps to be in epoch seconds with milli/micro-second precision as a fraction\n  
using SecondsWithFraction = std::chrono::duration<double>;\n  if (!sampled()) {\n    return;\n  }\n\n  daemon::Segment s;\n  s.set_name(name());\n  s.set_id(id());\n  s.set_trace_id(traceId());\n  s.set_start_time(time_point_cast<SecondsWithFraction>(startTime()).time_since_epoch().count());\n  s.set_end_time(\n      time_point_cast<SecondsWithFraction>(time_source_.systemTime()).time_since_epoch().count());\n  s.set_origin(origin());\n  s.set_parent_id(parentId());\n\n  auto* aws = s.mutable_aws()->mutable_fields();\n  for (const auto& field : aws_metadata_) {\n    aws->insert({field.first, field.second});\n  }\n\n  auto* request_fields = s.mutable_http()->mutable_request()->mutable_fields();\n  for (const auto& field : http_request_annotations_) {\n    request_fields->insert({field.first, field.second});\n  }\n\n  auto* response_fields = s.mutable_http()->mutable_response()->mutable_fields();\n  for (const auto& field : http_response_annotations_) {\n    response_fields->insert({field.first, field.second});\n  }\n\n  for (const auto& item : custom_annotations_) {\n    s.mutable_annotations()->insert({item.first, item.second});\n  }\n\n  const std::string json = MessageUtil::getJsonStringFromMessage(\n      s, false /* pretty_print  */, false /* always_print_primitive_fields */);\n\n  broker_.send(json);\n} // namespace XRay\n\nvoid Span::injectContext(Http::RequestHeaderMap& request_headers) {\n  const std::string xray_header_value =\n      fmt::format(\"Root={};Parent={};Sampled={}\", traceId(), id(), sampled() ? 
\"1\" : \"0\");\n  request_headers.setCopy(Http::LowerCaseString(XRayTraceHeader), xray_header_value);\n}\n\nTracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& operation_name,\n                                  Envoy::SystemTime start_time) {\n  auto child_span = std::make_unique<XRay::Span>(time_source_, random_, broker_);\n  child_span->setName(name());\n  child_span->setOperation(operation_name);\n  child_span->setStartTime(start_time);\n  child_span->setParentId(id());\n  child_span->setTraceId(traceId());\n  child_span->setSampled(sampled());\n  return child_span;\n}\n\nTracing::SpanPtr Tracer::startSpan(const std::string& operation_name, Envoy::SystemTime start_time,\n                                   const absl::optional<XRayHeader>& xray_header) {\n\n  auto span_ptr = std::make_unique<XRay::Span>(time_source_, random_, *daemon_broker_);\n  span_ptr->setName(segment_name_);\n  span_ptr->setOperation(operation_name);\n  // Even though we have a TimeSource member in the tracer, we assume the start_time argument has a\n  // more precise value than calling the systemTime() at this point in time.\n  span_ptr->setStartTime(start_time);\n  span_ptr->setOrigin(origin_);\n  span_ptr->setAwsMetadata(aws_metadata_);\n\n  if (xray_header) { // there's a previous span that this span should be based-on\n    span_ptr->setParentId(xray_header->parent_id_);\n    span_ptr->setTraceId(xray_header->trace_id_);\n    switch (xray_header->sample_decision_) {\n    case SamplingDecision::Sampled:\n      span_ptr->setSampled(true);\n      break;\n    case SamplingDecision::NotSampled:\n      // should never get here. 
If the header has Sampled=0 then we never call startSpan().\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    default:\n      break;\n    }\n  } else {\n    span_ptr->setTraceId(generateTraceId(time_source_.systemTime(), random_));\n  }\n  return span_ptr;\n}\n\nXRay::SpanPtr Tracer::createNonSampledSpan() const {\n  auto span_ptr = std::make_unique<XRay::Span>(time_source_, random_, *daemon_broker_);\n  span_ptr->setName(segment_name_);\n  span_ptr->setOrigin(origin_);\n  span_ptr->setTraceId(generateTraceId(time_source_.systemTime(), random_));\n  span_ptr->setAwsMetadata(aws_metadata_);\n  span_ptr->setSampled(false);\n  return span_ptr;\n}\n\nvoid Span::setTag(absl::string_view name, absl::string_view value) {\n  // For the full set of values see:\n  // https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-http\n  constexpr auto SpanContentLength = \"content_length\";\n  constexpr auto SpanMethod = \"method\";\n  constexpr auto SpanStatus = \"status\";\n  constexpr auto SpanUserAgent = \"user_agent\";\n  constexpr auto SpanUrl = \"url\";\n  constexpr auto SpanClientIp = \"client_ip\";\n  constexpr auto SpanXForwardedFor = \"x_forwarded_for\";\n\n  constexpr auto HttpUrl = \"http.url\";\n  constexpr auto HttpMethod = \"http.method\";\n  constexpr auto HttpStatusCode = \"http.status_code\";\n  constexpr auto HttpUserAgent = \"user_agent\";\n  constexpr auto HttpResponseSize = \"response_size\";\n  constexpr auto PeerAddress = \"peer.address\";\n\n  if (name.empty() || value.empty()) {\n    return;\n  }\n\n  if (name == HttpUrl) {\n    http_request_annotations_.emplace(SpanUrl, ValueUtil::stringValue(std::string(value)));\n  } else if (name == HttpMethod) {\n    http_request_annotations_.emplace(SpanMethod, ValueUtil::stringValue(std::string(value)));\n  } else if (name == HttpUserAgent) {\n    http_request_annotations_.emplace(SpanUserAgent, ValueUtil::stringValue(std::string(value)));\n  } else if (name == HttpStatusCode) 
{\n    uint64_t status_code;\n    if (!absl::SimpleAtoi(value, &status_code)) {\n      ENVOY_LOG(debug, \"{} must be a number, given: {}\", HttpStatusCode, value);\n      return;\n    }\n    http_response_annotations_.emplace(SpanStatus, ValueUtil::numberValue(status_code));\n  } else if (name == HttpResponseSize) {\n    uint64_t response_size;\n    if (!absl::SimpleAtoi(value, &response_size)) {\n      ENVOY_LOG(debug, \"{} must be a number, given: {}\", HttpResponseSize, value);\n      return;\n    }\n    http_response_annotations_.emplace(SpanContentLength, ValueUtil::numberValue(response_size));\n  } else if (name == PeerAddress) {\n    http_request_annotations_.emplace(SpanClientIp, ValueUtil::stringValue(std::string(value)));\n    // In this case, PeerAddress refers to the client's actual IP address, not\n    // the address specified in the HTTP X-Forwarded-For header.\n    http_request_annotations_.emplace(SpanXForwardedFor, ValueUtil::boolValue(false));\n  } else {\n    custom_annotations_.emplace(name, value);\n  }\n}\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/tracer.h",
    "content": "#pragma once\n\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/common/hex.h\"\n#include \"common/common/random_generator.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/tracers/xray/daemon_broker.h\"\n#include \"extensions/tracers/xray/sampling_strategy.h\"\n#include \"extensions/tracers/xray/xray_configuration.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nconstexpr auto XRayTraceHeader = \"x-amzn-trace-id\";\n\nclass Span : public Tracing::Span, Logger::Loggable<Logger::Id::config> {\npublic:\n  /**\n   * Creates a new Span.\n   *\n   * @param time_source A time source to get the span end time\n   * @param random random generator for generating unique child span ids\n   * @param broker Facilitates communication with the X-Ray daemon.\n   */\n  Span(TimeSource& time_source, Random::RandomGenerator& random, DaemonBroker& broker)\n      : time_source_(time_source), random_(random), broker_(broker),\n        id_(Hex::uint64ToHex(random_.random())), sampled_(true) {}\n\n  /**\n   * Sets the Span's trace ID.\n   */\n  void setTraceId(absl::string_view trace_id) { trace_id_ = std::string(trace_id); };\n\n  /**\n   * Gets the Span's trace ID.\n   */\n  const std::string& traceId() const { return trace_id_; }\n\n  /**\n   * Completes the current span, serialize it and send it to the X-Ray daemon.\n   */\n  void finishSpan() override;\n\n  /**\n   * Sets the current operation name on the Span.\n   * This information will be included in the X-Ray span's metadata.\n   */\n  void setOperation(absl::string_view operation) override {\n    operation_name_ = std::string(operation);\n  }\n\n  /**\n   * Sets the name of the Span.\n   */\n  void setName(absl::string_view name) { name_ = std::string(name); 
}\n\n  /**\n   * Sets the origin of the Span.\n   */\n  void setOrigin(absl::string_view origin) { origin_ = std::string(origin); }\n\n  /**\n   * Gets the origin of the Span.\n   */\n  const std::string& origin() { return origin_; }\n\n  /**\n   * Adds a key-value pair to either the Span's annotations or metadata.\n   * An allowlist of keys are added to the annotations, everything else is added to the metadata.\n   */\n  void setTag(absl::string_view name, absl::string_view value) override;\n\n  /**\n   * Sets the ID of the parent segment. This is different from the Trace ID.\n   * The parent ID is used if the request originated from an instrumented application.\n   * For more information see:\n   * https://docs.aws.amazon.com/xray/latest/devguide/xray-concepts.html#xray-concepts-tracingheader\n   */\n  void setParentId(absl::string_view parent_segment_id) {\n    parent_segment_id_ = std::string(parent_segment_id);\n  }\n\n  /**\n   * Sets the aws metadata field of the Span.\n   */\n  void setAwsMetadata(const absl::flat_hash_map<std::string, ProtobufWkt::Value>& aws_metadata) {\n    aws_metadata_ = aws_metadata;\n  }\n\n  /**\n   * Gets the AWS metadata\n   * field of the Span.\n   */\n  const absl::flat_hash_map<std::string, ProtobufWkt::Value>& awsMetadata() {\n    return aws_metadata_;\n  }\n\n  /**\n   * Sets the recording start time of the traced operation/request.\n   */\n  void setStartTime(Envoy::SystemTime start_time) { start_time_ = start_time; }\n\n  /**\n   * Marks the span as either \"sampled\" or \"not-sampled\".\n   * By default, Spans are \"sampled\".\n   * This is handy in cases where the sampling decision has already been determined either by Envoy\n   * or by a downstream service.\n   */\n  void setSampled(bool sampled) override { sampled_ = sampled; };\n\n  /**\n   * Adds X-Ray trace header to the set of outgoing headers.\n   */\n  void injectContext(Http::RequestHeaderMap& request_headers) override;\n\n  /**\n   * Gets the start time of this 
Span.\n   */\n  Envoy::SystemTime startTime() const { return start_time_; }\n\n  /**\n   * Gets this Span's ID.\n   */\n  const std::string& id() const { return id_; }\n\n  const std::string& parentId() const { return parent_segment_id_; }\n\n  /**\n   * Gets this Span's name.\n   */\n  const std::string& name() const { return name_; }\n\n  /**\n   * Determines whether this span is sampled.\n   */\n  bool sampled() const { return sampled_; }\n\n  /**\n   * Not used by X-Ray because the Spans are \"logged\" (serialized) to the X-Ray daemon.\n   */\n  void log(Envoy::SystemTime, const std::string&) override {}\n\n  // X-Ray doesn't support baggage, so noop these OpenTracing functions.\n  void setBaggage(absl::string_view, absl::string_view) override {}\n  std::string getBaggage(absl::string_view) override { return std::string(); }\n\n  /**\n   * Creates a child span.\n   * In X-Ray terms this creates a sub-segment and sets its parent ID to the current span's ID.\n   * @param operation_name The span of the child span.\n   * @param start_time The time at which this child span started.\n   */\n  Tracing::SpanPtr spawnChild(const Tracing::Config&, const std::string& operation_name,\n                              Envoy::SystemTime start_time) override;\n\nprivate:\n  Envoy::TimeSource& time_source_;\n  Random::RandomGenerator& random_;\n  DaemonBroker& broker_;\n  Envoy::SystemTime start_time_;\n  std::string operation_name_;\n  std::string id_;\n  std::string trace_id_;\n  std::string parent_segment_id_;\n  std::string name_;\n  std::string origin_;\n  absl::flat_hash_map<std::string, ProtobufWkt::Value> aws_metadata_;\n  absl::flat_hash_map<std::string, ProtobufWkt::Value> http_request_annotations_;\n  absl::flat_hash_map<std::string, ProtobufWkt::Value> http_response_annotations_;\n  absl::flat_hash_map<std::string, std::string> custom_annotations_;\n  bool sampled_;\n};\n\nusing SpanPtr = std::unique_ptr<Span>;\n\nclass Tracer {\npublic:\n  Tracer(absl::string_view 
segment_name, absl::string_view origin,\n         const absl::flat_hash_map<std::string, ProtobufWkt::Value>& aws_metadata,\n         DaemonBrokerPtr daemon_broker, TimeSource& time_source, Random::RandomGenerator& random)\n      : segment_name_(segment_name), origin_(origin), aws_metadata_(aws_metadata),\n        daemon_broker_(std::move(daemon_broker)), time_source_(time_source), random_(random) {}\n  /**\n   * Starts a tracing span for X-Ray\n   */\n  Tracing::SpanPtr startSpan(const std::string& operation_name, Envoy::SystemTime start_time,\n                             const absl::optional<XRayHeader>& xray_header);\n  /**\n   * Creates a Span that is marked as not-sampled.\n   * This is useful when the sampling decision is done in Envoy's X-Ray and we want to avoid\n   * overruling that decision in the upstream service in case that service itself uses X-Ray for\n   * tracing.\n   */\n  XRay::SpanPtr createNonSampledSpan() const;\n\nprivate:\n  const std::string segment_name_;\n  const std::string origin_;\n  const absl::flat_hash_map<std::string, ProtobufWkt::Value> aws_metadata_;\n  const DaemonBrokerPtr daemon_broker_;\n  Envoy::TimeSource& time_source_;\n  Random::RandomGenerator& random_;\n};\n\nusing TracerPtr = std::unique_ptr<Tracer>;\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/util.cc",
    "content": "#include \"extensions/tracers/xray/util.h\"\n\n#include \"absl/strings/ascii.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\n// AWS X-Ray has two sources of sampling rules (wildcard patterns):\n//\n// 1- The manifest file (static config)\n// 2- The X-Ray Service - periodically polled for new rules. (dynamic config)\n//\n// X-Ray will inspect every single request and try to match it against the set of rules it has to\n// decide whether or not a given request should be sampled. That means this wildcard matching\n// routine is on the hot path. I've spent a great deal of time to make this function optimal and not\n// allocate.\n// Using regex matching here has many downsides and it would require us to:\n// 1- escape/lint the user input pattern to avoid messing up its meaning (think '.' character is a\n// valid regex and URL character) 2- compile the regex and store it with every corresponding part of\n// the rule\n//\n// Those two steps would add significant overhead to the tracer. Meanwhile, the following function\n// has a comprehensive test suite and fuzz tests.\nbool wildcardMatch(absl::string_view pattern, absl::string_view input) {\n  if (pattern.empty()) {\n    return input.empty();\n  }\n\n  // Check the special case of a single * pattern, as it's common.\n  constexpr char glob = '*';\n  if (pattern.size() == 1 && pattern[0] == glob) {\n    return true;\n  }\n\n  size_t i = 0, p = 0, i_star = input.size(), p_star = 0;\n  while (i < input.size()) {\n    if (p < pattern.size() && absl::ascii_tolower(input[i]) == absl::ascii_tolower(pattern[p])) {\n      ++i;\n      ++p;\n    } else if (p < pattern.size() && '?' 
== pattern[p]) {\n      ++i;\n      ++p;\n    } else if (p < pattern.size() && pattern[p] == glob) {\n      i_star = i;\n      p_star = p++;\n    } else if (i_star != input.size()) {\n      i = ++i_star;\n      p = p_star + 1;\n    } else {\n      return false;\n    }\n  }\n\n  while (p < pattern.size() && pattern[p] == glob) {\n    ++p;\n  }\n\n  return p == pattern.size() && i == input.size();\n}\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/util.h",
    "content": "#pragma once\n#include <string>\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\n/**\n * Performs a case-insensitive wild-card match against the input string.\n * This method works with pseudo-regex chars; specifically ? and *\n *\n * An asterisk (*) represents any combination of characters.\n * A question mark (?) represents any single character.\n *\n * @param pattern The regex-like pattern to compare with.\n * @param text The string to compare against the pattern.\n * @return whether the text matches the pattern.\n */\nbool wildcardMatch(absl::string_view pattern, absl::string_view input);\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/xray_configuration.h",
    "content": "#pragma once\n#include <string>\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\n/**\n * X-Ray configuration Model.\n */\nstruct XRayConfiguration {\n  const std::string daemon_endpoint_;\n  const std::string segment_name_;\n  const std::string sampling_rules_;\n  const std::string origin_;\n  const absl::flat_hash_map<std::string, ProtobufWkt::Value> aws_metadata_;\n};\n\nenum class SamplingDecision {\n  Sampled,\n  NotSampled,\n  Unknown,\n};\n\n/**\n * X-Ray header Model.\n */\nstruct XRayHeader {\n  std::string trace_id_;\n  std::string parent_id_;\n  SamplingDecision sample_decision_{};\n};\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/xray_tracer_impl.cc",
    "content": "#include \"extensions/tracers/xray/xray_tracer_impl.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n\n#include \"extensions/tracers/xray/localized_sampling.h\"\n#include \"extensions/tracers/xray/tracer.h\"\n#include \"extensions/tracers/xray/xray_configuration.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nnamespace {\nconstexpr auto DefaultDaemonEndpoint = \"127.0.0.1:2000\";\nXRayHeader parseXRayHeader(const Http::LowerCaseString& header) {\n  const auto& lowered_header = header.get();\n  XRayHeader result;\n  for (const auto& token : StringUtil::splitToken(lowered_header, \";\")) {\n    if (absl::StartsWith(token, \"root=\")) {\n      result.trace_id_ = std::string(StringUtil::cropLeft(token, \"=\"));\n    } else if (absl::StartsWith(token, \"parent=\")) {\n      result.parent_id_ = std::string(StringUtil::cropLeft(token, \"=\"));\n    } else if (absl::StartsWith(token, \"sampled=\")) {\n      const auto s = StringUtil::cropLeft(token, \"=\");\n      if (s == \"1\") {\n        result.sample_decision_ = SamplingDecision::Sampled;\n      } else if (s == \"0\") {\n        result.sample_decision_ = SamplingDecision::NotSampled;\n      } else {\n        result.sample_decision_ = SamplingDecision::Unknown;\n      }\n    }\n  }\n  return result;\n}\n} // namespace\n\nDriver::Driver(const XRayConfiguration& config,\n               Server::Configuration::TracerFactoryContext& context)\n    : xray_config_(config),\n      tls_slot_ptr_(context.serverFactoryContext().threadLocal().allocateSlot()) {\n\n  const std::string daemon_endpoint =\n      config.daemon_endpoint_.empty() ? 
DefaultDaemonEndpoint : config.daemon_endpoint_;\n\n  ENVOY_LOG(debug, \"send X-Ray generated segments to daemon address on {}\", daemon_endpoint);\n  sampling_strategy_ = std::make_unique<XRay::LocalizedSamplingStrategy>(\n      xray_config_.sampling_rules_, context.serverFactoryContext().api().randomGenerator(),\n      context.serverFactoryContext().timeSource());\n\n  tls_slot_ptr_->set([this, daemon_endpoint,\n                      &context](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    DaemonBrokerPtr broker = std::make_unique<DaemonBrokerImpl>(daemon_endpoint);\n    TracerPtr tracer = std::make_unique<Tracer>(\n        xray_config_.segment_name_, xray_config_.origin_, xray_config_.aws_metadata_,\n        std::move(broker), context.serverFactoryContext().timeSource(),\n        context.serverFactoryContext().api().randomGenerator());\n    return std::make_shared<XRay::Driver::TlsTracer>(std::move(tracer), *this);\n  });\n}\n\nTracing::SpanPtr Driver::startSpan(const Tracing::Config& config,\n                                   Http::RequestHeaderMap& request_headers,\n                                   const std::string& operation_name, Envoy::SystemTime start_time,\n                                   const Tracing::Decision tracing_decision) {\n  // First thing is to determine whether this request will be sampled or not.\n  // if there's a X-Ray header and it has a sampling decision already determined (i.e. 
Sample=1)\n  // then we can move on; otherwise, we ask the sampling strategy whether this request should be\n  // sampled or not.\n  //\n  // The second step is create a Span.\n  // If we have a XRay TraceID in the headers, then we create a SpanContext to pass that trace-id\n  // around if no TraceID (which means no x-ray header) then this is a brand new span.\n\n  UNREFERENCED_PARAMETER(config);\n  // TODO(marcomagdy) - how do we factor this into the logic above\n  UNREFERENCED_PARAMETER(tracing_decision);\n  const auto* header = request_headers.get(Http::LowerCaseString(XRayTraceHeader));\n  absl::optional<bool> should_trace;\n  XRayHeader xray_header;\n  if (header) {\n    Http::LowerCaseString lowered_header_value{std::string(header->value().getStringView())};\n    xray_header = parseXRayHeader(lowered_header_value);\n    // if the sample_decision in the x-ray header is unknown then we try to make a decision based\n    // on the sampling strategy\n    if (xray_header.sample_decision_ == SamplingDecision::Sampled) {\n      should_trace = true;\n    } else if (xray_header.sample_decision_ == SamplingDecision::NotSampled) {\n      should_trace = false;\n    } else {\n      ENVOY_LOG(\n          trace,\n          \"Unable to determine from the X-Ray trace header whether request is sampled or not\");\n    }\n  }\n\n  if (!should_trace.has_value()) {\n    const SamplingRequest request{std::string{request_headers.getHostValue()},\n                                  std::string{request_headers.getMethodValue()},\n                                  std::string{request_headers.getPathValue()}};\n\n    should_trace = sampling_strategy_->shouldTrace(request);\n  }\n\n  auto* tracer = tls_slot_ptr_->getTyped<Driver::TlsTracer>().tracer_.get();\n  if (should_trace.value()) {\n    return tracer->startSpan(operation_name, start_time,\n                             header ? 
absl::optional<XRayHeader>(xray_header) : absl::nullopt);\n  }\n\n  // instead of returning nullptr, we return a Span that is marked as not-sampled.\n  // This is important to communicate that information to upstream services (see injectContext()).\n  // Otherwise, the upstream service can decide to sample the request regardless and we end up with\n  // more samples than we asked for.\n  return tracer->createNonSampledSpan();\n}\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/xray/xray_tracer_impl.h",
    "content": "#pragma once\n\n#include \"envoy/server/tracer_config.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/xray/tracer.h\"\n#include \"extensions/tracers/xray/xray_configuration.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nclass Driver : public Tracing::Driver, public Logger::Loggable<Logger::Id::tracing> {\npublic:\n  Driver(const XRay::XRayConfiguration& config,\n         Server::Configuration::TracerFactoryContext& context);\n\n  Tracing::SpanPtr startSpan(const Tracing::Config& config, Http::RequestHeaderMap& request_headers,\n                             const std::string& operation_name, Envoy::SystemTime start_time,\n                             const Tracing::Decision tracing_decision) override;\n\nprivate:\n  struct TlsTracer : ThreadLocal::ThreadLocalObject {\n    TlsTracer(TracerPtr tracer, Driver& driver) : tracer_(std::move(tracer)), driver_(driver) {}\n    TracerPtr tracer_;\n    Driver& driver_;\n  };\n\n  XRayConfiguration xray_config_;\n  SamplingStrategyPtr sampling_strategy_;\n  ThreadLocal::SlotPtr tls_slot_ptr_;\n};\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Trace driver for Zipkin (https://zipkin.io/).\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"zipkin_lib\",\n    srcs = [\n        \"span_buffer.cc\",\n        \"span_context.cc\",\n        \"span_context_extractor.cc\",\n        \"tracer.cc\",\n        \"util.cc\",\n        \"zipkin_core_types.cc\",\n        \"zipkin_tracer_impl.cc\",\n    ],\n    hdrs = [\n        \"span_buffer.h\",\n        \"span_context.h\",\n        \"span_context_extractor.h\",\n        \"tracer.h\",\n        \"tracer_interface.h\",\n        \"util.h\",\n        \"zipkin_core_constants.h\",\n        \"zipkin_core_types.h\",\n        \"zipkin_json_field_names.h\",\n        \"zipkin_tracer_impl.h\",\n    ],\n    external_deps = [\n        \"abseil_optional\",\n    ],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/http:async_client_utility_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/singleton:const_singleton\",\n        
\"//source/common/tracing:http_tracer_lib\",\n        \"//source/common/upstream:cluster_update_tracker_lib\",\n        \"@com_github_openzipkin_zipkinapi//:zipkin_cc_proto\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # Legacy test use. TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/server:__subpackages__\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    deps = [\n        \":zipkin_lib\",\n        \"//source/extensions/tracers/common:factory_base_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/config.cc",
    "content": "#include \"extensions/tracers/zipkin/config.h\"\n\n#include \"envoy/config/trace/v3/zipkin.pb.h\"\n#include \"envoy/config/trace/v3/zipkin.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/zipkin/zipkin_tracer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nZipkinTracerFactory::ZipkinTracerFactory() : FactoryBase(\"envoy.tracers.zipkin\") {}\n\nTracing::HttpTracerSharedPtr ZipkinTracerFactory::createHttpTracerTyped(\n    const envoy::config::trace::v3::ZipkinConfig& proto_config,\n    Server::Configuration::TracerFactoryContext& context) {\n  Tracing::DriverPtr zipkin_driver = std::make_unique<Zipkin::Driver>(\n      proto_config, context.serverFactoryContext().clusterManager(),\n      context.serverFactoryContext().scope(), context.serverFactoryContext().threadLocal(),\n      context.serverFactoryContext().runtime(), context.serverFactoryContext().localInfo(),\n      context.serverFactoryContext().api().randomGenerator(),\n      context.serverFactoryContext().timeSource());\n\n  return std::make_shared<Tracing::HttpTracerImpl>(std::move(zipkin_driver),\n                                                   context.serverFactoryContext().localInfo());\n}\n\n/**\n * Static registration for the Zipkin tracer. @see RegisterFactory.\n */\nREGISTER_FACTORY(ZipkinTracerFactory, Server::Configuration::TracerFactory){\"envoy.zipkin\"};\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/config.h",
    "content": "#pragma once\n\n#include \"envoy/config/trace/v3/zipkin.pb.h\"\n#include \"envoy/config/trace/v3/zipkin.pb.validate.h\"\n\n#include \"extensions/tracers/common/factory_base.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\n/**\n * Config registration for the zipkin tracer. @see TracerFactory.\n */\nclass ZipkinTracerFactory : public Common::FactoryBase<envoy::config::trace::v3::ZipkinConfig> {\npublic:\n  ZipkinTracerFactory();\n\nprivate:\n  // FactoryBase\n  Tracing::HttpTracerSharedPtr\n  createHttpTracerTyped(const envoy::config::trace::v3::ZipkinConfig& proto_config,\n                        Server::Configuration::TracerFactoryContext& context) override;\n};\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/span_buffer.cc",
    "content": "#include \"extensions/tracers/zipkin/span_buffer.h\"\n\n#include \"envoy/config/trace/v3/zipkin.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/tracers/zipkin/util.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n#include \"extensions/tracers/zipkin/zipkin_json_field_names.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/str_replace.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nSpanBuffer::SpanBuffer(\n    const envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion& version,\n    const bool shared_span_context)\n    : serializer_{makeSerializer(version, shared_span_context)} {}\n\nSpanBuffer::SpanBuffer(\n    const envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion& version,\n    const bool shared_span_context, uint64_t size)\n    : serializer_{makeSerializer(version, shared_span_context)} {\n  allocateBuffer(size);\n}\n\nbool SpanBuffer::addSpan(Span&& span) {\n  const auto& annotations = span.annotations();\n  if (span_buffer_.size() == span_buffer_.capacity() || annotations.empty() ||\n      annotations.end() ==\n          std::find_if(annotations.begin(), annotations.end(), [](const auto& annotation) {\n            return annotation.value() == CLIENT_SEND || annotation.value() == SERVER_RECV;\n          })) {\n\n    // Buffer full or invalid span.\n    return false;\n  }\n\n  span_buffer_.push_back(std::move(span));\n\n  return true;\n}\n\nSerializerPtr SpanBuffer::makeSerializer(\n    const envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion& version,\n    const bool shared_span_context) {\n  switch (version) {\n  case envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1:\n    return std::make_unique<JsonV1Serializer>();\n  case envoy::config::trace::v3::ZipkinConfig::HTTP_JSON:\n    return std::make_unique<JsonV2Serializer>(shared_span_context);\n  case 
envoy::config::trace::v3::ZipkinConfig::HTTP_PROTO:\n    return std::make_unique<ProtobufSerializer>(shared_span_context);\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nstd::string JsonV1Serializer::serialize(const std::vector<Span>& zipkin_spans) {\n  const std::string serialized_elements =\n      absl::StrJoin(zipkin_spans, \",\", [](std::string* element, const Span& zipkin_span) {\n        absl::StrAppend(element, zipkin_span.toJson());\n      });\n  return absl::StrCat(\"[\", serialized_elements, \"]\");\n}\n\nJsonV2Serializer::JsonV2Serializer(const bool shared_span_context)\n    : shared_span_context_{shared_span_context} {}\n\nstd::string JsonV2Serializer::serialize(const std::vector<Span>& zipkin_spans) {\n  Util::Replacements replacements;\n  const std::string serialized_elements = absl::StrJoin(\n      zipkin_spans, \",\", [this, &replacements](std::string* out, const Span& zipkin_span) {\n        const auto& replacement_values = replacements;\n        absl::StrAppend(\n            out, absl::StrJoin(\n                     toListOfSpans(zipkin_span, replacements), \",\",\n                     [&replacement_values](std::string* element, const ProtobufWkt::Struct& span) {\n                       const std::string json = MessageUtil::getJsonStringFromMessage(\n                           span, /* pretty_print */ false,\n                           /* always_print_primitive_fields */ true);\n\n                       // The Zipkin API V2 specification mandates to store timestamp value as int64\n                       // https://github.com/openzipkin/zipkin-api/blob/228fabe660f1b5d1e28eac9df41f7d1deed4a1c2/zipkin2-api.yaml#L447-L463\n                       // (often translated as uint64 in some of the official implementations:\n                       // https://github.com/openzipkin/zipkin-go/blob/62dc8b26c05e0e8b88eb7536eff92498e65bbfc3/model/span.go#L114,\n                       // and see the discussion here:\n                       // 
https://github.com/openzipkin/zipkin-go/pull/161#issuecomment-598558072).\n                       // However, when the timestamp is stored as number value in a protobuf\n                       // struct, it is stored as a double. Because of how protobuf serializes\n                       // doubles, there is a possibility that the value will be rendered as a\n                       // number with scientific notation as reported in:\n                       // https://github.com/envoyproxy/envoy/issues/9341#issuecomment-566912973. To\n                       // deal with that issue, here we do a workaround by storing the timestamp as\n                       // string and keeping track of that with the corresponding integer\n                       // replacements, and do the replacement here so we can meet the Zipkin API V2\n                       // requirements.\n                       //\n                       // TODO(dio): The right fix for this is to introduce additional knob when\n                       // serializing double in protobuf DoubleToBuffer function, and make it\n                       // available to be controlled at caller site.\n                       // https://github.com/envoyproxy/envoy/issues/10411).\n                       absl::StrAppend(element, absl::StrReplaceAll(json, replacement_values));\n                     }));\n      });\n  return absl::StrCat(\"[\", serialized_elements, \"]\");\n}\n\nconst std::vector<ProtobufWkt::Struct>\nJsonV2Serializer::toListOfSpans(const Span& zipkin_span, Util::Replacements& replacements) const {\n  std::vector<ProtobufWkt::Struct> spans;\n  spans.reserve(zipkin_span.annotations().size());\n  for (const auto& annotation : zipkin_span.annotations()) {\n    ProtobufWkt::Struct span;\n    auto* fields = span.mutable_fields();\n\n    if (annotation.value() == CLIENT_SEND) {\n      (*fields)[SPAN_KIND] = ValueUtil::stringValue(KIND_CLIENT);\n    } else if (annotation.value() == SERVER_RECV) {\n      if 
(shared_span_context_ && zipkin_span.annotations().size() > 1) {\n        (*fields)[SPAN_SHARED] = ValueUtil::boolValue(true);\n      }\n      (*fields)[SPAN_KIND] = ValueUtil::stringValue(KIND_SERVER);\n    } else {\n      continue;\n    }\n\n    if (annotation.isSetEndpoint()) {\n      // Usually we store number to a ProtobufWkt::Struct object via ValueUtil::numberValue.\n      // However, due to the possibility of rendering that to a number with scientific notation, we\n      // chose to store it as a string and keeping track the corresponding replacement. For example,\n      // we have 1584324295476870 if we stored it as a double value, MessageToJsonString gives\n      // us 1.58432429547687e+15. Instead we store it as the string of 1584324295476870 (when it is\n      // serialized: \"1584324295476870\"), and replace it post MessageToJsonString serialization with\n      // integer (1584324295476870 without `\"`), see: JsonV2Serializer::serialize.\n      (*fields)[SPAN_TIMESTAMP] =\n          Util::uint64Value(annotation.timestamp(), SPAN_TIMESTAMP, replacements);\n      (*fields)[SPAN_LOCAL_ENDPOINT] =\n          ValueUtil::structValue(toProtoEndpoint(annotation.endpoint()));\n    }\n\n    (*fields)[SPAN_TRACE_ID] = ValueUtil::stringValue(zipkin_span.traceIdAsHexString());\n    if (zipkin_span.isSetParentId()) {\n      (*fields)[SPAN_PARENT_ID] = ValueUtil::stringValue(zipkin_span.parentIdAsHexString());\n    }\n\n    (*fields)[SPAN_ID] = ValueUtil::stringValue(zipkin_span.idAsHexString());\n\n    const auto& span_name = zipkin_span.name();\n    if (!span_name.empty()) {\n      (*fields)[SPAN_NAME] = ValueUtil::stringValue(span_name);\n    }\n\n    if (zipkin_span.isSetDuration()) {\n      // Since SPAN_DURATION has the same data type with SPAN_TIMESTAMP, we use Util::uint64Value to\n      // store it.\n      (*fields)[SPAN_DURATION] =\n          Util::uint64Value(zipkin_span.duration(), SPAN_DURATION, replacements);\n    }\n\n    const auto& binary_annotations 
= zipkin_span.binaryAnnotations();\n    if (!binary_annotations.empty()) {\n      ProtobufWkt::Struct tags;\n      auto* tag_fields = tags.mutable_fields();\n      for (const auto& binary_annotation : binary_annotations) {\n        (*tag_fields)[binary_annotation.key()] = ValueUtil::stringValue(binary_annotation.value());\n      }\n      (*fields)[SPAN_TAGS] = ValueUtil::structValue(tags);\n    }\n\n    spans.push_back(std::move(span));\n  }\n  return spans;\n}\n\nconst ProtobufWkt::Struct JsonV2Serializer::toProtoEndpoint(const Endpoint& zipkin_endpoint) const {\n  ProtobufWkt::Struct endpoint;\n  auto* fields = endpoint.mutable_fields();\n\n  Network::Address::InstanceConstSharedPtr address = zipkin_endpoint.address();\n  if (address) {\n    if (address->ip()->version() == Network::Address::IpVersion::v4) {\n      (*fields)[ENDPOINT_IPV4] = ValueUtil::stringValue(address->ip()->addressAsString());\n    } else {\n      (*fields)[ENDPOINT_IPV6] = ValueUtil::stringValue(address->ip()->addressAsString());\n    }\n    (*fields)[ENDPOINT_PORT] = ValueUtil::numberValue(address->ip()->port());\n  }\n\n  const std::string& service_name = zipkin_endpoint.serviceName();\n  if (!service_name.empty()) {\n    (*fields)[ENDPOINT_SERVICE_NAME] = ValueUtil::stringValue(service_name);\n  }\n\n  return endpoint;\n}\n\nProtobufSerializer::ProtobufSerializer(const bool shared_span_context)\n    : shared_span_context_{shared_span_context} {}\n\nstd::string ProtobufSerializer::serialize(const std::vector<Span>& zipkin_spans) {\n  zipkin::proto3::ListOfSpans spans;\n  for (const Span& zipkin_span : zipkin_spans) {\n    spans.MergeFrom(toListOfSpans(zipkin_span));\n  }\n  std::string serialized;\n  spans.SerializeToString(&serialized);\n  return serialized;\n}\n\nconst zipkin::proto3::ListOfSpans ProtobufSerializer::toListOfSpans(const Span& zipkin_span) const {\n  zipkin::proto3::ListOfSpans spans;\n  for (const auto& annotation : zipkin_span.annotations()) {\n    zipkin::proto3::Span 
span;\n    if (annotation.value() == CLIENT_SEND) {\n      span.set_kind(zipkin::proto3::Span::CLIENT);\n    } else if (annotation.value() == SERVER_RECV) {\n      span.set_shared(shared_span_context_ && zipkin_span.annotations().size() > 1);\n      span.set_kind(zipkin::proto3::Span::SERVER);\n    } else {\n      continue;\n    }\n\n    if (annotation.isSetEndpoint()) {\n      span.set_timestamp(annotation.timestamp());\n      span.mutable_local_endpoint()->MergeFrom(toProtoEndpoint(annotation.endpoint()));\n    }\n\n    span.set_trace_id(zipkin_span.traceIdAsByteString());\n    if (zipkin_span.isSetParentId()) {\n      span.set_parent_id(zipkin_span.parentIdAsByteString());\n    }\n\n    span.set_id(zipkin_span.idAsByteString());\n    span.set_name(zipkin_span.name());\n\n    if (zipkin_span.isSetDuration()) {\n      span.set_duration(zipkin_span.duration());\n    }\n\n    auto& tags = *span.mutable_tags();\n    for (const auto& binary_annotation : zipkin_span.binaryAnnotations()) {\n      tags[binary_annotation.key()] = binary_annotation.value();\n    }\n\n    auto* mutable_span = spans.add_spans();\n    mutable_span->MergeFrom(span);\n  }\n  return spans;\n}\n\nconst zipkin::proto3::Endpoint\nProtobufSerializer::toProtoEndpoint(const Endpoint& zipkin_endpoint) const {\n  zipkin::proto3::Endpoint endpoint;\n  Network::Address::InstanceConstSharedPtr address = zipkin_endpoint.address();\n  if (address) {\n    if (address->ip()->version() == Network::Address::IpVersion::v4) {\n      endpoint.set_ipv4(Util::toByteString(address->ip()->ipv4()->address()));\n    } else {\n      endpoint.set_ipv6(Util::toByteString(address->ip()->ipv6()->address()));\n    }\n    endpoint.set_port(address->ip()->port());\n  }\n\n  const std::string& service_name = zipkin_endpoint.serviceName();\n  if (!service_name.empty()) {\n    endpoint.set_service_name(service_name);\n  }\n\n  return endpoint;\n}\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // 
namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/span_buffer.h",
    "content": "#pragma once\n\n#include \"envoy/config/trace/v3/zipkin.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/tracers/zipkin/tracer_interface.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_types.h\"\n\n#include \"zipkin.pb.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\n/**\n * This class implements a simple buffer to store Zipkin tracing spans\n * prior to flushing them.\n */\nclass SpanBuffer {\npublic:\n  /**\n   * Constructor that creates an empty buffer. Space needs to be allocated by invoking\n   * the method allocateBuffer(size).\n   *\n   * @param version The selected Zipkin collector version. @see\n   * api/envoy/config/trace/v2/trace.proto.\n   * @param shared_span_context To determine whether client and server spans will share the same\n   * span context.\n   */\n  SpanBuffer(const envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion& version,\n             bool shared_span_context);\n\n  /**\n   * Constructor that initializes a buffer with the given size.\n   *\n   * @param version The selected Zipkin collector version. 
@see\n   * api/envoy/config/trace/v2/trace.proto.\n   * @param shared_span_context To determine whether client and server spans will share the same\n   * span context.\n   * @param size The desired buffer size.\n   */\n  SpanBuffer(const envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion& version,\n             bool shared_span_context, uint64_t size);\n\n  /**\n   * Allocates space for an empty buffer or resizes a previously-allocated one.\n   *\n   * @param size The desired buffer size.\n   */\n  void allocateBuffer(uint64_t size) { span_buffer_.reserve(size); }\n\n  /**\n   * Adds the given Zipkin span to the buffer.\n   *\n   * @param span The span to be added to the buffer.\n   *\n   * @return true if the span was successfully added, or false if the buffer was full.\n   */\n  bool addSpan(Span&& span);\n\n  /**\n   * Empties the buffer. This method is supposed to be called when all buffered spans\n   * have been sent to the Zipkin service.\n   */\n  void clear() { span_buffer_.clear(); }\n\n  /**\n   * @return the number of spans currently buffered.\n   */\n  uint64_t pendingSpans() { return span_buffer_.size(); }\n\n  /**\n   * Serializes std::vector<Span> span_buffer_ to std::string as payload for the reporter when the\n   * reporter does spans flushing. 
This function does only serialization and does not clear\n   * span_buffer_.\n   *\n   * @return std::string the contents of the buffer, a collection of serialized pending Zipkin\n   * spans.\n   */\n  std::string serialize() const { return serializer_->serialize(span_buffer_); }\n\nprivate:\n  SerializerPtr\n  makeSerializer(const envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion& version,\n                 bool shared_span_context);\n\n  // We use a pre-allocated vector to improve performance\n  std::vector<Span> span_buffer_;\n  SerializerPtr serializer_;\n};\n\nusing SpanBufferPtr = std::unique_ptr<SpanBuffer>;\n\n/**\n * JsonV1Serializer implements Zipkin::Serializer that serializes list of Zipkin spans into JSON\n * Zipkin v1 array.\n */\nclass JsonV1Serializer : public Serializer {\npublic:\n  JsonV1Serializer() = default;\n\n  /**\n   * Serialize list of Zipkin spans into Zipkin v1 JSON array.\n   * @return std::string serialized pending spans as Zipkin v1 JSON array.\n   */\n  std::string serialize(const std::vector<Span>& pending_spans) override;\n};\n\n/**\n * JsonV2Serializer implements Zipkin::Serializer that serializes list of Zipkin spans into JSON\n * Zipkin v2 array.\n */\nclass JsonV2Serializer : public Serializer {\npublic:\n  JsonV2Serializer(bool shared_span_context);\n\n  /**\n   * Serialize list of Zipkin spans into Zipkin v2 JSON array.\n   * @return std::string serialized pending spans as Zipkin v2 JSON array.\n   */\n  std::string serialize(const std::vector<Span>& pending_spans) override;\n\nprivate:\n  const std::vector<ProtobufWkt::Struct> toListOfSpans(const Span& zipkin_span,\n                                                       Util::Replacements& replacements) const;\n  const ProtobufWkt::Struct toProtoEndpoint(const Endpoint& zipkin_endpoint) const;\n\n  const bool shared_span_context_;\n};\n\n/**\n * ProtobufSerializer implements Zipkin::Serializer that serializes list of Zipkin spans into\n * stringified 
(SerializeToString) protobuf message.\n */\nclass ProtobufSerializer : public Serializer {\npublic:\n  ProtobufSerializer(bool shared_span_context);\n\n  /**\n   * Serialize list of Zipkin spans into Zipkin v2 zipkin::proto3::ListOfSpans.\n   * @return std::string serialized pending spans as Zipkin zipkin::proto3::ListOfSpans.\n   */\n  std::string serialize(const std::vector<Span>& pending_spans) override;\n\nprivate:\n  const zipkin::proto3::ListOfSpans toListOfSpans(const Span& zipkin_span) const;\n  const zipkin::proto3::Endpoint toProtoEndpoint(const Endpoint& zipkin_endpoint) const;\n\n  const bool shared_span_context_;\n};\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/span_context.cc",
    "content": "#include \"extensions/tracers/zipkin/span_context.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nSpanContext::SpanContext(const Span& span)\n    : trace_id_high_(span.isSetTraceIdHigh() ? span.traceIdHigh() : 0), trace_id_(span.traceId()),\n      id_(span.id()), parent_id_(span.isSetParentId() ? span.parentId() : 0),\n      sampled_(span.sampled()) {}\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/span_context.h",
    "content": "#pragma once\n\n#include <regex>\n\n#include \"extensions/tracers/zipkin/util.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_types.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\n/**\n * This class represents the context of a Zipkin span. It embodies the following\n * span characteristics: trace id, span id, parent id, and basic annotations.\n */\nclass SpanContext {\npublic:\n  /**\n   * Default constructor. Creates an empty context.\n   */\n  SpanContext() = default;\n\n  /**\n   * Constructor that creates a context object from the supplied trace, span and\n   * parent ids, and sampled flag.\n   *\n   * @param trace_id_high The high 64 bits of the trace id.\n   * @param trace_id The low 64 bits of the trace id.\n   * @param id The span id.\n   * @param parent_id The parent id.\n   * @param sampled The sampled flag.\n   */\n  SpanContext(const uint64_t trace_id_high, const uint64_t trace_id, const uint64_t id,\n              const uint64_t parent_id, bool sampled)\n      : trace_id_high_(trace_id_high), trace_id_(trace_id), id_(id), parent_id_(parent_id),\n        sampled_(sampled) {}\n\n  /**\n   * Constructor that creates a context object from the given Zipkin span object.\n   *\n   * @param span The Zipkin span used to initialize a SpanContext object.\n   */\n  SpanContext(const Span& span);\n\n  /**\n   * @return the span id as an integer\n   */\n  uint64_t id() const { return id_; }\n\n  /**\n   * @return the span's parent id as an integer.\n   */\n  uint64_t parentId() const { return parent_id_; }\n\n  /**\n   * @return the high 64 bits of the trace id as an integer.\n   */\n  uint64_t traceIdHigh() const { return trace_id_high_; }\n\n  /**\n   * @return the low 64 bits of the trace id as an integer.\n   */\n  uint64_t traceId() const { return trace_id_; }\n\n  /**\n   * @return whether using 128 bit trace id.\n   */\n  bool 
is128BitTraceId() const { return trace_id_high_ != 0; }\n\n  /**\n   * @return the sampled flag.\n   */\n  bool sampled() const { return sampled_; }\n\nprivate:\n  const uint64_t trace_id_high_{0};\n  const uint64_t trace_id_{0};\n  const uint64_t id_{0};\n  const uint64_t parent_id_{0};\n  const bool sampled_{false};\n};\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/span_context_extractor.cc",
    "content": "#include \"extensions/tracers/zipkin/span_context_extractor.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n\n#include \"extensions/tracers/zipkin/span_context.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\nnamespace {\nconstexpr int FormatMaxLength = 32 + 1 + 16 + 3 + 16; // traceid128-spanid-1-parentid\nbool validSamplingFlags(char c) {\n  if (c == '1' || c == '0' || c == 'd') {\n    return true;\n  }\n  return false;\n}\n\nbool getSamplingFlags(char c, const Tracing::Decision tracing_decision) {\n  if (validSamplingFlags(c)) {\n    return c == '0' ? false : true;\n  } else {\n    return tracing_decision.traced;\n  }\n}\n\n} // namespace\n\nSpanContextExtractor::SpanContextExtractor(Http::RequestHeaderMap& request_headers)\n    : request_headers_(request_headers) {}\n\nSpanContextExtractor::~SpanContextExtractor() = default;\n\nbool SpanContextExtractor::extractSampled(const Tracing::Decision tracing_decision) {\n  bool sampled(false);\n  auto b3_header_entry = request_headers_.get(ZipkinCoreConstants::get().B3);\n  if (b3_header_entry) {\n    absl::string_view b3 = b3_header_entry->value().getStringView();\n    int sampled_pos = 0;\n    switch (b3.length()) {\n    case 1:\n      break;\n    case 35: // 16 + 1 + 16 + 2\n      sampled_pos = 34;\n      break;\n    case 51: // 32 + 1 + 16 + 2\n      sampled_pos = 50;\n      break;\n    case 52: // 16 + 1 + 16 + 2 + 1 + 16\n      sampled_pos = 34;\n      break;\n    case 68: // 32 + 1 + 16 + 2 + 1 + 16\n      sampled_pos = 50;\n      break;\n    default:\n      return tracing_decision.traced;\n    }\n    return getSamplingFlags(b3[sampled_pos], tracing_decision);\n  }\n\n  auto x_b3_sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED);\n  if (!x_b3_sampled_entry) {\n    return tracing_decision.traced;\n  }\n  // Checking if sampled flag 
has been specified. Also checking for 'true' value, as some old\n  // zipkin tracers may still use that value, although should be 0 or 1.\n  absl::string_view xb3_sampled = x_b3_sampled_entry->value().getStringView();\n  sampled = xb3_sampled == SAMPLED || xb3_sampled == \"true\";\n  return sampled;\n}\n\nstd::pair<SpanContext, bool> SpanContextExtractor::extractSpanContext(bool is_sampled) {\n  if (request_headers_.get(ZipkinCoreConstants::get().B3)) {\n    return extractSpanContextFromB3SingleFormat(is_sampled);\n  }\n  uint64_t trace_id(0);\n  uint64_t trace_id_high(0);\n  uint64_t span_id(0);\n  uint64_t parent_id(0);\n\n  auto b3_trace_id_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID);\n  auto b3_span_id_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID);\n  if (b3_span_id_entry && b3_trace_id_entry) {\n    // Extract trace id - which can either be 128 or 64 bit. For 128 bit,\n    // it needs to be divided into two 64 bit numbers (high and low).\n    const std::string tid(b3_trace_id_entry->value().getStringView());\n    if (b3_trace_id_entry->value().size() == 32) {\n      const std::string high_tid = tid.substr(0, 16);\n      const std::string low_tid = tid.substr(16, 16);\n      if (!StringUtil::atoull(high_tid.c_str(), trace_id_high, 16) ||\n          !StringUtil::atoull(low_tid.c_str(), trace_id, 16)) {\n        throw ExtractorException(\n            fmt::format(\"Invalid traceid_high {} or tracid {}\", high_tid.c_str(), low_tid.c_str()));\n      }\n    } else if (!StringUtil::atoull(tid.c_str(), trace_id, 16)) {\n      throw ExtractorException(absl::StrCat(\"Invalid trace_id \", tid.c_str()));\n    }\n\n    const std::string spid(b3_span_id_entry->value().getStringView());\n    if (!StringUtil::atoull(spid.c_str(), span_id, 16)) {\n      throw ExtractorException(absl::StrCat(\"Invalid span id \", spid.c_str()));\n    }\n\n    auto b3_parent_id_entry = 
request_headers_.get(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID);\n    if (b3_parent_id_entry && !b3_parent_id_entry->value().empty()) {\n      const std::string pspid(b3_parent_id_entry->value().getStringView());\n      if (!StringUtil::atoull(pspid.c_str(), parent_id, 16)) {\n        throw ExtractorException(absl::StrCat(\"Invalid parent span id \", pspid.c_str()));\n      }\n    }\n  } else {\n    return {SpanContext(), false};\n  }\n\n  return {SpanContext(trace_id_high, trace_id, span_id, parent_id, is_sampled), true};\n}\n\nstd::pair<SpanContext, bool>\nSpanContextExtractor::extractSpanContextFromB3SingleFormat(bool is_sampled) {\n  auto b3_head_entry = request_headers_.get(ZipkinCoreConstants::get().B3);\n  ASSERT(b3_head_entry);\n  const std::string b3(b3_head_entry->value().getStringView());\n  if (!b3.length()) {\n    throw ExtractorException(\"Invalid input: empty\");\n  }\n\n  if (b3.length() == 1) { // possibly sampling flags\n    if (validSamplingFlags(b3[0])) {\n      return std::pair<SpanContext, bool>(SpanContext(), false);\n    }\n    throw ExtractorException(fmt::format(\"Invalid input: invalid sampling flag {}\", b3[0]));\n  }\n\n  if (b3.length() < 16 + 1 + 16 /* traceid64-spanid */) {\n    throw ExtractorException(\"Invalid input: truncated\");\n  } else if (b3.length() > FormatMaxLength) {\n    throw ExtractorException(\"Invalid input: too long\");\n  }\n\n  uint64_t trace_id(0);\n  uint64_t trace_id_high(0);\n  uint64_t span_id(0);\n  uint64_t parent_id(0);\n\n  uint64_t pos = 0;\n\n  const std::string trace_id_str = b3.substr(pos, 16);\n  if (b3[pos + 32] == '-') {\n    if (!StringUtil::atoull(trace_id_str.c_str(), trace_id_high, 16)) {\n      throw ExtractorException(\n          fmt::format(\"Invalid input: invalid trace id high {}\", trace_id_str.c_str()));\n    }\n    pos += 16;\n    const std::string trace_id_low_str = b3.substr(pos, 16);\n    if (!StringUtil::atoull(trace_id_low_str.c_str(), trace_id, 16)) {\n      throw 
ExtractorException(\n          fmt::format(\"Invalid input: invalid trace id {}\", trace_id_low_str.c_str()));\n    }\n  } else {\n    if (!StringUtil::atoull(trace_id_str.c_str(), trace_id, 16)) {\n      throw ExtractorException(\n          fmt::format(\"Invalid input: invalid trace id {}\", trace_id_str.c_str()));\n    }\n  }\n\n  pos += 16; // traceId ended\n  if (!(b3[pos++] == '-')) {\n    throw ExtractorException(\"Invalid input: not exists span id\");\n  }\n\n  const std::string span_id_str = b3.substr(pos, 16);\n  if (!StringUtil::atoull(span_id_str.c_str(), span_id, 16)) {\n    throw ExtractorException(fmt::format(\"Invalid input: invalid span id {}\", span_id_str.c_str()));\n  }\n  pos += 16; // spanId ended\n\n  if (b3.length() > pos) {\n    // If we are at this point, we have more than just traceId-spanId.\n    // If the sampling field is present, we'll have a delimiter 2 characters from now. Ex \"-1\"\n    // If it is absent, but a parent ID is (which is strange), we'll have at least 17 characters.\n    // Therefore, if we have less than two characters, the input is truncated.\n    if (b3.length() == (pos + 1)) {\n      throw ExtractorException(\"Invalid input: truncated\");\n    }\n\n    if (!(b3[pos++] == '-')) {\n      throw ExtractorException(\"Invalid input: not exists sampling field\");\n    }\n\n    // If our position is at the end of the string, or another delimiter is one character past our\n    // position, try to read sampled status.\n    if (b3.length() == pos + 1 || ((b3.length() >= pos + 2) && (b3[pos + 1] == '-'))) {\n      if (!validSamplingFlags(b3[pos])) {\n        throw ExtractorException(fmt::format(\"Invalid input: invalid sampling flag {}\", b3[pos]));\n      }\n      pos++; // consume the sampled status\n    } else {\n      throw ExtractorException(\"Invalid input: truncated\");\n    }\n\n    if (b3.length() > pos) {\n      // If we are at this point, we should have a parent ID, encoded as \"-[0-9a-f]{16}\"\n      if (b3.length() 
!= pos + 17) {\n        throw ExtractorException(\"Invalid input: truncated\");\n      }\n\n      ASSERT(b3[pos] == '-');\n      pos++;\n\n      const std::string parent_id_str = b3.substr(pos, b3.length() - pos);\n      if (!StringUtil::atoull(parent_id_str.c_str(), parent_id, 16)) {\n        throw ExtractorException(\n            fmt::format(\"Invalid input: invalid parent id {}\", parent_id_str.c_str()));\n      }\n    }\n  }\n\n  return {SpanContext(trace_id_high, trace_id, span_id, parent_id, is_sampled), true};\n}\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/span_context_extractor.h",
    "content": "#pragma once\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/http/header_map_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nclass SpanContext;\n\nstruct ExtractorException : public EnvoyException {\n  ExtractorException(const std::string& what) : EnvoyException(what) {}\n};\n\n/**\n * This class is used to SpanContext extracted from the Http header\n */\nclass SpanContextExtractor {\npublic:\n  SpanContextExtractor(Http::RequestHeaderMap& request_headers);\n  ~SpanContextExtractor();\n  bool extractSampled(const Tracing::Decision tracing_decision);\n  std::pair<SpanContext, bool> extractSpanContext(bool is_sampled);\n\nprivate:\n  /*\n   * Use to SpanContext extracted from B3 single format Http header\n   * b3: {x-b3-traceid}-{x-b3-spanid}-{if x-b3-flags 'd' else x-b3-sampled}-{x-b3-parentspanid}\n   * See: \"https://github.com/openzipkin/b3-propagation\n   */\n  std::pair<SpanContext, bool> extractSpanContextFromB3SingleFormat(bool is_sampled);\n  bool tryExtractSampledFromB3SingleFormat();\n  const Http::RequestHeaderMap& request_headers_;\n};\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/tracer.cc",
    "content": "#include \"extensions/tracers/zipkin/tracer.h\"\n\n#include <chrono>\n\n#include \"common/common/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/zipkin/util.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nSpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span_name,\n                          SystemTime timestamp) {\n  // Build the endpoint\n  Endpoint ep(service_name_, address_);\n\n  // Build the CS annotation\n  Annotation cs;\n  cs.setEndpoint(std::move(ep));\n  if (config.operationName() == Tracing::OperationName::Egress) {\n    cs.setValue(CLIENT_SEND);\n  } else {\n    cs.setValue(SERVER_RECV);\n  }\n\n  // Create an all-new span, with no parent id\n  SpanPtr span_ptr = std::make_unique<Span>(time_source_);\n  span_ptr->setName(span_name);\n  uint64_t random_number = random_generator_.random();\n  span_ptr->setId(random_number);\n  span_ptr->setTraceId(random_number);\n  if (trace_id_128bit_) {\n    span_ptr->setTraceIdHigh(random_generator_.random());\n  }\n  int64_t start_time_micro = std::chrono::duration_cast<std::chrono::microseconds>(\n                                 time_source_.monotonicTime().time_since_epoch())\n                                 .count();\n  span_ptr->setStartTime(start_time_micro);\n\n  // Set the timestamp globally for the span and also for the CS annotation\n  uint64_t timestamp_micro =\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count();\n  cs.setTimestamp(timestamp_micro);\n  span_ptr->setTimestamp(timestamp_micro);\n\n  // Add CS annotation to the span\n  span_ptr->addAnnotation(std::move(cs));\n\n  span_ptr->setTracer(this);\n\n  return span_ptr;\n}\n\nSpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span_name,\n                          SystemTime timestamp, 
const SpanContext& previous_context) {\n  SpanPtr span_ptr = std::make_unique<Span>(time_source_);\n  Annotation annotation;\n  uint64_t timestamp_micro;\n\n  timestamp_micro =\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count();\n\n  span_ptr->setName(span_name);\n\n  // Set the span's kind (client or server)\n  if (config.operationName() == Tracing::OperationName::Egress) {\n    annotation.setValue(CLIENT_SEND);\n  } else {\n    annotation.setValue(SERVER_RECV);\n  }\n\n  // Set the span's id and parent id\n  if (config.operationName() == Tracing::OperationName::Egress || !shared_span_context_) {\n    // We need to create a new span that is a child of the previous span; no shared context\n\n    // Create a new span id\n    uint64_t random_number = random_generator_.random();\n    span_ptr->setId(random_number);\n\n    // Set the parent id to the id of the previous span\n    span_ptr->setParentId(previous_context.id());\n\n    // Set the timestamp globally for the span\n    span_ptr->setTimestamp(timestamp_micro);\n  } else if (config.operationName() == Tracing::OperationName::Ingress) {\n    // We need to create a new span that will share context with the previous span\n\n    // Initialize the shared context for the new span\n    span_ptr->setId(previous_context.id());\n    if (previous_context.parentId()) {\n      span_ptr->setParentId(previous_context.parentId());\n    }\n  } else {\n    return span_ptr; // return an empty span\n  }\n\n  // Build the endpoint\n  Endpoint ep(service_name_, address_);\n\n  // Add the newly-created annotation to the span\n  annotation.setEndpoint(std::move(ep));\n  annotation.setTimestamp(timestamp_micro);\n  span_ptr->addAnnotation(std::move(annotation));\n\n  // Keep the same trace id\n  span_ptr->setTraceId(previous_context.traceId());\n  if (previous_context.is128BitTraceId()) {\n    span_ptr->setTraceIdHigh(previous_context.traceIdHigh());\n  }\n\n  // Keep the same sampled 
flag\n  span_ptr->setSampled(previous_context.sampled());\n\n  int64_t start_time_micro = std::chrono::duration_cast<std::chrono::microseconds>(\n                                 time_source_.monotonicTime().time_since_epoch())\n                                 .count();\n  span_ptr->setStartTime(start_time_micro);\n\n  span_ptr->setTracer(this);\n\n  return span_ptr;\n}\n\nvoid Tracer::reportSpan(Span&& span) {\n  if (reporter_ && span.sampled()) {\n    reporter_->reportSpan(std::move(span));\n  }\n}\n\nvoid Tracer::setReporter(ReporterPtr reporter) { reporter_ = std::move(reporter); }\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/tracer.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"extensions/tracers/zipkin/span_context.h\"\n#include \"extensions/tracers/zipkin/tracer_interface.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_types.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\n/**\n * Abstract class that delegates to users of the Tracer class the responsibility\n * of \"reporting\" a Zipkin span that has ended its life cycle. \"Reporting\" can mean that the\n * span will be sent to out to Zipkin, or buffered so that it can be sent out later.\n */\nclass Reporter {\npublic:\n  /**\n   * Destructor.\n   */\n  virtual ~Reporter() = default;\n\n  /**\n   * Method that a concrete Reporter class must implement to handle finished spans.\n   * For example, a span-buffer management policy could be implemented.\n   *\n   * @param span The span that needs action.\n   */\n  virtual void reportSpan(Span&& span) PURE;\n};\n\nusing ReporterPtr = std::unique_ptr<Reporter>;\n\n/**\n * This class implements the Zipkin tracer. It has methods to create the appropriate Zipkin span\n * type, i.e., root span, child span, or shared-context span.\n *\n * This class allows its users to supply a concrete Reporter class whose reportSpan method\n * is called by its own reportSpan method. By doing so, we have cleanly separated the logic\n * of dealing with finished spans from the span-creation and tracing logic.\n */\nclass Tracer : public TracerInterface {\npublic:\n  /**\n   * Constructor.\n   *\n   * @param service_name The name of the service where the Tracer is running. This name is\n   * used in all annotations' endpoints of the spans created by the Tracer.\n   * @param address Pointer to a network-address object. 
The IP address and port are used\n   * in all annotations' endpoints of the spans created by the Tracer.\n   * @param random_generator Reference to the random-number generator to be used by the Tracer.\n   * @param trace_id_128bit Whether 128bit ids should be used.\n   * @param shared_span_context Whether shared span id should be used.\n   */\n  Tracer(const std::string& service_name, Network::Address::InstanceConstSharedPtr address,\n         Random::RandomGenerator& random_generator, const bool trace_id_128bit,\n         const bool shared_span_context, TimeSource& time_source)\n      : service_name_(service_name), address_(address), reporter_(nullptr),\n        random_generator_(random_generator), trace_id_128bit_(trace_id_128bit),\n        shared_span_context_(shared_span_context), time_source_(time_source) {}\n\n  /**\n   * Creates a \"root\" Zipkin span.\n   *\n   * @param config The tracing configuration\n   * @param span_name Name of the new span.\n   * @param timestamp The time indicating the beginning of the span.\n   * @return SpanPtr The root span.\n   */\n  SpanPtr startSpan(const Tracing::Config&, const std::string& span_name, SystemTime timestamp);\n\n  /**\n   * Depending on the given context, creates either a \"child\" or a \"shared-context\" Zipkin span.\n   *\n   * @param config The tracing configuration\n   * @param span_name Name of the new span.\n   * @param timestamp The time indicating the beginning of the span.\n   * @param previous_context The context of the span preceding the one to be created.\n   * @return SpanPtr The child span.\n   */\n  SpanPtr startSpan(const Tracing::Config&, const std::string& span_name, SystemTime timestamp,\n                    const SpanContext& previous_context);\n\n  /**\n   * TracerInterface::reportSpan.\n   *\n   * @param span The span to be reported.\n   */\n  void reportSpan(Span&& span) override;\n\n  /**\n   * Associates a Reporter object with this Tracer.\n   *\n   * @param reporter The span reporter.\n   */\n 
 void setReporter(ReporterPtr reporter);\n\nprivate:\n  const std::string service_name_;\n  Network::Address::InstanceConstSharedPtr address_;\n  ReporterPtr reporter_;\n  Random::RandomGenerator& random_generator_;\n  const bool trace_id_128bit_;\n  const bool shared_span_context_;\n  TimeSource& time_source_;\n};\n\nusing TracerPtr = std::unique_ptr<Tracer>;\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/tracer_interface.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nclass Span;\n\n/**\n * This interface must be observed by a Zipkin tracer.\n */\nclass TracerInterface {\npublic:\n  /**\n   * Destructor.\n   */\n  virtual ~TracerInterface() = default;\n\n  /**\n   * A Zipkin tracer must implement this method. Its implementation must perform whatever\n   * actions are required when the given span is considered finished. An implementation\n   * will typically buffer the given span so that it can be flushed later.\n   *\n   * This method is invoked by the Span object when its finish() method is called.\n   *\n   * @param span The span that needs action.\n   */\n  virtual void reportSpan(Span&& span) PURE;\n};\n\n/**\n * Buffered pending spans serializer.\n */\nclass Serializer {\npublic:\n  virtual ~Serializer() = default;\n\n  /**\n   * Serialize buffered pending spans.\n   *\n   * @return std::string serialized buffered pending spans.\n   */\n  virtual std::string serialize(const std::vector<Span>& spans) PURE;\n};\n\nusing SerializerPtr = std::unique_ptr<Serializer>;\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/util.cc",
    "content": "#include \"extensions/tracers/zipkin/util.h\"\n\n#include <chrono>\n#include <random>\n#include <regex>\n\n#include \"common/common/hex.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nuint64_t Util::generateRandom64(TimeSource& time_source) {\n  uint64_t seed = std::chrono::duration_cast<std::chrono::nanoseconds>(\n                      time_source.systemTime().time_since_epoch())\n                      .count();\n  std::mt19937_64 rand_64(seed);\n  return rand_64();\n}\n\nProtobufWkt::Value Util::uint64Value(uint64_t value, absl::string_view name,\n                                     Replacements& replacements) {\n  const std::string string_value = std::to_string(value);\n  replacements.push_back({absl::StrCat(\"\\\"\", name, \"\\\":\\\"\", string_value, \"\\\"\"),\n                          absl::StrCat(\"\\\"\", name, \"\\\":\", string_value)});\n  return ValueUtil::stringValue(string_value);\n}\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/util.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n\n#include \"common/common/byte_order.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\n/**\n * Utility class with a few convenient methods\n */\nclass Util {\npublic:\n  /**\n   * Returns a randomly-generated 64-bit integer number.\n   */\n  static uint64_t generateRandom64(TimeSource& time_source);\n\n  /**\n   * Returns byte string representation of a number.\n   *\n   * @param value Number that will be represented in byte string.\n   * @return std::string byte string representation of a number.\n   */\n  template <typename Type> static std::string toByteString(Type value) {\n    return std::string(reinterpret_cast<const char*>(&value), sizeof(Type));\n  }\n\n  /**\n   * Returns big endian byte string representation of a number.\n   *\n   * @param value Number that will be represented in byte string.\n   * @param flip indicates to flip order or not.\n   * @return std::string byte string representation of a number.\n   */\n  template <typename Type> static std::string toBigEndianByteString(Type value) {\n    auto bytes = toEndianness<ByteOrder::BigEndian>(value);\n    return std::string(reinterpret_cast<const char*>(&bytes), sizeof(Type));\n  }\n\n  using Replacements = std::vector<std::pair<const std::string, const std::string>>;\n\n  /**\n   * Returns a wrapped uint64_t value as a string. In addition to that, it also pushes back a\n   * replacement to the given replacements vector. 
The replacement includes the supplied name\n   * as a key, for identification in a JSON stream.\n   *\n   * @param value uint64_t number that will be represented in string.\n   * @param name std::string that is the key for the value being replaced.\n   * @param replacements a container to hold the required replacements when serializing this value.\n   * @return ProtobufWkt::Value wrapped uint64_t as a string.\n   */\n  static ProtobufWkt::Value uint64Value(uint64_t value, absl::string_view name,\n                                        Replacements& replacements);\n};\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/zipkin_core_constants.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nnamespace {\n\nconstexpr char KIND_CLIENT[] = \"CLIENT\";\nconstexpr char KIND_SERVER[] = \"SERVER\";\n\nconstexpr char CLIENT_SEND[] = \"cs\";\nconstexpr char CLIENT_RECV[] = \"cr\";\nconstexpr char SERVER_SEND[] = \"ss\";\nconstexpr char SERVER_RECV[] = \"sr\";\n\nconstexpr char HTTP_HOST[] = \"http.host\";\nconstexpr char HTTP_METHOD[] = \"http.method\";\nconstexpr char HTTP_PATH[] = \"http.path\";\nconstexpr char HTTP_URL[] = \"http.url\";\nconstexpr char HTTP_STATUS_CODE[] = \"http.status_code\";\nconstexpr char HTTP_REQUEST_SIZE[] = \"http.request.size\";\nconstexpr char HTTP_RESPONSE_SIZE[] = \"http.response.size\";\n\nconstexpr char LOCAL_COMPONENT[] = \"lc\";\nconstexpr char ERROR[] = \"error\";\nconstexpr char CLIENT_ADDR[] = \"ca\";\nconstexpr char SERVER_ADDR[] = \"sa\";\n\nconstexpr char SAMPLED[] = \"1\";\nconstexpr char NOT_SAMPLED[] = \"0\";\n\nconstexpr char DEFAULT_COLLECTOR_ENDPOINT[] = \"/api/v1/spans\";\nconstexpr bool DEFAULT_SHARED_SPAN_CONTEXT = true;\n\n} // namespace\n\nclass ZipkinCoreConstantValues {\npublic:\n  // Zipkin B3 headers\n  const Http::LowerCaseString X_B3_TRACE_ID{\"x-b3-traceid\"};\n  const Http::LowerCaseString X_B3_SPAN_ID{\"x-b3-spanid\"};\n  const Http::LowerCaseString X_B3_PARENT_SPAN_ID{\"x-b3-parentspanid\"};\n  const Http::LowerCaseString X_B3_SAMPLED{\"x-b3-sampled\"};\n  const Http::LowerCaseString X_B3_FLAGS{\"x-b3-flags\"};\n\n  // Zipkin b3 single header\n  const Http::LowerCaseString B3{\"b3\"};\n};\n\nusing ZipkinCoreConstants = ConstSingleton<ZipkinCoreConstantValues>;\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/zipkin_core_types.cc",
    "content": "#include \"extensions/tracers/zipkin/zipkin_core_types.h\"\n\n#include <vector>\n\n#include \"common/common/utility.h\"\n\n#include \"extensions/tracers/zipkin/span_context.h\"\n#include \"extensions/tracers/zipkin/util.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n#include \"extensions/tracers/zipkin/zipkin_json_field_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nEndpoint::Endpoint(const Endpoint& ep) {\n  service_name_ = ep.serviceName();\n  address_ = ep.address();\n}\n\nEndpoint& Endpoint::operator=(const Endpoint& ep) {\n  service_name_ = ep.serviceName();\n  address_ = ep.address();\n  return *this;\n}\n\nconst ProtobufWkt::Struct Endpoint::toStruct(Util::Replacements&) const {\n  ProtobufWkt::Struct endpoint;\n  auto* fields = endpoint.mutable_fields();\n  if (!address_) {\n    (*fields)[ENDPOINT_IPV4] = ValueUtil::stringValue(\"\");\n    (*fields)[ENDPOINT_PORT] = ValueUtil::numberValue(0);\n  } else {\n    (*fields)[address_->ip()->version() == Network::Address::IpVersion::v4 ? 
ENDPOINT_IPV4\n                                                                           : ENDPOINT_IPV6] =\n        ValueUtil::stringValue(address_->ip()->addressAsString());\n    (*fields)[ENDPOINT_PORT] = ValueUtil::numberValue(address_->ip()->port());\n  }\n  (*fields)[ENDPOINT_SERVICE_NAME] = ValueUtil::stringValue(service_name_);\n\n  return endpoint;\n}\n\nAnnotation::Annotation(const Annotation& ann) {\n  timestamp_ = ann.timestamp();\n  value_ = ann.value();\n  if (ann.isSetEndpoint()) {\n    endpoint_ = ann.endpoint();\n  }\n}\n\nAnnotation& Annotation::operator=(const Annotation& ann) {\n  timestamp_ = ann.timestamp();\n  value_ = ann.value();\n  if (ann.isSetEndpoint()) {\n    endpoint_ = ann.endpoint();\n  }\n\n  return *this;\n}\n\nvoid Annotation::changeEndpointServiceName(const std::string& service_name) {\n  if (endpoint_.has_value()) {\n    endpoint_.value().setServiceName(service_name);\n  }\n}\n\nconst ProtobufWkt::Struct Annotation::toStruct(Util::Replacements& replacements) const {\n  ProtobufWkt::Struct annotation;\n  auto* fields = annotation.mutable_fields();\n  (*fields)[ANNOTATION_TIMESTAMP] = Util::uint64Value(timestamp_, SPAN_TIMESTAMP, replacements);\n  (*fields)[ANNOTATION_VALUE] = ValueUtil::stringValue(value_);\n  if (endpoint_.has_value()) {\n    (*fields)[ANNOTATION_ENDPOINT] =\n        ValueUtil::structValue(static_cast<Endpoint>(endpoint_.value()).toStruct(replacements));\n  }\n  return annotation;\n}\n\nBinaryAnnotation::BinaryAnnotation(const BinaryAnnotation& ann) {\n  key_ = ann.key();\n  value_ = ann.value();\n  annotation_type_ = ann.annotationType();\n  if (ann.isSetEndpoint()) {\n    endpoint_ = ann.endpoint();\n  }\n}\n\nBinaryAnnotation& BinaryAnnotation::operator=(const BinaryAnnotation& ann) {\n  key_ = ann.key();\n  value_ = ann.value();\n  annotation_type_ = ann.annotationType();\n  if (ann.isSetEndpoint()) {\n    endpoint_ = ann.endpoint();\n  }\n\n  return *this;\n}\n\nconst ProtobufWkt::Struct 
BinaryAnnotation::toStruct(Util::Replacements& replacements) const {\n  ProtobufWkt::Struct binary_annotation;\n  auto* fields = binary_annotation.mutable_fields();\n  (*fields)[BINARY_ANNOTATION_KEY] = ValueUtil::stringValue(key_);\n  (*fields)[BINARY_ANNOTATION_VALUE] = ValueUtil::stringValue(value_);\n\n  if (endpoint_) {\n    (*fields)[BINARY_ANNOTATION_ENDPOINT] =\n        ValueUtil::structValue(static_cast<Endpoint>(endpoint_.value()).toStruct(replacements));\n  }\n\n  return binary_annotation;\n}\n\nconst std::string Span::EMPTY_HEX_STRING_ = \"0000000000000000\";\n\nSpan::Span(const Span& span) : time_source_(span.time_source_) {\n  trace_id_ = span.traceId();\n  if (span.isSetTraceIdHigh()) {\n    trace_id_high_ = span.traceIdHigh();\n  }\n  name_ = span.name();\n  id_ = span.id();\n  if (span.isSetParentId()) {\n    parent_id_ = span.parentId();\n  }\n  debug_ = span.debug();\n  sampled_ = span.sampled();\n  annotations_ = span.annotations();\n  binary_annotations_ = span.binaryAnnotations();\n  if (span.isSetTimestamp()) {\n    timestamp_ = span.timestamp();\n  }\n  if (span.isSetDuration()) {\n    duration_ = span.duration();\n  }\n  monotonic_start_time_ = span.startTime();\n  tracer_ = span.tracer();\n}\n\nvoid Span::setServiceName(const std::string& service_name) {\n  for (auto& annotation : annotations_) {\n    annotation.changeEndpointServiceName(service_name);\n  }\n}\n\nconst ProtobufWkt::Struct Span::toStruct(Util::Replacements& replacements) const {\n  ProtobufWkt::Struct span;\n  auto* fields = span.mutable_fields();\n  (*fields)[SPAN_TRACE_ID] = ValueUtil::stringValue(traceIdAsHexString());\n  (*fields)[SPAN_NAME] = ValueUtil::stringValue(name_);\n  (*fields)[SPAN_ID] = ValueUtil::stringValue(Hex::uint64ToHex(id_));\n\n  if (parent_id_.has_value()) {\n    (*fields)[SPAN_PARENT_ID] = ValueUtil::stringValue(Hex::uint64ToHex(parent_id_.value()));\n  }\n\n  if (timestamp_.has_value()) {\n    // Usually we store number to a ProtobufWkt::Struct 
object via ValueUtil::numberValue.\n    // However, due to the possibility of rendering that to a number with scientific notation, we\n    // chose to store it as a string and keeping track the corresponding replacement.\n    (*fields)[SPAN_TIMESTAMP] = Util::uint64Value(timestamp_.value(), SPAN_TIMESTAMP, replacements);\n  }\n\n  if (duration_.has_value()) {\n    // Since SPAN_DURATION has the same data type with SPAN_TIMESTAMP, we use Util::uint64Value to\n    // store it.\n    (*fields)[SPAN_DURATION] = Util::uint64Value(duration_.value(), SPAN_DURATION, replacements);\n  }\n\n  if (!annotations_.empty()) {\n    std::vector<ProtobufWkt::Value> annotation_list;\n    for (auto& annotation : annotations_) {\n      annotation_list.push_back(ValueUtil::structValue(annotation.toStruct(replacements)));\n    }\n    (*fields)[SPAN_ANNOTATIONS] = ValueUtil::listValue(annotation_list);\n  }\n\n  if (!binary_annotations_.empty()) {\n    std::vector<ProtobufWkt::Value> binary_annotation_list;\n    for (auto& binary_annotation : binary_annotations_) {\n      binary_annotation_list.push_back(\n          ValueUtil::structValue(binary_annotation.toStruct(replacements)));\n    }\n    (*fields)[SPAN_BINARY_ANNOTATIONS] = ValueUtil::listValue(binary_annotation_list);\n  }\n\n  return span;\n}\n\nvoid Span::finish() {\n  // Assumption: Span will have only one annotation when this method is called.\n  SpanContext context(*this);\n  if (annotations_[0].value() == SERVER_RECV) {\n    // Need to set the SS annotation\n    Annotation ss;\n    ss.setEndpoint(annotations_[0].endpoint());\n    ss.setTimestamp(std::chrono::duration_cast<std::chrono::microseconds>(\n                        time_source_.systemTime().time_since_epoch())\n                        .count());\n    ss.setValue(SERVER_SEND);\n    annotations_.push_back(std::move(ss));\n  } else if (annotations_[0].value() == CLIENT_SEND) {\n    // Need to set the CR annotation.\n    Annotation cr;\n    const uint64_t stop_timestamp = 
std::chrono::duration_cast<std::chrono::microseconds>(\n                                        time_source_.systemTime().time_since_epoch())\n                                        .count();\n    cr.setEndpoint(annotations_[0].endpoint());\n    cr.setTimestamp(stop_timestamp);\n    cr.setValue(CLIENT_RECV);\n    annotations_.push_back(std::move(cr));\n  }\n\n  if (monotonic_start_time_) {\n    const int64_t monotonic_stop_time = std::chrono::duration_cast<std::chrono::microseconds>(\n                                            time_source_.monotonicTime().time_since_epoch())\n                                            .count();\n    setDuration(monotonic_stop_time - monotonic_start_time_);\n  }\n\n  if (auto t = tracer()) {\n    t->reportSpan(std::move(*this));\n  }\n}\n\nvoid Span::setTag(absl::string_view name, absl::string_view value) {\n  if (!name.empty() && !value.empty()) {\n    addBinaryAnnotation(BinaryAnnotation(name, value));\n  }\n}\n\nvoid Span::log(SystemTime timestamp, const std::string& event) {\n  Annotation annotation;\n  annotation.setTimestamp(\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count());\n  annotation.setValue(event);\n  addAnnotation(std::move(annotation));\n}\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/zipkin_core_types.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/pure.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/network/address.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/hex.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/tracers/zipkin/tracer_interface.h\"\n#include \"extensions/tracers/zipkin/util.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_replace.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\n/**\n * Base class to be inherited by all classes that represent Zipkin-related concepts, namely:\n * endpoint, annotation, binary annotation, and span.\n */\nclass ZipkinBase {\npublic:\n  /**\n   * Destructor.\n   */\n  virtual ~ZipkinBase() = default;\n\n  /**\n   * All classes defining Zipkin abstractions need to implement this method to convert\n   * the corresponding abstraction to a ProtobufWkt::Struct.\n   * @param replacements A container that is used to hold the required replacements when this object\n   * is serialized.\n   */\n  virtual const ProtobufWkt::Struct toStruct(Util::Replacements& replacements) const PURE;\n\n  /**\n   * Serializes the a type as a Zipkin-compliant JSON representation as a string.\n   *\n   * @return a stringified JSON.\n   */\n  const std::string toJson() const {\n    Util::Replacements replacements;\n    return absl::StrReplaceAll(\n        MessageUtil::getJsonStringFromMessage(toStruct(replacements), /* pretty_print */ false,\n                                              /* always_print_primitive_fields */ true),\n        replacements);\n  };\n};\n\n/**\n * Represents a Zipkin endpoint. 
This class is based on Zipkin's Thrift definition of an endpoint.\n * Endpoints can be added to Zipkin annotations.\n */\nclass Endpoint : public ZipkinBase {\npublic:\n  /**\n   * Copy constructor.\n   */\n  Endpoint(const Endpoint&);\n\n  /**\n   * Assignment operator.\n   */\n  Endpoint& operator=(const Endpoint&);\n\n  /**\n   * Default constructor. Creates an empty Endpoint.\n   */\n  Endpoint() : address_(nullptr) {}\n\n  /**\n   * Constructor that initializes an endpoint with the given attributes.\n   *\n   * @param service_name String representing the endpoint's service name\n   * @param address Pointer to an object representing the endpoint's network address\n   */\n  Endpoint(const std::string& service_name, Network::Address::InstanceConstSharedPtr address)\n      : service_name_(service_name), address_(address) {}\n\n  /**\n   * @return the endpoint's address.\n   */\n  Network::Address::InstanceConstSharedPtr address() const { return address_; }\n\n  /**\n   * Sets the endpoint's address\n   */\n  void setAddress(Network::Address::InstanceConstSharedPtr address) { address_ = address; }\n\n  /**\n   * @return the endpoint's service name attribute.\n   */\n  const std::string& serviceName() const { return service_name_; }\n\n  /**\n   * Sets the endpoint's service name attribute.\n   */\n  void setServiceName(const std::string& service_name) { service_name_ = service_name; }\n\n  /**\n   * Represents the endpoint as a protobuf struct.\n   *\n   * @return a protobuf struct.\n   */\n  const ProtobufWkt::Struct toStruct(Util::Replacements& replacements) const override;\n\nprivate:\n  std::string service_name_;\n  Network::Address::InstanceConstSharedPtr address_;\n};\n\n/**\n * Represents a Zipkin basic annotation. 
This class is based on Zipkin's Thrift definition of\n * an annotation.\n */\nclass Annotation : public ZipkinBase {\npublic:\n  /**\n   * Copy constructor.\n   */\n  Annotation(const Annotation&);\n\n  /**\n   * Assignment operator.\n   */\n  Annotation& operator=(const Annotation&);\n\n  /**\n   * Default constructor. Creates an empty annotation.\n   */\n  Annotation() = default;\n\n  /**\n   * Constructor that creates an annotation based on the given parameters.\n   *\n   * @param timestamp A 64-bit integer containing the annotation timestamp attribute.\n   * @param value A string containing the annotation's value attribute. Valid values\n   * appear on ZipkinCoreConstants. The most commonly used values are \"cs\", \"cr\", \"ss\" and \"sr\".\n   * @param endpoint The endpoint object representing the annotation's endpoint attribute.\n   */\n  Annotation(uint64_t timestamp, const std::string value, Endpoint& endpoint)\n      : timestamp_(timestamp), value_(value), endpoint_(endpoint) {}\n\n  /**\n   * @return the annotation's endpoint attribute.\n   */\n  const Endpoint& endpoint() const { return endpoint_.value(); }\n\n  /**\n   * Sets the annotation's endpoint attribute (copy semantics).\n   */\n  void setEndpoint(const Endpoint& endpoint) { endpoint_ = endpoint; }\n\n  /**\n   * Sets the annotation's endpoint attribute (move semantics).\n   */\n  void setEndpoint(const Endpoint&& endpoint) { endpoint_ = endpoint; }\n\n  /**\n   * Replaces the endpoint's service-name attribute value with the given value.\n   *\n   * @param service_name String with the new service name.\n   */\n  void changeEndpointServiceName(const std::string& service_name);\n\n  /**\n   * @return the annotation's timestamp attribute\n   * (clock time for user presentation: microseconds since epoch).\n   */\n  uint64_t timestamp() const { return timestamp_; }\n\n  /**\n   * Sets the annotation's timestamp attribute.\n   */\n  void setTimestamp(uint64_t timestamp) { timestamp_ = timestamp; }\n\n 
 /**\n   * return the annotation's value attribute.\n   */\n  const std::string& value() const { return value_; }\n\n  /**\n   * Sets the annotation's value attribute.\n   */\n  void setValue(const std::string& value) { value_ = value; }\n\n  /**\n   * @return true if the endpoint attribute is set, or false otherwise.\n   */\n  bool isSetEndpoint() const { return endpoint_.has_value(); }\n\n  /**\n   * Represents the annotation as a protobuf struct.\n   *\n   * @return a protobuf struct.\n   */\n  const ProtobufWkt::Struct toStruct(Util::Replacements& replacements) const override;\n\nprivate:\n  uint64_t timestamp_{0};\n  std::string value_;\n  absl::optional<Endpoint> endpoint_;\n};\n\n/**\n * Enum representing valid types of Zipkin binary annotations.\n */\nenum AnnotationType { BOOL = 0, STRING = 1 };\n\n/**\n * Represents a Zipkin binary annotation. This class is based on Zipkin's Thrift definition of\n * a binary annotation. A binary annotation allows arbitrary key-value pairs to be associated\n * with a Zipkin span.\n */\nclass BinaryAnnotation : public ZipkinBase {\npublic:\n  /**\n   * Copy constructor.\n   */\n  BinaryAnnotation(const BinaryAnnotation&);\n\n  /**\n   * Assignment operator.\n   */\n  BinaryAnnotation& operator=(const BinaryAnnotation&);\n\n  /**\n   * Default constructor. 
Creates an empty binary annotation.\n   */\n  BinaryAnnotation() : annotation_type_(STRING) {}\n\n  /**\n   * Constructor that creates a binary annotation based on the given parameters.\n   *\n   * @param key The key name of the annotation.\n   * @param value The value associated with the key.\n   */\n  BinaryAnnotation(absl::string_view key, absl::string_view value)\n      : key_(key), value_(value), annotation_type_(STRING) {}\n\n  /**\n   * @return the type of the binary annotation.\n   */\n  AnnotationType annotationType() const { return annotation_type_; }\n\n  /**\n   * Sets the binary's annotation type.\n   */\n  void setAnnotationType(AnnotationType annotation_type) { annotation_type_ = annotation_type; }\n\n  /**\n   * @return the annotation's endpoint attribute.\n   */\n  const Endpoint& endpoint() const { return endpoint_.value(); }\n\n  /**\n   * Sets the annotation's endpoint attribute (copy semantics).\n   */\n  void setEndpoint(const Endpoint& endpoint) { endpoint_ = endpoint; }\n\n  /**\n   * Sets the annotation's endpoint attribute (move semantics).\n   */\n  void setEndpoint(const Endpoint&& endpoint) { endpoint_ = endpoint; }\n\n  /**\n   * @return true if the endpoint attribute is set, or false otherwise.\n   */\n  bool isSetEndpoint() const { return endpoint_.has_value(); }\n  /**\n   * @return the key attribute.\n   */\n  const std::string& key() const { return key_; }\n\n  /**\n   * Sets the key attribute.\n   */\n  void setKey(const std::string& key) { key_ = key; }\n\n  /**\n   * @return the value attribute.\n   */\n  const std::string& value() const { return value_; }\n\n  /**\n   * Sets the value attribute.\n   */\n  void setValue(const std::string& value) { value_ = value; }\n\n  /**\n   * Represents the binary annotation as a protobuf struct.\n   * @param replacements Used to hold the required replacements on serialization step.\n   * @return a protobuf struct.\n   */\n  const ProtobufWkt::Struct toStruct(Util::Replacements& 
replacements) const override;\n\nprivate:\n  std::string key_;\n  std::string value_;\n  absl::optional<Endpoint> endpoint_;\n  AnnotationType annotation_type_{};\n};\n\nusing SpanPtr = std::unique_ptr<Span>;\n\n/**\n * Represents a Zipkin span. This class is based on Zipkin's Thrift definition of a span.\n */\nclass Span : public ZipkinBase {\npublic:\n  /**\n   * Copy constructor.\n   */\n  Span(const Span&);\n\n  /**\n   * Default constructor. Creates an empty span.\n   */\n  explicit Span(TimeSource& time_source)\n      : trace_id_(0), id_(0), debug_(false), sampled_(false), monotonic_start_time_(0),\n        tracer_(nullptr), time_source_(time_source) {}\n\n  /**\n   * Sets the span's trace id attribute.\n   */\n  void setTraceId(const uint64_t val) { trace_id_ = val; }\n\n  /**\n   * Sets the span's name attribute.\n   */\n  void setName(const std::string& val) { name_ = val; }\n\n  /**\n   * Sets the span's id.\n   */\n  void setId(const uint64_t val) { id_ = val; }\n\n  /**\n   * Sets the span's parent id.\n   */\n  void setParentId(const uint64_t val) { parent_id_ = val; }\n\n  /**\n   * @return Whether or not the parent_id attribute is set.\n   */\n  bool isSetParentId() const { return parent_id_.has_value(); }\n\n  /**\n   * Set the span's sampled flag.\n   */\n  void setSampled(bool val) { sampled_ = val; }\n\n  /**\n   * @return a vector with all annotations added to the span.\n   */\n  const std::vector<Annotation>& annotations() { return annotations_; }\n\n  /**\n   * Sets the span's annotations all at once.\n   */\n  void setAnnotations(const std::vector<Annotation>& val) { annotations_ = val; }\n\n  /**\n   * Adds an annotation to the span (copy semantics).\n   */\n  void addAnnotation(const Annotation& ann) { annotations_.push_back(ann); }\n\n  /**\n   * Adds an annotation to the span (move semantics).\n   */\n  void addAnnotation(Annotation&& ann) { annotations_.emplace_back(std::move(ann)); }\n\n  /**\n   * Sets the span's binary annotations all 
at once.\n   */\n  void setBinaryAnnotations(const std::vector<BinaryAnnotation>& val) { binary_annotations_ = val; }\n\n  /**\n   * Adds a binary annotation to the span (copy semantics).\n   */\n  void addBinaryAnnotation(const BinaryAnnotation& bann) { binary_annotations_.push_back(bann); }\n\n  /**\n   * Adds a binary annotation to the span (move semantics).\n   */\n  void addBinaryAnnotation(BinaryAnnotation&& bann) {\n    binary_annotations_.emplace_back(std::move(bann));\n  }\n\n  /**\n   * Sets the span's debug attribute.\n   */\n  void setDebug() { debug_ = true; }\n\n  /**\n   * Sets the span's timestamp attribute.\n   */\n  void setTimestamp(const int64_t val) { timestamp_ = val; }\n\n  /**\n   * @return Whether or not the timestamp attribute is set.\n   */\n  bool isSetTimestamp() const { return timestamp_.has_value(); }\n\n  /**\n   * Sets the span's duration attribute.\n   */\n  void setDuration(const int64_t val) { duration_ = val; }\n\n  /**\n   * @return Whether or not the duration attribute is set.\n   */\n  bool isSetDuration() const { return duration_.has_value(); }\n\n  /**\n   * Sets the higher 64 bits of the span's 128-bit trace id.\n   * Note that this is optional, since 64-bit trace ids are valid.\n   */\n  void setTraceIdHigh(const uint64_t val) { trace_id_high_ = val; }\n\n  /**\n   * @return whether or not the trace_id_high attribute is set.\n   */\n  bool isSetTraceIdHigh() const { return trace_id_high_.has_value(); }\n\n  /**\n   * Sets the span start-time attribute (monotonic, used to calculate duration).\n   */\n  void setStartTime(const int64_t time) { monotonic_start_time_ = time; }\n\n  /**\n   * @return the span's annotations.\n   */\n  const std::vector<Annotation>& annotations() const { return annotations_; }\n\n  /**\n   * @return the span's binary annotations.\n   */\n  const std::vector<BinaryAnnotation>& binaryAnnotations() const { return binary_annotations_; }\n\n  /**\n   * @return the span's duration attribute.\n   */\n  
int64_t duration() const { return duration_.value(); }\n\n  /**\n   * @return the span's id as an integer.\n   */\n  uint64_t id() const { return id_; }\n\n  /**\n   * @return the span's id as a hexadecimal string.\n   */\n  const std::string idAsHexString() const { return Hex::uint64ToHex(id_); }\n\n  /**\n   * @return the span's id as a byte string.\n   */\n  const std::string idAsByteString() const { return Util::toByteString(id_); }\n\n  /**\n   * @return the span's name.\n   */\n  const std::string& name() const { return name_; }\n\n  /**\n   * @return the span's parent id as an integer.\n   */\n  uint64_t parentId() const { return parent_id_.value(); }\n\n  /**\n   * @return the span's parent id as a hexadecimal string.\n   */\n  const std::string parentIdAsHexString() const {\n    return parent_id_ ? Hex::uint64ToHex(parent_id_.value()) : EMPTY_HEX_STRING_;\n  }\n\n  /**\n   * @return the span's parent id as a byte string.\n   */\n  const std::string parentIdAsByteString() const {\n    ASSERT(parent_id_);\n    return Util::toByteString(parent_id_.value());\n  }\n\n  /**\n   * @return whether or not the debug attribute is set\n   */\n  bool debug() const { return debug_; }\n\n  /**\n   * @return whether or not the sampled attribute is set\n   */\n  bool sampled() const { return sampled_; }\n\n  /**\n   * @return the span's timestamp (clock time for user presentation: microseconds since epoch).\n   */\n  int64_t timestamp() const { return timestamp_.value(); }\n\n  /**\n   * @return the higher 64 bits of a 128-bit trace id.\n   */\n  uint64_t traceIdHigh() const { return trace_id_high_.value(); }\n\n  /**\n   * @return the span's trace id as an integer.\n   */\n  uint64_t traceId() const { return trace_id_; }\n\n  /**\n   * @return the span's trace id as a hexadecimal string.\n   */\n  const std::string traceIdAsHexString() const {\n    return trace_id_high_.has_value()\n               ? 
absl::StrCat(Hex::uint64ToHex(trace_id_high_.value()), Hex::uint64ToHex(trace_id_))\n               : Hex::uint64ToHex(trace_id_);\n  }\n\n  /**\n   * @return the span's trace id as a byte string.\n   */\n  const std::string traceIdAsByteString() const {\n    // https://github.com/openzipkin/zipkin-api/blob/v0.2.1/zipkin.proto#L60-L61.\n    return trace_id_high_.has_value()\n               ? absl::StrCat(Util::toBigEndianByteString(trace_id_high_.value()),\n                              Util::toBigEndianByteString(trace_id_))\n               : Util::toBigEndianByteString(trace_id_);\n  }\n\n  /**\n   * @return the span's start time (monotonic, used to calculate duration).\n   */\n  int64_t startTime() const { return monotonic_start_time_; }\n\n  /**\n   * Replaces the service-name attribute of the span's basic annotations with the provided value.\n   *\n   * This method will operate on all basic annotations that are part of the span when the call\n   * is made.\n   *\n   * @param service_name String to be used as the new service name for all basic annotations\n   */\n  void setServiceName(const std::string& service_name);\n\n  /**\n   * Represents the binary annotation as a protobuf struct.\n   *\n   * @return a protobuf struct.\n   */\n  const ProtobufWkt::Struct toStruct(Util::Replacements& replacements) const override;\n\n  /**\n   * Associates a Tracer object with the span. The tracer's reportSpan() method is invoked\n   * by the span's finish() method so that the tracer can decide what to do with the span\n   * when it is finished.\n   *\n   * @param tracer Represents the Tracer object to be associated with the span.\n   */\n  void setTracer(TracerInterface* tracer) { tracer_ = tracer; }\n\n  /**\n   * @return the Tracer object associated with the span.\n   */\n  TracerInterface* tracer() const { return tracer_; }\n\n  /**\n   * Marks a successful end of the span. 
This method will:\n   *\n   * (1) determine if it needs to add more annotations to the span (e.g., a span containing a CS\n   * annotation will need to add a CR annotation) and add them;\n   * (2) compute and set the span's duration; and\n   * (3) invoke the tracer's reportSpan() method if a tracer has been associated with the span.\n   */\n  void finish();\n\n  /**\n   * Adds a binary annotation to the span.\n   *\n   * @param name The binary annotation's key.\n   * @param value The binary annotation's value.\n   */\n  void setTag(absl::string_view name, absl::string_view value);\n\n  /**\n   * Adds an annotation to the span\n   *\n   * @param timestamp The annotation's timestamp.\n   * @param event The annotation's value.\n   */\n  void log(SystemTime timestamp, const std::string& event);\n\nprivate:\n  static const std::string EMPTY_HEX_STRING_;\n  uint64_t trace_id_;\n  std::string name_;\n  uint64_t id_;\n  absl::optional<uint64_t> parent_id_;\n  bool debug_;\n  bool sampled_;\n  std::vector<Annotation> annotations_;\n  std::vector<BinaryAnnotation> binary_annotations_;\n  absl::optional<int64_t> timestamp_;\n  absl::optional<int64_t> duration_;\n  absl::optional<uint64_t> trace_id_high_;\n  int64_t monotonic_start_time_;\n  TracerInterface* tracer_;\n  TimeSource& time_source_;\n};\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/zipkin_json_field_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nnamespace {\n\nconstexpr char SPAN_ID[] = \"id\";\nconstexpr char SPAN_KIND[] = \"kind\";\nconstexpr char SPAN_NAME[] = \"name\";\nconstexpr char SPAN_TAGS[] = \"tags\";\nconstexpr char SPAN_SHARED[] = \"shared\";\nconstexpr char SPAN_TRACE_ID[] = \"traceId\";\nconstexpr char SPAN_DURATION[] = \"duration\";\nconstexpr char SPAN_PARENT_ID[] = \"parentId\";\nconstexpr char SPAN_TIMESTAMP[] = \"timestamp\";\nconstexpr char SPAN_ANNOTATIONS[] = \"annotations\";\nconstexpr char SPAN_LOCAL_ENDPOINT[] = \"localEndpoint\";\nconstexpr char SPAN_BINARY_ANNOTATIONS[] = \"binaryAnnotations\";\n\nconstexpr char ANNOTATION_VALUE[] = \"value\";\nconstexpr char ANNOTATION_ENDPOINT[] = \"endpoint\";\nconstexpr char ANNOTATION_TIMESTAMP[] = \"timestamp\";\n\nconstexpr char BINARY_ANNOTATION_KEY[] = \"key\";\nconstexpr char BINARY_ANNOTATION_VALUE[] = \"value\";\nconstexpr char BINARY_ANNOTATION_ENDPOINT[] = \"endpoint\";\n\nconstexpr char ENDPOINT_PORT[] = \"port\";\nconstexpr char ENDPOINT_IPV4[] = \"ipv4\";\nconstexpr char ENDPOINT_IPV6[] = \"ipv6\";\nconstexpr char ENDPOINT_SERVICE_NAME[] = \"serviceName\";\n\n} // namespace\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/zipkin_tracer_impl.cc",
    "content": "#include \"extensions/tracers/zipkin/zipkin_tracer_impl.h\"\n\n#include \"envoy/config/trace/v3/zipkin.pb.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/zipkin/span_context_extractor.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\nZipkinSpan::ZipkinSpan(Zipkin::Span& span, Zipkin::Tracer& tracer) : span_(span), tracer_(tracer) {}\n\nvoid ZipkinSpan::finishSpan() { span_.finish(); }\n\nvoid ZipkinSpan::setOperation(absl::string_view operation) {\n  span_.setName(std::string(operation));\n}\n\nvoid ZipkinSpan::setTag(absl::string_view name, absl::string_view value) {\n  span_.setTag(name, value);\n}\n\nvoid ZipkinSpan::log(SystemTime timestamp, const std::string& event) {\n  span_.log(timestamp, event);\n}\n\n// TODO(#11622): Implement baggage storage for zipkin spans\nvoid ZipkinSpan::setBaggage(absl::string_view, absl::string_view) {}\nstd::string ZipkinSpan::getBaggage(absl::string_view) { return std::string(); }\n\nvoid ZipkinSpan::injectContext(Http::RequestHeaderMap& request_headers) {\n  // Set the trace-id and span-id headers properly, based on the newly-created span structure.\n  request_headers.setReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID,\n                                  span_.traceIdAsHexString());\n  request_headers.setReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, span_.idAsHexString());\n\n  // Set the parent-span header properly, based on the newly-created span structure.\n  if (span_.isSetParentId()) {\n    request_headers.setReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID,\n                      
              span_.parentIdAsHexString());\n  }\n\n  // Set the sampled header.\n  request_headers.setReferenceKey(ZipkinCoreConstants::get().X_B3_SAMPLED,\n                                  span_.sampled() ? SAMPLED : NOT_SAMPLED);\n}\n\nvoid ZipkinSpan::setSampled(bool sampled) { span_.setSampled(sampled); }\n\nTracing::SpanPtr ZipkinSpan::spawnChild(const Tracing::Config& config, const std::string& name,\n                                        SystemTime start_time) {\n  SpanContext previous_context(span_);\n  return std::make_unique<ZipkinSpan>(\n      *tracer_.startSpan(config, name, start_time, previous_context), tracer_);\n}\n\nDriver::TlsTracer::TlsTracer(TracerPtr&& tracer, Driver& driver)\n    : tracer_(std::move(tracer)), driver_(driver) {}\n\nDriver::Driver(const envoy::config::trace::v3::ZipkinConfig& zipkin_config,\n               Upstream::ClusterManager& cluster_manager, Stats::Scope& scope,\n               ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime,\n               const LocalInfo::LocalInfo& local_info, Random::RandomGenerator& random_generator,\n               TimeSource& time_source)\n    : cm_(cluster_manager), tracer_stats_{ZIPKIN_TRACER_STATS(\n                                POOL_COUNTER_PREFIX(scope, \"tracing.zipkin.\"))},\n      tls_(tls.allocateSlot()), runtime_(runtime), local_info_(local_info),\n      time_source_(time_source) {\n  Config::Utility::checkCluster(\"envoy.tracers.zipkin\", zipkin_config.collector_cluster(), cm_,\n                                /* allow_added_via_api */ true);\n  cluster_ = zipkin_config.collector_cluster();\n\n  CollectorInfo collector;\n  if (!zipkin_config.collector_endpoint().empty()) {\n    collector.endpoint_ = zipkin_config.collector_endpoint();\n  }\n  // The current default version of collector_endpoint_version is HTTP_JSON_V1.\n  collector.version_ = zipkin_config.collector_endpoint_version();\n  const bool trace_id_128bit = zipkin_config.trace_id_128bit();\n\n  const bool 
shared_span_context = PROTOBUF_GET_WRAPPED_OR_DEFAULT(\n      zipkin_config, shared_span_context, DEFAULT_SHARED_SPAN_CONTEXT);\n  collector.shared_span_context_ = shared_span_context;\n\n  tls_->set([this, collector, &random_generator, trace_id_128bit, shared_span_context](\n                Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    TracerPtr tracer =\n        std::make_unique<Tracer>(local_info_.clusterName(), local_info_.address(), random_generator,\n                                 trace_id_128bit, shared_span_context, time_source_);\n    tracer->setReporter(\n        ReporterImpl::NewInstance(std::ref(*this), std::ref(dispatcher), collector));\n    return std::make_shared<TlsTracer>(std::move(tracer), *this);\n  });\n}\n\nTracing::SpanPtr Driver::startSpan(const Tracing::Config& config,\n                                   Http::RequestHeaderMap& request_headers, const std::string&,\n                                   SystemTime start_time,\n                                   const Tracing::Decision tracing_decision) {\n  Tracer& tracer = *tls_->getTyped<TlsTracer>().tracer_;\n  SpanPtr new_zipkin_span;\n  SpanContextExtractor extractor(request_headers);\n  bool sampled{extractor.extractSampled(tracing_decision)};\n  try {\n    auto ret_span_context = extractor.extractSpanContext(sampled);\n    if (!ret_span_context.second) {\n      // Create a root Zipkin span. 
No context was found in the headers.\n      new_zipkin_span =\n          tracer.startSpan(config, std::string(request_headers.getHostValue()), start_time);\n      new_zipkin_span->setSampled(sampled);\n    } else {\n      new_zipkin_span = tracer.startSpan(config, std::string(request_headers.getHostValue()),\n                                         start_time, ret_span_context.first);\n    }\n\n  } catch (const ExtractorException& e) {\n    return std::make_unique<Tracing::NullSpan>();\n  }\n\n  // Return the active Zipkin span.\n  return std::make_unique<ZipkinSpan>(*new_zipkin_span, tracer);\n}\n\nReporterImpl::ReporterImpl(Driver& driver, Event::Dispatcher& dispatcher,\n                           const CollectorInfo& collector)\n    : driver_(driver),\n      collector_(collector), span_buffer_{std::make_unique<SpanBuffer>(\n                                 collector.version_, collector.shared_span_context_)},\n      collector_cluster_(driver_.clusterManager(), driver_.cluster()) {\n  flush_timer_ = dispatcher.createTimer([this]() -> void {\n    driver_.tracerStats().timer_flushed_.inc();\n    flushSpans();\n    enableTimer();\n  });\n\n  const uint64_t min_flush_spans =\n      driver_.runtime().snapshot().getInteger(\"tracing.zipkin.min_flush_spans\", 5U);\n  span_buffer_->allocateBuffer(min_flush_spans);\n\n  enableTimer();\n}\n\nReporterPtr ReporterImpl::NewInstance(Driver& driver, Event::Dispatcher& dispatcher,\n                                      const CollectorInfo& collector) {\n  return std::make_unique<ReporterImpl>(driver, dispatcher, collector);\n}\n\nvoid ReporterImpl::reportSpan(Span&& span) {\n  span_buffer_->addSpan(std::move(span));\n\n  const uint64_t min_flush_spans =\n      driver_.runtime().snapshot().getInteger(\"tracing.zipkin.min_flush_spans\", 5U);\n\n  if (span_buffer_->pendingSpans() == min_flush_spans) {\n    flushSpans();\n  }\n}\n\nvoid ReporterImpl::enableTimer() {\n  const uint64_t flush_interval =\n      
driver_.runtime().snapshot().getInteger(\"tracing.zipkin.flush_interval_ms\", 5000U);\n  flush_timer_->enableTimer(std::chrono::milliseconds(flush_interval));\n}\n\nvoid ReporterImpl::flushSpans() {\n  if (span_buffer_->pendingSpans()) {\n    driver_.tracerStats().spans_sent_.add(span_buffer_->pendingSpans());\n    const std::string request_body = span_buffer_->serialize();\n    Http::RequestMessagePtr message = std::make_unique<Http::RequestMessageImpl>();\n    message->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post);\n    message->headers().setPath(collector_.endpoint_);\n    message->headers().setHost(driver_.cluster());\n    message->headers().setReferenceContentType(\n        collector_.version_ == envoy::config::trace::v3::ZipkinConfig::HTTP_PROTO\n            ? Http::Headers::get().ContentTypeValues.Protobuf\n            : Http::Headers::get().ContentTypeValues.Json);\n\n    message->body().add(request_body);\n\n    const uint64_t timeout =\n        driver_.runtime().snapshot().getInteger(\"tracing.zipkin.request_timeout\", 5000U);\n\n    if (collector_cluster_.exists()) {\n      Http::AsyncClient::Request* request =\n          driver_.clusterManager()\n              .httpAsyncClientForCluster(collector_cluster_.info()->name())\n              .send(std::move(message), *this,\n                    Http::AsyncClient::RequestOptions().setTimeout(\n                        std::chrono::milliseconds(timeout)));\n      if (request) {\n        active_requests_.add(*request);\n      }\n    } else {\n      ENVOY_LOG(debug, \"collector cluster '{}' does not exist\", driver_.cluster());\n      driver_.tracerStats().reports_skipped_no_cluster_.inc();\n    }\n\n    span_buffer_->clear();\n  }\n}\n\nvoid ReporterImpl::onFailure(const Http::AsyncClient::Request& request,\n                             Http::AsyncClient::FailureReason) {\n  active_requests_.remove(request);\n  driver_.tracerStats().reports_failed_.inc();\n}\n\nvoid 
ReporterImpl::onSuccess(const Http::AsyncClient::Request& request,\n                             Http::ResponseMessagePtr&& http_response) {\n  active_requests_.remove(request);\n  if (Http::Utility::getResponseStatus(http_response->headers()) !=\n      enumToInt(Http::Code::Accepted)) {\n    driver_.tracerStats().reports_dropped_.inc();\n  } else {\n    driver_.tracerStats().reports_sent_.inc();\n  }\n}\n\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/tracers/zipkin/zipkin_tracer_impl.h",
    "content": "#pragma once\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/trace/v3/zipkin.pb.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/http/async_client_utility.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/upstream/cluster_update_tracker.h\"\n\n#include \"extensions/tracers/zipkin/span_buffer.h\"\n#include \"extensions/tracers/zipkin/tracer.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\n\n#define ZIPKIN_TRACER_STATS(COUNTER)                                                               \\\n  COUNTER(spans_sent)                                                                              \\\n  COUNTER(timer_flushed)                                                                           \\\n  COUNTER(reports_skipped_no_cluster)                                                              \\\n  COUNTER(reports_sent)                                                                            \\\n  COUNTER(reports_dropped)                                                                         \\\n  COUNTER(reports_failed)\n\nstruct ZipkinTracerStats {\n  ZIPKIN_TRACER_STATS(GENERATE_COUNTER_STRUCT)\n};\n\n/**\n * Class for Zipkin spans, wrapping a Zipkin::Span object.\n */\nclass ZipkinSpan : public Tracing::Span {\npublic:\n  /**\n   * Constructor. 
Wraps a Zipkin::Span object.\n   *\n   * @param span to be wrapped.\n   */\n  ZipkinSpan(Zipkin::Span& span, Zipkin::Tracer& tracer);\n\n  /**\n   * Calls Zipkin::Span::finishSpan() to perform all actions needed to finalize the span.\n   * This function is called by Tracing::HttpTracerUtility::finalizeSpan().\n   */\n  void finishSpan() override;\n\n  /**\n   * This method sets the operation name on the span.\n   * @param operation the operation name\n   */\n  void setOperation(absl::string_view operation) override;\n\n  /**\n   * This function adds a Zipkin \"string\" binary annotation to this span.\n   * In Zipkin, binary annotations of the type \"string\" allow arbitrary key-value pairs\n   * to be associated with a span.\n   *\n   * Note that Tracing::HttpTracerUtility::finalizeSpan() makes several calls to this function,\n   * associating several key-value pairs with this span.\n   */\n  void setTag(absl::string_view name, absl::string_view value) override;\n\n  void log(SystemTime timestamp, const std::string& event) override;\n\n  void injectContext(Http::RequestHeaderMap& request_headers) override;\n  Tracing::SpanPtr spawnChild(const Tracing::Config&, const std::string& name,\n                              SystemTime start_time) override;\n\n  void setSampled(bool sampled) override;\n\n  // TODO(#11622): Implement baggage storage for zipkin spans\n  void setBaggage(absl::string_view, absl::string_view) override;\n  std::string getBaggage(absl::string_view) override;\n\n  /**\n   * @return a reference to the Zipkin::Span object.\n   */\n  Zipkin::Span& span() { return span_; }\n\nprivate:\n  Zipkin::Span span_;\n  Zipkin::Tracer& tracer_;\n};\n\nusing ZipkinSpanPtr = std::unique_ptr<ZipkinSpan>;\n\n/**\n * Class for a Zipkin-specific Driver.\n */\nclass Driver : public Tracing::Driver {\npublic:\n  /**\n   * Constructor. 
It adds itself and a newly-created Zipkin::Tracer object to a thread-local store.\n   * Also, it associates the given random-number generator to the Zipkin::Tracer object it creates.\n   */\n  Driver(const envoy::config::trace::v3::ZipkinConfig& zipkin_config,\n         Upstream::ClusterManager& cluster_manager, Stats::Scope& scope,\n         ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime,\n         const LocalInfo::LocalInfo& localinfo, Random::RandomGenerator& random_generator,\n         TimeSource& time_source);\n\n  /**\n   * This function is inherited from the abstract Driver class.\n   *\n   * It starts a new Zipkin span. Depending on the request headers, it can create a root span,\n   * a child span, or a shared-context span.\n   *\n   * The third parameter (operation_name) does not actually make sense for Zipkin.\n   * Thus, this implementation of the virtual function startSpan() ignores the operation name\n   * (\"ingress\" or \"egress\") passed by the caller.\n   */\n  Tracing::SpanPtr startSpan(const Tracing::Config&, Http::RequestHeaderMap& request_headers,\n                             const std::string&, SystemTime start_time,\n                             const Tracing::Decision tracing_decision) override;\n\n  // Getters to return the ZipkinDriver's key members.\n  Upstream::ClusterManager& clusterManager() { return cm_; }\n  const std::string& cluster() { return cluster_; }\n  Runtime::Loader& runtime() { return runtime_; }\n  ZipkinTracerStats& tracerStats() { return tracer_stats_; }\n\nprivate:\n  /**\n   * Thread-local store containing ZipkinDriver and Zipkin::Tracer objects.\n   */\n  struct TlsTracer : ThreadLocal::ThreadLocalObject {\n    TlsTracer(TracerPtr&& tracer, Driver& driver);\n\n    TracerPtr tracer_;\n    Driver& driver_;\n  };\n\n  Upstream::ClusterManager& cm_;\n  std::string cluster_;\n  ZipkinTracerStats tracer_stats_;\n  ThreadLocal::SlotPtr tls_;\n  Runtime::Loader& runtime_;\n  const LocalInfo::LocalInfo& 
local_info_;\n  TimeSource& time_source_;\n};\n\n/**\n * Information about the Zipkin collector.\n */\nstruct CollectorInfo {\n  // The Zipkin collector endpoint/path to receive the collected trace data. e.g. /api/v1/spans if\n  // HTTP_JSON_V1 or /api/v2/spans otherwise.\n  std::string endpoint_{DEFAULT_COLLECTOR_ENDPOINT};\n\n  // The version of the collector. This is related to endpoint's supported payload specification and\n  // transport. Currently it defaults to envoy::config::trace::v2::ZipkinConfig::HTTP_JSON_V1. In\n  // the future, we will throw when collector_endpoint_version is not specified.\n  envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion version_{\n      envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1};\n\n  bool shared_span_context_{DEFAULT_SHARED_SPAN_CONTEXT};\n};\n\n/**\n * This class derives from the abstract Zipkin::Reporter.\n * It buffers spans and relies on Http::AsyncClient to send spans to\n * Zipkin using JSON over HTTP.\n *\n * Two runtime parameters control the span buffering/flushing behavior, namely:\n * tracing.zipkin.min_flush_spans and tracing.zipkin.flush_interval_ms.\n *\n * Up to `tracing.zipkin.min_flush_spans` will be buffered. Spans are flushed (sent to Zipkin)\n * either when the buffer is full, or when a timer, set to `tracing.zipkin.flush_interval_ms`,\n * expires, whichever happens first.\n *\n * The default values for the runtime parameters are 5 spans and 5000ms.\n */\nclass ReporterImpl : Logger::Loggable<Logger::Id::tracing>,\n                     public Reporter,\n                     public Http::AsyncClient::Callbacks {\npublic:\n  /**\n   * Constructor.\n   *\n   * @param driver ZipkinDriver to be associated with the reporter.\n   * @param dispatcher Controls the timer used to flush buffered spans.\n   * @param collector holds the endpoint version and path information.\n   * when making HTTP POST requests carrying spans. 
This value comes from the\n   * Zipkin-related tracing configuration.\n   */\n  ReporterImpl(Driver& driver, Event::Dispatcher& dispatcher, const CollectorInfo& collector);\n\n  /**\n   * Implementation of Zipkin::Reporter::reportSpan().\n   *\n   * Buffers the given span and calls flushSpans() if the buffer is full.\n   *\n   * @param span The span to be buffered.\n   */\n  void reportSpan(Span&& span) override;\n\n  // Http::AsyncClient::Callbacks.\n  // The callbacks below record Zipkin-span-related stats.\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override;\n  void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override;\n  void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {}\n\n  /**\n   * Creates a heap-allocated ZipkinReporter.\n   *\n   * @param driver ZipkinDriver to be associated with the reporter.\n   * @param dispatcher Controls the timer used to flush buffered spans.\n   * @param collector holds the endpoint version and path information.\n   * when making HTTP POST requests carrying spans. 
This value comes from the\n   * Zipkin-related tracing configuration.\n   *\n   * @return Pointer to the newly-created ZipkinReporter.\n   */\n  static ReporterPtr NewInstance(Driver& driver, Event::Dispatcher& dispatcher,\n                                 const CollectorInfo& collector);\n\nprivate:\n  /**\n   * Enables the span-flushing timer.\n   */\n  void enableTimer();\n\n  /**\n   * Removes all spans from the span buffer and sends them to Zipkin using Http::AsyncClient.\n   */\n  void flushSpans();\n\n  Driver& driver_;\n  Event::TimerPtr flush_timer_;\n  const CollectorInfo collector_;\n  SpanBufferPtr span_buffer_;\n  Upstream::ClusterUpdateTracker collector_cluster_;\n  // Track active HTTP requests to be able to cancel them on destruction.\n  Http::AsyncClientRequestTracker active_requests_;\n};\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"well_known_names\",\n    hdrs = [\"well_known_names.h\"],\n    # well known names files are public as long as they exist.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/common/singleton:const_singleton\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# ALTS transport socket. This provides Google's ALTS protocol support in GCP to Envoy.\n# https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"grpc_tsi_wrapper\",\n    hdrs = [\n        \"grpc_tsi.h\",\n    ],\n    external_deps = [\n        \"grpc\",\n    ],\n    visibility = [\"//visibility:private\"],\n    deps = [\n        \"//source/common/common:c_smart_ptr_lib\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\n        \"config.cc\",\n    ],\n    hdrs = [\n        \"config.h\",\n    ],\n    external_deps = [\n        \"abseil_node_hash_set\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream_and_upstream\",\n    deps = [\n        \":tsi_handshaker\",\n        \":tsi_socket\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//source/common/grpc:google_grpc_context_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"@envoy_api//envoy/extensions/transport_sockets/alts/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tsi_frame_protector\",\n    srcs = [\n        \"tsi_frame_protector.cc\",\n    ],\n    hdrs = [\n        \"tsi_frame_protector.h\",\n    ],\n    deps = [\n        \":grpc_tsi_wrapper\",\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tsi_handshaker\",\n    srcs = [\n        \"tsi_handshaker.cc\",\n    ],\n    hdrs = [\n        \"tsi_handshaker.h\",\n    ],\n    deps = [\n        \":grpc_tsi_wrapper\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = 
\"tsi_socket\",\n    srcs = [\n        \"tsi_socket.cc\",\n    ],\n    hdrs = [\n        \"tsi_socket.h\",\n    ],\n    deps = [\n        \":noop_transport_socket_callbacks_lib\",\n        \":tsi_frame_protector\",\n        \":tsi_handshaker\",\n        \"//include/envoy/network:io_handle_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/network:raw_buffer_socket_lib\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"noop_transport_socket_callbacks_lib\",\n    hdrs = [\"noop_transport_socket_callbacks.h\"],\n    deps = [\n        \"//include/envoy/network:io_handle_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/config.cc",
    "content": "#include \"extensions/transport_sockets/alts/config.h\"\n\n#include \"envoy/extensions/transport_sockets/alts/v3/alts.pb.h\"\n#include \"envoy/extensions/transport_sockets/alts/v3/alts.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/transport_socket_config.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/grpc/google_grpc_context.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/transport_sockets/alts/grpc_tsi.h\"\n#include \"extensions/transport_sockets/alts/tsi_socket.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\n// smart pointer for grpc_alts_credentials_options that will be automatically freed.\nusing GrpcAltsCredentialsOptionsPtr =\n    CSmartPtr<grpc_alts_credentials_options, grpc_alts_credentials_options_destroy>;\n\nnamespace {\n\n// TODO: gRPC v1.30.0-pre1 defines the equivalent function grpc_alts_set_rpc_protocol_versions\n// that should be called directly when available.\nvoid grpcAltsSetRpcProtocolVersions(grpc_gcp_rpc_protocol_versions* rpc_versions) {\n  grpc_gcp_rpc_protocol_versions_set_max(rpc_versions, GRPC_PROTOCOL_VERSION_MAX_MAJOR,\n                                         GRPC_PROTOCOL_VERSION_MAX_MINOR);\n  grpc_gcp_rpc_protocol_versions_set_min(rpc_versions, GRPC_PROTOCOL_VERSION_MIN_MAJOR,\n                                         GRPC_PROTOCOL_VERSION_MIN_MINOR);\n}\n\n// Returns true if the peer's service account is found in peers, otherwise\n// returns false and fills out err with an error message.\nbool doValidate(const tsi_peer& peer, const absl::node_hash_set<std::string>& peers,\n                std::string& err) {\n  for (size_t i = 0; i < peer.property_count; ++i) {\n    const std::string name = std::string(peer.properties[i].name);\n    const std::string value =\n        
std::string(peer.properties[i].value.data, peer.properties[i].value.length);\n    if (name.compare(TSI_ALTS_SERVICE_ACCOUNT_PEER_PROPERTY) == 0 &&\n        peers.find(value) != peers.end()) {\n      return true;\n    }\n  }\n\n  err =\n      \"Couldn't find peer's service account in peer_service_accounts: \" + absl::StrJoin(peers, \",\");\n  return false;\n}\n\nHandshakeValidator\ncreateHandshakeValidator(const envoy::extensions::transport_sockets::alts::v3::Alts& config) {\n  const auto& peer_service_accounts = config.peer_service_accounts();\n  const absl::node_hash_set<std::string> peers(peer_service_accounts.cbegin(),\n                                               peer_service_accounts.cend());\n  HandshakeValidator validator;\n  // Skip validation if peers is empty.\n  if (!peers.empty()) {\n    validator = [peers](const tsi_peer& peer, std::string& err) {\n      return doValidate(peer, peers, err);\n    };\n  }\n  return validator;\n}\n\n// Manage ALTS singleton state via SingletonManager\nclass AltsSharedState : public Singleton::Instance {\npublic:\n  AltsSharedState() { grpc_alts_shared_resource_dedicated_init(); }\n\n  ~AltsSharedState() override { grpc_alts_shared_resource_dedicated_shutdown(); }\n\nprivate:\n  // There is blanket google-grpc initialization in MainCommonBase, but that\n  // doesn't cover unit tests. However, putting blanket coverage in ProcessWide\n  // causes background threaded memory allocation in all unit tests making it\n  // hard to measure memory. Thus we also initialize grpc using our idempotent\n  // wrapper-class in classes that need it. 
See\n  // https://github.com/envoyproxy/envoy/issues/8282 for details.\n#ifdef ENVOY_GOOGLE_GRPC\n  Grpc::GoogleGrpcContext google_grpc_context_;\n#endif\n};\n\nSINGLETON_MANAGER_REGISTRATION(alts_shared_state);\n\nNetwork::TransportSocketFactoryPtr createTransportSocketFactoryHelper(\n    const Protobuf::Message& message, bool is_upstream,\n    Server::Configuration::TransportSocketFactoryContext& factory_ctxt) {\n  // A reference to this is held in the factory closure to keep the singleton\n  // instance alive.\n  auto alts_shared_state = factory_ctxt.singletonManager().getTyped<AltsSharedState>(\n      SINGLETON_MANAGER_REGISTERED_NAME(alts_shared_state),\n      [] { return std::make_shared<AltsSharedState>(); });\n  auto config =\n      MessageUtil::downcastAndValidate<const envoy::extensions::transport_sockets::alts::v3::Alts&>(\n          message, factory_ctxt.messageValidationVisitor());\n  HandshakeValidator validator = createHandshakeValidator(config);\n\n  const std::string& handshaker_service = config.handshaker_service();\n  HandshakerFactory factory =\n      [handshaker_service, is_upstream,\n       alts_shared_state](Event::Dispatcher& dispatcher,\n                          const Network::Address::InstanceConstSharedPtr& local_address,\n                          const Network::Address::InstanceConstSharedPtr&) -> TsiHandshakerPtr {\n    ASSERT(local_address != nullptr);\n\n    GrpcAltsCredentialsOptionsPtr options;\n    if (is_upstream) {\n      options = GrpcAltsCredentialsOptionsPtr(grpc_alts_credentials_client_options_create());\n    } else {\n      options = GrpcAltsCredentialsOptionsPtr(grpc_alts_credentials_server_options_create());\n    }\n    grpcAltsSetRpcProtocolVersions(&options->rpc_versions);\n    const char* target_name = is_upstream ? \"\" : nullptr;\n    tsi_handshaker* handshaker = nullptr;\n    // Specifying target name as empty since TSI won't take care of validating peer identity\n    // in this use case. 
The validation will be performed by TsiSocket with the validator.\n    tsi_result status =\n        alts_tsi_handshaker_create(options.get(), target_name, handshaker_service.c_str(),\n                                   is_upstream, nullptr /* interested_parties */, &handshaker);\n    CHandshakerPtr handshaker_ptr{handshaker};\n\n    if (status != TSI_OK) {\n      const std::string handshaker_name = is_upstream ? \"client\" : \"server\";\n      ENVOY_LOG_MISC(warn, \"Cannot create ATLS {} handshaker, status: {}\", handshaker_name, status);\n      return nullptr;\n    }\n\n    return std::make_unique<TsiHandshaker>(std::move(handshaker_ptr), dispatcher);\n  };\n\n  return std::make_unique<TsiSocketFactory>(factory, validator);\n}\n\n} // namespace\n\nProtobufTypes::MessagePtr AltsTransportSocketConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::transport_sockets::alts::v3::Alts>();\n}\n\nNetwork::TransportSocketFactoryPtr\nUpstreamAltsTransportSocketConfigFactory::createTransportSocketFactory(\n    const Protobuf::Message& message,\n    Server::Configuration::TransportSocketFactoryContext& factory_ctxt) {\n  return createTransportSocketFactoryHelper(message, /* is_upstream */ true, factory_ctxt);\n}\n\nNetwork::TransportSocketFactoryPtr\nDownstreamAltsTransportSocketConfigFactory::createTransportSocketFactory(\n    const Protobuf::Message& message,\n    Server::Configuration::TransportSocketFactoryContext& factory_ctxt,\n    const std::vector<std::string>&) {\n  return createTransportSocketFactoryHelper(message, /* is_upstream */ false, factory_ctxt);\n}\n\nREGISTER_FACTORY(UpstreamAltsTransportSocketConfigFactory,\n                 Server::Configuration::UpstreamTransportSocketConfigFactory);\n\nREGISTER_FACTORY(DownstreamAltsTransportSocketConfigFactory,\n                 Server::Configuration::DownstreamTransportSocketConfigFactory);\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // 
namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/config.h",
    "content": "#pragma once\n\n#include \"envoy/server/transport_socket_config.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\n// ALTS config registry\nclass AltsTransportSocketConfigFactory\n    : public virtual Server::Configuration::TransportSocketConfigFactory {\npublic:\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n  std::string name() const override { return TransportSocketNames::get().Alts; }\n};\n\nclass UpstreamAltsTransportSocketConfigFactory\n    : public AltsTransportSocketConfigFactory,\n      public Server::Configuration::UpstreamTransportSocketConfigFactory {\npublic:\n  Network::TransportSocketFactoryPtr\n  createTransportSocketFactory(const Protobuf::Message&,\n                               Server::Configuration::TransportSocketFactoryContext&) override;\n};\n\nclass DownstreamAltsTransportSocketConfigFactory\n    : public AltsTransportSocketConfigFactory,\n      public Server::Configuration::DownstreamTransportSocketConfigFactory {\npublic:\n  Network::TransportSocketFactoryPtr\n  createTransportSocketFactory(const Protobuf::Message&,\n                               Server::Configuration::TransportSocketFactoryContext&,\n                               const std::vector<std::string>&) override;\n};\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/grpc_tsi.h",
    "content": "#pragma once\n\n// Some gRPC headers contains old style cast and unused parameter which doesn't\n// compile with -Werror, ignoring those compiler warning since we don't have\n// control on those source codes. This works with GCC and Clang.\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Wold-style-cast\"\n#endif\n\n#include \"grpc/grpc_security.h\"\n#include \"src/core/lib/transport/transport.h\"\n#include \"src/core/tsi/alts/handshaker/alts_shared_resource.h\"\n#include \"src/core/tsi/alts/handshaker/alts_tsi_handshaker.h\"\n#include \"src/core/tsi/alts/handshaker/transport_security_common_api.h\"\n#include \"src/core/tsi/transport_security_grpc.h\"\n#include \"src/core/tsi/transport_security_interface.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"common/common/c_smart_ptr.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\nusing CFrameProtectorPtr =\n    CSmartPtr<tsi_zero_copy_grpc_protector, tsi_zero_copy_grpc_protector_destroy>;\n\nusing CHandshakerResultPtr = CSmartPtr<tsi_handshaker_result, tsi_handshaker_result_destroy>;\nusing CHandshakerPtr = CSmartPtr<tsi_handshaker, tsi_handshaker_destroy>;\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/noop_transport_socket_callbacks.h",
    "content": "#pragma once\n#include \"envoy/network/io_handle.h\"\n#include \"envoy/network/transport_socket.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\n/**\n * A TransportSocketCallbacks for wrapped TransportSocket object. Some\n * TransportSocket implementation wraps another socket which does actual I/O.\n * This class is used by the wrapped socket as its callbacks instead of the real\n * connection to hold back callbacks from the underlying socket to connection.\n */\nclass NoOpTransportSocketCallbacks : public Network::TransportSocketCallbacks {\npublic:\n  explicit NoOpTransportSocketCallbacks(Network::TransportSocketCallbacks& parent)\n      : parent_(parent) {}\n\n  Network::IoHandle& ioHandle() override { return parent_.ioHandle(); }\n  const Network::IoHandle& ioHandle() const override { return parent_.ioHandle(); }\n  Network::Connection& connection() override { return parent_.connection(); }\n  bool shouldDrainReadBuffer() override { return false; }\n  /*\n   * No-op for these two methods to hold back the callbacks.\n   */\n  void setReadBufferReady() override {}\n  void raiseEvent(Network::ConnectionEvent) override {}\n  void flushWriteBuffer() override {}\n\nprivate:\n  Network::TransportSocketCallbacks& parent_;\n};\n\nusing NoOpTransportSocketCallbacksPtr = std::unique_ptr<NoOpTransportSocketCallbacks>;\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/tsi_frame_protector.cc",
    "content": "#include \"extensions/transport_sockets/alts/tsi_frame_protector.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n\n#include \"grpc/slice_buffer.h\"\n#include \"src/core/tsi/transport_security_grpc.h\"\n#include \"src/core/tsi/transport_security_interface.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\nTsiFrameProtector::TsiFrameProtector(CFrameProtectorPtr&& frame_protector)\n    : frame_protector_(std::move(frame_protector)) {}\n\ntsi_result TsiFrameProtector::protect(Buffer::Instance& input, Buffer::Instance& output) {\n  ASSERT(frame_protector_);\n\n  if (input.length() == 0) {\n    return TSI_OK;\n  }\n\n  grpc_core::ExecCtx exec_ctx;\n  grpc_slice input_slice = grpc_slice_from_copied_buffer(\n      reinterpret_cast<char*>(input.linearize(input.length())), input.length());\n\n  grpc_slice_buffer message_buffer;\n  grpc_slice_buffer_init(&message_buffer);\n  grpc_slice_buffer_add(&message_buffer, input_slice);\n\n  grpc_slice_buffer protected_buffer;\n  grpc_slice_buffer_init(&protected_buffer);\n\n  tsi_result result = tsi_zero_copy_grpc_protector_protect(frame_protector_.get(), &message_buffer,\n                                                           &protected_buffer);\n\n  if (result != TSI_OK) {\n    ASSERT(result != TSI_INVALID_ARGUMENT && result != TSI_UNIMPLEMENTED);\n    grpc_slice_buffer_destroy(&message_buffer);\n    grpc_slice_buffer_destroy(&protected_buffer);\n    return result;\n  }\n\n  const size_t protected_data_length = protected_buffer.length;\n  char* protected_data = new char[protected_data_length];\n\n  grpc_slice_buffer_move_first_into_buffer(&protected_buffer, protected_data_length,\n                                           protected_data);\n\n  auto fragment = new Buffer::BufferFragmentImpl(\n      protected_data, protected_data_length,\n      [protected_data](const void*, size_t,\n                       const 
Envoy::Buffer::BufferFragmentImpl* this_fragment) {\n        delete[] protected_data;\n        delete this_fragment;\n      });\n\n  output.addBufferFragment(*fragment);\n  input.drain(input.length());\n\n  grpc_slice_buffer_destroy(&message_buffer);\n  grpc_slice_buffer_destroy(&protected_buffer);\n\n  return TSI_OK;\n}\n\ntsi_result TsiFrameProtector::unprotect(Buffer::Instance& input, Buffer::Instance& output) {\n  ASSERT(frame_protector_);\n\n  if (input.length() == 0) {\n    return TSI_OK;\n  }\n\n  grpc_core::ExecCtx exec_ctx;\n  grpc_slice input_slice = grpc_slice_from_copied_buffer(\n      reinterpret_cast<char*>(input.linearize(input.length())), input.length());\n\n  grpc_slice_buffer protected_buffer;\n  grpc_slice_buffer_init(&protected_buffer);\n  grpc_slice_buffer_add(&protected_buffer, input_slice);\n\n  grpc_slice_buffer unprotected_buffer;\n  grpc_slice_buffer_init(&unprotected_buffer);\n\n  tsi_result result = tsi_zero_copy_grpc_protector_unprotect(\n      frame_protector_.get(), &protected_buffer, &unprotected_buffer);\n\n  if (result != TSI_OK) {\n    ASSERT(result != TSI_INVALID_ARGUMENT && result != TSI_UNIMPLEMENTED);\n    grpc_slice_buffer_destroy(&protected_buffer);\n    grpc_slice_buffer_destroy(&unprotected_buffer);\n    return result;\n  }\n\n  const size_t unprotected_data_length = unprotected_buffer.length;\n  char* unprotected_data = new char[unprotected_data_length];\n\n  grpc_slice_buffer_move_first_into_buffer(&unprotected_buffer, unprotected_data_length,\n                                           unprotected_data);\n\n  auto fragment = new Buffer::BufferFragmentImpl(\n      unprotected_data, unprotected_data_length,\n      [unprotected_data](const void*, size_t,\n                         const Envoy::Buffer::BufferFragmentImpl* this_fragment) {\n        delete[] unprotected_data;\n        delete this_fragment;\n      });\n\n  output.addBufferFragment(*fragment);\n  input.drain(input.length());\n\n  
grpc_slice_buffer_destroy(&protected_buffer);\n  grpc_slice_buffer_destroy(&unprotected_buffer);\n\n  return TSI_OK;\n}\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/tsi_frame_protector.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"extensions/transport_sockets/alts/grpc_tsi.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\n/**\n * A C++ wrapper for tsi_frame_protector interface.\n * For detail of tsi_frame_protector, see\n * https://github.com/grpc/grpc/blob/v1.10.0/src/core/tsi/transport_security_interface.h#L70\n */\nclass TsiFrameProtector final {\npublic:\n  explicit TsiFrameProtector(CFrameProtectorPtr&& frame_protector);\n\n  /**\n   * Wrapper for tsi_frame_protector_protect\n   * @param input supplies the input data to protect, the method will drain it when it is processed.\n   * @param output supplies the buffer where the protected data will be stored.\n   * @return tsi_result the status.\n   */\n  tsi_result protect(Buffer::Instance& input, Buffer::Instance& output);\n\n  /**\n   * Wrapper for tsi_frame_protector_unprotect\n   * @param input supplies the input data to unprotect, the method will drain it when it is\n   * processed.\n   * @param output supplies the buffer where the unprotected data will be stored.\n   * @return tsi_result the status.\n   */\n  tsi_result unprotect(Buffer::Instance& input, Buffer::Instance& output);\n\nprivate:\n  CFrameProtectorPtr frame_protector_;\n};\n\nusing TsiFrameProtectorPtr = std::unique_ptr<TsiFrameProtector>;\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/tsi_handshaker.cc",
    "content": "#include \"extensions/transport_sockets/alts/tsi_handshaker.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\nvoid TsiHandshaker::onNextDone(tsi_result status, void* user_data,\n                               const unsigned char* bytes_to_send, size_t bytes_to_send_size,\n                               tsi_handshaker_result* handshaker_result) {\n  TsiHandshaker* handshaker = static_cast<TsiHandshaker*>(user_data);\n\n  Buffer::InstancePtr to_send = std::make_unique<Buffer::OwnedImpl>();\n  if (bytes_to_send_size > 0) {\n    to_send->add(bytes_to_send, bytes_to_send_size);\n  }\n\n  auto next_result =\n      new TsiHandshakerCallbacks::NextResult{status, std::move(to_send), {handshaker_result}};\n\n  handshaker->dispatcher_.post([handshaker, next_result]() {\n    TsiHandshakerCallbacks::NextResultPtr next_result_ptr{next_result};\n\n    ASSERT(handshaker->calling_);\n    handshaker->calling_ = false;\n\n    if (handshaker->delete_on_done_) {\n      handshaker->deferredDelete();\n      return;\n    }\n    handshaker->callbacks_->onNextDone(std::move(next_result_ptr));\n  });\n}\n\nTsiHandshaker::TsiHandshaker(CHandshakerPtr&& handshaker, Event::Dispatcher& dispatcher)\n    : handshaker_(std::move(handshaker)), dispatcher_(dispatcher) {}\n\nTsiHandshaker::~TsiHandshaker() { ASSERT(!calling_); }\n\ntsi_result TsiHandshaker::next(Envoy::Buffer::Instance& received) {\n  ASSERT(callbacks_);\n  ASSERT(!calling_);\n  calling_ = true;\n\n  uint64_t received_size = received.length();\n  const unsigned char* bytes_to_send = nullptr;\n  size_t bytes_to_send_size = 0;\n  tsi_handshaker_result* result = nullptr;\n  tsi_result status = tsi_handshaker_next(\n      handshaker_.get(), reinterpret_cast<const unsigned char*>(received.linearize(received_size)),\n      received_size, &bytes_to_send, &bytes_to_send_size, &result, onNextDone, 
this);\n  received.drain(received_size);\n\n  if (status != TSI_ASYNC) {\n    onNextDone(status, this, bytes_to_send, bytes_to_send_size, result);\n  }\n  return status;\n}\n\nvoid TsiHandshaker::deferredDelete() {\n  if (calling_) {\n    delete_on_done_ = true;\n  } else {\n    dispatcher_.deferredDelete(Event::DeferredDeletablePtr{this});\n  }\n}\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/tsi_handshaker.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/event/dispatcher.h\"\n\n#include \"common/common/c_smart_ptr.h\"\n\n#include \"extensions/transport_sockets/alts/grpc_tsi.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\n/**\n * An interface to get callbacks from TsiHandshaker. TsiHandshaker will call this callbacks in the\n * thread which its dispatcher posts to.\n */\nclass TsiHandshakerCallbacks {\npublic:\n  virtual ~TsiHandshakerCallbacks() = default;\n\n  struct NextResult {\n    // A enum of the result.\n    tsi_result status_;\n\n    // The buffer to be sent to the peer.\n    Buffer::InstancePtr to_send_;\n\n    // A pointer to tsi_handshaker_result struct. Owned by instance.\n    CHandshakerResultPtr result_;\n  };\n\n  using NextResultPtr = std::unique_ptr<NextResult>;\n\n  /**\n   * Called when `next` is done, this may be called inline in `next` if the handshaker is not\n   * asynchronous.\n   * @param result a unique pointer to NextResult struct including the result returned by TSI.\n   */\n  virtual void onNextDone(NextResultPtr&& result) PURE;\n};\n\n/**\n * A C++ wrapper for tsi_handshaker interface.\n * For detail of tsi_handshaker, see\n * https://github.com/grpc/grpc/blob/v1.12.0/src/core/tsi/transport_security_interface.h#L236\n */\nclass TsiHandshaker final : public Event::DeferredDeletable {\npublic:\n  explicit TsiHandshaker(CHandshakerPtr&& handshaker, Event::Dispatcher& dispatcher);\n  ~TsiHandshaker() override;\n\n  /**\n   * Conduct next step of handshake, see\n   * https://github.com/grpc/grpc/blob/v1.12.0/src/core/tsi/transport_security_interface.h#L418\n   * It is callers responsibility to not call this method again until the\n   * TsiHandshakerCallbacks::onNextDone is called.\n   * @param received the buffer received from peer.\n   */\n  tsi_result next(Buffer::Instance& received);\n\n  /**\n   * Set handshaker callbacks. 
This must be called before calling next.\n   * @param callbacks supplies the callback instance.\n   */\n  void setHandshakerCallbacks(TsiHandshakerCallbacks& callbacks) { callbacks_ = &callbacks; }\n\n  /**\n   * Delete the handshaker when it is ready. This must be called after releasing from a smart\n   * pointer. If there is no call in progress, this calls dispatcher_.deferredDelete(). If there is\n   * a call in progress dispatcher_.deferredDelete happens after ongoing next call are processed.\n   */\n  void deferredDelete();\n\nprivate:\n  static void onNextDone(tsi_result status, void* user_data, const unsigned char* bytes_to_send,\n                         size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);\n\n  CHandshakerPtr handshaker_;\n  TsiHandshakerCallbacks* callbacks_{};\n\n  // This is set to true when there is an ongoing next call to handshaker, and set to false when\n  // the callback posted to dispatcher_ by TsiHandshaker::onNextDone is executed.\n  bool calling_{false};\n\n  // This will be set when deferredDelete is called. If there is an ongoing next call,\n  // the handshaker will delete itself after the call is processed.\n  bool delete_on_done_{false};\n\n  Event::Dispatcher& dispatcher_;\n};\n\nusing TsiHandshakerPtr = std::unique_ptr<TsiHandshaker>;\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/tsi_socket.cc",
    "content": "#include \"extensions/transport_sockets/alts/tsi_socket.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\nTsiSocket::TsiSocket(HandshakerFactory handshaker_factory, HandshakeValidator handshake_validator,\n                     Network::TransportSocketPtr&& raw_socket)\n    : handshaker_factory_(handshaker_factory), handshake_validator_(handshake_validator),\n      raw_buffer_socket_(std::move(raw_socket)) {}\n\nTsiSocket::TsiSocket(HandshakerFactory handshaker_factory, HandshakeValidator handshake_validator)\n    : TsiSocket(handshaker_factory, handshake_validator,\n                std::make_unique<Network::RawBufferSocket>()) {}\n\nTsiSocket::~TsiSocket() { ASSERT(!handshaker_); }\n\nvoid TsiSocket::setTransportSocketCallbacks(Envoy::Network::TransportSocketCallbacks& callbacks) {\n  ASSERT(!callbacks_);\n  callbacks_ = &callbacks;\n\n  noop_callbacks_ = std::make_unique<NoOpTransportSocketCallbacks>(callbacks);\n  raw_buffer_socket_->setTransportSocketCallbacks(*noop_callbacks_);\n}\n\nstd::string TsiSocket::protocol() const {\n  // TSI doesn't have a generic way to indicate application layer protocol.\n  // TODO(lizan): support application layer protocol from TSI for known TSIs.\n  return EMPTY_STRING;\n}\n\nabsl::string_view TsiSocket::failureReason() const {\n  // TODO(htuch): Implement error reason for TSI.\n  return EMPTY_STRING;\n}\n\nNetwork::PostIoAction TsiSocket::doHandshake() {\n  ASSERT(!handshake_complete_);\n  ENVOY_CONN_LOG(debug, \"TSI: doHandshake\", callbacks_->connection());\n  if (!handshaker_next_calling_ && raw_read_buffer_.length() > 0) {\n    doHandshakeNext();\n  }\n  return Network::PostIoAction::KeepOpen;\n}\n\nvoid TsiSocket::doHandshakeNext() {\n  ENVOY_CONN_LOG(debug, \"TSI: doHandshake next: received: {}\", 
callbacks_->connection(),\n                 raw_read_buffer_.length());\n\n  if (!handshaker_) {\n    handshaker_ = handshaker_factory_(callbacks_->connection().dispatcher(),\n                                      callbacks_->connection().localAddress(),\n                                      callbacks_->connection().remoteAddress());\n    if (!handshaker_) {\n      ENVOY_CONN_LOG(warn, \"TSI: failed to create handshaker\", callbacks_->connection());\n      callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n      return;\n    }\n\n    handshaker_->setHandshakerCallbacks(*this);\n  }\n\n  handshaker_next_calling_ = true;\n  Buffer::OwnedImpl handshaker_buffer;\n  handshaker_buffer.move(raw_read_buffer_);\n  handshaker_->next(handshaker_buffer);\n}\n\nNetwork::PostIoAction TsiSocket::doHandshakeNextDone(NextResultPtr&& next_result) {\n  ASSERT(next_result);\n\n  ENVOY_CONN_LOG(debug, \"TSI: doHandshake next done: status: {} to_send: {}\",\n                 callbacks_->connection(), next_result->status_, next_result->to_send_->length());\n\n  tsi_result status = next_result->status_;\n  tsi_handshaker_result* handshaker_result = next_result->result_.get();\n\n  if (status != TSI_INCOMPLETE_DATA && status != TSI_OK) {\n    ENVOY_CONN_LOG(debug, \"TSI: Handshake failed: status: {}\", callbacks_->connection(), status);\n    return Network::PostIoAction::Close;\n  }\n\n  if (next_result->to_send_->length() > 0) {\n    raw_write_buffer_.move(*next_result->to_send_);\n  }\n\n  if (status == TSI_OK && handshaker_result != nullptr) {\n    tsi_peer peer;\n    // returns TSI_OK assuming there is no fatal error. 
Asserting OK.\n    status = tsi_handshaker_result_extract_peer(handshaker_result, &peer);\n    ASSERT(status == TSI_OK);\n    Cleanup peer_cleanup([&peer]() { tsi_peer_destruct(&peer); });\n    ENVOY_CONN_LOG(debug, \"TSI: Handshake successful: peer properties: {}\",\n                   callbacks_->connection(), peer.property_count);\n    for (size_t i = 0; i < peer.property_count; ++i) {\n      ENVOY_CONN_LOG(debug, \"  {}: {}\", callbacks_->connection(), peer.properties[i].name,\n                     std::string(peer.properties[i].value.data, peer.properties[i].value.length));\n    }\n    if (handshake_validator_) {\n      std::string err;\n      const bool peer_validated = handshake_validator_(peer, err);\n      if (peer_validated) {\n        ENVOY_CONN_LOG(debug, \"TSI: Handshake validation succeeded.\", callbacks_->connection());\n      } else {\n        ENVOY_CONN_LOG(debug, \"TSI: Handshake validation failed: {}\", callbacks_->connection(),\n                       err);\n        return Network::PostIoAction::Close;\n      }\n    } else {\n      ENVOY_CONN_LOG(debug, \"TSI: Handshake validation skipped.\", callbacks_->connection());\n    }\n\n    const unsigned char* unused_bytes;\n    size_t unused_byte_size;\n\n    // returns TSI_OK assuming there is no fatal error. Asserting OK.\n    status =\n        tsi_handshaker_result_get_unused_bytes(handshaker_result, &unused_bytes, &unused_byte_size);\n    ASSERT(status == TSI_OK);\n    if (unused_byte_size > 0) {\n      raw_read_buffer_.prepend(\n          absl::string_view{reinterpret_cast<const char*>(unused_bytes), unused_byte_size});\n    }\n    ENVOY_CONN_LOG(debug, \"TSI: Handshake successful: unused_bytes: {}\", callbacks_->connection(),\n                   unused_byte_size);\n\n    // returns TSI_OK assuming there is no fatal error. 
Asserting OK.\n    tsi_zero_copy_grpc_protector* frame_protector;\n    grpc_core::ExecCtx exec_ctx;\n    status = tsi_handshaker_result_create_zero_copy_grpc_protector(handshaker_result, nullptr,\n                                                                   &frame_protector);\n    ASSERT(status == TSI_OK);\n    frame_protector_ = std::make_unique<TsiFrameProtector>(frame_protector);\n\n    handshake_complete_ = true;\n    callbacks_->raiseEvent(Network::ConnectionEvent::Connected);\n  }\n\n  if (read_error_ || (!handshake_complete_ && end_stream_read_)) {\n    ENVOY_CONN_LOG(debug, \"TSI: Handshake failed: end of stream without enough data\",\n                   callbacks_->connection());\n    return Network::PostIoAction::Close;\n  }\n\n  if (raw_read_buffer_.length() > 0) {\n    callbacks_->setReadBufferReady();\n  }\n\n  // Try to write raw buffer when next call is done, even this is not in do[Read|Write] stack.\n  if (raw_write_buffer_.length() > 0) {\n    return raw_buffer_socket_->doWrite(raw_write_buffer_, false).action_;\n  }\n\n  return Network::PostIoAction::KeepOpen;\n}\n\nNetwork::IoResult TsiSocket::doRead(Buffer::Instance& buffer) {\n  Network::IoResult result = {Network::PostIoAction::KeepOpen, 0, false};\n  if (!end_stream_read_ && !read_error_) {\n    result = raw_buffer_socket_->doRead(raw_read_buffer_);\n    ENVOY_CONN_LOG(debug, \"TSI: raw read result action {} bytes {} end_stream {}\",\n                   callbacks_->connection(), enumToInt(result.action_), result.bytes_processed_,\n                   result.end_stream_read_);\n    if (result.action_ == Network::PostIoAction::Close && result.bytes_processed_ == 0) {\n      return result;\n    }\n\n    if (!handshake_complete_ && result.end_stream_read_ && result.bytes_processed_ == 0) {\n      return {Network::PostIoAction::Close, result.bytes_processed_, result.end_stream_read_};\n    }\n\n    end_stream_read_ = result.end_stream_read_;\n    read_error_ = result.action_ == 
Network::PostIoAction::Close;\n  }\n\n  if (!handshake_complete_) {\n    Network::PostIoAction action = doHandshake();\n    if (action == Network::PostIoAction::Close || !handshake_complete_) {\n      return {action, 0, false};\n    }\n  }\n\n  if (handshake_complete_) {\n    ASSERT(frame_protector_);\n\n    uint64_t read_size = raw_read_buffer_.length();\n    ENVOY_CONN_LOG(debug, \"TSI: unprotecting buffer size: {}\", callbacks_->connection(),\n                   raw_read_buffer_.length());\n    tsi_result status = frame_protector_->unprotect(raw_read_buffer_, buffer);\n    ENVOY_CONN_LOG(debug, \"TSI: unprotected buffer left: {} result: {}\", callbacks_->connection(),\n                   raw_read_buffer_.length(), tsi_result_to_string(status));\n    result.bytes_processed_ = read_size - raw_read_buffer_.length();\n  }\n\n  ENVOY_CONN_LOG(debug, \"TSI: do read result action {} bytes {} end_stream {}\",\n                 callbacks_->connection(), enumToInt(result.action_), result.bytes_processed_,\n                 result.end_stream_read_);\n  return result;\n}\n\nNetwork::IoResult TsiSocket::doWrite(Buffer::Instance& buffer, bool end_stream) {\n  if (!handshake_complete_) {\n    Network::PostIoAction action = doHandshake();\n    ASSERT(action == Network::PostIoAction::KeepOpen);\n    // TODO(lizan): Handle synchronous handshake when TsiHandshaker supports it.\n  }\n\n  if (handshake_complete_) {\n    ASSERT(frame_protector_);\n    ENVOY_CONN_LOG(debug, \"TSI: protecting buffer size: {}\", callbacks_->connection(),\n                   buffer.length());\n    tsi_result status = frame_protector_->protect(buffer, raw_write_buffer_);\n    ENVOY_CONN_LOG(debug, \"TSI: protected buffer left: {} result: {}\", callbacks_->connection(),\n                   buffer.length(), tsi_result_to_string(status));\n  }\n\n  if (raw_write_buffer_.length() > 0) {\n    ENVOY_CONN_LOG(debug, \"TSI: raw_write length {} end_stream {}\", callbacks_->connection(),\n                   
raw_write_buffer_.length(), end_stream);\n    return raw_buffer_socket_->doWrite(raw_write_buffer_, end_stream && (buffer.length() == 0));\n  }\n  return {Network::PostIoAction::KeepOpen, 0, false};\n}\n\nvoid TsiSocket::closeSocket(Network::ConnectionEvent) {\n  ENVOY_CONN_LOG(debug, \"TSI: closing socket\", callbacks_->connection());\n  if (handshaker_) {\n    handshaker_.release()->deferredDelete();\n  }\n}\n\nvoid TsiSocket::onConnected() {\n  ASSERT(!handshake_complete_);\n  doHandshakeNext();\n}\n\nvoid TsiSocket::onNextDone(NextResultPtr&& result) {\n  handshaker_next_calling_ = false;\n\n  Network::PostIoAction action = doHandshakeNextDone(std::move(result));\n  if (action == Network::PostIoAction::Close) {\n    callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);\n  }\n}\n\nTsiSocketFactory::TsiSocketFactory(HandshakerFactory handshaker_factory,\n                                   HandshakeValidator handshake_validator)\n    : handshaker_factory_(std::move(handshaker_factory)),\n      handshake_validator_(std::move(handshake_validator)) {}\n\nbool TsiSocketFactory::implementsSecureTransport() const { return true; }\n\nNetwork::TransportSocketPtr\nTsiSocketFactory::createTransportSocket(Network::TransportSocketOptionsSharedPtr) const {\n  return std::make_unique<TsiSocket>(handshaker_factory_, handshake_validator_);\n}\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/alts/tsi_socket.h",
    "content": "#pragma once\n\n#include \"envoy/network/transport_socket.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/raw_buffer_socket.h\"\n\n#include \"extensions/transport_sockets/alts/noop_transport_socket_callbacks.h\"\n#include \"extensions/transport_sockets/alts/tsi_frame_protector.h\"\n#include \"extensions/transport_sockets/alts/tsi_handshaker.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\n\n/**\n * A factory function to create TsiHandshaker\n * @param dispatcher the dispatcher for the thread where the socket is running on.\n * @param local_address the local address of the connection.\n * @param remote_address the remote address of the connection.\n */\nusing HandshakerFactory = std::function<TsiHandshakerPtr(\n    Event::Dispatcher& dispatcher, const Network::Address::InstanceConstSharedPtr& local_address,\n    const Network::Address::InstanceConstSharedPtr& remote_address)>;\n\n/**\n * A function to validate the peer of the connection.\n * @param peer the detail peer information of the connection.\n * @param err an error message to indicate why the peer is invalid. This is an\n * output param that should be populated by the function implementation.\n * @return true if the peer is valid or false if the peer is invalid.\n */\nusing HandshakeValidator = std::function<bool(const tsi_peer& peer, std::string& err)>;\n\n/**\n * A implementation of Network::TransportSocket based on gRPC TSI\n */\nclass TsiSocket : public Network::TransportSocket,\n                  public TsiHandshakerCallbacks,\n                  public Logger::Loggable<Logger::Id::connection> {\npublic:\n  // For Test\n  TsiSocket(HandshakerFactory handshaker_factory, HandshakeValidator handshake_validator,\n            Network::TransportSocketPtr&& raw_socket_ptr);\n\n  /**\n   * @param handshaker_factory a function to initiate a TsiHandshaker\n   * @param handshake_validator a function to validate the peer. 
Called right\n   * after the handshake completed with peer data to do the peer validation.\n   * The connection will be closed immediately if it returns false.\n   */\n  TsiSocket(HandshakerFactory handshaker_factory, HandshakeValidator handshake_validator);\n  ~TsiSocket() override;\n\n  // Network::TransportSocket\n  void setTransportSocketCallbacks(Envoy::Network::TransportSocketCallbacks& callbacks) override;\n  std::string protocol() const override;\n  absl::string_view failureReason() const override;\n  bool canFlushClose() override { return handshake_complete_; }\n  Envoy::Ssl::ConnectionInfoConstSharedPtr ssl() const override { return nullptr; }\n  Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override;\n  void closeSocket(Network::ConnectionEvent event) override;\n  Network::IoResult doRead(Buffer::Instance& buffer) override;\n  void onConnected() override;\n\n  // TsiHandshakerCallbacks\n  void onNextDone(NextResultPtr&& result) override;\n\nprivate:\n  Network::PostIoAction doHandshake();\n  void doHandshakeNext();\n  Network::PostIoAction doHandshakeNextDone(NextResultPtr&& next_result);\n\n  HandshakerFactory handshaker_factory_;\n  HandshakeValidator handshake_validator_;\n  TsiHandshakerPtr handshaker_{};\n  bool handshaker_next_calling_{};\n\n  TsiFrameProtectorPtr frame_protector_;\n\n  Envoy::Network::TransportSocketCallbacks* callbacks_{};\n  NoOpTransportSocketCallbacksPtr noop_callbacks_;\n  Network::TransportSocketPtr raw_buffer_socket_;\n\n  Envoy::Buffer::OwnedImpl raw_read_buffer_;\n  Envoy::Buffer::OwnedImpl raw_write_buffer_;\n  bool handshake_complete_{};\n  bool end_stream_read_{};\n  bool read_error_{};\n};\n\n/**\n * An implementation of Network::TransportSocketFactory for TsiSocket\n */\nclass TsiSocketFactory : public Network::TransportSocketFactory {\npublic:\n  TsiSocketFactory(HandshakerFactory handshaker_factory, HandshakeValidator handshake_validator);\n\n  bool implementsSecureTransport() const 
override;\n  Network::TransportSocketPtr\n  createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override;\n\nprivate:\n  HandshakerFactory handshaker_factory_;\n  HandshakeValidator handshake_validator_;\n};\n\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"passthrough_lib\",\n    srcs = [\"passthrough.cc\"],\n    hdrs = [\"passthrough.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/transport_sockets/common/passthrough.cc",
    "content": "#include \"extensions/transport_sockets/common/passthrough.h\"\n\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/transport_socket.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\n\nPassthroughSocket::PassthroughSocket(Network::TransportSocketPtr&& transport_socket)\n    : transport_socket_(std::move(transport_socket)) {}\n\nvoid PassthroughSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) {\n  transport_socket_->setTransportSocketCallbacks(callbacks);\n}\n\nstd::string PassthroughSocket::protocol() const { return transport_socket_->protocol(); }\n\nabsl::string_view PassthroughSocket::failureReason() const {\n  return transport_socket_->failureReason();\n}\n\nbool PassthroughSocket::canFlushClose() { return transport_socket_->canFlushClose(); }\n\nvoid PassthroughSocket::closeSocket(Network::ConnectionEvent event) {\n  transport_socket_->closeSocket(event);\n}\n\nNetwork::IoResult PassthroughSocket::doRead(Buffer::Instance& buffer) {\n  return transport_socket_->doRead(buffer);\n}\n\nNetwork::IoResult PassthroughSocket::doWrite(Buffer::Instance& buffer, bool end_stream) {\n  return transport_socket_->doWrite(buffer, end_stream);\n}\n\nvoid PassthroughSocket::onConnected() { transport_socket_->onConnected(); }\n\nSsl::ConnectionInfoConstSharedPtr PassthroughSocket::ssl() const {\n  return transport_socket_->ssl();\n}\n\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/common/passthrough.h",
    "content": "#pragma once\n\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/transport_socket.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\n\nclass PassthroughSocket : public Network::TransportSocket {\npublic:\n  PassthroughSocket(Network::TransportSocketPtr&& transport_socket);\n\n  void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override;\n  std::string protocol() const override;\n  absl::string_view failureReason() const override;\n  bool canFlushClose() override;\n  void closeSocket(Network::ConnectionEvent event) override;\n  Network::IoResult doRead(Buffer::Instance& buffer) override;\n  Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override;\n  void onConnected() override;\n  Ssl::ConnectionInfoConstSharedPtr ssl() const override;\n\nprotected:\n  Network::TransportSocketPtr transport_socket_;\n};\n\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/proxy_protocol/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"upstream_config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream_and_upstream\",  # header generated in Envoy, so can't be faked\n    deps = [\n        \":upstream_proxy_protocol\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"@envoy_api//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"upstream_proxy_protocol\",\n    srcs = [\"proxy_protocol.cc\"],\n    hdrs = [\"proxy_protocol.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/extensions/common/proxy_protocol:proxy_protocol_header_lib\",\n        \"//source/extensions/transport_sockets/common:passthrough_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/transport_sockets/proxy_protocol/config.cc",
    "content": "#include \"extensions/transport_sockets/proxy_protocol/config.h\"\n\n#include \"envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.pb.h\"\n#include \"envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/utility.h\"\n\n#include \"extensions/transport_sockets/proxy_protocol/proxy_protocol.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace ProxyProtocol {\n\nNetwork::TransportSocketFactoryPtr\nUpstreamProxyProtocolSocketConfigFactory::createTransportSocketFactory(\n    const Protobuf::Message& message,\n    Server::Configuration::TransportSocketFactoryContext& context) {\n  const auto& outer_config =\n      MessageUtil::downcastAndValidate<const envoy::extensions::transport_sockets::proxy_protocol::\n                                           v3::ProxyProtocolUpstreamTransport&>(\n          message, context.messageValidationVisitor());\n  auto& inner_config_factory = Config::Utility::getAndCheckFactory<\n      Server::Configuration::UpstreamTransportSocketConfigFactory>(outer_config.transport_socket());\n  ProtobufTypes::MessagePtr inner_factory_config = Config::Utility::translateToFactoryConfig(\n      outer_config.transport_socket(), context.messageValidationVisitor(), inner_config_factory);\n  auto inner_transport_factory =\n      inner_config_factory.createTransportSocketFactory(*inner_factory_config, context);\n  return std::make_unique<UpstreamProxyProtocolSocketFactory>(std::move(inner_transport_factory),\n                                                              outer_config.config());\n}\n\nProtobufTypes::MessagePtr UpstreamProxyProtocolSocketConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<\n      envoy::extensions::transport_sockets::proxy_protocol::v3::ProxyProtocolUpstreamTransport>();\n  
;\n}\n\nREGISTER_FACTORY(UpstreamProxyProtocolSocketConfigFactory,\n                 Server::Configuration::UpstreamTransportSocketConfigFactory);\n\n} // namespace ProxyProtocol\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/proxy_protocol/config.h",
    "content": "#pragma once\n\n#include \"envoy/server/transport_socket_config.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace ProxyProtocol {\n\n/**\n * Config registration for the proxy protocol wrapper for transport socket factory.\n * @see TransportSocketConfigFactory.\n */\nclass UpstreamProxyProtocolSocketConfigFactory\n    : public Server::Configuration::UpstreamTransportSocketConfigFactory {\npublic:\n  std::string name() const override { return TransportSocketNames::get().UpstreamProxyProtocol; }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n  Network::TransportSocketFactoryPtr createTransportSocketFactory(\n      const Protobuf::Message& config,\n      Server::Configuration::TransportSocketFactoryContext& context) override;\n};\n\n} // namespace ProxyProtocol\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc",
    "content": "#include \"extensions/transport_sockets/proxy_protocol/proxy_protocol.h\"\n\n#include <sstream>\n\n#include \"envoy/config/core/v3/proxy_protocol.pb.h\"\n#include \"envoy/network/transport_socket.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n\nusing envoy::config::core::v3::ProxyProtocolConfig;\nusing envoy::config::core::v3::ProxyProtocolConfig_Version;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace ProxyProtocol {\n\nUpstreamProxyProtocolSocket::UpstreamProxyProtocolSocket(\n    Network::TransportSocketPtr&& transport_socket,\n    Network::TransportSocketOptionsSharedPtr options, ProxyProtocolConfig_Version version)\n    : PassthroughSocket(std::move(transport_socket)), options_(options), version_(version) {}\n\nvoid UpstreamProxyProtocolSocket::setTransportSocketCallbacks(\n    Network::TransportSocketCallbacks& callbacks) {\n  transport_socket_->setTransportSocketCallbacks(callbacks);\n  callbacks_ = &callbacks;\n}\n\nNetwork::IoResult UpstreamProxyProtocolSocket::doWrite(Buffer::Instance& buffer, bool end_stream) {\n  if (header_buffer_.length() > 0) {\n    auto header_res = writeHeader();\n    if (header_buffer_.length() == 0 && header_res.action_ == Network::PostIoAction::KeepOpen) {\n      auto inner_res = transport_socket_->doWrite(buffer, end_stream);\n      return {inner_res.action_, header_res.bytes_processed_ + inner_res.bytes_processed_, false};\n    }\n    return header_res;\n  } else {\n    return transport_socket_->doWrite(buffer, end_stream);\n  }\n}\n\nvoid UpstreamProxyProtocolSocket::generateHeader() {\n  if (version_ == ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1) {\n    generateHeaderV1();\n  } else {\n    generateHeaderV2();\n  }\n}\n\nvoid UpstreamProxyProtocolSocket::generateHeaderV1() {\n  // Default to local addresses (used if no downstream 
connection exists e.g. health checks)\n  auto src_addr = callbacks_->connection().localAddress();\n  auto dst_addr = callbacks_->connection().remoteAddress();\n\n  if (options_ && options_->proxyProtocolOptions().has_value()) {\n    const auto options = options_->proxyProtocolOptions().value();\n    src_addr = options.src_addr_;\n    dst_addr = options.dst_addr_;\n  }\n\n  Common::ProxyProtocol::generateV1Header(*src_addr->ip(), *dst_addr->ip(), header_buffer_);\n}\n\nvoid UpstreamProxyProtocolSocket::generateHeaderV2() {\n  if (!options_ || !options_->proxyProtocolOptions().has_value()) {\n    Common::ProxyProtocol::generateV2LocalHeader(header_buffer_);\n  } else {\n    const auto options = options_->proxyProtocolOptions().value();\n    Common::ProxyProtocol::generateV2Header(*options.src_addr_->ip(), *options.dst_addr_->ip(),\n                                            header_buffer_);\n  }\n}\n\nNetwork::IoResult UpstreamProxyProtocolSocket::writeHeader() {\n  Network::PostIoAction action = Network::PostIoAction::KeepOpen;\n  uint64_t bytes_written = 0;\n  do {\n    if (header_buffer_.length() == 0) {\n      break;\n    }\n\n    Api::IoCallUint64Result result = callbacks_->ioHandle().write(header_buffer_);\n\n    if (result.ok()) {\n      ENVOY_CONN_LOG(trace, \"write returns: {}\", callbacks_->connection(), result.rc_);\n      bytes_written += result.rc_;\n    } else {\n      ENVOY_CONN_LOG(trace, \"write error: {}\", callbacks_->connection(),\n                     result.err_->getErrorDetails());\n      if (result.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) {\n        action = Network::PostIoAction::Close;\n      }\n      break;\n    }\n  } while (true);\n\n  return {action, bytes_written, false};\n}\n\nvoid UpstreamProxyProtocolSocket::onConnected() {\n  generateHeader();\n  transport_socket_->onConnected();\n}\n\nUpstreamProxyProtocolSocketFactory::UpstreamProxyProtocolSocketFactory(\n    Network::TransportSocketFactoryPtr 
transport_socket_factory, ProxyProtocolConfig config)\n    : transport_socket_factory_(std::move(transport_socket_factory)), config_(config) {}\n\nNetwork::TransportSocketPtr UpstreamProxyProtocolSocketFactory::createTransportSocket(\n    Network::TransportSocketOptionsSharedPtr options) const {\n  auto inner_socket = transport_socket_factory_->createTransportSocket(options);\n  if (inner_socket == nullptr) {\n    return nullptr;\n  }\n  return std::make_unique<UpstreamProxyProtocolSocket>(std::move(inner_socket), options,\n                                                       config_.version());\n}\n\nbool UpstreamProxyProtocolSocketFactory::implementsSecureTransport() const {\n  return transport_socket_factory_->implementsSecureTransport();\n}\n\n} // namespace ProxyProtocol\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/proxy_protocol.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/transport_socket.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n\n#include \"extensions/transport_sockets/common/passthrough.h\"\n\nusing envoy::config::core::v3::ProxyProtocolConfig;\nusing envoy::config::core::v3::ProxyProtocolConfig_Version;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace ProxyProtocol {\n\nclass UpstreamProxyProtocolSocket : public TransportSockets::PassthroughSocket,\n                                    public Logger::Loggable<Logger::Id::connection> {\npublic:\n  UpstreamProxyProtocolSocket(Network::TransportSocketPtr&& transport_socket,\n                              Network::TransportSocketOptionsSharedPtr options,\n                              ProxyProtocolConfig_Version version);\n\n  void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override;\n  Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override;\n  void onConnected() override;\n\nprivate:\n  void generateHeader();\n  void generateHeaderV1();\n  void generateHeaderV2();\n  Network::IoResult writeHeader();\n\n  Network::TransportSocketOptionsSharedPtr options_;\n  Network::TransportSocketCallbacks* callbacks_{};\n  Buffer::OwnedImpl header_buffer_{};\n  ProxyProtocolConfig_Version version_{ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1};\n};\n\nclass UpstreamProxyProtocolSocketFactory : public Network::TransportSocketFactory {\npublic:\n  UpstreamProxyProtocolSocketFactory(Network::TransportSocketFactoryPtr transport_socket_factory,\n                                     ProxyProtocolConfig config);\n\n  // Network::TransportSocketFactory\n  Network::TransportSocketPtr\n  createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override;\n  bool implementsSecureTransport() 
const override;\n\nprivate:\n  Network::TransportSocketFactoryPtr transport_socket_factory_;\n  ProxyProtocolConfig config_;\n};\n\n} // namespace ProxyProtocol\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/raw_buffer/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Built-in plaintext connection transport socket.\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    # This is core Envoy config.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//source/common/network:raw_buffer_socket_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"@envoy_api//envoy/config/transport_socket/raw_buffer/v2:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/transport_sockets/raw_buffer/config.cc",
    "content": "#include \"extensions/transport_sockets/raw_buffer/config.h\"\n\n#include <iostream>\n\n#include \"envoy/config/transport_socket/raw_buffer/v2/raw_buffer.pb.h\"\n#include \"envoy/config/transport_socket/raw_buffer/v2/raw_buffer.pb.validate.h\"\n\n#include \"common/network/raw_buffer_socket.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace RawBuffer {\n\nNetwork::TransportSocketFactoryPtr UpstreamRawBufferSocketFactory::createTransportSocketFactory(\n    const Protobuf::Message&, Server::Configuration::TransportSocketFactoryContext&) {\n  return std::make_unique<Network::RawBufferSocketFactory>();\n}\n\nNetwork::TransportSocketFactoryPtr DownstreamRawBufferSocketFactory::createTransportSocketFactory(\n    const Protobuf::Message&, Server::Configuration::TransportSocketFactoryContext&,\n    const std::vector<std::string>&) {\n  return std::make_unique<Network::RawBufferSocketFactory>();\n}\n\nProtobufTypes::MessagePtr RawBufferSocketFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::config::transport_socket::raw_buffer::v2::RawBuffer>();\n}\n\nREGISTER_FACTORY(UpstreamRawBufferSocketFactory,\n                 Server::Configuration::UpstreamTransportSocketConfigFactory){\"raw_buffer\"};\n\nREGISTER_FACTORY(DownstreamRawBufferSocketFactory,\n                 Server::Configuration::DownstreamTransportSocketConfigFactory){\"raw_buffer\"};\n\n} // namespace RawBuffer\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/raw_buffer/config.h",
    "content": "#pragma once\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/transport_socket_config.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace RawBuffer {\n\n/**\n * Config registration for the raw buffer transport socket factory.\n * @see TransportSocketConfigFactory.\n */\nclass RawBufferSocketFactory : public virtual Server::Configuration::TransportSocketConfigFactory {\npublic:\n  std::string name() const override { return TransportSocketNames::get().RawBuffer; }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n};\n\nclass UpstreamRawBufferSocketFactory\n    : public Server::Configuration::UpstreamTransportSocketConfigFactory,\n      public RawBufferSocketFactory {\npublic:\n  Network::TransportSocketFactoryPtr createTransportSocketFactory(\n      const Protobuf::Message& config,\n      Server::Configuration::TransportSocketFactoryContext& context) override;\n};\n\nclass DownstreamRawBufferSocketFactory\n    : public Server::Configuration::DownstreamTransportSocketConfigFactory,\n      public RawBufferSocketFactory {\npublic:\n  Network::TransportSocketFactoryPtr\n  createTransportSocketFactory(const Protobuf::Message& config,\n                               Server::Configuration::TransportSocketFactoryContext& context,\n                               const std::vector<std::string>& server_names) override;\n};\n\nDECLARE_FACTORY(UpstreamRawBufferSocketFactory);\n\nDECLARE_FACTORY(DownstreamRawBufferSocketFactory);\n\n} // namespace RawBuffer\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tap/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# tap wrapper around a transport socket.\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"tap_config_interface\",\n    hdrs = [\"tap_config.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//source/extensions/common/tap:tap_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tap_config_impl\",\n    srcs = [\"tap_config_impl.cc\"],\n    hdrs = [\"tap_config_impl.h\"],\n    deps = [\n        \":tap_config_interface\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/common/tap:tap_config_base\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"tap_lib\",\n    srcs = [\"tap.cc\"],\n    hdrs = [\"tap.h\"],\n    deps = [\n        \":tap_config_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/common/tap:extension_config_base\",\n        \"//source/extensions/transport_sockets/common:passthrough_lib\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    # TODO(#9953) clean up.\n    extra_visibility = [\n        \"//test/common/access_log:__subpackages__\",\n        \"//test/integration:__subpackages__\",\n    ],\n    security_posture = \"requires_trusted_downstream_and_upstream\",\n    status = \"alpha\",\n    deps = [\n        \":tap_config_impl\",\n        \":tap_lib\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/registry\",\n        
\"//include/envoy/server:transport_socket_config_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tap/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/transport_sockets/tap/config.cc",
    "content": "#include \"extensions/transport_sockets/tap/config.h\"\n\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/extensions/transport_sockets/tap/v3/tap.pb.h\"\n#include \"envoy/extensions/transport_sockets/tap/v3/tap.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/transport_sockets/tap/tap.h\"\n#include \"extensions/transport_sockets/tap/tap_config_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tap {\n\nclass SocketTapConfigFactoryImpl : public Extensions::Common::Tap::TapConfigFactory {\npublic:\n  SocketTapConfigFactoryImpl(TimeSource& time_source) : time_source_(time_source) {}\n\n  // TapConfigFactory\n  Extensions::Common::Tap::TapConfigSharedPtr\n  createConfigFromProto(envoy::config::tap::v3::TapConfig&& proto_config,\n                        Extensions::Common::Tap::Sink* admin_streamer) override {\n    return std::make_shared<SocketTapConfigImpl>(std::move(proto_config), admin_streamer,\n                                                 time_source_);\n  }\n\nprivate:\n  TimeSource& time_source_;\n};\n\nNetwork::TransportSocketFactoryPtr UpstreamTapSocketConfigFactory::createTransportSocketFactory(\n    const Protobuf::Message& message,\n    Server::Configuration::TransportSocketFactoryContext& context) {\n  const auto& outer_config =\n      MessageUtil::downcastAndValidate<const envoy::extensions::transport_sockets::tap::v3::Tap&>(\n          message, context.messageValidationVisitor());\n  auto& inner_config_factory = Config::Utility::getAndCheckFactory<\n      Server::Configuration::UpstreamTransportSocketConfigFactory>(outer_config.transport_socket());\n  ProtobufTypes::MessagePtr inner_factory_config = Config::Utility::translateToFactoryConfig(\n      outer_config.transport_socket(), context.messageValidationVisitor(), inner_config_factory);\n  auto 
inner_transport_factory =\n      inner_config_factory.createTransportSocketFactory(*inner_factory_config, context);\n  return std::make_unique<TapSocketFactory>(\n      outer_config, std::make_unique<SocketTapConfigFactoryImpl>(context.dispatcher().timeSource()),\n      context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher(),\n      std::move(inner_transport_factory));\n}\n\nNetwork::TransportSocketFactoryPtr DownstreamTapSocketConfigFactory::createTransportSocketFactory(\n    const Protobuf::Message& message, Server::Configuration::TransportSocketFactoryContext& context,\n    const std::vector<std::string>& server_names) {\n  const auto& outer_config =\n      MessageUtil::downcastAndValidate<const envoy::extensions::transport_sockets::tap::v3::Tap&>(\n          message, context.messageValidationVisitor());\n  auto& inner_config_factory = Config::Utility::getAndCheckFactory<\n      Server::Configuration::DownstreamTransportSocketConfigFactory>(\n      outer_config.transport_socket());\n  ProtobufTypes::MessagePtr inner_factory_config = Config::Utility::translateToFactoryConfig(\n      outer_config.transport_socket(), context.messageValidationVisitor(), inner_config_factory);\n  auto inner_transport_factory = inner_config_factory.createTransportSocketFactory(\n      *inner_factory_config, context, server_names);\n  return std::make_unique<TapSocketFactory>(\n      outer_config, std::make_unique<SocketTapConfigFactoryImpl>(context.dispatcher().timeSource()),\n      context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher(),\n      std::move(inner_transport_factory));\n}\n\nProtobufTypes::MessagePtr TapSocketConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::transport_sockets::tap::v3::Tap>();\n}\n\nREGISTER_FACTORY(UpstreamTapSocketConfigFactory,\n                 
Server::Configuration::UpstreamTransportSocketConfigFactory);\n\nREGISTER_FACTORY(DownstreamTapSocketConfigFactory,\n                 Server::Configuration::DownstreamTransportSocketConfigFactory);\n\n} // namespace Tap\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tap/config.h",
    "content": "#pragma once\n\n#include \"envoy/server/transport_socket_config.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tap {\n\n/**\n * Config registration for the tap wrapper for transport socket factory.\n * @see TransportSocketConfigFactory.\n */\nclass TapSocketConfigFactory : public virtual Server::Configuration::TransportSocketConfigFactory {\npublic:\n  std::string name() const override { return TransportSocketNames::get().Tap; }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n};\n\nclass UpstreamTapSocketConfigFactory\n    : public Server::Configuration::UpstreamTransportSocketConfigFactory,\n      public TapSocketConfigFactory {\npublic:\n  Network::TransportSocketFactoryPtr createTransportSocketFactory(\n      const Protobuf::Message& config,\n      Server::Configuration::TransportSocketFactoryContext& context) override;\n};\n\nclass DownstreamTapSocketConfigFactory\n    : public Server::Configuration::DownstreamTransportSocketConfigFactory,\n      public TapSocketConfigFactory {\npublic:\n  Network::TransportSocketFactoryPtr\n  createTransportSocketFactory(const Protobuf::Message& config,\n                               Server::Configuration::TransportSocketFactoryContext& context,\n                               const std::vector<std::string>& server_names) override;\n};\n\n} // namespace Tap\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tap/tap.cc",
    "content": "#include \"extensions/transport_sockets/tap/tap.h\"\n\n#include \"envoy/extensions/transport_sockets/tap/v3/tap.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tap {\n\nTapSocket::TapSocket(SocketTapConfigSharedPtr config,\n                     Network::TransportSocketPtr&& transport_socket)\n    : PassthroughSocket(std::move(transport_socket)), config_(config) {}\n\nvoid TapSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) {\n  ASSERT(!tapper_);\n  transport_socket_->setTransportSocketCallbacks(callbacks);\n  tapper_ = config_ ? config_->createPerSocketTapper(callbacks.connection()) : nullptr;\n}\n\nvoid TapSocket::closeSocket(Network::ConnectionEvent event) {\n  if (tapper_ != nullptr) {\n    tapper_->closeSocket(event);\n  }\n\n  transport_socket_->closeSocket(event);\n}\n\nNetwork::IoResult TapSocket::doRead(Buffer::Instance& buffer) {\n  Network::IoResult result = transport_socket_->doRead(buffer);\n  if (tapper_ != nullptr && result.bytes_processed_ > 0) {\n    tapper_->onRead(buffer, result.bytes_processed_);\n  }\n\n  return result;\n}\n\nNetwork::IoResult TapSocket::doWrite(Buffer::Instance& buffer, bool end_stream) {\n  // TODO(htuch): avoid copy.\n  Buffer::OwnedImpl copy(buffer);\n  Network::IoResult result = transport_socket_->doWrite(buffer, end_stream);\n  if (tapper_ != nullptr && result.bytes_processed_ > 0) {\n    tapper_->onWrite(copy, result.bytes_processed_, end_stream);\n  }\n  return result;\n}\n\nTapSocketFactory::TapSocketFactory(\n    const envoy::extensions::transport_sockets::tap::v3::Tap& proto_config,\n    Common::Tap::TapConfigFactoryPtr&& config_factory, Server::Admin& admin,\n    Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls,\n    Event::Dispatcher& main_thread_dispatcher,\n    Network::TransportSocketFactoryPtr&& transport_socket_factory)\n    : 
ExtensionConfigBase(proto_config.common_config(), std::move(config_factory), admin,\n                          singleton_manager, tls, main_thread_dispatcher),\n      transport_socket_factory_(std::move(transport_socket_factory)) {}\n\nNetwork::TransportSocketPtr\nTapSocketFactory::createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const {\n  return std::make_unique<TapSocket>(currentConfigHelper<SocketTapConfig>(),\n                                     transport_socket_factory_->createTransportSocket(options));\n}\n\nbool TapSocketFactory::implementsSecureTransport() const {\n  return transport_socket_factory_->implementsSecureTransport();\n}\n\n} // namespace Tap\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tap/tap.h",
    "content": "#pragma once\n\n#include \"envoy/event/timer.h\"\n#include \"envoy/extensions/transport_sockets/tap/v3/tap.pb.h\"\n#include \"envoy/network/transport_socket.h\"\n\n#include \"extensions/common/tap/extension_config_base.h\"\n#include \"extensions/transport_sockets/common/passthrough.h\"\n#include \"extensions/transport_sockets/tap/tap_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tap {\n\nclass TapSocket : public TransportSockets::PassthroughSocket {\npublic:\n  TapSocket(SocketTapConfigSharedPtr config, Network::TransportSocketPtr&& transport_socket);\n\n  // Network::TransportSocket\n  void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override;\n  void closeSocket(Network::ConnectionEvent event) override;\n  Network::IoResult doRead(Buffer::Instance& buffer) override;\n  Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override;\n\nprivate:\n  SocketTapConfigSharedPtr config_;\n  PerSocketTapperPtr tapper_;\n};\n\nclass TapSocketFactory : public Network::TransportSocketFactory,\n                         public Common::Tap::ExtensionConfigBase {\npublic:\n  TapSocketFactory(const envoy::extensions::transport_sockets::tap::v3::Tap& proto_config,\n                   Common::Tap::TapConfigFactoryPtr&& config_factory, Server::Admin& admin,\n                   Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls,\n                   Event::Dispatcher& main_thread_dispatcher,\n                   Network::TransportSocketFactoryPtr&& transport_socket_factory);\n\n  // Network::TransportSocketFactory\n  Network::TransportSocketPtr\n  createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override;\n  bool implementsSecureTransport() const override;\n\nprivate:\n  Network::TransportSocketFactoryPtr transport_socket_factory_;\n};\n\n} // namespace Tap\n} // namespace TransportSockets\n} // namespace Extensions\n} // 
namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tap/tap_config.h",
    "content": "#pragma once\n\n#include \"envoy/network/connection.h\"\n\n#include \"extensions/common/tap/tap.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tap {\n\n/**\n * Per-socket tap implementation. Abstractly handles all socket lifecycle events in order to tap\n * if the configuration matches.\n */\nclass PerSocketTapper {\npublic:\n  virtual ~PerSocketTapper() = default;\n\n  /**\n   * Called when the socket is closed.\n   * @param event supplies the close type.\n   */\n  virtual void closeSocket(Network::ConnectionEvent event) PURE;\n\n  /**\n   * Called when data is read from the underlying transport.\n   * @param data supplies the read data.\n   * @param bytes_read supplies the number of bytes read (data might already have bytes in it).\n   */\n  virtual void onRead(const Buffer::Instance& data, uint32_t bytes_read) PURE;\n\n  /**\n   * Called when data is written to the underlying transport.\n   * @param data supplies the written data.\n   * @param bytes_written supplies the number of bytes written (data might not have been fully\n   *                      written).\n   * @param end_stream supplies whether this is the end of socket writes.\n   */\n  virtual void onWrite(const Buffer::Instance& data, uint32_t bytes_written, bool end_stream) PURE;\n};\n\nusing PerSocketTapperPtr = std::unique_ptr<PerSocketTapper>;\n\n/**\n * Abstract socket tap configuration.\n */\nclass SocketTapConfig : public virtual Extensions::Common::Tap::TapConfig {\npublic:\n  /**\n   * @return a new per-socket tapper which is used to handle tapping of a discrete socket.\n   * @param connection supplies the underlying network connection.\n   */\n  virtual PerSocketTapperPtr createPerSocketTapper(const Network::Connection& connection) PURE;\n\n  /**\n   * @return time source to use for stamping events.\n   */\n  virtual TimeSource& timeSource() const PURE;\n};\n\nusing SocketTapConfigSharedPtr = std::shared_ptr<SocketTapConfig>;\n\n} // 
namespace Tap\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tap/tap_config_impl.cc",
    "content": "#include \"extensions/transport_sockets/tap/tap_config_impl.h\"\n\n#include \"envoy/data/tap/v3/transport.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tap {\n\nnamespace TapCommon = Extensions::Common::Tap;\n\nPerSocketTapperImpl::PerSocketTapperImpl(SocketTapConfigSharedPtr config,\n                                         const Network::Connection& connection)\n    : config_(std::move(config)),\n      sink_handle_(config_->createPerTapSinkHandleManager(connection.id())),\n      connection_(connection), statuses_(config_->createMatchStatusVector()) {\n  config_->rootMatcher().onNewStream(statuses_);\n  if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) {\n    TapCommon::TraceWrapperPtr trace = makeTraceSegment();\n    fillConnectionInfo(*trace->mutable_socket_streamed_trace_segment()->mutable_connection());\n    sink_handle_->submitTrace(std::move(trace));\n  }\n}\n\nvoid PerSocketTapperImpl::fillConnectionInfo(envoy::data::tap::v3::Connection& connection) {\n  Network::Utility::addressToProtobufAddress(*connection_.localAddress(),\n                                             *connection.mutable_local_address());\n  Network::Utility::addressToProtobufAddress(*connection_.remoteAddress(),\n                                             *connection.mutable_remote_address());\n}\n\nvoid PerSocketTapperImpl::closeSocket(Network::ConnectionEvent) {\n  if (!config_->rootMatcher().matchStatus(statuses_).matches_) {\n    return;\n  }\n\n  if (config_->streaming()) {\n    TapCommon::TraceWrapperPtr trace = makeTraceSegment();\n    auto& event = *trace->mutable_socket_streamed_trace_segment()->mutable_event();\n    initEvent(event);\n    event.mutable_closed();\n    sink_handle_->submitTrace(std::move(trace));\n  } else {\n    makeBufferedTraceIfNeeded();\n    
fillConnectionInfo(*buffered_trace_->mutable_socket_buffered_trace()->mutable_connection());\n    sink_handle_->submitTrace(std::move(buffered_trace_));\n  }\n\n  // Here we explicitly reset the sink_handle_ to release any sink resources and force a flush\n  // of any data (e.g., files). This is not explicitly needed in production, but is needed in\n  // tests to avoid race conditions due to deferred deletion. We could also do this with a stat,\n  // but this seems fine in general and is simpler.\n  sink_handle_.reset();\n}\n\nvoid PerSocketTapperImpl::initEvent(envoy::data::tap::v3::SocketEvent& event) {\n  event.mutable_timestamp()->MergeFrom(Protobuf::util::TimeUtil::NanosecondsToTimestamp(\n      std::chrono::duration_cast<std::chrono::nanoseconds>(\n          config_->timeSource().systemTime().time_since_epoch())\n          .count()));\n}\n\nvoid PerSocketTapperImpl::onRead(const Buffer::Instance& data, uint32_t bytes_read) {\n  if (!config_->rootMatcher().matchStatus(statuses_).matches_) {\n    return;\n  }\n\n  if (config_->streaming()) {\n    TapCommon::TraceWrapperPtr trace = makeTraceSegment();\n    auto& event = *trace->mutable_socket_streamed_trace_segment()->mutable_event();\n    initEvent(event);\n    TapCommon::Utility::addBufferToProtoBytes(*event.mutable_read()->mutable_data(),\n                                              config_->maxBufferedRxBytes(), data,\n                                              data.length() - bytes_read, bytes_read);\n    sink_handle_->submitTrace(std::move(trace));\n  } else {\n    if (buffered_trace_ != nullptr && buffered_trace_->socket_buffered_trace().read_truncated()) {\n      return;\n    }\n\n    makeBufferedTraceIfNeeded();\n    auto& event = *buffered_trace_->mutable_socket_buffered_trace()->add_events();\n    initEvent(event);\n    ASSERT(rx_bytes_buffered_ <= config_->maxBufferedRxBytes());\n    buffered_trace_->mutable_socket_buffered_trace()->set_read_truncated(\n        
TapCommon::Utility::addBufferToProtoBytes(*event.mutable_read()->mutable_data(),\n                                                  config_->maxBufferedRxBytes() -\n                                                      rx_bytes_buffered_,\n                                                  data, data.length() - bytes_read, bytes_read));\n    rx_bytes_buffered_ += event.read().data().as_bytes().size();\n  }\n}\n\nvoid PerSocketTapperImpl::onWrite(const Buffer::Instance& data, uint32_t bytes_written,\n                                  bool end_stream) {\n  if (!config_->rootMatcher().matchStatus(statuses_).matches_) {\n    return;\n  }\n\n  if (config_->streaming()) {\n    TapCommon::TraceWrapperPtr trace = makeTraceSegment();\n    auto& event = *trace->mutable_socket_streamed_trace_segment()->mutable_event();\n    initEvent(event);\n    TapCommon::Utility::addBufferToProtoBytes(*event.mutable_write()->mutable_data(),\n                                              config_->maxBufferedTxBytes(), data, 0,\n                                              bytes_written);\n    event.mutable_write()->set_end_stream(end_stream);\n    sink_handle_->submitTrace(std::move(trace));\n  } else {\n    if (buffered_trace_ != nullptr && buffered_trace_->socket_buffered_trace().write_truncated()) {\n      return;\n    }\n\n    makeBufferedTraceIfNeeded();\n    auto& event = *buffered_trace_->mutable_socket_buffered_trace()->add_events();\n    initEvent(event);\n    ASSERT(tx_bytes_buffered_ <= config_->maxBufferedTxBytes());\n    buffered_trace_->mutable_socket_buffered_trace()->set_write_truncated(\n        TapCommon::Utility::addBufferToProtoBytes(\n            *event.mutable_write()->mutable_data(),\n            config_->maxBufferedTxBytes() - tx_bytes_buffered_, data, 0, bytes_written));\n    tx_bytes_buffered_ += event.write().data().as_bytes().size();\n    event.mutable_write()->set_end_stream(end_stream);\n  }\n}\n\n} // namespace Tap\n} // namespace TransportSockets\n} // 
namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tap/tap_config_impl.h",
    "content": "#pragma once\n\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/transport.pb.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"extensions/common/tap/tap_config_base.h\"\n#include \"extensions/transport_sockets/tap/tap_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tap {\n\nclass PerSocketTapperImpl : public PerSocketTapper {\npublic:\n  PerSocketTapperImpl(SocketTapConfigSharedPtr config, const Network::Connection& connection);\n\n  // PerSocketTapper\n  void closeSocket(Network::ConnectionEvent event) override;\n  void onRead(const Buffer::Instance& data, uint32_t bytes_read) override;\n  void onWrite(const Buffer::Instance& data, uint32_t bytes_written, bool end_stream) override;\n\nprivate:\n  void initEvent(envoy::data::tap::v3::SocketEvent&);\n  void fillConnectionInfo(envoy::data::tap::v3::Connection& connection);\n  void makeBufferedTraceIfNeeded() {\n    if (buffered_trace_ == nullptr) {\n      buffered_trace_ = Extensions::Common::Tap::makeTraceWrapper();\n      buffered_trace_->mutable_socket_buffered_trace()->set_trace_id(connection_.id());\n    }\n  }\n  Extensions::Common::Tap::TraceWrapperPtr makeTraceSegment() {\n    Extensions::Common::Tap::TraceWrapperPtr trace = Extensions::Common::Tap::makeTraceWrapper();\n    trace->mutable_socket_streamed_trace_segment()->set_trace_id(connection_.id());\n    return trace;\n  }\n\n  SocketTapConfigSharedPtr config_;\n  Extensions::Common::Tap::PerTapSinkHandleManagerPtr sink_handle_;\n  const Network::Connection& connection_;\n  Extensions::Common::Tap::Matcher::MatchStatusVector statuses_;\n  // Must be a shared_ptr because of submitTrace().\n  Extensions::Common::Tap::TraceWrapperPtr buffered_trace_;\n  uint32_t rx_bytes_buffered_{};\n  uint32_t tx_bytes_buffered_{};\n};\n\nclass SocketTapConfigImpl : public Extensions::Common::Tap::TapConfigBaseImpl,\n                            public SocketTapConfig,\n             
               public std::enable_shared_from_this<SocketTapConfigImpl> {\npublic:\n  SocketTapConfigImpl(envoy::config::tap::v3::TapConfig&& proto_config,\n                      Extensions::Common::Tap::Sink* admin_streamer, TimeSource& time_system)\n      : Extensions::Common::Tap::TapConfigBaseImpl(std::move(proto_config), admin_streamer),\n        time_source_(time_system) {}\n\n  // SocketTapConfig\n  PerSocketTapperPtr createPerSocketTapper(const Network::Connection& connection) override {\n    return std::make_unique<PerSocketTapperImpl>(shared_from_this(), connection);\n  }\n  TimeSource& timeSource() const override { return time_source_; }\n\nprivate:\n  TimeSource& time_source_;\n\n  friend class PerSocketTapperImpl;\n};\n\n} // namespace Tap\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\n# Built-in TLS connection transport socket.\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"robust_to_untrusted_downstream_and_upstream\",\n    # TLS is core functionality.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":ssl_socket_lib\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"ssl_handshaker_lib\",\n    srcs = [\"ssl_handshaker.cc\"],\n    hdrs = [\"ssl_handshaker.h\"],\n    external_deps = [\"ssl\"],\n    deps = [\n        \":context_lib\",\n        \":utility_lib\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/ssl:handshaker_interface\",\n        \"//include/envoy/ssl:ssl_socket_extended_info_interface\",\n        \"//include/envoy/ssl:ssl_socket_state\",\n        \"//include/envoy/ssl/private_key:private_key_callbacks_interface\",\n        \"//include/envoy/ssl/private_key:private_key_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:thread_annotations\",\n        \"//source/common/http:headers_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"io_handle_bio_lib\",\n    srcs = [\"io_handle_bio.cc\"],\n    hdrs 
= [\"io_handle_bio.h\"],\n    external_deps = [\"ssl\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/network:io_handle_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"ssl_socket_lib\",\n    srcs = [\"ssl_socket.cc\"],\n    hdrs = [\"ssl_socket.h\"],\n    external_deps = [\n        \"abseil_hash\",\n        \"abseil_node_hash_map\",\n        \"abseil_optional\",\n        \"abseil_synchronization\",\n        \"ssl\",\n    ],\n    # TLS is core functionality.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":context_config_lib\",\n        \":context_lib\",\n        \":io_handle_bio_lib\",\n        \":ssl_handshaker_lib\",\n        \":utility_lib\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/ssl:handshaker_interface\",\n        \"//include/envoy/ssl:ssl_socket_extended_info_interface\",\n        \"//include/envoy/ssl:ssl_socket_state\",\n        \"//include/envoy/ssl/private_key:private_key_callbacks_interface\",\n        \"//include/envoy/ssl/private_key:private_key_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:thread_annotations\",\n        \"//source/common/http:headers_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"context_config_lib\",\n    srcs = [\"context_config_impl.cc\"],\n    hdrs = [\"context_config_impl.h\"],\n    external_deps = [\n        \"ssl\",\n    ],\n    # TLS is core functionality.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":ssl_handshaker_lib\",\n        \"//include/envoy/secret:secret_callbacks_interface\",\n        \"//include/envoy/secret:secret_provider_interface\",\n        
\"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/ssl:context_config_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/secret:sds_api_lib\",\n        \"//source/common/ssl:certificate_validation_context_config_impl_lib\",\n        \"//source/common/ssl:tls_certificate_config_impl_lib\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"context_lib\",\n    srcs = [\n        \"context_impl.cc\",\n        \"context_manager_impl.cc\",\n    ],\n    hdrs = [\n        \"context_impl.h\",\n        \"context_manager_impl.h\",\n    ],\n    external_deps = [\n        \"abseil_node_hash_set\",\n        \"abseil_synchronization\",\n        \"ssl\",\n    ],\n    # TLS is core functionality.\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/ssl:context_config_interface\",\n        \"//include/envoy/ssl:context_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/ssl:ssl_socket_extended_info_interface\",\n        \"//include/envoy/ssl/private_key:private_key_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:base64_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_features_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n       
 \"//source/common/stats:utility_lib\",\n        \"//source/extensions/transport_sockets/tls/ocsp:ocsp_lib\",\n        \"//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    external_deps = [\n        \"ssl\",\n    ],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/network:address_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/config.cc",
    "content": "#include \"extensions/transport_sockets/tls/config.h\"\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nNetwork::TransportSocketFactoryPtr UpstreamSslSocketFactory::createTransportSocketFactory(\n    const Protobuf::Message& message,\n    Server::Configuration::TransportSocketFactoryContext& context) {\n  auto client_config = std::make_unique<ClientContextConfigImpl>(\n      MessageUtil::downcastAndValidate<\n          const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext&>(\n          message, context.messageValidationVisitor()),\n      context);\n  return std::make_unique<ClientSslSocketFactory>(std::move(client_config),\n                                                  context.sslContextManager(), context.scope());\n}\n\nProtobufTypes::MessagePtr UpstreamSslSocketFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext>();\n}\n\nREGISTER_FACTORY(UpstreamSslSocketFactory,\n                 Server::Configuration::UpstreamTransportSocketConfigFactory){\"tls\"};\n\nNetwork::TransportSocketFactoryPtr DownstreamSslSocketFactory::createTransportSocketFactory(\n    const Protobuf::Message& message, Server::Configuration::TransportSocketFactoryContext& context,\n    const std::vector<std::string>& server_names) {\n  auto server_config = std::make_unique<ServerContextConfigImpl>(\n      MessageUtil::downcastAndValidate<\n          const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext&>(\n          message, context.messageValidationVisitor()),\n      context);\n  return 
std::make_unique<ServerSslSocketFactory>(\n      std::move(server_config), context.sslContextManager(), context.scope(), server_names);\n}\n\nProtobufTypes::MessagePtr DownstreamSslSocketFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext>();\n}\n\nREGISTER_FACTORY(DownstreamSslSocketFactory,\n                 Server::Configuration::DownstreamTransportSocketConfigFactory){\"tls\"};\n\nSsl::ContextManagerPtr SslContextManagerFactory::createContextManager(TimeSource& time_source) {\n  return std::make_unique<ContextManagerImpl>(time_source);\n}\n\nstatic Envoy::Registry::RegisterInternalFactory<SslContextManagerFactory,\n                                                Ssl::ContextManagerFactory>\n    ssl_manager_registered;\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/config.h",
    "content": "#pragma once\n\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/transport_socket_config.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\n/**\n * Config registration for the BoringSSL transport socket factory.\n * @see TransportSocketConfigFactory.\n */\nclass SslSocketConfigFactory : public virtual Server::Configuration::TransportSocketConfigFactory {\npublic:\n  ~SslSocketConfigFactory() override = default;\n  std::string name() const override { return TransportSocketNames::get().Tls; }\n};\n\nclass UpstreamSslSocketFactory : public Server::Configuration::UpstreamTransportSocketConfigFactory,\n                                 public SslSocketConfigFactory {\npublic:\n  Network::TransportSocketFactoryPtr createTransportSocketFactory(\n      const Protobuf::Message& config,\n      Server::Configuration::TransportSocketFactoryContext& context) override;\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n};\n\nDECLARE_FACTORY(UpstreamSslSocketFactory);\n\nclass DownstreamSslSocketFactory\n    : public Server::Configuration::DownstreamTransportSocketConfigFactory,\n      public SslSocketConfigFactory {\npublic:\n  Network::TransportSocketFactoryPtr\n  createTransportSocketFactory(const Protobuf::Message& config,\n                               Server::Configuration::TransportSocketFactoryContext& context,\n                               const std::vector<std::string>& server_names) override;\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n};\n\nDECLARE_FACTORY(DownstreamSslSocketFactory);\n\nclass SslContextManagerFactory : public Ssl::ContextManagerFactory {\npublic:\n  Ssl::ContextManagerPtr createContextManager(TimeSource& time_source) override;\n};\n\nDECLARE_FACTORY(SslContextManagerFactory);\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace 
Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/context_config_impl.cc",
    "content": "#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/config/datasource.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/secret/sds_api.h\"\n#include \"common/ssl/certificate_validation_context_config_impl.h\"\n\n#include \"extensions/transport_sockets/tls/ssl_handshaker.h\"\n\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nnamespace {\n\nstd::vector<Secret::TlsCertificateConfigProviderSharedPtr> getTlsCertificateConfigProviders(\n    const envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& config,\n    Server::Configuration::TransportSocketFactoryContext& factory_context) {\n  if (!config.tls_certificates().empty()) {\n    std::vector<Secret::TlsCertificateConfigProviderSharedPtr> providers;\n    for (const auto& tls_certificate : config.tls_certificates()) {\n      if (!tls_certificate.has_private_key_provider() && !tls_certificate.has_certificate_chain() &&\n          !tls_certificate.has_private_key()) {\n        continue;\n      }\n      providers.push_back(\n          factory_context.secretManager().createInlineTlsCertificateProvider(tls_certificate));\n    }\n    return providers;\n  }\n  if (!config.tls_certificate_sds_secret_configs().empty()) {\n    const auto& sds_secret_config = config.tls_certificate_sds_secret_configs(0);\n    if (sds_secret_config.has_sds_config()) {\n      // Fetch dynamic secret.\n      return {factory_context.secretManager().findOrCreateTlsCertificateProvider(\n          sds_secret_config.sds_config(), sds_secret_config.name(), factory_context)};\n    } else {\n      // Load static secret.\n      auto secret_provider = factory_context.secretManager().findStaticTlsCertificateProvider(\n          
sds_secret_config.name());\n      if (!secret_provider) {\n        throw EnvoyException(fmt::format(\"Unknown static secret: {}\", sds_secret_config.name()));\n      }\n      return {secret_provider};\n    }\n  }\n  return {};\n}\n\nSecret::CertificateValidationContextConfigProviderSharedPtr getProviderFromSds(\n    Server::Configuration::TransportSocketFactoryContext& factory_context,\n    const envoy::extensions::transport_sockets::tls::v3::SdsSecretConfig& sds_secret_config) {\n  if (sds_secret_config.has_sds_config()) {\n    // Fetch dynamic secret.\n    return factory_context.secretManager().findOrCreateCertificateValidationContextProvider(\n        sds_secret_config.sds_config(), sds_secret_config.name(), factory_context);\n  } else {\n    // Load static secret.\n    auto secret_provider =\n        factory_context.secretManager().findStaticCertificateValidationContextProvider(\n            sds_secret_config.name());\n    if (!secret_provider) {\n      throw EnvoyException(fmt::format(\"Unknown static certificate validation context: {}\",\n                                       sds_secret_config.name()));\n    }\n    return secret_provider;\n  }\n  return nullptr;\n}\n\nSecret::CertificateValidationContextConfigProviderSharedPtr\ngetCertificateValidationContextConfigProvider(\n    const envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& config,\n    Server::Configuration::TransportSocketFactoryContext& factory_context,\n    std::unique_ptr<envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext>*\n        default_cvc) {\n  switch (config.validation_context_type_case()) {\n  case envoy::extensions::transport_sockets::tls::v3::CommonTlsContext::ValidationContextTypeCase::\n      kValidationContext: {\n    auto secret_provider =\n        factory_context.secretManager().createInlineCertificateValidationContextProvider(\n            config.validation_context());\n    return secret_provider;\n  }\n  case 
envoy::extensions::transport_sockets::tls::v3::CommonTlsContext::ValidationContextTypeCase::\n      kValidationContextSdsSecretConfig: {\n    const auto& sds_secret_config = config.validation_context_sds_secret_config();\n    return getProviderFromSds(factory_context, sds_secret_config);\n  }\n  case envoy::extensions::transport_sockets::tls::v3::CommonTlsContext::ValidationContextTypeCase::\n      kCombinedValidationContext: {\n    *default_cvc = std::make_unique<\n        envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext>(\n        config.combined_validation_context().default_validation_context());\n    const auto& sds_secret_config =\n        config.combined_validation_context().validation_context_sds_secret_config();\n    return getProviderFromSds(factory_context, sds_secret_config);\n  }\n  default:\n    return nullptr;\n  }\n}\n\nSecret::TlsSessionTicketKeysConfigProviderSharedPtr getTlsSessionTicketKeysConfigProvider(\n    Server::Configuration::TransportSocketFactoryContext& factory_context,\n    const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext& config) {\n\n  switch (config.session_ticket_keys_type_case()) {\n  case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::\n      SessionTicketKeysTypeCase::kSessionTicketKeys:\n    return factory_context.secretManager().createInlineTlsSessionTicketKeysProvider(\n        config.session_ticket_keys());\n  case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::\n      SessionTicketKeysTypeCase::kSessionTicketKeysSdsSecretConfig: {\n    const auto& sds_secret_config = config.session_ticket_keys_sds_secret_config();\n    if (sds_secret_config.has_sds_config()) {\n      // Fetch dynamic secret.\n      return factory_context.secretManager().findOrCreateTlsSessionTicketKeysContextProvider(\n          sds_secret_config.sds_config(), sds_secret_config.name(), factory_context);\n    } else {\n      // Load static secret.\n      auto 
secret_provider =\n          factory_context.secretManager().findStaticTlsSessionTicketKeysContextProvider(\n              sds_secret_config.name());\n      if (!secret_provider) {\n        throw EnvoyException(\n            fmt::format(\"Unknown tls session ticket keys: {}\", sds_secret_config.name()));\n      }\n      return secret_provider;\n    }\n  }\n  case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::\n      SessionTicketKeysTypeCase::kDisableStatelessSessionResumption:\n  case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::\n      SessionTicketKeysTypeCase::SESSION_TICKET_KEYS_TYPE_NOT_SET:\n    return nullptr;\n  default:\n    throw EnvoyException(fmt::format(\"Unexpected case for oneof session_ticket_keys: {}\",\n                                     config.session_ticket_keys_type_case()));\n  }\n}\n\nbool getStatelessSessionResumptionDisabled(\n    const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext& config) {\n  if (config.session_ticket_keys_type_case() ==\n      envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::\n          SessionTicketKeysTypeCase::kDisableStatelessSessionResumption) {\n    return config.disable_stateless_session_resumption();\n  } else {\n    return false;\n  }\n}\n\n} // namespace\n\nContextConfigImpl::ContextConfigImpl(\n    const envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& config,\n    const unsigned default_min_protocol_version, const unsigned default_max_protocol_version,\n    const std::string& default_cipher_suites, const std::string& default_curves,\n    Server::Configuration::TransportSocketFactoryContext& factory_context)\n    : api_(factory_context.api()),\n      alpn_protocols_(RepeatedPtrUtil::join(config.alpn_protocols(), \",\")),\n      cipher_suites_(StringUtil::nonEmptyStringOrDefault(\n          RepeatedPtrUtil::join(config.tls_params().cipher_suites(), \":\"), default_cipher_suites)),\n      
ecdh_curves_(StringUtil::nonEmptyStringOrDefault(\n          RepeatedPtrUtil::join(config.tls_params().ecdh_curves(), \":\"), default_curves)),\n      tls_certificate_providers_(getTlsCertificateConfigProviders(config, factory_context)),\n      certificate_validation_context_provider_(\n          getCertificateValidationContextConfigProvider(config, factory_context, &default_cvc_)),\n      min_protocol_version_(tlsVersionFromProto(config.tls_params().tls_minimum_protocol_version(),\n                                                default_min_protocol_version)),\n      max_protocol_version_(tlsVersionFromProto(config.tls_params().tls_maximum_protocol_version(),\n                                                default_max_protocol_version)) {\n  if (certificate_validation_context_provider_ != nullptr) {\n    if (default_cvc_) {\n      // We need to validate combined certificate validation context.\n      // The default certificate validation context and dynamic certificate validation\n      // context could only contain partial fields, which is okay to fail the validation.\n      // But the combined certificate validation context should pass validation. 
If\n      // validation of combined certificate validation context fails,\n      // getCombinedValidationContextConfig() throws exception, validation_context_config_ will not\n      // get updated.\n      cvc_validation_callback_handle_ =\n          certificate_validation_context_provider_->addValidationCallback(\n              [this](\n                  const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n                      dynamic_cvc) { getCombinedValidationContextConfig(dynamic_cvc); });\n    }\n    // Load inlined, static or dynamic secret that's already available.\n    if (certificate_validation_context_provider_->secret() != nullptr) {\n      if (default_cvc_) {\n        validation_context_config_ =\n            getCombinedValidationContextConfig(*certificate_validation_context_provider_->secret());\n      } else {\n        validation_context_config_ = std::make_unique<Ssl::CertificateValidationContextConfigImpl>(\n            *certificate_validation_context_provider_->secret(), api_);\n      }\n    }\n  }\n  // Load inlined, static or dynamic secrets that are already available.\n  if (!tls_certificate_providers_.empty()) {\n    for (auto& provider : tls_certificate_providers_) {\n      if (provider->secret() != nullptr) {\n        tls_certificate_configs_.emplace_back(*provider->secret(), &factory_context, api_);\n      }\n    }\n  }\n\n  HandshakerFactoryContextImpl handshaker_factory_context(api_, alpn_protocols_);\n  Ssl::HandshakerFactory* handshaker_factory;\n  if (config.has_custom_handshaker()) {\n    // If a custom handshaker is configured, derive the factory from the config.\n    const auto& handshaker_config = config.custom_handshaker();\n    handshaker_factory =\n        &Config::Utility::getAndCheckFactory<Ssl::HandshakerFactory>(handshaker_config);\n    handshaker_factory_cb_ = handshaker_factory->createHandshakerCb(\n        handshaker_config.typed_config(), handshaker_factory_context,\n        
factory_context.messageValidationVisitor());\n  } else {\n    // Otherwise, derive the config from the default factory.\n    handshaker_factory = HandshakerFactoryImpl::getDefaultHandshakerFactory();\n    handshaker_factory_cb_ = handshaker_factory->createHandshakerCb(\n        *handshaker_factory->createEmptyConfigProto(), handshaker_factory_context,\n        factory_context.messageValidationVisitor());\n  }\n  capabilities_ = handshaker_factory->capabilities();\n}\n\nSsl::CertificateValidationContextConfigPtr ContextConfigImpl::getCombinedValidationContextConfig(\n    const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n        dynamic_cvc) {\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext combined_cvc =\n      *default_cvc_;\n  combined_cvc.MergeFrom(dynamic_cvc);\n  return std::make_unique<Envoy::Ssl::CertificateValidationContextConfigImpl>(combined_cvc, api_);\n}\n\nvoid ContextConfigImpl::setSecretUpdateCallback(std::function<void()> callback) {\n  if (!tls_certificate_providers_.empty()) {\n    if (tc_update_callback_handle_) {\n      tc_update_callback_handle_->remove();\n    }\n    // Once tls_certificate_config_ receives new secret, this callback updates\n    // ContextConfigImpl::tls_certificate_config_ with new secret.\n    tc_update_callback_handle_ =\n        tls_certificate_providers_[0]->addUpdateCallback([this, callback]() {\n          // This breaks multiple certificate support, but today SDS is only single cert.\n          // TODO(htuch): Fix this when SDS goes multi-cert.\n          tls_certificate_configs_.clear();\n          tls_certificate_configs_.emplace_back(*tls_certificate_providers_[0]->secret(), nullptr,\n                                                api_);\n          callback();\n        });\n  }\n  if (certificate_validation_context_provider_) {\n    if (cvc_update_callback_handle_) {\n      cvc_update_callback_handle_->remove();\n    }\n    if (default_cvc_) {\n      // 
Once certificate_validation_context_provider_ receives new secret, this callback updates\n      // ContextConfigImpl::validation_context_config_ with a combined certificate validation\n      // context. The combined certificate validation context is created by merging new secret into\n      // default_cvc_.\n      cvc_update_callback_handle_ =\n          certificate_validation_context_provider_->addUpdateCallback([this, callback]() {\n            validation_context_config_ = getCombinedValidationContextConfig(\n                *certificate_validation_context_provider_->secret());\n            callback();\n          });\n    } else {\n      // Once certificate_validation_context_provider_ receives new secret, this callback updates\n      // ContextConfigImpl::validation_context_config_ with new secret.\n      cvc_update_callback_handle_ =\n          certificate_validation_context_provider_->addUpdateCallback([this, callback]() {\n            validation_context_config_ =\n                std::make_unique<Ssl::CertificateValidationContextConfigImpl>(\n                    *certificate_validation_context_provider_->secret(), api_);\n            callback();\n          });\n    }\n  }\n}\n\nSsl::HandshakerFactoryCb ContextConfigImpl::createHandshaker() const {\n  return handshaker_factory_cb_;\n}\n\nContextConfigImpl::~ContextConfigImpl() {\n  if (tc_update_callback_handle_) {\n    tc_update_callback_handle_->remove();\n  }\n  if (cvc_update_callback_handle_) {\n    cvc_update_callback_handle_->remove();\n  }\n  if (cvc_validation_callback_handle_) {\n    cvc_validation_callback_handle_->remove();\n  }\n}\n\nunsigned ContextConfigImpl::tlsVersionFromProto(\n    const envoy::extensions::transport_sockets::tls::v3::TlsParameters::TlsProtocol& version,\n    unsigned default_version) {\n  switch (version) {\n  case envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLS_AUTO:\n    return default_version;\n  case 
envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0:\n    return TLS1_VERSION;\n  case envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_1:\n    return TLS1_1_VERSION;\n  case envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_2:\n    return TLS1_2_VERSION;\n  case envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3:\n    return TLS1_3_VERSION;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nconst unsigned ClientContextConfigImpl::DEFAULT_MIN_VERSION = TLS1_2_VERSION;\nconst unsigned ClientContextConfigImpl::DEFAULT_MAX_VERSION = TLS1_2_VERSION;\n\nconst std::string ClientContextConfigImpl::DEFAULT_CIPHER_SUITES =\n#ifndef BORINGSSL_FIPS\n    \"[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]:\"\n    \"[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]:\"\n#else // BoringSSL FIPS\n    \"ECDHE-ECDSA-AES128-GCM-SHA256:\"\n    \"ECDHE-RSA-AES128-GCM-SHA256:\"\n#endif\n    \"ECDHE-ECDSA-AES128-SHA:\"\n    \"ECDHE-RSA-AES128-SHA:\"\n    \"AES128-GCM-SHA256:\"\n    \"AES128-SHA:\"\n    \"ECDHE-ECDSA-AES256-GCM-SHA384:\"\n    \"ECDHE-RSA-AES256-GCM-SHA384:\"\n    \"ECDHE-ECDSA-AES256-SHA:\"\n    \"ECDHE-RSA-AES256-SHA:\"\n    \"AES256-GCM-SHA384:\"\n    \"AES256-SHA\";\n\nconst std::string ClientContextConfigImpl::DEFAULT_CURVES =\n#ifndef BORINGSSL_FIPS\n    \"X25519:\"\n#endif\n    \"P-256\";\n\nClientContextConfigImpl::ClientContextConfigImpl(\n    const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& config,\n    absl::string_view sigalgs,\n    Server::Configuration::TransportSocketFactoryContext& factory_context)\n    : ContextConfigImpl(config.common_tls_context(), DEFAULT_MIN_VERSION, DEFAULT_MAX_VERSION,\n                        DEFAULT_CIPHER_SUITES, DEFAULT_CURVES, factory_context),\n      server_name_indication_(config.sni()), allow_renegotiation_(config.allow_renegotiation()),\n      max_session_keys_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, 
max_session_keys, 1)),\n      sigalgs_(sigalgs) {\n  // BoringSSL treats this as a C string, so embedded NULL characters will not\n  // be handled correctly.\n  if (server_name_indication_.find('\\0') != std::string::npos) {\n    throw EnvoyException(\"SNI names containing NULL-byte are not allowed\");\n  }\n  // TODO(PiotrSikora): Support multiple TLS certificates.\n  if ((config.common_tls_context().tls_certificates().size() +\n       config.common_tls_context().tls_certificate_sds_secret_configs().size()) > 1) {\n    throw EnvoyException(\"Multiple TLS certificates are not supported for client contexts\");\n  }\n}\n\nconst unsigned ServerContextConfigImpl::DEFAULT_MIN_VERSION = TLS1_VERSION;\nconst unsigned ServerContextConfigImpl::DEFAULT_MAX_VERSION = TLS1_3_VERSION;\n\nconst std::string ServerContextConfigImpl::DEFAULT_CIPHER_SUITES =\n#ifndef BORINGSSL_FIPS\n    \"[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]:\"\n    \"[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]:\"\n#else // BoringSSL FIPS\n    \"ECDHE-ECDSA-AES128-GCM-SHA256:\"\n    \"ECDHE-RSA-AES128-GCM-SHA256:\"\n#endif\n    \"ECDHE-ECDSA-AES128-SHA:\"\n    \"ECDHE-RSA-AES128-SHA:\"\n    \"AES128-GCM-SHA256:\"\n    \"AES128-SHA:\"\n    \"ECDHE-ECDSA-AES256-GCM-SHA384:\"\n    \"ECDHE-RSA-AES256-GCM-SHA384:\"\n    \"ECDHE-ECDSA-AES256-SHA:\"\n    \"ECDHE-RSA-AES256-SHA:\"\n    \"AES256-GCM-SHA384:\"\n    \"AES256-SHA\";\n\nconst std::string ServerContextConfigImpl::DEFAULT_CURVES =\n#ifndef BORINGSSL_FIPS\n    \"X25519:\"\n#endif\n    \"P-256\";\n\nServerContextConfigImpl::ServerContextConfigImpl(\n    const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext& config,\n    Server::Configuration::TransportSocketFactoryContext& factory_context)\n    : ContextConfigImpl(config.common_tls_context(), DEFAULT_MIN_VERSION, DEFAULT_MAX_VERSION,\n                        DEFAULT_CIPHER_SUITES, DEFAULT_CURVES, factory_context),\n      require_client_certificate_(\n        
  PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, require_client_certificate, false)),\n      ocsp_staple_policy_(ocspStaplePolicyFromProto(config.ocsp_staple_policy())),\n      session_ticket_keys_provider_(getTlsSessionTicketKeysConfigProvider(factory_context, config)),\n      disable_stateless_session_resumption_(getStatelessSessionResumptionDisabled(config)) {\n\n  if (session_ticket_keys_provider_ != nullptr) {\n    // Validate tls session ticket keys early to reject bad sds updates.\n    stk_validation_callback_handle_ = session_ticket_keys_provider_->addValidationCallback(\n        [this](const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys& keys) {\n          getSessionTicketKeys(keys);\n        });\n    // Load inlined, static or dynamic secret that's already available.\n    if (session_ticket_keys_provider_->secret() != nullptr) {\n      session_ticket_keys_ = getSessionTicketKeys(*session_ticket_keys_provider_->secret());\n    }\n  }\n\n  if (!capabilities().provides_certificates) {\n    if ((config.common_tls_context().tls_certificates().size() +\n         config.common_tls_context().tls_certificate_sds_secret_configs().size()) == 0) {\n      throw EnvoyException(\"No TLS certificates found for server context\");\n    } else if (!config.common_tls_context().tls_certificates().empty() &&\n               !config.common_tls_context().tls_certificate_sds_secret_configs().empty()) {\n      throw EnvoyException(\"SDS and non-SDS TLS certificates may not be mixed in server contexts\");\n    }\n  }\n\n  if (config.has_session_timeout()) {\n    session_timeout_ =\n        std::chrono::seconds(DurationUtil::durationToSeconds(config.session_timeout()));\n  }\n}\n\nServerContextConfigImpl::~ServerContextConfigImpl() {\n  if (stk_update_callback_handle_ != nullptr) {\n    stk_update_callback_handle_->remove();\n  }\n  if (stk_validation_callback_handle_ != nullptr) {\n    stk_validation_callback_handle_->remove();\n  }\n}\n\nvoid 
ServerContextConfigImpl::setSecretUpdateCallback(std::function<void()> callback) {\n  ContextConfigImpl::setSecretUpdateCallback(callback);\n  if (session_ticket_keys_provider_) {\n    if (stk_update_callback_handle_) {\n      stk_update_callback_handle_->remove();\n    }\n    // Once session_ticket_keys_ receives new secret, this callback updates\n    // ContextConfigImpl::session_ticket_keys_ with new session ticket keys.\n    stk_update_callback_handle_ =\n        session_ticket_keys_provider_->addUpdateCallback([this, callback]() {\n          session_ticket_keys_ = getSessionTicketKeys(*session_ticket_keys_provider_->secret());\n          callback();\n        });\n  }\n}\n\nstd::vector<Ssl::ServerContextConfig::SessionTicketKey>\nServerContextConfigImpl::getSessionTicketKeys(\n    const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys& keys) {\n  std::vector<Ssl::ServerContextConfig::SessionTicketKey> result;\n  for (const auto& datasource : keys.keys()) {\n    result.emplace_back(getSessionTicketKey(Config::DataSource::read(datasource, false, api_)));\n  }\n  return result;\n}\n\n// Extracts a SessionTicketKey from raw binary data.\n// Throws if key_data is invalid.\nSsl::ServerContextConfig::SessionTicketKey\nServerContextConfigImpl::getSessionTicketKey(const std::string& key_data) {\n  // If this changes, need to figure out how to deal with key files\n  // that previously worked. For now, just assert so we'll notice that\n  // it changed if it does.\n  static_assert(sizeof(SessionTicketKey) == 80, \"Input is expected to be this size\");\n\n  if (key_data.size() != sizeof(SessionTicketKey)) {\n    throw EnvoyException(fmt::format(\"Incorrect TLS session ticket key length. 
\"\n                                     \"Length {}, expected length {}.\",\n                                     key_data.size(), sizeof(SessionTicketKey)));\n  }\n\n  SessionTicketKey dst_key;\n\n  std::copy_n(key_data.begin(), dst_key.name_.size(), dst_key.name_.begin());\n  size_t pos = dst_key.name_.size();\n  std::copy_n(key_data.begin() + pos, dst_key.hmac_key_.size(), dst_key.hmac_key_.begin());\n  pos += dst_key.hmac_key_.size();\n  std::copy_n(key_data.begin() + pos, dst_key.aes_key_.size(), dst_key.aes_key_.begin());\n  pos += dst_key.aes_key_.size();\n  ASSERT(key_data.begin() + pos == key_data.end());\n\n  return dst_key;\n}\n\nSsl::ServerContextConfig::OcspStaplePolicy ServerContextConfigImpl::ocspStaplePolicyFromProto(\n    const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::OcspStaplePolicy&\n        policy) {\n  switch (policy) {\n  case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::LENIENT_STAPLING:\n    return Ssl::ServerContextConfig::OcspStaplePolicy::LenientStapling;\n  case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::STRICT_STAPLING:\n    return Ssl::ServerContextConfig::OcspStaplePolicy::StrictStapling;\n  case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::MUST_STAPLE:\n    return Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/context_config_impl.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/secret/secret_callbacks.h\"\n#include \"envoy/secret/secret_provider.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/ssl/context_config.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/ssl/tls_certificate_config_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nstatic const std::string INLINE_STRING = \"<inline>\";\n\nclass ContextConfigImpl : public virtual Ssl::ContextConfig {\npublic:\n  ~ContextConfigImpl() override;\n\n  // Ssl::ContextConfig\n  const std::string& alpnProtocols() const override { return alpn_protocols_; }\n  const std::string& cipherSuites() const override { return cipher_suites_; }\n  const std::string& ecdhCurves() const override { return ecdh_curves_; }\n  // TODO(htuch): This needs to be made const again and/or zero copy and/or callers fixed.\n  std::vector<std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>>\n  tlsCertificates() const override {\n    std::vector<std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>> configs;\n    for (const auto& config : tls_certificate_configs_) {\n      configs.push_back(config);\n    }\n    return configs;\n  }\n  const Envoy::Ssl::CertificateValidationContextConfig*\n  certificateValidationContext() const override {\n    return validation_context_config_.get();\n  }\n  unsigned minProtocolVersion() const override { return min_protocol_version_; };\n  unsigned maxProtocolVersion() const override { return max_protocol_version_; };\n\n  bool isReady() const override {\n    const bool tls_is_ready =\n        (tls_certificate_providers_.empty() || !tls_certificate_configs_.empty());\n    const bool combined_cvc_is_ready =\n        (default_cvc_ == nullptr || validation_context_config_ != 
nullptr);\n    const bool cvc_is_ready = (certificate_validation_context_provider_ == nullptr ||\n                               default_cvc_ != nullptr || validation_context_config_ != nullptr);\n    return tls_is_ready && combined_cvc_is_ready && cvc_is_ready;\n  }\n\n  void setSecretUpdateCallback(std::function<void()> callback) override;\n  Ssl::HandshakerFactoryCb createHandshaker() const override;\n  Ssl::HandshakerCapabilities capabilities() const override { return capabilities_; }\n\n  Ssl::CertificateValidationContextConfigPtr getCombinedValidationContextConfig(\n      const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n          dynamic_cvc);\n\nprotected:\n  ContextConfigImpl(const envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& config,\n                    const unsigned default_min_protocol_version,\n                    const unsigned default_max_protocol_version,\n                    const std::string& default_cipher_suites, const std::string& default_curves,\n                    Server::Configuration::TransportSocketFactoryContext& factory_context);\n  Api::Api& api_;\n\nprivate:\n  static unsigned tlsVersionFromProto(\n      const envoy::extensions::transport_sockets::tls::v3::TlsParameters::TlsProtocol& version,\n      unsigned default_version);\n\n  const std::string alpn_protocols_;\n  const std::string cipher_suites_;\n  const std::string ecdh_curves_;\n\n  std::vector<Ssl::TlsCertificateConfigImpl> tls_certificate_configs_;\n  Ssl::CertificateValidationContextConfigPtr validation_context_config_;\n  // If certificate validation context type is combined_validation_context. 
default_cvc_\n  // holds a copy of CombinedCertificateValidationContext::default_validation_context.\n  // Otherwise, default_cvc_ is nullptr.\n  std::unique_ptr<envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext>\n      default_cvc_;\n  std::vector<Secret::TlsCertificateConfigProviderSharedPtr> tls_certificate_providers_;\n  // Handle for TLS certificate dynamic secret callback.\n  Envoy::Common::CallbackHandle* tc_update_callback_handle_{};\n  Secret::CertificateValidationContextConfigProviderSharedPtr\n      certificate_validation_context_provider_;\n  // Handle for certificate validation context dynamic secret callback.\n  Envoy::Common::CallbackHandle* cvc_update_callback_handle_{};\n  Envoy::Common::CallbackHandle* cvc_validation_callback_handle_{};\n  const unsigned min_protocol_version_;\n  const unsigned max_protocol_version_;\n\n  Ssl::HandshakerFactoryCb handshaker_factory_cb_;\n  Ssl::HandshakerCapabilities capabilities_;\n};\n\nclass ClientContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::ClientContextConfig {\npublic:\n  static const std::string DEFAULT_CIPHER_SUITES;\n  static const std::string DEFAULT_CURVES;\n\n  ClientContextConfigImpl(\n      const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& config,\n      absl::string_view sigalgs,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context);\n  ClientContextConfigImpl(\n      const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& config,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context)\n      : ClientContextConfigImpl(config, \"\", secret_provider_context) {}\n\n  // Ssl::ClientContextConfig\n  const std::string& serverNameIndication() const override { return server_name_indication_; }\n  bool allowRenegotiation() const override { return allow_renegotiation_; }\n  size_t maxSessionKeys() const override { return max_session_keys_; }\n  const std::string& 
signingAlgorithmsForTest() const override { return sigalgs_; }\n\nprivate:\n  static const unsigned DEFAULT_MIN_VERSION;\n  static const unsigned DEFAULT_MAX_VERSION;\n\n  const std::string server_name_indication_;\n  const bool allow_renegotiation_;\n  const size_t max_session_keys_;\n  const std::string sigalgs_;\n};\n\nclass ServerContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::ServerContextConfig {\npublic:\n  ServerContextConfigImpl(\n      const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext& config,\n      Server::Configuration::TransportSocketFactoryContext& secret_provider_context);\n  ~ServerContextConfigImpl() override;\n\n  // Ssl::ServerContextConfig\n  bool requireClientCertificate() const override { return require_client_certificate_; }\n  OcspStaplePolicy ocspStaplePolicy() const override { return ocsp_staple_policy_; }\n  const std::vector<SessionTicketKey>& sessionTicketKeys() const override {\n    return session_ticket_keys_;\n  }\n  absl::optional<std::chrono::seconds> sessionTimeout() const override { return session_timeout_; }\n\n  bool isReady() const override {\n    const bool parent_is_ready = ContextConfigImpl::isReady();\n    const bool session_ticket_keys_are_ready =\n        (session_ticket_keys_provider_ == nullptr || !session_ticket_keys_.empty());\n    return parent_is_ready && session_ticket_keys_are_ready;\n  }\n\n  void setSecretUpdateCallback(std::function<void()> callback) override;\n  bool disableStatelessSessionResumption() const override {\n    return disable_stateless_session_resumption_;\n  }\n\nprivate:\n  static const unsigned DEFAULT_MIN_VERSION;\n  static const unsigned DEFAULT_MAX_VERSION;\n  static const std::string DEFAULT_CIPHER_SUITES;\n  static const std::string DEFAULT_CURVES;\n\n  const bool require_client_certificate_;\n  const OcspStaplePolicy ocsp_staple_policy_;\n  std::vector<SessionTicketKey> session_ticket_keys_;\n  const 
Secret::TlsSessionTicketKeysConfigProviderSharedPtr session_ticket_keys_provider_;\n  Envoy::Common::CallbackHandle* stk_update_callback_handle_{};\n  Envoy::Common::CallbackHandle* stk_validation_callback_handle_{};\n\n  std::vector<ServerContextConfig::SessionTicketKey> getSessionTicketKeys(\n      const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys& keys);\n  ServerContextConfig::SessionTicketKey getSessionTicketKey(const std::string& key_data);\n  static OcspStaplePolicy ocspStaplePolicyFromProto(\n      const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::OcspStaplePolicy&\n          policy);\n\n  absl::optional<std::chrono::seconds> session_timeout_;\n  const bool disable_stateless_session_resumption_;\n};\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/context_impl.cc",
    "content": "#include \"extensions/transport_sockets/tls/context_impl.h\"\n\n#include <algorithm>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/admin/v3/certs.pb.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/ssl/ssl_socket_extended_info.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/base64.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/hex.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/stats/utility.h\"\n\n#include \"extensions/transport_sockets/tls/utility.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_join.h\"\n#include \"openssl/evp.h\"\n#include \"openssl/hmac.h\"\n#include \"openssl/rand.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nnamespace {\n\nbool cbsContainsU16(CBS& cbs, uint16_t n) {\n  while (CBS_len(&cbs) > 0) {\n    uint16_t v;\n    if (!CBS_get_u16(&cbs, &v)) {\n      return false;\n    }\n    if (v == n) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\n} // namespace\n\nint ContextImpl::sslExtendedSocketInfoIndex() {\n  CONSTRUCT_ON_FIRST_USE(int, []() -> int {\n    int ssl_context_index = SSL_get_ex_new_index(0, nullptr, nullptr, nullptr, nullptr);\n    RELEASE_ASSERT(ssl_context_index >= 0, \"\");\n    return ssl_context_index;\n  }());\n}\n\nContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& config,\n                         TimeSource& time_source)\n    : scope_(scope), stats_(generateStats(scope)), time_source_(time_source),\n      tls_max_version_(config.maxProtocolVersion()),\n      
stat_name_set_(scope.symbolTable().makeSet(\"TransportSockets::Tls\")),\n      unknown_ssl_cipher_(stat_name_set_->add(\"unknown_ssl_cipher\")),\n      unknown_ssl_curve_(stat_name_set_->add(\"unknown_ssl_curve\")),\n      unknown_ssl_algorithm_(stat_name_set_->add(\"unknown_ssl_algorithm\")),\n      unknown_ssl_version_(stat_name_set_->add(\"unknown_ssl_version\")),\n      ssl_ciphers_(stat_name_set_->add(\"ssl.ciphers\")),\n      ssl_versions_(stat_name_set_->add(\"ssl.versions\")),\n      ssl_curves_(stat_name_set_->add(\"ssl.curves\")),\n      ssl_sigalgs_(stat_name_set_->add(\"ssl.sigalgs\")), capabilities_(config.capabilities()) {\n  const auto tls_certificates = config.tlsCertificates();\n  tls_contexts_.resize(std::max(static_cast<size_t>(1), tls_certificates.size()));\n\n  for (auto& ctx : tls_contexts_) {\n    ctx.ssl_ctx_.reset(SSL_CTX_new(TLS_method()));\n\n    int rc = SSL_CTX_set_app_data(ctx.ssl_ctx_.get(), this);\n    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n\n    rc = SSL_CTX_set_min_proto_version(ctx.ssl_ctx_.get(), config.minProtocolVersion());\n    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n\n    rc = SSL_CTX_set_max_proto_version(ctx.ssl_ctx_.get(), config.maxProtocolVersion());\n    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n\n    if (!capabilities_.provides_ciphers_and_curves &&\n        !SSL_CTX_set_strict_cipher_list(ctx.ssl_ctx_.get(), config.cipherSuites().c_str())) {\n      // Break up a set of ciphers into each individual cipher and try them each individually in\n      // order to attempt to log which specific one failed. 
Example of config.cipherSuites():\n      // \"-ALL:[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]:ECDHE-ECDSA-AES128-SHA\".\n      //\n      // \"-\" is both an operator when in the leading position of a token (-ALL: don't allow this\n      // cipher), and the common separator in names (ECDHE-ECDSA-AES128-GCM-SHA256). Don't split on\n      // it because it will separate pieces of the same cipher. When it is a leading character, it\n      // is removed below.\n      std::vector<absl::string_view> ciphers =\n          StringUtil::splitToken(config.cipherSuites(), \":+![|]\", false);\n      std::vector<std::string> bad_ciphers;\n      for (const auto& cipher : ciphers) {\n        std::string cipher_str(cipher);\n\n        if (absl::StartsWith(cipher_str, \"-\")) {\n          cipher_str.erase(cipher_str.begin());\n        }\n\n        if (!SSL_CTX_set_strict_cipher_list(ctx.ssl_ctx_.get(), cipher_str.c_str())) {\n          bad_ciphers.push_back(cipher_str);\n        }\n      }\n      throw EnvoyException(fmt::format(\"Failed to initialize cipher suites {}. 
The following \"\n                                       \"ciphers were rejected when tried individually: {}\",\n                                       config.cipherSuites(), absl::StrJoin(bad_ciphers, \", \")));\n    }\n\n    if (!capabilities_.provides_ciphers_and_curves &&\n        !SSL_CTX_set1_curves_list(ctx.ssl_ctx_.get(), config.ecdhCurves().c_str())) {\n      throw EnvoyException(absl::StrCat(\"Failed to initialize ECDH curves \", config.ecdhCurves()));\n    }\n  }\n\n  int verify_mode = SSL_VERIFY_NONE;\n  int verify_mode_validation_context = SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT;\n\n  if (config.certificateValidationContext() != nullptr) {\n    envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext::\n        TrustChainVerification verification =\n            config.certificateValidationContext()->trustChainVerification();\n    if (verification == envoy::extensions::transport_sockets::tls::v3::\n                            CertificateValidationContext::ACCEPT_UNTRUSTED) {\n      verify_mode = SSL_VERIFY_PEER; // Ensure client-certs will be requested even if we have\n                                     // nothing to verify against\n      verify_mode_validation_context = SSL_VERIFY_PEER;\n    }\n  }\n\n#ifdef BORINGSSL_FIPS\n  if (!capabilities_.is_fips_compliant) {\n    throw EnvoyException(\n        \"Can't load a FIPS noncompliant custom handshaker while running in FIPS compliant mode.\");\n  }\n#endif\n\n  if (config.certificateValidationContext() != nullptr &&\n      !config.certificateValidationContext()->caCert().empty() &&\n      !config.capabilities().provides_certificates) {\n    ca_file_path_ = config.certificateValidationContext()->caCertPath();\n    bssl::UniquePtr<BIO> bio(\n        BIO_new_mem_buf(const_cast<char*>(config.certificateValidationContext()->caCert().data()),\n                        config.certificateValidationContext()->caCert().size()));\n    RELEASE_ASSERT(bio != nullptr, \"\");\n    // Based 
on BoringSSL's X509_load_cert_crl_file().\n    bssl::UniquePtr<STACK_OF(X509_INFO)> list(\n        PEM_X509_INFO_read_bio(bio.get(), nullptr, nullptr, nullptr));\n    if (list == nullptr) {\n      throw EnvoyException(absl::StrCat(\"Failed to load trusted CA certificates from \",\n                                        config.certificateValidationContext()->caCertPath()));\n    }\n\n    for (auto& ctx : tls_contexts_) {\n      X509_STORE* store = SSL_CTX_get_cert_store(ctx.ssl_ctx_.get());\n      bool has_crl = false;\n      for (const X509_INFO* item : list.get()) {\n        if (item->x509) {\n          X509_STORE_add_cert(store, item->x509);\n          if (ca_cert_ == nullptr) {\n            X509_up_ref(item->x509);\n            ca_cert_.reset(item->x509);\n          }\n        }\n        if (item->crl) {\n          X509_STORE_add_crl(store, item->crl);\n          has_crl = true;\n        }\n      }\n      if (ca_cert_ == nullptr) {\n        throw EnvoyException(absl::StrCat(\"Failed to load trusted CA certificates from \",\n                                          config.certificateValidationContext()->caCertPath()));\n      }\n      if (has_crl) {\n        X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);\n      }\n      verify_mode = SSL_VERIFY_PEER;\n      verify_trusted_ca_ = true;\n\n      // NOTE: We're using SSL_CTX_set_cert_verify_callback() instead of X509_verify_cert()\n      // directly. However, our new callback is still calling X509_verify_cert() under\n      // the hood. 
Therefore, to ignore cert expiration, we need to set the callback\n      // for X509_verify_cert to ignore that error.\n      if (config.certificateValidationContext()->allowExpiredCertificate()) {\n        X509_STORE_set_verify_cb(store, ContextImpl::ignoreCertificateExpirationCallback);\n      }\n    }\n  }\n\n  if (config.certificateValidationContext() != nullptr &&\n      !config.certificateValidationContext()->certificateRevocationList().empty()) {\n    bssl::UniquePtr<BIO> bio(BIO_new_mem_buf(\n        const_cast<char*>(\n            config.certificateValidationContext()->certificateRevocationList().data()),\n        config.certificateValidationContext()->certificateRevocationList().size()));\n    RELEASE_ASSERT(bio != nullptr, \"\");\n\n    // Based on BoringSSL's X509_load_cert_crl_file().\n    bssl::UniquePtr<STACK_OF(X509_INFO)> list(\n        PEM_X509_INFO_read_bio(bio.get(), nullptr, nullptr, nullptr));\n    if (list == nullptr) {\n      throw EnvoyException(\n          absl::StrCat(\"Failed to load CRL from \",\n                       config.certificateValidationContext()->certificateRevocationListPath()));\n    }\n\n    for (auto& ctx : tls_contexts_) {\n      X509_STORE* store = SSL_CTX_get_cert_store(ctx.ssl_ctx_.get());\n      for (const X509_INFO* item : list.get()) {\n        if (item->crl) {\n          X509_STORE_add_crl(store, item->crl);\n        }\n      }\n\n      X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);\n    }\n  }\n\n  const Envoy::Ssl::CertificateValidationContextConfig* cert_validation_config =\n      config.certificateValidationContext();\n  if (cert_validation_config != nullptr) {\n    if (!cert_validation_config->verifySubjectAltNameList().empty()) {\n      verify_subject_alt_name_list_ = cert_validation_config->verifySubjectAltNameList();\n      verify_mode = verify_mode_validation_context;\n    }\n\n    if (!cert_validation_config->subjectAltNameMatchers().empty()) {\n      for (const 
envoy::type::matcher::v3::StringMatcher& matcher :\n           cert_validation_config->subjectAltNameMatchers()) {\n        subject_alt_name_matchers_.push_back(Matchers::StringMatcherImpl(matcher));\n      }\n      verify_mode = verify_mode_validation_context;\n    }\n\n    if (!cert_validation_config->verifyCertificateHashList().empty()) {\n      for (auto hash : cert_validation_config->verifyCertificateHashList()) {\n        // Remove colons from the 95 chars long colon-separated \"fingerprint\"\n        // in order to get the hex-encoded string.\n        if (hash.size() == 95) {\n          hash.erase(std::remove(hash.begin(), hash.end(), ':'), hash.end());\n        }\n        const auto& decoded = Hex::decode(hash);\n        if (decoded.size() != SHA256_DIGEST_LENGTH) {\n          throw EnvoyException(absl::StrCat(\"Invalid hex-encoded SHA-256 \", hash));\n        }\n        verify_certificate_hash_list_.push_back(decoded);\n      }\n      verify_mode = verify_mode_validation_context;\n    }\n\n    if (!cert_validation_config->verifyCertificateSpkiList().empty()) {\n      for (const auto& hash : cert_validation_config->verifyCertificateSpkiList()) {\n        const auto decoded = Base64::decode(hash);\n        if (decoded.size() != SHA256_DIGEST_LENGTH) {\n          throw EnvoyException(absl::StrCat(\"Invalid base64-encoded SHA-256 \", hash));\n        }\n        verify_certificate_spki_list_.emplace_back(decoded.begin(), decoded.end());\n      }\n      verify_mode = verify_mode_validation_context;\n    }\n  }\n\n  if (!capabilities_.verifies_peer_certificates) {\n    for (auto& ctx : tls_contexts_) {\n      if (verify_mode != SSL_VERIFY_NONE) {\n        SSL_CTX_set_verify(ctx.ssl_ctx_.get(), verify_mode, nullptr);\n        SSL_CTX_set_cert_verify_callback(ctx.ssl_ctx_.get(), ContextImpl::verifyCallback, this);\n      }\n    }\n  }\n\n  absl::node_hash_set<int> cert_pkey_ids;\n  if (!capabilities_.provides_certificates) {\n    for (uint32_t i = 0; i < 
tls_certificates.size(); ++i) {\n      auto& ctx = tls_contexts_[i];\n      // Load certificate chain.\n      const auto& tls_certificate = tls_certificates[i].get();\n      ctx.cert_chain_file_path_ = tls_certificate.certificateChainPath();\n      bssl::UniquePtr<BIO> bio(\n          BIO_new_mem_buf(const_cast<char*>(tls_certificate.certificateChain().data()),\n                          tls_certificate.certificateChain().size()));\n      RELEASE_ASSERT(bio != nullptr, \"\");\n      ctx.cert_chain_.reset(PEM_read_bio_X509_AUX(bio.get(), nullptr, nullptr, nullptr));\n      if (ctx.cert_chain_ == nullptr ||\n          !SSL_CTX_use_certificate(ctx.ssl_ctx_.get(), ctx.cert_chain_.get())) {\n        while (uint64_t err = ERR_get_error()) {\n          ENVOY_LOG_MISC(debug, \"SSL error: {}:{}:{}:{}\", err, ERR_lib_error_string(err),\n                         ERR_func_error_string(err), ERR_GET_REASON(err),\n                         ERR_reason_error_string(err));\n        }\n        throw EnvoyException(\n            absl::StrCat(\"Failed to load certificate chain from \", ctx.cert_chain_file_path_));\n      }\n      // Read rest of the certificate chain.\n      while (true) {\n        bssl::UniquePtr<X509> cert(PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr));\n        if (cert == nullptr) {\n          break;\n        }\n        if (!SSL_CTX_add_extra_chain_cert(ctx.ssl_ctx_.get(), cert.get())) {\n          throw EnvoyException(\n              absl::StrCat(\"Failed to load certificate chain from \", ctx.cert_chain_file_path_));\n        }\n        // SSL_CTX_add_extra_chain_cert() takes ownership.\n        cert.release();\n      }\n      // Check for EOF.\n      const uint32_t err = ERR_peek_last_error();\n      if (ERR_GET_LIB(err) == ERR_LIB_PEM && ERR_GET_REASON(err) == PEM_R_NO_START_LINE) {\n        ERR_clear_error();\n      } else {\n        throw EnvoyException(\n            absl::StrCat(\"Failed to load certificate chain from \", 
ctx.cert_chain_file_path_));\n      }\n\n      // The must staple extension means the certificate promises to carry\n      // with it an OCSP staple. https://tools.ietf.org/html/rfc7633#section-6\n      constexpr absl::string_view tls_feature_ext = \"1.3.6.1.5.5.7.1.24\";\n      constexpr absl::string_view must_staple_ext_value = \"\\x30\\x3\\x02\\x01\\x05\";\n      auto must_staple = Utility::getCertificateExtensionValue(*ctx.cert_chain_, tls_feature_ext);\n      if (must_staple == must_staple_ext_value) {\n        ctx.is_must_staple_ = true;\n      }\n\n      bssl::UniquePtr<EVP_PKEY> public_key(X509_get_pubkey(ctx.cert_chain_.get()));\n      const int pkey_id = EVP_PKEY_id(public_key.get());\n      if (!cert_pkey_ids.insert(pkey_id).second) {\n        throw EnvoyException(fmt::format(\"Failed to load certificate chain from {}, at most one \"\n                                         \"certificate of a given type may be specified\",\n                                         ctx.cert_chain_file_path_));\n      }\n      ctx.is_ecdsa_ = pkey_id == EVP_PKEY_EC;\n      switch (pkey_id) {\n      case EVP_PKEY_EC: {\n        // We only support P-256 ECDSA today.\n        const EC_KEY* ecdsa_public_key = EVP_PKEY_get0_EC_KEY(public_key.get());\n        // Since we checked the key type above, this should be valid.\n        ASSERT(ecdsa_public_key != nullptr);\n        const EC_GROUP* ecdsa_group = EC_KEY_get0_group(ecdsa_public_key);\n        if (ecdsa_group == nullptr ||\n            EC_GROUP_get_curve_name(ecdsa_group) != NID_X9_62_prime256v1) {\n          throw EnvoyException(fmt::format(\"Failed to load certificate chain from {}, only P-256 \"\n                                           \"ECDSA certificates are supported\",\n                                           ctx.cert_chain_file_path_));\n        }\n        ctx.is_ecdsa_ = true;\n      } break;\n      case EVP_PKEY_RSA: {\n        // We require RSA certificates with 2048-bit or larger keys.\n        const RSA* 
rsa_public_key = EVP_PKEY_get0_RSA(public_key.get());\n        // Since we checked the key type above, this should be valid.\n        ASSERT(rsa_public_key != nullptr);\n        const unsigned rsa_key_length = RSA_size(rsa_public_key);\n#ifdef BORINGSSL_FIPS\n        if (rsa_key_length != 2048 / 8 && rsa_key_length != 3072 / 8) {\n          throw EnvoyException(\n              fmt::format(\"Failed to load certificate chain from {}, only RSA certificates with \"\n                          \"2048-bit or 3072-bit keys are supported in FIPS mode\",\n                          ctx.cert_chain_file_path_));\n        }\n#else\n        if (rsa_key_length < 2048 / 8) {\n          throw EnvoyException(\n              fmt::format(\"Failed to load certificate chain from {}, only RSA \"\n                          \"certificates with 2048-bit or larger keys are supported\",\n                          ctx.cert_chain_file_path_));\n        }\n#endif\n      } break;\n#ifdef BORINGSSL_FIPS\n      default:\n        throw EnvoyException(fmt::format(\"Failed to load certificate chain from {}, only RSA and \"\n                                         \"ECDSA certificates are supported in FIPS mode\",\n                                         ctx.cert_chain_file_path_));\n#endif\n      }\n\n      Envoy::Ssl::PrivateKeyMethodProviderSharedPtr private_key_method_provider =\n          tls_certificate.privateKeyMethod();\n      // We either have a private key or a BoringSSL private key method provider.\n      if (private_key_method_provider) {\n        ctx.private_key_method_provider_ = private_key_method_provider;\n        // The provider has a reference to the private key method for the context lifetime.\n        Ssl::BoringSslPrivateKeyMethodSharedPtr private_key_method =\n            private_key_method_provider->getBoringSslPrivateKeyMethod();\n        if (private_key_method == nullptr) {\n          throw EnvoyException(\n              fmt::format(\"Failed to get BoringSSL private key 
method from provider\"));\n        }\n#ifdef BORINGSSL_FIPS\n        if (!ctx.private_key_method_provider_->checkFips()) {\n          throw EnvoyException(\n              fmt::format(\"Private key method doesn't support FIPS mode with current parameters\"));\n        }\n#endif\n        SSL_CTX_set_private_key_method(ctx.ssl_ctx_.get(), private_key_method.get());\n      } else {\n        // Load private key.\n        bio.reset(BIO_new_mem_buf(const_cast<char*>(tls_certificate.privateKey().data()),\n                                  tls_certificate.privateKey().size()));\n        RELEASE_ASSERT(bio != nullptr, \"\");\n        bssl::UniquePtr<EVP_PKEY> pkey(\n            PEM_read_bio_PrivateKey(bio.get(), nullptr, nullptr,\n                                    !tls_certificate.password().empty()\n                                        ? const_cast<char*>(tls_certificate.password().c_str())\n                                        : nullptr));\n        if (pkey == nullptr || !SSL_CTX_use_PrivateKey(ctx.ssl_ctx_.get(), pkey.get())) {\n          throw EnvoyException(\n              absl::StrCat(\"Failed to load private key from \", tls_certificate.privateKeyPath()));\n        }\n\n#ifdef BORINGSSL_FIPS\n        // Verify that private keys are passing FIPS pairwise consistency tests.\n        switch (pkey_id) {\n        case EVP_PKEY_EC: {\n          const EC_KEY* ecdsa_private_key = EVP_PKEY_get0_EC_KEY(pkey.get());\n          if (!EC_KEY_check_fips(ecdsa_private_key)) {\n            throw EnvoyException(fmt::format(\"Failed to load private key from {}, ECDSA key failed \"\n                                             \"pairwise consistency test required in FIPS mode\",\n                                             tls_certificate.privateKeyPath()));\n          }\n        } break;\n        case EVP_PKEY_RSA: {\n          RSA* rsa_private_key = EVP_PKEY_get0_RSA(pkey.get());\n          if (!RSA_check_fips(rsa_private_key)) {\n            throw 
EnvoyException(fmt::format(\"Failed to load private key from {}, RSA key failed \"\n                                             \"pairwise consistency test required in FIPS mode\",\n                                             tls_certificate.privateKeyPath()));\n          }\n        } break;\n        }\n#endif\n      }\n    }\n  }\n\n  // use the server's cipher list preferences\n  for (auto& ctx : tls_contexts_) {\n    SSL_CTX_set_options(ctx.ssl_ctx_.get(), SSL_OP_CIPHER_SERVER_PREFERENCE);\n  }\n\n  if (config.certificateValidationContext() != nullptr) {\n    allow_untrusted_certificate_ =\n        config.certificateValidationContext()->trustChainVerification() ==\n        envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext::\n            ACCEPT_UNTRUSTED;\n  }\n\n  parsed_alpn_protocols_ = parseAlpnProtocols(config.alpnProtocols());\n\n  // To enumerate the required builtin ciphers, curves, algorithms, and\n  // versions, uncomment '#define LOG_BUILTIN_STAT_NAMES' below, and run\n  //  bazel test //test/extensions/transport_sockets/tls/... 
--test_output=streamed\n  //      | grep \" Builtin ssl.\" | sort | uniq\n  // #define LOG_BUILTIN_STAT_NAMES\n  //\n  // TODO(#8035): improve tooling to find any other built-ins needed to avoid\n  // contention.\n\n  // Ciphers\n  stat_name_set_->rememberBuiltin(\"AEAD-AES128-GCM-SHA256\");\n  stat_name_set_->rememberBuiltin(\"ECDHE-ECDSA-AES128-GCM-SHA256\");\n  stat_name_set_->rememberBuiltin(\"ECDHE-RSA-AES128-GCM-SHA256\");\n  stat_name_set_->rememberBuiltin(\"ECDHE-RSA-AES128-SHA\");\n  stat_name_set_->rememberBuiltin(\"ECDHE-RSA-CHACHA20-POLY1305\");\n  stat_name_set_->rememberBuiltin(\"TLS_AES_128_GCM_SHA256\");\n\n  // Curves from\n  // https://github.com/google/boringssl/blob/f4d8b969200f1ee2dd872ffb85802e6a0976afe7/ssl/ssl_key_share.cc#L384\n  stat_name_set_->rememberBuiltins(\n      {\"P-224\", \"P-256\", \"P-384\", \"P-521\", \"X25519\", \"CECPQ2\", \"CECPQ2b\"});\n\n  // Algorithms\n  stat_name_set_->rememberBuiltins({\"ecdsa_secp256r1_sha256\", \"rsa_pss_rsae_sha256\"});\n\n  // Versions\n  stat_name_set_->rememberBuiltins({\"TLSv1\", \"TLSv1.1\", \"TLSv1.2\", \"TLSv1.3\"});\n}\n\nint ServerContextImpl::alpnSelectCallback(const unsigned char** out, unsigned char* outlen,\n                                          const unsigned char* in, unsigned int inlen) {\n  // Currently this uses the standard selection algorithm in priority order.\n  const uint8_t* alpn_data = parsed_alpn_protocols_.data();\n  size_t alpn_data_size = parsed_alpn_protocols_.size();\n\n  if (SSL_select_next_proto(const_cast<unsigned char**>(out), outlen, alpn_data, alpn_data_size, in,\n                            inlen) != OPENSSL_NPN_NEGOTIATED) {\n    return SSL_TLSEXT_ERR_NOACK;\n  } else {\n    return SSL_TLSEXT_ERR_OK;\n  }\n}\n\nstd::vector<uint8_t> ContextImpl::parseAlpnProtocols(const std::string& alpn_protocols) {\n  if (alpn_protocols.empty()) {\n    return {};\n  }\n\n  if (alpn_protocols.size() >= 65535) {\n    throw EnvoyException(\"Invalid ALPN protocol string\");\n  
}\n\n  std::vector<uint8_t> out(alpn_protocols.size() + 1);\n  size_t start = 0;\n  for (size_t i = 0; i <= alpn_protocols.size(); i++) {\n    if (i == alpn_protocols.size() || alpn_protocols[i] == ',') {\n      if (i - start > 255) {\n        throw EnvoyException(\"Invalid ALPN protocol string\");\n      }\n\n      out[start] = i - start;\n      start = i + 1;\n    } else {\n      out[i + 1] = alpn_protocols[i];\n    }\n  }\n\n  return out;\n}\n\nbssl::UniquePtr<SSL> ContextImpl::newSsl(const Network::TransportSocketOptions*) {\n  // We use the first certificate for a new SSL object, later in the\n  // SSL_CTX_set_select_certificate_cb() callback following ClientHello, we replace with the\n  // selected certificate via SSL_set_SSL_CTX().\n  return bssl::UniquePtr<SSL>(SSL_new(tls_contexts_[0].ssl_ctx_.get()));\n}\n\nint ContextImpl::ignoreCertificateExpirationCallback(int ok, X509_STORE_CTX* ctx) {\n  if (!ok) {\n    int err = X509_STORE_CTX_get_error(ctx);\n    if (err == X509_V_ERR_CERT_HAS_EXPIRED || err == X509_V_ERR_CERT_NOT_YET_VALID) {\n      return 1;\n    }\n  }\n\n  return ok;\n}\n\nint ContextImpl::verifyCallback(X509_STORE_CTX* store_ctx, void* arg) {\n  ContextImpl* impl = reinterpret_cast<ContextImpl*>(arg);\n  SSL* ssl = reinterpret_cast<SSL*>(\n      X509_STORE_CTX_get_ex_data(store_ctx, SSL_get_ex_data_X509_STORE_CTX_idx()));\n  auto cert = bssl::UniquePtr<X509>(SSL_get_peer_certificate(ssl));\n  return impl->doVerifyCertChain(\n      store_ctx,\n      reinterpret_cast<Envoy::Ssl::SslExtendedSocketInfo*>(\n          SSL_get_ex_data(ssl, ContextImpl::sslExtendedSocketInfoIndex())),\n      *cert, static_cast<const Network::TransportSocketOptions*>(SSL_get_app_data(ssl)));\n}\n\nint ContextImpl::doVerifyCertChain(\n    X509_STORE_CTX* store_ctx, Ssl::SslExtendedSocketInfo* ssl_extended_info, X509& leaf_cert,\n    const Network::TransportSocketOptions* transport_socket_options) {\n  if (verify_trusted_ca_) {\n    int ret = 
X509_verify_cert(store_ctx);\n    if (ssl_extended_info) {\n      ssl_extended_info->setCertificateValidationStatus(\n          ret == 1 ? Envoy::Ssl::ClientValidationStatus::Validated\n                   : Envoy::Ssl::ClientValidationStatus::Failed);\n    }\n\n    if (ret <= 0) {\n      stats_.fail_verify_error_.inc();\n      return allow_untrusted_certificate_ ? 1 : ret;\n    }\n  }\n\n  Envoy::Ssl::ClientValidationStatus validated = verifyCertificate(\n      &leaf_cert,\n      transport_socket_options &&\n              !transport_socket_options->verifySubjectAltNameListOverride().empty()\n          ? transport_socket_options->verifySubjectAltNameListOverride()\n          : verify_subject_alt_name_list_,\n      subject_alt_name_matchers_);\n\n  if (ssl_extended_info) {\n    if (ssl_extended_info->certificateValidationStatus() ==\n        Envoy::Ssl::ClientValidationStatus::NotValidated) {\n      ssl_extended_info->setCertificateValidationStatus(validated);\n    } else if (validated != Envoy::Ssl::ClientValidationStatus::NotValidated) {\n      ssl_extended_info->setCertificateValidationStatus(validated);\n    }\n  }\n\n  return allow_untrusted_certificate_ ? 
1\n                                      : (validated != Envoy::Ssl::ClientValidationStatus::Failed);\n}\n\nEnvoy::Ssl::ClientValidationStatus ContextImpl::verifyCertificate(\n    X509* cert, const std::vector<std::string>& verify_san_list,\n    const std::vector<Matchers::StringMatcherImpl>& subject_alt_name_matchers) {\n  Envoy::Ssl::ClientValidationStatus validated = Envoy::Ssl::ClientValidationStatus::NotValidated;\n\n  if (!verify_san_list.empty()) {\n    if (!verifySubjectAltName(cert, verify_san_list)) {\n      stats_.fail_verify_san_.inc();\n      return Envoy::Ssl::ClientValidationStatus::Failed;\n    }\n    validated = Envoy::Ssl::ClientValidationStatus::Validated;\n  }\n\n  if (!subject_alt_name_matchers.empty() && !matchSubjectAltName(cert, subject_alt_name_matchers)) {\n    stats_.fail_verify_san_.inc();\n    return Envoy::Ssl::ClientValidationStatus::Failed;\n  }\n\n  if (!verify_certificate_hash_list_.empty() || !verify_certificate_spki_list_.empty()) {\n    const bool valid_certificate_hash =\n        !verify_certificate_hash_list_.empty() &&\n        verifyCertificateHashList(cert, verify_certificate_hash_list_);\n    const bool valid_certificate_spki =\n        !verify_certificate_spki_list_.empty() &&\n        verifyCertificateSpkiList(cert, verify_certificate_spki_list_);\n\n    if (!valid_certificate_hash && !valid_certificate_spki) {\n      stats_.fail_verify_cert_hash_.inc();\n      return Envoy::Ssl::ClientValidationStatus::Failed;\n    }\n\n    validated = Envoy::Ssl::ClientValidationStatus::Validated;\n  }\n\n  return validated;\n}\n\nvoid ContextImpl::incCounter(const Stats::StatName name, absl::string_view value,\n                             const Stats::StatName fallback) const {\n  Stats::Counter& counter = Stats::Utility::counterFromElements(\n      scope_, {name, stat_name_set_->getBuiltin(value, fallback)});\n  counter.inc();\n\n#ifdef LOG_BUILTIN_STAT_NAMES\n  std::cerr << absl::StrCat(\"Builtin \", symbol_table.toString(name), 
\": \", value, \"\\n\")\n            << std::flush;\n#endif\n}\n\nvoid ContextImpl::logHandshake(SSL* ssl) const {\n  stats_.handshake_.inc();\n\n  if (SSL_session_reused(ssl)) {\n    stats_.session_reused_.inc();\n  }\n\n  incCounter(ssl_ciphers_, SSL_get_cipher_name(ssl), unknown_ssl_cipher_);\n  incCounter(ssl_versions_, SSL_get_version(ssl), unknown_ssl_version_);\n\n  const uint16_t curve_id = SSL_get_curve_id(ssl);\n  if (curve_id) {\n    incCounter(ssl_curves_, SSL_get_curve_name(curve_id), unknown_ssl_curve_);\n  }\n\n  const uint16_t sigalg_id = SSL_get_peer_signature_algorithm(ssl);\n  if (sigalg_id) {\n    const char* sigalg = SSL_get_signature_algorithm_name(sigalg_id, 1 /* include curve */);\n    incCounter(ssl_sigalgs_, sigalg, unknown_ssl_algorithm_);\n  }\n\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl));\n  if (!cert.get()) {\n    stats_.no_certificate_.inc();\n  }\n}\n\nstd::vector<Ssl::PrivateKeyMethodProviderSharedPtr> ContextImpl::getPrivateKeyMethodProviders() {\n  std::vector<Envoy::Ssl::PrivateKeyMethodProviderSharedPtr> providers;\n\n  for (auto& tls_context : tls_contexts_) {\n    Envoy::Ssl::PrivateKeyMethodProviderSharedPtr provider =\n        tls_context.getPrivateKeyMethodProvider();\n    if (provider) {\n      providers.push_back(provider);\n    }\n  }\n  return providers;\n}\n\nbool ContextImpl::matchSubjectAltName(\n    X509* cert, const std::vector<Matchers::StringMatcherImpl>& subject_alt_name_matchers) {\n  bssl::UniquePtr<GENERAL_NAMES> san_names(\n      static_cast<GENERAL_NAMES*>(X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr)));\n  if (san_names == nullptr) {\n    return false;\n  }\n  for (const GENERAL_NAME* general_name : san_names.get()) {\n    const std::string san = Utility::generalNameAsString(general_name);\n    for (auto& config_san_matcher : subject_alt_name_matchers) {\n      // For DNS SAN, if the StringMatcher type is exact, we have to follow DNS matching semantics.\n      if 
(general_name->type == GEN_DNS &&\n                  config_san_matcher.matcher().match_pattern_case() ==\n                      envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact\n              ? dnsNameMatch(config_san_matcher.matcher().exact(), absl::string_view(san))\n              : config_san_matcher.match(san)) {\n        return true;\n      }\n    }\n  }\n  return false;\n}\n\nbool ContextImpl::verifySubjectAltName(X509* cert,\n                                       const std::vector<std::string>& subject_alt_names) {\n  bssl::UniquePtr<GENERAL_NAMES> san_names(\n      static_cast<GENERAL_NAMES*>(X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr)));\n  if (san_names == nullptr) {\n    return false;\n  }\n  for (const GENERAL_NAME* general_name : san_names.get()) {\n    const std::string san = Utility::generalNameAsString(general_name);\n    for (auto& config_san : subject_alt_names) {\n      if (general_name->type == GEN_DNS ? dnsNameMatch(config_san, san.c_str())\n                                        : config_san == san) {\n        return true;\n      }\n    }\n  }\n  return false;\n}\n\nbool ContextImpl::dnsNameMatch(const absl::string_view dns_name, const absl::string_view pattern) {\n  if (dns_name == pattern) {\n    return true;\n  }\n\n  size_t pattern_len = pattern.length();\n  if (pattern_len > 1 && pattern[0] == '*' && pattern[1] == '.') {\n    if (dns_name.length() > pattern_len - 1) {\n      const size_t off = dns_name.length() - pattern_len + 1;\n      if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.fix_wildcard_matching\")) {\n        return dns_name.substr(0, off).find('.') == std::string::npos &&\n               dns_name.substr(off, pattern_len - 1) == pattern.substr(1, pattern_len - 1);\n      } else {\n        return dns_name.substr(off, pattern_len - 1) == pattern.substr(1, pattern_len - 1);\n      }\n    }\n  }\n\n  return false;\n}\n\nbool ContextImpl::verifyCertificateHashList(\n    X509* 
cert, const std::vector<std::vector<uint8_t>>& expected_hashes) {\n  std::vector<uint8_t> computed_hash(SHA256_DIGEST_LENGTH);\n  unsigned int n;\n  X509_digest(cert, EVP_sha256(), computed_hash.data(), &n);\n  RELEASE_ASSERT(n == computed_hash.size(), \"\");\n\n  for (const auto& expected_hash : expected_hashes) {\n    if (computed_hash == expected_hash) {\n      return true;\n    }\n  }\n  return false;\n}\n\nbool ContextImpl::verifyCertificateSpkiList(\n    X509* cert, const std::vector<std::vector<uint8_t>>& expected_hashes) {\n  X509_PUBKEY* pubkey = X509_get_X509_PUBKEY(cert);\n  if (pubkey == nullptr) {\n    return false;\n  }\n  uint8_t* spki = nullptr;\n  const int len = i2d_X509_PUBKEY(pubkey, &spki);\n  if (len < 0) {\n    return false;\n  }\n  bssl::UniquePtr<uint8_t> free_spki(spki);\n\n  std::vector<uint8_t> computed_hash(SHA256_DIGEST_LENGTH);\n  SHA256(spki, len, computed_hash.data());\n\n  for (const auto& expected_hash : expected_hashes) {\n    if (computed_hash == expected_hash) {\n      return true;\n    }\n  }\n  return false;\n}\n\nSslStats ContextImpl::generateStats(Stats::Scope& store) {\n  std::string prefix(\"ssl.\");\n  return {ALL_SSL_STATS(POOL_COUNTER_PREFIX(store, prefix), POOL_GAUGE_PREFIX(store, prefix),\n                        POOL_HISTOGRAM_PREFIX(store, prefix))};\n}\n\nsize_t ContextImpl::daysUntilFirstCertExpires() const {\n  int daysUntilExpiration = Utility::getDaysUntilExpiration(ca_cert_.get(), time_source_);\n  for (auto& ctx : tls_contexts_) {\n    daysUntilExpiration = std::min<int>(\n        Utility::getDaysUntilExpiration(ctx.cert_chain_.get(), time_source_), daysUntilExpiration);\n  }\n  if (daysUntilExpiration < 0) { // Ensure that the return value is unsigned\n    return 0;\n  }\n  return daysUntilExpiration;\n}\n\nabsl::optional<uint64_t> ContextImpl::secondsUntilFirstOcspResponseExpires() const {\n  absl::optional<uint64_t> secs_until_expiration;\n  for (auto& ctx : tls_contexts_) {\n    if (ctx.ocsp_response_) 
{\n      uint64_t next_expiration = ctx.ocsp_response_->secondsUntilExpiration();\n      secs_until_expiration = std::min<uint64_t>(\n          next_expiration, secs_until_expiration.value_or(std::numeric_limits<uint64_t>::max()));\n    }\n  }\n\n  return secs_until_expiration;\n}\n\nEnvoy::Ssl::CertificateDetailsPtr ContextImpl::getCaCertInformation() const {\n  if (ca_cert_ == nullptr) {\n    return nullptr;\n  }\n  return certificateDetails(ca_cert_.get(), getCaFileName(), nullptr);\n}\n\nstd::vector<Envoy::Ssl::CertificateDetailsPtr> ContextImpl::getCertChainInformation() const {\n  std::vector<Envoy::Ssl::CertificateDetailsPtr> cert_details;\n  for (const auto& ctx : tls_contexts_) {\n    if (ctx.cert_chain_ == nullptr) {\n      continue;\n    }\n    cert_details.emplace_back(certificateDetails(ctx.cert_chain_.get(), ctx.getCertChainFileName(),\n                                                 ctx.ocsp_response_.get()));\n  }\n  return cert_details;\n}\n\nEnvoy::Ssl::CertificateDetailsPtr\nContextImpl::certificateDetails(X509* cert, const std::string& path,\n                                const Ocsp::OcspResponseWrapper* ocsp_response) const {\n  Envoy::Ssl::CertificateDetailsPtr certificate_details =\n      std::make_unique<envoy::admin::v3::CertificateDetails>();\n  certificate_details->set_path(path);\n  certificate_details->set_serial_number(Utility::getSerialNumberFromCertificate(*cert));\n  certificate_details->set_days_until_expiration(\n      Utility::getDaysUntilExpiration(cert, time_source_));\n  if (ocsp_response) {\n    auto* ocsp_details = certificate_details->mutable_ocsp_details();\n    ProtobufWkt::Timestamp* valid_from = ocsp_details->mutable_valid_from();\n    TimestampUtil::systemClockToTimestamp(ocsp_response->getThisUpdate(), *valid_from);\n    ProtobufWkt::Timestamp* expiration = ocsp_details->mutable_expiration();\n    TimestampUtil::systemClockToTimestamp(ocsp_response->getNextUpdate(), *expiration);\n  }\n  ProtobufWkt::Timestamp* 
valid_from = certificate_details->mutable_valid_from();\n  TimestampUtil::systemClockToTimestamp(Utility::getValidFrom(*cert), *valid_from);\n  ProtobufWkt::Timestamp* expiration_time = certificate_details->mutable_expiration_time();\n  TimestampUtil::systemClockToTimestamp(Utility::getExpirationTime(*cert), *expiration_time);\n\n  for (auto& dns_san : Utility::getSubjectAltNames(*cert, GEN_DNS)) {\n    envoy::admin::v3::SubjectAlternateName& subject_alt_name =\n        *certificate_details->add_subject_alt_names();\n    subject_alt_name.set_dns(dns_san);\n  }\n  for (auto& uri_san : Utility::getSubjectAltNames(*cert, GEN_URI)) {\n    envoy::admin::v3::SubjectAlternateName& subject_alt_name =\n        *certificate_details->add_subject_alt_names();\n    subject_alt_name.set_uri(uri_san);\n  }\n  for (auto& ip_san : Utility::getSubjectAltNames(*cert, GEN_IPADD)) {\n    envoy::admin::v3::SubjectAlternateName& subject_alt_name =\n        *certificate_details->add_subject_alt_names();\n    subject_alt_name.set_ip_address(ip_san);\n  }\n  return certificate_details;\n}\n\nClientContextImpl::ClientContextImpl(Stats::Scope& scope,\n                                     const Envoy::Ssl::ClientContextConfig& config,\n                                     TimeSource& time_source)\n    : ContextImpl(scope, config, time_source),\n      server_name_indication_(config.serverNameIndication()),\n      allow_renegotiation_(config.allowRenegotiation()),\n      max_session_keys_(config.maxSessionKeys()) {\n  // This should be guaranteed during configuration ingestion for client contexts.\n  ASSERT(tls_contexts_.size() == 1);\n  if (!parsed_alpn_protocols_.empty()) {\n    for (auto& ctx : tls_contexts_) {\n      const int rc = SSL_CTX_set_alpn_protos(ctx.ssl_ctx_.get(), parsed_alpn_protocols_.data(),\n                                             parsed_alpn_protocols_.size());\n      RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or(\"\"));\n    }\n  }\n\n  if 
(!config.signingAlgorithmsForTest().empty()) {\n    const uint16_t sigalgs = parseSigningAlgorithmsForTest(config.signingAlgorithmsForTest());\n    RELEASE_ASSERT(sigalgs != 0, fmt::format(\"unsupported signing algorithm {}\",\n                                             config.signingAlgorithmsForTest()));\n\n    for (auto& ctx : tls_contexts_) {\n      const int rc = SSL_CTX_set_verify_algorithm_prefs(ctx.ssl_ctx_.get(), &sigalgs, 1);\n      RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n    }\n  }\n\n  if (max_session_keys_ > 0) {\n    SSL_CTX_set_session_cache_mode(tls_contexts_[0].ssl_ctx_.get(), SSL_SESS_CACHE_CLIENT);\n    SSL_CTX_sess_set_new_cb(\n        tls_contexts_[0].ssl_ctx_.get(), [](SSL* ssl, SSL_SESSION* session) -> int {\n          ContextImpl* context_impl =\n              static_cast<ContextImpl*>(SSL_CTX_get_app_data(SSL_get_SSL_CTX(ssl)));\n          ClientContextImpl* client_context_impl = dynamic_cast<ClientContextImpl*>(context_impl);\n          RELEASE_ASSERT(client_context_impl != nullptr, \"\"); // for Coverity\n          return client_context_impl->newSessionKey(session);\n        });\n  }\n}\n\nbool ContextImpl::parseAndSetAlpn(const std::vector<std::string>& alpn, SSL& ssl) {\n  std::vector<uint8_t> parsed_alpn = parseAlpnProtocols(absl::StrJoin(alpn, \",\"));\n  if (!parsed_alpn.empty()) {\n    const int rc = SSL_set_alpn_protos(&ssl, parsed_alpn.data(), parsed_alpn.size());\n    // This should only if memory allocation fails, e.g. OOM.\n    RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or(\"\"));\n    return true;\n  }\n\n  return false;\n}\n\nbssl::UniquePtr<SSL> ClientContextImpl::newSsl(const Network::TransportSocketOptions* options) {\n  bssl::UniquePtr<SSL> ssl_con(ContextImpl::newSsl(options));\n\n  const std::string server_name_indication = options && options->serverNameOverride().has_value()\n                                                 ? 
options->serverNameOverride().value()\n                                                 : server_name_indication_;\n\n  if (!server_name_indication.empty()) {\n    const int rc = SSL_set_tlsext_host_name(ssl_con.get(), server_name_indication.c_str());\n    RELEASE_ASSERT(rc, Utility::getLastCryptoError().value_or(\"\"));\n  }\n\n  if (options && !options->verifySubjectAltNameListOverride().empty()) {\n    SSL_set_app_data(ssl_con.get(), options);\n    SSL_set_verify(ssl_con.get(), SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr);\n  }\n\n  // We determine what ALPN using the following precedence:\n  // 1. Option-provided ALPN override.\n  // 2. ALPN statically configured in the upstream TLS context.\n  // 3. Option-provided ALPN fallback.\n\n  // At this point in the code the ALPN has already been set (if present) to the value specified in\n  // the TLS context. We've stored this value in parsed_alpn_protocols_ so we can check that to see\n  // if it's already been set.\n  bool has_alpn_defined = !parsed_alpn_protocols_.empty();\n  if (options) {\n    // ALPN override takes precedence over TLS context specified, so blindly overwrite it.\n    has_alpn_defined |= parseAndSetAlpn(options->applicationProtocolListOverride(), *ssl_con);\n  }\n\n  if (options && !has_alpn_defined && options->applicationProtocolFallback().has_value()) {\n    // If ALPN hasn't already been set (either through TLS context or override), use the fallback.\n    parseAndSetAlpn({*options->applicationProtocolFallback()}, *ssl_con);\n  }\n\n  if (allow_renegotiation_) {\n    SSL_set_renegotiate_mode(ssl_con.get(), ssl_renegotiate_freely);\n  }\n\n  if (max_session_keys_ > 0) {\n    if (session_keys_single_use_) {\n      // Stored single-use session keys, use write/write locks.\n      absl::WriterMutexLock l(&session_keys_mu_);\n      if (!session_keys_.empty()) {\n        // Use the most recently stored session key, since it has the highest\n        // probability of still being 
recognized/accepted by the server.\n        SSL_SESSION* session = session_keys_.front().get();\n        SSL_set_session(ssl_con.get(), session);\n        // Remove single-use session key (TLS 1.3) after first use.\n        if (SSL_SESSION_should_be_single_use(session)) {\n          session_keys_.pop_front();\n        }\n      }\n    } else {\n      // Never stored single-use session keys, use read/write locks.\n      absl::ReaderMutexLock l(&session_keys_mu_);\n      if (!session_keys_.empty()) {\n        // Use the most recently stored session key, since it has the highest\n        // probability of still being recognized/accepted by the server.\n        SSL_SESSION* session = session_keys_.front().get();\n        SSL_set_session(ssl_con.get(), session);\n      }\n    }\n  }\n\n  return ssl_con;\n}\n\nint ClientContextImpl::newSessionKey(SSL_SESSION* session) {\n  // In case we ever store single-use session key (TLS 1.3),\n  // we need to switch to using write/write locks.\n  if (SSL_SESSION_should_be_single_use(session)) {\n    session_keys_single_use_ = true;\n  }\n  absl::WriterMutexLock l(&session_keys_mu_);\n  // Evict oldest entries.\n  while (session_keys_.size() >= max_session_keys_) {\n    session_keys_.pop_back();\n  }\n  // Add new session key at the front of the queue, so that it's used first.\n  session_keys_.push_front(bssl::UniquePtr<SSL_SESSION>(session));\n  return 1; // Tell BoringSSL that we took ownership of the session.\n}\n\nuint16_t ClientContextImpl::parseSigningAlgorithmsForTest(const std::string& sigalgs) {\n  // This is used only when testing RSA/ECDSA certificate selection, so only the signing algorithms\n  // used in tests are supported here.\n  if (sigalgs == \"rsa_pss_rsae_sha256\") {\n    return SSL_SIGN_RSA_PSS_RSAE_SHA256;\n  } else if (sigalgs == \"ecdsa_secp256r1_sha256\") {\n    return SSL_SIGN_ECDSA_SECP256R1_SHA256;\n  }\n  return 0;\n}\n\nServerContextImpl::ServerContextImpl(Stats::Scope& scope,\n                            
         const Envoy::Ssl::ServerContextConfig& config,\n                                     const std::vector<std::string>& server_names,\n                                     TimeSource& time_source)\n    : ContextImpl(scope, config, time_source), session_ticket_keys_(config.sessionTicketKeys()),\n      ocsp_staple_policy_(config.ocspStaplePolicy()) {\n  if (config.tlsCertificates().empty() && !config.capabilities().provides_certificates) {\n    throw EnvoyException(\"Server TlsCertificates must have a certificate specified\");\n  }\n\n  // Compute the session context ID hash. We use all the certificate identities,\n  // since we should have a common ID for session resumption no matter what cert\n  // is used. We do this early because it can throw an EnvoyException.\n  const SessionContextID session_id = generateHashForSessionContextId(server_names);\n\n  // First, configure the base context for ClientHello interception.\n  // TODO(htuch): replace with SSL_IDENTITY when we have this as a means to do multi-cert in\n  // BoringSSL.\n  if (!config.capabilities().provides_certificates) {\n    SSL_CTX_set_select_certificate_cb(\n        tls_contexts_[0].ssl_ctx_.get(),\n        [](const SSL_CLIENT_HELLO* client_hello) -> ssl_select_cert_result_t {\n          return static_cast<ServerContextImpl*>(\n                     SSL_CTX_get_app_data(SSL_get_SSL_CTX(client_hello->ssl)))\n              ->selectTlsContext(client_hello);\n        });\n  }\n\n  const auto tls_certificates = config.tlsCertificates();\n  for (uint32_t i = 0; i < tls_certificates.size(); ++i) {\n    auto& ctx = tls_contexts_[i];\n    if (!config.capabilities().verifies_peer_certificates &&\n        config.certificateValidationContext() != nullptr &&\n        !config.certificateValidationContext()->caCert().empty()) {\n      ctx.addClientValidationContext(*config.certificateValidationContext(),\n                                     config.requireClientCertificate());\n    }\n\n    if 
(!parsed_alpn_protocols_.empty() && !config.capabilities().handles_alpn_selection) {\n      SSL_CTX_set_alpn_select_cb(\n          ctx.ssl_ctx_.get(),\n          [](SSL*, const unsigned char** out, unsigned char* outlen, const unsigned char* in,\n             unsigned int inlen, void* arg) -> int {\n            return static_cast<ServerContextImpl*>(arg)->alpnSelectCallback(out, outlen, in, inlen);\n          },\n          this);\n    }\n\n    // If the handshaker handles session tickets natively, don't call\n    // `SSL_CTX_set_tlsext_ticket_key_cb`.\n    if (config.disableStatelessSessionResumption()) {\n      SSL_CTX_set_options(ctx.ssl_ctx_.get(), SSL_OP_NO_TICKET);\n    } else if (!session_ticket_keys_.empty() && !config.capabilities().handles_session_resumption) {\n      SSL_CTX_set_tlsext_ticket_key_cb(\n          ctx.ssl_ctx_.get(),\n          [](SSL* ssl, uint8_t* key_name, uint8_t* iv, EVP_CIPHER_CTX* ctx, HMAC_CTX* hmac_ctx,\n             int encrypt) -> int {\n            ContextImpl* context_impl =\n                static_cast<ContextImpl*>(SSL_CTX_get_app_data(SSL_get_SSL_CTX(ssl)));\n            ServerContextImpl* server_context_impl = dynamic_cast<ServerContextImpl*>(context_impl);\n            RELEASE_ASSERT(server_context_impl != nullptr, \"\"); // for Coverity\n            return server_context_impl->sessionTicketProcess(ssl, key_name, iv, ctx, hmac_ctx,\n                                                             encrypt);\n          });\n    }\n\n    if (config.sessionTimeout() && !config.capabilities().handles_session_resumption) {\n      auto timeout = config.sessionTimeout().value().count();\n      SSL_CTX_set_timeout(ctx.ssl_ctx_.get(), uint32_t(timeout));\n    }\n\n    int rc =\n        SSL_CTX_set_session_id_context(ctx.ssl_ctx_.get(), session_id.data(), session_id.size());\n    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n\n    auto& ocsp_resp_bytes = tls_certificates[i].get().ocspStaple();\n    if 
(ocsp_resp_bytes.empty()) {\n      if (Runtime::runtimeFeatureEnabled(\n              \"envoy.reloadable_features.require_ocsp_response_for_must_staple_certs\") &&\n          ctx.is_must_staple_) {\n        throw EnvoyException(\"OCSP response is required for must-staple certificate\");\n      }\n      if (ocsp_staple_policy_ == Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple) {\n        throw EnvoyException(\"Required OCSP response is missing from TLS context\");\n      }\n    } else {\n      auto response = std::make_unique<Ocsp::OcspResponseWrapper>(ocsp_resp_bytes, time_source_);\n      if (!response->matchesCertificate(*ctx.cert_chain_)) {\n        throw EnvoyException(\"OCSP response does not match its TLS certificate\");\n      }\n      ctx.ocsp_response_ = std::move(response);\n    }\n  }\n}\n\nServerContextImpl::SessionContextID\nServerContextImpl::generateHashForSessionContextId(const std::vector<std::string>& server_names) {\n  uint8_t hash_buffer[EVP_MAX_MD_SIZE];\n  unsigned hash_length;\n\n  bssl::ScopedEVP_MD_CTX md;\n\n  int rc = EVP_DigestInit(md.get(), EVP_sha256());\n  RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n\n  // Hash the CommonName/SANs of all the server certificates. This makes sure that sessions can only\n  // be resumed to certificate(s) for the same name(s), but allows resuming to unique certs in the\n  // case that different Envoy instances each have their own certs. 
All certificates in a\n  // ServerContextImpl context are hashed together, since they all constitute a match on a filter\n  // chain for resumption purposes.\n  if (!capabilities_.provides_certificates) {\n    for (const auto& ctx : tls_contexts_) {\n      X509* cert = SSL_CTX_get0_certificate(ctx.ssl_ctx_.get());\n      RELEASE_ASSERT(cert != nullptr, \"TLS context should have an active certificate\");\n      X509_NAME* cert_subject = X509_get_subject_name(cert);\n      RELEASE_ASSERT(cert_subject != nullptr, \"TLS certificate should have a subject\");\n\n      const int cn_index = X509_NAME_get_index_by_NID(cert_subject, NID_commonName, -1);\n      if (cn_index >= 0) {\n        X509_NAME_ENTRY* cn_entry = X509_NAME_get_entry(cert_subject, cn_index);\n        RELEASE_ASSERT(cn_entry != nullptr, \"certificate subject CN should be present\");\n\n        ASN1_STRING* cn_asn1 = X509_NAME_ENTRY_get_data(cn_entry);\n        if (ASN1_STRING_length(cn_asn1) <= 0) {\n          throw EnvoyException(\"Invalid TLS context has an empty subject CN\");\n        }\n\n        rc = EVP_DigestUpdate(md.get(), ASN1_STRING_data(cn_asn1), ASN1_STRING_length(cn_asn1));\n        RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n      }\n\n      unsigned san_count = 0;\n      bssl::UniquePtr<GENERAL_NAMES> san_names(static_cast<GENERAL_NAMES*>(\n          X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr)));\n\n      if (san_names != nullptr) {\n        for (const GENERAL_NAME* san : san_names.get()) {\n          switch (san->type) {\n          case GEN_IPADD:\n            rc = EVP_DigestUpdate(md.get(), san->d.iPAddress->data, san->d.iPAddress->length);\n            RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n            ++san_count;\n            break;\n          case GEN_DNS:\n            rc = EVP_DigestUpdate(md.get(), ASN1_STRING_data(san->d.dNSName),\n                                  
ASN1_STRING_length(san->d.dNSName));\n            RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n            ++san_count;\n            break;\n          case GEN_URI:\n            rc = EVP_DigestUpdate(md.get(), ASN1_STRING_data(san->d.uniformResourceIdentifier),\n                                  ASN1_STRING_length(san->d.uniformResourceIdentifier));\n            RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n            ++san_count;\n            break;\n          }\n        }\n      }\n\n      // It's possible that the certificate doesn't have a subject, but\n      // does have SANs. Make sure that we have one or the other.\n      if (cn_index < 0 && san_count == 0) {\n        throw EnvoyException(\"Invalid TLS context has neither subject CN nor SAN names\");\n      }\n\n      rc = X509_NAME_digest(X509_get_issuer_name(cert), EVP_sha256(), hash_buffer, &hash_length);\n      RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n      RELEASE_ASSERT(hash_length == SHA256_DIGEST_LENGTH,\n                     fmt::format(\"invalid SHA256 hash length {}\", hash_length));\n\n      rc = EVP_DigestUpdate(md.get(), hash_buffer, hash_length);\n      RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n    }\n  }\n\n  // Hash all the settings that affect whether the server will allow/accept\n  // the client connection. 
This ensures that the client is always validated against\n  // the correct settings, even if session resumption across different listeners\n  // is enabled.\n  if (ca_cert_ != nullptr) {\n    rc = X509_digest(ca_cert_.get(), EVP_sha256(), hash_buffer, &hash_length);\n    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n    RELEASE_ASSERT(hash_length == SHA256_DIGEST_LENGTH,\n                   fmt::format(\"invalid SHA256 hash length {}\", hash_length));\n\n    rc = EVP_DigestUpdate(md.get(), hash_buffer, hash_length);\n    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n\n    // verify_subject_alt_name_list_ can only be set with a ca_cert\n    for (const std::string& name : verify_subject_alt_name_list_) {\n      rc = EVP_DigestUpdate(md.get(), name.data(), name.size());\n      RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n    }\n  }\n\n  for (const auto& hash : verify_certificate_hash_list_) {\n    rc = EVP_DigestUpdate(md.get(), hash.data(),\n                          hash.size() *\n                              sizeof(std::remove_reference<decltype(hash)>::type::value_type));\n    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n  }\n\n  for (const auto& hash : verify_certificate_spki_list_) {\n    rc = EVP_DigestUpdate(md.get(), hash.data(),\n                          hash.size() *\n                              sizeof(std::remove_reference<decltype(hash)>::type::value_type));\n    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n  }\n\n  // Hash configured SNIs for this context, so that sessions cannot be resumed across different\n  // filter chains, even when using the same server certificate.\n  for (const auto& name : server_names) {\n    rc = EVP_DigestUpdate(md.get(), name.data(), name.size());\n    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n  }\n\n  SessionContextID session_id;\n\n  // Ensure that the output 
size of the hash we are using is no greater than\n  // TLS session ID length that we want to generate.\n  static_assert(session_id.size() == SHA256_DIGEST_LENGTH, \"hash size mismatch\");\n  static_assert(session_id.size() == SSL_MAX_SSL_SESSION_ID_LENGTH, \"TLS session ID size mismatch\");\n\n  rc = EVP_DigestFinal(md.get(), session_id.data(), &hash_length);\n  RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(\"\"));\n  RELEASE_ASSERT(hash_length == session_id.size(),\n                 \"SHA256 hash length must match TLS Session ID size\");\n\n  return session_id;\n}\n\nint ServerContextImpl::sessionTicketProcess(SSL*, uint8_t* key_name, uint8_t* iv,\n                                            EVP_CIPHER_CTX* ctx, HMAC_CTX* hmac_ctx, int encrypt) {\n  const EVP_MD* hmac = EVP_sha256();\n  const EVP_CIPHER* cipher = EVP_aes_256_cbc();\n\n  if (encrypt == 1) {\n    // Encrypt\n    RELEASE_ASSERT(!session_ticket_keys_.empty(), \"\");\n    // TODO(ggreenway): validate in SDS that session_ticket_keys_ cannot be empty,\n    // or if we allow it to be emptied, reconfigure the context so this callback\n    // isn't set.\n\n    const Envoy::Ssl::ServerContextConfig::SessionTicketKey& key = session_ticket_keys_.front();\n\n    static_assert(std::tuple_size<decltype(key.name_)>::value == SSL_TICKET_KEY_NAME_LEN,\n                  \"Expected key.name length\");\n    std::copy_n(key.name_.begin(), SSL_TICKET_KEY_NAME_LEN, key_name);\n\n    const int rc = RAND_bytes(iv, EVP_CIPHER_iv_length(cipher));\n    ASSERT(rc);\n\n    // This RELEASE_ASSERT is logically a static_assert, but we can't actually get\n    // EVP_CIPHER_key_length(cipher) at compile-time\n    RELEASE_ASSERT(key.aes_key_.size() == EVP_CIPHER_key_length(cipher), \"\");\n    if (!EVP_EncryptInit_ex(ctx, cipher, nullptr, key.aes_key_.data(), iv)) {\n      return -1;\n    }\n\n    if (!HMAC_Init_ex(hmac_ctx, key.hmac_key_.data(), key.hmac_key_.size(), hmac, nullptr)) {\n      return -1;\n    }\n\n    
return 1; // success\n  } else {\n    // Decrypt\n    bool is_enc_key = true; // first element is the encryption key\n    for (const Envoy::Ssl::ServerContextConfig::SessionTicketKey& key : session_ticket_keys_) {\n      static_assert(std::tuple_size<decltype(key.name_)>::value == SSL_TICKET_KEY_NAME_LEN,\n                    \"Expected key.name length\");\n      if (std::equal(key.name_.begin(), key.name_.end(), key_name)) {\n        if (!HMAC_Init_ex(hmac_ctx, key.hmac_key_.data(), key.hmac_key_.size(), hmac, nullptr)) {\n          return -1;\n        }\n\n        RELEASE_ASSERT(key.aes_key_.size() == EVP_CIPHER_key_length(cipher), \"\");\n        if (!EVP_DecryptInit_ex(ctx, cipher, nullptr, key.aes_key_.data(), iv)) {\n          return -1;\n        }\n\n        // If our current encryption was not the decryption key, renew\n        return is_enc_key ? 1  // success; do not renew\n                          : 2; // success: renew key\n      }\n      is_enc_key = false;\n    }\n\n    return 0; // decryption failed\n  }\n}\n\nbool ServerContextImpl::isClientEcdsaCapable(const SSL_CLIENT_HELLO* ssl_client_hello) {\n  CBS client_hello;\n  CBS_init(&client_hello, ssl_client_hello->client_hello, ssl_client_hello->client_hello_len);\n\n  // This is the TLSv1.3 case (TLSv1.2 on the wire and the supported_versions extensions present).\n  // We just need to look at signature algorithms.\n  const uint16_t client_version = ssl_client_hello->version;\n  if (client_version == TLS1_2_VERSION && tls_max_version_ == TLS1_3_VERSION) {\n    // If the supported_versions extension is found then we assume that the client is competent\n    // enough that just checking the signature_algorithms is sufficient.\n    const uint8_t* supported_versions_data;\n    size_t supported_versions_len;\n    if (SSL_early_callback_ctx_extension_get(ssl_client_hello, TLSEXT_TYPE_supported_versions,\n                                             &supported_versions_data, &supported_versions_len)) {\n      
const uint8_t* signature_algorithms_data;\n      size_t signature_algorithms_len;\n      if (SSL_early_callback_ctx_extension_get(ssl_client_hello, TLSEXT_TYPE_signature_algorithms,\n                                               &signature_algorithms_data,\n                                               &signature_algorithms_len)) {\n        CBS signature_algorithms_ext, signature_algorithms;\n        CBS_init(&signature_algorithms_ext, signature_algorithms_data, signature_algorithms_len);\n        if (!CBS_get_u16_length_prefixed(&signature_algorithms_ext, &signature_algorithms) ||\n            CBS_len(&signature_algorithms_ext) != 0) {\n          return false;\n        }\n        if (cbsContainsU16(signature_algorithms, SSL_SIGN_ECDSA_SECP256R1_SHA256)) {\n          return true;\n        }\n      }\n\n      return false;\n    }\n  }\n\n  // Otherwise we are < TLSv1.3 and need to look at both the curves in the supported_groups for\n  // ECDSA and also for a compatible cipher suite. https://tools.ietf.org/html/rfc4492#section-5.1.1\n  const uint8_t* curvelist_data;\n  size_t curvelist_len;\n  if (!SSL_early_callback_ctx_extension_get(ssl_client_hello, TLSEXT_TYPE_supported_groups,\n                                            &curvelist_data, &curvelist_len)) {\n    return false;\n  }\n\n  CBS curvelist;\n  CBS_init(&curvelist, curvelist_data, curvelist_len);\n\n  // We only support P256 ECDSA curves today.\n  if (!cbsContainsU16(curvelist, SSL_CURVE_SECP256R1)) {\n    return false;\n  }\n\n  // The client must have offered an ECDSA ciphersuite that we like.\n  CBS cipher_suites;\n  CBS_init(&cipher_suites, ssl_client_hello->cipher_suites, ssl_client_hello->cipher_suites_len);\n\n  while (CBS_len(&cipher_suites) > 0) {\n    uint16_t cipher_id;\n    if (!CBS_get_u16(&cipher_suites, &cipher_id)) {\n      return false;\n    }\n    // All tls_context_ share the same set of enabled ciphers, so we can just look at the base\n    // context.\n    if 
(tls_contexts_[0].isCipherEnabled(cipher_id, client_version)) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\nbool ServerContextImpl::isClientOcspCapable(const SSL_CLIENT_HELLO* ssl_client_hello) {\n  const uint8_t* status_request_data;\n  size_t status_request_len;\n  if (SSL_early_callback_ctx_extension_get(ssl_client_hello, TLSEXT_TYPE_status_request,\n                                           &status_request_data, &status_request_len)) {\n    return true;\n  }\n\n  return false;\n}\n\nOcspStapleAction ServerContextImpl::ocspStapleAction(const ContextImpl::TlsContext& ctx,\n                                                     bool client_ocsp_capable) {\n  if (!client_ocsp_capable) {\n    return OcspStapleAction::ClientNotCapable;\n  }\n\n  auto& response = ctx.ocsp_response_;\n  if (!Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.check_ocsp_policy\")) {\n    // Expiration check is disabled. Proceed as if the policy is LenientStapling and the response\n    // is not expired.\n    return response ? 
OcspStapleAction::Staple : OcspStapleAction::NoStaple;\n  }\n\n  auto policy = ocsp_staple_policy_;\n  if (ctx.is_must_staple_) {\n    // The certificate has the must-staple extension, so upgrade the policy to match.\n    policy = Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple;\n  }\n\n  const bool valid_response = response && !response->isExpired();\n\n  switch (policy) {\n  case Ssl::ServerContextConfig::OcspStaplePolicy::LenientStapling:\n    if (!valid_response) {\n      return OcspStapleAction::NoStaple;\n    }\n    return OcspStapleAction::Staple;\n\n  case Ssl::ServerContextConfig::OcspStaplePolicy::StrictStapling:\n    if (valid_response) {\n      return OcspStapleAction::Staple;\n    }\n    if (response) {\n      // Expired response.\n      return OcspStapleAction::Fail;\n    }\n    return OcspStapleAction::NoStaple;\n\n  case Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple:\n    if (!valid_response) {\n      return OcspStapleAction::Fail;\n    }\n    return OcspStapleAction::Staple;\n\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nenum ssl_select_cert_result_t\nServerContextImpl::selectTlsContext(const SSL_CLIENT_HELLO* ssl_client_hello) {\n  const bool client_ecdsa_capable = isClientEcdsaCapable(ssl_client_hello);\n  const bool client_ocsp_capable = isClientOcspCapable(ssl_client_hello);\n\n  // Fallback on first certificate.\n  const TlsContext* selected_ctx = &tls_contexts_[0];\n  auto ocsp_staple_action = ocspStapleAction(*selected_ctx, client_ocsp_capable);\n  for (const auto& ctx : tls_contexts_) {\n    if (client_ecdsa_capable != ctx.is_ecdsa_) {\n      continue;\n    }\n\n    auto action = ocspStapleAction(ctx, client_ocsp_capable);\n    if (action == OcspStapleAction::Fail) {\n      continue;\n    }\n\n    selected_ctx = &ctx;\n    ocsp_staple_action = action;\n    break;\n  }\n\n  if (client_ocsp_capable) {\n    stats_.ocsp_staple_requests_.inc();\n  }\n\n  switch (ocsp_staple_action) {\n  case OcspStapleAction::Staple: 
{\n    // We avoid setting the OCSP response if the client didn't request it, but doing so is safe.\n    RELEASE_ASSERT(selected_ctx->ocsp_response_,\n                   \"OCSP response must be present under OcspStapleAction::Staple\");\n    auto& resp_bytes = selected_ctx->ocsp_response_->rawBytes();\n    int rc = SSL_set_ocsp_response(ssl_client_hello->ssl, resp_bytes.data(), resp_bytes.size());\n    RELEASE_ASSERT(rc != 0, \"\");\n    stats_.ocsp_staple_responses_.inc();\n  } break;\n  case OcspStapleAction::NoStaple:\n    stats_.ocsp_staple_omitted_.inc();\n    break;\n  case OcspStapleAction::Fail:\n    stats_.ocsp_staple_failed_.inc();\n    return ssl_select_cert_error;\n  case OcspStapleAction::ClientNotCapable:\n    break;\n  }\n\n  RELEASE_ASSERT(SSL_set_SSL_CTX(ssl_client_hello->ssl, selected_ctx->ssl_ctx_.get()) != nullptr,\n                 \"\");\n  return ssl_select_cert_success;\n}\n\nvoid ServerContextImpl::TlsContext::addClientValidationContext(\n    const Envoy::Ssl::CertificateValidationContextConfig& config, bool require_client_cert) {\n  bssl::UniquePtr<BIO> bio(\n      BIO_new_mem_buf(const_cast<char*>(config.caCert().data()), config.caCert().size()));\n  RELEASE_ASSERT(bio != nullptr, \"\");\n  // Based on BoringSSL's SSL_add_file_cert_subjects_to_stack().\n  bssl::UniquePtr<STACK_OF(X509_NAME)> list(sk_X509_NAME_new(\n      [](const X509_NAME** a, const X509_NAME** b) -> int { return X509_NAME_cmp(*a, *b); }));\n  RELEASE_ASSERT(list != nullptr, \"\");\n  for (;;) {\n    bssl::UniquePtr<X509> cert(PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr));\n    if (cert == nullptr) {\n      break;\n    }\n    X509_NAME* name = X509_get_subject_name(cert.get());\n    if (name == nullptr) {\n      throw EnvoyException(\n          absl::StrCat(\"Failed to load trusted client CA certificates from \", config.caCertPath()));\n    }\n    // Check for duplicates.\n    if (sk_X509_NAME_find(list.get(), nullptr, name)) {\n      continue;\n    }\n    
bssl::UniquePtr<X509_NAME> name_dup(X509_NAME_dup(name));\n    if (name_dup == nullptr || !sk_X509_NAME_push(list.get(), name_dup.release())) {\n      throw EnvoyException(\n          absl::StrCat(\"Failed to load trusted client CA certificates from \", config.caCertPath()));\n    }\n  }\n  // Check for EOF.\n  const uint32_t err = ERR_peek_last_error();\n  if (ERR_GET_LIB(err) == ERR_LIB_PEM && ERR_GET_REASON(err) == PEM_R_NO_START_LINE) {\n    ERR_clear_error();\n  } else {\n    throw EnvoyException(\n        absl::StrCat(\"Failed to load trusted client CA certificates from \", config.caCertPath()));\n  }\n  SSL_CTX_set_client_CA_list(ssl_ctx_.get(), list.release());\n\n  // SSL_VERIFY_PEER or stronger mode was already set in ContextImpl::ContextImpl().\n  if (require_client_cert) {\n    SSL_CTX_set_verify(ssl_ctx_.get(), SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr);\n  }\n}\n\nbool ServerContextImpl::TlsContext::isCipherEnabled(uint16_t cipher_id, uint16_t client_version) {\n  const SSL_CIPHER* c = SSL_get_cipher_by_value(cipher_id);\n  if (c == nullptr) {\n    return false;\n  }\n  // Skip TLS 1.2 only ciphersuites unless the client supports it.\n  if (SSL_CIPHER_get_min_version(c) > client_version) {\n    return false;\n  }\n  if (SSL_CIPHER_get_auth_nid(c) != NID_auth_ecdsa) {\n    return false;\n  }\n  for (const SSL_CIPHER* our_c : SSL_CTX_get_ciphers(ssl_ctx_.get())) {\n    if (SSL_CIPHER_get_id(our_c) == SSL_CIPHER_get_id(c)) {\n      return true;\n    }\n  }\n  return false;\n}\n\nbool ContextImpl::verifyCertChain(X509& leaf_cert, STACK_OF(X509) & intermediates,\n                                  std::string& error_details) {\n  bssl::UniquePtr<X509_STORE_CTX> ctx(X509_STORE_CTX_new());\n  // It doesn't matter which SSL context is used, because they share the same\n  // cert validation config.\n  X509_STORE* store = SSL_CTX_get_cert_store(tls_contexts_[0].ssl_ctx_.get());\n  if (!X509_STORE_CTX_init(ctx.get(), store, &leaf_cert, 
&intermediates)) {\n    error_details = \"Failed to verify certificate chain: X509_STORE_CTX_init\";\n    return false;\n  }\n\n  int res = doVerifyCertChain(ctx.get(), nullptr, leaf_cert, nullptr);\n  if (res <= 0) {\n    const int n = X509_STORE_CTX_get_error(ctx.get());\n    const int depth = X509_STORE_CTX_get_error_depth(ctx.get());\n    error_details = absl::StrCat(\"X509_verify_cert: certificate verification error at depth \",\n                                 depth, \": \", X509_verify_cert_error_string(n));\n    return false;\n  }\n  return true;\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/context_impl.h",
    "content": "#pragma once\n\n#include <array>\n#include <deque>\n#include <functional>\n#include <string>\n#include <vector>\n\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/ssl/context.h\"\n#include \"envoy/ssl/context_config.h\"\n#include \"envoy/ssl/private_key/private_key.h\"\n#include \"envoy/ssl/ssl_socket_extended_info.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n#include \"extensions/transport_sockets/tls/ocsp/ocsp.h\"\n\n#include \"absl/synchronization/mutex.h\"\n#include \"openssl/ssl.h\"\n#include \"openssl/x509v3.h\"\n\nnamespace Envoy {\n#ifndef OPENSSL_IS_BORINGSSL\n#error Envoy requires BoringSSL\n#endif\n\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\n#define ALL_SSL_STATS(COUNTER, GAUGE, HISTOGRAM)                                                   \\\n  COUNTER(connection_error)                                                                        \\\n  COUNTER(handshake)                                                                               \\\n  COUNTER(session_reused)                                                                          \\\n  COUNTER(no_certificate)                                                                          \\\n  COUNTER(fail_verify_no_cert)                                                                     \\\n  COUNTER(fail_verify_error)                                                                       \\\n  COUNTER(fail_verify_san)                                                                         \\\n  COUNTER(fail_verify_cert_hash)                                                                   \\\n  COUNTER(ocsp_staple_failed)                                                                      \\\n  COUNTER(ocsp_staple_omitted)                     
                                                \\\n  COUNTER(ocsp_staple_responses)                                                                   \\\n  COUNTER(ocsp_staple_requests)\n\n/**\n * Wrapper struct for SSL stats. @see stats_macros.h\n */\nstruct SslStats {\n  ALL_SSL_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\nclass ContextImpl : public virtual Envoy::Ssl::Context {\npublic:\n  virtual bssl::UniquePtr<SSL> newSsl(const Network::TransportSocketOptions* options);\n\n  /**\n   * Logs successful TLS handshake and updates stats.\n   * @param ssl the connection to log\n   */\n  void logHandshake(SSL* ssl) const;\n\n  /**\n   * Performs subjectAltName verification\n   * @param ssl the certificate to verify\n   * @param subject_alt_names the configured subject_alt_names to match\n   * @return true if the verification succeeds\n   */\n  static bool verifySubjectAltName(X509* cert, const std::vector<std::string>& subject_alt_names);\n\n  /**\n   * Performs subjectAltName matching with the provided matchers.\n   * @param ssl the certificate to verify\n   * @param subject_alt_name_matchers the configured matchers to match\n   * @return true if the verification succeeds\n   */\n  static bool\n  matchSubjectAltName(X509* cert,\n                      const std::vector<Matchers::StringMatcherImpl>& subject_alt_name_matchers);\n\n  /**\n   * Determines whether the given name matches 'pattern' which may optionally begin with a wildcard.\n   * NOTE:  public for testing\n   * @param dns_name the DNS name to match\n   * @param pattern the pattern to match against (*.example.com)\n   * @return true if the san matches pattern\n   */\n  static bool dnsNameMatch(const absl::string_view dns_name, const absl::string_view pattern);\n\n  SslStats& stats() { return stats_; }\n\n  /**\n   * The global SSL-library index used for storing a pointer to the SslExtendedSocketInfo\n   * class in the SSL instance, for retrieval in callbacks.\n   
*/\n  static int sslExtendedSocketInfoIndex();\n\n  // Ssl::Context\n  size_t daysUntilFirstCertExpires() const override;\n  Envoy::Ssl::CertificateDetailsPtr getCaCertInformation() const override;\n  std::vector<Envoy::Ssl::CertificateDetailsPtr> getCertChainInformation() const override;\n  absl::optional<uint64_t> secondsUntilFirstOcspResponseExpires() const override;\n\n  std::vector<Ssl::PrivateKeyMethodProviderSharedPtr> getPrivateKeyMethodProviders();\n\n  bool verifyCertChain(X509& leaf_cert, STACK_OF(X509) & intermediates, std::string& error_details);\n\nprotected:\n  ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& config,\n              TimeSource& time_source);\n\n  /**\n   * The global SSL-library index used for storing a pointer to the context\n   * in the SSL instance, for retrieval in callbacks.\n   */\n  static int sslContextIndex();\n\n  // A X509_STORE_CTX_verify_cb callback for ignoring cert expiration in X509_verify_cert().\n  static int ignoreCertificateExpirationCallback(int ok, X509_STORE_CTX* store_ctx);\n\n  // A SSL_CTX_set_cert_verify_callback for custom cert validation.\n  static int verifyCallback(X509_STORE_CTX* store_ctx, void* arg);\n\n  // Called by verifyCallback to do the actual cert chain verification.\n  int doVerifyCertChain(X509_STORE_CTX* store_ctx, Ssl::SslExtendedSocketInfo* ssl_extended_info,\n                        X509& leaf_cert,\n                        const Network::TransportSocketOptions* transport_socket_options);\n\n  Envoy::Ssl::ClientValidationStatus\n  verifyCertificate(X509* cert, const std::vector<std::string>& verify_san_list,\n                    const std::vector<Matchers::StringMatcherImpl>& subject_alt_name_matchers);\n\n  /**\n   * Verifies certificate hash for pinning. 
The hash is a hex-encoded SHA-256 of the DER-encoded\n   * certificate.\n   *\n   * @param ssl the certificate to verify\n   * @param expected_hashes the configured list of certificate hashes to match\n   * @return true if the verification succeeds\n   */\n  static bool verifyCertificateHashList(X509* cert,\n                                        const std::vector<std::vector<uint8_t>>& expected_hashes);\n\n  /**\n   * Verifies certificate hash for pinning. The hash is a base64-encoded SHA-256 of the DER-encoded\n   * Subject Public Key Information (SPKI) of the certificate.\n   *\n   * @param ssl the certificate to verify\n   * @param expected_hashes the configured list of certificate hashes to match\n   * @return true if the verification succeeds\n   */\n  static bool verifyCertificateSpkiList(X509* cert,\n                                        const std::vector<std::vector<uint8_t>>& expected_hashes);\n\n  bool parseAndSetAlpn(const std::vector<std::string>& alpn, SSL& ssl);\n  std::vector<uint8_t> parseAlpnProtocols(const std::string& alpn_protocols);\n  static SslStats generateStats(Stats::Scope& scope);\n\n  std::string getCaFileName() const { return ca_file_path_; };\n  void incCounter(const Stats::StatName name, absl::string_view value,\n                  const Stats::StatName fallback) const;\n\n  Envoy::Ssl::CertificateDetailsPtr\n  certificateDetails(X509* cert, const std::string& path,\n                     const Ocsp::OcspResponseWrapper* ocsp_response) const;\n\n  struct TlsContext {\n    // Each certificate specified for the context has its own SSL_CTX. 
SSL_CTXs\n    // are identical with the exception of certificate material, and can be\n    // safely substituted via SSL_set_SSL_CTX() during the\n    // SSL_CTX_set_select_certificate_cb() callback following ClientHello.\n    bssl::UniquePtr<SSL_CTX> ssl_ctx_;\n    bssl::UniquePtr<X509> cert_chain_;\n    std::string cert_chain_file_path_;\n    Ocsp::OcspResponseWrapperPtr ocsp_response_;\n    bool is_ecdsa_{};\n    bool is_must_staple_{};\n    Ssl::PrivateKeyMethodProviderSharedPtr private_key_method_provider_{};\n\n    std::string getCertChainFileName() const { return cert_chain_file_path_; };\n    void addClientValidationContext(const Envoy::Ssl::CertificateValidationContextConfig& config,\n                                    bool require_client_cert);\n    bool isCipherEnabled(uint16_t cipher_id, uint16_t client_version);\n    Envoy::Ssl::PrivateKeyMethodProviderSharedPtr getPrivateKeyMethodProvider() {\n      return private_key_method_provider_;\n    }\n  };\n\n  // This is always non-empty, with the first context used for all new SSL\n  // objects. 
For server contexts, once we have ClientHello, we\n  // potentially switch to a different CertificateContext based on certificate\n  // selection.\n  std::vector<TlsContext> tls_contexts_;\n  bool verify_trusted_ca_{false};\n  std::vector<std::string> verify_subject_alt_name_list_;\n  std::vector<Matchers::StringMatcherImpl> subject_alt_name_matchers_;\n  std::vector<std::vector<uint8_t>> verify_certificate_hash_list_;\n  std::vector<std::vector<uint8_t>> verify_certificate_spki_list_;\n  bool allow_untrusted_certificate_{false};\n  Stats::Scope& scope_;\n  SslStats stats_;\n  std::vector<uint8_t> parsed_alpn_protocols_;\n  bssl::UniquePtr<X509> ca_cert_;\n  bssl::UniquePtr<X509> cert_chain_;\n  std::string ca_file_path_;\n  std::string cert_chain_file_path_;\n  TimeSource& time_source_;\n  const unsigned tls_max_version_;\n  mutable Stats::StatNameSetPtr stat_name_set_;\n  const Stats::StatName unknown_ssl_cipher_;\n  const Stats::StatName unknown_ssl_curve_;\n  const Stats::StatName unknown_ssl_algorithm_;\n  const Stats::StatName unknown_ssl_version_;\n  const Stats::StatName ssl_ciphers_;\n  const Stats::StatName ssl_versions_;\n  const Stats::StatName ssl_curves_;\n  const Stats::StatName ssl_sigalgs_;\n  const Ssl::HandshakerCapabilities capabilities_;\n};\n\nusing ContextImplSharedPtr = std::shared_ptr<ContextImpl>;\n\nclass ClientContextImpl : public ContextImpl, public Envoy::Ssl::ClientContext {\npublic:\n  ClientContextImpl(Stats::Scope& scope, const Envoy::Ssl::ClientContextConfig& config,\n                    TimeSource& time_source);\n\n  bssl::UniquePtr<SSL> newSsl(const Network::TransportSocketOptions* options) override;\n\nprivate:\n  int newSessionKey(SSL_SESSION* session);\n  uint16_t parseSigningAlgorithmsForTest(const std::string& sigalgs);\n\n  const std::string server_name_indication_;\n  const bool allow_renegotiation_;\n  const size_t max_session_keys_;\n  absl::Mutex session_keys_mu_;\n  std::deque<bssl::UniquePtr<SSL_SESSION>> 
session_keys_ ABSL_GUARDED_BY(session_keys_mu_);\n  bool session_keys_single_use_{false};\n};\n\nenum class OcspStapleAction { Staple, NoStaple, Fail, ClientNotCapable };\n\nclass ServerContextImpl : public ContextImpl, public Envoy::Ssl::ServerContext {\npublic:\n  ServerContextImpl(Stats::Scope& scope, const Envoy::Ssl::ServerContextConfig& config,\n                    const std::vector<std::string>& server_names, TimeSource& time_source);\n\nprivate:\n  using SessionContextID = std::array<uint8_t, SSL_MAX_SSL_SESSION_ID_LENGTH>;\n\n  int alpnSelectCallback(const unsigned char** out, unsigned char* outlen, const unsigned char* in,\n                         unsigned int inlen);\n  int sessionTicketProcess(SSL* ssl, uint8_t* key_name, uint8_t* iv, EVP_CIPHER_CTX* ctx,\n                           HMAC_CTX* hmac_ctx, int encrypt);\n  bool isClientEcdsaCapable(const SSL_CLIENT_HELLO* ssl_client_hello);\n  bool isClientOcspCapable(const SSL_CLIENT_HELLO* ssl_client_hello);\n  // Select the TLS certificate context in SSL_CTX_set_select_certificate_cb() callback with\n  // ClientHello details.\n  enum ssl_select_cert_result_t selectTlsContext(const SSL_CLIENT_HELLO* ssl_client_hello);\n  OcspStapleAction ocspStapleAction(const ServerContextImpl::TlsContext& ctx,\n                                    bool client_ocsp_capable);\n\n  SessionContextID generateHashForSessionContextId(const std::vector<std::string>& server_names);\n\n  const std::vector<Envoy::Ssl::ServerContextConfig::SessionTicketKey> session_ticket_keys_;\n  const Ssl::ServerContextConfig::OcspStaplePolicy ocsp_staple_policy_;\n};\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/context_manager_impl.cc",
    "content": "#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include <algorithm>\n#include <functional>\n#include <limits>\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/transport_sockets/tls/context_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nContextManagerImpl::~ContextManagerImpl() {\n  removeEmptyContexts();\n  KNOWN_ISSUE_ASSERT(contexts_.empty(), \"https://github.com/envoyproxy/envoy/issues/10030\");\n}\n\nvoid ContextManagerImpl::removeEmptyContexts() {\n  contexts_.remove_if([](const std::weak_ptr<Envoy::Ssl::Context>& n) { return n.expired(); });\n}\n\nEnvoy::Ssl::ClientContextSharedPtr\nContextManagerImpl::createSslClientContext(Stats::Scope& scope,\n                                           const Envoy::Ssl::ClientContextConfig& config) {\n  if (!config.isReady()) {\n    return nullptr;\n  }\n\n  Envoy::Ssl::ClientContextSharedPtr context =\n      std::make_shared<ClientContextImpl>(scope, config, time_source_);\n  removeEmptyContexts();\n  contexts_.emplace_back(context);\n  return context;\n}\n\nEnvoy::Ssl::ServerContextSharedPtr\nContextManagerImpl::createSslServerContext(Stats::Scope& scope,\n                                           const Envoy::Ssl::ServerContextConfig& config,\n                                           const std::vector<std::string>& server_names) {\n  if (!config.isReady()) {\n    return nullptr;\n  }\n\n  Envoy::Ssl::ServerContextSharedPtr context =\n      std::make_shared<ServerContextImpl>(scope, config, server_names, time_source_);\n  removeEmptyContexts();\n  contexts_.emplace_back(context);\n  return context;\n}\n\nsize_t ContextManagerImpl::daysUntilFirstCertExpires() const {\n  size_t ret = std::numeric_limits<int>::max();\n  for (const auto& ctx_weak_ptr : contexts_) {\n    Envoy::Ssl::ContextSharedPtr context = ctx_weak_ptr.lock();\n    if (context) {\n      ret = 
std::min<size_t>(context->daysUntilFirstCertExpires(), ret);\n    }\n  }\n  return ret;\n}\n\nabsl::optional<uint64_t> ContextManagerImpl::secondsUntilFirstOcspResponseExpires() const {\n  absl::optional<uint64_t> ret;\n  for (const auto& ctx_weak_ptr : contexts_) {\n    Envoy::Ssl::ContextSharedPtr context = ctx_weak_ptr.lock();\n    if (context) {\n      auto next_expiration = context->secondsUntilFirstOcspResponseExpires();\n      if (next_expiration) {\n        ret = std::min<uint64_t>(next_expiration.value(),\n                                 ret.value_or(std::numeric_limits<uint64_t>::max()));\n      }\n    }\n  }\n  return ret;\n}\n\nvoid ContextManagerImpl::iterateContexts(std::function<void(const Envoy::Ssl::Context&)> callback) {\n  for (const auto& ctx_weak_ptr : contexts_) {\n    Envoy::Ssl::ContextSharedPtr context = ctx_weak_ptr.lock();\n    if (context) {\n      callback(*context);\n    }\n  }\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/context_manager_impl.h",
    "content": "#pragma once\n\n#include <functional>\n#include <list>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/ssl/private_key/private_key.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"extensions/transport_sockets/tls/private_key/private_key_manager_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\n/**\n * The SSL context manager has the following threading model:\n * Contexts can be allocated via any thread (through in practice they are only allocated on the main\n * thread). They can be released from any thread (and in practice are since cluster information can\n * be released from any thread). Context allocation/free is a very uncommon thing so we just do a\n * global lock to protect it all.\n */\nclass ContextManagerImpl final : public Envoy::Ssl::ContextManager {\npublic:\n  ContextManagerImpl(TimeSource& time_source) : time_source_(time_source) {}\n  ~ContextManagerImpl() override;\n\n  // Ssl::ContextManager\n  Ssl::ClientContextSharedPtr\n  createSslClientContext(Stats::Scope& scope,\n                         const Envoy::Ssl::ClientContextConfig& config) override;\n  Ssl::ServerContextSharedPtr\n  createSslServerContext(Stats::Scope& scope, const Envoy::Ssl::ServerContextConfig& config,\n                         const std::vector<std::string>& server_names) override;\n  size_t daysUntilFirstCertExpires() const override;\n  absl::optional<uint64_t> secondsUntilFirstOcspResponseExpires() const override;\n  void iterateContexts(std::function<void(const Envoy::Ssl::Context&)> callback) override;\n  Ssl::PrivateKeyMethodManager& privateKeyMethodManager() override {\n    return private_key_method_manager_;\n  };\n\nprivate:\n  void removeEmptyContexts();\n  TimeSource& time_source_;\n  std::list<std::weak_ptr<Envoy::Ssl::Context>> contexts_;\n  PrivateKeyMethodManagerImpl private_key_method_manager_{};\n};\n\n} // namespace Tls\n} // namespace 
TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/io_handle_bio.cc",
    "content": "#include \"extensions/transport_sockets/tls/io_handle_bio.h\"\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/io_handle.h\"\n\n#include \"openssl/bio.h\"\n#include \"openssl/err.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nnamespace {\n\n// NOLINTNEXTLINE(readability-identifier-naming)\ninline Envoy::Network::IoHandle* bio_io_handle(BIO* bio) {\n  return reinterpret_cast<Envoy::Network::IoHandle*>(bio->ptr);\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nint io_handle_new(BIO* bio) {\n  bio->init = 0;\n  bio->num = -1;\n  bio->ptr = nullptr;\n  bio->flags = 0;\n  return 1;\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nint io_handle_free(BIO* bio) {\n  if (bio == nullptr) {\n    return 0;\n  }\n\n  if (bio->shutdown) {\n    if (bio->init) {\n      bio_io_handle(bio)->close();\n    }\n    bio->init = 0;\n    bio->flags = 0;\n  }\n  return 1;\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nint io_handle_read(BIO* b, char* out, int outl) {\n  if (out == nullptr) {\n    return 0;\n  }\n\n  Envoy::Buffer::RawSlice slice;\n  slice.mem_ = out;\n  slice.len_ = outl;\n  auto result = bio_io_handle(b)->readv(outl, &slice, 1);\n  BIO_clear_retry_flags(b);\n  if (!result.ok()) {\n    auto err = result.err_->getErrorCode();\n    if (err == Api::IoError::IoErrorCode::Again || err == Api::IoError::IoErrorCode::Interrupt) {\n      BIO_set_retry_read(b);\n    }\n    return -1;\n  }\n  return result.rc_;\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nint io_handle_write(BIO* b, const char* in, int inl) {\n  Envoy::Buffer::RawSlice slice;\n  slice.mem_ = const_cast<char*>(in);\n  slice.len_ = inl;\n  auto result = bio_io_handle(b)->writev(&slice, 1);\n  BIO_clear_retry_flags(b);\n  if (!result.ok()) {\n    auto err = result.err_->getErrorCode();\n    if (err == Api::IoError::IoErrorCode::Again || err == Api::IoError::IoErrorCode::Interrupt) {\n      
BIO_set_retry_write(b);\n    }\n    return -1;\n  }\n  return result.rc_;\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nlong io_handle_ctrl(BIO* b, int cmd, long num, void*) {\n  long ret = 1;\n\n  switch (cmd) {\n  case BIO_C_SET_FD:\n    RELEASE_ASSERT(false, \"should not be called\");\n    break;\n  case BIO_C_GET_FD:\n    RELEASE_ASSERT(false, \"should not be called\");\n    break;\n  case BIO_CTRL_GET_CLOSE:\n    ret = b->shutdown;\n    break;\n  case BIO_CTRL_SET_CLOSE:\n    b->shutdown = int(num);\n    break;\n  case BIO_CTRL_FLUSH:\n    ret = 1;\n    break;\n  default:\n    ret = 0;\n    break;\n  }\n  return ret;\n}\n\nconst BIO_METHOD methods_io_handlep = {\n    BIO_TYPE_SOCKET,    \"io_handle\",\n    io_handle_write,    io_handle_read,\n    nullptr /* puts */, nullptr /* gets, */,\n    io_handle_ctrl,     io_handle_new,\n    io_handle_free,     nullptr /* callback_ctrl */,\n};\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nconst BIO_METHOD* BIO_s_io_handle(void) { return &methods_io_handlep; }\n\n} // namespace\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nBIO* BIO_new_io_handle(Envoy::Network::IoHandle* io_handle) {\n  BIO* b;\n\n  b = BIO_new(BIO_s_io_handle());\n  RELEASE_ASSERT(b != nullptr, \"\");\n\n  // Initialize the BIO\n  b->num = -1;\n  b->ptr = io_handle;\n  b->shutdown = 0;\n  b->init = 1;\n\n  return b;\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/transport_sockets/tls/io_handle_bio.h",
    "content": "#pragma once\n\n#include \"envoy/network/io_handle.h\"\n\n#include \"openssl/bio.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\n/**\n * Creates a custom BIO that can read from/write to an IoHandle. It's equivalent to a socket BIO\n * but instead of relying on access to an fd, it relies on IoHandle APIs for all interactions.\n */\n// NOLINTNEXTLINE(readability-identifier-naming)\nBIO* BIO_new_io_handle(Envoy::Network::IoHandle* io_handle);\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "source/extensions/transport_sockets/tls/ocsp/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"ocsp_lib\",\n    srcs = [\"ocsp.cc\"],\n    hdrs = [\"ocsp.h\"],\n    repository = \"\",\n    deps = [\n        \":asn1_utility_lib\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/ssl:context_config_interface\",\n        \"//source/extensions/transport_sockets/tls:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"asn1_utility_lib\",\n    srcs = [\"asn1_utility.cc\"],\n    hdrs = [\"asn1_utility.h\"],\n    repository = \"\",\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/ssl:context_config_interface\",\n        \"//source/common/common:c_smart_ptr_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/ocsp/asn1_utility.cc",
    "content": "#include \"extensions/transport_sockets/tls/ocsp/asn1_utility.h\"\n\n#include \"common/common/c_smart_ptr.h\"\n\n#include \"absl/strings/ascii.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace Ocsp {\n\nnamespace {\n// A type adapter since OPENSSL_free accepts void*.\nvoid freeOpensslString(char* str) { OPENSSL_free(str); }\n\n// `ASN1_INTEGER` is a type alias for `ASN1_STRING`.\n// This static_cast is intentional to avoid the\n// c-style cast performed in `M_ASN1_INTEGER_free`.\nvoid freeAsn1Integer(ASN1_INTEGER* integer) {\n  ASN1_STRING_free(static_cast<ASN1_STRING*>(integer));\n}\n} // namespace\n\nabsl::string_view Asn1Utility::cbsToString(CBS& cbs) {\n  auto str_head = reinterpret_cast<const char*>(CBS_data(&cbs));\n  return {str_head, CBS_len(&cbs)};\n}\n\nParsingResult<absl::optional<CBS>> Asn1Utility::getOptional(CBS& cbs, unsigned tag) {\n  int is_present;\n  CBS data;\n  if (!CBS_get_optional_asn1(&cbs, &data, &is_present, tag)) {\n    return \"Failed to parse ASN.1 element tag\";\n  }\n\n  return is_present ? 
absl::optional<CBS>(data) : absl::nullopt;\n}\n\nParsingResult<std::string> Asn1Utility::parseOid(CBS& cbs) {\n  CBS oid;\n  if (!CBS_get_asn1(&cbs, &oid, CBS_ASN1_OBJECT)) {\n    return absl::string_view(\"Input is not a well-formed ASN.1 OBJECT\");\n  }\n  CSmartPtr<char, freeOpensslString> oid_text(CBS_asn1_oid_to_text(&oid));\n  if (oid_text == nullptr) {\n    return absl::string_view(\"Failed to parse oid\");\n  }\n\n  std::string oid_text_str(oid_text.get());\n  return oid_text_str;\n}\n\nParsingResult<Envoy::SystemTime> Asn1Utility::parseGeneralizedTime(CBS& cbs) {\n  CBS elem;\n  if (!CBS_get_asn1(&cbs, &elem, CBS_ASN1_GENERALIZEDTIME)) {\n    return \"Input is not a well-formed ASN.1 GENERALIZEDTIME\";\n  }\n\n  auto time_str = cbsToString(elem);\n  // OCSP follows the RFC 5280 enforcement that `GENERALIZEDTIME`\n  // fields MUST be in UTC, so must be suffixed with a Z character.\n  // Local time or time differential, though a part of the `ASN.1`\n  // `GENERALIZEDTIME` spec, are not supported.\n  // Reference: https://tools.ietf.org/html/rfc5280#section-4.1.2.5.2\n  if (time_str.length() > 0 && absl::ascii_toupper(time_str.at(time_str.length() - 1)) != 'Z') {\n    return \"GENERALIZEDTIME must be in UTC\";\n  }\n\n  absl::Time time;\n  auto utc_time_str = time_str.substr(0, time_str.length() - 1);\n  std::string parse_error;\n  if (!absl::ParseTime(GENERALIZED_TIME_FORMAT, utc_time_str, &time, &parse_error)) {\n    return \"Error parsing string of GENERALIZEDTIME format\";\n  }\n  return absl::ToChronoTime(time);\n}\n\n// Performs the following conversions to go from bytestring to hex integer\n// `CBS` -> `ASN1_INTEGER` -> `BIGNUM` -> String.\nParsingResult<std::string> Asn1Utility::parseInteger(CBS& cbs) {\n  CBS num;\n  if (!CBS_get_asn1(&cbs, &num, CBS_ASN1_INTEGER)) {\n    return absl::string_view(\"Input is not a well-formed ASN.1 INTEGER\");\n  }\n\n  auto head = CBS_data(&num);\n  CSmartPtr<ASN1_INTEGER, freeAsn1Integer> asn1_integer(\n      
c2i_ASN1_INTEGER(nullptr, &head, CBS_len(&num)));\n  if (asn1_integer != nullptr) {\n    BIGNUM num_bn;\n    BN_init(&num_bn);\n    ASN1_INTEGER_to_BN(asn1_integer.get(), &num_bn);\n\n    CSmartPtr<char, freeOpensslString> char_hex_number(BN_bn2hex(&num_bn));\n    BN_free(&num_bn);\n    if (char_hex_number != nullptr) {\n      std::string hex_number(char_hex_number.get());\n      return hex_number;\n    }\n  }\n\n  return absl::string_view(\"Failed to parse ASN.1 INTEGER\");\n}\n\nParsingResult<std::vector<uint8_t>> Asn1Utility::parseOctetString(CBS& cbs) {\n  CBS value;\n  if (!CBS_get_asn1(&cbs, &value, CBS_ASN1_OCTETSTRING)) {\n    return \"Input is not a well-formed ASN.1 OCTETSTRING\";\n  }\n\n  auto data = reinterpret_cast<const uint8_t*>(CBS_data(&value));\n  return std::vector<uint8_t>{data, data + CBS_len(&value)};\n}\n\nParsingResult<absl::monostate> Asn1Utility::skipOptional(CBS& cbs, unsigned tag) {\n  if (!CBS_get_optional_asn1(&cbs, nullptr, nullptr, tag)) {\n    return \"Failed to parse ASN.1 element tag\";\n  }\n  return absl::monostate();\n}\n\nParsingResult<absl::monostate> Asn1Utility::skip(CBS& cbs, unsigned tag) {\n  if (!CBS_get_asn1(&cbs, nullptr, tag)) {\n    return \"Failed to parse ASN.1 element\";\n  }\n\n  return absl::monostate();\n}\n\n} // namespace Ocsp\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/ocsp/asn1_utility.h",
    "content": "#pragma once\n\n#include <iomanip>\n#include <sstream>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/time.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/types/optional.h\"\n#include \"absl/types/variant.h\"\n#include \"openssl/bn.h\"\n#include \"openssl/bytestring.h\"\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace Ocsp {\n\nconstexpr absl::string_view GENERALIZED_TIME_FORMAT = \"%E4Y%m%d%H%M%S\";\n\n/**\n * The result of parsing an `ASN.1` structure or an `absl::string_view` error\n * description.\n */\ntemplate <typename T> using ParsingResult = absl::variant<T, absl::string_view>;\n\n/**\n * Construct a `T` from the data contained in the CBS&. Functions\n * of this type must advance the input CBS& over the element.\n */\ntemplate <typename T> using Asn1ParsingFunc = std::function<ParsingResult<T>(CBS&)>;\n\n/**\n * Utility functions for parsing DER-encoded `ASN.1` objects.\n * This relies heavily on the 'openssl/bytestring' API which\n * is BoringSSL's recommended interface for parsing DER-encoded\n * `ASN.1` data when there is not an existing wrapper.\n * This is not a complete library for `ASN.1` parsing and primarily\n * serves as abstractions for the OCSP module, but can be\n * extended and moved into a general utility to support parsing of\n * additional `ASN.1` objects.\n *\n * Each function adheres to the invariant that given a reference\n * to a crypto `bytestring` (CBS&), it will parse the specified\n * `ASN.1` element and advance `cbs` over it.\n *\n * An exception is thrown if the `bytestring` is malformed or does\n * not match the specified `ASN.1` object. 
The position\n * of `cbs` is not reliable after an exception is thrown.\n */\nclass Asn1Utility {\npublic:\n  ~Asn1Utility() = default;\n\n  /**\n   * Extracts the full contents of `cbs` as a string.\n   *\n   * @param `cbs` a CBS& that refers to the current document position\n   * @returns absl::string_view containing the contents of `cbs`\n   */\n  static absl::string_view cbsToString(CBS& cbs);\n\n  /**\n   * Parses all elements of an `ASN.1` SEQUENCE OF. `parse_element` must\n   * advance its input CBS& over the entire element.\n   *\n   * @param cbs a CBS& that refers to an `ASN.1` SEQUENCE OF object\n   * @param parse_element an `Asn1ParsingFunc<T>` used to parse each element\n   * @returns ParsingResult<std::vector<T>> containing the parsed elements of the sequence\n   * or an error string if `cbs` does not point to a well-formed\n   * SEQUENCE OF object.\n   */\n  template <typename T>\n  static ParsingResult<std::vector<T>> parseSequenceOf(CBS& cbs, Asn1ParsingFunc<T> parse_element);\n\n  /**\n   * Checks if an explicitly tagged optional element of `tag` is present and\n   * if so parses its value with `parse_data`. If the element is not present,\n   * `cbs` is not advanced.\n   *\n   * @param cbs a CBS& that refers to the current document position\n   * @param parse_data an `Asn1ParsingFunc<T>` used to parse the data if present\n   * @return ParsingResult<absl::optional<T>> with a `T` if `cbs` is of the specified tag,\n   * nullopt if the element has a different tag, or an error string if parsing fails.\n   */\n  template <typename T>\n  static ParsingResult<absl::optional<T>> parseOptional(CBS& cbs, Asn1ParsingFunc<T> parse_data,\n                                                        unsigned tag);\n\n  /**\n   * Returns whether or not an element explicitly tagged with `tag` is present\n   * at `cbs`. 
If so, `cbs` is advanced over the optional and assigns\n   * `data` to the inner element, if `data` is not nullptr.\n   * If `cbs` does not contain `tag`, `cbs` remains at the same position.\n   *\n   * @param cbs a CBS& that refers to the current document position\n   * @param an unsigned explicit tag indicating an optional value\n   *\n   * @returns ParsingResult<bool> whether `cbs` points to an element tagged with `tag` or\n   * an error string if parsing fails.\n   */\n  static ParsingResult<absl::optional<CBS>> getOptional(CBS& cbs, unsigned tag);\n\n  /**\n   * @param cbs a CBS& that refers to an `ASN.1` OBJECT IDENTIFIER element\n   * @returns ParsingResult<std::string> the `OID` encoded in `cbs` or an error\n   * string if `cbs` does not point to a well-formed OBJECT IDENTIFIER\n   */\n  static ParsingResult<std::string> parseOid(CBS& cbs);\n\n  /**\n   * @param cbs a CBS& that refers to an `ASN.1` `GENERALIZEDTIME` element\n   * @returns ParsingResult<Envoy::SystemTime> the UTC timestamp encoded in `cbs`\n   * or an error string if `cbs` does not point to a well-formed\n   * `GENERALIZEDTIME`\n   */\n  static ParsingResult<Envoy::SystemTime> parseGeneralizedTime(CBS& cbs);\n\n  /**\n   * Parses an `ASN.1` INTEGER type into its hex string representation.\n   * `ASN.1` INTEGER types are arbitrary precision.\n   * If you're SURE the integer fits into a fixed-size int,\n   * use `CBS_get_asn1_*` functions for the given integer type instead.\n   *\n   * @param cbs a CBS& that refers to an `ASN.1` INTEGER element\n   * @returns ParsingResult<std::string> a hex representation of the integer\n   * or an error string if `cbs` does not point to a well-formed INTEGER\n   */\n  static ParsingResult<std::string> parseInteger(CBS& cbs);\n\n  /**\n   * @param cbs a CBS& that refers to an `ASN.1` `OCTETSTRING` element\n   * @returns ParsingResult<std::vector<uint8_t>> the octets in `cbs` or\n   * an error string if `cbs` does not point to a well-formed `OCTETSTRING`\n   
*/\n  static ParsingResult<std::vector<uint8_t>> parseOctetString(CBS& cbs);\n\n  /**\n   * Advance `cbs` over an `ASN.1` value of the class `tag` if that\n   * value is present. Otherwise, `cbs` stays in the same position.\n   *\n   * @param cbs a CBS& that refers to the current document position\n   * @param tag the tag of the value to skip\n   * @returns `ParsingResult<absl::monostate>` a unit type denoting success\n   * or an error string if parsing fails.\n   */\n  static ParsingResult<absl::monostate> skipOptional(CBS& cbs, unsigned tag);\n\n  /**\n   * Advance `cbs` over an `ASN.1` value of the class `tag`.\n   *\n   * @param cbs a CBS& that refers to the current document position\n   * @param tag the tag of the value to skip\n   * @returns `ParsingResult<absl::monostate>` a unit type denoting success\n   * or an error string if parsing fails.\n   */\n  static ParsingResult<absl::monostate> skip(CBS& cbs, unsigned tag);\n};\n\ntemplate <typename T>\nParsingResult<std::vector<T>> Asn1Utility::parseSequenceOf(CBS& cbs,\n                                                           Asn1ParsingFunc<T> parse_element) {\n  CBS seq_elem;\n  std::vector<T> vec;\n\n  // Initialize seq_elem to first element in sequence.\n  if (!CBS_get_asn1(&cbs, &seq_elem, CBS_ASN1_SEQUENCE)) {\n    return \"Expected sequence of ASN.1 elements.\";\n  }\n\n  while (CBS_data(&seq_elem) < CBS_data(&cbs)) {\n    // parse_element MUST advance seq_elem\n    auto elem_res = parse_element(seq_elem);\n    if (absl::holds_alternative<T>(elem_res)) {\n      vec.push_back(absl::get<0>(elem_res));\n    } else {\n      return absl::get<1>(elem_res);\n    }\n  }\n\n  RELEASE_ASSERT(CBS_data(&cbs) == CBS_data(&seq_elem),\n                 \"Sequence tag length must match actual length or element parsing would fail\");\n\n  return vec;\n}\n\ntemplate <typename T>\nParsingResult<absl::optional<T>> Asn1Utility::parseOptional(CBS& cbs, Asn1ParsingFunc<T> parse_data,\n                                        
                    unsigned tag) {\n  auto maybe_data_res = getOptional(cbs, tag);\n\n  if (absl::holds_alternative<absl::string_view>(maybe_data_res)) {\n    return absl::get<absl::string_view>(maybe_data_res);\n  }\n\n  auto maybe_data = absl::get<absl::optional<CBS>>(maybe_data_res);\n  if (maybe_data) {\n    auto res = parse_data(maybe_data.value());\n    if (absl::holds_alternative<T>(res)) {\n      return absl::get<0>(res);\n    }\n    return absl::get<1>(res);\n  }\n\n  return absl::nullopt;\n}\n\n} // namespace Ocsp\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/ocsp/ocsp.cc",
    "content": "#include \"extensions/transport_sockets/tls/ocsp/ocsp.h\"\n\n#include \"common/common/utility.h\"\n\n#include \"extensions/transport_sockets/tls/ocsp/asn1_utility.h\"\n#include \"extensions/transport_sockets/tls/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace Ocsp {\n\nnamespace CertUtility = Envoy::Extensions::TransportSockets::Tls::Utility;\n\nnamespace {\n\ntemplate <typename T> T unwrap(ParsingResult<T> res) {\n  if (absl::holds_alternative<T>(res)) {\n    return absl::get<0>(res);\n  }\n\n  throw EnvoyException(std::string(absl::get<1>(res)));\n}\n\nunsigned parseTag(CBS& cbs) {\n  unsigned tag;\n  if (!CBS_get_any_asn1_element(&cbs, nullptr, &tag, nullptr)) {\n    throw EnvoyException(\"Failed to parse ASN.1 element tag\");\n  }\n  return tag;\n}\n\nstd::unique_ptr<OcspResponse> readDerEncodedOcspResponse(const std::vector<uint8_t>& der) {\n  CBS cbs;\n  CBS_init(&cbs, der.data(), der.size());\n\n  auto resp = Asn1OcspUtility::parseOcspResponse(cbs);\n  if (CBS_len(&cbs) != 0) {\n    throw EnvoyException(\"Data contained more than a single OCSP response\");\n  }\n\n  return resp;\n}\n\nvoid skipResponderId(CBS& cbs) {\n  // ResponderID ::= CHOICE {\n  //    byName               [1] Name,\n  //    byKey                [2] KeyHash\n  // }\n  //\n  // KeyHash ::= OCTET STRING -- SHA-1 hash of responder's public key\n  //    (excluding the tag and length fields)\n\n  if (unwrap(Asn1Utility::getOptional(cbs, CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 1)) ||\n      unwrap(Asn1Utility::getOptional(cbs, CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 2))) {\n    return;\n  }\n\n  throw EnvoyException(absl::StrCat(\"Unknown choice for Responder ID: \", parseTag(cbs)));\n}\n\nvoid skipCertStatus(CBS& cbs) {\n  // CertStatus ::= CHOICE {\n  //  good                [0] IMPLICIT NULL,\n  //  revoked             [1] IMPLICIT RevokedInfo,\n  //  unknown             [2] IMPLICIT 
UnknownInfo\n  // }\n  if (!(unwrap(Asn1Utility::getOptional(cbs, CBS_ASN1_CONTEXT_SPECIFIC | 0)) ||\n        unwrap(\n            Asn1Utility::getOptional(cbs, CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 1)) ||\n        unwrap(Asn1Utility::getOptional(cbs, CBS_ASN1_CONTEXT_SPECIFIC | 2)))) {\n    throw EnvoyException(absl::StrCat(\"Unknown OcspCertStatus tag: \", parseTag(cbs)));\n  }\n}\n\n} // namespace\n\nOcspResponse::OcspResponse(OcspResponseStatus status, ResponsePtr response)\n    : status_(status), response_(std::move(response)) {}\n\nBasicOcspResponse::BasicOcspResponse(ResponseData data) : data_(data) {}\n\nResponseData::ResponseData(std::vector<SingleResponse> single_responses)\n    : single_responses_(std::move(single_responses)) {}\n\nSingleResponse::SingleResponse(CertId cert_id, Envoy::SystemTime this_update,\n                               absl::optional<Envoy::SystemTime> next_update)\n    : cert_id_(cert_id), this_update_(this_update), next_update_(next_update) {}\n\nCertId::CertId(std::string serial_number) : serial_number_(serial_number) {}\n\nOcspResponseWrapper::OcspResponseWrapper(std::vector<uint8_t> der_response, TimeSource& time_source)\n    : raw_bytes_(std::move(der_response)), response_(readDerEncodedOcspResponse(raw_bytes_)),\n      time_source_(time_source) {\n\n  if (response_->status_ != OcspResponseStatus::Successful) {\n    throw EnvoyException(\"OCSP response was unsuccessful\");\n  }\n\n  if (response_->response_ == nullptr) {\n    throw EnvoyException(\"OCSP response has no body\");\n  }\n\n  // We only permit a 1:1 of certificate to response.\n  if (response_->response_->getNumCerts() != 1) {\n    throw EnvoyException(\"OCSP Response must be for one certificate only\");\n  }\n\n  auto& this_update = response_->response_->getThisUpdate();\n  if (time_source_.systemTime() < this_update) {\n    std::string time_format(GENERALIZED_TIME_FORMAT);\n    DateFormatter formatter(time_format);\n    ENVOY_LOG_MISC(warn, \"OCSP 
Response thisUpdate field is set in the future: {}\",\n                   formatter.fromTime(this_update));\n  }\n}\n\n// We use just the serial number to uniquely identify a certificate.\n// Though different issuers could produce certificates with the same serial\n// number, this check is to prevent operator error and a collision in this\n// case is unlikely.\nbool OcspResponseWrapper::matchesCertificate(X509& cert) const {\n  std::string cert_serial_number = CertUtility::getSerialNumberFromCertificate(cert);\n  std::string resp_cert_serial_number = response_->response_->getCertSerialNumber();\n  return resp_cert_serial_number == cert_serial_number;\n}\n\nbool OcspResponseWrapper::isExpired() {\n  auto& next_update = response_->response_->getNextUpdate();\n  return next_update == absl::nullopt || next_update < time_source_.systemTime();\n}\n\nuint64_t OcspResponseWrapper::secondsUntilExpiration() const {\n  auto& next_update = response_->response_->getNextUpdate();\n  auto now = time_source_.systemTime();\n  if (!next_update || next_update.value() <= now) {\n    return 0;\n  }\n  return std::chrono::duration_cast<std::chrono::seconds>(next_update.value() - now).count();\n}\n\nEnvoy::SystemTime OcspResponseWrapper::getThisUpdate() const {\n  return response_->response_->getThisUpdate();\n}\n\nEnvoy::SystemTime OcspResponseWrapper::getNextUpdate() const {\n  auto& next_update = response_->response_->getNextUpdate();\n  if (next_update) {\n    return *next_update;\n  }\n\n  return time_source_.systemTime();\n}\n\nstd::unique_ptr<OcspResponse> Asn1OcspUtility::parseOcspResponse(CBS& cbs) {\n  // OCSPResponse ::= SEQUENCE {\n  //    responseStatus         OCSPResponseStatus,\n  //    responseBytes          [0] EXPLICIT ResponseBytes OPTIONAL\n  // }\n\n  CBS elem;\n  if (!CBS_get_asn1(&cbs, &elem, CBS_ASN1_SEQUENCE)) {\n    throw EnvoyException(\"OCSP Response is not a well-formed ASN.1 SEQUENCE\");\n  }\n\n  OcspResponseStatus status = 
Asn1OcspUtility::parseResponseStatus(elem);\n  auto maybe_bytes =\n      unwrap(Asn1Utility::getOptional(elem, CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0));\n  ResponsePtr resp = nullptr;\n  if (maybe_bytes) {\n    resp = Asn1OcspUtility::parseResponseBytes(maybe_bytes.value());\n  }\n\n  return std::make_unique<OcspResponse>(status, std::move(resp));\n}\n\nOcspResponseStatus Asn1OcspUtility::parseResponseStatus(CBS& cbs) {\n  // OCSPResponseStatus ::= ENUMERATED {\n  //    successful            (0),  -- Response has valid confirmations\n  //    malformedRequest      (1),  -- Illegal confirmation request\n  //    internalError         (2),  -- Internal error in issuer\n  //    tryLater              (3),  -- Try again later\n  //                                -- (4) is not used\n  //    sigRequired           (5),  -- Must sign the request\n  //    unauthorized          (6)   -- Request unauthorized\n  // }\n  CBS status;\n  if (!CBS_get_asn1(&cbs, &status, CBS_ASN1_ENUMERATED)) {\n    throw EnvoyException(\"OCSP ResponseStatus is not a well-formed ASN.1 ENUMERATED\");\n  }\n\n  auto status_ordinal = *CBS_data(&status);\n  switch (status_ordinal) {\n  case 0:\n    return OcspResponseStatus::Successful;\n  case 1:\n    return OcspResponseStatus::MalformedRequest;\n  case 2:\n    return OcspResponseStatus::InternalError;\n  case 3:\n    return OcspResponseStatus::TryLater;\n  case 5:\n    return OcspResponseStatus::SigRequired;\n  case 6:\n    return OcspResponseStatus::Unauthorized;\n  default:\n    throw EnvoyException(absl::StrCat(\"Unknown OCSP Response Status variant: \", status_ordinal));\n  }\n}\n\nResponsePtr Asn1OcspUtility::parseResponseBytes(CBS& cbs) {\n  // ResponseBytes ::=  SEQUENCE {\n  //     responseType        RESPONSE.\n  //                             &id ({ResponseSet}),\n  //     response            OCTET STRING (CONTAINING RESPONSE.\n  //                             &Type({ResponseSet}{@responseType}))\n  // }\n  CBS elem, response;\n 
 if (!CBS_get_asn1(&cbs, &elem, CBS_ASN1_SEQUENCE)) {\n    throw EnvoyException(\"OCSP ResponseBytes is not a well-formed SEQUENCE\");\n  }\n\n  auto oid_str = unwrap(Asn1Utility::parseOid(elem));\n  if (!CBS_get_asn1(&elem, &response, CBS_ASN1_OCTETSTRING)) {\n    throw EnvoyException(\"Expected ASN.1 OCTETSTRING for response\");\n  }\n\n  if (oid_str == BasicOcspResponse::OID) {\n    return Asn1OcspUtility::parseBasicOcspResponse(response);\n  }\n  throw EnvoyException(absl::StrCat(\"Unknown OCSP Response type with OID: \", oid_str));\n}\n\nstd::unique_ptr<BasicOcspResponse> Asn1OcspUtility::parseBasicOcspResponse(CBS& cbs) {\n  // BasicOCSPResponse       ::= SEQUENCE {\n  //    tbsResponseData      ResponseData,\n  //    signatureAlgorithm   AlgorithmIdentifier{SIGNATURE-ALGORITHM,\n  //                             {`sa-dsaWithSHA1` | `sa-rsaWithSHA1` |\n  //                                  `sa-rsaWithMD5` | `sa-rsaWithMD2`, ...}},\n  //    signature            BIT STRING,\n  //    certs            [0] EXPLICIT SEQUENCE OF Certificate OPTIONAL\n  // }\n  CBS elem;\n  if (!CBS_get_asn1(&cbs, &elem, CBS_ASN1_SEQUENCE)) {\n    throw EnvoyException(\"OCSP BasicOCSPResponse is not a well-formed ASN.1 SEQUENCE\");\n  }\n  auto response_data = Asn1OcspUtility::parseResponseData(elem);\n  // The `signatureAlgorithm` and `signature` are ignored because OCSP\n  // responses are expected to be delivered from a reliable source.\n  // Optional additional certs are ignored.\n\n  return std::make_unique<BasicOcspResponse>(response_data);\n}\n\nResponseData Asn1OcspUtility::parseResponseData(CBS& cbs) {\n  // ResponseData ::= SEQUENCE {\n  //    version              [0] EXPLICIT Version DEFAULT v1,\n  //    responderID              ResponderID,\n  //    producedAt               GeneralizedTime,\n  //    responses                SEQUENCE OF SingleResponse,\n  //    responseExtensions   [1] EXPLICIT Extensions OPTIONAL\n  // }\n  CBS elem;\n  if (!CBS_get_asn1(&cbs, &elem, 
CBS_ASN1_SEQUENCE)) {\n    throw EnvoyException(\"OCSP ResponseData is not a well-formed ASN.1 SEQUENCE\");\n  }\n\n  unwrap(Asn1Utility::skipOptional(elem, 0));\n  skipResponderId(elem);\n  unwrap(Asn1Utility::skip(elem, CBS_ASN1_GENERALIZEDTIME));\n  auto responses = unwrap(Asn1Utility::parseSequenceOf<SingleResponse>(\n      elem, [](CBS& cbs) -> ParsingResult<SingleResponse> {\n        return ParsingResult<SingleResponse>(parseSingleResponse(cbs));\n      }));\n  // Extensions currently ignored.\n\n  return {std::move(responses)};\n}\n\nSingleResponse Asn1OcspUtility::parseSingleResponse(CBS& cbs) {\n  // SingleResponse ::= SEQUENCE {\n  //    certID                  CertID,\n  //    certStatus              CertStatus,\n  //    thisUpdate              GeneralizedTime,\n  //    nextUpdate          [0] EXPLICIT GeneralizedTime OPTIONAL,\n  //    singleExtensions    [1] EXPLICIT Extensions OPTIONAL\n  // }\n  CBS elem;\n  if (!CBS_get_asn1(&cbs, &elem, CBS_ASN1_SEQUENCE)) {\n    throw EnvoyException(\"OCSP SingleResponse is not a well-formed ASN.1 SEQUENCE\");\n  }\n\n  auto cert_id = Asn1OcspUtility::parseCertId(elem);\n  skipCertStatus(elem);\n  auto this_update = unwrap(Asn1Utility::parseGeneralizedTime(elem));\n  auto next_update = unwrap(Asn1Utility::parseOptional<Envoy::SystemTime>(\n      elem, Asn1Utility::parseGeneralizedTime,\n      CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0));\n  // Extensions currently ignored.\n\n  return {cert_id, this_update, next_update};\n}\n\nCertId Asn1OcspUtility::parseCertId(CBS& cbs) {\n  // CertID ::= SEQUENCE {\n  //    hashAlgorithm       AlgorithmIdentifier,\n  //    issuerNameHash      OCTET STRING, -- Hash of issuer's `DN`\n  //    issuerKeyHash       OCTET STRING, -- Hash of issuer's public key\n  //    serialNumber        CertificateSerialNumber\n  // }\n  CBS elem;\n  if (!CBS_get_asn1(&cbs, &elem, CBS_ASN1_SEQUENCE)) {\n    throw EnvoyException(\"OCSP CertID is not a well-formed ASN.1 SEQUENCE\");\n  }\n\n 
 unwrap(Asn1Utility::skip(elem, CBS_ASN1_SEQUENCE));\n  unwrap(Asn1Utility::skip(elem, CBS_ASN1_OCTETSTRING));\n  unwrap(Asn1Utility::skip(elem, CBS_ASN1_OCTETSTRING));\n  auto serial_number = unwrap(Asn1Utility::parseInteger(elem));\n\n  return {serial_number};\n}\n\n} // namespace Ocsp\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/ocsp/ocsp.h",
    "content": "#pragma once\n\n#include <iomanip>\n#include <sstream>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/time.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n#include \"openssl/bytestring.h\"\n#include \"openssl/ssl.h\"\n\n/**\n * Data structures and functions for unmarshaling OCSP responses\n * according to the RFC6960 B.2 spec. See: https://tools.ietf.org/html/rfc6960#appendix-B\n *\n * WARNING: This module is meant to validate that OCSP responses are well-formed\n * and extract useful fields for OCSP stapling. This assumes that responses are\n * provided from configs or another trusted source and does not perform\n * checks necessary to verify responses coming from an upstream server.\n */\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace Ocsp {\n\n/**\n * Reflection of the `ASN.1` OcspResponseStatus enumeration.\n * The possible statuses that can accompany an OCSP response.\n */\nenum class OcspResponseStatus {\n  // OCSPResponseStatus ::= ENUMERATED {\n  //    successful            (0),  -- Response has valid confirmations\n  //    malformedRequest      (1),  -- Illegal confirmation request\n  //    internalError         (2),  -- Internal error in issuer\n  //    tryLater              (3),  -- Try again later\n  //                                -- (4) is not used\n  //    sigRequired           (5),  -- Must sign the request\n  //    unauthorized          (6)   -- Request unauthorized\n  // }\n  Successful = 0,\n  MalformedRequest = 1,\n  InternalError = 2,\n  TryLater = 3,\n  SigRequired = 5,\n  Unauthorized = 6\n};\n\n/**\n * Partial reflection of the `ASN.1` CertId structure.\n * Contains the information to identify an SSL Certificate.\n * Serial numbers are guaranteed to be\n * unique per issuer but not necessarily universally.\n */\nstruct CertId {\n  CertId(std::string serial_number);\n\n  std::string 
serial_number_;\n};\n\n/**\n * Partial reflection of the `ASN.1` SingleResponse structure.\n * Contains information about the OCSP status of a single certificate.\n * An OCSP request may request the status of multiple certificates and\n * therefore responses may contain multiple SingleResponses.\n *\n * this_update_ and next_update_ reflect the validity period for this response.\n * If next_update_ is not present, the OCSP responder always has new information\n * available. In this case the response would be considered immediately expired\n * and invalid for stapling.\n */\nstruct SingleResponse {\n  SingleResponse(CertId cert_id, Envoy::SystemTime this_update,\n                 absl::optional<Envoy::SystemTime> next_update);\n\n  const CertId cert_id_;\n  const Envoy::SystemTime this_update_;\n  const absl::optional<Envoy::SystemTime> next_update_;\n};\n\n/**\n * Partial reflection of the `ASN.1` ResponseData structure.\n * Contains an OCSP response for each certificate in a given request\n * as well as the time at which the response was produced.\n */\nstruct ResponseData {\n  ResponseData(std::vector<SingleResponse> single_responses);\n\n  const std::vector<SingleResponse> single_responses_;\n};\n\n/**\n * An abstract type for OCSP response formats. Which variant of `Response` is\n * used in an `OcspResponse` is indicated by the structure's `OID`.\n *\n * Envoy enforces that OCSP responses must be for a single certificate\n * only. 
The methods on this class extract the relevant information for the\n * single certificate contained in the response.\n */\nclass Response {\npublic:\n  virtual ~Response() = default;\n\n  /**\n   * @return The number of certs reported on by this response.\n   */\n  virtual size_t getNumCerts() PURE;\n\n  /**\n   * @return The serial number of the certificate.\n   */\n  virtual const std::string& getCertSerialNumber() PURE;\n\n  /**\n   * @return The beginning of the validity window for this response.\n   */\n  virtual const Envoy::SystemTime& getThisUpdate() PURE;\n\n  /**\n   * The time at which this response is considered to expire. If\n   * `nullopt`, then there is assumed to always be more up-to-date\n   * information available and the response is always considered expired.\n   *\n   * @return The end of the validity window for this response.\n   */\n  virtual const absl::optional<Envoy::SystemTime>& getNextUpdate() PURE;\n};\n\nusing ResponsePtr = std::unique_ptr<Response>;\n\n/**\n * Reflection of the `ASN.1` BasicOcspResponse structure.\n * Contains the full data of an OCSP response.\n * Envoy enforces that OCSP responses contain a response for only\n * a single certificate.\n *\n * BasicOcspResponse is the only supported Response type in RFC 6960.\n */\nclass BasicOcspResponse : public Response {\npublic:\n  BasicOcspResponse(ResponseData data);\n\n  // Response\n  size_t getNumCerts() override { return data_.single_responses_.size(); }\n  const std::string& getCertSerialNumber() override {\n    return data_.single_responses_[0].cert_id_.serial_number_;\n  }\n  const Envoy::SystemTime& getThisUpdate() override {\n    return data_.single_responses_[0].this_update_;\n  }\n  const absl::optional<Envoy::SystemTime>& getNextUpdate() override {\n    return data_.single_responses_[0].next_update_;\n  }\n\n  // Identified as `id-pkix-ocsp-basic` in\n  // https://tools.ietf.org/html/rfc6960#appendix-B.2\n  constexpr static absl::string_view OID = 
\"1.3.6.1.5.5.7.48.1.1\";\n\nprivate:\n  const ResponseData data_;\n};\n\n/**\n * Reflection of the `ASN.1` OcspResponse structure.\n * This is the top-level data structure for OCSP responses.\n */\nstruct OcspResponse {\n  OcspResponse(OcspResponseStatus status, ResponsePtr response);\n\n  OcspResponseStatus status_;\n  ResponsePtr response_;\n};\n\n/**\n * A wrapper used to own and query an OCSP response in DER-encoded format.\n */\nclass OcspResponseWrapper {\npublic:\n  OcspResponseWrapper(std::vector<uint8_t> der_response, TimeSource& time_source);\n\n  /**\n   * @return std::vector<uint8_t>& a reference to the underlying bytestring representation\n   * of the OCSP response\n   */\n  const std::vector<uint8_t>& rawBytes() const { return raw_bytes_; }\n\n  /**\n   * @return OcspResponseStatus whether the OCSP response was successfully created\n   * or a status indicating an error in the OCSP process\n   */\n  OcspResponseStatus getResponseStatus() const { return response_->status_; }\n\n  /**\n   * @param cert a X509& SSL certificate\n   * @returns bool whether this OCSP response contains the revocation status of `cert`\n   */\n  bool matchesCertificate(X509& cert) const;\n\n  /**\n   * Determines whether the OCSP response can no longer be considered valid.\n   * This can be true if the nextUpdate field of the response has passed\n   * or is not present, indicating that there is always more updated information\n   * available.\n   *\n   * @returns bool if the OCSP response is expired.\n   */\n  bool isExpired();\n\n  /**\n   * @returns the seconds until this OCSP response expires.\n   */\n  uint64_t secondsUntilExpiration() const;\n\n  /**\n   * @return The beginning of the validity window for this response.\n   */\n  Envoy::SystemTime getThisUpdate() const;\n\n  /**\n   * The time at which this response is considered to expire. 
If\n   * the underlying response does not have a value, then the current\n   * time is returned.\n   *\n   * @return The end of the validity window for this response.\n   */\n  Envoy::SystemTime getNextUpdate() const;\n\nprivate:\n  const std::vector<uint8_t> raw_bytes_;\n  const std::unique_ptr<OcspResponse> response_;\n  TimeSource& time_source_;\n};\n\nusing OcspResponseWrapperPtr = std::unique_ptr<OcspResponseWrapper>;\n\n/**\n * `ASN.1` DER-encoded parsing functions similar to `Asn1Utility` but specifically\n * for structures related to OCSP.\n *\n * Each function must advance `cbs` across the element it refers to.\n */\nclass Asn1OcspUtility {\npublic:\n  /**\n   * @param `cbs` a CBS& that refers to an `ASN.1` OcspResponse element\n   * @returns std::unique_ptr<OcspResponse> the OCSP response encoded in `cbs`\n   * @throws Envoy::EnvoyException if `cbs` does not contain a well-formed OcspResponse\n   * element.\n   */\n  static std::unique_ptr<OcspResponse> parseOcspResponse(CBS& cbs);\n\n  /**\n   * @param cbs a CBS& that refers to an `ASN.1` OcspResponseStatus element\n   * @returns OcspResponseStatus the OCSP response encoded in `cbs`\n   * @throws Envoy::EnvoyException if `cbs` does not contain a well-formed\n   * OcspResponseStatus element.\n   */\n  static OcspResponseStatus parseResponseStatus(CBS& cbs);\n\n  /**\n   * @param cbs a CBS& that refers to an `ASN.1` Response element\n   * @returns Response containing the content of an OCSP response\n   * @throws Envoy::EnvoyException if `cbs` does not contain a well-formed\n   * structure that is a valid Response type.\n   */\n  static ResponsePtr parseResponseBytes(CBS& cbs);\n\n  /**\n   * @param cbs a CBS& that refers to an `ASN.1` BasicOcspResponse element\n   * @returns BasicOcspResponse containing the content of an OCSP response\n   * @throws Envoy::EnvoyException if `cbs` does not contain a well-formed\n   * BasicOcspResponse element.\n   */\n  static std::unique_ptr<BasicOcspResponse> 
parseBasicOcspResponse(CBS& cbs);\n\n  /**\n   * @param cbs a CBS& that refers to an `ASN.1` ResponseData element\n   * @returns ResponseData containing the content of an OCSP response relating\n   * to certificate statuses.\n   * @throws Envoy::EnvoyException if `cbs` does not contain a well-formed\n   * ResponseData element.\n   */\n  static ResponseData parseResponseData(CBS& cbs);\n\n  /**\n   * @param cbs a CBS& that refers to an `ASN.1` SingleResponse element\n   * @returns SingleResponse containing the id and revocation status of\n   * a single certificate.\n   * @throws Envoy::EnvoyException if `cbs` does not contain a well-formed\n   * SingleResponse element.\n   */\n  static SingleResponse parseSingleResponse(CBS& cbs);\n\n  /**\n   * @param cbs a CBS& that refers to an `ASN.1` CertId element\n   * @returns CertId containing the information necessary to uniquely identify\n   * an SSL certificate.\n   * @throws Envoy::EnvoyException if `cbs` does not contain a well-formed\n   * CertId element.\n   */\n  static CertId parseCertId(CBS& cbs);\n};\n\n} // namespace Ocsp\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/private_key/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"private_key_manager_lib\",\n    srcs = [\n        \"private_key_manager_impl.cc\",\n    ],\n    hdrs = [\n        \"private_key_manager_impl.h\",\n    ],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/ssl/private_key:private_key_config_interface\",\n        \"//include/envoy/ssl/private_key:private_key_interface\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/private_key/private_key_manager_impl.cc",
    "content": "#include \"extensions/transport_sockets/tls/private_key/private_key_manager_impl.h\"\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/registry/registry.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nEnvoy::Ssl::PrivateKeyMethodProviderSharedPtr\nPrivateKeyMethodManagerImpl::createPrivateKeyMethodProvider(\n    const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& config,\n    Server::Configuration::TransportSocketFactoryContext& factory_context) {\n\n  Ssl::PrivateKeyMethodProviderInstanceFactory* factory =\n      Registry::FactoryRegistry<Ssl::PrivateKeyMethodProviderInstanceFactory>::getFactory(\n          config.provider_name());\n\n  // Create a new provider instance with the configuration.\n  if (factory) {\n    return factory->createPrivateKeyMethodProviderInstance(config, factory_context);\n  }\n\n  return nullptr;\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/private_key/private_key_manager_impl.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/ssl/private_key/private_key.h\"\n#include \"envoy/ssl/private_key/private_key_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nclass PrivateKeyMethodManagerImpl : public virtual Ssl::PrivateKeyMethodManager {\npublic:\n  // Ssl::PrivateKeyMethodManager\n  Ssl::PrivateKeyMethodProviderSharedPtr createPrivateKeyMethodProvider(\n      const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& config,\n      Server::Configuration::TransportSocketFactoryContext& factory_context) override;\n};\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/ssl_handshaker.cc",
    "content": "#include \"extensions/transport_sockets/tls/ssl_handshaker.h\"\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/hex.h\"\n#include \"common/http/headers.h\"\n\n#include \"extensions/transport_sockets/tls/utility.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"openssl/err.h\"\n#include \"openssl/x509v3.h\"\n\nusing Envoy::Network::PostIoAction;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nvoid SslExtendedSocketInfoImpl::setCertificateValidationStatus(\n    Envoy::Ssl::ClientValidationStatus validated) {\n  certificate_validation_status_ = validated;\n}\n\nEnvoy::Ssl::ClientValidationStatus SslExtendedSocketInfoImpl::certificateValidationStatus() const {\n  return certificate_validation_status_;\n}\n\nSslHandshakerImpl::SslHandshakerImpl(bssl::UniquePtr<SSL> ssl, int ssl_extended_socket_info_index,\n                                     Ssl::HandshakeCallbacks* handshake_callbacks)\n    : ssl_(std::move(ssl)), handshake_callbacks_(handshake_callbacks),\n      state_(Ssl::SocketState::PreHandshake) {\n  SSL_set_ex_data(ssl_.get(), ssl_extended_socket_info_index, &(this->extended_socket_info_));\n}\n\nbool SslHandshakerImpl::peerCertificatePresented() const {\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  return cert != nullptr;\n}\n\nbool SslHandshakerImpl::peerCertificateValidated() const {\n  return extended_socket_info_.certificateValidationStatus() ==\n         Envoy::Ssl::ClientValidationStatus::Validated;\n}\n\nabsl::Span<const std::string> SslHandshakerImpl::uriSanLocalCertificate() const {\n  if (!cached_uri_san_local_certificate_.empty()) {\n    return cached_uri_san_local_certificate_;\n  }\n\n  // The cert object is not owned.\n  X509* cert = SSL_get_certificate(ssl());\n  if (!cert) {\n    ASSERT(cached_uri_san_local_certificate_.empty());\n    return 
cached_uri_san_local_certificate_;\n  }\n  cached_uri_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI);\n  return cached_uri_san_local_certificate_;\n}\n\nabsl::Span<const std::string> SslHandshakerImpl::dnsSansLocalCertificate() const {\n  if (!cached_dns_san_local_certificate_.empty()) {\n    return cached_dns_san_local_certificate_;\n  }\n\n  X509* cert = SSL_get_certificate(ssl());\n  if (!cert) {\n    ASSERT(cached_dns_san_local_certificate_.empty());\n    return cached_dns_san_local_certificate_;\n  }\n  cached_dns_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS);\n  return cached_dns_san_local_certificate_;\n}\n\nconst std::string& SslHandshakerImpl::sha256PeerCertificateDigest() const {\n  if (!cached_sha_256_peer_certificate_digest_.empty()) {\n    return cached_sha_256_peer_certificate_digest_;\n  }\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    ASSERT(cached_sha_256_peer_certificate_digest_.empty());\n    return cached_sha_256_peer_certificate_digest_;\n  }\n\n  std::vector<uint8_t> computed_hash(SHA256_DIGEST_LENGTH);\n  unsigned int n;\n  X509_digest(cert.get(), EVP_sha256(), computed_hash.data(), &n);\n  RELEASE_ASSERT(n == computed_hash.size(), \"\");\n  cached_sha_256_peer_certificate_digest_ = Hex::encode(computed_hash);\n  return cached_sha_256_peer_certificate_digest_;\n}\n\nconst std::string& SslHandshakerImpl::sha1PeerCertificateDigest() const {\n  if (!cached_sha_1_peer_certificate_digest_.empty()) {\n    return cached_sha_1_peer_certificate_digest_;\n  }\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    ASSERT(cached_sha_1_peer_certificate_digest_.empty());\n    return cached_sha_1_peer_certificate_digest_;\n  }\n\n  std::vector<uint8_t> computed_hash(SHA_DIGEST_LENGTH);\n  unsigned int n;\n  X509_digest(cert.get(), EVP_sha1(), computed_hash.data(), &n);\n  RELEASE_ASSERT(n == computed_hash.size(), \"\");\n  
cached_sha_1_peer_certificate_digest_ = Hex::encode(computed_hash);\n  return cached_sha_1_peer_certificate_digest_;\n}\n\nconst std::string& SslHandshakerImpl::urlEncodedPemEncodedPeerCertificate() const {\n  if (!cached_url_encoded_pem_encoded_peer_certificate_.empty()) {\n    return cached_url_encoded_pem_encoded_peer_certificate_;\n  }\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    ASSERT(cached_url_encoded_pem_encoded_peer_certificate_.empty());\n    return cached_url_encoded_pem_encoded_peer_certificate_;\n  }\n\n  bssl::UniquePtr<BIO> buf(BIO_new(BIO_s_mem()));\n  RELEASE_ASSERT(buf != nullptr, \"\");\n  RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert.get()) == 1, \"\");\n  const uint8_t* output;\n  size_t length;\n  RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, \"\");\n  absl::string_view pem(reinterpret_cast<const char*>(output), length);\n  cached_url_encoded_pem_encoded_peer_certificate_ = absl::StrReplaceAll(\n      pem, {{\"\\n\", \"%0A\"}, {\" \", \"%20\"}, {\"+\", \"%2B\"}, {\"/\", \"%2F\"}, {\"=\", \"%3D\"}});\n  return cached_url_encoded_pem_encoded_peer_certificate_;\n}\n\nconst std::string& SslHandshakerImpl::urlEncodedPemEncodedPeerCertificateChain() const {\n  if (!cached_url_encoded_pem_encoded_peer_cert_chain_.empty()) {\n    return cached_url_encoded_pem_encoded_peer_cert_chain_;\n  }\n\n  STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl());\n  if (cert_chain == nullptr) {\n    ASSERT(cached_url_encoded_pem_encoded_peer_cert_chain_.empty());\n    return cached_url_encoded_pem_encoded_peer_cert_chain_;\n  }\n\n  for (uint64_t i = 0; i < sk_X509_num(cert_chain); i++) {\n    X509* cert = sk_X509_value(cert_chain, i);\n\n    bssl::UniquePtr<BIO> buf(BIO_new(BIO_s_mem()));\n    RELEASE_ASSERT(buf != nullptr, \"\");\n    RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert) == 1, \"\");\n    const uint8_t* output;\n    size_t length;\n    
RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, \"\");\n\n    absl::string_view pem(reinterpret_cast<const char*>(output), length);\n    cached_url_encoded_pem_encoded_peer_cert_chain_ = absl::StrCat(\n        cached_url_encoded_pem_encoded_peer_cert_chain_,\n        absl::StrReplaceAll(\n            pem, {{\"\\n\", \"%0A\"}, {\" \", \"%20\"}, {\"+\", \"%2B\"}, {\"/\", \"%2F\"}, {\"=\", \"%3D\"}}));\n  }\n  return cached_url_encoded_pem_encoded_peer_cert_chain_;\n}\n\nabsl::Span<const std::string> SslHandshakerImpl::uriSanPeerCertificate() const {\n  if (!cached_uri_san_peer_certificate_.empty()) {\n    return cached_uri_san_peer_certificate_;\n  }\n\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    ASSERT(cached_uri_san_peer_certificate_.empty());\n    return cached_uri_san_peer_certificate_;\n  }\n  cached_uri_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI);\n  return cached_uri_san_peer_certificate_;\n}\n\nabsl::Span<const std::string> SslHandshakerImpl::dnsSansPeerCertificate() const {\n  if (!cached_dns_san_peer_certificate_.empty()) {\n    return cached_dns_san_peer_certificate_;\n  }\n\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    ASSERT(cached_dns_san_peer_certificate_.empty());\n    return cached_dns_san_peer_certificate_;\n  }\n  cached_dns_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS);\n  return cached_dns_san_peer_certificate_;\n}\n\nuint16_t SslHandshakerImpl::ciphersuiteId() const {\n  const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl());\n  if (cipher == nullptr) {\n    return 0xffff;\n  }\n\n  // From the OpenSSL docs:\n  //    SSL_CIPHER_get_id returns |cipher|'s id. 
It may be cast to a |uint16_t| to\n  //    get the cipher suite value.\n  return static_cast<uint16_t>(SSL_CIPHER_get_id(cipher));\n}\n\nstd::string SslHandshakerImpl::ciphersuiteString() const {\n  const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl());\n  if (cipher == nullptr) {\n    return {};\n  }\n\n  return SSL_CIPHER_get_name(cipher);\n}\n\nconst std::string& SslHandshakerImpl::tlsVersion() const {\n  if (!cached_tls_version_.empty()) {\n    return cached_tls_version_;\n  }\n  cached_tls_version_ = SSL_get_version(ssl());\n  return cached_tls_version_;\n}\n\nNetwork::PostIoAction SslHandshakerImpl::doHandshake() {\n  ASSERT(state_ != Ssl::SocketState::HandshakeComplete && state_ != Ssl::SocketState::ShutdownSent);\n  int rc = SSL_do_handshake(ssl());\n  if (rc == 1) {\n    state_ = Ssl::SocketState::HandshakeComplete;\n    handshake_callbacks_->onSuccess(ssl());\n\n    // It's possible that we closed during the handshake callback.\n    return handshake_callbacks_->connection().state() == Network::Connection::State::Open\n               ? 
PostIoAction::KeepOpen\n               : PostIoAction::Close;\n  } else {\n    int err = SSL_get_error(ssl(), rc);\n    switch (err) {\n    case SSL_ERROR_WANT_READ:\n    case SSL_ERROR_WANT_WRITE:\n      return PostIoAction::KeepOpen;\n    case SSL_ERROR_WANT_PRIVATE_KEY_OPERATION:\n      state_ = Ssl::SocketState::HandshakeInProgress;\n      return PostIoAction::KeepOpen;\n    default:\n      handshake_callbacks_->onFailure();\n      return PostIoAction::Close;\n    }\n  }\n}\n\nconst std::string& SslHandshakerImpl::serialNumberPeerCertificate() const {\n  if (!cached_serial_number_peer_certificate_.empty()) {\n    return cached_serial_number_peer_certificate_;\n  }\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    ASSERT(cached_serial_number_peer_certificate_.empty());\n    return cached_serial_number_peer_certificate_;\n  }\n  cached_serial_number_peer_certificate_ = Utility::getSerialNumberFromCertificate(*cert.get());\n  return cached_serial_number_peer_certificate_;\n}\n\nconst std::string& SslHandshakerImpl::issuerPeerCertificate() const {\n  if (!cached_issuer_peer_certificate_.empty()) {\n    return cached_issuer_peer_certificate_;\n  }\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    ASSERT(cached_issuer_peer_certificate_.empty());\n    return cached_issuer_peer_certificate_;\n  }\n  cached_issuer_peer_certificate_ = Utility::getIssuerFromCertificate(*cert);\n  return cached_issuer_peer_certificate_;\n}\n\nconst std::string& SslHandshakerImpl::subjectPeerCertificate() const {\n  if (!cached_subject_peer_certificate_.empty()) {\n    return cached_subject_peer_certificate_;\n  }\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    ASSERT(cached_subject_peer_certificate_.empty());\n    return cached_subject_peer_certificate_;\n  }\n  cached_subject_peer_certificate_ = Utility::getSubjectFromCertificate(*cert);\n  return 
cached_subject_peer_certificate_;\n}\n\nconst std::string& SslHandshakerImpl::subjectLocalCertificate() const {\n  if (!cached_subject_local_certificate_.empty()) {\n    return cached_subject_local_certificate_;\n  }\n  X509* cert = SSL_get_certificate(ssl());\n  if (!cert) {\n    ASSERT(cached_subject_local_certificate_.empty());\n    return cached_subject_local_certificate_;\n  }\n  cached_subject_local_certificate_ = Utility::getSubjectFromCertificate(*cert);\n  return cached_subject_local_certificate_;\n}\n\nabsl::optional<SystemTime> SslHandshakerImpl::validFromPeerCertificate() const {\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    return absl::nullopt;\n  }\n  return Utility::getValidFrom(*cert);\n}\n\nabsl::optional<SystemTime> SslHandshakerImpl::expirationPeerCertificate() const {\n  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl()));\n  if (!cert) {\n    return absl::nullopt;\n  }\n  return Utility::getExpirationTime(*cert);\n}\n\nconst std::string& SslHandshakerImpl::sessionId() const {\n  if (!cached_session_id_.empty()) {\n    return cached_session_id_;\n  }\n  SSL_SESSION* session = SSL_get_session(ssl());\n  if (session == nullptr) {\n    ASSERT(cached_session_id_.empty());\n    return cached_session_id_;\n  }\n\n  unsigned int session_id_length = 0;\n  const uint8_t* session_id = SSL_SESSION_get_id(session, &session_id_length);\n  cached_session_id_ = Hex::encode(session_id, session_id_length);\n  return cached_session_id_;\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/ssl_handshaker.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/secret/secret_callbacks.h\"\n#include \"envoy/ssl/handshaker.h\"\n#include \"envoy/ssl/private_key/private_key_callbacks.h\"\n#include \"envoy/ssl/ssl_socket_extended_info.h\"\n#include \"envoy/ssl/ssl_socket_state.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/transport_sockets/tls/utility.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"absl/types/optional.h\"\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nclass SslExtendedSocketInfoImpl : public Envoy::Ssl::SslExtendedSocketInfo {\npublic:\n  void setCertificateValidationStatus(Envoy::Ssl::ClientValidationStatus validated) override;\n  Envoy::Ssl::ClientValidationStatus certificateValidationStatus() const override;\n\nprivate:\n  Envoy::Ssl::ClientValidationStatus certificate_validation_status_{\n      Envoy::Ssl::ClientValidationStatus::NotValidated};\n};\n\nclass SslHandshakerImpl : public Ssl::ConnectionInfo, public Ssl::Handshaker {\npublic:\n  SslHandshakerImpl(bssl::UniquePtr<SSL> ssl, int ssl_extended_socket_info_index,\n                    Ssl::HandshakeCallbacks* handshake_callbacks);\n\n  // Ssl::ConnectionInfo\n  bool peerCertificatePresented() const override;\n  bool peerCertificateValidated() const override;\n  absl::Span<const std::string> uriSanLocalCertificate() const override;\n  const std::string& sha256PeerCertificateDigest() const override;\n  const std::string& sha1PeerCertificateDigest() const override;\n  const std::string& serialNumberPeerCertificate() const override;\n  const std::string& issuerPeerCertificate() const override;\n  const std::string& subjectPeerCertificate() const 
override;\n  const std::string& subjectLocalCertificate() const override;\n  absl::Span<const std::string> uriSanPeerCertificate() const override;\n  const std::string& urlEncodedPemEncodedPeerCertificate() const override;\n  const std::string& urlEncodedPemEncodedPeerCertificateChain() const override;\n  absl::Span<const std::string> dnsSansPeerCertificate() const override;\n  absl::Span<const std::string> dnsSansLocalCertificate() const override;\n  absl::optional<SystemTime> validFromPeerCertificate() const override;\n  absl::optional<SystemTime> expirationPeerCertificate() const override;\n  const std::string& sessionId() const override;\n  uint16_t ciphersuiteId() const override;\n  std::string ciphersuiteString() const override;\n  const std::string& tlsVersion() const override;\n\n  // Ssl::Handshaker\n  Network::PostIoAction doHandshake() override;\n\n  Ssl::SocketState state() { return state_; }\n  void setState(Ssl::SocketState state) { state_ = state; }\n  SSL* ssl() const { return ssl_.get(); }\n  Ssl::HandshakeCallbacks* handshakeCallbacks() { return handshake_callbacks_; }\n\n  bssl::UniquePtr<SSL> ssl_;\n\nprivate:\n  Ssl::HandshakeCallbacks* handshake_callbacks_;\n\n  Ssl::SocketState state_;\n  mutable std::vector<std::string> cached_uri_san_local_certificate_;\n  mutable std::string cached_sha_256_peer_certificate_digest_;\n  mutable std::string cached_sha_1_peer_certificate_digest_;\n  mutable std::string cached_serial_number_peer_certificate_;\n  mutable std::string cached_issuer_peer_certificate_;\n  mutable std::string cached_subject_peer_certificate_;\n  mutable std::string cached_subject_local_certificate_;\n  mutable std::vector<std::string> cached_uri_san_peer_certificate_;\n  mutable std::string cached_url_encoded_pem_encoded_peer_certificate_;\n  mutable std::string cached_url_encoded_pem_encoded_peer_cert_chain_;\n  mutable std::vector<std::string> cached_dns_san_peer_certificate_;\n  mutable std::vector<std::string> 
cached_dns_san_local_certificate_;\n  mutable std::string cached_session_id_;\n  mutable std::string cached_tls_version_;\n  mutable SslExtendedSocketInfoImpl extended_socket_info_;\n};\n\nusing SslHandshakerImplSharedPtr = std::shared_ptr<SslHandshakerImpl>;\n\nclass HandshakerFactoryContextImpl : public Ssl::HandshakerFactoryContext {\npublic:\n  HandshakerFactoryContextImpl(Api::Api& api, absl::string_view alpn_protocols)\n      : api_(api), alpn_protocols_(alpn_protocols) {}\n\n  // HandshakerFactoryContext\n  Api::Api& api() override { return api_; }\n  absl::string_view alpnProtocols() const override { return alpn_protocols_; }\n\nprivate:\n  Api::Api& api_;\n  const std::string alpn_protocols_;\n};\n\nclass HandshakerFactoryImpl : public Ssl::HandshakerFactory {\npublic:\n  std::string name() const override { return \"envoy.default_tls_handshaker\"; }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};\n  }\n\n  Ssl::HandshakerFactoryCb createHandshakerCb(const Protobuf::Message&,\n                                              Ssl::HandshakerFactoryContext&,\n                                              ProtobufMessage::ValidationVisitor&) override {\n    // The default HandshakerImpl doesn't take a config or use the HandshakerFactoryContext.\n    return [](bssl::UniquePtr<SSL> ssl, int ssl_extended_socket_info_index,\n              Ssl::HandshakeCallbacks* handshake_callbacks) {\n      return std::make_shared<SslHandshakerImpl>(std::move(ssl), ssl_extended_socket_info_index,\n                                                 handshake_callbacks);\n    };\n  }\n\n  Ssl::HandshakerCapabilities capabilities() const override {\n    // The default handshaker impl requires Envoy to handle all enumerated behaviors.\n    return Ssl::HandshakerCapabilities{};\n  }\n\n  static HandshakerFactory* getDefaultHandshakerFactory() {\n    static HandshakerFactoryImpl 
default_handshaker_factory;\n    return &default_handshaker_factory;\n  }\n};\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/ssl_socket.cc",
    "content": "#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/hex.h\"\n#include \"common/http/headers.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"extensions/transport_sockets/tls/io_handle_bio.h\"\n#include \"extensions/transport_sockets/tls/ssl_handshaker.h\"\n#include \"extensions/transport_sockets/tls/utility.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"openssl/err.h\"\n#include \"openssl/x509v3.h\"\n\nusing Envoy::Network::PostIoAction;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nnamespace {\n\nconstexpr absl::string_view NotReadyReason{\"TLS error: Secret is not supplied by SDS\"};\n\n// This SslSocket will be used when SSL secret is not fetched from SDS server.\nclass NotReadySslSocket : public Network::TransportSocket {\npublic:\n  // Network::TransportSocket\n  void setTransportSocketCallbacks(Network::TransportSocketCallbacks&) override {}\n  std::string protocol() const override { return EMPTY_STRING; }\n  absl::string_view failureReason() const override { return NotReadyReason; }\n  bool canFlushClose() override { return true; }\n  void closeSocket(Network::ConnectionEvent) override {}\n  Network::IoResult doRead(Buffer::Instance&) override { return {PostIoAction::Close, 0, false}; }\n  Network::IoResult doWrite(Buffer::Instance&, bool) override {\n    return {PostIoAction::Close, 0, false};\n  }\n  void onConnected() override {}\n  Ssl::ConnectionInfoConstSharedPtr ssl() const override { return nullptr; }\n};\n} // namespace\n\nSslSocket::SslSocket(Envoy::Ssl::ContextSharedPtr ctx, InitialState state,\n                     const Network::TransportSocketOptionsSharedPtr& transport_socket_options,\n                     Ssl::HandshakerFactoryCb handshaker_factory_cb)\n    : 
transport_socket_options_(transport_socket_options),\n      ctx_(std::dynamic_pointer_cast<ContextImpl>(ctx)),\n      info_(std::dynamic_pointer_cast<SslHandshakerImpl>(\n          handshaker_factory_cb(ctx_->newSsl(transport_socket_options_.get()),\n                                ctx_->sslExtendedSocketInfoIndex(), this))) {\n  if (state == InitialState::Client) {\n    SSL_set_connect_state(rawSsl());\n  } else {\n    ASSERT(state == InitialState::Server);\n    SSL_set_accept_state(rawSsl());\n  }\n}\n\nvoid SslSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) {\n  ASSERT(!callbacks_);\n  callbacks_ = &callbacks;\n\n  // Associate this SSL connection with all the certificates (with their potentially different\n  // private key methods).\n  for (auto const& provider : ctx_->getPrivateKeyMethodProviders()) {\n    provider->registerPrivateKeyMethod(rawSsl(), *this, callbacks_->connection().dispatcher());\n  }\n\n  BIO* bio;\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.tls_use_io_handle_bio\")) {\n    // Use custom BIO that reads from/writes to IoHandle\n    bio = BIO_new_io_handle(&callbacks_->ioHandle());\n  } else {\n    // TODO(fcoras): remove once the io_handle_bio proves to be stable\n    bio = BIO_new_socket(callbacks_->ioHandle().fdDoNotUse(), 0);\n  }\n  SSL_set_bio(rawSsl(), bio, bio);\n}\n\nSslSocket::ReadResult SslSocket::sslReadIntoSlice(Buffer::RawSlice& slice) {\n  ReadResult result;\n  uint8_t* mem = static_cast<uint8_t*>(slice.mem_);\n  size_t remaining = slice.len_;\n  while (remaining > 0) {\n    int rc = SSL_read(rawSsl(), mem, remaining);\n    ENVOY_CONN_LOG(trace, \"ssl read returns: {}\", callbacks_->connection(), rc);\n    if (rc > 0) {\n      ASSERT(static_cast<size_t>(rc) <= remaining);\n      mem += rc;\n      remaining -= rc;\n      result.commit_slice_ = true;\n    } else {\n      result.error_ = absl::make_optional<int>(rc);\n      break;\n    }\n  }\n\n  if (result.commit_slice_) {\n  
  slice.len_ -= remaining;\n  }\n  return result;\n}\n\nNetwork::IoResult SslSocket::doRead(Buffer::Instance& read_buffer) {\n  if (info_->state() != Ssl::SocketState::HandshakeComplete &&\n      info_->state() != Ssl::SocketState::ShutdownSent) {\n    PostIoAction action = doHandshake();\n    if (action == PostIoAction::Close || info_->state() != Ssl::SocketState::HandshakeComplete) {\n      // end_stream is false because either a hard error occurred (action == Close) or\n      // the handshake isn't complete, so a half-close cannot occur yet.\n      return {action, 0, false};\n    }\n  }\n\n  bool keep_reading = true;\n  bool end_stream = false;\n  PostIoAction action = PostIoAction::KeepOpen;\n  uint64_t bytes_read = 0;\n  while (keep_reading) {\n    // We use 2 slices here so that we can use the remainder of an existing buffer chain element\n    // if there is extra space. 16K read is arbitrary and can be tuned later.\n    Buffer::RawSlice slices[2];\n    uint64_t slices_to_commit = 0;\n    uint64_t num_slices = read_buffer.reserve(16384, slices, 2);\n    for (uint64_t i = 0; i < num_slices; i++) {\n      auto result = sslReadIntoSlice(slices[i]);\n      if (result.commit_slice_) {\n        slices_to_commit++;\n        bytes_read += slices[i].len_;\n      }\n      if (result.error_.has_value()) {\n        keep_reading = false;\n        int err = SSL_get_error(rawSsl(), result.error_.value());\n        switch (err) {\n        case SSL_ERROR_WANT_READ:\n          break;\n        case SSL_ERROR_ZERO_RETURN:\n          end_stream = true;\n          break;\n        case SSL_ERROR_WANT_WRITE:\n        // Renegotiation has started. 
We don't handle renegotiation so just fall through.\n        default:\n          drainErrorQueue();\n          action = PostIoAction::Close;\n          break;\n        }\n\n        break;\n      }\n    }\n\n    if (slices_to_commit > 0) {\n      read_buffer.commit(slices, slices_to_commit);\n      if (callbacks_->shouldDrainReadBuffer()) {\n        callbacks_->setReadBufferReady();\n        keep_reading = false;\n      }\n    }\n  }\n\n  ENVOY_CONN_LOG(trace, \"ssl read {} bytes\", callbacks_->connection(), bytes_read);\n\n  return {action, bytes_read, end_stream};\n}\n\nvoid SslSocket::onPrivateKeyMethodComplete() {\n  ASSERT(isThreadSafe());\n  ASSERT(info_->state() == Ssl::SocketState::HandshakeInProgress);\n\n  // Resume handshake.\n  PostIoAction action = doHandshake();\n  if (action == PostIoAction::Close) {\n    ENVOY_CONN_LOG(debug, \"async handshake completion error\", callbacks_->connection());\n    callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite);\n  }\n}\n\nNetwork::Connection& SslSocket::connection() const { return callbacks_->connection(); }\n\nvoid SslSocket::onSuccess(SSL* ssl) {\n  ctx_->logHandshake(ssl);\n  callbacks_->raiseEvent(Network::ConnectionEvent::Connected);\n}\n\nvoid SslSocket::onFailure() { drainErrorQueue(); }\n\nPostIoAction SslSocket::doHandshake() { return info_->doHandshake(); }\n\nvoid SslSocket::drainErrorQueue() {\n  bool saw_error = false;\n  bool saw_counted_error = false;\n  while (uint64_t err = ERR_get_error()) {\n    if (ERR_GET_LIB(err) == ERR_LIB_SSL) {\n      if (ERR_GET_REASON(err) == SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE) {\n        ctx_->stats().fail_verify_no_cert_.inc();\n        saw_counted_error = true;\n      } else if (ERR_GET_REASON(err) == SSL_R_CERTIFICATE_VERIFY_FAILED) {\n        saw_counted_error = true;\n      }\n    }\n    saw_error = true;\n\n    if (failure_reason_.empty()) {\n      failure_reason_ = \"TLS error:\";\n    }\n    failure_reason_.append(absl::StrCat(\" \", 
err, \":\", ERR_lib_error_string(err), \":\",\n                                        ERR_func_error_string(err), \":\",\n                                        ERR_reason_error_string(err)));\n  }\n  ENVOY_CONN_LOG(debug, \"{}\", callbacks_->connection(), failure_reason_);\n  if (saw_error && !saw_counted_error) {\n    ctx_->stats().connection_error_.inc();\n  }\n}\n\nNetwork::IoResult SslSocket::doWrite(Buffer::Instance& write_buffer, bool end_stream) {\n  ASSERT(info_->state() != Ssl::SocketState::ShutdownSent || write_buffer.length() == 0);\n  if (info_->state() != Ssl::SocketState::HandshakeComplete &&\n      info_->state() != Ssl::SocketState::ShutdownSent) {\n    PostIoAction action = doHandshake();\n    if (action == PostIoAction::Close || info_->state() != Ssl::SocketState::HandshakeComplete) {\n      return {action, 0, false};\n    }\n  }\n\n  uint64_t bytes_to_write;\n  if (bytes_to_retry_) {\n    bytes_to_write = bytes_to_retry_;\n    bytes_to_retry_ = 0;\n  } else {\n    bytes_to_write = std::min(write_buffer.length(), static_cast<uint64_t>(16384));\n  }\n\n  uint64_t total_bytes_written = 0;\n  while (bytes_to_write > 0) {\n    // TODO(mattklein123): As it relates to our fairness efforts, we might want to limit the number\n    // of iterations of this loop, either by pure iterations, bytes written, etc.\n\n    // SSL_write() requires that if a previous call returns SSL_ERROR_WANT_WRITE, we need to call\n    // it again with the same parameters. 
This is done by tracking last write size, but not write\n    // data, since linearize() will return the same undrained data anyway.\n    ASSERT(bytes_to_write <= write_buffer.length());\n    int rc = SSL_write(rawSsl(), write_buffer.linearize(bytes_to_write), bytes_to_write);\n    ENVOY_CONN_LOG(trace, \"ssl write returns: {}\", callbacks_->connection(), rc);\n    if (rc > 0) {\n      ASSERT(rc == static_cast<int>(bytes_to_write));\n      total_bytes_written += rc;\n      write_buffer.drain(rc);\n      bytes_to_write = std::min(write_buffer.length(), static_cast<uint64_t>(16384));\n    } else {\n      int err = SSL_get_error(rawSsl(), rc);\n      switch (err) {\n      case SSL_ERROR_WANT_WRITE:\n        bytes_to_retry_ = bytes_to_write;\n        break;\n      case SSL_ERROR_WANT_READ:\n      // Renegotiation has started. We don't handle renegotiation so just fall through.\n      default:\n        drainErrorQueue();\n        return {PostIoAction::Close, total_bytes_written, false};\n      }\n\n      break;\n    }\n  }\n\n  if (write_buffer.length() == 0 && end_stream) {\n    shutdownSsl();\n  }\n\n  return {PostIoAction::KeepOpen, total_bytes_written, false};\n}\n\nvoid SslSocket::onConnected() { ASSERT(info_->state() == Ssl::SocketState::PreHandshake); }\n\nSsl::ConnectionInfoConstSharedPtr SslSocket::ssl() const { return info_; }\n\nvoid SslSocket::shutdownSsl() {\n  ASSERT(info_->state() != Ssl::SocketState::PreHandshake);\n  if (info_->state() != Ssl::SocketState::ShutdownSent &&\n      callbacks_->connection().state() != Network::Connection::State::Closed) {\n    int rc = SSL_shutdown(rawSsl());\n    ENVOY_CONN_LOG(debug, \"SSL shutdown: rc={}\", callbacks_->connection(), rc);\n    drainErrorQueue();\n    info_->setState(Ssl::SocketState::ShutdownSent);\n  }\n}\n\nvoid SslSocket::closeSocket(Network::ConnectionEvent) {\n  // Unregister the SSL connection object from private key method providers.\n  for (auto const& provider : 
ctx_->getPrivateKeyMethodProviders()) {\n    provider->unregisterPrivateKeyMethod(rawSsl());\n  }\n\n  // Attempt to send a shutdown before closing the socket. It's possible this won't go out if\n  // there is no room on the socket. We can extend the state machine to handle this at some point\n  // if needed.\n  if (info_->state() == Ssl::SocketState::HandshakeInProgress ||\n      info_->state() == Ssl::SocketState::HandshakeComplete) {\n    shutdownSsl();\n  }\n}\n\nstd::string SslSocket::protocol() const {\n  const unsigned char* proto;\n  unsigned int proto_len;\n  SSL_get0_alpn_selected(rawSsl(), &proto, &proto_len);\n  return std::string(reinterpret_cast<const char*>(proto), proto_len);\n}\n\nabsl::string_view SslSocket::failureReason() const { return failure_reason_; }\n\nnamespace {\nSslSocketFactoryStats generateStats(const std::string& prefix, Stats::Scope& store) {\n  return {\n      ALL_SSL_SOCKET_FACTORY_STATS(POOL_COUNTER_PREFIX(store, prefix + \"_ssl_socket_factory.\"))};\n}\n} // namespace\n\nClientSslSocketFactory::ClientSslSocketFactory(Envoy::Ssl::ClientContextConfigPtr config,\n                                               Envoy::Ssl::ContextManager& manager,\n                                               Stats::Scope& stats_scope)\n    : manager_(manager), stats_scope_(stats_scope), stats_(generateStats(\"client\", stats_scope)),\n      config_(std::move(config)),\n      ssl_ctx_(manager_.createSslClientContext(stats_scope_, *config_)) {\n  config_->setSecretUpdateCallback([this]() { onAddOrUpdateSecret(); });\n}\n\nNetwork::TransportSocketPtr ClientSslSocketFactory::createTransportSocket(\n    Network::TransportSocketOptionsSharedPtr transport_socket_options) const {\n  // onAddOrUpdateSecret() could be invoked in the middle of checking the existence of ssl_ctx and\n  // creating SslSocket using ssl_ctx. 
Capture ssl_ctx_ into a local variable so that we check and\n  // use the same ssl_ctx to create SslSocket.\n  Envoy::Ssl::ClientContextSharedPtr ssl_ctx;\n  {\n    absl::ReaderMutexLock l(&ssl_ctx_mu_);\n    ssl_ctx = ssl_ctx_;\n  }\n  if (ssl_ctx) {\n    return std::make_unique<SslSocket>(std::move(ssl_ctx), InitialState::Client,\n                                       transport_socket_options, config_->createHandshaker());\n  } else {\n    ENVOY_LOG(debug, \"Create NotReadySslSocket\");\n    stats_.upstream_context_secrets_not_ready_.inc();\n    return std::make_unique<NotReadySslSocket>();\n  }\n}\n\nbool ClientSslSocketFactory::implementsSecureTransport() const { return true; }\n\nvoid ClientSslSocketFactory::onAddOrUpdateSecret() {\n  ENVOY_LOG(debug, \"Secret is updated.\");\n  {\n    absl::WriterMutexLock l(&ssl_ctx_mu_);\n    ssl_ctx_ = manager_.createSslClientContext(stats_scope_, *config_);\n  }\n  stats_.ssl_context_update_by_sds_.inc();\n}\n\nServerSslSocketFactory::ServerSslSocketFactory(Envoy::Ssl::ServerContextConfigPtr config,\n                                               Envoy::Ssl::ContextManager& manager,\n                                               Stats::Scope& stats_scope,\n                                               const std::vector<std::string>& server_names)\n    : manager_(manager), stats_scope_(stats_scope), stats_(generateStats(\"server\", stats_scope)),\n      config_(std::move(config)), server_names_(server_names),\n      ssl_ctx_(manager_.createSslServerContext(stats_scope_, *config_, server_names_)) {\n  config_->setSecretUpdateCallback([this]() { onAddOrUpdateSecret(); });\n}\n\nNetwork::TransportSocketPtr\nServerSslSocketFactory::createTransportSocket(Network::TransportSocketOptionsSharedPtr) const {\n  // onAddOrUpdateSecret() could be invoked in the middle of checking the existence of ssl_ctx and\n  // creating SslSocket using ssl_ctx. 
Capture ssl_ctx_ into a local variable so that we check and\n  // use the same ssl_ctx to create SslSocket.\n  Envoy::Ssl::ServerContextSharedPtr ssl_ctx;\n  {\n    absl::ReaderMutexLock l(&ssl_ctx_mu_);\n    ssl_ctx = ssl_ctx_;\n  }\n  if (ssl_ctx) {\n    return std::make_unique<SslSocket>(std::move(ssl_ctx), InitialState::Server, nullptr,\n                                       config_->createHandshaker());\n  } else {\n    ENVOY_LOG(debug, \"Create NotReadySslSocket\");\n    stats_.downstream_context_secrets_not_ready_.inc();\n    return std::make_unique<NotReadySslSocket>();\n  }\n}\n\nbool ServerSslSocketFactory::implementsSecureTransport() const { return true; }\n\nvoid ServerSslSocketFactory::onAddOrUpdateSecret() {\n  ENVOY_LOG(debug, \"Secret is updated.\");\n  {\n    absl::WriterMutexLock l(&ssl_ctx_mu_);\n    ssl_ctx_ = manager_.createSslServerContext(stats_scope_, *config_, server_names_);\n  }\n  stats_.ssl_context_update_by_sds_.inc();\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/ssl_socket.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/secret/secret_callbacks.h\"\n#include \"envoy/ssl/handshaker.h\"\n#include \"envoy/ssl/private_key/private_key_callbacks.h\"\n#include \"envoy/ssl/ssl_socket_extended_info.h\"\n#include \"envoy/ssl/ssl_socket_state.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/transport_sockets/tls/context_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_handshaker.h\"\n#include \"extensions/transport_sockets/tls/utility.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"absl/types/optional.h\"\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\n#define ALL_SSL_SOCKET_FACTORY_STATS(COUNTER)                                                      \\\n  COUNTER(ssl_context_update_by_sds)                                                               \\\n  COUNTER(upstream_context_secrets_not_ready)                                                      \\\n  COUNTER(downstream_context_secrets_not_ready)\n\n/**\n * Wrapper struct for SSL socket factory stats. 
@see stats_macros.h\n */\nstruct SslSocketFactoryStats {\n  ALL_SSL_SOCKET_FACTORY_STATS(GENERATE_COUNTER_STRUCT)\n};\n\nenum class InitialState { Client, Server };\n\nclass SslSocket : public Network::TransportSocket,\n                  public Envoy::Ssl::PrivateKeyConnectionCallbacks,\n                  public Ssl::HandshakeCallbacks,\n                  protected Logger::Loggable<Logger::Id::connection> {\npublic:\n  SslSocket(Envoy::Ssl::ContextSharedPtr ctx, InitialState state,\n            const Network::TransportSocketOptionsSharedPtr& transport_socket_options,\n            Ssl::HandshakerFactoryCb handshaker_factory_cb);\n\n  // Network::TransportSocket\n  void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override;\n  std::string protocol() const override;\n  absl::string_view failureReason() const override;\n  bool canFlushClose() override { return info_->state() == Ssl::SocketState::HandshakeComplete; }\n  void closeSocket(Network::ConnectionEvent close_type) override;\n  Network::IoResult doRead(Buffer::Instance& read_buffer) override;\n  Network::IoResult doWrite(Buffer::Instance& write_buffer, bool end_stream) override;\n  void onConnected() override;\n  Ssl::ConnectionInfoConstSharedPtr ssl() const override;\n  // Ssl::PrivateKeyConnectionCallbacks\n  void onPrivateKeyMethodComplete() override;\n  // Ssl::HandshakeCallbacks\n  Network::Connection& connection() const override;\n  void onSuccess(SSL* ssl) override;\n  void onFailure() override;\n  Network::TransportSocketCallbacks* transportSocketCallbacks() override { return callbacks_; }\n\n  SSL* rawSslForTest() const { return rawSsl(); }\n\nprotected:\n  SSL* rawSsl() const { return info_->ssl_.get(); }\n\nprivate:\n  struct ReadResult {\n    bool commit_slice_{};\n    absl::optional<int> error_;\n  };\n  ReadResult sslReadIntoSlice(Buffer::RawSlice& slice);\n\n  Network::PostIoAction doHandshake();\n  void drainErrorQueue();\n  void shutdownSsl();\n  bool isThreadSafe() 
const {\n    return callbacks_ != nullptr && callbacks_->connection().dispatcher().isThreadSafe();\n  }\n\n  const Network::TransportSocketOptionsSharedPtr transport_socket_options_;\n  Network::TransportSocketCallbacks* callbacks_{};\n  ContextImplSharedPtr ctx_;\n  uint64_t bytes_to_retry_{};\n  std::string failure_reason_;\n\n  SslHandshakerImplSharedPtr info_;\n};\n\nclass ClientSslSocketFactory : public Network::TransportSocketFactory,\n                               public Secret::SecretCallbacks,\n                               Logger::Loggable<Logger::Id::config> {\npublic:\n  ClientSslSocketFactory(Envoy::Ssl::ClientContextConfigPtr config,\n                         Envoy::Ssl::ContextManager& manager, Stats::Scope& stats_scope);\n\n  Network::TransportSocketPtr\n  createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override;\n  bool implementsSecureTransport() const override;\n\n  // Secret::SecretCallbacks\n  void onAddOrUpdateSecret() override;\n\nprivate:\n  Envoy::Ssl::ContextManager& manager_;\n  Stats::Scope& stats_scope_;\n  SslSocketFactoryStats stats_;\n  Envoy::Ssl::ClientContextConfigPtr config_;\n  mutable absl::Mutex ssl_ctx_mu_;\n  Envoy::Ssl::ClientContextSharedPtr ssl_ctx_ ABSL_GUARDED_BY(ssl_ctx_mu_);\n};\n\nclass ServerSslSocketFactory : public Network::TransportSocketFactory,\n                               public Secret::SecretCallbacks,\n                               Logger::Loggable<Logger::Id::config> {\npublic:\n  ServerSslSocketFactory(Envoy::Ssl::ServerContextConfigPtr config,\n                         Envoy::Ssl::ContextManager& manager, Stats::Scope& stats_scope,\n                         const std::vector<std::string>& server_names);\n\n  Network::TransportSocketPtr\n  createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override;\n  bool implementsSecureTransport() const override;\n\n  // Secret::SecretCallbacks\n  void onAddOrUpdateSecret() override;\n\nprivate:\n  
Ssl::ContextManager& manager_;\n  Stats::Scope& stats_scope_;\n  SslSocketFactoryStats stats_;\n  Envoy::Ssl::ServerContextConfigPtr config_;\n  const std::vector<std::string> server_names_;\n  mutable absl::Mutex ssl_ctx_mu_;\n  Envoy::Ssl::ServerContextSharedPtr ssl_ctx_ ABSL_GUARDED_BY(ssl_ctx_mu_);\n};\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/utility.cc",
    "content": "#include \"extensions/transport_sockets/tls/utility.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"openssl/x509v3.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nnamespace {\n\nenum class CertName { Issuer, Subject };\n\n/**\n * Retrieves a name from a certificate and formats it as an RFC 2253 name.\n * @param cert the certificate.\n * @param desired_name the desired name (Issuer or Subject) to retrieve from the certificate.\n * @return std::string returns the desired name formatted as an RFC 2253 name.\n */\nstd::string getRFC2253NameFromCertificate(X509& cert, CertName desired_name) {\n  bssl::UniquePtr<BIO> buf(BIO_new(BIO_s_mem()));\n  RELEASE_ASSERT(buf != nullptr, \"\");\n\n  X509_NAME* name = nullptr;\n  switch (desired_name) {\n  case CertName::Issuer:\n    name = X509_get_issuer_name(&cert);\n    break;\n  case CertName::Subject:\n    name = X509_get_subject_name(&cert);\n    break;\n  }\n\n  // flags=XN_FLAG_RFC2253 is the documented parameter for single-line output in RFC 2253 format.\n  // Example from the RFC:\n  //   * Single value per Relative Distinguished Name (RDN): CN=Steve Kille,O=Isode Limited,C=GB\n  //   * Multivalue output in first RDN: OU=Sales+CN=J. Smith,O=Widget Inc.,C=US\n  //   * Quoted comma in Organization: CN=L. 
Eagle,O=Sue\\, Grabbit and Runn,C=GB\n  X509_NAME_print_ex(buf.get(), name, 0 /* indent */, XN_FLAG_RFC2253);\n\n  const uint8_t* data;\n  size_t data_len;\n  int rc = BIO_mem_contents(buf.get(), &data, &data_len);\n  ASSERT(rc == 1);\n  return std::string(reinterpret_cast<const char*>(data), data_len);\n}\n\n} // namespace\n\nconst ASN1_TIME& epochASN1_Time() {\n  static ASN1_TIME* e = []() -> ASN1_TIME* {\n    ASN1_TIME* epoch = ASN1_TIME_new();\n    const time_t epoch_time = 0;\n    RELEASE_ASSERT(ASN1_TIME_set(epoch, epoch_time) != nullptr, \"\");\n    return epoch;\n  }();\n  return *e;\n}\n\ninline bssl::UniquePtr<ASN1_TIME> currentASN1_Time(TimeSource& time_source) {\n  bssl::UniquePtr<ASN1_TIME> current_asn_time(ASN1_TIME_new());\n  const time_t current_time = std::chrono::system_clock::to_time_t(time_source.systemTime());\n  RELEASE_ASSERT(ASN1_TIME_set(current_asn_time.get(), current_time) != nullptr, \"\");\n  return current_asn_time;\n}\n\nstd::string Utility::getSerialNumberFromCertificate(X509& cert) {\n  ASN1_INTEGER* serial_number = X509_get_serialNumber(&cert);\n  BIGNUM num_bn;\n  BN_init(&num_bn);\n  ASN1_INTEGER_to_BN(serial_number, &num_bn);\n  char* char_serial_number = BN_bn2hex(&num_bn);\n  BN_free(&num_bn);\n  if (char_serial_number != nullptr) {\n    std::string serial_number(char_serial_number);\n    OPENSSL_free(char_serial_number);\n    return serial_number;\n  }\n  return \"\";\n}\n\nstd::vector<std::string> Utility::getSubjectAltNames(X509& cert, int type) {\n  std::vector<std::string> subject_alt_names;\n  bssl::UniquePtr<GENERAL_NAMES> san_names(\n      static_cast<GENERAL_NAMES*>(X509_get_ext_d2i(&cert, NID_subject_alt_name, nullptr, nullptr)));\n  if (san_names == nullptr) {\n    return subject_alt_names;\n  }\n  for (const GENERAL_NAME* san : san_names.get()) {\n    if (san->type == type) {\n      subject_alt_names.push_back(generalNameAsString(san));\n    }\n  }\n  return subject_alt_names;\n}\n\nstd::string 
Utility::generalNameAsString(const GENERAL_NAME* general_name) {\n  std::string san;\n  switch (general_name->type) {\n  case GEN_DNS: {\n    ASN1_STRING* str = general_name->d.dNSName;\n    san.assign(reinterpret_cast<const char*>(ASN1_STRING_data(str)), ASN1_STRING_length(str));\n    break;\n  }\n  case GEN_URI: {\n    ASN1_STRING* str = general_name->d.uniformResourceIdentifier;\n    san.assign(reinterpret_cast<const char*>(ASN1_STRING_data(str)), ASN1_STRING_length(str));\n    break;\n  }\n  case GEN_IPADD: {\n    if (general_name->d.ip->length == 4) {\n      sockaddr_in sin;\n      sin.sin_port = 0;\n      sin.sin_family = AF_INET;\n      memcpy(&sin.sin_addr, general_name->d.ip->data, sizeof(sin.sin_addr));\n      Network::Address::Ipv4Instance addr(&sin);\n      san = addr.ip()->addressAsString();\n    } else if (general_name->d.ip->length == 16) {\n      sockaddr_in6 sin6;\n      sin6.sin6_port = 0;\n      sin6.sin6_family = AF_INET6;\n      memcpy(&sin6.sin6_addr, general_name->d.ip->data, sizeof(sin6.sin6_addr));\n      Network::Address::Ipv6Instance addr(sin6);\n      san = addr.ip()->addressAsString();\n    }\n    break;\n  }\n  }\n  return san;\n}\n\nstd::string Utility::getIssuerFromCertificate(X509& cert) {\n  return getRFC2253NameFromCertificate(cert, CertName::Issuer);\n}\n\nstd::string Utility::getSubjectFromCertificate(X509& cert) {\n  return getRFC2253NameFromCertificate(cert, CertName::Subject);\n}\n\nint32_t Utility::getDaysUntilExpiration(const X509* cert, TimeSource& time_source) {\n  if (cert == nullptr) {\n    return std::numeric_limits<int>::max();\n  }\n  int days, seconds;\n  if (ASN1_TIME_diff(&days, &seconds, currentASN1_Time(time_source).get(),\n                     X509_get0_notAfter(cert))) {\n    return days;\n  }\n  return 0;\n}\n\nabsl::string_view Utility::getCertificateExtensionValue(X509& cert,\n                                                        absl::string_view extension_name) {\n  bssl::UniquePtr<ASN1_OBJECT> oid(\n   
   OBJ_txt2obj(std::string(extension_name).c_str(), 1 /* don't search names */));\n  if (oid == nullptr) {\n    return {};\n  }\n\n  int pos = X509_get_ext_by_OBJ(&cert, oid.get(), -1);\n  if (pos < 0) {\n    return {};\n  }\n\n  X509_EXTENSION* extension = X509_get_ext(&cert, pos);\n  if (extension == nullptr) {\n    return {};\n  }\n\n  const ASN1_OCTET_STRING* octet_string = X509_EXTENSION_get_data(extension);\n  RELEASE_ASSERT(octet_string != nullptr, \"\");\n\n  // Return the entire DER-encoded value for this extension. Correct decoding depends on\n  // knowledge of the expected structure of the extension's value.\n  const unsigned char* octet_string_data = ASN1_STRING_get0_data(octet_string);\n  const int octet_string_length = ASN1_STRING_length(octet_string);\n\n  return {reinterpret_cast<const char*>(octet_string_data),\n          static_cast<absl::string_view::size_type>(octet_string_length)};\n}\n\nSystemTime Utility::getValidFrom(const X509& cert) {\n  int days, seconds;\n  int rc = ASN1_TIME_diff(&days, &seconds, &epochASN1_Time(), X509_get0_notBefore(&cert));\n  ASSERT(rc == 1);\n  // Casting to <time_t (64bit)> to prevent multiplication overflow when certificate valid-from date\n  // beyond 2038-01-19T03:14:08Z.\n  return std::chrono::system_clock::from_time_t(static_cast<time_t>(days) * 24 * 60 * 60 + seconds);\n}\n\nSystemTime Utility::getExpirationTime(const X509& cert) {\n  int days, seconds;\n  int rc = ASN1_TIME_diff(&days, &seconds, &epochASN1_Time(), X509_get0_notAfter(&cert));\n  ASSERT(rc == 1);\n  // Casting to <time_t (64bit)> to prevent multiplication overflow when certificate not-after date\n  // beyond 2038-01-19T03:14:08Z.\n  return std::chrono::system_clock::from_time_t(static_cast<time_t>(days) * 24 * 60 * 60 + seconds);\n}\n\nabsl::optional<std::string> Utility::getLastCryptoError() {\n  auto err = ERR_get_error();\n\n  if (err != 0) {\n    char errbuf[256];\n\n    ERR_error_string_n(err, errbuf, sizeof(errbuf));\n    return 
std::string(errbuf);\n  }\n\n  return absl::nullopt;\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/tls/utility.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"common/common/utility.h\"\n\n#include \"absl/types/optional.h\"\n#include \"openssl/ssl.h\"\n#include \"openssl/x509v3.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace Utility {\n\n/**\n * Retrieves the serial number of a certificate.\n * @param cert the certificate\n * @return std::string the serial number field of the certificate. Returns \"\" if\n *         there is no serial number.\n */\nstd::string getSerialNumberFromCertificate(X509& cert);\n\n/**\n * Retrieves the subject alternate names of a certificate.\n * @param cert the certificate\n * @param type type of subject alternate name\n * @return std::vector returns the list of subject alternate names.\n */\nstd::vector<std::string> getSubjectAltNames(X509& cert, int type);\n\n/**\n * Converts the Subject Alternate Name to string.\n * @param general_name the subject alternate name\n * @return std::string returns the string representation of subject alt names.\n */\nstd::string generalNameAsString(const GENERAL_NAME* general_name);\n\n/**\n * Retrieves the issuer from certificate.\n * @param cert the certificate\n * @return std::string the issuer field for the certificate.\n */\nstd::string getIssuerFromCertificate(X509& cert);\n\n/**\n * Retrieves the subject from certificate.\n * @param cert the certificate\n * @return std::string the subject field for the certificate.\n */\nstd::string getSubjectFromCertificate(X509& cert);\n\n/**\n * Retrieves the value of a specific X509 extension from the cert, if present.\n * @param cert the certificate.\n * @param extension_name the name of the extension to extract in dotted number format\n * @return absl::string_view the DER-encoded value of the extension field or empty if not present.\n */\nabsl::string_view getCertificateExtensionValue(X509& cert, absl::string_view extension_name);\n\n/**\n * Returns the days until this 
certificate is valid.\n * @param cert the certificate\n * @param time_source the time source to use for current time calculation.\n * @return the number of days till this certificate is valid.\n */\nint32_t getDaysUntilExpiration(const X509* cert, TimeSource& time_source);\n\n/**\n * Returns the time from when this certificate is valid.\n * @param cert the certificate.\n * @return time from when this certificate is valid.\n */\nSystemTime getValidFrom(const X509& cert);\n\n/**\n * Returns the time when this certificate expires.\n * @param cert the certificate.\n * @return time after which the certificate expires.\n */\nSystemTime getExpirationTime(const X509& cert);\n\n/**\n * Returns the last crypto error from ERR_get_error(), or `absl::nullopt`\n * if the error stack is empty.\n * @return std::string error message\n */\nabsl::optional<std::string> getLastCryptoError();\n\n} // namespace Utility\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/transport_sockets/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\n\n/**\n * Well-known transport socket names.\n * NOTE: New transport sockets should use the well known name: envoy.transport_sockets.name.\n */\nclass TransportSocketNameValues {\npublic:\n  const std::string Alts = \"envoy.transport_sockets.alts\";\n  const std::string Quic = \"envoy.transport_sockets.quic\";\n  const std::string RawBuffer = \"envoy.transport_sockets.raw_buffer\";\n  const std::string Tap = \"envoy.transport_sockets.tap\";\n  const std::string Tls = \"envoy.transport_sockets.tls\";\n  const std::string UpstreamProxyProtocol = \"envoy.transport_sockets.upstream_proxy_protocol\";\n};\n\nusing TransportSocketNames = ConstSingleton<TransportSocketNameValues>;\n\n/**\n * Well-known transport protocol names.\n */\nclass TransportProtocolNameValues {\npublic:\n  const std::string Tls = \"tls\";\n  const std::string RawBuffer = \"raw_buffer\";\n  const std::string Quic = \"quic\";\n};\n\n// TODO(lizan): Find a better place to have this singleton.\nusing TransportProtocolNames = ConstSingleton<TransportProtocolNameValues>;\n\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/generic/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\n        \"config.cc\",\n    ],\n    hdrs = [\n        \"config.h\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//source/extensions/upstreams/http/http:upstream_request_lib\",\n        \"//source/extensions/upstreams/http/tcp:upstream_request_lib\",\n        \"@envoy_api//envoy/extensions/upstreams/http/generic/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/upstreams/http/generic/config.cc",
    "content": "#include \"extensions/upstreams/http/generic/config.h\"\n\n#include \"extensions/upstreams/http/http/upstream_request.h\"\n#include \"extensions/upstreams/http/tcp/upstream_request.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Generic {\n\nRouter::GenericConnPoolPtr GenericGenericConnPoolFactory::createGenericConnPool(\n    Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry,\n    absl::optional<Envoy::Http::Protocol> downstream_protocol,\n    Upstream::LoadBalancerContext* ctx) const {\n  if (is_connect) {\n    auto ret = std::make_unique<Upstreams::Http::Tcp::TcpConnPool>(cm, is_connect, route_entry,\n                                                                   downstream_protocol, ctx);\n    return (ret->valid() ? std::move(ret) : nullptr);\n  }\n  auto ret = std::make_unique<Upstreams::Http::Http::HttpConnPool>(cm, is_connect, route_entry,\n                                                                   downstream_protocol, ctx);\n  return (ret->valid() ? std::move(ret) : nullptr);\n}\n\nREGISTER_FACTORY(GenericGenericConnPoolFactory, Router::GenericConnPoolFactory);\n\n} // namespace Generic\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/generic/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/router/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Generic {\n\n/**\n * Config registration for the GenericConnPool. * @see Router::GenericConnPoolFactory\n */\nclass GenericGenericConnPoolFactory : public Router::GenericConnPoolFactory {\npublic:\n  std::string name() const override { return \"envoy.filters.connection_pools.http.generic\"; }\n  std::string category() const override { return \"envoy.upstreams\"; }\n  Router::GenericConnPoolPtr\n  createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect,\n                        const Router::RouteEntry& route_entry,\n                        absl::optional<Envoy::Http::Protocol> downstream_protocol,\n                        Upstream::LoadBalancerContext* ctx) const override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::extensions::upstreams::http::generic::v3::GenericConnectionPoolProto>();\n  }\n};\n\nDECLARE_FACTORY(GenericGenericConnPoolFactory);\n\n} // namespace Generic\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/http/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\n        \"config.cc\",\n    ],\n    hdrs = [\n        \"config.h\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":upstream_request_lib\",\n        \"@envoy_api//envoy/extensions/upstreams/http/http/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"upstream_request_lib\",\n    srcs = [\n        \"upstream_request.cc\",\n    ],\n    hdrs = [\n        \"upstream_request.h\",\n    ],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/network:application_protocol_lib\",\n        \"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/router:router_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/extensions/common/proxy_protocol:proxy_protocol_header_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/upstreams/http/http/config.cc",
    "content": "#include \"extensions/upstreams/http/http/config.h\"\n\n#include \"extensions/upstreams/http/http/upstream_request.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Http {\n\nRouter::GenericConnPoolPtr HttpGenericConnPoolFactory::createGenericConnPool(\n    Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry,\n    absl::optional<Envoy::Http::Protocol> downstream_protocol,\n    Upstream::LoadBalancerContext* ctx) const {\n  auto ret = std::make_unique<HttpConnPool>(cm, is_connect, route_entry, downstream_protocol, ctx);\n  return (ret->valid() ? std::move(ret) : nullptr);\n}\n\nREGISTER_FACTORY(HttpGenericConnPoolFactory, Router::GenericConnPoolFactory);\n\n} // namespace Http\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/http/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/upstreams/http/http/v3/http_connection_pool.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/router/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Http {\n\n/**\n * Config registration for the HttpConnPool. @see Router::GenericConnPoolFactory\n */\nclass HttpGenericConnPoolFactory : public Router::GenericConnPoolFactory {\npublic:\n  std::string name() const override { return \"envoy.filters.connection_pools.http.http\"; }\n  std::string category() const override { return \"envoy.upstreams\"; }\n  Router::GenericConnPoolPtr\n  createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect,\n                        const Router::RouteEntry& route_entry,\n                        absl::optional<Envoy::Http::Protocol> downstream_protocol,\n                        Upstream::LoadBalancerContext* ctx) const override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<\n        envoy::extensions::upstreams::http::http::v3::HttpConnectionPoolProto>();\n  }\n};\n\nDECLARE_FACTORY(HttpGenericConnPoolFactory);\n\n} // namespace Http\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/http/upstream_request.cc",
    "content": "#include \"extensions/upstreams/http/http/upstream_request.h\"\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/conn_pool.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/router/router.h\"\n\nusing Envoy::Router::GenericConnectionPoolCallbacks;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Http {\n\nvoid HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) {\n  callbacks_ = callbacks;\n  // It's possible for a reset to happen inline within the newStream() call. In this case, we\n  // might get deleted inline as well. 
Only write the returned handle out if it is not nullptr to\n  // deal with this case.\n  Envoy::Http::ConnectionPool::Cancellable* handle =\n      conn_pool_->newStream(callbacks->upstreamToDownstream(), *this);\n  if (handle) {\n    conn_pool_stream_handle_ = handle;\n  }\n}\n\nbool HttpConnPool::cancelAnyPendingStream() {\n  if (conn_pool_stream_handle_) {\n    conn_pool_stream_handle_->cancel(ConnectionPool::CancelPolicy::Default);\n    conn_pool_stream_handle_ = nullptr;\n    return true;\n  }\n  return false;\n}\n\nabsl::optional<Envoy::Http::Protocol> HttpConnPool::protocol() const {\n  return conn_pool_->protocol();\n}\n\nvoid HttpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                                 absl::string_view transport_failure_reason,\n                                 Upstream::HostDescriptionConstSharedPtr host) {\n  callbacks_->onPoolFailure(reason, transport_failure_reason, host);\n}\n\nvoid HttpConnPool::onPoolReady(Envoy::Http::RequestEncoder& request_encoder,\n                               Upstream::HostDescriptionConstSharedPtr host,\n                               const StreamInfo::StreamInfo& info) {\n  conn_pool_stream_handle_ = nullptr;\n  auto upstream =\n      std::make_unique<HttpUpstream>(callbacks_->upstreamToDownstream(), &request_encoder);\n  callbacks_->onPoolReady(std::move(upstream), host,\n                          request_encoder.getStream().connectionLocalAddress(), info);\n}\n\n} // namespace Http\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/http/upstream_request.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/conn_pool.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/router/upstream_request.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Http {\n\nclass HttpConnPool : public Router::GenericConnPool, public Envoy::Http::ConnectionPool::Callbacks {\npublic:\n  // GenericConnPool\n  HttpConnPool(Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry,\n               absl::optional<Envoy::Http::Protocol> downstream_protocol,\n               Upstream::LoadBalancerContext* ctx) {\n    ASSERT(!is_connect);\n    conn_pool_ = cm.httpConnPoolForCluster(route_entry.clusterName(), route_entry.priority(),\n                                           downstream_protocol, ctx);\n  }\n  void newStream(Router::GenericConnectionPoolCallbacks* callbacks) override;\n  bool cancelAnyPendingStream() override;\n  absl::optional<Envoy::Http::Protocol> protocol() const override;\n\n  // Http::ConnectionPool::Callbacks\n  void onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                     absl::string_view transport_failure_reason,\n                     Upstream::HostDescriptionConstSharedPtr host) override;\n  void onPoolReady(Envoy::Http::RequestEncoder& callbacks_encoder,\n                   Upstream::HostDescriptionConstSharedPtr host,\n                   const StreamInfo::StreamInfo& info) override;\n  Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); }\n\n  bool valid() { return conn_pool_ != nullptr; }\n\nprivate:\n  // Points to the actual connection pool to create streams from.\n  Envoy::Http::ConnectionPool::Instance* conn_pool_{};\n  Envoy::Http::ConnectionPool::Cancellable* conn_pool_stream_handle_{};\n  Router::GenericConnectionPoolCallbacks* 
callbacks_{};\n};\n\nclass HttpUpstream : public Router::GenericUpstream, public Envoy::Http::StreamCallbacks {\npublic:\n  HttpUpstream(Router::UpstreamToDownstream& upstream_request, Envoy::Http::RequestEncoder* encoder)\n      : upstream_request_(upstream_request), request_encoder_(encoder) {\n    request_encoder_->getStream().addCallbacks(*this);\n  }\n\n  // GenericUpstream\n  void encodeData(Buffer::Instance& data, bool end_stream) override {\n    request_encoder_->encodeData(data, end_stream);\n  }\n  void encodeMetadata(const Envoy::Http::MetadataMapVector& metadata_map_vector) override {\n    request_encoder_->encodeMetadata(metadata_map_vector);\n  }\n  void encodeHeaders(const Envoy::Http::RequestHeaderMap& headers, bool end_stream) override {\n    request_encoder_->encodeHeaders(headers, end_stream);\n  }\n  void encodeTrailers(const Envoy::Http::RequestTrailerMap& trailers) override {\n    request_encoder_->encodeTrailers(trailers);\n  }\n\n  void readDisable(bool disable) override { request_encoder_->getStream().readDisable(disable); }\n\n  void resetStream() override {\n    request_encoder_->getStream().removeCallbacks(*this);\n    request_encoder_->getStream().resetStream(Envoy::Http::StreamResetReason::LocalReset);\n  }\n\n  // Http::StreamCallbacks\n  void onResetStream(Envoy::Http::StreamResetReason reason,\n                     absl::string_view transport_failure_reason) override {\n    upstream_request_.onResetStream(reason, transport_failure_reason);\n  }\n\n  void onAboveWriteBufferHighWatermark() override {\n    upstream_request_.onAboveWriteBufferHighWatermark();\n  }\n\n  void onBelowWriteBufferLowWatermark() override {\n    upstream_request_.onBelowWriteBufferLowWatermark();\n  }\n\nprivate:\n  Router::UpstreamToDownstream& upstream_request_;\n  Envoy::Http::RequestEncoder* request_encoder_{};\n};\n\n} // namespace Http\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/tcp/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\n        \"config.cc\",\n    ],\n    hdrs = [\n        \"config.h\",\n    ],\n    security_posture = \"robust_to_untrusted_downstream\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":upstream_request_lib\",\n        \"@envoy_api//envoy/extensions/upstreams/http/tcp/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"upstream_request_lib\",\n    srcs = [\n        \"upstream_request.cc\",\n    ],\n    hdrs = [\n        \"upstream_request.h\",\n    ],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/network:application_protocol_lib\",\n        \"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/router:router_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/extensions/common/proxy_protocol:proxy_protocol_header_lib\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/upstreams/http/tcp/config.cc",
    "content": "#include \"extensions/upstreams/http/tcp/config.h\"\n\n#include \"extensions/upstreams/http/tcp/upstream_request.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Tcp {\n\nRouter::GenericConnPoolPtr TcpGenericConnPoolFactory::createGenericConnPool(\n    Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry,\n    absl::optional<Envoy::Http::Protocol> downstream_protocol,\n    Upstream::LoadBalancerContext* ctx) const {\n  auto ret = std::make_unique<TcpConnPool>(cm, is_connect, route_entry, downstream_protocol, ctx);\n  return (ret->valid() ? std::move(ret) : nullptr);\n}\n\nREGISTER_FACTORY(TcpGenericConnPoolFactory, Router::GenericConnPoolFactory);\n\n} // namespace Tcp\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/tcp/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/router/router.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Tcp {\n\n/**\n * Config registration for the TcpConnPool. @see Router::GenericConnPoolFactory\n */\nclass TcpGenericConnPoolFactory : public Router::GenericConnPoolFactory {\npublic:\n  std::string name() const override { return \"envoy.filters.connection_pools.http.tcp\"; }\n  std::string category() const override { return \"envoy.upstreams\"; }\n  Router::GenericConnPoolPtr\n  createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect,\n                        const Router::RouteEntry& route_entry,\n                        absl::optional<Envoy::Http::Protocol> downstream_protocol,\n                        Upstream::LoadBalancerContext* ctx) const override;\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<envoy::extensions::upstreams::http::tcp::v3::TcpConnectionPoolProto>();\n  }\n};\n\nDECLARE_FACTORY(TcpGenericConnPoolFactory);\n\n} // namespace Tcp\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/tcp/upstream_request.cc",
    "content": "#include \"extensions/upstreams/http/tcp/upstream_request.h\"\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/router/router.h\"\n\n#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Tcp {\n\nvoid TcpConnPool::onPoolReady(Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& conn_data,\n                              Upstream::HostDescriptionConstSharedPtr host) {\n  upstream_handle_ = nullptr;\n  Network::Connection& latched_conn = conn_data->connection();\n  auto upstream =\n      std::make_unique<TcpUpstream>(&callbacks_->upstreamToDownstream(), std::move(conn_data));\n  callbacks_->onPoolReady(std::move(upstream), host, latched_conn.localAddress(),\n                          latched_conn.streamInfo());\n}\n\nTcpUpstream::TcpUpstream(Router::UpstreamToDownstream* upstream_request,\n                         Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& upstream)\n    : upstream_request_(upstream_request), upstream_conn_data_(std::move(upstream)) {\n  upstream_conn_data_->connection().enableHalfClose(true);\n  upstream_conn_data_->addUpstreamCallbacks(*this);\n}\n\nvoid TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) {\n  upstream_conn_data_->connection().write(data, end_stream);\n}\n\nvoid TcpUpstream::encodeHeaders(const Envoy::Http::RequestHeaderMap&, bool end_stream) {\n  // Headers should only happen once, so use this opportunity to add the proxy\n  // proto header, if configured.\n  ASSERT(upstream_request_->routeEntry().connectConfig().has_value());\n  Buffer::OwnedImpl 
data;\n  auto& connect_config = upstream_request_->routeEntry().connectConfig().value();\n  if (connect_config.has_proxy_protocol_config()) {\n    Extensions::Common::ProxyProtocol::generateProxyProtoHeader(\n        connect_config.proxy_protocol_config(), upstream_request_->connection(), data);\n  }\n\n  if (data.length() != 0 || end_stream) {\n    upstream_conn_data_->connection().write(data, end_stream);\n  }\n\n  // TcpUpstream::encodeHeaders is called after the UpstreamRequest is fully initialized. Also use\n  // this time to synthesize the 200 response headers downstream to complete the CONNECT handshake.\n  Envoy::Http::ResponseHeaderMapPtr headers{\n      Envoy::Http::createHeaderMap<Envoy::Http::ResponseHeaderMapImpl>(\n          {{Envoy::Http::Headers::get().Status, \"200\"}})};\n  upstream_request_->decodeHeaders(std::move(headers), false);\n}\n\nvoid TcpUpstream::encodeTrailers(const Envoy::Http::RequestTrailerMap&) {\n  Buffer::OwnedImpl data;\n  upstream_conn_data_->connection().write(data, true);\n}\n\nvoid TcpUpstream::readDisable(bool disable) {\n  if (upstream_conn_data_->connection().state() != Network::Connection::State::Open) {\n    return;\n  }\n  upstream_conn_data_->connection().readDisable(disable);\n}\n\nvoid TcpUpstream::resetStream() {\n  upstream_request_ = nullptr;\n  upstream_conn_data_->connection().close(Network::ConnectionCloseType::NoFlush);\n}\n\nvoid TcpUpstream::onUpstreamData(Buffer::Instance& data, bool end_stream) {\n  upstream_request_->decodeData(data, end_stream);\n}\n\nvoid TcpUpstream::onEvent(Network::ConnectionEvent event) {\n  if (event != Network::ConnectionEvent::Connected && upstream_request_) {\n    upstream_request_->onResetStream(Envoy::Http::StreamResetReason::ConnectionTermination, \"\");\n  }\n}\n\nvoid TcpUpstream::onAboveWriteBufferHighWatermark() {\n  if (upstream_request_) {\n    upstream_request_->onAboveWriteBufferHighWatermark();\n  }\n}\n\nvoid TcpUpstream::onBelowWriteBufferLowWatermark() {\n  if 
(upstream_request_) {\n    upstream_request_->onBelowWriteBufferLowWatermark();\n  }\n}\n\n} // namespace Tcp\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/upstreams/http/tcp/upstream_request.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/http/codec.h\"\n#include \"envoy/tcp/conn_pool.h\"\n\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/router/upstream_request.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Tcp {\n\nclass TcpConnPool : public Router::GenericConnPool, public Envoy::Tcp::ConnectionPool::Callbacks {\npublic:\n  TcpConnPool(Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry,\n              absl::optional<Envoy::Http::Protocol>, Upstream::LoadBalancerContext* ctx) {\n    ASSERT(is_connect);\n    conn_pool_ = cm.tcpConnPoolForCluster(route_entry.clusterName(),\n                                          Upstream::ResourcePriority::Default, ctx);\n  }\n  void newStream(Router::GenericConnectionPoolCallbacks* callbacks) override {\n    callbacks_ = callbacks;\n    upstream_handle_ = conn_pool_->newConnection(*this);\n  }\n\n  bool cancelAnyPendingStream() override {\n    if (upstream_handle_) {\n      upstream_handle_->cancel(Envoy::Tcp::ConnectionPool::CancelPolicy::Default);\n      upstream_handle_ = nullptr;\n      return true;\n    }\n    return false;\n  }\n  absl::optional<Envoy::Http::Protocol> protocol() const override { return absl::nullopt; }\n  Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); }\n\n  bool valid() { return conn_pool_ != nullptr; }\n\n  // Tcp::ConnectionPool::Callbacks\n  void onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                     Upstream::HostDescriptionConstSharedPtr host) override {\n    upstream_handle_ = nullptr;\n    callbacks_->onPoolFailure(reason, \"\", host);\n  }\n\n  void 
onPoolReady(Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& conn_data,\n                   Upstream::HostDescriptionConstSharedPtr host) override;\n\nprivate:\n  Envoy::Tcp::ConnectionPool::Instance* conn_pool_;\n  Envoy::Tcp::ConnectionPool::Cancellable* upstream_handle_{};\n  Router::GenericConnectionPoolCallbacks* callbacks_{};\n};\n\nclass TcpUpstream : public Router::GenericUpstream,\n                    public Envoy::Tcp::ConnectionPool::UpstreamCallbacks {\npublic:\n  TcpUpstream(Router::UpstreamToDownstream* upstream_request,\n              Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& upstream);\n\n  // GenericUpstream\n  void encodeData(Buffer::Instance& data, bool end_stream) override;\n  void encodeMetadata(const Envoy::Http::MetadataMapVector&) override {}\n  void encodeHeaders(const Envoy::Http::RequestHeaderMap&, bool end_stream) override;\n  void encodeTrailers(const Envoy::Http::RequestTrailerMap&) override;\n  void readDisable(bool disable) override;\n  void resetStream() override;\n\n  // Tcp::ConnectionPool::UpstreamCallbacks\n  void onUpstreamData(Buffer::Instance& data, bool end_stream) override;\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override;\n  void onBelowWriteBufferLowWatermark() override;\n\nprivate:\n  Router::UpstreamToDownstream* upstream_request_;\n  Envoy::Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_;\n};\n\n} // namespace Tcp\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/watchdog/abort_action/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"abort_action_lib\",\n    srcs = [\"abort_action.cc\"],\n    hdrs = [\"abort_action.h\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/server:guarddog_config_interface\",\n        \"//include/envoy/thread:thread_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/extensions/watchdog/abort_action/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"data_plane_agnostic\",\n    status = \"alpha\",\n    deps = [\n        \":abort_action_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"@envoy_api//envoy/extensions/watchdog/abort_action/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/watchdog/abort_action/abort_action.cc",
    "content": "#include \"extensions/watchdog/abort_action/abort_action.h\"\n\n#include <sys/types.h>\n\n#include <csignal>\n\n#include \"envoy/thread/thread.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace AbortAction {\nnamespace {\n#ifdef __linux__\npid_t toPlatformTid(int64_t tid) { return static_cast<pid_t>(tid); }\n#elif defined(__APPLE__)\nuint64_t toPlatformTid(int64_t tid) { return static_cast<uint64_t>(tid); }\n#endif\n} // namespace\n\nAbortAction::AbortAction(\n    envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig& config,\n    Server::Configuration::GuardDogActionFactoryContext& /*context*/)\n    : config_(config){};\n\nvoid AbortAction::run(\n    envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent /*event*/,\n    const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs,\n    MonotonicTime /*now*/) {\n\n  if (thread_last_checkin_pairs.empty()) {\n    ENVOY_LOG_MISC(warn, \"Watchdog AbortAction called without any thread.\");\n    return;\n  }\n\n  // The following lines of code won't be considered covered by code coverage\n  // tools since they would run in DEATH tests.\n  int64_t raw_tid = thread_last_checkin_pairs[0].first.getId();\n\n  // Assume POSIX-compatible system and signal to the thread.\n  ENVOY_LOG_MISC(error, \"Watchdog AbortAction sending abort signal to thread with tid {}.\",\n                 raw_tid);\n\n  if (kill(toPlatformTid(raw_tid), SIGABRT) == 0) {\n    // Successfully sent signal, sleep for wait_duration.\n    absl::SleepFor(absl::Milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config_, wait_duration, 0)));\n  } else {\n    // Failed to send the signal, abort?\n    ENVOY_LOG_MISC(error, \"Failed to send signal to tid {}\", raw_tid);\n  }\n\n  // Abort from the action since the signaled 
thread hasn't yet crashed the process.\n  // panicking in the action gives flexibility since it doesn't depend on\n  // external code to kill the process if the signal fails.\n  PANIC(fmt::format(\"Failed to kill thread with id {}, aborting from Watchdog AbortAction instead.\",\n                    raw_tid));\n}\n\n} // namespace AbortAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/watchdog/abort_action/abort_action.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/extensions/watchdog/abort_action/v3alpha/abort_action.pb.h\"\n#include \"envoy/server/guarddog_config.h\"\n#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace AbortAction {\n\n/**\n * A GuardDogAction that will terminate the process by sending SIGABRT to the\n * stuck thread. This is currently only implemented for systems that\n * support kill to send signals.\n */\nclass AbortAction : public Server::Configuration::GuardDogAction {\npublic:\n  AbortAction(envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig& config,\n              Server::Configuration::GuardDogActionFactoryContext& context);\n\n  void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event,\n           const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs,\n           MonotonicTime now) override;\n\nprivate:\n  const envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig config_;\n};\n\nusing AbortActionPtr = std::unique_ptr<AbortAction>;\n\n} // namespace AbortAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/watchdog/abort_action/config.cc",
    "content": "#include \"extensions/watchdog/abort_action/config.h\"\n\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n\n#include \"extensions/watchdog/abort_action/abort_action.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace AbortAction {\n\nServer::Configuration::GuardDogActionPtr AbortActionFactory::createGuardDogActionFromProto(\n    const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config,\n    Server::Configuration::GuardDogActionFactoryContext& context) {\n  auto message = createEmptyConfigProto();\n  Config::Utility::translateOpaqueConfig(config.config().typed_config(), ProtobufWkt::Struct(),\n                                         ProtobufMessage::getStrictValidationVisitor(), *message);\n  return std::make_unique<AbortAction>(dynamic_cast<AbortActionConfig&>(*message), context);\n}\n\n/**\n * Static registration for the AbortAction factory. @see RegistryFactory.\n */\nREGISTER_FACTORY(AbortActionFactory, Server::Configuration::GuardDogActionFactory);\n\n} // namespace AbortAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/watchdog/abort_action/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/watchdog/abort_action/v3alpha/abort_action.pb.h\"\n#include \"envoy/server/guarddog_config.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace AbortAction {\n\nclass AbortActionFactory : public Server::Configuration::GuardDogActionFactory {\npublic:\n  AbortActionFactory() = default;\n\n  Server::Configuration::GuardDogActionPtr createGuardDogActionFromProto(\n      const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config,\n      Server::Configuration::GuardDogActionFactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<AbortActionConfig>();\n  }\n\n  std::string name() const override { return \"envoy.watchdog.abort_action\"; }\n\n  using AbortActionConfig = envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig;\n};\n\n} // namespace AbortAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/watchdog/profile_action/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_extension\",\n    \"envoy_cc_library\",\n    \"envoy_extension_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_extension_package()\n\nenvoy_cc_library(\n    name = \"profile_action_lib\",\n    srcs = [\"profile_action.cc\"],\n    hdrs = [\"profile_action.h\"],\n    external_deps = [\n        \"abseil_optional\",\n    ],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/server:guarddog_config_interface\",\n        \"//include/envoy/thread:thread_interface\",\n        \"//source/common/profiler:profiler_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_extension(\n    name = \"config\",\n    srcs = [\"config.cc\"],\n    hdrs = [\"config.h\"],\n    security_posture = \"data_plane_agnostic\",\n    status = \"alpha\",\n    deps = [\n        \":profile_action_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/extensions/watchdog/profile_action/config.cc",
    "content": "#include \"extensions/watchdog/profile_action/config.h\"\n\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n\n#include \"extensions/watchdog/profile_action/profile_action.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace ProfileAction {\n\nServer::Configuration::GuardDogActionPtr ProfileActionFactory::createGuardDogActionFromProto(\n    const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config,\n    Server::Configuration::GuardDogActionFactoryContext& context) {\n  auto message = createEmptyConfigProto();\n  Config::Utility::translateOpaqueConfig(config.config().typed_config(), ProtobufWkt::Struct(),\n                                         ProtobufMessage::getStrictValidationVisitor(), *message);\n  return std::make_unique<ProfileAction>(dynamic_cast<ProfileActionConfig&>(*message), context);\n}\n\n/**\n * Static registration for the ProfileAction factory. @see RegistryFactory.\n */\nREGISTER_FACTORY(ProfileActionFactory, Server::Configuration::GuardDogActionFactory);\n\n} // namespace ProfileAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/watchdog/profile_action/config.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h\"\n#include \"envoy/server/guarddog_config.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace ProfileAction {\n\nclass ProfileActionFactory : public Server::Configuration::GuardDogActionFactory {\npublic:\n  ProfileActionFactory() = default;\n\n  Server::Configuration::GuardDogActionPtr createGuardDogActionFromProto(\n      const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config,\n      Server::Configuration::GuardDogActionFactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ProfileActionConfig>();\n  }\n\n  std::string name() const override { return \"envoy.watchdog.profile_action\"; }\n\nprivate:\n  using ProfileActionConfig =\n      envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig;\n};\n\n} // namespace ProfileAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/watchdog/profile_action/profile_action.cc",
    "content": "#include \"extensions/watchdog/profile_action/profile_action.h\"\n\n#include <chrono>\n\n#include \"envoy/thread/thread.h\"\n\n#include \"common/profiler/profiler.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"absl/strings/str_format.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace ProfileAction {\nnamespace {\nstatic constexpr uint64_t DefaultMaxProfiles = 10;\n\nstd::string generateProfileFilePath(const std::string& directory, const SystemTime& now) {\n  auto timestamp = std::chrono::duration_cast<std::chrono::seconds>(now.time_since_epoch()).count();\n  if (absl::EndsWith(directory, \"/\")) {\n    return absl::StrFormat(\"%s%s.%d\", directory, \"ProfileAction\", timestamp);\n  }\n  return absl::StrFormat(\"%s/%s.%d\", directory, \"ProfileAction\", timestamp);\n}\n} // namespace\n\nProfileAction::ProfileAction(\n    envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig& config,\n    Server::Configuration::GuardDogActionFactoryContext& context)\n    : path_(config.profile_path()),\n      duration_(\n          std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, profile_duration, 5000))),\n      max_profiles_(config.max_profiles() == 0 ? 
DefaultMaxProfiles : config.max_profiles()),\n      profiles_attempted_(context.stats_.counterFromStatName(\n          Stats::StatNameManagedStorage(\n              absl::StrCat(context.guarddog_name_, \".profile_action.attempted\"),\n              context.stats_.symbolTable())\n              .statName())),\n      profiles_successfully_captured_(context.stats_.counterFromStatName(\n          Stats::StatNameManagedStorage(\n              absl::StrCat(context.guarddog_name_, \".profile_action.successfully_captured\"),\n              context.stats_.symbolTable())\n              .statName())),\n      context_(context), timer_cb_(context_.dispatcher_.createTimer([this] {\n        if (Profiler::Cpu::profilerEnabled()) {\n          Profiler::Cpu::stopProfiler();\n          running_profile_ = false;\n        } else {\n          ENVOY_LOG_MISC(error,\n                         \"Profile Action's stop() was scheduled, but profiler isn't running!\");\n          return;\n        }\n\n        if (!context_.api_.fileSystem().fileExists(profile_filename_)) {\n          ENVOY_LOG_MISC(error, \"Profile file {} wasn't created!\", profile_filename_);\n        } else {\n          profiles_successfully_captured_.inc();\n        }\n      })) {}\n\nvoid ProfileAction::run(\n    envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent /*event*/,\n    const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs,\n    MonotonicTime /*now*/) {\n  if (running_profile_) {\n    return;\n  }\n  profiles_attempted_.inc();\n\n  // Check if there's a tid that justifies profiling\n  if (thread_last_checkin_pairs.empty()) {\n    ENVOY_LOG_MISC(warn, \"Profile Action: No tids were provided.\");\n    return;\n  }\n\n  if (profiles_started_ >= max_profiles_) {\n    ENVOY_LOG_MISC(warn,\n                   \"Profile Action: Unable to profile: enabled but already wrote {} profiles.\",\n                   profiles_started_);\n    return;\n  }\n\n  auto& fs = 
context_.api_.fileSystem();\n  if (!fs.directoryExists(path_)) {\n    ENVOY_LOG_MISC(error, \"Profile Action: Directory path {} doesn't exist.\", path_);\n    return;\n  }\n\n  // Generate file path for output and try to profile\n  profile_filename_ = generateProfileFilePath(path_, context_.api_.timeSource().systemTime());\n\n  if (!Profiler::Cpu::profilerEnabled()) {\n    if (Profiler::Cpu::startProfiler(profile_filename_)) {\n      // Update state\n      running_profile_ = true;\n      ++profiles_started_;\n\n      // Schedule callback to stop\n      timer_cb_->enableTimer(duration_);\n    } else {\n      ENVOY_LOG_MISC(error, \"Profile Action failed to start the profiler.\");\n    }\n  } else {\n    ENVOY_LOG_MISC(error, \"Profile Action unable to start the profiler as it is in use elsewhere.\");\n  }\n}\n\n} // namespace ProfileAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/extensions/watchdog/profile_action/profile_action.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h\"\n#include \"envoy/server/guarddog_config.h\"\n#include \"envoy/thread/thread.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace ProfileAction {\n\n/**\n * A GuardDogAction that will start CPU profiling.\n */\nclass ProfileAction : public Server::Configuration::GuardDogAction {\npublic:\n  ProfileAction(envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig& config,\n                Server::Configuration::GuardDogActionFactoryContext& context);\n\n  void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event,\n           const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs,\n           MonotonicTime now) override;\n\nprivate:\n  const std::string path_;\n  const std::chrono::milliseconds duration_;\n  const uint64_t max_profiles_;\n  bool running_profile_ = false;\n  std::string profile_filename_;\n  Stats::Counter& profiles_attempted_;\n  Stats::Counter& profiles_successfully_captured_;\n  uint64_t profiles_started_ = 0;\n  Server::Configuration::GuardDogActionFactoryContext& context_;\n  Event::TimerPtr timer_cb_;\n};\n\nusing ProfileActionPtr = std::unique_ptr<ProfileAction>;\n\n} // namespace ProfileAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n    \"envoy_select_hot_restart\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"backtrace_lib\",\n    srcs = [\"backtrace.cc\"],\n    hdrs = [\"backtrace.h\"],\n    external_deps = [\n        \"abseil_stacktrace\",\n        \"abseil_symbolize\",\n    ],\n    tags = [\"backtrace\"],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/version:version_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"configuration_lib\",\n    srcs = [\"configuration_impl.cc\"],\n    hdrs = [\"configuration_impl.h\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:configuration_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//include/envoy/server:tracer_config_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:runtime_utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/network:default_socket_interface_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/common/network:socket_interface_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:socket_option_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n  
      \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"connection_handler_lib\",\n    srcs = [\"connection_handler_impl.cc\"],\n    hdrs = [\"connection_handler_impl.h\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:connection_handler_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:exception_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//include/envoy/network:listener_interface\",\n        \"//include/envoy/server:active_udp_listener_config_interface\",\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:non_copyable\",\n        \"//source/common/event:deferred_task\",\n        \"//source/common/network:connection_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/transport_sockets:well_known_names\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"drain_manager_lib\",\n    srcs = [\"drain_manager_impl.cc\"],\n    hdrs = [\n        \"drain_manager_impl.h\",\n    ],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/server:drain_manager_interface\",\n        \"//include/envoy/server:instance_interface\",\n        
\"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"guarddog_lib\",\n    srcs = [\"guarddog_impl.cc\"],\n    hdrs = [\"guarddog_impl.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":watchdog_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/server:configuration_interface\",\n        \"//include/envoy/server:guarddog_config_interface\",\n        \"//include/envoy/server:guarddog_interface\",\n        \"//include/envoy/server:watchdog_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/thread:thread_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/event:libevent_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"hot_restart\",\n    srcs = [\"hot_restart.proto\"],\n)\n\nenvoy_cc_library(\n    name = \"hot_restarting_base\",\n    srcs = envoy_select_hot_restart([\"hot_restarting_base.cc\"]),\n    hdrs = envoy_select_hot_restart([\"hot_restarting_base.h\"]),\n    deps = [\n        \":hot_restart_cc_proto\",\n        \"//include/envoy/api:os_sys_calls_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:file_event_interface\",\n        \"//include/envoy/server:hot_restart_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//include/envoy/server:options_interface\",\n        
\"//include/envoy/stats:stats_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hot_restarting_child\",\n    srcs = envoy_select_hot_restart([\"hot_restarting_child.cc\"]),\n    hdrs = envoy_select_hot_restart([\"hot_restarting_child.h\"]),\n    deps = [\n        \":hot_restarting_base\",\n        \"//source/common/stats:stat_merger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hot_restarting_parent\",\n    srcs = envoy_select_hot_restart([\"hot_restarting_parent.cc\"]),\n    hdrs = envoy_select_hot_restart([\"hot_restarting_parent.h\"]),\n    deps = [\n        \":hot_restarting_base\",\n        \":listener_manager_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//source/common/stats:stat_merger_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/common/stats:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hot_restart_lib\",\n    srcs = envoy_select_hot_restart([\"hot_restart_impl.cc\"]),\n    hdrs = envoy_select_hot_restart([\"hot_restart_impl.h\"]),\n    deps = [\n        \":hot_restarting_child\",\n        \":hot_restarting_parent\",\n        \"//include/envoy/api:os_sys_calls_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:file_event_interface\",\n        \"//include/envoy/server:hot_restart_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//include/envoy/server:options_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/stats:allocator_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"hot_restart_nop_lib\",\n    hdrs = [\"hot_restart_nop_impl.h\"],\n    deps = [\n        
\"//include/envoy/server:hot_restart_interface\",\n        \"//source/common/stats:allocator_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"options_lib\",\n    srcs = [\"options_impl.cc\"] + select({\n        \"//bazel:linux_x86_64\": [\"options_impl_platform_linux.cc\"],\n        \"//bazel:linux_aarch64\": [\"options_impl_platform_linux.cc\"],\n        \"//bazel:linux_ppc\": [\"options_impl_platform_linux.cc\"],\n        \"//bazel:linux_mips64\": [\"options_impl_platform_linux.cc\"],\n        \"//conditions:default\": [\"options_impl_platform_default.cc\"],\n    }),\n    hdrs = [\n        \"options_impl.h\",\n        \"options_impl_platform.h\",\n    ] + select({\n        \"//bazel:linux_x86_64\": [\"options_impl_platform_linux.h\"],\n        \"//bazel:linux_aarch64\": [\"options_impl_platform_linux.h\"],\n        \"//bazel:linux_ppc\": [\"options_impl_platform_linux.h\"],\n        \"//bazel:linux_mips64\": [\"options_impl_platform_linux.h\"],\n        \"//conditions:default\": [],\n    }),\n    # TCLAP command line parser needs this to support int64_t/uint64_t in several build environments.\n    copts = [\"-DHAVE_LONG_LONG\"],\n    external_deps = [\"tclap\"],\n    deps = [\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:options_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/version:version_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"overload_manager_lib\",\n    srcs = [\"overload_manager_impl.cc\"],\n    hdrs = [\"overload_manager_impl.h\"],\n    deps = [\n        \"//include/envoy/server:overload_manager_interface\",\n      
  \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/server:resource_monitor_config_lib\",\n        \"@envoy_api//envoy/config/overload/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"lds_api_lib\",\n    srcs = [\"lds_api.cc\"],\n    hdrs = [\"lds_api.h\"],\n    deps = [\n        \"//include/envoy/config:subscription_factory_interface\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:subscription_base_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\n# TODO(junr03): actually separate this lib from the listener and api listener lib.\n# this can be done if the parent_ in the listener and the api listener becomes the ListenerManager interface.\n# the issue right now is that the listener's reach into the listener manager's server_ instance variable.\nenvoy_cc_library(\n    name = \"listener_manager_lib\",\n    srcs = [\n        \"api_listener_impl.cc\",\n        \"listener_impl.cc\",\n        \"listener_manager_impl.cc\",\n    ],\n    hdrs = [\n        \"api_listener_impl.h\",\n        
\"listener_impl.h\",\n        \"listener_manager_impl.h\",\n    ],\n    deps = [\n        \":configuration_lib\",\n        \":drain_manager_lib\",\n        \":filter_chain_manager_lib\",\n        \":lds_api_lib\",\n        \":transport_socket_config_lib\",\n        \":well_known_names_lib\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:udp_packet_writer_config_interface\",\n        \"//include/envoy/server:active_udp_listener_config_interface\",\n        \"//include/envoy/server:api_listener_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/server:worker_interface\",\n        \"//source/common/access_log:access_log_lib\",\n        \"//source/common/common:basic_resource_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/http:conn_manager_lib\",\n        \"//source/common/init:manager_lib\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/network:connection_balancer_lib\",\n        \"//source/common/network:filter_matcher_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:listener_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/filters/listener:well_known_names\",\n        \"//source/extensions/filters/network/http_connection_manager:config\",\n        
\"//source/extensions/transport_sockets:well_known_names\",\n        \"//source/extensions/upstreams/http/generic:config\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2/listener:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/listener/proxy_protocol/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_chain_manager_lib\",\n    srcs = [\"filter_chain_manager_impl.cc\"],\n    hdrs = [\"filter_chain_manager_impl.h\"],\n    deps = [\n        \":filter_chain_factory_context_callback\",\n        \"//include/envoy/server:instance_interface\",\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/init:manager_lib\",\n        \"//source/common/network:cidr_range_lib\",\n        \"//source/common/network:lc_trie_lib\",\n        \"//source/server:configuration_lib\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"process_context_lib\",\n    hdrs = [\"process_context_impl.h\"],\n    deps = [\"//include/envoy/server:process_context_interface\"],\n)\n\nenvoy_cc_library(\n    name = \"proto_descriptors_lib\",\n    srcs = [\"proto_descriptors.cc\"],\n    hdrs = [\"proto_descriptors.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/protobuf\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"resource_monitor_config_lib\",\n    hdrs = [\"resource_monitor_config_impl.h\"],\n    deps = [\n        \"//include/envoy/server:resource_monitor_config_interface\",\n    
],\n)\n\nenvoy_cc_library(\n    name = \"server_lib\",\n    srcs = [\"server.cc\"],\n    hdrs = [\"server.h\"],\n    external_deps = [\n        \"abseil_node_hash_map\",\n        \"abseil_optional\",\n    ],\n    deps = [\n        \":active_raw_udp_listener_config\",\n        \":configuration_lib\",\n        \":connection_handler_lib\",\n        \":guarddog_lib\",\n        \":listener_hooks_lib\",\n        \":listener_manager_lib\",\n        \":ssl_context_manager_lib\",\n        \":worker_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:signal_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/server:bootstrap_extension_config_interface\",\n        \"//include/envoy/server:drain_manager_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//include/envoy/server:options_interface\",\n        \"//include/envoy/server:process_context_interface\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/access_log:access_log_manager_lib\",\n        \"//source/common/api:api_lib\",\n        \"//source/common/common:cleanup_lib\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:mutex_tracer_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/grpc:async_client_manager_lib\",\n        \"//source/common/grpc:context_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/init:manager_lib\",\n        \"//source/common/local_info:local_info_lib\",\n        \"//source/common/memory:heap_shrinker_lib\",\n        
\"//source/common/memory:stats_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/router:rds_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/secret:secret_manager_impl_lib\",\n        \"//source/common/singleton:manager_impl_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//source/common/upstream:cluster_manager_lib\",\n        \"//source/common/upstream:health_discovery_service_lib\",\n        \"//source/common/version:version_lib\",\n        \"//source/server:overload_manager_lib\",\n        \"//source/server/admin:admin_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"ssl_context_manager_lib\",\n    srcs = [\"ssl_context_manager.cc\"],\n    hdrs = [\"ssl_context_manager.h\"],\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/ssl:context_manager_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"listener_hooks_lib\",\n    hdrs = [\"listener_hooks.h\"],\n)\n\nenvoy_cc_library(\n    name = \"watchdog_lib\",\n    srcs = [\"watchdog_impl.cc\"],\n    hdrs = [\"watchdog_impl.h\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/server:watchdog_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"worker_lib\",\n    srcs = [\"worker_impl.cc\"],\n    hdrs = [\"worker_impl.h\"],\n    deps = [\n        \":connection_handler_lib\",\n        \":listener_hooks_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:exception_interface\",\n        
\"//include/envoy/server:configuration_interface\",\n        \"//include/envoy/server:guarddog_interface\",\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//include/envoy/server:worker_interface\",\n        \"//include/envoy/thread:thread_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"transport_socket_config_lib\",\n    hdrs = [\"transport_socket_config_impl.h\"],\n    deps = [\n        \"//include/envoy/server:transport_socket_config_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"well_known_names_lib\",\n    hdrs = [\"well_known_names.h\"],\n    deps = [\"//source/common/singleton:const_singleton\"],\n)\n\nenvoy_cc_library(\n    name = \"active_raw_udp_listener_config\",\n    srcs = [\"active_raw_udp_listener_config.cc\"],\n    hdrs = [\"active_raw_udp_listener_config.h\"],\n    deps = [\n        \":connection_handler_lib\",\n        \":well_known_names_lib\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:active_udp_listener_config_interface\",\n        \"@envoy_api//envoy/api/v2/listener:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"filter_chain_factory_context_callback\",\n    hdrs = [\"filter_chain_factory_context_callback.h\"],\n    deps = [\n        \"//include/envoy/server:filter_config_interface\",\n        \"@envoy_api//envoy/api/v2/listener:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/server/active_raw_udp_listener_config.cc",
    "content": "#include \"server/active_raw_udp_listener_config.h\"\n\n#include <memory>\n#include <string>\n\n#include \"envoy/api/v2/listener/listener.pb.h\"\n\n#include \"server/connection_handler_impl.h\"\n#include \"server/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nActiveRawUdpListenerFactory::ActiveRawUdpListenerFactory(uint32_t concurrency)\n    : concurrency_(concurrency) {}\n\nNetwork::ConnectionHandler::ActiveUdpListenerPtr\nActiveRawUdpListenerFactory::createActiveUdpListener(uint32_t worker_index,\n                                                     Network::ConnectionHandler& parent,\n                                                     Event::Dispatcher& dispatcher,\n                                                     Network::ListenerConfig& config) {\n  return std::make_unique<ActiveRawUdpListener>(worker_index, concurrency_, parent, dispatcher,\n                                                config);\n}\n\nProtobufTypes::MessagePtr ActiveRawUdpListenerConfigFactory::createEmptyConfigProto() {\n  return std::make_unique<envoy::config::listener::v3::ActiveRawUdpListenerConfig>();\n}\n\nNetwork::ActiveUdpListenerFactoryPtr\nActiveRawUdpListenerConfigFactory::createActiveUdpListenerFactory(\n    const Protobuf::Message& /*message*/, uint32_t concurrency) {\n  return std::make_unique<Server::ActiveRawUdpListenerFactory>(concurrency);\n}\n\nstd::string ActiveRawUdpListenerConfigFactory::name() const {\n  return UdpListenerNames::get().RawUdp;\n}\n\nREGISTER_FACTORY(ActiveRawUdpListenerConfigFactory, Server::ActiveUdpListenerConfigFactory);\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/active_raw_udp_listener_config.h",
    "content": "#pragma once\n\n#include \"envoy/network/connection_handler.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/active_udp_listener_config.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ActiveRawUdpListenerFactory : public Network::ActiveUdpListenerFactory {\npublic:\n  ActiveRawUdpListenerFactory(uint32_t concurrency);\n\n  Network::ConnectionHandler::ActiveUdpListenerPtr\n  createActiveUdpListener(uint32_t worker_index, Network::ConnectionHandler& parent,\n                          Event::Dispatcher& disptacher, Network::ListenerConfig& config) override;\n\n  bool isTransportConnectionless() const override { return true; }\n\nprivate:\n  const uint32_t concurrency_;\n};\n\n// This class uses a protobuf config to create a UDP listener factory which\n// creates a Server::ConnectionHandlerImpl::ActiveUdpListener.\n// This is the default UDP listener if not specified in config.\nclass ActiveRawUdpListenerConfigFactory : public ActiveUdpListenerConfigFactory {\npublic:\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override;\n\n  Network::ActiveUdpListenerFactoryPtr\n  createActiveUdpListenerFactory(const Protobuf::Message&, uint32_t concurrency) override;\n\n  std::string name() const override;\n};\n\nDECLARE_FACTORY(ActiveRawUdpListenerConfigFactory);\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"admin_lib\",\n    srcs = [\"admin.cc\"],\n    hdrs = [\"admin.h\"],\n    deps = [\n        \":admin_filter_lib\",\n        \":clusters_handler_lib\",\n        \":config_dump_handler_lib\",\n        \":config_tracker_lib\",\n        \":init_dump_handler_lib\",\n        \":listeners_handler_lib\",\n        \":logs_handler_lib\",\n        \":profiling_handler_lib\",\n        \":runtime_handler_lib\",\n        \":server_cmd_handler_lib\",\n        \":server_info_handler_lib\",\n        \":stats_handler_lib\",\n        \":utils_lib\",\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:hot_restart_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//include/envoy/server:options_interface\",\n        \"//source/common/access_log:access_log_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:basic_resource_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:macros\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:mutex_tracer_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/formatter:substitution_formatter_lib\",\n        \"//source/common/html:utility_lib\",\n        \"//source/common/http:codes_lib\",\n        
\"//source/common/http:conn_manager_lib\",\n        \"//source/common/http:date_provider_lib\",\n        \"//source/common/http:default_server_string_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:request_id_extension_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/memory:utils_lib\",\n        \"//source/common/network:connection_balancer_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:raw_buffer_socket_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/router:config_lib\",\n        \"//source/common/router:scoped_config_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/access_loggers/file:file_access_log_lib\",\n        \"//source/server:listener_manager_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"admin_filter_lib\",\n    srcs = [\"admin_filter.cc\"],\n    hdrs = [\"admin_filter.h\"],\n    deps = [\n        \":utils_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"handler_ctx_lib\",\n    hdrs = [\"handler_ctx.h\"],\n    deps = [\n        \"//include/envoy/server:instance_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stats_handler_lib\",\n    srcs = [\"stats_handler.cc\"],\n    hdrs = [\"stats_handler.h\"],\n    deps = [\n        \":handler_ctx_lib\",\n        \":prometheus_stats_lib\",\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:admin_interface\",\n        
\"//include/envoy/server:instance_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/html:utility_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/stats:histogram_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"prometheus_stats_lib\",\n    srcs = [\"prometheus_stats.cc\"],\n    hdrs = [\"prometheus_stats.h\"],\n    deps = [\n        \":utils_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/stats:histogram_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"listeners_handler_lib\",\n    srcs = [\"listeners_handler.cc\"],\n    hdrs = [\"listeners_handler.h\"],\n    deps = [\n        \":handler_ctx_lib\",\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"runtime_handler_lib\",\n    srcs = [\"runtime_handler.cc\"],\n    hdrs = [\"runtime_handler.h\"],\n    deps = [\n        \":handler_ctx_lib\",\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"logs_handler_lib\",\n    srcs = [\"logs_handler.cc\"],\n    hdrs = [\"logs_handler.h\"],\n    deps = [\n        \":handler_ctx_lib\",\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        
\"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"profiling_handler_lib\",\n    srcs = [\"profiling_handler.cc\"],\n    hdrs = [\"profiling_handler.h\"],\n    deps = [\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/profiler:profiler_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"server_cmd_handler_lib\",\n    srcs = [\"server_cmd_handler.cc\"],\n    hdrs = [\"server_cmd_handler.h\"],\n    deps = [\n        \":handler_ctx_lib\",\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"server_info_handler_lib\",\n    srcs = [\"server_info_handler.cc\"],\n    hdrs = [\"server_info_handler.h\"],\n    deps = [\n        \":handler_ctx_lib\",\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//source/common/version:version_includes\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n 
   ],\n)\n\nenvoy_cc_library(\n    name = \"clusters_handler_lib\",\n    srcs = [\"clusters_handler.cc\"],\n    hdrs = [\"clusters_handler.h\"],\n    deps = [\n        \":handler_ctx_lib\",\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/upstream:host_utility_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"config_dump_handler_lib\",\n    srcs = [\"config_dump_handler.cc\"],\n    hdrs = [\"config_dump_handler.h\"],\n    deps = [\n        \":config_tracker_lib\",\n        \":handler_ctx_lib\",\n        \":utils_lib\",\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"init_dump_handler_lib\",\n    srcs = [\"init_dump_handler.cc\"],\n    hdrs = [\"init_dump_handler.h\"],\n    deps = [\n        \":handler_ctx_lib\",\n        \":utils_lib\",\n        \"//include/envoy/server:admin_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"utils_lib\",\n    srcs = [\"utils.cc\"],\n    hdrs = [\"utils.h\"],\n    deps = [\n        
\"//include/envoy/init:manager_interface\",\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"config_tracker_lib\",\n    srcs = [\"config_tracker_impl.cc\"],\n    hdrs = [\"config_tracker_impl.h\"],\n    deps = [\n        \"//include/envoy/server:config_tracker_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:macros\",\n    ],\n)\n"
  },
  {
    "path": "source/server/admin/admin.cc",
    "content": "#include \"server/admin/admin.h\"\n\n#include <algorithm>\n#include <cstdint>\n#include <fstream>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/server/hot_restart.h\"\n#include \"envoy/server/instance.h\"\n#include \"envoy/server/options.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/outlier_detection.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/access_log/access_log_impl.h\"\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/mutex_tracer_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/formatter/substitution_formatter.h\"\n#include \"common/html/utility.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/conn_manager_utility.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/memory/utils.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/config_impl.h\"\n\n#include \"server/admin/utils.h\"\n#include \"server/listener_impl.h\"\n\n#include \"extensions/access_loggers/file/file_access_log_impl.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/str_replace.h\"\n#include \"absl/strings/string_view.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\n\n/**\n * Favicon base64 image was harvested by screen-capturing the favicon from a Chrome tab\n * while visiting https://www.envoyproxy.io/. 
The resulting PNG was translated to base64\n * by dropping it into https://www.base64-image.de/ and then pasting the resulting string\n * below.\n *\n * The actual favicon source for that, https://www.envoyproxy.io/img/favicon.ico is nicer\n * because it's transparent, but is also 67646 bytes, which is annoying to inline. We could\n * just reference that rather than inlining it, but then the favicon won't work when visiting\n * the admin page from a network that can't see the internet.\n */\nconst char EnvoyFavicon[] =\n    \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAAAXNSR0IArs4c6QAAAARnQU1\"\n    \"BAACxjwv8YQUAAAAJcEhZcwAAEnQAABJ0Ad5mH3gAAAH9SURBVEhL7ZRdTttAFIUrUFaAX5w9gIhgUfzshFRK+gIbaVbA\"\n    \"zwaqCly1dSpKk5A485/YCdXpHTB4BsdgVe0bD0cZ3Xsm38yZ8byTUuJ/6g3wqqoBrBhPTzmmLfptMbAzttJTpTKAF2MWC\"\n    \"7ADCdNIwXZpvMMwayiIwwS874CcOc9VuQPR1dBBChPMITpFXXU45hukIIH6kHhzVqkEYB8F5HYGvZ5B7EvwmHt9K/59Cr\"\n    \"U3QbY2RNYaQPYmJc+jPIBICNCcg20ZsAsCPfbcrFlRF+cJZpvXSJt9yMTxO/IAzJrCOfhJXiOgFEX/SbZmezTWxyNk4Q9\"\n    \"anHMmjnzAhEyhAW8LCE6wl26J7ZFHH1FMYQxh567weQBOO1AW8D7P/UXAQySq/QvL8Fu9HfCEw4SKALm5BkC3bwjwhSKr\"\n    \"A5hYAMXTJnPNiMyRBVzVjcgCyHiSm+8P+WGlnmwtP2RzbCMiQJ0d2KtmmmPorRHEhfMROVfTG5/fYrF5iWXzE80tfy9WP\"\n    \"sCqx5Buj7FYH0LvDyHiqd+3otpsr4/fa5+xbEVQPfrYnntylQG5VGeMLBhgEfyE7o6e6qYzwHIjwl0QwXSvvTmrVAY4D5\"\n    \"ddvT64wV0jRrr7FekO/XEjwuwwhuw7Ef7NY+dlfXpLb06EtHUJdVbsxvNUqBrwj/QGeEUSfwBAkmWHn5Bb/gAAAABJRU5\"\n    \"ErkJggg==\";\n\nconst char AdminHtmlStart[] = R\"(\n<head>\n  <title>Envoy Admin</title>\n  <link rel='shortcut icon' type='image/png' href='@FAVICON@'/>\n  <style>\n    .home-table {\n      font-family: sans-serif;\n      font-size: medium;\n      border-collapse: collapse;\n    }\n\n    .home-row:nth-child(even) {\n      background-color: #dddddd;\n    }\n\n    .home-data {\n      border: 1px solid #dddddd;\n      text-align: left;\n      padding: 8px;\n    }\n\n    .home-form {\n      margin-bottom: 0;\n    }\n  
</style>\n</head>\n<body>\n  <table class='home-table'>\n    <thead>\n      <th class='home-data'>Command</th>\n      <th class='home-data'>Description</th>\n     </thead>\n     <tbody>\n)\";\n\nconst char AdminHtmlEnd[] = R\"(\n    </tbody>\n  </table>\n</body>\n)\";\n\n} // namespace\n\nConfigTracker& AdminImpl::getConfigTracker() { return config_tracker_; }\n\nAdminImpl::NullRouteConfigProvider::NullRouteConfigProvider(TimeSource& time_source)\n    : config_(new Router::NullConfigImpl()), time_source_(time_source) {}\n\nvoid AdminImpl::startHttpListener(const std::string& access_log_path,\n                                  const std::string& address_out_path,\n                                  Network::Address::InstanceConstSharedPtr address,\n                                  const Network::Socket::OptionsSharedPtr& socket_options,\n                                  Stats::ScopePtr&& listener_scope) {\n  // TODO(mattklein123): Allow admin to use normal access logger extension loading and avoid the\n  // hard dependency here.\n  access_logs_.emplace_back(new Extensions::AccessLoggers::File::FileAccessLog(\n      access_log_path, {}, Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(),\n      server_.accessLogManager()));\n  null_overload_manager_.start();\n  socket_ = std::make_shared<Network::TcpListenSocket>(address, socket_options, true);\n  socket_factory_ = std::make_shared<AdminListenSocketFactory>(socket_);\n  listener_ = std::make_unique<AdminListener>(*this, std::move(listener_scope));\n  if (!address_out_path.empty()) {\n    std::ofstream address_out_file(address_out_path);\n    if (!address_out_file) {\n      ENVOY_LOG(critical, \"cannot open admin address output file {} for writing.\",\n                address_out_path);\n    } else {\n      address_out_file << socket_->localAddress()->asString();\n    }\n  }\n}\n\nAdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server)\n    : server_(server), 
request_id_extension_(Http::RequestIDExtensionFactory::defaultInstance(\n                           server_.api().randomGenerator())),\n      profile_path_(profile_path),\n      stats_(Http::ConnectionManagerImpl::generateStats(\"http.admin.\", server_.stats())),\n      null_overload_manager_(server_.threadLocal()),\n      tracing_stats_(\n          Http::ConnectionManagerImpl::generateTracingStats(\"http.admin.\", no_op_store_)),\n      route_config_provider_(server.timeSource()),\n      scoped_route_config_provider_(server.timeSource()), clusters_handler_(server),\n      config_dump_handler_(config_tracker_, server), init_dump_handler_(server),\n      stats_handler_(server), logs_handler_(server), profiling_handler_(profile_path),\n      runtime_handler_(server), listeners_handler_(server), server_cmd_handler_(server),\n      server_info_handler_(server),\n      // TODO(jsedgwick) add /runtime_reset endpoint that removes all admin-set values\n      handlers_{\n          {\"/\", \"Admin home page\", MAKE_ADMIN_HANDLER(handlerAdminHome), false, false},\n          {\"/certs\", \"print certs on machine\",\n           MAKE_ADMIN_HANDLER(server_info_handler_.handlerCerts), false, false},\n          {\"/clusters\", \"upstream cluster status\",\n           MAKE_ADMIN_HANDLER(clusters_handler_.handlerClusters), false, false},\n          {\"/config_dump\", \"dump current Envoy configs (experimental)\",\n           MAKE_ADMIN_HANDLER(config_dump_handler_.handlerConfigDump), false, false},\n          {\"/init_dump\", \"dump current Envoy init manager information (experimental)\",\n           MAKE_ADMIN_HANDLER(init_dump_handler_.handlerInitDump), false, false},\n          {\"/contention\", \"dump current Envoy mutex contention stats (if enabled)\",\n           MAKE_ADMIN_HANDLER(stats_handler_.handlerContention), false, false},\n          {\"/cpuprofiler\", \"enable/disable the CPU profiler\",\n           MAKE_ADMIN_HANDLER(profiling_handler_.handlerCpuProfiler), false, 
true},\n          {\"/heapprofiler\", \"enable/disable the heap profiler\",\n           MAKE_ADMIN_HANDLER(profiling_handler_.handlerHeapProfiler), false, true},\n          {\"/healthcheck/fail\", \"cause the server to fail health checks\",\n           MAKE_ADMIN_HANDLER(server_cmd_handler_.handlerHealthcheckFail), false, true},\n          {\"/healthcheck/ok\", \"cause the server to pass health checks\",\n           MAKE_ADMIN_HANDLER(server_cmd_handler_.handlerHealthcheckOk), false, true},\n          {\"/help\", \"print out list of admin commands\", MAKE_ADMIN_HANDLER(handlerHelp), false,\n           false},\n          {\"/hot_restart_version\", \"print the hot restart compatibility version\",\n           MAKE_ADMIN_HANDLER(server_info_handler_.handlerHotRestartVersion), false, false},\n          {\"/logging\", \"query/change logging levels\",\n           MAKE_ADMIN_HANDLER(logs_handler_.handlerLogging), false, true},\n          {\"/memory\", \"print current allocation/heap usage\",\n           MAKE_ADMIN_HANDLER(server_info_handler_.handlerMemory), false, false},\n          {\"/quitquitquit\", \"exit the server\",\n           MAKE_ADMIN_HANDLER(server_cmd_handler_.handlerQuitQuitQuit), false, true},\n          {\"/reset_counters\", \"reset all counters to zero\",\n           MAKE_ADMIN_HANDLER(stats_handler_.handlerResetCounters), false, true},\n          {\"/drain_listeners\", \"drain listeners\",\n           MAKE_ADMIN_HANDLER(listeners_handler_.handlerDrainListeners), false, true},\n          {\"/server_info\", \"print server version/status information\",\n           MAKE_ADMIN_HANDLER(server_info_handler_.handlerServerInfo), false, false},\n          {\"/ready\", \"print server state, return 200 if LIVE, otherwise return 503\",\n           MAKE_ADMIN_HANDLER(server_info_handler_.handlerReady), false, false},\n          {\"/stats\", \"print server stats\", MAKE_ADMIN_HANDLER(stats_handler_.handlerStats), false,\n           false},\n          
{\"/stats/prometheus\", \"print server stats in prometheus format\",\n           MAKE_ADMIN_HANDLER(stats_handler_.handlerPrometheusStats), false, false},\n          {\"/stats/recentlookups\", \"Show recent stat-name lookups\",\n           MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookups), false, false},\n          {\"/stats/recentlookups/clear\", \"clear list of stat-name lookups and counter\",\n           MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsClear), false, true},\n          {\"/stats/recentlookups/disable\", \"disable recording of reset stat-name lookup names\",\n           MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsDisable), false, true},\n          {\"/stats/recentlookups/enable\", \"enable recording of reset stat-name lookup names\",\n           MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsEnable), false, true},\n          {\"/listeners\", \"print listener info\",\n           MAKE_ADMIN_HANDLER(listeners_handler_.handlerListenerInfo), false, false},\n          {\"/runtime\", \"print runtime values\", MAKE_ADMIN_HANDLER(runtime_handler_.handlerRuntime),\n           false, false},\n          {\"/runtime_modify\", \"modify runtime values\",\n           MAKE_ADMIN_HANDLER(runtime_handler_.handlerRuntimeModify), false, true},\n          {\"/reopen_logs\", \"reopen access logs\",\n           MAKE_ADMIN_HANDLER(logs_handler_.handlerReopenLogs), false, true},\n      },\n      date_provider_(server.dispatcher().timeSource()),\n      admin_filter_chain_(std::make_shared<AdminFilterChain>()),\n      local_reply_(LocalReply::Factory::createDefault()) {}\n\nHttp::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection,\n                                                 const Buffer::Instance& data,\n                                                 Http::ServerConnectionCallbacks& callbacks) {\n  return Http::ConnectionManagerUtility::autoCreateCodec(\n      connection, data, callbacks, 
server_.stats(), server_.api().randomGenerator(),\n      http1_codec_stats_, http2_codec_stats_, Http::Http1Settings(),\n      ::Envoy::Http2::Utility::initializeAndValidateOptions(\n          envoy::config::core::v3::Http2ProtocolOptions()),\n      maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction());\n}\n\nbool AdminImpl::createNetworkFilterChain(Network::Connection& connection,\n                                         const std::vector<Network::FilterFactoryCb>&) {\n  // Pass in the null overload manager so that the admin interface is accessible even when Envoy is\n  // overloaded.\n  connection.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl(\n      *this, server_.drainManager(), server_.api().randomGenerator(), server_.httpContext(),\n      server_.runtime(), server_.localInfo(), server_.clusterManager(), null_overload_manager_,\n      server_.timeSource())});\n  return true;\n}\n\nvoid AdminImpl::createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) {\n  callbacks.addStreamFilter(std::make_shared<AdminFilter>(createCallbackFunction()));\n}\n\nHttp::Code AdminImpl::runCallback(absl::string_view path_and_query,\n                                  Http::ResponseHeaderMap& response_headers,\n                                  Buffer::Instance& response, AdminStream& admin_stream) {\n\n  Http::Code code = Http::Code::OK;\n  bool found_handler = false;\n\n  std::string::size_type query_index = path_and_query.find('?');\n  if (query_index == std::string::npos) {\n    query_index = path_and_query.size();\n  }\n\n  for (const UrlHandler& handler : handlers_) {\n    if (path_and_query.compare(0, query_index, handler.prefix_) == 0) {\n      found_handler = true;\n      if (handler.mutates_server_state_) {\n        const absl::string_view method = admin_stream.getRequestHeaders().getMethodValue();\n        if (method != Http::Headers::get().MethodValues.Post) {\n          ENVOY_LOG(error, \"admin path 
\\\"{}\\\" mutates state, method={} rather than POST\",\n                    handler.prefix_, method);\n          code = Http::Code::MethodNotAllowed;\n          response.add(fmt::format(\"Method {} not allowed, POST required.\", method));\n          break;\n        }\n      }\n      code = handler.handler_(path_and_query, response_headers, response, admin_stream);\n      Memory::Utils::tryShrinkHeap();\n      break;\n    }\n  }\n\n  if (!found_handler) {\n    // Extra space is emitted below to have \"invalid path.\" be a separate sentence in the\n    // 404 output from \"admin commands are:\" in handlerHelp.\n    response.add(\"invalid path. \");\n    handlerHelp(path_and_query, response_headers, response, admin_stream);\n    code = Http::Code::NotFound;\n  }\n\n  return code;\n}\n\nstd::vector<const AdminImpl::UrlHandler*> AdminImpl::sortedHandlers() const {\n  std::vector<const UrlHandler*> sorted_handlers;\n  for (const UrlHandler& handler : handlers_) {\n    sorted_handlers.push_back(&handler);\n  }\n  // Note: it's generally faster to sort a vector with std::vector than to construct a std::map.\n  std::sort(sorted_handlers.begin(), sorted_handlers.end(),\n            [](const UrlHandler* h1, const UrlHandler* h2) { return h1->prefix_ < h2->prefix_; });\n  return sorted_handlers;\n}\n\nHttp::Code AdminImpl::handlerHelp(absl::string_view, Http::ResponseHeaderMap&,\n                                  Buffer::Instance& response, AdminStream&) {\n  response.add(\"admin commands are:\\n\");\n\n  // Prefix order is used during searching, but for printing do them in alpha order.\n  for (const UrlHandler* handler : sortedHandlers()) {\n    response.add(fmt::format(\"  {}: {}\\n\", handler->prefix_, handler->help_text_));\n  }\n  return Http::Code::OK;\n}\n\nHttp::Code AdminImpl::handlerAdminHome(absl::string_view, Http::ResponseHeaderMap& response_headers,\n                                       Buffer::Instance& response, AdminStream&) {\n  
response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Html);\n\n  response.add(absl::StrReplaceAll(AdminHtmlStart, {{\"@FAVICON@\", EnvoyFavicon}}));\n\n  // Prefix order is used during searching, but for printing do them in alpha order.\n  for (const UrlHandler* handler : sortedHandlers()) {\n    absl::string_view path = handler->prefix_;\n\n    if (path == \"/\") {\n      continue; // No need to print self-link to index page.\n    }\n\n    // Remove the leading slash from the link, so that the admin page can be\n    // rendered as part of another console, on a sub-path.\n    //\n    // E.g. consider a downstream dashboard that embeds the Envoy admin console.\n    // In that case, the \"/stats\" endpoint would be at\n    // https://DASHBOARD/envoy_admin/stats. If the links we present on the home\n    // page are absolute (e.g. \"/stats\") they won't work in the context of the\n    // dashboard. Removing the leading slash, they will work properly in both\n    // the raw admin console and when embedded in another page and URL\n    // hierarchy.\n    ASSERT(!path.empty());\n    ASSERT(path[0] == '/');\n    path = path.substr(1);\n\n    // For handlers that mutate state, render the link as a button in a POST form,\n    // rather than an anchor tag. This should discourage crawlers that find the /\n    // page from accidentally mutating all the server state by GETting all the hrefs.\n    const char* link_format =\n        handler->mutates_server_state_\n            ? 
\"<form action='{}' method='post' class='home-form'><button>{}</button></form>\"\n            : \"<a href='{}'>{}</a>\";\n    const std::string link = fmt::format(link_format, path, path);\n\n    // Handlers are all specified by statically above, and are thus trusted and do\n    // not require escaping.\n    response.add(fmt::format(\"<tr class='home-row'><td class='home-data'>{}</td>\"\n                             \"<td class='home-data'>{}</td></tr>\\n\",\n                             link, Html::Utility::sanitize(handler->help_text_)));\n  }\n  response.add(AdminHtmlEnd);\n  return Http::Code::OK;\n}\n\nconst Network::Address::Instance& AdminImpl::localAddress() {\n  return *server_.localInfo().address();\n}\n\nbool AdminImpl::addHandler(const std::string& prefix, const std::string& help_text,\n                           HandlerCb callback, bool removable, bool mutates_state) {\n  ASSERT(prefix.size() > 1);\n  ASSERT(prefix[0] == '/');\n\n  // Sanitize prefix and help_text to ensure no XSS can be injected, as\n  // we are injecting these strings into HTML that runs in a domain that\n  // can mutate Envoy server state. Also rule out some characters that\n  // make no sense as part of a URL path: ? 
and :.\n  const std::string::size_type pos = prefix.find_first_of(\"&\\\"'<>?:\");\n  if (pos != std::string::npos) {\n    ENVOY_LOG(error, \"filter \\\"{}\\\" contains invalid character '{}'\", prefix, prefix[pos]);\n    return false;\n  }\n\n  auto it = std::find_if(handlers_.cbegin(), handlers_.cend(),\n                         [&prefix](const UrlHandler& entry) { return prefix == entry.prefix_; });\n  if (it == handlers_.end()) {\n    handlers_.push_back({prefix, help_text, callback, removable, mutates_state});\n    return true;\n  }\n  return false;\n}\n\nbool AdminImpl::removeHandler(const std::string& prefix) {\n  const size_t size_before_removal = handlers_.size();\n  handlers_.remove_if(\n      [&prefix](const UrlHandler& entry) { return prefix == entry.prefix_ && entry.removable_; });\n  if (handlers_.size() != size_before_removal) {\n    return true;\n  }\n  return false;\n}\n\nHttp::Code AdminImpl::request(absl::string_view path_and_query, absl::string_view method,\n                              Http::ResponseHeaderMap& response_headers, std::string& body) {\n  AdminFilter filter(createCallbackFunction());\n\n  auto request_headers = Http::RequestHeaderMapImpl::create();\n  request_headers->setMethod(method);\n  filter.decodeHeaders(*request_headers, false);\n  Buffer::OwnedImpl response;\n\n  Http::Code code = runCallback(path_and_query, response_headers, response, filter);\n  Utility::populateFallbackResponseHeaders(code, response_headers);\n  body = response.toString();\n  return code;\n}\n\nvoid AdminImpl::closeSocket() {\n  if (socket_) {\n    socket_->close();\n  }\n}\n\nvoid AdminImpl::addListenerToHandler(Network::ConnectionHandler* handler) {\n  if (listener_) {\n    handler->addListener(absl::nullopt, *listener_);\n  }\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/admin.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <functional>\n#include <list>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n#include \"envoy/server/listener_manager.h\"\n#include \"envoy/server/overload_manager.h\"\n#include \"envoy/upstream/outlier_detection.h\"\n#include \"envoy/upstream/resource_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/basic_resource_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/macros.h\"\n#include \"common/http/conn_manager_impl.h\"\n#include \"common/http/date_provider_impl.h\"\n#include \"common/http/default_server_string.h\"\n#include \"common/http/http1/codec_stats.h\"\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/http/request_id_extension_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/connection_balancer_impl.h\"\n#include \"common/network/raw_buffer_socket.h\"\n#include \"common/router/scoped_config_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"server/admin/admin_filter.h\"\n#include \"server/admin/clusters_handler.h\"\n#include \"server/admin/config_dump_handler.h\"\n#include \"server/admin/config_tracker_impl.h\"\n#include \"server/admin/init_dump_handler.h\"\n#include \"server/admin/listeners_handler.h\"\n#include \"server/admin/logs_handler.h\"\n#include \"server/admin/profiling_handler.h\"\n#include \"server/admin/runtime_handler.h\"\n#include \"server/admin/server_cmd_handler.h\"\n#include \"server/admin/server_info_handler.h\"\n#include \"server/admin/stats_handler.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy 
{\nnamespace Server {\n\nclass AdminInternalAddressConfig : public Http::InternalAddressConfig {\n  bool isInternalAddress(const Network::Address::Instance&) const override { return false; }\n};\n\n/**\n * Implementation of Server::Admin.\n */\nclass AdminImpl : public Admin,\n                  public Network::FilterChainManager,\n                  public Network::FilterChainFactory,\n                  public Http::FilterChainFactory,\n                  public Http::ConnectionManagerConfig,\n                  Logger::Loggable<Logger::Id::admin> {\npublic:\n  AdminImpl(const std::string& profile_path, Server::Instance& server);\n\n  Http::Code runCallback(absl::string_view path_and_query,\n                         Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                         AdminStream& admin_stream);\n  const Network::Socket& socket() override { return *socket_; }\n  Network::Socket& mutableSocket() { return *socket_; }\n\n  // Server::Admin\n  // TODO(jsedgwick) These can be managed with a generic version of ConfigTracker.\n  // Wins would be no manual removeHandler() and code reuse.\n  //\n  // The prefix must start with \"/\" and contain at least one additional character.\n  bool addHandler(const std::string& prefix, const std::string& help_text, HandlerCb callback,\n                  bool removable, bool mutates_server_state) override;\n  bool removeHandler(const std::string& prefix) override;\n  ConfigTracker& getConfigTracker() override;\n\n  void startHttpListener(const std::string& access_log_path, const std::string& address_out_path,\n                         Network::Address::InstanceConstSharedPtr address,\n                         const Network::Socket::OptionsSharedPtr& socket_options,\n                         Stats::ScopePtr&& listener_scope) override;\n  uint32_t concurrency() const override { return server_.options().concurrency(); }\n\n  // Network::FilterChainManager\n  const Network::FilterChain* 
findFilterChain(const Network::ConnectionSocket&) const override {\n    return admin_filter_chain_.get();\n  }\n\n  // Network::FilterChainFactory\n  bool\n  createNetworkFilterChain(Network::Connection& connection,\n                           const std::vector<Network::FilterFactoryCb>& filter_factories) override;\n  bool createListenerFilterChain(Network::ListenerFilterManager&) override { return true; }\n  void createUdpListenerFilterChain(Network::UdpListenerFilterManager&,\n                                    Network::UdpReadFilterCallbacks&) override {}\n\n  // Http::FilterChainFactory\n  void createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) override;\n  bool createUpgradeFilterChain(absl::string_view, const Http::FilterChainFactory::UpgradeMap*,\n                                Http::FilterChainFactoryCallbacks&) override {\n    return false;\n  }\n\n  // Http::ConnectionManagerConfig\n  Http::RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; }\n  const std::list<AccessLog::InstanceSharedPtr>& accessLogs() override { return access_logs_; }\n  Http::ServerConnectionPtr createCodec(Network::Connection& connection,\n                                        const Buffer::Instance& data,\n                                        Http::ServerConnectionCallbacks& callbacks) override;\n  Http::DateProvider& dateProvider() override { return date_provider_; }\n  std::chrono::milliseconds drainTimeout() const override { return std::chrono::milliseconds(100); }\n  Http::FilterChainFactory& filterFactory() override { return *this; }\n  bool generateRequestId() const override { return false; }\n  bool preserveExternalRequestId() const override { return false; }\n  bool alwaysSetRequestIdInResponse() const override { return false; }\n  absl::optional<std::chrono::milliseconds> idleTimeout() const override { return idle_timeout_; }\n  bool isRoutable() const override { return false; }\n  
absl::optional<std::chrono::milliseconds> maxConnectionDuration() const override {\n    return max_connection_duration_;\n  }\n  uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; }\n  uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; }\n  std::chrono::milliseconds streamIdleTimeout() const override { return {}; }\n  std::chrono::milliseconds requestTimeout() const override { return {}; }\n  std::chrono::milliseconds delayedCloseTimeout() const override { return {}; }\n  absl::optional<std::chrono::milliseconds> maxStreamDuration() const override {\n    return max_stream_duration_;\n  }\n  Router::RouteConfigProvider* routeConfigProvider() override { return &route_config_provider_; }\n  Config::ConfigProvider* scopedRouteConfigProvider() override {\n    return &scoped_route_config_provider_;\n  }\n  const std::string& serverName() const override { return Http::DefaultServerString::get(); }\n  HttpConnectionManagerProto::ServerHeaderTransformation\n  serverHeaderTransformation() const override {\n    return HttpConnectionManagerProto::OVERWRITE;\n  }\n  Http::ConnectionManagerStats& stats() override { return stats_; }\n  Http::ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; }\n  bool useRemoteAddress() const override { return true; }\n  const Http::InternalAddressConfig& internalAddressConfig() const override {\n    return internal_address_config_;\n  }\n  uint32_t xffNumTrustedHops() const override { return 0; }\n  bool skipXffAppend() const override { return false; }\n  const std::string& via() const override { return EMPTY_STRING; }\n  Http::ForwardClientCertType forwardClientCert() const override {\n    return Http::ForwardClientCertType::Sanitize;\n  }\n  const std::vector<Http::ClientCertDetailsType>& setCurrentClientCertDetails() const override {\n    return set_current_client_cert_details_;\n  }\n  const Network::Address::Instance& localAddress() override;\n  
const absl::optional<std::string>& userAgent() override { return user_agent_; }\n  Tracing::HttpTracerSharedPtr tracer() override { return nullptr; }\n  const Http::TracingConnectionManagerConfig* tracingConfig() override { return nullptr; }\n  Http::ConnectionManagerListenerStats& listenerStats() override { return listener_->stats_; }\n  bool proxy100Continue() const override { return false; }\n  bool streamErrorOnInvalidHttpMessaging() const override { return false; }\n  const Http::Http1Settings& http1Settings() const override { return http1_settings_; }\n  bool shouldNormalizePath() const override { return true; }\n  bool shouldMergeSlashes() const override { return true; }\n  bool shouldStripMatchingPort() const override { return false; }\n  envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n  headersWithUnderscoresAction() const override {\n    return envoy::config::core::v3::HttpProtocolOptions::ALLOW;\n  }\n  const LocalReply::LocalReply& localReply() const override { return *local_reply_; }\n  Http::Code request(absl::string_view path_and_query, absl::string_view method,\n                     Http::ResponseHeaderMap& response_headers, std::string& body) override;\n  void closeSocket();\n  void addListenerToHandler(Network::ConnectionHandler* handler) override;\n  Server::Instance& server() { return server_; }\n\n  AdminFilter::AdminServerCallbackFunction createCallbackFunction() {\n    return [this](absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers,\n                  Buffer::OwnedImpl& response, AdminFilter& filter) -> Http::Code {\n      return runCallback(path_and_query, response_headers, response, filter);\n    };\n  }\n\nprivate:\n  /**\n   * Individual admin handler including prefix, help text, and callback.\n   */\n  struct UrlHandler {\n    const std::string prefix_;\n    const std::string help_text_;\n    const HandlerCb handler_;\n    const bool removable_;\n    const bool 
mutates_server_state_;\n  };\n\n  /**\n   * Implementation of RouteConfigProvider that returns a static null route config.\n   */\n  struct NullRouteConfigProvider : public Router::RouteConfigProvider {\n    NullRouteConfigProvider(TimeSource& time_source);\n\n    // Router::RouteConfigProvider\n    Router::ConfigConstSharedPtr config() override { return config_; }\n    absl::optional<ConfigInfo> configInfo() const override { return {}; }\n    SystemTime lastUpdated() const override { return time_source_.systemTime(); }\n    void onConfigUpdate() override {}\n    void validateConfig(const envoy::config::route::v3::RouteConfiguration&) const override {}\n    void requestVirtualHostsUpdate(const std::string&, Event::Dispatcher&,\n                                   std::weak_ptr<Http::RouteConfigUpdatedCallback>) override {\n      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    }\n\n    Router::ConfigConstSharedPtr config_;\n    TimeSource& time_source_;\n  };\n\n  /**\n   * Implementation of ScopedRouteConfigProvider that returns a null scoped route config.\n   */\n  struct NullScopedRouteConfigProvider : public Config::ConfigProvider {\n    NullScopedRouteConfigProvider(TimeSource& time_source)\n        : config_(std::make_shared<const Router::NullScopedConfigImpl>()),\n          time_source_(time_source) {}\n\n    ~NullScopedRouteConfigProvider() override = default;\n\n    // Config::ConfigProvider\n    SystemTime lastUpdated() const override { return time_source_.systemTime(); }\n    const Protobuf::Message* getConfigProto() const override { return nullptr; }\n    std::string getConfigVersion() const override { return \"\"; }\n    ConfigConstSharedPtr getConfig() const override { return config_; }\n    ApiType apiType() const override { return ApiType::Full; }\n    ConfigProtoVector getConfigProtos() const override { return {}; }\n\n    Router::ScopedConfigConstSharedPtr config_;\n    TimeSource& time_source_;\n  };\n\n  /**\n   * Implementation of OverloadManager that is 
never overloaded. Using this instead of the real\n   * OverloadManager keeps the admin interface accessible even when the proxy is overloaded.\n   */\n  struct NullOverloadManager : public OverloadManager {\n    struct NullThreadLocalOverloadState : public ThreadLocalOverloadState {\n      const OverloadActionState& getState(const std::string&) override { return inactive_; }\n\n      const OverloadActionState inactive_ = OverloadActionState::inactive();\n    };\n\n    NullOverloadManager(ThreadLocal::SlotAllocator& slot_allocator)\n        : tls_(slot_allocator.allocateSlot()) {}\n\n    void start() override {\n      tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n        return std::make_shared<NullThreadLocalOverloadState>();\n      });\n    }\n\n    ThreadLocalOverloadState& getThreadLocalOverloadState() override {\n      return tls_->getTyped<NullThreadLocalOverloadState>();\n    }\n\n    bool registerForAction(const std::string&, Event::Dispatcher&, OverloadActionCb) override {\n      // This method shouldn't be called by the admin listener\n      NOT_REACHED_GCOVR_EXCL_LINE;\n      return false;\n    }\n\n    ThreadLocal::SlotPtr tls_;\n  };\n\n  std::vector<const UrlHandler*> sortedHandlers() const;\n  envoy::admin::v3::ServerInfo::State serverState();\n\n  /**\n   * URL handlers.\n   */\n  Http::Code handlerAdminHome(absl::string_view path_and_query,\n                              Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                              AdminStream&);\n\n  Http::Code handlerHelp(absl::string_view path_and_query,\n                         Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                         AdminStream&);\n\n  class AdminListenSocketFactory : public Network::ListenSocketFactory {\n  public:\n    AdminListenSocketFactory(Network::SocketSharedPtr socket) : socket_(socket) {}\n\n    // Network::ListenSocketFactory\n    Network::Socket::Type 
socketType() const override { return socket_->socketType(); }\n\n    const Network::Address::InstanceConstSharedPtr& localAddress() const override {\n      return socket_->localAddress();\n    }\n\n    Network::SocketSharedPtr getListenSocket() override {\n      // This is only supposed to be called once.\n      RELEASE_ASSERT(!socket_create_, \"AdminListener's socket shouldn't be shared.\");\n      socket_create_ = true;\n      return socket_;\n    }\n\n    Network::SocketOptRef sharedSocket() const override { return absl::nullopt; }\n\n  private:\n    Network::SocketSharedPtr socket_;\n    bool socket_create_{false};\n  };\n\n  class AdminListener : public Network::ListenerConfig {\n  public:\n    AdminListener(AdminImpl& parent, Stats::ScopePtr&& listener_scope)\n        : parent_(parent), name_(\"admin\"), scope_(std::move(listener_scope)),\n          stats_(Http::ConnectionManagerImpl::generateListenerStats(\"http.admin.\", *scope_)),\n          init_manager_(nullptr) {}\n\n    // Network::ListenerConfig\n    Network::FilterChainManager& filterChainManager() override { return parent_; }\n    Network::FilterChainFactory& filterChainFactory() override { return parent_; }\n    Network::ListenSocketFactory& listenSocketFactory() override {\n      return *parent_.socket_factory_;\n    }\n    bool bindToPort() override { return true; }\n    bool handOffRestoredDestinationConnections() const override { return false; }\n    uint32_t perConnectionBufferLimitBytes() const override { return 0; }\n    std::chrono::milliseconds listenerFiltersTimeout() const override { return {}; }\n    bool continueOnListenerFiltersTimeout() const override { return false; }\n    Stats::Scope& listenerScope() override { return *scope_; }\n    uint64_t listenerTag() const override { return 0; }\n    const std::string& name() const override { return name_; }\n    Network::ActiveUdpListenerFactory* udpListenerFactory() override {\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n    
Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override {\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n    Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override {\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n    envoy::config::core::v3::TrafficDirection direction() const override {\n      return envoy::config::core::v3::UNSPECIFIED;\n    }\n    Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; }\n    ResourceLimit& openConnections() override { return open_connections_; }\n    const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() const override {\n      return empty_access_logs_;\n    }\n    uint32_t tcpBacklogSize() const override { return ENVOY_TCP_BACKLOG_SIZE; }\n    Init::Manager& initManager() override { return *init_manager_; }\n\n    AdminImpl& parent_;\n    const std::string name_;\n    Stats::ScopePtr scope_;\n    Http::ConnectionManagerListenerStats stats_;\n    Network::NopConnectionBalancerImpl connection_balancer_;\n    BasicResourceLimitImpl open_connections_;\n\n  private:\n    const std::vector<AccessLog::InstanceSharedPtr> empty_access_logs_;\n    std::unique_ptr<Init::Manager> init_manager_;\n  };\n  using AdminListenerPtr = std::unique_ptr<AdminListener>;\n\n  class AdminFilterChain : public Network::FilterChain {\n  public:\n    // We can't use the default constructor because transport_socket_factory_ doesn't have a\n    // default constructor.\n    AdminFilterChain() {} // NOLINT(modernize-use-equals-default)\n\n    // Network::FilterChain\n    const Network::TransportSocketFactory& transportSocketFactory() const override {\n      return transport_socket_factory_;\n    }\n\n    const std::vector<Network::FilterFactoryCb>& networkFilterFactories() const override {\n      return empty_network_filter_factory_;\n    }\n\n  private:\n    const Network::RawBufferSocketFactory transport_socket_factory_;\n    const std::vector<Network::FilterFactoryCb> 
empty_network_filter_factory_;\n  };\n\n  Server::Instance& server_;\n  Http::RequestIDExtensionSharedPtr request_id_extension_;\n  std::list<AccessLog::InstanceSharedPtr> access_logs_;\n  const std::string profile_path_;\n  Http::ConnectionManagerStats stats_;\n  NullOverloadManager null_overload_manager_;\n  // Note: this is here to essentially blackhole the tracing stats since they aren't used in the\n  // Admin case.\n  Stats::IsolatedStoreImpl no_op_store_;\n  Http::ConnectionManagerTracingStats tracing_stats_;\n  NullRouteConfigProvider route_config_provider_;\n  NullScopedRouteConfigProvider scoped_route_config_provider_;\n  Server::ClustersHandler clusters_handler_;\n  Server::ConfigDumpHandler config_dump_handler_;\n  Server::InitDumpHandler init_dump_handler_;\n  Server::StatsHandler stats_handler_;\n  Server::LogsHandler logs_handler_;\n  Server::ProfilingHandler profiling_handler_;\n  Server::RuntimeHandler runtime_handler_;\n  Server::ListenersHandler listeners_handler_;\n  Server::ServerCmdHandler server_cmd_handler_;\n  Server::ServerInfoHandler server_info_handler_;\n  std::list<UrlHandler> handlers_;\n  const uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB};\n  const uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT};\n  absl::optional<std::chrono::milliseconds> idle_timeout_;\n  absl::optional<std::chrono::milliseconds> max_connection_duration_;\n  absl::optional<std::chrono::milliseconds> max_stream_duration_;\n  absl::optional<std::string> user_agent_;\n  Http::SlowDateProviderImpl date_provider_;\n  std::vector<Http::ClientCertDetailsType> set_current_client_cert_details_;\n  Http::Http1Settings http1_settings_;\n  Http::Http1::CodecStats::AtomicPtr http1_codec_stats_;\n  Http::Http2::CodecStats::AtomicPtr http2_codec_stats_;\n  ConfigTrackerImpl config_tracker_;\n  const Network::FilterChainSharedPtr admin_filter_chain_;\n  Network::SocketSharedPtr socket_;\n  Network::ListenSocketFactorySharedPtr 
socket_factory_;\n  AdminListenerPtr listener_;\n  const AdminInternalAddressConfig internal_address_config_;\n  const LocalReply::LocalReplyPtr local_reply_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/admin_filter.cc",
    "content": "#include \"server/admin/admin_filter.h\"\n\n#include \"server/admin/utils.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nAdminFilter::AdminFilter(AdminServerCallbackFunction admin_server_callback_func)\n    : admin_server_callback_func_(admin_server_callback_func) {}\n\nHttp::FilterHeadersStatus AdminFilter::decodeHeaders(Http::RequestHeaderMap& headers,\n                                                     bool end_stream) {\n  request_headers_ = &headers;\n  if (end_stream) {\n    onComplete();\n  }\n\n  return Http::FilterHeadersStatus::StopIteration;\n}\n\nHttp::FilterDataStatus AdminFilter::decodeData(Buffer::Instance& data, bool end_stream) {\n  // Currently we generically buffer all admin request data in case a handler wants to use it.\n  // If we ever support streaming admin requests we may need to revisit this. Note, we must use\n  // addDecodedData() here since we might need to perform onComplete() processing if end_stream is\n  // true.\n  decoder_callbacks_->addDecodedData(data, false);\n\n  if (end_stream) {\n    onComplete();\n  }\n\n  return Http::FilterDataStatus::StopIterationNoBuffer;\n}\n\nHttp::FilterTrailersStatus AdminFilter::decodeTrailers(Http::RequestTrailerMap&) {\n  onComplete();\n  return Http::FilterTrailersStatus::StopIteration;\n}\n\nvoid AdminFilter::onDestroy() {\n  for (const auto& callback : on_destroy_callbacks_) {\n    callback();\n  }\n}\n\nvoid AdminFilter::addOnDestroyCallback(std::function<void()> cb) {\n  on_destroy_callbacks_.push_back(std::move(cb));\n}\n\nHttp::StreamDecoderFilterCallbacks& AdminFilter::getDecoderFilterCallbacks() const {\n  ASSERT(decoder_callbacks_ != nullptr);\n  return *decoder_callbacks_;\n}\n\nconst Buffer::Instance* AdminFilter::getRequestBody() const {\n  return decoder_callbacks_->decodingBuffer();\n}\n\nconst Http::RequestHeaderMap& AdminFilter::getRequestHeaders() const {\n  ASSERT(request_headers_ != nullptr);\n  return *request_headers_;\n}\n\nvoid AdminFilter::onComplete() 
{\n  const absl::string_view path = request_headers_->getPathValue();\n  ENVOY_STREAM_LOG(debug, \"request complete: path: {}\", *decoder_callbacks_, path);\n\n  Buffer::OwnedImpl response;\n  auto header_map = Http::ResponseHeaderMapImpl::create();\n  RELEASE_ASSERT(request_headers_, \"\");\n  Http::Code code = admin_server_callback_func_(path, *header_map, response, *this);\n  Utility::populateFallbackResponseHeaders(code, *header_map);\n  decoder_callbacks_->encodeHeaders(std::move(header_map),\n                                    end_stream_on_complete_ && response.length() == 0,\n                                    StreamInfo::ResponseCodeDetails::get().AdminFilterResponse);\n\n  if (response.length() > 0) {\n    decoder_callbacks_->encodeData(response, end_stream_on_complete_);\n  }\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/admin_filter.h",
    "content": "#pragma once\n\n#include <functional>\n#include <list>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/server/admin.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * A terminal HTTP filter that implements server admin functionality.\n */\nclass AdminFilter : public Http::PassThroughFilter,\n                    public AdminStream,\n                    Logger::Loggable<Logger::Id::admin> {\npublic:\n  using AdminServerCallbackFunction = std::function<Http::Code(\n      absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers,\n      Buffer::OwnedImpl& response, AdminFilter& filter)>;\n\n  AdminFilter(AdminServerCallbackFunction admin_server_run_callback_func);\n\n  // Http::StreamFilterBase\n  // Handlers relying on the reference should use addOnDestroyCallback()\n  // to add a callback that will notify them when the reference is no\n  // longer valid.\n  void onDestroy() override;\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override;\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override;\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override;\n\n  // AdminStream\n  void setEndStreamOnComplete(bool end_stream) override { end_stream_on_complete_ = end_stream; }\n  void addOnDestroyCallback(std::function<void()> cb) override;\n  Http::StreamDecoderFilterCallbacks& getDecoderFilterCallbacks() const override;\n  const Buffer::Instance* getRequestBody() const override;\n  const Http::RequestHeaderMap& getRequestHeaders() const override;\n  
Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override {\n    return encoder_callbacks_->http1StreamEncoderOptions();\n  }\n\nprivate:\n  /**\n   * Called when an admin request has been completely received.\n   */\n  void onComplete();\n  AdminServerCallbackFunction admin_server_callback_func_;\n  Http::RequestHeaderMap* request_headers_{};\n  std::list<std::function<void()>> on_destroy_callbacks_;\n  bool end_stream_on_complete_ = true;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/clusters_handler.cc",
    "content": "#include \"server/admin/clusters_handler.h\"\n\n#include \"envoy/admin/v3/clusters.pb.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n#include \"common/upstream/host_utility.h\"\n\n#include \"server/admin/utils.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\n\nvoid addCircuitBreakerSettingsAsText(const std::string& cluster_name,\n                                     const std::string& priority_str,\n                                     Upstream::ResourceManager& resource_manager,\n                                     Buffer::Instance& response) {\n  response.add(fmt::format(\"{}::{}_priority::max_connections::{}\\n\", cluster_name, priority_str,\n                           resource_manager.connections().max()));\n  response.add(fmt::format(\"{}::{}_priority::max_pending_requests::{}\\n\", cluster_name,\n                           priority_str, resource_manager.pendingRequests().max()));\n  response.add(fmt::format(\"{}::{}_priority::max_requests::{}\\n\", cluster_name, priority_str,\n                           resource_manager.requests().max()));\n  response.add(fmt::format(\"{}::{}_priority::max_retries::{}\\n\", cluster_name, priority_str,\n                           resource_manager.retries().max()));\n}\n\nvoid addCircuitBreakerSettingsAsJson(const envoy::config::core::v3::RoutingPriority& priority,\n                                     Upstream::ResourceManager& resource_manager,\n                                     envoy::admin::v3::ClusterStatus& cluster_status) {\n  auto& thresholds = *cluster_status.mutable_circuit_breakers()->add_thresholds();\n  thresholds.set_priority(priority);\n  thresholds.mutable_max_connections()->set_value(resource_manager.connections().max());\n  thresholds.mutable_max_pending_requests()->set_value(resource_manager.pendingRequests().max());\n  thresholds.mutable_max_requests()->set_value(resource_manager.requests().max());\n  
thresholds.mutable_max_retries()->set_value(resource_manager.retries().max());\n}\n\n} // namespace\n\nClustersHandler::ClustersHandler(Server::Instance& server) : HandlerContextBase(server) {}\n\nHttp::Code ClustersHandler::handlerClusters(absl::string_view url,\n                                            Http::ResponseHeaderMap& response_headers,\n                                            Buffer::Instance& response, AdminStream&) {\n  Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url);\n  const auto format_value = Utility::formatParam(query_params);\n\n  if (format_value.has_value() && format_value.value() == \"json\") {\n    writeClustersAsJson(response);\n    response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  } else {\n    writeClustersAsText(response);\n  }\n\n  return Http::Code::OK;\n}\n\n// Helper method that ensures that we've setting flags based on all the health flag values on the\n// host.\nvoid setHealthFlag(Upstream::Host::HealthFlag flag, const Upstream::Host& host,\n                   envoy::admin::v3::HostHealthStatus& health_status) {\n  switch (flag) {\n  case Upstream::Host::HealthFlag::FAILED_ACTIVE_HC:\n    health_status.set_failed_active_health_check(\n        host.healthFlagGet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC));\n    break;\n  case Upstream::Host::HealthFlag::FAILED_OUTLIER_CHECK:\n    health_status.set_failed_outlier_check(\n        host.healthFlagGet(Upstream::Host::HealthFlag::FAILED_OUTLIER_CHECK));\n    break;\n  case Upstream::Host::HealthFlag::FAILED_EDS_HEALTH:\n  case Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH:\n    if (host.healthFlagGet(Upstream::Host::HealthFlag::FAILED_EDS_HEALTH)) {\n      health_status.set_eds_health_status(envoy::config::core::v3::UNHEALTHY);\n    } else if (host.healthFlagGet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH)) {\n      health_status.set_eds_health_status(envoy::config::core::v3::DEGRADED);\n    } 
else {\n      health_status.set_eds_health_status(envoy::config::core::v3::HEALTHY);\n    }\n    break;\n  case Upstream::Host::HealthFlag::DEGRADED_ACTIVE_HC:\n    health_status.set_failed_active_degraded_check(\n        host.healthFlagGet(Upstream::Host::HealthFlag::DEGRADED_ACTIVE_HC));\n    break;\n  case Upstream::Host::HealthFlag::PENDING_DYNAMIC_REMOVAL:\n    health_status.set_pending_dynamic_removal(\n        host.healthFlagGet(Upstream::Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));\n    break;\n  case Upstream::Host::HealthFlag::PENDING_ACTIVE_HC:\n    health_status.set_pending_active_hc(\n        host.healthFlagGet(Upstream::Host::HealthFlag::PENDING_ACTIVE_HC));\n    break;\n  }\n}\n\n// TODO(efimki): Add support of text readouts stats.\nvoid ClustersHandler::writeClustersAsJson(Buffer::Instance& response) {\n  envoy::admin::v3::Clusters clusters;\n  for (const auto& [name, cluster_ref] : server_.clusterManager().clusters()) {\n    const Upstream::Cluster& cluster = cluster_ref.get();\n    Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info();\n\n    envoy::admin::v3::ClusterStatus& cluster_status = *clusters.add_cluster_statuses();\n    cluster_status.set_name(cluster_info->name());\n\n    addCircuitBreakerSettingsAsJson(\n        envoy::config::core::v3::RoutingPriority::DEFAULT,\n        cluster.info()->resourceManager(Upstream::ResourcePriority::Default), cluster_status);\n    addCircuitBreakerSettingsAsJson(\n        envoy::config::core::v3::RoutingPriority::HIGH,\n        cluster.info()->resourceManager(Upstream::ResourcePriority::High), cluster_status);\n\n    const Upstream::Outlier::Detector* outlier_detector = cluster.outlierDetector();\n    if (outlier_detector != nullptr &&\n        outlier_detector->successRateEjectionThreshold(\n            Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin) > 0.0) {\n      cluster_status.mutable_success_rate_ejection_threshold()->set_value(\n          
outlier_detector->successRateEjectionThreshold(\n              Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n    }\n    if (outlier_detector != nullptr &&\n        outlier_detector->successRateEjectionThreshold(\n            Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin) > 0.0) {\n      cluster_status.mutable_local_origin_success_rate_ejection_threshold()->set_value(\n          outlier_detector->successRateEjectionThreshold(\n              Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n    }\n\n    cluster_status.set_added_via_api(cluster_info->addedViaApi());\n\n    for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {\n      for (auto& host : host_set->hosts()) {\n        envoy::admin::v3::HostStatus& host_status = *cluster_status.add_host_statuses();\n        Network::Utility::addressToProtobufAddress(*host->address(),\n                                                   *host_status.mutable_address());\n        host_status.set_hostname(host->hostname());\n        host_status.mutable_locality()->MergeFrom(host->locality());\n\n        for (const auto& [counter_name, counter] : host->counters()) {\n          auto& metric = *host_status.add_stats();\n          metric.set_name(std::string(counter_name));\n          metric.set_value(counter.get().value());\n          metric.set_type(envoy::admin::v3::SimpleMetric::COUNTER);\n        }\n\n        for (const auto& [gauge_name, gauge] : host->gauges()) {\n          auto& metric = *host_status.add_stats();\n          metric.set_name(std::string(gauge_name));\n          metric.set_value(gauge.get().value());\n          metric.set_type(envoy::admin::v3::SimpleMetric::GAUGE);\n        }\n\n        envoy::admin::v3::HostHealthStatus& health_status = *host_status.mutable_health_status();\n\n// Invokes setHealthFlag for each health flag.\n#define SET_HEALTH_FLAG(name, notused)                                       
                      \\\n  setHealthFlag(Upstream::Host::HealthFlag::name, *host, health_status);\n        HEALTH_FLAG_ENUM_VALUES(SET_HEALTH_FLAG)\n#undef SET_HEALTH_FLAG\n\n        double success_rate = host->outlierDetector().successRate(\n            Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin);\n        if (success_rate >= 0.0) {\n          host_status.mutable_success_rate()->set_value(success_rate);\n        }\n\n        host_status.set_weight(host->weight());\n\n        host_status.set_priority(host->priority());\n        success_rate = host->outlierDetector().successRate(\n            Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin);\n        if (success_rate >= 0.0) {\n          host_status.mutable_local_origin_success_rate()->set_value(success_rate);\n        }\n      }\n    }\n  }\n  response.add(MessageUtil::getJsonStringFromMessage(clusters, true)); // pretty-print\n}\n\n// TODO(efimki): Add support of text readouts stats.\nvoid ClustersHandler::writeClustersAsText(Buffer::Instance& response) {\n  for (const auto& [name, cluster_ref] : server_.clusterManager().clusters()) {\n    const Upstream::Cluster& cluster = cluster_ref.get();\n    const std::string& cluster_name = cluster.info()->name();\n    addOutlierInfo(cluster_name, cluster.outlierDetector(), response);\n\n    addCircuitBreakerSettingsAsText(\n        cluster_name, \"default\",\n        cluster.info()->resourceManager(Upstream::ResourcePriority::Default), response);\n    addCircuitBreakerSettingsAsText(\n        cluster_name, \"high\", cluster.info()->resourceManager(Upstream::ResourcePriority::High),\n        response);\n\n    response.add(\n        fmt::format(\"{}::added_via_api::{}\\n\", cluster_name, cluster.info()->addedViaApi()));\n    for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {\n      for (auto& host : host_set->hosts()) {\n        const std::string& host_address = host->address()->asString();\n  
      std::map<absl::string_view, uint64_t> all_stats;\n        for (const auto& [counter_name, counter] : host->counters()) {\n          all_stats[counter_name] = counter.get().value();\n        }\n\n        for (const auto& [gauge_name, gauge] : host->gauges()) {\n          all_stats[gauge_name] = gauge.get().value();\n        }\n\n        for (const auto& [stat_name, stat] : all_stats) {\n          response.add(\n              fmt::format(\"{}::{}::{}::{}\\n\", cluster_name, host_address, stat_name, stat));\n        }\n\n        response.add(\n            fmt::format(\"{}::{}::hostname::{}\\n\", cluster_name, host_address, host->hostname()));\n        response.add(fmt::format(\"{}::{}::health_flags::{}\\n\", cluster_name, host_address,\n                                 Upstream::HostUtility::healthFlagsToString(*host)));\n        response.add(\n            fmt::format(\"{}::{}::weight::{}\\n\", cluster_name, host_address, host->weight()));\n        response.add(fmt::format(\"{}::{}::region::{}\\n\", cluster_name, host_address,\n                                 host->locality().region()));\n        response.add(\n            fmt::format(\"{}::{}::zone::{}\\n\", cluster_name, host_address, host->locality().zone()));\n        response.add(fmt::format(\"{}::{}::sub_zone::{}\\n\", cluster_name, host_address,\n                                 host->locality().sub_zone()));\n        response.add(\n            fmt::format(\"{}::{}::canary::{}\\n\", cluster_name, host_address, host->canary()));\n        response.add(\n            fmt::format(\"{}::{}::priority::{}\\n\", cluster_name, host_address, host->priority()));\n        response.add(fmt::format(\n            \"{}::{}::success_rate::{}\\n\", cluster_name, host_address,\n            host->outlierDetector().successRate(\n                Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin)));\n        response.add(fmt::format(\n            \"{}::{}::local_origin_success_rate::{}\\n\", 
cluster_name, host_address,\n            host->outlierDetector().successRate(\n                Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin)));\n      }\n    }\n  }\n}\n\nvoid ClustersHandler::addOutlierInfo(const std::string& cluster_name,\n                                     const Upstream::Outlier::Detector* outlier_detector,\n                                     Buffer::Instance& response) {\n  if (outlier_detector) {\n    response.add(fmt::format(\n        \"{}::outlier::success_rate_average::{:g}\\n\", cluster_name,\n        outlier_detector->successRateAverage(\n            Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin)));\n    response.add(fmt::format(\n        \"{}::outlier::success_rate_ejection_threshold::{:g}\\n\", cluster_name,\n        outlier_detector->successRateEjectionThreshold(\n            Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin)));\n    response.add(fmt::format(\n        \"{}::outlier::local_origin_success_rate_average::{:g}\\n\", cluster_name,\n        outlier_detector->successRateAverage(\n            Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin)));\n    response.add(fmt::format(\n        \"{}::outlier::local_origin_success_rate_ejection_threshold::{:g}\\n\", cluster_name,\n        outlier_detector->successRateEjectionThreshold(\n            Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin)));\n  }\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/clusters_handler.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"server/admin/handler_ctx.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ClustersHandler : public HandlerContextBase {\n\npublic:\n  ClustersHandler(Server::Instance& server);\n\n  Http::Code handlerClusters(absl::string_view path_and_query,\n                             Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                             AdminStream&);\n\nprivate:\n  void addOutlierInfo(const std::string& cluster_name,\n                      const Upstream::Outlier::Detector* outlier_detector,\n                      Buffer::Instance& response);\n  void writeClustersAsJson(Buffer::Instance& response);\n  void writeClustersAsText(Buffer::Instance& response);\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/config_dump_handler.cc",
    "content": "#include \"server/admin/config_dump_handler.h\"\n\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n\n#include \"server/admin/utils.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\n// Apply a field mask to a resource message. A simple field mask might look\n// like \"cluster.name,cluster.alt_stat_name,last_updated\" for a StaticCluster\n// resource. Unfortunately, since the \"cluster\" field is Any and the in-built\n// FieldMask utils can't mask inside an Any field, we need to do additional work\n// below.\n//\n// We take advantage of the fact that for the most part (with the exception of\n// DynamicListener) that ConfigDump resources have a single Any field where the\n// embedded resources lives. This allows us to construct an inner field mask for\n// the Any resource and an outer field mask for the enclosing message. In the\n// above example, the inner field mask would be \"name,alt_stat_name\" and the\n// outer field mask \"cluster,last_updated\". The masks are applied to their\n// respective messages, with the Any resource requiring an unpack/mask/pack\n// series of operations.\n//\n// TODO(htuch): we could make field masks more powerful in future and generalize\n// this to allow arbitrary indexing through Any fields. This is pretty\n// complicated, we would need to build a FieldMask tree similar to how the C++\n// Protobuf library does this internally.\nvoid trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Message& message) {\n  const Protobuf::Descriptor* descriptor = message.GetDescriptor();\n  const Protobuf::Reflection* reflection = message.GetReflection();\n  // Figure out which paths cover Any fields. 
For each field, gather the paths to\n  // an inner mask, switch the outer mask to cover only the original field.\n  Protobuf::FieldMask outer_field_mask;\n  Protobuf::FieldMask inner_field_mask;\n  std::string any_field_name;\n  for (int i = 0; i < field_mask.paths().size(); ++i) {\n    const std::string& path = field_mask.paths(i);\n    std::vector<std::string> frags = absl::StrSplit(path, '.');\n    if (frags.empty()) {\n      continue;\n    }\n    const Protobuf::FieldDescriptor* field = descriptor->FindFieldByName(frags[0]);\n    // Only a single Any field supported, repeated fields don't support further\n    // indexing.\n    // TODO(htuch): should add support for DynamicListener for multiple Any\n    // fields in the future, see\n    // https://github.com/envoyproxy/envoy/issues/9669.\n    if (field != nullptr && field->message_type() != nullptr && !field->is_repeated() &&\n        field->message_type()->full_name() == \"google.protobuf.Any\") {\n      if (any_field_name.empty()) {\n        any_field_name = frags[0];\n      } else {\n        // This should be structurally true due to the ConfigDump proto\n        // definition (but not for DynamicListener today).\n        ASSERT(any_field_name == frags[0],\n               \"Only a single Any field in a config dump resource is supported.\");\n      }\n      outer_field_mask.add_paths(frags[0]);\n      frags.erase(frags.begin());\n      inner_field_mask.add_paths(absl::StrJoin(frags, \".\"));\n    } else {\n      outer_field_mask.add_paths(path);\n    }\n  }\n\n  if (!any_field_name.empty()) {\n    const Protobuf::FieldDescriptor* any_field = descriptor->FindFieldByName(any_field_name);\n    if (reflection->HasField(message, any_field)) {\n      ASSERT(any_field != nullptr);\n      // Unpack to a DynamicMessage.\n      ProtobufWkt::Any any_message;\n      any_message.MergeFrom(reflection->GetMessage(message, any_field));\n      Protobuf::DynamicMessageFactory dmf;\n      const absl::string_view inner_type_name 
=\n          TypeUtil::typeUrlToDescriptorFullName(any_message.type_url());\n      const Protobuf::Descriptor* inner_descriptor =\n          Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(\n              static_cast<std::string>(inner_type_name));\n      ASSERT(inner_descriptor != nullptr);\n      std::unique_ptr<Protobuf::Message> inner_message;\n      inner_message.reset(dmf.GetPrototype(inner_descriptor)->New());\n      MessageUtil::unpackTo(any_message, *inner_message);\n      // Trim message.\n      ProtobufUtil::FieldMaskUtil::TrimMessage(inner_field_mask, inner_message.get());\n      // Pack it back into the Any resource.\n      any_message.PackFrom(*inner_message);\n      reflection->MutableMessage(&message, any_field)->CopyFrom(any_message);\n    }\n  }\n  ProtobufUtil::FieldMaskUtil::TrimMessage(outer_field_mask, &message);\n}\n\n// Helper method to get the resource parameter.\nabsl::optional<std::string> resourceParam(const Http::Utility::QueryParams& params) {\n  return Utility::queryParam(params, \"resource\");\n}\n\n// Helper method to get the mask parameter.\nabsl::optional<std::string> maskParam(const Http::Utility::QueryParams& params) {\n  return Utility::queryParam(params, \"mask\");\n}\n\n// Helper method to get the eds parameter.\nbool shouldIncludeEdsInDump(const Http::Utility::QueryParams& params) {\n  return Utility::queryParam(params, \"include_eds\") != absl::nullopt;\n}\n\n} // namespace\n\nConfigDumpHandler::ConfigDumpHandler(ConfigTracker& config_tracker, Server::Instance& server)\n    : HandlerContextBase(server), config_tracker_(config_tracker) {}\n\nHttp::Code ConfigDumpHandler::handlerConfigDump(absl::string_view url,\n                                                Http::ResponseHeaderMap& response_headers,\n                                                Buffer::Instance& response, AdminStream&) const {\n  Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url);\n  const auto 
resource = resourceParam(query_params);\n  const auto mask = maskParam(query_params);\n  const bool include_eds = shouldIncludeEdsInDump(query_params);\n\n  envoy::admin::v3::ConfigDump dump;\n\n  if (resource.has_value()) {\n    auto err = addResourceToDump(dump, mask, resource.value(), include_eds);\n    if (err.has_value()) {\n      response.add(err.value().second);\n      return err.value().first;\n    }\n  } else {\n    addAllConfigToDump(dump, mask, include_eds);\n  }\n  MessageUtil::redact(dump);\n\n  response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  response.add(MessageUtil::getJsonStringFromMessage(dump, true)); // pretty-print\n  return Http::Code::OK;\n}\n\nabsl::optional<std::pair<Http::Code, std::string>>\nConfigDumpHandler::addResourceToDump(envoy::admin::v3::ConfigDump& dump,\n                                     const absl::optional<std::string>& mask,\n                                     const std::string& resource, bool include_eds) const {\n  Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap();\n  if (include_eds) {\n    if (!server_.clusterManager().clusters().empty()) {\n      callbacks_map.emplace(\"endpoint\", [this] { return dumpEndpointConfigs(); });\n    }\n  }\n\n  for (const auto& [name, callback] : callbacks_map) {\n    ProtobufTypes::MessagePtr message = callback();\n    ASSERT(message);\n\n    auto field_descriptor = message->GetDescriptor()->FindFieldByName(resource);\n    const Protobuf::Reflection* reflection = message->GetReflection();\n    if (!field_descriptor) {\n      continue;\n    } else if (!field_descriptor->is_repeated()) {\n      return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(\n          Http::Code::BadRequest,\n          fmt::format(\"{} is not a repeated field. 
Use ?mask={} to get only this field\",\n                      field_descriptor->name(), field_descriptor->name()))};\n    }\n\n    auto repeated = reflection->GetRepeatedPtrField<Protobuf::Message>(*message, field_descriptor);\n    for (Protobuf::Message& msg : repeated) {\n      if (mask.has_value()) {\n        Protobuf::FieldMask field_mask;\n        ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask);\n        trimResourceMessage(field_mask, msg);\n      }\n      auto* config = dump.add_configs();\n      config->PackFrom(msg);\n    }\n\n    // We found the desired resource so there is no need to continue iterating over\n    // the other keys.\n    return absl::nullopt;\n  }\n\n  return absl::optional<std::pair<Http::Code, std::string>>{\n      std::make_pair(Http::Code::NotFound, fmt::format(\"{} not found in config dump\", resource))};\n}\n\nvoid ConfigDumpHandler::addAllConfigToDump(envoy::admin::v3::ConfigDump& dump,\n                                           const absl::optional<std::string>& mask,\n                                           bool include_eds) const {\n  Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap();\n  if (include_eds) {\n    if (!server_.clusterManager().clusters().empty()) {\n      callbacks_map.emplace(\"endpoint\", [this] { return dumpEndpointConfigs(); });\n    }\n  }\n\n  for (const auto& [name, callback] : callbacks_map) {\n    ProtobufTypes::MessagePtr message = callback();\n    ASSERT(message);\n\n    if (mask.has_value()) {\n      Protobuf::FieldMask field_mask;\n      ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask);\n      // We don't use trimMessage() above here since masks don't support\n      // indexing through repeated fields.\n      ProtobufUtil::FieldMaskUtil::TrimMessage(field_mask, message.get());\n    }\n\n    auto* config = dump.add_configs();\n    config->PackFrom(*message);\n  }\n}\n\nProtobufTypes::MessagePtr 
ConfigDumpHandler::dumpEndpointConfigs() const {\n  auto endpoint_config_dump = std::make_unique<envoy::admin::v3::EndpointsConfigDump>();\n\n  for (const auto& [name, cluster_ref] : server_.clusterManager().clusters()) {\n    const Upstream::Cluster& cluster = cluster_ref.get();\n    Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info();\n    envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n\n    if (cluster_info->edsServiceName().has_value()) {\n      cluster_load_assignment.set_cluster_name(cluster_info->edsServiceName().value());\n    } else {\n      cluster_load_assignment.set_cluster_name(cluster_info->name());\n    }\n    auto& policy = *cluster_load_assignment.mutable_policy();\n\n    for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {\n      policy.mutable_overprovisioning_factor()->set_value(host_set->overprovisioningFactor());\n\n      if (!host_set->hostsPerLocality().get().empty()) {\n        for (int index = 0; index < static_cast<int>(host_set->hostsPerLocality().get().size());\n             index++) {\n          auto locality_host_set = host_set->hostsPerLocality().get()[index];\n\n          if (!locality_host_set.empty()) {\n            auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add();\n            locality_lb_endpoint.mutable_locality()->MergeFrom(locality_host_set[0]->locality());\n            locality_lb_endpoint.set_priority(locality_host_set[0]->priority());\n            if (host_set->localityWeights() != nullptr && !host_set->localityWeights()->empty()) {\n              locality_lb_endpoint.mutable_load_balancing_weight()->set_value(\n                  (*host_set->localityWeights())[index]);\n            }\n\n            for (auto& host : locality_host_set) {\n              addLbEndpoint(host, locality_lb_endpoint);\n            }\n          }\n        }\n      } else {\n        for (auto& host : host_set->hosts()) {\n          auto& locality_lb_endpoint 
= *cluster_load_assignment.mutable_endpoints()->Add();\n          locality_lb_endpoint.mutable_locality()->MergeFrom(host->locality());\n          locality_lb_endpoint.set_priority(host->priority());\n          addLbEndpoint(host, locality_lb_endpoint);\n        }\n      }\n    }\n\n    if (cluster_info->addedViaApi()) {\n      auto& dynamic_endpoint = *endpoint_config_dump->mutable_dynamic_endpoint_configs()->Add();\n      dynamic_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment);\n    } else {\n      auto& static_endpoint = *endpoint_config_dump->mutable_static_endpoint_configs()->Add();\n      static_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment);\n    }\n  }\n  return endpoint_config_dump;\n}\n\nvoid ConfigDumpHandler::addLbEndpoint(\n    const Upstream::HostSharedPtr& host,\n    envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const {\n  auto& lb_endpoint = *locality_lb_endpoint.mutable_lb_endpoints()->Add();\n  if (host->metadata() != nullptr) {\n    lb_endpoint.mutable_metadata()->MergeFrom(*host->metadata());\n  }\n  lb_endpoint.mutable_load_balancing_weight()->set_value(host->weight());\n\n  switch (host->health()) {\n  case Upstream::Host::Health::Healthy:\n    lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::HEALTHY);\n    break;\n  case Upstream::Host::Health::Unhealthy:\n    lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNHEALTHY);\n    break;\n  case Upstream::Host::Health::Degraded:\n    lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::DEGRADED);\n    break;\n  default:\n    lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNKNOWN);\n  }\n\n  auto& endpoint = *lb_endpoint.mutable_endpoint();\n  endpoint.set_hostname(host->hostname());\n  Network::Utility::addressToProtobufAddress(*host->address(), *endpoint.mutable_address());\n  auto& health_check_config = *endpoint.mutable_health_check_config();\n  
health_check_config.set_hostname(host->hostnameForHealthChecks());\n  if (host->healthCheckAddress()->asString() != host->address()->asString()) {\n    health_check_config.set_port_value(host->healthCheckAddress()->ip()->port());\n  }\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/config_dump_handler.h",
    "content": "\n#pragma once\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"server/admin/config_tracker_impl.h\"\n#include \"server/admin/handler_ctx.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ConfigDumpHandler : public HandlerContextBase {\n\npublic:\n  ConfigDumpHandler(ConfigTracker& config_tracker, Server::Instance& server);\n\n  Http::Code handlerConfigDump(absl::string_view path_and_query,\n                               Http::ResponseHeaderMap& response_headers,\n                               Buffer::Instance& response, AdminStream&) const;\n\nprivate:\n  void addAllConfigToDump(envoy::admin::v3::ConfigDump& dump,\n                          const absl::optional<std::string>& mask, bool include_eds) const;\n  /**\n   * Add the config matching the passed resource to the passed config dump.\n   * @return absl::nullopt on success, else the Http::Code and an error message that should be added\n   * to the admin response.\n   */\n  absl::optional<std::pair<Http::Code, std::string>>\n  addResourceToDump(envoy::admin::v3::ConfigDump& dump, const absl::optional<std::string>& mask,\n                    const std::string& resource, bool include_eds) const;\n\n  /**\n   * Helper methods to add endpoints config\n   */\n  void addLbEndpoint(const Upstream::HostSharedPtr& host,\n                     envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const;\n\n  ProtobufTypes::MessagePtr dumpEndpointConfigs() const;\n\n  ConfigTracker& config_tracker_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/config_tracker_impl.cc",
    "content": "#include \"server/admin/config_tracker_impl.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nConfigTracker::EntryOwnerPtr ConfigTrackerImpl::add(const std::string& key, Cb cb) {\n  auto insert_result = map_->emplace(key, std::move(cb));\n  return insert_result.second\n             ? std::make_unique<ConfigTrackerImpl::EntryOwnerImpl>(map_, std::move(key))\n             : nullptr;\n}\n\nconst ConfigTracker::CbsMap& ConfigTrackerImpl::getCallbacksMap() const { return *map_; }\n\nConfigTrackerImpl::EntryOwnerImpl::EntryOwnerImpl(const std::shared_ptr<ConfigTracker::CbsMap>& map,\n                                                  const std::string& key)\n    : map_(map), key_(key) {}\n\nConfigTrackerImpl::EntryOwnerImpl::~EntryOwnerImpl() {\n  size_t erased = map_->erase(key_);\n  ASSERT(erased == 1);\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/config_tracker_impl.h",
    "content": "#pragma once\n\n#include \"envoy/server/config_tracker.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/macros.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Implementation of ConfigTracker.\n */\nclass ConfigTrackerImpl : public ConfigTracker {\npublic:\n  EntryOwnerPtr add(const std::string& key, Cb cb) override;\n  const CbsMap& getCallbacksMap() const override;\n\nprivate:\n  std::shared_ptr<CbsMap> map_{std::make_shared<CbsMap>()};\n\n  class EntryOwnerImpl : public ConfigTracker::EntryOwner {\n  public:\n    EntryOwnerImpl(const std::shared_ptr<CbsMap>& map, const std::string& key);\n    ~EntryOwnerImpl() override;\n\n  private:\n    std::shared_ptr<CbsMap> map_;\n    std::string key_;\n  };\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/handler_ctx.h",
    "content": "#pragma once\n\n#include \"envoy/server/instance.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass HandlerContextBase {\npublic:\n  HandlerContextBase(Server::Instance& server) : server_(server) {}\n\nprotected:\n  Server::Instance& server_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/init_dump_handler.cc",
    "content": "#include \"server/admin/init_dump_handler.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n\n#include \"server/admin/utils.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\n// Helper method to get the mask parameter.\nabsl::optional<std::string> maskParam(const Http::Utility::QueryParams& params) {\n  return Utility::queryParam(params, \"mask\");\n}\n\n} // namespace\n\nInitDumpHandler::InitDumpHandler(Server::Instance& server) : HandlerContextBase(server) {}\n\nHttp::Code InitDumpHandler::handlerInitDump(absl::string_view url,\n                                            Http::ResponseHeaderMap& response_headers,\n                                            Buffer::Instance& response, AdminStream&) const {\n  Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url);\n  const auto mask = maskParam(query_params);\n\n  envoy::admin::v3::UnreadyTargetsDumps dump = *dumpUnreadyTargets(mask);\n  MessageUtil::redact(dump);\n\n  response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  response.add(MessageUtil::getJsonStringFromMessage(dump, true)); // pretty-print\n  return Http::Code::OK;\n}\n\nstd::unique_ptr<envoy::admin::v3::UnreadyTargetsDumps>\nInitDumpHandler::dumpUnreadyTargets(const absl::optional<std::string>& component) const {\n  auto unready_targets_dumps = std::make_unique<envoy::admin::v3::UnreadyTargetsDumps>();\n\n  if (component.has_value()) {\n    if (component.value() == \"listener\") {\n      dumpListenerUnreadyTargets(*unready_targets_dumps);\n    }\n    // More options for unready targets config dump.\n  } else {\n    // Dump all possible information of unready targets.\n    dumpListenerUnreadyTargets(*unready_targets_dumps);\n    // More unready targets to add into config dump.\n  }\n  return unready_targets_dumps;\n}\n\nvoid InitDumpHandler::dumpListenerUnreadyTargets(\n    
envoy::admin::v3::UnreadyTargetsDumps& unready_targets_dumps) const {\n  std::vector<std::reference_wrapper<Network::ListenerConfig>> listeners;\n  if (server_.listenerManager().isWorkerStarted()) {\n    listeners = server_.listenerManager().listeners(ListenerManager::WARMING);\n  } else {\n    listeners = server_.listenerManager().listeners(ListenerManager::ACTIVE);\n  }\n\n  for (const auto& listener_config : listeners) {\n    auto& listener = listener_config.get();\n    listener.initManager().dumpUnreadyTargets(unready_targets_dumps);\n  }\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/init_dump_handler.h",
    "content": "\n#pragma once\n\n#include \"envoy/admin/v3/init_dump.pb.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"server/admin/handler_ctx.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass InitDumpHandler : public HandlerContextBase {\n\npublic:\n  InitDumpHandler(Server::Instance& server);\n\n  Http::Code handlerInitDump(absl::string_view path_and_query,\n                             Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                             AdminStream&) const;\n\nprivate:\n  /**\n   * Helper methods for the /init_dump url handler to add unready targets information.\n   */\n  std::unique_ptr<envoy::admin::v3::UnreadyTargetsDumps>\n  dumpUnreadyTargets(const absl::optional<std::string>& target) const;\n\n  /**\n   * Helper methods for the /init_dump url handler to add unready targets config of listeners.\n   */\n  void\n  dumpListenerUnreadyTargets(envoy::admin::v3::UnreadyTargetsDumps& unready_targets_dumps) const;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/listeners_handler.cc",
    "content": "#include \"server/admin/listeners_handler.h\"\n\n#include \"envoy/admin/v3/listeners.pb.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n\n#include \"server/admin/utils.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nListenersHandler::ListenersHandler(Server::Instance& server) : HandlerContextBase(server) {}\n\nHttp::Code ListenersHandler::handlerDrainListeners(absl::string_view url, Http::ResponseHeaderMap&,\n                                                   Buffer::Instance& response, AdminStream&) {\n  const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url);\n\n  ListenerManager::StopListenersType stop_listeners_type =\n      params.find(\"inboundonly\") != params.end() ? ListenerManager::StopListenersType::InboundOnly\n                                                 : ListenerManager::StopListenersType::All;\n\n  const bool graceful = params.find(\"graceful\") != params.end();\n  if (graceful) {\n    // Ignore calls to /drain_listeners?graceful if the drain sequence has\n    // already started.\n    if (!server_.drainManager().draining()) {\n      server_.drainManager().startDrainSequence([this, stop_listeners_type]() {\n        server_.listenerManager().stopListeners(stop_listeners_type);\n      });\n    }\n  } else {\n    server_.listenerManager().stopListeners(stop_listeners_type);\n  }\n\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\nHttp::Code ListenersHandler::handlerListenerInfo(absl::string_view url,\n                                                 Http::ResponseHeaderMap& response_headers,\n                                                 Buffer::Instance& response, AdminStream&) {\n  const Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url);\n  const auto format_value = Utility::formatParam(query_params);\n\n  if (format_value.has_value() && format_value.value() == \"json\") {\n    
writeListenersAsJson(response);\n    response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  } else {\n    writeListenersAsText(response);\n  }\n  return Http::Code::OK;\n}\n\nvoid ListenersHandler::writeListenersAsJson(Buffer::Instance& response) {\n  envoy::admin::v3::Listeners listeners;\n  for (const auto& listener : server_.listenerManager().listeners()) {\n    envoy::admin::v3::ListenerStatus& listener_status = *listeners.add_listener_statuses();\n    listener_status.set_name(listener.get().name());\n    Network::Utility::addressToProtobufAddress(*listener.get().listenSocketFactory().localAddress(),\n                                               *listener_status.mutable_local_address());\n  }\n  response.add(MessageUtil::getJsonStringFromMessage(listeners, true)); // pretty-print\n}\n\nvoid ListenersHandler::writeListenersAsText(Buffer::Instance& response) {\n  for (const auto& listener : server_.listenerManager().listeners()) {\n    response.add(fmt::format(\"{}::{}\\n\", listener.get().name(),\n                             listener.get().listenSocketFactory().localAddress()->asString()));\n  }\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/listeners_handler.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"server/admin/handler_ctx.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ListenersHandler : public HandlerContextBase {\n\npublic:\n  ListenersHandler(Server::Instance& server);\n\n  Http::Code handlerDrainListeners(absl::string_view path_and_query,\n                                   Http::ResponseHeaderMap& response_headers,\n                                   Buffer::Instance& response, AdminStream&);\n\n  Http::Code handlerListenerInfo(absl::string_view path_and_query,\n                                 Http::ResponseHeaderMap& response_headers,\n                                 Buffer::Instance& response, AdminStream&);\n\nprivate:\n  /**\n   * Helper methods for the /listeners url handler.\n   */\n  void writeListenersAsJson(Buffer::Instance& response);\n  void writeListenersAsText(Buffer::Instance& response);\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/logs_handler.cc",
    "content": "#include \"server/admin/logs_handler.h\"\n\n#include <string>\n\n#include \"common/common/fancy_logger.h\"\n#include \"common/common/logger.h\"\n\n#include \"server/admin/utils.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nLogsHandler::LogsHandler(Server::Instance& server) : HandlerContextBase(server) {}\n\nHttp::Code LogsHandler::handlerLogging(absl::string_view url, Http::ResponseHeaderMap&,\n                                       Buffer::Instance& response, AdminStream&) {\n  Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url);\n\n  Http::Code rc = Http::Code::OK;\n  if (!query_params.empty() && !changeLogLevel(query_params)) {\n    response.add(\"usage: /logging?<name>=<level> (change single level)\\n\");\n    response.add(\"usage: /logging?level=<level> (change all levels)\\n\");\n    response.add(\"levels: \");\n    for (auto level_string_view : spdlog::level::level_string_views) {\n      response.add(fmt::format(\"{} \", level_string_view));\n    }\n\n    response.add(\"\\n\");\n    rc = Http::Code::NotFound;\n  }\n\n  if (!Logger::Context::useFancyLogger()) {\n    response.add(\"active loggers:\\n\");\n    for (const Logger::Logger& logger : Logger::Registry::loggers()) {\n      response.add(fmt::format(\"  {}: {}\\n\", logger.name(), logger.levelString()));\n    }\n\n    response.add(\"\\n\");\n  } else {\n    response.add(\"active loggers:\\n\");\n    std::string logger_info = getFancyContext().listFancyLoggers();\n    response.add(logger_info);\n  }\n\n  return rc;\n}\n\nHttp::Code LogsHandler::handlerReopenLogs(absl::string_view, Http::ResponseHeaderMap&,\n                                          Buffer::Instance& response, AdminStream&) {\n  server_.accessLogManager().reopen();\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\nbool LogsHandler::changeLogLevel(const Http::Utility::QueryParams& params) {\n  if (params.size() != 1) {\n    return false;\n  }\n\n  std::string name = 
params.begin()->first;\n  std::string level = params.begin()->second;\n\n  // First see if the level is valid.\n  size_t level_to_use = std::numeric_limits<size_t>::max();\n  for (size_t i = 0; i < ARRAY_SIZE(spdlog::level::level_string_views); i++) {\n    if (level == spdlog::level::level_string_views[i]) {\n      level_to_use = i;\n      break;\n    }\n  }\n\n  if (level_to_use == std::numeric_limits<size_t>::max()) {\n    return false;\n  }\n\n  if (!Logger::Context::useFancyLogger()) {\n    // Now either change all levels or a single level.\n    if (name == \"level\") {\n      ENVOY_LOG(debug, \"change all log levels: level='{}'\", level);\n      for (Logger::Logger& logger : Logger::Registry::loggers()) {\n        logger.setLevel(static_cast<spdlog::level::level_enum>(level_to_use));\n      }\n    } else {\n      ENVOY_LOG(debug, \"change log level: name='{}' level='{}'\", name, level);\n      Logger::Logger* logger_to_change = nullptr;\n      for (Logger::Logger& logger : Logger::Registry::loggers()) {\n        if (logger.name() == name) {\n          logger_to_change = &logger;\n          break;\n        }\n      }\n\n      if (!logger_to_change) {\n        return false;\n      }\n\n      logger_to_change->setLevel(static_cast<spdlog::level::level_enum>(level_to_use));\n    }\n  } else {\n    // Level setting with Fancy Logger.\n    spdlog::level::level_enum lv = static_cast<spdlog::level::level_enum>(level_to_use);\n    if (name == \"level\") {\n      FANCY_LOG(info, \"change all log levels: level='{}'\", level);\n      getFancyContext().setAllFancyLoggers(lv);\n    } else {\n      FANCY_LOG(info, \"change log level: name='{}' level='{}'\", name, level);\n      bool res = getFancyContext().setFancyLogger(name, lv);\n      return res;\n    }\n  }\n\n  return true;\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/logs_handler.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"server/admin/handler_ctx.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass LogsHandler : public HandlerContextBase, Logger::Loggable<Logger::Id::admin> {\n\npublic:\n  LogsHandler(Server::Instance& server);\n\n  Http::Code handlerLogging(absl::string_view path_and_query,\n                            Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                            AdminStream&);\n\n  Http::Code handlerReopenLogs(absl::string_view path_and_query,\n                               Http::ResponseHeaderMap& response_headers,\n                               Buffer::Instance& response, AdminStream&);\n\nprivate:\n  /**\n   * Attempt to change the log level of a logger or all loggers\n   * @param params supplies the incoming endpoint query params.\n   * @return TRUE if level change succeeded, FALSE otherwise.\n   */\n  bool changeLogLevel(const Http::Utility::QueryParams& params);\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/profiling_handler.cc",
    "content": "#include \"server/admin/profiling_handler.h\"\n\n#include \"common/profiler/profiler.h\"\n\n#include \"server/admin/utils.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nProfilingHandler::ProfilingHandler(const std::string& profile_path) : profile_path_(profile_path) {}\n\nHttp::Code ProfilingHandler::handlerCpuProfiler(absl::string_view url, Http::ResponseHeaderMap&,\n                                                Buffer::Instance& response, AdminStream&) {\n  Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url);\n  if (query_params.size() != 1 || query_params.begin()->first != \"enable\" ||\n      (query_params.begin()->second != \"y\" && query_params.begin()->second != \"n\")) {\n    response.add(\"?enable=<y|n>\\n\");\n    return Http::Code::BadRequest;\n  }\n\n  bool enable = query_params.begin()->second == \"y\";\n  if (enable && !Profiler::Cpu::profilerEnabled()) {\n    if (!Profiler::Cpu::startProfiler(profile_path_)) {\n      response.add(\"failure to start the profiler\");\n      return Http::Code::InternalServerError;\n    }\n\n  } else if (!enable && Profiler::Cpu::profilerEnabled()) {\n    Profiler::Cpu::stopProfiler();\n  }\n\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\nHttp::Code ProfilingHandler::handlerHeapProfiler(absl::string_view url, Http::ResponseHeaderMap&,\n                                                 Buffer::Instance& response, AdminStream&) {\n  if (!Profiler::Heap::profilerEnabled()) {\n    response.add(\"The current build does not support heap profiler\");\n    return Http::Code::NotImplemented;\n  }\n\n  Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url);\n  if (query_params.size() != 1 || query_params.begin()->first != \"enable\" ||\n      (query_params.begin()->second != \"y\" && query_params.begin()->second != \"n\")) {\n    response.add(\"?enable=<y|n>\\n\");\n    return Http::Code::BadRequest;\n  }\n\n  Http::Code res = 
Http::Code::OK;\n  bool enable = query_params.begin()->second == \"y\";\n  if (enable) {\n    if (Profiler::Heap::isProfilerStarted()) {\n      response.add(\"Fail to start heap profiler: already started\");\n      res = Http::Code::BadRequest;\n    } else if (!Profiler::Heap::startProfiler(profile_path_)) {\n      // GCOVR_EXCL_START\n      // TODO(silentdai) remove the GCOVR when startProfiler is better implemented\n      response.add(\"Fail to start the heap profiler\");\n      res = Http::Code::InternalServerError;\n      // GCOVR_EXCL_STOP\n    } else {\n      response.add(\"Starting heap profiler\");\n      res = Http::Code::OK;\n    }\n  } else {\n    // !enable\n    if (!Profiler::Heap::isProfilerStarted()) {\n      response.add(\"Fail to stop heap profiler: not started\");\n      res = Http::Code::BadRequest;\n    } else {\n      Profiler::Heap::stopProfiler();\n      response.add(\n          fmt::format(\"Heap profiler stopped and data written to {}. See \"\n                      \"http://goog-perftools.sourceforge.net/doc/heap_profiler.html for details.\",\n                      profile_path_));\n      res = Http::Code::OK;\n    }\n  }\n  return res;\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/profiling_handler.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ProfilingHandler {\n\npublic:\n  ProfilingHandler(const std::string& profile_path);\n\n  Http::Code handlerCpuProfiler(absl::string_view path_and_query,\n                                Http::ResponseHeaderMap& response_headers,\n                                Buffer::Instance& response, AdminStream&);\n\n  Http::Code handlerHeapProfiler(absl::string_view path_and_query,\n                                 Http::ResponseHeaderMap& response_headers,\n                                 Buffer::Instance& response, AdminStream&);\n\nprivate:\n  const std::string profile_path_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/prometheus_stats.cc",
    "content": "#include \"server/admin/prometheus_stats.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/macros.h\"\n#include \"common/stats/histogram_impl.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\n\nconst std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, \"[^a-zA-Z0-9_]\"); }\nconst std::regex& namespaceRegex() {\n  CONSTRUCT_ON_FIRST_USE(std::regex, \"^[a-zA-Z_][a-zA-Z0-9]*$\");\n}\n\n/**\n * Take a string and sanitize it according to Prometheus conventions.\n */\nstd::string sanitizeName(const std::string& name) {\n  // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by\n  // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/.\n  // The initial [a-zA-Z_] constraint is always satisfied by the namespace prefix.\n  return std::regex_replace(name, promRegex(), \"_\");\n}\n\n/*\n * Determine whether a metric has never been emitted and choose to\n * not show it if we only wanted used metrics.\n */\ntemplate <class StatType>\nstatic bool shouldShowMetric(const StatType& metric, const bool used_only,\n                             const absl::optional<std::regex>& regex) {\n  return ((!used_only || metric.used()) &&\n          (!regex.has_value() || std::regex_search(metric.name(), regex.value())));\n}\n\n/*\n * Comparator for Stats::Metric that does not require a string representation\n * to make the comparison, for memory efficiency.\n */\nstruct MetricLessThan {\n  bool operator()(const Stats::Metric* a, const Stats::Metric* b) const {\n    ASSERT(&a->constSymbolTable() == &b->constSymbolTable());\n    return a->constSymbolTable().lessThan(a->statName(), b->statName());\n  }\n};\n\n/**\n * Processes a stat type (counter, gauge, histogram) by generating all output lines, sorting\n * them by tag-extracted metric name, and then outputting them in the correct sorted order into\n * response.\n *\n * @param response The buffer to put the output 
into.\n * @param used_only Whether to only output stats that are used.\n * @param regex A filter on which stats to output.\n * @param metrics The metrics to output stats for. This must contain all stats of the given type\n *        to be included in the same output.\n * @param generate_output A function which returns the output text for this metric.\n * @param type The name of the prometheus metric type for used in TYPE annotations.\n */\ntemplate <class StatType>\nuint64_t outputStatType(\n    Buffer::Instance& response, const bool used_only, const absl::optional<std::regex>& regex,\n    const std::vector<Stats::RefcountPtr<StatType>>& metrics,\n    const std::function<std::string(\n        const StatType& metric, const std::string& prefixed_tag_extracted_name)>& generate_output,\n    absl::string_view type) {\n\n  /*\n   * From\n   * https:*github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#grouping-and-sorting:\n   *\n   * All lines for a given metric must be provided as one single group, with the optional HELP and\n   * TYPE lines first (in no particular order). Beyond that, reproducible sorting in repeated\n   * expositions is preferred but not required, i.e. do not sort if the computational cost is\n   * prohibitive.\n   */\n\n  // This is an unsorted collection of dumb-pointers (no need to increment then decrement every\n  // refcount; ownership is held throughout by `metrics`). 
It is unsorted for efficiency, but will\n  // be sorted before producing the final output to satisfy the \"preferred\" ordering from the\n  // prometheus spec: metrics will be sorted by their tags' textual representation, which will be\n  // consistent across calls.\n  using StatTypeUnsortedCollection = std::vector<const StatType*>;\n\n  // Return early to avoid crashing when getting the symbol table from the first metric.\n  if (metrics.empty()) {\n    return 0;\n  }\n\n  // There should only be one symbol table for all of the stats in the admin\n  // interface. If this assumption changes, the name comparisons in this function\n  // will have to change to compare to convert all StatNames to strings before\n  // comparison.\n  const Stats::SymbolTable& global_symbol_table = metrics.front()->constSymbolTable();\n\n  // Sorted collection of metrics sorted by their tagExtractedName, to satisfy the requirements\n  // of the exposition format.\n  std::map<Stats::StatName, StatTypeUnsortedCollection, Stats::StatNameLessThan> groups(\n      global_symbol_table);\n\n  for (const auto& metric : metrics) {\n    ASSERT(&global_symbol_table == &metric->constSymbolTable());\n\n    if (!shouldShowMetric(*metric, used_only, regex)) {\n      continue;\n    }\n\n    groups[metric->tagExtractedStatName()].push_back(metric.get());\n  }\n\n  for (auto& group : groups) {\n    const std::string prefixed_tag_extracted_name =\n        PrometheusStatsFormatter::metricName(global_symbol_table.toString(group.first));\n    response.add(fmt::format(\"# TYPE {0} {1}\\n\", prefixed_tag_extracted_name, type));\n\n    // Sort before producing the final output to satisfy the \"preferred\" ordering from the\n    // prometheus spec: metrics will be sorted by their tags' textual representation, which will\n    // be consistent across calls.\n    std::sort(group.second.begin(), group.second.end(), MetricLessThan());\n\n    for (const auto& metric : group.second) {\n      
response.add(generate_output(*metric, prefixed_tag_extracted_name));\n    }\n    response.add(\"\\n\");\n  }\n  return groups.size();\n}\n\n/*\n * Return the prometheus output for a numeric Stat (Counter or Gauge).\n */\ntemplate <class StatType>\nstd::string generateNumericOutput(const StatType& metric,\n                                  const std::string& prefixed_tag_extracted_name) {\n  const std::string tags = PrometheusStatsFormatter::formattedTags(metric.tags());\n  return fmt::format(\"{0}{{{1}}} {2}\\n\", prefixed_tag_extracted_name, tags, metric.value());\n}\n\n/*\n * Returns the prometheus output for a histogram. The output is a multi-line string (with embedded\n * newlines) that contains all the individual bucket counts and sum/count for a single histogram\n * (metric_name plus all tags).\n */\nstd::string generateHistogramOutput(const Stats::ParentHistogram& histogram,\n                                    const std::string& prefixed_tag_extracted_name) {\n  const std::string tags = PrometheusStatsFormatter::formattedTags(histogram.tags());\n  const std::string hist_tags = histogram.tags().empty() ? EMPTY_STRING : (tags + \",\");\n\n  const Stats::HistogramStatistics& stats = histogram.cumulativeStatistics();\n  Stats::ConstSupportedBuckets& supported_buckets = stats.supportedBuckets();\n  const std::vector<uint64_t>& computed_buckets = stats.computedBuckets();\n  std::string output;\n  for (size_t i = 0; i < supported_buckets.size(); ++i) {\n    double bucket = supported_buckets[i];\n    uint64_t value = computed_buckets[i];\n    // We want to print the bucket in a fixed point (non-scientific) format. 
The fmt library\n    // doesn't have a specific modifier to format as a fixed-point value only so we use the\n    // 'g' operator which prints the number in general fixed point format or scientific format\n    // with precision 50 to round the number up to 32 significant digits in fixed point format\n    // which should cover pretty much all cases\n    output.append(fmt::format(\"{0}_bucket{{{1}le=\\\"{2:.32g}\\\"}} {3}\\n\", prefixed_tag_extracted_name,\n                              hist_tags, bucket, value));\n  }\n\n  output.append(fmt::format(\"{0}_bucket{{{1}le=\\\"+Inf\\\"}} {2}\\n\", prefixed_tag_extracted_name,\n                            hist_tags, stats.sampleCount()));\n  output.append(fmt::format(\"{0}_sum{{{1}}} {2:.32g}\\n\", prefixed_tag_extracted_name, tags,\n                            stats.sampleSum()));\n  output.append(fmt::format(\"{0}_count{{{1}}} {2}\\n\", prefixed_tag_extracted_name, tags,\n                            stats.sampleCount()));\n\n  return output;\n};\n\nabsl::flat_hash_set<std::string>& prometheusNamespaces() {\n  MUTABLE_CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set<std::string>);\n}\n\n} // namespace\n\nstd::string PrometheusStatsFormatter::formattedTags(const std::vector<Stats::Tag>& tags) {\n  std::vector<std::string> buf;\n  buf.reserve(tags.size());\n  for (const Stats::Tag& tag : tags) {\n    buf.push_back(fmt::format(\"{}=\\\"{}\\\"\", sanitizeName(tag.name_), tag.value_));\n  }\n  return absl::StrJoin(buf, \",\");\n}\n\nstd::string PrometheusStatsFormatter::metricName(const std::string& extracted_name) {\n  std::string sanitized_name = sanitizeName(extracted_name);\n\n  absl::string_view prom_namespace{sanitized_name};\n  prom_namespace = prom_namespace.substr(0, prom_namespace.find_first_of('_'));\n\n  if (prometheusNamespaces().contains(prom_namespace)) {\n    return sanitized_name;\n  }\n\n  // Add namespacing prefix to avoid conflicts, as per best practice:\n  // 
https://prometheus.io/docs/practices/naming/#metric-names\n  // Also, naming conventions on https://prometheus.io/docs/concepts/data_model/\n  return absl::StrCat(\"envoy_\", sanitized_name);\n}\n\n// TODO(efimki): Add support of text readouts stats.\nuint64_t PrometheusStatsFormatter::statsAsPrometheus(\n    const std::vector<Stats::CounterSharedPtr>& counters,\n    const std::vector<Stats::GaugeSharedPtr>& gauges,\n    const std::vector<Stats::ParentHistogramSharedPtr>& histograms, Buffer::Instance& response,\n    const bool used_only, const absl::optional<std::regex>& regex) {\n\n  uint64_t metric_name_count = 0;\n  metric_name_count += outputStatType<Stats::Counter>(\n      response, used_only, regex, counters, generateNumericOutput<Stats::Counter>, \"counter\");\n\n  metric_name_count += outputStatType<Stats::Gauge>(response, used_only, regex, gauges,\n                                                    generateNumericOutput<Stats::Gauge>, \"gauge\");\n\n  metric_name_count += outputStatType<Stats::ParentHistogram>(\n      response, used_only, regex, histograms, generateHistogramOutput, \"histogram\");\n\n  return metric_name_count;\n}\n\nbool PrometheusStatsFormatter::registerPrometheusNamespace(absl::string_view prometheus_namespace) {\n  if (std::regex_match(prometheus_namespace.begin(), prometheus_namespace.end(),\n                       namespaceRegex())) {\n    return prometheusNamespaces().insert(std::string(prometheus_namespace)).second;\n  }\n  return false;\n}\n\nbool PrometheusStatsFormatter::unregisterPrometheusNamespace(\n    absl::string_view prometheus_namespace) {\n  auto it = prometheusNamespaces().find(prometheus_namespace);\n  if (it == prometheusNamespaces().end()) {\n    return false;\n  }\n  prometheusNamespaces().erase(it);\n  return true;\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/prometheus_stats.h",
    "content": "#pragma once\n\n#include <regex>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/stats.h\"\n\nnamespace Envoy {\nnamespace Server {\n/**\n * Formatter for metric/labels exported to Prometheus.\n *\n * See: https://prometheus.io/docs/concepts/data_model\n */\nclass PrometheusStatsFormatter {\npublic:\n  /**\n   * Extracts counters and gauges and relevant tags, appending them to\n   * the response buffer after sanitizing the metric / label names.\n   * @return uint64_t total number of metric types inserted in response.\n   */\n  static uint64_t statsAsPrometheus(const std::vector<Stats::CounterSharedPtr>& counters,\n                                    const std::vector<Stats::GaugeSharedPtr>& gauges,\n                                    const std::vector<Stats::ParentHistogramSharedPtr>& histograms,\n                                    Buffer::Instance& response, const bool used_only,\n                                    const absl::optional<std::regex>& regex);\n  /**\n   * Format the given tags, returning a string as a comma-separated list\n   * of <tag_name>=\"<tag_value>\" pairs.\n   */\n  static std::string formattedTags(const std::vector<Stats::Tag>& tags);\n\n  /**\n   * Format the given metric name, prefixed with \"envoy_\".\n   */\n  static std::string metricName(const std::string& extracted_name);\n\n  /**\n   * Register a prometheus namespace, stats starting with the namespace will not be\n   * automatically prefixed with envoy namespace.\n   * This method must be called from the main thread.\n   * @returns bool if a new namespace is registered, false if the namespace is already\n   *          registered or the namespace is invalid.\n   */\n  static bool registerPrometheusNamespace(absl::string_view prometheus_namespace);\n\n  /**\n   * Unregister a prometheus namespace registered by `registerPrometheusNamespace`\n   * This method must be called from the main thread.\n   
* @returns bool if the Prometheus namespace is unregistered. false if the namespace\n   *          wasn't registered.\n   */\n  static bool unregisterPrometheusNamespace(absl::string_view prometheus_namespace);\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/runtime_handler.cc",
    "content": "#include \"server/admin/runtime_handler.h\"\n\n#include <string>\n#include <vector>\n\n#include \"common/common/empty_string.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"server/admin/utils.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nRuntimeHandler::RuntimeHandler(Server::Instance& server) : HandlerContextBase(server) {}\n\nHttp::Code RuntimeHandler::handlerRuntime(absl::string_view url,\n                                          Http::ResponseHeaderMap& response_headers,\n                                          Buffer::Instance& response, AdminStream&) {\n  const Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url);\n  response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n\n  // TODO(jsedgwick): Use proto to structure this output instead of arbitrary JSON.\n  const auto& layers = server_.runtime().snapshot().getLayers();\n\n  std::vector<ProtobufWkt::Value> layer_names;\n  layer_names.reserve(layers.size());\n  std::map<std::string, std::vector<std::string>> entries;\n  for (const auto& layer : layers) {\n    layer_names.push_back(ValueUtil::stringValue(layer->name()));\n    for (const auto& value : layer->values()) {\n      const auto found = entries.find(value.first);\n      if (found == entries.end()) {\n        entries.emplace(value.first, std::vector<std::string>{});\n      }\n    }\n  }\n\n  for (const auto& layer : layers) {\n    for (auto& entry : entries) {\n      const auto found = layer->values().find(entry.first);\n      const auto& entry_value =\n          found == layer->values().end() ? 
EMPTY_STRING : found->second.raw_string_value_;\n      entry.second.push_back(entry_value);\n    }\n  }\n\n  ProtobufWkt::Struct layer_entries;\n  auto* layer_entry_fields = layer_entries.mutable_fields();\n  for (const auto& entry : entries) {\n    std::vector<ProtobufWkt::Value> layer_entry_values;\n    layer_entry_values.reserve(entry.second.size());\n    std::string final_value;\n    for (const auto& value : entry.second) {\n      if (!value.empty()) {\n        final_value = value;\n      }\n      layer_entry_values.push_back(ValueUtil::stringValue(value));\n    }\n\n    ProtobufWkt::Struct layer_entry_value;\n    auto* layer_entry_value_fields = layer_entry_value.mutable_fields();\n\n    (*layer_entry_value_fields)[\"final_value\"] = ValueUtil::stringValue(final_value);\n    (*layer_entry_value_fields)[\"layer_values\"] = ValueUtil::listValue(layer_entry_values);\n    (*layer_entry_fields)[entry.first] = ValueUtil::structValue(layer_entry_value);\n  }\n\n  ProtobufWkt::Struct runtime;\n  auto* fields = runtime.mutable_fields();\n\n  (*fields)[\"layers\"] = ValueUtil::listValue(layer_names);\n  (*fields)[\"entries\"] = ValueUtil::structValue(layer_entries);\n\n  response.add(MessageUtil::getJsonStringFromMessage(runtime, true, true));\n  return Http::Code::OK;\n}\n\nHttp::Code RuntimeHandler::handlerRuntimeModify(absl::string_view url, Http::ResponseHeaderMap&,\n                                                Buffer::Instance& response,\n                                                AdminStream& admin_stream) {\n  Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url);\n  if (params.empty()) {\n    // Check if the params are in the request's body.\n    if (admin_stream.getRequestBody() != nullptr &&\n        admin_stream.getRequestHeaders().getContentTypeValue() ==\n            Http::Headers::get().ContentTypeValues.FormUrlEncoded) {\n      params = Http::Utility::parseFromBody(admin_stream.getRequestBody()->toString());\n    }\n\n  
  if (params.empty()) {\n      response.add(\"usage: /runtime_modify?key1=value1&key2=value2&keyN=valueN\\n\");\n      response.add(\"       or send the parameters as form values\\n\");\n      response.add(\"use an empty value to remove a previously added override\");\n      return Http::Code::BadRequest;\n    }\n  }\n  absl::node_hash_map<std::string, std::string> overrides;\n  overrides.insert(params.begin(), params.end());\n  try {\n    server_.runtime().mergeValues(overrides);\n  } catch (const EnvoyException& e) {\n    response.add(e.what());\n    return Http::Code::ServiceUnavailable;\n  }\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/runtime_handler.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"server/admin/handler_ctx.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass RuntimeHandler : public HandlerContextBase {\n\npublic:\n  RuntimeHandler(Server::Instance& server);\n\n  Http::Code handlerRuntime(absl::string_view path_and_query,\n                            Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                            AdminStream&);\n  Http::Code handlerRuntimeModify(absl::string_view path_and_query,\n                                  Http::ResponseHeaderMap& response_headers,\n                                  Buffer::Instance& response, AdminStream&);\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/server_cmd_handler.cc",
    "content": "#include \"server/admin/server_cmd_handler.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nServerCmdHandler::ServerCmdHandler(Server::Instance& server) : HandlerContextBase(server) {}\n\nHttp::Code ServerCmdHandler::handlerHealthcheckFail(absl::string_view, Http::ResponseHeaderMap&,\n                                                    Buffer::Instance& response, AdminStream&) {\n  server_.failHealthcheck(true);\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\nHttp::Code ServerCmdHandler::handlerHealthcheckOk(absl::string_view, Http::ResponseHeaderMap&,\n                                                  Buffer::Instance& response, AdminStream&) {\n  server_.failHealthcheck(false);\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\nHttp::Code ServerCmdHandler::handlerQuitQuitQuit(absl::string_view, Http::ResponseHeaderMap&,\n                                                 Buffer::Instance& response, AdminStream&) {\n  server_.shutdown();\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/server_cmd_handler.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"server/admin/handler_ctx.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ServerCmdHandler : public HandlerContextBase {\n\npublic:\n  ServerCmdHandler(Server::Instance& server);\n\n  Http::Code handlerQuitQuitQuit(absl::string_view path_and_query,\n                                 Http::ResponseHeaderMap& response_headers,\n                                 Buffer::Instance& response, AdminStream&);\n\n  Http::Code handlerHealthcheckFail(absl::string_view path_and_query,\n                                    Http::ResponseHeaderMap& response_headers,\n                                    Buffer::Instance& response, AdminStream&);\n\n  Http::Code handlerHealthcheckOk(absl::string_view path_and_query,\n                                  Http::ResponseHeaderMap& response_headers,\n                                  Buffer::Instance& response, AdminStream&);\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/server_info_handler.cc",
    "content": "#include \"server/admin/server_info_handler.h\"\n\n#include \"envoy/admin/v3/memory.pb.h\"\n\n#include \"common/memory/stats.h\"\n#include \"common/version/version.h\"\n\n#include \"server/admin/utils.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nServerInfoHandler::ServerInfoHandler(Server::Instance& server) : HandlerContextBase(server) {}\n\nHttp::Code ServerInfoHandler::handlerCerts(absl::string_view,\n                                           Http::ResponseHeaderMap& response_headers,\n                                           Buffer::Instance& response, AdminStream&) {\n  // This set is used to track distinct certificates. We may have multiple listeners, upstreams, etc\n  // using the same cert.\n  response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  envoy::admin::v3::Certificates certificates;\n  server_.sslContextManager().iterateContexts([&](const Ssl::Context& context) -> void {\n    envoy::admin::v3::Certificate& certificate = *certificates.add_certificates();\n    if (context.getCaCertInformation() != nullptr) {\n      envoy::admin::v3::CertificateDetails* ca_certificate = certificate.add_ca_cert();\n      *ca_certificate = *context.getCaCertInformation();\n    }\n    for (const auto& cert_details : context.getCertChainInformation()) {\n      envoy::admin::v3::CertificateDetails* cert_chain = certificate.add_cert_chain();\n      *cert_chain = *cert_details;\n    }\n  });\n  response.add(MessageUtil::getJsonStringFromMessage(certificates, true, true));\n  return Http::Code::OK;\n}\n\nHttp::Code ServerInfoHandler::handlerHotRestartVersion(absl::string_view, Http::ResponseHeaderMap&,\n                                                       Buffer::Instance& response, AdminStream&) {\n  response.add(server_.hotRestart().version());\n  return Http::Code::OK;\n}\n\n// TODO(ambuc): Add more tcmalloc stats, export proto details based on allocator.\nHttp::Code 
ServerInfoHandler::handlerMemory(absl::string_view,\n                                            Http::ResponseHeaderMap& response_headers,\n                                            Buffer::Instance& response, AdminStream&) {\n  response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  envoy::admin::v3::Memory memory;\n  memory.set_allocated(Memory::Stats::totalCurrentlyAllocated());\n  memory.set_heap_size(Memory::Stats::totalCurrentlyReserved());\n  memory.set_total_thread_cache(Memory::Stats::totalThreadCacheBytes());\n  memory.set_pageheap_unmapped(Memory::Stats::totalPageHeapUnmapped());\n  memory.set_pageheap_free(Memory::Stats::totalPageHeapFree());\n  memory.set_total_physical_bytes(Memory::Stats::totalPhysicalBytes());\n  response.add(MessageUtil::getJsonStringFromMessage(memory, true, true)); // pretty-print\n  return Http::Code::OK;\n}\n\nHttp::Code ServerInfoHandler::handlerReady(absl::string_view, Http::ResponseHeaderMap&,\n                                           Buffer::Instance& response, AdminStream&) {\n  const envoy::admin::v3::ServerInfo::State state =\n      Utility::serverState(server_.initManager().state(), server_.healthCheckFailed());\n\n  response.add(envoy::admin::v3::ServerInfo::State_Name(state) + \"\\n\");\n  Http::Code code =\n      state == envoy::admin::v3::ServerInfo::LIVE ? 
Http::Code::OK : Http::Code::ServiceUnavailable;\n  return code;\n}\n\nHttp::Code ServerInfoHandler::handlerServerInfo(absl::string_view, Http::ResponseHeaderMap& headers,\n                                                Buffer::Instance& response, AdminStream&) {\n  const std::time_t current_time =\n      std::chrono::system_clock::to_time_t(server_.timeSource().systemTime());\n  const std::time_t uptime_current_epoch = current_time - server_.startTimeCurrentEpoch();\n  const std::time_t uptime_all_epochs = current_time - server_.startTimeFirstEpoch();\n\n  ASSERT(uptime_current_epoch >= 0);\n  ASSERT(uptime_all_epochs >= 0);\n\n  envoy::admin::v3::ServerInfo server_info;\n  server_info.set_version(VersionInfo::version());\n  server_info.set_hot_restart_version(server_.hotRestart().version());\n  server_info.set_state(\n      Utility::serverState(server_.initManager().state(), server_.healthCheckFailed()));\n\n  server_info.mutable_uptime_current_epoch()->set_seconds(uptime_current_epoch);\n  server_info.mutable_uptime_all_epochs()->set_seconds(uptime_all_epochs);\n  envoy::admin::v3::CommandLineOptions* command_line_options =\n      server_info.mutable_command_line_options();\n  *command_line_options = *server_.options().toCommandLineOptions();\n  server_info.mutable_node()->MergeFrom(server_.localInfo().node());\n  response.add(MessageUtil::getJsonStringFromMessage(server_info, true, true));\n  headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n  return Http::Code::OK;\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/server_info_handler.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"server/admin/handler_ctx.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ServerInfoHandler : public HandlerContextBase {\n\npublic:\n  ServerInfoHandler(Server::Instance& server);\n\n  Http::Code handlerCerts(absl::string_view path_and_query,\n                          Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                          AdminStream&);\n\n  Http::Code handlerServerInfo(absl::string_view path_and_query,\n                               Http::ResponseHeaderMap& response_headers,\n                               Buffer::Instance& response, AdminStream&);\n\n  Http::Code handlerReady(absl::string_view path_and_query,\n                          Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                          AdminStream&);\n\n  Http::Code handlerHotRestartVersion(absl::string_view path_and_query,\n                                      Http::ResponseHeaderMap& response_headers,\n                                      Buffer::Instance& response, AdminStream&);\n\n  Http::Code handlerMemory(absl::string_view path_and_query,\n                           Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                           AdminStream&);\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/stats_handler.cc",
    "content": "#include \"server/admin/stats_handler.h\"\n\n#include \"envoy/admin/v3/mutex_stats.pb.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/html/utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"server/admin/prometheus_stats.h\"\n#include \"server/admin/utils.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nconst uint64_t RecentLookupsCapacity = 100;\n\nStatsHandler::StatsHandler(Server::Instance& server) : HandlerContextBase(server) {}\n\nHttp::Code StatsHandler::handlerResetCounters(absl::string_view, Http::ResponseHeaderMap&,\n                                              Buffer::Instance& response, AdminStream&) {\n  for (const Stats::CounterSharedPtr& counter : server_.stats().counters()) {\n    counter->reset();\n  }\n  server_.stats().symbolTable().clearRecentLookups();\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\nHttp::Code StatsHandler::handlerStatsRecentLookups(absl::string_view, Http::ResponseHeaderMap&,\n                                                   Buffer::Instance& response, AdminStream&) {\n  Stats::SymbolTable& symbol_table = server_.stats().symbolTable();\n  std::string table;\n  const uint64_t total =\n      symbol_table.getRecentLookups([&table](absl::string_view name, uint64_t count) {\n        table += fmt::format(\"{:8d} {}\\n\", count, name);\n      });\n  if (table.empty() && symbol_table.recentLookupCapacity() == 0) {\n    table = \"Lookup tracking is not enabled. 
Use /stats/recentlookups/enable to enable.\\n\";\n  } else {\n    response.add(\"   Count Lookup\\n\");\n  }\n  response.add(absl::StrCat(table, \"\\ntotal: \", total, \"\\n\"));\n  return Http::Code::OK;\n}\n\nHttp::Code StatsHandler::handlerStatsRecentLookupsClear(absl::string_view, Http::ResponseHeaderMap&,\n                                                        Buffer::Instance& response, AdminStream&) {\n  server_.stats().symbolTable().clearRecentLookups();\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\nHttp::Code StatsHandler::handlerStatsRecentLookupsDisable(absl::string_view,\n                                                          Http::ResponseHeaderMap&,\n                                                          Buffer::Instance& response,\n                                                          AdminStream&) {\n  server_.stats().symbolTable().setRecentLookupCapacity(0);\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\nHttp::Code StatsHandler::handlerStatsRecentLookupsEnable(absl::string_view,\n                                                         Http::ResponseHeaderMap&,\n                                                         Buffer::Instance& response, AdminStream&) {\n  server_.stats().symbolTable().setRecentLookupCapacity(RecentLookupsCapacity);\n  response.add(\"OK\\n\");\n  return Http::Code::OK;\n}\n\nHttp::Code StatsHandler::handlerStats(absl::string_view url,\n                                      Http::ResponseHeaderMap& response_headers,\n                                      Buffer::Instance& response, AdminStream& admin_stream) {\n  Http::Code rc = Http::Code::OK;\n  const Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url);\n\n  const bool used_only = params.find(\"usedonly\") != params.end();\n  absl::optional<std::regex> regex;\n  if (!Utility::filterParam(params, response, regex)) {\n    return Http::Code::BadRequest;\n  }\n\n  std::map<std::string, uint64_t> all_stats;\n  
for (const Stats::CounterSharedPtr& counter : server_.stats().counters()) {\n    if (shouldShowMetric(*counter, used_only, regex)) {\n      all_stats.emplace(counter->name(), counter->value());\n    }\n  }\n\n  for (const Stats::GaugeSharedPtr& gauge : server_.stats().gauges()) {\n    if (shouldShowMetric(*gauge, used_only, regex)) {\n      ASSERT(gauge->importMode() != Stats::Gauge::ImportMode::Uninitialized);\n      all_stats.emplace(gauge->name(), gauge->value());\n    }\n  }\n\n  std::map<std::string, std::string> text_readouts;\n  for (const auto& text_readout : server_.stats().textReadouts()) {\n    if (shouldShowMetric(*text_readout, used_only, regex)) {\n      text_readouts.emplace(text_readout->name(), text_readout->value());\n    }\n  }\n\n  if (const auto format_value = Utility::formatParam(params)) {\n    if (format_value.value() == \"json\") {\n      response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n      response.add(\n          statsAsJson(all_stats, text_readouts, server_.stats().histograms(), used_only, regex));\n    } else if (format_value.value() == \"prometheus\") {\n      return handlerPrometheusStats(url, response_headers, response, admin_stream);\n    } else {\n      response.add(\"usage: /stats?format=json  or /stats?format=prometheus \\n\");\n      response.add(\"\\n\");\n      rc = Http::Code::NotFound;\n    }\n  } else { // Display plain stats if format query param is not there.\n    for (const auto& text_readout : text_readouts) {\n      response.add(fmt::format(\"{}: \\\"{}\\\"\\n\", text_readout.first,\n                               Html::Utility::sanitize(text_readout.second)));\n    }\n    for (const auto& stat : all_stats) {\n      response.add(fmt::format(\"{}: {}\\n\", stat.first, stat.second));\n    }\n    std::map<std::string, std::string> all_histograms;\n    for (const Stats::ParentHistogramSharedPtr& histogram : server_.stats().histograms()) {\n      if (shouldShowMetric(*histogram, 
used_only, regex)) {\n        auto insert = all_histograms.emplace(histogram->name(), histogram->quantileSummary());\n        ASSERT(insert.second); // No duplicates expected.\n      }\n    }\n    for (const auto& histogram : all_histograms) {\n      response.add(fmt::format(\"{}: {}\\n\", histogram.first, histogram.second));\n    }\n  }\n  return rc;\n}\n\nHttp::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query,\n                                                Http::ResponseHeaderMap&,\n                                                Buffer::Instance& response, AdminStream&) {\n  const Http::Utility::QueryParams params =\n      Http::Utility::parseAndDecodeQueryString(path_and_query);\n  const bool used_only = params.find(\"usedonly\") != params.end();\n  absl::optional<std::regex> regex;\n  if (!Utility::filterParam(params, response, regex)) {\n    return Http::Code::BadRequest;\n  }\n  PrometheusStatsFormatter::statsAsPrometheus(server_.stats().counters(), server_.stats().gauges(),\n                                              server_.stats().histograms(), response, used_only,\n                                              regex);\n  return Http::Code::OK;\n}\n\n// TODO(ambuc) Export this as a server (?) 
stat for monitoring.\nHttp::Code StatsHandler::handlerContention(absl::string_view,\n                                           Http::ResponseHeaderMap& response_headers,\n                                           Buffer::Instance& response, AdminStream&) {\n\n  if (server_.options().mutexTracingEnabled() && server_.mutexTracer() != nullptr) {\n    response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);\n\n    envoy::admin::v3::MutexStats mutex_stats;\n    mutex_stats.set_num_contentions(server_.mutexTracer()->numContentions());\n    mutex_stats.set_current_wait_cycles(server_.mutexTracer()->currentWaitCycles());\n    mutex_stats.set_lifetime_wait_cycles(server_.mutexTracer()->lifetimeWaitCycles());\n    response.add(MessageUtil::getJsonStringFromMessage(mutex_stats, true, true));\n  } else {\n    response.add(\"Mutex contention tracing is not enabled. To enable, run Envoy with flag \"\n                 \"--enable-mutex-tracing.\");\n  }\n  return Http::Code::OK;\n}\n\nstd::string\nStatsHandler::statsAsJson(const std::map<std::string, uint64_t>& all_stats,\n                          const std::map<std::string, std::string>& text_readouts,\n                          const std::vector<Stats::ParentHistogramSharedPtr>& all_histograms,\n                          const bool used_only, const absl::optional<std::regex> regex,\n                          const bool pretty_print) {\n\n  ProtobufWkt::Struct document;\n  std::vector<ProtobufWkt::Value> stats_array;\n  for (const auto& text_readout : text_readouts) {\n    ProtobufWkt::Struct stat_obj;\n    auto* stat_obj_fields = stat_obj.mutable_fields();\n    (*stat_obj_fields)[\"name\"] = ValueUtil::stringValue(text_readout.first);\n    (*stat_obj_fields)[\"value\"] = ValueUtil::stringValue(text_readout.second);\n    stats_array.push_back(ValueUtil::structValue(stat_obj));\n  }\n  for (const auto& stat : all_stats) {\n    ProtobufWkt::Struct stat_obj;\n    auto* stat_obj_fields = 
stat_obj.mutable_fields();\n    (*stat_obj_fields)[\"name\"] = ValueUtil::stringValue(stat.first);\n    (*stat_obj_fields)[\"value\"] = ValueUtil::numberValue(stat.second);\n    stats_array.push_back(ValueUtil::structValue(stat_obj));\n  }\n\n  ProtobufWkt::Struct histograms_obj;\n  auto* histograms_obj_fields = histograms_obj.mutable_fields();\n\n  ProtobufWkt::Struct histograms_obj_container;\n  auto* histograms_obj_container_fields = histograms_obj_container.mutable_fields();\n  std::vector<ProtobufWkt::Value> computed_quantile_array;\n\n  bool found_used_histogram = false;\n  for (const Stats::ParentHistogramSharedPtr& histogram : all_histograms) {\n    if (shouldShowMetric(*histogram, used_only, regex)) {\n      if (!found_used_histogram) {\n        // It is not possible for the supported quantiles to differ across histograms, so it is ok\n        // to send them once.\n        Stats::HistogramStatisticsImpl empty_statistics;\n        std::vector<ProtobufWkt::Value> supported_quantile_array;\n        for (double quantile : empty_statistics.supportedQuantiles()) {\n          supported_quantile_array.push_back(ValueUtil::numberValue(quantile * 100));\n        }\n        (*histograms_obj_fields)[\"supported_quantiles\"] =\n            ValueUtil::listValue(supported_quantile_array);\n        found_used_histogram = true;\n      }\n\n      ProtobufWkt::Struct computed_quantile;\n      auto* computed_quantile_fields = computed_quantile.mutable_fields();\n      (*computed_quantile_fields)[\"name\"] = ValueUtil::stringValue(histogram->name());\n\n      std::vector<ProtobufWkt::Value> computed_quantile_value_array;\n      for (size_t i = 0; i < histogram->intervalStatistics().supportedQuantiles().size(); ++i) {\n        ProtobufWkt::Struct computed_quantile_value;\n        auto* computed_quantile_value_fields = computed_quantile_value.mutable_fields();\n        const auto& interval = histogram->intervalStatistics().computedQuantiles()[i];\n        const auto& cumulative 
= histogram->cumulativeStatistics().computedQuantiles()[i];\n        (*computed_quantile_value_fields)[\"interval\"] =\n            std::isnan(interval) ? ValueUtil::nullValue() : ValueUtil::numberValue(interval);\n        (*computed_quantile_value_fields)[\"cumulative\"] =\n            std::isnan(cumulative) ? ValueUtil::nullValue() : ValueUtil::numberValue(cumulative);\n\n        computed_quantile_value_array.push_back(ValueUtil::structValue(computed_quantile_value));\n      }\n      (*computed_quantile_fields)[\"values\"] = ValueUtil::listValue(computed_quantile_value_array);\n      computed_quantile_array.push_back(ValueUtil::structValue(computed_quantile));\n    }\n  }\n\n  if (found_used_histogram) {\n    (*histograms_obj_fields)[\"computed_quantiles\"] = ValueUtil::listValue(computed_quantile_array);\n    (*histograms_obj_container_fields)[\"histograms\"] = ValueUtil::structValue(histograms_obj);\n    stats_array.push_back(ValueUtil::structValue(histograms_obj_container));\n  }\n\n  auto* document_fields = document.mutable_fields();\n  (*document_fields)[\"stats\"] = ValueUtil::listValue(stats_array);\n\n  return MessageUtil::getJsonStringFromMessage(document, pretty_print, true);\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/stats_handler.h",
    "content": "#pragma once\n\n#include <regex>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/server/admin.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"common/stats/histogram_impl.h\"\n\n#include \"server/admin/handler_ctx.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass StatsHandler : public HandlerContextBase {\n\npublic:\n  StatsHandler(Server::Instance& server);\n\n  Http::Code handlerResetCounters(absl::string_view path_and_query,\n                                  Http::ResponseHeaderMap& response_headers,\n                                  Buffer::Instance& response, AdminStream&);\n  Http::Code handlerStatsRecentLookups(absl::string_view path_and_query,\n                                       Http::ResponseHeaderMap& response_headers,\n                                       Buffer::Instance& response, AdminStream&);\n  Http::Code handlerStatsRecentLookupsClear(absl::string_view path_and_query,\n                                            Http::ResponseHeaderMap& response_headers,\n                                            Buffer::Instance& response, AdminStream&);\n  Http::Code handlerStatsRecentLookupsDisable(absl::string_view path_and_query,\n                                              Http::ResponseHeaderMap& response_headers,\n                                              Buffer::Instance& response, AdminStream&);\n  Http::Code handlerStatsRecentLookupsEnable(absl::string_view path_and_query,\n                                             Http::ResponseHeaderMap& response_headers,\n                                             Buffer::Instance& response, AdminStream&);\n  Http::Code handlerStats(absl::string_view path_and_query,\n                          Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                          AdminStream&);\n  Http::Code 
handlerPrometheusStats(absl::string_view path_and_query,\n                                    Http::ResponseHeaderMap& response_headers,\n                                    Buffer::Instance& response, AdminStream&);\n  Http::Code handlerContention(absl::string_view path_and_query,\n                               Http::ResponseHeaderMap& response_headers,\n                               Buffer::Instance& response, AdminStream&);\n\nprivate:\n  template <class StatType>\n  static bool shouldShowMetric(const StatType& metric, const bool used_only,\n                               const absl::optional<std::regex>& regex) {\n    return ((!used_only || metric.used()) &&\n            (!regex.has_value() || std::regex_search(metric.name(), regex.value())));\n  }\n\n  friend class AdminStatsTest;\n\n  static std::string statsAsJson(const std::map<std::string, uint64_t>& all_stats,\n                                 const std::map<std::string, std::string>& text_readouts,\n                                 const std::vector<Stats::ParentHistogramSharedPtr>& all_histograms,\n                                 bool used_only,\n                                 const absl::optional<std::regex> regex = absl::nullopt,\n                                 bool pretty_print = false);\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/utils.cc",
    "content": "#include \"server/admin/utils.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/http/headers.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Utility {\n\nenvoy::admin::v3::ServerInfo::State serverState(Init::Manager::State state,\n                                                bool health_check_failed) {\n  switch (state) {\n  case Init::Manager::State::Uninitialized:\n    return envoy::admin::v3::ServerInfo::PRE_INITIALIZING;\n  case Init::Manager::State::Initializing:\n    return envoy::admin::v3::ServerInfo::INITIALIZING;\n  case Init::Manager::State::Initialized:\n    return health_check_failed ? envoy::admin::v3::ServerInfo::DRAINING\n                               : envoy::admin::v3::ServerInfo::LIVE;\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid populateFallbackResponseHeaders(Http::Code code, Http::ResponseHeaderMap& header_map) {\n  header_map.setStatus(std::to_string(enumToInt(code)));\n  if (header_map.ContentType() == nullptr) {\n    // Default to text-plain if unset.\n    header_map.setReferenceContentType(Http::Headers::get().ContentTypeValues.TextUtf8);\n  }\n  // Default to 'no-cache' if unset, but not 'no-store' which may break the back button.\n  if (header_map.get(Http::CustomHeaders::get().CacheControl) == nullptr) {\n    header_map.setReference(Http::CustomHeaders::get().CacheControl,\n                            Http::CustomHeaders::get().CacheControlValues.NoCacheMaxAge0);\n  }\n\n  // Under no circumstance should browsers sniff content-type.\n  header_map.addReference(Http::Headers::get().XContentTypeOptions,\n                          Http::Headers::get().XContentTypeOptionValues.Nosniff);\n}\n\n// Helper method to get filter parameter, or report an error for an invalid regex.\nbool filterParam(Http::Utility::QueryParams params, Buffer::Instance& response,\n                 absl::optional<std::regex>& regex) {\n  auto p = params.find(\"filter\");\n  if (p != params.end()) {\n    const std::string& 
pattern = p->second;\n    try {\n      regex = std::regex(pattern);\n    } catch (std::regex_error& error) {\n      // Include the offending pattern in the log, but not the error message.\n      response.add(fmt::format(\"Invalid regex: \\\"{}\\\"\\n\", error.what()));\n      ENVOY_LOG_MISC(error, \"admin: Invalid regex: \\\"{}\\\": {}\", error.what(), pattern);\n      return false;\n    }\n  }\n  return true;\n}\n\n// Helper method to get the format parameter.\nabsl::optional<std::string> formatParam(const Http::Utility::QueryParams& params) {\n  return queryParam(params, \"format\");\n}\n\n// Helper method to get a query parameter.\nabsl::optional<std::string> queryParam(const Http::Utility::QueryParams& params,\n                                       const std::string& key) {\n  return (params.find(key) != params.end()) ? absl::optional<std::string>{params.at(key)}\n                                            : absl::nullopt;\n}\n\n} // namespace Utility\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/admin/utils.h",
    "content": "#pragma once\n\n#include <regex>\n\n#include \"envoy/admin/v3/server_info.pb.h\"\n#include \"envoy/init/manager.h\"\n\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/utility.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Utility {\n\nenvoy::admin::v3::ServerInfo::State serverState(Init::Manager::State state,\n                                                bool health_check_failed);\n\nvoid populateFallbackResponseHeaders(Http::Code code, Http::ResponseHeaderMap& header_map);\n\nbool filterParam(Http::Utility::QueryParams params, Buffer::Instance& response,\n                 absl::optional<std::regex>& regex);\n\nabsl::optional<std::string> formatParam(const Http::Utility::QueryParams& params);\n\nabsl::optional<std::string> queryParam(const Http::Utility::QueryParams& params,\n                                       const std::string& key);\n\n} // namespace Utility\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/api_listener_impl.cc",
    "content": "#include \"server/api_listener_impl.h\"\n\n#include \"envoy/api/v2/lds.pb.h\"\n#include \"envoy/api/v2/listener/listener.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/http/conn_manager_impl.h\"\n#include \"common/network/resolver_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"server/drain_manager_impl.h\"\n#include \"server/listener_manager_impl.h\"\n\n#include \"extensions/filters/network/http_connection_manager/config.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nApiListenerImplBase::ApiListenerImplBase(const envoy::config::listener::v3::Listener& config,\n                                         ListenerManagerImpl& parent, const std::string& name)\n    : config_(config), parent_(parent), name_(name),\n      address_(Network::Address::resolveProtoAddress(config.address())),\n      global_scope_(parent_.server_.stats().createScope(\"\")),\n      listener_scope_(parent_.server_.stats().createScope(fmt::format(\"listener.api.{}.\", name_))),\n      factory_context_(parent_.server_, config_, *this, *global_scope_, *listener_scope_),\n      read_callbacks_(SyntheticReadCallbacks(*this)) {}\n\nvoid ApiListenerImplBase::SyntheticReadCallbacks::SyntheticConnection::raiseConnectionEvent(\n    Network::ConnectionEvent event) {\n  for (Network::ConnectionCallbacks* callback : callbacks_) {\n    callback->onEvent(event);\n  }\n}\n\nHttpApiListener::HttpApiListener(const envoy::config::listener::v3::Listener& config,\n                                 ListenerManagerImpl& parent, const std::string& name)\n    : ApiListenerImplBase(config, parent, name) {\n  auto typed_config = MessageUtil::anyConvertAndValidate<\n      envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>(\n      config.api_listener().api_listener(), factory_context_.messageValidationVisitor());\n\n  http_connection_manager_factory_ = Envoy::Extensions::NetworkFilters::HttpConnectionManager::\n      
HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto(\n          typed_config, factory_context_, read_callbacks_);\n}\n\nHttp::ApiListenerOptRef HttpApiListener::http() {\n  if (!http_connection_manager_) {\n    http_connection_manager_ = http_connection_manager_factory_();\n  }\n  return Http::ApiListenerOptRef(std::ref(*http_connection_manager_));\n}\n\nvoid HttpApiListener::shutdown() {\n  // The Http::ConnectionManagerImpl is a callback target for the read_callback_.connection_. By\n  // raising connection closure, Http::ConnectionManagerImpl::onEvent is fired. In that case the\n  // Http::ConnectionManagerImpl will reset any ActiveStreams it has.\n  read_callbacks_.connection_.raiseConnectionEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/api_listener_impl.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/server/api_listener.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/listener_manager.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/conn_manager_impl.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"server/filter_chain_manager_impl.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ListenerManagerImpl;\n\n/**\n * Base class all ApiListeners.\n */\nclass ApiListenerImplBase : public ApiListener,\n                            public Network::DrainDecision,\n                            Logger::Loggable<Logger::Id::http> {\npublic:\n  // TODO(junr03): consider moving Envoy Mobile's SyntheticAddressImpl to Envoy in order to return\n  // that rather than this semi-real one.\n  const Network::Address::InstanceConstSharedPtr& address() const { return address_; }\n\n  // ApiListener\n  absl::string_view name() const override { return name_; }\n\n  // Network::DrainDecision\n  // TODO(junr03): hook up draining to listener state management.\n  bool drainClose() const override { return false; }\n\nprotected:\n  ApiListenerImplBase(const envoy::config::listener::v3::Listener& config,\n                      ListenerManagerImpl& parent, const std::string& name);\n\n  // Synthetic class that acts as a stub Network::ReadFilterCallbacks.\n  // TODO(junr03): if we are able to separate the Network Filter aspects of the\n  // Http::ConnectionManagerImpl from the http management aspects of it, it is possible we would not\n  // need this and the SyntheticConnection stub anymore.\n  class SyntheticReadCallbacks : public 
Network::ReadFilterCallbacks {\n  public:\n    SyntheticReadCallbacks(ApiListenerImplBase& parent)\n        : parent_(parent), connection_(SyntheticConnection(*this)) {}\n\n    // Network::ReadFilterCallbacks\n    void continueReading() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n    void injectReadDataToFilterChain(Buffer::Instance&, bool) override {\n      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    }\n    Upstream::HostDescriptionConstSharedPtr upstreamHost() override { return nullptr; }\n    void upstreamHost(Upstream::HostDescriptionConstSharedPtr) override {\n      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    }\n    Network::Connection& connection() override { return connection_; }\n\n    // Synthetic class that acts as a stub for the connection backing the\n    // Network::ReadFilterCallbacks.\n    class SyntheticConnection : public Network::Connection {\n    public:\n      SyntheticConnection(SyntheticReadCallbacks& parent)\n          : parent_(parent), stream_info_(parent_.parent_.factory_context_.timeSource()),\n            options_(std::make_shared<std::vector<Network::Socket::OptionConstSharedPtr>>()) {}\n\n      void raiseConnectionEvent(Network::ConnectionEvent event);\n\n      // Network::FilterManager\n      void addWriteFilter(Network::WriteFilterSharedPtr) override {\n        NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n      }\n      void addFilter(Network::FilterSharedPtr) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n      void addReadFilter(Network::ReadFilterSharedPtr) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n      bool initializeReadFilters() override { return true; }\n\n      // Network::Connection\n      void addConnectionCallbacks(Network::ConnectionCallbacks& cb) override {\n        callbacks_.push_back(&cb);\n      }\n      void addBytesSentCallback(Network::Connection::BytesSentCb) override {\n        NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n      }\n      void enableHalfClose(bool) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n      void 
close(Network::ConnectionCloseType) override {}\n      Event::Dispatcher& dispatcher() override {\n        return parent_.parent_.factory_context_.dispatcher();\n      }\n      uint64_t id() const override { return 12345; }\n      void hashKey(std::vector<uint8_t>&) const override {}\n      std::string nextProtocol() const override { return EMPTY_STRING; }\n      void noDelay(bool) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n      void readDisable(bool) override {}\n      void detectEarlyCloseWhenReadDisabled(bool) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n      bool readEnabled() const override { return true; }\n      const Network::Address::InstanceConstSharedPtr& remoteAddress() const override {\n        return parent_.parent_.address();\n      }\n      const Network::Address::InstanceConstSharedPtr& directRemoteAddress() const override {\n        return parent_.parent_.address();\n      }\n      absl::optional<Network::Connection::UnixDomainSocketPeerCredentials>\n      unixSocketPeerCredentials() const override {\n        return absl::nullopt;\n      }\n      const Network::Address::InstanceConstSharedPtr& localAddress() const override {\n        return parent_.parent_.address();\n      }\n      void setConnectionStats(const Network::Connection::ConnectionStats&) override {}\n      Ssl::ConnectionInfoConstSharedPtr ssl() const override { return nullptr; }\n      absl::string_view requestedServerName() const override { return EMPTY_STRING; }\n      State state() const override { return Network::Connection::State::Open; }\n      void write(Buffer::Instance&, bool) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n      void setBufferLimits(uint32_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n      uint32_t bufferLimit() const override { return 65000; }\n      bool localAddressRestored() const override { return false; }\n      bool aboveHighWatermark() const override { return false; }\n      const Network::ConnectionSocket::OptionsSharedPtr& 
socketOptions() const override {\n        return options_;\n      }\n      StreamInfo::StreamInfo& streamInfo() override { return stream_info_; }\n      const StreamInfo::StreamInfo& streamInfo() const override { return stream_info_; }\n      void setDelayedCloseTimeout(std::chrono::milliseconds) override {}\n      absl::string_view transportFailureReason() const override { return EMPTY_STRING; }\n      absl::optional<std::chrono::milliseconds> lastRoundTripTime() const override { return {}; };\n\n      SyntheticReadCallbacks& parent_;\n      StreamInfo::StreamInfoImpl stream_info_;\n      Network::ConnectionSocket::OptionsSharedPtr options_;\n      std::list<Network::ConnectionCallbacks*> callbacks_;\n    };\n\n    ApiListenerImplBase& parent_;\n    SyntheticConnection connection_;\n  };\n\n  const envoy::config::listener::v3::Listener& config_;\n  ListenerManagerImpl& parent_;\n  const std::string name_;\n  Network::Address::InstanceConstSharedPtr address_;\n  Stats::ScopePtr global_scope_;\n  Stats::ScopePtr listener_scope_;\n  FactoryContextImpl factory_context_;\n  SyntheticReadCallbacks read_callbacks_;\n};\n\n/**\n * ApiListener that provides a handle to inject HTTP calls into Envoy via an\n * Http::ConnectionManager. 
Thus, it provides full access to Envoy's L7 features, e.g HTTP filters.\n */\nclass HttpApiListener : public ApiListenerImplBase {\npublic:\n  HttpApiListener(const envoy::config::listener::v3::Listener& config, ListenerManagerImpl& parent,\n                  const std::string& name);\n\n  // ApiListener\n  ApiListener::Type type() const override { return ApiListener::Type::HttpApiListener; }\n  Http::ApiListenerOptRef http() override;\n  void shutdown() override;\n\n  Network::ReadFilterCallbacks& readCallbacksForTest() { return read_callbacks_; }\n\nprivate:\n  // Need to store the factory due to the shared_ptrs that need to be kept alive: date provider,\n  // route config manager, scoped route config manager.\n  std::function<Http::ApiListenerPtr()> http_connection_manager_factory_;\n  // Http::ServerConnectionCallbacks is the API surface that this class provides via its handle().\n  Http::ApiListenerPtr http_connection_manager_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/backtrace.cc",
    "content": "#include \"server/backtrace.h\"\n\n#include <iostream>\n\nnamespace Envoy {\n\nbool BackwardsTrace::log_to_stderr_ = false;\n\nvoid BackwardsTrace::setLogToStderr(bool log_to_stderr) { log_to_stderr_ = log_to_stderr; }\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/backtrace.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"common/common/logger.h\"\n#include \"common/version/version.h\"\n\n#include \"absl/debugging/stacktrace.h\"\n#include \"absl/debugging/symbolize.h\"\n\nnamespace Envoy {\n#define BACKTRACE_LOG()                                                                            \\\n  do {                                                                                             \\\n    BackwardsTrace t;                                                                              \\\n    t.capture();                                                                                   \\\n    t.logTrace();                                                                                  \\\n  } while (0)\n\n/**\n * Use absl::Stacktrace and absl::Symbolize to log resolved symbols\n * stack traces on demand. To use this just do:\n *\n * BackwardsTrace tracer;\n * tracer.capture(); // Trace is captured as of here.\n * tracer.logTrace(); // Output the captured trace to the log.\n *\n * The capture and log steps are separated to enable debugging in the case where\n * you want to capture a stack trace from inside some logic but don't know whether\n * you want to bother logging it until later.\n *\n * For convenience a macro is provided BACKTRACE_LOG() which performs the\n * construction, capture, and log in one shot.\n *\n * If the symbols cannot be resolved by absl::Symbolize then the raw address\n * will be printed instead.\n */\nclass BackwardsTrace : Logger::Loggable<Logger::Id::backtrace> {\npublic:\n  BackwardsTrace() = default;\n\n  /**\n   * Directs the output of logTrace() to directly stderr rather than the\n   * logging infrastructure.\n   *\n   * This is intended for coverage tests, where we enable trace logs, but send\n   * them to /dev/null to avoid accumulating too much data in CI.\n   *\n   * @param log_to_stderr Whether to log to stderr or the logging system.\n   */\n  static void setLogToStderr(bool 
log_to_stderr);\n\n  /**\n   * @return whether the system directing backtraces directly to stderr.\n   */\n  static bool logToStderr() { return log_to_stderr_; }\n\n  /**\n   * Capture a stack trace.\n   *\n   * The trace will begin with the call to capture().\n   */\n  void capture() {\n    // Skip of one means we exclude the last call, which must be to capture().\n    stack_depth_ = absl::GetStackTrace(stack_trace_, MaxStackDepth, /* skip_count = */ 1);\n  }\n\n  /**\n   * Capture a stack trace from a particular context.\n   *\n   * This can be used to capture a useful stack trace from a fatal signal\n   * handler. The context argument should be a pointer to the context passed\n   * to a signal handler registered via a sigaction struct.\n   *\n   * @param context A pointer to ucontext_t obtained from a sigaction handler.\n   */\n  void captureFrom(const void* context) {\n    stack_depth_ =\n        absl::GetStackTraceWithContext(stack_trace_, MaxStackDepth, /* skip_count = */ 1, context,\n                                       /* min_dropped_frames = */ nullptr);\n  }\n\n  /**\n   * Log the stack trace.\n   */\n  void logTrace() {\n    if (log_to_stderr_) {\n      printTrace(std::cerr);\n      return;\n    }\n\n    ENVOY_LOG(critical, \"Backtrace (use tools/stack_decode.py to get line numbers):\");\n    ENVOY_LOG(critical, \"Envoy version: {}\", VersionInfo::version());\n\n    visitTrace([](int index, const char* symbol, void* address) {\n      if (symbol != nullptr) {\n        ENVOY_LOG(critical, \"#{}: {} [{}]\", index, symbol, address);\n      } else {\n        ENVOY_LOG(critical, \"#{}: [{}]\", index, address);\n      }\n    });\n  }\n\n  void logFault(const char* signame, const void* addr) {\n    ENVOY_LOG(critical, \"Caught {}, suspect faulting address {}\", signame, addr);\n  }\n\n  void printTrace(std::ostream& os) {\n    visitTrace([&](int index, const char* symbol, void* address) {\n      if (symbol != nullptr) {\n        os << \"#\" << index << \" \" 
<< symbol << \" [\" << address << \"]\\n\";\n      } else {\n        os << \"#\" << index << \" [\" << address << \"]\\n\";\n      }\n    });\n  }\n\nprivate:\n  static bool log_to_stderr_;\n\n  /**\n   * Visit the previously captured stack trace.\n   *\n   * The visitor function is called once per frame, with 3 parameters:\n   * 1. (int) The index of the current frame.\n   * 2. (const char*) The symbol name for the address of the current frame. nullptr means\n   * symbolization failed.\n   * 3. (void*) The address of the current frame.\n   */\n  void visitTrace(const std::function<void(int, const char*, void*)>& visitor) {\n    for (int i = 0; i < stack_depth_; ++i) {\n      char out[1024];\n      const bool success = absl::Symbolize(stack_trace_[i], out, sizeof(out));\n      if (success) {\n        visitor(i, out, stack_trace_[i]);\n      } else {\n        visitor(i, nullptr, stack_trace_[i]);\n      }\n    }\n  }\n\n  static constexpr int MaxStackDepth = 64;\n  void* stack_trace_[MaxStackDepth];\n  int stack_depth_{0};\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/BUILD",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_cc_library\", \"envoy_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"admin_lib\",\n    srcs = [\"admin.cc\"],\n    hdrs = [\"admin.h\"],\n    deps = [\n        \"//include/envoy/server:admin_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/server/admin:config_tracker_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"api_lib\",\n    srcs = [\"api.cc\"],\n    hdrs = [\"api.h\"],\n    deps = [\n        \":dispatcher_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//source/common/api:api_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"async_client_lib\",\n    srcs = [\"async_client.cc\"],\n    hdrs = [\"async_client.h\"],\n    deps = [\n        \":dispatcher_lib\",\n        \"//include/envoy/http:async_client_interface\",\n        \"//include/envoy/http:message_interface\",\n        \"//source/common/common:assert_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"cluster_manager_lib\",\n    srcs = [\"cluster_manager.cc\"],\n    hdrs = [\"cluster_manager.h\"],\n    deps = [\n        \":async_client_lib\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/upstream:cluster_manager_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dispatcher_lib\",\n    srcs = [\"dispatcher.cc\"],\n    hdrs = [\n        \"connection.h\",\n        \"dispatcher.h\",\n        \"dns.h\",\n    ],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//source/common/common:assert_lib\",\n        
\"//source/common/event:dispatcher_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"dns_lib\",\n    srcs = [\"dns.cc\"],\n    hdrs = [\"dns.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:dns_interface\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"server_lib\",\n    srcs = [\"server.cc\"],\n    hdrs = [\"server.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":admin_lib\",\n        \":api_lib\",\n        \":cluster_manager_lib\",\n        \":dns_lib\",\n        \"//include/envoy/server:drain_manager_interface\",\n        \"//include/envoy/server:instance_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//source/common/access_log:access_log_manager_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/local_info:local_info_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/router:rds_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/thread_local:thread_local_lib\",\n        \"//source/common/version:version_lib\",\n        \"//source/server:configuration_lib\",\n        \"//source/server:server_lib\",\n        \"//source/server/admin:admin_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "source/server/config_validation/admin.cc",
    "content": "#include \"server/config_validation/admin.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n// Pretend that handler was added successfully.\nbool ValidationAdmin::addHandler(const std::string&, const std::string&, HandlerCb, bool, bool) {\n  return true;\n}\n\nbool ValidationAdmin::removeHandler(const std::string&) { return true; }\n\nconst Network::Socket& ValidationAdmin::socket() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\nConfigTracker& ValidationAdmin::getConfigTracker() { return config_tracker_; }\n\nvoid ValidationAdmin::startHttpListener(const std::string&, const std::string&,\n                                        Network::Address::InstanceConstSharedPtr,\n                                        const Network::Socket::OptionsSharedPtr&,\n                                        Stats::ScopePtr&&) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\nHttp::Code ValidationAdmin::request(absl::string_view, absl::string_view, Http::ResponseHeaderMap&,\n                                    std::string&) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\nvoid ValidationAdmin::addListenerToHandler(Network::ConnectionHandler*) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/admin.h",
    "content": "#pragma once\n\n#include \"envoy/server/admin.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"server/admin/config_tracker_impl.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Config-validation-only implementation Server::Admin. This implementation is\n * needed because Admin is referenced by components of the server that add and\n * remove handlers.\n */\nclass ValidationAdmin : public Admin {\npublic:\n  bool addHandler(const std::string&, const std::string&, HandlerCb, bool, bool) override;\n  bool removeHandler(const std::string&) override;\n  const Network::Socket& socket() override;\n  ConfigTracker& getConfigTracker() override;\n  void startHttpListener(const std::string& access_log_path, const std::string& address_out_path,\n                         Network::Address::InstanceConstSharedPtr address,\n                         const Network::Socket::OptionsSharedPtr&,\n                         Stats::ScopePtr&& listener_scope) override;\n  Http::Code request(absl::string_view path_and_query, absl::string_view method,\n                     Http::ResponseHeaderMap& response_headers, std::string& body) override;\n  void addListenerToHandler(Network::ConnectionHandler* handler) override;\n  uint32_t concurrency() const override { return 1; }\n\nprivate:\n  ConfigTrackerImpl config_tracker_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/api.cc",
    "content": "#include \"server/config_validation/api.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"server/config_validation/dispatcher.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nValidationImpl::ValidationImpl(Thread::ThreadFactory& thread_factory, Stats::Store& stats_store,\n                               Event::TimeSystem& time_system, Filesystem::Instance& file_system,\n                               Random::RandomGenerator& random_generator)\n    : Impl(thread_factory, stats_store, time_system, file_system, random_generator),\n      time_system_(time_system) {}\n\nEvent::DispatcherPtr ValidationImpl::allocateDispatcher(const std::string& name) {\n  return Event::DispatcherPtr{new Event::ValidationDispatcher(name, *this, time_system_)};\n}\n\nEvent::DispatcherPtr ValidationImpl::allocateDispatcher(const std::string&,\n                                                        Buffer::WatermarkFactoryPtr&&) {\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/api.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/filesystem/filesystem.h\"\n\n#include \"common/api/api_impl.h\"\n\nnamespace Envoy {\nnamespace Api {\n\n/**\n * Config-validation-only implementation of Api::Api. Delegates to Api::Impl,\n * except for allocateDispatcher() which sets up a ValidationDispatcher.\n */\nclass ValidationImpl : public Impl {\npublic:\n  ValidationImpl(Thread::ThreadFactory& thread_factory, Stats::Store& stats_store,\n                 Event::TimeSystem& time_system, Filesystem::Instance& file_system,\n                 Random::RandomGenerator& random_generator);\n\n  Event::DispatcherPtr allocateDispatcher(const std::string& name) override;\n  Event::DispatcherPtr allocateDispatcher(const std::string& name,\n                                          Buffer::WatermarkFactoryPtr&& watermark_factory) override;\n\nprivate:\n  Event::TimeSystem& time_system_;\n};\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/async_client.cc",
    "content": "#include \"server/config_validation/async_client.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nValidationAsyncClient::ValidationAsyncClient(Api::Api& api, Event::TimeSystem& time_system)\n    : dispatcher_(\"validation_async_client\", api, time_system) {}\n\nAsyncClient::Request* ValidationAsyncClient::send(RequestMessagePtr&&, Callbacks&,\n                                                  const RequestOptions&) {\n  return nullptr;\n}\n\nAsyncClient::Stream* ValidationAsyncClient::start(StreamCallbacks&, const StreamOptions&) {\n  return nullptr;\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/async_client.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/message.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"server/config_validation/dispatcher.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Config-validation-only implementation of AsyncClient. Both methods on AsyncClient are allowed to\n * return nullptr if the request can't be created, so that's what the ValidationAsyncClient does in\n * all cases.\n */\nclass ValidationAsyncClient : public AsyncClient {\npublic:\n  ValidationAsyncClient(Api::Api& api, Event::TimeSystem& time_system);\n\n  // Http::AsyncClient\n  AsyncClient::Request* send(RequestMessagePtr&& request, Callbacks& callbacks,\n                             const RequestOptions&) override;\n\n  AsyncClient::Stream* start(StreamCallbacks& callbacks, const StreamOptions&) override;\n\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n\nprivate:\n  Event::ValidationDispatcher dispatcher_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/cluster_manager.cc",
    "content": "#include \"server/config_validation/cluster_manager.h\"\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nClusterManagerPtr ValidationClusterManagerFactory::clusterManagerFromProto(\n    const envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n  return std::make_unique<ValidationClusterManager>(\n      bootstrap, *this, stats_, tls_, runtime_, local_info_, log_manager_, main_thread_dispatcher_,\n      admin_, validation_context_, api_, http_context_, grpc_context_, time_system_);\n}\n\nCdsApiPtr\nValidationClusterManagerFactory::createCds(const envoy::config::core::v3::ConfigSource& cds_config,\n                                           ClusterManager& cm) {\n  // Create the CdsApiImpl...\n  ProdClusterManagerFactory::createCds(cds_config, cm);\n  // ... and then throw it away, so that we don't actually connect to it.\n  return nullptr;\n}\n\nValidationClusterManager::ValidationClusterManager(\n    const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory,\n    Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime,\n    const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager,\n    Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin,\n    ProtobufMessage::ValidationContext& validation_context, Api::Api& api,\n    Http::Context& http_context, Grpc::Context& grpc_context, Event::TimeSystem& time_system)\n    : ClusterManagerImpl(bootstrap, factory, stats, tls, runtime, local_info, log_manager,\n                         main_thread_dispatcher, admin, validation_context, api, http_context,\n                         grpc_context),\n      async_client_(api, time_system) {}\n\nHttp::ConnectionPool::Instance* ValidationClusterManager::httpConnPoolForCluster(\n    const std::string&, ResourcePriority, 
absl::optional<Http::Protocol>, LoadBalancerContext*) {\n  return nullptr;\n}\n\nHost::CreateConnectionData ValidationClusterManager::tcpConnForCluster(const std::string&,\n                                                                       LoadBalancerContext*) {\n  return Host::CreateConnectionData{nullptr, nullptr};\n}\n\nHttp::AsyncClient& ValidationClusterManager::httpAsyncClientForCluster(const std::string&) {\n  return async_client_;\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/cluster_manager.h",
    "content": "#pragma once\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/http/context_impl.h\"\n#include \"common/upstream/cluster_manager_impl.h\"\n\n#include \"server/config_validation/async_client.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\n/**\n * Config-validation-only implementation of ClusterManagerFactory, which creates\n * ValidationClusterManagers. It also creates, but never returns, CdsApiImpls.\n */\nclass ValidationClusterManagerFactory : public ProdClusterManagerFactory {\npublic:\n  using ProdClusterManagerFactory::ProdClusterManagerFactory;\n\n  explicit ValidationClusterManagerFactory(\n      Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats,\n      ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver,\n      Ssl::ContextManager& ssl_context_manager, Event::Dispatcher& main_thread_dispatcher,\n      const LocalInfo::LocalInfo& local_info, Secret::SecretManager& secret_manager,\n      ProtobufMessage::ValidationContext& validation_context, Api::Api& api,\n      Http::Context& http_context, Grpc::Context& grpc_context,\n      AccessLog::AccessLogManager& log_manager, Singleton::Manager& singleton_manager,\n      Event::TimeSystem& time_system)\n      : ProdClusterManagerFactory(admin, runtime, stats, tls, dns_resolver, ssl_context_manager,\n                                  main_thread_dispatcher, local_info, secret_manager,\n                                  validation_context, api, http_context, grpc_context, log_manager,\n                                  singleton_manager),\n        grpc_context_(grpc_context), time_system_(time_system) {}\n\n  ClusterManagerPtr\n  clusterManagerFromProto(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) override;\n\n  // Delegates to ProdClusterManagerFactory::createCds, but discards the 
result and returns nullptr\n  // unconditionally.\n  CdsApiPtr createCds(const envoy::config::core::v3::ConfigSource& cds_config,\n                      ClusterManager& cm) override;\n\nprivate:\n  Grpc::Context& grpc_context_;\n  Event::TimeSystem& time_system_;\n};\n\n/**\n * Config-validation-only implementation of ClusterManager, which opens no upstream connections.\n */\nclass ValidationClusterManager : public ClusterManagerImpl {\npublic:\n  ValidationClusterManager(const envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                           ClusterManagerFactory& factory, Stats::Store& stats,\n                           ThreadLocal::Instance& tls, Runtime::Loader& runtime,\n                           const LocalInfo::LocalInfo& local_info,\n                           AccessLog::AccessLogManager& log_manager, Event::Dispatcher& dispatcher,\n                           Server::Admin& admin,\n                           ProtobufMessage::ValidationContext& validation_context, Api::Api& api,\n                           Http::Context& http_context, Grpc::Context& grpc_context,\n                           Event::TimeSystem& time_system);\n\n  Http::ConnectionPool::Instance* httpConnPoolForCluster(const std::string&, ResourcePriority,\n                                                         absl::optional<Http::Protocol>,\n                                                         LoadBalancerContext*) override;\n  Host::CreateConnectionData tcpConnForCluster(const std::string&, LoadBalancerContext*) override;\n  Http::AsyncClient& httpAsyncClientForCluster(const std::string&) override;\n\nprivate:\n  // ValidationAsyncClient always returns null on send() and start(), so it has\n  // no internal state -- we might as well just keep one and hand out references\n  // to it.\n  Http::ValidationAsyncClient async_client_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/connection.h",
    "content": "#pragma once\n\n#include \"common/network/connection_impl.h\"\n\n#include \"server/config_validation/dispatcher.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/*\n  Class representing connection to upstream entity in config verification mode. The connection is\n  not really established, but sockets may be allocated. Methods doing \"real\" connections should be\n  overridden with no-op implementations.\n*/\nclass ConfigValidateConnection : public Network::ClientConnectionImpl {\npublic:\n  ConfigValidateConnection(Event::ValidationDispatcher& dispatcher,\n                           Network::Address::InstanceConstSharedPtr remote_address,\n                           Network::Address::InstanceConstSharedPtr source_address,\n                           Network::TransportSocketPtr&& transport_socket,\n                           const Network::ConnectionSocket::OptionsSharedPtr& options)\n      : Network::ClientConnectionImpl(dispatcher, remote_address, source_address,\n                                      std::move(transport_socket), options) {}\n\n  // Unit tests may instantiate it without proper event machine and leave opened sockets.\n  // Do some cleanup before invoking base class's destructor.\n  ~ConfigValidateConnection() override { close(ConnectionCloseType::NoFlush); }\n\n  // connect may be called in config verification mode.\n  // It is redefined as no-op. Calling parent's method triggers connection to upstream host.\n  void connect() override {}\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/dispatcher.cc",
    "content": "#include \"server/config_validation/dispatcher.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"server/config_validation/connection.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nNetwork::ClientConnectionPtr ValidationDispatcher::createClientConnection(\n    Network::Address::InstanceConstSharedPtr remote_address,\n    Network::Address::InstanceConstSharedPtr source_address,\n    Network::TransportSocketPtr&& transport_socket,\n    const Network::ConnectionSocket::OptionsSharedPtr& options) {\n  return std::make_unique<Network::ConfigValidateConnection>(*this, remote_address, source_address,\n                                                             std::move(transport_socket), options);\n}\n\nNetwork::DnsResolverSharedPtr ValidationDispatcher::createDnsResolver(\n    const std::vector<Network::Address::InstanceConstSharedPtr>&, const bool) {\n  return dns_resolver_;\n}\n\nNetwork::ListenerPtr ValidationDispatcher::createListener(Network::SocketSharedPtr&&,\n                                                          Network::TcpListenerCallbacks&, bool,\n                                                          uint32_t) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/dispatcher.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n\n#include \"dns.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n/**\n * Config-validation-only implementation of Event::Dispatcher. This class delegates all calls to\n * Event::DispatcherImpl, except for the methods involved with network events. Those methods are\n * disallowed at validation time.\n */\nclass ValidationDispatcher : public DispatcherImpl {\npublic:\n  ValidationDispatcher(const std::string& name, Api::Api& api, Event::TimeSystem& time_system)\n      : DispatcherImpl(name, api, time_system) {}\n\n  Network::ClientConnectionPtr\n  createClientConnection(Network::Address::InstanceConstSharedPtr,\n                         Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&&,\n                         const Network::ConnectionSocket::OptionsSharedPtr& options) override;\n  Network::DnsResolverSharedPtr\n  createDnsResolver(const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,\n                    const bool use_tcp_for_dns_lookups) override;\n  Network::ListenerPtr createListener(Network::SocketSharedPtr&&, Network::TcpListenerCallbacks&,\n                                      bool bind_to_port, uint32_t backlog_size) override;\n\nprotected:\n  std::shared_ptr<Network::ValidationDnsResolver> dns_resolver_{\n      std::make_shared<Network::ValidationDnsResolver>()};\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/dns.cc",
    "content": "#include \"server/config_validation/dns.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nActiveDnsQuery* ValidationDnsResolver::resolve(const std::string&, DnsLookupFamily,\n                                               ResolveCb callback) {\n  callback(DnsResolver::ResolutionStatus::Success, {});\n  return nullptr;\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/dns.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/dns.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n/**\n * DnsResolver to be used in config validation runs. Every DNS query immediately fails to resolve,\n * since we never need DNS information to validate a config. (If a config contains an unresolvable\n * name, it still passes validation -- for example, we might be running validation in a test\n * environment, while the name resolves fine in prod.)\n */\nclass ValidationDnsResolver : public DnsResolver {\npublic:\n  // Network::DnsResolver\n  ActiveDnsQuery* resolve(const std::string& dns_name, DnsLookupFamily dns_lookup_family,\n                          ResolveCb callback) override;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/server.cc",
    "content": "#include \"server/config_validation/server.h\"\n\n#include <memory>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/event/real_time_system.h\"\n#include \"common/local_info/local_info_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/version/version.h\"\n\n#include \"server/ssl_context_manager.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nbool validateConfig(const Options& options,\n                    const Network::Address::InstanceConstSharedPtr& local_address,\n                    ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory,\n                    Filesystem::Instance& file_system) {\n  Thread::MutexBasicLockable access_log_lock;\n  Stats::IsolatedStoreImpl stats_store;\n\n  try {\n    Event::RealTimeSystem time_system;\n    ValidationInstance server(options, time_system, local_address, stats_store, access_log_lock,\n                              component_factory, thread_factory, file_system);\n    std::cout << \"configuration '\" << options.configPath() << \"' OK\" << std::endl;\n    server.shutdown();\n    return true;\n  } catch (const EnvoyException& e) {\n    return false;\n  }\n}\n\nValidationInstance::ValidationInstance(\n    const Options& options, Event::TimeSystem& time_system,\n    const Network::Address::InstanceConstSharedPtr& local_address, Stats::IsolatedStoreImpl& store,\n    Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory,\n    Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system)\n    : options_(options), validation_context_(options_.allowUnknownStaticFields(),\n                                             !options.rejectUnknownDynamicFields(),\n                                             !options.ignoreUnknownDynamicFields()),\n      stats_store_(store), api_(new 
Api::ValidationImpl(thread_factory, store, time_system,\n                                                        file_system, random_generator_)),\n      dispatcher_(api_->allocateDispatcher(\"main_thread\")),\n      singleton_manager_(new Singleton::ManagerImpl(api_->threadFactory())),\n      access_log_manager_(options.fileFlushIntervalMsec(), *api_, *dispatcher_, access_log_lock,\n                          store),\n      mutex_tracer_(nullptr), grpc_context_(stats_store_.symbolTable()),\n      http_context_(stats_store_.symbolTable()), time_system_(time_system),\n      server_contexts_(*this) {\n  try {\n    initialize(options, local_address, component_factory);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG(critical, \"error initializing configuration '{}': {}\", options.configPath(),\n              e.what());\n    shutdown();\n    throw;\n  }\n}\n\nvoid ValidationInstance::initialize(const Options& options,\n                                    const Network::Address::InstanceConstSharedPtr& local_address,\n                                    ComponentFactory& component_factory) {\n  // See comments on InstanceImpl::initialize() for the overall flow here.\n  //\n  // For validation, we only do a subset of normal server initialization: everything that could fail\n  // on a malformed config (e.g. JSON parsing and all the object construction that follows), but\n  // more importantly nothing with observable effects (e.g. 
binding to ports or shutting down any\n  // other Envoy process).\n  //\n  // If we get all the way through that stripped-down initialization flow, to the point where we'd\n  // be ready to serve, then the config has passed validation.\n  // Handle configuration that needs to take place prior to the main configuration load.\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  InstanceUtil::loadBootstrapConfig(bootstrap, options,\n                                    messageValidationContext().staticValidationVisitor(), *api_);\n\n  Config::Utility::createTagProducer(bootstrap);\n\n  bootstrap.mutable_node()->set_hidden_envoy_deprecated_build_version(VersionInfo::version());\n\n  local_info_ = std::make_unique<LocalInfo::LocalInfoImpl>(\n      bootstrap.node(), local_address, options.serviceZone(), options.serviceClusterName(),\n      options.serviceNodeName());\n\n  Configuration::InitialImpl initial_config(bootstrap);\n  overload_manager_ = std::make_unique<OverloadManagerImpl>(\n      dispatcher(), stats(), threadLocal(), bootstrap.overload_manager(),\n      messageValidationContext().staticValidationVisitor(), *api_);\n  listener_manager_ = std::make_unique<ListenerManagerImpl>(*this, *this, *this, false);\n  thread_local_.registerThread(*dispatcher_, true);\n  runtime_singleton_ = std::make_unique<Runtime::ScopedLoaderSingleton>(\n      component_factory.createRuntime(*this, initial_config));\n  secret_manager_ = std::make_unique<Secret::SecretManagerImpl>(admin().getConfigTracker());\n  ssl_context_manager_ = createContextManager(\"ssl_context_manager\", api_->timeSource());\n  cluster_manager_factory_ = std::make_unique<Upstream::ValidationClusterManagerFactory>(\n      admin(), runtime(), stats(), threadLocal(), dnsResolver(), sslContextManager(), dispatcher(),\n      localInfo(), *secret_manager_, messageValidationContext(), *api_, http_context_,\n      grpc_context_, accessLogManager(), singletonManager(), time_system_);\n  config_.initialize(bootstrap, 
*this, *cluster_manager_factory_);\n  runtime().initialize(clusterManager());\n  clusterManager().setInitializedCb([this]() -> void { init_manager_.initialize(init_watcher_); });\n}\n\nvoid ValidationInstance::shutdown() {\n  // This normally happens at the bottom of InstanceImpl::run(), but we don't have a run(). We can\n  // do an abbreviated shutdown here since there's less to clean up -- for example, no workers to\n  // exit.\n  thread_local_.shutdownGlobalThreading();\n  if (config_.clusterManager() != nullptr) {\n    config_.clusterManager()->shutdown();\n  }\n  thread_local_.shutdownThread();\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/config_validation/server.h",
    "content": "#pragma once\n\n#include <iostream>\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/instance.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/access_log/access_log_manager_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/random_generator.h\"\n#include \"common/grpc/common.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/router/rds_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/secret/secret_manager_impl.h\"\n#include \"common/thread_local/thread_local_impl.h\"\n\n#include \"server/admin/admin.h\"\n#include \"server/config_validation/admin.h\"\n#include \"server/config_validation/api.h\"\n#include \"server/config_validation/cluster_manager.h\"\n#include \"server/config_validation/dns.h\"\n#include \"server/listener_manager_impl.h\"\n#include \"server/server.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * validateConfig() takes over from main() for a config-validation run of Envoy. It returns true if\n * the config is valid, false if invalid.\n */\nbool validateConfig(const Options& options,\n                    const Network::Address::InstanceConstSharedPtr& local_address,\n                    ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory,\n                    Filesystem::Instance& file_system);\n\n/**\n * ValidationInstance does the bulk of the work for config-validation runs of Envoy. It implements\n * Server::Instance, but some functionality not needed until serving time, such as updating\n * health-check status, is not implemented. 
Everything else is written in terms of other\n * validation-specific interface implementations, with the end result that we can load and\n * initialize a configuration, skipping any steps that affect the outside world (such as\n * hot-restarting or connecting to upstream clusters) but otherwise exercising the entire startup\n * flow.\n *\n * If we finish initialization, and reach the point where an ordinary Envoy run would begin serving\n * requests, the validation is considered successful.\n */\nclass ValidationInstance final : Logger::Loggable<Logger::Id::main>,\n                                 public Instance,\n                                 public ListenerComponentFactory,\n                                 public ServerLifecycleNotifier,\n                                 public WorkerFactory {\npublic:\n  ValidationInstance(const Options& options, Event::TimeSystem& time_system,\n                     const Network::Address::InstanceConstSharedPtr& local_address,\n                     Stats::IsolatedStoreImpl& store, Thread::BasicLockable& access_log_lock,\n                     ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory,\n                     Filesystem::Instance& file_system);\n\n  // Server::Instance\n  Admin& admin() override { return admin_; }\n  Api::Api& api() override { return *api_; }\n  Upstream::ClusterManager& clusterManager() override { return *config_.clusterManager(); }\n  Ssl::ContextManager& sslContextManager() override { return *ssl_context_manager_; }\n  Event::Dispatcher& dispatcher() override { return *dispatcher_; }\n  Network::DnsResolverSharedPtr dnsResolver() override {\n    return dispatcher().createDnsResolver({}, false);\n  }\n  void drainListeners() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  DrainManager& drainManager() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  AccessLog::AccessLogManager& accessLogManager() override { return access_log_manager_; }\n  void failHealthcheck(bool) override 
{ NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  HotRestart& hotRestart() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  Init::Manager& initManager() override { return init_manager_; }\n  ServerLifecycleNotifier& lifecycleNotifier() override { return *this; }\n  ListenerManager& listenerManager() override { return *listener_manager_; }\n  Secret::SecretManager& secretManager() override { return *secret_manager_; }\n  Runtime::Loader& runtime() override { return Runtime::LoaderSingleton::get(); }\n  void shutdown() override;\n  bool isShutdown() override { return false; }\n  void shutdownAdmin() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  Singleton::Manager& singletonManager() override { return *singleton_manager_; }\n  OverloadManager& overloadManager() override { return *overload_manager_; }\n  bool healthCheckFailed() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  const Options& options() override { return options_; }\n  time_t startTimeCurrentEpoch() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  time_t startTimeFirstEpoch() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  Stats::Store& stats() override { return stats_store_; }\n  Grpc::Context& grpcContext() override { return grpc_context_; }\n  Http::Context& httpContext() override { return http_context_; }\n  ProcessContextOptRef processContext() override { return absl::nullopt; }\n  ThreadLocal::Instance& threadLocal() override { return thread_local_; }\n  const LocalInfo::LocalInfo& localInfo() const override { return *local_info_; }\n  TimeSource& timeSource() override { return api_->timeSource(); }\n  Envoy::MutexTracer* mutexTracer() override { return mutex_tracer_; }\n  std::chrono::milliseconds statsFlushInterval() const override {\n    return config_.statsFlushInterval();\n  }\n  void flushStats() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  ProtobufMessage::ValidationContext& messageValidationContext() override {\n    return validation_context_;\n  }\n  Configuration::ServerFactoryContext& 
serverFactoryContext() override { return server_contexts_; }\n  Configuration::TransportSocketFactoryContext& transportSocketFactoryContext() override {\n    return server_contexts_;\n  }\n  void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) override {\n    http_context_.setDefaultTracingConfig(tracing_config);\n  }\n\n  // Server::ListenerComponentFactory\n  LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config,\n                         const udpa::core::v1::ResourceLocator* lds_resources_locator) override {\n    return std::make_unique<LdsApiImpl>(lds_config, lds_resources_locator, clusterManager(),\n                                        initManager(), stats(), listenerManager(),\n                                        messageValidationContext().dynamicValidationVisitor());\n  }\n  std::vector<Network::FilterFactoryCb> createNetworkFilterFactoryList(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>& filters,\n      Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context) override {\n    return ProdListenerComponentFactory::createNetworkFilterFactoryList_(\n        filters, filter_chain_factory_context);\n  }\n  std::vector<Network::ListenerFilterFactoryCb> createListenerFilterFactoryList(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n      Configuration::ListenerFactoryContext& context) override {\n    return ProdListenerComponentFactory::createListenerFilterFactoryList_(filters, context);\n  }\n  std::vector<Network::UdpListenerFilterFactoryCb> createUdpListenerFilterFactoryList(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n      Configuration::ListenerFactoryContext& context) override {\n    return ProdListenerComponentFactory::createUdpListenerFilterFactoryList_(filters, context);\n  }\n  Network::SocketSharedPtr 
createListenSocket(Network::Address::InstanceConstSharedPtr,\n                                              Network::Socket::Type,\n                                              const Network::Socket::OptionsSharedPtr&,\n                                              const ListenSocketCreationParams&) override {\n    // Returned sockets are not currently used so we can return nothing here safely vs. a\n    // validation mock.\n    return nullptr;\n  }\n  DrainManagerPtr createDrainManager(envoy::config::listener::v3::Listener::DrainType) override {\n    return nullptr;\n  }\n  uint64_t nextListenerTag() override { return 0; }\n\n  // Server::WorkerFactory\n  WorkerPtr createWorker(uint32_t, OverloadManager&, const std::string&) override {\n    // Returned workers are not currently used so we can return nothing here safely vs. a\n    // validation mock.\n    return nullptr;\n  }\n\n  // ServerLifecycleNotifier\n  ServerLifecycleNotifier::HandlePtr registerCallback(Stage, StageCallback) override {\n    return nullptr;\n  }\n  ServerLifecycleNotifier::HandlePtr registerCallback(Stage, StageCallbackWithCompletion) override {\n    return nullptr;\n  }\n\nprivate:\n  void initialize(const Options& options,\n                  const Network::Address::InstanceConstSharedPtr& local_address,\n                  ComponentFactory& component_factory);\n\n  // init_manager_ must come before any member that participates in initialization, and destructed\n  // only after referencing members are gone, since initialization continuation can potentially\n  // occur at any point during member lifetime.\n  Init::ManagerImpl init_manager_{\"Validation server\"};\n  Init::WatcherImpl init_watcher_{\"(no-op)\", []() {}};\n  // secret_manager_ must come before listener_manager_, config_ and dispatcher_, and destructed\n  // only after these members can no longer reference it, since:\n  // - There may be active filter chains referencing it in listener_manager_.\n  // - There may be active 
clusters referencing it in config_.cluster_manager_.\n  // - There may be active connections referencing it.\n  std::unique_ptr<Secret::SecretManager> secret_manager_;\n  const Options& options_;\n  ProtobufMessage::ProdValidationContextImpl validation_context_;\n  Stats::IsolatedStoreImpl& stats_store_;\n  ThreadLocal::InstanceImpl thread_local_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  Server::ValidationAdmin admin_;\n  Singleton::ManagerPtr singleton_manager_;\n  std::unique_ptr<Runtime::ScopedLoaderSingleton> runtime_singleton_;\n  Random::RandomGeneratorImpl random_generator_;\n  std::unique_ptr<Ssl::ContextManager> ssl_context_manager_;\n  Configuration::MainImpl config_;\n  LocalInfo::LocalInfoPtr local_info_;\n  AccessLog::AccessLogManagerImpl access_log_manager_;\n  std::unique_ptr<Upstream::ValidationClusterManagerFactory> cluster_manager_factory_;\n  std::unique_ptr<ListenerManagerImpl> listener_manager_;\n  std::unique_ptr<OverloadManager> overload_manager_;\n  MutexTracer* mutex_tracer_;\n  Grpc::ContextImpl grpc_context_;\n  Http::ContextImpl http_context_;\n  Event::TimeSystem& time_system_;\n  ServerFactoryContextImpl server_contexts_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/configuration_impl.cc",
    "content": "#include \"server/configuration_impl.h\"\n\n#include <chrono>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/server/instance.h\"\n#include \"envoy/server/tracer_config.h\"\n#include \"envoy/ssl/context_manager.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/runtime_utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nbool FilterChainUtility::buildFilterChain(Network::FilterManager& filter_manager,\n                                          const std::vector<Network::FilterFactoryCb>& factories) {\n  for (const Network::FilterFactoryCb& factory : factories) {\n    factory(filter_manager);\n  }\n\n  return filter_manager.initializeReadFilters();\n}\n\nbool FilterChainUtility::buildFilterChain(\n    Network::ListenerFilterManager& filter_manager,\n    const std::vector<Network::ListenerFilterFactoryCb>& factories) {\n  for (const Network::ListenerFilterFactoryCb& factory : factories) {\n    factory(filter_manager);\n  }\n\n  return true;\n}\n\nvoid FilterChainUtility::buildUdpFilterChain(\n    Network::UdpListenerFilterManager& filter_manager, Network::UdpReadFilterCallbacks& callbacks,\n    const std::vector<Network::UdpListenerFilterFactoryCb>& factories) {\n  for (const Network::UdpListenerFilterFactoryCb& factory : factories) {\n    factory(filter_manager, callbacks);\n  }\n}\n\nvoid MainImpl::initialize(const envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                          Instance& server,\n           
               Upstream::ClusterManagerFactory& cluster_manager_factory) {\n  // In order to support dynamic configuration of tracing providers,\n  // a former server-wide HttpTracer singleton has been replaced by\n  // an HttpTracer instance per \"envoy.filters.network.http_connection_manager\" filter.\n  // Tracing configuration as part of bootstrap config is still supported,\n  // however, it's become mandatory to process it prior to static Listeners.\n  // Otherwise, static Listeners will be configured in assumption that\n  // tracing configuration is missing from the bootstrap config.\n  initializeTracers(bootstrap.tracing(), server);\n\n  const auto& secrets = bootstrap.static_resources().secrets();\n  ENVOY_LOG(info, \"loading {} static secret(s)\", secrets.size());\n  for (ssize_t i = 0; i < secrets.size(); i++) {\n    ENVOY_LOG(debug, \"static secret #{}: {}\", i, secrets[i].name());\n    server.secretManager().addStaticSecret(secrets[i]);\n  }\n\n  ENVOY_LOG(info, \"loading {} cluster(s)\", bootstrap.static_resources().clusters().size());\n  cluster_manager_ = cluster_manager_factory.clusterManagerFromProto(bootstrap);\n\n  const auto& listeners = bootstrap.static_resources().listeners();\n  ENVOY_LOG(info, \"loading {} listener(s)\", listeners.size());\n  for (ssize_t i = 0; i < listeners.size(); i++) {\n    ENVOY_LOG(debug, \"listener #{}:\", i);\n    server.listenerManager().addOrUpdateListener(listeners[i], \"\", false);\n  }\n\n  stats_flush_interval_ =\n      std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(bootstrap, stats_flush_interval, 5000));\n\n  initializeWatchdogs(bootstrap, server);\n  initializeStatsSinks(bootstrap, server);\n}\n\nvoid MainImpl::initializeTracers(const envoy::config::trace::v3::Tracing& configuration,\n                                 Instance& server) {\n  ENVOY_LOG(info, \"loading tracing configuration\");\n\n  // Default tracing configuration must be set prior to processing of static Listeners begins.\n  
server.setDefaultTracingConfig(configuration);\n\n  if (!configuration.has_http()) {\n    return;\n  }\n\n  // Validating tracing configuration (minimally).\n  ENVOY_LOG(info, \"  validating default server-wide tracing driver: {}\",\n            configuration.http().name());\n\n  // Now see if there is a factory that will accept the config.\n  auto& factory = Config::Utility::getAndCheckFactory<TracerFactory>(configuration.http());\n  ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), server.messageValidationContext().staticValidationVisitor(), factory);\n\n  // Notice that the actual HttpTracer instance will be created on demand\n  // in the context of \"envoy.filters.network.http_connection_manager\" filter.\n  // The side effect of this is that provider-specific configuration\n  // is no longer validated in this step.\n}\n\nvoid MainImpl::initializeStatsSinks(const envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                                    Instance& server) {\n  ENVOY_LOG(info, \"loading stats sink configuration\");\n\n  for (const envoy::config::metrics::v3::StatsSink& sink_object : bootstrap.stats_sinks()) {\n    // Generate factory and translate stats sink custom config\n    auto& factory = Config::Utility::getAndCheckFactory<StatsSinkFactory>(sink_object);\n    ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(\n        sink_object, server.messageValidationContext().staticValidationVisitor(), factory);\n\n    stats_sinks_.emplace_back(factory.createStatsSink(*message, server.serverFactoryContext()));\n  }\n}\n\nvoid MainImpl::initializeWatchdogs(const envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                                   Instance& server) {\n  if (bootstrap.has_watchdog() && bootstrap.has_watchdogs()) {\n    throw EnvoyException(\"Only one of watchdog or watchdogs should be set!\");\n  }\n\n  if (bootstrap.has_watchdog()) {\n    
main_thread_watchdog_ = std::make_unique<WatchdogImpl>(bootstrap.watchdog(), server);\n    worker_watchdog_ = std::make_unique<WatchdogImpl>(bootstrap.watchdog(), server);\n  } else {\n    main_thread_watchdog_ =\n        std::make_unique<WatchdogImpl>(bootstrap.watchdogs().main_thread_watchdog(), server);\n    worker_watchdog_ =\n        std::make_unique<WatchdogImpl>(bootstrap.watchdogs().worker_watchdog(), server);\n  }\n}\n\nWatchdogImpl::WatchdogImpl(const envoy::config::bootstrap::v3::Watchdog& watchdog,\n                           Instance& server) {\n  miss_timeout_ =\n      std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, miss_timeout, 200));\n  megamiss_timeout_ =\n      std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, megamiss_timeout, 1000));\n\n  uint64_t kill_timeout = PROTOBUF_GET_MS_OR_DEFAULT(watchdog, kill_timeout, 0);\n  const uint64_t max_kill_timeout_jitter =\n      PROTOBUF_GET_MS_OR_DEFAULT(watchdog, max_kill_timeout_jitter, 0);\n\n  // Adjust kill timeout if we have skew enabled.\n  if (kill_timeout > 0 && max_kill_timeout_jitter > 0) {\n    // Increments the kill timeout with a random value in (0, max_skew].\n    // We shouldn't have overflow issues due to the range of Duration.\n    // This won't be entirely uniform, depending on how large max_skew\n    // is relation to uint64.\n    kill_timeout += (server.api().randomGenerator().random() % max_kill_timeout_jitter) + 1;\n  }\n\n  kill_timeout_ = std::chrono::milliseconds(kill_timeout);\n  multikill_timeout_ =\n      std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, multikill_timeout, 0));\n  multikill_threshold_ = PROTOBUF_PERCENT_TO_DOUBLE_OR_DEFAULT(watchdog, multikill_threshold, 0.0);\n  actions_ = watchdog.actions();\n}\n\nInitialImpl::InitialImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n  const auto& admin = bootstrap.admin();\n  admin_.access_log_path_ = admin.access_log_path();\n  admin_.profile_path_ =\n      
admin.profile_path().empty() ? \"/var/log/envoy/envoy.prof\" : admin.profile_path();\n  if (admin.has_address()) {\n    admin_.address_ = Network::Address::resolveProtoAddress(admin.address());\n  }\n  admin_.socket_options_ = std::make_shared<std::vector<Network::Socket::OptionConstSharedPtr>>();\n  if (!admin.socket_options().empty()) {\n    Network::Socket::appendOptions(\n        admin_.socket_options_,\n        Network::SocketOptionFactory::buildLiteralOptions(admin.socket_options()));\n  }\n\n  if (!bootstrap.flags_path().empty()) {\n    flags_path_ = bootstrap.flags_path();\n  }\n\n  if (bootstrap.has_layered_runtime()) {\n    layered_runtime_.MergeFrom(bootstrap.layered_runtime());\n    if (layered_runtime_.layers().empty()) {\n      layered_runtime_.add_layers()->mutable_admin_layer();\n    }\n  } else {\n    Config::translateRuntime(bootstrap.hidden_envoy_deprecated_runtime(), layered_runtime_);\n  }\n}\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/configuration_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <utility>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/server/configuration.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/network/resolver_impl.h\"\n#include \"common/network/utility.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\n/**\n * Implemented for each Stats::Sink and registered via Registry::registerFactory() or\n * the convenience class RegisterFactory.\n */\nclass StatsSinkFactory : public Config::TypedFactory {\npublic:\n  ~StatsSinkFactory() override = default;\n\n  /**\n   * Create a particular Stats::Sink implementation. If the implementation is unable to produce a\n   * Stats::Sink with the provided parameters, it should throw an EnvoyException. The returned\n   * pointer should always be valid.\n   * @param config supplies the custom proto configuration for the Stats::Sink\n   * @param server supplies the server instance\n   */\n  virtual Stats::SinkPtr createStatsSink(const Protobuf::Message& config,\n                                         Server::Configuration::ServerFactoryContext& server) PURE;\n\n  std::string category() const override { return \"envoy.stats_sinks\"; }\n};\n\n/**\n * Utilities for creating a filter chain for a network connection.\n */\nclass FilterChainUtility {\npublic:\n  /**\n   * Given a connection and a list of factories, create a new filter chain. 
Chain creation will\n   * exit early if any filters immediately close the connection.\n   */\n  static bool buildFilterChain(Network::FilterManager& filter_manager,\n                               const std::vector<Network::FilterFactoryCb>& factories);\n\n  /**\n   * Given a ListenerFilterManager and a list of factories, create a new filter chain. Chain\n   * creation will exit early if any filters immediately close the connection.\n   *\n   * TODO(sumukhs): Coalesce with the above as they are very similar\n   */\n  static bool buildFilterChain(Network::ListenerFilterManager& filter_manager,\n                               const std::vector<Network::ListenerFilterFactoryCb>& factories);\n\n  /**\n   * Given a UdpListenerFilterManager and a list of factories, create a new filter chain. Chain\n   * creation will exit early if any filters immediately close the connection.\n   */\n  static void\n  buildUdpFilterChain(Network::UdpListenerFilterManager& filter_manager,\n                      Network::UdpReadFilterCallbacks& callbacks,\n                      const std::vector<Network::UdpListenerFilterFactoryCb>& factories);\n};\n\n/**\n * Implementation of Server::Configuration::Main that reads a configuration from\n * a JSON file.\n */\nclass MainImpl : Logger::Loggable<Logger::Id::config>, public Main {\npublic:\n  /**\n   * MainImpl is created in two phases. In the first phase it is\n   * default-constructed without a configuration as part of the server. The\n   * server won't be fully populated yet. 
initialize() applies the\n   * configuration in the second phase, as it requires a fully populated server.\n   *\n   * @param bootstrap v2 bootstrap proto.\n   * @param server supplies the owning server.\n   * @param cluster_manager_factory supplies the cluster manager creation factory.\n   */\n  void initialize(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, Instance& server,\n                  Upstream::ClusterManagerFactory& cluster_manager_factory);\n\n  // Server::Configuration::Main\n  Upstream::ClusterManager* clusterManager() override { return cluster_manager_.get(); }\n  std::list<Stats::SinkPtr>& statsSinks() override { return stats_sinks_; }\n  std::chrono::milliseconds statsFlushInterval() const override { return stats_flush_interval_; }\n  const Watchdog& mainThreadWatchdogConfig() const override { return *main_thread_watchdog_; }\n  const Watchdog& workerWatchdogConfig() const override { return *worker_watchdog_; }\n\nprivate:\n  /**\n   * Initialize tracers and corresponding sinks.\n   */\n  void initializeTracers(const envoy::config::trace::v3::Tracing& configuration, Instance& server);\n\n  void initializeStatsSinks(const envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                            Instance& server);\n  /**\n   * Initialize watchdog(s). 
Call before accessing any watchdog configuration.\n   */\n  void initializeWatchdogs(const envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                           Instance& server);\n\n  std::unique_ptr<Upstream::ClusterManager> cluster_manager_;\n  std::list<Stats::SinkPtr> stats_sinks_;\n  std::chrono::milliseconds stats_flush_interval_;\n  std::unique_ptr<Watchdog> main_thread_watchdog_;\n  std::unique_ptr<Watchdog> worker_watchdog_;\n};\n\nclass WatchdogImpl : public Watchdog {\npublic:\n  WatchdogImpl(const envoy::config::bootstrap::v3::Watchdog& watchdog, Instance& server);\n\n  std::chrono::milliseconds missTimeout() const override { return miss_timeout_; }\n  std::chrono::milliseconds megaMissTimeout() const override { return megamiss_timeout_; }\n  std::chrono::milliseconds killTimeout() const override { return kill_timeout_; }\n  std::chrono::milliseconds multiKillTimeout() const override { return multikill_timeout_; }\n  double multiKillThreshold() const override { return multikill_threshold_; }\n  Protobuf::RepeatedPtrField<envoy::config::bootstrap::v3::Watchdog::WatchdogAction>\n  actions() const override {\n    return actions_;\n  }\n\nprivate:\n  std::chrono::milliseconds miss_timeout_;\n  std::chrono::milliseconds megamiss_timeout_;\n  std::chrono::milliseconds kill_timeout_;\n  std::chrono::milliseconds multikill_timeout_;\n  double multikill_threshold_;\n  Protobuf::RepeatedPtrField<envoy::config::bootstrap::v3::Watchdog::WatchdogAction> actions_;\n};\n\n/**\n * Initial configuration that reads from JSON.\n */\nclass InitialImpl : public Initial {\npublic:\n  InitialImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap);\n\n  // Server::Configuration::Initial\n  Admin& admin() override { return admin_; }\n  absl::optional<std::string> flagsPath() const override { return flags_path_; }\n  const envoy::config::bootstrap::v3::LayeredRuntime& runtime() override {\n    return layered_runtime_;\n  }\n\nprivate:\n  struct AdminImpl : 
public Admin {\n    // Server::Configuration::Initial::Admin\n    const std::string& accessLogPath() const override { return access_log_path_; }\n    const std::string& profilePath() const override { return profile_path_; }\n    Network::Address::InstanceConstSharedPtr address() override { return address_; }\n    Network::Socket::OptionsSharedPtr socketOptions() override { return socket_options_; }\n\n    std::string access_log_path_;\n    std::string profile_path_;\n    Network::Address::InstanceConstSharedPtr address_;\n    Network::Socket::OptionsSharedPtr socket_options_;\n  };\n\n  AdminImpl admin_;\n  absl::optional<std::string> flags_path_;\n  envoy::config::bootstrap::v3::LayeredRuntime layered_runtime_;\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/connection_handler_impl.cc",
    "content": "#include \"server/connection_handler_impl.h\"\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/exception.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/event/deferred_task.h\"\n#include \"common/network/connection_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/stats/timespan_impl.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\nvoid emitLogs(Network::ListenerConfig& config, StreamInfo::StreamInfo& stream_info) {\n  stream_info.onRequestComplete();\n  for (const auto& access_log : config.accessLogs()) {\n    access_log->log(nullptr, nullptr, nullptr, stream_info);\n  }\n}\n} // namespace\n\nConnectionHandlerImpl::ConnectionHandlerImpl(Event::Dispatcher& dispatcher,\n                                             absl::optional<uint32_t> worker_index)\n    : worker_index_(worker_index), dispatcher_(dispatcher),\n      per_handler_stat_prefix_(dispatcher.name() + \".\"), disable_listeners_(false) {}\n\nvoid ConnectionHandlerImpl::incNumConnections() { ++num_handler_connections_; }\n\nvoid ConnectionHandlerImpl::decNumConnections() {\n  ASSERT(num_handler_connections_ > 0);\n  --num_handler_connections_;\n}\n\nvoid ConnectionHandlerImpl::addListener(absl::optional<uint64_t> overridden_listener,\n                                        Network::ListenerConfig& config) {\n  ActiveListenerDetails details;\n  if (config.listenSocketFactory().socketType() == Network::Socket::Type::Stream) {\n    if (overridden_listener.has_value()) {\n      for (auto& listener : listeners_) {\n        if (listener.second.listener_->listenerTag() == overridden_listener) {\n          listener.second.tcpListener()->get().updateListenerConfig(config);\n          return;\n        }\n      }\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n    auto 
tcp_listener = std::make_unique<ActiveTcpListener>(*this, config);\n    details.typed_listener_ = *tcp_listener;\n    details.listener_ = std::move(tcp_listener);\n  } else {\n    ASSERT(config.udpListenerFactory() != nullptr, \"UDP listener factory is not initialized.\");\n    ASSERT(worker_index_.has_value());\n    ConnectionHandler::ActiveUdpListenerPtr udp_listener =\n        config.udpListenerFactory()->createActiveUdpListener(*worker_index_, *this, dispatcher_,\n                                                             config);\n    details.typed_listener_ = *udp_listener;\n    details.listener_ = std::move(udp_listener);\n  }\n  if (disable_listeners_) {\n    details.listener_->pauseListening();\n  }\n  listeners_.emplace_back(config.listenSocketFactory().localAddress(), std::move(details));\n}\n\nvoid ConnectionHandlerImpl::removeListeners(uint64_t listener_tag) {\n  for (auto listener = listeners_.begin(); listener != listeners_.end();) {\n    if (listener->second.listener_->listenerTag() == listener_tag) {\n      listener = listeners_.erase(listener);\n    } else {\n      ++listener;\n    }\n  }\n}\n\nConnectionHandlerImpl::ActiveListenerDetailsOptRef\nConnectionHandlerImpl::findActiveListenerByTag(uint64_t listener_tag) {\n  // TODO(mattklein123): We should probably use a hash table here to lookup the tag\n  // instead of iterating through the listener list.\n  for (auto& listener : listeners_) {\n    if (listener.second.listener_->listener() != nullptr &&\n        listener.second.listener_->listenerTag() == listener_tag) {\n      return listener.second;\n    }\n  }\n\n  return absl::nullopt;\n}\n\nNetwork::UdpListenerCallbacksOptRef\nConnectionHandlerImpl::getUdpListenerCallbacks(uint64_t listener_tag) {\n  auto listener = findActiveListenerByTag(listener_tag);\n  if (listener.has_value()) {\n    // If the tag matches this must be a UDP listener.\n    auto udp_listener = listener->get().udpListener();\n    ASSERT(udp_listener.has_value());\n    
return udp_listener;\n  }\n\n  return absl::nullopt;\n}\n\nvoid ConnectionHandlerImpl::removeFilterChains(\n    uint64_t listener_tag, const std::list<const Network::FilterChain*>& filter_chains,\n    std::function<void()> completion) {\n  for (auto& listener : listeners_) {\n    if (listener.second.listener_->listenerTag() == listener_tag) {\n      listener.second.tcpListener()->get().deferredRemoveFilterChains(filter_chains);\n      // Completion is deferred because the above removeFilterChains() may defer delete connection.\n      Event::DeferredTaskUtil::deferredRun(dispatcher_, std::move(completion));\n      return;\n    }\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nvoid ConnectionHandlerImpl::stopListeners(uint64_t listener_tag) {\n  for (auto& listener : listeners_) {\n    if (listener.second.listener_->listenerTag() == listener_tag) {\n      listener.second.listener_->shutdownListener();\n    }\n  }\n}\n\nvoid ConnectionHandlerImpl::stopListeners() {\n  for (auto& listener : listeners_) {\n    listener.second.listener_->shutdownListener();\n  }\n}\n\nvoid ConnectionHandlerImpl::disableListeners() {\n  disable_listeners_ = true;\n  for (auto& listener : listeners_) {\n    listener.second.listener_->pauseListening();\n  }\n}\n\nvoid ConnectionHandlerImpl::enableListeners() {\n  disable_listeners_ = false;\n  for (auto& listener : listeners_) {\n    listener.second.listener_->resumeListening();\n  }\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpListener::removeConnection(ActiveTcpConnection& connection) {\n  ENVOY_CONN_LOG(debug, \"adding to cleanup list\", *connection.connection_);\n  ActiveConnections& active_connections = connection.active_connections_;\n  ActiveTcpConnectionPtr removed = connection.removeFromList(active_connections.connections_);\n  parent_.dispatcher_.deferredDelete(std::move(removed));\n  // Delete map entry only iff connections becomes empty.\n  if (active_connections.connections_.empty()) {\n    auto iter = 
connections_by_context_.find(&active_connections.filter_chain_);\n    ASSERT(iter != connections_by_context_.end());\n    // To cover the lifetime of every single connection, Connections need to be deferred deleted\n    // because the previously contained connection is deferred deleted.\n    parent_.dispatcher_.deferredDelete(std::move(iter->second));\n    // The erase will break the iteration over the connections_by_context_ during the deletion.\n    if (!is_deleting_) {\n      connections_by_context_.erase(iter);\n    }\n  }\n}\n\nConnectionHandlerImpl::ActiveListenerImplBase::ActiveListenerImplBase(\n    Network::ConnectionHandler& parent, Network::ListenerConfig* config)\n    : stats_({ALL_LISTENER_STATS(POOL_COUNTER(config->listenerScope()),\n                                 POOL_GAUGE(config->listenerScope()),\n                                 POOL_HISTOGRAM(config->listenerScope()))}),\n      per_worker_stats_({ALL_PER_HANDLER_LISTENER_STATS(\n          POOL_COUNTER_PREFIX(config->listenerScope(), parent.statPrefix()),\n          POOL_GAUGE_PREFIX(config->listenerScope(), parent.statPrefix()))}),\n      config_(config) {}\n\nConnectionHandlerImpl::ActiveTcpListener::ActiveTcpListener(ConnectionHandlerImpl& parent,\n                                                            Network::ListenerConfig& config)\n    : ActiveTcpListener(\n          parent,\n          parent.dispatcher_.createListener(config.listenSocketFactory().getListenSocket(), *this,\n                                            config.bindToPort(), config.tcpBacklogSize()),\n          config) {}\n\nConnectionHandlerImpl::ActiveTcpListener::ActiveTcpListener(ConnectionHandlerImpl& parent,\n                                                            Network::ListenerPtr&& listener,\n                                                            Network::ListenerConfig& config)\n    : ConnectionHandlerImpl::ActiveListenerImplBase(parent, &config), parent_(parent),\n      
listener_(std::move(listener)), listener_filters_timeout_(config.listenerFiltersTimeout()),\n      continue_on_listener_filters_timeout_(config.continueOnListenerFiltersTimeout()) {\n  config.connectionBalancer().registerHandler(*this);\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpListener::updateListenerConfig(\n    Network::ListenerConfig& config) {\n  ENVOY_LOG(trace, \"replacing listener \", config_->listenerTag(), \" by \", config.listenerTag());\n  ASSERT(&config_->connectionBalancer() == &config.connectionBalancer());\n  config_ = &config;\n}\n\nConnectionHandlerImpl::ActiveTcpListener::~ActiveTcpListener() {\n  is_deleting_ = true;\n  config_->connectionBalancer().unregisterHandler(*this);\n\n  // Purge sockets that have not progressed to connections. This should only happen when\n  // a listener filter stops iteration and never resumes.\n  while (!sockets_.empty()) {\n    ActiveTcpSocketPtr removed = sockets_.front()->removeFromList(sockets_);\n    parent_.dispatcher_.deferredDelete(std::move(removed));\n  }\n\n  for (auto& chain_and_connections : connections_by_context_) {\n    ASSERT(chain_and_connections.second != nullptr);\n    auto& connections = chain_and_connections.second->connections_;\n    while (!connections.empty()) {\n      connections.front()->connection_->close(Network::ConnectionCloseType::NoFlush);\n    }\n  }\n  parent_.dispatcher_.clearDeferredDeleteList();\n\n  // By the time a listener is destroyed, in the common case, there should be no connections.\n  // However, this is not always true if there is an in flight rebalanced connection that is\n  // being posted. This assert is extremely useful for debugging the common path so we will leave it\n  // for now. If it becomes a problem (developers hitting this assert when using debug builds) we\n  // can revisit. This case, if it happens, should be benign on production builds. 
This case is\n  // covered in ConnectionHandlerTest::RemoveListenerDuringRebalance.\n  ASSERT(num_listener_connections_ == 0);\n}\n\nConnectionHandlerImpl::ActiveTcpListenerOptRef\nConnectionHandlerImpl::findActiveTcpListenerByAddress(const Network::Address::Instance& address) {\n  // This is a linear operation, may need to add a map<address, listener> to improve performance.\n  // However, linear performance might be adequate since the number of listeners is small.\n  // We do not return stopped listeners.\n  auto listener_it = std::find_if(\n      listeners_.begin(), listeners_.end(),\n      [&address](std::pair<Network::Address::InstanceConstSharedPtr, ActiveListenerDetails>& p) {\n        return p.second.tcpListener().has_value() && p.second.listener_->listener() != nullptr &&\n               p.first->type() == Network::Address::Type::Ip && *(p.first) == address;\n      });\n\n  // If there is exact address match, return the corresponding listener.\n  if (listener_it != listeners_.end()) {\n    return listener_it->second.tcpListener();\n  }\n\n  // Otherwise, we need to look for the wild card match, i.e., 0.0.0.0:[address_port].\n  // We do not return stopped listeners.\n  // TODO(wattli): consolidate with previous search for more efficiency.\n  listener_it = std::find_if(\n      listeners_.begin(), listeners_.end(),\n      [&address](\n          const std::pair<Network::Address::InstanceConstSharedPtr, ActiveListenerDetails>& p) {\n        return absl::holds_alternative<std::reference_wrapper<ActiveTcpListener>>(\n                   p.second.typed_listener_) &&\n               p.second.listener_->listener() != nullptr &&\n               p.first->type() == Network::Address::Type::Ip &&\n               p.first->ip()->port() == address.ip()->port() && p.first->ip()->isAnyAddress();\n      });\n  return (listener_it != listeners_.end())\n             ? 
ActiveTcpListenerOptRef(absl::get<std::reference_wrapper<ActiveTcpListener>>(\n                   listener_it->second.typed_listener_))\n             : absl::nullopt;\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpSocket::onTimeout() {\n  listener_.stats_.downstream_pre_cx_timeout_.inc();\n  ASSERT(inserted());\n  ENVOY_LOG(debug, \"listener filter times out after {} ms\",\n            listener_.listener_filters_timeout_.count());\n\n  if (listener_.continue_on_listener_filters_timeout_) {\n    ENVOY_LOG(debug, \"fallback to default listener filter\");\n    newConnection();\n  }\n  unlink();\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpSocket::startTimer() {\n  if (listener_.listener_filters_timeout_.count() > 0) {\n    timer_ = listener_.parent_.dispatcher_.createTimer([this]() -> void { onTimeout(); });\n    timer_->enableTimer(listener_.listener_filters_timeout_);\n  }\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpSocket::unlink() {\n  ActiveTcpSocketPtr removed = removeFromList(listener_.sockets_);\n  if (removed->timer_ != nullptr) {\n    removed->timer_->disableTimer();\n  }\n  // Emit logs if a connection is not established.\n  if (!connected_) {\n    emitLogs(*listener_.config_, *stream_info_);\n  }\n  listener_.parent_.dispatcher_.deferredDelete(std::move(removed));\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpSocket::continueFilterChain(bool success) {\n  if (success) {\n    bool no_error = true;\n    if (iter_ == accept_filters_.end()) {\n      iter_ = accept_filters_.begin();\n    } else {\n      iter_ = std::next(iter_);\n    }\n\n    for (; iter_ != accept_filters_.end(); iter_++) {\n      Network::FilterStatus status = (*iter_)->onAccept(*this);\n      if (status == Network::FilterStatus::StopIteration) {\n        // The filter is responsible for calling us again at a later time to continue the filter\n        // chain from the next filter.\n        if (!socket().ioHandle().isOpen()) {\n          // break the loop but should not create new connection\n          
no_error = false;\n          break;\n        } else {\n          // Blocking at the filter but no error\n          return;\n        }\n      }\n    }\n    // Successfully ran all the accept filters.\n    if (no_error) {\n      newConnection();\n    } else {\n      // Signal the caller that no extra filter chain iteration is needed.\n      iter_ = accept_filters_.end();\n    }\n  }\n\n  // Filter execution concluded, unlink and delete this ActiveTcpSocket if it was linked.\n  if (inserted()) {\n    unlink();\n  }\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpSocket::setDynamicMetadata(const std::string& name,\n                                                                const ProtobufWkt::Struct& value) {\n  stream_info_->setDynamicMetadata(name, value);\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpSocket::newConnection() {\n  connected_ = true;\n\n  // Check if the socket may need to be redirected to another listener.\n  ActiveTcpListenerOptRef new_listener;\n\n  if (hand_off_restored_destination_connections_ && socket_->localAddressRestored()) {\n    // Find a listener associated with the original destination address.\n    new_listener = listener_.parent_.findActiveTcpListenerByAddress(*socket_->localAddress());\n  }\n  if (new_listener.has_value()) {\n    // Hands off connections redirected by iptables to the listener associated with the\n    // original destination address. Pass 'hand_off_restored_destination_connections' as false to\n    // prevent further redirection as well as 'rebalanced' as true since the connection has\n    // already been balanced if applicable inside onAcceptWorker() when the connection was\n    // initially accepted. 
Note also that we must account for the number of connections properly\n    // across both listeners.\n    // TODO(mattklein123): See note in ~ActiveTcpSocket() related to making this accounting better.\n    listener_.decNumConnections();\n    new_listener.value().get().incNumConnections();\n    new_listener.value().get().onAcceptWorker(std::move(socket_), false, true);\n  } else {\n    // Set default transport protocol if none of the listener filters did it.\n    if (socket_->detectedTransportProtocol().empty()) {\n      socket_->setDetectedTransportProtocol(\n          Extensions::TransportSockets::TransportProtocolNames::get().RawBuffer);\n    }\n    // TODO(lambdai): add integration test\n    // TODO: Address issues in wider scope. See https://github.com/envoyproxy/envoy/issues/8925\n    // Erase accept filter states because accept filters may not get the opportunity to clean up.\n    // Particularly the assigned events need to reset before assigning new events in the follow up.\n    accept_filters_.clear();\n    // Create a new connection on this listener.\n    listener_.newConnection(std::move(socket_), std::move(stream_info_));\n  }\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpListener::onAccept(Network::ConnectionSocketPtr&& socket) {\n  if (listenerConnectionLimitReached()) {\n    ENVOY_LOG(trace, \"closing connection: listener connection limit reached for {}\",\n              config_->name());\n    socket->close();\n    stats_.downstream_cx_overflow_.inc();\n    return;\n  }\n\n  onAcceptWorker(std::move(socket), config_->handOffRestoredDestinationConnections(), false);\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpListener::onAcceptWorker(\n    Network::ConnectionSocketPtr&& socket, bool hand_off_restored_destination_connections,\n    bool rebalanced) {\n  if (!rebalanced) {\n    Network::BalancedConnectionHandler& target_handler =\n        config_->connectionBalancer().pickTargetHandler(*this);\n    if (&target_handler != this) {\n      
target_handler.post(std::move(socket));\n      return;\n    }\n  }\n\n  auto active_socket = std::make_unique<ActiveTcpSocket>(*this, std::move(socket),\n                                                         hand_off_restored_destination_connections);\n\n  // Create and run the filters\n  config_->filterChainFactory().createListenerFilterChain(*active_socket);\n  active_socket->continueFilterChain(true);\n\n  // Move active_socket to the sockets_ list if filter iteration needs to continue later.\n  // Otherwise we let active_socket be destructed when it goes out of scope.\n  if (active_socket->iter_ != active_socket->accept_filters_.end()) {\n    active_socket->startTimer();\n    LinkedList::moveIntoListBack(std::move(active_socket), sockets_);\n  } else {\n    // If active_socket is about to be destructed, emit logs if a connection is not created.\n    if (!active_socket->connected_) {\n      emitLogs(*config_, *active_socket->stream_info_);\n    }\n  }\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpListener::newConnection(\n    Network::ConnectionSocketPtr&& socket, std::unique_ptr<StreamInfo::StreamInfo> stream_info) {\n  // Find matching filter chain.\n  const auto filter_chain = config_->filterChainManager().findFilterChain(*socket);\n  if (filter_chain == nullptr) {\n    ENVOY_LOG(debug, \"closing connection: no matching filter chain found\");\n    stats_.no_filter_chain_match_.inc();\n    stream_info->setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound);\n    stream_info->setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().FilterChainNotFound);\n    emitLogs(*config_, *stream_info);\n    socket->close();\n    return;\n  }\n\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  stream_info->setDownstreamSslConnection(transport_socket->ssl());\n  auto& active_connections = getOrCreateActiveConnections(*filter_chain);\n  auto server_conn_ptr = parent_.dispatcher_.createServerConnection(\n      
std::move(socket), std::move(transport_socket), *stream_info);\n  ActiveTcpConnectionPtr active_connection(\n      new ActiveTcpConnection(active_connections, std::move(server_conn_ptr),\n                              parent_.dispatcher_.timeSource(), std::move(stream_info)));\n  active_connection->connection_->setBufferLimits(config_->perConnectionBufferLimitBytes());\n\n  const bool empty_filter_chain = !config_->filterChainFactory().createNetworkFilterChain(\n      *active_connection->connection_, filter_chain->networkFilterFactories());\n  if (empty_filter_chain) {\n    ENVOY_CONN_LOG(debug, \"closing connection: no filters\", *active_connection->connection_);\n    active_connection->connection_->close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  // If the connection is already closed, we can just let this connection immediately die.\n  if (active_connection->connection_->state() != Network::Connection::State::Closed) {\n    ENVOY_CONN_LOG(debug, \"new connection\", *active_connection->connection_);\n    active_connection->connection_->addConnectionCallbacks(*active_connection);\n    LinkedList::moveIntoList(std::move(active_connection), active_connections.connections_);\n  }\n}\n\nConnectionHandlerImpl::ActiveConnections&\nConnectionHandlerImpl::ActiveTcpListener::getOrCreateActiveConnections(\n    const Network::FilterChain& filter_chain) {\n  ActiveConnectionsPtr& connections = connections_by_context_[&filter_chain];\n  if (connections == nullptr) {\n    connections = std::make_unique<ConnectionHandlerImpl::ActiveConnections>(*this, filter_chain);\n  }\n  return *connections;\n}\n\nvoid ConnectionHandlerImpl::ActiveTcpListener::deferredRemoveFilterChains(\n    const std::list<const Network::FilterChain*>& draining_filter_chains) {\n  // Need to recover the original deleting state.\n  const bool was_deleting = is_deleting_;\n  is_deleting_ = true;\n  for (const auto* filter_chain : draining_filter_chains) {\n    auto iter = 
connections_by_context_.find(filter_chain);\n    if (iter == connections_by_context_.end()) {\n      // It is possible when listener is stopping.\n    } else {\n      auto& connections = iter->second->connections_;\n      while (!connections.empty()) {\n        connections.front()->connection_->close(Network::ConnectionCloseType::NoFlush);\n      }\n      // Since is_deleting_ is on, we need to manually remove the map value and drive the iterator.\n      // Defer delete connection container to avoid race condition in destroying connection.\n      parent_.dispatcher_.deferredDelete(std::move(iter->second));\n      connections_by_context_.erase(iter);\n    }\n  }\n  is_deleting_ = was_deleting;\n}\n\nnamespace {\n// Structure used to allow a unique_ptr to be captured in a posted lambda. See below.\nstruct RebalancedSocket {\n  Network::ConnectionSocketPtr socket;\n};\nusing RebalancedSocketSharedPtr = std::shared_ptr<RebalancedSocket>;\n} // namespace\n\nvoid ConnectionHandlerImpl::ActiveTcpListener::post(Network::ConnectionSocketPtr&& socket) {\n  // It is not possible to capture a unique_ptr because the post() API copies the lambda, so we must\n  // bundle the socket inside a shared_ptr that can be captured.\n  // TODO(mattklein123): It may be possible to change the post() API such that the lambda is only\n  // moved, but this is non-trivial and needs investigation.\n  RebalancedSocketSharedPtr socket_to_rebalance = std::make_shared<RebalancedSocket>();\n  socket_to_rebalance->socket = std::move(socket);\n\n  parent_.dispatcher_.post(\n      [socket_to_rebalance, tag = config_->listenerTag(), &parent = parent_]() {\n        auto listener = parent.findActiveListenerByTag(tag);\n        if (listener.has_value()) {\n          // If the tag matches this must be a TCP listener.\n          ASSERT(absl::holds_alternative<std::reference_wrapper<ActiveTcpListener>>(\n              listener->get().typed_listener_));\n          auto& tcp_listener =\n              
absl::get<std::reference_wrapper<ActiveTcpListener>>(listener->get().typed_listener_)\n                  .get();\n          tcp_listener.onAcceptWorker(std::move(socket_to_rebalance->socket),\n                                      tcp_listener.config_->handOffRestoredDestinationConnections(),\n                                      true);\n          return;\n        }\n      });\n}\n\nConnectionHandlerImpl::ActiveConnections::ActiveConnections(\n    ConnectionHandlerImpl::ActiveTcpListener& listener, const Network::FilterChain& filter_chain)\n    : listener_(listener), filter_chain_(filter_chain) {}\n\nConnectionHandlerImpl::ActiveConnections::~ActiveConnections() {\n  // connections should be defer deleted already.\n  ASSERT(connections_.empty());\n}\n\nConnectionHandlerImpl::ActiveTcpConnection::ActiveTcpConnection(\n    ActiveConnections& active_connections, Network::ConnectionPtr&& new_connection,\n    TimeSource& time_source, std::unique_ptr<StreamInfo::StreamInfo>&& stream_info)\n    : stream_info_(std::move(stream_info)), active_connections_(active_connections),\n      connection_(std::move(new_connection)),\n      conn_length_(new Stats::HistogramCompletableTimespanImpl(\n          active_connections_.listener_.stats_.downstream_cx_length_ms_, time_source)) {\n  // We just universally set no delay on connections. Theoretically we might at some point want\n  // to make this configurable.\n  connection_->noDelay(true);\n  auto& listener = active_connections_.listener_;\n  listener.stats_.downstream_cx_total_.inc();\n  listener.stats_.downstream_cx_active_.inc();\n  listener.per_worker_stats_.downstream_cx_total_.inc();\n  listener.per_worker_stats_.downstream_cx_active_.inc();\n  stream_info_->setConnectionID(connection_->id());\n\n  // Active connections on the handler (not listener). 
The per listener connections have already\n  // been incremented at this point either via the connection balancer or in the socket accept\n  // path if there is no configured balancer.\n  ++listener.parent_.num_handler_connections_;\n}\n\nConnectionHandlerImpl::ActiveTcpConnection::~ActiveTcpConnection() {\n  emitLogs(*active_connections_.listener_.config_, *stream_info_);\n  auto& listener = active_connections_.listener_;\n  listener.stats_.downstream_cx_active_.dec();\n  listener.stats_.downstream_cx_destroy_.inc();\n  listener.per_worker_stats_.downstream_cx_active_.dec();\n  conn_length_->complete();\n\n  // Active listener connections (not handler).\n  listener.decNumConnections();\n\n  // Active handler connections (not listener).\n  listener.parent_.decNumConnections();\n}\n\nConnectionHandlerImpl::ActiveTcpListenerOptRef\nConnectionHandlerImpl::ActiveListenerDetails::tcpListener() {\n  auto* val = absl::get_if<std::reference_wrapper<ActiveTcpListener>>(&typed_listener_);\n  return (val != nullptr) ? absl::make_optional(*val) : absl::nullopt;\n}\n\nConnectionHandlerImpl::UdpListenerCallbacksOptRef\nConnectionHandlerImpl::ActiveListenerDetails::udpListener() {\n  auto* val = absl::get_if<std::reference_wrapper<Network::UdpListenerCallbacks>>(&typed_listener_);\n  return (val != nullptr) ? 
absl::make_optional(*val) : absl::nullopt;\n}\n\nActiveUdpListenerBase::ActiveUdpListenerBase(uint32_t worker_index, uint32_t concurrency,\n                                             Network::ConnectionHandler& parent,\n                                             Network::Socket& listen_socket,\n                                             Network::UdpListenerPtr&& listener,\n                                             Network::ListenerConfig* config)\n    : ConnectionHandlerImpl::ActiveListenerImplBase(parent, config), worker_index_(worker_index),\n      concurrency_(concurrency), parent_(parent), listen_socket_(listen_socket),\n      udp_listener_(std::move(listener)) {\n  ASSERT(worker_index_ < concurrency_);\n  config_->udpListenerWorkerRouter()->get().registerWorkerForListener(*this);\n}\n\nActiveUdpListenerBase::~ActiveUdpListenerBase() {\n  config_->udpListenerWorkerRouter()->get().unregisterWorkerForListener(*this);\n}\n\nvoid ActiveUdpListenerBase::post(Network::UdpRecvData&& data) {\n  ASSERT(!udp_listener_->dispatcher().isThreadSafe(),\n         \"Shouldn't be post'ing if thread safe; use onWorkerData() instead.\");\n\n  // It is not possible to capture a unique_ptr because the post() API copies the lambda, so we must\n  // bundle the socket inside a shared_ptr that can be captured.\n  // TODO(mattklein123): It may be possible to change the post() API such that the lambda is only\n  // moved, but this is non-trivial and needs investigation.\n  auto data_to_post = std::make_shared<Network::UdpRecvData>();\n  *data_to_post = std::move(data);\n\n  udp_listener_->dispatcher().post(\n      [data_to_post, tag = config_->listenerTag(), &parent = parent_]() {\n        Network::UdpListenerCallbacksOptRef listener = parent.getUdpListenerCallbacks(tag);\n        if (listener.has_value()) {\n          listener->get().onDataWorker(std::move(*data_to_post));\n        }\n      });\n}\n\nvoid ActiveUdpListenerBase::onData(Network::UdpRecvData&& data) {\n  uint32_t 
dest = worker_index_;\n\n  // For concurrency == 1, the packet will always go to the current worker.\n  if (concurrency_ > 1) {\n    dest = destination(data);\n    ASSERT(dest < concurrency_);\n  }\n\n  if (dest == worker_index_) {\n    onDataWorker(std::move(data));\n  } else {\n    config_->udpListenerWorkerRouter()->get().deliver(dest, std::move(data));\n  }\n}\n\nActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency,\n                                           Network::ConnectionHandler& parent,\n                                           Event::Dispatcher& dispatcher,\n                                           Network::ListenerConfig& config)\n    : ActiveRawUdpListener(worker_index, concurrency, parent,\n                           config.listenSocketFactory().getListenSocket(), dispatcher, config) {}\n\nActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency,\n                                           Network::ConnectionHandler& parent,\n                                           Network::SocketSharedPtr listen_socket_ptr,\n                                           Event::Dispatcher& dispatcher,\n                                           Network::ListenerConfig& config)\n    : ActiveRawUdpListener(worker_index, concurrency, parent, *listen_socket_ptr, listen_socket_ptr,\n                           dispatcher, config) {}\n\nActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency,\n                                           Network::ConnectionHandler& parent,\n                                           Network::Socket& listen_socket,\n                                           Network::SocketSharedPtr listen_socket_ptr,\n                                           Event::Dispatcher& dispatcher,\n                                           Network::ListenerConfig& config)\n    : ActiveRawUdpListener(worker_index, concurrency, parent, listen_socket,\n                  
         dispatcher.createUdpListener(listen_socket_ptr, *this), config) {}\n\nActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency,\n                                           Network::ConnectionHandler& parent,\n                                           Network::Socket& listen_socket,\n                                           Network::UdpListenerPtr&& listener,\n                                           Network::ListenerConfig& config)\n    : ActiveUdpListenerBase(worker_index, concurrency, parent, listen_socket, std::move(listener),\n                            &config),\n      read_filter_(nullptr) {\n  // Create the filter chain on creating a new udp listener\n  config_->filterChainFactory().createUdpListenerFilterChain(*this, *this);\n\n  // If filter is nullptr, fail the creation of the listener\n  if (read_filter_ == nullptr) {\n    throw Network::CreateListenerException(\n        fmt::format(\"Cannot create listener as no read filter registered for the udp listener: {} \",\n                    config_->name()));\n  }\n\n  // Create udp_packet_writer\n  udp_packet_writer_ = config.udpPacketWriterFactory()->get().createUdpPacketWriter(\n      listen_socket_.ioHandle(), config.listenerScope());\n}\n\nvoid ActiveRawUdpListener::onDataWorker(Network::UdpRecvData&& data) { read_filter_->onData(data); }\n\nvoid ActiveRawUdpListener::onReadReady() {}\n\nvoid ActiveRawUdpListener::onWriteReady(const Network::Socket&) {\n  // TODO(sumukhs): This is not used now. 
When write filters are implemented, this is a\n  // trigger to invoke the on write ready API on the filters which is when they can write\n  // data\n\n  // Clear write_blocked_ status for udpPacketWriter\n  udp_packet_writer_->setWritable();\n}\n\nvoid ActiveRawUdpListener::onReceiveError(Api::IoError::IoErrorCode error_code) {\n  read_filter_->onReceiveError(error_code);\n}\n\nvoid ActiveRawUdpListener::addReadFilter(Network::UdpListenerReadFilterPtr&& filter) {\n  ASSERT(read_filter_ == nullptr, \"Cannot add a 2nd UDP read filter\");\n  read_filter_ = std::move(filter);\n}\n\nNetwork::UdpListener& ActiveRawUdpListener::udpListener() { return *udp_listener_; }\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/connection_handler_impl.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <cstdint>\n#include <list>\n#include <memory>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/connection_handler.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/network/listener.h\"\n#include \"envoy/server/active_udp_listener_config.h\"\n#include \"envoy/server/listener_manager.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/timespan.h\"\n\n#include \"common/common/linked_object.h\"\n#include \"common/common/non_copyable.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n#define ALL_LISTENER_STATS(COUNTER, GAUGE, HISTOGRAM)                                              \\\n  COUNTER(downstream_cx_destroy)                                                                   \\\n  COUNTER(downstream_cx_overflow)                                                                  \\\n  COUNTER(downstream_cx_total)                                                                     \\\n  COUNTER(downstream_global_cx_overflow)                                                           \\\n  COUNTER(downstream_pre_cx_timeout)                                                               \\\n  COUNTER(no_filter_chain_match)                                                                   \\\n  GAUGE(downstream_cx_active, Accumulate)                                                          \\\n  GAUGE(downstream_pre_cx_active, Accumulate)                                                      \\\n  HISTOGRAM(downstream_cx_length_ms, Milliseconds)\n\n/**\n * Wrapper struct for listener stats. 
@see stats_macros.h\n */\nstruct ListenerStats {\n  ALL_LISTENER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\n#define ALL_PER_HANDLER_LISTENER_STATS(COUNTER, GAUGE)                                             \\\n  COUNTER(downstream_cx_total)                                                                     \\\n  GAUGE(downstream_cx_active, Accumulate)\n\n/**\n * Wrapper struct for per-handler listener stats. @see stats_macros.h\n */\nstruct PerHandlerListenerStats {\n  ALL_PER_HANDLER_LISTENER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\nclass ActiveUdpListenerBase;\n\n/**\n * Server side connection handler. This is used both by workers as well as the\n * main thread for non-threaded listeners.\n */\nclass ConnectionHandlerImpl : public Network::ConnectionHandler,\n                              NonCopyable,\n                              Logger::Loggable<Logger::Id::conn_handler> {\npublic:\n  ConnectionHandlerImpl(Event::Dispatcher& dispatcher, absl::optional<uint32_t> worker_index);\n\n  // Network::ConnectionHandler\n  uint64_t numConnections() const override { return num_handler_connections_; }\n  void incNumConnections() override;\n  void decNumConnections() override;\n  void addListener(absl::optional<uint64_t> overridden_listener,\n                   Network::ListenerConfig& config) override;\n  void removeListeners(uint64_t listener_tag) override;\n  Network::UdpListenerCallbacksOptRef getUdpListenerCallbacks(uint64_t listener_tag) override;\n  void removeFilterChains(uint64_t listener_tag,\n                          const std::list<const Network::FilterChain*>& filter_chains,\n                          std::function<void()> completion) override;\n  void stopListeners(uint64_t listener_tag) override;\n  void stopListeners() override;\n  void disableListeners() override;\n  void enableListeners() override;\n  const std::string& statPrefix() const override { return per_handler_stat_prefix_; }\n\n  
/**\n   * Wrapper for an active listener owned by this handler.\n   */\n  class ActiveListenerImplBase : public virtual Network::ConnectionHandler::ActiveListener {\n  public:\n    ActiveListenerImplBase(Network::ConnectionHandler& parent, Network::ListenerConfig* config);\n\n    // Network::ConnectionHandler::ActiveListener.\n    uint64_t listenerTag() override { return config_->listenerTag(); }\n\n    ListenerStats stats_;\n    PerHandlerListenerStats per_worker_stats_;\n    Network::ListenerConfig* config_{};\n  };\n\nprivate:\n  struct ActiveTcpConnection;\n  using ActiveTcpConnectionPtr = std::unique_ptr<ActiveTcpConnection>;\n  struct ActiveTcpSocket;\n  using ActiveTcpSocketPtr = std::unique_ptr<ActiveTcpSocket>;\n  class ActiveConnections;\n  using ActiveConnectionsPtr = std::unique_ptr<ActiveConnections>;\n\n  /**\n   * Wrapper for an active tcp listener owned by this handler.\n   */\n  class ActiveTcpListener : public Network::TcpListenerCallbacks,\n                            public ActiveListenerImplBase,\n                            public Network::BalancedConnectionHandler {\n  public:\n    ActiveTcpListener(ConnectionHandlerImpl& parent, Network::ListenerConfig& config);\n    ActiveTcpListener(ConnectionHandlerImpl& parent, Network::ListenerPtr&& listener,\n                      Network::ListenerConfig& config);\n    ~ActiveTcpListener() override;\n    bool listenerConnectionLimitReached() const {\n      // TODO(tonya11en): Delegate enforcement of per-listener connection limits to overload\n      // manager.\n      return !config_->openConnections().canCreate();\n    }\n    void onAcceptWorker(Network::ConnectionSocketPtr&& socket,\n                        bool hand_off_restored_destination_connections, bool rebalanced);\n    void decNumConnections() {\n      ASSERT(num_listener_connections_ > 0);\n      --num_listener_connections_;\n      config_->openConnections().dec();\n    }\n\n    // Network::TcpListenerCallbacks\n    void 
onAccept(Network::ConnectionSocketPtr&& socket) override;\n    void onReject() override { stats_.downstream_global_cx_overflow_.inc(); }\n\n    // ActiveListenerImplBase\n    Network::Listener* listener() override { return listener_.get(); }\n    void pauseListening() override { listener_->disable(); }\n    void resumeListening() override { listener_->enable(); }\n    void shutdownListener() override { listener_.reset(); }\n\n    // Network::BalancedConnectionHandler\n    uint64_t numConnections() const override { return num_listener_connections_; }\n    void incNumConnections() override {\n      ++num_listener_connections_;\n      config_->openConnections().inc();\n    }\n    void post(Network::ConnectionSocketPtr&& socket) override;\n\n    /**\n     * Remove and destroy an active connection.\n     * @param connection supplies the connection to remove.\n     */\n    void removeConnection(ActiveTcpConnection& connection);\n\n    /**\n     * Create a new connection from a socket accepted by the listener.\n     */\n    void newConnection(Network::ConnectionSocketPtr&& socket,\n                       std::unique_ptr<StreamInfo::StreamInfo> stream_info);\n\n    /**\n     * Return the active connections container attached with the given filter chain.\n     */\n    ActiveConnections& getOrCreateActiveConnections(const Network::FilterChain& filter_chain);\n\n    /**\n     * Schedule to remove and destroy the active connections which are not tracked by listener\n     * config. Caution: The connection are not destroyed yet when function returns.\n     */\n    void deferredRemoveFilterChains(\n        const std::list<const Network::FilterChain*>& draining_filter_chains);\n\n    /**\n     * Update the listener config. The follow up connections will see the new config. 
The existing\n     * connections are not impacted.\n     */\n    void updateListenerConfig(Network::ListenerConfig& config);\n\n    ConnectionHandlerImpl& parent_;\n    Network::ListenerPtr listener_;\n    const std::chrono::milliseconds listener_filters_timeout_;\n    const bool continue_on_listener_filters_timeout_;\n    std::list<ActiveTcpSocketPtr> sockets_;\n    absl::node_hash_map<const Network::FilterChain*, ActiveConnectionsPtr> connections_by_context_;\n\n    // The number of connections currently active on this listener. This is typically used for\n    // connection balancing across per-handler listeners.\n    std::atomic<uint64_t> num_listener_connections_{};\n    bool is_deleting_{false};\n  };\n\n  /**\n   * Wrapper for a group of active connections which are attached to the same filter chain context.\n   */\n  class ActiveConnections : public Event::DeferredDeletable {\n  public:\n    ActiveConnections(ActiveTcpListener& listener, const Network::FilterChain& filter_chain);\n    ~ActiveConnections() override;\n\n    // listener filter chain pair is the owner of the connections\n    ActiveTcpListener& listener_;\n    const Network::FilterChain& filter_chain_;\n    // Owned connections\n    std::list<ActiveTcpConnectionPtr> connections_;\n  };\n\n  /**\n   * Wrapper for an active TCP connection owned by this handler.\n   */\n  struct ActiveTcpConnection : LinkedObject<ActiveTcpConnection>,\n                               public Event::DeferredDeletable,\n                               public Network::ConnectionCallbacks {\n    ActiveTcpConnection(ActiveConnections& active_connections,\n                        Network::ConnectionPtr&& new_connection, TimeSource& time_system,\n                        std::unique_ptr<StreamInfo::StreamInfo>&& stream_info);\n    ~ActiveTcpConnection() override;\n\n    // Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override {\n      // Any event leads to destruction of the connection.\n      
if (event == Network::ConnectionEvent::LocalClose ||\n          event == Network::ConnectionEvent::RemoteClose) {\n        active_connections_.listener_.removeConnection(*this);\n      }\n    }\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    std::unique_ptr<StreamInfo::StreamInfo> stream_info_;\n    ActiveConnections& active_connections_;\n    Network::ConnectionPtr connection_;\n    Stats::TimespanPtr conn_length_;\n  };\n\n  /**\n   * Wrapper for an active accepted TCP socket owned by this handler.\n   */\n  struct ActiveTcpSocket : public Network::ListenerFilterManager,\n                           public Network::ListenerFilterCallbacks,\n                           LinkedObject<ActiveTcpSocket>,\n                           public Event::DeferredDeletable {\n    ActiveTcpSocket(ActiveTcpListener& listener, Network::ConnectionSocketPtr&& socket,\n                    bool hand_off_restored_destination_connections)\n        : listener_(listener), socket_(std::move(socket)),\n          hand_off_restored_destination_connections_(hand_off_restored_destination_connections),\n          iter_(accept_filters_.end()), stream_info_(std::make_unique<StreamInfo::StreamInfoImpl>(\n                                            listener_.parent_.dispatcher_.timeSource(),\n                                            StreamInfo::FilterState::LifeSpan::Connection)) {\n      listener_.stats_.downstream_pre_cx_active_.inc();\n      stream_info_->setDownstreamLocalAddress(socket_->localAddress());\n      stream_info_->setDownstreamRemoteAddress(socket_->remoteAddress());\n      stream_info_->setDownstreamDirectRemoteAddress(socket_->directRemoteAddress());\n    }\n    ~ActiveTcpSocket() override {\n      accept_filters_.clear();\n      listener_.stats_.downstream_pre_cx_active_.dec();\n\n      // If the underlying socket is no longer attached, it means that it has been transferred to\n      // an active connection. 
In this case, the active connection will decrement the number\n      // of listener connections.\n      // TODO(mattklein123): In general the way we account for the number of listener connections\n      // is incredibly fragile. Revisit this by potentially merging ActiveTcpSocket and\n      // ActiveTcpConnection, having a shared object which does accounting (but would require\n      // another allocation, etc.).\n      if (socket_ != nullptr) {\n        listener_.decNumConnections();\n      }\n    }\n\n    void onTimeout();\n    void startTimer();\n    void unlink();\n    void newConnection();\n\n    class GenericListenerFilter : public Network::ListenerFilter {\n    public:\n      GenericListenerFilter(const Network::ListenerFilterMatcherSharedPtr& matcher,\n                            Network::ListenerFilterPtr listener_filter)\n          : listener_filter_(std::move(listener_filter)), matcher_(std::move(matcher)) {}\n      Network::FilterStatus onAccept(ListenerFilterCallbacks& cb) override {\n        if (isDisabled(cb)) {\n          return Network::FilterStatus::Continue;\n        }\n        return listener_filter_->onAccept(cb);\n      }\n      /**\n       * Check if this filter filter should be disabled on the incoming socket.\n       * @param cb the callbacks the filter instance can use to communicate with the filter chain.\n       **/\n      bool isDisabled(ListenerFilterCallbacks& cb) {\n        if (matcher_ == nullptr) {\n          return false;\n        } else {\n          return matcher_->matches(cb);\n        }\n      }\n\n    private:\n      const Network::ListenerFilterPtr listener_filter_;\n      const Network::ListenerFilterMatcherSharedPtr matcher_;\n    };\n    using ListenerFilterWrapperPtr = std::unique_ptr<GenericListenerFilter>;\n\n    // Network::ListenerFilterManager\n    void addAcceptFilter(const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n                         Network::ListenerFilterPtr&& filter) override {\n   
   accept_filters_.emplace_back(\n          std::make_unique<GenericListenerFilter>(listener_filter_matcher, std::move(filter)));\n    }\n\n    // Network::ListenerFilterCallbacks\n    Network::ConnectionSocket& socket() override { return *socket_.get(); }\n    Event::Dispatcher& dispatcher() override { return listener_.parent_.dispatcher_; }\n    void continueFilterChain(bool success) override;\n    void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) override;\n    envoy::config::core::v3::Metadata& dynamicMetadata() override {\n      return stream_info_->dynamicMetadata();\n    };\n    const envoy::config::core::v3::Metadata& dynamicMetadata() const override {\n      return stream_info_->dynamicMetadata();\n    };\n\n    ActiveTcpListener& listener_;\n    Network::ConnectionSocketPtr socket_;\n    const bool hand_off_restored_destination_connections_;\n    std::list<ListenerFilterWrapperPtr> accept_filters_;\n    std::list<ListenerFilterWrapperPtr>::iterator iter_;\n    Event::TimerPtr timer_;\n    std::unique_ptr<StreamInfo::StreamInfo> stream_info_;\n    bool connected_{false};\n  };\n\n  using ActiveTcpListenerOptRef = absl::optional<std::reference_wrapper<ActiveTcpListener>>;\n  using UdpListenerCallbacksOptRef =\n      absl::optional<std::reference_wrapper<Network::UdpListenerCallbacks>>;\n\n  struct ActiveListenerDetails {\n    // Strong pointer to the listener, whether TCP, UDP, QUIC, etc.\n    Network::ConnectionHandler::ActiveListenerPtr listener_;\n\n    absl::variant<absl::monostate, std::reference_wrapper<ActiveTcpListener>,\n                  std::reference_wrapper<Network::UdpListenerCallbacks>>\n        typed_listener_;\n\n    // Helpers for accessing the data in the variant for cleaner code.\n    ActiveTcpListenerOptRef tcpListener();\n    UdpListenerCallbacksOptRef udpListener();\n  };\n  using ActiveListenerDetailsOptRef = absl::optional<std::reference_wrapper<ActiveListenerDetails>>;\n\n  ActiveTcpListenerOptRef 
findActiveTcpListenerByAddress(const Network::Address::Instance& address);\n  ActiveListenerDetailsOptRef findActiveListenerByTag(uint64_t listener_tag);\n\n  // This has a value on worker threads, and no value on the main thread.\n  const absl::optional<uint32_t> worker_index_;\n  Event::Dispatcher& dispatcher_;\n  const std::string per_handler_stat_prefix_;\n  std::list<std::pair<Network::Address::InstanceConstSharedPtr, ActiveListenerDetails>> listeners_;\n  std::atomic<uint64_t> num_handler_connections_{};\n  bool disable_listeners_;\n};\n\nclass ActiveUdpListenerBase : public ConnectionHandlerImpl::ActiveListenerImplBase,\n                              public Network::ConnectionHandler::ActiveUdpListener {\npublic:\n  ActiveUdpListenerBase(uint32_t worker_index, uint32_t concurrency,\n                        Network::ConnectionHandler& parent, Network::Socket& listen_socket,\n                        Network::UdpListenerPtr&& listener, Network::ListenerConfig* config);\n  ~ActiveUdpListenerBase() override;\n\n  // Network::UdpListenerCallbacks\n  void onData(Network::UdpRecvData&& data) final;\n  uint32_t workerIndex() const final { return worker_index_; }\n  void post(Network::UdpRecvData&& data) final;\n\n  // ActiveListenerImplBase\n  Network::Listener* listener() override { return udp_listener_.get(); }\n\nprotected:\n  uint32_t destination(const Network::UdpRecvData& /*data*/) const override {\n    // By default, route to the current worker.\n    return worker_index_;\n  }\n\n  const uint32_t worker_index_;\n  const uint32_t concurrency_;\n  Network::ConnectionHandler& parent_;\n  Network::Socket& listen_socket_;\n  Network::UdpListenerPtr udp_listener_;\n};\n\n/**\n * Wrapper for an active udp listener owned by this handler.\n */\nclass ActiveRawUdpListener : public ActiveUdpListenerBase,\n                             public Network::UdpListenerFilterManager,\n                             public Network::UdpReadFilterCallbacks {\npublic:\n  
ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency,\n                       Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher,\n                       Network::ListenerConfig& config);\n  ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency,\n                       Network::ConnectionHandler& parent,\n                       Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher,\n                       Network::ListenerConfig& config);\n  ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency,\n                       Network::ConnectionHandler& parent, Network::Socket& listen_socket,\n                       Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher,\n                       Network::ListenerConfig& config);\n  ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency,\n                       Network::ConnectionHandler& parent, Network::Socket& listen_socket,\n                       Network::UdpListenerPtr&& listener, Network::ListenerConfig& config);\n\n  // Network::UdpListenerCallbacks\n  void onReadReady() override;\n  void onWriteReady(const Network::Socket& socket) override;\n  void onReceiveError(Api::IoError::IoErrorCode error_code) override;\n  Network::UdpPacketWriter& udpPacketWriter() override { return *udp_packet_writer_; }\n\n  // Network::UdpWorker\n  void onDataWorker(Network::UdpRecvData&& data) override;\n\n  // ActiveListenerImplBase\n  void pauseListening() override { udp_listener_->disable(); }\n  void resumeListening() override { udp_listener_->enable(); }\n  void shutdownListener() override {\n    // The read filter should be deleted before the UDP listener is deleted.\n    // The read filter refers to the UDP listener to send packets to downstream.\n    // If the UDP listener is deleted before the read filter, the read filter may try to use it\n    // after deletion.\n    read_filter_.reset();\n    udp_listener_.reset();\n  }\n\n  // 
Network::UdpListenerFilterManager\n  void addReadFilter(Network::UdpListenerReadFilterPtr&& filter) override;\n\n  // Network::UdpReadFilterCallbacks\n  Network::UdpListener& udpListener() override;\n\nprivate:\n  Network::UdpListenerReadFilterPtr read_filter_;\n  Network::UdpPacketWriterPtr udp_packet_writer_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/drain_manager_impl.cc",
    "content": "#include \"server/drain_manager_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nDrainManagerImpl::DrainManagerImpl(Instance& server,\n                                   envoy::config::listener::v3::Listener::DrainType drain_type)\n    : server_(server), drain_type_(drain_type) {}\n\nbool DrainManagerImpl::drainClose() const {\n  // If we are actively health check failed and the drain type is default, always drain close.\n  //\n  // TODO(mattklein123): In relation to x-envoy-immediate-health-check-fail, it would be better\n  // if even in the case of server health check failure we had some period of drain ramp up. This\n  // would allow the other side to fail health check for the host which will require some thread\n  // jumps versus immediately start GOAWAY/connection thrashing.\n  if (drain_type_ == envoy::config::listener::v3::Listener::DEFAULT &&\n      server_.healthCheckFailed()) {\n    return true;\n  }\n\n  if (!draining_) {\n    return false;\n  }\n\n  if (server_.options().drainStrategy() == Server::DrainStrategy::Immediate) {\n    return true;\n  }\n  ASSERT(server_.options().drainStrategy() == Server::DrainStrategy::Gradual);\n\n  // P(return true) = elapsed time / drain timeout\n  // If the drain deadline is exceeded, skip the probability calculation.\n  const MonotonicTime current_time = server_.dispatcher().timeSource().monotonicTime();\n  if (current_time >= drain_deadline_) {\n    return true;\n  }\n\n  const auto remaining_time =\n      std::chrono::duration_cast<std::chrono::seconds>(drain_deadline_ - current_time);\n  ASSERT(server_.options().drainTime() >= remaining_time);\n  const auto elapsed_time = server_.options().drainTime() - remaining_time;\n  return static_cast<uint64_t>(elapsed_time.count()) >\n         
(server_.api().randomGenerator().random() % server_.options().drainTime().count());\n}\n\nvoid DrainManagerImpl::startDrainSequence(std::function<void()> drain_complete_cb) {\n  ASSERT(drain_complete_cb);\n  ASSERT(!draining_);\n  ASSERT(!drain_tick_timer_);\n  draining_ = true;\n  drain_tick_timer_ = server_.dispatcher().createTimer(drain_complete_cb);\n  const std::chrono::seconds drain_delay(server_.options().drainTime());\n  drain_tick_timer_->enableTimer(drain_delay);\n  drain_deadline_ = server_.dispatcher().timeSource().monotonicTime() + drain_delay;\n}\n\nvoid DrainManagerImpl::startParentShutdownSequence() {\n  ASSERT(!parent_shutdown_timer_);\n  parent_shutdown_timer_ = server_.dispatcher().createTimer([this]() -> void {\n    // Shut down the parent now. It should have already been draining.\n    ENVOY_LOG(info, \"shutting down parent after drain\");\n    server_.hotRestart().sendParentTerminateRequest();\n  });\n\n  parent_shutdown_timer_->enableTimer(std::chrono::duration_cast<std::chrono::milliseconds>(\n      server_.options().parentShutdownTime()));\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/drain_manager_impl.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Implementation of drain manager that does the following by default:\n * 1) Terminates the parent process after 15 minutes.\n * 2) Drains the parent process over a period of 10 minutes where drain close becomes more\n *    likely each second that passes.\n */\nclass DrainManagerImpl : Logger::Loggable<Logger::Id::main>, public DrainManager {\npublic:\n  DrainManagerImpl(Instance& server, envoy::config::listener::v3::Listener::DrainType drain_type);\n\n  // Network::DrainDecision\n  bool drainClose() const override;\n\n  // Server::DrainManager\n  void startDrainSequence(std::function<void()> drain_complete_cb) override;\n  bool draining() const override { return draining_; }\n  void startParentShutdownSequence() override;\n\nprivate:\n  Instance& server_;\n  const envoy::config::listener::v3::Listener::DrainType drain_type_;\n\n  std::atomic<bool> draining_{false};\n  Event::TimerPtr drain_tick_timer_;\n  MonotonicTime drain_deadline_;\n\n  Event::TimerPtr parent_shutdown_timer_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/filter_chain_factory_context_callback.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/api/v2/listener/listener.pb.h\"\n#include \"envoy/common/pure.h\"\n#include \"envoy/server/filter_config.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Handles FilterChainFactoryContext creation. It is used by a listener to add a new filter chain\n * without worrying about the lifetime of each factory context.\n */\nclass FilterChainFactoryContextCreator {\npublic:\n  virtual ~FilterChainFactoryContextCreator() = default;\n\n  /**\n   * Generate the filter chain factory context from proto. Note the caller does not own the filter\n   * chain context.\n   */\n  virtual Configuration::FilterChainFactoryContextPtr createFilterChainFactoryContext(\n      const ::envoy::config::listener::v3::FilterChain* const filter_chain) PURE;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/filter_chain_manager_impl.cc",
    "content": "#include \"server/filter_chain_manager_impl.h\"\n\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n\n#include \"common/common/cleanup.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/utility.h\"\n#include \"common/network/socket_interface.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"server/configuration_impl.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\n\n// Return a fake address for use when either the source or destination is UDS.\nNetwork::Address::InstanceConstSharedPtr fakeAddress() {\n  CONSTRUCT_ON_FIRST_USE(Network::Address::InstanceConstSharedPtr,\n                         Network::Utility::parseInternetAddress(\"255.255.255.255\"));\n}\n\n} // namespace\n\nPerFilterChainFactoryContextImpl::PerFilterChainFactoryContextImpl(\n    Configuration::FactoryContext& parent_context, Init::Manager& init_manager)\n    : parent_context_(parent_context), init_manager_(init_manager) {}\n\nbool PerFilterChainFactoryContextImpl::drainClose() const {\n  return is_draining_.load() || parent_context_.drainDecision().drainClose();\n}\n\nNetwork::DrainDecision& PerFilterChainFactoryContextImpl::drainDecision() { return *this; }\n\nInit::Manager& PerFilterChainFactoryContextImpl::initManager() { return init_manager_; }\n\nThreadLocal::SlotAllocator& PerFilterChainFactoryContextImpl::threadLocal() {\n  return parent_context_.threadLocal();\n}\n\nconst envoy::config::core::v3::Metadata&\nPerFilterChainFactoryContextImpl::listenerMetadata() const {\n  return parent_context_.listenerMetadata();\n}\n\nenvoy::config::core::v3::TrafficDirection PerFilterChainFactoryContextImpl::direction() const {\n  return parent_context_.direction();\n}\n\nProtobufMessage::ValidationContext& PerFilterChainFactoryContextImpl::messageValidationContext() {\n  return 
parent_context_.messageValidationContext();\n}\nProtobufMessage::ValidationVisitor& PerFilterChainFactoryContextImpl::messageValidationVisitor() {\n  return parent_context_.messageValidationVisitor();\n}\n\nAccessLog::AccessLogManager& PerFilterChainFactoryContextImpl::accessLogManager() {\n  return parent_context_.accessLogManager();\n}\n\nUpstream::ClusterManager& PerFilterChainFactoryContextImpl::clusterManager() {\n  return parent_context_.clusterManager();\n}\n\nEvent::Dispatcher& PerFilterChainFactoryContextImpl::dispatcher() {\n  return parent_context_.dispatcher();\n}\n\nGrpc::Context& PerFilterChainFactoryContextImpl::grpcContext() {\n  return parent_context_.grpcContext();\n}\n\nbool PerFilterChainFactoryContextImpl::healthCheckFailed() {\n  return parent_context_.healthCheckFailed();\n}\n\nHttp::Context& PerFilterChainFactoryContextImpl::httpContext() {\n  return parent_context_.httpContext();\n}\n\nconst LocalInfo::LocalInfo& PerFilterChainFactoryContextImpl::localInfo() const {\n  return parent_context_.localInfo();\n}\n\nEnvoy::Runtime::Loader& PerFilterChainFactoryContextImpl::runtime() {\n  return parent_context_.runtime();\n}\n\nStats::Scope& PerFilterChainFactoryContextImpl::scope() { return parent_context_.scope(); }\n\nSingleton::Manager& PerFilterChainFactoryContextImpl::singletonManager() {\n  return parent_context_.singletonManager();\n}\n\nOverloadManager& PerFilterChainFactoryContextImpl::overloadManager() {\n  return parent_context_.overloadManager();\n}\n\nAdmin& PerFilterChainFactoryContextImpl::admin() { return parent_context_.admin(); }\n\nTimeSource& PerFilterChainFactoryContextImpl::timeSource() { return api().timeSource(); }\n\nApi::Api& PerFilterChainFactoryContextImpl::api() { return parent_context_.api(); }\n\nServerLifecycleNotifier& PerFilterChainFactoryContextImpl::lifecycleNotifier() {\n  return parent_context_.lifecycleNotifier();\n}\n\nProcessContextOptRef PerFilterChainFactoryContextImpl::processContext() {\n  return 
parent_context_.processContext();\n}\n\nConfiguration::ServerFactoryContext&\nPerFilterChainFactoryContextImpl::getServerFactoryContext() const {\n  return parent_context_.getServerFactoryContext();\n}\n\nConfiguration::TransportSocketFactoryContext&\nPerFilterChainFactoryContextImpl::getTransportSocketFactoryContext() const {\n  return parent_context_.getTransportSocketFactoryContext();\n}\n\nStats::Scope& PerFilterChainFactoryContextImpl::listenerScope() {\n  return parent_context_.listenerScope();\n}\n\nFilterChainManagerImpl::FilterChainManagerImpl(\n    const Network::Address::InstanceConstSharedPtr& address,\n    Configuration::FactoryContext& factory_context, Init::Manager& init_manager,\n    const FilterChainManagerImpl& parent_manager)\n    : address_(address), parent_context_(factory_context), origin_(&parent_manager),\n      init_manager_(init_manager) {}\n\nbool FilterChainManagerImpl::isWildcardServerName(const std::string& name) {\n  return absl::StartsWith(name, \"*.\");\n}\n\nvoid FilterChainManagerImpl::addFilterChain(\n    absl::Span<const envoy::config::listener::v3::FilterChain* const> filter_chain_span,\n    FilterChainFactoryBuilder& filter_chain_factory_builder,\n    FilterChainFactoryContextCreator& context_creator) {\n  Cleanup cleanup([this]() { origin_ = absl::nullopt; });\n  absl::node_hash_map<envoy::config::listener::v3::FilterChainMatch, std::string, MessageUtil,\n                      MessageUtil>\n      filter_chains;\n  uint32_t new_filter_chain_size = 0;\n  for (const auto& filter_chain : filter_chain_span) {\n    const auto& filter_chain_match = filter_chain->filter_chain_match();\n    if (!filter_chain_match.address_suffix().empty() || filter_chain_match.has_suffix_len()) {\n      throw EnvoyException(fmt::format(\"error adding listener '{}': filter chain '{}' contains \"\n                                       \"unimplemented fields\",\n                                       address_->asString(), filter_chain->name()));\n    
}\n    const auto& matching_iter = filter_chains.find(filter_chain_match);\n    if (matching_iter != filter_chains.end()) {\n      throw EnvoyException(fmt::format(\"error adding listener '{}': filter chain '{}' has \"\n                                       \"the same matching rules defined as '{}'\",\n                                       address_->asString(), filter_chain->name(),\n                                       matching_iter->second));\n    }\n    filter_chains.insert({filter_chain_match, filter_chain->name()});\n\n    // Validate IP addresses.\n    std::vector<std::string> destination_ips;\n    destination_ips.reserve(filter_chain_match.prefix_ranges().size());\n    for (const auto& destination_ip : filter_chain_match.prefix_ranges()) {\n      const auto& cidr_range = Network::Address::CidrRange::create(destination_ip);\n      destination_ips.push_back(cidr_range.asString());\n    }\n\n    std::vector<std::string> source_ips;\n    source_ips.reserve(filter_chain_match.source_prefix_ranges().size());\n    for (const auto& source_ip : filter_chain_match.source_prefix_ranges()) {\n      const auto& cidr_range = Network::Address::CidrRange::create(source_ip);\n      source_ips.push_back(cidr_range.asString());\n    }\n\n    // Reject partial wildcards, we don't match on them.\n    for (const auto& server_name : filter_chain_match.server_names()) {\n      if (server_name.find('*') != std::string::npos &&\n          !FilterChainManagerImpl::isWildcardServerName(server_name)) {\n        throw EnvoyException(\n            fmt::format(\"error adding listener '{}': partial wildcards are not supported in \"\n                        \"\\\"server_names\\\"\",\n                        address_->asString()));\n      }\n    }\n\n    // Reuse created filter chain if possible.\n    // FilterChainManager maintains the lifetime of FilterChainFactoryContext\n    // ListenerImpl maintains the dependencies of FilterChainFactoryContext\n    auto filter_chain_impl = 
findExistingFilterChain(*filter_chain);\n    if (filter_chain_impl == nullptr) {\n      filter_chain_impl =\n          filter_chain_factory_builder.buildFilterChain(*filter_chain, context_creator);\n      ++new_filter_chain_size;\n    }\n\n    addFilterChainForDestinationPorts(\n        destination_ports_map_,\n        PROTOBUF_GET_WRAPPED_OR_DEFAULT(filter_chain_match, destination_port, 0), destination_ips,\n        filter_chain_match.server_names(), filter_chain_match.transport_protocol(),\n        filter_chain_match.application_protocols(), filter_chain_match.source_type(), source_ips,\n        filter_chain_match.source_ports(), filter_chain_impl);\n    fc_contexts_[*filter_chain] = filter_chain_impl;\n  }\n  convertIPsToTries();\n  ENVOY_LOG(debug, \"new fc_contexts has {} filter chains, including {} newly built\",\n            fc_contexts_.size(), new_filter_chain_size);\n}\n\nvoid FilterChainManagerImpl::addFilterChainForDestinationPorts(\n    DestinationPortsMap& destination_ports_map, uint16_t destination_port,\n    const std::vector<std::string>& destination_ips,\n    const absl::Span<const std::string* const> server_names, const std::string& transport_protocol,\n    const absl::Span<const std::string* const> application_protocols,\n    const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n    const std::vector<std::string>& source_ips,\n    const absl::Span<const Protobuf::uint32> source_ports,\n    const Network::FilterChainSharedPtr& filter_chain) {\n  if (destination_ports_map.find(destination_port) == destination_ports_map.end()) {\n    destination_ports_map[destination_port] =\n        std::make_pair<DestinationIPsMap, DestinationIPsTriePtr>(DestinationIPsMap{}, nullptr);\n  }\n  addFilterChainForDestinationIPs(destination_ports_map[destination_port].first, destination_ips,\n                                  server_names, transport_protocol, application_protocols,\n                                  source_type, 
source_ips, source_ports, filter_chain);\n}\n\nvoid FilterChainManagerImpl::addFilterChainForDestinationIPs(\n    DestinationIPsMap& destination_ips_map, const std::vector<std::string>& destination_ips,\n    const absl::Span<const std::string* const> server_names, const std::string& transport_protocol,\n    const absl::Span<const std::string* const> application_protocols,\n    const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n    const std::vector<std::string>& source_ips,\n    const absl::Span<const Protobuf::uint32> source_ports,\n    const Network::FilterChainSharedPtr& filter_chain) {\n  if (destination_ips.empty()) {\n    addFilterChainForServerNames(destination_ips_map[EMPTY_STRING], server_names,\n                                 transport_protocol, application_protocols, source_type, source_ips,\n                                 source_ports, filter_chain);\n  } else {\n    for (const auto& destination_ip : destination_ips) {\n      addFilterChainForServerNames(destination_ips_map[destination_ip], server_names,\n                                   transport_protocol, application_protocols, source_type,\n                                   source_ips, source_ports, filter_chain);\n    }\n  }\n}\n\nvoid FilterChainManagerImpl::addFilterChainForServerNames(\n    ServerNamesMapSharedPtr& server_names_map_ptr,\n    const absl::Span<const std::string* const> server_names, const std::string& transport_protocol,\n    const absl::Span<const std::string* const> application_protocols,\n    const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n    const std::vector<std::string>& source_ips,\n    const absl::Span<const Protobuf::uint32> source_ports,\n    const Network::FilterChainSharedPtr& filter_chain) {\n  if (server_names_map_ptr == nullptr) {\n    server_names_map_ptr = std::make_shared<ServerNamesMap>();\n  }\n  auto& server_names_map = *server_names_map_ptr;\n\n  if (server_names.empty()) {\n  
  addFilterChainForApplicationProtocols(server_names_map[EMPTY_STRING][transport_protocol],\n                                          application_protocols, source_type, source_ips,\n                                          source_ports, filter_chain);\n  } else {\n    for (const auto& server_name_ptr : server_names) {\n      if (isWildcardServerName(*server_name_ptr)) {\n        // Add mapping for the wildcard domain, i.e. \".example.com\" for \"*.example.com\".\n        addFilterChainForApplicationProtocols(\n            server_names_map[server_name_ptr->substr(1)][transport_protocol], application_protocols,\n            source_type, source_ips, source_ports, filter_chain);\n      } else {\n        addFilterChainForApplicationProtocols(\n            server_names_map[*server_name_ptr][transport_protocol], application_protocols,\n            source_type, source_ips, source_ports, filter_chain);\n      }\n    }\n  }\n}\n\nvoid FilterChainManagerImpl::addFilterChainForApplicationProtocols(\n    ApplicationProtocolsMap& application_protocols_map,\n    const absl::Span<const std::string* const> application_protocols,\n    const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n    const std::vector<std::string>& source_ips,\n    const absl::Span<const Protobuf::uint32> source_ports,\n    const Network::FilterChainSharedPtr& filter_chain) {\n  if (application_protocols.empty()) {\n    addFilterChainForSourceTypes(application_protocols_map[EMPTY_STRING], source_type, source_ips,\n                                 source_ports, filter_chain);\n  } else {\n    for (const auto& application_protocol_ptr : application_protocols) {\n      addFilterChainForSourceTypes(application_protocols_map[*application_protocol_ptr],\n                                   source_type, source_ips, source_ports, filter_chain);\n    }\n  }\n}\n\nvoid FilterChainManagerImpl::addFilterChainForSourceTypes(\n    SourceTypesArray& source_types_array,\n    const 
envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n    const std::vector<std::string>& source_ips,\n    const absl::Span<const Protobuf::uint32> source_ports,\n    const Network::FilterChainSharedPtr& filter_chain) {\n  if (source_ips.empty()) {\n    addFilterChainForSourceIPs(source_types_array[source_type].first, EMPTY_STRING, source_ports,\n                               filter_chain);\n  } else {\n    for (const auto& source_ip : source_ips) {\n      addFilterChainForSourceIPs(source_types_array[source_type].first, source_ip, source_ports,\n                                 filter_chain);\n    }\n  }\n}\n\nvoid FilterChainManagerImpl::addFilterChainForSourceIPs(\n    SourceIPsMap& source_ips_map, const std::string& source_ip,\n    const absl::Span<const Protobuf::uint32> source_ports,\n    const Network::FilterChainSharedPtr& filter_chain) {\n  if (source_ports.empty()) {\n    addFilterChainForSourcePorts(source_ips_map[source_ip], 0, filter_chain);\n  } else {\n    for (auto source_port : source_ports) {\n      addFilterChainForSourcePorts(source_ips_map[source_ip], source_port, filter_chain);\n    }\n  }\n}\n\nvoid FilterChainManagerImpl::addFilterChainForSourcePorts(\n    SourcePortsMapSharedPtr& source_ports_map_ptr, uint32_t source_port,\n    const Network::FilterChainSharedPtr& filter_chain) {\n  if (source_ports_map_ptr == nullptr) {\n    source_ports_map_ptr = std::make_shared<SourcePortsMap>();\n  }\n  auto& source_ports_map = *source_ports_map_ptr;\n\n  if (!source_ports_map.try_emplace(source_port, filter_chain).second) {\n    // If we got here and found already configured branch, then it means that this FilterChainMatch\n    // is a duplicate, and that there is some overlap in the repeated fields with already processed\n    // FilterChainMatches.\n    throw EnvoyException(fmt::format(\"error adding listener '{}': multiple filter chains with \"\n                                     \"overlapping matching rules are 
defined\",\n                                     address_->asString()));\n  }\n}\n\nnamespace {\n\n// Template function for creating a CIDR list entry for either source or destination address.\ntemplate <class T>\nstd::pair<T, std::vector<Network::Address::CidrRange>> makeCidrListEntry(const std::string& cidr,\n                                                                         const T& data) {\n  std::vector<Network::Address::CidrRange> subnets;\n  if (cidr == EMPTY_STRING) {\n    if (Network::SocketInterfaceSingleton::get().ipFamilySupported(AF_INET)) {\n      subnets.push_back(\n          Network::Address::CidrRange::create(Network::Utility::getIpv4CidrCatchAllAddress()));\n    }\n    if (Network::SocketInterfaceSingleton::get().ipFamilySupported(AF_INET6)) {\n      subnets.push_back(\n          Network::Address::CidrRange::create(Network::Utility::getIpv6CidrCatchAllAddress()));\n    }\n  } else {\n    subnets.push_back(Network::Address::CidrRange::create(cidr));\n  }\n  return std::make_pair<T, std::vector<Network::Address::CidrRange>>(T(data), std::move(subnets));\n}\n\n}; // namespace\n\nconst Network::FilterChain*\nFilterChainManagerImpl::findFilterChain(const Network::ConnectionSocket& socket) const {\n  const auto& address = socket.localAddress();\n\n  // Match on destination port (only for IP addresses).\n  if (address->type() == Network::Address::Type::Ip) {\n    const auto port_match = destination_ports_map_.find(address->ip()->port());\n    if (port_match != destination_ports_map_.end()) {\n      return findFilterChainForDestinationIP(*port_match->second.second, socket);\n    }\n  }\n\n  // Match on catch-all port 0.\n  const auto port_match = destination_ports_map_.find(0);\n  if (port_match != destination_ports_map_.end()) {\n    return findFilterChainForDestinationIP(*port_match->second.second, socket);\n  }\n\n  return nullptr;\n}\n\nconst Network::FilterChain* FilterChainManagerImpl::findFilterChainForDestinationIP(\n    const 
DestinationIPsTrie& destination_ips_trie, const Network::ConnectionSocket& socket) const {\n  auto address = socket.localAddress();\n  if (address->type() != Network::Address::Type::Ip) {\n    address = fakeAddress();\n  }\n\n  // Match on both: exact IP and wider CIDR ranges using LcTrie.\n  const auto& data = destination_ips_trie.getData(address);\n  if (!data.empty()) {\n    ASSERT(data.size() == 1);\n    return findFilterChainForServerName(*data.back(), socket);\n  }\n\n  return nullptr;\n}\n\nconst Network::FilterChain* FilterChainManagerImpl::findFilterChainForServerName(\n    const ServerNamesMap& server_names_map, const Network::ConnectionSocket& socket) const {\n  const std::string server_name(socket.requestedServerName());\n\n  // Match on exact server name, i.e. \"www.example.com\" for \"www.example.com\".\n  const auto server_name_exact_match = server_names_map.find(server_name);\n  if (server_name_exact_match != server_names_map.end()) {\n    return findFilterChainForTransportProtocol(server_name_exact_match->second, socket);\n  }\n\n  // Match on all wildcard domains, i.e. 
\".example.com\" and \".com\" for \"www.example.com\".\n  size_t pos = server_name.find('.', 1);\n  while (pos < server_name.size() - 1 && pos != std::string::npos) {\n    const std::string wildcard = server_name.substr(pos);\n    const auto server_name_wildcard_match = server_names_map.find(wildcard);\n    if (server_name_wildcard_match != server_names_map.end()) {\n      return findFilterChainForTransportProtocol(server_name_wildcard_match->second, socket);\n    }\n    pos = server_name.find('.', pos + 1);\n  }\n\n  // Match on a filter chain without server name requirements.\n  const auto server_name_catchall_match = server_names_map.find(EMPTY_STRING);\n  if (server_name_catchall_match != server_names_map.end()) {\n    return findFilterChainForTransportProtocol(server_name_catchall_match->second, socket);\n  }\n\n  return nullptr;\n}\n\nconst Network::FilterChain* FilterChainManagerImpl::findFilterChainForTransportProtocol(\n    const TransportProtocolsMap& transport_protocols_map,\n    const Network::ConnectionSocket& socket) const {\n  const std::string transport_protocol(socket.detectedTransportProtocol());\n\n  // Match on exact transport protocol, e.g. 
\"tls\".\n  const auto transport_protocol_match = transport_protocols_map.find(transport_protocol);\n  if (transport_protocol_match != transport_protocols_map.end()) {\n    return findFilterChainForApplicationProtocols(transport_protocol_match->second, socket);\n  }\n\n  // Match on a filter chain without transport protocol requirements.\n  const auto any_protocol_match = transport_protocols_map.find(EMPTY_STRING);\n  if (any_protocol_match != transport_protocols_map.end()) {\n    return findFilterChainForApplicationProtocols(any_protocol_match->second, socket);\n  }\n\n  return nullptr;\n}\n\nconst Network::FilterChain* FilterChainManagerImpl::findFilterChainForApplicationProtocols(\n    const ApplicationProtocolsMap& application_protocols_map,\n    const Network::ConnectionSocket& socket) const {\n  // Match on exact application protocol, e.g. \"h2\" or \"http/1.1\".\n  for (const auto& application_protocol : socket.requestedApplicationProtocols()) {\n    const auto application_protocol_match = application_protocols_map.find(application_protocol);\n    if (application_protocol_match != application_protocols_map.end()) {\n      return findFilterChainForSourceTypes(application_protocol_match->second, socket);\n    }\n  }\n\n  // Match on a filter chain without application protocol requirements.\n  const auto any_protocol_match = application_protocols_map.find(EMPTY_STRING);\n  if (any_protocol_match != application_protocols_map.end()) {\n    return findFilterChainForSourceTypes(any_protocol_match->second, socket);\n  }\n\n  return nullptr;\n}\n\nconst Network::FilterChain* FilterChainManagerImpl::findFilterChainForSourceTypes(\n    const SourceTypesArray& source_types, const Network::ConnectionSocket& socket) const {\n\n  const auto& filter_chain_local =\n      source_types[envoy::config::listener::v3::FilterChainMatch::SAME_IP_OR_LOOPBACK];\n\n  const auto& filter_chain_external =\n      source_types[envoy::config::listener::v3::FilterChainMatch::EXTERNAL];\n\n  
// isSameIpOrLoopback can be expensive. Call it only if LOCAL or EXTERNAL have entries.\n  const bool is_local_connection =\n      (!filter_chain_local.first.empty() || !filter_chain_external.first.empty())\n          ? Network::Utility::isSameIpOrLoopback(socket)\n          : false;\n\n  if (is_local_connection) {\n    if (!filter_chain_local.first.empty()) {\n      return findFilterChainForSourceIpAndPort(*filter_chain_local.second, socket);\n    }\n  } else {\n    if (!filter_chain_external.first.empty()) {\n      return findFilterChainForSourceIpAndPort(*filter_chain_external.second, socket);\n    }\n  }\n\n  const auto& filter_chain_any = source_types[envoy::config::listener::v3::FilterChainMatch::ANY];\n\n  if (!filter_chain_any.first.empty()) {\n    return findFilterChainForSourceIpAndPort(*filter_chain_any.second, socket);\n  } else {\n    return nullptr;\n  }\n}\n\nconst Network::FilterChain* FilterChainManagerImpl::findFilterChainForSourceIpAndPort(\n    const SourceIPsTrie& source_ips_trie, const Network::ConnectionSocket& socket) const {\n  auto address = socket.remoteAddress();\n  if (address->type() != Network::Address::Type::Ip) {\n    address = fakeAddress();\n  }\n\n  // Match on both: exact IP and wider CIDR ranges using LcTrie.\n  const auto& data = source_ips_trie.getData(address);\n  if (data.empty()) {\n    return nullptr;\n  }\n\n  ASSERT(data.size() == 1);\n  const auto& source_ports_map = *data.back();\n  const uint32_t source_port = address->ip()->port();\n  const auto port_match = source_ports_map.find(source_port);\n\n  // Did we get a direct hit on port.\n  if (port_match != source_ports_map.end()) {\n    return port_match->second.get();\n  }\n\n  // Try port 0 if we didn't already try it (UDS).\n  if (source_port != 0) {\n    const auto any_match = source_ports_map.find(0);\n    if (any_match != source_ports_map.end()) {\n      return any_match->second.get();\n    }\n  }\n\n  return nullptr;\n}\n\nvoid 
FilterChainManagerImpl::convertIPsToTries() {\n  for (auto& [destination_port, destination_ips_pair] : destination_ports_map_) {\n    // These variables are used as we build up the destination CIDRs used for the trie.\n    auto& [destination_ips_map, destination_ips_trie] = destination_ips_pair;\n    std::vector<std::pair<ServerNamesMapSharedPtr, std::vector<Network::Address::CidrRange>>>\n        destination_ips_list;\n    destination_ips_list.reserve(destination_ips_map.size());\n\n    for (const auto& [destination_ip, server_names_map_ptr] : destination_ips_map) {\n      destination_ips_list.push_back(makeCidrListEntry(destination_ip, server_names_map_ptr));\n\n      // This hugely nested for loop greatly pains me, but I'm not sure how to make it better.\n      // We need to get access to all of the source IP strings so that we can convert them into\n      // a trie like we did for the destination IPs above.\n      for (auto& [server_name, transport_protocols_map] : *server_names_map_ptr) {\n        for (auto& [transport_protocol, application_protocols_map] : transport_protocols_map) {\n          for (auto& [application_protocol, source_arrays] : application_protocols_map) {\n            for (auto& [source_ips_map, source_ips_trie] : source_arrays) {\n              std::vector<\n                  std::pair<SourcePortsMapSharedPtr, std::vector<Network::Address::CidrRange>>>\n                  source_ips_list;\n              source_ips_list.reserve(source_ips_map.size());\n\n              for (auto& [source_ip, source_port_map_ptr] : source_ips_map) {\n                source_ips_list.push_back(makeCidrListEntry(source_ip, source_port_map_ptr));\n              }\n\n              source_ips_trie = std::make_unique<SourceIPsTrie>(source_ips_list, true);\n            }\n          }\n        }\n      }\n    }\n\n    destination_ips_trie = std::make_unique<DestinationIPsTrie>(destination_ips_list, true);\n  }\n}\n\nNetwork::DrainableFilterChainSharedPtr 
FilterChainManagerImpl::findExistingFilterChain(\n    const envoy::config::listener::v3::FilterChain& filter_chain_message) {\n  // Origin filter chain manager could be empty if the current is the ancestor.\n  const auto* origin = getOriginFilterChainManager();\n  if (origin == nullptr) {\n    return nullptr;\n  }\n  auto iter = origin->fc_contexts_.find(filter_chain_message);\n  if (iter != origin->fc_contexts_.end()) {\n    // copy the context to this filter chain manager.\n    fc_contexts_.emplace(filter_chain_message, iter->second);\n    return iter->second;\n  }\n  return nullptr;\n}\n\nConfiguration::FilterChainFactoryContextPtr FilterChainManagerImpl::createFilterChainFactoryContext(\n    const ::envoy::config::listener::v3::FilterChain* const filter_chain) {\n  // TODO(lambdai): add stats\n  UNREFERENCED_PARAMETER(filter_chain);\n  return std::make_unique<PerFilterChainFactoryContextImpl>(parent_context_, init_manager_);\n}\n\nFactoryContextImpl::FactoryContextImpl(Server::Instance& server,\n                                       const envoy::config::listener::v3::Listener& config,\n                                       Network::DrainDecision& drain_decision,\n                                       Stats::Scope& global_scope, Stats::Scope& listener_scope)\n    : server_(server), config_(config), drain_decision_(drain_decision),\n      global_scope_(global_scope), listener_scope_(listener_scope) {}\n\nAccessLog::AccessLogManager& FactoryContextImpl::accessLogManager() {\n  return server_.accessLogManager();\n}\nUpstream::ClusterManager& FactoryContextImpl::clusterManager() { return server_.clusterManager(); }\nEvent::Dispatcher& FactoryContextImpl::dispatcher() { return server_.dispatcher(); }\nGrpc::Context& FactoryContextImpl::grpcContext() { return server_.grpcContext(); }\nbool FactoryContextImpl::healthCheckFailed() { return server_.healthCheckFailed(); }\nHttp::Context& FactoryContextImpl::httpContext() { return server_.httpContext(); 
}\nInit::Manager& FactoryContextImpl::initManager() { return server_.initManager(); }\nconst LocalInfo::LocalInfo& FactoryContextImpl::localInfo() const { return server_.localInfo(); }\nEnvoy::Runtime::Loader& FactoryContextImpl::runtime() { return server_.runtime(); }\nStats::Scope& FactoryContextImpl::scope() { return global_scope_; }\nSingleton::Manager& FactoryContextImpl::singletonManager() { return server_.singletonManager(); }\nOverloadManager& FactoryContextImpl::overloadManager() { return server_.overloadManager(); }\nThreadLocal::SlotAllocator& FactoryContextImpl::threadLocal() { return server_.threadLocal(); }\nAdmin& FactoryContextImpl::admin() { return server_.admin(); }\nTimeSource& FactoryContextImpl::timeSource() { return server_.timeSource(); }\nProtobufMessage::ValidationContext& FactoryContextImpl::messageValidationContext() {\n  return server_.messageValidationContext();\n}\nProtobufMessage::ValidationVisitor& FactoryContextImpl::messageValidationVisitor() {\n  return server_.messageValidationContext().staticValidationVisitor();\n}\nApi::Api& FactoryContextImpl::api() { return server_.api(); }\nServerLifecycleNotifier& FactoryContextImpl::lifecycleNotifier() {\n  return server_.lifecycleNotifier();\n}\nProcessContextOptRef FactoryContextImpl::processContext() { return server_.processContext(); }\nConfiguration::ServerFactoryContext& FactoryContextImpl::getServerFactoryContext() const {\n  return server_.serverFactoryContext();\n}\nConfiguration::TransportSocketFactoryContext&\nFactoryContextImpl::getTransportSocketFactoryContext() const {\n  return server_.transportSocketFactoryContext();\n}\nconst envoy::config::core::v3::Metadata& FactoryContextImpl::listenerMetadata() const {\n  return config_.metadata();\n}\nenvoy::config::core::v3::TrafficDirection FactoryContextImpl::direction() const {\n  return config_.traffic_direction();\n}\nNetwork::DrainDecision& FactoryContextImpl::drainDecision() { return drain_decision_; }\nStats::Scope& 
FactoryContextImpl::listenerScope() { return listener_scope_; }\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/filter_chain_manager_impl.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <cstdint>\n#include <memory>\n\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/network/drain_decision.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/instance.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/network/cidr_range.h\"\n#include \"common/network/lc_trie.h\"\n\n#include \"server/filter_chain_factory_context_callback.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass FilterChainFactoryBuilder {\npublic:\n  virtual ~FilterChainFactoryBuilder() = default;\n  /**\n   * @return Shared filter chain where builder is allowed to determine and reuse duplicated filter\n   * chain. Throw exception if failed.\n   */\n  virtual Network::DrainableFilterChainSharedPtr\n  buildFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain,\n                   FilterChainFactoryContextCreator& context_creator) const PURE;\n};\n\n// PerFilterChainFactoryContextImpl is supposed to be used by network filter chain.\n// Its lifetime must cover the created network filter chain.\n// Its lifetime should be covered by the owned listeners so as to support replacing the active\n// filter chains in the listener.\nclass PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactoryContext,\n                                         public Network::DrainDecision {\npublic:\n  explicit PerFilterChainFactoryContextImpl(Configuration::FactoryContext& parent_context,\n                                            Init::Manager& init_manager);\n\n  // DrainDecision\n  bool drainClose() const override;\n\n  // Configuration::FactoryContext\n  AccessLog::AccessLogManager& accessLogManager() override;\n  Upstream::ClusterManager& clusterManager() 
override;\n  Event::Dispatcher& dispatcher() override;\n  Network::DrainDecision& drainDecision() override;\n  Grpc::Context& grpcContext() override;\n  bool healthCheckFailed() override;\n  Http::Context& httpContext() override;\n  Init::Manager& initManager() override;\n  const LocalInfo::LocalInfo& localInfo() const override;\n  Envoy::Runtime::Loader& runtime() override;\n  Stats::Scope& scope() override;\n  Singleton::Manager& singletonManager() override;\n  OverloadManager& overloadManager() override;\n  ThreadLocal::SlotAllocator& threadLocal() override;\n  Admin& admin() override;\n  const envoy::config::core::v3::Metadata& listenerMetadata() const override;\n  envoy::config::core::v3::TrafficDirection direction() const override;\n  TimeSource& timeSource() override;\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override;\n  ProtobufMessage::ValidationContext& messageValidationContext() override;\n  Api::Api& api() override;\n  ServerLifecycleNotifier& lifecycleNotifier() override;\n  ProcessContextOptRef processContext() override;\n  Configuration::ServerFactoryContext& getServerFactoryContext() const override;\n  Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override;\n  Stats::Scope& listenerScope() override;\n\n  void startDraining() override { is_draining_.store(true); }\n\nprivate:\n  Configuration::FactoryContext& parent_context_;\n  Init::Manager& init_manager_;\n  std::atomic<bool> is_draining_{false};\n};\n\nclass FilterChainImpl : public Network::DrainableFilterChain {\npublic:\n  FilterChainImpl(Network::TransportSocketFactoryPtr&& transport_socket_factory,\n                  std::vector<Network::FilterFactoryCb>&& filters_factory)\n      : transport_socket_factory_(std::move(transport_socket_factory)),\n        filters_factory_(std::move(filters_factory)) {}\n\n  // Network::FilterChain\n  const Network::TransportSocketFactory& transportSocketFactory() const override {\n    return 
*transport_socket_factory_;\n  }\n  const std::vector<Network::FilterFactoryCb>& networkFilterFactories() const override {\n    return filters_factory_;\n  }\n  void startDraining() override { factory_context_->startDraining(); }\n\n  void setFilterChainFactoryContext(\n      Configuration::FilterChainFactoryContextPtr filter_chain_factory_context) {\n    ASSERT(factory_context_ == nullptr);\n    factory_context_ = std::move(filter_chain_factory_context);\n  }\n\nprivate:\n  Configuration::FilterChainFactoryContextPtr factory_context_;\n  const Network::TransportSocketFactoryPtr transport_socket_factory_;\n  const std::vector<Network::FilterFactoryCb> filters_factory_;\n};\n\n/**\n * Implementation of FactoryContext wrapping a Server::Instance and some listener components.\n */\nclass FactoryContextImpl : public Configuration::FactoryContext {\npublic:\n  FactoryContextImpl(Server::Instance& server, const envoy::config::listener::v3::Listener& config,\n                     Network::DrainDecision& drain_decision, Stats::Scope& global_scope,\n                     Stats::Scope& listener_scope);\n\n  // Configuration::FactoryContext\n  AccessLog::AccessLogManager& accessLogManager() override;\n  Upstream::ClusterManager& clusterManager() override;\n  Event::Dispatcher& dispatcher() override;\n  Grpc::Context& grpcContext() override;\n  bool healthCheckFailed() override;\n  Http::Context& httpContext() override;\n  Init::Manager& initManager() override;\n  const LocalInfo::LocalInfo& localInfo() const override;\n  Envoy::Runtime::Loader& runtime() override;\n  Stats::Scope& scope() override;\n  Singleton::Manager& singletonManager() override;\n  OverloadManager& overloadManager() override;\n  ThreadLocal::SlotAllocator& threadLocal() override;\n  Admin& admin() override;\n  TimeSource& timeSource() override;\n  ProtobufMessage::ValidationContext& messageValidationContext() override;\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override;\n  Api::Api& 
api() override;\n  ServerLifecycleNotifier& lifecycleNotifier() override;\n  ProcessContextOptRef processContext() override;\n  Configuration::ServerFactoryContext& getServerFactoryContext() const override;\n  Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override;\n  const envoy::config::core::v3::Metadata& listenerMetadata() const override;\n  envoy::config::core::v3::TrafficDirection direction() const override;\n  Network::DrainDecision& drainDecision() override;\n  Stats::Scope& listenerScope() override;\n\nprivate:\n  Server::Instance& server_;\n  const envoy::config::listener::v3::Listener& config_;\n  Network::DrainDecision& drain_decision_;\n  Stats::Scope& global_scope_;\n  Stats::Scope& listener_scope_;\n};\n\n/**\n * Implementation of FilterChainManager. It owns and exchange filter chains.\n */\nclass FilterChainManagerImpl : public Network::FilterChainManager,\n                               public FilterChainFactoryContextCreator,\n                               Logger::Loggable<Logger::Id::config> {\npublic:\n  using FcContextMap =\n      absl::flat_hash_map<envoy::config::listener::v3::FilterChain,\n                          Network::DrainableFilterChainSharedPtr, MessageUtil, MessageUtil>;\n  FilterChainManagerImpl(const Network::Address::InstanceConstSharedPtr& address,\n                         Configuration::FactoryContext& factory_context,\n                         Init::Manager& init_manager)\n      : address_(address), parent_context_(factory_context), init_manager_(init_manager) {}\n\n  FilterChainManagerImpl(const Network::Address::InstanceConstSharedPtr& address,\n                         Configuration::FactoryContext& factory_context,\n                         Init::Manager& init_manager, const FilterChainManagerImpl& parent_manager);\n\n  // FilterChainFactoryContextCreator\n  Configuration::FilterChainFactoryContextPtr createFilterChainFactoryContext(\n      const 
::envoy::config::listener::v3::FilterChain* const filter_chain) override;\n\n  // Network::FilterChainManager\n  const Network::FilterChain*\n  findFilterChain(const Network::ConnectionSocket& socket) const override;\n\n  // Add all filter chains into this manager. During the lifetime of FilterChainManagerImpl this\n  // should be called at most once.\n  void addFilterChain(\n      absl::Span<const envoy::config::listener::v3::FilterChain* const> filter_chain_span,\n      FilterChainFactoryBuilder& b, FilterChainFactoryContextCreator& context_creator);\n  static bool isWildcardServerName(const std::string& name);\n\n  // Return the current view of filter chains, keyed by filter chain message. Used by the owning\n  // listener to calculate the intersection of filter chains with another listener.\n  const FcContextMap& filterChainsByMessage() const { return fc_contexts_; }\n\nprivate:\n  void convertIPsToTries();\n  using SourcePortsMap = absl::flat_hash_map<uint16_t, Network::FilterChainSharedPtr>;\n  using SourcePortsMapSharedPtr = std::shared_ptr<SourcePortsMap>;\n  using SourceIPsMap = absl::flat_hash_map<std::string, SourcePortsMapSharedPtr>;\n  using SourceIPsTrie = Network::LcTrie::LcTrie<SourcePortsMapSharedPtr>;\n  using SourceIPsTriePtr = std::unique_ptr<SourceIPsTrie>;\n  using SourceTypesArray = std::array<std::pair<SourceIPsMap, SourceIPsTriePtr>, 3>;\n  using ApplicationProtocolsMap = absl::flat_hash_map<std::string, SourceTypesArray>;\n  using TransportProtocolsMap = absl::flat_hash_map<std::string, ApplicationProtocolsMap>;\n  // Both exact server names and wildcard domains are part of the same map, in which wildcard\n  // domains are prefixed with \".\" (i.e. 
\".example.com\" for \"*.example.com\") to differentiate\n  // between exact and wildcard entries.\n  using ServerNamesMap = absl::flat_hash_map<std::string, TransportProtocolsMap>;\n  using ServerNamesMapSharedPtr = std::shared_ptr<ServerNamesMap>;\n  using DestinationIPsMap = absl::flat_hash_map<std::string, ServerNamesMapSharedPtr>;\n  using DestinationIPsTrie = Network::LcTrie::LcTrie<ServerNamesMapSharedPtr>;\n  using DestinationIPsTriePtr = std::unique_ptr<DestinationIPsTrie>;\n  using DestinationPortsMap =\n      absl::flat_hash_map<uint16_t, std::pair<DestinationIPsMap, DestinationIPsTriePtr>>;\n\n  void addFilterChainForDestinationPorts(\n      DestinationPortsMap& destination_ports_map, uint16_t destination_port,\n      const std::vector<std::string>& destination_ips,\n      const absl::Span<const std::string* const> server_names,\n      const std::string& transport_protocol,\n      const absl::Span<const std::string* const> application_protocols,\n      const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n      const std::vector<std::string>& source_ips,\n      const absl::Span<const Protobuf::uint32> source_ports,\n      const Network::FilterChainSharedPtr& filter_chain);\n  void addFilterChainForDestinationIPs(\n      DestinationIPsMap& destination_ips_map, const std::vector<std::string>& destination_ips,\n      const absl::Span<const std::string* const> server_names,\n      const std::string& transport_protocol,\n      const absl::Span<const std::string* const> application_protocols,\n      const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n      const std::vector<std::string>& source_ips,\n      const absl::Span<const Protobuf::uint32> source_ports,\n      const Network::FilterChainSharedPtr& filter_chain);\n  void addFilterChainForServerNames(\n      ServerNamesMapSharedPtr& server_names_map_ptr,\n      const absl::Span<const std::string* const> server_names,\n      const 
std::string& transport_protocol,\n      const absl::Span<const std::string* const> application_protocols,\n      const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n      const std::vector<std::string>& source_ips,\n      const absl::Span<const Protobuf::uint32> source_ports,\n      const Network::FilterChainSharedPtr& filter_chain);\n  void addFilterChainForApplicationProtocols(\n      ApplicationProtocolsMap& application_protocol_map,\n      const absl::Span<const std::string* const> application_protocols,\n      const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n      const std::vector<std::string>& source_ips,\n      const absl::Span<const Protobuf::uint32> source_ports,\n      const Network::FilterChainSharedPtr& filter_chain);\n  void addFilterChainForSourceTypes(\n      SourceTypesArray& source_types_array,\n      const envoy::config::listener::v3::FilterChainMatch::ConnectionSourceType source_type,\n      const std::vector<std::string>& source_ips,\n      const absl::Span<const Protobuf::uint32> source_ports,\n      const Network::FilterChainSharedPtr& filter_chain);\n  void addFilterChainForSourceIPs(SourceIPsMap& source_ips_map, const std::string& source_ip,\n                                  const absl::Span<const Protobuf::uint32> source_ports,\n                                  const Network::FilterChainSharedPtr& filter_chain);\n  void addFilterChainForSourcePorts(SourcePortsMapSharedPtr& source_ports_map_ptr,\n                                    uint32_t source_port,\n                                    const Network::FilterChainSharedPtr& filter_chain);\n\n  const Network::FilterChain*\n  findFilterChainForDestinationIP(const DestinationIPsTrie& destination_ips_trie,\n                                  const Network::ConnectionSocket& socket) const;\n  const Network::FilterChain*\n  findFilterChainForServerName(const ServerNamesMap& server_names_map,\n                            
   const Network::ConnectionSocket& socket) const;\n  const Network::FilterChain*\n  findFilterChainForTransportProtocol(const TransportProtocolsMap& transport_protocols_map,\n                                      const Network::ConnectionSocket& socket) const;\n  const Network::FilterChain*\n  findFilterChainForApplicationProtocols(const ApplicationProtocolsMap& application_protocols_map,\n                                         const Network::ConnectionSocket& socket) const;\n  const Network::FilterChain*\n  findFilterChainForSourceTypes(const SourceTypesArray& source_types,\n                                const Network::ConnectionSocket& socket) const;\n\n  const Network::FilterChain*\n  findFilterChainForSourceIpAndPort(const SourceIPsTrie& source_ips_trie,\n                                    const Network::ConnectionSocket& socket) const;\n\n  const FilterChainManagerImpl* getOriginFilterChainManager() { return origin_.value(); }\n  // Duplicate the inherent factory context if any.\n  Network::DrainableFilterChainSharedPtr\n  findExistingFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain_message);\n\n  // Mapping from filter chain message to filter chain. 
This is used by LDS response handler to\n  // detect the filter chains in the intersection of existing listener and new listener.\n  FcContextMap fc_contexts_;\n\n  // Mapping of FilterChain's configured destination ports, IPs, server names, transport protocols\n  // and application protocols, using structures defined above.\n  DestinationPortsMap destination_ports_map_;\n  const Network::Address::InstanceConstSharedPtr address_;\n  // This is the reference to a factory context which all the generations of listener share.\n  Configuration::FactoryContext& parent_context_;\n  std::list<std::shared_ptr<Configuration::FilterChainFactoryContext>> factory_contexts_;\n\n  // Reference to the previous generation of filter chain manager to share the filter chains.\n  // Caution: only during warm up could the optional have value.\n  absl::optional<const FilterChainManagerImpl*> origin_{nullptr};\n\n  // For FilterChainFactoryContextCreator\n  // init manager owned by the corresponding listener. The reference is valid when building the\n  // filter chain.\n  Init::Manager& init_manager_;\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/guarddog_impl.cc",
    "content": "#include \"server/guarddog_impl.h\"\n\n#include <sys/types.h>\n\n#include <chrono>\n#include <memory>\n#include <utility>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/server/configuration.h\"\n#include \"envoy/server/guarddog.h\"\n#include \"envoy/server/guarddog_config.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"server/watchdog_impl.h\"\n\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nGuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config,\n                           Api::Api& api, absl::string_view name,\n                           std::unique_ptr<TestInterlockHook>&& test_interlock)\n    : test_interlock_hook_(std::move(test_interlock)), stats_scope_(stats_scope),\n      time_source_(api.timeSource()), miss_timeout_(config.missTimeout()),\n      megamiss_timeout_(config.megaMissTimeout()), kill_timeout_(config.killTimeout()),\n      multi_kill_timeout_(config.multiKillTimeout()),\n      multi_kill_fraction_(config.multiKillThreshold() / 100.0),\n      loop_interval_([&]() -> std::chrono::milliseconds {\n        // The loop interval is simply the minimum of all specified intervals,\n        // but we must account for the 0=disabled case. This lambda takes care\n        // of that and returns a value that initializes the const loop interval.\n        const auto min_of_nonfatal = std::min(miss_timeout_, megamiss_timeout_);\n        return std::min({killEnabled() ? kill_timeout_ : min_of_nonfatal,\n                         multikillEnabled() ? 
multi_kill_timeout_ : min_of_nonfatal,\n                         min_of_nonfatal});\n      }()),\n      watchdog_miss_counter_(stats_scope.counterFromStatName(\n          Stats::StatNameManagedStorage(absl::StrCat(name, \".watchdog_miss\"),\n                                        stats_scope.symbolTable())\n              .statName())),\n      watchdog_megamiss_counter_(stats_scope.counterFromStatName(\n          Stats::StatNameManagedStorage(absl::StrCat(name, \".watchdog_mega_miss\"),\n                                        stats_scope.symbolTable())\n              .statName())),\n      dispatcher_(api.allocateDispatcher(absl::StrCat(name, \"_guarddog_thread\"))),\n      loop_timer_(dispatcher_->createTimer([this]() { step(); })),\n      events_to_actions_([&](const Server::Configuration::Watchdog& config) -> EventToActionsMap {\n        EventToActionsMap map;\n\n        // We should be able to share the dispatcher since guard dog's lifetime\n        // should eclipse those of actions.\n        Configuration::GuardDogActionFactoryContext context = {api, *dispatcher_, stats_scope,\n                                                               name};\n\n        const auto& actions = config.actions();\n        for (const auto& action : actions) {\n          // Get factory and add the created cb\n          auto& factory = Config::Utility::getAndCheckFactory<Configuration::GuardDogActionFactory>(\n              action.config());\n          map[action.event()].push_back(factory.createGuardDogActionFromProto(action, context));\n        }\n\n        return map;\n      }(config)),\n      run_thread_(true) {\n  start(api);\n}\n\nGuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config,\n                           Api::Api& api, absl::string_view name)\n    : GuardDogImpl(stats_scope, config, api, name, std::make_unique<TestInterlockHook>()) {}\n\nGuardDogImpl::~GuardDogImpl() { stop(); }\n\nvoid GuardDogImpl::step() {\n  {\n    
Thread::LockGuard guard(mutex_);\n    if (!run_thread_) {\n      return;\n    }\n  }\n\n  const auto now = time_source_.monotonicTime();\n  std::vector<std::pair<Thread::ThreadId, MonotonicTime>> miss_threads;\n  std::vector<std::pair<Thread::ThreadId, MonotonicTime>> mega_miss_threads;\n\n  {\n    std::vector<std::pair<Thread::ThreadId, MonotonicTime>> multi_kill_threads;\n    Thread::LockGuard guard(wd_lock_);\n\n    // Compute the multikill threshold\n    const size_t required_for_multi_kill =\n        std::max(static_cast<size_t>(2),\n                 static_cast<size_t>(ceil(multi_kill_fraction_ * watched_dogs_.size())));\n\n    for (auto& watched_dog : watched_dogs_) {\n      const auto last_checkin = watched_dog->dog_->lastTouchTime();\n      const auto tid = watched_dog->dog_->threadId();\n      const auto delta = now - last_checkin;\n      if (watched_dog->last_alert_time_ && watched_dog->last_alert_time_.value() < last_checkin) {\n        watched_dog->miss_alerted_ = false;\n        watched_dog->megamiss_alerted_ = false;\n      }\n      if (delta > miss_timeout_) {\n        if (!watched_dog->miss_alerted_) {\n          watchdog_miss_counter_.inc();\n          watched_dog->miss_counter_.inc();\n          watched_dog->last_alert_time_ = last_checkin;\n          watched_dog->miss_alerted_ = true;\n          miss_threads.emplace_back(tid, last_checkin);\n        }\n      }\n      if (delta > megamiss_timeout_) {\n        if (!watched_dog->megamiss_alerted_) {\n          watchdog_megamiss_counter_.inc();\n          watched_dog->megamiss_counter_.inc();\n          watched_dog->last_alert_time_ = last_checkin;\n          watched_dog->megamiss_alerted_ = true;\n          mega_miss_threads.emplace_back(tid, last_checkin);\n        }\n      }\n      if (killEnabled() && delta > kill_timeout_) {\n        invokeGuardDogActions(WatchDogAction::KILL, {{tid, last_checkin}}, now);\n\n        PANIC(fmt::format(\"GuardDog: one thread ({}) stuck for more than 
watchdog_kill_timeout\",\n                          watched_dog->dog_->threadId().debugString()));\n      }\n      if (multikillEnabled() && delta > multi_kill_timeout_) {\n        multi_kill_threads.emplace_back(tid, last_checkin);\n\n        if (multi_kill_threads.size() >= required_for_multi_kill) {\n          invokeGuardDogActions(WatchDogAction::MULTIKILL, multi_kill_threads, now);\n\n          PANIC(fmt::format(\"GuardDog: At least {} threads ({},...) stuck for more than \"\n                            \"watchdog_multikill_timeout\",\n                            multi_kill_threads.size(), tid.debugString()));\n        }\n      }\n    }\n  }\n\n  // Run megamiss and miss handlers\n  if (!mega_miss_threads.empty()) {\n    invokeGuardDogActions(WatchDogAction::MEGAMISS, mega_miss_threads, now);\n  }\n\n  if (!miss_threads.empty()) {\n    invokeGuardDogActions(WatchDogAction::MISS, miss_threads, now);\n  }\n\n  {\n    Thread::LockGuard guard(mutex_);\n    test_interlock_hook_->signalFromImpl(now);\n    if (run_thread_) {\n      loop_timer_->enableTimer(loop_interval_);\n    }\n  }\n}\n\nWatchDogSharedPtr GuardDogImpl::createWatchDog(Thread::ThreadId thread_id,\n                                               const std::string& thread_name) {\n  // Timer started by WatchDog will try to fire at 1/2 of the interval of the\n  // minimum timeout specified. 
loop_interval_ is const so all shared state\n  // accessed out of the locked section below is const (time_source_ has no\n  // state).\n  const auto wd_interval = loop_interval_ / 2;\n  WatchDogSharedPtr new_watchdog =\n      std::make_shared<WatchDogImpl>(std::move(thread_id), time_source_, wd_interval);\n  WatchedDogPtr watched_dog = std::make_unique<WatchedDog>(stats_scope_, thread_name, new_watchdog);\n  {\n    Thread::LockGuard guard(wd_lock_);\n    watched_dogs_.push_back(std::move(watched_dog));\n  }\n  new_watchdog->touch();\n  return new_watchdog;\n}\n\nvoid GuardDogImpl::stopWatching(WatchDogSharedPtr wd) {\n  Thread::LockGuard guard(wd_lock_);\n  auto found_wd = std::find_if(watched_dogs_.begin(), watched_dogs_.end(),\n                               [&wd](const WatchedDogPtr& d) -> bool { return d->dog_ == wd; });\n  if (found_wd != watched_dogs_.end()) {\n    watched_dogs_.erase(found_wd);\n  } else {\n    ASSERT(false);\n  }\n}\n\nvoid GuardDogImpl::start(Api::Api& api) {\n  Thread::LockGuard guard(mutex_);\n  // See comments in WorkerImpl::start for the naming convention.\n  Thread::Options options{absl::StrCat(\"dog:\", dispatcher_->name())};\n  thread_ = api.threadFactory().createThread(\n      [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); }, options);\n  loop_timer_->enableTimer(std::chrono::milliseconds(0));\n}\n\nvoid GuardDogImpl::stop() {\n  {\n    Thread::LockGuard guard(mutex_);\n    run_thread_ = false;\n  }\n  dispatcher_->exit();\n  if (thread_) {\n    thread_->join();\n    thread_.reset();\n  }\n}\n\nvoid GuardDogImpl::invokeGuardDogActions(\n    WatchDogAction::WatchdogEvent event,\n    std::vector<std::pair<Thread::ThreadId, MonotonicTime>> thread_last_checkin_pairs,\n    MonotonicTime now) {\n  const auto& registered_actions = events_to_actions_.find(event);\n  if (registered_actions != events_to_actions_.end()) {\n    for (auto& action : registered_actions->second) {\n      action->run(event, 
thread_last_checkin_pairs, now);\n    }\n  }\n}\n\nGuardDogImpl::WatchedDog::WatchedDog(Stats::Scope& stats_scope, const std::string& thread_name,\n                                     const WatchDogSharedPtr& watch_dog)\n    : dog_(watch_dog),\n      miss_counter_(stats_scope.counterFromStatName(\n          Stats::StatNameManagedStorage(fmt::format(\"server.{}.watchdog_miss\", thread_name),\n                                        stats_scope.symbolTable())\n              .statName())),\n      megamiss_counter_(stats_scope.counterFromStatName(\n          Stats::StatNameManagedStorage(fmt::format(\"server.{}.watchdog_mega_miss\", thread_name),\n                                        stats_scope.symbolTable())\n              .statName())) {}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/guarddog_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/server/configuration.h\"\n#include \"envoy/server/guarddog.h\"\n#include \"envoy/server/guarddog_config.h\"\n#include \"envoy/server/watchdog.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/event/libevent.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * This feature performs deadlock detection stats collection & enforcement.\n *\n * It launches a thread that scans at an interval the minimum of the configured\n * intervals. If it finds starved threads or suspected deadlocks it will take\n * the appropriate action depending on the config parameters described below.\n *\n * Thread lifetime is tied to GuardDog object lifetime (RAII style).\n */\nclass GuardDogImpl : public GuardDog {\npublic:\n  /**\n   * Defines a test interlock hook to enable tests to synchronize the guard-dog\n   * execution so they can probe current counter values. 
The default\n   * implementation that runs in production has empty methods, which are\n   * overridden in the implementation used during tests.\n   */\n  class TestInterlockHook {\n  public:\n    virtual ~TestInterlockHook() = default;\n\n    /**\n     * Called from GuardDogImpl to indicate that it has evaluated all watch-dogs\n     * up to a particular point in time.\n     */\n    virtual void signalFromImpl(MonotonicTime) {}\n\n    /**\n     * Called from GuardDog tests to block until the implementation has reached\n     * the desired point in time.\n     */\n    virtual void waitFromTest(Thread::MutexBasicLockable&, MonotonicTime) {}\n  };\n\n  /**\n   * @param stats_scope Statistics scope to write watchdog_miss and\n   * watchdog_mega_miss events into.\n   * @param config Configuration object.\n   * @param api API object.\n   * @param test_interlock a hook for enabling interlock with unit tests.\n   *\n   * See the configuration documentation for details on the timeout settings.\n   */\n  GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config,\n               Api::Api& api, absl::string_view name,\n               std::unique_ptr<TestInterlockHook>&& test_interlock);\n  GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config,\n               Api::Api& api, absl::string_view name);\n  ~GuardDogImpl() override;\n\n  /**\n   * Exposed for testing purposes only (but harmless to call):\n   */\n  const std::chrono::milliseconds loopIntervalForTest() const { return loop_interval_; }\n\n  /**\n   * Test hook to force a step() to catch up with the current simulated\n   * time. 
This is inlined so that it does not need to be present in the\n   * production binary.\n   */\n  void forceCheckForTest() {\n    Thread::LockGuard guard(mutex_);\n    MonotonicTime now = time_source_.monotonicTime();\n    loop_timer_->enableTimer(std::chrono::milliseconds(0));\n    test_interlock_hook_->waitFromTest(mutex_, now);\n  }\n\n  // Server::GuardDog\n  WatchDogSharedPtr createWatchDog(Thread::ThreadId thread_id,\n                                   const std::string& thread_name) override;\n  void stopWatching(WatchDogSharedPtr wd) override;\n\nprivate:\n  void start(Api::Api& api);\n  void step();\n  void stop();\n  // Per the C++ standard it is OK to use these in ctor initializer as long as\n  // it is after kill and multikill timeout values are initialized.\n  bool killEnabled() const { return kill_timeout_ > std::chrono::milliseconds(0); }\n  bool multikillEnabled() const { return multi_kill_timeout_ > std::chrono::milliseconds(0); }\n\n  using WatchDogAction = envoy::config::bootstrap::v3::Watchdog::WatchdogAction;\n  // Helper function to invoke all the GuardDogActions registered for an Event.\n  void invokeGuardDogActions(\n      WatchDogAction::WatchdogEvent event,\n      std::vector<std::pair<Thread::ThreadId, MonotonicTime>> thread_last_checkin_pairs,\n      MonotonicTime now);\n\n  struct WatchedDog {\n    WatchedDog(Stats::Scope& stats_scope, const std::string& thread_name,\n               const WatchDogSharedPtr& watch_dog);\n\n    const WatchDogSharedPtr dog_;\n    absl::optional<MonotonicTime> last_alert_time_;\n    bool miss_alerted_{};\n    bool megamiss_alerted_{};\n    Stats::Counter& miss_counter_;\n    Stats::Counter& megamiss_counter_;\n  };\n  using WatchedDogPtr = std::unique_ptr<WatchedDog>;\n\n  std::unique_ptr<TestInterlockHook> test_interlock_hook_;\n  Stats::Scope& stats_scope_;\n  TimeSource& time_source_;\n  const std::chrono::milliseconds miss_timeout_;\n  const std::chrono::milliseconds megamiss_timeout_;\n  const 
std::chrono::milliseconds kill_timeout_;\n  const std::chrono::milliseconds multi_kill_timeout_;\n  const double multi_kill_fraction_;\n  const std::chrono::milliseconds loop_interval_;\n  Stats::Counter& watchdog_miss_counter_;\n  Stats::Counter& watchdog_megamiss_counter_;\n  std::vector<WatchedDogPtr> watched_dogs_ ABSL_GUARDED_BY(wd_lock_);\n  Thread::MutexBasicLockable wd_lock_;\n  Thread::ThreadPtr thread_;\n  Event::DispatcherPtr dispatcher_;\n  Event::TimerPtr loop_timer_;\n  using EventToActionsMap = absl::flat_hash_map<WatchDogAction::WatchdogEvent,\n                                                std::vector<Configuration::GuardDogActionPtr>>;\n  EventToActionsMap events_to_actions_;\n  Thread::MutexBasicLockable mutex_;\n  bool run_thread_ ABSL_GUARDED_BY(mutex_);\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/hot_restart.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy;\n\nmessage HotRestartMessage {\n  // Child->parent requests\n  message Request {\n    message PassListenSocket {\n      string address = 1;\n    }\n    message ShutdownAdmin {\n    }\n    message Stats {\n    }\n    message DrainListeners {\n    }\n    message Terminate {\n    }\n    oneof request {\n      PassListenSocket pass_listen_socket = 1;\n      ShutdownAdmin shutdown_admin = 2;\n      Stats stats = 3;\n      DrainListeners drain_listeners = 4;\n      Terminate terminate = 5;\n    }\n  }\n\n  // Parent->child replies\n  message Reply {\n    message PassListenSocket {\n      int32 fd = 1;\n    }\n    message ShutdownAdmin {\n      uint64 original_start_time_unix_seconds = 1;\n    }\n    message Span {\n      uint32 first = 1;\n      uint32 last = 2; // inclusive\n    }\n    message RepeatedSpan {\n      repeated Span spans = 1;\n    }\n    message Stats {\n      // Values for server_stats, which don't fit with the \"combination logic\" approach.\n      uint64 memory_allocated = 1;\n      uint64 num_connections = 2;\n\n      // Keys are fully qualified stat names.\n      //\n      // The amount added to the counter since the last time a message included the counter in this\n      // map. (The first time a counter is included in this map, it's the amount added since the\n      // final latch() before hot restart began).\n      map<string, uint64> counter_deltas = 3;\n      // The parent's current values for various gauges in its stats store.\n      map<string, uint64> gauges = 4;\n      // Maps the string representation of a StatName into an array of Spans,\n      // which indicate which of the StatName tokens are dynamic. 
For example,\n      // if we are recording a counter or gauge named \"a.b.c.d.e.f\", where \"a\",\n      // and \"d.e\" were created from a StatNameDynamicPool, then we'd map\n      // \"a.b.c.d.e.f\" to the span array [[0,0], [3,4]], where the [0,0] span\n      // covers the \"a\", and the [3,4] span covers \"d.e\".\n      map<string, RepeatedSpan> dynamics = 5;\n    }\n    oneof reply {\n      // When this oneof is of the PassListenSocketReply type, there is a special\n      // implied meaning: the recvmsg that got this proto has control data to make\n      // the passing of the fd work, so make use of CMSG_SPACE etc.\n      PassListenSocket pass_listen_socket = 1;\n      ShutdownAdmin shutdown_admin = 2;\n      Stats stats = 3;\n    }\n  }\n\n  oneof requestreply {\n    Request request = 1;\n    Reply reply = 2;\n  }\n\n  bool didnt_recognize_your_last_message = 3;\n}\n"
  },
  {
    "path": "source/server/hot_restart_impl.cc",
    "content": "#include \"server/hot_restart_impl.h\"\n\n#include <sys/prctl.h>\n#include <sys/types.h>\n#include <sys/un.h>\n\n#include <csignal>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/file_event.h\"\n#include \"envoy/server/instance.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/api/os_sys_calls_impl_hot_restart.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/lock_guard.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nSharedMemory* attachSharedMemory(uint32_t base_id, uint32_t restart_epoch) {\n  Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get();\n  Api::HotRestartOsSysCalls& hot_restart_os_sys_calls = Api::HotRestartOsSysCallsSingleton::get();\n\n  int flags = O_RDWR;\n  const std::string shmem_name = fmt::format(\"/envoy_shared_memory_{}\", base_id);\n  if (restart_epoch == 0) {\n    flags |= O_CREAT | O_EXCL;\n\n    // If we are meant to be first, attempt to unlink a previous shared memory instance. If this\n    // is a clean restart this should then allow the shm_open() call below to succeed.\n    hot_restart_os_sys_calls.shmUnlink(shmem_name.c_str());\n  }\n\n  const Api::SysCallIntResult result =\n      hot_restart_os_sys_calls.shmOpen(shmem_name.c_str(), flags, S_IRUSR | S_IWUSR);\n  if (result.rc_ == -1) {\n    PANIC(fmt::format(\"cannot open shared memory region {} check user permissions. 
Error: {}\",\n                      shmem_name, errorDetails(result.errno_)));\n  }\n\n  if (restart_epoch == 0) {\n    const Api::SysCallIntResult truncateRes =\n        os_sys_calls.ftruncate(result.rc_, sizeof(SharedMemory));\n    RELEASE_ASSERT(truncateRes.rc_ != -1, \"\");\n  }\n\n  const Api::SysCallPtrResult mmapRes = os_sys_calls.mmap(\n      nullptr, sizeof(SharedMemory), PROT_READ | PROT_WRITE, MAP_SHARED, result.rc_, 0);\n  SharedMemory* shmem = reinterpret_cast<SharedMemory*>(mmapRes.rc_);\n  RELEASE_ASSERT(shmem != MAP_FAILED, \"\");\n  RELEASE_ASSERT((reinterpret_cast<uintptr_t>(shmem) % alignof(decltype(shmem))) == 0, \"\");\n\n  if (restart_epoch == 0) {\n    shmem->size_ = sizeof(SharedMemory);\n    shmem->version_ = HOT_RESTART_VERSION;\n    initializeMutex(shmem->log_lock_);\n    initializeMutex(shmem->access_log_lock_);\n  } else {\n    RELEASE_ASSERT(shmem->size_ == sizeof(SharedMemory),\n                   \"Hot restart SharedMemory size mismatch! You must have hot restarted into a \"\n                   \"not-hot-restart-compatible new version of Envoy.\");\n    RELEASE_ASSERT(shmem->version_ == HOT_RESTART_VERSION,\n                   \"Hot restart version mismatch! You must have hot restarted into a \"\n                   \"not-hot-restart-compatible new version of Envoy.\");\n  }\n\n  // Here we catch the case where a new Envoy starts up when the current Envoy has not yet fully\n  // initialized. The startup logic is quite complicated, and it's not worth trying to handle this\n  // in a finer way. This will cause the startup to fail with an error code early, without\n  // affecting any currently running processes. 
The process runner should try again later with some\n  // back off and with the same hot restart epoch number.\n  uint64_t old_flags = shmem->flags_.fetch_or(SHMEM_FLAGS_INITIALIZING);\n  if (old_flags & SHMEM_FLAGS_INITIALIZING) {\n    throw EnvoyException(\"previous envoy process is still initializing\");\n  }\n  return shmem;\n}\n\nvoid initializeMutex(pthread_mutex_t& mutex) {\n  pthread_mutexattr_t attribute;\n  pthread_mutexattr_init(&attribute);\n  pthread_mutexattr_setpshared(&attribute, PTHREAD_PROCESS_SHARED);\n  pthread_mutexattr_setrobust(&attribute, PTHREAD_MUTEX_ROBUST);\n  pthread_mutex_init(&mutex, &attribute);\n}\n\n// The base id is automatically scaled by 10 to prevent overlap of domain socket names when\n// multiple Envoys with different base-ids run on a single host. Note that older versions of Envoy\n// performed the multiplication in OptionsImpl which produced incorrect server info output.\n// TODO(zuercher): ideally, the base_id would be separated from the restart_epoch in\n// the socket names to entirely prevent collisions between consecutive base ids.\nHotRestartImpl::HotRestartImpl(uint32_t base_id, uint32_t restart_epoch,\n                               const std::string& socket_path, mode_t socket_mode)\n    : base_id_(base_id), scaled_base_id_(base_id * 10),\n      as_child_(HotRestartingChild(scaled_base_id_, restart_epoch, socket_path, socket_mode)),\n      as_parent_(HotRestartingParent(scaled_base_id_, restart_epoch, socket_path, socket_mode)),\n      shmem_(attachSharedMemory(scaled_base_id_, restart_epoch)), log_lock_(shmem_->log_lock_),\n      access_log_lock_(shmem_->access_log_lock_) {\n  // If our parent ever goes away just terminate us so that we don't have to rely on ops/launching\n  // logic killing the entire process tree. 
We should never exist without our parent.\n  int rc = prctl(PR_SET_PDEATHSIG, SIGTERM);\n  RELEASE_ASSERT(rc != -1, \"\");\n}\n\nvoid HotRestartImpl::drainParentListeners() {\n  as_child_.drainParentListeners();\n  // At this point we are initialized and a new Envoy can startup if needed.\n  shmem_->flags_ &= ~SHMEM_FLAGS_INITIALIZING;\n}\n\nint HotRestartImpl::duplicateParentListenSocket(const std::string& address) {\n  return as_child_.duplicateParentListenSocket(address);\n}\n\nvoid HotRestartImpl::initialize(Event::Dispatcher& dispatcher, Server::Instance& server) {\n  as_parent_.initialize(dispatcher, server);\n}\n\nvoid HotRestartImpl::sendParentAdminShutdownRequest(time_t& original_start_time) {\n  as_child_.sendParentAdminShutdownRequest(original_start_time);\n}\n\nvoid HotRestartImpl::sendParentTerminateRequest() { as_child_.sendParentTerminateRequest(); }\n\nHotRestart::ServerStatsFromParent\nHotRestartImpl::mergeParentStatsIfAny(Stats::StoreRoot& stats_store) {\n  std::unique_ptr<envoy::HotRestartMessage> wrapper_msg = as_child_.getParentStats();\n  ServerStatsFromParent response;\n  // getParentStats() will happily and cleanly return nullptr if we have no parent.\n  if (wrapper_msg) {\n    as_child_.mergeParentStats(stats_store, wrapper_msg->reply().stats());\n    response.parent_memory_allocated_ = wrapper_msg->reply().stats().memory_allocated();\n    response.parent_connections_ = wrapper_msg->reply().stats().num_connections();\n  }\n  return response;\n}\n\nvoid HotRestartImpl::shutdown() { as_parent_.shutdown(); }\n\nuint32_t HotRestartImpl::baseId() { return base_id_; }\nstd::string HotRestartImpl::version() { return hotRestartVersion(); }\n\nstd::string HotRestartImpl::hotRestartVersion() {\n  return fmt::format(\"{}.{}\", HOT_RESTART_VERSION, sizeof(SharedMemory));\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/hot_restart_impl.h",
    "content": "#pragma once\n\n#include <fcntl.h>\n#include <sys/un.h>\n\n#include <array>\n#include <atomic>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/server/hot_restart.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/stats/allocator_impl.h\"\n\n#include \"server/hot_restarting_child.h\"\n#include \"server/hot_restarting_parent.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n// Increment this whenever there is a shared memory / RPC change that will prevent a hot restart\n// from working. Operations code can then cope with this and do a full restart.\nconst uint64_t HOT_RESTART_VERSION = 11;\n\n/**\n * Shared memory segment. This structure is laid directly into shared memory and is used amongst\n * all running envoy processes.\n */\nstruct SharedMemory {\n  uint64_t size_;\n  uint64_t version_;\n  pthread_mutex_t log_lock_;\n  pthread_mutex_t access_log_lock_;\n  std::atomic<uint64_t> flags_;\n};\nstatic const uint64_t SHMEM_FLAGS_INITIALIZING = 0x1;\n\n/**\n * Initialize the shared memory segment, depending on whether we are the first running\n * envoy, or a host restarted envoy process.\n *\n * @param base_id uint32_t that is the base id flag used to start this Envoy.\n * @param restart_epoch uint32_t the restart epoch flag used to start this Envoy.\n */\nSharedMemory* attachSharedMemory(uint32_t base_id, uint32_t restart_epoch);\n\n/**\n * Initialize a pthread mutex for process shared locking.\n */\nvoid initializeMutex(pthread_mutex_t& mutex);\n\n/**\n * Implementation of Thread::BasicLockable that operates on a process shared pthread mutex.\n */\nclass ProcessSharedMutex : public Thread::BasicLockable {\npublic:\n  ProcessSharedMutex(pthread_mutex_t& mutex) : mutex_(mutex) {}\n\n  void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() override {\n    // Deal with robust handling here. 
If the other process dies without unlocking, we are going\n    // to die shortly but try to make sure that we can handle any signals, etc. that happen without\n    // getting into a further messed up state.\n    int rc = pthread_mutex_lock(&mutex_);\n    ASSERT(rc == 0 || rc == EOWNERDEAD);\n    if (rc == EOWNERDEAD) {\n      pthread_mutex_consistent(&mutex_);\n    }\n  }\n\n  bool tryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) override {\n    int rc = pthread_mutex_trylock(&mutex_);\n    if (rc == EBUSY) {\n      return false;\n    }\n\n    ASSERT(rc == 0 || rc == EOWNERDEAD);\n    if (rc == EOWNERDEAD) {\n      pthread_mutex_consistent(&mutex_);\n    }\n\n    return true;\n  }\n\n  void unlock() ABSL_UNLOCK_FUNCTION() override {\n    int rc = pthread_mutex_unlock(&mutex_);\n    ASSERT(rc == 0);\n  }\n\nprivate:\n  pthread_mutex_t& mutex_;\n};\n\n/**\n * Implementation of HotRestart built for Linux. Most of the \"protocol\" type logic is split out into\n * HotRestarting{Base,Parent,Child}. 
This class ties all that to shared memory and version logic.\n */\nclass HotRestartImpl : public HotRestart {\npublic:\n  HotRestartImpl(uint32_t base_id, uint32_t restart_epoch, const std::string& socket_path,\n                 mode_t socket_mode);\n\n  // Server::HotRestart\n  void drainParentListeners() override;\n  int duplicateParentListenSocket(const std::string& address) override;\n  void initialize(Event::Dispatcher& dispatcher, Server::Instance& server) override;\n  void sendParentAdminShutdownRequest(time_t& original_start_time) override;\n  void sendParentTerminateRequest() override;\n  ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot& stats_store) override;\n  void shutdown() override;\n  uint32_t baseId() override;\n  std::string version() override;\n  Thread::BasicLockable& logLock() override { return log_lock_; }\n  Thread::BasicLockable& accessLogLock() override { return access_log_lock_; }\n\n  /**\n   * envoy --hot_restart_version doesn't initialize Envoy, but computes the version string\n   * based on the configured options.\n   */\n  static std::string hotRestartVersion();\n\nprivate:\n  uint32_t base_id_;\n  uint32_t scaled_base_id_;\n  HotRestartingChild as_child_;\n  HotRestartingParent as_parent_;\n  // This pointer is shared memory, and is expected to exist until process end.\n  // It will automatically be unmapped when the process terminates.\n  SharedMemory* shmem_;\n  ProcessSharedMutex log_lock_;\n  ProcessSharedMutex access_log_lock_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/hot_restart_nop_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/server/hot_restart.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/stats/allocator_impl.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * No-op implementation of HotRestart.\n */\nclass HotRestartNopImpl : public Server::HotRestart {\npublic:\n  // Server::HotRestart\n  void drainParentListeners() override {}\n  int duplicateParentListenSocket(const std::string&) override { return -1; }\n  void initialize(Event::Dispatcher&, Server::Instance&) override {}\n  void sendParentAdminShutdownRequest(time_t&) override {}\n  void sendParentTerminateRequest() override {}\n  ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot&) override { return {}; }\n  void shutdown() override {}\n  uint32_t baseId() override { return 0; }\n  std::string version() override { return \"disabled\"; }\n  Thread::BasicLockable& logLock() override { return log_lock_; }\n  Thread::BasicLockable& accessLogLock() override { return access_log_lock_; }\n\nprivate:\n  Thread::MutexBasicLockable log_lock_;\n  Thread::MutexBasicLockable access_log_lock_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/hot_restarting_base.cc",
    "content": "#include \"server/hot_restarting_base.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/stats/utility.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing HotRestartMessage = envoy::HotRestartMessage;\n\nstatic constexpr uint64_t MaxSendmsgSize = 4096;\n\nHotRestartingBase::~HotRestartingBase() {\n  if (my_domain_socket_ != -1) {\n    Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get();\n    Api::SysCallIntResult result = os_sys_calls.close(my_domain_socket_);\n    ASSERT(result.rc_ == 0);\n  }\n}\n\nvoid HotRestartingBase::initDomainSocketAddress(sockaddr_un* address) {\n  memset(address, 0, sizeof(*address));\n  address->sun_family = AF_UNIX;\n}\n\nsockaddr_un HotRestartingBase::createDomainSocketAddress(uint64_t id, const std::string& role,\n                                                         const std::string& socket_path,\n                                                         mode_t socket_mode) {\n  // Right now we only allow a maximum of 3 concurrent envoy processes to be running. When the third\n  // starts up it will kill the oldest parent.\n  static constexpr uint64_t MaxConcurrentProcesses = 3;\n  id = id % MaxConcurrentProcesses;\n  sockaddr_un address;\n  initDomainSocketAddress(&address);\n  Network::Address::PipeInstance addr(fmt::format(socket_path + \"_{}_{}\", role, base_id_ + id),\n                                      socket_mode, nullptr);\n  memcpy(&address, addr.sockAddr(), addr.sockAddrLen());\n  fchmod(my_domain_socket_, socket_mode);\n  return address;\n}\n\nvoid HotRestartingBase::bindDomainSocket(uint64_t id, const std::string& role,\n                                         const std::string& socket_path, mode_t socket_mode) {\n  Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get();\n  // This actually creates the socket and binds it. 
We use the socket in datagram mode so we can\n  // easily read single messages.\n  my_domain_socket_ = socket(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0);\n  sockaddr_un address = createDomainSocketAddress(id, role, socket_path, socket_mode);\n  unlink(address.sun_path);\n  Api::SysCallIntResult result =\n      os_sys_calls.bind(my_domain_socket_, reinterpret_cast<sockaddr*>(&address), sizeof(address));\n  if (result.rc_ != 0) {\n    const auto msg = fmt::format(\n        \"unable to bind domain socket with base_id={}, id={}, errno={} (see --base-id option)\",\n        base_id_, id, result.errno_);\n    if (result.errno_ == SOCKET_ERROR_ADDR_IN_USE) {\n      throw HotRestartDomainSocketInUseException(msg);\n    }\n    throw EnvoyException(msg);\n  }\n}\n\nvoid HotRestartingBase::sendHotRestartMessage(sockaddr_un& address,\n                                              const HotRestartMessage& proto) {\n  const uint64_t serialized_size = proto.ByteSizeLong();\n  const uint64_t total_size = sizeof(uint64_t) + serialized_size;\n  // Fill with uint64_t 'length' followed by the serialized HotRestartMessage.\n  std::vector<uint8_t> send_buf;\n  send_buf.resize(total_size);\n  *reinterpret_cast<uint64_t*>(send_buf.data()) = htobe64(serialized_size);\n  RELEASE_ASSERT(proto.SerializeWithCachedSizesToArray(send_buf.data() + sizeof(uint64_t)),\n                 \"failed to serialize a HotRestartMessage\");\n\n  RELEASE_ASSERT(fcntl(my_domain_socket_, F_SETFL, 0) != -1,\n                 fmt::format(\"Set domain socket blocking failed, errno = {}\", errno));\n\n  uint8_t* next_byte_to_send = send_buf.data();\n  uint64_t sent = 0;\n  while (sent < total_size) {\n    const uint64_t cur_chunk_size = std::min(MaxSendmsgSize, total_size - sent);\n    iovec iov[1];\n    iov[0].iov_base = next_byte_to_send;\n    iov[0].iov_len = cur_chunk_size;\n    next_byte_to_send += cur_chunk_size;\n    sent += cur_chunk_size;\n    msghdr message;\n    memset(&message, 0, sizeof(message));\n    
message.msg_name = &address;\n    message.msg_namelen = sizeof(address);\n    message.msg_iov = iov;\n    message.msg_iovlen = 1;\n\n    // Control data stuff, only relevant for the fd passing done with PassListenSocketReply.\n    uint8_t control_buffer[CMSG_SPACE(sizeof(int))];\n    if (replyIsExpectedType(&proto, HotRestartMessage::Reply::kPassListenSocket) &&\n        proto.reply().pass_listen_socket().fd() != -1) {\n      memset(control_buffer, 0, CMSG_SPACE(sizeof(int)));\n      message.msg_control = control_buffer;\n      message.msg_controllen = CMSG_SPACE(sizeof(int));\n      cmsghdr* control_message = CMSG_FIRSTHDR(&message);\n      control_message->cmsg_level = SOL_SOCKET;\n      control_message->cmsg_type = SCM_RIGHTS;\n      control_message->cmsg_len = CMSG_LEN(sizeof(int));\n      *reinterpret_cast<int*>(CMSG_DATA(control_message)) = proto.reply().pass_listen_socket().fd();\n      ASSERT(sent == total_size, \"an fd passing message was too long for one sendmsg().\");\n    }\n\n    const int rc = sendmsg(my_domain_socket_, &message, 0);\n    RELEASE_ASSERT(rc == static_cast<int>(cur_chunk_size),\n                   fmt::format(\"hot restart sendmsg() failed: returned {}, errno {}\", rc, errno));\n  }\n  RELEASE_ASSERT(fcntl(my_domain_socket_, F_SETFL, O_NONBLOCK) != -1,\n                 fmt::format(\"Set domain socket nonblocking failed, errno = {}\", errno));\n}\n\nbool HotRestartingBase::replyIsExpectedType(const HotRestartMessage* proto,\n                                            HotRestartMessage::Reply::ReplyCase oneof_type) const {\n  return proto != nullptr && proto->requestreply_case() == HotRestartMessage::kReply &&\n         proto->reply().reply_case() == oneof_type;\n}\n\n// Pull the cloned fd, if present, out of the control data and write it into the\n// PassListenSocketReply proto; the higher level code will see a listening fd that Just Works. 
We\n// should only get control data in a PassListenSocketReply, it should only be the fd passing type,\n// and there should only be one at a time. Crash on any other control data.\nvoid HotRestartingBase::getPassedFdIfPresent(HotRestartMessage* out, msghdr* message) {\n  cmsghdr* cmsg = CMSG_FIRSTHDR(message);\n  if (cmsg != nullptr) {\n    RELEASE_ASSERT(cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS &&\n                       replyIsExpectedType(out, HotRestartMessage::Reply::kPassListenSocket),\n                   \"recvmsg() came with control data when the message's purpose was not to pass a \"\n                   \"file descriptor.\");\n\n    out->mutable_reply()->mutable_pass_listen_socket()->set_fd(\n        *reinterpret_cast<int*>(CMSG_DATA(cmsg)));\n\n    RELEASE_ASSERT(CMSG_NXTHDR(message, cmsg) == nullptr,\n                   \"More than one control data on a single hot restart recvmsg().\");\n  }\n}\n\n// While in use, recv_buf_ is always >= MaxSendmsgSize. In between messages, it is kept empty,\n// to be grown back to MaxSendmsgSize at the start of the next message.\nvoid HotRestartingBase::initRecvBufIfNewMessage() {\n  if (recv_buf_.empty()) {\n    ASSERT(cur_msg_recvd_bytes_ == 0);\n    ASSERT(!expected_proto_length_.has_value());\n    recv_buf_.resize(MaxSendmsgSize);\n  }\n}\n\n// Must only be called when recv_buf_ contains a full proto. 
Returns that proto, and resets all of\n// our receive-buffering state back to empty, to await a new message.\nstd::unique_ptr<HotRestartMessage> HotRestartingBase::parseProtoAndResetState() {\n  auto ret = std::make_unique<HotRestartMessage>();\n  RELEASE_ASSERT(\n      ret->ParseFromArray(recv_buf_.data() + sizeof(uint64_t), expected_proto_length_.value()),\n      \"failed to parse a HotRestartMessage.\");\n  recv_buf_.resize(0);\n  cur_msg_recvd_bytes_ = 0;\n  expected_proto_length_.reset();\n  return ret;\n}\n\nstd::unique_ptr<HotRestartMessage> HotRestartingBase::receiveHotRestartMessage(Blocking block) {\n  // By default the domain socket is non blocking. If we need to block, make it blocking first.\n  if (block == Blocking::Yes) {\n    RELEASE_ASSERT(fcntl(my_domain_socket_, F_SETFL, 0) != -1,\n                   fmt::format(\"Set domain socket blocking failed, errno = {}\", errno));\n  }\n\n  initRecvBufIfNewMessage();\n\n  iovec iov[1];\n  msghdr message;\n  uint8_t control_buffer[CMSG_SPACE(sizeof(int))];\n  std::unique_ptr<HotRestartMessage> ret = nullptr;\n  while (!ret) {\n    iov[0].iov_base = recv_buf_.data() + cur_msg_recvd_bytes_;\n    iov[0].iov_len = MaxSendmsgSize;\n\n    // We always setup to receive an FD even though most messages do not pass one.\n    memset(control_buffer, 0, CMSG_SPACE(sizeof(int)));\n    memset(&message, 0, sizeof(message));\n    message.msg_iov = iov;\n    message.msg_iovlen = 1;\n    message.msg_control = control_buffer;\n    message.msg_controllen = CMSG_SPACE(sizeof(int));\n\n    const int recvmsg_rc = recvmsg(my_domain_socket_, &message, 0);\n    if (block == Blocking::No && recvmsg_rc == -1 && errno == SOCKET_ERROR_AGAIN) {\n      return nullptr;\n    }\n    RELEASE_ASSERT(recvmsg_rc != -1, fmt::format(\"recvmsg() returned -1, errno = {}\", errno));\n    RELEASE_ASSERT(message.msg_flags == 0,\n                   fmt::format(\"recvmsg() left msg_flags = {}\", message.msg_flags));\n    cur_msg_recvd_bytes_ += 
recvmsg_rc;\n\n    // If we don't already know 'length', we're at the start of a new length+protobuf message!\n    if (!expected_proto_length_.has_value()) {\n      // We are not ok with messages so fragmented that the length doesn't even come in one piece.\n      RELEASE_ASSERT(recvmsg_rc >= 8, \"received a brokenly tiny message fragment.\");\n\n      expected_proto_length_ = be64toh(*reinterpret_cast<uint64_t*>(recv_buf_.data()));\n      // Expand the buffer from its default 4096 if this message is going to be longer.\n      if (expected_proto_length_.value() > MaxSendmsgSize - sizeof(uint64_t)) {\n        recv_buf_.resize(expected_proto_length_.value() + sizeof(uint64_t));\n        cur_msg_recvd_bytes_ = recvmsg_rc;\n      }\n    }\n    // If we have received beyond the end of the current in-flight proto, then next is misaligned.\n    RELEASE_ASSERT(cur_msg_recvd_bytes_ <= sizeof(uint64_t) + expected_proto_length_.value(),\n                   \"received a length+protobuf message not aligned to start of sendmsg().\");\n\n    if (cur_msg_recvd_bytes_ == sizeof(uint64_t) + expected_proto_length_.value()) {\n      ret = parseProtoAndResetState();\n    }\n  }\n\n  // Turn non-blocking back on if we made it blocking.\n  if (block == Blocking::Yes) {\n    RELEASE_ASSERT(fcntl(my_domain_socket_, F_SETFL, O_NONBLOCK) != -1,\n                   fmt::format(\"Set domain socket nonblocking failed, errno = {}\", errno));\n  }\n  getPassedFdIfPresent(ret.get(), &message);\n  return ret;\n}\n\nStats::Gauge& HotRestartingBase::hotRestartGeneration(Stats::Scope& scope) {\n  // Track the hot-restart generation. Using gauge's accumulate semantics,\n  // the increments will be combined across hot-restart. This may be useful\n  // at some point, though the main motivation for this stat is to enable\n  // an integration test showing that dynamic stat-names can be coalesced\n  // across hot-restarts. 
There's no other reason this particular stat-name\n  // needs to be created dynamically.\n  //\n  // Note also, this stat cannot currently be represented as a counter due to\n  // the way stats get latched on sink update. See the comment in\n  // InstanceUtil::flushMetricsToSinks.\n  return Stats::Utility::gaugeFromElements(scope,\n                                           {Stats::DynamicName(\"server.hot_restart_generation\")},\n                                           Stats::Gauge::ImportMode::Accumulate);\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/hot_restarting_base.h",
    "content": "#pragma once\n\n#include <fcntl.h>\n#include <sys/stat.h>\n#include <sys/un.h>\n#include <unistd.h>\n\n#include <array>\n#include <atomic>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/server/hot_restart.h\"\n#include \"envoy/server/options.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Logic shared by the implementations of both sides of the child<-->parent hot restart protocol:\n * domain socket communication, and our ad hoc RPC protocol.\n */\nclass HotRestartingBase {\nprotected:\n  HotRestartingBase(uint64_t base_id) : base_id_(base_id) {}\n  ~HotRestartingBase();\n\n  void initDomainSocketAddress(sockaddr_un* address);\n  sockaddr_un createDomainSocketAddress(uint64_t id, const std::string& role,\n                                        const std::string& socket_path, mode_t socket_mode);\n  void bindDomainSocket(uint64_t id, const std::string& role, const std::string& socket_path,\n                        mode_t socket_mode);\n  int myDomainSocket() const { return my_domain_socket_; }\n\n  // Protocol description:\n  //\n  // In each direction between parent<-->child, a series of pairs of:\n  //   A uint64 'length' (bytes in network order),\n  //   followed by 'length' bytes of a serialized HotRestartMessage.\n  // Each new message must start in a new sendmsg datagram, i.e. 'length' must always start at byte\n  // 0. Each sendmsg datagram can be up to 4096 bytes (including 'length' if present). When the\n  // serialized protobuf is longer than 4096-8 bytes, and so cannot fit in just one datagram, it is\n  // delivered by a series of datagrams. In each of these continuation datagrams, the protobuf data\n  // starts at byte 0.\n  //\n  // There is no mechanism to explicitly pair responses to requests. 
However, the child initiates\n  // all exchanges, and blocks until a reply is received, so there is implicit pairing.\n  void sendHotRestartMessage(sockaddr_un& address, const envoy::HotRestartMessage& proto);\n\n  enum class Blocking { Yes, No };\n  // Receive data, possibly enough to build one of our protocol messages.\n  // If block is true, blocks until a full protocol message is available.\n  // If block is false, returns nullptr if we run out of data to receive before a full protocol\n  // message is available. In either case, the HotRestartingBase may end up buffering some data for\n  // the next protocol message, even if the function returns a protobuf.\n  std::unique_ptr<envoy::HotRestartMessage> receiveHotRestartMessage(Blocking block);\n\n  bool replyIsExpectedType(const envoy::HotRestartMessage* proto,\n                           envoy::HotRestartMessage::Reply::ReplyCase oneof_type) const;\n\n  // Returns a Gauge that tracks hot-restart generation, where every successive\n  // child increments this number.\n  static Stats::Gauge& hotRestartGeneration(Stats::Scope& scope);\n\nprivate:\n  void getPassedFdIfPresent(envoy::HotRestartMessage* out, msghdr* message);\n  std::unique_ptr<envoy::HotRestartMessage> parseProtoAndResetState();\n  void initRecvBufIfNewMessage();\n\n  // An int in [0, MaxConcurrentProcesses). As hot restarts happen, each next process gets the\n  // next of 0,1,2,0,1,...\n  // A HotRestartingBase's domain socket's name contains its base_id_ value, and so we can use\n  // this value to determine which domain socket name to treat as our parent, and which to treat as\n  // our child. (E.g. 
if we are 2, 1 is parent and 0 is child).\n  const uint64_t base_id_;\n  int my_domain_socket_{-1};\n\n  // State for the receiving half of the protocol.\n  //\n  // When filled, the size in bytes that the in-flight HotRestartMessage should be.\n  // When empty, we're ready to start receiving a new message (starting with a uint64 'length').\n  absl::optional<uint64_t> expected_proto_length_;\n  // How much of the current in-flight message (including both the uint64 'length', plus the proto\n  // itself) we have received. Once this equals expected_proto_length_ + sizeof(uint64_t), we're\n  // ready to parse the HotRestartMessage. Should be set to 0 in between messages, to indicate\n  // readiness for a new message.\n  uint64_t cur_msg_recvd_bytes_{};\n  // The first 8 bytes will always be the raw net-order bytes of the current value of\n  // expected_proto_length_. The protobuf partial data starts at byte 8.\n  // Should be resized to 0 in between messages, to indicate readiness for a new message.\n  std::vector<uint8_t> recv_buf_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/hot_restarting_child.cc",
    "content": "#include \"server/hot_restarting_child.h\"\n\n#include \"common/common/utility.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing HotRestartMessage = envoy::HotRestartMessage;\n\nHotRestartingChild::HotRestartingChild(int base_id, int restart_epoch,\n                                       const std::string& socket_path, mode_t socket_mode)\n    : HotRestartingBase(base_id), restart_epoch_(restart_epoch) {\n  initDomainSocketAddress(&parent_address_);\n  if (restart_epoch_ != 0) {\n    parent_address_ =\n        createDomainSocketAddress(restart_epoch_ + -1, \"parent\", socket_path, socket_mode);\n  }\n  bindDomainSocket(restart_epoch_, \"child\", socket_path, socket_mode);\n}\n\nint HotRestartingChild::duplicateParentListenSocket(const std::string& address) {\n  if (restart_epoch_ == 0 || parent_terminated_) {\n    return -1;\n  }\n\n  HotRestartMessage wrapped_request;\n  wrapped_request.mutable_request()->mutable_pass_listen_socket()->set_address(address);\n  sendHotRestartMessage(parent_address_, wrapped_request);\n\n  std::unique_ptr<HotRestartMessage> wrapped_reply = receiveHotRestartMessage(Blocking::Yes);\n  if (!replyIsExpectedType(wrapped_reply.get(), HotRestartMessage::Reply::kPassListenSocket)) {\n    return -1;\n  }\n  return wrapped_reply->reply().pass_listen_socket().fd();\n}\n\nstd::unique_ptr<HotRestartMessage> HotRestartingChild::getParentStats() {\n  if (restart_epoch_ == 0 || parent_terminated_) {\n    return nullptr;\n  }\n\n  HotRestartMessage wrapped_request;\n  wrapped_request.mutable_request()->mutable_stats();\n  sendHotRestartMessage(parent_address_, wrapped_request);\n\n  std::unique_ptr<HotRestartMessage> wrapped_reply = receiveHotRestartMessage(Blocking::Yes);\n  RELEASE_ASSERT(replyIsExpectedType(wrapped_reply.get(), HotRestartMessage::Reply::kStats),\n                 \"Hot restart parent did not respond as expected to get stats request.\");\n  return wrapped_reply;\n}\n\nvoid 
HotRestartingChild::drainParentListeners() {\n  if (restart_epoch_ == 0 || parent_terminated_) {\n    return;\n  }\n  // No reply expected.\n  HotRestartMessage wrapped_request;\n  wrapped_request.mutable_request()->mutable_drain_listeners();\n  sendHotRestartMessage(parent_address_, wrapped_request);\n}\n\nvoid HotRestartingChild::sendParentAdminShutdownRequest(time_t& original_start_time) {\n  if (restart_epoch_ == 0 || parent_terminated_) {\n    return;\n  }\n\n  HotRestartMessage wrapped_request;\n  wrapped_request.mutable_request()->mutable_shutdown_admin();\n  sendHotRestartMessage(parent_address_, wrapped_request);\n\n  std::unique_ptr<HotRestartMessage> wrapped_reply = receiveHotRestartMessage(Blocking::Yes);\n  RELEASE_ASSERT(replyIsExpectedType(wrapped_reply.get(), HotRestartMessage::Reply::kShutdownAdmin),\n                 \"Hot restart parent did not respond as expected to ShutdownParentAdmin.\");\n  original_start_time = wrapped_reply->reply().shutdown_admin().original_start_time_unix_seconds();\n}\n\nvoid HotRestartingChild::sendParentTerminateRequest() {\n  if (restart_epoch_ == 0 || parent_terminated_) {\n    return;\n  }\n  HotRestartMessage wrapped_request;\n  wrapped_request.mutable_request()->mutable_terminate();\n  sendHotRestartMessage(parent_address_, wrapped_request);\n  parent_terminated_ = true;\n\n  // Note that the 'generation' counter needs to retain the contribution from\n  // the parent.\n  stat_merger_->retainParentGaugeValue(hot_restart_generation_stat_name_);\n\n  // Now it is safe to forget our stat transferral state.\n  //\n  // This destruction is actually important far beyond memory efficiency. The\n  // scope-based temporary counter logic relies on the StatMerger getting\n  // destroyed once hot restart's stat merging is all done. 
(See stat_merger.h\n  // for details).\n  stat_merger_.reset();\n}\n\nvoid HotRestartingChild::mergeParentStats(Stats::Store& stats_store,\n                                          const HotRestartMessage::Reply::Stats& stats_proto) {\n  if (!stat_merger_) {\n    stat_merger_ = std::make_unique<Stats::StatMerger>(stats_store);\n    hot_restart_generation_stat_name_ = hotRestartGeneration(stats_store).statName();\n  }\n\n  // Convert the protobuf for serialized dynamic spans into the structure\n  // required by StatMerger.\n  Stats::StatMerger::DynamicsMap dynamics;\n  for (const auto& iter : stats_proto.dynamics()) {\n    Stats::DynamicSpans& spans = dynamics[iter.first];\n    for (int i = 0; i < iter.second.spans_size(); ++i) {\n      const HotRestartMessage::Reply::Span& span_proto = iter.second.spans(i);\n      spans.push_back(Stats::DynamicSpan(span_proto.first(), span_proto.last()));\n    }\n  }\n  stat_merger_->mergeStats(stats_proto.counter_deltas(), stats_proto.gauges(), dynamics);\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/hot_restarting_child.h",
    "content": "#pragma once\n\n#include \"common/stats/stat_merger.h\"\n\n#include \"server/hot_restarting_base.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * The child half of hot restarting. Issues requests and commands to the parent.\n */\nclass HotRestartingChild : HotRestartingBase, Logger::Loggable<Logger::Id::main> {\npublic:\n  HotRestartingChild(int base_id, int restart_epoch, const std::string& socket_path,\n                     mode_t socket_mode);\n\n  int duplicateParentListenSocket(const std::string& address);\n  std::unique_ptr<envoy::HotRestartMessage> getParentStats();\n  void drainParentListeners();\n  void sendParentAdminShutdownRequest(time_t& original_start_time);\n  void sendParentTerminateRequest();\n  void mergeParentStats(Stats::Store& stats_store,\n                        const envoy::HotRestartMessage::Reply::Stats& stats_proto);\n\nprivate:\n  const int restart_epoch_;\n  bool parent_terminated_{};\n  sockaddr_un parent_address_;\n  std::unique_ptr<Stats::StatMerger> stat_merger_{};\n  Stats::StatName hot_restart_generation_stat_name_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/hot_restarting_parent.cc",
    "content": "#include \"server/hot_restarting_parent.h\"\n\n#include \"envoy/server/instance.h\"\n\n#include \"common/memory/stats.h\"\n#include \"common/network/utility.h\"\n#include \"common/stats/stat_merger.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/utility.h\"\n\n#include \"server/listener_impl.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing HotRestartMessage = envoy::HotRestartMessage;\n\nHotRestartingParent::HotRestartingParent(int base_id, int restart_epoch,\n                                         const std::string& socket_path, mode_t socket_mode)\n    : HotRestartingBase(base_id), restart_epoch_(restart_epoch) {\n  child_address_ = createDomainSocketAddress(restart_epoch_ + 1, \"child\", socket_path, socket_mode);\n  bindDomainSocket(restart_epoch_, \"parent\", socket_path, socket_mode);\n}\n\nvoid HotRestartingParent::initialize(Event::Dispatcher& dispatcher, Server::Instance& server) {\n  socket_event_ = dispatcher.createFileEvent(\n      myDomainSocket(),\n      [this](uint32_t events) -> void {\n        ASSERT(events == Event::FileReadyType::Read);\n        onSocketEvent();\n      },\n      Event::PlatformDefaultTriggerType, Event::FileReadyType::Read);\n  internal_ = std::make_unique<Internal>(&server);\n}\n\nvoid HotRestartingParent::onSocketEvent() {\n  std::unique_ptr<HotRestartMessage> wrapped_request;\n  while ((wrapped_request = receiveHotRestartMessage(Blocking::No))) {\n    if (wrapped_request->requestreply_case() == HotRestartMessage::kReply) {\n      ENVOY_LOG(error, \"child sent us a HotRestartMessage reply (we want requests); ignoring.\");\n      HotRestartMessage wrapped_reply;\n      wrapped_reply.set_didnt_recognize_your_last_message(true);\n      sendHotRestartMessage(child_address_, wrapped_reply);\n      continue;\n    }\n    switch (wrapped_request->request().request_case()) {\n    case HotRestartMessage::Request::kShutdownAdmin: {\n      sendHotRestartMessage(child_address_, 
internal_->shutdownAdmin());\n      break;\n    }\n\n    case HotRestartMessage::Request::kPassListenSocket: {\n      sendHotRestartMessage(child_address_,\n                            internal_->getListenSocketsForChild(wrapped_request->request()));\n      break;\n    }\n\n    case HotRestartMessage::Request::kStats: {\n      HotRestartMessage wrapped_reply;\n      internal_->exportStatsToChild(wrapped_reply.mutable_reply()->mutable_stats());\n      sendHotRestartMessage(child_address_, wrapped_reply);\n      break;\n    }\n\n    case HotRestartMessage::Request::kDrainListeners: {\n      internal_->drainListeners();\n      break;\n    }\n\n    case HotRestartMessage::Request::kTerminate: {\n      ENVOY_LOG(info, \"shutting down due to child request\");\n      kill(getpid(), SIGTERM);\n      break;\n    }\n\n    default: {\n      ENVOY_LOG(error, \"child sent us an unfamiliar type of HotRestartMessage; ignoring.\");\n      HotRestartMessage wrapped_reply;\n      wrapped_reply.set_didnt_recognize_your_last_message(true);\n      sendHotRestartMessage(child_address_, wrapped_reply);\n      break;\n    }\n    }\n  }\n}\n\nvoid HotRestartingParent::shutdown() { socket_event_.reset(); }\n\nHotRestartingParent::Internal::Internal(Server::Instance* server) : server_(server) {\n  Stats::Gauge& hot_restart_generation = hotRestartGeneration(server->stats());\n  hot_restart_generation.inc();\n}\n\nHotRestartMessage HotRestartingParent::Internal::shutdownAdmin() {\n  server_->shutdownAdmin();\n  HotRestartMessage wrapped_reply;\n  wrapped_reply.mutable_reply()->mutable_shutdown_admin()->set_original_start_time_unix_seconds(\n      server_->startTimeFirstEpoch());\n  return wrapped_reply;\n}\n\nHotRestartMessage\nHotRestartingParent::Internal::getListenSocketsForChild(const HotRestartMessage::Request& request) {\n  HotRestartMessage wrapped_reply;\n  wrapped_reply.mutable_reply()->mutable_pass_listen_socket()->set_fd(-1);\n  Network::Address::InstanceConstSharedPtr addr =\n      
Network::Utility::resolveUrl(request.pass_listen_socket().address());\n  for (const auto& listener : server_->listenerManager().listeners()) {\n    Network::ListenSocketFactory& socket_factory = listener.get().listenSocketFactory();\n    if (*socket_factory.localAddress() == *addr && listener.get().bindToPort()) {\n      if (socket_factory.sharedSocket().has_value()) {\n        // Pass the socket to the new process if it is already shared across workers.\n        wrapped_reply.mutable_reply()->mutable_pass_listen_socket()->set_fd(\n            socket_factory.sharedSocket()->get().ioHandle().fdDoNotUse());\n      }\n      break;\n    }\n  }\n  return wrapped_reply;\n}\n\n// TODO(fredlas) if there are enough stats for stat name length to become an issue, this current\n// implementation can negate the benefit of symbolized stat names by periodically reaching the\n// magnitude of memory usage that they are meant to avoid, since this map holds full-string\n// names. The problem can be solved by splitting the export up over many chunks.\nvoid HotRestartingParent::Internal::exportStatsToChild(HotRestartMessage::Reply::Stats* stats) {\n  for (const auto& gauge : server_->stats().gauges()) {\n    if (gauge->used()) {\n      const std::string name = gauge->name();\n      (*stats->mutable_gauges())[name] = gauge->value();\n      recordDynamics(stats, name, gauge->statName());\n    }\n  }\n\n  for (const auto& counter : server_->stats().counters()) {\n    if (counter->used()) {\n      // The hot restart parent is expected to have stopped its normal stat exporting (and so\n      // latching) by the time it begins exporting to the hot restart child.\n      uint64_t latched_value = counter->latch();\n      if (latched_value > 0) {\n        const std::string name = counter->name();\n        (*stats->mutable_counter_deltas())[name] = latched_value;\n        recordDynamics(stats, name, counter->statName());\n      }\n    }\n  }\n  
stats->set_memory_allocated(Memory::Stats::totalCurrentlyAllocated());\n  stats->set_num_connections(server_->listenerManager().numConnections());\n}\n\nvoid HotRestartingParent::Internal::recordDynamics(HotRestartMessage::Reply::Stats* stats,\n                                                   const std::string& name,\n                                                   Stats::StatName stat_name) {\n  // Compute an array of spans describing which components of the stat name are\n  // dynamic. This is needed so that when the child recovers the StatName, it\n  // correlates with how the system generates those stats, with the same exact\n  // components using a dynamic representation.\n  //\n  // See https://github.com/envoyproxy/envoy/issues/9874 for more details.\n  Stats::DynamicSpans spans = server_->stats().symbolTable().getDynamicSpans(stat_name);\n\n  // Convert that C++ structure (controlled by stat_merger.cc) into a protobuf\n  // for serialization.\n  if (!spans.empty()) {\n    HotRestartMessage::Reply::RepeatedSpan spans_proto;\n    for (const Stats::DynamicSpan& span : spans) {\n      HotRestartMessage::Reply::Span* span_proto = spans_proto.add_spans();\n      span_proto->set_first(span.first);\n      span_proto->set_last(span.second);\n    }\n    (*stats->mutable_dynamics())[name] = spans_proto;\n  }\n}\n\nvoid HotRestartingParent::Internal::drainListeners() { server_->drainListeners(); }\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/hot_restarting_parent.h",
    "content": "#pragma once\n\n#include \"common/common/hash.h\"\n\n#include \"server/hot_restarting_base.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * The parent half of hot restarting. Listens for requests and commands from the child.\n * This outer class only handles evented socket I/O. The actual hot restart logic lives in\n * HotRestartingParent::Internal.\n */\nclass HotRestartingParent : HotRestartingBase, Logger::Loggable<Logger::Id::main> {\npublic:\n  HotRestartingParent(int base_id, int restart_epoch, const std::string& socket_path,\n                      mode_t socket_mode);\n  void initialize(Event::Dispatcher& dispatcher, Server::Instance& server);\n  void shutdown();\n\n  // The hot restarting parent's hot restart logic. Each function is meant to be called to fulfill a\n  // request from the child for that action.\n  class Internal {\n  public:\n    explicit Internal(Server::Instance* server);\n    // Return value is the response to return to the child.\n    envoy::HotRestartMessage shutdownAdmin();\n    // Return value is the response to return to the child.\n    envoy::HotRestartMessage\n    getListenSocketsForChild(const envoy::HotRestartMessage::Request& request);\n    // 'stats' is a field in the reply protobuf to be sent to the child, which we should populate.\n    void exportStatsToChild(envoy::HotRestartMessage::Reply::Stats* stats);\n    void recordDynamics(envoy::HotRestartMessage::Reply::Stats* stats, const std::string& name,\n                        Stats::StatName stat_name);\n    void drainListeners();\n\n  private:\n    Server::Instance* const server_{};\n  };\n\nprivate:\n  void onSocketEvent();\n\n  const int restart_epoch_;\n  sockaddr_un child_address_;\n  Event::FileEventPtr socket_event_;\n  std::unique_ptr<Internal> internal_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/lds_api.cc",
    "content": "#include \"server/lds_api.h\"\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/api/v2/listener.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nLdsApiImpl::LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config,\n                       const udpa::core::v1::ResourceLocator* lds_resources_locator,\n                       Upstream::ClusterManager& cm, Init::Manager& init_manager,\n                       Stats::Scope& scope, ListenerManager& lm,\n                       ProtobufMessage::ValidationVisitor& validation_visitor)\n    : Envoy::Config::SubscriptionBase<envoy::config::listener::v3::Listener>(\n          lds_config.resource_api_version(), validation_visitor, \"name\"),\n      listener_manager_(lm), scope_(scope.createScope(\"listener_manager.lds.\")), cm_(cm),\n      init_target_(\"LDS\", [this]() { subscription_->start({}); }) {\n  const auto resource_name = getResourceName();\n  if (lds_resources_locator == nullptr) {\n    subscription_ = cm.subscriptionFactory().subscriptionFromConfigSource(\n        lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_);\n  } else {\n    subscription_ = cm.subscriptionFactory().collectionSubscriptionFromUrl(\n        *lds_resources_locator, lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this,\n        resource_decoder_);\n  }\n  init_manager.add(init_target_);\n}\n\nvoid LdsApiImpl::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n          
                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                                const std::string& system_version_info) {\n  Config::ScopedResume maybe_resume_rds;\n  if (cm_.adsMux()) {\n    const auto type_urls =\n        Config::getAllVersionTypeUrls<envoy::config::route::v3::RouteConfiguration>();\n    maybe_resume_rds = cm_.adsMux()->pause(type_urls);\n  }\n\n  bool any_applied = false;\n  listener_manager_.beginListenerUpdate();\n\n  // We do all listener removals before adding the new listeners. This allows adding a new\n  // listener with the same address as a listener that is to be removed. Do not change the order.\n  for (const auto& removed_listener : removed_resources) {\n    if (listener_manager_.removeListener(removed_listener)) {\n      ENVOY_LOG(info, \"lds: remove listener '{}'\", removed_listener);\n      any_applied = true;\n    }\n  }\n\n  ListenerManager::FailureStates failure_state;\n  absl::node_hash_set<std::string> listener_names;\n  std::string message;\n  for (const auto& resource : added_resources) {\n    envoy::config::listener::v3::Listener listener;\n    try {\n      listener =\n          dynamic_cast<const envoy::config::listener::v3::Listener&>(resource.get().resource());\n      if (!listener_names.insert(listener.name()).second) {\n        // NOTE: at this point, the first of these duplicates has already been successfully\n        // applied.\n        throw EnvoyException(fmt::format(\"duplicate listener {} found\", listener.name()));\n      }\n      if (listener_manager_.addOrUpdateListener(listener, resource.get().version(), true)) {\n        ENVOY_LOG(info, \"lds: add/update listener '{}'\", listener.name());\n        any_applied = true;\n      } else {\n        ENVOY_LOG(debug, \"lds: add/update listener '{}' skipped\", listener.name());\n      }\n    } catch (const EnvoyException& e) {\n      failure_state.push_back(std::make_unique<envoy::admin::v3::UpdateFailureState>());\n      auto& 
state = failure_state.back();\n      state->set_details(e.what());\n      state->mutable_failed_configuration()->PackFrom(resource.get().resource());\n      absl::StrAppend(&message, listener.name(), \": \", e.what(), \"\\n\");\n    }\n  }\n  listener_manager_.endListenerUpdate(std::move(failure_state));\n\n  if (any_applied) {\n    system_version_info_ = system_version_info;\n  }\n  init_target_.ready();\n  if (!message.empty()) {\n    throw EnvoyException(fmt::format(\"Error adding/updating listener(s) {}\", message));\n  }\n}\n\nvoid LdsApiImpl::onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                                const std::string& version_info) {\n  // We need to keep track of which listeners need to remove.\n  // Specifically, it's [listeners we currently have] - [listeners found in the response].\n  absl::node_hash_set<std::string> listeners_to_remove;\n  for (const auto& listener :\n       listener_manager_.listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) {\n    listeners_to_remove.insert(listener.get().name());\n  }\n  for (const auto& resource : resources) {\n    // Remove its name from our delta removed pile.\n    listeners_to_remove.erase(resource.get().name());\n  }\n  // Copy our delta removed pile into the desired format.\n  Protobuf::RepeatedPtrField<std::string> to_remove_repeated;\n  for (const auto& listener : listeners_to_remove) {\n    *to_remove_repeated.Add() = listener;\n  }\n  onConfigUpdate(resources, to_remove_repeated, version_info);\n}\n\nvoid LdsApiImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                                      const EnvoyException*) {\n  ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason);\n  // We need to allow server startup to continue, even if we have a bad\n  // config.\n  init_target_.ready();\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/lds_api.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.validate.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/config/subscription_factory.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/server/listener_manager.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/config/subscription_base.h\"\n#include \"common/init/target_impl.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * LDS API implementation that fetches via Subscription.\n */\nclass LdsApiImpl : public LdsApi,\n                   Envoy::Config::SubscriptionBase<envoy::config::listener::v3::Listener>,\n                   Logger::Loggable<Logger::Id::upstream> {\npublic:\n  LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config,\n             const udpa::core::v1::ResourceLocator* lds_resources_locator,\n             Upstream::ClusterManager& cm, Init::Manager& init_manager, Stats::Scope& scope,\n             ListenerManager& lm, ProtobufMessage::ValidationVisitor& validation_visitor);\n\n  // Server::LdsApi\n  std::string versionInfo() const override { return system_version_info_; }\n\nprivate:\n  // Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,\n                      const std::string& version_info) override;\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,\n                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                      const std::string& system_version_info) override;\n  void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason,\n                            const EnvoyException* e) override;\n\n  Config::SubscriptionPtr subscription_;\n  
std::string system_version_info_;\n  ListenerManager& listener_manager_;\n  Stats::ScopePtr scope_;\n  Upstream::ClusterManager& cm_;\n  Init::TargetImpl init_target_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/listener_hooks.h",
    "content": "#pragma once\n\n#include \"envoy/common/pure.h\"\n\nnamespace Envoy {\n\n/**\n * Hooks in the server to allow for integration testing. The real server just uses an empty\n * implementation defined below.\n */\nclass ListenerHooks {\npublic:\n  virtual ~ListenerHooks() = default;\n\n  /**\n   * Called when a worker has added a listener and it is listening.\n   */\n  virtual void onWorkerListenerAdded() PURE;\n\n  /**\n   * Called when a worker has removed a listener and it is no longer listening.\n   */\n  virtual void onWorkerListenerRemoved() PURE;\n\n  /**\n   * Called when the Runtime::ScopedLoaderSingleton is created by the server.\n   */\n  virtual void onRuntimeCreated() PURE;\n};\n\n/**\n * Empty implementation of ListenerHooks.\n */\nclass DefaultListenerHooks : public ListenerHooks {\npublic:\n  // ListenerHooks\n  void onWorkerListenerAdded() override {}\n  void onWorkerListenerRemoved() override {}\n  void onRuntimeCreated() override {}\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/listener_impl.cc",
    "content": "#include \"server/listener_impl.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h\"\n#include \"envoy/network/exception.h\"\n#include \"envoy/network/udp_packet_writer_config.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/active_udp_listener_config.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/access_log/access_log_impl.h\"\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/config/utility.h\"\n#include \"common/network/connection_balancer_impl.h\"\n#include \"common/network/resolver_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/socket_option_impl.h\"\n#include \"common/network/udp_listener_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"server/configuration_impl.h\"\n#include \"server/drain_manager_impl.h\"\n#include \"server/filter_chain_manager_impl.h\"\n#include \"server/listener_manager_impl.h\"\n#include \"server/transport_socket_config_impl.h\"\n#include \"server/well_known_names.h\"\n\n#include \"extensions/filters/listener/well_known_names.h\"\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\nbool needTlsInspector(const envoy::config::listener::v3::Listener& config) {\n  return std::any_of(config.filter_chains().begin(), config.filter_chains().end(),\n                     [](const auto& filter_chain) {\n                       const auto& matcher = filter_chain.filter_chain_match();\n                       return matcher.transport_protocol() == \"tls\" ||\n                              
(matcher.transport_protocol().empty() &&\n                               (!matcher.server_names().empty() ||\n                                !matcher.application_protocols().empty()));\n                     }) &&\n         !std::any_of(\n             config.listener_filters().begin(), config.listener_filters().end(),\n             [](const auto& filter) {\n               return filter.name() ==\n                          Extensions::ListenerFilters::ListenerFilterNames::get().TlsInspector ||\n                      filter.name() == \"envoy.listener.tls_inspector\";\n             });\n}\n} // namespace\n\nListenSocketFactoryImpl::ListenSocketFactoryImpl(ListenerComponentFactory& factory,\n                                                 Network::Address::InstanceConstSharedPtr address,\n                                                 Network::Socket::Type socket_type,\n                                                 const Network::Socket::OptionsSharedPtr& options,\n                                                 bool bind_to_port,\n                                                 const std::string& listener_name, bool reuse_port)\n    : factory_(factory), local_address_(address), socket_type_(socket_type), options_(options),\n      bind_to_port_(bind_to_port), listener_name_(listener_name), reuse_port_(reuse_port) {\n\n  bool create_socket = false;\n  if (local_address_->type() == Network::Address::Type::Ip) {\n    if (socket_type_ == Network::Socket::Type::Datagram) {\n      ASSERT(reuse_port_ == true);\n    }\n\n    if (reuse_port_ == false) {\n      // create a socket which will be used by all worker threads\n      create_socket = true;\n    } else if (local_address_->ip()->port() == 0) {\n      // port is 0, need to create a socket here for reserving a real port number,\n      // then all worker threads should use same port.\n      create_socket = true;\n    }\n  } else {\n    ASSERT(local_address_->type() == Network::Address::Type::Pipe);\n    // Listeners 
with Unix domain socket always use shared socket.\n    create_socket = true;\n  }\n\n  if (create_socket) {\n    socket_ = createListenSocketAndApplyOptions();\n  }\n\n  if (socket_ && local_address_->ip() && local_address_->ip()->port() == 0) {\n    local_address_ = socket_->localAddress();\n  }\n  ENVOY_LOG(debug, \"Set listener {} socket factory local address to {}\", listener_name_,\n            local_address_->asString());\n}\n\nNetwork::SocketSharedPtr ListenSocketFactoryImpl::createListenSocketAndApplyOptions() {\n  // socket might be nullptr depending on factory_ implementation.\n  Network::SocketSharedPtr socket = factory_.createListenSocket(\n      local_address_, socket_type_, options_, {bind_to_port_, !reuse_port_});\n\n  // Binding is done by now.\n  ENVOY_LOG(debug, \"Create listen socket for listener {} on address {}\", listener_name_,\n            local_address_->asString());\n  if (socket != nullptr && options_ != nullptr) {\n    const bool ok = Network::Socket::applyOptions(\n        options_, *socket, envoy::config::core::v3::SocketOption::STATE_BOUND);\n    const std::string message =\n        fmt::format(\"{}: Setting socket options {}\", listener_name_, ok ? 
\"succeeded\" : \"failed\");\n    if (!ok) {\n      ENVOY_LOG(warn, \"{}\", message);\n      throw Network::CreateListenerException(message);\n    } else {\n      ENVOY_LOG(debug, \"{}\", message);\n    }\n\n    // Add the options to the socket_ so that STATE_LISTENING options can be\n    // set in the worker after listen()/evconnlistener_new() is called.\n    socket->addOptions(options_);\n  }\n  return socket;\n}\n\nNetwork::SocketSharedPtr ListenSocketFactoryImpl::getListenSocket() {\n  if (!reuse_port_) {\n    return socket_;\n  }\n\n  Network::SocketSharedPtr socket;\n  absl::call_once(steal_once_, [this, &socket]() {\n    if (socket_) {\n      // If a listener's port is set to 0, socket_ should be created for reserving a port\n      // number, it is handed over to the first worker thread came here.\n      // There are several reasons for doing this:\n      // - for UDP, once a socket being bound, it begins to receive packets, it can't be\n      //   left unused, and closing it will lost packets received by it.\n      // - port number should be reserved before adding listener to active_listeners_ list,\n      //   otherwise admin API /listeners might return 0 as listener's port.\n      socket = std::move(socket_);\n    }\n  });\n\n  if (socket) {\n    return socket;\n  }\n\n  return createListenSocketAndApplyOptions();\n}\n\nListenerFactoryContextBaseImpl::ListenerFactoryContextBaseImpl(\n    Envoy::Server::Instance& server, ProtobufMessage::ValidationVisitor& validation_visitor,\n    const envoy::config::listener::v3::Listener& config, DrainManagerPtr drain_manager)\n    : server_(server), metadata_(config.metadata()), direction_(config.traffic_direction()),\n      global_scope_(server.stats().createScope(\"\")),\n      listener_scope_(server_.stats().createScope(fmt::format(\n          \"listener.{}.\", Network::Address::resolveProtoAddress(config.address())->asString()))),\n      validation_visitor_(validation_visitor), 
drain_manager_(std::move(drain_manager)) {}\n\nAccessLog::AccessLogManager& ListenerFactoryContextBaseImpl::accessLogManager() {\n  return server_.accessLogManager();\n}\nUpstream::ClusterManager& ListenerFactoryContextBaseImpl::clusterManager() {\n  return server_.clusterManager();\n}\nEvent::Dispatcher& ListenerFactoryContextBaseImpl::dispatcher() { return server_.dispatcher(); }\nGrpc::Context& ListenerFactoryContextBaseImpl::grpcContext() { return server_.grpcContext(); }\nbool ListenerFactoryContextBaseImpl::healthCheckFailed() { return server_.healthCheckFailed(); }\nHttp::Context& ListenerFactoryContextBaseImpl::httpContext() { return server_.httpContext(); }\nconst LocalInfo::LocalInfo& ListenerFactoryContextBaseImpl::localInfo() const {\n  return server_.localInfo();\n}\nEnvoy::Runtime::Loader& ListenerFactoryContextBaseImpl::runtime() { return server_.runtime(); }\nStats::Scope& ListenerFactoryContextBaseImpl::scope() { return *global_scope_; }\nSingleton::Manager& ListenerFactoryContextBaseImpl::singletonManager() {\n  return server_.singletonManager();\n}\nOverloadManager& ListenerFactoryContextBaseImpl::overloadManager() {\n  return server_.overloadManager();\n}\nThreadLocal::Instance& ListenerFactoryContextBaseImpl::threadLocal() {\n  return server_.threadLocal();\n}\nAdmin& ListenerFactoryContextBaseImpl::admin() { return server_.admin(); }\nconst envoy::config::core::v3::Metadata& ListenerFactoryContextBaseImpl::listenerMetadata() const {\n  return metadata_;\n};\nenvoy::config::core::v3::TrafficDirection ListenerFactoryContextBaseImpl::direction() const {\n  return direction_;\n};\nTimeSource& ListenerFactoryContextBaseImpl::timeSource() { return api().timeSource(); }\nProtobufMessage::ValidationContext& ListenerFactoryContextBaseImpl::messageValidationContext() {\n  return server_.messageValidationContext();\n}\nProtobufMessage::ValidationVisitor& ListenerFactoryContextBaseImpl::messageValidationVisitor() {\n  return 
validation_visitor_;\n}\nApi::Api& ListenerFactoryContextBaseImpl::api() { return server_.api(); }\nServerLifecycleNotifier& ListenerFactoryContextBaseImpl::lifecycleNotifier() {\n  return server_.lifecycleNotifier();\n}\nProcessContextOptRef ListenerFactoryContextBaseImpl::processContext() {\n  return server_.processContext();\n}\nConfiguration::ServerFactoryContext&\nListenerFactoryContextBaseImpl::getServerFactoryContext() const {\n  return server_.serverFactoryContext();\n}\nConfiguration::TransportSocketFactoryContext&\nListenerFactoryContextBaseImpl::getTransportSocketFactoryContext() const {\n  return server_.transportSocketFactoryContext();\n}\nStats::Scope& ListenerFactoryContextBaseImpl::listenerScope() { return *listener_scope_; }\nNetwork::DrainDecision& ListenerFactoryContextBaseImpl::drainDecision() { return *this; }\nServer::DrainManager& ListenerFactoryContextBaseImpl::drainManager() { return *drain_manager_; }\n\n// Must be overridden\nInit::Manager& ListenerFactoryContextBaseImpl::initManager() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\nListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config,\n                           const std::string& version_info, ListenerManagerImpl& parent,\n                           const std::string& name, bool added_via_api, bool workers_started,\n                           uint64_t hash, uint32_t concurrency)\n    : parent_(parent), address_(Network::Address::resolveProtoAddress(config.address())),\n      bind_to_port_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.deprecated_v1(), bind_to_port, true)),\n      hand_off_restored_destination_connections_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, hidden_envoy_deprecated_use_original_dst, false)),\n      per_connection_buffer_limit_bytes_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)),\n      listener_tag_(parent_.factory_.nextListenerTag()), name_(name), added_via_api_(added_via_api),\n      
workers_started_(workers_started), hash_(hash),\n      tcp_backlog_size_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, tcp_backlog_size, ENVOY_TCP_BACKLOG_SIZE)),\n      validation_visitor_(\n          added_via_api_ ? parent_.server_.messageValidationContext().dynamicValidationVisitor()\n                         : parent_.server_.messageValidationContext().staticValidationVisitor()),\n      listener_init_target_(fmt::format(\"Listener-init-target {}\", name),\n                            [this]() { dynamic_init_manager_->initialize(local_init_watcher_); }),\n      dynamic_init_manager_(std::make_unique<Init::ManagerImpl>(\n          fmt::format(\"Listener-local-init-manager {} {}\", name, hash))),\n      config_(config), version_info_(version_info),\n      listener_filters_timeout_(\n          PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)),\n      continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()),\n      listener_factory_context_(std::make_shared<PerListenerFactoryContextImpl>(\n          parent.server_, validation_visitor_, config, this, *this,\n          parent.factory_.createDrainManager(config.drain_type()))),\n      filter_chain_manager_(address_, listener_factory_context_->parentFactoryContext(),\n                            initManager()),\n      cx_limit_runtime_key_(\"envoy.resource_limits.listener.\" + config_.name() +\n                            \".connection_limit\"),\n      open_connections_(std::make_shared<BasicResourceLimitImpl>(\n          std::numeric_limits<uint64_t>::max(), listener_factory_context_->runtime(),\n          cx_limit_runtime_key_)),\n      local_init_watcher_(fmt::format(\"Listener-local-init-watcher {}\", name), [this] {\n        if (workers_started_) {\n          parent_.onListenerWarmed(*this);\n        } else {\n          // Notify Server that this listener is\n          // ready.\n          listener_init_target_.ready();\n        }\n      }) {\n\n  const 
absl::optional<std::string> runtime_val =\n      listener_factory_context_->runtime().snapshot().get(cx_limit_runtime_key_);\n  if (runtime_val && runtime_val->empty()) {\n    ENVOY_LOG(warn,\n              \"Listener connection limit runtime key {} is empty. There are currently no \"\n              \"limitations on the number of accepted connections for listener {}.\",\n              cx_limit_runtime_key_, config_.name());\n  }\n\n  buildAccessLog();\n  auto socket_type = Network::Utility::protobufAddressSocketType(config.address());\n  buildListenSocketOptions(socket_type);\n  buildUdpListenerFactory(socket_type, concurrency);\n  buildUdpWriterFactory(socket_type);\n  createListenerFilterFactories(socket_type);\n  validateFilterChains(socket_type);\n  buildFilterChains();\n  if (socket_type == Network::Socket::Type::Datagram) {\n    return;\n  }\n  buildSocketOptions();\n  buildOriginalDstListenerFilter();\n  buildProxyProtocolListenerFilter();\n  buildTlsInspectorListenerFilter();\n  if (!workers_started_) {\n    // Initialize dynamic_init_manager_ from Server's init manager if it's not initialized.\n    // NOTE: listener_init_target_ should be added to parent's initManager at the end of the\n    // listener constructor so that this listener's children entities could register their targets\n    // with their parent's initManager.\n    parent_.server_.initManager().add(listener_init_target_);\n  }\n}\n\nListenerImpl::ListenerImpl(ListenerImpl& origin,\n                           const envoy::config::listener::v3::Listener& config,\n                           const std::string& version_info, ListenerManagerImpl& parent,\n                           const std::string& name, bool added_via_api, bool workers_started,\n                           uint64_t hash, uint32_t concurrency)\n    : parent_(parent), address_(origin.address_),\n      bind_to_port_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.deprecated_v1(), bind_to_port, true)),\n      
hand_off_restored_destination_connections_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, hidden_envoy_deprecated_use_original_dst, false)),\n      per_connection_buffer_limit_bytes_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)),\n      listener_tag_(origin.listener_tag_), name_(name), added_via_api_(added_via_api),\n      workers_started_(workers_started), hash_(hash),\n      tcp_backlog_size_(\n          PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, tcp_backlog_size, ENVOY_TCP_BACKLOG_SIZE)),\n      validation_visitor_(\n          added_via_api_ ? parent_.server_.messageValidationContext().dynamicValidationVisitor()\n                         : parent_.server_.messageValidationContext().staticValidationVisitor()),\n      // listener_init_target_ is not used during in place update because we expect server started.\n      listener_init_target_(\"\", nullptr),\n      dynamic_init_manager_(std::make_unique<Init::ManagerImpl>(\n          fmt::format(\"Listener-local-init-manager {} {}\", name, hash))),\n      config_(config), version_info_(version_info),\n      listener_filters_timeout_(\n          PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)),\n      continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()),\n      connection_balancer_(origin.connection_balancer_),\n      listener_factory_context_(std::make_shared<PerListenerFactoryContextImpl>(\n          origin.listener_factory_context_->listener_factory_context_base_, this, *this)),\n      filter_chain_manager_(address_, origin.listener_factory_context_->parentFactoryContext(),\n                            initManager(), origin.filter_chain_manager_),\n      local_init_watcher_(fmt::format(\"Listener-local-init-watcher {}\", name), [this] {\n        ASSERT(workers_started_);\n        parent_.inPlaceFilterChainUpdate(*this);\n      }) {\n  buildAccessLog();\n  auto socket_type = 
Network::Utility::protobufAddressSocketType(config.address());\n  buildListenSocketOptions(socket_type);\n  buildUdpListenerFactory(socket_type, concurrency);\n  buildUdpWriterFactory(socket_type);\n  createListenerFilterFactories(socket_type);\n  validateFilterChains(socket_type);\n  buildFilterChains();\n  // In place update is tcp only so it's safe to apply below tcp only initialization.\n  buildSocketOptions();\n  buildOriginalDstListenerFilter();\n  buildProxyProtocolListenerFilter();\n  buildTlsInspectorListenerFilter();\n  open_connections_ = origin.open_connections_;\n}\n\nvoid ListenerImpl::buildAccessLog() {\n  for (const auto& access_log : config_.access_log()) {\n    AccessLog::InstanceSharedPtr current_access_log =\n        AccessLog::AccessLogFactory::fromProto(access_log, *listener_factory_context_);\n    access_logs_.push_back(current_access_log);\n  }\n}\n\nvoid ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type,\n                                           uint32_t concurrency) {\n  if (socket_type == Network::Socket::Type::Datagram) {\n    if (!config_.reuse_port() && concurrency > 1) {\n      throw EnvoyException(\"Listening on UDP when concurrency is > 1 without the SO_REUSEPORT \"\n                           \"socket option results in \"\n                           \"unstable packet proxying. 
Configure the reuse_port listener option or \"\n                           \"set concurrency = 1.\");\n    }\n    auto udp_config = config_.udp_listener_config();\n    if (udp_config.udp_listener_name().empty()) {\n      udp_config.set_udp_listener_name(UdpListenerNames::get().RawUdp);\n    }\n    auto& config_factory =\n        Config::Utility::getAndCheckFactoryByName<ActiveUdpListenerConfigFactory>(\n            udp_config.udp_listener_name());\n    ProtobufTypes::MessagePtr message =\n        Config::Utility::translateToFactoryConfig(udp_config, validation_visitor_, config_factory);\n    udp_listener_factory_ = config_factory.createActiveUdpListenerFactory(*message, concurrency);\n\n    udp_listener_worker_router_ =\n        std::make_unique<Network::UdpListenerWorkerRouterImpl>(concurrency);\n  }\n}\n\nvoid ListenerImpl::buildUdpWriterFactory(Network::Socket::Type socket_type) {\n  if (socket_type == Network::Socket::Type::Datagram) {\n    auto udp_writer_config = config_.udp_writer_config();\n    if (!Api::OsSysCallsSingleton::get().supportsUdpGso() ||\n        udp_writer_config.typed_config().type_url().empty()) {\n      const std::string default_type_url =\n          \"type.googleapis.com/envoy.config.listener.v3.UdpDefaultWriterOptions\";\n      udp_writer_config.mutable_typed_config()->set_type_url(default_type_url);\n    }\n    auto& config_factory =\n        Config::Utility::getAndCheckFactory<Network::UdpPacketWriterConfigFactory>(\n            udp_writer_config);\n    ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig(\n        udp_writer_config.typed_config(), validation_visitor_, config_factory);\n    udp_writer_factory_ = config_factory.createUdpPacketWriterFactory(*message);\n  }\n}\n\nvoid ListenerImpl::buildListenSocketOptions(Network::Socket::Type socket_type) {\n  // The process-wide `signal()` handling may fail to handle SIGPIPE if overridden\n  // in the process (i.e., on a mobile client). 
Some OSes support handling it at the socket layer:\n  if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) {\n    addListenSocketOptions(Network::SocketOptionFactory::buildSocketNoSigpipeOptions());\n  }\n  if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, transparent, false)) {\n    addListenSocketOptions(Network::SocketOptionFactory::buildIpTransparentOptions());\n  }\n  if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, freebind, false)) {\n    addListenSocketOptions(Network::SocketOptionFactory::buildIpFreebindOptions());\n  }\n  if (config_.reuse_port()) {\n    addListenSocketOptions(Network::SocketOptionFactory::buildReusePortOptions());\n  }\n  if (!config_.socket_options().empty()) {\n    addListenSocketOptions(\n        Network::SocketOptionFactory::buildLiteralOptions(config_.socket_options()));\n  }\n  if (socket_type == Network::Socket::Type::Datagram) {\n    // Needed for recvmsg to return destination address in IP header.\n    addListenSocketOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions());\n    // Needed to return receive buffer overflown indicator.\n    addListenSocketOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions());\n    // TODO(yugant) : Add a config option for UDP_GRO\n    if (Api::OsSysCallsSingleton::get().supportsUdpGro()) {\n      // Needed to receive gso_size option\n      addListenSocketOptions(Network::SocketOptionFactory::buildUdpGroOptions());\n    }\n  }\n}\n\nvoid ListenerImpl::createListenerFilterFactories(Network::Socket::Type socket_type) {\n  if (!config_.listener_filters().empty()) {\n    switch (socket_type) {\n    case Network::Socket::Type::Datagram:\n      if (config_.listener_filters().size() > 1) {\n        // Currently supports only 1 UDP listener filter.\n        throw EnvoyException(fmt::format(\n            \"error adding listener '{}': Only 1 UDP listener filter per listener supported\",\n            address_->asString()));\n      }\n      udp_listener_filter_factories_ = 
parent_.factory_.createUdpListenerFilterFactoryList(\n          config_.listener_filters(), *listener_factory_context_);\n      break;\n    case Network::Socket::Type::Stream:\n      listener_filter_factories_ = parent_.factory_.createListenerFilterFactoryList(\n          config_.listener_filters(), *listener_factory_context_);\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n}\n\nvoid ListenerImpl::validateFilterChains(Network::Socket::Type socket_type) {\n  if (config_.filter_chains().empty() && (socket_type == Network::Socket::Type::Stream ||\n                                          !udp_listener_factory_->isTransportConnectionless())) {\n    // If we got here, this is a tcp listener or connection-oriented udp listener, so ensure there\n    // is a filter chain specified\n    throw EnvoyException(fmt::format(\"error adding listener '{}': no filter chains specified\",\n                                     address_->asString()));\n  } else if (udp_listener_factory_ != nullptr &&\n             !udp_listener_factory_->isTransportConnectionless()) {\n    for (auto& filter_chain : config_.filter_chains()) {\n      // Early fail if any filter chain doesn't have transport socket configured.\n      if (!filter_chain.has_transport_socket()) {\n        throw EnvoyException(fmt::format(\"error adding listener '{}': no transport socket \"\n                                         \"specified for connection oriented UDP listener\",\n                                         address_->asString()));\n      }\n    }\n  }\n}\n\nvoid ListenerImpl::buildFilterChains() {\n  Server::Configuration::TransportSocketFactoryContextImpl transport_factory_context(\n      parent_.server_.admin(), parent_.server_.sslContextManager(), listenerScope(),\n      parent_.server_.clusterManager(), parent_.server_.localInfo(), parent_.server_.dispatcher(),\n      parent_.server_.stats(), parent_.server_.singletonManager(), parent_.server_.threadLocal(),\n      
validation_visitor_, parent_.server_.api());\n  transport_factory_context.setInitManager(*dynamic_init_manager_);\n  // The init manager is a little messy. Will refactor when filter chain manager could accept\n  // network filter chain update.\n  // TODO(lambdai): create builder from filter_chain_manager to obtain the init manager\n  ListenerFilterChainFactoryBuilder builder(*this, transport_factory_context);\n  filter_chain_manager_.addFilterChain(config_.filter_chains(), builder, filter_chain_manager_);\n}\n\nvoid ListenerImpl::buildSocketOptions() {\n  // TCP specific setup.\n  if (connection_balancer_ == nullptr) {\n    // Not in place listener update.\n    if (config_.has_connection_balance_config()) {\n      // Currently exact balance is the only supported type and there are no options.\n      ASSERT(config_.connection_balance_config().has_exact_balance());\n      connection_balancer_ = std::make_shared<Network::ExactConnectionBalancerImpl>();\n    } else {\n      connection_balancer_ = std::make_shared<Network::NopConnectionBalancerImpl>();\n    }\n  }\n\n  if (config_.has_tcp_fast_open_queue_length()) {\n    addListenSocketOptions(Network::SocketOptionFactory::buildTcpFastOpenOptions(\n        config_.tcp_fast_open_queue_length().value()));\n  }\n}\n\nvoid ListenerImpl::buildOriginalDstListenerFilter() {\n  // Add original dst listener filter if 'use_original_dst' flag is set.\n  if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, hidden_envoy_deprecated_use_original_dst, false)) {\n    auto& factory =\n        Config::Utility::getAndCheckFactoryByName<Configuration::NamedListenerFilterConfigFactory>(\n            Extensions::ListenerFilters::ListenerFilterNames::get().OriginalDst);\n\n    listener_filter_factories_.push_back(factory.createListenerFilterFactoryFromProto(\n        Envoy::ProtobufWkt::Empty(),\n        /*listener_filter_matcher=*/nullptr, *listener_factory_context_));\n  }\n}\n\nvoid ListenerImpl::buildProxyProtocolListenerFilter() {\n  // Add proxy 
protocol listener filter if 'use_proxy_proto' flag is set.\n  // TODO(jrajahalme): This is the last listener filter on purpose. When filter chain matching\n  //                   is implemented, this needs to be run after the filter chain has been\n  //                   selected.\n  if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_.filter_chains()[0], use_proxy_proto, false)) {\n    auto& factory =\n        Config::Utility::getAndCheckFactoryByName<Configuration::NamedListenerFilterConfigFactory>(\n            Extensions::ListenerFilters::ListenerFilterNames::get().ProxyProtocol);\n    listener_filter_factories_.push_back(factory.createListenerFilterFactoryFromProto(\n        envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol(),\n        /*listener_filter_matcher=*/nullptr, *listener_factory_context_));\n  }\n}\n\nvoid ListenerImpl::buildTlsInspectorListenerFilter() {\n  // TODO(zuercher) remove the deprecated TLS inspector name when the deprecated names are removed.\n  const bool need_tls_inspector = needTlsInspector(config_);\n  // Automatically inject TLS Inspector if it wasn't configured explicitly and it's needed.\n  if (need_tls_inspector) {\n    const std::string message =\n        fmt::format(\"adding listener '{}': filter chain match rules require TLS Inspector \"\n                    \"listener filter, but it isn't configured, trying to inject it \"\n                    \"(this might fail if Envoy is compiled without it)\",\n                    address_->asString());\n    ENVOY_LOG(warn, \"{}\", message);\n\n    auto& factory =\n        Config::Utility::getAndCheckFactoryByName<Configuration::NamedListenerFilterConfigFactory>(\n            Extensions::ListenerFilters::ListenerFilterNames::get().TlsInspector);\n    listener_filter_factories_.push_back(factory.createListenerFilterFactoryFromProto(\n        Envoy::ProtobufWkt::Empty(),\n        /*listener_filter_matcher=*/nullptr, *listener_factory_context_));\n  
}\n}\n\nAccessLog::AccessLogManager& PerListenerFactoryContextImpl::accessLogManager() {\n  return listener_factory_context_base_->accessLogManager();\n}\nUpstream::ClusterManager& PerListenerFactoryContextImpl::clusterManager() {\n  return listener_factory_context_base_->clusterManager();\n}\nEvent::Dispatcher& PerListenerFactoryContextImpl::dispatcher() {\n  return listener_factory_context_base_->dispatcher();\n}\nNetwork::DrainDecision& PerListenerFactoryContextImpl::drainDecision() {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\nGrpc::Context& PerListenerFactoryContextImpl::grpcContext() {\n  return listener_factory_context_base_->grpcContext();\n}\nbool PerListenerFactoryContextImpl::healthCheckFailed() {\n  return listener_factory_context_base_->healthCheckFailed();\n}\nHttp::Context& PerListenerFactoryContextImpl::httpContext() {\n  return listener_factory_context_base_->httpContext();\n}\nconst LocalInfo::LocalInfo& PerListenerFactoryContextImpl::localInfo() const {\n  return listener_factory_context_base_->localInfo();\n}\nEnvoy::Runtime::Loader& PerListenerFactoryContextImpl::runtime() {\n  return listener_factory_context_base_->runtime();\n}\nStats::Scope& PerListenerFactoryContextImpl::scope() {\n  return listener_factory_context_base_->scope();\n}\nSingleton::Manager& PerListenerFactoryContextImpl::singletonManager() {\n  return listener_factory_context_base_->singletonManager();\n}\nOverloadManager& PerListenerFactoryContextImpl::overloadManager() {\n  return listener_factory_context_base_->overloadManager();\n}\nThreadLocal::Instance& PerListenerFactoryContextImpl::threadLocal() {\n  return listener_factory_context_base_->threadLocal();\n}\nAdmin& PerListenerFactoryContextImpl::admin() { return listener_factory_context_base_->admin(); }\nconst envoy::config::core::v3::Metadata& PerListenerFactoryContextImpl::listenerMetadata() const {\n  return listener_factory_context_base_->listenerMetadata();\n};\nenvoy::config::core::v3::TrafficDirection 
PerListenerFactoryContextImpl::direction() const {\n  return listener_factory_context_base_->direction();\n};\nTimeSource& PerListenerFactoryContextImpl::timeSource() { return api().timeSource(); }\nconst Network::ListenerConfig& PerListenerFactoryContextImpl::listenerConfig() const {\n  return *listener_config_;\n}\nProtobufMessage::ValidationContext& PerListenerFactoryContextImpl::messageValidationContext() {\n  return getServerFactoryContext().messageValidationContext();\n}\nProtobufMessage::ValidationVisitor& PerListenerFactoryContextImpl::messageValidationVisitor() {\n  return listener_factory_context_base_->messageValidationVisitor();\n}\nApi::Api& PerListenerFactoryContextImpl::api() { return listener_factory_context_base_->api(); }\nServerLifecycleNotifier& PerListenerFactoryContextImpl::lifecycleNotifier() {\n  return listener_factory_context_base_->lifecycleNotifier();\n}\nProcessContextOptRef PerListenerFactoryContextImpl::processContext() {\n  return listener_factory_context_base_->processContext();\n}\nConfiguration::ServerFactoryContext&\nPerListenerFactoryContextImpl::getServerFactoryContext() const {\n  return listener_factory_context_base_->getServerFactoryContext();\n}\nConfiguration::TransportSocketFactoryContext&\nPerListenerFactoryContextImpl::getTransportSocketFactoryContext() const {\n  return listener_factory_context_base_->getTransportSocketFactoryContext();\n}\nStats::Scope& PerListenerFactoryContextImpl::listenerScope() {\n  return listener_factory_context_base_->listenerScope();\n}\nInit::Manager& PerListenerFactoryContextImpl::initManager() { return listener_impl_.initManager(); }\n\nbool ListenerImpl::createNetworkFilterChain(\n    Network::Connection& connection,\n    const std::vector<Network::FilterFactoryCb>& filter_factories) {\n  return Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories);\n}\n\nbool ListenerImpl::createListenerFilterChain(Network::ListenerFilterManager& manager) {\n  return 
Configuration::FilterChainUtility::buildFilterChain(manager, listener_filter_factories_);\n}\n\nvoid ListenerImpl::createUdpListenerFilterChain(Network::UdpListenerFilterManager& manager,\n                                                Network::UdpReadFilterCallbacks& callbacks) {\n  Configuration::FilterChainUtility::buildUdpFilterChain(manager, callbacks,\n                                                         udp_listener_filter_factories_);\n}\n\nvoid ListenerImpl::debugLog(const std::string& message) {\n  UNREFERENCED_PARAMETER(message);\n  ENVOY_LOG(debug, \"{}: name={}, hash={}, address={}\", message, name_, hash_, address_->asString());\n}\n\nvoid ListenerImpl::initialize() {\n  last_updated_ = listener_factory_context_->timeSource().systemTime();\n  // If workers have already started, we shift from using the global init manager to using a local\n  // per listener init manager. See ~ListenerImpl() for why we gate the onListenerWarmed() call\n  // by resetting the watcher.\n  if (workers_started_) {\n    ENVOY_LOG_MISC(debug, \"Initialize listener {} local-init-manager.\", name_);\n    // If workers_started_ is true, dynamic_init_manager_ should be initialized by listener\n    // manager directly.\n    dynamic_init_manager_->initialize(local_init_watcher_);\n  }\n}\n\nListenerImpl::~ListenerImpl() {\n  if (!workers_started_) {\n    // We need to remove the listener_init_target_ handle from parent's initManager(), to unblock\n    // parent's initManager to get ready().\n    listener_init_target_.ready();\n  }\n}\n\nInit::Manager& ListenerImpl::initManager() { return *dynamic_init_manager_; }\n\nvoid ListenerImpl::setSocketFactory(const Network::ListenSocketFactorySharedPtr& socket_factory) {\n  ASSERT(!socket_factory_);\n  socket_factory_ = socket_factory;\n}\n\nbool ListenerImpl::supportUpdateFilterChain(const envoy::config::listener::v3::Listener& config,\n                                            bool worker_started) {\n  if 
(!Runtime::runtimeFeatureEnabled(\n          \"envoy.reloadable_features.listener_in_place_filterchain_update\")) {\n    return false;\n  }\n\n  // The in place update needs the active listener in worker thread. worker_started guarantees the\n  // existence of that active listener.\n  if (!worker_started) {\n    return false;\n  }\n\n  // Currently we only support TCP filter chain update.\n  if (Network::Utility::protobufAddressSocketType(config_.address()) !=\n          Network::Socket::Type::Stream ||\n      Network::Utility::protobufAddressSocketType(config.address()) !=\n          Network::Socket::Type::Stream) {\n    return false;\n  }\n\n  // Full listener update currently rejects tcp listener having 0 filter chain.\n  // In place filter chain update could survive under zero filter chain but we should keep the same\n  // behavior for now. This also guards the below filter chain access.\n  if (config.filter_chains_size() == 0) {\n    return false;\n  }\n\n  // See buildProxyProtocolListenerFilter(). 
Full listener update guarantees at least 1 filter chain\n  // at tcp listener.\n  if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_.filter_chains()[0], use_proxy_proto, false) ^\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.filter_chains()[0], use_proxy_proto, false)) {\n    return false;\n  }\n\n  // See buildTlsInspectorListenerFilter().\n  if (needTlsInspector(config_) ^ needTlsInspector(config)) {\n    return false;\n  }\n  return ListenerMessageUtil::filterChainOnlyChange(config_, config);\n}\n\nListenerImplPtr\nListenerImpl::newListenerWithFilterChain(const envoy::config::listener::v3::Listener& config,\n                                         bool workers_started, uint64_t hash) {\n  // Use WrapUnique since the constructor is private.\n  return absl::WrapUnique(\n      new ListenerImpl(*this, config, version_info_, parent_, name_, added_via_api_,\n                       /* new new workers started state */ workers_started,\n                       /* use new hash */ hash, parent_.server_.options().concurrency()));\n}\n\nvoid ListenerImpl::diffFilterChain(const ListenerImpl& another_listener,\n                                   std::function<void(Network::DrainableFilterChain&)> callback) {\n  for (const auto& message_and_filter_chain : filter_chain_manager_.filterChainsByMessage()) {\n    if (another_listener.filter_chain_manager_.filterChainsByMessage().find(\n            message_and_filter_chain.first) ==\n        another_listener.filter_chain_manager_.filterChainsByMessage().end()) {\n      // The filter chain exists in `this` listener but not in the listener passed in.\n      callback(*message_and_filter_chain.second);\n    }\n  }\n}\n\nbool ListenerMessageUtil::filterChainOnlyChange(const envoy::config::listener::v3::Listener& lhs,\n                                                const envoy::config::listener::v3::Listener& rhs) {\n  Protobuf::util::MessageDifferencer differencer;\n  
differencer.set_message_field_comparison(Protobuf::util::MessageDifferencer::EQUIVALENT);\n  differencer.set_repeated_field_comparison(Protobuf::util::MessageDifferencer::AS_SET);\n  differencer.IgnoreField(\n      envoy::config::listener::v3::Listener::GetDescriptor()->FindFieldByName(\"filter_chains\"));\n  return differencer.Compare(lhs, rhs);\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/listener_impl.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/network/drain_decision.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/instance.h\"\n#include \"envoy/server/listener_manager.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/basic_resource_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/init/target_impl.h\"\n\n#include \"server/filter_chain_manager_impl.h\"\n\n#include \"absl/base/call_once.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ListenerMessageUtil {\npublic:\n  /**\n   * @return true if listener message lhs and rhs are the same if ignoring filter_chains field.\n   */\n  static bool filterChainOnlyChange(const envoy::config::listener::v3::Listener& lhs,\n                                    const envoy::config::listener::v3::Listener& rhs);\n};\n\nclass ListenerManagerImpl;\n\nclass ListenSocketFactoryImpl : public Network::ListenSocketFactory,\n                                protected Logger::Loggable<Logger::Id::config> {\npublic:\n  ListenSocketFactoryImpl(ListenerComponentFactory& factory,\n                          Network::Address::InstanceConstSharedPtr address,\n                          Network::Socket::Type socket_type,\n                          const Network::Socket::OptionsSharedPtr& options, bool bind_to_port,\n                          const std::string& listener_name, bool reuse_port);\n\n  // Network::ListenSocketFactory\n  Network::Socket::Type socketType() const override { return socket_type_; }\n  const Network::Address::InstanceConstSharedPtr& localAddress() const override {\n    return local_address_;\n  }\n\n  Network::SocketSharedPtr getListenSocket() override;\n\n  /**\n   * @return the 
socket shared by worker threads; otherwise return null.\n   */\n  Network::SocketOptRef sharedSocket() const override {\n    if (!reuse_port_) {\n      ASSERT(socket_ != nullptr);\n      return *socket_;\n    }\n    // If reuse_port is true, always return null, even socket_ is created for reserving\n    // port number.\n    return absl::nullopt;\n  }\n\nprotected:\n  Network::SocketSharedPtr createListenSocketAndApplyOptions();\n\nprivate:\n  ListenerComponentFactory& factory_;\n  // Initially, its port number might be 0. Once a socket is created, its port\n  // will be set to the binding port.\n  Network::Address::InstanceConstSharedPtr local_address_;\n  Network::Socket::Type socket_type_;\n  const Network::Socket::OptionsSharedPtr options_;\n  bool bind_to_port_;\n  const std::string& listener_name_;\n  const bool reuse_port_;\n  Network::SocketSharedPtr socket_;\n  absl::once_flag steal_once_;\n};\n\n// TODO(mattklein123): Consider getting rid of pre-worker start and post-worker start code by\n//                     initializing all listeners after workers are started.\n\n/**\n * The common functionality shared by PerListenerFilterFactoryContexts and\n * PerFilterChainFactoryFactoryContexts.\n */\nclass ListenerFactoryContextBaseImpl final : public Configuration::FactoryContext,\n                                             public Network::DrainDecision {\npublic:\n  ListenerFactoryContextBaseImpl(Envoy::Server::Instance& server,\n                                 ProtobufMessage::ValidationVisitor& validation_visitor,\n                                 const envoy::config::listener::v3::Listener& config,\n                                 Server::DrainManagerPtr drain_manager);\n  AccessLog::AccessLogManager& accessLogManager() override;\n  Upstream::ClusterManager& clusterManager() override;\n  Event::Dispatcher& dispatcher() override;\n  Network::DrainDecision& drainDecision() override;\n  Grpc::Context& grpcContext() override;\n  bool healthCheckFailed() 
override;\n  Http::Context& httpContext() override;\n  Init::Manager& initManager() override;\n  const LocalInfo::LocalInfo& localInfo() const override;\n  Envoy::Runtime::Loader& runtime() override;\n  Stats::Scope& scope() override;\n  Singleton::Manager& singletonManager() override;\n  OverloadManager& overloadManager() override;\n  ThreadLocal::Instance& threadLocal() override;\n  Admin& admin() override;\n  const envoy::config::core::v3::Metadata& listenerMetadata() const override;\n  envoy::config::core::v3::TrafficDirection direction() const override;\n  TimeSource& timeSource() override;\n  ProtobufMessage::ValidationContext& messageValidationContext() override;\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override;\n  Api::Api& api() override;\n  ServerLifecycleNotifier& lifecycleNotifier() override;\n  ProcessContextOptRef processContext() override;\n  Configuration::ServerFactoryContext& getServerFactoryContext() const override;\n  Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override;\n  Stats::Scope& listenerScope() override;\n\n  // DrainDecision\n  bool drainClose() const override {\n    return drain_manager_->drainClose() || server_.drainManager().drainClose();\n  }\n  Server::DrainManager& drainManager();\n\nprivate:\n  Envoy::Server::Instance& server_;\n  const envoy::config::core::v3::Metadata metadata_;\n  envoy::config::core::v3::TrafficDirection direction_;\n  Stats::ScopePtr global_scope_;\n  Stats::ScopePtr listener_scope_; // Stats with listener named scope.\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n  const Server::DrainManagerPtr drain_manager_;\n};\n\nclass ListenerImpl;\n\n// TODO(lambdai): Strip the interface since ListenerFactoryContext only need to support\n// ListenerFilterChain creation. e.g, Is listenerMetaData() required? 
Is it required only at\n// listener update or during the lifetime of listener?\nclass PerListenerFactoryContextImpl : public Configuration::ListenerFactoryContext {\npublic:\n  PerListenerFactoryContextImpl(Envoy::Server::Instance& server,\n                                ProtobufMessage::ValidationVisitor& validation_visitor,\n                                const envoy::config::listener::v3::Listener& config_message,\n                                const Network::ListenerConfig* listener_config,\n                                ListenerImpl& listener_impl, DrainManagerPtr drain_manager)\n      : listener_factory_context_base_(std::make_shared<ListenerFactoryContextBaseImpl>(\n            server, validation_visitor, config_message, std::move(drain_manager))),\n        listener_config_(listener_config), listener_impl_(listener_impl) {}\n  PerListenerFactoryContextImpl(\n      std::shared_ptr<ListenerFactoryContextBaseImpl> listener_factory_context_base,\n      const Network::ListenerConfig* listener_config, ListenerImpl& listener_impl)\n      : listener_factory_context_base_(listener_factory_context_base),\n        listener_config_(listener_config), listener_impl_(listener_impl) {}\n\n  // FactoryContext\n  AccessLog::AccessLogManager& accessLogManager() override;\n  Upstream::ClusterManager& clusterManager() override;\n  Event::Dispatcher& dispatcher() override;\n  Network::DrainDecision& drainDecision() override;\n  Grpc::Context& grpcContext() override;\n  bool healthCheckFailed() override;\n  Http::Context& httpContext() override;\n  Init::Manager& initManager() override;\n  const LocalInfo::LocalInfo& localInfo() const override;\n  Envoy::Runtime::Loader& runtime() override;\n  Stats::Scope& scope() override;\n  Singleton::Manager& singletonManager() override;\n  OverloadManager& overloadManager() override;\n  ThreadLocal::Instance& threadLocal() override;\n  Admin& admin() override;\n  const envoy::config::core::v3::Metadata& listenerMetadata() const 
override;\n  envoy::config::core::v3::TrafficDirection direction() const override;\n  TimeSource& timeSource() override;\n  ProtobufMessage::ValidationContext& messageValidationContext() override;\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override;\n  Api::Api& api() override;\n  ServerLifecycleNotifier& lifecycleNotifier() override;\n  ProcessContextOptRef processContext() override;\n  Configuration::ServerFactoryContext& getServerFactoryContext() const override;\n  Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override;\n\n  Stats::Scope& listenerScope() override;\n\n  // ListenerFactoryContext\n  const Network::ListenerConfig& listenerConfig() const override;\n\n  ListenerFactoryContextBaseImpl& parentFactoryContext() { return *listener_factory_context_base_; }\n  friend class ListenerImpl;\n\nprivate:\n  std::shared_ptr<ListenerFactoryContextBaseImpl> listener_factory_context_base_;\n  const Network::ListenerConfig* listener_config_;\n  ListenerImpl& listener_impl_;\n};\n\n/**\n * Maps proto config to runtime config for a listener with a network filter chain.\n */\nclass ListenerImpl final : public Network::ListenerConfig,\n                           public Network::FilterChainFactory,\n                           Logger::Loggable<Logger::Id::config> {\npublic:\n  /**\n   * Create a new listener.\n   * @param config supplies the configuration proto.\n   * @param version_info supplies the xDS version of the listener.\n   * @param parent supplies the owning manager.\n   * @param name supplies the listener name.\n   * @param added_via_api supplies whether the listener can be updated or removed.\n   * @param workers_started supplies whether the listener is being added before or after workers\n   *        have been started. 
This controls various behavior related to init management.\n   * @param hash supplies the hash to use for duplicate checking.\n   * @param concurrency is the number of listeners instances to be created.\n   */\n  ListenerImpl(const envoy::config::listener::v3::Listener& config, const std::string& version_info,\n               ListenerManagerImpl& parent, const std::string& name, bool added_via_api,\n               bool workers_started, uint64_t hash, uint32_t concurrency);\n  ~ListenerImpl() override;\n\n  // TODO(lambdai): Explore using the same ListenerImpl object to execute in place filter chain\n  // update.\n  /**\n   * Execute in place filter chain update. The filter chain update is less expensive than full\n   * listener update because connections may not need to be drained.\n   */\n  std::unique_ptr<ListenerImpl>\n  newListenerWithFilterChain(const envoy::config::listener::v3::Listener& config,\n                             bool workers_started, uint64_t hash);\n  /**\n   * Determine if in place filter chain update could be executed at this moment.\n   */\n  bool supportUpdateFilterChain(const envoy::config::listener::v3::Listener& config,\n                                bool worker_started);\n\n  /**\n   * Run the callback on each filter chain that exists in this listener but not in the passed\n   * listener config.\n   */\n  void diffFilterChain(const ListenerImpl& another_listener,\n                       std::function<void(Network::DrainableFilterChain&)> callback);\n\n  /**\n   * Helper functions to determine whether a listener is blocked for update or remove.\n   */\n  bool blockUpdate(uint64_t new_hash) { return new_hash == hash_ || !added_via_api_; }\n  bool blockRemove() { return !added_via_api_; }\n\n  /**\n   * Called when a listener failed to be actually created on a worker.\n   * @return TRUE if we have seen more than one worker failure.\n   */\n  bool onListenerCreateFailure() {\n    bool ret = saw_listener_create_failure_;\n    
saw_listener_create_failure_ = true;\n    return ret;\n  }\n\n  Network::Address::InstanceConstSharedPtr address() const { return address_; }\n  const envoy::config::listener::v3::Listener& config() const { return config_; }\n  const Network::ListenSocketFactorySharedPtr& getSocketFactory() const { return socket_factory_; }\n  void debugLog(const std::string& message);\n  void initialize();\n  DrainManager& localDrainManager() const {\n    return listener_factory_context_->listener_factory_context_base_->drainManager();\n  }\n  void setSocketFactory(const Network::ListenSocketFactorySharedPtr& socket_factory);\n  void setSocketAndOptions(const Network::SocketSharedPtr& socket);\n  const Network::Socket::OptionsSharedPtr& listenSocketOptions() { return listen_socket_options_; }\n  const std::string& versionInfo() const { return version_info_; }\n\n  // Network::ListenerConfig\n  Network::FilterChainManager& filterChainManager() override { return filter_chain_manager_; }\n  Network::FilterChainFactory& filterChainFactory() override { return *this; }\n  Network::ListenSocketFactory& listenSocketFactory() override { return *socket_factory_; }\n  bool bindToPort() override { return bind_to_port_; }\n  bool handOffRestoredDestinationConnections() const override {\n    return hand_off_restored_destination_connections_;\n  }\n  uint32_t perConnectionBufferLimitBytes() const override {\n    return per_connection_buffer_limit_bytes_;\n  }\n  std::chrono::milliseconds listenerFiltersTimeout() const override {\n    return listener_filters_timeout_;\n  }\n  bool continueOnListenerFiltersTimeout() const override {\n    return continue_on_listener_filters_timeout_;\n  }\n  Stats::Scope& listenerScope() override { return listener_factory_context_->listenerScope(); }\n  uint64_t listenerTag() const override { return listener_tag_; }\n  const std::string& name() const override { return name_; }\n  Network::ActiveUdpListenerFactory* udpListenerFactory() override {\n    return 
udp_listener_factory_.get();\n  }\n  Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override {\n    return Network::UdpPacketWriterFactoryOptRef(std::ref(*udp_writer_factory_));\n  }\n  Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override {\n    return udp_listener_worker_router_\n               ? Network::UdpListenerWorkerRouterOptRef(*udp_listener_worker_router_)\n               : absl::nullopt;\n  }\n  Network::ConnectionBalancer& connectionBalancer() override { return *connection_balancer_; }\n\n  ResourceLimit& openConnections() override { return *open_connections_; }\n  const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() const override {\n    return access_logs_;\n  }\n  uint32_t tcpBacklogSize() const override { return tcp_backlog_size_; }\n  Init::Manager& initManager() override;\n  envoy::config::core::v3::TrafficDirection direction() const override {\n    return config().traffic_direction();\n  }\n\n  void ensureSocketOptions() {\n    if (!listen_socket_options_) {\n      listen_socket_options_ =\n          std::make_shared<std::vector<Network::Socket::OptionConstSharedPtr>>();\n    }\n  }\n\n  // Network::FilterChainFactory\n  bool createNetworkFilterChain(Network::Connection& connection,\n                                const std::vector<Network::FilterFactoryCb>& factories) override;\n  bool createListenerFilterChain(Network::ListenerFilterManager& manager) override;\n  void createUdpListenerFilterChain(Network::UdpListenerFilterManager& udp_listener,\n                                    Network::UdpReadFilterCallbacks& callbacks) override;\n\n  SystemTime last_updated_;\n\nprivate:\n  /**\n   * Create a new listener from an existing listener and the new config message if the in place\n   * filter chain update is decided. 
Should be called only by newListenerWithFilterChain().\n   */\n  ListenerImpl(ListenerImpl& origin, const envoy::config::listener::v3::Listener& config,\n               const std::string& version_info, ListenerManagerImpl& parent,\n               const std::string& name, bool added_via_api, bool workers_started, uint64_t hash,\n               uint32_t concurrency);\n  // Helpers for constructor.\n  void buildAccessLog();\n  void buildUdpListenerFactory(Network::Socket::Type socket_type, uint32_t concurrency);\n  void buildUdpWriterFactory(Network::Socket::Type socket_type);\n  void buildListenSocketOptions(Network::Socket::Type socket_type);\n  void createListenerFilterFactories(Network::Socket::Type socket_type);\n  void validateFilterChains(Network::Socket::Type socket_type);\n  void buildFilterChains();\n  void buildSocketOptions();\n  void buildOriginalDstListenerFilter();\n  void buildProxyProtocolListenerFilter();\n  void buildTlsInspectorListenerFilter();\n\n  void addListenSocketOptions(const Network::Socket::OptionsSharedPtr& options) {\n    ensureSocketOptions();\n    Network::Socket::appendOptions(listen_socket_options_, options);\n  }\n\n  ListenerManagerImpl& parent_;\n  Network::Address::InstanceConstSharedPtr address_;\n\n  Network::ListenSocketFactorySharedPtr socket_factory_;\n  const bool bind_to_port_;\n  const bool hand_off_restored_destination_connections_;\n  const uint32_t per_connection_buffer_limit_bytes_;\n  const uint64_t listener_tag_;\n  const std::string name_;\n  const bool added_via_api_;\n  const bool workers_started_;\n  const uint64_t hash_;\n  const uint32_t tcp_backlog_size_;\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n\n  // A target is added to Server's InitManager if workers_started_ is false.\n  Init::TargetImpl listener_init_target_;\n  // This init manager is populated with targets from the filter chain factories, namely\n  // RdsRouteConfigSubscription::init_target_, so the listener can wait for route 
configs.\n  std::unique_ptr<Init::Manager> dynamic_init_manager_;\n\n  std::vector<Network::ListenerFilterFactoryCb> listener_filter_factories_;\n  std::vector<Network::UdpListenerFilterFactoryCb> udp_listener_filter_factories_;\n  std::vector<AccessLog::InstanceSharedPtr> access_logs_;\n  DrainManagerPtr local_drain_manager_;\n  bool saw_listener_create_failure_{};\n  const envoy::config::listener::v3::Listener config_;\n  const std::string version_info_;\n  Network::Socket::OptionsSharedPtr listen_socket_options_;\n  const std::chrono::milliseconds listener_filters_timeout_;\n  const bool continue_on_listener_filters_timeout_;\n  Network::ActiveUdpListenerFactoryPtr udp_listener_factory_;\n  Network::UdpPacketWriterFactoryPtr udp_writer_factory_;\n  Network::UdpListenerWorkerRouterPtr udp_listener_worker_router_;\n  Network::ConnectionBalancerSharedPtr connection_balancer_;\n  std::shared_ptr<PerListenerFactoryContextImpl> listener_factory_context_;\n  FilterChainManagerImpl filter_chain_manager_;\n\n  // Per-listener connection limits are only specified via runtime.\n  //\n  // TODO (tonya11en): Move this functionality into the overload manager.\n  const std::string cx_limit_runtime_key_;\n  std::shared_ptr<BasicResourceLimitImpl> open_connections_;\n\n  // This init watcher, if workers_started_ is false, notifies the \"parent\" listener manager when\n  // listener initialization is complete.\n  // Important: local_init_watcher_ must be the last field in the class to avoid unexpected watcher\n  // callback during the destroy of ListenerImpl.\n  Init::WatcherImpl local_init_watcher_;\n\n  // to access ListenerManagerImpl::factory_.\n  friend class ListenerFilterChainFactoryBuilder;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/listener_manager_impl.cc",
    "content": "#include \"server/listener_manager_impl.h\"\n\n#include <algorithm>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/listener.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/active_udp_listener_config.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/network/filter_matcher.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"server/api_listener_impl.h\"\n#include \"server/configuration_impl.h\"\n#include \"server/drain_manager_impl.h\"\n#include \"server/filter_chain_manager_impl.h\"\n#include \"server/transport_socket_config_impl.h\"\n#include \"server/well_known_names.h\"\n\n#include \"extensions/filters/listener/well_known_names.h\"\n#include \"extensions/transport_sockets/well_known_names.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nstd::string toString(Network::Socket::Type socket_type) {\n  switch (socket_type) {\n  case Network::Socket::Type::Stream:\n    return \"SocketType::Stream\";\n  case Network::Socket::Type::Datagram:\n    return \"SocketType::Datagram\";\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\n// Finds and returns the DynamicListener for the name provided from listener_map, creating and\n// inserting one if necessary.\nenvoy::admin::v3::ListenersConfigDump::DynamicListener* getOrCreateDynamicListener(\n    const 
std::string& name, envoy::admin::v3::ListenersConfigDump& dump,\n    absl::flat_hash_map<std::string, envoy::admin::v3::ListenersConfigDump::DynamicListener*>&\n        listener_map) {\n\n  auto it = listener_map.find(name);\n  if (it != listener_map.end()) {\n    return it->second;\n  }\n  auto* state = dump.add_dynamic_listeners();\n  state->set_name(name);\n  listener_map.emplace(name, state);\n  return state;\n}\n\n// Given a listener, dumps the version info, update time and configuration into the\n// DynamicListenerState provided.\nvoid fillState(envoy::admin::v3::ListenersConfigDump::DynamicListenerState& state,\n               const ListenerImpl& listener) {\n  state.set_version_info(listener.versionInfo());\n  state.mutable_listener()->PackFrom(API_RECOVER_ORIGINAL(listener.config()));\n  TimestampUtil::systemClockToTimestamp(listener.last_updated_, *(state.mutable_last_updated()));\n}\n\n} // namespace\n\nbool ListenSocketCreationParams::operator==(const ListenSocketCreationParams& rhs) const {\n  return (bind_to_port == rhs.bind_to_port) &&\n         (duplicate_parent_socket == rhs.duplicate_parent_socket);\n}\n\nbool ListenSocketCreationParams::operator!=(const ListenSocketCreationParams& rhs) const {\n  return !operator==(rhs);\n}\n\nstd::vector<Network::FilterFactoryCb> ProdListenerComponentFactory::createNetworkFilterFactoryList_(\n    const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>& filters,\n    Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context) {\n  std::vector<Network::FilterFactoryCb> ret;\n  for (ssize_t i = 0; i < filters.size(); i++) {\n    const auto& proto_config = filters[i];\n    ENVOY_LOG(debug, \"  filter #{}:\", i);\n    ENVOY_LOG(debug, \"    name: {}\", proto_config.name());\n    ENVOY_LOG(debug, \"  config: {}\",\n              MessageUtil::getJsonStringFromMessage(\n                  proto_config.has_typed_config()\n                      ? 
static_cast<const Protobuf::Message&>(proto_config.typed_config())\n                      : static_cast<const Protobuf::Message&>(\n                            proto_config.hidden_envoy_deprecated_config()),\n                  true));\n\n    // Now see if there is a factory that will accept the config.\n    auto& factory =\n        Config::Utility::getAndCheckFactory<Configuration::NamedNetworkFilterConfigFactory>(\n            proto_config);\n\n    Config::Utility::validateTerminalFilters(filters[i].name(), factory.name(), \"network\",\n                                             factory.isTerminalFilter(), i == filters.size() - 1);\n\n    auto message = Config::Utility::translateToFactoryConfig(\n        proto_config, filter_chain_factory_context.messageValidationVisitor(), factory);\n    Network::FilterFactoryCb callback =\n        factory.createFilterFactoryFromProto(*message, filter_chain_factory_context);\n    ret.push_back(callback);\n  }\n  return ret;\n}\n\nstd::vector<Network::ListenerFilterFactoryCb>\nProdListenerComponentFactory::createListenerFilterFactoryList_(\n    const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n    Configuration::ListenerFactoryContext& context) {\n  std::vector<Network::ListenerFilterFactoryCb> ret;\n  for (ssize_t i = 0; i < filters.size(); i++) {\n    const auto& proto_config = filters[i];\n    ENVOY_LOG(debug, \"  filter #{}:\", i);\n    ENVOY_LOG(debug, \"    name: {}\", proto_config.name());\n    ENVOY_LOG(debug, \"  config: {}\",\n              MessageUtil::getJsonStringFromMessage(\n                  proto_config.has_typed_config()\n                      ? 
static_cast<const Protobuf::Message&>(proto_config.typed_config())\n                      : static_cast<const Protobuf::Message&>(\n                            proto_config.hidden_envoy_deprecated_config()),\n                  true));\n\n    // Now see if there is a factory that will accept the config.\n    auto& factory =\n        Config::Utility::getAndCheckFactory<Configuration::NamedListenerFilterConfigFactory>(\n            proto_config);\n    auto message = Config::Utility::translateToFactoryConfig(\n        proto_config, context.messageValidationVisitor(), factory);\n    ret.push_back(factory.createListenerFilterFactoryFromProto(\n        *message, createListenerFilterMatcher(proto_config), context));\n  }\n  return ret;\n}\n\nstd::vector<Network::UdpListenerFilterFactoryCb>\nProdListenerComponentFactory::createUdpListenerFilterFactoryList_(\n    const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n    Configuration::ListenerFactoryContext& context) {\n  std::vector<Network::UdpListenerFilterFactoryCb> ret;\n  for (ssize_t i = 0; i < filters.size(); i++) {\n    const auto& proto_config = filters[i];\n    ENVOY_LOG(debug, \"  filter #{}:\", i);\n    ENVOY_LOG(debug, \"    name: {}\", proto_config.name());\n    ENVOY_LOG(debug, \"  config: {}\",\n              MessageUtil::getJsonStringFromMessage(\n                  proto_config.has_typed_config()\n                      ? 
static_cast<const Protobuf::Message&>(proto_config.typed_config())\n                      : static_cast<const Protobuf::Message&>(\n                            proto_config.hidden_envoy_deprecated_config()),\n                  true));\n\n    // Now see if there is a factory that will accept the config.\n    auto& factory =\n        Config::Utility::getAndCheckFactory<Configuration::NamedUdpListenerFilterConfigFactory>(\n            proto_config);\n\n    auto message = Config::Utility::translateToFactoryConfig(\n        proto_config, context.messageValidationVisitor(), factory);\n    ret.push_back(factory.createFilterFactoryFromProto(*message, context));\n  }\n  return ret;\n}\n\nNetwork::ListenerFilterMatcherSharedPtr ProdListenerComponentFactory::createListenerFilterMatcher(\n    const envoy::config::listener::v3::ListenerFilter& listener_filter) {\n  if (!listener_filter.has_filter_disabled()) {\n    return nullptr;\n  }\n  return std::shared_ptr<Network::ListenerFilterMatcher>(\n      Network::ListenerFilterMatcherBuilder::buildListenerFilterMatcher(\n          listener_filter.filter_disabled()));\n}\n\nNetwork::SocketSharedPtr ProdListenerComponentFactory::createListenSocket(\n    Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type,\n    const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) {\n  ASSERT(address->type() == Network::Address::Type::Ip ||\n         address->type() == Network::Address::Type::Pipe);\n  ASSERT(socket_type == Network::Socket::Type::Stream ||\n         socket_type == Network::Socket::Type::Datagram);\n\n  // For each listener config we share a single socket among all threaded listeners.\n  // First we try to get the socket from our parent if applicable.\n  if (address->type() == Network::Address::Type::Pipe) {\n    if (socket_type != Network::Socket::Type::Stream) {\n      // This could be implemented in the future, since Unix domain sockets\n      // support 
SOCK_DGRAM, but there would need to be a way to specify it in\n      // envoy.api.v2.core.Pipe.\n      throw EnvoyException(\n          fmt::format(\"socket type {} not supported for pipes\", toString(socket_type)));\n    }\n    const std::string addr = fmt::format(\"unix://{}\", address->asString());\n    const int fd = server_.hotRestart().duplicateParentListenSocket(addr);\n    Network::IoHandlePtr io_handle = std::make_unique<Network::IoSocketHandleImpl>(fd);\n    if (io_handle->isOpen()) {\n      ENVOY_LOG(debug, \"obtained socket for address {} from parent\", addr);\n      return std::make_shared<Network::UdsListenSocket>(std::move(io_handle), address);\n    }\n    return std::make_shared<Network::UdsListenSocket>(address);\n  }\n\n  const std::string scheme = (socket_type == Network::Socket::Type::Stream)\n                                 ? std::string(Network::Utility::TCP_SCHEME)\n                                 : std::string(Network::Utility::UDP_SCHEME);\n  const std::string addr = absl::StrCat(scheme, address->asString());\n\n  if (params.bind_to_port && params.duplicate_parent_socket) {\n    const int fd = server_.hotRestart().duplicateParentListenSocket(addr);\n    if (fd != -1) {\n      ENVOY_LOG(debug, \"obtained socket for address {} from parent\", addr);\n      Network::IoHandlePtr io_handle = std::make_unique<Network::IoSocketHandleImpl>(fd);\n      if (socket_type == Network::Socket::Type::Stream) {\n        return std::make_shared<Network::TcpListenSocket>(std::move(io_handle), address, options);\n      } else {\n        return std::make_shared<Network::UdpListenSocket>(std::move(io_handle), address, options);\n      }\n    }\n  }\n\n  if (socket_type == Network::Socket::Type::Stream) {\n    return std::make_shared<Network::TcpListenSocket>(address, options, params.bind_to_port);\n  } else {\n    return std::make_shared<Network::UdpListenSocket>(address, options, params.bind_to_port);\n  }\n}\n\nDrainManagerPtr 
ProdListenerComponentFactory::createDrainManager(\n    envoy::config::listener::v3::Listener::DrainType drain_type) {\n  return DrainManagerPtr{new DrainManagerImpl(server_, drain_type)};\n}\n\nDrainingFilterChainsManager::DrainingFilterChainsManager(ListenerImplPtr&& draining_listener,\n                                                         uint64_t workers_pending_removal)\n    : draining_listener_(std::move(draining_listener)),\n      workers_pending_removal_(workers_pending_removal) {}\n\nListenerManagerImpl::ListenerManagerImpl(Instance& server,\n                                         ListenerComponentFactory& listener_factory,\n                                         WorkerFactory& worker_factory,\n                                         bool enable_dispatcher_stats)\n    : server_(server), factory_(listener_factory),\n      scope_(server.stats().createScope(\"listener_manager.\")), stats_(generateStats(*scope_)),\n      config_tracker_entry_(server.admin().getConfigTracker().add(\n          \"listeners\", [this] { return dumpListenerConfigs(); })),\n      enable_dispatcher_stats_(enable_dispatcher_stats) {\n  for (uint32_t i = 0; i < server.options().concurrency(); i++) {\n    workers_.emplace_back(\n        worker_factory.createWorker(i, server.overloadManager(), absl::StrCat(\"worker_\", i)));\n  }\n}\n\nProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() {\n  auto config_dump = std::make_unique<envoy::admin::v3::ListenersConfigDump>();\n  config_dump->set_version_info(lds_api_ != nullptr ? 
lds_api_->versionInfo() : \"\");\n\n  using DynamicListener = envoy::admin::v3::ListenersConfigDump::DynamicListener;\n  using DynamicListenerState = envoy::admin::v3::ListenersConfigDump::DynamicListenerState;\n  absl::flat_hash_map<std::string, DynamicListener*> listener_map;\n\n  for (const auto& listener : active_listeners_) {\n    if (listener->blockRemove()) {\n      auto& static_listener = *config_dump->mutable_static_listeners()->Add();\n      static_listener.mutable_listener()->PackFrom(API_RECOVER_ORIGINAL(listener->config()));\n      TimestampUtil::systemClockToTimestamp(listener->last_updated_,\n                                            *(static_listener.mutable_last_updated()));\n      continue;\n    }\n    // Listeners are always added to active_listeners_ list before workers are started.\n    // This applies even when the listeners are still waiting for initialization.\n    // To avoid confusion in config dump, in that case, we add these listeners to warming\n    // listeners config dump rather than active ones.\n    DynamicListener* dynamic_listener =\n        getOrCreateDynamicListener(listener->name(), *config_dump, listener_map);\n\n    DynamicListenerState* dump_listener;\n    if (workers_started_) {\n      dump_listener = dynamic_listener->mutable_active_state();\n    } else {\n      dump_listener = dynamic_listener->mutable_warming_state();\n    }\n    fillState(*dump_listener, *listener);\n  }\n\n  for (const auto& listener : warming_listeners_) {\n    DynamicListener* dynamic_listener =\n        getOrCreateDynamicListener(listener->name(), *config_dump, listener_map);\n    DynamicListenerState* dump_listener = dynamic_listener->mutable_warming_state();\n    fillState(*dump_listener, *listener);\n  }\n\n  for (const auto& draining_listener : draining_listeners_) {\n    const auto& listener = draining_listener.listener_;\n    DynamicListener* dynamic_listener =\n        getOrCreateDynamicListener(listener->name(), *config_dump, 
listener_map);\n    DynamicListenerState* dump_listener = dynamic_listener->mutable_draining_state();\n    fillState(*dump_listener, *listener);\n  }\n\n  for (const auto& [error_name, error_state] : error_state_tracker_) {\n    DynamicListener* dynamic_listener =\n        getOrCreateDynamicListener(error_name, *config_dump, listener_map);\n\n    const envoy::admin::v3::UpdateFailureState& state = *error_state;\n    dynamic_listener->mutable_error_state()->CopyFrom(state);\n  }\n\n  // Dump errors not associated with named listeners.\n  for (const auto& error : overall_error_state_) {\n    config_dump->add_dynamic_listeners()->mutable_error_state()->CopyFrom(*error);\n  }\n\n  return config_dump;\n}\n\nListenerManagerStats ListenerManagerImpl::generateStats(Stats::Scope& scope) {\n  return {ALL_LISTENER_MANAGER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))};\n}\n\nbool ListenerManagerImpl::addOrUpdateListener(const envoy::config::listener::v3::Listener& config,\n                                              const std::string& version_info, bool added_via_api) {\n  RELEASE_ASSERT(\n      !config.address().has_envoy_internal_address(),\n      fmt::format(\"listener {} has envoy internal address {}. Internal address cannot be used by \"\n                  \"listener yet\",\n                  config.name(), config.address().envoy_internal_address().DebugString()));\n  // TODO(junr03): currently only one ApiListener can be installed via bootstrap to avoid having to\n  // build a collection of listeners, and to have to be able to warm and drain the listeners. 
In the\n  // future allow multiple ApiListeners, and allow them to be created via LDS as well as bootstrap.\n  if (config.has_api_listener()) {\n    if (!api_listener_ && !added_via_api) {\n      // TODO(junr03): dispatch to different concrete constructors when there are other\n      // ApiListenerImplBase derived classes.\n      api_listener_ = std::make_unique<HttpApiListener>(config, *this, config.name());\n      return true;\n    } else {\n      ENVOY_LOG(warn, \"listener {} can not be added because currently only one ApiListener is \"\n                      \"allowed, and it can only be added via bootstrap configuration\");\n      return false;\n    }\n  }\n\n  std::string name;\n  if (!config.name().empty()) {\n    name = config.name();\n  } else {\n    name = server_.api().randomGenerator().uuid();\n  }\n\n  auto it = error_state_tracker_.find(name);\n  try {\n    return addOrUpdateListenerInternal(config, version_info, added_via_api, name);\n  } catch (const EnvoyException& e) {\n    if (it == error_state_tracker_.end()) {\n      it = error_state_tracker_.emplace(name, std::make_unique<UpdateFailureState>()).first;\n    }\n    TimestampUtil::systemClockToTimestamp(server_.api().timeSource().systemTime(),\n                                          *(it->second->mutable_last_update_attempt()));\n    it->second->set_details(e.what());\n    it->second->mutable_failed_configuration()->PackFrom(API_RECOVER_ORIGINAL(config));\n    throw e;\n  }\n  error_state_tracker_.erase(it);\n  return false;\n}\n\nbool ListenerManagerImpl::addOrUpdateListenerInternal(\n    const envoy::config::listener::v3::Listener& config, const std::string& version_info,\n    bool added_via_api, const std::string& name) {\n\n  if (listenersStopped(config)) {\n    ENVOY_LOG(\n        debug,\n        \"listener {} can not be added because listeners in the traffic direction {} are stopped\",\n        name, envoy::config::core::v3::TrafficDirection_Name(config.traffic_direction()));\n    return 
false;\n  }\n\n  const uint64_t hash = MessageUtil::hash(config);\n  ENVOY_LOG(debug, \"begin add/update listener: name={} hash={}\", name, hash);\n\n  auto existing_active_listener = getListenerByName(active_listeners_, name);\n  auto existing_warming_listener = getListenerByName(warming_listeners_, name);\n\n  // The listener should be updated back to its original state and the warming listener should be\n  // removed.\n  if (existing_warming_listener != warming_listeners_.end() &&\n      existing_active_listener != active_listeners_.end() &&\n      (*existing_active_listener)->blockUpdate(hash)) {\n    warming_listeners_.erase(existing_warming_listener);\n    updateWarmingActiveGauges();\n    stats_.listener_modified_.inc();\n    return true;\n  }\n\n  // Do a quick blocked update check before going further. This check needs to be done against both\n  // warming and active.\n  if ((existing_warming_listener != warming_listeners_.end() &&\n       (*existing_warming_listener)->blockUpdate(hash)) ||\n      (existing_active_listener != active_listeners_.end() &&\n       (*existing_active_listener)->blockUpdate(hash))) {\n    ENVOY_LOG(debug, \"duplicate/locked listener '{}'. 
no add/update\", name);\n    return false;\n  }\n\n  ListenerImplPtr new_listener = nullptr;\n\n  // In place filter chain update depends on the active listener at worker.\n  if (existing_active_listener != active_listeners_.end() &&\n      (*existing_active_listener)->supportUpdateFilterChain(config, workers_started_)) {\n    ENVOY_LOG(debug, \"use in place update filter chain update path for listener name={} hash={}\",\n              name, hash);\n    new_listener =\n        (*existing_active_listener)->newListenerWithFilterChain(config, workers_started_, hash);\n    stats_.listener_in_place_updated_.inc();\n  } else {\n    ENVOY_LOG(debug, \"use full listener update path for listener name={} hash={}\", name, hash);\n    new_listener =\n        std::make_unique<ListenerImpl>(config, version_info, *this, name, added_via_api,\n                                       workers_started_, hash, server_.options().concurrency());\n  }\n\n  ListenerImpl& new_listener_ref = *new_listener;\n\n  // We mandate that a listener with the same name must have the same configured address. This\n  // avoids confusion during updates and allows us to use the same bound address. 
Note that in\n  // the case of port 0 binding, the new listener will implicitly use the same bound port from\n  // the existing listener.\n  bool active_listener_exists = false;\n  bool warming_listener_exists = false;\n  if (existing_warming_listener != warming_listeners_.end() &&\n      *(*existing_warming_listener)->address() != *new_listener->address()) {\n    warming_listener_exists = true;\n  }\n  if (existing_active_listener != active_listeners_.end() &&\n      *(*existing_active_listener)->address() != *new_listener->address()) {\n    active_listener_exists = true;\n  }\n  if (active_listener_exists || warming_listener_exists) {\n    const std::string message =\n        fmt::format(\"error updating listener: '{}' has a different address '{}' from existing \"\n                    \"listener address '{}'\",\n                    name, new_listener->address()->asString(),\n                    warming_listener_exists ? (*existing_warming_listener)->address()->asString()\n                                            : (*existing_active_listener)->address()->asString());\n    ENVOY_LOG(warn, \"{}\", message);\n    throw EnvoyException(message);\n  }\n\n  bool added = false;\n  if (existing_warming_listener != warming_listeners_.end()) {\n    // In this case we can just replace inline.\n    ASSERT(workers_started_);\n    new_listener->debugLog(\"update warming listener\");\n    new_listener->setSocketFactory((*existing_warming_listener)->getSocketFactory());\n    *existing_warming_listener = std::move(new_listener);\n  } else if (existing_active_listener != active_listeners_.end()) {\n    // In this case we have no warming listener, so what we do depends on whether workers\n    // have been started or not. 
Either way we get the socket from the existing listener.\n    new_listener->setSocketFactory((*existing_active_listener)->getSocketFactory());\n    if (workers_started_) {\n      new_listener->debugLog(\"add warming listener\");\n      warming_listeners_.emplace_back(std::move(new_listener));\n    } else {\n      new_listener->debugLog(\"update active listener\");\n      *existing_active_listener = std::move(new_listener);\n    }\n  } else {\n    // Typically we catch address issues when we try to bind to the same address multiple times.\n    // However, for listeners that do not bind we must check to make sure we are not duplicating.\n    // This is an edge case and nothing will explicitly break, but there is no possibility that\n    // two listeners that do not bind will ever be used. Only the first one will be used when\n    // searched for by address. Thus we block it.\n    if (!new_listener->bindToPort() &&\n        (hasListenerWithAddress(warming_listeners_, *new_listener->address()) ||\n         hasListenerWithAddress(active_listeners_, *new_listener->address()))) {\n      const std::string message =\n          fmt::format(\"error adding listener: '{}' has duplicate address '{}' as existing listener\",\n                      name, new_listener->address()->asString());\n      ENVOY_LOG(warn, \"{}\", message);\n      throw EnvoyException(message);\n    }\n\n    // We have no warming or active listener so we need to make a new one. What we do depends on\n    // whether workers have been started or not. Additionally, search through draining listeners\n    // to see if there is a listener that has a socket factory for the same address we are\n    // configured for and doesn't use SO_REUSEPORT. This is an edge case, but may happen if a\n    // listener is removed and then added back with a same or different name and intended to listen\n    // on the same address. 
This should work and not fail.\n    Network::ListenSocketFactorySharedPtr draining_listen_socket_factory;\n    auto existing_draining_listener = std::find_if(\n        draining_listeners_.cbegin(), draining_listeners_.cend(),\n        [&new_listener](const DrainingListener& listener) {\n          return listener.listener_->listenSocketFactory().sharedSocket().has_value() &&\n                 listener.listener_->listenSocketFactory().sharedSocket()->get().isOpen() &&\n                 *new_listener->address() ==\n                     *listener.listener_->listenSocketFactory().localAddress();\n        });\n\n    if (existing_draining_listener != draining_listeners_.cend()) {\n      draining_listen_socket_factory = existing_draining_listener->listener_->getSocketFactory();\n    }\n\n    Network::Socket::Type socket_type =\n        Network::Utility::protobufAddressSocketType(config.address());\n    new_listener->setSocketFactory(\n        draining_listen_socket_factory\n            ? draining_listen_socket_factory\n            : createListenSocketFactory(config.address(), *new_listener,\n                                        (socket_type == Network::Socket::Type::Datagram) ||\n                                            config.reuse_port()));\n    if (workers_started_) {\n      new_listener->debugLog(\"add warming listener\");\n      warming_listeners_.emplace_back(std::move(new_listener));\n    } else {\n      new_listener->debugLog(\"add active listener\");\n      active_listeners_.emplace_back(std::move(new_listener));\n    }\n\n    added = true;\n  }\n\n  updateWarmingActiveGauges();\n  if (added) {\n    stats_.listener_added_.inc();\n  } else {\n    stats_.listener_modified_.inc();\n  }\n\n  new_listener_ref.initialize();\n  return true;\n}\n\nbool ListenerManagerImpl::hasListenerWithAddress(const ListenerList& list,\n                                                 const Network::Address::Instance& address) {\n  for (const auto& listener : list) {\n    if 
(*listener->address() == address) {\n      return true;\n    }\n  }\n  return false;\n}\n\nbool ListenerManagerImpl::shareSocketWithOtherListener(\n    const ListenerList& list, const Network::ListenSocketFactorySharedPtr& socket_factory) {\n  ASSERT(socket_factory->sharedSocket().has_value());\n  for (const auto& listener : list) {\n    if (listener->getSocketFactory() == socket_factory) {\n      return true;\n    }\n  }\n  return false;\n}\n\nvoid ListenerManagerImpl::drainListener(ListenerImplPtr&& listener) {\n  // First add the listener to the draining list.\n  std::list<DrainingListener>::iterator draining_it = draining_listeners_.emplace(\n      draining_listeners_.begin(), std::move(listener), workers_.size());\n\n  // Using set() avoids a multiple modifiers problem during the multiple processes phase of hot\n  // restart. Same below inside the lambda.\n  stats_.total_listeners_draining_.set(draining_listeners_.size());\n\n  // Tell all workers to stop accepting new connections on this listener.\n  draining_it->listener_->debugLog(\"draining listener\");\n  const uint64_t listener_tag = draining_it->listener_->listenerTag();\n  stopListener(\n      *draining_it->listener_,\n      [this,\n       share_socket = draining_it->listener_->listenSocketFactory().sharedSocket().has_value(),\n       listener_tag]() {\n        if (!share_socket) {\n          // Each listener has its individual socket and closes the socket on its own.\n          return;\n        }\n        for (auto& listener : draining_listeners_) {\n          if (listener.listener_->listenerTag() == listener_tag) {\n            // Handle the edge case when new listener is added for the same address as the drained\n            // one. 
In this case the socket is shared between both listeners so one should avoid\n            // closing it.\n            const auto& socket_factory = listener.listener_->getSocketFactory();\n            if (!shareSocketWithOtherListener(active_listeners_, socket_factory) &&\n                !shareSocketWithOtherListener(warming_listeners_, socket_factory)) {\n              // Close the socket iff it is not used anymore.\n              ASSERT(listener.listener_->listenSocketFactory().sharedSocket().has_value());\n              listener.listener_->listenSocketFactory().sharedSocket()->get().close();\n            }\n          }\n        }\n      });\n\n  // Start the drain sequence which completes when the listener's drain manager has completed\n  // draining at whatever the server configured drain times are.\n  draining_it->listener_->localDrainManager().startDrainSequence([this, draining_it]() -> void {\n    draining_it->listener_->debugLog(\"removing draining listener\");\n    for (const auto& worker : workers_) {\n      // Once the drain time has completed via the drain manager's timer, we tell the workers\n      // to remove the listener.\n      worker->removeListener(*draining_it->listener_, [this, draining_it]() -> void {\n        // The remove listener completion is called on the worker thread. We post back to the\n        // main thread to avoid locking. 
This makes sure that we don't destroy the listener\n        // while filters might still be using its context (stats, etc.).\n        server_.dispatcher().post([this, draining_it]() -> void {\n          // TODO(lambdai): Resolve race condition below.\n          // Consider the below events in global sequence order\n          // main thread: calling drainListener\n          // work thread: deferred delete the active connection\n          // work thread: post to main that the drain is done\n          // main thread: erase the listener\n          // worker thread: execute destroying connection when the shared listener config is\n          // destroyed at step 4 (could be worse such as access the connection because connection is\n          // not yet started to deleted). The race condition is introduced because 3 occurs too\n          // early. My solution is to defer schedule the callback posting to main thread, by\n          // introducing DeferTaskUtil. So that 5 should always happen before 3.\n          if (--draining_it->workers_pending_removal_ == 0) {\n            draining_it->listener_->debugLog(\"draining listener removal complete\");\n            draining_listeners_.erase(draining_it);\n            stats_.total_listeners_draining_.set(draining_listeners_.size());\n          }\n        });\n      });\n    }\n  });\n\n  updateWarmingActiveGauges();\n}\n\nListenerManagerImpl::ListenerList::iterator\nListenerManagerImpl::getListenerByName(ListenerList& listeners, const std::string& name) {\n  auto ret = listeners.end();\n  for (auto it = listeners.begin(); it != listeners.end(); ++it) {\n    if ((*it)->name() == name) {\n      // There should only ever be a single listener per name in the list. 
We could return faster\n      // but take the opportunity to assert that fact.\n      ASSERT(ret == listeners.end());\n      ret = it;\n    }\n  }\n  return ret;\n}\n\nstd::vector<std::reference_wrapper<Network::ListenerConfig>>\nListenerManagerImpl::listeners(ListenerState state) {\n  std::vector<std::reference_wrapper<Network::ListenerConfig>> ret;\n\n  size_t size = 0;\n  size += state & WARMING ? warming_listeners_.size() : 0;\n  size += state & ACTIVE ? active_listeners_.size() : 0;\n  size += state & DRAINING ? draining_listeners_.size() : 0;\n  ret.reserve(size);\n\n  if (state & WARMING) {\n    for (const auto& listener : warming_listeners_) {\n      ret.push_back(*listener);\n    }\n  }\n  if (state & ACTIVE) {\n    for (const auto& listener : active_listeners_) {\n      ret.push_back(*listener);\n    }\n  }\n  if (state & DRAINING) {\n    for (const auto& draining_listener : draining_listeners_) {\n      ret.push_back(*(draining_listener.listener_));\n    }\n  }\n  return ret;\n}\n\nvoid ListenerManagerImpl::addListenerToWorker(Worker& worker,\n                                              absl::optional<uint64_t> overridden_listener,\n                                              ListenerImpl& listener,\n                                              ListenerCompletionCallback completion_callback) {\n  if (overridden_listener.has_value()) {\n    ENVOY_LOG(debug, \"replacing existing listener {}\", overridden_listener.value());\n    worker.addListener(overridden_listener, listener, [this, completion_callback](bool) -> void {\n      server_.dispatcher().post([this, completion_callback]() -> void {\n        stats_.listener_create_success_.inc();\n        if (completion_callback) {\n          completion_callback();\n        }\n      });\n    });\n    return;\n  }\n  worker.addListener(\n      overridden_listener, listener, [this, &listener, completion_callback](bool success) -> void {\n        // The add listener completion runs on the worker thread. 
Post back to the main thread to\n        // avoid locking.\n        server_.dispatcher().post([this, success, &listener, completion_callback]() -> void {\n          // It is possible for a listener to get added on 1 worker but not the others. The below\n          // check with onListenerCreateFailure() is there to ensure we execute the\n          // removal/logging/stats at most once on failure. Note also that drain/removal can race\n          // with addition. It's guaranteed that workers process remove after add so this should be\n          // fine.\n          //\n          // TODO(mattklein123): We should consider rewriting how listener sockets are added to\n          // workers, especially in the case of reuse port. If we were to create all needed\n          // listener sockets on the main thread (even in the case of reuse port) we could catch\n          // almost all socket errors here. This would both greatly simplify the logic and allow\n          // for xDS NACK in most cases.\n          if (!success && !listener.onListenerCreateFailure()) {\n            ENVOY_LOG(error, \"listener '{}' failed to listen on address '{}' on worker\",\n                      listener.name(), listener.listenSocketFactory().localAddress()->asString());\n            stats_.listener_create_failure_.inc();\n            removeListenerInternal(listener.name(), false);\n          }\n          if (success) {\n            stats_.listener_create_success_.inc();\n          }\n          if (completion_callback) {\n            completion_callback();\n          }\n        });\n      });\n}\n\nvoid ListenerManagerImpl::onListenerWarmed(ListenerImpl& listener) {\n  // The warmed listener should be added first so that the worker will accept new connections\n  // when it stops listening on the old listener.\n  for (const auto& worker : workers_) {\n    addListenerToWorker(*worker, absl::nullopt, listener, nullptr);\n  }\n\n  auto existing_active_listener = getListenerByName(active_listeners_, 
listener.name());\n  auto existing_warming_listener = getListenerByName(warming_listeners_, listener.name());\n\n  (*existing_warming_listener)->debugLog(\"warm complete. updating active listener\");\n  if (existing_active_listener != active_listeners_.end()) {\n    // Finish active_listeners_ transformation before calling `drainListener` as it depends on their\n    // state.\n    auto listener = std::move(*existing_active_listener);\n    *existing_active_listener = std::move(*existing_warming_listener);\n    drainListener(std::move(listener));\n  } else {\n    active_listeners_.emplace_back(std::move(*existing_warming_listener));\n  }\n\n  warming_listeners_.erase(existing_warming_listener);\n  updateWarmingActiveGauges();\n}\n\nvoid ListenerManagerImpl::inPlaceFilterChainUpdate(ListenerImpl& listener) {\n  auto existing_active_listener = getListenerByName(active_listeners_, listener.name());\n  auto existing_warming_listener = getListenerByName(warming_listeners_, listener.name());\n  ASSERT(existing_warming_listener != warming_listeners_.end());\n  ASSERT(*existing_warming_listener != nullptr);\n\n  (*existing_warming_listener)->debugLog(\"execute in place filter chain update\");\n\n  // Now that in place filter chain update was decided, the replaced listener must be in active\n  // list. 
It requires stop/remove listener procedure cancelling the in placed update if any.\n  ASSERT(existing_active_listener != active_listeners_.end());\n  ASSERT(*existing_active_listener != nullptr);\n\n  for (const auto& worker : workers_) {\n    // Explicitly override the existing listener with a new listener config.\n    addListenerToWorker(*worker, listener.listenerTag(), listener, nullptr);\n  }\n\n  auto previous_listener = std::move(*existing_active_listener);\n  *existing_active_listener = std::move(*existing_warming_listener);\n  // Finish active_listeners_ transformation before calling `drainFilterChains` as it depends on\n  // their state.\n  drainFilterChains(std::move(previous_listener), **existing_active_listener);\n\n  warming_listeners_.erase(existing_warming_listener);\n  updateWarmingActiveGauges();\n}\n\nvoid ListenerManagerImpl::drainFilterChains(ListenerImplPtr&& draining_listener,\n                                            ListenerImpl& new_listener) {\n  // First add the listener to the draining list.\n  std::list<DrainingFilterChainsManager>::iterator draining_group =\n      draining_filter_chains_manager_.emplace(draining_filter_chains_manager_.begin(),\n                                              std::move(draining_listener), workers_.size());\n  draining_group->getDrainingListener().diffFilterChain(\n      new_listener, [&draining_group](Network::DrainableFilterChain& filter_chain) mutable {\n        filter_chain.startDraining();\n        draining_group->addFilterChainToDrain(filter_chain);\n      });\n  auto filter_chain_size = draining_group->numDrainingFilterChains();\n  stats_.total_filter_chains_draining_.add(filter_chain_size);\n  draining_group->getDrainingListener().debugLog(\n      absl::StrCat(\"draining \", filter_chain_size, \" filter chains in listener \",\n                   draining_group->getDrainingListener().name()));\n\n  // Start the drain sequence which completes when the listener's drain manager has completed\n  // 
draining at whatever the server configured drain times are.\n  draining_group->startDrainSequence(\n      server_.options().drainTime(), server_.dispatcher(), [this, draining_group]() -> void {\n        draining_group->getDrainingListener().debugLog(\n            absl::StrCat(\"removing draining filter chains from listener \",\n                         draining_group->getDrainingListener().name()));\n        for (const auto& worker : workers_) {\n          // Once the drain time has completed via the drain manager's timer, we tell the workers\n          // to remove the filter chains.\n          worker->removeFilterChains(\n              draining_group->getDrainingListenerTag(), draining_group->getDrainingFilterChains(),\n              [this, draining_group]() -> void {\n                // The remove listener completion is called on the worker thread. We post back to\n                // the main thread to avoid locking. This makes sure that we don't destroy the\n                // listener while filters might still be using its context (stats, etc.).\n                server_.dispatcher().post([this, draining_group]() -> void {\n                  if (draining_group->decWorkersPendingRemoval() == 0) {\n                    draining_group->getDrainingListener().debugLog(\n                        absl::StrCat(\"draining filter chains from listener \",\n                                     draining_group->getDrainingListener().name(), \" complete\"));\n                    stats_.total_filter_chains_draining_.sub(\n                        draining_group->numDrainingFilterChains());\n                    draining_filter_chains_manager_.erase(draining_group);\n                  }\n                });\n              });\n        }\n      });\n  updateWarmingActiveGauges();\n}\n\nuint64_t ListenerManagerImpl::numConnections() const {\n  uint64_t num_connections = 0;\n  for (const auto& worker : workers_) {\n    num_connections += worker->numConnections();\n  }\n\n  return 
num_connections;\n}\n\nbool ListenerManagerImpl::removeListener(const std::string& name) {\n  return removeListenerInternal(name, true);\n}\n\nbool ListenerManagerImpl::removeListenerInternal(const std::string& name,\n                                                 bool dynamic_listeners_only) {\n  ENVOY_LOG(debug, \"begin remove listener: name={}\", name);\n\n  auto existing_active_listener = getListenerByName(active_listeners_, name);\n  auto existing_warming_listener = getListenerByName(warming_listeners_, name);\n  if ((existing_warming_listener == warming_listeners_.end() ||\n       (dynamic_listeners_only && (*existing_warming_listener)->blockRemove())) &&\n      (existing_active_listener == active_listeners_.end() ||\n       (dynamic_listeners_only && (*existing_active_listener)->blockRemove()))) {\n    ENVOY_LOG(debug, \"unknown/locked listener '{}'. no remove\", name);\n    return false;\n  }\n\n  // Destroy a warming listener directly.\n  if (existing_warming_listener != warming_listeners_.end()) {\n    (*existing_warming_listener)->debugLog(\"removing warming listener\");\n    warming_listeners_.erase(existing_warming_listener);\n  }\n\n  // If there is an active listener it needs to be moved to draining after workers have started, or\n  // destroyed directly.\n  if (existing_active_listener != active_listeners_.end()) {\n    // Listeners in active_listeners_ are added to workers after workers start, so we drain\n    // listeners only after this occurs.\n    // Finish active_listeners_ transformation before calling `drainListener` as it depends on their\n    // state.\n    auto listener = std::move(*existing_active_listener);\n    active_listeners_.erase(existing_active_listener);\n    if (workers_started_) {\n      drainListener(std::move(listener));\n    }\n  }\n\n  stats_.listener_removed_.inc();\n  updateWarmingActiveGauges();\n  return true;\n}\n\nvoid ListenerManagerImpl::startWorkers(GuardDog& guard_dog) {\n  ENVOY_LOG(info, \"all dependencies 
initialized. starting workers\");\n  ASSERT(!workers_started_);\n  workers_started_ = true;\n  uint32_t i = 0;\n\n  // We can not use \"Cleanup\" to simplify this logic here, because it results in a issue if Envoy is\n  // killed before workers are actually started. Specifically the AdminRequestGetStatsAndKill test\n  // case in main_common_test fails with ASAN error if we use \"Cleanup\" here.\n  const auto listeners_pending_init =\n      std::make_shared<std::atomic<uint64_t>>(workers_.size() * active_listeners_.size());\n  for (const auto& worker : workers_) {\n    ENVOY_LOG(debug, \"starting worker {}\", i);\n    ASSERT(warming_listeners_.empty());\n    for (const auto& listener : active_listeners_) {\n      addListenerToWorker(*worker, absl::nullopt, *listener, [this, listeners_pending_init]() {\n        if (--(*listeners_pending_init) == 0) {\n          stats_.workers_started_.set(1);\n        }\n      });\n    }\n    worker->start(guard_dog);\n    if (enable_dispatcher_stats_) {\n      worker->initializeStats(*scope_);\n    }\n    i++;\n  }\n  if (active_listeners_.empty()) {\n    stats_.workers_started_.set(1);\n  }\n}\n\nvoid ListenerManagerImpl::stopListener(Network::ListenerConfig& listener,\n                                       std::function<void()> callback) {\n  const auto workers_pending_stop = std::make_shared<std::atomic<uint64_t>>(workers_.size());\n  for (const auto& worker : workers_) {\n    worker->stopListener(listener, [this, callback, workers_pending_stop]() {\n      if (--(*workers_pending_stop) == 0) {\n        server_.dispatcher().post(callback);\n      }\n    });\n  }\n}\n\nvoid ListenerManagerImpl::stopListeners(StopListenersType stop_listeners_type) {\n  stop_listeners_type_ = stop_listeners_type;\n  for (Network::ListenerConfig& listener : listeners()) {\n    if (stop_listeners_type != StopListenersType::InboundOnly ||\n        listener.direction() == envoy::config::core::v3::INBOUND) {\n      ENVOY_LOG(debug, \"begin stop listener: 
name={}\", listener.name());\n      auto existing_warming_listener = getListenerByName(warming_listeners_, listener.name());\n      // Destroy a warming listener directly.\n      if (existing_warming_listener != warming_listeners_.end()) {\n        (*existing_warming_listener)->debugLog(\"removing warming listener\");\n        warming_listeners_.erase(existing_warming_listener);\n      }\n      // Close the socket once all workers stopped accepting its connections.\n      // This allows clients to fast fail instead of waiting in the accept queue.\n      const uint64_t listener_tag = listener.listenerTag();\n      stopListener(listener,\n                   [this, share_socket = listener.listenSocketFactory().sharedSocket().has_value(),\n                    listener_tag]() {\n                     stats_.listener_stopped_.inc();\n                     if (!share_socket) {\n                       // Each listener has its own socket and closes the socket\n                       // on its own.\n                       return;\n                     }\n                     for (auto& listener : active_listeners_) {\n                       if (listener->listenerTag() == listener_tag) {\n                         listener->listenSocketFactory().sharedSocket()->get().close();\n                       }\n                     }\n                   });\n    }\n  }\n}\n\nvoid ListenerManagerImpl::stopWorkers() {\n  if (!workers_started_) {\n    return;\n  }\n  for (const auto& worker : workers_) {\n    worker->stop();\n  }\n}\n\nvoid ListenerManagerImpl::endListenerUpdate(FailureStates&& failure_states) {\n  overall_error_state_ = std::move(failure_states);\n}\n\nListenerFilterChainFactoryBuilder::ListenerFilterChainFactoryBuilder(\n    ListenerImpl& listener,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context)\n    : ListenerFilterChainFactoryBuilder(listener.validation_visitor_, listener.parent_.factory_,\n                                        
factory_context) {}\n\nListenerFilterChainFactoryBuilder::ListenerFilterChainFactoryBuilder(\n    ProtobufMessage::ValidationVisitor& validator,\n    ListenerComponentFactory& listener_component_factory,\n    Server::Configuration::TransportSocketFactoryContextImpl& factory_context)\n    : validator_(validator), listener_component_factory_(listener_component_factory),\n      factory_context_(factory_context) {}\n\nNetwork::DrainableFilterChainSharedPtr ListenerFilterChainFactoryBuilder::buildFilterChain(\n    const envoy::config::listener::v3::FilterChain& filter_chain,\n    FilterChainFactoryContextCreator& context_creator) const {\n  return buildFilterChainInternal(filter_chain,\n                                  context_creator.createFilterChainFactoryContext(&filter_chain));\n}\n\nNetwork::DrainableFilterChainSharedPtr ListenerFilterChainFactoryBuilder::buildFilterChainInternal(\n    const envoy::config::listener::v3::FilterChain& filter_chain,\n    Configuration::FilterChainFactoryContextPtr&& filter_chain_factory_context) const {\n  // If the cluster doesn't have transport socket configured, then use the default \"raw_buffer\"\n  // transport socket or BoringSSL-based \"tls\" transport socket if TLS settings are configured.\n  // We copy by value first then override if necessary.\n  auto transport_socket = filter_chain.transport_socket();\n  if (!filter_chain.has_transport_socket()) {\n    if (filter_chain.has_hidden_envoy_deprecated_tls_context()) {\n      transport_socket.set_name(Extensions::TransportSockets::TransportSocketNames::get().Tls);\n      transport_socket.mutable_typed_config()->PackFrom(\n          filter_chain.hidden_envoy_deprecated_tls_context());\n    } else {\n      transport_socket.set_name(\n          Extensions::TransportSockets::TransportSocketNames::get().RawBuffer);\n    }\n  }\n\n  auto& config_factory = Config::Utility::getAndCheckFactory<\n      Server::Configuration::DownstreamTransportSocketConfigFactory>(transport_socket);\n  
ProtobufTypes::MessagePtr message =\n      Config::Utility::translateToFactoryConfig(transport_socket, validator_, config_factory);\n\n  std::vector<std::string> server_names(filter_chain.filter_chain_match().server_names().begin(),\n                                        filter_chain.filter_chain_match().server_names().end());\n\n  auto filter_chain_res =\n      std::make_unique<FilterChainImpl>(config_factory.createTransportSocketFactory(\n                                            *message, factory_context_, std::move(server_names)),\n                                        listener_component_factory_.createNetworkFilterFactoryList(\n                                            filter_chain.filters(), *filter_chain_factory_context));\n  filter_chain_res->setFilterChainFactoryContext(std::move(filter_chain_factory_context));\n  return filter_chain_res;\n}\n\nNetwork::ListenSocketFactorySharedPtr ListenerManagerImpl::createListenSocketFactory(\n    const envoy::config::core::v3::Address& proto_address, ListenerImpl& listener,\n    bool reuse_port) {\n  Network::Socket::Type socket_type = Network::Utility::protobufAddressSocketType(proto_address);\n  return std::make_shared<ListenSocketFactoryImpl>(\n      factory_, listener.address(), socket_type, listener.listenSocketOptions(),\n      listener.bindToPort(), listener.name(), reuse_port);\n}\n\nApiListenerOptRef ListenerManagerImpl::apiListener() {\n  return api_listener_ ? ApiListenerOptRef(std::ref(*api_listener_)) : absl::nullopt;\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/listener_manager_impl.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/server/api_listener.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/instance.h\"\n#include \"envoy/server/listener_manager.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/server/worker.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"server/filter_chain_factory_context_callback.h\"\n#include \"server/filter_chain_manager_impl.h\"\n#include \"server/lds_api.h\"\n#include \"server/listener_impl.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace Configuration {\nclass TransportSocketFactoryContextImpl;\n}\n\nclass ListenerFilterChainFactoryBuilder;\n\n/**\n * Prod implementation of ListenerComponentFactory that creates real sockets and attempts to fetch\n * sockets from the parent process via the hot restarter. 
The filter factory list is created from\n * statically registered filters.\n */\nclass ProdListenerComponentFactory : public ListenerComponentFactory,\n                                     Logger::Loggable<Logger::Id::config> {\npublic:\n  ProdListenerComponentFactory(Instance& server) : server_(server) {}\n\n  /**\n   * Static worker for createNetworkFilterFactoryList() that can be used directly in tests.\n   */\n  static std::vector<Network::FilterFactoryCb> createNetworkFilterFactoryList_(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>& filters,\n      Configuration::FilterChainFactoryContext& filter_chain_factory_context);\n\n  /**\n   * Static worker for createListenerFilterFactoryList() that can be used directly in tests.\n   */\n  static std::vector<Network::ListenerFilterFactoryCb> createListenerFilterFactoryList_(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n      Configuration::ListenerFactoryContext& context);\n\n  /**\n   * Static worker for createUdpListenerFilterFactoryList() that can be used directly in tests.\n   */\n  static std::vector<Network::UdpListenerFilterFactoryCb> createUdpListenerFilterFactoryList_(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n      Configuration::ListenerFactoryContext& context);\n\n  static Network::ListenerFilterMatcherSharedPtr\n  createListenerFilterMatcher(const envoy::config::listener::v3::ListenerFilter& listener_filter);\n\n  // Server::ListenerComponentFactory\n  LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config,\n                         const udpa::core::v1::ResourceLocator* lds_resources_locator) override {\n    return std::make_unique<LdsApiImpl>(\n        lds_config, lds_resources_locator, server_.clusterManager(), server_.initManager(),\n        server_.stats(), server_.listenerManager(),\n        
server_.messageValidationContext().dynamicValidationVisitor());\n  }\n  std::vector<Network::FilterFactoryCb> createNetworkFilterFactoryList(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>& filters,\n      Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context) override {\n    return createNetworkFilterFactoryList_(filters, filter_chain_factory_context);\n  }\n  std::vector<Network::ListenerFilterFactoryCb> createListenerFilterFactoryList(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n      Configuration::ListenerFactoryContext& context) override {\n    return createListenerFilterFactoryList_(filters, context);\n  }\n  std::vector<Network::UdpListenerFilterFactoryCb> createUdpListenerFilterFactoryList(\n      const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>& filters,\n      Configuration::ListenerFactoryContext& context) override {\n    return createUdpListenerFilterFactoryList_(filters, context);\n  }\n\n  Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr address,\n                                              Network::Socket::Type socket_type,\n                                              const Network::Socket::OptionsSharedPtr& options,\n                                              const ListenSocketCreationParams& params) override;\n\n  DrainManagerPtr\n  createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) override;\n  uint64_t nextListenerTag() override { return next_listener_tag_++; }\n\nprivate:\n  Instance& server_;\n  uint64_t next_listener_tag_{1};\n};\n\nclass ListenerImpl;\nusing ListenerImplPtr = std::unique_ptr<ListenerImpl>;\n\n/**\n * All listener manager stats. 
@see stats_macros.h\n */\n#define ALL_LISTENER_MANAGER_STATS(COUNTER, GAUGE)                                                 \\\n  COUNTER(listener_added)                                                                          \\\n  COUNTER(listener_create_failure)                                                                 \\\n  COUNTER(listener_create_success)                                                                 \\\n  COUNTER(listener_in_place_updated)                                                               \\\n  COUNTER(listener_modified)                                                                       \\\n  COUNTER(listener_removed)                                                                        \\\n  COUNTER(listener_stopped)                                                                        \\\n  GAUGE(total_filter_chains_draining, NeverImport)                                                 \\\n  GAUGE(total_listeners_active, NeverImport)                                                       \\\n  GAUGE(total_listeners_draining, NeverImport)                                                     \\\n  GAUGE(total_listeners_warming, NeverImport)                                                      \\\n  GAUGE(workers_started, NeverImport)\n\n/**\n * Struct definition for all listener manager stats. 
@see stats_macros.h\n */\nstruct ListenerManagerStats {\n  ALL_LISTENER_MANAGER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)\n};\n\n/**\n * Provides the draining filter chains and the functionality to schedule listener destroy.\n */\nclass DrainingFilterChainsManager {\npublic:\n  DrainingFilterChainsManager(ListenerImplPtr&& draining_listener,\n                              uint64_t workers_pending_removal);\n  uint64_t getDrainingListenerTag() const { return draining_listener_->listenerTag(); }\n  const std::list<const Network::FilterChain*>& getDrainingFilterChains() const {\n    return draining_filter_chains_;\n  }\n  ListenerImpl& getDrainingListener() { return *draining_listener_; }\n  uint64_t decWorkersPendingRemoval() { return --workers_pending_removal_; }\n\n  // Schedule listener destroy.\n  void startDrainSequence(std::chrono::seconds drain_time, Event::Dispatcher& dispatcher,\n                          std::function<void()> completion) {\n    drain_sequence_completion_ = completion;\n    ASSERT(!drain_timer_);\n\n    drain_timer_ = dispatcher.createTimer([this]() -> void { drain_sequence_completion_(); });\n    drain_timer_->enableTimer(drain_time);\n  }\n\n  void addFilterChainToDrain(const Network::FilterChain& filter_chain) {\n    draining_filter_chains_.push_back(&filter_chain);\n  }\n\n  uint32_t numDrainingFilterChains() const { return draining_filter_chains_.size(); }\n\nprivate:\n  ListenerImplPtr draining_listener_;\n  std::list<const Network::FilterChain*> draining_filter_chains_;\n\n  uint64_t workers_pending_removal_;\n  Event::TimerPtr drain_timer_;\n  std::function<void()> drain_sequence_completion_;\n};\n\n/**\n * Implementation of ListenerManager.\n */\nclass ListenerManagerImpl : public ListenerManager, Logger::Loggable<Logger::Id::config> {\npublic:\n  ListenerManagerImpl(Instance& server, ListenerComponentFactory& listener_factory,\n                      WorkerFactory& worker_factory, bool enable_dispatcher_stats);\n\n  void 
onListenerWarmed(ListenerImpl& listener);\n  void inPlaceFilterChainUpdate(ListenerImpl& listener);\n\n  // Server::ListenerManager\n  bool addOrUpdateListener(const envoy::config::listener::v3::Listener& config,\n                           const std::string& version_info, bool added_via_api) override;\n  void createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config,\n                    const udpa::core::v1::ResourceLocator* lds_resources_locator) override {\n    ASSERT(lds_api_ == nullptr);\n    lds_api_ = factory_.createLdsApi(lds_config, lds_resources_locator);\n  }\n  std::vector<std::reference_wrapper<Network::ListenerConfig>>\n  listeners(ListenerState state = ListenerState::ACTIVE) override;\n  uint64_t numConnections() const override;\n  bool removeListener(const std::string& listener_name) override;\n  void startWorkers(GuardDog& guard_dog) override;\n  void stopListeners(StopListenersType stop_listeners_type) override;\n  void stopWorkers() override;\n  void beginListenerUpdate() override { error_state_tracker_.clear(); }\n  void endListenerUpdate(FailureStates&& failure_state) override;\n  bool isWorkerStarted() override { return workers_started_; }\n  Http::Context& httpContext() { return server_.httpContext(); }\n  ApiListenerOptRef apiListener() override;\n\n  Instance& server_;\n  ListenerComponentFactory& factory_;\n\nprivate:\n  using ListenerList = std::list<ListenerImplPtr>;\n  /**\n   * Callback invoked when a listener initialization is completed on worker.\n   */\n  using ListenerCompletionCallback = std::function<void()>;\n\n  bool addOrUpdateListenerInternal(const envoy::config::listener::v3::Listener& config,\n                                   const std::string& version_info, bool added_via_api,\n                                   const std::string& name);\n  bool removeListenerInternal(const std::string& listener_name, bool dynamic_listeners_only);\n\n  struct DrainingListener {\n    DrainingListener(ListenerImplPtr&& 
listener, uint64_t workers_pending_removal)\n        : listener_(std::move(listener)), workers_pending_removal_(workers_pending_removal) {}\n\n    ListenerImplPtr listener_;\n    uint64_t workers_pending_removal_;\n  };\n\n  void addListenerToWorker(Worker& worker, absl::optional<uint64_t> overridden_listener,\n                           ListenerImpl& listener, ListenerCompletionCallback completion_callback);\n\n  ProtobufTypes::MessagePtr dumpListenerConfigs();\n  static ListenerManagerStats generateStats(Stats::Scope& scope);\n  static bool hasListenerWithAddress(const ListenerList& list,\n                                     const Network::Address::Instance& address);\n  static bool\n  shareSocketWithOtherListener(const ListenerList& list,\n                               const Network::ListenSocketFactorySharedPtr& socket_factory);\n  void updateWarmingActiveGauges() {\n    // Using set() avoids a multiple modifiers problem during the multiple processes phase of hot\n    // restart.\n    stats_.total_listeners_warming_.set(warming_listeners_.size());\n    stats_.total_listeners_active_.set(active_listeners_.size());\n  }\n  bool listenersStopped(const envoy::config::listener::v3::Listener& config) {\n    // Currently all listeners in a given direction are stopped because of the way admin\n    // drain_listener functionality is implemented. This needs to be revisited, if that changes - if\n    // we support drain by listener name,for example.\n    return stop_listeners_type_ == StopListenersType::All ||\n           (stop_listeners_type_ == StopListenersType::InboundOnly &&\n            config.traffic_direction() == envoy::config::core::v3::INBOUND);\n  }\n\n  /**\n   * Mark a listener for draining. 
The listener will no longer be considered active but will remain\n   * present to allow connection draining.\n   * @param listener supplies the listener to drain.\n   */\n  void drainListener(ListenerImplPtr&& listener);\n\n  /**\n   * Start to draining filter chains that are owned by draining listener but not owned by\n   * new_listener. The new listener should have taken over the listener socket and partial of the\n   * filter chains from listener. This method is used by in place filter chain update.\n   * @param draining_listener supplies the listener to be replaced.\n   * @param new_listener supplies the new listener config which is going to replace the draining\n   * listener.\n   */\n  void drainFilterChains(ListenerImplPtr&& draining_listener, ListenerImpl& new_listener);\n\n  /**\n   * Stop a listener. The listener will stop accepting new connections and its socket will be\n   * closed.\n   * @param listener supplies the listener to stop.\n   * @param completion supplies the completion to be called when all workers are stopped accepting\n   * new connections. This completion is called on the main thread.\n   */\n  void stopListener(Network::ListenerConfig& listener, std::function<void()> completion);\n\n  /**\n   * Get a listener by name. This routine is used because listeners have inherent order in static\n   * configuration and especially for tests. 
Thus, we can't use a map.\n   * @param listeners supplies the listener list to look in.\n   * @param name supplies the name to search for.\n   */\n  ListenerList::iterator getListenerByName(ListenerList& listeners, const std::string& name);\n\n  Network::ListenSocketFactorySharedPtr\n  createListenSocketFactory(const envoy::config::core::v3::Address& proto_address,\n                            ListenerImpl& listener, bool reuse_port);\n\n  ApiListenerPtr api_listener_;\n  // Active listeners are listeners that are currently accepting new connections on the workers.\n  ListenerList active_listeners_;\n  // Warming listeners are listeners that may need further initialization via the listener's init\n  // manager. For example, RDS, or in the future KDS. Once a listener is done warming it will\n  // be transitioned to active.\n  ListenerList warming_listeners_;\n  // Draining listeners are listeners that are in the process of being drained and removed. They\n  // go through two phases where first the workers stop accepting new connections and existing\n  // connections are drained. 
Then after that time period the listener is removed from all workers\n  // and any remaining connections are closed.\n  std::list<DrainingListener> draining_listeners_;\n  std::list<DrainingFilterChainsManager> draining_filter_chains_manager_;\n\n  std::vector<WorkerPtr> workers_;\n  bool workers_started_{};\n  absl::optional<StopListenersType> stop_listeners_type_;\n  Stats::ScopePtr scope_;\n  ListenerManagerStats stats_;\n  ConfigTracker::EntryOwnerPtr config_tracker_entry_;\n  LdsApiPtr lds_api_;\n  const bool enable_dispatcher_stats_{};\n  using UpdateFailureState = envoy::admin::v3::UpdateFailureState;\n  absl::flat_hash_map<std::string, std::unique_ptr<UpdateFailureState>> error_state_tracker_;\n  FailureStates overall_error_state_;\n};\n\nclass ListenerFilterChainFactoryBuilder : public FilterChainFactoryBuilder {\npublic:\n  ListenerFilterChainFactoryBuilder(\n      ListenerImpl& listener, Configuration::TransportSocketFactoryContextImpl& factory_context);\n\n  ListenerFilterChainFactoryBuilder(\n      ProtobufMessage::ValidationVisitor& validator,\n      ListenerComponentFactory& listener_component_factory,\n      Server::Configuration::TransportSocketFactoryContextImpl& factory_context);\n\n  Network::DrainableFilterChainSharedPtr\n  buildFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain,\n                   FilterChainFactoryContextCreator& context_creator) const override;\n\nprivate:\n  Network::DrainableFilterChainSharedPtr buildFilterChainInternal(\n      const envoy::config::listener::v3::FilterChain& filter_chain,\n      Configuration::FilterChainFactoryContextPtr&& filter_chain_factory_context) const;\n\n  ProtobufMessage::ValidationVisitor& validator_;\n  ListenerComponentFactory& listener_component_factory_;\n  Configuration::TransportSocketFactoryContextImpl& factory_context_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/options_impl.cc",
    "content": "#include \"server/options_impl.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <iostream>\n#include <string>\n\n#include \"envoy/admin/v3/server_info.pb.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/macros.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/version/version.h\"\n\n#include \"server/options_impl_platform.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"spdlog/spdlog.h\"\n#include \"tclap/CmdLine.h\"\n\nnamespace Envoy {\nnamespace {\nstd::vector<std::string> toArgsVector(int argc, const char* const* argv) {\n  std::vector<std::string> args;\n  args.reserve(argc);\n\n  for (int i = 0; i < argc; ++i) {\n    args.emplace_back(argv[i]);\n  }\n  return args;\n}\n} // namespace\n\nOptionsImpl::OptionsImpl(int argc, const char* const* argv,\n                         const HotRestartVersionCb& hot_restart_version_cb,\n                         spdlog::level::level_enum default_log_level)\n    : OptionsImpl(toArgsVector(argc, argv), hot_restart_version_cb, default_log_level) {}\n\nOptionsImpl::OptionsImpl(std::vector<std::string> args,\n                         const HotRestartVersionCb& hot_restart_version_cb,\n                         spdlog::level::level_enum default_log_level)\n    : signal_handling_enabled_(true) {\n  std::string log_levels_string = fmt::format(\"Log levels: {}\", allowedLogLevels());\n  log_levels_string +=\n      fmt::format(\"\\nDefault is [{}]\", spdlog::level::level_string_views[default_log_level]);\n\n  const std::string component_log_level_string =\n      \"Comma separated list of component log levels. 
For example upstream:debug,config:trace\";\n  const std::string log_format_string =\n      fmt::format(\"Log message format in spdlog syntax \"\n                  \"(see https://github.com/gabime/spdlog/wiki/3.-Custom-formatting)\"\n                  \"\\nDefault is \\\"{}\\\"\",\n                  Logger::Logger::DEFAULT_LOG_FORMAT);\n\n  TCLAP::CmdLine cmd(\"envoy\", ' ', VersionInfo::version());\n  TCLAP::ValueArg<uint32_t> base_id(\n      \"\", \"base-id\", \"base ID so that multiple envoys can run on the same host if needed\", false, 0,\n      \"uint32_t\", cmd);\n  TCLAP::SwitchArg use_dynamic_base_id(\n      \"\", \"use-dynamic-base-id\",\n      \"the server chooses a base ID dynamically. Supersedes a static base ID. May not be used \"\n      \"when the restart epoch is non-zero.\",\n      cmd, false);\n  TCLAP::ValueArg<std::string> base_id_path(\n      \"\", \"base-id-path\", \"path to which the base ID is written\", false, \"\", \"string\", cmd);\n  TCLAP::ValueArg<uint32_t> concurrency(\"\", \"concurrency\", \"# of worker threads to run\", false,\n                                        std::thread::hardware_concurrency(), \"uint32_t\", cmd);\n  TCLAP::ValueArg<std::string> config_path(\"c\", \"config-path\", \"Path to configuration file\", false,\n                                           \"\", \"string\", cmd);\n  TCLAP::ValueArg<std::string> config_yaml(\n      \"\", \"config-yaml\", \"Inline YAML configuration, merges with the contents of --config-path\",\n      false, \"\", \"string\", cmd);\n  TCLAP::ValueArg<uint32_t> bootstrap_version(\n      \"\", \"bootstrap-version\",\n      \"API version to parse the bootstrap config as (e.g. 3). 
If \"\n      \"unset, all known versions will be attempted\",\n      false, 0, \"string\", cmd);\n\n  TCLAP::SwitchArg allow_unknown_fields(\"\", \"allow-unknown-fields\",\n                                        \"allow unknown fields in static configuration (DEPRECATED)\",\n                                        cmd, false);\n  TCLAP::SwitchArg allow_unknown_static_fields(\"\", \"allow-unknown-static-fields\",\n                                               \"allow unknown fields in static configuration\", cmd,\n                                               false);\n  TCLAP::SwitchArg reject_unknown_dynamic_fields(\"\", \"reject-unknown-dynamic-fields\",\n                                                 \"reject unknown fields in dynamic configuration\",\n                                                 cmd, false);\n  TCLAP::SwitchArg ignore_unknown_dynamic_fields(\"\", \"ignore-unknown-dynamic-fields\",\n                                                 \"ignore unknown fields in dynamic configuration\",\n                                                 cmd, false);\n\n  TCLAP::ValueArg<std::string> admin_address_path(\"\", \"admin-address-path\", \"Admin address path\",\n                                                  false, \"\", \"string\", cmd);\n  TCLAP::ValueArg<std::string> local_address_ip_version(\"\", \"local-address-ip-version\",\n                                                        \"The local \"\n                                                        \"IP address version (v4 or v6).\",\n                                                        false, \"v4\", \"string\", cmd);\n  TCLAP::ValueArg<std::string> log_level(\n      \"l\", \"log-level\", log_levels_string, false,\n      spdlog::level::level_string_views[default_log_level].data(), \"string\", cmd);\n  TCLAP::ValueArg<std::string> component_log_level(\n      \"\", \"component-log-level\", component_log_level_string, false, \"\", \"string\", cmd);\n  TCLAP::ValueArg<std::string> 
log_format(\"\", \"log-format\", log_format_string, false,\n                                          Logger::Logger::DEFAULT_LOG_FORMAT, \"string\", cmd);\n  TCLAP::SwitchArg log_format_escaped(\"\", \"log-format-escaped\",\n                                      \"Escape c-style escape sequences in the application logs\",\n                                      cmd, false);\n  TCLAP::SwitchArg enable_fine_grain_logging(\n      \"\", \"enable-fine-grain-logging\",\n      \"Logger mode: enable file level log control(Fancy Logger)or not\", cmd, false);\n  TCLAP::ValueArg<bool> log_format_prefix_with_location(\n      \"\", \"log-format-prefix-with-location\",\n      \"Prefix all occurrences of '%v' in log format with with '[%g:%#] ' ('[path/to/file.cc:99] \"\n      \"').\",\n      false, false, \"bool\", cmd);\n  TCLAP::ValueArg<std::string> log_path(\"\", \"log-path\", \"Path to logfile\", false, \"\", \"string\",\n                                        cmd);\n  TCLAP::ValueArg<uint32_t> restart_epoch(\"\", \"restart-epoch\", \"hot restart epoch #\", false, 0,\n                                          \"uint32_t\", cmd);\n  TCLAP::SwitchArg hot_restart_version_option(\"\", \"hot-restart-version\",\n                                              \"hot restart compatibility version\", cmd);\n  TCLAP::ValueArg<std::string> service_cluster(\"\", \"service-cluster\", \"Cluster name\", false, \"\",\n                                               \"string\", cmd);\n  TCLAP::ValueArg<std::string> service_node(\"\", \"service-node\", \"Node name\", false, \"\", \"string\",\n                                            cmd);\n  TCLAP::ValueArg<std::string> service_zone(\"\", \"service-zone\", \"Zone name\", false, \"\", \"string\",\n                                            cmd);\n  TCLAP::ValueArg<uint32_t> file_flush_interval_msec(\"\", \"file-flush-interval-msec\",\n                                                     \"Interval for log flushing in msec\", false,\n         
                                            10000, \"uint32_t\", cmd);\n  TCLAP::ValueArg<uint32_t> drain_time_s(\"\", \"drain-time-s\",\n                                         \"Hot restart and LDS removal drain time in seconds\", false,\n                                         600, \"uint32_t\", cmd);\n  TCLAP::ValueArg<std::string> drain_strategy(\n      \"\", \"drain-strategy\",\n      \"Hot restart drain sequence behaviour, one of 'gradual' (default) or 'immediate'.\", false,\n      \"gradual\", \"string\", cmd);\n  TCLAP::ValueArg<uint32_t> parent_shutdown_time_s(\"\", \"parent-shutdown-time-s\",\n                                                   \"Hot restart parent shutdown time in seconds\",\n                                                   false, 900, \"uint32_t\", cmd);\n  TCLAP::ValueArg<std::string> mode(\"\", \"mode\",\n                                    \"One of 'serve' (default; validate configs and then serve \"\n                                    \"traffic normally) or 'validate' (validate configs and exit).\",\n                                    false, \"serve\", \"string\", cmd);\n  TCLAP::SwitchArg disable_hot_restart(\"\", \"disable-hot-restart\",\n                                       \"Disable hot restart functionality\", cmd, false);\n  TCLAP::SwitchArg enable_mutex_tracing(\n      \"\", \"enable-mutex-tracing\", \"Enable mutex contention tracing functionality\", cmd, false);\n  TCLAP::SwitchArg cpuset_threads(\n      \"\", \"cpuset-threads\", \"Get the default # of worker threads from cpuset size\", cmd, false);\n\n  TCLAP::ValueArg<bool> use_fake_symbol_table(\"\", \"use-fake-symbol-table\",\n                                              \"Use fake symbol table implementation\", false, false,\n                                              \"bool\", cmd);\n\n  TCLAP::ValueArg<std::string> disable_extensions(\"\", \"disable-extensions\",\n                                                  \"Comma-separated list of extensions to 
disable\",\n                                                  false, \"\", \"string\", cmd);\n\n  TCLAP::ValueArg<std::string> socket_path(\"\", \"socket-path\", \"Path to hot restart socket file\",\n                                           false, \"@envoy_domain_socket\", \"string\", cmd);\n\n  TCLAP::ValueArg<std::string> socket_mode(\"\", \"socket-mode\", \"Socket file permission\", false,\n                                           \"600\", \"string\", cmd);\n\n  cmd.setExceptionHandling(false);\n  try {\n    cmd.parse(args);\n    count_ = cmd.getArgList().size();\n  } catch (TCLAP::ArgException& e) {\n    try {\n      cmd.getOutput()->failure(cmd, e);\n    } catch (const TCLAP::ExitException&) {\n      // failure() has already written an informative message to stderr, so all that's left to do\n      // is throw our own exception with the original message.\n      throw MalformedArgvException(e.what());\n    }\n  } catch (const TCLAP::ExitException& e) {\n    // parse() throws an ExitException with status 0 after printing the output for --help and\n    // --version.\n    throw NoServingException();\n  }\n\n  hot_restart_disabled_ = disable_hot_restart.getValue();\n  mutex_tracing_enabled_ = enable_mutex_tracing.getValue();\n  fake_symbol_table_enabled_ = use_fake_symbol_table.getValue();\n  if (fake_symbol_table_enabled_) {\n    ENVOY_LOG(warn, \"Fake symbol tables have been removed. 
Please remove references to \"\n                    \"--use-fake-symbol-table\");\n  }\n\n  cpuset_threads_ = cpuset_threads.getValue();\n\n  if (log_level.isSet()) {\n    log_level_ = parseAndValidateLogLevel(log_level.getValue());\n  } else {\n    log_level_ = default_log_level;\n  }\n\n  log_format_ = log_format.getValue();\n  if (log_format_prefix_with_location.getValue()) {\n    log_format_ = absl::StrReplaceAll(log_format_, {{\"%%\", \"%%\"}, {\"%v\", \"[%g:%#] %v\"}});\n  }\n  log_format_escaped_ = log_format_escaped.getValue();\n  enable_fine_grain_logging_ = enable_fine_grain_logging.getValue();\n\n  parseComponentLogLevels(component_log_level.getValue());\n\n  if (mode.getValue() == \"serve\") {\n    mode_ = Server::Mode::Serve;\n  } else if (mode.getValue() == \"validate\") {\n    mode_ = Server::Mode::Validate;\n  } else if (mode.getValue() == \"init_only\") {\n    mode_ = Server::Mode::InitOnly;\n  } else {\n    const std::string message = fmt::format(\"error: unknown mode '{}'\", mode.getValue());\n    throw MalformedArgvException(message);\n  }\n\n  if (local_address_ip_version.getValue() == \"v4\") {\n    local_address_ip_version_ = Network::Address::IpVersion::v4;\n  } else if (local_address_ip_version.getValue() == \"v6\") {\n    local_address_ip_version_ = Network::Address::IpVersion::v6;\n  } else {\n    const std::string message =\n        fmt::format(\"error: unknown IP address version '{}'\", local_address_ip_version.getValue());\n    throw MalformedArgvException(message);\n  }\n  base_id_ = base_id.getValue();\n  use_dynamic_base_id_ = use_dynamic_base_id.getValue();\n  base_id_path_ = base_id_path.getValue();\n  restart_epoch_ = restart_epoch.getValue();\n\n  if (use_dynamic_base_id_ && restart_epoch_ > 0) {\n    const std::string message = fmt::format(\n        \"error: cannot use --restart-epoch={} with --use-dynamic-base-id\", restart_epoch_);\n    throw MalformedArgvException(message);\n  }\n\n  if (!concurrency.isSet() && 
cpuset_threads_) {\n    // The 'concurrency' command line option wasn't set but the 'cpuset-threads'\n    // option was set. Use the number of CPUs assigned to the process cpuset, if\n    // that can be known.\n    concurrency_ = OptionsImplPlatform::getCpuCount();\n  } else {\n    if (concurrency.isSet() && cpuset_threads_ && cpuset_threads.isSet()) {\n      ENVOY_LOG(warn, \"Both --concurrency and --cpuset-threads options are set; not applying \"\n                      \"--cpuset-threads.\");\n    }\n    concurrency_ = std::max(1U, concurrency.getValue());\n  }\n\n  config_path_ = config_path.getValue();\n  config_yaml_ = config_yaml.getValue();\n  if (bootstrap_version.getValue() != 0) {\n    bootstrap_version_ = bootstrap_version.getValue();\n  }\n  if (allow_unknown_fields.getValue()) {\n    ENVOY_LOG(warn,\n              \"--allow-unknown-fields is deprecated, use --allow-unknown-static-fields instead.\");\n  }\n  allow_unknown_static_fields_ =\n      allow_unknown_static_fields.getValue() || allow_unknown_fields.getValue();\n  reject_unknown_dynamic_fields_ = reject_unknown_dynamic_fields.getValue();\n  ignore_unknown_dynamic_fields_ = ignore_unknown_dynamic_fields.getValue();\n  admin_address_path_ = admin_address_path.getValue();\n  log_path_ = log_path.getValue();\n  service_cluster_ = service_cluster.getValue();\n  service_node_ = service_node.getValue();\n  service_zone_ = service_zone.getValue();\n  file_flush_interval_msec_ = std::chrono::milliseconds(file_flush_interval_msec.getValue());\n  drain_time_ = std::chrono::seconds(drain_time_s.getValue());\n  parent_shutdown_time_ = std::chrono::seconds(parent_shutdown_time_s.getValue());\n  socket_path_ = socket_path.getValue();\n\n  if (socket_path_.at(0) == '@') {\n    socket_mode_ = 0;\n  } else {\n    uint64_t socket_mode_helper;\n    if (!StringUtil::atoull(socket_mode.getValue().c_str(), socket_mode_helper, 8)) {\n      throw MalformedArgvException(\n          fmt::format(\"error: invalid 
socket-mode '{}'\", socket_mode.getValue()));\n    }\n    socket_mode_ = socket_mode_helper;\n  }\n\n  if (drain_strategy.getValue() == \"immediate\") {\n    drain_strategy_ = Server::DrainStrategy::Immediate;\n  } else if (drain_strategy.getValue() == \"gradual\") {\n    drain_strategy_ = Server::DrainStrategy::Gradual;\n  } else {\n    throw MalformedArgvException(\n        fmt::format(\"error: unknown drain-strategy '{}'\", mode.getValue()));\n  }\n\n  if (hot_restart_version_option.getValue()) {\n    std::cerr << hot_restart_version_cb(!hot_restart_disabled_);\n    throw NoServingException();\n  }\n\n  if (!disable_extensions.getValue().empty()) {\n    disabled_extensions_ = absl::StrSplit(disable_extensions.getValue(), ',');\n  }\n}\n\nspdlog::level::level_enum OptionsImpl::parseAndValidateLogLevel(absl::string_view log_level) {\n  if (log_level == \"warn\") {\n    return spdlog::level::level_enum::warn;\n  }\n\n  size_t level_to_use = std::numeric_limits<size_t>::max();\n  for (size_t i = 0; i < ARRAY_SIZE(spdlog::level::level_string_views); i++) {\n    spdlog::string_view_t spd_log_level = spdlog::level::level_string_views[i];\n    if (log_level == absl::string_view(spd_log_level.data(), spd_log_level.size())) {\n      level_to_use = i;\n      break;\n    }\n  }\n\n  if (level_to_use == std::numeric_limits<size_t>::max()) {\n    logError(fmt::format(\"error: invalid log level specified '{}'\", log_level));\n  }\n  return static_cast<spdlog::level::level_enum>(level_to_use);\n}\n\nstd::string OptionsImpl::allowedLogLevels() {\n  std::string allowed_log_levels;\n  for (auto level_string_view : spdlog::level::level_string_views) {\n    if (level_string_view == spdlog::level::to_string_view(spdlog::level::warn)) {\n      allowed_log_levels += fmt::format(\"[{}|warn]\", level_string_view);\n    } else {\n      allowed_log_levels += fmt::format(\"[{}]\", level_string_view);\n    }\n  }\n  return allowed_log_levels;\n}\n\nvoid 
OptionsImpl::parseComponentLogLevels(const std::string& component_log_levels) {\n  if (component_log_levels.empty()) {\n    return;\n  }\n  component_log_level_str_ = component_log_levels;\n  std::vector<std::string> log_levels = absl::StrSplit(component_log_levels, ',');\n  for (auto& level : log_levels) {\n    std::vector<std::string> log_name_level = absl::StrSplit(level, ':');\n    if (log_name_level.size() != 2) {\n      logError(fmt::format(\"error: component log level not correctly specified '{}'\", level));\n    }\n    std::string log_name = log_name_level[0];\n    spdlog::level::level_enum log_level = parseAndValidateLogLevel(log_name_level[1]);\n    Logger::Logger* logger_to_change = Logger::Registry::logger(log_name);\n    if (!logger_to_change) {\n      logError(fmt::format(\"error: invalid component specified '{}'\", log_name));\n    }\n    component_log_levels_.push_back(std::make_pair(log_name, log_level));\n  }\n}\n\nuint32_t OptionsImpl::count() const { return count_; }\n\nvoid OptionsImpl::logError(const std::string& error) const { throw MalformedArgvException(error); }\n\nServer::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const {\n  Server::CommandLineOptionsPtr command_line_options =\n      std::make_unique<envoy::admin::v3::CommandLineOptions>();\n  command_line_options->set_base_id(baseId());\n  command_line_options->set_use_dynamic_base_id(useDynamicBaseId());\n  command_line_options->set_base_id_path(baseIdPath());\n  command_line_options->set_concurrency(concurrency());\n  command_line_options->set_config_path(configPath());\n  command_line_options->set_config_yaml(configYaml());\n  command_line_options->set_allow_unknown_static_fields(allow_unknown_static_fields_);\n  command_line_options->set_reject_unknown_dynamic_fields(reject_unknown_dynamic_fields_);\n  command_line_options->set_ignore_unknown_dynamic_fields(ignore_unknown_dynamic_fields_);\n  command_line_options->set_admin_address_path(adminAddressPath());\n  
command_line_options->set_component_log_level(component_log_level_str_);\n  command_line_options->set_log_level(spdlog::level::to_string_view(logLevel()).data(),\n                                      spdlog::level::to_string_view(logLevel()).size());\n  command_line_options->set_log_format(logFormat());\n  command_line_options->set_log_format_escaped(logFormatEscaped());\n  command_line_options->set_enable_fine_grain_logging(enableFineGrainLogging());\n  command_line_options->set_log_path(logPath());\n  command_line_options->set_service_cluster(serviceClusterName());\n  command_line_options->set_service_node(serviceNodeName());\n  command_line_options->set_service_zone(serviceZone());\n  if (mode() == Server::Mode::Serve) {\n    command_line_options->set_mode(envoy::admin::v3::CommandLineOptions::Serve);\n  } else if (mode() == Server::Mode::Validate) {\n    command_line_options->set_mode(envoy::admin::v3::CommandLineOptions::Validate);\n  } else {\n    command_line_options->set_mode(envoy::admin::v3::CommandLineOptions::InitOnly);\n  }\n  if (localAddressIpVersion() == Network::Address::IpVersion::v4) {\n    command_line_options->set_local_address_ip_version(envoy::admin::v3::CommandLineOptions::v4);\n  } else {\n    command_line_options->set_local_address_ip_version(envoy::admin::v3::CommandLineOptions::v6);\n  }\n  command_line_options->mutable_file_flush_interval()->MergeFrom(\n      Protobuf::util::TimeUtil::MillisecondsToDuration(fileFlushIntervalMsec().count()));\n\n  command_line_options->mutable_drain_time()->MergeFrom(\n      Protobuf::util::TimeUtil::SecondsToDuration(drainTime().count()));\n  command_line_options->set_drain_strategy(drainStrategy() == Server::DrainStrategy::Immediate\n                                               ? 
envoy::admin::v3::CommandLineOptions::Immediate\n                                               : envoy::admin::v3::CommandLineOptions::Gradual);\n  command_line_options->mutable_parent_shutdown_time()->MergeFrom(\n      Protobuf::util::TimeUtil::SecondsToDuration(parentShutdownTime().count()));\n\n  command_line_options->set_disable_hot_restart(hotRestartDisabled());\n  command_line_options->set_enable_mutex_tracing(mutexTracingEnabled());\n  command_line_options->set_cpuset_threads(cpusetThreadsEnabled());\n  command_line_options->set_restart_epoch(restartEpoch());\n  for (const auto& e : disabledExtensions()) {\n    command_line_options->add_disabled_extensions(e);\n  }\n  command_line_options->set_socket_path(socketPath());\n  command_line_options->set_socket_mode(socketMode());\n  return command_line_options;\n}\n\nOptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& service_node,\n                         const std::string& service_zone, spdlog::level::level_enum log_level)\n    : base_id_(0u), use_dynamic_base_id_(false), base_id_path_(\"\"), concurrency_(1u),\n      config_path_(\"\"), config_yaml_(\"\"),\n      local_address_ip_version_(Network::Address::IpVersion::v4), log_level_(log_level),\n      log_format_(Logger::Logger::DEFAULT_LOG_FORMAT), log_format_escaped_(false),\n      restart_epoch_(0u), service_cluster_(service_cluster), service_node_(service_node),\n      service_zone_(service_zone), file_flush_interval_msec_(10000), drain_time_(600),\n      parent_shutdown_time_(900), drain_strategy_(Server::DrainStrategy::Gradual),\n      mode_(Server::Mode::Serve), hot_restart_disabled_(false), signal_handling_enabled_(true),\n      mutex_tracing_enabled_(false), cpuset_threads_(false), fake_symbol_table_enabled_(false),\n      socket_path_(\"@envoy_domain_socket\"), socket_mode_(0) {}\n\nvoid OptionsImpl::disableExtensions(const std::vector<std::string>& names) {\n  for (const auto& name : names) {\n    const 
std::vector<absl::string_view> parts = absl::StrSplit(name, absl::MaxSplits('/', 1));\n\n    if (parts.size() != 2) {\n      ENVOY_LOG_MISC(warn, \"failed to disable invalid extension name '{}'\", name);\n      continue;\n    }\n\n    if (Registry::FactoryCategoryRegistry::disableFactory(parts[0], parts[1])) {\n      ENVOY_LOG_MISC(info, \"disabled extension '{}'\", name);\n    } else {\n      ENVOY_LOG_MISC(warn, \"failed to disable unknown extension '{}'\", name);\n    }\n  }\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/options_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/options.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\n/**\n * Implementation of Server::Options.\n */\nclass OptionsImpl : public Server::Options, protected Logger::Loggable<Logger::Id::config> {\npublic:\n  /**\n   * Parameters are hot_restart_enabled\n   */\n  using HotRestartVersionCb = std::function<std::string(bool)>;\n\n  /**\n   * @throw NoServingException if Envoy has already done everything specified by the args (e.g.\n   *        print the hot restart version) and it's time to exit without serving HTTP traffic. The\n   *        caller should exit(0) after any necessary cleanup.\n   * @throw MalformedArgvException if something is wrong with the arguments (invalid flag or flag\n   *        value). The caller should call exit(1) after any necessary cleanup.\n   */\n  OptionsImpl(int argc, const char* const* argv, const HotRestartVersionCb& hot_restart_version_cb,\n              spdlog::level::level_enum default_log_level);\n\n  /**\n   * @throw NoServingException if Envoy has already done everything specified by the args (e.g.\n   *        print the hot restart version) and it's time to exit without serving HTTP traffic. The\n   *        caller should exit(0) after any necessary cleanup.\n   * @throw MalformedArgvException if something is wrong with the arguments (invalid flag or flag\n   *        value). 
The caller should call exit(1) after any necessary cleanup.\n   */\n  OptionsImpl(std::vector<std::string> args, const HotRestartVersionCb& hot_restart_version_cb,\n              spdlog::level::level_enum default_log_level);\n\n  // Test constructor; creates \"reasonable\" defaults, but desired values should be set explicitly.\n  OptionsImpl(const std::string& service_cluster, const std::string& service_node,\n              const std::string& service_zone, spdlog::level::level_enum log_level);\n\n  // Setters for option fields. These are not part of the Options interface.\n  void setBaseId(uint64_t base_id) { base_id_ = base_id; };\n  void setUseDynamicBaseId(bool use_dynamic_base_id) { use_dynamic_base_id_ = use_dynamic_base_id; }\n  void setBaseIdPath(const std::string& base_id_path) { base_id_path_ = base_id_path; }\n  void setConcurrency(uint32_t concurrency) { concurrency_ = concurrency; }\n  void setConfigPath(const std::string& config_path) { config_path_ = config_path; }\n  void setConfigProto(const envoy::config::bootstrap::v3::Bootstrap& config_proto) {\n    config_proto_ = config_proto;\n  }\n  void setConfigYaml(const std::string& config_yaml) { config_yaml_ = config_yaml; }\n  void setBootstrapVersion(uint32_t bootstrap_version) { bootstrap_version_ = bootstrap_version; }\n  void setAdminAddressPath(const std::string& admin_address_path) {\n    admin_address_path_ = admin_address_path;\n  }\n  void setLocalAddressIpVersion(Network::Address::IpVersion local_address_ip_version) {\n    local_address_ip_version_ = local_address_ip_version;\n  }\n  void setDrainTime(std::chrono::seconds drain_time) { drain_time_ = drain_time; }\n  void setParentShutdownTime(std::chrono::seconds parent_shutdown_time) {\n    parent_shutdown_time_ = parent_shutdown_time;\n  }\n  void setDrainStrategy(Server::DrainStrategy drain_strategy) { drain_strategy_ = drain_strategy; }\n  void setLogLevel(spdlog::level::level_enum log_level) { log_level_ = log_level; }\n  void 
setLogFormat(const std::string& log_format) { log_format_ = log_format; }\n  void setLogPath(const std::string& log_path) { log_path_ = log_path; }\n  void setRestartEpoch(uint64_t restart_epoch) { restart_epoch_ = restart_epoch; }\n  void setMode(Server::Mode mode) { mode_ = mode; }\n  void setFileFlushIntervalMsec(std::chrono::milliseconds file_flush_interval_msec) {\n    file_flush_interval_msec_ = file_flush_interval_msec;\n  }\n  void setServiceClusterName(const std::string& service_cluster) {\n    service_cluster_ = service_cluster;\n  }\n  void setServiceNodeName(const std::string& service_node) { service_node_ = service_node; }\n  void setServiceZone(const std::string& service_zone) { service_zone_ = service_zone; }\n  void setHotRestartDisabled(bool hot_restart_disabled) {\n    hot_restart_disabled_ = hot_restart_disabled;\n  }\n  void setSignalHandling(bool signal_handling_enabled) {\n    signal_handling_enabled_ = signal_handling_enabled;\n  }\n  void setCpusetThreads(bool cpuset_threads_enabled) { cpuset_threads_ = cpuset_threads_enabled; }\n  void setAllowUnkownFields(bool allow_unknown_static_fields) {\n    allow_unknown_static_fields_ = allow_unknown_static_fields;\n  }\n  void setRejectUnknownFieldsDynamic(bool reject_unknown_dynamic_fields) {\n    reject_unknown_dynamic_fields_ = reject_unknown_dynamic_fields;\n  }\n  void setIgnoreUnknownFieldsDynamic(bool ignore_unknown_dynamic_fields) {\n    ignore_unknown_dynamic_fields_ = ignore_unknown_dynamic_fields;\n  }\n\n  void setFakeSymbolTableEnabled(bool fake_symbol_table_enabled) {\n    fake_symbol_table_enabled_ = fake_symbol_table_enabled;\n  }\n\n  void setSocketPath(const std::string& socket_path) { socket_path_ = socket_path; }\n\n  void setSocketMode(mode_t socket_mode) { socket_mode_ = socket_mode; }\n\n  // Server::Options\n  uint64_t baseId() const override { return base_id_; }\n  bool useDynamicBaseId() const override { return use_dynamic_base_id_; }\n  const std::string& baseIdPath() 
const override { return base_id_path_; }\n  uint32_t concurrency() const override { return concurrency_; }\n  const std::string& configPath() const override { return config_path_; }\n  const envoy::config::bootstrap::v3::Bootstrap& configProto() const override {\n    return config_proto_;\n  }\n  const absl::optional<uint32_t>& bootstrapVersion() const override { return bootstrap_version_; }\n  const std::string& configYaml() const override { return config_yaml_; }\n  bool allowUnknownStaticFields() const override { return allow_unknown_static_fields_; }\n  bool rejectUnknownDynamicFields() const override { return reject_unknown_dynamic_fields_; }\n  bool ignoreUnknownDynamicFields() const override { return ignore_unknown_dynamic_fields_; }\n  const std::string& adminAddressPath() const override { return admin_address_path_; }\n  Network::Address::IpVersion localAddressIpVersion() const override {\n    return local_address_ip_version_;\n  }\n  std::chrono::seconds drainTime() const override { return drain_time_; }\n  std::chrono::seconds parentShutdownTime() const override { return parent_shutdown_time_; }\n  Server::DrainStrategy drainStrategy() const override { return drain_strategy_; }\n\n  spdlog::level::level_enum logLevel() const override { return log_level_; }\n  const std::vector<std::pair<std::string, spdlog::level::level_enum>>&\n  componentLogLevels() const override {\n    return component_log_levels_;\n  }\n  const std::string& logFormat() const override { return log_format_; }\n  bool logFormatEscaped() const override { return log_format_escaped_; }\n  bool enableFineGrainLogging() const override { return enable_fine_grain_logging_; }\n  const std::string& logPath() const override { return log_path_; }\n  uint64_t restartEpoch() const override { return restart_epoch_; }\n  Server::Mode mode() const override { return mode_; }\n  std::chrono::milliseconds fileFlushIntervalMsec() const override {\n    return file_flush_interval_msec_;\n  }\n  const 
std::string& serviceClusterName() const override { return service_cluster_; }\n  const std::string& serviceNodeName() const override { return service_node_; }\n  const std::string& serviceZone() const override { return service_zone_; }\n  bool hotRestartDisabled() const override { return hot_restart_disabled_; }\n  bool signalHandlingEnabled() const override { return signal_handling_enabled_; }\n  bool mutexTracingEnabled() const override { return mutex_tracing_enabled_; }\n  bool fakeSymbolTableEnabled() const override { return fake_symbol_table_enabled_; }\n  Server::CommandLineOptionsPtr toCommandLineOptions() const override;\n  void parseComponentLogLevels(const std::string& component_log_levels);\n  bool cpusetThreadsEnabled() const override { return cpuset_threads_; }\n  const std::vector<std::string>& disabledExtensions() const override {\n    return disabled_extensions_;\n  }\n  uint32_t count() const;\n  const std::string& socketPath() const override { return socket_path_; }\n  mode_t socketMode() const override { return socket_mode_; }\n\n  /**\n   * disableExtensions parses the given set of extension names of\n   * the form $CATEGORY/$NAME, and disables the corresponding extension\n   * factories.\n   */\n  static void disableExtensions(const std::vector<std::string>&);\n  static std::string allowedLogLevels();\n\nprivate:\n  void logError(const std::string& error) const;\n  spdlog::level::level_enum parseAndValidateLogLevel(absl::string_view log_level);\n\n  uint64_t base_id_;\n  bool use_dynamic_base_id_;\n  std::string base_id_path_;\n  uint32_t concurrency_;\n  std::string config_path_;\n  envoy::config::bootstrap::v3::Bootstrap config_proto_;\n  absl::optional<uint32_t> bootstrap_version_;\n  std::string config_yaml_;\n  bool allow_unknown_static_fields_{false};\n  bool reject_unknown_dynamic_fields_{false};\n  bool ignore_unknown_dynamic_fields_{false};\n  std::string admin_address_path_;\n  Network::Address::IpVersion local_address_ip_version_;\n  
spdlog::level::level_enum log_level_;\n  std::vector<std::pair<std::string, spdlog::level::level_enum>> component_log_levels_;\n  std::string component_log_level_str_;\n  std::string log_format_;\n  bool log_format_escaped_;\n  std::string log_path_;\n  uint64_t restart_epoch_;\n  std::string service_cluster_;\n  std::string service_node_;\n  std::string service_zone_;\n  std::chrono::milliseconds file_flush_interval_msec_;\n  std::chrono::seconds drain_time_;\n  std::chrono::seconds parent_shutdown_time_;\n  Server::DrainStrategy drain_strategy_;\n  Server::Mode mode_;\n  bool hot_restart_disabled_;\n  bool signal_handling_enabled_;\n  bool mutex_tracing_enabled_;\n  bool cpuset_threads_;\n  bool fake_symbol_table_enabled_;\n  std::vector<std::string> disabled_extensions_;\n  uint32_t count_;\n\n  // Initialization added here to avoid integration_admin_test failure caused by uninitialized\n  // enable_fine_grain_logging_.\n  bool enable_fine_grain_logging_ = false;\n  std::string socket_path_;\n  mode_t socket_mode_;\n};\n\n/**\n * Thrown when an OptionsImpl was not constructed because all of Envoy's work is done (for example,\n * it was started with --help and it's already printed a help message) so all that's left to do is\n * exit successfully.\n */\nclass NoServingException : public EnvoyException {\npublic:\n  NoServingException() : EnvoyException(\"NoServingException\") {}\n};\n\n/**\n * Thrown when an OptionsImpl was not constructed because the argv was invalid.\n */\nclass MalformedArgvException : public EnvoyException {\npublic:\n  MalformedArgvException(const std::string& what) : EnvoyException(what) {}\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/options_impl_platform.h",
    "content": "#pragma once\n\n#include <cstdint>\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\nclass OptionsImplPlatform : protected Logger::Loggable<Logger::Id::config> {\npublic:\n  static uint32_t getCpuCount();\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/options_impl_platform_default.cc",
    "content": "#include <thread>\n\n#include \"common/common/logger.h\"\n\n#include \"server/options_impl_platform.h\"\n\nnamespace Envoy {\n\nuint32_t OptionsImplPlatform::getCpuCount() {\n  ENVOY_LOG(warn, \"CPU number provided by HW thread count (instead of cpuset).\");\n  return std::thread::hardware_concurrency();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/options_impl_platform_linux.cc",
    "content": "#if !defined(__linux__)\n#error \"Linux platform file is part of non-Linux build.\"\n#endif\n\n#include \"server/options_impl_platform_linux.h\"\n\n#include <sched.h>\n\n#include <thread>\n\n#include \"common/api/os_sys_calls_impl_linux.h\"\n\n#include \"server/options_impl_platform.h\"\n\nnamespace Envoy {\n\nuint32_t OptionsImplPlatformLinux::getCpuAffinityCount(unsigned int hw_threads) {\n  unsigned int threads = 0;\n  pid_t pid = getpid();\n  cpu_set_t mask;\n  auto& linux_os_syscalls = Api::LinuxOsSysCallsSingleton::get();\n\n  CPU_ZERO(&mask);\n  const Api::SysCallIntResult result =\n      linux_os_syscalls.sched_getaffinity(pid, sizeof(cpu_set_t), &mask);\n  if (result.rc_ == -1) {\n    // Fall back to number of hardware threads.\n    return hw_threads;\n  }\n\n  threads = CPU_COUNT(&mask);\n\n  // Sanity check.\n  if (threads > 0 && threads <= hw_threads) {\n    return threads;\n  }\n\n  return hw_threads;\n}\n\nuint32_t OptionsImplPlatform::getCpuCount() {\n  unsigned int hw_threads = std::max(1U, std::thread::hardware_concurrency());\n  return OptionsImplPlatformLinux::getCpuAffinityCount(hw_threads);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/options_impl_platform_linux.h",
    "content": "#pragma once\n\n#if !defined(__linux__)\n#error \"Linux platform file is part of non-Linux build.\"\n#endif\n\n#include <cstdint>\n#include <string>\n\nnamespace Envoy {\nclass OptionsImplPlatformLinux {\npublic:\n  static uint32_t getCpuAffinityCount(unsigned int hw_threads);\n};\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/overload_manager_impl.cc",
    "content": "#include \"server/overload_manager_impl.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/overload/v3/overload.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"server/resource_monitor_config_impl.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\n\nclass ThresholdTriggerImpl final : public OverloadAction::Trigger {\npublic:\n  ThresholdTriggerImpl(const envoy::config::overload::v3::ThresholdTrigger& config)\n      : threshold_(config.value()), state_(OverloadActionState::inactive()) {}\n\n  bool updateValue(double value) override {\n    const OverloadActionState state = actionState();\n    state_ =\n        value >= threshold_ ? OverloadActionState::saturated() : OverloadActionState::inactive();\n    return state.value() != actionState().value();\n  }\n\n  OverloadActionState actionState() const override { return state_; }\n\nprivate:\n  const double threshold_;\n  OverloadActionState state_;\n};\n\nclass ScaledTriggerImpl final : public OverloadAction::Trigger {\npublic:\n  ScaledTriggerImpl(const envoy::config::overload::v3::ScaledTrigger& config)\n      : scaling_threshold_(config.scaling_threshold()),\n        saturated_threshold_(config.saturation_threshold()),\n        state_(OverloadActionState::inactive()) {\n    if (scaling_threshold_ >= saturated_threshold_) {\n      throw EnvoyException(\"scaling_threshold must be less than saturation_threshold\");\n    }\n  }\n\n  bool updateValue(double value) override {\n    const OverloadActionState old_state = actionState();\n    if (value <= scaling_threshold_) {\n      state_ = OverloadActionState::inactive();\n    } else if (value >= saturated_threshold_) {\n      state_ = OverloadActionState::saturated();\n    } else {\n 
     state_ = OverloadActionState((value - scaling_threshold_) /\n                                   (saturated_threshold_ - scaling_threshold_));\n    }\n    return state_.value() != old_state.value();\n  }\n\n  OverloadActionState actionState() const override { return state_; }\n\nprivate:\n  const double scaling_threshold_;\n  const double saturated_threshold_;\n  OverloadActionState state_;\n};\n\n/**\n * Thread-local copy of the state of each configured overload action.\n */\nclass ThreadLocalOverloadStateImpl : public ThreadLocalOverloadState {\npublic:\n  const OverloadActionState& getState(const std::string& action) override {\n    auto it = actions_.find(action);\n    if (it == actions_.end()) {\n      it = actions_.insert(std::make_pair(action, OverloadActionState::inactive())).first;\n    }\n    return it->second;\n  }\n\n  void setState(const std::string& action, OverloadActionState state) {\n    actions_.insert_or_assign(action, state);\n  }\n\nprivate:\n  absl::node_hash_map<std::string, OverloadActionState> actions_;\n};\n\nStats::Counter& makeCounter(Stats::Scope& scope, absl::string_view a, absl::string_view b) {\n  Stats::StatNameManagedStorage stat_name(absl::StrCat(\"overload.\", a, \".\", b),\n                                          scope.symbolTable());\n  return scope.counterFromStatName(stat_name.statName());\n}\n\nStats::Gauge& makeGauge(Stats::Scope& scope, absl::string_view a, absl::string_view b,\n                        Stats::Gauge::ImportMode import_mode) {\n  Stats::StatNameManagedStorage stat_name(absl::StrCat(\"overload.\", a, \".\", b),\n                                          scope.symbolTable());\n  return scope.gaugeFromStatName(stat_name.statName(), import_mode);\n}\n\n} // namespace\n\nOverloadAction::OverloadAction(const envoy::config::overload::v3::OverloadAction& config,\n                               Stats::Scope& stats_scope)\n    : state_(OverloadActionState::inactive()),\n      active_gauge_(\n          
makeGauge(stats_scope, config.name(), \"active\", Stats::Gauge::ImportMode::Accumulate)),\n      scale_percent_gauge_(makeGauge(stats_scope, config.name(), \"scale_percent\",\n                                     Stats::Gauge::ImportMode::Accumulate)) {\n  for (const auto& trigger_config : config.triggers()) {\n    TriggerPtr trigger;\n\n    switch (trigger_config.trigger_oneof_case()) {\n    case envoy::config::overload::v3::Trigger::TriggerOneofCase::kThreshold:\n      trigger = std::make_unique<ThresholdTriggerImpl>(trigger_config.threshold());\n      break;\n    case envoy::config::overload::v3::Trigger::TriggerOneofCase::kScaled:\n      trigger = std::make_unique<ScaledTriggerImpl>(trigger_config.scaled());\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n\n    if (!triggers_.try_emplace(trigger_config.name(), std::move(trigger)).second) {\n      throw EnvoyException(\n          absl::StrCat(\"Duplicate trigger resource for overload action \", config.name()));\n    }\n  }\n\n  active_gauge_.set(0);\n  scale_percent_gauge_.set(0);\n}\n\nbool OverloadAction::updateResourcePressure(const std::string& name, double pressure) {\n  const OverloadActionState old_state = getState();\n\n  auto it = triggers_.find(name);\n  ASSERT(it != triggers_.end());\n  if (!it->second->updateValue(pressure)) {\n    return false;\n  }\n  const auto trigger_new_state = it->second->actionState();\n  active_gauge_.set(trigger_new_state.isSaturated() ? 
1 : 0);\n  scale_percent_gauge_.set(trigger_new_state.value() * 100);\n\n  {\n    // Compute the new state as the maximum over all trigger states.\n    OverloadActionState new_state = OverloadActionState::inactive();\n    for (auto& trigger : triggers_) {\n      const auto trigger_state = trigger.second->actionState();\n      if (trigger_state.value() > new_state.value()) {\n        new_state = trigger_state;\n      }\n    }\n    state_ = new_state;\n  }\n\n  return state_.value() != old_state.value();\n}\n\nOverloadActionState OverloadAction::getState() const { return state_; }\n\nOverloadManagerImpl::OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::Scope& stats_scope,\n                                         ThreadLocal::SlotAllocator& slot_allocator,\n                                         const envoy::config::overload::v3::OverloadManager& config,\n                                         ProtobufMessage::ValidationVisitor& validation_visitor,\n                                         Api::Api& api)\n    : started_(false), dispatcher_(dispatcher), tls_(slot_allocator.allocateSlot()),\n      refresh_interval_(\n          std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, refresh_interval, 1000))) {\n  Configuration::ResourceMonitorFactoryContextImpl context(dispatcher, api, validation_visitor);\n  for (const auto& resource : config.resource_monitors()) {\n    const auto& name = resource.name();\n    ENVOY_LOG(debug, \"Adding resource monitor for {}\", name);\n    auto& factory =\n        Config::Utility::getAndCheckFactory<Configuration::ResourceMonitorFactory>(resource);\n    auto config = Config::Utility::translateToFactoryConfig(resource, validation_visitor, factory);\n    auto monitor = factory.createResourceMonitor(*config, context);\n\n    auto result = resources_.try_emplace(name, name, std::move(monitor), *this, stats_scope);\n    if (!result.second) {\n      throw EnvoyException(absl::StrCat(\"Duplicate resource monitor \", 
name));\n    }\n  }\n\n  for (const auto& action : config.actions()) {\n    const auto& name = action.name();\n    ENVOY_LOG(debug, \"Adding overload action {}\", name);\n    // TODO: use in place construction once https://github.com/abseil/abseil-cpp/issues/388 is\n    // addressed\n    // We cannot currently use in place construction as the OverloadAction constructor may throw,\n    // causing an inconsistent internal state of the actions_ map, which on destruction results in\n    // an invalid free.\n    auto result = actions_.try_emplace(name, OverloadAction(action, stats_scope));\n    if (!result.second) {\n      throw EnvoyException(absl::StrCat(\"Duplicate overload action \", name));\n    }\n\n    for (const auto& trigger : action.triggers()) {\n      const std::string& resource = trigger.name();\n\n      if (resources_.find(resource) == resources_.end()) {\n        throw EnvoyException(\n            fmt::format(\"Unknown trigger resource {} for overload action {}\", resource, name));\n      }\n\n      resource_to_actions_.insert(std::make_pair(resource, name));\n    }\n  }\n}\n\nvoid OverloadManagerImpl::start() {\n  ASSERT(!started_);\n  started_ = true;\n\n  tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    return std::make_shared<ThreadLocalOverloadStateImpl>();\n  });\n\n  if (resources_.empty()) {\n    return;\n  }\n\n  timer_ = dispatcher_.createTimer([this]() -> void {\n    // Guarantee that all resource updates get flushed after no more than one refresh_interval_.\n    flushResourceUpdates();\n\n    // Start a new flush epoch. 
If all resource updates complete before this callback runs, the last\n    // resource update will call flushResourceUpdates to flush the whole batch early.\n    ++flush_epoch_;\n    flush_awaiting_updates_ = resources_.size();\n\n    for (auto& resource : resources_) {\n      resource.second.update(flush_epoch_);\n    }\n\n    timer_->enableTimer(refresh_interval_);\n  });\n  timer_->enableTimer(refresh_interval_);\n}\n\nvoid OverloadManagerImpl::stop() {\n  // Disable any pending timeouts.\n  if (timer_) {\n    timer_->disableTimer();\n  }\n\n  // Clear the resource map to block on any pending updates.\n  resources_.clear();\n}\n\nbool OverloadManagerImpl::registerForAction(const std::string& action,\n                                            Event::Dispatcher& dispatcher,\n                                            OverloadActionCb callback) {\n  ASSERT(!started_);\n\n  if (actions_.find(action) == actions_.end()) {\n    ENVOY_LOG(debug, \"No overload action is configured for {}.\", action);\n    return false;\n  }\n\n  action_to_callbacks_.emplace(std::piecewise_construct, std::forward_as_tuple(action),\n                               std::forward_as_tuple(dispatcher, callback));\n  return true;\n}\n\nThreadLocalOverloadState& OverloadManagerImpl::getThreadLocalOverloadState() {\n  return tls_->getTyped<ThreadLocalOverloadStateImpl>();\n}\n\nvoid OverloadManagerImpl::updateResourcePressure(const std::string& resource, double pressure,\n                                                 FlushEpochId flush_epoch) {\n  auto [start, end] = resource_to_actions_.equal_range(resource);\n\n  std::for_each(start, end, [&](ResourceToActionMap::value_type& entry) {\n    const std::string& action = entry.second;\n    auto action_it = actions_.find(action);\n    ASSERT(action_it != actions_.end());\n    const OverloadActionState old_state = action_it->second.getState();\n    if (action_it->second.updateResourcePressure(resource, pressure)) {\n      const auto state = 
action_it->second.getState();\n\n      if (old_state.isSaturated() != state.isSaturated()) {\n        ENVOY_LOG(debug, \"Overload action {} became {}\", action,\n                  (state.isSaturated() ? \"saturated\" : \"scaling\"));\n      }\n\n      // Record the updated value to be sent to workers on the next thread-local-state flush, along\n      // with any update callbacks. This might overwrite a previous action state change caused by a\n      // pressure update for a different resource that hasn't been flushed yet. That's okay because\n      // the state recorded here includes the information from all previous resource updates. So\n      // even if resource 1 causes an action to have value A, and a later update to resource 2\n      // causes the action to have value B, B would have been the result for whichever order the\n      // updates to resources 1 and 2 came in.\n      state_updates_to_flush_.insert_or_assign(action, state);\n      auto [callbacks_start, callbacks_end] = action_to_callbacks_.equal_range(action);\n      std::for_each(callbacks_start, callbacks_end, [&](ActionToCallbackMap::value_type& cb_entry) {\n        callbacks_to_flush_.insert_or_assign(&cb_entry.second, state);\n      });\n    }\n  });\n\n  // Eagerly flush updates if this is the last call to updateResourcePressure expected for the\n  // current epoch. 
This assert is always valid because flush_awaiting_updates_ is initialized\n  // before each batch of updates, and even if a resource monitor performs a double update, or a\n  // previous update callback is late, the logic in OverloadManager::Resource::update() will prevent\n  // unexpected calls to this function.\n  ASSERT(flush_awaiting_updates_ > 0);\n  --flush_awaiting_updates_;\n  if (flush_epoch == flush_epoch_ && flush_awaiting_updates_ == 0) {\n    flushResourceUpdates();\n  }\n}\n\nvoid OverloadManagerImpl::flushResourceUpdates() {\n  if (!state_updates_to_flush_.empty()) {\n    auto shared_updates = std::make_shared<absl::flat_hash_map<std::string, OverloadActionState>>();\n    std::swap(*shared_updates, state_updates_to_flush_);\n\n    tls_->runOnAllThreads(\n        [updates = std::move(shared_updates)](ThreadLocal::ThreadLocalObjectSharedPtr object)\n            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n          for (const auto& [action, state] : *updates) {\n            object->asType<ThreadLocalOverloadStateImpl>().setState(action, state);\n          }\n          return object;\n        });\n  }\n\n  for (const auto& [cb, state] : callbacks_to_flush_) {\n    cb->dispatcher_.post([cb = cb, state = state]() { cb->callback_(state); });\n  }\n  callbacks_to_flush_.clear();\n}\n\nOverloadManagerImpl::Resource::Resource(const std::string& name, ResourceMonitorPtr monitor,\n                                        OverloadManagerImpl& manager, Stats::Scope& stats_scope)\n    : name_(name), monitor_(std::move(monitor)), manager_(manager), pending_update_(false),\n      pressure_gauge_(\n          makeGauge(stats_scope, name, \"pressure\", Stats::Gauge::ImportMode::NeverImport)),\n      failed_updates_counter_(makeCounter(stats_scope, name, \"failed_updates\")),\n      skipped_updates_counter_(makeCounter(stats_scope, name, \"skipped_updates\")) {}\n\nvoid OverloadManagerImpl::Resource::update(FlushEpochId flush_epoch) {\n  if (!pending_update_) {\n    
pending_update_ = true;\n    flush_epoch_ = flush_epoch;\n    monitor_->updateResourceUsage(*this);\n    return;\n  }\n  ENVOY_LOG(debug, \"Skipping update for resource {} which has pending update\", name_);\n  skipped_updates_counter_.inc();\n}\n\nvoid OverloadManagerImpl::Resource::onSuccess(const ResourceUsage& usage) {\n  pending_update_ = false;\n  manager_.updateResourcePressure(name_, usage.resource_pressure_, flush_epoch_);\n  pressure_gauge_.set(usage.resource_pressure_ * 100); // convert to percent\n}\n\nvoid OverloadManagerImpl::Resource::onFailure(const EnvoyException& error) {\n  pending_update_ = false;\n  ENVOY_LOG(info, \"Failed to update resource {}: {}\", name_, error.what());\n  failed_updates_counter_.inc();\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/overload_manager_impl.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/overload/v3/overload.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/protobuf/message_validator.h\"\n#include \"envoy/server/overload_manager.h\"\n#include \"envoy/server/resource_monitor.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/container/node_hash_set.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass OverloadAction {\npublic:\n  OverloadAction(const envoy::config::overload::v3::OverloadAction& config,\n                 Stats::Scope& stats_scope);\n\n  // Updates the current pressure for the given resource and returns whether the action\n  // has changed state.\n  bool updateResourcePressure(const std::string& name, double pressure);\n\n  // Returns the current action state, which is the max state across all registered triggers.\n  OverloadActionState getState() const;\n\n  class Trigger {\n  public:\n    virtual ~Trigger() = default;\n\n    // Updates the current value of the metric and returns whether the trigger has changed state.\n    virtual bool updateValue(double value) PURE;\n\n    // Returns the action state for the trigger.\n    virtual OverloadActionState actionState() const PURE;\n  };\n  using TriggerPtr = std::unique_ptr<Trigger>;\n\nprivate:\n  absl::node_hash_map<std::string, TriggerPtr> triggers_;\n  OverloadActionState state_;\n  Stats::Gauge& active_gauge_;\n  Stats::Gauge& scale_percent_gauge_;\n};\n\nclass OverloadManagerImpl : Logger::Loggable<Logger::Id::main>, public OverloadManager {\npublic:\n  OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::Scope& stats_scope,\n                      ThreadLocal::SlotAllocator& slot_allocator,\n                      const envoy::config::overload::v3::OverloadManager& 
config,\n                      ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api);\n\n  // Server::OverloadManager\n  void start() override;\n  bool registerForAction(const std::string& action, Event::Dispatcher& dispatcher,\n                         OverloadActionCb callback) override;\n  ThreadLocalOverloadState& getThreadLocalOverloadState() override;\n\n  // Stop the overload manager timer and wait for any pending resource updates to complete.\n  // After this returns, overload manager clients should not receive any more callbacks\n  // about overload state changes.\n  void stop();\n\nprivate:\n  using FlushEpochId = uint64_t;\n  class Resource : public ResourceMonitor::Callbacks {\n  public:\n    Resource(const std::string& name, ResourceMonitorPtr monitor, OverloadManagerImpl& manager,\n             Stats::Scope& stats_scope);\n\n    // ResourceMonitor::Callbacks\n    void onSuccess(const ResourceUsage& usage) override;\n    void onFailure(const EnvoyException& error) override;\n\n    void update(FlushEpochId flush_epoch);\n\n  private:\n    const std::string name_;\n    ResourceMonitorPtr monitor_;\n    OverloadManagerImpl& manager_;\n    bool pending_update_;\n    FlushEpochId flush_epoch_;\n    Stats::Gauge& pressure_gauge_;\n    Stats::Counter& failed_updates_counter_;\n    Stats::Counter& skipped_updates_counter_;\n  };\n\n  struct ActionCallback {\n    ActionCallback(Event::Dispatcher& dispatcher, OverloadActionCb callback)\n        : dispatcher_(dispatcher), callback_(callback) {}\n    Event::Dispatcher& dispatcher_;\n    OverloadActionCb callback_;\n  };\n\n  void updateResourcePressure(const std::string& resource, double pressure,\n                              FlushEpochId flush_epoch);\n  // Flushes any enqueued action state updates to all worker threads.\n  void flushResourceUpdates();\n\n  bool started_;\n  Event::Dispatcher& dispatcher_;\n  ThreadLocal::SlotPtr tls_;\n  const std::chrono::milliseconds refresh_interval_;\n  
Event::TimerPtr timer_;\n  absl::node_hash_map<std::string, Resource> resources_;\n  absl::node_hash_map<std::string, OverloadAction> actions_;\n\n  absl::flat_hash_map<std::string, OverloadActionState> state_updates_to_flush_;\n  absl::flat_hash_map<ActionCallback*, OverloadActionState> callbacks_to_flush_;\n  FlushEpochId flush_epoch_ = 0;\n  uint64_t flush_awaiting_updates_ = 0;\n\n  using ResourceToActionMap = std::unordered_multimap<std::string, std::string>;\n  ResourceToActionMap resource_to_actions_;\n\n  using ActionToCallbackMap = std::unordered_multimap<std::string, ActionCallback>;\n  ActionToCallbackMap action_to_callbacks_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/process_context_impl.h",
    "content": "#pragma once\n\n#include \"envoy/server/process_context.h\"\n\nnamespace Envoy {\n\nclass ProcessContextImpl : public ProcessContext {\npublic:\n  ProcessContextImpl(ProcessObject& process_object) : process_object_(process_object) {}\n  // ProcessContext\n  ProcessObject& get() const override { return process_object_; }\n\nprivate:\n  ProcessObject& process_object_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/proto_descriptors.cc",
    "content": "#include \"server/proto_descriptors.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/protobuf_link_hacks.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nvoid validateProtoDescriptors() {\n  const auto methods = {\n      \"envoy.api.v2.ClusterDiscoveryService.FetchClusters\",\n      \"envoy.api.v2.ClusterDiscoveryService.StreamClusters\",\n      \"envoy.api.v2.ClusterDiscoveryService.DeltaClusters\",\n      \"envoy.api.v2.EndpointDiscoveryService.FetchEndpoints\",\n      \"envoy.api.v2.EndpointDiscoveryService.StreamEndpoints\",\n      \"envoy.api.v2.EndpointDiscoveryService.DeltaEndpoints\",\n      \"envoy.api.v2.ListenerDiscoveryService.FetchListeners\",\n      \"envoy.api.v2.ListenerDiscoveryService.StreamListeners\",\n      \"envoy.api.v2.ListenerDiscoveryService.DeltaListeners\",\n      \"envoy.api.v2.RouteDiscoveryService.FetchRoutes\",\n      \"envoy.api.v2.RouteDiscoveryService.StreamRoutes\",\n      \"envoy.api.v2.RouteDiscoveryService.DeltaRoutes\",\n      \"envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources\",\n      \"envoy.service.discovery.v2.AggregatedDiscoveryService.DeltaAggregatedResources\",\n      \"envoy.service.discovery.v2.HealthDiscoveryService.FetchHealthCheck\",\n      \"envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck\",\n      \"envoy.service.discovery.v2.RuntimeDiscoveryService.FetchRuntime\",\n      \"envoy.service.discovery.v2.RuntimeDiscoveryService.StreamRuntime\",\n      \"envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit\",\n  };\n\n  for (const auto& method : methods) {\n    RELEASE_ASSERT(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) != nullptr,\n                   absl::StrCat(\"Unable to find method descriptor for \", method));\n  }\n\n  const auto types = {\n      \"envoy.api.v2.Cluster\",           
\"envoy.api.v2.ClusterLoadAssignment\",\n      \"envoy.api.v2.Listener\",          \"envoy.api.v2.RouteConfiguration\",\n      \"envoy.api.v2.route.VirtualHost\", \"envoy.api.v2.auth.Secret\",\n  };\n\n  for (const auto& type : types) {\n    RELEASE_ASSERT(\n        Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(type) != nullptr, \"\");\n  }\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/proto_descriptors.h",
    "content": "#pragma once\n\nnamespace Envoy {\nnamespace Server {\n\n// This function validates that the method descriptors for gRPC services and type descriptors that\n// are referenced in Any messages are available in the descriptor pool.\nvoid validateProtoDescriptors();\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/resource_monitor_config_impl.h",
    "content": "#pragma once\n\n#include \"envoy/server/resource_monitor_config.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nclass ResourceMonitorFactoryContextImpl : public ResourceMonitorFactoryContext {\npublic:\n  ResourceMonitorFactoryContextImpl(Event::Dispatcher& dispatcher, Api::Api& api,\n                                    ProtobufMessage::ValidationVisitor& validation_visitor)\n      : dispatcher_(dispatcher), api_(api), validation_visitor_(validation_visitor) {}\n\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n\n  Api::Api& api() override { return api_; }\n\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override {\n    return validation_visitor_;\n  }\n\nprivate:\n  Event::Dispatcher& dispatcher_;\n  Api::Api& api_;\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/server.cc",
    "content": "#include \"server/server.h\"\n\n#include <csignal>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/bootstrap/v2/bootstrap.pb.h\"\n#include \"envoy/config/bootstrap/v2/bootstrap.pb.validate.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.validate.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/signal.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/bootstrap_extension_config.h\"\n#include \"envoy/server/options.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/common/mutex_tracer_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/http/codes.h\"\n#include \"common/local_info/local_info_impl.h\"\n#include \"common/memory/stats.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_interface.h\"\n#include \"common/network/socket_interface_impl.h\"\n#include \"common/network/tcp_listener_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/rds_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/stats/thread_local_store.h\"\n#include \"common/stats/timespan_impl.h\"\n#include \"common/upstream/cluster_manager_impl.h\"\n#include \"common/version/version.h\"\n\n#include \"server/admin/utils.h\"\n#include \"server/configuration_impl.h\"\n#include \"server/connection_handler_impl.h\"\n#include \"server/guarddog_impl.h\"\n#include \"server/listener_hooks.h\"\n#include 
\"server/ssl_context_manager.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nInstanceImpl::InstanceImpl(\n    Init::Manager& init_manager, const Options& options, Event::TimeSystem& time_system,\n    Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks,\n    HotRestart& restarter, Stats::StoreRoot& store, Thread::BasicLockable& access_log_lock,\n    ComponentFactory& component_factory, Random::RandomGeneratorPtr&& random_generator,\n    ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory,\n    Filesystem::Instance& file_system, std::unique_ptr<ProcessContext> process_context)\n    : init_manager_(init_manager), workers_started_(false), live_(false), shutdown_(false),\n      options_(options), validation_context_(options_.allowUnknownStaticFields(),\n                                             !options.rejectUnknownDynamicFields(),\n                                             options.ignoreUnknownDynamicFields()),\n      time_source_(time_system), restarter_(restarter), start_time_(time(nullptr)),\n      original_start_time_(start_time_), stats_store_(store), thread_local_(tls),\n      random_generator_(std::move(random_generator)),\n      api_(new Api::Impl(thread_factory, store, time_system, file_system, *random_generator_,\n                         process_context ? ProcessContextOptRef(std::ref(*process_context))\n                                         : absl::nullopt)),\n      dispatcher_(api_->allocateDispatcher(\"main_thread\")),\n      singleton_manager_(new Singleton::ManagerImpl(api_->threadFactory())),\n      handler_(new ConnectionHandlerImpl(*dispatcher_, absl::nullopt)),\n      listener_component_factory_(*this), worker_factory_(thread_local_, *api_, hooks),\n      access_log_manager_(options.fileFlushIntervalMsec(), *api_, *dispatcher_, access_log_lock,\n                          store),\n      terminated_(false),\n      mutex_tracer_(options.mutexTracingEnabled() ? 
&Envoy::MutexTracerImpl::getOrCreateTracer()\n                                                  : nullptr),\n      grpc_context_(store.symbolTable()), http_context_(store.symbolTable()),\n      process_context_(std::move(process_context)), main_thread_id_(std::this_thread::get_id()),\n      server_contexts_(*this) {\n  try {\n    if (!options.logPath().empty()) {\n      try {\n        file_logger_ = std::make_unique<Logger::FileSinkDelegate>(\n            options.logPath(), access_log_manager_, Logger::Registry::getSink());\n      } catch (const EnvoyException& e) {\n        throw EnvoyException(\n            fmt::format(\"Failed to open log-file '{}'. e.what(): {}\", options.logPath(), e.what()));\n      }\n    }\n\n    restarter_.initialize(*dispatcher_, *this);\n    drain_manager_ = component_factory.createDrainManager(*this);\n    initialize(options, std::move(local_address), component_factory, hooks);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG(critical, \"error initializing configuration '{}': {}\", options.configPath(),\n              e.what());\n    terminate();\n    throw;\n  } catch (const std::exception& e) {\n    ENVOY_LOG(critical, \"error initializing due to unexpected exception: {}\", e.what());\n    terminate();\n    throw;\n  } catch (...) {\n    ENVOY_LOG(critical, \"error initializing due to unknown exception\");\n    terminate();\n    throw;\n  }\n}\n\nInstanceImpl::~InstanceImpl() {\n  terminate();\n\n  // Stop logging to file before all the AccessLogManager and its dependencies are\n  // destructed to avoid crashing at shutdown.\n  file_logger_.reset();\n\n  // Destruct the ListenerManager explicitly, before InstanceImpl's local init_manager_ is\n  // destructed.\n  //\n  // The ListenerManager's DestinationPortsMap contains FilterChainSharedPtrs. 
There is a rare race\n  // condition where one of these FilterChains contains an HttpConnectionManager, which contains an\n  // RdsRouteConfigProvider, which contains an RdsRouteConfigSubscriptionSharedPtr. Since\n  // RdsRouteConfigSubscription is an Init::Target, ~RdsRouteConfigSubscription triggers a callback\n  // set at initialization, which goes to unregister it from the top-level InitManager, which has\n  // already been destructed (use-after-free) causing a segfault.\n  ENVOY_LOG(debug, \"destroying listener manager\");\n  listener_manager_.reset();\n  ENVOY_LOG(debug, \"destroyed listener manager\");\n}\n\nUpstream::ClusterManager& InstanceImpl::clusterManager() { return *config_.clusterManager(); }\n\nvoid InstanceImpl::drainListeners() {\n  ENVOY_LOG(info, \"closing and draining listeners\");\n  listener_manager_->stopListeners(ListenerManager::StopListenersType::All);\n  drain_manager_->startDrainSequence([] {});\n}\n\nvoid InstanceImpl::failHealthcheck(bool fail) {\n  live_.store(!fail);\n  server_stats_->live_.set(live_.load());\n}\n\nMetricSnapshotImpl::MetricSnapshotImpl(Stats::Store& store) {\n  snapped_counters_ = store.counters();\n  counters_.reserve(snapped_counters_.size());\n  for (const auto& counter : snapped_counters_) {\n    counters_.push_back({counter->latch(), *counter});\n  }\n\n  snapped_gauges_ = store.gauges();\n  gauges_.reserve(snapped_gauges_.size());\n  for (const auto& gauge : snapped_gauges_) {\n    ASSERT(gauge->importMode() != Stats::Gauge::ImportMode::Uninitialized);\n    gauges_.push_back(*gauge);\n  }\n\n  snapped_histograms_ = store.histograms();\n  histograms_.reserve(snapped_histograms_.size());\n  for (const auto& histogram : snapped_histograms_) {\n    histograms_.push_back(*histogram);\n  }\n\n  snapped_text_readouts_ = store.textReadouts();\n  text_readouts_.reserve(snapped_text_readouts_.size());\n  for (const auto& text_readout : snapped_text_readouts_) {\n    text_readouts_.push_back(*text_readout);\n  
}\n}\n\nvoid InstanceUtil::flushMetricsToSinks(const std::list<Stats::SinkPtr>& sinks,\n                                       Stats::Store& store) {\n  // Create a snapshot and flush to all sinks.\n  // NOTE: Even if there are no sinks, creating the snapshot has the important property that it\n  //       latches all counters on a periodic basis. The hot restart code assumes this is being\n  //       done so this should not be removed.\n  MetricSnapshotImpl snapshot(store);\n  for (const auto& sink : sinks) {\n    sink->flush(snapshot);\n  }\n}\n\nvoid InstanceImpl::flushStats() {\n  ENVOY_LOG(debug, \"flushing stats\");\n  // If Envoy is not fully initialized, workers will not be started and mergeHistograms\n  // completion callback is not called immediately. As a result of this server stats will\n  // not be updated and flushed to stat sinks. So skip mergeHistograms call if workers are\n  // not started yet.\n  if (initManager().state() == Init::Manager::State::Initialized) {\n    // A shutdown initiated before this callback may prevent this from being called as per\n    // the semantics documented in ThreadLocal's runOnAllThreads method.\n    stats_store_.mergeHistograms([this]() -> void { flushStatsInternal(); });\n  } else {\n    ENVOY_LOG(debug, \"Envoy is not fully initialized, skipping histogram merge and flushing stats\");\n    flushStatsInternal();\n  }\n}\n\nvoid InstanceImpl::updateServerStats() {\n  // mergeParentStatsIfAny() does nothing and returns a struct of 0s if there is no parent.\n  HotRestart::ServerStatsFromParent parent_stats = restarter_.mergeParentStatsIfAny(stats_store_);\n\n  server_stats_->uptime_.set(time(nullptr) - original_start_time_);\n  server_stats_->memory_allocated_.set(Memory::Stats::totalCurrentlyAllocated() +\n                                       parent_stats.parent_memory_allocated_);\n  server_stats_->memory_heap_size_.set(Memory::Stats::totalCurrentlyReserved());\n  
server_stats_->memory_physical_size_.set(Memory::Stats::totalPhysicalBytes());\n  server_stats_->parent_connections_.set(parent_stats.parent_connections_);\n  server_stats_->total_connections_.set(listener_manager_->numConnections() +\n                                        parent_stats.parent_connections_);\n  server_stats_->days_until_first_cert_expiring_.set(\n      sslContextManager().daysUntilFirstCertExpires());\n\n  auto secs_until_ocsp_response_expires =\n      sslContextManager().secondsUntilFirstOcspResponseExpires();\n  if (secs_until_ocsp_response_expires) {\n    server_stats_->seconds_until_first_ocsp_response_expiring_.set(\n        secs_until_ocsp_response_expires.value());\n  }\n  server_stats_->state_.set(\n      enumToInt(Utility::serverState(initManager().state(), healthCheckFailed())));\n  server_stats_->stats_recent_lookups_.set(\n      stats_store_.symbolTable().getRecentLookups([](absl::string_view, uint64_t) {}));\n}\n\nvoid InstanceImpl::flushStatsInternal() {\n  updateServerStats();\n  InstanceUtil::flushMetricsToSinks(config_.statsSinks(), stats_store_);\n  // TODO(ramaraochavali): consider adding different flush interval for histograms.\n  if (stat_flush_timer_ != nullptr) {\n    stat_flush_timer_->enableTimer(config_.statsFlushInterval());\n  }\n}\n\nbool InstanceImpl::healthCheckFailed() { return !live_.load(); }\n\nnamespace {\n// Loads a bootstrap object, potentially at a specific version (upgrading if necessary).\nvoid loadBootstrap(absl::optional<uint32_t> bootstrap_version,\n                   envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                   std::function<void(Protobuf::Message&, bool)> load_function) {\n\n  if (!bootstrap_version.has_value()) {\n    load_function(bootstrap, true);\n  } else if (*bootstrap_version == 3) {\n    load_function(bootstrap, false);\n  } else if (*bootstrap_version == 2) {\n    envoy::config::bootstrap::v2::Bootstrap bootstrap_v2;\n    load_function(bootstrap_v2, false);\n    
Config::VersionConverter::upgrade(bootstrap_v2, bootstrap);\n    MessageUtil::onVersionUpgradeWarn(\"v2 bootstrap\");\n  } else {\n    throw EnvoyException(fmt::format(\"Unknown bootstrap version {}.\", *bootstrap_version));\n  }\n}\n} // namespace\n\nvoid InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                                       const Options& options,\n                                       ProtobufMessage::ValidationVisitor& validation_visitor,\n                                       Api::Api& api) {\n  const std::string& config_path = options.configPath();\n  const std::string& config_yaml = options.configYaml();\n  const envoy::config::bootstrap::v3::Bootstrap& config_proto = options.configProto();\n\n  // Exactly one of config_path and config_yaml should be specified.\n  if (config_path.empty() && config_yaml.empty() && config_proto.ByteSize() == 0) {\n    throw EnvoyException(\"At least one of --config-path or --config-yaml or Options::configProto() \"\n                         \"should be non-empty\");\n  }\n\n  if (!config_path.empty()) {\n    loadBootstrap(\n        options.bootstrapVersion(), bootstrap,\n        [&config_path, &validation_visitor, &api](Protobuf::Message& message, bool do_boosting) {\n          MessageUtil::loadFromFile(config_path, message, validation_visitor, api, do_boosting);\n        });\n  }\n  if (!config_yaml.empty()) {\n    envoy::config::bootstrap::v3::Bootstrap bootstrap_override;\n    loadBootstrap(\n        options.bootstrapVersion(), bootstrap_override,\n        [&config_yaml, &validation_visitor](Protobuf::Message& message, bool do_boosting) {\n          MessageUtil::loadFromYaml(config_yaml, message, validation_visitor, do_boosting);\n        });\n    // TODO(snowp): The fact that we do a merge here doesn't seem to be covered under test.\n    bootstrap.MergeFrom(bootstrap_override);\n  }\n  if (config_proto.ByteSize() != 0) {\n    bootstrap.MergeFrom(config_proto);\n  }\n 
 MessageUtil::validate(bootstrap, validation_visitor);\n}\n\nvoid InstanceImpl::initialize(const Options& options,\n                              Network::Address::InstanceConstSharedPtr local_address,\n                              ComponentFactory& component_factory, ListenerHooks& hooks) {\n  ENVOY_LOG(info, \"initializing epoch {} (base id={}, hot restart version={})\",\n            options.restartEpoch(), restarter_.baseId(), restarter_.version());\n\n  ENVOY_LOG(info, \"statically linked extensions:\");\n  for (const auto& ext : Envoy::Registry::FactoryCategoryRegistry::registeredFactories()) {\n    ENVOY_LOG(info, \"  {}: {}\", ext.first, absl::StrJoin(ext.second->registeredNames(), \", \"));\n  }\n\n  // Handle configuration that needs to take place prior to the main configuration load.\n  InstanceUtil::loadBootstrapConfig(bootstrap_, options,\n                                    messageValidationContext().staticValidationVisitor(), *api_);\n  bootstrap_config_update_time_ = time_source_.systemTime();\n\n  // Immediate after the bootstrap has been loaded, override the header prefix, if configured to\n  // do so. 
This must be set before any other code block references the HeaderValues ConstSingleton.\n  if (!bootstrap_.header_prefix().empty()) {\n    // setPrefix has a release assert verifying that setPrefix() is not called after prefix()\n    ThreadSafeSingleton<Http::PrefixValue>::get().setPrefix(bootstrap_.header_prefix().c_str());\n  }\n  // TODO(mattklein123): Custom O(1) headers can be registered at this point for creating/finalizing\n  // any header maps.\n  ENVOY_LOG(info, \"HTTP header map info:\");\n  for (const auto& info : Http::HeaderMapImplUtility::getAllHeaderMapImplInfo()) {\n    ENVOY_LOG(info, \"  {}: {} bytes: {}\", info.name_, info.size_,\n              absl::StrJoin(info.registered_headers_, \",\"));\n  }\n\n  // Needs to happen as early as possible in the instantiation to preempt the objects that require\n  // stats.\n  stats_store_.setTagProducer(Config::Utility::createTagProducer(bootstrap_));\n  stats_store_.setStatsMatcher(Config::Utility::createStatsMatcher(bootstrap_));\n  stats_store_.setHistogramSettings(Config::Utility::createHistogramSettings(bootstrap_));\n\n  const std::string server_stats_prefix = \"server.\";\n  server_stats_ = std::make_unique<ServerStats>(\n      ServerStats{ALL_SERVER_STATS(POOL_COUNTER_PREFIX(stats_store_, server_stats_prefix),\n                                   POOL_GAUGE_PREFIX(stats_store_, server_stats_prefix),\n                                   POOL_HISTOGRAM_PREFIX(stats_store_, server_stats_prefix))});\n  validation_context_.staticWarningValidationVisitor().setUnknownCounter(\n      server_stats_->static_unknown_fields_);\n  validation_context_.dynamicWarningValidationVisitor().setUnknownCounter(\n      server_stats_->dynamic_unknown_fields_);\n\n  initialization_timer_ = std::make_unique<Stats::HistogramCompletableTimespanImpl>(\n      server_stats_->initialization_time_ms_, timeSource());\n  server_stats_->concurrency_.set(options_.concurrency());\n  
server_stats_->hot_restart_epoch_.set(options_.restartEpoch());\n\n  assert_action_registration_ = Assert::setDebugAssertionFailureRecordAction(\n      [this]() { server_stats_->debug_assertion_failures_.inc(); });\n  envoy_bug_action_registration_ = Assert::setEnvoyBugFailureRecordAction(\n      [this]() { server_stats_->envoy_bug_failures_.inc(); });\n\n  InstanceImpl::failHealthcheck(false);\n\n  // Check if bootstrap has server version override set, if yes, we should use that as\n  // 'server.version' stat.\n  uint64_t version_int;\n  if (bootstrap_.stats_server_version_override().value() > 0) {\n    version_int = bootstrap_.stats_server_version_override().value();\n  } else {\n    if (!StringUtil::atoull(VersionInfo::revision().substr(0, 6).c_str(), version_int, 16)) {\n      throw EnvoyException(\"compiled GIT SHA is invalid. Invalid build.\");\n    }\n  }\n  server_stats_->version_.set(version_int);\n\n  bootstrap_.mutable_node()->set_hidden_envoy_deprecated_build_version(VersionInfo::version());\n  bootstrap_.mutable_node()->set_user_agent_name(\"envoy\");\n  *bootstrap_.mutable_node()->mutable_user_agent_build_version() = VersionInfo::buildVersion();\n  for (const auto& ext : Envoy::Registry::FactoryCategoryRegistry::registeredFactories()) {\n    for (const auto& name : ext.second->allRegisteredNames()) {\n      auto* extension = bootstrap_.mutable_node()->add_extensions();\n      extension->set_name(std::string(name));\n      extension->set_category(ext.first);\n      auto const version = ext.second->getFactoryVersion(name);\n      if (version) {\n        *extension->mutable_version() = version.value();\n      }\n      extension->set_disabled(ext.second->isFactoryDisabled(name));\n    }\n  }\n\n  local_info_ = std::make_unique<LocalInfo::LocalInfoImpl>(\n      bootstrap_.node(), local_address, options.serviceZone(), options.serviceClusterName(),\n      options.serviceNodeName());\n\n  Configuration::InitialImpl initial_config(bootstrap_);\n\n  // Learn 
original_start_time_ if our parent is still around to inform us of it.\n  restarter_.sendParentAdminShutdownRequest(original_start_time_);\n  admin_ = std::make_unique<AdminImpl>(initial_config.admin().profilePath(), *this);\n\n  loadServerFlags(initial_config.flagsPath());\n\n  secret_manager_ = std::make_unique<Secret::SecretManagerImpl>(admin_->getConfigTracker());\n\n  // Initialize the overload manager early so other modules can register for actions.\n  overload_manager_ = std::make_unique<OverloadManagerImpl>(\n      *dispatcher_, stats_store_, thread_local_, bootstrap_.overload_manager(),\n      messageValidationContext().staticValidationVisitor(), *api_);\n\n  heap_shrinker_ =\n      std::make_unique<Memory::HeapShrinker>(*dispatcher_, *overload_manager_, stats_store_);\n\n  for (const auto& bootstrap_extension : bootstrap_.bootstrap_extensions()) {\n    auto& factory = Config::Utility::getAndCheckFactory<Configuration::BootstrapExtensionFactory>(\n        bootstrap_extension);\n    auto config = Config::Utility::translateAnyToFactoryConfig(\n        bootstrap_extension.typed_config(), messageValidationContext().staticValidationVisitor(),\n        factory);\n    bootstrap_extensions_.push_back(\n        factory.createBootstrapExtension(*config, serverFactoryContext()));\n  }\n\n  if (!bootstrap_.default_socket_interface().empty()) {\n    auto& sock_name = bootstrap_.default_socket_interface();\n    auto sock = const_cast<Network::SocketInterface*>(Network::socketInterface(sock_name));\n    if (sock != nullptr) {\n      Network::SocketInterfaceSingleton::clear();\n      Network::SocketInterfaceSingleton::initialize(sock);\n    }\n  }\n\n  // Workers get created first so they register for thread local updates.\n  listener_manager_ = std::make_unique<ListenerManagerImpl>(\n      *this, listener_component_factory_, worker_factory_, bootstrap_.enable_dispatcher_stats());\n\n  // The main thread is also registered for thread local updates so that code that does 
not care\n  // whether it runs on the main thread or on workers can still use TLS.\n  thread_local_.registerThread(*dispatcher_, true);\n\n  // We can now initialize stats for threading.\n  stats_store_.initializeThreading(*dispatcher_, thread_local_);\n\n  // It's now safe to start writing stats from the main thread's dispatcher.\n  if (bootstrap_.enable_dispatcher_stats()) {\n    dispatcher_->initializeStats(stats_store_, \"server.\");\n  }\n\n  if (initial_config.admin().address()) {\n    if (initial_config.admin().accessLogPath().empty()) {\n      throw EnvoyException(\"An admin access log path is required for a listening server.\");\n    }\n    ENVOY_LOG(info, \"admin address: {}\", initial_config.admin().address()->asString());\n    admin_->startHttpListener(initial_config.admin().accessLogPath(), options.adminAddressPath(),\n                              initial_config.admin().address(),\n                              initial_config.admin().socketOptions(),\n                              stats_store_.createScope(\"listener.admin.\"));\n  } else {\n    ENVOY_LOG(warn, \"No admin address given, so no admin HTTP server started.\");\n  }\n  config_tracker_entry_ =\n      admin_->getConfigTracker().add(\"bootstrap\", [this] { return dumpBootstrapConfig(); });\n  if (initial_config.admin().address()) {\n    admin_->addListenerToHandler(handler_.get());\n  }\n\n  // The broad order of initialization from this point on is the following:\n  // 1. Statically provisioned configuration (bootstrap) are loaded.\n  // 2. Cluster manager is created and all primary clusters (i.e. with endpoint assignments\n  //    provisioned statically in bootstrap, discovered through DNS or file based CDS) are\n  //    initialized.\n  // 3. Various services are initialized and configured using the bootstrap config.\n  // 4. RTDS is initialized using primary clusters. This  allows runtime overrides to be fully\n  //    configured before the rest of xDS configuration is provisioned.\n  // 5. 
Secondary clusters (with endpoint assignments provisioned by xDS servers) are initialized.\n  // 6. The rest of the dynamic configuration is provisioned.\n  //\n  // Please note: this order requires that RTDS is provisioned using a primary cluster. If RTDS is\n  // provisioned through ADS then ADS must use primary cluster as well. This invariant is enforced\n  // during RTDS initialization and invalid configuration will be rejected.\n\n  // Runtime gets initialized before the main configuration since during main configuration\n  // load things may grab a reference to the loader for later use.\n  runtime_singleton_ = std::make_unique<Runtime::ScopedLoaderSingleton>(\n      component_factory.createRuntime(*this, initial_config));\n  hooks.onRuntimeCreated();\n\n  // Once we have runtime we can initialize the SSL context manager.\n  ssl_context_manager_ = createContextManager(\"ssl_context_manager\", time_source_);\n\n  const bool use_tcp_for_dns_lookups = bootstrap_.use_tcp_for_dns_lookups();\n  dns_resolver_ = dispatcher_->createDnsResolver({}, use_tcp_for_dns_lookups);\n\n  cluster_manager_factory_ = std::make_unique<Upstream::ProdClusterManagerFactory>(\n      *admin_, Runtime::LoaderSingleton::get(), stats_store_, thread_local_, dns_resolver_,\n      *ssl_context_manager_, *dispatcher_, *local_info_, *secret_manager_,\n      messageValidationContext(), *api_, http_context_, grpc_context_, access_log_manager_,\n      *singleton_manager_);\n\n  // Now the configuration gets parsed. The configuration may start setting\n  // thread local data per above. See MainImpl::initialize() for why ConfigImpl\n  // is constructed as part of the InstanceImpl and then populated once\n  // cluster_manager_factory_ is available.\n  config_.initialize(bootstrap_, *this, *cluster_manager_factory_);\n\n  // Instruct the listener manager to create the LDS provider if needed. 
This must be done later\n  // because various items do not yet exist when the listener manager is created.\n  if (bootstrap_.dynamic_resources().has_lds_config() ||\n      bootstrap_.dynamic_resources().has_lds_resources_locator()) {\n    listener_manager_->createLdsApi(bootstrap_.dynamic_resources().lds_config(),\n                                    bootstrap_.dynamic_resources().has_lds_resources_locator()\n                                        ? &bootstrap_.dynamic_resources().lds_resources_locator()\n                                        : nullptr);\n  }\n\n  // We have to defer RTDS initialization until after the cluster manager is\n  // instantiated (which in turn relies on runtime...).\n  Runtime::LoaderSingleton::get().initialize(clusterManager());\n\n  clusterManager().setPrimaryClustersInitializedCb(\n      [this]() { onClusterManagerPrimaryInitializationComplete(); });\n\n  for (Stats::SinkPtr& sink : config_.statsSinks()) {\n    stats_store_.addSink(*sink);\n  }\n\n  // Some of the stat sinks may need dispatcher support so don't flush until the main loop starts.\n  // Just setup the timer.\n  stat_flush_timer_ = dispatcher_->createTimer([this]() -> void { flushStats(); });\n  stat_flush_timer_->enableTimer(config_.statsFlushInterval());\n\n  // GuardDog (deadlock detection) object and thread setup before workers are\n  // started and before our own run() loop runs.\n  main_thread_guard_dog_ = std::make_unique<Server::GuardDogImpl>(\n      stats_store_, config_.mainThreadWatchdogConfig(), *api_, \"main_thread\");\n  worker_guard_dog_ = std::make_unique<Server::GuardDogImpl>(\n      stats_store_, config_.workerWatchdogConfig(), *api_, \"workers\");\n}\n\nvoid InstanceImpl::onClusterManagerPrimaryInitializationComplete() {\n  // If RTDS was not configured the `onRuntimeReady` callback is immediately invoked.\n  Runtime::LoaderSingleton::get().startRtdsSubscriptions([this]() { onRuntimeReady(); });\n}\n\nvoid InstanceImpl::onRuntimeReady() {\n  // Begin 
initializing secondary clusters after RTDS configuration has been applied.\n  clusterManager().initializeSecondaryClusters(bootstrap_);\n\n  if (bootstrap_.has_hds_config()) {\n    const auto& hds_config = bootstrap_.hds_config();\n    async_client_manager_ = std::make_unique<Grpc::AsyncClientManagerImpl>(\n        *config_.clusterManager(), thread_local_, time_source_, *api_, grpc_context_.statNames());\n    hds_delegate_ = std::make_unique<Upstream::HdsDelegate>(\n        stats_store_,\n        Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, hds_config,\n                                                       stats_store_, false)\n            ->create(),\n        hds_config.transport_api_version(), *dispatcher_, Runtime::LoaderSingleton::get(),\n        stats_store_, *ssl_context_manager_, info_factory_, access_log_manager_,\n        *config_.clusterManager(), *local_info_, *admin_, *singleton_manager_, thread_local_,\n        messageValidationContext().dynamicValidationVisitor(), *api_);\n  }\n\n  // If there is no global limit to the number of active connections, warn on startup.\n  // TODO (tonya11en): Move this functionality into the overload manager.\n  if (!runtime().snapshot().get(Network::TcpListenerImpl::GlobalMaxCxRuntimeKey)) {\n    ENVOY_LOG(warn,\n              \"there is no configured limit to the number of allowed active connections. Set a \"\n              \"limit via the runtime key {}\",\n              Network::TcpListenerImpl::GlobalMaxCxRuntimeKey);\n  }\n}\n\nvoid InstanceImpl::startWorkers() {\n  listener_manager_->startWorkers(*worker_guard_dog_);\n  initialization_timer_->complete();\n  // Update server stats as soon as initialization is done.\n  updateServerStats();\n  workers_started_ = true;\n  // At this point we are ready to take traffic and all listening ports are up. 
Notify our parent\n  // if applicable that they can stop listening and drain.\n  restarter_.drainParentListeners();\n  drain_manager_->startParentShutdownSequence();\n}\n\nRuntime::LoaderPtr InstanceUtil::createRuntime(Instance& server,\n                                               Server::Configuration::Initial& config) {\n  ENVOY_LOG(info, \"runtime: {}\", MessageUtil::getYamlStringFromMessage(config.runtime()));\n  return std::make_unique<Runtime::LoaderImpl>(\n      server.dispatcher(), server.threadLocal(), config.runtime(), server.localInfo(),\n      server.stats(), server.api().randomGenerator(),\n      server.messageValidationContext().dynamicValidationVisitor(), server.api());\n}\n\nvoid InstanceImpl::loadServerFlags(const absl::optional<std::string>& flags_path) {\n  if (!flags_path) {\n    return;\n  }\n\n  ENVOY_LOG(info, \"server flags path: {}\", flags_path.value());\n  if (api_->fileSystem().fileExists(flags_path.value() + \"/drain\")) {\n    ENVOY_LOG(info, \"starting server in drain mode\");\n    InstanceImpl::failHealthcheck(true);\n  }\n}\n\nRunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatcher& dispatcher,\n                     Upstream::ClusterManager& cm, AccessLog::AccessLogManager& access_log_manager,\n                     Init::Manager& init_manager, OverloadManager& overload_manager,\n                     std::function<void()> post_init_cb)\n    : init_watcher_(\"RunHelper\", [&instance, post_init_cb]() {\n        if (!instance.isShutdown()) {\n          post_init_cb();\n        }\n      }) {\n  // Setup signals.\n  if (options.signalHandlingEnabled()) {\n// TODO(Pivotal): Figure out solution to graceful shutdown on Windows. 
None of these signals exist\n// on Windows.\n#ifndef WIN32\n    sigterm_ = dispatcher.listenForSignal(SIGTERM, [&instance]() {\n      ENVOY_LOG(warn, \"caught SIGTERM\");\n      instance.shutdown();\n    });\n\n    sigint_ = dispatcher.listenForSignal(SIGINT, [&instance]() {\n      ENVOY_LOG(warn, \"caught SIGINT\");\n      instance.shutdown();\n    });\n\n    sig_usr_1_ = dispatcher.listenForSignal(SIGUSR1, [&access_log_manager]() {\n      ENVOY_LOG(info, \"caught SIGUSR1. Reopening access logs.\");\n      access_log_manager.reopen();\n    });\n\n    sig_hup_ = dispatcher.listenForSignal(SIGHUP, []() {\n      ENVOY_LOG(warn, \"caught and eating SIGHUP. See documentation for how to hot restart.\");\n    });\n#endif\n  }\n\n  // Start overload manager before workers.\n  overload_manager.start();\n\n  // Register for cluster manager init notification. We don't start serving worker traffic until\n  // upstream clusters are initialized which may involve running the event loop. Note however that\n  // this can fire immediately if all clusters have already initialized. Also note that we need\n  // to guard against shutdown at two different levels since SIGTERM can come in once the run loop\n  // starts.\n  cm.setInitializedCb([&instance, &init_manager, &cm, this]() {\n    if (instance.isShutdown()) {\n      return;\n    }\n\n    const auto type_urls =\n        Config::getAllVersionTypeUrls<envoy::config::route::v3::RouteConfiguration>();\n    // Pause RDS to ensure that we don't send any requests until we've\n    // subscribed to all the RDS resources. The subscriptions happen in the init callbacks,\n    // so we pause RDS until we've completed all the callbacks.\n    Config::ScopedResume maybe_resume_rds;\n    if (cm.adsMux()) {\n      maybe_resume_rds = cm.adsMux()->pause(type_urls);\n    }\n\n    ENVOY_LOG(info, \"all clusters initialized. 
initializing init manager\");\n    init_manager.initialize(init_watcher_);\n\n    // Now that we're execute all the init callbacks we can resume RDS\n    // as we've subscribed to all the statically defined RDS resources.\n    // This is done by tearing down the maybe_resume_rds Cleanup object.\n  });\n}\n\nvoid InstanceImpl::run() {\n  // RunHelper exists primarily to facilitate testing of how we respond to early shutdown during\n  // startup (see RunHelperTest in server_test.cc).\n  const auto run_helper = RunHelper(*this, options_, *dispatcher_, clusterManager(),\n                                    access_log_manager_, init_manager_, overloadManager(), [this] {\n                                      notifyCallbacksForStage(Stage::PostInit);\n                                      startWorkers();\n                                    });\n\n  // Run the main dispatch loop waiting to exit.\n  ENVOY_LOG(info, \"starting main dispatch loop\");\n  auto watchdog = main_thread_guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(),\n                                                         \"main_thread\");\n  watchdog->startWatchdog(*dispatcher_);\n  dispatcher_->post([this] { notifyCallbacksForStage(Stage::Startup); });\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n  ENVOY_LOG(info, \"main dispatch loop exited\");\n  main_thread_guard_dog_->stopWatching(watchdog);\n  watchdog.reset();\n\n  terminate();\n}\n\nvoid InstanceImpl::terminate() {\n  if (terminated_) {\n    return;\n  }\n  terminated_ = true;\n\n  // Before starting to shutdown anything else, stop slot destruction updates.\n  thread_local_.shutdownGlobalThreading();\n\n  // Before the workers start exiting we should disable stat threading.\n  stats_store_.shutdownThreading();\n\n  if (overload_manager_) {\n    overload_manager_->stop();\n  }\n\n  // Shutdown all the workers now that the main dispatch loop is done.\n  if (listener_manager_ != nullptr) {\n    // Also shutdown the listener 
manager's ApiListener, if there is one, which runs on the main\n    // thread. This needs to happen ahead of calling thread_local_.shutdown() below to prevent any\n    // objects in the ApiListener destructor to reference any objects in thread local storage.\n    if (listener_manager_->apiListener().has_value()) {\n      listener_manager_->apiListener()->get().shutdown();\n    }\n\n    listener_manager_->stopWorkers();\n  }\n\n  // Only flush if we have not been hot restarted.\n  if (stat_flush_timer_) {\n    flushStats();\n  }\n\n  if (config_.clusterManager() != nullptr) {\n    config_.clusterManager()->shutdown();\n  }\n  handler_.reset();\n  thread_local_.shutdownThread();\n  restarter_.shutdown();\n  ENVOY_LOG(info, \"exiting\");\n  ENVOY_FLUSH_LOG();\n}\n\nRuntime::Loader& InstanceImpl::runtime() { return Runtime::LoaderSingleton::get(); }\n\nvoid InstanceImpl::shutdown() {\n  ENVOY_LOG(info, \"shutting down server instance\");\n  shutdown_ = true;\n  restarter_.sendParentTerminateRequest();\n  notifyCallbacksForStage(Stage::ShutdownExit, [this] { dispatcher_->exit(); });\n}\n\nvoid InstanceImpl::shutdownAdmin() {\n  ENVOY_LOG(warn, \"shutting down admin due to child startup\");\n  stat_flush_timer_.reset();\n  handler_->stopListeners();\n  admin_->closeSocket();\n\n  // If we still have a parent, it should be terminated now that we have a child.\n  ENVOY_LOG(warn, \"terminating parent process\");\n  restarter_.sendParentTerminateRequest();\n}\n\nServerLifecycleNotifier::HandlePtr InstanceImpl::registerCallback(Stage stage,\n                                                                  StageCallback callback) {\n  auto& callbacks = stage_callbacks_[stage];\n  return std::make_unique<LifecycleCallbackHandle<StageCallback>>(callbacks, callback);\n}\n\nServerLifecycleNotifier::HandlePtr\nInstanceImpl::registerCallback(Stage stage, StageCallbackWithCompletion callback) {\n  ASSERT(stage == Stage::ShutdownExit);\n  auto& callbacks = 
stage_completable_callbacks_[stage];\n  return std::make_unique<LifecycleCallbackHandle<StageCallbackWithCompletion>>(callbacks,\n                                                                                callback);\n}\n\nvoid InstanceImpl::notifyCallbacksForStage(Stage stage, Event::PostCb completion_cb) {\n  ASSERT(std::this_thread::get_id() == main_thread_id_);\n  const auto it = stage_callbacks_.find(stage);\n  if (it != stage_callbacks_.end()) {\n    for (const StageCallback& callback : it->second) {\n      callback();\n    }\n  }\n\n  // Wrap completion_cb so that it only gets invoked when all callbacks for this stage\n  // have finished their work.\n  std::shared_ptr<void> cb_guard(\n      new Cleanup([this, completion_cb]() { dispatcher_->post(completion_cb); }));\n\n  // Registrations which take a completion callback are typically implemented by executing a\n  // callback on all worker threads using Slot::runOnAllThreads which will hang indefinitely if\n  // worker threads have not been started so we need to skip notifications if envoy is shutdown\n  // early before workers have started.\n  if (workers_started_) {\n    const auto it2 = stage_completable_callbacks_.find(stage);\n    if (it2 != stage_completable_callbacks_.end()) {\n      ENVOY_LOG(info, \"Notifying {} callback(s) with completion.\", it2->second.size());\n      for (const StageCallbackWithCompletion& callback : it2->second) {\n        callback([cb_guard] {});\n      }\n    }\n  }\n}\n\nProtobufTypes::MessagePtr InstanceImpl::dumpBootstrapConfig() {\n  auto config_dump = std::make_unique<envoy::admin::v3::BootstrapConfigDump>();\n  config_dump->mutable_bootstrap()->MergeFrom(bootstrap_);\n  TimestampUtil::systemClockToTimestamp(bootstrap_config_update_time_,\n                                        *(config_dump->mutable_last_updated()));\n  return config_dump;\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/server.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/server/bootstrap_extension_config.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/guarddog.h\"\n#include \"envoy/server/instance.h\"\n#include \"envoy/server/process_context.h\"\n#include \"envoy/server/tracer_config.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/stats_macros.h\"\n#include \"envoy/stats/timespan.h\"\n#include \"envoy/tracing/http_tracer.h\"\n\n#include \"common/access_log/access_log_manager_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/cleanup.h\"\n#include \"common/common/logger_delegates.h\"\n#include \"common/grpc/async_client_manager_impl.h\"\n#include \"common/grpc/context_impl.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/memory/heap_shrinker.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/secret/secret_manager_impl.h\"\n#include \"common/upstream/health_discovery_service.h\"\n\n#include \"server/admin/admin.h\"\n#include \"server/configuration_impl.h\"\n#include \"server/listener_hooks.h\"\n#include \"server/listener_manager_impl.h\"\n#include \"server/overload_manager_impl.h\"\n#include \"server/worker_impl.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * All server wide stats. 
@see stats_macros.h\n */\n#define ALL_SERVER_STATS(COUNTER, GAUGE, HISTOGRAM)                                                \\\n  COUNTER(debug_assertion_failures)                                                                \\\n  COUNTER(envoy_bug_failures)                                                                      \\\n  COUNTER(dynamic_unknown_fields)                                                                  \\\n  COUNTER(static_unknown_fields)                                                                   \\\n  GAUGE(concurrency, NeverImport)                                                                  \\\n  GAUGE(days_until_first_cert_expiring, Accumulate)                                                \\\n  GAUGE(seconds_until_first_ocsp_response_expiring, Accumulate)                                    \\\n  GAUGE(hot_restart_epoch, NeverImport)                                                            \\\n  /* hot_restart_generation is an Accumulate gauge; we omit it here for testing dynamics. 
*/       \\\n  GAUGE(live, NeverImport)                                                                         \\\n  GAUGE(memory_allocated, Accumulate)                                                              \\\n  GAUGE(memory_heap_size, Accumulate)                                                              \\\n  GAUGE(memory_physical_size, Accumulate)                                                          \\\n  GAUGE(parent_connections, Accumulate)                                                            \\\n  GAUGE(state, NeverImport)                                                                        \\\n  GAUGE(stats_recent_lookups, NeverImport)                                                         \\\n  GAUGE(total_connections, Accumulate)                                                             \\\n  GAUGE(uptime, Accumulate)                                                                        \\\n  GAUGE(version, NeverImport)                                                                      \\\n  HISTOGRAM(initialization_time_ms, Milliseconds)\n\nstruct ServerStats {\n  ALL_SERVER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)\n};\n\n/**\n * Interface for creating service components during boot.\n */\nclass ComponentFactory {\npublic:\n  virtual ~ComponentFactory() = default;\n\n  /**\n   * @return DrainManagerPtr a new drain manager for the server.\n   */\n  virtual DrainManagerPtr createDrainManager(Instance& server) PURE;\n\n  /**\n   * @return Runtime::LoaderPtr the runtime implementation for the server.\n   */\n  virtual Runtime::LoaderPtr createRuntime(Instance& server, Configuration::Initial& config) PURE;\n};\n\n/**\n * Helpers used during server creation.\n */\nclass InstanceUtil : Logger::Loggable<Logger::Id::main> {\npublic:\n  /**\n   * Default implementation of runtime loader creation used in the real server and in most\n   * integration tests where a mock runtime is not needed.\n   
*/\n  static Runtime::LoaderPtr createRuntime(Instance& server, Server::Configuration::Initial& config);\n\n  /**\n   * Helper for flushing counters, gauges and histograms to sinks. This takes care of calling\n   * flush() on each sink.\n   * @param sinks supplies the list of sinks.\n   * @param store provides the store being flushed.\n   */\n  static void flushMetricsToSinks(const std::list<Stats::SinkPtr>& sinks, Stats::Store& store);\n\n  /**\n   * Load a bootstrap config and perform validation.\n   * @param bootstrap supplies the bootstrap to fill.\n   * @param options supplies the server options.\n   * @param validation_visitor message validation visitor instance.\n   * @param api reference to the Api object\n   */\n  static void loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                                  const Options& options,\n                                  ProtobufMessage::ValidationVisitor& validation_visitor,\n                                  Api::Api& api);\n};\n\n/**\n * This is a helper used by InstanceImpl::run() on the stack. 
It's broken out to make testing\n * easier.\n */\nclass RunHelper : Logger::Loggable<Logger::Id::main> {\npublic:\n  RunHelper(Instance& instance, const Options& options, Event::Dispatcher& dispatcher,\n            Upstream::ClusterManager& cm, AccessLog::AccessLogManager& access_log_manager,\n            Init::Manager& init_manager, OverloadManager& overload_manager,\n            std::function<void()> workers_start_cb);\n\nprivate:\n  Init::WatcherImpl init_watcher_;\n  Event::SignalEventPtr sigterm_;\n  Event::SignalEventPtr sigint_;\n  Event::SignalEventPtr sig_usr_1_;\n  Event::SignalEventPtr sig_hup_;\n};\n\n// ServerFactoryContextImpl implements both ServerFactoryContext and\n// TransportSocketFactoryContext for convenience as these two contexts\n// share common member functions and member variables.\nclass ServerFactoryContextImpl : public Configuration::ServerFactoryContext,\n                                 public Configuration::TransportSocketFactoryContext {\npublic:\n  explicit ServerFactoryContextImpl(Instance& server)\n      : server_(server), server_scope_(server_.stats().createScope(\"\")) {}\n\n  // Configuration::ServerFactoryContext\n  Upstream::ClusterManager& clusterManager() override { return server_.clusterManager(); }\n  Event::Dispatcher& dispatcher() override { return server_.dispatcher(); }\n  const LocalInfo::LocalInfo& localInfo() const override { return server_.localInfo(); }\n  ProtobufMessage::ValidationContext& messageValidationContext() override {\n    return server_.messageValidationContext();\n  }\n  Envoy::Runtime::Loader& runtime() override { return server_.runtime(); }\n  Stats::Scope& scope() override { return *server_scope_; }\n  Singleton::Manager& singletonManager() override { return server_.singletonManager(); }\n  ThreadLocal::Instance& threadLocal() override { return server_.threadLocal(); }\n  Admin& admin() override { return server_.admin(); }\n  TimeSource& timeSource() override { return api().timeSource(); }\n  
Api::Api& api() override { return server_.api(); }\n  Grpc::Context& grpcContext() override { return server_.grpcContext(); }\n  Envoy::Server::DrainManager& drainManager() override { return server_.drainManager(); }\n  ServerLifecycleNotifier& lifecycleNotifier() override { return server_.lifecycleNotifier(); }\n  std::chrono::milliseconds statsFlushInterval() const override {\n    return server_.statsFlushInterval();\n  }\n\n  // Configuration::TransportSocketFactoryContext\n  Ssl::ContextManager& sslContextManager() override { return server_.sslContextManager(); }\n  Secret::SecretManager& secretManager() override { return server_.secretManager(); }\n  Stats::Store& stats() override { return server_.stats(); }\n  Init::Manager& initManager() override { return server_.initManager(); }\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override {\n    // Server has two message validation visitors, one for static and\n    // other for dynamic configuration. Choose the dynamic validation\n    // visitor if server's init manager indicates that the server is\n    // in the Initialized state, as this state is engaged right after\n    // the static configuration (e.g., bootstrap) has been completed.\n    return initManager().state() == Init::Manager::State::Initialized\n               ? 
server_.messageValidationContext().dynamicValidationVisitor()\n               : server_.messageValidationContext().staticValidationVisitor();\n  }\n\nprivate:\n  Instance& server_;\n  Stats::ScopePtr server_scope_;\n};\n\n/**\n * This is the actual full standalone server which stitches together various common components.\n */\nclass InstanceImpl final : Logger::Loggable<Logger::Id::main>,\n                           public Instance,\n                           public ServerLifecycleNotifier {\npublic:\n  /**\n   * @throw EnvoyException if initialization fails.\n   */\n  InstanceImpl(Init::Manager& init_manager, const Options& options, Event::TimeSystem& time_system,\n               Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks,\n               HotRestart& restarter, Stats::StoreRoot& store,\n               Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory,\n               Random::RandomGeneratorPtr&& random_generator, ThreadLocal::Instance& tls,\n               Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system,\n               std::unique_ptr<ProcessContext> process_context);\n\n  ~InstanceImpl() override;\n\n  void run();\n\n  // Server::Instance\n  Admin& admin() override { return *admin_; }\n  Api::Api& api() override { return *api_; }\n  Upstream::ClusterManager& clusterManager() override;\n  Ssl::ContextManager& sslContextManager() override { return *ssl_context_manager_; }\n  Event::Dispatcher& dispatcher() override { return *dispatcher_; }\n  Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; }\n  void drainListeners() override;\n  DrainManager& drainManager() override { return *drain_manager_; }\n  AccessLog::AccessLogManager& accessLogManager() override { return access_log_manager_; }\n  void failHealthcheck(bool fail) override;\n  HotRestart& hotRestart() override { return restarter_; }\n  Init::Manager& initManager() override { return init_manager_; 
}\n  ServerLifecycleNotifier& lifecycleNotifier() override { return *this; }\n  ListenerManager& listenerManager() override { return *listener_manager_; }\n  Secret::SecretManager& secretManager() override { return *secret_manager_; }\n  Envoy::MutexTracer* mutexTracer() override { return mutex_tracer_; }\n  OverloadManager& overloadManager() override { return *overload_manager_; }\n  Runtime::Loader& runtime() override;\n  void shutdown() override;\n  bool isShutdown() final { return shutdown_; }\n  void shutdownAdmin() override;\n  Singleton::Manager& singletonManager() override { return *singleton_manager_; }\n  bool healthCheckFailed() override;\n  const Options& options() override { return options_; }\n  time_t startTimeCurrentEpoch() override { return start_time_; }\n  time_t startTimeFirstEpoch() override { return original_start_time_; }\n  Stats::Store& stats() override { return stats_store_; }\n  Grpc::Context& grpcContext() override { return grpc_context_; }\n  Http::Context& httpContext() override { return http_context_; }\n  ProcessContextOptRef processContext() override { return *process_context_; }\n  ThreadLocal::Instance& threadLocal() override { return thread_local_; }\n  const LocalInfo::LocalInfo& localInfo() const override { return *local_info_; }\n  TimeSource& timeSource() override { return time_source_; }\n  void flushStats() override;\n\n  Configuration::ServerFactoryContext& serverFactoryContext() override { return server_contexts_; }\n\n  Configuration::TransportSocketFactoryContext& transportSocketFactoryContext() override {\n    return server_contexts_;\n  }\n\n  std::chrono::milliseconds statsFlushInterval() const override {\n    return config_.statsFlushInterval();\n  }\n\n  ProtobufMessage::ValidationContext& messageValidationContext() override {\n    return validation_context_;\n  }\n\n  void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) override {\n    
http_context_.setDefaultTracingConfig(tracing_config);\n  }\n\n  // ServerLifecycleNotifier\n  ServerLifecycleNotifier::HandlePtr registerCallback(Stage stage, StageCallback callback) override;\n  ServerLifecycleNotifier::HandlePtr\n  registerCallback(Stage stage, StageCallbackWithCompletion callback) override;\n\nprivate:\n  ProtobufTypes::MessagePtr dumpBootstrapConfig();\n  void flushStatsInternal();\n  void updateServerStats();\n  void initialize(const Options& options, Network::Address::InstanceConstSharedPtr local_address,\n                  ComponentFactory& component_factory, ListenerHooks& hooks);\n  void loadServerFlags(const absl::optional<std::string>& flags_path);\n  void startWorkers();\n  void terminate();\n  void notifyCallbacksForStage(\n      Stage stage, Event::PostCb completion_cb = [] {});\n  void onRuntimeReady();\n  void onClusterManagerPrimaryInitializationComplete();\n\n  using LifecycleNotifierCallbacks = std::list<StageCallback>;\n  using LifecycleNotifierCompletionCallbacks = std::list<StageCallbackWithCompletion>;\n\n  // init_manager_ must come before any member that participates in initialization, and destructed\n  // only after referencing members are gone, since initialization continuation can potentially\n  // occur at any point during member lifetime. 
This init manager is populated with LdsApi targets.\n  Init::Manager& init_manager_;\n  // secret_manager_ must come before listener_manager_, config_ and dispatcher_, and destructed\n  // only after these members can no longer reference it, since:\n  // - There may be active filter chains referencing it in listener_manager_.\n  // - There may be active clusters referencing it in config_.cluster_manager_.\n  // - There may be active connections referencing it.\n  std::unique_ptr<Secret::SecretManager> secret_manager_;\n  bool workers_started_;\n  std::atomic<bool> live_;\n  bool shutdown_;\n  const Options& options_;\n  ProtobufMessage::ProdValidationContextImpl validation_context_;\n  TimeSource& time_source_;\n  // Delete local_info_ as late as possible as some members below may reference it during their\n  // destruction.\n  LocalInfo::LocalInfoPtr local_info_;\n  HotRestart& restarter_;\n  const time_t start_time_;\n  time_t original_start_time_;\n  Stats::StoreRoot& stats_store_;\n  std::unique_ptr<ServerStats> server_stats_;\n  Assert::ActionRegistrationPtr assert_action_registration_;\n  Assert::ActionRegistrationPtr envoy_bug_action_registration_;\n  ThreadLocal::Instance& thread_local_;\n  Random::RandomGeneratorPtr random_generator_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  std::unique_ptr<AdminImpl> admin_;\n  Singleton::ManagerPtr singleton_manager_;\n  Network::ConnectionHandlerPtr handler_;\n  std::unique_ptr<Runtime::ScopedLoaderSingleton> runtime_singleton_;\n  std::unique_ptr<Ssl::ContextManager> ssl_context_manager_;\n  ProdListenerComponentFactory listener_component_factory_;\n  ProdWorkerFactory worker_factory_;\n  std::unique_ptr<ListenerManager> listener_manager_;\n  absl::node_hash_map<Stage, LifecycleNotifierCallbacks> stage_callbacks_;\n  absl::node_hash_map<Stage, LifecycleNotifierCompletionCallbacks> stage_completable_callbacks_;\n  Configuration::MainImpl config_;\n  Network::DnsResolverSharedPtr dns_resolver_;\n  
Event::TimerPtr stat_flush_timer_;\n  DrainManagerPtr drain_manager_;\n  AccessLog::AccessLogManagerImpl access_log_manager_;\n  std::unique_ptr<Upstream::ClusterManagerFactory> cluster_manager_factory_;\n  std::unique_ptr<Server::GuardDog> main_thread_guard_dog_;\n  std::unique_ptr<Server::GuardDog> worker_guard_dog_;\n  bool terminated_;\n  std::unique_ptr<Logger::FileSinkDelegate> file_logger_;\n  envoy::config::bootstrap::v3::Bootstrap bootstrap_;\n  ConfigTracker::EntryOwnerPtr config_tracker_entry_;\n  SystemTime bootstrap_config_update_time_;\n  Grpc::AsyncClientManagerPtr async_client_manager_;\n  Upstream::ProdClusterInfoFactory info_factory_;\n  Upstream::HdsDelegatePtr hds_delegate_;\n  std::unique_ptr<OverloadManagerImpl> overload_manager_;\n  std::vector<BootstrapExtensionPtr> bootstrap_extensions_;\n  Envoy::MutexTracer* mutex_tracer_;\n  Grpc::ContextImpl grpc_context_;\n  Http::ContextImpl http_context_;\n  std::unique_ptr<ProcessContext> process_context_;\n  std::unique_ptr<Memory::HeapShrinker> heap_shrinker_;\n  const std::thread::id main_thread_id_;\n  // initialization_time is a histogram for tracking the initialization time across hot restarts\n  // whenever we have support for histogram merge across hot restarts.\n  Stats::TimespanPtr initialization_timer_;\n\n  ServerFactoryContextImpl server_contexts_;\n\n  template <class T>\n  class LifecycleCallbackHandle : public ServerLifecycleNotifier::Handle, RaiiListElement<T> {\n  public:\n    LifecycleCallbackHandle(std::list<T>& callbacks, T& callback)\n        : RaiiListElement<T>(callbacks, callback) {}\n  };\n};\n\n// Local implementation of Stats::MetricSnapshot used to flush metrics to sinks. 
We could\n// potentially have a single class instance held in a static and have a clear() method to avoid some\n// vector constructions and reservations, but I'm not sure it's worth the extra complexity until it\n// shows up in perf traces.\n// TODO(mattklein123): One thing we probably want to do is switch from returning vectors of metrics\n//                     to a lambda based callback iteration API. This would require less vector\n//                     copying and probably be a cleaner API in general.\nclass MetricSnapshotImpl : public Stats::MetricSnapshot {\npublic:\n  explicit MetricSnapshotImpl(Stats::Store& store);\n\n  // Stats::MetricSnapshot\n  const std::vector<CounterSnapshot>& counters() override { return counters_; }\n  const std::vector<std::reference_wrapper<const Stats::Gauge>>& gauges() override {\n    return gauges_;\n  };\n  const std::vector<std::reference_wrapper<const Stats::ParentHistogram>>& histograms() override {\n    return histograms_;\n  }\n  const std::vector<std::reference_wrapper<const Stats::TextReadout>>& textReadouts() override {\n    return text_readouts_;\n  }\n\nprivate:\n  std::vector<Stats::CounterSharedPtr> snapped_counters_;\n  std::vector<CounterSnapshot> counters_;\n  std::vector<Stats::GaugeSharedPtr> snapped_gauges_;\n  std::vector<std::reference_wrapper<const Stats::Gauge>> gauges_;\n  std::vector<Stats::ParentHistogramSharedPtr> snapped_histograms_;\n  std::vector<std::reference_wrapper<const Stats::ParentHistogram>> histograms_;\n  std::vector<Stats::TextReadoutSharedPtr> snapped_text_readouts_;\n  std::vector<std::reference_wrapper<const Stats::TextReadout>> text_readouts_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/ssl_context_manager.cc",
    "content": "#include \"server/ssl_context_manager.h\"\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/registry/registry.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * A stub that provides a SSL context manager capable of reporting on\n * certificates' data in case there's no TLS implementation built\n * into Envoy.\n */\nclass SslContextManagerNoTlsStub final : public Envoy::Ssl::ContextManager {\n  Ssl::ClientContextSharedPtr\n  createSslClientContext(Stats::Scope& /* scope */,\n                         const Envoy::Ssl::ClientContextConfig& /* config */) override {\n    throwException();\n  }\n\n  Ssl::ServerContextSharedPtr\n  createSslServerContext(Stats::Scope& /* scope */,\n                         const Envoy::Ssl::ServerContextConfig& /* config */,\n                         const std::vector<std::string>& /* server_names */) override {\n    throwException();\n  }\n\n  size_t daysUntilFirstCertExpires() const override { return std::numeric_limits<int>::max(); }\n  absl::optional<uint64_t> secondsUntilFirstOcspResponseExpires() const override {\n    return absl::nullopt;\n  }\n\n  void iterateContexts(std::function<void(const Envoy::Ssl::Context&)> /* callback */) override{};\n\n  Ssl::PrivateKeyMethodManager& privateKeyMethodManager() override { throwException(); }\n\nprivate:\n  [[noreturn]] void throwException() {\n    throw EnvoyException(\"SSL is not supported in this configuration\");\n  }\n};\n\nSsl::ContextManagerPtr createContextManager(const std::string& factory_name,\n                                            TimeSource& time_source) {\n  Ssl::ContextManagerFactory* factory =\n      Registry::FactoryRegistry<Ssl::ContextManagerFactory>::getFactory(factory_name);\n  if (factory != nullptr) {\n    return factory->createContextManager(time_source);\n  }\n\n  return std::make_unique<SslContextManagerNoTlsStub>();\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/ssl_context_manager.h",
    "content": "#pragma once\n\n#include \"envoy/common/time.h\"\n#include \"envoy/ssl/context_manager.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nSsl::ContextManagerPtr createContextManager(const std::string& factory_name,\n                                            TimeSource& time_source);\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/transport_socket_config_impl.h",
    "content": "#pragma once\n\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/stats/scope.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\n/**\n * Implementation of TransportSocketFactoryContext.\n */\nclass TransportSocketFactoryContextImpl : public TransportSocketFactoryContext {\npublic:\n  TransportSocketFactoryContextImpl(Server::Admin& admin, Ssl::ContextManager& context_manager,\n                                    Stats::Scope& stats_scope, Upstream::ClusterManager& cm,\n                                    const LocalInfo::LocalInfo& local_info,\n                                    Event::Dispatcher& dispatcher, Stats::Store& stats,\n                                    Singleton::Manager& singleton_manager,\n                                    ThreadLocal::SlotAllocator& tls,\n                                    ProtobufMessage::ValidationVisitor& validation_visitor,\n                                    Api::Api& api)\n      : admin_(admin), context_manager_(context_manager), stats_scope_(stats_scope),\n        cluster_manager_(cm), local_info_(local_info), dispatcher_(dispatcher), stats_(stats),\n        singleton_manager_(singleton_manager), tls_(tls), validation_visitor_(validation_visitor),\n        api_(api) {}\n\n  /**\n   * Pass an init manager to register dynamic secret provider.\n   * @param init_manager instance of init manager.\n   */\n  void setInitManager(Init::Manager& init_manager) { init_manager_ = &init_manager; }\n\n  // TransportSocketFactoryContext\n  Server::Admin& admin() override { return admin_; }\n  Ssl::ContextManager& sslContextManager() override { return context_manager_; }\n  Stats::Scope& scope() override { return stats_scope_; }\n  Secret::SecretManager& secretManager() override {\n    return cluster_manager_.clusterManagerFactory().secretManager();\n  }\n  Upstream::ClusterManager& clusterManager() override { return cluster_manager_; }\n  const LocalInfo::LocalInfo& 
localInfo() const override { return local_info_; }\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n  Stats::Store& stats() override { return stats_; }\n  Init::Manager& initManager() override {\n    ASSERT(init_manager_ != nullptr);\n    return *init_manager_;\n  }\n  Singleton::Manager& singletonManager() override { return singleton_manager_; }\n  ThreadLocal::SlotAllocator& threadLocal() override { return tls_; }\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override {\n    return validation_visitor_;\n  }\n  Api::Api& api() override { return api_; }\n\nprivate:\n  Server::Admin& admin_;\n  Ssl::ContextManager& context_manager_;\n  Stats::Scope& stats_scope_;\n  Upstream::ClusterManager& cluster_manager_;\n  const LocalInfo::LocalInfo& local_info_;\n  Event::Dispatcher& dispatcher_;\n  Stats::Store& stats_;\n  Singleton::Manager& singleton_manager_;\n  ThreadLocal::SlotAllocator& tls_;\n  Init::Manager* init_manager_{};\n  ProtobufMessage::ValidationVisitor& validation_visitor_;\n  Api::Api& api_;\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/watchdog_impl.cc",
    "content": "#include \"server/watchdog_impl.h\"\n\n#include \"envoy/event/dispatcher.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nvoid WatchDogImpl::startWatchdog(Event::Dispatcher& dispatcher) {\n  timer_ = dispatcher.createTimer([this]() -> void {\n    this->touch();\n    timer_->enableTimer(timer_interval_);\n  });\n  timer_->enableTimer(timer_interval_);\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/watchdog_impl.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <chrono>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/server/watchdog.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * This class stores the actual data about when the WatchDog was last touched\n * along with thread metadata.\n */\nclass WatchDogImpl : public WatchDog {\npublic:\n  /**\n   * @param interval WatchDog timer interval (used after startWatchdog())\n   */\n  WatchDogImpl(Thread::ThreadId thread_id, TimeSource& tsource, std::chrono::milliseconds interval)\n      : thread_id_(thread_id), time_source_(tsource),\n        latest_touch_time_since_epoch_(tsource.monotonicTime().time_since_epoch()),\n        timer_interval_(interval) {}\n\n  Thread::ThreadId threadId() const override { return thread_id_; }\n  MonotonicTime lastTouchTime() const override {\n    return MonotonicTime(latest_touch_time_since_epoch_.load());\n  }\n\n  // Server::WatchDog\n  void startWatchdog(Event::Dispatcher& dispatcher) override;\n  void touch() override {\n    latest_touch_time_since_epoch_.store(time_source_.monotonicTime().time_since_epoch());\n  }\n\nprivate:\n  const Thread::ThreadId thread_id_;\n  TimeSource& time_source_;\n  std::atomic<std::chrono::steady_clock::duration> latest_touch_time_since_epoch_;\n  Event::TimerPtr timer_;\n  const std::chrono::milliseconds timer_interval_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/well_known_names.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Server {\n\n/**\n * Well-known active UDP listener names.\n */\nclass UdpListenerNameValues {\npublic:\n  const std::string RawUdp = \"raw_udp_listener\";\n};\n\nusing UdpListenerNames = ConstSingleton<UdpListenerNameValues>;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/worker_impl.cc",
    "content": "#include \"server/worker_impl.h\"\n\n#include <functional>\n#include <memory>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/exception.h\"\n#include \"envoy/server/configuration.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"server/connection_handler_impl.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nWorkerPtr ProdWorkerFactory::createWorker(uint32_t index, OverloadManager& overload_manager,\n                                          const std::string& worker_name) {\n  Event::DispatcherPtr dispatcher(api_.allocateDispatcher(worker_name));\n  return std::make_unique<WorkerImpl>(tls_, hooks_, std::move(dispatcher),\n                                      std::make_unique<ConnectionHandlerImpl>(*dispatcher, index),\n                                      overload_manager, api_);\n}\n\nWorkerImpl::WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks,\n                       Event::DispatcherPtr&& dispatcher, Network::ConnectionHandlerPtr handler,\n                       OverloadManager& overload_manager, Api::Api& api)\n    : tls_(tls), hooks_(hooks), dispatcher_(std::move(dispatcher)), handler_(std::move(handler)),\n      api_(api) {\n  tls_.registerThread(*dispatcher_, false);\n  overload_manager.registerForAction(\n      OverloadActionNames::get().StopAcceptingConnections, *dispatcher_,\n      [this](OverloadActionState state) { stopAcceptingConnectionsCb(state); });\n}\n\nvoid WorkerImpl::addListener(absl::optional<uint64_t> overridden_listener,\n                             Network::ListenerConfig& listener, AddListenerCompletion completion) {\n  // All listener additions happen via post. However, we must deal with the case where the listener\n  // can not be created on the worker. There is a race condition where 2 processes can successfully\n  // bind to an address, but then fail to listen() with EADDRINUSE. 
During initial startup, we want\n  // to surface this.\n  dispatcher_->post([this, overridden_listener, &listener, completion]() -> void {\n    try {\n      handler_->addListener(overridden_listener, listener);\n      hooks_.onWorkerListenerAdded();\n      completion(true);\n    } catch (const Network::CreateListenerException& e) {\n      ENVOY_LOG(error, \"failed to add listener on worker: {}\", e.what());\n      completion(false);\n    }\n  });\n}\n\nuint64_t WorkerImpl::numConnections() const {\n  uint64_t ret = 0;\n  if (handler_) {\n    ret = handler_->numConnections();\n  }\n  return ret;\n}\n\nvoid WorkerImpl::removeListener(Network::ListenerConfig& listener,\n                                std::function<void()> completion) {\n  ASSERT(thread_);\n  const uint64_t listener_tag = listener.listenerTag();\n  dispatcher_->post([this, listener_tag, completion]() -> void {\n    handler_->removeListeners(listener_tag);\n    completion();\n    hooks_.onWorkerListenerRemoved();\n  });\n}\n\nvoid WorkerImpl::removeFilterChains(uint64_t listener_tag,\n                                    const std::list<const Network::FilterChain*>& filter_chains,\n                                    std::function<void()> completion) {\n  ASSERT(thread_);\n  dispatcher_->post(\n      [this, listener_tag, &filter_chains, completion = std::move(completion)]() -> void {\n        handler_->removeFilterChains(listener_tag, filter_chains, completion);\n      });\n}\n\nvoid WorkerImpl::start(GuardDog& guard_dog) {\n  ASSERT(!thread_);\n\n  // In posix, thread names are limited to 15 characters, so contrive to make\n  // sure all interesting data fits there. The naming occurs in\n  // ListenerManagerImpl's constructor: absl::StrCat(\"worker_\", i). Let's say we\n  // have 9999 threads. We'd need, so we need 7 bytes for \"worker_\", 4 bytes\n  // for the thread index, leaving us 4 bytes left to distinguish between the\n  // two threads used per dispatcher. 
We'll call this one \"dsp:\" and the\n  // one allocated in guarddog_impl.cc \"dog:\".\n  //\n  // TODO(jmarantz): consider refactoring how this naming works so this naming\n  // architecture is centralized, resulting in clearer names.\n  Thread::Options options{absl::StrCat(\"wrk:\", dispatcher_->name())};\n  thread_ = api_.threadFactory().createThread(\n      [this, &guard_dog]() -> void { threadRoutine(guard_dog); }, options);\n}\n\nvoid WorkerImpl::initializeStats(Stats::Scope& scope) { dispatcher_->initializeStats(scope); }\n\nvoid WorkerImpl::stop() {\n  // It's possible for the server to cleanly shut down while cluster initialization during startup\n  // is happening, so we might not yet have a thread.\n  if (thread_) {\n    dispatcher_->exit();\n    thread_->join();\n  }\n}\n\nvoid WorkerImpl::stopListener(Network::ListenerConfig& listener, std::function<void()> completion) {\n  ASSERT(thread_);\n  const uint64_t listener_tag = listener.listenerTag();\n  dispatcher_->post([this, listener_tag, completion]() -> void {\n    handler_->stopListeners(listener_tag);\n    if (completion != nullptr) {\n      completion();\n    }\n  });\n}\n\nvoid WorkerImpl::threadRoutine(GuardDog& guard_dog) {\n  ENVOY_LOG(debug, \"worker entering dispatch loop\");\n  // The watch dog must be created after the dispatcher starts running and has post events flushed,\n  // as this is when TLS stat scopes start working.\n  dispatcher_->post([this, &guard_dog]() {\n    watch_dog_ =\n        guard_dog.createWatchDog(api_.threadFactory().currentThreadId(), dispatcher_->name());\n    watch_dog_->startWatchdog(*dispatcher_);\n  });\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n  ENVOY_LOG(debug, \"worker exited dispatch loop\");\n  guard_dog.stopWatching(watch_dog_);\n\n  // We must close all active connections before we actually exit the thread. This prevents any\n  // destructors from running on the main thread which might reference thread locals. 
Destroying\n  // the handler does this which additionally purges the dispatcher delayed deletion list.\n  handler_.reset();\n  tls_.shutdownThread();\n  watch_dog_.reset();\n}\n\nvoid WorkerImpl::stopAcceptingConnectionsCb(OverloadActionState state) {\n  if (state.isSaturated()) {\n    handler_->disableListeners();\n  } else {\n    handler_->enableListeners();\n  }\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "source/server/worker_impl.h",
    "content": "#pragma once\n\n#include <functional>\n#include <memory>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/network/connection_handler.h\"\n#include \"envoy/server/guarddog.h\"\n#include \"envoy/server/listener_manager.h\"\n#include \"envoy/server/worker.h\"\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"server/listener_hooks.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ProdWorkerFactory : public WorkerFactory, Logger::Loggable<Logger::Id::main> {\npublic:\n  ProdWorkerFactory(ThreadLocal::Instance& tls, Api::Api& api, ListenerHooks& hooks)\n      : tls_(tls), api_(api), hooks_(hooks) {}\n\n  // Server::WorkerFactory\n  WorkerPtr createWorker(uint32_t index, OverloadManager& overload_manager,\n                         const std::string& worker_name) override;\n\nprivate:\n  ThreadLocal::Instance& tls_;\n  Api::Api& api_;\n  ListenerHooks& hooks_;\n};\n\n/**\n * A server threaded worker that wraps up a worker thread, event loop, etc.\n */\nclass WorkerImpl : public Worker, Logger::Loggable<Logger::Id::main> {\npublic:\n  WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, Event::DispatcherPtr&& dispatcher,\n             Network::ConnectionHandlerPtr handler, OverloadManager& overload_manager,\n             Api::Api& api);\n\n  // Server::Worker\n  void addListener(absl::optional<uint64_t> overridden_listener, Network::ListenerConfig& listener,\n                   AddListenerCompletion completion) override;\n  uint64_t numConnections() const override;\n\n  void removeListener(Network::ListenerConfig& listener, std::function<void()> completion) override;\n  void removeFilterChains(uint64_t listener_tag,\n                          const std::list<const Network::FilterChain*>& filter_chains,\n                          std::function<void()> completion) override;\n  void start(GuardDog& guard_dog) override;\n  void initializeStats(Stats::Scope& scope) override;\n  void stop() 
override;\n  void stopListener(Network::ListenerConfig& listener, std::function<void()> completion) override;\n\nprivate:\n  void threadRoutine(GuardDog& guard_dog);\n  void stopAcceptingConnectionsCb(OverloadActionState state);\n\n  ThreadLocal::Instance& tls_;\n  ListenerHooks& hooks_;\n  Event::DispatcherPtr dispatcher_;\n  Network::ConnectionHandlerPtr handler_;\n  Api::Api& api_;\n  Thread::ThreadPtr thread_;\n  WatchDogSharedPtr watch_dog_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "support/README.md",
    "content": "# Support tools\n\nA collection of CLI tools meant to support and automate various aspects of\ndeveloping Envoy, particularly those related to code review. For example,\nautomatic DCO signoff and pre-commit format checking.\n\n## Usage\n\nTo get started, you need only navigate to the Envoy project root and run:\n\n```bash\n./support/bootstrap\n```\n\nThis will set up the development support toolchain automatically. The toolchain\nuses git hooks extensively, copying them from `support/hooks` to the `.git`\nfolder.\n\nThe commit hook checks can be skipped using the `-n` / `--no-verify` flags, as\nso:\n\n```bash\ngit commit --no-verify\n```\n\n## Functionality\n\nCurrently the development support toolchain exposes two main pieces of\nfunctionality:\n\n* Automatically appending DCO signoff to the end of a commit message if it\n  doesn't exist yet. Correctly covers edge cases like `commit --amend` and\n  `rebase`.\n* Automatically running DCO and format checks on all files in the diff, before\n  push.\n\n[filter]: https://github.com/envoyproxy/envoy-filter-example\n\n## Fixing Format Problems\n\nIf the pre-push format checks detect any problems, you can either fix the\naffected files manually or run the provided formatting script.\n\nTo run the format fix script directly:\n\n```\n./tools/code_format/check_format.py fix && ./tools/code_format/format_python_tools.sh fix\n```\n\nTo run the format fix script under Docker:\n\n```\n./ci/run_envoy_docker.sh './ci/do_ci.sh fix_format'\n```\n\nTo run clang-tidy under Docker, run the following (this creates a full\ncompilation db and takes a long time):\n\n```\n./ci/run_envoy_docker.sh ci/do_ci.sh bazel.clang_tidy\n```\n"
  },
  {
    "path": "support/bootstrap",
    "content": "#!/usr/bin/env bash\n#\n# Bootstraps the developer tools and development environment. This includes\n# copying project-standard git commit hooks that do things like ensure DCO\n# signoff is present during commit.\n\n# Best-effort check that we're in the envoy root directory.\n#\n# TODO(hausdorff): If compatibility becomes an issue here, come back and do this\n# \"the right way\". This is hard to do in general, since `realpath` is not\n# standard.\nif [ ! \"$PWD\" == \"$(git rev-parse --show-toplevel)\" ]; then\n    cat >&2 <<__EOF__\nERROR: this script must be run at the root of the envoy source tree\n__EOF__\n    exit 1\nfi\n\n# Helper functions that calculate `abspath` and `relpath`. Taken from Mesos\n# commit 82b040a60561cf94dec3197ea88ae15e57bcaa97, which also carries the Apache\n# V2 license, and has deployed this code successfully for some time.\nabspath() {\n    cd \"$(dirname \"${1}\")\"\n    echo \"${PWD}\"/\"$(basename \"${1}\")\"\n    cd \"${OLDPWD}\"\n}\nrelpath() {\n  local FROM TO UP\n  FROM=\"$(abspath \"${1%/}\")\" TO=\"$(abspath \"${2%/}\"/)\"\n  while test \"${TO}\"  = \"${TO#\"${FROM}\"/}\" \\\n          -a \"${TO}\" != \"${FROM}\"; do\n    FROM=\"${FROM%/*}\" UP=\"../${UP}\"\n  done\n  TO=\"${UP%/}${TO#${FROM}}\"\n  echo \"${TO:-.}\"\n}\n\n# Try to find the `.git` directory, even if it's not in Envoy project root (as\n# it wouldn't be if, say, this were in a submodule). The \"blessed\" but fairly\n# new way to do this is to use `--git-common-dir`.\nDOT_GIT_DIR=$(git rev-parse --git-common-dir)\nif test ! -d \"${DOT_GIT_DIR}\"; then\n  # If `--git-common-dir` is not available, fall back to older way of doing it.\n  DOT_GIT_DIR=$(git rev-parse --git-dir)\nfi\n\nHOOKS_DIR=\"${DOT_GIT_DIR}/hooks\"\nHOOKS_DIR_RELPATH=$(relpath \"${HOOKS_DIR}\" \"$(dirname $0)\")\n\nif [ ! 
-e \"${HOOKS_DIR}/prepare-commit-msg\" ]; then\n  echo \"Installing hook 'prepare-commit-msg'\"\n  ln -sf \"${HOOKS_DIR_RELPATH}/hooks/prepare-commit-msg\" \"${HOOKS_DIR}/prepare-commit-msg\"\nfi\n\nif [ ! -e \"${HOOKS_DIR}/pre-push\" ]; then\n  echo \"Installing hook 'pre-push'\"\n  ln -sf \"${HOOKS_DIR_RELPATH}/hooks/pre-push\" \"${HOOKS_DIR}/pre-push\"\nfi\n"
  },
  {
    "path": "support/hooks/pre-push",
    "content": "#!/usr/bin/env bash\n#\n# A git commit hook that will automatically run format checking and DCO signoff\n# checking before the push is successful.\n#\n# To enable this hook, run `bootstrap`, or run the following from the root of\n# the repo. (Note that `bootstrap` will correctly install this even if this code\n# is located in a submodule, while the following won't.)\n#\n# $ ln -s ../../support/hooks/pre-push .git/hooks/pre-push\n\nDUMMY_SHA=0000000000000000000000000000000000000000\n\necho \"Running pre-push check; to skip this step use 'push --no-verify'\"\n\nwhile read LOCAL_REF LOCAL_SHA REMOTE_REF REMOTE_SHA\ndo\n  if [ \"$LOCAL_SHA\" = $DUMMY_SHA ]\n  then\n    # Branch deleted. Do nothing.\n    exit 0\n  else\n    if [ \"$REMOTE_SHA\" = $DUMMY_SHA ]\n    then\n      # New branch. Verify the last commit, since this is very likely where the new code is\n      # (though there is no way to know for sure). In the extremely uncommon case in which someone\n      # pushes more than 1 new commit to a branch, CI will enforce full checking.\n      RANGE=\"$LOCAL_SHA~1..$LOCAL_SHA\"\n    else\n      # Updating branch. Verify new commits.\n      RANGE=\"$REMOTE_SHA..$LOCAL_SHA\"\n    fi\n\n    # Verify DCO signoff. 
We do this before the format checker, since it has\n    # some probability of failing spuriously, while this check never should.\n    #\n    # In general, we can't assume that the commits are signed off by author\n    # pushing, so we settle for just checking that there is a signoff at all.\n    SIGNED_OFF=$(git rev-list --no-merges --grep \"^Signed-off-by: \" \"$RANGE\")\n    NOT_SIGNED_OFF=$(git rev-list --no-merges \"$RANGE\" | grep -Fxv \"$SIGNED_OFF\")\n    if [ -n \"$NOT_SIGNED_OFF\" ]\n    then\n      echo >&2 \"ERROR: The following commits do not have DCO signoff:\"\n      while read -r commit; do\n        echo \"  $(git log --pretty=oneline --abbrev-commit -n 1 $commit)\"\n      done <<< \"$NOT_SIGNED_OFF\"\n      exit 1\n    fi\n\n    # NOTE: The `tools` directory will be the same relative to the support\n    # directory, independent of whether we're in a submodule, so no need to use\n    # our `relpath` helper.\n    SCRIPT_DIR=\"$(dirname \"$(realpath \"$0\")\")/../../tools\"\n\n    # TODO(hausdorff): We should have a more graceful failure story when the\n    # user does not have all the tools set up correctly. This script assumes\n    # `$CLANG_FORMAT` and `$BUILDIFY` are defined, or that the default values it\n    # assumes for these variables correspond to real binaries on the system. If\n    # either of these things aren't true, the check fails.\n    for i in $(git diff --name-only $RANGE --diff-filter=ACMR --ignore-submodules=all 2>&1); do\n      echo -ne \"  Checking format for $i - \"\n      \"$SCRIPT_DIR\"/code_format/check_format.py check $i\n      if [[ $? -ne 0 ]]; then\n        exit 1\n      fi\n\n      echo \"  Checking spelling for $i\"\n      \"$SCRIPT_DIR\"/spelling/check_spelling_pedantic.py check $i\n      if [[ $? -ne 0 ]]; then\n        exit 1\n      fi\n    done\n\n    # TODO(mattklein123): Optimally we would be able to do this on a per-file basis.\n    \"$SCRIPT_DIR\"/proto_format/proto_format.sh check\n    if [[ $? 
-ne 0 ]]; then\n      exit 1\n    fi\n\n    \"$SCRIPT_DIR\"/code_format/format_python_tools.sh check\n    if [[ $? -ne 0 ]]; then\n      exit 1\n    fi\n\n    # Check correctness of repositories definitions.\n    echo \"  Checking repositories definitions\"\n    if ! \"$SCRIPT_DIR\"/check_repositories.sh; then\n      exit 1\n    fi\n  fi\ndone\n\nexit 0\n"
  },
  {
    "path": "support/hooks/prepare-commit-msg",
    "content": "#!/usr/bin/env bash\n#\n# A git commit hook that will automatically append a DCO signoff to the bottom\n# of any commit message that doesn't have one. This append happens after the git\n# default message is generated, but before the user is dropped into the commit\n# message editor.\n#\n# To enable this hook, run `bootstrap`, or run the following from the root of\n# the repo. (Note that `bootstrap` will correctly install this even if this code\n# is located in a submodule, while the following won't.)\n#\n# $ ln -s ../../support/hooks/prepare-commit-msg .git/hooks/prepare-commit-msg\n\nCOMMIT_MESSAGE_FILE=\"$1\"\nAUTHOR=$(git var GIT_AUTHOR_IDENT)\nSIGNOFF=$(echo $AUTHOR | sed -n 's/^\\(.*>\\).*$/Signed-off-by: \\1/p')\n\n# Check for DCO signoff message. If one doesn't exist, append one and then warn\n# the user that you did so.\nif ! $(grep -qs \"^$SIGNOFF\" \"$COMMIT_MESSAGE_FILE\") ; then\n  echo -e \"\\n$SIGNOFF\" >> \"$COMMIT_MESSAGE_FILE\"\n  echo -e \"Appended the following signoff to the end of the commit message:\\n  $SIGNOFF\\n\"\nfi\n"
  },
  {
    "path": "test/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\n# TODO(htuch): remove when we have a solution for https://github.com/bazelbuild/bazel/issues/3510\nenvoy_cc_test_library(\n    name = \"dummy_main\",\n    srcs = [\"dummy_main.cc\"],\n)\n\nenvoy_cc_test_library(\n    name = \"main\",\n    srcs = [\n        \"main.cc\",\n        \"test_listener.cc\",\n        \"test_runner.cc\",\n        \"test_runner.h\",\n    ],\n    hdrs = [\"test_listener.h\"],\n    deps = [\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/event:libevent_lib\",\n        \"//source/exe:process_wide_lib\",\n        \"//source/server:backtrace_lib\",\n        \"//test/common/runtime:utility_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:global_lib\",\n        \"//test/test_common:printers_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/README.md",
    "content": "# Envoy tests\n\nEnvoy uses the [Google Test framework](https://github.com/google/googletest) for\ntests, including\n[Google Mock](https://github.com/google/googletest/blob/master/googlemock/README.md) for\nmocks and matchers. See the documentation on those pages for information on the\nvarious classes, macros, and matchers that Envoy uses from those frameworks.\n\n## Integration tests\n\nEnvoy contains an integration testing framework, for testing\ndownstream-Envoy-upstream communication.\n[See the framework's README for more information.](https://github.com/envoyproxy/envoy/blob/master/test/integration/README.md)\n\n## Custom matchers\n\nEnvoy includes some custom Google Mock matchers to make test expectation\nstatements simpler to write and easier to understand.\n\n### HeaderValueOf\n\nTests that a HeaderMap argument contains exactly one header with the given key,\nwhose value satisfies the given expectation. The expectation can be a matcher,\nor a string that the value should equal.\n\nExamples:\n\n```cpp\nEXPECT_THAT(response->headers(), HeaderValueOf(Headers::get().Server, \"envoy\"));\n```\n\n```cpp\nusing testing::HasSubstr;\nEXPECT_THAT(request->headers(),\n            HeaderValueOf(Headers::get().AcceptEncoding, HasSubstr(\"gzip\")));\n```\n\n### HttpStatusIs\n\nTests that a HeaderMap argument has the provided HTTP status code. 
The status\ncode can be passed in as a string or an integer.\n\nExample:\n\n```cpp\nEXPECT_THAT(response->headers(), HttpStatusIs(\"200\"));\n```\n\n### HeaderMapEqual and HeaderMapEqualRef\n\n`HeaderMapEqualRef` tests that two `HeaderMap` arguments are equal.\n`HeaderMapEqual` is the same, but it compares two pointers to `HeaderMap`s, and\nthe matcher's argument must be a `HeaderMapPtr`.\n\nExample:\n\n```cpp\nEXPECT_THAT(response->headers(), HeaderMapEqualRef(expected_headers));\n```\n\n### ProtoEq, ProtoEqIgnoringField, RepeatedProtoEq\n\nTests equality of protobufs, with a variant that ignores the value (including\npresence) of a single named field. Another variant can be used to compare two\ninstances of Protobuf::RepeatedPtrField element-by-element.\n\nExample:\n\n```cpp\nenvoy::api::v2::DeltaDiscoveryRequest expected_request;\n// (not shown: set some fields of expected_request...)\nEXPECT_CALL(async_stream_, sendMessage(ProtoEqIgnoringField(expected_request, \"response_nonce\"), false));\n\nresponse->mutable_resources()->Add();\nresponse->mutable_resources()->Add();\nresponse->mutable_resources()->Add();\n// (not shown: do something to populate those empty added items...)\nEXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response->resources()), version));\n```\n\n### IsSubsetOfHeaders and IsSupersetOfHeaders\n\nTests that one `HeaderMap` argument contains every header in another\n`HeaderMap`.\n\nExamples:\n\n```cpp\nEXPECT_THAT(response->headers(), IsSubsetOfHeaders(allowed_headers));\nEXPECT_THAT(response->headers(), IsSupersetOfHeaders(required_headers));\n```\n\n## Controlling time in tests\n\nIn Envoy production code, time and timers are managed via\n[`Event::TimeSystem`](https://github.com/envoyproxy/envoy/blob/master/include/envoy/event/timer.h),\nwhich provides a mechanism for querying the time and setting up time-based\ncallbacks. 
Bypassing this abstraction in Envoy code is flagged as a format\nviolation in CI.\n\nIn tests we use a derivation\n[`Event::TestTimeSystem`](test_common/test_time_system.h) which adds the ability\nto sleep or do a blocking timed wait on a condition variable. There are two\nimplementations of the `Event::TestTimeSystem` interface:\n`Event::TestRealTimeSystem`, and `Event::SimulatedTimeSystem`. The latter is\nrecommended for all new tests, as it helps avoid flaky tests on slow machines,\nand makes tests run faster.\n\nTypically we do not want to have both real-time and simulated-time in the same\ntest; that could lead to hard-to-reproduce results. Thus both implementations\nhave a mechanism to enforce that only one of them can be instantiated at once.\nA runtime assertion occurs if an `Event::TestRealTimeSystem` and\n`Event::SimulatedTimeSystem` are instantiated at the same time. Once the\ntime-system goes out of scope, usually at the end of a test method, the slate\nis clean and a new test-method can use a different time system.\n\nThere is also `Event::GlobalTimeSystem`, which can be instantiated in shared\ntest infrastructure that wants to be agnostic to which `TimeSystem` is used in a\ntest. When no `TimeSystem` is instantiated in a test, the `Event::GlobalTimeSystem`\nwill lazy-initialize itself into a concrete `TimeSystem`. Currently this is\n`TestRealTimeSystem` but will be changed in the future to `SimulatedTimeSystem`.\n\n\n## Benchmark tests\n\nEnvoy uses [Google Benchmark](https://github.com/google/benchmark/) for\nmicrobenchmarks. There are custom bazel rules, `envoy_cc_benchmark_binary` and\n`envoy_benchmark_test`, to execute them locally and in CI environments\nrespectively. 
`envoy_benchmark_test` rules call the benchmark binary from a\n[script](https://github.com/envoyproxy/envoy/blob/master/bazel/test_for_benchmark_wrapper.sh)\nwhich runs the benchmark with a minimal number of iterations and skipping\nexpensive benchmarks to quickly verify that the binary is able to run to\ncompletion. In order to collect meaningful benchmarks, `bazel run -c opt` the\nbenchmark binary target on a quiescent machine.\n\nIf you would like to detect when your benchmark test is running under the\nwrapper, call\n[`Envoy::benchmark::skipExpensiveBenchmarks()`](https://github.com/envoyproxy/envoy/blob/master/test/benchmark/main.h).\n"
  },
  {
    "path": "test/benchmark/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"main\",\n    srcs = [\"main.cc\"],\n    hdrs = [\"main.h\"],\n    external_deps = [\n        \"benchmark\",\n        \"tclap\",\n    ],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:test_runtime_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/benchmark/main.cc",
    "content": "// NOLINT(namespace-envoy)\n// This is an Envoy driver for benchmarks.\n#include \"test/benchmark/main.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"benchmark/benchmark.h\"\n#include \"tclap/CmdLine.h\"\n\nusing namespace Envoy;\n\nstatic bool skip_expensive_benchmarks = false;\n\n// Boilerplate main(), which discovers benchmarks and runs them. This uses two\n// different flag parsers, so the order of flags matters: flags defined here\n// must be passed first, and flags defined in benchmark::Initialize second,\n// separated by --.\n// TODO(pgenera): convert this to abseil/flags/ when benchmark also adopts abseil.\nint main(int argc, char** argv) {\n  TestEnvironment::initializeTestMain(argv[0]);\n\n  // Suppressing non-error messages in benchmark tests. This hides warning\n  // messages that appear when using a runtime feature when there isn't an initialized\n  // runtime, and may have non-negligible impact on performance.\n  // TODO(adisuissa): This should be configurable, similarly to unit tests.\n  const spdlog::level::level_enum default_log_level = spdlog::level::err;\n  Envoy::Logger::Registry::setLogLevel(default_log_level);\n\n  // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)\n  TCLAP::CmdLine cmd(\"envoy-benchmark-test\", ' ', \"0.1\");\n  TCLAP::SwitchArg skip_switch(\"s\", \"skip_expensive_benchmarks\",\n                               \"skip or minimize expensive benchmarks\", cmd, false);\n  TCLAP::MultiArg<std::string> runtime_features(\n      \"r\", \"runtime_feature\", \"runtime feature settings each of the form: <flag_name>:<flag_value>\",\n      false, \"string\", cmd);\n\n  cmd.setExceptionHandling(false);\n  try {\n    cmd.parse(argc, argv);\n  } catch (const TCLAP::ExitException& e) {\n    // parse() throws an ExitException with status 0 after printing the output\n    // for 
--help and --version.\n    return 0;\n  }\n\n  // Reduce logs so benchmark output is readable.\n  Thread::MutexBasicLockable lock;\n  Logger::Context logging_context{spdlog::level::warn, Logger::Context::getFancyLogFormat(), lock,\n                                  false};\n\n  skip_expensive_benchmarks = skip_switch.getValue();\n\n  // Initialize scoped_runtime if a runtime_feature argument is present. This\n  // allows benchmarks to use their own scoped_runtime in case no runtime flag is\n  // passed as an argument.\n  std::unique_ptr<TestScopedRuntime> scoped_runtime = nullptr;\n  const auto& runtime_features_args = runtime_features.getValue();\n  for (const absl::string_view runtime_feature_arg : runtime_features_args) {\n    if (scoped_runtime == nullptr) {\n      scoped_runtime = std::make_unique<TestScopedRuntime>();\n    }\n    // Make sure the argument contains a single \":\" character.\n    const std::vector<std::string> runtime_feature_split = absl::StrSplit(runtime_feature_arg, ':');\n    if (runtime_feature_split.size() != 2) {\n      ENVOY_LOG_MISC(critical,\n                     \"Given runtime flag \\\"{}\\\" should have a single ':' separating the flag name \"\n                     \"and its value.\",\n                     runtime_feature_arg);\n      return 1;\n    }\n    const auto feature_name = runtime_feature_split[0];\n    const auto feature_val = runtime_feature_split[1];\n    Runtime::LoaderSingleton::getExisting()->mergeValues({{feature_name, feature_val}});\n  }\n\n  ::benchmark::Initialize(&argc, argv);\n\n  if (skip_expensive_benchmarks) {\n    ENVOY_LOG_MISC(\n        critical,\n        \"Expensive benchmarks are being skipped; see test/README.md for more information\");\n  }\n  ::benchmark::RunSpecifiedBenchmarks();\n}\n\nbool Envoy::benchmark::skipExpensiveBenchmarks() { return skip_expensive_benchmarks; }\n"
  },
  {
    "path": "test/benchmark/main.h",
    "content": "#pragma once\n\n/**\n * Benchmarks can use this to skip or hurry through long-running tests in CI.\n */\n\nnamespace Envoy {\nnamespace benchmark {\n\nbool skipExpensiveBenchmarks();\n\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/access_log/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"access_log_impl_test\",\n    srcs = [\"access_log_impl_test.cc\"],\n    deps = [\n        \"//source/common/access_log:access_log_lib\",\n        \"//source/extensions/access_loggers/file:config\",\n        \"//source/extensions/access_loggers/grpc:http_config\",\n        \"//source/extensions/access_loggers/grpc:tcp_config\",\n        \"//test/common/stream_info:test_util\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"access_log_manager_impl_test\",\n    srcs = [\"access_log_manager_impl_test.cc\"],\n    deps = [\n        \"//source/common/access_log:access_log_manager_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/common/access_log/access_log_impl_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/config/accesslog/v3/accesslog.pb.validate.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/access_log/access_log_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"test/common/stream_info/test_util.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/filesystem/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace AccessLog {\nnamespace {\n\nenvoy::config::accesslog::v3::AccessLog parseAccessLogFromV3Yaml(const std::string& yaml,\n                                                                 bool avoid_boosting = true) {\n  envoy::config::accesslog::v3::AccessLog access_log;\n  TestUtility::loadFromYamlAndValidate(yaml, access_log, false, avoid_boosting);\n  return access_log;\n}\n\nclass AccessLogImplTest : public testing::Test {\npublic:\n  AccessLogImplTest() : file_(new MockAccessLogFile()) {\n    ON_CALL(context_, runtime()).WillByDefault(ReturnRef(runtime_));\n    ON_CALL(context_, accessLogManager()).WillByDefault(ReturnRef(log_manager_));\n    ON_CALL(log_manager_, createAccessLog(_)).WillByDefault(Return(file_));\n    ON_CALL(*file_, write(_)).WillByDefault(SaveArg<0>(&output_));\n  }\n\n  Http::TestRequestHeaderMapImpl 
request_headers_{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n  TestStreamInfo stream_info_;\n  std::shared_ptr<MockAccessLogFile> file_;\n  StringViewSaver output_;\n\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Envoy::AccessLog::MockAccessLogManager> log_manager_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n};\n\nTEST_F(AccessLogImplTest, LogMoreData) {\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_));\n  stream_info_.response_flags_ = StreamInfo::ResponseFlag::UpstreamConnectionFailure;\n  request_headers_.addCopy(Http::Headers::get().UserAgent, \"user-agent-set\");\n  request_headers_.addCopy(Http::Headers::get().RequestId, \"id\");\n  request_headers_.addCopy(Http::Headers::get().Host, \"host\");\n  request_headers_.addCopy(Http::Headers::get().ForwardedFor, \"x.x.x.x\");\n\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  EXPECT_EQ(\"[1999-01-01T00:00:00.000Z] \\\"GET / HTTP/1.1\\\" 0 UF 1 2 3 - \\\"x.x.x.x\\\" \"\n            \"\\\"user-agent-set\\\" \\\"id\\\" \\\"host\\\" \\\"-\\\"\\n\",\n            output_);\n}\n\nTEST_F(AccessLogImplTest, DownstreamDisconnect) {\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_));\n\n  auto cluster = std::make_shared<NiceMock<Upstream::MockClusterInfo>>();\n  stream_info_.upstream_host_ = Upstream::makeTestHostDescription(cluster, 
\"tcp://10.0.0.5:1234\");\n  stream_info_.response_flags_ = StreamInfo::ResponseFlag::DownstreamConnectionTermination;\n\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  EXPECT_EQ(\"[1999-01-01T00:00:00.000Z] \\\"GET / HTTP/1.1\\\" 0 DC 1 2 3 - \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\" \"\n            \"\\\"10.0.0.5:1234\\\"\\n\",\n            output_);\n}\n\nTEST_F(AccessLogImplTest, RouteName) {\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  format: \"[%START_TIME%] \\\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\\\" %RESPONSE_CODE% %RESPONSE_FLAGS% %ROUTE_NAME% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \\\"%REQ(X-FORWARDED-FOR)%\\\" \\\"%REQ(USER-AGENT)%\\\" \\\"%REQ(X-REQUEST-ID)%\\\"  \\\"%REQ(:AUTHORITY)%\\\"\\n\"\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_));\n  stream_info_.route_name_ = \"route-test-name\";\n  stream_info_.response_flags_ = StreamInfo::ResponseFlag::UpstreamConnectionFailure;\n  request_headers_.addCopy(Http::Headers::get().UserAgent, \"user-agent-set\");\n  request_headers_.addCopy(Http::Headers::get().RequestId, \"id\");\n  request_headers_.addCopy(Http::Headers::get().Host, \"host\");\n  request_headers_.addCopy(Http::Headers::get().ForwardedFor, \"x.x.x.x\");\n\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  EXPECT_EQ(\n      \"[1999-01-01T00:00:00.000Z] \\\"GET / HTTP/1.1\\\" 0 UF route-test-name 1 2 3 - \\\"x.x.x.x\\\" \"\n      \"\\\"user-agent-set\\\" \\\"id\\\"  \\\"host\\\"\\n\",\n      output_);\n}\n\nTEST_F(AccessLogImplTest, EnvoyUpstreamServiceTime) {\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": 
type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_));\n  response_headers_.addCopy(Http::Headers::get().EnvoyUpstreamServiceTime, \"999\");\n\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  EXPECT_EQ(\"[1999-01-01T00:00:00.000Z] \\\"GET / HTTP/1.1\\\" 0 - 1 2 3 999 \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\" \"\n            \"\\\"-\\\"\\n\",\n            output_);\n}\n\nTEST_F(AccessLogImplTest, NoFilter) {\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  EXPECT_EQ(\n      \"[1999-01-01T00:00:00.000Z] \\\"GET / HTTP/1.1\\\" 0 - 1 2 3 - \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\"\\n\",\n      output_);\n}\n\nTEST_F(AccessLogImplTest, UpstreamHost) {\n  auto cluster = std::make_shared<NiceMock<Upstream::MockClusterInfo>>();\n  stream_info_.upstream_host_ = Upstream::makeTestHostDescription(cluster, \"tcp://10.0.0.5:1234\");\n\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  EXPECT_EQ(\"[1999-01-01T00:00:00.000Z] \\\"GET / HTTP/1.1\\\" 0 - 1 2 3 - \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\" \"\n            \"\\\"10.0.0.5:1234\\\"\\n\",\n            
output_);\n}\n\nTEST_F(AccessLogImplTest, WithFilterMiss) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  or_filter:\n    filters:\n    - status_code_filter:\n        comparison:\n          op: GE\n          value:\n            default_value: 500\n            runtime_key: key_a\n    - duration_filter:\n        comparison:\n          op: GE\n          value:\n            default_value: 1000000\n            runtime_key: key_b\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  stream_info_.response_code_ = 200;\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, WithFilterHit) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n    or_filter:\n      filters:\n      - status_code_filter:\n          comparison:\n            op: GE\n            value:\n              default_value: 500\n              runtime_key: key_a\n      - status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 0\n              runtime_key: key_b\n      - duration_filter:\n          comparison:\n            op: GE\n            value:\n              default_value: 1000000\n              runtime_key: key_c\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(3);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  stream_info_.response_code_ = 500;\n  log->log(&request_headers_, &response_headers_, 
&response_trailers_, stream_info_);\n\n  stream_info_.response_code_ = 200;\n  stream_info_.end_time_ =\n      stream_info_.startTimeMonotonic() + std::chrono::microseconds(1001000000000000);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, RuntimeFilter) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  runtime_filter:\n    runtime_key: access_log.test_key\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  Random::RandomGeneratorImpl random;\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  // Value is taken from random generator.\n  EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(42));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 0, 42, 100))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(43));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 0, 43, 100))\n      .WillOnce(Return(false));\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  // Value is taken from x-request-id.\n  request_headers_.addCopy(\"x-request-id\", \"000000ff-0000-0000-0000-000000000000\");\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 0, 55, 100))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 0, 55, 100))\n      .WillOnce(Return(false));\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, 
stream_info_);\n}\n\nTEST_F(AccessLogImplTest, RuntimeFilterV2) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  runtime_filter:\n    runtime_key: access_log.test_key\n    percent_sampled:\n      numerator: 5\n      denominator: TEN_THOUSAND\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  Random::RandomGeneratorImpl random;\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  // Value is taken from random generator.\n  EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(42));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 5, 42, 10000))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(43));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 5, 43, 10000))\n      .WillOnce(Return(false));\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  // Value is taken from x-request-id.\n  request_headers_.addCopy(\"x-request-id\", \"000000ff-0000-0000-0000-000000000000\");\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 5, 255, 10000))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 5, 255, 10000))\n      .WillOnce(Return(false));\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, RuntimeFilterV2IndependentRandomness) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  runtime_filter:\n    
runtime_key: access_log.test_key\n    percent_sampled:\n      numerator: 5\n      denominator: MILLION\n    use_independent_randomness: true\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  // Value should not be taken from x-request-id.\n  request_headers_.addCopy(\"x-request-id\", \"000000ff-0000-0000-0000-000000000000\");\n  EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(42));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 5, 42, 1000000))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(43));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"access_log.test_key\", 5, 43, 1000000))\n      .WillOnce(Return(false));\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, PathRewrite) {\n  request_headers_ = {{\":method\", \"GET\"}, {\":path\", \"/foo\"}, {\"x-envoy-original-path\", \"/bar\"}};\n\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  EXPECT_EQ(\"[1999-01-01T00:00:00.000Z] \\\"GET /bar HTTP/1.1\\\" 0 - 1 2 3 - \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\" \"\n            \"\\\"-\\\"\\n\",\n            output_);\n}\n\nTEST_F(AccessLogImplTest, HealthCheckTrue) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  
not_health_check_filter: {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  Http::TestRequestHeaderMapImpl header_map{};\n  stream_info_.health_check_request_ = true;\n  EXPECT_CALL(*file_, write(_)).Times(0);\n\n  log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, HealthCheckFalse) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  not_health_check_filter: {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: \"/dev/null\"\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  Http::TestRequestHeaderMapImpl header_map{};\n  EXPECT_CALL(*file_, write(_));\n\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, RequestTracing) {\n  Random::RandomGeneratorImpl random;\n\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  traceable_filter: {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  {\n    Http::TestRequestHeaderMapImpl forced_header{{\"x-request-id\", random.uuid()}};\n    stream_info_.getRequestIDExtension()->setTraceStatus(forced_header, Http::TraceStatus::Forced);\n    EXPECT_CALL(*file_, write(_));\n    log->log(&forced_header, &response_headers_, &response_trailers_, stream_info_);\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl not_traceable{{\"x-request-id\", random.uuid()}};\n    EXPECT_CALL(*file_, write(_)).Times(0);\n    log->log(&not_traceable, &response_headers_, &response_trailers_, stream_info_);\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl 
sampled_header{{\"x-request-id\", random.uuid()}};\n    stream_info_.getRequestIDExtension()->setTraceStatus(sampled_header,\n                                                         Http::TraceStatus::Sampled);\n    EXPECT_CALL(*file_, write(_)).Times(0);\n    log->log(&sampled_header, &response_headers_, &response_trailers_, stream_info_);\n  }\n}\n\nTEST(AccessLogImplTestCtor, FiltersMissingInOrAndFilter) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  {\n    const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  or_filter: {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n    )EOF\";\n\n    EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context),\n                 EnvoyException);\n  }\n\n  {\n    const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  and_filter: {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n    )EOF\";\n\n    EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context),\n                 EnvoyException);\n  }\n}\n\nTEST_F(AccessLogImplTest, AndFilter) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  and_filter:\n    filters:\n      - status_code_filter:\n          comparison:\n            op: GE\n            value:\n              default_value: 500\n              runtime_key: key\n      - not_health_check_filter: {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n  stream_info_.response_code_ = 500;\n\n  {\n    EXPECT_CALL(*file_, write(_));\n    Http::TestRequestHeaderMapImpl header_map{{\"user-agent\", \"NOT/Envoy/HC\"}};\n\n    log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);\n  }\n\n  {\n    EXPECT_CALL(*file_, 
write(_)).Times(0);\n    Http::TestRequestHeaderMapImpl header_map{};\n    stream_info_.health_check_request_ = true;\n    log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);\n  }\n}\n\nTEST_F(AccessLogImplTest, OrFilter) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  or_filter:\n    filters:\n    - status_code_filter:\n        comparison:\n          op: GE\n          value:\n            default_value: 500\n            runtime_key: key\n    - not_health_check_filter: {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n  stream_info_.response_code_ = 500;\n\n  {\n    EXPECT_CALL(*file_, write(_));\n    Http::TestRequestHeaderMapImpl header_map{{\"user-agent\", \"NOT/Envoy/HC\"}};\n\n    log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);\n  }\n\n  {\n    EXPECT_CALL(*file_, write(_));\n    Http::TestRequestHeaderMapImpl header_map{{\"user-agent\", \"Envoy/HC\"}};\n    log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);\n  }\n}\n\nTEST_F(AccessLogImplTest, MultipleOperators) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  and_filter:\n    filters:\n    - or_filter:\n        filters:\n        - duration_filter:\n            comparison:\n              op: GE\n              value:\n                default_value: 10000\n                runtime_key: key_a\n        - status_code_filter:\n            comparison:\n              op: GE\n              value:\n                default_value: 500\n                runtime_key: key_b\n    - not_health_check_filter: {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n  
stream_info_.response_code_ = 500;\n\n  {\n    EXPECT_CALL(*file_, write(_));\n    Http::TestRequestHeaderMapImpl header_map{};\n\n    log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);\n  }\n\n  {\n    EXPECT_CALL(*file_, write(_)).Times(0);\n    Http::TestRequestHeaderMapImpl header_map{};\n    stream_info_.health_check_request_ = true;\n\n    log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);\n  }\n}\n\nTEST(AccessLogFilterTest, DurationWithRuntimeKey) {\n  const std::string filter_yaml = R\"EOF(\nduration_filter:\n  comparison:\n    op: GE\n    value:\n      default_value: 1000000\n      runtime_key: key\n    )EOF\";\n\n  NiceMock<Runtime::MockLoader> runtime;\n\n  envoy::config::accesslog::v3::AccessLogFilter config;\n  TestUtility::loadFromYaml(filter_yaml, config);\n  DurationFilter filter(config.duration_filter(), runtime);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  TestStreamInfo stream_info;\n\n  stream_info.end_time_ = stream_info.startTimeMonotonic() + std::chrono::microseconds(100000);\n  EXPECT_CALL(runtime.snapshot_, getInteger(\"key\", 1000000)).WillOnce(Return(1));\n  EXPECT_TRUE(filter.evaluate(stream_info, request_headers, response_headers, response_trailers));\n\n  EXPECT_CALL(runtime.snapshot_, getInteger(\"key\", 1000000)).WillOnce(Return(1000));\n  EXPECT_FALSE(filter.evaluate(stream_info, request_headers, response_headers, response_trailers));\n\n  stream_info.end_time_ =\n      stream_info.startTimeMonotonic() + std::chrono::microseconds(100000001000);\n  EXPECT_CALL(runtime.snapshot_, getInteger(\"key\", 1000000)).WillOnce(Return(100000000));\n  EXPECT_TRUE(filter.evaluate(stream_info, request_headers, response_headers, response_trailers));\n\n  stream_info.end_time_ = stream_info.startTimeMonotonic() + 
std::chrono::microseconds(10000);\n  EXPECT_CALL(runtime.snapshot_, getInteger(\"key\", 1000000)).WillOnce(Return(100000000));\n  EXPECT_FALSE(filter.evaluate(stream_info, request_headers, response_headers, response_trailers));\n}\n\nTEST(AccessLogFilterTest, StatusCodeWithRuntimeKey) {\n  const std::string filter_yaml = R\"EOF(\nstatus_code_filter:\n  comparison:\n    op: GE\n    value:\n      default_value: 300\n      runtime_key: key\n    )EOF\";\n\n  NiceMock<Runtime::MockLoader> runtime;\n\n  envoy::config::accesslog::v3::AccessLogFilter config;\n  TestUtility::loadFromYaml(filter_yaml, config);\n  StatusCodeFilter filter(config.status_code_filter(), runtime);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  TestStreamInfo info;\n\n  info.response_code_ = 400;\n  EXPECT_CALL(runtime.snapshot_, getInteger(\"key\", 300)).WillOnce(Return(350));\n  EXPECT_TRUE(filter.evaluate(info, request_headers, response_headers, response_trailers));\n\n  EXPECT_CALL(runtime.snapshot_, getInteger(\"key\", 300)).WillOnce(Return(500));\n  EXPECT_FALSE(filter.evaluate(info, request_headers, response_headers, response_trailers));\n}\n\nTEST_F(AccessLogImplTest, StatusCodeLessThan) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  status_code_filter:\n    comparison:\n      op: LE\n      value:\n        default_value: 499\n        runtime_key: hello\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  stream_info_.response_code_ = 499;\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"hello\", 499)).WillOnce(Return(499));\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n 
 stream_info_.response_code_ = 500;\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"hello\", 499)).WillOnce(Return(499));\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, HeaderPresence) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  header_filter:\n    header:\n      name: test-header\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.addCopy(\"test-header\", \"present\");\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, HeaderExactMatch) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  header_filter:\n    header:\n      name: test-header\n      exact_match: exact-match-value\n\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.addCopy(\"test-header\", \"exact-match-value\");\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.remove(\"test-header\");\n  request_headers_.addCopy(\"test-header\", \"not-exact-match-value\");\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, 
HeaderRegexMatch) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  header_filter:\n    header:\n      name: test-header\n      safe_regex_match:\n        google_re2: {}\n        regex: \"\\\\d{3}\"\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.addCopy(\"test-header\", \"123\");\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.remove(\"test-header\");\n  request_headers_.addCopy(\"test-header\", \"1234\");\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.remove(\"test-header\");\n  request_headers_.addCopy(\"test-header\", \"123.456\");\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, HeaderRangeMatch) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  header_filter:\n    header:\n      name: test-header\n      range_match:\n        start: -10\n        end: 0\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.addCopy(\"test-header\", \"-1\");\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  
request_headers_.remove(\"test-header\");\n  request_headers_.addCopy(\"test-header\", \"0\");\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.remove(\"test-header\");\n  request_headers_.addCopy(\"test-header\", \"somestring\");\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.remove(\"test-header\");\n  request_headers_.addCopy(\"test-header\", \"10.9\");\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.remove(\"test-header\");\n  request_headers_.addCopy(\"test-header\", \"-1somestring\");\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, ResponseFlagFilterAnyFlag) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  response_flag_filter: {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  stream_info_.setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound);\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, ResponseFlagFilterSpecificFlag) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  response_flag_filter:\n    flags:\n      - UO\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = 
AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  stream_info_.setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound);\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  stream_info_.setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow);\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, ResponseFlagFilterSeveralFlags) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  response_flag_filter:\n    flags:\n      - UO\n      - RL\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  stream_info_.setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound);\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  stream_info_.setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow);\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, ResponseFlagFilterAllFlagsInPGV) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  response_flag_filter:\n    flags:\n      - LH\n      - UH\n      - UT\n      - LR\n      - UR\n      - UF\n      - UC\n      - UO\n      - NR\n      - DI\n      - FI\n      - RL\n      - UAEX\n      - RLSE\n      - DC\n      - URX\n      - SI\n      - IH\n      - DPE\n      - UMSDR\n      - RFCF\n      - NFCF\n   
   - DT\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  static_assert(StreamInfo::ResponseFlag::LastFlag == 0x400000,\n                \"A flag has been added. Fix this code.\");\n\n  const std::vector<StreamInfo::ResponseFlag> all_response_flags = {\n      StreamInfo::ResponseFlag::FailedLocalHealthCheck,\n      StreamInfo::ResponseFlag::NoHealthyUpstream,\n      StreamInfo::ResponseFlag::UpstreamRequestTimeout,\n      StreamInfo::ResponseFlag::LocalReset,\n      StreamInfo::ResponseFlag::UpstreamRemoteReset,\n      StreamInfo::ResponseFlag::UpstreamConnectionFailure,\n      StreamInfo::ResponseFlag::UpstreamConnectionTermination,\n      StreamInfo::ResponseFlag::UpstreamOverflow,\n      StreamInfo::ResponseFlag::NoRouteFound,\n      StreamInfo::ResponseFlag::DelayInjected,\n      StreamInfo::ResponseFlag::FaultInjected,\n      StreamInfo::ResponseFlag::RateLimited,\n      StreamInfo::ResponseFlag::UnauthorizedExternalService,\n      StreamInfo::ResponseFlag::RateLimitServiceError,\n      StreamInfo::ResponseFlag::DownstreamConnectionTermination,\n      StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded,\n      StreamInfo::ResponseFlag::StreamIdleTimeout,\n      StreamInfo::ResponseFlag::InvalidEnvoyRequestHeaders,\n      StreamInfo::ResponseFlag::DownstreamProtocolError,\n      StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached,\n      StreamInfo::ResponseFlag::ResponseFromCacheFilter,\n      StreamInfo::ResponseFlag::NoFilterConfigFound,\n      StreamInfo::ResponseFlag::DurationTimeout};\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  for (const auto response_flag : all_response_flags) {\n    TestStreamInfo stream_info;\n    stream_info.setResponseFlag(response_flag);\n    EXPECT_CALL(*file_, write(_));\n    log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info);\n  
}\n}\n\nTEST_F(AccessLogImplTest, ResponseFlagFilterUnsupportedFlag) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  response_flag_filter:\n    flags:\n      - UnsupportedFlag\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_),\n      ProtoValidationException,\n      \"Proto constraint validation failed (AccessLogValidationError.Filter: [\\\"embedded message \"\n      \"failed validation\\\"] | caused by AccessLogFilterValidationError.ResponseFlagFilter: \"\n      \"[\\\"embedded message failed validation\\\"] | caused by \"\n      \"ResponseFlagFilterValidationError.Flags[i]: [\\\"value must be in list \\\" [\\\"LH\\\" \\\"UH\\\" \"\n      \"\\\"UT\\\" \\\"LR\\\" \\\"UR\\\" \\\"UF\\\" \\\"UC\\\" \\\"UO\\\" \\\"NR\\\" \\\"DI\\\" \\\"FI\\\" \\\"RL\\\" \\\"UAEX\\\" \\\"RLSE\\\" \"\n      \"\\\"DC\\\" \\\"URX\\\" \\\"SI\\\" \\\"IH\\\" \\\"DPE\\\" \\\"UMSDR\\\" \\\"RFCF\\\" \\\"NFCF\\\" \\\"DT\\\"]]): name: \"\n      \"\\\"accesslog\\\"\\nfilter {\\n \"\n      \" \"\n      \"response_flag_filter {\\n    flags: \\\"UnsupportedFlag\\\"\\n  }\\n}\\ntyped_config {\\n  \"\n      \"[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\\n    path: \\\"/dev/null\\\"\\n  \"\n      \"}\\n}\\n\");\n}\n\nTEST_F(AccessLogImplTest, ValidateTypedConfig) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  response_flag_filter:\n    flags:\n      - UnsupportedFlag\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_),\n      ProtoValidationException,\n      \"Proto constraint validation failed (AccessLogValidationError.Filter: [\\\"embedded message \"\n      \"failed validation\\\"] | caused by 
AccessLogFilterValidationError.ResponseFlagFilter: \"\n      \"[\\\"embedded message failed validation\\\"] | caused by \"\n      \"ResponseFlagFilterValidationError.Flags[i]: [\\\"value must be in list \\\" [\\\"LH\\\" \\\"UH\\\" \"\n      \"\\\"UT\\\" \\\"LR\\\" \\\"UR\\\" \\\"UF\\\" \\\"UC\\\" \\\"UO\\\" \\\"NR\\\" \\\"DI\\\" \\\"FI\\\" \\\"RL\\\" \\\"UAEX\\\" \\\"RLSE\\\" \"\n      \"\\\"DC\\\" \\\"URX\\\" \\\"SI\\\" \\\"IH\\\" \\\"DPE\\\" \\\"UMSDR\\\" \\\"RFCF\\\" \\\"NFCF\\\" \\\"DT\\\"]]): name: \"\n      \"\\\"accesslog\\\"\\nfilter {\\n \"\n      \" \"\n      \"response_flag_filter {\\n    flags: \\\"UnsupportedFlag\\\"\\n  }\\n}\\ntyped_config {\\n  \"\n      \"[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\\n    path: \\\"/dev/null\\\"\\n  \"\n      \"}\\n}\\n\");\n}\n\nTEST_F(AccessLogImplTest, ValidGrpcStatusMessage) {\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  format: \"%GRPC_STATUS%\\n\"\n  )EOF\";\n\n  InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n  {\n    EXPECT_CALL(*file_, write(_));\n    response_trailers_.addCopy(Http::Headers::get().GrpcStatus, \"0\");\n    log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n    EXPECT_EQ(\"OK\\n\", output_);\n    response_trailers_.remove(Http::Headers::get().GrpcStatus);\n  }\n  {\n    InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n    EXPECT_CALL(*file_, write(_));\n    response_headers_.addCopy(Http::Headers::get().GrpcStatus, \"1\");\n    log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n    EXPECT_EQ(\"Canceled\\n\", output_);\n    response_headers_.remove(Http::Headers::get().GrpcStatus);\n  }\n  {\n    InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n    
EXPECT_CALL(*file_, write(_));\n    response_headers_.addCopy(Http::Headers::get().GrpcStatus, \"-1\");\n    log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n    EXPECT_EQ(\"-1\\n\", output_);\n    response_headers_.remove(Http::Headers::get().GrpcStatus);\n  }\n}\n\nTEST_F(AccessLogImplTest, GrpcStatusFilterValues) {\n  const std::string yaml_template = R\"EOF(\nname: accesslog\nfilter:\n  grpc_status_filter:\n    statuses:\n      - {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n)EOF\";\n\n  const auto desc = envoy::config::accesslog::v3::GrpcStatusFilter::Status_descriptor();\n  const int grpcStatuses = static_cast<int>(Grpc::Status::WellKnownGrpcStatus::MaximumKnown) + 1;\n  if (desc->value_count() != grpcStatuses) {\n    FAIL() << \"Mismatch in number of gRPC statuses, GrpcStatus has \" << grpcStatuses\n           << \", GrpcStatusFilter_Status has \" << desc->value_count() << \".\";\n  }\n\n  for (int i = 0; i < desc->value_count(); i++) {\n    InstanceSharedPtr log = AccessLogFactory::fromProto(\n        parseAccessLogFromV3Yaml(fmt::format(yaml_template, desc->value(i)->name())), context_);\n\n    EXPECT_CALL(*file_, write(_));\n\n    response_trailers_.addCopy(Http::Headers::get().GrpcStatus, std::to_string(i));\n    log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n    response_trailers_.remove(Http::Headers::get().GrpcStatus);\n  }\n}\n\nTEST_F(AccessLogImplTest, GrpcStatusFilterUnsupportedValue) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  grpc_status_filter:\n    statuses:\n      - NOT_A_VALID_CODE\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_),\n                          EnvoyException, \".*\\\"NOT_A_VALID_CODE\\\" for type 
TYPE_ENUM.*\");\n}\n\nTEST_F(AccessLogImplTest, GrpcStatusFilterBlock) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  grpc_status_filter:\n    statuses:\n      - OK\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  const InstanceSharedPtr log =\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  response_trailers_.addCopy(Http::Headers::get().GrpcStatus, \"1\");\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, GrpcStatusFilterHttpCodes) {\n  const std::string yaml_template = R\"EOF(\nname: accesslog\nfilter:\n  grpc_status_filter:\n    statuses:\n      - {}\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n)EOF\";\n\n  // This mapping includes UNKNOWN <-> 200 because we expect that gRPC should provide an explicit\n  // status code for successes. In general, the only status codes that receive an HTTP mapping are\n  // those enumerated below with a non-UNKNOWN mapping. 
See: //source/common/grpc/status.cc and\n  // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md.\n  const std::vector<std::pair<std::string, uint64_t>> statusMapping = {\n      {\"UNKNOWN\", 200},           {\"INTERNAL\", 400},    {\"UNAUTHENTICATED\", 401},\n      {\"PERMISSION_DENIED\", 403}, {\"UNAVAILABLE\", 429}, {\"UNIMPLEMENTED\", 404},\n      {\"UNAVAILABLE\", 502},       {\"UNAVAILABLE\", 503}, {\"UNAVAILABLE\", 504}};\n\n  for (const auto& pair : statusMapping) {\n    stream_info_.response_code_ = pair.second;\n\n    const InstanceSharedPtr log = AccessLogFactory::fromProto(\n        parseAccessLogFromV3Yaml(fmt::format(yaml_template, pair.first)), context_);\n\n    EXPECT_CALL(*file_, write(_));\n    log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  }\n}\n\nTEST_F(AccessLogImplTest, GrpcStatusFilterNoCode) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  grpc_status_filter:\n    statuses:\n      - UNKNOWN\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  const InstanceSharedPtr log =\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, GrpcStatusFilterExclude) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  grpc_status_filter:\n    exclude: true\n    statuses:\n      - OK\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  const InstanceSharedPtr log =\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  for (int i = 0; i <= static_cast<int>(Grpc::Status::WellKnownGrpcStatus::MaximumKnown); i++) {\n    EXPECT_CALL(*file_, write(_)).Times(i == 0 ? 
0 : 1);\n\n    response_trailers_.addCopy(Http::Headers::get().GrpcStatus, std::to_string(i));\n    log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n    response_trailers_.remove(Http::Headers::get().GrpcStatus);\n  }\n}\n\nTEST_F(AccessLogImplTest, GrpcStatusFilterExcludeFalse) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  grpc_status_filter:\n    exclude: false\n    statuses:\n      - OK\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  const InstanceSharedPtr log =\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  response_trailers_.addCopy(Http::Headers::get().GrpcStatus, \"0\");\n\n  EXPECT_CALL(*file_, write(_));\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, GrpcStatusFilterHeader) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  grpc_status_filter:\n    statuses:\n      - OK\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  const InstanceSharedPtr log =\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_));\n\n  response_headers_.addCopy(Http::Headers::get().GrpcStatus, \"0\");\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, MetadataFilter) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  metadata_filter:\n    matcher:\n      filter: \"some.namespace\"\n      path:\n        - key: \"a\"\n        - key: \"b\"\n        - key: \"c\"\n      value:\n        bool_match: true\n\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  TestStreamInfo stream_info;\n  ProtobufWkt::Struct metadata_val;\n  auto& fields_a = 
*metadata_val.mutable_fields();\n  auto& struct_b = *fields_a[\"a\"].mutable_struct_value();\n  auto& fields_b = *struct_b.mutable_fields();\n  auto& struct_c = *fields_b[\"b\"].mutable_struct_value();\n  auto& fields_c = *struct_c.mutable_fields();\n  fields_c[\"c\"].set_bool_value(true);\n\n  stream_info.setDynamicMetadata(\"some.namespace\", metadata_val);\n\n  const InstanceSharedPtr log =\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(1);\n\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info);\n  fields_c[\"c\"].set_bool_value(false);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n}\n\n// This is a regression test for fuzz bug https://oss-fuzz.com/testcase-detail/4863844862918656\n// where a missing matcher would attempt to create a ValueMatcher and crash in debug mode. Instead,\n// the configured metadata filter does not match.\nTEST_F(AccessLogImplTest, MetadataFilterNoMatcher) {\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  metadata_filter:\n    match_if_key_not_found: false\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  TestStreamInfo stream_info;\n  ProtobufWkt::Struct metadata_val;\n  stream_info.setDynamicMetadata(\"some.namespace\", metadata_val);\n\n  const InstanceSharedPtr log =\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  // If no matcher is set, then expect no logs.\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info);\n}\n\nTEST_F(AccessLogImplTest, MetadataFilterNoKey) {\n  const std::string default_true_yaml = R\"EOF(\nname: accesslog\nfilter:\n  metadata_filter:\n    matcher:\n      filter: \"some.namespace\"\n      path:\n        - key: \"x\"\n      value:\n        bool_match: true\n\ntyped_config:\n  \"@type\": 
type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  const std::string default_false_yaml = R\"EOF(\nname: accesslog\nfilter:\n  metadata_filter:\n    matcher:\n      filter: \"some.namespace\"\n      path:\n        - key: \"y\"\n      value:\n        bool_match: true\n    match_if_key_not_found:\n      value: false\n\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  TestStreamInfo stream_info;\n  ProtobufWkt::Struct metadata_val;\n  auto& fields_a = *metadata_val.mutable_fields();\n  auto& struct_b = *fields_a[\"a\"].mutable_struct_value();\n  auto& fields_b = *struct_b.mutable_fields();\n  fields_b[\"b\"].set_bool_value(true);\n\n  stream_info.setDynamicMetadata(\"some.namespace\", metadata_val);\n\n  const InstanceSharedPtr default_false_log =\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(default_false_yaml), context_);\n  EXPECT_CALL(*file_, write(_)).Times(0);\n\n  default_false_log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info);\n\n  const InstanceSharedPtr default_true_log =\n      AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(default_true_yaml), context_);\n  EXPECT_CALL(*file_, write(_)).Times(1);\n\n  default_true_log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info);\n}\n\nclass TestHeaderFilterFactory : public ExtensionFilterFactory {\npublic:\n  ~TestHeaderFilterFactory() override = default;\n\n  FilterPtr createFilter(const envoy::config::accesslog::v3::ExtensionFilter& config,\n                         Runtime::Loader&, Random::RandomGenerator&) override {\n    auto factory_config = Config::Utility::translateToFactoryConfig(\n        config, Envoy::ProtobufMessage::getNullValidationVisitor(), *this);\n    const auto& header_config =\n        TestUtility::downcastAndValidate<const envoy::config::accesslog::v3::HeaderFilter&>(\n            *factory_config);\n  
  return std::make_unique<HeaderFilter>(header_config);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<envoy::config::accesslog::v3::HeaderFilter>();\n  }\n\n  std::string name() const override { return \"test_header_filter\"; }\n};\n\nTEST_F(AccessLogImplTest, TestHeaderFilterPresence) {\n  TestHeaderFilterFactory factory;\n  Registry::InjectFactory<ExtensionFilterFactory> registration(factory);\n\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  extension_filter:\n    name: test_header_filter\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.filter.accesslog.v2.HeaderFilter\n      header:\n        name: test-header\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n\n  EXPECT_CALL(*file_, write(_)).Times(0);\n  logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n\n  request_headers_.addCopy(\"test-header\", \"foo/bar\");\n  EXPECT_CALL(*file_, write(_));\n  logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n}\n\n/**\n * Sample extension filter which allows every 1 of every `sample_rate` log attempts.\n */\nclass SampleExtensionFilter : public Filter {\npublic:\n  SampleExtensionFilter(uint32_t sample_rate) : sample_rate_(sample_rate) {}\n\n  // AccessLog::Filter\n  bool evaluate(const StreamInfo::StreamInfo&, const Http::RequestHeaderMap&,\n                const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) const override {\n    if (current_++ == 0) {\n      return true;\n    }\n    if (current_ >= sample_rate_) {\n      current_ = 0;\n    }\n    return false;\n  }\n\nprivate:\n  mutable uint32_t current_ = 0;\n  uint32_t sample_rate_;\n};\n\n/**\n * Sample extension filter factory which creates SampleExtensionFilter.\n 
*/\nclass SampleExtensionFilterFactory : public ExtensionFilterFactory {\npublic:\n  ~SampleExtensionFilterFactory() override = default;\n\n  FilterPtr createFilter(const envoy::config::accesslog::v3::ExtensionFilter& config,\n                         Runtime::Loader&, Random::RandomGenerator&) override {\n    auto factory_config = Config::Utility::translateToFactoryConfig(\n        config, Envoy::ProtobufMessage::getNullValidationVisitor(), *this);\n\n    ProtobufWkt::Struct struct_config =\n        *dynamic_cast<const ProtobufWkt::Struct*>(factory_config.get());\n    return std::make_unique<SampleExtensionFilter>(\n        static_cast<uint32_t>(struct_config.fields().at(\"rate\").number_value()));\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ProtobufWkt::Struct>();\n  }\n\n  std::string name() const override { return \"sample_extension_filter\"; }\n};\n\nTEST_F(AccessLogImplTest, SampleExtensionFilter) {\n  SampleExtensionFilterFactory factory;\n  Registry::InjectFactory<ExtensionFilterFactory> registration(factory);\n\n  const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  extension_filter:\n    name: sample_extension_filter\n    typed_config:\n      \"@type\": type.googleapis.com/google.protobuf.Struct\n      value:\n        rate: 5\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n  InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_);\n  // For rate=5 expect 1st request to be recorded, 2nd-5th skipped, and 6th recorded.\n  EXPECT_CALL(*file_, write(_));\n  logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  for (int i = 0; i <= 3; ++i) {\n    EXPECT_CALL(*file_, write(_)).Times(0);\n    logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  }\n  EXPECT_CALL(*file_, write(_));\n  logger->log(&request_headers_, 
&response_headers_, &response_trailers_, stream_info_);\n}\n\nTEST_F(AccessLogImplTest, UnregisteredExtensionFilter) {\n  {\n    const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  extension_filter:\n    name: unregistered_extension_filter\n    typed_config:\n      \"@type\": type.googleapis.com/google.protobuf.Struct\n      value:\n        foo: bar\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n  )EOF\";\n\n    EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_),\n                 EnvoyException);\n  }\n\n  {\n    const std::string yaml = R\"EOF(\nname: accesslog\nfilter:\n  extension_filter:\n    name: bar\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  path: /dev/null\n    )EOF\";\n\n    EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_),\n                 EnvoyException);\n  }\n}\n\n// Test that the deprecated extension names still function.\nTEST_F(AccessLogImplTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  {\n    envoy::config::accesslog::v3::AccessLog config;\n    config.set_name(\"envoy.access_loggers.file\");\n\n    EXPECT_NO_THROW(\n        Config::Utility::getAndCheckFactory<Server::Configuration::AccessLogInstanceFactory>(\n            config));\n  }\n\n  {\n    envoy::config::accesslog::v3::AccessLog config;\n    config.set_name(\"envoy.file_access_log\");\n\n    EXPECT_NO_THROW(\n        Config::Utility::getAndCheckFactory<Server::Configuration::AccessLogInstanceFactory>(\n            config));\n  }\n\n  {\n    envoy::config::accesslog::v3::AccessLog config;\n    config.set_name(\"envoy.http_grpc_access_log\");\n\n    EXPECT_NO_THROW(\n        Config::Utility::getAndCheckFactory<Server::Configuration::AccessLogInstanceFactory>(\n            config));\n  }\n\n  {\n    envoy::config::accesslog::v3::AccessLog config;\n    
config.set_name(\"envoy.tcp_grpc_access_log\");\n\n    EXPECT_NO_THROW(\n        Config::Utility::getAndCheckFactory<Server::Configuration::AccessLogInstanceFactory>(\n            config));\n  }\n}\n\n} // namespace\n} // namespace AccessLog\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/access_log/access_log_manager_impl_test.cc",
    "content": "#include <memory>\n\n#include \"common/access_log/access_log_manager_impl.h\"\n#include \"common/filesystem/file_shared_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/filesystem/mocks.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ByMove;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnNew;\nusing testing::ReturnRef;\nusing testing::Sequence;\n\nnamespace Envoy {\nnamespace AccessLog {\nnamespace {\n\nclass AccessLogManagerImplTest : public testing::Test {\nprotected:\n  AccessLogManagerImplTest()\n      : file_(new NiceMock<Filesystem::MockFile>), thread_factory_(Thread::threadFactoryForTest()),\n        access_log_manager_(timeout_40ms_, api_, dispatcher_, lock_, store_) {\n    EXPECT_CALL(file_system_, createFile(\"foo\"))\n        .WillOnce(Return(ByMove(std::unique_ptr<NiceMock<Filesystem::MockFile>>(file_))));\n\n    EXPECT_CALL(api_, fileSystem()).WillRepeatedly(ReturnRef(file_system_));\n    EXPECT_CALL(api_, threadFactory()).WillRepeatedly(ReturnRef(thread_factory_));\n  }\n\n  void waitForCounterEq(const std::string& name, uint64_t value) {\n    TestUtility::waitForCounterEq(store_, name, value, time_system_);\n  }\n\n  void waitForGaugeEq(const std::string& name, uint64_t value) {\n    TestUtility::waitForGaugeEq(store_, name, value, time_system_);\n  }\n\n  NiceMock<Api::MockApi> api_;\n  NiceMock<Filesystem::MockInstance> file_system_;\n  NiceMock<Filesystem::MockFile>* file_;\n  const std::chrono::milliseconds timeout_40ms_{40};\n  Stats::TestUtil::TestStore store_;\n  Thread::ThreadFactory& thread_factory_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Thread::MutexBasicLockable lock_;\n  
AccessLogManagerImpl access_log_manager_;\n  Event::TestRealTimeSystem time_system_;\n};\n\nTEST_F(AccessLogManagerImplTest, BadFile) {\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n  EXPECT_CALL(*file_, open_(_)).WillOnce(Return(ByMove(Filesystem::resultFailure<bool>(false, 0))));\n  EXPECT_THROW(access_log_manager_.createAccessLog(\"foo\"), EnvoyException);\n}\n\nTEST_F(AccessLogManagerImplTest, OpenFileWithRightFlags) {\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n  EXPECT_CALL(*file_, open_(_))\n      .WillOnce(Invoke([](Filesystem::FlagSet flags) -> Api::IoCallBoolResult {\n        EXPECT_FALSE(flags[Filesystem::File::Operation::Read]);\n        EXPECT_TRUE(flags[Filesystem::File::Operation::Write]);\n        EXPECT_TRUE(flags[Filesystem::File::Operation::Append]);\n        EXPECT_TRUE(flags[Filesystem::File::Operation::Create]);\n        return Filesystem::resultSuccess<bool>(true);\n      }));\n  EXPECT_NE(nullptr, access_log_manager_.createAccessLog(\"foo\"));\n  EXPECT_CALL(*file_, close_()).WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n}\n\nTEST_F(AccessLogManagerImplTest, FlushToLogFilePeriodically) {\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n\n  EXPECT_CALL(*file_, open_(_)).WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog(\"foo\");\n\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.write_failed\").value());\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.write_completed\").value());\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.flushed_by_timer\").value());\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.write_buffered\").value());\n\n  EXPECT_CALL(*timer, enableTimer(timeout_40ms_, _));\n  EXPECT_CALL(*file_, write_(_))\n      .WillOnce(Invoke([&](absl::string_view data) -> Api::IoCallSizeResult {\n        EXPECT_EQ(\n            4UL,\n            store_.gauge(\"filesystem.write_total_buffered\", 
Stats::Gauge::ImportMode::Accumulate)\n                .value());\n        EXPECT_EQ(0, data.compare(\"test\"));\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  log_file->write(\"test\");\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != 1) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n\n  waitForCounterEq(\"filesystem.write_completed\", 1);\n  EXPECT_EQ(1UL, store_.counter(\"filesystem.write_buffered\").value());\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.flushed_by_timer\").value());\n  waitForGaugeEq(\"filesystem.write_total_buffered\", 0);\n\n  EXPECT_CALL(*file_, write_(_))\n      .WillOnce(Invoke([&](absl::string_view data) -> Api::IoCallSizeResult {\n        EXPECT_EQ(\n            5UL,\n            store_.gauge(\"filesystem.write_total_buffered\", Stats::Gauge::ImportMode::Accumulate)\n                .value());\n        EXPECT_EQ(0, data.compare(\"test2\"));\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  log_file->write(\"test2\");\n  EXPECT_EQ(2UL, store_.counter(\"filesystem.write_buffered\").value());\n\n  // make sure timer is re-enabled on callback call\n  EXPECT_CALL(*timer, enableTimer(timeout_40ms_, _));\n  timer->invokeCallback();\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != 2) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n\n  waitForCounterEq(\"filesystem.write_completed\", 2);\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.write_failed\").value());\n  EXPECT_EQ(1UL, store_.counter(\"filesystem.flushed_by_timer\").value());\n  EXPECT_EQ(2UL, store_.counter(\"filesystem.write_buffered\").value());\n  waitForGaugeEq(\"filesystem.write_total_buffered\", 0);\n\n  EXPECT_CALL(*file_, close_()).WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n}\n\nTEST_F(AccessLogManagerImplTest, 
FlushToLogFileOnDemand) {\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n\n  EXPECT_CALL(*file_, open_(_)).WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog(\"foo\");\n\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.flushed_by_timer\").value());\n\n  EXPECT_CALL(*timer, enableTimer(timeout_40ms_, _));\n\n  // The first write to a given file will start the flush thread. Because AccessManagerImpl::write\n  // holds the write_lock_ when the thread is started, the thread will flush on its first loop, once\n  // it obtains the write_lock_. Perform a write to get all that out of the way.\n  EXPECT_CALL(*file_, write_(_))\n      .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n  log_file->write(\"prime-it\");\n  uint32_t expected_writes = 1;\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != expected_writes) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n\n  EXPECT_CALL(*file_, write_(_))\n      .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        EXPECT_EQ(0, data.compare(\"test\"));\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  log_file->write(\"test\");\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    EXPECT_EQ(expected_writes, file_->num_writes_);\n  }\n\n  log_file->flush();\n  expected_writes++;\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != expected_writes) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n\n  waitForCounterEq(\"filesystem.write_completed\", 2);\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.flushed_by_timer\").value());\n\n  EXPECT_CALL(*file_, write_(_))\n      
.WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        EXPECT_EQ(0, data.compare(\"test2\"));\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  // make sure timer is re-enabled on callback call\n  log_file->write(\"test2\");\n  EXPECT_CALL(*timer, enableTimer(timeout_40ms_, _));\n  timer->invokeCallback();\n  expected_writes++;\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != expected_writes) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n  EXPECT_CALL(*file_, close_()).WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n}\n\nTEST_F(AccessLogManagerImplTest, FlushCountsIOErrors) {\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n\n  EXPECT_CALL(*file_, open_(_)).WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog(\"foo\");\n\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.write_failed\").value());\n\n  EXPECT_CALL(*timer, enableTimer(timeout_40ms_, _));\n  EXPECT_CALL(*file_, write_(_))\n      .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        EXPECT_EQ(0, data.compare(\"test\"));\n        return Filesystem::resultFailure<ssize_t>(2UL, ENOSPC);\n      }));\n\n  log_file->write(\"test\");\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != 1) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n\n  waitForCounterEq(\"filesystem.write_failed\", 1);\n  EXPECT_EQ(0UL, store_.counter(\"filesystem.write_completed\").value());\n\n  EXPECT_CALL(*file_, close_()).WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n}\n\nTEST_F(AccessLogManagerImplTest, ReopenFile) {\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n\n  Sequence sq;\n  EXPECT_CALL(*file_, open_(_))\n    
  .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog(\"foo\");\n\n  EXPECT_CALL(*file_, write_(_))\n      .InSequence(sq)\n      .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        EXPECT_EQ(0, data.compare(\"before\"));\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  log_file->write(\"before\");\n  timer->invokeCallback();\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != 1) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n\n  EXPECT_CALL(*file_, close_())\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  EXPECT_CALL(*file_, open_(_))\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n\n  EXPECT_CALL(*file_, write_(_))\n      .InSequence(sq)\n      .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        EXPECT_EQ(0, data.compare(\"reopened\"));\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  EXPECT_CALL(*file_, close_())\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n\n  log_file->reopen();\n  log_file->write(\"reopened\");\n  timer->invokeCallback();\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != 2) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n}\n\n// Test that the flush timer will trigger file reopen even if no data is waiting.\nTEST_F(AccessLogManagerImplTest, ReopenFileOnTimerOnly) {\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n\n  Sequence sq;\n  EXPECT_CALL(*file_, open_(_))\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  
AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog(\"foo\");\n\n  EXPECT_CALL(*file_, write_(_))\n      .InSequence(sq)\n      .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        EXPECT_EQ(0, data.compare(\"before\"));\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  log_file->write(\"before\");\n  timer->invokeCallback();\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != 1) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n\n  EXPECT_CALL(*file_, close_())\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  EXPECT_CALL(*file_, open_(_))\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n\n  EXPECT_CALL(*file_, close_())\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n\n  log_file->reopen();\n  timer->invokeCallback();\n\n  {\n    Thread::LockGuard lock(file_->open_mutex_);\n    while (file_->num_opens_ != 2) {\n      file_->open_event_.wait(file_->open_mutex_);\n    }\n  }\n}\n\nTEST_F(AccessLogManagerImplTest, ReopenThrows) {\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n\n  EXPECT_CALL(*file_, write_(_))\n      .WillRepeatedly(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  Sequence sq;\n  EXPECT_CALL(*file_, open_(_))\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n\n  AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog(\"foo\");\n  EXPECT_CALL(*file_, close_())\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  EXPECT_CALL(*file_, open_(_))\n      .InSequence(sq)\n      
.WillOnce(Return(ByMove(Filesystem::resultFailure<bool>(false, 0))));\n\n  log_file->write(\"test write\");\n  timer->invokeCallback();\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != 1) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n  log_file->reopen();\n\n  log_file->write(\"this is to force reopen\");\n  timer->invokeCallback();\n\n  {\n    Thread::LockGuard lock(file_->open_mutex_);\n    while (file_->num_opens_ != 2) {\n      file_->open_event_.wait(file_->open_mutex_);\n    }\n  }\n\n  // write call should not cause any exceptions\n  log_file->write(\"random data\");\n  timer->invokeCallback();\n\n  waitForCounterEq(\"filesystem.reopen_failed\", 1);\n}\n\nTEST_F(AccessLogManagerImplTest, BigDataChunkShouldBeFlushedWithoutTimer) {\n  EXPECT_CALL(*file_, open_(_)).WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog(\"foo\");\n\n  EXPECT_CALL(*file_, write_(_))\n      .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        EXPECT_EQ(0, data.compare(\"a\"));\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  log_file->write(\"a\");\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != 1) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n\n  // First write happens without waiting on thread_flush_. 
Now make a big string and it should be\n  // flushed even when timer is not enabled\n  EXPECT_CALL(*file_, write_(_))\n      .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        std::string expected(1024 * 64 + 1, 'b');\n        EXPECT_EQ(0, data.compare(expected));\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  std::string big_string(1024 * 64 + 1, 'b');\n  log_file->write(big_string);\n\n  {\n    Thread::LockGuard lock(file_->write_mutex_);\n    while (file_->num_writes_ != 2) {\n      file_->write_event_.wait(file_->write_mutex_);\n    }\n  }\n  EXPECT_CALL(*file_, close_()).WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n}\n\nTEST_F(AccessLogManagerImplTest, ReopenAllFiles) {\n  EXPECT_CALL(dispatcher_, createTimer_(_)).WillRepeatedly(ReturnNew<NiceMock<Event::MockTimer>>());\n\n  Sequence sq;\n  EXPECT_CALL(*file_, open_(_))\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  AccessLogFileSharedPtr log = access_log_manager_.createAccessLog(\"foo\");\n\n  NiceMock<Filesystem::MockFile>* file2 = new NiceMock<Filesystem::MockFile>;\n  EXPECT_CALL(file_system_, createFile(\"bar\"))\n      .WillOnce(Return(ByMove(std::unique_ptr<NiceMock<Filesystem::MockFile>>(file2))));\n\n  Sequence sq2;\n  EXPECT_CALL(*file2, open_(_))\n      .InSequence(sq2)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  AccessLogFileSharedPtr log2 = access_log_manager_.createAccessLog(\"bar\");\n\n  // Make sure that getting the access log with the same name returns the same underlying file.\n  EXPECT_EQ(log, access_log_manager_.createAccessLog(\"foo\"));\n  EXPECT_EQ(log2, access_log_manager_.createAccessLog(\"bar\"));\n\n  // Test that reopen reopens all of the files\n  EXPECT_CALL(*file_, write_(_))\n      .WillRepeatedly(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        return 
Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  EXPECT_CALL(*file2, write_(_))\n      .WillRepeatedly(Invoke([](absl::string_view data) -> Api::IoCallSizeResult {\n        return Filesystem::resultSuccess<ssize_t>(static_cast<ssize_t>(data.length()));\n      }));\n\n  EXPECT_CALL(*file_, close_())\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  EXPECT_CALL(*file2, close_())\n      .InSequence(sq2)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n\n  EXPECT_CALL(*file_, open_(_))\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  EXPECT_CALL(*file2, open_(_))\n      .InSequence(sq2)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n\n  access_log_manager_.reopen();\n\n  log->write(\"this is to force reopen\");\n  log2->write(\"this is to force reopen\");\n\n  {\n    Thread::LockGuard lock(file_->open_mutex_);\n    while (file_->num_opens_ != 2) {\n      file_->open_event_.wait(file_->open_mutex_);\n    }\n  }\n\n  {\n    Thread::LockGuard lock(file2->open_mutex_);\n    while (file2->num_opens_ != 2) {\n      file2->open_event_.wait(file2->open_mutex_);\n    }\n  }\n\n  EXPECT_CALL(*file_, close_())\n      .InSequence(sq)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n  EXPECT_CALL(*file2, close_())\n      .InSequence(sq2)\n      .WillOnce(Return(ByMove(Filesystem::resultSuccess<bool>(true))));\n}\n\n} // namespace\n} // namespace AccessLog\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/buffer/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:byte_order_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"buffer_fuzz_proto\",\n    srcs = [\"buffer_fuzz.proto\"],\n)\n\nenvoy_cc_test_library(\n    name = \"buffer_fuzz_lib\",\n    srcs = [\"buffer_fuzz.cc\"],\n    hdrs = [\"buffer_fuzz.h\"],\n    deps = [\n        \":buffer_fuzz_proto_cc_proto\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//source/common/network:address_lib\",\n        \"//test/fuzz:utility_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"buffer_fuzz_test\",\n    srcs = [\"buffer_fuzz_test.cc\"],\n    corpus = \"buffer_corpus\",\n    deps = [\":buffer_fuzz_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"buffer_test\",\n    srcs = [\"buffer_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"owned_impl_test\",\n    srcs = [\"owned_impl_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/network:address_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"watermark_buffer_test\",\n    srcs = [\"watermark_buffer_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        
\"//source/common/buffer:buffer_lib\",\n        \"//source/common/buffer:watermark_buffer_lib\",\n        \"//source/common/network:address_lib\",\n        \"//test/test_common:test_runtime_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"zero_copy_input_stream_test\",\n    srcs = [\"zero_copy_input_stream_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"buffer_speed_test\",\n    srcs = [\"buffer_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"buffer_speed_test_benchmark_test\",\n    benchmark_binary = \"buffer_speed_test\",\n)\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/basic",
    "content": "actions {\n  add_buffer_fragment: 1\n}\nactions {\n  add_string: 3\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\nactions {\n  prepend_string: 5\n}\nactions {\n  target_index: 1\n  prepend_buffer: 0\n}\nactions {\n  reserve_commit {\n    reserve_length: 2048\n    commit_length: 765\n  }\n}\nactions {\n  copy_out {\n    start: 7\n    length: 200\n  }\n}\nactions {\n  drain: 98\n}\nactions {\n  linearize: 45\n}\nactions {\n  target_index: 1\n  move {\n    source_index: 0\n    length: 23\n  }\n}\nactions {\n  target_index: 0\n  move {\n    source_index: 1\n  }\n}\nactions {\n  read: 2789\n}\nactions {\n  write: {}\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/case",
    "content": "actions { } actions { } actions { } actions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer_fragment: 738197504\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_string: 1684078592\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  prepend_string: 65536\n}\nactions {\n}\nactions {\n  prepend_string: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions 
{\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions 
{\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5080353465696256",
    "content": "actions { } actions {\n} actions {   add_string:   8 \n}\nactions {\n  target_index: 1\n  read: 256\n}\nactions {\n}\nactions {\n  drain: 0\n}\nactions {\n}\nactions {\n  target_index: 1\n  add_buffer: 196608\n}\nactions {\n  target_index: 1\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  target_index: 98\n}\nactions {\n}\nactions {\n  target_index: 1\n  add_buffer: 704643070\n}\nactions {\n  add_buffer: 4294967294\n}\nactions {\n  drain: 0\n}\nactions {\n  prepend_buffer: 0\n}\nactions {\n}\nactions {\n  target_index: 2789\n}\nactions {\n  target_index: 2789\n}\nactions {\n  prepend_string: 1600414817\n}\nactions {\n  target_index: 1\n}\nactions {\n  add_string: 2789\n}\nactions {\n  target_index: 1\n  add_buffer: 1\n}\nactions {\n  target_index: 2789\n}\nactions {\n  target_index: 1\n  add_buffer: 196608\n}\nactions {\n}\nactions {\n  target_index: 2789\n}\nactions {\n  target_index: 2789\n  add_buffer: 3841982464\n}\nactions {\n  drain: 0\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\nactions {\n  add_string: 0\n}\nactions {\n}\nactions {\n  target_index: 1\n}\nactions {\n  target_index: 1\n  add_buffer: 4294967294\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  target_index: 2789\n  add_buffer: 1\n}\nactions {\n  target_index: 2789\n  add_buffer: 1\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\nactions {\n  target_index: 1\n}\nactions {\n  target_index: 229\n}\nactions {\n}\nactions {\n}\nactions {\n  target_index: 2789\n  add_buffer: 1\n}\nactions {\n  target_index: 1\n  add_buffer: 196608\n}\nactions {\n}\nactions {\n  target_index: 2789\n  add_buffer: 1\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5644734729551872",
    "content": "actions { } actions {\n} actions { } actions {   write {   } } actions { } actions { } actions { } actions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_string: 2097152\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\nactions {\n  target_index: 1\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  add_buffer: 268435456\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\nactions {\n  add_buffer: 4\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5654939127250944",
    "content": "actions {\n  add_string: 11927552\n}\nactions {\n  target_index: 4\n  add_buffer: 0\n}\nactions {\n  prepend_string: 1869177088\n}\nactions {\n}\nactions {\n  move {\n    source_index: 4294967293\n  }\n}\nactions {\n}\nactions {\n  linearize: 8388608\n}\nactions {\n  prepend_string: 1869177088\n}\nactions {\n  linearize: 1\n}\nactions {\n  copy_out {\n    length: 4194304\n  }\n}\nactions {\n  drain: 1\n}\nactions {\n}\nactions {\n  add_string: 65534\n}\nactions {\n  target_index: 1769235297\n}\nactions {\n  add_string: 11927552\n}\nactions {\n  target_index: 3053453312\n  add_string: 11927552\n}\nactions {\n}\nactions {\n  target_index: 11927552\n  drain: 1\n}\nactions {\n  target_index: 1769235297\n}\nactions {\n  write {\n  }\n}\nactions {\n  target_index: 1769235297\n}\nactions {\n  linearize: 1\n}\nactions {\n  add_buffer_fragment: 1\n}\nactions {\n  copy_out {\n    length: 4194304\n  }\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5664992304562176",
    "content": "actions {   target_index:       4 } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  target_index: 1\n  read: 32249\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  read: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_string: 32249\n}\nactions {\n}\nactions {\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5668091688648704",
    "content": "actions {   target_index: 1   read: 1 } actions {   add_buffer_fragment: 1 } actions {   prepend_string: 0 } actions {   add_string: 4 } actions {   target_index: 1   move {     length: 1   } } actions {   target_index: 1   add_buffer_fragment: 1 }\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5669274699431936",
    "content": "actions {   add_buffer_fragment: 0 } actions {   prepend_string: 5 } actions {   add_string: 0 } actions {   reserve_commit {     reserve_length: 1280   } } actions {   read: 1953 } \n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5716850116132864",
    "content": "actions {\n  add_string: 6\n}\nactions {\n  reserve_commit {\n    reserve_length: 971\n    commit_length: 1\n  }\n}\nactions {\n  target_index: 1\n  add_string: 2\n}\nactions {\n  target_index: 1\n  prepend_string: 3\n}\nactions {\n  target_index: 1\n  prepend_buffer: 0\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  target_index: 1\n  move {\n    length: 11\n  }\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5744501719564288",
    "content": "actions { } actions {   read: 1 } actions {   read: 997 } actions { } actions { } actions { } actions {   add_buffer_fragment: 4 } actions {   read: 1 } actions {   read: 997 } actions { } actions {   add_buffer_fragment: 1 } actions {   target_index: 1   read: 1 } actions {   read: 4 } actions {   read: 997 } actions {   add_buffer_fragment: 1 } actions {   prepend_buffer: 4 } actions {   prepend_string: 4 } actions {   read: 1 } actions {   read: 997 } actions {   add_buffer_fragment: 4 } actions {   read: 1 } actions { } actions {   read: 2789 } actions { } actions {   read: 536 } actions {   write {   } } actions { }\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5760708737761280",
    "content": "actions { } actions { } actions { } actions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer_fragment: 738197504\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_string: 1684078592\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  prepend_string: 65536\n}\nactions {\n}\nactions {\n  prepend_string: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions 
{\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions 
{\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions 
{\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  read: 4\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  linearize: 0\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  
}\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  read: 0\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  target_index: 5\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  target_index: 5\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  
}\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  read: 0\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  target_index: 5\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions 
{\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 5\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 5\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  
add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions 
{\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_buffer: 1\n}\nactions {\n  write {\n  }\n}\nactions {\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-new_buffer_fuzz_test-5714377684025344",
    "content": "actions {\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  read: 9476\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n  target_index: 256\n  read: 1\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  move {\n    source_index: 2\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 4\n  move {\n  }\n}\nactions {\n  target_index: 2\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 2\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n  target_index: 2\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 8\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 1\n  
}\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n}\nactions {\n  move {\n    source_index: 8\n  }\n}\nactions {\n  target_index: 5\n  move {\n  }\n}\nactions {\n  add_string: 1646279268\n}\nactions {\n  target_index: 2\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 8\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  target_index: 4\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 2\n  move {\n  }\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 1\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 2\n  move {\n  }\n}\nactions {\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 1\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 4\n  move {\n  }\n}\nactions {\n  target_index: 4\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 4\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  
move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 8\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  target_index: 1\n}\nactions {\n  target_index: 4\n  move {\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\nactions {\n  move {\n    source_index: 4\n  }\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/crash-d60939b6186fa6186e0b574ac67aa6df8f1081cd",
    "content": "actions {\n  prepend_string: 1024\n}\nactions {\n}\nactions {\n  add_buffer_fragment: 1\n}\nactions {\n  read: 2789\n}\nactions {\n  read: 0\n}\nactions {\n  linearize: 45\n}\nactions {\n  reserve_commit {\n    reserve_length: 14\n    commit_length: 1048576\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  reserve_commit {\n    reserve_length: 2048\n    commit_length: 1048576\n  }\n}\nactions {\n  read: 0\n}\nactions {\n  drain: 1024\n}\nactions {\n  move {\n    length: 23\n  }\n}\nactions {\n  linearize: 45\n}\nactions {\n  copy_out {\n    length: 5\n  }\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_corpus/crash-ed103900aec1285149aafc05102a541d9ec51363",
    "content": "actions {\n  add_buffer_fragment: 1\n}\nactions {\n  add_string: 3\n}\nactions {\n  target_index: 1\n  add_buffer: 0\n}\nactions {\n  prepend_string: 5\n}\nactions {\n  target_index: 1\n  prepend_buffer: 0\n}\nactions {\n}\nactions {\n  copy_out {\n    start: 7\n    length: 200\n  }\n}\nactions {\n  drain: 98\n}\nactions {\n  linearize: 45\n}\nactions {\n  target_index: 1\n  move {\n    length: 23\n  }\n}\nactions {\n  move {\n    source_index: 1\n  }\n}\nactions {\n  read: 2789\n}\nactions {\n  write {\n  }\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_fuzz.cc",
    "content": "#include \"test/common/buffer/buffer_fuzz.h\"\n\n#include <fcntl.h>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/memory/stats.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"test/fuzz/utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/match.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nnamespace {\n\n// The number of buffers tracked. Each buffer fuzzer action references one or\n// more of these. We don't need a ton of buffers to capture the range of\n// possible behaviors, at least two to properly model move operations, let's\n// assume only 3 for now.\nconstexpr uint32_t BufferCount = 3;\n\n// These data are exogenous to the buffer, we don't need to worry about their\n// deallocation, just keep them around until the fuzz run is over.\nstruct Context {\n  std::vector<std::unique_ptr<Buffer::BufferFragmentImpl>> fragments_;\n};\n\n// Bound the maximum allocation size per action. We want this to be able to at\n// least cover the span of multiple internal chunks. It looks like both\n// the new OwnedImpl and libevent have minimum chunks in O(a few kilobytes).\n// This makes sense in general, since you need to to minimize data structure\n// overhead. If we make this number too big, we risk spending a lot of time in\n// memcpy/memcmp and slowing down the fuzzer execution rate. 
The number below is\n// our current best compromise.\nconstexpr uint32_t MaxAllocation = 16 * 1024;\n\n// Hard bound on total bytes allocated across the trace.\nconstexpr uint32_t TotalMaxAllocation = 4 * MaxAllocation;\n\nuint32_t clampSize(uint32_t size, uint32_t max_alloc) {\n  return std::min(size, std::min(MaxAllocation, max_alloc));\n}\n\nvoid releaseFragmentAllocation(const void* p, size_t, const Buffer::BufferFragmentImpl*) {\n  ::free(const_cast<void*>(p));\n}\n\n// Test implementation of Buffer. Conceptually, this is just a string that we\n// can append/prepend to and consume bytes from the front of. However, naive\n// implementations with std::string involve lots of copying to support this, and\n// even std::stringbuf doesn't support cheap linearization. Instead we use a\n// flat array that takes advantage of the fact that the total number of bytes\n// allocated during fuzzing will be bounded by TotalMaxAllocation.\n//\n// The data structure is built around the concept of a large flat array of size\n// 2 * TotalMaxAllocation, with the initial start position set to the middle.\n// The goal is to make every mutating operation linear time, including\n// add() and prepend(), as well as supporting O(1) linearization (critical to\n// making it cheaper to compare results with the real buffer implementation).\n// We maintain a (start, length) pair and ensure via assertions that we never\n// walk off the edge; the caller should be guaranteeing this.\nclass StringBuffer : public Buffer::Instance {\npublic:\n  void addDrainTracker(std::function<void()> drain_tracker) override {\n    // Not implemented well.\n    ASSERT(false);\n    drain_tracker();\n  }\n\n  void add(const void* data, uint64_t size) override {\n    FUZZ_ASSERT(start_ + size_ + size <= data_.size());\n    ::memcpy(mutableEnd(), data, size);\n    size_ += size;\n  }\n\n  void addBufferFragment(Buffer::BufferFragment& fragment) override {\n    add(fragment.data(), fragment.size());\n    
fragment.done();\n  }\n\n  void add(absl::string_view data) override { add(data.data(), data.size()); }\n\n  void add(const Buffer::Instance& data) override {\n    const StringBuffer& src = dynamic_cast<const StringBuffer&>(data);\n    add(src.start(), src.size_);\n  }\n\n  void prepend(absl::string_view data) override {\n    FUZZ_ASSERT(start_ >= data.size());\n    start_ -= data.size();\n    size_ += data.size();\n    ::memcpy(mutableStart(), data.data(), data.size());\n  }\n\n  void prepend(Instance& data) override {\n    StringBuffer& src = dynamic_cast<StringBuffer&>(data);\n    prepend(src.asStringView());\n    src.size_ = 0;\n  }\n\n  void commit(Buffer::RawSlice* iovecs, uint64_t num_iovecs) override {\n    FUZZ_ASSERT(num_iovecs == 1);\n    size_ += iovecs[0].len_;\n  }\n\n  void copyOut(size_t start, uint64_t size, void* data) const override {\n    ::memcpy(data, this->start() + start, size);\n  }\n\n  void drain(uint64_t size) override {\n    FUZZ_ASSERT(size <= size_);\n    start_ += size;\n    size_ -= size;\n  }\n\n  Buffer::RawSliceVector\n  getRawSlices(absl::optional<uint64_t> max_slices = absl::nullopt) const override {\n    ASSERT(!max_slices.has_value() || max_slices.value() >= 1);\n    return {{const_cast<char*>(start()), size_}};\n  }\n\n  uint64_t length() const override { return size_; }\n\n  void* linearize(uint32_t /*size*/) override {\n    // Sketchy, but probably will work for test purposes.\n    return mutableStart();\n  }\n\n  Buffer::SliceDataPtr extractMutableFrontSlice() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  void move(Buffer::Instance& rhs) override { move(rhs, rhs.length()); }\n\n  void move(Buffer::Instance& rhs, uint64_t length) override {\n    StringBuffer& src = dynamic_cast<StringBuffer&>(rhs);\n    add(src.start(), length);\n    src.start_ += length;\n    src.size_ -= length;\n  }\n\n  uint64_t reserve(uint64_t length, Buffer::RawSlice* iovecs, uint64_t num_iovecs) override {\n    FUZZ_ASSERT(num_iovecs > 0);\n   
 FUZZ_ASSERT(start_ + size_ + length <= data_.size());\n    iovecs[0].mem_ = mutableEnd();\n    iovecs[0].len_ = length;\n    return 1;\n  }\n\n  ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override {\n    UNREFERENCED_PARAMETER(length);\n    return asStringView().find({static_cast<const char*>(data), size}, start);\n  }\n\n  bool startsWith(absl::string_view data) const override {\n    return absl::StartsWith(asStringView(), data);\n  }\n\n  std::string toString() const override { return std::string(data_.data() + start_, size_); }\n\n  absl::string_view asStringView() const { return {start(), size_}; }\n\n  char* mutableStart() { return data_.data() + start_; }\n\n  const char* start() const { return data_.data() + start_; }\n\n  char* mutableEnd() { return mutableStart() + size_; }\n\n  const char* end() const { return start() + size_; }\n\n  std::array<char, 2 * TotalMaxAllocation> data_;\n  uint32_t start_{TotalMaxAllocation};\n  uint32_t size_{0};\n};\n\nusing BufferList = std::vector<std::unique_ptr<Buffer::Instance>>;\n\n// Process a single buffer operation.\nuint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, BufferList& buffers,\n                      const test::common::buffer::Action& action) {\n  const uint32_t target_index = action.target_index() % BufferCount;\n  Buffer::Instance& target_buffer = *buffers[target_index];\n  uint32_t allocated = 0;\n\n  switch (action.action_selector_case()) {\n  case test::common::buffer::Action::kAddBufferFragment: {\n    const uint32_t size = clampSize(action.add_buffer_fragment(), max_alloc);\n    allocated += size;\n    void* p = ::malloc(size);\n    FUZZ_ASSERT(p != nullptr);\n    ::memset(p, insert_value, size);\n    auto fragment =\n        std::make_unique<Buffer::BufferFragmentImpl>(p, size, releaseFragmentAllocation);\n    ctxt.fragments_.emplace_back(std::move(fragment));\n    target_buffer.addBufferFragment(*ctxt.fragments_.back());\n    
break;\n  }\n  case test::common::buffer::Action::kAddString: {\n    const uint32_t size = clampSize(action.add_string(), max_alloc);\n    allocated += size;\n    const std::string data(size, insert_value);\n    target_buffer.add(data);\n    break;\n  }\n  case test::common::buffer::Action::kAddBuffer: {\n    const uint32_t source_index = action.add_buffer() % BufferCount;\n    if (target_index == source_index) {\n      break;\n    }\n    Buffer::Instance& source_buffer = *buffers[source_index];\n    if (source_buffer.length() > max_alloc) {\n      break;\n    }\n    allocated += source_buffer.length();\n    target_buffer.add(source_buffer);\n    break;\n  }\n  case test::common::buffer::Action::kPrependString: {\n    const uint32_t size = clampSize(action.prepend_string(), max_alloc);\n    allocated += size;\n    const std::string data(size, insert_value);\n    target_buffer.prepend(data);\n    break;\n  }\n  case test::common::buffer::Action::kPrependBuffer: {\n    const uint32_t source_index = action.prepend_buffer() % BufferCount;\n    if (target_index == source_index) {\n      break;\n    }\n    Buffer::Instance& source_buffer = *buffers[source_index];\n    if (source_buffer.length() > max_alloc) {\n      break;\n    }\n    allocated += source_buffer.length();\n    target_buffer.prepend(source_buffer);\n    break;\n  }\n  case test::common::buffer::Action::kReserveCommit: {\n    const uint32_t reserve_length = clampSize(action.reserve_commit().reserve_length(), max_alloc);\n    allocated += reserve_length;\n    if (reserve_length == 0) {\n      break;\n    }\n    constexpr uint32_t reserve_slices = 16;\n    Buffer::RawSlice slices[reserve_slices];\n    const uint32_t allocated_slices = target_buffer.reserve(reserve_length, slices, reserve_slices);\n    uint32_t allocated_length = 0;\n    for (uint32_t i = 0; i < allocated_slices; ++i) {\n      ::memset(slices[i].mem_, insert_value, slices[i].len_);\n      allocated_length += slices[i].len_;\n    }\n    
FUZZ_ASSERT(reserve_length <= allocated_length);\n    const uint32_t target_length =\n        std::min(reserve_length, action.reserve_commit().commit_length());\n    uint32_t shrink_length = allocated_length;\n    int32_t shrink_slice = allocated_slices - 1;\n    while (shrink_length > target_length) {\n      FUZZ_ASSERT(shrink_slice >= 0);\n      const uint32_t available = slices[shrink_slice].len_;\n      const uint32_t remainder = shrink_length - target_length;\n      if (available >= remainder) {\n        slices[shrink_slice].len_ -= remainder;\n        break;\n      }\n      shrink_length -= available;\n      slices[shrink_slice--].len_ = 0;\n    }\n    target_buffer.commit(slices, allocated_slices);\n    break;\n  }\n  case test::common::buffer::Action::kCopyOut: {\n    const uint32_t start =\n        std::min(action.copy_out().start(), static_cast<uint32_t>(target_buffer.length()));\n    const uint32_t length =\n        std::min(static_cast<uint32_t>(target_buffer.length() - start), action.copy_out().length());\n    // Make this static to avoid potential continuous ASAN inspired allocation.\n    static uint8_t copy_buffer[TotalMaxAllocation];\n    target_buffer.copyOut(start, length, copy_buffer);\n    const std::string data = target_buffer.toString();\n    FUZZ_ASSERT(::memcmp(copy_buffer, data.data() + start, length) == 0);\n    break;\n  }\n  case test::common::buffer::Action::kDrain: {\n    const uint32_t previous_length = target_buffer.length();\n    const uint32_t drain_length =\n        std::min(static_cast<uint32_t>(target_buffer.length()), action.drain());\n    target_buffer.drain(drain_length);\n    FUZZ_ASSERT(previous_length - drain_length == target_buffer.length());\n    break;\n  }\n  case test::common::buffer::Action::kLinearize: {\n    const uint32_t linearize_size =\n        std::min(static_cast<uint32_t>(target_buffer.length()), action.linearize());\n    target_buffer.linearize(linearize_size);\n    break;\n  }\n  case 
test::common::buffer::Action::kMove: {\n    const uint32_t source_index = action.move().source_index() % BufferCount;\n    if (target_index == source_index) {\n      break;\n    }\n    Buffer::Instance& source_buffer = *buffers[source_index];\n    if (action.move().length() == 0) {\n      if (source_buffer.length() > max_alloc) {\n        break;\n      }\n      allocated += source_buffer.length();\n      target_buffer.move(source_buffer);\n    } else {\n      const uint32_t source_length =\n          std::min(static_cast<uint32_t>(source_buffer.length()), action.move().length());\n      const uint32_t move_length = clampSize(max_alloc, source_length);\n      if (move_length == 0) {\n        break;\n      }\n      target_buffer.move(source_buffer, move_length);\n      allocated += move_length;\n    }\n    break;\n  }\n  case test::common::buffer::Action::kRead: {\n    const uint32_t max_length = clampSize(action.read(), max_alloc);\n    allocated += max_length;\n    if (max_length == 0) {\n      break;\n    }\n    int pipe_fds[2] = {0, 0};\n    FUZZ_ASSERT(::pipe(pipe_fds) == 0);\n    Network::IoSocketHandleImpl io_handle(pipe_fds[0]);\n    FUZZ_ASSERT(::fcntl(pipe_fds[0], F_SETFL, O_NONBLOCK) == 0);\n    FUZZ_ASSERT(::fcntl(pipe_fds[1], F_SETFL, O_NONBLOCK) == 0);\n    std::string data(max_length, insert_value);\n    const ssize_t rc = ::write(pipe_fds[1], data.data(), max_length);\n    FUZZ_ASSERT(rc > 0);\n    Api::IoCallUint64Result result = io_handle.read(target_buffer, max_length);\n    FUZZ_ASSERT(result.rc_ == static_cast<uint64_t>(rc));\n    FUZZ_ASSERT(::close(pipe_fds[1]) == 0);\n    break;\n  }\n  case test::common::buffer::Action::kWrite: {\n    int pipe_fds[2] = {0, 0};\n    FUZZ_ASSERT(::pipe(pipe_fds) == 0);\n    Network::IoSocketHandleImpl io_handle(pipe_fds[1]);\n    FUZZ_ASSERT(::fcntl(pipe_fds[0], F_SETFL, O_NONBLOCK) == 0);\n    FUZZ_ASSERT(::fcntl(pipe_fds[1], F_SETFL, O_NONBLOCK) == 0);\n    uint64_t rc;\n    do {\n      const bool empty = 
target_buffer.length() == 0;\n      const std::string previous_data = target_buffer.toString();\n      const auto result = io_handle.write(target_buffer);\n      FUZZ_ASSERT(result.ok());\n      rc = result.rc_;\n      ENVOY_LOG_MISC(trace, \"Write rc: {} errno: {}\", rc,\n                     result.err_ != nullptr ? result.err_->getErrorDetails() : \"-\");\n      if (empty) {\n        FUZZ_ASSERT(rc == 0);\n      } else {\n        auto buf = std::make_unique<char[]>(rc);\n        FUZZ_ASSERT(static_cast<uint64_t>(::read(pipe_fds[0], buf.get(), rc)) == rc);\n        FUZZ_ASSERT(::memcmp(buf.get(), previous_data.data(), rc) == 0);\n      }\n    } while (rc > 0);\n    FUZZ_ASSERT(::close(pipe_fds[0]) == 0);\n    break;\n  }\n  case test::common::buffer::Action::kGetRawSlices: {\n    const uint64_t slices_needed = target_buffer.getRawSlices().size();\n    const uint64_t slices_tested =\n        std::min(slices_needed, static_cast<uint64_t>(action.get_raw_slices()));\n    if (slices_tested == 0) {\n      break;\n    }\n    Buffer::RawSliceVector raw_slices = target_buffer.getRawSlices(/*max_slices=*/slices_tested);\n    const uint64_t slices_obtained = raw_slices.size();\n    FUZZ_ASSERT(slices_obtained <= slices_needed);\n    uint64_t offset = 0;\n    const std::string data = target_buffer.toString();\n    for (const auto& raw_slices : raw_slices) {\n      FUZZ_ASSERT(::memcmp(raw_slices.mem_, data.data() + offset, raw_slices.len_) == 0);\n      offset += raw_slices.len_;\n    }\n    FUZZ_ASSERT(slices_needed != slices_tested || offset == target_buffer.length());\n    break;\n  }\n  case test::common::buffer::Action::kSearch: {\n    const std::string& content = action.search().content();\n    const uint32_t offset = action.search().offset();\n    const std::string data = target_buffer.toString();\n    FUZZ_ASSERT(target_buffer.search(content.data(), content.size(), offset) ==\n                static_cast<ssize_t>(target_buffer.toString().find(content, offset)));\n    
break;\n  }\n  case test::common::buffer::Action::kStartsWith: {\n    const std::string data = target_buffer.toString();\n    FUZZ_ASSERT(target_buffer.startsWith(action.starts_with()) ==\n                (data.find(action.starts_with()) == 0));\n    break;\n  }\n  default:\n    // Maybe nothing is set?\n    break;\n  }\n\n  return allocated;\n}\n\n} // namespace\n\nvoid executeActions(const test::common::buffer::BufferFuzzTestCase& input, BufferList& buffers,\n                    BufferList& linear_buffers, Context& ctxt) {\n  // Soft bound on the available memory for allocation to avoid OOMs and\n  // timeouts.\n  uint32_t available_alloc = 2 * MaxAllocation;\n  constexpr auto max_actions = 128;\n  for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) {\n    const char insert_value = 'a' + i % 26;\n    const auto& action = input.actions(i);\n    ENVOY_LOG_MISC(debug, \"Action {}\", action.DebugString());\n    const uint32_t allocated = bufferAction(ctxt, insert_value, available_alloc, buffers, action);\n    const uint32_t linear_allocated =\n        bufferAction(ctxt, insert_value, available_alloc, linear_buffers, action);\n    FUZZ_ASSERT(allocated == linear_allocated);\n    FUZZ_ASSERT(allocated <= available_alloc);\n    available_alloc -= allocated;\n    // When tracing, dump everything.\n    for (uint32_t j = 0; j < BufferCount; ++j) {\n      ENVOY_LOG_MISC(trace, \"Buffer at index {}\", j);\n      ENVOY_LOG_MISC(trace, \"B: {}\", buffers[j]->toString());\n      ENVOY_LOG_MISC(trace, \"L: {}\", linear_buffers[j]->toString());\n    }\n    // Verification pass, only non-mutating methods for buffers.\n    uint64_t current_allocated_bytes = 0;\n    for (uint32_t j = 0; j < BufferCount; ++j) {\n      // As an optimization, since we know that StringBuffer is just going to\n      // return the pointer to its std::string array, we can avoid the\n      // toString() copy here.\n      const uint64_t linear_buffer_length = 
linear_buffers[j]->length();\n      if (buffers[j]->toString() !=\n          absl::string_view(\n              static_cast<const char*>(linear_buffers[j]->linearize(linear_buffer_length)),\n              linear_buffer_length)) {\n        ENVOY_LOG_MISC(debug, \"Mismatched buffers at index {}\", j);\n        ENVOY_LOG_MISC(debug, \"B: {}\", buffers[j]->toString());\n        ENVOY_LOG_MISC(debug, \"L: {}\", linear_buffers[j]->toString());\n        FUZZ_ASSERT(false);\n      }\n      FUZZ_ASSERT(buffers[j]->length() == linear_buffer_length);\n      current_allocated_bytes += linear_buffer_length;\n    }\n    ENVOY_LOG_MISC(debug, \"[{} MB allocated total]\", current_allocated_bytes / (1024.0 * 1024));\n    // We bail out if buffers get too big, otherwise we will OOM the sanitizer.\n    // We can't use Memory::Stats::totalCurrentlyAllocated() here as we don't\n    // have tcmalloc in ASAN builds, so just do a simple count.\n    if (current_allocated_bytes > TotalMaxAllocation) {\n      ENVOY_LOG_MISC(debug, \"Terminating early with total buffer length {} to avoid OOM\",\n                     current_allocated_bytes);\n      break;\n    }\n  }\n}\n\nvoid BufferFuzz::bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input) {\n  Context ctxt;\n  // Fuzzed buffers.\n  BufferList buffers;\n  // Shadow buffers based on StringBuffer.\n  BufferList linear_buffers;\n  for (uint32_t i = 0; i < BufferCount; ++i) {\n    buffers.emplace_back(new Buffer::OwnedImpl());\n    linear_buffers.emplace_back(new StringBuffer());\n  }\n  executeActions(input, buffers, linear_buffers, ctxt);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/buffer/buffer_fuzz.h",
    "content": "#pragma once\n\n#include \"test/common/buffer/buffer_fuzz.pb.h\"\n\nnamespace Envoy {\n\nclass BufferFuzz {\npublic:\n  static void bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input);\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/buffer/buffer_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.buffer;\n\nimport \"google/protobuf/empty.proto\";\n\nmessage ReserveCommit {\n  uint32 reserve_length = 1;\n  uint32 commit_length = 2;\n}\n\nmessage CopyOut {\n  uint32 start = 1;\n  uint32 length = 2;\n}\n\nmessage Move {\n  uint32 source_index = 1;\n  uint32 length = 2;\n}\n\nmessage Search {\n  string content = 1;\n  uint32 offset = 2;\n}\n\nmessage Action {\n  uint32 target_index = 1;\n  oneof action_selector {\n    uint32 add_buffer_fragment = 2;\n    uint32 add_string = 3;\n    uint32 add_buffer = 4;\n    uint32 prepend_string = 5;\n    uint32 prepend_buffer = 6;\n    ReserveCommit reserve_commit = 7;\n    CopyOut copy_out = 8;\n    uint32 drain = 9;\n    uint32 linearize = 10;\n    Move move = 11;\n    uint32 read = 12;\n    google.protobuf.Empty write = 13;\n    uint32 get_raw_slices = 14;\n    Search search = 15;\n    string starts_with = 16;\n  }\n}\n\nmessage BufferFuzzTestCase {\n  repeated Action actions = 1;\n}\n"
  },
  {
    "path": "test/common/buffer/buffer_fuzz_test.cc",
    "content": "#include \"test/common/buffer/buffer_fuzz.h\"\n#include \"test/common/buffer/buffer_fuzz.pb.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\n\n// Fuzz the old owned buffer implementation.\nDEFINE_PROTO_FUZZER(const test::common::buffer::BufferFuzzTestCase& input) {\n  Envoy::BufferFuzz::bufferFuzz(input);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/buffer/buffer_speed_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\n\nstatic constexpr uint64_t MaxBufferLength = 1024 * 1024;\n\n// The fragment needs to be heap allocated in order to survive past the processing done in the inner\n// loop in the benchmarks below. Do not attempt to release the actual contents of the buffer.\nvoid deleteFragment(const void*, size_t, const Buffer::BufferFragmentImpl* self) { delete self; }\n\n// Test the creation of an empty OwnedImpl.\nstatic void bufferCreateEmpty(benchmark::State& state) {\n  uint64_t length = 0;\n  for (auto _ : state) {\n    Buffer::OwnedImpl buffer;\n    length += buffer.length();\n  }\n  benchmark::DoNotOptimize(length);\n}\nBENCHMARK(bufferCreateEmpty);\n\n// Test the creation of an OwnedImpl with varying amounts of content.\nstatic void bufferCreate(benchmark::State& state) {\n  const std::string data(state.range(0), 'a');\n  const absl::string_view input(data);\n  uint64_t length = 0;\n  for (auto _ : state) {\n    Buffer::OwnedImpl buffer(input);\n    length += buffer.length();\n  }\n  benchmark::DoNotOptimize(length);\n}\nBENCHMARK(bufferCreate)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Grow an OwnedImpl in very small amounts.\nstatic void bufferAddSmallIncrement(benchmark::State& state) {\n  const std::string data(\"a\");\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer;\n  for (auto _ : state) {\n    buffer.add(input);\n    if (buffer.length() >= MaxBufferLength) {\n      // Keep the test's memory usage from growing too large.\n      // @note Ideally we could use state.PauseTiming()/ResumeTiming() to exclude\n      // the time spent in the drain operation, but those functions themselves are\n      // heavyweight enough to cloud the measurements:\n      // https://github.com/google/benchmark/issues/179\n      buffer.drain(buffer.length());\n    }\n  }\n 
 benchmark::DoNotOptimize(buffer.length());\n}\nBENCHMARK(bufferAddSmallIncrement)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5);\n\n// Test the appending of varying amounts of content from a string to an OwnedImpl.\nstatic void bufferAddString(benchmark::State& state) {\n  const std::string data(state.range(0), 'a');\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer(input);\n  for (auto _ : state) {\n    buffer.add(data);\n    if (buffer.length() >= MaxBufferLength) {\n      buffer.drain(buffer.length());\n    }\n  }\n  benchmark::DoNotOptimize(buffer.length());\n}\nBENCHMARK(bufferAddString)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Variant of bufferAddString that appends from another Buffer::Instance\n// rather than from a string.\nstatic void bufferAddBuffer(benchmark::State& state) {\n  const std::string data(state.range(0), 'a');\n  const absl::string_view input(data);\n  const Buffer::OwnedImpl to_add(data);\n  Buffer::OwnedImpl buffer(input);\n  for (auto _ : state) {\n    buffer.add(to_add);\n    if (buffer.length() >= MaxBufferLength) {\n      buffer.drain(buffer.length());\n    }\n  }\n  benchmark::DoNotOptimize(buffer.length());\n}\nBENCHMARK(bufferAddBuffer)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test the prepending of varying amounts of content from a string to an OwnedImpl.\nstatic void bufferPrependString(benchmark::State& state) {\n  const std::string data(state.range(0), 'a');\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer(input);\n  for (auto _ : state) {\n    buffer.prepend(data);\n    if (buffer.length() >= MaxBufferLength) {\n      buffer.drain(buffer.length());\n    }\n  }\n  benchmark::DoNotOptimize(buffer.length());\n}\nBENCHMARK(bufferPrependString)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test the prepending of one OwnedImpl to another.\nstatic void bufferPrependBuffer(benchmark::State& state) {\n  const std::string data(state.range(0), 'a');\n  const absl::string_view 
input(data);\n  Buffer::OwnedImpl buffer(input);\n  for (auto _ : state) {\n    // The prepend method removes the content from its source buffer. To populate a new source\n    // buffer every time without the overhead of a copy, we use an BufferFragment that references\n    // (and never deletes) an external string.\n    Buffer::OwnedImpl to_add;\n    auto fragment =\n        std::make_unique<Buffer::BufferFragmentImpl>(input.data(), input.size(), deleteFragment);\n    to_add.addBufferFragment(*fragment.release());\n\n    buffer.prepend(to_add);\n    if (buffer.length() >= MaxBufferLength) {\n      buffer.drain(input.size());\n    }\n  }\n  benchmark::DoNotOptimize(buffer.length());\n}\nBENCHMARK(bufferPrependBuffer)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\nstatic void bufferDrain(benchmark::State& state) {\n  const std::string data(state.range(0), 'a');\n  const absl::string_view input(data);\n  const Buffer::OwnedImpl to_add(data);\n  Buffer::OwnedImpl buffer(input);\n\n  // On each iteration of the benchmark, add N bytes and drain a multiple of N, as specified\n  // by DrainCycleRatios. 
This exercises full-slice, partial-slice, and multi-slice code paths\n  // in the Buffer's drain implementation.\n  constexpr size_t DrainCycleSize = 7;\n  constexpr double DrainCycleRatios[DrainCycleSize] = {0.0, 1.5, 1, 1.5, 0, 2.0, 1.0};\n  uint64_t drain_size[DrainCycleSize];\n  for (size_t i = 0; i < DrainCycleSize; i++) {\n    drain_size[i] = state.range(0) * DrainCycleRatios[i];\n  }\n\n  size_t drain_cycle = 0;\n  for (auto _ : state) {\n    buffer.add(to_add);\n    buffer.drain(drain_size[drain_cycle]);\n    drain_cycle++;\n    drain_cycle %= DrainCycleSize;\n  }\n  benchmark::DoNotOptimize(buffer.length());\n}\nBENCHMARK(bufferDrain)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Drain an OwnedImpl in very small amounts.\nstatic void bufferDrainSmallIncrement(benchmark::State& state) {\n  const std::string data(1024 * 1024, 'a');\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer(input);\n  for (auto _ : state) {\n    buffer.drain(state.range(0));\n    if (buffer.length() == 0) {\n      buffer.add(input);\n    }\n  }\n  benchmark::DoNotOptimize(buffer.length());\n}\nBENCHMARK(bufferDrainSmallIncrement)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5);\n\n// Test the moving of content from one OwnedImpl to another.\nstatic void bufferMove(benchmark::State& state) {\n  const std::string data(state.range(0), 'a');\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer1(input);\n  Buffer::OwnedImpl buffer2(input);\n  for (auto _ : state) {\n    buffer1.move(buffer2); // now buffer1 has 2 copies of the input, and buffer2 is empty.\n    buffer2.move(buffer1, input.size()); // now buffer1 and buffer2 are the same size.\n  }\n  uint64_t length = buffer1.length();\n  benchmark::DoNotOptimize(length);\n}\nBENCHMARK(bufferMove)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test the moving of content from one OwnedImpl to another, one byte at a time, to\n// exercise the (likely inefficient) code path in the implementation that handles\n// 
partial moves.\nstatic void bufferMovePartial(benchmark::State& state) {\n  const std::string data(state.range(0), 'a');\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer1(input);\n  Buffer::OwnedImpl buffer2(input);\n  for (auto _ : state) {\n    while (buffer2.length() != 0) {\n      buffer1.move(buffer2, 1);\n    }\n    buffer2.move(buffer1, input.size()); // now buffer1 and buffer2 are the same size.\n  }\n  uint64_t length = buffer1.length();\n  benchmark::DoNotOptimize(length);\n}\nBENCHMARK(bufferMovePartial)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test the reserve+commit cycle, for the special case where the reserved space is\n// fully used (and therefore the commit size equals the reservation size).\nstatic void bufferReserveCommit(benchmark::State& state) {\n  Buffer::OwnedImpl buffer;\n  for (auto _ : state) {\n    constexpr uint64_t NumSlices = 2;\n    Buffer::RawSlice slices[NumSlices];\n    uint64_t slices_used = buffer.reserve(state.range(0), slices, NumSlices);\n    uint64_t bytes_to_commit = 0;\n    for (uint64_t i = 0; i < slices_used; i++) {\n      bytes_to_commit += static_cast<uint64_t>(slices[i].len_);\n    }\n    buffer.commit(slices, slices_used);\n    if (buffer.length() >= MaxBufferLength) {\n      buffer.drain(buffer.length());\n    }\n  }\n  benchmark::DoNotOptimize(buffer.length());\n}\nBENCHMARK(bufferReserveCommit)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test the reserve+commit cycle, for the common case where the reserved space is\n// only partially used (and therefore the commit size is smaller than the reservation size).\nstatic void bufferReserveCommitPartial(benchmark::State& state) {\n  Buffer::OwnedImpl buffer;\n  for (auto _ : state) {\n    constexpr uint64_t NumSlices = 2;\n    Buffer::RawSlice slices[NumSlices];\n    uint64_t slices_used = buffer.reserve(state.range(0), slices, NumSlices);\n    ASSERT(slices_used > 0);\n    // Commit one byte from the first slice and nothing from any 
subsequent slice.\n    uint64_t bytes_to_commit = 1;\n    slices[0].len_ = bytes_to_commit;\n    buffer.commit(slices, 1);\n    if (buffer.length() >= MaxBufferLength) {\n      buffer.drain(buffer.length());\n    }\n  }\n  benchmark::DoNotOptimize(buffer.length());\n}\nBENCHMARK(bufferReserveCommitPartial)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test the linearization of a buffer in the best case where the data is in one slice.\nstatic void bufferLinearizeSimple(benchmark::State& state) {\n  const std::string data(state.range(0), 'a');\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer;\n  for (auto _ : state) {\n    buffer.drain(buffer.length());\n    auto fragment =\n        std::make_unique<Buffer::BufferFragmentImpl>(input.data(), input.size(), deleteFragment);\n    buffer.addBufferFragment(*fragment.release());\n    benchmark::DoNotOptimize(buffer.linearize(state.range(0)));\n  }\n}\nBENCHMARK(bufferLinearizeSimple)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test the linearization of a buffer in the general case where the data is spread among\n// many slices.\nstatic void bufferLinearizeGeneral(benchmark::State& state) {\n  static constexpr uint64_t SliceSize = 1024;\n  const std::string data(SliceSize, 'a');\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer;\n  for (auto _ : state) {\n    buffer.drain(buffer.length());\n    do {\n      auto fragment =\n          std::make_unique<Buffer::BufferFragmentImpl>(input.data(), input.size(), deleteFragment);\n      buffer.addBufferFragment(*fragment.release());\n    } while (buffer.length() < static_cast<uint64_t>(state.range(0)));\n    benchmark::DoNotOptimize(buffer.linearize(state.range(0)));\n  }\n}\nBENCHMARK(bufferLinearizeGeneral)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test buffer search, for the simple case where there are no partial matches for\n// the pattern in the buffer.\nstatic void bufferSearch(benchmark::State& state) {\n  const 
std::string Pattern(16, 'b');\n  std::string data;\n  data.reserve(state.range(0) + Pattern.length());\n  data += std::string(state.range(0), 'a');\n  data += Pattern;\n\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer(input);\n  ssize_t result = 0;\n  for (auto _ : state) {\n    result += buffer.search(Pattern.c_str(), Pattern.length(), 0, 0);\n  }\n  benchmark::DoNotOptimize(result);\n}\nBENCHMARK(bufferSearch)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test buffer search, for the more challenging case where there are many partial matches\n// for the pattern in the buffer.\nstatic void bufferSearchPartialMatch(benchmark::State& state) {\n  const std::string Pattern(16, 'b');\n  const std::string PartialMatch(\"babbabbbabbbbabbbbbabbbbbbabbbbbbbabbbbbbbba\");\n  std::string data;\n  size_t num_partial_matches = 1 + state.range(0) / PartialMatch.length();\n  data.reserve(state.range(0) * num_partial_matches + Pattern.length());\n  for (size_t i = 0; i < num_partial_matches; i++) {\n    data += PartialMatch;\n  }\n  data += Pattern;\n\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer(input);\n  ssize_t result = 0;\n  for (auto _ : state) {\n    result += buffer.search(Pattern.c_str(), Pattern.length(), 0, 0);\n  }\n  benchmark::DoNotOptimize(result);\n}\nBENCHMARK(bufferSearchPartialMatch)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test buffer startsWith, for the simple case where there is no match for the pattern at the start\n// of the buffer.\nstatic void bufferStartsWith(benchmark::State& state) {\n  const std::string Pattern(16, 'b');\n  std::string data;\n  data.reserve(state.range(0) + Pattern.length());\n  data += std::string(state.range(0), 'a');\n  data += Pattern;\n\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer(input);\n  ssize_t result = 0;\n  for (auto _ : state) {\n    if (!buffer.startsWith({Pattern.c_str(), Pattern.length()})) {\n      result++;\n    }\n  }\n  
benchmark::DoNotOptimize(result);\n}\nBENCHMARK(bufferStartsWith)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536);\n\n// Test buffer startsWith, when there is a match at the start of the buffer.\nstatic void bufferStartsWithMatch(benchmark::State& state) {\n  const std::string Prefix(state.range(1), 'b');\n  const std::string Suffix(\"babbabbbabbbbabbbbbabbbbbbabbbbbbbabbbbbbbba\");\n  std::string data = Prefix;\n  size_t num_suffixes = 1 + state.range(0) / Prefix.length();\n  data.reserve(Suffix.length() * num_suffixes + Prefix.length());\n  for (size_t i = 0; i < num_suffixes; i++) {\n    data += Suffix;\n  }\n\n  const absl::string_view input(data);\n  Buffer::OwnedImpl buffer(input);\n  ssize_t result = 0;\n  for (auto _ : state) {\n    if (buffer.startsWith({Prefix.c_str(), Prefix.length()})) {\n      result++;\n    }\n  }\n  benchmark::DoNotOptimize(result);\n}\nBENCHMARK(bufferStartsWithMatch)\n    ->Args({1, 1})\n    ->Args({4096, 16})\n    ->Args({16384, 256})\n    ->Args({65536, 4096});\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/buffer/buffer_test.cc",
    "content": "#include <limits>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"test/common/buffer/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Buffer {\nnamespace {\n\nclass DummySlice : public Slice {\npublic:\n  DummySlice(const std::string& data, const std::function<void()>& deletion_callback)\n      : Slice(0, data.size(), data.size()), deletion_callback_(deletion_callback) {\n    base_ = reinterpret_cast<uint8_t*>(const_cast<char*>(data.c_str()));\n  }\n  ~DummySlice() override {\n    if (deletion_callback_ != nullptr) {\n      deletion_callback_();\n    }\n  }\n\nprivate:\n  const std::function<void()> deletion_callback_;\n};\n\nclass OwnedSliceTest : public testing::Test {\nprotected:\n  static void expectReservationSuccess(const Slice::Reservation& reservation, const Slice& slice,\n                                       uint64_t reservation_size) {\n    EXPECT_NE(nullptr, reservation.mem_);\n    EXPECT_EQ(static_cast<const uint8_t*>(slice.data()) + slice.dataSize(), reservation.mem_);\n    EXPECT_EQ(reservation_size, reservation.len_);\n  }\n\n  static void expectReservationFailure(const Slice::Reservation& reservation, const Slice& slice,\n                                       uint64_t reservable_size) {\n    EXPECT_EQ(nullptr, reservation.mem_);\n    EXPECT_EQ(nullptr, reservation.mem_);\n    EXPECT_EQ(reservable_size, slice.reservableSize());\n  }\n\n  static void expectCommitSuccess(bool committed, const Slice& slice, uint64_t data_size,\n                                  uint64_t reservable_size) {\n    EXPECT_TRUE(committed);\n    EXPECT_EQ(data_size, slice.dataSize());\n    EXPECT_EQ(reservable_size, slice.reservableSize());\n  }\n};\n\nbool sliceMatches(const SlicePtr& slice, const std::string& expected) {\n  return slice != nullptr && slice->dataSize() == expected.size() &&\n         
memcmp(slice->data(), expected.data(), expected.size()) == 0;\n}\n\nTEST_F(OwnedSliceTest, Create) {\n  static constexpr uint64_t Sizes[] = {0, 1, 64, 4096 - sizeof(OwnedSlice), 65535};\n  for (const auto size : Sizes) {\n    auto slice = OwnedSlice::create(size);\n    EXPECT_NE(nullptr, slice->data());\n    EXPECT_EQ(0, slice->dataSize());\n    EXPECT_LE(size, slice->reservableSize());\n  }\n}\n\nTEST_F(OwnedSliceTest, ReserveCommit) {\n  auto slice = OwnedSlice::create(100);\n  const uint64_t initial_capacity = slice->reservableSize();\n  EXPECT_LE(100, initial_capacity);\n\n  {\n    // Verify that a zero-byte reservation is rejected.\n    Slice::Reservation reservation = slice->reserve(0);\n    expectReservationFailure(reservation, *slice, initial_capacity);\n  }\n\n  {\n    // Create a reservation smaller than the reservable size.\n    // It should reserve the exact number of bytes requested.\n    Slice::Reservation reservation = slice->reserve(10);\n    expectReservationSuccess(reservation, *slice, 10);\n\n    // Request a second reservation while the first reservation remains uncommitted.\n    // This should succeed.\n    EXPECT_EQ(initial_capacity, slice->reservableSize());\n    Slice::Reservation reservation2 = slice->reserve(1);\n    expectReservationSuccess(reservation2, *slice, 1);\n\n    // Commit the entire reserved size.\n    bool committed = slice->commit(reservation);\n    expectCommitSuccess(committed, *slice, 10, initial_capacity - 10);\n\n    // Verify that a reservation can only be committed once.\n    EXPECT_FALSE(slice->commit(reservation));\n  }\n\n  {\n    // Request another reservation, and commit only part of it.\n    Slice::Reservation reservation = slice->reserve(10);\n    expectReservationSuccess(reservation, *slice, 10);\n    reservation.len_ = 5;\n    bool committed = slice->commit(reservation);\n    expectCommitSuccess(committed, *slice, 15, initial_capacity - 15);\n  }\n\n  {\n    // Request another reservation, and commit only part 
of it.\n    Slice::Reservation reservation = slice->reserve(10);\n    expectReservationSuccess(reservation, *slice, 10);\n    reservation.len_ = 5;\n    bool committed = slice->commit(reservation);\n    expectCommitSuccess(committed, *slice, 20, initial_capacity - 20);\n  }\n\n  {\n    // Request another reservation, and commit zero bytes of it.\n    // This should clear the reservation.\n    Slice::Reservation reservation = slice->reserve(10);\n    expectReservationSuccess(reservation, *slice, 10);\n    reservation.len_ = 0;\n    bool committed = slice->commit(reservation);\n    expectCommitSuccess(committed, *slice, 20, initial_capacity - 20);\n  }\n\n  {\n    // Try to commit a reservation from the wrong slice, and verify that the slice rejects it.\n    Slice::Reservation reservation = slice->reserve(10);\n    expectReservationSuccess(reservation, *slice, 10);\n    auto other_slice = OwnedSlice::create(100);\n    Slice::Reservation other_reservation = other_slice->reserve(10);\n    expectReservationSuccess(other_reservation, *other_slice, 10);\n    EXPECT_FALSE(slice->commit(other_reservation));\n    EXPECT_FALSE(other_slice->commit(reservation));\n\n    // Commit the reservations to the proper slices to clear them.\n    reservation.len_ = 0;\n    bool committed = slice->commit(reservation);\n    EXPECT_TRUE(committed);\n    other_reservation.len_ = 0;\n    committed = other_slice->commit(other_reservation);\n    EXPECT_TRUE(committed);\n  }\n\n  {\n    // Try to reserve more space than is available in the slice.\n    uint64_t reservable_size = slice->reservableSize();\n    Slice::Reservation reservation = slice->reserve(reservable_size + 1);\n    expectReservationSuccess(reservation, *slice, reservable_size);\n    bool committed = slice->commit(reservation);\n    expectCommitSuccess(committed, *slice, initial_capacity, 0);\n  }\n\n  {\n    // Now that the view has no more reservable space, verify that it rejects\n    // subsequent reservation requests.\n    
Slice::Reservation reservation = slice->reserve(1);\n    expectReservationFailure(reservation, *slice, 0);\n  }\n}\n\nTEST_F(OwnedSliceTest, Drain) {\n  // Create a slice and commit all the available space.\n  auto slice = OwnedSlice::create(100);\n  Slice::Reservation reservation = slice->reserve(slice->reservableSize());\n  bool committed = slice->commit(reservation);\n  EXPECT_TRUE(committed);\n  EXPECT_EQ(0, slice->reservableSize());\n\n  // Drain some data from the front of the view and verify that the data start moves accordingly.\n  const uint8_t* original_data = static_cast<const uint8_t*>(slice->data());\n  uint64_t original_size = slice->dataSize();\n  slice->drain(0);\n  EXPECT_EQ(original_data, slice->data());\n  EXPECT_EQ(original_size, slice->dataSize());\n  slice->drain(10);\n  EXPECT_EQ(original_data + 10, slice->data());\n  EXPECT_EQ(original_size - 10, slice->dataSize());\n  slice->drain(50);\n  EXPECT_EQ(original_data + 60, slice->data());\n  EXPECT_EQ(original_size - 60, slice->dataSize());\n\n  // Drain all the remaining data.\n  slice->drain(slice->dataSize());\n  EXPECT_EQ(0, slice->dataSize());\n  EXPECT_EQ(original_size, slice->reservableSize());\n}\n\nTEST(UnownedSliceTest, CreateDelete) {\n  constexpr char input[] = \"hello world\";\n  bool release_callback_called = false;\n  BufferFragmentImpl fragment(\n      input, sizeof(input) - 1,\n      [&release_callback_called](const void*, size_t, const BufferFragmentImpl*) {\n        release_callback_called = true;\n      });\n  auto slice = std::make_unique<UnownedSlice>(fragment);\n  EXPECT_EQ(11, slice->dataSize());\n  EXPECT_EQ(0, slice->reservableSize());\n  EXPECT_EQ(0, memcmp(slice->data(), input, slice->dataSize()));\n  EXPECT_FALSE(release_callback_called);\n  slice.reset(nullptr);\n  EXPECT_TRUE(release_callback_called);\n}\n\nTEST(UnownedSliceTest, CreateDeleteOwnedBufferFragment) {\n  constexpr char input[] = \"hello world\";\n  bool release_callback_called = false;\n  auto fragment 
= OwnedBufferFragmentImpl::create(\n      {input, sizeof(input) - 1}, [&release_callback_called](const OwnedBufferFragmentImpl*) {\n        release_callback_called = true;\n      });\n  auto slice = std::make_unique<UnownedSlice>(*fragment);\n  EXPECT_EQ(11, slice->dataSize());\n  EXPECT_EQ(0, slice->reservableSize());\n  EXPECT_EQ(0, memcmp(slice->data(), input, slice->dataSize()));\n  EXPECT_FALSE(release_callback_called);\n  slice.reset(nullptr);\n  EXPECT_TRUE(release_callback_called);\n}\n\nTEST(SliceDequeTest, CreateDelete) {\n  bool slice1_deleted = false;\n  bool slice2_deleted = false;\n  bool slice3_deleted = false;\n\n  {\n    // Create an empty deque.\n    SliceDeque slices;\n    EXPECT_TRUE(slices.empty());\n    EXPECT_EQ(0, slices.size());\n\n    // Append a view to the deque.\n    const std::string slice1 = \"slice1\";\n    slices.emplace_back(\n        std::make_unique<DummySlice>(slice1, [&slice1_deleted]() { slice1_deleted = true; }));\n    EXPECT_FALSE(slices.empty());\n    ASSERT_EQ(1, slices.size());\n    EXPECT_FALSE(slice1_deleted);\n    EXPECT_TRUE(sliceMatches(slices.front(), slice1));\n\n    // Append another view to the deque, and verify that both views are accessible.\n    const std::string slice2 = \"slice2\";\n    slices.emplace_back(\n        std::make_unique<DummySlice>(slice2, [&slice2_deleted]() { slice2_deleted = true; }));\n    EXPECT_FALSE(slices.empty());\n    ASSERT_EQ(2, slices.size());\n    EXPECT_FALSE(slice1_deleted);\n    EXPECT_FALSE(slice2_deleted);\n    EXPECT_TRUE(sliceMatches(slices.front(), slice1));\n    EXPECT_TRUE(sliceMatches(slices.back(), slice2));\n\n    // Prepend a view to the deque, to exercise the ring buffer wraparound case.\n    const std::string slice3 = \"slice3\";\n    slices.emplace_front(\n        std::make_unique<DummySlice>(slice3, [&slice3_deleted]() { slice3_deleted = true; }));\n    EXPECT_FALSE(slices.empty());\n    ASSERT_EQ(3, slices.size());\n    EXPECT_FALSE(slice1_deleted);\n    
EXPECT_FALSE(slice2_deleted);\n    EXPECT_FALSE(slice3_deleted);\n    EXPECT_TRUE(sliceMatches(slices.front(), slice3));\n    EXPECT_TRUE(sliceMatches(slices.back(), slice2));\n\n    // Remove the first view from the deque, and verify that its slice is deleted.\n    slices.pop_front();\n    EXPECT_FALSE(slices.empty());\n    ASSERT_EQ(2, slices.size());\n    EXPECT_FALSE(slice1_deleted);\n    EXPECT_FALSE(slice2_deleted);\n    EXPECT_TRUE(slice3_deleted);\n    EXPECT_TRUE(sliceMatches(slices.front(), slice1));\n    EXPECT_TRUE(sliceMatches(slices.back(), slice2));\n  }\n\n  EXPECT_TRUE(slice1_deleted);\n  EXPECT_TRUE(slice2_deleted);\n  EXPECT_TRUE(slice3_deleted);\n}\n\nTEST(BufferHelperTest, PeekI8) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 0xFE});\n    EXPECT_EQ(buffer.peekInt<int8_t>(), 0);\n    EXPECT_EQ(buffer.peekInt<int8_t>(0), 0);\n    EXPECT_EQ(buffer.peekInt<int8_t>(1), 1);\n    EXPECT_EQ(buffer.peekInt<int8_t>(2), -2);\n    EXPECT_EQ(buffer.length(), 3);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekInt<int8_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeByte(0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekInt<int8_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekLEI16) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekLEInt<int16_t>(), 0x0100);\n    EXPECT_EQ(buffer.peekLEInt<int16_t>(0), 0x0100);\n    EXPECT_EQ(buffer.peekLEInt<int16_t>(1), 0x0201);\n    EXPECT_EQ(buffer.peekLEInt<int16_t>(2), 0x0302);\n    EXPECT_EQ(buffer.peekLEInt<int16_t>(4), -1);\n    EXPECT_EQ(buffer.length(), 6);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<int16_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 2, 0);\n    
EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<int16_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekLEI32) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekLEInt<int32_t>(), 0x03020100);\n    EXPECT_EQ(buffer.peekLEInt<int32_t>(0), 0x03020100);\n    EXPECT_EQ(buffer.peekLEInt<int32_t>(1), 0xFF030201);\n    EXPECT_EQ(buffer.peekLEInt<int32_t>(2), 0xFFFF0302);\n    EXPECT_EQ(buffer.peekLEInt<int32_t>(4), -1);\n    EXPECT_EQ(buffer.length(), 8);\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<int32_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 4, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<int32_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekLEI64) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekLEInt<int64_t>(), 0x0706050403020100);\n    EXPECT_EQ(buffer.peekLEInt<int64_t>(0), 0x0706050403020100);\n    EXPECT_EQ(buffer.peekLEInt<int64_t>(1), 0xFF07060504030201);\n    EXPECT_EQ(buffer.peekLEInt<int64_t>(2), 0xFFFF070605040302);\n    EXPECT_EQ(buffer.peekLEInt<int64_t>(8), -1);\n\n    EXPECT_EQ(buffer.length(), 16);\n\n    // partial\n    EXPECT_EQ((buffer.peekLEInt<int64_t, 4>()), 0x03020100);\n    EXPECT_EQ((buffer.peekLEInt<int64_t, 4>(1)), 0x04030201);\n    EXPECT_EQ((buffer.peekLEInt<int64_t, 2>()), 0x0100);\n    EXPECT_EQ((buffer.peekLEInt<int64_t, 2>(1)), 0x0201);\n  }\n\n  {\n    // signed\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFE, 0xFF, 0xFF});\n    EXPECT_EQ((buffer.peekLEInt<int64_t, 2>()), -1);\n    EXPECT_EQ((buffer.peekLEInt<int64_t, 2>(2)), 255);  // 0x00FF\n    EXPECT_EQ((buffer.peekLEInt<int64_t, 2>(3)), -256); // 0xFF00\n    
EXPECT_EQ((buffer.peekLEInt<int64_t, 3>(5)), -2);   // 0xFFFFFE\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF});\n    EXPECT_THROW_WITH_MESSAGE(\n        (buffer.peekLEInt<int64_t, sizeof(int64_t)>(buffer.length() - sizeof(int64_t) + 1)),\n        EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<int64_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 8, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<int64_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekLEU16) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekLEInt<uint16_t>(), 0x0100);\n    EXPECT_EQ(buffer.peekLEInt<uint16_t>(0), 0x0100);\n    EXPECT_EQ(buffer.peekLEInt<uint16_t>(1), 0x0201);\n    EXPECT_EQ(buffer.peekLEInt<uint16_t>(2), 0x0302);\n    EXPECT_EQ(buffer.peekLEInt<uint16_t>(4), 0xFFFF);\n    EXPECT_EQ(buffer.length(), 6);\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<uint16_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 2, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<uint16_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekLEU32) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekLEInt<uint32_t>(), 0x03020100);\n    EXPECT_EQ(buffer.peekLEInt<uint32_t>(0), 0x03020100);\n    EXPECT_EQ(buffer.peekLEInt<uint32_t>(1), 0xFF030201);\n    EXPECT_EQ(buffer.peekLEInt<uint32_t>(2), 0xFFFF0302);\n    EXPECT_EQ(buffer.peekLEInt<uint32_t>(4), 0xFFFFFFFF);\n    EXPECT_EQ(buffer.length(), 8);\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<uint32_t>(0), 
EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 4, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<uint32_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekLEU64) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekLEInt<uint64_t>(), 0x0706050403020100);\n    EXPECT_EQ(buffer.peekLEInt<uint64_t>(0), 0x0706050403020100);\n    EXPECT_EQ(buffer.peekLEInt<uint64_t>(1), 0xFF07060504030201);\n    EXPECT_EQ(buffer.peekLEInt<uint64_t>(2), 0xFFFF070605040302);\n    EXPECT_EQ(buffer.peekLEInt<uint64_t>(8), 0xFFFFFFFFFFFFFFFF);\n    EXPECT_EQ(buffer.length(), 16);\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<uint64_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 8, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt<uint64_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekBEI16) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekBEInt<int16_t>(), 1);\n    EXPECT_EQ(buffer.peekBEInt<int16_t>(0), 1);\n    EXPECT_EQ(buffer.peekBEInt<int16_t>(1), 0x0102);\n    EXPECT_EQ(buffer.peekBEInt<int16_t>(2), 0x0203);\n    EXPECT_EQ(buffer.peekBEInt<int16_t>(4), -1);\n    EXPECT_EQ(buffer.length(), 6);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<int16_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 2, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<int16_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekBEI32) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF});\n    
EXPECT_EQ(buffer.peekBEInt<int32_t>(), 0x00010203);\n    EXPECT_EQ(buffer.peekBEInt<int32_t>(0), 0x00010203);\n    EXPECT_EQ(buffer.peekBEInt<int32_t>(1), 0x010203FF);\n    EXPECT_EQ(buffer.peekBEInt<int32_t>(2), 0x0203FFFF);\n    EXPECT_EQ(buffer.peekBEInt<int32_t>(4), -1);\n    EXPECT_EQ(buffer.length(), 8);\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<int32_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 4, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<int32_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekBEI64) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekBEInt<int64_t>(), 0x0001020304050607);\n    EXPECT_EQ(buffer.peekBEInt<int64_t>(0), 0x0001020304050607);\n    EXPECT_EQ(buffer.peekBEInt<int64_t>(1), 0x01020304050607FF);\n    EXPECT_EQ(buffer.peekBEInt<int64_t>(2), 0x020304050607FFFF);\n    EXPECT_EQ(buffer.peekBEInt<int64_t>(8), -1);\n    EXPECT_EQ(buffer.length(), 16);\n\n    // partial\n    EXPECT_EQ((buffer.peekBEInt<int64_t, 4>()), 0x00010203);\n    EXPECT_EQ((buffer.peekBEInt<int64_t, 4>(1)), 0x01020304);\n    EXPECT_EQ((buffer.peekBEInt<int64_t, 2>()), 0x0001);\n    EXPECT_EQ((buffer.peekBEInt<int64_t, 2>(1)), 0x0102);\n  }\n\n  {\n    // signed\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0xFE});\n    EXPECT_EQ((buffer.peekBEInt<int64_t, 2>()), -1);\n    EXPECT_EQ((buffer.peekBEInt<int64_t, 2>(2)), -256); // 0xFF00\n    EXPECT_EQ((buffer.peekBEInt<int64_t, 2>(3)), 255);  // 0x00FF\n    EXPECT_EQ((buffer.peekBEInt<int64_t, 3>(5)), -2);   // 0xFFFFFE\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF});\n    EXPECT_THROW_WITH_MESSAGE(\n        (buffer.peekBEInt<int64_t, 
sizeof(int64_t)>(buffer.length() - sizeof(int64_t) + 1)),\n        EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<int64_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 8, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<int64_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekBEU16) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekBEInt<uint16_t>(), 1);\n    EXPECT_EQ(buffer.peekBEInt<uint16_t>(0), 1);\n    EXPECT_EQ(buffer.peekBEInt<uint16_t>(1), 0x0102);\n    EXPECT_EQ(buffer.peekBEInt<uint16_t>(2), 0x0203);\n    EXPECT_EQ(buffer.peekBEInt<uint16_t>(4), 0xFFFF);\n    EXPECT_EQ(buffer.length(), 6);\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<uint16_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 2, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<uint16_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekBEU32) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekBEInt<uint32_t>(), 0x00010203);\n    EXPECT_EQ(buffer.peekBEInt<uint32_t>(0), 0x00010203);\n    EXPECT_EQ(buffer.peekBEInt<uint32_t>(1), 0x010203FF);\n    EXPECT_EQ(buffer.peekBEInt<uint32_t>(2), 0x0203FFFF);\n    EXPECT_EQ(buffer.peekBEInt<uint32_t>(4), 0xFFFFFFFF);\n    EXPECT_EQ(buffer.length(), 8);\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<uint32_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 4, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<uint32_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, 
PeekBEU64) {\n  {\n    Buffer::OwnedImpl buffer;\n    addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});\n    EXPECT_EQ(buffer.peekBEInt<uint64_t>(), 0x0001020304050607);\n    EXPECT_EQ(buffer.peekBEInt<uint64_t>(0), 0x0001020304050607);\n    EXPECT_EQ(buffer.peekBEInt<uint64_t>(1), 0x01020304050607FF);\n    EXPECT_EQ(buffer.peekBEInt<uint64_t>(2), 0x020304050607FFFF);\n    EXPECT_EQ(buffer.peekBEInt<uint64_t>(8), 0xFFFFFFFFFFFFFFFF);\n    EXPECT_EQ(buffer.length(), 16);\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<uint64_t>(0), EnvoyException, \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 8, 0);\n    EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt<uint64_t>(1), EnvoyException, \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, DrainI8) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 0xFE});\n  EXPECT_EQ(buffer.drainInt<int8_t>(), 0);\n  EXPECT_EQ(buffer.drainInt<int8_t>(), 1);\n  EXPECT_EQ(buffer.drainInt<int8_t>(), -2);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, DrainLEI16) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainLEInt<int16_t>(), 0x0100);\n  EXPECT_EQ(buffer.drainLEInt<int16_t>(), 0x0302);\n  EXPECT_EQ(buffer.drainLEInt<int16_t>(), -1);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, DrainLEI32) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainLEInt<int32_t>(), 0x03020100);\n  EXPECT_EQ(buffer.drainLEInt<int32_t>(), -1);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, DrainLEI64) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainLEInt<int64_t>(), 0x0706050403020100);\n  EXPECT_EQ(buffer.drainLEInt<int64_t>(), -1);\n  EXPECT_EQ(buffer.length(), 
0);\n}\n\nTEST(BufferHelperTest, DrainLEU32) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainLEInt<uint32_t>(), 0x03020100);\n  EXPECT_EQ(buffer.drainLEInt<uint32_t>(), 0xFFFFFFFF);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, DrainLEU64) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainLEInt<uint64_t>(), 0x0706050403020100);\n  EXPECT_EQ(buffer.drainLEInt<uint64_t>(), 0xFFFFFFFFFFFFFFFF);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, DrainBEI16) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainBEInt<int16_t>(), 1);\n  EXPECT_EQ(buffer.drainBEInt<int16_t>(), 0x0203);\n  EXPECT_EQ(buffer.drainBEInt<int16_t>(), -1);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, DrainBEI32) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainBEInt<int32_t>(), 0x00010203);\n  EXPECT_EQ(buffer.drainBEInt<int32_t>(), -1);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, DrainBEI64) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainBEInt<int64_t>(), 0x0001020304050607);\n  EXPECT_EQ(buffer.drainBEInt<int64_t>(), -1);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, DrainBEU32) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainBEInt<uint32_t>(), 0x00010203);\n  EXPECT_EQ(buffer.drainBEInt<uint32_t>(), 0xFFFFFFFF);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, DrainBEU64) {\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});\n  EXPECT_EQ(buffer.drainBEInt<uint64_t>(), 0x0001020304050607);\n  
EXPECT_EQ(buffer.drainBEInt<uint64_t>(), 0xFFFFFFFFFFFFFFFF);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, WriteI8) {\n  Buffer::OwnedImpl buffer;\n  buffer.writeByte(-128);\n  buffer.writeByte(-1);\n  buffer.writeByte(0);\n  buffer.writeByte(1);\n  buffer.writeByte(127);\n\n  EXPECT_EQ(std::string(\"\\x80\\xFF\\0\\x1\\x7F\", 5), buffer.toString());\n}\n\nTEST(BufferHelperTest, WriteLEI16) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int16_t>(std::numeric_limits<int16_t>::min());\n    EXPECT_EQ(std::string(\"\\0\\x80\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int16_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int16_t>(1);\n    EXPECT_EQ(std::string(\"\\x1\\0\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int16_t>(std::numeric_limits<int16_t>::max());\n    EXPECT_EQ(\"\\xFF\\x7F\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteLEU16) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<uint16_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<uint16_t>(1);\n    EXPECT_EQ(std::string(\"\\x1\\0\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<uint16_t>(static_cast<uint16_t>(std::numeric_limits<int16_t>::max()) + 1);\n    EXPECT_EQ(std::string(\"\\0\\x80\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<uint16_t>(std::numeric_limits<uint16_t>::max());\n    EXPECT_EQ(\"\\xFF\\xFF\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteLEI32) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int32_t>(std::numeric_limits<int32_t>::min());\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\x80\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    
buffer.writeLEInt<int32_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int32_t>(1);\n    EXPECT_EQ(std::string(\"\\x1\\0\\0\\0\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int32_t>(std::numeric_limits<int32_t>::max());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\x7F\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteLEU32) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<uint32_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<uint32_t>(1);\n    EXPECT_EQ(std::string(\"\\x1\\0\\0\\0\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<uint32_t>(static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) + 1);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\x80\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<uint32_t>(std::numeric_limits<uint32_t>::max());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\", buffer.toString());\n  }\n}\nTEST(BufferHelperTest, WriteLEI64) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int64_t>(std::numeric_limits<int64_t>::min());\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\\0\\0\\0\\x80\", 8), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int64_t>(1);\n    EXPECT_EQ(std::string(\"\\x1\\0\\0\\0\\0\\0\\0\\0\", 8), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int64_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\\0\\0\\0\\0\", 8), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeLEInt<int64_t>(std::numeric_limits<int64_t>::max());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\x7F\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteBEI16) {\n  {\n    Buffer::OwnedImpl buffer;\n    
buffer.writeBEInt<int16_t>(std::numeric_limits<int16_t>::min());\n    EXPECT_EQ(std::string(\"\\x80\\0\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int16_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int16_t>(1);\n    EXPECT_EQ(std::string(\"\\0\\x1\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int16_t>(std::numeric_limits<int16_t>::max());\n    EXPECT_EQ(\"\\x7F\\xFF\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteBEU16) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<uint16_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<uint16_t>(1);\n    EXPECT_EQ(std::string(\"\\0\\x1\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<uint16_t>(static_cast<uint16_t>(std::numeric_limits<int16_t>::max()) + 1);\n    EXPECT_EQ(std::string(\"\\x80\\0\", 2), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<uint16_t>(std::numeric_limits<uint16_t>::max());\n    EXPECT_EQ(\"\\xFF\\xFF\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteBEI32) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(std::numeric_limits<int32_t>::min());\n    EXPECT_EQ(std::string(\"\\x80\\0\\0\\0\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(1);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\x1\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(std::numeric_limits<int32_t>::max());\n    EXPECT_EQ(\"\\x7F\\xFF\\xFF\\xFF\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, 
WriteBEU32) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<uint32_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<uint32_t>(1);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\x1\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<uint32_t>(static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) + 1);\n    EXPECT_EQ(std::string(\"\\x80\\0\\0\\0\", 4), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<uint32_t>(std::numeric_limits<uint32_t>::max());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\", buffer.toString());\n  }\n}\nTEST(BufferHelperTest, WriteBEI64) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int64_t>(std::numeric_limits<int64_t>::min());\n    EXPECT_EQ(std::string(\"\\x80\\0\\0\\0\\0\\0\\0\\0\\0\", 8), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int64_t>(1);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\\0\\0\\0\\x1\", 8), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int64_t>(0);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\\0\\0\\0\\0\", 8), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int64_t>(std::numeric_limits<int64_t>::max());\n    EXPECT_EQ(\"\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\", buffer.toString());\n  }\n}\n\n} // namespace\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/buffer/owned_impl_test.cc",
    "content": "#include \"envoy/api/io_error.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"test/common/buffer/utility.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Buffer {\nnamespace {\n\nclass OwnedImplTest : public testing::Test {\npublic:\n  bool release_callback_called_ = false;\n\nprotected:\n  static void clearReservation(Buffer::RawSlice* iovecs, uint64_t num_iovecs, OwnedImpl& buffer) {\n    for (uint64_t i = 0; i < num_iovecs; i++) {\n      iovecs[i].len_ = 0;\n    }\n    buffer.commit(iovecs, num_iovecs);\n  }\n\n  static void commitReservation(Buffer::RawSlice* iovecs, uint64_t num_iovecs, OwnedImpl& buffer) {\n    buffer.commit(iovecs, num_iovecs);\n  }\n\n  static void expectSlices(std::vector<std::vector<int>> buffer_list, OwnedImpl& buffer) {\n    const auto& buffer_slices = buffer.describeSlicesForTest();\n    ASSERT_EQ(buffer_list.size(), buffer_slices.size());\n    for (uint64_t i = 0; i < buffer_slices.size(); i++) {\n      EXPECT_EQ(buffer_slices[i].data, buffer_list[i][0]);\n      EXPECT_EQ(buffer_slices[i].reservable, buffer_list[i][1]);\n      EXPECT_EQ(buffer_slices[i].capacity, buffer_list[i][2]);\n    }\n  }\n\n  static void expectFirstSlice(std::vector<int> slice_description, OwnedImpl& buffer) {\n    const auto& buffer_slices = buffer.describeSlicesForTest();\n    ASSERT_LE(1, buffer_slices.size());\n    EXPECT_EQ(buffer_slices[0].data, slice_description[0]);\n    EXPECT_EQ(buffer_slices[0].reservable, slice_description[1]);\n    EXPECT_EQ(buffer_slices[0].capacity, slice_description[2]);\n  }\n};\n\nTEST_F(OwnedImplTest, AddBufferFragmentNoCleanup) {\n  char input[] = \"hello world\";\n  
BufferFragmentImpl frag(input, 11, nullptr);\n  Buffer::OwnedImpl buffer;\n  buffer.addBufferFragment(frag);\n  EXPECT_EQ(11, buffer.length());\n\n  buffer.drain(11);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(OwnedImplTest, AddBufferFragmentWithCleanup) {\n  std::string input(2048, 'a');\n  BufferFragmentImpl frag(\n      input.c_str(), input.size(),\n      [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; });\n  Buffer::OwnedImpl buffer;\n  buffer.addBufferFragment(frag);\n  EXPECT_EQ(2048, buffer.length());\n\n  buffer.drain(2000);\n  EXPECT_EQ(48, buffer.length());\n  EXPECT_FALSE(release_callback_called_);\n\n  buffer.drain(48);\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_TRUE(release_callback_called_);\n}\n\nTEST_F(OwnedImplTest, AddEmptyFragment) {\n  char input[] = \"hello world\";\n  BufferFragmentImpl frag1(input, 11, [](const void*, size_t, const BufferFragmentImpl*) {});\n  BufferFragmentImpl frag2(\"\", 0, [this](const void*, size_t, const BufferFragmentImpl*) {\n    release_callback_called_ = true;\n  });\n  BufferFragmentImpl frag3(input, 11, [](const void*, size_t, const BufferFragmentImpl*) {});\n  Buffer::OwnedImpl buffer;\n  buffer.addBufferFragment(frag1);\n  EXPECT_EQ(11, buffer.length());\n\n  buffer.addBufferFragment(frag2);\n  EXPECT_EQ(11, buffer.length());\n\n  buffer.addBufferFragment(frag3);\n  EXPECT_EQ(22, buffer.length());\n\n  // Cover case of copying a buffer with an empty fragment.\n  Buffer::OwnedImpl buffer2;\n  buffer2.add(buffer);\n\n  // Cover copyOut\n  std::unique_ptr<char[]> outbuf(new char[buffer.length()]);\n  buffer.copyOut(0, buffer.length(), outbuf.get());\n\n  buffer.drain(22);\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_TRUE(release_callback_called_);\n}\n\nTEST_F(OwnedImplTest, AddBufferFragmentDynamicAllocation) {\n  std::string input_str(2048, 'a');\n  char* input = new char[2048];\n  std::copy(input_str.c_str(), input_str.c_str() + 11, input);\n\n  BufferFragmentImpl* 
frag = new BufferFragmentImpl(\n      input, 2048, [this](const void* data, size_t, const BufferFragmentImpl* frag) {\n        release_callback_called_ = true;\n        delete[] static_cast<const char*>(data);\n        delete frag;\n      });\n\n  Buffer::OwnedImpl buffer;\n  buffer.addBufferFragment(*frag);\n  EXPECT_EQ(2048, buffer.length());\n\n  buffer.drain(2042);\n  EXPECT_EQ(6, buffer.length());\n  EXPECT_FALSE(release_callback_called_);\n\n  buffer.drain(6);\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_TRUE(release_callback_called_);\n}\n\nTEST_F(OwnedImplTest, AddOwnedBufferFragmentWithCleanup) {\n  std::string input(2048, 'a');\n  const size_t expected_length = input.size();\n  auto frag = OwnedBufferFragmentImpl::create(\n      {input.c_str(), expected_length},\n      [this](const OwnedBufferFragmentImpl*) { release_callback_called_ = true; });\n  Buffer::OwnedImpl buffer;\n  buffer.addBufferFragment(*frag);\n  EXPECT_EQ(expected_length, buffer.length());\n\n  const uint64_t partial_drain_size = 5;\n  buffer.drain(partial_drain_size);\n  EXPECT_EQ(expected_length - partial_drain_size, buffer.length());\n  EXPECT_FALSE(release_callback_called_);\n\n  buffer.drain(expected_length - partial_drain_size);\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_TRUE(release_callback_called_);\n}\n\n// Verify that OwnedBufferFragment work correctly when input buffer is allocated on the heap.\nTEST_F(OwnedImplTest, AddOwnedBufferFragmentDynamicAllocation) {\n  std::string input_str(2048, 'a');\n  const size_t expected_length = input_str.size();\n  char* input = new char[expected_length];\n  std::copy(input_str.c_str(), input_str.c_str() + expected_length, input);\n\n  auto* frag = OwnedBufferFragmentImpl::create({input, expected_length},\n                                               [this, input](const OwnedBufferFragmentImpl* frag) {\n                                                 release_callback_called_ = true;\n                                                 
delete[] input;\n                                                 delete frag;\n                                               })\n                   .release();\n\n  Buffer::OwnedImpl buffer;\n  buffer.addBufferFragment(*frag);\n  EXPECT_EQ(expected_length, buffer.length());\n\n  const uint64_t partial_drain_size = 5;\n  buffer.drain(partial_drain_size);\n  EXPECT_EQ(expected_length - partial_drain_size, buffer.length());\n  EXPECT_FALSE(release_callback_called_);\n\n  buffer.drain(expected_length - partial_drain_size);\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_TRUE(release_callback_called_);\n}\n\nTEST_F(OwnedImplTest, Add) {\n  const std::string string1 = \"Hello, \", string2 = \"World!\";\n  Buffer::OwnedImpl buffer;\n  buffer.add(string1);\n  EXPECT_EQ(string1.size(), buffer.length());\n  EXPECT_EQ(string1, buffer.toString());\n\n  buffer.add(string2);\n  EXPECT_EQ(string1.size() + string2.size(), buffer.length());\n  EXPECT_EQ(string1 + string2, buffer.toString());\n\n  // Append a large string that will only partially fit in the space remaining\n  // at the end of the buffer.\n  std::string big_suffix;\n  big_suffix.reserve(16385);\n  for (unsigned i = 0; i < 16; i++) {\n    big_suffix += std::string(1024, 'A' + i);\n  }\n  big_suffix.push_back('-');\n  buffer.add(big_suffix);\n  EXPECT_EQ(string1.size() + string2.size() + big_suffix.size(), buffer.length());\n  EXPECT_EQ(string1 + string2 + big_suffix, buffer.toString());\n}\n\nTEST_F(OwnedImplTest, Prepend) {\n  const std::string suffix = \"World!\", prefix = \"Hello, \";\n  Buffer::OwnedImpl buffer;\n  buffer.add(suffix);\n  buffer.prepend(prefix);\n\n  EXPECT_EQ(suffix.size() + prefix.size(), buffer.length());\n  EXPECT_EQ(prefix + suffix, buffer.toString());\n\n  // Prepend a large string that will only partially fit in the space remaining\n  // at the front of the buffer.\n  std::string big_prefix;\n  big_prefix.reserve(16385);\n  for (unsigned i = 0; i < 16; i++) {\n    big_prefix += 
std::string(1024, 'A' + i);\n  }\n  big_prefix.push_back('-');\n  buffer.prepend(big_prefix);\n  EXPECT_EQ(big_prefix.size() + prefix.size() + suffix.size(), buffer.length());\n  EXPECT_EQ(big_prefix + prefix + suffix, buffer.toString());\n}\n\nTEST_F(OwnedImplTest, PrependToEmptyBuffer) {\n  std::string data = \"Hello, World!\";\n  Buffer::OwnedImpl buffer;\n  buffer.prepend(data);\n\n  EXPECT_EQ(data.size(), buffer.length());\n  EXPECT_EQ(data, buffer.toString());\n\n  buffer.prepend(\"\");\n\n  EXPECT_EQ(data.size(), buffer.length());\n  EXPECT_EQ(data, buffer.toString());\n}\n\nTEST_F(OwnedImplTest, PrependBuffer) {\n  std::string suffix = \"World!\", prefix = \"Hello, \";\n  Buffer::OwnedImpl buffer;\n  buffer.add(suffix);\n  Buffer::OwnedImpl prefixBuffer;\n  prefixBuffer.add(prefix);\n\n  buffer.prepend(prefixBuffer);\n\n  EXPECT_EQ(suffix.size() + prefix.size(), buffer.length());\n  EXPECT_EQ(prefix + suffix, buffer.toString());\n  EXPECT_EQ(0, prefixBuffer.length());\n}\n\nTEST_F(OwnedImplTest, Write) {\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n\n  Buffer::OwnedImpl buffer;\n  Network::IoSocketHandleImpl io_handle;\n  buffer.add(\"example\");\n  EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{7, 0}));\n  Api::IoCallUint64Result result = io_handle.write(buffer);\n  EXPECT_TRUE(result.ok());\n  EXPECT_EQ(7, result.rc_);\n  EXPECT_EQ(0, buffer.length());\n\n  buffer.add(\"example\");\n  EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{6, 0}));\n  result = io_handle.write(buffer);\n  EXPECT_TRUE(result.ok());\n  EXPECT_EQ(6, result.rc_);\n  EXPECT_EQ(1, buffer.length());\n\n  EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{0, 0}));\n  result = io_handle.write(buffer);\n  EXPECT_TRUE(result.ok());\n  EXPECT_EQ(0, result.rc_);\n  EXPECT_EQ(1, buffer.length());\n\n  
EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 0}));\n  result = io_handle.write(buffer);\n  EXPECT_EQ(Api::IoError::IoErrorCode::UnknownError, result.err_->getErrorCode());\n  EXPECT_EQ(0, result.rc_);\n  EXPECT_EQ(1, buffer.length());\n\n  EXPECT_CALL(os_sys_calls, writev(_, _, _))\n      .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN}));\n  result = io_handle.write(buffer);\n  EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode());\n  EXPECT_EQ(0, result.rc_);\n  EXPECT_EQ(1, buffer.length());\n\n  EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{1, 0}));\n  result = io_handle.write(buffer);\n  EXPECT_TRUE(result.ok());\n  EXPECT_EQ(1, result.rc_);\n  EXPECT_EQ(0, buffer.length());\n\n  EXPECT_CALL(os_sys_calls, writev(_, _, _)).Times(0);\n  result = io_handle.write(buffer);\n  EXPECT_EQ(0, result.rc_);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(OwnedImplTest, Read) {\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n\n  Buffer::OwnedImpl buffer;\n  Network::IoSocketHandleImpl io_handle;\n  EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{0, 0}));\n  Api::IoCallUint64Result result = io_handle.read(buffer, 100);\n  EXPECT_TRUE(result.ok());\n  EXPECT_EQ(0, result.rc_);\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty());\n\n  EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 0}));\n  result = io_handle.read(buffer, 100);\n  EXPECT_EQ(Api::IoError::IoErrorCode::UnknownError, result.err_->getErrorCode());\n  EXPECT_EQ(0, result.rc_);\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty());\n\n  EXPECT_CALL(os_sys_calls, readv(_, _, _))\n      .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN}));\n  result = 
io_handle.read(buffer, 100);\n  EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode());\n  EXPECT_EQ(0, result.rc_);\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty());\n\n  EXPECT_CALL(os_sys_calls, readv(_, _, _)).Times(0);\n  result = io_handle.read(buffer, 0);\n  EXPECT_EQ(0, result.rc_);\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty());\n}\n\nTEST_F(OwnedImplTest, ExtractOwnedSlice) {\n  // Create a buffer with two owned slices.\n  Buffer::OwnedImpl buffer;\n  buffer.appendSliceForTest(\"abcde\");\n  const uint64_t expected_length0 = 5;\n  buffer.appendSliceForTest(\"123\");\n  const uint64_t expected_length1 = 3;\n  EXPECT_EQ(buffer.toString(), \"abcde123\");\n  RawSliceVector slices = buffer.getRawSlices();\n  EXPECT_EQ(2, slices.size());\n\n  // Extract first slice.\n  auto slice = buffer.extractMutableFrontSlice();\n  ASSERT_TRUE(slice);\n  auto slice_data = slice->getMutableData();\n  ASSERT_NE(slice_data.data(), nullptr);\n  EXPECT_EQ(slice_data.size(), expected_length0);\n  EXPECT_EQ(\"abcde\",\n            absl::string_view(reinterpret_cast<const char*>(slice_data.data()), slice_data.size()));\n  EXPECT_EQ(buffer.toString(), \"123\");\n\n  // Modify and re-add extracted first slice data to the end of the buffer.\n  auto slice_mutable_data = slice->getMutableData();\n  ASSERT_NE(slice_mutable_data.data(), nullptr);\n  EXPECT_EQ(slice_mutable_data.size(), expected_length0);\n  *slice_mutable_data.data() = 'A';\n  buffer.appendSliceForTest(slice_mutable_data.data(), slice_mutable_data.size());\n  EXPECT_EQ(buffer.toString(), \"123Abcde\");\n\n  // Extract second slice, leaving only the original first slice.\n  slice = buffer.extractMutableFrontSlice();\n  ASSERT_TRUE(slice);\n  slice_data = slice->getMutableData();\n  ASSERT_NE(slice_data.data(), nullptr);\n  EXPECT_EQ(slice_data.size(), expected_length1);\n  EXPECT_EQ(\"123\",\n         
   absl::string_view(reinterpret_cast<const char*>(slice_data.data()), slice_data.size()));\n  EXPECT_EQ(buffer.toString(), \"Abcde\");\n}\n\nTEST_F(OwnedImplTest, ExtractAfterSentinelDiscard) {\n  // Create a buffer with a sentinel and one owned slice.\n  Buffer::OwnedImpl buffer;\n  bool sentinel_discarded = false;\n  const Buffer::OwnedBufferFragmentImpl::Releasor sentinel_releasor{\n      [&](const Buffer::OwnedBufferFragmentImpl* sentinel) {\n        sentinel_discarded = true;\n        delete sentinel;\n      }};\n  auto sentinel =\n      Buffer::OwnedBufferFragmentImpl::create(absl::string_view(\"\", 0), sentinel_releasor);\n  buffer.addBufferFragment(*sentinel.release());\n\n  buffer.appendSliceForTest(\"abcde\");\n  const uint64_t expected_length = 5;\n  EXPECT_EQ(buffer.toString(), \"abcde\");\n  RawSliceVector slices = buffer.getRawSlices(); // only returns slices with data\n  EXPECT_EQ(1, slices.size());\n\n  // Extract owned slice after discarding sentinel.\n  EXPECT_FALSE(sentinel_discarded);\n  auto slice = buffer.extractMutableFrontSlice();\n  ASSERT_TRUE(slice);\n  EXPECT_TRUE(sentinel_discarded);\n  auto slice_data = slice->getMutableData();\n  ASSERT_NE(slice_data.data(), nullptr);\n  EXPECT_EQ(slice_data.size(), expected_length);\n  EXPECT_EQ(\"abcde\",\n            absl::string_view(reinterpret_cast<const char*>(slice_data.data()), slice_data.size()));\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(OwnedImplTest, DrainThenExtractOwnedSlice) {\n  // Create a buffer with two owned slices.\n  Buffer::OwnedImpl buffer;\n  buffer.appendSliceForTest(\"abcde\");\n  const uint64_t expected_length0 = 5;\n  buffer.appendSliceForTest(\"123\");\n  EXPECT_EQ(buffer.toString(), \"abcde123\");\n  RawSliceVector slices = buffer.getRawSlices();\n  EXPECT_EQ(2, slices.size());\n\n  // Partially drain the first slice.\n  const uint64_t partial_drain_size = 2;\n  buffer.drain(partial_drain_size);\n  EXPECT_EQ(buffer.toString(), static_cast<const 
char*>(\"abcde123\") + partial_drain_size);\n\n  // Extracted partially drained first slice, leaving the second slice.\n  auto slice = buffer.extractMutableFrontSlice();\n  ASSERT_TRUE(slice);\n  auto slice_data = slice->getMutableData();\n  ASSERT_NE(slice_data.data(), nullptr);\n  EXPECT_EQ(slice_data.size(), expected_length0 - partial_drain_size);\n  EXPECT_EQ(static_cast<const char*>(\"abcde\") + partial_drain_size,\n            absl::string_view(reinterpret_cast<const char*>(slice_data.data()), slice_data.size()));\n  EXPECT_EQ(buffer.toString(), \"123\");\n}\n\nTEST_F(OwnedImplTest, ExtractUnownedSlice) {\n  // Create a buffer with an unowned slice.\n  std::string input{\"unowned test slice\"};\n  const size_t expected_length0 = input.size();\n  auto frag = OwnedBufferFragmentImpl::create(\n      {input.c_str(), expected_length0},\n      [this](const OwnedBufferFragmentImpl*) { release_callback_called_ = true; });\n  Buffer::OwnedImpl buffer;\n  buffer.addBufferFragment(*frag);\n\n  bool drain_tracker_called{false};\n  buffer.addDrainTracker([&] { drain_tracker_called = true; });\n\n  // Add an owned slice to the end of the buffer.\n  EXPECT_EQ(expected_length0, buffer.length());\n  std::string owned_slice_content{\"another slice, but owned\"};\n  buffer.add(owned_slice_content);\n  const uint64_t expected_length1 = owned_slice_content.length();\n\n  // Partially drain the unowned slice.\n  const uint64_t partial_drain_size = 5;\n  buffer.drain(partial_drain_size);\n  EXPECT_EQ(expected_length0 - partial_drain_size + expected_length1, buffer.length());\n  EXPECT_FALSE(release_callback_called_);\n  EXPECT_FALSE(drain_tracker_called);\n\n  // Extract what remains of the unowned slice, leaving only the owned slice.\n  auto slice = buffer.extractMutableFrontSlice();\n  ASSERT_TRUE(slice);\n  EXPECT_TRUE(drain_tracker_called);\n  auto slice_data = slice->getMutableData();\n  ASSERT_NE(slice_data.data(), nullptr);\n  EXPECT_EQ(slice_data.size(), expected_length0 - 
partial_drain_size);\n  EXPECT_EQ(input.data() + partial_drain_size,\n            absl::string_view(reinterpret_cast<const char*>(slice_data.data()), slice_data.size()));\n  EXPECT_EQ(expected_length1, buffer.length());\n\n  // The underlying immutable unowned slice was discarded during the extract\n  // operation and replaced with a mutable copy. The drain trackers were\n  // called as part of the extract, implying that the release callback was called.\n  EXPECT_TRUE(release_callback_called_);\n}\n\nTEST_F(OwnedImplTest, ExtractWithDrainTracker) {\n  testing::InSequence s;\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(\"a\");\n\n  testing::MockFunction<void()> tracker1;\n  testing::MockFunction<void()> tracker2;\n  buffer.addDrainTracker(tracker1.AsStdFunction());\n  buffer.addDrainTracker(tracker2.AsStdFunction());\n\n  testing::MockFunction<void()> done;\n  EXPECT_CALL(tracker1, Call());\n  EXPECT_CALL(tracker2, Call());\n  EXPECT_CALL(done, Call());\n  auto slice = buffer.extractMutableFrontSlice();\n  // The test now has ownership of the slice, but the drain trackers were\n  // called as part of the extract operation\n  done.Call();\n  slice.reset();\n}\n\nTEST_F(OwnedImplTest, DrainTracking) {\n  testing::InSequence s;\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(\"a\");\n\n  testing::MockFunction<void()> tracker1;\n  testing::MockFunction<void()> tracker2;\n  buffer.addDrainTracker(tracker1.AsStdFunction());\n  buffer.addDrainTracker(tracker2.AsStdFunction());\n\n  testing::MockFunction<void()> done;\n  EXPECT_CALL(tracker1, Call());\n  EXPECT_CALL(tracker2, Call());\n  EXPECT_CALL(done, Call());\n  buffer.drain(buffer.length());\n  done.Call();\n}\n\nTEST_F(OwnedImplTest, MoveDrainTrackersWhenTransferingSlices) {\n  testing::InSequence s;\n\n  Buffer::OwnedImpl buffer1;\n  buffer1.add(\"a\");\n\n  testing::MockFunction<void()> tracker1;\n  buffer1.addDrainTracker(tracker1.AsStdFunction());\n\n  Buffer::OwnedImpl buffer2;\n  buffer2.add(\"b\");\n\n  
testing::MockFunction<void()> tracker2;\n  buffer2.addDrainTracker(tracker2.AsStdFunction());\n\n  buffer2.add(std::string(10000, 'c'));\n  testing::MockFunction<void()> tracker3;\n  buffer2.addDrainTracker(tracker3.AsStdFunction());\n  EXPECT_EQ(2, buffer2.getRawSlices().size());\n\n  buffer1.move(buffer2);\n  EXPECT_EQ(10002, buffer1.length());\n  EXPECT_EQ(0, buffer2.length());\n  EXPECT_EQ(3, buffer1.getRawSlices().size());\n  EXPECT_EQ(0, buffer2.getRawSlices().size());\n\n  testing::MockFunction<void()> done;\n  EXPECT_CALL(tracker1, Call());\n  EXPECT_CALL(tracker2, Call());\n  EXPECT_CALL(tracker3, Call());\n  EXPECT_CALL(done, Call());\n  buffer1.drain(buffer1.length());\n  done.Call();\n}\n\nTEST_F(OwnedImplTest, MoveDrainTrackersWhenCopying) {\n  testing::InSequence s;\n\n  Buffer::OwnedImpl buffer1;\n  buffer1.add(\"a\");\n\n  testing::MockFunction<void()> tracker1;\n  buffer1.addDrainTracker(tracker1.AsStdFunction());\n\n  Buffer::OwnedImpl buffer2;\n  buffer2.add(\"b\");\n\n  testing::MockFunction<void()> tracker2;\n  buffer2.addDrainTracker(tracker2.AsStdFunction());\n\n  buffer1.move(buffer2);\n  EXPECT_EQ(2, buffer1.length());\n  EXPECT_EQ(0, buffer2.length());\n  EXPECT_EQ(1, buffer1.getRawSlices().size());\n  EXPECT_EQ(0, buffer2.getRawSlices().size());\n\n  buffer1.drain(1);\n  testing::MockFunction<void()> done;\n  EXPECT_CALL(tracker1, Call());\n  EXPECT_CALL(tracker2, Call());\n  EXPECT_CALL(done, Call());\n  buffer1.drain(1);\n  done.Call();\n}\n\nTEST_F(OwnedImplTest, PartialMoveDrainTrackers) {\n  testing::InSequence s;\n\n  Buffer::OwnedImpl buffer1;\n  buffer1.add(\"a\");\n\n  testing::MockFunction<void()> tracker1;\n  buffer1.addDrainTracker(tracker1.AsStdFunction());\n\n  Buffer::OwnedImpl buffer2;\n  buffer2.add(\"b\");\n\n  testing::MockFunction<void()> tracker2;\n  buffer2.addDrainTracker(tracker2.AsStdFunction());\n\n  buffer2.add(std::string(10000, 'c'));\n  testing::MockFunction<void()> tracker3;\n  
buffer2.addDrainTracker(tracker3.AsStdFunction());\n  EXPECT_EQ(2, buffer2.getRawSlices().size());\n\n  // Move the first slice and associated trackers and part of the second slice to buffer1.\n  buffer1.move(buffer2, 4999);\n  EXPECT_EQ(5000, buffer1.length());\n  EXPECT_EQ(5002, buffer2.length());\n  EXPECT_EQ(3, buffer1.getRawSlices().size());\n  EXPECT_EQ(1, buffer2.getRawSlices().size());\n\n  testing::MockFunction<void()> done;\n  EXPECT_CALL(tracker1, Call());\n  buffer1.drain(1);\n\n  EXPECT_CALL(tracker2, Call());\n  EXPECT_CALL(done, Call());\n  buffer1.drain(buffer1.length());\n  done.Call();\n\n  // tracker3 remained in buffer2.\n  EXPECT_CALL(tracker3, Call());\n  buffer2.drain(buffer2.length());\n}\n\nTEST_F(OwnedImplTest, DrainTrackingOnDestruction) {\n  testing::InSequence s;\n\n  auto buffer = std::make_unique<Buffer::OwnedImpl>();\n  buffer->add(\"a\");\n\n  testing::MockFunction<void()> tracker;\n  buffer->addDrainTracker(tracker.AsStdFunction());\n\n  testing::MockFunction<void()> done;\n  EXPECT_CALL(tracker, Call());\n  EXPECT_CALL(done, Call());\n  buffer.reset();\n  done.Call();\n}\n\nTEST_F(OwnedImplTest, Linearize) {\n  Buffer::OwnedImpl buffer;\n\n  // Unowned slice to track when linearize kicks in.\n  std::string input(1000, 'a');\n  BufferFragmentImpl frag(\n      input.c_str(), input.size(),\n      [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; });\n  buffer.addBufferFragment(frag);\n\n  // Second slice with more data.\n  buffer.add(std::string(1000, 'b'));\n\n  // Linearize does not change the pointer associated with the first slice if requested size is less\n  // than or equal to size of the first slice.\n  EXPECT_EQ(input.c_str(), buffer.linearize(input.size()));\n  EXPECT_FALSE(release_callback_called_);\n\n  constexpr uint64_t LinearizeSize = 2000;\n  void* out_ptr = buffer.linearize(LinearizeSize);\n  EXPECT_TRUE(release_callback_called_);\n  EXPECT_EQ(input + std::string(1000, 'b'),\n   
         absl::string_view(reinterpret_cast<const char*>(out_ptr), LinearizeSize));\n}\n\nTEST_F(OwnedImplTest, LinearizeEmptyBuffer) {\n  Buffer::OwnedImpl buffer;\n  EXPECT_EQ(nullptr, buffer.linearize(0));\n}\n\nTEST_F(OwnedImplTest, LinearizeSingleSlice) {\n  auto buffer = std::make_unique<Buffer::OwnedImpl>();\n\n  // Unowned slice to track when linearize kicks in.\n  std::string input(1000, 'a');\n  BufferFragmentImpl frag(\n      input.c_str(), input.size(),\n      [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; });\n  buffer->addBufferFragment(frag);\n\n  EXPECT_EQ(input.c_str(), buffer->linearize(buffer->length()));\n  EXPECT_FALSE(release_callback_called_);\n\n  buffer.reset();\n  EXPECT_TRUE(release_callback_called_);\n}\n\nTEST_F(OwnedImplTest, LinearizeDrainTracking) {\n  constexpr uint32_t SmallChunk = 200;\n  constexpr uint32_t LargeChunk = 16384 - SmallChunk;\n  constexpr uint32_t LinearizeSize = SmallChunk + LargeChunk;\n\n  // Create a buffer with a eclectic combination of buffer OwnedSlice and UnownedSlices that will\n  // help us explore the properties of linearize.\n  Buffer::OwnedImpl buffer;\n\n  // Large add below the target linearize size.\n  testing::MockFunction<void()> tracker1;\n  buffer.add(std::string(LargeChunk, 'a'));\n  buffer.addDrainTracker(tracker1.AsStdFunction());\n\n  // Unowned slice which causes some fragmentation.\n  testing::MockFunction<void()> tracker2;\n  testing::MockFunction<void(const void*, size_t, const BufferFragmentImpl*)>\n      release_callback_tracker;\n  std::string frag_input(2 * SmallChunk, 'b');\n  BufferFragmentImpl frag(frag_input.c_str(), frag_input.size(),\n                          release_callback_tracker.AsStdFunction());\n  buffer.addBufferFragment(frag);\n  buffer.addDrainTracker(tracker2.AsStdFunction());\n\n  // And an unowned slice with 0 size, because.\n  testing::MockFunction<void()> tracker3;\n  testing::MockFunction<void(const void*, size_t, const 
BufferFragmentImpl*)>\n      release_callback_tracker2;\n  BufferFragmentImpl frag2(nullptr, 0, release_callback_tracker2.AsStdFunction());\n  buffer.addBufferFragment(frag2);\n  buffer.addDrainTracker(tracker3.AsStdFunction());\n\n  // Add a very large chunk\n  testing::MockFunction<void()> tracker4;\n  buffer.add(std::string(LargeChunk + LinearizeSize, 'c'));\n  buffer.addDrainTracker(tracker4.AsStdFunction());\n\n  // Small adds that create no gaps.\n  testing::MockFunction<void()> tracker5;\n  for (int i = 0; i < 105; ++i) {\n    buffer.add(std::string(SmallChunk, 'd'));\n  }\n  buffer.addDrainTracker(tracker5.AsStdFunction());\n\n  expectSlices({{16184, 136, 16320},\n                {400, 0, 400},\n                {0, 0, 0},\n                {32704, 0, 32704},\n                {4032, 0, 4032},\n                {4032, 0, 4032},\n                {4032, 0, 4032},\n                {4032, 0, 4032},\n                {4032, 0, 4032},\n                {704, 3328, 4032}},\n               buffer);\n\n  testing::InSequence s;\n  testing::MockFunction<void(int, int)> drain_tracker;\n  testing::MockFunction<void()> done_tracker;\n  EXPECT_CALL(tracker1, Call());\n  EXPECT_CALL(drain_tracker, Call(3 * LargeChunk + 108 * SmallChunk, 16384));\n  EXPECT_CALL(release_callback_tracker, Call(_, _, _));\n  EXPECT_CALL(tracker2, Call());\n  EXPECT_CALL(release_callback_tracker2, Call(_, _, _));\n  EXPECT_CALL(tracker3, Call());\n  EXPECT_CALL(drain_tracker, Call(2 * LargeChunk + 107 * SmallChunk, 16384));\n  EXPECT_CALL(drain_tracker, Call(LargeChunk + 106 * SmallChunk, 16384));\n  EXPECT_CALL(tracker4, Call());\n  EXPECT_CALL(drain_tracker, Call(105 * SmallChunk, 16384));\n  EXPECT_CALL(tracker5, Call());\n  EXPECT_CALL(drain_tracker, Call(4616, 4616));\n  EXPECT_CALL(done_tracker, Call());\n  for (auto& expected_first_slice : std::vector<std::vector<int>>{{16384, 4032, 20416},\n                                                                  {16384, 4032, 20416},\n               
                                                   {16520, 0, 32704},\n                                                                  {16384, 4032, 20416},\n                                                                  {4616, 3512, 8128}}) {\n    const uint32_t write_size = std::min<uint32_t>(LinearizeSize, buffer.length());\n    buffer.linearize(write_size);\n    expectFirstSlice(expected_first_slice, buffer);\n    drain_tracker.Call(buffer.length(), write_size);\n    buffer.drain(write_size);\n  }\n  done_tracker.Call();\n\n  expectSlices({}, buffer);\n}\n\nTEST_F(OwnedImplTest, ReserveCommit) {\n  // This fragment will later be added to the buffer. It is declared in an enclosing scope to\n  // ensure it is not destructed until after the buffer is.\n  const std::string input = \"Hello, world\";\n  BufferFragmentImpl fragment(input.c_str(), input.size(), nullptr);\n\n  {\n    Buffer::OwnedImpl buffer;\n    // A zero-byte reservation should fail.\n    static constexpr uint64_t NumIovecs = 16;\n    Buffer::RawSlice iovecs[NumIovecs];\n    uint64_t num_reserved = buffer.reserve(0, iovecs, NumIovecs);\n    EXPECT_EQ(0, num_reserved);\n    clearReservation(iovecs, num_reserved, buffer);\n    EXPECT_EQ(0, buffer.length());\n\n    // Test and commit a small reservation. 
This should succeed.\n    num_reserved = buffer.reserve(1, iovecs, NumIovecs);\n    EXPECT_EQ(1, num_reserved);\n    // The implementation might provide a bigger reservation than requested.\n    EXPECT_LE(1, iovecs[0].len_);\n    iovecs[0].len_ = 1;\n    commitReservation(iovecs, num_reserved, buffer);\n    EXPECT_EQ(1, buffer.length());\n\n    // Request a reservation that fits in the remaining space at the end of the last slice.\n    num_reserved = buffer.reserve(1, iovecs, NumIovecs);\n    EXPECT_EQ(1, num_reserved);\n    EXPECT_LE(1, iovecs[0].len_);\n    iovecs[0].len_ = 1;\n    const void* slice1 = iovecs[0].mem_;\n    clearReservation(iovecs, num_reserved, buffer);\n\n    // Request a reservation that is too large to fit in the remaining space at the end of\n    // the last slice, and allow the buffer to use only one slice. This should result in the\n    // creation of a new slice within the buffer.\n    num_reserved = buffer.reserve(4096 - sizeof(OwnedSlice), iovecs, 1);\n    EXPECT_EQ(1, num_reserved);\n    EXPECT_NE(slice1, iovecs[0].mem_);\n    clearReservation(iovecs, num_reserved, buffer);\n\n    // Request the same size reservation, but allow the buffer to use multiple slices. This\n    // should result in the buffer creating a second slice and splitting the reservation between the\n    // last two slices.\n    num_reserved = buffer.reserve(4096 - sizeof(OwnedSlice), iovecs, NumIovecs);\n    EXPECT_EQ(2, num_reserved);\n    EXPECT_EQ(slice1, iovecs[0].mem_);\n    clearReservation(iovecs, num_reserved, buffer);\n\n    // Request a reservation that too big to fit in the existing slices. 
This should result\n    // in the creation of a third slice.\n    expectSlices({{1, 4031, 4032}}, buffer);\n    buffer.reserve(4096 - sizeof(OwnedSlice), iovecs, NumIovecs);\n    expectSlices({{1, 4031, 4032}, {0, 4032, 4032}}, buffer);\n    const void* slice2 = iovecs[1].mem_;\n    num_reserved = buffer.reserve(8192, iovecs, NumIovecs);\n    expectSlices({{1, 4031, 4032}, {0, 4032, 4032}, {0, 4032, 4032}}, buffer);\n    EXPECT_EQ(3, num_reserved);\n    EXPECT_EQ(slice1, iovecs[0].mem_);\n    EXPECT_EQ(slice2, iovecs[1].mem_);\n    clearReservation(iovecs, num_reserved, buffer);\n\n    // Append a fragment to the buffer, and then request a small reservation. The buffer\n    // should make a new slice to satisfy the reservation; it cannot safely use any of\n    // the previously seen slices, because they are no longer at the end of the buffer.\n    expectSlices({{1, 4031, 4032}}, buffer);\n    buffer.addBufferFragment(fragment);\n    EXPECT_EQ(13, buffer.length());\n    num_reserved = buffer.reserve(1, iovecs, NumIovecs);\n    expectSlices({{1, 4031, 4032}, {12, 0, 12}, {0, 4032, 4032}}, buffer);\n    EXPECT_EQ(1, num_reserved);\n    EXPECT_NE(slice1, iovecs[0].mem_);\n    commitReservation(iovecs, num_reserved, buffer);\n    EXPECT_EQ(14, buffer.length());\n  }\n}\n\nTEST_F(OwnedImplTest, ReserveCommitReuse) {\n  Buffer::OwnedImpl buffer;\n  static constexpr uint64_t NumIovecs = 2;\n  Buffer::RawSlice iovecs[NumIovecs];\n\n  // Reserve 8KB and commit all but a few bytes of it, to ensure that\n  // the last slice of the buffer can hold part but not all of the\n  // next reservation. Note that the buffer implementation might\n  // allocate more than the requested 8KB. 
In case the implementation\n  // uses a power-of-two allocator, the subsequent reservations all\n  // request 16KB.\n  uint64_t num_reserved = buffer.reserve(8192, iovecs, NumIovecs);\n  EXPECT_EQ(1, num_reserved);\n  iovecs[0].len_ = 8000;\n  buffer.commit(iovecs, 1);\n  EXPECT_EQ(8000, buffer.length());\n\n  // Reserve 16KB. The resulting reservation should span 2 slices.\n  // Commit part of the first slice and none of the second slice.\n  num_reserved = buffer.reserve(16384, iovecs, NumIovecs);\n  EXPECT_EQ(2, num_reserved);\n  const void* first_slice = iovecs[0].mem_;\n  iovecs[0].len_ = 1;\n  expectSlices({{8000, 4224, 12224}, {0, 12224, 12224}}, buffer);\n  buffer.commit(iovecs, 1);\n  EXPECT_EQ(8001, buffer.length());\n  EXPECT_EQ(first_slice, iovecs[0].mem_);\n  // The second slice is now released because there's nothing in the second slice.\n  expectSlices({{8001, 4223, 12224}}, buffer);\n\n  // Reserve 16KB again.\n  num_reserved = buffer.reserve(16384, iovecs, NumIovecs);\n  expectSlices({{8001, 4223, 12224}, {0, 12224, 12224}}, buffer);\n  EXPECT_EQ(2, num_reserved);\n  EXPECT_EQ(static_cast<const uint8_t*>(first_slice) + 1,\n            static_cast<const uint8_t*>(iovecs[0].mem_));\n}\n\nTEST_F(OwnedImplTest, ReserveReuse) {\n  Buffer::OwnedImpl buffer;\n  static constexpr uint64_t NumIovecs = 2;\n  Buffer::RawSlice iovecs[NumIovecs];\n\n  // Reserve some space and leave it uncommitted.\n  uint64_t num_reserved = buffer.reserve(8192, iovecs, NumIovecs);\n  EXPECT_EQ(1, num_reserved);\n  const void* first_slice = iovecs[0].mem_;\n\n  // Reserve more space and verify that it begins with the same slice from the last reservation.\n  num_reserved = buffer.reserve(16384, iovecs, NumIovecs);\n  EXPECT_EQ(2, num_reserved);\n  EXPECT_EQ(first_slice, iovecs[0].mem_);\n  const void* second_slice = iovecs[1].mem_;\n\n  // Repeat the last reservation and verify that it yields the same slices.\n  num_reserved = buffer.reserve(16384, iovecs, NumIovecs);\n  
EXPECT_EQ(2, num_reserved);\n  EXPECT_EQ(first_slice, iovecs[0].mem_);\n  EXPECT_EQ(second_slice, iovecs[1].mem_);\n  expectSlices({{0, 12224, 12224}, {0, 8128, 8128}}, buffer);\n\n  // Request a larger reservation, verify that the second entry is replaced with a block with a\n  // larger size.\n  num_reserved = buffer.reserve(30000, iovecs, NumIovecs);\n  const void* third_slice = iovecs[1].mem_;\n  EXPECT_EQ(2, num_reserved);\n  EXPECT_EQ(first_slice, iovecs[0].mem_);\n  EXPECT_EQ(12224, iovecs[0].len_);\n  EXPECT_NE(second_slice, iovecs[1].mem_);\n  EXPECT_EQ(30000 - iovecs[0].len_, iovecs[1].len_);\n  expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}}, buffer);\n\n  // Repeating a the reservation request for a smaller block returns the previous entry.\n  num_reserved = buffer.reserve(16384, iovecs, NumIovecs);\n  EXPECT_EQ(2, num_reserved);\n  EXPECT_EQ(first_slice, iovecs[0].mem_);\n  EXPECT_EQ(second_slice, iovecs[1].mem_);\n  expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}}, buffer);\n\n  // Repeat the larger reservation notice that it doesn't match the prior reservation for 30000\n  // bytes.\n  num_reserved = buffer.reserve(30000, iovecs, NumIovecs);\n  EXPECT_EQ(2, num_reserved);\n  EXPECT_EQ(first_slice, iovecs[0].mem_);\n  EXPECT_EQ(12224, iovecs[0].len_);\n  EXPECT_NE(second_slice, iovecs[1].mem_);\n  EXPECT_NE(third_slice, iovecs[1].mem_);\n  EXPECT_EQ(30000 - iovecs[0].len_, iovecs[1].len_);\n  expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}, {0, 20416, 20416}}, buffer);\n\n  // Commit the most recent reservation and verify the representation.\n  buffer.commit(iovecs, num_reserved);\n  expectSlices({{12224, 0, 12224}, {0, 8128, 8128}, {0, 20416, 20416}, {17776, 2640, 20416}},\n               buffer);\n\n  // Do another reservation.\n  num_reserved = buffer.reserve(16384, iovecs, NumIovecs);\n  EXPECT_EQ(2, num_reserved);\n  expectSlices({{12224, 0, 12224},\n                {0, 8128, 8128},\n   
             {0, 20416, 20416},\n                {17776, 2640, 20416},\n                {0, 16320, 16320}},\n               buffer);\n\n  // And commit.\n  buffer.commit(iovecs, num_reserved);\n  expectSlices({{12224, 0, 12224},\n                {0, 8128, 8128},\n                {0, 20416, 20416},\n                {20416, 0, 20416},\n                {13744, 2576, 16320}},\n               buffer);\n}\n\nTEST_F(OwnedImplTest, Search) {\n  // Populate a buffer with a string split across many small slices, to\n  // exercise edge cases in the search implementation.\n  static const char* Inputs[] = {\"ab\", \"a\", \"\", \"aaa\", \"b\", \"a\", \"aaa\", \"ab\", \"a\"};\n  Buffer::OwnedImpl buffer;\n  for (const auto& input : Inputs) {\n    buffer.appendSliceForTest(input);\n  }\n  EXPECT_STREQ(\"abaaaabaaaaaba\", buffer.toString().c_str());\n\n  EXPECT_EQ(-1, buffer.search(\"c\", 1, 0, 0));\n  EXPECT_EQ(0, buffer.search(\"\", 0, 0, 0));\n  EXPECT_EQ(buffer.length(), buffer.search(\"\", 0, buffer.length(), 0));\n  EXPECT_EQ(-1, buffer.search(\"\", 0, buffer.length() + 1, 0));\n  EXPECT_EQ(0, buffer.search(\"a\", 1, 0, 0));\n  EXPECT_EQ(1, buffer.search(\"b\", 1, 1, 0));\n  EXPECT_EQ(2, buffer.search(\"a\", 1, 1, 0));\n  EXPECT_EQ(0, buffer.search(\"abaa\", 4, 0, 0));\n  EXPECT_EQ(2, buffer.search(\"aaaa\", 4, 0, 0));\n  EXPECT_EQ(2, buffer.search(\"aaaa\", 4, 1, 0));\n  EXPECT_EQ(2, buffer.search(\"aaaa\", 4, 2, 0));\n  EXPECT_EQ(7, buffer.search(\"aaaaab\", 6, 0, 0));\n  EXPECT_EQ(0, buffer.search(\"abaaaabaaaaaba\", 14, 0, 0));\n  EXPECT_EQ(12, buffer.search(\"ba\", 2, 10, 0));\n  EXPECT_EQ(-1, buffer.search(\"abaaaabaaaaabaa\", 15, 0, 0));\n}\n\nTEST_F(OwnedImplTest, SearchWithLengthLimit) {\n  // Populate a buffer with a string split across many small slices, to\n  // exercise edge cases in the search implementation.\n  static const char* Inputs[] = {\"ab\", \"a\", \"\", \"aaa\", \"b\", \"a\", \"aaa\", \"ab\", \"a\"};\n  Buffer::OwnedImpl buffer;\n  for (const auto& 
input : Inputs) {\n    buffer.appendSliceForTest(input);\n  }\n  EXPECT_STREQ(\"abaaaabaaaaaba\", buffer.toString().c_str());\n\n  // The string is there, but the search is limited to 1 byte.\n  EXPECT_EQ(-1, buffer.search(\"b\", 1, 0, 1));\n  // The string is there, but the search is limited to 1 byte.\n  EXPECT_EQ(-1, buffer.search(\"ab\", 2, 0, 1));\n  // The string is there, but spans over 2 slices. The search length is enough\n  // to find it.\n  EXPECT_EQ(1, buffer.search(\"ba\", 2, 0, 3));\n  EXPECT_EQ(1, buffer.search(\"ba\", 2, 0, 5));\n  EXPECT_EQ(1, buffer.search(\"ba\", 2, 1, 2));\n  EXPECT_EQ(1, buffer.search(\"ba\", 2, 1, 5));\n  // The string spans over 3 slices. test different variations of search length\n  // and starting position.\n  EXPECT_EQ(2, buffer.search(\"aaaab\", 5, 2, 5));\n  EXPECT_EQ(-1, buffer.search(\"aaaab\", 5, 2, 3));\n  EXPECT_EQ(2, buffer.search(\"aaaab\", 5, 2, 6));\n  EXPECT_EQ(2, buffer.search(\"aaaab\", 5, 0, 8));\n  EXPECT_EQ(-1, buffer.search(\"aaaab\", 5, 0, 6));\n  // Test searching for the string which in in the last slice.\n  EXPECT_EQ(12, buffer.search(\"ba\", 2, 12, 2));\n  EXPECT_EQ(12, buffer.search(\"ba\", 2, 11, 3));\n  EXPECT_EQ(-1, buffer.search(\"ba\", 2, 11, 2));\n  // Test cases when length to search is larger than buffer\n  EXPECT_EQ(12, buffer.search(\"ba\", 2, 11, 10e6));\n}\n\nTEST_F(OwnedImplTest, StartsWith) {\n  // Populate a buffer with a string split across many small slices, to\n  // exercise edge cases in the startsWith implementation.\n  static const char* Inputs[] = {\"ab\", \"a\", \"\", \"aaa\", \"b\", \"a\", \"aaa\", \"ab\", \"a\"};\n  Buffer::OwnedImpl buffer;\n  for (const auto& input : Inputs) {\n    buffer.appendSliceForTest(input);\n  }\n  EXPECT_STREQ(\"abaaaabaaaaaba\", buffer.toString().c_str());\n\n  EXPECT_FALSE(buffer.startsWith({\"abaaaabaaaaabaXXX\", 17}));\n  EXPECT_FALSE(buffer.startsWith({\"c\", 1}));\n  EXPECT_TRUE(buffer.startsWith({\"\", 0}));\n  
EXPECT_TRUE(buffer.startsWith({\"a\", 1}));\n  EXPECT_TRUE(buffer.startsWith({\"ab\", 2}));\n  EXPECT_TRUE(buffer.startsWith({\"aba\", 3}));\n  EXPECT_TRUE(buffer.startsWith({\"abaa\", 4}));\n  EXPECT_TRUE(buffer.startsWith({\"abaaaab\", 7}));\n  EXPECT_TRUE(buffer.startsWith({\"abaaaabaaaaaba\", 14}));\n  EXPECT_FALSE(buffer.startsWith({\"ba\", 2}));\n}\n\nTEST_F(OwnedImplTest, ToString) {\n  Buffer::OwnedImpl buffer;\n  EXPECT_EQ(\"\", buffer.toString());\n  auto append = [&buffer](absl::string_view str) { buffer.add(str.data(), str.size()); };\n  append(\"Hello, \");\n  EXPECT_EQ(\"Hello, \", buffer.toString());\n  append(\"world!\");\n  EXPECT_EQ(\"Hello, world!\", buffer.toString());\n\n  // From debug inspection, I find that a second fragment is created at >1000 bytes.\n  std::string long_string(5000, 'A');\n  append(long_string);\n  EXPECT_EQ(absl::StrCat(\"Hello, world!\" + long_string), buffer.toString());\n}\n\nTEST_F(OwnedImplTest, AppendSliceForTest) {\n  static constexpr size_t NumInputs = 3;\n  static constexpr const char* Inputs[] = {\"one\", \"2\", \"\", \"four\", \"\"};\n  Buffer::OwnedImpl buffer;\n  EXPECT_EQ(0, buffer.getRawSlices().size());\n  EXPECT_EQ(0, buffer.getRawSlices(NumInputs).size());\n  for (const auto& input : Inputs) {\n    buffer.appendSliceForTest(input);\n  }\n  // getRawSlices(max_slices) will only return the 3 slices with nonzero length.\n  RawSliceVector slices = buffer.getRawSlices(/*max_slices=*/NumInputs);\n  EXPECT_EQ(3, slices.size());\n\n  // Verify edge case where max_slices is -1 and +1 the actual non-empty slice count.\n  EXPECT_EQ(2, buffer.getRawSlices(/*max_slices=*/NumInputs - 1).size());\n  EXPECT_EQ(3, buffer.getRawSlices(/*max_slices=*/NumInputs + 1).size());\n\n  auto expectSlice = [](const RawSlice& slice, const char* expected) {\n    size_t length = strlen(expected);\n    EXPECT_EQ(length, slice.len_) << expected;\n    EXPECT_EQ(0, memcmp(slice.mem_, expected, length));\n  };\n\n  expectSlice(slices[0], 
\"one\");\n  expectSlice(slices[1], \"2\");\n  expectSlice(slices[2], \"four\");\n\n  // getRawSlices returns only the slices with nonzero length.\n  RawSliceVector slices_vector = buffer.getRawSlices();\n  EXPECT_EQ(3, slices_vector.size());\n\n  expectSlice(slices_vector[0], \"one\");\n  expectSlice(slices_vector[1], \"2\");\n  expectSlice(slices_vector[2], \"four\");\n}\n\n// Regression test for oss-fuzz issue\n// https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13263, where prepending\n// an empty buffer resulted in a corrupted libevent internal state.\nTEST_F(OwnedImplTest, PrependEmpty) {\n  Buffer::OwnedImpl buf;\n  Buffer::OwnedImpl other_buf;\n  char input[] = \"foo\";\n  BufferFragmentImpl frag(input, 3, nullptr);\n  buf.addBufferFragment(frag);\n  buf.prepend(\"\");\n  other_buf.move(buf, 1);\n  buf.add(\"bar\");\n  EXPECT_EQ(\"oobar\", buf.toString());\n  buf.drain(5);\n  EXPECT_EQ(0, buf.length());\n}\n\n// Regression test for oss-fuzz issues\n// https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=14466, empty commit\n// following a reserve resulted in a corrupted libevent internal state.\nTEST_F(OwnedImplTest, ReserveZeroCommit) {\n  BufferFragmentImpl frag(\"\", 0, nullptr);\n  Buffer::OwnedImpl buf;\n  buf.addBufferFragment(frag);\n  buf.prepend(\"bbbbb\");\n  buf.add(\"\");\n  constexpr uint32_t reserve_slices = 16;\n  Buffer::RawSlice slices[reserve_slices];\n  const uint32_t allocated_slices = buf.reserve(1280, slices, reserve_slices);\n  for (uint32_t i = 0; i < allocated_slices; ++i) {\n    slices[i].len_ = 0;\n  }\n  buf.commit(slices, allocated_slices);\n  os_fd_t pipe_fds[2] = {0, 0};\n  auto& os_sys_calls = Api::OsSysCallsSingleton::get();\n#ifdef WIN32\n  ASSERT_EQ(os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).rc_, 0);\n#else\n  ASSERT_EQ(pipe(pipe_fds), 0);\n#endif\n  Network::IoSocketHandleImpl io_handle(pipe_fds[0]);\n  ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[0], false).rc_, 0);\n  
ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[1], false).rc_, 0);\n  const uint32_t max_length = 1953;\n  std::string data(max_length, 'e');\n  const ssize_t rc = os_sys_calls.write(pipe_fds[1], data.data(), max_length).rc_;\n  ASSERT_GT(rc, 0);\n  const uint32_t previous_length = buf.length();\n  Api::IoCallUint64Result result = io_handle.read(buf, max_length);\n  ASSERT_EQ(result.rc_, static_cast<uint64_t>(rc));\n  ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0);\n  ASSERT_EQ(previous_length, buf.search(data.data(), rc, previous_length, 0));\n  EXPECT_EQ(\"bbbbb\", buf.toString().substr(0, 5));\n  expectSlices({{5, 0, 4032}, {1953, 2079, 4032}}, buf);\n}\n\nTEST_F(OwnedImplTest, ReadReserveAndCommit) {\n  BufferFragmentImpl frag(\"\", 0, nullptr);\n  Buffer::OwnedImpl buf;\n  buf.add(\"bbbbb\");\n\n  os_fd_t pipe_fds[2] = {0, 0};\n  auto& os_sys_calls = Api::OsSysCallsSingleton::get();\n#ifdef WIN32\n  ASSERT_EQ(os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).rc_, 0);\n#else\n  ASSERT_EQ(pipe(pipe_fds), 0);\n#endif\n  Network::IoSocketHandleImpl io_handle(pipe_fds[0]);\n  ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[0], false).rc_, 0);\n  ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[1], false).rc_, 0);\n\n  const uint32_t read_length = 32768;\n  std::string data = \"e\";\n  const ssize_t rc = os_sys_calls.write(pipe_fds[1], data.data(), data.size()).rc_;\n  ASSERT_GT(rc, 0);\n  Api::IoCallUint64Result result = io_handle.read(buf, read_length);\n  ASSERT_EQ(result.rc_, static_cast<uint64_t>(rc));\n  ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0);\n  EXPECT_EQ(\"bbbbbe\", buf.toString());\n  expectSlices({{6, 4026, 4032}}, buf);\n}\n\nTEST(OverflowDetectingUInt64, Arithmetic) {\n  OverflowDetectingUInt64 length;\n  length += 1;\n  length -= 1;\n  length -= 0;\n  EXPECT_DEATH(length -= 1, \"underflow\");\n  uint64_t half = uint64_t(1) << 63;\n  length += half;\n  length += (half - 1); // length is now 2^64 - 1\n  EXPECT_DEATH(length += 
1, \"overflow\");\n}\n\nvoid TestBufferMove(uint64_t buffer1_length, uint64_t buffer2_length,\n                    uint64_t expected_slice_count) {\n  Buffer::OwnedImpl buffer1;\n  buffer1.add(std::string(buffer1_length, 'a'));\n  EXPECT_EQ(1, buffer1.getRawSlices().size());\n\n  Buffer::OwnedImpl buffer2;\n  buffer2.add(std::string(buffer2_length, 'b'));\n  EXPECT_EQ(1, buffer2.getRawSlices().size());\n\n  buffer1.move(buffer2);\n  EXPECT_EQ(expected_slice_count, buffer1.getRawSlices().size());\n  EXPECT_EQ(buffer1_length + buffer2_length, buffer1.length());\n  // Make sure `buffer2` was drained.\n  EXPECT_EQ(0, buffer2.length());\n}\n\n// Slice size large enough to prevent slice content from being coalesced into an existing slice\nconstexpr uint64_t kLargeSliceSize = 2048;\n\nTEST_F(OwnedImplTest, MoveBuffersWithLargeSlices) {\n  // Large slices should not be coalesced together\n  TestBufferMove(kLargeSliceSize, kLargeSliceSize, 2);\n}\n\nTEST_F(OwnedImplTest, MoveBuffersWithSmallSlices) {\n  // Small slices should be coalesced together\n  TestBufferMove(1, 1, 1);\n}\n\nTEST_F(OwnedImplTest, MoveSmallSliceIntoLargeSlice) {\n  // Small slices should be coalesced with a large one\n  TestBufferMove(kLargeSliceSize, 1, 1);\n}\n\nTEST_F(OwnedImplTest, MoveLargeSliceIntoSmallSlice) {\n  // Large slice should NOT be coalesced into the small one\n  TestBufferMove(1, kLargeSliceSize, 2);\n}\n\nTEST_F(OwnedImplTest, MoveSmallSliceIntoNotEnoughFreeSpace) {\n  // Small slice will not be coalesced if a previous slice does not have enough free space\n  // Slice buffer sizes are allocated in 4Kb increments\n  // Make first slice have 127 of free space (it is actually less as there is small overhead of the\n  // OwnedSlice object) And second slice 128 bytes\n  TestBufferMove(4096 - 127, 128, 2);\n}\n\n} // namespace\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/buffer/utility.h",
    "content": "#pragma once\n\n#include <initializer_list>\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Buffer {\nnamespace {\n\ninline void addRepeated(Buffer::Instance& buffer, int n, int8_t value) {\n  for (int i = 0; i < n; i++) {\n    buffer.add(&value, 1);\n  }\n}\n\ninline void addSeq(Buffer::Instance& buffer, const std::initializer_list<uint8_t> values) {\n  for (int8_t value : values) {\n    buffer.add(&value, 1);\n  }\n}\n\n} // namespace\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/buffer/watermark_buffer_test.cc",
    "content": "#include <array>\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"test/common/buffer/utility.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Buffer {\nnamespace {\n\nconst char TEN_BYTES[] = \"0123456789\";\n\nclass WatermarkBufferTest : public testing::Test {\npublic:\n  WatermarkBufferTest() { buffer_.setWatermarks(5, 10); }\n\n  Buffer::WatermarkBuffer buffer_{[&]() -> void { ++times_low_watermark_called_; },\n                                  [&]() -> void { ++times_high_watermark_called_; },\n                                  [&]() -> void { ++times_overflow_watermark_called_; }};\n  uint32_t times_low_watermark_called_{0};\n  uint32_t times_high_watermark_called_{0};\n  uint32_t times_overflow_watermark_called_{0};\n};\n\nTEST_F(WatermarkBufferTest, TestWatermark) { ASSERT_EQ(10, buffer_.highWatermark()); }\n\nTEST_F(WatermarkBufferTest, CopyOut) {\n  buffer_.add(\"hello world\");\n  std::array<char, 5> out;\n  buffer_.copyOut(0, out.size(), out.data());\n  EXPECT_EQ(std::string(out.data(), out.size()), \"hello\");\n\n  buffer_.copyOut(6, out.size(), out.data());\n  EXPECT_EQ(std::string(out.data(), out.size()), \"world\");\n\n  // Copy out zero bytes.\n  buffer_.copyOut(4, 0, out.data());\n}\n\nTEST_F(WatermarkBufferTest, AddChar) {\n  buffer_.add(TEN_BYTES, 10);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  buffer_.add(\"a\", 1);\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(11, buffer_.length());\n}\n\nTEST_F(WatermarkBufferTest, AddString) {\n  buffer_.add(std::string(TEN_BYTES));\n  EXPECT_EQ(0, times_high_watermark_called_);\n  buffer_.add(std::string(\"a\"));\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(11, buffer_.length());\n}\n\nTEST_F(WatermarkBufferTest, AddBuffer) {\n  OwnedImpl 
first(TEN_BYTES);\n  buffer_.add(first);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  OwnedImpl second(\"a\");\n  buffer_.add(second);\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(11, buffer_.length());\n}\n\nTEST_F(WatermarkBufferTest, Prepend) {\n  std::string suffix = \"World!\", prefix = \"Hello, \";\n\n  buffer_.add(suffix);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  buffer_.prepend(prefix);\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(suffix.size() + prefix.size(), buffer_.length());\n}\n\nTEST_F(WatermarkBufferTest, PrependToEmptyBuffer) {\n  std::string suffix = \"World!\", prefix = \"Hello, \";\n\n  buffer_.prepend(suffix);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  EXPECT_EQ(suffix.size(), buffer_.length());\n\n  buffer_.prepend(prefix.data());\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(suffix.size() + prefix.size(), buffer_.length());\n\n  buffer_.prepend(\"\");\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(suffix.size() + prefix.size(), buffer_.length());\n}\n\nTEST_F(WatermarkBufferTest, PrependBuffer) {\n  std::string suffix = \"World!\", prefix = \"Hello, \";\n\n  uint32_t prefix_buffer_low_watermark_hits{0};\n  uint32_t prefix_buffer_high_watermark_hits{0};\n  uint32_t prefix_buffer_overflow_watermark_hits{0};\n  WatermarkBuffer prefixBuffer{[&]() -> void { ++prefix_buffer_low_watermark_hits; },\n                               [&]() -> void { ++prefix_buffer_high_watermark_hits; },\n                               [&]() -> void { ++prefix_buffer_overflow_watermark_hits; }};\n  prefixBuffer.setWatermarks(5, 10);\n  prefixBuffer.add(prefix);\n  prefixBuffer.add(suffix);\n\n  EXPECT_EQ(1, prefix_buffer_high_watermark_hits);\n  buffer_.prepend(prefixBuffer);\n\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(1, prefix_buffer_low_watermark_hits);\n  EXPECT_EQ(suffix.size() + prefix.size(), buffer_.length());\n  EXPECT_EQ(prefix + suffix, buffer_.toString());\n  
EXPECT_EQ(0, prefixBuffer.length());\n}\n\nTEST_F(WatermarkBufferTest, Commit) {\n  buffer_.add(TEN_BYTES, 10);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  RawSlice out;\n  buffer_.reserve(10, &out, 1);\n  memcpy(out.mem_, &TEN_BYTES[0], 10);\n  out.len_ = 10;\n  buffer_.commit(&out, 1);\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(20, buffer_.length());\n}\n\nTEST_F(WatermarkBufferTest, Drain) {\n  // Draining from above to below the low watermark does nothing if the high\n  // watermark never got hit.\n  buffer_.add(TEN_BYTES, 10);\n  buffer_.drain(10);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  EXPECT_EQ(0, times_low_watermark_called_);\n\n  // Go above the high watermark then drain down to just at the low watermark.\n  buffer_.add(TEN_BYTES, 11);\n  buffer_.drain(5);\n  EXPECT_EQ(6, buffer_.length());\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(0, times_low_watermark_called_);\n\n  // Now drain below.\n  buffer_.drain(1);\n  EXPECT_EQ(1, times_low_watermark_called_);\n\n  // Going back above should trigger the high again\n  buffer_.add(TEN_BYTES, 10);\n  EXPECT_EQ(2, times_high_watermark_called_);\n}\n\nTEST_F(WatermarkBufferTest, DrainUsingExtract) {\n  // Similar to `Drain` test, but using extractMutableFrontSlice() instead of drain().\n  buffer_.add(TEN_BYTES, 10);\n  ASSERT_EQ(buffer_.length(), 10);\n  buffer_.extractMutableFrontSlice();\n  EXPECT_EQ(0, times_high_watermark_called_);\n  EXPECT_EQ(0, times_low_watermark_called_);\n\n  // Go above the high watermark then drain down to just at the low watermark.\n  buffer_.appendSliceForTest(TEN_BYTES, 5);\n  buffer_.appendSliceForTest(TEN_BYTES, 1);\n  buffer_.appendSliceForTest(TEN_BYTES, 5);\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(0, times_low_watermark_called_);\n  auto slice0 = buffer_.extractMutableFrontSlice(); // essentially drain(5)\n  ASSERT_TRUE(slice0);\n  EXPECT_EQ(slice0->getMutableData().size(), 5);\n  EXPECT_EQ(6, 
buffer_.length());\n  EXPECT_EQ(0, times_low_watermark_called_);\n\n  // Now drain below.\n  auto slice1 = buffer_.extractMutableFrontSlice(); // essentially drain(1)\n  ASSERT_TRUE(slice1);\n  EXPECT_EQ(slice1->getMutableData().size(), 1);\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(1, times_low_watermark_called_);\n\n  // Going back above should trigger the high again.\n  buffer_.add(TEN_BYTES, 10);\n  EXPECT_EQ(2, times_high_watermark_called_);\n}\n\n// Verify that low watermark callback is called on drain in the case where the\n// high watermark is non-zero and low watermark is 0.\nTEST_F(WatermarkBufferTest, DrainWithLowWatermarkOfZero) {\n  buffer_.setWatermarks(0, 10);\n\n  // Draining from above to below the low watermark does nothing if the high\n  // watermark never got hit.\n  buffer_.add(TEN_BYTES, 10);\n  buffer_.drain(10);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  EXPECT_EQ(0, times_low_watermark_called_);\n\n  // Go above the high watermark then drain down to just above the low watermark.\n  buffer_.add(TEN_BYTES, 11);\n  buffer_.drain(10);\n  EXPECT_EQ(1, buffer_.length());\n  EXPECT_EQ(0, times_low_watermark_called_);\n\n  // Now drain below.\n  buffer_.drain(1);\n  EXPECT_EQ(1, times_low_watermark_called_);\n\n  // Going back above should trigger the high again\n  buffer_.add(TEN_BYTES, 11);\n  EXPECT_EQ(2, times_high_watermark_called_);\n}\n\nTEST_F(WatermarkBufferTest, MoveFullBuffer) {\n  buffer_.add(TEN_BYTES, 10);\n  OwnedImpl data(\"a\");\n\n  EXPECT_EQ(0, times_high_watermark_called_);\n  buffer_.move(data);\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(11, buffer_.length());\n}\n\nTEST_F(WatermarkBufferTest, MoveOneByte) {\n  buffer_.add(TEN_BYTES, 9);\n  OwnedImpl data(\"ab\");\n\n  buffer_.move(data, 1);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  EXPECT_EQ(10, buffer_.length());\n\n  buffer_.move(data, 1);\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(11, 
buffer_.length());\n}\n\nTEST_F(WatermarkBufferTest, WatermarkFdFunctions) {\n  os_fd_t pipe_fds[2] = {0, 0};\n#ifdef WIN32\n  auto& os_sys_calls = Api::OsSysCallsSingleton::get();\n  ASSERT_EQ(0, os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).rc_);\n#else\n  ASSERT_EQ(0, pipe(pipe_fds));\n#endif\n\n  buffer_.add(TEN_BYTES, 10);\n  buffer_.add(TEN_BYTES, 10);\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(0, times_low_watermark_called_);\n\n  int bytes_written_total = 0;\n  Network::IoSocketHandleImpl io_handle1(pipe_fds[1]);\n  while (bytes_written_total < 20) {\n    Api::IoCallUint64Result result = io_handle1.write(buffer_);\n    if (!result.ok()) {\n      ASSERT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode());\n    } else {\n      bytes_written_total += result.rc_;\n    }\n  }\n  EXPECT_EQ(1, times_high_watermark_called_);\n  EXPECT_EQ(1, times_low_watermark_called_);\n  EXPECT_EQ(0, buffer_.length());\n\n  int bytes_read_total = 0;\n  Network::IoSocketHandleImpl io_handle2(pipe_fds[0]);\n  while (bytes_read_total < 20) {\n    Api::IoCallUint64Result result = io_handle2.read(buffer_, 20);\n    bytes_read_total += result.rc_;\n  }\n  EXPECT_EQ(2, times_high_watermark_called_);\n  EXPECT_EQ(20, buffer_.length());\n}\n\nTEST_F(WatermarkBufferTest, MoveWatermarks) {\n  buffer_.add(TEN_BYTES, 9);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  buffer_.setWatermarks(1, 9);\n  EXPECT_EQ(0, times_high_watermark_called_);\n  buffer_.setWatermarks(1, 8);\n  EXPECT_EQ(1, times_high_watermark_called_);\n\n  buffer_.setWatermarks(8, 20);\n  EXPECT_EQ(0, times_low_watermark_called_);\n  buffer_.setWatermarks(9, 20);\n  EXPECT_EQ(1, times_low_watermark_called_);\n  buffer_.setWatermarks(7, 20);\n  EXPECT_EQ(1, times_low_watermark_called_);\n  buffer_.setWatermarks(9, 20);\n  EXPECT_EQ(1, times_low_watermark_called_);\n  EXPECT_EQ(0, times_overflow_watermark_called_);\n\n  EXPECT_EQ(1, times_high_watermark_called_);\n  
buffer_.setWatermarks(2);\n  EXPECT_EQ(2, times_high_watermark_called_);\n  EXPECT_EQ(1, times_low_watermark_called_);\n  EXPECT_EQ(0, times_overflow_watermark_called_);\n  buffer_.setWatermarks(0);\n  EXPECT_EQ(2, times_high_watermark_called_);\n  EXPECT_EQ(2, times_low_watermark_called_);\n  EXPECT_EQ(0, times_overflow_watermark_called_);\n  buffer_.setWatermarks(1);\n  EXPECT_EQ(3, times_high_watermark_called_);\n  EXPECT_EQ(2, times_low_watermark_called_);\n  EXPECT_EQ(0, times_overflow_watermark_called_);\n\n  // Fully drain the buffer.\n  buffer_.drain(9);\n  EXPECT_EQ(3, times_low_watermark_called_);\n  EXPECT_EQ(0, buffer_.length());\n  EXPECT_EQ(0, times_overflow_watermark_called_);\n}\n\nTEST_F(WatermarkBufferTest, GetRawSlices) {\n  buffer_.add(TEN_BYTES, 10);\n\n  RawSliceVector slices = buffer_.getRawSlices(/*max_slices=*/2);\n  ASSERT_EQ(1, slices.size());\n  EXPECT_EQ(10, slices[0].len_);\n  EXPECT_EQ(0, memcmp(slices[0].mem_, &TEN_BYTES[0], 10));\n\n  void* data_pointer = buffer_.linearize(10);\n  EXPECT_EQ(data_pointer, slices[0].mem_);\n}\n\nTEST_F(WatermarkBufferTest, Search) {\n  buffer_.add(TEN_BYTES, 10);\n\n  EXPECT_EQ(1, buffer_.search(&TEN_BYTES[1], 2, 0, 0));\n\n  EXPECT_EQ(-1, buffer_.search(&TEN_BYTES[1], 2, 5, 0));\n}\n\nTEST_F(WatermarkBufferTest, StartsWith) {\n  buffer_.add(TEN_BYTES, 10);\n\n  EXPECT_TRUE(buffer_.startsWith({TEN_BYTES, 2}));\n  EXPECT_TRUE(buffer_.startsWith({TEN_BYTES, 10}));\n  EXPECT_FALSE(buffer_.startsWith({&TEN_BYTES[1], 2}));\n}\n\nTEST_F(WatermarkBufferTest, MoveBackWithWatermarks) {\n  int high_watermark_buffer1 = 0;\n  int low_watermark_buffer1 = 0;\n  int overflow_watermark_buffer1 = 0;\n  Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; },\n                                  [&]() -> void { ++high_watermark_buffer1; },\n                                  [&]() -> void { ++overflow_watermark_buffer1; }};\n  buffer1.setWatermarks(5, 10);\n\n  // Stick 20 bytes in buffer_ and expect 
the high watermark is hit.\n  buffer_.add(TEN_BYTES, 10);\n  buffer_.add(TEN_BYTES, 10);\n  EXPECT_EQ(1, times_high_watermark_called_);\n\n  // Now move 10 bytes to the new buffer. Nothing should happen.\n  buffer1.move(buffer_, 10);\n  EXPECT_EQ(0, times_low_watermark_called_);\n  EXPECT_EQ(0, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n\n  // Move 10 more bytes to the new buffer. Both buffers should hit watermark callbacks.\n  buffer1.move(buffer_, 10);\n  EXPECT_EQ(1, times_low_watermark_called_);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(0, times_overflow_watermark_called_);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n\n  // Now move all the data back to the original buffer. Watermarks should trigger immediately.\n  buffer_.move(buffer1);\n  EXPECT_EQ(2, times_high_watermark_called_);\n  EXPECT_EQ(1, low_watermark_buffer1);\n  EXPECT_EQ(0, times_overflow_watermark_called_);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n}\n\nTEST_F(WatermarkBufferTest, OverflowWatermark) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues({{\"envoy.buffer.overflow_multiplier\", \"2\"}});\n\n  int high_watermark_buffer1 = 0;\n  int low_watermark_buffer1 = 0;\n  int overflow_watermark_buffer1 = 0;\n  Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; },\n                                  [&]() -> void { ++high_watermark_buffer1; },\n                                  [&]() -> void { ++overflow_watermark_buffer1; }};\n  buffer1.setWatermarks(5, 10);\n\n  buffer1.add(TEN_BYTES, 10);\n  EXPECT_EQ(0, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.add(\"a\", 1);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.add(TEN_BYTES, 9);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.add(\"a\", 1);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(1, 
overflow_watermark_buffer1);\n  EXPECT_EQ(21, buffer1.length());\n  buffer1.add(\"a\", 1);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n  EXPECT_EQ(22, buffer1.length());\n\n  // Overflow is only triggered once\n  buffer1.drain(18);\n  EXPECT_EQ(4, buffer1.length());\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(1, low_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n  buffer1.add(TEN_BYTES, 10);\n  EXPECT_EQ(2, high_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n  EXPECT_EQ(14, buffer1.length());\n  buffer1.add(TEN_BYTES, 6);\n  EXPECT_EQ(2, high_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n  EXPECT_EQ(20, buffer1.length());\n}\n\nTEST_F(WatermarkBufferTest, OverflowWatermarkDisabled) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues({{\"envoy.buffer.overflow_multiplier\", \"0\"}});\n\n  int high_watermark_buffer1 = 0;\n  int low_watermark_buffer1 = 0;\n  int overflow_watermark_buffer1 = 0;\n  Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; },\n                                  [&]() -> void { ++high_watermark_buffer1; },\n                                  [&]() -> void { ++overflow_watermark_buffer1; }};\n  buffer1.setWatermarks(5, 10);\n\n  buffer1.add(TEN_BYTES, 10);\n  EXPECT_EQ(0, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.add(\"a\", 1);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.add(TEN_BYTES, 10);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  EXPECT_EQ(21, buffer1.length());\n}\n\nTEST_F(WatermarkBufferTest, OverflowWatermarkDisabledOnVeryHighValue) {\n// Disabling execution with TSAN as it causes the test to use too much memory\n// and time, making the test fail in some settings (such as CI)\n#if defined(__has_feature) && 
__has_feature(thread_sanitizer)\n  ENVOY_LOG_MISC(critical, \"WatermarkBufferTest::OverflowWatermarkDisabledOnVeryHighValue not \"\n                           \"supported by this compiler configuration\");\n#else\n  // Verifies that the overflow watermark is disabled when its value is higher\n  // than uint32_t max value\n  TestScopedRuntime scoped_runtime;\n\n  int high_watermark_buffer1 = 0;\n  int overflow_watermark_buffer1 = 0;\n  Buffer::WatermarkBuffer buffer1{[&]() -> void {}, [&]() -> void { ++high_watermark_buffer1; },\n                                  [&]() -> void { ++overflow_watermark_buffer1; }};\n\n  // Make sure the overflow threshold will be above std::numeric_limits<uint32_t>::max()\n  const uint64_t overflow_multiplier = 3;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.buffer.overflow_multiplier\", std::to_string(overflow_multiplier)}});\n  const uint32_t high_watermark_threshold =\n      (std::numeric_limits<uint32_t>::max() / overflow_multiplier) + 1;\n  buffer1.setWatermarks(high_watermark_threshold);\n\n  // Add many segments instead of full uint32_t::max to get around std::bad_alloc exception\n  const uint32_t segment_denominator = 128;\n  const uint32_t big_segment_len = std::numeric_limits<uint32_t>::max() / segment_denominator + 1;\n  for (uint32_t i = 0; i < segment_denominator; ++i) {\n    Buffer::RawSlice iovecs[2];\n    uint64_t num_reserved = buffer1.reserve(big_segment_len, iovecs, 2);\n    EXPECT_GE(num_reserved, 1);\n    buffer1.commit(iovecs, num_reserved);\n  }\n  EXPECT_GT(buffer1.length(), std::numeric_limits<uint32_t>::max());\n  EXPECT_LT(buffer1.length(), high_watermark_threshold * overflow_multiplier);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n\n  // Reserve and commit additional space on the buffer beyond the expected\n  // high_watermark_threshold * overflow_multiplier threshold.\n  // Adding high_watermark_threshold * overflow_multiplier - 
buffer1.length() + 1 bytes\n  Buffer::RawSlice iovecs[2];\n  uint64_t num_reserved = buffer1.reserve(\n      high_watermark_threshold * overflow_multiplier - buffer1.length() + 1, iovecs, 2);\n  EXPECT_GE(num_reserved, 1);\n  buffer1.commit(iovecs, num_reserved);\n  EXPECT_EQ(buffer1.length(), high_watermark_threshold * overflow_multiplier + 1);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n#endif\n}\n\nTEST_F(WatermarkBufferTest, OverflowWatermarkEqualHighWatermark) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues({{\"envoy.buffer.overflow_multiplier\", \"1\"}});\n\n  int high_watermark_buffer1 = 0;\n  int low_watermark_buffer1 = 0;\n  int overflow_watermark_buffer1 = 0;\n  Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; },\n                                  [&]() -> void { ++high_watermark_buffer1; },\n                                  [&]() -> void { ++overflow_watermark_buffer1; }};\n  buffer1.setWatermarks(5, 10);\n\n  buffer1.add(TEN_BYTES, 10);\n  EXPECT_EQ(0, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.add(\"a\", 1);\n  EXPECT_EQ(0, low_watermark_buffer1);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n\n  buffer1.drain(6);\n  EXPECT_EQ(1, low_watermark_buffer1);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n  buffer1.add(TEN_BYTES, 10);\n  EXPECT_EQ(15, buffer1.length());\n  EXPECT_EQ(2, high_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n}\n\nTEST_F(WatermarkBufferTest, MoveWatermarksOverflow) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues({{\"envoy.buffer.overflow_multiplier\", \"2\"}});\n\n  int high_watermark_buffer1 = 0;\n  int low_watermark_buffer1 = 0;\n  int overflow_watermark_buffer1 = 0;\n  Buffer::WatermarkBuffer buffer1{[&]() -> void { 
++low_watermark_buffer1; },\n                                  [&]() -> void { ++high_watermark_buffer1; },\n                                  [&]() -> void { ++overflow_watermark_buffer1; }};\n  buffer1.setWatermarks(5, 10);\n  buffer1.add(TEN_BYTES, 9);\n  EXPECT_EQ(0, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.setWatermarks(1, 9);\n  EXPECT_EQ(0, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.setWatermarks(1, 8);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.setWatermarks(1, 5);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(0, overflow_watermark_buffer1);\n  buffer1.setWatermarks(1, 4);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n\n  // Overflow is only triggered once\n  buffer1.setWatermarks(3, 6);\n  EXPECT_EQ(0, low_watermark_buffer1);\n  EXPECT_EQ(1, high_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n  buffer1.drain(7);\n  buffer1.add(TEN_BYTES, 9);\n  EXPECT_EQ(11, buffer1.length());\n  EXPECT_EQ(1, low_watermark_buffer1);\n  EXPECT_EQ(2, high_watermark_buffer1);\n  EXPECT_EQ(1, overflow_watermark_buffer1);\n}\n\n} // namespace\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/buffer/zero_copy_input_stream_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n\n#include \"test/common/buffer/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Buffer {\nnamespace {\n\nclass ZeroCopyInputStreamTest : public testing::Test {\npublic:\n  ZeroCopyInputStreamTest() {\n    Buffer::OwnedImpl buffer{\"abcd\"};\n    stream_.move(buffer);\n  }\n\n  std::string slice_data_{\"abcd\"};\n  ZeroCopyInputStreamImpl stream_;\n\n  const void* data_;\n  int size_;\n};\n\nTEST_F(ZeroCopyInputStreamTest, Move) {\n  Buffer::OwnedImpl buffer{\"abcd\"};\n  stream_.move(buffer);\n\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(ZeroCopyInputStreamTest, Next) {\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(4, size_);\n  EXPECT_EQ(0, memcmp(slice_data_.data(), data_, size_));\n}\n\nTEST_F(ZeroCopyInputStreamTest, TwoSlices) {\n  // Make content larger than 512 bytes so it would not be coalesced when\n  // moved into the stream_ buffer.\n  Buffer::OwnedImpl buffer(std::string(1024, 'A'));\n  stream_.move(buffer);\n\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(4, size_);\n  EXPECT_EQ(0, memcmp(slice_data_.data(), data_, size_));\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(1024, size_);\n  EXPECT_THAT(absl::string_view(static_cast<const char*>(data_), size_),\n              testing::Each(testing::AllOf('A')));\n}\n\nTEST_F(ZeroCopyInputStreamTest, BackUp) {\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(4, size_);\n\n  stream_.BackUp(3);\n  EXPECT_EQ(1, stream_.ByteCount());\n\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(3, size_);\n  EXPECT_EQ(0, memcmp(\"bcd\", data_, size_));\n  EXPECT_EQ(4, stream_.ByteCount());\n}\n\nTEST_F(ZeroCopyInputStreamTest, BackUpFull) {\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(4, size_);\n\n  stream_.BackUp(4);\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(4, 
size_);\n  EXPECT_EQ(0, memcmp(\"abcd\", data_, size_));\n  EXPECT_EQ(4, stream_.ByteCount());\n}\n\nTEST_F(ZeroCopyInputStreamTest, ByteCount) {\n  EXPECT_EQ(0, stream_.ByteCount());\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(4, stream_.ByteCount());\n}\n\nTEST_F(ZeroCopyInputStreamTest, Finish) {\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(0, size_);\n  stream_.finish();\n  EXPECT_FALSE(stream_.Next(&data_, &size_));\n}\n\nclass ZeroCopyInputStreamSkipTest : public testing::Test {\npublic:\n  ZeroCopyInputStreamSkipTest() {\n    Buffer::OwnedImpl buffer;\n    buffer.addBufferFragment(buffer1_);\n    buffer.addBufferFragment(buffer2_);\n    buffer.addBufferFragment(buffer3_);\n    buffer.addBufferFragment(buffer4_);\n\n    stream_.move(buffer);\n  }\n\n  const std::string slice1_{\"This is the first slice of the message.\"};\n  const std::string slice2_{\"This is the second slice of the message.\"};\n  const std::string slice3_{\"This is the third slice of the message.\"};\n  const std::string slice4_{\"This is the fourth slice of the message.\"};\n  BufferFragmentImpl buffer1_{slice1_.data(), slice1_.size(), nullptr};\n  BufferFragmentImpl buffer2_{slice2_.data(), slice2_.size(), nullptr};\n  BufferFragmentImpl buffer3_{slice3_.data(), slice3_.size(), nullptr};\n  BufferFragmentImpl buffer4_{slice4_.data(), slice4_.size(), nullptr};\n\n  const size_t total_bytes_{slice1_.size() + slice2_.size() + slice3_.size() + slice4_.size()};\n  ZeroCopyInputStreamImpl stream_;\n\n  const void* data_;\n  int size_;\n\n  // Convert data_ buffer into a string\n  absl::string_view dataString() const {\n    return absl::string_view{reinterpret_cast<const char*>(data_), static_cast<size_t>(size_)};\n  }\n};\n\nTEST_F(ZeroCopyInputStreamSkipTest, SkipFirstPartialSlice) {\n  // Only skip the 10 bytes in the first slice.\n  constexpr int skip_count = 10;\n  EXPECT_TRUE(stream_.Skip(skip_count));\n\n  
EXPECT_EQ(skip_count, stream_.ByteCount());\n\n  // Read the first slice\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(slice1_.size() - skip_count, size_);\n  EXPECT_EQ(slice1_.substr(skip_count), dataString());\n  EXPECT_EQ(slice1_.size(), stream_.ByteCount());\n}\n\nTEST_F(ZeroCopyInputStreamSkipTest, SkipFirstFullSlice) {\n  // Skip the full first slice\n  EXPECT_TRUE(stream_.Skip(slice1_.size()));\n\n  EXPECT_EQ(slice1_.size(), stream_.ByteCount());\n\n  // Read the second slice\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(slice2_.size(), size_);\n  EXPECT_EQ(slice2_, dataString());\n  EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount());\n}\n\nTEST_F(ZeroCopyInputStreamSkipTest, BackUpAndSkipToEndOfSlice) {\n  // Read the first slice, backUp 10 byes, skip 10 bytes to the end of the first slice.\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(slice1_.size(), size_);\n  EXPECT_EQ(slice1_, dataString());\n\n  constexpr int backup_count = 10;\n  stream_.BackUp(backup_count);\n  EXPECT_TRUE(stream_.Skip(backup_count));\n\n  EXPECT_EQ(slice1_.size(), stream_.ByteCount());\n\n  // Next read is the second slice\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(slice2_.size(), size_);\n  EXPECT_EQ(slice2_, dataString());\n  EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount());\n}\n\nTEST_F(ZeroCopyInputStreamSkipTest, SkipAcrossTwoSlices) {\n  // Read the first slice, backUp 10 byes, skip 15 bytes; 5 bytes into the second slice.\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(slice1_.size(), size_);\n  EXPECT_EQ(slice1_, dataString());\n\n  constexpr int backup_count = 10; // the backup bytes to the end of first slice.\n  constexpr int skip_count = 5;    // The skip bytes in the second slice\n  stream_.BackUp(backup_count);\n  EXPECT_TRUE(stream_.Skip(backup_count + skip_count));\n\n  EXPECT_EQ(slice1_.size() + skip_count, stream_.ByteCount());\n\n  // Read the remain second slice\n  
EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(slice2_.size() - skip_count, size_);\n  EXPECT_EQ(slice2_.substr(skip_count), dataString());\n  EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount());\n}\n\nTEST_F(ZeroCopyInputStreamSkipTest, SkipAcrossThreeSlices) {\n  // Read the first slice, backUp 10 byes, skip 10 + slice2.size + 5; 5 bytes into the third slice.\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(slice1_.size(), size_);\n  EXPECT_EQ(slice1_, dataString());\n\n  constexpr int backup_count = 10; // the backup bytes to the end of first slice.\n  constexpr int skip_count = 5;    // The skip bytes in the third slice\n  stream_.BackUp(backup_count);\n  EXPECT_TRUE(stream_.Skip(backup_count + slice2_.size() + skip_count));\n\n  EXPECT_EQ(slice1_.size() + slice2_.size() + skip_count, stream_.ByteCount());\n\n  // Read the remain third slice\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(slice3_.size() - skip_count, size_);\n  EXPECT_EQ(slice3_.substr(skip_count), dataString());\n  EXPECT_EQ(slice1_.size() + slice2_.size() + slice3_.size(), stream_.ByteCount());\n}\n\nTEST_F(ZeroCopyInputStreamSkipTest, SkipToEndOfBuffer) {\n  // Failed to skip one extra byte\n  EXPECT_FALSE(stream_.Skip(total_bytes_ + 1));\n\n  EXPECT_TRUE(stream_.Skip(total_bytes_));\n  EXPECT_EQ(total_bytes_, stream_.ByteCount());\n\n  // Failed to skip one extra byte\n  EXPECT_FALSE(stream_.Skip(1));\n}\n\nTEST_F(ZeroCopyInputStreamSkipTest, ReadFirstSkipToTheEnd) {\n  // Read the first slice, backUp 10 byes, skip to the end of buffer\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(slice1_.size(), size_);\n  EXPECT_EQ(slice1_, dataString());\n\n  constexpr int backup_count = 10; // the backup bytes to the end of first slice.\n  stream_.BackUp(backup_count);\n\n  EXPECT_TRUE(stream_.Skip(total_bytes_ - slice1_.size() + backup_count));\n  EXPECT_EQ(total_bytes_, stream_.ByteCount());\n\n  // Failed to skip one extra byte\n  
EXPECT_FALSE(stream_.Skip(1));\n}\n\n} // namespace\n} // namespace Buffer\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"backoff_strategy_test\",\n    srcs = [\"backoff_strategy_test.cc\"],\n    deps = [\n        \"//source/common/common:backoff_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"assert_test\",\n    srcs = [\"assert_test.cc\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//test/test_common:logging_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"base64_test\",\n    srcs = [\"base64_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:base64_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"base64_fuzz_test\",\n    srcs = [\"base64_fuzz_test.cc\"],\n    corpus = \"base64_corpus\",\n    # Fuzzer is stable, no bugs, simple test target; avoid emitting CO2.\n    tags = [\"no_fuzz\"],\n    deps = [\"//source/common/common:base64_lib\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"utility_fuzz_test\",\n    srcs = [\"utility_fuzz_test.cc\"],\n    corpus = \"utility_corpus\",\n    # Fuzzer is stable, no bugs, simple test target; avoid emitting CO2.\n    tags = [\"no_fuzz\"],\n    deps = [\"//source/common/common:utility_lib\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"hash_fuzz_test\",\n    srcs = [\"hash_fuzz_test.cc\"],\n    corpus = \"hash_corpus\",\n    deps = [\"//source/common/common:hash_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"cleanup_test\",\n    srcs = [\"cleanup_test.cc\"],\n    deps = [\"//source/common/common:cleanup_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"mem_block_builder_test\",\n    srcs = [\"mem_block_builder_test.cc\"],\n    deps = [\"//source/common/common:mem_block_builder_lib\"],\n)\n\nenvoy_cc_test(\n    name = 
\"phantom_test\",\n    srcs = [\"phantom_test.cc\"],\n    deps = [\"//source/common/common:phantom\"],\n)\n\nenvoy_cc_test(\n    name = \"fmt_test\",\n    srcs = [\"fmt_test.cc\"],\n    deps = [\n        \"//source/common/common:fmt_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"hash_test\",\n    srcs = [\"hash_test.cc\"],\n    deps = [\"//source/common/common:hash_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"hex_test\",\n    srcs = [\"hex_test.cc\"],\n    deps = [\"//source/common/common:hex_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"linked_object_test\",\n    srcs = [\"linked_object_test.cc\"],\n    deps = [\n        \"//source/common/common:linked_object\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"log_macros_test\",\n    srcs = [\"log_macros_test.cc\"],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:logging_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"fancy_log_macros_test\",\n    srcs = [\"log_macros_test.cc\"],\n    args = [\"--enable-fine-grain-logging\"],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:logging_lib\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"logger_speed_test\",\n    srcs = [\"logger_speed_test.cc\"],\n    external_deps = [\"benchmark\"],\n    deps = [\"//source/common/common:minimal_logger_lib\"],\n)\n\nenvoy_benchmark_test(\n    name = \"logger_speed_test_benchmark_test\",\n    benchmark_binary = \"logger_speed_test\",\n)\n\nenvoy_cc_test(\n    name = \"logger_test\",\n    srcs = [\"logger_test.cc\"],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n    
],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"logger_fuzz_test\",\n    srcs = [\"logger_fuzz_test.cc\"],\n    corpus = \"logger_corpus\",\n    # TODO(github.com/envoyproxy/envoy#8893): Re-enable once more fuzz tests are added\n    tags = [\"no_fuzz\"],\n    deps = [\"//source/common/common:minimal_logger_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"matchers_test\",\n    srcs = [\"matchers_test.cc\"],\n    deps = [\n        \"//source/common/common:matchers_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"mutex_tracer_test\",\n    srcs = [\"mutex_tracer_test.cc\"],\n    deps = [\n        \"//source/common/common:mutex_tracer_lib\",\n        \"//test/test_common:contention_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"random_generator_test\",\n    srcs = [\"random_generator_test.cc\"],\n    deps = [\n        \"//source/common/common:random_generator_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    external_deps = [\n        \"abseil_strings\",\n    ],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"regex_test\",\n    srcs = [\"regex_test.cc\"],\n    deps = [\n        \"//source/common/common:regex_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n  
  ],\n)\n\nenvoy_cc_test(\n    name = \"perf_annotation_test\",\n    srcs = [\"perf_annotation_test.cc\"],\n    deps = [\n        \"//source/common/common:perf_annotation_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"perf_annotation_disabled_test\",\n    srcs = [\"perf_annotation_disabled_test.cc\"],\n    deps = [\n        \"//source/common/common:perf_annotation_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"basic_resource_impl_test\",\n    srcs = [\"basic_resource_impl_test.cc\"],\n    deps = [\n        \"//source/common/common:basic_resource_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"token_bucket_impl_test\",\n    srcs = [\"token_bucket_impl_test.cc\"],\n    deps = [\n        \"//source/common/common:token_bucket_impl_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"callback_impl_test\",\n    srcs = [\"callback_impl_test.cc\"],\n    deps = [\"//source/common/common:callback_impl_lib\"],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"utility_speed_test\",\n    srcs = [\"utility_speed_test.cc\"],\n    external_deps = [\n        \"abseil_strings\",\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"utility_speed_test_benchmark_test\",\n    benchmark_binary = \"utility_speed_test\",\n)\n\nenvoy_cc_test(\n    name = \"lock_guard_test\",\n    srcs = [\"lock_guard_test.cc\"],\n    deps = [\n        \"//source/common/common:lock_guard_lib\",\n        \"//source/common/common:thread_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"thread_id_test\",\n    srcs = [\"thread_id_test.cc\"],\n    external_deps = [\"abseil_hash_testing\"],\n    deps = [\n        \"//source/common/common:thread_lib\",\n        \"//test/test_common:thread_factory_for_test_lib\",\n    
],\n)\n\nenvoy_cc_test(\n    name = \"thread_test\",\n    srcs = [\"thread_test.cc\"],\n    deps = [\n        \"//source/common/common:thread_lib\",\n        \"//source/common/common:thread_synchronizer_lib\",\n        \"//test/test_common:thread_factory_for_test_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"stl_helpers_test\",\n    srcs = [\"stl_helpers_test.cc\"],\n    deps = [\n        \"//source/common/common:stl_helpers\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"version_test\",\n    srcs = [\"version_test.cc\"],\n    external_deps = [\n        \"abseil_strings\",\n    ],\n    deps = [\n        \"//source/common/version:version_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"statusor_test\",\n    srcs = [\"statusor_test.cc\"],\n    deps = [\n        \"//source/common/common:statusor_lib\",\n        \"//source/common/http:status_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/common/assert_test.cc",
    "content": "#include \"common/common/assert.h\"\n\n#include \"test/test_common/logging.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(ReleaseAssertDeathTest, VariousLogs) {\n  EXPECT_DEATH({ RELEASE_ASSERT(0, \"\"); }, \".*assert failure: 0.*\");\n  EXPECT_DEATH({ RELEASE_ASSERT(0, \"With some logs\"); },\n               \".*assert failure: 0. Details: With some logs.*\");\n  EXPECT_DEATH({ RELEASE_ASSERT(0 == EAGAIN, fmt::format(\"using {}\", \"fmt\")); },\n               \".*assert failure: 0 == EAGAIN. Details: using fmt.*\");\n}\n\nTEST(AssertDeathTest, VariousLogs) {\n  int expected_counted_failures;\n  int assert_fail_count = 0;\n  auto debug_assert_action_registration =\n      Assert::setDebugAssertionFailureRecordAction([&]() { assert_fail_count++; });\n\n#ifndef NDEBUG\n  EXPECT_DEATH({ ASSERT(0); }, \".*assert failure: 0.*\");\n  EXPECT_DEATH({ ASSERT(0, \"\"); }, \".*assert failure: 0.*\");\n  EXPECT_DEATH({ ASSERT(0, \"With some logs\"); }, \".*assert failure: 0. Details: With some logs.*\");\n  expected_counted_failures = 0;\n#elif defined(ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE)\n  EXPECT_LOG_CONTAINS(\"critical\", \"assert failure: 0\", ASSERT(0));\n  EXPECT_LOG_CONTAINS(\"critical\", \"assert failure: 0\", ASSERT(0, \"\"));\n  EXPECT_LOG_CONTAINS(\"critical\", \"assert failure: 0. 
Details: With some logs\",\n                      ASSERT(0, \"With some logs\"));\n  expected_counted_failures = 3;\n#else\n  EXPECT_NO_LOGS(ASSERT(0));\n  EXPECT_NO_LOGS(ASSERT(0, \"\"));\n  EXPECT_NO_LOGS(ASSERT(0, \"With some logs\"));\n  expected_counted_failures = 0;\n#endif\n\n  EXPECT_EQ(expected_counted_failures, assert_fail_count);\n}\n\nTEST(EnvoyBugDeathTest, VariousLogs) {\n  int envoy_bug_fail_count = 0;\n  // ENVOY_BUG actions only occur on power of two counts.\n  auto envoy_bug_action_registration =\n      Assert::setEnvoyBugFailureRecordAction([&]() { envoy_bug_fail_count++; });\n\n#ifndef NDEBUG\n  EXPECT_DEATH({ ENVOY_BUG(false, \"\"); }, \".*envoy bug failure: false.*\");\n  EXPECT_DEATH({ ENVOY_BUG(false, \"\"); }, \".*envoy bug failure: false.*\");\n  EXPECT_DEATH({ ENVOY_BUG(false, \"With some logs\"); },\n               \".*envoy bug failure: false. Details: With some logs.*\");\n  EXPECT_EQ(0, envoy_bug_fail_count);\n#else\n  // Same log lines trigger exponential back-off.\n  for (int i = 0; i < 4; i++) {\n    ENVOY_BUG(false, \"\");\n  }\n  // 3 counts because 1st, 2nd, and 4th instances are powers of 2.\n  EXPECT_EQ(3, envoy_bug_fail_count);\n\n  // Different log lines have separate counters for exponential back-off.\n  EXPECT_LOG_CONTAINS(\"error\", \"envoy bug failure: false\", ENVOY_BUG(false, \"\"));\n  EXPECT_LOG_CONTAINS(\"error\", \"envoy bug failure: false. Details: With some logs\",\n                      ENVOY_BUG(false, \"With some logs\"));\n  EXPECT_EQ(5, envoy_bug_fail_count);\n#endif\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/backoff_strategy_test.cc",
    "content": "#include \"common/common/backoff_strategy.h\"\n\n#include \"test/mocks/common.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\n\nTEST(ExponentialBackOffStrategyTest, JitteredBackOffBasicFlow) {\n  NiceMock<Random::MockRandomGenerator> random;\n  ON_CALL(random, random()).WillByDefault(Return(27));\n\n  JitteredExponentialBackOffStrategy jittered_back_off(25, 30, random);\n  EXPECT_EQ(2, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(27, jittered_back_off.nextBackOffMs());\n}\n\nTEST(ExponentialBackOffStrategyTest, JitteredBackOffBasicReset) {\n  NiceMock<Random::MockRandomGenerator> random;\n  ON_CALL(random, random()).WillByDefault(Return(27));\n\n  JitteredExponentialBackOffStrategy jittered_back_off(25, 30, random);\n  EXPECT_EQ(2, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(27, jittered_back_off.nextBackOffMs());\n\n  jittered_back_off.reset();\n  EXPECT_EQ(2, jittered_back_off.nextBackOffMs()); // Should start from start\n}\n\nTEST(ExponentialBackOffStrategyTest, JitteredBackOffDoesntOverflow) {\n  NiceMock<Random::MockRandomGenerator> random;\n  ON_CALL(random, random()).WillByDefault(Return(std::numeric_limits<uint64_t>::max() - 1));\n\n  JitteredExponentialBackOffStrategy jittered_back_off(1, std::numeric_limits<uint64_t>::max(),\n                                                       random);\n  for (int iter = 0; iter < 100; ++iter) {\n    EXPECT_GE(std::numeric_limits<uint64_t>::max(), jittered_back_off.nextBackOffMs());\n  }\n  EXPECT_EQ(std::numeric_limits<uint64_t>::max() - 1, jittered_back_off.nextBackOffMs());\n}\n\nTEST(ExponentialBackOffStrategyTest, JitteredBackOffWithMaxInterval) {\n  NiceMock<Random::MockRandomGenerator> random;\n  ON_CALL(random, random()).WillByDefault(Return(9999));\n\n  JitteredExponentialBackOffStrategy jittered_back_off(5, 100, random);\n  EXPECT_EQ(4, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(9, jittered_back_off.nextBackOffMs());\n  
EXPECT_EQ(19, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(39, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(79, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(99, jittered_back_off.nextBackOffMs()); // Should return Max here\n  EXPECT_EQ(99, jittered_back_off.nextBackOffMs());\n}\n\nTEST(ExponentialBackOffStrategyTest, JitteredBackOffWithMaxIntervalReset) {\n  NiceMock<Random::MockRandomGenerator> random;\n  ON_CALL(random, random()).WillByDefault(Return(9999));\n\n  JitteredExponentialBackOffStrategy jittered_back_off(5, 100, random);\n  EXPECT_EQ(4, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(9, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(19, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(39, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(79, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(99, jittered_back_off.nextBackOffMs()); // Should return Max here\n  EXPECT_EQ(99, jittered_back_off.nextBackOffMs());\n\n  jittered_back_off.reset();\n  EXPECT_EQ(4, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(9, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(19, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(39, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(79, jittered_back_off.nextBackOffMs());\n  EXPECT_EQ(99, jittered_back_off.nextBackOffMs()); // Should return Max here\n  EXPECT_EQ(99, jittered_back_off.nextBackOffMs());\n}\n\nTEST(LowerBoundBackOffStrategyTest, JitteredBackOffWithLowRandomValue) {\n  NiceMock<Random::MockRandomGenerator> random;\n  ON_CALL(random, random()).WillByDefault(Return(22));\n\n  JitteredLowerBoundBackOffStrategy jittered_lower_bound_back_off(500, random);\n  EXPECT_EQ(522, jittered_lower_bound_back_off.nextBackOffMs());\n}\n\nTEST(LowerBoundBackOffStrategyTest, JitteredBackOffWithHighRandomValue) {\n  NiceMock<Random::MockRandomGenerator> random;\n  ON_CALL(random, random()).WillByDefault(Return(9999));\n\n  JitteredLowerBoundBackOffStrategy jittered_lower_bound_back_off(500, random);\n  EXPECT_EQ(749, 
jittered_lower_bound_back_off.nextBackOffMs());\n}\n\nTEST(FixedBackOffStrategyTest, FixedBackOffBasicReset) {\n  FixedBackOffStrategy fixed_back_off(30);\n  EXPECT_EQ(30, fixed_back_off.nextBackOffMs());\n  EXPECT_EQ(30, fixed_back_off.nextBackOffMs());\n\n  fixed_back_off.reset();\n  EXPECT_EQ(30, fixed_back_off.nextBackOffMs());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/base64_corpus/singleton",
    "content": "%\n"
  },
  {
    "path": "test/common/common/base64_fuzz_test.cc",
    "content": "#include \"common/common/base64.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  Envoy::Base64::encode(reinterpret_cast<const char*>(buf), len);\n  Envoy::Base64::decode(std::string(reinterpret_cast<const char*>(buf), len));\n  Envoy::Base64Url::encode(reinterpret_cast<const char*>(buf), len);\n  Envoy::Base64Url::decode(std::string(reinterpret_cast<const char*>(buf), len));\n}\n\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/base64_test.cc",
    "content": "#include <string>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/base64.h\"\n\n#include \"test/test_common/printers.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nTEST(Base64Test, EmptyBufferEncode) {\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_EQ(\"\", Base64::encode(buffer, 0));\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(\"\\0\\0\", 2);\n    EXPECT_EQ(\"AAA=\", Base64::encode(buffer, 2));\n  }\n}\n\nTEST(Base64Test, SingleSliceBufferEncode) {\n  Buffer::OwnedImpl buffer;\n  buffer.add(\"foo\", 3);\n  EXPECT_EQ(\"Zm9v\", Base64::encode(buffer, 3));\n  EXPECT_EQ(\"Zm8=\", Base64::encode(buffer, 2));\n}\n\nTEST(Base64Test, EncodeString) {\n  EXPECT_EQ(\"\", Base64::encode(\"\", 0));\n  EXPECT_EQ(\"AAA=\", Base64::encode(\"\\0\\0\", 2));\n  EXPECT_EQ(\"AAA\", Base64::encode(\"\\0\\0\", 2, false));\n  EXPECT_EQ(\"Zm9v\", Base64::encode(\"foo\", 3));\n  EXPECT_EQ(\"Zm8=\", Base64::encode(\"fo\", 2));\n  EXPECT_EQ(\"Zg==\", Base64::encode(\"f\", 1));\n  EXPECT_EQ(\"Zg\", Base64::encode(\"f\", 1, false));\n}\n\nTEST(Base64Test, Decode) {\n  EXPECT_EQ(\"\", Base64::decode(\"\"));\n  EXPECT_EQ(\"foo\", Base64::decode(\"Zm9v\"));\n  EXPECT_EQ(\"fo\", Base64::decode(\"Zm8=\"));\n  EXPECT_EQ(\"f\", Base64::decode(\"Zg==\"));\n  EXPECT_EQ(\"foobar\", Base64::decode(\"Zm9vYmFy\"));\n  EXPECT_EQ(\"foob\", Base64::decode(\"Zm9vYg==\"));\n\n  {\n    const char* test_string = \"\\0\\1\\2\\3\\b\\n\\t\";\n    EXPECT_FALSE(memcmp(test_string, Base64::decode(\"AAECAwgKCQ==\").data(), 7));\n  }\n\n  {\n    const char* test_string = \"\\0\\0\\0\\0als;jkopqitu[\\0opbjlcxnb35g]b[\\xaa\\b\\n\";\n    Buffer::OwnedImpl buffer;\n    buffer.add(test_string, 36);\n    EXPECT_FALSE(memcmp(test_string, Base64::decode(Base64::encode(buffer, 36)).data(), 36));\n  }\n\n  {\n    const char* test_string = \"\\0\\0\\0\\0als;jkopqitu[\\0opbjlcxnb35g]b[\\xaa\\b\\n\";\n    EXPECT_FALSE(memcmp(test_string, 
Base64::decode(Base64::encode(test_string, 36)).data(), 36));\n  }\n\n  {\n    std::string test_string = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\";\n    std::string decoded = Base64::decode(test_string);\n    Buffer::OwnedImpl buffer(decoded);\n    EXPECT_EQ(test_string, Base64::encode(buffer, decoded.length()));\n  }\n\n  {\n    const char* test_string = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\";\n    std::string decoded = Base64::decode(test_string);\n    EXPECT_EQ(test_string, Base64::encode(decoded.c_str(), decoded.length()));\n  }\n}\n\nTEST(Base64Test, DecodeFailure) {\n  EXPECT_EQ(\"\", Base64::decode(\"==Zg\"));\n  EXPECT_EQ(\"\", Base64::decode(\"=Zm8\"));\n  EXPECT_EQ(\"\", Base64::decode(\"Zm=8\"));\n  EXPECT_EQ(\"\", Base64::decode(\"Zg=A\"));\n  EXPECT_EQ(\"\", Base64::decode(\"Zh==\")); // 011001 100001 <- unused bit at tail\n  EXPECT_EQ(\"\", Base64::decode(\"Zm9=\")); // 011001 100110 111101 <- unused bit at tail\n  EXPECT_EQ(\"\", Base64::decode(\"Zg..\"));\n  EXPECT_EQ(\"\", Base64::decode(\"..Zg\"));\n  EXPECT_EQ(\"\", Base64::decode(\"A===\"));\n  EXPECT_EQ(\"\", Base64::decode(\"====\"));\n  EXPECT_EQ(\"\", Base64::decode(\"123\"));\n}\n\nTEST(Base64Test, DecodeWithoutPadding) {\n  EXPECT_EQ(\"foo\", Base64::decodeWithoutPadding(\"Zm9v\"));\n  EXPECT_EQ(\"fo\", Base64::decodeWithoutPadding(\"Zm8\"));\n  EXPECT_EQ(\"f\", Base64::decodeWithoutPadding(\"Zg\"));\n  EXPECT_EQ(\"foobar\", Base64::decodeWithoutPadding(\"Zm9vYmFy\"));\n  EXPECT_EQ(\"fooba\", Base64::decodeWithoutPadding(\"Zm9vYmE\"));\n  EXPECT_EQ(\"foob\", Base64::decodeWithoutPadding(\"Zm9vYg\"));\n\n  EXPECT_EQ(\"\", Base64::decodeWithoutPadding(\"\"));\n  EXPECT_EQ(\"\", Base64::decodeWithoutPadding(\"=\"));\n  EXPECT_EQ(\"\", Base64::decodeWithoutPadding(\"==\"));\n  EXPECT_EQ(\"\", Base64::decodeWithoutPadding(\"===\"));\n  EXPECT_EQ(\"\", Base64::decodeWithoutPadding(\"====\"));\n\n  EXPECT_EQ(\"f\", 
Base64::decodeWithoutPadding(\"Zg\"));\n  EXPECT_EQ(\"f\", Base64::decodeWithoutPadding(\"Zg=\"));\n  EXPECT_EQ(\"f\", Base64::decodeWithoutPadding(\"Zg==\"));\n}\n\nTEST(Base64Test, MultiSlicesBufferEncode) {\n  Buffer::OwnedImpl buffer;\n  buffer.add(\"foob\", 4);\n  buffer.add(\"ar\", 2);\n  EXPECT_EQ(\"Zm9vYg==\", Base64::encode(buffer, 4));\n  EXPECT_EQ(\"Zm9vYmE=\", Base64::encode(buffer, 5));\n  EXPECT_EQ(\"Zm9vYmFy\", Base64::encode(buffer, 6));\n  EXPECT_EQ(\"Zm9vYmFy\", Base64::encode(buffer, 7));\n}\n\nTEST(Base64Test, BinaryBufferEncode) {\n  Buffer::OwnedImpl buffer;\n  buffer.add(\"\\0\\1\\2\\3\", 4);\n  buffer.add(\"\\b\\n\\t\", 4);\n  buffer.add(\"\\xaa\\xbc\\xde\", 3);\n  EXPECT_EQ(\"AAECAwgKCQ==\", Base64::encode(buffer, 7));\n  EXPECT_EQ(\"AAECAwgKCQA=\", Base64::encode(buffer, 8));\n  EXPECT_EQ(\"AAECAwgKCQCq\", Base64::encode(buffer, 9));\n  EXPECT_EQ(\"AAECAwgKCQCqvA==\", Base64::encode(buffer, 10));\n  EXPECT_EQ(\"AAECAwgKCQCqvN4=\", Base64::encode(buffer, 30));\n}\n\nTEST(Base64UrlTest, EncodeString) {\n  EXPECT_EQ(\"\", Base64Url::encode(\"\", 0));\n  EXPECT_EQ(\"AAA\", Base64Url::encode(\"\\0\\0\", 2));\n  EXPECT_EQ(\"Zm9v\", Base64Url::encode(\"foo\", 3));\n  EXPECT_EQ(\"Zm8\", Base64Url::encode(\"fo\", 2));\n}\n\nTEST(Base64UrlTest, Decode) {\n  EXPECT_EQ(\"\", Base64Url::decode(\"\"));\n  EXPECT_EQ(\"foo\", Base64Url::decode(\"Zm9v\"));\n  EXPECT_EQ(\"fo\", Base64Url::decode(\"Zm8\"));\n  EXPECT_EQ(\"f\", Base64Url::decode(\"Zg\"));\n  EXPECT_EQ(\"foobar\", Base64Url::decode(\"Zm9vYmFy\"));\n  EXPECT_EQ(\"foob\", Base64Url::decode(\"Zm9vYg\"));\n\n  {\n    const char* test_string = \"\\0\\1\\2\\3\\b\\n\\t\";\n    EXPECT_FALSE(memcmp(test_string, Base64Url::decode(\"AAECAwgKCQ\").data(), 7));\n  }\n\n  {\n    const char* test_string = \"\\0\\0\\0\\0als;jkopqitu[\\0opbjlcxnb35g]b[\\xaa\\b\\n\";\n    EXPECT_FALSE(\n        memcmp(test_string, Base64Url::decode(Base64Url::encode(test_string, 36)).data(), 36));\n  }\n\n  {\n    const char* 
test_string = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_\";\n    std::string decoded = Base64Url::decode(test_string);\n    EXPECT_EQ(test_string, Base64Url::encode(decoded.c_str(), decoded.length()));\n  }\n\n  {\n    const char* url_test_string =\n        \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_\";\n    const char* test_string = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\";\n    EXPECT_EQ(Base64Url::decode(url_test_string), Base64::decode(test_string));\n  }\n}\n\nTEST(Base64UrlTest, DecodeFailure) {\n  EXPECT_EQ(\"\", Base64Url::decode(\"==Zg\"));\n  EXPECT_EQ(\"\", Base64Url::decode(\"=Zm8\"));\n  EXPECT_EQ(\"\", Base64Url::decode(\"Zm=8\"));\n  EXPECT_EQ(\"\", Base64Url::decode(\"Zg=A\"));\n  EXPECT_EQ(\"\", Base64Url::decode(\"Zh==\")); // 011001 100001 <- unused bit at tail\n  EXPECT_EQ(\"\", Base64Url::decode(\"Zm9=\")); // 011001 100110 111101 <- unused bit at tail\n  EXPECT_EQ(\"\", Base64Url::decode(\"Zg..\"));\n  EXPECT_EQ(\"\", Base64Url::decode(\"..Zg\"));\n  EXPECT_EQ(\"\", Base64Url::decode(\"A===\"));\n  EXPECT_EQ(\"\", Base64Url::decode(\"Zh\"));  // 011001 100001 <- unused bit at tail\n  EXPECT_EQ(\"\", Base64Url::decode(\"Zm9\")); // 011001 100110 111101 <- unused bit at tail\n  EXPECT_EQ(\"\", Base64Url::decode(\"A\"));\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/basic_resource_impl_test.cc",
    "content": "#include <limits>\n\n#include \"common/common/basic_resource_impl.h\"\n\n#include \"test/mocks/runtime/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\n\nclass BasicResourceLimitImplTest : public testing::Test {\nprotected:\n  NiceMock<Runtime::MockLoader> runtime_;\n};\n\nTEST_F(BasicResourceLimitImplTest, NoArgsConstructorVerifyMax) {\n  BasicResourceLimitImpl br;\n\n  EXPECT_EQ(br.max(), std::numeric_limits<uint64_t>::max());\n}\n\nTEST_F(BasicResourceLimitImplTest, VerifySetClearMax) {\n  BasicResourceLimitImpl br(123);\n\n  EXPECT_EQ(br.max(), 123);\n  br.setMax(321);\n  EXPECT_EQ(br.max(), 321);\n  br.resetMax();\n  EXPECT_EQ(br.max(), std::numeric_limits<uint64_t>::max());\n}\n\nTEST_F(BasicResourceLimitImplTest, IncDecCount) {\n  BasicResourceLimitImpl br;\n\n  EXPECT_EQ(br.count(), 0);\n  br.inc();\n  EXPECT_EQ(br.count(), 1);\n  br.inc();\n  br.inc();\n  EXPECT_EQ(br.count(), 3);\n  br.dec();\n  EXPECT_EQ(br.count(), 2);\n  br.decBy(2);\n  EXPECT_EQ(br.count(), 0);\n}\n\nTEST_F(BasicResourceLimitImplTest, CanCreate) {\n  BasicResourceLimitImpl br(2);\n\n  EXPECT_TRUE(br.canCreate());\n  br.inc();\n  EXPECT_TRUE(br.canCreate());\n  br.inc();\n  EXPECT_FALSE(br.canCreate());\n  br.dec();\n  EXPECT_TRUE(br.canCreate());\n  br.dec();\n}\n\nTEST_F(BasicResourceLimitImplTest, RuntimeMods) {\n  BasicResourceLimitImpl br(1337, runtime_, \"trololo\");\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"trololo\", 1337)).WillOnce(Return(555));\n  EXPECT_EQ(br.max(), 555);\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"trololo\", 1337)).WillOnce(Return(1337));\n  EXPECT_EQ(br.max(), 1337);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/callback_impl_test.cc",
    "content": "#include \"common/common/callback_impl.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::InSequence;\n\nnamespace Envoy {\nnamespace Common {\n\nclass CallbackManagerTest : public testing::Test {\npublic:\n  MOCK_METHOD(void, called, (int arg));\n};\n\nTEST_F(CallbackManagerTest, All) {\n  InSequence s;\n\n  CallbackManager<int> manager;\n  CallbackHandle* handle1 = manager.add([this](int arg) -> void { called(arg); });\n  manager.add([this](int arg) -> void { called(arg * 2); });\n\n  EXPECT_CALL(*this, called(5));\n  EXPECT_CALL(*this, called(10));\n  manager.runCallbacks(5);\n\n  handle1->remove();\n  EXPECT_CALL(*this, called(10));\n  manager.runCallbacks(5);\n\n  EXPECT_CALL(*this, called(10));\n  EXPECT_CALL(*this, called(20));\n  CallbackHandle* handle3 = manager.add([this, &handle3](int arg) -> void {\n    called(arg * 4);\n    handle3->remove();\n  });\n  manager.runCallbacks(5);\n\n  EXPECT_CALL(*this, called(10));\n  manager.runCallbacks(5);\n}\n\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/cleanup_test.cc",
    "content": "#include \"common/common/cleanup.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(CleanupTest, ScopeExitCallback) {\n  bool callback_fired = false;\n  {\n    Cleanup cleanup([&callback_fired] { callback_fired = true; });\n    EXPECT_FALSE(callback_fired);\n  }\n  EXPECT_TRUE(callback_fired);\n}\n\nTEST(CleanupTest, Cancel) {\n  bool callback_fired = false;\n  {\n    Cleanup cleanup([&callback_fired] { callback_fired = true; });\n    EXPECT_FALSE(cleanup.cancelled());\n    cleanup.cancel();\n    EXPECT_FALSE(callback_fired);\n    EXPECT_TRUE(cleanup.cancelled());\n  }\n  EXPECT_FALSE(callback_fired);\n}\n\nTEST(RaiiListElementTest, DeleteOnDestruction) {\n  std::list<int> l;\n\n  {\n    EXPECT_EQ(l.size(), 0);\n    RaiiListElement<int> rle(l, 1);\n    EXPECT_EQ(l.size(), 1);\n  }\n  EXPECT_EQ(l.size(), 0);\n}\n\nTEST(RaiiListElementTest, CancelDelete) {\n  std::list<int> l;\n\n  {\n    EXPECT_EQ(l.size(), 0);\n    RaiiListElement<int> rle(l, 1);\n    EXPECT_EQ(l.size(), 1);\n    rle.cancel();\n  }\n  EXPECT_EQ(l.size(), 1);\n}\n\nTEST(RaiiListElementTest, DeleteOnErase) {\n  std::list<int> l;\n\n  {\n    EXPECT_EQ(l.size(), 0);\n    RaiiListElement<int> rle(l, 1);\n    rle.erase();\n    EXPECT_EQ(l.size(), 0);\n  }\n  EXPECT_EQ(l.size(), 0);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/fmt_test.cc",
    "content": "#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(FormatHelpersTest, Format) {\n  absl::string_view sv = \"This is my string.\";\n  absl::string_view my_string = sv.substr(8, 9);\n  absl::string_view is = sv.substr(5, 2);\n\n  EXPECT_EQ(\"it's my string!\", fmt::format(\"it's {}!\", my_string));\n  EXPECT_EQ(\"it's my string!\", fmt::format(\"it's {:s}!\", my_string));\n  EXPECT_EQ(\"**is**\", fmt::format(\"{:*^6}\", is));\n}\n\nTEST(FormatHelpersTest, FormatLogMessages) {\n  absl::string_view sv = \"formatted\";\n  ENVOY_LOG_MISC(info, \"fake {} message\", sv);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/hash_corpus/example",
    "content": "hello world"
  },
  {
    "path": "test/common/common/hash_fuzz_test.cc",
    "content": "#include \"common/common/hash.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\nnamespace {\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  const std::string input(reinterpret_cast<const char*>(buf), len);\n  { HashUtil::xxHash64(input); }\n  { HashUtil::djb2CaseInsensitiveHash(input); }\n  { MurmurHash::murmurHash2(input); }\n  if (len > 0) {\n    // Split the input string into two parts to make a key-value pair.\n    const size_t split_point = *reinterpret_cast<const uint8_t*>(buf) % len;\n    const std::string key = input.substr(0, split_point);\n    const std::string value = input.substr(split_point);\n    StringMap<std::string> map;\n    map[key] = value;\n    map.find(key);\n  }\n}\n\n} // namespace\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/hash_test.cc",
    "content": "#include \"common/common/hash.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nTEST(Hash, xxHash) {\n  EXPECT_EQ(3728699739546630719U, HashUtil::xxHash64(\"foo\"));\n  EXPECT_EQ(5234164152756840025U, HashUtil::xxHash64(\"bar\"));\n  EXPECT_EQ(8917841378505826757U, HashUtil::xxHash64(\"foo\\nbar\"));\n  EXPECT_EQ(4400747396090729504U, HashUtil::xxHash64(\"lyft\"));\n  EXPECT_EQ(17241709254077376921U, HashUtil::xxHash64(\"\"));\n}\n\nTEST(Hash, djb2CaseInsensitiveHash) {\n  EXPECT_EQ(211616621U, HashUtil::djb2CaseInsensitiveHash(\"foo\"));\n  EXPECT_EQ(211611524U, HashUtil::djb2CaseInsensitiveHash(\"bar\"));\n  EXPECT_EQ(282790909350396U, HashUtil::djb2CaseInsensitiveHash(\"foo\\nbar\"));\n  EXPECT_EQ(7195212308U, HashUtil::djb2CaseInsensitiveHash(\"lyft\"));\n  EXPECT_EQ(5381U, HashUtil::djb2CaseInsensitiveHash(\"\"));\n}\n\nTEST(Hash, murmurHash2) {\n  EXPECT_EQ(9631199822919835226U, MurmurHash::murmurHash2(\"foo\"));\n  EXPECT_EQ(11474628671133349555U, MurmurHash::murmurHash2(\"bar\"));\n  EXPECT_EQ(16306510975912980159U, MurmurHash::murmurHash2(\"foo\\nbar\"));\n  EXPECT_EQ(12847078931730529320U, MurmurHash::murmurHash2(\"lyft\"));\n  EXPECT_EQ(6142509188972423790U, MurmurHash::murmurHash2(\"\"));\n}\n\n#if __GLIBCXX__ >= 20130411 && __GLIBCXX__ <= 20180726\nTEST(Hash, stdhash) {\n  EXPECT_EQ(std::hash<std::string>()(std::string(\"foo\")), MurmurHash::murmurHash2(\"foo\"));\n  EXPECT_EQ(std::hash<std::string>()(std::string(\"bar\")), MurmurHash::murmurHash2(\"bar\"));\n  EXPECT_EQ(std::hash<std::string>()(std::string(\"foo\\nbar\")), MurmurHash::murmurHash2(\"foo\\nbar\"));\n  EXPECT_EQ(std::hash<std::string>()(std::string(\"lyft\")), MurmurHash::murmurHash2(\"lyft\"));\n  EXPECT_EQ(std::hash<std::string>()(std::string(\"\")), MurmurHash::murmurHash2(\"\"));\n}\n#endif\n\nTEST(Hash, sharedStringSet) {\n  SharedStringSet set;\n  auto foo = std::make_shared<std::string>(\"foo\");\n  set.insert(foo);\n  auto pos = set.find(\"foo\");\n  
EXPECT_EQ(pos->get(), foo.get());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/hex_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/hex.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nTEST(Hex, SimpleEncode) {\n  std::vector<uint8_t> bytes = {0x01, 0x02, 0x03, 0x0a, 0x0b, 0x0c};\n  EXPECT_EQ(\"0102030a0b0c\", Hex::encode(bytes));\n}\n\nTEST(Hex, RoundTrip) {\n  std::vector<uint8_t> bytes;\n  for (uint8_t i = 0; i < UINT8_MAX; i++) {\n    bytes.push_back(i);\n  }\n\n  std::string hex = Hex::encode(bytes);\n  std::vector<uint8_t> decoded = Hex::decode(hex);\n\n  EXPECT_EQ(bytes, decoded);\n}\n\nTEST(Hex, BadHex) { EXPECT_EQ(0, Hex::decode(\"abcde\").size()); }\n\nTEST(Hex, DecodeUppercase) { EXPECT_EQ(4, Hex::decode(\"ABCDEFAB\").size()); }\n\nTEST(Hex, UIntToHex) {\n  std::string base16_string = Hex::uint64ToHex(2722130815203937912ULL);\n  EXPECT_EQ(\"25c6f38dd0600e78\", base16_string);\n  EXPECT_EQ(\"0000000000000000\", Hex::uint64ToHex(0ULL));\n}\n\nTEST(Hex, UInt32ToHex) {\n  EXPECT_EQ(\"00000000\", Hex::uint32ToHex(0));\n  EXPECT_EQ(\"ffffffff\", Hex::uint32ToHex(-1));\n  EXPECT_EQ(\"deadbeef\", Hex::uint32ToHex(3735928559));\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/linked_object_test.cc",
    "content": "#include \"common/common/linked_object.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass TestObject : public LinkedObject<TestObject> {\npublic:\n  TestObject() = default;\n};\n\nTEST(LinkedObjectTest, MoveIntoListFront) {\n  std::list<std::unique_ptr<TestObject>> list;\n  auto object = std::make_unique<TestObject>();\n  TestObject* object_ptr = object.get();\n  LinkedList::moveIntoList(std::move(object), list);\n  ASSERT_EQ(1, list.size());\n  ASSERT_EQ(object_ptr, list.front().get());\n\n  auto object2 = std::make_unique<TestObject>();\n  TestObject* object2_ptr = object2.get();\n  LinkedList::moveIntoList(std::move(object2), list);\n  ASSERT_EQ(2, list.size());\n  ASSERT_EQ(object2_ptr, list.front().get());\n  ASSERT_EQ(object_ptr, list.back().get());\n}\n\nTEST(LinkedObjectTest, MoveIntoListBack) {\n  std::list<std::unique_ptr<TestObject>> list;\n  std::unique_ptr<TestObject> object = std::make_unique<TestObject>();\n  TestObject* object_ptr = object.get();\n  LinkedList::moveIntoListBack(std::move(object), list);\n  ASSERT_EQ(1, list.size());\n  ASSERT_EQ(object_ptr, list.front().get());\n\n  auto object2 = std::make_unique<TestObject>();\n  TestObject* object2_ptr = object2.get();\n  LinkedList::moveIntoListBack(std::move(object2), list);\n  ASSERT_EQ(2, list.size());\n  ASSERT_EQ(object2_ptr, list.back().get());\n  ASSERT_EQ(object_ptr, list.front().get());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/lock_guard_test.cc",
    "content": "#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Thread {\nnamespace {\n\nclass LockGuardTest : public testing::Test {\nprotected:\n  LockGuardTest() = default;\n  int a_ ABSL_GUARDED_BY(a_mutex_){0};\n  MutexBasicLockable a_mutex_;\n  int b_{0};\n};\n\nTEST_F(LockGuardTest, TestLockGuard) {\n  LockGuard lock(a_mutex_);\n  EXPECT_EQ(1, ++a_);\n}\n\nTEST_F(LockGuardTest, TestOptionalLockGuard) {\n  OptionalLockGuard lock(nullptr);\n  EXPECT_EQ(1, ++b_);\n}\n\nTEST_F(LockGuardTest, TestReleasableLockGuard) {\n  ReleasableLockGuard lock(a_mutex_);\n  EXPECT_EQ(1, ++a_);\n  lock.release();\n}\n\nTEST_F(LockGuardTest, TestTryLockGuard) {\n  TryLockGuard lock(a_mutex_);\n\n  if (lock.tryLock()) {\n    // This test doesn't work, because a_mutex_ is guarded, and thread\n    // annotations don't work with TryLockGuard. The macro is defined in\n    // include/envoy/thread/thread.h.\n    DISABLE_TRYLOCKGUARD_ANNOTATION(EXPECT_EQ(1, ++a_));\n\n    // TryLockGuard does functionally work with unguarded variables.\n    EXPECT_EQ(1, ++b_);\n  }\n}\n\n} // namespace\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/log_macros_test.cc",
    "content": "#include <chrono>\n#include <functional>\n#include <iostream>\n#include <string>\n#include <thread>\n\n#include \"common/common/fancy_logger.h\"\n#include \"common/common/logger.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/logging.h\"\n\n#include \"absl/synchronization/barrier.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nusing namespace std::chrono_literals;\n\nclass TestFilterLog : public Logger::Loggable<Logger::Id::filter> {\npublic:\n  void logMessage() {\n    ENVOY_LOG(trace, \"fake message\");\n    ENVOY_LOG(debug, \"fake message\");\n    ENVOY_LOG(warn, \"fake message\");\n    ENVOY_LOG(error, \"fake message\");\n    ENVOY_LOG(critical, \"fake message\");\n    ENVOY_CONN_LOG(info, \"fake message\", connection_);\n    ENVOY_STREAM_LOG(info, \"fake message\", stream_);\n    ENVOY_CONN_LOG(error, \"fake error\", connection_);\n    ENVOY_STREAM_LOG(error, \"fake error\", stream_);\n  }\n\n  void logMessageEscapeSequences() { ENVOY_LOG_MISC(info, \"line 1 \\n line 2 \\t tab \\\\r test\"); }\n\nprivate:\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> stream_;\n};\n\nTEST(Logger, All) {\n  // This test exists just to ensure all macros compile and run with the expected arguments provided\n\n  TestFilterLog filter;\n  filter.logMessage();\n\n  // Misc logging with no facility.\n  ENVOY_LOG_MISC(info, \"fake message\");\n}\n\nTEST(Logger, EvaluateParams) {\n  uint32_t i = 1;\n\n  // Set logger's level to low level.\n  // Log message with higher severity and make sure that params were evaluated.\n  LogLevelSetter save_levels(spdlog::level::info);\n  ENVOY_LOG_MISC(warn, \"test message '{}'\", i++);\n\n  EXPECT_THAT(i, testing::Eq(2));\n}\n\nTEST(Logger, DoNotEvaluateParams) {\n  uint32_t i = 1;\n\n  // Set logger's logging level high and log a message with lower severity\n  // params should not 
be evaluated.\n  LogLevelSetter save_levels(spdlog::level::critical);\n  ENVOY_LOG_MISC(error, \"test message '{}'\", i++);\n  EXPECT_THAT(i, testing::Eq(1));\n}\n\nTEST(Logger, LogAsStatement) {\n  // Just log as part of if ... statement\n  uint32_t i = 1, j = 1;\n\n  // Set logger's logging level to high\n  LogLevelSetter save_levels(spdlog::level::critical);\n\n  // Make sure that if statement inside of LOGGER macro does not catch trailing\n  // else ....\n  if (true) // NOLINT(readability-braces-around-statements)\n    ENVOY_LOG_MISC(warn, \"test message 1 '{}'\", i++);\n  else // NOLINT(readability-braces-around-statements)\n    ENVOY_LOG_MISC(critical, \"test message 2 '{}'\", j++);\n\n  EXPECT_THAT(i, testing::Eq(1));\n  EXPECT_THAT(j, testing::Eq(1));\n\n  // Do the same with curly brackets\n  if (true) {\n    ENVOY_LOG_MISC(warn, \"test message 3 '{}'\", i++);\n  } else {\n    ENVOY_LOG_MISC(critical, \"test message 4 '{}'\", j++);\n  }\n\n  EXPECT_THAT(i, testing::Eq(1));\n  EXPECT_THAT(j, testing::Eq(1));\n}\n\nTEST(Logger, CheckLoggerLevel) {\n  class LogTestClass : public Logger::Loggable<Logger::Id::misc> {\n  public:\n    void setLevel(const spdlog::level::level_enum level) { ENVOY_LOGGER().set_level(level); }\n    uint32_t executeAtTraceLevel() {\n      if (ENVOY_LOG_CHECK_LEVEL(trace)) {\n        //  Logger's level was at least trace\n        return 1;\n      } else {\n        // Logger's level was higher than trace\n        return 2;\n      };\n    }\n  };\n\n  LogTestClass test_obj;\n\n  // Set Loggers severity low\n  test_obj.setLevel(spdlog::level::trace);\n  EXPECT_THAT(test_obj.executeAtTraceLevel(), testing::Eq(1));\n\n  test_obj.setLevel(spdlog::level::info);\n  EXPECT_THAT(test_obj.executeAtTraceLevel(), testing::Eq(2));\n}\n\nvoid spamCall(std::function<void()>&& call_to_spam, const uint32_t num_threads) {\n  std::vector<std::thread> threads(num_threads);\n  auto barrier = std::make_unique<absl::Barrier>(num_threads);\n\n  for (auto& 
thread : threads) {\n    thread = std::thread([&call_to_spam, &barrier] {\n      // Allow threads to accrue, to maximize concurrency on the call we are testing.\n      if (barrier->Block()) {\n        barrier.reset();\n      }\n      call_to_spam();\n    });\n  }\n  for (std::thread& thread : threads) {\n    thread.join();\n  }\n}\n\nclass SparseLogMacrosTest : public testing::TestWithParam<bool>,\n                            public Logger::Loggable<Logger::Id::filter> {\npublic:\n  SparseLogMacrosTest() : use_misc_macros_(GetParam()) { evaluations() = 0; }\n\n  void logSomething() {\n    if (use_misc_macros_) {\n      ENVOY_LOG_ONCE_MISC(error, \"foo1 '{}'\", evaluations()++);\n    } else {\n      ENVOY_LOG_ONCE(error, \"foo1 '{}'\", evaluations()++);\n    }\n  }\n\n  void logSomethingElse() {\n    if (use_misc_macros_) {\n      ENVOY_LOG_ONCE_MISC(error, \"foo2 '{}'\", evaluations()++);\n    } else {\n      ENVOY_LOG_ONCE(error, \"foo2 '{}'\", evaluations()++);\n    }\n  }\n\n  void logSomethingBelowLogLevelOnce() {\n    if (use_misc_macros_) {\n      ENVOY_LOG_ONCE_MISC(debug, \"foo3 '{}'\", evaluations()++);\n    } else {\n      ENVOY_LOG_ONCE(debug, \"foo3 '{}'\", evaluations()++);\n    }\n  }\n\n  void logSomethingThrice() {\n    if (use_misc_macros_) {\n      ENVOY_LOG_FIRST_N_MISC(error, 3, \"foo4 '{}'\", evaluations()++);\n    } else {\n      ENVOY_LOG_FIRST_N(error, 3, \"foo4 '{}'\", evaluations()++);\n    }\n  }\n\n  void logEverySeventh() {\n    if (use_misc_macros_) {\n      ENVOY_LOG_EVERY_NTH_MISC(error, 7, \"foo5 '{}'\", evaluations()++);\n    } else {\n      ENVOY_LOG_EVERY_NTH(error, 7, \"foo5 '{}'\", evaluations()++);\n    }\n  }\n\n  void logEveryPow2() {\n    if (use_misc_macros_) {\n      ENVOY_LOG_EVERY_POW_2_MISC(error, \"foo6 '{}'\", evaluations()++);\n    } else {\n      ENVOY_LOG_EVERY_POW_2(error, \"foo6 '{}'\", evaluations()++);\n    }\n  }\n\n  void logEverySecond() {\n    if (use_misc_macros_) {\n      ENVOY_LOG_PERIODIC_MISC(error, 
1s, \"foo7 '{}'\", evaluations()++);\n    } else {\n      ENVOY_LOG_PERIODIC(error, 1s, \"foo7 '{}'\", evaluations()++);\n    }\n  }\n  std::atomic<int32_t>& evaluations() { MUTABLE_CONSTRUCT_ON_FIRST_USE(std::atomic<int32_t>); };\n\n  const bool use_misc_macros_;\n  LogLevelSetter save_levels_{spdlog::level::info};\n};\n\nINSTANTIATE_TEST_SUITE_P(MiscOrNot, SparseLogMacrosTest, testing::Values(false, true));\n\nTEST_P(SparseLogMacrosTest, All) {\n  constexpr uint32_t kNumThreads = 100;\n  spamCall(\n      [this]() {\n        logSomething();\n        logSomething();\n      },\n      kNumThreads);\n  EXPECT_EQ(1, evaluations());\n  spamCall(\n      [this]() {\n        logSomethingElse();\n        logSomethingElse();\n      },\n      kNumThreads);\n  // Two distinct log lines ought to result in two evaluations, and no more.\n  EXPECT_EQ(2, evaluations());\n\n  spamCall([this]() { logSomethingThrice(); }, kNumThreads);\n  // Single log line should be emitted 3 times.\n  EXPECT_EQ(5, evaluations());\n\n  spamCall([this]() { logEverySeventh(); }, kNumThreads);\n  // (100 threads / log every 7th) + 1 = 15 more evaluations upon logging every 7th.\n  EXPECT_EQ(20, evaluations());\n\n  logEveryPow2();\n  // First call ought to propagate.\n  EXPECT_EQ(21, evaluations());\n\n  spamCall([this]() { logEveryPow2(); }, kNumThreads);\n  // 64 is the highest power of two that fits when kNumThreads == 100.\n  // We should log on 2, 4, 8, 16, 32, 64, which means we can expect to add 6 more evaluations.\n  EXPECT_EQ(27, evaluations());\n\n  spamCall([this]() { logEverySecond(); }, kNumThreads);\n  // First call ought to evaluate.\n  EXPECT_EQ(28, evaluations());\n\n  // We expect one log entry / second. Therefore each spamCall ought to result in one\n  // more evaluation. 
This depends on real time and not sim time, hopefully 1 second\n  // is enough to not introduce flakes in practice.\n  std::this_thread::sleep_for(1s); // NOLINT\n  spamCall([this]() { logEverySecond(); }, kNumThreads);\n  EXPECT_EQ(29, evaluations());\n\n  spamCall([this]() { logSomethingBelowLogLevelOnce(); }, kNumThreads);\n  // We shouldn't observe additional argument evaluations for log lines below the configured\n  // log level.\n  EXPECT_EQ(29, evaluations());\n}\n\nTEST(RegistryTest, LoggerWithName) {\n  EXPECT_EQ(nullptr, Logger::Registry::logger(\"blah\"));\n  EXPECT_EQ(\"upstream\", Logger::Registry::logger(\"upstream\")->name());\n}\n\nclass FormatTest : public testing::Test {\npublic:\n  static void logMessageEscapeSequences() {\n    ENVOY_LOG_MISC(info, \"line 1 \\n line 2 \\t tab \\\\r test\");\n  }\n};\n\nTEST_F(FormatTest, OutputUnescaped) {\n  const Envoy::ExpectedLogMessages message{{\"info\", \"line 1 \\n line 2 \\t tab \\\\r test\"}};\n  EXPECT_LOG_CONTAINS_ALL_OF(message, logMessageEscapeSequences());\n}\n\nTEST_F(FormatTest, OutputEscaped) {\n  // Note this uses a raw string literal\n  const Envoy::ExpectedLogMessages message{{\"info\", R\"(line 1 \\n line 2 \\t tab \\\\r test)\"}};\n  EXPECT_LOG_CONTAINS_ALL_OF_ESCAPED(message, logMessageEscapeSequences());\n}\n\n/**\n * Test for Fancy Logger convenient macros.\n */\nTEST(Fancy, Global) {\n  FANCY_LOG(info, \"Hello world! Here's a line of fancy log!\");\n  FANCY_LOG(error, \"Fancy Error! 
Here's the second message!\");\n\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> stream_;\n  FANCY_CONN_LOG(warn, \"Fake info {} of connection\", connection_, 1);\n  FANCY_STREAM_LOG(warn, \"Fake warning {} of stream\", stream_, 1);\n\n  FANCY_LOG(critical, \"Critical message for later flush.\");\n  FANCY_FLUSH_LOG();\n}\n\nTEST(Fancy, FastPath) {\n  getFancyContext().setFancyLogger(__FILE__, spdlog::level::info);\n  for (int i = 0; i < 10; i++) {\n    FANCY_LOG(warn, \"Fake warning No. {}\", i);\n  }\n}\n\nTEST(Fancy, SetLevel) {\n  const char* file = \"P=NP_file\";\n  bool res = getFancyContext().setFancyLogger(file, spdlog::level::trace);\n  EXPECT_EQ(res, false);\n  SpdLoggerSharedPtr p = getFancyContext().getFancyLogEntry(file);\n  EXPECT_EQ(p, nullptr);\n\n  res = getFancyContext().setFancyLogger(__FILE__, spdlog::level::err);\n  EXPECT_EQ(res, true);\n  FANCY_LOG(error, \"Fancy Error! Here's a test for level.\");\n  FANCY_LOG(warn, \"Warning: you shouldn't see this message!\");\n  p = getFancyContext().getFancyLogEntry(__FILE__);\n  EXPECT_NE(p, nullptr);\n  EXPECT_EQ(p->level(), spdlog::level::err);\n\n  getFancyContext().setAllFancyLoggers(spdlog::level::info);\n  FANCY_LOG(info, \"Info: all loggers back to info.\");\n  FANCY_LOG(debug, \"Debug: you shouldn't see this message!\");\n  EXPECT_EQ(getFancyContext().getFancyLogEntry(__FILE__)->level(), spdlog::level::info);\n}\n\nTEST(Fancy, Iteration) {\n  FANCY_LOG(info, \"Info: iteration test begins.\");\n  getFancyContext().setAllFancyLoggers(spdlog::level::info);\n  std::string output = getFancyContext().listFancyLoggers();\n  EXPECT_EQ(output, \"   test/common/common/log_macros_test.cc: 2\\n\");\n  std::string log_format = \"[%T.%e][%t][%l][%n] %v\";\n  getFancyContext().setFancyLogger(__FILE__, spdlog::level::err);\n  // setDefaultFancyLevelFormat relies on previous default and might cause error online\n  // 
getFancyContext().setDefaultFancyLevelFormat(spdlog::level::warn, log_format);\n  FANCY_LOG(warn, \"Warning: now level is warning, format changed (Date removed).\");\n  FANCY_LOG(warn, getFancyContext().listFancyLoggers());\n  // EXPECT_EQ(getFancyContext().getFancyLogEntry(__FILE__)->level(),\n  //           spdlog::level::warn); // note fancy_default_level isn't changed\n}\n\nTEST(Fancy, Context) {\n  FANCY_LOG(info, \"Info: context API needs test.\");\n  bool enable_fine_grain_logging = Logger::Context::useFancyLogger();\n  printf(\" --> If use fancy logger: %d\\n\", enable_fine_grain_logging);\n  if (enable_fine_grain_logging) {\n    FANCY_LOG(critical, \"Cmd option set: all previous Envoy Log should be converted now!\");\n  }\n  Logger::Context::enableFancyLogger();\n  EXPECT_EQ(Logger::Context::useFancyLogger(), true);\n  EXPECT_EQ(Logger::Context::getFancyLogFormat(), \"[%Y-%m-%d %T.%e][%t][%l] [%g:%#] %v\");\n  // EXPECT_EQ(Logger::Context::getFancyDefaultLevel(),\n  //           spdlog::level::err); // default is error in test environment\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/logger_corpus/test",
    "content": "hello world\nrandom;string::::\\np\nhello \\r\\t\\n world \\n\nworld \\r\\r\\r\\ \\\\\\\\\\\\ \\r test   \\r\\n\n\\t\\t\\t\\t\\t\\t\n\\n\\n\\n\\n\\n\\n\n\\ \\ \\ \\ \\ \\ \\\n%&*@($&*#\\r\\r\\n\nno_crashes\n}{}{}{}\\r\\n"
  },
  {
    "path": "test/common/common/logger_fuzz_test.cc",
    "content": "#include \"common/common/logger.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  Logger::DelegatingLogSink::escapeLogLine(\n      absl::string_view(reinterpret_cast<const char*>(buf), len));\n}\n\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/logger_speed_test.cc",
    "content": "#include <iostream>\n#include <string>\n\n#include \"common/common/fancy_logger.h\"\n#include \"common/common/logger.h\"\n\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\n\n/**\n * Benchmark for the main slow path, i.e. new logger creation here.\n */\nstatic void fancySlowPath(benchmark::State& state) {\n  FANCY_LOG(info, \"Slow path test begins.\");\n  std::atomic<spdlog::logger*> logger;\n  for (auto _ : state) {\n    UNREFERENCED_PARAMETER(_);\n    for (int i = 0; i < state.range(0); i++) {\n      std::string key = \"k\" + std::to_string(i + (state.thread_index << 8));\n      getFancyContext().initFancyLogger(key, logger);\n    }\n  }\n}\n\n#define FL FANCY_LOG(trace, \"Default\")\n#define FL_8                                                                                       \\\n  FL;                                                                                              \\\n  FL;                                                                                              \\\n  FL;                                                                                              \\\n  FL;                                                                                              \\\n  FL;                                                                                              \\\n  FL;                                                                                              \\\n  FL;                                                                                              \\\n  FL;\n#define FL_64                                                                                      \\\n  { FL_8 FL_8 FL_8 FL_8 FL_8 FL_8 FL_8 FL_8 }\n#define FL_512                                                                                     \\\n  { FL_64 FL_64 FL_64 FL_64 FL_64 FL_64 FL_64 FL_64 }\n#define FL_1024                                                                                    \\\n  { FL_512 FL_512 }\n\n/**\n * 
Benchmark for medium path, i.e. new site initialization within the same file.\n */\nstatic void fancyMediumPath(benchmark::State& state) {\n  FANCY_LOG(info, \"Medium path test begins.\");\n  for (auto _ : state) {\n    UNREFERENCED_PARAMETER(_);\n    // create different call sites for medium path\n    for (int i = 0; i < state.range(0); i++) {\n      FL_1024\n    }\n  }\n}\n\n/**\n * Benchmark for fast path, i.e. integration test of common scenario.\n */\nstatic void fancyFastPath(benchmark::State& state) {\n  // control log length to be the same as normal Envoy below\n  std::string msg(100 - strlen(__FILE__) + 4, '.');\n  spdlog::level::level_enum lv = state.range(1) ? spdlog::level::trace : spdlog::level::info;\n  getFancyContext().setFancyLogger(FANCY_KEY, lv);\n  for (auto _ : state) {\n    UNREFERENCED_PARAMETER(_);\n    for (int i = 0; i < state.range(0); i++) {\n      FANCY_LOG(trace, \"Fast path: {}\", msg);\n    }\n  }\n}\n\n/**\n * Benchmark for ENVOY_LOG to compare.\n */\nstatic void envoyNormal(benchmark::State& state) {\n  spdlog::level::level_enum lv = state.range(1) ? 
spdlog::level::trace : spdlog::level::info;\n  std::string msg(100, '.');\n  GET_MISC_LOGGER().set_level(lv);\n  for (auto _ : state) {\n    UNREFERENCED_PARAMETER(_);\n    for (int i = 0; i < state.range(0); i++) {\n      ENVOY_LOG_MISC(trace, \"Fast path: {}\", msg);\n    }\n  }\n}\n\n/**\n * Benchmark for a large number of level setting.\n */\nstatic void fancyLevelSetting(benchmark::State& state) {\n  FANCY_LOG(info, \"Level setting test begins.\");\n  for (auto _ : state) {\n    UNREFERENCED_PARAMETER(_);\n    for (int i = 0; i < state.range(0); i++) {\n      getFancyContext().setFancyLogger(__FILE__, spdlog::level::warn);\n    }\n  }\n}\n\n/**\n * Comparison with Envoy's level setting.\n */\nstatic void envoyLevelSetting(benchmark::State& state) {\n  ENVOY_LOG_MISC(info, \"Envoy's level setting begins.\");\n  for (auto _ : state) {\n    UNREFERENCED_PARAMETER(_);\n    for (int i = 0; i < state.range(0); i++) {\n      GET_MISC_LOGGER().set_level(spdlog::level::warn);\n    }\n  }\n}\n\n/**\n * Benchmarks in detail starts.\n */\nBENCHMARK(fancySlowPath)->Arg(1 << 10);\nBENCHMARK(fancySlowPath)->Arg(1 << 10)->Threads(20)->MeasureProcessCPUTime();\nBENCHMARK(fancySlowPath)->Arg(1 << 10)->Threads(200)->MeasureProcessCPUTime();\n\nBENCHMARK(fancyMediumPath)->Arg(1)->Iterations(1);\n// Seems medium path's concurrency test doesn't make sense (hard to do as well)\n\nBENCHMARK(fancyFastPath)->Args({1024, 0})->Args({1024, 1}); // First no actual log, then log\nBENCHMARK(fancyFastPath)->Args({1 << 10, 0})->Threads(20)->MeasureProcessCPUTime();\nBENCHMARK(fancyFastPath)->Args({1 << 10, 1})->Threads(20)->MeasureProcessCPUTime();\nBENCHMARK(fancyFastPath)->Args({1 << 10, 0})->Threads(200)->MeasureProcessCPUTime();\nBENCHMARK(fancyFastPath)->Args({1 << 10, 1})->Threads(200)->MeasureProcessCPUTime();\n\nBENCHMARK(envoyNormal)->Args({1024, 0})->Args({1024, 1});\nBENCHMARK(envoyNormal)->Args({1 << 10, 0})->Threads(20)->MeasureProcessCPUTime();\nBENCHMARK(envoyNormal)->Args({1 << 
10, 1})->Threads(20)->MeasureProcessCPUTime();\nBENCHMARK(envoyNormal)->Args({1 << 10, 0})->Threads(200)->MeasureProcessCPUTime();\nBENCHMARK(envoyNormal)->Args({1 << 10, 1})->Threads(200)->MeasureProcessCPUTime();\n\nBENCHMARK(fancyLevelSetting)->Arg(1 << 10);\nBENCHMARK(envoyLevelSetting)->Arg(1 << 10);\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/logger_test.cc",
    "content": "#include <string>\n\n#include \"common/common/logger.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Logger {\n\nTEST(LoggerTest, StackingStderrSinkDelegate) {\n  StderrSinkDelegate stacked(Envoy::Logger::Registry::getSink());\n}\n\nTEST(LoggerEscapeTest, LinuxEOL) {\n  EXPECT_EQ(\"line 1 \\\\n line 2\\n\", DelegatingLogSink::escapeLogLine(\"line 1 \\n line 2\\n\"));\n}\n\nTEST(LoggerEscapeTest, WindowEOL) {\n  EXPECT_EQ(\"line 1 \\\\n line 2\\r\\n\", DelegatingLogSink::escapeLogLine(\"line 1 \\n line 2\\r\\n\"));\n}\n\nTEST(LoggerEscapeTest, NoTrailingWhitespace) {\n  EXPECT_EQ(\"line 1 \\\\n line 2\", DelegatingLogSink::escapeLogLine(\"line 1 \\n line 2\"));\n}\n\nTEST(LoggerEscapeTest, NoWhitespace) {\n  EXPECT_EQ(\"line1\", DelegatingLogSink::escapeLogLine(\"line1\"));\n}\n\nTEST(LoggerEscapeTest, AnyTrailingWhitespace) {\n  EXPECT_EQ(\"line 1 \\\\t tab 1 \\\\n line 2\\t\\n\",\n            DelegatingLogSink::escapeLogLine(\"line 1 \\t tab 1 \\n line 2\\t\\n\"));\n}\n\nTEST(LoggerEscapeTest, WhitespaceOnly) {\n  // 8 spaces\n  EXPECT_EQ(\"        \", DelegatingLogSink::escapeLogLine(\"        \"));\n\n  // Any whitespace characters\n  EXPECT_EQ(\"\\r\\n\\t \\r\\n \\n\", DelegatingLogSink::escapeLogLine(\"\\r\\n\\t \\r\\n \\n\"));\n}\n\nTEST(LoggerEscapeTest, Empty) { EXPECT_EQ(\"\", DelegatingLogSink::escapeLogLine(\"\")); }\n\n} // namespace Logger\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/matchers_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/type/matcher/v3/metadata.pb.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n#include \"envoy/type/matcher/v3/value.pb.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/config/metadata.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Matcher {\nnamespace {\n\nTEST(MetadataTest, MatchNullValue) {\n  envoy::config::core::v3::Metadata metadata;\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"label\")\n      .set_string_value(\"test\");\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.b\", \"label\")\n      .set_null_value(ProtobufWkt::NullValue::NULL_VALUE);\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.b\");\n  matcher.add_path()->set_key(\"label\");\n\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_null_match();\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n}\n\nTEST(MetadataTest, MatchDoubleValue) {\n  envoy::config::core::v3::Metadata metadata;\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"label\")\n      .set_string_value(\"test\");\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.b\", \"label\")\n      .set_number_value(9);\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.b\");\n  matcher.add_path()->set_key(\"label\");\n\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_double_match()->set_exact(1);\n  
EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_double_match()->set_exact(9);\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  auto r = matcher.mutable_value()->mutable_double_match()->mutable_range();\n  r->set_start(9.1);\n  r->set_end(10);\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  r = matcher.mutable_value()->mutable_double_match()->mutable_range();\n  r->set_start(8.9);\n  r->set_end(9);\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  r = matcher.mutable_value()->mutable_double_match()->mutable_range();\n  r->set_start(9);\n  r->set_end(9.1);\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n}\n\nTEST(MetadataTest, MatchStringExactValue) {\n  envoy::config::core::v3::Metadata metadata;\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"label\")\n      .set_string_value(\"test\");\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.b\", \"label\")\n      .set_string_value(\"prod\");\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.b\");\n  matcher.add_path()->set_key(\"label\");\n\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"prod\");\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n}\n\nTEST(MetadataTest, MatchStringPrefixValue) {\n  envoy::config::core::v3::Metadata metadata;\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"label\")\n      .set_string_value(\"test\");\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.b\", \"label\")\n      .set_string_value(\"prodabc\");\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  
matcher.set_filter(\"envoy.filter.b\");\n  matcher.add_path()->set_key(\"label\");\n\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_string_match()->set_prefix(\"prodx\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_string_match()->set_prefix(\"prod\");\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n}\n\nTEST(MetadataTest, MatchStringSuffixValue) {\n  envoy::config::core::v3::Metadata metadata;\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"label\")\n      .set_string_value(\"test\");\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.b\", \"label\")\n      .set_string_value(\"abcprod\");\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.b\");\n  matcher.add_path()->set_key(\"label\");\n\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_string_match()->set_suffix(\"prodx\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_string_match()->set_suffix(\"prod\");\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n}\n\nTEST(MetadataTest, MatchStringContainsValue) {\n  envoy::config::core::v3::Metadata metadata;\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"label\")\n      .set_string_value(\"test\");\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.b\", \"label\")\n      .set_string_value(\"abcprodef\");\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.b\");\n  matcher.add_path()->set_key(\"label\");\n\n  
matcher.mutable_value()->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_string_match()->set_contains(\"pride\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->mutable_string_match()->set_contains(\"prod\");\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n}\n\nTEST(MetadataTest, MatchBoolValue) {\n  envoy::config::core::v3::Metadata metadata;\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"label\")\n      .set_string_value(\"test\");\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.b\", \"label\")\n      .set_bool_value(true);\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.b\");\n  matcher.add_path()->set_key(\"label\");\n\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->set_bool_match(false);\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->set_bool_match(true);\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n}\n\nTEST(MetadataTest, MatchPresentValue) {\n  envoy::config::core::v3::Metadata metadata;\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"label\")\n      .set_string_value(\"test\");\n  Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.b\", \"label\")\n      .set_number_value(1);\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.b\");\n  matcher.add_path()->set_key(\"label\");\n\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  
matcher.mutable_value()->set_present_match(false);\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  matcher.mutable_value()->set_present_match(true);\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  matcher.clear_path();\n  matcher.add_path()->set_key(\"unknown\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n}\n\n// Helper function to retrieve the reference of an entry in a ListMatcher from a MetadataMatcher.\nenvoy::type::matcher::v3::ValueMatcher*\nlistMatchEntry(envoy::type::matcher::v3::MetadataMatcher* matcher) {\n  return matcher->mutable_value()->mutable_list_match()->mutable_one_of();\n}\n\nTEST(MetadataTest, MatchStringListValue) {\n  envoy::config::core::v3::Metadata metadata;\n  ProtobufWkt::Value& metadataValue =\n      Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"groups\");\n  ProtobufWkt::ListValue* values = metadataValue.mutable_list_value();\n  values->add_values()->set_string_value(\"first\");\n  values->add_values()->set_string_value(\"second\");\n  values->add_values()->set_string_value(\"third\");\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.a\");\n  matcher.add_path()->set_key(\"groups\");\n\n  listMatchEntry(&matcher)->mutable_string_match()->set_exact(\"second\");\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  listMatchEntry(&matcher)->mutable_string_match()->set_prefix(\"fi\");\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  listMatchEntry(&matcher)->mutable_string_match()->set_suffix(\"rd\");\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  listMatchEntry(&matcher)->mutable_string_match()->set_exact(\"fourth\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  listMatchEntry(&matcher)->mutable_string_match()->set_prefix(\"none\");\n  
EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  values->clear_values();\n  metadataValue.Clear();\n}\n\nTEST(MetadataTest, MatchBoolListValue) {\n  envoy::config::core::v3::Metadata metadata;\n  ProtobufWkt::Value& metadataValue =\n      Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"groups\");\n  ProtobufWkt::ListValue* values = metadataValue.mutable_list_value();\n  values->add_values()->set_bool_value(false);\n  values->add_values()->set_bool_value(false);\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.a\");\n  matcher.add_path()->set_key(\"groups\");\n\n  listMatchEntry(&matcher)->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  listMatchEntry(&matcher)->set_bool_match(true);\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  listMatchEntry(&matcher)->set_bool_match(false);\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  values->clear_values();\n  metadataValue.Clear();\n}\n\nTEST(MetadataTest, MatchDoubleListValue) {\n  envoy::config::core::v3::Metadata metadata;\n  ProtobufWkt::Value& metadataValue =\n      Envoy::Config::Metadata::mutableMetadataValue(metadata, \"envoy.filter.a\", \"groups\");\n  ProtobufWkt::ListValue* values = metadataValue.mutable_list_value();\n  values->add_values()->set_number_value(10);\n  values->add_values()->set_number_value(23);\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"envoy.filter.a\");\n  matcher.add_path()->set_key(\"groups\");\n\n  listMatchEntry(&matcher)->mutable_string_match()->set_exact(\"test\");\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  listMatchEntry(&matcher)->set_bool_match(true);\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  
listMatchEntry(&matcher)->mutable_double_match()->set_exact(9);\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n  listMatchEntry(&matcher)->mutable_double_match()->set_exact(10);\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  auto r = listMatchEntry(&matcher)->mutable_double_match()->mutable_range();\n  r->set_start(10);\n  r->set_end(15);\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  r = listMatchEntry(&matcher)->mutable_double_match()->mutable_range();\n  r->set_start(20);\n  r->set_end(24);\n  EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  r = listMatchEntry(&matcher)->mutable_double_match()->mutable_range();\n  r->set_start(24);\n  r->set_end(26);\n  EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata));\n\n  values->clear_values();\n  metadataValue.Clear();\n}\n\nTEST(StringMatcher, ExactMatchIgnoreCase) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_exact(\"exact\");\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"exact\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"EXACT\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"exacz\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"other\"));\n\n  matcher.set_ignore_case(true);\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"exact\"));\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"EXACT\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"exacz\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"other\"));\n}\n\nTEST(StringMatcher, PrefixMatchIgnoreCase) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_prefix(\"prefix\");\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"prefix-abc\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"PREFIX-ABC\"));\n  
EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"prefiz-abc\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"other\"));\n\n  matcher.set_ignore_case(true);\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"prefix-abc\"));\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"PREFIX-ABC\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"prefiz-abc\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"other\"));\n}\n\nTEST(StringMatcher, SuffixMatchIgnoreCase) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_suffix(\"suffix\");\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"abc-suffix\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"ABC-SUFFIX\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"abc-suffiz\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"other\"));\n\n  matcher.set_ignore_case(true);\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"abc-suffix\"));\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"ABC-SUFFIX\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"abc-suffiz\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"other\"));\n}\n\nTEST(StringMatcher, ContainsMatchIgnoreCase) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_contains(\"contained-str\");\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"abc-contained-str-def\"));\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"contained-str\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"ABC-Contained-Str-DEF\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"abc-container-int-def\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"other\"));\n\n  matcher.set_ignore_case(true);\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"abc-contained-str-def\"));\n  
EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"abc-cOnTaInEd-str-def\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"abc-ContAineR-str-def\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"other\"));\n}\n\nTEST(StringMatcher, SafeRegexValue) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.mutable_safe_regex()->mutable_google_re2();\n  matcher.mutable_safe_regex()->set_regex(\"foo.*\");\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"foo\"));\n  EXPECT_TRUE(Matchers::StringMatcherImpl(matcher).match(\"foobar\"));\n  EXPECT_FALSE(Matchers::StringMatcherImpl(matcher).match(\"bar\"));\n}\n\nTEST(StringMatcher, RegexValueIgnoreCase) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_ignore_case(true);\n  matcher.set_hidden_envoy_deprecated_regex(\"foo\");\n  EXPECT_THROW_WITH_MESSAGE(Matchers::StringMatcherImpl(matcher).match(\"foo\"), EnvoyException,\n                            \"ignore_case has no effect for regex.\");\n}\n\nTEST(StringMatcher, SafeRegexValueIgnoreCase) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_ignore_case(true);\n  matcher.mutable_safe_regex()->mutable_google_re2();\n  matcher.mutable_safe_regex()->set_regex(\"foo\");\n  EXPECT_THROW_WITH_MESSAGE(Matchers::StringMatcherImpl(matcher).match(\"foo\"), EnvoyException,\n                            \"ignore_case has no effect for safe_regex.\");\n}\n\nTEST(PathMatcher, MatchExactPath) {\n  const auto matcher = Envoy::Matchers::PathMatcher::createExact(\"/exact\", false);\n\n  EXPECT_TRUE(matcher->match(\"/exact\"));\n  EXPECT_TRUE(matcher->match(\"/exact?param=val\"));\n  EXPECT_TRUE(matcher->match(\"/exact#fragment\"));\n  EXPECT_TRUE(matcher->match(\"/exact#fragment?param=val\"));\n  EXPECT_FALSE(matcher->match(\"/EXACT\"));\n  EXPECT_FALSE(matcher->match(\"/exacz\"));\n  EXPECT_FALSE(matcher->match(\"/exact-abc\"));\n  EXPECT_FALSE(matcher->match(\"/exacz?/exact\"));\n  
EXPECT_FALSE(matcher->match(\"/exacz#/exact\"));\n}\n\nTEST(PathMatcher, MatchExactPathIgnoreCase) {\n  const auto matcher = Envoy::Matchers::PathMatcher::createExact(\"/exact\", true);\n\n  EXPECT_TRUE(matcher->match(\"/exact\"));\n  EXPECT_TRUE(matcher->match(\"/EXACT\"));\n  EXPECT_TRUE(matcher->match(\"/exact?param=val\"));\n  EXPECT_TRUE(matcher->match(\"/Exact#fragment\"));\n  EXPECT_TRUE(matcher->match(\"/EXACT#fragment?param=val\"));\n  EXPECT_FALSE(matcher->match(\"/exacz\"));\n  EXPECT_FALSE(matcher->match(\"/exact-abc\"));\n  EXPECT_FALSE(matcher->match(\"/exacz?/exact\"));\n  EXPECT_FALSE(matcher->match(\"/exacz#/exact\"));\n}\n\nTEST(PathMatcher, MatchPrefixPath) {\n  const auto matcher = Envoy::Matchers::PathMatcher::createPrefix(\"/prefix\", false);\n\n  EXPECT_TRUE(matcher->match(\"/prefix\"));\n  EXPECT_TRUE(matcher->match(\"/prefix-abc\"));\n  EXPECT_TRUE(matcher->match(\"/prefix?param=val\"));\n  EXPECT_TRUE(matcher->match(\"/prefix#fragment\"));\n  EXPECT_TRUE(matcher->match(\"/prefix#fragment?param=val\"));\n  EXPECT_FALSE(matcher->match(\"/PREFIX\"));\n  EXPECT_FALSE(matcher->match(\"/prefiz\"));\n  EXPECT_FALSE(matcher->match(\"/prefiz?/prefix\"));\n  EXPECT_FALSE(matcher->match(\"/prefiz#/prefix\"));\n}\n\nTEST(PathMatcher, MatchPrefixPathIgnoreCase) {\n  const auto matcher = Envoy::Matchers::PathMatcher::createPrefix(\"/prefix\", true);\n\n  EXPECT_TRUE(matcher->match(\"/prefix\"));\n  EXPECT_TRUE(matcher->match(\"/prefix-abc\"));\n  EXPECT_TRUE(matcher->match(\"/Prefix?param=val\"));\n  EXPECT_TRUE(matcher->match(\"/Prefix#fragment\"));\n  EXPECT_TRUE(matcher->match(\"/PREFIX#fragment?param=val\"));\n  EXPECT_TRUE(matcher->match(\"/PREFIX\"));\n  EXPECT_FALSE(matcher->match(\"/prefiz\"));\n  EXPECT_FALSE(matcher->match(\"/prefiz?/prefix\"));\n  EXPECT_FALSE(matcher->match(\"/prefiz#/prefix\"));\n}\n\nTEST(PathMatcher, MatchSuffixPath) {\n  envoy::type::matcher::v3::PathMatcher matcher;\n  matcher.mutable_path()->set_suffix(\"suffix\");\n\n 
 EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/suffix\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/abc-suffix\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/suffix?param=val\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/suffix#fragment\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/suffix#fragment?param=val\"));\n  EXPECT_FALSE(Matchers::PathMatcher(matcher).match(\"/suffiz\"));\n  EXPECT_FALSE(Matchers::PathMatcher(matcher).match(\"/suffiz?param=suffix\"));\n  EXPECT_FALSE(Matchers::PathMatcher(matcher).match(\"/suffiz#suffix\"));\n}\n\nTEST(PathMatcher, MatchContainsPath) {\n  envoy::type::matcher::v3::PathMatcher matcher;\n  matcher.mutable_path()->set_contains(\"contains\");\n\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/contains\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/abc-contains\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/contains-abc\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/abc-contains-def\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/abc-contains-def?param=val\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/abc-contains-def#fragment\"));\n  EXPECT_FALSE(Matchers::PathMatcher(matcher).match(\"/abc-def#containsfragment?param=contains\"));\n  EXPECT_FALSE(Matchers::PathMatcher(matcher).match(\"/abc-curtains-def\"));\n}\n\nTEST(PathMatcher, MatchRegexPath) {\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.mutable_safe_regex()->mutable_google_re2();\n  matcher.mutable_safe_regex()->set_regex(\".*regex.*\");\n\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/regex\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/regex/xyz\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/xyz/regex\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/regex?param=val\"));\n  EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/regex#fragment\"));\n  
EXPECT_TRUE(Matchers::PathMatcher(matcher).match(\"/regex#fragment?param=val\"));\n  EXPECT_FALSE(Matchers::PathMatcher(matcher).match(\"/regez\"));\n  EXPECT_FALSE(Matchers::PathMatcher(matcher).match(\"/regez?param=regex\"));\n  EXPECT_FALSE(Matchers::PathMatcher(matcher).match(\"/regez#regex\"));\n}\n\n} // namespace\n} // namespace Matcher\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/mem_block_builder_test.cc",
    "content": "#include <vector>\n\n#include \"common/common/mem_block_builder.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(MemBlockBuilderTest, AppendUint8) {\n  MemBlockBuilder<uint8_t> mem_block(10);\n  EXPECT_EQ(10, mem_block.capacity());\n  mem_block.appendOne(5);\n  EXPECT_EQ(9, mem_block.capacityRemaining());\n  const uint8_t foo[] = {6, 7};\n  mem_block.appendData(absl::MakeConstSpan(foo, ABSL_ARRAYSIZE(foo)));\n  EXPECT_EQ(7, mem_block.capacityRemaining());\n\n  MemBlockBuilder<uint8_t> append;\n  EXPECT_EQ(0, append.capacity());\n  append.setCapacity(7);\n  EXPECT_EQ(7, append.capacity());\n  append.appendOne(8);\n  append.appendOne(9);\n  mem_block.appendBlock(append);\n\n  EXPECT_EQ(5, mem_block.capacityRemaining());\n  EXPECT_EQ((std::vector<uint8_t>{5, 6, 7, 8, 9}), mem_block.span());\n\n  append.appendBlock(mem_block);\n  EXPECT_EQ(0, append.capacityRemaining());\n  uint64_t size = append.size();\n  std::unique_ptr<uint8_t[]> data = append.release();\n  EXPECT_EQ((std::vector<uint8_t>{8, 9, 5, 6, 7, 8, 9}),\n            std::vector<uint8_t>(data.get(), data.get() + size));\n\n  mem_block.reset();\n  EXPECT_EQ(0, mem_block.capacity());\n}\n\nTEST(MemBlockBuilderTest, AppendUint32) {\n  MemBlockBuilder<uint32_t> mem_block(10);\n  EXPECT_EQ(10, mem_block.capacity());\n  mem_block.appendOne(100005);\n  EXPECT_EQ(9, mem_block.capacityRemaining());\n  const uint32_t foo[] = {100006, 100007};\n  mem_block.appendData(absl::MakeConstSpan(foo, ABSL_ARRAYSIZE(foo)));\n  EXPECT_EQ(7, mem_block.capacityRemaining());\n\n  MemBlockBuilder<uint32_t> append;\n  EXPECT_EQ(0, append.capacity());\n  append.setCapacity(7);\n  EXPECT_EQ(7, append.capacity());\n  append.appendOne(100008);\n  append.appendOne(100009);\n  mem_block.appendBlock(append);\n\n  EXPECT_EQ(5, mem_block.capacityRemaining());\n  EXPECT_EQ((std::vector<uint32_t>{100005, 100006, 100007, 100008, 100009}), mem_block.span());\n\n  append.appendBlock(mem_block);\n  EXPECT_EQ(0, 
append.capacityRemaining());\n  uint64_t size = append.size();\n  std::unique_ptr<uint32_t[]> data = append.release();\n  EXPECT_EQ((std::vector<uint32_t>{100008, 100009, 100005, 100006, 100007, 100008, 100009}),\n            std::vector<uint32_t>(data.get(), data.get() + size));\n\n  mem_block.reset();\n  EXPECT_EQ(0, mem_block.capacity());\n}\n\n#ifdef ENVOY_CONFIG_COVERAGE\n// For some reason, this test under coverage generates a list of testdata/*.\nstatic const char expected_death_regex[] = \"\";\n#else\nstatic const char expected_death_regex[] = \".*insufficient capacity.*\";\n#endif\n\nTEST(MemBlockBuilderTest, AppendOneTooMuch) {\n  MemBlockBuilder<uint8_t> mem_block(1);\n  mem_block.appendOne(1);\n  EXPECT_DEATH({ mem_block.appendOne(2); }, expected_death_regex);\n}\n\nTEST(MemBlockBuilderTest, AppendDataTooMuch) {\n  MemBlockBuilder<uint8_t> mem_block(1);\n  const uint8_t foo[] = {1, 2};\n  EXPECT_DEATH({ mem_block.appendData(absl::MakeConstSpan(foo, ABSL_ARRAYSIZE(foo))); },\n               expected_death_regex);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/mutex_tracer_test.cc",
    "content": "#include <chrono>\n#include <thread>\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/mutex_tracer_impl.h\"\n\n#include \"test/test_common/contention.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/mutex.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass MutexTracerTest : public testing::Test {\nprotected:\n  void SetUp() override { tracer_.reset(); }\n\n  // Since MutexTracerImpl::contentionHook is a private method, MutexTracerTest is a friend class.\n  void sendWaitCyclesToContentionHook(int64_t wait_cycles) {\n    tracer_.contentionHook(nullptr, nullptr, wait_cycles);\n  }\n\n  Thread::MutexBasicLockable mu_;\n  MutexTracerImpl& tracer_{MutexTracerImpl::getOrCreateTracer()};\n};\n\n// Call the contention hook manually.\nTEST_F(MutexTracerTest, AddN) {\n  EXPECT_EQ(tracer_.numContentions(), 0);\n  EXPECT_EQ(tracer_.currentWaitCycles(), 0);\n  EXPECT_EQ(tracer_.lifetimeWaitCycles(), 0);\n\n  sendWaitCyclesToContentionHook(2);\n\n  EXPECT_EQ(tracer_.numContentions(), 1);\n  EXPECT_EQ(tracer_.currentWaitCycles(), 2);\n  EXPECT_EQ(tracer_.lifetimeWaitCycles(), 2);\n\n  sendWaitCyclesToContentionHook(3);\n\n  EXPECT_EQ(tracer_.numContentions(), 2);\n  EXPECT_EQ(tracer_.currentWaitCycles(), 3);\n  EXPECT_EQ(tracer_.lifetimeWaitCycles(), 5);\n\n  sendWaitCyclesToContentionHook(0);\n\n  EXPECT_EQ(tracer_.numContentions(), 3);\n  EXPECT_EQ(tracer_.currentWaitCycles(), 0);\n  EXPECT_EQ(tracer_.lifetimeWaitCycles(), 5);\n}\n\n// Call the contention hook in a real contention scenario.\nTEST_F(MutexTracerTest, OneThreadNoContention) {\n  // Regular operation doesn't cause contention.\n  { Thread::LockGuard lock(mu_); }\n\n  EXPECT_EQ(tracer_.numContentions(), 0);\n  EXPECT_EQ(tracer_.currentWaitCycles(), 0);\n  EXPECT_EQ(tracer_.lifetimeWaitCycles(), 0);\n}\n\nTEST_F(MutexTracerTest, TryLockNoContention) {\n  // TryLocks don't cause contention.\n  {\n    Thread::LockGuard lock(mu_);\n    
EXPECT_FALSE(mu_.tryLock());\n  }\n\n  EXPECT_EQ(tracer_.numContentions(), 0);\n  EXPECT_EQ(tracer_.currentWaitCycles(), 0);\n  EXPECT_EQ(tracer_.lifetimeWaitCycles(), 0);\n}\n\nTEST_F(MutexTracerTest, TwoThreadsWithContention) {\n  Api::ApiPtr api = Api::createApiForTest();\n  int64_t prev_num_contentions = tracer_.numContentions();\n  for (int i = 1; i <= 10; ++i) {\n    int64_t curr_num_lifetime_wait_cycles = tracer_.lifetimeWaitCycles();\n\n    Thread::TestUtil::ContentionGenerator contention_generator(*api);\n    contention_generator.generateContention(tracer_);\n    int64_t num_contentions = tracer_.numContentions();\n    EXPECT_LT(prev_num_contentions, num_contentions);\n    prev_num_contentions = num_contentions;\n    EXPECT_GT(tracer_.currentWaitCycles(), 0); // This shouldn't be hardcoded.\n    EXPECT_GT(tracer_.lifetimeWaitCycles(), 0);\n    EXPECT_GT(tracer_.lifetimeWaitCycles(), curr_num_lifetime_wait_cycles);\n  }\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/perf_annotation_disabled_test.cc",
    "content": "// By default, perf is disabled, but enable running tests with it disabled by locally\n// undefing if needed.\n#ifdef ENVOY_PERF_ANNOTATION\n#undef ENVOY_PERF_ANNOTATION\n#endif\n\n#include \"common/common/perf_annotation.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(PerfAnnotationDisabled, testPerfAnnotation) {\n  PERF_OPERATION(perf);\n  PERF_RECORD(perf, \"alpha\", \"0\");\n  PERF_RECORD(perf, \"beta\", \"1\");\n  PERF_RECORD(perf, \"alpha\", \"2\");\n  PERF_RECORD(perf, \"beta\", \"3\");\n  std::string report = PERF_TO_STRING();\n  EXPECT_TRUE(report.empty());\n  PERF_CLEAR();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/perf_annotation_test.cc",
    "content": "// When building from the command line, you can enable perf annotation globally with\n//   bazel --define=perf_annotation=enabled\n// You can also do this in on a per-file basis with by defining the macro manually. You\n// must be sure to do this in the modules where you are collecting the perf annotations\n// (PERF_OPERATION, PERF_RECORD) and also where you are reporting them (PERF_DUMP).\n#ifndef ENVOY_PERF_ANNOTATION\n#define ENVOY_PERF_ANNOTATION\n#endif\n\n#include \"common/common/perf_annotation.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass PerfAnnotationTest : public testing::Test {\nprotected:\n  void TearDown() override { PERF_CLEAR(); }\n};\n\n// Tests that the macros produce something in the report that includes the categories\n// and descriptions.\nTEST_F(PerfAnnotationTest, TestMacros) {\n  PERF_OPERATION(perf);\n  PERF_RECORD(perf, \"alpha\", \"0\");\n  PERF_RECORD(perf, \"beta\", \"1\");\n  PERF_RECORD(perf, \"alpha\", \"2\");\n  PERF_RECORD(perf, \"beta\", \"3\");\n  std::string report = PERF_TO_STRING();\n  EXPECT_TRUE(report.find(\" alpha \") != std::string::npos) << report;\n  EXPECT_TRUE(report.find(\" 0 \") != std::string::npos) << report;\n  EXPECT_TRUE(report.find(\" beta \") != std::string::npos) << report;\n  EXPECT_TRUE(report.find(\" 1 \") != std::string::npos) << report;\n  EXPECT_TRUE(report.find(\" alpha \") != std::string::npos) << report;\n  EXPECT_TRUE(report.find(\" 2 \") != std::string::npos) << report;\n  EXPECT_TRUE(report.find(\" beta \") != std::string::npos) << report;\n  EXPECT_TRUE(report.find(\" 3 \") != std::string::npos) << report;\n  PERF_DUMP();\n}\n\n// More detailed report-format testing, directly using the class.\nTEST_F(PerfAnnotationTest, TestFormat) {\n  PerfAnnotationContext* context = PerfAnnotationContext::getOrCreate();\n  for (int i = 0; i < 4; ++i) {\n    context->record(std::chrono::microseconds{1000 + 100 * i}, \"alpha\", \"1\");\n  }\n  for (int i = 0; i < 3; ++i) {\n    
context->record(std::chrono::microseconds{30 - i}, \"beta\", \"3\");\n  }\n  context->record(std::chrono::microseconds{200}, \"gamma\", \"2\");\n  std::string report = context->toString();\n  EXPECT_EQ(\n      \"Duration(us)  # Calls  Mean(ns)  StdDev(ns)  Min(ns)  Max(ns)  Category  Description\\n\"\n      \"        4600        4   1150000      129099  1000000  1300000  alpha     1          \\n\"\n      \"         200        1    200000         nan   200000   200000  gamma     2          \\n\"\n      \"          87        3     29000        1000    28000    30000  beta      3          \\n\",\n      context->toString());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/phantom_test.cc",
    "content": "#include \"common/common/phantom.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nstruct PhantomTest {};\nstruct PhantomTest2 {};\n\nusing PhantomIntTest = Phantom<uint32_t, PhantomTest>;\nusing PhantomIntTest2 = Phantom<uint32_t, PhantomTest2>;\n\nTEST(PhantomTest, TypeBehavior) {\n  // Should not be possible to implicitly convert from two phantoms with different markers.\n  static_assert(!std::is_convertible<PhantomIntTest, PhantomTest2>::value,\n                \"should not be convertible\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/random_generator_test.cc",
    "content": "#include <numeric>\n\n#include \"common/common/random_generator.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Random {\nnamespace {\n\nTEST(Random, DISABLED_benchmarkRandom) {\n  Random::RandomGeneratorImpl random;\n\n  for (size_t i = 0; i < 1000000000; ++i) {\n    random.random();\n  }\n}\n\nTEST(Random, SanityCheckOfUniquenessRandom) {\n  Random::RandomGeneratorImpl random;\n  std::set<uint64_t> results;\n  const size_t num_of_results = 1000000;\n\n  for (size_t i = 0; i < num_of_results; ++i) {\n    results.insert(random.random());\n  }\n\n  EXPECT_EQ(num_of_results, results.size());\n}\n\nTEST(Random, SanityCheckOfStdLibRandom) {\n  Random::RandomGeneratorImpl random;\n\n  static const auto num_of_items = 100;\n  std::vector<uint64_t> v(num_of_items);\n  std::iota(v.begin(), v.end(), 0);\n\n  static const auto num_of_checks = 10000;\n  for (size_t i = 0; i < num_of_checks; ++i) {\n    const auto prev = v;\n    std::shuffle(v.begin(), v.end(), random);\n    EXPECT_EQ(v.size(), prev.size());\n    EXPECT_NE(v, prev);\n    EXPECT_FALSE(std::is_sorted(v.begin(), v.end()));\n  }\n}\n\nTEST(UUID, CheckLengthOfUUID) {\n  Random::RandomGeneratorImpl random;\n\n  std::string result = random.uuid();\n\n  size_t expected_length = 36;\n  EXPECT_EQ(expected_length, result.length());\n}\n\nTEST(UUID, SanityCheckOfUniqueness) {\n  std::set<std::string> uuids;\n  const size_t num_of_uuids = 100000;\n\n  Random::RandomGeneratorImpl random;\n  for (size_t i = 0; i < num_of_uuids; ++i) {\n    uuids.insert(random.uuid());\n  }\n\n  EXPECT_EQ(num_of_uuids, uuids.size());\n}\n\nTEST(Random, Bernoilli) {\n  Random::RandomGeneratorImpl random;\n\n  EXPECT_FALSE(random.bernoulli(0));\n  EXPECT_FALSE(random.bernoulli(-1));\n  EXPECT_TRUE(random.bernoulli(1));\n  EXPECT_TRUE(random.bernoulli(2));\n\n  int true_count = 0;\n  static const auto num_rolls = 100000;\n  for (size_t i = 0; i < num_rolls; ++i) {\n    if 
(random.bernoulli(0.4)) {\n      ++true_count;\n    }\n  }\n  EXPECT_NEAR(static_cast<double>(true_count) / num_rolls, 0.4, 0.01);\n}\n\n} // namespace\n} // namespace Random\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/regex_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n#include \"envoy/type/matcher/v3/regex.pb.h\"\n\n#include \"common/common/regex.h\"\n\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Regex {\nnamespace {\n\nTEST(Utility, ParseStdRegex) {\n  EXPECT_THROW_WITH_REGEX(Utility::parseStdRegex(\"(+invalid)\"), EnvoyException,\n                          \"Invalid regex '\\\\(\\\\+invalid\\\\)': .+\");\n\n  EXPECT_THROW_WITH_REGEX(Utility::parseStdRegexAsCompiledMatcher(\"(+invalid)\"), EnvoyException,\n                          \"Invalid regex '\\\\(\\\\+invalid\\\\)': .+\");\n\n  {\n    std::regex regex = Utility::parseStdRegex(\"x*\");\n    EXPECT_NE(0, regex.flags() & std::regex::optimize);\n  }\n\n  {\n    std::regex regex = Utility::parseStdRegex(\"x*\", std::regex::icase);\n    EXPECT_NE(0, regex.flags() & std::regex::icase);\n    EXPECT_EQ(0, regex.flags() & std::regex::optimize);\n  }\n\n  {\n    // Regression test to cover high-complexity regular expressions that throw on std::regex_match.\n    // Note that not all std::regex_match implementations will throw when matching against the\n    // expression below, but at least clang 9.0.0 under linux does.\n    auto matcher = Utility::parseStdRegexAsCompiledMatcher(\n        \"|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\");\n    EXPECT_FALSE(matcher->match(\"0\"));\n  }\n}\n\nTEST(Utility, ParseRegex) {\n  {\n    envoy::type::matcher::v3::RegexMatcher matcher;\n    matcher.mutable_google_re2();\n    matcher.set_regex(\"(+invalid)\");\n    EXPECT_THROW_WITH_MESSAGE(Utility::parseRegex(matcher), EnvoyException,\n                              \"no argument for repetition operator: +\");\n  }\n\n  // Regression test for https://github.com/envoyproxy/envoy/issues/7728\n  {\n    envoy::type::matcher::v3::RegexMatcher matcher;\n    
matcher.mutable_google_re2();\n    matcher.set_regex(\"/asdf/.*\");\n    const auto compiled_matcher = Utility::parseRegex(matcher);\n    const std::string long_string = \"/asdf/\" + std::string(50 * 1024, 'a');\n    EXPECT_TRUE(compiled_matcher->match(long_string));\n  }\n\n  // Positive case to ensure no max program size is enforced.\n  {\n    TestScopedRuntime scoped_runtime;\n    envoy::type::matcher::v3::RegexMatcher matcher;\n    matcher.set_regex(\"/asdf/.*\");\n    matcher.mutable_google_re2();\n    EXPECT_NO_THROW(Utility::parseRegex(matcher));\n  }\n\n  // Verify max program size with the deprecated field codepath plus runtime.\n  // The deprecated field codepath precedes any runtime settings.\n  {\n    TestScopedRuntime scoped_runtime;\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"re2.max_program_size.error_level\", \"3\"}});\n    envoy::type::matcher::v3::RegexMatcher matcher;\n    matcher.set_regex(\"/asdf/.*\");\n    matcher.mutable_google_re2()->mutable_max_program_size()->set_value(1);\n#ifndef GTEST_USES_SIMPLE_RE\n    EXPECT_THROW_WITH_REGEX(Utility::parseRegex(matcher), EnvoyException,\n                            \"RE2 program size of [0-9]+ > max program size of 1\\\\.\");\n#else\n    EXPECT_THROW_WITH_REGEX(Utility::parseRegex(matcher), EnvoyException,\n                            \"RE2 program size of \\\\d+ > max program size of 1\\\\.\");\n#endif\n  }\n\n  // Verify that an exception is thrown for the error level max program size.\n  {\n    TestScopedRuntime scoped_runtime;\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"re2.max_program_size.error_level\", \"1\"}});\n    envoy::type::matcher::v3::RegexMatcher matcher;\n    matcher.set_regex(\"/asdf/.*\");\n    matcher.mutable_google_re2();\n#ifndef GTEST_USES_SIMPLE_RE\n    EXPECT_THROW_WITH_REGEX(\n        Utility::parseRegex(matcher), EnvoyException,\n        \"RE2 program size of [0-9]+ > max program size of 1 set for the error level 
threshold\\\\.\");\n#else\n    EXPECT_THROW_WITH_REGEX(\n        Utility::parseRegex(matcher), EnvoyException,\n        \"RE2 program size of \\\\d+ > max program size of 1 set for the error level threshold\\\\.\");\n#endif\n  }\n\n  // Verify that the error level max program size defaults to 100 if not set by runtime.\n  {\n    TestScopedRuntime scoped_runtime;\n    envoy::type::matcher::v3::RegexMatcher matcher;\n    matcher.set_regex(\n        \"/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*\");\n    matcher.mutable_google_re2();\n#ifndef GTEST_USES_SIMPLE_RE\n    EXPECT_THROW_WITH_REGEX(Utility::parseRegex(matcher), EnvoyException,\n                            \"RE2 program size of [0-9]+ > max program size of 100 set for the \"\n                            \"error level threshold\\\\.\");\n#else\n    EXPECT_THROW_WITH_REGEX(\n        Utility::parseRegex(matcher), EnvoyException,\n        \"RE2 program size of \\\\d+ > max program size of 100 set for the error level threshold\\\\.\");\n#endif\n  }\n\n  // Verify that a warning is logged for the warn level max program size.\n  {\n    TestScopedRuntime scoped_runtime;\n    Envoy::Stats::Counter& warn_count =\n        Runtime::LoaderSingleton::getExisting()->getRootScope().counterFromString(\n            \"re2.exceeded_warn_level\");\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"re2.max_program_size.warn_level\", \"1\"}});\n    envoy::type::matcher::v3::RegexMatcher matcher;\n    matcher.set_regex(\"/asdf/.*\");\n    matcher.mutable_google_re2();\n    EXPECT_NO_THROW(Utility::parseRegex(matcher));\n    EXPECT_EQ(1, warn_count.value());\n    EXPECT_LOG_CONTAINS(\"warn\", \"> max program size of 1 set for the warn level threshold\",\n                        Utility::parseRegex(matcher));\n    EXPECT_EQ(2, warn_count.value());\n  }\n\n  // Verify that no check is performed if the warn level max program size is not set by runtime.\n  {\n    TestScopedRuntime 
scoped_runtime;\n    Envoy::Stats::Counter& warn_count =\n        Runtime::LoaderSingleton::getExisting()->getRootScope().counterFromString(\n            \"re2.exceeded_warn_level\");\n    envoy::type::matcher::v3::RegexMatcher matcher;\n    matcher.set_regex(\"/asdf/.*\");\n    matcher.mutable_google_re2();\n    EXPECT_NO_THROW(Utility::parseRegex(matcher));\n    EXPECT_LOG_NOT_CONTAINS(\"warn\", \"> max program size\", Utility::parseRegex(matcher));\n    EXPECT_EQ(0, warn_count.value());\n  }\n}\n\n} // namespace\n} // namespace Regex\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/statusor_test.cc",
    "content": "#include \"common/common/statusor.h\"\n#include \"common/http/status.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(StatusOr, Initialization) {\n  StatusOr<int> statusor(\n      Http::prematureResponseError(\"foobar\", Http::Code::ProxyAuthenticationRequired));\n  EXPECT_FALSE(statusor.ok());\n  EXPECT_TRUE(Http::isPrematureResponseError(statusor.status()));\n  EXPECT_EQ(\"foobar\", statusor.status().message());\n  EXPECT_EQ(Http::Code::ProxyAuthenticationRequired,\n            Http::getPrematureResponseHttpCode(statusor.status()));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/stl_helpers_test.cc",
    "content": "#include <sstream>\n\n#include \"common/common/stl_helpers.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(StlHelpersTest, TestOutputToStreamOperator) {\n  std::stringstream os;\n  std::vector<int> v{1, 2, 3, 4, 5};\n  os << v;\n  EXPECT_EQ(\"vector { 1, 2, 3, 4, 5 }\", os.str());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/thread_id_test.cc",
    "content": "#include \"common/common/thread.h\"\n\n#include \"test/test_common/thread_factory_for_test.h\"\n\n#include \"absl/hash/hash_testing.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace {\n\nTEST(ThreadId, Equality) {\n  auto& thread_factory = Thread::threadFactoryForTest();\n\n  Thread::ThreadId main_thread = thread_factory.currentThreadId();\n  Thread::ThreadId background_thread;\n  Thread::ThreadId null_thread;\n\n  Thread::threadFactoryForTest()\n      .createThread([&]() { background_thread = thread_factory.currentThreadId(); })\n      ->join();\n\n  EXPECT_EQ(main_thread, main_thread);\n  EXPECT_EQ(background_thread, background_thread);\n  EXPECT_EQ(null_thread, null_thread);\n\n  EXPECT_NE(main_thread, background_thread);\n  EXPECT_NE(main_thread, null_thread);\n  EXPECT_NE(background_thread, null_thread);\n}\n\nTEST(ThreadId, Hashability) {\n  auto& thread_factory = Thread::threadFactoryForTest();\n\n  Thread::ThreadId main_thread = thread_factory.currentThreadId();\n  Thread::ThreadId background_thread;\n  Thread::ThreadId null_thread;\n\n  Thread::threadFactoryForTest()\n      .createThread([&]() { background_thread = thread_factory.currentThreadId(); })\n      ->join();\n\n  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({\n      null_thread,\n      main_thread,\n      background_thread,\n  }));\n}\n\nTEST(ThreadId, CanGetId) {\n  Thread::ThreadId tid(10);\n  EXPECT_EQ(tid.getId(), 10);\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/thread_test.cc",
    "content": "#include <functional>\n\n#include \"common/common/thread.h\"\n#include \"common/common/thread_synchronizer.h\"\n\n#include \"test/test_common/thread_factory_for_test.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/synchronization/notification.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Thread {\nnamespace {\n\nclass ThreadAsyncPtrTest : public testing::Test {\nprotected:\n  ThreadFactory& thread_factory_{threadFactoryForTest()};\n};\n\n// Tests that two threads racing to create an object have well-defined\n// behavior.\nTEST_F(ThreadAsyncPtrTest, DeleteOnDestruct) {\n  AtomicPtr<std::string, AtomicPtrAllocMode::DeleteOnDestruct> str;\n  ThreadSynchronizer sync;\n  sync.enable();\n  sync.waitOn(\"creator\");\n\n  // On thread1, we will lazily instantiate the string as \"thread1\". However\n  // in the creation function we will block on a sync-point.\n  auto thread1 = thread_factory_.createThread(\n      [&str, &sync]() {\n        str.get([&sync]() -> std::string* {\n          sync.syncPoint(\"creator\");\n          return new std::string(\"thread1\");\n        });\n      },\n      Options{\"thread1\"});\n  EXPECT_EQ(\"thread1\", thread1->name());\n\n  sync.barrierOn(\"creator\");\n\n  // Now spawn a separate thread that will attempt to lazy-initialize the\n  // string as \"thread2\", but that allocator will never run because\n  // the allocator on thread1 has already locked the AtomicPtr's mutex.\n  auto thread2 = thread_factory_.createThread(\n      [&str]() { str.get([]() -> std::string* { return new std::string(\"thread2\"); }); },\n      Options{\"thread2\"});\n  EXPECT_EQ(\"thread2\", thread2->name());\n\n  // Now let thread1's initializer finish.\n  sync.signal(\"creator\");\n  thread1->join();\n  thread2->join();\n\n  // Now ensure the \"thread1\" value sticks past the thread lifetimes.\n  bool called = false;\n  EXPECT_EQ(\"thread1\", *str.get([&called]() -> std::string* {\n    
called = true;\n    return nullptr;\n  }));\n  EXPECT_FALSE(called);\n}\n\n// Same test as AtomicPtrDeleteOnDestruct, except the allocator callbacks return\n// pointers to locals, rather than allocating the strings on the heap.\nTEST_F(ThreadAsyncPtrTest, DoNotDelete) {\n  const std::string thread1_str(\"thread1\");\n  const std::string thread2_str(\"thread2\");\n  AtomicPtr<const std::string, AtomicPtrAllocMode::DoNotDelete> str;\n  ThreadSynchronizer sync;\n  sync.enable();\n  sync.waitOn(\"creator\");\n\n  // On thread1, we will lazily instantiate the string as \"thread1\". However\n  // in the creation function we will block on a sync-point.\n  auto thread1 = thread_factory_.createThread(\n      [&str, &sync, &thread1_str]() {\n        str.get([&sync, &thread1_str]() -> const std::string* {\n          sync.syncPoint(\"creator\");\n          return &thread1_str;\n        });\n      },\n      Options{\"thread1\"});\n\n  sync.barrierOn(\"creator\");\n\n  // Now spawn a separate thread that will attempt to lazy-initialize the\n  // string as \"thread2\", but that allocator will never run because\n  // the allocator on thread1 has already locked the AtomicPtr's mutex.\n  auto thread2 = thread_factory_.createThread(\n      [&str, &thread2_str]() {\n        str.get([&thread2_str]() -> const std::string* { return &thread2_str; });\n      },\n      Options{\"thread2\"});\n\n  // Now let thread1's initializer finish.\n  sync.signal(\"creator\");\n  thread1->join();\n  thread2->join();\n\n  // Now ensure the \"thread1\" value sticks past the thread lifetimes.\n  bool called = false;\n  EXPECT_EQ(\"thread1\", *str.get([&called]() -> std::string* {\n    called = true;\n    return nullptr;\n  }));\n  EXPECT_FALSE(called);\n}\n\nTEST_F(ThreadAsyncPtrTest, ThreadSpammer) {\n  AtomicPtr<std::string, AtomicPtrAllocMode::DeleteOnDestruct> str;\n  absl::Notification go;\n  constexpr uint32_t num_threads = 100;\n  AtomicPtr<uint32_t, AtomicPtrAllocMode::DeleteOnDestruct> answer;\n  
uint32_t calls = 0;\n  auto thread_fn = [&go, &answer, &calls]() {\n    go.WaitForNotification();\n    answer.get([&calls]() {\n      ++calls;\n      return new uint32_t(42);\n    });\n  };\n  std::vector<ThreadPtr> threads;\n  for (uint32_t i = 0; i < num_threads; ++i) {\n    std::string name = absl::StrCat(\"thread\", i);\n    threads.emplace_back(thread_factory_.createThread(thread_fn, Options{name}));\n    EXPECT_EQ(name, threads.back()->name());\n  }\n  EXPECT_EQ(0, calls);\n  go.Notify();\n  for (auto& thread : threads) {\n    thread->join();\n  }\n  EXPECT_EQ(1, calls);\n  EXPECT_EQ(42, *answer.get([&calls]() {\n    ++calls;\n    return nullptr;\n  }));\n  EXPECT_EQ(1, calls);\n}\n\n// Tests that null can be allocated, but the allocator will be re-called each\n// time until a non-null result is returned.\nTEST_F(ThreadAsyncPtrTest, Null) {\n  AtomicPtr<std::string, AtomicPtrAllocMode::DeleteOnDestruct> str;\n  uint32_t calls = 0;\n  EXPECT_EQ(nullptr, str.get([&calls]() -> std::string* {\n    ++calls;\n    return nullptr;\n  }));\n  EXPECT_EQ(nullptr, str.get([&calls]() -> std::string* {\n    ++calls;\n    return nullptr;\n  }));\n  EXPECT_EQ(2, calls);\n  EXPECT_EQ(\"x\", *str.get([&calls]() -> std::string* {\n    ++calls;\n    return new std::string(\"x\");\n  }));\n  EXPECT_EQ(3, calls);\n  EXPECT_EQ(\"x\", *str.get([&calls]() -> std::string* {\n    ++calls;\n    return nullptr;\n  }));\n  EXPECT_EQ(3, calls); // allocator was not called this last time.\n}\n\n// Tests array semantics. 
Note that AtomicPtr is implemented a 1-element\n// AtomicPtrArray, so there's no need to repeat the complex thread-race test\n// from AtomicPtr.\nTEST_F(ThreadAsyncPtrTest, Array) {\n  const uint32_t size = 5;\n  AtomicPtrArray<std::string, size, AtomicPtrAllocMode::DeleteOnDestruct> strs;\n  for (uint32_t i = 0; i < size; ++i) {\n    std::string val = absl::StrCat(\"x\", i);\n    EXPECT_EQ(val, *strs.get(i, [&val]() -> std::string* { return new std::string(val); }));\n  }\n  for (uint32_t i = 0; i < size; ++i) {\n    std::string val = absl::StrCat(\"x\", i);\n    // Second time through the array, the allocator will not be called, but\n    // we'll have all the expected values returned from get.\n    bool called = false;\n    EXPECT_EQ(val, *strs.get(i, [&called]() -> std::string* {\n      called = true;\n      return nullptr;\n    }));\n    EXPECT_FALSE(called);\n  }\n}\n\nTEST_F(ThreadAsyncPtrTest, ManagedAlloc) {\n  const uint32_t size = 5;\n  std::vector<std::unique_ptr<std::string>> pool;\n  AtomicPtrArray<std::string, size, AtomicPtrAllocMode::DoNotDelete> strs;\n  for (uint32_t i = 0; i < size; ++i) {\n    std::string val = absl::StrCat(\"x\", i);\n    EXPECT_EQ(val, *strs.get(i, [&pool, &val]() -> std::string* {\n      pool.emplace_back(std::make_unique<std::string>(val));\n      return pool.back().get();\n    }));\n  }\n}\n\nTEST_F(ThreadAsyncPtrTest, TruncateWait) {\n  absl::Notification notify;\n  auto thread = thread_factory_.createThread([&notify]() { notify.WaitForNotification(); },\n                                             Options{\"this name is way too long for posix\"});\n  notify.Notify();\n\n  // To make this test work on multiple platforms, just assume the first 10 characters\n  // are retained.\n  EXPECT_THAT(thread->name(), testing::StartsWith(\"this name \"));\n  thread->join();\n}\n\nTEST_F(ThreadAsyncPtrTest, TruncateNoWait) {\n  auto thread =\n      thread_factory_.createThread([]() {}, Options{\"this name is way too long for 
posix\"});\n\n  // In general, across platforms, just assume the first 10 characters are\n  // retained.\n  EXPECT_THAT(thread->name(), testing::StartsWith(\"this name \"));\n\n  // On Linux we can check for 15 exactly.\n#ifdef __linux__\n  EXPECT_EQ(\"this name is wa\", thread->name()) << \"truncated to 15 chars\";\n#endif\n\n  thread->join();\n}\n\nTEST_F(ThreadAsyncPtrTest, NameNotSpecifiedWait) {\n  absl::Notification notify;\n  auto thread = thread_factory_.createThread([&notify]() { notify.WaitForNotification(); });\n  notify.Notify();\n\n  // For linux builds, the thread name defaults to the name of the\n  // binary. However the name of the binary is different depending on whether\n  // this is a coverage test or not. Currently, this population does not occur\n  // for Mac or Windows.\n#ifdef __linux__\n  EXPECT_FALSE(thread->name().empty());\n#endif\n  thread->join();\n}\n\n} // namespace\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/token_bucket_impl_test.cc",
    "content": "#include <chrono>\n\n#include \"common/common/token_bucket_impl.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass TokenBucketImplTest : public testing::Test {\nprotected:\n  Event::SimulatedTimeSystem time_system_;\n};\n\n// Verifies TokenBucket initialization.\nTEST_F(TokenBucketImplTest, Initialization) {\n  TokenBucketImpl token_bucket{1, time_system_, -1.0};\n\n  EXPECT_EQ(1, token_bucket.consume(1, false));\n  EXPECT_EQ(0, token_bucket.consume(1, false));\n}\n\n// Verifies TokenBucket's maximum capacity.\nTEST_F(TokenBucketImplTest, MaxBucketSize) {\n  TokenBucketImpl token_bucket{3, time_system_, 1};\n\n  EXPECT_EQ(3, token_bucket.consume(3, false));\n  time_system_.setMonotonicTime(std::chrono::seconds(10));\n  EXPECT_EQ(0, token_bucket.consume(4, false));\n  EXPECT_EQ(3, token_bucket.consume(3, false));\n}\n\n// Verifies that TokenBucket can consume tokens.\nTEST_F(TokenBucketImplTest, Consume) {\n  TokenBucketImpl token_bucket{10, time_system_, 1};\n\n  EXPECT_EQ(0, token_bucket.consume(20, false));\n  EXPECT_EQ(9, token_bucket.consume(9, false));\n\n  EXPECT_EQ(1, token_bucket.consume(1, false));\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(999));\n  EXPECT_EQ(0, token_bucket.consume(1, false));\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(5999));\n  EXPECT_EQ(0, token_bucket.consume(6, false));\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(6000));\n  EXPECT_EQ(6, token_bucket.consume(6, false));\n  EXPECT_EQ(0, token_bucket.consume(1, false));\n}\n\n// Verifies that TokenBucket can refill tokens.\nTEST_F(TokenBucketImplTest, Refill) {\n  TokenBucketImpl token_bucket{1, time_system_, 0.5};\n  EXPECT_EQ(1, token_bucket.consume(1, false));\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(500));\n  EXPECT_EQ(0, token_bucket.consume(1, false));\n  time_system_.setMonotonicTime(std::chrono::milliseconds(1500));\n  
EXPECT_EQ(0, token_bucket.consume(1, false));\n  time_system_.setMonotonicTime(std::chrono::milliseconds(2000));\n  EXPECT_EQ(1, token_bucket.consume(1, false));\n}\n\nTEST_F(TokenBucketImplTest, NextTokenAvailable) {\n  TokenBucketImpl token_bucket{10, time_system_, 5};\n  EXPECT_EQ(9, token_bucket.consume(9, false));\n  EXPECT_EQ(std::chrono::milliseconds(0), token_bucket.nextTokenAvailable());\n  EXPECT_EQ(1, token_bucket.consume(1, false));\n  EXPECT_EQ(0, token_bucket.consume(1, false));\n  EXPECT_EQ(std::chrono::milliseconds(200), token_bucket.nextTokenAvailable());\n}\n\n// Test partial consumption of tokens.\nTEST_F(TokenBucketImplTest, PartialConsumption) {\n  TokenBucketImpl token_bucket{16, time_system_, 16};\n  EXPECT_EQ(16, token_bucket.consume(18, true));\n  EXPECT_EQ(std::chrono::milliseconds(63), token_bucket.nextTokenAvailable());\n  time_system_.advanceTimeWait(std::chrono::milliseconds(62));\n  EXPECT_EQ(0, token_bucket.consume(1, true));\n  time_system_.advanceTimeWait(std::chrono::milliseconds(1));\n  EXPECT_EQ(1, token_bucket.consume(2, true));\n  EXPECT_EQ(std::chrono::milliseconds(63), token_bucket.nextTokenAvailable());\n}\n\n// Test reset functionality.\nTEST_F(TokenBucketImplTest, Reset) {\n  TokenBucketImpl token_bucket{16, time_system_, 16};\n  token_bucket.reset(1);\n  EXPECT_EQ(1, token_bucket.consume(2, true));\n  EXPECT_EQ(std::chrono::milliseconds(63), token_bucket.nextTokenAvailable());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/utility_corpus/test",
    "content": "hello world\n"
  },
  {
    "path": "test/common/common/utility_fuzz_test.cc",
    "content": "#include \"common/common/utility.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\nnamespace {\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  {\n    uint64_t out;\n    const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n    StringUtil::atoull(string_buffer.c_str(), out);\n  }\n  {\n    const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n    StringUtil::escape(string_buffer);\n  }\n  {\n    const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n    StringUtil::toUpper(string_buffer);\n  }\n  {\n    const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n    StringUtil::trim(string_buffer);\n  }\n  {\n    const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n    StringUtil::ltrim(string_buffer);\n  }\n  {\n    const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n    StringUtil::rtrim(string_buffer);\n  }\n  if (len > 0) {\n    const size_t split_point = *reinterpret_cast<const uint8_t*>(buf) % len;\n    //  (string_buffer.substr(0, split_point), string_buffer.substr(split_point))\n    //  @param1: substring of buffer from beginning to split_point\n    //  @param2: substring of buffer from split_point to end of the string\n    {\n      const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n      absl::EndsWith(string_buffer.substr(0, split_point), string_buffer.substr(split_point));\n    }\n    {\n      const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n      StringUtil::cropLeft(string_buffer.substr(0, split_point), string_buffer.substr(split_point));\n    }\n    {\n      const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n      StringUtil::cropRight(string_buffer.substr(0, split_point),\n                            
string_buffer.substr(split_point));\n    }\n    {\n      const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n\n      // sample random bit to use as the whitespace flag\n      bool trimWhitespace = split_point & 1;\n      const size_t split_point2 =\n          len > 1 ? reinterpret_cast<const uint8_t*>(buf)[1] % len : split_point;\n      const size_t split1 = std::min(split_point, split_point2);\n      const size_t split2 = std::max(split_point, split_point2);\n\n      StringUtil::findToken(string_buffer.substr(0, split1),\n                            string_buffer.substr(split1, split2 - split2),\n                            string_buffer.substr(split2), trimWhitespace);\n    }\n  }\n}\n\n} // namespace\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/utility_speed_test.cc",
    "content": "// Note: this should be run with --compilation_mode=opt, and would benefit from a\n// quiescent system with disabled cstate power management.\n\n#include <random>\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\n\nstatic const char TextToTrim[] = \"\\t  the quick brown fox jumps over the lazy dog\\n\\r\\n\";\nstatic size_t TextToTrimLength = sizeof(TextToTrim) - 1;\n\nstatic const char AlreadyTrimmed[] = \"the quick brown fox jumps over the lazy dog\";\nstatic size_t AlreadyTrimmedLength = sizeof(AlreadyTrimmed) - 1;\n\nstatic const char CacheControl[] = \"private, max-age=300, no-transform\";\nstatic size_t CacheControlLength = sizeof(CacheControl) - 1;\n\n// NOLINT(namespace-envoy)\n\nstatic void BM_AccessLogDateTimeFormatter(benchmark::State& state) {\n  int outputBytes = 0;\n\n  // Generate a sequence of times for which the delta between each successive\n  // pair of times is uniformly distributed in the range (-10ms, 20ms).\n  // This is meant to simulate the situation where requests handled at\n  // approximately the same time may get logged out of order.\n  static Envoy::SystemTime time(std::chrono::seconds(1522796769));\n  static std::mt19937 prng(1); // PRNG with a fixed seed, for repeatability\n  static std::uniform_int_distribution<long> distribution(-10, 20);\n  for (auto _ : state) {\n    // TODO(brian-pane): The next line, which computes the next input timestamp,\n    // currently accounts for ~30% of the CPU time of this benchmark test. 
If\n    // the AccessLogDateTimeFormatter implementation is optimized further, we\n    // should precompute a sequence of input timestamps so the benchmark's own\n    // overhead won't obscure changes in the speed of the code being benchmarked.\n    time += std::chrono::milliseconds(static_cast<int>(distribution(prng)));\n    outputBytes += Envoy::AccessLogDateTimeFormatter::fromTime(time).length();\n  }\n  benchmark::DoNotOptimize(outputBytes);\n}\nBENCHMARK(BM_AccessLogDateTimeFormatter);\n\n// This benchmark is basically similar with the above BM_AccessLogDateTimeFormatter, the only\n// difference is the format string input for the Envoy::DateFormatter.\nstatic void BM_DateTimeFormatterWithSubseconds(benchmark::State& state) {\n  int outputBytes = 0;\n\n  Envoy::SystemTime time(std::chrono::seconds(1522796769));\n  std::mt19937 prng(1);\n  std::uniform_int_distribution<long> distribution(-10, 20);\n  Envoy::DateFormatter date_formatter(\"%Y-%m-%dT%H:%M:%s.%3f\");\n  for (auto _ : state) {\n    time += std::chrono::milliseconds(static_cast<int>(distribution(prng)));\n    outputBytes += date_formatter.fromTime(time).length();\n  }\n  benchmark::DoNotOptimize(outputBytes);\n}\nBENCHMARK(BM_DateTimeFormatterWithSubseconds);\n\n// This benchmark is basically similar with the above BM_DateTimeFormatterWithSubseconds, the\n// differences are: 1. the format string input is long with duplicated subseconds. 2. 
The purpose\n// is to test DateFormatter.parse() which is called in constructor.\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_DateTimeFormatterWithLongSubsecondsString(benchmark::State& state) {\n  int outputBytes = 0;\n\n  Envoy::SystemTime time(std::chrono::seconds(1522796769));\n  std::mt19937 prng(1);\n  std::uniform_int_distribution<long> distribution(-10, 20);\n  std::string input;\n  int num_duplicates = 400;\n  std::string duplicate_input = \"%%1f %1f, %2f, %3f, %4f, \";\n  for (int i = 0; i < num_duplicates; i++) {\n    absl::StrAppend(&input, duplicate_input, \"(\");\n  }\n  absl::StrAppend(&input, duplicate_input);\n\n  for (auto _ : state) {\n    Envoy::DateFormatter date_formatter(input);\n    time += std::chrono::milliseconds(static_cast<int>(distribution(prng)));\n    outputBytes += date_formatter.fromTime(time).length();\n  }\n  benchmark::DoNotOptimize(outputBytes);\n}\nBENCHMARK(BM_DateTimeFormatterWithLongSubsecondsString);\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_DateTimeFormatterWithoutSubseconds(benchmark::State& state) {\n  int outputBytes = 0;\n\n  Envoy::SystemTime time(std::chrono::seconds(1522796769));\n  std::mt19937 prng(1);\n  std::uniform_int_distribution<long> distribution(-10, 20);\n  Envoy::DateFormatter date_formatter(\"%Y-%m-%dT%H:%M:%s\");\n  for (auto _ : state) {\n    time += std::chrono::milliseconds(static_cast<int>(distribution(prng)));\n    outputBytes += date_formatter.fromTime(time).length();\n  }\n  benchmark::DoNotOptimize(outputBytes);\n}\nBENCHMARK(BM_DateTimeFormatterWithoutSubseconds);\n\nstatic void BM_RTrimStringView(benchmark::State& state) {\n  int accum = 0;\n  for (auto _ : state) {\n    absl::string_view text(TextToTrim, TextToTrimLength);\n    text = Envoy::StringUtil::rtrim(text);\n    accum += TextToTrimLength - text.size();\n  }\n  benchmark::DoNotOptimize(accum);\n}\nBENCHMARK(BM_RTrimStringView);\n\nstatic void 
BM_RTrimStringViewAlreadyTrimmed(benchmark::State& state) {\n  int accum = 0;\n  for (auto _ : state) {\n    absl::string_view text(AlreadyTrimmed, AlreadyTrimmedLength);\n    text = Envoy::StringUtil::rtrim(text);\n    accum += AlreadyTrimmedLength - text.size();\n  }\n  benchmark::DoNotOptimize(accum);\n}\nBENCHMARK(BM_RTrimStringViewAlreadyTrimmed);\n\nstatic void BM_RTrimStringViewAlreadyTrimmedAndMakeString(benchmark::State& state) {\n  int accum = 0;\n  for (auto _ : state) {\n    absl::string_view text(AlreadyTrimmed, AlreadyTrimmedLength);\n    std::string string_copy = std::string(Envoy::StringUtil::rtrim(text));\n    accum += AlreadyTrimmedLength - string_copy.size();\n  }\n  benchmark::DoNotOptimize(accum);\n}\nBENCHMARK(BM_RTrimStringViewAlreadyTrimmedAndMakeString);\n\nstatic void BM_FindToken(benchmark::State& state) {\n  const absl::string_view cache_control(CacheControl, CacheControlLength);\n  for (auto _ : state) {\n    RELEASE_ASSERT(Envoy::StringUtil::findToken(cache_control, \",\", \"no-transform\"), \"\");\n  }\n}\nBENCHMARK(BM_FindToken);\n\nstatic bool nextToken(absl::string_view& str, char delim, bool strip_whitespace,\n                      absl::string_view* token) {\n  while (!str.empty()) {\n    absl::string_view::size_type pos = str.find(delim);\n    if (pos == absl::string_view::npos) {\n      *token = str.substr(0, str.size());\n      str.remove_prefix(str.size()); // clears str\n    } else {\n      *token = str.substr(0, pos);\n      str.remove_prefix(pos + 1); // move past token and delim\n    }\n    if (strip_whitespace) {\n      *token = Envoy::StringUtil::trim(*token);\n    }\n    if (!token->empty()) {\n      return true;\n    }\n  }\n  return false;\n}\n\n// Experimental alternative implementation of StringUtil::findToken which doesn't create\n// a temp vector, but just iterates through the string_view, tokenizing, and matching against\n// the token we want. 
It appears to be about 2.5x to 3x faster on this testcase.\nstatic bool findTokenWithoutSplitting(absl::string_view str, char delim, absl::string_view token,\n                                      bool strip_whitespace) {\n  for (absl::string_view tok; nextToken(str, delim, strip_whitespace, &tok);) {\n    if (tok == token) {\n      return true;\n    }\n  }\n  return false;\n}\n\nstatic void BM_FindTokenWithoutSplitting(benchmark::State& state) {\n  const absl::string_view cache_control(CacheControl, CacheControlLength);\n  for (auto _ : state) {\n    RELEASE_ASSERT(findTokenWithoutSplitting(cache_control, ',', \"no-transform\", true), \"\");\n  }\n}\nBENCHMARK(BM_FindTokenWithoutSplitting);\n\nstatic void BM_FindTokenValueNestedSplit(benchmark::State& state) {\n  const absl::string_view cache_control(CacheControl, CacheControlLength);\n  absl::string_view max_age;\n  for (auto _ : state) {\n    for (absl::string_view token : Envoy::StringUtil::splitToken(cache_control, \",\")) {\n      auto name_value = Envoy::StringUtil::splitToken(token, \"=\");\n      if ((name_value.size() == 2) && (Envoy::StringUtil::trim(name_value[0]) == \"max-age\")) {\n        max_age = Envoy::StringUtil::trim(name_value[1]);\n      }\n    }\n    RELEASE_ASSERT(max_age == \"300\", \"\");\n  }\n}\nBENCHMARK(BM_FindTokenValueNestedSplit);\n\nstatic void BM_FindTokenValueSearchForEqual(benchmark::State& state) {\n  for (auto _ : state) {\n    const absl::string_view cache_control(CacheControl, CacheControlLength);\n    absl::string_view max_age;\n    for (absl::string_view token : Envoy::StringUtil::splitToken(cache_control, \",\")) {\n      absl::string_view::size_type equals = token.find('=');\n      if (equals != absl::string_view::npos &&\n          Envoy::StringUtil::trim(token.substr(0, equals)) == \"max-age\") {\n        max_age = Envoy::StringUtil::trim(token.substr(equals + 1));\n      }\n    }\n    RELEASE_ASSERT(max_age == \"300\", \"\");\n  
}\n}\nBENCHMARK(BM_FindTokenValueSearchForEqual);\n\nstatic void BM_FindTokenValueNoSplit(benchmark::State& state) {\n  for (auto _ : state) {\n    absl::string_view cache_control(CacheControl, CacheControlLength);\n    absl::string_view max_age;\n    for (absl::string_view token; nextToken(cache_control, ',', true, &token);) {\n      absl::string_view name;\n      if (nextToken(token, '=', true, &name) && (name == \"max-age\")) {\n        max_age = Envoy::StringUtil::trim(token);\n      }\n    }\n    RELEASE_ASSERT(max_age == \"300\", \"\");\n  }\n}\nBENCHMARK(BM_FindTokenValueNoSplit);\n\nstatic void BM_RemoveTokensLong(benchmark::State& state) {\n  auto size = state.range(0);\n  std::string input(size, ',');\n  std::vector<std::string> to_remove;\n  StringUtil::CaseUnorderedSet to_remove_set;\n  for (decltype(size) i = 0; i < size; i++) {\n    to_remove.push_back(std::to_string(i));\n  }\n  for (int i = 0; i < size; i++) {\n    if (i & 1) {\n      to_remove_set.insert(to_remove[i]);\n    }\n    input.append(\",\");\n    input.append(to_remove[i]);\n  }\n  for (auto _ : state) {\n    Envoy::StringUtil::removeTokens(input, \",\", to_remove_set, \",\");\n    state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * input.size());\n  }\n}\nBENCHMARK(BM_RemoveTokensLong)->Range(8, 8 << 10);\n\nstatic void BM_IntervalSetInsert17(benchmark::State& state) {\n  for (auto _ : state) {\n    Envoy::IntervalSetImpl<size_t> interval_set;\n    interval_set.insert(7, 10);\n    interval_set.insert(-2, -1);\n    interval_set.insert(22, 23);\n    interval_set.insert(8, 15);\n    interval_set.insert(5, 12);\n    interval_set.insert(3, 3);\n    interval_set.insert(3, 4);\n    interval_set.insert(2, 4);\n    interval_set.insert(3, 6);\n    interval_set.insert(18, 19);\n    interval_set.insert(16, 17);\n    interval_set.insert(19, 20);\n    interval_set.insert(3, 6);\n    interval_set.insert(3, 20);\n    interval_set.insert(3, 22);\n    interval_set.insert(23, 
9223372036854775806UL);\n    interval_set.insert(24, 9223372036854775805UL);\n  }\n}\nBENCHMARK(BM_IntervalSetInsert17);\n\nstatic void BM_IntervalSet4ToVector(benchmark::State& state) {\n  Envoy::IntervalSetImpl<size_t> interval_set;\n  interval_set.insert(7, 10);\n  interval_set.insert(-2, -1);\n  interval_set.insert(22, 23);\n  interval_set.insert(8, 15);\n  for (auto _ : state) {\n    benchmark::DoNotOptimize(interval_set.toVector());\n  }\n}\nBENCHMARK(BM_IntervalSet4ToVector);\n\nstatic void BM_IntervalSet50ToVector(benchmark::State& state) {\n  Envoy::IntervalSetImpl<size_t> interval_set;\n  for (size_t i = 0; i < 100; i += 2) {\n    interval_set.insert(i, i + 1);\n  }\n  for (auto _ : state) {\n    benchmark::DoNotOptimize(interval_set.toVector());\n  }\n}\nBENCHMARK(BM_IntervalSet50ToVector);\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/utility_test.cc",
    "content": "#include <chrono>\n#include <cmath>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/common/utility.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::ContainerEq;\n#ifdef WIN32\nusing testing::HasSubstr;\nusing testing::Not;\n#endif\n\nnamespace Envoy {\n\nTEST(StringUtil, strtoull) {\n  uint64_t out;\n  const char* rest;\n\n  static const char* test_str = \"12345b\";\n  rest = StringUtil::strtoull(test_str, out);\n  EXPECT_NE(nullptr, rest);\n  EXPECT_EQ('b', *rest);\n  EXPECT_EQ(&test_str[5], rest);\n  EXPECT_EQ(12345U, out);\n\n  EXPECT_EQ(nullptr, StringUtil::strtoull(\"\", out));\n  EXPECT_EQ(nullptr, StringUtil::strtoull(\"b123\", out));\n\n  rest = StringUtil::strtoull(\"123\", out);\n  EXPECT_NE(nullptr, rest);\n  EXPECT_EQ('\\0', *rest);\n  EXPECT_EQ(123U, out);\n\n  EXPECT_NE(nullptr, StringUtil::strtoull(\"  456\", out));\n  EXPECT_EQ(456U, out);\n\n  EXPECT_NE(nullptr, StringUtil::strtoull(\"00789\", out));\n  EXPECT_EQ(789U, out);\n\n  // Hex\n  rest = StringUtil::strtoull(\"0x1234567890abcdefg\", out, 16);\n  EXPECT_NE(nullptr, rest);\n  EXPECT_EQ('g', *rest);\n  EXPECT_EQ(0x1234567890abcdefU, out);\n\n  // Explicit decimal\n  rest = StringUtil::strtoull(\"01234567890A\", out, 10);\n  EXPECT_NE(nullptr, rest);\n  EXPECT_EQ('A', *rest);\n  EXPECT_EQ(1234567890U, out);\n\n  // Octal\n  rest = StringUtil::strtoull(\"012345678\", out, 8);\n  EXPECT_NE(nullptr, rest);\n  EXPECT_EQ('8', *rest);\n  EXPECT_EQ(01234567U, out);\n\n  // Binary\n  rest = StringUtil::strtoull(\"01010101012\", out, 2);\n  EXPECT_NE(nullptr, rest);\n  EXPECT_EQ('2', *rest);\n  EXPECT_EQ(0b101010101U, out);\n\n  // Verify subsequent call to strtoull 
succeeds after the first one\n  // failed due to errno ERANGE\n  EXPECT_EQ(nullptr, StringUtil::strtoull(\"18446744073709551616\", out));\n  EXPECT_NE(nullptr, StringUtil::strtoull(\"18446744073709551615\", out));\n  EXPECT_EQ(18446744073709551615U, out);\n}\n\nTEST(StringUtil, atoull) {\n  uint64_t out;\n  EXPECT_FALSE(StringUtil::atoull(\"123b\", out));\n  EXPECT_FALSE(StringUtil::atoull(\"\", out));\n  EXPECT_FALSE(StringUtil::atoull(\"b123\", out));\n\n  EXPECT_TRUE(StringUtil::atoull(\"123\", out));\n  EXPECT_EQ(123U, out);\n\n  EXPECT_TRUE(StringUtil::atoull(\"  456\", out));\n  EXPECT_EQ(456U, out);\n\n  EXPECT_TRUE(StringUtil::atoull(\"00789\", out));\n  EXPECT_EQ(789U, out);\n\n  // Verify subsequent call to atoull succeeds after the first one\n  // failed due to errno ERANGE\n  EXPECT_FALSE(StringUtil::atoull(\"18446744073709551616\", out));\n  EXPECT_TRUE(StringUtil::atoull(\"18446744073709551615\", out));\n  EXPECT_EQ(18446744073709551615U, out);\n}\n\nTEST(DateUtil, All) {\n  EXPECT_FALSE(DateUtil::timePointValid(SystemTime()));\n  DangerousDeprecatedTestTime test_time;\n  EXPECT_TRUE(DateUtil::timePointValid(test_time.timeSystem().systemTime()));\n}\n\nTEST(DateUtil, NowToMilliseconds) {\n  Event::SimulatedTimeSystem test_time;\n  const SystemTime time_with_millis(std::chrono::seconds(12345) + std::chrono::milliseconds(67));\n  test_time.setSystemTime(time_with_millis);\n  EXPECT_EQ(12345067, DateUtil::nowToMilliseconds(test_time));\n}\n\nTEST(InputConstMemoryStream, All) {\n  {\n    InputConstMemoryStream istream{nullptr, 0};\n    std::string s;\n    istream >> s;\n    EXPECT_TRUE(s.empty());\n    EXPECT_TRUE(istream.eof());\n  }\n\n  {\n    std::string data{\"123\"};\n    InputConstMemoryStream istream{data.data(), data.size()};\n    int x;\n    istream >> x;\n    EXPECT_EQ(123, x);\n    EXPECT_TRUE(istream.eof());\n  }\n}\n\nTEST(StringUtil, WhitespaceChars) {\n  EXPECT_NE(nullptr, strchr(StringUtil::WhitespaceChars, ' '));\n  EXPECT_NE(nullptr, 
strchr(StringUtil::WhitespaceChars, '\\t'));\n  EXPECT_NE(nullptr, strchr(StringUtil::WhitespaceChars, '\\f'));\n  EXPECT_NE(nullptr, strchr(StringUtil::WhitespaceChars, '\\v'));\n  EXPECT_NE(nullptr, strchr(StringUtil::WhitespaceChars, '\\n'));\n  EXPECT_NE(nullptr, strchr(StringUtil::WhitespaceChars, '\\r'));\n}\n\nTEST(StringUtil, itoa) {\n  char buf[32];\n  EXPECT_THROW(StringUtil::itoa(buf, 20, 1), std::invalid_argument);\n\n  EXPECT_EQ(1UL, StringUtil::itoa(buf, sizeof(buf), 0));\n  EXPECT_STREQ(\"0\", buf);\n\n  EXPECT_EQ(2UL, StringUtil::itoa(buf, sizeof(buf), 10));\n  EXPECT_STREQ(\"10\", buf);\n\n  EXPECT_EQ(10UL, StringUtil::itoa(buf, sizeof(buf), 1234567890));\n  EXPECT_STREQ(\"1234567890\", buf);\n\n  EXPECT_EQ(20UL, StringUtil::itoa(buf, sizeof(buf), std::numeric_limits<uint64_t>::max()));\n  EXPECT_STREQ(\"18446744073709551615\", buf);\n}\n\nTEST(StringUtil, strlcpy) {\n  {\n    char dest[6];\n    EXPECT_EQ(5U, StringUtil::strlcpy(dest, std::string{\"hello\"}.c_str(), sizeof(dest)));\n    EXPECT_STREQ(\"hello\", dest);\n  }\n\n  {\n    char dest[6];\n    EXPECT_EQ(5U, StringUtil::strlcpy(dest, std::string{\"hello\"}.c_str(), 3));\n    EXPECT_STREQ(\"he\", dest);\n  }\n\n  {\n    char dest[3];\n    EXPECT_EQ(5U, StringUtil::strlcpy(dest, std::string{\"hello\"}.c_str(), sizeof(dest)));\n    EXPECT_STREQ(\"he\", dest);\n  }\n\n  {\n    char dest[3];\n    EXPECT_EQ(0U, StringUtil::strlcpy(dest, std::string{\"\"}.c_str(), sizeof(dest)));\n    EXPECT_STREQ(\"\", dest);\n  }\n\n  {\n    char dest[3] = \"yo\";\n\n    EXPECT_EQ(1U, StringUtil::strlcpy(dest, std::string{\"a\"}.c_str(), sizeof(dest)));\n    EXPECT_STREQ(\"a\", dest);\n\n    EXPECT_EQ(10U, StringUtil::strlcpy(dest, std::string{\"absolutely\"}.c_str(), sizeof(dest)));\n    EXPECT_STREQ(\"ab\", dest);\n  }\n}\n\nTEST(StringUtil, escape) {\n  EXPECT_EQ(StringUtil::escape(\"hello world\"), \"hello world\");\n  EXPECT_EQ(StringUtil::escape(\"hello\\nworld\\n\"), \"hello\\\\nworld\\\\n\");\n  
EXPECT_EQ(StringUtil::escape(\"\\t\\nworld\\r\\n\"), \"\\\\t\\\\nworld\\\\r\\\\n\");\n  EXPECT_EQ(StringUtil::escape(\"{\\\"linux\\\": \\\"penguin\\\"}\"), \"{\\\\\\\"linux\\\\\\\": \\\\\\\"penguin\\\\\\\"}\");\n}\n\nTEST(StringUtil, toUpper) {\n  EXPECT_EQ(StringUtil::toUpper(\"\"), \"\");\n  EXPECT_EQ(StringUtil::toUpper(\"a\"), \"A\");\n  EXPECT_EQ(StringUtil::toUpper(\"Ba\"), \"BA\");\n  EXPECT_EQ(StringUtil::toUpper(\"X asdf aAf\"), \"X ASDF AAF\");\n}\n\nTEST(StringUtil, StringViewLtrim) {\n  EXPECT_EQ(\"\", StringUtil::ltrim(\"     \"));\n  EXPECT_EQ(\"hello \\t\\f\\v\\n\\r\", StringUtil::ltrim(\"   hello \\t\\f\\v\\n\\r\"));\n  EXPECT_EQ(\"hello \", StringUtil::ltrim(\"\\t\\f\\v\\n\\r   hello \"));\n  EXPECT_EQ(\"a b \", StringUtil::ltrim(\"\\t\\f\\v\\n\\ra b \"));\n  EXPECT_EQ(\"\", StringUtil::ltrim(\"\\t\\f\\v\\n\\r\"));\n  EXPECT_EQ(\"\", StringUtil::ltrim(\"\"));\n}\n\nTEST(StringUtil, StringViewRtrim) {\n  EXPECT_EQ(\"\", StringUtil::rtrim(\"     \"));\n  EXPECT_EQ(\"\\t\\f\\v\\n\\rhello\", StringUtil::rtrim(\"\\t\\f\\v\\n\\rhello \"));\n  EXPECT_EQ(\"\\t\\f\\v\\n\\r a b\", StringUtil::rtrim(\"\\t\\f\\v\\n\\r a b \\t\\f\\v\\n\\r\"));\n  EXPECT_EQ(\"\", StringUtil::rtrim(\"\\t\\f\\v\\n\\r\"));\n  EXPECT_EQ(\"\", StringUtil::rtrim(\"\"));\n}\n\nTEST(StringUtil, RemoveTrailingCharacters) {\n  EXPECT_EQ(\"\", StringUtil::removeTrailingCharacters(\"......\", '.'));\n  EXPECT_EQ(\"\\t\\f\\v\\n\\rhello \", StringUtil::removeTrailingCharacters(\"\\t\\f\\v\\n\\rhello \", '.'));\n  EXPECT_EQ(\"\\t\\f\\v\\n\\r a b\", StringUtil::removeTrailingCharacters(\"\\t\\f\\v\\n\\r a b.......\", '.'));\n  EXPECT_EQ(\"\", StringUtil::removeTrailingCharacters(\"\", '.'));\n}\n\nTEST(StringUtil, StringViewTrim) {\n  EXPECT_EQ(\"\", StringUtil::trim(\"   \"));\n  EXPECT_EQ(\"hello\", StringUtil::trim(\"\\t\\f\\v\\n\\r  hello   \"));\n  EXPECT_EQ(\"he llo\", StringUtil::trim(\" \\t\\f\\v\\n\\r he llo \\t\\f\\v\\n\\r\"));\n}\n\nTEST(StringUtil, StringViewCaseFindToken) {\n  
EXPECT_TRUE(StringUtil::caseFindToken(\"hello; world\", \";\", \"HELLO\"));\n  EXPECT_FALSE(StringUtil::caseFindToken(\"hello; world\", \";\", \"TEST\"));\n  EXPECT_TRUE(StringUtil::caseFindToken(\"heLLo; world\", \";\", \"hello\"));\n  EXPECT_TRUE(StringUtil::caseFindToken(\"hello; world\", \";\", \"hello\"));\n  EXPECT_FALSE(StringUtil::caseFindToken(\"hello; world\", \".\", \"hello\"));\n  EXPECT_TRUE(StringUtil::caseFindToken(\"\", \",\", \"\"));\n  EXPECT_FALSE(StringUtil::caseFindToken(\"\", \"\", \"a\"));\n  EXPECT_TRUE(StringUtil::caseFindToken(\" \", \" \", \"\", true));\n  EXPECT_FALSE(StringUtil::caseFindToken(\" \", \" \", \"\", false));\n  EXPECT_TRUE(StringUtil::caseFindToken(\"A=5\", \".\", \"A=5\"));\n}\n\nTEST(StringUtil, StringViewCropRight) {\n  EXPECT_EQ(\"hello\", StringUtil::cropRight(\"hello; world\\t\\f\\v\\n\\r\", \";\"));\n  EXPECT_EQ(\"foo \", StringUtil::cropRight(\"foo ; ; ; ; ; ; \", \";\"));\n  EXPECT_EQ(\"\", StringUtil::cropRight(\";hello world\\t\\f\\v\\n\\r\", \";\"));\n  EXPECT_EQ(\" hel\", StringUtil::cropRight(\" hello alo\\t\\f\\v\\n\\r\", \"lo\"));\n  EXPECT_EQ(\"\\t\\f\\v\\n\\rhe 1\", StringUtil::cropRight(\"\\t\\f\\v\\n\\rhe 12\\t\\f\\v\\n\\r\", \"2\"));\n  EXPECT_EQ(\"hello\", StringUtil::cropRight(\"hello alo\\t\\f\\v\\n\\r\", \" a\"));\n  EXPECT_EQ(\"hello \", StringUtil::cropRight(\"hello alo\\t\\f\\v\\n\\r\", \"a\"));\n  EXPECT_EQ(\"abcd\", StringUtil::cropRight(\"abcd\", \";\"));\n}\n\nTEST(StringUtil, StringViewCropLeft) {\n  EXPECT_EQ(\" world\\t\\f\\v\\n\\r\", StringUtil::cropLeft(\"hello; world\\t\\f\\v\\n\\r\", \";\"));\n  EXPECT_EQ(\"hello world \", StringUtil::cropLeft(\";hello world \", \";\"));\n  EXPECT_EQ(\"\\t\\f\\v\\n\\ralo\", StringUtil::cropLeft(\"\\t\\f\\v\\n\\rhello\\t\\f\\v\\n\\ralo\", \"lo\"));\n  EXPECT_EQ(\"2\\t\\f\\v\\n\\r\", StringUtil::cropLeft(\"\\t\\f\\v\\n\\rhe 12\\t\\f\\v\\n\\r\", \"1\"));\n  EXPECT_EQ(\"lo\\t\\f\\v\\n\\r\", StringUtil::cropLeft(\"hello alo\\t\\f\\v\\n\\r\", \" a\"));\n  
EXPECT_EQ(\" ; ; ; ; \", StringUtil::cropLeft(\"foo ; ; ; ; ; \", \";\"));\n  EXPECT_EQ(\"abcd\", StringUtil::cropLeft(\"abcd\", \";\"));\n  EXPECT_EQ(\"\", StringUtil::cropLeft(\"abcd\", \"abcd\"));\n}\n\nTEST(StringUtil, StringViewFindToken) {\n  EXPECT_TRUE(StringUtil::findToken(\"hello; world\", \";\", \"hello\"));\n  EXPECT_TRUE(StringUtil::findToken(\"abc; type=text\", \";=\", \"text\"));\n  EXPECT_TRUE(StringUtil::findToken(\"abc; type=text\", \";=\", \"abc\"));\n  EXPECT_TRUE(StringUtil::findToken(\"abc; type=text\", \";=\", \"type\"));\n  EXPECT_FALSE(StringUtil::findToken(\"abc; type=text\", \";=\", \" \"));\n  EXPECT_TRUE(StringUtil::findToken(\"abc; type=text\", \";=\", \" type\", false));\n  EXPECT_FALSE(StringUtil::findToken(\"hello; world\", \".\", \"hello\"));\n  EXPECT_TRUE(StringUtil::findToken(\"\", \",\", \"\"));\n  EXPECT_FALSE(StringUtil::findToken(\"\", \"\", \"a\"));\n  EXPECT_TRUE(StringUtil::findToken(\" \", \" \", \"\", true));\n  EXPECT_FALSE(StringUtil::findToken(\" \", \" \", \"\", false));\n  EXPECT_TRUE(StringUtil::findToken(\"A=5\", \".\", \"A=5\"));\n}\n\nTEST(StringUtil, StringViewCaseInsensitiveHash) {\n  EXPECT_EQ(8972312556107145900U, StringUtil::CaseInsensitiveHash()(\"hello world\"));\n}\n\nTEST(StringUtil, StringViewCaseInsensitiveCompare) {\n  EXPECT_TRUE(StringUtil::CaseInsensitiveCompare()(\"hello world\", \"hello world\"));\n  EXPECT_TRUE(StringUtil::CaseInsensitiveCompare()(\"HELLO world\", \"hello world\"));\n  EXPECT_FALSE(StringUtil::CaseInsensitiveCompare()(\"hello!\", \"hello world\"));\n}\n\nTEST(StringUtil, StringViewCaseUnorderedSet) {\n  StringUtil::CaseUnorderedSet words{\"Test\", \"hello\", \"WORLD\", \"Test\"};\n  EXPECT_EQ(3, words.size());\n  EXPECT_EQ(\"Test\", *(words.find(\"test\")));\n  EXPECT_EQ(\"hello\", *(words.find(\"HELLO\")));\n  EXPECT_EQ(\"WORLD\", *(words.find(\"world\")));\n  EXPECT_EQ(words.end(), words.find(\"hello world\"));\n}\n\nTEST(StringUtil, StringViewSplit) {\n  {\n    auto tokens 
= StringUtil::splitToken(\" one , two , three \", \",\", true);\n    EXPECT_EQ(3, tokens.size());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \" one \") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \" two \") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \" three \") != tokens.end());\n    EXPECT_FALSE(std::find(tokens.begin(), tokens.end(), \"one\") != tokens.end());\n  }\n  {\n    auto tokens = StringUtil::splitToken(\" one , two , three \", \",\");\n    EXPECT_EQ(3, tokens.size());\n    EXPECT_FALSE(std::find(tokens.begin(), tokens.end(), \"one\") != tokens.end());\n    EXPECT_FALSE(std::find(tokens.begin(), tokens.end(), \"two\") != tokens.end());\n    EXPECT_FALSE(std::find(tokens.begin(), tokens.end(), \"three\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \" one \") != tokens.end());\n  }\n  {\n    auto tokens = StringUtil::splitToken(\" one ,  , three=five \", \",=\", true);\n    EXPECT_EQ(4, tokens.size());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \" one \") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"  \") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \" three\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"five \") != tokens.end());\n  }\n  {\n    EXPECT_EQ(std::vector<absl::string_view>{\"hello\"}, StringUtil::splitToken(\",hello\", \",\"));\n    EXPECT_EQ(std::vector<absl::string_view>{}, StringUtil::splitToken(\"\", \",\"));\n    EXPECT_EQ(std::vector<absl::string_view>{\"a\"}, StringUtil::splitToken(\"a\", \",\"));\n    EXPECT_EQ(std::vector<absl::string_view>{\"hello\"}, StringUtil::splitToken(\"hello,\", \",\"));\n    EXPECT_EQ(std::vector<absl::string_view>{\"hello\"}, StringUtil::splitToken(\",hello\", \",\"));\n    EXPECT_EQ(std::vector<absl::string_view>{\"hello\"}, StringUtil::splitToken(\"hello, \", \", \"));\n    
EXPECT_EQ(std::vector<absl::string_view>{}, StringUtil::splitToken(\",,\", \",\"));\n\n    EXPECT_THAT(std::vector<absl::string_view>({\"h\", \"e\", \"l\", \"l\", \"o\"}),\n                ContainerEq(StringUtil::splitToken(\"hello\", \"\")));\n    EXPECT_THAT(std::vector<absl::string_view>({\"hello\", \"world\"}),\n                ContainerEq(StringUtil::splitToken(\"hello world\", \" \")));\n    EXPECT_THAT(std::vector<absl::string_view>({\"hello\", \"world\"}),\n                ContainerEq(StringUtil::splitToken(\"hello   world\", \" \")));\n    EXPECT_THAT(std::vector<absl::string_view>({\"\", \"\", \"hello\", \"world\"}),\n                ContainerEq(StringUtil::splitToken(\"  hello world\", \" \", true)));\n    EXPECT_THAT(std::vector<absl::string_view>({\"hello\", \"world\", \"\"}),\n                ContainerEq(StringUtil::splitToken(\"hello world \", \" \", true)));\n    EXPECT_THAT(std::vector<absl::string_view>({\"hello\", \"world\"}),\n                ContainerEq(StringUtil::splitToken(\"hello world\", \" \", true)));\n  }\n  {\n    auto tokens = StringUtil::splitToken(\" one , two , three \", \",\", true, true);\n    EXPECT_EQ(3, tokens.size());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"one\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"two\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"three\") != tokens.end());\n  }\n  {\n    auto tokens = StringUtil::splitToken(\" one ,  , three=five \", \",=\", true, true);\n    EXPECT_EQ(4, tokens.size());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"one\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"three\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"five\") != tokens.end());\n  }\n  {\n    auto tokens = StringUtil::splitToken(\" one ,  , three=five \", \",=\", false, 
true);\n    EXPECT_EQ(3, tokens.size());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"one\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"three\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"five\") != tokens.end());\n  }\n  {\n    auto tokens = StringUtil::splitToken(\" one ,  , three=five \", \",=\", false);\n    EXPECT_EQ(4, tokens.size());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \" one \") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"  \") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \" three\") != tokens.end());\n    EXPECT_TRUE(std::find(tokens.begin(), tokens.end(), \"five \") != tokens.end());\n  }\n}\n\nTEST(StringUtil, StringViewRemoveTokens) {\n  // Basic cases.\n  EXPECT_EQ(StringUtil::removeTokens(\"\", \",\", {\"two\"}, \",\"), \"\");\n  EXPECT_EQ(StringUtil::removeTokens(\"one\", \",\", {\"two\"}, \",\"), \"one\");\n  EXPECT_EQ(StringUtil::removeTokens(\"one,two \", \",\", {\"two\"}, \",\"), \"one\");\n  EXPECT_EQ(StringUtil::removeTokens(\"one,two \", \",\", {\"two\", \"one\"}, \",\"), \"\");\n  EXPECT_EQ(StringUtil::removeTokens(\"one,two \", \",\", {\"one\"}, \",\"), \"two\");\n  EXPECT_EQ(StringUtil::removeTokens(\"one,two,three \", \",\", {\"two\"}, \",\"), \"one,three\");\n  EXPECT_EQ(StringUtil::removeTokens(\" one , two , three \", \",\", {\"two\"}, \",\"), \"one,three\");\n  EXPECT_EQ(StringUtil::removeTokens(\" one , two , three \", \",\", {\"three\"}, \",\"), \"one,two\");\n  EXPECT_EQ(StringUtil::removeTokens(\" one , two , three \", \",\", {\"three\"}, \", \"), \"one, two\");\n  EXPECT_EQ(StringUtil::removeTokens(\"one,two,three\", \",\", {\"two\", \"three\"}, \",\"), \"one\");\n  EXPECT_EQ(StringUtil::removeTokens(\"one,two,three,four\", \",\", {\"two\", \"three\"}, \",\"), \"one,four\");\n  // Ignore case.\n  EXPECT_EQ(StringUtil::removeTokens(\"One,Two,Three,Four\", \",\", 
{\"two\", \"three\"}, \",\"), \"One,Four\");\n  // Longer joiner.\n  EXPECT_EQ(StringUtil::removeTokens(\"one,two,three,four\", \",\", {\"two\", \"three\"}, \" , \"),\n            \"one , four\");\n  // Delimiters.\n  EXPECT_EQ(StringUtil::removeTokens(\"one,two;three \", \",;\", {\"two\"}, \",\"), \"one,three\");\n}\n\nTEST(StringUtil, removeCharacters) {\n  IntervalSetImpl<size_t> removals;\n  removals.insert(3, 5);\n  removals.insert(7, 10);\n  EXPECT_EQ(\"01256\", StringUtil::removeCharacters(\"0123456789\", removals));\n  removals.insert(0, 1);\n  EXPECT_EQ(\"1256x\", StringUtil::removeCharacters(\"0123456789x\", removals));\n}\n\nTEST(AccessLogDateTimeFormatter, fromTime) {\n  SystemTime time1(std::chrono::seconds(1522796769));\n  EXPECT_EQ(\"2018-04-03T23:06:09.000Z\", AccessLogDateTimeFormatter::fromTime(time1));\n  SystemTime time2(std::chrono::milliseconds(1522796769123));\n  EXPECT_EQ(\"2018-04-03T23:06:09.123Z\", AccessLogDateTimeFormatter::fromTime(time2));\n  SystemTime time3(std::chrono::milliseconds(1522796769999));\n  EXPECT_EQ(\"2018-04-03T23:06:09.999Z\", AccessLogDateTimeFormatter::fromTime(time3));\n  SystemTime time4(std::chrono::milliseconds(1522796768999));\n  EXPECT_EQ(\"2018-04-03T23:06:08.999Z\", AccessLogDateTimeFormatter::fromTime(time4));\n}\n\nTEST(Primes, isPrime) {\n  EXPECT_TRUE(Primes::isPrime(67));\n  EXPECT_FALSE(Primes::isPrime(49));\n  EXPECT_FALSE(Primes::isPrime(102));\n  EXPECT_TRUE(Primes::isPrime(103));\n}\n\nTEST(Primes, findPrimeLargerThan) {\n  EXPECT_EQ(67, Primes::findPrimeLargerThan(62));\n  EXPECT_EQ(107, Primes::findPrimeLargerThan(103));\n  EXPECT_EQ(10007, Primes::findPrimeLargerThan(9991));\n}\n\nclass WeightedClusterEntry {\npublic:\n  WeightedClusterEntry(const std::string name, const uint64_t weight)\n      : name_(name), weight_(weight) {}\n\n  const std::string& clusterName() const { return name_; }\n  uint64_t clusterWeight() const { return weight_; }\n\nprivate:\n  const std::string name_;\n  const 
uint64_t weight_;\n};\nusing WeightedClusterEntrySharedPtr = std::shared_ptr<WeightedClusterEntry>;\n\nTEST(WeightedClusterUtil, pickCluster) {\n  std::vector<WeightedClusterEntrySharedPtr> clusters;\n\n  std::unique_ptr<WeightedClusterEntry> cluster1(new WeightedClusterEntry(\"cluster1\", 10));\n  clusters.emplace_back(std::move(cluster1));\n\n  std::unique_ptr<WeightedClusterEntry> cluster2(new WeightedClusterEntry(\"cluster2\", 90));\n  clusters.emplace_back(std::move(cluster2));\n\n  EXPECT_EQ(\"cluster1\", WeightedClusterUtil::pickCluster(clusters, 100, 5, false)->clusterName());\n  EXPECT_EQ(\"cluster2\", WeightedClusterUtil::pickCluster(clusters, 80, 79, true)->clusterName());\n}\n\nstatic std::string intervalSetIntToString(const IntervalSetImpl<int>& interval_set) {\n  std::string out;\n  const char* prefix = \"\";\n  for (const auto& interval : interval_set.toVector()) {\n    absl::StrAppend(&out, prefix, \"[\", interval.first, \", \", interval.second, \")\");\n    prefix = \", \";\n  }\n  return out;\n}\n\nTEST(IntervalSet, testIntervalAccumulation) {\n  IntervalSetImpl<int> interval_set;\n  auto insert_and_print = [&interval_set](int left, int right) -> std::string {\n    interval_set.insert(left, right);\n    return intervalSetIntToString(interval_set);\n  };\n  EXPECT_EQ(\"[7, 10)\", insert_and_print(7, 10));\n  EXPECT_EQ(\"[-2, -1), [7, 10)\", insert_and_print(-2, -1));           // disjoint left\n  EXPECT_EQ(\"[-2, -1), [7, 10), [22, 23)\", insert_and_print(22, 23)); // disjoint right\n  EXPECT_EQ(\"[-2, -1), [7, 15), [22, 23)\", insert_and_print(8, 15));  // right overhang\n  EXPECT_EQ(\"[-2, -1), [5, 15), [22, 23)\", insert_and_print(5, 12));  // left overhang\n  EXPECT_EQ(\"[-2, -1), [5, 15), [22, 23)\", insert_and_print(3, 3));   // empty; no change\n  EXPECT_EQ(\"[-2, -1), [3, 4), [5, 15), [22, 23)\",                    // single-element add\n            insert_and_print(3, 4));\n  EXPECT_EQ(\"[-2, -1), [2, 4), [5, 15), [22, 23)\", // disjoint 
in middle\n            insert_and_print(2, 4));\n  EXPECT_EQ(\"[-2, -1), [2, 15), [22, 23)\", insert_and_print(3, 6)); // merge two intervals\n  EXPECT_EQ(\"[-2, -1), [2, 15), [18, 19), [22, 23)\",                // right disjoint\n            insert_and_print(18, 19));\n  EXPECT_EQ(\"[-2, -1), [2, 15), [16, 17), [18, 19), [22, 23)\", // middle disjoint\n            insert_and_print(16, 17));\n  EXPECT_EQ(\"[-2, -1), [2, 15), [16, 17), [18, 20), [22, 23)\", // merge [18,19) and [19,20)\n            insert_and_print(19, 20));\n  EXPECT_EQ(\"[-2, -1), [2, 15), [16, 17), [18, 20), [22, 23)\", // fully enclosed; no effect\n            insert_and_print(3, 6));\n  EXPECT_EQ(\"[-2, -1), [2, 20), [22, 23)\", insert_and_print(3, 20)); // merge across 3 intervals\n  EXPECT_EQ(\"[-2, -1), [2, 23)\", insert_and_print(3, 22));           // merge all via overlap\n  EXPECT_EQ(\"[-2, 23)\", insert_and_print(-2, 23));                   // merge all covering exact\n  EXPECT_EQ(\"[-3, 24)\", insert_and_print(-3, 24)); // merge all with overhand on both sides\n\n  interval_set.clear();\n  EXPECT_EQ(\"\", insert_and_print(10, 10));\n  EXPECT_EQ(\"[25, 26)\", insert_and_print(25, 26));\n  EXPECT_EQ(\"[5, 11), [25, 26)\", insert_and_print(5, 11));\n}\n\nTEST(IntervalSet, testIntervalTargeted) {\n  auto test = [](int left, int right) -> std::string {\n    IntervalSetImpl<int> interval_set;\n    interval_set.insert(15, 20);\n    interval_set.insert(25, 30);\n    interval_set.insert(35, 40);\n    interval_set.insert(left, right);\n    return intervalSetIntToString(interval_set);\n  };\n\n  // There are 3 spans, and there are 19 potentially interesting slots\n  // for each coordinate, with the constraint that each left < right.\n  // We'll do one test that left==right has no effect first. So there's\n  // about 19^2/2 = 180 combinations, which is a lot but not too bad. 
Of\n  // course many of these are essentially the same case but it's worth making\n  // sure there's no problems in corner cases.\n  //\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:  x   x   x   xxx  x  x   x  xxx  x  x   x  xxx x\n\n  // First the corner-case of an empty insertion, leaving the input unchanged.\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(2, 2));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:  [)\n  EXPECT_EQ(\"[2, 3), [15, 20), [25, 30), [35, 40)\", test(2, 3));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:  [   )   )   )))  )  )   )  )))  )  )   )  ))) )\n  EXPECT_EQ(\"[2, 20), [25, 30), [35, 40)\", test(2, 15));\n  EXPECT_EQ(\"[2, 20), [25, 30), [35, 40)\", test(2, 17));\n  EXPECT_EQ(\"[2, 20), [25, 30), [35, 40)\", test(2, 19));\n  EXPECT_EQ(\"[2, 20), [25, 30), [35, 40)\", test(2, 20));\n  EXPECT_EQ(\"[2, 21), [25, 30), [35, 40)\", test(2, 21));\n  EXPECT_EQ(\"[2, 23), [25, 30), [35, 40)\", test(2, 23));\n  EXPECT_EQ(\"[2, 30), [35, 40)\", test(2, 25));\n  EXPECT_EQ(\"[2, 30), [35, 40)\", test(2, 27));\n  EXPECT_EQ(\"[2, 30), [35, 40)\", test(2, 29));\n  EXPECT_EQ(\"[2, 30), [35, 40)\", test(2, 30));\n  EXPECT_EQ(\"[2, 31), [35, 40)\", test(2, 31));\n  EXPECT_EQ(\"[2, 33), [35, 40)\", test(2, 33));\n  EXPECT_EQ(\"[2, 40)\", test(2, 35));\n  EXPECT_EQ(\"[2, 40)\", test(2, 37));\n  EXPECT_EQ(\"[2, 40)\", test(2, 39));\n  EXPECT_EQ(\"[2, 40)\", test(2, 40));\n  EXPECT_EQ(\"[2, 41)\", test(2, 41));\n  EXPECT_EQ(\"[2, 43)\", test(2, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:      [   )   )))  )  )   )  )))  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(15, 17));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(15, 19));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(15, 20));\n  EXPECT_EQ(\"[15, 21), [25, 30), 
[35, 40)\", test(15, 21));\n  EXPECT_EQ(\"[15, 23), [25, 30), [35, 40)\", test(15, 23));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(15, 25));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(15, 27));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(15, 29));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(15, 30));\n  EXPECT_EQ(\"[15, 31), [35, 40)\", test(15, 31));\n  EXPECT_EQ(\"[15, 33), [35, 40)\", test(15, 33));\n  EXPECT_EQ(\"[15, 40)\", test(15, 35));\n  EXPECT_EQ(\"[15, 40)\", test(15, 37));\n  EXPECT_EQ(\"[15, 40)\", test(15, 39));\n  EXPECT_EQ(\"[15, 40)\", test(15, 40));\n  EXPECT_EQ(\"[15, 41)\", test(15, 41));\n  EXPECT_EQ(\"[15, 43)\", test(15, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:          [   )))  )  )   )  )))  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(17, 19));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(17, 20));\n  EXPECT_EQ(\"[15, 21), [25, 30), [35, 40)\", test(17, 21));\n  EXPECT_EQ(\"[15, 23), [25, 30), [35, 40)\", test(17, 23));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(17, 25));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(17, 27));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(17, 29));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(17, 30));\n  EXPECT_EQ(\"[15, 31), [35, 40)\", test(17, 31));\n  EXPECT_EQ(\"[15, 33), [35, 40)\", test(17, 33));\n  EXPECT_EQ(\"[15, 40)\", test(17, 35));\n  EXPECT_EQ(\"[15, 40)\", test(17, 37));\n  EXPECT_EQ(\"[15, 40)\", test(17, 39));\n  EXPECT_EQ(\"[15, 40)\", test(17, 40));\n  EXPECT_EQ(\"[15, 41)\", test(17, 41));\n  EXPECT_EQ(\"[15, 43)\", test(17, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:              [))  )  )   )  )))  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(19, 20));\n  EXPECT_EQ(\"[15, 21), [25, 30), [35, 40)\", test(19, 21));\n  EXPECT_EQ(\"[15, 23), [25, 30), [35, 40)\", test(19, 23));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(19, 25));\n  
EXPECT_EQ(\"[15, 30), [35, 40)\", test(19, 27));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(19, 29));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(19, 30));\n  EXPECT_EQ(\"[15, 31), [35, 40)\", test(19, 31));\n  EXPECT_EQ(\"[15, 33), [35, 40)\", test(19, 33));\n  EXPECT_EQ(\"[15, 40)\", test(19, 35));\n  EXPECT_EQ(\"[15, 40)\", test(19, 37));\n  EXPECT_EQ(\"[15, 40)\", test(19, 39));\n  EXPECT_EQ(\"[15, 40)\", test(19, 40));\n  EXPECT_EQ(\"[15, 41)\", test(19, 41));\n  EXPECT_EQ(\"[15, 43)\", test(19, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:               [)  )  )   )  )))  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 21), [25, 30), [35, 40)\", test(20, 21));\n  EXPECT_EQ(\"[15, 23), [25, 30), [35, 40)\", test(20, 23));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(20, 25));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(20, 27));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(20, 29));\n  EXPECT_EQ(\"[15, 30), [35, 40)\", test(20, 30));\n  EXPECT_EQ(\"[15, 31), [35, 40)\", test(20, 31));\n  EXPECT_EQ(\"[15, 33), [35, 40)\", test(20, 33));\n  EXPECT_EQ(\"[15, 40)\", test(20, 35));\n  EXPECT_EQ(\"[15, 40)\", test(20, 37));\n  EXPECT_EQ(\"[15, 40)\", test(20, 39));\n  EXPECT_EQ(\"[15, 40)\", test(20, 40));\n  EXPECT_EQ(\"[15, 41)\", test(20, 41));\n  EXPECT_EQ(\"[15, 43)\", test(20, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                [  )  )   )  )))  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [21, 23), [25, 30), [35, 40)\", test(21, 23));\n  EXPECT_EQ(\"[15, 20), [21, 30), [35, 40)\", test(21, 25));\n  EXPECT_EQ(\"[15, 20), [21, 30), [35, 40)\", test(21, 27));\n  EXPECT_EQ(\"[15, 20), [21, 30), [35, 40)\", test(21, 29));\n  EXPECT_EQ(\"[15, 20), [21, 30), [35, 40)\", test(21, 30));\n  EXPECT_EQ(\"[15, 20), [21, 31), [35, 40)\", test(21, 31));\n  EXPECT_EQ(\"[15, 20), [21, 33), [35, 40)\", test(21, 33));\n  EXPECT_EQ(\"[15, 20), [21, 40)\", test(21, 35));\n  
EXPECT_EQ(\"[15, 20), [21, 40)\", test(21, 37));\n  EXPECT_EQ(\"[15, 20), [21, 40)\", test(21, 39));\n  EXPECT_EQ(\"[15, 20), [21, 40)\", test(21, 40));\n  EXPECT_EQ(\"[15, 20), [21, 41)\", test(21, 41));\n  EXPECT_EQ(\"[15, 20), [21, 43)\", test(21, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                   [  )   )  )))  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [23, 30), [35, 40)\", test(23, 25));\n  EXPECT_EQ(\"[15, 20), [23, 30), [35, 40)\", test(23, 27));\n  EXPECT_EQ(\"[15, 20), [23, 30), [35, 40)\", test(23, 29));\n  EXPECT_EQ(\"[15, 20), [23, 30), [35, 40)\", test(23, 30));\n  EXPECT_EQ(\"[15, 20), [23, 31), [35, 40)\", test(23, 31));\n  EXPECT_EQ(\"[15, 20), [23, 33), [35, 40)\", test(23, 33));\n  EXPECT_EQ(\"[15, 20), [23, 40)\", test(23, 35));\n  EXPECT_EQ(\"[15, 20), [23, 40)\", test(23, 37));\n  EXPECT_EQ(\"[15, 20), [23, 40)\", test(23, 39));\n  EXPECT_EQ(\"[15, 20), [23, 40)\", test(23, 40));\n  EXPECT_EQ(\"[15, 20), [23, 41)\", test(23, 41));\n  EXPECT_EQ(\"[15, 20), [23, 43)\", test(23, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                      [   )  )))  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(25, 27));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(25, 29));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(25, 30));\n  EXPECT_EQ(\"[15, 20), [25, 31), [35, 40)\", test(25, 31));\n  EXPECT_EQ(\"[15, 20), [25, 33), [35, 40)\", test(25, 33));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(25, 35));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(25, 37));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(25, 39));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(25, 40));\n  EXPECT_EQ(\"[15, 20), [25, 41)\", test(25, 41));\n  EXPECT_EQ(\"[15, 20), [25, 43)\", test(25, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                          [  )))  )  )   )  ))) )\n 
 EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(27, 29));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(27, 30));\n  EXPECT_EQ(\"[15, 20), [25, 31), [35, 40)\", test(27, 31));\n  EXPECT_EQ(\"[15, 20), [25, 33), [35, 40)\", test(27, 33));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(27, 35));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(27, 37));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(27, 39));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(27, 40));\n  EXPECT_EQ(\"[15, 20), [25, 41)\", test(27, 41));\n  EXPECT_EQ(\"[15, 20), [25, 43)\", test(27, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                             [))  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(29, 30));\n  EXPECT_EQ(\"[15, 20), [25, 31), [35, 40)\", test(29, 31));\n  EXPECT_EQ(\"[15, 20), [25, 33), [35, 40)\", test(29, 33));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(29, 35));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(29, 37));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(29, 39));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(29, 40));\n  EXPECT_EQ(\"[15, 20), [25, 41)\", test(29, 41));\n  EXPECT_EQ(\"[15, 20), [25, 43)\", test(29, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                              [)  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 31), [35, 40)\", test(30, 31));\n  EXPECT_EQ(\"[15, 20), [25, 33), [35, 40)\", test(30, 33));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(30, 35));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(30, 37));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(30, 39));\n  EXPECT_EQ(\"[15, 20), [25, 40)\", test(30, 40));\n  EXPECT_EQ(\"[15, 20), [25, 41)\", test(30, 41));\n  EXPECT_EQ(\"[15, 20), [25, 43)\", test(30, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                               [  )  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [31, 33), [35, 40)\", test(31, 
33));\n  EXPECT_EQ(\"[15, 20), [25, 30), [31, 40)\", test(31, 35));\n  EXPECT_EQ(\"[15, 20), [25, 30), [31, 40)\", test(31, 37));\n  EXPECT_EQ(\"[15, 20), [25, 30), [31, 40)\", test(31, 39));\n  EXPECT_EQ(\"[15, 20), [25, 30), [31, 40)\", test(31, 40));\n  EXPECT_EQ(\"[15, 20), [25, 30), [31, 41)\", test(31, 41));\n  EXPECT_EQ(\"[15, 20), [25, 30), [31, 43)\", test(31, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                                  [  )   )  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [33, 40)\", test(33, 35));\n  EXPECT_EQ(\"[15, 20), [25, 30), [33, 40)\", test(33, 37));\n  EXPECT_EQ(\"[15, 20), [25, 30), [33, 40)\", test(33, 39));\n  EXPECT_EQ(\"[15, 20), [25, 30), [33, 40)\", test(33, 40));\n  EXPECT_EQ(\"[15, 20), [25, 30), [33, 41)\", test(33, 41));\n  EXPECT_EQ(\"[15, 20), [25, 30), [33, 43)\", test(33, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                                     [   )  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(35, 37));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(35, 39));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(35, 40));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 41)\", test(35, 41));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 43)\", test(35, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                                         [  ))) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(37, 39));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(37, 40));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 41)\", test(37, 41));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 43)\", test(37, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                                            [)) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40)\", test(39, 40));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 41)\", 
test(39, 41));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 43)\", test(39, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                                             [) )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 41)\", test(40, 41));\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 43)\", test(40, 43));\n\n  // initial setup:         [15    20)      [25   30)      [35   35)\n  // insertion points:                                              [ )\n  EXPECT_EQ(\"[15, 20), [25, 30), [35, 40), [41, 43)\", test(41, 43));\n}\n\nTEST(WelfordStandardDeviation, AllEntriesTheSame) {\n  WelfordStandardDeviation wsd;\n  wsd.update(10);\n  wsd.update(10);\n  wsd.update(10);\n  EXPECT_EQ(10, wsd.mean());\n  EXPECT_EQ(0, wsd.computeStandardDeviation());\n}\n\nTEST(WelfordStandardDeviation, SmallVariance) {\n  WelfordStandardDeviation wsd;\n  wsd.update(10);\n  wsd.update(10);\n  wsd.update(10);\n  wsd.update(9);\n  wsd.update(11);\n  EXPECT_LT(0.5, wsd.computeStandardDeviation());\n  EXPECT_GT(1.0, wsd.computeStandardDeviation());\n  EXPECT_EQ(10, wsd.mean());\n}\n\nTEST(WelfordStandardDeviation, HugeVariance) {\n  WelfordStandardDeviation wsd;\n  wsd.update(20);\n  wsd.update(2000);\n  wsd.update(200000);\n  wsd.update(20000000);\n  EXPECT_EQ(5050505, wsd.mean());\n  EXPECT_LT(1000, wsd.computeStandardDeviation());\n}\n\nTEST(WelfordStandardDeviation, InsufficientData) {\n  WelfordStandardDeviation wsd;\n  wsd.update(10);\n  EXPECT_EQ(10, wsd.mean());\n  EXPECT_TRUE(std::isnan(wsd.computeStandardDeviation()));\n}\n\nTEST(DateFormatter, FromTime) {\n  const SystemTime time1(std::chrono::seconds(1522796769));\n  EXPECT_EQ(\"2018-04-03T23:06:09.000Z\", DateFormatter(\"%Y-%m-%dT%H:%M:%S.000Z\").fromTime(time1));\n  EXPECT_EQ(\"aaa23\", DateFormatter(std::string(3, 'a') + \"%H\").fromTime(time1));\n  const SystemTime time2(std::chrono::seconds(0));\n  EXPECT_EQ(\"1970-01-01T00:00:00.000Z\", 
DateFormatter(\"%Y-%m-%dT%H:%M:%S.000Z\").fromTime(time2));\n  EXPECT_EQ(\"aaa00\", DateFormatter(std::string(3, 'a') + \"%H\").fromTime(time2));\n}\n\n// Check the time complexity. Make sure DateFormatter can finish parsing long messy string without\n// crashing/freezing. This should pass in 0-2 seconds if O(n). Finish in 30-120 seconds if O(n^2)\nTEST(DateFormatter, ParseLongString) {\n  std::string input;\n  std::string expected_output;\n  int num_duplicates = 400;\n  std::string duplicate_input = \"%%1f %1f, %2f, %3f, %4f, \";\n  std::string duplicate_output = \"%1 1, 14, 142, 1420, \";\n  for (int i = 0; i < num_duplicates; i++) {\n    absl::StrAppend(&input, duplicate_input, \"(\");\n    absl::StrAppend(&expected_output, duplicate_output, \"(\");\n  }\n  absl::StrAppend(&input, duplicate_input);\n  absl::StrAppend(&expected_output, duplicate_output);\n\n  const SystemTime time1(std::chrono::seconds(1522796769) + std::chrono::milliseconds(142));\n  std::string output = DateFormatter(input).fromTime(time1);\n  EXPECT_EQ(expected_output, output);\n}\n\n// Verify that two DateFormatter patterns with the same ??? patterns but\n// different format strings don't false share cache entries. 
This is a\n// regression test for when they did.\nTEST(DateFormatter, FromTimeSameWildcard) {\n  const SystemTime time1(std::chrono::seconds(1522796769) + std::chrono::milliseconds(142));\n  EXPECT_EQ(\"2018-04-03T23:06:09.000Z142\",\n            DateFormatter(\"%Y-%m-%dT%H:%M:%S.000Z%3f\").fromTime(time1));\n  EXPECT_EQ(\"2018-04-03T23:06:09.000Z114\",\n            DateFormatter(\"%Y-%m-%dT%H:%M:%S.000Z%1f%2f\").fromTime(time1));\n}\n\nTEST(TrieLookupTable, AddItems) {\n  TrieLookupTable<const char*> trie;\n  const char* cstr_a = \"a\";\n  const char* cstr_b = \"b\";\n  const char* cstr_c = \"c\";\n\n  EXPECT_TRUE(trie.add(\"foo\", cstr_a));\n  EXPECT_TRUE(trie.add(\"bar\", cstr_b));\n  EXPECT_EQ(cstr_a, trie.find(\"foo\"));\n  EXPECT_EQ(cstr_b, trie.find(\"bar\"));\n\n  // overwrite_existing = false\n  EXPECT_FALSE(trie.add(\"foo\", cstr_c, false));\n  EXPECT_EQ(cstr_a, trie.find(\"foo\"));\n\n  // overwrite_existing = true\n  EXPECT_TRUE(trie.add(\"foo\", cstr_c));\n  EXPECT_EQ(cstr_c, trie.find(\"foo\"));\n}\n\nTEST(TrieLookupTable, LongestPrefix) {\n  TrieLookupTable<const char*> trie;\n  const char* cstr_a = \"a\";\n  const char* cstr_b = \"b\";\n  const char* cstr_c = \"c\";\n\n  EXPECT_TRUE(trie.add(\"foo\", cstr_a));\n  EXPECT_TRUE(trie.add(\"bar\", cstr_b));\n  EXPECT_TRUE(trie.add(\"baro\", cstr_c));\n\n  EXPECT_EQ(cstr_a, trie.find(\"foo\"));\n  EXPECT_EQ(cstr_a, trie.findLongestPrefix(\"foo\"));\n  EXPECT_EQ(cstr_a, trie.findLongestPrefix(\"foosball\"));\n\n  EXPECT_EQ(cstr_b, trie.find(\"bar\"));\n  EXPECT_EQ(cstr_b, trie.findLongestPrefix(\"bar\"));\n  EXPECT_EQ(cstr_b, trie.findLongestPrefix(\"baritone\"));\n  EXPECT_EQ(cstr_c, trie.findLongestPrefix(\"barometer\"));\n\n  EXPECT_EQ(nullptr, trie.find(\"toto\"));\n  EXPECT_EQ(nullptr, trie.findLongestPrefix(\"toto\"));\n  EXPECT_EQ(nullptr, trie.find(\" \"));\n  EXPECT_EQ(nullptr, trie.findLongestPrefix(\" \"));\n}\n\nTEST(InlineStorageTest, InlineString) {\n  InlineStringPtr hello = 
InlineString::create(\"Hello, world!\");\n  EXPECT_EQ(\"Hello, world!\", hello->toStringView());\n  EXPECT_EQ(\"Hello, world!\", hello->toString());\n}\n\n#ifdef WIN32\nTEST(ErrorDetailsTest, WindowsFormatMessage) {\n  // winsock2 error\n  EXPECT_NE(errorDetails(SOCKET_ERROR_AGAIN), \"\");\n  EXPECT_THAT(errorDetails(SOCKET_ERROR_AGAIN), Not(HasSubstr(\"\\r\\n\")));\n  EXPECT_NE(errorDetails(SOCKET_ERROR_AGAIN), \"Unknown error\");\n\n  // winsock2 error with a long message\n  EXPECT_NE(errorDetails(SOCKET_ERROR_MSG_SIZE), \"\");\n  EXPECT_THAT(errorDetails(SOCKET_ERROR_MSG_SIZE), Not(HasSubstr(\"\\r\\n\")));\n  EXPECT_NE(errorDetails(SOCKET_ERROR_MSG_SIZE), \"Unknown error\");\n\n  // regular Windows error\n  EXPECT_NE(errorDetails(ERROR_FILE_NOT_FOUND), \"\");\n  EXPECT_THAT(errorDetails(ERROR_FILE_NOT_FOUND), Not(HasSubstr(\"\\r\\n\")));\n  EXPECT_NE(errorDetails(ERROR_FILE_NOT_FOUND), \"Unknown error\");\n\n  // invalid error code\n  EXPECT_EQ(errorDetails(99999), \"Unknown error\");\n}\n#endif\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/common/version_test.cc",
    "content": "#include \"common/version/version.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\n// Class for accessing private members of the VersionInfo class.\nclass VersionInfoTestPeer {\npublic:\n  static const std::string& buildType() { return VersionInfo::buildType(); }\n  static const std::string& sslVersion() { return VersionInfo::sslVersion(); }\n  static envoy::config::core::v3::BuildVersion makeBuildVersion(const char* version) {\n    return VersionInfo::makeBuildVersion(version);\n  }\n};\n\nTEST(VersionTest, BuildVersion) {\n  auto build_version = VersionInfo::buildVersion();\n  std::string version_string =\n      absl::StrCat(build_version.version().major_number(), \".\",\n                   build_version.version().minor_number(), \".\", build_version.version().patch());\n\n  const auto& fields = build_version.metadata().fields();\n  if (fields.find(BuildVersionMetadataKeys::get().BuildLabel) != fields.end()) {\n    absl::StrAppend(&version_string, \"-\",\n                    fields.at(BuildVersionMetadataKeys::get().BuildLabel).string_value());\n  }\n  EXPECT_EQ(BUILD_VERSION_NUMBER, version_string);\n  EXPECT_EQ(VersionInfo::revision(),\n            fields.at(BuildVersionMetadataKeys::get().RevisionSHA).string_value());\n  EXPECT_EQ(VersionInfo::revisionStatus(),\n            fields.at(BuildVersionMetadataKeys::get().RevisionStatus).string_value());\n  EXPECT_EQ(VersionInfoTestPeer::buildType(),\n            fields.at(BuildVersionMetadataKeys::get().BuildType).string_value());\n  EXPECT_EQ(VersionInfoTestPeer::sslVersion(),\n            fields.at(BuildVersionMetadataKeys::get().SslVersion).string_value());\n}\n\nTEST(VersionTest, MakeBuildVersionWithLabel) {\n  auto build_version = VersionInfoTestPeer::makeBuildVersion(\"1.2.3-foo-bar\");\n  EXPECT_EQ(1, build_version.version().major_number());\n  EXPECT_EQ(2, build_version.version().minor_number());\n  EXPECT_EQ(3, 
build_version.version().patch());\n  const auto& fields = build_version.metadata().fields();\n  EXPECT_GE(fields.size(), 1);\n  EXPECT_EQ(\"foo-bar\", fields.at(BuildVersionMetadataKeys::get().BuildLabel).string_value());\n}\n\nTEST(VersionTest, MakeBuildVersionWithoutLabel) {\n  auto build_version = VersionInfoTestPeer::makeBuildVersion(\"1.2.3\");\n  EXPECT_EQ(1, build_version.version().major_number());\n  EXPECT_EQ(2, build_version.version().minor_number());\n  EXPECT_EQ(3, build_version.version().patch());\n  const auto& fields = build_version.metadata().fields();\n  EXPECT_EQ(fields.find(BuildVersionMetadataKeys::get().BuildLabel), fields.end());\n  // Other metadata should still be present\n  EXPECT_GE(fields.size(), 1);\n}\n\nTEST(VersionTest, MakeBadBuildVersion) {\n  auto build_version = VersionInfoTestPeer::makeBuildVersion(\"1.foo.3-bar\");\n  EXPECT_EQ(0, build_version.version().major_number());\n  EXPECT_EQ(0, build_version.version().minor_number());\n  EXPECT_EQ(0, build_version.version().patch());\n  const auto& fields = build_version.metadata().fields();\n  EXPECT_EQ(fields.find(BuildVersionMetadataKeys::get().BuildLabel), fields.end());\n  // Other metadata should still be present\n  EXPECT_GE(fields.size(), 1);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"api_shadow_test\",\n    srcs = [\"api_shadow_test.cc\"],\n    deps = [\"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\"],\n)\n\nenvoy_cc_test(\n    name = \"api_type_oracle_test\",\n    srcs = [\"api_type_oracle_test.cc\"],\n    deps = [\n        \"//source/common/config:api_type_oracle_lib\",\n        \"@envoy_api//envoy/config/filter/http/ip_tagging/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ip_tagging/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"decoded_resource_impl_test\",\n    srcs = [\"decoded_resource_impl_test.cc\"],\n    deps = [\n        \"//source/common/config:decoded_resource_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"delta_subscription_impl_test\",\n    srcs = [\"delta_subscription_impl_test.cc\"],\n    deps = [\n        \":delta_subscription_test_harness\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:grpc_subscription_lib\",\n        \"//source/common/config:new_grpc_mux_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    
],\n)\n\nenvoy_cc_test(\n    name = \"delta_subscription_state_test\",\n    srcs = [\"delta_subscription_state_test.cc\"],\n    deps = [\n        \"//source/common/config:delta_subscription_state_lib\",\n        \"//source/common/config:grpc_subscription_lib\",\n        \"//source/common/config:new_grpc_mux_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"filesystem_subscription_impl_test\",\n    srcs = [\"filesystem_subscription_impl_test.cc\"],\n    deps = [\n        \":filesystem_subscription_test_harness\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"filesystem_subscription_test_harness\",\n    srcs = [\"filesystem_subscription_test_harness.h\"],\n    deps = [\n        \":subscription_test_harness\",\n        \"//source/common/config:filesystem_subscription_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:test_time_lib\",\n        
\"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"grpc_mux_impl_test\",\n    srcs = [\"grpc_mux_impl_test.cc\"],\n    deps = [\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:grpc_mux_lib\",\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"new_grpc_mux_impl_test\",\n    srcs = [\"new_grpc_mux_impl_test.cc\"],\n    deps = [\n        \"//source/common/config:new_grpc_mux_lib\",\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/protobuf\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        
\"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"grpc_stream_test\",\n    srcs = [\"grpc_stream_test.cc\"],\n    deps = [\n        \"//source/common/config:grpc_stream_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"grpc_subscription_impl_test\",\n    srcs = [\"grpc_subscription_impl_test.cc\"],\n    deps = [\n        \":grpc_subscription_test_harness\",\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"grpc_subscription_test_harness\",\n    hdrs = [\"grpc_subscription_test_harness.h\"],\n    deps = [\n        \":subscription_test_harness\",\n        \"//source/common/common:hash_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:grpc_mux_lib\",\n        \"//source/common/config:grpc_subscription_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"delta_subscription_test_harness\",\n    hdrs = [\"delta_subscription_test_harness.h\"],\n    deps = [\n        \":subscription_test_harness\",\n        \"//source/common/config:new_grpc_mux_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http_subscription_impl_test\",\n    srcs = [\"http_subscription_impl_test.cc\"],\n    deps = [\n        \":http_subscription_test_harness\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"http_subscription_test_harness\",\n    srcs = [\"http_subscription_test_harness.h\"],\n    deps = [\n        \":subscription_test_harness\",\n        \"//include/envoy/http:async_client_interface\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:http_subscription_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/http:message_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:utility_lib\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"opaque_resource_decoder_impl_test\",\n    srcs = [\"opaque_resource_decoder_impl_test.cc\"],\n    deps = [\n        \"//source/common/config:opaque_resource_decoder_lib\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"subscription_factory_impl_test\",\n    srcs = [\"subscription_factory_impl_test.cc\"],\n    deps = [\n        \"//source/common/config:subscription_factory_lib\",\n        \"//source/common/config:udpa_resource_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"subscription_impl_test\",\n    srcs = [\"subscription_impl_test.cc\"],\n    deps = [\n        \":delta_subscription_test_harness\",\n        \":filesystem_subscription_test_harness\",\n        \":grpc_subscription_test_harness\",\n        \":http_subscription_test_harness\",\n        \":subscription_test_harness\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"subscription_test_harness\",\n 
   srcs = [\"subscription_test_harness.h\"],\n    deps = [\n        \"//source/common/config:utility_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"type_to_endpoint_test\",\n    srcs = [\"type_to_endpoint_test.cc\"],\n    deps = [\n        \"//source/common/config:type_to_endpoint_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"pausable_ack_queue_test\",\n    srcs = [\"pausable_ack_queue_test.cc\"],\n    deps = [\n        \"//source/common/config:pausable_ack_queue_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"metadata_test\",\n    srcs = [\"metadata_test.cc\"],\n    deps = [\n        \"//include/envoy/common:base_includes\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/config:well_known_names\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"runtime_utility_test\",\n    srcs = [\"runtime_utility_test.cc\"],\n    deps = [\n        \"//source/common/config:runtime_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    deps = [\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/config:well_known_names\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        
\"//test/mocks/upstream:thread_local_cluster_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@com_github_cncf_udpa//udpa/type/v1:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/cors/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"registry_test\",\n    srcs = [\"registry_test.cc\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"watch_map_test\",\n    srcs = [\"watch_map_test.cc\"],\n    deps = [\n        \"//source/common/config:watch_map_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"dummy_config_proto\",\n    srcs = [\"dummy_config.proto\"],\n)\n\nenvoy_cc_test(\n    name = \"config_provider_impl_test\",\n    srcs = [\"config_provider_impl_test.cc\"],\n    deps = [\n        \":dummy_config_proto_cc_proto\",\n        \"//source/common/config:config_provider_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"datasource_test\",\n    srcs = [\"datasource_test.cc\"],\n    deps = [\n        
\"//source/common/common:empty_string\",\n        \"//source/common/config:datasource_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/common/crypto:utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"udpa_context_params_test\",\n    srcs = [\"udpa_context_params_test.cc\"],\n    deps = [\n        \":udpa_test_utility_lib\",\n        \"//source/common/config:udpa_context_params_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"udpa_resource_test\",\n    srcs = [\"udpa_resource_test.cc\"],\n    deps = [\n        \":udpa_test_utility_lib\",\n        \"//source/common/config:udpa_resource_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"udpa_test_utility_lib\",\n    hdrs = [\"udpa_test_utility.h\"],\n)\n\nenvoy_proto_library(\n    name = \"version_converter_proto\",\n    srcs = [\"version_converter.proto\"],\n)\n\nenvoy_cc_test(\n    name = \"version_converter_test\",\n    srcs = [\"version_converter_test.cc\"],\n    deps = [\n        \":version_converter_proto_cc_proto\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/protobuf:well_known_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/common/config/api_shadow_test.cc",
    "content": "#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\n// Validate that deprecated fields are accessible via the shadow protos.\nTEST(ApiShadowTest, All) {\n  envoy::config::cluster::v3::Cluster cluster;\n\n  cluster.mutable_hidden_envoy_deprecated_tls_context();\n  cluster.set_lb_policy(\n      envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB);\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/api_type_oracle_test.cc",
    "content": "#include \"envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.h\"\n#include \"envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h\"\n\n#include \"common/config/api_type_oracle.h\"\n\n#include \"gtest/gtest.h\"\n\n// API_NO_BOOST_FILE\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nTEST(ApiTypeOracleTest, All) {\n  envoy::config::filter::http::ip_tagging::v2::IPTagging v2_config;\n  envoy::extensions::filters::http::ip_tagging::v3::IPTagging v3_config;\n  ProtobufWkt::Any non_api_type;\n\n  EXPECT_EQ(nullptr,\n            ApiTypeOracle::getEarlierVersionDescriptor(non_api_type.GetDescriptor()->full_name()));\n  EXPECT_EQ(nullptr,\n            ApiTypeOracle::getEarlierVersionDescriptor(v2_config.GetDescriptor()->full_name()));\n  const auto* desc =\n      ApiTypeOracle::getEarlierVersionDescriptor(v3_config.GetDescriptor()->full_name());\n  EXPECT_EQ(envoy::config::filter::http::ip_tagging::v2::IPTagging::descriptor()->full_name(),\n            desc->full_name());\n  EXPECT_EQ(envoy::config::filter::http::ip_tagging::v2::IPTagging::descriptor()->full_name(),\n            ApiTypeOracle::getEarlierVersionMessageTypeName(v3_config.GetDescriptor()->full_name())\n                .value());\n  EXPECT_EQ(\"envoy.config.filter.http.ip_tagging.v2.IPTagging\",\n            TypeUtil::typeUrlToDescriptorFullName(\n                \"type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging\"));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/config_provider_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/config_provider_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/common/config/dummy_config.pb.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nusing testing::InSequence;\n\nclass DummyConfigProviderManager;\n\nclass DummyConfig : public Envoy::Config::ConfigProvider::Config {\npublic:\n  DummyConfig() = default;\n  explicit DummyConfig(const test::common::config::DummyConfig& config_proto) {\n    protos_.push_back(config_proto);\n  }\n  void addProto(const test::common::config::DummyConfig& config_proto) {\n    protos_.push_back(config_proto);\n  }\n\n  uint32_t numProtos() const { return protos_.size(); }\n\nprivate:\n  std::vector<test::common::config::DummyConfig> protos_;\n};\n\nclass StaticDummyConfigProvider : public ImmutableConfigProviderBase {\npublic:\n  StaticDummyConfigProvider(const test::common::config::DummyConfig& config_proto,\n                            Server::Configuration::ServerFactoryContext& factory_context,\n                            DummyConfigProviderManager& config_provider_manager);\n\n  ~StaticDummyConfigProvider() override = default;\n\n  // Envoy::Config::ConfigProvider\n  const Protobuf::Message* getConfigProto() const override { return &config_proto_; }\n\n  // Envoy::Config::ConfigProvider\n  std::string getConfigVersion() const override { return \"\"; }\n\n  // Envoy::Config::ConfigProvider\n  ConfigConstSharedPtr getConfig() const override { return config_; }\n\nprivate:\n  ConfigConstSharedPtr config_;\n  test::common::config::DummyConfig config_proto_;\n};\n\nclass DummyConfigSubscription : public ConfigSubscriptionInstance,\n   
                             Envoy::Config::SubscriptionCallbacks {\npublic:\n  DummyConfigSubscription(const uint64_t manager_identifier,\n                          Server::Configuration::ServerFactoryContext& factory_context,\n                          DummyConfigProviderManager& config_provider_manager);\n  ~DummyConfigSubscription() override = default;\n\n  // Envoy::Config::ConfigSubscriptionCommonBase\n  void start() override {}\n\n  // Envoy::Config::ConfigSubscriptionInstance\n  ConfigProvider::ConfigConstSharedPtr\n  onConfigProtoUpdate(const Protobuf::Message& config_proto) override {\n    return std::make_shared<DummyConfig>(\n        static_cast<const test::common::config::DummyConfig&>(config_proto));\n  }\n\n  // Envoy::Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<DecodedResourceRef>& resources,\n                      const std::string& version_info) override {\n    const auto& config =\n        dynamic_cast<const test::common::config::DummyConfig&>(resources[0].get().resource());\n    if (checkAndApplyConfigUpdate(config, \"dummy_config\", version_info)) {\n      config_proto_ = config;\n    }\n\n    ConfigSubscriptionCommonBase::onConfigUpdate();\n  }\n  void onConfigUpdate(const std::vector<DecodedResourceRef>&,\n                      const Protobuf::RepeatedPtrField<std::string>&, const std::string&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  // Envoy::Config::SubscriptionCallbacks\n  void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason,\n                            const EnvoyException*) override {}\n\n  const absl::optional<test::common::config::DummyConfig>& configProto() const {\n    return config_proto_;\n  }\n\nprivate:\n  absl::optional<test::common::config::DummyConfig> config_proto_;\n};\nusing DummyConfigSubscriptionSharedPtr = std::shared_ptr<DummyConfigSubscription>;\n\nclass DummyDynamicConfigProvider : public MutableConfigProviderCommonBase {\npublic:\n  explicit 
DummyDynamicConfigProvider(DummyConfigSubscriptionSharedPtr&& subscription)\n      : MutableConfigProviderCommonBase(std::move(subscription), ApiType::Full),\n        subscription_(static_cast<DummyConfigSubscription*>(\n            MutableConfigProviderCommonBase::subscription_.get())) {}\n\n  ~DummyDynamicConfigProvider() override = default;\n\n  DummyConfigSubscription& subscription() { return *subscription_; }\n\n  // Envoy::Config::ConfigProvider\n  const Protobuf::Message* getConfigProto() const override {\n    if (!subscription_->configProto().has_value()) {\n      return nullptr;\n    }\n    return &subscription_->configProto().value();\n  }\n  std::string getConfigVersion() const override { return \"\"; }\n\nprivate:\n  // Lifetime of this pointer is owned by the shared_ptr held by the base class.\n  DummyConfigSubscription* subscription_;\n};\n\nclass DummyConfigProviderManager : public ConfigProviderManagerImplBase {\npublic:\n  explicit DummyConfigProviderManager(Server::Admin& admin)\n      : ConfigProviderManagerImplBase(admin, \"dummy\") {}\n\n  ~DummyConfigProviderManager() override = default;\n\n  // Envoy::Config::ConfigProviderManagerImplBase\n  ProtobufTypes::MessagePtr dumpConfigs() const override {\n    auto config_dump = std::make_unique<test::common::config::DummyConfigsDump>();\n    for (const auto& element : configSubscriptions()) {\n      auto subscription = element.second.lock();\n      ASSERT(subscription);\n\n      if (subscription->configInfo()) {\n        auto* dynamic_config = config_dump->mutable_dynamic_dummy_configs()->Add();\n        dynamic_config->set_version_info(subscription->configInfo().value().last_config_version_);\n        dynamic_config->mutable_dummy_config()->MergeFrom(\n            static_cast<DummyConfigSubscription*>(subscription.get())->configProto().value());\n        TimestampUtil::systemClockToTimestamp(subscription->lastUpdated(),\n                                              
*dynamic_config->mutable_last_updated());\n      }\n    }\n\n    for (const auto* provider : immutableConfigProviders(ConfigProviderInstanceType::Static)) {\n      ASSERT(provider->configProtoInfo<test::common::config::DummyConfig>());\n      auto* static_config = config_dump->mutable_static_dummy_configs()->Add();\n      static_config->mutable_dummy_config()->MergeFrom(\n          provider->configProtoInfo<test::common::config::DummyConfig>().value().config_proto_);\n      TimestampUtil::systemClockToTimestamp(provider->lastUpdated(),\n                                            *static_config->mutable_last_updated());\n    }\n\n    return config_dump;\n  }\n\n  // Envoy::Config::ConfigProviderManager\n  ConfigProviderPtr\n  createXdsConfigProvider(const Protobuf::Message& config_source_proto,\n                          Server::Configuration::ServerFactoryContext& factory_context,\n                          Init::Manager& init_manager, const std::string&,\n                          const Envoy::Config::ConfigProviderManager::OptionalArg&) override {\n    DummyConfigSubscriptionSharedPtr subscription = getSubscription<DummyConfigSubscription>(\n        config_source_proto, init_manager,\n        [&factory_context](const uint64_t manager_identifier,\n                           ConfigProviderManagerImplBase& config_provider_manager)\n            -> ConfigSubscriptionCommonBaseSharedPtr {\n          return std::make_shared<DummyConfigSubscription>(\n              manager_identifier, factory_context,\n              static_cast<DummyConfigProviderManager&>(config_provider_manager));\n        });\n\n    return std::make_unique<DummyDynamicConfigProvider>(std::move(subscription));\n  }\n\n  // Envoy::Config::ConfigProviderManager\n  ConfigProviderPtr\n  createStaticConfigProvider(const Protobuf::Message& config_proto,\n                             Server::Configuration::ServerFactoryContext& factory_context,\n                             const 
Envoy::Config::ConfigProviderManager::OptionalArg&) override {\n    return std::make_unique<StaticDummyConfigProvider>(\n        dynamic_cast<const test::common::config::DummyConfig&>(config_proto), factory_context,\n        *this);\n  }\n  ConfigProviderPtr\n  createStaticConfigProvider(std::vector<std::unique_ptr<const Protobuf::Message>>&&,\n                             Server::Configuration::ServerFactoryContext&,\n                             const OptionalArg&) override {\n    ASSERT(false, \"this provider does not expect multiple config protos\");\n    return nullptr;\n  }\n};\n\nDummyConfigSubscription::DummyConfigSubscription(\n    const uint64_t manager_identifier, Server::Configuration::ServerFactoryContext& factory_context,\n    DummyConfigProviderManager& config_provider_manager)\n    : ConfigSubscriptionInstance(\"DummyDS\", manager_identifier, config_provider_manager,\n                                 factory_context) {\n  // A nullptr is shared as the initial value.\n  initialize(nullptr);\n}\n\nStaticDummyConfigProvider::StaticDummyConfigProvider(\n    const test::common::config::DummyConfig& config_proto,\n    Server::Configuration::ServerFactoryContext& factory_context,\n    DummyConfigProviderManager& config_provider_manager)\n    : ImmutableConfigProviderBase(factory_context, config_provider_manager,\n                                  ConfigProviderInstanceType::Static, ApiType::Full),\n      config_(std::make_shared<DummyConfig>(config_proto)), config_proto_(config_proto) {}\n\nclass ConfigProviderImplTest : public testing::Test {\npublic:\n  void initialize() {\n    EXPECT_CALL(server_factory_context_.admin_.config_tracker_, add_(\"dummy\", _));\n    provider_manager_ =\n        std::make_unique<DummyConfigProviderManager>(server_factory_context_.admin_);\n  }\n\n  Event::SimulatedTimeSystem& timeSystem() { return time_system_; }\n\nprotected:\n  Event::SimulatedTimeSystem time_system_;\n  
NiceMock<Server::Configuration::MockServerFactoryContext> server_factory_context_;\n  NiceMock<Init::MockManager> init_manager_;\n  std::unique_ptr<DummyConfigProviderManager> provider_manager_;\n};\n\ntest::common::config::DummyConfig parseDummyConfigFromYaml(const std::string& yaml) {\n  test::common::config::DummyConfig config;\n  TestUtility::loadFromYaml(yaml, config);\n  return config;\n}\n\n// Tests that dynamic config providers share ownership of the config\n// subscriptions, config protos and data structures generated as a result of the\n// configurations (i.e., the ConfigProvider::Config).\nTEST_F(ConfigProviderImplTest, SharedOwnership) {\n  initialize();\n  Init::ExpectableWatcherImpl watcher;\n  init_manager_.initialize(watcher);\n\n  envoy::config::core::v3::ApiConfigSource config_source_proto;\n  config_source_proto.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n  ConfigProviderPtr provider1 = provider_manager_->createXdsConfigProvider(\n      config_source_proto, server_factory_context_, init_manager_, \"dummy_prefix\",\n      ConfigProviderManager::NullOptionalArg());\n\n  // No config protos have been received via the subscription yet.\n  EXPECT_FALSE(provider1->configProtoInfo<test::common::config::DummyConfig>().has_value());\n\n  Protobuf::RepeatedPtrField<ProtobufWkt::Any> untyped_dummy_configs;\n  const auto dummy_config = parseDummyConfigFromYaml(\"a: a dummy config\");\n\n  DummyConfigSubscription& subscription =\n      dynamic_cast<DummyDynamicConfigProvider&>(*provider1).subscription();\n  const auto decoded_resources = TestUtility::decodeResources({dummy_config}, \"a\");\n  subscription.onConfigUpdate(decoded_resources.refvec_, \"1\");\n\n  // Check that a newly created provider with the same config source will share\n  // the subscription, config proto and resulting ConfigProvider::Config.\n  ConfigProviderPtr provider2 = provider_manager_->createXdsConfigProvider(\n      config_source_proto, server_factory_context_, 
init_manager_, \"dummy_prefix\",\n      ConfigProviderManager::NullOptionalArg());\n\n  EXPECT_TRUE(provider2->configProtoInfo<test::common::config::DummyConfig>().has_value());\n  EXPECT_EQ(&dynamic_cast<DummyDynamicConfigProvider&>(*provider1).subscription(),\n            &dynamic_cast<DummyDynamicConfigProvider&>(*provider2).subscription());\n  EXPECT_EQ(&provider1->configProtoInfo<test::common::config::DummyConfig>().value().config_proto_,\n            &provider2->configProtoInfo<test::common::config::DummyConfig>().value().config_proto_);\n  EXPECT_EQ(provider1->config<const DummyConfig>().get(),\n            provider2->config<const DummyConfig>().get());\n\n  // Change the config source and verify that a new subscription is used.\n  config_source_proto.set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n  ConfigProviderPtr provider3 = provider_manager_->createXdsConfigProvider(\n      config_source_proto, server_factory_context_, init_manager_, \"dummy_prefix\",\n      ConfigProviderManager::NullOptionalArg());\n\n  EXPECT_NE(&dynamic_cast<DummyDynamicConfigProvider&>(*provider1).subscription(),\n            &dynamic_cast<DummyDynamicConfigProvider&>(*provider3).subscription());\n  EXPECT_NE(provider1->config<const DummyConfig>().get(),\n            provider3->config<const DummyConfig>().get());\n\n  dynamic_cast<DummyDynamicConfigProvider&>(*provider3)\n      .subscription()\n      .onConfigUpdate(decoded_resources.refvec_, \"provider3\");\n\n  EXPECT_EQ(2UL, static_cast<test::common::config::DummyConfigsDump*>(\n                     provider_manager_->dumpConfigs().get())\n                     ->dynamic_dummy_configs()\n                     .size());\n\n  // Test that tear down of config providers leads to correctly updating\n  // centralized state; this is validated using the config dump.\n  provider1.reset();\n  provider2.reset();\n\n  auto dynamic_dummy_configs =\n      
static_cast<test::common::config::DummyConfigsDump*>(provider_manager_->dumpConfigs().get())\n          ->dynamic_dummy_configs();\n  EXPECT_EQ(1UL, dynamic_dummy_configs.size());\n\n  EXPECT_EQ(\"provider3\", dynamic_dummy_configs[0].version_info());\n\n  provider3.reset();\n\n  EXPECT_EQ(0UL, static_cast<test::common::config::DummyConfigsDump*>(\n                     provider_manager_->dumpConfigs().get())\n                     ->dynamic_dummy_configs()\n                     .size());\n}\n\n// A ConfigProviderManager that returns a dummy ConfigProvider.\nclass DummyConfigProviderManagerMockConfigProvider : public DummyConfigProviderManager {\npublic:\n  DummyConfigProviderManagerMockConfigProvider(Server::Admin& admin)\n      : DummyConfigProviderManager(admin) {}\n\n  ConfigProviderPtr\n  createXdsConfigProvider(const Protobuf::Message& config_source_proto,\n                          Server::Configuration::ServerFactoryContext& factory_context,\n                          Init::Manager& init_manager, const std::string&,\n                          const Envoy::Config::ConfigProviderManager::OptionalArg&) override {\n    DummyConfigSubscriptionSharedPtr subscription = getSubscription<DummyConfigSubscription>(\n        config_source_proto, init_manager,\n        [&factory_context](const uint64_t manager_identifier,\n                           ConfigProviderManagerImplBase& config_provider_manager)\n            -> ConfigSubscriptionCommonBaseSharedPtr {\n          return std::make_shared<DummyConfigSubscription>(\n              manager_identifier, factory_context,\n              static_cast<DummyConfigProviderManagerMockConfigProvider&>(config_provider_manager));\n        });\n    return std::make_unique<DummyDynamicConfigProvider>(std::move(subscription));\n  }\n};\n\n// Test that duplicate config updates will not trigger creation of a new ConfigProvider::Config.\nTEST_F(ConfigProviderImplTest, DuplicateConfigProto) {\n  InSequence sequence;\n  // This provider 
manager returns a DummyDynamicConfigProvider.\n  auto provider_manager = std::make_unique<DummyConfigProviderManagerMockConfigProvider>(\n      server_factory_context_.admin_);\n  envoy::config::core::v3::ApiConfigSource config_source_proto;\n  config_source_proto.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n  ConfigProviderPtr provider = provider_manager->createXdsConfigProvider(\n      config_source_proto, server_factory_context_, init_manager_, \"dummy_prefix\",\n      ConfigProviderManager::NullOptionalArg());\n  auto* typed_provider = static_cast<DummyDynamicConfigProvider*>(provider.get());\n  auto& subscription = static_cast<DummyConfigSubscription&>(typed_provider->subscription());\n  EXPECT_EQ(subscription.getConfig(), nullptr);\n  // First time issuing a configUpdate(). A new ConfigProvider::Config should be created.\n  const auto dummy_config = parseDummyConfigFromYaml(\"a: a dynamic dummy config\");\n  const auto decoded_resources = TestUtility::decodeResources({dummy_config}, \"a\");\n  subscription.onConfigUpdate(decoded_resources.refvec_, \"1\");\n  EXPECT_NE(subscription.getConfig(), nullptr);\n  auto config_ptr = subscription.getConfig();\n  EXPECT_EQ(typed_provider->config<DummyConfig>().get(), config_ptr.get());\n  // Second time issuing the configUpdate(), this time with a duplicate proto. 
A new\n  // ConfigProvider::Config _should not_ be created.\n  subscription.onConfigUpdate(decoded_resources.refvec_, \"2\");\n  EXPECT_EQ(config_ptr, subscription.getConfig());\n  EXPECT_EQ(typed_provider->config<DummyConfig>().get(), config_ptr.get());\n}\n\n// An empty config provider tests on base class' constructor.\nclass InlineDummyConfigProvider : public ImmutableConfigProviderBase {\npublic:\n  InlineDummyConfigProvider(Server::Configuration::ServerFactoryContext& factory_context,\n                            DummyConfigProviderManager& config_provider_manager,\n                            ConfigProviderInstanceType instance_type)\n      : ImmutableConfigProviderBase(factory_context, config_provider_manager, instance_type,\n                                    ApiType::Full) {}\n  ConfigConstSharedPtr getConfig() const override { return nullptr; }\n  std::string getConfigVersion() const override { return \"\"; }\n  const Protobuf::Message* getConfigProto() const override { return nullptr; }\n};\n\nclass ConfigProviderImplDeathTest : public ConfigProviderImplTest {};\n\nTEST_F(ConfigProviderImplDeathTest, AssertionFailureOnIncorrectInstanceType) {\n  initialize();\n\n  InlineDummyConfigProvider foo(server_factory_context_, *provider_manager_,\n                                ConfigProviderInstanceType::Inline);\n  InlineDummyConfigProvider bar(server_factory_context_, *provider_manager_,\n                                ConfigProviderInstanceType::Static);\n  EXPECT_DEBUG_DEATH(InlineDummyConfigProvider(server_factory_context_, *provider_manager_,\n                                               ConfigProviderInstanceType::Xds),\n                     \"\");\n}\n\n// Tests that the base ConfigProvider*s are handling registration with the\n// /config_dump admin handler as well as generic bookkeeping such as timestamp\n// updates.\nTEST_F(ConfigProviderImplTest, ConfigDump) {\n  initialize();\n  // Empty dump first.\n  auto message_ptr =\n      
server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"dummy\"]();\n  const auto& dummy_config_dump =\n      static_cast<const test::common::config::DummyConfigsDump&>(*message_ptr);\n\n  test::common::config::DummyConfigsDump expected_config_dump;\n  TestUtility::loadFromYaml(R\"EOF(\nstatic_dummy_configs:\ndynamic_dummy_configs:\n)EOF\",\n                            expected_config_dump);\n  EXPECT_EQ(expected_config_dump.DebugString(), dummy_config_dump.DebugString());\n\n  // Static config dump only.\n  std::string config_yaml = \"a: a static dummy config\";\n  timeSystem().setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  ConfigProviderPtr static_config = provider_manager_->createStaticConfigProvider(\n      parseDummyConfigFromYaml(config_yaml), server_factory_context_,\n      ConfigProviderManager::NullOptionalArg());\n  message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"dummy\"]();\n  const auto& dummy_config_dump2 =\n      static_cast<const test::common::config::DummyConfigsDump&>(*message_ptr);\n  TestUtility::loadFromYaml(R\"EOF(\nstatic_dummy_configs:\n  - dummy_config: { a: a static dummy config }\n    last_updated: { seconds: 1234567891, nanos: 234000000 }\ndynamic_dummy_configs:\n)EOF\",\n                            expected_config_dump);\n  EXPECT_EQ(expected_config_dump.DebugString(), dummy_config_dump2.DebugString());\n\n  envoy::config::core::v3::ApiConfigSource config_source_proto;\n  config_source_proto.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n  ConfigProviderPtr dynamic_provider = provider_manager_->createXdsConfigProvider(\n      config_source_proto, server_factory_context_, init_manager_, \"dummy_prefix\",\n      ConfigProviderManager::NullOptionalArg());\n\n  // Static + dynamic config dump.\n  const auto dummy_config = parseDummyConfigFromYaml(\"a: a dynamic dummy config\");\n\n  
timeSystem().setSystemTime(std::chrono::milliseconds(1234567891567));\n  DummyConfigSubscription& subscription =\n      dynamic_cast<DummyDynamicConfigProvider&>(*dynamic_provider).subscription();\n  const auto decoded_resources = TestUtility::decodeResources({dummy_config}, \"a\");\n  subscription.onConfigUpdate(decoded_resources.refvec_, \"v1\");\n\n  message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"dummy\"]();\n  const auto& dummy_config_dump3 =\n      static_cast<const test::common::config::DummyConfigsDump&>(*message_ptr);\n  TestUtility::loadFromYaml(R\"EOF(\nstatic_dummy_configs:\n  - dummy_config: { a: a static dummy config }\n    last_updated: { seconds: 1234567891, nanos: 234000000 }\ndynamic_dummy_configs:\n  - version_info: v1\n    dummy_config: { a: a dynamic dummy config }\n    last_updated: { seconds: 1234567891, nanos: 567000000 }\n)EOF\",\n                            expected_config_dump);\n  EXPECT_EQ(expected_config_dump.DebugString(), dummy_config_dump3.DebugString());\n\n  ConfigProviderPtr static_config2 = provider_manager_->createStaticConfigProvider(\n      parseDummyConfigFromYaml(\"a: another static dummy config\"), server_factory_context_,\n      ConfigProviderManager::NullOptionalArg());\n  message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"dummy\"]();\n  const auto& dummy_config_dump4 =\n      static_cast<const test::common::config::DummyConfigsDump&>(*message_ptr);\n  TestUtility::loadFromYaml(R\"EOF(\nstatic_dummy_configs:\n  - dummy_config: { a: another static dummy config }\n    last_updated: { seconds: 1234567891, nanos: 567000000 }\n  - dummy_config: { a: a static dummy config }\n    last_updated: { seconds: 1234567891, nanos: 234000000 }\ndynamic_dummy_configs:\n  - version_info: v1\n    dummy_config: { a: a dynamic dummy config }\n    last_updated: { seconds: 1234567891, nanos: 567000000 }\n)EOF\",\n                            expected_config_dump);\n  
EXPECT_THAT(expected_config_dump, ProtoEqIgnoreRepeatedFieldOrdering(dummy_config_dump4));\n}\n\n// Tests that dynamic config providers enforce that the context's localInfo is\n// set, since it is used to obtain the node/cluster attributes required for\n// subscriptions.\nTEST_F(ConfigProviderImplTest, LocalInfoNotDefined) {\n  initialize();\n  server_factory_context_.local_info_.node_.set_cluster(\"\");\n  server_factory_context_.local_info_.node_.set_id(\"\");\n\n  envoy::config::core::v3::ApiConfigSource config_source_proto;\n  config_source_proto.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n  EXPECT_THROW_WITH_MESSAGE(\n      provider_manager_->createXdsConfigProvider(config_source_proto, server_factory_context_,\n                                                 init_manager_, \"dummy_prefix\",\n                                                 ConfigProviderManager::NullOptionalArg()),\n      EnvoyException,\n      \"DummyDS: node 'id' and 'cluster' are required. Set it either in 'node' config or \"\n      \"via --service-node and --service-cluster options.\");\n}\n\nclass DeltaDummyConfigProviderManager;\n\nclass DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance,\n                                     Envoy::Config::SubscriptionCallbacks {\npublic:\n  using ProtoMap = std::map<std::string, test::common::config::DummyConfig>;\n\n  DeltaDummyConfigSubscription(const uint64_t manager_identifier,\n                               Server::Configuration::ServerFactoryContext& factory_context,\n                               DeltaDummyConfigProviderManager& config_provider_manager);\n\n  // Envoy::Config::ConfigSubscriptionCommonBase\n  void start() override {}\n\n  // Envoy::Config::SubscriptionCallbacks\n  void onConfigUpdate(const std::vector<DecodedResourceRef>& resources,\n                      const std::string& version_info) override {\n    if (resources.empty()) {\n      return;\n    }\n\n    // For simplicity, there is no 
logic here to track updates and/or removals to the existing\n    // config proto set (i.e., this is append only). Real xDS APIs will need to track additions,\n    // updates and removals to the config set and apply the diffs to the underlying config\n    // implementations.\n    for (const auto& resource : resources) {\n      const auto& dummy_config =\n          dynamic_cast<const test::common::config::DummyConfig&>(resource.get().resource());\n      proto_map_[version_info] = dummy_config;\n      // Propagate the new config proto to all worker threads.\n      applyConfigUpdate([&dummy_config](ConfigProvider::ConfigConstSharedPtr prev_config)\n                            -> ConfigProvider::ConfigConstSharedPtr {\n        auto* config = const_cast<DummyConfig*>(static_cast<const DummyConfig*>(prev_config.get()));\n        // Per above, append only for now.\n        config->addProto(dummy_config);\n        return prev_config;\n      });\n    }\n\n    ConfigSubscriptionCommonBase::onConfigUpdate();\n    setLastConfigInfo(absl::optional<LastConfigInfo>({absl::nullopt, version_info}));\n  }\n  void onConfigUpdate(const std::vector<DecodedResourceRef>&,\n                      const Protobuf::RepeatedPtrField<std::string>&, const std::string&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason,\n                            const EnvoyException*) override {\n    ConfigSubscriptionCommonBase::onConfigUpdateFailed();\n  }\n  const ProtoMap& protoMap() const { return proto_map_; }\n\nprivate:\n  ProtoMap proto_map_;\n};\nusing DeltaDummyConfigSubscriptionSharedPtr = std::shared_ptr<DeltaDummyConfigSubscription>;\n\nclass DeltaDummyDynamicConfigProvider : public Envoy::Config::MutableConfigProviderCommonBase {\npublic:\n  DeltaDummyDynamicConfigProvider(DeltaDummyConfigSubscriptionSharedPtr&& subscription)\n      : MutableConfigProviderCommonBase(std::move(subscription), 
ConfigProvider::ApiType::Delta),\n        subscription_(static_cast<DeltaDummyConfigSubscription*>(\n            MutableConfigProviderCommonBase::subscription_.get())) {}\n\n  DeltaDummyConfigSubscription& subscription() { return *subscription_; }\n\n  // Envoy::Config::ConfigProvider\n  ConfigProtoVector getConfigProtos() const override {\n    ConfigProtoVector proto_vector;\n    for (const auto& value_type : subscription_->protoMap()) {\n      proto_vector.push_back(&value_type.second);\n    }\n    return proto_vector;\n  }\n\n  std::string getConfigVersion() const override {\n    return (subscription_->configInfo().has_value())\n               ? subscription_->configInfo().value().last_config_version_\n               : \"\";\n  }\n\nprivate:\n  DeltaDummyConfigSubscription* subscription_;\n};\n\nclass DeltaDummyConfigProviderManager : public ConfigProviderManagerImplBase {\npublic:\n  DeltaDummyConfigProviderManager(Server::Admin& admin)\n      : ConfigProviderManagerImplBase(admin, \"dummy\") {}\n\n  // Envoy::Config::ConfigProviderManagerImplBase\n  ProtobufTypes::MessagePtr dumpConfigs() const override {\n    auto config_dump = std::make_unique<test::common::config::DeltaDummyConfigsDump>();\n    for (const auto& element : configSubscriptions()) {\n      auto subscription = element.second.lock();\n      ASSERT(subscription);\n\n      if (subscription->configInfo()) {\n        auto* dynamic_config = config_dump->mutable_dynamic_dummy_configs()->Add();\n        dynamic_config->set_version_info(subscription->configInfo().value().last_config_version_);\n        const auto* typed_subscription =\n            static_cast<DeltaDummyConfigSubscription*>(subscription.get());\n        const DeltaDummyConfigSubscription::ProtoMap& proto_map = typed_subscription->protoMap();\n        for (const auto& value_type : proto_map) {\n          dynamic_config->mutable_dummy_configs()->Add()->MergeFrom(value_type.second);\n        }\n        
TimestampUtil::systemClockToTimestamp(subscription->lastUpdated(),\n                                              *dynamic_config->mutable_last_updated());\n      }\n    }\n\n    return config_dump;\n  }\n\n  // Envoy::Config::ConfigProviderManager\n  ConfigProviderPtr\n  createXdsConfigProvider(const Protobuf::Message& config_source_proto,\n                          Server::Configuration::ServerFactoryContext& factory_context,\n                          Init::Manager& init_manager, const std::string&,\n                          const Envoy::Config::ConfigProviderManager::OptionalArg&) override {\n    DeltaDummyConfigSubscriptionSharedPtr subscription =\n\n        getSubscription<DeltaDummyConfigSubscription>(\n            config_source_proto, init_manager,\n            [&factory_context](const uint64_t manager_identifier,\n                               ConfigProviderManagerImplBase& config_provider_manager)\n                -> ConfigSubscriptionCommonBaseSharedPtr {\n              return std::make_shared<DeltaDummyConfigSubscription>(\n                  manager_identifier, factory_context,\n                  static_cast<DeltaDummyConfigProviderManager&>(config_provider_manager));\n            });\n\n    return std::make_unique<DeltaDummyDynamicConfigProvider>(std::move(subscription));\n  }\n};\n\nDeltaDummyConfigSubscription::DeltaDummyConfigSubscription(\n    const uint64_t manager_identifier, Server::Configuration::ServerFactoryContext& factory_context,\n    DeltaDummyConfigProviderManager& config_provider_manager)\n    : DeltaConfigSubscriptionInstance(\"Dummy\", manager_identifier, config_provider_manager,\n                                      factory_context) {\n  initialize(\n      []() -> ConfigProvider::ConfigConstSharedPtr { return std::make_shared<DummyConfig>(); });\n}\n\nclass DeltaConfigProviderImplTest : public testing::Test {\npublic:\n  DeltaConfigProviderImplTest() {\n    EXPECT_CALL(server_factory_context_.admin_.config_tracker_, 
add_(\"dummy\", _));\n    provider_manager_ =\n        std::make_unique<DeltaDummyConfigProviderManager>(server_factory_context_.admin_);\n  }\n\n  Event::SimulatedTimeSystem& timeSystem() { return time_system_; }\n\nprotected:\n  Event::SimulatedTimeSystem time_system_;\n  NiceMock<Server::Configuration::MockServerFactoryContext> server_factory_context_;\n  NiceMock<Init::MockManager> init_manager_;\n  std::unique_ptr<DeltaDummyConfigProviderManager> provider_manager_;\n};\n\n// Validate that delta config subscriptions are shared across delta dynamic config providers and\n// that the underlying Config implementation can be shared as well.\nTEST_F(DeltaConfigProviderImplTest, MultipleDeltaSubscriptions) {\n  envoy::config::core::v3::ApiConfigSource config_source_proto;\n  config_source_proto.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n  ConfigProviderPtr provider1 = provider_manager_->createXdsConfigProvider(\n      config_source_proto, server_factory_context_, init_manager_, \"dummy_prefix\",\n      ConfigProviderManager::NullOptionalArg());\n\n  // No config protos have been received via the subscription yet.\n  EXPECT_FALSE(provider1->configProtoInfoVector<test::common::config::DummyConfig>().has_value());\n\n  const auto dummy_config_0 = parseDummyConfigFromYaml(\"a: a dummy config\");\n  const auto dummy_config_1 = parseDummyConfigFromYaml(\"a: another dummy config\");\n  const auto decoded_resources =\n      TestUtility::decodeResources({dummy_config_0, dummy_config_1}, \"a\");\n\n  DeltaDummyConfigSubscription& subscription =\n      dynamic_cast<DeltaDummyDynamicConfigProvider&>(*provider1).subscription();\n  subscription.onConfigUpdate(decoded_resources.refvec_, \"1\");\n\n  ConfigProviderPtr provider2 = provider_manager_->createXdsConfigProvider(\n      config_source_proto, server_factory_context_, init_manager_, \"dummy_prefix\",\n      ConfigProviderManager::NullOptionalArg());\n\n  // Providers, config implementations (i.e., the 
DummyConfig) and config protos are\n  // expected to be shared for a given subscription.\n  EXPECT_EQ(&dynamic_cast<DeltaDummyDynamicConfigProvider&>(*provider1).subscription(),\n            &dynamic_cast<DeltaDummyDynamicConfigProvider&>(*provider2).subscription());\n  ASSERT_TRUE(provider2->configProtoInfoVector<test::common::config::DummyConfig>().has_value());\n  EXPECT_EQ(\n      provider1->configProtoInfoVector<test::common::config::DummyConfig>().value().config_protos_,\n      provider2->configProtoInfoVector<test::common::config::DummyConfig>().value().config_protos_);\n  EXPECT_EQ(provider1->config<const DummyConfig>().get(),\n            provider2->config<const DummyConfig>().get());\n  // Validate that the config protos are propagated to the thread local config implementation.\n  EXPECT_EQ(provider1->config<const DummyConfig>()->numProtos(), 2);\n\n  // Issue a second config update to validate that having multiple providers bound to the\n  // subscription causes a single update to the underlying shared config implementation.\n  subscription.onConfigUpdate(decoded_resources.refvec_, \"2\");\n  // NOTE: the config implementation is append only and _does not_ track updates/removals to the\n  // config proto set, so the expectation is to double the size of the set.\n  EXPECT_EQ(provider1->config<const DummyConfig>().get(),\n            provider2->config<const DummyConfig>().get());\n  EXPECT_EQ(provider1->config<const DummyConfig>()->numProtos(), 4);\n  EXPECT_EQ(provider1->configProtoInfoVector<test::common::config::DummyConfig>().value().version_,\n            \"2\");\n}\n\n// Tests a config update failure.\nTEST_F(DeltaConfigProviderImplTest, DeltaSubscriptionFailure) {\n  envoy::config::core::v3::ApiConfigSource config_source_proto;\n  config_source_proto.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n  ConfigProviderPtr provider = provider_manager_->createXdsConfigProvider(\n      config_source_proto, server_factory_context_, 
init_manager_, \"dummy_prefix\",\n      ConfigProviderManager::NullOptionalArg());\n  DeltaDummyConfigSubscription& subscription =\n      dynamic_cast<DeltaDummyDynamicConfigProvider&>(*provider).subscription();\n  const auto time = std::chrono::milliseconds(1234567891234);\n  timeSystem().setSystemTime(time);\n  const EnvoyException ex(fmt::format(\"config failure\"));\n  // Verify the failure updates the lastUpdated() timestamp.\n  subscription.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure,\n                                    &ex);\n  EXPECT_EQ(std::chrono::time_point_cast<std::chrono::milliseconds>(provider->lastUpdated())\n                .time_since_epoch(),\n            time);\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/datasource_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/base.pb.validate.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/config/datasource.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\nusing ::testing::AtLeast;\nusing ::testing::NiceMock;\nusing ::testing::Return;\n\nclass AsyncDataSourceTest : public testing::Test {\nprotected:\n  using AsyncDataSourcePb = envoy::config::core::v3::AsyncDataSource;\n\n  NiceMock<Upstream::MockClusterManager> cm_;\n  Init::MockManager init_manager_;\n  Init::ExpectableWatcherImpl init_watcher_;\n  Init::TargetHandlePtr init_target_handle_;\n  Api::ApiPtr api_{Api::createApiForTest()};\n  NiceMock<Random::MockRandomGenerator> random_;\n  Event::MockDispatcher dispatcher_;\n  Event::MockTimer* retry_timer_;\n  Event::TimerCb retry_timer_cb_;\n  NiceMock<Http::MockAsyncClientRequest> request_{&cm_.async_client_};\n\n  Config::DataSource::LocalAsyncDataProviderPtr local_data_provider_;\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_;\n\n  using AsyncClientSendFunc = std::function<Http::AsyncClient::Request*(\n      Http::RequestMessagePtr&, Http::AsyncClient::Callbacks&,\n      const Http::AsyncClient::RequestOptions)>;\n\n  void initialize(AsyncClientSendFunc func, int num_retries = 1) {\n    retry_timer_ = new Event::MockTimer();\n    EXPECT_CALL(init_manager_, add(_)).WillOnce(Invoke([this](const Init::Target& target) {\n      init_target_handle_ = target.createHandle(\"test\");\n    }));\n\n    EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n      retry_timer_cb_ = timer_cb;\n      return retry_timer_;\n    }));\n\n   
 EXPECT_CALL(cm_, httpAsyncClientForCluster(\"cluster_1\"))\n        .Times(AtLeast(1))\n        .WillRepeatedly(ReturnRef(cm_.async_client_));\n\n    EXPECT_CALL(*retry_timer_, disableTimer());\n    if (num_retries == 1) {\n      EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(AtLeast(1)).WillRepeatedly(Invoke(func));\n    } else {\n      EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n          .Times(num_retries)\n          .WillRepeatedly(Invoke(func));\n    }\n  }\n};\n\nTEST_F(AsyncDataSourceTest, LoadLocalDataSource) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    local:\n      inline_string:\n        xxxxxx\n  )EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, config);\n  EXPECT_TRUE(config.has_local());\n\n  std::string async_data;\n\n  EXPECT_CALL(init_manager_, add(_)).WillOnce(Invoke([this](const Init::Target& target) {\n    init_target_handle_ = target.createHandle(\"test\");\n  }));\n\n  local_data_provider_ = std::make_unique<Config::DataSource::LocalAsyncDataProvider>(\n      init_manager_, config.local(), true, *api_, [&](const std::string& data) {\n        EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing);\n        EXPECT_EQ(data, \"xxxxxx\");\n        async_data = data;\n      });\n\n  EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing));\n  EXPECT_CALL(init_watcher_, ready());\n\n  init_target_handle_->initialize(init_watcher_);\n  EXPECT_EQ(async_data, \"xxxxxx\");\n}\n\nTEST_F(AsyncDataSourceTest, LoadRemoteDataSourceReturnFailure) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        xxxxxx\n  )EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, config);\n  EXPECT_TRUE(config.has_remote());\n\n  initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                 const 
Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n    callbacks.onFailure(request_, Envoy::Http::AsyncClient::FailureReason::Reset);\n    return nullptr;\n  });\n\n  std::string async_data = \"non-empty\";\n  remote_data_provider_ = std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n      cm_, init_manager_, config.remote(), dispatcher_, random_, true,\n      [&](const std::string& data) {\n        EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing);\n        EXPECT_EQ(data, EMPTY_STRING);\n        async_data = data;\n      });\n\n  EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing));\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _))\n      .WillOnce(Invoke(\n          [&](const std::chrono::milliseconds&, const ScopeTrackedObject*) { retry_timer_cb_(); }));\n  init_target_handle_->initialize(init_watcher_);\n\n  EXPECT_EQ(async_data, EMPTY_STRING);\n}\n\nTEST_F(AsyncDataSourceTest, LoadRemoteDataSourceSuccessWith503) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        xxxxxx\n  )EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, config);\n  EXPECT_TRUE(config.has_remote());\n\n  initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                 const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n    callbacks.onSuccess(\n        request_, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}})});\n    return nullptr;\n  });\n\n  std::string async_data = \"non-empty\";\n  remote_data_provider_ = std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n      cm_, init_manager_, config.remote(), dispatcher_, random_, 
true,\n      [&](const std::string& data) {\n        EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing);\n        EXPECT_EQ(data, EMPTY_STRING);\n        async_data = data;\n      });\n\n  EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing));\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _))\n      .WillOnce(Invoke(\n          [&](const std::chrono::milliseconds&, const ScopeTrackedObject*) { retry_timer_cb_(); }));\n  init_target_handle_->initialize(init_watcher_);\n\n  EXPECT_EQ(async_data, EMPTY_STRING);\n}\n\nTEST_F(AsyncDataSourceTest, LoadRemoteDataSourceSuccessWithEmptyBody) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        xxxxxx\n  )EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, config);\n  EXPECT_TRUE(config.has_remote());\n\n  initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                 const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n    callbacks.onSuccess(\n        request_, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}})});\n    return nullptr;\n  });\n\n  std::string async_data = \"non-empty\";\n  remote_data_provider_ = std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n      cm_, init_manager_, config.remote(), dispatcher_, random_, true,\n      [&](const std::string& data) {\n        EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing);\n        EXPECT_EQ(data, EMPTY_STRING);\n        async_data = data;\n      });\n\n  EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing));\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_CALL(*retry_timer_, 
enableTimer(_, _))\n      .WillOnce(Invoke(\n          [&](const std::chrono::milliseconds&, const ScopeTrackedObject*) { retry_timer_cb_(); }));\n  init_target_handle_->initialize(init_watcher_);\n\n  EXPECT_EQ(async_data, EMPTY_STRING);\n}\n\nTEST_F(AsyncDataSourceTest, LoadRemoteDataSourceSuccessIncorrectSha256) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        xxxxxx\n  )EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, config);\n  EXPECT_TRUE(config.has_remote());\n\n  const std::string body = \"hello world\";\n\n  initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                 const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n    Http::ResponseMessagePtr response(new Http::ResponseMessageImpl(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n    response->body().add(body);\n\n    callbacks.onSuccess(request_, std::move(response));\n    return nullptr;\n  });\n\n  std::string async_data = \"non-empty\";\n  remote_data_provider_ = std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n      cm_, init_manager_, config.remote(), dispatcher_, random_, true,\n      [&](const std::string& data) {\n        EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing);\n        EXPECT_EQ(data, EMPTY_STRING);\n        async_data = data;\n      });\n\n  EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing));\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _))\n      .WillOnce(Invoke(\n          [&](const std::chrono::milliseconds&, const ScopeTrackedObject*) { retry_timer_cb_(); }));\n  init_target_handle_->initialize(init_watcher_);\n\n  EXPECT_EQ(async_data, EMPTY_STRING);\n}\n\nTEST_F(AsyncDataSourceTest, 
LoadRemoteDataSourceSuccess) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9\n  )EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, config);\n  EXPECT_TRUE(config.has_remote());\n\n  const std::string body = \"hello world\";\n  initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                 const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n    Http::ResponseMessagePtr response(new Http::ResponseMessageImpl(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n    response->body().add(body);\n\n    callbacks.onSuccess(request_, std::move(response));\n    return nullptr;\n  });\n\n  std::string async_data = \"non-empty\";\n  remote_data_provider_ = std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n      cm_, init_manager_, config.remote(), dispatcher_, random_, true,\n      [&](const std::string& data) {\n        EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing);\n        EXPECT_EQ(data, body);\n        async_data = data;\n      });\n\n  EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing));\n  EXPECT_CALL(init_watcher_, ready());\n  init_target_handle_->initialize(init_watcher_);\n\n  EXPECT_EQ(async_data, body);\n}\n\nTEST_F(AsyncDataSourceTest, LoadRemoteDataSourceDoNotAllowEmpty) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        xxxxxx\n  )EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, config);\n  EXPECT_TRUE(config.has_remote());\n\n  initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& 
callbacks,\n                 const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n    callbacks.onSuccess(\n        request_, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}})});\n    return nullptr;\n  });\n\n  std::string async_data = \"non-empty\";\n  remote_data_provider_ = std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n      cm_, init_manager_, config.remote(), dispatcher_, random_, false,\n      [&](const std::string& data) { async_data = data; });\n\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _))\n      .WillOnce(Invoke(\n          [&](const std::chrono::milliseconds&, const ScopeTrackedObject*) { retry_timer_cb_(); }));\n  init_target_handle_->initialize(init_watcher_);\n\n  EXPECT_EQ(async_data, \"non-empty\");\n}\n\nTEST_F(AsyncDataSourceTest, DatasourceReleasedBeforeFetchingData) {\n  const std::string body = \"hello world\";\n  std::string async_data = \"non-empty\";\n\n  {\n    AsyncDataSourcePb config;\n\n    std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9\n  )EOF\";\n    TestUtility::loadFromYamlAndValidate(yaml, config);\n    EXPECT_TRUE(config.has_remote());\n\n    initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                   const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n      Http::ResponseMessagePtr response(new Http::ResponseMessageImpl(\n          Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n      response->body().add(body);\n\n      callbacks.onSuccess(request_, std::move(response));\n      return nullptr;\n    });\n\n    remote_data_provider_ = 
std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n        cm_, init_manager_, config.remote(), dispatcher_, random_, true,\n        [&](const std::string& data) {\n          EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing);\n          EXPECT_EQ(data, body);\n          async_data = data;\n        });\n  }\n\n  EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing));\n  EXPECT_CALL(init_watcher_, ready());\n  init_target_handle_->initialize(init_watcher_);\n  EXPECT_EQ(async_data, body);\n}\n\nTEST_F(AsyncDataSourceTest, LoadRemoteDataSourceWithRetry) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9\n      retry_policy:\n        retry_back_off:\n          base_interval: 1s\n        num_retries: 3\n  )EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, config);\n  EXPECT_TRUE(config.has_remote());\n\n  const std::string body = \"hello world\";\n  int num_retries = 3;\n\n  initialize(\n      [&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n          const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n        callbacks.onSuccess(\n            request_,\n            Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}})});\n        return nullptr;\n      },\n      num_retries);\n\n  std::string async_data = \"non-empty\";\n  remote_data_provider_ = std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n      cm_, init_manager_, config.remote(), dispatcher_, random_, true,\n      [&](const std::string& data) {\n        EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing);\n        EXPECT_EQ(data, body);\n        
async_data = data;\n      });\n\n  EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing));\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _))\n      .WillRepeatedly(Invoke([&](const std::chrono::milliseconds&, const ScopeTrackedObject*) {\n        if (--num_retries == 0) {\n          EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n              .WillOnce(Invoke(\n                  [&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                      const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n                    Http::ResponseMessagePtr response(\n                        new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                            new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n                    response->body().add(body);\n\n                    callbacks.onSuccess(request_, std::move(response));\n                    return nullptr;\n                  }));\n        }\n\n        retry_timer_cb_();\n      }));\n  init_target_handle_->initialize(init_watcher_);\n\n  EXPECT_EQ(async_data, body);\n}\n\nTEST_F(AsyncDataSourceTest, BaseIntervalGreaterThanMaxInterval) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9\n      retry_policy:\n        retry_back_off:\n          base_interval: 10s\n          max_interval: 1s\n        num_retries: 3\n  )EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, config);\n  EXPECT_TRUE(config.has_remote());\n\n  EXPECT_THROW_WITH_MESSAGE(std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(\n                                cm_, init_manager_, config.remote(), dispatcher_, random_, true,\n                                [&](const std::string&) 
{}),\n                            EnvoyException,\n                            \"max_interval must be greater than or equal to the base_interval\");\n}\n\nTEST_F(AsyncDataSourceTest, BaseIntervalTest) {\n  AsyncDataSourcePb config;\n\n  std::string yaml = R\"EOF(\n    remote:\n      http_uri:\n        uri: https://example.com/data\n        cluster: cluster_1\n        timeout: 1s\n      sha256:\n        xxx\n      retry_policy:\n        retry_back_off:\n          base_interval: 0.0001s\n        num_retries: 3\n  )EOF\";\n  EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, config), EnvoyException);\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/decoded_resource_impl_test.cc",
    "content": "#include \"common/config/decoded_resource_impl.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing ::testing::InvokeWithoutArgs;\nusing ::testing::Return;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nTEST(DecodedResourceImplTest, All) {\n  MockOpaqueResourceDecoder resource_decoder;\n  ProtobufWkt::Any some_opaque_resource;\n  some_opaque_resource.set_type_url(\"some_type_url\");\n\n  {\n    EXPECT_CALL(resource_decoder, decodeResource(ProtoEq(some_opaque_resource)))\n        .WillOnce(InvokeWithoutArgs(\n            []() -> ProtobufTypes::MessagePtr { return std::make_unique<ProtobufWkt::Empty>(); }));\n    EXPECT_CALL(resource_decoder, resourceName(ProtoEq(ProtobufWkt::Empty())))\n        .WillOnce(Return(\"some_name\"));\n    DecodedResourceImpl decoded_resource(resource_decoder, some_opaque_resource, \"foo\");\n    EXPECT_EQ(\"some_name\", decoded_resource.name());\n    EXPECT_TRUE(decoded_resource.aliases().empty());\n    EXPECT_EQ(\"foo\", decoded_resource.version());\n    EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty()));\n    EXPECT_TRUE(decoded_resource.hasResource());\n  }\n\n  {\n    envoy::service::discovery::v3::Resource resource_wrapper;\n    resource_wrapper.set_name(\"real_name\");\n    resource_wrapper.add_aliases(\"bar\");\n    resource_wrapper.add_aliases(\"baz\");\n    resource_wrapper.mutable_resource()->MergeFrom(some_opaque_resource);\n    resource_wrapper.set_version(\"foo\");\n    EXPECT_CALL(resource_decoder, decodeResource(ProtoEq(some_opaque_resource)))\n        .WillOnce(InvokeWithoutArgs(\n            []() -> ProtobufTypes::MessagePtr { return std::make_unique<ProtobufWkt::Empty>(); }));\n    EXPECT_CALL(resource_decoder, resourceName(ProtoEq(ProtobufWkt::Empty()))).Times(0);\n    DecodedResourceImpl decoded_resource(resource_decoder, resource_wrapper);\n    EXPECT_EQ(\"real_name\", decoded_resource.name());\n    
EXPECT_EQ((std::vector<std::string>{\"bar\", \"baz\"}), decoded_resource.aliases());\n    EXPECT_EQ(\"foo\", decoded_resource.version());\n    EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty()));\n    EXPECT_TRUE(decoded_resource.hasResource());\n  }\n\n  {\n    envoy::service::discovery::v3::Resource resource_wrapper;\n    resource_wrapper.set_name(\"real_name\");\n    resource_wrapper.set_version(\"foo\");\n    resource_wrapper.add_aliases(\"bar\");\n    resource_wrapper.add_aliases(\"baz\");\n    EXPECT_CALL(resource_decoder, decodeResource(ProtoEq(ProtobufWkt::Any())))\n        .WillOnce(InvokeWithoutArgs(\n            []() -> ProtobufTypes::MessagePtr { return std::make_unique<ProtobufWkt::Empty>(); }));\n    EXPECT_CALL(resource_decoder, resourceName(_)).Times(0);\n    DecodedResourceImpl decoded_resource(resource_decoder, resource_wrapper);\n    EXPECT_EQ(\"real_name\", decoded_resource.name());\n    EXPECT_EQ((std::vector<std::string>{\"bar\", \"baz\"}), decoded_resource.aliases());\n    EXPECT_EQ(\"foo\", decoded_resource.version());\n    EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty()));\n    EXPECT_FALSE(decoded_resource.hasResource());\n  }\n\n  {\n    auto message = std::make_unique<ProtobufWkt::Empty>();\n    DecodedResourceImpl decoded_resource(std::move(message), \"real_name\", {\"bar\", \"baz\"}, \"foo\");\n    EXPECT_EQ(\"real_name\", decoded_resource.name());\n    EXPECT_EQ((std::vector<std::string>{\"bar\", \"baz\"}), decoded_resource.aliases());\n    EXPECT_EQ(\"foo\", decoded_resource.version());\n    EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty()));\n    EXPECT_TRUE(decoded_resource.hasResource());\n  }\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/delta_subscription_impl_test.cc",
    "content": "#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/config/api_version.h\"\n\n#include \"test/common/config/delta_subscription_test_harness.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nclass DeltaSubscriptionImplTest : public DeltaSubscriptionTestHarness, public testing::Test {\nprotected:\n  DeltaSubscriptionImplTest() = default;\n\n  // We need to destroy the subscription before the test's destruction, because the subscription's\n  // destructor removes its watch from the NewGrpcMuxImpl, and that removal process involves\n  // some things held by the test fixture.\n  void TearDown() override { doSubscriptionTearDown(); }\n};\n\nTEST_F(DeltaSubscriptionImplTest, UpdateResourcesCausesRequest) {\n  startSubscription({\"name1\", \"name2\", \"name3\"});\n  expectSendMessage({\"name4\"}, {\"name1\", \"name2\"}, Grpc::Status::WellKnownGrpcStatus::Ok, \"\", {});\n  subscription_->updateResourceInterest({\"name3\", \"name4\"});\n  expectSendMessage({\"name1\", \"name2\"}, {}, Grpc::Status::WellKnownGrpcStatus::Ok, \"\", {});\n  subscription_->updateResourceInterest({\"name1\", \"name2\", \"name3\", \"name4\"});\n  expectSendMessage({}, {\"name1\", \"name2\"}, Grpc::Status::WellKnownGrpcStatus::Ok, \"\", {});\n  subscription_->updateResourceInterest({\"name3\", \"name4\"});\n  expectSendMessage({\"name1\", \"name2\"}, {}, Grpc::Status::WellKnownGrpcStatus::Ok, \"\", {});\n  subscription_->updateResourceInterest({\"name1\", \"name2\", \"name3\", \"name4\"});\n  expectSendMessage({}, {\"name1\", \"name2\", \"name3\"}, Grpc::Status::WellKnownGrpcStatus::Ok, \"\", {});\n  subscription_->updateResourceInterest({\"name4\"});\n}\n\n// Checks that after a pause(), no requests are sent until resume().\n// Also demonstrates the 
collapsing of subscription interest updates into a single\n// request. (This collapsing happens any time multiple updates arrive before a request\n// can be sent, not just with pausing: rate limiting or a down gRPC stream would also do it).\nTEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) {\n  startSubscription({\"name1\", \"name2\", \"name3\"});\n  auto resume_sub = subscription_->pause();\n  // If nested pause wasn't handled correctly, the single expectedSendMessage below would be\n  // insufficient.\n  auto nested_resume_sub = subscription_->pause();\n\n  expectSendMessage({\"name4\"}, {\"name1\", \"name2\"}, Grpc::Status::WellKnownGrpcStatus::Ok, \"\", {});\n  // If not for the pause, these updates would make the expectSendMessage fail due to too many\n  // messages being sent.\n  subscription_->updateResourceInterest({\"name3\", \"name4\"});\n  subscription_->updateResourceInterest({\"name1\", \"name2\", \"name3\", \"name4\"});\n  subscription_->updateResourceInterest({\"name3\", \"name4\"});\n  subscription_->updateResourceInterest({\"name1\", \"name2\", \"name3\", \"name4\"});\n  subscription_->updateResourceInterest({\"name3\", \"name4\"});\n}\n\nTEST_F(DeltaSubscriptionImplTest, ResponseCausesAck) {\n  startSubscription({\"name1\"});\n  deliverConfigUpdate({\"name1\"}, \"someversion\", true);\n}\n\n// Checks that after a pause(), no ACK requests are sent until resume(), but that after the\n// resume, *all* ACKs that arrived during the pause are sent (in order).\nTEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) {\n  startSubscription({\"name1\", \"name2\", \"name3\"});\n  auto resume_sub = subscription_->pause();\n  // The server gives us our first version of resource name1.\n  // subscription_ now wants to ACK name1 (but can't due to pause).\n  {\n    auto message = std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    auto* resource = message->mutable_resources()->Add();\n    resource->set_name(\"name1\");\n    
resource->set_version(\"version1A\");\n    const std::string nonce = std::to_string(HashUtil::xxHash64(\"version1A\"));\n    message->set_nonce(nonce);\n    message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);\n    nonce_acks_required_.push(nonce);\n    static_cast<NewGrpcMuxImpl*>(subscription_->grpcMux().get())\n        ->onDiscoveryResponse(std::move(message), control_plane_stats_);\n  }\n  // The server gives us our first version of resource name2.\n  // subscription_ now wants to ACK name1 and then name2 (but can't due to pause).\n  {\n    auto message = std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    auto* resource = message->mutable_resources()->Add();\n    resource->set_name(\"name2\");\n    resource->set_version(\"version2A\");\n    const std::string nonce = std::to_string(HashUtil::xxHash64(\"version2A\"));\n    message->set_nonce(nonce);\n    message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);\n    nonce_acks_required_.push(nonce);\n    static_cast<NewGrpcMuxImpl*>(subscription_->grpcMux().get())\n        ->onDiscoveryResponse(std::move(message), control_plane_stats_);\n  }\n  // The server gives us an updated version of resource name1.\n  // subscription_ now wants to ACK name1A, then name2, then name1B (but can't due to pause).\n  {\n    auto message = std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    auto* resource = message->mutable_resources()->Add();\n    resource->set_name(\"name1\");\n    resource->set_version(\"version1B\");\n    const std::string nonce = std::to_string(HashUtil::xxHash64(\"version1B\"));\n    message->set_nonce(nonce);\n    message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);\n    nonce_acks_required_.push(nonce);\n    static_cast<NewGrpcMuxImpl*>(subscription_->grpcMux().get())\n        ->onDiscoveryResponse(std::move(message), control_plane_stats_);\n  }\n  // All ACK sendMessage()s will happen upon calling 
resume().\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _))\n      .WillRepeatedly(Invoke([this](Buffer::InstancePtr& buffer, bool) {\n        API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) message;\n        EXPECT_TRUE(Grpc::Common::parseBufferInstance(std::move(buffer), message));\n        const std::string nonce = message.response_nonce();\n        if (!nonce.empty()) {\n          nonce_acks_sent_.push(nonce);\n        }\n      }));\n  // DeltaSubscriptionTestHarness's dtor will check that all ACKs were sent with the correct nonces,\n  // in the correct order.\n}\n\nTEST(DeltaSubscriptionImplFixturelessTest, NoGrpcStream) {\n  Stats::IsolatedStoreImpl stats_store;\n  SubscriptionStats stats(Utility::generateStats(stats_store));\n\n  envoy::config::core::v3::Node node;\n  node.set_id(\"fo0\");\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  EXPECT_CALL(local_info, node()).WillRepeatedly(testing::ReturnRef(node));\n\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Random::MockRandomGenerator> random;\n  Envoy::Config::RateLimitSettings rate_limit_settings;\n  NiceMock<Config::MockSubscriptionCallbacks> callbacks;\n  NiceMock<Config::MockOpaqueResourceDecoder> resource_decoder;\n  auto* async_client = new Grpc::MockAsyncClient();\n\n  const Protobuf::MethodDescriptor* method_descriptor =\n      Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n          \"envoy.api.v2.EndpointDiscoveryService.StreamEndpoints\");\n  NewGrpcMuxImplSharedPtr xds_context = std::make_shared<NewGrpcMuxImpl>(\n      std::unique_ptr<Grpc::MockAsyncClient>(async_client), dispatcher, *method_descriptor,\n      envoy::config::core::v3::ApiVersion::AUTO, random, stats_store, rate_limit_settings,\n      local_info);\n\n  GrpcSubscriptionImplPtr subscription = std::make_unique<GrpcSubscriptionImpl>(\n      xds_context, callbacks, resource_decoder, stats, Config::TypeUrl::get().ClusterLoadAssignment,\n      dispatcher, std::chrono::milliseconds(12345), 
false);\n\n  EXPECT_CALL(*async_client, startRaw(_, _, _, _)).WillOnce(Return(nullptr));\n\n  subscription->start({\"name1\"});\n  subscription->updateResourceInterest({\"name1\", \"name2\"});\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/delta_subscription_state_test.cc",
    "content": "#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/delta_subscription_state.h\"\n#include \"common/config/utility.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Throw;\nusing testing::UnorderedElementsAre;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nconst char TypeUrl[] = \"type.googleapis.com/envoy.api.v2.Cluster\";\n\nclass DeltaSubscriptionStateTest : public testing::Test {\nprotected:\n  DeltaSubscriptionStateTest() : state_(TypeUrl, callbacks_, local_info_) {\n    state_.updateSubscriptionInterest({\"name1\", \"name2\", \"name3\"}, {});\n    envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request =\n        state_.getNextRequestAckless();\n    EXPECT_THAT(cur_request.resource_names_subscribe(),\n                UnorderedElementsAre(\"name1\", \"name2\", \"name3\"));\n  }\n\n  UpdateAck deliverDiscoveryResponse(\n      const Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>& added_resources,\n      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n      const std::string& version_info, absl::optional<std::string> nonce = absl::nullopt,\n      bool expect_config_update_call = true) {\n    envoy::service::discovery::v3::DeltaDiscoveryResponse message;\n    *message.mutable_resources() = added_resources;\n    *message.mutable_removed_resources() = removed_resources;\n    message.set_system_version_info(version_info);\n    if (nonce.has_value()) {\n      message.set_nonce(nonce.value());\n    }\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, _, _)).Times(expect_config_update_call ? 
1 : 0);\n    return state_.handleResponse(message);\n  }\n\n  UpdateAck deliverBadDiscoveryResponse(\n      const Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>& added_resources,\n      const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n      const std::string& version_info, std::string nonce, std::string error_message) {\n    envoy::service::discovery::v3::DeltaDiscoveryResponse message;\n    *message.mutable_resources() = added_resources;\n    *message.mutable_removed_resources() = removed_resources;\n    message.set_system_version_info(version_info);\n    message.set_nonce(nonce);\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, _, _)).WillOnce(Throw(EnvoyException(error_message)));\n    return state_.handleResponse(message);\n  }\n\n  NiceMock<MockUntypedConfigUpdateCallbacks> callbacks_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  // We start out interested in three resources: name1, name2, and name3.\n  DeltaSubscriptionState state_;\n};\n\nProtobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>\npopulateRepeatedResource(std::vector<std::pair<std::string, std::string>> items) {\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> add_to;\n  for (const auto& item : items) {\n    auto* resource = add_to.Add();\n    resource->set_name(item.first);\n    resource->set_version(item.second);\n  }\n  return add_to;\n}\n\n// Basic gaining/losing interest in resources should lead to subscription updates.\nTEST_F(DeltaSubscriptionStateTest, SubscribeAndUnsubscribe) {\n  {\n    state_.updateSubscriptionInterest({\"name4\"}, {\"name1\"});\n    envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request =\n        state_.getNextRequestAckless();\n    EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre(\"name4\"));\n    EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre(\"name1\"));\n  }\n  {\n    
state_.updateSubscriptionInterest({\"name1\"}, {\"name3\", \"name4\"});\n    envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request =\n        state_.getNextRequestAckless();\n    EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre(\"name1\"));\n    EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre(\"name3\", \"name4\"));\n  }\n}\n\n// Delta xDS reliably queues up and sends all discovery requests, even in situations where it isn't\n// strictly necessary. E.g.: if you subscribe but then unsubscribe to a given resource, all before a\n// request was able to be sent, two requests will be sent. The following tests demonstrate this.\n//\n// If Envoy decided it wasn't interested in a resource and then (before a request was sent) decided\n// it was again, for all we know, it dropped that resource in between and needs to retrieve it\n// again. So, we *should* send a request \"re-\"subscribing. This means that the server needs to\n// interpret the resource_names_subscribe field as \"send these resources even if you think Envoy\n// already has them\".\nTEST_F(DeltaSubscriptionStateTest, RemoveThenAdd) {\n  state_.updateSubscriptionInterest({}, {\"name3\"});\n  state_.updateSubscriptionInterest({\"name3\"}, {});\n  envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request = state_.getNextRequestAckless();\n  EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre(\"name3\"));\n  EXPECT_TRUE(cur_request.resource_names_unsubscribe().empty());\n}\n\n// Due to how our implementation provides the required behavior tested in RemoveThenAdd, the\n// add-then-remove case *also* causes the resource to be referred to in the request (as an\n// unsubscribe).\n// Unlike the remove-then-add case, this one really is unnecessary, and ideally we would have\n// the request simply not include any mention of the resource. 
Oh well.\n// This test is just here to illustrate that this behavior exists, not to enforce that it\n// should be like this. What *is* important: the server must happily and cleanly ignore\n// \"unsubscribe from [resource name I have never before referred to]\" requests.\nTEST_F(DeltaSubscriptionStateTest, AddThenRemove) {\n  state_.updateSubscriptionInterest({\"name4\"}, {});\n  state_.updateSubscriptionInterest({}, {\"name4\"});\n  envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request = state_.getNextRequestAckless();\n  EXPECT_TRUE(cur_request.resource_names_subscribe().empty());\n  EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre(\"name4\"));\n}\n\n// add/remove/add == add.\nTEST_F(DeltaSubscriptionStateTest, AddRemoveAdd) {\n  state_.updateSubscriptionInterest({\"name4\"}, {});\n  state_.updateSubscriptionInterest({}, {\"name4\"});\n  state_.updateSubscriptionInterest({\"name4\"}, {});\n  envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request = state_.getNextRequestAckless();\n  EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre(\"name4\"));\n  EXPECT_TRUE(cur_request.resource_names_unsubscribe().empty());\n}\n\n// remove/add/remove == remove.\nTEST_F(DeltaSubscriptionStateTest, RemoveAddRemove) {\n  state_.updateSubscriptionInterest({}, {\"name3\"});\n  state_.updateSubscriptionInterest({\"name3\"}, {});\n  state_.updateSubscriptionInterest({}, {\"name3\"});\n  envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request = state_.getNextRequestAckless();\n  EXPECT_TRUE(cur_request.resource_names_subscribe().empty());\n  EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre(\"name3\"));\n}\n\n// Starts with 1,2,3. 4 is added/removed/added. In those same updates, 1,2,3 are\n// removed/added/removed. 
End result should be 4 added and 1,2,3 removed.\nTEST_F(DeltaSubscriptionStateTest, BothAddAndRemove) {\n  state_.updateSubscriptionInterest({\"name4\"}, {\"name1\", \"name2\", \"name3\"});\n  state_.updateSubscriptionInterest({\"name1\", \"name2\", \"name3\"}, {\"name4\"});\n  state_.updateSubscriptionInterest({\"name4\"}, {\"name1\", \"name2\", \"name3\"});\n  envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request = state_.getNextRequestAckless();\n  EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre(\"name4\"));\n  EXPECT_THAT(cur_request.resource_names_unsubscribe(),\n              UnorderedElementsAre(\"name1\", \"name2\", \"name3\"));\n}\n\nTEST_F(DeltaSubscriptionStateTest, CumulativeUpdates) {\n  state_.updateSubscriptionInterest({\"name4\"}, {});\n  state_.updateSubscriptionInterest({\"name5\"}, {});\n  envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request = state_.getNextRequestAckless();\n  EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre(\"name4\", \"name5\"));\n  EXPECT_TRUE(cur_request.resource_names_unsubscribe().empty());\n}\n\n// Verifies that a sequence of good and bad responses from the server all get the appropriate\n// ACKs/NACKs from Envoy.\nTEST_F(DeltaSubscriptionStateTest, AckGenerated) {\n  // The xDS server's first response includes items for name1 and 2, but not 3.\n  {\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> added_resources =\n        populateRepeatedResource({{\"name1\", \"version1A\"}, {\"name2\", \"version2A\"}});\n    UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, \"debug1\", \"nonce1\");\n    EXPECT_EQ(\"nonce1\", ack.nonce_);\n    EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code());\n  }\n  // The next response updates 1 and 2, and adds 3.\n  {\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> added_resources =\n        populateRepeatedResource(\n            {{\"name1\", 
\"version1B\"}, {\"name2\", \"version2B\"}, {\"name3\", \"version3A\"}});\n    UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, \"debug2\", \"nonce2\");\n    EXPECT_EQ(\"nonce2\", ack.nonce_);\n    EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code());\n  }\n  // The next response tries but fails to update all 3, and so should produce a NACK.\n  {\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> added_resources =\n        populateRepeatedResource(\n            {{\"name1\", \"version1C\"}, {\"name2\", \"version2C\"}, {\"name3\", \"version3B\"}});\n    UpdateAck ack = deliverBadDiscoveryResponse(added_resources, {}, \"debug3\", \"nonce3\", \"oh no\");\n    EXPECT_EQ(\"nonce3\", ack.nonce_);\n    EXPECT_NE(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code());\n  }\n  // The last response successfully updates all 3.\n  {\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> added_resources =\n        populateRepeatedResource(\n            {{\"name1\", \"version1D\"}, {\"name2\", \"version2D\"}, {\"name3\", \"version3C\"}});\n    UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, \"debug4\", \"nonce4\");\n    EXPECT_EQ(\"nonce4\", ack.nonce_);\n    EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code());\n  }\n  // Bad response error detail is truncated if it's too large.\n  {\n    const std::string very_large_error_message(1 << 20, 'A');\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> added_resources =\n        populateRepeatedResource(\n            {{\"name1\", \"version1D\"}, {\"name2\", \"version2D\"}, {\"name3\", \"version3D\"}});\n    UpdateAck ack = deliverBadDiscoveryResponse(added_resources, {}, \"debug5\", \"nonce5\",\n                                                very_large_error_message);\n    EXPECT_EQ(\"nonce5\", ack.nonce_);\n    EXPECT_NE(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code());\n    
EXPECT_TRUE(absl::EndsWith(ack.error_detail_.message(), \"AAAAAAA...(truncated)\"));\n    EXPECT_LT(ack.error_detail_.message().length(), very_large_error_message.length());\n  }\n}\n\n// Tests population of the initial_resource_versions map in the first request of a new stream.\n// Tests that\n// 1) resources we have a version of are present in the map,\n// 2) resources we are interested in but don't have are not present, and\n// 3) resources we have lost interest in are not present.\nTEST_F(DeltaSubscriptionStateTest, ResourceGoneLeadsToBlankInitialVersion) {\n  {\n    // The xDS server's first update includes items for name1 and 2, but not 3.\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> add1_2 =\n        populateRepeatedResource({{\"name1\", \"version1A\"}, {\"name2\", \"version2A\"}});\n    deliverDiscoveryResponse(add1_2, {}, \"debugversion1\");\n    state_.markStreamFresh(); // simulate a stream reconnection\n    envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request =\n        state_.getNextRequestAckless();\n    EXPECT_EQ(\"version1A\", cur_request.initial_resource_versions().at(\"name1\"));\n    EXPECT_EQ(\"version2A\", cur_request.initial_resource_versions().at(\"name2\"));\n    EXPECT_EQ(cur_request.initial_resource_versions().end(),\n              cur_request.initial_resource_versions().find(\"name3\"));\n  }\n\n  {\n    // The next update updates 1, removes 2, and adds 3. 
The map should then have 1 and 3.\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> add1_3 =\n        populateRepeatedResource({{\"name1\", \"version1B\"}, {\"name3\", \"version3A\"}});\n    Protobuf::RepeatedPtrField<std::string> remove2;\n    *remove2.Add() = \"name2\";\n    deliverDiscoveryResponse(add1_3, remove2, \"debugversion2\");\n    state_.markStreamFresh(); // simulate a stream reconnection\n    envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request =\n        state_.getNextRequestAckless();\n    EXPECT_EQ(\"version1B\", cur_request.initial_resource_versions().at(\"name1\"));\n    EXPECT_EQ(cur_request.initial_resource_versions().end(),\n              cur_request.initial_resource_versions().find(\"name2\"));\n    EXPECT_EQ(\"version3A\", cur_request.initial_resource_versions().at(\"name3\"));\n  }\n\n  {\n    // The next update removes 1 and 3. The map we send the server should be empty...\n    Protobuf::RepeatedPtrField<std::string> remove1_3;\n    *remove1_3.Add() = \"name1\";\n    *remove1_3.Add() = \"name3\";\n    deliverDiscoveryResponse({}, remove1_3, \"debugversion3\");\n    state_.markStreamFresh(); // simulate a stream reconnection\n    envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request =\n        state_.getNextRequestAckless();\n    EXPECT_TRUE(cur_request.initial_resource_versions().empty());\n  }\n\n  {\n    // ...but our own map should remember our interest. 
In particular, losing interest in a\n    // resource should cause its name to appear in the next request's resource_names_unsubscribe.\n    state_.updateSubscriptionInterest({\"name4\"}, {\"name1\", \"name2\"});\n    envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request =\n        state_.getNextRequestAckless();\n    EXPECT_THAT(cur_request.resource_names_subscribe(), UnorderedElementsAre(\"name4\"));\n    EXPECT_THAT(cur_request.resource_names_unsubscribe(), UnorderedElementsAre(\"name1\", \"name2\"));\n  }\n}\n\n// Upon a reconnection, the server is supposed to assume a blank slate for the Envoy's state\n// (hence the need for initial_resource_versions). The resource_names_subscribe of the first\n// message must therefore be every resource the Envoy is interested in.\n//\n// resource_names_unsubscribe, on the other hand, is always blank in the first request - even if,\n// in between the last request of the last stream and the first request of the new stream, Envoy\n// lost interest in a resource. 
The unsubscription implicitly takes effect by simply saying\n// nothing about the resource in the newly reconnected stream.\nTEST_F(DeltaSubscriptionStateTest, SubscribeAndUnsubscribeAfterReconnect) {\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> add1_2 =\n      populateRepeatedResource({{\"name1\", \"version1A\"}, {\"name2\", \"version2A\"}});\n  deliverDiscoveryResponse(add1_2, {}, \"debugversion1\");\n\n  state_.updateSubscriptionInterest({\"name4\"}, {\"name1\"});\n  state_.markStreamFresh(); // simulate a stream reconnection\n  envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request = state_.getNextRequestAckless();\n  // Regarding the resource_names_subscribe field:\n  // name1: do not include: we lost interest.\n  // name2: yes do include: we're interested and we have a version of it.\n  // name3: yes do include: even though we don't have a version of it, we are interested.\n  // name4: yes do include: we are newly interested. (If this wasn't a stream reconnect, only\n  // name4\n  //                        would belong in this subscribe field).\n  EXPECT_THAT(cur_request.resource_names_subscribe(),\n              UnorderedElementsAre(\"name2\", \"name3\", \"name4\"));\n  EXPECT_TRUE(cur_request.resource_names_unsubscribe().empty());\n}\n\n// initial_resource_versions should not be present on messages after the first in a stream.\nTEST_F(DeltaSubscriptionStateTest, InitialVersionMapFirstMessageOnly) {\n  // First, verify that the first message of a new stream sends initial versions.\n  {\n    // The xDS server's first update gives us all three resources.\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> add_all =\n        populateRepeatedResource(\n            {{\"name1\", \"version1A\"}, {\"name2\", \"version2A\"}, {\"name3\", \"version3A\"}});\n    deliverDiscoveryResponse(add_all, {}, \"debugversion1\");\n    state_.markStreamFresh(); // simulate a stream reconnection\n    
envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request =\n        state_.getNextRequestAckless();\n    EXPECT_EQ(\"version1A\", cur_request.initial_resource_versions().at(\"name1\"));\n    EXPECT_EQ(\"version2A\", cur_request.initial_resource_versions().at(\"name2\"));\n    EXPECT_EQ(\"version3A\", cur_request.initial_resource_versions().at(\"name3\"));\n  }\n  // Then, after updating the resources but not reconnecting the stream, verify that initial\n  // versions are not sent.\n  {\n    state_.updateSubscriptionInterest({\"name4\"}, {});\n    // The xDS server updates our resources, and gives us our newly requested one too.\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> add_all =\n        populateRepeatedResource({{\"name1\", \"version1B\"},\n                                  {\"name2\", \"version2B\"},\n                                  {\"name3\", \"version3B\"},\n                                  {\"name4\", \"version4A\"}});\n    deliverDiscoveryResponse(add_all, {}, \"debugversion2\");\n    envoy::service::discovery::v3::DeltaDiscoveryRequest cur_request =\n        state_.getNextRequestAckless();\n    EXPECT_TRUE(cur_request.initial_resource_versions().empty());\n  }\n}\n\nTEST_F(DeltaSubscriptionStateTest, CheckUpdatePending) {\n  // Note that the test fixture ctor causes the first request to be \"sent\", so we start in the\n  // middle of a stream, with our initially interested resources having been requested already.\n  EXPECT_FALSE(state_.subscriptionUpdatePending());\n  state_.updateSubscriptionInterest({}, {}); // no change\n  EXPECT_FALSE(state_.subscriptionUpdatePending());\n  state_.markStreamFresh();\n  EXPECT_TRUE(state_.subscriptionUpdatePending());  // no change, BUT fresh stream\n  state_.updateSubscriptionInterest({}, {\"name3\"}); // one removed\n  EXPECT_TRUE(state_.subscriptionUpdatePending());\n  state_.updateSubscriptionInterest({\"name3\"}, {}); // one added\n  
EXPECT_TRUE(state_.subscriptionUpdatePending());\n}\n\n// The next three tests test that duplicate resource names (whether additions or removals) cause\n// DeltaSubscriptionState to reject the update without even trying to hand it to the consuming\n// API's onConfigUpdate().\nTEST_F(DeltaSubscriptionStateTest, DuplicatedAdd) {\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> additions =\n      populateRepeatedResource({{\"name1\", \"version1A\"}, {\"name1\", \"sdfsdfsdfds\"}});\n  UpdateAck ack = deliverDiscoveryResponse(additions, {}, \"debugversion1\", absl::nullopt, false);\n  EXPECT_EQ(\"duplicate name name1 found among added/updated resources\",\n            ack.error_detail_.message());\n}\n\nTEST_F(DeltaSubscriptionStateTest, DuplicatedRemove) {\n  Protobuf::RepeatedPtrField<std::string> removals;\n  *removals.Add() = \"name1\";\n  *removals.Add() = \"name1\";\n  UpdateAck ack = deliverDiscoveryResponse({}, removals, \"debugversion1\", absl::nullopt, false);\n  EXPECT_EQ(\"duplicate name name1 found in the union of added+removed resources\",\n            ack.error_detail_.message());\n}\n\nTEST_F(DeltaSubscriptionStateTest, AddedAndRemoved) {\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> additions =\n      populateRepeatedResource({{\"name1\", \"version1A\"}});\n  Protobuf::RepeatedPtrField<std::string> removals;\n  *removals.Add() = \"name1\";\n  UpdateAck ack =\n      deliverDiscoveryResponse(additions, removals, \"debugversion1\", absl::nullopt, false);\n  EXPECT_EQ(\"duplicate name name1 found in the union of added+removed resources\",\n            ack.error_detail_.message());\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/delta_subscription_test_harness.h",
    "content": "#pragma once\n\n#include <queue>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.validate.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/grpc_subscription_impl.h\"\n#include \"common/config/new_grpc_mux_impl.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/grpc/common.h\"\n\n#include \"test/common/config/subscription_test_harness.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Mock;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nclass DeltaSubscriptionTestHarness : public SubscriptionTestHarness {\npublic:\n  DeltaSubscriptionTestHarness() : DeltaSubscriptionTestHarness(std::chrono::milliseconds(0)) {}\n  DeltaSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout)\n      : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n            \"envoy.api.v2.EndpointDiscoveryService.StreamEndpoints\")),\n        async_client_(new Grpc::MockAsyncClient()) {\n    node_.set_id(\"fo0\");\n    EXPECT_CALL(local_info_, node()).WillRepeatedly(testing::ReturnRef(node_));\n    EXPECT_CALL(dispatcher_, createTimer_(_));\n    xds_context_ = std::make_shared<NewGrpcMuxImpl>(\n        std::unique_ptr<Grpc::MockAsyncClient>(async_client_), dispatcher_, *method_descriptor_,\n        envoy::config::core::v3::ApiVersion::AUTO, random_, stats_store_, rate_limit_settings_,\n        local_info_);\n    subscription_ = std::make_unique<GrpcSubscriptionImpl>(\n        xds_context_, callbacks_, resource_decoder_, stats_,\n        
Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, init_fetch_timeout, false);\n    EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  }\n\n  void doSubscriptionTearDown() override {\n    if (subscription_started_) {\n      EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n      subscription_.reset();\n    }\n  }\n\n  ~DeltaSubscriptionTestHarness() override {\n    while (!nonce_acks_required_.empty()) {\n      if (nonce_acks_sent_.empty()) {\n        // It's not enough to EXPECT_FALSE(nonce_acks_sent_.empty()), we need to skip the following\n        // EXPECT_EQ, otherwise the undefined .front() can get pretty bad.\n        EXPECT_FALSE(nonce_acks_sent_.empty());\n        break;\n      }\n      EXPECT_EQ(nonce_acks_required_.front(), nonce_acks_sent_.front());\n      nonce_acks_required_.pop();\n      nonce_acks_sent_.pop();\n    }\n    EXPECT_TRUE(nonce_acks_sent_.empty());\n  }\n\n  void startSubscription(const std::set<std::string>& cluster_names) override {\n    subscription_started_ = true;\n    last_cluster_names_ = cluster_names;\n    expectSendMessage(last_cluster_names_, \"\");\n    subscription_->start(cluster_names);\n  }\n\n  void expectSendMessage(const std::set<std::string>& cluster_names, const std::string& version,\n                         bool expect_node = false) override {\n    UNREFERENCED_PARAMETER(version);\n    UNREFERENCED_PARAMETER(expect_node);\n    expectSendMessage(cluster_names, {}, Grpc::Status::WellKnownGrpcStatus::Ok, \"\", {});\n  }\n\n  void expectSendMessage(const std::set<std::string>& subscribe,\n                         const std::set<std::string>& unsubscribe, const Protobuf::int32 error_code,\n                         const std::string& error_message,\n                         std::map<std::string, std::string> initial_resource_versions) {\n    API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) expected_request;\n    
expected_request.mutable_node()->CopyFrom(API_DOWNGRADE(node_));\n    std::copy(\n        subscribe.begin(), subscribe.end(),\n        Protobuf::RepeatedFieldBackInserter(expected_request.mutable_resource_names_subscribe()));\n    std::copy(\n        unsubscribe.begin(), unsubscribe.end(),\n        Protobuf::RepeatedFieldBackInserter(expected_request.mutable_resource_names_unsubscribe()));\n    if (!last_response_nonce_.empty()) {\n      nonce_acks_required_.push(last_response_nonce_);\n      last_response_nonce_ = \"\";\n    }\n    expected_request.set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);\n\n    for (auto const& resource : initial_resource_versions) {\n      (*expected_request.mutable_initial_resource_versions())[resource.first] = resource.second;\n    }\n\n    if (error_code != Grpc::Status::WellKnownGrpcStatus::Ok) {\n      ::google::rpc::Status* error_detail = expected_request.mutable_error_detail();\n      error_detail->set_code(error_code);\n      error_detail->set_message(error_message);\n    }\n    EXPECT_CALL(async_stream_,\n                sendMessageRaw_(\n                    Grpc::ProtoBufferEqIgnoringField(expected_request, \"response_nonce\"), false))\n        .WillOnce([this](Buffer::InstancePtr& buffer, bool) {\n          API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) message;\n          EXPECT_TRUE(Grpc::Common::parseBufferInstance(std::move(buffer), message));\n          const std::string nonce = message.response_nonce();\n          if (!nonce.empty()) {\n            nonce_acks_sent_.push(nonce);\n          }\n        });\n  }\n\n  void deliverConfigUpdate(const std::vector<std::string>& cluster_names,\n                           const std::string& version, bool accept) override {\n    auto response = std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    last_response_nonce_ = std::to_string(HashUtil::xxHash64(version));\n    response->set_nonce(last_response_nonce_);\n    
response->set_system_version_info(version);\n    response->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);\n\n    Protobuf::RepeatedPtrField<envoy::config::endpoint::v3::ClusterLoadAssignment> typed_resources;\n    for (const auto& cluster : cluster_names) {\n      if (std::find(last_cluster_names_.begin(), last_cluster_names_.end(), cluster) !=\n          last_cluster_names_.end()) {\n        envoy::config::endpoint::v3::ClusterLoadAssignment* load_assignment = typed_resources.Add();\n        load_assignment->set_cluster_name(cluster);\n        auto* resource = response->add_resources();\n        resource->set_name(cluster);\n        resource->set_version(version);\n        resource->mutable_resource()->PackFrom(API_DOWNGRADE(*load_assignment));\n      }\n    }\n    Protobuf::RepeatedPtrField<std::string> removed_resources;\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, _, version)).WillOnce(ThrowOnRejectedConfig(accept));\n    if (accept) {\n      expectSendMessage({}, version);\n    } else {\n      EXPECT_CALL(callbacks_, onConfigUpdateFailed(\n                                  Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _));\n      expectSendMessage({}, {}, Grpc::Status::WellKnownGrpcStatus::Internal, \"bad config\", {});\n    }\n    static_cast<NewGrpcMuxImpl*>(subscription_->grpcMux().get())\n        ->onDiscoveryResponse(std::move(response), control_plane_stats_);\n    Mock::VerifyAndClearExpectations(&async_stream_);\n  }\n\n  void updateResourceInterest(const std::set<std::string>& cluster_names) override {\n    std::set<std::string> sub;\n    std::set<std::string> unsub;\n\n    std::set_difference(cluster_names.begin(), cluster_names.end(), last_cluster_names_.begin(),\n                        last_cluster_names_.end(), std::inserter(sub, sub.begin()));\n    std::set_difference(last_cluster_names_.begin(), last_cluster_names_.end(),\n                        cluster_names.begin(), cluster_names.end(),\n                        
std::inserter(unsub, unsub.begin()));\n\n    expectSendMessage(sub, unsub, Grpc::Status::WellKnownGrpcStatus::Ok, \"\", {});\n    subscription_->updateResourceInterest(cluster_names);\n    last_cluster_names_ = cluster_names;\n  }\n\n  void expectConfigUpdateFailed() override {\n    EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, nullptr));\n  }\n\n  void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) override {\n    init_timeout_timer_ = new Event::MockTimer(&dispatcher_);\n    EXPECT_CALL(*init_timeout_timer_, enableTimer(timeout, _));\n  }\n\n  void expectDisableInitFetchTimeoutTimer() override {\n    EXPECT_CALL(*init_timeout_timer_, disableTimer());\n  }\n\n  void callInitFetchTimeoutCb() override { init_timeout_timer_->invokeCallback(); }\n\n  const Protobuf::MethodDescriptor* method_descriptor_;\n  Grpc::MockAsyncClient* async_client_;\n  Event::MockDispatcher dispatcher_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  Grpc::MockAsyncStream async_stream_;\n  NewGrpcMuxImplSharedPtr xds_context_;\n  GrpcSubscriptionImplPtr subscription_;\n  std::string last_response_nonce_;\n  std::set<std::string> last_cluster_names_;\n  Envoy::Config::RateLimitSettings rate_limit_settings_;\n  Event::MockTimer* init_timeout_timer_;\n  envoy::config::core::v3::Node node_;\n  NiceMock<Config::MockSubscriptionCallbacks> callbacks_;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder_{\"cluster_name\"};\n  std::queue<std::string> nonce_acks_required_;\n  std::queue<std::string> nonce_acks_sent_;\n  bool subscription_started_{};\n};\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/dummy_config.proto",
    "content": "// Provides protos for testing.\n\nsyntax = \"proto3\";\n\npackage test.common.config;\n\nimport \"google/protobuf/timestamp.proto\";\n\nmessage DummyConfig {\n  string a = 1;\n}\n\nmessage DummyConfigsDump {\n  message StaticConfigs {\n    DummyConfig dummy_config = 1;\n    google.protobuf.Timestamp last_updated = 2;\n  }\n\n  message DynamicConfigs {\n    string version_info = 1;\n    DummyConfig dummy_config = 2;\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  repeated StaticConfigs static_dummy_configs = 1;\n  repeated DynamicConfigs dynamic_dummy_configs = 2;\n}\n\nmessage DeltaDummyConfigsDump {\n  message DynamicConfigs {\n    string version_info = 1;\n    repeated DummyConfig dummy_configs = 2;\n    google.protobuf.Timestamp last_updated = 3;\n  }\n\n  repeated DynamicConfigs dynamic_dummy_configs = 2;\n}\n"
  },
  {
    "path": "test/common/config/filesystem_subscription_impl_test.cc",
    "content": "#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.validate.h\"\n\n#include \"test/common/config/filesystem_subscription_test_harness.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/filesystem/mocks.h\"\n#include \"test/test_common/logging.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\nusing testing::Throw;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nclass FilesystemSubscriptionImplTest : public testing::Test,\n                                       public FilesystemSubscriptionTestHarness {};\n\n// Validate that the client can recover from bad JSON responses.\nTEST_F(FilesystemSubscriptionImplTest, BadJsonRecovery) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  EXPECT_CALL(callbacks_,\n              onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _));\n  updateFile(\";!@#badjso n\");\n  EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n}\n\n// Validate that a file that is initially available results in a successful update.\nTEST_F(FilesystemSubscriptionImplTest, InitialFile) {\n  updateFile(\"{\\\"versionInfo\\\": \\\"0\\\", \\\"resources\\\": []}\", false);\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n}\n\n// Validate that if we fail to set a watch, we get a sensible warning.\nTEST(MiscFilesystemSubscriptionImplTest, BadWatch) {\n  Event::MockDispatcher dispatcher;\n  Stats::MockIsolatedStatsStore stats_store;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor;\n  Api::ApiPtr api = 
Api::createApiForTest(stats_store);\n  SubscriptionStats stats{Utility::generateStats(stats_store)};\n  auto* watcher = new Filesystem::MockWatcher();\n  EXPECT_CALL(dispatcher, createFilesystemWatcher_()).WillOnce(Return(watcher));\n  EXPECT_CALL(*watcher, addWatch(_, _, _)).WillOnce(Throw(EnvoyException(\"bad path\")));\n  NiceMock<Config::MockSubscriptionCallbacks> callbacks;\n  NiceMock<Config::MockOpaqueResourceDecoder> resource_decoder;\n  EXPECT_THROW_WITH_MESSAGE(FilesystemSubscriptionImpl(dispatcher, \"##!@/dev/null\", callbacks,\n                                                       resource_decoder, stats, validation_visitor,\n                                                       *api),\n                            EnvoyException, \"bad path\");\n}\n\n// Validate that the update_time statistic isn't changed when the configuration update gets\n// rejected.\nTEST_F(FilesystemSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  EXPECT_CALL(callbacks_,\n              onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _));\n  updateFile(\";!@#badjso n\");\n  EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, \"\"));\n}\n\n// Validate that the update_time statistic is changed after a trivial configuration update\n// (update that resulted in no change).\nTEST_F(FilesystemSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n  // Advance the simulated time.\n  simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS + 1)));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 
7148434200721666028, \"0\"));\n}\n\n// TODO(htuch): Add generic test harness support for collection subscriptions so that we can test\n// gRPC/HTTP transports similar to below.\nclass FilesystemCollectionSubscriptionImplTest : public testing::Test,\n                                                 Event::TestUsingSimulatedTime {\npublic:\n  FilesystemCollectionSubscriptionImplTest()\n      : path_(TestEnvironment::temporaryPath(\"lds.yaml\")),\n        stats_(Utility::generateStats(stats_store_)),\n        api_(Api::createApiForTest(stats_store_, simTime())), dispatcher_(setupDispatcher()),\n        subscription_(*dispatcher_, path_, callbacks_, resource_decoder_, stats_,\n                      ProtobufMessage::getStrictValidationVisitor(), *api_) {}\n  ~FilesystemCollectionSubscriptionImplTest() override { TestEnvironment::removePath(path_); }\n\n  Event::DispatcherPtr setupDispatcher() {\n    auto dispatcher = std::make_unique<Event::MockDispatcher>();\n    EXPECT_CALL(*dispatcher, createFilesystemWatcher_()).WillOnce(InvokeWithoutArgs([this] {\n      Filesystem::MockWatcher* mock_watcher = new Filesystem::MockWatcher();\n      EXPECT_CALL(*mock_watcher, addWatch(path_, Filesystem::Watcher::Events::MovedTo, _))\n          .WillOnce(Invoke([this](absl::string_view, uint32_t,\n                                  Filesystem::Watcher::OnChangedCb cb) { on_changed_cb_ = cb; }));\n      return mock_watcher;\n    }));\n    return dispatcher;\n  }\n\n  void updateFile(const std::string& yaml) {\n    // Write YAML contents to file, rename to path_ and invoke on change callback\n    const std::string temp_path = TestEnvironment::writeStringToFileForTest(\"lds.yaml.tmp\", yaml);\n    TestEnvironment::renameFile(temp_path, path_);\n    on_changed_cb_(Filesystem::Watcher::Events::MovedTo);\n  }\n\n  AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure,\n                           uint64_t version, absl::string_view version_text) {\n   
 if (attempt != stats_.update_attempt_.value()) {\n      return testing::AssertionFailure() << \"update_attempt: expected \" << attempt << \", got \"\n                                         << stats_.update_attempt_.value();\n    }\n    if (success != stats_.update_success_.value()) {\n      return testing::AssertionFailure() << \"update_success: expected \" << success << \", got \"\n                                         << stats_.update_success_.value();\n    }\n    if (rejected != stats_.update_rejected_.value()) {\n      return testing::AssertionFailure() << \"update_rejected: expected \" << rejected << \", got \"\n                                         << stats_.update_rejected_.value();\n    }\n    // The first attempt always fails.\n    if (1 + failure != stats_.update_failure_.value()) {\n      return testing::AssertionFailure() << \"update_failure: expected \" << 1 + failure << \", got \"\n                                         << stats_.update_failure_.value();\n    }\n    if (version != stats_.version_.value()) {\n      return testing::AssertionFailure()\n             << \"version: expected \" << version << \", got \" << stats_.version_.value();\n    }\n    if (version_text != stats_.version_text_.value()) {\n      return testing::AssertionFailure()\n             << \"version_text: expected \" << version_text << \", got \" << stats_.version_text_.value();\n    }\n    return testing::AssertionSuccess();\n  }\n\n  const std::string path_;\n  Stats::IsolatedStoreImpl stats_store_;\n  SubscriptionStats stats_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  Filesystem::Watcher::OnChangedCb on_changed_cb_;\n  NiceMock<Config::MockSubscriptionCallbacks> callbacks_;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::listener::v3::Listener>\n      resource_decoder_{\"name\"};\n  FilesystemCollectionSubscriptionImpl subscription_;\n};\n\n// Validate that an initial collection load succeeds, followed by a successful update, for inline\n// 
entries.\nTEST_F(FilesystemCollectionSubscriptionImplTest, InlineEntrySuccess) {\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::listener::v3::Listener>\n      resource_decoder(\"name\");\n  subscription_.start({});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, \"\"));\n  // Initial config load.\n  const auto inline_entry =\n      TestUtility::parseYaml<udpa::core::v1::CollectionEntry::InlineEntry>(R\"EOF(\nname: foo\nversion: resource.1\nresource:\n  \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n  name: foo\n  address:\n    socket_address:\n      protocol: TCP\n      address: 0.0.0.0\n      port_value: 10000\n  )EOF\");\n  const std::string resource = fmt::format(R\"EOF(\nversion: system.1\nresource:\n  \"@type\": type.googleapis.com/envoy.config.listener.v3.ListenerCollection\n  entries:\n  - inline_entry: {}\n  )EOF\",\n                                           MessageUtil::getJsonStringFromMessage(inline_entry));\n  DecodedResourcesWrapper decoded_resources;\n  decoded_resources.pushBack(std::make_unique<DecodedResourceImpl>(resource_decoder, inline_entry));\n  EXPECT_CALL(callbacks_,\n              onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), \"system.1\"));\n  updateFile(resource);\n  EXPECT_TRUE(statsAre(2, 1, 0, 0, 1471442407191366964, \"system.1\"));\n  // Update.\n  const auto inline_entry_2 =\n      TestUtility::parseYaml<udpa::core::v1::CollectionEntry::InlineEntry>(R\"EOF(\nname: foo\nversion: resource.2\nresource:\n  \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n  name: foo\n  address:\n    socket_address:\n      protocol: TCP\n      address: 0.0.0.1\n      port_value: 10001\n  )EOF\");\n  const std::string resource_2 = fmt::format(R\"EOF(\nversion: system.2\nresource:\n  \"@type\": type.googleapis.com/envoy.config.listener.v3.ListenerCollection\n  entries:\n  - inline_entry: {}\n  )EOF\",\n                                             
MessageUtil::getJsonStringFromMessage(inline_entry_2));\n  {\n    DecodedResourcesWrapper decoded_resources_2;\n    decoded_resources_2.pushBack(\n        std::make_unique<DecodedResourceImpl>(resource_decoder, inline_entry_2));\n    EXPECT_CALL(callbacks_,\n                onConfigUpdate(DecodedResourcesEq(decoded_resources_2.refvec_), \"system.2\"));\n    updateFile(resource_2);\n  }\n  EXPECT_TRUE(statsAre(3, 2, 0, 0, 17889017004055064037ULL, \"system.2\"));\n}\n\n// Validate handling of invalid resource wrappers\nTEST_F(FilesystemCollectionSubscriptionImplTest, BadEnvelope) {\n  subscription_.start({});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, \"\"));\n  EXPECT_CALL(callbacks_, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, _));\n  // Unknown collection type.\n  updateFile(\"{}\");\n  EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, \"\"));\n  const std::string resource = R\"EOF(\nversion: system.1\nresource:\n  \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n  )EOF\";\n  EXPECT_CALL(callbacks_, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, _));\n  // Invalid collection type structure.\n  updateFile(resource);\n  EXPECT_TRUE(statsAre(3, 0, 0, 2, 0, \"\"));\n}\n\n// Validate handling of unknown fields.\nTEST_F(FilesystemCollectionSubscriptionImplTest, UnknownFields) {\n  subscription_.start({});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, \"\"));\n  const std::string resource = R\"EOF(\nversion: system.1\nresource:\n  \"@type\": type.googleapis.com/envoy.config.listener.v3.ListenerCollection\n  entries:\n  - inline_entry:\n      name: foo\n      version: resource.1\n      resource:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: foo\n        unknown_bar: baz\n        address:\n          socket_address:\n            protocol: TCP\n            address: 0.0.0.0\n            port_value: 10000\n  )EOF\";\n  EXPECT_CALL(callbacks_, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, 
_));\n  updateFile(resource);\n  EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, \"\"));\n}\n\n// Validate handling of rejected config.\nTEST_F(FilesystemCollectionSubscriptionImplTest, ConfigRejection) {\n  subscription_.start({});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, \"\"));\n  const std::string resource = R\"EOF(\nversion: system.1\nresource:\n  \"@type\": type.googleapis.com/envoy.config.listener.v3.ListenerCollection\n  entries:\n  - inline_entry:\n      name: foo\n      version: resource.1\n      resource:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: foo\n        address:\n          socket_address:\n            protocol: TCP\n            address: 0.0.0.0\n            port_value: 10000\n  )EOF\";\n  EXPECT_CALL(callbacks_, onConfigUpdate(_, _)).WillOnce(Throw(EnvoyException(\"blah\")));\n  EXPECT_CALL(callbacks_, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, _));\n  updateFile(resource);\n  EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, \"\"));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/filesystem_subscription_test_harness.h",
    "content": "#pragma once\n\n#include <fstream>\n\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.validate.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/filesystem_subscription_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/common/config/subscription_test_harness.h\"\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/filesystem/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Config {\n\nclass FilesystemSubscriptionTestHarness : public SubscriptionTestHarness {\npublic:\n  FilesystemSubscriptionTestHarness()\n      : path_(TestEnvironment::temporaryPath(\"eds.json\")),\n        api_(Api::createApiForTest(stats_store_, simTime())), dispatcher_(setupDispatcher()),\n        subscription_(*dispatcher_, path_, callbacks_, resource_decoder_, stats_,\n                      validation_visitor_, *api_) {}\n\n  ~FilesystemSubscriptionTestHarness() override { TestEnvironment::removePath(path_); }\n\n  Event::DispatcherPtr setupDispatcher() {\n    auto dispatcher = std::make_unique<Event::MockDispatcher>();\n    EXPECT_CALL(*dispatcher, createFilesystemWatcher_()).WillOnce(InvokeWithoutArgs([this] {\n      Filesystem::MockWatcher* mock_watcher = new Filesystem::MockWatcher();\n      EXPECT_CALL(*mock_watcher, addWatch(path_, Filesystem::Watcher::Events::MovedTo, _))\n          .WillOnce(Invoke([this](absl::string_view, uint32_t,\n                                  Filesystem::Watcher::OnChangedCb cb) { on_changed_cb_ = cb; }));\n      return mock_watcher;\n    }));\n    
return dispatcher;\n  }\n\n  void startSubscription(const std::set<std::string>& cluster_names) override {\n    std::ifstream config_file(path_);\n    file_at_start_ = config_file.good();\n    subscription_.start(cluster_names);\n  }\n\n  void updateResourceInterest(const std::set<std::string>& cluster_names) override {\n    subscription_.updateResourceInterest(cluster_names);\n  }\n\n  void updateFile(const std::string& json, bool run_dispatcher = true) {\n    // Write JSON contents to file, rename to path_ and invoke on change callback\n    const std::string temp_path = TestEnvironment::writeStringToFileForTest(\"eds.json.tmp\", json);\n    TestEnvironment::renameFile(temp_path, path_);\n    if (run_dispatcher) {\n      on_changed_cb_(Filesystem::Watcher::Events::MovedTo);\n    }\n  }\n\n  void expectSendMessage(const std::set<std::string>& cluster_names, const std::string& version,\n                         bool expect_node) override {\n    UNREFERENCED_PARAMETER(cluster_names);\n    UNREFERENCED_PARAMETER(version);\n    UNREFERENCED_PARAMETER(expect_node);\n  }\n\n  void deliverConfigUpdate(const std::vector<std::string>& cluster_names,\n                           const std::string& version, bool accept) override {\n    std::string file_json = \"{\\\"versionInfo\\\":\\\"\" + version + \"\\\",\\\"resources\\\":[\";\n    for (const auto& cluster : cluster_names) {\n      file_json += \"{\\\"@type\\\":\\\"type.googleapis.com/\"\n                   \"envoy.api.v2.ClusterLoadAssignment\\\",\\\"clusterName\\\":\\\"\" +\n                   cluster + \"\\\"},\";\n    }\n    file_json.pop_back();\n    file_json += \"]}\";\n    envoy::service::discovery::v3::DiscoveryResponse response_pb;\n    TestUtility::loadFromJson(file_json, response_pb);\n    const auto decoded_resources =\n        TestUtility::decodeResources<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n            response_pb, \"cluster_name\");\n    EXPECT_CALL(callbacks_, 
onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), version))\n        .WillOnce(ThrowOnRejectedConfig(accept));\n    if (accept) {\n      version_ = version;\n    } else {\n      EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _));\n    }\n    updateFile(file_json);\n  }\n\n  AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure,\n                           uint32_t init_fetch_timeout, uint64_t update_time, uint64_t version,\n                           absl::string_view version_text) override {\n    // The first attempt always fail unless there was a file there to begin with.\n    return SubscriptionTestHarness::statsAre(attempt, success, rejected,\n                                             failure + (file_at_start_ ? 0 : 1), init_fetch_timeout,\n                                             update_time, version, version_text);\n  }\n\n  void expectConfigUpdateFailed() override { stats_.update_failure_.inc(); }\n\n  void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds) override {\n    // initial_fetch_timeout not implemented.\n  }\n\n  void expectDisableInitFetchTimeoutTimer() override {\n    // initial_fetch_timeout not implemented\n  }\n\n  void callInitFetchTimeoutCb() override {\n    // initial_fetch_timeout not implemented\n  }\n\n  const std::string path_;\n  std::string version_;\n  Stats::IsolatedStoreImpl stats_store_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  Filesystem::Watcher::OnChangedCb on_changed_cb_;\n  NiceMock<Config::MockSubscriptionCallbacks> callbacks_;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder_{\"cluster_name\"};\n  FilesystemSubscriptionImpl subscription_;\n  bool file_at_start_{false};\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/grpc_mux_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.validate.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/grpc_mux_impl.h\"\n#include \"common/config/protobuf_link_hacks.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AtLeast;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::IsSubstring;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\n// We test some mux specific stuff below, other unit test coverage for singleton use of GrpcMuxImpl\n// is provided in [grpc_]subscription_impl_test.cc.\nclass GrpcMuxImplTestBase : public testing::Test {\npublic:\n  GrpcMuxImplTestBase()\n      : async_client_(new Grpc::MockAsyncClient()),\n        control_plane_connected_state_(\n            stats_.gauge(\"control_plane.connected_state\", Stats::Gauge::ImportMode::NeverImport)),\n        control_plane_pending_requests_(\n            
stats_.gauge(\"control_plane.pending_requests\", Stats::Gauge::ImportMode::NeverImport))\n\n  {}\n\n  void setup() {\n    grpc_mux_ = std::make_unique<GrpcMuxImpl>(\n        local_info_, std::unique_ptr<Grpc::MockAsyncClient>(async_client_), dispatcher_,\n        *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n            \"envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources\"),\n        envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, true);\n  }\n\n  void setup(const RateLimitSettings& custom_rate_limit_settings) {\n    grpc_mux_ = std::make_unique<GrpcMuxImpl>(\n        local_info_, std::unique_ptr<Grpc::MockAsyncClient>(async_client_), dispatcher_,\n        *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n            \"envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources\"),\n        envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, custom_rate_limit_settings,\n        true);\n  }\n\n  void expectSendMessage(const std::string& type_url,\n                         const std::vector<std::string>& resource_names, const std::string& version,\n                         bool first = false, const std::string& nonce = \"\",\n                         const Protobuf::int32 error_code = Grpc::Status::WellKnownGrpcStatus::Ok,\n                         const std::string& error_message = \"\") {\n    API_NO_BOOST(envoy::api::v2::DiscoveryRequest) expected_request;\n    if (first) {\n      expected_request.mutable_node()->CopyFrom(API_DOWNGRADE(local_info_.node()));\n    }\n    for (const auto& resource : resource_names) {\n      expected_request.add_resource_names(resource);\n    }\n    if (!version.empty()) {\n      expected_request.set_version_info(version);\n    }\n    expected_request.set_response_nonce(nonce);\n    expected_request.set_type_url(type_url);\n    if (error_code != Grpc::Status::WellKnownGrpcStatus::Ok) {\n      
::google::rpc::Status* error_detail = expected_request.mutable_error_detail();\n      error_detail->set_code(error_code);\n      error_detail->set_message(error_message);\n    }\n    EXPECT_CALL(async_stream_, sendMessageRaw_(Grpc::ProtoBufferEq(expected_request), false));\n  }\n\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  Grpc::MockAsyncClient* async_client_;\n  Grpc::MockAsyncStream async_stream_;\n  GrpcMuxImplPtr grpc_mux_;\n  NiceMock<MockSubscriptionCallbacks> callbacks_;\n  NiceMock<MockOpaqueResourceDecoder> resource_decoder_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  Stats::TestUtil::TestStore stats_;\n  Envoy::Config::RateLimitSettings rate_limit_settings_;\n  Stats::Gauge& control_plane_connected_state_;\n  Stats::Gauge& control_plane_pending_requests_;\n};\n\nclass GrpcMuxImplTest : public GrpcMuxImplTestBase {\npublic:\n  Event::SimulatedTimeSystem time_system_;\n};\n\n// Validate behavior when multiple type URL watches are maintained, watches are created/destroyed\n// (via RAII).\nTEST_F(GrpcMuxImplTest, MultipleTypeUrlStreams) {\n  setup();\n  InSequence s;\n  auto foo_sub = grpc_mux_->addWatch(\"foo\", {\"x\", \"y\"}, callbacks_, resource_decoder_);\n  auto bar_sub = grpc_mux_->addWatch(\"bar\", {}, callbacks_, resource_decoder_);\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(\"foo\", {\"x\", \"y\"}, \"\", true);\n  expectSendMessage(\"bar\", {}, \"\");\n  grpc_mux_->start();\n  EXPECT_EQ(1, control_plane_connected_state_.value());\n  expectSendMessage(\"bar\", {\"z\"}, \"\");\n  auto bar_z_sub = grpc_mux_->addWatch(\"bar\", {\"z\"}, callbacks_, resource_decoder_);\n  expectSendMessage(\"bar\", {\"zz\", \"z\"}, \"\");\n  auto bar_zz_sub = grpc_mux_->addWatch(\"bar\", {\"zz\"}, callbacks_, resource_decoder_);\n  expectSendMessage(\"bar\", {\"z\"}, \"\");\n  expectSendMessage(\"bar\", {}, \"\");\n  
expectSendMessage(\"foo\", {}, \"\");\n}\n\n// Validate behavior when multiple type URL watches are maintained and the stream is reset.\nTEST_F(GrpcMuxImplTest, ResetStream) {\n  InSequence s;\n\n  Event::MockTimer* timer = nullptr;\n  Event::TimerCb timer_cb;\n  EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([&timer, &timer_cb](Event::TimerCb cb) {\n    timer_cb = cb;\n    EXPECT_EQ(nullptr, timer);\n    timer = new Event::MockTimer();\n    return timer;\n  }));\n\n  setup();\n  auto foo_sub = grpc_mux_->addWatch(\"foo\", {\"x\", \"y\"}, callbacks_, resource_decoder_);\n  auto bar_sub = grpc_mux_->addWatch(\"bar\", {}, callbacks_, resource_decoder_);\n  auto baz_sub = grpc_mux_->addWatch(\"baz\", {\"z\"}, callbacks_, resource_decoder_);\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(\"foo\", {\"x\", \"y\"}, \"\", true);\n  expectSendMessage(\"bar\", {}, \"\");\n  expectSendMessage(\"baz\", {\"z\"}, \"\");\n  grpc_mux_->start();\n\n  EXPECT_CALL(callbacks_,\n              onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _))\n      .Times(3);\n  EXPECT_CALL(random_, random());\n  ASSERT_TRUE(timer != nullptr); // initialized from dispatcher mock.\n  EXPECT_CALL(*timer, enableTimer(_, _));\n  grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, \"\");\n  EXPECT_EQ(0, control_plane_connected_state_.value());\n  EXPECT_EQ(0, control_plane_pending_requests_.value());\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(\"foo\", {\"x\", \"y\"}, \"\", true);\n  expectSendMessage(\"bar\", {}, \"\");\n  expectSendMessage(\"baz\", {\"z\"}, \"\");\n  timer_cb();\n\n  expectSendMessage(\"baz\", {}, \"\");\n  expectSendMessage(\"foo\", {}, \"\");\n}\n\n// Validate pause-resume behavior.\nTEST_F(GrpcMuxImplTest, PauseResume) {\n  setup();\n  InSequence s;\n  GrpcMuxWatchPtr foo_sub;\n  
GrpcMuxWatchPtr foo_z_sub;\n  GrpcMuxWatchPtr foo_zz_sub;\n  foo_sub = grpc_mux_->addWatch(\"foo\", {\"x\", \"y\"}, callbacks_, resource_decoder_);\n  {\n    ScopedResume a = grpc_mux_->pause(\"foo\");\n    EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n    grpc_mux_->start();\n    expectSendMessage(\"foo\", {\"x\", \"y\"}, \"\", true);\n  }\n  {\n    ScopedResume a = grpc_mux_->pause(\"bar\");\n    expectSendMessage(\"foo\", {\"z\", \"x\", \"y\"}, \"\");\n    foo_z_sub = grpc_mux_->addWatch(\"foo\", {\"z\"}, callbacks_, resource_decoder_);\n  }\n  {\n    ScopedResume a = grpc_mux_->pause(\"foo\");\n    foo_zz_sub = grpc_mux_->addWatch(\"foo\", {\"zz\"}, callbacks_, resource_decoder_);\n    expectSendMessage(\"foo\", {\"zz\", \"z\", \"x\", \"y\"}, \"\");\n  }\n  // When nesting, we only have a single resumption.\n  {\n    ScopedResume a = grpc_mux_->pause(\"foo\");\n    ScopedResume b = grpc_mux_->pause(\"foo\");\n    foo_zz_sub = grpc_mux_->addWatch(\"foo\", {\"zz\"}, callbacks_, resource_decoder_);\n    expectSendMessage(\"foo\", {\"zz\", \"z\", \"x\", \"y\"}, \"\");\n  }\n  grpc_mux_->pause(\"foo\")->cancel();\n}\n\n// Validate behavior when type URL mismatches occur.\nTEST_F(GrpcMuxImplTest, TypeUrlMismatch) {\n  setup();\n\n  auto invalid_response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n  InSequence s;\n  auto foo_sub = grpc_mux_->addWatch(\"foo\", {\"x\", \"y\"}, callbacks_, resource_decoder_);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(\"foo\", {\"x\", \"y\"}, \"\", true);\n  grpc_mux_->start();\n\n  {\n    auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n    response->set_type_url(\"bar\");\n    grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n  }\n\n  {\n    invalid_response->set_type_url(\"foo\");\n    
invalid_response->mutable_resources()->Add()->set_type_url(\"bar\");\n    EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _))\n        .WillOnce(Invoke([](Envoy::Config::ConfigUpdateFailureReason, const EnvoyException* e) {\n          EXPECT_TRUE(IsSubstring(\n              \"\", \"\", \"bar does not match the message-wide type URL foo in DiscoveryResponse\",\n              e->what()));\n        }));\n\n    expectSendMessage(\n        \"foo\", {\"x\", \"y\"}, \"\", false, \"\", Grpc::Status::WellKnownGrpcStatus::Internal,\n        fmt::format(\"bar does not match the message-wide type URL foo in DiscoveryResponse {}\",\n                    invalid_response->DebugString()));\n    grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(invalid_response));\n  }\n  expectSendMessage(\"foo\", {}, \"\");\n}\n\nTEST_F(GrpcMuxImplTest, RpcErrorMessageTruncated) {\n  setup();\n  auto invalid_response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n  InSequence s;\n  auto foo_sub = grpc_mux_->addWatch(\"foo\", {\"x\", \"y\"}, callbacks_, resource_decoder_);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(\"foo\", {\"x\", \"y\"}, \"\", true);\n  grpc_mux_->start();\n\n  { // Large error message sent back to management server is truncated.\n    const std::string very_large_type_url(1 << 20, 'A');\n    invalid_response->set_type_url(\"foo\");\n    invalid_response->mutable_resources()->Add()->set_type_url(very_large_type_url);\n    EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _))\n        .WillOnce(Invoke([&very_large_type_url](Envoy::Config::ConfigUpdateFailureReason,\n                                                const EnvoyException* e) {\n          EXPECT_TRUE(IsSubstring(\n              \"\", \"\",\n              fmt::format(\"{} does not match the message-wide type URL foo in DiscoveryResponse\",\n                          very_large_type_url), // Local error message is not 
truncated.\n              e->what()));\n        }));\n    expectSendMessage(\"foo\", {\"x\", \"y\"}, \"\", false, \"\", Grpc::Status::WellKnownGrpcStatus::Internal,\n                      fmt::format(\"{}...(truncated)\", std::string(4096, 'A')));\n    grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(invalid_response));\n  }\n  expectSendMessage(\"foo\", {}, \"\");\n}\n\n// Validate behavior when watches has an unknown resource name.\nTEST_F(GrpcMuxImplTest, WildcardWatch) {\n  setup();\n\n  InSequence s;\n  const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  auto foo_sub = grpc_mux_->addWatch(type_url, {}, callbacks_, resource_decoder);\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(type_url, {}, \"\", true);\n  grpc_mux_->start();\n\n  {\n    auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n    response->set_type_url(type_url);\n    response->set_version_info(\"1\");\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment;\n    load_assignment.set_cluster_name(\"x\");\n    response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment));\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, \"1\"))\n        .WillOnce(Invoke([&load_assignment](const std::vector<DecodedResourceRef>& resources,\n                                            const std::string&) {\n          EXPECT_EQ(1, resources.size());\n          const auto& expected_assignment =\n              dynamic_cast<const envoy::config::endpoint::v3::ClusterLoadAssignment&>(\n                  resources[0].get().resource());\n          EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment));\n        }));\n    expectSendMessage(type_url, {}, \"1\");\n    
grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n  }\n}\n\n// Validate behavior when watches specify resources (potentially overlapping).\nTEST_F(GrpcMuxImplTest, WatchDemux) {\n  setup();\n  InSequence s;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n  NiceMock<MockSubscriptionCallbacks> foo_callbacks;\n  auto foo_sub = grpc_mux_->addWatch(type_url, {\"x\", \"y\"}, foo_callbacks, resource_decoder);\n  NiceMock<MockSubscriptionCallbacks> bar_callbacks;\n  auto bar_sub = grpc_mux_->addWatch(type_url, {\"y\", \"z\"}, bar_callbacks, resource_decoder);\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  // Should dedupe the \"x\" resource.\n  expectSendMessage(type_url, {\"y\", \"z\", \"x\"}, \"\", true);\n  grpc_mux_->start();\n\n  {\n    auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n    response->set_type_url(type_url);\n    response->set_version_info(\"1\");\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment;\n    load_assignment.set_cluster_name(\"x\");\n    response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment));\n    EXPECT_CALL(bar_callbacks, onConfigUpdate(_, \"1\")).Times(0);\n    EXPECT_CALL(foo_callbacks, onConfigUpdate(_, \"1\"))\n        .WillOnce(Invoke([&load_assignment](const std::vector<DecodedResourceRef>& resources,\n                                            const std::string&) {\n          EXPECT_EQ(1, resources.size());\n          const auto& expected_assignment =\n              dynamic_cast<const envoy::config::endpoint::v3::ClusterLoadAssignment&>(\n                  resources[0].get().resource());\n          EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment));\n        }));\n    expectSendMessage(type_url, {\"y\", 
\"z\", \"x\"}, \"1\");\n    grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n  }\n\n  {\n    auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n    response->set_type_url(type_url);\n    response->set_version_info(\"2\");\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_x;\n    load_assignment_x.set_cluster_name(\"x\");\n    response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment_x));\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_y;\n    load_assignment_y.set_cluster_name(\"y\");\n    response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment_y));\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_z;\n    load_assignment_z.set_cluster_name(\"z\");\n    response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment_z));\n    EXPECT_CALL(bar_callbacks, onConfigUpdate(_, \"2\"))\n        .WillOnce(Invoke([&load_assignment_y, &load_assignment_z](\n                             const std::vector<DecodedResourceRef>& resources, const std::string&) {\n          EXPECT_EQ(2, resources.size());\n          const auto& expected_assignment =\n              dynamic_cast<const envoy::config::endpoint::v3::ClusterLoadAssignment&>(\n                  resources[0].get().resource());\n          EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y));\n          const auto& expected_assignment_1 =\n              dynamic_cast<const envoy::config::endpoint::v3::ClusterLoadAssignment&>(\n                  resources[1].get().resource());\n          EXPECT_TRUE(TestUtility::protoEqual(expected_assignment_1, load_assignment_z));\n        }));\n    EXPECT_CALL(foo_callbacks, onConfigUpdate(_, \"2\"))\n        .WillOnce(Invoke([&load_assignment_x, &load_assignment_y](\n                             const std::vector<DecodedResourceRef>& resources, const std::string&) {\n          EXPECT_EQ(2, resources.size());\n          const 
auto& expected_assignment =\n              dynamic_cast<const envoy::config::endpoint::v3::ClusterLoadAssignment&>(\n                  resources[0].get().resource());\n          EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_x));\n          const auto& expected_assignment_1 =\n              dynamic_cast<const envoy::config::endpoint::v3::ClusterLoadAssignment&>(\n                  resources[1].get().resource());\n          EXPECT_TRUE(TestUtility::protoEqual(expected_assignment_1, load_assignment_y));\n        }));\n    expectSendMessage(type_url, {\"y\", \"z\", \"x\"}, \"2\");\n    grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n  }\n\n  expectSendMessage(type_url, {\"x\", \"y\"}, \"2\");\n  expectSendMessage(type_url, {}, \"2\");\n}\n\n// Validate behavior when we have multiple watchers that send empty updates.\nTEST_F(GrpcMuxImplTest, MultipleWatcherWithEmptyUpdates) {\n  setup();\n  InSequence s;\n  const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n  NiceMock<MockSubscriptionCallbacks> foo_callbacks;\n  auto foo_sub = grpc_mux_->addWatch(type_url, {\"x\", \"y\"}, foo_callbacks, resource_decoder_);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(type_url, {\"x\", \"y\"}, \"\", true);\n  grpc_mux_->start();\n\n  auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n  response->set_type_url(type_url);\n  response->set_version_info(\"1\");\n\n  EXPECT_CALL(foo_callbacks, onConfigUpdate(_, \"1\")).Times(0);\n  expectSendMessage(type_url, {\"x\", \"y\"}, \"1\");\n  grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n\n  expectSendMessage(type_url, {}, \"1\");\n}\n\n// Validate behavior when we have Single Watcher that sends Empty updates.\nTEST_F(GrpcMuxImplTest, SingleWatcherWithEmptyUpdates) {\n  setup();\n  const std::string& type_url = Config::TypeUrl::get().Cluster;\n  
NiceMock<MockSubscriptionCallbacks> foo_callbacks;\n  auto foo_sub = grpc_mux_->addWatch(type_url, {}, foo_callbacks, resource_decoder_);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(type_url, {}, \"\", true);\n  grpc_mux_->start();\n\n  auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n  response->set_type_url(type_url);\n  response->set_version_info(\"1\");\n  // Validate that onConfigUpdate is called with empty resources.\n  EXPECT_CALL(foo_callbacks, onConfigUpdate(_, \"1\"))\n      .WillOnce(Invoke([](const std::vector<DecodedResourceRef>& resources, const std::string&) {\n        EXPECT_TRUE(resources.empty());\n      }));\n  expectSendMessage(type_url, {}, \"1\");\n  grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n}\n\n// Exactly one test requires a mock time system to provoke behavior that cannot\n// easily be achieved with a SimulatedTimeSystem.\nclass GrpcMuxImplTestWithMockTimeSystem : public GrpcMuxImplTestBase {\npublic:\n  Event::DelegatingTestTimeSystem<MockTimeSystem> mock_time_system_;\n};\n\n//  Verifies that rate limiting is not enforced with defaults.\nTEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithDefaultSettings) {\n  // Validate that only connection retry timer is enabled.\n  Event::MockTimer* timer = nullptr;\n  Event::TimerCb timer_cb;\n  EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([&timer, &timer_cb](Event::TimerCb cb) {\n    timer_cb = cb;\n    EXPECT_EQ(nullptr, timer);\n    timer = new Event::MockTimer();\n    return timer;\n  }));\n\n  // Validate that rate limiter is not created.\n  EXPECT_CALL(*mock_time_system_, monotonicTime()).Times(0);\n\n  setup();\n\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(AtLeast(99));\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n\n  const auto onReceiveMessage = [&](uint64_t burst) {\n    for 
(uint64_t i = 0; i < burst; i++) {\n      auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n      response->set_version_info(\"baz\");\n      response->set_nonce(\"bar\");\n      response->set_type_url(\"foo\");\n      grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n    }\n  };\n\n  auto foo_sub = grpc_mux_->addWatch(\"foo\", {\"x\"}, callbacks_, resource_decoder_);\n  expectSendMessage(\"foo\", {\"x\"}, \"\", true);\n  grpc_mux_->start();\n\n  // Exhausts the limit.\n  onReceiveMessage(99);\n\n  // API calls go over the limit but we do not see the stat incremented.\n  onReceiveMessage(1);\n  EXPECT_EQ(0, stats_.counter(\"control_plane.rate_limit_enforced\").value());\n}\n\n//  Verifies that default rate limiting is enforced with empty RateLimitSettings.\nTEST_F(GrpcMuxImplTest, TooManyRequestsWithEmptyRateLimitSettings) {\n  // Validate that request drain timer is created.\n  Event::MockTimer* timer = nullptr;\n  Event::MockTimer* drain_request_timer = nullptr;\n\n  Event::TimerCb timer_cb;\n  Event::TimerCb drain_timer_cb;\n  EXPECT_CALL(dispatcher_, createTimer_(_))\n      .WillOnce(Invoke([&timer, &timer_cb](Event::TimerCb cb) {\n        timer_cb = cb;\n        EXPECT_EQ(nullptr, timer);\n        timer = new Event::MockTimer();\n        return timer;\n      }))\n      .WillOnce(Invoke([&drain_request_timer, &drain_timer_cb](Event::TimerCb cb) {\n        drain_timer_cb = cb;\n        EXPECT_EQ(nullptr, drain_request_timer);\n        drain_request_timer = new Event::MockTimer();\n        return drain_request_timer;\n      }));\n\n  RateLimitSettings custom_rate_limit_settings;\n  custom_rate_limit_settings.enabled_ = true;\n  setup(custom_rate_limit_settings);\n\n  // Attempt to send 99 messages. 
One of them is rate limited (and we never drain).\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(99);\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n\n  const auto onReceiveMessage = [&](uint64_t burst) {\n    for (uint64_t i = 0; i < burst; i++) {\n      auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n      response->set_version_info(\"baz\");\n      response->set_nonce(\"bar\");\n      response->set_type_url(\"foo\");\n      grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n    }\n  };\n\n  auto foo_sub = grpc_mux_->addWatch(\"foo\", {\"x\"}, callbacks_, resource_decoder_);\n  expectSendMessage(\"foo\", {\"x\"}, \"\", true);\n  grpc_mux_->start();\n\n  // Validate that drain_request_timer is enabled when there are no tokens.\n  EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(100), _));\n  // The drain timer enable is checked twice, once when we limit, again when the watch is destroyed.\n  EXPECT_CALL(*drain_request_timer, enabled()).Times(11);\n  onReceiveMessage(110);\n  EXPECT_EQ(11, stats_.counter(\"control_plane.rate_limit_enforced\").value());\n  EXPECT_EQ(11, control_plane_pending_requests_.value());\n\n  // Validate that when we reset a stream with pending requests, it reverts back to the initial\n  // query (i.e. 
the queue is discarded).\n  EXPECT_CALL(callbacks_,\n              onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _));\n  EXPECT_CALL(random_, random());\n  ASSERT_TRUE(timer != nullptr); // initialized from dispatcher mock.\n  EXPECT_CALL(*timer, enableTimer(_, _));\n  grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, \"\");\n  EXPECT_EQ(11, control_plane_pending_requests_.value());\n  EXPECT_EQ(0, control_plane_connected_state_.value());\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, false));\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  time_system_.setMonotonicTime(std::chrono::seconds(30));\n  timer_cb();\n  EXPECT_EQ(0, control_plane_pending_requests_.value());\n  // One more message on the way out when the watch is destroyed.\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, false));\n}\n\n//  Verifies that rate limiting is enforced with custom RateLimitSettings.\nTEST_F(GrpcMuxImplTest, TooManyRequestsWithCustomRateLimitSettings) {\n  // Validate that request drain timer is created.\n  Event::MockTimer* timer = nullptr;\n  Event::MockTimer* drain_request_timer = nullptr;\n\n  Event::TimerCb timer_cb;\n  Event::TimerCb drain_timer_cb;\n\n  EXPECT_CALL(dispatcher_, createTimer_(_))\n      .WillOnce(Invoke([&timer, &timer_cb](Event::TimerCb cb) {\n        timer_cb = cb;\n        EXPECT_EQ(nullptr, timer);\n        timer = new Event::MockTimer();\n        return timer;\n      }))\n      .WillOnce(Invoke([&drain_request_timer, &drain_timer_cb](Event::TimerCb cb) {\n        drain_timer_cb = cb;\n        EXPECT_EQ(nullptr, drain_request_timer);\n        drain_request_timer = new Event::MockTimer();\n        return drain_request_timer;\n      }));\n\n  RateLimitSettings custom_rate_limit_settings;\n  custom_rate_limit_settings.enabled_ = true;\n  custom_rate_limit_settings.max_tokens_ = 250;\n  custom_rate_limit_settings.fill_rate_ = 2;\n  
setup(custom_rate_limit_settings);\n\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(AtLeast(260));\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n\n  const auto onReceiveMessage = [&](uint64_t burst) {\n    for (uint64_t i = 0; i < burst; i++) {\n      auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n      response->set_version_info(\"baz\");\n      response->set_nonce(\"bar\");\n      response->set_type_url(\"foo\");\n      grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n    }\n  };\n\n  auto foo_sub = grpc_mux_->addWatch(\"foo\", {\"x\"}, callbacks_, resource_decoder_);\n  expectSendMessage(\"foo\", {\"x\"}, \"\", true);\n  grpc_mux_->start();\n\n  // Validate that rate limit is not enforced for 100 requests.\n  onReceiveMessage(100);\n  EXPECT_EQ(0, stats_.counter(\"control_plane.rate_limit_enforced\").value());\n\n  // Validate that drain_request_timer is enabled when there are no tokens.\n  EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(500), _));\n  EXPECT_CALL(*drain_request_timer, enabled()).Times(11);\n  onReceiveMessage(160);\n  EXPECT_EQ(11, stats_.counter(\"control_plane.rate_limit_enforced\").value());\n  EXPECT_EQ(11, control_plane_pending_requests_.value());\n\n  // Validate that drain requests call when there are multiple requests in queue.\n  time_system_.setMonotonicTime(std::chrono::seconds(10));\n  drain_timer_cb();\n\n  // Check that the pending_requests stat is updated with the queue drain.\n  EXPECT_EQ(0, control_plane_pending_requests_.value());\n}\n\n//  Verifies that a message with no resources is accepted.\nTEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsEmptyResources) {\n  setup();\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n\n  const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n\n  grpc_mux_->start();\n  {\n    // subscribe and 
unsubscribe to simulate a cluster added and removed\n    expectSendMessage(type_url, {\"y\"}, \"\", true);\n    auto temp_sub = grpc_mux_->addWatch(type_url, {\"y\"}, callbacks_, resource_decoder_);\n    expectSendMessage(type_url, {}, \"\");\n  }\n\n  // simulate the server sending empty CLA message to notify envoy that the CLA was removed.\n  auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n  response->set_nonce(\"bar\");\n  response->set_version_info(\"1\");\n  response->set_type_url(type_url);\n\n  // TODO(fredlas) the expectation of no discovery request here is against the xDS spec.\n  // The upcoming xDS overhaul (part of/followup to PR7293) will fix this.\n  //\n  // This contains zero resources. No discovery request should be sent.\n  grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n\n  // when we add the new subscription version should be 1 and nonce should be bar\n  expectSendMessage(type_url, {\"x\"}, \"1\", false, \"bar\");\n\n  // simulate a new cluster x is added. add CLA subscription for it.\n  auto sub = grpc_mux_->addWatch(type_url, {\"x\"}, callbacks_, resource_decoder_);\n  expectSendMessage(type_url, {}, \"1\", false, \"bar\");\n}\n\n//  Verifies that a message with some resources is rejected when there are no watches.\nTEST_F(GrpcMuxImplTest, UnwatchedTypeRejectsResources) {\n  setup();\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n\n  const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n\n  grpc_mux_->start();\n  // subscribe and unsubscribe (by not keeping the return watch) so that the type is known to envoy\n  expectSendMessage(type_url, {\"y\"}, \"\", true);\n  expectSendMessage(type_url, {}, \"\");\n  grpc_mux_->addWatch(type_url, {\"y\"}, callbacks_, resource_decoder_);\n\n  // simulate the server sending CLA message to notify envoy that the CLA was added,\n  // even though envoy doesn't expect it. 
Envoy should reject this update.\n  auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n  response->set_nonce(\"bar\");\n  response->set_version_info(\"1\");\n  response->set_type_url(type_url);\n\n  envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment;\n  load_assignment.set_cluster_name(\"x\");\n  response->add_resources()->PackFrom(load_assignment);\n\n  // The message should be rejected.\n  expectSendMessage(type_url, {}, \"\", false, \"bar\");\n  EXPECT_LOG_CONTAINS(\"warning\", \"Ignoring unwatched type URL \" + type_url,\n                      grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)));\n}\n\nTEST_F(GrpcMuxImplTest, BadLocalInfoEmptyClusterName) {\n  EXPECT_CALL(local_info_, clusterName()).WillOnce(ReturnRef(EMPTY_STRING));\n  EXPECT_THROW_WITH_MESSAGE(\n      GrpcMuxImpl(\n          local_info_, std::unique_ptr<Grpc::MockAsyncClient>(async_client_), dispatcher_,\n          *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n              \"envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources\"),\n          envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, true),\n      EnvoyException,\n      \"ads: node 'id' and 'cluster' are required. 
Set it either in 'node' config or via \"\n      \"--service-node and --service-cluster options.\");\n}\n\nTEST_F(GrpcMuxImplTest, BadLocalInfoEmptyNodeName) {\n  EXPECT_CALL(local_info_, nodeName()).WillOnce(ReturnRef(EMPTY_STRING));\n  EXPECT_THROW_WITH_MESSAGE(\n      GrpcMuxImpl(\n          local_info_, std::unique_ptr<Grpc::MockAsyncClient>(async_client_), dispatcher_,\n          *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n              \"envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources\"),\n          envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, true),\n      EnvoyException,\n      \"ads: node 'id' and 'cluster' are required. Set it either in 'node' config or via \"\n      \"--service-node and --service-cluster options.\");\n}\n\n// Send discovery request with v2 resource type_url, receive discovery response with v3 resource\n// type_url.\nTEST_F(GrpcMuxImplTest, WatchV2ResourceV3) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade\", \"true\"}});\n  setup();\n\n  InSequence s;\n  const std::string& v2_type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n  const std::string& v3_type_url =\n      Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n          envoy::config::core::v3::ApiVersion::V3);\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  auto foo_sub = grpc_mux_->addWatch(v2_type_url, {}, callbacks_, resource_decoder);\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(v2_type_url, {}, \"\", true);\n  grpc_mux_->start();\n\n  {\n    auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n    response->set_type_url(v3_type_url);\n    
response->set_version_info(\"1\");\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment;\n    load_assignment.set_cluster_name(\"x\");\n    response->add_resources()->PackFrom(load_assignment);\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, \"1\"))\n        .WillOnce(Invoke([&load_assignment](const std::vector<DecodedResourceRef>& resources,\n                                            const std::string&) {\n          EXPECT_EQ(1, resources.size());\n          const auto& expected_assignment =\n              dynamic_cast<const envoy::config::endpoint::v3::ClusterLoadAssignment&>(\n                  resources[0].get().resource());\n          EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment));\n        }));\n    expectSendMessage(v2_type_url, {}, \"1\");\n    grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n  }\n}\n\n// Send discovery request with v3 resource type_url, receive discovery response with v2 resource\n// type_url.\nTEST_F(GrpcMuxImplTest, WatchV3ResourceV2) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade\", \"true\"}});\n  setup();\n\n  InSequence s;\n  const std::string& v2_type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n  const std::string& v3_type_url =\n      Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n          envoy::config::core::v3::ApiVersion::V3);\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  auto foo_sub = grpc_mux_->addWatch(v3_type_url, {}, callbacks_, resource_decoder);\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage(v3_type_url, {}, \"\", true);\n  grpc_mux_->start();\n\n  {\n\n    auto response = 
std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n    response->set_type_url(v2_type_url);\n    response->set_version_info(\"1\");\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment;\n    load_assignment.set_cluster_name(\"x\");\n    response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment));\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, \"1\"))\n        .WillOnce(Invoke([&load_assignment](const std::vector<DecodedResourceRef>& resources,\n                                            const std::string&) {\n          EXPECT_EQ(1, resources.size());\n          const auto& expected_assignment =\n              dynamic_cast<const envoy::config::endpoint::v3::ClusterLoadAssignment&>(\n                  resources[0].get().resource());\n          EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment));\n        }));\n    expectSendMessage(v3_type_url, {}, \"1\");\n    grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n  }\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/grpc_stream_test.cc",
    "content": "#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/grpc_stream.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nclass GrpcStreamTest : public testing::Test {\nprotected:\n  GrpcStreamTest()\n      : async_client_owner_(std::make_unique<Grpc::MockAsyncClient>()),\n        async_client_(async_client_owner_.get()),\n        grpc_stream_(&callbacks_, std::move(async_client_owner_),\n                     *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n                         \"envoy.api.v2.EndpointDiscoveryService.StreamEndpoints\"),\n                     random_, dispatcher_, stats_, rate_limit_settings_) {}\n\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Grpc::MockAsyncStream async_stream_;\n  Stats::TestUtil::TestStore stats_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  Envoy::Config::RateLimitSettings rate_limit_settings_;\n  NiceMock<MockGrpcStreamCallbacks> callbacks_;\n  std::unique_ptr<Grpc::MockAsyncClient> async_client_owner_;\n  Grpc::MockAsyncClient* async_client_;\n\n  GrpcStream<envoy::service::discovery::v3::DiscoveryRequest,\n             envoy::service::discovery::v3::DiscoveryResponse>\n      grpc_stream_;\n};\n\n// Tests that establishNewStream() establishes it, a second call does nothing, and a third call\n// after the stream was disconnected re-establishes it.\nTEST_F(GrpcStreamTest, EstablishStream) {\n  EXPECT_FALSE(grpc_stream_.grpcStreamAvailable());\n  // Successful establishment\n  {\n    EXPECT_CALL(*async_client_, startRaw(_, _, _, 
_)).WillOnce(Return(&async_stream_));\n    EXPECT_CALL(callbacks_, onStreamEstablished());\n    grpc_stream_.establishNewStream();\n    EXPECT_TRUE(grpc_stream_.grpcStreamAvailable());\n  }\n  // Idempotent\n  {\n    EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).Times(0);\n    EXPECT_CALL(callbacks_, onStreamEstablished()).Times(0);\n    grpc_stream_.establishNewStream();\n    EXPECT_TRUE(grpc_stream_.grpcStreamAvailable());\n  }\n  grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Ok, \"\");\n  EXPECT_FALSE(grpc_stream_.grpcStreamAvailable());\n  // Successful re-establishment\n  {\n    EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n    EXPECT_CALL(callbacks_, onStreamEstablished());\n    grpc_stream_.establishNewStream();\n    EXPECT_TRUE(grpc_stream_.grpcStreamAvailable());\n  }\n}\n\n// A failure in the underlying gRPC machinery should result in grpcStreamAvailable() false. Calling\n// sendMessage would segfault.\nTEST_F(GrpcStreamTest, FailToEstablishNewStream) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(nullptr));\n  EXPECT_CALL(callbacks_, onEstablishmentFailure());\n  grpc_stream_.establishNewStream();\n  EXPECT_FALSE(grpc_stream_.grpcStreamAvailable());\n}\n\n// Checks that sendMessage correctly passes a DiscoveryRequest down to the underlying gRPC\n// machinery.\nTEST_F(GrpcStreamTest, SendMessage) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  grpc_stream_.establishNewStream();\n  envoy::service::discovery::v3::DiscoveryRequest request;\n  request.set_response_nonce(\"grpc_stream_test_noncense\");\n  EXPECT_CALL(async_stream_, sendMessageRaw_(Grpc::ProtoBufferEq(request), false));\n  grpc_stream_.sendMessage(request);\n}\n\n// Tests that, upon a call of the GrpcStream::onReceiveMessage() callback, which is called by the\n// underlying gRPC machinery, the received proto will make it up to the GrpcStreamCallbacks that the\n// 
GrpcStream was given.\nTEST_F(GrpcStreamTest, ReceiveMessage) {\n  envoy::service::discovery::v3::DiscoveryResponse response_copy;\n  response_copy.set_type_url(\"faketypeURL\");\n  auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>(response_copy);\n  envoy::service::discovery::v3::DiscoveryResponse received_message;\n  EXPECT_CALL(callbacks_, onDiscoveryResponse(_, _))\n      .WillOnce([&received_message](\n                    std::unique_ptr<envoy::service::discovery::v3::DiscoveryResponse>&& message,\n                    ControlPlaneStats&) { received_message = *message; });\n  grpc_stream_.onReceiveMessage(std::move(response));\n  EXPECT_TRUE(TestUtility::protoEqual(response_copy, received_message));\n}\n\n// If the value has only ever been 0, the stat should remain unused, including after an attempt to\n// write a 0 to it.\nTEST_F(GrpcStreamTest, QueueSizeStat) {\n  grpc_stream_.maybeUpdateQueueSizeStat(0);\n  Stats::Gauge& pending_requests =\n      stats_.gauge(\"control_plane.pending_requests\", Stats::Gauge::ImportMode::Accumulate);\n  EXPECT_FALSE(pending_requests.used());\n  grpc_stream_.maybeUpdateQueueSizeStat(123);\n  EXPECT_EQ(123, pending_requests.value());\n  grpc_stream_.maybeUpdateQueueSizeStat(0);\n  EXPECT_EQ(0, pending_requests.value());\n}\n\n// Just to add coverage to the no-op implementations of these callbacks (without exposing us to\n// crashes from a badly behaved peer like NOT_IMPLEMENTED_GCOVR_EXCL_LINE would).\nTEST_F(GrpcStreamTest, HeaderTrailerJustForCodeCoverage) {\n  Http::ResponseHeaderMapPtr response_headers{new Http::TestResponseHeaderMapImpl{}};\n  grpc_stream_.onReceiveInitialMetadata(std::move(response_headers));\n  Http::TestRequestHeaderMapImpl request_headers;\n  grpc_stream_.onCreateInitialMetadata(request_headers);\n  Http::ResponseTrailerMapPtr trailers{new Http::TestResponseTrailerMapImpl{}};\n  grpc_stream_.onReceiveTrailingMetadata(std::move(trailers));\n}\n\n} // namespace\n} // 
namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/grpc_subscription_impl_test.cc",
    "content": "#include \"test/common/config/grpc_subscription_test_harness.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::InSequence;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nclass GrpcSubscriptionImplTest : public testing::Test, public GrpcSubscriptionTestHarness {};\n\n// Validate that stream creation results in a timer based retry and can recover.\nTEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) {\n  InSequence s;\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(nullptr));\n\n  // onConfigUpdateFailed() should not be called for gRPC stream connection failure\n  EXPECT_CALL(callbacks_,\n              onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _))\n      .Times(0);\n  EXPECT_CALL(random_, random());\n  EXPECT_CALL(*timer_, enableTimer(_, _));\n  subscription_->start({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, \"\"));\n  // Ensure this doesn't cause an issue by sending a request, since we don't\n  // have a gRPC stream.\n  subscription_->updateResourceInterest({\"cluster2\"});\n\n  // Retry and succeed.\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n\n  expectSendMessage({\"cluster2\"}, \"\", true);\n  timer_cb_();\n  EXPECT_TRUE(statsAre(3, 0, 0, 1, 0, 0, 0, \"\"));\n  verifyControlPlaneStats(1);\n}\n\n// Validate that the client can recover from a remote stream closure via retry.\nTEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  // onConfigUpdateFailed() should not be called for gRPC stream connection failure\n  EXPECT_CALL(callbacks_,\n              onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _))\n      .Times(0);\n  EXPECT_CALL(*timer_, enableTimer(_, _));\n  EXPECT_CALL(random_, random());\n  
mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, \"\");\n  EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, \"\"));\n  verifyControlPlaneStats(0);\n\n  // Retry and succeed.\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage({\"cluster0\", \"cluster1\"}, \"\", true);\n  timer_cb_();\n  EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, \"\"));\n}\n\n// Validate that When the management server gets multiple requests for the same version, it can\n// ignore later ones. This allows the nonce to be used.\nTEST_F(GrpcSubscriptionImplTest, RepeatedNonce) {\n  InSequence s;\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  // First with the initial, empty version update to \"0\".\n  updateResourceInterest({\"cluster2\"});\n  EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster2\"}, \"0\", false);\n  EXPECT_TRUE(statsAre(3, 0, 1, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster2\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(4, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n  // Now with version \"0\" update to \"1\".\n  updateResourceInterest({\"cluster3\"});\n  EXPECT_TRUE(statsAre(5, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n  deliverConfigUpdate({\"cluster3\"}, \"42\", false);\n  EXPECT_TRUE(statsAre(6, 1, 2, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n  deliverConfigUpdate({\"cluster3\"}, \"42\", true);\n  EXPECT_TRUE(statsAre(7, 2, 2, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, \"42\"));\n}\n\nTEST_F(GrpcSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) {\n  InSequence s;\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster2\"}, \"0\", false);\n  EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, 
\"\"));\n}\n\nTEST_F(GrpcSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) {\n  InSequence s;\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster2\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n\n  // Advance the simulated time and verify that a trivial update (no change) also changes the update\n  // time.\n  simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS + 1)));\n  deliverConfigUpdate({\"cluster0\", \"cluster2\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(2, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028, \"0\"));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/grpc_subscription_test_harness.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.validate.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/grpc_mux_impl.h\"\n#include \"common/config/grpc_subscription_impl.h\"\n#include \"common/config/version_converter.h\"\n\n#include \"test/common/config/subscription_test_harness.h\"\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Mock;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Config {\n\nclass GrpcSubscriptionTestHarness : public SubscriptionTestHarness {\npublic:\n  GrpcSubscriptionTestHarness() : GrpcSubscriptionTestHarness(std::chrono::milliseconds(0)) {}\n\n  GrpcSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout)\n      : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n            \"envoy.api.v2.EndpointDiscoveryService.StreamEndpoints\")),\n        async_client_(new NiceMock<Grpc::MockAsyncClient>()), timer_(new Event::MockTimer()) {\n    node_.set_id(\"fo0\");\n    EXPECT_CALL(local_info_, node()).WillOnce(testing::ReturnRef(node_));\n    EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n      timer_cb_ = timer_cb;\n      return timer_;\n    }));\n\n    mux_ = std::make_shared<Config::GrpcMuxImpl>(\n        local_info_, 
std::unique_ptr<Grpc::MockAsyncClient>(async_client_), dispatcher_,\n        *method_descriptor_, envoy::config::core::v3::ApiVersion::AUTO, random_, stats_store_,\n        rate_limit_settings_, true);\n    subscription_ = std::make_unique<GrpcSubscriptionImpl>(\n        mux_, callbacks_, resource_decoder_, stats_, Config::TypeUrl::get().ClusterLoadAssignment,\n        dispatcher_, init_fetch_timeout, false);\n  }\n\n  ~GrpcSubscriptionTestHarness() override { EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); }\n\n  void expectSendMessage(const std::set<std::string>& cluster_names, const std::string& version,\n                         bool expect_node = false) override {\n    expectSendMessage(cluster_names, version, expect_node, Grpc::Status::WellKnownGrpcStatus::Ok,\n                      \"\");\n  }\n\n  void expectSendMessage(const std::set<std::string>& cluster_names, const std::string& version,\n                         bool expect_node, const Protobuf::int32 error_code,\n                         const std::string& error_message) {\n    UNREFERENCED_PARAMETER(expect_node);\n    API_NO_BOOST(envoy::api::v2::DiscoveryRequest) expected_request;\n    if (expect_node) {\n      expected_request.mutable_node()->CopyFrom(API_DOWNGRADE(node_));\n    }\n    for (const auto& cluster : cluster_names) {\n      expected_request.add_resource_names(cluster);\n    }\n    if (!version.empty()) {\n      expected_request.set_version_info(version);\n    }\n    expected_request.set_response_nonce(last_response_nonce_);\n    expected_request.set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);\n    if (error_code != Grpc::Status::WellKnownGrpcStatus::Ok) {\n      ::google::rpc::Status* error_detail = expected_request.mutable_error_detail();\n      error_detail->set_code(error_code);\n      error_detail->set_message(error_message);\n    }\n    EXPECT_CALL(async_stream_, sendMessageRaw_(Grpc::ProtoBufferEq(expected_request), false));\n  }\n\n  void startSubscription(const 
std::set<std::string>& cluster_names) override {\n    EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n    last_cluster_names_ = cluster_names;\n    expectSendMessage(last_cluster_names_, \"\", true);\n    subscription_->start(cluster_names);\n  }\n\n  void deliverConfigUpdate(const std::vector<std::string>& cluster_names,\n                           const std::string& version, bool accept) override {\n    std::unique_ptr<envoy::service::discovery::v3::DiscoveryResponse> response(\n        new envoy::service::discovery::v3::DiscoveryResponse());\n    response->set_version_info(version);\n    last_response_nonce_ = std::to_string(HashUtil::xxHash64(version));\n    response->set_nonce(last_response_nonce_);\n    response->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);\n    response->mutable_control_plane()->set_identifier(\"ground_control_foo123\");\n    Protobuf::RepeatedPtrField<envoy::config::endpoint::v3::ClusterLoadAssignment> typed_resources;\n    for (const auto& cluster : cluster_names) {\n      if (std::find(last_cluster_names_.begin(), last_cluster_names_.end(), cluster) !=\n          last_cluster_names_.end()) {\n        envoy::config::endpoint::v3::ClusterLoadAssignment* load_assignment = typed_resources.Add();\n        load_assignment->set_cluster_name(cluster);\n        response->add_resources()->PackFrom(API_DOWNGRADE(*load_assignment));\n      }\n    }\n    const auto decoded_resources =\n        TestUtility::decodeResources<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n            *response, \"cluster_name\");\n    EXPECT_CALL(callbacks_, onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), version))\n        .WillOnce(ThrowOnRejectedConfig(accept));\n    if (accept) {\n      expectSendMessage(last_cluster_names_, version, false);\n      version_ = version;\n    } else {\n      EXPECT_CALL(callbacks_, onConfigUpdateFailed(\n                                  
Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _));\n      expectSendMessage(last_cluster_names_, version_, false,\n                        Grpc::Status::WellKnownGrpcStatus::Internal, \"bad config\");\n    }\n    mux_->onDiscoveryResponse(std::move(response), control_plane_stats_);\n    EXPECT_EQ(control_plane_stats_.identifier_.value(), \"ground_control_foo123\");\n    Mock::VerifyAndClearExpectations(&async_stream_);\n  }\n\n  void updateResourceInterest(const std::set<std::string>& cluster_names) override {\n    // The \"watch\" mechanism means that updates that lose interest in a resource\n    // will first generate a request for [still watched resources, i.e. without newly unwatched\n    // ones] before generating the request for all of cluster_names.\n    // TODO(fredlas) this unnecessary second request will stop happening once the watch mechanism is\n    // no longer internally used by GrpcSubscriptionImpl.\n    std::set<std::string> both;\n    for (const auto& n : cluster_names) {\n      if (last_cluster_names_.find(n) != last_cluster_names_.end()) {\n        both.insert(n);\n      }\n    }\n    expectSendMessage(both, version_);\n    expectSendMessage(cluster_names, version_);\n    subscription_->updateResourceInterest(cluster_names);\n    last_cluster_names_ = cluster_names;\n  }\n\n  void expectConfigUpdateFailed() override {\n    EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, nullptr))\n        .WillOnce([this](ConfigUpdateFailureReason reason, const EnvoyException*) {\n          if (reason == ConfigUpdateFailureReason::FetchTimedout) {\n            stats_.init_fetch_timeout_.inc();\n          }\n        });\n  }\n\n  void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) override {\n    init_timeout_timer_ = new Event::MockTimer(&dispatcher_);\n    EXPECT_CALL(*init_timeout_timer_, enableTimer(timeout, _));\n  }\n\n  void expectDisableInitFetchTimeoutTimer() override {\n    EXPECT_CALL(*init_timeout_timer_, 
disableTimer());\n  }\n\n  void callInitFetchTimeoutCb() override { init_timeout_timer_->invokeCallback(); }\n\n  std::string version_;\n  const Protobuf::MethodDescriptor* method_descriptor_;\n  Grpc::MockAsyncClient* async_client_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  Event::MockDispatcher dispatcher_;\n  Random::MockRandomGenerator random_;\n  Event::MockTimer* timer_;\n  Event::TimerCb timer_cb_;\n  envoy::config::core::v3::Node node_;\n  NiceMock<Config::MockSubscriptionCallbacks> callbacks_;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder_{\"cluster_name\"};\n  NiceMock<Grpc::MockAsyncStream> async_stream_;\n  GrpcMuxImplSharedPtr mux_;\n  GrpcSubscriptionImplPtr subscription_;\n  std::string last_response_nonce_;\n  std::set<std::string> last_cluster_names_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  Envoy::Config::RateLimitSettings rate_limit_settings_;\n  Event::MockTimer* init_timeout_timer_;\n};\n\n// TODO(danielhochman): test with RDS and ensure version_info is same as what API returned\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/http_subscription_impl_test.cc",
    "content": "#include <memory>\n\n#include \"test/common/config/http_subscription_test_harness.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nclass HttpSubscriptionImplTest : public testing::Test, public HttpSubscriptionTestHarness {};\n\n// Validate that the client can recover from a remote fetch failure.\nTEST_F(HttpSubscriptionImplTest, OnRequestReset) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_CALL(random_gen_, random()).WillOnce(Return(0));\n  EXPECT_CALL(*timer_, enableTimer(_, _));\n  EXPECT_CALL(callbacks_,\n              onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _))\n      .Times(0);\n  http_callbacks_->onFailure(http_request_, Http::AsyncClient::FailureReason::Reset);\n  EXPECT_TRUE(statsAre(1, 0, 0, 1, 0, 0, 0, \"\"));\n  timerTick();\n  EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"42\", true);\n  EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7919287270473417401, \"42\"));\n}\n\n// Validate that the client can recover from bad JSON responses.\nTEST_F(HttpSubscriptionImplTest, BadJsonRecovery) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  Http::ResponseHeaderMapPtr response_headers{\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  Http::ResponseMessagePtr message{new Http::ResponseMessageImpl(std::move(response_headers))};\n  message->body().add(\";!@#badjso n\");\n  EXPECT_CALL(random_gen_, random()).WillOnce(Return(0));\n  EXPECT_CALL(*timer_, enableTimer(_, _));\n  EXPECT_CALL(callbacks_,\n              onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _));\n  http_callbacks_->onSuccess(http_request_, std::move(message));\n  EXPECT_TRUE(statsAre(1, 0, 1, 0, 0, 0, 0, \"\"));\n  request_in_progress_ = false;\n  timerTick();\n  EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", 
\"cluster1\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n}\n\nTEST_F(HttpSubscriptionImplTest, ConfigNotModified) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  timerTick();\n  EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0, \"\"));\n\n  // accept and modify.\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", true, true, \"200\");\n  EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n\n  // accept and does not modify.\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", true, false, \"304\");\n  EXPECT_TRUE(statsAre(4, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n}\n\nTEST_F(HttpSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", false);\n  EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, \"\"));\n}\n\nTEST_F(HttpSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n\n  // Advance the simulated time and verify that a trivial update (no change) also changes the update\n  // time.\n  simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS + 1)));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028, \"0\"));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/http_subscription_test_harness.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.validate.h\"\n#include \"envoy/http/async_client.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/config/http_subscription_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/common/config/subscription_test_harness.h\"\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Config {\n\nclass HttpSubscriptionTestHarness : public SubscriptionTestHarness {\npublic:\n  HttpSubscriptionTestHarness() : HttpSubscriptionTestHarness(std::chrono::milliseconds(0)) {}\n\n  HttpSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout)\n      : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n            \"envoy.api.v2.EndpointDiscoveryService.FetchEndpoints\")),\n        timer_(new Event::MockTimer()), http_request_(&cm_.async_client_) {\n    node_.set_id(\"fo0\");\n    EXPECT_CALL(local_info_, node()).WillOnce(testing::ReturnRef(node_));\n    EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n      timer_cb_ = timer_cb;\n      return timer_;\n    }));\n    subscription_ = std::make_unique<HttpSubscriptionImpl>(\n        local_info_, cm_, \"eds_cluster\", dispatcher_, random_gen_, 
std::chrono::milliseconds(1),\n        std::chrono::milliseconds(1000), *method_descriptor_,\n        Config::TypeUrl::get().ClusterLoadAssignment, envoy::config::core::v3::ApiVersion::AUTO,\n        callbacks_, resource_decoder_, stats_, init_fetch_timeout, validation_visitor_);\n  }\n\n  ~HttpSubscriptionTestHarness() override {\n    // Stop subscribing on the way out.\n    if (request_in_progress_) {\n      EXPECT_CALL(http_request_, cancel());\n    }\n  }\n\n  void expectSendMessage(const std::set<std::string>& cluster_names, const std::string& version,\n                         bool expect_node = false) override {\n    UNREFERENCED_PARAMETER(expect_node);\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"eds_cluster\"));\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n        .WillOnce(Invoke([this, cluster_names, version](Http::RequestMessagePtr& request,\n                                                        Http::AsyncClient::Callbacks& callbacks,\n                                                        const Http::AsyncClient::RequestOptions&) {\n          http_callbacks_ = &callbacks;\n          EXPECT_EQ(\"POST\", request->headers().getMethodValue());\n          EXPECT_EQ(Http::Headers::get().ContentTypeValues.Json,\n                    request->headers().getContentTypeValue());\n          EXPECT_EQ(\"eds_cluster\", request->headers().getHostValue());\n          EXPECT_EQ(\"/v2/discovery:endpoints\", request->headers().getPathValue());\n          std::string expected_request = \"{\";\n          if (!version_.empty()) {\n            expected_request += \"\\\"version_info\\\":\\\"\" + version + \"\\\",\";\n          }\n          expected_request += \"\\\"node\\\":{\\\"id\\\":\\\"fo0\\\"},\";\n          if (!cluster_names.empty()) {\n            std::string joined_cluster_names;\n            {\n              std::string delimiter = \"\\\",\\\"\";\n              std::ostringstream buf;\n              std::copy(cluster_names.begin(), 
cluster_names.end(),\n                        std::ostream_iterator<std::string>(buf, delimiter.c_str()));\n              std::string with_comma = buf.str();\n              joined_cluster_names = with_comma.substr(0, with_comma.length() - delimiter.length());\n            }\n            expected_request += \"\\\"resource_names\\\":[\\\"\" + joined_cluster_names + \"\\\"]\";\n          }\n          expected_request +=\n              \",\\\"type_url\\\":\\\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\\\"\";\n          expected_request += \"}\";\n          EXPECT_EQ(expected_request, request->bodyAsString());\n          EXPECT_EQ(fmt::format_int(expected_request.size()).str(),\n                    request->headers().getContentLengthValue());\n          request_in_progress_ = true;\n          return &http_request_;\n        }));\n  }\n\n  void startSubscription(const std::set<std::string>& cluster_names) override {\n    version_ = \"\";\n    cluster_names_ = cluster_names;\n    expectSendMessage(cluster_names, \"\");\n    subscription_->start(cluster_names);\n  }\n\n  void updateResourceInterest(const std::set<std::string>& cluster_names) override {\n    cluster_names_ = cluster_names;\n    expectSendMessage(cluster_names, version_);\n    subscription_->updateResourceInterest(cluster_names);\n    timer_cb_();\n  }\n\n  void deliverConfigUpdate(const std::vector<std::string>& cluster_names,\n                           const std::string& version, bool accept) override {\n    deliverConfigUpdate(cluster_names, version, accept, true, \"200\");\n  }\n\n  void deliverConfigUpdate(const std::vector<std::string>& cluster_names,\n                           const std::string& version, bool accept, bool modify,\n                           const std::string& response_code) {\n    std::string response_json = \"{\\\"version_info\\\":\\\"\" + version + \"\\\",\\\"resources\\\":[\";\n    for (const auto& cluster : cluster_names) {\n      response_json += 
\"{\\\"@type\\\":\\\"type.googleapis.com/\"\n                       \"envoy.api.v2.ClusterLoadAssignment\\\",\\\"cluster_name\\\":\\\"\" +\n                       cluster + \"\\\"},\";\n    }\n    response_json.pop_back();\n    response_json += \"]}\";\n    envoy::service::discovery::v3::DiscoveryResponse response_pb;\n    TestUtility::loadFromJson(response_json, response_pb);\n    Http::ResponseHeaderMapPtr response_headers{\n        new Http::TestResponseHeaderMapImpl{{\":status\", response_code}}};\n    Http::ResponseMessagePtr message{new Http::ResponseMessageImpl(std::move(response_headers))};\n    message->body().add(response_json);\n    const auto decoded_resources =\n        TestUtility::decodeResources<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n            response_pb, \"cluster_name\");\n\n    if (modify) {\n      EXPECT_CALL(callbacks_,\n                  onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), version))\n          .WillOnce(ThrowOnRejectedConfig(accept));\n    }\n    if (!accept) {\n      EXPECT_CALL(callbacks_, onConfigUpdateFailed(\n                                  Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _));\n    }\n    EXPECT_CALL(random_gen_, random()).WillOnce(Return(0));\n    EXPECT_CALL(*timer_, enableTimer(_, _));\n    http_callbacks_->onSuccess(http_request_, std::move(message));\n    if (accept) {\n      version_ = version;\n    }\n    request_in_progress_ = false;\n    timerTick();\n  }\n\n  void expectConfigUpdateFailed() override {\n    EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, nullptr));\n  }\n\n  void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) override {\n    init_timeout_timer_ = new Event::MockTimer(&dispatcher_);\n    EXPECT_CALL(*init_timeout_timer_, enableTimer(std::chrono::milliseconds(timeout), _));\n  }\n\n  void expectDisableInitFetchTimeoutTimer() override {\n    EXPECT_CALL(*init_timeout_timer_, disableTimer());\n  }\n\n  void 
callInitFetchTimeoutCb() override { init_timeout_timer_->invokeCallback(); }\n\n  void timerTick() {\n    expectSendMessage(cluster_names_, version_);\n    timer_cb_();\n  }\n\n  bool request_in_progress_{};\n  std::string version_;\n  std::set<std::string> cluster_names_;\n  const Protobuf::MethodDescriptor* method_descriptor_;\n  Upstream::MockClusterManager cm_;\n  Event::MockDispatcher dispatcher_;\n  Event::MockTimer* timer_;\n  Event::TimerCb timer_cb_;\n  envoy::config::core::v3::Node node_;\n  Random::MockRandomGenerator random_gen_;\n  Http::MockAsyncClientRequest http_request_;\n  Http::AsyncClient::Callbacks* http_callbacks_;\n  Config::MockSubscriptionCallbacks callbacks_;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder_{\"cluster_name\"};\n  std::unique_ptr<HttpSubscriptionImpl> subscription_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  Event::MockTimer* init_timeout_timer_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/metadata_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nTEST(MetadataTest, MetadataValue) {\n  envoy::config::core::v3::Metadata metadata;\n  Metadata::mutableMetadataValue(metadata, MetadataFilters::get().ENVOY_LB,\n                                 MetadataEnvoyLbKeys::get().CANARY)\n      .set_bool_value(true);\n  EXPECT_TRUE(Metadata::metadataValue(&metadata, MetadataFilters::get().ENVOY_LB,\n                                      MetadataEnvoyLbKeys::get().CANARY)\n                  .bool_value());\n  EXPECT_FALSE(Metadata::metadataValue(&metadata, \"foo\", \"bar\").bool_value());\n  EXPECT_FALSE(\n      Metadata::metadataValue(&metadata, MetadataFilters::get().ENVOY_LB, \"bar\").bool_value());\n}\n\nTEST(MetadataTest, MetadataValuePath) {\n  const std::string filter = \"com.test\";\n  envoy::config::core::v3::Metadata metadata;\n  std::vector<std::string> path{\"test_obj\", \"inner_key\"};\n  // not found case\n  EXPECT_EQ(Metadata::metadataValue(&metadata, filter, path).kind_case(),\n            ProtobufWkt::Value::KindCase::KIND_NOT_SET);\n  ProtobufWkt::Struct& filter_struct = (*metadata.mutable_filter_metadata())[filter];\n  auto obj = MessageUtil::keyValueStruct(\"inner_key\", \"inner_value\");\n  ProtobufWkt::Value val;\n  *val.mutable_struct_value() = obj;\n  (*filter_struct.mutable_fields())[\"test_obj\"] = val;\n  EXPECT_EQ(Metadata::metadataValue(&metadata, filter, path).string_value(), \"inner_value\");\n  // not found with longer path\n  path.push_back(\"bad_key\");\n  EXPECT_EQ(Metadata::metadataValue(&metadata, filter, path).kind_case(),\n            ProtobufWkt::Value::KindCase::KIND_NOT_SET);\n  // empty path 
returns not found\n  EXPECT_EQ(Metadata::metadataValue(&metadata, filter, std::vector<std::string>{}).kind_case(),\n            ProtobufWkt::Value::KindCase::KIND_NOT_SET);\n}\n\nclass TypedMetadataTest : public testing::Test {\npublic:\n  TypedMetadataTest() : registered_factory_(foo_factory_) {}\n\n  struct Foo : public TypedMetadata::Object {\n    Foo(std::string name) : name_(name) {}\n    std::string name_;\n  };\n\n  struct Bar : public TypedMetadata::Object {};\n\n  class FooFactory : public TypedMetadataFactory {\n  public:\n    std::string name() const override { return \"foo\"; }\n    // Throws EnvoyException (conversion failure) if d is empty.\n    std::unique_ptr<const TypedMetadata::Object>\n    parse(const ProtobufWkt::Struct& d) const override {\n      if (d.fields().find(\"name\") != d.fields().end()) {\n        return std::make_unique<Foo>(d.fields().at(\"name\").string_value());\n      }\n      throw EnvoyException(\"Cannot create a Foo when metadata is empty.\");\n    }\n  };\n\nprotected:\n  FooFactory foo_factory_;\n  Registry::InjectFactory<TypedMetadataFactory> registered_factory_;\n};\n\nTEST_F(TypedMetadataTest, OkTest) {\n  envoy::config::core::v3::Metadata metadata;\n  (*metadata.mutable_filter_metadata())[foo_factory_.name()] =\n      MessageUtil::keyValueStruct(\"name\", \"foo\");\n  TypedMetadataImpl<TypedMetadataFactory> typed(metadata);\n  EXPECT_NE(nullptr, typed.get<Foo>(foo_factory_.name()));\n  EXPECT_EQ(\"foo\", typed.get<Foo>(foo_factory_.name())->name_);\n  // A duck is a duck.\n  EXPECT_EQ(nullptr, typed.get<Bar>(foo_factory_.name()));\n}\n\nTEST_F(TypedMetadataTest, NoMetadataTest) {\n  envoy::config::core::v3::Metadata metadata;\n  TypedMetadataImpl<TypedMetadataFactory> typed(metadata);\n  EXPECT_EQ(nullptr, typed.get<Foo>(foo_factory_.name()));\n}\n\nTEST_F(TypedMetadataTest, MetadataRefreshTest) {\n  envoy::config::core::v3::Metadata metadata;\n  (*metadata.mutable_filter_metadata())[foo_factory_.name()] =\n      
MessageUtil::keyValueStruct(\"name\", \"foo\");\n  TypedMetadataImpl<TypedMetadataFactory> typed(metadata);\n  EXPECT_NE(nullptr, typed.get<Foo>(foo_factory_.name()));\n  EXPECT_EQ(\"foo\", typed.get<Foo>(foo_factory_.name())->name_);\n\n  // Updated.\n  (*metadata.mutable_filter_metadata())[foo_factory_.name()] =\n      MessageUtil::keyValueStruct(\"name\", \"bar\");\n  TypedMetadataImpl<TypedMetadataFactory> typed2(metadata);\n  EXPECT_NE(nullptr, typed2.get<Foo>(foo_factory_.name()));\n  EXPECT_EQ(\"bar\", typed2.get<Foo>(foo_factory_.name())->name_);\n\n  // Deleted.\n  (*metadata.mutable_filter_metadata()).erase(foo_factory_.name());\n  TypedMetadataImpl<TypedMetadataFactory> typed3(metadata);\n  EXPECT_EQ(nullptr, typed3.get<Foo>(foo_factory_.name()));\n}\n\nTEST_F(TypedMetadataTest, InvalidMetadataTest) {\n  envoy::config::core::v3::Metadata metadata;\n  (*metadata.mutable_filter_metadata())[foo_factory_.name()] = ProtobufWkt::Struct();\n  EXPECT_THROW_WITH_MESSAGE(TypedMetadataImpl<TypedMetadataFactory> typed(metadata),\n                            Envoy::EnvoyException, \"Cannot create a Foo when metadata is empty.\");\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/new_grpc_mux_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.validate.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/config/new_grpc_mux_impl.h\"\n#include \"common/config/protobuf_link_hacks.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\n// We test some mux specific stuff below, other unit test coverage for singleton use of\n// NewGrpcMuxImpl is provided in [grpc_]subscription_impl_test.cc.\nclass NewGrpcMuxImplTestBase : public testing::Test {\npublic:\n  NewGrpcMuxImplTestBase()\n      : async_client_(new Grpc::MockAsyncClient()),\n        control_plane_stats_(Utility::generateControlPlaneStats(stats_)),\n        control_plane_connected_state_(\n            stats_.gauge(\"control_plane.connected_state\", Stats::Gauge::ImportMode::NeverImport)) {}\n\n  void setup() {\n    grpc_mux_ = std::make_unique<NewGrpcMuxImpl>(\n        std::unique_ptr<Grpc::MockAsyncClient>(async_client_), dispatcher_,\n        *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n            
\"envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources\"),\n        envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_,\n        local_info_);\n  }\n\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  Grpc::MockAsyncClient* async_client_;\n  NiceMock<Grpc::MockAsyncStream> async_stream_;\n  NewGrpcMuxImplPtr grpc_mux_;\n  NiceMock<Config::MockSubscriptionCallbacks> callbacks_;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder_{\"cluster_name\"};\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  Stats::TestUtil::TestStore stats_;\n  Envoy::Config::RateLimitSettings rate_limit_settings_;\n  ControlPlaneStats control_plane_stats_;\n  Stats::Gauge& control_plane_connected_state_;\n};\n\nclass NewGrpcMuxImplTest : public NewGrpcMuxImplTestBase {\npublic:\n  Event::SimulatedTimeSystem time_system_;\n};\n\n// Test that we simply ignore a message for an unknown type_url, with no ill effects.\nTEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) {\n  setup();\n\n  const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n  auto watch = grpc_mux_->addWatch(type_url, {}, callbacks_, resource_decoder_);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  grpc_mux_->start();\n\n  {\n    auto unexpected_response =\n        std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    unexpected_response->set_type_url(type_url);\n    unexpected_response->set_system_version_info(\"0\");\n    // empty response should call onConfigUpdate on wildcard watch\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, _, \"0\"));\n    grpc_mux_->onDiscoveryResponse(std::move(unexpected_response), control_plane_stats_);\n  }\n  {\n    auto response = std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    
response->set_type_url(type_url);\n    response->set_system_version_info(\"1\");\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment;\n    load_assignment.set_cluster_name(\"x\");\n    response->add_resources()->mutable_resource()->PackFrom(API_DOWNGRADE(load_assignment));\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, _, \"1\"))\n        .WillOnce(Invoke([&load_assignment](const std::vector<DecodedResourceRef>& added_resources,\n                                            const Protobuf::RepeatedPtrField<std::string>&,\n                                            const std::string&) {\n          EXPECT_EQ(1, added_resources.size());\n          EXPECT_TRUE(\n              TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment));\n        }));\n    grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_);\n  }\n}\n\n// DeltaDiscoveryResponse that comes in response to an on-demand request updates the watch with\n// resource's name. 
The watch is initially created with an alias used in the on-demand request.\nTEST_F(NewGrpcMuxImplTest, ConfigUpdateWithAliases) {\n  setup();\n\n  const std::string& type_url = Config::TypeUrl::get().VirtualHost;\n  auto watch = grpc_mux_->addWatch(type_url, {\"prefix\"}, callbacks_, resource_decoder_, true);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  grpc_mux_->start();\n\n  auto response = std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n  response->set_type_url(type_url);\n  response->set_system_version_info(\"1\");\n\n  envoy::config::route::v3::VirtualHost vhost;\n  vhost.set_name(\"vhost_1\");\n  vhost.add_domains(\"domain1.test\");\n  vhost.add_domains(\"domain2.test\");\n\n  response->add_resources()->mutable_resource()->PackFrom(vhost);\n  response->mutable_resources()->at(0).set_name(\"prefix/vhost_1\");\n  response->mutable_resources()->at(0).add_aliases(\"prefix/domain1.test\");\n  response->mutable_resources()->at(0).add_aliases(\"prefix/domain2.test\");\n\n  grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_);\n\n  const auto& subscriptions = grpc_mux_->subscriptions();\n  auto sub = subscriptions.find(type_url);\n\n  EXPECT_TRUE(sub != subscriptions.end());\n  watch->update({});\n}\n\n// DeltaDiscoveryResponse that comes in response to an on-demand request that couldn't be resolved\n// will contain an empty Resource. 
The Resource's aliases field will be populated with the alias\n// originally used in the request.\nTEST_F(NewGrpcMuxImplTest, ConfigUpdateWithNotFoundResponse) {\n  setup();\n\n  const std::string& type_url = Config::TypeUrl::get().VirtualHost;\n  auto watch = grpc_mux_->addWatch(type_url, {\"prefix\"}, callbacks_, resource_decoder_, true);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  grpc_mux_->start();\n\n  auto response = std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n  response->set_type_url(type_url);\n  response->set_system_version_info(\"1\");\n\n  response->add_resources();\n  response->mutable_resources()->at(0).set_name(\"not-found\");\n  response->mutable_resources()->at(0).add_aliases(\"prefix/domain1.test\");\n}\n\n// Watch v2 resource type_url, receive discovery response with v3 resource type_url.\nTEST_F(NewGrpcMuxImplTest, V3ResourceResponseV2ResourceWatch) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade\", \"true\"}});\n  setup();\n\n  // Watch for v2 resource type_url.\n  const std::string& v2_type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n  const std::string& v3_type_url =\n      Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n          envoy::config::core::v3::ApiVersion::V3);\n  auto watch = grpc_mux_->addWatch(v2_type_url, {}, callbacks_, resource_decoder_);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  // Cluster is not watched, v3 resource is rejected.\n  grpc_mux_->start();\n  {\n    auto unexpected_response =\n        std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    envoy::config::cluster::v3::Cluster cluster;\n    unexpected_response->set_type_url(Config::getTypeUrl<envoy::config::cluster::v3::Cluster>(\n        
envoy::config::core::v3::ApiVersion::V3));\n    unexpected_response->set_system_version_info(\"0\");\n    unexpected_response->add_resources()->mutable_resource()->PackFrom(cluster);\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, _, \"0\")).Times(0);\n    grpc_mux_->onDiscoveryResponse(std::move(unexpected_response), control_plane_stats_);\n  }\n  // Cluster is not watched, v2 resource is rejected.\n  {\n    auto unexpected_response =\n        std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    envoy::config::cluster::v3::Cluster cluster;\n    unexpected_response->set_type_url(Config::TypeUrl::get().Cluster);\n    unexpected_response->set_system_version_info(\"0\");\n    unexpected_response->add_resources()->mutable_resource()->PackFrom(API_DOWNGRADE(cluster));\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, _, \"0\")).Times(0);\n    grpc_mux_->onDiscoveryResponse(std::move(unexpected_response), control_plane_stats_);\n  }\n  // ClusterLoadAssignment v2 is watched, v3 resource will be accepted.\n  {\n    auto response = std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    response->set_system_version_info(\"1\");\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment;\n    load_assignment.set_cluster_name(\"x\");\n    response->add_resources()->mutable_resource()->PackFrom(load_assignment);\n    // Send response that contains resource with v3 type url.\n    response->set_type_url(v3_type_url);\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, _, \"1\"))\n        .WillOnce(Invoke([&load_assignment](const std::vector<DecodedResourceRef>& added_resources,\n                                            const Protobuf::RepeatedPtrField<std::string>&,\n                                            const std::string&) {\n          EXPECT_EQ(1, added_resources.size());\n          EXPECT_TRUE(\n              TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment));\n        }));\n    
grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_);\n  }\n}\n\n// Watch v3 resource type_url, receive discovery response with v2 resource type_url.\nTEST_F(NewGrpcMuxImplTest, V2ResourceResponseV3ResourceWatch) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade\", \"true\"}});\n  setup();\n\n  // Watch for v3 resource type_url.\n  const std::string& v3_type_url =\n      Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n          envoy::config::core::v3::ApiVersion::V3);\n  const std::string& v2_type_url = Config::TypeUrl::get().ClusterLoadAssignment;\n  auto watch = grpc_mux_->addWatch(v3_type_url, {}, callbacks_, resource_decoder_);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n\n  grpc_mux_->start();\n  // ClusterLoadAssignment v3 is watched, v2 resource will be accepted.\n  {\n    auto response = std::make_unique<envoy::service::discovery::v3::DeltaDiscoveryResponse>();\n    response->set_system_version_info(\"1\");\n    envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment;\n    load_assignment.set_cluster_name(\"x\");\n    response->add_resources()->mutable_resource()->PackFrom(API_DOWNGRADE(load_assignment));\n    // Send response that contains resource with v3 type url.\n    response->set_type_url(v2_type_url);\n    EXPECT_CALL(callbacks_, onConfigUpdate(_, _, \"1\"))\n        .WillOnce(Invoke([&load_assignment](const std::vector<DecodedResourceRef>& added_resources,\n                                            const Protobuf::RepeatedPtrField<std::string>&,\n                                            const std::string&) {\n          EXPECT_EQ(1, added_resources.size());\n          EXPECT_TRUE(\n              TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment));\n        }));\n    
grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_);\n  }\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/opaque_resource_decoder_impl_test.cc",
    "content": "#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.validate.h\"\n\n#include \"common/config/opaque_resource_decoder_impl.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nclass OpaqueResourceDecoderImplTest : public testing::Test {\npublic:\n  std::pair<ProtobufTypes::MessagePtr, std::string>\n  decodeTypedResource(const envoy::config::endpoint::v3::ClusterLoadAssignment& typed_resource) {\n    ProtobufWkt::Any opaque_resource;\n    opaque_resource.PackFrom(typed_resource);\n    auto decoded_resource = resource_decoder_.decodeResource(opaque_resource);\n    const std::string name = resource_decoder_.resourceName(*decoded_resource);\n    return {std::move(decoded_resource), name};\n  }\n\n  ProtobufMessage::StrictValidationVisitorImpl validation_visitor_;\n  OpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment> resource_decoder_{\n      validation_visitor_, \"cluster_name\"};\n};\n\n// Negative test for bad type URL in Any.\nTEST_F(OpaqueResourceDecoderImplTest, WrongType) {\n  ProtobufWkt::Any opaque_resource;\n  opaque_resource.set_type_url(\"huh\");\n  EXPECT_THROW_WITH_REGEX(resource_decoder_.decodeResource(opaque_resource), EnvoyException,\n                          \"Unable to unpack\");\n}\n\n// If the Any is empty (no type set), the default instance of the opaque resource decoder type is\n// created.\nTEST_F(OpaqueResourceDecoderImplTest, Empty) {\n  ProtobufWkt::Any opaque_resource;\n  const auto decoded_resource = resource_decoder_.decodeResource(opaque_resource);\n  EXPECT_THAT(*decoded_resource, ProtoEq(envoy::config::endpoint::v3::ClusterLoadAssignment()));\n  EXPECT_EQ(\"\", resource_decoder_.resourceName(*decoded_resource));\n}\n\n// Negative test for protoc-gen-validate constraints.\nTEST_F(OpaqueResourceDecoderImplTest, 
ValidateFail) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment invalid_resource;\n  EXPECT_THROW(decodeTypedResource(invalid_resource), ProtoValidationException);\n}\n\n// When validation is skipped, verify that we can ignore unknown fields.\nTEST_F(OpaqueResourceDecoderImplTest, ValidateIgnored) {\n  ProtobufMessage::NullValidationVisitorImpl validation_visitor;\n  OpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment> resource_decoder{\n      validation_visitor, \"cluster_name\"};\n  envoy::config::endpoint::v3::ClusterLoadAssignment strange_resource;\n  strange_resource.set_cluster_name(\"fare\");\n  auto* unknown = strange_resource.GetReflection()->MutableUnknownFields(&strange_resource);\n  // add a field that doesn't exist in the proto definition:\n  unknown->AddFixed32(1000, 1);\n  ProtobufWkt::Any opaque_resource;\n  opaque_resource.PackFrom(strange_resource);\n  const auto decoded_resource = resource_decoder.decodeResource(opaque_resource);\n  EXPECT_THAT(*decoded_resource, ProtoEq(strange_resource));\n  EXPECT_EQ(\"fare\", resource_decoder_.resourceName(*decoded_resource));\n}\n\n// Handling of smuggled deprecated fields during Any conversion.\nTEST_F(OpaqueResourceDecoderImplTest, HiddenEnvoyDeprecatedFields) {\n  // This test is only valid in API-v3, and should be updated for API-v4, as\n  // the deprecated fields of API-v2 will be removed.\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment =\n      TestUtility::parseYaml<envoy::config::endpoint::v3::ClusterLoadAssignment>(R\"EOF(\n      cluster_name: fare\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 1.2.3.4\n                port_value: 80\n      policy:\n        overprovisioning_factor: 100\n        hidden_envoy_deprecated_disable_overprovisioning: true\n    )EOF\");\n  EXPECT_THROW_WITH_REGEX(decodeTypedResource(cluster_load_assignment), 
ProtoValidationException,\n                          \"Illegal use of hidden_envoy_deprecated_ V2 field \"\n                          \"'envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.hidden_envoy_\"\n                          \"deprecated_disable_overprovisioning'\");\n}\n\n// Happy path.\nTEST_F(OpaqueResourceDecoderImplTest, Success) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_resource;\n  cluster_resource.set_cluster_name(\"foo\");\n  const auto result = decodeTypedResource(cluster_resource);\n  EXPECT_THAT(*result.first, ProtoEq(cluster_resource));\n  EXPECT_EQ(\"foo\", result.second);\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/pausable_ack_queue_test.cc",
    "content": "#include \"common/config/pausable_ack_queue.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nTEST(PausableAckQueueTest, TestEmpty) {\n  PausableAckQueue p;\n  EXPECT_EQ(0, p.size());\n  EXPECT_TRUE(p.empty());\n}\n\nTEST(PausableAckQueueTest, TestPush) {\n  PausableAckQueue p;\n  p.push(UpdateAck{\"bogusnonce\", \"bogustypeurl\"});\n  EXPECT_EQ(1, p.size());\n  EXPECT_FALSE(p.empty());\n  EXPECT_EQ(\"bogusnonce\", p.front().nonce_);\n  EXPECT_EQ(\"bogustypeurl\", p.front().type_url_);\n}\n\nTEST(PausableAckQueueTest, TestPop) {\n  PausableAckQueue p;\n  p.push(UpdateAck{\"bogusnonce\", \"bogustypeurl\"});\n  UpdateAck ack = p.popFront();\n  EXPECT_EQ(0, p.size());\n  EXPECT_TRUE(p.empty());\n  EXPECT_EQ(\"bogusnonce\", ack.nonce_);\n  EXPECT_EQ(\"bogustypeurl\", ack.type_url_);\n}\n\nTEST(PausableAckQueueTest, TestPauseResume) {\n  PausableAckQueue p;\n  p.push(UpdateAck{\"nonce1\", \"type1\"});\n  p.push(UpdateAck{\"nonce2\", \"type2\"});\n  p.push(UpdateAck{\"nonce3\", \"type1\"});\n  p.push(UpdateAck{\"nonce4\", \"type2\"});\n  EXPECT_EQ(4, p.size());\n  EXPECT_FALSE(p.empty());\n\n  // pausing 'type1' should make it invisible to the queue\n  p.pause(\"type1\");\n\n  // size() doesn't honor pause state, a bit strange but this is by design\n  EXPECT_EQ(4, p.size());\n\n  // validate that both front() and popFront() honor pause state\n  EXPECT_EQ(\"nonce2\", p.front().nonce_);\n  EXPECT_EQ(\"type2\", p.front().type_url_);\n\n  // validate the above result is invariant even if we nest pauses.\n  p.pause(\"type1\");\n  EXPECT_EQ(4, p.size());\n  EXPECT_EQ(\"nonce2\", p.front().nonce_);\n  EXPECT_EQ(\"type2\", p.front().type_url_);\n  p.resume(\"type1\");\n  EXPECT_EQ(\"nonce2\", p.front().nonce_);\n  EXPECT_EQ(\"type2\", p.front().type_url_);\n\n  UpdateAck ack = p.popFront();\n  EXPECT_EQ(\"nonce2\", ack.nonce_);\n  EXPECT_EQ(\"type2\", ack.type_url_);\n  EXPECT_EQ(3, p.size());\n\n  // validate that types 
come back when they're resumed\n  p.resume(\"type1\");\n\n  EXPECT_EQ(\"nonce1\", p.front().nonce_);\n  EXPECT_EQ(\"type1\", p.front().type_url_);\n\n  p.pause(\"type1\");\n\n  EXPECT_EQ(\"nonce4\", p.front().nonce_);\n  EXPECT_EQ(\"type2\", p.front().type_url_);\n  p.popFront();\n\n  EXPECT_EQ(2, p.size());\n\n  p.pause(\"type2\");\n  EXPECT_TRUE(p.empty());\n  // A bit strange but this is by design\n  EXPECT_EQ(2, p.size());\n\n  p.resume(\"type1\");\n  EXPECT_FALSE(p.empty());\n\n  EXPECT_EQ(\"nonce1\", p.front().nonce_);\n  EXPECT_EQ(\"type1\", p.front().type_url_);\n  p.popFront();\n\n  EXPECT_EQ(\"nonce3\", p.front().nonce_);\n  EXPECT_EQ(\"type1\", p.front().type_url_);\n  p.popFront();\n\n  EXPECT_TRUE(p.empty());\n  EXPECT_EQ(0, p.size());\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/registry_test.cc",
    "content": "#include <algorithm>\n\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/fmt.h\"\n\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nusing ::testing::Optional;\n\nclass InternalFactory : public Config::UntypedFactory {\npublic:\n  ~InternalFactory() override = default;\n  std::string category() const override { return \"\"; }\n};\n\nclass TestInternalFactory : public InternalFactory {\npublic:\n  std::string name() const override { return \"testing.internal.test\"; }\n};\n\nstatic Registry::RegisterInternalFactory<TestInternalFactory, InternalFactory>\n    test_internal_register;\n\n// Ensure that the internal test factory name isn't visible from a\n// published category. Note that the internal factory can't be in\n// a registered category by definition since it doesn't have a static\n// category method.\nTEST(RegistryTest, InternalFactoryNotPublished) {\n  TestInternalFactory test;\n\n  // Expect that the published categories don't lead to the internal factory.\n  for (const auto& ext : Envoy::Registry::FactoryCategoryRegistry::registeredFactories()) {\n    for (const auto& name : ext.second->registeredNames()) {\n      EXPECT_NE(name, test.name());\n    }\n  }\n\n  // Expect that the factory is present.\n  EXPECT_NE(Registry::FactoryRegistry<InternalFactory>::getFactory(\"testing.internal.test\"),\n            nullptr);\n}\n\nclass PublishedFactory : public Config::UntypedFactory {\npublic:\n  ~PublishedFactory() override = default;\n  std::string category() const override { return \"testing.published\"; }\n};\n\nclass TestPublishedFactory : public PublishedFactory {\npublic:\n  std::string name() const override { return \"testing.published.test\"; }\n};\n\nREGISTER_FACTORY(TestPublishedFactory, PublishedFactory);\n\nTEST(RegistryTest, 
DefaultFactoryPublished) {\n  const auto& factories = Envoy::Registry::FactoryCategoryRegistry::registeredFactories();\n\n  // Expect that the category is present.\n  ASSERT_NE(factories.find(\"testing.published\"), factories.end());\n\n  // Expect that the factory is listed in the right category.\n  const auto& names = factories.find(\"testing.published\")->second->registeredNames();\n  EXPECT_NE(std::find(names.begin(), names.end(), \"testing.published.test\"), std::end(names));\n\n  // Expect that the factory is present.\n  EXPECT_NE(Registry::FactoryRegistry<PublishedFactory>::getFactory(\"testing.published.test\"),\n            nullptr);\n\n  // Expect no version\n  auto const version =\n      factories.find(\"testing.published\")->second->getFactoryVersion(\"testing.published.test\");\n  EXPECT_FALSE(version.has_value());\n}\n\nclass TestWithDeprecatedPublishedFactory : public PublishedFactory {\npublic:\n  std::string name() const override { return \"testing.published.instead_name\"; }\n};\n\nREGISTER_FACTORY(TestWithDeprecatedPublishedFactory,\n                 PublishedFactory){\"testing.published.deprecated_name\"};\n\nTEST(RegistryTest, DEPRECATED_FEATURE_TEST(WithDeprecatedFactoryPublished)) {\n  EXPECT_EQ(\"testing.published.instead_name\",\n            Envoy::Registry::FactoryRegistry<PublishedFactory>::getFactory(\n                \"testing.published.deprecated_name\")\n                ->name());\n  EXPECT_LOG_CONTAINS(\"warn\",\n                      fmt::format(\"Using deprecated extension name '{}' for '{}'.\",\n                                  \"testing.published.deprecated_name\",\n                                  \"testing.published.instead_name\"),\n                      Envoy::Registry::FactoryRegistry<PublishedFactory>::getFactory(\n                          \"testing.published.deprecated_name\")\n                          ->name());\n}\n\nclass NoNamePublishedFactory : public PublishedFactory {\npublic:\n  std::string name() const 
override { return \"\"; }\n};\n\nTEST(RegistryTest, DEPRECATED_FEATURE_TEST(AssertsIfNoDeprecatedNameGiven)) {\n  // Expects an assert to raise if we register a factory that has an empty name\n  // and no associated deprecated names.\n  EXPECT_DEBUG_DEATH((Registry::RegisterFactory<NoNamePublishedFactory, PublishedFactory>({})),\n                     \"Attempted to register a factory without a name or deprecated name\");\n}\n\nclass TestVersionedFactory : public PublishedFactory {\npublic:\n  std::string name() const override { return \"testing.published.versioned\"; }\n};\n\nREGISTER_FACTORY(TestVersionedFactory,\n                 PublishedFactory){FACTORY_VERSION(2, 5, 39, {{\"build.label\", \"alpha\"}})};\n\n// Test registration of versioned factory\nTEST(RegistryTest, VersionedFactory) {\n  const auto& factories = Envoy::Registry::FactoryCategoryRegistry::registeredFactories();\n\n  // Expect that the category is present.\n  ASSERT_NE(factories.find(\"testing.published\"), factories.end());\n\n  // Expect that the factory is listed in the right category.\n  const auto& names = factories.find(\"testing.published\")->second->registeredNames();\n  EXPECT_NE(std::find(names.begin(), names.end(), \"testing.published.versioned\"), std::end(names));\n\n  // Expect that the factory is present.\n  EXPECT_NE(Registry::FactoryRegistry<PublishedFactory>::getFactory(\"testing.published.versioned\"),\n            nullptr);\n\n  auto version =\n      factories.find(\"testing.published\")->second->getFactoryVersion(\"testing.published.versioned\");\n  EXPECT_TRUE(version.has_value());\n  EXPECT_EQ(2, version.value().version().major_number());\n  EXPECT_EQ(5, version.value().version().minor_number());\n  EXPECT_EQ(39, version.value().version().patch());\n  EXPECT_EQ(1, version.value().metadata().fields().size());\n  EXPECT_EQ(\"alpha\", version.value().metadata().fields().at(\"build.label\").string_value());\n}\n\nclass TestVersionedWithDeprecatedNamesFactory : public 
PublishedFactory {\npublic:\n  std::string name() const override { return \"testing.published.versioned.instead_name\"; }\n};\n\nREGISTER_FACTORY(TestVersionedWithDeprecatedNamesFactory,\n                 PublishedFactory){FACTORY_VERSION(0, 0, 1, {{\"build.kind\", \"private\"}}),\n                                   {\"testing.published.versioned.deprecated_name\"}};\n\n// Test registration of versioned factory that also uses deprecated names\nTEST(RegistryTest, DEPRECATED_FEATURE_TEST(VersionedWithDeprecatedNamesFactory)) {\n  EXPECT_EQ(\"testing.published.versioned.instead_name\",\n            Envoy::Registry::FactoryRegistry<PublishedFactory>::getFactory(\n                \"testing.published.versioned.deprecated_name\")\n                ->name());\n  EXPECT_LOG_CONTAINS(\"warn\",\n                      fmt::format(\"Using deprecated extension name '{}' for '{}'.\",\n                                  \"testing.published.versioned.deprecated_name\",\n                                  \"testing.published.versioned.instead_name\"),\n                      Envoy::Registry::FactoryRegistry<PublishedFactory>::getFactory(\n                          \"testing.published.versioned.deprecated_name\")\n                          ->name());\n  const auto& factories = Envoy::Registry::FactoryCategoryRegistry::registeredFactories();\n  auto version = factories.find(\"testing.published\")\n                     ->second->getFactoryVersion(\"testing.published.versioned.instead_name\");\n  EXPECT_TRUE(version.has_value());\n  EXPECT_EQ(0, version.value().version().major_number());\n  EXPECT_EQ(0, version.value().version().minor_number());\n  EXPECT_EQ(1, version.value().version().patch());\n  EXPECT_EQ(1, version.value().metadata().fields().size());\n  EXPECT_EQ(\"private\", version.value().metadata().fields().at(\"build.kind\").string_value());\n  // Get the version using deprecated name and check that it matches the\n  // version obtained through the new name.\n  auto 
deprecated_version =\n      factories.find(\"testing.published\")\n          ->second->getFactoryVersion(\"testing.published.versioned.deprecated_name\");\n  EXPECT_TRUE(deprecated_version.has_value());\n  EXPECT_THAT(deprecated_version.value(), ProtoEq(version.value()));\n}\n\nTEST(RegistryTest, TestDoubleRegistrationByName) {\n  EXPECT_THROW_WITH_MESSAGE((Registry::RegisterFactory<TestPublishedFactory, PublishedFactory>()),\n                            EnvoyException,\n                            \"Double registration for name: 'testing.published.test'\");\n}\n\nclass PublishedFactoryWithNameAndCategory : public PublishedFactory {\npublic:\n  std::string category() const override { return \"testing.published.additional.category\"; }\n  std::string name() const override {\n    return \"testing.published.versioned.instead_name_and_category\";\n  }\n};\n\nTEST(RegistryTest, DEPRECATED_FEATURE_TEST(VersionedWithDeprecatedNamesFactoryAndNewCategory)) {\n  PublishedFactoryWithNameAndCategory test;\n\n  // Check the category is not registered\n  ASSERT_FALSE(Registry::FactoryCategoryRegistry::isRegistered(test.category()));\n\n  auto factory = Registry::RegisterFactory<PublishedFactoryWithNameAndCategory, PublishedFactory>(\n      FACTORY_VERSION(0, 0, 1, {{\"build.kind\", \"private\"}}),\n      {\"testing.published.versioned.deprecated_name_and_category\"});\n\n  // Check the category now registered\n  ASSERT_TRUE(Registry::FactoryCategoryRegistry::isRegistered(test.category()));\n\n  const auto& factories = Envoy::Registry::FactoryCategoryRegistry::registeredFactories();\n\n  auto version =\n      factories.find(\"testing.published.additional.category\")\n          ->second->getFactoryVersion(\"testing.published.versioned.instead_name_and_category\");\n\n  ASSERT_TRUE(version.has_value());\n  EXPECT_EQ(0, version.value().version().major_number());\n  EXPECT_EQ(0, version.value().version().minor_number());\n  EXPECT_EQ(1, version.value().version().patch());\n  
EXPECT_EQ(1, version.value().metadata().fields().size());\n  EXPECT_EQ(\"private\", version.value().metadata().fields().at(\"build.kind\").string_value());\n\n  // Get the version using deprecated name and check that it matches the\n  // version obtained through the new name.\n  auto deprecated_version =\n      factories.find(\"testing.published.additional.category\")\n          ->second->getFactoryVersion(\"testing.published.versioned.deprecated_name_and_category\");\n  EXPECT_THAT(deprecated_version, Optional(ProtoEq(version.value())));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/runtime_utility_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/config/runtime_utility.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nTEST(RuntimeUtility, TranslateEmpty) {\n  envoy::config::bootstrap::v3::LayeredRuntime layered_runtime_config;\n  translateRuntime({}, layered_runtime_config);\n  envoy::config::bootstrap::v3::LayeredRuntime expected_runtime_config;\n  {\n    auto* layer = expected_runtime_config.add_layers();\n    layer->set_name(\"base\");\n    layer->mutable_static_layer();\n  }\n  {\n    auto* layer = expected_runtime_config.add_layers();\n    layer->set_name(\"admin\");\n    layer->mutable_admin_layer();\n  }\n  EXPECT_THAT(layered_runtime_config, ProtoEq(expected_runtime_config));\n}\n\nTEST(RuntimeUtility, TranslateSubdirOnly) {\n  envoy::config::bootstrap::v3::Runtime runtime_config;\n  runtime_config.set_symlink_root(\"foo\");\n  runtime_config.set_subdirectory(\"bar\");\n  envoy::config::bootstrap::v3::LayeredRuntime layered_runtime_config;\n  translateRuntime(runtime_config, layered_runtime_config);\n  envoy::config::bootstrap::v3::LayeredRuntime expected_runtime_config;\n  {\n    auto* layer = expected_runtime_config.add_layers();\n    layer->set_name(\"base\");\n    layer->mutable_static_layer();\n  }\n  {\n    auto* layer = expected_runtime_config.add_layers();\n    layer->set_name(\"root\");\n    layer->mutable_disk_layer()->set_symlink_root(\"foo\");\n    layer->mutable_disk_layer()->set_subdirectory(\"bar\");\n  }\n  {\n    auto* layer = expected_runtime_config.add_layers();\n    layer->set_name(\"admin\");\n    layer->mutable_admin_layer();\n  }\n  EXPECT_THAT(layered_runtime_config, ProtoEq(expected_runtime_config));\n}\n\nTEST(RuntimeUtility, TranslateSubdirOverride) {\n  envoy::config::bootstrap::v3::Runtime runtime_config;\n  runtime_config.set_symlink_root(\"foo\");\n  
runtime_config.set_subdirectory(\"bar\");\n  runtime_config.set_override_subdirectory(\"baz\");\n  envoy::config::bootstrap::v3::LayeredRuntime layered_runtime_config;\n  translateRuntime(runtime_config, layered_runtime_config);\n  envoy::config::bootstrap::v3::LayeredRuntime expected_runtime_config;\n  {\n    auto* layer = expected_runtime_config.add_layers();\n    layer->set_name(\"base\");\n    layer->mutable_static_layer();\n  }\n  {\n    auto* layer = expected_runtime_config.add_layers();\n    layer->set_name(\"root\");\n    layer->mutable_disk_layer()->set_symlink_root(\"foo\");\n    layer->mutable_disk_layer()->set_subdirectory(\"bar\");\n  }\n  {\n    auto* layer = expected_runtime_config.add_layers();\n    layer->set_name(\"override\");\n    layer->mutable_disk_layer()->set_symlink_root(\"foo\");\n    layer->mutable_disk_layer()->set_subdirectory(\"baz\");\n    layer->mutable_disk_layer()->set_append_service_cluster(true);\n  }\n  {\n    auto* layer = expected_runtime_config.add_layers();\n    layer->set_name(\"admin\");\n    layer->mutable_admin_layer();\n  }\n  EXPECT_THAT(layered_runtime_config, ProtoEq(expected_runtime_config));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/subscription_factory_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.validate.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/subscription_factory_impl.h\"\n#include \"common/config/udpa_resource.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/filesystem/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\nusing ::testing::Invoke;\nusing ::testing::Return;\nusing ::testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nclass SubscriptionFactoryTest : public testing::Test {\npublic:\n  SubscriptionFactoryTest()\n      : http_request_(&cm_.async_client_), api_(Api::createApiForTest(stats_store_, random_)),\n        subscription_factory_(local_info_, dispatcher_, cm_, validation_visitor_, *api_, runtime_) {\n  }\n\n  SubscriptionPtr\n  subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config) {\n    return subscription_factory_.subscriptionFromConfigSource(\n        config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_,\n        resource_decoder_);\n  }\n\n  SubscriptionPtr collectionSubscriptionFromUrl(const std::string& udpa_url) {\n    const auto resource_locator = UdpaResourceIdentifier::decodeUrl(udpa_url);\n    return subscription_factory_.collectionSubscriptionFromUrl(\n       
 resource_locator, {}, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_,\n        callbacks_, resource_decoder_);\n  }\n\n  Upstream::MockClusterManager cm_;\n  Event::MockDispatcher dispatcher_;\n  Random::MockRandomGenerator random_;\n  MockSubscriptionCallbacks callbacks_;\n  MockOpaqueResourceDecoder resource_decoder_;\n  Http::MockAsyncClientRequest http_request_;\n  Stats::MockIsolatedStatsStore stats_store_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  SubscriptionFactoryImpl subscription_factory_;\n};\n\nclass SubscriptionFactoryTestApiConfigSource\n    : public SubscriptionFactoryTest,\n      public testing::WithParamInterface<envoy::config::core::v3::ApiConfigSource::ApiType> {};\n\nTEST_F(SubscriptionFactoryTest, NoConfigSpecifier) {\n  envoy::config::core::v3::ConfigSource config;\n  EXPECT_THROW_WITH_MESSAGE(\n      subscriptionFromConfigSource(config), EnvoyException,\n      \"Missing config source specifier in envoy::config::core::v3::ConfigSource\");\n}\n\nTEST_F(SubscriptionFactoryTest, RestClusterEmpty) {\n  envoy::config::core::v3::ConfigSource config;\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n\n  config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException,\n                          \"API configs must have either a gRPC service or a cluster name defined:\");\n}\n\nTEST_F(SubscriptionFactoryTest, GrpcClusterEmpty) {\n  envoy::config::core::v3::ConfigSource config;\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n\n  config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n\n  EXPECT_CALL(cm_, 
primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException,\n                          \"API configs must have either a gRPC service or a cluster name defined:\");\n}\n\nTEST_F(SubscriptionFactoryTest, RestClusterSingleton) {\n  envoy::config::core::v3::ConfigSource config;\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n\n  config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n  config.mutable_api_config_source()->mutable_refresh_delay()->set_seconds(1);\n  config.mutable_api_config_source()->add_cluster_names(\"static_cluster\");\n  primary_clusters.insert(\"static_cluster\");\n\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  subscriptionFromConfigSource(config);\n}\n\nTEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) {\n  envoy::config::core::v3::ConfigSource config;\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n\n  config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n  config.mutable_api_config_source()->mutable_refresh_delay()->set_seconds(1);\n  config.mutable_api_config_source()->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\n      \"static_cluster\");\n  primary_clusters.insert(\"static_cluster\");\n\n  envoy::config::core::v3::GrpcService expected_grpc_service;\n  expected_grpc_service.mutable_envoy_grpc()->set_cluster_name(\"static_cluster\");\n\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_CALL(cm_, grpcAsyncClientManager()).WillOnce(ReturnRef(cm_.async_client_manager_));\n  EXPECT_CALL(cm_.async_client_manager_,\n              factoryForGrpcService(ProtoEq(expected_grpc_service), _, _))\n      .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n        auto async_client_factory = 
std::make_unique<Grpc::MockAsyncClientFactory>();\n        EXPECT_CALL(*async_client_factory, create()).WillOnce(Invoke([] {\n          return std::make_unique<Grpc::MockAsyncClient>();\n        }));\n        return async_client_factory;\n      }));\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n\n  subscriptionFromConfigSource(config);\n}\n\nTEST_F(SubscriptionFactoryTest, RestClusterMultiton) {\n  envoy::config::core::v3::ConfigSource config;\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n\n  config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n\n  config.mutable_api_config_source()->add_cluster_names(\"static_cluster_foo\");\n  primary_clusters.insert(\"static_cluster_foo\");\n\n  config.mutable_api_config_source()->add_cluster_names(\"static_cluster_bar\");\n  primary_clusters.insert(\"static_cluster_bar\");\n\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException,\n                          fmt::format(\"{} must have a singleton cluster name specified:\",\n                                      config.mutable_api_config_source()->GetTypeName()));\n}\n\nTEST_F(SubscriptionFactoryTest, GrpcClusterMultiton) {\n  envoy::config::core::v3::ConfigSource config;\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n\n  config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n\n  config.mutable_api_config_source()->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\n      \"static_cluster_foo\");\n  primary_clusters.insert(\"static_cluster_foo\");\n  config.mutable_api_config_source()->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\n      \"static_cluster_bar\");\n  primary_clusters.insert(\"static_cluster_bar\");\n\n  EXPECT_CALL(cm_, grpcAsyncClientManager()).WillRepeatedly(ReturnRef(cm_.async_client_manager_));\n  EXPECT_CALL(cm_, 
primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n\n  EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException,\n                          fmt::format(\"{}::.DELTA_.GRPC must have a \"\n                                      \"single gRPC service specified:\",\n                                      config.mutable_api_config_source()->GetTypeName()));\n}\n\nTEST_F(SubscriptionFactoryTest, FilesystemSubscription) {\n  envoy::config::core::v3::ConfigSource config;\n  std::string test_path = TestEnvironment::temporaryDirectory();\n  config.set_path(test_path);\n  auto* watcher = new Filesystem::MockWatcher();\n  EXPECT_CALL(dispatcher_, createFilesystemWatcher_()).WillOnce(Return(watcher));\n  EXPECT_CALL(*watcher, addWatch(test_path, _, _));\n  EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _));\n  subscriptionFromConfigSource(config)->start({\"foo\"});\n}\n\nTEST_F(SubscriptionFactoryTest, FilesystemSubscriptionNonExistentFile) {\n  envoy::config::core::v3::ConfigSource config;\n  config.set_path(\"/blahblah\");\n  EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({\"foo\"}), EnvoyException,\n                            \"envoy::api::v2::Path must refer to an existing path in the system: \"\n                            \"'/blahblah' does not exist\")\n}\n\nTEST_F(SubscriptionFactoryTest, FilesystemCollectionSubscription) {\n  std::string test_path = TestEnvironment::temporaryDirectory();\n  auto* watcher = new Filesystem::MockWatcher();\n  EXPECT_CALL(dispatcher_, createFilesystemWatcher_()).WillOnce(Return(watcher));\n  EXPECT_CALL(*watcher, addWatch(test_path, _, _));\n  EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _));\n  // Unix paths start with /, Windows with c:/.\n  const std::string file_path = test_path[0] == '/' ? 
test_path.substr(1) : test_path;\n  collectionSubscriptionFromUrl(fmt::format(\"file:///{}\", file_path))->start({});\n}\n\nTEST_F(SubscriptionFactoryTest, FilesystemCollectionSubscriptionNonExistentFile){\n    EXPECT_THROW_WITH_MESSAGE(collectionSubscriptionFromUrl(\"file:///blahblah\")->start({}),\n                              EnvoyException,\n                              \"envoy::api::v2::Path must refer to an existing path in the system: \"\n                              \"'/blahblah' does not exist\")}\n\nTEST_F(SubscriptionFactoryTest, LegacySubscription) {\n  envoy::config::core::v3::ConfigSource config;\n  auto* api_config_source = config.mutable_api_config_source();\n  api_config_source->set_api_type(\n      envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY);\n  api_config_source->add_cluster_names(\"static_cluster\");\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n  primary_clusters.insert(\"static_cluster\");\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config)->start({\"static_cluster\"}),\n                          EnvoyException, \"REST_LEGACY no longer a supported ApiConfigSource.*\");\n}\n\nTEST_F(SubscriptionFactoryTest, HttpSubscriptionCustomRequestTimeout) {\n  envoy::config::core::v3::ConfigSource config;\n  auto* api_config_source = config.mutable_api_config_source();\n  api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n  api_config_source->add_cluster_names(\"static_cluster\");\n  api_config_source->mutable_refresh_delay()->set_seconds(1);\n  api_config_source->mutable_request_timeout()->set_seconds(5);\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n  primary_clusters.insert(\"static_cluster\");\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2);\n  EXPECT_CALL(cm_, 
httpAsyncClientForCluster(\"static_cluster\"));\n  EXPECT_CALL(\n      cm_.async_client_,\n      send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(5000))));\n  subscriptionFromConfigSource(config)->start({\"static_cluster\"});\n}\n\nTEST_F(SubscriptionFactoryTest, HttpSubscription) {\n  envoy::config::core::v3::ConfigSource config;\n  auto* api_config_source = config.mutable_api_config_source();\n  api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n  api_config_source->add_cluster_names(\"static_cluster\");\n  api_config_source->mutable_refresh_delay()->set_seconds(1);\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n  primary_clusters.insert(\"static_cluster\");\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2);\n  EXPECT_CALL(cm_, httpAsyncClientForCluster(\"static_cluster\"));\n  EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n      .WillOnce(Invoke([this](Http::RequestMessagePtr& request, Http::AsyncClient::Callbacks&,\n                              const Http::AsyncClient::RequestOptions&) {\n        EXPECT_EQ(\"POST\", request->headers().getMethodValue());\n        EXPECT_EQ(\"static_cluster\", request->headers().getHostValue());\n        EXPECT_EQ(\"/v2/discovery:endpoints\", request->headers().getPathValue());\n        return &http_request_;\n      }));\n  EXPECT_CALL(http_request_, cancel());\n  subscriptionFromConfigSource(config)->start({\"static_cluster\"});\n}\n\n// Confirm error when no refresh delay is set (not checked by schema).\nTEST_F(SubscriptionFactoryTest, HttpSubscriptionNoRefreshDelay) {\n  envoy::config::core::v3::ConfigSource config;\n  auto* api_config_source = config.mutable_api_config_source();\n  api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n  api_config_source->add_cluster_names(\"static_cluster\");\n  Upstream::ClusterManager::ClusterSet 
primary_clusters;\n  primary_clusters.insert(\"static_cluster\");\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({\"static_cluster\"}),\n                            EnvoyException,\n                            \"refresh_delay is required for REST API configuration sources\");\n}\n\nTEST_F(SubscriptionFactoryTest, GrpcSubscription) {\n  envoy::config::core::v3::ConfigSource config;\n  auto* api_config_source = config.mutable_api_config_source();\n  api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n  api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\"static_cluster\");\n  envoy::config::core::v3::GrpcService expected_grpc_service;\n  expected_grpc_service.mutable_envoy_grpc()->set_cluster_name(\"static_cluster\");\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n  primary_clusters.insert(\"static_cluster\");\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_CALL(cm_, grpcAsyncClientManager()).WillOnce(ReturnRef(cm_.async_client_manager_));\n  EXPECT_CALL(cm_.async_client_manager_,\n              factoryForGrpcService(ProtoEq(expected_grpc_service), _, _))\n      .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n        auto async_client_factory = std::make_unique<Grpc::MockAsyncClientFactory>();\n        EXPECT_CALL(*async_client_factory, create()).WillOnce(Invoke([] {\n          return std::make_unique<NiceMock<Grpc::MockAsyncClient>>();\n        }));\n        return async_client_factory;\n      }));\n  EXPECT_CALL(random_, random());\n  EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2);\n  // onConfigUpdateFailed() should not be called for gRPC stream connection failure\n  EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _)).Times(0);\n  
subscriptionFromConfigSource(config)->start({\"static_cluster\"});\n}\n\nTEST_F(SubscriptionFactoryTest, LogWarningOnDeprecatedApi) {\n  envoy::config::core::v3::ConfigSource config;\n\n  config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n  config.mutable_api_config_source()->set_transport_api_version(\n      envoy::config::core::v3::ApiVersion::V2);\n  NiceMock<Runtime::MockSnapshot> snapshot;\n  EXPECT_CALL(runtime_, snapshot()).WillRepeatedly(ReturnRef(snapshot));\n  EXPECT_CALL(snapshot, runtimeFeatureEnabled(_)).WillOnce(Return(true));\n  EXPECT_CALL(runtime_, countDeprecatedFeatureUse());\n\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n  primary_clusters.insert(\"static_cluster\");\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n\n  EXPECT_LOG_CONTAINS(\n      \"warn\", \"xDS of version v2 has been deprecated\", try {\n        subscription_factory_.subscriptionFromConfigSource(\n            config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_,\n            resource_decoder_);\n      } catch (EnvoyException&){/* expected, we pass an empty configuration  */});\n}\n\nINSTANTIATE_TEST_SUITE_P(SubscriptionFactoryTestApiConfigSource,\n                         SubscriptionFactoryTestApiConfigSource,\n                         ::testing::Values(envoy::config::core::v3::ApiConfigSource::REST,\n                                           envoy::config::core::v3::ApiConfigSource::GRPC));\n\nTEST_P(SubscriptionFactoryTestApiConfigSource, NonExistentCluster) {\n  envoy::config::core::v3::ConfigSource config;\n  auto* api_config_source = config.mutable_api_config_source();\n  api_config_source->set_api_type(GetParam());\n  if (api_config_source->api_type() == envoy::config::core::v3::ApiConfigSource::GRPC) {\n    api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\n        \"static_cluster\");\n  } else {\n    
api_config_source->add_cluster_names(\"static_cluster\");\n  }\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n  EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters));\n  EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({\"static_cluster\"}),\n                            EnvoyException,\n                            fmt::format(\"{} must have a statically defined \"\n                                        \"non-EDS cluster: 'static_cluster' does not exist, was \"\n                                        \"added via api, or is an EDS cluster\",\n                                        api_config_source->GetTypeName()));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/subscription_impl_test.cc",
    "content": "#include <memory>\n\n#include \"test/common/config/delta_subscription_test_harness.h\"\n#include \"test/common/config/filesystem_subscription_test_harness.h\"\n#include \"test/common/config/grpc_subscription_test_harness.h\"\n#include \"test/common/config/http_subscription_test_harness.h\"\n#include \"test/common/config/subscription_test_harness.h\"\n\nusing testing::InSequence;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nenum class SubscriptionType {\n  Grpc,\n  DeltaGrpc,\n  Http,\n  Filesystem,\n};\n\nclass SubscriptionImplTest : public testing::TestWithParam<SubscriptionType> {\npublic:\n  SubscriptionImplTest() : SubscriptionImplTest(std::chrono::milliseconds(0)) {}\n  SubscriptionImplTest(std::chrono::milliseconds init_fetch_timeout) {\n    switch (GetParam()) {\n    case SubscriptionType::Grpc:\n      test_harness_ = std::make_unique<GrpcSubscriptionTestHarness>(init_fetch_timeout);\n      break;\n    case SubscriptionType::DeltaGrpc:\n      test_harness_ = std::make_unique<GrpcSubscriptionTestHarness>(init_fetch_timeout);\n      break;\n    case SubscriptionType::Http:\n      test_harness_ = std::make_unique<HttpSubscriptionTestHarness>(init_fetch_timeout);\n      break;\n    case SubscriptionType::Filesystem:\n      test_harness_ = std::make_unique<FilesystemSubscriptionTestHarness>();\n      break;\n    }\n  }\n\n  void TearDown() override { test_harness_->doSubscriptionTearDown(); }\n\n  void startSubscription(const std::set<std::string>& cluster_names) {\n    test_harness_->startSubscription(cluster_names);\n  }\n\n  void updateResourceInterest(const std::set<std::string>& cluster_names) {\n    test_harness_->updateResourceInterest(cluster_names);\n  }\n\n  void expectSendMessage(const std::set<std::string>& cluster_names, const std::string& version,\n                         bool expect_node) {\n    test_harness_->expectSendMessage(cluster_names, version, expect_node);\n  }\n\n  AssertionResult statsAre(uint32_t attempt, 
uint32_t success, uint32_t rejected, uint32_t failure,\n                           uint32_t init_fetch_timeout, uint64_t update_time, uint64_t version,\n                           std::string version_text) {\n    return test_harness_->statsAre(attempt, success, rejected, failure, init_fetch_timeout,\n                                   update_time, version, version_text);\n  }\n\n  void deliverConfigUpdate(const std::vector<std::string> cluster_names, const std::string& version,\n                           bool accept) {\n    test_harness_->deliverConfigUpdate(cluster_names, version, accept);\n  }\n\n  void expectConfigUpdateFailed() { test_harness_->expectConfigUpdateFailed(); }\n\n  void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) {\n    test_harness_->expectEnableInitFetchTimeoutTimer(timeout);\n  }\n\n  void expectDisableInitFetchTimeoutTimer() { test_harness_->expectDisableInitFetchTimeoutTimer(); }\n\n  void callInitFetchTimeoutCb() { test_harness_->callInitFetchTimeoutCb(); }\n\n  std::unique_ptr<SubscriptionTestHarness> test_harness_;\n};\n\nclass SubscriptionImplInitFetchTimeoutTest : public SubscriptionImplTest {\npublic:\n  SubscriptionImplInitFetchTimeoutTest() : SubscriptionImplTest(std::chrono::milliseconds(1000)) {}\n};\n\nSubscriptionType types[] = {SubscriptionType::Grpc, SubscriptionType::DeltaGrpc,\n                            SubscriptionType::Http, SubscriptionType::Filesystem};\nINSTANTIATE_TEST_SUITE_P(SubscriptionImplTest, SubscriptionImplTest, testing::ValuesIn(types));\nINSTANTIATE_TEST_SUITE_P(SubscriptionImplTest, SubscriptionImplInitFetchTimeoutTest,\n                         testing::ValuesIn(types));\n\n// Validate basic request-response succeeds.\nTEST_P(SubscriptionImplTest, InitialRequestResponse) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"v25-ubuntu18-beta\", true);\n  EXPECT_TRUE(\n      
statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 18202868392629624077U, \"v25-ubuntu18-beta\"));\n}\n\n// Validate that multiple streamed updates succeed.\nTEST_P(SubscriptionImplTest, ResponseStream) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"1.2.3.4\", true);\n  EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 14026795738668939420U, \"1.2.3.4\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"5_6_7\", true);\n  EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS, 7612520132475921171U, \"5_6_7\"));\n}\n\n// Validate that the client can reject a config.\nTEST_P(SubscriptionImplTest, RejectConfig) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", false);\n  EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, \"\"));\n}\n\n// Validate that the client can reject a config and accept the same config later.\nTEST_P(SubscriptionImplTest, RejectAcceptConfig) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", false);\n  EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", true);\n  EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, \"0\"));\n}\n\n// Validate that the client can reject a config and accept another config later.\nTEST_P(SubscriptionImplTest, RejectAcceptNextConfig) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", false);\n  EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"1\", true);\n  EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 13237225503670494420U, 
\"1\"));\n}\n\n// Validate that stream updates send a message with the updated resources.\nTEST_P(SubscriptionImplTest, UpdateResources) {\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"42\", true);\n  EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, \"42\"));\n  updateResourceInterest({\"cluster2\"});\n  EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, \"42\"));\n}\n\n// Validate that initial fetch timer is created and calls callback on timeout\nTEST_P(SubscriptionImplInitFetchTimeoutTest, InitialFetchTimeout) {\n  if (GetParam() == SubscriptionType::Filesystem) {\n    return; // initial_fetch_timeout not implemented for filesystem.\n  }\n  InSequence s;\n  expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000));\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  if (GetParam() == SubscriptionType::Http) {\n    expectDisableInitFetchTimeoutTimer();\n  }\n  expectConfigUpdateFailed();\n\n  callInitFetchTimeoutCb();\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 1, 0, 0, \"\"));\n}\n\n// Validate that initial fetch timer is disabled on config update\nTEST_P(SubscriptionImplInitFetchTimeoutTest, DisableInitTimeoutOnSuccess) {\n  InSequence s;\n  expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000));\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, \"\"));\n  expectDisableInitFetchTimeoutTimer();\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", true);\n}\n\n// Validate that initial fetch timer is disabled on config update failed\nTEST_P(SubscriptionImplInitFetchTimeoutTest, DisableInitTimeoutOnFail) {\n  InSequence s;\n  expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000));\n  startSubscription({\"cluster0\", \"cluster1\"});\n  EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 
0, \"\"));\n  expectDisableInitFetchTimeoutTimer();\n  deliverConfigUpdate({\"cluster0\", \"cluster1\"}, \"0\", false);\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/subscription_test_harness.h",
    "content": "#pragma once\n\n#include \"common/config/utility.h\"\n\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nconst uint64_t TEST_TIME_MILLIS = 42000;\n\n/**\n * Interface for different Subscription implementation test harnesses. This has common functionality\n * that we can use to write tests that work across all Subscription types. EDS is used as the API in\n * tests depending on SubscriptionTestHarness, as representative of a subscription API.\n */\nclass SubscriptionTestHarness : public Event::TestUsingSimulatedTime {\npublic:\n  SubscriptionTestHarness()\n      : stats_(Utility::generateStats(stats_store_)),\n        control_plane_stats_(Utility::generateControlPlaneStats(stats_store_)) {\n    simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS)));\n  }\n  virtual ~SubscriptionTestHarness() = default;\n\n  /**\n   * Start subscription and set related expectations.\n   * @param cluster_names initial cluster names to request via EDS.\n   */\n  virtual void startSubscription(const std::set<std::string>& cluster_names) PURE;\n\n  /**\n   * Update cluster names to be delivered via EDS.\n   * @param cluster_names cluster names.\n   */\n  virtual void updateResourceInterest(const std::set<std::string>& cluster_names) PURE;\n\n  /**\n   * Expect that an update request is sent by the Subscription implementation.\n   * @param cluster_names cluster names to expect in the request.\n   * @param version version_info to expect in the request.\n   * @param expect_node whether the node information should be expected\n   */\n  virtual void expectSendMessage(const std::set<std::string>& cluster_names,\n                                 const std::string& version, bool expect_node) PURE;\n\n  /**\n   * Deliver a response to the Subscription implementation and validate.\n   * @param cluster_names 
cluster names to provide in the response\n   * @param version version_info to provide in the response.\n   * @param accept will the onConfigUpdate() callback accept the response?\n   */\n  virtual void deliverConfigUpdate(const std::vector<std::string>& cluster_names,\n                                   const std::string& version, bool accept) PURE;\n\n  virtual testing::AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected,\n                                            uint32_t failure, uint32_t init_fetch_timeout,\n                                            uint64_t update_time, uint64_t version,\n                                            absl::string_view version_text) {\n    // TODO(fredlas) rework update_success_ to make sense across all xDS carriers. Its value in\n    // statsAre() calls in many tests will probably have to be changed.\n    UNREFERENCED_PARAMETER(attempt);\n    if (success != stats_.update_success_.value()) {\n      return testing::AssertionFailure() << \"update_success: expected \" << success << \", got \"\n                                         << stats_.update_success_.value();\n    }\n    if (rejected != stats_.update_rejected_.value()) {\n      return testing::AssertionFailure() << \"update_rejected: expected \" << rejected << \", got \"\n                                         << stats_.update_rejected_.value();\n    }\n    if (failure != stats_.update_failure_.value()) {\n      return testing::AssertionFailure() << \"update_failure: expected \" << failure << \", got \"\n                                         << stats_.update_failure_.value();\n    }\n    if (init_fetch_timeout != stats_.init_fetch_timeout_.value()) {\n      return testing::AssertionFailure() << \"init_fetch_timeout: expected \" << init_fetch_timeout\n                                         << \", got \" << stats_.init_fetch_timeout_.value();\n    }\n    if (update_time != stats_.update_time_.value()) {\n      return 
testing::AssertionFailure()\n             << \"update_time: expected \" << update_time << \", got \" << stats_.update_time_.value();\n    }\n    if (version != stats_.version_.value()) {\n      return testing::AssertionFailure()\n             << \"version: expected \" << version << \", got \" << stats_.version_.value();\n    }\n    if (version_text != stats_.version_text_.value()) {\n      return testing::AssertionFailure()\n             << \"version_text: expected \" << version << \", got \" << stats_.version_text_.value();\n    }\n    return testing::AssertionSuccess();\n  }\n\n  virtual void verifyControlPlaneStats(uint32_t connected_state) {\n    EXPECT_EQ(connected_state, control_plane_stats_.connected_state_.value());\n  }\n\n  virtual void expectConfigUpdateFailed() PURE;\n\n  virtual void expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds timeout) PURE;\n\n  virtual void expectDisableInitFetchTimeoutTimer() PURE;\n\n  virtual void callInitFetchTimeoutCb() PURE;\n\n  virtual void doSubscriptionTearDown() {}\n\n  Stats::TestUtil::TestStore stats_store_;\n  SubscriptionStats stats_;\n  ControlPlaneStats control_plane_stats_;\n};\n\nACTION_P(ThrowOnRejectedConfig, accept) {\n  if (!accept) {\n    throw EnvoyException(\"bad config\");\n  }\n}\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/type_to_endpoint_test.cc",
    "content": "#include \"envoy/api/v2/rds.pb.h\"\n#include \"envoy/service/route/v3/rds.pb.h\"\n\n#include \"common/config/type_to_endpoint.h\"\n\n#include \"gtest/gtest.h\"\n\n// API_NO_BOOST_FILE\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\n// Verify type-to-endpoint methods with RDS as an exemplar.\nTEST(TypeToEndpoint, All) {\n  // The dummy messages are included for link purposes only.\n  envoy::api::v2::RdsDummy _v2_rds_dummy;\n  envoy::service::route::v3::RdsDummy _v3_rds_dummy;\n\n  // Delta gRPC endpoints.\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.DeltaRoutes\",\n            deltaGrpcMethod(\"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n                            envoy::config::core::v3::ApiVersion::AUTO)\n                .full_name());\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.DeltaRoutes\",\n            deltaGrpcMethod(\"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n                            envoy::config::core::v3::ApiVersion::V2)\n                .full_name());\n  EXPECT_EQ(\"envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes\",\n            deltaGrpcMethod(\"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n                            envoy::config::core::v3::ApiVersion::V3)\n                .full_name());\n\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.DeltaRoutes\",\n            deltaGrpcMethod(\"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n                            envoy::config::core::v3::ApiVersion::AUTO)\n                .full_name());\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.DeltaRoutes\",\n            deltaGrpcMethod(\"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n                            envoy::config::core::v3::ApiVersion::V2)\n                .full_name());\n  EXPECT_EQ(\"envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes\",\n            
deltaGrpcMethod(\"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n                            envoy::config::core::v3::ApiVersion::V3)\n                .full_name());\n\n  // SotW gRPC endpoints.\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.StreamRoutes\",\n            sotwGrpcMethod(\"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n                           envoy::config::core::v3::ApiVersion::AUTO)\n                .full_name());\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.StreamRoutes\",\n            sotwGrpcMethod(\"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n                           envoy::config::core::v3::ApiVersion::V2)\n                .full_name());\n  EXPECT_EQ(\"envoy.service.route.v3.RouteDiscoveryService.StreamRoutes\",\n            sotwGrpcMethod(\"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n                           envoy::config::core::v3::ApiVersion::V3)\n                .full_name());\n\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.StreamRoutes\",\n            sotwGrpcMethod(\"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n                           envoy::config::core::v3::ApiVersion::AUTO)\n                .full_name());\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.StreamRoutes\",\n            sotwGrpcMethod(\"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n                           envoy::config::core::v3::ApiVersion::V2)\n                .full_name());\n  EXPECT_EQ(\"envoy.service.route.v3.RouteDiscoveryService.StreamRoutes\",\n            sotwGrpcMethod(\"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n                           envoy::config::core::v3::ApiVersion::V3)\n                .full_name());\n\n  // REST endpoints.\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.FetchRoutes\",\n            restMethod(\"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n                       
envoy::config::core::v3::ApiVersion::AUTO)\n                .full_name());\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.FetchRoutes\",\n            restMethod(\"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n                       envoy::config::core::v3::ApiVersion::V2)\n                .full_name());\n  EXPECT_EQ(\"envoy.service.route.v3.RouteDiscoveryService.FetchRoutes\",\n            restMethod(\"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n                       envoy::config::core::v3::ApiVersion::V3)\n                .full_name());\n\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.FetchRoutes\",\n            restMethod(\"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n                       envoy::config::core::v3::ApiVersion::AUTO)\n                .full_name());\n  EXPECT_EQ(\"envoy.api.v2.RouteDiscoveryService.FetchRoutes\",\n            restMethod(\"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n                       envoy::config::core::v3::ApiVersion::V2)\n                .full_name());\n  EXPECT_EQ(\"envoy.service.route.v3.RouteDiscoveryService.FetchRoutes\",\n            restMethod(\"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n                       envoy::config::core::v3::ApiVersion::V3)\n                .full_name());\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/udpa_context_params_test.cc",
    "content": "#include \"common/config/udpa_context_params.h\"\n\n#include \"test/common/config/udpa_test_utility.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n#include \"udpa_test_utility.h\"\n\nusing ::testing::Pair;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\n// Validate all the node parameter renderers (except user_agent_build_version, which has its own\n// test below).\nTEST(UdpaContextParamsTest, NodeAll) {\n  envoy::config::core::v3::Node node;\n  TestUtility::loadFromYaml(R\"EOF(\n  id: some_id\n  cluster: some_cluster\n  user_agent_name: xds_client\n  user_agent_version: 1.2.3\n  locality:\n    region: some_region\n    zone: some_zone\n    sub_zone: some_sub_zone\n  metadata:\n    foo: true\n    bar: \"a\"\n    baz: 42\n  )EOF\",\n                            node);\n  const auto context_params = UdpaContextParams::encode(\n      node,\n      {\"id\", \"cluster\", \"user_agent_name\", \"user_agent_version\", \"locality.region\", \"locality.zone\",\n       \"locality.sub_zone\", \"metadata\"},\n      {}, {}, {});\n  EXPECT_CONTEXT_PARAMS(\n      context_params, Pair(\"udpa.node.cluster\", \"some_cluster\"), Pair(\"udpa.node.id\", \"some_id\"),\n      Pair(\"udpa.node.locality.sub_zone\", \"some_sub_zone\"),\n      Pair(\"udpa.node.locality.zone\", \"some_zone\"),\n      Pair(\"udpa.node.locality.region\", \"some_region\"), Pair(\"udpa.node.metadata.bar\", \"\\\"a\\\"\"),\n      Pair(\"udpa.node.metadata.baz\", \"42\"), Pair(\"udpa.node.metadata.foo\", \"true\"),\n      Pair(\"udpa.node.user_agent_name\", \"xds_client\"),\n      Pair(\"udpa.node.user_agent_version\", \"1.2.3\"));\n}\n\n// Validate that we can select a subset of node parameters.\nTEST(UdpaContextParamsTest, NodeParameterSelection) {\n  envoy::config::core::v3::Node node;\n  TestUtility::loadFromYaml(R\"EOF(\n  id: some_id\n  cluster: some_cluster\n  user_agent_name: xds_client\n  user_agent_version: 1.2.3\n  
locality:\n    region: some_region\n    zone: some_zone\n    sub_zone: some_sub_zone\n  metadata:\n    foo: true\n    bar: \"a\"\n    baz: 42\n  )EOF\",\n                            node);\n  const auto context_params = UdpaContextParams::encode(\n      node, {\"cluster\", \"user_agent_version\", \"locality.region\", \"locality.sub_zone\"}, {}, {}, {});\n  EXPECT_CONTEXT_PARAMS(context_params, Pair(\"udpa.node.cluster\", \"some_cluster\"),\n                        Pair(\"udpa.node.locality.sub_zone\", \"some_sub_zone\"),\n                        Pair(\"udpa.node.locality.region\", \"some_region\"),\n                        Pair(\"udpa.node.user_agent_version\", \"1.2.3\"));\n}\n\n// Validate user_agent_build_version renderers.\nTEST(UdpaContextParamsTest, NodeUserAgentBuildVersion) {\n  envoy::config::core::v3::Node node;\n  TestUtility::loadFromYaml(R\"EOF(\n  user_agent_build_version:\n    version:\n      major_number: 1\n      minor_number: 2\n      patch: 3\n    metadata:\n      foo: true\n      bar: \"a\"\n      baz: 42\n  )EOF\",\n                            node);\n  const auto context_params = UdpaContextParams::encode(\n      node, {\"user_agent_build_version.version\", \"user_agent_build_version.metadata\"}, {}, {}, {});\n  EXPECT_CONTEXT_PARAMS(context_params,\n                        Pair(\"udpa.node.user_agent_build_version.metadata.bar\", \"\\\"a\\\"\"),\n                        Pair(\"udpa.node.user_agent_build_version.metadata.baz\", \"42\"),\n                        Pair(\"udpa.node.user_agent_build_version.metadata.foo\", \"true\"),\n                        Pair(\"udpa.node.user_agent_build_version.version\", \"1.2.3\"));\n}\n\n// Validate that resource locator context parameters are pass-thru.\nTEST(UdpaContextParamsTest, ResoureContextParams) {\n  udpa::core::v1::ContextParams resource_context_params;\n  TestUtility::loadFromYaml(R\"EOF(\n  params:\n    foo: \"\\\"some_string\\\"\"\n    bar: \"123\"\n    baz: \"true\"\n  )EOF\",\n                
            resource_context_params);\n  const auto context_params = UdpaContextParams::encode({}, {}, resource_context_params, {}, {});\n  EXPECT_CONTEXT_PARAMS(context_params, Pair(\"bar\", \"123\"), Pair(\"baz\", \"true\"),\n                        Pair(\"foo\", \"\\\"some_string\\\"\"));\n}\n\n// Validate client feature capabilities context parameter transform.\nTEST(UdpaContextParamsTest, ClientFeatureCapabilities) {\n  const auto context_params =\n      UdpaContextParams::encode({}, {}, {}, {\"some.feature\", \"another.feature\"}, {});\n  EXPECT_CONTEXT_PARAMS(context_params, Pair(\"udpa.client_feature.another.feature\", \"true\"),\n                        Pair(\"udpa.client_feature.some.feature\", \"true\"));\n}\n\n// Validate per-resource well-known attributes transform.\nTEST(UdpaContextParamsTest, ResourceWktAttribs) {\n  const auto context_params =\n      UdpaContextParams::encode({}, {}, {}, {}, {{\"foo\", \"1\"}, {\"bar\", \"2\"}});\n  EXPECT_CONTEXT_PARAMS(context_params, Pair(\"udpa.resource.foo\", \"1\"),\n                        Pair(\"udpa.resource.bar\", \"2\"));\n}\n\n// Validate that the precedence relationships in the specification hold.\nTEST(UdpaContextParamsTest, Layering) {\n  envoy::config::core::v3::Node node;\n  TestUtility::loadFromYaml(R\"EOF(\n  id: some_id\n  cluster: some_cluster\n  )EOF\",\n                            node);\n  udpa::core::v1::ContextParams resource_context_params;\n  TestUtility::loadFromYaml(R\"EOF(\n  params:\n    id: another_id\n    udpa.node.cluster: another_cluster\n  )EOF\",\n                            resource_context_params);\n  const auto context_params = UdpaContextParams::encode(\n      node, {\"id\", \"cluster\"}, resource_context_params, {\"id\"}, {{\"cluster\", \"huh\"}});\n  EXPECT_CONTEXT_PARAMS(context_params, Pair(\"id\", \"another_id\"),\n                        Pair(\"udpa.client_feature.id\", \"true\"),\n                        Pair(\"udpa.node.cluster\", \"another_cluster\"),\n              
          Pair(\"udpa.node.id\", \"some_id\"), Pair(\"udpa.resource.cluster\", \"huh\"));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/udpa_resource_test.cc",
    "content": "#include \"common/config/udpa_resource.h\"\n\n#include \"test/common/config/udpa_test_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing ::testing::ElementsAre;\nusing ::testing::Pair;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nconst std::string EscapedUrn =\n    \"udpa://f123%25%2F%3F%23o/envoy.config.listener.v3.Listener/b%25%3A%2F%3F%23%5B%5Dar//\"\n    \"baz?%25%23%5B%5D%26%3Dab=cde%25%23%5B%5D%26%3Df\";\nconst std::string EscapedUrnWithManyQueryParams =\n    \"udpa://f123%25%2F%3F%23o/envoy.config.listener.v3.Listener/b%25%3A%2F%3F%23%5B%5Dar//\"\n    \"baz?%25%23%5B%5D%26%3D=bar&%25%23%5B%5D%26%3Dab=cde%25%23%5B%5D%26%3Df&foo=%25%23%5B%5D%26%3D\";\nconst std::string EscapedUrlWithManyQueryParamsAndDirectives =\n    EscapedUrnWithManyQueryParams +\n    \"#entry=some_en%25%23%5B%5D%2Ctry,alt=udpa://fo%2525%252F%253F%2523o/bar%23alt=udpa://bar/\"\n    \"baz%2Centry=h%2525%2523%255B%255D%252Cuh\";\n\n// for all x. encodeUri(decodeUri(x)) = x where x comes from sample of valid udpa:// URIs.\n// TODO(htuch): write a fuzzer that validates this property as well.\nTEST(UdpaResourceIdentifierTest, DecodeEncode) {\n  const std::vector<std::string> uris = {\n      \"udpa:///envoy.config.listener.v3.Listener\",\n      \"udpa://foo/envoy.config.listener.v3.Listener\",\n      \"udpa://foo/envoy.config.listener.v3.Listener/bar\",\n      \"udpa://foo/envoy.config.listener.v3.Listener/bar/baz\",\n      \"udpa://foo/envoy.config.listener.v3.Listener/bar////baz\",\n      \"udpa://foo/envoy.config.listener.v3.Listener?ab=cde\",\n      \"udpa://foo/envoy.config.listener.v3.Listener/bar?ab=cd\",\n      \"udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=cde\",\n      \"udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=\",\n      \"udpa://foo/envoy.config.listener.v3.Listener/bar/baz?=cd\",\n      \"udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=cde&ba=edc&z=f\",\n      EscapedUrn,\n      
EscapedUrnWithManyQueryParams,\n  };\n  UdpaResourceIdentifier::EncodeOptions encode_options;\n  encode_options.sort_context_params_ = true;\n  for (const std::string& uri : uris) {\n    EXPECT_EQ(uri, UdpaResourceIdentifier::encodeUrn(UdpaResourceIdentifier::decodeUrn(uri),\n                                                     encode_options));\n    EXPECT_EQ(uri, UdpaResourceIdentifier::encodeUrl(UdpaResourceIdentifier::decodeUrl(uri),\n                                                     encode_options));\n  }\n}\n\n// Validate that URN decoding behaves as expected component-wise.\nTEST(UdpaResourceNameTest, DecodeSuccess) {\n  const auto resource_name = UdpaResourceIdentifier::decodeUrn(EscapedUrnWithManyQueryParams);\n  EXPECT_EQ(\"f123%/?#o\", resource_name.authority());\n  EXPECT_EQ(\"envoy.config.listener.v3.Listener\", resource_name.resource_type());\n  EXPECT_THAT(resource_name.id(), ElementsAre(\"b%:/?#[]ar\", \"\", \"baz\"));\n  EXPECT_CONTEXT_PARAMS(resource_name.context(), Pair(\"%#[]&=\", \"bar\"),\n                        Pair(\"%#[]&=ab\", \"cde%#[]&=f\"), Pair(\"foo\", \"%#[]&=\"));\n}\n\n// Validate that URL decoding behaves as expected component-wise.\nTEST(UdpaResourceLocatorTest, DecodeSuccess) {\n  const auto resource_locator =\n      UdpaResourceIdentifier::decodeUrl(EscapedUrlWithManyQueryParamsAndDirectives);\n  EXPECT_EQ(\"f123%/?#o\", resource_locator.authority());\n  EXPECT_EQ(\"envoy.config.listener.v3.Listener\", resource_locator.resource_type());\n  EXPECT_THAT(resource_locator.id(), ElementsAre(\"b%:/?#[]ar\", \"\", \"baz\"));\n  EXPECT_CONTEXT_PARAMS(resource_locator.exact_context(), Pair(\"%#[]&=\", \"bar\"),\n                        Pair(\"%#[]&=ab\", \"cde%#[]&=f\"), Pair(\"foo\", \"%#[]&=\"));\n  EXPECT_EQ(2, resource_locator.directives().size());\n  EXPECT_EQ(\"some_en%#[],try\", resource_locator.directives()[0].entry());\n  const auto& alt = resource_locator.directives()[1].alt();\n  EXPECT_EQ(\"fo%/?#o\", alt.authority());\n 
 EXPECT_EQ(\"bar\", alt.resource_type());\n  EXPECT_EQ(2, alt.directives().size());\n  const auto& inner_alt = alt.directives()[0].alt();\n  EXPECT_EQ(\"bar\", inner_alt.authority());\n  EXPECT_EQ(\"baz\", inner_alt.resource_type());\n  EXPECT_EQ(\"h%#[],uh\", alt.directives()[1].entry());\n}\n\n// Validate that the URN decoding behaves with a near-empty UDPA resource name.\nTEST(UdpaResourceLocatorTest, DecodeEmpty) {\n  const auto resource_name =\n      UdpaResourceIdentifier::decodeUrn(\"udpa:///envoy.config.listener.v3.Listener\");\n  EXPECT_TRUE(resource_name.authority().empty());\n  EXPECT_EQ(\"envoy.config.listener.v3.Listener\", resource_name.resource_type());\n  EXPECT_TRUE(resource_name.id().empty());\n  EXPECT_TRUE(resource_name.context().params().empty());\n}\n\n// Validate that the URL decoding behaves with a near-empty UDPA resource locator.\nTEST(UdpaResourceNameTest, DecodeEmpty) {\n  const auto resource_locator =\n      UdpaResourceIdentifier::decodeUrl(\"udpa:///envoy.config.listener.v3.Listener\");\n  EXPECT_TRUE(resource_locator.authority().empty());\n  EXPECT_EQ(\"envoy.config.listener.v3.Listener\", resource_locator.resource_type());\n  EXPECT_TRUE(resource_locator.id().empty());\n  EXPECT_TRUE(resource_locator.exact_context().params().empty());\n  EXPECT_TRUE(resource_locator.directives().empty());\n}\n\n// Negative tests for URN decoding.\nTEST(UdpaResourceNameTest, DecodeFail) {\n  {\n    EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrn(\"foo://\"),\n                              UdpaResourceIdentifier::DecodeException,\n                              \"foo:// does not have an udpa: scheme\");\n  }\n  {\n    EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrn(\"udpa://foo\"),\n                              UdpaResourceIdentifier::DecodeException,\n                              \"Resource type missing from /\");\n  }\n}\n\n// Negative tests for URL decoding.\nTEST(UdpaResourceLocatorTest, DecodeFail) {\n  {\n    
EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrl(\"foo://\"),\n                              UdpaResourceIdentifier::DecodeException,\n                              \"foo:// does not have a udpa:, http: or file: scheme\");\n  }\n  {\n    EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrl(\"udpa://foo\"),\n                              UdpaResourceIdentifier::DecodeException,\n                              \"Resource type missing from /\");\n  }\n  {\n    EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrl(\"udpa://foo/some-type#bar=baz\"),\n                              UdpaResourceIdentifier::DecodeException,\n                              \"Unknown fragment component bar=baz\");\n  }\n}\n\n// Validate parsing for udpa:, http: and file: schemes.\nTEST(UdpaResourceLocatorTest, Schemes) {\n  {\n    const auto resource_locator =\n        UdpaResourceIdentifier::decodeUrl(\"udpa://foo/bar/baz/blah?a=b#entry=m\");\n    EXPECT_EQ(udpa::core::v1::ResourceLocator::UDPA, resource_locator.scheme());\n    EXPECT_EQ(\"foo\", resource_locator.authority());\n    EXPECT_EQ(\"bar\", resource_locator.resource_type());\n    EXPECT_THAT(resource_locator.id(), ElementsAre(\"baz\", \"blah\"));\n    EXPECT_CONTEXT_PARAMS(resource_locator.exact_context(), Pair(\"a\", \"b\"));\n    EXPECT_EQ(1, resource_locator.directives().size());\n    EXPECT_EQ(\"m\", resource_locator.directives()[0].entry());\n    EXPECT_EQ(\"udpa://foo/bar/baz/blah?a=b#entry=m\",\n              UdpaResourceIdentifier::encodeUrl(resource_locator));\n  }\n  {\n    const auto resource_locator =\n        UdpaResourceIdentifier::decodeUrl(\"http://foo/bar/baz/blah?a=b#entry=m\");\n    EXPECT_EQ(udpa::core::v1::ResourceLocator::HTTP, resource_locator.scheme());\n    EXPECT_EQ(\"foo\", resource_locator.authority());\n    EXPECT_EQ(\"bar\", resource_locator.resource_type());\n    EXPECT_THAT(resource_locator.id(), ElementsAre(\"baz\", \"blah\"));\n    
EXPECT_CONTEXT_PARAMS(resource_locator.exact_context(), Pair(\"a\", \"b\"));\n    EXPECT_EQ(1, resource_locator.directives().size());\n    EXPECT_EQ(\"m\", resource_locator.directives()[0].entry());\n    EXPECT_EQ(\"http://foo/bar/baz/blah?a=b#entry=m\",\n              UdpaResourceIdentifier::encodeUrl(resource_locator));\n  }\n  {\n    const auto resource_locator = UdpaResourceIdentifier::decodeUrl(\"file:///bar/baz/blah#entry=m\");\n    EXPECT_EQ(udpa::core::v1::ResourceLocator::FILE, resource_locator.scheme());\n    EXPECT_THAT(resource_locator.id(), ElementsAre(\"bar\", \"baz\", \"blah\"));\n    EXPECT_EQ(1, resource_locator.directives().size());\n    EXPECT_EQ(\"m\", resource_locator.directives()[0].entry());\n    EXPECT_EQ(\"file:///bar/baz/blah#entry=m\", UdpaResourceIdentifier::encodeUrl(resource_locator));\n  }\n}\n\n// extra tests for fragment handling\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/udpa_test_utility.h",
    "content": "#pragma once\n\n#include \"gtest/gtest.h\"\n\n#define EXPECT_CONTEXT_PARAMS(context_params, ...)                                                 \\\n  {                                                                                                \\\n    std::map<std::string, std::string> param_map((context_params).params().begin(),                \\\n                                                 (context_params).params().end());                 \\\n    EXPECT_THAT(param_map, ::testing::UnorderedElementsAre(__VA_ARGS__));                          \\\n  }\n\nnamespace Envoy {\nnamespace Config {\nnamespace {} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/utility_test.cc",
    "content": "#include \"envoy/api/v2/cluster.pb.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/filters/http/cors/v3/cors.pb.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/config/api_version.h\"\n#include \"common/config/utility.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/thread_local_cluster.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"udpa/type/v1/typed_struct.pb.h\"\n\nusing testing::_;\nusing testing::Ref;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nTEST(UtilityTest, ComputeHashedVersion) {\n  EXPECT_EQ(\"hash_2e1472b57af294d1\", Utility::computeHashedVersion(\"{}\").first);\n  EXPECT_EQ(\"hash_33bf00a859c4ba3f\", Utility::computeHashedVersion(\"foo\").first);\n}\n\nTEST(UtilityTest, ApiConfigSourceRefreshDelay) {\n  envoy::config::core::v3::ApiConfigSource api_config_source;\n  api_config_source.mutable_refresh_delay()->CopyFrom(\n      Protobuf::util::TimeUtil::MillisecondsToDuration(1234));\n  EXPECT_EQ(1234, Utility::apiConfigSourceRefreshDelay(api_config_source).count());\n}\n\nTEST(UtilityTest, ApiConfigSourceDefaultRequestTimeout) {\n  envoy::config::core::v3::ApiConfigSource api_config_source;\n  EXPECT_EQ(1000, Utility::apiConfigSourceRequestTimeout(api_config_source).count());\n}\n\nTEST(UtilityTest, 
ApiConfigSourceRequestTimeout) {\n  envoy::config::core::v3::ApiConfigSource api_config_source;\n  api_config_source.mutable_request_timeout()->CopyFrom(\n      Protobuf::util::TimeUtil::MillisecondsToDuration(1234));\n  EXPECT_EQ(1234, Utility::apiConfigSourceRequestTimeout(api_config_source).count());\n}\n\nTEST(UtilityTest, ConfigSourceDefaultInitFetchTimeout) {\n  envoy::config::core::v3::ConfigSource config_source;\n  EXPECT_EQ(15000, Utility::configSourceInitialFetchTimeout(config_source).count());\n}\n\nTEST(UtilityTest, ConfigSourceInitFetchTimeout) {\n  envoy::config::core::v3::ConfigSource config_source;\n  config_source.mutable_initial_fetch_timeout()->CopyFrom(\n      Protobuf::util::TimeUtil::MillisecondsToDuration(654));\n  EXPECT_EQ(654, Utility::configSourceInitialFetchTimeout(config_source).count());\n}\n\nTEST(UtilityTest, TranslateApiConfigSource) {\n  envoy::config::core::v3::ApiConfigSource api_config_source_rest_legacy;\n  Utility::translateApiConfigSource(\"test_rest_legacy_cluster\", 10000,\n                                    ApiType::get().UnsupportedRestLegacy,\n                                    api_config_source_rest_legacy);\n  EXPECT_EQ(\n      envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY,\n      api_config_source_rest_legacy.api_type());\n  EXPECT_EQ(10000,\n            DurationUtil::durationToMilliseconds(api_config_source_rest_legacy.refresh_delay()));\n  EXPECT_EQ(\"test_rest_legacy_cluster\", api_config_source_rest_legacy.cluster_names(0));\n\n  envoy::config::core::v3::ApiConfigSource api_config_source_rest;\n  Utility::translateApiConfigSource(\"test_rest_cluster\", 20000, ApiType::get().Rest,\n                                    api_config_source_rest);\n  EXPECT_EQ(envoy::config::core::v3::ApiConfigSource::REST, api_config_source_rest.api_type());\n  EXPECT_EQ(20000, DurationUtil::durationToMilliseconds(api_config_source_rest.refresh_delay()));\n  EXPECT_EQ(\"test_rest_cluster\", 
api_config_source_rest.cluster_names(0));\n\n  envoy::config::core::v3::ApiConfigSource api_config_source_grpc;\n  Utility::translateApiConfigSource(\"test_grpc_cluster\", 30000, ApiType::get().Grpc,\n                                    api_config_source_grpc);\n  EXPECT_EQ(envoy::config::core::v3::ApiConfigSource::GRPC, api_config_source_grpc.api_type());\n  EXPECT_EQ(30000, DurationUtil::durationToMilliseconds(api_config_source_grpc.refresh_delay()));\n  EXPECT_EQ(\"test_grpc_cluster\",\n            api_config_source_grpc.grpc_services(0).envoy_grpc().cluster_name());\n}\n\nTEST(UtilityTest, createTagProducer) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  auto producer = Utility::createTagProducer(bootstrap);\n  ASSERT(producer != nullptr);\n  std::vector<Stats::Tag> tags;\n  auto extracted_name = producer->produceTags(\"http.config_test.rq_total\", tags);\n  ASSERT_EQ(extracted_name, \"http.rq_total\");\n  ASSERT_EQ(tags.size(), 1);\n}\n\nTEST(UtilityTest, CheckFilesystemSubscriptionBackingPath) {\n  Api::ApiPtr api = Api::createApiForTest();\n\n  EXPECT_THROW_WITH_MESSAGE(\n      Utility::checkFilesystemSubscriptionBackingPath(\"foo\", *api), EnvoyException,\n      \"envoy::api::v2::Path must refer to an existing path in the system: 'foo' does not exist\");\n  std::string test_path = TestEnvironment::temporaryDirectory();\n  Utility::checkFilesystemSubscriptionBackingPath(test_path, *api);\n}\n\nTEST(UtilityTest, ParseDefaultRateLimitSettings) {\n  envoy::config::core::v3::ApiConfigSource api_config_source;\n  const RateLimitSettings& rate_limit_settings = Utility::parseRateLimitSettings(api_config_source);\n  EXPECT_EQ(false, rate_limit_settings.enabled_);\n  EXPECT_EQ(100, rate_limit_settings.max_tokens_);\n  EXPECT_EQ(10, rate_limit_settings.fill_rate_);\n}\n\nTEST(UtilityTest, ParseEmptyRateLimitSettings) {\n  envoy::config::core::v3::ApiConfigSource api_config_source;\n  api_config_source.mutable_rate_limit_settings();\n  const 
RateLimitSettings& rate_limit_settings = Utility::parseRateLimitSettings(api_config_source);\n  EXPECT_EQ(true, rate_limit_settings.enabled_);\n  EXPECT_EQ(100, rate_limit_settings.max_tokens_);\n  EXPECT_EQ(10, rate_limit_settings.fill_rate_);\n}\n\nTEST(UtilityTest, ParseRateLimitSettings) {\n  envoy::config::core::v3::ApiConfigSource api_config_source;\n  envoy::config::core::v3::RateLimitSettings* rate_limits =\n      api_config_source.mutable_rate_limit_settings();\n  rate_limits->mutable_max_tokens()->set_value(500);\n  rate_limits->mutable_fill_rate()->set_value(4);\n  const RateLimitSettings& rate_limit_settings = Utility::parseRateLimitSettings(api_config_source);\n  EXPECT_EQ(true, rate_limit_settings.enabled_);\n  EXPECT_EQ(500, rate_limit_settings.max_tokens_);\n  EXPECT_EQ(4, rate_limit_settings.fill_rate_);\n}\n\n// TEST(UtilityTest, FactoryForGrpcApiConfigSource) should catch misconfigured\n// API configs along the dimension of ApiConfigSource type.\nTEST(UtilityTest, FactoryForGrpcApiConfigSource) {\n  NiceMock<Grpc::MockAsyncClientManager> async_client_manager;\n  Stats::MockStore scope;\n\n  {\n    envoy::config::core::v3::ApiConfigSource api_config_source;\n    api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n    EXPECT_THROW_WITH_REGEX(\n        Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope,\n                                               false),\n        EnvoyException, \"API configs must have either a gRPC service or a cluster name defined:\");\n  }\n\n  {\n    envoy::config::core::v3::ApiConfigSource api_config_source;\n    api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n    api_config_source.add_grpc_services();\n    api_config_source.add_grpc_services();\n    EXPECT_THROW_WITH_REGEX(Utility::factoryForGrpcApiConfigSource(async_client_manager,\n                                                                   api_config_source, scope, 
false),\n                            EnvoyException,\n                            fmt::format(\"{}::.DELTA_.GRPC must have a single gRPC service \"\n                                        \"specified:\",\n                                        api_config_source.GetTypeName()));\n  }\n\n  {\n    envoy::config::core::v3::ApiConfigSource api_config_source;\n    api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n    api_config_source.add_cluster_names();\n    // this also logs a warning for setting REST cluster names for a gRPC API config.\n    EXPECT_THROW_WITH_REGEX(Utility::factoryForGrpcApiConfigSource(async_client_manager,\n                                                                   api_config_source, scope, false),\n                            EnvoyException,\n                            fmt::format(\"{}::.DELTA_.GRPC must not have a cluster name \"\n                                        \"specified:\",\n                                        api_config_source.GetTypeName()));\n  }\n\n  {\n    envoy::config::core::v3::ApiConfigSource api_config_source;\n    api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n    api_config_source.add_cluster_names();\n    api_config_source.add_cluster_names();\n    EXPECT_THROW_WITH_REGEX(Utility::factoryForGrpcApiConfigSource(async_client_manager,\n                                                                   api_config_source, scope, false),\n                            EnvoyException,\n                            fmt::format(\"{}::.DELTA_.GRPC must not have a cluster name \"\n                                        \"specified:\",\n                                        api_config_source.GetTypeName()));\n  }\n\n  {\n    envoy::config::core::v3::ApiConfigSource api_config_source;\n    api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n    
api_config_source.add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\"foo\");\n    // this also logs a warning for configuring gRPC clusters for a REST API config.\n    EXPECT_THROW_WITH_REGEX(Utility::factoryForGrpcApiConfigSource(async_client_manager,\n                                                                   api_config_source, scope, false),\n                            EnvoyException,\n                            fmt::format(\"{}, if not a gRPC type, must not have a gRPC service \"\n                                        \"specified:\",\n                                        api_config_source.GetTypeName()));\n  }\n\n  {\n    envoy::config::core::v3::ApiConfigSource api_config_source;\n    api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n    api_config_source.add_cluster_names(\"foo\");\n    EXPECT_THROW_WITH_REGEX(Utility::factoryForGrpcApiConfigSource(async_client_manager,\n                                                                   api_config_source, scope, false),\n                            EnvoyException,\n                            fmt::format(\"{} type must be gRPC:\", api_config_source.GetTypeName()));\n  }\n\n  {\n    envoy::config::core::v3::ApiConfigSource api_config_source;\n    api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n    api_config_source.add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\"foo\");\n    envoy::config::core::v3::GrpcService expected_grpc_service;\n    expected_grpc_service.mutable_envoy_grpc()->set_cluster_name(\"foo\");\n    EXPECT_CALL(async_client_manager,\n                factoryForGrpcService(ProtoEq(expected_grpc_service), Ref(scope), false));\n    Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope, false);\n  }\n\n  {\n    envoy::config::core::v3::ApiConfigSource api_config_source;\n    api_config_source.set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n    
api_config_source.add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\"foo\");\n    EXPECT_CALL(\n        async_client_manager,\n        factoryForGrpcService(ProtoEq(api_config_source.grpc_services(0)), Ref(scope), true));\n    Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope, true);\n  }\n}\n\nTEST(UtilityTest, PrepareDnsRefreshStrategy) {\n  NiceMock<Random::MockRandomGenerator> random;\n\n  {\n    // dns_failure_refresh_rate not set.\n    envoy::config::cluster::v3::Cluster cluster;\n    BackOffStrategyPtr strategy =\n        Utility::prepareDnsRefreshStrategy<envoy::config::cluster::v3::Cluster>(cluster, 5000,\n                                                                                random);\n    EXPECT_NE(nullptr, dynamic_cast<FixedBackOffStrategy*>(strategy.get()));\n  }\n\n  {\n    // dns_failure_refresh_rate set.\n    envoy::config::cluster::v3::Cluster cluster;\n    cluster.mutable_dns_failure_refresh_rate()->mutable_base_interval()->set_seconds(7);\n    cluster.mutable_dns_failure_refresh_rate()->mutable_max_interval()->set_seconds(10);\n    BackOffStrategyPtr strategy =\n        Utility::prepareDnsRefreshStrategy<envoy::config::cluster::v3::Cluster>(cluster, 5000,\n                                                                                random);\n    EXPECT_NE(nullptr, dynamic_cast<JitteredExponentialBackOffStrategy*>(strategy.get()));\n  }\n\n  {\n    // dns_failure_refresh_rate set with invalid max_interval.\n    envoy::config::cluster::v3::Cluster cluster;\n    cluster.mutable_dns_failure_refresh_rate()->mutable_base_interval()->set_seconds(7);\n    cluster.mutable_dns_failure_refresh_rate()->mutable_max_interval()->set_seconds(2);\n    EXPECT_THROW_WITH_REGEX(Utility::prepareDnsRefreshStrategy<envoy::config::cluster::v3::Cluster>(\n                                cluster, 5000, random),\n                            EnvoyException,\n                            \"dns_failure_refresh_rate must 
have max_interval greater than \"\n                            \"or equal to the base_interval\");\n  }\n}\n\n// Validate that an opaque config of the wrong type throws during conversion.\nTEST(UtilityTest, AnyWrongType) {\n  ProtobufWkt::Duration source_duration;\n  source_duration.set_seconds(42);\n  ProtobufWkt::Any typed_config;\n  typed_config.PackFrom(source_duration);\n  ProtobufWkt::Timestamp out;\n  EXPECT_THROW_WITH_REGEX(\n      Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),\n                                     ProtobufMessage::getStrictValidationVisitor(), out),\n      EnvoyException,\n      R\"(Unable to unpack as google.protobuf.Timestamp: \\[type.googleapis.com/google.protobuf.Duration\\] .*)\");\n}\n\nTEST(UtilityTest, TranslateAnyWrongToFactoryConfig) {\n  ProtobufWkt::Duration source_duration;\n  source_duration.set_seconds(42);\n  ProtobufWkt::Any typed_config;\n  typed_config.PackFrom(source_duration);\n\n  MockTypedFactory factory;\n  EXPECT_CALL(factory, createEmptyConfigProto()).WillOnce(Invoke([]() -> ProtobufTypes::MessagePtr {\n    return ProtobufTypes::MessagePtr{new ProtobufWkt::Timestamp()};\n  }));\n\n  EXPECT_THROW_WITH_REGEX(\n      Utility::translateAnyToFactoryConfig(typed_config,\n                                           ProtobufMessage::getStrictValidationVisitor(), factory),\n      EnvoyException,\n      R\"(Unable to unpack as google.protobuf.Timestamp: \\[type.googleapis.com/google.protobuf.Duration\\] .*)\");\n}\n\nTEST(UtilityTest, TranslateAnyToFactoryConfig) {\n  ProtobufWkt::Duration source_duration;\n  source_duration.set_seconds(42);\n  ProtobufWkt::Any typed_config;\n  typed_config.PackFrom(source_duration);\n\n  MockTypedFactory factory;\n  EXPECT_CALL(factory, createEmptyConfigProto()).WillOnce(Invoke([]() -> ProtobufTypes::MessagePtr {\n    return ProtobufTypes::MessagePtr{new ProtobufWkt::Duration()};\n  }));\n\n  auto config = Utility::translateAnyToFactoryConfig(\n      typed_config, 
ProtobufMessage::getStrictValidationVisitor(), factory);\n\n  EXPECT_THAT(*config, ProtoEq(source_duration));\n}\n\nvoid packTypedStructIntoAny(ProtobufWkt::Any& typed_config, const Protobuf::Message& inner) {\n  udpa::type::v1::TypedStruct typed_struct;\n  (*typed_struct.mutable_type_url()) =\n      absl::StrCat(\"type.googleapis.com/\", inner.GetDescriptor()->full_name());\n  MessageUtil::jsonConvert(inner, *typed_struct.mutable_value());\n  typed_config.PackFrom(typed_struct);\n}\n\n// Verify that udpa.type.v1.TypedStruct can be translated into google.protobuf.Struct\nTEST(UtilityTest, TypedStructToStruct) {\n  ProtobufWkt::Any typed_config;\n  ProtobufWkt::Struct untyped_struct;\n  (*untyped_struct.mutable_fields())[\"foo\"].set_string_value(\"bar\");\n  packTypedStructIntoAny(typed_config, untyped_struct);\n\n  ProtobufWkt::Struct out;\n  Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),\n                                 ProtobufMessage::getStrictValidationVisitor(), out);\n\n  EXPECT_THAT(out, ProtoEq(untyped_struct));\n}\n\n// Verify that regular Struct can be translated into an arbitrary message of correct type\n// (v2 API, no upgrading).\nTEST(UtilityTest, StructToClusterV2) {\n  ProtobufWkt::Any typed_config;\n  API_NO_BOOST(envoy::api::v2::Cluster) cluster;\n  ProtobufWkt::Struct cluster_struct;\n  const std::string cluster_config_yaml = R\"EOF(\n    drain_connections_on_host_removal: true\n  )EOF\";\n  TestUtility::loadFromYaml(cluster_config_yaml, cluster);\n  TestUtility::loadFromYaml(cluster_config_yaml, cluster_struct);\n\n  {\n    API_NO_BOOST(envoy::api::v2::Cluster) out;\n    Utility::translateOpaqueConfig({}, cluster_struct, ProtobufMessage::getNullValidationVisitor(),\n                                   out);\n    EXPECT_THAT(out, ProtoEq(cluster));\n  }\n  {\n    API_NO_BOOST(envoy::api::v2::Cluster) out;\n    Utility::translateOpaqueConfig({}, cluster_struct,\n                                   
ProtobufMessage::getStrictValidationVisitor(), out);\n    EXPECT_THAT(out, ProtoEq(cluster));\n  }\n}\n\n// Verify that regular Struct can be translated into an arbitrary message of correct type\n// (v3 API, upgrading).\nTEST(UtilityTest, StructToClusterV3) {\n  ProtobufWkt::Any typed_config;\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster;\n  ProtobufWkt::Struct cluster_struct;\n  const std::string cluster_config_yaml = R\"EOF(\n    ignore_health_on_host_removal: true\n  )EOF\";\n  TestUtility::loadFromYaml(cluster_config_yaml, cluster);\n  TestUtility::loadFromYaml(cluster_config_yaml, cluster_struct);\n\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) out;\n    Utility::translateOpaqueConfig({}, cluster_struct, ProtobufMessage::getNullValidationVisitor(),\n                                   out);\n    EXPECT_THAT(out, ProtoEq(cluster));\n  }\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) out;\n    Utility::translateOpaqueConfig({}, cluster_struct,\n                                   ProtobufMessage::getStrictValidationVisitor(), out);\n    EXPECT_THAT(out, ProtoEq(cluster));\n  }\n}\n\n// Verify that udpa.type.v1.TypedStruct can be translated into an arbitrary message of correct type\n// (v2 API, no upgrading).\nTEST(UtilityTest, TypedStructToClusterV2) {\n  ProtobufWkt::Any typed_config;\n  API_NO_BOOST(envoy::api::v2::Cluster) cluster;\n  const std::string cluster_config_yaml = R\"EOF(\n    drain_connections_on_host_removal: true\n  )EOF\";\n  TestUtility::loadFromYaml(cluster_config_yaml, cluster);\n  packTypedStructIntoAny(typed_config, cluster);\n\n  {\n    API_NO_BOOST(envoy::api::v2::Cluster) out;\n    Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),\n                                   ProtobufMessage::getNullValidationVisitor(), out);\n    EXPECT_THAT(out, ProtoEq(cluster));\n  }\n  {\n    API_NO_BOOST(envoy::api::v2::Cluster) out;\n    Utility::translateOpaqueConfig(typed_config, 
ProtobufWkt::Struct(),\n                                   ProtobufMessage::getStrictValidationVisitor(), out);\n    EXPECT_THAT(out, ProtoEq(cluster));\n  }\n}\n\n// Verify that udpa.type.v1.TypedStruct can be translated into an arbitrary message of correct type\n// (v3 API, upgrading).\nTEST(UtilityTest, TypedStructToClusterV3) {\n  ProtobufWkt::Any typed_config;\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster;\n  const std::string cluster_config_yaml = R\"EOF(\n    ignore_health_on_host_removal: true\n  )EOF\";\n  TestUtility::loadFromYaml(cluster_config_yaml, cluster);\n  packTypedStructIntoAny(typed_config, cluster);\n\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) out;\n    Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),\n                                   ProtobufMessage::getNullValidationVisitor(), out);\n    EXPECT_THAT(out, ProtoEq(cluster));\n  }\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) out;\n    Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),\n                                   ProtobufMessage::getStrictValidationVisitor(), out);\n    EXPECT_THAT(out, ProtoEq(cluster));\n  }\n}\n\n// Verify that Any can be translated into an arbitrary message of correct type\n// (v2 API, no upgrading).\nTEST(UtilityTest, AnyToClusterV2) {\n  ProtobufWkt::Any typed_config;\n  API_NO_BOOST(envoy::api::v2::Cluster) cluster;\n  const std::string cluster_config_yaml = R\"EOF(\n    drain_connections_on_host_removal: true\n  )EOF\";\n  TestUtility::loadFromYaml(cluster_config_yaml, cluster);\n  typed_config.PackFrom(cluster);\n\n  API_NO_BOOST(envoy::api::v2::Cluster) out;\n  Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),\n                                 ProtobufMessage::getStrictValidationVisitor(), out);\n  EXPECT_THAT(out, ProtoEq(cluster));\n}\n\n// Verify that Any can be translated into an arbitrary message of correct type\n// (v3 API, upgrading).\nTEST(UtilityTest, 
AnyToClusterV3) {\n  ProtobufWkt::Any typed_config;\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster;\n  const std::string cluster_config_yaml = R\"EOF(\n    ignore_health_on_host_removal: true\n  )EOF\";\n  TestUtility::loadFromYaml(cluster_config_yaml, cluster);\n  typed_config.PackFrom(cluster);\n\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) out;\n  Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),\n                                 ProtobufMessage::getStrictValidationVisitor(), out);\n  EXPECT_THAT(out, ProtoEq(cluster));\n}\n\n// Verify that translation from udpa.type.v1.TypedStruct into message of incorrect type fails\nTEST(UtilityTest, TypedStructToInvalidType) {\n  ProtobufWkt::Any typed_config;\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  const std::string bootstrap_config_yaml = R\"EOF(\n    admin:\n      access_log_path: /dev/null\n      address:\n        pipe:\n          path: \"/\"\n  )EOF\";\n  TestUtility::loadFromYaml(bootstrap_config_yaml, bootstrap);\n  packTypedStructIntoAny(typed_config, bootstrap);\n\n  ProtobufWkt::Any out;\n  EXPECT_THROW_WITH_REGEX(\n      Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),\n                                     ProtobufMessage::getStrictValidationVisitor(), out),\n      EnvoyException, \"Unable to parse JSON as proto\");\n}\n\n// Verify that ProtobufWkt::Empty can load into a typed factory with an empty config proto\nTEST(UtilityTest, EmptyToEmptyConfig) {\n  ProtobufWkt::Any typed_config;\n  ProtobufWkt::Empty empty_config;\n  typed_config.PackFrom(empty_config);\n\n  envoy::extensions::filters::http::cors::v3::Cors out;\n  Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),\n                                 ProtobufMessage::getStrictValidationVisitor(), out);\n  EXPECT_THAT(out, ProtoEq(envoy::extensions::filters::http::cors::v3::Cors()));\n}\n\nTEST(CheckApiConfigSourceSubscriptionBackingClusterTest, 
GrpcClusterTestAcrossTypes) {\n  envoy::config::core::v3::ConfigSource config;\n  auto* api_config_source = config.mutable_api_config_source();\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n\n  // API of type GRPC\n  api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n\n  // GRPC cluster without GRPC services.\n  EXPECT_THROW_WITH_REGEX(\n      Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source),\n      EnvoyException, \"API configs must have either a gRPC service or a cluster name defined:\");\n\n  // Non-existent cluster.\n  api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\"foo_cluster\");\n  EXPECT_THROW_WITH_MESSAGE(\n      Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source),\n      EnvoyException,\n      fmt::format(\"{} must have a statically defined non-EDS cluster: \"\n                  \"'foo_cluster' does not exist, was added via api, or is an EDS cluster\",\n                  api_config_source->GetTypeName()));\n\n  // All ok.\n  primary_clusters.insert(\"foo_cluster\");\n  Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source);\n\n  // API with cluster_names set should be rejected.\n  api_config_source->add_cluster_names(\"foo_cluster\");\n  EXPECT_THROW_WITH_REGEX(\n      Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source),\n      EnvoyException,\n      fmt::format(\"{}::.DELTA_.GRPC must not have a cluster name \"\n                  \"specified:\",\n                  api_config_source->GetTypeName()));\n}\n\nTEST(CheckApiConfigSourceSubscriptionBackingClusterTest, RestClusterTestAcrossTypes) {\n  envoy::config::core::v3::ConfigSource config;\n  auto* api_config_source = config.mutable_api_config_source();\n  Upstream::ClusterManager::ClusterSet primary_clusters;\n  
api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST);\n\n  // Non-existent cluster.\n  api_config_source->add_cluster_names(\"foo_cluster\");\n  EXPECT_THROW_WITH_MESSAGE(\n      Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source),\n      EnvoyException,\n      fmt::format(\"{} must have a statically defined non-EDS cluster: \"\n                  \"'foo_cluster' does not exist, was added via api, or is an EDS cluster\",\n                  api_config_source->GetTypeName()));\n\n  // All ok.\n  primary_clusters.insert(\"foo_cluster\");\n  Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source);\n}\n\n// Validates CheckCluster functionality.\nTEST(UtilityTest, CheckCluster) {\n  Upstream::MockClusterManager cm;\n\n  // Validate that proper error is thrown, when cluster is not available.\n  EXPECT_CALL(cm, get(\"foo\")).WillOnce(Return(nullptr));\n  EXPECT_THROW_WITH_MESSAGE(Utility::checkCluster(\"prefix\", \"foo\", cm, false), EnvoyException,\n                            \"prefix: unknown cluster 'foo'\");\n\n  // Validate that proper error is thrown, when dynamic cluster is passed when it is not expected.\n  NiceMock<Upstream::MockThreadLocalCluster> api_cluster;\n  EXPECT_CALL(cm, get(\"foo\")).Times(2).WillRepeatedly(Return(&api_cluster));\n  EXPECT_CALL(api_cluster, info());\n  EXPECT_CALL(*api_cluster.cluster_.info_, addedViaApi()).WillOnce(Return(true));\n  EXPECT_THROW_WITH_MESSAGE(Utility::checkCluster(\"prefix\", \"foo\", cm, false), EnvoyException,\n                            \"prefix: invalid cluster 'foo': currently only \"\n                            \"static (non-CDS) clusters are supported\");\n  EXPECT_NO_THROW(Utility::checkCluster(\"prefix\", \"foo\", cm, true));\n\n  // Validate that bootstrap cluster does not throw any exceptions.\n  NiceMock<Upstream::MockThreadLocalCluster> bootstrap_cluster;\n  EXPECT_CALL(cm, 
get(\"foo\")).Times(2).WillRepeatedly(Return(&bootstrap_cluster));\n  EXPECT_NO_THROW(Utility::checkCluster(\"prefix\", \"foo\", cm, true));\n  EXPECT_NO_THROW(Utility::checkCluster(\"prefix\", \"foo\", cm, false));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/version_converter.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.config;\n\nimport \"google/protobuf/any.proto\";\n\nenum PreviousEnum {\n  PREV_DEFAULT = 0;\n  PREV_DEPRECATED_VALUE = 1;\n  PREV_OTHER_VALUE = 2;\n}\n\nenum NextEnum {\n  NEXT_DEFAULT = 0;\n  reserved 1;\n  NEXT_OTHER_VALUE = 2;\n}\n\nmessage PreviousVersion {\n  // Singleton scalars.\n  string string_field = 1;\n  bytes bytes_field = 2;\n  int32 int32_field = 3;\n  int64 int64_field = 4;\n  uint32 uint32_field = 5;\n  uint64 uint64_field = 6;\n  double double_field = 7;\n  float float_field = 8;\n  bool bool_field = 9;\n  PreviousEnum enum_field = 10;\n\n  // Singleton nested message.\n  message PreviousVersionNested {\n    google.protobuf.Any any_field = 1;\n  }\n  PreviousVersionNested nested_field = 11;\n\n  // Repeated entities.\n  repeated string repeated_scalar_field = 12;\n  repeated PreviousVersionNested repeated_nested_field = 13;\n\n  // Deprecations.\n  uint32 deprecated_field = 14 [deprecated = true];\n  PreviousEnum enum_field_with_deprecated_value = 15;\n}\n\nmessage NextVersion {\n  // Singleton scalars.\n  string string_field = 1;\n  bytes bytes_field = 2;\n  int32 int32_field = 3;\n  int64 int64_field = 4;\n  uint32 uint32_field = 5;\n  uint64 uint64_field = 6;\n  double double_field = 7;\n  float float_field = 8;\n  bool bool_field = 9;\n  PreviousEnum enum_field = 10;\n\n  // Singleton nested message.\n  message NextVersionNested {\n    google.protobuf.Any any_field = 1;\n  }\n  NextVersionNested nested_field = 11;\n\n  // Repeated entities.\n  repeated string repeated_scalar_field = 12;\n  repeated NextVersionNested repeated_nested_field = 13;\n\n  // Deprecations.\n  reserved 14;\n  NextEnum enum_field_with_deprecated_value = 15;\n  message NewMessageInThisVerion {\n  }\n\n  // New message present in this version but not PreviousVersion.\n  NewMessageInThisVerion new_message_in_this_version = 16;\n}\n"
  },
  {
    "path": "test/common/config/version_converter_test.cc",
    "content": "#include \"envoy/api/v2/cluster.pb.h\"\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/well_known.h\"\n\n#include \"test/common/config/version_converter.pb.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nbool hasOriginalTypeInformation(const Protobuf::Message& message) {\n  const Protobuf::Reflection* reflection = message.GetReflection();\n  const auto& unknown_field_set = reflection->GetUnknownFields(message);\n  for (int i = 0; i < unknown_field_set.field_count(); ++i) {\n    const auto& unknown_field = unknown_field_set.field(i);\n    if (unknown_field.number() == ProtobufWellKnown::OriginalTypeFieldNumber) {\n      return true;\n    }\n  }\n  return false;\n}\n\n// Wire-style upgrading between versions.\nTEST(VersionConverterTest, Upgrade) {\n  // Create a v2 Cluster message with some fields set.\n  API_NO_BOOST(envoy::api::v2::Cluster) source;\n  source.add_hosts();\n  source.mutable_load_assignment()->set_cluster_name(\"bar\");\n  source.mutable_eds_cluster_config()->set_service_name(\"foo\");\n  source.set_drain_connections_on_host_removal(true);\n  // Upgrade to a v3 Cluster.\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n  VersionConverter::upgrade(source, dst);\n  // Verify fields in v3 Cluster.\n  EXPECT_TRUE(hasOriginalTypeInformation(dst));\n  EXPECT_FALSE(dst.hidden_envoy_deprecated_hosts().empty());\n  EXPECT_FALSE(hasOriginalTypeInformation(dst.hidden_envoy_deprecated_hosts(0)));\n  EXPECT_EQ(\"bar\", dst.load_assignment().cluster_name());\n  EXPECT_FALSE(hasOriginalTypeInformation(dst.load_assignment()));\n  EXPECT_EQ(\"foo\", dst.eds_cluster_config().service_name());\n  
EXPECT_TRUE(hasOriginalTypeInformation(dst.eds_cluster_config()));\n  EXPECT_TRUE(dst.ignore_health_on_host_removal());\n  // Recover a v2 Cluster from the v3 Cluster using original type information.\n  auto original_dynamic_msg = VersionConverter::recoverOriginal(dst);\n  const auto& original_msg = *original_dynamic_msg->msg_;\n  EXPECT_EQ(\"envoy.api.v2.Cluster\", original_msg.GetDescriptor()->full_name());\n  // Ensure that we erased any original type information and have the original\n  // message.\n  EXPECT_THAT(original_msg, ProtoEq(source));\n  // Verify that sub-messages work with VersionConverter::recoverOriginal, i.e.\n  // we are propagating original type information.\n  auto original_dynamic_sub_msg = VersionConverter::recoverOriginal(dst.eds_cluster_config());\n  const auto& original_sub_msg = *original_dynamic_sub_msg->msg_;\n  EXPECT_THAT(original_sub_msg, ProtoEq(source.eds_cluster_config()));\n}\n\n// Empty upgrade between version_converter.proto entities. TODO(htuch): consider migrating all the\n// upgrades in this test to version_converter.proto to reduce dependence on APIs that will be\n// removed at EOY.\nTEST(VersionConverterProto, UpgradeNextVersion) {\n  test::common::config::PreviousVersion source;\n  test::common::config::NextVersion dst;\n  VersionConverter::upgrade(source, dst);\n}\n\n// Bad UTF-8 can fail wire cast during upgrade.\nTEST(VersionConverterTest, UpgradeException) {\n  API_NO_BOOST(envoy::api::v2::Cluster) source;\n  source.mutable_eds_cluster_config()->set_service_name(\"UPST128\\tAM_HO\\001\\202\\247ST\");\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n  EXPECT_THROW_WITH_MESSAGE(VersionConverter::upgrade(source, dst), EnvoyException,\n                            \"Unable to deserialize during wireCast()\");\n}\n\n// Verify that VersionUtil::scrubHiddenEnvoyDeprecated recursively scrubs any\n// deprecated fields.\nTEST(VersionConverterTest, ScrubHiddenEnvoyDeprecated) {\n  
API_NO_BOOST(envoy::config::cluster::v3::Cluster) msg;\n  msg.set_name(\"foo\");\n  msg.mutable_hidden_envoy_deprecated_tls_context();\n  EXPECT_TRUE(msg.has_hidden_envoy_deprecated_tls_context());\n  msg.mutable_load_balancing_policy()->add_policies()->mutable_hidden_envoy_deprecated_config();\n  EXPECT_TRUE(msg.load_balancing_policy().policies(0).has_hidden_envoy_deprecated_config());\n  VersionUtil::scrubHiddenEnvoyDeprecated(msg);\n  EXPECT_EQ(\"foo\", msg.name());\n  EXPECT_FALSE(msg.has_hidden_envoy_deprecated_tls_context());\n  EXPECT_FALSE(msg.load_balancing_policy().policies(0).has_hidden_envoy_deprecated_config());\n}\n\n// Validate that we can sensibly provide a JSON wire interpretation of messages\n// such as DiscoveryRequest based on transport API version.\nTEST(VersionConverter, GetJsonStringFromMessage) {\n  API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request;\n  discovery_request.mutable_node()->set_hidden_envoy_deprecated_build_version(\"foo\");\n  discovery_request.mutable_node()->set_user_agent_name(\"bar\");\n  const std::string v2_discovery_request = VersionConverter::getJsonStringFromMessage(\n      discovery_request, envoy::config::core::v3::ApiVersion::V2);\n  EXPECT_EQ(\"{\\\"node\\\":{\\\"build_version\\\":\\\"foo\\\",\\\"user_agent_name\\\":\\\"bar\\\"}}\",\n            v2_discovery_request);\n  const std::string auto_discovery_request = VersionConverter::getJsonStringFromMessage(\n      discovery_request, envoy::config::core::v3::ApiVersion::AUTO);\n  EXPECT_EQ(\"{\\\"node\\\":{\\\"build_version\\\":\\\"foo\\\",\\\"user_agent_name\\\":\\\"bar\\\"}}\",\n            auto_discovery_request);\n  const std::string v3_discovery_request = VersionConverter::getJsonStringFromMessage(\n      discovery_request, envoy::config::core::v3::ApiVersion::V3);\n  EXPECT_EQ(\"{\\\"node\\\":{\\\"user_agent_name\\\":\\\"bar\\\"}}\", v3_discovery_request);\n}\n\nbool hasUnknownFields(const Protobuf::Message& message) {\n  const 
Protobuf::Reflection* reflection = message.GetReflection();\n  const auto& unknown_field_set = reflection->GetUnknownFields(message);\n  return !unknown_field_set.empty();\n}\n\n// Validate that we can sensibly provide a gRPC wire interpretation of messages\n// such as DiscoveryRequest based on transport API version.\nTEST(VersionConverter, PrepareMessageForGrpcWire) {\n  API_NO_BOOST(envoy::api::v2::core::Node) v2_node;\n  v2_node.set_build_version(\"foo\");\n  v2_node.set_user_agent_name(\"bar\");\n  API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request;\n  discovery_request.mutable_node()->set_hidden_envoy_deprecated_build_version(\"foo\");\n  VersionConverter::upgrade(v2_node, *discovery_request.mutable_node());\n  {\n    API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request_copy;\n    discovery_request_copy.MergeFrom(discovery_request);\n    VersionConverter::prepareMessageForGrpcWire(discovery_request_copy,\n                                                envoy::config::core::v3::ApiVersion::V2);\n    API_NO_BOOST(envoy::api::v2::DiscoveryRequest) v2_discovery_request;\n    EXPECT_TRUE(v2_discovery_request.ParseFromString(discovery_request_copy.SerializeAsString()));\n    EXPECT_EQ(\"foo\", v2_discovery_request.node().build_version());\n    EXPECT_FALSE(hasUnknownFields(v2_discovery_request.node()));\n  }\n  {\n    API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request_copy;\n    discovery_request_copy.MergeFrom(discovery_request);\n    VersionConverter::prepareMessageForGrpcWire(discovery_request_copy,\n                                                envoy::config::core::v3::ApiVersion::AUTO);\n    API_NO_BOOST(envoy::api::v2::DiscoveryRequest) auto_discovery_request;\n    EXPECT_TRUE(auto_discovery_request.ParseFromString(discovery_request_copy.SerializeAsString()));\n    EXPECT_EQ(\"foo\", auto_discovery_request.node().build_version());\n    
EXPECT_FALSE(hasUnknownFields(auto_discovery_request.node()));\n  }\n  {\n    API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request_copy;\n    discovery_request_copy.MergeFrom(discovery_request);\n    VersionConverter::prepareMessageForGrpcWire(discovery_request_copy,\n                                                envoy::config::core::v3::ApiVersion::V3);\n    API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) v3_discovery_request;\n    EXPECT_TRUE(v3_discovery_request.ParseFromString(discovery_request_copy.SerializeAsString()));\n    EXPECT_EQ(\"\", v3_discovery_request.node().hidden_envoy_deprecated_build_version());\n    EXPECT_FALSE(hasUnknownFields(v3_discovery_request.node()));\n  }\n}\n\n// Downgrading to an earlier version (where it exists).\nTEST(VersionConverterTest, DowngradeEarlier) {\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) source;\n  source.set_ignore_health_on_host_removal(true);\n  auto downgraded = VersionConverter::downgrade(source);\n  const Protobuf::Descriptor* desc = downgraded->msg_->GetDescriptor();\n  const Protobuf::Reflection* reflection = downgraded->msg_->GetReflection();\n  EXPECT_EQ(\"envoy.api.v2.Cluster\", desc->full_name());\n  EXPECT_EQ(true, reflection->GetBool(*downgraded->msg_,\n                                      desc->FindFieldByName(\"drain_connections_on_host_removal\")));\n}\n\n// Downgrading is idempotent if no earlier version.\nTEST(VersionConverterTest, DowngradeSame) {\n  API_NO_BOOST(envoy::api::v2::Cluster) source;\n  source.set_drain_connections_on_host_removal(true);\n  auto downgraded = VersionConverter::downgrade(source);\n  const Protobuf::Descriptor* desc = downgraded->msg_->GetDescriptor();\n  const Protobuf::Reflection* reflection = downgraded->msg_->GetReflection();\n  EXPECT_EQ(\"envoy.api.v2.Cluster\", desc->full_name());\n  EXPECT_EQ(true, reflection->GetBool(*downgraded->msg_,\n                                      
desc->FindFieldByName(\"drain_connections_on_host_removal\")));\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/config/watch_map_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.validate.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/watch_map.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\nusing ::testing::AtMost;\nusing ::testing::Invoke;\nusing ::testing::InvokeWithoutArgs;\nusing ::testing::NiceMock;\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\n\nvoid expectDeltaUpdate(\n    MockSubscriptionCallbacks& callbacks,\n    const std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment>& expected_resources,\n    const std::vector<std::string>& expected_removals, const std::string& version) {\n  EXPECT_CALL(callbacks, onConfigUpdate(_, _, _))\n      .WillOnce(Invoke([expected_resources, expected_removals,\n                        version](const std::vector<DecodedResourceRef>& gotten_resources,\n                                 const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n                                 const std::string&) {\n        EXPECT_EQ(expected_resources.size(), gotten_resources.size());\n        for (size_t i = 0; i < expected_resources.size(); i++) {\n          EXPECT_EQ(gotten_resources[i].get().version(), version);\n          EXPECT_TRUE(\n              TestUtility::protoEqual(gotten_resources[i].get().resource(), expected_resources[i]));\n        }\n        EXPECT_EQ(expected_removals.size(), removed_resources.size());\n        for (size_t i = 0; i < expected_removals.size(); i++) {\n          EXPECT_EQ(expected_removals[i], removed_resources[i]);\n        }\n      }));\n}\n\n// expectDeltaAndSotwUpdate() EXPECTs two birds with one function call: we want to cover both SotW\n// and delta, which, while mechanically different, 
can behave identically for our testing purposes.\n// Specifically, as a simplification for these tests, every still-present resource is updated in\n// every update. Therefore, a resource can never show up in the SotW update but not the delta\n// update. We can therefore use the same expected_resources for both.\nvoid expectDeltaAndSotwUpdate(\n    MockSubscriptionCallbacks& callbacks,\n    const std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment>& expected_resources,\n    const std::vector<std::string>& expected_removals, const std::string& version) {\n  EXPECT_CALL(callbacks, onConfigUpdate(_, version))\n      .WillOnce(Invoke([expected_resources](const std::vector<DecodedResourceRef>& gotten_resources,\n                                            const std::string&) {\n        EXPECT_EQ(expected_resources.size(), gotten_resources.size());\n        for (size_t i = 0; i < expected_resources.size(); i++) {\n          EXPECT_TRUE(\n              TestUtility::protoEqual(gotten_resources[i].get().resource(), expected_resources[i]));\n        }\n      }));\n  expectDeltaUpdate(callbacks, expected_resources, expected_removals, version);\n}\n\nvoid expectNoUpdate(MockSubscriptionCallbacks& callbacks, const std::string& version) {\n  EXPECT_CALL(callbacks, onConfigUpdate(_, version)).Times(0);\n  EXPECT_CALL(callbacks, onConfigUpdate(_, _, version)).Times(0);\n}\n\nvoid expectEmptySotwNoDeltaUpdate(MockSubscriptionCallbacks& callbacks,\n                                  const std::string& version) {\n  EXPECT_CALL(callbacks, onConfigUpdate(_, version))\n      .WillOnce(Invoke([](const std::vector<DecodedResourceRef>& gotten_resources,\n                          const std::string&) { EXPECT_EQ(gotten_resources.size(), 0); }));\n  EXPECT_CALL(callbacks, onConfigUpdate(_, _, version)).Times(0);\n}\n\nProtobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>\nwrapInResource(const Protobuf::RepeatedPtrField<ProtobufWkt::Any>& anys,\n               const 
std::string& version) {\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> ret;\n  for (const auto& a : anys) {\n    envoy::config::endpoint::v3::ClusterLoadAssignment cur_endpoint;\n    a.UnpackTo(&cur_endpoint);\n    auto* cur_resource = ret.Add();\n    cur_resource->set_name(cur_endpoint.cluster_name());\n    cur_resource->mutable_resource()->CopyFrom(a);\n    cur_resource->set_version(version);\n  }\n  return ret;\n}\n\nvoid doDeltaUpdate(WatchMap& watch_map,\n                   const Protobuf::RepeatedPtrField<ProtobufWkt::Any>& sotw_resources,\n                   const std::vector<std::string>& removed_names, const std::string& version) {\n\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> delta_resources =\n      wrapInResource(sotw_resources, version);\n  Protobuf::RepeatedPtrField<std::string> removed_names_proto;\n  for (const auto& n : removed_names) {\n    *removed_names_proto.Add() = n;\n  }\n  watch_map.onConfigUpdate(delta_resources, removed_names_proto, version);\n}\n\n// Similar to expectDeltaAndSotwUpdate(), but making the onConfigUpdate() happen, rather than\n// EXPECT-ing it.\nvoid doDeltaAndSotwUpdate(WatchMap& watch_map,\n                          const Protobuf::RepeatedPtrField<ProtobufWkt::Any>& sotw_resources,\n                          const std::vector<std::string>& removed_names,\n                          const std::string& version) {\n  watch_map.onConfigUpdate(sotw_resources, version);\n  doDeltaUpdate(watch_map, sotw_resources, removed_names, version);\n}\n\n// Tests the simple case of a single watch. Checks that the watch will not be told of updates to\n// resources it doesn't care about. 
Checks that the watch can later decide it does care about them,\n// and then receive subsequent updates to them.\nTEST(WatchMapTest, Basic) {\n  MockSubscriptionCallbacks callbacks;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  WatchMap watch_map(false);\n  Watch* watch = watch_map.addWatch(callbacks, resource_decoder);\n\n  {\n    // nothing is interested, so become wildcard watch\n    // should callback with empty resource\n    expectDeltaAndSotwUpdate(callbacks, {}, {}, \"version1\");\n    doDeltaAndSotwUpdate(watch_map, {}, {}, \"version1\");\n  }\n  {\n    // The watch is interested in Alice and Bob...\n    std::set<std::string> update_to({\"alice\", \"bob\"});\n    AddedRemoved added_removed = watch_map.updateWatchInterest(watch, update_to);\n    EXPECT_EQ(update_to, added_removed.added_);\n    EXPECT_TRUE(added_removed.removed_.empty());\n\n    // ...the update is going to contain Bob and Carol...\n    Protobuf::RepeatedPtrField<ProtobufWkt::Any> updated_resources;\n    envoy::config::endpoint::v3::ClusterLoadAssignment bob;\n    bob.set_cluster_name(\"bob\");\n    updated_resources.Add()->PackFrom(bob);\n    envoy::config::endpoint::v3::ClusterLoadAssignment carol;\n    carol.set_cluster_name(\"carol\");\n    updated_resources.Add()->PackFrom(carol);\n\n    // ...so the watch should receive only Bob.\n    std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment> expected_resources;\n    expected_resources.push_back(bob);\n\n    expectDeltaAndSotwUpdate(callbacks, expected_resources, {}, \"version1\");\n    doDeltaAndSotwUpdate(watch_map, updated_resources, {}, \"version1\");\n  }\n  {\n    // The watch is now interested in Bob, Carol, Dave, Eve...\n    std::set<std::string> update_to({\"bob\", \"carol\", \"dave\", \"eve\"});\n    AddedRemoved added_removed = watch_map.updateWatchInterest(watch, update_to);\n    EXPECT_EQ(std::set<std::string>({\"carol\", 
\"dave\", \"eve\"}), added_removed.added_);\n    EXPECT_EQ(std::set<std::string>({\"alice\"}), added_removed.removed_);\n\n    // ...the update is going to contain Alice, Carol, Dave...\n    Protobuf::RepeatedPtrField<ProtobufWkt::Any> updated_resources;\n    envoy::config::endpoint::v3::ClusterLoadAssignment alice;\n    alice.set_cluster_name(\"alice\");\n    updated_resources.Add()->PackFrom(alice);\n    envoy::config::endpoint::v3::ClusterLoadAssignment carol;\n    carol.set_cluster_name(\"carol\");\n    updated_resources.Add()->PackFrom(carol);\n    envoy::config::endpoint::v3::ClusterLoadAssignment dave;\n    dave.set_cluster_name(\"dave\");\n    updated_resources.Add()->PackFrom(dave);\n\n    // ...so the watch should receive only Carol and Dave.\n    std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment> expected_resources;\n    expected_resources.push_back(carol);\n    expected_resources.push_back(dave);\n\n    expectDeltaAndSotwUpdate(callbacks, expected_resources, {\"bob\"}, \"version2\");\n    doDeltaAndSotwUpdate(watch_map, updated_resources, {\"bob\"}, \"version2\");\n  }\n}\n\n// Checks the following:\n// First watch on a resource name ==> updateWatchInterest() returns \"add it to subscription\"\n// Second watch on that name ==> updateWatchInterest() returns nothing about that name\n// Original watch loses interest ==> nothing\n// Second watch also loses interest ==> \"remove it from subscription\"\n// NOTE: we need the resource name \"dummy\" to keep either watch from ever having no names watched,\n// which is treated as interest in all names.\nTEST(WatchMapTest, Overlap) {\n  MockSubscriptionCallbacks callbacks1;\n  MockSubscriptionCallbacks callbacks2;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  WatchMap watch_map(false);\n  Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder);\n  Watch* watch2 = watch_map.addWatch(callbacks2, 
resource_decoder);\n\n  Protobuf::RepeatedPtrField<ProtobufWkt::Any> updated_resources;\n  envoy::config::endpoint::v3::ClusterLoadAssignment alice;\n  alice.set_cluster_name(\"alice\");\n  updated_resources.Add()->PackFrom(alice);\n\n  // First watch becomes interested.\n  {\n    std::set<std::string> update_to({\"alice\", \"dummy\"});\n    AddedRemoved added_removed = watch_map.updateWatchInterest(watch1, update_to);\n    EXPECT_EQ(update_to, added_removed.added_); // add to subscription\n    EXPECT_TRUE(added_removed.removed_.empty());\n    watch_map.updateWatchInterest(watch2, {\"dummy\"});\n\n    // *Only* first watch receives update.\n    expectDeltaAndSotwUpdate(callbacks1, {alice}, {}, \"version1\");\n    expectNoUpdate(callbacks2, \"version1\");\n    doDeltaAndSotwUpdate(watch_map, updated_resources, {}, \"version1\");\n  }\n  // Second watch becomes interested.\n  {\n    std::set<std::string> update_to({\"alice\", \"dummy\"});\n    AddedRemoved added_removed = watch_map.updateWatchInterest(watch2, update_to);\n    EXPECT_TRUE(added_removed.added_.empty()); // nothing happens\n    EXPECT_TRUE(added_removed.removed_.empty());\n\n    // Both watches receive update.\n    expectDeltaAndSotwUpdate(callbacks1, {alice}, {}, \"version2\");\n    expectDeltaAndSotwUpdate(callbacks2, {alice}, {}, \"version2\");\n    doDeltaAndSotwUpdate(watch_map, updated_resources, {}, \"version2\");\n  }\n  // First watch loses interest.\n  {\n    AddedRemoved added_removed = watch_map.updateWatchInterest(watch1, {\"dummy\"});\n    EXPECT_TRUE(added_removed.added_.empty()); // nothing happens\n    EXPECT_TRUE(added_removed.removed_.empty());\n\n    // Both watches receive the update. For watch2, this is obviously desired.\n    expectDeltaAndSotwUpdate(callbacks2, {alice}, {}, \"version3\");\n    // For watch1, it's more subtle: the WatchMap sees that this update has no\n    // resources watch1 cares about, but also knows that watch1 previously had\n    // some resources. 
So, it must inform watch1 that it now has no resources.\n    // (SotW only: delta's explicit removals avoid the need for this guessing.)\n    expectEmptySotwNoDeltaUpdate(callbacks1, \"version3\");\n    doDeltaAndSotwUpdate(watch_map, updated_resources, {}, \"version3\");\n  }\n  // Second watch loses interest.\n  {\n    AddedRemoved added_removed = watch_map.updateWatchInterest(watch2, {\"dummy\"});\n    EXPECT_TRUE(added_removed.added_.empty());\n    EXPECT_EQ(std::set<std::string>({\"alice\"}), added_removed.removed_); // remove from subscription\n  }\n}\n\n// These are regression tests for #11877, validate that when two watches point at the same\n// watched resource, and an update to one of the watches removes one or both of them, that\n// WatchMap defers deletes and doesn't crash.\nclass SameWatchRemoval : public testing::Test {\npublic:\n  SameWatchRemoval() : watch_map_(false) {}\n\n  void SetUp() override {\n    envoy::config::endpoint::v3::ClusterLoadAssignment alice;\n    alice.set_cluster_name(\"alice\");\n    updated_resources_.Add()->PackFrom(alice);\n    watch1_ = watch_map_.addWatch(callbacks1_, resource_decoder_);\n    watch2_ = watch_map_.addWatch(callbacks2_, resource_decoder_);\n    watch_map_.updateWatchInterest(watch1_, {\"alice\"});\n    watch_map_.updateWatchInterest(watch2_, {\"alice\"});\n  }\n\n  void removeAllInterest() {\n    ASSERT_FALSE(watch_cb_invoked_);\n    watch_cb_invoked_ = true;\n    watch_map_.removeWatch(watch1_);\n    watch_map_.removeWatch(watch2_);\n  }\n\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder_{\"cluster_name\"};\n  WatchMap watch_map_;\n  NiceMock<MockSubscriptionCallbacks> callbacks1_;\n  MockSubscriptionCallbacks callbacks2_;\n  Protobuf::RepeatedPtrField<ProtobufWkt::Any> updated_resources_;\n  Watch* watch1_;\n  Watch* watch2_;\n  bool watch_cb_invoked_{};\n};\n\nTEST_F(SameWatchRemoval, SameWatchRemovalSotw) {\n  
EXPECT_CALL(callbacks1_, onConfigUpdate(_, _))\n      .Times(AtMost(1))\n      .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); }));\n  EXPECT_CALL(callbacks2_, onConfigUpdate(_, _))\n      .Times(AtMost(1))\n      .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); }));\n  watch_map_.onConfigUpdate(updated_resources_, \"version1\");\n}\n\nTEST_F(SameWatchRemoval, SameWatchRemovalDeltaAdd) {\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> delta_resources =\n      wrapInResource(updated_resources_, \"version1\");\n  Protobuf::RepeatedPtrField<std::string> removed_names_proto;\n\n  EXPECT_CALL(callbacks1_, onConfigUpdate(_, _, _))\n      .Times(AtMost(1))\n      .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); }));\n  EXPECT_CALL(callbacks2_, onConfigUpdate(_, _, _))\n      .Times(AtMost(1))\n      .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); }));\n  watch_map_.onConfigUpdate(delta_resources, removed_names_proto, \"version1\");\n}\n\nTEST_F(SameWatchRemoval, SameWatchRemovalDeltaRemove) {\n  Protobuf::RepeatedPtrField<std::string> removed_names_proto;\n  *removed_names_proto.Add() = \"alice\";\n  EXPECT_CALL(callbacks1_, onConfigUpdate(_, _, _))\n      .Times(AtMost(1))\n      .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); }));\n  EXPECT_CALL(callbacks2_, onConfigUpdate(_, _, _))\n      .Times(AtMost(1))\n      .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); }));\n  watch_map_.onConfigUpdate({}, removed_names_proto, \"version1\");\n}\n\n// Checks the following:\n// First watch on a resource name ==> updateWatchInterest() returns \"add it to subscription\"\n// Watch loses interest ==> \"remove it from subscription\"\n// Second watch on that name ==> \"add it to subscription\"\n// NOTE: we need the resource name \"dummy\" to keep either watch from ever having no names watched,\n// which is treated as interest in all names.\nTEST(WatchMapTest, 
AddRemoveAdd) {\n  MockSubscriptionCallbacks callbacks1;\n  MockSubscriptionCallbacks callbacks2;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  WatchMap watch_map(false);\n  Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder);\n  Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder);\n\n  Protobuf::RepeatedPtrField<ProtobufWkt::Any> updated_resources;\n  envoy::config::endpoint::v3::ClusterLoadAssignment alice;\n  alice.set_cluster_name(\"alice\");\n  updated_resources.Add()->PackFrom(alice);\n\n  // First watch becomes interested.\n  {\n    std::set<std::string> update_to({\"alice\", \"dummy\"});\n    AddedRemoved added_removed = watch_map.updateWatchInterest(watch1, update_to);\n    EXPECT_EQ(update_to, added_removed.added_); // add to subscription\n    EXPECT_TRUE(added_removed.removed_.empty());\n    watch_map.updateWatchInterest(watch2, {\"dummy\"});\n\n    // *Only* first watch receives update.\n    expectDeltaAndSotwUpdate(callbacks1, {alice}, {}, \"version1\");\n    expectNoUpdate(callbacks2, \"version1\");\n    doDeltaAndSotwUpdate(watch_map, updated_resources, {}, \"version1\");\n  }\n  // First watch loses interest.\n  {\n    AddedRemoved added_removed = watch_map.updateWatchInterest(watch1, {\"dummy\"});\n    EXPECT_TRUE(added_removed.added_.empty());\n    EXPECT_EQ(std::set<std::string>({\"alice\"}),\n              added_removed.removed_); // remove from subscription\n\n    // (The xDS client should have responded to updateWatchInterest()'s return value by removing\n    // Alice from the subscription, so onConfigUpdate() calls should be impossible right now.)\n  }\n  // Second watch becomes interested.\n  {\n    std::set<std::string> update_to({\"alice\", \"dummy\"});\n    AddedRemoved added_removed = watch_map.updateWatchInterest(watch2, update_to);\n    EXPECT_EQ(std::set<std::string>({\"alice\"}), added_removed.added_); // add to 
subscription\n    EXPECT_TRUE(added_removed.removed_.empty());\n\n    // Both watches receive the update. For watch2, this is obviously desired.\n    expectDeltaAndSotwUpdate(callbacks2, {alice}, {}, \"version2\");\n    // For watch1, it's more subtle: the WatchMap sees that this update has no\n    // resources watch1 cares about, but also knows that watch1 previously had\n    // some resources. So, it must inform watch1 that it now has no resources.\n    // (SotW only: delta's explicit removals avoid the need for this guessing.)\n    expectEmptySotwNoDeltaUpdate(callbacks1, \"version2\");\n    doDeltaAndSotwUpdate(watch_map, updated_resources, {}, \"version2\");\n  }\n}\n\n// Tests that nothing breaks if an update arrives that we entirely do not care about.\nTEST(WatchMapTest, UninterestingUpdate) {\n  MockSubscriptionCallbacks callbacks;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  WatchMap watch_map(false);\n  Watch* watch = watch_map.addWatch(callbacks, resource_decoder);\n  watch_map.updateWatchInterest(watch, {\"alice\"});\n\n  Protobuf::RepeatedPtrField<ProtobufWkt::Any> alice_update;\n  envoy::config::endpoint::v3::ClusterLoadAssignment alice;\n  alice.set_cluster_name(\"alice\");\n  alice_update.Add()->PackFrom(alice);\n\n  Protobuf::RepeatedPtrField<ProtobufWkt::Any> bob_update;\n  envoy::config::endpoint::v3::ClusterLoadAssignment bob;\n  bob.set_cluster_name(\"bob\");\n  bob_update.Add()->PackFrom(bob);\n\n  // We are watching for alice, and an update for just bob arrives. It should be ignored.\n  expectNoUpdate(callbacks, \"version1\");\n  doDeltaAndSotwUpdate(watch_map, bob_update, {}, \"version1\");\n  ::testing::Mock::VerifyAndClearExpectations(&callbacks);\n\n  // The server sends an update adding alice and removing bob. 
We pay attention only to alice.\n  expectDeltaAndSotwUpdate(callbacks, {alice}, {}, \"version2\");\n  doDeltaAndSotwUpdate(watch_map, alice_update, {}, \"version2\");\n  ::testing::Mock::VerifyAndClearExpectations(&callbacks);\n\n  // The server sends an update removing alice and adding bob. We pay attention only to alice.\n  expectDeltaAndSotwUpdate(callbacks, {}, {\"alice\"}, \"version3\");\n  doDeltaAndSotwUpdate(watch_map, bob_update, {\"alice\"}, \"version3\");\n  ::testing::Mock::VerifyAndClearExpectations(&callbacks);\n\n  // Clean removal of the watch: first update to \"interested in nothing\", then remove.\n  watch_map.updateWatchInterest(watch, {});\n  watch_map.removeWatch(watch);\n\n  // Finally, test that calling onConfigUpdate on a map with no watches doesn't break.\n  doDeltaAndSotwUpdate(watch_map, bob_update, {}, \"version4\");\n}\n\n// Tests that a watch that specifies no particular resource interest is treated as interested in\n// everything.\nTEST(WatchMapTest, WatchingEverything) {\n  MockSubscriptionCallbacks callbacks1;\n  MockSubscriptionCallbacks callbacks2;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  WatchMap watch_map(false);\n  /*Watch* watch1 = */ watch_map.addWatch(callbacks1, resource_decoder);\n  Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder);\n  // watch1 never specifies any names, and so is treated as interested in everything.\n  watch_map.updateWatchInterest(watch2, {\"alice\"});\n\n  Protobuf::RepeatedPtrField<ProtobufWkt::Any> updated_resources;\n  envoy::config::endpoint::v3::ClusterLoadAssignment alice;\n  alice.set_cluster_name(\"alice\");\n  updated_resources.Add()->PackFrom(alice);\n  envoy::config::endpoint::v3::ClusterLoadAssignment bob;\n  bob.set_cluster_name(\"bob\");\n  updated_resources.Add()->PackFrom(bob);\n\n  std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment> expected_resources1;\n  
expected_resources1.push_back(alice);\n  expected_resources1.push_back(bob);\n  std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment> expected_resources2;\n  expected_resources2.push_back(alice);\n\n  expectDeltaAndSotwUpdate(callbacks1, expected_resources1, {}, \"version1\");\n  expectDeltaAndSotwUpdate(callbacks2, expected_resources2, {}, \"version1\");\n  doDeltaAndSotwUpdate(watch_map, updated_resources, {}, \"version1\");\n}\n\n// Delta onConfigUpdate has some slightly subtle details with how it handles the three cases where\n// a watch receives {only updates, updates+removals, only removals} to its resources. This test\n// exercise those cases. Also, the removal-only case tests that SotW does call a watch's\n// onConfigUpdate even if none of the watch's interested resources are among the updated\n// resources. (Which ensures we deliver empty config updates when a resource is dropped.)\nTEST(WatchMapTest, DeltaOnConfigUpdate) {\n  MockSubscriptionCallbacks callbacks1;\n  MockSubscriptionCallbacks callbacks2;\n  MockSubscriptionCallbacks callbacks3;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  WatchMap watch_map(false);\n  Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder);\n  Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder);\n  Watch* watch3 = watch_map.addWatch(callbacks3, resource_decoder);\n  watch_map.updateWatchInterest(watch1, {\"updated\"});\n  watch_map.updateWatchInterest(watch2, {\"updated\", \"removed\"});\n  watch_map.updateWatchInterest(watch3, {\"removed\"});\n\n  // First, create the \"removed\" resource. We want to test SotW being handed an empty\n  // onConfigUpdate. 
But, if SotW holds no resources, then an update with nothing it cares about\n  // will just not trigger any onConfigUpdate at all.\n  {\n    Protobuf::RepeatedPtrField<ProtobufWkt::Any> prepare_removed;\n    envoy::config::endpoint::v3::ClusterLoadAssignment will_be_removed_later;\n    will_be_removed_later.set_cluster_name(\"removed\");\n    prepare_removed.Add()->PackFrom(will_be_removed_later);\n    expectDeltaAndSotwUpdate(callbacks2, {will_be_removed_later}, {}, \"version0\");\n    expectDeltaAndSotwUpdate(callbacks3, {will_be_removed_later}, {}, \"version0\");\n    doDeltaAndSotwUpdate(watch_map, prepare_removed, {}, \"version0\");\n  }\n\n  Protobuf::RepeatedPtrField<ProtobufWkt::Any> update;\n  envoy::config::endpoint::v3::ClusterLoadAssignment updated;\n  updated.set_cluster_name(\"updated\");\n  update.Add()->PackFrom(updated);\n\n  expectDeltaAndSotwUpdate(callbacks1, {updated}, {}, \"version1\");          // only update\n  expectDeltaAndSotwUpdate(callbacks2, {updated}, {\"removed\"}, \"version1\"); // update+remove\n  expectDeltaAndSotwUpdate(callbacks3, {}, {\"removed\"}, \"version1\");        // only remove\n  doDeltaAndSotwUpdate(watch_map, update, {\"removed\"}, \"version1\");\n}\n\nTEST(WatchMapTest, OnConfigUpdateFailed) {\n  WatchMap watch_map(false);\n  // calling on empty map doesn't break\n  watch_map.onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr);\n\n  MockSubscriptionCallbacks callbacks1;\n  MockSubscriptionCallbacks callbacks2;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  watch_map.addWatch(callbacks1, resource_decoder);\n  watch_map.addWatch(callbacks2, resource_decoder);\n\n  EXPECT_CALL(callbacks1, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr));\n  EXPECT_CALL(callbacks2, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr));\n  
watch_map.onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr);\n}\n\nTEST(WatchMapTest, OnConfigUpdateUsingNamespaces) {\n  MockSubscriptionCallbacks callbacks1;\n  MockSubscriptionCallbacks callbacks2;\n  MockSubscriptionCallbacks callbacks3;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder(\"cluster_name\");\n  WatchMap watch_map(true);\n  Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder);\n  Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder);\n  Watch* watch3 = watch_map.addWatch(callbacks3, resource_decoder);\n  watch_map.updateWatchInterest(watch1, {\"ns1\"});\n  watch_map.updateWatchInterest(watch2, {\"ns1\", \"ns2\"});\n  watch_map.updateWatchInterest(watch3, {\"ns3\"});\n\n  // verify update\n  {\n    Protobuf::RepeatedPtrField<ProtobufWkt::Any> update;\n    envoy::config::endpoint::v3::ClusterLoadAssignment resource;\n    resource.set_cluster_name(\"ns1/resource1\");\n    update.Add()->PackFrom(resource);\n    expectDeltaUpdate(callbacks1, {resource}, {}, \"version0\");\n    expectDeltaUpdate(callbacks2, {resource}, {}, \"version0\");\n    doDeltaUpdate(watch_map, update, {}, \"version0\");\n  }\n  // verify removal\n  {\n    Protobuf::RepeatedPtrField<ProtobufWkt::Any> update;\n    expectDeltaUpdate(callbacks2, {}, {\"ns2/removed\"}, \"version1\");\n    doDeltaUpdate(watch_map, update, {\"ns2/removed\"}, \"version1\");\n  }\n  // verify a not-found response to an on-demand request: such a response will contain an empty\n  // resource wrapper with the name and aliases fields containing the alias used in the request.\n  {\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> empty_resources;\n    const auto version = \"version3\";\n    const auto not_resolved = \"ns3/not_resolved\";\n\n    auto* cur_resource = empty_resources.Add();\n    cur_resource->set_version(version);\n    cur_resource->set_name(not_resolved);\n  
  cur_resource->add_aliases(not_resolved);\n\n    EXPECT_CALL(callbacks3, onConfigUpdate(_, _, _))\n        .WillOnce(Invoke([not_resolved, version](\n                             const std::vector<DecodedResourceRef>& gotten_resources,\n                             const Protobuf::RepeatedPtrField<std::string>&, const std::string&) {\n          EXPECT_EQ(1, gotten_resources.size());\n          EXPECT_EQ(gotten_resources[0].get().version(), version);\n          EXPECT_FALSE(gotten_resources[0].get().hasResource());\n          EXPECT_EQ(gotten_resources[0].get().name(), not_resolved);\n          EXPECT_EQ(gotten_resources[0].get().aliases(), std::vector<std::string>{not_resolved});\n        }));\n\n    Protobuf::RepeatedPtrField<std::string> removed_names_proto;\n\n    watch_map.onConfigUpdate(empty_resources, removed_names_proto, \"version2\");\n  }\n}\n\n} // namespace\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/conn_pool/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"conn_pool_base_test\",\n    srcs = [\"conn_pool_base_test.cc\"],\n    deps = [\n        \"//source/common/conn_pool:conn_pool_base_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/common/conn_pool/conn_pool_base_test.cc",
    "content": "#include \"common/conn_pool/conn_pool_base.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/host.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace ConnectionPool {\n\nusing testing::AnyNumber;\nusing testing::InvokeWithoutArgs;\nusing testing::Return;\n\nclass TestActiveClient : public ActiveClient {\npublic:\n  using ActiveClient::ActiveClient;\n  void close() override { onEvent(Network::ConnectionEvent::LocalClose); }\n  uint64_t id() const override { return 1; }\n  bool closingWithIncompleteStream() const override { return false; }\n  size_t numActiveStreams() const override { return 1; }\n};\n\nclass TestPendingStream : public PendingStream {\npublic:\n  TestPendingStream(ConnPoolImplBase& parent, AttachContext& context)\n      : PendingStream(parent), context_(context) {}\n  AttachContext& context() override { return context_; }\n  AttachContext& context_;\n};\n\nclass TestConnPoolImplBase : public ConnPoolImplBase {\npublic:\n  using ConnPoolImplBase::ConnPoolImplBase;\n  ConnectionPool::Cancellable* newPendingStream(AttachContext& context) override {\n    auto entry = std::make_unique<TestPendingStream>(*this, context);\n    LinkedList::moveIntoList(std::move(entry), pending_streams_);\n    return pending_streams_.front().get();\n  }\n  MOCK_METHOD(ActiveClientPtr, instantiateActiveClient, ());\n  MOCK_METHOD(void, onPoolFailure,\n              (const Upstream::HostDescriptionConstSharedPtr& n, absl::string_view,\n               ConnectionPool::PoolFailureReason, AttachContext&));\n  MOCK_METHOD(void, onPoolReady, (ActiveClient&, AttachContext&));\n};\n\nclass ConnPoolImplBaseTest : public testing::Test {\npublic:\n  ConnPoolImplBaseTest()\n      : pool_(host_, Upstream::ResourcePriority::Default, dispatcher_, nullptr, nullptr) {\n    // Default connections to 1024 because the 
tests shouldn't be relying on the\n    // connection resource limit for most tests.\n    cluster_->resetResourceManager(1024, 1024, 1024, 1, 1);\n    ON_CALL(pool_, instantiateActiveClient).WillByDefault(Invoke([&]() -> ActiveClientPtr {\n      auto ret = std::make_unique<TestActiveClient>(pool_, stream_limit_, concurrent_streams_);\n      clients_.push_back(ret.get());\n      ret->real_host_description_ = descr_;\n      return ret;\n    }));\n  }\n\n  uint32_t stream_limit_ = 100;\n  uint32_t concurrent_streams_ = 1;\n  std::shared_ptr<NiceMock<Upstream::MockHostDescription>> descr_{\n      new NiceMock<Upstream::MockHostDescription>()};\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};\n  Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, \"tcp://127.0.0.1:80\")};\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  TestConnPoolImplBase pool_;\n  AttachContext context_;\n  std::vector<ActiveClient*> clients_;\n};\n\nTEST_F(ConnPoolImplBaseTest, BasicPrefetch) {\n  // Create more than one connection per new stream.\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  // On new stream, create 2 connections.\n  EXPECT_CALL(pool_, instantiateActiveClient).Times(2);\n  auto cancelable = pool_.newStream(context_);\n\n  cancelable->cancel(ConnectionPool::CancelPolicy::CloseExcess);\n  pool_.destructAllConnections();\n}\n\nTEST_F(ConnPoolImplBaseTest, PrefetchOnDisconnect) {\n  testing::InSequence s;\n\n  // Create more than one connection per new stream.\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  // On new stream, create 2 connections.\n  EXPECT_CALL(pool_, instantiateActiveClient).Times(2);\n  pool_.newStream(context_);\n\n  // If a connection fails, existing connections are purged. 
If a retry causes\n  // a new stream, make sure we create the correct number of connections.\n  EXPECT_CALL(pool_, onPoolFailure).WillOnce(InvokeWithoutArgs([&]() -> void {\n    pool_.newStream(context_);\n  }));\n  EXPECT_CALL(pool_, instantiateActiveClient).Times(1);\n  clients_[0]->close();\n\n  EXPECT_CALL(pool_, onPoolFailure);\n  pool_.destructAllConnections();\n}\n\nTEST_F(ConnPoolImplBaseTest, NoPrefetchIfUnhealthy) {\n  // Create more than one connection per new stream.\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  host_->healthFlagSet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC);\n  EXPECT_EQ(host_->health(), Upstream::Host::Health::Unhealthy);\n\n  // On new stream, create 1 connection.\n  EXPECT_CALL(pool_, instantiateActiveClient).Times(1);\n  auto cancelable = pool_.newStream(context_);\n\n  cancelable->cancel(ConnectionPool::CancelPolicy::CloseExcess);\n  pool_.destructAllConnections();\n}\n\nTEST_F(ConnPoolImplBaseTest, NoPrefetchIfDegraded) {\n  // Create more than one connection per new stream.\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  EXPECT_EQ(host_->health(), Upstream::Host::Health::Healthy);\n  host_->healthFlagSet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH);\n  EXPECT_EQ(host_->health(), Upstream::Host::Health::Degraded);\n\n  // On new stream, create 1 connection.\n  EXPECT_CALL(pool_, instantiateActiveClient).Times(1);\n  auto cancelable = pool_.newStream(context_);\n\n  cancelable->cancel(ConnectionPool::CancelPolicy::CloseExcess);\n  pool_.destructAllConnections();\n}\n\nTEST_F(ConnPoolImplBaseTest, ExplicitPrefetch) {\n  // Create more than one connection per new stream.\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n  EXPECT_CALL(pool_, instantiateActiveClient).Times(AnyNumber());\n\n  // With global prefetch off, we won't prefetch.\n  EXPECT_FALSE(pool_.maybePrefetch(0));\n  // With prefetch ratio of 1.1, we'll prefetch two 
connections.\n  // Currently, no number of subsequent calls to prefetch will increase that.\n  EXPECT_TRUE(pool_.maybePrefetch(1.1));\n  EXPECT_TRUE(pool_.maybePrefetch(1.1));\n  EXPECT_FALSE(pool_.maybePrefetch(1.1));\n\n  // With a higher prefetch ratio, more connections may be prefetched.\n  EXPECT_TRUE(pool_.maybePrefetch(3));\n\n  pool_.destructAllConnections();\n}\n\nTEST_F(ConnPoolImplBaseTest, ExplicitPrefetchNotHealthy) {\n  // Create more than one connection per new stream.\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  // Prefetch won't occur if the host is not healthy.\n  host_->healthFlagSet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH);\n  EXPECT_FALSE(pool_.maybePrefetch(1));\n}\n\n} // namespace ConnectionPool\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/crypto/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\n        \"utility_test.cc\",\n    ],\n    external_deps = [\n        \"ssl\",\n    ],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/crypto:utility_lib\",\n        \"//source/extensions/common/crypto:utility_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"verify_signature_fuzz_proto\",\n    srcs = [\"verify_signature_fuzz.proto\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"get_sha_256_digest_fuzz_test\",\n    srcs = [\"get_sha_256_digest_fuzz_test.cc\"],\n    corpus = \"get_sha_256_digest_corpus\",\n    deps = [\"//source/extensions/common/crypto:utility_lib\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"verify_signature_fuzz_test\",\n    srcs = [\"verify_signature_fuzz_test.cc\"],\n    corpus = \"verify_signature_corpus\",\n    dictionaries = [\"verify_signature_fuzz_test.dict\"],\n    deps = [\n        \":verify_signature_fuzz_proto_cc_proto\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/crypto:utility_lib\",\n        \"//source/extensions/common/crypto:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/crypto/get_sha_256_digest_fuzz_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/crypto/utility.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  Buffer::OwnedImpl buffer(buf, len);\n  auto digest = Common::Crypto::UtilitySingleton::get().getSha256Digest(buffer);\n}\n\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/crypto/utility_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/hex.h\"\n#include \"common/crypto/utility.h\"\n\n#include \"extensions/common/crypto/crypto_impl.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Common {\nnamespace Crypto {\nnamespace {\n\nTEST(UtilityTest, TestSha256Digest) {\n  const Buffer::OwnedImpl buffer(\"test data\");\n  const auto digest = UtilitySingleton::get().getSha256Digest(buffer);\n  EXPECT_EQ(\"916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9\",\n            Hex::encode(digest));\n}\n\nTEST(UtilityTest, TestSha256DigestWithEmptyBuffer) {\n  const Buffer::OwnedImpl buffer;\n  const auto digest = UtilitySingleton::get().getSha256Digest(buffer);\n  EXPECT_EQ(\"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\n            Hex::encode(digest));\n}\n\nTEST(UtilityTest, TestSha256DigestGrowingBuffer) {\n  // Adding multiple slices to the buffer\n  Buffer::OwnedImpl buffer(\"slice 1\");\n  auto digest = UtilitySingleton::get().getSha256Digest(buffer);\n  EXPECT_EQ(\"76571770bb46bdf51e1aba95b23c681fda27f6ae56a8a90898a4cb7556e19dcb\",\n            Hex::encode(digest));\n  buffer.add(\"slice 2\");\n  digest = UtilitySingleton::get().getSha256Digest(buffer);\n  EXPECT_EQ(\"290b462b0fe5edcf6b8532de3ca70da8ab77937212042bb959192ec6c9f95b9a\",\n            Hex::encode(digest));\n  buffer.add(\"slice 3\");\n  digest = UtilitySingleton::get().getSha256Digest(buffer);\n  EXPECT_EQ(\"29606bbf02fdc40007cdf799de36d931e3587dafc086937efd6599a4ea9397aa\",\n            Hex::encode(digest));\n}\n\nTEST(UtilityTest, TestSha256Hmac) {\n  const std::string key = \"key\";\n  auto hmac = UtilitySingleton::get().getSha256Hmac(std::vector<uint8_t>(key.begin(), key.end()),\n                                                    \"test data\");\n  EXPECT_EQ(\"087d9eb992628854842ca4dbf790f8164c80355c1e78b72789d830334927a84c\", Hex::encode(hmac));\n}\n\nTEST(UtilityTest, TestSha256HmacWithEmptyArguments) 
{\n  auto hmac = UtilitySingleton::get().getSha256Hmac(std::vector<uint8_t>(), \"\");\n  EXPECT_EQ(\"b613679a0814d9ec772f95d778c35fc5ff1697c493715653c6c712144292c5ad\", Hex::encode(hmac));\n}\n\nTEST(UtilityTest, TestImportPublicKey) {\n  auto key = \"30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d\"\n             \"73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a6\"\n             \"2cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644\"\n             \"ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d\"\n             \"395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884\"\n             \"d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5\"\n             \"183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001\";\n\n  Common::Crypto::CryptoObjectPtr crypto_ptr(\n      Common::Crypto::UtilitySingleton::get().importPublicKey(Hex::decode(key)));\n  auto wrapper = Common::Crypto::Access::getTyped<Common::Crypto::PublicKeyObject>(*crypto_ptr);\n  EVP_PKEY* pkey = wrapper->getEVP_PKEY();\n  EXPECT_NE(nullptr, pkey);\n\n  key = \"badkey\";\n  crypto_ptr = Common::Crypto::UtilitySingleton::get().importPublicKey(Hex::decode(key));\n  wrapper = Common::Crypto::Access::getTyped<Common::Crypto::PublicKeyObject>(*crypto_ptr);\n  pkey = wrapper->getEVP_PKEY();\n  EXPECT_EQ(nullptr, pkey);\n}\n\nTEST(UtilityTest, TestVerifySignature) {\n  auto key = \"30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d\"\n             \"73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a6\"\n             \"2cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644\"\n             \"ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d\"\n     
        \"395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884\"\n             \"d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5\"\n             \"183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001\";\n  auto hash_func = \"sha256\";\n  auto signature =\n      \"345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a2\"\n      \"9c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5\"\n      \"ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf5\"\n      \"70a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace\"\n      \"1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6\"\n      \"295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332\";\n  auto data = \"hello\";\n\n  Common::Crypto::CryptoObjectPtr crypto_ptr(\n      Common::Crypto::UtilitySingleton::get().importPublicKey(Hex::decode(key)));\n  Common::Crypto::CryptoObject* crypto(crypto_ptr.get());\n\n  std::vector<uint8_t> text(data, data + strlen(data));\n\n  auto sig = Hex::decode(signature);\n  auto result = UtilitySingleton::get().verifySignature(hash_func, *crypto, sig, text);\n\n  EXPECT_EQ(true, result.result_);\n  EXPECT_EQ(\"\", result.error_message_);\n\n  result = UtilitySingleton::get().verifySignature(\"unknown\", *crypto, sig, text);\n  EXPECT_EQ(false, result.result_);\n  EXPECT_EQ(\"unknown is not supported.\", result.error_message_);\n\n  auto empty_crypto = std::make_unique<PublicKeyObject>();\n  result = UtilitySingleton::get().verifySignature(hash_func, *empty_crypto, sig, text);\n  EXPECT_EQ(false, result.result_);\n  EXPECT_EQ(\"Failed to initialize digest verify.\", result.error_message_);\n\n  data = \"baddata\";\n  text = std::vector<uint8_t>(data, data + strlen(data));\n  result = 
UtilitySingleton::get().verifySignature(hash_func, *crypto, sig, text);\n  EXPECT_EQ(false, result.result_);\n  EXPECT_EQ(\"Failed to verify digest. Error code: 0\", result.error_message_);\n\n  data = \"hello\";\n  text = std::vector<uint8_t>(data, data + strlen(data));\n  result = UtilitySingleton::get().verifySignature(hash_func, *crypto, Hex::decode(\"000000\"), text);\n  EXPECT_EQ(false, result.result_);\n  EXPECT_EQ(\"Failed to verify digest. Error code: 0\", result.error_message_);\n}\n\n} // namespace\n} // namespace Crypto\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/crypto/verify_signature_corpus/test_contains_sha1_wrong",
    "content": "key: \"30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a62cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001\"\nhash_func: \"sha1\"\nsignature: \"345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a29c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf570a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332\"\ndata: \"hello\"\n"
  },
  {
    "path": "test/common/crypto/verify_signature_corpus/test_contains_sha256_correct",
    "content": "key: \"30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a62cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001\"\nhash_func: \"sha256\"\nsignature: \"345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a29c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf570a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332\"\ndata: \"hello\"\n"
  },
  {
    "path": "test/common/crypto/verify_signature_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.crypto;\n\nmessage VerifySignatureFuzzTestCase {\n  string key = 1;\n  string hash_func = 2;\n  string signature = 3;\n  string data = 4;\n}"
  },
  {
    "path": "test/common/crypto/verify_signature_fuzz_test.cc",
    "content": "#include \"common/common/hex.h\"\n#include \"common/crypto/utility.h\"\n\n#include \"test/common/crypto/verify_signature_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Common {\nnamespace Crypto {\nnamespace {\n\nDEFINE_PROTO_FUZZER(const test::common::crypto::VerifySignatureFuzzTestCase& input) {\n  const auto& key = input.key();\n  const auto& hash_func = input.hash_func();\n  const auto& signature = input.signature();\n  const auto& data = input.data();\n\n  Common::Crypto::CryptoObjectPtr crypto_ptr(\n      Common::Crypto::UtilitySingleton::get().importPublicKey(Hex::decode(key)));\n  Common::Crypto::CryptoObject* crypto(crypto_ptr.get());\n\n  std::vector<uint8_t> text(data.begin(), data.end());\n\n  const auto sig = Hex::decode(signature);\n  UtilitySingleton::get().verifySignature(hash_func, *crypto, sig, text);\n}\n\n} // namespace\n} // namespace Crypto\n} // namespace Common\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/crypto/verify_signature_fuzz_test.dict",
    "content": "# hash_func\n\"sha1\"\n\"sha224\"\n\"sha256\"\n\"sha384\"\n\"sha512\"\n"
  },
  {
    "path": "test/common/event/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"dispatcher_impl_test\",\n    srcs = [\"dispatcher_impl_test.cc\"],\n    tags = [\"fails_on_windows\"],\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/common/event:deferred_task\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"file_event_impl_test\",\n    srcs = [\"file_event_impl_test.cc\"],\n    tags = [\"fails_on_windows\"],\n    deps = [\n        \"//include/envoy/event:file_event_interface\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"scaled_range_timer_manager_test\",\n    srcs = [\"scaled_range_timer_manager_test.cc\"],\n    deps = [\n        \"//source/common/event:scaled_range_timer_manager\",\n        \"//test/mocks/event:wrapped_dispatcher\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/event/dispatcher_impl_test.cc",
    "content": "#include <functional>\n\n#include \"envoy/thread/thread.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/event/deferred_task.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/event/timer_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Event {\nnamespace {\n\nvoid onWatcherReady(evwatch*, const evwatch_prepare_cb_info*, void* arg) {\n  // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new.\n  auto watcher = static_cast<ReadyWatcher*>(arg);\n  watcher->ready();\n}\n\nclass SchedulableCallbackImplTest : public testing::Test {\nprotected:\n  SchedulableCallbackImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  void createCallback(std::function<void()> cb) {\n    callbacks_.emplace_back(dispatcher_->createSchedulableCallback(cb));\n  }\n\n  Api::ApiPtr api_;\n  DispatcherPtr dispatcher_;\n  std::vector<SchedulableCallbackPtr> callbacks_;\n};\n\nTEST_F(SchedulableCallbackImplTest, ScheduleCurrentAndCancel) {\n  ReadyWatcher watcher;\n\n  auto cb = dispatcher_->createSchedulableCallback([&]() { watcher.ready(); });\n\n  // Cancel is a no-op if not scheduled.\n  cb->cancel();\n  dispatcher_->run(Dispatcher::RunType::Block);\n\n  // Callback is not invoked if cancelled before it executes.\n  cb->scheduleCallbackCurrentIteration();\n  EXPECT_TRUE(cb->enabled());\n  cb->cancel();\n  EXPECT_FALSE(cb->enabled());\n  dispatcher_->run(Dispatcher::RunType::Block);\n\n  // Scheduled callback executes.\n  
cb->scheduleCallbackCurrentIteration();\n  EXPECT_CALL(watcher, ready());\n  dispatcher_->run(Dispatcher::RunType::Block);\n\n  // Callbacks implicitly cancelled if runner is deleted.\n  cb->scheduleCallbackCurrentIteration();\n  cb.reset();\n  dispatcher_->run(Dispatcher::RunType::Block);\n}\n\nTEST_F(SchedulableCallbackImplTest, ScheduleNextAndCancel) {\n  ReadyWatcher watcher;\n\n  auto cb = dispatcher_->createSchedulableCallback([&]() { watcher.ready(); });\n\n  // Cancel is a no-op if not scheduled.\n  cb->cancel();\n  dispatcher_->run(Dispatcher::RunType::Block);\n\n  // Callback is not invoked if cancelled before it executes.\n  cb->scheduleCallbackNextIteration();\n  EXPECT_TRUE(cb->enabled());\n  cb->cancel();\n  EXPECT_FALSE(cb->enabled());\n  dispatcher_->run(Dispatcher::RunType::Block);\n\n  // Scheduled callback executes.\n  cb->scheduleCallbackNextIteration();\n  EXPECT_CALL(watcher, ready());\n  dispatcher_->run(Dispatcher::RunType::Block);\n\n  // Callbacks implicitly cancelled if runner is deleted.\n  cb->scheduleCallbackNextIteration();\n  cb.reset();\n  dispatcher_->run(Dispatcher::RunType::Block);\n}\n\nTEST_F(SchedulableCallbackImplTest, ScheduleOrder) {\n  ReadyWatcher watcher0;\n  createCallback([&]() { watcher0.ready(); });\n  ReadyWatcher watcher1;\n  createCallback([&]() { watcher1.ready(); });\n  ReadyWatcher watcher2;\n  createCallback([&]() { watcher2.ready(); });\n\n  // Current iteration callbacks run in the order they are scheduled. 
Next iteration callbacks run\n  // after current iteration callbacks.\n  callbacks_[0]->scheduleCallbackNextIteration();\n  callbacks_[1]->scheduleCallbackCurrentIteration();\n  callbacks_[2]->scheduleCallbackCurrentIteration();\n  InSequence s;\n  EXPECT_CALL(watcher1, ready());\n  EXPECT_CALL(watcher2, ready());\n  EXPECT_CALL(watcher0, ready());\n  dispatcher_->run(Dispatcher::RunType::Block);\n}\n\nTEST_F(SchedulableCallbackImplTest, ScheduleChainingAndCancellation) {\n  DispatcherImpl* dispatcher_impl = static_cast<DispatcherImpl*>(dispatcher_.get());\n  ReadyWatcher prepare_watcher;\n  evwatch_prepare_new(&dispatcher_impl->base(), onWatcherReady, &prepare_watcher);\n\n  ReadyWatcher watcher0;\n  createCallback([&]() {\n    watcher0.ready();\n    callbacks_[1]->scheduleCallbackCurrentIteration();\n  });\n\n  ReadyWatcher watcher1;\n  createCallback([&]() {\n    watcher1.ready();\n    callbacks_[2]->scheduleCallbackCurrentIteration();\n    callbacks_[3]->scheduleCallbackCurrentIteration();\n    callbacks_[4]->scheduleCallbackCurrentIteration();\n    callbacks_[5]->scheduleCallbackNextIteration();\n  });\n\n  ReadyWatcher watcher2;\n  createCallback([&]() {\n    watcher2.ready();\n    EXPECT_TRUE(callbacks_[3]->enabled());\n    callbacks_[3]->cancel();\n    EXPECT_TRUE(callbacks_[4]->enabled());\n    callbacks_[4].reset();\n  });\n\n  ReadyWatcher watcher3;\n  createCallback([&]() { watcher3.ready(); });\n\n  ReadyWatcher watcher4;\n  createCallback([&]() { watcher4.ready(); });\n\n  ReadyWatcher watcher5;\n  createCallback([&]() { watcher5.ready(); });\n\n  // Chained callbacks run in the same event loop iteration, as signaled by a single call to\n  // prepare_watcher.ready(). watcher3 and watcher4 are not invoked because cb2 cancels\n  // cb3 and deletes cb4 as part of its execution. 
cb5 runs after a second call to the\n  // prepare callback since it's scheduled for the next iteration.\n  callbacks_[0]->scheduleCallbackCurrentIteration();\n  InSequence s;\n  EXPECT_CALL(prepare_watcher, ready());\n  EXPECT_CALL(watcher0, ready());\n  EXPECT_CALL(watcher1, ready());\n  EXPECT_CALL(watcher2, ready());\n  EXPECT_CALL(prepare_watcher, ready());\n  EXPECT_CALL(watcher5, ready());\n  dispatcher_->run(Dispatcher::RunType::Block);\n}\n\nTEST_F(SchedulableCallbackImplTest, RescheduleNext) {\n  DispatcherImpl* dispatcher_impl = static_cast<DispatcherImpl*>(dispatcher_.get());\n  ReadyWatcher prepare_watcher;\n  evwatch_prepare_new(&dispatcher_impl->base(), onWatcherReady, &prepare_watcher);\n\n  ReadyWatcher watcher0;\n  createCallback([&]() {\n    watcher0.ready();\n    // Callback 1 was scheduled from the previous iteration, expect it to fire in the current\n    // iteration despite the attempt to reschedule.\n    callbacks_[1]->scheduleCallbackNextIteration();\n    // Callback 2 expected to execute next iteration because current called before next.\n    callbacks_[2]->scheduleCallbackCurrentIteration();\n    callbacks_[2]->scheduleCallbackNextIteration();\n    // Callback 3 expected to execute next iteration because next was called before current.\n    callbacks_[3]->scheduleCallbackNextIteration();\n    callbacks_[3]->scheduleCallbackCurrentIteration();\n  });\n\n  ReadyWatcher watcher1;\n  createCallback([&]() { watcher1.ready(); });\n  ReadyWatcher watcher2;\n  createCallback([&]() { watcher2.ready(); });\n  ReadyWatcher watcher3;\n  createCallback([&]() { watcher3.ready(); });\n\n  // Schedule callbacks 0 and 1 outside the loop, both will run in the same iteration of the event\n  // loop.\n  callbacks_[0]->scheduleCallbackCurrentIteration();\n  callbacks_[1]->scheduleCallbackNextIteration();\n\n  InSequence s;\n  EXPECT_CALL(prepare_watcher, ready());\n  EXPECT_CALL(watcher0, ready());\n  EXPECT_CALL(watcher1, ready());\n  EXPECT_CALL(watcher2, 
ready());\n  EXPECT_CALL(prepare_watcher, ready());\n  EXPECT_CALL(watcher3, ready());\n  dispatcher_->run(Dispatcher::RunType::Block);\n}\n\nclass TestDeferredDeletable : public DeferredDeletable {\npublic:\n  TestDeferredDeletable(std::function<void()> on_destroy) : on_destroy_(on_destroy) {}\n  ~TestDeferredDeletable() override { on_destroy_(); }\n\nprivate:\n  std::function<void()> on_destroy_;\n};\n\nTEST(DeferredDeleteTest, DeferredDelete) {\n  InSequence s;\n  Api::ApiPtr api = Api::createApiForTest();\n  DispatcherPtr dispatcher(api->allocateDispatcher(\"test_thread\"));\n  ReadyWatcher watcher1;\n\n  dispatcher->deferredDelete(\n      DeferredDeletablePtr{new TestDeferredDeletable([&]() -> void { watcher1.ready(); })});\n\n  // The first one will get deleted inline.\n  EXPECT_CALL(watcher1, ready());\n  dispatcher->clearDeferredDeleteList();\n\n  // This one does a nested deferred delete. We should need two clear calls to actually get\n  // rid of it with the vector swapping. We also test that inline clear() call does nothing.\n  ReadyWatcher watcher2;\n  ReadyWatcher watcher3;\n  dispatcher->deferredDelete(DeferredDeletablePtr{new TestDeferredDeletable([&]() -> void {\n    watcher2.ready();\n    dispatcher->deferredDelete(\n        DeferredDeletablePtr{new TestDeferredDeletable([&]() -> void { watcher3.ready(); })});\n    dispatcher->clearDeferredDeleteList();\n  })});\n\n  EXPECT_CALL(watcher2, ready());\n  dispatcher->clearDeferredDeleteList();\n\n  EXPECT_CALL(watcher3, ready());\n  dispatcher->clearDeferredDeleteList();\n}\n\nTEST(DeferredTaskTest, DeferredTask) {\n  InSequence s;\n  Api::ApiPtr api = Api::createApiForTest();\n  DispatcherPtr dispatcher(api->allocateDispatcher(\"test_thread\"));\n  ReadyWatcher watcher1;\n\n  DeferredTaskUtil::deferredRun(*dispatcher, [&watcher1]() -> void { watcher1.ready(); });\n  // The first one will get deleted inline.\n  EXPECT_CALL(watcher1, ready());\n  dispatcher->clearDeferredDeleteList();\n\n  // Deferred 
task is scheduled FIFO.\n  ReadyWatcher watcher2;\n  ReadyWatcher watcher3;\n  DeferredTaskUtil::deferredRun(*dispatcher, [&watcher2]() -> void { watcher2.ready(); });\n  DeferredTaskUtil::deferredRun(*dispatcher, [&watcher3]() -> void { watcher3.ready(); });\n  EXPECT_CALL(watcher2, ready());\n  EXPECT_CALL(watcher3, ready());\n  dispatcher->clearDeferredDeleteList();\n}\n\nclass DispatcherImplTest : public testing::Test {\nprotected:\n  DispatcherImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {\n    dispatcher_thread_ = api_->threadFactory().createThread([this]() {\n      // Must create a keepalive timer to keep the dispatcher from exiting.\n      std::chrono::milliseconds time_interval(500);\n      keepalive_timer_ = dispatcher_->createTimer(\n          [this, time_interval]() { keepalive_timer_->enableTimer(time_interval); });\n      keepalive_timer_->enableTimer(time_interval);\n\n      dispatcher_->run(Dispatcher::RunType::Block);\n    });\n  }\n\n  ~DispatcherImplTest() override {\n    dispatcher_->exit();\n    dispatcher_thread_->join();\n  }\n\n  void timerTest(std::function<void(Timer&)> enable_timer_delegate) {\n    TimerPtr timer;\n    dispatcher_->post([this, &timer]() {\n      {\n        Thread::LockGuard lock(mu_);\n        timer = dispatcher_->createTimer([this]() {\n          {\n            Thread::LockGuard lock(mu_);\n            work_finished_ = true;\n          }\n          cv_.notifyOne();\n        });\n        EXPECT_FALSE(timer->enabled());\n      }\n      cv_.notifyOne();\n    });\n\n    Thread::LockGuard lock(mu_);\n    while (timer == nullptr) {\n      cv_.wait(mu_);\n    }\n    enable_timer_delegate(*timer);\n    while (!work_finished_) {\n      cv_.wait(mu_);\n    }\n  }\n\n  NiceMock<Stats::MockStore> scope_; // Used in InitializeStats, must outlive dispatcher_->exit().\n  Api::ApiPtr api_;\n  Thread::ThreadPtr dispatcher_thread_;\n  DispatcherPtr dispatcher_;\n  
Thread::MutexBasicLockable mu_;\n  Thread::CondVar cv_;\n\n  bool work_finished_{false};\n  TimerPtr keepalive_timer_;\n};\n\n// TODO(mergeconflict): We also need integration testing to validate that the expected histograms\n// are written when `enable_dispatcher_stats` is true. See issue #6582.\nTEST_F(DispatcherImplTest, InitializeStats) {\n  EXPECT_CALL(scope_,\n              histogram(\"test.dispatcher.loop_duration_us\", Stats::Histogram::Unit::Microseconds));\n  EXPECT_CALL(scope_,\n              histogram(\"test.dispatcher.poll_delay_us\", Stats::Histogram::Unit::Microseconds));\n  dispatcher_->initializeStats(scope_, \"test.\");\n}\n\nTEST_F(DispatcherImplTest, Post) {\n  dispatcher_->post([this]() {\n    {\n      Thread::LockGuard lock(mu_);\n      work_finished_ = true;\n    }\n    cv_.notifyOne();\n  });\n\n  Thread::LockGuard lock(mu_);\n  while (!work_finished_) {\n    cv_.wait(mu_);\n  }\n}\n\n// Ensure that there is no deadlock related to calling a posted callback, or\n// destructing a closure when finished calling it.\nTEST_F(DispatcherImplTest, RunPostCallbacksLocking) {\n  class PostOnDestruct {\n  public:\n    PostOnDestruct(Dispatcher& dispatcher) : dispatcher_(dispatcher) {}\n    ~PostOnDestruct() {\n      dispatcher_.post([]() {});\n    }\n    void method() {}\n    Dispatcher& dispatcher_;\n  };\n\n  {\n    // Block dispatcher first to ensure that both posted events below are handled\n    // by a single call to runPostCallbacks().\n    //\n    // This also ensures that the post_lock_ is not held while callbacks are called,\n    // or else this would deadlock.\n    Thread::LockGuard lock(mu_);\n    dispatcher_->post([this]() { Thread::LockGuard lock(mu_); });\n\n    auto post_on_destruct = std::make_shared<PostOnDestruct>(*dispatcher_);\n    dispatcher_->post([=]() { post_on_destruct->method(); });\n    dispatcher_->post([this]() {\n      {\n        Thread::LockGuard lock(mu_);\n        work_finished_ = true;\n      }\n      cv_.notifyOne();\n  
  });\n  }\n\n  Thread::LockGuard lock(mu_);\n  while (!work_finished_) {\n    cv_.wait(mu_);\n  }\n}\n\nTEST_F(DispatcherImplTest, Timer) {\n  timerTest([](Timer& timer) { timer.enableTimer(std::chrono::milliseconds(0)); });\n  timerTest([](Timer& timer) { timer.enableTimer(std::chrono::milliseconds(50)); });\n  timerTest([](Timer& timer) { timer.enableHRTimer(std::chrono::microseconds(50)); });\n}\n\nTEST_F(DispatcherImplTest, TimerWithScope) {\n  TimerPtr timer;\n  MockScopedTrackedObject scope;\n  dispatcher_->post([this, &timer, &scope]() {\n    {\n      // Expect a call to dumpState. The timer will call onFatalError during\n      // the alarm interval, and if the scope is tracked correctly this will\n      // result in a dumpState call.\n      EXPECT_CALL(scope, dumpState(_, _));\n      Thread::LockGuard lock(mu_);\n      timer = dispatcher_->createTimer([this]() {\n        {\n          Thread::LockGuard lock(mu_);\n          static_cast<DispatcherImpl*>(dispatcher_.get())->onFatalError(std::cerr);\n          work_finished_ = true;\n        }\n        cv_.notifyOne();\n      });\n      EXPECT_FALSE(timer->enabled());\n    }\n    cv_.notifyOne();\n  });\n\n  Thread::LockGuard lock(mu_);\n  while (timer == nullptr) {\n    cv_.wait(mu_);\n  }\n  timer->enableTimer(std::chrono::milliseconds(50), &scope);\n\n  while (!work_finished_) {\n    cv_.wait(mu_);\n  }\n}\n\nTEST_F(DispatcherImplTest, IsThreadSafe) {\n  dispatcher_->post([this]() {\n    {\n      Thread::LockGuard lock(mu_);\n      // Thread safe because it is called within the dispatcher thread's context.\n      EXPECT_TRUE(dispatcher_->isThreadSafe());\n      work_finished_ = true;\n    }\n    cv_.notifyOne();\n  });\n\n  Thread::LockGuard lock(mu_);\n  while (!work_finished_) {\n    cv_.wait(mu_);\n  }\n  // Not thread safe because it is not called within the dispatcher thread's context.\n  EXPECT_FALSE(dispatcher_->isThreadSafe());\n}\n\nclass NotStartedDispatcherImplTest : public testing::Test 
{\nprotected:\n  NotStartedDispatcherImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  Api::ApiPtr api_;\n  DispatcherPtr dispatcher_;\n};\n\nTEST_F(NotStartedDispatcherImplTest, IsThreadSafe) {\n  // Thread safe because the dispatcher has not started.\n  // Therefore, no thread id has been assigned.\n  EXPECT_TRUE(dispatcher_->isThreadSafe());\n}\n\nclass DispatcherMonotonicTimeTest : public testing::Test {\nprotected:\n  DispatcherMonotonicTimeTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n  ~DispatcherMonotonicTimeTest() override = default;\n\n  Api::ApiPtr api_;\n  DispatcherPtr dispatcher_;\n  MonotonicTime time_;\n};\n\nTEST_F(DispatcherMonotonicTimeTest, UpdateApproximateMonotonicTime) {\n  dispatcher_->post([this]() {\n    {\n      MonotonicTime time1 = dispatcher_->approximateMonotonicTime();\n      dispatcher_->updateApproximateMonotonicTime();\n      MonotonicTime time2 = dispatcher_->approximateMonotonicTime();\n      EXPECT_LT(time1, time2);\n    }\n  });\n\n  dispatcher_->run(Dispatcher::RunType::Block);\n}\n\nTEST_F(DispatcherMonotonicTimeTest, ApproximateMonotonicTime) {\n  // approximateMonotonicTime is constant within one event loop run.\n  dispatcher_->post([this]() {\n    {\n      time_ = dispatcher_->approximateMonotonicTime();\n      EXPECT_EQ(time_, dispatcher_->approximateMonotonicTime());\n    }\n  });\n\n  dispatcher_->run(Dispatcher::RunType::Block);\n\n  // approximateMonotonicTime is increasing between event loop runs.\n  dispatcher_->post([this]() {\n    { EXPECT_LT(time_, dispatcher_->approximateMonotonicTime()); }\n  });\n\n  dispatcher_->run(Dispatcher::RunType::Block);\n}\n\nclass TimerImplTest : public testing::TestWithParam<bool> {\nprotected:\n  TimerImplTest() {\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.activate_timers_next_event_loop\",\n          
activateTimersNextEventLoop() ? \"true\" : \"false\"}});\n    // Hook into event loop prepare and check events.\n    evwatch_prepare_new(&libevent_base_, onWatcherReady, &prepare_watcher_);\n    evwatch_check_new(&libevent_base_, onCheck, this);\n  }\n  ~TimerImplTest() override { ASSERT(check_callbacks_.empty()); }\n\n  bool activateTimersNextEventLoop() { return GetParam(); }\n\n  // Run a callback inside the event loop. The libevent monotonic time used for timer registration\n  // is frozen while within this callback, so timers enabled within this callback end up with the\n  // requested relative registration times. The callback can invoke advanceLibeventTime() to force\n  // the libevent monotonic time forward before libevent determines the list of triggered timers.\n  void runInEventLoop(std::function<void()> cb) {\n    check_callbacks_.emplace_back(cb);\n\n    // Add a callback to the event loop to force it to run at least once despite there being no\n    // registered timers yet.\n    auto callback = dispatcher_->createSchedulableCallback([]() {});\n    callback->scheduleCallbackCurrentIteration();\n\n    in_event_loop_ = true;\n    dispatcher_->run(Dispatcher::RunType::NonBlock);\n    in_event_loop_ = false;\n  }\n\n  // Advance time forward while updating the libevent's time cache and monotonic time reference.\n  // Pushing the monotonic time reference forward eliminates the possibility of time moving\n  // backwards and breaking the overly picky TimerImpl tests below.\n  void advanceLibeventTime(absl::Duration duration) {\n    ASSERT(in_event_loop_);\n    requested_advance_ += duration;\n    adjustCachedTime();\n  }\n\n  // Similar to advanceLibeventTime, but for use in mock callback actions. 
Monotonic time will be\n  // moved forward at the start of the next event loop iteration.\n  void advanceLibeventTimeNextIteration(absl::Duration duration) {\n    ASSERT(in_event_loop_);\n    requested_advance_ += duration;\n  }\n\n  Api::ApiPtr api_{Api::createApiForTest()};\n  DispatcherPtr dispatcher_{api_->allocateDispatcher(\"test_thread\")};\n  event_base& libevent_base_{static_cast<DispatcherImpl&>(*dispatcher_).base()};\n  ReadyWatcher prepare_watcher_;\n  std::vector<SchedulableCallbackPtr> callbacks_;\n\nprivate:\n  static void onCheck(evwatch*, const evwatch_check_cb_info*, void* arg) {\n    // `arg` contains the TimerImplTest passed in from evwatch_check_new.\n    auto self = static_cast<TimerImplTest*>(arg);\n    auto check_callbacks = self->check_callbacks_;\n    self->check_callbacks_.clear();\n    for (const auto& cb : check_callbacks) {\n      cb();\n    }\n    self->adjustCachedTime();\n  }\n\n  absl::Duration cachedTimeAsDuration() const {\n    timeval tv;\n    int ret = event_base_gettimeofday_cached(&libevent_base_, &tv);\n    RELEASE_ASSERT(ret == 0, \"event_base_gettimeofday_cached failed\");\n    return absl::DurationFromTimeval(tv);\n  }\n\n  void adjustCachedTime() {\n    auto start = cachedTimeAsDuration();\n    // Sanity check: ensure that cache time is in use.\n    EXPECT_EQ(start, cachedTimeAsDuration());\n\n    while (cachedTimeAsDuration() - start < requested_advance_) {\n      absl::SleepFor(absl::Milliseconds(1));\n      event_base_update_cache_time(&libevent_base_);\n    }\n    requested_advance_ = absl::ZeroDuration();\n  }\n\n  TestScopedRuntime scoped_runtime_;\n  absl::Duration requested_advance_ = absl::ZeroDuration();\n  std::vector<std::function<void()>> check_callbacks_;\n  bool in_event_loop_{};\n};\n\nINSTANTIATE_TEST_SUITE_P(DelayActivation, TimerImplTest, testing::Bool());\n\nTEST_P(TimerImplTest, TimerEnabledDisabled) {\n  InSequence s;\n\n  Event::TimerPtr timer = dispatcher_->createTimer([] {});\n  
EXPECT_FALSE(timer->enabled());\n  timer->enableTimer(std::chrono::milliseconds(0));\n  EXPECT_TRUE(timer->enabled());\n  EXPECT_CALL(prepare_watcher_, ready());\n  dispatcher_->run(Dispatcher::RunType::NonBlock);\n  EXPECT_FALSE(timer->enabled());\n  timer->enableHRTimer(std::chrono::milliseconds(0));\n  EXPECT_TRUE(timer->enabled());\n  EXPECT_CALL(prepare_watcher_, ready());\n  dispatcher_->run(Dispatcher::RunType::NonBlock);\n  EXPECT_FALSE(timer->enabled());\n}\n\nTEST_P(TimerImplTest, ChangeTimerBackwardsBeforeRun) {\n  ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); });\n\n  ReadyWatcher watcher3;\n  Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); });\n\n  // Expect watcher3 to trigger first because the deadlines for timers 1 and 2 was moved backwards.\n  InSequence s;\n  EXPECT_CALL(prepare_watcher_, ready());\n  EXPECT_CALL(watcher3, ready());\n  EXPECT_CALL(watcher2, ready());\n  EXPECT_CALL(watcher1, ready());\n  runInEventLoop([&]() {\n    timer1->enableTimer(std::chrono::milliseconds(0));\n    timer2->enableTimer(std::chrono::milliseconds(1));\n    timer3->enableTimer(std::chrono::milliseconds(2));\n    timer2->enableTimer(std::chrono::milliseconds(3));\n    timer1->enableTimer(std::chrono::milliseconds(4));\n\n    // Advance time by 10ms so timers above all trigger in the same loop iteration.\n    advanceLibeventTime(absl::Milliseconds(10));\n  });\n}\n\nTEST_P(TimerImplTest, ChangeTimerForwardsToZeroBeforeRun) {\n  ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); });\n\n  if (activateTimersNextEventLoop()) {\n    // Expect watcher1 to trigger first because timer1's deadline was moved forward.\n    InSequence 
s;\n    EXPECT_CALL(prepare_watcher_, ready());\n    EXPECT_CALL(watcher1, ready());\n    EXPECT_CALL(watcher2, ready());\n  } else {\n    // Timers execute in the wrong order.\n    InSequence s;\n    EXPECT_CALL(prepare_watcher_, ready());\n    EXPECT_CALL(watcher2, ready());\n    EXPECT_CALL(watcher1, ready());\n  }\n  runInEventLoop([&]() {\n    timer1->enableTimer(std::chrono::milliseconds(2));\n    timer2->enableTimer(std::chrono::milliseconds(1));\n    timer1->enableTimer(std::chrono::milliseconds(0));\n\n    // Advance time by 10ms so timers above all trigger in the same loop iteration.\n    advanceLibeventTime(absl::Milliseconds(10));\n  });\n}\n\nTEST_P(TimerImplTest, ChangeTimerForwardsToNonZeroBeforeRun) {\n  ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); });\n\n  // Expect watcher1 to trigger first because timer1's deadline was moved forward.\n  InSequence s;\n  EXPECT_CALL(prepare_watcher_, ready());\n  EXPECT_CALL(watcher1, ready());\n  EXPECT_CALL(watcher2, ready());\n  runInEventLoop([&]() {\n    timer1->enableTimer(std::chrono::milliseconds(3));\n    timer2->enableTimer(std::chrono::milliseconds(2));\n    timer1->enableTimer(std::chrono::milliseconds(1));\n\n    // Advance time by 10ms so timers above all trigger in the same loop iteration.\n    advanceLibeventTime(absl::Milliseconds(10));\n  });\n}\n\nTEST_P(TimerImplTest, ChangeLargeTimerForwardToZeroBeforeRun) {\n  ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); });\n\n  // Expect watcher1 to trigger because timer1's deadline was moved forward.\n  InSequence s;\n  EXPECT_CALL(prepare_watcher_, ready());\n  EXPECT_CALL(watcher1, ready());\n  EXPECT_CALL(prepare_watcher_, 
ready());\n  runInEventLoop([&]() {\n    timer1->enableTimer(std::chrono::seconds(2000));\n    timer2->enableTimer(std::chrono::seconds(1000));\n    timer1->enableTimer(std::chrono::seconds(0));\n  });\n}\n\nTEST_P(TimerImplTest, ChangeLargeTimerForwardToNonZeroBeforeRun) {\n  ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); });\n\n  // Expect watcher1 to trigger because timer1's deadline was moved forward.\n  InSequence s;\n  EXPECT_CALL(prepare_watcher_, ready());\n  EXPECT_CALL(watcher1, ready());\n  EXPECT_CALL(prepare_watcher_, ready());\n  runInEventLoop([&]() {\n    timer1->enableTimer(std::chrono::seconds(2000));\n    timer2->enableTimer(std::chrono::seconds(1000));\n    timer1->enableTimer(std::chrono::milliseconds(1));\n\n    // Advance time by 10ms so timers above all trigger in the same loop iteration.\n    advanceLibeventTime(absl::Milliseconds(10));\n  });\n}\n\n// Timers scheduled at different times execute in order.\nTEST_P(TimerImplTest, TimerOrdering) {\n  ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); });\n\n  ReadyWatcher watcher3;\n  Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); });\n\n  // Expect watcher calls to happen in order since timers have different times.\n  InSequence s;\n  if (activateTimersNextEventLoop()) {\n    EXPECT_CALL(prepare_watcher_, ready());\n    EXPECT_CALL(watcher1, ready());\n    EXPECT_CALL(watcher2, ready());\n    EXPECT_CALL(watcher3, ready());\n  } else {\n    EXPECT_CALL(prepare_watcher_, ready());\n    EXPECT_CALL(watcher1, ready());\n    EXPECT_CALL(watcher2, ready());\n    EXPECT_CALL(watcher3, ready());\n  }\n\n  runInEventLoop([&]() {\n    
timer1->enableTimer(std::chrono::milliseconds(0));\n    timer2->enableTimer(std::chrono::milliseconds(1));\n    timer3->enableTimer(std::chrono::milliseconds(2));\n\n    // Advance time by 10ms so timers above all trigger in the same loop iteration.\n    advanceLibeventTime(absl::Milliseconds(10));\n\n    EXPECT_TRUE(timer1->enabled());\n    EXPECT_TRUE(timer2->enabled());\n    EXPECT_TRUE(timer3->enabled());\n  });\n}\n\n// Alarms that are scheduled to execute and are cancelled do not trigger.\nTEST_P(TimerImplTest, TimerOrderAndDisableAlarm) {\n  ReadyWatcher watcher3;\n  Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); });\n\n  ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] {\n    timer2->disableTimer();\n    watcher1.ready();\n  });\n\n  // Expect watcher calls to happen in order since timers have different times.\n  InSequence s;\n  EXPECT_CALL(prepare_watcher_, ready());\n  EXPECT_CALL(watcher1, ready());\n  EXPECT_CALL(watcher3, ready());\n  runInEventLoop([&]() {\n    timer1->enableTimer(std::chrono::milliseconds(0));\n    timer2->enableTimer(std::chrono::milliseconds(1));\n    timer3->enableTimer(std::chrono::milliseconds(2));\n\n    // Advance time by 10ms so timers above all trigger in the same loop iteration.\n    advanceLibeventTime(absl::Milliseconds(10));\n\n    EXPECT_TRUE(timer1->enabled());\n    EXPECT_TRUE(timer2->enabled());\n    EXPECT_TRUE(timer3->enabled());\n  });\n}\n\n// Change the registration time for a timer that is already activated by disabling and re-enabling\n// the timer. 
Verify that execution is delayed.\nTEST_P(TimerImplTest, TimerOrderDisableAndReschedule) {\n  ReadyWatcher watcher4;\n  Event::TimerPtr timer4 = dispatcher_->createTimer([&] { watcher4.ready(); });\n\n  ReadyWatcher watcher3;\n  Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); });\n\n  ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] {\n    timer2->disableTimer();\n    timer2->enableTimer(std::chrono::milliseconds(0));\n    timer3->disableTimer();\n    timer3->enableTimer(std::chrono::milliseconds(1));\n    watcher1.ready();\n  });\n\n  // timer1 is expected to run first and reschedule timers 2 and 3. timer4 should fire before\n  // timer2 and timer3 since timer4's registration is unaffected.\n  InSequence s;\n  if (activateTimersNextEventLoop()) {\n    EXPECT_CALL(prepare_watcher_, ready());\n    EXPECT_CALL(watcher1, ready());\n    EXPECT_CALL(watcher4, ready());\n    // Sleep during prepare to ensure that enough time has elapsed before timer evaluation to ensure\n    // that timers 2 and 3 are picked up by the same loop iteration. 
Without the sleep the two\n    // timers could execute in different loop iterations.\n    EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() {\n      advanceLibeventTimeNextIteration(absl::Milliseconds(10));\n    }));\n    EXPECT_CALL(watcher2, ready());\n    EXPECT_CALL(watcher3, ready());\n  } else {\n    EXPECT_CALL(prepare_watcher_, ready());\n    EXPECT_CALL(watcher1, ready());\n    EXPECT_CALL(watcher4, ready());\n    EXPECT_CALL(watcher2, ready());\n    // Sleep in prepare cb to avoid flakiness if epoll_wait returns before the timer timeout.\n    EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() {\n      advanceLibeventTimeNextIteration(absl::Milliseconds(10));\n    }));\n    EXPECT_CALL(watcher3, ready());\n  }\n  runInEventLoop([&]() {\n    timer1->enableTimer(std::chrono::milliseconds(0));\n    timer2->enableTimer(std::chrono::milliseconds(1));\n    timer3->enableTimer(std::chrono::milliseconds(2));\n    timer4->enableTimer(std::chrono::milliseconds(3));\n\n    // Advance time by 10ms so timers above all trigger in the same loop iteration.\n    advanceLibeventTime(absl::Milliseconds(10));\n\n    EXPECT_TRUE(timer1->enabled());\n    EXPECT_TRUE(timer2->enabled());\n    EXPECT_TRUE(timer3->enabled());\n    EXPECT_TRUE(timer4->enabled());\n  });\n}\n\n// Change the registration time for a timer that is already activated by re-enabling the timer\n// without calling disableTimer first.\nTEST_P(TimerImplTest, TimerOrderAndReschedule) {\n  ReadyWatcher watcher4;\n  Event::TimerPtr timer4 = dispatcher_->createTimer([&] { watcher4.ready(); });\n\n  ReadyWatcher watcher3;\n  Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); });\n\n  ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] {\n    timer2->enableTimer(std::chrono::milliseconds(0));\n    
timer3->enableTimer(std::chrono::milliseconds(1));\n    watcher1.ready();\n  });\n\n  // Rescheduling timers that are already scheduled to run in the current event loop iteration has\n  // no effect if the time delta is 0. Expect timers 1, 2 and 4 to execute in the original order.\n  // Timer 3 is delayed since it is rescheduled with a non-zero delta.\n  InSequence s;\n  EXPECT_CALL(prepare_watcher_, ready());\n  EXPECT_CALL(watcher1, ready());\n  if (activateTimersNextEventLoop()) {\n    EXPECT_CALL(watcher4, ready());\n    // Sleep during prepare to ensure that enough time has elapsed before timer evaluation to ensure\n    // that timers 2 and 3 are picked up by the same loop iteration. Without the sleep the two\n    // timers could execute in different loop iterations.\n    EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() {\n      advanceLibeventTimeNextIteration(absl::Milliseconds(10));\n    }));\n    EXPECT_CALL(watcher2, ready());\n    EXPECT_CALL(watcher3, ready());\n  } else {\n    EXPECT_CALL(watcher2, ready());\n    EXPECT_CALL(watcher4, ready());\n    // Sleep in prepare cb to avoid flakiness if epoll_wait returns before the timer timeout.\n    EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() {\n      advanceLibeventTimeNextIteration(absl::Milliseconds(10));\n    }));\n    EXPECT_CALL(watcher3, ready());\n  }\n  runInEventLoop([&]() {\n    timer1->enableTimer(std::chrono::milliseconds(0));\n    timer2->enableTimer(std::chrono::milliseconds(1));\n    timer3->enableTimer(std::chrono::milliseconds(2));\n    timer4->enableTimer(std::chrono::milliseconds(3));\n\n    // Advance time by 10ms so timers above all trigger in the same loop iteration.\n    advanceLibeventTime(absl::Milliseconds(10));\n\n    EXPECT_TRUE(timer1->enabled());\n    EXPECT_TRUE(timer2->enabled());\n    EXPECT_TRUE(timer3->enabled());\n    EXPECT_TRUE(timer4->enabled());\n  });\n}\n\nTEST_P(TimerImplTest, TimerChaining) {\n  
ReadyWatcher watcher1;\n  Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); });\n\n  ReadyWatcher watcher2;\n  Event::TimerPtr timer2 = dispatcher_->createTimer([&] {\n    watcher2.ready();\n    timer1->enableTimer(std::chrono::milliseconds(0));\n  });\n\n  ReadyWatcher watcher3;\n  Event::TimerPtr timer3 = dispatcher_->createTimer([&] {\n    watcher3.ready();\n    timer2->enableTimer(std::chrono::milliseconds(0));\n  });\n\n  ReadyWatcher watcher4;\n  Event::TimerPtr timer4 = dispatcher_->createTimer([&] {\n    watcher4.ready();\n    timer3->enableTimer(std::chrono::milliseconds(0));\n  });\n\n  timer4->enableTimer(std::chrono::milliseconds(0));\n\n  EXPECT_FALSE(timer1->enabled());\n  EXPECT_FALSE(timer2->enabled());\n  EXPECT_FALSE(timer3->enabled());\n  EXPECT_TRUE(timer4->enabled());\n  InSequence s;\n  EXPECT_CALL(prepare_watcher_, ready());\n  EXPECT_CALL(watcher4, ready());\n  if (activateTimersNextEventLoop()) {\n    EXPECT_CALL(prepare_watcher_, ready());\n  }\n  EXPECT_CALL(watcher3, ready());\n  if (activateTimersNextEventLoop()) {\n    EXPECT_CALL(prepare_watcher_, ready());\n  }\n  EXPECT_CALL(watcher2, ready());\n  if (activateTimersNextEventLoop()) {\n    EXPECT_CALL(prepare_watcher_, ready());\n  }\n  EXPECT_CALL(watcher1, ready());\n  dispatcher_->run(Dispatcher::RunType::NonBlock);\n\n  EXPECT_FALSE(timer1->enabled());\n  EXPECT_FALSE(timer2->enabled());\n  EXPECT_FALSE(timer3->enabled());\n  EXPECT_FALSE(timer4->enabled());\n}\n\nTEST_P(TimerImplTest, TimerChainDisable) {\n  ReadyWatcher watcher;\n  Event::TimerPtr timer1;\n  Event::TimerPtr timer2;\n  Event::TimerPtr timer3;\n\n  auto timer_cb = [&] {\n    watcher.ready();\n    timer1->disableTimer();\n    timer2->disableTimer();\n    timer3->disableTimer();\n  };\n\n  timer1 = dispatcher_->createTimer(timer_cb);\n  timer2 = dispatcher_->createTimer(timer_cb);\n  timer3 = dispatcher_->createTimer(timer_cb);\n\n  timer3->enableTimer(std::chrono::milliseconds(0));\n  
timer2->enableTimer(std::chrono::milliseconds(0));\n  timer1->enableTimer(std::chrono::milliseconds(0));\n\n  EXPECT_TRUE(timer1->enabled());\n  EXPECT_TRUE(timer2->enabled());\n  EXPECT_TRUE(timer3->enabled());\n  InSequence s;\n  // Only 1 call to watcher ready since the other 2 timers were disabled by the first timer.\n  EXPECT_CALL(prepare_watcher_, ready());\n  EXPECT_CALL(watcher, ready());\n  dispatcher_->run(Dispatcher::RunType::NonBlock);\n}\n\nTEST_P(TimerImplTest, TimerChainDelete) {\n  ReadyWatcher watcher;\n  Event::TimerPtr timer1;\n  Event::TimerPtr timer2;\n  Event::TimerPtr timer3;\n\n  auto timer_cb = [&] {\n    watcher.ready();\n    timer1.reset();\n    timer2.reset();\n    timer3.reset();\n  };\n\n  timer1 = dispatcher_->createTimer(timer_cb);\n  timer2 = dispatcher_->createTimer(timer_cb);\n  timer3 = dispatcher_->createTimer(timer_cb);\n\n  timer3->enableTimer(std::chrono::milliseconds(0));\n  timer2->enableTimer(std::chrono::milliseconds(0));\n  timer1->enableTimer(std::chrono::milliseconds(0));\n\n  EXPECT_TRUE(timer1->enabled());\n  EXPECT_TRUE(timer2->enabled());\n  EXPECT_TRUE(timer3->enabled());\n  InSequence s;\n  // Only 1 call to watcher ready since the other 2 timers were deleted by the first timer.\n  EXPECT_CALL(prepare_watcher_, ready());\n  EXPECT_CALL(watcher, ready());\n  dispatcher_->run(Dispatcher::RunType::NonBlock);\n}\n\nclass TimerImplTimingTest : public testing::Test {\npublic:\n  std::chrono::nanoseconds getTimerTiming(Event::SimulatedTimeSystem& time_system,\n                                          Dispatcher& dispatcher, Event::Timer& timer) {\n    const auto start = time_system.monotonicTime();\n    EXPECT_TRUE(timer.enabled());\n    dispatcher.run(Dispatcher::RunType::NonBlock);\n    while (timer.enabled()) {\n      time_system.advanceTimeAndRun(std::chrono::microseconds(1), dispatcher,\n                                    Dispatcher::RunType::NonBlock);\n#ifdef WIN32\n      // The event loop runs for a single 
iteration in NonBlock mode on Windows. A few iterations\n      // are required to ensure that next iteration callbacks have a chance to run before time\n      // advances once again.\n      dispatcher.run(Dispatcher::RunType::NonBlock);\n      dispatcher.run(Dispatcher::RunType::NonBlock);\n#endif\n    }\n    return time_system.monotonicTime() - start;\n  }\n};\n\n// Test the timer with a series of timings and measure they fire accurately\n// using simulated time. enableTimer() should be precise at the millisecond\n// level, whereas enableHRTimer should be precise at the microsecond level.\n// For good measure, also check that '0'/immediate does what it says on the tin.\nTEST_F(TimerImplTimingTest, TheoreticalTimerTiming) {\n  Event::SimulatedTimeSystem time_system;\n  Api::ApiPtr api = Api::createApiForTest(time_system);\n  DispatcherPtr dispatcher(api->allocateDispatcher(\"test_thread\"));\n  Event::TimerPtr timer = dispatcher->createTimer([&dispatcher] { dispatcher->exit(); });\n\n  const uint64_t timings[] = {0, 10, 50, 1234};\n  for (const uint64_t timing : timings) {\n    std::chrono::milliseconds ms(timing);\n    timer->enableTimer(ms);\n    EXPECT_EQ(std::chrono::duration_cast<std::chrono::milliseconds>(\n                  getTimerTiming(time_system, *dispatcher, *timer))\n                  .count(),\n              timing);\n\n    std::chrono::microseconds us(timing);\n    timer->enableHRTimer(us);\n    EXPECT_EQ(std::chrono::duration_cast<std::chrono::microseconds>(\n                  getTimerTiming(time_system, *dispatcher, *timer))\n                  .count(),\n              timing);\n  }\n}\n\nclass TimerUtilsTest : public testing::Test {\npublic:\n  template <typename Duration>\n  void checkConversion(const Duration& duration, const uint64_t expected_secs,\n                       const uint64_t expected_usecs) {\n    timeval tv;\n    TimerUtils::durationToTimeval(duration, tv);\n    EXPECT_EQ(tv.tv_sec, expected_secs);\n    EXPECT_EQ(tv.tv_usec, 
expected_usecs);\n  }\n};\n\nTEST_F(TimerUtilsTest, TimerNegativeValueThrows) {\n  timeval tv;\n  const int negative_sample = -1;\n  EXPECT_THROW_WITH_MESSAGE(\n      TimerUtils::durationToTimeval(std::chrono::seconds(negative_sample), tv), EnvoyException,\n      fmt::format(\"Negative duration passed to durationToTimeval(): {}\", negative_sample));\n}\n\nTEST_F(TimerUtilsTest, TimerValueConversion) {\n  // Check input is bounded.\n  checkConversion(std::chrono::nanoseconds::duration::max(), INT32_MAX, 0);\n  checkConversion(std::chrono::microseconds::duration::max(), INT32_MAX, 0);\n  checkConversion(std::chrono::milliseconds::duration::max(), INT32_MAX, 0);\n  checkConversion(std::chrono::seconds::duration::max(), INT32_MAX, 0);\n\n  // Test the clipping boundary\n  checkConversion(std::chrono::seconds(INT32_MAX) - std::chrono::seconds(1), INT32_MAX - 1, 0);\n  checkConversion(std::chrono::seconds(INT32_MAX) - std::chrono::nanoseconds(1), INT32_MAX - 1,\n                  999999);\n\n  // Basic test with zero milliseconds.\n  checkConversion(std::chrono::milliseconds(0), 0, 0);\n\n  // 2050 milliseconds is 2 seconds and 50000 microseconds.\n  checkConversion(std::chrono::milliseconds(2050), 2, 50000);\n\n  // Some arbitrary tests for good measure.\n  checkConversion(std::chrono::microseconds(233), 0, 233);\n\n  // Some arbitrary tests for good measure.\n  checkConversion(std::chrono::milliseconds(600014), 600, 14000);\n}\n\n} // namespace\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/event/file_event_impl_test.cc",
    "content": "#include <cstdint>\n\n#include \"envoy/event/file_event.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Event {\nnamespace {\n\nclass FileEventImplTest : public testing::Test {\npublic:\n  FileEventImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")),\n        os_sys_calls_(Api::OsSysCallsSingleton::get()) {}\n\n  void SetUp() override {\n#ifdef WIN32\n    ASSERT_EQ(0, os_sys_calls_.socketpair(AF_INET, SOCK_STREAM, 0, fds_).rc_);\n#else\n    ASSERT_EQ(0, os_sys_calls_.socketpair(AF_UNIX, SOCK_DGRAM, 0, fds_).rc_);\n#endif\n    int data = 1;\n\n    const Api::SysCallSizeResult result = os_sys_calls_.write(fds_[1], &data, sizeof(data));\n    ASSERT_EQ(sizeof(data), static_cast<size_t>(result.rc_));\n  }\n\n  void TearDown() override {\n    os_sys_calls_.close(fds_[0]);\n    os_sys_calls_.close(fds_[1]);\n  }\n\nprotected:\n  os_fd_t fds_[2];\n  Api::ApiPtr api_;\n  DispatcherPtr dispatcher_;\n  Api::OsSysCalls& os_sys_calls_;\n};\n\nclass FileEventImplActivateTest\n    : public testing::TestWithParam<std::tuple<Network::Address::IpVersion, bool>> {\npublic:\n  FileEventImplActivateTest() : os_sys_calls_(Api::OsSysCallsSingleton::get()) {\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.activate_fds_next_event_loop\",\n          activateFdsNextEventLoop() ? 
\"true\" : \"false\"}});\n  }\n\n  static void onWatcherReady(evwatch*, const evwatch_prepare_cb_info*, void* arg) {\n    // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new.\n    auto watcher = static_cast<ReadyWatcher*>(arg);\n    watcher->ready();\n  }\n\n  int domain() {\n    return std::get<0>(GetParam()) == Network::Address::IpVersion::v4 ? AF_INET : AF_INET6;\n  }\n  bool activateFdsNextEventLoop() { return std::get<1>(GetParam()); }\n\nprotected:\n  Api::OsSysCalls& os_sys_calls_;\n  TestScopedRuntime scoped_runtime_;\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    IpVersions, FileEventImplActivateTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool()));\n\nTEST_P(FileEventImplActivateTest, Activate) {\n  os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_;\n  ASSERT_TRUE(SOCKET_VALID(fd));\n\n  Api::ApiPtr api = Api::createApiForTest();\n  DispatcherPtr dispatcher(api->allocateDispatcher(\"test_thread\"));\n  ReadyWatcher read_event;\n  EXPECT_CALL(read_event, ready()).Times(1);\n  ReadyWatcher write_event;\n  EXPECT_CALL(write_event, ready()).Times(1);\n  ReadyWatcher closed_event;\n  EXPECT_CALL(closed_event, ready()).Times(1);\n\n  const FileTriggerType trigger = Event::PlatformDefaultTriggerType;\n\n  Event::FileEventPtr file_event = dispatcher->createFileEvent(\n      fd,\n      [&](uint32_t events) -> void {\n        if (events & FileReadyType::Read) {\n          read_event.ready();\n        }\n\n        if (events & FileReadyType::Write) {\n          write_event.ready();\n        }\n\n        if (events & FileReadyType::Closed) {\n          closed_event.ready();\n        }\n      },\n      trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed);\n\n  file_event->activate(FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed);\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n\n  
os_sys_calls_.close(fd);\n}\n\nTEST_P(FileEventImplActivateTest, ActivateChaining) {\n  os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_;\n  ASSERT_TRUE(SOCKET_VALID(fd));\n\n  Api::ApiPtr api = Api::createApiForTest();\n  DispatcherPtr dispatcher(api->allocateDispatcher(\"test_thread\"));\n  ReadyWatcher fd_event;\n  ReadyWatcher read_event;\n  ReadyWatcher write_event;\n  ReadyWatcher closed_event;\n\n  ReadyWatcher prepare_watcher;\n  evwatch_prepare_new(&static_cast<DispatcherImpl*>(dispatcher.get())->base(), onWatcherReady,\n                      &prepare_watcher);\n\n  const FileTriggerType trigger = Event::PlatformDefaultTriggerType;\n\n  Event::FileEventPtr file_event = dispatcher->createFileEvent(\n      fd,\n      [&](uint32_t events) -> void {\n        fd_event.ready();\n        if (events & FileReadyType::Read) {\n          read_event.ready();\n          file_event->activate(FileReadyType::Write);\n          file_event->activate(FileReadyType::Closed);\n        }\n\n        if (events & FileReadyType::Write) {\n          write_event.ready();\n          file_event->activate(FileReadyType::Closed);\n        }\n\n        if (events & FileReadyType::Closed) {\n          closed_event.ready();\n        }\n      },\n      trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed);\n\n  testing::InSequence s;\n  // First loop iteration: handle scheduled read event and the real write event produced by poll.\n  // Note that the real and injected events are combined and delivered in a single call to the fd\n  // callback.\n  EXPECT_CALL(prepare_watcher, ready());\n  EXPECT_CALL(fd_event, ready());\n  EXPECT_CALL(read_event, ready());\n  EXPECT_CALL(write_event, ready());\n  if (activateFdsNextEventLoop()) {\n    // Second loop iteration: handle write and close events scheduled while handling read.\n    EXPECT_CALL(prepare_watcher, ready());\n    EXPECT_CALL(fd_event, ready());\n    EXPECT_CALL(write_event, ready());\n    
EXPECT_CALL(closed_event, ready());\n    // Third loop iteration: handle close event scheduled while handling write.\n    EXPECT_CALL(prepare_watcher, ready());\n    EXPECT_CALL(fd_event, ready());\n    EXPECT_CALL(closed_event, ready());\n    // Fourth loop iteration: poll returned no new real events.\n    EXPECT_CALL(prepare_watcher, ready());\n  } else {\n    // Same loop iteration activation: handle write and close events scheduled while handling read.\n    EXPECT_CALL(fd_event, ready());\n    EXPECT_CALL(write_event, ready());\n    EXPECT_CALL(closed_event, ready());\n    // Second same loop iteration activation: handle close event scheduled while handling write.\n    EXPECT_CALL(fd_event, ready());\n    EXPECT_CALL(closed_event, ready());\n    // Second loop iteration: poll returned no new real events.\n    EXPECT_CALL(prepare_watcher, ready());\n  }\n\n  file_event->activate(FileReadyType::Read);\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n\n  os_sys_calls_.close(fd);\n}\n\nTEST_P(FileEventImplActivateTest, SetEnableCancelsActivate) {\n  os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_;\n  ASSERT_TRUE(SOCKET_VALID(fd));\n\n  Api::ApiPtr api = Api::createApiForTest();\n  DispatcherPtr dispatcher(api->allocateDispatcher(\"test_thread\"));\n  ReadyWatcher fd_event;\n  ReadyWatcher read_event;\n  ReadyWatcher write_event;\n  ReadyWatcher closed_event;\n\n  ReadyWatcher prepare_watcher;\n  evwatch_prepare_new(&static_cast<DispatcherImpl*>(dispatcher.get())->base(), onWatcherReady,\n                      &prepare_watcher);\n\n  const FileTriggerType trigger = Event::PlatformDefaultTriggerType;\n\n  Event::FileEventPtr file_event = dispatcher->createFileEvent(\n      fd,\n      [&](uint32_t events) -> void {\n        fd_event.ready();\n        if (events & FileReadyType::Read) {\n          read_event.ready();\n          file_event->activate(FileReadyType::Closed);\n          file_event->setEnabled(FileReadyType::Write | 
FileReadyType::Closed);\n        }\n\n        if (events & FileReadyType::Write) {\n          write_event.ready();\n        }\n\n        if (events & FileReadyType::Closed) {\n          closed_event.ready();\n        }\n      },\n      trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed);\n\n  testing::InSequence s;\n  // First loop iteration: handle scheduled read event and the real write event produced by poll.\n  // Note that the real and injected events are combined and delivered in a single call to the fd\n  // callback.\n  EXPECT_CALL(prepare_watcher, ready());\n  EXPECT_CALL(fd_event, ready());\n  EXPECT_CALL(read_event, ready());\n  EXPECT_CALL(write_event, ready());\n  // Second loop iteration: handle real write event after resetting event mask via setEnabled. Close\n  // injected event is discarded by the setEnable call.\n  EXPECT_CALL(prepare_watcher, ready());\n  EXPECT_CALL(fd_event, ready());\n  EXPECT_CALL(write_event, ready());\n  // Third loop iteration: poll returned no new real events.\n  EXPECT_CALL(prepare_watcher, ready());\n\n  file_event->activate(FileReadyType::Read);\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n\n  os_sys_calls_.close(fd);\n}\n\n#ifndef WIN32 // Libevent on Windows doesn't support edge trigger.\nTEST_F(FileEventImplTest, EdgeTrigger) {\n  ReadyWatcher read_event;\n  EXPECT_CALL(read_event, ready()).Times(1);\n  ReadyWatcher write_event;\n  EXPECT_CALL(write_event, ready()).Times(1);\n\n  Event::FileEventPtr file_event = dispatcher_->createFileEvent(\n      fds_[0],\n      [&](uint32_t events) -> void {\n        if (events & FileReadyType::Read) {\n          read_event.ready();\n        }\n\n        if (events & FileReadyType::Write) {\n          write_event.ready();\n        }\n      },\n      FileTriggerType::Edge, FileReadyType::Read | FileReadyType::Write);\n\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n}\n#endif\n\nTEST_F(FileEventImplTest, LevelTrigger) {\n  
ReadyWatcher read_event;\n  EXPECT_CALL(read_event, ready()).Times(2);\n  ReadyWatcher write_event;\n  EXPECT_CALL(write_event, ready()).Times(2);\n\n  int count = 2;\n  Event::FileEventPtr file_event = dispatcher_->createFileEvent(\n      fds_[0],\n      [&](uint32_t events) -> void {\n        if (count-- == 0) {\n          dispatcher_->exit();\n          return;\n        }\n        if (events & FileReadyType::Read) {\n          read_event.ready();\n        }\n\n        if (events & FileReadyType::Write) {\n          write_event.ready();\n        }\n      },\n      FileTriggerType::Level, FileReadyType::Read | FileReadyType::Write);\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_F(FileEventImplTest, SetEnabled) {\n  ReadyWatcher read_event;\n  EXPECT_CALL(read_event, ready()).Times(2);\n  ReadyWatcher write_event;\n  EXPECT_CALL(write_event, ready()).Times(2);\n\n  const FileTriggerType trigger = Event::PlatformDefaultTriggerType;\n\n  Event::FileEventPtr file_event = dispatcher_->createFileEvent(\n      fds_[0],\n      [&](uint32_t events) -> void {\n        if (events & FileReadyType::Read) {\n          read_event.ready();\n        }\n\n        if (events & FileReadyType::Write) {\n          write_event.ready();\n        }\n      },\n      trigger, FileReadyType::Read | FileReadyType::Write);\n\n  file_event->setEnabled(FileReadyType::Read);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  file_event->setEnabled(FileReadyType::Write);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  file_event->setEnabled(0);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  file_event->setEnabled(FileReadyType::Read | FileReadyType::Write);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n}\n\n} // namespace\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/event/scaled_range_timer_manager_test.cc",
    "content": "#include <chrono>\n\n#include \"envoy/event/timer.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/event/scaled_range_timer_manager.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/wrapped_dispatcher.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Event {\nnamespace {\n\nusing testing::ElementsAre;\nusing testing::InSequence;\nusing testing::MockFunction;\n\nclass ScopeTrackingDispatcher : public WrappedDispatcher {\npublic:\n  ScopeTrackingDispatcher(DispatcherPtr dispatcher)\n      : WrappedDispatcher(*dispatcher), dispatcher_(std::move(dispatcher)) {}\n\n  const ScopeTrackedObject* setTrackedObject(const ScopeTrackedObject* object) override {\n    scope_ = object;\n    return impl_.setTrackedObject(object);\n  }\n\n  const ScopeTrackedObject* scope_{nullptr};\n\n  Dispatcher* impl() const { return dispatcher_.get(); }\n\nprivate:\n  DispatcherPtr dispatcher_;\n};\n\nclass ScaledRangeTimerManagerTest : public testing::Test, public TestUsingSimulatedTime {\npublic:\n  ScaledRangeTimerManagerTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  Api::ApiPtr api_;\n  ScopeTrackingDispatcher dispatcher_;\n};\n\nstruct TrackedTimer {\n  explicit TrackedTimer(ScaledRangeTimerManager& manager, TimeSystem& time_system)\n      : timer(manager.createTimer([trigger_times = trigger_times.get(), &time_system] {\n          trigger_times->push_back(time_system.monotonicTime());\n        })) {}\n  std::unique_ptr<std::vector<MonotonicTime>> trigger_times{\n      std::make_unique<std::vector<MonotonicTime>>()};\n  RangeTimerPtr timer;\n};\n\nTEST_F(ScaledRangeTimerManagerTest, CreateAndDestroy) {\n  ScaledRangeTimerManager manager(dispatcher_);\n}\n\nTEST_F(ScaledRangeTimerManagerTest, CreateAndDestroyTimer) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  {\n    MockFunction<TimerCb> 
callback;\n    auto timer = manager.createTimer(callback.AsStdFunction());\n  }\n}\n\nTEST_F(ScaledRangeTimerManagerTest, CreateSingleScaledTimer) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n\n  timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(10));\n  EXPECT_TRUE(timer->enabled());\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n  EXPECT_TRUE(timer->enabled());\n\n  EXPECT_CALL(callback, Call());\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n  EXPECT_FALSE(timer->enabled());\n}\n\nTEST_F(ScaledRangeTimerManagerTest, EnableAndDisableTimer) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n\n  timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(30));\n  EXPECT_TRUE(timer->enabled());\n\n  timer->disableTimer();\n  EXPECT_FALSE(timer->enabled());\n\n  // Provide some additional guarantee of safety by running the dispatcher for a little bit. 
This\n  // should be a no-op, and if not (because a timer was fired), that's a problem that will be caught\n  // by the strict mock callback.\n  simTime().advanceTimeAndRun(std::chrono::seconds(10), dispatcher_, Dispatcher::RunType::Block);\n}\n\nTEST_F(ScaledRangeTimerManagerTest, DisableWhileDisabled) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n\n  EXPECT_FALSE(timer->enabled());\n  timer->disableTimer();\n\n  EXPECT_FALSE(timer->enabled());\n}\n\nTEST_F(ScaledRangeTimerManagerTest, DisableWhileWaitingForMin) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n  timer->enableTimer(std::chrono::seconds(10), std::chrono::seconds(100));\n  EXPECT_TRUE(timer->enabled());\n\n  timer->disableTimer();\n  EXPECT_FALSE(timer->enabled());\n}\n\nTEST_F(ScaledRangeTimerManagerTest, DisableWhileScalingMax) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n\n  timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(100));\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n\n  EXPECT_TRUE(timer->enabled());\n\n  timer->disableTimer();\n  EXPECT_FALSE(timer->enabled());\n\n  // Run the dispatcher to make sure nothing happens when it's not supposed to.\n  simTime().advanceTimeAndRun(std::chrono::seconds(100), dispatcher_, Dispatcher::RunType::Block);\n}\n\nTEST_F(ScaledRangeTimerManagerTest, DisableWithZeroMinTime) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n\n  timer->enableTimer(std::chrono::seconds(0), std::chrono::seconds(100));\n\n  EXPECT_TRUE(timer->enabled());\n\n  timer->disableTimer();\n  
EXPECT_FALSE(timer->enabled());\n\n  // Run the dispatcher to make sure nothing happens when it's not supposed to.\n  simTime().advanceTimeAndRun(std::chrono::seconds(100), dispatcher_, Dispatcher::RunType::Block);\n}\n\nTEST_F(ScaledRangeTimerManagerTest, TriggerWithZeroMinTime) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n\n  timer->enableTimer(std::chrono::seconds(0), std::chrono::seconds(10));\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(9), dispatcher_, Dispatcher::RunType::Block);\n  EXPECT_CALL(callback, Call);\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n}\n\nTEST_F(ScaledRangeTimerManagerTest, DisableFrontScalingMaxTimer) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback1, callback2;\n  auto timer1 = manager.createTimer(callback1.AsStdFunction());\n  auto timer2 = manager.createTimer(callback2.AsStdFunction());\n\n  // These timers have the same max-min.\n  timer1->enableTimer(std::chrono::seconds(5), std::chrono::seconds(30));\n  timer2->enableTimer(std::chrono::seconds(10), std::chrono::seconds(35));\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n\n  timer1->disableTimer();\n  EXPECT_FALSE(timer1->enabled());\n  ASSERT_TRUE(timer2->enabled());\n\n  // Check that timer2 doesn't trigger when timer1 was originally going to, at start+30.\n  simTime().advanceTimeAndRun(std::chrono::seconds(20), dispatcher_, Dispatcher::RunType::Block);\n\n  // Advancing to timer2's max should trigger it.\n  EXPECT_CALL(callback2, Call);\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n}\n\nTEST_F(ScaledRangeTimerManagerTest, DisableLaterScalingMaxTimer) {\n  
ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback1, callback2;\n  auto timer1 = manager.createTimer(callback1.AsStdFunction());\n  auto timer2 = manager.createTimer(callback2.AsStdFunction());\n\n  // These timers have the same max-min.\n  timer1->enableTimer(std::chrono::seconds(5), std::chrono::seconds(30));\n  timer2->enableTimer(std::chrono::seconds(10), std::chrono::seconds(35));\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n\n  timer2->disableTimer();\n  EXPECT_FALSE(timer2->enabled());\n  ASSERT_TRUE(timer1->enabled());\n\n  // After the original windows for both timers have long expired, only the enabled one should fire.\n  EXPECT_CALL(callback1, Call);\n  simTime().advanceTimeAndRun(std::chrono::seconds(100), dispatcher_, Dispatcher::RunType::Block);\n}\n\nclass ScaledRangeTimerManagerTestWithScope : public ScaledRangeTimerManagerTest,\n                                             public testing::WithParamInterface<bool> {\npublic:\n  ScopeTrackedObject* getScope() { return GetParam() ? 
&scope_ : nullptr; }\n  MockScopedTrackedObject scope_;\n};\n\nTEST_P(ScaledRangeTimerManagerTestWithScope, ReRegisterOnCallback) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n\n  EXPECT_EQ(dispatcher_.scope_, nullptr);\n  {\n    InSequence s;\n    EXPECT_CALL(callback, Call).WillOnce([&] {\n      EXPECT_EQ(dispatcher_.scope_, getScope());\n      timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(2), getScope());\n    });\n    EXPECT_CALL(callback, Call).WillOnce([&] { EXPECT_EQ(dispatcher_.scope_, getScope()); });\n  }\n\n  timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(2), getScope());\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n\n  EXPECT_EQ(dispatcher_.scope_, nullptr);\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n  EXPECT_EQ(dispatcher_.scope_, nullptr);\n\n  EXPECT_TRUE(timer->enabled());\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n\n  EXPECT_FALSE(timer->enabled());\n};\n\nTEST_P(ScaledRangeTimerManagerTestWithScope, ScheduleWithScalingFactorZero) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n  manager.setScaleFactor(0);\n\n  EXPECT_CALL(callback, Call).WillOnce([&] { EXPECT_EQ(dispatcher_.scope_, getScope()); });\n\n  timer->enableTimer(std::chrono::seconds(0), std::chrono::seconds(1), getScope());\n  simTime().advanceTimeAndRun(std::chrono::milliseconds(1), dispatcher_,\n                              Dispatcher::RunType::Block);\n}\n\nINSTANTIATE_TEST_SUITE_P(WithAndWithoutScope, ScaledRangeTimerManagerTestWithScope,\n                         
testing::Bool());\n\nTEST_F(ScaledRangeTimerManagerTest, SingleTimerTriggeredNoScaling) {\n  ScaledRangeTimerManager manager(dispatcher_);\n  bool triggered = false;\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n  EXPECT_CALL(callback, Call()).WillOnce([&] { triggered = true; });\n\n  timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(9));\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n  EXPECT_FALSE(triggered);\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(4) - std::chrono::milliseconds(1), dispatcher_,\n                              Dispatcher::RunType::Block);\n  EXPECT_FALSE(triggered);\n\n  simTime().advanceTimeAndRun(std::chrono::milliseconds(1), dispatcher_,\n                              Dispatcher::RunType::Block);\n  EXPECT_TRUE(triggered);\n}\n\nTEST_F(ScaledRangeTimerManagerTest, SingleTimerSameMinMax) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback;\n  auto timer = manager.createTimer(callback.AsStdFunction());\n  EXPECT_CALL(callback, Call());\n\n  timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(1));\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n  EXPECT_FALSE(timer->enabled());\n}\n\nTEST_F(ScaledRangeTimerManagerTest, MultipleTimersNoScaling) {\n  ScaledRangeTimerManager manager(dispatcher_);\n  std::vector<TrackedTimer> timers;\n  timers.reserve(3);\n\n  const MonotonicTime start = simTime().monotonicTime();\n  for (int i = 0; i < 3; ++i) {\n    timers.emplace_back(manager, simTime());\n  }\n\n  timers[0].timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(3));\n  timers[1].timer->enableTimer(std::chrono::seconds(2), std::chrono::seconds(6));\n  timers[2].timer->enableTimer(std::chrono::seconds(0), std::chrono::seconds(9));\n\n  for (int i = 0; i < 10; ++i) {\n    
simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n  }\n\n  EXPECT_THAT(*timers[0].trigger_times, ElementsAre(start + std::chrono::seconds(3)));\n  EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(6)));\n  EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(9)));\n}\n\nTEST_F(ScaledRangeTimerManagerTest, MultipleTimersWithScaling) {\n  ScaledRangeTimerManager manager(dispatcher_);\n  std::vector<TrackedTimer> timers;\n  timers.reserve(3);\n\n  for (int i = 0; i < 3; ++i) {\n    timers.emplace_back(manager, simTime());\n  }\n\n  const MonotonicTime start = simTime().monotonicTime();\n\n  timers[0].timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(3));\n  timers[1].timer->enableTimer(std::chrono::seconds(2), std::chrono::seconds(6));\n  timers[2].timer->enableTimer(std::chrono::seconds(6), std::chrono::seconds(10));\n\n  manager.setScaleFactor(0.5);\n\n  // Advance time to start = 1 second, so timers[0] hits its min.\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n\n  // Advance time to start = 2, which should make timers[0] hit its scaled max.\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n\n  // At 4x speed, timers[1] will fire in only 1 second.\n  manager.setScaleFactor(0.25);\n\n  // Advance time to start = 3, which should make timers[1] hit its scaled max.\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n\n  // Advance time to start = 6, which is the minimum required for timers[2] to fire.\n  simTime().advanceTimeAndRun(std::chrono::seconds(3), dispatcher_, Dispatcher::RunType::Block);\n\n  manager.setScaleFactor(0);\n  // With a scale factor of 0, timers[2] should be ready to be fired immediately.\n  dispatcher_.run(Dispatcher::RunType::Block);\n\n  EXPECT_THAT(*timers[0].trigger_times, 
ElementsAre(start + std::chrono::seconds(2)));\n  EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(3)));\n  EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(6)));\n}\n\nTEST_F(ScaledRangeTimerManagerTest, MultipleTimersSameTimes) {\n  ScaledRangeTimerManager manager(dispatcher_);\n  std::vector<TrackedTimer> timers;\n  timers.reserve(3);\n\n  const MonotonicTime start = simTime().monotonicTime();\n\n  for (int i = 0; i < 3; ++i) {\n    timers.emplace_back(manager, simTime());\n    timers[i].timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(2));\n  }\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n\n  EXPECT_THAT(*timers[0].trigger_times, ElementsAre(start + std::chrono::seconds(2)));\n  EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(2)));\n  EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(2)));\n}\n\nTEST_F(ScaledRangeTimerManagerTest, MultipleTimersSameTimesFastClock) {\n  ScaledRangeTimerManager manager(dispatcher_);\n  std::vector<TrackedTimer> timers;\n  timers.reserve(3);\n\n  const MonotonicTime start = simTime().monotonicTime();\n\n  for (int i = 0; i < 3; ++i) {\n    timers.emplace_back(manager, simTime());\n    timers[i].timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(2));\n  }\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n  // The clock runs fast here before the dispatcher gets to the timer callbacks.\n  simTime().advanceTimeAndRun(std::chrono::seconds(2), dispatcher_, Dispatcher::RunType::Block);\n\n  EXPECT_THAT(*timers[0].trigger_times, ElementsAre(start + std::chrono::seconds(3)));\n  EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(3)));\n  
EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(3)));\n}\n\nTEST_F(ScaledRangeTimerManagerTest, ScheduledWithScalingFactorZero) {\n  ScaledRangeTimerManager manager(dispatcher_);\n  manager.setScaleFactor(0);\n\n  TrackedTimer timer(manager, simTime());\n\n  // The timer should fire at start = 4 since the scaling factor is 0.\n  const MonotonicTime start = simTime().monotonicTime();\n  timer.timer->enableTimer(std::chrono::seconds(4), std::chrono::seconds(10));\n\n  for (int i = 0; i < 10; ++i) {\n    simTime().advanceTimeAndRun(std::chrono::seconds(4), dispatcher_, Dispatcher::RunType::Block);\n  }\n\n  EXPECT_THAT(*timer.trigger_times, ElementsAre(start + std::chrono::seconds(4)));\n}\n\nTEST_F(ScaledRangeTimerManagerTest, ScheduledWithMaxBeforeMin) {\n  // When max < min, the timer behaves the same as if max == min. This ensures that min is always\n  // respected, and max is respected as much as possible.\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  TrackedTimer timer(manager, simTime());\n\n  const MonotonicTime start = simTime().monotonicTime();\n  timer.timer->enableTimer(std::chrono::seconds(4), std::chrono::seconds(3));\n\n  for (int i = 0; i < 10; ++i) {\n    simTime().advanceTimeAndRun(std::chrono::seconds(4), dispatcher_, Dispatcher::RunType::Block);\n  }\n\n  EXPECT_THAT(*timer.trigger_times, ElementsAre(start + std::chrono::seconds(4)));\n}\n\nTEST_F(ScaledRangeTimerManagerTest, MultipleTimersTriggeredInTheSameEventLoopIteration) {\n  ScaledRangeTimerManager manager(dispatcher_);\n\n  MockFunction<TimerCb> callback1, callback2, callback3;\n  auto timer1 = manager.createTimer(callback1.AsStdFunction());\n  auto timer2 = manager.createTimer(callback2.AsStdFunction());\n  auto timer3 = manager.createTimer(callback3.AsStdFunction());\n\n  timer1->enableTimer(std::chrono::seconds(5), std::chrono::seconds(10));\n  timer2->enableTimer(std::chrono::seconds(5), std::chrono::seconds(10));\n  
timer3->enableTimer(std::chrono::seconds(5), std::chrono::seconds(10));\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n\n  DispatcherImpl* dispatcher_impl = static_cast<DispatcherImpl*>(dispatcher_.impl());\n  ASSERT(dispatcher_impl != nullptr);\n\n  ReadyWatcher prepare_watcher;\n  evwatch_prepare_new(\n      &dispatcher_impl->base(),\n      +[](evwatch*, const evwatch_prepare_cb_info*, void* arg) {\n        // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new.\n        auto watcher = static_cast<ReadyWatcher*>(arg);\n        watcher->ready();\n      },\n      &prepare_watcher);\n\n  ReadyWatcher schedulable_watcher;\n  SchedulableCallbackPtr schedulable_callback =\n      dispatcher_.createSchedulableCallback([&] { schedulable_watcher.ready(); });\n\n  testing::Expectation first_prepare = EXPECT_CALL(prepare_watcher, ready());\n  testing::ExpectationSet after_first_prepare;\n  after_first_prepare +=\n      EXPECT_CALL(schedulable_watcher, ready()).After(first_prepare).WillOnce([&] {\n        schedulable_callback->scheduleCallbackNextIteration();\n      });\n  after_first_prepare += EXPECT_CALL(callback1, Call).After(first_prepare);\n  after_first_prepare += EXPECT_CALL(callback2, Call).After(first_prepare);\n  after_first_prepare += EXPECT_CALL(callback3, Call).After(first_prepare);\n  testing::Expectation second_prepare =\n      EXPECT_CALL(prepare_watcher, ready()).After(after_first_prepare).WillOnce([&] {\n        schedulable_callback->scheduleCallbackNextIteration();\n      });\n  EXPECT_CALL(schedulable_watcher, ready()).After(second_prepare);\n\n  // Running outside the event loop, this should schedule a run on the next event loop iteration.\n  schedulable_callback->scheduleCallbackNextIteration();\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n  dispatcher_.run(Dispatcher::RunType::Block);\n}\n\nTEST_F(ScaledRangeTimerManagerTest, 
MultipleTimersWithChangeInScalingFactor) {\n  ScaledRangeTimerManager manager(dispatcher_);\n  const MonotonicTime start = simTime().monotonicTime();\n\n  std::vector<TrackedTimer> timers;\n  timers.reserve(4);\n  for (int i = 0; i < 4; i++) {\n    timers.emplace_back(manager, simTime());\n  }\n\n  timers[0].timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(15));\n  timers[1].timer->enableTimer(std::chrono::seconds(12), std::chrono::seconds(14));\n\n  manager.setScaleFactor(0.1);\n\n  timers[2].timer->enableTimer(std::chrono::seconds(7), std::chrono::seconds(21));\n  timers[3].timer->enableTimer(std::chrono::seconds(10), std::chrono::seconds(16));\n\n  // Advance to timer 0's min.\n  simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block);\n\n  manager.setScaleFactor(0.5);\n\n  // Now that the scale factor is 0.5, fire times are 0: start+10, 1: start+13, 2: start+14, 3:\n  // start+13. Advance to timer 2's min.\n  simTime().advanceTimeAndRun(std::chrono::seconds(2), dispatcher_, Dispatcher::RunType::Block);\n\n  // Advance to time start+9.\n  simTime().advanceTimeAndRun(std::chrono::seconds(2), dispatcher_, Dispatcher::RunType::Block);\n\n  manager.setScaleFactor(0.1);\n  // Now that the scale factor is reduced, fire times are 0: start+6, 1: start+12.2,\n  // 2: start+8.4, 3: start+10.6. Timers 0 and 2 should fire immediately since their\n  // trigger times are in the past.\n  dispatcher_.run(Dispatcher::RunType::Block);\n  EXPECT_THAT(*timers[0].trigger_times, ElementsAre(start + std::chrono::seconds(9)));\n  EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(9)));\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n\n  // The time is now start+10. 
Re-enable timer 0.\n  ASSERT_FALSE(timers[0].timer->enabled());\n  timers[0].timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(13));\n\n  // Fire times are now 0: start+19, 1: start+13, 2: none, 3: start+13.\n  manager.setScaleFactor(0.5);\n\n  // Advance to timer 1's min.\n  simTime().advanceTimeAndRun(std::chrono::seconds(2), dispatcher_, Dispatcher::RunType::Block);\n\n  // Advance again to start+13, which should trigger both timers 1 and 3.\n  simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block);\n  EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(13)));\n  EXPECT_THAT(*timers[3].trigger_times, ElementsAre(start + std::chrono::seconds(13)));\n\n  simTime().advanceTimeAndRun(std::chrono::seconds(3), dispatcher_, Dispatcher::RunType::Block);\n\n  // The time is now start+16. Setting the scale factor to 0 should make timer 0 fire immediately.\n  manager.setScaleFactor(0);\n  dispatcher_.run(Dispatcher::RunType::Block);\n  EXPECT_THAT(*timers[0].trigger_times,\n              ElementsAre(start + std::chrono::seconds(9), start + std::chrono::seconds(16)));\n}\n\n} // namespace\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/filesystem/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"filesystem_impl_test\",\n    srcs = [\"filesystem_impl_test.cc\"],\n    deps = [\n        \"//source/common/filesystem:filesystem_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"directory_test\",\n    srcs = [\"directory_test.cc\"],\n    deps = [\n        \"//source/common/filesystem:directory_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"watcher_impl_test\",\n    srcs = [\"watcher_impl_test.cc\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/filesystem:watcher_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/filesystem/directory_test.cc",
    "content": "#include <fstream>\n#include <stack>\n#include <string>\n\n#include \"common/filesystem/directory.h\"\n\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nclass DirectoryTest : public testing::Test {\npublic:\n  DirectoryTest() : dir_path_(TestEnvironment::temporaryPath(\"envoy_test\")) {\n    files_to_remove_.push(dir_path_);\n  }\n\nprotected:\n  void SetUp() override { TestEnvironment::createPath(dir_path_); }\n\n  void TearDown() override {\n    while (!files_to_remove_.empty()) {\n      const std::string& f = files_to_remove_.top();\n      TestEnvironment::removePath(f);\n      files_to_remove_.pop();\n    }\n  }\n\n  void addSubDirs(std::list<std::string> sub_dirs) {\n    for (const std::string& dir_name : sub_dirs) {\n      const std::string full_path = dir_path_ + \"/\" + dir_name;\n      TestEnvironment::createPath(full_path);\n      files_to_remove_.push(full_path);\n    }\n  }\n\n  void addFiles(std::list<std::string> files) {\n    for (const std::string& file_name : files) {\n      const std::string full_path = dir_path_ + \"/\" + file_name;\n      { const std::ofstream file(full_path); }\n      files_to_remove_.push(full_path);\n    }\n  }\n\n  void addSymlinks(std::list<std::pair<std::string, std::string>> symlinks) {\n    for (const auto& link : symlinks) {\n      const std::string target_path = dir_path_ + \"/\" + link.first;\n      const std::string link_path = dir_path_ + \"/\" + link.second;\n      TestEnvironment::createSymlink(target_path, link_path);\n      files_to_remove_.push(link_path);\n    }\n  }\n\n  const std::string dir_path_;\n  std::stack<std::string> files_to_remove_;\n};\n\nstruct EntryHash {\n  std::size_t operator()(DirectoryEntry const& e) const noexcept {\n    return std::hash<std::string>{}(e.name_);\n  }\n};\n\nusing EntrySet 
= absl::node_hash_set<DirectoryEntry, EntryHash>;\n\nEntrySet getDirectoryContents(const std::string& dir_path, bool recursive) {\n  Directory directory(dir_path);\n  EntrySet ret;\n  for (const DirectoryEntry& entry : directory) {\n    ret.insert(entry);\n    if (entry.type_ == FileType::Directory && entry.name_ != \".\" && entry.name_ != \"..\" &&\n        recursive) {\n      std::string subdir_name = entry.name_;\n      EntrySet subdir = getDirectoryContents(dir_path + \"/\" + subdir_name, recursive);\n      for (const DirectoryEntry& entry : subdir) {\n        ret.insert({subdir_name + \"/\" + entry.name_, entry.type_});\n      }\n    }\n  }\n  return ret;\n}\n\n// Test that we can list a file in a directory\nTEST_F(DirectoryTest, DirectoryWithOneFile) {\n  addFiles({\"file\"});\n\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n      {\"file\", FileType::Regular},\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path_, false));\n}\n\n// Test that we can list a sub directory in a directory\nTEST_F(DirectoryTest, DirectoryWithOneDirectory) {\n  addSubDirs({\"sub_dir\"});\n\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n      {\"sub_dir\", FileType::Directory},\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path_, false));\n}\n\n// Test that we do not recurse into directories when listing files\nTEST_F(DirectoryTest, DirectoryWithFileInSubDirectory) {\n  addSubDirs({\"sub_dir\"});\n  addFiles({\"sub_dir/sub_file\"});\n\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n      {\"sub_dir\", FileType::Directory},\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path_, false));\n}\n\n// Test that when recursively creating DirectoryIterators, they do not interfere with each other\nTEST_F(DirectoryTest, RecursionIntoSubDirectory) {\n  addSubDirs({\"sub_dir\"});\n  
addFiles({\"file\", \"sub_dir/sub_file\"});\n\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n      {\"file\", FileType::Regular},\n      {\"sub_dir\", FileType::Directory},\n      {\"sub_dir/sub_file\", FileType::Regular},\n      {\"sub_dir/.\", FileType::Directory},\n      {\"sub_dir/..\", FileType::Directory},\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path_, true));\n}\n\n// Test that we can list a file and a sub directory in a directory\nTEST_F(DirectoryTest, DirectoryWithFileAndDirectory) {\n  addSubDirs({\"sub_dir\"});\n  addFiles({\"file\"});\n\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n      {\"sub_dir\", FileType::Directory},\n      {\"file\", FileType::Regular},\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path_, false));\n}\n\n// Test that a symlink to a file has type FileType::Regular\nTEST_F(DirectoryTest, DirectoryWithSymlinkToFile) {\n  addFiles({\"file\"});\n  addSymlinks({{\"file\", \"link\"}});\n\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n      {\"file\", FileType::Regular},\n      {\"link\", FileType::Regular},\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path_, false));\n}\n\n// Test that a symlink to a directory has type FileType::Directory\nTEST_F(DirectoryTest, DirectoryWithSymlinkToDirectory) {\n  addSubDirs({\"sub_dir\"});\n  addSymlinks({{\"sub_dir\", \"link_dir\"}});\n\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n      {\"sub_dir\", FileType::Directory},\n      {\"link_dir\", FileType::Directory},\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path_, false));\n}\n\n// Test that a broken symlink can be listed\nTEST_F(DirectoryTest, DirectoryWithBrokenSymlink) {\n  addSubDirs({\"sub_dir\"});\n  addSymlinks({{\"sub_dir\", \"link_dir\"}});\n  
TestEnvironment::removePath(dir_path_ + \"/sub_dir\");\n\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n#ifndef WIN32\n      // On Linux, a broken directory link is simply a symlink to be rm'ed\n      {\"link_dir\", FileType::Regular},\n#else\n      // On Windows, a broken directory link remains a directory link to be rmdir'ed\n      {\"link_dir\", FileType::Directory},\n#endif\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path_, false));\n}\n\n// Test that we can list an empty directory\nTEST_F(DirectoryTest, DirectoryWithEmptyDirectory) {\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path_, false));\n}\n\n// Test that the constructor throws an exception when given a non-existing path\nTEST(DirectoryIteratorImpl, NonExistingDir) {\n  const std::string dir_path(\"some/non/existing/dir\");\n\n#ifdef WIN32\n  EXPECT_THROW_WITH_MESSAGE(\n      DirectoryIteratorImpl dir_iterator(dir_path), EnvoyException,\n      fmt::format(\"unable to open directory {}: {}\", dir_path, ERROR_PATH_NOT_FOUND));\n#else\n  EXPECT_THROW_WITH_MESSAGE(\n      DirectoryIteratorImpl dir_iterator(dir_path), EnvoyException,\n      fmt::format(\"unable to open directory {}: No such file or directory\", dir_path));\n#endif\n}\n\n// Test that we correctly handle trailing path separators\nTEST(Directory, DirectoryHasTrailingPathSeparator) {\n#ifdef WIN32\n  const std::string dir_path(TestEnvironment::temporaryPath(\"envoy_test\") + \"\\\\\");\n#else\n  const std::string dir_path(TestEnvironment::temporaryPath(\"envoy_test\") + \"/\");\n#endif\n  TestEnvironment::createPath(dir_path);\n\n  const EntrySet expected = {\n      {\".\", FileType::Directory},\n      {\"..\", FileType::Directory},\n  };\n  EXPECT_EQ(expected, getDirectoryContents(dir_path, false));\n  TestEnvironment::removePath(dir_path);\n}\n\n} // namespace 
Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/filesystem/filesystem_impl_test.cc",
    "content": "#include <chrono>\n#include <string>\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/filesystem/filesystem_impl.h\"\n\n#include \"test/test_common/environment.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nstatic constexpr FlagSet DefaultFlags{\n    1 << Filesystem::File::Operation::Read | 1 << Filesystem::File::Operation::Write |\n    1 << Filesystem::File::Operation::Create | 1 << Filesystem::File::Operation::Append};\n\nclass FileSystemImplTest : public testing::Test {\nprotected:\n  filesystem_os_id_t getFd(File* file) {\n#ifdef WIN32\n    auto file_impl = dynamic_cast<FileImplWin32*>(file);\n#else\n    auto file_impl = dynamic_cast<FileImplPosix*>(file);\n#endif\n    RELEASE_ASSERT(file_impl != nullptr, \"failed to cast File* to FileImpl*\");\n    return file_impl->fd_;\n  }\n#ifdef WIN32\n  InstanceImplWin32 file_system_;\n#else\n  Api::SysCallStringResult canonicalPath(const std::string& path) {\n    return file_system_.canonicalPath(path);\n  }\n  InstanceImplPosix file_system_;\n#endif\n};\n\nTEST_F(FileSystemImplTest, FileExists) {\n  EXPECT_FALSE(file_system_.fileExists(\"/dev/blahblahblah\"));\n#ifdef WIN32\n  const std::string file_path = TestEnvironment::writeStringToFileForTest(\"test_envoy\", \"x\");\n  EXPECT_TRUE(file_system_.fileExists(file_path));\n  EXPECT_TRUE(file_system_.fileExists(\"c:/windows\"));\n#else\n  EXPECT_TRUE(file_system_.fileExists(\"/dev/null\"));\n  EXPECT_TRUE(file_system_.fileExists(\"/dev\"));\n#endif\n}\n\nTEST_F(FileSystemImplTest, DirectoryExists) {\n  EXPECT_FALSE(file_system_.directoryExists(\"/dev/blahblah\"));\n#ifdef WIN32\n  const std::string file_path = TestEnvironment::writeStringToFileForTest(\"test_envoy\", \"x\");\n  EXPECT_FALSE(file_system_.directoryExists(file_path));\n  EXPECT_TRUE(file_system_.directoryExists(\"c:/windows\"));\n#else\n  
EXPECT_FALSE(file_system_.directoryExists(\"/dev/null\"));\n  EXPECT_TRUE(file_system_.directoryExists(\"/dev\"));\n#endif\n}\n\nTEST_F(FileSystemImplTest, FileSize) {\n  EXPECT_EQ(0, file_system_.fileSize(std::string(Platform::null_device_path)));\n  EXPECT_EQ(-1, file_system_.fileSize(\"/dev/blahblahblah\"));\n  const std::string data = \"test string\\ntest\";\n  const std::string file_path = TestEnvironment::writeStringToFileForTest(\"test_envoy\", data);\n  EXPECT_EQ(data.length(), file_system_.fileSize(file_path));\n}\n\nTEST_F(FileSystemImplTest, FileReadToEndSuccess) {\n  const std::string data = \"test string\\ntest\";\n  const std::string file_path = TestEnvironment::writeStringToFileForTest(\"test_envoy\", data);\n\n  EXPECT_EQ(data, file_system_.fileReadToEnd(file_path));\n}\n\n// Files are read into std::string; verify that all bytes (e.g., non-ascii characters) come back\n// unmodified\nTEST_F(FileSystemImplTest, FileReadToEndSuccessBinary) {\n  std::string data;\n  for (unsigned i = 0; i < 256; i++) {\n    data.push_back(i);\n  }\n  const std::string file_path = TestEnvironment::writeStringToFileForTest(\"test_envoy\", data);\n\n  const std::string read = file_system_.fileReadToEnd(file_path);\n  const std::vector<uint8_t> binary_read(read.begin(), read.end());\n  EXPECT_EQ(binary_read.size(), 256);\n  for (unsigned i = 0; i < 256; i++) {\n    EXPECT_EQ(binary_read.at(i), i);\n  }\n}\n\nTEST_F(FileSystemImplTest, FileReadToEndDoesNotExist) {\n  unlink(TestEnvironment::temporaryPath(\"envoy_this_not_exist\").c_str());\n  EXPECT_THROW(file_system_.fileReadToEnd(TestEnvironment::temporaryPath(\"envoy_this_not_exist\")),\n               EnvoyException);\n}\n\nTEST_F(FileSystemImplTest, FileReadToEndDenylisted) {\n  EXPECT_THROW(file_system_.fileReadToEnd(\"/dev/urandom\"), EnvoyException);\n  EXPECT_THROW(file_system_.fileReadToEnd(\"/proc/cpuinfo\"), EnvoyException);\n  EXPECT_THROW(file_system_.fileReadToEnd(\"/sys/block/sda/dev\"), 
EnvoyException);\n}\n\n#ifndef WIN32\nTEST_F(FileSystemImplTest, CanonicalPathSuccess) { EXPECT_EQ(\"/\", canonicalPath(\"//\").rc_); }\n#endif\n\n#ifndef WIN32\nTEST_F(FileSystemImplTest, CanonicalPathFail) {\n  const Api::SysCallStringResult result = canonicalPath(\"/_some_non_existent_file\");\n  EXPECT_TRUE(result.rc_.empty());\n  EXPECT_EQ(\"No such file or directory\", errorDetails(result.errno_));\n}\n#endif\n\nTEST_F(FileSystemImplTest, SplitPathFromFilename) {\n  PathSplitResult result;\n  result = file_system_.splitPathFromFilename(\"/foo/bar/baz\");\n  EXPECT_EQ(result.directory_, \"/foo/bar\");\n  EXPECT_EQ(result.file_, \"baz\");\n  result = file_system_.splitPathFromFilename(\"/foo/bar\");\n  EXPECT_EQ(result.directory_, \"/foo\");\n  EXPECT_EQ(result.file_, \"bar\");\n  result = file_system_.splitPathFromFilename(\"/foo\");\n  EXPECT_EQ(result.directory_, \"/\");\n  EXPECT_EQ(result.file_, \"foo\");\n  result = file_system_.splitPathFromFilename(\"/\");\n  EXPECT_EQ(result.directory_, \"/\");\n  EXPECT_EQ(result.file_, \"\");\n  EXPECT_THROW(file_system_.splitPathFromFilename(\"nopathdelimeter\"), EnvoyException);\n#ifdef WIN32\n  result = file_system_.splitPathFromFilename(\"c:\\\\foo/bar\");\n  EXPECT_EQ(result.directory_, \"c:\\\\foo\");\n  EXPECT_EQ(result.file_, \"bar\");\n  result = file_system_.splitPathFromFilename(\"c:/foo\\\\bar\");\n  EXPECT_EQ(result.directory_, \"c:/foo\");\n  EXPECT_EQ(result.file_, \"bar\");\n  result = file_system_.splitPathFromFilename(\"c:\\\\foo\");\n  EXPECT_EQ(result.directory_, \"c:\\\\\");\n  EXPECT_EQ(result.file_, \"foo\");\n  result = file_system_.splitPathFromFilename(\"c:foo\");\n  EXPECT_EQ(result.directory_, \"c:\");\n  EXPECT_EQ(result.file_, \"foo\");\n  result = file_system_.splitPathFromFilename(\"c:\");\n  EXPECT_EQ(result.directory_, \"c:\");\n  EXPECT_EQ(result.file_, \"\");\n  result = file_system_.splitPathFromFilename(\"\\\\\\\\?\\\\C:\\\\\");\n  EXPECT_EQ(result.directory_, 
\"\\\\\\\\?\\\\C:\\\\\");\n  EXPECT_EQ(result.file_, \"\");\n#endif\n}\n\nTEST_F(FileSystemImplTest, IllegalPath) {\n  EXPECT_FALSE(file_system_.illegalPath(\"/\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"//\"));\n#ifdef WIN32\n  EXPECT_FALSE(file_system_.illegalPath(\"/dev\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"/dev/\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"/proc\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"/proc/\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"/sys\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"/sys/\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"/_some_non_existent_file\"));\n  EXPECT_TRUE(file_system_.illegalPath(R\"EOF(\\\\.\\foo)EOF\"));\n  EXPECT_TRUE(file_system_.illegalPath(R\"EOF(\\\\z\\foo)EOF\"));\n  EXPECT_TRUE(file_system_.illegalPath(R\"EOF(\\\\?\\nul)EOF\"));\n  EXPECT_FALSE(file_system_.illegalPath(R\"EOF(\\\\?\\C:\\foo)EOF\"));\n  EXPECT_FALSE(file_system_.illegalPath(R\"EOF(C:\\foo)EOF\"));\n  EXPECT_FALSE(file_system_.illegalPath(R\"EOF(C:\\foo/bar\\baz)EOF\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/foo\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:zfoo\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/foo/bar/baz\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/foo/b*ar/baz\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/foo/b?ar/baz\"));\n  EXPECT_TRUE(file_system_.illegalPath(R\"EOF(C:/i/x\"x)EOF\"));\n  EXPECT_TRUE(file_system_.illegalPath(std::string(\"C:/i\\0j\", 6)));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/\\177\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/\\alarm\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/i/../j\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/i/./j\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/i/.j\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/j.\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/j \"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/i///\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/NUL\"));\n  
EXPECT_TRUE(file_system_.illegalPath(\"C:/i/nul\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/nul.ext\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/nul.ext.ext2\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/nul .ext\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/COM1\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/COM1/whoops\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/COM1.ext\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"C:/i/COM1  .ext\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/i/COM1  ext\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/i/COM1foo\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/i/COM0\"));\n  EXPECT_FALSE(file_system_.illegalPath(\"C:/i/COM\"));\n#else\n  EXPECT_TRUE(file_system_.illegalPath(\"/dev\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"/dev/\"));\n  // Exception to allow opening from file descriptors. See #7258.\n  EXPECT_FALSE(file_system_.illegalPath(\"/dev/fd/0\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"/proc\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"/proc/\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"/sys\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"/sys/\"));\n  EXPECT_TRUE(file_system_.illegalPath(\"/_some_non_existent_file\"));\n#endif\n}\n\nTEST_F(FileSystemImplTest, ConstructedFileNotOpen) {\n  const std::string new_file_path = TestEnvironment::temporaryPath(\"envoy_this_not_exist\");\n  ::unlink(new_file_path.c_str());\n\n  FilePtr file = file_system_.createFile(new_file_path);\n  EXPECT_FALSE(file->isOpen());\n}\n\nTEST_F(FileSystemImplTest, Open) {\n  const std::string new_file_path = TestEnvironment::temporaryPath(\"envoy_this_not_exist\");\n  ::unlink(new_file_path.c_str());\n\n  FilePtr file = file_system_.createFile(new_file_path);\n  const Api::IoCallBoolResult result = file->open(DefaultFlags);\n  EXPECT_TRUE(result.rc_);\n  EXPECT_TRUE(file->isOpen());\n}\n\nTEST_F(FileSystemImplTest, OpenReadOnly) {\n  const std::string new_file_path = 
TestEnvironment::temporaryPath(\"envoy_this_not_exist\");\n  ::unlink(new_file_path.c_str());\n  static constexpr FlagSet ReadOnlyFlags{1 << Filesystem::File::Operation::Read |\n                                         1 << Filesystem::File::Operation::Create |\n                                         1 << Filesystem::File::Operation::Append};\n  FilePtr file = file_system_.createFile(new_file_path);\n  const Api::IoCallBoolResult result = file->open(ReadOnlyFlags);\n  EXPECT_TRUE(result.rc_);\n  EXPECT_TRUE(file->isOpen());\n}\n\nTEST_F(FileSystemImplTest, OpenTwice) {\n  const std::string new_file_path = TestEnvironment::temporaryPath(\"envoy_this_not_exist\");\n  ::unlink(new_file_path.c_str());\n\n  FilePtr file = file_system_.createFile(new_file_path);\n  EXPECT_EQ(getFd(file.get()), INVALID_HANDLE);\n\n  const Api::IoCallBoolResult result1 = file->open(DefaultFlags);\n  const filesystem_os_id_t initial_fd = getFd(file.get());\n  EXPECT_TRUE(result1.rc_);\n  EXPECT_TRUE(file->isOpen());\n\n  // check that we don't leak a file descriptor\n  const Api::IoCallBoolResult result2 = file->open(DefaultFlags);\n  EXPECT_EQ(initial_fd, getFd(file.get()));\n  EXPECT_TRUE(result2.rc_);\n  EXPECT_TRUE(file->isOpen());\n}\n\nTEST_F(FileSystemImplTest, OpenBadFilePath) {\n  FilePtr file = file_system_.createFile(\"\");\n  const Api::IoCallBoolResult result = file->open(DefaultFlags);\n  EXPECT_FALSE(result.rc_);\n}\n\nTEST_F(FileSystemImplTest, ExistingFile) {\n  const std::string file_path =\n      TestEnvironment::writeStringToFileForTest(\"test_envoy\", \"existing file\");\n\n  {\n    FilePtr file = file_system_.createFile(file_path);\n    const Api::IoCallBoolResult open_result = file->open(DefaultFlags);\n    EXPECT_TRUE(open_result.rc_);\n    std::string data(\" new data\");\n    const Api::IoCallSizeResult result = file->write(data);\n    EXPECT_EQ(data.length(), result.rc_);\n  }\n\n  auto contents = TestEnvironment::readFileToStringForTest(file_path);\n  
EXPECT_EQ(\"existing file new data\", contents);\n}\n\nTEST_F(FileSystemImplTest, NonExistingFile) {\n  const std::string new_file_path = TestEnvironment::temporaryPath(\"envoy_this_not_exist\");\n  ::unlink(new_file_path.c_str());\n\n  {\n    FilePtr file = file_system_.createFile(new_file_path);\n    const Api::IoCallBoolResult open_result = file->open(DefaultFlags);\n    EXPECT_TRUE(open_result.rc_);\n    std::string data(\" new data\");\n    const Api::IoCallSizeResult result = file->write(data);\n    EXPECT_EQ(data.length(), result.rc_);\n  }\n\n  auto contents = TestEnvironment::readFileToStringForTest(new_file_path);\n  EXPECT_EQ(\" new data\", contents);\n}\n\nTEST_F(FileSystemImplTest, Close) {\n  const std::string new_file_path = TestEnvironment::temporaryPath(\"envoy_this_not_exist\");\n  ::unlink(new_file_path.c_str());\n\n  FilePtr file = file_system_.createFile(new_file_path);\n  const Api::IoCallBoolResult result1 = file->open(DefaultFlags);\n  EXPECT_TRUE(result1.rc_);\n  EXPECT_TRUE(file->isOpen());\n\n  const Api::IoCallBoolResult result2 = file->close();\n  EXPECT_TRUE(result2.rc_);\n  EXPECT_FALSE(file->isOpen());\n}\n\nTEST_F(FileSystemImplTest, WriteAfterClose) {\n  const std::string new_file_path = TestEnvironment::temporaryPath(\"envoy_this_not_exist\");\n  ::unlink(new_file_path.c_str());\n\n  FilePtr file = file_system_.createFile(new_file_path);\n  const Api::IoCallBoolResult bool_result1 = file->open(DefaultFlags);\n  EXPECT_TRUE(bool_result1.rc_);\n  const Api::IoCallBoolResult bool_result2 = file->close();\n  EXPECT_TRUE(bool_result2.rc_);\n  const Api::IoCallSizeResult size_result = file->write(\" new data\");\n  EXPECT_EQ(-1, size_result.rc_);\n  EXPECT_EQ(IoFileError::IoErrorCode::BadFd, size_result.err_->getErrorCode());\n}\n\nTEST_F(FileSystemImplTest, NonExistingFileAndReadOnly) {\n  const std::string new_file_path = TestEnvironment::temporaryPath(\"envoy_this_not_exist\");\n  ::unlink(new_file_path.c_str());\n\n  static 
constexpr FlagSet flag(static_cast<size_t>(Filesystem::File::Operation::Read));\n  FilePtr file = file_system_.createFile(new_file_path);\n  const Api::IoCallBoolResult open_result = file->open(flag);\n  EXPECT_FALSE(open_result.rc_);\n}\n\nTEST_F(FileSystemImplTest, ExistingReadOnlyFileAndWrite) {\n  const std::string file_path =\n      TestEnvironment::writeStringToFileForTest(\"test_envoy\", \"existing file\");\n\n  {\n    static constexpr FlagSet flag(static_cast<size_t>(Filesystem::File::Operation::Read));\n    FilePtr file = file_system_.createFile(file_path);\n    const Api::IoCallBoolResult open_result = file->open(flag);\n    EXPECT_TRUE(open_result.rc_);\n    std::string data(\" new data\");\n    const Api::IoCallSizeResult result = file->write(data);\n    EXPECT_TRUE(result.rc_ < 0);\n#ifdef WIN32\n    EXPECT_EQ(IoFileError::IoErrorCode::Permission, result.err_->getErrorCode());\n#else\n    EXPECT_EQ(IoFileError::IoErrorCode::BadFd, result.err_->getErrorCode());\n#endif\n  }\n\n  auto contents = TestEnvironment::readFileToStringForTest(file_path);\n  EXPECT_EQ(\"existing file\", contents);\n}\n\nTEST_F(FileSystemImplTest, TestIoFileError) {\n  IoFileError error1(HANDLE_ERROR_PERM);\n  EXPECT_EQ(IoFileError::IoErrorCode::Permission, error1.getErrorCode());\n  EXPECT_EQ(errorDetails(HANDLE_ERROR_PERM), error1.getErrorDetails());\n\n  IoFileError error2(HANDLE_ERROR_INVALID);\n  EXPECT_EQ(IoFileError::IoErrorCode::BadFd, error2.getErrorCode());\n  EXPECT_EQ(errorDetails(HANDLE_ERROR_INVALID), error2.getErrorDetails());\n\n  int not_known_error = 42;\n  IoFileError error3(not_known_error);\n  EXPECT_EQ(IoFileError::IoErrorCode::UnknownError, error3.getErrorCode());\n}\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/filesystem/watcher_impl_test.cc",
    "content": "#include <cstdint>\n#include <fstream>\n\n#include \"common/common/assert.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/filesystem/watcher_impl.h\"\n\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nclass WatcherImplTest : public testing::Test {\nprotected:\n  WatcherImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n};\n\nclass WatchCallback {\npublic:\n  MOCK_METHOD(void, called, (uint32_t));\n};\n\nTEST_F(WatcherImplTest, All) {\n  Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher();\n\n  unlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_target\").c_str());\n  unlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_link\").c_str());\n  unlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_new_target\").c_str());\n  unlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_new_link\").c_str());\n\n  TestEnvironment::createPath(TestEnvironment::temporaryPath(\"envoy_test\"));\n  { std::ofstream file(TestEnvironment::temporaryPath(\"envoy_test/watcher_target\")); }\n  TestEnvironment::createSymlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_target\"),\n                                 TestEnvironment::temporaryPath(\"envoy_test/watcher_link\"));\n\n  { std::ofstream file(TestEnvironment::temporaryPath(\"envoy_test/watcher_new_target\")); }\n  TestEnvironment::createSymlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_new_target\"),\n                                 TestEnvironment::temporaryPath(\"envoy_test/watcher_new_link\"));\n\n  WatchCallback callback;\n  EXPECT_CALL(callback, called(Watcher::Events::MovedTo)).Times(2);\n  watcher->addWatch(TestEnvironment::temporaryPath(\"envoy_test/watcher_link\"),\n           
         Watcher::Events::MovedTo, [&](uint32_t events) -> void {\n                      callback.called(events);\n                      dispatcher_->exit();\n                    });\n  TestEnvironment::renameFile(TestEnvironment::temporaryPath(\"envoy_test/watcher_new_link\"),\n                              TestEnvironment::temporaryPath(\"envoy_test/watcher_link\"));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  TestEnvironment::createSymlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_new_target\"),\n                                 TestEnvironment::temporaryPath(\"envoy_test/watcher_new_link\"));\n  TestEnvironment::renameFile(TestEnvironment::temporaryPath(\"envoy_test/watcher_new_link\"),\n                              TestEnvironment::temporaryPath(\"envoy_test/watcher_link\"));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_F(WatcherImplTest, Create) {\n  Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher();\n\n  unlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_target\").c_str());\n  unlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_link\").c_str());\n  unlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_new_link\").c_str());\n  unlink(TestEnvironment::temporaryPath(\"envoy_test/other_file\").c_str());\n\n  TestEnvironment::createPath(TestEnvironment::temporaryPath(\"envoy_test\"));\n  { std::ofstream file(TestEnvironment::temporaryPath(\"envoy_test/watcher_target\")); }\n\n  WatchCallback callback;\n  watcher->addWatch(TestEnvironment::temporaryPath(\"envoy_test/watcher_link\"),\n                    Watcher::Events::MovedTo, [&](uint32_t events) -> void {\n                      callback.called(events);\n                      dispatcher_->exit();\n                    });\n\n  { std::ofstream file(TestEnvironment::temporaryPath(\"envoy_test/other_file\")); }\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  EXPECT_CALL(callback, 
called(Watcher::Events::MovedTo));\n  TestEnvironment::createSymlink(TestEnvironment::temporaryPath(\"envoy_test/watcher_target\"),\n                                 TestEnvironment::temporaryPath(\"envoy_test/watcher_new_link\"));\n  TestEnvironment::renameFile(TestEnvironment::temporaryPath(\"envoy_test/watcher_new_link\"),\n                              TestEnvironment::temporaryPath(\"envoy_test/watcher_link\"));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_F(WatcherImplTest, Modify) {\n  Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher();\n\n  TestEnvironment::createPath(TestEnvironment::temporaryPath(\"envoy_test\"));\n  std::ofstream file(TestEnvironment::temporaryPath(\"envoy_test/watcher_target\"));\n\n  WatchCallback callback;\n  watcher->addWatch(TestEnvironment::temporaryPath(\"envoy_test/watcher_target\"),\n                    Watcher::Events::Modified, [&](uint32_t events) -> void {\n                      callback.called(events);\n                      dispatcher_->exit();\n                    });\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  file << \"text\" << std::flush;\n  file.close();\n  EXPECT_CALL(callback, called(Watcher::Events::Modified));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_F(WatcherImplTest, BadPath) {\n  Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher();\n\n  EXPECT_THROW(\n      watcher->addWatch(\"this_is_not_a_file\", Watcher::Events::MovedTo, [&](uint32_t) -> void {}),\n      EnvoyException);\n\n  EXPECT_THROW(watcher->addWatch(\"this_is_not_a_dir/file\", Watcher::Events::MovedTo,\n                                 [&](uint32_t) -> void {}),\n               EnvoyException);\n}\n\nTEST_F(WatcherImplTest, ParentDirectoryRemoved) {\n  Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher();\n\n  TestEnvironment::createPath(TestEnvironment::temporaryPath(\"envoy_test_empty\"));\n\n  WatchCallback callback;\n  
EXPECT_CALL(callback, called(testing::_)).Times(0);\n\n  watcher->addWatch(TestEnvironment::temporaryPath(\"envoy_test_empty/watcher_link\"),\n                    Watcher::Events::MovedTo,\n                    [&](uint32_t events) -> void { callback.called(events); });\n\n  int rc = rmdir(TestEnvironment::temporaryPath(\"envoy_test_empty\").c_str());\n  EXPECT_EQ(0, rc);\n\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n}\n\nTEST_F(WatcherImplTest, RootDirectoryPath) {\n  Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher();\n\n#ifndef WIN32\n  EXPECT_NO_THROW(watcher->addWatch(\"/\", Watcher::Events::MovedTo, [&](uint32_t) -> void {}));\n#else\n  EXPECT_NO_THROW(watcher->addWatch(\"c:\\\\\", Watcher::Events::MovedTo, [&](uint32_t) -> void {}));\n#endif\n}\n\n// Skipping this test on Windows as there is no Windows API able to atomically move a\n// directory/symlink when the new name is a non-empty directory\n#ifndef WIN32\nTEST_F(WatcherImplTest, SymlinkAtomicRename) {\n  Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher();\n\n  TestEnvironment::createPath(TestEnvironment::temporaryPath(\"envoy_test\"));\n  TestEnvironment::createPath(TestEnvironment::temporaryPath(\"envoy_test/..timestamp1\"));\n  { std::ofstream file(TestEnvironment::temporaryPath(\"envoy_test/..timestamp1/watched_file\")); }\n\n  TestEnvironment::createSymlink(TestEnvironment::temporaryPath(\"envoy_test/..timestamp1\"),\n                                 TestEnvironment::temporaryPath(\"envoy_test/..data\"));\n  TestEnvironment::createSymlink(TestEnvironment::temporaryPath(\"envoy_test/..data/watched_file\"),\n                                 TestEnvironment::temporaryPath(\"envoy_test/watched_file\"));\n\n  WatchCallback callback;\n  EXPECT_CALL(callback, called(Watcher::Events::MovedTo));\n  watcher->addWatch(TestEnvironment::temporaryPath(\"envoy_test/\"), Watcher::Events::MovedTo,\n                    [&](uint32_t events) -> void {\n          
            callback.called(events);\n                      dispatcher_->exit();\n                    });\n\n  TestEnvironment::createPath(TestEnvironment::temporaryPath(\"envoy_test/..timestamp2\"));\n  { std::ofstream file(TestEnvironment::temporaryPath(\"envoy_test/..timestamp2/watched_file\")); }\n  TestEnvironment::createSymlink(TestEnvironment::temporaryPath(\"envoy_test/..timestamp2\"),\n                                 TestEnvironment::temporaryPath(\"envoy_test/..tmp\"));\n  TestEnvironment::renameFile(TestEnvironment::temporaryPath(\"envoy_test/..tmp\"),\n                              TestEnvironment::temporaryPath(\"envoy_test/..data\"));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n#endif\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/filter/http/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"filter_config_discovery_impl_test\",\n    srcs = [\"filter_config_discovery_impl_test.cc\"],\n    deps = [\n        \"//source/common/config:utility_lib\",\n        \"//source/common/filter/http:filter_config_discovery_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//source/extensions/filters/http/router:config\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/common/filter/http/filter_config_discovery_impl_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/core/v3/extension.pb.h\"\n#include \"envoy/config/core/v3/extension.pb.validate.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/filter/http/filter_config_discovery_impl.h\"\n#include \"common/json/json_loader.h\"\n\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Filter {\nnamespace Http {\nnamespace {\n\nclass FilterConfigDiscoveryTestBase : public testing::Test {\npublic:\n  FilterConfigDiscoveryTestBase() {\n    // For server_factory_context\n    ON_CALL(factory_context_, scope()).WillByDefault(ReturnRef(scope_));\n    ON_CALL(factory_context_, messageValidationContext())\n        .WillByDefault(ReturnRef(validation_context_));\n    EXPECT_CALL(validation_context_, dynamicValidationVisitor())\n        .WillRepeatedly(ReturnRef(validation_visitor_));\n    EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager_));\n    ON_CALL(init_manager_, add(_)).WillByDefault(Invoke([this](const Init::Target& target) {\n      init_target_handle_ = target.createHandle(\"test\");\n    }));\n    ON_CALL(init_manager_, initialize(_))\n        .WillByDefault(Invoke(\n            [this](const Init::Watcher& watcher) { 
init_target_handle_->initialize(watcher); }));\n    // Thread local storage assumes a single (main) thread with no workers.\n    ON_CALL(factory_context_.admin_, concurrency()).WillByDefault(Return(0));\n  }\n\n  Event::SimulatedTimeSystem& timeSystem() { return time_system_; }\n\n  Event::SimulatedTimeSystem time_system_;\n  NiceMock<ProtobufMessage::MockValidationContext> validation_context_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  NiceMock<Init::MockManager> init_manager_;\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  Init::ExpectableWatcherImpl init_watcher_;\n  Init::TargetHandlePtr init_target_handle_;\n  NiceMock<Stats::MockIsolatedStatsStore> scope_;\n};\n\n// Test base class with a single provider.\nclass FilterConfigDiscoveryImplTest : public FilterConfigDiscoveryTestBase {\npublic:\n  FilterConfigDiscoveryImplTest() {\n    filter_config_provider_manager_ = std::make_unique<FilterConfigProviderManagerImpl>();\n  }\n  ~FilterConfigDiscoveryImplTest() override { factory_context_.thread_local_.shutdownThread(); }\n\n  FilterConfigProviderPtr createProvider(std::string name, bool warm) {\n    EXPECT_CALL(init_manager_, add(_));\n    envoy::config::core::v3::ConfigSource config_source;\n    TestUtility::loadFromYaml(\"ads: {}\", config_source);\n    return filter_config_provider_manager_->createDynamicFilterConfigProvider(\n        config_source, name, {\"envoy.extensions.filters.http.router.v3.Router\"}, factory_context_,\n        \"xds.\", !warm);\n  }\n\n  void setup(bool warm = true) {\n    provider_ = createProvider(\"foo\", warm);\n    callbacks_ = factory_context_.cluster_manager_.subscription_factory_.callbacks_;\n    EXPECT_CALL(*factory_context_.cluster_manager_.subscription_factory_.subscription_,\n                start(_, _));\n    if (!warm) {\n      EXPECT_CALL(init_watcher_, ready());\n    }\n    init_manager_.initialize(init_watcher_);\n  }\n\n  
std::unique_ptr<FilterConfigProviderManager> filter_config_provider_manager_;\n  FilterConfigProviderPtr provider_;\n  Config::SubscriptionCallbacks* callbacks_{};\n};\n\nTEST_F(FilterConfigDiscoveryImplTest, DestroyReady) {\n  setup();\n  EXPECT_CALL(init_watcher_, ready());\n}\n\nTEST_F(FilterConfigDiscoveryImplTest, Basic) {\n  InSequence s;\n  setup();\n  EXPECT_EQ(\"foo\", provider_->name());\n  EXPECT_EQ(absl::nullopt, provider_->config());\n\n  // Initial request.\n  {\n    const std::string response_yaml = R\"EOF(\n  version_info: \"1\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig\n    name: foo\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n  )EOF\";\n    const auto response =\n        TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response_yaml);\n    const auto decoded_resources =\n        TestUtility::decodeResources<envoy::config::core::v3::TypedExtensionConfig>(response);\n\n    EXPECT_CALL(init_watcher_, ready());\n    callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info());\n    EXPECT_NE(absl::nullopt, provider_->config());\n    EXPECT_EQ(1UL, scope_.counter(\"xds.extension_config_discovery.foo.config_reload\").value());\n    EXPECT_EQ(0UL, scope_.counter(\"xds.extension_config_discovery.foo.config_fail\").value());\n  }\n\n  // 2nd request with same response. 
Based on hash should not reload config.\n  {\n    const std::string response_yaml = R\"EOF(\n  version_info: \"2\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig\n    name: foo\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n  )EOF\";\n    const auto response =\n        TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response_yaml);\n    const auto decoded_resources =\n        TestUtility::decodeResources<envoy::config::core::v3::TypedExtensionConfig>(response);\n    callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info());\n    EXPECT_EQ(1UL, scope_.counter(\"xds.extension_config_discovery.foo.config_reload\").value());\n    EXPECT_EQ(0UL, scope_.counter(\"xds.extension_config_discovery.foo.config_fail\").value());\n  }\n}\n\nTEST_F(FilterConfigDiscoveryImplTest, ConfigFailed) {\n  InSequence s;\n  setup();\n  EXPECT_CALL(init_watcher_, ready());\n  callbacks_->onConfigUpdateFailed(Config::ConfigUpdateFailureReason::FetchTimedout, {});\n  EXPECT_EQ(0UL, scope_.counter(\"xds.extension_config_discovery.foo.config_reload\").value());\n  EXPECT_EQ(1UL, scope_.counter(\"xds.extension_config_discovery.foo.config_fail\").value());\n}\n\nTEST_F(FilterConfigDiscoveryImplTest, TooManyResources) {\n  InSequence s;\n  setup();\n  const std::string response_yaml = R\"EOF(\n  version_info: \"1\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig\n    name: foo\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n  - \"@type\": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig\n    name: foo\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n  )EOF\";\n  const auto response =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response_yaml);\n  
const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::core::v3::TypedExtensionConfig>(response);\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_THROW_WITH_MESSAGE(\n      callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()),\n      EnvoyException, \"Unexpected number of resources in ExtensionConfigDS response: 2\");\n  EXPECT_EQ(0UL, scope_.counter(\"xds.extension_config_discovery.foo.config_reload\").value());\n}\n\nTEST_F(FilterConfigDiscoveryImplTest, WrongName) {\n  InSequence s;\n  setup();\n  const std::string response_yaml = R\"EOF(\n  version_info: \"1\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig\n    name: bar\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n  )EOF\";\n  const auto response =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response_yaml);\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::core::v3::TypedExtensionConfig>(response);\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_THROW_WITH_MESSAGE(\n      callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()),\n      EnvoyException, \"Unexpected resource name in ExtensionConfigDS response: bar\");\n  EXPECT_EQ(0UL, scope_.counter(\"xds.extension_config_discovery.foo.config_reload\").value());\n}\n\nTEST_F(FilterConfigDiscoveryImplTest, Incremental) {\n  InSequence s;\n  setup();\n  const std::string response_yaml = R\"EOF(\nversion_info: \"1\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig\n  name: foo\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n)EOF\";\n  const auto response =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response_yaml);\n  const auto decoded_resources =\n      
TestUtility::decodeResources<envoy::config::core::v3::TypedExtensionConfig>(response);\n  Protobuf::RepeatedPtrField<std::string> remove;\n  *remove.Add() = \"bar\";\n  EXPECT_CALL(init_watcher_, ready());\n  callbacks_->onConfigUpdate(decoded_resources.refvec_, remove, response.version_info());\n  EXPECT_NE(absl::nullopt, provider_->config());\n  EXPECT_EQ(1UL, scope_.counter(\"xds.extension_config_discovery.foo.config_reload\").value());\n  EXPECT_EQ(0UL, scope_.counter(\"xds.extension_config_discovery.foo.config_fail\").value());\n}\n\nTEST_F(FilterConfigDiscoveryImplTest, ApplyWithoutWarming) {\n  InSequence s;\n  setup(false);\n  EXPECT_EQ(\"foo\", provider_->name());\n  EXPECT_EQ(absl::nullopt, provider_->config());\n  EXPECT_EQ(0UL, scope_.counter(\"xds.extension_config_discovery.foo.config_reload\").value());\n  EXPECT_EQ(0UL, scope_.counter(\"xds.extension_config_discovery.foo.config_fail\").value());\n}\n\nTEST_F(FilterConfigDiscoveryImplTest, DualProviders) {\n  InSequence s;\n  setup();\n  auto provider2 = createProvider(\"foo\", true);\n  EXPECT_EQ(\"foo\", provider2->name());\n  EXPECT_EQ(absl::nullopt, provider2->config());\n  const std::string response_yaml = R\"EOF(\n  version_info: \"1\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig\n    name: foo\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n  )EOF\";\n  const auto response =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response_yaml);\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::core::v3::TypedExtensionConfig>(response);\n  EXPECT_CALL(init_watcher_, ready());\n  callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info());\n  EXPECT_NE(absl::nullopt, provider_->config());\n  EXPECT_NE(absl::nullopt, provider2->config());\n  EXPECT_EQ(1UL, 
scope_.counter(\"xds.extension_config_discovery.foo.config_reload\").value());\n}\n\nTEST_F(FilterConfigDiscoveryImplTest, DualProvidersInvalid) {\n  InSequence s;\n  setup();\n  auto provider2 = createProvider(\"foo\", true);\n  const std::string response_yaml = R\"EOF(\n  version_info: \"1\"\n  resources:\n  - \"@type\": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig\n    name: foo\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n      pass_through_mode: false\n  )EOF\";\n  const auto response =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response_yaml);\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::core::v3::TypedExtensionConfig>(response);\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_THROW_WITH_MESSAGE(\n      callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()),\n      EnvoyException,\n      \"Error: filter config has type URL envoy.config.filter.http.health_check.v2.HealthCheck but \"\n      \"expect envoy.extensions.filters.http.router.v3.Router.\");\n  EXPECT_EQ(0UL, scope_.counter(\"xds.extension_config_discovery.foo.config_reload\").value());\n}\n\n} // namespace\n} // namespace Http\n} // namespace Filter\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/formatter/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_proto_library(\n    name = \"substitution_formatter_fuzz_proto\",\n    srcs = [\"substitution_formatter_fuzz.proto\"],\n    deps = [\"//test/fuzz:common_proto\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"substitution_formatter_fuzz_test\",\n    srcs = [\"substitution_formatter_fuzz_test.cc\"],\n    corpus = \"substitution_formatter_corpus\",\n    dictionaries = [\n        \"substitution_formatter_fuzz_test.dict\",\n        \"//test/fuzz:headers.dict\",\n    ],\n    deps = [\n        \":substitution_formatter_fuzz_proto_cc_proto\",\n        \"//source/common/formatter:substitution_formatter_lib\",\n        \"//test/fuzz:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"substitution_formatter_test\",\n    srcs = [\"substitution_formatter_test.cc\"],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        \"//source/common/formatter:substitution_formatter_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/router:string_accessor_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"substitution_format_string_test\",\n    srcs = [\"substitution_format_string_test.cc\"],\n    deps = [\n        \"//source/common/formatter:substitution_format_string_lib\",\n        
\"//test/mocks/http:http_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"substitution_formatter_speed_test\",\n    srcs = [\"substitution_formatter_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/formatter:substitution_formatter_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/network:address_lib\",\n        \"//test/common/stream_info:test_util\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:printers_lib\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"substitution_formatter_speed_test_benchmark_test\",\n    benchmark_binary = \"substitution_formatter_speed_test\",\n)\n"
  },
  {
    "path": "test/common/formatter/substitution_format_string_test.cc",
    "content": "#include \"envoy/config/core/v3/substitution_format_string.pb.validate.h\"\n\n#include \"common/formatter/substitution_format_string.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Formatter {\n\nclass SubstitutionFormatStringUtilsTest : public ::testing::Test {\npublic:\n  SubstitutionFormatStringUtilsTest() {\n    absl::optional<uint32_t> response_code{200};\n    EXPECT_CALL(stream_info_, responseCode()).WillRepeatedly(Return(response_code));\n  }\n\n  Http::TestRequestHeaderMapImpl request_headers_{\n      {\":method\", \"GET\"}, {\":path\", \"/bar/foo\"}, {\"content-type\", \"application/json\"}};\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n  StreamInfo::MockStreamInfo stream_info_;\n  std::string body_;\n\n  envoy::config::core::v3::SubstitutionFormatString config_;\n};\n\nTEST_F(SubstitutionFormatStringUtilsTest, TestEmptyIsInvalid) {\n  envoy::config::core::v3::SubstitutionFormatString empty_config;\n  std::string err;\n  EXPECT_FALSE(Validate(empty_config, &err));\n}\n\nTEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigText) {\n  const std::string yaml = R\"EOF(\n  text_format: \"plain text, path=%REQ(:path)%, code=%RESPONSE_CODE%\"\n)EOF\";\n  TestUtility::loadFromYaml(yaml, config_);\n\n  auto formatter = SubstitutionFormatStringUtils::fromProtoConfig(config_);\n  EXPECT_EQ(\"plain text, path=/bar/foo, code=200\",\n            formatter->format(request_headers_, response_headers_, response_trailers_, stream_info_,\n                              body_));\n}\n\nTEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigJson) {\n  const std::string yaml = R\"EOF(\n  json_format:\n    text: \"plain text\"\n    path: \"%REQ(:path)%\"\n    code: \"%RESPONSE_CODE%\"\n    
headers:\n      content-type: \"%REQ(CONTENT-TYPE)%\"\n)EOF\";\n  TestUtility::loadFromYaml(yaml, config_);\n\n  auto formatter = SubstitutionFormatStringUtils::fromProtoConfig(config_);\n  const auto out_json = formatter->format(request_headers_, response_headers_, response_trailers_,\n                                          stream_info_, body_);\n\n  const std::string expected = R\"EOF({\n    \"text\": \"plain text\",\n    \"path\": \"/bar/foo\",\n    \"code\": 200,\n    \"headers\": {\n      \"content-type\": \"application/json\"\n    }\n})EOF\";\n  EXPECT_TRUE(TestUtility::jsonStringEqual(out_json, expected));\n}\n\nTEST_F(SubstitutionFormatStringUtilsTest, TestInvalidConfigs) {\n  const std::vector<std::string> invalid_configs = {\n      R\"(\n  json_format:\n    field: true\n)\",\n      R\"(\n  json_format:\n    field: 200\n)\",\n  };\n  for (const auto& yaml : invalid_configs) {\n    TestUtility::loadFromYaml(yaml, config_);\n    EXPECT_THROW_WITH_MESSAGE(\n        SubstitutionFormatStringUtils::fromProtoConfig(config_), EnvoyException,\n        \"Only string values or nested structs are supported in the JSON access log format.\");\n  }\n}\n\n} // namespace Formatter\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096",
    "content": "format: \"%START_TIME(%f)%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376",
    "content": "format: \"%RESP(\\n )% %REQ(\\n)%\"\nresponse_trailers {\n}\nstream_info {\n  start_time: 7313138969067071779\n  response_code {\n    value: 262144\n  }\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536",
    "content": "format: \"%REQ(\\r)%\" "
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552",
    "content": "request_headers {   headers {     key: \"\\r\"   } } "
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296",
    "content": "format: \"%START_TIME(%f)%\" stream_info {   start_time: 18446744073709551615 }\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480",
    "content": "format : \"%DYNAMIC_METADATA()%\" stream_info {\n  dynamic_metadata {\n    filter_metadata {\n    key:\n      \"\" value {\n        fields {\n        key:\n          \"\" value {\n            list_value {\n              values {\n                struct_value {\n                  fields {\n                  key:\n                    \"\" value {\n                      list_value {\n                        values {\n                          struct_value {\n                            fields {\n                            key:\n                              \"\" value {\n                                list_value {\n                                  values {\n                                    struct_value {\n                                      fields {\n                                      key:\n                                        \"\" value {\n                                          list_value {\n                                            values {\n                                              list_value {\n                                                values {\n                                                  struct_value {\n                                                    fields {\n                                                    key:\n                                                      \"\" value {\n                                                        list_value {\n                                                          values {\n                                                            struct_value {\n                                                              fields {\n                                                              key:\n                                                                \"\" value {\n                                                                  list_value {\n                                                                    values {\n                                                
                      struct_value {\n                                                                        fields {\n                                                                        key:\n                                                                          \"\" value {\n                                                                            struct_value {\n                                                                              fields {\n                                                                              key:\n                                                                                \"\" value {\n                                                                                  list_value {\n                                                                                    values {\n                                                                                      list_value {\n                                                                                        values {\n                                                                                          struct_value {\n                                                                                            fields {\n                                                                                            key:\n                                                                                              \"\" value {\n                                                                                                list_value {\n                                                                                                  values {\n                                                                                                    struct_value {\n                                                                                                      fields {\n                                                                                                      
key:\n                                                                                                        \"\" value {\n                                                                                                          list_value {\n                                                                                                            values {\n                                                                                                              struct_value {\n                                                                                                                fields {\n                                                                                                                key:\n                                                                                                                  \"\" value {\n                                                                                                                    list_value {\n                                                                                                                      values {\n                                                                                                                        struct_value {\n                                                                                                                          fields {\n                                                                                                                          key:\n                                                                                                                            \"\" value {\n                                                                                                                              list_value {\n                                                                                                                                values {\n                                                                          
                                                        struct_value {\n                                                                                                                                    fields {\n                                                                                                                                    key:\n                                                                                                                                      \"\" value {\n                                                                                                                                        list_value {\n                                                                                                                                          values {\n                                                                                                                                            struct_value {\n                                                                                                                                              fields {\n                                                                                                                                              key:\n                                                                                                                                                \"\" value {\n                                                                                                                                                  list_value {\n                                                                                                                                                    values {\n                                                                                                                                                      list_value {\n                                                                                                          
                                              values {\n                                                                                                                                                          struct_value {\n                                                                                                                                                            fields {\n                                                                                                                                                              key: \"\"\n                                                                                                                                                              value {\n                                                                                                                                                                list_value {\n                                                                                                                                                                  values {\n                                                                                                                                                                    list_value {\n                                                                                                                                                                      values {\n                                                                                                                                                                      }\n                                                                                                                                                                    }\n                                                                                                                                                                  }\n                                                                       
                                                                                         }\n                                                                                                                                                              }\n                                                                                                                                                            }\n                                                                                                                                                          }\n                                                                                                                                                        }\n                                                                                                                                                      }\n                                                                                                                                                    }\n                                                                                                                                                  }\n                                                                                                                                                }\n                                                                                                                                              }\n                                                                                                                                            }\n                                                                                                                                          }\n                                                                                                                                        }\n                                                                                                            
                          }\n                                                                                                                                    }\n                                                                                                                                  }\n                                                                                                                                }\n                                                                                                                              }\n                                                                                                                            }\n                                                                                                                          }\n                                                                                                                        }\n                                                                                                                      }\n                                                                                                                    }\n                                                                                                                  }\n                                                                                                                }\n                                                                                                              }\n                                                                                                            }\n                                                                                                          }\n                                                                                                        }\n                                                                                                      }\n                                                   
                                                 }\n                                                                                                  }\n                                                                                                }\n                                                                                              }\n                                                                                            }\n                                                                                          }\n                                                                                        }\n                                                                                      }\n                                                                                    }\n                                                                                  }\n                                                                                }\n                                                                              }\n                                                                            }\n                                                                          }\n                                                                        }\n                                                                      }\n                                                                    }\n                                                                  }\n                                                                }\n                                                              }\n                                                            }\n                                                          }\n                                                        }\n                                                      }\n                                                    }\n                                                  }\n                       
                         }\n                                              }\n                                            }\n                                          }\n                                        }\n                                      }\n                                    }\n                                  }\n                                }\n                              }\n                            }\n                          }\n                        }\n                      }\n                    }\n                  }\n                }\n              }\n            }\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz",
    "content": "headers_to_add {\n  header {\n    key: \"T\"\n    value: \"%START_TIME(%f%f%f%E461f%E46116%f%f%E461f%f%E461f%E46116%f%f%E461f%E46116%f ff%E461%f%f%f%E461f%E46371%bff%E461%f%fE%f%E4E461f%E46116%f%f%E461f%f%E461f%E46116%f%f%E461f%E46116%f ff%E461%f%f%f%E461f%E46111f%E0%f f461f%E46116%f%f%E461f%E46116%f%f%E256f%f%E461f%E46116%f%f%E461f%E46116%f f461f%E46116%f%f%E461f%E46116%f%f%E461f%f%E461f%E46116%f%f%E461f%E46116%f f461f%E46116%f%f%E461f%E46116%f%f%E461f%f%E461f%E46116%f%f%E461f*E46116%f f461f%E46116%f%f%E461f%E46116%f%f%E461f%f%E461f%E46116%f%f%E461f%E4116%f f461f%E46116%f%f%E461f%E46116%f%f%E1f%f%E9223372036854775809f%E46116%f%f%E461f%E46116%f f65075f%E46115%f%f%E461f%E46116%f%f%E461f%f%E461f%E46116%f%f%E461f%En\u0001-\f*f461f%E1%f%f%E461f%E46116%f%f%E461f%f%E461f%E46116%f%f%E461f%E46116%f f461f%E46E461f%E46116%f%f%E461f%E46116%f ff%E461%f%f%f%E461f%E46371%bff%E461%f%fE%f%E4E461f%E46116%f%f%E461f%f%E4-8682401458614789115%E46116%f ff%E461%f%f%f%E461f%E46111f%E46116%f f461f%E46116%f%f%E461f%E46116%f%f%E256f%f%E461f%E46116%f%f%E461f%E46116%f f461f%E46116%f%f%E98838f%E46116%f%f%E461f%f%E461f%E46116%f%f%E461f%E46116%f f461f%E46116%f%f%E461f%E46116%f%f%E461f%f%E461f%E46116%f%f%E461f%E46116%f f461f%E46116%f%f%E461f%E46116%f%f%E466%f%f%E461f%E46116%f ff%E461%f%f%f%E195f%E65537%bff%E4ff%f%)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/dynamic_metadata",
    "content": "format: \"%DYNAMIC_METADATA(com.test:test_key)%|%DYNAMIC_METADATA(com.test:test_obj)%|%DYNAMIC_METADATA(com.test:test_obj:inner_key)%\"\nstream_info {\n  dynamic_metadata {\n    filter_metadata {\n      key: \"com.test\"\n      value: {\n        fields {\n          key: \"test_key\"\n          value: { string_value: \"test_value\" }\n        }\n        fields {\n          key: \"test_obj\"\n          value: {\n            struct_value {\n              fields {\n                key: \"inner_key\"\n                value: { string_value: \"inner_value\" }\n              }\n            }\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/empty",
    "content": ""
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/headers",
    "content": "format: \"{{%PROTOCOL%}}   %RESP(not exist)%++%RESP(test)% %REQ(FIRST?SECOND)% %RESP(FIRST?SECOND)%\\t@%TRAILER(THIRD)%@\\t%TRAILER(TEST?TEST-2)%[]\"\nrequest_headers {\n  headers {\n    key: \"first\"\n    value: \"GET\"\n  }\n  headers {\n    key: \":path\"\n    value: \"/\"\n  }\n}\nresponse_headers {\n  headers {\n    key: \"second\"\n    value: \"PUT\"\n  }\n  headers {\n    key: \"test\"\n    value: \"test\"\n  }\n}\nresponse_trailers {\n  headers {\n    key: \"third\"\n    value: \"POST\"\n  }\n  headers {\n    key: \"test-2\"\n    value: \"test-2\"\n  }\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_0",
    "content": "format: \"%REQ(\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_1",
    "content": "format: \"{{%PROTOCOL%}}   ++ %REQ(FIRST?SECOND)% %RESP(FIRST?SECOND)\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_10",
    "content": "format: \"%RESP(TEST):%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_11",
    "content": "format: \"%RESP(X?Y):%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_12",
    "content": "format: \"%RESP(X?Y):343o24%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_13",
    "content": "format: \"%RESP(X?Y):343o24%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_14",
    "content": "format: \"%REQ(TEST):10\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_15",
    "content": "format: \"REQ(:TEST):10%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_16",
    "content": "format: \"%REQ(TEST:10%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_17",
    "content": "format: \"%REQ(\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_18",
    "content": "format: \"%REQ(X?Y?Z)%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_19",
    "content": "format: \"%DYNAMIC_METADATA(TEST\"}\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_2",
    "content": "format: \"%REQ(FIRST?SECOND)T%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_3",
    "content": "format: \"ESP(FIRST)%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_4",
    "content": "format: \"%REQ(valid)% %NOT_VALID%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_5",
    "content": "format: \"%REQ(FIRST?SECOND%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_6",
    "content": "format: \"%%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_7",
    "content": "format: \"%protocol%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_8",
    "content": "format: \"%REQ(TEST):%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/invalid_9",
    "content": "format: \"%REQ(TEST):3q4%\"\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/plain_string",
    "content": "{}*JUST PLAIN string]\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/response_code",
    "content": "format: \"%RESPONSE_CODE%\"\nstream_info {\n  response_code {\n    value: 404\n  }\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/start_time_0",
    "content": "format: \"%START_TIME(%Y/%m/%d)%|%START_TIME(%s)%|%START_TIME(bad_format)%|%START_TIME%|%START_TIME(%f.%1f.%2f.%3f)%\"\nstream_info {\n  start_time: 1522280158\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/start_time_1",
    "content": "format: \"%START_TIME(%s.%3f)%|%START_TIME(%s.%4f)%|%START_TIME(%s.%5f)%|%START_TIME(%s.%6f)%\"\nstream_info {\n  start_time: 1522796769123456\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/start_time_2",
    "content": "format: \"%START_TIME(segment1:%s.%3f|segment2:%s.%4f|seg3:%s.%6f|%s-%3f-asdf-%9f|.%7f:segm5:%Y)%\"\nstream_info {\n  start_time: 1522796769123456\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/start_time_3",
    "content": "format: \"%START_TIME(%%%%|%%%%%f|%s%%%%%3f|%1f%%%%%s)%\"\nstream_info {\n  start_time: 1522796769123456\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_corpus/upstream_local_address",
    "content": "format: \"%UPSTREAM_LOCAL_ADDRESS%\"\nstream_info {\n  upstream_local_address {\n    socket_address {\n      address: \"10.1.2.3\",\n      port_value: 10001\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.substitution;\n\nimport \"test/fuzz/common.proto\";\n\nimport \"validate/validate.proto\";\n\n// Structured input for substitution_formatter_fuzz_test.\n\nmessage TestCase {\n  // Do not allow invalid header characters in %REQ(...)% and %RESP(...)%.\n  // It will crash the fuzzer on LowerCaseString asserts.\n  string format = 1 [(validate.rules).string.pattern = \"[^%\\\\(REQ|RESP\\\\)\\\\([^\\\\r\\\\0\\\\n]\\\\)%]\"];\n  test.fuzz.Headers request_headers = 2;\n  test.fuzz.Headers response_headers = 3;\n  test.fuzz.Headers response_trailers = 4;\n  test.fuzz.StreamInfo stream_info = 5;\n}\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_fuzz_test.cc",
    "content": "#include \"common/formatter/substitution_formatter.h\"\n\n#include \"test/common/formatter/substitution_formatter_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\nnamespace {\n\nDEFINE_PROTO_FUZZER(const test::common::substitution::TestCase& input) {\n  try {\n    TestUtility::validate(input);\n    std::vector<Formatter::FormatterProviderPtr> formatters =\n        Formatter::SubstitutionFormatParser::parse(input.format());\n    const auto& request_headers =\n        Fuzz::fromHeaders<Http::TestRequestHeaderMapImpl>(input.request_headers());\n    const auto& response_headers =\n        Fuzz::fromHeaders<Http::TestResponseHeaderMapImpl>(input.response_headers());\n    const auto& response_trailers =\n        Fuzz::fromHeaders<Http::TestResponseTrailerMapImpl>(input.response_trailers());\n    const std::unique_ptr<TestStreamInfo> stream_info = Fuzz::fromStreamInfo(input.stream_info());\n    for (const auto& it : formatters) {\n      it->format(request_headers, response_headers, response_trailers, *stream_info,\n                 absl::string_view());\n    }\n    ENVOY_LOG_MISC(trace, \"Success\");\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n}\n\n} // namespace\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_fuzz_test.dict",
    "content": "# format strings\n\"[%START_TIME%] \\\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \\\"%REQ(X-FORWARDED-FOR)%\\\" \\\"%REQ(USER-AGENT)%\\\" \\\"%REQ(X-REQUEST-ID)%\\\" \\\"%REQ(:AUTHORITY)%\\\" \\\"%UPSTREAM_HOST%\\\"\\x0A\"\n\"%START_TIME%\"\n\"%BYTES_RECEIVED%\"\n\"%PROTOCOL%\"\n\"%RESPONSE_CODE%\"\n\"%RESPONSE_CODE_DETAILS%\"\n\"%BYTES_SENT%\"\n\"%DURATION%\"\n\"%RESPONSE_DURATION%\"\n\"%RESPONSE_FLAGS%\"\n\"%RESPONSE_TX_DURATION%\"\n\"%ROUTE_NAME%\"\n\"%UPSTREAM_HOST%\"\n\"%UPSTREAM_CLUSTER%\"\n\"%UPSTREAM_LOCAL_ADDRESS%\"\n\"%UPSTREAM_TRANSPORT_FAILURE_REASON%\"\n\"%DOWNSTREAM_REMOTE_ADDRESS%\"\n\"%REQUESTED_SERVER_NAME%\"\n\"%REQ\"\n\"%TRAILER\"\n\"%RESP\"\n\"%DOWNSTREAM_PEER_CERT_V_END%\"\n\"%HOSTNAME%\""
  },
  {
    "path": "test/common/formatter/substitution_formatter_speed_test.cc",
    "content": "#include \"common/formatter/substitution_formatter.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"test/common/stream_info/test_util.h\"\n#include \"test/mocks/http/mocks.h\"\n\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\n\nnamespace {\n\nstd::unique_ptr<Envoy::Formatter::JsonFormatterImpl> makeJsonFormatter(bool typed) {\n  ProtobufWkt::Struct JsonLogFormat;\n  const std::string format_yaml = R\"EOF(\n    remote_address: '%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%'\n    start_time: '%START_TIME(%Y/%m/%dT%H:%M:%S%z %s)%'\n    method: '%REQ(:METHOD)%'\n    url: '%REQ(X-FORWARDED-PROTO)%://%REQ(:AUTHORITY)%%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%'\n    protocol: '%PROTOCOL%'\n    respoinse_code: '%RESPONSE_CODE%'\n    bytes_sent: '%BYTES_SENT%'\n    duration: '%DURATION%'\n    referer: '%REQ(REFERER)%'\n    user-agent: '%REQ(USER-AGENT)%'\n  )EOF\";\n  TestUtility::loadFromYaml(format_yaml, JsonLogFormat);\n  return std::make_unique<Envoy::Formatter::JsonFormatterImpl>(JsonLogFormat, typed, false);\n}\n\nstd::unique_ptr<Envoy::TestStreamInfo> makeStreamInfo() {\n  auto stream_info = std::make_unique<Envoy::TestStreamInfo>();\n  stream_info->setDownstreamRemoteAddress(\n      std::make_shared<Envoy::Network::Address::Ipv4Instance>(\"203.0.113.1\"));\n  return stream_info;\n}\n\n} // namespace\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_AccessLogFormatter(benchmark::State& state) {\n  std::unique_ptr<Envoy::TestStreamInfo> stream_info = makeStreamInfo();\n  static const char* LogFormat =\n      \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% %START_TIME(%Y/%m/%dT%H:%M:%S%z %s)% \"\n      \"%REQ(:METHOD)% \"\n      \"%REQ(X-FORWARDED-PROTO)%://%REQ(:AUTHORITY)%%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% \"\n      \"s%RESPONSE_CODE% %BYTES_SENT% %DURATION% %REQ(REFERER)% \\\"%REQ(USER-AGENT)%\\\" - - -\\n\";\n\n  std::unique_ptr<Envoy::Formatter::FormatterImpl> formatter =\n      
std::make_unique<Envoy::Formatter::FormatterImpl>(LogFormat, false);\n\n  size_t output_bytes = 0;\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  std::string body;\n  for (auto _ : state) {\n    output_bytes +=\n        formatter->format(request_headers, response_headers, response_trailers, *stream_info, body)\n            .length();\n  }\n  benchmark::DoNotOptimize(output_bytes);\n}\nBENCHMARK(BM_AccessLogFormatter);\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_JsonAccessLogFormatter(benchmark::State& state) {\n  std::unique_ptr<Envoy::TestStreamInfo> stream_info = makeStreamInfo();\n  std::unique_ptr<Envoy::Formatter::JsonFormatterImpl> json_formatter = makeJsonFormatter(false);\n\n  size_t output_bytes = 0;\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  std::string body;\n  for (auto _ : state) {\n    output_bytes +=\n        json_formatter\n            ->format(request_headers, response_headers, response_trailers, *stream_info, body)\n            .length();\n  }\n  benchmark::DoNotOptimize(output_bytes);\n}\nBENCHMARK(BM_JsonAccessLogFormatter);\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_TypedJsonAccessLogFormatter(benchmark::State& state) {\n  std::unique_ptr<Envoy::TestStreamInfo> stream_info = makeStreamInfo();\n  std::unique_ptr<Envoy::Formatter::JsonFormatterImpl> typed_json_formatter =\n      makeJsonFormatter(true);\n\n  size_t output_bytes = 0;\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  std::string body;\n  for (auto _ : state) {\n    output_bytes +=\n        typed_json_formatter\n            ->format(request_headers, response_headers, response_trailers, 
*stream_info, body)\n            .length();\n  }\n  benchmark::DoNotOptimize(output_bytes);\n}\nBENCHMARK(BM_TypedJsonAccessLogFormatter);\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/formatter/substitution_formatter_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/formatter/substitution_formatter.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/string_accessor_impl.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Const;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Formatter {\nnamespace {\n\nclass TestSerializedUnknownFilterState : public StreamInfo::FilterState::Object {\npublic:\n  ProtobufTypes::MessagePtr serializeAsProto() const override {\n    auto any = std::make_unique<ProtobufWkt::Any>();\n    any->set_type_url(\"UnknownType\");\n    any->set_value(\"\\xde\\xad\\xbe\\xef\");\n    return any;\n  }\n};\n\nclass TestSerializedStructFilterState : public StreamInfo::FilterState::Object {\npublic:\n  TestSerializedStructFilterState() : use_struct_(true) {\n    (*struct_.mutable_fields())[\"inner_key\"] = ValueUtil::stringValue(\"inner_value\");\n  }\n\n  explicit TestSerializedStructFilterState(const ProtobufWkt::Struct& s) : use_struct_(true) {\n    struct_.CopyFrom(s);\n  }\n\n  explicit TestSerializedStructFilterState(std::chrono::seconds seconds) {\n    duration_.set_seconds(seconds.count());\n  }\n\n  ProtobufTypes::MessagePtr serializeAsProto() const override {\n    if (use_struct_) {\n      auto s = 
std::make_unique<ProtobufWkt::Struct>();\n      s->CopyFrom(struct_);\n      return s;\n    }\n\n    auto d = std::make_unique<ProtobufWkt::Duration>();\n    d->CopyFrom(duration_);\n    return d;\n  }\n\nprivate:\n  const bool use_struct_{false};\n  ProtobufWkt::Struct struct_;\n  ProtobufWkt::Duration duration_;\n};\n\n// Class used to test serializeAsString and serializeAsProto of FilterState\nclass TestSerializedStringFilterState : public StreamInfo::FilterState::Object {\npublic:\n  TestSerializedStringFilterState(std::string str) : raw_string_(str) {}\n  absl::optional<std::string> serializeAsString() const override {\n    return raw_string_ + \" By PLAIN\";\n  }\n  ProtobufTypes::MessagePtr serializeAsProto() const override {\n    auto message = std::make_unique<ProtobufWkt::StringValue>();\n    message->set_value(raw_string_ + \" By TYPED\");\n    return message;\n  }\n\nprivate:\n  std::string raw_string_;\n};\n\nTEST(SubstitutionFormatUtilsTest, protocolToString) {\n  EXPECT_EQ(\"HTTP/1.0\",\n            SubstitutionFormatUtils::protocolToString(Http::Protocol::Http10).value().get());\n  EXPECT_EQ(\"HTTP/1.1\",\n            SubstitutionFormatUtils::protocolToString(Http::Protocol::Http11).value().get());\n  EXPECT_EQ(\"HTTP/2\",\n            SubstitutionFormatUtils::protocolToString(Http::Protocol::Http2).value().get());\n  EXPECT_EQ(absl::nullopt, SubstitutionFormatUtils::protocolToString({}));\n}\n\nTEST(SubstitutionFormatUtilsTest, protocolToStringOrDefault) {\n  EXPECT_EQ(\"HTTP/1.0\", SubstitutionFormatUtils::protocolToStringOrDefault(Http::Protocol::Http10));\n  EXPECT_EQ(\"HTTP/1.1\", SubstitutionFormatUtils::protocolToStringOrDefault(Http::Protocol::Http11));\n  EXPECT_EQ(\"HTTP/2\", SubstitutionFormatUtils::protocolToStringOrDefault(Http::Protocol::Http2));\n  EXPECT_EQ(\"-\", SubstitutionFormatUtils::protocolToStringOrDefault({}));\n}\n\nTEST(SubstitutionFormatterTest, plainStringFormatter) {\n  PlainStringFormatter formatter(\"plain\");\n  
Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  StreamInfo::MockStreamInfo stream_info;\n  std::string body;\n\n  EXPECT_EQ(\"plain\", formatter.format(request_headers, response_headers, response_trailers,\n                                      stream_info, body));\n  EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                    stream_info, body),\n              ProtoEq(ValueUtil::stringValue(\"plain\")));\n}\n\nTEST(SubstitutionFormatterTest, streamInfoFormatter) {\n  EXPECT_THROW(StreamInfoFormatter formatter(\"unknown_field\"), EnvoyException);\n\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  std::string body;\n\n  {\n    StreamInfoFormatter request_duration_format(\"REQUEST_DURATION\");\n    absl::optional<std::chrono::nanoseconds> dur = std::chrono::nanoseconds(5000000);\n    EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillRepeatedly(Return(dur));\n    EXPECT_EQ(\"5\", request_duration_format.format(request_headers, response_headers,\n                                                  response_trailers, stream_info, body));\n    EXPECT_THAT(request_duration_format.formatValue(request_headers, response_headers,\n                                                    response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::numberValue(5.0)));\n  }\n\n  {\n    StreamInfoFormatter request_duration_format(\"REQUEST_DURATION\");\n    absl::optional<std::chrono::nanoseconds> dur;\n    EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillRepeatedly(Return(dur));\n    EXPECT_EQ(absl::nullopt, 
request_duration_format.format(request_headers, response_headers,\n                                                            response_trailers, stream_info, body));\n    EXPECT_THAT(request_duration_format.formatValue(request_headers, response_headers,\n                                                    response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    StreamInfoFormatter response_duration_format(\"RESPONSE_DURATION\");\n    absl::optional<std::chrono::nanoseconds> dur = std::chrono::nanoseconds(10000000);\n    EXPECT_CALL(stream_info, firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur));\n    EXPECT_EQ(\"10\", response_duration_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(response_duration_format.formatValue(request_headers, response_headers,\n                                                     response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::numberValue(10.0)));\n  }\n\n  {\n    StreamInfoFormatter response_duration_format(\"RESPONSE_DURATION\");\n    absl::optional<std::chrono::nanoseconds> dur;\n    EXPECT_CALL(stream_info, firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur));\n    EXPECT_EQ(absl::nullopt, response_duration_format.format(request_headers, response_headers,\n                                                             response_trailers, stream_info, body));\n    EXPECT_THAT(response_duration_format.formatValue(request_headers, response_headers,\n                                                     response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    StreamInfoFormatter ttlb_duration_format(\"RESPONSE_TX_DURATION\");\n\n    absl::optional<std::chrono::nanoseconds> dur_upstream = std::chrono::nanoseconds(10000000);\n    EXPECT_CALL(stream_info, 
firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur_upstream));\n    absl::optional<std::chrono::nanoseconds> dur_downstream = std::chrono::nanoseconds(25000000);\n    EXPECT_CALL(stream_info, lastDownstreamTxByteSent()).WillRepeatedly(Return(dur_downstream));\n\n    EXPECT_EQ(\"15\", ttlb_duration_format.format(request_headers, response_headers,\n                                                response_trailers, stream_info, body));\n    EXPECT_THAT(ttlb_duration_format.formatValue(request_headers, response_headers,\n                                                 response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::numberValue(15.0)));\n  }\n\n  {\n    StreamInfoFormatter ttlb_duration_format(\"RESPONSE_TX_DURATION\");\n\n    absl::optional<std::chrono::nanoseconds> dur_upstream;\n    EXPECT_CALL(stream_info, firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur_upstream));\n    absl::optional<std::chrono::nanoseconds> dur_downstream;\n    EXPECT_CALL(stream_info, lastDownstreamTxByteSent()).WillRepeatedly(Return(dur_downstream));\n\n    EXPECT_EQ(absl::nullopt, ttlb_duration_format.format(request_headers, response_headers,\n                                                         response_trailers, stream_info, body));\n    EXPECT_THAT(ttlb_duration_format.formatValue(request_headers, response_headers,\n                                                 response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    StreamInfoFormatter bytes_received_format(\"BYTES_RECEIVED\");\n    EXPECT_CALL(stream_info, bytesReceived()).WillRepeatedly(Return(1));\n    EXPECT_EQ(\"1\", bytes_received_format.format(request_headers, response_headers,\n                                                response_trailers, stream_info, body));\n    EXPECT_THAT(bytes_received_format.formatValue(request_headers, response_headers,\n                                                  response_trailers, stream_info, 
body),\n                ProtoEq(ValueUtil::numberValue(1.0)));\n  }\n\n  {\n    StreamInfoFormatter protocol_format(\"PROTOCOL\");\n    absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;\n    EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));\n    EXPECT_EQ(\"HTTP/1.1\", protocol_format.format(request_headers, response_headers,\n                                                 response_trailers, stream_info, body));\n    EXPECT_THAT(protocol_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"HTTP/1.1\")));\n  }\n\n  {\n    StreamInfoFormatter response_format(\"RESPONSE_CODE\");\n    absl::optional<uint32_t> response_code{200};\n    EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(Return(response_code));\n    EXPECT_EQ(\"200\", response_format.format(request_headers, response_headers, response_trailers,\n                                            stream_info, body));\n    EXPECT_THAT(response_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::numberValue(200.0)));\n  }\n\n  {\n    StreamInfoFormatter response_code_format(\"RESPONSE_CODE\");\n    absl::optional<uint32_t> response_code;\n    EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(Return(response_code));\n    EXPECT_EQ(\"0\", response_code_format.format(request_headers, response_headers, response_trailers,\n                                               stream_info, body));\n    EXPECT_THAT(response_code_format.formatValue(request_headers, response_headers,\n                                                 response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::numberValue(0.0)));\n  }\n\n  {\n    StreamInfoFormatter response_format(\"RESPONSE_CODE_DETAILS\");\n    
absl::optional<std::string> rc_details;\n    EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details));\n    EXPECT_EQ(absl::nullopt, response_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(response_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    StreamInfoFormatter response_code_format(\"RESPONSE_CODE_DETAILS\");\n    absl::optional<std::string> rc_details{\"via_upstream\"};\n    EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details));\n    EXPECT_EQ(\"via_upstream\", response_code_format.format(request_headers, response_headers,\n                                                          response_trailers, stream_info, body));\n    EXPECT_THAT(response_code_format.formatValue(request_headers, response_headers,\n                                                 response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"via_upstream\")));\n  }\n\n  {\n    StreamInfoFormatter termination_details_format(\"CONNECTION_TERMINATION_DETAILS\");\n    absl::optional<std::string> details;\n    EXPECT_CALL(stream_info, connectionTerminationDetails()).WillRepeatedly(ReturnRef(details));\n    EXPECT_EQ(absl::nullopt,\n              termination_details_format.format(request_headers, response_headers,\n                                                response_trailers, stream_info, body));\n    EXPECT_THAT(termination_details_format.formatValue(request_headers, response_headers,\n                                                       response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    StreamInfoFormatter termination_details_format(\"CONNECTION_TERMINATION_DETAILS\");\n    
absl::optional<std::string> details{\"access_denied\"};\n    EXPECT_CALL(stream_info, connectionTerminationDetails()).WillRepeatedly(ReturnRef(details));\n    EXPECT_EQ(\"access_denied\",\n              termination_details_format.format(request_headers, response_headers,\n                                                response_trailers, stream_info, body));\n    EXPECT_THAT(termination_details_format.formatValue(request_headers, response_headers,\n                                                       response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"access_denied\")));\n  }\n\n  {\n    StreamInfoFormatter bytes_sent_format(\"BYTES_SENT\");\n    EXPECT_CALL(stream_info, bytesSent()).WillRepeatedly(Return(1));\n    EXPECT_EQ(\"1\", bytes_sent_format.format(request_headers, response_headers, response_trailers,\n                                            stream_info, body));\n    EXPECT_THAT(bytes_sent_format.formatValue(request_headers, response_headers, response_trailers,\n                                              stream_info, body),\n                ProtoEq(ValueUtil::numberValue(1.0)));\n  }\n\n  {\n    StreamInfoFormatter duration_format(\"DURATION\");\n    absl::optional<std::chrono::nanoseconds> dur = std::chrono::nanoseconds(15000000);\n    EXPECT_CALL(stream_info, requestComplete()).WillRepeatedly(Return(dur));\n    EXPECT_EQ(\"15\", duration_format.format(request_headers, response_headers, response_trailers,\n                                           stream_info, body));\n    EXPECT_THAT(duration_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::numberValue(15.0)));\n  }\n\n  {\n    StreamInfoFormatter response_flags_format(\"RESPONSE_FLAGS\");\n    ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::LocalReset))\n        .WillByDefault(Return(true));\n    EXPECT_EQ(\"LR\", 
response_flags_format.format(request_headers, response_headers,\n                                                 response_trailers, stream_info, body));\n    EXPECT_THAT(response_flags_format.formatValue(request_headers, response_headers,\n                                                  response_trailers, stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"LR\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"UPSTREAM_HOST\");\n    EXPECT_EQ(\"10.0.0.1:443\", upstream_format.format(request_headers, response_headers,\n                                                     response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"10.0.0.1:443\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"UPSTREAM_CLUSTER\");\n    const std::string upstream_cluster_name = \"cluster_name\";\n    EXPECT_CALL(stream_info.host_->cluster_, name())\n        .WillRepeatedly(ReturnRef(upstream_cluster_name));\n    EXPECT_EQ(\"cluster_name\", upstream_format.format(request_headers, response_headers,\n                                                     response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"cluster_name\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"UPSTREAM_HOST\");\n    EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n              
                              stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    NiceMock<Api::MockOsSysCalls> os_sys_calls;\n    TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n    EXPECT_CALL(os_sys_calls, gethostname(_, _))\n        .WillOnce(Invoke([](char*, size_t) -> Api::SysCallIntResult {\n          return {-1, ENAMETOOLONG};\n        }));\n\n    StreamInfoFormatter upstream_format(\"HOSTNAME\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    NiceMock<Api::MockOsSysCalls> os_sys_calls;\n    TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n    EXPECT_CALL(os_sys_calls, gethostname(_, _))\n        .WillOnce(Invoke([](char* name, size_t) -> Api::SysCallIntResult {\n          StringUtil::strlcpy(name, \"myhostname\", 11);\n          return {0, 0};\n        }));\n\n    StreamInfoFormatter upstream_format(\"HOSTNAME\");\n    EXPECT_EQ(\"myhostname\", upstream_format.format(request_headers, response_headers,\n                                                   response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"myhostname\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"UPSTREAM_CLUSTER\");\n    EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                              
      response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_ADDRESS\");\n    EXPECT_EQ(\"127.0.0.2:0\", upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"127.0.0.2:0\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT\");\n    EXPECT_EQ(\"127.0.0.2\", upstream_format.format(request_headers, response_headers,\n                                                  response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"127.0.0.2\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_PORT\");\n\n    // Validate for IPv4 address\n    auto address = Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv4Instance(\"127.1.2.3\", 8443)};\n    EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address));\n    EXPECT_EQ(\"8443\", upstream_format.format(request_headers, response_headers, response_trailers,\n                                             stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                
ProtoEq(ValueUtil::stringValue(\"8443\")));\n\n    // Validate for IPv6 address\n    address =\n        Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance(\"::1\", 9443)};\n    EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address));\n    EXPECT_EQ(\"9443\", upstream_format.format(request_headers, response_headers, response_trailers,\n                                             stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"9443\")));\n\n    // Validate for Pipe\n    address = Network::Address::InstanceConstSharedPtr{new Network::Address::PipeInstance(\"/foo\")};\n    EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address));\n    EXPECT_EQ(\"\", upstream_format.format(request_headers, response_headers, response_trailers,\n                                         stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT\");\n    EXPECT_EQ(\"127.0.0.1\", upstream_format.format(request_headers, response_headers,\n                                                  response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"127.0.0.1\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_REMOTE_ADDRESS\");\n    EXPECT_EQ(\"127.0.0.1:0\", upstream_format.format(request_headers, response_headers,\n                             
                       response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"127.0.0.1:0\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT\");\n    EXPECT_EQ(\"127.0.0.1\", upstream_format.format(request_headers, response_headers,\n                                                  response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"127.0.0.1\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_DIRECT_REMOTE_ADDRESS\");\n    EXPECT_EQ(\"127.0.0.1:0\", upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"127.0.0.1:0\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"CONNECTION_ID\");\n    uint64_t id = 123;\n    EXPECT_CALL(stream_info, connectionID()).WillRepeatedly(Return(id));\n    EXPECT_EQ(\"123\", upstream_format.format(request_headers, response_headers, response_trailers,\n                                            stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::numberValue(id)));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"REQUESTED_SERVER_NAME\");\n    std::string requested_server_name = 
\"stub_server\";\n    EXPECT_CALL(stream_info, requestedServerName())\n        .WillRepeatedly(ReturnRef(requested_server_name));\n    EXPECT_EQ(\"stub_server\", upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"stub_server\")));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"REQUESTED_SERVER_NAME\");\n    std::string requested_server_name;\n    EXPECT_CALL(stream_info, requestedServerName())\n        .WillRepeatedly(ReturnRef(requested_server_name));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_URI_SAN\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    const std::vector<std::string> sans{\"san\"};\n    EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillRepeatedly(Return(sans));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"san\", upstream_format.format(request_headers, response_headers, response_trailers,\n                                            stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"san\")));\n  }\n\n  {\n    StreamInfoFormatter 
upstream_format(\"DOWNSTREAM_PEER_URI_SAN\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    const std::vector<std::string> sans{\"san1\", \"san2\"};\n    EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillRepeatedly(Return(sans));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"san1,san2\", upstream_format.format(request_headers, response_headers,\n                                                  response_trailers, stream_info, body));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_URI_SAN\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, uriSanPeerCertificate())\n        .WillRepeatedly(Return(std::vector<std::string>()));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_URI_SAN\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_URI_SAN\");\n    auto connection_info = 
std::make_shared<Ssl::MockConnectionInfo>();\n    const std::vector<std::string> sans{\"san\"};\n    EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillRepeatedly(Return(sans));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"san\", upstream_format.format(request_headers, response_headers, response_trailers,\n                                            stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"san\")));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_URI_SAN\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    const std::vector<std::string> sans{\"san1\", \"san2\"};\n    EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillRepeatedly(Return(sans));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"san1,san2\", upstream_format.format(request_headers, response_headers,\n                                                  response_trailers, stream_info, body));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_URI_SAN\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, uriSanLocalCertificate())\n        .WillRepeatedly(Return(std::vector<std::string>()));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                
ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_URI_SAN\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_SUBJECT\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    const std::string subject_local = \"subject\";\n    EXPECT_CALL(*connection_info, subjectLocalCertificate())\n        .WillRepeatedly(ReturnRef(subject_local));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"subject\", upstream_format.format(request_headers, response_headers,\n                                                response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"subject\")));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_SUBJECT\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, subjectLocalCertificate())\n        .WillRepeatedly(ReturnRef(EMPTY_STRING));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    
EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_LOCAL_SUBJECT\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_SUBJECT\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    const std::string subject_peer = \"subject\";\n    EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"subject\", upstream_format.format(request_headers, response_headers,\n                                                response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"subject\")));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_SUBJECT\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, 
upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_SUBJECT\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_TLS_SESSION_ID\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    const std::string session_id = \"deadbeef\";\n    EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(session_id));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"deadbeef\", upstream_format.format(request_headers, response_headers,\n                                                 response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"deadbeef\")));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_TLS_SESSION_ID\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(EMPTY_STRING));\n    
EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_TLS_SESSION_ID\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_TLS_CIPHER\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, ciphersuiteString())\n        .WillRepeatedly(Return(\"TLS_DHE_RSA_WITH_AES_256_GCM_SHA384\"));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"TLS_DHE_RSA_WITH_AES_256_GCM_SHA384\",\n              upstream_format.format(request_headers, response_headers, response_trailers,\n                                     stream_info, body));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_TLS_CIPHER\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, ciphersuiteString()).WillRepeatedly(Return(\"\"));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    
EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_TLS_CIPHER\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_TLS_VERSION\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    std::string tlsVersion = \"TLSv1.2\";\n    EXPECT_CALL(*connection_info, tlsVersion()).WillRepeatedly(ReturnRef(tlsVersion));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"TLSv1.2\", upstream_format.format(request_headers, response_headers,\n                                                response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"TLSv1.2\")));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_TLS_VERSION\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, tlsVersion()).WillRepeatedly(ReturnRef(EMPTY_STRING));\n    
EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_TLS_VERSION\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_FINGERPRINT_256\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    std::string expected_sha = \"685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f\";\n    EXPECT_CALL(*connection_info, sha256PeerCertificateDigest())\n        .WillRepeatedly(ReturnRef(expected_sha));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(expected_sha, upstream_format.format(request_headers, response_headers,\n                                                   response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(expected_sha)));\n  }\n  {\n    StreamInfoFormatter 
upstream_format(\"DOWNSTREAM_PEER_FINGERPRINT_256\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    std::string expected_sha;\n    EXPECT_CALL(*connection_info, sha256PeerCertificateDigest())\n        .WillRepeatedly(ReturnRef(expected_sha));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_FINGERPRINT_256\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_FINGERPRINT_1\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    std::string expected_sha = \"685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f\";\n    EXPECT_CALL(*connection_info, sha1PeerCertificateDigest())\n        .WillRepeatedly(ReturnRef(expected_sha));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(expected_sha, upstream_format.format(request_headers, response_headers,\n                                                   response_trailers, stream_info, body));\n 
   EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(expected_sha)));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_FINGERPRINT_1\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    std::string expected_sha;\n    EXPECT_CALL(*connection_info, sha1PeerCertificateDigest())\n        .WillRepeatedly(ReturnRef(expected_sha));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_FINGERPRINT_1\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_SERIAL\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    const std::string serial_number = \"b8b5ecc898f2124a\";\n    EXPECT_CALL(*connection_info, serialNumberPeerCertificate())\n        .WillRepeatedly(ReturnRef(serial_number));\n    EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"b8b5ecc898f2124a\", upstream_format.format(request_headers, response_headers,\n                                                         response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"b8b5ecc898f2124a\")));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_SERIAL\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, serialNumberPeerCertificate())\n        .WillRepeatedly(ReturnRef(EMPTY_STRING));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_SERIAL\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_ISSUER\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    const 
std::string issuer_peer =\n        \"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\";\n    EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillRepeatedly(ReturnRef(issuer_peer));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\",\n              upstream_format.format(request_headers, response_headers, response_trailers,\n                                     stream_info, body));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_ISSUER\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_ISSUER\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_SUBJECT\");\n    auto connection_info = 
std::make_shared<Ssl::MockConnectionInfo>();\n    const std::string subject_peer =\n        \"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\";\n    EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\",\n              upstream_format.format(request_headers, response_headers, response_trailers,\n                                     stream_info, body));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_SUBJECT\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_SUBJECT\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter 
upstream_format(\"DOWNSTREAM_PEER_CERT\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    std::string expected_cert = \"<some cert>\";\n    EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate())\n        .WillRepeatedly(ReturnRef(expected_cert));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(expected_cert, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(expected_cert)));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_CERT\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    std::string expected_cert = \"\";\n    EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate())\n        .WillRepeatedly(ReturnRef(expected_cert));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_CERT\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    
EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_CERT_V_START\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    absl::Time abslStartTime =\n        TestUtility::parseTime(\"Dec 18 01:50:34 2018 GMT\", \"%b %e %H:%M:%S %Y GMT\");\n    SystemTime startTime = absl::ToChronoTime(abslStartTime);\n    EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(startTime));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"2018-12-18T01:50:34.000Z\",\n              upstream_format.format(request_headers, response_headers, response_trailers,\n                                     stream_info, body));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_CERT_V_START\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(absl::nullopt));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_CERT_V_START\");\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                  
                  response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_CERT_V_END\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    absl::Time abslEndTime =\n        TestUtility::parseTime(\"Dec 17 01:50:34 2020 GMT\", \"%b %e %H:%M:%S %Y GMT\");\n    SystemTime endTime = absl::ToChronoTime(abslEndTime);\n    EXPECT_CALL(*connection_info, expirationPeerCertificate()).WillRepeatedly(Return(endTime));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(\"2020-12-17T01:50:34.000Z\",\n              upstream_format.format(request_headers, response_headers, response_trailers,\n                                     stream_info, body));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_CERT_V_END\");\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, expirationPeerCertificate())\n        .WillRepeatedly(Return(absl::nullopt));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n    StreamInfoFormatter upstream_format(\"DOWNSTREAM_PEER_CERT_V_END\");\n    EXPECT_EQ(absl::nullopt, 
upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"UPSTREAM_TRANSPORT_FAILURE_REASON\");\n    std::string upstream_transport_failure_reason = \"SSL error\";\n    EXPECT_CALL(stream_info, upstreamTransportFailureReason())\n        .WillRepeatedly(ReturnRef(upstream_transport_failure_reason));\n    EXPECT_EQ(\"SSL error\", upstream_format.format(request_headers, response_headers,\n                                                  response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"SSL error\")));\n  }\n  {\n    StreamInfoFormatter upstream_format(\"UPSTREAM_TRANSPORT_FAILURE_REASON\");\n    std::string upstream_transport_failure_reason;\n    EXPECT_CALL(stream_info, upstreamTransportFailureReason())\n        .WillRepeatedly(ReturnRef(upstream_transport_failure_reason));\n    EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers,\n                                                    response_trailers, stream_info, body));\n    EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers,\n                                            stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n}\n\nTEST(SubstitutionFormatterTest, requestHeaderFormatter) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl 
response_header{{\":method\", \"PUT\"}};\n  Http::TestResponseTrailerMapImpl response_trailer{{\":method\", \"POST\"}, {\"test-2\", \"test-2\"}};\n  std::string body;\n\n  {\n    RequestHeaderFormatter formatter(\":Method\", \"\", absl::optional<size_t>());\n    EXPECT_EQ(\"GET\", formatter.format(request_header, response_header, response_trailer,\n                                      stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"GET\")));\n  }\n\n  {\n    RequestHeaderFormatter formatter(\":path\", \":method\", absl::optional<size_t>());\n    EXPECT_EQ(\"/\", formatter.format(request_header, response_header, response_trailer, stream_info,\n                                    body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"/\")));\n  }\n\n  {\n    RequestHeaderFormatter formatter(\":TEST\", \":METHOD\", absl::optional<size_t>());\n    EXPECT_EQ(\"GET\", formatter.format(request_header, response_header, response_trailer,\n                                      stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"GET\")));\n  }\n\n  {\n    RequestHeaderFormatter formatter(\"does_not_exist\", \"\", absl::optional<size_t>());\n    EXPECT_EQ(absl::nullopt, formatter.format(request_header, response_header, response_trailer,\n                                              stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    RequestHeaderFormatter formatter(\":Method\", \"\", absl::optional<size_t>(2));\n    EXPECT_EQ(\"GE\", 
formatter.format(request_header, response_header, response_trailer, stream_info,\n                                     body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"GE\")));\n  }\n}\n\nTEST(SubstitutionFormatterTest, responseHeaderFormatter) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_header{{\":method\", \"PUT\"}, {\"test\", \"test\"}};\n  Http::TestResponseTrailerMapImpl response_trailer{{\":method\", \"POST\"}, {\"test-2\", \"test-2\"}};\n  std::string body;\n\n  {\n    ResponseHeaderFormatter formatter(\":method\", \"\", absl::optional<size_t>());\n    EXPECT_EQ(\"PUT\", formatter.format(request_header, response_header, response_trailer,\n                                      stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"PUT\")));\n  }\n\n  {\n    ResponseHeaderFormatter formatter(\"test\", \":method\", absl::optional<size_t>());\n    EXPECT_EQ(\"test\", formatter.format(request_header, response_header, response_trailer,\n                                       stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"test\")));\n  }\n\n  {\n    ResponseHeaderFormatter formatter(\":path\", \":method\", absl::optional<size_t>());\n    EXPECT_EQ(\"PUT\", formatter.format(request_header, response_header, response_trailer,\n                                      stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        
ProtoEq(ValueUtil::stringValue(\"PUT\")));\n  }\n\n  {\n    ResponseHeaderFormatter formatter(\"does_not_exist\", \"\", absl::optional<size_t>());\n    EXPECT_EQ(absl::nullopt, formatter.format(request_header, response_header, response_trailer,\n                                              stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    ResponseHeaderFormatter formatter(\":method\", \"\", absl::optional<size_t>(2));\n    EXPECT_EQ(\"PU\", formatter.format(request_header, response_header, response_trailer, stream_info,\n                                     body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"PU\")));\n  }\n}\n\nTEST(SubstitutionFormatterTest, responseTrailerFormatter) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_header{{\":method\", \"PUT\"}, {\"test\", \"test\"}};\n  Http::TestResponseTrailerMapImpl response_trailer{{\":method\", \"POST\"}, {\"test-2\", \"test-2\"}};\n  std::string body;\n\n  {\n    ResponseTrailerFormatter formatter(\":method\", \"\", absl::optional<size_t>());\n    EXPECT_EQ(\"POST\", formatter.format(request_header, response_header, response_trailer,\n                                       stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"POST\")));\n  }\n\n  {\n    ResponseTrailerFormatter formatter(\"test-2\", \":method\", absl::optional<size_t>());\n    EXPECT_EQ(\"test-2\", formatter.format(request_header, response_header, response_trailer,\n                                         
stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"test-2\")));\n  }\n\n  {\n    ResponseTrailerFormatter formatter(\":path\", \":method\", absl::optional<size_t>());\n    EXPECT_EQ(\"POST\", formatter.format(request_header, response_header, response_trailer,\n                                       stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"POST\")));\n  }\n\n  {\n    ResponseTrailerFormatter formatter(\"does_not_exist\", \"\", absl::optional<size_t>());\n    EXPECT_EQ(absl::nullopt, formatter.format(request_header, response_header, response_trailer,\n                                              stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::nullValue()));\n  }\n\n  {\n    ResponseTrailerFormatter formatter(\":method\", \"\", absl::optional<size_t>(2));\n    EXPECT_EQ(\"PO\", formatter.format(request_header, response_header, response_trailer, stream_info,\n                                     body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"PO\")));\n  }\n}\n\n/**\n * Populate a metadata object with the following test data:\n * \"com.test\": {\"test_key\":\"test_value\",\"test_obj\":{\"inner_key\":\"inner_value\"}}\n */\nvoid populateMetadataTestData(envoy::config::core::v3::Metadata& metadata) {\n  ProtobufWkt::Struct struct_obj;\n  auto& fields_map = *struct_obj.mutable_fields();\n  fields_map[\"test_key\"] = ValueUtil::stringValue(\"test_value\");\n  ProtobufWkt::Struct struct_inner;\n  (*struct_inner.mutable_fields())[\"inner_key\"] = 
ValueUtil::stringValue(\"inner_value\");\n  ProtobufWkt::Value val;\n  *val.mutable_struct_value() = struct_inner;\n  fields_map[\"test_obj\"] = val;\n  (*metadata.mutable_filter_metadata())[\"com.test\"] = struct_obj;\n}\n\nTEST(SubstitutionFormatterTest, DynamicMetadataFormatter) {\n  envoy::config::core::v3::Metadata metadata;\n  populateMetadataTestData(metadata);\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n  EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  std::string body;\n\n  {\n    DynamicMetadataFormatter formatter(\"com.test\", {}, absl::optional<size_t>());\n    std::string val =\n        formatter.format(request_headers, response_headers, response_trailers, stream_info, body)\n            .value();\n    EXPECT_TRUE(val.find(\"\\\"test_key\\\":\\\"test_value\\\"\") != std::string::npos);\n    EXPECT_TRUE(val.find(\"\\\"test_obj\\\":{\\\"inner_key\\\":\\\"inner_value\\\"}\") != std::string::npos);\n\n    ProtobufWkt::Value expected_val;\n    expected_val.mutable_struct_value()->CopyFrom(metadata.filter_metadata().at(\"com.test\"));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(expected_val));\n  }\n  {\n    DynamicMetadataFormatter formatter(\"com.test\", {\"test_key\"}, absl::optional<size_t>());\n    EXPECT_EQ(\"\\\"test_value\\\"\", formatter.format(request_headers, response_headers,\n                                                 response_trailers, stream_info, body));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n            
    ProtoEq(ValueUtil::stringValue(\"test_value\")));\n  }\n  {\n    DynamicMetadataFormatter formatter(\"com.test\", {\"test_obj\"}, absl::optional<size_t>());\n    EXPECT_EQ(\n        \"{\\\"inner_key\\\":\\\"inner_value\\\"}\",\n        formatter.format(request_headers, response_headers, response_trailers, stream_info, body));\n\n    ProtobufWkt::Value expected_val;\n    (*expected_val.mutable_struct_value()->mutable_fields())[\"inner_key\"] =\n        ValueUtil::stringValue(\"inner_value\");\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(expected_val));\n  }\n  {\n    DynamicMetadataFormatter formatter(\"com.test\", {\"test_obj\", \"inner_key\"},\n                                       absl::optional<size_t>());\n    EXPECT_EQ(\"\\\"inner_value\\\"\", formatter.format(request_headers, response_headers,\n                                                  response_trailers, stream_info, body));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"inner_value\")));\n  }\n\n  // not found cases\n  {\n    DynamicMetadataFormatter formatter(\"com.notfound\", {}, absl::optional<size_t>());\n    EXPECT_EQ(absl::nullopt, formatter.format(request_headers, response_headers, response_trailers,\n                                              stream_info, body));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    DynamicMetadataFormatter formatter(\"com.test\", {\"notfound\"}, absl::optional<size_t>());\n    EXPECT_EQ(absl::nullopt, formatter.format(request_headers, response_headers, response_trailers,\n                                     
         stream_info, body));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n  {\n    DynamicMetadataFormatter formatter(\"com.test\", {\"test_obj\", \"notfound\"},\n                                       absl::optional<size_t>());\n    EXPECT_EQ(absl::nullopt, formatter.format(request_headers, response_headers, response_trailers,\n                                              stream_info, body));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  // size limit\n  {\n    DynamicMetadataFormatter formatter(\"com.test\", {\"test_key\"}, absl::optional<size_t>(5));\n    EXPECT_EQ(\"\\\"test\", formatter.format(request_headers, response_headers, response_trailers,\n                                         stream_info, body));\n\n    // N.B. 
Does not truncate.\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"test_value\")));\n  }\n}\n\nTEST(SubstitutionFormatterTest, FilterStateFormatter) {\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  StreamInfo::MockStreamInfo stream_info;\n  std::string body;\n\n  stream_info.filter_state_->setData(\"key\",\n                                     std::make_unique<Router::StringAccessorImpl>(\"test_value\"),\n                                     StreamInfo::FilterState::StateType::ReadOnly);\n  stream_info.filter_state_->setData(\"key-struct\",\n                                     std::make_unique<TestSerializedStructFilterState>(),\n                                     StreamInfo::FilterState::StateType::ReadOnly);\n  stream_info.filter_state_->setData(\"key-no-serialization\",\n                                     std::make_unique<StreamInfo::FilterState::Object>(),\n                                     StreamInfo::FilterState::StateType::ReadOnly);\n  stream_info.filter_state_->setData(\n      \"key-serialization-error\",\n      std::make_unique<TestSerializedStructFilterState>(std::chrono::seconds(-281474976710656)),\n      StreamInfo::FilterState::StateType::ReadOnly);\n  stream_info.filter_state_->setData(\n      \"test_key\", std::make_unique<TestSerializedStringFilterState>(\"test_value\"),\n      StreamInfo::FilterState::StateType::ReadOnly);\n  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));\n\n  {\n    FilterStateFormatter formatter(\"key\", absl::optional<size_t>(), false);\n\n    EXPECT_EQ(\"\\\"test_value\\\"\", formatter.format(request_headers, response_headers,\n                                                 response_trailers, stream_info, body));\n    
EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"test_value\")));\n  }\n  {\n    FilterStateFormatter formatter(\"key-struct\", absl::optional<size_t>(), false);\n\n    EXPECT_EQ(\n        \"{\\\"inner_key\\\":\\\"inner_value\\\"}\",\n        formatter.format(request_headers, response_headers, response_trailers, stream_info, body));\n\n    ProtobufWkt::Value expected;\n    (*expected.mutable_struct_value()->mutable_fields())[\"inner_key\"] =\n        ValueUtil::stringValue(\"inner_value\");\n\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(expected));\n  }\n\n  // not found case\n  {\n    FilterStateFormatter formatter(\"key-not-found\", absl::optional<size_t>(), false);\n\n    EXPECT_EQ(absl::nullopt, formatter.format(request_headers, response_headers, response_trailers,\n                                              stream_info, body));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  // no serialization case\n  {\n    FilterStateFormatter formatter(\"key-no-serialization\", absl::optional<size_t>(), false);\n\n    EXPECT_EQ(absl::nullopt, formatter.format(request_headers, response_headers, response_trailers,\n                                              stream_info, body));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  // serialization error case\n  {\n    FilterStateFormatter formatter(\"key-serialization-error\", absl::optional<size_t>(), false);\n\n    
EXPECT_EQ(absl::nullopt, formatter.format(request_headers, response_headers, response_trailers,\n                                              stream_info, body));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::nullValue()));\n  }\n\n  // size limit\n  {\n    FilterStateFormatter formatter(\"key\", absl::optional<size_t>(5), false);\n\n    EXPECT_EQ(\"\\\"test\", formatter.format(request_headers, response_headers, response_trailers,\n                                         stream_info, body));\n\n    // N.B. Does not truncate.\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"test_value\")));\n  }\n\n  // serializeAsString case\n  {\n    FilterStateFormatter formatter(\"test_key\", absl::optional<size_t>(), true);\n\n    EXPECT_EQ(\"test_value By PLAIN\", formatter.format(request_headers, response_headers,\n                                                      response_trailers, stream_info, body));\n  }\n\n  // size limit for serializeAsString\n  {\n    FilterStateFormatter formatter(\"test_key\", absl::optional<size_t>(10), true);\n\n    EXPECT_EQ(\"test_value\", formatter.format(request_headers, response_headers, response_trailers,\n                                             stream_info, body));\n  }\n\n  // no serialization case for serializeAsString\n  {\n    FilterStateFormatter formatter(\"key-no-serialization\", absl::optional<size_t>(), true);\n\n    EXPECT_EQ(absl::nullopt, formatter.format(request_headers, response_headers, response_trailers,\n                                              stream_info, body));\n    EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers,\n                                      stream_info, body),\n     
           ProtoEq(ValueUtil::nullValue()));\n  }\n}\n\nTEST(SubstitutionFormatterTest, StartTimeFormatter) {\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  std::string body;\n\n  {\n    StartTimeFormatter start_time_format(\"%Y/%m/%d\");\n    time_t test_epoch = 1522280158;\n    SystemTime time = std::chrono::system_clock::from_time_t(test_epoch);\n    EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time));\n    EXPECT_EQ(\"2018/03/28\", start_time_format.format(request_headers, response_headers,\n                                                     response_trailers, stream_info, body));\n    EXPECT_THAT(start_time_format.formatValue(request_headers, response_headers, response_trailers,\n                                              stream_info, body),\n                ProtoEq(ValueUtil::stringValue(\"2018/03/28\")));\n  }\n\n  {\n    StartTimeFormatter start_time_format(\"\");\n    SystemTime time;\n    EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time));\n    EXPECT_EQ(AccessLogDateTimeFormatter::fromTime(time),\n              start_time_format.format(request_headers, response_headers, response_trailers,\n                                       stream_info, body));\n    EXPECT_THAT(start_time_format.formatValue(request_headers, response_headers, response_trailers,\n                                              stream_info, body),\n                ProtoEq(ValueUtil::stringValue(AccessLogDateTimeFormatter::fromTime(time))));\n  }\n}\n\nTEST(SubstitutionFormatterTest, GrpcStatusFormatterTest) {\n  GrpcStatusFormatter formatter(\"grpc-status\", \"\", absl::optional<size_t>());\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  Http::TestRequestHeaderMapImpl request_header;\n  Http::TestResponseHeaderMapImpl response_header;\n  
Http::TestResponseTrailerMapImpl response_trailer;\n  std::string body;\n\n  std::array<std::string, 17> grpc_statuses{\n      \"OK\",       \"Canceled\",       \"Unknown\",          \"InvalidArgument\",   \"DeadlineExceeded\",\n      \"NotFound\", \"AlreadyExists\",  \"PermissionDenied\", \"ResourceExhausted\", \"FailedPrecondition\",\n      \"Aborted\",  \"OutOfRange\",     \"Unimplemented\",    \"Internal\",          \"Unavailable\",\n      \"DataLoss\", \"Unauthenticated\"};\n  for (size_t i = 0; i < grpc_statuses.size(); ++i) {\n    response_trailer = Http::TestResponseTrailerMapImpl{{\"grpc-status\", std::to_string(i)}};\n    EXPECT_EQ(grpc_statuses[i], formatter.format(request_header, response_header, response_trailer,\n                                                 stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(grpc_statuses[i])));\n  }\n  {\n    response_trailer = Http::TestResponseTrailerMapImpl{{\"grpc-status\", \"-1\"}};\n    EXPECT_EQ(\"-1\", formatter.format(request_header, response_header, response_trailer, stream_info,\n                                     body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"-1\")));\n    response_trailer = Http::TestResponseTrailerMapImpl{{\"grpc-status\", \"42738\"}};\n    EXPECT_EQ(\"42738\", formatter.format(request_header, response_header, response_trailer,\n                                        stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"42738\")));\n    response_trailer.clear();\n  }\n  {\n    response_header = Http::TestResponseHeaderMapImpl{{\"grpc-status\", \"-1\"}};\n    EXPECT_EQ(\"-1\", 
formatter.format(request_header, response_header, response_trailer, stream_info,\n                                     body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"-1\")));\n    response_header = Http::TestResponseHeaderMapImpl{{\"grpc-status\", \"42738\"}};\n    EXPECT_EQ(\"42738\", formatter.format(request_header, response_header, response_trailer,\n                                        stream_info, body));\n    EXPECT_THAT(\n        formatter.formatValue(request_header, response_header, response_trailer, stream_info, body),\n        ProtoEq(ValueUtil::stringValue(\"42738\")));\n    response_header.clear();\n  }\n}\n\nvoid verifyJsonOutput(std::string json_string,\n                      absl::node_hash_map<std::string, std::string> expected_map) {\n  const auto parsed = Json::Factory::loadFromString(json_string);\n\n  // Every json log line should have only one newline character, and it should be the last character\n  // in the string\n  const auto newline_pos = json_string.find('\\n');\n  EXPECT_NE(newline_pos, std::string::npos);\n  EXPECT_EQ(newline_pos, json_string.length() - 1);\n\n  for (const auto& pair : expected_map) {\n    EXPECT_EQ(parsed->getString(pair.first), pair.second);\n  }\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterPlainStringTest) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header;\n  Http::TestResponseHeaderMapImpl response_header;\n  Http::TestResponseTrailerMapImpl response_trailer;\n  std::string body;\n\n  envoy::config::core::v3::Metadata metadata;\n  populateMetadataTestData(metadata);\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));\n\n  absl::node_hash_map<std::string, std::string> expected_json_map = {\n      {\"plain_string\", \"plain_string_value\"}};\n\n  
ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    plain_string: plain_string_value\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, false);\n\n  verifyJsonOutput(\n      formatter.format(request_header, response_header, response_trailer, stream_info, body),\n      expected_json_map);\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterNestedObject) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header;\n  Http::TestResponseHeaderMapImpl response_header;\n  Http::TestResponseTrailerMapImpl response_trailer;\n  std::string body;\n\n  envoy::config::core::v3::Metadata metadata;\n  populateMetadataTestData(metadata);\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    level_one:\n      level_two:\n        level_three:\n          plain_string: plain_string_value\n          protocol: '%PROTOCOL%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, false);\n\n  const std::string expected = R\"EOF({\n    \"level_one\": {\n      \"level_two\": {\n        \"level_three\": {\n          \"plain_string\": \"plain_string_value\",\n          \"protocol\": \"HTTP/1.1\"\n        }\n      }\n    }\n  })EOF\";\n  std::string out_json =\n      formatter.format(request_header, response_header, response_trailer, stream_info, body);\n  EXPECT_TRUE(TestUtility::jsonStringEqual(out_json, expected));\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterSingleOperatorTest) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header;\n  Http::TestResponseHeaderMapImpl response_header;\n  Http::TestResponseTrailerMapImpl response_trailer;\n  std::string body;\n\n  envoy::config::core::v3::Metadata metadata;\n  
populateMetadataTestData(metadata);\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));\n\n  absl::node_hash_map<std::string, std::string> expected_json_map = {{\"protocol\", \"HTTP/1.1\"}};\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    protocol: '%PROTOCOL%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, false);\n\n  verifyJsonOutput(\n      formatter.format(request_header, response_header, response_trailer, stream_info, body),\n      expected_json_map);\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterNonExistentHeaderTest) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header{{\"some_request_header\", \"SOME_REQUEST_HEADER\"}};\n  Http::TestResponseHeaderMapImpl response_header{{\"some_response_header\", \"SOME_RESPONSE_HEADER\"}};\n  Http::TestResponseTrailerMapImpl response_trailer;\n  std::string body;\n\n  absl::node_hash_map<std::string, std::string> expected_json_map = {\n      {\"protocol\", \"HTTP/1.1\"},\n      {\"some_request_header\", \"SOME_REQUEST_HEADER\"},\n      {\"nonexistent_response_header\", \"-\"},\n      {\"some_response_header\", \"SOME_RESPONSE_HEADER\"}};\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    protocol: '%PROTOCOL%'\n    some_request_header: '%REQ(some_request_header)%'\n    nonexistent_response_header: '%RESP(nonexistent_response_header)%'\n    some_response_header: '%RESP(some_response_header)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, false);\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));\n\n  verifyJsonOutput(\n      formatter.format(request_header, response_header, response_trailer, stream_info, body),\n      
expected_json_map);\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterAlternateHeaderTest) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header{\n      {\"request_present_header\", \"REQUEST_PRESENT_HEADER\"}};\n  Http::TestResponseHeaderMapImpl response_header{\n      {\"response_present_header\", \"RESPONSE_PRESENT_HEADER\"}};\n  Http::TestResponseTrailerMapImpl response_trailer;\n  std::string body;\n\n  absl::node_hash_map<std::string, std::string> expected_json_map = {\n      {\"request_present_header_or_request_absent_header\", \"REQUEST_PRESENT_HEADER\"},\n      {\"request_absent_header_or_request_present_header\", \"REQUEST_PRESENT_HEADER\"},\n      {\"response_absent_header_or_response_absent_header\", \"RESPONSE_PRESENT_HEADER\"},\n      {\"response_present_header_or_response_absent_header\", \"RESPONSE_PRESENT_HEADER\"}};\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    request_present_header_or_request_absent_header: '%REQ(request_present_header?request_absent_header)%'\n    request_absent_header_or_request_present_header: '%REQ(request_absent_header?request_present_header)%'\n    response_absent_header_or_response_absent_header: '%RESP(response_absent_header?response_present_header)%'\n    response_present_header_or_response_absent_header: '%RESP(response_present_header?response_absent_header)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, false);\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));\n\n  verifyJsonOutput(\n      formatter.format(request_header, response_header, response_trailer, stream_info, body),\n      expected_json_map);\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterDynamicMetadataTest) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header{{\"first\", \"GET\"}, 
{\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_header{{\"second\", \"PUT\"}, {\"test\", \"test\"}};\n  Http::TestResponseTrailerMapImpl response_trailer{{\"third\", \"POST\"}, {\"test-2\", \"test-2\"}};\n  std::string body;\n\n  envoy::config::core::v3::Metadata metadata;\n  populateMetadataTestData(metadata);\n  EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n  EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n\n  absl::node_hash_map<std::string, std::string> expected_json_map = {\n      {\"test_key\", \"\\\"test_value\\\"\"},\n      {\"test_obj\", \"{\\\"inner_key\\\":\\\"inner_value\\\"}\"},\n      {\"test_obj.inner_key\", \"\\\"inner_value\\\"\"}};\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    test_key: '%DYNAMIC_METADATA(com.test:test_key)%'\n    test_obj: '%DYNAMIC_METADATA(com.test:test_obj)%'\n    test_obj.inner_key: '%DYNAMIC_METADATA(com.test:test_obj:inner_key)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, false);\n\n  verifyJsonOutput(\n      formatter.format(request_header, response_header, response_trailer, stream_info, body),\n      expected_json_map);\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterTypedDynamicMetadataTest) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header{{\"first\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_header{{\"second\", \"PUT\"}, {\"test\", \"test\"}};\n  Http::TestResponseTrailerMapImpl response_trailer{{\"third\", \"POST\"}, {\"test-2\", \"test-2\"}};\n  std::string body;\n\n  envoy::config::core::v3::Metadata metadata;\n  populateMetadataTestData(metadata);\n  EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n  EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n\n  ProtobufWkt::Struct 
key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    test_key: '%DYNAMIC_METADATA(com.test:test_key)%'\n    test_obj: '%DYNAMIC_METADATA(com.test:test_obj)%'\n    test_obj.inner_key: '%DYNAMIC_METADATA(com.test:test_obj:inner_key)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, true, false);\n\n  const std::string json =\n      formatter.format(request_header, response_header, response_trailer, stream_info, body);\n  ProtobufWkt::Struct output;\n  MessageUtil::loadFromJson(json, output);\n\n  const auto& fields = output.fields();\n  EXPECT_EQ(\"test_value\", fields.at(\"test_key\").string_value());\n  EXPECT_EQ(\"inner_value\", fields.at(\"test_obj.inner_key\").string_value());\n  EXPECT_EQ(\"inner_value\",\n            fields.at(\"test_obj\").struct_value().fields().at(\"inner_key\").string_value());\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterFilterStateTest) {\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  StreamInfo::MockStreamInfo stream_info;\n  std::string body;\n  stream_info.filter_state_->setData(\"test_key\",\n                                     std::make_unique<Router::StringAccessorImpl>(\"test_value\"),\n                                     StreamInfo::FilterState::StateType::ReadOnly);\n  stream_info.filter_state_->setData(\"test_obj\",\n                                     std::make_unique<TestSerializedStructFilterState>(),\n                                     StreamInfo::FilterState::StateType::ReadOnly);\n  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));\n\n  absl::node_hash_map<std::string, std::string> expected_json_map = {\n      {\"test_key\", \"\\\"test_value\\\"\"}, {\"test_obj\", \"{\\\"inner_key\\\":\\\"inner_value\\\"}\"}};\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    test_key: 
'%FILTER_STATE(test_key)%'\n    test_obj: '%FILTER_STATE(test_obj)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, false);\n\n  verifyJsonOutput(\n      formatter.format(request_headers, response_headers, response_trailers, stream_info, body),\n      expected_json_map);\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterOmitEmptyTest) {\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  StreamInfo::MockStreamInfo stream_info;\n  std::string body;\n\n  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));\n  EXPECT_CALL(Const(stream_info), dynamicMetadata()).Times(testing::AtLeast(1));\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n      test_key_filter_state: '%FILTER_STATE(nonexistent_key)%'\n      test_key_req: '%REQ(nonexistent_key)%'\n      test_key_res: '%RESP(nonexistent_key)%'\n      test_key_dynamic_metadata: '%DYNAMIC_METADATA(nonexistent_key)%'\n    )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, true);\n\n  verifyJsonOutput(\n      formatter.format(request_headers, response_headers, response_trailers, stream_info, body),\n      {});\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterTypedFilterStateTest) {\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  StreamInfo::MockStreamInfo stream_info;\n  std::string body;\n  stream_info.filter_state_->setData(\"test_key\",\n                                     std::make_unique<Router::StringAccessorImpl>(\"test_value\"),\n                                     StreamInfo::FilterState::StateType::ReadOnly);\n  stream_info.filter_state_->setData(\"test_obj\",\n                                     
std::make_unique<TestSerializedStructFilterState>(),\n                                     StreamInfo::FilterState::StateType::ReadOnly);\n  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    test_key: '%FILTER_STATE(test_key)%'\n    test_obj: '%FILTER_STATE(test_obj)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, true, false);\n\n  std::string json =\n      formatter.format(request_headers, response_headers, response_trailers, stream_info, body);\n  ProtobufWkt::Struct output;\n  MessageUtil::loadFromJson(json, output);\n\n  const auto& fields = output.fields();\n  EXPECT_EQ(\"test_value\", fields.at(\"test_key\").string_value());\n  EXPECT_EQ(\"inner_value\",\n            fields.at(\"test_obj\").struct_value().fields().at(\"inner_key\").string_value());\n}\n\n// Test new specifier (PLAIN/TYPED) of FilterState. Ensure that after adding additional specifier,\n// the FilterState can call the serializeAsProto or serializeAsString methods correctly.\nTEST(SubstitutionFormatterTest, FilterStateSpeciferTest) {\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  StreamInfo::MockStreamInfo stream_info;\n  std::string body;\n  stream_info.filter_state_->setData(\n      \"test_key\", std::make_unique<TestSerializedStringFilterState>(\"test_value\"),\n      StreamInfo::FilterState::StateType::ReadOnly);\n  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));\n\n  absl::node_hash_map<std::string, std::string> expected_json_map = {\n      {\"test_key_plain\", \"test_value By PLAIN\"},\n      {\"test_key_typed\", \"\\\"test_value By TYPED\\\"\"},\n  };\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    test_key_plain: '%FILTER_STATE(test_key:PLAIN)%'\n    
test_key_typed: '%FILTER_STATE(test_key:TYPED)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, false);\n\n  verifyJsonOutput(\n      formatter.format(request_headers, response_headers, response_trailers, stream_info, body),\n      expected_json_map);\n}\n\n// Test new specifier (PLAIN/TYPED) of FilterState and convert the output log string to proto\n// and then verify the result.\nTEST(SubstitutionFormatterTest, TypedFilterStateSpeciferTest) {\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  StreamInfo::MockStreamInfo stream_info;\n  std::string body;\n  stream_info.filter_state_->setData(\n      \"test_key\", std::make_unique<TestSerializedStringFilterState>(\"test_value\"),\n      StreamInfo::FilterState::StateType::ReadOnly);\n  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    test_key_plain: '%FILTER_STATE(test_key:PLAIN)%'\n    test_key_typed: '%FILTER_STATE(test_key:TYPED)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, true, false);\n\n  std::string json =\n      formatter.format(request_headers, response_headers, response_trailers, stream_info, body);\n\n  ProtobufWkt::Struct output;\n  MessageUtil::loadFromJson(json, output);\n\n  const auto& fields = output.fields();\n  EXPECT_EQ(\"test_value By PLAIN\", fields.at(\"test_key_plain\").string_value());\n  EXPECT_EQ(\"test_value By TYPED\", fields.at(\"test_key_typed\").string_value());\n}\n\n// Error specifier will cause an exception to be thrown.\nTEST(SubstitutionFormatterTest, FilterStateErrorSpeciferTest) {\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  
StreamInfo::MockStreamInfo stream_info;\n  std::string body;\n  stream_info.filter_state_->setData(\n      \"test_key\", std::make_unique<TestSerializedStringFilterState>(\"test_value\"),\n      StreamInfo::FilterState::StateType::ReadOnly);\n\n  // 'ABCDE' is error specifier.\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    test_key_plain: '%FILTER_STATE(test_key:ABCDE)%'\n    test_key_typed: '%FILTER_STATE(test_key:TYPED)%'\n  )EOF\",\n                            key_mapping);\n  EXPECT_THROW_WITH_MESSAGE(JsonFormatterImpl formatter(key_mapping, false, false), EnvoyException,\n                            \"Invalid filter state serialize type, only support PLAIN/TYPED.\");\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterStartTimeTest) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header;\n  Http::TestResponseHeaderMapImpl response_header;\n  Http::TestResponseTrailerMapImpl response_trailer;\n  std::string body;\n\n  time_t expected_time_in_epoch = 1522280158;\n  SystemTime time = std::chrono::system_clock::from_time_t(expected_time_in_epoch);\n  EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time));\n\n  absl::node_hash_map<std::string, std::string> expected_json_map = {\n      {\"simple_date\", \"2018/03/28\"},\n      {\"test_time\", fmt::format(\"{}\", expected_time_in_epoch)},\n      {\"bad_format\", \"bad_format\"},\n      {\"default\", \"2018-03-28T23:35:58.000Z\"},\n      {\"all_zeroes\", \"000000000.0.00.000\"}};\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    simple_date: '%START_TIME(%Y/%m/%d)%'\n    test_time: '%START_TIME(%s)%'\n    bad_format: '%START_TIME(bad_format)%'\n    default: '%START_TIME%'\n    all_zeroes: '%START_TIME(%f.%1f.%2f.%3f)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, false, false);\n\n  verifyJsonOutput(\n      formatter.format(request_header, response_header, 
response_trailer, stream_info, body),\n      expected_json_map);\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterMultiTokenTest) {\n  {\n    StreamInfo::MockStreamInfo stream_info;\n    Http::TestRequestHeaderMapImpl request_header{{\"some_request_header\", \"SOME_REQUEST_HEADER\"}};\n    Http::TestResponseHeaderMapImpl response_header{\n        {\"some_response_header\", \"SOME_RESPONSE_HEADER\"}};\n    Http::TestResponseTrailerMapImpl response_trailer;\n    std::string body;\n\n    absl::node_hash_map<std::string, std::string> expected_json_map = {\n        {\"multi_token_field\", \"HTTP/1.1 plainstring SOME_REQUEST_HEADER SOME_RESPONSE_HEADER\"}};\n\n    ProtobufWkt::Struct key_mapping;\n    TestUtility::loadFromYaml(R\"EOF(\n      multi_token_field: '%PROTOCOL% plainstring %REQ(some_request_header)% %RESP(some_response_header)%'\n    )EOF\",\n                              key_mapping);\n    for (const bool preserve_types : {false, true}) {\n      JsonFormatterImpl formatter(key_mapping, preserve_types, false);\n\n      absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;\n      EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));\n\n      const auto parsed = Json::Factory::loadFromString(\n          formatter.format(request_header, response_header, response_trailer, stream_info, body));\n      for (const auto& pair : expected_json_map) {\n        EXPECT_EQ(parsed->getString(pair.first), pair.second);\n      }\n    }\n  }\n}\n\nTEST(SubstitutionFormatterTest, JsonFormatterTypedTest) {\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  StreamInfo::MockStreamInfo stream_info;\n  std::string body;\n  EXPECT_CALL(Const(stream_info), lastDownstreamRxByteReceived())\n      .WillRepeatedly(Return(std::chrono::nanoseconds(5000000)));\n\n  ProtobufWkt::Value list;\n  
list.mutable_list_value()->add_values()->set_bool_value(true);\n  list.mutable_list_value()->add_values()->set_string_value(\"two\");\n  list.mutable_list_value()->add_values()->set_number_value(3.14);\n\n  ProtobufWkt::Struct s;\n  (*s.mutable_fields())[\"list\"] = list;\n\n  stream_info.filter_state_->setData(\"test_obj\",\n                                     std::make_unique<TestSerializedStructFilterState>(s),\n                                     StreamInfo::FilterState::StateType::ReadOnly);\n  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));\n\n  ProtobufWkt::Struct key_mapping;\n  TestUtility::loadFromYaml(R\"EOF(\n    request_duration: '%REQUEST_DURATION%'\n    request_duration_multi: '%REQUEST_DURATION%ms'\n    filter_state: '%FILTER_STATE(test_obj)%'\n  )EOF\",\n                            key_mapping);\n  JsonFormatterImpl formatter(key_mapping, true, false);\n\n  const auto json =\n      formatter.format(request_headers, response_headers, response_trailers, stream_info, body);\n  ProtobufWkt::Struct output;\n  MessageUtil::loadFromJson(json, output);\n\n  EXPECT_THAT(output.fields().at(\"request_duration\"), ProtoEq(ValueUtil::numberValue(5.0)));\n  EXPECT_THAT(output.fields().at(\"request_duration_multi\"), ProtoEq(ValueUtil::stringValue(\"5ms\")));\n\n  ProtobufWkt::Value expected;\n  expected.mutable_struct_value()->CopyFrom(s);\n  EXPECT_THAT(output.fields().at(\"filter_state\"), ProtoEq(expected));\n}\n\nTEST(SubstitutionFormatterTest, CompositeFormatterSuccess) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header{{\"first\", \"GET\"}, {\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_header{{\"second\", \"PUT\"}, {\"test\", \"test\"}};\n  Http::TestResponseTrailerMapImpl response_trailer{{\"third\", \"POST\"}, {\"test-2\", \"test-2\"}};\n  std::string body;\n\n  {\n    const std::string format = \"{{%PROTOCOL%}}   %RESP(not exist)%++%RESP(test)% \"\n                
               \"%REQ(FIRST?SECOND)% %RESP(FIRST?SECOND)%\"\n                               \"\\t@%TRAILER(THIRD)%@\\t%TRAILER(TEST?TEST-2)%[]\";\n    FormatterImpl formatter(format, false);\n\n    absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;\n    EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));\n\n    EXPECT_EQ(\n        \"{{HTTP/1.1}}   -++test GET PUT\\t@POST@\\ttest-2[]\",\n        formatter.format(request_header, response_header, response_trailer, stream_info, body));\n  }\n\n  {\n    const std::string format = \"{}*JUST PLAIN string]\";\n    FormatterImpl formatter(format, false);\n\n    EXPECT_EQ(format, formatter.format(request_header, response_header, response_trailer,\n                                       stream_info, body));\n  }\n\n  {\n    const std::string format = \"%REQ(first):3%|%REQ(first):1%|%RESP(first?second):2%|%REQ(first):\"\n                               \"10%|%TRAILER(second?third):3%\";\n\n    FormatterImpl formatter(format, false);\n\n    EXPECT_EQ(\"GET|G|PU|GET|POS\", formatter.format(request_header, response_header,\n                                                   response_trailer, stream_info, body));\n  }\n\n  {\n    envoy::config::core::v3::Metadata metadata;\n    populateMetadataTestData(metadata);\n    EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n    EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n    const std::string format = \"%DYNAMIC_METADATA(com.test:test_key)%|%DYNAMIC_METADATA(com.test:\"\n                               \"test_obj)%|%DYNAMIC_METADATA(com.test:test_obj:inner_key)%\";\n    FormatterImpl formatter(format, false);\n\n    EXPECT_EQ(\n        \"\\\"test_value\\\"|{\\\"inner_key\\\":\\\"inner_value\\\"}|\\\"inner_value\\\"\",\n        formatter.format(request_header, response_header, response_trailer, stream_info, body));\n  }\n\n  {\n    EXPECT_CALL(Const(stream_info), 
filterState()).Times(testing::AtLeast(1));\n    stream_info.filter_state_->setData(\"testing\",\n                                       std::make_unique<Router::StringAccessorImpl>(\"test_value\"),\n                                       StreamInfo::FilterState::StateType::ReadOnly,\n                                       StreamInfo::FilterState::LifeSpan::FilterChain);\n    stream_info.filter_state_->setData(\"serialized\",\n                                       std::make_unique<TestSerializedUnknownFilterState>(),\n                                       StreamInfo::FilterState::StateType::ReadOnly,\n                                       StreamInfo::FilterState::LifeSpan::FilterChain);\n    const std::string format = \"%FILTER_STATE(testing)%|%FILTER_STATE(serialized)%|\"\n                               \"%FILTER_STATE(testing):8%|%FILTER_STATE(nonexisting)%\";\n    FormatterImpl formatter(format, false);\n\n    EXPECT_EQ(\n        \"\\\"test_value\\\"|-|\\\"test_va|-\",\n        formatter.format(request_header, response_header, response_trailer, stream_info, body));\n  }\n\n  {\n    const std::string format = \"%START_TIME(%Y/%m/%d)%|%START_TIME(%s)%|%START_TIME(bad_format)%|\"\n                               \"%START_TIME%|%START_TIME(%f.%1f.%2f.%3f)%\";\n\n    time_t expected_time_in_epoch = 1522280158;\n    SystemTime time = std::chrono::system_clock::from_time_t(expected_time_in_epoch);\n    EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time));\n    FormatterImpl formatter(format, false);\n\n    EXPECT_EQ(\n        fmt::format(\"2018/03/28|{}|bad_format|2018-03-28T23:35:58.000Z|000000000.0.00.000\",\n                    expected_time_in_epoch),\n        formatter.format(request_header, response_header, response_trailer, stream_info, body));\n  }\n\n  {\n    // This tests the beginning of time.\n    const std::string format = \"%START_TIME(%Y/%m/%d)%|%START_TIME(%s)%|%START_TIME(bad_format)%|\"\n                               
\"%START_TIME%|%START_TIME(%f.%1f.%2f.%3f)%\";\n\n    const time_t test_epoch = 0;\n    const SystemTime time = std::chrono::system_clock::from_time_t(test_epoch);\n    EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time));\n    FormatterImpl formatter(format, false);\n\n    EXPECT_EQ(\n        \"1970/01/01|0|bad_format|1970-01-01T00:00:00.000Z|000000000.0.00.000\",\n        formatter.format(request_header, response_header, response_trailer, stream_info, body));\n  }\n\n  {\n    // This tests multiple START_TIMEs.\n    const std::string format =\n        \"%START_TIME(%s.%3f)%|%START_TIME(%s.%4f)%|%START_TIME(%s.%5f)%|%START_TIME(%s.%6f)%\";\n    const SystemTime start_time(std::chrono::microseconds(1522796769123456));\n    EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(start_time));\n    FormatterImpl formatter(format, false);\n    EXPECT_EQ(\n        \"1522796769.123|1522796769.1234|1522796769.12345|1522796769.123456\",\n        formatter.format(request_header, response_header, response_trailer, stream_info, body));\n  }\n\n  {\n    const std::string format =\n        \"%START_TIME(segment1:%s.%3f|segment2:%s.%4f|seg3:%s.%6f|%s-%3f-asdf-%9f|.%7f:segm5:%Y)%\";\n    const SystemTime start_time(std::chrono::microseconds(1522796769123456));\n    EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(start_time));\n    FormatterImpl formatter(format, false);\n    EXPECT_EQ(\n        \"segment1:1522796769.123|segment2:1522796769.1234|seg3:1522796769.123456|1522796769-\"\n        \"123-asdf-123456000|.1234560:segm5:2018\",\n        formatter.format(request_header, response_header, response_trailer, stream_info, body));\n  }\n\n  {\n    // This tests START_TIME specifier that has shorter segments when formatted, i.e.\n    // absl::FormatTime(\"%%%%\"\") equals \"%%\", %1f will have 1 as its size.\n    const std::string format = \"%START_TIME(%%%%|%%%%%f|%s%%%%%3f|%1f%%%%%s)%\";\n    const SystemTime 
start_time(std::chrono::microseconds(1522796769123456));\n    EXPECT_CALL(stream_info, startTime()).WillOnce(Return(start_time));\n    FormatterImpl formatter(format, false);\n    EXPECT_EQ(\n        \"%%|%%123456000|1522796769%%123|1%%1522796769\",\n        formatter.format(request_header, response_header, response_trailer, stream_info, body));\n  }\n#ifndef WIN32\n  {\n    const std::string format = \"%START_TIME(%E4n)%\";\n    const SystemTime start_time(std::chrono::microseconds(1522796769123456));\n    EXPECT_CALL(stream_info, startTime()).WillOnce(Return(start_time));\n    FormatterImpl formatter(format);\n    EXPECT_EQ(\"%E4n\", formatter.format(request_header, response_header, response_trailer,\n                                       stream_info, body));\n  }\n#endif\n}\n\nTEST(SubstitutionFormatterTest, CompositeFormatterEmpty) {\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_header{};\n  Http::TestResponseHeaderMapImpl response_header{};\n  Http::TestResponseTrailerMapImpl response_trailer{};\n  std::string body;\n\n  {\n    const std::string format = \"%PROTOCOL%|%RESP(not exist)%|\"\n                               \"%REQ(FIRST?SECOND)%|%RESP(FIRST?SECOND)%|\"\n                               \"%TRAILER(THIRD)%|%TRAILER(TEST?TEST-2)%\";\n    FormatterImpl formatter(format, false);\n\n    EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(absl::nullopt));\n\n    EXPECT_EQ(\"-|-|-|-|-|-\", formatter.format(request_header, response_header, response_trailer,\n                                              stream_info, body));\n  }\n\n  {\n    const std::string format = \"%PROTOCOL%|%RESP(not exist)%|\"\n                               \"%REQ(FIRST?SECOND)%%RESP(FIRST?SECOND)%|\"\n                               \"%TRAILER(THIRD)%|%TRAILER(TEST?TEST-2)%\";\n    FormatterImpl formatter(format, true);\n\n    EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(absl::nullopt));\n\n    EXPECT_EQ(\"||||\", 
formatter.format(request_header, response_header, response_trailer,\n                                       stream_info, body));\n  }\n\n  {\n    envoy::config::core::v3::Metadata metadata;\n    EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n    EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n    const std::string format = \"%DYNAMIC_METADATA(com.test:test_key)%|%DYNAMIC_METADATA(com.test:\"\n                               \"test_obj)%|%DYNAMIC_METADATA(com.test:test_obj:inner_key)%\";\n    FormatterImpl formatter(format, false);\n\n    EXPECT_EQ(\"-|-|-\", formatter.format(request_header, response_header, response_trailer,\n                                        stream_info, body));\n  }\n\n  {\n    envoy::config::core::v3::Metadata metadata;\n    EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n    EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n    const std::string format = \"%DYNAMIC_METADATA(com.test:test_key)%|%DYNAMIC_METADATA(com.test:\"\n                               \"test_obj)%|%DYNAMIC_METADATA(com.test:test_obj:inner_key)%\";\n    FormatterImpl formatter(format, true);\n\n    EXPECT_EQ(\"||\", formatter.format(request_header, response_header, response_trailer, stream_info,\n                                     body));\n  }\n\n  {\n    EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));\n    const std::string format = \"%FILTER_STATE(testing)%|%FILTER_STATE(serialized)%|\"\n                               \"%FILTER_STATE(testing):8%|%FILTER_STATE(nonexisting)%\";\n    FormatterImpl formatter(format, false);\n\n    EXPECT_EQ(\"-|-|-|-\", formatter.format(request_header, response_header, response_trailer,\n                                          stream_info, body));\n  }\n\n  {\n    EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));\n    const std::string 
format = \"%FILTER_STATE(testing)%|%FILTER_STATE(serialized)%|\"\n                               \"%FILTER_STATE(testing):8%|%FILTER_STATE(nonexisting)%\";\n    FormatterImpl formatter(format, true);\n\n    EXPECT_EQ(\"|||\", formatter.format(request_header, response_header, response_trailer,\n                                      stream_info, body));\n  }\n}\n\nTEST(SubstitutionFormatterTest, ParserFailures) {\n  SubstitutionFormatParser parser;\n\n  std::vector<std::string> test_cases = {\n      \"{{%PROTOCOL%}}   ++ %REQ(FIRST?SECOND)% %RESP(FIRST?SECOND)\",\n      \"%REQ(FIRST?SECOND)T%\",\n      \"RESP(FIRST)%\",\n      \"%REQ(valid)% %NOT_VALID%\",\n      \"%REQ(FIRST?SECOND%\",\n      \"%%\",\n      \"%%HOSTNAME%PROTOCOL%\",\n      \"%protocol%\",\n      \"%REQ(TEST):%\",\n      \"%REQ(TEST):3q4%\",\n      \"%REQ(\\n)%\",\n      \"%REQ(?\\n)%\",\n      \"%RESP(TEST):%\",\n      \"%RESP(X?Y):%\",\n      \"%RESP(X?Y):343o24%\",\n      \"%REQ(TEST):10\",\n      \"REQ(:TEST):10%\",\n      \"%REQ(TEST:10%\",\n      \"%REQ(\",\n      \"%REQ(X?Y?Z)%\",\n      \"%TRAILER(TEST):%\",\n      \"%TRAILER(TEST):23u1%\",\n      \"%TRAILER(X?Y?Z)%\",\n      \"%TRAILER(:TEST):10\",\n      \"%DYNAMIC_METADATA(TEST\",\n      \"%FILTER_STATE(TEST\",\n      \"%FILTER_STATE()%\",\n      \"%START_TIME(%85n)%\",\n      \"%START_TIME(%#__88n)%\",\n      \"%START_TIME(%En%)%\",\n      \"%START_TIME(%4En%)%\",\n      \"%START_TIME(%On%)%\",\n      \"%START_TIME(%4On%)%\"};\n\n  for (const std::string& test_case : test_cases) {\n    EXPECT_THROW(parser.parse(test_case), EnvoyException) << test_case;\n  }\n}\n\nTEST(SubstitutionFormatterTest, ParserSuccesses) {\n  SubstitutionFormatParser parser;\n\n  std::vector<std::string> test_cases = {\"%START_TIME(%E4n%)%\", \"%START_TIME(%O4n%)%\"};\n\n  for (const std::string& test_case : test_cases) {\n    EXPECT_NO_THROW(parser.parse(test_case));\n  }\n}\n\n} // namespace\n} // namespace Formatter\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_select_google_grpc\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"async_client_impl_test\",\n    srcs = [\"async_client_impl_test.cc\"],\n    deps = [\n        \"//source/common/grpc:async_client_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/proto:helloworld_proto_cc_proto\",\n        \"//test/test_common:test_time_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"async_client_manager_impl_test\",\n    srcs = [\"async_client_manager_impl_test.cc\"],\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/common/grpc:async_client_manager_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"codec_fuzz_test\",\n    srcs = [\"codec_fuzz_test.cc\"],\n    corpus = \"codec_corpus\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/grpc:codec_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/proto:helloworld_proto_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"codec_test\",\n    srcs = [\"codec_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/grpc:codec_lib\",\n        \"//test/common/buffer:utility_lib\",\n        \"//test/proto:helloworld_proto_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = 
\"common_test\",\n    srcs = [\"common_test.cc\"],\n    deps = [\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/proto:helloworld_proto_cc_proto\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"context_impl_test\",\n    srcs = [\"context_impl_test.cc\"],\n    deps = [\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/grpc:context_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/test_common:global_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"google_grpc_utils_test\",\n    srcs = envoy_select_google_grpc([\"google_grpc_utils_test.cc\"]),\n    deps = [\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//test/proto:helloworld_proto_cc_proto\",\n        \"//test/test_common:utility_lib\",\n    ] + envoy_select_google_grpc([\"//source/common/grpc:google_grpc_utils_lib\"]),\n)\n\nenvoy_cc_test(\n    name = \"google_async_client_impl_test\",\n    srcs = envoy_select_google_grpc([\"google_async_client_impl_test.cc\"]),\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/proto:helloworld_proto_cc_proto\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:utility_lib\",\n    ] + envoy_select_google_grpc([\"//source/common/grpc:google_async_client_lib\"]),\n)\n\nenvoy_cc_test(\n    name = \"google_grpc_creds_test\",\n    srcs = envoy_select_google_grpc([\"google_grpc_creds_test.cc\"]),\n    data = 
[\":service_key.json\"],\n    deps = [\n        \":utility_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:utility_lib\",\n    ] + envoy_select_google_grpc([\"//source/common/grpc:google_grpc_creds_lib\"]),\n)\n\nenvoy_cc_test_library(\n    name = \"grpc_client_integration_lib\",\n    hdrs = [\"grpc_client_integration.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"grpc_client_integration_test_harness_lib\",\n    hdrs = [\"grpc_client_integration_test_harness.h\"],\n    deps = [\n        \":grpc_client_integration_lib\",\n        \":utility_lib\",\n        \"//source/common/api:api_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/grpc:context_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/http/http2:conn_pool_lib\",\n        \"//test/integration:integration_lib\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/mocks/upstream:thread_local_cluster_mocks\",\n        \"//test/proto:helloworld_proto_cc_proto\",\n        \"//test/test_common:global_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"grpc_client_integration_test\",\n    srcs = [\"grpc_client_integration_test.cc\"],\n    deps = [\n        \":grpc_client_integration_test_harness_lib\",\n        
\"//source/common/grpc:async_client_lib\",\n        \"//source/extensions/grpc_credentials:well_known_names\",\n        \"//source/extensions/grpc_credentials/example:config\",\n    ] + envoy_select_google_grpc([\"//source/common/grpc:google_async_client_lib\"]),\n)\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    data = [\"//test/config/integration/certs\"],\n    deps = [\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/common/grpc/async_client_impl_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"common/grpc/async_client_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/proto/helloworld.pb.h\"\n#include \"test/test_common/test_time.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\nclass EnvoyAsyncClientImplTest : public testing::Test {\npublic:\n  EnvoyAsyncClientImplTest()\n      : method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName(\"SayHello\")) {\n    envoy::config::core::v3::GrpcService config;\n    config.mutable_envoy_grpc()->set_cluster_name(\"test_cluster\");\n    grpc_client_ = std::make_unique<AsyncClientImpl>(cm_, config, test_time_.timeSystem());\n    ON_CALL(cm_, httpAsyncClientForCluster(\"test_cluster\")).WillByDefault(ReturnRef(http_client_));\n  }\n\n  const Protobuf::MethodDescriptor* method_descriptor_;\n  NiceMock<Http::MockAsyncClient> http_client_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  AsyncClient<helloworld::HelloRequest, helloworld::HelloReply> grpc_client_;\n  DangerousDeprecatedTestTime test_time_;\n};\n\n// Validate that the host header is the cluster name in grpc config.\nTEST_F(EnvoyAsyncClientImplTest, HostIsClusterNameByDefault) {\n  NiceMock<MockAsyncStreamCallbacks<helloworld::HelloReply>> grpc_callbacks;\n  Http::AsyncClient::StreamCallbacks* http_callbacks;\n\n  Http::MockAsyncClientStream http_stream;\n  EXPECT_CALL(http_client_, start(_, _))\n      .WillOnce(\n          Invoke([&http_callbacks, &http_stream](Http::AsyncClient::StreamCallbacks& callbacks,\n                                                 const Http::AsyncClient::StreamOptions&) {\n            http_callbacks = &callbacks;\n            return &http_stream;\n  
        }));\n\n  EXPECT_CALL(grpc_callbacks,\n              onCreateInitialMetadata(testing::Truly([](Http::RequestHeaderMap& headers) {\n                return headers.Host()->value() == \"test_cluster\";\n              })));\n  EXPECT_CALL(http_stream, sendHeaders(_, _))\n      .WillOnce(Invoke([&http_callbacks](Http::HeaderMap&, bool) { http_callbacks->onReset(); }));\n  auto grpc_stream =\n      grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions());\n  EXPECT_EQ(grpc_stream, nullptr);\n}\n\n// Validate that the host header is the authority field in grpc config.\nTEST_F(EnvoyAsyncClientImplTest, HostIsOverrideByConfig) {\n  envoy::config::core::v3::GrpcService config;\n  config.mutable_envoy_grpc()->set_cluster_name(\"test_cluster\");\n  config.mutable_envoy_grpc()->set_authority(\"demo.com\");\n\n  grpc_client_ = std::make_unique<AsyncClientImpl>(cm_, config, test_time_.timeSystem());\n  EXPECT_CALL(cm_, httpAsyncClientForCluster(\"test_cluster\"))\n      .WillRepeatedly(ReturnRef(http_client_));\n\n  NiceMock<MockAsyncStreamCallbacks<helloworld::HelloReply>> grpc_callbacks;\n  Http::AsyncClient::StreamCallbacks* http_callbacks;\n\n  Http::MockAsyncClientStream http_stream;\n  EXPECT_CALL(http_client_, start(_, _))\n      .WillOnce(\n          Invoke([&http_callbacks, &http_stream](Http::AsyncClient::StreamCallbacks& callbacks,\n                                                 const Http::AsyncClient::StreamOptions&) {\n            http_callbacks = &callbacks;\n            return &http_stream;\n          }));\n\n  EXPECT_CALL(grpc_callbacks,\n              onCreateInitialMetadata(testing::Truly([](Http::RequestHeaderMap& headers) {\n                return headers.Host()->value() == \"demo.com\";\n              })));\n  EXPECT_CALL(http_stream, sendHeaders(_, _))\n      .WillOnce(Invoke([&http_callbacks](Http::HeaderMap&, bool) { http_callbacks->onReset(); }));\n  auto grpc_stream =\n      
grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions());\n  EXPECT_EQ(grpc_stream, nullptr);\n}\n\n// Validate that a failure in the HTTP client returns immediately with status\n// UNAVAILABLE.\nTEST_F(EnvoyAsyncClientImplTest, StreamHttpStartFail) {\n  MockAsyncStreamCallbacks<helloworld::HelloReply> grpc_callbacks;\n  ON_CALL(http_client_, start(_, _)).WillByDefault(Return(nullptr));\n  EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, \"\"));\n  auto grpc_stream =\n      grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions());\n  EXPECT_EQ(grpc_stream, nullptr);\n}\n\n// Validate that a failure in the HTTP client returns immediately with status\n// UNAVAILABLE.\nTEST_F(EnvoyAsyncClientImplTest, RequestHttpStartFail) {\n  MockAsyncRequestCallbacks<helloworld::HelloReply> grpc_callbacks;\n  ON_CALL(http_client_, start(_, _)).WillByDefault(Return(nullptr));\n  EXPECT_CALL(grpc_callbacks, onFailure(Status::WellKnownGrpcStatus::Unavailable, \"\", _));\n  helloworld::HelloRequest request_msg;\n\n  Tracing::MockSpan active_span;\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  EXPECT_CALL(active_span, spawnChild_(_, \"async test_cluster egress\", _))\n      .WillOnce(Return(child_span));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"test_cluster\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq(\"14\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(*child_span, finishSpan());\n  EXPECT_CALL(*child_span, injectContext(_)).Times(0);\n\n  auto* grpc_request = grpc_client_->send(*method_descriptor_, request_msg, grpc_callbacks,\n                                          active_span, 
Http::AsyncClient::RequestOptions());\n  EXPECT_EQ(grpc_request, nullptr);\n}\n\n// Validate that a failure to sendHeaders() in the HTTP client returns\n// immediately with status INTERNAL.\nTEST_F(EnvoyAsyncClientImplTest, StreamHttpSendHeadersFail) {\n  MockAsyncStreamCallbacks<helloworld::HelloReply> grpc_callbacks;\n  Http::AsyncClient::StreamCallbacks* http_callbacks;\n  Http::MockAsyncClientStream http_stream;\n  EXPECT_CALL(http_client_, start(_, _))\n      .WillOnce(\n          Invoke([&http_callbacks, &http_stream](Http::AsyncClient::StreamCallbacks& callbacks,\n                                                 const Http::AsyncClient::StreamOptions&) {\n            http_callbacks = &callbacks;\n            return &http_stream;\n          }));\n  EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_));\n  EXPECT_CALL(http_stream, sendHeaders(_, _))\n      .WillOnce(Invoke([&http_callbacks](Http::HeaderMap& headers, bool end_stream) {\n        UNREFERENCED_PARAMETER(headers);\n        UNREFERENCED_PARAMETER(end_stream);\n        http_callbacks->onReset();\n      }));\n  EXPECT_CALL(grpc_callbacks, onReceiveTrailingMetadata_(_));\n  EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Internal, \"\"));\n  auto grpc_stream =\n      grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions());\n  EXPECT_EQ(grpc_stream, nullptr);\n}\n\n// Validate that a failure to sendHeaders() in the HTTP client returns\n// immediately with status INTERNAL.\nTEST_F(EnvoyAsyncClientImplTest, RequestHttpSendHeadersFail) {\n  MockAsyncRequestCallbacks<helloworld::HelloReply> grpc_callbacks;\n  Http::AsyncClient::StreamCallbacks* http_callbacks;\n  Http::MockAsyncClientStream http_stream;\n  EXPECT_CALL(http_client_, start(_, _))\n      .WillOnce(\n          Invoke([&http_callbacks, &http_stream](Http::AsyncClient::StreamCallbacks& callbacks,\n                                                 const Http::AsyncClient::StreamOptions&) 
{\n            http_callbacks = &callbacks;\n            return &http_stream;\n          }));\n  EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_));\n  EXPECT_CALL(http_stream, sendHeaders(_, _))\n      .WillOnce(Invoke([&http_callbacks](Http::HeaderMap& headers, bool end_stream) {\n        UNREFERENCED_PARAMETER(headers);\n        UNREFERENCED_PARAMETER(end_stream);\n        http_callbacks->onReset();\n      }));\n  EXPECT_CALL(grpc_callbacks, onFailure(Status::WellKnownGrpcStatus::Internal, \"\", _));\n  helloworld::HelloRequest request_msg;\n\n  Tracing::MockSpan active_span;\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  EXPECT_CALL(active_span, spawnChild_(_, \"async test_cluster egress\", _))\n      .WillOnce(Return(child_span));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"test_cluster\")));\n  EXPECT_CALL(*child_span, injectContext(_));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq(\"13\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(*child_span, finishSpan());\n\n  auto* grpc_request = grpc_client_->send(*method_descriptor_, request_msg, grpc_callbacks,\n                                          active_span, Http::AsyncClient::RequestOptions());\n  EXPECT_EQ(grpc_request, nullptr);\n}\n\n// Validate that when the cluster is not present the grpc_client returns immediately with\n// status UNAVAILABLE and error message \"Cluster not available\"\nTEST_F(EnvoyAsyncClientImplTest, StreamHttpClientException) {\n  MockAsyncStreamCallbacks<helloworld::HelloReply> grpc_callbacks;\n  ON_CALL(cm_, get(_)).WillByDefault(Return(nullptr));\n  EXPECT_CALL(grpc_callbacks,\n              onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, \"Cluster not available\"));\n  auto grpc_stream =\n   
   grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions());\n  EXPECT_EQ(grpc_stream, nullptr);\n}\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/async_client_manager_impl_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/grpc/async_client_manager_impl.h\"\n\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::Return;\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\nclass AsyncClientManagerImplTest : public testing::Test {\npublic:\n  AsyncClientManagerImplTest()\n      : api_(Api::createApiForTest()), stat_names_(scope_.symbolTable()),\n        async_client_manager_(cm_, tls_, test_time_.timeSystem(), *api_, stat_names_) {}\n\n  Upstream::MockClusterManager cm_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Stats::MockStore scope_;\n  DangerousDeprecatedTestTime test_time_;\n  Api::ApiPtr api_;\n  StatNames stat_names_;\n  AsyncClientManagerImpl async_client_manager_;\n};\n\nTEST_F(AsyncClientManagerImplTest, EnvoyGrpcOk) {\n  envoy::config::core::v3::GrpcService grpc_service;\n  grpc_service.mutable_envoy_grpc()->set_cluster_name(\"foo\");\n\n  Upstream::ClusterManager::ClusterInfoMap cluster_map;\n  Upstream::MockClusterMockPrioritySet cluster;\n  cluster_map.emplace(\"foo\", cluster);\n  EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map));\n  EXPECT_CALL(cluster, info());\n  EXPECT_CALL(*cluster.info_, addedViaApi());\n\n  async_client_manager_.factoryForGrpcService(grpc_service, scope_, false);\n}\n\nTEST_F(AsyncClientManagerImplTest, EnvoyGrpcUnknown) {\n  envoy::config::core::v3::GrpcService grpc_service;\n  grpc_service.mutable_envoy_grpc()->set_cluster_name(\"foo\");\n\n  EXPECT_CALL(cm_, clusters());\n  EXPECT_THROW_WITH_MESSAGE(\n      async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException,\n      
\"Unknown gRPC client cluster 'foo'\");\n}\n\nTEST_F(AsyncClientManagerImplTest, EnvoyGrpcDynamicCluster) {\n  envoy::config::core::v3::GrpcService grpc_service;\n  grpc_service.mutable_envoy_grpc()->set_cluster_name(\"foo\");\n\n  Upstream::ClusterManager::ClusterInfoMap cluster_map;\n  Upstream::MockClusterMockPrioritySet cluster;\n  cluster_map.emplace(\"foo\", cluster);\n  EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map));\n  EXPECT_CALL(cluster, info());\n  EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true));\n  EXPECT_THROW_WITH_MESSAGE(\n      async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException,\n      \"gRPC client cluster 'foo' is not static\");\n}\n\nTEST_F(AsyncClientManagerImplTest, GoogleGrpc) {\n  EXPECT_CALL(scope_, createScope_(\"grpc.foo.\"));\n  envoy::config::core::v3::GrpcService grpc_service;\n  grpc_service.mutable_google_grpc()->set_stat_prefix(\"foo\");\n\n#ifdef ENVOY_GOOGLE_GRPC\n  EXPECT_NE(nullptr, async_client_manager_.factoryForGrpcService(grpc_service, scope_, false));\n#else\n  EXPECT_THROW_WITH_MESSAGE(\n      async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException,\n      \"Google C++ gRPC client is not linked\");\n#endif\n}\n\nTEST_F(AsyncClientManagerImplTest, EnvoyGrpcUnknownOk) {\n  envoy::config::core::v3::GrpcService grpc_service;\n  grpc_service.mutable_envoy_grpc()->set_cluster_name(\"foo\");\n\n  EXPECT_CALL(cm_, clusters()).Times(0);\n  ASSERT_NO_THROW(async_client_manager_.factoryForGrpcService(grpc_service, scope_, true));\n}\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/codec_corpus/empty",
    "content": ""
  },
  {
    "path": "test/common/grpc/codec_fuzz_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/grpc/codec.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/proto/helloworld.pb.h\"\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace Fuzz {\n\n// Fuzz the Grpc::decode() implementation, validating that decode(encode(x)) ==\n// x for all x, regardless of how the encoded buffer is partitioned. Models\n// frame boundary conditions and also trailing random crud, which effectively\n// models line noise input to the decoder as well.\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  FuzzedDataProvider provider(buf, len);\n\n  // We probably won't learn a ton more after a few frames worth.\n  const uint32_t num_encode_frames = provider.ConsumeIntegralInRange(0, 3);\n  // Model a buffer containing the wire input to the decoder.\n  Buffer::OwnedImpl wire_buffer;\n  // We populate these proto requests and then encode them into wire_buffer.\n  std::vector<std::unique_ptr<helloworld::HelloRequest>> requests;\n  // Bounding the size of each request somewhat for sanity sake. We are trading\n  // off being able to see what happens at really large sizes, e.g. 
16MB, but\n  // that will just be too slow, lots of memcpy.\n  const size_t MaxRequestNameSize = 96 * 1024;\n  for (uint32_t i = 0; i < num_encode_frames; ++i) {\n    requests.emplace_back(new helloworld::HelloRequest());\n    requests.back()->set_name_bytes(provider.ConsumeRandomLengthString(MaxRequestNameSize));\n    // Encode the proto to bytes.\n    const std::string request_buffer = requests.back()->SerializeAsString();\n    // Encode the gRPC header.\n    std::array<uint8_t, 5> header;\n    Encoder encoder;\n    encoder.newFrame(GRPC_FH_DEFAULT, request_buffer.size(), header);\n    // Add header and byte representation of request to wire.\n    wire_buffer.add(header.data(), 5);\n    wire_buffer.add(request_buffer.data(), request_buffer.size());\n  }\n\n  // Add random crud at the end to see if we can make the decoder unhappy in\n  // non-standard ways.\n  {\n    const std::string crud = provider.ConsumeRandomLengthString(MaxRequestNameSize);\n    wire_buffer.add(crud.data(), crud.size());\n  }\n\n  Decoder decoder;\n  std::vector<Frame> frames;\n  // We now decode the wire contents, piecemeal.\n  while (wire_buffer.length() > 0) {\n    // We'll try and pick a partition for the remaining content, so that we\n    // enable the fuzzer to explore different ways to cut it.\n    const uint64_t decode_length =\n        provider.remaining_bytes() == 0\n            ? wire_buffer.length()\n            : provider.ConsumeIntegralInRange<uint64_t>(0, wire_buffer.length());\n    Buffer::OwnedImpl decode_buffer;\n    decode_buffer.move(wire_buffer, decode_length);\n    const bool decode_result = decoder.decode(decode_buffer, frames);\n    // If we have recovered the original frames, we're decoding garbage. It\n    // might end up being a valid frame, but there is no predictability, so just\n    // drain and move on. 
If we haven't recovered the original frames, we\n    // shouldn't have any errors and should be consuming all of decode_buffer.\n    if (frames.size() >= num_encode_frames) {\n      decode_buffer.drain(decode_buffer.length());\n    } else {\n      FUZZ_ASSERT(decode_result);\n      FUZZ_ASSERT(decode_buffer.length() == 0);\n    }\n  }\n\n  // Verify that the original requests are correctly decoded.\n  FUZZ_ASSERT(frames.size() >= num_encode_frames);\n  for (uint32_t i = 0; i < num_encode_frames; ++i) {\n    helloworld::HelloRequest decoded_request;\n    FUZZ_ASSERT(decoded_request.ParseFromArray(\n        frames[i].data_->linearize(frames[i].data_->length()), frames[i].data_->length()));\n    FUZZ_ASSERT(decoded_request.name_bytes() == requests[i]->name_bytes());\n  }\n}\n\n} // namespace Fuzz\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/codec_test.cc",
    "content": "#include <array>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/grpc/codec.h\"\n\n#include \"test/common/buffer/utility.h\"\n#include \"test/proto/helloworld.pb.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\nTEST(GrpcCodecTest, encodeHeader) {\n  Encoder encoder;\n  std::array<uint8_t, 5> buffer;\n\n  encoder.newFrame(GRPC_FH_DEFAULT, 1, buffer);\n  EXPECT_EQ(buffer[0], GRPC_FH_DEFAULT);\n  EXPECT_EQ(buffer[1], 0);\n  EXPECT_EQ(buffer[2], 0);\n  EXPECT_EQ(buffer[3], 0);\n  EXPECT_EQ(buffer[4], 1);\n\n  encoder.newFrame(GRPC_FH_COMPRESSED, 1, buffer);\n  EXPECT_EQ(buffer[0], GRPC_FH_COMPRESSED);\n  EXPECT_EQ(buffer[1], 0);\n  EXPECT_EQ(buffer[2], 0);\n  EXPECT_EQ(buffer[3], 0);\n  EXPECT_EQ(buffer[4], 1);\n\n  encoder.newFrame(GRPC_FH_DEFAULT, 0x100, buffer);\n  EXPECT_EQ(buffer[0], GRPC_FH_DEFAULT);\n  EXPECT_EQ(buffer[1], 0);\n  EXPECT_EQ(buffer[2], 0);\n  EXPECT_EQ(buffer[3], 1);\n  EXPECT_EQ(buffer[4], 0);\n\n  encoder.newFrame(GRPC_FH_DEFAULT, 0x10000, buffer);\n  EXPECT_EQ(buffer[0], GRPC_FH_DEFAULT);\n  EXPECT_EQ(buffer[1], 0);\n  EXPECT_EQ(buffer[2], 1);\n  EXPECT_EQ(buffer[3], 0);\n  EXPECT_EQ(buffer[4], 0);\n\n  encoder.newFrame(GRPC_FH_DEFAULT, 0x1000000, buffer);\n  EXPECT_EQ(buffer[0], GRPC_FH_DEFAULT);\n  EXPECT_EQ(buffer[1], 1);\n  EXPECT_EQ(buffer[2], 0);\n  EXPECT_EQ(buffer[3], 0);\n  EXPECT_EQ(buffer[4], 0);\n}\n\nTEST(GrpcCodecTest, decodeIncompleteFrame) {\n  helloworld::HelloRequest request;\n  request.set_name(\"hello\");\n  std::string request_buffer = request.SerializeAsString();\n\n  Buffer::OwnedImpl buffer;\n  std::array<uint8_t, 5> header;\n  Encoder encoder;\n  encoder.newFrame(GRPC_FH_DEFAULT, request.ByteSize(), header);\n  buffer.add(header.data(), 5);\n  buffer.add(request_buffer.c_str(), 5);\n\n  std::vector<Frame> frames;\n  Decoder decoder;\n  
EXPECT_TRUE(decoder.decode(buffer, frames));\n  EXPECT_EQ(static_cast<size_t>(0), buffer.length());\n  EXPECT_EQ(static_cast<size_t>(0), frames.size());\n  EXPECT_EQ(static_cast<uint32_t>(request.ByteSize()), decoder.length());\n  EXPECT_EQ(true, decoder.hasBufferedData());\n\n  buffer.add(request_buffer.c_str() + 5);\n  EXPECT_TRUE(decoder.decode(buffer, frames));\n  EXPECT_EQ(static_cast<size_t>(0), buffer.length());\n  EXPECT_EQ(static_cast<size_t>(1), frames.size());\n  EXPECT_EQ(static_cast<uint32_t>(0), decoder.length());\n  EXPECT_EQ(false, decoder.hasBufferedData());\n  helloworld::HelloRequest decoded_request;\n  EXPECT_TRUE(decoded_request.ParseFromArray(frames[0].data_->linearize(frames[0].data_->length()),\n                                             frames[0].data_->length()));\n  EXPECT_EQ(\"hello\", decoded_request.name());\n}\n\nTEST(GrpcCodecTest, decodeInvalidFrame) {\n  helloworld::HelloRequest request;\n  request.set_name(\"hello\");\n\n  Buffer::OwnedImpl buffer;\n  std::array<uint8_t, 5> header;\n  Encoder encoder;\n  encoder.newFrame(0b10u, request.ByteSize(), header);\n  buffer.add(header.data(), 5);\n  buffer.add(request.SerializeAsString());\n  size_t size = buffer.length();\n\n  std::vector<Frame> frames;\n  Decoder decoder;\n  EXPECT_FALSE(decoder.decode(buffer, frames));\n  EXPECT_EQ(size, buffer.length());\n}\n\nTEST(GrpcCodecTest, decodeEmptyFrame) {\n  Buffer::OwnedImpl buffer(\"\\0\\0\\0\\0\", 5);\n\n  Decoder decoder;\n  std::vector<Frame> frames;\n  EXPECT_TRUE(decoder.decode(buffer, frames));\n\n  EXPECT_EQ(1, frames.size());\n  EXPECT_EQ(0, frames[0].length_);\n}\n\nTEST(GrpcCodecTest, decodeSingleFrame) {\n  helloworld::HelloRequest request;\n  request.set_name(\"hello\");\n\n  Buffer::OwnedImpl buffer;\n  std::array<uint8_t, 5> header;\n  Encoder encoder;\n  encoder.newFrame(GRPC_FH_DEFAULT, request.ByteSize(), header);\n  buffer.add(header.data(), 5);\n  buffer.add(request.SerializeAsString());\n\n  std::vector<Frame> 
frames;\n  Decoder decoder;\n  EXPECT_TRUE(decoder.decode(buffer, frames));\n  EXPECT_EQ(static_cast<size_t>(0), buffer.length());\n  EXPECT_EQ(frames.size(), static_cast<uint64_t>(1));\n  EXPECT_EQ(GRPC_FH_DEFAULT, frames[0].flags_);\n  EXPECT_EQ(static_cast<uint64_t>(request.ByteSize()), frames[0].length_);\n\n  helloworld::HelloRequest result;\n  result.ParseFromArray(frames[0].data_->linearize(frames[0].data_->length()),\n                        frames[0].data_->length());\n  EXPECT_EQ(\"hello\", result.name());\n}\n\nTEST(GrpcCodecTest, decodeMultipleFrame) {\n  helloworld::HelloRequest request;\n  request.set_name(\"hello\");\n\n  Buffer::OwnedImpl buffer;\n  std::array<uint8_t, 5> header;\n  Encoder encoder;\n  encoder.newFrame(GRPC_FH_DEFAULT, request.ByteSize(), header);\n  for (int i = 0; i < 1009; i++) {\n    buffer.add(header.data(), 5);\n    buffer.add(request.SerializeAsString());\n  }\n\n  std::vector<Frame> frames;\n  Decoder decoder;\n  EXPECT_TRUE(decoder.decode(buffer, frames));\n  EXPECT_EQ(static_cast<size_t>(0), buffer.length());\n  EXPECT_EQ(frames.size(), static_cast<uint64_t>(1009));\n  for (Frame& frame : frames) {\n    EXPECT_EQ(GRPC_FH_DEFAULT, frame.flags_);\n    EXPECT_EQ(static_cast<uint64_t>(request.ByteSize()), frame.length_);\n\n    helloworld::HelloRequest result;\n    result.ParseFromArray(frame.data_->linearize(frame.data_->length()), frame.data_->length());\n    EXPECT_EQ(\"hello\", result.name());\n  }\n}\n\nTEST(GrpcCodecTest, FrameInspectorTest) {\n  {\n    Buffer::OwnedImpl buffer;\n    FrameInspector counter;\n    EXPECT_EQ(0, counter.inspect(buffer));\n    EXPECT_EQ(counter.state(), State::FhFlag);\n    EXPECT_EQ(counter.frameCount(), 0);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    FrameInspector counter;\n    Buffer::addSeq(buffer, {0});\n    EXPECT_EQ(1, counter.inspect(buffer));\n    EXPECT_EQ(counter.state(), State::FhLen0);\n    EXPECT_EQ(counter.frameCount(), 1);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    
FrameInspector counter;\n    Buffer::addSeq(buffer, {1, 0, 0, 0, 1, 0xFF});\n    EXPECT_EQ(1, counter.inspect(buffer));\n    EXPECT_EQ(counter.state(), State::FhFlag);\n    EXPECT_EQ(counter.frameCount(), 1);\n  }\n\n  {\n    FrameInspector counter;\n    Buffer::OwnedImpl buffer1;\n    Buffer::addSeq(buffer1, {1, 0, 0, 0});\n    EXPECT_EQ(1, counter.inspect(buffer1));\n    EXPECT_EQ(counter.state(), State::FhLen3);\n    EXPECT_EQ(counter.frameCount(), 1);\n    Buffer::OwnedImpl buffer2;\n    Buffer::addSeq(buffer2, {1, 0xFF});\n    EXPECT_EQ(0, counter.inspect(buffer2));\n    EXPECT_EQ(counter.frameCount(), 1);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    FrameInspector counter;\n    Buffer::addSeq(buffer, {1, 0, 0, 0, 1, 0xFF});\n    Buffer::addSeq(buffer, {0, 0, 0, 0, 2, 0xFF, 0xFF});\n    EXPECT_EQ(2, counter.inspect(buffer));\n    EXPECT_EQ(counter.state(), State::FhFlag);\n    EXPECT_EQ(counter.frameCount(), 2);\n  }\n\n  {\n    Buffer::OwnedImpl buffer1;\n    Buffer::OwnedImpl buffer2;\n    FrameInspector counter;\n    // message spans two buffers\n    Buffer::addSeq(buffer1, {1, 0, 0, 0, 2, 0xFF});\n    Buffer::addSeq(buffer2, {0xFF, 0, 0, 0, 0, 2, 0xFF, 0xFF});\n    EXPECT_EQ(1, counter.inspect(buffer1));\n    EXPECT_EQ(1, counter.inspect(buffer2));\n    EXPECT_EQ(counter.state(), State::FhFlag);\n    EXPECT_EQ(counter.frameCount(), 2);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    FrameInspector counter;\n    // Add longer byte sequence\n    Buffer::addSeq(buffer, {1, 0, 0, 1, 0});\n    Buffer::addRepeated(buffer, 1 << 8, 0xFF);\n    // Start second message\n    Buffer::addSeq(buffer, {0});\n    EXPECT_EQ(2, counter.inspect(buffer));\n    EXPECT_EQ(counter.state(), State::FhLen0);\n    EXPECT_EQ(counter.frameCount(), 2);\n  }\n\n  {\n    // two empty messages\n    Buffer::OwnedImpl buffer;\n    FrameInspector counter;\n    Buffer::addRepeated(buffer, 10, 0);\n    EXPECT_EQ(2, counter.inspect(buffer));\n    EXPECT_EQ(counter.frameCount(), 2);\n  
}\n}\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/common_test.cc",
    "content": "#include \"envoy/common/platform.h\"\n\n#include \"common/grpc/common.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/proto/helloworld.pb.h\"\n#include \"test/test_common/global.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nTEST(GrpcContextTest, GetGrpcStatus) {\n  Http::TestResponseHeaderMapImpl ok_trailers{{\"grpc-status\", \"0\"}};\n  EXPECT_EQ(Status::Ok, Common::getGrpcStatus(ok_trailers).value());\n\n  Http::TestResponseHeaderMapImpl no_status_trailers{{\"foo\", \"bar\"}};\n  EXPECT_FALSE(Common::getGrpcStatus(no_status_trailers));\n\n  Http::TestResponseHeaderMapImpl aborted_trailers{{\"grpc-status\", \"10\"}};\n  EXPECT_EQ(Status::Aborted, Common::getGrpcStatus(aborted_trailers).value());\n\n  Http::TestResponseHeaderMapImpl unauth_trailers{{\"grpc-status\", \"16\"}};\n  EXPECT_EQ(Status::Unauthenticated, Common::getGrpcStatus(unauth_trailers).value());\n\n  Http::TestResponseHeaderMapImpl invalid_trailers{{\"grpc-status\", \"-1\"}};\n  EXPECT_EQ(Status::InvalidCode, Common::getGrpcStatus(invalid_trailers).value());\n\n  Http::TestResponseHeaderMapImpl user_defined_invalid_trailers{{\"grpc-status\", \"1024\"}};\n  EXPECT_EQ(Status::InvalidCode, Common::getGrpcStatus(user_defined_invalid_trailers).value());\n\n  Http::TestResponseHeaderMapImpl user_defined_trailers{{\"grpc-status\", \"1024\"}};\n  EXPECT_EQ(1024, Common::getGrpcStatus(user_defined_trailers, true).value());\n}\n\nTEST(GrpcContextTest, GetGrpcStatusWithFallbacks) {\n  Http::TestResponseHeaderMapImpl ok_status_headers{{\"grpc-status\", \"0\"}};\n  Http::TestResponseHeaderMapImpl no_status_headers{{\"foo\", \"bar\"}};\n  Http::TestResponseTrailerMapImpl ok_status_trailers{{\"grpc-status\", \"0\"}};\n  Http::TestResponseTrailerMapImpl no_status_trailers{{\"foo\", \"bar\"}};\n  
NiceMock<StreamInfo::MockStreamInfo> info;\n  EXPECT_CALL(info, responseCode()).WillRepeatedly(testing::Return(404));\n\n  EXPECT_EQ(Status::Ok, Common::getGrpcStatus(ok_status_trailers, no_status_headers, info).value());\n\n  EXPECT_EQ(Status::Ok, Common::getGrpcStatus(no_status_trailers, ok_status_headers, info).value());\n\n  EXPECT_EQ(Status::Unimplemented,\n            Common::getGrpcStatus(no_status_trailers, no_status_headers, info).value());\n\n  NiceMock<StreamInfo::MockStreamInfo> info_without_code;\n  EXPECT_FALSE(Common::getGrpcStatus(no_status_trailers, no_status_headers, info_without_code));\n}\n\nTEST(GrpcContextTest, GetGrpcMessage) {\n  Http::TestResponseTrailerMapImpl empty_trailers;\n  EXPECT_EQ(\"\", Common::getGrpcMessage(empty_trailers));\n\n  Http::TestResponseTrailerMapImpl error_trailers{{\"grpc-message\", \"Some error\"}};\n  EXPECT_EQ(\"Some error\", Common::getGrpcMessage(error_trailers));\n\n  Http::TestResponseTrailerMapImpl empty_error_trailers{{\"grpc-message\", \"\"}};\n  EXPECT_EQ(\"\", Common::getGrpcMessage(empty_error_trailers));\n}\n\nTEST(GrpcContextTest, GetGrpcTimeout) {\n  Http::TestRequestHeaderMapImpl empty_headers;\n  EXPECT_EQ(absl::nullopt, Common::getGrpcTimeout(empty_headers));\n\n  Http::TestRequestHeaderMapImpl empty_grpc_timeout{{\"grpc-timeout\", \"\"}};\n  EXPECT_EQ(absl::nullopt, Common::getGrpcTimeout(empty_grpc_timeout));\n\n  Http::TestRequestHeaderMapImpl missing_unit{{\"grpc-timeout\", \"123\"}};\n  EXPECT_EQ(absl::nullopt, Common::getGrpcTimeout(missing_unit));\n\n  Http::TestRequestHeaderMapImpl illegal_unit{{\"grpc-timeout\", \"123F\"}};\n  EXPECT_EQ(absl::nullopt, Common::getGrpcTimeout(illegal_unit));\n\n  Http::TestRequestHeaderMapImpl zero_hours{{\"grpc-timeout\", \"0H\"}};\n  EXPECT_EQ(std::chrono::milliseconds(0), Common::getGrpcTimeout(zero_hours));\n\n  Http::TestRequestHeaderMapImpl unit_hours{{\"grpc-timeout\", \"1H\"}};\n  EXPECT_EQ(std::chrono::milliseconds(60 * 60 * 1000), 
Common::getGrpcTimeout(unit_hours));\n\n  Http::TestRequestHeaderMapImpl unit_minutes{{\"grpc-timeout\", \"1M\"}};\n  EXPECT_EQ(std::chrono::milliseconds(60 * 1000), Common::getGrpcTimeout(unit_minutes));\n\n  Http::TestRequestHeaderMapImpl unit_seconds{{\"grpc-timeout\", \"1S\"}};\n  EXPECT_EQ(std::chrono::milliseconds(1000), Common::getGrpcTimeout(unit_seconds));\n\n  Http::TestRequestHeaderMapImpl unit_milliseconds{{\"grpc-timeout\", \"12345678m\"}};\n  EXPECT_EQ(std::chrono::milliseconds(12345678), Common::getGrpcTimeout(unit_milliseconds));\n\n  Http::TestRequestHeaderMapImpl unit_microseconds{{\"grpc-timeout\", \"1000001u\"}};\n  EXPECT_EQ(std::chrono::milliseconds(1001), Common::getGrpcTimeout(unit_microseconds));\n\n  Http::TestRequestHeaderMapImpl unit_nanoseconds{{\"grpc-timeout\", \"12345678n\"}};\n  EXPECT_EQ(std::chrono::milliseconds(13), Common::getGrpcTimeout(unit_nanoseconds));\n\n  // Max 8 digits and no leading whitespace or +- signs are not enforced on decode,\n  // so we don't test for them.\n}\n\nTEST(GrpcCommonTest, GrpcStatusDetailsBin) {\n  Http::TestResponseTrailerMapImpl empty_trailers;\n  EXPECT_FALSE(Common::getGrpcStatusDetailsBin(empty_trailers));\n\n  Http::TestResponseTrailerMapImpl invalid_value{{\"grpc-status-details-bin\", \"invalid\"}};\n  EXPECT_FALSE(Common::getGrpcStatusDetailsBin(invalid_value));\n\n  Http::TestResponseTrailerMapImpl unpadded_value{\n      {\"grpc-status-details-bin\", \"CAUSElJlc291cmNlIG5vdCBmb3VuZA\"}};\n  auto status = Common::getGrpcStatusDetailsBin(unpadded_value);\n  ASSERT_TRUE(status);\n  EXPECT_EQ(Status::WellKnownGrpcStatus::NotFound, status->code());\n  EXPECT_EQ(\"Resource not found\", status->message());\n\n  Http::TestResponseTrailerMapImpl padded_value{\n      {\"grpc-status-details-bin\", \"CAUSElJlc291cmNlIG5vdCBmb3VuZA==\"}};\n  status = Common::getGrpcStatusDetailsBin(padded_value);\n  ASSERT_TRUE(status);\n  EXPECT_EQ(Status::WellKnownGrpcStatus::NotFound, status->code());\n  
EXPECT_EQ(\"Resource not found\", status->message());\n}\n\nTEST(GrpcContextTest, ToGrpcTimeout) {\n  Http::TestRequestHeaderMapImpl headers;\n\n  Common::toGrpcTimeout(std::chrono::milliseconds(0UL), headers);\n  EXPECT_EQ(\"0m\", headers.getGrpcTimeoutValue());\n\n  Common::toGrpcTimeout(std::chrono::milliseconds(1UL), headers);\n  EXPECT_EQ(\"1m\", headers.getGrpcTimeoutValue());\n\n  Common::toGrpcTimeout(std::chrono::milliseconds(100000000UL), headers);\n  EXPECT_EQ(\"100000S\", headers.getGrpcTimeoutValue());\n\n  Common::toGrpcTimeout(std::chrono::milliseconds(100000000000UL), headers);\n  EXPECT_EQ(\"1666666M\", headers.getGrpcTimeoutValue());\n\n  Common::toGrpcTimeout(std::chrono::milliseconds(9000000000000UL), headers);\n  EXPECT_EQ(\"2500000H\", headers.getGrpcTimeoutValue());\n\n  Common::toGrpcTimeout(std::chrono::milliseconds(360000000000000UL), headers);\n  EXPECT_EQ(\"99999999H\", headers.getGrpcTimeoutValue());\n\n  Common::toGrpcTimeout(std::chrono::milliseconds(UINT64_MAX), headers);\n  EXPECT_EQ(\"99999999H\", headers.getGrpcTimeoutValue());\n}\n\nTEST(GrpcContextTest, PrepareHeaders) {\n  {\n    Http::RequestMessagePtr message =\n        Common::prepareHeaders(\"cluster\", \"service_name\", \"method_name\", absl::nullopt);\n\n    EXPECT_EQ(\"POST\", message->headers().getMethodValue());\n    EXPECT_EQ(\"/service_name/method_name\", message->headers().getPathValue());\n    EXPECT_EQ(\"cluster\", message->headers().getHostValue());\n    EXPECT_EQ(\"application/grpc\", message->headers().getContentTypeValue());\n  }\n  {\n    Http::RequestMessagePtr message = Common::prepareHeaders(\n        \"cluster\", \"service_name\", \"method_name\", absl::optional<std::chrono::milliseconds>(1));\n\n    EXPECT_EQ(\"POST\", message->headers().getMethodValue());\n    EXPECT_EQ(\"/service_name/method_name\", message->headers().getPathValue());\n    EXPECT_EQ(\"cluster\", message->headers().getHostValue());\n    EXPECT_EQ(\"application/grpc\", 
message->headers().getContentTypeValue());\n    EXPECT_EQ(\"1m\", message->headers().getGrpcTimeoutValue());\n  }\n  {\n    Http::RequestMessagePtr message = Common::prepareHeaders(\n        \"cluster\", \"service_name\", \"method_name\", absl::optional<std::chrono::seconds>(1));\n\n    EXPECT_EQ(\"POST\", message->headers().getMethodValue());\n    EXPECT_EQ(\"/service_name/method_name\", message->headers().getPathValue());\n    EXPECT_EQ(\"cluster\", message->headers().getHostValue());\n    EXPECT_EQ(\"application/grpc\", message->headers().getContentTypeValue());\n    EXPECT_EQ(\"1000m\", message->headers().getGrpcTimeoutValue());\n  }\n  {\n    Http::RequestMessagePtr message = Common::prepareHeaders(\n        \"cluster\", \"service_name\", \"method_name\", absl::optional<std::chrono::minutes>(1));\n\n    EXPECT_EQ(\"POST\", message->headers().getMethodValue());\n    EXPECT_EQ(\"/service_name/method_name\", message->headers().getPathValue());\n    EXPECT_EQ(\"cluster\", message->headers().getHostValue());\n    EXPECT_EQ(\"application/grpc\", message->headers().getContentTypeValue());\n    EXPECT_EQ(\"60000m\", message->headers().getGrpcTimeoutValue());\n  }\n  {\n    Http::RequestMessagePtr message = Common::prepareHeaders(\n        \"cluster\", \"service_name\", \"method_name\", absl::optional<std::chrono::hours>(1));\n\n    EXPECT_EQ(\"POST\", message->headers().getMethodValue());\n    EXPECT_EQ(\"/service_name/method_name\", message->headers().getPathValue());\n    EXPECT_EQ(\"cluster\", message->headers().getHostValue());\n    EXPECT_EQ(\"application/grpc\", message->headers().getContentTypeValue());\n    EXPECT_EQ(\"3600000m\", message->headers().getGrpcTimeoutValue());\n  }\n  {\n    Http::RequestMessagePtr message = Common::prepareHeaders(\n        \"cluster\", \"service_name\", \"method_name\", absl::optional<std::chrono::hours>(100000000));\n\n    EXPECT_EQ(\"POST\", message->headers().getMethodValue());\n    EXPECT_EQ(\"/service_name/method_name\", 
message->headers().getPathValue());\n    EXPECT_EQ(\"cluster\", message->headers().getHostValue());\n    EXPECT_EQ(\"application/grpc\", message->headers().getContentTypeValue());\n    EXPECT_EQ(\"99999999H\", message->headers().getGrpcTimeoutValue());\n  }\n  {\n    Http::RequestMessagePtr message =\n        Common::prepareHeaders(\"cluster\", \"service_name\", \"method_name\",\n                               absl::optional<std::chrono::milliseconds>(100000000000));\n\n    EXPECT_EQ(\"POST\", message->headers().getMethodValue());\n    EXPECT_EQ(\"/service_name/method_name\", message->headers().getPathValue());\n    EXPECT_EQ(\"cluster\", message->headers().getHostValue());\n    EXPECT_EQ(\"application/grpc\", message->headers().getContentTypeValue());\n    EXPECT_EQ(\"1666666M\", message->headers().getGrpcTimeoutValue());\n  }\n}\n\nTEST(GrpcContextTest, GrpcToHttpStatus) {\n  const std::vector<std::pair<Status::GrpcStatus, uint64_t>> test_set = {\n      {Status::WellKnownGrpcStatus::Ok, 200},\n      {Status::WellKnownGrpcStatus::Canceled, 499},\n      {Status::WellKnownGrpcStatus::Unknown, 500},\n      {Status::WellKnownGrpcStatus::InvalidArgument, 400},\n      {Status::WellKnownGrpcStatus::DeadlineExceeded, 504},\n      {Status::WellKnownGrpcStatus::NotFound, 404},\n      {Status::WellKnownGrpcStatus::AlreadyExists, 409},\n      {Status::WellKnownGrpcStatus::PermissionDenied, 403},\n      {Status::WellKnownGrpcStatus::ResourceExhausted, 429},\n      {Status::WellKnownGrpcStatus::FailedPrecondition, 400},\n      {Status::WellKnownGrpcStatus::Aborted, 409},\n      {Status::WellKnownGrpcStatus::OutOfRange, 400},\n      {Status::WellKnownGrpcStatus::Unimplemented, 501},\n      {Status::WellKnownGrpcStatus::Internal, 500},\n      {Status::WellKnownGrpcStatus::Unavailable, 503},\n      {Status::WellKnownGrpcStatus::DataLoss, 500},\n      {Status::WellKnownGrpcStatus::Unauthenticated, 401},\n      {Status::WellKnownGrpcStatus::InvalidCode, 500},\n  };\n  for (const 
auto& test_case : test_set) {\n    EXPECT_EQ(test_case.second, Grpc::Utility::grpcToHttpStatus(test_case.first));\n  }\n}\n\nTEST(GrpcContextTest, HttpToGrpcStatus) {\n  const std::vector<std::pair<uint64_t, Status::GrpcStatus>> test_set = {\n      {400, Status::WellKnownGrpcStatus::Internal},\n      {401, Status::WellKnownGrpcStatus::Unauthenticated},\n      {403, Status::WellKnownGrpcStatus::PermissionDenied},\n      {404, Status::WellKnownGrpcStatus::Unimplemented},\n      {429, Status::WellKnownGrpcStatus::Unavailable},\n      {502, Status::WellKnownGrpcStatus::Unavailable},\n      {503, Status::WellKnownGrpcStatus::Unavailable},\n      {504, Status::WellKnownGrpcStatus::Unavailable},\n      {500, Status::WellKnownGrpcStatus::Unknown},\n  };\n  for (const auto& test_case : test_set) {\n    EXPECT_EQ(test_case.second, Grpc::Utility::httpToGrpcStatus(test_case.first));\n  }\n}\n\nTEST(GrpcContextTest, HasGrpcContentType) {\n  {\n    Http::TestRequestHeaderMapImpl headers{};\n    EXPECT_FALSE(Common::hasGrpcContentType(headers));\n  }\n  auto isGrpcContentType = [](const std::string& s) {\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", s}};\n    return Common::hasGrpcContentType(headers);\n  };\n  EXPECT_FALSE(isGrpcContentType(\"\"));\n  EXPECT_FALSE(isGrpcContentType(\"application/text\"));\n  EXPECT_TRUE(isGrpcContentType(\"application/grpc\"));\n  EXPECT_TRUE(isGrpcContentType(\"application/grpc+\"));\n  EXPECT_TRUE(isGrpcContentType(\"application/grpc+foo\"));\n  EXPECT_FALSE(isGrpcContentType(\"application/grpc-\"));\n  EXPECT_FALSE(isGrpcContentType(\"application/grpc-web\"));\n  EXPECT_FALSE(isGrpcContentType(\"application/grpc-web+foo\"));\n}\n\nTEST(GrpcContextTest, IsGrpcRequestHeader) {\n  Http::TestRequestHeaderMapImpl is{\n      {\":method\", \"GET\"}, {\":path\", \"/\"}, {\"content-type\", \"application/grpc\"}};\n  EXPECT_TRUE(Common::isGrpcRequestHeaders(is));\n  Http::TestRequestHeaderMapImpl is_not{{\":method\", \"CONNECT\"},\n    
                                    {\"content-type\", \"application/grpc\"}};\n  EXPECT_FALSE(Common::isGrpcRequestHeaders(is_not));\n}\n\nTEST(GrpcContextTest, IsGrpcResponseHeader) {\n  Http::TestResponseHeaderMapImpl grpc_status_only{{\":status\", \"500\"}, {\"grpc-status\", \"14\"}};\n  EXPECT_TRUE(Common::isGrpcResponseHeaders(grpc_status_only, true));\n  EXPECT_FALSE(Common::isGrpcResponseHeaders(grpc_status_only, false));\n\n  Http::TestResponseHeaderMapImpl grpc_response_header{{\":status\", \"200\"},\n                                                       {\"content-type\", \"application/grpc\"}};\n  EXPECT_FALSE(Common::isGrpcResponseHeaders(grpc_response_header, true));\n  EXPECT_TRUE(Common::isGrpcResponseHeaders(grpc_response_header, false));\n\n  Http::TestResponseHeaderMapImpl json_response_header{{\":status\", \"200\"},\n                                                       {\"content-type\", \"application/json\"}};\n  EXPECT_FALSE(Common::isGrpcResponseHeaders(json_response_header, true));\n  EXPECT_FALSE(Common::isGrpcResponseHeaders(json_response_header, false));\n}\n\nTEST(GrpcContextTest, ValidateResponse) {\n  {\n    Http::ResponseMessageImpl response(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}});\n    response.trailers(\n        Http::ResponseTrailerMapPtr{new Http::TestResponseTrailerMapImpl{{\"grpc-status\", \"0\"}}});\n    EXPECT_NO_THROW(Common::validateResponse(response));\n  }\n  {\n    Http::ResponseMessageImpl response(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}});\n    EXPECT_THROW_WITH_MESSAGE(Common::validateResponse(response), Exception,\n                              \"non-200 response code\");\n  }\n  {\n    Http::ResponseMessageImpl response(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}});\n    response.trailers(\n        Http::ResponseTrailerMapPtr{new 
Http::TestResponseTrailerMapImpl{{\"grpc-status\", \"100\"}}});\n    EXPECT_THROW_WITH_MESSAGE(Common::validateResponse(response), Exception,\n                              \"bad grpc-status trailer\");\n  }\n  {\n    Http::ResponseMessageImpl response(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}});\n    response.trailers(\n        Http::ResponseTrailerMapPtr{new Http::TestResponseTrailerMapImpl{{\"grpc-status\", \"4\"}}});\n    EXPECT_THROW_WITH_MESSAGE(Common::validateResponse(response), Exception, \"\");\n  }\n  {\n    Http::ResponseMessageImpl response(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}});\n    response.trailers(Http::ResponseTrailerMapPtr{new Http::TestResponseTrailerMapImpl{\n        {\"grpc-status\", \"4\"}, {\"grpc-message\", \"custom error\"}}});\n    EXPECT_THROW_WITH_MESSAGE(Common::validateResponse(response), Exception, \"custom error\");\n  }\n  {\n    Http::ResponseMessageImpl response(Http::ResponseHeaderMapPtr{\n        new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"grpc-status\", \"100\"}}});\n    EXPECT_THROW_WITH_MESSAGE(Common::validateResponse(response), Exception,\n                              \"bad grpc-status header\");\n  }\n  {\n    Http::ResponseMessageImpl response(Http::ResponseHeaderMapPtr{\n        new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"grpc-status\", \"4\"}}});\n    EXPECT_THROW_WITH_MESSAGE(Common::validateResponse(response), Exception, \"\");\n  }\n  {\n    Http::ResponseMessageImpl response(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{\n            {\":status\", \"200\"}, {\"grpc-status\", \"4\"}, {\"grpc-message\", \"custom error\"}}});\n    EXPECT_THROW_WITH_MESSAGE(Common::validateResponse(response), Exception, \"custom error\");\n  }\n}\n\n// Ensure that the correct gPRC header is constructed for a Buffer::Instance.\nTEST(GrpcContextTest, 
PrependGrpcFrameHeader) {\n  auto buffer = std::make_unique<Buffer::OwnedImpl>();\n  buffer->add(\"test\", 4);\n  std::array<char, 5> expected_header;\n  expected_header[0] = 0; // flags\n  const uint32_t nsize = htonl(4);\n  std::memcpy(&expected_header[1], reinterpret_cast<const void*>(&nsize), sizeof(uint32_t));\n  std::string header_string(&expected_header[0], 5);\n  Common::prependGrpcFrameHeader(*buffer);\n  EXPECT_EQ(buffer->toString(), header_string + \"test\");\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/context_impl_test.cc",
    "content": "#include \"envoy/common/platform.h\"\n\n#include \"common/grpc/common.h\"\n#include \"common/grpc/context_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/test_common/global.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nTEST(GrpcContextTest, ChargeStats) {\n  NiceMock<Upstream::MockClusterInfo> cluster;\n  Stats::TestSymbolTable symbol_table_;\n  Stats::StatNamePool pool(*symbol_table_);\n  const Stats::StatName service = pool.add(\"service\");\n  const Stats::StatName method = pool.add(\"method\");\n  Context::RequestStatNames request_names{service, method};\n  ContextImpl context(*symbol_table_);\n  context.chargeStat(cluster, request_names, true);\n  EXPECT_EQ(1U, cluster.stats_store_.counter(\"grpc.service.method.success\").value());\n  EXPECT_EQ(0U, cluster.stats_store_.counter(\"grpc.service.method.failure\").value());\n  EXPECT_EQ(1U, cluster.stats_store_.counter(\"grpc.service.method.total\").value());\n\n  context.chargeStat(cluster, request_names, false);\n  EXPECT_EQ(1U, cluster.stats_store_.counter(\"grpc.service.method.success\").value());\n  EXPECT_EQ(1U, cluster.stats_store_.counter(\"grpc.service.method.failure\").value());\n  EXPECT_EQ(2U, cluster.stats_store_.counter(\"grpc.service.method.total\").value());\n\n  context.chargeRequestMessageStat(cluster, request_names, 3);\n  context.chargeResponseMessageStat(cluster, request_names, 4);\n  EXPECT_EQ(3U, cluster.stats_store_.counter(\"grpc.service.method.request_message_count\").value());\n  EXPECT_EQ(4U, cluster.stats_store_.counter(\"grpc.service.method.response_message_count\").value());\n\n  context.chargeRequestMessageStat(cluster, {}, 3);\n  context.chargeResponseMessageStat(cluster, {}, 4);\n  EXPECT_EQ(3U, 
cluster.stats_store_.counter(\"grpc.request_message_count\").value());\n  EXPECT_EQ(4U, cluster.stats_store_.counter(\"grpc.response_message_count\").value());\n\n  Http::TestResponseTrailerMapImpl trailers;\n  trailers.setGrpcStatus(\"0\");\n  const Http::HeaderEntry* status = trailers.GrpcStatus();\n  context.chargeStat(cluster, Context::Protocol::Grpc, request_names, status);\n  EXPECT_EQ(1U, cluster.stats_store_.counter(\"grpc.service.method.0\").value());\n  EXPECT_EQ(2U, cluster.stats_store_.counter(\"grpc.service.method.success\").value());\n  EXPECT_EQ(1U, cluster.stats_store_.counter(\"grpc.service.method.failure\").value());\n  EXPECT_EQ(3U, cluster.stats_store_.counter(\"grpc.service.method.total\").value());\n\n  trailers.setGrpcStatus(\"1\");\n  context.chargeStat(cluster, Context::Protocol::Grpc, request_names, status);\n  EXPECT_EQ(1U, cluster.stats_store_.counter(\"grpc.service.method.0\").value());\n  EXPECT_EQ(1U, cluster.stats_store_.counter(\"grpc.service.method.1\").value());\n  EXPECT_EQ(2U, cluster.stats_store_.counter(\"grpc.service.method.success\").value());\n  EXPECT_EQ(2U, cluster.stats_store_.counter(\"grpc.service.method.failure\").value());\n  EXPECT_EQ(4U, cluster.stats_store_.counter(\"grpc.service.method.total\").value());\n}\n\nTEST(GrpcContextTest, ResolveServiceAndMethod) {\n  std::string service;\n  std::string method;\n  Http::TestRequestHeaderMapImpl headers;\n  headers.setPath(\"/service_name/method_name?a=b\");\n  const Http::HeaderEntry* path = headers.Path();\n  Stats::TestSymbolTable symbol_table;\n  ContextImpl context(*symbol_table);\n  absl::optional<Context::RequestStatNames> request_names =\n      context.resolveDynamicServiceAndMethod(path);\n  EXPECT_TRUE(request_names);\n  EXPECT_EQ(\"service_name\", absl::get<Stats::DynamicName>(request_names->service_));\n  EXPECT_EQ(\"method_name\", absl::get<Stats::DynamicName>(request_names->method_));\n  headers.setPath(\"\");\n  
EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path));\n  headers.setPath(\"/\");\n  EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path));\n  headers.setPath(\"//\");\n  EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path));\n  headers.setPath(\"/service_name\");\n  EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path));\n  headers.setPath(\"/service_name/\");\n  EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path));\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/google_async_client_impl_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/grpc/google_async_client_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/proto/helloworld.pb.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\nclass MockGenericStub : public GoogleStub {\npublic:\n  MOCK_METHOD(grpc::GenericClientAsyncReaderWriter*, PrepareCall_,\n              (grpc::ClientContext * context, const grpc::string& method,\n               grpc::CompletionQueue* cq));\n\n  std::unique_ptr<grpc::GenericClientAsyncReaderWriter>\n  PrepareCall(grpc::ClientContext* context, const grpc::string& method,\n              grpc::CompletionQueue* cq) override {\n    return std::unique_ptr<grpc::GenericClientAsyncReaderWriter>(PrepareCall_(context, method, cq));\n  }\n};\n\nclass MockStubFactory : public GoogleStubFactory {\npublic:\n  GoogleStubSharedPtr createStub(std::shared_ptr<grpc::Channel> /*channel*/) override {\n    return shared_stub_;\n  }\n\n  MockGenericStub* stub_ = new MockGenericStub();\n  GoogleStubSharedPtr shared_stub_{stub_};\n};\n\nclass EnvoyGoogleAsyncClientImplTest : public testing::Test {\npublic:\n  EnvoyGoogleAsyncClientImplTest()\n      : stats_store_(new Stats::IsolatedStoreImpl), api_(Api::createApiForTest(*stats_store_)),\n        dispatcher_(api_->allocateDispatcher(\"test_thread\")), scope_(stats_store_),\n        method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName(\"SayHello\")),\n        stat_names_(scope_->symbolTable()) {\n\n    auto* google_grpc = 
config_.mutable_google_grpc();\n    google_grpc->set_target_uri(\"fake_address\");\n    google_grpc->set_stat_prefix(\"test_cluster\");\n    tls_ = std::make_unique<GoogleAsyncClientThreadLocal>(*api_);\n  }\n\n  virtual void initialize() {\n    grpc_client_ = std::make_unique<GoogleAsyncClientImpl>(*dispatcher_, *tls_, stub_factory_,\n                                                           scope_, config_, *api_, stat_names_);\n  }\n\n  envoy::config::core::v3::GrpcService config_;\n  DangerousDeprecatedTestTime test_time_;\n  Stats::IsolatedStoreImpl* stats_store_; // Ownership transferred to scope_.\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  Stats::ScopeSharedPtr scope_;\n  GoogleAsyncClientThreadLocalPtr tls_;\n  MockStubFactory stub_factory_;\n  const Protobuf::MethodDescriptor* method_descriptor_;\n  StatNames stat_names_;\n  AsyncClient<helloworld::HelloRequest, helloworld::HelloReply> grpc_client_;\n};\n\n// Validate that a failure in gRPC stub call creation returns immediately with\n// status UNAVAILABLE.\nTEST_F(EnvoyGoogleAsyncClientImplTest, StreamHttpStartFail) {\n  initialize();\n\n  EXPECT_CALL(*stub_factory_.stub_, PrepareCall_(_, _, _)).WillOnce(Return(nullptr));\n  MockAsyncStreamCallbacks<helloworld::HelloReply> grpc_callbacks;\n  EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_));\n  EXPECT_CALL(grpc_callbacks, onReceiveTrailingMetadata_(_));\n  EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, \"\"));\n  auto grpc_stream =\n      grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions());\n  EXPECT_TRUE(grpc_stream == nullptr);\n}\n\n// Validate that a failure in gRPC stub call creation returns immediately with\n// status UNAVAILABLE.\nTEST_F(EnvoyGoogleAsyncClientImplTest, RequestHttpStartFail) {\n  initialize();\n\n  EXPECT_CALL(*stub_factory_.stub_, PrepareCall_(_, _, _)).WillOnce(Return(nullptr));\n  
MockAsyncRequestCallbacks<helloworld::HelloReply> grpc_callbacks;\n  EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_));\n  EXPECT_CALL(grpc_callbacks, onFailure(Status::WellKnownGrpcStatus::Unavailable, \"\", _));\n  helloworld::HelloRequest request_msg;\n\n  Tracing::MockSpan active_span;\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  EXPECT_CALL(active_span, spawnChild_(_, \"async test_cluster egress\", _))\n      .WillOnce(Return(child_span));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"test_cluster\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq(\"14\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(*child_span, finishSpan());\n  EXPECT_CALL(*child_span, injectContext(_));\n\n  auto* grpc_request = grpc_client_->send(*method_descriptor_, request_msg, grpc_callbacks,\n                                          active_span, Http::AsyncClient::RequestOptions());\n  EXPECT_TRUE(grpc_request == nullptr);\n}\n\nclass EnvoyGoogleLessMockedAsyncClientImplTest : public EnvoyGoogleAsyncClientImplTest {\npublic:\n  void initialize() override {\n    grpc_client_ = std::make_unique<GoogleAsyncClientImpl>(*dispatcher_, *tls_, real_stub_factory_,\n                                                           scope_, config_, *api_, stat_names_);\n  }\n\n  GoogleGenericStubFactory real_stub_factory_;\n};\n\nTEST_F(EnvoyGoogleLessMockedAsyncClientImplTest, TestOverflow) {\n  // Set an (unreasonably) low byte limit.\n  auto* google_grpc = config_.mutable_google_grpc();\n  google_grpc->mutable_per_stream_buffer_limit_bytes()->set_value(1);\n  initialize();\n\n  NiceMock<MockAsyncStreamCallbacks<helloworld::HelloReply>> grpc_callbacks;\n  AsyncStream<helloworld::HelloRequest> grpc_stream =\n      
grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::RequestOptions());\n  EXPECT_FALSE(grpc_stream == nullptr);\n  EXPECT_FALSE(grpc_stream->isAboveWriteBufferHighWatermark());\n\n  // With no data in the message, it won't back up.\n  helloworld::HelloRequest request_msg;\n  grpc_stream->sendMessage(request_msg, false);\n  EXPECT_FALSE(grpc_stream->isAboveWriteBufferHighWatermark());\n\n  // With actual data we pass the very small byte limit.\n  request_msg.set_name(\"bob\");\n  grpc_stream->sendMessage(request_msg, false);\n  EXPECT_TRUE(grpc_stream->isAboveWriteBufferHighWatermark());\n}\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/google_grpc_creds_test.cc",
    "content": "#include <cstdlib>\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"common/grpc/google_grpc_creds_impl.h\"\n\n#include \"test/common/grpc/utility.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\n// In general, below, we force execution of all paths, but because the\n// underlying grpc::{CallCredentials,ChannelCredentials} don't have any real way\n// of getting at the underlying state, we can at best just make sure we don't\n// crash, compare with nullptr and/or look at vector lengths.\n\nclass CredsUtilityTest : public testing::Test {\npublic:\n  CredsUtilityTest() : api_(Api::createApiForTest()) {}\n\n  Api::ApiPtr api_;\n};\n\nTEST_F(CredsUtilityTest, GetChannelCredentials) {\n  EXPECT_EQ(nullptr, CredsUtility::getChannelCredentials({}, *api_));\n  envoy::config::core::v3::GrpcService::GoogleGrpc config;\n  auto* creds = config.mutable_channel_credentials();\n  EXPECT_EQ(nullptr, CredsUtility::getChannelCredentials(config, *api_));\n  creds->mutable_ssl_credentials();\n  EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_));\n  creds->mutable_local_credentials();\n  EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_));\n\n  const std::string var_name = \"GOOGLE_APPLICATION_CREDENTIALS\";\n  EXPECT_EQ(nullptr, ::getenv(var_name.c_str()));\n  const std::string creds_path = TestEnvironment::runfilesPath(\"test/common/grpc/service_key.json\");\n  TestEnvironment::setEnvVar(var_name, creds_path, 0);\n  creds->mutable_google_default();\n  EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_));\n  TestEnvironment::unsetEnvVar(var_name);\n}\n\nTEST_F(CredsUtilityTest, DefaultSslChannelCredentials) {\n  EXPECT_NE(nullptr, CredsUtility::defaultSslChannelCredentials({}, *api_));\n  envoy::config::core::v3::GrpcService 
config;\n  auto* creds = config.mutable_google_grpc()->mutable_channel_credentials();\n  EXPECT_NE(nullptr, CredsUtility::defaultSslChannelCredentials(config, *api_));\n  creds->mutable_ssl_credentials();\n  EXPECT_NE(nullptr, CredsUtility::defaultSslChannelCredentials(config, *api_));\n}\n\nTEST_F(CredsUtilityTest, CallCredentials) {\n  EXPECT_TRUE(CredsUtility::callCredentials({}).empty());\n  {\n    // Invalid refresh token doesn't crash and gets elided.\n    envoy::config::core::v3::GrpcService::GoogleGrpc config;\n    config.add_call_credentials()->set_google_refresh_token(\"invalid\");\n    EXPECT_TRUE(CredsUtility::callCredentials(config).empty());\n  }\n  {\n    // Singleton access token succeeds.\n    envoy::config::core::v3::GrpcService::GoogleGrpc config;\n    config.add_call_credentials()->set_access_token(\"foo\");\n    EXPECT_EQ(1, CredsUtility::callCredentials(config).size());\n  }\n  {\n    // Multiple call credentials.\n    envoy::config::core::v3::GrpcService::GoogleGrpc config;\n    config.add_call_credentials()->set_access_token(\"foo\");\n    config.add_call_credentials()->mutable_google_compute_engine();\n    EXPECT_EQ(2, CredsUtility::callCredentials(config).size());\n  }\n  // The full set of call credentials are evaluated below in\n  // CredsUtility.DefaultChannelCredentials.\n}\n\nTEST_F(CredsUtilityTest, DefaultChannelCredentials) {\n  { EXPECT_NE(nullptr, CredsUtility::defaultChannelCredentials({}, *api_)); }\n  {\n    envoy::config::core::v3::GrpcService config;\n    TestUtility::setTestSslGoogleGrpcConfig(config, true);\n    EXPECT_NE(nullptr, CredsUtility::defaultChannelCredentials(config, *api_));\n  }\n  {\n    envoy::config::core::v3::GrpcService config;\n    TestUtility::setTestSslGoogleGrpcConfig(config, true);\n    auto* google_grpc = config.mutable_google_grpc();\n    google_grpc->add_call_credentials()->set_access_token(\"foo\");\n    google_grpc->add_call_credentials()->mutable_google_compute_engine();\n    
google_grpc->add_call_credentials()->set_google_refresh_token(R\"EOF(\n    {\n      \"client_id\": \"123\",\n      \"client_secret\": \"foo\",\n      \"refresh_token\": \"bar\",\n      \"type\": \"authorized_user\"\n    }\n    )EOF\");\n    {\n      auto* service_account_jwt_access =\n          google_grpc->add_call_credentials()->mutable_service_account_jwt_access();\n      service_account_jwt_access->set_json_key(R\"EOF(\n      {\n        \"private_key\": \"foo\",\n        \"private_key_id\": \"bar\",\n        \"client_id\": \"123\",\n        \"client_email\": \"foo@bar\",\n        \"type\": \"service_account\"\n      }\n      )EOF\");\n      service_account_jwt_access->set_token_lifetime_seconds(123);\n    }\n    {\n      auto* google_iam = google_grpc->add_call_credentials()->mutable_google_iam();\n      google_iam->set_authorization_token(\"foo\");\n      google_iam->set_authority_selector(\"bar\");\n    }\n    // Should be ignored..\n    google_grpc->add_call_credentials()->mutable_from_plugin()->set_name(\"foo\");\n    EXPECT_NE(nullptr, CredsUtility::defaultChannelCredentials(config, *api_));\n  }\n  {\n    envoy::config::core::v3::GrpcService config;\n    TestUtility::setTestSslGoogleGrpcConfig(config, true);\n    auto* sts_service = config.mutable_google_grpc()->add_call_credentials()->mutable_sts_service();\n    sts_service->set_token_exchange_service_uri(\"http://tokenexchangeservice.com\");\n    sts_service->set_subject_token_path(\"/var/run/example_token\");\n    sts_service->set_subject_token_type(\"urn:ietf:params:oauth:token-type:access_token\");\n    EXPECT_NE(nullptr, CredsUtility::defaultChannelCredentials(config, *api_));\n  }\n}\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/google_grpc_utils_test.cc",
    "content": "#include \"envoy/common/platform.h\"\n\n#include \"common/grpc/google_grpc_utils.h\"\n\n#include \"test/proto/helloworld.pb.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::HasSubstr;\nusing testing::Pair;\nusing testing::UnorderedElementsAre;\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\nTEST(GoogleGrpcUtilsTest, MakeBufferInstanceEmpty) {\n  grpc::ByteBuffer byte_buffer;\n  GoogleGrpcUtils::makeBufferInstance(byte_buffer);\n}\n\nTEST(GoogleGrpcUtilsTest, MakeByteBufferEmpty) {\n  auto buffer = std::make_unique<Buffer::OwnedImpl>();\n  GoogleGrpcUtils::makeByteBuffer(std::move(buffer));\n  buffer = nullptr;\n  GoogleGrpcUtils::makeByteBuffer(std::move(buffer));\n}\n\nTEST(GoogleGrpcUtilsTest, MakeBufferInstance1) {\n  grpc::Slice slice(\"test\");\n  grpc::ByteBuffer byte_buffer(&slice, 1);\n  auto buffer_instance = GoogleGrpcUtils::makeBufferInstance(byte_buffer);\n  EXPECT_EQ(buffer_instance->toString(), \"test\");\n}\n\n// Test building a Buffer::Instance from 3 grpc::Slice(s).\nTEST(GoogleGrpcUtilsTest, MakeBufferInstance3) {\n  std::array<grpc::Slice, 3> slices = {\n      {grpc::string(\"test\"), grpc::string(\" \"), grpc::string(\"this\")}};\n  grpc::ByteBuffer byte_buffer(&slices[0], 3);\n  auto buffer_instance = GoogleGrpcUtils::makeBufferInstance(byte_buffer);\n  EXPECT_EQ(buffer_instance->toString(), \"test this\");\n}\n\nTEST(GoogleGrpcUtilsTest, MakeByteBuffer1) {\n  auto buffer = std::make_unique<Buffer::OwnedImpl>();\n  buffer->add(\"test\", 4);\n  auto byte_buffer = GoogleGrpcUtils::makeByteBuffer(std::move(buffer));\n  std::vector<grpc::Slice> slices;\n  RELEASE_ASSERT(byte_buffer.Dump(&slices).ok(), \"\");\n  std::string str;\n  for (auto& s : slices) {\n    str.append(std::string(reinterpret_cast<const char*>(s.begin()), s.size()));\n  }\n  EXPECT_EQ(str, \"test\");\n}\n\n// Test building a grpc::ByteBuffer from a Buffer::Instance with 3 slices.\nTEST(GoogleGrpcUtilsTest, 
MakeByteBuffer3) {\n  auto buffer = std::make_unique<Buffer::OwnedImpl>();\n  Buffer::BufferFragmentImpl f1(\"test\", 4, nullptr);\n  buffer->addBufferFragment(f1);\n  Buffer::BufferFragmentImpl f2(\" \", 1, nullptr);\n  buffer->addBufferFragment(f2);\n  Buffer::BufferFragmentImpl f3(\"this\", 4, nullptr);\n  buffer->addBufferFragment(f3);\n  auto byte_buffer = GoogleGrpcUtils::makeByteBuffer(std::move(buffer));\n  std::vector<grpc::Slice> slices;\n  RELEASE_ASSERT(byte_buffer.Dump(&slices).ok(), \"\");\n  std::string str;\n  for (auto& s : slices) {\n    str.append(std::string(reinterpret_cast<const char*>(s.begin()), s.size()));\n  }\n  EXPECT_EQ(str, \"test this\");\n}\n\n// Test building a Buffer::Instance from a grpc::ByteBuffer from a Buffer::Instance with 3 slices.\nTEST(GoogleGrpcUtilsTest, ByteBufferInstanceRoundTrip) {\n  std::array<grpc::Slice, 3> slices = {\n      {grpc::string(\"test\"), grpc::string(\" \"), grpc::string(\"this\")}};\n  grpc::ByteBuffer byte_buffer(&slices[0], 3);\n  auto buffer_instance1 = GoogleGrpcUtils::makeBufferInstance(byte_buffer);\n  auto byte_buffer2 = GoogleGrpcUtils::makeByteBuffer(std::move(buffer_instance1));\n  auto buffer_instance2 = GoogleGrpcUtils::makeBufferInstance(byte_buffer2);\n  EXPECT_EQ(buffer_instance2->toString(), \"test this\");\n}\n\n// Validate that we build the grpc::ChannelArguments as expected.\nTEST(GoogleGrpcUtilsTest, ChannelArgsFromConfig) {\n  const auto config = TestUtility::parseYaml<envoy::config::core::v3::GrpcService>(R\"EOF(\n  google_grpc:\n    channel_args:\n      args:\n        grpc.http2.max_pings_without_data: { int_value: 3 }\n        grpc.default_authority: { string_value: foo }\n        grpc.http2.max_ping_strikes: { int_value: 5 }\n        grpc.ssl_target_name_override: { string_value: bar }\n  )EOF\");\n  const grpc::ChannelArguments channel_args = GoogleGrpcUtils::channelArgsFromConfig(config);\n  grpc_channel_args effective_args = channel_args.c_channel_args();\n  
absl::node_hash_map<std::string, std::string> string_args;\n  absl::node_hash_map<std::string, int> int_args;\n  for (uint32_t n = 0; n < effective_args.num_args; ++n) {\n    const grpc_arg arg = effective_args.args[n];\n    ASSERT_TRUE(arg.type == GRPC_ARG_STRING || arg.type == GRPC_ARG_INTEGER);\n    if (arg.type == GRPC_ARG_STRING) {\n      string_args[arg.key] = arg.value.string;\n    } else if (arg.type == GRPC_ARG_INTEGER) {\n      int_args[arg.key] = arg.value.integer;\n    }\n  }\n  EXPECT_THAT(string_args,\n              UnorderedElementsAre(Pair(\"grpc.ssl_target_name_override\", \"bar\"),\n                                   Pair(\"grpc.primary_user_agent\", HasSubstr(\"grpc-c++/\")),\n                                   Pair(\"grpc.default_authority\", \"foo\")));\n  EXPECT_THAT(int_args, UnorderedElementsAre(Pair(\"grpc.http2.max_ping_strikes\", 5),\n                                             Pair(\"grpc.http2.max_pings_without_data\", 3)));\n}\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/grpc_client_integration.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\n// Support parameterizing over gRPC client type.\nenum class ClientType { EnvoyGrpc, GoogleGrpc };\n// Support parameterizing over state-of-the-world xDS vs delta xDS.\nenum class SotwOrDelta { Sotw, Delta };\n\nclass BaseGrpcClientIntegrationParamTest {\npublic:\n  virtual ~BaseGrpcClientIntegrationParamTest() = default;\n  virtual Network::Address::IpVersion ipVersion() const PURE;\n  virtual ClientType clientType() const PURE;\n\n  void setGrpcService(envoy::config::core::v3::GrpcService& grpc_service,\n                      const std::string& cluster_name,\n                      Network::Address::InstanceConstSharedPtr address) {\n    // Set a 5 minute timeout to avoid flakes. If this causes a real test timeout the test is\n    // broken and/or should be using simulated time.\n    grpc_service.mutable_timeout()->CopyFrom(Protobuf::util::TimeUtil::SecondsToDuration(300));\n    switch (clientType()) {\n    case ClientType::EnvoyGrpc:\n      grpc_service.mutable_envoy_grpc()->set_cluster_name(cluster_name);\n      break;\n    case ClientType::GoogleGrpc: {\n      auto* google_grpc = grpc_service.mutable_google_grpc();\n      google_grpc->set_target_uri(address->asString());\n      google_grpc->set_stat_prefix(cluster_name);\n      break;\n    }\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n};\n\nclass GrpcClientIntegrationParamTest\n    : public BaseGrpcClientIntegrationParamTest,\n      public testing::TestWithParam<std::tuple<Network::Address::IpVersion, ClientType>> {\npublic:\n  static std::string protocolTestParamsToString(\n      const ::testing::TestParamInfo<std::tuple<Network::Address::IpVersion, ClientType>>& p) {\n    return fmt::format(\"{}_{}\",\n                       std::get<0>(p.param) == 
Network::Address::IpVersion::v4 ? \"IPv4\" : \"IPv6\",\n                       std::get<1>(p.param) == ClientType::GoogleGrpc ? \"GoogleGrpc\" : \"EnvoyGrpc\");\n  }\n  Network::Address::IpVersion ipVersion() const override { return std::get<0>(GetParam()); }\n  ClientType clientType() const override { return std::get<1>(GetParam()); }\n};\n\nclass VersionedGrpcClientIntegrationParamTest\n    : public BaseGrpcClientIntegrationParamTest,\n      public testing::TestWithParam<std::tuple<Network::Address::IpVersion, ClientType,\n                                               envoy::config::core::v3::ApiVersion>> {\npublic:\n  static std::string protocolTestParamsToString(\n      const ::testing::TestParamInfo<std::tuple<Network::Address::IpVersion, ClientType,\n                                                envoy::config::core::v3::ApiVersion>>& p) {\n    return fmt::format(\"{}_{}_{}\",\n                       std::get<0>(p.param) == Network::Address::IpVersion::v4 ? \"IPv4\" : \"IPv6\",\n                       std::get<1>(p.param) == ClientType::GoogleGrpc ? \"GoogleGrpc\" : \"EnvoyGrpc\",\n                       std::get<2>(p.param) == envoy::config::core::v3::ApiVersion::V3\n                           ? \"V3\"\n                           : envoy::config::core::v3::ApiVersion::V2 ? 
\"V2\" : \"AUTO\");\n  }\n  Network::Address::IpVersion ipVersion() const override { return std::get<0>(GetParam()); }\n  ClientType clientType() const override { return std::get<1>(GetParam()); }\n  envoy::config::core::v3::ApiVersion apiVersion() const { return std::get<2>(GetParam()); }\n};\n\nclass DeltaSotwIntegrationParamTest\n    : public BaseGrpcClientIntegrationParamTest,\n      public testing::TestWithParam<\n          std::tuple<Network::Address::IpVersion, ClientType, SotwOrDelta>> {\npublic:\n  ~DeltaSotwIntegrationParamTest() override = default;\n  static std::string\n  protocolTestParamsToString(const ::testing::TestParamInfo<\n                             std::tuple<Network::Address::IpVersion, ClientType, SotwOrDelta>>& p) {\n    return fmt::format(\"{}_{}_{}\",\n                       std::get<0>(p.param) == Network::Address::IpVersion::v4 ? \"IPv4\" : \"IPv6\",\n                       std::get<1>(p.param) == ClientType::GoogleGrpc ? \"GoogleGrpc\" : \"EnvoyGrpc\",\n                       std::get<2>(p.param) == SotwOrDelta::Delta ? 
\"Delta\" : \"StateOfTheWorld\");\n  }\n  Network::Address::IpVersion ipVersion() const override { return std::get<0>(GetParam()); }\n  ClientType clientType() const override { return std::get<1>(GetParam()); }\n  SotwOrDelta sotwOrDelta() const { return std::get<2>(GetParam()); }\n};\n\n// Skip tests based on gRPC client type.\n#define SKIP_IF_GRPC_CLIENT(client_type)                                                           \\\n  if (clientType() == (client_type)) {                                                             \\\n    return;                                                                                        \\\n  }\n\n// Skip tests based on xDS delta vs state-of-the-world.\n#define SKIP_IF_XDS_IS(xds)                                                                        \\\n  if (sotwOrDelta() == (xds)) {                                                                    \\\n    return;                                                                                        \\\n  }\n\n#ifdef ENVOY_GOOGLE_GRPC\n#define GRPC_CLIENT_INTEGRATION_PARAMS                                                             \\\n  testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),                     \\\n                   testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc))\n#define VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS                                                   \\\n  testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),                     \\\n                   testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc),     \\\n                   testing::Values(envoy::config::core::v3::ApiVersion::V3,                        \\\n                                   envoy::config::core::v3::ApiVersion::V2,                        \\\n                                   envoy::config::core::v3::ApiVersion::AUTO))\n#define DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS        
                                          \\\n  testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),                     \\\n                   testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc),     \\\n                   testing::Values(Grpc::SotwOrDelta::Sotw, Grpc::SotwOrDelta::Delta))\n#else\n#define GRPC_CLIENT_INTEGRATION_PARAMS                                                             \\\n  testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),                     \\\n                   testing::Values(Grpc::ClientType::EnvoyGrpc))\n#define VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS                                                   \\\n  testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),                     \\\n                   testing::Values(Grpc::ClientType::EnvoyGrpc),                                   \\\n                   testing::Values(envoy::config::core::v3::ApiVersion::V3,                        \\\n                                   envoy::config::core::v3::ApiVersion::V2,                        \\\n                                   envoy::config::core::v3::ApiVersion::AUTO))\n#define DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS                                                  \\\n  testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),                     \\\n                   testing::Values(Grpc::ClientType::EnvoyGrpc),                                   \\\n                   testing::Values(Grpc::SotwOrDelta::Sotw, Grpc::SotwOrDelta::Delta))\n#endif // ENVOY_GOOGLE_GRPC\n\n} // namespace Grpc\n} // namespace Envoy"
  },
  {
    "path": "test/common/grpc/grpc_client_integration_test.cc",
    "content": "#ifdef ENVOY_GOOGLE_GRPC\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"common/grpc/google_async_client_impl.h\"\n\n#include \"extensions/grpc_credentials/well_known_names.h\"\n\n#endif\n\n#include \"test/common/grpc/grpc_client_integration_test_harness.h\"\n\nusing testing::Eq;\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\n// Parameterize the loopback test server socket address and gRPC client type.\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, GrpcClientIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS,\n                         GrpcClientIntegrationParamTest::protocolTestParamsToString);\n\n// Validate that a simple request-reply stream works.\nTEST_P(GrpcClientIntegrationTest, BasicStream) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendRequest();\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->sendReply();\n  stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, \"\", empty_metadata_);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a client destruction with open streams cleans up appropriately.\nTEST_P(GrpcClientIntegrationTest, ClientDestruct) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendRequest();\n  grpc_client_.reset();\n}\n\n// Validate that a simple request-reply unary RPC works.\nTEST_P(GrpcClientIntegrationTest, BasicRequest) {\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that multiple streams work.\nTEST_P(GrpcClientIntegrationTest, MultiStream) {\n  initialize();\n  auto stream_0 = createStream(empty_metadata_);\n  auto stream_1 = createStream(empty_metadata_);\n  stream_0->sendRequest();\n  stream_1->sendRequest();\n  stream_0->sendServerInitialMetadata(empty_metadata_);\n  stream_0->sendReply();\n  stream_1->sendServerTrailers(Status::WellKnownGrpcStatus::Unavailable, 
\"\", empty_metadata_, true);\n  stream_0->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, \"\", empty_metadata_);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that multiple request-reply unary RPCs works.\nTEST_P(GrpcClientIntegrationTest, MultiRequest) {\n  initialize();\n  auto request_0 = createRequest(empty_metadata_);\n  auto request_1 = createRequest(empty_metadata_);\n  request_1->sendReply();\n  request_0->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a non-200 HTTP status results in the expected gRPC error.\nTEST_P(GrpcClientIntegrationTest, HttpNon200Status) {\n  initialize();\n  for (const auto http_response_status : {400, 401, 403, 404, 429, 431}) {\n    auto stream = createStream(empty_metadata_);\n    const Http::TestResponseHeaderMapImpl reply_headers{\n        {\":status\", std::to_string(http_response_status)}};\n    stream->expectInitialMetadata(empty_metadata_);\n    stream->expectTrailingMetadata(empty_metadata_);\n    // Technically this should be\n    // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md\n    // as given by Grpc::Utility::httpToGrpcStatus(), but the Google gRPC client treats\n    // this as WellKnownGrpcStatus::Canceled.\n    stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Canceled);\n    stream->fake_stream_->encodeHeaders(reply_headers, true);\n    dispatcher_helper_.runDispatcher();\n  }\n}\n\n// Validate that a non-200 HTTP status results in fallback to grpc-status.\nTEST_P(GrpcClientIntegrationTest, GrpcStatusFallback) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  const Http::TestResponseHeaderMapImpl reply_headers{\n      {\":status\", \"404\"},\n      {\"grpc-status\", std::to_string(enumToInt(Status::WellKnownGrpcStatus::PermissionDenied))},\n      {\"grpc-message\", \"error message\"}};\n  stream->expectInitialMetadata(empty_metadata_);\n  stream->expectTrailingMetadata(empty_metadata_);\n  
stream->expectGrpcStatus(Status::WellKnownGrpcStatus::PermissionDenied);\n  stream->fake_stream_->encodeHeaders(reply_headers, true);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a HTTP-level reset is handled as an INTERNAL gRPC error.\nTEST_P(GrpcClientIntegrationTest, HttpReset) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendServerInitialMetadata(empty_metadata_);\n  dispatcher_helper_.runDispatcher();\n  stream->expectTrailingMetadata(empty_metadata_);\n  stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Internal);\n  stream->fake_stream_->encodeResetStream();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a reply with bad gRPC framing (compressed frames with Envoy\n// client) is handled as an INTERNAL gRPC error.\nTEST_P(GrpcClientIntegrationTest, BadReplyGrpcFraming) {\n  initialize();\n  // Only testing behavior of Envoy client, since Google client handles\n  // compressed frames.\n  SKIP_IF_GRPC_CLIENT(ClientType::GoogleGrpc);\n  auto stream = createStream(empty_metadata_);\n  stream->sendRequest();\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->expectTrailingMetadata(empty_metadata_);\n  stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Internal);\n  Buffer::OwnedImpl reply_buffer(\"\\xde\\xad\\xbe\\xef\\x00\", 5);\n  stream->fake_stream_->encodeData(reply_buffer, true);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that custom channel args can be set on the Google gRPC client.\n//\nTEST_P(GrpcClientIntegrationTest, CustomChannelArgs) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  channel_args_.emplace_back(\"grpc.primary_user_agent\", \"test_agent\");\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n  EXPECT_THAT(stream_headers_->get_(\"user-agent\"), testing::HasSubstr(\"test_agent\"));\n}\n\n// Validate that a reply with bad protobuf is handled as an INTERNAL gRPC 
error.\nTEST_P(GrpcClientIntegrationTest, BadReplyProtobuf) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendRequest();\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->expectTrailingMetadata(empty_metadata_);\n  stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Internal);\n  Buffer::OwnedImpl reply_buffer(\"\\x00\\x00\\x00\\x00\\x02\\xff\\xff\", 7);\n  stream->fake_stream_->encodeData(reply_buffer, true);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a reply with bad protobuf is handled as an INTERNAL gRPC error.\nTEST_P(GrpcClientIntegrationTest, BadRequestReplyProtobuf) {\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->fake_stream_->startGrpcStream();\n  EXPECT_CALL(*request->child_span_, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq(\"0\")));\n  EXPECT_CALL(*request, onFailure(Status::Internal, \"\", _)).WillExitIfNeeded();\n  EXPECT_CALL(*request->child_span_, finishSpan());\n  dispatcher_helper_.setStreamEventPending();\n  Buffer::OwnedImpl reply_buffer(\"\\x00\\x00\\x00\\x00\\x02\\xff\\xff\", 7);\n  Common::prependGrpcFrameHeader(reply_buffer);\n  request->fake_stream_->encodeData(reply_buffer, false);\n  request->fake_stream_->finishGrpcStream(Grpc::Status::Ok);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that an out-of-range gRPC status is handled as an INVALID_CODE gRPC\n// error.\nTEST_P(GrpcClientIntegrationTest, OutOfRangeGrpcStatus) {\n  initialize();\n  // TODO(htuch): there is an UBSAN issue with Google gRPC client library\n  // handling of out-of-range status codes, see\n  // https://circleci.com/gh/envoyproxy/envoy/20234?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link\n  // Need to fix this issue upstream first.\n  SKIP_IF_GRPC_CLIENT(ClientType::GoogleGrpc);\n  auto stream = createStream(empty_metadata_);\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->sendReply();\n  
EXPECT_CALL(*stream, onReceiveTrailingMetadata_(_)).WillExitIfNeeded();\n  dispatcher_helper_.setStreamEventPending();\n  stream->expectGrpcStatus(Status::WellKnownGrpcStatus::InvalidCode);\n  const Http::TestResponseTrailerMapImpl reply_trailers{{\"grpc-status\", std::to_string(0x1337)}};\n  stream->fake_stream_->encodeTrailers(reply_trailers);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a missing gRPC status is handled as an UNKNOWN gRPC error.\nTEST_P(GrpcClientIntegrationTest, MissingGrpcStatus) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->sendReply();\n  EXPECT_CALL(*stream, onReceiveTrailingMetadata_(_)).WillExitIfNeeded();\n  dispatcher_helper_.setStreamEventPending();\n  stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Unknown);\n  const Http::TestResponseTrailerMapImpl reply_trailers{{\"some\", \"other header\"}};\n  stream->fake_stream_->encodeTrailers(reply_trailers);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a reply terminated without trailers is handled as a gRPC error.\nTEST_P(GrpcClientIntegrationTest, ReplyNoTrailers) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendRequest();\n  stream->sendServerInitialMetadata(empty_metadata_);\n  helloworld::HelloReply reply;\n  reply.set_message(HELLO_REPLY);\n  EXPECT_CALL(*stream, onReceiveMessage_(HelloworldReplyEq(HELLO_REPLY))).WillExitIfNeeded();\n  dispatcher_helper_.setStreamEventPending();\n  stream->expectTrailingMetadata(empty_metadata_);\n  stream->expectGrpcStatus(Status::WellKnownGrpcStatus::InvalidCode);\n  auto serialized_response = Grpc::Common::serializeToGrpcFrame(reply);\n  stream->fake_stream_->encodeData(*serialized_response, true);\n  stream->fake_stream_->encodeResetStream();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that sending client initial metadata works.\nTEST_P(GrpcClientIntegrationTest, 
StreamClientInitialMetadata) {\n  initialize();\n  const TestMetadata initial_metadata = {\n      {Http::LowerCaseString(\"foo\"), \"bar\"},\n      {Http::LowerCaseString(\"baz\"), \"blah\"},\n  };\n  auto stream = createStream(initial_metadata);\n  stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, \"\", empty_metadata_, true);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that sending client initial metadata works.\nTEST_P(GrpcClientIntegrationTest, RequestClientInitialMetadata) {\n  initialize();\n  const TestMetadata initial_metadata = {\n      {Http::LowerCaseString(\"foo\"), \"bar\"},\n      {Http::LowerCaseString(\"baz\"), \"blah\"},\n  };\n  auto request = createRequest(initial_metadata);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that setting service-wide client initial metadata works.\nTEST_P(GrpcClientIntegrationTest, RequestServiceWideInitialMetadata) {\n  service_wide_initial_metadata_.emplace_back(Http::LowerCaseString(\"foo\"), \"bar\");\n  service_wide_initial_metadata_.emplace_back(Http::LowerCaseString(\"baz\"), \"blah\");\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that receiving server initial metadata works.\nTEST_P(GrpcClientIntegrationTest, ServerInitialMetadata) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendRequest();\n  const TestMetadata initial_metadata = {\n      {Http::LowerCaseString(\"foo\"), \"bar\"},\n      {Http::LowerCaseString(\"baz\"), \"blah\"},\n      {Http::LowerCaseString(\"binary-bin\"), \"help\"},\n  };\n  stream->sendServerInitialMetadata(initial_metadata);\n  stream->sendReply();\n  stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, \"\", empty_metadata_);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that receiving server trailing metadata works.\nTEST_P(GrpcClientIntegrationTest, ServerTrailingMetadata) 
{\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendRequest();\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->sendReply();\n  const TestMetadata trailing_metadata = {\n      {Http::LowerCaseString(\"foo\"), \"bar\"},\n      {Http::LowerCaseString(\"baz\"), \"blah\"},\n  };\n  stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, \"\", trailing_metadata);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a trailers-only response is handled for streams.\nTEST_P(GrpcClientIntegrationTest, StreamTrailersOnly) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, \"\", empty_metadata_, true);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a trailers-only response is handled for requests, where it is\n// an error.\nTEST_P(GrpcClientIntegrationTest, RequestTrailersOnly) {\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  const Http::TestResponseTrailerMapImpl reply_headers{{\":status\", \"200\"}, {\"grpc-status\", \"0\"}};\n  EXPECT_CALL(*request->child_span_, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq(\"0\")));\n  EXPECT_CALL(*request->child_span_,\n              setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(*request, onFailure(Status::Internal, \"\", _)).WillExitIfNeeded();\n  dispatcher_helper_.setStreamEventPending();\n  EXPECT_CALL(*request->child_span_, finishSpan());\n  request->fake_stream_->encodeTrailers(reply_headers);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a trailers RESOURCE_EXHAUSTED reply is handled.\nTEST_P(GrpcClientIntegrationTest, ResourceExhaustedError) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->sendReply();\n  dispatcher_helper_.runDispatcher();\n  
stream->sendServerTrailers(Status::WellKnownGrpcStatus::ResourceExhausted, \"error message\",\n                             empty_metadata_);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a trailers Unauthenticated reply is handled.\nTEST_P(GrpcClientIntegrationTest, UnauthenticatedError) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->sendServerTrailers(Status::WellKnownGrpcStatus::Unauthenticated, \"error message\",\n                             empty_metadata_);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a trailers reply is still handled even if a grpc status code larger than\n// MaximumKnown, is handled.\nTEST_P(GrpcClientIntegrationTest, MaximumKnownPlusOne) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->sendServerTrailers(\n      static_cast<Status::GrpcStatus>(Status::WellKnownGrpcStatus::MaximumKnown + 1),\n      \"error message\", empty_metadata_);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that we can continue to receive after a local close.\nTEST_P(GrpcClientIntegrationTest, ReceiveAfterLocalClose) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendRequest(true);\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->sendReply();\n  stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, \"\", empty_metadata_);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that reset() doesn't explode on a half-closed stream (local).\nTEST_P(GrpcClientIntegrationTest, ResetAfterCloseLocal) {\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->grpc_stream_->closeStream();\n  ASSERT_TRUE(stream->fake_stream_->waitForEndStream(dispatcher_helper_.dispatcher_));\n  stream->grpc_stream_->resetStream();\n  dispatcher_helper_.dispatcher_.run(Event::Dispatcher::RunType::NonBlock);\n  
ASSERT_TRUE(stream->fake_stream_->waitForReset());\n}\n\n// Validate that request cancel() works.\nTEST_P(GrpcClientIntegrationTest, CancelRequest) {\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  EXPECT_CALL(*request->child_span_,\n              setTag(Eq(Tracing::Tags::get().Status), Eq(Tracing::Tags::get().Canceled)));\n  EXPECT_CALL(*request->child_span_, finishSpan());\n  request->grpc_request_->cancel();\n  dispatcher_helper_.dispatcher_.run(Event::Dispatcher::RunType::NonBlock);\n  ASSERT_TRUE(request->fake_stream_->waitForReset());\n}\n\n// Parameterize the loopback test server socket address and gRPC client type.\nINSTANTIATE_TEST_SUITE_P(SslIpVersionsClientType, GrpcSslClientIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS,\n                         GrpcClientIntegrationParamTest::protocolTestParamsToString);\n\n// Validate that a simple request-reply unary RPC works with SSL.\nTEST_P(GrpcSslClientIntegrationTest, BasicSslRequest) {\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a simple request-reply unary RPC works with SSL + client certs.\nTEST_P(GrpcSslClientIntegrationTest, BasicSslRequestWithClientCert) {\n  use_client_cert_ = true;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n#ifdef ENVOY_GOOGLE_GRPC\n// AccessToken credential validation tests.\nclass GrpcAccessTokenClientIntegrationTest : public GrpcSslClientIntegrationTest {\npublic:\n  void expectExtraHeaders(FakeStream& fake_stream) override {\n    AssertionResult result = fake_stream.waitForHeadersComplete();\n    RELEASE_ASSERT(result, result.message());\n    std::vector<absl::string_view> auth_headers;\n    Http::HeaderUtility::getAllOfHeader(fake_stream.headers(), \"authorization\", auth_headers);\n    if (!access_token_value_.empty()) {\n      
EXPECT_EQ(\"Bearer \" + access_token_value_, auth_headers[0]);\n    }\n    if (!access_token_value_2_.empty()) {\n      EXPECT_EQ(\"Bearer \" + access_token_value_2_, auth_headers[1]);\n    }\n  }\n\n  envoy::config::core::v3::GrpcService createGoogleGrpcConfig() override {\n    auto config = GrpcClientIntegrationTest::createGoogleGrpcConfig();\n    auto* google_grpc = config.mutable_google_grpc();\n    google_grpc->set_credentials_factory_name(credentials_factory_name_);\n    auto* ssl_creds = google_grpc->mutable_channel_credentials()->mutable_ssl_credentials();\n    ssl_creds->mutable_root_certs()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n    google_grpc->add_call_credentials()->set_access_token(access_token_value_);\n    if (!access_token_value_2_.empty()) {\n      google_grpc->add_call_credentials()->set_access_token(access_token_value_2_);\n    }\n    if (!refresh_token_value_.empty()) {\n      google_grpc->add_call_credentials()->set_google_refresh_token(refresh_token_value_);\n    }\n    return config;\n  }\n\n  std::string access_token_value_{};\n  std::string access_token_value_2_{};\n  std::string refresh_token_value_{};\n  std::string credentials_factory_name_{};\n};\n\n// Parameterize the loopback test server socket address and gRPC client type.\nINSTANTIATE_TEST_SUITE_P(SslIpVersionsClientType, GrpcAccessTokenClientIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS,\n                         GrpcClientIntegrationParamTest::protocolTestParamsToString);\n\n// Validate that a simple request-reply unary RPC works with AccessToken auth.\nTEST_P(GrpcAccessTokenClientIntegrationTest, AccessTokenAuthRequest) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  access_token_value_ = \"accesstokenvalue\";\n  credentials_factory_name_ =\n      Extensions::GrpcCredentials::GrpcCredentialsNames::get().AccessTokenExample;\n  initialize();\n  auto request = 
createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that a simple request-reply stream RPC works with AccessToken auth..\nTEST_P(GrpcAccessTokenClientIntegrationTest, AccessTokenAuthStream) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  access_token_value_ = \"accesstokenvalue\";\n  credentials_factory_name_ =\n      Extensions::GrpcCredentials::GrpcCredentialsNames::get().AccessTokenExample;\n  initialize();\n  auto stream = createStream(empty_metadata_);\n  stream->sendServerInitialMetadata(empty_metadata_);\n  stream->sendRequest();\n  stream->sendReply();\n  stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, \"\", empty_metadata_);\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that multiple access tokens are accepted\nTEST_P(GrpcAccessTokenClientIntegrationTest, MultipleAccessTokens) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  access_token_value_ = \"accesstokenvalue\";\n  access_token_value_2_ = \"accesstokenvalue2\";\n  credentials_factory_name_ =\n      Extensions::GrpcCredentials::GrpcCredentialsNames::get().AccessTokenExample;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that extra params are accepted\nTEST_P(GrpcAccessTokenClientIntegrationTest, ExtraCredentialParams) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  access_token_value_ = \"accesstokenvalue\";\n  refresh_token_value_ = \"refreshtokenvalue\";\n  credentials_factory_name_ =\n      Extensions::GrpcCredentials::GrpcCredentialsNames::get().AccessTokenExample;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that no access token still works\nTEST_P(GrpcAccessTokenClientIntegrationTest, NoAccessTokens) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  credentials_factory_name_ =\n      
Extensions::GrpcCredentials::GrpcCredentialsNames::get().AccessTokenExample;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that an unknown credentials factory name throws an EnvoyException\nTEST_P(GrpcAccessTokenClientIntegrationTest, InvalidCredentialFactory) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  credentials_factory_name_ = \"unknown\";\n  EXPECT_THROW_WITH_MESSAGE(initialize(), EnvoyException,\n                            \"Unknown google grpc credentials factory: unknown\");\n}\n\n#endif\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/grpc_client_integration_test_harness.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/grpc/async_client_impl.h\"\n#include \"common/grpc/context_impl.h\"\n#include \"common/http/context_impl.h\"\n\n#ifdef ENVOY_GOOGLE_GRPC\n#include \"common/grpc/google_async_client_impl.h\"\n#endif\n\n#include \"common/http/async_client_impl.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/http2/conn_pool.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/network/connection_impl.h\"\n#include \"common/network/raw_buffer_socket.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/common/grpc/utility.h\"\n#include \"test/integration/fake_upstream.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/thread_local_cluster.h\"\n#include \"test/proto/helloworld.pb.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/global.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\nconst char HELLO_REQUEST[] = \"ABC\";\nconst char HELLO_REPLY[] = 
\"DEFG\";\n\nMATCHER_P(HelloworldReplyEq, rhs, \"\") { return arg.message() == rhs; }\n\nusing TestMetadata = std::vector<std::pair<Http::LowerCaseString, std::string>>;\n\n// Use in EXPECT_CALL(foo, bar(_)).WillExitIfNeeded() to exit dispatcher loop if\n// there are no longer any pending events in DispatcherHelper.\n#define WillExitIfNeeded()                                                                         \\\n  WillOnce(InvokeWithoutArgs([this] { dispatcher_helper_.exitDispatcherIfNeeded(); }))\n\n// Utility to assist with keeping track of pending gmock expected events when\n// deferring execution to the dispatcher. The dispatcher can be run using this\n// helper until all pending events are completed.\nclass DispatcherHelper {\npublic:\n  DispatcherHelper(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {}\n\n  void exitDispatcherIfNeeded() {\n    ENVOY_LOG_MISC(debug, \"Checking exit with {} events pending\", pending_stream_events_);\n    ASSERT(pending_stream_events_ > 0);\n    if (--pending_stream_events_ == 0) {\n      dispatcher_.exit();\n    }\n  }\n\n  void runDispatcher() {\n    ENVOY_LOG_MISC(debug, \"Run dispatcher with {} events pending\", pending_stream_events_);\n    if (pending_stream_events_ > 0) {\n      dispatcher_.run(Event::Dispatcher::RunType::Block);\n    }\n  }\n\n  void setStreamEventPending() {\n    ++pending_stream_events_;\n    ENVOY_LOG_MISC(debug, \"Set event pending, now {} events pending\", pending_stream_events_);\n  }\n\n  uint32_t pending_stream_events_{};\n  Event::Dispatcher& dispatcher_;\n};\n\n// Stream related test utilities.\nclass HelloworldStream : public MockAsyncStreamCallbacks<helloworld::HelloReply> {\npublic:\n  HelloworldStream(DispatcherHelper& dispatcher_helper) : dispatcher_helper_(dispatcher_helper) {}\n\n  void sendRequest(bool end_stream = false) {\n    helloworld::HelloRequest request_msg;\n    request_msg.set_name(HELLO_REQUEST);\n    grpc_stream_->sendMessage(request_msg, end_stream);\n\n    
helloworld::HelloRequest received_msg;\n    AssertionResult result =\n        fake_stream_->waitForGrpcMessage(dispatcher_helper_.dispatcher_, received_msg);\n    RELEASE_ASSERT(result, result.message());\n    EXPECT_THAT(request_msg, ProtoEq(received_msg));\n  }\n\n  void expectInitialMetadata(const TestMetadata& metadata) {\n    EXPECT_CALL(*this, onReceiveInitialMetadata_(_))\n        .WillOnce(Invoke([this, &metadata](const Http::HeaderMap& received_headers) {\n          Http::TestResponseHeaderMapImpl stream_headers(received_headers);\n          for (const auto& value : metadata) {\n            EXPECT_EQ(value.second, stream_headers.get_(value.first));\n          }\n          dispatcher_helper_.exitDispatcherIfNeeded();\n        }));\n    dispatcher_helper_.setStreamEventPending();\n  }\n\n  void expectTrailingMetadata(const TestMetadata& metadata) {\n    EXPECT_CALL(*this, onReceiveTrailingMetadata_(_))\n        .WillOnce(Invoke([this, &metadata](const Http::HeaderMap& received_headers) {\n          Http::TestResponseTrailerMapImpl stream_headers(received_headers);\n          for (auto& value : metadata) {\n            EXPECT_EQ(value.second, stream_headers.get_(value.first));\n          }\n          dispatcher_helper_.exitDispatcherIfNeeded();\n        }));\n    dispatcher_helper_.setStreamEventPending();\n  }\n\n  void sendServerInitialMetadata(const TestMetadata& metadata) {\n    Http::HeaderMapPtr reply_headers{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n    for (auto& value : metadata) {\n      reply_headers->addReference(value.first, value.second);\n    }\n    expectInitialMetadata(metadata);\n    fake_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl(*reply_headers), false);\n  }\n\n  void sendReply() {\n    helloworld::HelloReply reply;\n    reply.set_message(HELLO_REPLY);\n    EXPECT_CALL(*this, onReceiveMessage_(HelloworldReplyEq(HELLO_REPLY))).WillExitIfNeeded();\n    dispatcher_helper_.setStreamEventPending();\n    
fake_stream_->sendGrpcMessage<helloworld::HelloReply>(reply);\n  }\n\n  void expectGrpcStatus(Status::GrpcStatus grpc_status) {\n    if (grpc_status == Status::WellKnownGrpcStatus::InvalidCode) {\n      EXPECT_CALL(*this, onRemoteClose(_, _)).WillExitIfNeeded();\n    } else if (grpc_status > Status::WellKnownGrpcStatus::MaximumKnown) {\n      EXPECT_CALL(*this, onRemoteClose(Status::WellKnownGrpcStatus::InvalidCode, _))\n          .WillExitIfNeeded();\n    } else {\n      EXPECT_CALL(*this, onRemoteClose(grpc_status, _)).WillExitIfNeeded();\n    }\n    dispatcher_helper_.setStreamEventPending();\n  }\n\n  void sendServerTrailers(Status::GrpcStatus grpc_status, const std::string& grpc_message,\n                          const TestMetadata& metadata, bool trailers_only = false) {\n    Http::TestResponseTrailerMapImpl reply_trailers{\n        {\"grpc-status\", std::to_string(enumToInt(grpc_status))}};\n    if (!grpc_message.empty()) {\n      reply_trailers.addCopy(\"grpc-message\", grpc_message);\n    }\n    if (trailers_only) {\n      reply_trailers.addCopy(\":status\", \"200\");\n    }\n    for (const auto& value : metadata) {\n      reply_trailers.addCopy(value.first, value.second);\n    }\n    if (trailers_only) {\n      expectInitialMetadata(empty_metadata_);\n    }\n    expectTrailingMetadata(metadata);\n    expectGrpcStatus(grpc_status);\n    if (trailers_only) {\n      fake_stream_->encodeHeaders(reply_trailers, true);\n    } else {\n      fake_stream_->encodeTrailers(reply_trailers);\n    }\n  }\n\n  void closeStream() {\n    grpc_stream_->closeStream();\n    AssertionResult result = fake_stream_->waitForEndStream(dispatcher_helper_.dispatcher_);\n    RELEASE_ASSERT(result, result.message());\n  }\n\n  DispatcherHelper& dispatcher_helper_;\n  FakeStream* fake_stream_{};\n  AsyncStream<helloworld::HelloRequest> grpc_stream_{};\n  const TestMetadata empty_metadata_;\n};\n\nusing HelloworldStreamPtr = std::unique_ptr<HelloworldStream>;\n\n// Request related test 
utilities.\nclass HelloworldRequest : public MockAsyncRequestCallbacks<helloworld::HelloReply> {\npublic:\n  HelloworldRequest(DispatcherHelper& dispatcher_helper) : dispatcher_helper_(dispatcher_helper) {}\n\n  void sendReply() {\n    fake_stream_->startGrpcStream();\n    helloworld::HelloReply reply;\n    reply.set_message(HELLO_REPLY);\n    EXPECT_CALL(*child_span_, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq(\"0\")));\n    EXPECT_CALL(*this, onSuccess_(HelloworldReplyEq(HELLO_REPLY), _)).WillExitIfNeeded();\n    EXPECT_CALL(*child_span_, finishSpan());\n    dispatcher_helper_.setStreamEventPending();\n    fake_stream_->sendGrpcMessage(reply);\n    fake_stream_->finishGrpcStream(Grpc::Status::Ok);\n  }\n\n  DispatcherHelper& dispatcher_helper_;\n  FakeStream* fake_stream_{};\n  AsyncRequest* grpc_request_{};\n  Tracing::MockSpan* child_span_{new Tracing::MockSpan()};\n};\n\nusing HelloworldRequestPtr = std::unique_ptr<HelloworldRequest>;\n\nclass GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest {\npublic:\n  GrpcClientIntegrationTest()\n      : method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName(\"SayHello\")),\n        api_(Api::createApiForTest(*stats_store_, test_time_.timeSystem())),\n        dispatcher_(api_->allocateDispatcher(\"test_thread\")),\n        http_context_(stats_store_->symbolTable()) {}\n\n  virtual void initialize() {\n    if (fake_upstream_ == nullptr) {\n      fake_upstream_ = std::make_unique<FakeUpstream>(0, FakeHttpConnection::Type::HTTP2,\n                                                      ipVersion(), test_time_.timeSystem());\n    }\n    switch (clientType()) {\n    case ClientType::EnvoyGrpc:\n      grpc_client_ = createAsyncClientImpl();\n      break;\n    case ClientType::GoogleGrpc: {\n      grpc_client_ = createGoogleAsyncClientImpl();\n      break;\n    }\n    }\n    // Setup a test timeout (also needed to maintain an active event in the dispatcher so that\n    // .run() will block 
until timeout rather than exit immediately).\n    timeout_timer_ = dispatcher_->createTimer([this] {\n      FAIL() << \"Test timeout\";\n      dispatcher_->exit();\n    });\n    timeout_timer_->enableTimer(std::chrono::milliseconds(10000));\n  }\n\n  void TearDown() override {\n    if (fake_connection_) {\n      AssertionResult result = fake_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n      fake_connection_.reset();\n    }\n  }\n\n  void fillServiceWideInitialMetadata(envoy::config::core::v3::GrpcService& config) {\n    for (const auto& item : service_wide_initial_metadata_) {\n      auto* header_value = config.add_initial_metadata();\n      header_value->set_key(item.first.get());\n      header_value->set_value(item.second);\n    }\n  }\n\n  // Create a Grpc::AsyncClientImpl instance backed by enough fake/mock\n  // infrastructure to initiate a loopback TCP connection to fake_upstream_.\n  RawAsyncClientPtr createAsyncClientImpl() {\n    client_connection_ = std::make_unique<Network::ClientConnectionImpl>(\n        *dispatcher_, fake_upstream_->localAddress(), nullptr,\n        std::move(async_client_transport_socket_), nullptr);\n    ON_CALL(*mock_cluster_info_, connectTimeout())\n        .WillByDefault(Return(std::chrono::milliseconds(10000)));\n    EXPECT_CALL(*mock_cluster_info_, name()).WillRepeatedly(ReturnRef(fake_cluster_name_));\n    EXPECT_CALL(cm_, get(_)).WillRepeatedly(Return(&thread_local_cluster_));\n    EXPECT_CALL(thread_local_cluster_, info()).WillRepeatedly(Return(cluster_info_ptr_));\n    Upstream::MockHost::MockCreateConnectionData connection_data{client_connection_.release(),\n                                                                 host_description_ptr_};\n    EXPECT_CALL(*mock_host_, createConnection_(_, _)).WillRepeatedly(Return(connection_data));\n    EXPECT_CALL(*mock_host_, 
cluster()).WillRepeatedly(ReturnRef(*cluster_info_ptr_));\n    EXPECT_CALL(*mock_host_description_, locality()).WillRepeatedly(ReturnRef(host_locality_));\n    http_conn_pool_ = std::make_unique<Http::Http2::ProdConnPoolImpl>(\n        *dispatcher_, random_, host_ptr_, Upstream::ResourcePriority::Default, nullptr, nullptr);\n    EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _))\n        .WillRepeatedly(Return(http_conn_pool_.get()));\n    http_async_client_ = std::make_unique<Http::AsyncClientImpl>(\n        cluster_info_ptr_, *stats_store_, *dispatcher_, local_info_, cm_, runtime_, random_,\n        std::move(shadow_writer_ptr_), http_context_);\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(fake_cluster_name_))\n        .WillRepeatedly(ReturnRef(*http_async_client_));\n    EXPECT_CALL(cm_, get(Eq(fake_cluster_name_))).WillRepeatedly(Return(&thread_local_cluster_));\n    envoy::config::core::v3::GrpcService config;\n    config.mutable_envoy_grpc()->set_cluster_name(fake_cluster_name_);\n    fillServiceWideInitialMetadata(config);\n    return std::make_unique<AsyncClientImpl>(cm_, config, dispatcher_->timeSource());\n  }\n\n  virtual envoy::config::core::v3::GrpcService createGoogleGrpcConfig() {\n    envoy::config::core::v3::GrpcService config;\n    auto* google_grpc = config.mutable_google_grpc();\n    google_grpc->set_target_uri(fake_upstream_->localAddress()->asString());\n    google_grpc->set_stat_prefix(\"fake_cluster\");\n    for (const auto& config_arg : channel_args_) {\n      (*google_grpc->mutable_channel_args()->mutable_args())[config_arg.first].set_string_value(\n          config_arg.second);\n    }\n    fillServiceWideInitialMetadata(config);\n    return config;\n  }\n\n  RawAsyncClientPtr createGoogleAsyncClientImpl() {\n#ifdef ENVOY_GOOGLE_GRPC\n    google_tls_ = std::make_unique<GoogleAsyncClientThreadLocal>(*api_);\n    GoogleGenericStubFactory stub_factory;\n    return std::make_unique<GoogleAsyncClientImpl>(*dispatcher_, *google_tls_, 
stub_factory,\n                                                   stats_scope_, createGoogleGrpcConfig(), *api_,\n                                                   google_grpc_stat_names_);\n#else\n    NOT_REACHED_GCOVR_EXCL_LINE;\n#endif\n  }\n\n  void expectInitialHeaders(FakeStream& fake_stream, const TestMetadata& initial_metadata) {\n    AssertionResult result = fake_stream.waitForHeadersComplete();\n    RELEASE_ASSERT(result, result.message());\n    stream_headers_ = std::make_unique<Http::TestRequestHeaderMapImpl>(fake_stream.headers());\n    EXPECT_EQ(\"POST\", stream_headers_->get_(\":method\"));\n    EXPECT_EQ(\"/helloworld.Greeter/SayHello\", stream_headers_->get_(\":path\"));\n    EXPECT_EQ(\"application/grpc\", stream_headers_->get_(\"content-type\"));\n    EXPECT_EQ(\"trailers\", stream_headers_->get_(\"te\"));\n    for (const auto& value : initial_metadata) {\n      EXPECT_EQ(value.second, stream_headers_->get_(value.first));\n    }\n    for (const auto& value : service_wide_initial_metadata_) {\n      EXPECT_EQ(value.second, stream_headers_->get_(value.first));\n    }\n  }\n\n  virtual void expectExtraHeaders(FakeStream&) {}\n\n  HelloworldRequestPtr createRequest(const TestMetadata& initial_metadata) {\n    auto request = std::make_unique<HelloworldRequest>(dispatcher_helper_);\n    EXPECT_CALL(*request, onCreateInitialMetadata(_))\n        .WillOnce(Invoke([&initial_metadata](Http::HeaderMap& headers) {\n          for (const auto& value : initial_metadata) {\n            headers.addReference(value.first, value.second);\n          }\n        }));\n    helloworld::HelloRequest request_msg;\n    request_msg.set_name(HELLO_REQUEST);\n\n    Tracing::MockSpan active_span;\n    EXPECT_CALL(active_span, spawnChild_(_, \"async fake_cluster egress\", _))\n        .WillOnce(Return(request->child_span_));\n    EXPECT_CALL(*request->child_span_,\n                setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(fake_cluster_name_)));\n    
EXPECT_CALL(*request->child_span_,\n                setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n    EXPECT_CALL(*request->child_span_, injectContext(_));\n\n    request->grpc_request_ = grpc_client_->send(*method_descriptor_, request_msg, *request,\n                                                active_span, Http::AsyncClient::RequestOptions());\n    EXPECT_NE(request->grpc_request_, nullptr);\n\n    if (!fake_connection_) {\n      AssertionResult result =\n          fake_upstream_->waitForHttpConnection(*dispatcher_, fake_connection_);\n      RELEASE_ASSERT(result, result.message());\n    }\n    fake_streams_.emplace_back();\n    AssertionResult result = fake_connection_->waitForNewStream(*dispatcher_, fake_streams_.back());\n    RELEASE_ASSERT(result, result.message());\n    auto& fake_stream = *fake_streams_.back();\n    request->fake_stream_ = &fake_stream;\n\n    expectInitialHeaders(fake_stream, initial_metadata);\n    expectExtraHeaders(fake_stream);\n\n    helloworld::HelloRequest received_msg;\n    result = fake_stream.waitForGrpcMessage(*dispatcher_, received_msg);\n    RELEASE_ASSERT(result, result.message());\n    EXPECT_THAT(request_msg, ProtoEq(received_msg));\n\n    return request;\n  }\n\n  HelloworldStreamPtr createStream(const TestMetadata& initial_metadata) {\n    auto stream = std::make_unique<HelloworldStream>(dispatcher_helper_);\n    EXPECT_CALL(*stream, onCreateInitialMetadata(_))\n        .WillOnce(Invoke([&initial_metadata](Http::HeaderMap& headers) {\n          for (const auto& value : initial_metadata) {\n            headers.addReference(value.first, value.second);\n          }\n        }));\n\n    stream->grpc_stream_ =\n        grpc_client_->start(*method_descriptor_, *stream, Http::AsyncClient::StreamOptions());\n    EXPECT_NE(stream->grpc_stream_, nullptr);\n\n    if (!fake_connection_) {\n      AssertionResult result =\n          fake_upstream_->waitForHttpConnection(*dispatcher_, 
fake_connection_);\n      RELEASE_ASSERT(result, result.message());\n    }\n    fake_streams_.emplace_back();\n    AssertionResult result = fake_connection_->waitForNewStream(*dispatcher_, fake_streams_.back());\n    RELEASE_ASSERT(result, result.message());\n    auto& fake_stream = *fake_streams_.back();\n    stream->fake_stream_ = &fake_stream;\n\n    expectInitialHeaders(fake_stream, initial_metadata);\n    expectExtraHeaders(fake_stream);\n\n    return stream;\n  }\n\n  DangerousDeprecatedTestTime test_time_;\n  std::unique_ptr<FakeUpstream> fake_upstream_;\n  FakeHttpConnectionPtr fake_connection_;\n  std::vector<FakeStreamPtr> fake_streams_;\n  const Protobuf::MethodDescriptor* method_descriptor_;\n  Stats::TestSymbolTable symbol_table_;\n  Stats::IsolatedStoreImpl* stats_store_ = new Stats::IsolatedStoreImpl(*symbol_table_);\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  DispatcherHelper dispatcher_helper_{*dispatcher_};\n  Stats::ScopeSharedPtr stats_scope_{stats_store_};\n  Grpc::StatNames google_grpc_stat_names_{stats_store_->symbolTable()};\n  TestMetadata service_wide_initial_metadata_;\n  std::unique_ptr<Http::TestRequestHeaderMapImpl> stream_headers_;\n  std::vector<std::pair<std::string, std::string>> channel_args_;\n#ifdef ENVOY_GOOGLE_GRPC\n  GoogleAsyncClientThreadLocalPtr google_tls_;\n#endif\n  AsyncClient<helloworld::HelloRequest, helloworld::HelloReply> grpc_client_;\n  Event::TimerPtr timeout_timer_;\n  const TestMetadata empty_metadata_;\n\n  // Fake/mock infrastructure for Grpc::AsyncClientImpl upstream.\n  Network::TransportSocketPtr async_client_transport_socket_{new Network::RawBufferSocket()};\n  const std::string fake_cluster_name_{\"fake_cluster\"};\n  Upstream::MockClusterManager cm_;\n  Upstream::MockClusterInfo* mock_cluster_info_ = new NiceMock<Upstream::MockClusterInfo>();\n  Upstream::ClusterInfoConstSharedPtr cluster_info_ptr_{mock_cluster_info_};\n  Upstream::MockThreadLocalCluster thread_local_cluster_;\n  
NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  Runtime::MockLoader runtime_;\n  Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{test_time_.timeSystem()};\n  NiceMock<Random::MockRandomGenerator> random_;\n  Http::AsyncClientPtr http_async_client_;\n  Http::ConnectionPool::InstancePtr http_conn_pool_;\n  Http::ContextImpl http_context_;\n  envoy::config::core::v3::Locality host_locality_;\n  Upstream::MockHost* mock_host_ = new NiceMock<Upstream::MockHost>();\n  Upstream::MockHostDescription* mock_host_description_ =\n      new NiceMock<Upstream::MockHostDescription>();\n  Upstream::HostDescriptionConstSharedPtr host_description_ptr_{mock_host_description_};\n  Upstream::HostConstSharedPtr host_ptr_{mock_host_};\n  Router::MockShadowWriter* mock_shadow_writer_ = new Router::MockShadowWriter();\n  Router::ShadowWriterPtr shadow_writer_ptr_{mock_shadow_writer_};\n  Network::ClientConnectionPtr client_connection_;\n};\n\n// SSL connection credential validation tests.\nclass GrpcSslClientIntegrationTest : public GrpcClientIntegrationTest {\npublic:\n  GrpcSslClientIntegrationTest() {\n    ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_));\n  }\n  void TearDown() override {\n    // Reset some state in the superclass before we destruct context_manager_ in our destructor, it\n    // doesn't like dangling contexts at destruction.\n    GrpcClientIntegrationTest::TearDown();\n    fake_upstream_.reset();\n    async_client_transport_socket_.reset();\n    client_connection_.reset();\n  }\n\n  envoy::config::core::v3::GrpcService createGoogleGrpcConfig() override {\n    auto config = GrpcClientIntegrationTest::createGoogleGrpcConfig();\n    TestUtility::setTestSslGoogleGrpcConfig(config, use_client_cert_);\n    return config;\n  }\n\n  void initialize() override {\n    envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n    auto* common_tls_context = tls_context.mutable_common_tls_context();\n    auto* 
validation_context = common_tls_context->mutable_validation_context();\n    validation_context->mutable_trusted_ca()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n    if (use_client_cert_) {\n      auto* tls_cert = common_tls_context->add_tls_certificates();\n      tls_cert->mutable_certificate_chain()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientcert.pem\"));\n      tls_cert->mutable_private_key()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientkey.pem\"));\n    }\n    auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ClientContextConfigImpl>(\n        tls_context, factory_context_);\n\n    mock_host_description_->socket_factory_ =\n        std::make_unique<Extensions::TransportSockets::Tls::ClientSslSocketFactory>(\n            std::move(cfg), context_manager_, *stats_store_);\n    async_client_transport_socket_ =\n        mock_host_description_->socket_factory_->createTransportSocket(nullptr);\n    fake_upstream_ = std::make_unique<FakeUpstream>(createUpstreamSslContext(), 0,\n                                                    FakeHttpConnection::Type::HTTP2, ipVersion(),\n                                                    test_time_.timeSystem());\n\n    GrpcClientIntegrationTest::initialize();\n  }\n\n  Network::TransportSocketFactoryPtr createUpstreamSslContext() {\n    envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n    auto* common_tls_context = tls_context.mutable_common_tls_context();\n    common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http2);\n    auto* tls_cert = common_tls_context->add_tls_certificates();\n    tls_cert->mutable_certificate_chain()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcert.pem\"));\n    tls_cert->mutable_private_key()->set_filename(\n        
TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamkey.pem\"));\n    if (use_client_cert_) {\n      tls_context.mutable_require_client_certificate()->set_value(true);\n      auto* validation_context = common_tls_context->mutable_validation_context();\n      validation_context->mutable_trusted_ca()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/cacert.pem\"));\n    }\n\n    auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n        tls_context, factory_context_);\n\n    static Stats::Scope* upstream_stats_store = new Stats::IsolatedStoreImpl();\n    return std::make_unique<Extensions::TransportSockets::Tls::ServerSslSocketFactory>(\n        std::move(cfg), context_manager_, *upstream_stats_store, std::vector<std::string>{});\n  }\n\n  bool use_client_cert_{};\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context_;\n};\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/grpc/service_key.json",
    "content": "{\n  \"type\": \"service_account\",\n  \"project_id\": \"teset-project\",\n  \"private_key_id\": \"xxx\",\n  \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nspUMkfFsoTfa\\n-----END PRIVATE KEY-----\\n\",\n  \"client_email\": \"test@test.iam.gserviceaccount.com\",\n  \"client_id\": \"42\",\n  \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n  \"token_uri\": \"https://oauth2.googleapis.com/token\",\n  \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n  \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/test%test-dev.iam.gserviceaccount.com\"\n}\n"
  },
  {
    "path": "test/common/grpc/utility.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n\n#include \"test/test_common/environment.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nclass TestUtility {\npublic:\n  static void setTestSslGoogleGrpcConfig(envoy::config::core::v3::GrpcService& config,\n                                         bool use_client_cert) {\n    auto* google_grpc = config.mutable_google_grpc();\n    auto* ssl_creds = google_grpc->mutable_channel_credentials()->mutable_ssl_credentials();\n    ssl_creds->mutable_root_certs()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n    if (use_client_cert) {\n      ssl_creds->mutable_private_key()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientkey.pem\"));\n      ssl_creds->mutable_cert_chain()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientcert.pem\"));\n    }\n  }\n};\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/html/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    deps = [\n        \"//source/common/html:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/html/utility_test.cc",
    "content": "#include \"common/html/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Html {\nnamespace {\n\nTEST(HttpUtility, SanitizeHtml) {\n  EXPECT_EQ(\"simple text, no cares/worries\", Utility::sanitize(\"simple text, no cares/worries\"));\n  EXPECT_EQ(\"a&amp;b\", Utility::sanitize(\"a&b\"));\n  EXPECT_EQ(\"&lt;script&gt;\", Utility::sanitize(\"<script>\"));\n}\n\n} // namespace\n} // namespace Html\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"async_client_impl_test\",\n    srcs = [\"async_client_impl_test.cc\"],\n    deps = [\n        \":common_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:async_client_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/upstreams/http/generic:config\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:test_time_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"async_client_utility_test\",\n    srcs = [\"async_client_utility_test.cc\"],\n    deps = [\n        \"//source/common/http:async_client_utility_lib\",\n        \"//test/mocks/http:http_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"codec_client_test\",\n    srcs = [\"codec_client_test.cc\"],\n    deps = [\n        \":common_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/http:codec_client_lib\",\n        \"//source/common/http:exception_lib\",\n        \"//source/common/network:utility_lib\",\n        
\"//source/common/stream_info:stream_info_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"codec_impl_fuzz_proto\",\n    srcs = [\"codec_impl_fuzz.proto\"],\n    deps = [\"//test/fuzz:common_proto\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"codec_impl_fuzz_test\",\n    srcs = [\"codec_impl_fuzz_test.cc\"],\n    corpus = \"codec_impl_corpus\",\n    deps = [\n        \":codec_impl_fuzz_proto_cc_proto\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http/http1:codec_lib\",\n        \"//source/common/http/http2:codec_lib\",\n        \"//test/common/http/http2:codec_impl_test_util\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"filter_manager_test\",\n    srcs = [\"filter_manager_test.cc\"],\n    deps = [\n        \"//source/common/http:filter_manager_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_reply:local_reply_mocks\",\n        \"//test/mocks/network:network_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"codec_wrappers_test\",\n    srcs = [\"codec_wrappers_test.cc\"],\n    deps = [\n        \"//source/common/http:codec_wrappers_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    
name = \"codes_test\",\n    srcs = [\"codes_test.cc\"],\n    deps = [\n        \"//source/common/common:empty_string\",\n        \"//source/common/http:codes_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"codes_speed_test\",\n    srcs = [\"codes_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/http:codes_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"codes_speed_test_benchmark_test\",\n    benchmark_binary = \"codes_speed_test\",\n)\n\nenvoy_cc_test_library(\n    name = \"common_lib\",\n    srcs = [\"common.cc\"],\n    hdrs = [\"common.h\"],\n    deps = [\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/http:codec_client_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/event:event_mocks\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"conn_manager_impl_fuzz_proto\",\n    srcs = [\"conn_manager_impl_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"conn_manager_impl_fuzz_test\",\n    srcs = [\"conn_manager_impl_fuzz_test.cc\"],\n    corpus = \"conn_manager_impl_corpus\",\n    deps = [\n        \":conn_manager_impl_fuzz_proto_cc_proto\",\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/http:conn_manager_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/http:date_provider_lib\",\n        
\"//source/common/http:header_utility_lib\",\n        \"//source/common/http:request_id_extension_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/server:drain_manager_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"conn_manager_impl_test\",\n    srcs = [\n        # Split to avoid compiler OOM, especially on ASAN.\n        \"conn_manager_impl_test.cc\",\n        \"conn_manager_impl_test_2.cc\",\n        \"conn_manager_impl_test_base.cc\",\n        \"conn_manager_impl_test_base.h\",\n    ],\n    shard_count = 3,\n    deps = [\n        \"//source/common/http:conn_manager_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/extensions/access_loggers/file:file_access_log_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/test_common:logging_lib\",\n        
\"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"conn_manager_utility_test\",\n    srcs = [\"conn_manager_utility_test.cc\"],\n    deps = [\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/http:conn_manager_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:request_id_extension_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"date_provider_impl_test\",\n    srcs = [\"date_provider_impl_test.cc\"],\n    deps = [\n        \"//source/common/http:date_provider_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"header_map_impl_test\",\n    srcs = [\"header_map_impl_test.cc\"],\n    deps = [\n        \"//source/common/http:header_list_view_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"header_map_impl_speed_test\",\n   
 srcs = [\"header_map_impl_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"header_map_impl_speed_test_benchmark_test\",\n    benchmark_binary = \"header_map_impl_speed_test\",\n)\n\nenvoy_proto_library(\n    name = \"header_map_impl_fuzz_proto\",\n    srcs = [\"header_map_impl_fuzz.proto\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"header_map_impl_fuzz_test\",\n    srcs = [\"header_map_impl_fuzz_test.cc\"],\n    corpus = \"header_map_impl_corpus\",\n    deps = [\n        \":header_map_impl_fuzz_proto_cc_proto\",\n        \"//source/common/http:header_map_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/test_common:test_runtime_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"header_utility_test\",\n    srcs = [\"header_utility_test.cc\"],\n    deps = [\n        \"//source/common/http:header_utility_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"user_agent_test\",\n    srcs = [\"user_agent_test.cc\"],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:user_agent_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"utility_fuzz_proto\",\n    srcs = [\"utility_fuzz.proto\"],\n    deps = [\"@envoy_api//envoy/config/core/v3:pkg\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"utility_fuzz_test\",\n    srcs = [\"utility_fuzz_test.cc\"],\n    corpus = \"utility_corpus\",\n    deps = [\n        \":utility_fuzz_proto_cc_proto\",\n        \"//source/common/http:utility_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/test_common:utility_lib\",\n    
],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    deps = [\n        \"//source/common/http:exception_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"path_utility_test\",\n    srcs = [\"path_utility_test.cc\"],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:path_utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"request_id_extension_uuid_impl_test\",\n    srcs = [\"request_id_extension_uuid_impl_test.cc\"],\n    deps = [\n        \"//source/common/http:request_id_extension_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"status_test\",\n    srcs = [\"status_test.cc\"],\n    deps = [\n        \"//source/common/http:status_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"path_utility_fuzz_proto\",\n    srcs = [\"path_utility_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"path_utility_fuzz_test\",\n    srcs = [\"path_utility_fuzz_test.cc\"],\n    corpus = \"path_utility_corpus\",\n    deps = [\n        \":path_utility_fuzz_proto_cc_proto\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:path_utility_lib\",\n        \"//test/fuzz:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/http/async_client_impl_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/async_client_impl.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Http {\nnamespace {\n\nclass AsyncClientImplTest : public testing::Test {\npublic:\n  AsyncClientImplTest()\n      : http_context_(stats_store_.symbolTable()),\n        client_(cm_.thread_local_cluster_.cluster_.info_, stats_store_, dispatcher_, local_info_,\n                cm_, runtime_, random_,\n                Router::ShadowWriterPtr{new NiceMock<Router::MockShadowWriter>()}, http_context_) {\n    message_->headers().setMethod(\"GET\");\n    message_->headers().setHost(\"host\");\n    message_->headers().setPath(\"/\");\n    ON_CALL(*cm_.conn_pool_.host_, locality())\n        .WillByDefault(ReturnRef(envoy::config::core::v3::Locality().default_instance()));\n  }\n\n  virtual void expectSuccess(AsyncClient::Request* sent_request, uint64_t code) {\n    EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n    EXPECT_CALL(callbacks_, onSuccess_(_, _))\n        .WillOnce(Invoke([sent_request, 
code](const AsyncClient::Request& request,\n                                              ResponseMessage* response) -> void {\n          // Verify that callback is called with the same request handle as returned by\n          // AsyncClient::send().\n          EXPECT_EQ(sent_request, &request);\n          EXPECT_EQ(code, Utility::getResponseStatus(response->headers()));\n        }));\n  }\n\n  void expectResponseHeaders(MockAsyncClientStreamCallbacks& callbacks, uint64_t code,\n                             bool end_stream) {\n    EXPECT_CALL(callbacks, onHeaders_(_, end_stream))\n        .WillOnce(Invoke([code](ResponseHeaderMap& headers, bool) -> void {\n          EXPECT_EQ(std::to_string(code), headers.getStatusValue());\n        }));\n  }\n\n  RequestMessagePtr message_{new RequestMessageImpl()};\n  Stats::MockIsolatedStatsStore stats_store_;\n  MockAsyncClientCallbacks callbacks_;\n  MockAsyncClientStreamCallbacks stream_callbacks_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<MockRequestEncoder> stream_encoder_;\n  ResponseDecoder* response_decoder_{};\n  NiceMock<Event::MockTimer>* timer_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  Http::ContextImpl http_context_;\n  AsyncClientImpl client_;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n};\n\nclass AsyncClientImplTracingTest : public AsyncClientImplTest {\npublic:\n  Tracing::MockSpan parent_span_;\n  const std::string child_span_name_{\"Test Child Span Name\"};\n\n  void expectSuccess(AsyncClient::Request* sent_request, uint64_t code) override {\n    EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _))\n        .WillOnce(Invoke([](Tracing::Span& span, const Http::ResponseHeaderMap* response_headers) {\n          span.setTag(\"onBeforeFinalizeUpstreamSpan\", \"called\");\n          ASSERT_NE(nullptr, response_headers);\n        
}));\n    EXPECT_CALL(callbacks_, onSuccess_(_, _))\n        .WillOnce(Invoke([sent_request, code](const AsyncClient::Request& request,\n                                              ResponseMessage* response) -> void {\n          // Verify that callback is called with the same request handle as returned by\n          // AsyncClient::send().\n          EXPECT_EQ(sent_request, &request);\n          EXPECT_EQ(code, Utility::getResponseStatus(response->headers()));\n        }));\n  }\n};\n\nTEST_F(AsyncClientImplTest, BasicStream) {\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  headers.addCopy(\"x-envoy-internal\", \"true\");\n  headers.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  headers.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body.get()), true));\n\n  expectResponseHeaders(stream_callbacks_, 200, false);\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body.get()), true));\n  EXPECT_CALL(stream_callbacks_, onComplete());\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n\n  stream->sendHeaders(headers, false);\n  stream->sendData(*body, true);\n\n  response_decoder_->decode100ContinueHeaders(\n      ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{\":status\", \"100\"}}));\n  response_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{\":status\", \"200\"}}), false);\n  
response_decoder_->decodeData(*body, true);\n\n  EXPECT_EQ(\n      1UL,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_200\").value());\n  EXPECT_EQ(1UL, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                     .counter(\"internal.upstream_rq_200\")\n                     .value());\n}\n\nTEST_F(AsyncClientImplTest, Basic) {\n  message_->body().add(\"test body\");\n  Buffer::Instance& data = message_->body();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl copy(message_->headers());\n  copy.addCopy(\"x-envoy-internal\", \"true\");\n  copy.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  copy.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&copy), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true));\n\n  auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions());\n  EXPECT_NE(request, nullptr);\n\n  expectSuccess(request, 200);\n\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), false);\n  response_decoder_->decodeData(data, true);\n\n  EXPECT_EQ(\n      1UL,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_200\").value());\n  EXPECT_EQ(1UL, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                     .counter(\"internal.upstream_rq_200\")\n                     .value());\n}\n\nTEST_F(AsyncClientImplTracingTest, Basic) {\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  
message_->body().add(\"test body\");\n  Buffer::Instance& data = message_->body();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl copy(message_->headers());\n  copy.addCopy(\"x-envoy-internal\", \"true\");\n  copy.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  copy.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(parent_span_, spawnChild_(_, \"async fake_cluster egress\", _))\n      .WillOnce(Return(child_span));\n\n  AsyncClient::RequestOptions options = AsyncClient::RequestOptions().setParentSpan(parent_span_);\n  EXPECT_CALL(*child_span, setSampled(true));\n  EXPECT_CALL(*child_span, injectContext(_));\n\n  auto* request = client_.send(std::move(message_), callbacks_, options);\n  EXPECT_NE(request, nullptr);\n\n  expectSuccess(request, 200);\n\n  EXPECT_CALL(*child_span, setTag(Eq(\"onBeforeFinalizeUpstreamSpan\"), Eq(\"called\")));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.1\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.1:443\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"200\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(*child_span, finishSpan());\n\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), 
false);\n  response_decoder_->decodeData(data, true);\n}\n\nTEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) {\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  message_->body().add(\"test body\");\n  Buffer::Instance& data = message_->body();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl copy(message_->headers());\n  copy.addCopy(\"x-envoy-internal\", \"true\");\n  copy.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  copy.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(parent_span_, spawnChild_(_, child_span_name_, _)).WillOnce(Return(child_span));\n\n  AsyncClient::RequestOptions options = AsyncClient::RequestOptions()\n                                            .setParentSpan(parent_span_)\n                                            .setChildSpanName(child_span_name_)\n                                            .setSampled(false);\n  EXPECT_CALL(*child_span, setSampled(false));\n  EXPECT_CALL(*child_span, injectContext(_));\n\n  auto* request = client_.send(std::move(message_), callbacks_, options);\n  EXPECT_NE(request, nullptr);\n\n  expectSuccess(request, 200);\n\n  EXPECT_CALL(*child_span, setTag(Eq(\"onBeforeFinalizeUpstreamSpan\"), Eq(\"called\")));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.1\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.1:443\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span, 
setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"200\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(*child_span, finishSpan());\n\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), false);\n  response_decoder_->decodeData(data, true);\n}\n\nTEST_F(AsyncClientImplTest, BasicHashPolicy) {\n  message_->body().add(\"test body\");\n  Buffer::Instance& data = message_->body();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const std::string&, Upstream::ResourcePriority, auto,\n                     Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {\n            // this is the hash of :path header value \"/\"\n            EXPECT_EQ(16761507700594825962UL, context->computeHashKey().value());\n            return &cm_.conn_pool_;\n          }));\n\n  TestRequestHeaderMapImpl copy(message_->headers());\n  copy.addCopy(\"x-envoy-internal\", \"true\");\n  copy.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  copy.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&copy), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true));\n\n  AsyncClient::RequestOptions options;\n  Protobuf::RepeatedPtrField<envoy::config::route::v3::RouteAction::HashPolicy> hash_policy;\n  hash_policy.Add()->mutable_header()->set_header_name(\":path\");\n  options.setHashPolicy(hash_policy);\n\n  auto* request = 
client_.send(std::move(message_), callbacks_, options);\n  EXPECT_NE(request, nullptr);\n\n  expectSuccess(request, 200);\n\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), false);\n  response_decoder_->decodeData(data, true);\n}\n\nTEST_F(AsyncClientImplTest, Retry) {\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"upstream.use_retry\", 100))\n      .WillByDefault(Return(true));\n  RequestMessage* message_copy = message_.get();\n\n  message_->body().add(\"test body\");\n  Buffer::Instance& data = message_->body();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true));\n\n  message_->headers().setReferenceEnvoyRetryOn(Headers::get().EnvoyRetryOnValues._5xx);\n\n  auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions());\n  EXPECT_NE(request, nullptr);\n\n  // Expect retry and retry timer create.\n  timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), true);\n\n  // Retry request.\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ 
= &decoder;\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_copy->headers()), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true));\n  timer_->invokeCallback();\n\n  // Normal response.\n  expectSuccess(request, 200);\n  ResponseHeaderMapPtr response_headers2(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers2), true);\n}\n\nTEST_F(AsyncClientImplTest, RetryWithStream) {\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"upstream.use_retry\", 100))\n      .WillByDefault(Return(true));\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body.get()), true));\n\n  headers.setReferenceEnvoyRetryOn(Headers::get().EnvoyRetryOnValues._5xx);\n  AsyncClient::Stream* stream =\n      client_.start(stream_callbacks_, AsyncClient::StreamOptions().setBufferBodyForRetry(true));\n  stream->sendHeaders(headers, false);\n  stream->sendData(*body, true);\n\n  // Expect retry and retry timer create.\n  timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), true);\n\n  // Retry request.\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& 
decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body.get()), true));\n  timer_->invokeCallback();\n\n  // Normal response.\n  expectResponseHeaders(stream_callbacks_, 200, true);\n  EXPECT_CALL(stream_callbacks_, onComplete());\n  ResponseHeaderMapPtr response_headers2(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers2), true);\n}\n\nTEST_F(AsyncClientImplTest, MultipleStreams) {\n  // Start stream 1\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers(message_->headers());\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body.get()), true));\n\n  expectResponseHeaders(stream_callbacks_, 200, false);\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body.get()), true));\n  EXPECT_CALL(stream_callbacks_, onComplete());\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n  stream->sendData(*body, true);\n\n  // Start stream 2\n  Buffer::InstancePtr body2{new Buffer::OwnedImpl(\"test body\")};\n  NiceMock<MockRequestEncoder> stream_encoder2;\n  
ResponseDecoder* response_decoder2{};\n  MockAsyncClientStreamCallbacks stream_callbacks2;\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder2, cm_.conn_pool_.host_, stream_info_);\n        response_decoder2 = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers2(message_->headers());\n  EXPECT_CALL(stream_encoder2, encodeHeaders(HeaderMapEqualRef(&headers2), false));\n  EXPECT_CALL(stream_encoder2, encodeData(BufferEqual(body2.get()), true));\n\n  expectResponseHeaders(stream_callbacks2, 503, true);\n  EXPECT_CALL(stream_callbacks2, onComplete());\n\n  AsyncClient::Stream* stream2 = client_.start(stream_callbacks2, AsyncClient::StreamOptions());\n  stream2->sendHeaders(headers2, false);\n  stream2->sendData(*body2, true);\n\n  // Finish stream 2.\n  ResponseHeaderMapPtr response_headers2(new TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  response_decoder2->decodeHeaders(std::move(response_headers2), true);\n\n  // Finish stream 1.\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), false);\n  response_decoder_->decodeData(*body, true);\n}\n\nTEST_F(AsyncClientImplTest, MultipleRequests) {\n  // Send request 1\n  message_->body().add(\"test body\");\n  Buffer::Instance& data = message_->body();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, 
encodeHeaders(HeaderMapEqualRef(&message_->headers()), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true));\n\n  auto* request1 = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions());\n  EXPECT_NE(request1, nullptr);\n\n  // Send request 2.\n  RequestMessagePtr message2{new RequestMessageImpl()};\n  HttpTestUtility::addDefaultHeaders(message2->headers());\n  NiceMock<MockRequestEncoder> stream_encoder2;\n  ResponseDecoder* response_decoder2{};\n  MockAsyncClientCallbacks callbacks2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder2, cm_.conn_pool_.host_, stream_info_);\n        response_decoder2 = &decoder;\n        return nullptr;\n      }));\n  EXPECT_CALL(stream_encoder2, encodeHeaders(HeaderMapEqualRef(&message2->headers()), true));\n\n  auto* request2 = client_.send(std::move(message2), callbacks2, AsyncClient::RequestOptions());\n  EXPECT_NE(request2, nullptr);\n\n  // Send request 3.\n  RequestMessagePtr message3{new RequestMessageImpl()};\n  HttpTestUtility::addDefaultHeaders(message3->headers());\n  NiceMock<MockRequestEncoder> stream_encoder3;\n  ResponseDecoder* response_decoder3{};\n  MockAsyncClientCallbacks callbacks3;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder3, cm_.conn_pool_.host_, stream_info_);\n        response_decoder3 = &decoder;\n        return nullptr;\n      }));\n  EXPECT_CALL(stream_encoder3, encodeHeaders(HeaderMapEqualRef(&message3->headers()), true));\n\n  auto* request3 = client_.send(std::move(message3), callbacks3, AsyncClient::RequestOptions());\n  EXPECT_NE(request3, nullptr);\n\n  // 
Finish request 2.\n  ResponseHeaderMapPtr response_headers2(new TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(callbacks2, onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n  EXPECT_CALL(callbacks2, onSuccess_(_, _))\n      .WillOnce(Invoke(\n          [request2](const AsyncClient::Request& request, ResponseMessage* response) -> void {\n            // Verify that callback is called with the same request handle as returned by\n            // AsyncClient::send().\n            EXPECT_EQ(request2, &request);\n            EXPECT_EQ(503, Utility::getResponseStatus(response->headers()));\n          }));\n  response_decoder2->decodeHeaders(std::move(response_headers2), true);\n\n  // Finish request 1.\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), false);\n  expectSuccess(request1, 200);\n  response_decoder_->decodeData(data, true);\n\n  // Finish request 3.\n  ResponseHeaderMapPtr response_headers3(new TestResponseHeaderMapImpl{{\":status\", \"500\"}});\n  EXPECT_CALL(callbacks3, onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n  EXPECT_CALL(callbacks3, onSuccess_(_, _))\n      .WillOnce(Invoke(\n          [request3](const AsyncClient::Request& request, ResponseMessage* response) -> void {\n            // Verify that callback is called with the same request handle as returned by\n            // AsyncClient::send().\n            EXPECT_EQ(request3, &request);\n            EXPECT_EQ(500, Utility::getResponseStatus(response->headers()));\n          }));\n  response_decoder3->decodeHeaders(std::move(response_headers3), true);\n}\n\nTEST_F(AsyncClientImplTest, StreamAndRequest) {\n  // Send request\n  message_->body().add(\"test body\");\n  Buffer::Instance& data = message_->body();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) 
-> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true));\n\n  auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions());\n  EXPECT_NE(request, nullptr);\n\n  // Start stream\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n  NiceMock<MockRequestEncoder> stream_encoder2;\n  ResponseDecoder* response_decoder2{};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder2, cm_.conn_pool_.host_, stream_info_);\n        response_decoder2 = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(stream_encoder2, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder2, encodeData(BufferEqual(body.get()), true));\n\n  expectResponseHeaders(stream_callbacks_, 200, false);\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body.get()), true));\n  EXPECT_CALL(stream_callbacks_, onComplete());\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n  stream->sendData(*body, true);\n\n  // Finish stream.\n  ResponseHeaderMapPtr response_headers2(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder2->decodeHeaders(std::move(response_headers2), false);\n  response_decoder2->decodeData(*body, true);\n\n  // Finish request.\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", 
\"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), false);\n  expectSuccess(request, 200);\n  response_decoder_->decodeData(data, true);\n}\n\nTEST_F(AsyncClientImplTest, StreamWithTrailers) {\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  TestRequestTrailerMapImpl trailers{{\"some\", \"request_trailer\"}};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body.get()), false));\n  EXPECT_CALL(stream_encoder_, encodeTrailers(HeaderMapEqualRef(&trailers)));\n\n  expectResponseHeaders(stream_callbacks_, 200, false);\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body.get()), false));\n  TestResponseTrailerMapImpl expected_trailers{{\"some\", \"trailer\"}};\n  EXPECT_CALL(stream_callbacks_, onTrailers_(HeaderMapEqualRef(&expected_trailers)));\n  EXPECT_CALL(stream_callbacks_, onComplete());\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n  stream->sendData(*body, false);\n  stream->sendTrailers(trailers);\n\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), false);\n  response_decoder_->decodeData(*body, false);\n  response_decoder_->decodeTrailers(\n      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{\"some\", \"trailer\"}}});\n}\n\nTEST_F(AsyncClientImplTest, Trailers) {\n  
message_->body().add(\"test body\");\n  Buffer::Instance& data = message_->body();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true));\n\n  auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions());\n  EXPECT_NE(request, nullptr);\n\n  expectSuccess(request, 200);\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), false);\n  response_decoder_->decodeData(data, false);\n  response_decoder_->decodeTrailers(\n      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{\"some\", \"trailer\"}}});\n}\n\nTEST_F(AsyncClientImplTest, ImmediateReset) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n\n  auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions());\n  EXPECT_NE(request, nullptr);\n\n  expectSuccess(request, 503);\n  stream_encoder_.getStream().resetStream(StreamResetReason::RemoteReset);\n\n  EXPECT_EQ(\n      1UL,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_503\").value());\n}\n\nTEST_F(AsyncClientImplTest, 
LocalResetAfterStreamStart) {\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  headers.addCopy(\"x-envoy-internal\", \"true\");\n  headers.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  headers.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body.get()), false));\n\n  TestResponseHeaderMapImpl expected_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_headers), false));\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body.get()), false));\n  EXPECT_CALL(stream_callbacks_, onReset());\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n  stream->sendData(*body, false);\n\n  response_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{\":status\", \"200\"}}), false);\n  response_decoder_->decodeData(*body, false);\n\n  stream->reset();\n}\n\nTEST_F(AsyncClientImplTest, SendDataAfterRemoteClosure) {\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return 
nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  headers.addCopy(\"x-envoy-internal\", \"true\");\n  headers.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  headers.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n\n  TestResponseHeaderMapImpl expected_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_headers), false));\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body.get()), true));\n  EXPECT_CALL(stream_callbacks_, onComplete());\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n\n  response_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{\":status\", \"200\"}}), false);\n  response_decoder_->decodeData(*body, true);\n\n  EXPECT_CALL(stream_encoder_, encodeData(_, _)).Times(0);\n  stream->sendData(*body, true);\n}\n\nTEST_F(AsyncClientImplTest, SendTrailersRemoteClosure) {\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  headers.addCopy(\"x-envoy-internal\", \"true\");\n  headers.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  headers.addCopy(\":scheme\", \"http\");\n\n  TestRequestTrailerMapImpl trailers;\n  trailers.addCopy(\"x-test-trailer\", \"1\");\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n\n  TestResponseHeaderMapImpl 
expected_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_headers), false));\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body.get()), true));\n  EXPECT_CALL(stream_callbacks_, onComplete());\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n\n  response_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{\":status\", \"200\"}}), false);\n  response_decoder_->decodeData(*body, true);\n\n  EXPECT_CALL(stream_encoder_, encodeTrailers(_)).Times(0);\n  stream->sendTrailers(trailers);\n}\n\n// Validate behavior when the stream's onHeaders() callback performs a stream\n// reset.\nTEST_F(AsyncClientImplTest, ResetInOnHeaders) {\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  headers.addCopy(\"x-envoy-internal\", \"true\");\n  headers.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  headers.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body.get()), false));\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n\n  TestResponseHeaderMapImpl expected_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_headers), false))\n      .WillOnce(Invoke([&stream](HeaderMap&, bool) { stream->reset(); }));\n  EXPECT_CALL(stream_callbacks_, onData(_, _)).Times(0);\n  
EXPECT_CALL(stream_callbacks_, onReset());\n\n  stream->sendHeaders(headers, false);\n  stream->sendData(*body, false);\n\n  Http::StreamDecoderFilterCallbacks* filter_callbacks =\n      static_cast<Http::AsyncStreamImpl*>(stream);\n  filter_callbacks->encodeHeaders(\n      ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{\":status\", \"200\"}}), false, \"details\");\n}\n\nTEST_F(AsyncClientImplTest, RemoteResetAfterStreamStart) {\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  headers.addCopy(\"x-envoy-internal\", \"true\");\n  headers.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  headers.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body.get()), false));\n\n  TestResponseHeaderMapImpl expected_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_headers), false));\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body.get()), false));\n  EXPECT_CALL(stream_callbacks_, onReset());\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n  stream->sendData(*body, false);\n\n  response_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{\":status\", \"200\"}}), false);\n  response_decoder_->decodeData(*body, false);\n\n  
stream_encoder_.getStream().resetStream(StreamResetReason::RemoteReset);\n}\n\nTEST_F(AsyncClientImplTest, ResetAfterResponseStart) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n\n  auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions());\n  EXPECT_NE(request, nullptr);\n\n  EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n  EXPECT_CALL(callbacks_, onFailure(_, _))\n      .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request,\n                                                AsyncClient::FailureReason reason) {\n        // Verify that callback is called with the same request handle as returned by\n        // AsyncClient::send().\n        EXPECT_EQ(&request, sent_request);\n        EXPECT_EQ(reason, AsyncClient::FailureReason::Reset);\n      }));\n\n  ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder_->decodeHeaders(std::move(response_headers), false);\n  stream_encoder_.getStream().resetStream(StreamResetReason::RemoteReset);\n}\n\nTEST_F(AsyncClientImplTest, ResetStream) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n  
EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n  EXPECT_CALL(stream_callbacks_, onReset());\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(message_->headers(), true);\n  stream->reset();\n}\n\nTEST_F(AsyncClientImplTest, CancelRequest) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n  EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n\n  EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n  AsyncClient::Request* request =\n      client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions());\n  request->cancel();\n}\n\nTEST_F(AsyncClientImplTracingTest, CancelRequest) {\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(parent_span_, spawnChild_(_, \"async fake_cluster egress\", _))\n      .WillOnce(Return(child_span));\n\n  AsyncClient::RequestOptions options = AsyncClient::RequestOptions().setParentSpan(parent_span_);\n  EXPECT_CALL(*child_span, setSampled(true));\n  EXPECT_CALL(*child_span, injectContext(_));\n  EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _))\n      .WillOnce(Invoke([](Tracing::Span& span, const Http::ResponseHeaderMap* response_headers) {\n        span.setTag(\"onBeforeFinalizeUpstreamSpan\", \"called\");\n        // Since 
this is  a failure, we expect no response headers.\n        ASSERT_EQ(nullptr, response_headers);\n      }));\n  AsyncClient::Request* request = client_.send(std::move(message_), callbacks_, options);\n\n  EXPECT_CALL(*child_span, setTag(Eq(\"onBeforeFinalizeUpstreamSpan\"), Eq(\"called\")));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.1\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.1:443\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"0\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Canceled), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(*child_span, finishSpan());\n\n  request->cancel();\n}\n\nTEST_F(AsyncClientImplTest, DestroyWithActiveStream) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), false));\n  EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n  EXPECT_CALL(stream_callbacks_, onReset());\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(message_->headers(), false);\n}\n\nTEST_F(AsyncClientImplTest, DestroyWithActiveRequest) {\n  EXPECT_CALL(cm_.conn_pool_, 
newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n\n  auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions());\n  EXPECT_NE(request, nullptr);\n\n  EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n  EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n  EXPECT_CALL(callbacks_, onFailure(_, _))\n      .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request,\n                                                AsyncClient::FailureReason reason) {\n        // Verify that callback is called with the same request handle as returned by\n        // AsyncClient::send().\n        EXPECT_EQ(&request, sent_request);\n        EXPECT_EQ(reason, AsyncClient::FailureReason::Reset);\n      }));\n}\n\nTEST_F(AsyncClientImplTracingTest, DestroyWithActiveRequest) {\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(parent_span_, spawnChild_(_, \"async fake_cluster egress\", _))\n      .WillOnce(Return(child_span));\n\n  AsyncClient::RequestOptions options = AsyncClient::RequestOptions().setParentSpan(parent_span_);\n  EXPECT_CALL(*child_span, setSampled(true));\n  EXPECT_CALL(*child_span, injectContext(_));\n\n  auto* request = client_.send(std::move(message_), callbacks_, options);\n  EXPECT_NE(request, nullptr);\n\n  EXPECT_CALL(callbacks_, 
onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n  EXPECT_CALL(callbacks_, onFailure(_, _))\n      .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request,\n                                                AsyncClient::FailureReason reason) {\n        // Verify that callback is called with the same request handle as returned by\n        // AsyncClient::send().\n        EXPECT_EQ(&request, sent_request);\n        EXPECT_EQ(reason, AsyncClient::FailureReason::Reset);\n      }));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.1\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.1:443\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"0\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)))\n      .Times(AnyNumber());\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ErrorReason), Eq(\"Reset\")));\n  EXPECT_CALL(*child_span, finishSpan());\n}\n\nTEST_F(AsyncClientImplTest, PoolFailure) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(),\n                                nullptr);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n  EXPECT_CALL(callbacks_, onSuccess_(_, _))\n      .WillOnce(Invoke([](const AsyncClient::Request& request, ResponseMessage* response) -> void {\n    
    // The callback gets called before AsyncClient::send() completes, which means that we don't\n        // have a request handle to compare to.\n        EXPECT_NE(nullptr, &request);\n        EXPECT_EQ(503, Utility::getResponseStatus(response->headers()));\n      }));\n\n  EXPECT_EQ(nullptr, client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()));\n\n  EXPECT_EQ(\n      1UL,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_503\").value());\n}\n\nTEST_F(AsyncClientImplTest, PoolFailureWithBody) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(),\n                                nullptr);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n  EXPECT_CALL(callbacks_, onSuccess_(_, _))\n      .WillOnce(Invoke([](const AsyncClient::Request& request, ResponseMessage* response) -> void {\n        // The callback gets called before AsyncClient::send() completes, which means that we don't\n        // have a request handle to compare to.\n        EXPECT_NE(nullptr, &request);\n        EXPECT_EQ(503, Utility::getResponseStatus(response->headers()));\n      }));\n  message_->body().add(\"hello\");\n  EXPECT_EQ(nullptr, client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()));\n\n  EXPECT_EQ(\n      1UL,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_503\").value());\n}\n\nTEST_F(AsyncClientImplTest, StreamTimeout) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, 
cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n  timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _));\n  EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n\n  TestRequestHeaderMapImpl expected_timeout{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_timeout), false));\n  EXPECT_CALL(stream_callbacks_, onData(_, true));\n  EXPECT_CALL(stream_callbacks_, onComplete());\n\n  AsyncClient::Stream* stream = client_.start(\n      stream_callbacks_, AsyncClient::StreamOptions().setTimeout(std::chrono::milliseconds(40)));\n  stream->sendHeaders(message_->headers(), true);\n  timer_->invokeCallback();\n\n  EXPECT_EQ(1UL,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_timeout\")\n                .value());\n  EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value());\n  EXPECT_EQ(\n      1UL,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_504\").value());\n}\n\nTEST_F(AsyncClientImplTest, StreamTimeoutHeadReply) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  RequestMessagePtr message{new RequestMessageImpl()};\n  HttpTestUtility::addDefaultHeaders(message->headers(), \"HEAD\");\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message->headers()), true));\n  timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _));\n  
EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n\n  TestRequestHeaderMapImpl expected_timeout{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_timeout), true));\n  EXPECT_CALL(stream_callbacks_, onComplete());\n\n  AsyncClient::Stream* stream = client_.start(\n      stream_callbacks_, AsyncClient::StreamOptions().setTimeout(std::chrono::milliseconds(40)));\n  stream->sendHeaders(message->headers(), true);\n  timer_->invokeCallback();\n}\n\nTEST_F(AsyncClientImplTest, RequestTimeout) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n  timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _));\n  EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n\n  auto* request =\n      client_.send(std::move(message_), callbacks_,\n                   AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(40)));\n  EXPECT_NE(request, nullptr);\n\n  expectSuccess(request, 504);\n  timer_->invokeCallback();\n\n  EXPECT_EQ(1UL,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_timeout\")\n                .value());\n  EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value());\n  EXPECT_EQ(\n      1UL,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_504\").value());\n}\n\nTEST_F(AsyncClientImplTracingTest, RequestTimeout) {\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      
.WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n\n  timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _));\n  EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n  EXPECT_CALL(parent_span_, spawnChild_(_, \"async fake_cluster egress\", _))\n      .WillOnce(Return(child_span));\n\n  AsyncClient::RequestOptions options = AsyncClient::RequestOptions()\n                                            .setParentSpan(parent_span_)\n                                            .setTimeout(std::chrono::milliseconds(40));\n  EXPECT_CALL(*child_span, setSampled(true));\n  EXPECT_CALL(*child_span, injectContext(_));\n\n  auto* request = client_.send(std::move(message_), callbacks_, options);\n  EXPECT_NE(request, nullptr);\n\n  expectSuccess(request, 504);\n\n  EXPECT_CALL(*child_span, setTag(Eq(\"onBeforeFinalizeUpstreamSpan\"), Eq(\"called\")));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.1\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.1:443\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"504\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"UT\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)))\n      .Times(AnyNumber());\n  EXPECT_CALL(*child_span, finishSpan());\n  
timer_->invokeCallback();\n}\n\nTEST_F(AsyncClientImplTest, DisableTimer) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n  timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(200), _));\n  EXPECT_CALL(*timer_, disableTimer());\n  EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n  AsyncClient::Request* request =\n      client_.send(std::move(message_), callbacks_,\n                   AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(200)));\n  EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1);\n  request->cancel();\n}\n\nTEST_F(AsyncClientImplTest, DisableTimerWithStream) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](StreamDecoder&,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        return nullptr;\n      }));\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true));\n  timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _));\n  EXPECT_CALL(*timer_, disableTimer());\n  EXPECT_CALL(stream_encoder_.stream_, resetStream(_));\n  EXPECT_CALL(stream_callbacks_, onReset());\n\n  AsyncClient::Stream* stream = client_.start(\n      stream_callbacks_, AsyncClient::StreamOptions().setTimeout(std::chrono::milliseconds(40)));\n  stream->sendHeaders(message_->headers(), true);\n  stream->reset();\n}\n\nTEST_F(AsyncClientImplTest, 
MultipleDataStream) {\n  Buffer::InstancePtr body{new Buffer::OwnedImpl(\"test body\")};\n  Buffer::InstancePtr body2{new Buffer::OwnedImpl(\"test body2\")};\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder,\n                           ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* {\n        callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_);\n        response_decoder_ = &decoder;\n        return nullptr;\n      }));\n\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  headers.addCopy(\"x-envoy-internal\", \"true\");\n  headers.addCopy(\"x-forwarded-for\", \"127.0.0.1\");\n  headers.addCopy(\":scheme\", \"http\");\n\n  EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&headers), false));\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body.get()), false));\n\n  TestResponseHeaderMapImpl expected_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_headers), false));\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body.get()), false));\n\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n  stream->sendData(*body, false);\n\n  response_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{\":status\", \"200\"}}), false);\n  response_decoder_->decodeData(*body, false);\n\n  EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(body2.get()), true));\n  EXPECT_CALL(stream_callbacks_, onData(BufferEqual(body2.get()), true));\n  EXPECT_CALL(stream_callbacks_, onComplete());\n\n  stream->sendData(*body2, true);\n  response_decoder_->decodeData(*body2, true);\n\n  EXPECT_EQ(\n      1UL,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_200\").value());\n  EXPECT_EQ(1UL, 
cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                     .counter(\"internal.upstream_rq_200\")\n                     .value());\n}\n\nTEST_F(AsyncClientImplTest, WatermarkCallbacks) {\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n  Http::StreamDecoderFilterCallbacks* filter_callbacks =\n      static_cast<Http::AsyncStreamImpl*>(stream);\n  filter_callbacks->onDecoderFilterAboveWriteBufferHighWatermark();\n  EXPECT_TRUE(stream->isAboveWriteBufferHighWatermark());\n  filter_callbacks->onDecoderFilterAboveWriteBufferHighWatermark();\n  EXPECT_TRUE(stream->isAboveWriteBufferHighWatermark());\n  filter_callbacks->onDecoderFilterBelowWriteBufferLowWatermark();\n  EXPECT_TRUE(stream->isAboveWriteBufferHighWatermark());\n  filter_callbacks->onDecoderFilterBelowWriteBufferLowWatermark();\n  EXPECT_FALSE(stream->isAboveWriteBufferHighWatermark());\n  EXPECT_CALL(stream_callbacks_, onReset());\n}\n\nTEST_F(AsyncClientImplTest, RdsGettersTest) {\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  stream->sendHeaders(headers, false);\n  Http::StreamDecoderFilterCallbacks* filter_callbacks =\n      static_cast<Http::AsyncStreamImpl*>(stream);\n  auto route = filter_callbacks->route();\n  ASSERT_NE(nullptr, route);\n  auto route_entry = route->routeEntry();\n  ASSERT_NE(nullptr, route_entry);\n  auto& path_match_criterion = route_entry->pathMatchCriterion();\n  EXPECT_EQ(\"\", path_match_criterion.matcher());\n  EXPECT_EQ(Router::PathMatchType::None, path_match_criterion.matchType());\n  const auto& route_config = route_entry->virtualHost().routeConfig();\n  EXPECT_EQ(\"\", route_config.name());\n  EXPECT_EQ(0, 
route_config.internalOnlyHeaders().size());\n  EXPECT_EQ(nullptr, route_config.route(headers, stream_info_, 0));\n  auto cluster_info = filter_callbacks->clusterInfo();\n  ASSERT_NE(nullptr, cluster_info);\n  EXPECT_EQ(cm_.thread_local_cluster_.cluster_.info_, cluster_info);\n  EXPECT_CALL(stream_callbacks_, onReset());\n}\n\nTEST_F(AsyncClientImplTest, DumpState) {\n  TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  AsyncClient::Stream* stream = client_.start(stream_callbacks_, AsyncClient::StreamOptions());\n  Http::StreamDecoderFilterCallbacks* filter_callbacks =\n      static_cast<Http::AsyncStreamImpl*>(stream);\n\n  std::stringstream out;\n  filter_callbacks->scope().dumpState(out);\n  std::string state = out.str();\n  EXPECT_THAT(state, testing::HasSubstr(\"protocol_: 1\"));\n\n  EXPECT_CALL(stream_callbacks_, onReset());\n}\n\n} // namespace\n\n// Must not be in anonymous namespace for friend to work.\nclass AsyncClientImplUnitTest : public testing::Test {\npublic:\n  AsyncStreamImpl::RouteImpl route_impl_{\n      \"foo\", absl::nullopt,\n      Protobuf::RepeatedPtrField<envoy::config::route::v3::RouteAction::HashPolicy>()};\n  AsyncStreamImpl::NullVirtualHost vhost_;\n  AsyncStreamImpl::NullConfig config_;\n};\n\n// Test the extended fake route that AsyncClient uses.\nTEST_F(AsyncClientImplUnitTest, RouteImplInitTest) {\n  EXPECT_EQ(nullptr, route_impl_.decorator());\n  EXPECT_EQ(nullptr, route_impl_.tracingConfig());\n  EXPECT_EQ(nullptr, route_impl_.perFilterConfig(\"\"));\n  EXPECT_EQ(Code::InternalServerError, route_impl_.routeEntry()->clusterNotFoundResponseCode());\n  EXPECT_EQ(nullptr, route_impl_.routeEntry()->corsPolicy());\n  EXPECT_EQ(nullptr, route_impl_.routeEntry()->hashPolicy());\n  EXPECT_EQ(1, route_impl_.routeEntry()->hedgePolicy().initialRequests());\n  EXPECT_EQ(0, route_impl_.routeEntry()->hedgePolicy().additionalRequestChance().numerator());\n  
EXPECT_FALSE(route_impl_.routeEntry()->hedgePolicy().hedgeOnPerTryTimeout());\n  EXPECT_EQ(nullptr, route_impl_.routeEntry()->metadataMatchCriteria());\n  EXPECT_TRUE(route_impl_.routeEntry()->rateLimitPolicy().empty());\n  EXPECT_TRUE(route_impl_.routeEntry()->rateLimitPolicy().getApplicableRateLimit(0).empty());\n  EXPECT_EQ(absl::nullopt, route_impl_.routeEntry()->idleTimeout());\n  EXPECT_EQ(absl::nullopt, route_impl_.routeEntry()->grpcTimeoutOffset());\n  EXPECT_TRUE(route_impl_.routeEntry()->opaqueConfig().empty());\n  EXPECT_TRUE(route_impl_.routeEntry()->includeVirtualHostRateLimits());\n  EXPECT_TRUE(route_impl_.routeEntry()->metadata().filter_metadata().empty());\n  EXPECT_EQ(nullptr,\n            route_impl_.routeEntry()->typedMetadata().get<Config::TypedMetadata::Object>(\"bar\"));\n  EXPECT_EQ(nullptr, route_impl_.routeEntry()->perFilterConfig(\"bar\"));\n  EXPECT_TRUE(route_impl_.routeEntry()->upgradeMap().empty());\n  EXPECT_EQ(false, route_impl_.routeEntry()->internalRedirectPolicy().enabled());\n  EXPECT_TRUE(route_impl_.routeEntry()->shadowPolicies().empty());\n  EXPECT_TRUE(route_impl_.routeEntry()->virtualHost().rateLimitPolicy().empty());\n  EXPECT_EQ(nullptr, route_impl_.routeEntry()->virtualHost().corsPolicy());\n  EXPECT_EQ(nullptr, route_impl_.routeEntry()->virtualHost().perFilterConfig(\"bar\"));\n  EXPECT_FALSE(route_impl_.routeEntry()->virtualHost().includeAttemptCountInRequest());\n  EXPECT_FALSE(route_impl_.routeEntry()->virtualHost().includeAttemptCountInResponse());\n  EXPECT_FALSE(route_impl_.routeEntry()->virtualHost().routeConfig().usesVhds());\n  EXPECT_EQ(nullptr, route_impl_.routeEntry()->tlsContextMatchCriteria());\n}\n\nTEST_F(AsyncClientImplUnitTest, NullConfig) {\n  EXPECT_FALSE(config_.mostSpecificHeaderMutationsWins());\n}\n\nTEST_F(AsyncClientImplUnitTest, NullVirtualHost) {\n  EXPECT_EQ(std::numeric_limits<uint32_t>::max(), vhost_.retryShadowBufferLimit());\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/async_client_utility_test.cc",
    "content": "#include \"common/http/async_client_utility.h\"\n\n#include \"test/mocks/http/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::StrictMock;\n\nnamespace Envoy {\nnamespace Http {\nnamespace {\n\nclass AsyncClientRequestTrackerTest : public testing::Test {\npublic:\n  std::unique_ptr<AsyncClientRequestTracker> active_requests_{\n      std::make_unique<AsyncClientRequestTracker>()};\n\n  NiceMock<MockAsyncClient> async_client_;\n  StrictMock<MockAsyncClientRequest> request1_{&async_client_};\n  StrictMock<MockAsyncClientRequest> request2_{&async_client_};\n  StrictMock<MockAsyncClientRequest> request3_{&async_client_};\n};\n\nTEST_F(AsyncClientRequestTrackerTest, ShouldSupportRemoveWithoutAdd) {\n  // Should not fail.\n  active_requests_->remove(request1_);\n}\n\nTEST_F(AsyncClientRequestTrackerTest, OnDestructDoNothingIfThereAreNoActiveRequests) {\n  // Trigger destruction.\n  active_requests_.reset();\n}\n\nTEST_F(AsyncClientRequestTrackerTest, OnDestructCancelActiveRequests) {\n  // Include active requests.\n  active_requests_->add(request1_);\n  active_requests_->add(request2_);\n  active_requests_->add(request3_);\n  // Exclude active requests.\n  active_requests_->remove(request2_);\n\n  // Must cancel active requests on destruction.\n  EXPECT_CALL(request1_, cancel());\n  EXPECT_CALL(request3_, cancel());\n\n  // Trigger destruction.\n  active_requests_.reset();\n}\n\n} // namespace\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/codec_client_test.cc",
    "content": "#include <memory>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/http/codec_client.h\"\n#include \"common/http/exception.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AtMost;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::Pointee;\nusing testing::Ref;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::Throw;\n\nnamespace Envoy {\nnamespace Http {\nnamespace {\n\nclass CodecClientTest : public testing::Test {\npublic:\n  CodecClientTest() {\n    connection_ = new NiceMock<Network::MockClientConnection>();\n\n    EXPECT_CALL(*connection_, detectEarlyCloseWhenReadDisabled(false));\n    EXPECT_CALL(*connection_, addConnectionCallbacks(_)).WillOnce(SaveArgAddress(&connection_cb_));\n    EXPECT_CALL(*connection_, connect());\n    EXPECT_CALL(*connection_, addReadFilter(_))\n        .WillOnce(\n            Invoke([this](Network::ReadFilterSharedPtr filter) -> void { filter_ = filter; }));\n\n    codec_ = new Http::MockClientConnection();\n\n    Network::ClientConnectionPtr connection{connection_};\n    EXPECT_CALL(dispatcher_, createTimer_(_));\n    client_ = 
std::make_unique<CodecClientForTest>(CodecClient::Type::HTTP1, std::move(connection),\n                                                   codec_, nullptr, host_, dispatcher_);\n    ON_CALL(*connection_, streamInfo()).WillByDefault(ReturnRef(stream_info_));\n  }\n\n  ~CodecClientTest() override { EXPECT_EQ(0U, client_->numActiveRequests()); }\n\n  Event::MockDispatcher dispatcher_;\n  Network::MockClientConnection* connection_;\n  Http::MockClientConnection* codec_;\n  std::unique_ptr<CodecClientForTest> client_;\n  Network::ConnectionCallbacks* connection_cb_;\n  Network::ReadFilterSharedPtr filter_;\n  std::shared_ptr<Upstream::MockIdleTimeEnabledClusterInfo> cluster_{\n      new NiceMock<Upstream::MockIdleTimeEnabledClusterInfo>()};\n  Upstream::HostDescriptionConstSharedPtr host_{\n      Upstream::makeTestHostDescription(cluster_, \"tcp://127.0.0.1:80\")};\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n};\n\nTEST_F(CodecClientTest, NotCallDetectEarlyCloseWhenReadDiabledUsingHttp3) {\n  auto connection = std::make_unique<NiceMock<Network::MockClientConnection>>();\n\n  EXPECT_CALL(*connection, detectEarlyCloseWhenReadDisabled(false)).Times(0);\n  EXPECT_CALL(*connection, addConnectionCallbacks(_)).WillOnce(SaveArgAddress(&connection_cb_));\n  EXPECT_CALL(*connection, connect());\n  EXPECT_CALL(*connection, addReadFilter(_));\n  auto codec = new Http::MockClientConnection();\n\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n  auto client = std::make_unique<CodecClientForTest>(\n      CodecClient::Type::HTTP3, std::move(connection), codec, nullptr, host_, dispatcher_);\n}\n\nTEST_F(CodecClientTest, BasicHeaderOnlyResponse) {\n  ResponseDecoder* inner_decoder;\n  NiceMock<MockRequestEncoder> inner_encoder;\n  EXPECT_CALL(*codec_, newStream(_))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder) -> RequestEncoder& {\n        inner_decoder = &decoder;\n        return inner_encoder;\n      }));\n\n  Http::MockResponseDecoder outer_decoder;\n  
client_->newStream(outer_decoder);\n\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  EXPECT_CALL(outer_decoder, decodeHeaders_(Pointee(Ref(*response_headers)), true));\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n}\n\nTEST_F(CodecClientTest, BasicResponseWithBody) {\n  ResponseDecoder* inner_decoder;\n  NiceMock<MockRequestEncoder> inner_encoder;\n  EXPECT_CALL(*codec_, newStream(_))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder) -> RequestEncoder& {\n        inner_decoder = &decoder;\n        return inner_encoder;\n      }));\n\n  Http::MockResponseDecoder outer_decoder;\n  client_->newStream(outer_decoder);\n\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  EXPECT_CALL(outer_decoder, decodeHeaders_(Pointee(Ref(*response_headers)), false));\n  inner_decoder->decodeHeaders(std::move(response_headers), false);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(outer_decoder, decodeData(Ref(buffer), true));\n  inner_decoder->decodeData(buffer, true);\n}\n\nTEST_F(CodecClientTest, DisconnectBeforeHeaders) {\n  ResponseDecoder* inner_decoder;\n  NiceMock<MockRequestEncoder> inner_encoder;\n  EXPECT_CALL(*codec_, newStream(_))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder) -> RequestEncoder& {\n        inner_decoder = &decoder;\n        return inner_encoder;\n      }));\n\n  Http::MockResponseDecoder outer_decoder;\n  Http::StreamEncoder& request_encoder = client_->newStream(outer_decoder);\n  Http::MockStreamCallbacks callbacks;\n  request_encoder.getStream().addCallbacks(callbacks);\n\n  // When we get a remote close with an active request we should try to send zero bytes through\n  // the codec.\n  EXPECT_CALL(callbacks, onResetStream(StreamResetReason::ConnectionTermination, _));\n  EXPECT_CALL(*codec_, dispatch(_));\n  connection_cb_->onEvent(Network::ConnectionEvent::Connected);\n  
connection_cb_->onEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(CodecClientTest, IdleTimerWithNoActiveRequests) {\n  ResponseDecoder* inner_decoder;\n  NiceMock<MockRequestEncoder> inner_encoder;\n  EXPECT_CALL(*codec_, newStream(_))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder) -> RequestEncoder& {\n        inner_decoder = &decoder;\n        return inner_encoder;\n      }));\n\n  Http::MockResponseDecoder outer_decoder;\n  Http::StreamEncoder& request_encoder = client_->newStream(outer_decoder);\n  Http::MockStreamCallbacks callbacks;\n  request_encoder.getStream().addCallbacks(callbacks);\n  connection_cb_->onEvent(Network::ConnectionEvent::Connected);\n\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  EXPECT_CALL(outer_decoder, decodeHeaders_(Pointee(Ref(*response_headers)), false));\n  inner_decoder->decodeHeaders(std::move(response_headers), false);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(outer_decoder, decodeData(Ref(buffer), true));\n  inner_decoder->decodeData(buffer, true);\n  EXPECT_NE(client_->idleTimer(), nullptr);\n\n  // Close the client and validate idleTimer is reset\n  EXPECT_EQ(client_->numActiveRequests(), 0);\n  client_->close();\n  // TODO(ramaraochavali): Use default connection mock handlers for raising events.\n  connection_cb_->onEvent(Network::ConnectionEvent::LocalClose);\n  EXPECT_EQ(client_->idleTimer(), nullptr);\n}\n\nTEST_F(CodecClientTest, IdleTimerClientRemoteCloseWithActiveRequests) {\n  ResponseDecoder* inner_decoder;\n  NiceMock<MockRequestEncoder> inner_encoder;\n  EXPECT_CALL(*codec_, newStream(_))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder) -> RequestEncoder& {\n        inner_decoder = &decoder;\n        return inner_encoder;\n      }));\n\n  Http::MockResponseDecoder outer_decoder;\n  Http::StreamEncoder& request_encoder = client_->newStream(outer_decoder);\n  Http::MockStreamCallbacks callbacks;\n  
request_encoder.getStream().addCallbacks(callbacks);\n\n  // When we get a remote close with an active request validate idleTimer is reset after client\n  // close\n  EXPECT_CALL(callbacks, onResetStream(StreamResetReason::ConnectionTermination, _));\n  EXPECT_CALL(*codec_, dispatch(_));\n  EXPECT_NE(client_->numActiveRequests(), 0);\n  connection_cb_->onEvent(Network::ConnectionEvent::Connected);\n  connection_cb_->onEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_EQ(client_->idleTimer(), nullptr);\n}\n\nTEST_F(CodecClientTest, IdleTimerClientLocalCloseWithActiveRequests) {\n  ResponseDecoder* inner_decoder;\n  NiceMock<MockRequestEncoder> inner_encoder;\n  EXPECT_CALL(*codec_, newStream(_))\n      .WillOnce(Invoke([&](ResponseDecoder& decoder) -> RequestEncoder& {\n        inner_decoder = &decoder;\n        return inner_encoder;\n      }));\n\n  Http::MockResponseDecoder outer_decoder;\n  Http::StreamEncoder& request_encoder = client_->newStream(outer_decoder);\n  Http::MockStreamCallbacks callbacks;\n  request_encoder.getStream().addCallbacks(callbacks);\n\n  // When we get a local close with an active request validate idleTimer is reset after client close\n  EXPECT_CALL(callbacks, onResetStream(StreamResetReason::ConnectionTermination, _));\n  connection_cb_->onEvent(Network::ConnectionEvent::Connected);\n  // TODO(ramaraochavali): Use default connection mock handlers for raising events.\n  client_->close();\n  connection_cb_->onEvent(Network::ConnectionEvent::LocalClose);\n  EXPECT_EQ(client_->idleTimer(), nullptr);\n}\n\nTEST_F(CodecClientTest, ProtocolError) {\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Return(codecProtocolError(\"protocol error\")));\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));\n\n  Buffer::OwnedImpl data;\n  filter_->onData(data, false);\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_protocol_error_.value());\n}\n\nTEST_F(CodecClientTest, 408Response) {\n  EXPECT_CALL(*codec_, dispatch(_))\n      
.WillOnce(Return(prematureResponseError(\"\", Code::RequestTimeout)));\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));\n\n  Buffer::OwnedImpl data;\n  filter_->onData(data, false);\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_protocol_error_.value());\n}\n\nTEST_F(CodecClientTest, PrematureResponse) {\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Return(prematureResponseError(\"\", Code::OK)));\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));\n\n  Buffer::OwnedImpl data;\n  filter_->onData(data, false);\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_protocol_error_.value());\n}\n\nTEST_F(CodecClientTest, WatermarkPassthrough) {\n  EXPECT_CALL(*codec_, onUnderlyingConnectionAboveWriteBufferHighWatermark());\n  connection_cb_->onAboveWriteBufferHighWatermark();\n\n  EXPECT_CALL(*codec_, onUnderlyingConnectionBelowWriteBufferLowWatermark());\n  connection_cb_->onBelowWriteBufferLowWatermark();\n}\n\nTEST_F(CodecClientTest, SSLConnectionInfo) {\n  std::string session_id = \"D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B\";\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(session_id));\n  EXPECT_CALL(*connection_, ssl()).WillRepeatedly(Return(connection_info));\n  connection_cb_->onEvent(Network::ConnectionEvent::Connected);\n  EXPECT_NE(nullptr, stream_info_.downstreamSslConnection());\n  EXPECT_EQ(session_id, stream_info_.downstreamSslConnection()->sessionId());\n}\n\n// Test the codec getting input from a real TCP connection.\nclass CodecNetworkTest : public testing::TestWithParam<Network::Address::IpVersion> {\npublic:\n  CodecNetworkTest() : api_(Api::createApiForTest()), stream_info_(api_->timeSource()) {\n    dispatcher_ = api_->allocateDispatcher(\"test_thread\");\n    auto socket = std::make_shared<Network::TcpListenSocket>(\n        Network::Test::getCanonicalLoopbackAddress(GetParam()), 
nullptr, true);\n    Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection(\n        socket->localAddress(), source_address_, Network::Test::createRawBufferSocket(), nullptr);\n    upstream_listener_ = dispatcher_->createListener(std::move(socket), listener_callbacks_, true,\n                                                     ENVOY_TCP_BACKLOG_SIZE);\n    client_connection_ = client_connection.get();\n    client_connection_->addConnectionCallbacks(client_callbacks_);\n\n    codec_ = new Http::MockClientConnection();\n    client_ =\n        std::make_unique<CodecClientForTest>(CodecClient::Type::HTTP1, std::move(client_connection),\n                                             codec_, nullptr, host_, *dispatcher_);\n\n    int expected_callbacks = 2;\n    EXPECT_CALL(listener_callbacks_, onAccept_(_))\n        .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n          upstream_connection_ = dispatcher_->createServerConnection(\n              std::move(socket), Network::Test::createRawBufferSocket(), stream_info_);\n          upstream_connection_->addConnectionCallbacks(upstream_callbacks_);\n\n          expected_callbacks--;\n          if (expected_callbacks == 0) {\n            dispatcher_->exit();\n          }\n        }));\n\n    EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(InvokeWithoutArgs([&]() -> void {\n          expected_callbacks--;\n          if (expected_callbacks == 0) {\n            dispatcher_->exit();\n          }\n        }));\n\n    // Since we mocked the connected event, we need to mock these close events even though we don't\n    // care about them in these tests.\n    EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose)).Times(AtMost(1));\n    EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)).Times(AtMost(1));\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  void 
createNewStream() {\n    ResponseDecoder* inner_decoder;\n    EXPECT_CALL(*codec_, newStream(_))\n        .WillOnce(Invoke([&](ResponseDecoder& decoder) -> RequestEncoder& {\n          inner_decoder = &decoder;\n          return inner_encoder_;\n        }));\n\n    client_->newStream(outer_decoder_);\n  }\n\n  void close() {\n    client_->close();\n    EXPECT_CALL(upstream_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose))\n        .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\nprotected:\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  Network::ListenerPtr upstream_listener_;\n  Network::MockTcpListenerCallbacks listener_callbacks_;\n  Network::MockConnectionHandler connection_handler_;\n  Network::Address::InstanceConstSharedPtr source_address_;\n  Http::MockClientConnection* codec_;\n  std::unique_ptr<CodecClientForTest> client_;\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};\n  Upstream::HostDescriptionConstSharedPtr host_{\n      Upstream::makeTestHostDescription(cluster_, \"tcp://127.0.0.1:80\")};\n  Network::ConnectionPtr upstream_connection_;\n  NiceMock<Network::MockConnectionCallbacks> upstream_callbacks_;\n  Network::ClientConnection* client_connection_{};\n  NiceMock<Network::MockConnectionCallbacks> client_callbacks_;\n  NiceMock<MockRequestEncoder> inner_encoder_;\n  NiceMock<MockResponseDecoder> outer_decoder_;\n  StreamInfo::StreamInfoImpl stream_info_;\n};\n\n// Send a block of data from upstream, and ensure it is received by the codec.\nTEST_P(CodecNetworkTest, SendData) {\n  createNewStream();\n\n  const std::string full_data = \"HTTP/1.1 200 OK\\r\\ncontent-length: 0\\r\\n\";\n  Buffer::OwnedImpl data(full_data);\n  upstream_connection_->write(data, false);\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    EXPECT_EQ(full_data, 
data.toString());\n    dispatcher_->exit();\n    return Http::okStatus();\n  }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_CALL(inner_encoder_.stream_, resetStream(_));\n  close();\n}\n\n// Send a block of data, and then have upstream close the connection.\n// Make sure that the data is passed on as is the network event.\nTEST_P(CodecNetworkTest, SendHeadersAndClose) {\n  createNewStream();\n\n  // Send some header data.\n  const std::string full_data = \"HTTP/1.1 200 OK\\r\\ncontent-length: 0\\r\\n\";\n  Buffer::OwnedImpl data(full_data);\n  upstream_connection_->write(data, false);\n  upstream_connection_->close(Network::ConnectionCloseType::FlushWrite);\n  EXPECT_CALL(*codec_, dispatch(_))\n      .Times(2)\n      .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        EXPECT_EQ(full_data, data.toString());\n        return Http::okStatus();\n      }))\n      .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        EXPECT_EQ(\"\", data.toString());\n        return Http::okStatus();\n      }));\n  // Because the headers are not complete, the disconnect will reset the stream.\n  // Note even if the final \\r\\n were appended to the header data, enough of the\n  // codec state is mocked out that the data would not be framed and the stream\n  // would not be finished.\n  EXPECT_CALL(inner_encoder_.stream_, resetStream(_)).WillOnce(InvokeWithoutArgs([&]() -> void {\n    for (auto callbacks : inner_encoder_.stream_.callbacks_) {\n      callbacks->onResetStream(StreamResetReason::RemoteReset, absl::string_view());\n    }\n    dispatcher_->exit();\n  }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Mark the stream read disabled, then send a block of data and close the connection. 
Ensure the\n// data is drained before the connection close is processed.\n// Regression test for https://github.com/envoyproxy/envoy/issues/1679\nTEST_P(CodecNetworkTest, SendHeadersAndCloseUnderReadDisable) {\n  createNewStream();\n\n  client_connection_->readDisable(true);\n  const std::string full_data = \"HTTP/1.1 200 OK\\r\\ncontent-length: 0\\r\\n\\r\\n\";\n  Buffer::OwnedImpl data(full_data);\n  upstream_connection_->write(data, false);\n  upstream_connection_->close(Network::ConnectionCloseType::FlushWrite);\n\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  client_connection_->readDisable(false);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .Times(2)\n      .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        EXPECT_EQ(full_data, data.toString());\n        return Http::okStatus();\n      }))\n      .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        EXPECT_EQ(\"\", data.toString());\n        return Http::okStatus();\n      }));\n  EXPECT_CALL(inner_encoder_.stream_, resetStream(_)).WillOnce(InvokeWithoutArgs([&]() -> void {\n    for (auto callbacks : inner_encoder_.stream_.callbacks_) {\n      callbacks->onResetStream(StreamResetReason::RemoteReset, absl::string_view());\n    }\n    dispatcher_->exit();\n  }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, CodecNetworkTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n} // namespace\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/100-continue",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"expect\"\n        value: \"100-continue\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      continue_headers {\n        headers {\n          key: \":status\"\n          value: \"100\"\n        }\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"404\"\n        }\n      }\n      end_stream: true\n    }\n  }\n}\nactions { quiesce_drain {} }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/absolute_url_bad",
    "content": "h1_settings {\n  server {\n    allow_absolute_url: true\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"http:///foo\"\n      }\n    }\n    end_stream: true\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/absolute_url_disallow",
    "content": "h1_settings {\n  server {\n    allow_absolute_url: false\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"http://foo.com:34/bar\"\n      }\n    }\n    end_stream: true\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/absolute_url_ok",
    "content": "h1_settings {\n  server {\n    allow_absolute_url: true\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"http://foo.com:34/bar\"\n      }\n    }\n    end_stream: true\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/chunked",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data_value: \"4\\r\\nWiki\\r\\n5\\r\\npedia\\r\\nE\\r\\n in\\r\\n\\r\\nchunks.\\r\\n0\\r\\n\\r\\n\"\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"content-length\"\n          value: \"5\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      data_value: \"4\\r\\nWiki\\r\\n5\\r\\npedia\\r\\nE\\r\\n in\\r\\n\\r\\nchunks.\\r\\n0\\r\\n\\r\\n\"\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-codec_impl_fuzz_test-5687788200001536",
    "content": "h2_settings {\n  client {\n    hpack_table_size: 35072\n    initial_connection_window_size: 35072\n  }\n  server {\n    hpack_table_size: 257\n    initial_stream_window_size: 4294836216\n    initial_connection_window_size: 4294835968\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream 
{\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\000\\000\\000]\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\177H\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"transfer-encodinG\"\n        value: \"YY\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\000\\000\\000]\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 1\n    value: 63\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream 
{\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream 
{\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        value: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions 
{\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"BB\"\n      }\n    }\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions 
{\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"transfer-encodinG\"\n        value: \"````````````````````````````````````````````````````````yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\\000\\225yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy````````````````````````````\"\n      }\n    }\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 34\n    value: 1545\n    server: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"transfer-encodinG\"\n        value: \"````````````````````````````````````````````````````````yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\\000\\225yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy````````````````````````````\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream 
{\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 34\n    value: 1545\n    server: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 1\n    value: 63\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions 
{\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\000\\000\\000]\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions 
{\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        value: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 1\n    value: 63\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream 
{\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\177\\177\\177\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream 
{\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        
key: \"transfer-encodinG\"\n        value: \"````````````````````````````````````````````````````````yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\\000\\225yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy````````````````````````````\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\177H\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\000\\000\\000]\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions 
{\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\000\\000\\000]\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"transfer-encodinG\"\n        value: \"````````````````````````````````````````````````````````yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\\000\\225yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy````````````````````````````\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"transfer-encodinG\"\n        value: \"````````````````````````````````````````````````````````yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\\000\\225yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy````````````````````````````\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 1\n    value: 63\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\177H\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  server_drain {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 1\n    value: 63\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"transfer-encodinG\"\n        value: \"YY\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"transfer-encodinG\"\n        value: \"YY\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\177H\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 34\n    value: 1\n    server: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 34\n    value: 1545\n    server: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions 
{\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 1\n    value: 63\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"transfer-encodinG\"\n        value: \"YY\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 34\n    value: 1545\n    server: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 1\n    value: 63\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        value: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  server_drain {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    
buffer: 1\n    offset: 1\n    value: 63\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  
new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\177\\177\\177\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        value: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 1\n    offset: 1\n    value: 63\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-codec_impl_fuzz_test-5692024096817152",
    "content": "h2_settings {\n  client {\n    hpack_table_size: 103\n    max_concurrent_streams: 103\n    initial_stream_window_size: 103\n  }\n}\nactions {\n}\nactions {\n  server_drain {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"transfer-encodinG\"\n        value: \"Yh2_settings {\\n  client {\\n    hpack_table_size: 103\\n    max_concurrent_streams: 103\\n    initial_stream_window_size: 103\\n  }\\n}\\nactions {\\n}\\nactions {\\n  new_stream {\\n    request_headers {\\n      headers {\\n        key: \\\"transfer-encodinG\\\"\\n        value: \\\"Y\\\"\\n      }\\n    }\\n  }\\n}\\n\"\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-codec_impl_fuzz_test-5723814130876416",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"expect\"\n        value: \"100-continue\"\n      }\n    }\n  }\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n      }\n    }\n  }\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n      }\n    }\n  }\n}\nactions {\n  quiesce_drain {\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-codec_impl_fuzz_test-5750359880892416",
    "content": "actions {\n  new_stream {\n  }\n}\nactions {\n  mutate {\n    buffer: 2\n    offset: 2\n    value: 2\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5102523695497216",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n    }\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  stream_action {\n    request {\n      trailers {\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n      }\n      end_stream: true\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5107763548520448",
    "content": "h2_settings {   server {     max_concurrent_streams: 3     initial_connection_window_size: 1   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   client_drain {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   mutate {     buffer: 1     offset: 1     value: 1     server: true   } } actions {   mutate {     buffer: 1     offset: 3     value: 7     server: true   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   quiesce_drain {   } } actions {   new_stream {   } } "
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5629973466710016",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n    }\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5635096546639872",
    "content": "h2_settings { server {  max_concurrent_streams: 2 } } actions {   new_stream {   } } actions {   new_stream {     request_headers {     }   } } actions {   new_stream {   } } actions { } actions {   new_stream {     request_headers {     }   } } actions { } actions {   new_stream {     request_headers {     }   } } actions {   new_stream {   } } actions {   new_stream {     request_headers {       headers {         key: \" \"       }     }   } } actions {   new_stream {   } } actions { } actions {   quiesce_drain {   } } actions {   new_stream {   } } actions { } actions {   new_stream {   } } actions { } actions {   new_stream {   } } actions {   quiesce_drain {   } } actions { } actions {   new_stream {     request_headers {     }   } } actions { } actions { } actions {   new_stream {   } } actions {   new_stream {     request_headers {     }   } } actions { } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions { } actions { } actions {   new_stream {   } } actions {   new_stream {   } } actions { } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {     request_headers {       headers {         key: \":method\"         value: \"  �\"       }       headers {         key: \":path\"         value: \"�\"       }       headers {         key: \":scheme\"         value: \"ttp\"       }       headers {         key: \":authority\"         value: \"foo.com\"       }     }   } } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions {   new_stream {   } } actions { } actions {   stream_action {     stream_id: 4   } } actions {   new_stream {     end_stream: true   } } actions { } actions {   new_stream {   } } actions { } actions {   new_stream {   } } actions {   new_stream {   } } actions {   swap_buffer {     server: true   } } actions { } 
actions { } actions { } actions {   new_stream {   } } actions { } actions {   new_stream {   } } actions { } actions {   server_drain {   } } actions {   new_stream {   } } actions { } actions {   new_stream {   } } actions { } actions { } actions {   new_stream {   } } actions {   new_stream {     request_headers {     }   } } actions {   new_stream {   } } actions { } actions {   new_stream {   } } actions {   new_stream {     request_headers {     }   } } actions {   new_stream {   } } actions {   swap_buffer {     buffer: 8    } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions { } actions { } actions { } actions {   new_stream {   } } actions { } actions {   new_stream {   } } actions {   client_drain {   } } actions { } actions {   server_drain {   } } actions {   stream_action {   } } actions { } actions {   new_stream {   } } actions {   new_stream {   } } actions {   new_stream {   } } actions {   quiesce_drain {   } } actions { } actions { } actions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  stream_action {\n  }\n}\nactions {\n  server_drain {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  server_drain {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  
}\n}\nactions {\n}\nactions {\n}\nactions {\n  swap_buffer {\n    buffer: 350\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n}\nactions {\n  stream_action {\n    stream_id: 65537\n    request {\n      data: 1\n    }\n  }\n}\nactions {\n}\nactions {\n  mutate {\n    offset: 524288\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  stream_action {\n    response {\n    }\n  }\n}\nactions {\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  server_drain {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  server_drain {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions 
{\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  swap_buffer {\n    buffer: 1\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  stream_action {\n    stream_id: 4\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n      }\n    }\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  stream_action {\n    request {\n    }\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  mutate {\n    
server: true\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  stream_action {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  mutate {\n    offset: 2\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  mutate {\n    buffer: 10223616\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5635865126895616",
    "content": "actions {\n  quiesce_drain {\n  }\n}\nactions {\n  stream_action {\n    stream_id: 2097152\n    response {\n      continue_headers {\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"GET\"\n        value: \"/ddddddddddddd\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/ddddddddddddd\"\n      }\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \"0\"\n        value: \"GET\"\n      }\n      headers {\n        key: \"GET\"\n      }\n      headers {\n        key: \"GET\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"connection\"\n        value: \",,,,,,,,[,(,5,,,,,,up,,,,upg1ade\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n        headers {\n          key: \"connection\"\n          value: \",,,,,,,,[,(,5,,,,,,up,,,,upg1ade\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n        headers {\n          key: \"connection\"\n          value: \",,,,,,,,[,(,5,,,,,,up,,,,upg1ade\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n        headers {\n          key: \"connection\"\n          value: \",,,,,,,,[,(,5,,,,,,up,,,,upg1ade\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      data: 64512\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n        headers {\n          key: \"connection\"\n          value: \",,,,,,,,[,(,5,,,,,,up,,,,upg1ade\"\n        }\n      }\n      end_stream: true\n    }\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n        headers {\n          key: \"connection\"\n          value: 
\",,,,,,,,[,(,5,,,,,,up,,,,upg1ade\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n        headers {\n          key: \"connection\"\n          value: \",,,,,,,,[,(,5,,,,,,up,,,,upg1ade\"\n        }\n      }\n      end_stream: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1024\n    response {\n      continue_headers {\n        headers {\n          key: \"connection\"\n          value: \",,,,,,,,[,(,5,,,,,,up,,,,upg1ade\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n        headers {\n          key: \"connection\"\n          value: \",,,,,,,,[,(,5,,,,pg1ade\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      reset_stream: 2097152\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n        headers {\n          key: \"connection\"\n          value: \",,,,,,X,,[,(,5.,,,,,up,,,,upgeta1ade\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5650111579815936",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \"5\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"connection\"\n        value: \"upgrade\"\n      }\n      headers {\n        key: \"upgrade\"\n        value: \"WebSocket\"\n      }\n    }\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n}\nactions {\n  quiesce_drain {\n  }\n}\nactions {\n  stream_action {\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n        headers {\n          key: \"connection\"\n          value: \"foo.com\"\n        }\n        headers {\n          key: \"upgrade\"\n          value: \"WebSocket\"\n        }\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"content-length\"\n          value: \"5\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      data: 5\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      read_disable: true\n    }\n  }\n}\nactions {\n  swap_buffer {\n    buffer: 64\n  }\n}\nactions {\n  stream_action {\n    request {\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      read_disable: false\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      reset_stream: 168452352\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5657409819770880",
    "content": "actions { } actions {   new_stream {     request_headers {       headers {         key: \":method\"         value: \"GET\"       }       headers {         key: \":path\"         value: \"/\"       }     }   } } actions {   client_drain {   } } actions { } actions { } actions { } actions { } actions { } actions {   new_stream {     request_headers {       headers {         key: \":method\"       }       headers {         key: \":path\"       }     }   } } actions { } actions { } actions { } actions { } actions {   stream_action {     request {       data: 0     }   } } actions { } actions { } actions { } actions { } actions { } actions { } actions { }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5658640424370176",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \":method\"         value: \"GET\"       }       headers {         key: \":path\"         value: \"/\"       }     }   } } actions {   new_stream {   } } actions {   stream_action {     response {       continue_headers {       }     }   } } actions {   mutate {     offset: 35     value: 54     server: true   } } actions {   stream_action {     response {       headers {       }     }   } }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5693519941861376",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \":method\"         value: \"GET\"       }       headers {         key: \":path\"         value: \"/\"       }     }   } } actions {   client_drain {   } } actions {   stream_action {     request {       trailers {       }     }   } } actions {   stream_action {     stream_id: 1     response {       headers {       }     }   } } actions { } actions { } actions {   client_drain {   } } actions { } actions {   stream_action {     response {       trailers {       }     }   } } actions { } actions { } actions { } actions { } actions { } actions { } actions { }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5698895985508352",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"5\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"r\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"5\"\n      }\n    }\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      data: 1\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      trailers {\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5699757025263616",
    "content": "actions {\n  client_drain {\n  }\n}\nactions {\n  mutate {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":scheme\"\n        value: \"T\"\n      }\n      headers {\n        key: \":method\"\n        value: \"%\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n    }\n    metadata {\n    }\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  server_drain {\n  }\n}\nactions {\n  stream_action {\n    request {\n      data: 16761078\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      data: 142541011\n    }\n    dispatching_action {\n      continue_headers {\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      trailers {\n      }\n    }\n    dispatching_action {\n      data: 0\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5720162173452288",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \":path\"       }       headers {         key: \":method\"       }       headers {         key: \"transfer-encodinG\\0  \"       }     }   } }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5722972495544320",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \":method\"         value: \"GET\"       }       headers {         key: \":path\"         value: \"/\"       }     }   } } actions {   new_stream {   } } actions {   stream_action {     request {       trailers {       }     }   } } actions {   stream_action {     response {       headers {       }     }   } } actions {   new_stream {   } } actions {   stream_action {     response {       trailers {       }     }   } }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5726642969772032",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \":method\"         value: \"                                                                                       _he      ke  new_st e  new_st    kr   ke st e  new_st    t_he      ke  new_st e  new_st    kr   ke  n      key:e      ke  new_st e  new_st    kr   ke  n      _he      ke  new_st e  new_st    kr   ke st e  new_st    t_he      ke  new_st e  new_st    kr   ke  n      key:e      ke  new_st e  new_st    kr   ke    key:e      ke  new_st e  new_st    kr   ke  n      _he      ke  new_st e  new_st    kr   ke st e  new_s   ke  n      key:e      ke  new_st e  new_st    kr   ke  n      _he      ke  new_st e  new_st    kr   ke st e  new_st    t_he      ke  new_st e  new_st    kr   ke  n      key:e      ke  new_st e  new_st    kr   ke    key:e      ke  new_st e  new_st    kr   ke  n      _he      ke  new_st e  new_st    kr   ke st e  new_st    t_he      ke  new_st e  new_st    kr  n      key:e      ke  new_st e  new_st    kr   ke    key:e      ke  new_st e  new_st    kr   ke  n      _h  new_st    t_he      ke  new_st e  new_st    kr  n      key:e      ke  new_st e  new_st    kr   ke    key:e      ke  new_st e    ke    key:e      ke  new_st e  new_st    kr   ke  n      _he      ke  new_st e  new_st    kr   ke st e  new_st    t_he      ke  new_st e  new_st    kr  n      key:e      ke  new_st e  new_st    kr   ke    key:e      ke  new_st e  new_st    kr   ke  n      _h  new_st    t_he      ke  new_st e  new_st    kr  n      key:e      ke  new_st e  new_st    kr   ke    key:e      ke  new_st e  new_st    kr   ke  n      _he      ke  new_st e  new_st    kr   ke st e  new_st    t_he      ke  new_st e  new_st    kr   ke  n      key:e      ke  he      ke  new_st e  new_st    kr   ke st e  new_st    t_he      ket_he      ke  new_st e  new_st    kr    e  new_st    t_he      ke  new_st e  new_st    kr   ke  n      key:e      ke  newstrey:_]E]u___   }\\n},\"\n      }\n      headers {\n  
      key: \":method\"\n        value: \"GETactions {\\n  muta{\\n        ketruest_he       key: ctions {\\n  ers {\\n      headers {\\n        key: ctions {\\n  new_streamTnrtasfTkey: ctioew:  new_stream {asfer-e           key: ctioew: r-e     key: ctionsest_headers new_stream {asfer-e           key: ctioew: r-e     key: ctionsest_headers {\\n      he: r-e     key: ctionsest_heade headers {u        key: cti new_streew_stream {asfer-e           key: c{u   amrtasfer-headers {headers {\\n      headers {u   new_stream {asfer-e           key: ctioew: r-e     key: ctionsest_headers {\\n      he: r-e     key: ctionsest_heade headers {u        key: cti new_streew_stream {asfer-e           key: c{u   amTkey: ctioew:  new_stream {asfer-e           key: ctioew: r-e     key: ctionsest_headers {\\n      headers headers {u   new_stream {asfer-e           key: ctioew: r-e     key: ctionsest_headers {\\n      he: r-e     key: ctionsest_heade headers {u        key: cti  headers {u   new_stream {asfer-e           key: ctioew: r-e     oew: r-e     key: ctionsest_headers {\\n      he: r-e     key: ctionsest_heade headers {u        key: cti new_streew_stream {asfer-e           key: c{u   amTkey: ctioew:  new_stream {asfer-e           key: ctioew: r-e     key: ctionsest_headers new_stream {asfer-e           key: ctioew: r-ede headers {u        key: cti new_streesfer-headest_heade headers {u        key: cti  headers {u   new_stream {asfer-e           key: ctioew: r-e     oew: r-e     key: ctionsest_headers {\\n      he: r-e     key: ctionsest_heade headers {u        key: cti new_streew_stream {asfer-e           key: c{u   amTkey: ctioew:  new_stream {asfer-e           key: ctioew: r-e     key: ctionsest_headers new_stream {asfer-e           key: ctioew: r-ede headers {u        key: cti new_streesfer-headers {@      headers {u   new_stream {asfer-e           key: ctioew: \\\"Tnrtasfer-e     key: ction: cti new_stream {reew_stream {asfer-e           key: c{u   amTkey: ctioew:  new_stream 
{asfer-e           key: ctioew: r-e     key: ctionsest_headers new_stream {asfer-e           key: ctioew: r-ede headers {u        key: cti new_streesfer-headers {@      headers {u   new_stream {asfer-e           key: ctioew: \\\"Tnrtasfer-e     key: ction: cti new_stream {\\n  new_streame: s {\\n  new_streamTnrtasfe   \"\n      }\n      headers {\n        key: \":path\"\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5728207897624576",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \" \"         value: \" \"       }       headers {         value: \"�\"       }       headers {         key: \":method\"         value: \"GET\"       }     }   } } actions {   mutate {     buffer: 2     offset: 2     value: 2   } } actions {   quiesce_drain {   } } \n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5731902089592832",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \":method\"         value: \"�\"       }       headers {         key: \":path\"         value: \"�\"       }       headers {         key: \":scheme\"         value: \"T\"       }       headers {         key: \":authority\"         value: \"T\"       }     }   } } actions {   client_drain {   } } actions {   stream_action {     request {       read_disable: true     }   } } actions {   stream_action {     response {       read_disable: false     }   } }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5748356020699136",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \":method\"       }       headers {       }       headers {         key: \":path\"       }     }   } }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-6299606751641600",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \"transfer-encodinG\"       }       headers {         key: \":path\"       }       headers {         key: \":method\"       }     }   } }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/connect",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"CONNECT\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com:80\"\n      }\n    }\n    end_stream: false\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/empty",
    "content": ""
  },
  {
    "path": "test/common/http/codec_impl_corpus/example",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"foo\"\n        value: \"bar\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"foo\"\n        value: \"bar\"\n      }\n    }\n    end_stream: true\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 128000\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 3000000\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 54\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      metadata {\n        metadata {\n\t  key: \"a\"\n\t  value: \"a\"\n\t}\n      }\t       \n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      metadata {\n        metadata {\n\t  key: \"a\"\n\t  value: \"a\"\n\t}\n      }\t       \n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"content-length\"\n      
    value: \"5\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      data: 5\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      read_disable: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      read_disable: false\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      read_disable: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      read_disable: false\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      data: 2\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n        headers {\n          key: \"cookie\"\n          value: \"foo2=bar2\"\n        }\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 3\n    request {\n      reset_stream: 0\n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: 
\":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 4\n    response {\n      reset_stream: 0\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/goaway",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions {\n  mutate {\n    buffer: 2\n    offset: 2\n    value: 8\n  }\n}\nactions {\n  mutate {\n    buffer: 2\n    offset: 3\n    value: 7\n  }\n}\nactions {\n  mutate {\n    buffer: 2\n    offset: 8\n    value: 0\n  }\n}\nactions { quiesce_drain {} }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/h1_dispatch_after_reset",
    "content": "h1_settings {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":scheme\"\n        value: \"blah\"\n      }\n      headers {\n        key: \"content-length\"\n        value: \"55\"\n      }\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n    }\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 73711616\n      end_stream: true\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/head",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"HEAD\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n      end_stream: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/http_10",
    "content": "h1_settings {\n  server {\n    accept_http_10: true\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions {\n  mutate {\n    offset: 13\n    value: 48\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 3000000\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      data: 5\n      end_stream: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/metadata",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\n\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 128000\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      metadata {\n        metadata {\n          key: \"aaaaaaaaaaaaaaaaaaaaaaaa\"\n          value: \"bbbbbbbbbbbbbbbbbbbbbbb\"\n\t      }\n                metadata {\n          key: \"aaaaaaaaaaaaaaaaaaaaaaaa\"\n          value: \"bbbbbbbbbbbbbbbbbbbbbbb\"\n\t              }\n                        metadata {\n          key: \"aaaaaaaaaaaaaaaaaaaaaaaa\"\n          value: \"bbbbbbbbbbbbbbbbbbbbbbb\"\n\t      }\n      }\t       \n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 2\n    response {\n      metadata {\n        metadata {\n          key: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n          value: \"bbbbbbbbbbbbbbbbbbbbbbb\"\n\t      }\n      }\t       \n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers 
{\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/metadata_corrupt",
    "content": "actions {\n new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      metadata {\n        metadata {\n          key: \"header_key1\"\n          value: \"header_value1\"\n\t      }\n                metadata {\n          key: \"header_key2\"\n          value: \"header_value2\"\n\t              }\n                        metadata {\n          key: \"header_key3\"\n          value: \"header_value3\"\n\t      }\n      }\t       \n    }\n  }\n}\nactions {\n\tmutate {\n\tbuffer: 0\n\toffset: 8\n\tvalue: 0\n\t}\n}\nactions { quiesce_drain {} }"
  },
  {
    "path": "test/common/http/codec_impl_corpus/metadata_dispatch",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 128000\n    }\n    dispatching_action {\n       metadata {\n        metadata {\n          key: \"aaaaaaaaaaaaaaaaaaaaaaaa\"\n          value: \"bbbbbbbbbbbbbbbbbbbbbbb\"\n\t      }\n                metadata {\n          key: \"aaaaaaaaaaaaaaaaaaaaaaaa\"\n          value: \"bbbbbbbbbbbbbbbbbbbbbbb\"\n\t              }\n                        metadata {\n          key: \"aaaaaaaaaaaaaaaaaaaaaaaa\"\n          value: \"bbbbbbbbbbbbbbbbbbbbbbb\"\n\t     \t  }\n\t}\n      }\n    \n  }\n}\nactions { quiesce_drain {} }"
  },
  {
    "path": "test/common/http/codec_impl_corpus/method_connect",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"CONNECT\"\n      }\n    }\n  }\n}"
  },
  {
    "path": "test/common/http/codec_impl_corpus/multi_stream",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 3000000\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"content-length\"\n          value: \"5\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      data: 5\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: false\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: false\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      trailers 
{\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 3000000\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 54\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"content-length\"\n          value: \"5\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      data: 5\n      end_stream: true\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/protocol_exception",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n    }\n  }\n}\nactions {\n  mutate {\n    buffer: 0\n    offset: 2\n    value: 123\n  }\n}\nactions { quiesce_drain {} }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/read_disable",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n    }\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  stream_action {\n    response {\n      read_disable: true\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/reset_stream",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"content-length\"\n          value: \"5\"\n        }\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      reset_stream: 1\n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 3000000\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 54\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: 
\"content-length\"\n          value: \"5\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    response {\n      data: 5\n      end_stream: true\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/response_204_A",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n    actions {\n}\nactions {\n  mutate {\n    offset: 255\n    value: 255\n  }\n}\nactions {\n}\nactions {\n}  headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n    }\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"204\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/response_204_B",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n    }\n  }\n}\nactions {\n  client_drain {\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"204\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      data: 64\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/simple_stream",
    "content": "actions {\n  new_stream {\n      metadata {\n        metadata {\n\t\t key: \"\"\n\t\t value: \"\"\n\t}\n\tmetadata {\n\t \t key: \"new_key\"\n\t\t value: \"new_value\"\n\t}\n      }\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 3000000\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"content-length\"\n          value: \"5\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      data: 5\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: false\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n 
   stream_id: 0\n    request {\n      read_disable: false\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/swap_buffer",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 123\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1\n    request {\n      data: 1234\n    }\n  }\n}\nactions {\n  swap_buffer {\n    buffer: 1\n  }\n}\nactions { quiesce_drain {} }\n"
  },
  {
    "path": "test/common/http/codec_impl_corpus/upgrade",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"connection\"\n        value: \"upgrade\"\n      }\n      headers {\n        key: \"upgrade\"\n        value: \"WebSocket\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 3000000\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions { quiesce_drain {} }\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data: 54\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \"connection\"\n          value: \"upgrade\"\n        }\n        headers {\n          key: \"upgrade\"\n          value: \"WebSocket\"\n        }\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"content-length\"\n          value: \"5\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      data: 5\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: false\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      read_disable: true\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      
read_disable: false\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.http;\n\nimport \"google/protobuf/empty.proto\";\n\nimport \"validate/validate.proto\";\nimport \"test/fuzz/common.proto\";\n\n// Structured input for H2 codec_impl_fuzz_test.\n\nmessage NewStream {\n  // Optional metadata  before request headers.\n  // Metadata sent after request headers can be send via a directional action.\n  test.fuzz.Metadata metadata = 3;\n  test.fuzz.Headers request_headers = 1 [(validate.rules).message.required = true];\n  bool end_stream = 2;\n}\n\nmessage DirectionalAction {\n  oneof directional_action_selector {\n    option (validate.required) = true;\n    test.fuzz.Headers continue_headers = 1;\n    test.fuzz.Headers headers = 2;\n    uint32 data = 3;\n    string data_value = 8;\n    test.fuzz.Headers trailers = 4;\n    test.fuzz.Metadata metadata = 9;\n    uint32 reset_stream = 5;\n    bool read_disable = 6;\n  }\n  bool end_stream = 7;\n}\n\nmessage StreamAction {\n  // Index into list of created streams (not HTTP/2 level stream ID).\n  uint32 stream_id = 1;\n  oneof stream_action_selector {\n    option (validate.required) = true;\n    DirectionalAction request = 2;\n    DirectionalAction response = 3;\n  }\n  // Optionally set a dispatching action. This is a directional action that will\n  // be called while the stream action is sending headers, data, or trailers.\n  // This will only apply to request stream actions (so that the dispatching\n  // action occurs in the response direction). This may happen as a result of a\n  // filter sending a direct response.\n  DirectionalAction dispatching_action = 4;\n}\n\nmessage MutateAction {\n  // Buffer index.\n  uint32 buffer = 1;\n  // Offset within buffer.\n  uint32 offset = 2;\n  // Value to set (only lower byte is significant).\n  uint32 value = 3;\n  // Server connection buffer? Otherwise client.\n  bool server = 4;\n}\n\nmessage SwapBufferAction {\n  // Target buffer index to swap with. 
The buffer at index 0 is swapped with the\n  // target buffer.\n  uint32 buffer = 1;\n  // Server connection buffer? Otherwise client.\n  bool server = 2;\n}\n\nmessage Action {\n  oneof action_selector {\n    option (validate.required) = true;\n    // Create new stream.\n    NewStream new_stream = 1;\n    // Perform an action on an existing stream.\n    StreamAction stream_action = 2;\n    // Mutate a connection buffer.\n    MutateAction mutate = 3;\n    // Swap two fragments in a connection buffer.\n    SwapBufferAction swap_buffer = 4;\n    // Drain client connection buffer.\n    google.protobuf.Empty client_drain = 5;\n    // Drain server connection buffer.\n    google.protobuf.Empty server_drain = 6;\n    // Drain client/server buffers alternatively until both are empty.\n    google.protobuf.Empty quiesce_drain = 7;\n  }\n}\n\nmessage Http1ServerSettings {\n  bool allow_absolute_url = 1;\n  bool accept_http_10 = 2;\n  string default_host_for_http_10 = 3;\n}\n\nmessage Http1ClientServerSettings {\n  Http1ServerSettings server = 2;\n}\n\n// Setting X below is interpreted as min_valid_setting + X % (1 +\n// max_valid_setting - min_valid_setting).\nmessage Http2Settings {\n  uint32 hpack_table_size = 1;\n  uint32 max_concurrent_streams = 2;\n  uint32 initial_stream_window_size = 3;\n  uint32 initial_connection_window_size = 4;\n}\n\nmessage Http2ClientServerSettings {\n  Http2Settings client = 1;\n  Http2Settings server = 2;\n}\n\nmessage CodecImplFuzzTestCase {\n  // The fuzzer will run actions on both H1 and H2 codecs. The settings below\n  // provide codec-specific parameters.\n  Http1ClientServerSettings h1_settings = 1;\n  Http2ClientServerSettings h2_settings = 2;\n  repeated Action actions = 3;\n}\n"
  },
  {
    "path": "test/common/http/codec_impl_fuzz_test.cc",
    "content": "#include \"envoy/stats/scope.h\"\n\n// Fuzzer for the H1/H2 codecs. This is similar in structure to\n// //test/common/http/http2:codec_impl_test, where a client H2 codec is wired\n// via shared memory to a server H2 codec and stream actions are applied. We\n// fuzz the various client/server H1/H2 codec API operations and in addition\n// apply fuzzing at the wire level by modeling explicit mutation, reordering and\n// drain operations on the connection buffers between client and server.\n\n#include <functional>\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/http1/codec_impl.h\"\n#include \"common/http/http2/codec_impl.h\"\n\n#include \"test/common/http/codec_impl_fuzz.pb.validate.h\"\n#include \"test/common/http/http2/codec_impl_test_util.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\n\nnamespace Envoy {\nnamespace Http {\n\nnamespace Http2Utility = ::Envoy::Http2::Utility;\n\n// Force drain on each action, useful for figuring out what is going on when\n// debugging.\nconstexpr bool DebugMode = false;\n\ntemplate <class T> T fromSanitizedHeaders(const test::fuzz::Headers& headers) {\n  return Fuzz::fromHeaders<T>(headers, {\"transfer-encoding\"});\n}\n\n// Template specialization for TestRequestHeaderMapImpl to include a Host header. 
This guards\n// against missing host headers in CONNECT requests that would have failed parsing on ingress.\n// TODO(#10878): When proper error handling is introduced for non-dispatching codec calls, remove\n// this and fail gracefully.\ntemplate <>\nTestRequestHeaderMapImpl\nfromSanitizedHeaders<TestRequestHeaderMapImpl>(const test::fuzz::Headers& headers) {\n  return Fuzz::fromHeaders<TestRequestHeaderMapImpl>(headers, {\"transfer-encoding\"},\n                                                     {\":authority\", \":method\", \":path\"});\n}\n\n// Convert from test proto Http1ServerSettings to Http1Settings.\nHttp1Settings fromHttp1Settings(const test::common::http::Http1ServerSettings& settings) {\n  Http1Settings h1_settings;\n\n  h1_settings.allow_absolute_url_ = settings.allow_absolute_url();\n  h1_settings.accept_http_10_ = settings.accept_http_10();\n  h1_settings.default_host_for_http_10_ = settings.default_host_for_http_10();\n\n  return h1_settings;\n}\n\nenvoy::config::core::v3::Http2ProtocolOptions\nfromHttp2Settings(const test::common::http::Http2Settings& settings) {\n  envoy::config::core::v3::Http2ProtocolOptions options(\n      ::Envoy::Http2::Utility::initializeAndValidateOptions(\n          envoy::config::core::v3::Http2ProtocolOptions()));\n  // We apply an offset and modulo interpretation to settings to ensure that\n  // they are valid. 
Rejecting invalid settings is orthogonal to the fuzzed\n  // code.\n  options.mutable_hpack_table_size()->set_value(settings.hpack_table_size());\n  options.mutable_max_concurrent_streams()->set_value(\n      Http2Utility::OptionsLimits::MIN_MAX_CONCURRENT_STREAMS +\n      settings.max_concurrent_streams() %\n          (1 + Http2Utility::OptionsLimits::MAX_MAX_CONCURRENT_STREAMS -\n           Http2Utility::OptionsLimits::MIN_MAX_CONCURRENT_STREAMS));\n  options.mutable_initial_stream_window_size()->set_value(\n      Http2Utility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE +\n      settings.initial_stream_window_size() %\n          (1 + Http2Utility::OptionsLimits::MAX_INITIAL_STREAM_WINDOW_SIZE -\n           Http2Utility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE));\n  options.mutable_initial_connection_window_size()->set_value(\n      Http2Utility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE +\n      settings.initial_connection_window_size() %\n          (1 + Http2Utility::OptionsLimits::MAX_INITIAL_CONNECTION_WINDOW_SIZE -\n           Http2Utility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE));\n  options.set_allow_metadata(true);\n  return options;\n}\n\nusing StreamResetCallbackFn = std::function<void()>;\n\n// Internal representation of stream state. Encapsulates the stream state, mocks\n// and encoders for both the request/response.\nclass HttpStream : public LinkedObject<HttpStream> {\npublic:\n  // We track stream state here to prevent illegal operations, e.g. applying an\n  // encodeData() to the codec after encodeTrailers(). This is necessary to\n  // maintain the preconditions for operations on the codec at the API level. Of\n  // course, it's the codecs must be robust to wire-level violations. 
We\n  // explore these violations via MutateAction and SwapAction at the connection\n  // buffer level.\n  enum class StreamState : int { PendingHeaders, PendingDataOrTrailers, Closed };\n\n  struct DirectionalState {\n    // TODO(mattklein123): Split this more clearly into request and response directional state.\n    RequestEncoder* request_encoder_;\n    ResponseEncoder* response_encoder_;\n    NiceMock<MockResponseDecoder> response_decoder_;\n    NiceMock<MockRequestDecoder> request_decoder_;\n    NiceMock<MockStreamCallbacks> stream_callbacks_;\n    StreamState stream_state_;\n    bool local_closed_{false};\n    bool remote_closed_{false};\n    uint32_t read_disable_count_{};\n\n    bool isLocalOpen() const { return !local_closed_; }\n\n    void closeLocal() {\n      local_closed_ = true;\n      if (local_closed_ && remote_closed_) {\n        stream_state_ = StreamState::Closed;\n      }\n    }\n\n    void closeRemote() {\n      remote_closed_ = true;\n      if (local_closed_ && remote_closed_) {\n        stream_state_ = StreamState::Closed;\n      }\n    }\n  } request_, response_;\n\n  HttpStream(ClientConnection& client, const TestRequestHeaderMapImpl& request_headers,\n             bool end_stream, StreamResetCallbackFn stream_reset_callback)\n      : stream_reset_callback_(stream_reset_callback) {\n    request_.request_encoder_ = &client.newStream(response_.response_decoder_);\n    ON_CALL(request_.stream_callbacks_, onResetStream(_, _))\n        .WillByDefault(InvokeWithoutArgs([this] {\n          ENVOY_LOG_MISC(trace, \"reset request for stream index {}\", stream_index_);\n          resetStream();\n          stream_reset_callback_();\n        }));\n    ON_CALL(response_.stream_callbacks_, onResetStream(_, _))\n        .WillByDefault(InvokeWithoutArgs([this] {\n          ENVOY_LOG_MISC(trace, \"reset response for stream index {}\", stream_index_);\n          // Reset the client stream when we know the server stream has been reset. 
This ensures\n          // that the internal book keeping resetStream() below is consistent with the state of the\n          // client codec state, which is necessary to prevent multiple simultaneous streams for the\n          // HTTP/1 codec.\n          request_.request_encoder_->getStream().resetStream(StreamResetReason::LocalReset);\n          resetStream();\n          stream_reset_callback_();\n        }));\n    ON_CALL(request_.request_decoder_, decodeHeaders_(_, true))\n        .WillByDefault(InvokeWithoutArgs([this] {\n          // The HTTP/1 codec needs this to cleanup any latent stream resources.\n          response_.response_encoder_->getStream().resetStream(StreamResetReason::LocalReset);\n          request_.closeRemote();\n        }));\n    ON_CALL(request_.request_decoder_, decodeData(_, true)).WillByDefault(InvokeWithoutArgs([this] {\n      // The HTTP/1 codec needs this to cleanup any latent stream resources.\n      response_.response_encoder_->getStream().resetStream(StreamResetReason::LocalReset);\n      request_.closeRemote();\n    }));\n    ON_CALL(request_.request_decoder_, decodeTrailers_(_)).WillByDefault(InvokeWithoutArgs([this] {\n      // The HTTP/1 codec needs this to cleanup any latent stream resources.\n      response_.response_encoder_->getStream().resetStream(StreamResetReason::LocalReset);\n      request_.closeRemote();\n    }));\n    ON_CALL(response_.response_decoder_, decodeHeaders_(_, true))\n        .WillByDefault(InvokeWithoutArgs([this] { response_.closeRemote(); }));\n    ON_CALL(response_.response_decoder_, decodeData(_, true))\n        .WillByDefault(InvokeWithoutArgs([this] { response_.closeRemote(); }));\n    ON_CALL(response_.response_decoder_, decodeTrailers_(_))\n        .WillByDefault(InvokeWithoutArgs([this] { response_.closeRemote(); }));\n    if (!end_stream) {\n      request_.request_encoder_->getStream().addCallbacks(request_.stream_callbacks_);\n    }\n\n    
request_.request_encoder_->encodeHeaders(request_headers, end_stream);\n    request_.stream_state_ = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers;\n    response_.stream_state_ = StreamState::PendingHeaders;\n  }\n\n  void resetStream() {\n    request_.closeLocal();\n    request_.closeRemote();\n    response_.closeLocal();\n    response_.closeRemote();\n  }\n\n  // Some stream action applied in either the request or response direction.\n  void directionalAction(DirectionalState& state,\n                         const test::common::http::DirectionalAction& directional_action) {\n    const bool end_stream = directional_action.end_stream();\n    const bool response = &state == &response_;\n    switch (directional_action.directional_action_selector_case()) {\n    case test::common::http::DirectionalAction::kContinueHeaders: {\n      if (state.isLocalOpen() && state.stream_state_ == StreamState::PendingHeaders) {\n        auto headers =\n            fromSanitizedHeaders<TestResponseHeaderMapImpl>(directional_action.continue_headers());\n        headers.setReferenceKey(Headers::get().Status, \"100\");\n        state.response_encoder_->encode100ContinueHeaders(headers);\n      }\n      break;\n    }\n    case test::common::http::DirectionalAction::kHeaders: {\n      if (state.isLocalOpen() && state.stream_state_ == StreamState::PendingHeaders) {\n        if (response) {\n          auto headers =\n              fromSanitizedHeaders<TestResponseHeaderMapImpl>(directional_action.headers());\n          if (headers.Status() == nullptr) {\n            headers.setReferenceKey(Headers::get().Status, \"200\");\n          }\n          state.response_encoder_->encodeHeaders(headers, end_stream);\n        } else {\n          state.request_encoder_->encodeHeaders(\n              fromSanitizedHeaders<TestRequestHeaderMapImpl>(directional_action.headers()),\n              end_stream);\n        }\n        if (end_stream) {\n          state.closeLocal();\n        } 
else {\n          state.stream_state_ = StreamState::PendingDataOrTrailers;\n        }\n      }\n      break;\n    }\n    case test::common::http::DirectionalAction::kData: {\n      if (state.isLocalOpen() && state.stream_state_ == StreamState::PendingDataOrTrailers) {\n        Buffer::OwnedImpl buf(std::string(directional_action.data() % (1024 * 1024), 'a'));\n        if (response) {\n          state.response_encoder_->encodeData(buf, end_stream);\n        } else {\n          state.request_encoder_->encodeData(buf, end_stream);\n        }\n        if (end_stream) {\n          state.closeLocal();\n        }\n      }\n      break;\n    }\n    case test::common::http::DirectionalAction::kDataValue: {\n      if (state.isLocalOpen() && state.stream_state_ == StreamState::PendingDataOrTrailers) {\n        Buffer::OwnedImpl buf(directional_action.data_value());\n        if (response) {\n          state.response_encoder_->encodeData(buf, end_stream);\n        } else {\n          state.request_encoder_->encodeData(buf, end_stream);\n        }\n        if (end_stream) {\n          state.closeLocal();\n        }\n      }\n      break;\n    }\n    case test::common::http::DirectionalAction::kTrailers: {\n      if (state.isLocalOpen() && state.stream_state_ == StreamState::PendingDataOrTrailers) {\n        if (response) {\n          state.response_encoder_->encodeTrailers(\n              fromSanitizedHeaders<TestResponseTrailerMapImpl>(directional_action.trailers()));\n        } else {\n          state.request_encoder_->encodeTrailers(\n              fromSanitizedHeaders<TestRequestTrailerMapImpl>(directional_action.trailers()));\n        }\n        state.stream_state_ = StreamState::Closed;\n        state.closeLocal();\n      }\n      break;\n    }\n    case test::common::http::DirectionalAction::kMetadata: {\n      if (state.isLocalOpen() && state.stream_state_ != StreamState::Closed) {\n        if (response) {\n          state.response_encoder_->encodeMetadata(\n            
  Fuzz::fromMetadata(directional_action.metadata()));\n        } else {\n          state.request_encoder_->encodeMetadata(Fuzz::fromMetadata(directional_action.metadata()));\n        }\n      }\n      break;\n    }\n    case test::common::http::DirectionalAction::kResetStream: {\n      if (state.stream_state_ != StreamState::Closed) {\n        StreamEncoder* encoder;\n        if (response) {\n          encoder = state.response_encoder_;\n        } else {\n          encoder = state.request_encoder_;\n        }\n        encoder->getStream().resetStream(\n            static_cast<Http::StreamResetReason>(directional_action.reset_stream()));\n        request_.stream_state_ = response_.stream_state_ = StreamState::Closed;\n      }\n      break;\n    }\n    case test::common::http::DirectionalAction::kReadDisable: {\n      if (state.stream_state_ != StreamState::Closed) {\n        const bool disable = directional_action.read_disable();\n        if (state.read_disable_count_ == 0 && !disable) {\n          return;\n        }\n        if (disable) {\n          ++state.read_disable_count_;\n        } else {\n          --state.read_disable_count_;\n        }\n        StreamEncoder* encoder;\n        if (response) {\n          encoder = state.response_encoder_;\n        } else {\n          encoder = state.request_encoder_;\n        }\n        encoder->getStream().readDisable(disable);\n      }\n      break;\n    }\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n  }\n\n  void streamAction(const test::common::http::StreamAction& stream_action) {\n    switch (stream_action.stream_action_selector_case()) {\n    case test::common::http::StreamAction::kRequest: {\n      ENVOY_LOG_MISC(debug, \"Request stream action on {} in state {} {}\", stream_index_,\n                     static_cast<int>(request_.stream_state_),\n                     static_cast<int>(response_.stream_state_));\n      if (stream_action.has_dispatching_action()) {\n        // Simulate some 
response action while dispatching request headers, data, or trailers. This\n        // may happen as a result of a filter sending a direct response.\n        ENVOY_LOG_MISC(debug, \"Setting dispatching action  on {} in state {} {}\", stream_index_,\n                       static_cast<int>(request_.stream_state_),\n                       static_cast<int>(response_.stream_state_));\n        auto request_action = stream_action.dispatching_action().directional_action_selector_case();\n        if (request_action == test::common::http::DirectionalAction::kHeaders) {\n          EXPECT_CALL(request_.request_decoder_, decodeHeaders_(_, _))\n              .WillOnce(InvokeWithoutArgs(\n                  [&] { directionalAction(response_, stream_action.dispatching_action()); }));\n        } else if (request_action == test::common::http::DirectionalAction::kData) {\n          EXPECT_CALL(request_.request_decoder_, decodeData(_, _))\n              .Times(testing::AtLeast(1))\n              .WillRepeatedly(InvokeWithoutArgs(\n                  [&] { directionalAction(response_, stream_action.dispatching_action()); }));\n        } else if (request_action == test::common::http::DirectionalAction::kTrailers) {\n          EXPECT_CALL(request_.request_decoder_, decodeTrailers_(_))\n              .WillOnce(InvokeWithoutArgs(\n                  [&] { directionalAction(response_, stream_action.dispatching_action()); }));\n        }\n      }\n      // Perform the stream action.\n      directionalAction(request_, stream_action.request());\n      break;\n    }\n    case test::common::http::StreamAction::kResponse: {\n      ENVOY_LOG_MISC(debug, \"Response stream action on {} in state {} {}\", stream_index_,\n                     static_cast<int>(request_.stream_state_),\n                     static_cast<int>(response_.stream_state_));\n      directionalAction(response_, stream_action.response());\n      break;\n    }\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n    
ENVOY_LOG_MISC(debug, \"Stream action complete\");\n  }\n\n  bool active() const {\n    return request_.stream_state_ != StreamState::Closed ||\n           response_.stream_state_ != StreamState::Closed;\n  }\n\n  int32_t stream_index_{-1};\n  StreamResetCallbackFn stream_reset_callback_;\n};\n\n// Buffer between client and server H1/H2 codecs. This models each write operation\n// as adding a distinct fragment that might be reordered with other fragments in\n// the buffer via swap() or modified with mutate().\nclass ReorderBuffer {\npublic:\n  ReorderBuffer(Connection& connection, const bool& should_close_connection)\n      : connection_(connection), should_close_connection_(should_close_connection) {}\n\n  void add(Buffer::Instance& data) {\n    bufs_.emplace_back();\n    bufs_.back().move(data);\n  }\n\n  Http::Status drain() {\n    Status status = Http::okStatus();\n    while (!bufs_.empty()) {\n      Buffer::OwnedImpl& buf = bufs_.front();\n      while (buf.length() > 0) {\n        if (should_close_connection_) {\n          ENVOY_LOG_MISC(trace, \"Buffer dispatch disabled, stopping drain\");\n          return codecClientError(\"preventing buffer drain due to connection closure\");\n        }\n        status = connection_.dispatch(buf);\n        if (!status.ok()) {\n          ENVOY_LOG_MISC(trace, \"Error status: {}\", status.message());\n          return status;\n        }\n      }\n      bufs_.pop_front();\n    }\n    return status;\n  }\n\n  void mutate(uint32_t buffer, uint32_t offset, uint8_t value) {\n    if (bufs_.empty()) {\n      return;\n    }\n    Buffer::OwnedImpl& buf = bufs_[buffer % bufs_.size()];\n    if (buf.length() == 0) {\n      return;\n    }\n    uint8_t* p = reinterpret_cast<uint8_t*>(buf.linearize(buf.length())) + offset % buf.length();\n    ENVOY_LOG_MISC(trace, \"Mutating {} to {}\", *p, value);\n    *p = value;\n  }\n\n  void swap(uint32_t buffer) {\n    if (bufs_.empty()) {\n      return;\n    }\n    const uint32_t effective_index = 
buffer % bufs_.size();\n    if (effective_index == 0) {\n      return;\n    }\n    Buffer::OwnedImpl tmp;\n    tmp.move(bufs_[0]);\n    bufs_[0].move(bufs_[effective_index]);\n    bufs_[effective_index].move(tmp);\n  }\n\n  bool empty() const { return bufs_.empty(); }\n\n  Connection& connection_;\n  std::deque<Buffer::OwnedImpl> bufs_;\n  // A reference to a flag indicating whether the reorder buffer is allowed to dispatch data to\n  // the connection (reference to should_close_connection).\n  const bool& should_close_connection_;\n};\n\nusing HttpStreamPtr = std::unique_ptr<HttpStream>;\n\nnamespace {\n\nenum class HttpVersion { Http1, Http2 };\n\nvoid codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersion http_version) {\n  Stats::IsolatedStoreImpl stats_store;\n  NiceMock<Network::MockConnection> client_connection;\n  const envoy::config::core::v3::Http2ProtocolOptions client_http2_options{\n      fromHttp2Settings(input.h2_settings().client())};\n  const Http1Settings client_http1settings;\n  NiceMock<MockConnectionCallbacks> client_callbacks;\n  NiceMock<Network::MockConnection> server_connection;\n  NiceMock<MockServerConnectionCallbacks> server_callbacks;\n  NiceMock<Random::MockRandomGenerator> random;\n  uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB;\n  uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT;\n  uint32_t max_response_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT;\n  const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n      headers_with_underscores_action = envoy::config::core::v3::HttpProtocolOptions::ALLOW;\n\n  Http1::CodecStats::AtomicPtr http1_stats;\n  Http2::CodecStats::AtomicPtr http2_stats;\n  ClientConnectionPtr client;\n  ServerConnectionPtr server;\n  const bool http2 = http_version == HttpVersion::Http2;\n\n  if (http2) {\n    client = std::make_unique<Http2::ClientConnectionImpl>(\n        client_connection, client_callbacks, 
Http2::CodecStats::atomicGet(http2_stats, stats_store),\n        random, client_http2_options, max_request_headers_kb, max_response_headers_count,\n        Http2::ProdNghttp2SessionFactory::get());\n  } else {\n    client = std::make_unique<Http1::ClientConnectionImpl>(\n        client_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), client_callbacks,\n        client_http1settings, max_response_headers_count);\n  }\n\n  if (http2) {\n    const envoy::config::core::v3::Http2ProtocolOptions server_http2_options{\n        fromHttp2Settings(input.h2_settings().server())};\n    server = std::make_unique<Http2::ServerConnectionImpl>(\n        server_connection, server_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store),\n        random, server_http2_options, max_request_headers_kb, max_request_headers_count,\n        headers_with_underscores_action);\n  } else {\n    const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())};\n    server = std::make_unique<Http1::ServerConnectionImpl>(\n        server_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), server_callbacks,\n        server_http1settings, max_request_headers_kb, max_request_headers_count,\n        headers_with_underscores_action);\n  }\n\n  // We track whether the connection should be closed for HTTP/1, since stream resets imply\n  // connection closes.\n  bool should_close_connection = false;\n\n  // The buffers will be blocked from dispatching data if should_close_connection is set to true.\n  // This prevents sending data if a stream reset occurs during the test cleanup when using HTTP/1.\n  ReorderBuffer client_write_buf{*server, should_close_connection};\n  ReorderBuffer server_write_buf{*client, should_close_connection};\n\n  ON_CALL(client_connection, write(_, _))\n      .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {\n        ENVOY_LOG_MISC(trace, \"client -> server {} bytes\", data.length());\n        
client_write_buf.add(data);\n      }));\n  ON_CALL(server_connection, write(_, _))\n      .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {\n        ENVOY_LOG_MISC(trace, \"server -> client {} bytes: {}\", data.length(), data.toString());\n        server_write_buf.add(data);\n      }));\n\n  // We hold Streams in pending_streams between the request encodeHeaders in the\n  // Stream constructor and server newStream() callback, where we learn about\n  // the response encoder and can complete Stream initialization.\n  std::list<HttpStreamPtr> pending_streams;\n  std::list<HttpStreamPtr> streams;\n  // For new streams when we aren't expecting one (e.g. as a result of a mutation).\n  NiceMock<MockRequestDecoder> orphan_request_decoder;\n\n  ON_CALL(server_callbacks, newStream(_, _))\n      .WillByDefault(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        if (pending_streams.empty()) {\n          return orphan_request_decoder;\n        }\n        auto stream_ptr = pending_streams.front()->removeFromList(pending_streams);\n        HttpStream* const stream = stream_ptr.get();\n        LinkedList::moveIntoListBack(std::move(stream_ptr), streams);\n        stream->response_.response_encoder_ = &encoder;\n        encoder.getStream().addCallbacks(stream->response_.stream_callbacks_);\n        stream->stream_index_ = streams.size() - 1;\n        return stream->request_.request_decoder_;\n      }));\n\n  auto client_server_buf_drain = [&client_write_buf, &server_write_buf] {\n    Http::Status status = Http::okStatus();\n    while (!client_write_buf.empty() || !server_write_buf.empty()) {\n      status = client_write_buf.drain();\n      if (!status.ok()) {\n        return status;\n      }\n      status = server_write_buf.drain();\n      if (!status.ok()) {\n        return status;\n      }\n    }\n    return status;\n  };\n\n  constexpr auto max_actions = 1024;\n  bool codec_error = false;\n  for (int i = 0; i < std::min(max_actions, 
input.actions().size()) && !should_close_connection &&\n                  !codec_error;\n       ++i) {\n    const auto& action = input.actions(i);\n    ENVOY_LOG_MISC(trace, \"action {} with {} streams\", action.DebugString(), streams.size());\n    switch (action.action_selector_case()) {\n    case test::common::http::Action::kNewStream: {\n      if (!http2) {\n        // HTTP/1 codec needs to have existing streams complete, so make it\n        // easier to achieve a successful multi-stream example by flushing.\n        if (!client_server_buf_drain().ok()) {\n          codec_error = true;\n          break;\n        }\n        // HTTP/1 client codec can only have a single active stream.\n        if (!pending_streams.empty() || (!streams.empty() && streams.back()->active())) {\n          ENVOY_LOG_MISC(trace, \"Skipping new stream as HTTP/1 and already have existing stream\");\n          continue;\n        }\n      }\n      HttpStreamPtr stream = std::make_unique<HttpStream>(\n          *client,\n          fromSanitizedHeaders<TestRequestHeaderMapImpl>(action.new_stream().request_headers()),\n          action.new_stream().end_stream(), [&should_close_connection, http2]() {\n            // HTTP/1 codec has stream reset implying connection close.\n            if (!http2) {\n              should_close_connection = true;\n            }\n          });\n      LinkedList::moveIntoListBack(std::move(stream), pending_streams);\n      break;\n    }\n    case test::common::http::Action::kStreamAction: {\n      const auto& stream_action = action.stream_action();\n      if (streams.empty()) {\n        break;\n      }\n      // Index into list of created streams (not HTTP/2 level stream ID).\n      const uint32_t stream_id = stream_action.stream_id() % streams.size();\n      ENVOY_LOG_MISC(trace, \"action for stream index {}\", stream_id);\n      (*std::next(streams.begin(), stream_id))->streamAction(stream_action);\n      break;\n    }\n    case 
test::common::http::Action::kMutate: {\n      const auto& mutate = action.mutate();\n      ReorderBuffer& write_buf = mutate.server() ? server_write_buf : client_write_buf;\n      write_buf.mutate(mutate.buffer(), mutate.offset(), mutate.value());\n      break;\n    }\n    case test::common::http::Action::kSwapBuffer: {\n      const auto& swap_buffer = action.swap_buffer();\n      ReorderBuffer& write_buf = swap_buffer.server() ? server_write_buf : client_write_buf;\n      write_buf.swap(swap_buffer.buffer());\n      break;\n    }\n    case test::common::http::Action::kClientDrain: {\n      if (!client_write_buf.drain().ok()) {\n        codec_error = true;\n        break;\n      }\n      break;\n    }\n    case test::common::http::Action::kServerDrain: {\n      if (!server_write_buf.drain().ok()) {\n        codec_error = true;\n        break;\n      }\n      break;\n    }\n    case test::common::http::Action::kQuiesceDrain: {\n      if (!client_server_buf_drain().ok()) {\n        codec_error = true;\n        break;\n      }\n      break;\n    }\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n    if (DebugMode && !should_close_connection && !codec_error) {\n      if (!client_server_buf_drain().ok()) {\n        codec_error = true;\n        break;\n      }\n    }\n  }\n  // Drain all remaining buffers, unless the connection is effectively closed.\n  if (!should_close_connection && !codec_error) {\n    if (!client_server_buf_drain().ok()) {\n      codec_error = true;\n    }\n  }\n  if (!codec_error && http2) {\n    dynamic_cast<Http2::ClientConnectionImpl&>(*client).goAway();\n    dynamic_cast<Http2::ServerConnectionImpl&>(*server).goAway();\n  }\n}\n\n} // namespace\n\n// Fuzz the H1/H2 codec implementations.\nDEFINE_PROTO_FUZZER(const test::common::http::CodecImplFuzzTestCase& input) {\n  try {\n    // Validate input early.\n    TestUtility::validate(input);\n    codecFuzz(input, HttpVersion::Http1);\n    codecFuzz(input, HttpVersion::Http2);\n  } 
catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/codec_wrappers_test.cc",
    "content": "#include \"common/http/codec_wrappers.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Http {\n\nclass MockRequestEncoderWrapper : public RequestEncoderWrapper {\npublic:\n  MockRequestEncoderWrapper() : RequestEncoderWrapper(inner_encoder_) {}\n  void onEncodeComplete() override { encode_complete_ = true; }\n\n  MockRequestEncoder& innerEncoder() { return inner_encoder_; }\n  bool encodeComplete() const { return encode_complete_; }\n\nprivate:\n  MockRequestEncoder inner_encoder_;\n  bool encode_complete_{};\n};\n\nTEST(RequestEncoderWrapper, HeaderOnlyEncode) {\n  MockRequestEncoderWrapper wrapper;\n\n  EXPECT_CALL(wrapper.innerEncoder(), encodeHeaders(_, true));\n  wrapper.encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}, {\":authority\", \"foo\"}}, true);\n  EXPECT_TRUE(wrapper.encodeComplete());\n}\n\nTEST(RequestEncoderWrapper, HeaderAndBodyEncode) {\n  MockRequestEncoderWrapper wrapper;\n\n  EXPECT_CALL(wrapper.innerEncoder(), encodeHeaders(_, false));\n  wrapper.encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}, {\":authority\", \"foo\"}}, false);\n  EXPECT_FALSE(wrapper.encodeComplete());\n\n  Buffer::OwnedImpl data;\n  EXPECT_CALL(wrapper.innerEncoder(), encodeData(_, true));\n  wrapper.encodeData(data, true);\n  EXPECT_TRUE(wrapper.encodeComplete());\n}\n\nTEST(RequestEncoderWrapper, HeaderAndBodyAndTrailersEncode) {\n  MockRequestEncoderWrapper wrapper;\n\n  EXPECT_CALL(wrapper.innerEncoder(), encodeHeaders(_, false));\n  wrapper.encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}, {\":authority\", \"foo\"}}, false);\n  EXPECT_FALSE(wrapper.encodeComplete());\n\n  Buffer::OwnedImpl data;\n  EXPECT_CALL(wrapper.innerEncoder(), encodeData(_, false));\n  wrapper.encodeData(data, false);\n  EXPECT_FALSE(wrapper.encodeComplete());\n\n  
EXPECT_CALL(wrapper.innerEncoder(), encodeTrailers(_));\n  wrapper.encodeTrailers(TestRequestTrailerMapImpl{{\"trailing\", \"header\"}});\n  EXPECT_TRUE(wrapper.encodeComplete());\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/codes_speed_test.cc",
    "content": "// Note: this should be run with --compilation_mode=opt, and would benefit from a\n// quiescent system with disabled cstate power management.\n\n#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/stats/stats.h\"\n\n#include \"common/http/codes.h\"\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\nnamespace Http {\n\ntemplate <class SymbolTableClass> class CodeUtilitySpeedTest {\npublic:\n  CodeUtilitySpeedTest()\n      : global_store_(symbol_table_), cluster_scope_(symbol_table_), code_stats_(symbol_table_),\n        pool_(symbol_table_), from_az_(pool_.add(\"from_az\")), prefix_(pool_.add(\"prefix\")),\n        req_vcluster_name_(pool_.add(\"req_vcluster_name\")),\n        test_cluster_(pool_.add(\"test-cluster\")), test_vhost_(pool_.add(\"test-vhost\")),\n        to_az_(pool_.add(\"to_az\")), vhost_name_(pool_.add(\"vhost_name\")) {}\n\n  void addResponse(uint64_t code, bool canary, bool internal_request,\n                   Stats::StatName request_vhost_name = Stats::StatName(),\n                   Stats::StatName request_vcluster_name = Stats::StatName(),\n                   Stats::StatName from_az = Stats::StatName(),\n                   Stats::StatName to_az = Stats::StatName()) {\n    Http::CodeStats::ResponseStatInfo info{\n        global_store_,      cluster_scope_,        prefix_, code,  internal_request,\n        request_vhost_name, request_vcluster_name, from_az, to_az, canary};\n\n    code_stats_.chargeResponseStat(info);\n  }\n\n  void addResponses() {\n    addResponse(201, false, false);\n    addResponse(301, false, true);\n    addResponse(401, false, false);\n    addResponse(501, false, true);\n    addResponse(200, true, true);\n    addResponse(300, false, false);\n    Stats::StatName empty_stat_name;\n    addResponse(500, true, false);\n    addResponse(200, false, false, test_vhost_, 
test_cluster_);\n    addResponse(200, false, false, empty_stat_name, empty_stat_name, from_az_, to_az_);\n  }\n\n  void responseTiming() {\n    Http::CodeStats::ResponseTimingInfo info{\n        global_store_, cluster_scope_, prefix_,     std::chrono::milliseconds(5),\n        true,          true,           vhost_name_, req_vcluster_name_,\n        from_az_,      to_az_};\n    code_stats_.chargeResponseTiming(info);\n  }\n\n  SymbolTableClass symbol_table_;\n  Stats::IsolatedStoreImpl global_store_;\n  Stats::IsolatedStoreImpl cluster_scope_;\n  Http::CodeStatsImpl code_stats_;\n  Stats::StatNamePool pool_;\n  const Stats::StatName from_az_;\n  const Stats::StatName prefix_;\n  const Stats::StatName req_vcluster_name_;\n  const Stats::StatName test_cluster_;\n  const Stats::StatName test_vhost_;\n  const Stats::StatName to_az_;\n  const Stats::StatName vhost_name_;\n};\n\n} // namespace Http\n} // namespace Envoy\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_AddResponsesRealSymtab(benchmark::State& state) {\n  Envoy::Http::CodeUtilitySpeedTest<Envoy::Stats::SymbolTableImpl> context;\n\n  for (auto _ : state) {\n    context.addResponses();\n  }\n}\nBENCHMARK(BM_AddResponsesRealSymtab);\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_ResponseTimingRealSymtab(benchmark::State& state) {\n  Envoy::Http::CodeUtilitySpeedTest<Envoy::Stats::SymbolTableImpl> context;\n\n  for (auto _ : state) {\n    context.responseTiming();\n  }\n}\nBENCHMARK(BM_ResponseTimingRealSymtab);\n"
  },
  {
    "path": "test/common/http/codes_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/stats/stats.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Property;\n\nnamespace Envoy {\nnamespace Http {\n\nclass CodeUtilityTest : public testing::Test {\npublic:\n  CodeUtilityTest()\n      : global_store_(*symbol_table_), cluster_scope_(*symbol_table_), code_stats_(*symbol_table_),\n        pool_(*symbol_table_) {}\n\n  void addResponse(uint64_t code, bool canary, bool internal_request,\n                   const std::string& request_vhost_name = EMPTY_STRING,\n                   const std::string& request_vcluster_name = EMPTY_STRING,\n                   const std::string& from_az = EMPTY_STRING,\n                   const std::string& to_az = EMPTY_STRING) {\n    Stats::StatName prefix = pool_.add(\"prefix\");\n    Stats::StatName from_zone = pool_.add(from_az);\n    Stats::StatName to_zone = pool_.add(to_az);\n    Stats::StatName vhost_name = pool_.add(request_vhost_name);\n    Stats::StatName vcluster_name = pool_.add(request_vcluster_name);\n    Http::CodeStats::ResponseStatInfo info{\n        global_store_, cluster_scope_, prefix,    code,    internal_request,\n        vhost_name,    vcluster_name,  from_zone, to_zone, canary};\n\n    code_stats_.chargeResponseStat(info);\n  }\n\n  Stats::TestSymbolTable symbol_table_;\n  Stats::TestUtil::TestStore global_store_;\n  Stats::TestUtil::TestStore cluster_scope_;\n  Http::CodeStatsImpl code_stats_;\n  Stats::StatNamePool pool_;\n};\n\nTEST_F(CodeUtilityTest, GroupStrings) {\n  EXPECT_EQ(\"1xx\", CodeUtility::groupStringForResponseCode(Code::SwitchingProtocols));\n  EXPECT_EQ(\"2xx\", 
CodeUtility::groupStringForResponseCode(Code::OK));\n  EXPECT_EQ(\"3xx\", CodeUtility::groupStringForResponseCode(Code::Found));\n  EXPECT_EQ(\"4xx\", CodeUtility::groupStringForResponseCode(Code::NotFound));\n  EXPECT_EQ(\"5xx\", CodeUtility::groupStringForResponseCode(Code::NotImplemented));\n  EXPECT_EQ(\"\", CodeUtility::groupStringForResponseCode(static_cast<Code>(600)));\n}\n\nTEST_F(CodeUtilityTest, NoCanary) {\n  addResponse(201, false, false);\n  addResponse(301, false, true);\n  addResponse(401, false, false);\n  addResponse(501, false, true);\n\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_2xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_201\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.external.upstream_rq_2xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.external.upstream_rq_201\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_3xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_301\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.internal.upstream_rq_3xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.internal.upstream_rq_301\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_4xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_401\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.external.upstream_rq_4xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.external.upstream_rq_401\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_5xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_501\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.internal.upstream_rq_5xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.internal.upstream_rq_501\").value());\n\n  EXPECT_EQ(4U, cluster_scope_.counter(\"prefix.upstream_rq_completed\").value());\n  EXPECT_EQ(2U, 
cluster_scope_.counter(\"prefix.external.upstream_rq_completed\").value());\n  EXPECT_EQ(2U, cluster_scope_.counter(\"prefix.internal.upstream_rq_completed\").value());\n\n  EXPECT_EQ(19U, cluster_scope_.counters().size());\n}\n\nTEST_F(CodeUtilityTest, Canary) {\n  addResponse(100, true, true);\n  addResponse(200, true, true);\n  addResponse(300, false, false);\n  addResponse(500, true, false);\n\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_1xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_100\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.internal.upstream_rq_1xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.internal.upstream_rq_100\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.canary.upstream_rq_1xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.canary.upstream_rq_100\").value());\n\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_2xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_200\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.internal.upstream_rq_2xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.internal.upstream_rq_200\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.canary.upstream_rq_2xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.canary.upstream_rq_200\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_3xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_300\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.external.upstream_rq_3xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.external.upstream_rq_300\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_5xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.upstream_rq_500\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.external.upstream_rq_5xx\").value());\n  EXPECT_EQ(1U, 
cluster_scope_.counter(\"prefix.external.upstream_rq_500\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.canary.upstream_rq_5xx\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.canary.upstream_rq_500\").value());\n\n  EXPECT_EQ(4U, cluster_scope_.counter(\"prefix.upstream_rq_completed\").value());\n  EXPECT_EQ(2U, cluster_scope_.counter(\"prefix.external.upstream_rq_completed\").value());\n  EXPECT_EQ(2U, cluster_scope_.counter(\"prefix.internal.upstream_rq_completed\").value());\n  EXPECT_EQ(3U, cluster_scope_.counter(\"prefix.canary.upstream_rq_completed\").value());\n\n  EXPECT_EQ(26U, cluster_scope_.counters().size());\n}\n\nTEST_F(CodeUtilityTest, UnknownResponseCodes) {\n  addResponse(23, true, true);\n  addResponse(600, false, false);\n  addResponse(1000000, false, true);\n\n  EXPECT_EQ(3U, cluster_scope_.counter(\"prefix.upstream_rq_unknown\").value());\n  EXPECT_EQ(2U, cluster_scope_.counter(\"prefix.internal.upstream_rq_unknown\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.canary.upstream_rq_unknown\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.external.upstream_rq_unknown\").value());\n\n  EXPECT_EQ(8U, cluster_scope_.counters().size());\n}\n\nTEST_F(CodeUtilityTest, All) {\n  const std::vector<std::pair<Code, std::string>> test_set = {\n      std::make_pair(Code::Continue, \"Continue\"),\n      std::make_pair(Code::SwitchingProtocols, \"Switching Protocols\"),\n      std::make_pair(Code::OK, \"OK\"),\n      std::make_pair(Code::Created, \"Created\"),\n      std::make_pair(Code::Accepted, \"Accepted\"),\n      std::make_pair(Code::NonAuthoritativeInformation, \"Non-Authoritative Information\"),\n      std::make_pair(Code::NoContent, \"No Content\"),\n      std::make_pair(Code::ResetContent, \"Reset Content\"),\n      std::make_pair(Code::PartialContent, \"Partial Content\"),\n      std::make_pair(Code::MultiStatus, \"Multi-Status\"),\n      std::make_pair(Code::AlreadyReported, \"Already 
Reported\"),\n      std::make_pair(Code::IMUsed, \"IM Used\"),\n      std::make_pair(Code::MultipleChoices, \"Multiple Choices\"),\n      std::make_pair(Code::MovedPermanently, \"Moved Permanently\"),\n      std::make_pair(Code::Found, \"Found\"),\n      std::make_pair(Code::SeeOther, \"See Other\"),\n      std::make_pair(Code::NotModified, \"Not Modified\"),\n      std::make_pair(Code::UseProxy, \"Use Proxy\"),\n      std::make_pair(Code::TemporaryRedirect, \"Temporary Redirect\"),\n      std::make_pair(Code::PermanentRedirect, \"Permanent Redirect\"),\n      std::make_pair(Code::BadRequest, \"Bad Request\"),\n      std::make_pair(Code::Unauthorized, \"Unauthorized\"),\n      std::make_pair(Code::PaymentRequired, \"Payment Required\"),\n      std::make_pair(Code::Forbidden, \"Forbidden\"),\n      std::make_pair(Code::NotFound, \"Not Found\"),\n      std::make_pair(Code::MethodNotAllowed, \"Method Not Allowed\"),\n      std::make_pair(Code::NotAcceptable, \"Not Acceptable\"),\n      std::make_pair(Code::ProxyAuthenticationRequired, \"Proxy Authentication Required\"),\n      std::make_pair(Code::RequestTimeout, \"Request Timeout\"),\n      std::make_pair(Code::Conflict, \"Conflict\"),\n      std::make_pair(Code::Gone, \"Gone\"),\n      std::make_pair(Code::LengthRequired, \"Length Required\"),\n      std::make_pair(Code::PreconditionFailed, \"Precondition Failed\"),\n      std::make_pair(Code::PayloadTooLarge, \"Payload Too Large\"),\n      std::make_pair(Code::URITooLong, \"URI Too Long\"),\n      std::make_pair(Code::UnsupportedMediaType, \"Unsupported Media Type\"),\n      std::make_pair(Code::RangeNotSatisfiable, \"Range Not Satisfiable\"),\n      std::make_pair(Code::ExpectationFailed, \"Expectation Failed\"),\n      std::make_pair(Code::MisdirectedRequest, \"Misdirected Request\"),\n      std::make_pair(Code::UnprocessableEntity, \"Unprocessable Entity\"),\n      std::make_pair(Code::Locked, \"Locked\"),\n      std::make_pair(Code::FailedDependency, \"Failed 
Dependency\"),\n      std::make_pair(Code::UpgradeRequired, \"Upgrade Required\"),\n      std::make_pair(Code::PreconditionRequired, \"Precondition Required\"),\n      std::make_pair(Code::TooManyRequests, \"Too Many Requests\"),\n      std::make_pair(Code::RequestHeaderFieldsTooLarge, \"Request Header Fields Too Large\"),\n      std::make_pair(Code::InternalServerError, \"Internal Server Error\"),\n      std::make_pair(Code::NotImplemented, \"Not Implemented\"),\n      std::make_pair(Code::BadGateway, \"Bad Gateway\"),\n      std::make_pair(Code::ServiceUnavailable, \"Service Unavailable\"),\n      std::make_pair(Code::GatewayTimeout, \"Gateway Timeout\"),\n      std::make_pair(Code::HTTPVersionNotSupported, \"HTTP Version Not Supported\"),\n      std::make_pair(Code::VariantAlsoNegotiates, \"Variant Also Negotiates\"),\n      std::make_pair(Code::InsufficientStorage, \"Insufficient Storage\"),\n      std::make_pair(Code::LoopDetected, \"Loop Detected\"),\n      std::make_pair(Code::NotExtended, \"Not Extended\"),\n      std::make_pair(Code::NetworkAuthenticationRequired, \"Network Authentication Required\"),\n      std::make_pair(static_cast<Code>(600), \"Unknown\")};\n\n  for (const auto& test_case : test_set) {\n    EXPECT_EQ(test_case.second, CodeUtility::toString(test_case.first));\n  }\n\n  EXPECT_EQ(std::string(\"Unknown\"), CodeUtility::toString(static_cast<Code>(600)));\n}\n\nTEST_F(CodeUtilityTest, RequestVirtualCluster) {\n  addResponse(200, false, false, \"test-vhost\", \"test-cluster\");\n\n  EXPECT_EQ(1U,\n            global_store_.counter(\"vhost.test-vhost.vcluster.test-cluster.upstream_rq_completed\")\n                .value());\n  EXPECT_EQ(\n      1U, global_store_.counter(\"vhost.test-vhost.vcluster.test-cluster.upstream_rq_2xx\").value());\n  EXPECT_EQ(\n      1U, global_store_.counter(\"vhost.test-vhost.vcluster.test-cluster.upstream_rq_200\").value());\n}\n\nTEST_F(CodeUtilityTest, PerZoneStats) {\n  addResponse(200, false, false, \"\", 
\"\", \"from_az\", \"to_az\");\n\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.zone.from_az.to_az.upstream_rq_completed\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.zone.from_az.to_az.upstream_rq_200\").value());\n  EXPECT_EQ(1U, cluster_scope_.counter(\"prefix.zone.from_az.to_az.upstream_rq_2xx\").value());\n}\n\nTEST_F(CodeUtilityTest, ResponseTimingTest) {\n  Stats::MockStore global_store;\n  Stats::MockStore cluster_scope;\n\n  Stats::StatNameManagedStorage prefix(\"prefix\", *symbol_table_);\n  Http::CodeStats::ResponseTimingInfo info{global_store,\n                                           cluster_scope,\n                                           pool_.add(\"prefix\"),\n                                           std::chrono::milliseconds(5),\n                                           true,\n                                           true,\n                                           pool_.add(\"vhost_name\"),\n                                           pool_.add(\"req_vcluster_name\"),\n                                           pool_.add(\"from_az\"),\n                                           pool_.add(\"to_az\")};\n\n  EXPECT_CALL(cluster_scope,\n              histogram(\"prefix.upstream_rq_time\", Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(cluster_scope, deliverHistogramToSinks(\n                                 Property(&Stats::Metric::name, \"prefix.upstream_rq_time\"), 5));\n\n  EXPECT_CALL(cluster_scope,\n              histogram(\"prefix.canary.upstream_rq_time\", Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(\n      cluster_scope,\n      deliverHistogramToSinks(Property(&Stats::Metric::name, \"prefix.canary.upstream_rq_time\"), 5));\n\n  EXPECT_CALL(cluster_scope,\n              histogram(\"prefix.internal.upstream_rq_time\", Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(cluster_scope,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, 
\"prefix.internal.upstream_rq_time\"), 5));\n  EXPECT_CALL(global_store,\n              histogram(\"vhost.vhost_name.vcluster.req_vcluster_name.upstream_rq_time\",\n                        Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(global_store,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name,\n                           \"vhost.vhost_name.vcluster.req_vcluster_name.upstream_rq_time\"),\n                  5));\n\n  EXPECT_CALL(cluster_scope, histogram(\"prefix.zone.from_az.to_az.upstream_rq_time\",\n                                       Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(cluster_scope,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"prefix.zone.from_az.to_az.upstream_rq_time\"), 5));\n  Http::CodeStatsImpl code_stats(*symbol_table_);\n  code_stats.chargeResponseTiming(info);\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/common.cc",
    "content": "#include \"common.h\"\n\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n\nnamespace Envoy {\nvoid HttpTestUtility::addDefaultHeaders(Http::RequestHeaderMap& headers,\n                                        const std::string default_method) {\n  headers.setScheme(\"http\");\n  headers.setMethod(default_method);\n  headers.setHost(\"host\");\n  headers.setPath(\"/\");\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/common.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/http/conn_pool.h\"\n\n#include \"common/http/codec_client.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n\nnamespace Envoy {\n/**\n * A fake CodecClient that 1) allows a mock codec to be passed in and 2) Allows for a destroy\n * callback.\n */\nclass CodecClientForTest : public Http::CodecClient {\npublic:\n  using DestroyCb = std::function<void(CodecClient*)>;\n  CodecClientForTest(CodecClient::Type type, Network::ClientConnectionPtr&& connection,\n                     Http::ClientConnection* codec, DestroyCb destroy_cb,\n                     Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher)\n      : CodecClient(type, std::move(connection), host, dispatcher), destroy_cb_(destroy_cb) {\n    codec_.reset(codec);\n  }\n  ~CodecClientForTest() override {\n    if (destroy_cb_) {\n      destroy_cb_(this);\n    }\n  }\n  void raiseGoAway(Http::GoAwayErrorCode error_code) { onGoAway(error_code); }\n  Event::Timer* idleTimer() { return idle_timer_.get(); }\n\n  DestroyCb destroy_cb_;\n};\n\n/**\n * Mock callbacks used for conn pool testing.\n */\nstruct ConnPoolCallbacks : public Http::ConnectionPool::Callbacks {\n  void onPoolReady(Http::RequestEncoder& encoder, Upstream::HostDescriptionConstSharedPtr host,\n                   const StreamInfo::StreamInfo&) override {\n    outer_encoder_ = &encoder;\n    host_ = host;\n    pool_ready_.ready();\n  }\n\n  void onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view,\n                     Upstream::HostDescriptionConstSharedPtr host) override {\n    host_ = host;\n    reason_ = reason;\n    pool_failure_.ready();\n  }\n\n  ConnectionPool::PoolFailureReason reason_;\n  ReadyWatcher pool_failure_;\n  ReadyWatcher pool_ready_;\n  Http::RequestEncoder* outer_encoder_{};\n  Upstream::HostDescriptionConstSharedPtr host_;\n};\n\n/**\n * Common utility functions for HTTP tests.\n 
*/\nclass HttpTestUtility {\npublic:\n  static void addDefaultHeaders(Http::RequestHeaderMap& headers,\n                                const std::string default_method = \"GET\");\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_common.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/config_provider.h\"\n#include \"envoy/router/rds.h\"\n\n#include \"test/mocks/router/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Http {\nnamespace ConnectionManagerImplHelper {\n\n// Test RouteConfigProvider that returns a mocked config.\nstruct RouteConfigProvider : public Router::RouteConfigProvider {\n  RouteConfigProvider(TimeSource& time_source) : time_source_(time_source) {}\n\n  // Router::RouteConfigProvider\n  Router::ConfigConstSharedPtr config() override { return route_config_; }\n  absl::optional<ConfigInfo> configInfo() const override { return {}; }\n  SystemTime lastUpdated() const override { return time_source_.systemTime(); }\n  void onConfigUpdate() override {}\n  void validateConfig(const envoy::api::v2::RouteConfiguration&) const override {}\n\n  TimeSource& time_source_;\n  std::shared_ptr<Router::MockConfig> route_config_{new NiceMock<Router::MockConfig>()};\n};\n\n// Test ScopedRouteConfigProvider that returns a mocked config.\nstruct ScopedRouteConfigProvider : public Config::ConfigProvider {\n  ScopedRouteConfigProvider(TimeSource& time_source)\n      : config_(std::make_shared<Router::MockScopedConfig>()), time_source_(time_source) {}\n\n  ~ScopedRouteConfigProvider() override = default;\n\n  // Config::ConfigProvider\n  SystemTime lastUpdated() const override { return time_source_.systemTime(); }\n  const Protobuf::Message* getConfigProto() const override { return nullptr; }\n  Envoy::Config::ConfigProvider::ConfigProtoVector getConfigProtos() const override { return {}; }\n  std::string getConfigVersion() const override { return \"\"; }\n  ConfigConstSharedPtr getConfig() const override { return config_; }\n  ApiType apiType() const override { return ApiType::Delta; }\n\n  std::shared_ptr<Router::MockScopedConfig> config_;\n  TimeSource& time_source_;\n};\n\n} // namespace 
ConnectionManagerImplHelper\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-continueandendstream-endstream",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n    end_stream: true\n    status: HEADER_CONTINUE_AND_END_STREAM\n  }\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-failed-dispatch",
    "content": "actions {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":scheme\"\n        value: \"t\"\n      }\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"\\'\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1634017305\n    request {\n      trailers {\n        headers {\n          headers {\n            key: \"&\"\n          }\n        }\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1073741824\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1073741824\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":scheme\"\n        value: \"\\'\"\n      }\n      headers {\n        key: \":method\"\n        value: \"GOT\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n    end_stream: true\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":scheme\"\n        value: \"t\"\n      }\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n   
   }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"\\'\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \"/\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  stream_action {\n    request {\n      throw_decoder_exception {\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      data {\n        status: DATA_STOP_ITERATION_NO_BUFFER\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"\\'\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1073741824\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      data {\n        decoder_filter_callback_action {\n          add_decoded_data {\n            size: 262144\n          }\n        }\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":scheme\"\n        value: \"t\"\n      }\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"\\'\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"&\"\n  
    }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 4294967295\n  }\n}\nactions {\n  stream_action {\n    stream_id: 4\n  }\n}"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-invalidhost",
    "content": "actions {\n  stream_action {\n    response {\n      data: 2683\n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":scheme\"\n        value: \"t\"\n      }\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"\\'\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKjKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKAKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKEKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKZKKKKKKKKKKKKKdKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK>KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK]KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK95
7191215689797641957=bar1\"\n      }\n    }\n    end_stream: true\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n    end_stream: true\n    status: HEADER_STOP_ALL_ITERATION_AND_WATERMARK\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  stream_action {\n    stream_id: 721420288\n  }\n}\nactions {\n  stream_action {\n    stream_id: 1024\n  }\n}\nactions {\n  stream_action {\n    request {\n      trailers {\n        status: TRAILER_STOP_ITERATION\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5638706466652160",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n      }\n      headers {\n        key: \"foo\"\n        value: \"/\"\n      }\n      headers {\n        key: \"cookie\"\n      }\n      headers {\n        key: \"/\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"/\"\n        value: \"GT\"\n      }\n      headers {\n        value: \"/\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n      }\n    }\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5669833168912384",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \":path\"         value: \"/\"       }       headers {         key: \":authority\"       }     }   } } actions {   stream_action {     request {       continue_decoding {       }     }   } }\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5674283828772864",
    "content": "actions {\n  stream_action {\n    request {\n      trailers {\n        headers {\n          headers {\n            key: \"foo\"\n            value: \"bar\"\n          }\n        }\n        decoder_filter_callback_action {\n          add_decoded_data {\n            size: 1000000\n          }\n        }\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      data {\n        size: 3000000\n        status: DATA_STOP_ITERATION_AND_BUFFER\n        decoder_filter_callback_action {\n          add_decoded_data {\n            size: 1000000\n          }\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 5505024\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    request {\n      continue_decoding {\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      data: 5\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5679723404328960",
    "content": "actions {\n}\nactions {\n  stream_action {\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"GET\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n      }\n    }\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5687458439102464",
    "content": "actions {   new_stream {   } }\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5701624673861632",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5714279517126656",
    "content": "actions {   new_stream {     request_headers {       headers {         key: \":authority\"         value: \" \"       }     }   } } "
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/codec_exception",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \"foo\"\n        value: \"bar\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      throw_decoder_exception {}\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/empty",
    "content": ""
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/example",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.com\"\n      }\n      headers {\n        key: \"blah\"\n        value: \"nosniff\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo=bar\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"foo2=bar2\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      data {\n        size: 3000000\n        decoder_filter_callback_action {\n          add_decoded_data {\n            size: 1000000\n          }\n        }\n        status: DATA_STOP_ITERATION_AND_BUFFER\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      continue_decoding {}\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      continue_headers {}\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      data: 5\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    request {\n      trailers {\n        headers {\n          headers {\n            key: \"foo\"\n            value: \"bar\"\n          }\n        }\n        decoder_filter_callback_action {\n          add_decoded_data {\n            size: 1000000\n          }\n        }\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      trailers {\n        headers {\n          key: \"foo\"\n          value: \"bar\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/invalid_host",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"G?T\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \"cookie\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"foo.c/m\"\n      }\n      headers {\n        key: \":path\"\n        value: \"foo-968957191215689797641957=bar1\"\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  stream_action {\n    stream_id: 67108864\n    request {\n      data {\n        size: 67108864\n      }\n    }\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}\nactions {\n  new_stream {\n  }\n}"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/missing_host",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \"foo\"\n        value: \"bar\"\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/regression_test_reuse_codec",
    "content": "actions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\"\n      }\n    }\n  }\n}\nactions {\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/state_local_complete",
    "content": "actions {\n  new_stream {\n    end_stream: true\n  }\n}\nactions {\n  stream_action {\n    response {\n      continue_headers {\n        headers {\n          key: \"\\177\\177\\177\\177\\177\\177\\177\\177\"\n        }\n      }\n    }\n  }\n}\nactions {\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/status_163",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n    }\n  }\n}\nactions {\n  stream_action {\n    response {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"162\"\n        }\n      }\n    }\n  }\n}"
  },
  {
    "path": "test/common/http/conn_manager_impl_corpus/upgrade_test_case",
    "content": "actions {\n  new_stream {\n    request_headers {\n      headers {\n        key: \":method\"\n        value: \"GET\"\n      }\n      headers {\n        key: \":path\"\n        value: \"/\"\n      }\n      headers {\n        key: \":scheme\"\n        value: \"http\"\n      }\n      headers {\n        key: \":authority\"\n        value: \"host\"\n      }\n      headers {\n        key: \"connection\"\n        value: \"upgrade\"\n      }\n      headers {\n        key: \"upgrade\"\n        value: \"WebSocket\"\n      }\n    }\n  }\n}\n\n\nactions {\n  stream_action {\n    stream_id: 0\n    response {\n      headers {\n        headers {\n          key: \"connection\"\n          value: \"upgrade\"\n        }\n        headers {\n          key: \"upgrade\"\n          value: \"WebSocket\"\n        }\n        headers {\n          key: \":status\"\n          value: \"101\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.http;\n\nimport \"google/protobuf/empty.proto\";\n\nimport \"test/fuzz/common.proto\";\n\nimport \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto\";\n\n// Structured input for conn_manager_impl_fuzz_test.\n\nmessage NewStream {\n  test.fuzz.Headers request_headers = 1;\n  bool end_stream = 2;\n  HeaderStatus status = 3;\n}\n\nenum HeaderStatus {\n  HEADER_CONTINUE = 0;\n  HEADER_STOP_ITERATION = 1;\n  HEADER_CONTINUE_AND_END_STREAM = 2;\n  HEADER_STOP_ALL_ITERATION_AND_BUFFER = 3;\n  HEADER_STOP_ALL_ITERATION_AND_WATERMARK = 4;\n}\n\nenum DataStatus {\n  DATA_CONTINUE = 0;\n  DATA_STOP_ITERATION_AND_BUFFER = 1;\n  DATA_STOP_ITERATION_AND_WATERMARK = 2;\n  DATA_STOP_ITERATION_NO_BUFFER = 3;\n}\n\nenum TrailerStatus {\n  TRAILER_CONTINUE = 0;\n  TRAILER_STOP_ITERATION = 1;\n}\n\nmessage DecoderFilterCallbackAction {\n  message AddDecodedData {\n    uint32 size = 1;\n    bool streaming = 2;\n  }\n  oneof decoder_filter_callback_action_selector {\n    // TODO(htuch): More decoder filer callback actions\n    AddDecodedData add_decoded_data = 1;\n  }\n}\n\nmessage RequestAction {\n  message DataAction {\n    uint32 size = 1;\n    bool end_stream = 2;\n    DataStatus status = 3;\n    DecoderFilterCallbackAction decoder_filter_callback_action = 4;\n  }\n  message TrailerAction {\n    test.fuzz.Headers headers = 1;\n    TrailerStatus status = 2;\n    DecoderFilterCallbackAction decoder_filter_callback_action = 3;\n  }\n  oneof request_action_selector {\n    DataAction data = 1;\n    TrailerAction trailers = 2;\n    google.protobuf.Empty continue_decoding = 3;\n    // Dispatch no longer throws, but rather returns an error status.\n    google.protobuf.Empty throw_decoder_exception = 4 [deprecated = true];\n    google.protobuf.Empty return_decoder_error = 5;\n    // TODO(htuch): Model and fuzz watermark events.\n  }\n}\n\n// TODO(htuch): Model and fuzz encoder filter 
buffering/resumption and different status returns.\nmessage ResponseAction {\n  oneof response_action_selector {\n    test.fuzz.Headers continue_headers = 1;\n    test.fuzz.Headers headers = 2;\n    uint32 data = 3;\n    test.fuzz.Headers trailers = 4;\n    // TODO(htuch): Model and fuzz watermark events.\n  }\n  bool end_stream = 7;\n}\n\nmessage StreamAction {\n  uint32 stream_id = 1;\n  oneof stream_action_selector {\n    RequestAction request = 2;\n    ResponseAction response = 3;\n  }\n}\n\nmessage Action {\n  oneof action_selector {\n    // Create new stream.\n    NewStream new_stream = 1;\n    // Perform an action on an existing stream.\n    StreamAction stream_action = 2;\n  }\n}\n\nmessage ConnManagerImplTestCase {\n  repeated Action actions = 1;\n  envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n      .ForwardClientCertDetails forward_client_cert = 2;\n}\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_fuzz_test.cc",
    "content": "// This fuzzer explores the behavior of HCM with replay of trace actions that describe the behavior\n// of a mocked codec and decoder/encoder filters. It is only partially complete (~60% test coverage\n// with supplied corpus), since HCM has a lot of behavior to model, requiring investment in building\n// out modeling actions and a corpus, which is time consuming and may not have significant security\n// of functional correctness payoff beyond existing tests. Places where we could increase fuzz\n// coverage include:\n// * Watermarks\n// * WebSocket upgrades\n// * Tracing and stats.\n// * Encode filter actions (e.g. modeling stop/continue, only done for decoder today).\n// * SSL\n// * Idle/drain timeouts.\n// * HTTP 1.0 special cases\n// * Fuzz config settings\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/http/conn_manager_impl.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/http/date_provider_impl.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/request_id_extension_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/common/http/conn_manager_impl_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gmock/gmock.h\"\n\nusing 
testing::InvokeWithoutArgs;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Http {\n\nclass FuzzConfig : public ConnectionManagerConfig {\npublic:\n  struct RouteConfigProvider : public Router::RouteConfigProvider {\n    RouteConfigProvider(TimeSource& time_source) : time_source_(time_source) {}\n\n    // Router::RouteConfigProvider\n    Router::ConfigConstSharedPtr config() override { return route_config_; }\n    absl::optional<ConfigInfo> configInfo() const override { return {}; }\n    SystemTime lastUpdated() const override { return time_source_.systemTime(); }\n    void onConfigUpdate() override {}\n\n    TimeSource& time_source_;\n    std::shared_ptr<Router::MockConfig> route_config_{new NiceMock<Router::MockConfig>()};\n  };\n\n  FuzzConfig(envoy::extensions::filters::network::http_connection_manager::v3::\n                 HttpConnectionManager::ForwardClientCertDetails forward_client_cert)\n      : stats_({ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_),\n                                        POOL_HISTOGRAM(fake_stats_))},\n               \"\", fake_stats_),\n        tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))},\n        listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_stats_))},\n        local_reply_(LocalReply::Factory::createDefault()) {\n    ON_CALL(route_config_provider_, lastUpdated()).WillByDefault(Return(time_system_.systemTime()));\n    ON_CALL(scoped_route_config_provider_, lastUpdated())\n        .WillByDefault(Return(time_system_.systemTime()));\n    access_logs_.emplace_back(std::make_shared<NiceMock<AccessLog::MockInstance>>());\n    request_id_extension_ = RequestIDExtensionFactory::defaultInstance(random_);\n    forward_client_cert_ = fromClientCert(forward_client_cert);\n  }\n\n  void newStream() {\n    if (!codec_) {\n      codec_ = new NiceMock<MockServerConnection>();\n    }\n    decoder_filter_ = new NiceMock<MockStreamDecoderFilter>();\n    encoder_filter_ = new 
NiceMock<MockStreamEncoderFilter>();\n    EXPECT_CALL(filter_factory_, createFilterChain(_))\n        .WillOnce(Invoke([this](FilterChainFactoryCallbacks& callbacks) -> void {\n          callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{decoder_filter_});\n          callbacks.addStreamEncoderFilter(StreamEncoderFilterSharedPtr{encoder_filter_});\n        }));\n    EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n        .WillOnce(Invoke([this](StreamDecoderFilterCallbacks& callbacks) -> void {\n          decoder_filter_->callbacks_ = &callbacks;\n          callbacks.streamInfo().setResponseCodeDetails(\"\");\n        }));\n    EXPECT_CALL(*encoder_filter_, setEncoderFilterCallbacks(_));\n    EXPECT_CALL(filter_factory_, createUpgradeFilterChain(\"WebSocket\", _, _))\n        .WillRepeatedly(Invoke([&](absl::string_view, const Http::FilterChainFactory::UpgradeMap*,\n                                   FilterChainFactoryCallbacks& callbacks) -> bool {\n          filter_factory_.createFilterChain(callbacks);\n          return true;\n        }));\n  }\n\n  Http::ForwardClientCertType\n  fromClientCert(envoy::extensions::filters::network::http_connection_manager::v3::\n                     HttpConnectionManager::ForwardClientCertDetails forward_client_cert) {\n    switch (forward_client_cert) {\n    case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n        SANITIZE:\n      return Http::ForwardClientCertType::Sanitize;\n    case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n        FORWARD_ONLY:\n      return Http::ForwardClientCertType::ForwardOnly;\n    case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n        APPEND_FORWARD:\n      return Http::ForwardClientCertType::AppendForward;\n    case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n        SANITIZE_SET:\n      
return Http::ForwardClientCertType::SanitizeSet;\n    case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n        ALWAYS_FORWARD_ONLY:\n      return Http::ForwardClientCertType::AlwaysForwardOnly;\n    default:\n      return Http::ForwardClientCertType::Sanitize;\n    }\n  }\n\n  // Http::ConnectionManagerConfig\n\n  RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; }\n  const std::list<AccessLog::InstanceSharedPtr>& accessLogs() override { return access_logs_; }\n  ServerConnectionPtr createCodec(Network::Connection&, const Buffer::Instance&,\n                                  ServerConnectionCallbacks&) override {\n    return ServerConnectionPtr{codec_};\n  }\n  DateProvider& dateProvider() override { return date_provider_; }\n  std::chrono::milliseconds drainTimeout() const override { return std::chrono::milliseconds(100); }\n  FilterChainFactory& filterFactory() override { return filter_factory_; }\n  bool generateRequestId() const override { return true; }\n  bool preserveExternalRequestId() const override { return false; }\n  bool alwaysSetRequestIdInResponse() const override { return false; }\n  uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; }\n  uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; }\n  absl::optional<std::chrono::milliseconds> idleTimeout() const override { return idle_timeout_; }\n  bool isRoutable() const override { return true; }\n  absl::optional<std::chrono::milliseconds> maxConnectionDuration() const override {\n    return max_connection_duration_;\n  }\n  absl::optional<std::chrono::milliseconds> maxStreamDuration() const override {\n    return max_stream_duration_;\n  }\n  std::chrono::milliseconds streamIdleTimeout() const override { return stream_idle_timeout_; }\n  std::chrono::milliseconds requestTimeout() const override { return request_timeout_; }\n  std::chrono::milliseconds 
delayedCloseTimeout() const override { return delayed_close_timeout_; }\n  Router::RouteConfigProvider* routeConfigProvider() override {\n    if (use_srds_) {\n      return nullptr;\n    }\n    return &route_config_provider_;\n  }\n  Config::ConfigProvider* scopedRouteConfigProvider() override {\n    if (use_srds_) {\n      return &scoped_route_config_provider_;\n    }\n    return nullptr;\n  }\n  const std::string& serverName() const override { return server_name_; }\n  HttpConnectionManagerProto::ServerHeaderTransformation\n  serverHeaderTransformation() const override {\n    return server_transformation_;\n  }\n  ConnectionManagerStats& stats() override { return stats_; }\n  ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; }\n  bool useRemoteAddress() const override { return use_remote_address_; }\n  const Http::InternalAddressConfig& internalAddressConfig() const override {\n    return internal_address_config_;\n  }\n  uint32_t xffNumTrustedHops() const override { return 0; }\n  bool skipXffAppend() const override { return false; }\n  const std::string& via() const override { return EMPTY_STRING; }\n  Http::ForwardClientCertType forwardClientCert() const override { return forward_client_cert_; }\n  const std::vector<Http::ClientCertDetailsType>& setCurrentClientCertDetails() const override {\n    return set_current_client_cert_details_;\n  }\n  const Network::Address::Instance& localAddress() override { return local_address_; }\n  const absl::optional<std::string>& userAgent() override { return user_agent_; }\n  Tracing::HttpTracerSharedPtr tracer() override { return http_tracer_; }\n  const TracingConnectionManagerConfig* tracingConfig() override { return tracing_config_.get(); }\n  ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; }\n  bool proxy100Continue() const override { return proxy_100_continue_; }\n  bool streamErrorOnInvalidHttpMessaging() const override {\n    return 
stream_error_on_invalid_http_messaging_;\n  }\n  const Http::Http1Settings& http1Settings() const override { return http1_settings_; }\n  bool shouldNormalizePath() const override { return false; }\n  bool shouldMergeSlashes() const override { return false; }\n  bool shouldStripMatchingPort() const override { return false; }\n  envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n  headersWithUnderscoresAction() const override {\n    return envoy::config::core::v3::HttpProtocolOptions::ALLOW;\n  }\n  const LocalReply::LocalReply& localReply() const override { return *local_reply_; }\n\n  const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      config_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  RequestIDExtensionSharedPtr request_id_extension_;\n  std::list<AccessLog::InstanceSharedPtr> access_logs_;\n  MockServerConnection* codec_{};\n  MockStreamDecoderFilter* decoder_filter_{};\n  MockStreamEncoderFilter* encoder_filter_{};\n  NiceMock<MockFilterChainFactory> filter_factory_;\n  Event::SimulatedTimeSystem time_system_;\n  SlowDateProviderImpl date_provider_{time_system_};\n  bool use_srds_{};\n  Router::MockRouteConfigProvider route_config_provider_;\n  Router::MockScopedRouteConfigProvider scoped_route_config_provider_;\n  std::string server_name_;\n  HttpConnectionManagerProto::ServerHeaderTransformation server_transformation_{\n      HttpConnectionManagerProto::OVERWRITE};\n  Stats::IsolatedStoreImpl fake_stats_;\n  ConnectionManagerStats stats_;\n  ConnectionManagerTracingStats tracing_stats_;\n  ConnectionManagerListenerStats listener_stats_;\n  uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB};\n  uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT};\n  absl::optional<std::chrono::milliseconds> idle_timeout_;\n  absl::optional<std::chrono::milliseconds> max_connection_duration_;\n  absl::optional<std::chrono::milliseconds> 
max_stream_duration_;\n  std::chrono::milliseconds stream_idle_timeout_{};\n  std::chrono::milliseconds request_timeout_{};\n  std::chrono::milliseconds delayed_close_timeout_{};\n  bool use_remote_address_{true};\n  Http::ForwardClientCertType forward_client_cert_;\n  std::vector<Http::ClientCertDetailsType> set_current_client_cert_details_;\n  Network::Address::Ipv4Instance local_address_{\"127.0.0.1\"};\n  absl::optional<std::string> user_agent_;\n  Tracing::HttpTracerSharedPtr http_tracer_{std::make_shared<NiceMock<Tracing::MockHttpTracer>>()};\n  TracingConnectionManagerConfigPtr tracing_config_;\n  bool proxy_100_continue_{true};\n  bool stream_error_on_invalid_http_messaging_ = false;\n  bool preserve_external_request_id_{false};\n  Http::Http1Settings http1_settings_;\n  Http::DefaultInternalAddressConfig internal_address_config_;\n  bool normalize_path_{true};\n  LocalReply::LocalReplyPtr local_reply_;\n};\n\n// Internal representation of stream state. Encapsulates the stream state, mocks\n// and encoders for both the request/response.\nclass FuzzStream {\npublic:\n  // We track stream state here to prevent illegal operations, e.g. applying an\n  // encodeData() to the codec after encodeTrailers(). This is necessary to\n  // maintain the preconditions for operations on the codec at the API level. Of\n  // course, it's the codecs must be robust to wire-level violations. We\n  // explore these violations via MutateAction and SwapAction at the connection\n  // buffer level.\n  enum class StreamState {\n    PendingHeaders,\n    PendingNonInformationalHeaders,\n    PendingDataOrTrailers,\n    Closed\n  };\n\n  FuzzStream(ConnectionManagerImpl& conn_manager, FuzzConfig& config,\n             const HeaderMap& request_headers,\n             test::common::http::HeaderStatus decode_header_status, bool end_stream)\n      : conn_manager_(conn_manager), config_(config) {\n    config_.newStream();\n    request_state_ = end_stream ? 
StreamState::Closed : StreamState::PendingDataOrTrailers;\n    response_state_ = StreamState::PendingHeaders;\n    decoder_filter_ = config.decoder_filter_;\n    encoder_filter_ = config.encoder_filter_;\n    EXPECT_CALL(*config_.codec_, dispatch(_))\n        .WillOnce(InvokeWithoutArgs([this, &request_headers, end_stream] {\n          decoder_ = &conn_manager_.newStream(encoder_);\n          auto headers = std::make_unique<TestRequestHeaderMapImpl>(request_headers);\n          if (headers->Method() == nullptr) {\n            headers->setReferenceKey(Headers::get().Method, \"GET\");\n          }\n          if (headers->Host() != nullptr &&\n              !HeaderUtility::authorityIsValid(headers->getHostValue())) {\n            // Sanitize host header so we don't fail at ASSERTs that verify header sanity checks\n            // which should have been performed by the codec.\n            headers->setHost(Fuzz::replaceInvalidHostCharacters(headers->getHostValue()));\n          }\n          // If sendLocalReply is called:\n          ON_CALL(encoder_, encodeHeaders(_, true))\n              .WillByDefault(Invoke([this](const ResponseHeaderMap&, bool end_stream) -> void {\n                response_state_ =\n                    end_stream ? 
StreamState::Closed : StreamState::PendingDataOrTrailers;\n              }));\n          decoder_->decodeHeaders(std::move(headers), end_stream);\n          return Http::okStatus();\n        }));\n    ON_CALL(*decoder_filter_, decodeHeaders(_, _))\n        .WillByDefault(InvokeWithoutArgs([this, decode_header_status,\n                                          end_stream]() -> Http::FilterHeadersStatus {\n          header_status_ = fromHeaderStatus(decode_header_status);\n          // When a filter should not return ContinueAndEndStream when send with end_stream set\n          // (see https://github.com/envoyproxy/envoy/pull/4885#discussion_r232176826)\n          if (end_stream && (*header_status_ == Http::FilterHeadersStatus::ContinueAndEndStream)) {\n            *header_status_ = Http::FilterHeadersStatus::Continue;\n          }\n          return *header_status_;\n        }));\n    fakeOnData();\n    FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_));\n  }\n\n  void fakeOnData() {\n    Buffer::OwnedImpl fake_input;\n    conn_manager_.onData(fake_input, false);\n  }\n\n  Http::FilterHeadersStatus fromHeaderStatus(test::common::http::HeaderStatus status) {\n    switch (status) {\n    case test::common::http::HeaderStatus::HEADER_CONTINUE:\n      return Http::FilterHeadersStatus::Continue;\n    case test::common::http::HeaderStatus::HEADER_STOP_ITERATION:\n      return Http::FilterHeadersStatus::StopIteration;\n    case test::common::http::HeaderStatus::HEADER_CONTINUE_AND_END_STREAM:\n      return Http::FilterHeadersStatus::ContinueAndEndStream;\n    case test::common::http::HeaderStatus::HEADER_STOP_ALL_ITERATION_AND_BUFFER:\n      return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n    case test::common::http::HeaderStatus::HEADER_STOP_ALL_ITERATION_AND_WATERMARK:\n      return Http::FilterHeadersStatus::StopAllIterationAndWatermark;\n    default:\n      return Http::FilterHeadersStatus::Continue;\n    }\n  }\n\n  
Http::FilterDataStatus fromDataStatus(test::common::http::DataStatus status) {\n    switch (status) {\n    case test::common::http::DataStatus::DATA_CONTINUE:\n      return Http::FilterDataStatus::Continue;\n    case test::common::http::DataStatus::DATA_STOP_ITERATION_AND_BUFFER:\n      return Http::FilterDataStatus::StopIterationAndBuffer;\n    case test::common::http::DataStatus::DATA_STOP_ITERATION_AND_WATERMARK:\n      return Http::FilterDataStatus::StopIterationAndWatermark;\n    case test::common::http::DataStatus::DATA_STOP_ITERATION_NO_BUFFER:\n      return Http::FilterDataStatus::StopIterationNoBuffer;\n    default:\n      return Http::FilterDataStatus::Continue;\n    }\n  }\n\n  Http::FilterTrailersStatus fromTrailerStatus(test::common::http::TrailerStatus status) {\n    switch (status) {\n    case test::common::http::TrailerStatus::TRAILER_CONTINUE:\n      return Http::FilterTrailersStatus::Continue;\n    case test::common::http::TrailerStatus::TRAILER_STOP_ITERATION:\n      return Http::FilterTrailersStatus::StopIteration;\n    default:\n      return Http::FilterTrailersStatus::Continue;\n    }\n  }\n\n  void decoderFilterCallbackAction(\n      const test::common::http::DecoderFilterCallbackAction& decoder_filter_callback_action) {\n    switch (decoder_filter_callback_action.decoder_filter_callback_action_selector_case()) {\n    case test::common::http::DecoderFilterCallbackAction::kAddDecodedData: {\n      if (request_state_ == StreamState::PendingDataOrTrailers) {\n        Buffer::OwnedImpl buf(std::string(\n            decoder_filter_callback_action.add_decoded_data().size() % (1024 * 1024), 'a'));\n        decoder_filter_->callbacks_->addDecodedData(\n            buf, decoder_filter_callback_action.add_decoded_data().streaming());\n      }\n      break;\n    }\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n  }\n\n  void requestAction(StreamState& state, const test::common::http::RequestAction& request_action) {\n    switch 
(request_action.request_action_selector_case()) {\n    case test::common::http::RequestAction::kData: {\n      if (state == StreamState::PendingDataOrTrailers) {\n        const auto& data_action = request_action.data();\n        ON_CALL(*decoder_filter_, decodeData(_, _))\n            .WillByDefault(InvokeWithoutArgs([this, &data_action]() -> Http::FilterDataStatus {\n              if (data_action.has_decoder_filter_callback_action()) {\n                decoderFilterCallbackAction(data_action.decoder_filter_callback_action());\n              }\n              data_status_ = fromDataStatus(data_action.status());\n              return *data_status_;\n            }));\n        EXPECT_CALL(*config_.codec_, dispatch(_)).WillOnce(InvokeWithoutArgs([this, &data_action] {\n          Buffer::OwnedImpl buf(std::string(data_action.size() % (1024 * 1024), 'a'));\n          decoder_->decodeData(buf, data_action.end_stream());\n          return Http::okStatus();\n        }));\n        fakeOnData();\n        FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_));\n        state = data_action.end_stream() ? 
StreamState::Closed : StreamState::PendingDataOrTrailers;\n      }\n      break;\n    }\n    case test::common::http::RequestAction::kTrailers: {\n      if (state == StreamState::PendingDataOrTrailers) {\n        const auto& trailers_action = request_action.trailers();\n        ON_CALL(*decoder_filter_, decodeTrailers(_))\n            .WillByDefault(\n                InvokeWithoutArgs([this, &trailers_action]() -> Http::FilterTrailersStatus {\n                  if (trailers_action.has_decoder_filter_callback_action()) {\n                    decoderFilterCallbackAction(trailers_action.decoder_filter_callback_action());\n                  }\n                  return fromTrailerStatus(trailers_action.status());\n                }));\n        EXPECT_CALL(*config_.codec_, dispatch(_))\n            .WillOnce(InvokeWithoutArgs([this, &trailers_action] {\n              decoder_->decodeTrailers(std::make_unique<TestRequestTrailerMapImpl>(\n                  Fuzz::fromHeaders<TestRequestTrailerMapImpl>(trailers_action.headers())));\n              return Http::okStatus();\n            }));\n        fakeOnData();\n        FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_));\n        state = StreamState::Closed;\n      }\n      break;\n    }\n    case test::common::http::RequestAction::kContinueDecoding: {\n      if (header_status_ == FilterHeadersStatus::StopAllIterationAndBuffer ||\n          header_status_ == FilterHeadersStatus::StopAllIterationAndWatermark ||\n          (header_status_ == FilterHeadersStatus::StopIteration &&\n           (data_status_ == FilterDataStatus::StopIterationAndBuffer ||\n            data_status_ == FilterDataStatus::StopIterationAndWatermark ||\n            data_status_ == FilterDataStatus::StopIterationNoBuffer))) {\n        decoder_filter_->callbacks_->continueDecoding();\n      }\n      break;\n    }\n    case test::common::http::RequestAction::kThrowDecoderException:\n    // Dispatch no longer throws, execute subsequent 
kReturnDecoderError case.\n    case test::common::http::RequestAction::kReturnDecoderError: {\n      if (state == StreamState::PendingDataOrTrailers) {\n        EXPECT_CALL(*config_.codec_, dispatch(_))\n            .WillOnce(testing::Return(codecProtocolError(\"blah\")));\n        fakeOnData();\n        FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_));\n        state = StreamState::Closed;\n      }\n      break;\n    }\n    default:\n      // Maybe nothing is set or not a request action?\n      break;\n    }\n  }\n\n  void responseAction(StreamState& state,\n                      const test::common::http::ResponseAction& response_action) {\n    const bool end_stream = response_action.end_stream();\n    switch (response_action.response_action_selector_case()) {\n    case test::common::http::ResponseAction::kContinueHeaders: {\n      if (state == StreamState::PendingHeaders) {\n        auto headers = std::make_unique<TestResponseHeaderMapImpl>(\n            Fuzz::fromHeaders<TestResponseHeaderMapImpl>(response_action.continue_headers()));\n        headers->setReferenceKey(Headers::get().Status, \"100\");\n        decoder_filter_->callbacks_->encode100ContinueHeaders(std::move(headers));\n        // We don't allow multiple 100-continue headers in HCM, UpstreamRequest is responsible\n        // for coalescing.\n        state = StreamState::PendingNonInformationalHeaders;\n      }\n      break;\n    }\n    case test::common::http::ResponseAction::kHeaders: {\n      if (state == StreamState::PendingHeaders ||\n          state == StreamState::PendingNonInformationalHeaders) {\n        auto headers = std::make_unique<TestResponseHeaderMapImpl>(\n            Fuzz::fromHeaders<TestResponseHeaderMapImpl>(response_action.headers()));\n        // The client codec will ensure we always have a valid :status.\n        // Similarly, local replies should always contain this.\n        uint64_t status;\n        try {\n          status = 
Utility::getResponseStatus(*headers);\n        } catch (const CodecClientException&) {\n          headers->setReferenceKey(Headers::get().Status, \"200\");\n        }\n        // The only 1xx header that may be provided to encodeHeaders() is a 101 upgrade,\n        // guaranteed by the codec parsers. See include/envoy/http/filter.h.\n        if (CodeUtility::is1xx(status) && status != enumToInt(Http::Code::SwitchingProtocols)) {\n          headers->setReferenceKey(Headers::get().Status, \"200\");\n        }\n        decoder_filter_->callbacks_->encodeHeaders(std::move(headers), end_stream, \"details\");\n        state = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers;\n      }\n      break;\n    }\n    case test::common::http::ResponseAction::kData: {\n      if (state == StreamState::PendingDataOrTrailers) {\n        Buffer::OwnedImpl buf(std::string(response_action.data() % (1024 * 1024), 'a'));\n        decoder_filter_->callbacks_->encodeData(buf, end_stream);\n        state = end_stream ? 
StreamState::Closed : StreamState::PendingDataOrTrailers;\n      }\n      break;\n    }\n    case test::common::http::ResponseAction::kTrailers: {\n      if (state == StreamState::PendingDataOrTrailers) {\n        decoder_filter_->callbacks_->encodeTrailers(std::make_unique<TestResponseTrailerMapImpl>(\n            Fuzz::fromHeaders<TestResponseTrailerMapImpl>(response_action.trailers())));\n        state = StreamState::Closed;\n      }\n      break;\n    }\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n  }\n\n  void streamAction(const test::common::http::StreamAction& stream_action) {\n    switch (stream_action.stream_action_selector_case()) {\n    case test::common::http::StreamAction::kRequest: {\n      requestAction(request_state_, stream_action.request());\n      break;\n    }\n    case test::common::http::StreamAction::kResponse: {\n      responseAction(response_state_, stream_action.response());\n      break;\n    }\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n  }\n\n  ConnectionManagerImpl& conn_manager_;\n  FuzzConfig& config_;\n  RequestDecoder* decoder_{};\n  NiceMock<MockResponseEncoder> encoder_;\n  MockStreamDecoderFilter* decoder_filter_{};\n  MockStreamEncoderFilter* encoder_filter_{};\n  StreamState request_state_;\n  StreamState response_state_;\n  absl::optional<Http::FilterHeadersStatus> header_status_;\n  absl::optional<Http::FilterDataStatus> data_status_;\n};\n\nusing FuzzStreamPtr = std::unique_ptr<FuzzStream>;\n\nDEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) {\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  } catch (const Envoy::ProtobufMessage::DeprecatedProtoFieldException& e) {\n    ENVOY_LOG_MISC(debug, \"DeprecatedProtoFieldException: {}\", e.what());\n    return;\n  }\n\n  FuzzConfig config(input.forward_client_cert());\n  
NiceMock<Network::MockDrainDecision> drain_close;\n  NiceMock<Random::MockRandomGenerator> random;\n  Stats::SymbolTableImpl symbol_table;\n  Http::ContextImpl http_context(symbol_table);\n  NiceMock<Runtime::MockLoader> runtime;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Upstream::MockClusterManager> cluster_manager;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks;\n  NiceMock<Server::MockOverloadManager> overload_manager;\n  auto ssl_connection = std::make_shared<Ssl::MockConnectionInfo>();\n  bool connection_alive = true;\n\n  ON_CALL(filter_callbacks.connection_, ssl()).WillByDefault(Return(ssl_connection));\n  ON_CALL(Const(filter_callbacks.connection_), ssl()).WillByDefault(Return(ssl_connection));\n  ON_CALL(filter_callbacks.connection_, close(_))\n      .WillByDefault(InvokeWithoutArgs([&connection_alive] { connection_alive = false; }));\n  filter_callbacks.connection_.local_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\");\n  filter_callbacks.connection_.remote_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"0.0.0.0\");\n\n  ConnectionManagerImpl conn_manager(config, drain_close, random, http_context, runtime, local_info,\n                                     cluster_manager, overload_manager, config.time_system_);\n  conn_manager.initializeReadFilterCallbacks(filter_callbacks);\n\n  std::vector<FuzzStreamPtr> streams;\n\n  for (const auto& action : input.actions()) {\n    ENVOY_LOG_MISC(trace, \"action {} with {} streams\", action.DebugString(), streams.size());\n    if (!connection_alive) {\n      ENVOY_LOG_MISC(trace, \"skipping due to dead connection\");\n      break;\n    }\n\n    switch (action.action_selector_case()) {\n    case test::common::http::Action::kNewStream: {\n      streams.emplace_back(new FuzzStream(\n          conn_manager, config,\n          Fuzz::fromHeaders<TestRequestHeaderMapImpl>(action.new_stream().request_headers(),\n                         
                             /* ignore_headers =*/{}, {\":authority\"}),\n          action.new_stream().status(), action.new_stream().end_stream()));\n      break;\n    }\n    case test::common::http::Action::kStreamAction: {\n      const auto& stream_action = action.stream_action();\n      if (streams.empty()) {\n        break;\n      }\n      (*std::next(streams.begin(), stream_action.stream_id() % streams.size()))\n          ->streamAction(stream_action);\n      break;\n    }\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n  }\n\n  filter_callbacks.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n  filter_callbacks.connection_.dispatcher_.clearDeferredDeleteList();\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_test.cc",
    "content": "#include \"test/common/http/conn_manager_impl_test_base.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/test_runtime.h\"\n\nusing testing::_;\nusing testing::An;\nusing testing::AnyNumber;\nusing testing::AtLeast;\nusing testing::Eq;\nusing testing::HasSubstr;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Http {\n\nTEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) {\n  setup(false, \"envoy-custom-server\", false);\n\n  // Store the basic request encoder during filter chain setup.\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(*filter, decodeHeaders(_, true))\n      .Times(2)\n      .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus {\n        EXPECT_NE(nullptr, headers.ForwardedFor());\n        EXPECT_EQ(\"http\", headers.getForwardedProtoValue());\n        if (headers.Path()->value() == \"/healthcheck\") {\n          filter->callbacks_->streamInfo().healthCheck(true);\n        }\n\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)).Times(2);\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .Times(2)\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(2);\n\n  // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers\n  // only request into it. 
Then we respond into the filter.\n  EXPECT_CALL(*codec_, dispatch(_))\n      .Times(2)\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        // Test not charging stats on the second call.\n        if (data.length() == 4) {\n          RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n              {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n          decoder_->decodeHeaders(std::move(headers), true);\n        } else {\n          RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n              {\":authority\", \"host\"}, {\":path\", \"/healthcheck\"}, {\":method\", \"GET\"}}};\n          decoder_->decodeHeaders(std::move(headers), true);\n        }\n\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        // Drain 2 so that on the 2nd iteration we will hit zero.\n        data.drain(2);\n        return Http::okStatus();\n      }));\n\n  // Kick off the incoming data. 
Use extra data which should cause a redispatch.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_2xx_.value());\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_completed_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_completed_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) {\n  proxy_100_continue_ = true;\n  setup(false, \"envoy-custom-server\", false);\n\n  // Store the basic request encoder during filter chain setup.\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(*filter, decodeHeaders(_, true))\n      .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus {\n        EXPECT_NE(nullptr, headers.ForwardedFor());\n        EXPECT_EQ(\"http\", headers.getForwardedProtoValue());\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_));\n\n  // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers\n  // only request into it. 
Then we respond into the filter.\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        // Test not charging stats on the second call.\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{\":status\", \"100\"}}};\n        filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers));\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_1xx_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_1xx_.value());\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_2xx_.value());\n  EXPECT_EQ(2U, stats_.named_.downstream_rq_completed_.value());\n  EXPECT_EQ(2U, listener_stats_.downstream_rq_completed_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFiltersProxyingDisabled) {\n  proxy_100_continue_ = false;\n  setup(false, \"envoy-custom-server\", false);\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n\n  // Akin to 100ContinueResponseWithEncoderFilters below, but with\n  // proxy_100_continue_ false. 
Verify the filters do not get the 100 continue\n  // headers.\n  EXPECT_CALL(*encoder_filters_[0], encode100ContinueHeaders(_)).Times(0);\n  EXPECT_CALL(*encoder_filters_[1], encode100ContinueHeaders(_)).Times(0);\n  EXPECT_CALL(response_encoder_, encode100ContinueHeaders(_)).Times(0);\n  ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{\":status\", \"100\"}}};\n  decoder_filters_[0]->callbacks_->encode100ContinueHeaders(std::move(continue_headers));\n\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFilters) {\n  proxy_100_continue_ = true;\n  setup(false, \"envoy-custom-server\", false);\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n\n  EXPECT_CALL(*encoder_filters_[0], encode100ContinueHeaders(_))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[1], encode100ContinueHeaders(_))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encode100ContinueHeaders(_));\n  ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{\":status\", \"100\"}}};\n  decoder_filters_[0]->callbacks_->encode100ContinueHeaders(std::move(continue_headers));\n\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n     
 .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, PauseResume100Continue) {\n  proxy_100_continue_ = true;\n  setup(false, \"envoy-custom-server\", false);\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n\n  // Stop the 100-Continue at encoder filter 1. Encoder filter 0 should not yet receive the\n  // 100-Continue\n  EXPECT_CALL(*encoder_filters_[1], encode100ContinueHeaders(_))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*encoder_filters_[0], encode100ContinueHeaders(_)).Times(0);\n  EXPECT_CALL(response_encoder_, encode100ContinueHeaders(_)).Times(0);\n  ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{\":status\", \"100\"}}};\n  decoder_filters_[1]->callbacks_->encode100ContinueHeaders(std::move(continue_headers));\n\n  // Have the encoder filter 1 continue. 
Make sure the 100-Continue is resumed as expected.\n  EXPECT_CALL(*encoder_filters_[0], encode100ContinueHeaders(_))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encode100ContinueHeaders(_));\n  encoder_filters_[1]->callbacks_->continueEncoding();\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n  doRemoteClose();\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/10923.\nTEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithDecoderPause) {\n  proxy_100_continue_ = true;\n  setup(false, \"envoy-custom-server\", false);\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  // Allow headers to pass.\n  EXPECT_CALL(*filter, decodeHeaders(_, false))\n      .WillRepeatedly(\n          InvokeWithoutArgs([]() -> FilterHeadersStatus { return FilterHeadersStatus::Continue; }));\n  // Pause and then resume the decode pipeline, this is key to triggering #10923.\n  EXPECT_CALL(*filter, decodeData(_, false)).WillOnce(InvokeWithoutArgs([]() -> FilterDataStatus {\n    return FilterDataStatus::StopIterationAndBuffer;\n  }));\n  EXPECT_CALL(*filter, decodeData(_, true))\n      .WillRepeatedly(\n          InvokeWithoutArgs([]() -> FilterDataStatus { return FilterDataStatus::Continue; }));\n\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      
.WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        // Test not charging stats on the second call.\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n        decoder_->decodeHeaders(std::move(headers), false);\n        // Allow the decode pipeline to pause.\n        decoder_->decodeData(data, false);\n\n        ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{\":status\", \"100\"}}};\n        filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers));\n\n        // Resume decode pipeline after encoding 100 continue headers, we're now\n        // ready to trigger #10923.\n        decoder_->decodeData(data, true);\n\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_1xx_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_1xx_.value());\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_2xx_.value());\n  EXPECT_EQ(2U, stats_.named_.downstream_rq_completed_.value());\n  EXPECT_EQ(2U, listener_stats_.downstream_rq_completed_.value());\n}\n\n// By default, Envoy will 
set the server header to the server name, here \"custom-value\"\nTEST_F(HttpConnectionManagerImplTest, ServerHeaderOverwritten) {\n  setup(false, \"custom-value\", false);\n  setUpEncoderAndDecoder(false, false);\n\n  sendRequestHeadersAndData();\n  const ResponseHeaderMap* altered_headers = sendResponseHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"server\", \"foo\"}}});\n  EXPECT_EQ(\"custom-value\", altered_headers->getServerValue());\n\n  doRemoteClose();\n}\n\n// When configured APPEND_IF_ABSENT if the server header is present it will be retained.\nTEST_F(HttpConnectionManagerImplTest, ServerHeaderAppendPresent) {\n  server_transformation_ = HttpConnectionManagerProto::APPEND_IF_ABSENT;\n  setup(false, \"custom-value\", false);\n  setUpEncoderAndDecoder(false, false);\n\n  sendRequestHeadersAndData();\n  const ResponseHeaderMap* altered_headers = sendResponseHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"server\", \"foo\"}}});\n  EXPECT_EQ(\"foo\", altered_headers->getServerValue());\n\n  doRemoteClose();\n}\n\n// When configured APPEND_IF_ABSENT if the server header is absent the server name will be set.\nTEST_F(HttpConnectionManagerImplTest, ServerHeaderAppendAbsent) {\n  server_transformation_ = HttpConnectionManagerProto::APPEND_IF_ABSENT;\n  setup(false, \"custom-value\", false);\n  setUpEncoderAndDecoder(false, false);\n\n  sendRequestHeadersAndData();\n  const ResponseHeaderMap* altered_headers =\n      sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}});\n  EXPECT_EQ(\"custom-value\", altered_headers->getServerValue());\n\n  doRemoteClose();\n}\n\n// When configured PASS_THROUGH, the server name will pass through.\nTEST_F(HttpConnectionManagerImplTest, ServerHeaderPassthroughPresent) {\n  server_transformation_ = HttpConnectionManagerProto::PASS_THROUGH;\n  setup(false, \"custom-value\", false);\n  
setUpEncoderAndDecoder(false, false);\n\n  sendRequestHeadersAndData();\n  const ResponseHeaderMap* altered_headers = sendResponseHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"server\", \"foo\"}}});\n  EXPECT_EQ(\"foo\", altered_headers->getServerValue());\n\n  doRemoteClose();\n}\n\n// When configured PASS_THROUGH, the server header will not be added if absent.\nTEST_F(HttpConnectionManagerImplTest, ServerHeaderPassthroughAbsent) {\n  server_transformation_ = HttpConnectionManagerProto::PASS_THROUGH;\n  setup(false, \"custom-value\", false);\n  setUpEncoderAndDecoder(false, false);\n\n  sendRequestHeadersAndData();\n  const ResponseHeaderMap* altered_headers =\n      sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}});\n  EXPECT_TRUE(altered_headers->Server() == nullptr);\n\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host\"}, {\":path\", \"http://api.lyft.com/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  // This test also verifies that decoder/encoder filters have onDestroy() called only once.\n  auto* filter = new MockStreamFilter();\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamFilter(StreamFilterSharedPtr{filter});\n      }));\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));\n  EXPECT_CALL(*filter, setEncoderFilterCallbacks(_));\n\n  EXPECT_CALL(*filter, encodeHeaders(_, true));\n  EXPECT_CALL(response_encoder_, 
encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"404\", headers.getStatusValue());\n        EXPECT_EQ(\"absolute_path_rejected\",\n                  filter->decoder_callbacks_->streamInfo().responseCodeDetails().value());\n      }));\n  EXPECT_CALL(*filter, onStreamComplete());\n  EXPECT_CALL(*filter, onDestroy());\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\n// Invalid paths are rejected with 400.\nTEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) {\n  InSequence s;\n  setup(false, \"\");\n  // Enable path sanitizer\n  normalize_path_ = true;\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host\"},\n        {\":path\", \"/ab%00c\"}, // \"%00\" is not valid in path according to RFC\n        {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n  EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage()).WillOnce(Return(true));\n\n  // This test also verifies that decoder/encoder filters have onDestroy() called only once.\n  auto* filter = new MockStreamFilter();\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamFilter(StreamFilterSharedPtr{filter});\n      }));\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));\n  EXPECT_CALL(*filter, setEncoderFilterCallbacks(_));\n  EXPECT_CALL(*filter, encodeHeaders(_, true));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"400\", headers.getStatusValue());\n        
EXPECT_EQ(\"path_normalization_failed\",\n                  filter->decoder_callbacks_->streamInfo().responseCodeDetails().value());\n      }));\n  EXPECT_CALL(*filter, onStreamComplete());\n  EXPECT_CALL(*filter, onDestroy());\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\n// Filters observe normalized paths, not the original path, when path\n// normalization is configured.\nTEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) {\n  setup(false, \"\");\n  // Enable path sanitizer\n  normalize_path_ = true;\n  const std::string original_path = \"/x/%2E%2e/z\";\n  const std::string normalized_path = \"/z\";\n\n  auto* filter = new MockStreamFilter();\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});\n      }));\n\n  EXPECT_CALL(*filter, decodeHeaders(_, true))\n      .WillRepeatedly(Invoke([&](RequestHeaderMap& header_map, bool) -> FilterHeadersStatus {\n        EXPECT_EQ(normalized_path, header_map.getPathValue());\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host\"}, {\":path\", original_path}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    return Http::okStatus();\n  }));\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(*filter, onStreamComplete());\n  EXPECT_CALL(*filter, onDestroy());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// The router 
observes normalized paths, not the original path, when path\n// normalization is configured.\nTEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) {\n  setup(false, \"\");\n  // Enable path sanitizer\n  normalize_path_ = true;\n  const std::string original_path = \"/x/%2E%2e/z\";\n  const std::string normalized_path = \"/z\";\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host\"}, {\":path\", original_path}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    return Http::okStatus();\n  }));\n\n  const std::string fake_cluster_name = \"fake_cluster\";\n\n  std::shared_ptr<Upstream::MockThreadLocalCluster> fake_cluster =\n      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();\n  std::shared_ptr<Router::MockRoute> route = std::make_shared<NiceMock<Router::MockRoute>>();\n  EXPECT_CALL(route->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster_name));\n\n  EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _))\n      .WillOnce(Invoke([&](const Router::RouteCallback&, const Http::RequestHeaderMap& header_map,\n                           const StreamInfo::StreamInfo&, uint64_t) {\n        EXPECT_EQ(normalized_path, header_map.getPathValue());\n        return route;\n      }));\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks&) -> void {}));\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  // Clean up.\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, RouteOverride) {\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) 
-> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(2, 0);\n  const std::string foo_bar_baz_cluster_name = \"foo_bar_baz\";\n  const std::string foo_bar_cluster_name = \"foo_bar\";\n  const std::string foo_cluster_name = \"foo\";\n  const std::string default_cluster_name = \"default\";\n\n  std::shared_ptr<Upstream::MockThreadLocalCluster> foo_bar_baz_cluster =\n      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();\n\n  std::shared_ptr<Upstream::MockThreadLocalCluster> foo_bar_cluster =\n      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();\n  EXPECT_CALL(cluster_manager_, get(absl::string_view{foo_bar_cluster_name}))\n      .WillOnce(Return(foo_bar_cluster.get()));\n\n  std::shared_ptr<Upstream::MockThreadLocalCluster> foo_cluster =\n      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();\n\n  std::shared_ptr<Upstream::MockThreadLocalCluster> default_cluster =\n      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();\n  EXPECT_CALL(cluster_manager_, get(absl::string_view{default_cluster_name}))\n      .Times(2)\n      .WillRepeatedly(Return(default_cluster.get()));\n\n  std::shared_ptr<Router::MockRoute> foo_bar_baz_route =\n      std::make_shared<NiceMock<Router::MockRoute>>();\n\n  std::shared_ptr<Router::MockRoute> foo_bar_route =\n      std::make_shared<NiceMock<Router::MockRoute>>();\n  EXPECT_CALL(foo_bar_route->route_entry_, clusterName()).WillOnce(ReturnRef(foo_bar_cluster_name));\n\n  std::shared_ptr<Router::MockRoute> foo_route = std::make_shared<NiceMock<Router::MockRoute>>();\n\n  std::shared_ptr<Router::MockRoute> default_route =\n      std::make_shared<NiceMock<Router::MockRoute>>();\n  
EXPECT_CALL(default_route->route_entry_, clusterName())\n      .Times(2)\n      .WillRepeatedly(ReturnRef(default_cluster_name));\n\n  using ::testing::InSequence;\n  {\n    InSequence seq;\n    EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _))\n        .WillOnce(Return(default_route));\n\n    // This filter iterates through all possible route matches and choose the last matched route\n    EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n        .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n          EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->route());\n          EXPECT_EQ(default_route->routeEntry(),\n                    decoder_filters_[0]->callbacks_->streamInfo().routeEntry());\n          EXPECT_EQ(default_cluster->info(), decoder_filters_[0]->callbacks_->clusterInfo());\n\n          // Not clearing cached route returns cached route and doesn't invoke cb.\n          Router::RouteConstSharedPtr route = decoder_filters_[0]->callbacks_->route(\n              [](Router::RouteConstSharedPtr, Router::RouteEvalStatus) -> Router::RouteMatchStatus {\n                ADD_FAILURE() << \"When route cache is not cleared CB should not be invoked\";\n                return Router::RouteMatchStatus::Accept;\n              });\n          EXPECT_EQ(default_route, route);\n\n          int ctr = 0;\n          const Router::RouteCallback& cb =\n              [&](Router::RouteConstSharedPtr route,\n                  Router::RouteEvalStatus route_eval_status) -> Router::RouteMatchStatus {\n            EXPECT_LE(ctr, 3);\n            if (ctr == 0) {\n              ++ctr;\n              EXPECT_EQ(foo_bar_baz_route, route);\n              EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes);\n              return Router::RouteMatchStatus::Continue;\n            }\n\n            if (ctr == 1) {\n              ++ctr;\n              EXPECT_EQ(foo_bar_route, route);\n              EXPECT_EQ(route_eval_status, 
Router::RouteEvalStatus::HasMoreRoutes);\n              return Router::RouteMatchStatus::Continue;\n            }\n\n            if (ctr == 2) {\n              ++ctr;\n              EXPECT_EQ(foo_route, route);\n              EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes);\n              return Router::RouteMatchStatus::Continue;\n            }\n\n            if (ctr == 3) {\n              ++ctr;\n              EXPECT_EQ(default_route, route);\n              EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::NoMoreRoutes);\n              return Router::RouteMatchStatus::Accept;\n            }\n            return Router::RouteMatchStatus::Accept;\n          };\n\n          decoder_filters_[0]->callbacks_->clearRouteCache();\n          route = decoder_filters_[0]->callbacks_->route(cb);\n\n          EXPECT_EQ(default_route, route);\n          EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->route());\n          EXPECT_EQ(default_route->routeEntry(),\n                    decoder_filters_[0]->callbacks_->streamInfo().routeEntry());\n          EXPECT_EQ(default_cluster->info(), decoder_filters_[0]->callbacks_->clusterInfo());\n\n          return FilterHeadersStatus::Continue;\n        }));\n\n    // This route config expected to be invoked for all matching routes\n    EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _))\n        .WillOnce(Invoke([&](const Router::RouteCallback& cb, const Http::RequestHeaderMap&,\n                             const Envoy::StreamInfo::StreamInfo&,\n                             uint64_t) -> Router::RouteConstSharedPtr {\n          EXPECT_EQ(cb(foo_bar_baz_route, Router::RouteEvalStatus::HasMoreRoutes),\n                    Router::RouteMatchStatus::Continue);\n          EXPECT_EQ(cb(foo_bar_route, Router::RouteEvalStatus::HasMoreRoutes),\n                    Router::RouteMatchStatus::Continue);\n          EXPECT_EQ(cb(foo_route, Router::RouteEvalStatus::HasMoreRoutes),\n                    
Router::RouteMatchStatus::Continue);\n          EXPECT_EQ(cb(default_route, Router::RouteEvalStatus::NoMoreRoutes),\n                    Router::RouteMatchStatus::Accept);\n          return default_route;\n        }));\n\n    EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n    // This filter chooses second route\n    EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))\n        .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n          EXPECT_EQ(default_route, decoder_filters_[1]->callbacks_->route());\n          EXPECT_EQ(default_route->routeEntry(),\n                    decoder_filters_[1]->callbacks_->streamInfo().routeEntry());\n          EXPECT_EQ(default_cluster->info(), decoder_filters_[1]->callbacks_->clusterInfo());\n\n          int ctr = 0;\n          const Router::RouteCallback& cb =\n              [&](Router::RouteConstSharedPtr route,\n                  Router::RouteEvalStatus route_eval_status) -> Router::RouteMatchStatus {\n            EXPECT_LE(ctr, 1);\n            if (ctr == 0) {\n              ++ctr;\n              EXPECT_EQ(foo_bar_baz_route, route);\n              EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes);\n              return Router::RouteMatchStatus::Continue;\n            }\n\n            if (ctr == 1) {\n              ++ctr;\n              EXPECT_EQ(foo_bar_route, route);\n              EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes);\n              return Router::RouteMatchStatus::Accept;\n            }\n            return Router::RouteMatchStatus::Accept;\n          };\n\n          decoder_filters_[0]->callbacks_->clearRouteCache();\n          decoder_filters_[1]->callbacks_->route(cb);\n\n          EXPECT_EQ(foo_bar_route, decoder_filters_[1]->callbacks_->route());\n          EXPECT_EQ(foo_bar_route->routeEntry(),\n                    decoder_filters_[1]->callbacks_->streamInfo().routeEntry());\n          EXPECT_EQ(foo_bar_cluster->info(), 
decoder_filters_[1]->callbacks_->clusterInfo());\n\n          return FilterHeadersStatus::Continue;\n        }));\n\n    // This route config expected to be invoked for first two matching routes\n    EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _))\n        .WillOnce(Invoke([&](const Router::RouteCallback& cb, const Http::RequestHeaderMap&,\n                             const Envoy::StreamInfo::StreamInfo&,\n                             uint64_t) -> Router::RouteConstSharedPtr {\n          EXPECT_EQ(cb(foo_bar_baz_route, Router::RouteEvalStatus::HasMoreRoutes),\n                    Router::RouteMatchStatus::Continue);\n          EXPECT_EQ(cb(foo_bar_route, Router::RouteEvalStatus::HasMoreRoutes),\n                    Router::RouteMatchStatus::Accept);\n          return foo_bar_route;\n        }));\n\n    EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  }\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  // Clean up.\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Filters observe host header w/o port's part when port's removal is configured\nTEST_F(HttpConnectionManagerImplTest, FilterShouldUseNormalizedHost) {\n  setup(false, \"\");\n  // Enable port removal\n  strip_matching_port_ = true;\n  const std::string original_host = \"host:443\";\n  const std::string normalized_host = \"host\";\n\n  auto* filter = new MockStreamFilter();\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});\n      }));\n\n  EXPECT_CALL(*filter, decodeHeaders(_, true))\n      .WillRepeatedly(Invoke([&](RequestHeaderMap& header_map, bool) -> FilterHeadersStatus {\n        EXPECT_EQ(normalized_host, header_map.getHostValue());\n        return 
FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", original_host}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    return Http::okStatus();\n  }));\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  // Clean up.\n  EXPECT_CALL(*filter, onStreamComplete());\n  EXPECT_CALL(*filter, onDestroy());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// The router observes host header w/o port, not the original host, when\n// remove_port is configured\nTEST_F(HttpConnectionManagerImplTest, RouteShouldUseNormalizedHost) {\n  setup(false, \"\");\n  // Enable port removal\n  strip_matching_port_ = true;\n  const std::string original_host = \"host:443\";\n  const std::string normalized_host = \"host\";\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", original_host}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    return Http::okStatus();\n  }));\n\n  const std::string fake_cluster_name = \"fake_cluster\";\n\n  std::shared_ptr<Upstream::MockThreadLocalCluster> fake_cluster =\n      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();\n  std::shared_ptr<Router::MockRoute> route = std::make_shared<NiceMock<Router::MockRoute>>();\n  EXPECT_CALL(route->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster_name));\n\n  
EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _))\n      .WillOnce(Invoke([&](const Router::RouteCallback&, const Http::RequestHeaderMap& header_map,\n                           const StreamInfo::StreamInfo&, uint64_t) {\n        EXPECT_EQ(normalized_host, header_map.getHostValue());\n        return route;\n      }));\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks&) -> void {}));\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  // Clean up.\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateNotSet) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.preserve_upstream_date\", \"false\"}});\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n  const auto* modified_headers = sendResponseHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"server\", \"foo\"}}});\n  ASSERT_TRUE(modified_headers);\n  EXPECT_TRUE(modified_headers->Date());\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateNotSet) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.preserve_upstream_date\", \"true\"}});\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n  const auto* modified_headers = sendResponseHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"server\", \"foo\"}}});\n  ASSERT_TRUE(modified_headers);\n  EXPECT_TRUE(modified_headers->Date());\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateSet) {\n  
TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.preserve_upstream_date\", \"false\"}});\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n  const std::string expected_date{\"Tue, 15 Nov 1994 08:12:31 GMT\"};\n  const auto* modified_headers =\n      sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{\n          {\":status\", \"200\"}, {\"server\", \"foo\"}, {\"date\", expected_date.c_str()}}});\n  ASSERT_TRUE(modified_headers);\n  ASSERT_TRUE(modified_headers->Date());\n  EXPECT_NE(expected_date, modified_headers->getDateValue());\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateSet) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.preserve_upstream_date\", \"true\"}});\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n  const std::string expected_date{\"Tue, 15 Nov 1994 08:12:31 GMT\"};\n  const auto* modified_headers =\n      sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{\n          {\":status\", \"200\"}, {\"server\", \"foo\"}, {\"date\", expected_date.c_str()}}});\n  ASSERT_TRUE(modified_headers);\n  ASSERT_TRUE(modified_headers->Date());\n  EXPECT_EQ(expected_date, modified_headers->getDateValue());\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateFromCache) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.preserve_upstream_date\", \"false\"}});\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n  encoder_filters_[0]->callbacks_->streamInfo().setResponseFlag(\n      StreamInfo::ResponseFlag::ResponseFromCacheFilter);\n  const std::string 
expected_date{\"Tue, 15 Nov 1994 08:12:31 GMT\"};\n  const auto* modified_headers =\n      sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{\n          {\":status\", \"200\"}, {\"server\", \"foo\"}, {\"date\", expected_date.c_str()}}});\n  ASSERT_TRUE(modified_headers);\n  ASSERT_TRUE(modified_headers->Date());\n  EXPECT_EQ(expected_date, modified_headers->getDateValue());\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) {\n  setup(false, \"\");\n\n  auto* span = new NiceMock<Tracing::MockSpan>();\n  EXPECT_CALL(*tracer_, startSpan_(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const Tracing::Config& config, const HeaderMap&, const StreamInfo::StreamInfo&,\n                     const Tracing::Decision) -> Tracing::Span* {\n            EXPECT_EQ(Tracing::OperationName::Ingress, config.operationName());\n\n            return span;\n          }));\n  // No decorator.\n  EXPECT_CALL(*route_config_provider_.route_config_->route_, decorator())\n      .WillRepeatedly(Return(nullptr));\n  envoy::type::v3::FractionalPercent percent1;\n  percent1.set_numerator(100);\n  envoy::type::v3::FractionalPercent percent2;\n  percent2.set_numerator(10000);\n  percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n\n  struct TracingTagMetaSuite {\n    using Factory =\n        std::function<Tracing::CustomTagConstSharedPtr(const std::string&, const std::string&)>;\n    std::string prefix;\n    Factory factory;\n  };\n  struct TracingTagSuite {\n    bool has_conn;\n    bool has_route;\n    std::list<Tracing::CustomTagConstSharedPtr> custom_tags;\n    std::string tag;\n    std::string tag_value;\n  };\n  std::vector<TracingTagMetaSuite> tracing_tag_meta_cases = {\n      {\"l-tag\",\n       [](const std::string& t, const std::string& v) {\n         envoy::type::tracing::v3::CustomTag::Literal literal;\n         literal.set_value(v);\n         return std::make_shared<Tracing::LiteralCustomTag>(t, 
literal);\n       }},\n      {\"e-tag\",\n       [](const std::string& t, const std::string& v) {\n         envoy::type::tracing::v3::CustomTag::Environment e;\n         e.set_default_value(v);\n         return std::make_shared<Tracing::EnvironmentCustomTag>(t, e);\n       }},\n      {\"x-tag\",\n       [](const std::string& t, const std::string& v) {\n         envoy::type::tracing::v3::CustomTag::Header h;\n         h.set_default_value(v);\n         return std::make_shared<Tracing::RequestHeaderCustomTag>(t, h);\n       }},\n      {\"m-tag\", [](const std::string& t, const std::string& v) {\n         envoy::type::tracing::v3::CustomTag::Metadata m;\n         m.mutable_kind()->mutable_host();\n         m.set_default_value(v);\n         return std::make_shared<Tracing::MetadataCustomTag>(t, m);\n       }}};\n  std::vector<TracingTagSuite> tracing_tag_cases;\n  for (const TracingTagMetaSuite& ms : tracing_tag_meta_cases) {\n    const std::string& t1 = ms.prefix + \"-1\";\n    const std::string& v1 = ms.prefix + \"-v1\";\n    tracing_tag_cases.push_back({true, false, {ms.factory(t1, v1)}, t1, v1});\n\n    const std::string& t2 = ms.prefix + \"-2\";\n    const std::string& v2 = ms.prefix + \"-v2\";\n    const std::string& rv2 = ms.prefix + \"-r2\";\n    tracing_tag_cases.push_back({true, true, {ms.factory(t2, v2), ms.factory(t2, rv2)}, t2, rv2});\n\n    const std::string& t3 = ms.prefix + \"-3\";\n    const std::string& rv3 = ms.prefix + \"-r3\";\n    tracing_tag_cases.push_back({false, true, {ms.factory(t3, rv3)}, t3, rv3});\n  }\n  Tracing::CustomTagMap conn_tracing_tags = {\n      {\":method\", requestHeaderCustomTag(\":method\")}}; // legacy test case\n  Tracing::CustomTagMap route_tracing_tags;\n  for (TracingTagSuite& s : tracing_tag_cases) {\n    if (s.has_conn) {\n      const Tracing::CustomTagConstSharedPtr& ptr = s.custom_tags.front();\n      conn_tracing_tags.emplace(ptr->tag(), ptr);\n      s.custom_tags.pop_front();\n    }\n    if (s.has_route) {\n      
const Tracing::CustomTagConstSharedPtr& ptr = s.custom_tags.front();\n      route_tracing_tags.emplace(ptr->tag(), ptr);\n      s.custom_tags.pop_front();\n    }\n  }\n  tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(\n      TracingConnectionManagerConfig{Tracing::OperationName::Ingress, conn_tracing_tags, percent1,\n                                     percent2, percent1, false, 256});\n  NiceMock<Router::MockRouteTracing> route_tracing;\n  ON_CALL(route_tracing, getClientSampling()).WillByDefault(ReturnRef(percent1));\n  ON_CALL(route_tracing, getRandomSampling()).WillByDefault(ReturnRef(percent2));\n  ON_CALL(route_tracing, getOverallSampling()).WillByDefault(ReturnRef(percent1));\n  ON_CALL(route_tracing, getCustomTags()).WillByDefault(ReturnRef(route_tracing_tags));\n  ON_CALL(*route_config_provider_.route_config_->route_, tracingConfig())\n      .WillByDefault(Return(&route_tracing));\n\n  EXPECT_CALL(*span, finishSpan());\n  EXPECT_CALL(*span, setTag(_, _)).Times(testing::AnyNumber());\n  // Verify tag is set based on the request headers.\n  EXPECT_CALL(*span, setTag(Eq(\":method\"), Eq(\"GET\")));\n  for (const TracingTagSuite& s : tracing_tag_cases) {\n    EXPECT_CALL(*span, setTag(Eq(s.tag), Eq(s.tag_value)));\n  }\n  // Verify if the activeSpan interface returns reference to the current span.\n  EXPECT_CALL(*span, setTag(Eq(\"service-cluster\"), Eq(\"scoobydoo\")));\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*span, setOperation(_)).Times(0);\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  // Treat request as internal, otherwise 
x-request-id header will be overwritten.\n  use_remote_address_ = false;\n  EXPECT_CALL(random_, uuid()).Times(0);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n        filter->callbacks_->activeSpan().setTag(\"service-cluster\", \"scoobydoo\");\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Should be no 'x-envoy-decorator-operation' response header.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(nullptr, headers.EnvoyDecoratorOperation());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(1UL, tracing_stats_.service_forced_.value());\n  EXPECT_EQ(0UL, tracing_stats_.random_sampling_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorator) {\n  setup(false, \"\");\n\n  auto* span = new NiceMock<Tracing::MockSpan>();\n  EXPECT_CALL(*tracer_, startSpan_(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const Tracing::Config& config, const HeaderMap&, const StreamInfo::StreamInfo&,\n                     const Tracing::Decision) 
-> Tracing::Span* {\n            EXPECT_EQ(Tracing::OperationName::Ingress, config.operationName());\n\n            return span;\n          }));\n  route_config_provider_.route_config_->route_->decorator_.operation_ = \"testOp\";\n  EXPECT_CALL(*route_config_provider_.route_config_->route_, decorator()).Times(2);\n  EXPECT_CALL(route_config_provider_.route_config_->route_->decorator_, apply(_))\n      .WillOnce(Invoke(\n          [&](const Tracing::Span& apply_to_span) -> void { EXPECT_EQ(span, &apply_to_span); }));\n  EXPECT_EQ(true, route_config_provider_.route_config_->route_->decorator_.propagate());\n  EXPECT_CALL(*span, finishSpan());\n  EXPECT_CALL(*span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*span, setOperation(_)).Times(0);\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  // Treat request as internal, otherwise x-request-id header will be overwritten.\n  use_remote_address_ = false;\n  EXPECT_CALL(random_, uuid()).Times(0);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        
ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n        filter->callbacks_->activeSpan().setTag(\"service-cluster\", \"scoobydoo\");\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Verify decorator operation response header has been defined.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"testOp\", headers.getEnvoyDecoratorOperationValue());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecoratorPropagateFalse) {\n  setup(false, \"\");\n\n  auto* span = new NiceMock<Tracing::MockSpan>();\n  EXPECT_CALL(*tracer_, startSpan_(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const Tracing::Config& config, const HeaderMap&, const StreamInfo::StreamInfo&,\n                     const Tracing::Decision) -> Tracing::Span* {\n            EXPECT_EQ(Tracing::OperationName::Ingress, config.operationName());\n\n            return span;\n          }));\n  route_config_provider_.route_config_->route_->decorator_.operation_ = \"testOp\";\n  ON_CALL(route_config_provider_.route_config_->route_->decorator_, propagate())\n      .WillByDefault(Return(false));\n  EXPECT_CALL(*route_config_provider_.route_config_->route_, decorator()).Times(2);\n  EXPECT_CALL(route_config_provider_.route_config_->route_->decorator_, apply(_))\n      .WillOnce(Invoke(\n          [&](const Tracing::Span& apply_to_span) -> void { EXPECT_EQ(span, &apply_to_span); }));\n  EXPECT_CALL(*span, finishSpan());\n  EXPECT_CALL(*span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      
featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*span, setOperation(_)).Times(0);\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  // Treat request as internal, otherwise x-request-id header will be overwritten.\n  use_remote_address_ = false;\n  EXPECT_CALL(random_, uuid()).Times(0);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n        filter->callbacks_->activeSpan().setTag(\"service-cluster\", \"scoobydoo\");\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Verify decorator operation response header has NOT been defined (i.e. 
not propagated).\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(nullptr, headers.EnvoyDecoratorOperation());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecoratorOverrideOp) {\n  setup(false, \"\");\n\n  auto* span = new NiceMock<Tracing::MockSpan>();\n  EXPECT_CALL(*tracer_, startSpan_(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const Tracing::Config& config, const HeaderMap&, const StreamInfo::StreamInfo&,\n                     const Tracing::Decision) -> Tracing::Span* {\n            EXPECT_EQ(Tracing::OperationName::Ingress, config.operationName());\n\n            return span;\n          }));\n  route_config_provider_.route_config_->route_->decorator_.operation_ = \"initOp\";\n  EXPECT_CALL(*route_config_provider_.route_config_->route_, decorator()).Times(2);\n  EXPECT_CALL(route_config_provider_.route_config_->route_->decorator_, apply(_))\n      .WillOnce(Invoke(\n          [&](const Tracing::Span& apply_to_span) -> void { EXPECT_EQ(span, &apply_to_span); }));\n  EXPECT_CALL(*span, finishSpan());\n  EXPECT_CALL(*span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*span, setOperation(Eq(\"testOp\")));\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  // Treat request as internal, otherwise x-request-id header will be overwritten.\n  use_remote_address_ = false;\n  
EXPECT_CALL(random_, uuid()).Times(0);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"},\n                                         {\"x-envoy-decorator-operation\", \"testOp\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n        filter->callbacks_->activeSpan().setTag(\"service-cluster\", \"scoobydoo\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Should be no 'x-envoy-decorator-operation' response header, as decorator\n  // was overridden by request header.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(nullptr, headers.EnvoyDecoratorOperation());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorator) {\n  setup(false, \"\");\n  envoy::type::v3::FractionalPercent percent1;\n  percent1.set_numerator(100);\n  envoy::type::v3::FractionalPercent percent2;\n  percent2.set_numerator(10000);\n  percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n  tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(\n      
TracingConnectionManagerConfig{Tracing::OperationName::Egress,\n                                     {{\":method\", requestHeaderCustomTag(\":method\")}},\n                                     percent1,\n                                     percent2,\n                                     percent1,\n                                     false,\n                                     256});\n\n  auto* span = new NiceMock<Tracing::MockSpan>();\n  EXPECT_CALL(*tracer_, startSpan_(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const Tracing::Config& config, const HeaderMap&, const StreamInfo::StreamInfo&,\n                     const Tracing::Decision) -> Tracing::Span* {\n            EXPECT_EQ(Tracing::OperationName::Egress, config.operationName());\n\n            return span;\n          }));\n  route_config_provider_.route_config_->route_->decorator_.operation_ = \"testOp\";\n  EXPECT_CALL(*route_config_provider_.route_config_->route_, decorator()).Times(2);\n  EXPECT_CALL(route_config_provider_.route_config_->route_->decorator_, apply(_))\n      .WillOnce(Invoke(\n          [&](const Tracing::Span& apply_to_span) -> void { EXPECT_EQ(span, &apply_to_span); }));\n  EXPECT_EQ(true, route_config_provider_.route_config_->route_->decorator_.propagate());\n  EXPECT_CALL(*span, finishSpan());\n  EXPECT_CALL(*span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*span, setOperation(_)).Times(0);\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  // Treat request as internal, otherwise x-request-id header will be overwritten.\n  
use_remote_address_ = false;\n  EXPECT_CALL(random_, uuid()).Times(0);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n        filter->callbacks_->activeSpan().setTag(\"service-cluster\", \"scoobydoo\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  EXPECT_CALL(*filter, decodeHeaders(_, true))\n      .WillOnce(Invoke([](RequestHeaderMap& headers, bool) -> FilterHeadersStatus {\n        EXPECT_NE(nullptr, headers.EnvoyDecoratorOperation());\n        // Verify that decorator operation has been set as request header.\n        EXPECT_EQ(\"testOp\", headers.getEnvoyDecoratorOperationValue());\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecoratorPropagateFalse) {\n  setup(false, \"\");\n  envoy::type::v3::FractionalPercent percent1;\n  percent1.set_numerator(100);\n  envoy::type::v3::FractionalPercent percent2;\n  percent2.set_numerator(10000);\n  percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n  tracing_config_ = 
std::make_unique<TracingConnectionManagerConfig>(\n      TracingConnectionManagerConfig{Tracing::OperationName::Egress,\n                                     {{\":method\", requestHeaderCustomTag(\":method\")}},\n                                     percent1,\n                                     percent2,\n                                     percent1,\n                                     false,\n                                     256});\n\n  auto* span = new NiceMock<Tracing::MockSpan>();\n  EXPECT_CALL(*tracer_, startSpan_(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const Tracing::Config& config, const HeaderMap&, const StreamInfo::StreamInfo&,\n                     const Tracing::Decision) -> Tracing::Span* {\n            EXPECT_EQ(Tracing::OperationName::Egress, config.operationName());\n\n            return span;\n          }));\n  route_config_provider_.route_config_->route_->decorator_.operation_ = \"testOp\";\n  ON_CALL(route_config_provider_.route_config_->route_->decorator_, propagate())\n      .WillByDefault(Return(false));\n  EXPECT_CALL(*route_config_provider_.route_config_->route_, decorator()).Times(2);\n  EXPECT_CALL(route_config_provider_.route_config_->route_->decorator_, apply(_))\n      .WillOnce(Invoke(\n          [&](const Tracing::Span& apply_to_span) -> void { EXPECT_EQ(span, &apply_to_span); }));\n  EXPECT_CALL(*span, finishSpan());\n  EXPECT_CALL(*span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  EXPECT_CALL(*span, setOperation(_)).Times(0);\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  // Treat 
request as internal, otherwise x-request-id header will be overwritten.\n  use_remote_address_ = false;\n  EXPECT_CALL(random_, uuid()).Times(0);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n        filter->callbacks_->activeSpan().setTag(\"service-cluster\", \"scoobydoo\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Verify that decorator operation has NOT been set as request header (propagate is false)\n  EXPECT_CALL(*filter, decodeHeaders(_, true))\n      .WillOnce(Invoke([](RequestHeaderMap& headers, bool) -> FilterHeadersStatus {\n        EXPECT_EQ(nullptr, headers.EnvoyDecoratorOperation());\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecoratorOverrideOp) {\n  setup(false, \"\");\n  envoy::type::v3::FractionalPercent percent1;\n  percent1.set_numerator(100);\n  envoy::type::v3::FractionalPercent percent2;\n  percent2.set_numerator(10000);\n  
percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n  tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(\n      TracingConnectionManagerConfig{Tracing::OperationName::Egress,\n                                     {{\":method\", requestHeaderCustomTag(\":method\")}},\n                                     percent1,\n                                     percent2,\n                                     percent1,\n                                     false,\n                                     256});\n\n  auto* span = new NiceMock<Tracing::MockSpan>();\n  EXPECT_CALL(*tracer_, startSpan_(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const Tracing::Config& config, const HeaderMap&, const StreamInfo::StreamInfo&,\n                     const Tracing::Decision) -> Tracing::Span* {\n            EXPECT_EQ(Tracing::OperationName::Egress, config.operationName());\n\n            return span;\n          }));\n  route_config_provider_.route_config_->route_->decorator_.operation_ = \"initOp\";\n  EXPECT_CALL(*route_config_provider_.route_config_->route_, decorator()).Times(2);\n  EXPECT_CALL(route_config_provider_.route_config_->route_->decorator_, apply(_))\n      .WillOnce(Invoke(\n          [&](const Tracing::Span& apply_to_span) -> void { EXPECT_EQ(span, &apply_to_span); }));\n  EXPECT_CALL(*span, finishSpan());\n  EXPECT_CALL(*span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  // Verify that span operation overridden by value supplied in response header.\n  EXPECT_CALL(*span, setOperation(Eq(\"testOp\")));\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        
callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  // Treat request as internal, otherwise x-request-id header will be overwritten.\n  use_remote_address_ = false;\n  EXPECT_CALL(random_, uuid()).Times(0);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{\n            {\":status\", \"200\"}, {\"x-envoy-decorator-operation\", \"testOp\"}}};\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n        filter->callbacks_->activeSpan().setTag(\"service-cluster\", \"scoobydoo\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest,\n       StartAndFinishSpanNormalFlowEgressDecoratorOverrideOpNoActiveSpan) {\n  setup(false, \"\");\n  envoy::type::v3::FractionalPercent percent1;\n  percent1.set_numerator(100);\n  envoy::type::v3::FractionalPercent percent2;\n  percent2.set_numerator(10000);\n  percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n  tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(\n      TracingConnectionManagerConfig{Tracing::OperationName::Egress,\n                                     {{\":method\", requestHeaderCustomTag(\":method\")}},\n    
                                 percent1,\n                                     percent2,\n                                     percent1,\n                                     false,\n                                     256});\n\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(false));\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  // Treat request as internal, otherwise x-request-id header will be overwritten.\n  use_remote_address_ = false;\n  EXPECT_CALL(random_, uuid()).Times(0);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{\n            {\":status\", \"200\"}, {\"x-envoy-decorator-operation\", \"testOp\"}}};\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, 
TestAccessLog) {\n  static constexpr char local_address[] = \"0.0.0.0\";\n  static constexpr char xff_address[] = \"1.2.3.4\";\n\n  // stream_info.downstreamRemoteAddress will infer the address from request\n  // headers instead of the physical connection\n  use_remote_address_ = false;\n  setup(false, \"\");\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n        callbacks.addAccessLogHandler(handler);\n      }));\n\n  EXPECT_CALL(*handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        EXPECT_TRUE(stream_info.responseCode());\n        EXPECT_EQ(stream_info.responseCode().value(), uint32_t(200));\n        EXPECT_NE(nullptr, stream_info.downstreamLocalAddress());\n        EXPECT_NE(nullptr, stream_info.downstreamRemoteAddress());\n        EXPECT_NE(nullptr, stream_info.downstreamDirectRemoteAddress());\n        EXPECT_NE(nullptr, stream_info.routeEntry());\n\n        EXPECT_EQ(stream_info.downstreamRemoteAddress()->ip()->addressAsString(), xff_address);\n        EXPECT_EQ(stream_info.downstreamDirectRemoteAddress()->ip()->addressAsString(),\n                  local_address);\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                 
                        {\"x-forwarded-for\", xff_address},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestFilterCanEnrichAccessLogs) {\n  setup(false, \"\");\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n        callbacks.addAccessLogHandler(handler);\n      }));\n\n  EXPECT_CALL(*filter, onStreamComplete()).WillOnce(Invoke([&]() {\n    ProtobufWkt::Value metadata_value;\n    metadata_value.set_string_value(\"value\");\n    ProtobufWkt::Struct metadata;\n    metadata.mutable_fields()->insert({\"field\", metadata_value});\n    filter->callbacks_->streamInfo().setDynamicMetadata(\"metadata_key\", metadata);\n  }));\n\n  EXPECT_CALL(*handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        auto dynamic_meta = stream_info.dynamicMetadata().filter_metadata().at(\"metadata_key\");\n        EXPECT_EQ(\"value\", dynamic_meta.fields().at(\"field\").string_value());\n      }));\n\n  NiceMock<MockResponseEncoder> encoder;\n  
EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        RequestDecoder* decoder = &conn_manager_->newStream(encoder);\n\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":method\", \"GET\"}, {\":authority\", \"host\"}, {\":path\", \"/\"}}};\n        decoder->decodeHeaders(std::move(headers), true);\n\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestDownstreamDisconnectAccessLog) {\n  setup(false, \"\");\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n        callbacks.addAccessLogHandler(handler);\n      }));\n\n  EXPECT_CALL(*handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        EXPECT_FALSE(stream_info.responseCode());\n        EXPECT_TRUE(stream_info.hasAnyResponseFlag());\n        EXPECT_TRUE(\n            stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamConnectionTermination));\n        EXPECT_EQ(\"downstream_remote_disconnect\", stream_info.responseCodeDetails().value());\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) 
-> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":method\", \"GET\"}, {\":authority\", \"host\"}, {\":path\", \"/\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestAccessLogWithTrailers) {\n  setup(false, \"\");\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n        callbacks.addAccessLogHandler(handler);\n      }));\n\n  EXPECT_CALL(*handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        EXPECT_TRUE(stream_info.responseCode());\n        EXPECT_EQ(stream_info.responseCode().value(), uint32_t(200));\n        EXPECT_NE(nullptr, stream_info.downstreamLocalAddress());\n        EXPECT_NE(nullptr, stream_info.downstreamRemoteAddress());\n        EXPECT_NE(nullptr, stream_info.downstreamDirectRemoteAddress());\n        EXPECT_NE(nullptr, stream_info.routeEntry());\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                       
  {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n        ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{\"x-trailer\", \"1\"}}};\n        filter->callbacks_->encodeTrailers(std::move(response_trailers));\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestAccessLogWithInvalidRequest) {\n  setup(false, \"\");\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n        callbacks.addAccessLogHandler(handler);\n      }));\n\n  EXPECT_CALL(*handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        EXPECT_TRUE(stream_info.responseCode());\n        EXPECT_EQ(stream_info.responseCode().value(), uint32_t(400));\n        EXPECT_EQ(\"missing_host_header\", stream_info.responseCodeDetails().value());\n        EXPECT_NE(nullptr, stream_info.downstreamLocalAddress());\n        EXPECT_NE(nullptr, stream_info.downstreamRemoteAddress());\n        EXPECT_NE(nullptr, 
stream_info.downstreamDirectRemoteAddress());\n        EXPECT_EQ(nullptr, stream_info.routeEntry());\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        // These request headers are missing the necessary \":host\"\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"}, {\":path\", \"/\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n        data.drain(0);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input;\n  conn_manager_->onData(fake_input, false);\n}\n\nclass StreamErrorOnInvalidHttpMessageTest : public HttpConnectionManagerImplTest {\npublic:\n  void sendInvalidRequestAndVerifyConnectionState(bool stream_error_on_invalid_http_message) {\n    setup(false, \"\");\n\n    EXPECT_CALL(*codec_, dispatch(_))\n        .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n          decoder_ = &conn_manager_->newStream(response_encoder_);\n\n          // These request headers are missing the necessary \":host\"\n          RequestHeaderMapPtr headers{\n              new TestRequestHeaderMapImpl{{\":method\", \"GET\"}, {\":path\", \"/\"}}};\n          decoder_->decodeHeaders(std::move(headers), true);\n          data.drain(0);\n          return Http::okStatus();\n        }));\n\n    auto* filter = new MockStreamFilter();\n    EXPECT_CALL(filter_factory_, createFilterChain(_))\n        .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n          callbacks.addStreamFilter(StreamFilterSharedPtr{filter});\n        }));\n    EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));\n    EXPECT_CALL(*filter, setEncoderFilterCallbacks(_));\n\n    // codec stream error\n    EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage())\n        .WillOnce(Return(stream_error_on_invalid_http_message));\n    
EXPECT_CALL(*filter, encodeHeaders(_, true));\n    EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n        .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void {\n          EXPECT_EQ(\"400\", headers.getStatusValue());\n          EXPECT_EQ(\"missing_host_header\",\n                    filter->decoder_callbacks_->streamInfo().responseCodeDetails().value());\n          if (!stream_error_on_invalid_http_message) {\n            EXPECT_NE(nullptr, headers.Connection());\n            EXPECT_EQ(\"close\", headers.getConnectionValue());\n          } else {\n            EXPECT_EQ(nullptr, headers.Connection());\n          }\n        }));\n\n    EXPECT_CALL(*filter, onStreamComplete());\n    EXPECT_CALL(*filter, onDestroy());\n\n    Buffer::OwnedImpl fake_input;\n    conn_manager_->onData(fake_input, false);\n  }\n};\n\nTEST_F(StreamErrorOnInvalidHttpMessageTest, ConnectionTerminatedIfCodecStreamErrorIsFalse) {\n  sendInvalidRequestAndVerifyConnectionState(false);\n}\n\nTEST_F(StreamErrorOnInvalidHttpMessageTest, ConnectionOpenIfCodecStreamErrorIsTrue) {\n  sendInvalidRequestAndVerifyConnectionState(true);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestAccessLogSsl) {\n  setup(true, \"\");\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n        callbacks.addAccessLogHandler(handler);\n      }));\n\n  EXPECT_CALL(*handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        EXPECT_TRUE(stream_info.responseCode());\n        EXPECT_EQ(stream_info.responseCode().value(), uint32_t(200));\n        
EXPECT_NE(nullptr, stream_info.downstreamLocalAddress());\n        EXPECT_NE(nullptr, stream_info.downstreamRemoteAddress());\n        EXPECT_NE(nullptr, stream_info.downstreamDirectRemoteAddress());\n        EXPECT_NE(nullptr, stream_info.downstreamSslConnection());\n        EXPECT_NE(nullptr, stream_info.routeEntry());\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n        ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{\"x-trailer\", \"1\"}}};\n        filter->callbacks_->encodeTrailers(std::move(response_trailers));\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, DoNotStartSpanIfTracingIsNotEnabled) {\n  setup(false, \"\");\n\n  // Disable tracing.\n  tracing_config_.reset();\n\n  EXPECT_CALL(*tracer_, startSpan_(_, _, _, _)).Times(0);\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"tracing.global_enabled\",\n                                             An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillByDefault(Return(true));\n\n  
std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                         {\":authority\", \"host\"},\n                                         {\":path\", \"/\"},\n                                         {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, NoPath) {\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":method\", \"NOT_CONNECT\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"404\", headers.getStatusValue());\n      
}));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\n// No idle timeout when route idle timeout is implied at both global and\n// per-route level. The connection manager config is responsible for managing\n// the default configuration aspects.\nTEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNotConfigured) {\n  setup(false, \"\");\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_(_)).Times(0);\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n        decoder_->decodeHeaders(std::move(headers), false);\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(0U, stats_.named_.downstream_rq_idle_timeout_.value());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// When the global timeout is configured, the timer is enabled before we receive\n// headers, if it fires we don't faceplant.\nTEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutGlobal) {\n  stream_idle_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {\n    Event::MockTimer* idle_timer = setUpTimer();\n    EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _));\n    conn_manager_->newStream(response_encoder_);\n\n    // Expect resetIdleTimer() to be called for the response\n    // encodeHeaders()/encodeData().\n    EXPECT_CALL(*idle_timer, enableTimer(_, _)).Times(2);\n    EXPECT_CALL(*idle_timer, disableTimer());\n    idle_timer->invokeCallback();\n    
return Http::okStatus();\n  }));\n\n  // 408 direct response after timeout.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"408\", headers.getStatusValue());\n      }));\n  std::string response_body;\n  EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(\"stream timeout\", response_body);\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_idle_timeout_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, AccessEncoderRouteBeforeHeadersArriveOnIdleTimeout) {\n  stream_idle_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n\n  std::shared_ptr<MockStreamEncoderFilter> filter(new NiceMock<MockStreamEncoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamEncoderFilter(filter);\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    Event::MockTimer* idle_timer = setUpTimer();\n    EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _));\n    conn_manager_->newStream(response_encoder_);\n\n    // Expect resetIdleTimer() to be called for the response\n    // encodeHeaders()/encodeData().\n    EXPECT_CALL(*idle_timer, enableTimer(_, _)).Times(2);\n    EXPECT_CALL(*idle_timer, disableTimer());\n    // Simulate and idle timeout so that the filter chain gets created.\n    idle_timer->invokeCallback();\n    return Http::okStatus();\n  }));\n\n  // This should not be called as we don't have request headers.\n  EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)).Times(0);\n\n  EXPECT_CALL(*filter, encodeHeaders(_, _))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        // Under 
heavy load it is possible that stream timeout will be reached before any headers\n        // were received. Envoy will create a local reply that will go through the encoder filter\n        // chain. We want to make sure that encoder filters get a null route object.\n        auto route = filter->callbacks_->route();\n        EXPECT_EQ(route.get(), nullptr);\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*filter, encodeData(_, _));\n  EXPECT_CALL(*filter, encodeComplete());\n\n  EXPECT_CALL(*filter, onStreamComplete());\n  EXPECT_CALL(*filter, onDestroy());\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, _));\n  EXPECT_CALL(response_encoder_, encodeData(_, _));\n\n  Buffer::OwnedImpl fake_input;\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) {\n  stream_idle_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {\n    Event::MockTimer* idle_timer = setUpTimer();\n    EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _));\n    conn_manager_->newStream(response_encoder_);\n\n    // Expect resetIdleTimer() to be called for the response\n    // encodeHeaders()/encodeData().\n    EXPECT_CALL(*idle_timer, enableTimer(_, _)).Times(2);\n    EXPECT_CALL(*idle_timer, disableTimer());\n    idle_timer->invokeCallback();\n    return Http::okStatus();\n  }));\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());\n\n  // 408 direct response after timeout.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"408\", headers.getStatusValue());\n      }));\n\n  std::string response_body;\n  EXPECT_CALL(response_encoder_, 
encodeData(_, true)).WillOnce(AddBufferToString(&response_body));\n\n  EXPECT_CALL(*handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        EXPECT_TRUE(stream_info.responseCode());\n        EXPECT_TRUE(stream_info.hasAnyResponseFlag());\n        EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout));\n      }));\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n        callbacks.addAccessLogHandler(handler);\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(\"stream timeout\", response_body);\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_idle_timeout_.value());\n}\n\n// Test timeout variants.\nTEST_F(HttpConnectionManagerImplTest, DurationTimeout) {\n  stream_idle_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n  setupFilterChain(1, 0);\n  RequestHeaderMap* latched_headers = nullptr;\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n\n  // Create the stream.\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        Event::MockTimer* idle_timer = setUpTimer();\n        EXPECT_CALL(*idle_timer, enableTimer(_, _));\n        RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);\n        EXPECT_CALL(*idle_timer, enableTimer(_, _));\n        EXPECT_CALL(*idle_timer, disableTimer());\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n        latched_headers = headers.get();\n        decoder->decodeHeaders(std::move(headers), false);\n\n   
     data.drain(4);\n        return Http::okStatus();\n      }));\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  // Clear and refresh the route cache (checking clusterInfo refreshes the route cache)\n  decoder_filters_[0]->callbacks_->clearRouteCache();\n  decoder_filters_[0]->callbacks_->clusterInfo();\n\n  Event::MockTimer* timer = setUpTimer();\n\n  // Set a max duration of 30ms and make sure a 30ms timer is set.\n  {\n    EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(30), _));\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, maxStreamDuration())\n        .Times(2)\n        .WillRepeatedly(Return(std::chrono::milliseconds(30)));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  // Clear the timeout and make sure the timer is disabled.\n  {\n    EXPECT_CALL(*timer, disableTimer());\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, maxStreamDuration())\n        .Times(1)\n        .WillRepeatedly(Return(absl::nullopt));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  // With no route timeout, but HCM defaults, the HCM defaults will be used.\n  {\n    max_stream_duration_ = std::chrono::milliseconds(17);\n    EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(17), _));\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, maxStreamDuration())\n        .Times(1)\n        .WillRepeatedly(Return(absl::nullopt));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n    max_stream_duration_ = absl::nullopt;\n  }\n\n  // Add a gRPC header, but not a gRPC timeout and verify the timer is unchanged.\n  latched_headers->setGrpcTimeout(\"1M\");\n  {\n    EXPECT_CALL(*timer, disableTimer());\n    
EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, maxStreamDuration())\n        .Times(1)\n        .WillRepeatedly(Return(absl::nullopt));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  // With a gRPC header of 1M and a gRPC header max of 0, respect the gRPC header.\n  {\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(0)));\n    EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(60000), _));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  // With a gRPC header and a larger gRPC header cap, respect the gRPC header.\n  {\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(20000000)));\n    EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(60000), _));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  // With a gRPC header and a small gRPC header cap, use the cap.\n  {\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(20)));\n    EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(20), _));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  latched_headers->setGrpcTimeout(\"0m\");\n  // With a gRPC header of 0, use the header\n  {\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(20)));\n    
EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(0), _));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  latched_headers->setGrpcTimeout(\"1M\");\n  // With a timeout of 20ms and an offset of 10ms, set a timeout for 10ms.\n  {\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(20)));\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_,\n                grpcTimeoutHeaderOffset())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(10)));\n    EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(10), _));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  // With a timeout of 20ms and an offset of 30ms, set a timeout for 0ms\n  {\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(20)));\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_,\n                grpcTimeoutHeaderOffset())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(30)));\n    EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(0), _));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  // With a gRPC timeout of 20ms, and 5ms used already when the route was\n  // refreshed, set a timer for 15ms.\n  {\n    test_time_.timeSystem().setMonotonicTime(MonotonicTime(std::chrono::milliseconds(5)));\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(20)));\n    
EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_,\n                grpcTimeoutHeaderOffset())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(absl::nullopt));\n    EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(15), _));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  // With a gRPC timeout of 20ms, and 25ms used already when the route was\n  // refreshed, set a timer for now (0ms)\n  {\n    test_time_.timeSystem().setMonotonicTime(MonotonicTime(std::chrono::milliseconds(25)));\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(std::chrono::milliseconds(20)));\n    EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_,\n                grpcTimeoutHeaderOffset())\n        .Times(AnyNumber())\n        .WillRepeatedly(Return(absl::nullopt));\n    EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(0), _));\n    decoder_filters_[0]->callbacks_->clearRouteCache();\n    decoder_filters_[0]->callbacks_->clusterInfo();\n  }\n\n  // Cleanup.\n  EXPECT_CALL(*timer, disableTimer());\n  EXPECT_CALL(*decoder_filters_[0], onStreamComplete());\n  EXPECT_CALL(*decoder_filters_[0], onDestroy());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Per-route timeouts override the global stream idle timeout.\nTEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteOverride) {\n  stream_idle_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n  ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout())\n      .WillByDefault(Return(std::chrono::milliseconds(30)));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        Event::MockTimer* idle_timer = setUpTimer();\n        
EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _));\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n        EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(30), _));\n        EXPECT_CALL(*idle_timer, disableTimer());\n        decoder_->decodeHeaders(std::move(headers), false);\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(0U, stats_.named_.downstream_rq_idle_timeout_.value());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Per-route zero timeout overrides the global stream idle timeout.\nTEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteZeroOverride) {\n  stream_idle_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n  ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout())\n      .WillByDefault(Return(std::chrono::milliseconds(0)));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        Event::MockTimer* idle_timer = setUpTimer();\n        EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _));\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n        EXPECT_CALL(*idle_timer, disableTimer());\n        decoder_->decodeHeaders(std::move(headers), false);\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(0U, 
stats_.named_.downstream_rq_idle_timeout_.value());\n\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Validate the per-stream idle timeout after having sent downstream headers.\nTEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders) {\n  setup(false, \"\");\n  ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout())\n      .WillByDefault(Return(std::chrono::milliseconds(10)));\n\n  // Codec sends downstream request headers.\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n\n    Event::MockTimer* idle_timer = setUpTimer();\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    // Expect resetIdleTimer() to be called for the response\n    // encodeHeaders()/encodeData().\n    EXPECT_CALL(*idle_timer, enableTimer(_, _)).Times(2);\n    EXPECT_CALL(*idle_timer, disableTimer());\n    idle_timer->invokeCallback();\n\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  // 408 direct response after timeout.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"408\", headers.getStatusValue());\n      }));\n  std::string response_body;\n  EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(\"stream timeout\", response_body);\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_idle_timeout_.value());\n}\n\n// Validate the per-stream idle timer is properly disabled when the stream terminates 
normally.\nTEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNormalTermination) {\n  setup(false, \"\");\n  ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout())\n      .WillByDefault(Return(std::chrono::milliseconds(10)));\n\n  // Codec sends downstream request headers.\n  Event::MockTimer* idle_timer = setUpTimer();\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(*idle_timer, disableTimer());\n  conn_manager_->onEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(0U, stats_.named_.downstream_rq_idle_timeout_.value());\n}\n\n// Validate the per-stream idle timeout after having sent downstream\n// headers+body.\nTEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeadersAndBody) {\n  setup(false, \"\");\n  ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout())\n      .WillByDefault(Return(std::chrono::milliseconds(10)));\n\n  // Codec sends downstream request headers.\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n\n    Event::MockTimer* idle_timer = setUpTimer();\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    
EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    decoder_->decodeData(data, false);\n\n    // Expect resetIdleTimer() to be called for the response\n    // encodeHeaders()/encodeData().\n    EXPECT_CALL(*idle_timer, enableTimer(_, _)).Times(2);\n    EXPECT_CALL(*idle_timer, disableTimer());\n    idle_timer->invokeCallback();\n\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  // 408 direct response after timeout.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"408\", headers.getStatusValue());\n      }));\n  std::string response_body;\n  EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(\"stream timeout\", response_body);\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_idle_timeout_.value());\n}\n\n// Validate the per-stream idle timeout after upstream headers have been sent.\nTEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterUpstreamHeaders) {\n  setup(false, \"\");\n  ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout())\n      .WillByDefault(Return(std::chrono::milliseconds(10)));\n\n  // Store the basic request encoder during filter chain setup.\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_));\n\n  // Codec sends downstream request headers, upstream response headers are\n  // encoded.\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = 
&conn_manager_->newStream(response_encoder_);\n\n    Event::MockTimer* idle_timer = setUpTimer();\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    filter->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n    EXPECT_CALL(*idle_timer, disableTimer());\n    idle_timer->invokeCallback();\n\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  // 200 upstream response.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"200\", headers.getStatusValue());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_idle_timeout_.value());\n}\n\n// Validate the per-stream idle timeout after a sequence of header/data events.\nTEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) {\n  setup(false, \"\");\n  ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout())\n      .WillByDefault(Return(std::chrono::milliseconds(10)));\n  proxy_100_continue_ = true;\n\n  // Store the basic request encoder during filter chain setup.\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_));\n\n  // Codec sends downstream request headers, upstream 
response headers are\n  // encoded, data events happen in various directions.\n  Event::MockTimer* idle_timer = setUpTimer();\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    ResponseHeaderMapPtr response_continue_headers{\n        new TestResponseHeaderMapImpl{{\":status\", \"100\"}}};\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    filter->callbacks_->encode100ContinueHeaders(std::move(response_continue_headers));\n\n    ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n    filter->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    decoder_->decodeData(data, false);\n\n    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"foo\", \"bar\"}}};\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    decoder_->decodeTrailers(std::move(trailers));\n\n    Buffer::OwnedImpl fake_response(\"world\");\n    EXPECT_CALL(*idle_timer, enableTimer(_, _));\n    filter->callbacks_->encodeData(fake_response, false);\n\n    EXPECT_CALL(*idle_timer, disableTimer());\n    idle_timer->invokeCallback();\n\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  // 100 continue.\n  EXPECT_CALL(response_encoder_, encode100ContinueHeaders(_));\n\n  // 200 upstream response.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"200\", 
headers.getStatusValue());\n      }));\n\n  std::string response_body;\n  EXPECT_CALL(response_encoder_, encodeData(_, false)).WillOnce(AddBufferToString(&response_body));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_idle_timeout_.value());\n  EXPECT_EQ(\"world\", response_body);\n}\n\nTEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledByDefault) {\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0);\n    conn_manager_->newStream(response_encoder_);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledIfSetToZero) {\n  request_timeout_ = std::chrono::milliseconds(0);\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0);\n    conn_manager_->newStream(response_encoder_);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, RequestTimeoutValidlyConfigured) {\n  request_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    Event::MockTimer* request_timer = setUpTimer();\n    EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _));\n    EXPECT_CALL(*request_timer, disableTimer());\n\n    
conn_manager_->newStream(response_encoder_);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, RequestTimeoutCallbackDisarmsAndReturns408) {\n  request_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n\n  std::string response_body;\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    Event::MockTimer* request_timer = setUpTimer();\n    EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1);\n    EXPECT_CALL(*request_timer, disableTimer()).Times(AtLeast(1));\n\n    EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n        .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n          EXPECT_EQ(\"408\", headers.getStatusValue());\n        }));\n    EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body));\n\n    conn_manager_->newStream(response_encoder_);\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, setTrackedObject(_)).Times(2);\n    request_timer->invokeCallback();\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false); // kick off request\n\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_timeout_.value());\n  EXPECT_EQ(\"request timeout\", response_body);\n}\n\nTEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteRequestWithHeader) {\n  request_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    Event::MockTimer* request_timer = setUpTimer();\n    EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1);\n    EXPECT_CALL(*request_timer, disableTimer()).Times(1);\n\n 
   decoder_ = &conn_manager_->newStream(response_encoder_);
    RequestHeaderMapPtr headers{
        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};

    // The second parameter 'false' leaves the stream open.
    decoder_->decodeHeaders(std::move(headers), false);
    return Http::okStatus();
  }));

  Buffer::OwnedImpl fake_input("1234");
  conn_manager_->onData(fake_input, false); // kick off request

  EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value());

  expectOnDestroy();
  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);
}

TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithHeader) {
  request_timeout_ = std::chrono::milliseconds(10);
  setup(false, "");

  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
    Event::MockTimer* request_timer = setUpTimer();
    EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1);

    decoder_ = &conn_manager_->newStream(response_encoder_);
    RequestHeaderMapPtr headers{
        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};

    EXPECT_CALL(*request_timer, disableTimer()).Times(2);
    decoder_->decodeHeaders(std::move(headers), true);
    return Http::okStatus();
  }));

  Buffer::OwnedImpl fake_input("1234");
  conn_manager_->onData(fake_input, false); // kick off request

  EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value());

  expectOnDestroy();
  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);
}

TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithData) {
  request_timeout_ = std::chrono::milliseconds(10);
  setup(false, "");

  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {
    Event::MockTimer* 
request_timer = setUpTimer();\n    EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1);\n\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"POST\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    EXPECT_CALL(*request_timer, disableTimer()).Times(2);\n    decoder_->decodeData(data, true);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value());\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithTrailers) {\n  request_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    Event::MockTimer* request_timer = setUpTimer();\n    EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1);\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n    decoder_->decodeData(data, false);\n\n    EXPECT_CALL(*request_timer, disableTimer()).Times(2);\n    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"foo\", \"bar\"}}};\n    decoder_->decodeTrailers(std::move(trailers));\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value());\n\n  expectOnDestroy();\n  
filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) {\n  request_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, _));\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    Event::MockTimer* request_timer = setUpTimer();\n    EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1);\n\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    EXPECT_CALL(*request_timer, disableTimer()).Times(2);\n    ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n    filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n    filter->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false); // kick off request\n\n  EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value());\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnConnectionTermination) {\n  request_timeout_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n\n  Event::MockTimer* request_timer = setUpTimer();\n  EXPECT_CALL(*codec_, 
dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n\n    decoder_->decodeHeaders(std::move(headers), false);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n\n  EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1);\n  conn_manager_->onData(fake_input, false); // kick off request\n\n  EXPECT_CALL(*request_timer, disableTimer()).Times(1);\n  EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value());\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, MaxStreamDurationDisabledIfSetToZero) {\n  max_stream_duration_ = std::chrono::milliseconds(0);\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0);\n    conn_manager_->newStream(response_encoder_);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false); // kick off request\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, MaxStreamDurationValidlyConfigured) {\n  max_stream_duration_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    Event::MockTimer* duration_timer = setUpTimer();\n\n    EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _));\n    EXPECT_CALL(*duration_timer, disableTimer());\n    conn_manager_->newStream(response_encoder_);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  
conn_manager_->onData(fake_input, false); // kick off request\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackResetStream) {\n  max_stream_duration_ = std::chrono::milliseconds(10);\n  setup(false, \"\");\n  Event::MockTimer* duration_timer = setUpTimer();\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _)).Times(1);\n    conn_manager_->newStream(response_encoder_);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false); // kick off request\n\n  EXPECT_CALL(*duration_timer, disableTimer());\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(response_encoder_, encodeData(_, true));\n  duration_timer->invokeCallback();\n\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_max_duration_reached_.value());\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_rx_reset_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, Http10Rejected) {\n  setup(false, \"\");\n  EXPECT_CALL(*codec_, protocol()).Times(AnyNumber()).WillRepeatedly(Return(Protocol::Http10));\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":method\", \"GET\"}, {\":path\", \"/\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"426\", headers.getStatusValue());\n        EXPECT_EQ(\"close\", headers.getConnectionValue());\n      }));\n\n  Buffer::OwnedImpl 
fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, Http10ConnCloseLegacy) {\n  http1_settings_.accept_http_10_ = true;\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fixed_connection_close\", \"false\"}});\n  setup(false, \"\");\n  EXPECT_CALL(*codec_, protocol()).Times(AnyNumber()).WillRepeatedly(Return(Protocol::Http10));\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host:80\"}, {\":method\", \"CONNECT\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"close\", headers.getConnectionValue());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, ProxyConnectLegacyClose) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fixed_connection_close\", \"false\"}});\n  setup(false, \"\");\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host:80\"}, {\":method\", \"CONNECT\"}, {\"proxy-connection\", \"close\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& 
headers, bool) -> void {\n        EXPECT_EQ(\"close\", headers.getConnectionValue());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, ConnectLegacyClose) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fixed_connection_close\", \"false\"}});\n  setup(false, \"\");\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host\"}, {\":method\", \"CONNECT\"}, {\"connection\", \"close\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"close\", headers.getConnectionValue());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackNotCalledIfResetStreamValidly) {\n  max_stream_duration_ = std::chrono::milliseconds(5000);\n  setup(false, \"\");\n  Event::MockTimer* duration_timer = setUpTimer();\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _)).Times(1);\n    conn_manager_->newStream(response_encoder_);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false); // kick off request\n\n  EXPECT_CALL(*duration_timer, disableTimer());\n  conn_manager_->onEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(0U, stats_.named_.downstream_rq_max_duration_reached_.value());\n 
 EXPECT_EQ(1U, stats_.named_.downstream_rq_rx_reset_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, RejectWebSocketOnNonWebSocketRoute) {\n  setup(false, \"\");\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{\":authority\", \"host\"},\n                                                             {\":method\", \"GET\"},\n                                                             {\":path\", \"/\"},\n                                                             {\"connection\", \"Upgrade\"},\n                                                             {\"upgrade\", \"websocket\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n    // Try sending trailers after the headers which will be rejected, just to\n    // test the HCM logic that further decoding will not be passed to the\n    // filters once the early response path is kicked off.\n    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"bazzz\", \"bar\"}}};\n    decoder_->decodeTrailers(std::move(trailers));\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"403\", headers.getStatusValue());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_ws_on_non_ws_route_.value());\n}\n\n// Make sure for upgrades, we do not append Connection: Close when draining.\nTEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) {\n  setup(false, \"envoy-custom-server\", false);\n\n  // Store the basic request encoder during filter chain setup.\n  auto* filter = new MockStreamFilter();\n  EXPECT_CALL(drain_close_, drainClose()).WillOnce(Return(true));\n\n  
EXPECT_CALL(*filter, decodeHeaders(_, false))\n      .WillRepeatedly(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus {\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(*filter, encodeHeaders(_, false))\n      .WillRepeatedly(Invoke(\n          [&](HeaderMap&, bool) -> FilterHeadersStatus { return FilterHeadersStatus::Continue; }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_NE(nullptr, headers.Connection());\n        EXPECT_EQ(\"upgrade\", headers.getConnectionValue());\n      }));\n\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));\n  EXPECT_CALL(*filter, setEncoderFilterCallbacks(_));\n\n  EXPECT_CALL(filter_factory_, createUpgradeFilterChain(_, _, _))\n      .WillRepeatedly(Invoke([&](absl::string_view, const Http::FilterChainFactory::UpgradeMap*,\n                                 FilterChainFactoryCallbacks& callbacks) -> bool {\n        callbacks.addStreamFilter(StreamFilterSharedPtr{filter});\n        return true;\n      }));\n\n  // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers\n  // only request into it. 
Then we respond into the filter.\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{\":authority\", \"host\"},\n                                                                 {\":method\", \"GET\"},\n                                                                 {\":path\", \"/\"},\n                                                                 {\"connection\", \"Upgrade\"},\n                                                                 {\"upgrade\", \"foo\"}}};\n        decoder_->decodeHeaders(std::move(headers), false);\n\n        filter->decoder_callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{\n            {\":status\", \"101\"}, {\"Connection\", \"upgrade\"}, {\"upgrade\", \"foo\"}}};\n        filter->decoder_callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Kick off the incoming data. 
Use extra data which should cause a redispatch.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(*filter, onStreamComplete());\n  EXPECT_CALL(*filter, onDestroy());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Make sure CONNECT requests hit the upgrade filter path.\nTEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) {\n  setup(false, \"envoy-custom-server\", false);\n\n  EXPECT_CALL(filter_factory_, createUpgradeFilterChain(\"CONNECT\", _, _))\n      .WillRepeatedly(Return(true));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":method\", \"CONNECT\"}}};\n        decoder_->decodeHeaders(std::move(headers), false);\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Kick off the incoming data. 
Use extra data which should cause a redispatch.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, ConnectWithEmptyPath) {\n  setup(false, \"envoy-custom-server\", false);\n\n  EXPECT_CALL(filter_factory_, createUpgradeFilterChain(\"CONNECT\", _, _))\n      .WillRepeatedly(Return(true));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":authority\", \"host\"}, {\":path\", \"\"}, {\":method\", \"CONNECT\"}}};\n        decoder_->decodeHeaders(std::move(headers), false);\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  // Kick off the incoming data. Use extra data which should cause a redispatch.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy(false);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, ConnectLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.stop_faking_paths\", \"false\"}});\n\n  setup(false, \"envoy-custom-server\", false);\n\n  EXPECT_CALL(filter_factory_, createUpgradeFilterChain(\"CONNECT\", _, _))\n      .WillRepeatedly(Return(false));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n        RequestHeaderMapPtr headers{\n            new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":method\", \"CONNECT\"}}};\n        decoder_->decodeHeaders(std::move(headers), 
false);\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, _))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"403\", headers.getStatusValue());\n      }));\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/10138\nTEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(1, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  Buffer::OwnedImpl fake_input;\n  conn_manager_->onData(fake_input, false);\n\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  EXPECT_CALL(drain_close_, drainClose()).WillOnce(Return(true));\n  EXPECT_CALL(*codec_, shutdownNotice());\n  Event::MockTimer* drain_timer = setUpTimer();\n  EXPECT_CALL(*drain_timer, enableTimer(_, _));\n  expectOnDestroy();\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n  // Fake a protocol error that races with the drain timeout. 
This will cause a local close.\n  // Also fake the local close not closing immediately.\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Return(codecProtocolError(\"protocol error\")));\n  EXPECT_CALL(*drain_timer, disableTimer());\n  EXPECT_CALL(filter_callbacks_.connection_,\n              close(Network::ConnectionCloseType::FlushWriteAndDelay))\n      .WillOnce(Return());\n  conn_manager_->onData(fake_input, false);\n\n  // Now fire the close event which should have no effect as all close work has already been done.\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest,\n       FilterThatWaitsForBodyCanBeCalledAfterFilterThatAddsBodyEvenIfItIsNotLast) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    return Http::okStatus();\n  }));\n\n  // 3 filters:\n  // 1st filter adds a body\n  // 2nd filter waits for the body\n  // 3rd filter simulates router filter.\n  setupFilterChain(3, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus {\n        Buffer::OwnedImpl body(\"body\");\n        decoder_filters_[0]->callbacks_->addDecodedData(body, false);\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Invoke([](RequestHeaderMap&, bool) -> FilterHeadersStatus {\n        return FilterHeadersStatus::StopIteration;\n      }));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))\n      .WillOnce(Invoke(\n          
[](Buffer::Instance&, bool) -> FilterDataStatus { return FilterDataStatus::Continue; }));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false))\n      .WillOnce(Invoke([](RequestHeaderMap&, bool) -> FilterHeadersStatus {\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[2], decodeData(_, true))\n      .WillOnce(Invoke(\n          [](Buffer::Instance&, bool) -> FilterDataStatus { return FilterDataStatus::Continue; }));\n  EXPECT_CALL(*decoder_filters_[2], decodeComplete());\n\n  Buffer::OwnedImpl fake_input;\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, DrainClose) {\n  setup(true, \"\");\n\n  MockStreamDecoderFilter* filter = new NiceMock<MockStreamDecoderFilter>();\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});\n      }));\n\n  EXPECT_CALL(*filter, decodeHeaders(_, true))\n      .WillOnce(Invoke([](RequestHeaderMap& headers, bool) -> FilterHeadersStatus {\n        EXPECT_NE(nullptr, headers.ForwardedFor());\n        EXPECT_EQ(\"https\", headers.getForwardedProtoValue());\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input;\n  conn_manager_->onData(fake_input, false);\n\n  ResponseHeaderMapPtr response_headers{new 
TestResponseHeaderMapImpl{{\":status\", \"300\"}}};\n  Event::MockTimer* drain_timer = setUpTimer();\n  EXPECT_CALL(*drain_timer, enableTimer(_, _));\n  EXPECT_CALL(drain_close_, drainClose()).WillOnce(Return(true));\n  EXPECT_CALL(*codec_, shutdownNotice());\n  filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n  EXPECT_EQ(ssl_connection_.get(), filter->callbacks_->connection()->ssl().get());\n\n  EXPECT_CALL(*codec_, goAway());\n  EXPECT_CALL(filter_callbacks_.connection_,\n              close(Network::ConnectionCloseType::FlushWriteAndDelay));\n  EXPECT_CALL(*drain_timer, disableTimer());\n  drain_timer->invokeCallback();\n\n  EXPECT_EQ(1U, stats_.named_.downstream_cx_drain_close_.value());\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_3xx_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_3xx_.value());\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_completed_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_completed_.value());\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_test_2.cc",
    "content": "#include \"test/common/http/conn_manager_impl_test_base.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/test_runtime.h\"\n\nusing testing::_;\nusing testing::AtLeast;\nusing testing::HasSubstr;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::Mock;\nusing testing::Property;\nusing testing::Ref;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Http {\n\nTEST_F(HttpConnectionManagerImplTest, ResponseBeforeRequestComplete) {\n  setup(false, \"envoy-server-test\");\n  setupFilterChain(1, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  startRequest();\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_NE(nullptr, headers.Server());\n        EXPECT_EQ(\"envoy-server-test\", headers.getServerValue());\n      }));\n  EXPECT_CALL(*decoder_filters_[0], onStreamComplete());\n  EXPECT_CALL(*decoder_filters_[0], onDestroy());\n  EXPECT_CALL(filter_callbacks_.connection_,\n              close(Network::ConnectionCloseType::FlushWriteAndDelay));\n\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n}\n\nTEST_F(HttpConnectionManagerImplTest, DisconnectOnProxyConnectionDisconnect) {\n  setup(false, \"envoy-server-test\");\n\n  setupFilterChain(1, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n\n  startRequest();\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        
EXPECT_NE(nullptr, headers.Connection());\n        EXPECT_EQ(\"close\", headers.getConnectionValue());\n        EXPECT_EQ(nullptr, headers.ProxyConnection());\n      }));\n  EXPECT_CALL(*decoder_filters_[0], onStreamComplete());\n  EXPECT_CALL(*decoder_filters_[0], onDestroy());\n  EXPECT_CALL(filter_callbacks_.connection_,\n              close(Network::ConnectionCloseType::FlushWriteAndDelay));\n\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n}\n\nTEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) {\n  setup(false, \"\");\n\n  // This is like ResponseBeforeRequestComplete, but it tests the case where we start the reply\n  // before the request completes, but don't finish the reply until after the request completes.\n  MockStreamDecoderFilter* filter = new NiceMock<MockStreamDecoderFilter>();\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});\n      }));\n\n  EXPECT_CALL(*filter, decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n\n  // Start the request\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n    return Http::okStatus();\n  }));\n\n  Buffer::OwnedImpl fake_input(\"hello\");\n  conn_manager_->onData(fake_input, false);\n\n  // Start the response\n  ResponseHeaderMapPtr response_headers{new 
TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_NE(nullptr, headers.Server());\n        EXPECT_EQ(\"\", headers.getServerValue());\n      }));\n  filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  filter->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n  // Finish the request.\n  EXPECT_CALL(*filter, decodeData(_, true));\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_->decodeData(data, true);\n    return Http::okStatus();\n  }));\n\n  conn_manager_->onData(fake_input, false);\n\n  // Since we started the response before the request was complete, we will still close the\n  // connection since we already sent a connection: close header. We won't \"reset\" the stream\n  // however.\n  EXPECT_CALL(filter_callbacks_.connection_,\n              close(Network::ConnectionCloseType::FlushWriteAndDelay));\n  Buffer::OwnedImpl fake_response(\"world\");\n  filter->callbacks_->encodeData(fake_response, true);\n}\n\nTEST_F(HttpConnectionManagerImplTest, DownstreamDisconnect) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    conn_manager_->newStream(response_encoder_);\n    data.drain(2);\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0);\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  // Now raise a remote disconnection, we should see the filter get reset called.\n  conn_manager_->onEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, DownstreamProtocolError) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, 
dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    conn_manager_->newStream(response_encoder_);\n    return codecProtocolError(\"protocol error\");\n  }));\n\n  EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_));\n  EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0);\n\n  // A protocol exception should result in reset of the streams followed by a remote or local close\n  // depending on whether the downstream client closes the connection prior to the delayed close\n  // timer firing.\n  EXPECT_CALL(filter_callbacks_.connection_,\n              close(Network::ConnectionCloseType::FlushWriteAndDelay));\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAccessLog) {\n  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());\n  access_logs_ = {handler};\n  setup(false, \"\");\n\n  EXPECT_CALL(*handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        EXPECT_FALSE(stream_info.responseCode());\n        EXPECT_TRUE(stream_info.hasAnyResponseFlag());\n        EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError));\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {\n    conn_manager_->newStream(response_encoder_);\n    return codecProtocolError(\"protocol error\");\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAfterHeadersAccessLog) {\n  setup(false, \"\");\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  std::shared_ptr<AccessLog::MockInstance> handler(new 
NiceMock<AccessLog::MockInstance>());\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n        callbacks.addAccessLogHandler(handler);\n      }));\n\n  EXPECT_CALL(*handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        EXPECT_FALSE(stream_info.responseCode());\n        EXPECT_TRUE(stream_info.hasAnyResponseFlag());\n        EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError));\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":method\", \"GET\"}, {\":authority\", \"host\"}, {\":path\", \"/\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n\n    return codecProtocolError(\"protocol error\");\n  }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n}\n\n// Verify that FrameFloodException causes connection to be closed abortively.\nTEST_F(HttpConnectionManagerImplTest, FrameFloodError) {\n  std::shared_ptr<AccessLog::MockInstance> log_handler =\n      std::make_shared<NiceMock<AccessLog::MockInstance>>();\n  access_logs_ = {log_handler};\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    conn_manager_->newStream(response_encoder_);\n    return bufferFloodError(\"too many outbound frames.\");\n  }));\n\n  EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_));\n  EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0);\n\n  // FrameFloodException should result in reset of the streams followed by abortive close.\n  
EXPECT_CALL(filter_callbacks_.connection_,\n              close(Network::ConnectionCloseType::FlushWriteAndDelay));\n\n  EXPECT_CALL(*log_handler, log(_, _, _, _))\n      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        ASSERT_TRUE(stream_info.responseCodeDetails().has_value());\n        EXPECT_EQ(\"codec error: too many outbound frames.\",\n                  stream_info.responseCodeDetails().value());\n      }));\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  EXPECT_LOG_NOT_CONTAINS(\"warning\", \"downstream HTTP flood\",\n                          conn_manager_->onData(fake_input, false));\n\n  EXPECT_TRUE(filter_callbacks_.connection_.streamInfo().hasResponseFlag(\n      StreamInfo::ResponseFlag::DownstreamProtocolError));\n}\n\nTEST_F(HttpConnectionManagerImplTest, IdleTimeoutNoCodec) {\n  // Not used in the test.\n  delete codec_;\n\n  idle_timeout_ = (std::chrono::milliseconds(10));\n  Event::MockTimer* idle_timer = setUpTimer();\n  EXPECT_CALL(*idle_timer, enableTimer(_, _));\n  setup(false, \"\");\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n  EXPECT_CALL(*idle_timer, disableTimer());\n  idle_timer->invokeCallback();\n\n  EXPECT_EQ(1U, stats_.named_.downstream_cx_idle_timeout_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, IdleTimeout) {\n  idle_timeout_ = (std::chrono::milliseconds(10));\n  Event::MockTimer* idle_timer = setUpTimer();\n  EXPECT_CALL(*idle_timer, enableTimer(_, _));\n  setup(false, \"\");\n\n  MockStreamDecoderFilter* filter = new NiceMock<MockStreamDecoderFilter>();\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});\n      }));\n\n  EXPECT_CALL(*idle_timer, disableTimer());\n  
EXPECT_CALL(*filter, decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*filter, decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n\n  startRequest(true, \"hello\");\n\n  EXPECT_CALL(*idle_timer, enableTimer(_, _));\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n  Event::MockTimer* drain_timer = setUpTimer();\n  EXPECT_CALL(*drain_timer, enableTimer(_, _));\n  idle_timer->invokeCallback();\n\n  EXPECT_CALL(*codec_, goAway());\n  EXPECT_CALL(filter_callbacks_.connection_,\n              close(Network::ConnectionCloseType::FlushWriteAndDelay));\n  EXPECT_CALL(*idle_timer, disableTimer());\n  EXPECT_CALL(*drain_timer, disableTimer());\n  drain_timer->invokeCallback();\n\n  EXPECT_EQ(1U, stats_.named_.downstream_cx_idle_timeout_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, ConnectionDurationNoCodec) {\n  // Not used in the test.\n  delete codec_;\n\n  max_connection_duration_ = (std::chrono::milliseconds(10));\n  Event::MockTimer* connection_duration_timer = setUpTimer();\n  EXPECT_CALL(*connection_duration_timer, enableTimer(_, _));\n  setup(false, \"\");\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n  EXPECT_CALL(*connection_duration_timer, disableTimer());\n\n  connection_duration_timer->invokeCallback();\n\n  EXPECT_EQ(1U, stats_.named_.downstream_cx_max_duration_reached_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, ConnectionDuration) {\n  max_connection_duration_ = (std::chrono::milliseconds(10));\n  Event::MockTimer* connection_duration_timer = setUpTimer();\n  EXPECT_CALL(*connection_duration_timer, enableTimer(_, _));\n  setup(false, \"\");\n\n  MockStreamDecoderFilter* filter = new 
NiceMock<MockStreamDecoderFilter>();\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});\n      }));\n\n  EXPECT_CALL(*filter, decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*filter, decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n\n  // Kick off the incoming data.\n  startRequest(true, \"hello\");\n\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n  Event::MockTimer* drain_timer = setUpTimer();\n  EXPECT_CALL(*drain_timer, enableTimer(_, _));\n  connection_duration_timer->invokeCallback();\n\n  EXPECT_CALL(*codec_, goAway());\n  EXPECT_CALL(filter_callbacks_.connection_,\n              close(Network::ConnectionCloseType::FlushWriteAndDelay));\n  EXPECT_CALL(*connection_duration_timer, disableTimer());\n  EXPECT_CALL(*drain_timer, disableTimer());\n  drain_timer->invokeCallback();\n\n  EXPECT_EQ(1U, stats_.named_.downstream_cx_max_duration_reached_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, IntermediateBufferingEarlyResponse) {\n  setup(false, \"\");\n\n  setupFilterChain(2, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // Kick off the request.\n  startRequest(true, \"hello\");\n\n  // Mimic a decoder filter that trapped data and now sends on the headers.\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      
.WillOnce(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus {\n        // Now filter 2 will send a complete response.\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), true,\n                                                       \"details\");\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));\n  expectOnDestroy();\n\n  // Response is already complete so we drop buffered body data when we continue.\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).Times(0);\n  decoder_filters_[0]->callbacks_->continueDecoding();\n}\n\nTEST_F(HttpConnectionManagerImplTest, DoubleBuffering) {\n  setup(false, \"\");\n  setupFilterChain(3, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_data_copy(\"hello\");\n  startRequest(true, \"hello\");\n\n  // Continue iteration and stop and buffer on the 2nd filter.\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  decoder_filters_[0]->callbacks_->continueDecoding();\n\n  // Continue iteration. 
We expect the 3rd filter to not receive double data but for the buffered\n  // data to have been kept inline as it moves through.\n  EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[2], decodeData(BufferEqual(&fake_data_copy), true))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n  EXPECT_CALL(*decoder_filters_[2], decodeComplete());\n  decoder_filters_[1]->callbacks_->continueDecoding();\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) {\n  setup(false, \"\");\n  setupFilterChain(2, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n\n  startRequest();\n\n  // Continue headers only of filter 1.\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  decoder_filters_[0]->callbacks_->continueDecoding();\n\n  // Stop zero byte data.\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  Buffer::OwnedImpl zero;\n  decoder_->decodeData(zero, true);\n\n  // Continue.\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  decoder_filters_[0]->callbacks_->continueDecoding();\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = 
&conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl fake_data(\"hello\");\n    decoder_->decodeData(fake_data, false);\n\n    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"bazzz\", \"bar\"}}};\n    decoder_->decodeTrailers(std::move(trailers));\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(2, 2);\n\n  Http::LowerCaseString trailer_key(\"foo\");\n  std::string trailers_data(\"trailers\");\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_))\n      .WillOnce(Invoke([&](Http::HeaderMap& trailers) -> FilterTrailersStatus {\n        Http::LowerCaseString key(\"foo\");\n        EXPECT_EQ(trailers.get(key), nullptr);\n        return FilterTrailersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  // set up encodeHeaders expectations\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], 
encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n\n  // invoke encodeHeaders\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n\n  // set up encodeData expectations\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n\n  // invoke encodeData\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[0]->callbacks_->encodeData(response_body, false);\n  // set up encodeTrailer expectations\n  EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n\n  EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_))\n      .WillOnce(Invoke([&](Http::HeaderMap& trailers) -> FilterTrailersStatus {\n        // assert that the trailers set in the previous filter was ignored\n        Http::LowerCaseString key(\"foo\");\n        EXPECT_EQ(trailers.get(key), nullptr);\n        return FilterTrailersStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeTrailers(_));\n  expectOnDestroy();\n\n  // invoke encodeTrailers\n  decoder_filters_[0]->callbacks_->encodeTrailers(\n      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{\"some\", \"trailer\"}}});\n}\n\nTEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInDataCallbackNoTrailers) {\n  setup(false, \"\");\n  setupFilterChain(2, 2);\n\n  std::string trailers_data(\"trailers\");\n  Http::LowerCaseString trailer_key(\"foo\");\n  
EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterDataStatus {\n        decoder_filters_[0]->callbacks_->addDecodedTrailers().addCopy(trailer_key, trailers_data);\n        return FilterDataStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // ensure that the second decodeData call sees end_stream = false\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::Continue));\n\n  // since we added trailers, we should see decodeTrailers\n  EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)).WillOnce(Invoke([&](HeaderMap& trailers) {\n    // ensure that we see the trailers set in decodeData\n    Http::LowerCaseString key(\"foo\");\n    auto t = trailers.get(key);\n    ASSERT(t);\n    EXPECT_EQ(t->value(), trailers_data.c_str());\n    return FilterTrailersStatus::Continue;\n  }));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Kick off the request.\n  startRequest(true, \"hello\");\n\n  // set up encodeHeaders expectations\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n\n  // invoke encodeHeaders\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n\n  // set up encodeData expectations\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, true))\n      
.WillOnce(InvokeWithoutArgs([&]() -> FilterDataStatus {\n        encoder_filters_[1]->callbacks_->addEncodedTrailers().addCopy(trailer_key, trailers_data);\n        return FilterDataStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  // ensure encodeData calls after setting header sees end_stream = false\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::Continue));\n\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n\n  // since we added trailers, we should see encodeTrailer callbacks\n  EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)).WillOnce(Invoke([&](HeaderMap& trailers) {\n    // ensure that we see the trailers set in decodeData\n    Http::LowerCaseString key(\"foo\");\n    auto t = trailers.get(key);\n    EXPECT_EQ(t->value(), trailers_data.c_str());\n    return FilterTrailersStatus::Continue;\n  }));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n\n  // Ensure that we call encodeTrailers\n  EXPECT_CALL(response_encoder_, encodeTrailers(_));\n\n  expectOnDestroy();\n  // invoke encodeData\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[0]->callbacks_->encodeData(response_body, true);\n}\n\nTEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl fake_data(\"hello\");\n    decoder_->decodeData(fake_data, false);\n\n    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"foo\", \"bar\"}}};\n    decoder_->decodeTrailers(std::move(trailers));\n    return Http::okStatus();\n  }));\n\n  
setupFilterChain(2, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  Buffer::OwnedImpl trailers_data(\"hello\");\n  EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus {\n        decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, true);\n        return FilterTrailersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(Ref(trailers_data), false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n\n  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  
EXPECT_CALL(*encoder_filters_[0], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[1]->callbacks_->encodeData(response_body, false);\n  EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus {\n        encoder_filters_[1]->callbacks_->addEncodedData(trailers_data, true);\n        return FilterTrailersStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(Ref(trailers_data), false))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeTrailers(_));\n  expectOnDestroy();\n\n  decoder_filters_[1]->callbacks_->encodeTrailers(\n      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{\"some\", \"trailer\"}}});\n}\n\n// Don't send data frames, only headers and trailers.\nTEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_NoDataFrames) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"foo\", \"bar\"}}};\n    decoder_->decodeTrailers(std::move(trailers));\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(2, 1);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, 
false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n\n  Buffer::OwnedImpl trailers_data(\"hello\");\n  EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus {\n        decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, false);\n        return FilterTrailersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n\n  EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus {\n        encoder_filters_[0]->callbacks_->addEncodedData(trailers_data, false);\n        return FilterTrailersStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n  EXPECT_CALL(response_encoder_, encodeTrailers(_));\n  expectOnDestroy();\n\n  decoder_filters_[0]->callbacks_->encodeTrailers(\n      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{\"some\", 
\"trailer\"}}});\n}\n\n// Don't send data frames, only headers and trailers.\nTEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_ContinueAfterCallback) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"foo\", \"bar\"}}};\n    decoder_->decodeTrailers(std::move(trailers));\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(2, 1);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n\n  Buffer::OwnedImpl trailers_data(\"hello\");\n  EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus {\n        decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, false);\n        return FilterTrailersStatus::StopIteration;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  decoder_filters_[0]->callbacks_->continueDecoding();\n\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      
.WillOnce(Return(FilterHeadersStatus::StopIteration));\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n\n  EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus {\n        encoder_filters_[0]->callbacks_->addEncodedData(trailers_data, false);\n        return FilterTrailersStatus::StopIteration;\n      }));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n\n  decoder_filters_[0]->callbacks_->encodeTrailers(\n      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{\"some\", \"trailer\"}}});\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n  EXPECT_CALL(response_encoder_, encodeTrailers(_));\n  expectOnDestroy();\n\n  encoder_filters_[0]->callbacks_->continueEncoding();\n}\n\n// Add*Data during the *Data callbacks.\nTEST_F(HttpConnectionManagerImplTest, FilterAddBodyDuringDecodeData) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl data1(\"hello\");\n    decoder_->decodeData(data1, false);\n\n    Buffer::OwnedImpl data2(\"world\");\n    decoder_->decodeData(data2, true);\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(2, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, false))\n      
.WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> FilterDataStatus {\n        decoder_filters_[0]->callbacks_->addDecodedData(data, true);\n        EXPECT_EQ(decoder_filters_[0]->callbacks_->decodingBuffer()->toString(), \"helloworld\");\n        return FilterDataStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> FilterDataStatus {\n        encoder_filters_[1]->callbacks_->addEncodedData(data, true);\n        EXPECT_EQ(encoder_filters_[1]->callbacks_->encodingBuffer()->toString(), \"goodbye\");\n        return FilterDataStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeData(_, true));\n  expectOnDestroy();\n\n  
decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n  Buffer::OwnedImpl data1(\"good\");\n  decoder_filters_[1]->callbacks_->encodeData(data1, false);\n  Buffer::OwnedImpl data2(\"bye\");\n  decoder_filters_[1]->callbacks_->encodeData(data2, true);\n}\n\nTEST_F(HttpConnectionManagerImplTest, FilterAddBodyInline) {\n  setup(false, \"\");\n  setupFilterChain(2, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        Buffer::OwnedImpl data(\"hello\");\n        decoder_filters_[0]->callbacks_->addDecodedData(data, true);\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Kick off the incoming data.\n  startRequest(true);\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        Buffer::OwnedImpl data(\"hello\");\n        encoder_filters_[1]->callbacks_->addEncodedData(data, true);\n        EXPECT_EQ(5UL, encoder_filters_[0]->callbacks_->encodingBuffer()->length());\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))\n      
.WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeData(_, true));\n  expectOnDestroy();\n\n  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true, \"details\");\n}\n\nTEST_F(HttpConnectionManagerImplTest, Filter) {\n  setup(false, \"\");\n\n  setupFilterChain(3, 2);\n  const std::string fake_cluster1_name = \"fake_cluster1\";\n  const std::string fake_cluster2_name = \"fake_cluster2\";\n\n  std::shared_ptr<Upstream::MockThreadLocalCluster> fake_cluster1 =\n      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();\n  EXPECT_CALL(cluster_manager_, get(_))\n      .WillOnce(Return(fake_cluster1.get()))\n      .WillOnce(Return(nullptr));\n\n  std::shared_ptr<Router::MockRoute> route1 = std::make_shared<NiceMock<Router::MockRoute>>();\n  EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name));\n  std::shared_ptr<Router::MockRoute> route2 = std::make_shared<NiceMock<Router::MockRoute>>();\n  EXPECT_CALL(route2->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster2_name));\n\n  EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _))\n      .WillOnce(Return(route1))\n      .WillOnce(Return(route2))\n      .WillOnce(Return(nullptr));\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route());\n        EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry());\n        EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo());\n        decoder_filters_[0]->callbacks_->clearRouteCache();\n        return FilterHeadersStatus::Continue;\n      }));\n  
EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->route());\n        EXPECT_EQ(route2->routeEntry(), decoder_filters_[1]->callbacks_->streamInfo().routeEntry());\n        // RDS & CDS consistency problem: route2 points to fake_cluster2, which doesn't exist.\n        EXPECT_EQ(nullptr, decoder_filters_[1]->callbacks_->clusterInfo());\n        decoder_filters_[1]->callbacks_->clearRouteCache();\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->clusterInfo());\n        EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->route());\n        EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->streamInfo().routeEntry());\n        return FilterHeadersStatus::StopIteration;\n      }));\n  EXPECT_CALL(*decoder_filters_[2], decodeComplete());\n\n  // Kick off the incoming data.\n  startRequest(true);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) {\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n\n  // Mimic the upstream connection backing up. 
The router would call\n  // onDecoderFilterAboveWriteBufferHighWatermark which should readDisable the stream and increment\n  // stats.\n  EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_));\n  EXPECT_CALL(stream_, readDisable(true));\n  ASSERT(decoder_filters_[0]->callbacks_ != nullptr);\n  decoder_filters_[0]->callbacks_->onDecoderFilterAboveWriteBufferHighWatermark();\n  EXPECT_EQ(1U, stats_.named_.downstream_flow_control_paused_reading_total_.value());\n\n  // Resume the flow of data. When the router buffer drains it calls\n  // onDecoderFilterBelowWriteBufferLowWatermark which should re-enable reads on the stream.\n  EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_));\n  EXPECT_CALL(stream_, readDisable(false));\n  ASSERT(decoder_filters_[0]->callbacks_ != nullptr);\n  decoder_filters_[0]->callbacks_->onDecoderFilterBelowWriteBufferLowWatermark();\n  EXPECT_EQ(1U, stats_.named_.downstream_flow_control_resumed_reading_total_.value());\n\n  // Backup upstream once again.\n  EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_));\n  EXPECT_CALL(stream_, readDisable(true));\n  ASSERT(decoder_filters_[0]->callbacks_ != nullptr);\n  decoder_filters_[0]->callbacks_->onDecoderFilterAboveWriteBufferHighWatermark();\n  EXPECT_EQ(2U, stats_.named_.downstream_flow_control_paused_reading_total_.value());\n\n  // Send a full response.\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));\n  expectOnDestroy();\n  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true, 
\"details\");\n}\n\nTEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksPassedOnWithLazyCreation) {\n  setup(false, \"\");\n\n  // Make sure codec_ is created.\n  EXPECT_CALL(*codec_, dispatch(_));\n  Buffer::OwnedImpl fake_input(\"\");\n  conn_manager_->onData(fake_input, false);\n\n  // Mark the connection manger as backed up before the stream is created.\n  ASSERT_EQ(decoder_filters_.size(), 0);\n  EXPECT_CALL(*codec_, onUnderlyingConnectionAboveWriteBufferHighWatermark());\n  conn_manager_->onAboveWriteBufferHighWatermark();\n\n  // Create the stream. Defer the creation of the filter chain by not sending\n  // complete headers.\n  {\n    setUpBufferLimits();\n    EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n      decoder_ = &conn_manager_->newStream(response_encoder_);\n      // Call the high buffer callbacks as the codecs do.\n      stream_callbacks_->onAboveWriteBufferHighWatermark();\n      return Http::okStatus();\n    }));\n\n    // Send fake data to kick off newStream being created.\n    Buffer::OwnedImpl fake_input2(\"asdf\");\n    conn_manager_->onData(fake_input2, false);\n  }\n\n  // Now set up the filter chain by sending full headers. 
The filters should be\n  // immediately appraised that the low watermark is in effect.\n  {\n    setupFilterChain(2, 2);\n    EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0);\n    EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n      RequestHeaderMapPtr headers{\n          new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n      decoder_->decodeHeaders(std::move(headers), true);\n      return Http::okStatus();\n    }));\n    EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n        .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n          Buffer::OwnedImpl data(\"hello\");\n          decoder_filters_[0]->callbacks_->addDecodedData(data, true);\n          return FilterHeadersStatus::Continue;\n        }));\n    EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n    sendRequestHeadersAndData();\n    ASSERT_GE(decoder_filters_.size(), 1);\n    MockDownstreamWatermarkCallbacks callbacks;\n    EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark());\n    decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks);\n\n    // Ensures that when new callbacks are registered they get invoked immediately\n    // and the already-registered callbacks do not.\n    MockDownstreamWatermarkCallbacks callbacks2;\n    EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark());\n    decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2);\n  }\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithLazyCreation) {\n  setup(false, \"\");\n\n  // Make sure codec_ is created.\n  EXPECT_CALL(*codec_, dispatch(_));\n  Buffer::OwnedImpl fake_input(\"\");\n  conn_manager_->onData(fake_input, false);\n\n  // Mark the connection manger as backed up before the stream is created.\n  ASSERT_EQ(decoder_filters_.size(), 0);\n  EXPECT_CALL(*codec_, 
onUnderlyingConnectionAboveWriteBufferHighWatermark());\n  conn_manager_->onAboveWriteBufferHighWatermark();\n\n  // Create the stream. Defer the creation of the filter chain by not sending\n  // complete headers.\n  {\n    setUpBufferLimits();\n    EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n      decoder_ = &conn_manager_->newStream(response_encoder_);\n      // Call the high buffer callbacks as the codecs do.\n      stream_callbacks_->onAboveWriteBufferHighWatermark();\n      return Http::okStatus();\n    }));\n\n    // Send fake data to kick off newStream being created.\n    Buffer::OwnedImpl fake_input2(\"asdf\");\n    conn_manager_->onData(fake_input2, false);\n  }\n\n  // Now before the filter chain is created, fire the low watermark callbacks\n  // and ensure it is passed down to the stream.\n  ASSERT(stream_callbacks_ != nullptr);\n  EXPECT_CALL(*codec_, onUnderlyingConnectionBelowWriteBufferLowWatermark())\n      .WillOnce(Invoke([&]() -> void { stream_callbacks_->onBelowWriteBufferLowWatermark(); }));\n  conn_manager_->onBelowWriteBufferLowWatermark();\n\n  // Now set up the filter chain by sending full headers. 
The filters should\n  // not get any watermark callbacks.\n  {\n    setupFilterChain(2, 2);\n    EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0);\n    EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n      RequestHeaderMapPtr headers{\n          new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n      decoder_->decodeHeaders(std::move(headers), true);\n      return Http::okStatus();\n    }));\n    EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n        .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n          Buffer::OwnedImpl data(\"hello\");\n          decoder_filters_[0]->callbacks_->addDecodedData(data, true);\n          return FilterHeadersStatus::Continue;\n        }));\n    EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n    sendRequestHeadersAndData();\n    ASSERT_GE(decoder_filters_.size(), 1);\n    MockDownstreamWatermarkCallbacks callbacks;\n    EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0);\n    EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0);\n    decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks);\n  }\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, AlterFilterWatermarkLimits) {\n  initial_buffer_limit_ = 100;\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n\n  // Check initial limits.\n  EXPECT_EQ(initial_buffer_limit_, decoder_filters_[0]->callbacks_->decoderBufferLimit());\n  EXPECT_EQ(initial_buffer_limit_, encoder_filters_[0]->callbacks_->encoderBufferLimit());\n\n  // Check lowering the limits.\n  decoder_filters_[0]->callbacks_->setDecoderBufferLimit(initial_buffer_limit_ - 1);\n  EXPECT_EQ(initial_buffer_limit_ - 1, decoder_filters_[0]->callbacks_->decoderBufferLimit());\n\n  // Check raising the limits.\n  
decoder_filters_[0]->callbacks_->setDecoderBufferLimit(initial_buffer_limit_ + 1);\n  EXPECT_EQ(initial_buffer_limit_ + 1, decoder_filters_[0]->callbacks_->decoderBufferLimit());\n  EXPECT_EQ(initial_buffer_limit_ + 1, encoder_filters_[0]->callbacks_->encoderBufferLimit());\n\n  // Verify turning off buffer limits works.\n  decoder_filters_[0]->callbacks_->setDecoderBufferLimit(0);\n  EXPECT_EQ(0, decoder_filters_[0]->callbacks_->decoderBufferLimit());\n\n  // Once the limits are turned off can be turned on again.\n  decoder_filters_[0]->callbacks_->setDecoderBufferLimit(100);\n  EXPECT_EQ(100, decoder_filters_[0]->callbacks_->decoderBufferLimit());\n\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) {\n  log_handler_ = std::make_shared<NiceMock<AccessLog::MockInstance>>();\n\n  initial_buffer_limit_ = 1;\n  streaming_filter_ = true;\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n\n  // The filter is a streaming filter. Sending 4 bytes should hit the\n  // watermark limit and disable reads on the stream.\n  EXPECT_CALL(stream_, readDisable(true));\n  sendRequestHeadersAndData();\n\n  // Change the limit so the buffered data is below the new watermark. 
The\n  // stream should be read-enabled\n  EXPECT_CALL(stream_, readDisable(false));\n  int buffer_len = decoder_filters_[0]->callbacks_->decodingBuffer()->length();\n  decoder_filters_[0]->callbacks_->setDecoderBufferLimit((buffer_len + 1) * 2);\n\n  // Start the response\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n  MockDownstreamWatermarkCallbacks callbacks;\n  decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks);\n  MockDownstreamWatermarkCallbacks callbacks2;\n  decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2);\n\n  // Now overload the buffer with response data. The downstream watermark\n  // callbacks should be called.\n  EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark());\n  EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark());\n  Buffer::OwnedImpl fake_response(\"A long enough string to go over watermarks\");\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark));\n  decoder_filters_[0]->callbacks_->encodeData(fake_response, false);\n\n  // unregister callbacks2\n  decoder_filters_[0]->callbacks_->removeDownstreamWatermarkCallbacks(callbacks2);\n\n  // Change the limit so the buffered data is below the new watermark.\n  buffer_len = encoder_filters_[1]->callbacks_->encodingBuffer()->length();\n  EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark());\n  EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0);\n  encoder_filters_[1]->callbacks_->setEncoderBufferLimit((buffer_len + 1) * 2);\n\n  EXPECT_CALL(*log_handler_, log(_, _, _, _))\n      
.WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,\n                          const StreamInfo::StreamInfo& stream_info) {\n        EXPECT_FALSE(stream_info.hasAnyResponseFlag());\n      }));\n\n  expectOnDestroy();\n  EXPECT_CALL(stream_, removeCallbacks(_));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimits) {\n  initial_buffer_limit_ = 10;\n  streaming_filter_ = false;\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n\n  // Set the filter to be a buffering filter. Sending any data will hit the\n  // watermark limit and result in a 413 being sent to the user.\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"413\"}, {\"content-length\", \"17\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(HeaderMapEqualRef(&response_headers), false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  Buffer::OwnedImpl data(\"A longer string\");\n  decoder_filters_[0]->callbacks_->addDecodedData(data, false);\n  const auto rc_details = encoder_filters_[1]->callbacks_->streamInfo().responseCodeDetails();\n  EXPECT_EQ(\"request_payload_too_large\", rc_details.value());\n\n  doRemoteClose();\n}\n\n// Return 413 from an intermediate filter and make sure we don't continue the filter chain.\nTEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimitsIntermediateFilter) {\n  InSequence s;\n  initial_buffer_limit_ = 10;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new 
TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl fake_data(\"hello\");\n    decoder_->decodeData(fake_data, false);\n\n    Buffer::OwnedImpl fake_data2(\"world world\");\n    decoder_->decodeData(fake_data2, true);\n    return Http::okStatus();\n  }));\n\n  setUpBufferLimits();\n  setupFilterChain(2, 1);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"413\"}, {\"content-length\", \"17\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(HeaderMapEqualRef(&response_headers), false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  doRemoteClose(false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) {\n  initial_buffer_limit_ = 10;\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n\n  // Start the response without processing the request headers through all\n  // filters.\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      
.WillOnce(Return(FilterHeadersStatus::StopIteration));\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n  // Now overload the buffer with response data. The filter returns\n  // StopIterationAndBuffer, which will trigger an early response.\n\n  expectOnDestroy();\n  Buffer::OwnedImpl fake_response(\"A long enough string to go over watermarks\");\n  // Fake response starts doing through the filter.\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  std::string response_body;\n  // The 500 goes directly to the encoder.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> FilterHeadersStatus {\n        // Make sure this is a 500\n        EXPECT_EQ(\"500\", headers.getStatusValue());\n        // Make sure Envoy standard sanitization has been applied.\n        EXPECT_TRUE(headers.Date() != nullptr);\n        EXPECT_EQ(\"response_payload_too_large\",\n                  decoder_filters_[0]->callbacks_->streamInfo().responseCodeDetails().value());\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body));\n  decoder_filters_[0]->callbacks_->encodeData(fake_response, false);\n  EXPECT_EQ(\"Internal Server Error\", response_body);\n\n  EXPECT_EQ(1U, stats_.named_.rs_too_large_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsAfterHeaders) {\n  initial_buffer_limit_ = 10;\n  setup(false, \"\");\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n\n  // Start the response, and make sure the request headers are fully processed.\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  
EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n  // Now overload the buffer with response data. The filter returns\n  // StopIterationAndBuffer, which will trigger an early reset.\n  const std::string data = \"A long enough string to go over watermarks\";\n  Buffer::OwnedImpl fake_response(data);\n  InSequence s;\n  EXPECT_CALL(stream_, removeCallbacks(_));\n  expectOnDestroy(false);\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(stream_, resetStream(_));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n  EXPECT_LOG_CONTAINS(\n      \"debug\",\n      \"Resetting stream due to response_payload_too_large. 
Prior headers have already been sent\",\n      decoder_filters_[0]->callbacks_->encodeData(fake_response, false););\n  EXPECT_EQ(1U, stats_.named_.rs_too_large_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, FilterHeadReply) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"HEAD\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(1, 1);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        decoder_filters_[0]->callbacks_->sendLocalReply(Code::BadRequest, \"Bad request\", nullptr,\n                                                        absl::nullopt, \"\");\n        return FilterHeadersStatus::Continue;\n      }));\n\n  EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage()).WillOnce(Return(true));\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true))\n      .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus {\n        EXPECT_EQ(\"11\", headers.getContentLengthValue());\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));\n  expectOnDestroy();\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11));\n  conn_manager_->onData(fake_input, false);\n}\n\n// Verify that if an encoded stream has been ended, but gets stopped by a filter chain, we end\n// up resetting the 
stream in the doEndStream() path (e.g., via filter reset due to timeout, etc.),\n// we emit a reset to the codec.\nTEST_F(HttpConnectionManagerImplTest, ResetWithStoppedFilter) {\n  setup(false, \"\");\n  setupFilterChain(1, 1);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        decoder_filters_[0]->callbacks_->sendLocalReply(Code::BadRequest, \"Bad request\", nullptr,\n                                                        absl::nullopt, \"\");\n        return FilterHeadersStatus::Continue;\n      }));\n\n  EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage()).WillOnce(Return(true));\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus {\n        EXPECT_EQ(\"11\", headers.getContentLengthValue());\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterDataStatus {\n        return FilterDataStatus::StopIterationAndBuffer;\n      }));\n\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // Kick off the request\n  startRequest(true);\n\n  EXPECT_CALL(response_encoder_.stream_, resetStream(_));\n  expectOnDestroy();\n  encoder_filters_[0]->callbacks_->resetStream();\n}\n\nTEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamHeaders) {\n  setup(false, \"\");\n  setupFilterChain(2, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], 
decodeComplete());\n\n  // Kick off the incoming data.\n  startRequest();\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));\n\n  expectOnDestroy();\n\n  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      makeHeaderMap<TestResponseHeaderMapImpl>({{\":status\", \"200\"}}), false, \"details\");\n\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[1]->callbacks_->encodeData(response_body, true);\n}\n\nTEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamData) {\n  setup(false, \"\");\n  setupFilterChain(2, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Kick off the request\n  startRequest(true, \"hello\");\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));\n\n  expectOnDestroy();\n\n  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  
decoder_filters_[1]->callbacks_->encodeHeaders(\n      makeHeaderMap<TestResponseHeaderMapImpl>({{\":status\", \"200\"}}), false, \"details\");\n\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[1]->callbacks_->encodeData(response_body, true);\n}\n\nTEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamTrailers) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    auto headers = makeHeaderMap<TestRequestHeaderMapImpl>(\n        {{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}});\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl fake_data(\"hello\");\n    decoder_->decodeData(fake_data, false);\n\n    auto trailers = makeHeaderMap<TestRequestTrailerMapImpl>({{\"foo\", \"bar\"}});\n    decoder_->decodeTrailers(std::move(trailers));\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(2, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));\n\n  expectOnDestroy();\n\n  
decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      makeHeaderMap<TestResponseHeaderMapImpl>({{\":status\", \"200\"}}), false, \"details\");\n\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[1]->callbacks_->encodeData(response_body, false);\n\n  auto response_trailers = makeHeaderMap<TestResponseTrailerMapImpl>({{\"x-trailer\", \"1\"}});\n  decoder_filters_[1]->callbacks_->encodeTrailers(std::move(response_trailers));\n}\n\n// Filter continues headers iteration without ending the stream, then injects a body later.\nTEST_F(HttpConnectionManagerImplTest, FilterContinueDontEndStreamInjectBody) {\n  setup(false, \"\");\n  setupFilterChain(2, 2);\n\n  // Decode filter 0 changes end_stream to false.\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::ContinueAndDontEndStream));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n\n  // Kick off the incoming data.\n  startRequest(true);\n\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Decode filter 0 injects request body later.\n  Buffer::OwnedImpl data(\"hello\");\n  decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(data, true);\n\n  // Encode filter 1 changes end_stream to false.\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::ContinueAndDontEndStream));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n\n  
decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      makeHeaderMap<TestResponseHeaderMapImpl>({{\":status\", \"200\"}}), true, \"details\");\n\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeData(_, true));\n  expectOnDestroy();\n\n  // Encode filter 1 injects request body later.\n  Buffer::OwnedImpl data2(\"hello\");\n  encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(data2, true);\n}\n\nTEST_F(HttpConnectionManagerImplTest, FilterAddBodyContinuation) {\n  setup(false, \"\");\n  setupFilterChain(2, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // Kick off the incoming request.\n  startRequest(true);\n\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  Buffer::OwnedImpl data(\"hello\");\n  decoder_filters_[0]->callbacks_->addDecodedData(data, true);\n  decoder_filters_[0]->callbacks_->continueDecoding();\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n\n  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true, \"details\");\n\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  
EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeData(_, true));\n  expectOnDestroy();\n\n  Buffer::OwnedImpl data2(\"hello\");\n  encoder_filters_[1]->callbacks_->addEncodedData(data2, true);\n  encoder_filters_[1]->callbacks_->continueEncoding();\n}\n\n// This test verifies proper sequences of decodeData() and encodeData() are called\n// when all filers return \"CONTINUE\" in following case:\n//\n// 3 decode filters:\n//\n//   filter0->decodeHeaders(_, true)\n//     return CONTINUE\n//   filter1->decodeHeaders(_, true)\n//     filter1->addDecodeData()\n//     return CONTINUE\n//   filter2->decodeHeaders(_, false)\n//     return CONTINUE\n//   filter2->decodeData(_, true)\n//     return CONTINUE\n//\n//   filter0->decodeData(, true) is NOT called.\n//   filter1->decodeData(, true) is NOT called.\n//\n// 3 encode filters:\n//\n//   filter2->encodeHeaders(_, true)\n//     return CONTINUE\n//   filter1->encodeHeaders(_, true)\n//     filter1->addEncodeData()\n//     return CONTINUE\n//   filter0->decodeHeaders(_, false)\n//     return CONTINUE\n//   filter0->decodeData(_, true)\n//     return CONTINUE\n//\n//   filter2->encodeData(, true) is NOT called.\n//   filter1->encodeData(, true) is NOT called.\n//\nTEST_F(HttpConnectionManagerImplTest, AddDataWithAllContinue) {\n  setup(false, \"\");\n  setupFilterChain(3, 3);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        Buffer::OwnedImpl data2(\"hello\");\n        decoder_filters_[1]->callbacks_->addDecodedData(data2, true);\n        return 
FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[2], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[2], decodeComplete());\n\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)).Times(0);\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)).Times(0);\n\n  // Kick off the incoming data.\n  startRequest(true);\n\n  // For encode direction\n  EXPECT_CALL(*encoder_filters_[2], encodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[2], encodeComplete());\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        Buffer::OwnedImpl data2(\"goodbyte\");\n        encoder_filters_[1]->callbacks_->addEncodedData(data2, true);\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeData(_, true));\n  expectOnDestroy();\n\n  EXPECT_CALL(*encoder_filters_[2], encodeData(_, true)).Times(0);\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)).Times(0);\n\n  decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[2]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true, \"details\");\n}\n\n// This test verifies proper sequences of 
decodeData() and encodeData() are called\n// when the first filer is \"stopped\" and \"continue\" in following case:\n//\n// 3 decode filters:\n//\n//   filter0->decodeHeaders(_, true)\n//     return STOP\n//   filter0->continueDecoding()\n//   filter1->decodeHeaders(_, true)\n//     filter1->addDecodeData()\n//     return CONTINUE\n//   filter2->decodeHeaders(_, false)\n//     return CONTINUE\n//   filter2->decodeData(_, true)\n//     return CONTINUE\n//\n//   filter0->decodeData(, true) is NOT called.\n//   filter1->decodeData(, true) is NOT called.\n//\n// 3 encode filters:\n//\n//   filter2->encodeHeaders(_, true)\n//     return STOP\n//   filter2->continueEncoding()\n//   filter1->encodeHeaders(_, true)\n//     filter1->addEncodeData()\n//     return CONTINUE\n//   filter0->decodeHeaders(_, false)\n//     return CONTINUE\n//   filter0->decodeData(_, true)\n//     return CONTINUE\n//\n//   filter2->encodeData(, true) is NOT called.\n//   filter1->encodeData(, true) is NOT called.\n//\nTEST_F(HttpConnectionManagerImplTest, AddDataWithStopAndContinue) {\n  setup(false, \"\");\n\n  setupFilterChain(3, 3);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // Kick off the request.\n  startRequest(true);\n\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        Buffer::OwnedImpl data2(\"hello\");\n        decoder_filters_[1]->callbacks_->addDecodedData(data2, true);\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  // This fail, it is called twice.\n  EXPECT_CALL(*decoder_filters_[2], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  
EXPECT_CALL(*decoder_filters_[2], decodeComplete());\n\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)).Times(0);\n  // This fail, it is called once\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)).Times(0);\n\n  decoder_filters_[0]->callbacks_->continueDecoding();\n\n  // For encode direction\n  EXPECT_CALL(*encoder_filters_[2], encodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*encoder_filters_[2], encodeComplete());\n\n  decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[2]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true, \"details\");\n\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        Buffer::OwnedImpl data2(\"goodbyte\");\n        encoder_filters_[1]->callbacks_->addEncodedData(data2, true);\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeData(_, true));\n  expectOnDestroy();\n\n  EXPECT_CALL(*encoder_filters_[2], encodeData(_, true)).Times(0);\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)).Times(0);\n\n  encoder_filters_[2]->callbacks_->continueEncoding();\n}\n\n// Use filter direct decode/encodeData() calls without trailers.\nTEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) {\n  setup(false, \"\");\n  EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _));\n  setupFilterChain(2, 2);\n\n  
EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n\n  Buffer::OwnedImpl decode_buffer;\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) {\n        decode_buffer.move(data);\n        return FilterDataStatus::StopIterationNoBuffer;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // Kick off the request.\n  EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11));\n  startRequest(true, \"hello\");\n\n  Buffer::OwnedImpl decoded_data_to_forward;\n  decoded_data_to_forward.move(decode_buffer, 2);\n  EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual(\"he\"), false))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n  decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decoded_data_to_forward, false);\n\n  EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual(\"llo\"), true))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decode_buffer, true);\n\n  // Response path.\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n\n  Buffer::OwnedImpl encoder_buffer;\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) {\n        encoder_buffer.move(data);\n        return FilterDataStatus::StopIterationNoBuffer;\n      }));\n  EXPECT_CALL(*encoder_filters_[1], 
encodeComplete());\n\n  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[1]->callbacks_->encodeData(response_body, true);\n\n  Buffer::OwnedImpl encoded_data_to_forward;\n  encoded_data_to_forward.move(encoder_buffer, 3);\n  EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual(\"res\"), false));\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n  encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoded_data_to_forward, false);\n\n  EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual(\"ponse\"), true));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeData(_, true));\n  expectOnDestroy();\n  encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoder_buffer, true);\n}\n\n// Use filter direct decode/encodeData() calls with trailers.\nTEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl fake_data(\"hello\");\n    decoder_->decodeData(fake_data, false);\n\n    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"foo\", \"bar\"}}};\n    decoder_->decodeTrailers(std::move(trailers));\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _));\n  setupFilterChain(2, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], 
decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n\n  Buffer::OwnedImpl decode_buffer;\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) {\n        decode_buffer.move(data);\n        return FilterDataStatus::StopIterationNoBuffer;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  Buffer::OwnedImpl decoded_data_to_forward;\n  decoded_data_to_forward.move(decode_buffer, 2);\n  EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual(\"he\"), false))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n  decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decoded_data_to_forward, false);\n\n  EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual(\"llo\"), false))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n  decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decode_buffer, false);\n\n  EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  decoder_filters_[0]->callbacks_->continueDecoding();\n\n  // Response path.\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n\n  Buffer::OwnedImpl encoder_buffer;\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& data, 
bool) {\n        encoder_buffer.move(data);\n        return FilterDataStatus::StopIterationNoBuffer;\n      }));\n  EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::StopIteration));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n\n  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[1]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[1]->callbacks_->encodeData(response_body, false);\n  decoder_filters_[1]->callbacks_->encodeTrailers(\n      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{\"some\", \"trailer\"}}});\n\n  Buffer::OwnedImpl encoded_data_to_forward;\n  encoded_data_to_forward.move(encoder_buffer, 3);\n  EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual(\"res\"), false));\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n  encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoded_data_to_forward, false);\n\n  EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual(\"ponse\"), false));\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n  encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoder_buffer, false);\n\n  EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeTrailers(_));\n  expectOnDestroy();\n  encoder_filters_[1]->callbacks_->continueEncoding();\n}\n\nTEST_F(HttpConnectionManagerImplTest, MultipleFilters) {\n  InSequence s;\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, 
{\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl fake_data(\"hello\");\n    decoder_->decodeData(fake_data, false);\n\n    Buffer::OwnedImpl fake_data2(\"world\");\n    decoder_->decodeData(fake_data2, true);\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _));\n  setupFilterChain(3, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        EXPECT_EQ(route_config_provider_.route_config_->route_,\n                  decoder_filters_[0]->callbacks_->route());\n        EXPECT_EQ(ssl_connection_.get(),\n                  decoder_filters_[0]->callbacks_->connection()->ssl().get());\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11));\n  conn_manager_->onData(fake_input, false);\n\n  // Mimic a decoder filter that trapped data and now sends it on, since the data was buffered\n  // by the first filter, we expect to get it in 1 decodeData() call.\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        EXPECT_EQ(route_config_provider_.route_config_->route_,\n                  decoder_filters_[1]->callbacks_->route());\n        EXPECT_EQ(ssl_connection_.get(),\n                  decoder_filters_[1]->callbacks_->connection()->ssl().get());\n        return FilterHeadersStatus::StopIteration;\n      }));\n 
 EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[2], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n  EXPECT_CALL(*decoder_filters_[2], decodeComplete());\n  decoder_filters_[0]->callbacks_->continueDecoding();\n\n  // Now start encoding and mimic trapping in the encoding filter.\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, false))\n      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));\n  EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::StopIteration));\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  EXPECT_EQ(ssl_connection_.get(), encoder_filters_[1]->callbacks_->connection()->ssl().get());\n  decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[2]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[2]->callbacks_->encodeData(response_body, false);\n  decoder_filters_[2]->callbacks_->encodeTrailers(\n      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{\"some\", \"trailer\"}}});\n  EXPECT_EQ(ssl_connection_.get(), decoder_filters_[2]->callbacks_->connection()->ssl().get());\n\n  // Now finish the encode.\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, false))\n      
.WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeData(_, false));\n  EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeComplete());\n  EXPECT_CALL(response_encoder_, encodeTrailers(_));\n  expectOnDestroy();\n  encoder_filters_[1]->callbacks_->continueEncoding();\n\n  EXPECT_EQ(ssl_connection_.get(), encoder_filters_[0]->callbacks_->connection()->ssl().get());\n}\n\nTEST(HttpConnectionManagerTracingStatsTest, verifyTracingStats) {\n  Stats::IsolatedStoreImpl stats;\n  ConnectionManagerTracingStats tracing_stats{CONN_MAN_TRACING_STATS(POOL_COUNTER(stats))};\n\n  EXPECT_THROW(\n      ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::HealthCheck, tracing_stats),\n      std::invalid_argument);\n\n  ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::ClientForced, tracing_stats);\n  EXPECT_EQ(1UL, tracing_stats.client_enabled_.value());\n\n  ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::NotTraceableRequestId, tracing_stats);\n  EXPECT_EQ(1UL, tracing_stats.not_traceable_.value());\n\n  ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::Sampling, tracing_stats);\n  EXPECT_EQ(1UL, tracing_stats.random_sampling_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) {\n  Server::OverloadActionState stop_accepting_requests = Server::OverloadActionState(0.8);\n  ON_CALL(overload_manager_.overload_state_,\n          getState(Server::OverloadActionNames::get().StopAcceptingRequests))\n      .WillByDefault(ReturnRef(stop_accepting_requests));\n\n  setup(false, \"\");\n\n  EXPECT_CALL(random_, random())\n      .WillRepeatedly(Return(static_cast<float>(Random::RandomGenerator::max()) * 0.5));\n\n  // 503 direct response when overloaded.\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        
EXPECT_EQ(\"503\", headers.getStatusValue());\n      }));\n  std::string response_body;\n  EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body));\n\n  startRequest();\n\n  EXPECT_EQ(\"envoy overloaded\", response_body);\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_overload_close_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, DisableHttp1KeepAliveWhenOverloaded) {\n  Server::OverloadActionState disable_http_keep_alive = Server::OverloadActionState(0.8);\n  ON_CALL(overload_manager_.overload_state_,\n          getState(Server::OverloadActionNames::get().DisableHttpKeepAlive))\n      .WillByDefault(ReturnRef(disable_http_keep_alive));\n\n  codec_->protocol_ = Protocol::Http11;\n  setup(false, \"\");\n\n  EXPECT_CALL(random_, random())\n      .WillRepeatedly(Return(static_cast<float>(Random::RandomGenerator::max()) * 0.5));\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{\":authority\", \"host\"},\n                                                                 {\":path\", \"/\"},\n                                                                 {\":method\", \"GET\"},\n                                                                 {\"connection\", \"keep-alive\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n       
 filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"close\", headers.getConnectionValue());\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n  EXPECT_EQ(1U, stats_.named_.downstream_cx_overload_disable_keepalive_.value());\n}\n\nclass DrainH2HttpConnectionManagerImplTest : public HttpConnectionManagerImplTest,\n                                             public testing::WithParamInterface<bool> {\npublic:\n  DrainH2HttpConnectionManagerImplTest() {\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2\", \"true\"}});\n  }\n\nprivate:\n  TestScopedRuntime runtime_;\n};\n\n// Verify that, if the runtime option is enabled, HTTP2 connections will receive\n// a GOAWAY message when the overload action is triggered.\nTEST_P(DrainH2HttpConnectionManagerImplTest, DisableHttp2KeepAliveWhenOverloaded) {\n  Server::OverloadActionState disable_http_keep_alive = Server::OverloadActionState::saturated();\n  ON_CALL(overload_manager_.overload_state_,\n          getState(Server::OverloadActionNames::get().DisableHttpKeepAlive))\n      .WillByDefault(ReturnRef(disable_http_keep_alive));\n\n  codec_->protocol_ = Protocol::Http2;\n  setup(false, \"\");\n  if (GetParam()) {\n    EXPECT_CALL(*codec_, shutdownNotice);\n  }\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      
.WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{\":authority\", \"host\"},\n                                                                 {\":path\", \"/\"},\n                                                                 {\":method\", \"GET\"},\n                                                                 {\"connection\", \"keep-alive\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n  Mock::VerifyAndClearExpectations(codec_);\n  EXPECT_EQ(1, stats_.named_.downstream_cx_overload_disable_keepalive_.value());\n}\n\nINSTANTIATE_TEST_SUITE_P(WithRuntimeOverride, DrainH2HttpConnectionManagerImplTest,\n                         testing::Bool());\n\nTEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathFirstFilter) {\n  setup(false, \"envoy-custom-server\", false);\n  setUpEncoderAndDecoder(true, true);\n\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  // Verify that once the decoder_filters_[0]'s continueDecoding() is called, decoder_filters_[1]'s\n  // decodeHeaders() is called, and both filters receive data and trailers consequently.\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, _))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, 
_)).WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  decoder_filters_[0]->callbacks_->continueDecoding();\n\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathSecondFilter) {\n  setup(false, \"envoy-custom-server\", false);\n  setUpEncoderAndDecoder(true, false);\n\n  // Verify headers go through both filters, and data and trailers go through the first filter only.\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, _))\n      .WillOnce(Return(FilterHeadersStatus::StopAllIterationAndBuffer));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  // Kick off the incoming data.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  // Verify that once the decoder_filters_[1]'s continueDecoding() is called, both data and trailers\n  // go through the second filter.\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  decoder_filters_[1]->callbacks_->continueDecoding();\n\n  doRemoteClose();\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnEncodingPath) {\n  setup(false, \"envoy-custom-server\", false);\n  setUpEncoderAndDecoder(false, false);\n  sendRequestHeadersAndData();\n\n  // 
encoder_filters_[1] is the first filter in the chain.\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Invoke([&](HeaderMap&, bool) -> FilterHeadersStatus {\n        return FilterHeadersStatus::StopAllIterationAndBuffer;\n      }));\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n\n  // Invoke encodeData while all iteration is stopped and make sure the filters do not have\n  // encodeData called.\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, _)).Times(0);\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, _)).Times(0);\n  Buffer::OwnedImpl response_body(\"response\");\n  decoder_filters_[0]->callbacks_->encodeData(response_body, false);\n  decoder_filters_[0]->callbacks_->encodeTrailers(\n      ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{\"some\", \"trailer\"}}});\n\n  // Verify that once encoder_filters_[1]'s continueEncoding() is called, encoder_filters_[0]'s\n  // encodeHeaders() is called, and both filters receive data and trailers consequently.\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, _))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  EXPECT_CALL(*encoder_filters_[1], encodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeData(_, _));\n  EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_))\n      .WillOnce(Return(FilterTrailersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeTrailers(_));\n  EXPECT_CALL(*encoder_filters_[0], 
encodeComplete());\n  EXPECT_CALL(*encoder_filters_[1], encodeComplete());\n  expectOnDestroy();\n  encoder_filters_[1]->callbacks_->continueEncoding();\n}\n\nTEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenDraining) {\n  setup(false, \"\");\n\n  EXPECT_CALL(drain_close_, drainClose()).WillOnce(Return(true));\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});\n      }));\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{\":authority\", \"host\"},\n                                                                 {\":path\", \"/\"},\n                                                                 {\":method\", \"GET\"},\n                                                                 {\"connection\", \"keep-alive\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n\n        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n        filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n        data.drain(4);\n        return Http::okStatus();\n      }));\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(\"close\", headers.getConnectionValue());\n      }));\n\n  Buffer::OwnedImpl fake_input;\n  conn_manager_->onData(fake_input, false);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestSessionTrace) {\n  setup(false, \"\");\n\n  // Set up the codec.\n 
 EXPECT_CALL(*codec_, dispatch(_))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        data.drain(4);\n        return Http::okStatus();\n      }));\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  setupFilterChain(1, 1);\n\n  // Create a new stream\n  decoder_ = &conn_manager_->newStream(response_encoder_);\n\n  // Send headers to that stream, and verify we both set and clear the tracked object.\n  {\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"POST\"}}};\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, setTrackedObject(_))\n        .Times(2)\n        .WillOnce(Invoke([](const ScopeTrackedObject* object) -> const ScopeTrackedObject* {\n          ASSERT(object != nullptr); // On the first call, this should be the active stream.\n          std::stringstream out;\n          object->dumpState(out);\n          std::string state = out.str();\n          EXPECT_THAT(state,\n                      testing::HasSubstr(\"filter_manager_callbacks_.requestHeaders():   empty\"));\n          EXPECT_THAT(state, testing::HasSubstr(\"protocol_: 1\"));\n          return nullptr;\n        }))\n        .WillRepeatedly(Return(nullptr));\n    EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n        .WillOnce(Invoke([](HeaderMap&, bool) -> FilterHeadersStatus {\n          return FilterHeadersStatus::StopIteration;\n        }));\n    decoder_->decodeHeaders(std::move(headers), false);\n  }\n\n  // Send trailers to that stream, and verify by this point headers are in logged state.\n  {\n    RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"foo\", \"bar\"}}};\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, setTrackedObject(_))\n        .Times(2)\n        .WillOnce(Invoke([](const ScopeTrackedObject* object) -> const ScopeTrackedObject* {\n          ASSERT(object != nullptr); // On 
the first call, this should be the active stream.\n          std::stringstream out;\n          object->dumpState(out);\n          std::string state = out.str();\n          EXPECT_THAT(state, testing::HasSubstr(\"filter_manager_callbacks_.requestHeaders(): \\n\"));\n          EXPECT_THAT(state, testing::HasSubstr(\"':authority', 'host'\\n\"));\n          EXPECT_THAT(state, testing::HasSubstr(\"protocol_: 1\"));\n          return nullptr;\n        }))\n        .WillRepeatedly(Return(nullptr));\n    EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n    EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))\n        .WillOnce(Return(FilterTrailersStatus::StopIteration));\n    decoder_->decodeTrailers(std::move(trailers));\n  }\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// SRDS no scope found.\nTEST_F(HttpConnectionManagerImplTest, TestSrdsRouteNotFound) {\n  setup(false, \"\", true, true);\n  setupFilterChain(1, 0); // Recreate the chain for second stream.\n\n  EXPECT_CALL(*static_cast<const Router::MockScopedConfig*>(\n                  scopedRouteConfigProvider()->config<Router::ScopedConfig>().get()),\n              getRouteConfig(_))\n      .Times(2)\n      .WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host\"}, {\":method\", \"GET\"}, {\":path\", \"/foo\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        EXPECT_EQ(nullptr, decoder_filters_[0]->callbacks_->route());\n        return FilterHeadersStatus::StopIteration;\n      }));\n  
EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // end_stream=true.\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// SRDS updating scopes affects routing.\nTEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) {\n  setup(false, \"\", true, true);\n\n  EXPECT_CALL(*static_cast<const Router::MockScopedConfig*>(\n                  scopedRouteConfigProvider()->config<Router::ScopedConfig>().get()),\n              getRouteConfig(_))\n      .Times(3)\n      .WillOnce(Return(nullptr))\n      .WillOnce(Return(nullptr))        // refreshCachedRoute first time.\n      .WillOnce(Return(route_config_)); // triggered by callbacks_->route(), SRDS now updated.\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host\"}, {\":method\", \"GET\"}, {\":path\", \"/foo\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n  const std::string fake_cluster1_name = \"fake_cluster1\";\n  std::shared_ptr<Router::MockRoute> route1 = std::make_shared<NiceMock<Router::MockRoute>>();\n  EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name));\n  std::shared_ptr<Upstream::MockThreadLocalCluster> fake_cluster1 =\n      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();\n  EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get()));\n  EXPECT_CALL(*route_config_, route(_, _, _, _)).WillOnce(Return(route1));\n  // First no-scope-found request will be handled by decoder_filters_[0].\n  setupFilterChain(1, 0);\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> 
FilterHeadersStatus {\n        EXPECT_EQ(nullptr, decoder_filters_[0]->callbacks_->route());\n\n        // Clear route and next call on callbacks_->route() will trigger a re-snapping of the\n        // snapped_route_config_.\n        decoder_filters_[0]->callbacks_->clearRouteCache();\n\n        // Now route config provider returns something.\n        EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route());\n        EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry());\n        EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo());\n        return FilterHeadersStatus::StopIteration;\n\n        return FilterHeadersStatus::StopIteration;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // end_stream=true.\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// SRDS Scope header update cause cross-scope reroute.\nTEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) {\n  setup(false, \"\", true, true);\n\n  std::shared_ptr<Router::MockConfig> route_config1 =\n      std::make_shared<NiceMock<Router::MockConfig>>();\n  std::shared_ptr<Router::MockConfig> route_config2 =\n      std::make_shared<NiceMock<Router::MockConfig>>();\n  std::shared_ptr<Router::MockRoute> route1 = std::make_shared<NiceMock<Router::MockRoute>>();\n  std::shared_ptr<Router::MockRoute> route2 = std::make_shared<NiceMock<Router::MockRoute>>();\n  EXPECT_CALL(*route_config1, route(_, _, _, _)).WillRepeatedly(Return(route1));\n  EXPECT_CALL(*route_config2, route(_, _, _, _)).WillRepeatedly(Return(route2));\n  EXPECT_CALL(*static_cast<const Router::MockScopedConfig*>(\n                  scopedRouteConfigProvider()->config<Router::ScopedConfig>().get()),\n              getRouteConfig(_))\n      // 1. Snap scoped route config;\n      // 2. 
refreshCachedRoute (both in decodeHeaders(headers,end_stream);\n      // 3. then refreshCachedRoute triggered by decoder_filters_[1]->callbacks_->route().\n      .Times(3)\n      .WillRepeatedly(Invoke([&](const HeaderMap& headers) -> Router::ConfigConstSharedPtr {\n        auto& test_headers = dynamic_cast<const TestRequestHeaderMapImpl&>(headers);\n        if (test_headers.get_(\"scope_key\") == \"foo\") {\n          return route_config1;\n        }\n        return route_config2;\n      }));\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host\"}, {\":method\", \"GET\"}, {\"scope_key\", \"foo\"}, {\":path\", \"/foo\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n  setupFilterChain(2, 0);\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) -> FilterHeadersStatus {\n        EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route());\n        auto& test_headers = dynamic_cast<TestRequestHeaderMapImpl&>(headers);\n        // Clear cached route and change scope key to \"bar\".\n        decoder_filters_[0]->callbacks_->clearRouteCache();\n        test_headers.remove(\"scope_key\");\n        test_headers.addCopy(\"scope_key\", \"bar\");\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) -> FilterHeadersStatus {\n        auto& test_headers = dynamic_cast<TestRequestHeaderMapImpl&>(headers);\n        EXPECT_EQ(test_headers.get_(\"scope_key\"), \"bar\");\n        // Route now switched to route2 as header \"scope_key\" has changed.\n        EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->route());\n   
     EXPECT_EQ(route2->routeEntry(), decoder_filters_[1]->callbacks_->streamInfo().routeEntry());\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// SRDS scoped RouteConfiguration found and route found.\nTEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) {\n  setup(false, \"\", true, true);\n  setupFilterChain(1, 0);\n\n  const std::string fake_cluster1_name = \"fake_cluster1\";\n  std::shared_ptr<Router::MockRoute> route1 = std::make_shared<NiceMock<Router::MockRoute>>();\n  EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name));\n  std::shared_ptr<Upstream::MockThreadLocalCluster> fake_cluster1 =\n      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();\n  EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get()));\n  EXPECT_CALL(*scopedRouteConfigProvider()->config<Router::MockScopedConfig>(), getRouteConfig(_))\n      // 1. decodeHeaders() snapping route config.\n      // 2. 
refreshCachedRoute() later in the same decodeHeaders().\n      .Times(2);\n  EXPECT_CALL(\n      *static_cast<const Router::MockConfig*>(\n          scopedRouteConfigProvider()->config<Router::MockScopedConfig>()->route_config_.get()),\n      route(_, _, _, _))\n      .WillOnce(Return(route1));\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n        {\":authority\", \"host\"}, {\":method\", \"GET\"}, {\":path\", \"/foo\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    data.drain(4);\n    return Http::okStatus();\n  }));\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {\n        EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route());\n        EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry());\n        EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo());\n        return FilterHeadersStatus::StopIteration;\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, NewConnection) {\n  setup(false, \"\", true, true);\n\n  filter_callbacks_.connection_.stream_info_.protocol_ = absl::nullopt;\n  EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol());\n  EXPECT_EQ(Network::FilterStatus::Continue, conn_manager_->onNewConnection());\n  EXPECT_EQ(0U, stats_.named_.downstream_cx_http3_total_.value());\n  EXPECT_EQ(0U, stats_.named_.downstream_cx_http3_active_.value());\n\n  filter_callbacks_.connection_.stream_info_.protocol_ = Envoy::Http::Protocol::Http3;\n  codec_->protocol_ 
= Http::Protocol::Http3;\n  EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol());\n  EXPECT_CALL(*codec_, protocol()).Times(AtLeast(1));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, conn_manager_->onNewConnection());\n  EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_total_.value());\n  EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_active_.value());\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestHeadersSize) {\n  // Test with Headers only request, No Data, No response.\n  setup(false, \"\");\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), true);\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(1, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  std::shared_ptr<NiceMock<Upstream::MockHostDescription>> host_{\n      new NiceMock<Upstream::MockHostDescription>()};\n  filter_callbacks_.upstreamHost(host_);\n\n  EXPECT_CALL(\n      host_->cluster_.request_response_size_stats_store_,\n      deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rq_headers_size\"), 30));\n  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rq_body_size\"), 0));\n  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rs_body_size\"), 0));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  
filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestBodySize) {\n  // Test Request with Headers and Data, No response.\n  setup(false, \"\");\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl fake_data(\"12345\");\n    decoder_->decodeData(fake_data, true);\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(1, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  std::shared_ptr<NiceMock<Upstream::MockHostDescription>> host_{\n      new NiceMock<Upstream::MockHostDescription>()};\n  filter_callbacks_.upstreamHost(host_);\n\n  EXPECT_CALL(\n      host_->cluster_.request_response_size_stats_store_,\n      deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rq_headers_size\"), 30));\n  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rq_body_size\"), 5));\n  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rs_body_size\"), 0));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  expectOnDestroy();\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(HttpConnectionManagerImplTest, 
TestUpstreamResponseHeadersSize) {\n  // Test with Header only response.\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl fake_data(\"1234\");\n    decoder_->decodeData(fake_data, true);\n\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(1, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  std::shared_ptr<NiceMock<Upstream::MockHostDescription>> host_{\n      new NiceMock<Upstream::MockHostDescription>()};\n  filter_callbacks_.upstreamHost(host_);\n\n  EXPECT_CALL(\n      host_->cluster_.request_response_size_stats_store_,\n      deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rq_headers_size\"), 30));\n\n  // Response headers are internally mutated and we record final response headers.\n  // for example in the below test case, response headers are modified as\n  // {':status', '200' 'date', 'Mon, 06 Jul 2020 06:08:55 GMT' 'server', ''}\n  // whose size is 49 instead of original response headers size 10({\":status\", \"200\"}).\n  EXPECT_CALL(\n      host_->cluster_.request_response_size_stats_store_,\n      deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rs_headers_size\"), 49));\n  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rq_body_size\"), 4));\n  
EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rs_body_size\"), 0));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));\n  expectOnDestroy();\n\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true, \"details\");\n}\n\nTEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseBodySize) {\n  // Test with response headers and body.\n  setup(false, \"\");\n\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), false);\n\n    Buffer::OwnedImpl fake_data(\"1234\");\n    decoder_->decodeData(fake_data, true);\n\n    return Http::okStatus();\n  }));\n\n  setupFilterChain(1, 0);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))\n      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));\n\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n\n  std::shared_ptr<NiceMock<Upstream::MockHostDescription>> host_{\n      new NiceMock<Upstream::MockHostDescription>()};\n  filter_callbacks_.upstreamHost(host_);\n\n  EXPECT_CALL(\n      host_->cluster_.request_response_size_stats_store_,\n      deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rq_headers_size\"), 30));\n  EXPECT_CALL(\n      host_->cluster_.request_response_size_stats_store_,\n      
deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rs_headers_size\"), 49));\n  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rq_body_size\"), 4));\n  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_rs_body_size\"), 11));\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  conn_manager_->onData(fake_input, false);\n\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false, \"details\");\n\n  EXPECT_CALL(response_encoder_, encodeData(_, true));\n  expectOnDestroy();\n\n  Buffer::OwnedImpl fake_response(\"hello-world\");\n  decoder_filters_[0]->callbacks_->encodeData(fake_response, true);\n}\n\nTEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponseUsingHttp3) {\n  setup(false, \"envoy-custom-server\", false);\n\n  filter_callbacks_.connection_.stream_info_.protocol_ = Envoy::Http::Protocol::Http3;\n  codec_->protocol_ = Http::Protocol::Http3;\n  EXPECT_EQ(Network::FilterStatus::StopIteration, conn_manager_->onNewConnection());\n\n  // Store the basic request encoder during filter chain setup.\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(*filter, decodeHeaders(_, true))\n      .WillOnce(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus {\n        EXPECT_NE(nullptr, headers.ForwardedFor());\n        EXPECT_EQ(\"http\", headers.getForwardedProtoValue());\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      
.WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_));\n\n  // Pretend to get a new stream and then fire a headers only request into it. Then we respond into\n  // the filter.\n  RequestDecoder& decoder = conn_manager_->newStream(response_encoder_);\n  RequestHeaderMapPtr headers{\n      new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n  decoder.decodeHeaders(std::move(headers), true);\n\n  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n  filter->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"details\");\n\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_2xx_.value());\n  EXPECT_EQ(1U, stats_.named_.downstream_rq_completed_.value());\n  EXPECT_EQ(1U, listener_stats_.downstream_rq_completed_.value());\n  EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_total_.value());\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  conn_manager_.reset();\n  EXPECT_EQ(0U, stats_.named_.downstream_cx_http3_active_.value());\n}\n\nnamespace {\n\nclass SimpleType : public StreamInfo::FilterState::Object {\npublic:\n  SimpleType(int value) : value_(value) {}\n  int access() const { return value_; }\n\nprivate:\n  int value_;\n};\n\n} // namespace\n\nTEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) {\n  filter_callbacks_.connection_.stream_info_.filter_state_->setData(\n      \"connection_provided_data\", std::make_shared<SimpleType>(555),\n      StreamInfo::FilterState::StateType::ReadOnly);\n\n  setup(false, \"envoy-custom-server\", false);\n  setupFilterChain(1, 0, /* num_requests = */ 3);\n\n  EXPECT_CALL(*codec_, dispatch(_))\n      .Times(2)\n      
.WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {\n        decoder_ = &conn_manager_->newStream(response_encoder_);\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n        decoder_->decodeHeaders(std::move(headers), true);\n        return Http::okStatus();\n      }));\n  {\n    InSequence s;\n    EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))\n        .WillOnce(Invoke([this](HeaderMap&, bool) -> FilterHeadersStatus {\n          decoder_filters_[0]->callbacks_->streamInfo().filterState()->setData(\n              \"per_filter_chain\", std::make_unique<SimpleType>(1),\n              StreamInfo::FilterState::StateType::ReadOnly,\n              StreamInfo::FilterState::LifeSpan::FilterChain);\n          decoder_filters_[0]->callbacks_->streamInfo().filterState()->setData(\n              \"per_downstream_request\", std::make_unique<SimpleType>(2),\n              StreamInfo::FilterState::StateType::ReadOnly,\n              StreamInfo::FilterState::LifeSpan::Request);\n          decoder_filters_[0]->callbacks_->streamInfo().filterState()->setData(\n              \"per_downstream_connection\", std::make_unique<SimpleType>(3),\n              StreamInfo::FilterState::StateType::ReadOnly,\n              StreamInfo::FilterState::LifeSpan::Connection);\n          return FilterHeadersStatus::StopIteration;\n        }));\n    EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))\n        .WillOnce(Invoke([this](HeaderMap&, bool) -> FilterHeadersStatus {\n          EXPECT_FALSE(\n              decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(\n                  \"per_filter_chain\"));\n          EXPECT_TRUE(\n              decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(\n                  \"per_downstream_request\"));\n          EXPECT_TRUE(\n              
decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(\n                  \"per_downstream_connection\"));\n          EXPECT_TRUE(\n              decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(\n                  \"connection_provided_data\"));\n          return FilterHeadersStatus::StopIteration;\n        }));\n    EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, true))\n        .WillOnce(Invoke([this](HeaderMap&, bool) -> FilterHeadersStatus {\n          EXPECT_FALSE(\n              decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(\n                  \"per_filter_chain\"));\n          EXPECT_FALSE(\n              decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(\n                  \"per_downstream_request\"));\n          EXPECT_TRUE(\n              decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(\n                  \"per_downstream_connection\"));\n          EXPECT_TRUE(\n              decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(\n                  \"connection_provided_data\"));\n          return FilterHeadersStatus::StopIteration;\n        }));\n  }\n\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[0], onStreamComplete());\n  EXPECT_CALL(*decoder_filters_[0], onDestroy());\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n  EXPECT_CALL(*decoder_filters_[2], decodeComplete());\n\n  Buffer::OwnedImpl fake_input;\n  conn_manager_->onData(fake_input, false);\n  decoder_filters_[0]->callbacks_->recreateStream();\n  conn_manager_->onData(fake_input, false);\n\n  // The connection life time data should have been written to the connection filter state.\n  EXPECT_TRUE(filter_callbacks_.connection_.stream_info_.filter_state_->hasData<SimpleType>(\n      \"per_downstream_connection\"));\n  EXPECT_CALL(*decoder_filters_[1], 
onStreamComplete());\n  EXPECT_CALL(*decoder_filters_[1], onDestroy());\n  EXPECT_CALL(*decoder_filters_[2], onStreamComplete());\n  EXPECT_CALL(*decoder_filters_[2], onDestroy());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nclass HttpConnectionManagerImplDeathTest : public HttpConnectionManagerImplTest {\npublic:\n  Router::RouteConfigProvider* routeConfigProvider() override {\n    return route_config_provider2_.get();\n  }\n  Config::ConfigProvider* scopedRouteConfigProvider() override {\n    return scoped_route_config_provider2_.get();\n  }\n\n  std::shared_ptr<Router::MockRouteConfigProvider> route_config_provider2_;\n  std::shared_ptr<Router::MockScopedRouteConfigProvider> scoped_route_config_provider2_;\n};\n\n// HCM config can only have either RouteConfigProvider or ScopedRoutesConfigProvider.\nTEST_F(HttpConnectionManagerImplDeathTest, InvalidConnectionManagerConfig) {\n  setup(false, \"\");\n\n  Buffer::OwnedImpl fake_input(\"1234\");\n  EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {\n    conn_manager_->newStream(response_encoder_);\n    return Http::okStatus();\n  }));\n  // Either RDS or SRDS should be set.\n  EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false),\n                     \"Either routeConfigProvider or scopedRouteConfigProvider should be set in \"\n                     \"ConnectionManagerImpl.\");\n\n  route_config_provider2_ = std::make_shared<NiceMock<Router::MockRouteConfigProvider>>();\n\n  // Only route config provider valid.\n  EXPECT_NO_THROW(conn_manager_->onData(fake_input, false));\n\n  scoped_route_config_provider2_ =\n      std::make_shared<NiceMock<Router::MockScopedRouteConfigProvider>>();\n  // Can't have RDS and SRDS provider in the same time.\n  EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false),\n                     \"Either routeConfigProvider or scopedRouteConfigProvider should be set in \"\n                   
  \"ConnectionManagerImpl.\");\n\n  route_config_provider2_.reset();\n  // Only scoped route config provider valid.\n  EXPECT_NO_THROW(conn_manager_->onData(fake_input, false));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_test_base.cc",
    "content": "#include \"test/common/http/conn_manager_impl_test_base.h\"\n\nusing testing::AtLeast;\nusing testing::InSequence;\nusing testing::InvokeWithoutArgs;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Http {\n\nHttpConnectionManagerImplTest::HttpConnectionManagerImplTest()\n    : http_context_(fake_stats_.symbolTable()), access_log_path_(\"dummy_path\"),\n      access_logs_{AccessLog::InstanceSharedPtr{new Extensions::AccessLoggers::File::FileAccessLog(\n          access_log_path_, {}, Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(),\n          log_manager_)}},\n      codec_(new NiceMock<MockServerConnection>()),\n      stats_({ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_),\n                                      POOL_HISTOGRAM(fake_stats_))},\n             \"\", fake_stats_),\n\n      listener_stats_({CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}),\n      request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)),\n      local_reply_(LocalReply::Factory::createDefault()) {\n\n  ON_CALL(route_config_provider_, lastUpdated())\n      .WillByDefault(Return(test_time_.timeSystem().systemTime()));\n  ON_CALL(scoped_route_config_provider_, lastUpdated())\n      .WillByDefault(Return(test_time_.timeSystem().systemTime()));\n  // response_encoder_ is not a NiceMock on purpose. 
This prevents complaining about this\n  // method only.\n  EXPECT_CALL(response_encoder_, getStream()).Times(AtLeast(0));\n}\n\nHttpConnectionManagerImplTest::~HttpConnectionManagerImplTest() {\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n}\n\nTracing::CustomTagConstSharedPtr\nHttpConnectionManagerImplTest::requestHeaderCustomTag(const std::string& header) {\n  envoy::type::tracing::v3::CustomTag::Header headerTag;\n  headerTag.set_name(header);\n  return std::make_shared<Tracing::RequestHeaderCustomTag>(header, headerTag);\n}\n\nvoid HttpConnectionManagerImplTest::setup(bool ssl, const std::string& server_name, bool tracing,\n                                          bool use_srds) {\n  use_srds_ = use_srds;\n  if (ssl) {\n    ssl_connection_ = std::make_shared<Ssl::MockConnectionInfo>();\n  }\n\n  server_name_ = server_name;\n  ON_CALL(filter_callbacks_.connection_, ssl()).WillByDefault(Return(ssl_connection_));\n  ON_CALL(Const(filter_callbacks_.connection_), ssl()).WillByDefault(Return(ssl_connection_));\n  filter_callbacks_.connection_.local_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 443);\n  filter_callbacks_.connection_.remote_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"0.0.0.0\");\n  conn_manager_ = std::make_unique<ConnectionManagerImpl>(\n      *this, drain_close_, random_, http_context_, runtime_, local_info_, cluster_manager_,\n      overload_manager_, test_time_.timeSystem());\n  conn_manager_->initializeReadFilterCallbacks(filter_callbacks_);\n\n  if (tracing) {\n    envoy::type::v3::FractionalPercent percent1;\n    percent1.set_numerator(100);\n    envoy::type::v3::FractionalPercent percent2;\n    percent2.set_numerator(10000);\n    percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n    tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(\n        TracingConnectionManagerConfig{Tracing::OperationName::Ingress,\n         
                              {{\":method\", requestHeaderCustomTag(\":method\")}},\n                                       percent1,\n                                       percent2,\n                                       percent1,\n                                       false,\n                                       256});\n  }\n}\n\nvoid HttpConnectionManagerImplTest::setupFilterChain(int num_decoder_filters,\n                                                     int num_encoder_filters, int num_requests) {\n  // NOTE: The length/repetition in this routine allows InSequence to work correctly in an outer\n  // scope.\n  for (int i = 0; i < num_decoder_filters * num_requests; i++) {\n    decoder_filters_.push_back(new MockStreamDecoderFilter());\n  }\n\n  for (int i = 0; i < num_encoder_filters * num_requests; i++) {\n    encoder_filters_.push_back(new MockStreamEncoderFilter());\n  }\n\n  InSequence s;\n  for (int req = 0; req < num_requests; req++) {\n    EXPECT_CALL(filter_factory_, createFilterChain(_))\n        .WillOnce(Invoke([num_decoder_filters, num_encoder_filters, req,\n                          this](FilterChainFactoryCallbacks& callbacks) -> void {\n          if (log_handler_.get()) {\n            callbacks.addAccessLogHandler(log_handler_);\n          }\n          for (int i = 0; i < num_decoder_filters; i++) {\n            callbacks.addStreamDecoderFilter(\n                StreamDecoderFilterSharedPtr{decoder_filters_[req * num_decoder_filters + i]});\n          }\n\n          for (int i = 0; i < num_encoder_filters; i++) {\n            callbacks.addStreamEncoderFilter(\n                StreamEncoderFilterSharedPtr{encoder_filters_[req * num_encoder_filters + i]});\n          }\n        }));\n\n    for (int i = 0; i < num_decoder_filters; i++) {\n      EXPECT_CALL(*decoder_filters_[req * num_decoder_filters + i], setDecoderFilterCallbacks(_));\n    }\n\n    for (int i = 0; i < num_encoder_filters; i++) {\n      EXPECT_CALL(*encoder_filters_[req * 
num_encoder_filters + i], setEncoderFilterCallbacks(_));\n    }\n  }\n}\n\nvoid HttpConnectionManagerImplTest::setUpBufferLimits() {\n  ON_CALL(response_encoder_, getStream()).WillByDefault(ReturnRef(stream_));\n  EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_));\n  EXPECT_CALL(stream_, addCallbacks(_))\n      .WillOnce(Invoke(\n          [&](Http::StreamCallbacks& callbacks) -> void { stream_callbacks_ = &callbacks; }));\n  EXPECT_CALL(stream_, setFlushTimeout(_));\n}\n\nvoid HttpConnectionManagerImplTest::setUpEncoderAndDecoder(bool request_with_data_and_trailers,\n                                                           bool decode_headers_stop_all) {\n  setUpBufferLimits();\n  EXPECT_CALL(*codec_, dispatch(_))\n      .WillOnce(Invoke([&, request_with_data_and_trailers](Buffer::Instance&) -> Http::Status {\n        RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);\n        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{\n            {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n        if (request_with_data_and_trailers) {\n          decoder->decodeHeaders(std::move(headers), false);\n\n          Buffer::OwnedImpl fake_data(\"12345\");\n          decoder->decodeData(fake_data, false);\n\n          RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{\"foo\", \"bar\"}}};\n          decoder->decodeTrailers(std::move(trailers));\n        } else {\n          decoder->decodeHeaders(std::move(headers), true);\n        }\n        return Http::okStatus();\n      }));\n\n  setupFilterChain(2, 2);\n\n  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, _))\n      .WillOnce(InvokeWithoutArgs([&, decode_headers_stop_all]() -> FilterHeadersStatus {\n        Buffer::OwnedImpl data(\"hello\");\n        decoder_filters_[0]->callbacks_->addDecodedData(data, true);\n        if (decode_headers_stop_all) {\n          return FilterHeadersStatus::StopAllIterationAndBuffer;\n        } 
else {\n          return FilterHeadersStatus::Continue;\n        }\n      }));\n  EXPECT_CALL(*decoder_filters_[0], decodeComplete());\n}\n\nvoid HttpConnectionManagerImplTest::startRequest(bool end_stream,\n                                                 absl::optional<std::string> body) {\n  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {\n    decoder_ = &conn_manager_->newStream(response_encoder_);\n    RequestHeaderMapPtr headers{\n        new TestRequestHeaderMapImpl{{\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}}};\n    decoder_->decodeHeaders(std::move(headers), end_stream && !body.has_value());\n    if (body.has_value()) {\n      Buffer::OwnedImpl fake_data(body.value());\n      decoder_->decodeData(fake_data, end_stream);\n    }\n    return Http::okStatus();\n  }));\n  Buffer::OwnedImpl fake_input;\n  conn_manager_->onData(fake_input, false);\n}\n\nEvent::MockTimer* HttpConnectionManagerImplTest::setUpTimer() {\n  // this timer belongs to whatever by whatever next creates a timer.\n  // See Envoy::Event::MockTimer for details.\n  return new Event::MockTimer(&filter_callbacks_.connection_.dispatcher_);\n}\n\nvoid HttpConnectionManagerImplTest::sendRequestHeadersAndData() {\n  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::StopIteration));\n  auto status = streaming_filter_ ? FilterDataStatus::StopIterationAndWatermark\n                                  : FilterDataStatus::StopIterationAndBuffer;\n  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)).WillOnce(Return(status));\n  EXPECT_CALL(*decoder_filters_[1], decodeComplete());\n\n  // Kick off the incoming data. 
|fake_input| is not sent, but instead kicks\n  // off sending the headers and |data| queued up in setUpEncoderAndDecoder().\n  Buffer::OwnedImpl fake_input(\"asdf\");\n  conn_manager_->onData(fake_input, false);\n}\n\nResponseHeaderMap*\nHttpConnectionManagerImplTest::sendResponseHeaders(ResponseHeaderMapPtr&& response_headers) {\n  ResponseHeaderMap* altered_response_headers = nullptr;\n\n  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, _))\n      .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus {\n        altered_response_headers = &headers;\n        return FilterHeadersStatus::Continue;\n      }));\n  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))\n      .WillOnce(Return(FilterHeadersStatus::Continue));\n  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));\n  decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(\"\");\n  decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, \"details\");\n  return altered_response_headers;\n}\n\nvoid HttpConnectionManagerImplTest::expectOnDestroy(bool deferred) {\n  for (auto filter : decoder_filters_) {\n    EXPECT_CALL(*filter, onStreamComplete());\n  }\n  {\n    auto setup_filter_expect = [](MockStreamEncoderFilter* filter) {\n      EXPECT_CALL(*filter, onStreamComplete());\n    };\n    std::for_each(encoder_filters_.rbegin(), encoder_filters_.rend(), setup_filter_expect);\n  }\n\n  for (auto filter : decoder_filters_) {\n    EXPECT_CALL(*filter, onDestroy());\n  }\n  {\n    auto setup_filter_expect = [](MockStreamEncoderFilter* filter) {\n      EXPECT_CALL(*filter, onDestroy());\n    };\n    std::for_each(encoder_filters_.rbegin(), encoder_filters_.rend(), setup_filter_expect);\n  }\n\n  if (deferred) {\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_));\n  }\n}\n\nvoid HttpConnectionManagerImplTest::doRemoteClose(bool deferred) {\n  EXPECT_CALL(stream_, removeCallbacks(_));\n  
expectOnDestroy(deferred);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/conn_manager_impl_test_base.h",
    "content": "#pragma once\n\n#include \"common/http/conn_manager_impl.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/http/date_provider_impl.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"extensions/access_loggers/file/file_access_log_impl.h\"\n\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Http {\n\nclass HttpConnectionManagerImplTest : public testing::Test, public ConnectionManagerConfig {\npublic:\n  struct RouteConfigProvider : public Router::RouteConfigProvider {\n    RouteConfigProvider(TimeSource& time_source) : time_source_(time_source) {}\n\n    // Router::RouteConfigProvider\n    Router::ConfigConstSharedPtr config() override { return route_config_; }\n    absl::optional<ConfigInfo> configInfo() const override { return {}; }\n    SystemTime lastUpdated() const override { return time_source_.systemTime(); }\n    void onConfigUpdate() override {}\n\n    TimeSource& time_source_;\n    std::shared_ptr<Router::MockConfig> route_config_{new NiceMock<Router::MockConfig>()};\n  };\n\n  HttpConnectionManagerImplTest();\n  ~HttpConnectionManagerImplTest() override;\n  Tracing::CustomTagConstSharedPtr requestHeaderCustomTag(const std::string& header);\n  void setup(bool ssl, const std::string& server_name, bool tracing = true, bool use_srds = false);\n  void setupFilterChain(int num_decoder_filters, int num_encoder_filters, int num_requests = 1);\n  void setUpBufferLimits();\n\n  // If request_with_data_and_trailers is true, includes data 
and trailers in the request. If\n  // decode_headers_stop_all is true, decoder_filters_[0]'s callback decodeHeaders() returns\n  // StopAllIterationAndBuffer.\n  void setUpEncoderAndDecoder(bool request_with_data_and_trailers, bool decode_headers_stop_all);\n\n  // Sends request headers, and stashes the new stream in decoder_;\n  void startRequest(bool end_stream = false, absl::optional<std::string> body = absl::nullopt);\n\n  Event::MockTimer* setUpTimer();\n  void sendRequestHeadersAndData();\n  ResponseHeaderMap* sendResponseHeaders(ResponseHeaderMapPtr&& response_headers);\n  void expectOnDestroy(bool deferred = true);\n  void doRemoteClose(bool deferred = true);\n\n  // Http::ConnectionManagerConfig\n  const std::list<AccessLog::InstanceSharedPtr>& accessLogs() override { return access_logs_; }\n  ServerConnectionPtr createCodec(Network::Connection&, const Buffer::Instance&,\n                                  ServerConnectionCallbacks&) override {\n    return ServerConnectionPtr{codec_};\n  }\n  DateProvider& dateProvider() override { return date_provider_; }\n  std::chrono::milliseconds drainTimeout() const override { return std::chrono::milliseconds(100); }\n  FilterChainFactory& filterFactory() override { return filter_factory_; }\n  bool generateRequestId() const override { return true; }\n  bool preserveExternalRequestId() const override { return false; }\n  bool alwaysSetRequestIdInResponse() const override { return false; }\n  uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; }\n  uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; }\n  absl::optional<std::chrono::milliseconds> idleTimeout() const override { return idle_timeout_; }\n  bool isRoutable() const override { return true; }\n  absl::optional<std::chrono::milliseconds> maxConnectionDuration() const override {\n    return max_connection_duration_;\n  }\n  std::chrono::milliseconds streamIdleTimeout() const override { return 
stream_idle_timeout_; }\n  std::chrono::milliseconds requestTimeout() const override { return request_timeout_; }\n  std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; }\n  absl::optional<std::chrono::milliseconds> maxStreamDuration() const override {\n    return max_stream_duration_;\n  }\n  bool use_srds_{};\n  Router::RouteConfigProvider* routeConfigProvider() override {\n    if (use_srds_) {\n      return nullptr;\n    }\n    return &route_config_provider_;\n  }\n  Config::ConfigProvider* scopedRouteConfigProvider() override {\n    if (use_srds_) {\n      return &scoped_route_config_provider_;\n    }\n    return nullptr;\n  }\n  const std::string& serverName() const override { return server_name_; }\n  HttpConnectionManagerProto::ServerHeaderTransformation\n  serverHeaderTransformation() const override {\n    return server_transformation_;\n  }\n  ConnectionManagerStats& stats() override { return stats_; }\n  ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; }\n  bool useRemoteAddress() const override { return use_remote_address_; }\n  const Http::InternalAddressConfig& internalAddressConfig() const override {\n    return internal_address_config_;\n  }\n  uint32_t xffNumTrustedHops() const override { return 0; }\n  bool skipXffAppend() const override { return false; }\n  const std::string& via() const override { return EMPTY_STRING; }\n  Http::ForwardClientCertType forwardClientCert() const override { return forward_client_cert_; }\n  const std::vector<Http::ClientCertDetailsType>& setCurrentClientCertDetails() const override {\n    return set_current_client_cert_details_;\n  }\n  const Network::Address::Instance& localAddress() override { return local_address_; }\n  const absl::optional<std::string>& userAgent() override { return user_agent_; }\n  Tracing::HttpTracerSharedPtr tracer() override { return tracer_; }\n  const TracingConnectionManagerConfig* tracingConfig() override { return 
tracing_config_.get(); }\n  ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; }\n  bool proxy100Continue() const override { return proxy_100_continue_; }\n  bool streamErrorOnInvalidHttpMessaging() const override {\n    return stream_error_on_invalid_http_messaging_;\n  }\n  const Http::Http1Settings& http1Settings() const override { return http1_settings_; }\n  bool shouldNormalizePath() const override { return normalize_path_; }\n  bool shouldMergeSlashes() const override { return merge_slashes_; }\n  bool shouldStripMatchingPort() const override { return strip_matching_port_; }\n  RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; }\n  envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n  headersWithUnderscoresAction() const override {\n    return headers_with_underscores_action_;\n  }\n  const LocalReply::LocalReply& localReply() const override { return *local_reply_; }\n\n  Envoy::Event::SimulatedTimeSystem test_time_;\n  NiceMock<Router::MockRouteConfigProvider> route_config_provider_;\n  std::shared_ptr<Router::MockConfig> route_config_{new NiceMock<Router::MockConfig>()};\n  NiceMock<Router::MockScopedRouteConfigProvider> scoped_route_config_provider_;\n  Stats::IsolatedStoreImpl fake_stats_;\n  Http::ContextImpl http_context_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Envoy::AccessLog::MockAccessLogManager> log_manager_;\n  std::string access_log_path_;\n  std::list<AccessLog::InstanceSharedPtr> access_logs_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  MockServerConnection* codec_;\n  NiceMock<MockFilterChainFactory> filter_factory_;\n  ConnectionManagerStats stats_;\n  ConnectionManagerTracingStats tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))};\n  NiceMock<Network::MockDrainDecision> drain_close_;\n  std::unique_ptr<ConnectionManagerImpl> conn_manager_;\n  std::string server_name_;\n  
HttpConnectionManagerProto::ServerHeaderTransformation server_transformation_{\n      HttpConnectionManagerProto::OVERWRITE};\n  Network::Address::Ipv4Instance local_address_{\"127.0.0.1\"};\n  bool use_remote_address_{true};\n  Http::DefaultInternalAddressConfig internal_address_config_;\n  Http::ForwardClientCertType forward_client_cert_{Http::ForwardClientCertType::Sanitize};\n  std::vector<Http::ClientCertDetailsType> set_current_client_cert_details_;\n  absl::optional<std::string> user_agent_;\n  uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB};\n  uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT};\n  absl::optional<std::chrono::milliseconds> idle_timeout_;\n  absl::optional<std::chrono::milliseconds> max_connection_duration_;\n  std::chrono::milliseconds stream_idle_timeout_{};\n  std::chrono::milliseconds request_timeout_{};\n  std::chrono::milliseconds delayed_close_timeout_{};\n  absl::optional<std::chrono::milliseconds> max_stream_duration_{};\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  RequestDecoder* decoder_{};\n  std::shared_ptr<Ssl::MockConnectionInfo> ssl_connection_;\n  std::shared_ptr<NiceMock<Tracing::MockHttpTracer>> tracer_{\n      std::make_shared<NiceMock<Tracing::MockHttpTracer>>()};\n  TracingConnectionManagerConfigPtr tracing_config_;\n  SlowDateProviderImpl date_provider_{test_time_.timeSystem()};\n  MockStream stream_;\n  Http::StreamCallbacks* stream_callbacks_{nullptr};\n  NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  NiceMock<Server::MockOverloadManager> overload_manager_;\n  uint32_t initial_buffer_limit_{};\n  bool streaming_filter_{false};\n  Stats::IsolatedStoreImpl fake_listener_stats_;\n  ConnectionManagerListenerStats listener_stats_;\n  bool proxy_100_continue_ = false;\n  bool stream_error_on_invalid_http_messaging_ = false;\n  bool 
preserve_external_request_id_ = false;\n  Http::Http1Settings http1_settings_;\n  bool normalize_path_ = false;\n  bool merge_slashes_ = false;\n  bool strip_matching_port_ = false;\n  envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n      headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW;\n  NiceMock<Network::MockClientConnection> upstream_conn_; // for websocket tests\n  NiceMock<Tcp::ConnectionPool::MockInstance> conn_pool_; // for websocket tests\n  RequestIDExtensionSharedPtr request_id_extension_;\n  const LocalReply::LocalReplyPtr local_reply_;\n\n  // TODO(mattklein123): Not all tests have been converted over to better setup. Convert the rest.\n  NiceMock<MockResponseEncoder> response_encoder_;\n  std::vector<MockStreamDecoderFilter*> decoder_filters_;\n  std::vector<MockStreamEncoderFilter*> encoder_filters_;\n  std::shared_ptr<AccessLog::MockInstance> log_handler_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/conn_manager_utility_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/common/random_generator.h\"\n#include \"common/http/conn_manager_utility.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/request_id_extension_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::An;\nusing testing::Matcher;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Http {\n\nclass MockRequestIDExtension : public RequestIDExtension {\npublic:\n  explicit MockRequestIDExtension(Random::RandomGenerator& random)\n      : real_(RequestIDExtensionFactory::defaultInstance(random)) {\n    ON_CALL(*this, set(_, _))\n        .WillByDefault([this](Http::RequestHeaderMap& request_headers, bool force) {\n          return real_->set(request_headers, force);\n        });\n    ON_CALL(*this, setInResponse(_, _))\n        .WillByDefault([this](Http::ResponseHeaderMap& response_headers,\n                              const Http::RequestHeaderMap& request_headers) {\n          return real_->setInResponse(response_headers, request_headers);\n        });\n    ON_CALL(*this, modBy(_, _, _))\n        .WillByDefault([this](const Http::RequestHeaderMap& request_headers, uint64_t& out,\n                              
uint64_t mod) { return real_->modBy(request_headers, out, mod); });\n    ON_CALL(*this, getTraceStatus(_))\n        .WillByDefault([this](const Http::RequestHeaderMap& request_headers) {\n          return real_->getTraceStatus(request_headers);\n        });\n    ON_CALL(*this, setTraceStatus(_, _))\n        .WillByDefault([this](Http::RequestHeaderMap& request_headers, TraceStatus trace_status) {\n          real_->setTraceStatus(request_headers, trace_status);\n        });\n  }\n\n  MOCK_METHOD(void, set, (Http::RequestHeaderMap&, bool));\n  MOCK_METHOD(void, setInResponse, (Http::ResponseHeaderMap&, const Http::RequestHeaderMap&));\n  MOCK_METHOD(bool, modBy, (const Http::RequestHeaderMap&, uint64_t&, uint64_t));\n  MOCK_METHOD(TraceStatus, getTraceStatus, (const Http::RequestHeaderMap&));\n  MOCK_METHOD(void, setTraceStatus, (Http::RequestHeaderMap&, TraceStatus));\n\nprivate:\n  RequestIDExtensionSharedPtr real_;\n};\n\nclass MockInternalAddressConfig : public Http::InternalAddressConfig {\npublic:\n  MOCK_METHOD(bool, isInternalAddress, (const Network::Address::Instance&), (const));\n};\n\nclass MockConnectionManagerConfig : public ConnectionManagerConfig {\npublic:\n  MockConnectionManagerConfig() {\n    ON_CALL(*this, generateRequestId()).WillByDefault(Return(true));\n    ON_CALL(*this, isRoutable()).WillByDefault(Return(true));\n    ON_CALL(*this, preserveExternalRequestId()).WillByDefault(Return(false));\n    ON_CALL(*this, alwaysSetRequestIdInResponse()).WillByDefault(Return(false));\n  }\n\n  // Http::ConnectionManagerConfig\n  ServerConnectionPtr createCodec(Network::Connection& connection, const Buffer::Instance& instance,\n                                  ServerConnectionCallbacks& callbacks) override {\n    return ServerConnectionPtr{createCodec_(connection, instance, callbacks)};\n  }\n\n  MOCK_METHOD(RequestIDExtensionSharedPtr, requestIDExtension, ());\n  MOCK_METHOD(const std::list<AccessLog::InstanceSharedPtr>&, accessLogs, ());\n  
MOCK_METHOD(ServerConnection*, createCodec_,\n              (Network::Connection&, const Buffer::Instance&, ServerConnectionCallbacks&));\n  MOCK_METHOD(DateProvider&, dateProvider, ());\n  MOCK_METHOD(std::chrono::milliseconds, drainTimeout, (), (const));\n  MOCK_METHOD(FilterChainFactory&, filterFactory, ());\n  MOCK_METHOD(bool, generateRequestId, (), (const));\n  MOCK_METHOD(bool, preserveExternalRequestId, (), (const));\n  MOCK_METHOD(bool, alwaysSetRequestIdInResponse, (), (const));\n  MOCK_METHOD(uint32_t, maxRequestHeadersKb, (), (const));\n  MOCK_METHOD(uint32_t, maxRequestHeadersCount, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, idleTimeout, (), (const));\n  MOCK_METHOD(bool, isRoutable, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, maxConnectionDuration, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, maxStreamDuration, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, streamIdleTimeout, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, requestTimeout, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, delayedCloseTimeout, (), (const));\n  MOCK_METHOD(Router::RouteConfigProvider*, routeConfigProvider, ());\n  MOCK_METHOD(Config::ConfigProvider*, scopedRouteConfigProvider, ());\n  MOCK_METHOD(const std::string&, serverName, (), (const));\n  MOCK_METHOD(HttpConnectionManagerProto::ServerHeaderTransformation, serverHeaderTransformation,\n              (), (const));\n  MOCK_METHOD(ConnectionManagerStats&, stats, ());\n  MOCK_METHOD(ConnectionManagerTracingStats&, tracingStats, ());\n  MOCK_METHOD(bool, useRemoteAddress, (), (const));\n  const Http::InternalAddressConfig& internalAddressConfig() const override {\n    return *internal_address_config_;\n  }\n\n  MOCK_METHOD(bool, unixSocketInternal, ());\n  MOCK_METHOD(uint32_t, xffNumTrustedHops, (), (const));\n  MOCK_METHOD(bool, skipXffAppend, (), (const));\n  MOCK_METHOD(const std::string&, via, (), (const));\n  
MOCK_METHOD(Http::ForwardClientCertType, forwardClientCert, (), (const));\n  MOCK_METHOD(const std::vector<Http::ClientCertDetailsType>&, setCurrentClientCertDetails, (),\n              (const));\n  MOCK_METHOD(const Network::Address::Instance&, localAddress, ());\n  MOCK_METHOD(const absl::optional<std::string>&, userAgent, ());\n  MOCK_METHOD(const Http::TracingConnectionManagerConfig*, tracingConfig, ());\n  MOCK_METHOD(Tracing::HttpTracerSharedPtr, tracer, ());\n  MOCK_METHOD(ConnectionManagerListenerStats&, listenerStats, ());\n  MOCK_METHOD(bool, proxy100Continue, (), (const));\n  MOCK_METHOD(bool, streamErrorOnInvalidHttpMessaging, (), (const));\n  MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const));\n  MOCK_METHOD(bool, shouldNormalizePath, (), (const));\n  MOCK_METHOD(bool, shouldMergeSlashes, (), (const));\n  MOCK_METHOD(bool, shouldStripMatchingPort, (), (const));\n  MOCK_METHOD(envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction,\n              headersWithUnderscoresAction, (), (const));\n  MOCK_METHOD(const LocalReply::LocalReply&, localReply, (), (const));\n\n  std::unique_ptr<Http::InternalAddressConfig> internal_address_config_ =\n      std::make_unique<DefaultInternalAddressConfig>();\n};\n\nconst Http::LowerCaseString& traceStatusHeader() {\n  static Http::LowerCaseString header(\"x-trace-status\");\n  return header;\n}\n\nclass ConnectionManagerUtilityTest : public testing::Test {\npublic:\n  ConnectionManagerUtilityTest()\n      : request_id_extension_(std::make_shared<NiceMock<MockRequestIDExtension>>(random_)),\n        local_reply_(LocalReply::Factory::createDefault()) {\n    ON_CALL(config_, userAgent()).WillByDefault(ReturnRef(user_agent_));\n\n    envoy::type::v3::FractionalPercent percent1;\n    percent1.set_numerator(100);\n    envoy::type::v3::FractionalPercent percent2;\n    percent2.set_numerator(10000);\n    percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n    
tracing_config_ = {\n        Tracing::OperationName::Ingress, {}, percent1, percent2, percent1, false, 256};\n    ON_CALL(config_, tracingConfig()).WillByDefault(Return(&tracing_config_));\n    ON_CALL(config_, localReply()).WillByDefault(ReturnRef(*local_reply_));\n\n    ON_CALL(config_, via()).WillByDefault(ReturnRef(via_));\n    ON_CALL(config_, requestIDExtension()).WillByDefault(Return(request_id_extension_));\n  }\n\n  struct MutateRequestRet {\n    bool operator==(const MutateRequestRet& rhs) const {\n      return downstream_address_ == rhs.downstream_address_ && internal_ == rhs.internal_;\n    }\n\n    std::string downstream_address_;\n    bool internal_;\n  };\n\n  // This is a convenience method used to call mutateRequestHeaders(). It is done in this\n  // convoluted way to force tests to check both the final downstream address as well as whether\n  // the request is internal/external, given the importance of these two pieces of data.\n  MutateRequestRet callMutateRequestHeaders(RequestHeaderMap& headers, Protocol) {\n    MutateRequestRet ret;\n    ret.downstream_address_ = ConnectionManagerUtility::mutateRequestHeaders(\n                                  headers, connection_, config_, route_config_, local_info_)\n                                  ->asString();\n    ConnectionManagerUtility::mutateTracingRequestHeader(headers, runtime_, config_, &route_);\n    ret.internal_ = HeaderUtility::isEnvoyInternalRequest(headers);\n    return ret;\n  }\n\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  std::shared_ptr<NiceMock<MockRequestIDExtension>> request_id_extension_;\n  NiceMock<MockConnectionManagerConfig> config_;\n  NiceMock<Router::MockConfig> route_config_;\n  NiceMock<Router::MockRoute> route_;\n  absl::optional<std::string> user_agent_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  Http::TracingConnectionManagerConfig tracing_config_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  
LocalReply::LocalReplyPtr local_reply_;\n  std::string canary_node_{\"canary\"};\n  std::string empty_node_;\n  std::string via_;\n};\n\n// Tests for ConnectionManagerUtility::determineNextProtocol.\nTEST_F(ConnectionManagerUtilityTest, DetermineNextProtocol) {\n  {\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return(\"hello\"));\n    Buffer::OwnedImpl data(\"\");\n    EXPECT_EQ(\"hello\", ConnectionManagerUtility::determineNextProtocol(connection, data));\n  }\n\n  {\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return(\"\"));\n    Buffer::OwnedImpl data(\"\");\n    EXPECT_EQ(\"\", ConnectionManagerUtility::determineNextProtocol(connection, data));\n  }\n\n  {\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return(\"\"));\n    Buffer::OwnedImpl data(\"GET / HTTP/1.1\");\n    EXPECT_EQ(\"\", ConnectionManagerUtility::determineNextProtocol(connection, data));\n  }\n\n  {\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return(\"\"));\n    Buffer::OwnedImpl data(\"PRI * HTTP/2.0\\r\\n\");\n    EXPECT_EQ(Utility::AlpnNames::get().Http2,\n              ConnectionManagerUtility::determineNextProtocol(connection, data));\n  }\n\n  {\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return(\"\"));\n    Buffer::OwnedImpl data(\"PRI * HTTP/2\");\n    EXPECT_EQ(Utility::AlpnNames::get().Http2,\n              ConnectionManagerUtility::determineNextProtocol(connection, data));\n  }\n\n  {\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return(\"\"));\n    Buffer::OwnedImpl data(\"PRI * HTTP/\");\n    EXPECT_EQ(\"\", ConnectionManagerUtility::determineNextProtocol(connection, data));\n  }\n\n  {\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, 
nextProtocol()).WillRepeatedly(Return(\"\"));\n    Buffer::OwnedImpl data(\" PRI * HTTP/2\");\n    EXPECT_EQ(\"\", ConnectionManagerUtility::determineNextProtocol(connection, data));\n  }\n}\n\n// Verify external request and XFF is set when we are using remote address and the address is\n// external.\nTEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWhenNotLocalHostRemoteAddress) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"12.12.12.12\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers;\n\n  EXPECT_EQ((MutateRequestRet{\"12.12.12.12:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(connection_.remote_address_->ip()->addressAsString(),\n            headers.get_(Headers::get().ForwardedFor));\n}\n\n// Verify that we don't append XFF when skipXffAppend(), even if using remote\n// address and where the address is external.\nTEST_F(ConnectionManagerUtilityTest, SkipXffAppendUseRemoteAddress) {\n  EXPECT_CALL(config_, skipXffAppend()).WillOnce(Return(true));\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"12.12.12.12\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers;\n\n  EXPECT_EQ((MutateRequestRet{\"12.12.12.12:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_FALSE(headers.has(Headers::get().ForwardedFor));\n}\n\n// Verify that we pass-thru XFF when skipXffAppend(), even if using remote\n// address and where the address is external.\nTEST_F(ConnectionManagerUtilityTest, SkipXffAppendPassThruUseRemoteAddress) {\n  EXPECT_CALL(config_, skipXffAppend()).WillOnce(Return(true));\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"12.12.12.12\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl 
headers{{\"x-forwarded-for\", \"198.51.100.1\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"12.12.12.12:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"198.51.100.1\", headers.getForwardedForValue());\n}\n\nTEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternal) {\n  TestScopedRuntime scoped_runtime;\n\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(1));\n  EXPECT_CALL(config_, skipXffAppend()).WillOnce(Return(true));\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"12.12.12.12\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-proto\", \"https\"}};\n\n  callMutateRequestHeaders(headers, Protocol::Http2);\n  EXPECT_EQ(\"https\", headers.getForwardedProtoValue());\n}\n\nTEST_F(ConnectionManagerUtilityTest, OverwriteForwardedProtoWhenExternal) {\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(0));\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\");\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-proto\", \"https\"}};\n  Network::Address::Ipv4Instance local_address(\"10.3.2.1\");\n  ON_CALL(config_, localAddress()).WillByDefault(ReturnRef(local_address));\n\n  callMutateRequestHeaders(headers, Protocol::Http2);\n  EXPECT_EQ(\"http\", headers.getForwardedProtoValue());\n}\n\n// Verify internal request and XFF is set when we are using remote address and the address is\n// internal according to user configuration.\nTEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWhenUserConfiguredRemoteAddress) {\n  auto config = std::make_unique<NiceMock<MockInternalAddressConfig>>();\n  ON_CALL(*config, isInternalAddress).WillByDefault(Return(true));\n  config_.internal_address_config_ = 
std::move(config);\n\n  Network::Address::Ipv4Instance local_address(\"10.3.2.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  ON_CALL(config_, localAddress()).WillByDefault(ReturnRef(local_address));\n\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"12.12.12.12\");\n\n  TestRequestHeaderMapImpl headers;\n  EXPECT_EQ((MutateRequestRet{\"12.12.12.12:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"12.12.12.12\", headers.get_(Headers::get().ForwardedFor));\n}\n\n// Verify internal request and XFF is set when we are using remote address the address is internal.\nTEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWhenLocalHostRemoteAddress) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\");\n  Network::Address::Ipv4Instance local_address(\"10.3.2.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  ON_CALL(config_, localAddress()).WillByDefault(ReturnRef(local_address));\n  TestRequestHeaderMapImpl headers;\n\n  EXPECT_EQ((MutateRequestRet{\"127.0.0.1:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(local_address.ip()->addressAsString(), headers.get_(Headers::get().ForwardedFor));\n}\n\n// Verify that we trust Nth address from XFF when using remote address with xff_num_trusted_hops.\nTEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWithXFFTrustedHops) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"203.0.113.128\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(1));\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"198.51.100.1\"}};\n  EXPECT_EQ((MutateRequestRet{\"198.51.100.1:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(headers.EnvoyExternalAddress()->value(), 
\"198.51.100.1\");\n}\n\n// Verify that xff_num_trusted_hops works when not using remote address.\nTEST_F(ConnectionManagerUtilityTest, UseXFFTrustedHopsWithoutRemoteAddress) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false));\n  ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(1));\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"198.51.100.2, 198.51.100.1\"}};\n  EXPECT_EQ((MutateRequestRet{\"198.51.100.2:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(headers.EnvoyExternalAddress(), nullptr);\n}\n\n// Verify that we don't set the via header on requests/responses when empty.\nTEST_F(ConnectionManagerUtilityTest, ViaEmpty) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n\n  TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(request_headers, Protocol::Http2));\n  EXPECT_FALSE(request_headers.has(Headers::get().Via));\n\n  TestResponseHeaderMapImpl response_headers;\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_,\n                                                  via_);\n  EXPECT_FALSE(response_headers.has(Headers::get().Via));\n}\n\n// Verify that we append a non-empty via header on requests/responses.\nTEST_F(ConnectionManagerUtilityTest, ViaAppend) {\n  via_ = \"foo\";\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n\n  TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(request_headers, Protocol::Http2));\n  EXPECT_EQ(\"foo\", 
request_headers.get_(Headers::get().Via));\n\n  TestResponseHeaderMapImpl response_headers;\n  // Pretend we're doing a 100-continue transform here.\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, \"\");\n  // The actual response header processing.\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_,\n                                                  via_);\n  EXPECT_EQ(\"foo\", response_headers.get_(Headers::get().Via));\n}\n\n// Verify that we don't set user agent when it is already set.\nTEST_F(ConnectionManagerUtilityTest, UserAgentDontSet) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers{{\"user-agent\", \"foo\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"foo\", headers.get_(Headers::get().UserAgent));\n  EXPECT_FALSE(headers.has(Headers::get().EnvoyDownstreamServiceCluster));\n  EXPECT_FALSE(headers.has(Headers::get().EnvoyDownstreamServiceNode));\n}\n\n// Verify that we do set user agent when it is empty.\nTEST_F(ConnectionManagerUtilityTest, UserAgentSetWhenIncomingEmpty) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  ON_CALL(local_info_, nodeName()).WillByDefault(ReturnRef(canary_node_));\n  user_agent_ = \"bar\";\n  TestRequestHeaderMapImpl headers{{\"user-agent\", \"\"},\n                                   {\"x-envoy-downstream-service-cluster\", \"foo\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"bar\", headers.get_(Headers::get().UserAgent));\n  EXPECT_EQ(\"bar\", 
headers.get_(Headers::get().EnvoyDownstreamServiceCluster));\n  EXPECT_EQ(\"canary\", headers.get_(Headers::get().EnvoyDownstreamServiceNode));\n}\n\n// Test not-cleaning/cleaning the force trace headers in different scenarios.\nTEST_F(ConnectionManagerUtilityTest, InternalServiceForceTrace) {\n  const std::string uuid = \"f4dca0a9-12c7-4307-8002-969403baf480\";\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false));\n\n  {\n    // Internal request, make traceable.\n    TestRequestHeaderMapImpl headers{\n        {\"x-forwarded-for\", \"10.0.0.1\"}, {\"x-request-id\", uuid}, {\"x-envoy-force-trace\", \"true\"}};\n    EXPECT_CALL(random_, uuid()).Times(0);\n    EXPECT_CALL(runtime_.snapshot_,\n                featureEnabled(\"tracing.global_enabled\",\n                               An<const envoy::type::v3::FractionalPercent&>(), _))\n        .WillOnce(Return(true));\n\n    EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n              callMutateRequestHeaders(headers, Protocol::Http2));\n    // Traceable (forced trace) variant of uuid\n    EXPECT_EQ(\"f4dca0a9-12c7-a307-8002-969403baf480\", headers.get_(Headers::get().RequestId));\n  }\n\n  {\n    // Not internal request, force trace header should be cleaned.\n    TestRequestHeaderMapImpl headers{\n        {\"x-forwarded-for\", \"34.0.0.1\"}, {\"x-request-id\", uuid}, {\"x-envoy-force-trace\", \"true\"}};\n    EXPECT_CALL(random_, uuid()).Times(0);\n    EXPECT_CALL(runtime_.snapshot_,\n                featureEnabled(\"tracing.random_sampling\",\n                               An<const envoy::type::v3::FractionalPercent&>(), _))\n        .WillOnce(Return(false));\n    EXPECT_CALL(runtime_.snapshot_,\n                featureEnabled(\"tracing.global_enabled\",\n                               An<const envoy::type::v3::FractionalPercent&>(), _))\n        .WillOnce(Return(true));\n\n    EXPECT_EQ((MutateRequestRet{\"34.0.0.1:0\", false}),\n              callMutateRequestHeaders(headers, 
Protocol::Http2));\n    EXPECT_EQ(uuid, headers.get_(Headers::get().RequestId));\n    EXPECT_FALSE(headers.has(Headers::get().EnvoyForceTrace));\n  }\n}\n\n// Test generating request-id in various edge request scenarios.\nTEST_F(ConnectionManagerUtilityTest, EdgeRequestRegenerateRequestIdAndWipeDownstream) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"34.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"tracing.global_enabled\",\n                                             An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillByDefault(Return(true));\n\n  {\n    TestRequestHeaderMapImpl headers{{\"x-envoy-downstream-service-cluster\", \"foo\"},\n                                     {\"x-request-id\", \"will_be_regenerated\"}};\n    EXPECT_CALL(random_, uuid());\n\n    EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"tracing.client_enabled\", Matcher<uint64_t>(_)))\n        .Times(0);\n    EXPECT_EQ((MutateRequestRet{\"34.0.0.1:0\", false}),\n              callMutateRequestHeaders(headers, Protocol::Http2));\n    EXPECT_FALSE(headers.has(Headers::get().EnvoyDownstreamServiceCluster));\n    EXPECT_FALSE(headers.has(Headers::get().EnvoyDownstreamServiceNode));\n    // No changes to uuid as x-client-trace-id is missing.\n    EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId));\n  }\n\n  {\n    // Runtime does not allow to make request traceable even though x-client-trace-id set.\n    TestRequestHeaderMapImpl headers{{\"x-envoy-downstream-service-cluster\", \"foo\"},\n                                     {\"x-request-id\", \"will_be_regenerated\"},\n                                     {\"x-client-trace-id\", \"trace-id\"}};\n    EXPECT_CALL(random_, uuid());\n    EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"tracing.client_enabled\",\n                                                   An<const 
envoy::type::v3::FractionalPercent&>()))\n        .WillOnce(Return(false));\n\n    EXPECT_EQ((MutateRequestRet{\"34.0.0.1:0\", false}),\n              callMutateRequestHeaders(headers, Protocol::Http2));\n    EXPECT_FALSE(headers.has(Headers::get().EnvoyDownstreamServiceCluster));\n    EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId));\n  }\n\n  {\n    // Runtime is enabled for tracing and x-client-trace-id set.\n    TestRequestHeaderMapImpl headers{{\"x-envoy-downstream-service-cluster\", \"foo\"},\n                                     {\"x-request-id\", \"will_be_regenerated\"},\n                                     {\"x-client-trace-id\", \"trace-id\"}};\n    EXPECT_CALL(random_, uuid());\n    EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"tracing.client_enabled\",\n                                                   An<const envoy::type::v3::FractionalPercent&>()))\n        .WillOnce(Return(true));\n\n    EXPECT_EQ((MutateRequestRet{\"34.0.0.1:0\", false}),\n              callMutateRequestHeaders(headers, Protocol::Http2));\n    EXPECT_FALSE(headers.has(Headers::get().EnvoyDownstreamServiceCluster));\n    // Traceable (client trace) variant of random_.uuid_\n    EXPECT_EQ(\"a121e9e1-feae-b136-9e0e-6fac343d56c9\", headers.get_(Headers::get().RequestId));\n  }\n}\n\n// This tests that an external request, but not an edge request (because not using remote address)\n// does not overwrite x-request-id. 
This happens in the internal ingress case.\nTEST_F(ConnectionManagerUtilityTest, ExternalRequestPreserveRequestIdAndDownstream) {\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false));\n  EXPECT_CALL(connection_, remoteAddress()).Times(0);\n  TestRequestHeaderMapImpl headers{{\"x-envoy-downstream-service-cluster\", \"foo\"},\n                                   {\"x-request-id\", \"id\"},\n                                   {\"x-forwarded-for\", \"34.0.0.1\"}};\n\n  EXPECT_CALL(local_info_, nodeName()).Times(0);\n\n  EXPECT_EQ((MutateRequestRet{\"34.0.0.1:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"foo\", headers.get_(Headers::get().EnvoyDownstreamServiceCluster));\n  EXPECT_FALSE(headers.has(Headers::get().EnvoyDownstreamServiceNode));\n  EXPECT_EQ(\"id\", headers.get_(Headers::get().RequestId));\n}\n\n// Verify that we don't overwrite user agent, but do set x-envoy-downstream-service-cluster\n// correctly.\nTEST_F(ConnectionManagerUtilityTest, UserAgentSetIncomingUserAgent) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n\n  user_agent_ = \"bar\";\n  TestRequestHeaderMapImpl headers{{\"user-agent\", \"foo\"},\n                                   {\"x-envoy-downstream-service-cluster\", \"foo\"}};\n  EXPECT_CALL(local_info_, nodeName()).WillOnce(ReturnRef(empty_node_));\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_FALSE(headers.has(Headers::get().EnvoyDownstreamServiceNode));\n  EXPECT_EQ(\"foo\", headers.get_(Headers::get().UserAgent));\n  EXPECT_EQ(\"bar\", headers.get_(Headers::get().EnvoyDownstreamServiceCluster));\n}\n\n// Verify that we set both user agent and x-envoy-downstream-service-cluster.\nTEST_F(ConnectionManagerUtilityTest, UserAgentSetNoIncomingUserAgent) {\n  
connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  user_agent_ = \"bar\";\n  TestRequestHeaderMapImpl headers;\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"bar\", headers.get_(Headers::get().UserAgent));\n  EXPECT_EQ(\"bar\", headers.get_(Headers::get().EnvoyDownstreamServiceCluster));\n}\n\n// Test different permutations of request-id generation.\nTEST_F(ConnectionManagerUtilityTest, RequestIdGeneratedWhenItsNotPresent) {\n  {\n    TestRequestHeaderMapImpl headers{{\":authority\", \"host\"}, {\":path\", \"/\"}};\n    EXPECT_CALL(random_, uuid()).WillOnce(Return(\"generated_uuid\"));\n\n    EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n              callMutateRequestHeaders(headers, Protocol::Http2));\n    EXPECT_EQ(\"generated_uuid\", headers.get_(\"x-request-id\"));\n  }\n\n  {\n    Random::RandomGeneratorImpl rand;\n    TestRequestHeaderMapImpl headers{{\"x-client-trace-id\", \"trace-id\"}};\n    const std::string uuid = rand.uuid();\n    EXPECT_CALL(random_, uuid()).WillOnce(Return(uuid));\n\n    EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n              callMutateRequestHeaders(headers, Protocol::Http2));\n    // x-request-id should not be set to be traceable as it's not edge request\n    EXPECT_EQ(uuid, headers.get_(\"x-request-id\"));\n  }\n}\n\n// Make sure we do not overwrite x-request-id if the request is internal.\nTEST_F(ConnectionManagerUtilityTest, DoNotOverrideRequestIdIfPresentWhenInternalRequest) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers{{\"x-request-id\", \"original_request_id\"}};\n  EXPECT_CALL(random_, uuid()).Times(0);\n\n  
EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"original_request_id\", headers.get_(\"x-request-id\"));\n}\n\n// Make sure that we do overwrite x-request-id for \"edge\" external requests.\nTEST_F(ConnectionManagerUtilityTest, OverrideRequestIdForExternalRequests) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"134.2.2.11\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers{{\"x-request-id\", \"original\"}};\n  EXPECT_CALL(random_, uuid()).WillOnce(Return(\"override\"));\n\n  EXPECT_EQ((MutateRequestRet{\"134.2.2.11:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"override\", headers.get_(\"x-request-id\"));\n}\n\n// A request that uses remote address and is from an external address should be treated as an\n// external request with all internal only headers cleaned.\nTEST_F(ConnectionManagerUtilityTest, ExternalAddressExternalRequestUseRemote) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"50.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  route_config_.internal_only_headers_.push_back(LowerCaseString(\"custom_header\"));\n  TestRequestHeaderMapImpl headers{{\"x-envoy-decorator-operation\", \"foo\"},\n                                   {\"x-envoy-downstream-service-cluster\", \"foo\"},\n                                   {\"x-envoy-hedge-on-per-try-timeout\", \"foo\"},\n                                   {\"x-envoy-retriable-status-codes\", \"123,456\"},\n                                   {\"x-envoy-retry-on\", \"foo\"},\n                                   {\"x-envoy-retry-grpc-on\", \"foo\"},\n                                   {\"x-envoy-max-retries\", \"foo\"},\n                                   {\"x-envoy-upstream-alt-stat-name\", \"foo\"},\n                          
         {\"x-envoy-upstream-rq-timeout-alt-response\", \"204\"},\n                                   {\"x-envoy-upstream-rq-timeout-ms\", \"foo\"},\n                                   {\"x-envoy-expected-rq-timeout-ms\", \"10\"},\n                                   {\"x-envoy-ip-tags\", \"bar\"},\n                                   {\"x-envoy-original-url\", \"my_url\"},\n                                   {\"custom_header\", \"foo\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"50.0.0.1:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"50.0.0.1\", headers.get_(\"x-envoy-external-address\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-decorator-operation\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-downstream-service-cluster\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-hedge-on-per-try-timeout\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-retriable-status-codes\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-retry-on\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-retry-grpc-on\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-max-retries\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-upstream-alt-stat-name\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-alt-response\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-expected-rq-timeout-ms\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-ip-tags\"));\n  EXPECT_FALSE(headers.has(\"x-envoy-original-url\"));\n  EXPECT_FALSE(headers.has(\"custom_header\"));\n}\n\n// A request that is from an external address, but does not use remote address, should pull the\n// address from XFF.\nTEST_F(ConnectionManagerUtilityTest, ExternalAddressExternalRequestDontUseRemote) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"60.0.0.2\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false));\n  TestRequestHeaderMapImpl headers{{\"x-envoy-external-address\", \"60.0.0.1\"},\n                                   
{\"x-forwarded-for\", \"60.0.0.1\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"60.0.0.1:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"60.0.0.1\", headers.get_(\"x-envoy-external-address\"));\n  EXPECT_EQ(\"60.0.0.1\", headers.get_(\"x-forwarded-for\"));\n}\n\n// Verify that if XFF is invalid we fall back to remote address, even if it is a pipe.\nTEST_F(ConnectionManagerUtilityTest, PipeAddressInvalidXFFtDontUseRemote) {\n  connection_.remote_address_ = std::make_shared<Network::Address::PipeInstance>(\"/blah\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false));\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"blah\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"/blah\", false}), callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_FALSE(headers.has(\"x-envoy-external-address\"));\n}\n\n// Verify that we treat a request as external even if the direct remote is internal and XFF\n// includes only internal addresses. Note that this is legacy behavior. 
See the comments\n// in mutateRequestHeaders() for more info.\nTEST_F(ConnectionManagerUtilityTest, AppendInternalAddressXffNotInternalRequest) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"10.0.0.2\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"10.0.0.2,10.0.0.1\", headers.get_(\"x-forwarded-for\"));\n}\n\n// A request that is from an internal address and uses remote address should be an internal request.\n// It should also preserve x-envoy-external-address.\nTEST_F(ConnectionManagerUtilityTest, ExternalAddressInternalRequestUseRemote) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.1\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers{{\"x-envoy-external-address\", \"60.0.0.1\"},\n                                   {\"x-envoy-expected-rq-timeout-ms\", \"10\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(\"60.0.0.1\", headers.get_(\"x-envoy-external-address\"));\n  EXPECT_EQ(\"10.0.0.1\", headers.get_(\"x-forwarded-for\"));\n  EXPECT_EQ(\"10\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n}\n\n// Make sure we don't remove connection headers for WS requests.\nTEST_F(ConnectionManagerUtilityTest, DoNotRemoveConnectionUpgradeForWebSocketRequests) {\n  TestRequestHeaderMapImpl headers{{\"connection\", \"upgrade\"}, {\"upgrade\", \"websocket\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http11));\n  EXPECT_EQ(\"upgrade\", headers.get_(\"connection\"));\n  EXPECT_EQ(\"websocket\", headers.get_(\"upgrade\"));\n}\n\n// Make 
sure we do remove connection headers for non-WS requests.\nTEST_F(ConnectionManagerUtilityTest, RemoveConnectionUpgradeForNonWebSocketRequests) {\n  TestRequestHeaderMapImpl headers{{\"connection\", \"close\"}, {\"upgrade\", \"websocket\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http11));\n  EXPECT_FALSE(headers.has(\"connection\"));\n  EXPECT_FALSE(headers.has(\"upgrade\"));\n}\n\n// Test cleaning response headers.\nTEST_F(ConnectionManagerUtilityTest, MutateResponseHeaders) {\n  TestResponseHeaderMapImpl response_headers{\n      {\"connection\", \"foo\"}, {\"transfer-encoding\", \"foo\"}, {\"custom_header\", \"custom_value\"}};\n  TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"request-id\"}};\n\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, \"\");\n\n  EXPECT_EQ(1UL, response_headers.size());\n  EXPECT_EQ(\"custom_value\", response_headers.get_(\"custom_header\"));\n  EXPECT_FALSE(response_headers.has(\"x-request-id\"));\n  EXPECT_FALSE(response_headers.has(Headers::get().Via));\n}\n\n// Make sure we don't remove connection headers on all Upgrade responses.\nTEST_F(ConnectionManagerUtilityTest, DoNotRemoveConnectionUpgradeForWebSocketResponses) {\n  TestRequestHeaderMapImpl request_headers{{\"connection\", \"UpGrAdE\"}, {\"upgrade\", \"foo\"}};\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"101\"},\n                                             {\"connection\", \"upgrade\"},\n                                             {\"transfer-encoding\", \"foo\"},\n                                             {\"upgrade\", \"bar\"}};\n  EXPECT_TRUE(Utility::isUpgrade(request_headers));\n  EXPECT_TRUE(Utility::isUpgrade(response_headers));\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, \"\");\n\n  EXPECT_EQ(3UL, response_headers.size()) << response_headers;\n  
EXPECT_EQ(\"upgrade\", response_headers.get_(\"connection\"));\n  EXPECT_EQ(\"bar\", response_headers.get_(\"upgrade\"));\n  EXPECT_EQ(\"101\", response_headers.get_(\":status\"));\n}\n\n// Make sure we don't add a content-length header on Upgrade responses.\nTEST_F(ConnectionManagerUtilityTest, DoNotAddConnectionLengthForWebSocket101Responses) {\n  TestRequestHeaderMapImpl request_headers{{\"connection\", \"UpGrAdE\"}, {\"upgrade\", \"foo\"}};\n  TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"101\"}, {\"connection\", \"upgrade\"}, {\"upgrade\", \"bar\"}};\n  EXPECT_TRUE(Utility::isUpgrade(request_headers));\n  EXPECT_TRUE(Utility::isUpgrade(response_headers));\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, \"\");\n\n  EXPECT_EQ(3UL, response_headers.size()) << response_headers;\n  EXPECT_EQ(\"upgrade\", response_headers.get_(\"connection\"));\n  EXPECT_EQ(\"bar\", response_headers.get_(\"upgrade\"));\n  EXPECT_EQ(\"101\", response_headers.get_(\":status\"));\n}\n\nTEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) {\n  // Test clearing non-upgrade request and response headers\n  {\n    TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"request-id\"}};\n    TestResponseHeaderMapImpl response_headers{\n        {\"connection\", \"foo\"}, {\"transfer-encoding\", \"bar\"}, {\"custom_header\", \"custom_value\"}};\n    EXPECT_FALSE(Utility::isUpgrade(request_headers));\n    EXPECT_FALSE(Utility::isUpgrade(response_headers));\n    ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_,\n                                                    \"\");\n\n    EXPECT_EQ(1UL, response_headers.size()) << response_headers;\n    EXPECT_EQ(\"custom_value\", response_headers.get_(\"custom_header\"));\n  }\n\n  // Test with the request headers not valid upgrade headers\n  {\n    TestRequestHeaderMapImpl request_headers{{\"upgrade\", \"foo\"}};\n 
   TestResponseHeaderMapImpl response_headers{{\"connection\", \"upgrade\"},\n                                               {\"transfer-encoding\", \"eep\"},\n                                               {\"upgrade\", \"foo\"},\n                                               {\"custom_header\", \"custom_value\"}};\n    EXPECT_FALSE(Utility::isUpgrade(request_headers));\n    EXPECT_TRUE(Utility::isUpgrade(response_headers));\n    ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_,\n                                                    \"\");\n\n    EXPECT_EQ(1UL, response_headers.size()) << response_headers;\n    EXPECT_EQ(\"custom_value\", response_headers.get_(\"custom_header\"));\n  }\n\n  // Test with the response headers not valid upgrade headers\n  {\n    TestRequestHeaderMapImpl request_headers{{\"connection\", \"UpGrAdE\"}, {\"upgrade\", \"foo\"}};\n    TestResponseHeaderMapImpl response_headers{{\"transfer-encoding\", \"foo\"}, {\"upgrade\", \"bar\"}};\n    EXPECT_TRUE(Utility::isUpgrade(request_headers));\n    EXPECT_FALSE(Utility::isUpgrade(response_headers));\n    ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_,\n                                                    \"\");\n\n    EXPECT_EQ(0UL, response_headers.size()) << response_headers;\n  }\n}\n\nTEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequestsLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fix_upgrade_response\", \"false\"}});\n\n  // Test with the request headers not valid upgrade headers\n  {\n    TestRequestHeaderMapImpl request_headers{{\"upgrade\", \"foo\"}};\n    TestResponseHeaderMapImpl response_headers{{\"connection\", \"upgrade\"},\n                                               {\"transfer-encoding\", \"eep\"},\n                                               {\"upgrade\", \"foo\"},\n    
                                           {\"custom_header\", \"custom_value\"}};\n    EXPECT_FALSE(Utility::isUpgrade(request_headers));\n    EXPECT_TRUE(Utility::isUpgrade(response_headers));\n    ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_,\n                                                    \"\");\n\n    EXPECT_EQ(2UL, response_headers.size()) << response_headers;\n    EXPECT_EQ(\"custom_value\", response_headers.get_(\"custom_header\"));\n    EXPECT_EQ(\"foo\", response_headers.get_(\"upgrade\"));\n  }\n\n  // Test with the response headers not valid upgrade headers\n  {\n    TestRequestHeaderMapImpl request_headers{{\"connection\", \"UpGrAdE\"}, {\"upgrade\", \"foo\"}};\n    TestResponseHeaderMapImpl response_headers{{\"transfer-encoding\", \"foo\"}, {\"upgrade\", \"bar\"}};\n    EXPECT_TRUE(Utility::isUpgrade(request_headers));\n    EXPECT_FALSE(Utility::isUpgrade(response_headers));\n    ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_,\n                                                    \"\");\n\n    EXPECT_EQ(1UL, response_headers.size()) << response_headers;\n    EXPECT_EQ(\"bar\", response_headers.get_(\"upgrade\"));\n  }\n}\n\n// Test that we correctly return x-request-id if we were requested to force a trace.\nTEST_F(ConnectionManagerUtilityTest, MutateResponseHeadersReturnXRequestId) {\n  TestResponseHeaderMapImpl response_headers;\n  TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"request-id\"},\n                                           {\"x-envoy-force-trace\", \"true\"}};\n\n  EXPECT_CALL(*request_id_extension_,\n              setInResponse(testing::Ref(response_headers), testing::Ref(request_headers)))\n      .Times(1);\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, \"\");\n  EXPECT_EQ(\"request-id\", response_headers.get_(\"x-request-id\"));\n}\n\n// Test that we do not return x-request-id 
if we were not requested to force a trace.\nTEST_F(ConnectionManagerUtilityTest, SkipMutateResponseHeadersReturnXRequestId) {\n  TestResponseHeaderMapImpl response_headers;\n  TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"request-id\"}};\n\n  EXPECT_CALL(*request_id_extension_,\n              setInResponse(testing::Ref(response_headers), testing::Ref(request_headers)))\n      .Times(0);\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, \"\");\n  EXPECT_EQ(\"\", response_headers.get_(\"x-request-id\"));\n}\n\n// Test that we do return x-request-id if we were asked to always return it even if trace is not\n// forced.\nTEST_F(ConnectionManagerUtilityTest, AlwaysMutateResponseHeadersReturnXRequestId) {\n  TestResponseHeaderMapImpl response_headers;\n  TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"request-id\"}};\n\n  EXPECT_CALL(*request_id_extension_,\n              setInResponse(testing::Ref(response_headers), testing::Ref(request_headers)))\n      .Times(1);\n  ON_CALL(config_, alwaysSetRequestIdInResponse()).WillByDefault(Return(true));\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, \"\");\n  EXPECT_EQ(\"request-id\", response_headers.get_(\"x-request-id\"));\n}\n\n// Test full sanitization of x-forwarded-client-cert.\nTEST_F(ConnectionManagerUtilityTest, MtlsSanitizeClientCert) {\n  auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(true));\n  ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::Sanitize));\n  std::vector<Http::ClientCertDetailsType> details;\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-client-cert\", \"By=test;URI=abc;DNS=example.com\"}};\n\n  
EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_FALSE(headers.has(\"x-forwarded-client-cert\"));\n}\n\n// Test that we sanitize and set x-forwarded-client-cert.\nTEST_F(ConnectionManagerUtilityTest, MtlsForwardOnlyClientCert) {\n  auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(true));\n  ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::ForwardOnly));\n  std::vector<Http::ClientCertDetailsType> details;\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers{\n      {\"x-forwarded-client-cert\", \"By=test://foo.com/fe;URI=test://bar.com/be;DNS=example.com\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_TRUE(headers.has(\"x-forwarded-client-cert\"));\n  EXPECT_EQ(\"By=test://foo.com/fe;URI=test://bar.com/be;DNS=example.com\",\n            headers.get_(\"x-forwarded-client-cert\"));\n}\n\n// The server (local) identity is foo.com/be. 
The client does not set XFCC.\nTEST_F(ConnectionManagerUtilityTest, MtlsSetForwardClientCert) {\n  auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(true));\n  const std::vector<std::string> local_uri_sans{\"test://foo.com/be\"};\n  EXPECT_CALL(*ssl, uriSanLocalCertificate()).WillOnce(Return(local_uri_sans));\n  std::string expected_sha(\"abcdefg\");\n  EXPECT_CALL(*ssl, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha));\n  const std::vector<std::string> peer_uri_sans{\"test://foo.com/fe\"};\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(peer_uri_sans));\n  std::string expected_pem(\"%3D%3Dabc%0Ade%3D\");\n  EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificate()).WillOnce(ReturnRef(expected_pem));\n  std::string expected_chain_pem(expected_pem + \"%3D%3Dlmn%0Aop%3D\");\n  EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificateChain())\n      .WillOnce(ReturnRef(expected_chain_pem));\n  std::vector<std::string> expected_dns = {\"www.example.com\"};\n  EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillOnce(Return(expected_dns));\n  ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::AppendForward));\n  std::vector<Http::ClientCertDetailsType> details = std::vector<Http::ClientCertDetailsType>();\n  details.push_back(Http::ClientCertDetailsType::URI);\n  details.push_back(Http::ClientCertDetailsType::Cert);\n  details.push_back(Http::ClientCertDetailsType::Chain);\n  details.push_back(Http::ClientCertDetailsType::DNS);\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers;\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_TRUE(headers.has(\"x-forwarded-client-cert\"));\n  
EXPECT_EQ(\"By=test://foo.com/be;\"\n            \"Hash=abcdefg;\"\n            \"URI=test://foo.com/fe;\"\n            \"Cert=\\\"%3D%3Dabc%0Ade%3D\\\";\"\n            \"Chain=\\\"%3D%3Dabc%0Ade%3D%3D%3Dlmn%0Aop%3D\\\";\"\n            \"DNS=www.example.com\",\n            headers.get_(\"x-forwarded-client-cert\"));\n}\n\n// This test assumes the following scenario:\n// The client identity is foo.com/fe, and the server (local) identity is foo.com/be. The client\n// also sends the XFCC header with the authentication result of the previous hop, (bar.com/be\n// calling foo.com/fe).\nTEST_F(ConnectionManagerUtilityTest, MtlsAppendForwardClientCert) {\n  auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(true));\n  const std::vector<std::string> local_uri_sans{\"test://foo.com/be\"};\n  EXPECT_CALL(*ssl, uriSanLocalCertificate()).WillOnce(Return(local_uri_sans));\n  std::string expected_sha(\"abcdefg\");\n  EXPECT_CALL(*ssl, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha));\n  const std::vector<std::string> peer_uri_sans{\"test://foo.com/fe\"};\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(peer_uri_sans));\n  std::string expected_pem(\"%3D%3Dabc%0Ade%3D\");\n  EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificate()).WillOnce(ReturnRef(expected_pem));\n  std::string expected_chain_pem(expected_pem + \"%3D%3Dlmn%0Aop%3D\");\n  EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificateChain())\n      .WillOnce(ReturnRef(expected_chain_pem));\n  std::vector<std::string> expected_dns = {\"www.example.com\"};\n  EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillOnce(Return(expected_dns));\n  ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::AppendForward));\n  std::vector<Http::ClientCertDetailsType> details = std::vector<Http::ClientCertDetailsType>();\n  
details.push_back(Http::ClientCertDetailsType::URI);\n  details.push_back(Http::ClientCertDetailsType::Cert);\n  details.push_back(Http::ClientCertDetailsType::Chain);\n  details.push_back(Http::ClientCertDetailsType::DNS);\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-client-cert\", \"By=test://foo.com/fe;\"\n                                                               \"URI=test://bar.com/be;\"\n                                                               \"DNS=test.com;DNS=test.com\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_TRUE(headers.has(\"x-forwarded-client-cert\"));\n  EXPECT_EQ(\n      \"By=test://foo.com/fe;URI=test://bar.com/be;DNS=test.com;DNS=test.com,\"\n      \"By=test://foo.com/be;Hash=abcdefg;URI=test://foo.com/fe;\"\n      \"Cert=\\\"%3D%3Dabc%0Ade%3D\\\";Chain=\\\"%3D%3Dabc%0Ade%3D%3D%3Dlmn%0Aop%3D\\\";DNS=www.example.com\",\n      headers.get_(\"x-forwarded-client-cert\"));\n}\n\n// This test assumes the following scenario:\n// The client identity is foo.com/fe, and the server (local) identity is foo.com/be. 
The client\n// also sends the XFCC header with the authentication result of the previous hop, (bar.com/be\n// calling foo.com/fe).\nTEST_F(ConnectionManagerUtilityTest, MtlsAppendForwardClientCertLocalSanEmpty) {\n  auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(true));\n  EXPECT_CALL(*ssl, uriSanLocalCertificate()).WillOnce(Return(std::vector<std::string>()));\n  std::string expected_sha(\"abcdefg\");\n  EXPECT_CALL(*ssl, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha));\n  const std::vector<std::string> peer_uri_sans{\"test://foo.com/fe\"};\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(peer_uri_sans));\n  ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::AppendForward));\n  std::vector<Http::ClientCertDetailsType> details = std::vector<Http::ClientCertDetailsType>();\n  details.push_back(Http::ClientCertDetailsType::URI);\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers{\n      {\"x-forwarded-client-cert\", \"By=test://foo.com/fe;Hash=xyz;URI=test://bar.com/be\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_TRUE(headers.has(\"x-forwarded-client-cert\"));\n  EXPECT_EQ(\"By=test://foo.com/fe;Hash=xyz;URI=test://bar.com/be,\"\n            \"Hash=abcdefg;URI=test://foo.com/fe\",\n            headers.get_(\"x-forwarded-client-cert\"));\n}\n\n// This test assumes the following scenario:\n// The client identity is foo.com/fe, and the server (local) identity is foo.com/be. 
The client\n// also sends the XFCC header with the authentication result of the previous hop, (bar.com/be\n// calling foo.com/fe).\nTEST_F(ConnectionManagerUtilityTest, MtlsSanitizeSetClientCert) {\n  auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(true));\n  const std::vector<std::string> local_uri_sans{\"test://foo.com/be\"};\n  EXPECT_CALL(*ssl, uriSanLocalCertificate()).WillOnce(Return(local_uri_sans));\n  std::string expected_sha(\"abcdefg\");\n  EXPECT_CALL(*ssl, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha));\n  std::string peer_subject(\"/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=test.lyft.com\");\n  EXPECT_CALL(*ssl, subjectPeerCertificate()).WillOnce(ReturnRef(peer_subject));\n  const std::vector<std::string> peer_uri_sans{\"test://foo.com/fe\"};\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(peer_uri_sans));\n  std::string expected_pem(\"abcde=\");\n  EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificate()).WillOnce(ReturnRef(expected_pem));\n  std::string expected_chain_pem(expected_pem + \"lmnop=\");\n  EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificateChain())\n      .WillOnce(ReturnRef(expected_chain_pem));\n  ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::SanitizeSet));\n  std::vector<Http::ClientCertDetailsType> details = std::vector<Http::ClientCertDetailsType>();\n  details.push_back(Http::ClientCertDetailsType::Subject);\n  details.push_back(Http::ClientCertDetailsType::URI);\n  details.push_back(Http::ClientCertDetailsType::Cert);\n  details.push_back(Http::ClientCertDetailsType::Chain);\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers{\n      {\"x-forwarded-client-cert\", \"By=test://foo.com/fe;URI=test://bar.com/be\"}};\n\n  
EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_TRUE(headers.has(\"x-forwarded-client-cert\"));\n  EXPECT_EQ(\"By=test://foo.com/be;Hash=abcdefg;Subject=\\\"/C=US/ST=CA/L=San \"\n            \"Francisco/OU=Lyft/CN=test.lyft.com\\\";URI=test://foo.com/\"\n            \"fe;Cert=\\\"abcde=\\\";Chain=\\\"abcde=lmnop=\\\"\",\n            headers.get_(\"x-forwarded-client-cert\"));\n}\n\n// This test assumes the following scenario:\n// The client identity is foo.com/fe, and the server (local) identity is foo.com/be. The client\n// also sends the XFCC header with the authentication result of the previous hop, (bar.com/be\n// calling foo.com/fe).\nTEST_F(ConnectionManagerUtilityTest, MtlsSanitizeSetClientCertPeerSanEmpty) {\n  auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(true));\n  const std::vector<std::string> local_uri_sans{\"test://foo.com/be\"};\n  EXPECT_CALL(*ssl, uriSanLocalCertificate()).WillOnce(Return(local_uri_sans));\n  std::string expected_sha(\"abcdefg\");\n  EXPECT_CALL(*ssl, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha));\n  std::string peer_subject = \"/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=test.lyft.com\";\n  EXPECT_CALL(*ssl, subjectPeerCertificate()).WillOnce(ReturnRef(peer_subject));\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(std::vector<std::string>()));\n  ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::SanitizeSet));\n  std::vector<Http::ClientCertDetailsType> details = std::vector<Http::ClientCertDetailsType>();\n  details.push_back(Http::ClientCertDetailsType::Subject);\n  details.push_back(Http::ClientCertDetailsType::URI);\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  
TestRequestHeaderMapImpl headers{\n      {\"x-forwarded-client-cert\", \"By=test://foo.com/fe;URI=test://bar.com/be\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_TRUE(headers.has(\"x-forwarded-client-cert\"));\n  EXPECT_EQ(\"By=test://foo.com/be;Hash=abcdefg;Subject=\\\"/C=US/ST=CA/L=San \"\n            \"Francisco/OU=Lyft/CN=test.lyft.com\\\";URI=\",\n            headers.get_(\"x-forwarded-client-cert\"));\n}\n\n// forward_only, append_forward and sanitize_set are only effective in mTLS connection.\nTEST_F(ConnectionManagerUtilityTest, TlsSanitizeClientCertWhenForward) {\n  auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(false));\n  ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::ForwardOnly));\n  std::vector<Http::ClientCertDetailsType> details;\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-client-cert\", \"By=test;URI=abc\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_FALSE(headers.has(\"x-forwarded-client-cert\"));\n}\n\n// always_forward_only works regardless whether the connection is TLS/mTLS.\nTEST_F(ConnectionManagerUtilityTest, TlsAlwaysForwardOnlyClientCert) {\n  auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(false));\n  ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::AlwaysForwardOnly));\n  std::vector<Http::ClientCertDetailsType> details;\n  ON_CALL(config_, 
setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers{\n      {\"x-forwarded-client-cert\", \"By=test://foo.com/fe;URI=test://bar.com/be\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_TRUE(headers.has(\"x-forwarded-client-cert\"));\n  EXPECT_EQ(\"By=test://foo.com/fe;URI=test://bar.com/be\", headers.get_(\"x-forwarded-client-cert\"));\n}\n\n// forward_only, append_forward and sanitize_set are only effective in mTLS connection.\nTEST_F(ConnectionManagerUtilityTest, NonTlsSanitizeClientCertWhenForward) {\n  ON_CALL(connection_, ssl()).WillByDefault(Return(nullptr));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::ForwardOnly));\n  std::vector<Http::ClientCertDetailsType> details;\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-client-cert\", \"By=test;URI=abc\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_FALSE(headers.has(\"x-forwarded-client-cert\"));\n}\n\n// always_forward_only works regardless whether the connection is TLS/mTLS.\nTEST_F(ConnectionManagerUtilityTest, NonTlsAlwaysForwardClientCert) {\n  ON_CALL(connection_, ssl()).WillByDefault(Return(nullptr));\n  ON_CALL(config_, forwardClientCert())\n      .WillByDefault(Return(Http::ForwardClientCertType::AlwaysForwardOnly));\n  std::vector<Http::ClientCertDetailsType> details;\n  ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));\n  TestRequestHeaderMapImpl headers{\n      {\"x-forwarded-client-cert\", \"By=test://foo.com/fe;URI=test://bar.com/be\"}};\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.3:50000\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  
EXPECT_TRUE(headers.has(\"x-forwarded-client-cert\"));\n  EXPECT_EQ(\"By=test://foo.com/fe;URI=test://bar.com/be\", headers.get_(\"x-forwarded-client-cert\"));\n}\n\n// Sampling, global on.\nTEST_F(ConnectionManagerUtilityTest, RandomSamplingWhenGlobalSet) {\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.random_sampling\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"x-request-id\", \"125a4afb-6f55-44ba-ad80-413f09f48a28\"}};\n  EXPECT_CALL(*request_id_extension_,\n              setTraceStatus(testing::Ref(request_headers), TraceStatus::Sampled))\n      .Times(1);\n  callMutateRequestHeaders(request_headers, Protocol::Http2);\n\n  EXPECT_EQ(TraceStatus::Sampled, request_id_extension_->getTraceStatus(request_headers));\n}\n\nTEST_F(ConnectionManagerUtilityTest, SamplingWithoutRouteOverride) {\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.random_sampling\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"x-request-id\", \"125a4afb-6f55-44ba-ad80-413f09f48a28\"}};\n  EXPECT_CALL(*request_id_extension_,\n              setTraceStatus(testing::Ref(request_headers), TraceStatus::Sampled))\n      .Times(1);\n  callMutateRequestHeaders(request_headers, Protocol::Http2);\n\n  EXPECT_EQ(TraceStatus::Sampled, request_id_extension_->getTraceStatus(request_headers));\n}\n\nTEST_F(ConnectionManagerUtilityTest, SamplingWithRouteOverride) {\n  EXPECT_CALL(\n      
runtime_.snapshot_,\n      featureEnabled(\"tracing.random_sampling\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(false));\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(false));\n\n  NiceMock<Router::MockRouteTracing> tracingConfig;\n  EXPECT_CALL(route_, tracingConfig()).WillRepeatedly(Return(&tracingConfig));\n  const envoy::type::v3::FractionalPercent percent;\n  EXPECT_CALL(tracingConfig, getClientSampling()).WillRepeatedly(ReturnRef(percent));\n  EXPECT_CALL(tracingConfig, getRandomSampling()).WillRepeatedly(ReturnRef(percent));\n  EXPECT_CALL(tracingConfig, getOverallSampling()).WillRepeatedly(ReturnRef(percent));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"x-request-id\", \"125a4afb-6f55-44ba-ad80-413f09f48a28\"}};\n  EXPECT_CALL(*request_id_extension_,\n              setTraceStatus(testing::Ref(request_headers), TraceStatus::NoTrace))\n      .Times(1);\n  callMutateRequestHeaders(request_headers, Protocol::Http2);\n\n  EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers));\n}\n\n// Sampling must not be done on client traced.\nTEST_F(ConnectionManagerUtilityTest, SamplingMustNotBeDoneOnClientTraced) {\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.random_sampling\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .Times(0);\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n\n  // The x_request_id has TRACE_FORCED(a) set in the TRACE_BYTE_POSITION(14) character.\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"x-request-id\", \"125a4afb-6f55-a4ba-ad80-413f09f48a28\"}};\n  EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0);\n  
callMutateRequestHeaders(request_headers, Protocol::Http2);\n\n  EXPECT_EQ(TraceStatus::Forced, request_id_extension_->getTraceStatus(request_headers));\n}\n\n// Sampling, global off.\nTEST_F(ConnectionManagerUtilityTest, NoTraceWhenSamplingSetButGlobalNotSet) {\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.random_sampling\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(false));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"x-request-id\", \"125a4afb-6f55-44ba-ad80-413f09f48a28\"}};\n  EXPECT_CALL(*request_id_extension_,\n              setTraceStatus(testing::Ref(request_headers), TraceStatus::Sampled))\n      .Times(1);\n  EXPECT_CALL(*request_id_extension_,\n              setTraceStatus(testing::Ref(request_headers), TraceStatus::NoTrace))\n      .Times(1);\n  callMutateRequestHeaders(request_headers, Protocol::Http2);\n\n  EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers));\n}\n\n// Client, client enabled, global on.\nTEST_F(ConnectionManagerUtilityTest, ClientSamplingWhenGlobalSet) {\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"tracing.client_enabled\",\n                                                 An<const envoy::type::v3::FractionalPercent&>()))\n      .WillOnce(Return(true));\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"x-client-trace-id\", \"f4dca0a9-12c7-4307-8002-969403baf480\"},\n      {\"x-request-id\", \"125a4afb-6f55-44ba-ad80-413f09f48a28\"}};\n  EXPECT_CALL(*request_id_extension_,\n              setTraceStatus(testing::Ref(request_headers), 
TraceStatus::Client))\n      .Times(1);\n  callMutateRequestHeaders(request_headers, Protocol::Http2);\n\n  EXPECT_EQ(TraceStatus::Client, request_id_extension_->getTraceStatus(request_headers));\n}\n\n// Client, client disabled, global on.\nTEST_F(ConnectionManagerUtilityTest, NoTraceWhenClientSamplingNotSetAndGlobalSet) {\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"tracing.client_enabled\",\n                                                 An<const envoy::type::v3::FractionalPercent&>()))\n      .WillOnce(Return(false));\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.random_sampling\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(false));\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"x-client-trace-id\", \"f4dca0a9-12c7-4307-8002-969403baf480\"},\n      {\"x-request-id\", \"125a4afb-6f55-44ba-ad80-413f09f48a28\"}};\n  EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0);\n  callMutateRequestHeaders(request_headers, Protocol::Http2);\n\n  EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers));\n}\n\n// Forced, global on.\nTEST_F(ConnectionManagerUtilityTest, ForcedTracedWhenGlobalSet) {\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false));\n  // Internal request, make traceable.\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"10.0.0.1\"},\n                                   {\"x-request-id\", \"125a4afb-6f55-44ba-ad80-413f09f48a28\"},\n                                   {\"x-envoy-force-trace\", \"true\"}};\n  EXPECT_CALL(random_, uuid()).Times(0);\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(true));\n  
EXPECT_CALL(*request_id_extension_, setTraceStatus(testing::Ref(headers), TraceStatus::Forced))\n      .Times(1);\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(TraceStatus::Forced, request_id_extension_->getTraceStatus(headers));\n}\n\n// Forced, global off.\nTEST_F(ConnectionManagerUtilityTest, NoTraceWhenForcedTracedButGlobalNotSet) {\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false));\n  // Internal request, make traceable.\n  TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"10.0.0.1\"},\n                                   {\"x-request-id\", \"125a4afb-6f55-44ba-ad80-413f09f48a28\"},\n                                   {\"x-envoy-force-trace\", \"true\"}};\n  EXPECT_CALL(random_, uuid()).Times(0);\n  EXPECT_CALL(\n      runtime_.snapshot_,\n      featureEnabled(\"tracing.global_enabled\", An<const envoy::type::v3::FractionalPercent&>(), _))\n      .WillOnce(Return(false));\n  EXPECT_CALL(*request_id_extension_, setTraceStatus(testing::Ref(headers), TraceStatus::Forced))\n      .Times(1);\n  EXPECT_CALL(*request_id_extension_, setTraceStatus(testing::Ref(headers), TraceStatus::NoTrace))\n      .Times(1);\n\n  EXPECT_EQ((MutateRequestRet{\"10.0.0.1:0\", true}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(headers));\n}\n\n// Forced, global on, broken uuid.\nTEST_F(ConnectionManagerUtilityTest, NoTraceOnBrokenUuid) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-force-trace\", \"true\"},\n                                                 {\"x-request-id\", \"bb\"}};\n  EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0);\n  callMutateRequestHeaders(request_headers, Protocol::Http2);\n\n  EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers));\n}\n\nTEST_F(ConnectionManagerUtilityTest, 
RemovesProxyResponseHeaders) {\n  Http::TestRequestHeaderMapImpl request_headers{{}};\n  Http::TestResponseHeaderMapImpl response_headers{{\"keep-alive\", \"timeout=60\"},\n                                                   {\"proxy-connection\", \"proxy-header\"}};\n  EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0);\n  ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, \"\");\n\n  EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers));\n\n  EXPECT_FALSE(response_headers.has(\"keep-alive\"));\n  EXPECT_FALSE(response_headers.has(\"proxy-connection\"));\n}\n\n// maybeNormalizePath() returns true with an empty path.\nTEST_F(ConnectionManagerUtilityTest, SanitizeEmptyPath) {\n  ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(false));\n  TestRequestHeaderMapImpl original_headers;\n\n  TestRequestHeaderMapImpl header_map(original_headers);\n  EXPECT_TRUE(ConnectionManagerUtility::maybeNormalizePath(header_map, config_));\n  EXPECT_EQ(original_headers, header_map);\n}\n\n// maybeNormalizePath() does nothing by default.\nTEST_F(ConnectionManagerUtilityTest, SanitizePathDefaultOff) {\n  ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(false));\n  TestRequestHeaderMapImpl original_headers;\n  original_headers.setPath(\"/xyz/../a\");\n\n  TestRequestHeaderMapImpl header_map(original_headers);\n  ConnectionManagerUtility::maybeNormalizePath(header_map, config_);\n  EXPECT_EQ(original_headers, header_map);\n}\n\n// maybeNormalizePath() leaves already normal paths alone.\nTEST_F(ConnectionManagerUtilityTest, SanitizePathNormalPath) {\n  ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl original_headers;\n  original_headers.setPath(\"/xyz\");\n\n  TestRequestHeaderMapImpl header_map(original_headers);\n  ConnectionManagerUtility::maybeNormalizePath(header_map, config_);\n  EXPECT_EQ(original_headers, 
header_map);\n}\n\n// maybeNormalizePath() normalizes relative paths.\nTEST_F(ConnectionManagerUtilityTest, SanitizePathRelativePAth) {\n  ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl original_headers;\n  original_headers.setPath(\"/xyz/../abc\");\n\n  TestRequestHeaderMapImpl header_map(original_headers);\n  ConnectionManagerUtility::maybeNormalizePath(header_map, config_);\n  EXPECT_EQ(header_map.getPathValue(), \"/abc\");\n}\n\n// maybeNormalizePath() does not touch adjacent slashes by default.\nTEST_F(ConnectionManagerUtilityTest, MergeSlashesDefaultOff) {\n  ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(true));\n  ON_CALL(config_, shouldMergeSlashes()).WillByDefault(Return(false));\n  TestRequestHeaderMapImpl original_headers;\n  original_headers.setPath(\"/xyz///abc\");\n\n  TestRequestHeaderMapImpl header_map(original_headers);\n  ConnectionManagerUtility::maybeNormalizePath(header_map, config_);\n  EXPECT_EQ(header_map.getPathValue(), \"/xyz///abc\");\n}\n\n// maybeNormalizePath() merges adjacent slashes.\nTEST_F(ConnectionManagerUtilityTest, MergeSlashes) {\n  ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(true));\n  ON_CALL(config_, shouldMergeSlashes()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl original_headers;\n  original_headers.setPath(\"/xyz///abc\");\n\n  TestRequestHeaderMapImpl header_map(original_headers);\n  ConnectionManagerUtility::maybeNormalizePath(header_map, config_);\n  EXPECT_EQ(header_map.getPathValue(), \"/xyz/abc\");\n}\n\n// maybeNormalizePath() merges adjacent slashes if normalization if off.\nTEST_F(ConnectionManagerUtilityTest, MergeSlashesWithoutNormalization) {\n  ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(false));\n  ON_CALL(config_, shouldMergeSlashes()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl original_headers;\n  original_headers.setPath(\"/xyz/..//abc\");\n\n  TestRequestHeaderMapImpl 
header_map(original_headers);\n  ConnectionManagerUtility::maybeNormalizePath(header_map, config_);\n  EXPECT_EQ(header_map.getPathValue(), \"/xyz/../abc\");\n}\n\n// maybeNormalizeHost() removes port part from host header.\nTEST_F(ConnectionManagerUtilityTest, RemovePort) {\n  ON_CALL(config_, shouldStripMatchingPort()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl original_headers;\n  original_headers.setHost(\"host:443\");\n\n  TestRequestHeaderMapImpl header_map(original_headers);\n  ConnectionManagerUtility::maybeNormalizeHost(header_map, config_, 443);\n  EXPECT_EQ(header_map.getHostValue(), \"host\");\n}\n\n// test preserve_external_request_id true does not reset the passed requestId if passed\nTEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestId) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"134.2.2.11\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers{{\"x-request-id\", \"my-request-id\"},\n                                   {\"x-forwarded-for\", \"198.51.100.1\"}};\n  EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1);\n  EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0);\n  EXPECT_EQ((MutateRequestRet{\"134.2.2.11:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_CALL(random_, uuid()).Times(0);\n  EXPECT_EQ(\"my-request-id\", headers.get_(\"x-request-id\"));\n}\n\n// test preserve_external_request_id true but generates new request id when not passed\nTEST_F(ConnectionManagerUtilityTest, PreseverExternalRequestIdNoReqId) {\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"134.2.2.11\");\n  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n  ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true));\n  
TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"198.51.100.1\"}};\n  EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1);\n  EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0);\n  EXPECT_EQ((MutateRequestRet{\"134.2.2.11:0\", false}),\n            callMutateRequestHeaders(headers, Protocol::Http2));\n  EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId));\n}\n\n// test preserve_external_request_id true and no edge_request passing requestId should keep the\n// requestID\nTEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestIdNoEdgeRequestKeepRequestId) {\n  ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers{{\"x-request-id\", \"myReqId\"}};\n  EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1);\n  EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0);\n  callMutateRequestHeaders(headers, Protocol::Http2);\n  EXPECT_EQ(\"myReqId\", headers.get_(Headers::get().RequestId));\n}\n\n// test preserve_external_request_id true and no edge_request not passing requestId should generate\n// new request id\nTEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestIdNoEdgeRequestGenerateNewRequestId) {\n  ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true));\n  TestRequestHeaderMapImpl headers;\n  EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1);\n  EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0);\n  callMutateRequestHeaders(headers, Protocol::Http2);\n  EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId));\n}\n\n// test preserve_external_request_id false edge request generates new request id\nTEST_F(ConnectionManagerUtilityTest, NoPreserveExternalRequestIdEdgeRequestGenerateRequestId) {\n  ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(false));\n  connection_.remote_address_ = 
std::make_shared<Network::Address::Ipv4Instance>(\"134.2.2.11\");\n\n  // with request id\n  {\n    ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));\n    TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"198.51.100.1\"},\n                                     {\"x-request-id\", \"my-request-id\"}};\n    EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), true)).Times(1);\n    EXPECT_CALL(*request_id_extension_, set(_, false)).Times(0);\n    EXPECT_EQ((MutateRequestRet{\"134.2.2.11:0\", false}),\n              callMutateRequestHeaders(headers, Protocol::Http2));\n    EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId));\n  }\n\n  // with no request id\n  {\n    TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"198.51.100.1\"}};\n    EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), true)).Times(1);\n    EXPECT_CALL(*request_id_extension_, set(_, false)).Times(0);\n    EXPECT_EQ((MutateRequestRet{\"134.2.2.11:0\", false}),\n              callMutateRequestHeaders(headers, Protocol::Http2));\n    EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId));\n  }\n}\n\n// test preserve_external_request_id false not edge request\nTEST_F(ConnectionManagerUtilityTest, NoPreserveExternalRequestIdNoEdgeRequest) {\n  ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(false));\n\n  // with no request id\n  {\n    TestRequestHeaderMapImpl headers;\n    EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1);\n    EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0);\n    callMutateRequestHeaders(headers, Protocol::Http2);\n    EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId));\n  }\n\n  // with request id\n  {\n    TestRequestHeaderMapImpl headers{{\"x-request-id\", \"my-request-id\"}};\n    EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1);\n    EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0);\n    
callMutateRequestHeaders(headers, Protocol::Http2);\n    EXPECT_EQ(\"my-request-id\", headers.get_(Headers::get().RequestId));\n  }\n}\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/date_provider_impl_test.cc",
    "content": "#include <chrono>\n\n#include \"common/http/date_provider_impl.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Http {\n\nTEST(DateProviderImplTest, All) {\n  Event::MockDispatcher dispatcher;\n  NiceMock<ThreadLocal::MockInstance> tls;\n  Event::MockTimer* timer = new Event::MockTimer(&dispatcher);\n  EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(500), _));\n\n  TlsCachingDateProviderImpl provider(dispatcher, tls);\n  TestResponseHeaderMapImpl headers;\n  provider.setDateHeader(headers);\n  EXPECT_NE(nullptr, headers.Date());\n\n  EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(500), _));\n  timer->invokeCallback();\n\n  headers.removeDate();\n  provider.setDateHeader(headers);\n  EXPECT_NE(nullptr, headers.Date());\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/filter_manager_test.cc",
    "content": "#include \"envoy/stream_info/filter_state.h\"\n\n#include \"common/http/filter_manager.h\"\n#include \"common/stream_info/filter_state_impl.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_reply/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Http {\nnamespace {\nclass FilterManagerTest : public testing::Test {\npublic:\n  void initialize() {\n    filter_manager_ = std::make_unique<FilterManager>(\n        filter_manager_callbacks_, dispatcher_, connection_, 0, true, 10000, filter_factory_,\n        local_reply_, protocol_, time_source_, filter_state_,\n        StreamInfo::FilterState::LifeSpan::Connection);\n  }\n\n  std::unique_ptr<FilterManager> filter_manager_;\n  NiceMock<MockFilterManagerCallbacks> filter_manager_callbacks_;\n  Event::MockDispatcher dispatcher_;\n  NiceMock<Network::MockConnection> connection_;\n  Envoy::Http::MockFilterChainFactory filter_factory_;\n  LocalReply::MockLocalReply local_reply_;\n  Protocol protocol_{Protocol::Http2};\n  NiceMock<MockTimeSystem> time_source_;\n  StreamInfo::FilterStateSharedPtr filter_state_ =\n      std::make_shared<StreamInfo::FilterStateImpl>(StreamInfo::FilterState::LifeSpan::Connection);\n};\n\n// Verifies that the local reply persists the gRPC classification even if the request headers are\n// modified.\nTEST_F(FilterManagerTest, SendLocalReplyDuringDecodingGrpcClassiciation) {\n  initialize();\n\n  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(*filter, decodeHeaders(_, true))\n      .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus {\n        headers.setContentType(\"text/plain\");\n\n        filter->callbacks_->sendLocalReply(Code::InternalServerError, \"\", nullptr, absl::nullopt,\n            
                               \"\");\n\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  RequestHeaderMapPtr grpc_headers{\n      new TestRequestHeaderMapImpl{{\":authority\", \"host\"},\n                                   {\":path\", \"/\"},\n                                   {\":method\", \"GET\"},\n                                   {\"content-type\", \"application/grpc\"}}};\n\n  ON_CALL(filter_manager_callbacks_, requestHeaders())\n      .WillByDefault(Return(absl::make_optional(std::ref(*grpc_headers))));\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(filter);\n      }));\n\n  filter_manager_->createFilterChain();\n\n  filter_manager_->requestHeadersInitialized();\n  EXPECT_CALL(local_reply_, rewrite(_, _, _, _, _, _));\n  EXPECT_CALL(filter_manager_callbacks_, setResponseHeaders_(_))\n      .WillOnce(Invoke([](auto& response_headers) {\n        EXPECT_THAT(response_headers,\n                    HeaderHasValueRef(Http::Headers::get().ContentType, \"application/grpc\"));\n      }));\n  EXPECT_CALL(filter_manager_callbacks_, resetIdleTimer());\n  EXPECT_CALL(filter_manager_callbacks_, encodeHeaders(_, _));\n  EXPECT_CALL(filter_manager_callbacks_, endStream());\n  filter_manager_->decodeHeaders(*grpc_headers, true);\n  filter_manager_->destroyFilters();\n}\n\n// Verifies that the local reply persists the gRPC classification even if the request headers are\n// modified when directly encoding a response.\nTEST_F(FilterManagerTest, SendLocalReplyDuringEncodingGrpcClassiciation) {\n  initialize();\n\n  std::shared_ptr<MockStreamDecoderFilter> decoder_filter(new NiceMock<MockStreamDecoderFilter>());\n\n  EXPECT_CALL(*decoder_filter, decodeHeaders(_, true))\n      .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus {\n        headers.setContentType(\"text/plain\");\n\n        
ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}};\n        decoder_filter->callbacks_->encodeHeaders(std::move(response_headers), true, \"test\");\n\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  std::shared_ptr<MockStreamFilter> encoder_filter(new NiceMock<MockStreamFilter>());\n\n  EXPECT_CALL(*encoder_filter, encodeHeaders(_, true))\n      .WillRepeatedly(Invoke([&](auto&, bool) -> FilterHeadersStatus {\n        encoder_filter->encoder_callbacks_->sendLocalReply(Code::InternalServerError, \"\", nullptr,\n                                                           absl::nullopt, \"\");\n        return FilterHeadersStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(filter_factory_, createFilterChain(_))\n      .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {\n        callbacks.addStreamDecoderFilter(decoder_filter);\n        callbacks.addStreamFilter(encoder_filter);\n      }));\n\n  RequestHeaderMapPtr grpc_headers{\n      new TestRequestHeaderMapImpl{{\":authority\", \"host\"},\n                                   {\":path\", \"/\"},\n                                   {\":method\", \"GET\"},\n                                   {\"content-type\", \"application/grpc\"}}};\n\n  ON_CALL(filter_manager_callbacks_, requestHeaders())\n      .WillByDefault(Return(absl::make_optional(std::ref(*grpc_headers))));\n  filter_manager_->createFilterChain();\n\n  filter_manager_->requestHeadersInitialized();\n  EXPECT_CALL(local_reply_, rewrite(_, _, _, _, _, _));\n  EXPECT_CALL(filter_manager_callbacks_, setResponseHeaders_(_))\n      .WillOnce(Invoke([](auto&) {}))\n      .WillOnce(Invoke([](auto& response_headers) {\n        EXPECT_THAT(response_headers,\n                    HeaderHasValueRef(Http::Headers::get().ContentType, \"application/grpc\"));\n      }));\n  EXPECT_CALL(filter_manager_callbacks_, encodeHeaders(_, _));\n  EXPECT_CALL(filter_manager_callbacks_, endStream());\n  
filter_manager_->decodeHeaders(*grpc_headers, true);\n  filter_manager_->destroyFilters();\n}\n} // namespace\n} // namespace Http\n} // namespace Envoy"
  },
  {
    "path": "test/common/http/header_map_impl_corpus/appendheader",
    "content": "actions {\n  set_reference_key {\n    key: \":method\"\n    value: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n 
   key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate 
{\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate 
{\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate 
{\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\n\n"
  },
  {
    "path": "test/common/http/header_map_impl_corpus/clusterfuzz-testcase-header_map_impl_fuzz_test-5633882138869760-prefix",
    "content": "actions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference_key {\n    key: \"2\"\n    uint64_value: 0\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  
add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference_key {\n    key: \"2\"\n    uint64_value: 0\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  add_reference {\n    key: 
\"1\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    set_reference: \"\\006\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions 
{\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  lookup: \"google.protobuf.FileD\"\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    set_reference: \"\\006\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n 
 }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    
key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  lookup: \"google.protobuf.FileD\"\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    set_reference: \"\\006\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  
}\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  remove: \"\\016\"\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  
get_and_mutate {\n    clear {\n    }\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n}\nactions 
{\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    set_reference: \"\\006\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  get_and_mutate {\n    clear {\n    }\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions 
{\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    clear {\n    }\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  lookup: \"google.protobuf.FileD\"\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n 
 add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    append: \"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference 
{\n    key: \":\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    value: \",\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  lookup: \"te\"\n}\nactions {\n}\nactions {\n  add_reference {\n    value: \"5\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  get_and_mutate {\n    append: \"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    value: \",\"\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_copy {\n    key: \"te\\000\\000\\000\\000\\000\\007uc\"\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    append: \"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\"\n  }\n}\nactions {\n  add_reference {\n    value: 
\"5\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    value: \",\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    clear {\n    }\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    value: \"5\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  
}\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    set_reference: \"\\006\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  set_reference {\n    key: \"%\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  
add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  set_reference {\n    key: \"\\000\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  set_reference {\n    key: 
\"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  set_reference {\n    key: \"\\000\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions 
{\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    value: \",\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  set_reference {\n    key: 
\"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  lookup: \"te\"\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  
}\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference_key {\n    key: \"2\"\n    uint64_value: 0\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    value: \",\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    value: \",\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    value: \"5\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions 
{\n  add_reference {\n    value: \"5\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  set_reference {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  
add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    set_reference: \"\\006\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  
}\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    append: \"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  
}\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  set_reference {\n    key: \"\\000\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    value: \",\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    set_reference: \"\\006\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions 
{\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    value: \"5\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    clear {\n    }\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    set_reference: \"\\006\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  get_and_mutate {\n    append: \"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_copy {\n    key: \"te\\000\\000\\000\\000\\000\\007uc\"\n  }\n}\nactions {\n}\nactions {\n  lookup: \"google.protobuf.FileD\"\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions 
{\n}\nactions {\n  add_reference {\n    value: \"5\"\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n  remove: \"\\030\"\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference_key {\n    key: \"2\"\n    uint64_value: 0\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n    value: \",\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    value: \",\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  
add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    value: \"5\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  
}\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_copy {\n    key: \"te\"\n  }\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions 
{\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"1\"\n  }\n}\nactions {\n  add_reference {\n    key: \"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ\"\n  }\n}\nactions {\n  add_reference {\n    key: \":\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    append: \"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  lookup: \"te\"\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  get_and_mutate {\n    find: \"\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n  copy {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n  add_reference {\n  }\n}\nactions {\n}\nactions {\n}"
  },
  {
    "path": "test/common/http/header_map_impl_corpus/clusterfuzz-testcase-minimized-header_map_impl_fuzz_test-5182326490791936",
    "content": "actions {   add_reference {   } } actions {   get_and_mutate {     append: \"\"   } } actions {   get_and_mutate { set_copy: \"                                \"   } }\n"
  },
  {
    "path": "test/common/http/header_map_impl_corpus/clusterfuzz-testcase-minimized-header_map_impl_fuzz_test-5689833624698880",
    "content": "actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions {   set_reference {   } } actions { } actions {   get_and_mutate {     append: \"\"   } } actions { } actions {   get_and_mutate {     set_integer: 0   } } actions { } actions { } actions { } actions { } actions { } actions { } actions { } actions { } \n"
  },
  {
    "path": "test/common/http/header_map_impl_corpus/clusterfuzz-testcase-minimized-header_map_impl_fuzz_test-6363647045533696",
    "content": "actions {\n  mutate_and_move {\n    key: \"#\"\n    append: \"~\"\n  }\n}\n"
  },
  {
    "path": "test/common/http/header_map_impl_corpus/crash-5fb09ca426eb21db14151b94fd74d418b49042e4",
    "content": "actions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \"foo\"\n    value: \"m\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  
get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: 
\":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    string_value: \"baz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  add_copy {\n    key: \":method\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  
set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    uint64_value: 37\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference {\n    key: \":method\"\n    value: \"bar\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  lookup: \"foo\"\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"bar\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \"baz\"\n    value: \"bar\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  lookup: \"foo\"\n}\nactions {\n  mutate_and_move {\n    key: 
\"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_copy {\n    key: \":method\"\n    uint64_value: 42\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    
key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_string_key\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  
}\n}\nactions {\n  add_copy {\n    key: \"foo_uint64_key\"\n    uint64_value: 42\n  }\n}\nactions {\n  set_reference_key {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \"foo\"\n    value: \"m\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \"foo_string_key\"\n    string_value: \"barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n   
 key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  add_copy {\n    key: \"foo_string_key\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  set_reference_key {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  
get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: 
\":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \"baz\"\n    value: \"bar\"\n  }\n}\nactions {\n  add_reference {\n    value: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  remove_prefix: \"foo\"\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    
append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    clear {\n    }\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n   
 key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_reference {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    set_copy: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  lookup: \"foo\"\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: 
\":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  copy {\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  
get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  lookup: \"foo\"\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  remove: \"f\"\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    append: \"\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    string_value: \"baz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"aa\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    uint64_value: 37\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  lookup: \"_\"\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  
get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  lookup: \":method\"\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n 
 add_reference {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    clear {\n    }\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  add_reference {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_string_key\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  lookup: \":method\"\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_uint64_key\"\n    uint64_value: 42\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    set_integer: 0\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  set_reference {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  
}\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate 
{\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_string_key\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: 
\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n}\n"
  },
  {
    "path": "test/common/http/header_map_impl_corpus/empty",
    "content": ""
  },
  {
    "path": "test/common/http/header_map_impl_corpus/example",
    "content": "actions {\n  add_reference {\n    key: \"foo\"\n    value: \"bar\"\n  }\n}\nactions {\n  add_reference {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \"foo_string_key\"\n    string_value: \"barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \"foo_string_key\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \"foo_uint64_key\"\n    uint64_value: 42\n  }\n}\nactions {\n  add_reference_key {\n    key: \"foo_uint64_key\"\n    uint64_value: 37\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_string_key\"\n    string_value: \"barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr\"\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_string_key\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_uint64_key\"\n    uint64_value: 42\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_uint64_key\"\n    uint64_value: 37\n  }\n}\nactions {\n  set_reference {\n    key: \"foo\"\n    value: \"bar\"\n  }\n}\nactions {\n  set_reference {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \"foo\"\n    value: \"bar\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\n\nactions {\n  add_reference {\n    key: \":method\"\n    value: \"bar\"\n  }\n}\nactions {\n  add_reference {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  
add_reference_key {\n    key: \":method\"\n    string_value: \"bar\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    uint64_value: 42\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    uint64_value: 37\n  }\n}\nactions {\n  add_copy {\n    key: \":method\"\n    string_value: \"bar\"\n  }\n}\nactions {\n  add_copy {\n    key: \":method\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  add_copy {\n    key: \":method\"\n    uint64_value: 42\n  }\n}\nactions {\n  add_copy {\n    key: \":method\"\n    uint64_value: 37\n  }\n}\nactions {\n  set_reference {\n    key: \":method\"\n    value: \"bar\"\n  }\n}\nactions {\n  set_reference {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"bar\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"aa\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    clear: {}\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    find: \"a\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    set_copy: \"a\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    set_integer: 0\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    set_reference: \"a\"\n  }\n}\nactions {\n  copy: {}\n}\nactions {\n  lookup: \":method\"\n}\nactions {\n  lookup: \"foo\"\n}\nactions {\n  remove: \"f\"\n}\nactions {\n  remove_prefix: \"foo\"\n}\nactions {\n  remove: 
\":m\"\n}\nactions {\n  remove_prefix: \":m\"\n}\n"
  },
  {
    "path": "test/common/http/header_map_impl_corpus/example_lazymap",
    "content": "actions {\n  add_reference {\n    key: \"foo\"\n    value: \"bar\"\n  }\n}\nactions {\n  add_reference {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \"foo_string_key\"\n    string_value: \"barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \"foo_string_key\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \"foo_uint64_key\"\n    uint64_value: 42\n  }\n}\nactions {\n  add_reference_key {\n    key: \"foo_uint64_key\"\n    uint64_value: 37\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_string_key\"\n    string_value: \"barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr\"\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_string_key\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_uint64_key\"\n    uint64_value: 42\n  }\n}\nactions {\n  add_copy {\n    key: \"foo_uint64_key\"\n    uint64_value: 37\n  }\n}\nactions {\n  set_reference {\n    key: \"foo\"\n    value: \"bar\"\n  }\n}\nactions {\n  set_reference {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \"foo\"\n    value: \"bar\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \"foo\"\n    value: \"baz\"\n  }\n}\n\nactions {\n  add_reference {\n    key: \":method\"\n    value: \"bar\"\n  }\n}\nactions {\n  add_reference {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  
add_reference_key {\n    key: \":method\"\n    string_value: \"bar\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    uint64_value: 42\n  }\n}\nactions {\n  add_reference_key {\n    key: \":method\"\n    uint64_value: 37\n  }\n}\nactions {\n  add_copy {\n    key: \":method\"\n    string_value: \"bar\"\n  }\n}\nactions {\n  add_copy {\n    key: \":method\"\n    string_value: \"baz\"\n  }\n}\nactions {\n  add_copy {\n    key: \":method\"\n    uint64_value: 42\n  }\n}\nactions {\n  add_copy {\n    key: \":method\"\n    uint64_value: 37\n  }\n}\nactions {\n  set_reference {\n    key: \":method\"\n    value: \"bar\"\n  }\n}\nactions {\n  set_reference {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"bar\"\n  }\n}\nactions {\n  set_reference_key {\n    key: \":method\"\n    value: \"baz\"\n  }\n}\n\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    append: \"aa\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    clear: {}\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    find: \"a\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    set_copy: \"a\"\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    set_integer: 0\n  }\n}\nactions {\n  get_and_mutate {\n    key: \":method\"\n    set_reference: \"a\"\n  }\n}\nactions {\n  copy: {}\n}\nactions {\n  lookup: \":method\"\n}\nactions {\n  lookup: \"foo\"\n}\nactions {\n  remove: \"f\"\n}\nactions {\n  remove_prefix: \"foo\"\n}\nactions {\n  remove: 
\":m\"\n}\nactions {\n  remove_prefix: \":m\"\n}\nconfig {\n  lazy_map_min_size: 0\n}\n"
  },
  {
    "path": "test/common/http/header_map_impl_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.http;\n\nimport \"google/protobuf/empty.proto\";\n\nimport \"validate/validate.proto\";\n\n// Structured input for header_map_impl_fuzz_test.\n\nmessage AddReference {\n  string key = 1;\n  string value = 2;\n}\n\nmessage AddReferenceKey {\n  string key = 1;\n  oneof value_selector {\n    string string_value = 2;\n    uint64 uint64_value = 3;\n  }\n}\n\nmessage AddCopy {\n  string key = 1;\n  oneof value_selector {\n    string string_value = 2;\n    uint64 uint64_value = 3;\n  }\n}\n\nmessage SetReference {\n  string key = 1;\n  string value = 2;\n}\n\nmessage SetReferenceKey {\n  string key = 1;\n  string value = 2;\n}\n\nmessage GetAndMutate {\n  string key = 1;\n  oneof mutate_selector {\n    string append = 2;\n    google.protobuf.Empty clear = 3;\n    string find = 4;\n    string set_copy = 5;\n    uint64 set_integer = 6;\n    string set_reference = 7;\n  }\n}\n\nmessage MutateAndMove {\n  string key = 1;\n  oneof mutate_selector {\n    string append = 2;\n    string set_copy = 3;\n    uint64 set_integer = 4;\n    string set_reference = 5;\n  }\n}\n\nmessage Append {\n  string key = 1;\n  string value = 2;\n}\n\nmessage Get {\n  string key = 1;\n}\n\nmessage Action {\n  oneof action_selector {\n    option (validate.required) = true;\n    AddReference add_reference = 1;\n    AddReferenceKey add_reference_key = 2;\n    AddCopy add_copy = 3;\n    SetReference set_reference = 4;\n    SetReferenceKey set_reference_key = 5;\n    GetAndMutate get_and_mutate = 6 [deprecated = true];\n    Get get = 13;\n    MutateAndMove mutate_and_move = 12;\n    Append append = 11;\n    google.protobuf.Empty copy = 7;\n    string remove = 9;\n    string remove_prefix = 10;\n  }\n}\n\nmessage Config {\n  uint32 lazy_map_min_size = 1;\n}\n\nmessage HeaderMapImplFuzzTestCase {\n  repeated Action actions = 1;\n  // Optional threshold value configuration for the lazy header-map.\n  Config config = 2;\n}\n"
  },
  {
    "path": "test/common/http/header_map_impl_fuzz_test.cc",
    "content": "#include <functional>\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"test/common/http/header_map_impl_fuzz.pb.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"absl/strings/ascii.h\"\n\nusing Envoy::Fuzz::replaceInvalidCharacters;\n\nnamespace Envoy {\n\n// Fuzz the header map implementation.\nDEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) {\n  TestScopedRuntime runtime;\n  // Set the lazy header-map threshold if found.\n  if (input.has_config()) {\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.http.headermap.lazy_map_min_size\",\n          absl::StrCat(input.config().lazy_map_min_size())}});\n  }\n\n  auto header_map = Http::RequestHeaderMapImpl::create();\n  std::vector<std::unique_ptr<Http::LowerCaseString>> lower_case_strings;\n  std::vector<std::unique_ptr<std::string>> strings;\n  uint64_t set_integer;\n  constexpr auto max_actions = 128;\n  for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) {\n    const auto& action = input.actions(i);\n    ENVOY_LOG_MISC(debug, \"Action {}\", action.DebugString());\n    switch (action.action_selector_case()) {\n    case test::common::http::Action::kAddReference: {\n      const auto& add_reference = action.add_reference();\n      lower_case_strings.emplace_back(\n          std::make_unique<Http::LowerCaseString>(replaceInvalidCharacters(add_reference.key())));\n      strings.emplace_back(\n          std::make_unique<std::string>(replaceInvalidCharacters(add_reference.value())));\n      header_map->addReference(*lower_case_strings.back(), *strings.back());\n      break;\n    }\n    case test::common::http::Action::kAddReferenceKey: {\n      const auto& add_reference_key = action.add_reference_key();\n      
lower_case_strings.emplace_back(std::make_unique<Http::LowerCaseString>(\n          replaceInvalidCharacters(add_reference_key.key())));\n      switch (add_reference_key.value_selector_case()) {\n      case test::common::http::AddReferenceKey::kStringValue:\n        header_map->addReferenceKey(*lower_case_strings.back(),\n                                    replaceInvalidCharacters(add_reference_key.string_value()));\n        break;\n      case test::common::http::AddReferenceKey::kUint64Value:\n        header_map->addReferenceKey(*lower_case_strings.back(), add_reference_key.uint64_value());\n        break;\n      default:\n        break;\n      }\n      break;\n    }\n    case test::common::http::Action::kAddCopy: {\n      const auto& add_copy = action.add_copy();\n      const Http::LowerCaseString key{replaceInvalidCharacters(add_copy.key())};\n      switch (add_copy.value_selector_case()) {\n      case test::common::http::AddCopy::kStringValue:\n        header_map->addCopy(key, replaceInvalidCharacters(add_copy.string_value()));\n        break;\n      case test::common::http::AddCopy::kUint64Value:\n        header_map->addCopy(key, add_copy.uint64_value());\n        break;\n      default:\n        break;\n      }\n      break;\n    }\n    case test::common::http::Action::kSetReference: {\n      const auto& set_reference = action.set_reference();\n      lower_case_strings.emplace_back(\n          std::make_unique<Http::LowerCaseString>(replaceInvalidCharacters(set_reference.key())));\n      strings.emplace_back(\n          std::make_unique<std::string>(replaceInvalidCharacters(set_reference.value())));\n      header_map->setReference(*lower_case_strings.back(), *strings.back());\n      break;\n    }\n    case test::common::http::Action::kSetReferenceKey: {\n      const auto& set_reference_key = action.set_reference_key();\n      lower_case_strings.emplace_back(std::make_unique<Http::LowerCaseString>(\n          
replaceInvalidCharacters(set_reference_key.key())));\n      header_map->setReferenceKey(*lower_case_strings.back(),\n                                  replaceInvalidCharacters(set_reference_key.value()));\n      break;\n    }\n    case test::common::http::Action::kGet: {\n      const auto& get = action.get();\n      const auto* header_entry =\n          header_map->get(Http::LowerCaseString(replaceInvalidCharacters(get.key())));\n      if (header_entry != nullptr) {\n        // Do some read-only stuff.\n        (void)strlen(std::string(header_entry->key().getStringView()).c_str());\n        (void)strlen(std::string(header_entry->value().getStringView()).c_str());\n        header_entry->key().empty();\n        header_entry->value().empty();\n      }\n      break;\n    }\n    case test::common::http::Action::kMutateAndMove: {\n      const auto& mutate_and_move = action.mutate_and_move();\n      lower_case_strings.emplace_back(\n          std::make_unique<Http::LowerCaseString>(replaceInvalidCharacters(mutate_and_move.key())));\n      // Randomly (using fuzzer data) set the header_field to either be of type Reference or Inline\n      const auto& str = lower_case_strings.back();\n      Http::HeaderString header_field; // By default it's Inline\n      if ((!str->get().empty()) && (str->get().at(0) & 0x1)) {\n        // Keeping header_field as Inline\n        header_field.setCopy(str->get());\n        // inlineTransform can only be applied to Inline type!\n        header_field.inlineTransform(absl::ascii_tolower);\n      } else {\n        // Changing header_field to Reference\n        header_field.setReference(str->get());\n      }\n      Http::HeaderString header_value;\n      // Do some mutation or parameterized action.\n      switch (mutate_and_move.mutate_selector_case()) {\n      case test::common::http::MutateAndMove::kAppend:\n        header_value.append(replaceInvalidCharacters(mutate_and_move.append()).c_str(),\n                            
mutate_and_move.append().size());\n        break;\n      case test::common::http::MutateAndMove::kSetCopy:\n        header_value.setCopy(replaceInvalidCharacters(mutate_and_move.set_copy()));\n        break;\n      case test::common::http::MutateAndMove::kSetInteger:\n        set_integer = mutate_and_move.set_integer();\n        header_value.setInteger(set_integer);\n        break;\n      case test::common::http::MutateAndMove::kSetReference:\n        strings.emplace_back(std::make_unique<std::string>(\n            replaceInvalidCharacters(mutate_and_move.set_reference())));\n        header_value.setReference(*strings.back());\n        break;\n      default:\n        break;\n      }\n      // Can't addViaMove on an empty header value.\n      if (!header_value.empty()) {\n        header_map->addViaMove(std::move(header_field), std::move(header_value));\n      }\n      break;\n    }\n    case test::common::http::Action::kAppend: {\n      const auto& append = action.append();\n      lower_case_strings.emplace_back(\n          std::make_unique<Http::LowerCaseString>(replaceInvalidCharacters(append.key())));\n      strings.emplace_back(std::make_unique<std::string>(replaceInvalidCharacters(append.value())));\n      header_map->appendCopy(*lower_case_strings.back(), *strings.back());\n      break;\n    }\n    case test::common::http::Action::kCopy: {\n      header_map = Http::createHeaderMap<Http::RequestHeaderMapImpl>(*header_map);\n      break;\n    }\n    case test::common::http::Action::kRemove: {\n      header_map->remove(Http::LowerCaseString(replaceInvalidCharacters(action.remove())));\n      break;\n    }\n    case test::common::http::Action::kRemovePrefix: {\n      header_map->removePrefix(\n          Http::LowerCaseString(replaceInvalidCharacters(action.remove_prefix())));\n      break;\n    }\n    case test::common::http::Action::kGetAndMutate: {\n      // Deprecated. 
Can not get and mutate entries.\n      break;\n    }\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n    // Exercise some read-only accessors.\n    header_map->size();\n    header_map->byteSize();\n    header_map->iterate([](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n      header.key();\n      header.value();\n      return Http::HeaderMap::Iterate::Continue;\n    });\n    header_map->iterateReverse([](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n      header.key();\n      header.value();\n      return Http::HeaderMap::Iterate::Continue;\n    });\n  }\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/header_map_impl_speed_test.cc",
    "content": "#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\nnamespace Http {\n\n/**\n * Add several dummy headers to a HeaderMap.\n * @param num_headers the number of dummy headers to add.\n */\nstatic void addDummyHeaders(HeaderMap& headers, size_t num_headers,\n                            const std::string prefix = \"dummy-key-\") {\n  for (size_t i = 0; i < num_headers; i++) {\n    headers.addCopy(LowerCaseString(prefix + std::to_string(i)), \"abcd\");\n  }\n}\n\n/** Measure the construction/destruction speed of RequestHeaderMapImpl.*/\nstatic void headerMapImplCreate(benchmark::State& state) {\n  // Make sure first time construction is not counted.\n  Http::ResponseHeaderMapImpl::create();\n  for (auto _ : state) { // NOLINT\n    auto headers = Http::ResponseHeaderMapImpl::create();\n    benchmark::DoNotOptimize(headers->size());\n  }\n}\nBENCHMARK(headerMapImplCreate);\n\n/**\n * Measure the speed of setting/overwriting a header value. The numeric Arg passed\n * by the BENCHMARK(...) macro call below indicates how many dummy headers this test\n * will add to the HeaderMapImpl before testing the setReference() method. That helps\n * identify whether the speed of setReference() is dependent on the number of other\n * headers in the HeaderMapImpl.\n */\nstatic void headerMapImplSetReference(benchmark::State& state) {\n  const LowerCaseString key(\"example-key\");\n  const std::string value(\"01234567890123456789\");\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  for (auto _ : state) { // NOLINT\n    headers->setReference(key, value);\n  }\n  benchmark::DoNotOptimize(headers->size());\n}\nBENCHMARK(headerMapImplSetReference)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/**\n * Measure the speed of retrieving a header value. The numeric Arg passed by the\n * BENCHMARK(...) 
macro call below indicates how many dummy headers this test\n * will add to the HeaderMapImpl during test setup. The relative performance of\n * this test for different Arg values will help reveal how the speed of the get()\n * method depends (or doesn't depend) on the number of other headers in the\n * HeaderMapImpl.\n */\nstatic void headerMapImplGet(benchmark::State& state) {\n  const LowerCaseString key(\"example-key\");\n  const std::string value(\"01234567890123456789\");\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  headers->setReference(key, value);\n  size_t successes = 0;\n  for (auto _ : state) { // NOLINT\n    successes += (headers->get(key) != nullptr);\n  }\n  benchmark::DoNotOptimize(successes);\n}\nBENCHMARK(headerMapImplGet)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/**\n * Measure the retrieval speed of a header for which HeaderMapImpl is expected to\n * provide special optimizations.\n */\nstatic void headerMapImplGetInline(benchmark::State& state) {\n  const std::string value(\"01234567890123456789\");\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  headers->setReferenceConnection(value);\n  size_t size = 0;\n  for (auto _ : state) { // NOLINT\n    size += headers->Connection()->value().size();\n  }\n  benchmark::DoNotOptimize(size);\n}\nBENCHMARK(headerMapImplGetInline)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/**\n * Measure the speed of writing to a header for which HeaderMapImpl is expected to\n * provide special optimizations.\n */\nstatic void headerMapImplSetInlineMacro(benchmark::State& state) {\n  const std::string value(\"01234567890123456789\");\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  for (auto _ : state) { // NOLINT\n    headers->setReferenceConnection(value);\n  }\n  
benchmark::DoNotOptimize(headers->size());\n}\nBENCHMARK(headerMapImplSetInlineMacro)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/**\n * Measure the speed of writing to a header for which HeaderMapImpl is expected to\n * provide special optimizations.\n */\nstatic void headerMapImplSetInlineInteger(benchmark::State& state) {\n  uint64_t value = 12345;\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  for (auto _ : state) { // NOLINT\n    headers->setConnection(value);\n  }\n  benchmark::DoNotOptimize(headers->size());\n}\nBENCHMARK(headerMapImplSetInlineInteger)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/** Measure the speed of the byteSize() estimation method. */\nstatic void headerMapImplGetByteSize(benchmark::State& state) {\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  uint64_t size = 0;\n  for (auto _ : state) { // NOLINT\n    size += headers->byteSize();\n  }\n  benchmark::DoNotOptimize(size);\n}\nBENCHMARK(headerMapImplGetByteSize)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/** Measure the speed of iteration with a lightweight callback. 
*/\nstatic void headerMapImplIterate(benchmark::State& state) {\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  size_t num_callbacks = 0;\n  addDummyHeaders(*headers, state.range(0));\n  auto counting_callback = [&num_callbacks](const HeaderEntry&) -> HeaderMap::Iterate {\n    num_callbacks++;\n    return HeaderMap::Iterate::Continue;\n  };\n  for (auto _ : state) { // NOLINT\n    headers->iterate(counting_callback);\n  }\n  benchmark::DoNotOptimize(num_callbacks);\n}\nBENCHMARK(headerMapImplIterate)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/**\n * Measure the speed of removing a header by key name.\n * @note The measured time for each iteration includes the time needed to add\n *       one copy of the header.\n */\nstatic void headerMapImplRemove(benchmark::State& state) {\n  const LowerCaseString key(\"example-key\");\n  const std::string value(\"01234567890123456789\");\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  for (auto _ : state) { // NOLINT\n    headers->addReference(key, value);\n    headers->remove(key);\n  }\n  benchmark::DoNotOptimize(headers->size());\n}\nBENCHMARK(headerMapImplRemove)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/**\n * Measure the speed of removing a header by key name, for the special case of\n * a key for which HeaderMapImpl is expected to provide special optimization.\n * @note The measured time for each iteration includes the time needed to add\n *       one copy of the header.\n */\nstatic void headerMapImplRemoveInline(benchmark::State& state) {\n  const LowerCaseString key(\"connection\");\n  const std::string value(\"01234567890123456789\");\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  for (auto _ : state) { // NOLINT\n    headers->addReference(key, value);\n    headers->remove(key);\n  }\n  
benchmark::DoNotOptimize(headers->size());\n}\nBENCHMARK(headerMapImplRemoveInline)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/**\n * Measure the speed of creating a HeaderMapImpl and populating it with a realistic\n * set of response headers.\n */\nstatic void headerMapImplPopulate(benchmark::State& state) {\n  const std::pair<LowerCaseString, std::string> headers_to_add[] = {\n      {LowerCaseString(\"cache-control\"), \"max-age=0, private, must-revalidate\"},\n      {LowerCaseString(\"content-encoding\"), \"gzip\"},\n      {LowerCaseString(\"content-type\"), \"text/html; charset=utf-8\"},\n      {LowerCaseString(\"date\"), \"Wed, 23 Jan 2019 04:00:00 GMT\"},\n      {LowerCaseString(\"server\"), \"envoy\"},\n      {LowerCaseString(\"x-custom-header-1\"), \"example 1\"},\n      {LowerCaseString(\"x-custom-header-2\"), \"example 2\"},\n      {LowerCaseString(\"x-custom-header-3\"), \"example 3\"},\n      {LowerCaseString(\"set-cookie\"), \"_cookie1=12345678; path = /; secure\"},\n      {LowerCaseString(\"set-cookie\"), \"_cookie2=12345678; path = /; secure\"},\n  };\n  for (auto _ : state) { // NOLINT\n    auto headers = Http::ResponseHeaderMapImpl::create();\n    for (const auto& key_value : headers_to_add) {\n      headers->addReference(key_value.first, key_value.second);\n    }\n    benchmark::DoNotOptimize(headers->size());\n  }\n}\nBENCHMARK(headerMapImplPopulate);\n\n/**\n * Measure the speed of encoding headers as part of upgraded requests (HTTP/1 to HTTP/2)\n * @note The measured time for each iteration includes the time needed to add\n *       a varying number of headers (set by the benchmark's argument).\n */\nstatic void headerMapImplEmulateH1toH2Upgrade(benchmark::State& state) {\n  uint32_t total_len = 0; // Accumulates the length of all header keys and values.\n  auto headers = Http::RequestHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  headers->setConnection(Http::Headers::get().ConnectionValues.Upgrade);\n  
headers->setUpgrade(Http::Headers::get().UpgradeValues.H2c);\n\n  for (auto _ : state) { // NOLINT\n    // Emulate the encodeHeaders method upgrade part.\n    Http::RequestHeaderMapPtr modified_headers = createHeaderMap<RequestHeaderMapImpl>(*headers);\n    benchmark::DoNotOptimize(headers->getUpgradeValue());\n    // Emulate the Http::Utility::transformUpgradeRequestFromH1toH2 function.\n    modified_headers->setReferenceMethod(Http::Headers::get().MethodValues.Connect);\n    modified_headers->setProtocol(headers->getUpgradeValue());\n    modified_headers->removeUpgrade();\n    modified_headers->removeConnection();\n    if (modified_headers->getContentLengthValue() == \"0\") {\n      modified_headers->removeContentLength();\n    }\n    // Emulate the headers iteration in the buildHeaders method.\n    modified_headers->iterate([&total_len](const HeaderEntry& header) -> HeaderMap::Iterate {\n      const absl::string_view header_key = header.key().getStringView();\n      const absl::string_view header_value = header.value().getStringView();\n      total_len += header_key.length() + header_value.length();\n      return HeaderMap::Iterate::Continue;\n    });\n    // modified_headers destruction time also being measured.\n  }\n  benchmark::DoNotOptimize(headers->size());\n  benchmark::DoNotOptimize(total_len);\n}\nBENCHMARK(headerMapImplEmulateH1toH2Upgrade)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/**\n * Measure the speed of decoding headers as part of upgraded responses (HTTP/2 to HTTP/1)\n * @note The measured time for each iteration includes the time needed to add\n *       a varying number of headers (set by the benchmark's argument).\n */\nstatic void headerMapImplEmulateH2toH1Upgrade(benchmark::State& state) {\n  uint32_t total_len = 0; // Accumulates the length of all header keys and values.\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, state.range(0));\n  headers->setStatus(200);\n\n  for (auto _ : state) { // 
NOLINT\n    // Emulate the Http::Utility::transformUpgradeResponseFromH2toH1 function.\n    benchmark::DoNotOptimize(headers->getStatusValue());\n    headers->setUpgrade(Http::Headers::get().UpgradeValues.H2c);\n    headers->setReferenceConnection(Http::Headers::get().ConnectionValues.Upgrade);\n    headers->setStatus(101);\n    // Emulate a decodeHeaders function that iterates over the headers.\n    headers->iterate([&total_len](const HeaderEntry& header) -> HeaderMap::Iterate {\n      const absl::string_view header_key = header.key().getStringView();\n      const absl::string_view header_value = header.value().getStringView();\n      total_len += header_key.length() + header_value.length();\n      return HeaderMap::Iterate::Continue;\n    });\n  }\n  benchmark::DoNotOptimize(headers->size());\n  benchmark::DoNotOptimize(total_len);\n}\nBENCHMARK(headerMapImplEmulateH2toH1Upgrade)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n/**\n * Measure the speed of removing a varying number of headers by key name prefix from\n * a header-map that contains 80 headers that do not have that prefix.\n */\nstatic void headerMapImplRemovePrefix(benchmark::State& state) {\n  const LowerCaseString prefix(\"X-prefix\");\n  auto headers = Http::ResponseHeaderMapImpl::create();\n  addDummyHeaders(*headers, 80);\n  for (auto _ : state) { // NOLINT\n    // Add the headers with the prefix\n    state.PauseTiming();\n    addDummyHeaders(*headers, state.range(0), prefix.get());\n    state.ResumeTiming();\n    headers->removePrefix(prefix);\n  }\n  benchmark::DoNotOptimize(headers->size());\n}\nBENCHMARK(headerMapImplRemovePrefix)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50);\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/header_map_impl_test.cc",
    "content": "#include <algorithm>\n#include <memory>\n#include <string>\n\n#include \"common/http/header_list_view.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing ::testing::ElementsAre;\nusing ::testing::InSequence;\n\nnamespace Envoy {\nnamespace Http {\n\nTEST(HeaderStringTest, All) {\n  // Static LowerCaseString constructor\n  {\n    LowerCaseString static_string(\"hello\");\n    HeaderString string(static_string);\n    EXPECT_EQ(\"hello\", string.getStringView());\n    EXPECT_EQ(static_string.get(), string.getStringView());\n    EXPECT_EQ(5U, string.size());\n  }\n\n  // Static LowerCaseString operators\n  {\n    LowerCaseString banana(\"banana\");\n    LowerCaseString lemon(\"lemon\");\n    EXPECT_TRUE(banana < lemon);\n    EXPECT_TRUE(banana != lemon);\n    EXPECT_TRUE(banana == banana);\n  }\n\n  // Static std::string constructor\n  {\n    std::string static_string(\"HELLO\");\n    HeaderString string(static_string);\n    EXPECT_EQ(\"HELLO\", string.getStringView());\n    EXPECT_EQ(static_string, string.getStringView());\n    EXPECT_EQ(5U, string.size());\n  }\n\n  // Static move constructor\n  {\n    std::string static_string(\"HELLO\");\n    HeaderString string1(static_string);\n    HeaderString string2(std::move(string1));\n    EXPECT_EQ(\"HELLO\", string2.getStringView());\n    EXPECT_EQ(static_string, string1.getStringView()); // NOLINT(bugprone-use-after-move)\n    EXPECT_EQ(static_string, string2.getStringView());\n    EXPECT_EQ(5U, string1.size());\n    EXPECT_EQ(5U, string2.size());\n  }\n\n  // Inline move constructor\n  {\n    HeaderString string;\n    string.setCopy(\"hello\");\n    EXPECT_FALSE(string.isReference());\n    HeaderString string2(std::move(string));\n    EXPECT_TRUE(string.empty()); // NOLINT(bugprone-use-after-move)\n  
  EXPECT_FALSE(string.isReference());\n    EXPECT_FALSE(string2.isReference());\n    string.append(\"world\", 5);\n    EXPECT_EQ(\"world\", string.getStringView());\n    EXPECT_EQ(5UL, string.size());\n    EXPECT_EQ(\"hello\", string2.getStringView());\n    EXPECT_EQ(5UL, string2.size());\n  }\n\n  // Inline move large constructor\n  {\n    std::string large(4096, 'a');\n    HeaderString string;\n    string.setCopy(large);\n    EXPECT_FALSE(string.isReference());\n    HeaderString string2(std::move(string));\n    EXPECT_TRUE(string.empty()); // NOLINT(bugprone-use-after-move)\n    EXPECT_FALSE(string.isReference());\n    EXPECT_FALSE(string2.isReference());\n    string.append(\"b\", 1);\n    EXPECT_EQ(\"b\", string.getStringView());\n    EXPECT_EQ(1UL, string.size());\n    EXPECT_EQ(large, string2.getStringView());\n    EXPECT_EQ(4096UL, string2.size());\n  }\n\n  // Static to inline number.\n  {\n    std::string static_string(\"HELLO\");\n    HeaderString string(static_string);\n    string.setInteger(5);\n    EXPECT_FALSE(string.isReference());\n    EXPECT_EQ(\"5\", string.getStringView());\n  }\n\n  // Static to inline string.\n  {\n    std::string static_string(\"HELLO\");\n    HeaderString string(static_string);\n    string.setCopy(static_string);\n    EXPECT_FALSE(string.isReference());\n    EXPECT_EQ(\"HELLO\", string.getStringView());\n  }\n\n  // Inline rtrim removes trailing whitespace only.\n  {\n    const std::string data_with_leading_lws = \" \\t\\f\\v  data\";\n    const std::string data_with_leading_and_trailing_lws = data_with_leading_lws + \" \\t\\f\\v\";\n    HeaderString string;\n    string.append(data_with_leading_and_trailing_lws.data(),\n                  data_with_leading_and_trailing_lws.size());\n    EXPECT_EQ(data_with_leading_and_trailing_lws, string.getStringView());\n    string.rtrim();\n    EXPECT_NE(data_with_leading_and_trailing_lws, string.getStringView());\n    EXPECT_EQ(data_with_leading_lws, string.getStringView());\n  }\n\n  // 
Static clear() does nothing.\n  {\n    std::string static_string(\"HELLO\");\n    HeaderString string(static_string);\n    EXPECT_TRUE(string.isReference());\n    string.clear();\n    EXPECT_TRUE(string.isReference());\n    EXPECT_EQ(\"HELLO\", string.getStringView());\n  }\n\n  // Static to append.\n  {\n    std::string static_string(\"HELLO\");\n    HeaderString string(static_string);\n    EXPECT_TRUE(string.isReference());\n    string.append(\"a\", 1);\n    EXPECT_EQ(\"HELLOa\", string.getStringView());\n  }\n\n  // Copy inline\n  {\n    HeaderString string;\n    string.setCopy(\"hello\");\n    EXPECT_EQ(\"hello\", string.getStringView());\n    EXPECT_EQ(5U, string.size());\n  }\n\n  // Copy dynamic\n  {\n    HeaderString string;\n    std::string large_value(4096, 'a');\n    string.setCopy(large_value);\n    EXPECT_EQ(large_value, string.getStringView());\n    EXPECT_NE(large_value.c_str(), string.getStringView().data());\n    EXPECT_EQ(4096U, string.size());\n  }\n\n  // Copy twice dynamic\n  {\n    HeaderString string;\n    std::string large_value1(4096, 'a');\n    string.setCopy(large_value1);\n    std::string large_value2(2048, 'b');\n    string.setCopy(large_value2);\n    EXPECT_EQ(large_value2, string.getStringView());\n    EXPECT_NE(large_value2.c_str(), string.getStringView().data());\n    EXPECT_EQ(2048U, string.size());\n  }\n\n  // Copy twice dynamic with reallocate\n  {\n    HeaderString string;\n    std::string large_value1(4096, 'a');\n    string.setCopy(large_value1);\n    std::string large_value2(16384, 'b');\n    string.setCopy(large_value2);\n    EXPECT_EQ(large_value2, string.getStringView());\n    EXPECT_NE(large_value2.c_str(), string.getStringView().data());\n    EXPECT_EQ(16384U, string.size());\n  }\n\n  // Copy twice inline to dynamic\n  {\n    HeaderString string;\n    std::string large_value1(16, 'a');\n    string.setCopy(large_value1);\n    std::string large_value2(16384, 'b');\n    string.setCopy(large_value2);\n    
EXPECT_EQ(large_value2, string.getStringView());\n    EXPECT_NE(large_value2.c_str(), string.getStringView().data());\n    EXPECT_EQ(16384U, string.size());\n  }\n\n  // Copy, exactly filling inline capacity\n  //\n  // ASAN does not catch the clobber in the case where the code writes one past the\n  // end of the inline buffer. To ensure coverage the next block checks that setCopy\n  // is not introducing a NUL in a way that does not rely on an actual clobber getting\n  // detected.\n  {\n    HeaderString string;\n    std::string large(128, 'z');\n    string.setCopy(large);\n    EXPECT_FALSE(string.isReference());\n    EXPECT_EQ(string.getStringView(), large);\n  }\n\n  // Copy, exactly filling dynamic capacity\n  //\n  // ASAN should catch a write one past the end of the inline buffer. This test\n  // forces a dynamic buffer with one copy and then fills it with the next.\n  {\n    HeaderString string;\n    // Force dynamic vector allocation with setCopy of inline buffer size + 1.\n    std::string large1(129, 'z');\n    string.setCopy(large1);\n    EXPECT_FALSE(string.isReference());\n    // Dynamic capacity in setCopy is 2x required by the size.\n    // So to fill it exactly setCopy with a total of 256 chars.\n    std::string large2(256, 'z');\n    string.setCopy(large2);\n    EXPECT_FALSE(string.isReference());\n    EXPECT_EQ(string.getStringView(), large2);\n  }\n\n  // Append, small buffer to inline\n  {\n    HeaderString string;\n    std::string test(128, 'a');\n    string.append(test.c_str(), test.size());\n    EXPECT_FALSE(string.isReference());\n    string.append(\"a\", 1);\n    EXPECT_FALSE(string.isReference());\n    test += 'a';\n    EXPECT_EQ(test, string.getStringView());\n  }\n\n  // Append into inline twice, then shift to dynamic.\n  {\n    HeaderString string;\n    string.append(\"hello\", 5);\n    EXPECT_EQ(\"hello\", string.getStringView());\n    EXPECT_EQ(5U, string.size());\n    string.append(\"world\", 5);\n    EXPECT_EQ(\"helloworld\", 
string.getStringView());\n    EXPECT_EQ(10U, string.size());\n    std::string large(4096, 'a');\n    string.append(large.c_str(), large.size());\n    large = \"helloworld\" + large;\n    EXPECT_EQ(large, string.getStringView());\n    EXPECT_EQ(4106U, string.size());\n  }\n\n  // Append, realloc close to limit with small buffer.\n  {\n    HeaderString string;\n    std::string large(129, 'a');\n    string.append(large.c_str(), large.size());\n    EXPECT_FALSE(string.isReference());\n    std::string large2(120, 'b');\n    string.append(large2.c_str(), large2.size());\n    std::string large3(32, 'c');\n    string.append(large3.c_str(), large3.size());\n    EXPECT_EQ((large + large2 + large3), string.getStringView());\n    EXPECT_EQ(281U, string.size());\n  }\n\n  // Append, exactly filling dynamic capacity\n  //\n  // ASAN should catch a write one past the end of the dynamic buffer. This test\n  // forces a dynamic buffer with one copy and then fills it with the next.\n  {\n    HeaderString string;\n    // Force dynamic allocation with setCopy of inline buffer size + 1.\n    std::string large1(129, 'z');\n    string.setCopy(large1);\n    EXPECT_FALSE(string.isReference());\n    // Dynamic capacity in setCopy is 2x required by the size.\n    // So to fill it exactly append 127 chars for a total of 256 chars.\n    std::string large2(127, 'z');\n    string.append(large2.c_str(), large2.size());\n    EXPECT_FALSE(string.isReference());\n    EXPECT_EQ(string.getStringView(), large1 + large2);\n  }\n\n  // Set integer, inline\n  {\n    HeaderString string;\n    string.setInteger(123456789);\n    EXPECT_EQ(\"123456789\", string.getStringView());\n    EXPECT_EQ(9U, string.size());\n  }\n\n  // Set integer, dynamic\n  {\n    HeaderString string;\n    std::string large(129, 'a');\n    string.append(large.c_str(), large.size());\n    string.setInteger(123456789);\n    EXPECT_EQ(\"123456789\", string.getStringView());\n    EXPECT_EQ(9U, string.size());\n    
EXPECT_FALSE(string.isReference());\n  }\n\n  // Set static, switch to inline, back to static.\n  {\n    const std::string static_string = \"hello world\";\n    HeaderString string;\n    string.setReference(static_string);\n    EXPECT_EQ(string.getStringView(), static_string);\n    EXPECT_EQ(11U, string.size());\n    EXPECT_TRUE(string.isReference());\n\n    const std::string large(129, 'a');\n    string.setCopy(large);\n    EXPECT_NE(string.getStringView().data(), large.c_str());\n    EXPECT_FALSE(string.isReference());\n\n    string.setReference(static_string);\n    EXPECT_EQ(string.getStringView(), static_string);\n    EXPECT_EQ(11U, string.size());\n    EXPECT_TRUE(string.isReference());\n  }\n\n  // getString\n  {\n    std::string static_string(\"HELLO\");\n    HeaderString headerString1(static_string);\n    absl::string_view retString1 = headerString1.getStringView();\n    EXPECT_EQ(\"HELLO\", retString1);\n    EXPECT_EQ(5U, retString1.size());\n\n    HeaderString headerString2;\n    absl::string_view retString2 = headerString2.getStringView();\n    EXPECT_EQ(0U, retString2.size());\n  }\n\n  // inlineTransform\n  {\n    const std::string static_string = \"HELLO\";\n    HeaderString string;\n    string.setCopy(static_string);\n    string.inlineTransform([](char c) { return static_cast<uint8_t>(tolower(c)); });\n    EXPECT_FALSE(string.isReference());\n    EXPECT_EQ(5U, string.size());\n    EXPECT_EQ(string.getStringView(), \"hello\");\n    string.inlineTransform(toupper);\n    EXPECT_EQ(string.getStringView(), static_string);\n    EXPECT_EQ(5U, string.size());\n    EXPECT_FALSE(string.isReference());\n  }\n}\n\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    custom_header_1(Http::LowerCaseString{\"foo_custom_header\"});\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    custom_header_1_copy(Http::LowerCaseString{\"foo_custom_header\"});\n\nclass HeaderMapImplTest : 
public testing::TestWithParam<uint32_t> {\npublic:\n  HeaderMapImplTest() {\n    // Set the lazy map threshold using the test parameter.\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.http.headermap.lazy_map_min_size\", absl::StrCat(GetParam())}});\n  }\n\n  static std::string testParamsToString(const ::testing::TestParamInfo<uint32_t>& params) {\n    return absl::StrCat(params.param);\n  }\n\n  TestScopedRuntime runtime;\n};\n\nINSTANTIATE_TEST_SUITE_P(HeaderMapThreshold, HeaderMapImplTest,\n                         testing::Values(0, 1, std::numeric_limits<uint32_t>::max()),\n                         HeaderMapImplTest::testParamsToString);\n\n// Make sure that the same header registered twice points to the same location.\nTEST_P(HeaderMapImplTest, CustomRegisteredHeaders) {\n  TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(custom_header_1.handle(), custom_header_1_copy.handle());\n  EXPECT_EQ(nullptr, headers.getInline(custom_header_1.handle()));\n  EXPECT_EQ(nullptr, headers.getInline(custom_header_1_copy.handle()));\n  headers.setInline(custom_header_1.handle(), 42);\n  EXPECT_EQ(\"42\", headers.getInlineValue(custom_header_1_copy.handle()));\n  EXPECT_EQ(\"foo_custom_header\",\n            headers.getInline(custom_header_1.handle())->key().getStringView());\n}\n\n#define TEST_INLINE_HEADER_FUNCS(name)                                                             \\\n  header_map->addCopy(Headers::get().name, #name);                                                 \\\n  EXPECT_EQ(header_map->name()->value().getStringView(), #name);                                   \\\n  header_map->remove##name();                                                                      \\\n  EXPECT_EQ(nullptr, header_map->name());                                                          \\\n  header_map->set##name(#name);                                                                    \\\n  
EXPECT_EQ(header_map->get(Headers::get().name)->value().getStringView(), #name);\n\n// Make sure that the O(1) headers are wired up properly.\nTEST_P(HeaderMapImplTest, AllInlineHeaders) {\n  {\n    auto header_map = RequestHeaderMapImpl::create();\n    INLINE_REQ_HEADERS(TEST_INLINE_HEADER_FUNCS)\n    INLINE_REQ_RESP_HEADERS(TEST_INLINE_HEADER_FUNCS)\n  }\n  {\n      // No request trailer O(1) headers.\n  } {\n    auto header_map = ResponseHeaderMapImpl::create();\n    INLINE_RESP_HEADERS(TEST_INLINE_HEADER_FUNCS)\n    INLINE_REQ_RESP_HEADERS(TEST_INLINE_HEADER_FUNCS)\n    INLINE_RESP_HEADERS_TRAILERS(TEST_INLINE_HEADER_FUNCS)\n  }\n  {\n    auto header_map = ResponseTrailerMapImpl::create();\n    INLINE_RESP_HEADERS_TRAILERS(TEST_INLINE_HEADER_FUNCS)\n  }\n}\n\nTEST_P(HeaderMapImplTest, InlineInsert) {\n  TestRequestHeaderMapImpl headers;\n  EXPECT_TRUE(headers.empty());\n  EXPECT_EQ(0, headers.size());\n  EXPECT_EQ(nullptr, headers.Host());\n  headers.setHost(\"hello\");\n  EXPECT_FALSE(headers.empty());\n  EXPECT_EQ(1, headers.size());\n  EXPECT_EQ(\":authority\", headers.Host()->key().getStringView());\n  EXPECT_EQ(\"hello\", headers.getHostValue());\n  EXPECT_EQ(\"hello\", headers.get(Headers::get().Host)->value().getStringView());\n}\n\nTEST_P(HeaderMapImplTest, InlineAppend) {\n  {\n    TestRequestHeaderMapImpl headers;\n    // Create via header and append.\n    headers.setVia(\"\");\n    headers.appendVia(\"1.0 fred\", \",\");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred\");\n    headers.appendVia(\"1.1 nowhere.com\", \",\");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred,1.1 nowhere.com\");\n  }\n  {\n    // Append to via header without explicitly creating first.\n    TestRequestHeaderMapImpl headers;\n    headers.appendVia(\"1.0 fred\", \",\");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred\");\n    headers.appendVia(\"1.1 nowhere.com\", \",\");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred,1.1 nowhere.com\");\n  }\n  {\n    // Custom 
delimiter.\n    TestRequestHeaderMapImpl headers;\n    headers.setVia(\"\");\n    headers.appendVia(\"1.0 fred\", \", \");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred\");\n    headers.appendVia(\"1.1 nowhere.com\", \", \");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred, 1.1 nowhere.com\");\n  }\n  {\n    // Append and then later set.\n    TestRequestHeaderMapImpl headers;\n    headers.appendVia(\"1.0 fred\", \",\");\n    headers.appendVia(\"1.1 nowhere.com\", \",\");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred,1.1 nowhere.com\");\n    headers.setVia(\"2.0 override\");\n    EXPECT_EQ(headers.getViaValue(), \"2.0 override\");\n  }\n  {\n    // Set and then append. This mimics how GrpcTimeout is set.\n    TestRequestHeaderMapImpl headers;\n    headers.setGrpcTimeout(42);\n    EXPECT_EQ(headers.getGrpcTimeoutValue(), \"42\");\n    headers.appendGrpcTimeout(\"s\", \"\");\n    EXPECT_EQ(headers.getGrpcTimeoutValue(), \"42s\");\n  }\n}\n\nTEST_P(HeaderMapImplTest, MoveIntoInline) {\n  TestRequestHeaderMapImpl headers;\n  HeaderString key;\n  key.setCopy(Headers::get().EnvoyRetryOn.get());\n  HeaderString value;\n  value.setCopy(\"hello\");\n  headers.addViaMove(std::move(key), std::move(value));\n  EXPECT_EQ(\"x-envoy-retry-on\", headers.EnvoyRetryOn()->key().getStringView());\n  EXPECT_EQ(\"hello\", headers.getEnvoyRetryOnValue());\n\n  HeaderString key2;\n  key2.setCopy(Headers::get().EnvoyRetryOn.get());\n  HeaderString value2;\n  value2.setCopy(\"there\");\n  headers.addViaMove(std::move(key2), std::move(value2));\n  EXPECT_EQ(\"x-envoy-retry-on\", headers.EnvoyRetryOn()->key().getStringView());\n  EXPECT_EQ(\"hello,there\", headers.getEnvoyRetryOnValue());\n}\n\nTEST_P(HeaderMapImplTest, Remove) {\n  TestRequestHeaderMapImpl headers;\n\n  // Add random header and then remove by name.\n  LowerCaseString static_key(\"hello\");\n  std::string ref_value(\"value\");\n  headers.addReference(static_key, ref_value);\n  EXPECT_EQ(\"value\", 
headers.get(static_key)->value().getStringView());\n  EXPECT_TRUE(headers.get(static_key)->value().isReference());\n  EXPECT_EQ(1UL, headers.size());\n  EXPECT_FALSE(headers.empty());\n  EXPECT_EQ(1UL, headers.remove(static_key));\n  EXPECT_EQ(nullptr, headers.get(static_key));\n  EXPECT_EQ(0UL, headers.size());\n  EXPECT_TRUE(headers.empty());\n\n  // Add and remove by inline.\n  EXPECT_EQ(0UL, headers.removeContentLength());\n  headers.setContentLength(5);\n  EXPECT_EQ(\"5\", headers.getContentLengthValue());\n  EXPECT_EQ(1UL, headers.size());\n  EXPECT_FALSE(headers.empty());\n  EXPECT_EQ(1UL, headers.removeContentLength());\n  EXPECT_EQ(nullptr, headers.ContentLength());\n  EXPECT_EQ(0UL, headers.size());\n  EXPECT_TRUE(headers.empty());\n\n  // Add inline and remove by name.\n  headers.setContentLength(5);\n  EXPECT_EQ(\"5\", headers.getContentLengthValue());\n  EXPECT_EQ(1UL, headers.size());\n  EXPECT_FALSE(headers.empty());\n  EXPECT_EQ(1UL, headers.remove(Headers::get().ContentLength));\n  EXPECT_EQ(nullptr, headers.ContentLength());\n  EXPECT_EQ(0UL, headers.size());\n  EXPECT_TRUE(headers.empty());\n\n  // Try to remove nonexistent headers.\n  EXPECT_EQ(0UL, headers.remove(static_key));\n  EXPECT_EQ(0UL, headers.remove(Headers::get().ContentLength));\n}\n\nTEST_P(HeaderMapImplTest, RemoveHost) {\n  TestRequestHeaderMapImpl headers;\n  headers.setHost(\"foo\");\n  EXPECT_EQ(\"foo\", headers.get_(\"host\"));\n  EXPECT_EQ(\"foo\", headers.get_(\":authority\"));\n  // Make sure that when we remove by \"host\" without using the inline functions, the mapping to\n  // \":authority\" still takes place.\n  // https://github.com/envoyproxy/envoy/pull/12160\n  EXPECT_EQ(1UL, headers.remove(\"host\"));\n  EXPECT_EQ(\"\", headers.get_(\"host\"));\n  EXPECT_EQ(\"\", headers.get_(\":authority\"));\n  EXPECT_EQ(nullptr, headers.Host());\n}\n\nTEST_P(HeaderMapImplTest, RemoveIf) {\n  LowerCaseString key1 = LowerCaseString(\"X-postfix-foo\");\n  LowerCaseString key2 = 
LowerCaseString(\"X-postfix-\");\n  LowerCaseString key3 = LowerCaseString(\"x-postfix-eep\");\n\n  {\n    TestRequestHeaderMapImpl headers;\n    headers.addReference(key1, \"value\");\n    headers.addReference(key2, \"value\");\n    headers.addReference(key3, \"value\");\n\n    EXPECT_EQ(0UL, headers.removeIf([](const HeaderEntry&) -> bool { return false; }));\n\n    EXPECT_EQ(2UL, headers.removeIf([](const HeaderEntry& entry) -> bool {\n      return absl::EndsWith(entry.key().getStringView(), \"foo\") ||\n             absl::EndsWith(entry.key().getStringView(), \"eep\");\n    }));\n\n    TestRequestHeaderMapImpl expected{{\"X-postfix-\", \"value\"}};\n    EXPECT_EQ(expected, headers);\n  }\n\n  // Test multiple entries with same key but different value.\n  {\n    TestRequestHeaderMapImpl headers;\n    headers.addReference(key1, \"valueA\");\n    headers.addReference(key1, \"valueB\");\n    headers.addReference(key1, \"valueC\");\n    headers.addReference(key2, \"valueB\");\n    headers.addReference(key3, \"valueC\");\n\n    EXPECT_EQ(5UL, headers.size());\n    EXPECT_EQ(2UL, headers.removeIf([](const HeaderEntry& entry) -> bool {\n      return absl::EndsWith(entry.value().getStringView(), \"B\");\n    }));\n\n    // Make sure key1 other values still exist.\n    TestRequestHeaderMapImpl expected{\n        {key1.get(), \"valueA\"}, {key1.get(), \"valueC\"}, {key3.get(), \"valueC\"}};\n    EXPECT_EQ(expected, headers);\n  }\n}\n\nTEST_P(HeaderMapImplTest, RemovePrefix) {\n  // These will match.\n  LowerCaseString key1 = LowerCaseString(\"X-prefix-foo\");\n  LowerCaseString key3 = LowerCaseString(\"X-Prefix-\");\n  LowerCaseString key5 = LowerCaseString(\"x-prefix-eep\");\n  // These will not.\n  LowerCaseString key2 = LowerCaseString(\" x-prefix-foo\");\n  LowerCaseString key4 = LowerCaseString(\"y-x-prefix-foo\");\n\n  TestRequestHeaderMapImpl headers;\n  headers.addReference(key1, \"value\");\n  headers.addReference(key2, \"value\");\n  headers.addReference(key3, 
\"value\");\n  headers.addReference(key4, \"value\");\n  headers.addReference(key5, \"value\");\n\n  // Test removing the first header, middle headers, and the end header.\n  EXPECT_EQ(3UL, headers.removePrefix(LowerCaseString(\"x-prefix-\")));\n  EXPECT_EQ(nullptr, headers.get(key1));\n  EXPECT_NE(nullptr, headers.get(key2));\n  EXPECT_EQ(nullptr, headers.get(key3));\n  EXPECT_NE(nullptr, headers.get(key4));\n  EXPECT_EQ(nullptr, headers.get(key5));\n\n  // Try to remove headers with no prefix match.\n  EXPECT_EQ(0UL, headers.removePrefix(LowerCaseString(\"foo\")));\n\n  // Remove all headers.\n  EXPECT_EQ(2UL, headers.removePrefix(LowerCaseString(\"\")));\n  EXPECT_EQ(nullptr, headers.get(key2));\n  EXPECT_EQ(nullptr, headers.get(key4));\n\n  // Add inline and remove by prefix\n  headers.setContentLength(5);\n  EXPECT_EQ(\"5\", headers.getContentLengthValue());\n  EXPECT_EQ(1UL, headers.size());\n  EXPECT_FALSE(headers.empty());\n  EXPECT_EQ(1UL, headers.removePrefix(LowerCaseString(\"content\")));\n  EXPECT_EQ(nullptr, headers.ContentLength());\n}\n\nclass HeaderAndValueCb\n    : public testing::MockFunction<void(const std::string&, const std::string&)> {\npublic:\n  HeaderMap::ConstIterateCb asIterateCb() {\n    return [this](const Http::HeaderEntry& header) -> HeaderMap::Iterate {\n      Call(std::string(header.key().getStringView()), std::string(header.value().getStringView()));\n      return HeaderMap::Iterate::Continue;\n    };\n  }\n};\n\nTEST_P(HeaderMapImplTest, SetRemovesAllValues) {\n  TestRequestHeaderMapImpl headers;\n\n  LowerCaseString key1(\"hello\");\n  LowerCaseString key2(\"olleh\");\n  std::string ref_value1(\"world\");\n  std::string ref_value2(\"planet\");\n  std::string ref_value3(\"globe\");\n  std::string ref_value4(\"earth\");\n  std::string ref_value5(\"blue marble\");\n\n  headers.addReference(key1, ref_value1);\n  headers.addReference(key2, ref_value2);\n  headers.addReference(key1, ref_value3);\n  headers.addReference(key1, 
ref_value4);\n\n  {\n    HeaderAndValueCb cb;\n\n    InSequence seq;\n    EXPECT_CALL(cb, Call(\"hello\", \"world\"));\n    EXPECT_CALL(cb, Call(\"olleh\", \"planet\"));\n    EXPECT_CALL(cb, Call(\"hello\", \"globe\"));\n    EXPECT_CALL(cb, Call(\"hello\", \"earth\"));\n\n    headers.iterate(cb.asIterateCb());\n  }\n\n  headers.setReference(key1, ref_value5); // set moves key to end\n\n  {\n    HeaderAndValueCb cb;\n\n    InSequence seq;\n    EXPECT_CALL(cb, Call(\"olleh\", \"planet\"));\n    EXPECT_CALL(cb, Call(\"hello\", \"blue marble\"));\n\n    headers.iterate(cb.asIterateCb());\n  }\n}\n\nTEST_P(HeaderMapImplTest, DoubleInlineAdd) {\n  {\n    TestRequestHeaderMapImpl headers;\n    const std::string foo(\"foo\");\n    const std::string bar(\"bar\");\n    headers.addReference(Headers::get().ContentLength, foo);\n    headers.addReference(Headers::get().ContentLength, bar);\n    EXPECT_EQ(\"foo,bar\", headers.getContentLengthValue());\n    EXPECT_EQ(1UL, headers.size());\n  }\n  {\n    TestRequestHeaderMapImpl headers;\n    headers.addReferenceKey(Headers::get().ContentLength, \"foo\");\n    headers.addReferenceKey(Headers::get().ContentLength, \"bar\");\n    EXPECT_EQ(\"foo,bar\", headers.getContentLengthValue());\n    EXPECT_EQ(1UL, headers.size());\n  }\n  {\n    TestRequestHeaderMapImpl headers;\n    headers.addReferenceKey(Headers::get().ContentLength, 5);\n    headers.addReferenceKey(Headers::get().ContentLength, 6);\n    EXPECT_EQ(\"5,6\", headers.getContentLengthValue());\n    EXPECT_EQ(1UL, headers.size());\n  }\n  {\n    TestRequestHeaderMapImpl headers;\n    const std::string foo(\"foo\");\n    headers.addReference(Headers::get().ContentLength, foo);\n    headers.addReferenceKey(Headers::get().ContentLength, 6);\n    EXPECT_EQ(\"foo,6\", headers.getContentLengthValue());\n    EXPECT_EQ(1UL, headers.size());\n  }\n}\n\n// Per https://github.com/envoyproxy/envoy/issues/7488 make sure we don't\n// combine set-cookie headers\nTEST_P(HeaderMapImplTest, 
DoubleCookieAdd) {\n  TestRequestHeaderMapImpl headers;\n  const std::string foo(\"foo\");\n  const std::string bar(\"bar\");\n  const LowerCaseString& set_cookie = Http::Headers::get().SetCookie;\n  headers.addReference(set_cookie, foo);\n  headers.addReference(set_cookie, bar);\n  EXPECT_EQ(2UL, headers.size());\n\n  std::vector<absl::string_view> out;\n  Http::HeaderUtility::getAllOfHeader(headers, \"set-cookie\", out);\n  ASSERT_EQ(out.size(), 2);\n  ASSERT_EQ(out[0], \"foo\");\n  ASSERT_EQ(out[1], \"bar\");\n}\n\nTEST_P(HeaderMapImplTest, DoubleInlineSet) {\n  TestRequestHeaderMapImpl headers;\n  headers.setReferenceKey(Headers::get().ContentType, \"blah\");\n  headers.setReferenceKey(Headers::get().ContentType, \"text/html\");\n  EXPECT_EQ(\"text/html\", headers.getContentTypeValue());\n  EXPECT_EQ(1UL, headers.size());\n}\n\nTEST_P(HeaderMapImplTest, AddReferenceKey) {\n  TestRequestHeaderMapImpl headers;\n  LowerCaseString foo(\"hello\");\n  headers.addReferenceKey(foo, \"world\");\n  EXPECT_NE(\"world\", headers.get(foo)->value().getStringView().data());\n  EXPECT_EQ(\"world\", headers.get(foo)->value().getStringView());\n}\n\nTEST_P(HeaderMapImplTest, SetReferenceKey) {\n  TestRequestHeaderMapImpl headers;\n  LowerCaseString foo(\"hello\");\n  headers.setReferenceKey(foo, \"world\");\n  EXPECT_NE(\"world\", headers.get(foo)->value().getStringView().data());\n  EXPECT_EQ(\"world\", headers.get(foo)->value().getStringView());\n\n  headers.setReferenceKey(foo, \"monde\");\n  EXPECT_NE(\"monde\", headers.get(foo)->value().getStringView().data());\n  EXPECT_EQ(\"monde\", headers.get(foo)->value().getStringView());\n}\n\nTEST_P(HeaderMapImplTest, SetCopyOldBehavior) {\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.http_set_copy_replace_all_headers\", \"false\"}});\n\n  TestRequestHeaderMapImpl headers;\n  LowerCaseString foo(\"hello\");\n  headers.setCopy(foo, \"world\");\n  EXPECT_EQ(\"world\", 
headers.get(foo)->value().getStringView());\n\n  // Overwrite value.\n  headers.setCopy(foo, \"monde\");\n  EXPECT_EQ(\"monde\", headers.get(foo)->value().getStringView());\n\n  // Add another foo header.\n  headers.addCopy(foo, \"monde2\");\n  EXPECT_EQ(headers.size(), 2);\n\n  // Only the first foo header is overridden.\n  headers.setCopy(foo, \"override-monde\");\n  EXPECT_EQ(headers.size(), 2);\n\n  HeaderAndValueCb cb;\n\n  InSequence seq;\n  EXPECT_CALL(cb, Call(\"hello\", \"override-monde\"));\n  EXPECT_CALL(cb, Call(\"hello\", \"monde2\"));\n  headers.iterate(cb.asIterateCb());\n\n  // Test setting an empty string and then overriding.\n  EXPECT_EQ(2UL, headers.remove(foo));\n  EXPECT_EQ(headers.size(), 0);\n  const std::string empty;\n  headers.setCopy(foo, empty);\n  EXPECT_EQ(headers.size(), 1);\n  headers.setCopy(foo, \"not-empty\");\n  EXPECT_EQ(headers.get(foo)->value().getStringView(), \"not-empty\");\n\n  // Use setCopy with inline headers both indirectly and directly.\n  headers.clear();\n  EXPECT_EQ(headers.size(), 0);\n  headers.setCopy(Headers::get().Path, \"/\");\n  EXPECT_EQ(headers.size(), 1);\n  EXPECT_EQ(headers.getPathValue(), \"/\");\n  headers.setPath(\"/foo\");\n  EXPECT_EQ(headers.size(), 1);\n  EXPECT_EQ(headers.getPathValue(), \"/foo\");\n}\n\nTEST_P(HeaderMapImplTest, SetCopyNewBehavior) {\n  TestRequestHeaderMapImpl headers;\n  LowerCaseString foo(\"hello\");\n  headers.setCopy(foo, \"world\");\n  EXPECT_EQ(\"world\", headers.get(foo)->value().getStringView());\n\n  // Overwrite value.\n  headers.setCopy(foo, \"monde\");\n  EXPECT_EQ(\"monde\", headers.get(foo)->value().getStringView());\n\n  // Add another foo header.\n  headers.addCopy(foo, \"monde2\");\n  EXPECT_EQ(headers.size(), 2);\n\n  // The foo header is overridden.\n  headers.setCopy(foo, \"override-monde\");\n  EXPECT_EQ(headers.size(), 1);\n\n  HeaderAndValueCb cb;\n\n  InSequence seq;\n  EXPECT_CALL(cb, Call(\"hello\", \"override-monde\"));\n  
headers.iterate(cb.asIterateCb());\n\n  // Test setting an empty string and then overriding.\n  EXPECT_EQ(1UL, headers.remove(foo));\n  EXPECT_EQ(headers.size(), 0);\n  const std::string empty;\n  headers.setCopy(foo, empty);\n  EXPECT_EQ(headers.size(), 1);\n  headers.setCopy(foo, \"not-empty\");\n  EXPECT_EQ(headers.get(foo)->value().getStringView(), \"not-empty\");\n\n  // Use setCopy with inline headers both indirectly and directly.\n  headers.clear();\n  EXPECT_EQ(headers.size(), 0);\n  headers.setCopy(Headers::get().Path, \"/\");\n  EXPECT_EQ(headers.size(), 1);\n  EXPECT_EQ(headers.getPathValue(), \"/\");\n  headers.setPath(\"/foo\");\n  EXPECT_EQ(headers.size(), 1);\n  EXPECT_EQ(headers.getPathValue(), \"/foo\");\n}\n\nTEST_P(HeaderMapImplTest, AddCopy) {\n  TestRequestHeaderMapImpl headers;\n\n  // Start with a string value.\n  std::unique_ptr<LowerCaseString> lcKeyPtr(new LowerCaseString(\"hello\"));\n  headers.addCopy(*lcKeyPtr, \"world\");\n\n  const HeaderString& value = headers.get(*lcKeyPtr)->value();\n\n  EXPECT_EQ(\"world\", value.getStringView());\n  EXPECT_EQ(5UL, value.size());\n\n  lcKeyPtr.reset();\n\n  const HeaderString& value2 = headers.get(LowerCaseString(\"hello\"))->value();\n\n  EXPECT_EQ(\"world\", value2.getStringView());\n  EXPECT_EQ(5UL, value2.size());\n  EXPECT_EQ(value.getStringView(), value2.getStringView());\n  EXPECT_EQ(1UL, headers.size());\n\n  // Repeat with an int value.\n  //\n  // addReferenceKey and addCopy can both add multiple instances of a\n  // given header, so we need to delete the old \"hello\" header.\n  // Test that removing will return 0 byte size.\n  EXPECT_EQ(1UL, headers.remove(LowerCaseString(\"hello\")));\n  EXPECT_EQ(headers.byteSize(), 0);\n\n  // Build \"hello\" with string concatenation to make it unlikely that the\n  // compiler is just reusing the same string constant for everything.\n  lcKeyPtr = std::make_unique<LowerCaseString>(std::string(\"he\") + \"llo\");\n  EXPECT_STREQ(\"hello\", 
lcKeyPtr->get().c_str());\n\n  headers.addCopy(*lcKeyPtr, 42);\n\n  const HeaderString& value3 = headers.get(*lcKeyPtr)->value();\n\n  EXPECT_EQ(\"42\", value3.getStringView());\n  EXPECT_EQ(2UL, value3.size());\n\n  lcKeyPtr.reset();\n\n  const HeaderString& value4 = headers.get(LowerCaseString(\"hello\"))->value();\n\n  EXPECT_EQ(\"42\", value4.getStringView());\n  EXPECT_EQ(2UL, value4.size());\n  EXPECT_EQ(1UL, headers.size());\n\n  // Here, again, we'll build yet another key string.\n  LowerCaseString lcKey3(std::string(\"he\") + \"ll\" + \"o\");\n  EXPECT_STREQ(\"hello\", lcKey3.get().c_str());\n\n  EXPECT_EQ(\"42\", headers.get(lcKey3)->value().getStringView());\n  EXPECT_EQ(2UL, headers.get(lcKey3)->value().size());\n\n  LowerCaseString envoy_retry_on(\"x-envoy-retry-on\");\n  headers.addCopy(envoy_retry_on, \"max-age=1345\");\n  EXPECT_EQ(\"max-age=1345\", headers.get(envoy_retry_on)->value().getStringView());\n  EXPECT_EQ(\"max-age=1345\", headers.getEnvoyRetryOnValue());\n  headers.addCopy(envoy_retry_on, \"public\");\n  EXPECT_EQ(\"max-age=1345,public\", headers.get(envoy_retry_on)->value().getStringView());\n  headers.addCopy(envoy_retry_on, \"\");\n  EXPECT_EQ(\"max-age=1345,public\", headers.get(envoy_retry_on)->value().getStringView());\n  headers.addCopy(envoy_retry_on, 123);\n  EXPECT_EQ(\"max-age=1345,public,123\", headers.get(envoy_retry_on)->value().getStringView());\n  headers.addCopy(envoy_retry_on, std::numeric_limits<uint64_t>::max());\n  EXPECT_EQ(\"max-age=1345,public,123,18446744073709551615\",\n            headers.get(envoy_retry_on)->value().getStringView());\n}\n\nTEST_P(HeaderMapImplTest, Equality) {\n  TestRequestHeaderMapImpl headers1;\n  TestRequestHeaderMapImpl headers2;\n  EXPECT_EQ(headers1, headers2);\n\n  headers1.addCopy(LowerCaseString(\"hello\"), \"world\");\n  EXPECT_FALSE(headers1 == headers2);\n\n  headers2.addCopy(LowerCaseString(\"foo\"), \"bar\");\n  EXPECT_FALSE(headers1 == headers2);\n}\n\nTEST_P(HeaderMapImplTest, 
LargeCharInHeader) {\n  TestRequestHeaderMapImpl headers;\n  LowerCaseString static_key(\"\\x90hello\");\n  std::string ref_value(\"value\");\n  headers.addReference(static_key, ref_value);\n  EXPECT_EQ(\"value\", headers.get(static_key)->value().getStringView());\n}\n\nTEST_P(HeaderMapImplTest, Iterate) {\n  TestRequestHeaderMapImpl headers;\n  headers.addCopy(LowerCaseString(\"hello\"), \"world\");\n  headers.addCopy(LowerCaseString(\"foo\"), \"xxx\");\n  headers.addCopy(LowerCaseString(\"world\"), \"hello\");\n  LowerCaseString foo_key(\"foo\");\n  headers.setReferenceKey(foo_key, \"bar\"); // set moves key to end\n\n  HeaderAndValueCb cb;\n\n  InSequence seq;\n  EXPECT_CALL(cb, Call(\"hello\", \"world\"));\n  EXPECT_CALL(cb, Call(\"world\", \"hello\"));\n  EXPECT_CALL(cb, Call(\"foo\", \"bar\"));\n  headers.iterate(cb.asIterateCb());\n}\n\nTEST_P(HeaderMapImplTest, IterateReverse) {\n  TestRequestHeaderMapImpl headers;\n  headers.addCopy(LowerCaseString(\"hello\"), \"world\");\n  headers.addCopy(LowerCaseString(\"foo\"), \"bar\");\n  LowerCaseString world_key(\"world\");\n  headers.setReferenceKey(world_key, \"hello\");\n\n  HeaderAndValueCb cb;\n\n  InSequence seq;\n  EXPECT_CALL(cb, Call(\"world\", \"hello\"));\n  EXPECT_CALL(cb, Call(\"foo\", \"bar\"));\n  // no \"hello\"\n  headers.iterateReverse([&cb](const Http::HeaderEntry& header) -> HeaderMap::Iterate {\n    cb.Call(std::string(header.key().getStringView()), std::string(header.value().getStringView()));\n    if (header.key().getStringView() != \"foo\") {\n      return HeaderMap::Iterate::Continue;\n    } else {\n      return HeaderMap::Iterate::Break;\n    }\n  });\n}\n\nTEST_P(HeaderMapImplTest, Get) {\n  {\n    auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), \"/\"}, {\"hello\", \"world\"}});\n    EXPECT_EQ(\"/\", headers.get(LowerCaseString(\":path\"))->value().getStringView());\n    EXPECT_EQ(\"world\", headers.get(LowerCaseString(\"hello\"))->value().getStringView());\n    
EXPECT_EQ(nullptr, headers.get(LowerCaseString(\"foo\")));\n  }\n\n  {\n    auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), \"/\"}, {\"hello\", \"world\"}});\n    // There is not HeaderMap method to set a header and copy both the key and value.\n    const LowerCaseString path(\":path\");\n    headers.setReferenceKey(path, \"/new_path\");\n    EXPECT_EQ(\"/new_path\", headers.get(LowerCaseString(\":path\"))->value().getStringView());\n    const LowerCaseString foo(\"hello\");\n    headers.setReferenceKey(foo, \"world2\");\n    EXPECT_EQ(\"world2\", headers.get(foo)->value().getStringView());\n    EXPECT_EQ(nullptr, headers.get(LowerCaseString(\"foo\")));\n  }\n}\n\nTEST_P(HeaderMapImplTest, CreateHeaderMapFromIterator) {\n  std::vector<std::pair<LowerCaseString, std::string>> iter_headers{\n      {LowerCaseString(Headers::get().Path), \"/\"}, {LowerCaseString(\"hello\"), \"world\"}};\n  auto headers = createHeaderMap<RequestHeaderMapImpl>(iter_headers.cbegin(), iter_headers.cend());\n  EXPECT_EQ(\"/\", headers->get(LowerCaseString(\":path\"))->value().getStringView());\n  EXPECT_EQ(\"world\", headers->get(LowerCaseString(\"hello\"))->value().getStringView());\n  EXPECT_EQ(nullptr, headers->get(LowerCaseString(\"foo\")));\n}\n\nTEST_P(HeaderMapImplTest, TestHeaderList) {\n  std::array<std::string, 2> keys{Headers::get().Path.get(), \"hello\"};\n  std::array<std::string, 2> values{\"/\", \"world\"};\n\n  auto headers = TestRequestHeaderMapImpl({{keys[0], values[0]}, {keys[1], values[1]}});\n  HeaderListView header_list(headers);\n  auto to_string_views =\n      [](const HeaderListView::HeaderStringRefs& strs) -> std::vector<absl::string_view> {\n    std::vector<absl::string_view> str_views(strs.size());\n    std::transform(strs.begin(), strs.end(), str_views.begin(),\n                   [](auto value) -> absl::string_view { return value.get().getStringView(); });\n    return str_views;\n  };\n\n  EXPECT_THAT(to_string_views(header_list.keys()), 
ElementsAre(\":path\", \"hello\"));\n  EXPECT_THAT(to_string_views(header_list.values()), ElementsAre(\"/\", \"world\"));\n}\n\nTEST_P(HeaderMapImplTest, TestAppendHeader) {\n  // Test appending to a string with a value.\n  {\n    TestRequestHeaderMapImpl headers;\n    LowerCaseString foo(\"key1\");\n    headers.addCopy(foo, \"some;\");\n    headers.appendCopy(foo, \"test\");\n    EXPECT_EQ(headers.get(foo)->value().getStringView(), \"some;,test\");\n  }\n\n  // Test appending to an empty string.\n  {\n    TestRequestHeaderMapImpl headers;\n    LowerCaseString key2(\"key2\");\n    headers.appendCopy(key2, \"my tag data\");\n    EXPECT_EQ(headers.get(key2)->value().getStringView(), \"my tag data\");\n  }\n\n  // Test empty data case.\n  {\n    TestRequestHeaderMapImpl headers;\n    LowerCaseString key3(\"key3\");\n    headers.addCopy(key3, \"empty\");\n    headers.appendCopy(key3, \"\");\n    EXPECT_EQ(headers.get(key3)->value().getStringView(), \"empty\");\n  }\n  // Regression test for appending to an empty string with a short string, then\n  // setting integer.\n  {\n    TestRequestHeaderMapImpl headers;\n    const std::string empty;\n    headers.setPath(empty);\n    // Append with default delimiter.\n    headers.appendPath(\" \", \",\");\n    headers.setPath(0);\n    EXPECT_EQ(\"0\", headers.getPathValue());\n    EXPECT_EQ(1U, headers.Path()->value().size());\n  }\n  // Test append for inline headers using this method and append##name.\n  {\n    TestRequestHeaderMapImpl headers;\n    headers.addCopy(Headers::get().Via, \"1.0 fred\");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred\");\n    headers.appendCopy(Headers::get().Via, \"1.1 p.example.net\");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred,1.1 p.example.net\");\n    headers.appendVia(\"1.1 new.example.net\", \",\");\n    EXPECT_EQ(headers.getViaValue(), \"1.0 fred,1.1 p.example.net,1.1 new.example.net\");\n  }\n}\n\nTEST(TestHeaderMapImplDeathTest, TestHeaderLengthChecks) {\n  HeaderString value;\n  
value.setCopy(\"some;\");\n  EXPECT_DEATH(value.append(nullptr, std::numeric_limits<uint32_t>::max()),\n               \"Trying to allocate overly large headers.\");\n\n  std::string source(\"hello\");\n  HeaderString reference;\n  reference.setReference(source);\n  EXPECT_DEATH(reference.append(nullptr, std::numeric_limits<uint32_t>::max()),\n               \"Trying to allocate overly large headers.\");\n}\n\nTEST_P(HeaderMapImplTest, PseudoHeaderOrder) {\n  HeaderAndValueCb cb;\n\n  {\n    LowerCaseString foo(\"hello\");\n    Http::TestRequestHeaderMapImpl headers{};\n    EXPECT_EQ(0UL, headers.size());\n    EXPECT_TRUE(headers.empty());\n\n    headers.addReferenceKey(foo, \"world\");\n    EXPECT_EQ(1UL, headers.size());\n    EXPECT_FALSE(headers.empty());\n\n    headers.setReferenceKey(Headers::get().ContentType, \"text/html\");\n    EXPECT_EQ(2UL, headers.size());\n    EXPECT_FALSE(headers.empty());\n\n    // Pseudo header gets inserted before non-pseudo headers\n    headers.setReferenceKey(Headers::get().Method, \"PUT\");\n    EXPECT_EQ(3UL, headers.size());\n    EXPECT_FALSE(headers.empty());\n\n    InSequence seq;\n    EXPECT_CALL(cb, Call(\":method\", \"PUT\"));\n    EXPECT_CALL(cb, Call(\"hello\", \"world\"));\n    EXPECT_CALL(cb, Call(\"content-type\", \"text/html\"));\n\n    headers.iterate(cb.asIterateCb());\n\n    // Removal of the header before which pseudo-headers are inserted\n    EXPECT_EQ(1UL, headers.remove(foo));\n    EXPECT_EQ(2UL, headers.size());\n    EXPECT_FALSE(headers.empty());\n\n    EXPECT_CALL(cb, Call(\":method\", \"PUT\"));\n    EXPECT_CALL(cb, Call(\"content-type\", \"text/html\"));\n\n    headers.iterate(cb.asIterateCb());\n\n    // Next pseudo-header goes after other pseudo-headers, but before normal headers\n    headers.setReferenceKey(Headers::get().Path, \"/test\");\n    EXPECT_EQ(3UL, headers.size());\n    EXPECT_FALSE(headers.empty());\n\n    EXPECT_CALL(cb, Call(\":method\", \"PUT\"));\n    EXPECT_CALL(cb, Call(\":path\", 
\"/test\"));\n    EXPECT_CALL(cb, Call(\"content-type\", \"text/html\"));\n\n    headers.iterate(cb.asIterateCb());\n\n    // Removing the last normal header\n    EXPECT_EQ(1UL, headers.remove(Headers::get().ContentType));\n    EXPECT_EQ(2UL, headers.size());\n    EXPECT_FALSE(headers.empty());\n\n    EXPECT_CALL(cb, Call(\":method\", \"PUT\"));\n    EXPECT_CALL(cb, Call(\":path\", \"/test\"));\n\n    headers.iterate(cb.asIterateCb());\n\n    // Adding a new pseudo-header after removing the last normal header\n    headers.setReferenceKey(Headers::get().Host, \"host\");\n    EXPECT_EQ(3UL, headers.size());\n    EXPECT_FALSE(headers.empty());\n\n    EXPECT_CALL(cb, Call(\":method\", \"PUT\"));\n    EXPECT_CALL(cb, Call(\":path\", \"/test\"));\n    EXPECT_CALL(cb, Call(\":authority\", \"host\"));\n\n    headers.iterate(cb.asIterateCb());\n\n    // Adding the first normal header\n    headers.setReferenceKey(Headers::get().ContentType, \"text/html\");\n    EXPECT_EQ(4UL, headers.size());\n    EXPECT_FALSE(headers.empty());\n\n    EXPECT_CALL(cb, Call(\":method\", \"PUT\"));\n    EXPECT_CALL(cb, Call(\":path\", \"/test\"));\n    EXPECT_CALL(cb, Call(\":authority\", \"host\"));\n    EXPECT_CALL(cb, Call(\"content-type\", \"text/html\"));\n\n    headers.iterate(cb.asIterateCb());\n\n    // Removing all pseudo-headers\n    EXPECT_EQ(1UL, headers.remove(Headers::get().Path));\n    EXPECT_EQ(1UL, headers.remove(Headers::get().Method));\n    EXPECT_EQ(1UL, headers.remove(Headers::get().Host));\n    EXPECT_EQ(1UL, headers.size());\n    EXPECT_FALSE(headers.empty());\n\n    EXPECT_CALL(cb, Call(\"content-type\", \"text/html\"));\n\n    headers.iterate(cb.asIterateCb());\n\n    // Removing all headers\n    EXPECT_EQ(1UL, headers.remove(Headers::get().ContentType));\n    EXPECT_EQ(0UL, headers.size());\n    EXPECT_TRUE(headers.empty());\n\n    // Adding a lone pseudo-header\n    headers.setReferenceKey(Headers::get().Status, \"200\");\n    EXPECT_EQ(1UL, headers.size());\n    
EXPECT_FALSE(headers.empty());\n\n    EXPECT_CALL(cb, Call(\":status\", \"200\"));\n\n    headers.iterate(cb.asIterateCb());\n  }\n\n  // Starting with a normal header\n  {\n    auto headers = TestRequestHeaderMapImpl({{Headers::get().ContentType.get(), \"text/plain\"},\n                                             {Headers::get().Method.get(), \"GET\"},\n                                             {Headers::get().Path.get(), \"/\"},\n                                             {\"hello\", \"world\"},\n                                             {Headers::get().Host.get(), \"host\"}});\n\n    InSequence seq;\n    EXPECT_CALL(cb, Call(\":method\", \"GET\"));\n    EXPECT_CALL(cb, Call(\":path\", \"/\"));\n    EXPECT_CALL(cb, Call(\":authority\", \"host\"));\n    EXPECT_CALL(cb, Call(\"content-type\", \"text/plain\"));\n    EXPECT_CALL(cb, Call(\"hello\", \"world\"));\n\n    headers.iterate(cb.asIterateCb());\n  }\n\n  // Starting with a pseudo-header\n  {\n    auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), \"/\"},\n                                             {Headers::get().ContentType.get(), \"text/plain\"},\n                                             {Headers::get().Method.get(), \"GET\"},\n                                             {\"hello\", \"world\"},\n                                             {Headers::get().Host.get(), \"host\"}});\n\n    InSequence seq;\n    EXPECT_CALL(cb, Call(\":path\", \"/\"));\n    EXPECT_CALL(cb, Call(\":method\", \"GET\"));\n    EXPECT_CALL(cb, Call(\":authority\", \"host\"));\n    EXPECT_CALL(cb, Call(\"content-type\", \"text/plain\"));\n    EXPECT_CALL(cb, Call(\"hello\", \"world\"));\n\n    headers.iterate(cb.asIterateCb());\n  }\n}\n\n// Validate that TestRequestHeaderMapImpl copy construction and assignment works. 
This is a\n// regression for where we were missing a valid copy constructor and had the\n// default (dangerous) move semantics takeover.\nTEST_P(HeaderMapImplTest, TestRequestHeaderMapImplCopy) {\n  TestRequestHeaderMapImpl foo;\n  foo.addCopy(LowerCaseString(\"foo\"), \"bar\");\n  auto headers = std::make_unique<TestRequestHeaderMapImpl>(foo);\n  EXPECT_EQ(\"bar\", headers->get(LowerCaseString(\"foo\"))->value().getStringView());\n  TestRequestHeaderMapImpl baz{{\"foo\", \"baz\"}};\n  baz = *headers;\n  EXPECT_EQ(\"bar\", baz.get(LowerCaseString(\"foo\"))->value().getStringView());\n  const TestRequestHeaderMapImpl& baz2 = baz;\n  baz = baz2;\n  EXPECT_EQ(\"bar\", baz.get(LowerCaseString(\"foo\"))->value().getStringView());\n}\n\n// Make sure 'host' -> ':authority' auto translation only occurs for request headers.\nTEST_P(HeaderMapImplTest, HostHeader) {\n  TestRequestHeaderMapImpl request_headers{{\"host\", \"foo\"}};\n  EXPECT_EQ(request_headers.size(), 1);\n  EXPECT_EQ(request_headers.get_(\":authority\"), \"foo\");\n\n  TestRequestTrailerMapImpl request_trailers{{\"host\", \"foo\"}};\n  EXPECT_EQ(request_trailers.size(), 1);\n  EXPECT_EQ(request_trailers.get_(\"host\"), \"foo\");\n\n  TestResponseHeaderMapImpl response_headers{{\"host\", \"foo\"}};\n  EXPECT_EQ(response_headers.size(), 1);\n  EXPECT_EQ(response_headers.get_(\"host\"), \"foo\");\n\n  TestResponseTrailerMapImpl response_trailers{{\"host\", \"foo\"}};\n  EXPECT_EQ(response_trailers.size(), 1);\n  EXPECT_EQ(response_trailers.get_(\"host\"), \"foo\");\n}\n\nTEST_P(HeaderMapImplTest, TestInlineHeaderAdd) {\n  TestRequestHeaderMapImpl foo;\n  foo.addCopy(LowerCaseString(\":path\"), \"GET\");\n  EXPECT_EQ(foo.size(), 1);\n  EXPECT_TRUE(foo.Path() != nullptr);\n}\n\nTEST_P(HeaderMapImplTest, ClearHeaderMap) {\n  TestRequestHeaderMapImpl headers;\n  LowerCaseString static_key(\"hello\");\n  std::string ref_value(\"value\");\n\n  // Add random header and then clear.\n  headers.addReference(static_key, 
ref_value);\n  EXPECT_EQ(\"value\", headers.get(static_key)->value().getStringView());\n  EXPECT_TRUE(headers.get(static_key)->value().isReference());\n  EXPECT_EQ(1UL, headers.size());\n  EXPECT_FALSE(headers.empty());\n  headers.clear();\n  EXPECT_EQ(nullptr, headers.get(static_key));\n  EXPECT_EQ(0UL, headers.size());\n  EXPECT_EQ(headers.byteSize(), 0);\n  EXPECT_TRUE(headers.empty());\n\n  // Add inline and clear.\n  headers.setContentLength(5);\n  EXPECT_EQ(\"5\", headers.getContentLengthValue());\n  EXPECT_EQ(1UL, headers.size());\n  EXPECT_FALSE(headers.empty());\n  headers.clear();\n  EXPECT_EQ(nullptr, headers.ContentLength());\n  EXPECT_EQ(0UL, headers.size());\n  EXPECT_EQ(headers.byteSize(), 0);\n  EXPECT_TRUE(headers.empty());\n\n  // Add mixture of headers.\n  headers.addReference(static_key, ref_value);\n  headers.setContentLength(5);\n  headers.addCopy(static_key, \"new_value\");\n  EXPECT_EQ(3UL, headers.size());\n  EXPECT_FALSE(headers.empty());\n  headers.clear();\n  EXPECT_EQ(nullptr, headers.ContentLength());\n  EXPECT_EQ(0UL, headers.size());\n  EXPECT_EQ(headers.byteSize(), 0);\n  EXPECT_TRUE(headers.empty());\n}\n\n// Validates byte size is properly accounted for in different inline header setting scenarios.\nTEST_P(HeaderMapImplTest, InlineHeaderByteSize) {\n  {\n    TestRequestHeaderMapImpl headers;\n    std::string foo = \"foo\";\n    headers.setHost(foo);\n    EXPECT_EQ(headers.byteSize(), 13);\n  }\n  {\n    // Overwrite an inline headers with set.\n    TestRequestHeaderMapImpl headers;\n    std::string foo = \"foo\";\n    headers.setHost(foo);\n    std::string big_foo = \"big_foo\";\n    headers.setHost(big_foo);\n    EXPECT_EQ(headers.byteSize(), 17);\n  }\n  {\n    // Overwrite an inline headers with setReference and clear.\n    TestRequestHeaderMapImpl headers;\n    std::string foo = \"foo\";\n    headers.setHost(foo);\n    EXPECT_EQ(headers.byteSize(), 13);\n    std::string big_foo = \"big_foo\";\n    
headers.setReferenceHost(big_foo);\n    EXPECT_EQ(headers.byteSize(), 17);\n    EXPECT_EQ(1UL, headers.removeHost());\n    EXPECT_EQ(headers.byteSize(), 0);\n  }\n  {\n    // Overwrite an inline headers with set integer value.\n    TestResponseHeaderMapImpl headers;\n    uint64_t status = 200;\n    headers.setStatus(status);\n    EXPECT_EQ(headers.byteSize(), 10);\n    uint64_t newStatus = 500;\n    headers.setStatus(newStatus);\n    EXPECT_EQ(headers.byteSize(), 10);\n    EXPECT_EQ(1UL, headers.removeStatus());\n    EXPECT_EQ(headers.byteSize(), 0);\n  }\n  {\n    // Set an inline header, remove, and rewrite.\n    TestResponseHeaderMapImpl headers;\n    uint64_t status = 200;\n    headers.setStatus(status);\n    EXPECT_EQ(headers.byteSize(), 10);\n    EXPECT_EQ(1UL, headers.removeStatus());\n    EXPECT_EQ(headers.byteSize(), 0);\n    uint64_t newStatus = 500;\n    headers.setStatus(newStatus);\n    EXPECT_EQ(headers.byteSize(), 10);\n  }\n}\n\nTEST_P(HeaderMapImplTest, ValidHeaderString) {\n  EXPECT_TRUE(validHeaderString(\"abc\"));\n  EXPECT_FALSE(validHeaderString(absl::string_view(\"a\\000bc\", 4)));\n  EXPECT_FALSE(validHeaderString(\"abc\\n\"));\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/header_utility_test.cc",
    "content": "#include <regex>\n#include <vector>\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/protocol.h\"\n#include \"envoy/json/json_object.h\"\n\n#include \"common/http/header_utility.h\"\n#include \"common/json/json_loader.h\"\n\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nenvoy::config::route::v3::HeaderMatcher parseHeaderMatcherFromYaml(const std::string& yaml) {\n  envoy::config::route::v3::HeaderMatcher header_matcher;\n  TestUtility::loadFromYaml(yaml, header_matcher);\n  return header_matcher;\n}\n\nclass HeaderUtilityTest : public testing::Test {\npublic:\n  const HeaderEntry& hostHeaderEntry(const std::string& host_value, bool set_connect = false) {\n    headers_.setHost(host_value);\n    if (set_connect) {\n      headers_.setMethod(Http::Headers::get().MethodValues.Connect);\n    }\n    return *headers_.Host();\n  }\n  TestRequestHeaderMapImpl headers_;\n};\n\n// Port's part from host header get removed\nTEST_F(HeaderUtilityTest, RemovePortsFromHost) {\n  const std::vector<std::pair<std::string, std::string>> host_headers{\n      {\"localhost\", \"localhost\"},           // w/o port part\n      {\"localhost:443\", \"localhost\"},       // name w/ port\n      {\"\", \"\"},                             // empty\n      {\":443\", \"\"},                         // just port\n      {\"192.168.1.1\", \"192.168.1.1\"},       // ipv4\n      {\"192.168.1.1:443\", \"192.168.1.1\"},   // ipv4 w/ port\n      {\"[fc00::1]:443\", \"[fc00::1]\"},       // ipv6 w/ port\n      {\"[fc00::1]\", \"[fc00::1]\"},           // ipv6\n      {\":\", \":\"},                           // malformed string #1\n      {\"]:\", \"]:\"},                         // malformed string #2\n      {\":abc\", \":abc\"},                     // malformed string #3\n      {\"localhost:80\", \"localhost:80\"},     // port not matching w/ 
hostname\n      {\"192.168.1.1:80\", \"192.168.1.1:80\"}, // port not matching w/ ipv4\n      {\"[fc00::1]:80\", \"[fc00::1]:80\"}      // port not matching w/ ipv6\n  };\n\n  for (const auto& host_pair : host_headers) {\n    auto& host_header = hostHeaderEntry(host_pair.first);\n    HeaderUtility::stripPortFromHost(headers_, 443);\n    EXPECT_EQ(host_header.value().getStringView(), host_pair.second);\n  }\n}\n\n// Port's part from host header won't be removed if method is \"connect\"\nTEST_F(HeaderUtilityTest, RemovePortsFromHostConnect) {\n  const std::vector<std::pair<std::string, std::string>> host_headers{\n      {\"localhost:443\", \"localhost:443\"},\n  };\n  for (const auto& host_pair : host_headers) {\n    auto& host_header = hostHeaderEntry(host_pair.first, true);\n    HeaderUtility::stripPortFromHost(headers_, 443);\n    EXPECT_EQ(host_header.value().getStringView(), host_pair.second);\n  }\n}\n\nTEST(GetAllOfHeaderAsStringTest, All) {\n  const LowerCaseString test_header(\"test\");\n  {\n    TestRequestHeaderMapImpl headers;\n    const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header);\n    EXPECT_FALSE(ret.result().has_value());\n    EXPECT_TRUE(ret.backingString().empty());\n  }\n  {\n    TestRequestHeaderMapImpl headers{{\"test\", \"foo\"}};\n    const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header);\n    EXPECT_EQ(\"foo\", ret.result().value());\n    EXPECT_TRUE(ret.backingString().empty());\n  }\n  {\n    TestRequestHeaderMapImpl headers{{\"test\", \"foo\"}, {\"test\", \"bar\"}};\n    const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header);\n    EXPECT_EQ(\"foo,bar\", ret.result().value());\n    EXPECT_EQ(\"foo,bar\", ret.backingString());\n  }\n  {\n    TestRequestHeaderMapImpl headers{{\"test\", \"\"}, {\"test\", \"bar\"}};\n    const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header);\n    EXPECT_EQ(\",bar\", ret.result().value());\n    EXPECT_EQ(\",bar\", 
ret.backingString());\n  }\n  {\n    TestRequestHeaderMapImpl headers{{\"test\", \"\"}, {\"test\", \"\"}};\n    const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header);\n    EXPECT_EQ(\",\", ret.result().value());\n    EXPECT_EQ(\",\", ret.backingString());\n  }\n  {\n    TestRequestHeaderMapImpl headers{\n        {\"test\", \"a\"}, {\"test\", \"b\"}, {\"test\", \"c\"}, {\"test\", \"\"}, {\"test\", \"\"}};\n    const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header);\n    EXPECT_EQ(\"a,b,c,,\", ret.result().value());\n    EXPECT_EQ(\"a,b,c,,\", ret.backingString());\n    // Make sure copying the return value works correctly.\n    const auto ret2 = ret; // NOLINT(performance-unnecessary-copy-initialization)\n    EXPECT_EQ(ret2.result(), ret.result());\n    EXPECT_EQ(ret2.backingString(), ret.backingString());\n    EXPECT_EQ(ret2.result().value().data(), ret2.backingString().data());\n    EXPECT_NE(ret2.result().value().data(), ret.backingString().data());\n  }\n}\n\nTEST(HeaderDataConstructorTest, NoSpecifierSet) {\n  const std::string yaml = R\"EOF(\nname: test-header\n  )EOF\";\n\n  HeaderUtility::HeaderData header_data =\n      HeaderUtility::HeaderData(parseHeaderMatcherFromYaml(yaml));\n\n  EXPECT_EQ(\"test-header\", header_data.name_.get());\n  EXPECT_EQ(HeaderUtility::HeaderMatchType::Present, header_data.header_match_type_);\n}\n\nTEST(HeaderDataConstructorTest, ExactMatchSpecifier) {\n  const std::string yaml = R\"EOF(\nname: test-header\nexact_match: value\n  )EOF\";\n\n  HeaderUtility::HeaderData header_data =\n      HeaderUtility::HeaderData(parseHeaderMatcherFromYaml(yaml));\n\n  EXPECT_EQ(\"test-header\", header_data.name_.get());\n  EXPECT_EQ(HeaderUtility::HeaderMatchType::Value, header_data.header_match_type_);\n  EXPECT_EQ(\"value\", header_data.value_);\n}\n\nTEST(HeaderDataConstructorTest, RegexMatchSpecifier) {\n  const std::string yaml = R\"EOF(\nname: test-header\nregex_match: value\n  )EOF\";\n\n  
HeaderUtility::HeaderData header_data =\n      HeaderUtility::HeaderData(parseHeaderMatcherFromYaml(yaml));\n\n  EXPECT_EQ(\"test-header\", header_data.name_.get());\n  EXPECT_EQ(HeaderUtility::HeaderMatchType::Regex, header_data.header_match_type_);\n  EXPECT_EQ(\"\", header_data.value_);\n}\n\nTEST(HeaderDataConstructorTest, RangeMatchSpecifier) {\n  const std::string yaml = R\"EOF(\nname: test-header\nrange_match:\n  start: 0\n  end: -10\n  )EOF\";\n\n  HeaderUtility::HeaderData header_data =\n      HeaderUtility::HeaderData(parseHeaderMatcherFromYaml(yaml));\n\n  EXPECT_EQ(\"test-header\", header_data.name_.get());\n  EXPECT_EQ(HeaderUtility::HeaderMatchType::Range, header_data.header_match_type_);\n  EXPECT_EQ(\"\", header_data.value_);\n  EXPECT_EQ(0, header_data.range_.start());\n  EXPECT_EQ(-10, header_data.range_.end());\n}\n\nTEST(HeaderDataConstructorTest, PresentMatchSpecifier) {\n  const std::string yaml = R\"EOF(\nname: test-header\npresent_match: true\n  )EOF\";\n\n  HeaderUtility::HeaderData header_data =\n      HeaderUtility::HeaderData(parseHeaderMatcherFromYaml(yaml));\n\n  EXPECT_EQ(\"test-header\", header_data.name_.get());\n  EXPECT_EQ(HeaderUtility::HeaderMatchType::Present, header_data.header_match_type_);\n  EXPECT_EQ(\"\", header_data.value_);\n}\n\nTEST(HeaderDataConstructorTest, PrefixMatchSpecifier) {\n  const std::string yaml = R\"EOF(\nname: test-header\nprefix_match: value\n  )EOF\";\n\n  HeaderUtility::HeaderData header_data =\n      HeaderUtility::HeaderData(parseHeaderMatcherFromYaml(yaml));\n\n  EXPECT_EQ(\"test-header\", header_data.name_.get());\n  EXPECT_EQ(HeaderUtility::HeaderMatchType::Prefix, header_data.header_match_type_);\n  EXPECT_EQ(\"value\", header_data.value_);\n}\n\nTEST(HeaderDataConstructorTest, SuffixMatchSpecifier) {\n  const std::string yaml = R\"EOF(\nname: test-header\nsuffix_match: value\n  )EOF\";\n\n  HeaderUtility::HeaderData header_data =\n      
HeaderUtility::HeaderData(parseHeaderMatcherFromYaml(yaml));\n\n  EXPECT_EQ(\"test-header\", header_data.name_.get());\n  EXPECT_EQ(HeaderUtility::HeaderMatchType::Suffix, header_data.header_match_type_);\n  EXPECT_EQ(\"value\", header_data.value_);\n}\n\nTEST(HeaderDataConstructorTest, ContainsMatchSpecifier) {\n  const std::string yaml = R\"EOF(\nname: test-header\ncontains_match: somevalueinside\n  )EOF\";\n\n  HeaderUtility::HeaderData header_data =\n      HeaderUtility::HeaderData(parseHeaderMatcherFromYaml(yaml));\n\n  EXPECT_EQ(\"test-header\", header_data.name_.get());\n  EXPECT_EQ(HeaderUtility::HeaderMatchType::Contains, header_data.header_match_type_);\n  EXPECT_EQ(\"somevalueinside\", header_data.value_);\n}\n\nTEST(HeaderDataConstructorTest, InvertMatchSpecifier) {\n  const std::string yaml = R\"EOF(\nname: test-header\nexact_match: value\ninvert_match: true\n)EOF\";\n\n  HeaderUtility::HeaderData header_data =\n      HeaderUtility::HeaderData(parseHeaderMatcherFromYaml(yaml));\n\n  EXPECT_EQ(\"test-header\", header_data.name_.get());\n  EXPECT_EQ(HeaderUtility::HeaderMatchType::Value, header_data.header_match_type_);\n  EXPECT_EQ(\"value\", header_data.value_);\n  EXPECT_EQ(true, header_data.invert_match_);\n}\n\nTEST(HeaderDataConstructorTest, GetAllOfHeader) {\n  TestRequestHeaderMapImpl headers{\n      {\"foo\", \"val1\"}, {\"bar\", \"bar2\"}, {\"foo\", \"eep, bar\"}, {\"foo\", \"\"}};\n\n  std::vector<absl::string_view> foo_out;\n  Http::HeaderUtility::getAllOfHeader(headers, \"foo\", foo_out);\n  ASSERT_EQ(foo_out.size(), 3);\n  ASSERT_EQ(foo_out[0], \"val1\");\n  ASSERT_EQ(foo_out[1], \"eep, bar\");\n  ASSERT_EQ(foo_out[2], \"\");\n\n  std::vector<absl::string_view> bar_out;\n  Http::HeaderUtility::getAllOfHeader(headers, \"bar\", bar_out);\n  ASSERT_EQ(bar_out.size(), 1);\n  ASSERT_EQ(bar_out[0], \"bar2\");\n\n  std::vector<absl::string_view> eep_out;\n  Http::HeaderUtility::getAllOfHeader(headers, \"eep\", eep_out);\n  
ASSERT_EQ(eep_out.size(), 0);\n}\n\nTEST(MatchHeadersTest, MayMatchOneOrMoreRequestHeader) {\n  TestRequestHeaderMapImpl headers{{\"some-header\", \"a\"}, {\"other-header\", \"b\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\nregex_match: (a|b)\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(headers, header_data));\n\n  headers.addCopy(\"match-header\", \"a\");\n  // With a single \"match-header\" this regex will match.\n  EXPECT_TRUE(HeaderUtility::matchHeaders(headers, header_data));\n\n  headers.addCopy(\"match-header\", \"b\");\n  // With two \"match-header\" we now logically have \"a,b\" as the value, so the regex will not match.\n  EXPECT_FALSE(HeaderUtility::matchHeaders(headers, header_data));\n\n  header_data[0] = std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(R\"EOF(\nname: match-header\nexact_match: a,b\n  )EOF\"));\n  // Make sure that an exact match on \"a,b\" does in fact work.\n  EXPECT_TRUE(HeaderUtility::matchHeaders(headers, header_data));\n\n  TestScopedRuntime runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.http_match_on_all_headers\", \"false\"}});\n  // Flipping runtime to false should make \"a,b\" no longer match because we will match on the first\n  // header only.\n  EXPECT_FALSE(HeaderUtility::matchHeaders(headers, header_data));\n\n  header_data[0] = std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(R\"EOF(\nname: match-header\nexact_match: a\n  )EOF\"));\n  // With runtime off, exact match on \"a\" should pass.\n  EXPECT_TRUE(HeaderUtility::matchHeaders(headers, header_data));\n}\n\nTEST(MatchHeadersTest, MustMatchAllHeaderData) {\n  TestRequestHeaderMapImpl matching_headers_1{{\"match-header-A\", \"1\"}, {\"match-header-B\", \"2\"}};\n  
TestRequestHeaderMapImpl matching_headers_2{\n      {\"match-header-A\", \"3\"}, {\"match-header-B\", \"4\"}, {\"match-header-C\", \"5\"}};\n  TestRequestHeaderMapImpl unmatching_headers_1{{\"match-header-A\", \"6\"}};\n  TestRequestHeaderMapImpl unmatching_headers_2{{\"match-header-B\", \"7\"}};\n  TestRequestHeaderMapImpl unmatching_headers_3{{\"match-header-A\", \"8\"}, {\"match-header-C\", \"9\"}};\n  TestRequestHeaderMapImpl unmatching_headers_4{{\"match-header-C\", \"10\"}, {\"match-header-D\", \"11\"}};\n\n  const std::string yamlA = R\"EOF(\nname: match-header-A\n  )EOF\";\n\n  const std::string yamlB = R\"EOF(\nname: match-header-B\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yamlA)));\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yamlB)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers_1, header_data));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers_2, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers_1, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers_2, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers_3, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers_4, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderPresence) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"value\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"other-header\", \"value\"}};\n  const std::string yaml = R\"EOF(\nname: match-header\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  
EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderExactMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"match-value\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"other-value\"},\n                                              {\"other-header\", \"match-value\"}};\n  const std::string yaml = R\"EOF(\nname: match-header\nexact_match: match-value\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderExactMatchInverse) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"other-value\"},\n                                            {\"other-header\", \"match-value\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"match-value\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\nexact_match: match-value\ninvert_match: true\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderRegexMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"123\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"1234\"},\n                                              {\"match-header\", \"123.456\"}};\n  const std::string yaml = R\"EOF(\nname: match-header\nregex_match: \\d{3}\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  
header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderSafeRegexMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"123\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"1234\"},\n                                              {\"match-header\", \"123.456\"}};\n  const std::string yaml = R\"EOF(\nname: match-header\nsafe_regex_match:\n  google_re2: {}\n  regex: \\d{3}\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderRegexInverseMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"1234\"}, {\"match-header\", \"123.456\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"123\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\nregex_match: \\d{3}\ninvert_match: true\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderRangeMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"-1\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"0\"},\n                                              {\"match-header\", \"somestring\"},\n                                       
       {\"match-header\", \"10.9\"},\n                                              {\"match-header\", \"-1somestring\"}};\n  const std::string yaml = R\"EOF(\nname: match-header\nrange_match:\n  start: -10\n  end: 0\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderRangeInverseMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"0\"},\n                                            {\"match-header\", \"somestring\"},\n                                            {\"match-header\", \"10.9\"},\n                                            {\"match-header\", \"-1somestring\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"-1\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\nrange_match:\n  start: -10\n  end: 0\ninvert_match: true\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderPresentMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"123\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"nonmatch-header\", \"1234\"},\n                                              {\"other-nonmatch-header\", \"123.456\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\npresent_match: true\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      
std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderPresentInverseMatch) {\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"123\"}};\n  TestRequestHeaderMapImpl matching_headers{{\"nonmatch-header\", \"1234\"},\n                                            {\"other-nonmatch-header\", \"123.456\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\npresent_match: true\ninvert_match: true\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderPrefixMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"value123\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"123value\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\nprefix_match: value\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderPrefixInverseMatch) {\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"value123\"}};\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"123value\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\nprefix_match: value\ninvert_match: true\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  
header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderSuffixMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"123value\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"value123\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\nsuffix_match: value\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderSuffixInverseMatch) {\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"123value\"}};\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"value123\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\nsuffix_match: value\ninvert_match: true\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderContainsMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"123onevalue456\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"123anothervalue456\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\ncontains_match: onevalue\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      
std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n}\n\nTEST(MatchHeadersTest, HeaderContainsInverseMatch) {\n  TestRequestHeaderMapImpl matching_headers{{\"match-header\", \"123onevalue456\"}};\n  TestRequestHeaderMapImpl unmatching_headers{{\"match-header\", \"123anothervalue456\"}};\n\n  const std::string yaml = R\"EOF(\nname: match-header\ncontains_match: onevalue\ninvert_match: true\n  )EOF\";\n\n  std::vector<HeaderUtility::HeaderDataPtr> header_data;\n  header_data.push_back(\n      std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yaml)));\n  EXPECT_TRUE(HeaderUtility::matchHeaders(unmatching_headers, header_data));\n  EXPECT_FALSE(HeaderUtility::matchHeaders(matching_headers, header_data));\n}\n\nTEST(HeaderIsValidTest, InvalidHeaderValuesAreRejected) {\n  // ASCII values 1-31 are control characters (with the exception of ASCII\n  // values 9, 10, and 13 which are a horizontal tab, line feed, and carriage\n  // return, respectively), and are not valid in an HTTP header, per\n  // RFC 7230, section 3.2\n  for (int i = 0; i < 32; i++) {\n    if (i == 9) {\n      continue;\n    }\n\n    EXPECT_FALSE(HeaderUtility::headerValueIsValid(std::string(1, i)));\n  }\n}\n\nTEST(HeaderIsValidTest, ValidHeaderValuesAreAccepted) {\n  EXPECT_TRUE(HeaderUtility::headerValueIsValid(\"some-value\"));\n  EXPECT_TRUE(HeaderUtility::headerValueIsValid(\"Some Other Value\"));\n}\n\nTEST(HeaderIsValidTest, AuthorityIsValid) {\n  EXPECT_TRUE(HeaderUtility::authorityIsValid(\"strangebutlegal$-%&'\"));\n  EXPECT_FALSE(HeaderUtility::authorityIsValid(\"illegal{}\"));\n}\n\nTEST(HeaderIsValidTest, IsConnect) {\n  EXPECT_TRUE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{{\":method\", \"CONNECT\"}}));\n  
EXPECT_FALSE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"}}));\n  EXPECT_FALSE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{}));\n}\n\nTEST(HeaderIsValidTest, IsConnectResponse) {\n  RequestHeaderMapPtr connect_request{new TestRequestHeaderMapImpl{{\":method\", \"CONNECT\"}}};\n  RequestHeaderMapPtr get_request{new TestRequestHeaderMapImpl{{\":method\", \"GET\"}}};\n  TestResponseHeaderMapImpl success_response{{\":status\", \"200\"}};\n  TestResponseHeaderMapImpl failure_response{{\":status\", \"500\"}};\n\n  EXPECT_TRUE(HeaderUtility::isConnectResponse(connect_request.get(), success_response));\n  EXPECT_FALSE(HeaderUtility::isConnectResponse(connect_request.get(), failure_response));\n  EXPECT_FALSE(HeaderUtility::isConnectResponse(nullptr, success_response));\n  EXPECT_FALSE(HeaderUtility::isConnectResponse(get_request.get(), success_response));\n}\n\nTEST(HeaderAddTest, HeaderAdd) {\n  TestRequestHeaderMapImpl headers{{\"myheader1\", \"123value\"}};\n  TestRequestHeaderMapImpl headers_to_add{{\"myheader2\", \"456value\"}};\n\n  HeaderUtility::addHeaders(headers, headers_to_add);\n\n  headers_to_add.iterate([&headers](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate {\n    Http::LowerCaseString lower_key{std::string(entry.key().getStringView())};\n    EXPECT_EQ(entry.value().getStringView(), headers.get(lower_key)->value().getStringView());\n    return Http::HeaderMap::Iterate::Continue;\n  });\n}\n\nTEST(HeaderIsValidTest, HeaderNameContainsUnderscore) {\n  EXPECT_FALSE(HeaderUtility::headerNameContainsUnderscore(\"cookie\"));\n  EXPECT_FALSE(HeaderUtility::headerNameContainsUnderscore(\"x-something\"));\n  EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore(\"_cookie\"));\n  EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore(\"cookie_\"));\n  EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore(\"x_something\"));\n}\n\nTEST(PercentEncoding, ShouldCloseConnection) {\n  
EXPECT_TRUE(HeaderUtility::shouldCloseConnection(Protocol::Http10,\n                                                   TestRequestHeaderMapImpl{{\"foo\", \"bar\"}}));\n  EXPECT_FALSE(HeaderUtility::shouldCloseConnection(\n      Protocol::Http10, TestRequestHeaderMapImpl{{\"connection\", \"keep-alive\"}}));\n  EXPECT_FALSE(HeaderUtility::shouldCloseConnection(\n      Protocol::Http10, TestRequestHeaderMapImpl{{\"connection\", \"foo, keep-alive\"}}));\n\n  EXPECT_FALSE(HeaderUtility::shouldCloseConnection(Protocol::Http11,\n                                                    TestRequestHeaderMapImpl{{\"foo\", \"bar\"}}));\n  EXPECT_TRUE(HeaderUtility::shouldCloseConnection(\n      Protocol::Http11, TestRequestHeaderMapImpl{{\"connection\", \"close\"}}));\n  EXPECT_TRUE(HeaderUtility::shouldCloseConnection(\n      Protocol::Http11, TestRequestHeaderMapImpl{{\"connection\", \"te,close\"}}));\n  EXPECT_TRUE(HeaderUtility::shouldCloseConnection(\n      Protocol::Http11, TestRequestHeaderMapImpl{{\"proxy-connection\", \"close\"}}));\n  EXPECT_TRUE(HeaderUtility::shouldCloseConnection(\n      Protocol::Http11, TestRequestHeaderMapImpl{{\"proxy-connection\", \"foo,close\"}}));\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http1/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"header_formatter_test\",\n    srcs = [\"header_formatter_test.cc\"],\n    deps = [\n        \"//source/common/http/http1:header_formatter_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"codec_impl_test\",\n    srcs = [\"codec_impl_test.cc\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/http:exception_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http/http1:codec_legacy_lib\",\n        \"//source/common/http/http1:codec_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:test_runtime_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"conn_pool_test\",\n    srcs = [\"conn_pool_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/http:codec_client_lib\",\n        \"//source/common/http/http1:conn_pool_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/common/http:common_lib\",\n        
\"//test/common/upstream:utility_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:transport_socket_match_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/http/http1/codec_impl_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/codec.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/http1/codec_impl.h\"\n#include \"common/http/http1/codec_impl_legacy.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::StrictMock;\n\nnamespace Envoy {\nnamespace Http {\nnamespace {\nstd::string createHeaderFragment(int num_headers) {\n  // Create a header field with num_headers headers.\n  std::string headers;\n  for (int i = 0; i < num_headers; i++) {\n    headers += \"header\" + std::to_string(i) + \": \" + \"\\r\\n\";\n  }\n  return headers;\n}\n\nBuffer::OwnedImpl createBufferWithNByteSlices(absl::string_view input, size_t max_slice_size) {\n  Buffer::OwnedImpl buffer;\n  for (size_t offset = 0; offset < input.size(); offset += max_slice_size) {\n    buffer.appendSliceForTest(input.substr(offset, max_slice_size));\n  }\n  // Verify that the buffer contains the right number of slices.\n  ASSERT(buffer.getRawSlices().size() == (input.size() + max_slice_size - 1) / max_slice_size);\n  return buffer;\n}\n} // namespace\n\nclass Http1CodecTestBase {\nprotected:\n  Http::Http1::CodecStats& http1CodecStats() {\n    return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, store_);\n  }\n\n  
Stats::TestUtil::TestStore store_;\n  Http::Http1::CodecStats::AtomicPtr http1_codec_stats_;\n};\n\nclass Http1ServerConnectionImplTest : public Http1CodecTestBase,\n                                      public testing::TestWithParam<bool> {\npublic:\n  bool testingNewCodec() { return GetParam(); }\n\n  void initialize() {\n    if (testingNewCodec()) {\n      codec_ = std::make_unique<Http1::ServerConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n          max_request_headers_count_, headers_with_underscores_action_);\n    } else {\n      codec_ = std::make_unique<Legacy::Http1::ServerConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n          max_request_headers_count_, headers_with_underscores_action_);\n    }\n  }\n\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<Http::MockServerConnectionCallbacks> callbacks_;\n  NiceMock<Http1Settings> codec_settings_;\n  Http::ServerConnectionPtr codec_;\n\n  void expectHeadersTest(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& buffer,\n                         TestRequestHeaderMapImpl& expected_headers);\n  void expect400(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& buffer,\n                 absl::string_view details = \"\");\n  void testRequestHeadersExceedLimit(std::string header_string, absl::string_view details = \"\");\n  void testTrailersExceedLimit(std::string trailer_string, bool enable_trailers);\n  void testRequestHeadersAccepted(std::string header_string);\n  // Used to test if trailers are decoded/encoded\n  void expectTrailersTest(bool enable_trailers);\n\n  void testServerAllowChunkedContentLength(uint32_t content_length, bool allow_chunked_length);\n\n  // Send the request, and validate the received request headers.\n  // Then send a response just to clean up.\n  void\n  sendAndValidateRequestAndSendResponse(absl::string_view raw_request,\n          
                              const TestRequestHeaderMapImpl& expected_request_headers) {\n    Buffer::OwnedImpl buffer(raw_request);\n    sendAndValidateRequestAndSendResponse(buffer, expected_request_headers);\n  }\n\n  void\n  sendAndValidateRequestAndSendResponse(Buffer::Instance& buffer,\n                                        const TestRequestHeaderMapImpl& expected_request_headers) {\n    NiceMock<MockRequestDecoder> decoder;\n    Http::ResponseEncoder* response_encoder = nullptr;\n    EXPECT_CALL(callbacks_, newStream(_, _))\n        .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n          response_encoder = &encoder;\n          return decoder;\n        }));\n    EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_request_headers), true));\n    auto status = codec_->dispatch(buffer);\n    EXPECT_TRUE(status.ok());\n    EXPECT_EQ(0U, buffer.length());\n    response_encoder->encodeHeaders(TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  }\n\nprotected:\n  uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB};\n  uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT};\n  envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n      headers_with_underscores_action_{envoy::config::core::v3::HttpProtocolOptions::ALLOW};\n};\n\nvoid Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_url,\n                                              Buffer::OwnedImpl& buffer,\n                                              absl::string_view details) {\n  InSequence sequence;\n\n  if (allow_absolute_url) {\n    codec_settings_.allow_absolute_url_ = allow_absolute_url;\n    if (testingNewCodec()) {\n      codec_ = std::make_unique<Http1::ServerConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n          max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n    } else {\n 
     codec_ = std::make_unique<Legacy::Http1::ServerConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n          max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n    }\n  }\n\n  MockRequestDecoder decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, \"Bad Request\", _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(p, codec_->protocol());\n  if (!details.empty()) {\n    EXPECT_EQ(details, response_encoder->getStream().responseDetails());\n  }\n}\n\nvoid Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_absolute_url,\n                                                      Buffer::OwnedImpl& buffer,\n                                                      TestRequestHeaderMapImpl& expected_headers) {\n  InSequence sequence;\n\n  // Make a new 'codec' with the right settings\n  if (allow_absolute_url) {\n    codec_settings_.allow_absolute_url_ = allow_absolute_url;\n    if (testingNewCodec()) {\n      codec_ = std::make_unique<Http1::ServerConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n          max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n    } else {\n      codec_ = std::make_unique<Legacy::Http1::ServerConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n          max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n    }\n  }\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, 
_)).WillOnce(ReturnRef(decoder));\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n  EXPECT_EQ(p, codec_->protocol());\n}\n\nvoid Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) {\n  initialize();\n\n  // Make a new 'codec' with the right settings\n  if (enable_trailers) {\n    codec_settings_.enable_trailers_ = enable_trailers;\n    if (testingNewCodec()) {\n      codec_ = std::make_unique<Http1::ServerConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n          max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n    } else {\n      codec_ = std::make_unique<Legacy::Http1::ServerConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n          max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n    }\n  }\n\n  InSequence sequence;\n  StrictMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder&, bool) -> RequestDecoder& { return decoder; }));\n\n  EXPECT_CALL(decoder, decodeHeaders_(_, false));\n\n  Buffer::OwnedImpl expected_data(\"Hello World\");\n  if (enable_trailers) {\n    // Verify that body data is delivered before trailers.\n    EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n    EXPECT_CALL(decoder, decodeTrailers_);\n  } else {\n    EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n    EXPECT_CALL(decoder, decodeData(_, true));\n  }\n\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n\"\n                           \"6\\r\\nHello \\r\\n\"\n                           \"5\\r\\nWorld\\r\\n\"\n                           \"0\\r\\nhello: world\\r\\nsecond: 
header\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n}\n\nvoid Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_string,\n                                                            bool enable_trailers) {\n  initialize();\n  // Make a new 'codec' with the right settings\n  codec_settings_.enable_trailers_ = enable_trailers;\n  if (testingNewCodec()) {\n    codec_ = std::make_unique<Http1::ServerConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n        max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  } else {\n    codec_ = std::make_unique<Legacy::Http1::ServerConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n        max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  }\n  std::string exception_reason;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder&, bool) -> RequestDecoder& { return decoder; }));\n\n  if (enable_trailers) {\n    EXPECT_CALL(decoder, decodeHeaders_(_, false));\n    EXPECT_CALL(decoder, decodeData(_, false));\n  } else {\n    EXPECT_CALL(decoder, decodeData(_, false));\n    EXPECT_CALL(decoder, decodeData(_, true));\n  }\n\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\n\"\n                           \"Host: host\\r\\n\"\n                           \"Transfer-Encoding: chunked\\r\\n\\r\\n\"\n                           \"4\\r\\n\"\n                           \"body\\r\\n0\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  buffer = Buffer::OwnedImpl(trailer_string);\n  if (enable_trailers) {\n    EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n    status = codec_->dispatch(buffer);\n    EXPECT_TRUE(isCodecProtocolError(status));\n    
EXPECT_EQ(status.message(), \"trailers size exceeds limit\");\n  } else {\n    // If trailers are not enabled, we expect Envoy to simply skip over the large\n    // trailers as if nothing has happened!\n    status = codec_->dispatch(buffer);\n    EXPECT_TRUE(status.ok());\n  }\n}\nvoid Http1ServerConnectionImplTest::testRequestHeadersExceedLimit(std::string header_string,\n                                                                  absl::string_view details) {\n  initialize();\n\n  std::string exception_reason;\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  buffer = Buffer::OwnedImpl(header_string + \"\\r\\n\");\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"headers size exceeds limit\");\n  if (!details.empty()) {\n    EXPECT_EQ(details, response_encoder->getStream().responseDetails());\n  }\n}\n\nvoid Http1ServerConnectionImplTest::testRequestHeadersAccepted(std::string header_string) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  buffer = Buffer::OwnedImpl(header_string + \"\\r\\n\");\n  status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n}\n\nvoid 
Http1ServerConnectionImplTest::testServerAllowChunkedContentLength(uint32_t content_length,\n                                                                        bool allow_chunked_length) {\n  codec_settings_.allow_chunked_length_ = allow_chunked_length;\n  if (testingNewCodec()) {\n    codec_ = std::make_unique<Http1::ServerConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n        max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  } else {\n    codec_ = std::make_unique<Legacy::Http1::ServerConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n        max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  }\n\n  MockRequestDecoder decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":path\", \"/\"},\n      {\":method\", \"POST\"},\n      {\"transfer-encoding\", \"chunked\"},\n  };\n  Buffer::OwnedImpl expected_data(\"Hello World\");\n\n  if (allow_chunked_length) {\n    EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n    EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n    EXPECT_CALL(decoder, decodeData(_, true));\n  } else {\n    EXPECT_CALL(decoder, decodeHeaders_(_, _)).Times(0);\n    EXPECT_CALL(decoder, decodeData(_, _)).Times(0);\n    EXPECT_CALL(decoder, sendLocalReply(false, Http::Code::BadRequest, \"Bad Request\", _, _, _));\n  }\n\n  Buffer::OwnedImpl buffer(\n      fmt::format(\"POST / HTTP/1.1\\r\\ntransfer-encoding: chunked\\r\\ncontent-length: {}\\r\\n\\r\\n\"\n                  \"6\\r\\nHello \\r\\n\"\n                  
\"5\\r\\nWorld\\r\\n\"\n                  \"0\\r\\n\\r\\n\",\n                  content_length));\n\n  auto status = codec_->dispatch(buffer);\n\n  if (allow_chunked_length) {\n    EXPECT_TRUE(status.ok());\n  } else {\n    EXPECT_TRUE(isCodecProtocolError(status));\n    EXPECT_EQ(status.message(),\n              \"http/1.1 protocol error: both 'Content-Length' and 'Transfer-Encoding' are set.\");\n    EXPECT_EQ(\"http1.content_length_and_chunked_not_allowed\",\n              response_encoder->getStream().responseDetails());\n  }\n}\n\nINSTANTIATE_TEST_SUITE_P(Codecs, Http1ServerConnectionImplTest, testing::Bool(),\n                         [](const testing::TestParamInfo<bool>& param) {\n                           return param.param ? \"New\" : \"Legacy\";\n                         });\n\nTEST_P(Http1ServerConnectionImplTest, EmptyHeader) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\"Test\", \"\"},\n      {\"Hello\", \"World\"},\n      {\":path\", \"/\"},\n      {\":method\", \"GET\"},\n  };\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\nTest:\\r\\nHello: World\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n}\n\n// We support the identity encoding, but because it does not end in chunked encoding we reject it\n// per RFC 7230 Section 3.3.3\nTEST_P(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\ntransfer-encoding: identity\\r\\n\\r\\n\");\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = 
codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"http/1.1 protocol error: unsupported transfer encoding\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, UnsupportedEncoding) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\ntransfer-encoding: gzip\\r\\n\\r\\n\");\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"http/1.1 protocol error: unsupported transfer encoding\");\n}\n\n// Verify that data in the two body chunks is merged before the call to decodeData.\nTEST_P(Http1ServerConnectionImplTest, ChunkedBody) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":path\", \"/\"},\n      {\":method\", \"POST\"},\n      {\"transfer-encoding\", \"chunked\"},\n  };\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  Buffer::OwnedImpl expected_data(\"Hello World\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n  // Call to decodeData(\"\", true) happens after.\n  Buffer::OwnedImpl empty(\"\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&empty), true));\n\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n\"\n                           \"6\\r\\nHello \\r\\n\"\n                           \"5\\r\\nWorld\\r\\n\"\n                           \"0\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n}\n\n// Verify dispatch behavior when dispatching an incomplete chunk, and resumption of the parse via a\n// second 
dispatch.\nTEST_P(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":path\", \"/\"},\n      {\":method\", \"POST\"},\n      {\"transfer-encoding\", \"chunked\"},\n  };\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  Buffer::OwnedImpl expected_data1(\"Hello Worl\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false));\n\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n\"\n                           \"6\\r\\nHello \\r\\n\"\n                           \"5\\r\\nWorl\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  // Process the rest of the body and final chunk.\n  Buffer::OwnedImpl expected_data2(\"d\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), false));\n  EXPECT_CALL(decoder, decodeData(_, true));\n\n  Buffer::OwnedImpl buffer2(\"d\\r\\n\"\n                            \"0\\r\\n\\r\\n\");\n  status = codec_->dispatch(buffer2);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer2.length());\n}\n\n// Verify that headers and chunked body are processed correctly and data is merged before the\n// decodeData call even if delivered in a buffer that holds 1 byte per slice.\nTEST_P(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":path\", \"/\"},\n      {\":method\", \"POST\"},\n      {\"transfer-encoding\", \"chunked\"},\n  };\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  Buffer::OwnedImpl expected_data(\"Hello 
World\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n  EXPECT_CALL(decoder, decodeData(_, true));\n\n  Buffer::OwnedImpl buffer =\n      createBufferWithNByteSlices(\"POST / HTTP/1.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n\"\n                                  \"6\\r\\nHello \\r\\n\"\n                                  \"5\\r\\nWorld\\r\\n\"\n                                  \"0\\r\\n\\r\\n\",\n                                  1);\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n}\n\nTEST_P(Http1ServerConnectionImplTest, ChunkedBodyCase) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":path\", \"/\"},\n      {\":method\", \"POST\"},\n      {\"transfer-encoding\", \"Chunked\"},\n  };\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  Buffer::OwnedImpl expected_data(\"Hello World\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n  EXPECT_CALL(decoder, decodeData(_, true));\n\n  Buffer::OwnedImpl buffer(\n      \"POST / HTTP/1.1\\r\\ntransfer-encoding: Chunked\\r\\n\\r\\nb\\r\\nHello World\\r\\n0\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n}\n\n// Verify that body dispatch does not happen after detecting a parse error processing a chunk\n// header.\nTEST_P(Http1ServerConnectionImplTest, InvalidChunkHeader) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":path\", \"/\"},\n      {\":method\", \"POST\"},\n      {\"transfer-encoding\", \"chunked\"},\n  };\n  EXPECT_CALL(decoder, 
decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  EXPECT_CALL(decoder, decodeData(_, _)).Times(0);\n\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n\"\n                           \"6\\r\\nHello \\r\\n\"\n                           \"invalid\\r\\nWorl\");\n\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, IdentityAndChunkedBody) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\ntransfer-encoding: \"\n                           \"identity,chunked\\r\\n\\r\\nb\\r\\nHello World\\r\\n0\\r\\n\\r\\n\");\n\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"http/1.1 protocol error: unsupported transfer encoding\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, HostWithLWS) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"host\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}};\n\n  // Regression test spaces before and after the host header value.\n  sendAndValidateRequestAndSendResponse(\"GET / HTTP/1.1\\r\\nHost: host \\r\\n\\r\\n\", expected_headers);\n\n  // Regression test tabs before and after the host header value.\n  sendAndValidateRequestAndSendResponse(\"GET / HTTP/1.1\\r\\nHost:\thost\t\\r\\n\\r\\n\",\n                                        expected_headers);\n\n  // Regression test mixed spaces and tabs before and after the host header value.\n  sendAndValidateRequestAndSendResponse(\n      \"GET / HTTP/1.1\\r\\nHost: \t \t  host\t\t  \t \\r\\n\\r\\n\", 
expected_headers);\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/10270. Linear whitespace at the\n// beginning and end of a header value should be stripped. Whitespace in the middle should be\n// preserved.\nTEST_P(Http1ServerConnectionImplTest, InnerLWSIsPreserved) {\n  initialize();\n\n  // Header with many spaces surrounded by non-whitespace characters to ensure that dispatching is\n  // split across multiple dispatch calls. The threshold used here comes from Envoy preferring 16KB\n  // reads, but the important part is that the header value is split such that the pieces have\n  // leading and trailing whitespace characters.\n  const std::string header_value_with_inner_lws = \"v\" + std::string(32 * 1024, ' ') + \"v\";\n  TestRequestHeaderMapImpl expected_headers{{\":authority\", \"host\"},\n                                            {\":path\", \"/\"},\n                                            {\":method\", \"GET\"},\n                                            {\"header_field\", header_value_with_inner_lws}};\n\n  {\n    // Regression test spaces in the middle are preserved\n    Buffer::OwnedImpl header_buffer = createBufferWithNByteSlices(\n        \"GET / HTTP/1.1\\r\\nHost: host\\r\\nheader_field: \" + header_value_with_inner_lws + \"\\r\\n\\r\\n\",\n        16 * 1024);\n    EXPECT_EQ(3, header_buffer.getRawSlices().size());\n    sendAndValidateRequestAndSendResponse(header_buffer, expected_headers);\n  }\n\n  {\n    // Regression test spaces before and after are removed\n    Buffer::OwnedImpl header_buffer = createBufferWithNByteSlices(\n        \"GET / HTTP/1.1\\r\\nHost: host\\r\\nheader_field:  \" + header_value_with_inner_lws +\n            \"  \\r\\n\\r\\n\",\n        16 * 1024);\n    EXPECT_EQ(3, header_buffer.getRawSlices().size());\n    sendAndValidateRequestAndSendResponse(header_buffer, expected_headers);\n  }\n}\n\nTEST_P(Http1ServerConnectionImplTest, CodecHasCorrectStreamErrorIfTrue) {\n  
codec_settings_.stream_error_on_invalid_http_message_ = true;\n  if (GetParam()) {\n    codec_ = std::make_unique<Http1::ServerConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n        max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  } else {\n    codec_ = std::make_unique<Legacy::Http1::ServerConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n        max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  }\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\");\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(response_encoder->streamErrorOnInvalidHttpMessage());\n}\n\nTEST_P(Http1ServerConnectionImplTest, CodecHasCorrectStreamErrorIfFalse) {\n  codec_settings_.stream_error_on_invalid_http_message_ = false;\n  if (GetParam()) {\n    codec_ = std::make_unique<Http1::ServerConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n        max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  } else {\n    codec_ = std::make_unique<Legacy::Http1::ServerConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_,\n        max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  }\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\");\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& 
encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  auto status = codec_->dispatch(buffer);\n  EXPECT_FALSE(response_encoder->streamErrorOnInvalidHttpMessage());\n}\n\nTEST_P(Http1ServerConnectionImplTest, CodecHasDefaultStreamErrorIfNotSet) {\n  initialize();\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\");\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  auto status = codec_->dispatch(buffer);\n  EXPECT_FALSE(response_encoder->streamErrorOnInvalidHttpMessage());\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http10) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{{\":path\", \"/\"}, {\":method\", \"GET\"}};\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.0\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n  EXPECT_EQ(Protocol::Http10, codec_->protocol());\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{{\":path\", \"/\"}, {\":method\", \"GET\"}};\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.0\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http10, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http10Absolute) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"www.somewhere.com\"}, {\":path\", \"/foobar\"}, {\":method\", \"GET\"}};\n  Buffer::OwnedImpl buffer(\"GET http://www.somewhere.com/foobar 
HTTP/1.0\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http10, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http10MultipleResponses) {\n  initialize();\n\n  MockRequestDecoder decoder;\n  // Send a full HTTP/1.0 request and proxy a response.\n  {\n    Buffer::OwnedImpl buffer(\n        \"GET /foobar HTTP/1.0\\r\\nHost: www.somewhere.com\\r\\nconnection: keep-alive\\r\\n\\r\\n\");\n    Http::ResponseEncoder* response_encoder = nullptr;\n    EXPECT_CALL(callbacks_, newStream(_, _))\n        .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n          response_encoder = &encoder;\n          return decoder;\n        }));\n\n    EXPECT_CALL(decoder, decodeHeaders_(_, true));\n    auto status = codec_->dispatch(buffer);\n    EXPECT_TRUE(status.ok());\n\n    std::string output;\n    ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n    TestResponseHeaderMapImpl headers{{\":status\", \"200\"}};\n    response_encoder->encodeHeaders(headers, true);\n    EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\ncontent-length: 0\\r\\n\\r\\n\", output);\n    EXPECT_EQ(Protocol::Http10, codec_->protocol());\n  }\n\n  // Now send an HTTP/1.1 request and make sure the protocol is tracked correctly.\n  {\n    TestRequestHeaderMapImpl expected_headers{\n        {\":authority\", \"www.somewhere.com\"}, {\":path\", \"/foobar\"}, {\":method\", \"GET\"}};\n    Buffer::OwnedImpl buffer(\"GET /foobar HTTP/1.1\\r\\nHost: www.somewhere.com\\r\\n\\r\\n\");\n\n    Http::ResponseEncoder* response_encoder = nullptr;\n    EXPECT_CALL(callbacks_, newStream(_, _))\n        .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n          response_encoder = &encoder;\n          return decoder;\n        }));\n    EXPECT_CALL(decoder, decodeHeaders_(_, true));\n    auto status = codec_->dispatch(buffer);\n    EXPECT_TRUE(status.ok());\n    EXPECT_EQ(Protocol::Http11, codec_->protocol());\n  
}\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath1) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"www.somewhere.com\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}};\n  Buffer::OwnedImpl buffer(\"GET http://www.somewhere.com/ HTTP/1.1\\r\\nHost: bah\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath2) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"www.somewhere.com\"}, {\":path\", \"/foo/bar\"}, {\":method\", \"GET\"}};\n  Buffer::OwnedImpl buffer(\"GET http://www.somewhere.com/foo/bar HTTP/1.1\\r\\nHost: bah\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"www.somewhere.com:4532\"}, {\":path\", \"/foo/bar\"}, {\":method\", \"GET\"}};\n  Buffer::OwnedImpl buffer(\n      \"GET http://www.somewhere.com:4532/foo/bar HTTP/1.1\\r\\nHost: bah\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"bah\"}, {\":path\", \"/foo/bar\"}, {\":method\", \"GET\"}};\n  Buffer::OwnedImpl buffer(\"GET /foo/bar HTTP/1.1\\r\\nHost: bah\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11InvalidRequest) {\n  initialize();\n\n  // Invalid because www.somewhere.com is not an absolute path nor an absolute url\n  Buffer::OwnedImpl buffer(\"GET www.somewhere.com HTTP/1.1\\r\\nHost: bah\\r\\n\\r\\n\");\n  expect400(Protocol::Http11, true, buffer, 
\"http1.codec_error\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) {\n  initialize();\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder&, bool) -> RequestDecoder& { return decoder; }));\n\n  EXPECT_CALL(decoder, decodeHeaders_(_, false));\n  // Verify that body is delivered as soon as the final chunk marker is found, even if an error is\n  // found while processing trailers.\n  Buffer::OwnedImpl expected_data(\"body\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\n\"\n                           \"Host: host\\r\\n\"\n                           \"Transfer-Encoding: chunked\\r\\n\\r\\n\"\n                           \"4\\r\\n\"\n                           \"body\\r\\n0\\r\\n\"\n                           \"badtrailer\\r\\n\\r\\n\");\n\n  EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, \"Bad Request\", _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"www.somewhere.com\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}};\n  Buffer::OwnedImpl buffer(\"GET http://www.somewhere.com HTTP/1.1\\r\\nHost: bah\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathBad) {\n  initialize();\n\n  Buffer::OwnedImpl buffer(\"GET * HTTP/1.1\\r\\nHost: bah\\r\\n\\r\\n\");\n  expect400(Protocol::Http11, true, buffer, \"http1.invalid_url\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) {\n  initialize();\n\n  Buffer::OwnedImpl buffer(\"GET http://foobar.com:1000000 HTTP/1.1\\r\\nHost: bah\\r\\n\\r\\n\");\n  expect400(Protocol::Http11, true, 
buffer);\n}\n\nTEST_P(Http1ServerConnectionImplTest, SketchyConnectionHeader) {\n  initialize();\n\n  Buffer::OwnedImpl buffer(\n      \"GET / HTTP/1.1\\r\\nHost: bah\\r\\nConnection: a,b,c,d,e,f,g,h,i,j,k,l,m\\r\\n\\r\\n\");\n  expect400(Protocol::Http11, true, buffer, \"http1.connection_header_rejected\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11RelativeOnly) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"bah\"}, {\":path\", \"http://www.somewhere.com/\"}, {\":method\", \"GET\"}};\n  Buffer::OwnedImpl buffer(\"GET http://www.somewhere.com/ HTTP/1.1\\r\\nHost: bah\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, false, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, Http11Options) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"www.somewhere.com\"}, {\":path\", \"*\"}, {\":method\", \"OPTIONS\"}};\n  Buffer::OwnedImpl buffer(\"OPTIONS * HTTP/1.1\\r\\nHost: www.somewhere.com\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, SimpleGet) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{{\":path\", \"/\"}, {\":method\", \"GET\"}};\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n}\n\nTEST_P(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.early_errors_via_hcm\", \"false\"}});\n  initialize();\n\n  std::string output;\n  ON_CALL(connection_, write(_, 
_)).WillByDefault(AddBufferToString(&output));\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).Times(0);\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)).Times(0);\n\n  Buffer::OwnedImpl buffer(\"bad\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n}\n\n// Test that if the stream is not created at the time an error is detected, it\n// is created as part of sending the protocol error.\nTEST_P(Http1ServerConnectionImplTest, BadRequestNoStream) {\n  initialize();\n\n  MockRequestDecoder decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n  // Check that before any headers are parsed, requests do not look like HEAD or gRPC requests.\n  EXPECT_CALL(decoder, sendLocalReply(false, _, _, _, _, _));\n\n  Buffer::OwnedImpl buffer(\"bad\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n}\n\n// This behavior was observed during CVE-2019-18801 and helped to limit the\n// scope of affected Envoy configurations.\nTEST_P(Http1ServerConnectionImplTest, RejectInvalidMethod) {\n  initialize();\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  Buffer::OwnedImpl buffer(\"BAD / HTTP/1.1\\r\\nHost: foo\\r\\n\");\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n}\n\nTEST_P(Http1ServerConnectionImplTest, BadRequestStartedStream) {\n  initialize();\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  Buffer::OwnedImpl buffer(\"G\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n\n  Buffer::OwnedImpl 
buffer2(\"g\");\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n}\n\nTEST_P(Http1ServerConnectionImplTest, FloodProtection) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Buffer::OwnedImpl local_buffer;\n  // Read a request and send a response, without draining the response from the\n  // connection buffer. The first two should not cause problems.\n  for (int i = 0; i < 2; ++i) {\n    Http::ResponseEncoder* response_encoder = nullptr;\n    EXPECT_CALL(callbacks_, newStream(_, _))\n        .WillOnce(Invoke([&](Http::ResponseEncoder& encoder, bool) -> Http::RequestDecoder& {\n          response_encoder = &encoder;\n          return decoder;\n        }));\n\n    Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(buffer);\n    EXPECT_TRUE(status.ok());\n    EXPECT_EQ(0U, buffer.length());\n\n    // In most tests the write output is serialized to a buffer here it is\n    // ignored to build up queued \"end connection\" sentinels.\n    EXPECT_CALL(connection_, write(_, _))\n\n        .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> void {\n          // Move the response out of data while preserving the buffer fragment sentinels.\n          local_buffer.move(data);\n        }));\n\n    TestResponseHeaderMapImpl headers{{\":status\", \"200\"}};\n    response_encoder->encodeHeaders(headers, true);\n  }\n\n  // Trying to accept a third request with two buffered responses in the queue should trigger flood\n  // protection.\n  {\n    Http::ResponseEncoder* response_encoder = nullptr;\n    EXPECT_CALL(callbacks_, newStream(_, _))\n        .WillOnce(Invoke([&](Http::ResponseEncoder& encoder, bool) -> Http::RequestDecoder& {\n          response_encoder = &encoder;\n          return decoder;\n        }));\n\n    Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(buffer);\n    
EXPECT_TRUE(isBufferFloodError(status));\n    EXPECT_EQ(status.message(), \"Too many responses queued.\");\n    EXPECT_EQ(1, store_.counter(\"http1.response_flood\").value());\n  }\n}\n\nTEST_P(Http1ServerConnectionImplTest, FloodProtectionOff) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.http1_flood_protection\", \"false\"}});\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Buffer::OwnedImpl local_buffer;\n  // With flood protection off, many responses can be queued up.\n  for (int i = 0; i < 4; ++i) {\n    Http::ResponseEncoder* response_encoder = nullptr;\n    EXPECT_CALL(callbacks_, newStream(_, _))\n        .WillOnce(Invoke([&](Http::ResponseEncoder& encoder, bool) -> Http::RequestDecoder& {\n          response_encoder = &encoder;\n          return decoder;\n        }));\n\n    Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(buffer);\n    EXPECT_TRUE(status.ok());\n    EXPECT_EQ(0U, buffer.length());\n\n    // In most tests the write output is serialized to a buffer here it is\n    // ignored to build up queued \"end connection\" sentinels.\n    EXPECT_CALL(connection_, write(_, _))\n\n        .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> void {\n          // Move the response out of data while preserving the buffer fragment sentinels.\n          local_buffer.move(data);\n        }));\n\n    TestResponseHeaderMapImpl headers{{\":status\", \"200\"}};\n    response_encoder->encodeHeaders(headers, true);\n  }\n}\n\nTEST_P(Http1ServerConnectionImplTest, HostHeaderTranslation) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"hello\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}};\n  EXPECT_CALL(decoder, 
decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\nHOST: hello\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n}\n\n// Ensures that requests with invalid HTTP header values are properly rejected\n// when the runtime guard is enabled for the feature.\nTEST_P(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) {\n  TestScopedRuntime scoped_runtime;\n  // When the runtime-guarded feature is enabled, invalid header values\n  // should result in a rejection.\n\n  initialize();\n\n  MockRequestDecoder decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n  Buffer::OwnedImpl buffer(\n      absl::StrCat(\"GET / HTTP/1.1\\r\\nHOST: h.com\\r\\nfoo: \", std::string(1, 3), \"\\r\\n\"));\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"http/1.1 protocol error: header value contains invalid chars\");\n  EXPECT_EQ(\"http1.invalid_characters\", response_encoder->getStream().responseDetails());\n}\n\n// Ensures that request headers with names containing the underscore character are allowed\n// when the option is set to allow.\nTEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) {\n  headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW;\n  initialize();\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"h.com\"},\n      {\":path\", \"/\"},\n      {\":method\", \"GET\"},\n      {\"foo_bar\", \"bar\"},\n  };\n  
EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n\n  Buffer::OwnedImpl buffer(absl::StrCat(\"GET / HTTP/1.1\\r\\nHOST: h.com\\r\\nfoo_bar: bar\\r\\n\\r\\n\"));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n  EXPECT_EQ(0, store_.counter(\"http1.dropped_headers_with_underscores\").value());\n}\n\n// Ensures that request headers with names containing the underscore character are dropped\n// when the option is set to drop headers.\nTEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) {\n  headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER;\n  initialize();\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"h.com\"},\n      {\":path\", \"/\"},\n      {\":method\", \"GET\"},\n  };\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n\n  Buffer::OwnedImpl buffer(absl::StrCat(\"GET / HTTP/1.1\\r\\nHOST: h.com\\r\\nfoo_bar: bar\\r\\n\\r\\n\"));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n  EXPECT_EQ(1, store_.counter(\"http1.dropped_headers_with_underscores\").value());\n}\n\n// Ensures that request with header names containing the underscore character are rejected\n// when the option is set to reject request.\nTEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) {\n  headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST;\n  initialize();\n\n  MockRequestDecoder decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  
Buffer::OwnedImpl buffer(absl::StrCat(\"GET / HTTP/1.1\\r\\nHOST: h.com\\r\\nfoo_bar: bar\\r\\n\\r\\n\"));\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"http/1.1 protocol error: header name contains underscores\");\n  EXPECT_EQ(\"http1.unexpected_underscore\", response_encoder->getStream().responseDetails());\n  EXPECT_EQ(1, store_.counter(\"http1.requests_rejected_with_underscores_in_headers\").value());\n}\n\nTEST_P(Http1ServerConnectionImplTest, HeaderInvalidAuthority) {\n  TestScopedRuntime scoped_runtime;\n\n  initialize();\n\n  MockRequestDecoder decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n  Buffer::OwnedImpl buffer(absl::StrCat(\"GET / HTTP/1.1\\r\\nHOST: h.\\\"com\\r\\n\\r\\n\"));\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(),\n            \"http/1.1 protocol error: request headers failed spec compliance checks\");\n  EXPECT_EQ(\"http.invalid_authority\", response_encoder->getStream().responseDetails());\n}\n\n// Mutate an HTTP GET with embedded NULs, this should always be rejected in some\n// way (not necessarily with \"head value contains NUL\" though).\nTEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) {\n  const std::string example_input = \"GET / HTTP/1.1\\r\\nHOST: h.com\\r\\nfoo: barbaz\\r\\n\";\n\n  for (size_t n = 1; n < example_input.size(); ++n) {\n    initialize();\n\n    InSequence sequence;\n\n    MockRequestDecoder decoder;\n    EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n    Buffer::OwnedImpl buffer(\n        
absl::StrCat(example_input.substr(0, n), std::string(1, '\\0'), example_input.substr(n)));\n    EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n    auto status = codec_->dispatch(buffer);\n    EXPECT_FALSE(status.ok());\n    EXPECT_TRUE(isCodecProtocolError(status));\n    EXPECT_THAT(status.message(), testing::HasSubstr(\"http/1.1 protocol error:\"));\n  }\n}\n\n// Mutate an HTTP GET with CR or LF. These can cause an error status or maybe\n// result in a valid decodeHeaders(). In any case, the validHeaderString()\n// ASSERTs should validate we never have any embedded CR or LF.\nTEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) {\n  const std::string example_input = \"GET / HTTP/1.1\\r\\nHOST: h.com\\r\\nfoo: barbaz\\r\\n\";\n\n  for (const char c : {'\\r', '\\n'}) {\n    for (size_t n = 1; n < example_input.size(); ++n) {\n      initialize();\n\n      InSequence sequence;\n\n      NiceMock<MockRequestDecoder> decoder;\n      EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n      Buffer::OwnedImpl buffer(\n          absl::StrCat(example_input.substr(0, n), std::string(1, c), example_input.substr(n)));\n      // May or may not cause an error status, but should never trip on a debug ASSERT.\n      auto status = codec_->dispatch(buffer);\n    }\n  }\n}\n\nTEST_P(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\"content-length\", \"5\"}, {\":path\", \"/\"}, {\":method\", \"POST\"}};\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false))\n      .WillOnce(Invoke([&](Http::RequestHeaderMapPtr&, bool) -> void {\n        connection_.state_ = Network::Connection::State::Closing;\n      }));\n  EXPECT_CALL(decoder, decodeData(_, _)).Times(0);\n\n  Buffer::OwnedImpl buffer(\"POST / 
HTTP/1.1\\r\\ncontent-length: 5\\r\\n\\r\\n12345\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_NE(0U, buffer.length());\n}\n\nTEST_P(Http1ServerConnectionImplTest, PostWithContentLength) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\"content-length\", \"5\"}, {\":path\", \"/\"}, {\":method\", \"POST\"}};\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n\n  Buffer::OwnedImpl expected_data1(\"12345\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false));\n\n  Buffer::OwnedImpl expected_data2;\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), true));\n\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\ncontent-length: 5\\r\\n\\r\\n12345\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n}\n\n// Verify that headers and body with content length are processed correctly and data is merged\n// before the decodeData call even if delivered in a buffer that holds 1 byte per slice.\nTEST_P(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) {\n  initialize();\n\n  InSequence sequence;\n\n  MockRequestDecoder decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\"content-length\", \"5\"}, {\":path\", \"/\"}, {\":method\", \"POST\"}};\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n\n  Buffer::OwnedImpl expected_data1(\"12345\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false));\n\n  Buffer::OwnedImpl expected_data2;\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), true));\n\n  Buffer::OwnedImpl buffer =\n      createBufferWithNByteSlices(\"POST / 
HTTP/1.1\\r\\ncontent-length: 5\\r\\n\\r\\n12345\", 1);\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n}\n\nTEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponse) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestResponseHeaderMapImpl headers{{\":status\", \"200\"}};\n  response_encoder->encodeHeaders(headers, true);\n  EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\ncontent-length: 0\\r\\n\\r\\n\", output);\n}\n\n// As with Http1ClientConnectionImplTest.LargeHeaderRequestEncode but validate\n// the response encoder instead of request encoder.\nTEST_P(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  const std::string long_header_value = std::string(79 * 1024, 'a');\n  TestResponseHeaderMapImpl headers{{\":status\", \"200\"}, {\"foo\", long_header_value}};\n  
response_encoder->encodeHeaders(headers, true);\n  EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\nfoo: \" + long_header_value + \"\\r\\ncontent-length: 0\\r\\n\\r\\n\",\n            output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) {\n  codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase;\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestResponseHeaderMapImpl headers{\n      {\":status\", \"200\"}, {\"some-header\", \"foo\"}, {\"some#header\", \"baz\"}};\n  response_encoder->encodeHeaders(headers, true);\n  EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\nSome-Header: foo\\r\\nSome#Header: baz\\r\\nContent-Length: 0\\r\\n\\r\\n\",\n            output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestResponseHeaderMapImpl headers{{\":status\", \"204\"}};\n  
response_encoder->encodeHeaders(headers, true);\n  EXPECT_EQ(\"HTTP/1.1 204 No Content\\r\\n\\r\\n\", output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  response_encoder->encode100ContinueHeaders(continue_headers);\n  EXPECT_EQ(\"HTTP/1.1 100 Continue\\r\\n\\r\\n\", output);\n  output.clear();\n\n  // Test the special case where we encode 100 headers (no content length may be\n  // appended) then 200 headers (content length 0 will be appended).\n  TestResponseHeaderMapImpl headers{{\":status\", \"200\"}};\n  response_encoder->encodeHeaders(headers, true);\n  EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\ncontent-length: 0\\r\\n\\r\\n\", output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, MetadataTest) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  MetadataMap metadata_map = {{\"key\", \"value\"}};\n  MetadataMapPtr metadata_map_ptr = 
std::make_unique<MetadataMap>(metadata_map);\n  MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  response_encoder->encodeMetadata(metadata_map_vector);\n  EXPECT_EQ(1, store_.counter(\"http1.metadata_not_supported_error\").value());\n}\n\nTEST_P(Http1ServerConnectionImplTest, ChunkedResponse) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(Invoke([&output](Buffer::Instance& data, bool) {\n    // Verify that individual writes into the codec's output buffer were coalesced into a single\n    // slice\n    ASSERT_EQ(1, data.getRawSlices().size());\n    output.append(data.toString());\n    data.drain(data.length());\n  }));\n\n  TestResponseHeaderMapImpl headers{{\":status\", \"200\"}};\n  response_encoder->encodeHeaders(headers, false);\n\n  Buffer::OwnedImpl data(\"Hello World\");\n  response_encoder->encodeData(data, true);\n\n  EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\nb\\r\\nHello \"\n            \"World\\r\\n0\\r\\n\\r\\n\",\n            output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) {\n  codec_settings_.enable_trailers_ = true;\n  initialize();\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  
Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestResponseHeaderMapImpl headers{{\":status\", \"200\"}};\n  response_encoder->encodeHeaders(headers, false);\n\n  Buffer::OwnedImpl data(\"Hello World\");\n  response_encoder->encodeData(data, false);\n\n  TestResponseTrailerMapImpl trailers{{\"foo\", \"bar\"}, {\"foo\", \"baz\"}};\n  response_encoder->encodeTrailers(trailers);\n\n  EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\nb\\r\\nHello \"\n            \"World\\r\\n0\\r\\nfoo: bar\\r\\nfoo: baz\\r\\n\\r\\n\",\n            output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, ContentLengthResponse) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestResponseHeaderMapImpl headers{{\":status\", \"200\"}, {\"content-length\", \"11\"}};\n  response_encoder->encodeHeaders(headers, false);\n\n  Buffer::OwnedImpl data(\"Hello World\");\n  response_encoder->encodeData(data, true);\n  EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\ncontent-length: 11\\r\\n\\r\\nHello World\", output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, HeadRequestResponse) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, 
_))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"HEAD / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestResponseHeaderMapImpl headers{{\":status\", \"200\"}, {\"content-length\", \"5\"}};\n  response_encoder->encodeHeaders(headers, true);\n  EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\ncontent-length: 5\\r\\n\\r\\n\", output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"HEAD / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestResponseHeaderMapImpl headers{{\":status\", \"200\"}};\n  response_encoder->encodeHeaders(headers, true);\n  EXPECT_EQ(\"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n\", output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, DoubleRequest) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .Times(2)\n      .WillRepeatedly(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  std::string request(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  
Buffer::OwnedImpl buffer(request);\n  buffer.add(request);\n\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(request.size(), buffer.length());\n\n  response_encoder->encodeHeaders(TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  status = codec_->dispatch(buffer);\n  EXPECT_EQ(0U, buffer.length());\n}\n\nTEST_P(Http1ServerConnectionImplTest, RequestWithTrailersDropped) { expectTrailersTest(false); }\n\nTEST_P(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersTest(true); }\n\nTEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"www.somewhere.com\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}};\n  Buffer::OwnedImpl buffer(\n      \"GET http://www.somewhere.com/ HTTP/1.1\\r\\nConnection: \"\n      \"Upgrade, HTTP2-Settings\\r\\nUpgrade: h2c\\r\\nHTTP2-Settings: token64\\r\\nHost: bah\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{{\":authority\", \"www.somewhere.com\"},\n                                            {\":path\", \"/\"},\n                                            {\":method\", \"GET\"},\n                                            {\"connection\", \"Close\"}};\n  Buffer::OwnedImpl buffer(\"GET http://www.somewhere.com/ HTTP/1.1\\r\\nConnection: \"\n                           \"Upgrade, Close, HTTP2-Settings\\r\\nUpgrade: h2c\\r\\nHTTP2-Settings: \"\n                           \"token64\\r\\nHost: bah\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) {\n  initialize();\n\n  TestRequestHeaderMapImpl expected_headers{{\":authority\", \"www.somewhere.com\"},\n                                            {\":path\", 
\"/\"},\n                                            {\":method\", \"GET\"},\n                                            {\"connection\", \"Close\"}};\n  Buffer::OwnedImpl buffer(\"GET http://www.somewhere.com/ HTTP/1.1\\r\\nConnection: \"\n                           \"Upgrade, Close, HTTP2-Settings, Etc\\r\\nUpgrade: h2c\\r\\nHTTP2-Settings: \"\n                           \"token64\\r\\nHost: bah\\r\\n\\r\\n\");\n  expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);\n}\n\nTEST_P(Http1ServerConnectionImplTest, UpgradeRequest) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  EXPECT_CALL(decoder, decodeHeaders_(_, false));\n  Buffer::OwnedImpl buffer(\n      \"POST / HTTP/1.1\\r\\nConnection: upgrade\\r\\nUpgrade: foo\\r\\ncontent-length:5\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n\n  Buffer::OwnedImpl expected_data1(\"12345\");\n  Buffer::OwnedImpl body(\"12345\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false));\n  status = codec_->dispatch(body);\n\n  Buffer::OwnedImpl expected_data2(\"abcd\");\n  Buffer::OwnedImpl websocket_payload(\"abcd\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), false));\n  status = codec_->dispatch(websocket_payload);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  Buffer::OwnedImpl expected_data(\"12345abcd\");\n  EXPECT_CALL(decoder, decodeHeaders_(_, false));\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\nConnection: upgrade\\r\\nUpgrade: \"\n                           \"foo\\r\\ncontent-length:5\\r\\n\\r\\n12345abcd\");\n  auto status = 
codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  // Even with T-E chunked, the data should neither be inspected for (the not\n  // present in this unit test) chunks, but simply passed through.\n  Buffer::OwnedImpl expected_data(\"12345abcd\");\n  EXPECT_CALL(decoder, decodeHeaders_(_, false));\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n  Buffer::OwnedImpl buffer(\"POST / HTTP/1.1\\r\\nConnection: upgrade\\r\\nUpgrade: \"\n                           \"foo\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n12345abcd\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  // Make sure we avoid the deferred_end_stream_headers_ optimization for\n  // requests-with-no-body.\n  Buffer::OwnedImpl expected_data(\"abcd\");\n  EXPECT_CALL(decoder, decodeHeaders_(_, false));\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n  Buffer::OwnedImpl buffer(\n      \"GET / HTTP/1.1\\r\\nConnection: upgrade\\r\\nUpgrade: foo\\r\\ncontent-length: 0\\r\\n\\r\\nabcd\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n}\n\n// Test that 101 upgrade responses do not contain content-length or transfer-encoding headers.\nTEST_P(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n       
 response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\nConnection: upgrade\\r\\nUpgrade: foo\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0U, buffer.length());\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestResponseHeaderMapImpl headers{{\":status\", \"101\"}};\n  response_encoder->encodeHeaders(headers, false);\n  EXPECT_EQ(\"HTTP/1.1 101 Switching Protocols\\r\\n\\r\\n\", output);\n}\n\nTEST_P(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  TestRequestHeaderMapImpl expected_headers{\n      {\":authority\", \"host:80\"},\n      {\":method\", \"CONNECT\"},\n  };\n  EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  Buffer::OwnedImpl buffer(\"CONNECT host:80 HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n\n  Buffer::OwnedImpl expected_data(\"abcd\");\n  Buffer::OwnedImpl connect_payload(\"abcd\");\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n  status = codec_->dispatch(connect_payload);\n  EXPECT_TRUE(status.ok());\n}\n\n// We use the absolute URL parsing code for CONNECT requests, but it does not\n// actually allow absolute URLs.\nTEST_P(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  Buffer::OwnedImpl buffer(\"CONNECT http://host:80 HTTP/1.1\\r\\n\\r\\n\");\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = codec_->dispatch(buffer);\n  
EXPECT_TRUE(isCodecProtocolError(status));\n}\n\nTEST_P(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  Buffer::OwnedImpl expected_data(\"abcd\");\n  EXPECT_CALL(decoder, decodeHeaders_(_, false));\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n  Buffer::OwnedImpl buffer(\"CONNECT host:80 HTTP/1.1\\r\\n\\r\\nabcd\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 CONNECT with body has no defined\n  // semantics: Envoy will reject chunked CONNECT requests.\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  Buffer::OwnedImpl buffer(\n      \"CONNECT host:80 HTTP/1.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n12345abcd\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"http/1.1 protocol error: unsupported transfer encoding\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  // Make sure we avoid the deferred_end_stream_headers_ optimization for\n  // requests-with-no-body.\n  Buffer::OwnedImpl buffer(\"CONNECT host:80 HTTP/1.1\\r\\ncontent-length: 1\\r\\n\\r\\nabcd\");\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"http/1.1 protocol error: 
unsupported content length\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) {\n  initialize();\n\n  InSequence sequence;\n  NiceMock<MockRequestDecoder> decoder;\n  EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder));\n\n  // Make sure we avoid the deferred_end_stream_headers_ optimization for\n  // requests-with-no-body.\n  Buffer::OwnedImpl expected_data(\"abcd\");\n  EXPECT_CALL(decoder, decodeHeaders_(_, false));\n  EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false));\n  Buffer::OwnedImpl buffer(\"CONNECT host:80 HTTP/1.1\\r\\ncontent-length: 0\\r\\n\\r\\nabcd\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ServerConnectionImplTest, WatermarkTest) {\n  EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10));\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n\n  Http::MockStreamCallbacks stream_callbacks;\n  response_encoder->getStream().addCallbacks(stream_callbacks);\n\n  // Fake a call from the underlying Network::Connection and verify the stream is notified.\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  static_cast<ServerConnection*>(codec_.get())\n      ->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark());\n  TestResponseHeaderMapImpl headers{{\":status\", \"200\"}};\n  response_encoder->encodeHeaders(headers, false);\n\n  // Fake out the underlying Network::Connection buffer being drained.\n  
EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark());\n  static_cast<ServerConnection*>(codec_.get())\n      ->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n}\n\nTEST_P(Http1ServerConnectionImplTest, TestSmugglingDisallowChunkedContentLength0) {\n  testServerAllowChunkedContentLength(0, false);\n}\nTEST_P(Http1ServerConnectionImplTest, TestSmugglingDisallowChunkedContentLength1) {\n  // content-length less than POST body size\n  testServerAllowChunkedContentLength(1, false);\n}\nTEST_P(Http1ServerConnectionImplTest, TestSmugglingDisallowChunkedContentLength100) {\n  // content-length greater than POST body size\n  testServerAllowChunkedContentLength(100, false);\n}\n\nTEST_P(Http1ServerConnectionImplTest, TestSmugglingAllowChunkedContentLength0) {\n  testServerAllowChunkedContentLength(0, true);\n}\nTEST_P(Http1ServerConnectionImplTest, TestSmugglingAllowChunkedContentLength1) {\n  // content-length less than POST body size\n  testServerAllowChunkedContentLength(1, true);\n}\nTEST_P(Http1ServerConnectionImplTest, TestSmugglingAllowChunkedContentLength100) {\n  // content-length greater than POST body size\n  testServerAllowChunkedContentLength(100, true);\n}\n\nclass Http1ClientConnectionImplTest : public Http1CodecTestBase,\n                                      public testing::TestWithParam<bool> {\npublic:\n  bool testingNewCodec() { return GetParam(); }\n\n  void initialize() {\n    if (testingNewCodec()) {\n      codec_ = std::make_unique<Http1::ClientConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_);\n    } else {\n      codec_ = std::make_unique<Legacy::Http1::ClientConnectionImpl>(\n          connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_);\n    }\n  }\n\n  void readDisableOnRequestEncoder(RequestEncoder* request_encoder, bool disable) {\n    if (testingNewCodec()) {\n      
dynamic_cast<Http1::RequestEncoderImpl*>(request_encoder)->readDisable(disable);\n    } else {\n      dynamic_cast<Legacy::Http1::RequestEncoderImpl*>(request_encoder)->readDisable(disable);\n    }\n  }\n\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<Http::MockConnectionCallbacks> callbacks_;\n  NiceMock<Http1Settings> codec_settings_;\n  Http::ClientConnectionPtr codec_;\n\n  void testClientAllowChunkedContentLength(uint32_t content_length, bool allow_chunked_length);\n\nprotected:\n  Stats::TestUtil::TestStore store_;\n  uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT};\n};\n\nINSTANTIATE_TEST_SUITE_P(Codecs, Http1ClientConnectionImplTest, testing::Bool(),\n                         [](const testing::TestParamInfo<bool>& param) {\n                           return param.param ? \"New\" : \"Legacy\";\n                         });\n\nvoid Http1ClientConnectionImplTest::testClientAllowChunkedContentLength(uint32_t content_length,\n                                                                        bool allow_chunked_length) {\n  codec_settings_.allow_chunked_length_ = allow_chunked_length;\n  if (testingNewCodec()) {\n    codec_ = std::make_unique<Http1::ClientConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_);\n  } else {\n    codec_ = std::make_unique<Legacy::Http1::ClientConnectionImpl>(\n        connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_);\n  }\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  TestResponseHeaderMapImpl expected_headers{{\":status\", \"200\"}, {\"transfer-encoding\", \"chunked\"}};\n  Buffer::OwnedImpl expected_data(\"Hello World\");\n\n  if 
(allow_chunked_length) {\n    EXPECT_CALL(response_decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n    EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false));\n    EXPECT_CALL(response_decoder, decodeData(_, true));\n  } else {\n    EXPECT_CALL(response_decoder, decodeHeaders_(_, _)).Times(0);\n    EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0);\n  }\n\n  Buffer::OwnedImpl buffer(\n      fmt::format(\"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\ncontent-length: {}\\r\\n\\r\\n\"\n                  \"6\\r\\nHello \\r\\n\"\n                  \"5\\r\\nWorld\\r\\n\"\n                  \"0\\r\\n\\r\\n\",\n                  content_length));\n  auto status = codec_->dispatch(buffer);\n\n  if (allow_chunked_length) {\n    EXPECT_TRUE(status.ok());\n  } else {\n    EXPECT_TRUE(isCodecProtocolError(status));\n    EXPECT_EQ(status.message(),\n              \"http/1.1 protocol error: both 'Content-Length' and 'Transfer-Encoding' are set.\");\n  };\n}\n\nTEST_P(Http1ClientConnectionImplTest, SimpleGet) {\n  initialize();\n\n  MockResponseDecoder response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}};\n  request_encoder.encodeHeaders(headers, true);\n  EXPECT_EQ(\"GET / HTTP/1.1\\r\\ncontent-length: 0\\r\\n\\r\\n\", output);\n}\n\nTEST_P(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) {\n  codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase;\n\n  initialize();\n\n  MockResponseDecoder response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, 
{\":path\", \"/\"}, {\"my-custom-header\", \"hey\"}};\n  request_encoder.encodeHeaders(headers, true);\n  EXPECT_EQ(\"GET / HTTP/1.1\\r\\nMy-Custom-Header: hey\\r\\nContent-Length: 0\\r\\n\\r\\n\", output);\n}\n\nTEST_P(Http1ClientConnectionImplTest, HostHeaderTranslate) {\n  initialize();\n\n  MockResponseDecoder response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n  EXPECT_EQ(\"GET / HTTP/1.1\\r\\nhost: host\\r\\ncontent-length: 0\\r\\n\\r\\n\", output);\n}\n\nTEST_P(Http1ClientConnectionImplTest, Reset) {\n  initialize();\n\n  MockResponseDecoder response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n\n  Http::MockStreamCallbacks callbacks;\n  request_encoder.getStream().addCallbacks(callbacks);\n  EXPECT_CALL(callbacks, onResetStream(StreamResetReason::LocalReset, _));\n  request_encoder.getStream().resetStream(StreamResetReason::LocalReset);\n}\n\n// Verify that we correctly enable reads on the connection when the final response is\n// received.\nTEST_P(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) {\n  initialize();\n\n  MockResponseDecoder response_decoder;\n  auto* request_encoder = &codec_->newStream(response_decoder);\n  // Manually read disable.\n  EXPECT_CALL(connection_, readDisable(true)).Times(2);\n  readDisableOnRequestEncoder(request_encoder, true);\n  readDisableOnRequestEncoder(request_encoder, true);\n\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n\n  // Request.\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder->encodeHeaders(headers, true);\n  
EXPECT_EQ(\"GET / HTTP/1.1\\r\\nhost: host\\r\\ncontent-length: 0\\r\\n\\r\\n\", output);\n  output.clear();\n\n  // When the response is sent, the read disable should be unwound.\n  EXPECT_CALL(connection_, readDisable(false)).Times(2);\n\n  // Response.\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, true));\n  Buffer::OwnedImpl response(\"HTTP/1.1 503 Service Unavailable\\r\\nContent-Length: 0\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, PrematureResponse) {\n  initialize();\n\n  Buffer::OwnedImpl response(\"HTTP/1.1 408 Request Timeout\\r\\nConnection: Close\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(isPrematureResponseError(status));\n}\n\nTEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse503) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, true));\n  Buffer::OwnedImpl response(\"HTTP/1.1 503 Service Unavailable\\r\\nContent-Length: 0\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse200) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, true));\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  
EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, HeadRequest) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"HEAD\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, true));\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\nContent-Length: 20\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, 204Response) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, true));\n  Buffer::OwnedImpl response(\"HTTP/1.1 204 OK\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\n// 204 No Content with Content-Length is barred by RFC 7230, Section 3.3.2.\nTEST_P(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) {\n  // By default, content-length is barred.\n  {\n    initialize();\n\n    NiceMock<MockResponseDecoder> response_decoder;\n    Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n    TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n    request_encoder.encodeHeaders(headers, true);\n\n    Buffer::OwnedImpl response(\"HTTP/1.1 204 OK\\r\\nContent-Length: 20\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(response);\n    EXPECT_FALSE(status.ok());\n  }\n\n  // Test with feature disabled: content-length allowed.\n  {\n    TestScopedRuntime scoped_runtime;\n    
Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.strict_1xx_and_204_response_headers\", \"false\"}});\n\n    initialize();\n\n    NiceMock<MockResponseDecoder> response_decoder;\n    Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n    TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n    request_encoder.encodeHeaders(headers, true);\n\n    Buffer::OwnedImpl response(\"HTTP/1.1 204 OK\\r\\nContent-Length: 20\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(response);\n    EXPECT_TRUE(status.ok());\n  }\n}\n\n// 204 No Content with Content-Length: 0 is technically barred by RFC 7230, Section 3.3.2, but we\n// allow it.\nTEST_P(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) {\n  {\n    initialize();\n\n    NiceMock<MockResponseDecoder> response_decoder;\n    Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n    TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n    request_encoder.encodeHeaders(headers, true);\n\n    EXPECT_CALL(response_decoder, decodeHeaders_(_, true));\n    Buffer::OwnedImpl response(\"HTTP/1.1 204 OK\\r\\nContent-Length: 0\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(response);\n    EXPECT_TRUE(status.ok());\n  }\n\n  // Test with feature disabled: content-length allowed.\n  {\n    TestScopedRuntime scoped_runtime;\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.strict_1xx_and_204_response_headers\", \"false\"}});\n\n    NiceMock<MockResponseDecoder> response_decoder;\n    Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n    TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n    request_encoder.encodeHeaders(headers, true);\n\n    EXPECT_CALL(response_decoder, decodeHeaders_(_, 
true));\n    Buffer::OwnedImpl response(\"HTTP/1.1 204 OK\\r\\nContent-Length: 0\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(response);\n    EXPECT_TRUE(status.ok());\n  }\n}\n\n// 204 No Content with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1.\nTEST_P(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) {\n  // By default, transfer-encoding is barred.\n  {\n    initialize();\n\n    NiceMock<MockResponseDecoder> response_decoder;\n    Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n    TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n    request_encoder.encodeHeaders(headers, true);\n\n    Buffer::OwnedImpl response(\"HTTP/1.1 204 OK\\r\\nTransfer-Encoding: chunked\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(response);\n    EXPECT_FALSE(status.ok());\n  }\n\n  // Test with feature disabled: transfer-encoding allowed.\n  {\n    TestScopedRuntime scoped_runtime;\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.strict_1xx_and_204_response_headers\", \"false\"}});\n\n    initialize();\n\n    NiceMock<MockResponseDecoder> response_decoder;\n    Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n    TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n    request_encoder.encodeHeaders(headers, true);\n\n    Buffer::OwnedImpl response(\"HTTP/1.1 204 OK\\r\\nTransfer-Encoding: chunked\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(response);\n    EXPECT_TRUE(status.ok());\n  }\n}\n\n// 100 response followed by 200 results in a [decode100ContinueHeaders, decodeHeaders] sequence.\nTEST_P(Http1ClientConnectionImplTest, ContinueHeaders) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  
TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  EXPECT_CALL(response_decoder, decode100ContinueHeaders_(_));\n  EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0);\n  Buffer::OwnedImpl initial_response(\"HTTP/1.1 100 Continue\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(initial_response);\n  EXPECT_TRUE(status.ok());\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, false));\n  EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0);\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\n\\r\\n\");\n  status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\n// Multiple 100 responses are passed to the response encoder (who is responsible for coalescing).\nTEST_P(Http1ClientConnectionImplTest, MultipleContinueHeaders) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  EXPECT_CALL(response_decoder, decode100ContinueHeaders_(_));\n  EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0);\n  Buffer::OwnedImpl initial_response(\"HTTP/1.1 100 Continue\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(initial_response);\n  EXPECT_TRUE(status.ok());\n\n  EXPECT_CALL(response_decoder, decode100ContinueHeaders_(_));\n  EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0);\n  Buffer::OwnedImpl another_100_response(\"HTTP/1.1 100 Continue\\r\\n\\r\\n\");\n  status = codec_->dispatch(another_100_response);\n  EXPECT_TRUE(status.ok());\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, false));\n  EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0);\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\n\\r\\n\");\n  status = codec_->dispatch(response);\n  
EXPECT_TRUE(status.ok());\n}\n\n// 101/102 headers etc. are passed to the response encoder (who is responsibly for deciding to\n// upgrade, ignore, etc.).\nTEST_P(Http1ClientConnectionImplTest, 1xxNonContinueHeaders) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, false));\n  Buffer::OwnedImpl response(\"HTTP/1.1 102 Processing\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\n// 101 Switching Protocol with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1.\nTEST_P(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) {\n  // By default, transfer-encoding is barred.\n  {\n    initialize();\n\n    NiceMock<MockResponseDecoder> response_decoder;\n    Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n    TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n    request_encoder.encodeHeaders(headers, true);\n\n    Buffer::OwnedImpl response(\n        \"HTTP/1.1 101 Switching Protocols\\r\\nTransfer-Encoding: chunked\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(response);\n    EXPECT_FALSE(status.ok());\n  }\n\n  // Test with feature disabled: transfer-encoding allowed.\n  {\n    TestScopedRuntime scoped_runtime;\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.strict_1xx_and_204_response_headers\", \"false\"}});\n\n    initialize();\n\n    NiceMock<MockResponseDecoder> response_decoder;\n    Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n    TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, 
{\":authority\", \"host\"}};\n    request_encoder.encodeHeaders(headers, true);\n\n    Buffer::OwnedImpl response(\n        \"HTTP/1.1 101 Switching Protocols\\r\\nTransfer-Encoding: chunked\\r\\n\\r\\n\");\n    auto status = codec_->dispatch(response);\n    EXPECT_TRUE(status.ok());\n  }\n}\n\nTEST_P(Http1ClientConnectionImplTest, BadEncodeParams) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n\n  // Need to set :method and :path.\n  // New and legacy codecs will behave differently on errors from processing outbound data. The\n  // legacy codecs will throw an exception (that presently will be uncaught in contexts like\n  // sendLocalReply), while the new codecs temporarily RELEASE_ASSERT until Envoy handles errors on\n  // outgoing data.\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  if (testingNewCodec()) {\n    EXPECT_DEATH(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{\":path\", \"/\"}}, true),\n                 \":method and :path must be specified\");\n    EXPECT_DEATH(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{\":method\", \"GET\"}}, true),\n                 \":method and :path must be specified\");\n  } else {\n    EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{\":path\", \"/\"}}, true),\n                 CodecClientException);\n    EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{\":method\", \"GET\"}}, true),\n                 CodecClientException);\n  }\n}\n\nTEST_P(Http1ClientConnectionImplTest, NoContentLengthResponse) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  Buffer::OwnedImpl expected_data1(\"Hello World\");\n  EXPECT_CALL(response_decoder, 
decodeData(BufferEqual(&expected_data1), false));\n\n  Buffer::OwnedImpl expected_data2;\n  EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), true));\n\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\n\\r\\nHello World\");\n  auto status = codec_->dispatch(response);\n\n  Buffer::OwnedImpl empty;\n  status = codec_->dispatch(empty);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, ResponseWithTrailers) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\n\\r\\ntransfer-encoding: chunked\\r\\n\\r\\nb\\r\\nHello \"\n                             \"World\\r\\n0\\r\\nhello: world\\r\\nsecond: header\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_EQ(0UL, response.length());\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, GiantPath) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{\n      {\":method\", \"GET\"}, {\":path\", \"/\" + std::string(16384, 'a')}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, false));\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\nContent-Length: 20\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, PrematureUpgradeResponse) {\n  initialize();\n\n  // make sure upgradeAllowed doesn't cause crashes if run with no pending response.\n  Buffer::OwnedImpl response(\n      \"HTTP/1.1 200 OK\\r\\nContent-Length: 5\\r\\nConnection: 
upgrade\\r\\nUpgrade: websocket\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(isPrematureResponseError(status));\n}\n\nTEST_P(Http1ClientConnectionImplTest, UpgradeResponse) {\n  initialize();\n\n  InSequence s;\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"},\n                                   {\":path\", \"/\"},\n                                   {\":authority\", \"host\"},\n                                   {\"connection\", \"upgrade\"},\n                                   {\"upgrade\", \"websocket\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  // Send upgrade headers\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, false));\n  Buffer::OwnedImpl response(\n      \"HTTP/1.1 200 OK\\r\\nContent-Length: 5\\r\\nConnection: upgrade\\r\\nUpgrade: websocket\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n\n  // Send body payload\n  Buffer::OwnedImpl expected_data1(\"12345\");\n  Buffer::OwnedImpl body(\"12345\");\n  EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data1), false));\n  status = codec_->dispatch(body);\n\n  // Send websocket payload\n  Buffer::OwnedImpl expected_data2(\"abcd\");\n  Buffer::OwnedImpl websocket_payload(\"abcd\");\n  EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), false));\n  status = codec_->dispatch(websocket_payload);\n  EXPECT_TRUE(status.ok());\n}\n\n// Same data as above, but make sure directDispatch immediately hands off any\n// outstanding data.\nTEST_P(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) {\n  initialize();\n\n  InSequence s;\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"},\n                                   {\":path\", \"/\"},\n 
                                  {\":authority\", \"host\"},\n                                   {\"connection\", \"upgrade\"},\n                                   {\"upgrade\", \"websocket\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  // Send upgrade headers\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, false));\n  Buffer::OwnedImpl expected_data(\"12345abcd\");\n  EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false));\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\nContent-Length: 5\\r\\nConnection: \"\n                             \"upgrade\\r\\nUpgrade: websocket\\r\\n\\r\\n12345abcd\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, ConnectResponse) {\n  initialize();\n\n  InSequence s;\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"CONNECT\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  // Send response headers\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, false));\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\nContent-Length: 5\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n\n  // Send body payload\n  Buffer::OwnedImpl expected_data1(\"12345\");\n  Buffer::OwnedImpl body(\"12345\");\n  EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data1), false));\n  status = codec_->dispatch(body);\n\n  // Send connect payload\n  Buffer::OwnedImpl expected_data2(\"abcd\");\n  Buffer::OwnedImpl connect_payload(\"abcd\");\n  EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), false));\n  status = codec_->dispatch(connect_payload);\n  EXPECT_TRUE(status.ok());\n}\n\n// Same data as above, but make sure directDispatch immediately hands off any\n// outstanding data.\nTEST_P(Http1ClientConnectionImplTest, 
ConnectResponseWithEarlyData) {\n  initialize();\n\n  InSequence s;\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"CONNECT\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  // Send response headers and payload\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, false));\n  Buffer::OwnedImpl expected_data(\"12345abcd\");\n  EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false)).Times(1);\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\n\\r\\n12345abcd\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, ConnectRejected) {\n  initialize();\n\n  InSequence s;\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"CONNECT\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, false));\n  Buffer::OwnedImpl expected_data(\"12345abcd\");\n  EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false));\n  Buffer::OwnedImpl response(\"HTTP/1.1 400 OK\\r\\n\\r\\n12345abcd\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ClientConnectionImplTest, WatermarkTest) {\n  EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10));\n  initialize();\n\n  InSequence s;\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  Http::MockStreamCallbacks stream_callbacks;\n  request_encoder.getStream().addCallbacks(stream_callbacks);\n\n  // Fake a call from the underlying Network::Connection and verify the stream is notified.\n  
EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  static_cast<ClientConnection*>(codec_.get())\n      ->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n\n  // Do a large write. This will result in the buffer temporarily going over the\n  // high watermark and then draining.\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark());\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  // Fake out the underlying Network::Connection buffer being drained.\n  EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark());\n  static_cast<ClientConnection*>(codec_.get())\n      ->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/3589. Upstream sends multiple\n// responses to the same request. The request causes the write buffer to go above high\n// watermark. When the 2nd response is received, we throw a premature response exception, and the\n// caller attempts to close the connection. 
This causes the network connection to attempt to write\n// pending data, even in the no flush scenario, which can cause us to go below low watermark\n// which then raises callbacks for a stream that no longer exists.\nTEST_P(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) {\n  initialize();\n\n  InSequence s;\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  Http::MockStreamCallbacks stream_callbacks;\n  request_encoder.getStream().addCallbacks(stream_callbacks);\n\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  // Fake a call from the underlying Network::Connection and verify the stream is notified.\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  static_cast<ClientConnection*>(codec_.get())\n      ->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, true));\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n\n  Buffer::OwnedImpl response2(\"HTTP/1.1 400 Bad Request\\r\\nContent-Length: 0\\r\\n\\r\\n\");\n  status = codec_->dispatch(response2);\n  EXPECT_TRUE(isPrematureResponseError(status));\n\n  // Fake a call for going below the low watermark. Make sure no stream callbacks get called.\n  EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(0);\n  static_cast<ClientConnection*>(codec_.get())\n      ->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/10655. 
Make sure we correctly\n// handle going below low watermark when closing the connection during a completion callback.\nTEST_P(Http1ClientConnectionImplTest, LowWatermarkDuringClose) {\n  initialize();\n\n  InSequence s;\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  Http::MockStreamCallbacks stream_callbacks;\n  request_encoder.getStream().addCallbacks(stream_callbacks);\n\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  // Fake a call from the underlying Network::Connection and verify the stream is notified.\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  static_cast<ClientConnection*>(codec_.get())\n      ->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, true))\n      .WillOnce(Invoke([&](ResponseHeaderMapPtr&, bool) {\n        // Fake a call for going below the low watermark. 
Make sure no stream callbacks get called.\n        EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(0);\n        static_cast<ClientConnection*>(codec_.get())\n            ->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n      }));\n  Buffer::OwnedImpl response(\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\\r\\n\");\n  auto status = codec_->dispatch(response);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http1ServerConnectionImplTest, LargeTrailersRejected) {\n  // Default limit of 60 KiB\n  std::string long_string = \"big: \" + std::string(60 * 1024, 'q') + \"\\r\\n\\r\\n\\r\\n\";\n  testTrailersExceedLimit(long_string, true);\n}\n\nTEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejected) {\n  // Construct partial headers with a long field name that exceeds the default limit of 60KiB.\n  std::string long_string = \"bigfield\" + std::string(60 * 1024, 'q');\n  testTrailersExceedLimit(long_string, true);\n}\n\n// Tests that the default limit for the number of request headers is 100.\nTEST_P(Http1ServerConnectionImplTest, ManyTrailersRejected) {\n  // Send a request with 101 headers.\n  testTrailersExceedLimit(createHeaderFragment(101) + \"\\r\\n\\r\\n\", true);\n}\n\nTEST_P(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) {\n  // Default limit of 60 KiB\n  std::string long_string = \"big: \" + std::string(60 * 1024, 'q') + \"\\r\\n\\r\\n\\r\\n\";\n  testTrailersExceedLimit(long_string, false);\n}\n\nTEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejectedIgnored) {\n  // Default limit of 60 KiB\n  std::string long_string = \"bigfield\" + std::string(60 * 1024, 'q') + \": value\\r\\n\\r\\n\\r\\n\";\n  testTrailersExceedLimit(long_string, false);\n}\n\n// Tests that the default limit for the number of request headers is 100.\nTEST_P(Http1ServerConnectionImplTest, ManyTrailersIgnored) {\n  // Send a request with 101 headers.\n  testTrailersExceedLimit(createHeaderFragment(101) + \"\\r\\n\\r\\n\", 
false);\n}\n\nTEST_P(Http1ServerConnectionImplTest, LargeRequestUrlRejected) {\n  initialize();\n\n  std::string exception_reason;\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n\n  // Default limit of 60 KiB\n  std::string long_url = \"/\" + std::string(60 * 1024, 'q');\n  Buffer::OwnedImpl buffer(\"GET \" + long_url + \" HTTP/1.1\\r\\n\");\n\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"headers size exceeds limit\");\n  EXPECT_EQ(\"http1.headers_too_large\", response_encoder->getStream().responseDetails());\n}\n\nTEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) {\n  // Default limit of 60 KiB\n  std::string long_string = \"big: \" + std::string(60 * 1024, 'q') + \"\\r\\n\";\n  testRequestHeadersExceedLimit(long_string, \"\");\n}\n\n// Tests that the default limit for the number of request headers is 100.\nTEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) {\n  // Send a request with 101 headers.\n  testRequestHeadersExceedLimit(createHeaderFragment(101), \"http1.too_many_headers\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) {\n  // Default limit of 60 KiB\n  initialize();\n\n  std::string exception_reason;\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n\n  std::string long_string = std::string(1024, 'q');\n  for (int i = 0; i < 59; i++) 
{\n    buffer = Buffer::OwnedImpl(fmt::format(\"big: {}\\r\\n\", long_string));\n    status = codec_->dispatch(buffer);\n  }\n  // the 60th 1kb header should induce overflow\n  buffer = Buffer::OwnedImpl(fmt::format(\"big: {}\\r\\n\", long_string));\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"headers size exceeds limit\");\n  EXPECT_EQ(\"http1.headers_too_large\", response_encoder->getStream().responseDetails());\n}\n\n// Tests that the 101th request header causes overflow with the default max number of request\n// headers.\nTEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) {\n  // Default limit of 100.\n  initialize();\n\n  std::string exception_reason;\n  NiceMock<MockRequestDecoder> decoder;\n  Http::ResponseEncoder* response_encoder = nullptr;\n  EXPECT_CALL(callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder = &encoder;\n        return decoder;\n      }));\n  Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n\n  // Dispatch 100 headers.\n  buffer = Buffer::OwnedImpl(createHeaderFragment(100));\n  status = codec_->dispatch(buffer);\n\n  // The final 101th header should induce overflow.\n  buffer = Buffer::OwnedImpl(\"header101:\\r\\n\\r\\n\");\n  EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _));\n  status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"headers size exceeds limit\");\n}\n\nTEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) {\n  max_request_headers_kb_ = 65;\n  std::string long_string = \"big: \" + std::string(64 * 1024, 'q') + \"\\r\\n\";\n  testRequestHeadersAccepted(long_string);\n}\n\nTEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) {\n  
max_request_headers_kb_ = 96;\n  std::string long_string = \"big: \" + std::string(95 * 1024, 'q') + \"\\r\\n\";\n  testRequestHeadersAccepted(long_string);\n}\n\n// Tests that the number of request headers is configurable.\nTEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) {\n  max_request_headers_count_ = 150;\n  // Create a request with 150 headers.\n  testRequestHeadersAccepted(createHeaderFragment(150));\n}\n\n// Tests that incomplete response headers of 80 kB header value fails.\nTEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  Buffer::OwnedImpl buffer(\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  std::string long_header = \"big: \" + std::string(80 * 1024, 'q');\n  buffer = Buffer::OwnedImpl(long_header);\n  status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"headers size exceeds limit\");\n}\n\n// Tests that incomplete response headers with a 80 kB header field fails.\nTEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) {\n  initialize();\n\n  NiceMock<MockRequestDecoder> decoder;\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  Buffer::OwnedImpl buffer(\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  std::string long_header 
= \"big: \" + std::string(80 * 1024, 'q');\n  buffer = Buffer::OwnedImpl(long_header);\n  status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"headers size exceeds limit\");\n}\n\n// Tests that the size of response headers for HTTP/1 must be under 80 kB.\nTEST_P(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  Buffer::OwnedImpl buffer(\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  EXPECT_TRUE(status.ok());\n  std::string long_header = \"big: \" + std::string(79 * 1024, 'q') + \"\\r\\n\";\n  buffer = Buffer::OwnedImpl(long_header);\n  status = codec_->dispatch(buffer);\n}\n\n// Regression test for CVE-2019-18801. Large method headers should not trigger\n// ASSERTs or ASAN, which they previously did.\nTEST_P(Http1ClientConnectionImplTest, LargeMethodRequestEncode) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  const std::string long_method = std::string(79 * 1024, 'a');\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{\n      {\":method\", long_method}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n  request_encoder.encodeHeaders(headers, true);\n  EXPECT_EQ(long_method + \" / HTTP/1.1\\r\\nhost: host\\r\\ncontent-length: 0\\r\\n\\r\\n\", output);\n}\n\n// As with LargeMethodEncode, but for the path header. 
This was not an issue\n// in CVE-2019-18801, but the related code does explicit size calculations on\n// both path and method (these are the two distinguished headers). So,\n// belt-and-braces.\nTEST_P(Http1ClientConnectionImplTest, LargePathRequestEncode) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  const std::string long_path = std::string(79 * 1024, '/');\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{\n      {\":method\", \"GET\"}, {\":path\", long_path}, {\":authority\", \"host\"}};\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n  request_encoder.encodeHeaders(headers, true);\n  EXPECT_EQ(\"GET \" + long_path + \" HTTP/1.1\\r\\nhost: host\\r\\ncontent-length: 0\\r\\n\\r\\n\", output);\n}\n\n// As with LargeMethodEncode, but for an arbitrary header. This was not an issue\n// in CVE-2019-18801.\nTEST_P(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  const std::string long_header_value = std::string(79 * 1024, 'a');\n  TestRequestHeaderMapImpl headers{\n      {\":method\", \"GET\"}, {\"foo\", long_header_value}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  std::string output;\n  ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output));\n  request_encoder.encodeHeaders(headers, true);\n  EXPECT_EQ(\"GET / HTTP/1.1\\r\\nhost: host\\r\\nfoo: \" + long_header_value +\n                \"\\r\\ncontent-length: 0\\r\\n\\r\\n\",\n            output);\n}\n\n// Exception called when the number of response headers exceeds the default value of 100.\nTEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) {\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = 
codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  Buffer::OwnedImpl buffer(\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  buffer = Buffer::OwnedImpl(createHeaderFragment(101) + \"\\r\\n\");\n\n  status = codec_->dispatch(buffer);\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_EQ(status.message(), \"headers size exceeds limit\");\n}\n\n// Tests that the number of response headers is configurable.\nTEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) {\n  max_response_headers_count_ = 152;\n\n  initialize();\n\n  NiceMock<MockResponseDecoder> response_decoder;\n  Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);\n  TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  request_encoder.encodeHeaders(headers, true);\n\n  Buffer::OwnedImpl buffer(\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\");\n  auto status = codec_->dispatch(buffer);\n  // Response already contains one header.\n  buffer = Buffer::OwnedImpl(createHeaderFragment(150) + \"\\r\\n\");\n  status = codec_->dispatch(buffer);\n}\n\nTEST_P(Http1ClientConnectionImplTest, TestResponseSplit0) {\n  testClientAllowChunkedContentLength(0, false);\n}\n\nTEST_P(Http1ClientConnectionImplTest, TestResponseSplit1) {\n  testClientAllowChunkedContentLength(1, false);\n}\n\nTEST_P(Http1ClientConnectionImplTest, TestResponseSplit100) {\n  testClientAllowChunkedContentLength(100, false);\n}\n\nTEST_P(Http1ClientConnectionImplTest, TestResponseSplitAllowChunkedLength0) {\n  testClientAllowChunkedContentLength(0, true);\n}\n\nTEST_P(Http1ClientConnectionImplTest, TestResponseSplitAllowChunkedLength1) {\n  testClientAllowChunkedContentLength(1, true);\n}\n\nTEST_P(Http1ClientConnectionImplTest, 
TestResponseSplitAllowChunkedLength100) {\n  testClientAllowChunkedContentLength(100, true);\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http1/conn_pool_test.cc",
    "content": "#include <memory>\n#include <vector>\n\n#include \"envoy/http/codec.h\"\n#include \"envoy/network/transport_socket.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/http/codec_client.h\"\n#include \"common/http/http1/conn_pool.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/raw_buffer_socket.h\"\n#include \"common/network/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/transport_socket_match.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::DoAll;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Property;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http1 {\nnamespace {\n\n/**\n * A test version of ConnPoolImpl that allows for mocking beneath the codec clients.\n */\nclass ConnPoolImplForTest : public ConnPoolImpl {\npublic:\n  ConnPoolImplForTest(Event::MockDispatcher& dispatcher,\n                      Upstream::ClusterInfoConstSharedPtr cluster,\n                      NiceMock<Event::MockSchedulableCallback>* upstream_ready_cb)\n      : ConnPoolImpl(dispatcher, random_, Upstream::makeTestHost(cluster, \"tcp://127.0.0.1:9000\"),\n                     Upstream::ResourcePriority::Default, nullptr, nullptr),\n        api_(Api::createApiForTest()), 
mock_dispatcher_(dispatcher),\n        mock_upstream_ready_cb_(upstream_ready_cb) {}\n\n  ~ConnPoolImplForTest() override {\n    EXPECT_EQ(0U, ready_clients_.size());\n    EXPECT_EQ(0U, busy_clients_.size());\n    EXPECT_EQ(0U, pending_streams_.size());\n  }\n\n  struct TestCodecClient {\n    Http::MockClientConnection* codec_;\n    Network::MockClientConnection* connection_;\n    CodecClient* codec_client_;\n    Event::MockTimer* connect_timer_;\n    Event::DispatcherPtr client_dispatcher_;\n  };\n\n  CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override {\n    // We expect to own the connection, but already have it, so just release it to prevent it from\n    // getting deleted.\n    data.connection_.release();\n    return CodecClientPtr{createCodecClient_()};\n  }\n\n  MOCK_METHOD(CodecClient*, createCodecClient_, ());\n  MOCK_METHOD(void, onClientDestroy, ());\n\n  void expectClientCreate(Protocol protocol = Protocol::Http11) {\n    test_clients_.emplace_back();\n    TestCodecClient& test_client = test_clients_.back();\n    test_client.connection_ = new NiceMock<Network::MockClientConnection>();\n    test_client.codec_ = new NiceMock<Http::MockClientConnection>();\n    test_client.connect_timer_ = new NiceMock<Event::MockTimer>(&mock_dispatcher_);\n    std::shared_ptr<Upstream::MockClusterInfo> cluster{new NiceMock<Upstream::MockClusterInfo>()};\n    test_client.client_dispatcher_ = api_->allocateDispatcher(\"test_thread\");\n    Network::ClientConnectionPtr connection{test_client.connection_};\n    test_client.codec_client_ = new CodecClientForTest(\n        CodecClient::Type::HTTP1, std::move(connection), test_client.codec_,\n        [this](CodecClient* codec_client) -> void {\n          for (auto i = test_clients_.begin(); i != test_clients_.end(); i++) {\n            if (i->codec_client_ == codec_client) {\n              onClientDestroy();\n              test_clients_.erase(i);\n              return;\n            }\n          
}\n        },\n        Upstream::makeTestHost(cluster, \"tcp://127.0.0.1:9000\"), *test_client.client_dispatcher_);\n    EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _));\n    EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(Return(test_client.connection_));\n    EXPECT_CALL(*this, createCodecClient_()).WillOnce(Return(test_client.codec_client_));\n    ON_CALL(*test_client.codec_, protocol()).WillByDefault(Return(protocol));\n  }\n\n  void expectEnableUpstreamReady() {\n    EXPECT_FALSE(upstream_ready_enabled_);\n    EXPECT_CALL(*mock_upstream_ready_cb_, scheduleCallbackCurrentIteration())\n        .Times(1)\n        .RetiresOnSaturation();\n  }\n\n  void expectAndRunUpstreamReady() {\n    EXPECT_TRUE(upstream_ready_enabled_);\n    mock_upstream_ready_cb_->invokeCallback();\n    EXPECT_FALSE(upstream_ready_enabled_);\n  }\n\n  Api::ApiPtr api_;\n  Event::MockDispatcher& mock_dispatcher_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Event::MockSchedulableCallback>* mock_upstream_ready_cb_;\n  std::vector<TestCodecClient> test_clients_;\n};\n\n/**\n * Test fixture for all connection pool tests.\n */\nclass Http1ConnPoolImplTest : public testing::Test {\npublic:\n  Http1ConnPoolImplTest()\n      : upstream_ready_cb_(new NiceMock<Event::MockSchedulableCallback>(&dispatcher_)),\n        conn_pool_(\n            std::make_unique<ConnPoolImplForTest>(dispatcher_, cluster_, upstream_ready_cb_)) {}\n\n  ~Http1ConnPoolImplTest() override {\n    EXPECT_EQ(\"\", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges()));\n  }\n\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};\n  NiceMock<Event::MockSchedulableCallback>* upstream_ready_cb_;\n  std::unique_ptr<ConnPoolImplForTest> conn_pool_;\n  NiceMock<Runtime::MockLoader> runtime_;\n};\n\n/**\n * Helper for dealing with an active test request.\n */\nstruct 
ActiveTestRequest {\n  enum class Type { Pending, CreateConnection, Immediate };\n\n  ActiveTestRequest(Http1ConnPoolImplTest& parent, size_t client_index, Type type)\n      : parent_(parent), client_index_(client_index) {\n    uint64_t active_rq_observed =\n        parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default).requests().count();\n    uint64_t current_rq_total = parent_.cluster_->stats_.upstream_rq_total_.value();\n    if (type == Type::CreateConnection) {\n      parent.conn_pool_->expectClientCreate();\n    }\n\n    if (type == Type::Immediate) {\n      expectNewStream();\n    }\n\n    handle_ = parent.conn_pool_->newStream(outer_decoder_, callbacks_);\n\n    if (type == Type::Immediate) {\n      EXPECT_EQ(nullptr, handle_);\n    } else {\n      EXPECT_NE(nullptr, handle_);\n    }\n\n    if (type == Type::CreateConnection) {\n      expectNewStream();\n      EXPECT_CALL(*parent_.conn_pool_->test_clients_[client_index_].connect_timer_, disableTimer());\n      parent.conn_pool_->test_clients_[client_index_].connection_->raiseEvent(\n          Network::ConnectionEvent::Connected);\n    }\n    if (type != Type::Pending) {\n      EXPECT_EQ(current_rq_total + 1, parent_.cluster_->stats_.upstream_rq_total_.value());\n      EXPECT_EQ(active_rq_observed + 1,\n                parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default)\n                    .requests()\n                    .count());\n    }\n  }\n\n  void completeResponse(bool with_body) {\n    // Test additional metric writes also.\n    Http::ResponseHeaderMapPtr response_headers(\n        new TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"x-envoy-upstream-canary\", \"true\"}});\n\n    inner_decoder_->decodeHeaders(std::move(response_headers), !with_body);\n    if (with_body) {\n      Buffer::OwnedImpl data;\n      inner_decoder_->decodeData(data, true);\n    }\n  }\n\n  void expectNewStream() {\n    
EXPECT_CALL(*parent_.conn_pool_->test_clients_[client_index_].codec_, newStream(_))\n        .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(request_encoder_)));\n    EXPECT_CALL(callbacks_.pool_ready_, ready());\n  }\n\n  void startRequest() {\n    callbacks_.outer_encoder_->encodeHeaders(\n        TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  }\n\n  Http1ConnPoolImplTest& parent_;\n  size_t client_index_;\n  NiceMock<MockResponseDecoder> outer_decoder_;\n  Http::ConnectionPool::Cancellable* handle_{};\n  NiceMock<MockRequestEncoder> request_encoder_;\n  Http::ResponseDecoder* inner_decoder_{};\n  ConnPoolCallbacks callbacks_;\n};\n\n/**\n * Verify that the pool's host is a member of the cluster the pool was constructed with.\n */\nTEST_F(Http1ConnPoolImplTest, Host) { EXPECT_EQ(cluster_.get(), &conn_pool_->host()->cluster()); }\n\n/**\n * Verify that connections are drained when requested.\n */\nTEST_F(Http1ConnPoolImplTest, DrainConnections) {\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n  InSequence s;\n\n  ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);\n  r1.startRequest();\n\n  ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection);\n  r2.startRequest();\n\n  r1.completeResponse(false);\n\n  // This will destroy the ready client and set requests remaining to 1 on the busy client.\n  conn_pool_->drainConnections();\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  // This will destroy the busy client when the response finishes.\n  r2.completeResponse(false);\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test all timing stats are set.\n */\nTEST_F(Http1ConnPoolImplTest, VerifyTimingStats) {\n  EXPECT_CALL(cluster_->stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_cx_connect_ms\"), _));\n  
EXPECT_CALL(cluster_->stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_cx_length_ms\"), _));\n\n  ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);\n  r1.startRequest();\n  r1.completeResponse(false);\n\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Verify that we set the ALPN fallback.\n */\nTEST_F(Http1ConnPoolImplTest, VerifyAlpnFallback) {\n  // Override the TransportSocketFactory with a mock version we can add expectations to.\n  auto factory = std::make_unique<Network::MockTransportSocketFactory>();\n  EXPECT_CALL(*factory, createTransportSocket(_))\n      .WillOnce(Invoke(\n          [](Network::TransportSocketOptionsSharedPtr options) -> Network::TransportSocketPtr {\n            EXPECT_TRUE(options != nullptr);\n            EXPECT_EQ(options->applicationProtocolFallback(),\n                      Http::Utility::AlpnNames::get().Http11);\n            return std::make_unique<Network::RawBufferSocket>();\n          }));\n  cluster_->transport_socket_matcher_ =\n      std::make_unique<NiceMock<Upstream::MockTransportSocketMatcher>>(std::move(factory));\n\n  new NiceMock<Event::MockSchedulableCallback>(&dispatcher_);\n\n  // Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at\n  // our test transport socket factory.\n  conn_pool_ = std::make_unique<ConnPoolImplForTest>(dispatcher_, cluster_, upstream_ready_cb_);\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate(Protocol::Http11);\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  EXPECT_CALL(callbacks.pool_failure_, ready());\n  
conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test that buffer limits are set.\n */\nTEST_F(Http1ConnPoolImplTest, VerifyBufferLimits) {\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192));\n  EXPECT_CALL(*conn_pool_->test_clients_.back().connection_, setBufferLimits(8192));\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  EXPECT_CALL(callbacks.pool_failure_, ready());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Verify that canceling pending connections within the callback works.\n */\nTEST_F(Http1ConnPoolImplTest, VerifyCancelInCallback) {\n  Http::ConnectionPool::Cancellable* handle1{};\n  // In this scenario, all connections must succeed, so when\n  // one fails, the others are canceled.\n  // Note: We rely on the fact that the implementation cancels the second request first,\n  // to simplify the test.\n  ConnPoolCallbacks callbacks1;\n  EXPECT_CALL(callbacks1.pool_failure_, ready()).Times(0);\n  ConnPoolCallbacks callbacks2;\n  EXPECT_CALL(callbacks2.pool_failure_, ready()).WillOnce(Invoke([&]() -> void {\n    handle1->cancel(Envoy::ConnectionPool::CancelPolicy::Default);\n  }));\n\n  NiceMock<MockResponseDecoder> outer_decoder;\n  // Create the first client.\n  conn_pool_->expectClientCreate();\n  handle1 = conn_pool_->newStream(outer_decoder, callbacks1);\n  ASSERT_NE(nullptr, handle1);\n\n  // Create the second client.\n  Http::ConnectionPool::Cancellable* handle2 = conn_pool_->newStream(outer_decoder, callbacks2);\n  ASSERT_NE(nullptr, handle2);\n\n  // Simulate connection 
failure.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Tests a request that generates a new connection, completes, and then a second request that uses\n * the same connection.\n */\nTEST_F(Http1ConnPoolImplTest, MultipleRequestAndResponse) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);\n  r1.startRequest();\n  r1.completeResponse(false);\n\n  // Request 2 should not.\n  ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Immediate);\n  r2.startRequest();\n  r2.completeResponse(true);\n\n  // Cause the connection to go away.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test when we overflow max pending requests.\n */\nTEST_F(Http1ConnPoolImplTest, MaxPendingRequests) {\n  cluster_->resetResourceManager(1, 1, 1024, 1, 1);\n\n  EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_pending_open_.value());\n\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  NiceMock<MockResponseDecoder> outer_decoder2;\n  ConnPoolCallbacks callbacks2;\n  EXPECT_CALL(callbacks2.pool_failure_, ready());\n  Http::ConnectionPool::Cancellable* handle2 = conn_pool_->newStream(outer_decoder2, callbacks2);\n  EXPECT_EQ(nullptr, handle2);\n  EXPECT_EQ(callbacks2.reason_, ConnectionPool::PoolFailureReason::Overflow);\n\n  EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.rq_pending_open_.value());\n\n  handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default);\n\n  EXPECT_CALL(*conn_pool_, 
onClientDestroy());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_overflow_.value());\n}\n\n/**\n * Tests a connection failure before a request is bound which should result in the pending request\n * getting purged.\n */\nTEST_F(Http1ConnPoolImplTest, ConnectFailure) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(callbacks.pool_failure_, ready());\n  EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value());\n}\n\n/**\n * Tests that connection creation time is recorded correctly even in cases where\n * there are multiple pending connection creation attempts to the same upstream.\n */\nTEST_F(Http1ConnPoolImplTest, MeasureConnectTime) {\n  constexpr uint64_t sleep1_ms = 20;\n  constexpr uint64_t sleep2_ms = 10;\n  constexpr uint64_t sleep3_ms = 5;\n  Event::SimulatedTimeSystem simulated_time;\n\n  // Allow concurrent creation of 2 upstream connections.\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n\n  InSequence s;\n\n  // Start the first connect attempt.\n  conn_pool_->expectClientCreate();\n  ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending);\n\n  // Move time forward and start the second connect attempt.\n  
simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep1_ms));\n  conn_pool_->expectClientCreate();\n  ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::Pending);\n\n  // Move time forward, signal that the first connect completed and verify the time to connect.\n  uint64_t upstream_cx_connect_ms1 = 0;\n  simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep2_ms));\n  EXPECT_CALL(cluster_->stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_cx_connect_ms\"), _))\n      .WillOnce(SaveArg<1>(&upstream_cx_connect_ms1));\n  r1.expectNewStream();\n  EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  EXPECT_EQ(sleep1_ms + sleep2_ms, upstream_cx_connect_ms1);\n\n  // Move time forward, signal that the second connect completed and verify the time to connect.\n  uint64_t upstream_cx_connect_ms2 = 0;\n  simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep3_ms));\n  EXPECT_CALL(cluster_->stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_cx_connect_ms\"), _))\n      .WillOnce(SaveArg<1>(&upstream_cx_connect_ms2));\n  r2.expectNewStream();\n  EXPECT_CALL(*conn_pool_->test_clients_[1].connect_timer_, disableTimer());\n  conn_pool_->test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2);\n\n  // Cleanup, cause the connections to go away.\n  while (!conn_pool_->test_clients_.empty()) {\n    EXPECT_CALL(\n        cluster_->stats_store_,\n        deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_cx_length_ms\"), _));\n    EXPECT_CALL(*conn_pool_, onClientDestroy());\n    conn_pool_->test_clients_.front().connection_->raiseEvent(\n        Network::ConnectionEvent::RemoteClose);\n    dispatcher_.clearDeferredDeleteList();\n  }\n}\n\n/**\n * Tests a 
connect timeout. Also test that we can add a new request during ejection processing.\n */\nTEST_F(Http1ConnPoolImplTest, ConnectTimeout) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder1;\n  ConnPoolCallbacks callbacks1;\n  conn_pool_->expectClientCreate();\n  EXPECT_NE(nullptr, conn_pool_->newStream(outer_decoder1, callbacks1));\n\n  NiceMock<MockResponseDecoder> outer_decoder2;\n  ConnPoolCallbacks callbacks2;\n  EXPECT_CALL(callbacks1.pool_failure_, ready()).WillOnce(Invoke([&]() -> void {\n    conn_pool_->expectClientCreate();\n    EXPECT_NE(nullptr, conn_pool_->newStream(outer_decoder2, callbacks2));\n  }));\n\n  conn_pool_->test_clients_[0].connect_timer_->invokeCallback();\n\n  EXPECT_CALL(callbacks2.pool_failure_, ready());\n  conn_pool_->test_clients_[1].connect_timer_->invokeCallback();\n\n  EXPECT_CALL(*conn_pool_, onClientDestroy()).Times(2);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value());\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_fail_.value());\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_timeout_.value());\n}\n\n/**\n * Test cancelling before the request is bound to a connection.\n */\nTEST_F(Http1ConnPoolImplTest, CancelBeforeBound) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default);\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Cause the connection to go away.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  
dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test cancelling with CloseExcess\n */\nTEST_F(Http1ConnPoolImplTest, CancelExcessBeforeBound) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  handle->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  // Unlike CancelBeforeBound there is no need to raise a close event to destroy the connection.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test an upstream disconnection while there is a bound request.\n */\nTEST_F(Http1ConnPoolImplTest, DisconnectWhileBound) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // We should get a reset callback when the connection disconnects.\n  Http::MockStreamCallbacks stream_callbacks;\n  EXPECT_CALL(stream_callbacks, onResetStream(StreamResetReason::ConnectionTermination, _));\n  request_encoder.getStream().addCallbacks(stream_callbacks);\n\n  // Kill the connection while it has an active request.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  
conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test that we correctly handle reaching max connections.\n */\nTEST_F(Http1ConnPoolImplTest, MaxConnections) {\n  InSequence s;\n\n  EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.cx_open_.value());\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder1;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder1, callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  // Request 2 should not kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder2;\n  ConnPoolCallbacks callbacks2;\n  handle = conn_pool_->newStream(outer_decoder2, callbacks2);\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value());\n  EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.cx_open_.value());\n\n  EXPECT_NE(nullptr, handle);\n\n  // Connect event will bind to request 1.\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Finishing request 1 will immediately bind to request 2.\n  conn_pool_->expectEnableUpstreamReady();\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks2.pool_ready_, ready());\n\n  callbacks.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  
inner_decoder->decodeHeaders(std::move(response_headers), true);\n\n  conn_pool_->expectAndRunUpstreamReady();\n  callbacks2.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list.\n  response_headers = std::make_unique<TestResponseHeaderMapImpl>(\n      std::initializer_list<std::pair<std::string, std::string>>{{\":status\", \"200\"}});\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n\n  // Cause the connection to go away.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test when upstream closes connection without 'connection: close' like\n * https://github.com/envoyproxy/envoy/pull/2715\n */\nTEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder1;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder1, callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  // Request 2 should not kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder2;\n  ConnPoolCallbacks callbacks2;\n  handle = conn_pool_->newStream(outer_decoder2, callbacks2);\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value());\n\n  EXPECT_NE(nullptr, handle);\n\n  // Connect event will bind to request 1.\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  
conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Finishing request 1 will schedule binding the connection to request 2.\n  conn_pool_->expectEnableUpstreamReady();\n\n  callbacks.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n\n  // Cause the connection to go away.\n  conn_pool_->expectClientCreate();\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  conn_pool_->expectAndRunUpstreamReady();\n\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks2.pool_ready_, ready());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  callbacks2.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  // N.B. 
clang_tidy insists that we use std::make_unique which can not infer std::initialize_list.\n  response_headers = std::make_unique<TestResponseHeaderMapImpl>(\n      std::initializer_list<std::pair<std::string, std::string>>{{\":status\", \"200\"}});\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test when upstream sends us 'connection: close'\n */\nTEST_F(Http1ConnPoolImplTest, ConnectionCloseHeader) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  callbacks.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // Response with 'connection: close' which should cause the connection to go away.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  ResponseHeaderMapPtr response_headers(\n      new TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"Connection\", \"Close\"}});\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value());\n}\n\n/**\n * Test when upstream sends us 'proxy-connection: close'\n 
*/\nTEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeader) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  callbacks.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  // Response with 'proxy-connection: close' which should cause the connection to go away, even if\n  // there are other tokens in that header.\n  ResponseHeaderMapPtr response_headers(\n      new TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"Proxy-Connection\", \"Close, foo\"}});\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value());\n}\n\n/**\n * Test legacy behavior when upstream sends us 'proxy-connection: close'\n */\nTEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeaderLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fixed_connection_close\", \"false\"}});\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = 
conn_pool_->newStream(outer_decoder, callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  callbacks.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // Response with 'proxy-connection: close' which should cause the connection to go away, even if\n  // there are other tokens in that header.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  ResponseHeaderMapPtr response_headers(\n      new TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"Proxy-Connection\", \"Close\"}});\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value());\n}\n\n/**\n * Test when upstream is HTTP/1.0 and does not send 'connection: keep-alive'\n */\nTEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAlive) {\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate(Protocol::Http10);\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  
callbacks.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // Response without 'connection: keep-alive' which should cause the connection to go away.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  ResponseHeaderMapPtr response_headers(\n      new TestResponseHeaderMapImpl{{\":protocol\", \"HTTP/1.0\"}, {\":status\", \"200\"}});\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value());\n}\n\n/**\n * Test legacy behavior when upstream is HTTP/1.0 and does not send 'connection: keep-alive'\n */\nTEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAliveLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fixed_connection_close\", \"false\"}});\n  InSequence s;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate(Protocol::Http10);\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  callbacks.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // Response without 'connection: keep-alive' which should cause the connection to go away.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  ResponseHeaderMapPtr response_headers(\n      new 
TestResponseHeaderMapImpl{{\":protocol\", \"HTTP/1.0\"}, {\":status\", \"200\"}});\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value());\n}\n\n/**\n * Test when we reach max requests per connection.\n */\nTEST_F(Http1ConnPoolImplTest, MaxRequestsPerConnection) {\n  InSequence s;\n\n  cluster_->max_requests_per_connection_ = 1;\n\n  // Request 1 should kick off a new connection.\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  callbacks.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // Response with 'connection: close' which should cause the connection to go away.\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  inner_decoder->decodeHeaders(std::move(response_headers), true);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_max_requests_.value());\n}\n\nTEST_F(Http1ConnPoolImplTest, ConcurrentConnections) {\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n  InSequence s;\n\n  ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);\n  
r1.startRequest();\n\n  ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection);\n  r2.startRequest();\n\n  ActiveTestRequest r3(*this, 0, ActiveTestRequest::Type::Pending);\n\n  // Finish r1, which gets r3 going.\n  conn_pool_->expectEnableUpstreamReady();\n  r3.expectNewStream();\n\n  r1.completeResponse(false);\n  conn_pool_->expectAndRunUpstreamReady();\n  r3.startRequest();\n  EXPECT_EQ(3U, cluster_->stats_.upstream_rq_total_.value());\n\n  r2.completeResponse(false);\n  r3.completeResponse(false);\n\n  // Disconnect both clients.\n  EXPECT_CALL(*conn_pool_, onClientDestroy()).Times(2);\n  conn_pool_->test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\nTEST_F(Http1ConnPoolImplTest, DrainCallback) {\n  InSequence s;\n  ReadyWatcher drained;\n\n  EXPECT_CALL(drained, ready());\n  conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); });\n\n  ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);\n  ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending);\n  r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default);\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value());\n\n  EXPECT_CALL(drained, ready());\n  r1.startRequest();\n  r1.completeResponse(false);\n\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n}\n\n// Test draining a connection pool that has a pending connection.\nTEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) {\n  InSequence s;\n  ReadyWatcher drained;\n\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = 
conn_pool_->newStream(outer_decoder, callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); });\n  EXPECT_CALL(*conn_pool_->test_clients_[0].connection_,\n              close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(drained, ready());\n  handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default);\n\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n}\n\nTEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) {\n  InSequence s;\n\n  NiceMock<MockResponseDecoder> outer_decoder;\n  ConnPoolCallbacks callbacks;\n  conn_pool_->expectClientCreate();\n  Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  NiceMock<MockRequestEncoder> request_encoder;\n  ResponseDecoder* inner_decoder;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder)));\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n  EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  callbacks.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  inner_decoder->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false);\n  Buffer::OwnedImpl dummy_data(\"12345\");\n  inner_decoder->decodeData(dummy_data, false);\n\n  Buffer::OwnedImpl empty_data;\n  EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, dispatch(BufferEqual(&empty_data)))\n      .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {\n        // Simulate the onResponseComplete call to decodeData since dispatch is mocked out.\n        inner_decoder->decodeData(data, true);\n        return Http::okStatus();\n      }));\n\n  
EXPECT_CALL(*conn_pool_->test_clients_[0].connection_,\n              close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\nTEST_F(Http1ConnPoolImplTest, NoActiveConnectionsByDefault) {\n  EXPECT_FALSE(conn_pool_->hasActiveConnections());\n}\n\nTEST_F(Http1ConnPoolImplTest, ActiveRequestHasActiveConnectionsTrue) {\n  ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);\n  r1.startRequest();\n\n  EXPECT_TRUE(conn_pool_->hasActiveConnections());\n\n  // cleanup\n  r1.completeResponse(false);\n  conn_pool_->drainConnections();\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n}\n\nTEST_F(Http1ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnections) {\n  ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection);\n  r1.startRequest();\n  r1.completeResponse(false);\n\n  EXPECT_FALSE(conn_pool_->hasActiveConnections());\n\n  conn_pool_->drainConnections();\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n}\n\nTEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) {\n  conn_pool_->expectClientCreate();\n  ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending);\n\n  EXPECT_TRUE(conn_pool_->hasActiveConnections());\n\n  EXPECT_CALL(*conn_pool_, onClientDestroy());\n  r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default);\n  EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value());\n  conn_pool_->drainConnections();\n  conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, 
cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_local_.value());\n}\n\n} // namespace\n} // namespace Http1\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http1/header_formatter_test.cc",
    "content": "#include \"common/http/http1/header_formatter.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http1 {\nTEST(ProperCaseHeaderKeyFormatterTest, Formatting) {\n  ProperCaseHeaderKeyFormatter formatter;\n\n  const std::string downcased = \"content-type\";\n  EXPECT_EQ(formatter.format(downcased), \"Content-Type\");\n\n  const std::string special_characters = \"a!d#sa-lo\";\n  EXPECT_EQ(formatter.format(special_characters), \"A!D#Sa-Lo\");\n\n  const std::string empty;\n  EXPECT_EQ(formatter.format(empty), \"\");\n\n  const std::string single_character = \"a\";\n  EXPECT_EQ(formatter.format(single_character), \"A\");\n}\n} // namespace Http1\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nCODEC_TEST_DEPS = [\n    \":codec_impl_test_util\",\n    \"//source/common/event:dispatcher_lib\",\n    \"//source/common/http:exception_lib\",\n    \"//source/common/http:header_map_lib\",\n    \"//source/common/http:header_utility_lib\",\n    \"//source/common/http/http2:codec_legacy_lib\",\n    \"//source/common/http/http2:codec_lib\",\n    \"//source/common/runtime:runtime_lib\",\n    \"//source/common/stats:stats_lib\",\n    \"//test/common/http:common_lib\",\n    \"//test/common/http/http2:http2_frame\",\n    \"//test/common/stats:stat_test_utility_lib\",\n    \"//test/mocks/http:http_mocks\",\n    \"//test/mocks/init:init_mocks\",\n    \"//test/mocks/local_info:local_info_mocks\",\n    \"//test/mocks/network:network_mocks\",\n    \"//test/mocks/protobuf:protobuf_mocks\",\n    \"//test/mocks/thread_local:thread_local_mocks\",\n    \"//test/mocks/upstream:transport_socket_match_mocks\",\n    \"//test/mocks/upstream:upstream_mocks\",\n    \"//test/test_common:logging_lib\",\n    \"//test/test_common:registry_lib\",\n    \"//test/test_common:test_runtime_lib\",\n    \"//test/test_common:utility_lib\",\n]\n\nenvoy_cc_test(\n    name = \"codec_impl_test\",\n    srcs = [\"codec_impl_test.cc\"],\n    # The default codec is the legacy codec. Override runtime flag for testing new codec.\n    args = [\n        \"--runtime-feature-override-for-tests=envoy.reloadable_features.new_codec_behavior\",\n    ],\n    shard_count = 5,\n    deps = CODEC_TEST_DEPS,\n)\n\nenvoy_cc_test(\n    name = \"codec_impl_legacy_test\",\n    srcs = [\"codec_impl_test.cc\"],\n    # The default codec is the legacy codec. 
Verify the runtime flag for the new codec is disabled.\n    args = [\n        \"--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior\",\n    ],\n    shard_count = 5,\n    deps = CODEC_TEST_DEPS,\n)\n\nenvoy_cc_test_library(\n    name = \"codec_impl_test_util\",\n    hdrs = [\"codec_impl_test_util.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//source/common/http/http2:codec_legacy_lib\",\n        \"//source/common/http/http2:codec_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"conn_pool_test\",\n    srcs = [\"conn_pool_test.cc\"],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/http/http2:conn_pool_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/common/http:common_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:transport_socket_match_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"http2_frame\",\n    srcs = [\"http2_frame.cc\"],\n    hdrs = [\"http2_frame.h\"],\n    external_deps = [\n        \"nghttp2\",\n    ],\n    deps = [\n        \"//include/envoy/http:metadata_interface_with_external_headers\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/common:macros\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"frame_replay_lib\",\n    srcs = [\"frame_replay.cc\"],\n    hdrs = [\"frame_replay.h\"],\n    deps = [\n        \"//source/common/common:hex_lib\",\n        
\"//source/common/common:macros\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/http/http2:codec_lib\",\n        \"//test/common/http:common_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"frame_replay_test\",\n    srcs = [\"frame_replay_test.cc\"],\n    data = [\n        \"request_header_corpus/simple_example_huffman\",\n        \"request_header_corpus/simple_example_plain\",\n        \"response_header_corpus/simple_example_huffman\",\n        \"response_header_corpus/simple_example_plain\",\n    ],\n    deps = [\n        \":frame_replay_lib\",\n        \"//test/common/http/http2:codec_impl_test_util\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"metadata_encoder_decoder_test\",\n    srcs = [\"metadata_encoder_decoder_test.cc\"],\n    external_deps = [\n        \"nghttp2\",\n    ],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http/http2:metadata_decoder_lib\",\n        \"//source/common/http/http2:metadata_encoder_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//test/common/http/http2:http2_frame\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http2_frame_test\",\n    srcs = [\"http2_frame_test.cc\"],\n    deps = [\n        \"//include/envoy/http:metadata_interface\",\n        \"//source/common/http/http2:metadata_encoder_lib\",\n        \"//test/common/http/http2:http2_frame\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"protocol_constraints_test\",\n    srcs = [\"protocol_constraints_test.cc\"],\n    deps = [\n        \"//source/common/http/http2:protocol_constraints_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/test_common:test_runtime_lib\",\n    
],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"response_header_fuzz_test\",\n    srcs = [\"response_header_fuzz_test.cc\"],\n    corpus = \"response_header_corpus\",\n    deps = [\n        \":frame_replay_lib\",\n        \"//test/common/http/http2:codec_impl_test_util\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"request_header_fuzz_test\",\n    srcs = [\"request_header_fuzz_test.cc\"],\n    corpus = \"request_header_corpus\",\n    deps = [\n        \":frame_replay_lib\",\n        \"//test/common/http/http2:codec_impl_test_util\",\n    ],\n)\n"
  },
  {
    "path": "test/common/http/http2/codec_impl_test.cc",
    "content": "#include <cstdint>\n#include <string>\n\n#include \"envoy/http/codec.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/http/exception.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/http2/codec_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/common/http/http2/http2_frame.h\"\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"codec_impl_test_util.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::AtLeast;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nusing Http2SettingsTuple = ::testing::tuple<uint32_t, uint32_t, uint32_t, uint32_t>;\nusing Http2SettingsTestParam = ::testing::tuple<Http2SettingsTuple, Http2SettingsTuple>;\nnamespace CommonUtility = ::Envoy::Http2::Utility;\n\nclass Http2CodecImplTestFixture {\npublic:\n  // The Http::Connection::dispatch method does not throw (any more). However unit tests in this\n  // file use codecs for sending test data through mock network connections to the codec under test.\n  // It is infeasible to plumb error codes returned by the dispatch() method of the codecs under\n  // test, through mock connections and sending codec. 
As a result error returned by the dispatch\n  // method of the codec under test invoked by the ConnectionWrapper is thrown as an exception. Note\n  // that exception goes only through the mock network connection and sending codec, i.e. it is\n  // thrown only through the test harness code. Specific exception types are to distinguish error\n  // codes returned when processing requests or responses.\n  // TODO(yanavlasov): modify the code to verify test expectations at the point of calling codec\n  //                   under test through the ON_CALL expectations in the\n  //                   setupDefaultConnectionMocks() method. This will make the exceptions below\n  //                   unnecessary.\n  struct ClientCodecError : public std::runtime_error {\n    ClientCodecError(Http::Status&& status)\n        : std::runtime_error(std::string(status.message())), status_(std::move(status)) {}\n    const char* what() const noexcept override { return status_.message().data(); }\n    const Http::Status status_;\n  };\n\n  struct ServerCodecError : public std::runtime_error {\n    ServerCodecError(Http::Status&& status)\n        : std::runtime_error(std::string(status.message())), status_(std::move(status)) {}\n    const char* what() const noexcept override { return status_.message().data(); }\n    const Http::Status status_;\n  };\n\n  struct ConnectionWrapper {\n    Http::Status dispatch(const Buffer::Instance& data, Connection& connection) {\n      Http::Status status = Http::okStatus();\n      buffer_.add(data);\n      if (!dispatching_) {\n        while (buffer_.length() > 0) {\n          dispatching_ = true;\n          status = connection.dispatch(buffer_);\n          if (!status.ok()) {\n            // Exit early if we hit an error status.\n            return status;\n          }\n          dispatching_ = false;\n        }\n      }\n      return status;\n    }\n\n    bool dispatching_{};\n    Buffer::OwnedImpl buffer_;\n  };\n\n  enum SettingsTupleIndex {\n    
HpackTableSize = 0,\n    MaxConcurrentStreams,\n    InitialStreamWindowSize,\n    InitialConnectionWindowSize\n  };\n\n  Http2CodecImplTestFixture() = default;\n  Http2CodecImplTestFixture(Http2SettingsTuple client_settings, Http2SettingsTuple server_settings)\n      : client_settings_(client_settings), server_settings_(server_settings) {\n    // Make sure we explicitly test for stream flush timer creation.\n    EXPECT_CALL(client_connection_.dispatcher_, createTimer_(_)).Times(0);\n    EXPECT_CALL(server_connection_.dispatcher_, createTimer_(_)).Times(0);\n  }\n  virtual ~Http2CodecImplTestFixture() {\n    client_connection_.dispatcher_.clearDeferredDeleteList();\n    if (client_ != nullptr) {\n      client_.reset();\n      EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, \"http2.streams_active\")->value());\n      EXPECT_EQ(0,\n                TestUtility::findGauge(client_stats_store_, \"http2.pending_send_bytes\")->value());\n    }\n    server_connection_.dispatcher_.clearDeferredDeleteList();\n    if (server_ != nullptr) {\n      server_.reset();\n      EXPECT_EQ(0, TestUtility::findGauge(server_stats_store_, \"http2.streams_active\")->value());\n      EXPECT_EQ(0,\n                TestUtility::findGauge(server_stats_store_, \"http2.pending_send_bytes\")->value());\n    }\n  }\n\n  virtual void initialize() {\n    http2OptionsFromTuple(client_http2_options_, client_settings_);\n    http2OptionsFromTuple(server_http2_options_, server_settings_);\n    if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")) {\n      client_ = std::make_unique<TestClientConnectionImplNew>(\n          client_connection_, client_callbacks_, client_stats_store_, client_http2_options_,\n          max_request_headers_kb_, max_response_headers_count_,\n          ProdNghttp2SessionFactoryNew::get());\n      server_ = std::make_unique<TestServerConnectionImplNew>(\n          server_connection_, server_callbacks_, server_stats_store_, 
server_http2_options_,\n          max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_);\n    } else {\n      client_ = std::make_unique<TestClientConnectionImplLegacy>(\n          client_connection_, client_callbacks_, client_stats_store_, client_http2_options_,\n          max_request_headers_kb_, max_response_headers_count_,\n          ProdNghttp2SessionFactoryLegacy::get());\n      server_ = std::make_unique<TestServerConnectionImplLegacy>(\n          server_connection_, server_callbacks_, server_stats_store_, server_http2_options_,\n          max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_);\n    }\n    request_encoder_ = &client_->newStream(response_decoder_);\n    setupDefaultConnectionMocks();\n\n    EXPECT_CALL(server_callbacks_, newStream(_, _))\n        .WillRepeatedly(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n          response_encoder_ = &encoder;\n          encoder.getStream().addCallbacks(server_stream_callbacks_);\n          encoder.getStream().setFlushTimeout(std::chrono::milliseconds(30000));\n          return request_decoder_;\n        }));\n  }\n\n  void setupDefaultConnectionMocks() {\n    ON_CALL(client_connection_, write(_, _))\n        .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {\n          if (corrupt_metadata_frame_) {\n            corruptMetadataFramePayload(data);\n          }\n          auto status = server_wrapper_.dispatch(data, *server_);\n          if (!status.ok()) {\n            throw ServerCodecError(std::move(status));\n          }\n        }));\n    ON_CALL(server_connection_, write(_, _))\n        .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {\n          auto status = client_wrapper_.dispatch(data, *client_);\n          if (!status.ok()) {\n            throw ClientCodecError(std::move(status));\n          }\n        }));\n  }\n\n  void 
http2OptionsFromTuple(envoy::config::core::v3::Http2ProtocolOptions& options,\n                             const absl::optional<const Http2SettingsTuple>& tp) {\n    options.mutable_hpack_table_size()->set_value(\n        (tp.has_value()) ? ::testing::get<SettingsTupleIndex::HpackTableSize>(*tp)\n                         : CommonUtility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE);\n    options.mutable_max_concurrent_streams()->set_value(\n        (tp.has_value()) ? ::testing::get<SettingsTupleIndex::MaxConcurrentStreams>(*tp)\n                         : CommonUtility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS);\n    options.mutable_initial_stream_window_size()->set_value(\n        (tp.has_value()) ? ::testing::get<SettingsTupleIndex::InitialStreamWindowSize>(*tp)\n                         : CommonUtility::OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE);\n    options.mutable_initial_connection_window_size()->set_value(\n        (tp.has_value()) ? ::testing::get<SettingsTupleIndex::InitialConnectionWindowSize>(*tp)\n                         : CommonUtility::OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE);\n    options.set_allow_metadata(allow_metadata_);\n    options.mutable_override_stream_error_on_invalid_http_message()->set_value(\n        stream_error_on_invalid_http_messaging_);\n    options.mutable_max_outbound_frames()->set_value(max_outbound_frames_);\n    options.mutable_max_outbound_control_frames()->set_value(max_outbound_control_frames_);\n    options.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(\n        max_consecutive_inbound_frames_with_empty_payload_);\n    options.mutable_max_inbound_priority_frames_per_stream()->set_value(\n        max_inbound_priority_frames_per_stream_);\n    options.mutable_max_inbound_window_update_frames_per_data_frame_sent()->set_value(\n        max_inbound_window_update_frames_per_data_frame_sent_);\n  }\n\n  // corruptMetadataFramePayload assumes data contains at least 10 bytes of the 
beginning of a\n  // frame.\n  void corruptMetadataFramePayload(Buffer::Instance& data) {\n    const size_t length = data.length();\n    const size_t corrupt_start = 10;\n    if (length < corrupt_start || length > METADATA_MAX_PAYLOAD_SIZE) {\n      ENVOY_LOG_MISC(error, \"data size too big or too small\");\n      return;\n    }\n    corruptAtOffset(data, corrupt_start, 0xff);\n  }\n\n  void corruptAtOffset(Buffer::Instance& data, size_t index, char new_value) {\n    if (data.length() == 0) {\n      return;\n    }\n    reinterpret_cast<uint8_t*>(data.linearize(data.length()))[index % data.length()] = new_value;\n  }\n\n  void expectDetailsRequest(const absl::string_view details) {\n    EXPECT_EQ(details, request_encoder_->getStream().responseDetails());\n  }\n\n  void expectDetailsResponse(const absl::string_view details) {\n    EXPECT_EQ(details, response_encoder_->getStream().responseDetails());\n  }\n\n  absl::optional<const Http2SettingsTuple> client_settings_;\n  absl::optional<const Http2SettingsTuple> server_settings_;\n  bool allow_metadata_ = false;\n  bool stream_error_on_invalid_http_messaging_ = false;\n  Stats::TestUtil::TestStore client_stats_store_;\n  envoy::config::core::v3::Http2ProtocolOptions client_http2_options_;\n  NiceMock<Network::MockConnection> client_connection_;\n  MockConnectionCallbacks client_callbacks_;\n  std::unique_ptr<TestClientConnection> client_;\n  ConnectionWrapper client_wrapper_;\n  Stats::TestUtil::TestStore server_stats_store_;\n  envoy::config::core::v3::Http2ProtocolOptions server_http2_options_;\n  NiceMock<Network::MockConnection> server_connection_;\n  MockServerConnectionCallbacks server_callbacks_;\n  std::unique_ptr<TestServerConnection> server_;\n  ConnectionWrapper server_wrapper_;\n  MockResponseDecoder response_decoder_;\n  RequestEncoder* request_encoder_;\n  MockRequestDecoder request_decoder_;\n  ResponseEncoder* response_encoder_{};\n  MockStreamCallbacks server_stream_callbacks_;\n  // Corrupt a metadata 
frame payload.\n  bool corrupt_metadata_frame_ = false;\n\n  uint32_t max_request_headers_kb_ = Http::DEFAULT_MAX_REQUEST_HEADERS_KB;\n  uint32_t max_request_headers_count_ = Http::DEFAULT_MAX_HEADERS_COUNT;\n  uint32_t max_response_headers_count_ = Http::DEFAULT_MAX_HEADERS_COUNT;\n  uint32_t max_outbound_frames_ = CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES;\n  uint32_t max_outbound_control_frames_ =\n      CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES;\n  uint32_t max_consecutive_inbound_frames_with_empty_payload_ =\n      CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD;\n  uint32_t max_inbound_priority_frames_per_stream_ =\n      CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM;\n  uint32_t max_inbound_window_update_frames_per_data_frame_sent_ =\n      CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT;\n  envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n      headers_with_underscores_action_{envoy::config::core::v3::HttpProtocolOptions::ALLOW};\n};\n\nclass Http2CodecImplTest : public ::testing::TestWithParam<Http2SettingsTestParam>,\n                           protected Http2CodecImplTestFixture {\npublic:\n  Http2CodecImplTest()\n      : Http2CodecImplTestFixture(::testing::get<0>(GetParam()), ::testing::get<1>(GetParam())) {}\n\nprotected:\n  void priorityFlood() {\n    initialize();\n\n    TestRequestHeaderMapImpl request_headers;\n    HttpTestUtility::addDefaultHeaders(request_headers, \"POST\");\n    EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n    request_encoder_->encodeHeaders(request_headers, false);\n\n    nghttp2_priority_spec spec = {0, 10, 0};\n    // HTTP/2 codec adds 1 to the number of active streams when computing PRIORITY frames limit\n    constexpr uint32_t max_allowed =\n        2 * CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM;\n  
  for (uint32_t i = 0; i < max_allowed + 1; ++i) {\n      EXPECT_EQ(0, nghttp2_submit_priority(client_->session(), NGHTTP2_FLAG_NONE, 1, &spec));\n    }\n  }\n\n  void windowUpdateFlood() {\n    initialize();\n\n    TestRequestHeaderMapImpl request_headers;\n    HttpTestUtility::addDefaultHeaders(request_headers);\n    EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n    request_encoder_->encodeHeaders(request_headers, true);\n\n    // Send one DATA frame back\n    EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n    EXPECT_CALL(response_decoder_, decodeData(_, false));\n    TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n    response_encoder_->encodeHeaders(response_headers, false);\n    Buffer::OwnedImpl data(\"0\");\n    EXPECT_NO_THROW(response_encoder_->encodeData(data, false));\n\n    // See the limit formula in the\n    // `Envoy::Http::Http2::ServerConnectionImpl::checkInboundFrameLimits()' method.\n    constexpr uint32_t max_allowed =\n        1 + 2 * (CommonUtility::OptionsLimits::\n                     DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT +\n                 1);\n    for (uint32_t i = 0; i < max_allowed + 1; ++i) {\n      EXPECT_EQ(0, nghttp2_submit_window_update(client_->session(), NGHTTP2_FLAG_NONE, 1, 1));\n    }\n  }\n\n  void emptyDataFlood(Buffer::OwnedImpl& data) {\n    initialize();\n\n    TestRequestHeaderMapImpl request_headers;\n    HttpTestUtility::addDefaultHeaders(request_headers, \"POST\");\n    EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n    request_encoder_->encodeHeaders(request_headers, false);\n\n    // HTTP/2 codec does not send empty DATA frames with no END_STREAM flag.\n    // To make this work, send raw bytes representing empty DATA frames bypassing client codec.\n    Http2Frame emptyDataFrame = Http2Frame::makeEmptyDataFrame(Http2Frame::makeClientStreamId(0));\n    constexpr uint32_t max_allowed =\n        
CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD;\n    for (uint32_t i = 0; i < max_allowed + 1; ++i) {\n      data.add(emptyDataFrame.data(), emptyDataFrame.size());\n    }\n  }\n};\n\nTEST_P(Http2CodecImplTest, ShutdownNotice) {\n  initialize();\n  EXPECT_EQ(absl::nullopt, request_encoder_->http1StreamEncoderOptions());\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  EXPECT_CALL(client_callbacks_, onGoAway(_));\n  server_->shutdownNotice();\n  server_->goAway();\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, true));\n  response_encoder_->encodeHeaders(response_headers, true);\n}\n\n// 100 response followed by 200 results in a [decode100ContinueHeaders, decodeHeaders] sequence.\nTEST_P(Http2CodecImplTest, ContinueHeaders) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_));\n  response_encoder_->encode100ContinueHeaders(continue_headers);\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, true));\n  response_encoder_->encodeHeaders(response_headers, true);\n};\n\n// nghttp2 rejects trailers with :status.\nTEST_P(Http2CodecImplTest, TrailerStatus) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, 
true);\n\n  TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_));\n  response_encoder_->encode100ContinueHeaders(continue_headers);\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(response_headers, false);\n\n  // nghttp2 doesn't allow :status in trailers\n  EXPECT_THROW(response_encoder_->encode100ContinueHeaders(continue_headers), ClientCodecError);\n  EXPECT_EQ(1, client_stats_store_.counter(\"http2.rx_messaging_error\").value());\n};\n\n// Multiple 100 responses are passed to the response encoder (who is responsible for coalescing).\nTEST_P(Http2CodecImplTest, MultipleContinueHeaders) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_));\n  response_encoder_->encode100ContinueHeaders(continue_headers);\n  EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_));\n  response_encoder_->encode100ContinueHeaders(continue_headers);\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, true));\n  response_encoder_->encodeHeaders(response_headers, true);\n};\n\n// 101/102 headers etc. 
are passed to the response encoder (who is responsibly for deciding to\n// upgrade, ignore, etc.).\nTEST_P(Http2CodecImplTest, 1xxNonContinueHeaders) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  TestResponseHeaderMapImpl other_headers{{\":status\", \"102\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(other_headers, false);\n};\n\n// nghttp2 treats 101 inside an HTTP/2 stream as an invalid HTTP header field.\nTEST_P(Http2CodecImplTest, Invalid101SwitchingProtocols) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  TestResponseHeaderMapImpl upgrade_headers{{\":status\", \"101\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0);\n  EXPECT_THROW(response_encoder_->encodeHeaders(upgrade_headers, false), ClientCodecError);\n  EXPECT_EQ(1, client_stats_store_.counter(\"http2.rx_messaging_error\").value());\n}\n\nTEST_P(Http2CodecImplTest, InvalidContinueWithFin) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError);\n  EXPECT_EQ(1, client_stats_store_.counter(\"http2.rx_messaging_error\").value());\n}\n\nTEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) {\n  stream_error_on_invalid_http_messaging_ = true;\n  initialize();\n\n  MockStreamCallbacks 
request_callbacks;\n  request_encoder_->getStream().addCallbacks(request_callbacks);\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  // Buffer client data to avoid mock recursion causing lifetime issues.\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); }));\n\n  TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  response_encoder_->encodeHeaders(continue_headers, true);\n\n  // Flush pending data.\n  EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _));\n  setupDefaultConnectionMocks();\n  auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_);\n  EXPECT_TRUE(status.ok());\n\n  EXPECT_EQ(1, client_stats_store_.counter(\"http2.rx_messaging_error\").value());\n  expectDetailsRequest(\"http2.violation.of.messaging.rule\");\n}\n\nTEST_P(Http2CodecImplTest, CodecHasCorrectStreamErrorIfFalse) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  EXPECT_FALSE(response_encoder_->streamErrorOnInvalidHttpMessage());\n}\n\nTEST_P(Http2CodecImplTest, CodecHasCorrectStreamErrorIfTrue) {\n  stream_error_on_invalid_http_messaging_ = true;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  EXPECT_TRUE(response_encoder_->streamErrorOnInvalidHttpMessage());\n}\n\nTEST_P(Http2CodecImplTest, InvalidRepeatContinue) {\n  initialize();\n\n  TestRequestHeaderMapImpl 
request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_));\n  response_encoder_->encode100ContinueHeaders(continue_headers);\n\n  EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError);\n  EXPECT_EQ(1, client_stats_store_.counter(\"http2.rx_messaging_error\").value());\n};\n\nTEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) {\n  stream_error_on_invalid_http_messaging_ = true;\n  initialize();\n\n  MockStreamCallbacks request_callbacks;\n  request_encoder_->getStream().addCallbacks(request_callbacks);\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_));\n  response_encoder_->encode100ContinueHeaders(continue_headers);\n\n  // Buffer client data to avoid mock recursion causing lifetime issues.\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); }));\n\n  response_encoder_->encodeHeaders(continue_headers, true);\n\n  // Flush pending data.\n  EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _));\n  setupDefaultConnectionMocks();\n  auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_);\n  EXPECT_TRUE(status.ok());\n\n  EXPECT_EQ(1, client_stats_store_.counter(\"http2.rx_messaging_error\").value());\n  expectDetailsRequest(\"http2.violation.of.messaging.rule\");\n};\n\nTEST_P(Http2CodecImplTest, 
Invalid204WithContentLength) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"204\"}, {\"content-length\", \"3\"}};\n  // What follows is a hack to get headers that should span into continuation frames. The default\n  // maximum frame size is 16K. We will add 3,000 headers that will take us above this size and\n  // not easily compress with HPACK. (I confirmed this generates 26,468 bytes of header data\n  // which should contain a continuation.)\n  for (unsigned i = 1; i < 3000; i++) {\n    response_headers.addCopy(std::to_string(i), std::to_string(i));\n  }\n\n  EXPECT_LOG_CONTAINS(\n      \"debug\",\n      \"Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], \"\n      \"value: [3]\",\n      EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), ClientCodecError));\n  EXPECT_EQ(1, client_stats_store_.counter(\"http2.rx_messaging_error\").value());\n};\n\nTEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) {\n  stream_error_on_invalid_http_messaging_ = true;\n  initialize();\n\n  MockStreamCallbacks request_callbacks;\n  request_encoder_->getStream().addCallbacks(request_callbacks);\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  // Buffer client data to avoid mock recursion causing lifetime issues.\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); }));\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"204\"}, {\"content-length\", \"3\"}};\n  // 
What follows is a hack to get headers that should span into continuation frames. The default\n  // maximum frame size is 16K. We will add 3,000 headers that will take us above this size and\n  // not easily compress with HPACK. (I confirmed this generates 26,468 bytes of header data\n  // which should contain a continuation.)\n  for (int i = 1; i < 3000; i++) {\n    response_headers.addCopy(std::to_string(i), std::to_string(i));\n  }\n\n  response_encoder_->encodeHeaders(response_headers, false);\n\n  // Flush pending data.\n  EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _));\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _));\n  setupDefaultConnectionMocks();\n  auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_);\n  EXPECT_TRUE(status.ok());\n\n  EXPECT_EQ(1, client_stats_store_.counter(\"http2.rx_messaging_error\").value());\n  expectDetailsRequest(\"http2.invalid.header.field\");\n};\n\nTEST_P(Http2CodecImplTest, RefusedStreamReset) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  MockStreamCallbacks callbacks;\n  request_encoder_->getStream().addCallbacks(callbacks);\n  EXPECT_CALL(server_stream_callbacks_,\n              onResetStream(StreamResetReason::LocalRefusedStreamReset, _));\n  EXPECT_CALL(callbacks, onResetStream(StreamResetReason::RemoteRefusedStreamReset, _));\n  response_encoder_->getStream().resetStream(StreamResetReason::LocalRefusedStreamReset);\n}\n\nTEST_P(Http2CodecImplTest, InvalidHeadersFrame) {\n  initialize();\n\n  EXPECT_THROW(request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true), ServerCodecError);\n  EXPECT_EQ(1, server_stats_store_.counter(\"http2.rx_messaging_error\").value());\n}\n\nTEST_P(Http2CodecImplTest, InvalidHeadersFrameAllowed) 
{\n  stream_error_on_invalid_http_messaging_ = true;\n  initialize();\n\n  MockStreamCallbacks request_callbacks;\n  request_encoder_->getStream().addCallbacks(request_callbacks);\n\n  ON_CALL(client_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); }));\n\n  request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true);\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _));\n  EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::RemoteReset, _));\n  auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_);\n  EXPECT_TRUE(status.ok());\n  expectDetailsResponse(\"http2.violation.of.messaging.rule\");\n}\n\nTEST_P(Http2CodecImplTest, TrailingHeaders) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n  EXPECT_CALL(request_decoder_, decodeData(_, false));\n  Buffer::OwnedImpl hello(\"hello\");\n  request_encoder_->encodeData(hello, false);\n  EXPECT_CALL(request_decoder_, decodeTrailers_(_));\n  request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{{\"trailing\", \"header\"}});\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(response_headers, false);\n  EXPECT_CALL(response_decoder_, decodeData(_, false));\n  Buffer::OwnedImpl world(\"world\");\n  response_encoder_->encodeData(world, false);\n  EXPECT_CALL(response_decoder_, decodeTrailers_(_));\n  response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{\"trailing\", \"header\"}});\n}\n\n// When having empty trailers, codec submits empty buffer and end_stream instead.\nTEST_P(Http2CodecImplTest, IgnoreTrailingEmptyHeaders) {\n  
TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.http2_skip_encoding_empty_trailers\", \"true\"}});\n\n  initialize();\n\n  Buffer::OwnedImpl empty_buffer;\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n  EXPECT_CALL(request_decoder_, decodeData(_, false));\n  Buffer::OwnedImpl hello(\"hello\");\n  request_encoder_->encodeData(hello, false);\n  EXPECT_CALL(request_decoder_, decodeData(BufferEqual(&empty_buffer), true));\n  request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{});\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(response_headers, false);\n  EXPECT_CALL(response_decoder_, decodeData(_, false));\n  Buffer::OwnedImpl world(\"world\");\n  response_encoder_->encodeData(world, false);\n  EXPECT_CALL(response_decoder_, decodeData(BufferEqual(&empty_buffer), true));\n  response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{});\n}\n\n// When having empty trailers and \"envoy.reloadable_features.http2_skip_encoding_empty_trailers\" is\n// turned off, codec submits empty trailers.\nTEST_P(Http2CodecImplTest, SubmitTrailingEmptyHeaders) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.http2_skip_encoding_empty_trailers\", \"false\"}});\n\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n  EXPECT_CALL(request_decoder_, decodeData(_, false));\n  Buffer::OwnedImpl hello(\"hello\");\n  
request_encoder_->encodeData(hello, false);\n  EXPECT_CALL(request_decoder_, decodeTrailers_(_));\n  request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{});\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(response_headers, false);\n  EXPECT_CALL(response_decoder_, decodeData(_, false));\n  Buffer::OwnedImpl world(\"world\");\n  response_encoder_->encodeData(world, false);\n  EXPECT_CALL(response_decoder_, decodeTrailers_(_));\n  response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{});\n}\n\nTEST_P(Http2CodecImplTest, TrailingHeadersLargeClientBody) {\n  initialize();\n\n  // Buffer server data so we can make sure we don't get any window updates.\n  ON_CALL(client_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); }));\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n  EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1));\n  Buffer::OwnedImpl body(std::string(1024 * 1024, 'a'));\n  request_encoder_->encodeData(body, false);\n  request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{{\"trailing\", \"header\"}});\n\n  // Flush pending data.\n  setupDefaultConnectionMocks();\n  EXPECT_CALL(request_decoder_, decodeTrailers_(_));\n  auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_);\n  EXPECT_TRUE(status.ok());\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(response_headers, false);\n  EXPECT_CALL(response_decoder_, decodeData(_, false));\n  Buffer::OwnedImpl world(\"world\");\n  
response_encoder_->encodeData(world, false);\n  EXPECT_CALL(response_decoder_, decodeTrailers_(_));\n  response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{\"trailing\", \"header\"}});\n}\n\nTEST_P(Http2CodecImplTest, SmallMetadataVecTest) {\n  allow_metadata_ = true;\n  initialize();\n\n  // Generates a valid stream_id by sending a request header.\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  MetadataMapVector metadata_map_vector;\n  const int size = 10;\n  for (int i = 0; i < size; i++) {\n    MetadataMap metadata_map = {\n        {\"header_key1\", \"header_value1\"},\n        {\"header_key2\", \"header_value2\"},\n        {\"header_key3\", \"header_value3\"},\n        {\"header_key4\", \"header_value4\"},\n    };\n    MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);\n    metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  }\n\n  EXPECT_CALL(request_decoder_, decodeMetadata_(_)).Times(size);\n  request_encoder_->encodeMetadata(metadata_map_vector);\n\n  EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size);\n  response_encoder_->encodeMetadata(metadata_map_vector);\n}\n\nTEST_P(Http2CodecImplTest, LargeMetadataVecTest) {\n  allow_metadata_ = true;\n  initialize();\n\n  // Generates a valid stream_id by sending a request header.\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  MetadataMapVector metadata_map_vector;\n  const int size = 10;\n  for (int i = 0; i < size; i++) {\n    MetadataMap metadata_map = {\n        {\"header_key1\", std::string(50 * 1024, 'a')},\n    };\n    MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);\n    
metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  }\n\n  EXPECT_CALL(request_decoder_, decodeMetadata_(_)).Times(size);\n  request_encoder_->encodeMetadata(metadata_map_vector);\n\n  EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size);\n  response_encoder_->encodeMetadata(metadata_map_vector);\n}\n\nTEST_P(Http2CodecImplTest, BadMetadataVecReceivedTest) {\n  allow_metadata_ = true;\n  initialize();\n\n  // Generates a valid stream_id by sending a request header.\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  MetadataMap metadata_map = {\n      {\"header_key1\", \"header_value1\"},\n      {\"header_key2\", \"header_value2\"},\n      {\"header_key3\", \"header_value3\"},\n      {\"header_key4\", \"header_value4\"},\n  };\n  MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);\n  MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n\n  corrupt_metadata_frame_ = true;\n  EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), ServerCodecError,\n                            \"The user callback function failed\");\n}\n\n// Encode response metadata while dispatching request data from the client, so\n// that nghttp2 can't fill the metadata frames' payloads until dispatching\n// is finished.\nTEST_P(Http2CodecImplTest, EncodeMetadataWhileDispatchingTest) {\n  allow_metadata_ = true;\n  initialize();\n\n  MetadataMapVector metadata_map_vector;\n  const int size = 10;\n  for (int i = 0; i < size; i++) {\n    MetadataMap metadata_map = {\n        {\"header_key1\", \"header_value1\"},\n        {\"header_key2\", \"header_value2\"},\n        {\"header_key3\", \"header_value3\"},\n        {\"header_key4\", \"header_value4\"},\n    };\n    MetadataMapPtr metadata_map_ptr = 
std::make_unique<MetadataMap>(metadata_map);\n    metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  }\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)).WillOnce(InvokeWithoutArgs([&]() -> void {\n    response_encoder_->encodeMetadata(metadata_map_vector);\n  }));\n  EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size);\n  request_encoder_->encodeHeaders(request_headers, true);\n}\n\n// Validate the keepalive PINGs are sent and received correctly.\nTEST_P(Http2CodecImplTest, ConnectionKeepalive) {\n  constexpr uint32_t interval_ms = 100;\n  constexpr uint32_t timeout_ms = 200;\n  client_http2_options_.mutable_connection_keepalive()->mutable_interval()->set_nanos(interval_ms *\n                                                                                      1000 * 1000);\n  client_http2_options_.mutable_connection_keepalive()->mutable_timeout()->set_nanos(timeout_ms *\n                                                                                     1000 * 1000);\n  client_http2_options_.mutable_connection_keepalive()->mutable_interval_jitter()->set_value(0);\n  auto timeout_timer = new Event::MockTimer(&client_connection_.dispatcher_); /* */\n  auto send_timer = new Event::MockTimer(&client_connection_.dispatcher_);\n  EXPECT_CALL(*timeout_timer, disableTimer());\n  EXPECT_CALL(*send_timer, enableTimer(std::chrono::milliseconds(interval_ms), _));\n  initialize();\n\n  // Trigger sending a PING, and validate that an ACK is received based on the timeout timer\n  // being disabled and the interval being re-enabled.\n  EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(timeout_ms), _));\n  EXPECT_CALL(*timeout_timer, disableTimer()); // This indicates that an ACK was received.\n  EXPECT_CALL(*send_timer, enableTimer(std::chrono::milliseconds(interval_ms), _));\n  send_timer->callback_();\n\n  // Test that a timeout 
closes the connection.\n  EXPECT_CALL(client_connection_, close(Network::ConnectionCloseType::NoFlush));\n  timeout_timer->callback_();\n}\n\n// Validate that jitter is added as expected based on configuration.\nTEST_P(Http2CodecImplTest, ConnectionKeepaliveJitter) {\n  client_http2_options_.mutable_connection_keepalive()->mutable_interval()->set_seconds(1);\n  client_http2_options_.mutable_connection_keepalive()->mutable_timeout()->set_seconds(1);\n  client_http2_options_.mutable_connection_keepalive()->mutable_interval_jitter()->set_value(10);\n  /*auto timeout_timer = */ new NiceMock<Event::MockTimer>(&client_connection_.dispatcher_);\n  auto send_timer = new Event::MockTimer(&client_connection_.dispatcher_);\n\n  constexpr std::chrono::milliseconds min_expected(1000);\n  constexpr std::chrono::milliseconds max_expected(1099); // 1000ms + 10%\n  std::chrono::milliseconds min_observed(5000);\n  std::chrono::milliseconds max_observed(0);\n  EXPECT_CALL(*send_timer, enableTimer(_, _))\n      .WillRepeatedly(Invoke([&](const std::chrono::milliseconds& ms, const ScopeTrackedObject*) {\n        EXPECT_GE(ms, std::chrono::milliseconds(1000));\n        EXPECT_LE(ms, std::chrono::milliseconds(1100));\n        max_observed = std::max(max_observed, ms);\n        min_observed = std::min(min_observed, ms);\n      }));\n  initialize();\n\n  for (uint64_t i = 0; i < 250; i++) {\n    EXPECT_CALL(client_->random_generator_, random()).WillOnce(Return(i));\n    send_timer->callback_();\n  }\n\n  EXPECT_EQ(min_observed.count(), min_expected.count());\n  EXPECT_EQ(max_observed.count(), max_expected.count());\n}\n\nclass Http2CodecImplDeferredResetTest : public Http2CodecImplTest {};\n\nTEST_P(Http2CodecImplDeferredResetTest, DeferredResetClient) {\n  initialize();\n\n  InSequence s;\n\n  MockStreamCallbacks client_stream_callbacks;\n  request_encoder_->getStream().addCallbacks(client_stream_callbacks);\n\n  // Do a request, but pause server dispatch so we don't send window updates. 
This will result in a\n  // deferred reset, followed by a pending frames flush which will cause the stream to actually\n  // be reset immediately since we are outside of dispatch context.\n  ON_CALL(client_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); }));\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  request_encoder_->encodeHeaders(request_headers, false);\n  Buffer::OwnedImpl body(std::string(1024 * 1024, 'a'));\n  EXPECT_CALL(client_stream_callbacks, onAboveWriteBufferHighWatermark()).Times(AnyNumber());\n  request_encoder_->encodeData(body, true);\n  EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::LocalReset, _));\n  request_encoder_->getStream().resetStream(StreamResetReason::LocalReset);\n\n  // Dispatch server. We expect to see some data.\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)).WillOnce(InvokeWithoutArgs([&]() -> void {\n    // Start a response inside the headers callback. 
This should not result in the client\n    // seeing any headers as the stream should already be reset on the other side, even though\n    // we don't know about it yet.\n    TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n    response_encoder_->encodeHeaders(response_headers, false);\n  }));\n  EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1));\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _));\n\n  setupDefaultConnectionMocks();\n  auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_);\n  EXPECT_TRUE(status.ok());\n}\n\nTEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) {\n  initialize();\n\n  InSequence s;\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  // In this case we do the same thing as DeferredResetClient but on the server side.\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); }));\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  response_encoder_->encodeHeaders(response_headers, false);\n  Buffer::OwnedImpl body(std::string(1024 * 1024, 'a'));\n  EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber());\n  auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_);\n  EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _));\n  response_encoder_->encodeData(body, true);\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _));\n  EXPECT_CALL(*flush_timer, disableTimer());\n  response_encoder_->getStream().resetStream(StreamResetReason::LocalReset);\n\n  MockStreamCallbacks client_stream_callbacks;\n  
request_encoder_->getStream().addCallbacks(client_stream_callbacks);\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1));\n  EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _));\n  setupDefaultConnectionMocks();\n  auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_);\n  EXPECT_TRUE(status.ok());\n}\n\nclass Http2CodecImplFlowControlTest : public Http2CodecImplTest {};\n\n// Back up the pending_sent_data_ buffer in the client connection and make sure the watermarks fire\n// as expected.\n//\n// This also tests the readDisable logic in StreamImpl, verifying that h2 bytes are consumed\n// when the stream has readDisable(true) called.\nTEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) {\n  initialize();\n  MockStreamCallbacks callbacks;\n  request_encoder_->getStream().addCallbacks(callbacks);\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  TestRequestHeaderMapImpl expected_headers;\n  HttpTestUtility::addDefaultHeaders(expected_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  // Force the server stream to be read disabled. This will cause it to stop sending window\n  // updates to the client.\n  server_->getStream(1)->readDisable(true);\n  EXPECT_EQ(1, TestUtility::findGauge(client_stats_store_, \"http2.streams_active\")->value());\n  EXPECT_EQ(1, TestUtility::findGauge(server_stats_store_, \"http2.streams_active\")->value());\n\n  uint32_t initial_stream_window =\n      nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1);\n  // If this limit is changed, this test will fail due to the initial large writes being divided\n  // into more than 4 frames. 
Fast fail here with this explanatory comment.\n  ASSERT_EQ(65535, initial_stream_window);\n  // Make sure the limits were configured properly in test set up.\n  EXPECT_EQ(initial_stream_window, server_->getStream(1)->bufferLimit());\n  EXPECT_EQ(initial_stream_window, client_->getStream(1)->bufferLimit());\n\n  // One large write gets broken into smaller frames.\n  EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AnyNumber());\n  Buffer::OwnedImpl long_data(std::string(initial_stream_window, 'a'));\n  request_encoder_->encodeData(long_data, false);\n\n  // Verify that the window is full. The client will not send more data to the server for this\n  // stream.\n  EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1));\n  EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1));\n  EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1));\n\n  // Now that the flow control window is full, further data causes the send buffer to back up.\n  Buffer::OwnedImpl more_long_data(std::string(initial_stream_window, 'a'));\n  request_encoder_->encodeData(more_long_data, false);\n  EXPECT_EQ(initial_stream_window, client_->getStreamPendingSendDataLength(1));\n  EXPECT_EQ(initial_stream_window,\n            TestUtility::findGauge(client_stats_store_, \"http2.pending_send_bytes\")->value());\n  EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1));\n\n  // If we go over the limit, the stream callbacks should fire.\n  EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark());\n  Buffer::OwnedImpl last_byte(\"!\");\n  request_encoder_->encodeData(last_byte, false);\n  EXPECT_EQ(initial_stream_window + 1, client_->getStreamPendingSendDataLength(1));\n  EXPECT_EQ(initial_stream_window + 1,\n            TestUtility::findGauge(client_stats_store_, \"http2.pending_send_bytes\")->value());\n\n  // Now create a second stream on the connection.\n  MockResponseDecoder response_decoder2;\n  
RequestEncoder* request_encoder2 = &client_->newStream(response_decoder_);\n  StreamEncoder* response_encoder2;\n  MockStreamCallbacks server_stream_callbacks2;\n  MockRequestDecoder request_decoder2;\n  // When the server stream is created it should check the status of the\n  // underlying connection. Pretend it is overrun.\n  EXPECT_CALL(server_connection_, aboveHighWatermark()).WillOnce(Return(true));\n  EXPECT_CALL(server_stream_callbacks2, onAboveWriteBufferHighWatermark());\n  EXPECT_CALL(server_callbacks_, newStream(_, _))\n      .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        response_encoder2 = &encoder;\n        encoder.getStream().addCallbacks(server_stream_callbacks2);\n        return request_decoder2;\n      }));\n  EXPECT_CALL(request_decoder2, decodeHeaders_(_, false));\n  request_encoder2->encodeHeaders(request_headers, false);\n\n  // Add the stream callbacks belatedly. On creation the stream should have\n  // been noticed that the connection was backed up. Any new subscriber to\n  // stream callbacks should get a callback when they addCallbacks.\n  MockStreamCallbacks callbacks2;\n  EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark());\n  request_encoder_->getStream().addCallbacks(callbacks2);\n\n  // Add a third callback to make testing removal mid-watermark call below more interesting.\n  MockStreamCallbacks callbacks3;\n  EXPECT_CALL(callbacks3, onAboveWriteBufferHighWatermark());\n  request_encoder_->getStream().addCallbacks(callbacks3);\n\n  // Now unblock the server's stream. 
This will cause the bytes to be consumed, flow control\n  // updates to be sent, and the client to flush all queued data.\n  // For bonus corner case coverage, remove callback2 in the middle of runLowWatermarkCallbacks()\n  // and ensure it is not called.\n  EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([&]() -> void {\n    request_encoder_->getStream().removeCallbacks(callbacks2);\n  }));\n  EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0);\n  EXPECT_CALL(callbacks3, onBelowWriteBufferLowWatermark());\n  server_->getStream(1)->readDisable(false);\n  EXPECT_EQ(0, client_->getStreamPendingSendDataLength(1));\n  EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, \"http2.pending_send_bytes\")->value());\n  // The extra 1 byte sent won't trigger another window update, so the final window should be the\n  // initial window minus the last 1 byte flush from the client to server.\n  EXPECT_EQ(initial_stream_window - 1,\n            nghttp2_session_get_stream_local_window_size(server_->session(), 1));\n  EXPECT_EQ(initial_stream_window - 1,\n            nghttp2_session_get_stream_remote_window_size(client_->session(), 1));\n}\n\n// Set up the same asTestFlowControlInPendingSendData, but tears the stream down with an early reset\n// once the flow control window is full up.\nTEST_P(Http2CodecImplFlowControlTest, EarlyResetRestoresWindow) {\n  initialize();\n  MockStreamCallbacks callbacks;\n  request_encoder_->getStream().addCallbacks(callbacks);\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  TestRequestHeaderMapImpl expected_headers;\n  HttpTestUtility::addDefaultHeaders(expected_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  // Force the server stream to be read disabled. 
This will cause it to stop sending window\n  // updates to the client.\n  server_->getStream(1)->readDisable(true);\n\n  uint32_t initial_stream_window =\n      nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1);\n  uint32_t initial_connection_window = nghttp2_session_get_remote_window_size(client_->session());\n  // If this limit is changed, this test will fail due to the initial large writes being divided\n  // into more than 4 frames. Fast fail here with this explanatory comment.\n  ASSERT_EQ(65535, initial_stream_window);\n  // One large write may get broken into smaller frames.\n  EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AnyNumber());\n  Buffer::OwnedImpl long_data(std::string(initial_stream_window, 'a'));\n  // The one giant write will cause the buffer to go over the limit, then drain and go back under\n  // the limit.\n  request_encoder_->encodeData(long_data, false);\n\n  // Verify that the window is full. The client will not send more data to the server for this\n  // stream.\n  EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1));\n  EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1));\n  EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1));\n  EXPECT_GT(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session()));\n\n  EXPECT_CALL(server_stream_callbacks_,\n              onResetStream(StreamResetReason::LocalRefusedStreamReset, _));\n  EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0);\n  EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0);\n  EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(0);\n  EXPECT_CALL(server_stream_callbacks_, onBelowWriteBufferLowWatermark()).Times(0);\n  EXPECT_CALL(callbacks, onResetStream(StreamResetReason::RemoteRefusedStreamReset, _))\n      .WillOnce(Invoke([&](StreamResetReason, absl::string_view) -> void {\n        
// Test the case where the reset callbacks cause the socket to fill up,\n        // causing the underlying connection to back up. Given the stream is\n        // being destroyed the watermark callbacks should not fire (mocks for Times(0)\n        // above)\n        client_->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n        client_->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n        server_->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n        server_->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n      }));\n  response_encoder_->getStream().resetStream(StreamResetReason::LocalRefusedStreamReset);\n\n  // Regression test that the window is consumed even if the stream is destroyed early.\n  EXPECT_EQ(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session()));\n}\n\n// Test the HTTP2 pending_recv_data_ buffer going over and under watermark limits.\nTEST_P(Http2CodecImplFlowControlTest, FlowControlPendingRecvData) {\n  initialize();\n  MockStreamCallbacks callbacks;\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  TestRequestHeaderMapImpl expected_headers;\n  HttpTestUtility::addDefaultHeaders(expected_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  // Set artificially small watermarks to make the recv buffer easy to overrun. 
In production,\n  // the recv buffer can be overrun by a client which negotiates a larger\n  // SETTINGS_MAX_FRAME_SIZE but there's no current easy way to tweak that in\n  // envoy (without sending raw HTTP/2 frames) so we lower the buffer limit instead.\n  server_->setStreamWriteBufferWatermarks(1, 10, 20);\n\n  EXPECT_CALL(request_decoder_, decodeData(_, false));\n  Buffer::OwnedImpl data(std::string(40, 'a'));\n  request_encoder_->encodeData(data, false);\n}\n\n// Verify that we create and disable the stream flush timer when trailers follow a stream that\n// does not have enough window.\nTEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBody) {\n  initialize();\n\n  InSequence s;\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  ON_CALL(client_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); }));\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(response_headers, false);\n  EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark());\n  EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1));\n  auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_);\n  EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _));\n  Buffer::OwnedImpl body(std::string(1024 * 1024, 'a'));\n  response_encoder_->encodeData(body, false);\n  response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{\"trailing\", \"header\"}});\n\n  // Send window updates from the client.\n  setupDefaultConnectionMocks();\n  EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1));\n  EXPECT_CALL(response_decoder_, 
decodeTrailers_(_));\n  EXPECT_CALL(*flush_timer, disableTimer());\n  auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_);\n  EXPECT_TRUE(status.ok());\n  EXPECT_EQ(0, server_stats_store_.counter(\"http2.tx_flush_timeout\").value());\n}\n\n// Verify that we create and handle the stream flush timeout when trailers follow a stream that\n// does not have enough window.\nTEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBodyFlushTimeout) {\n  initialize();\n\n  InSequence s;\n  MockStreamCallbacks client_stream_callbacks;\n  request_encoder_->getStream().addCallbacks(client_stream_callbacks);\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  ON_CALL(client_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); }));\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(response_headers, false);\n  EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark());\n  EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1));\n  auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_);\n  EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _));\n  Buffer::OwnedImpl body(std::string(1024 * 1024, 'a'));\n  response_encoder_->encodeData(body, false);\n  response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{\"trailing\", \"header\"}});\n\n  // Invoke a stream flush timeout. 
Make sure we don't get a reset locally for higher layers but\n  // we do get a reset on the client.\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _));\n  flush_timer->invokeCallback();\n  EXPECT_EQ(1, server_stats_store_.counter(\"http2.tx_flush_timeout\").value());\n}\n\n// Verify that we create and handle the stream flush timeout when there is a large body that\n// does not have enough window.\nTEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeout) {\n  initialize();\n\n  InSequence s;\n  MockStreamCallbacks client_stream_callbacks;\n  request_encoder_->getStream().addCallbacks(client_stream_callbacks);\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  ON_CALL(client_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); }));\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(response_headers, false);\n  EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1));\n  auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_);\n  EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _));\n  Buffer::OwnedImpl body(std::string(1024 * 1024, 'a'));\n  response_encoder_->encodeData(body, true);\n\n  // Invoke a stream flush timeout. 
Make sure we don't get a reset locally for higher layers but\n  // we do get a reset on the client.\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _));\n  flush_timer->invokeCallback();\n  EXPECT_EQ(1, server_stats_store_.counter(\"http2.tx_flush_timeout\").value());\n}\n\n// Verify that when an incoming protocol error races with a stream flush timeout we correctly\n// disable the flush timeout and do not attempt to reset the stream.\nTEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeoutAfterGoaway) {\n  initialize();\n\n  InSequence s;\n  MockStreamCallbacks client_stream_callbacks;\n  request_encoder_->getStream().addCallbacks(client_stream_callbacks);\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  ON_CALL(client_connection_, write(_, _))\n      .WillByDefault(\n          Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); }));\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));\n  response_encoder_->encodeHeaders(response_headers, false);\n  EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1));\n  auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_);\n  EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _));\n  Buffer::OwnedImpl body(std::string(1024 * 1024, 'a'));\n  response_encoder_->encodeData(body, true);\n\n  // Force a protocol error.\n  Buffer::OwnedImpl garbage_data(\"this should cause a protocol error\");\n  EXPECT_CALL(client_callbacks_, onGoAway(_));\n  EXPECT_CALL(*flush_timer, disableTimer());\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  auto status = 
server_wrapper_.dispatch(garbage_data, *server_);\n  EXPECT_FALSE(status.ok());\n  EXPECT_EQ(0, server_stats_store_.counter(\"http2.tx_flush_timeout\").value());\n}\n\nTEST_P(Http2CodecImplTest, WatermarkUnderEndStream) {\n  initialize();\n  MockStreamCallbacks callbacks;\n  request_encoder_->getStream().addCallbacks(callbacks);\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  // The 'true' on encodeData will set local_end_stream_ on the client but not\n  // the server. Verify that client watermark callbacks will not be called, but\n  // server callbacks may be called by simulating connection overflow on both\n  // ends.\n  EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0);\n  EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0);\n  EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark());\n  EXPECT_CALL(server_stream_callbacks_, onBelowWriteBufferLowWatermark());\n  EXPECT_CALL(request_decoder_, decodeData(_, true)).WillOnce(InvokeWithoutArgs([&]() -> void {\n    client_->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n    client_->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n    server_->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n    server_->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n  }));\n  Buffer::OwnedImpl hello(\"hello\");\n  request_encoder_->encodeData(hello, true);\n\n  // The 'true' on encodeData will set local_end_stream_ on the server. 
Verify\n  // that neither client nor server watermark callbacks will be called again.\n  EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0);\n  EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0);\n  EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(0);\n  EXPECT_CALL(server_stream_callbacks_, onBelowWriteBufferLowWatermark()).Times(0);\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(HeaderMapEqual(&response_headers), true))\n      .WillOnce(InvokeWithoutArgs([&]() -> void {\n        client_->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n        client_->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n        server_->onUnderlyingConnectionAboveWriteBufferHighWatermark();\n        server_->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n      }));\n  response_encoder_->encodeHeaders(response_headers, true);\n}\n\nclass Http2CodecImplStreamLimitTest : public Http2CodecImplTest {};\n\n// Regression test for issue #3076.\n//\n// TODO(PiotrSikora): add tests that exercise both scenarios: before and after receiving\n// the HTTP/2 SETTINGS frame.\nTEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) {\n  http2OptionsFromTuple(client_http2_options_, ::testing::get<0>(GetParam()));\n  http2OptionsFromTuple(server_http2_options_, ::testing::get<1>(GetParam()));\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")) {\n    client_ = std::make_unique<TestClientConnectionImplNew>(\n        client_connection_, client_callbacks_, client_stats_store_, client_http2_options_,\n        max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactoryNew::get());\n    server_ = std::make_unique<TestServerConnectionImplNew>(\n        server_connection_, server_callbacks_, server_stats_store_, server_http2_options_,\n        max_request_headers_kb_, max_request_headers_count_, 
headers_with_underscores_action_);\n\n  } else {\n    client_ = std::make_unique<TestClientConnectionImplLegacy>(\n        client_connection_, client_callbacks_, client_stats_store_, client_http2_options_,\n        max_request_headers_kb_, max_response_headers_count_,\n        ProdNghttp2SessionFactoryLegacy::get());\n    server_ = std::make_unique<TestServerConnectionImplLegacy>(\n        server_connection_, server_callbacks_, server_stats_store_, server_http2_options_,\n        max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_);\n  }\n  for (int i = 0; i < 101; ++i) {\n    request_encoder_ = &client_->newStream(response_decoder_);\n    setupDefaultConnectionMocks();\n    EXPECT_CALL(server_callbacks_, newStream(_, _))\n        .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n          response_encoder_ = &encoder;\n          encoder.getStream().addCallbacks(server_stream_callbacks_);\n          return request_decoder_;\n        }));\n\n    TestRequestHeaderMapImpl request_headers;\n    HttpTestUtility::addDefaultHeaders(request_headers);\n    EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n    request_encoder_->encodeHeaders(request_headers, true);\n  }\n}\n\n#define HTTP2SETTINGS_SMALL_WINDOW_COMBINE                                                         \\\n  ::testing::Combine(                                                                              \\\n      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE),                   \\\n      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS),             \\\n      ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE),             \\\n      ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE))\n\n// Deferred reset tests use only small windows so that we can test certain 
conditions.\nINSTANTIATE_TEST_SUITE_P(Http2CodecImplDeferredResetTest, Http2CodecImplDeferredResetTest,\n                         ::testing::Combine(HTTP2SETTINGS_SMALL_WINDOW_COMBINE,\n                                            HTTP2SETTINGS_SMALL_WINDOW_COMBINE));\n\n// Flow control tests only use only small windows so that we can test certain conditions.\nINSTANTIATE_TEST_SUITE_P(Http2CodecImplFlowControlTest, Http2CodecImplFlowControlTest,\n                         ::testing::Combine(HTTP2SETTINGS_SMALL_WINDOW_COMBINE,\n                                            HTTP2SETTINGS_SMALL_WINDOW_COMBINE));\n\n// we separate default/edge cases here to avoid combinatorial explosion\n#define HTTP2SETTINGS_DEFAULT_COMBINE                                                              \\\n  ::testing::Combine(                                                                              \\\n      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE),                   \\\n      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS),             \\\n      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE),         \\\n      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE))\n\n// Stream limit test only uses the default values because not all combinations of\n// edge settings allow for the number of streams needed by the test.\nINSTANTIATE_TEST_SUITE_P(Http2CodecImplStreamLimitTest, Http2CodecImplStreamLimitTest,\n                         ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE,\n                                            HTTP2SETTINGS_DEFAULT_COMBINE));\n\nINSTANTIATE_TEST_SUITE_P(Http2CodecImplTestDefaultSettings, Http2CodecImplTest,\n                         ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE,\n                                            HTTP2SETTINGS_DEFAULT_COMBINE));\n\n#define HTTP2SETTINGS_EDGE_COMBINE                              
                                   \\\n  ::testing::Combine(                                                                              \\\n      ::testing::Values(CommonUtility::OptionsLimits::MIN_HPACK_TABLE_SIZE,                        \\\n                        CommonUtility::OptionsLimits::MAX_HPACK_TABLE_SIZE),                       \\\n      ::testing::Values(CommonUtility::OptionsLimits::MIN_MAX_CONCURRENT_STREAMS,                  \\\n                        CommonUtility::OptionsLimits::MAX_MAX_CONCURRENT_STREAMS),                 \\\n      ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE,              \\\n                        CommonUtility::OptionsLimits::MAX_INITIAL_STREAM_WINDOW_SIZE),             \\\n      ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE,          \\\n                        CommonUtility::OptionsLimits::MAX_INITIAL_CONNECTION_WINDOW_SIZE))\n\n// Make sure we have coverage for high and low values for various  combinations and permutations\n// of HTTP settings in at least one test fixture.\n// Use with caution as any test using this runs 255 times.\nusing Http2CodecImplTestAll = Http2CodecImplTest;\n\nINSTANTIATE_TEST_SUITE_P(Http2CodecImplTestDefaultSettings, Http2CodecImplTestAll,\n                         ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE,\n                                            HTTP2SETTINGS_DEFAULT_COMBINE));\nINSTANTIATE_TEST_SUITE_P(Http2CodecImplTestEdgeSettings, Http2CodecImplTestAll,\n                         ::testing::Combine(HTTP2SETTINGS_EDGE_COMBINE,\n                                            HTTP2SETTINGS_EDGE_COMBINE));\n\nTEST(Http2CodecUtility, reconstituteCrumbledCookies) {\n  {\n    HeaderString key;\n    HeaderString value;\n    HeaderString cookies;\n    EXPECT_FALSE(Utility::reconstituteCrumbledCookies(key, value, cookies));\n    EXPECT_TRUE(cookies.empty());\n  }\n\n  {\n    HeaderString 
key(Headers::get().ContentLength);\n    HeaderString value;\n    value.setInteger(5);\n    HeaderString cookies;\n    EXPECT_FALSE(Utility::reconstituteCrumbledCookies(key, value, cookies));\n    EXPECT_TRUE(cookies.empty());\n  }\n\n  {\n    HeaderString key(Headers::get().Cookie);\n    HeaderString value;\n    value.setCopy(\"a=b\", 3);\n    HeaderString cookies;\n    EXPECT_TRUE(Utility::reconstituteCrumbledCookies(key, value, cookies));\n    EXPECT_EQ(cookies, \"a=b\");\n\n    HeaderString key2(Headers::get().Cookie);\n    HeaderString value2;\n    value2.setCopy(\"c=d\", 3);\n    EXPECT_TRUE(Utility::reconstituteCrumbledCookies(key2, value2, cookies));\n    EXPECT_EQ(cookies, \"a=b; c=d\");\n  }\n}\n\nMATCHER_P(HasValue, m, \"\") {\n  if (!arg.has_value()) {\n    *result_listener << \"does not contain a value\";\n    return false;\n  }\n  const auto& value = arg.value();\n  return ExplainMatchResult(m, value, result_listener);\n};\n\nclass Http2CustomSettingsTestBase : public Http2CodecImplTestFixture {\npublic:\n  struct SettingsParameter {\n    uint16_t identifier;\n    uint32_t value;\n  };\n\n  Http2CustomSettingsTestBase(Http2SettingsTuple client_settings,\n                              Http2SettingsTuple server_settings, bool validate_client)\n      : Http2CodecImplTestFixture(client_settings, server_settings),\n        validate_client_(validate_client) {}\n\n  ~Http2CustomSettingsTestBase() override = default;\n\n  // Sets the custom settings parameters specified by |parameters| in the |options| proto.\n  void setHttp2CustomSettingsParameters(envoy::config::core::v3::Http2ProtocolOptions& options,\n                                        std::vector<SettingsParameter> parameters) {\n    for (const auto& parameter : parameters) {\n      envoy::config::core::v3::Http2ProtocolOptions::SettingsParameter* custom_param =\n          options.mutable_custom_settings_parameters()->Add();\n      
custom_param->mutable_identifier()->set_value(parameter.identifier);\n      custom_param->mutable_value()->set_value(parameter.value);\n    }\n  }\n\n  // Returns the Http2ProtocolOptions proto which specifies the settings parameters to be sent to\n  // the endpoint being validated.\n  envoy::config::core::v3::Http2ProtocolOptions& getCustomOptions() {\n    return validate_client_ ? server_http2_options_ : client_http2_options_;\n  }\n\n  // Returns the endpoint being validated.\n  const TestCodecSettingsProvider& getSettingsProvider() {\n    if (validate_client_) {\n      return *client_;\n    }\n    return *server_;\n  }\n\n  // Returns the settings tuple which specifies a subset of the settings parameters to be sent to\n  // the endpoint being validated.\n  const Http2SettingsTuple& getSettingsTuple() {\n    ASSERT(client_settings_.has_value() && server_settings_.has_value());\n    return validate_client_ ? *server_settings_ : *client_settings_;\n  }\n\nprotected:\n  bool validate_client_{false};\n};\n\nclass Http2CustomSettingsTest\n    : public Http2CustomSettingsTestBase,\n      public ::testing::TestWithParam<\n          ::testing::tuple<Http2SettingsTuple, Http2SettingsTuple, bool>> {\npublic:\n  Http2CustomSettingsTest()\n      : Http2CustomSettingsTestBase(::testing::get<0>(GetParam()), ::testing::get<1>(GetParam()),\n                                    ::testing::get<2>(GetParam())) {}\n};\nINSTANTIATE_TEST_SUITE_P(Http2CodecImplTestEdgeSettings, Http2CustomSettingsTest,\n                         ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE,\n                                            HTTP2SETTINGS_DEFAULT_COMBINE, ::testing::Bool()));\n\n// Validates that custom parameters (those which are not explicitly named in the\n// envoy::config::core::v3::Http2ProtocolOptions proto) are properly sent and processed by\n// client and server connections.\nTEST_P(Http2CustomSettingsTest, UserDefinedSettings) {\n  std::vector<SettingsParameter> 
custom_parameters{{0x10, 10}, {0x11, 20}};\n  setHttp2CustomSettingsParameters(getCustomOptions(), custom_parameters);\n  initialize();\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, _));\n  request_encoder_->encodeHeaders(request_headers, false);\n  uint32_t hpack_table_size =\n      ::testing::get<SettingsTupleIndex::HpackTableSize>(getSettingsTuple());\n  if (hpack_table_size != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) {\n    EXPECT_THAT(\n        getSettingsProvider().getRemoteSettingsParameterValue(NGHTTP2_SETTINGS_HEADER_TABLE_SIZE),\n        HasValue(hpack_table_size));\n  }\n  uint32_t max_concurrent_streams =\n      ::testing::get<SettingsTupleIndex::MaxConcurrentStreams>(getSettingsTuple());\n  if (max_concurrent_streams != NGHTTP2_INITIAL_MAX_CONCURRENT_STREAMS) {\n    EXPECT_THAT(getSettingsProvider().getRemoteSettingsParameterValue(\n                    NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS),\n                HasValue(max_concurrent_streams));\n  }\n  uint32_t initial_stream_window_size =\n      ::testing::get<SettingsTupleIndex::InitialStreamWindowSize>(getSettingsTuple());\n  if (max_concurrent_streams != NGHTTP2_INITIAL_WINDOW_SIZE) {\n    EXPECT_THAT(\n        getSettingsProvider().getRemoteSettingsParameterValue(NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE),\n        HasValue(initial_stream_window_size));\n  }\n  // Validate that custom parameters are received by the endpoint (client or server) under\n  // test.\n  for (const auto& parameter : custom_parameters) {\n    EXPECT_THAT(getSettingsProvider().getRemoteSettingsParameterValue(parameter.identifier),\n                HasValue(parameter.value));\n  }\n}\n\n// Tests request headers whose size is larger than the default limit of 60K.\nTEST_P(Http2CodecImplTest, LargeRequestHeadersInvokeResetStream) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  
HttpTestUtility::addDefaultHeaders(request_headers);\n  std::string long_string = std::string(63 * 1024, 'q');\n  request_headers.addCopy(\"big\", long_string);\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1);\n  request_encoder_->encodeHeaders(request_headers, false);\n}\n\n// Large request headers are accepted when max limit configured.\nTEST_P(Http2CodecImplTest, LargeRequestHeadersAccepted) {\n  max_request_headers_kb_ = 64;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  std::string long_string = std::string(63 * 1024, 'q');\n  request_headers.addCopy(\"big\", long_string);\n\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, _));\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  request_encoder_->encodeHeaders(request_headers, false);\n}\n\n// Tests request headers with name containing underscore are dropped when the option is set to drop\n// header.\nTEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreDropped) {\n  headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  TestRequestHeaderMapImpl expected_headers(request_headers);\n  request_headers.addCopy(\"bad_header\", \"something\");\n  EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _));\n  request_encoder_->encodeHeaders(request_headers, false);\n  EXPECT_EQ(1, server_stats_store_.counter(\"http2.dropped_headers_with_underscores\").value());\n}\n\n// Tests that request with header names containing underscore are rejected when the option is set to\n// reject request.\nTEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreRejectedByDefault) {\n  headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST;\n  initialize();\n\n  TestRequestHeaderMapImpl 
request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  request_headers.addCopy(\"bad_header\", \"something\");\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1);\n  request_encoder_->encodeHeaders(request_headers, false);\n  EXPECT_EQ(\n      1,\n      server_stats_store_.counter(\"http2.requests_rejected_with_underscores_in_headers\").value());\n}\n\n// Tests request headers with name containing underscore are allowed when the option is set to\n// allow.\nTEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAllowed) {\n  headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  request_headers.addCopy(\"bad_header\", \"something\");\n  TestRequestHeaderMapImpl expected_headers(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _));\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  request_encoder_->encodeHeaders(request_headers, false);\n  EXPECT_EQ(0, server_stats_store_.counter(\"http2.dropped_headers_with_underscores\").value());\n}\n\n// This is the HTTP/2 variant of the HTTP/1 regression test for CVE-2019-18801.\n// Large method headers should not trigger ASSERTs or ASAN. The underlying issue\n// in CVE-2019-18801 only affected the HTTP/1 encoder, but we include a test\n// here for belt-and-braces. 
This also demonstrates that the HTTP/2 codec will\n// accept arbitrary :method headers, unlike the HTTP/1 codec (see\n// Http1ServerConnectionImplTest.RejectInvalidMethod for comparison).\nTEST_P(Http2CodecImplTest, LargeMethodRequestEncode) {\n  max_request_headers_kb_ = 80;\n  initialize();\n\n  const std::string long_method = std::string(79 * 1024, 'a');\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  request_headers.setReferenceKey(Headers::get().Method, long_method);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&request_headers), false));\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  request_encoder_->encodeHeaders(request_headers, false);\n}\n\n// Tests stream reset when the number of request headers exceeds the default maximum of 100.\nTEST_P(Http2CodecImplTest, ManyRequestHeadersInvokeResetStream) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  for (int i = 0; i < 100; i++) {\n    request_headers.addCopy(std::to_string(i), \"\");\n  }\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1);\n  request_encoder_->encodeHeaders(request_headers, false);\n}\n\n// Tests that max number of request headers is configurable.\nTEST_P(Http2CodecImplTest, ManyRequestHeadersAccepted) {\n  max_request_headers_count_ = 150;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  for (int i = 0; i < 145; i++) {\n    request_headers.addCopy(std::to_string(i), \"\");\n  }\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, _));\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  request_encoder_->encodeHeaders(request_headers, false);\n}\n\n// Tests that max number of response headers is configurable.\nTEST_P(Http2CodecImplTest, ManyResponseHeadersAccepted) {\n  max_response_headers_count_ = 
110;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}, {\"compression\", \"test\"}};\n  for (int i = 0; i < 105; i++) {\n    response_headers.addCopy(std::to_string(i), \"\");\n  }\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, true));\n  response_encoder_->encodeHeaders(response_headers, true);\n}\n\nTEST_P(Http2CodecImplTest, LargeRequestHeadersAtLimitAccepted) {\n  uint32_t codec_limit_kb = 64;\n  max_request_headers_kb_ = codec_limit_kb;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  std::string key = \"big\";\n  uint32_t head_room = 77;\n  uint32_t long_string_length =\n      codec_limit_kb * 1024 - request_headers.byteSize() - key.length() - head_room;\n  std::string long_string = std::string(long_string_length, 'q');\n  request_headers.addCopy(key, long_string);\n\n  // The amount of data sent to the codec is not equivalent to the size of the\n  // request headers that Envoy computes, as the codec limits based on the\n  // entire http2 frame. 
The exact head room needed (76) was found through iteration.\n  ASSERT_EQ(request_headers.byteSize() + head_room, codec_limit_kb * 1024);\n\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, _));\n  request_encoder_->encodeHeaders(request_headers, true);\n}\n\nTEST_P(Http2CodecImplTest, LargeRequestHeadersOverDefaultCodecLibraryLimit) {\n  max_request_headers_kb_ = 66;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  std::string long_string = std::string(65 * 1024, 'q');\n  request_headers.addCopy(\"big\", long_string);\n\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1);\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  request_encoder_->encodeHeaders(request_headers, true);\n}\n\nTEST_P(Http2CodecImplTest, LargeRequestHeadersExceedPerHeaderLimit) {\n  // The name-value pair max is set by NGHTTP2_HD_MAX_NV in lib/nghttp2_hd.h to 64KB, and\n  // creates a per-request header limit for us in h2. 
Note that the nghttp2\n  // calculated byte size will differ from envoy due to H2 compression and frames.\n\n  max_request_headers_kb_ = 81;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  std::string long_string = std::string(80 * 1024, 'q');\n  request_headers.addCopy(\"big\", long_string);\n\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(client_callbacks_, onGoAway(_));\n  server_->shutdownNotice();\n  server_->goAway();\n  request_encoder_->encodeHeaders(request_headers, true);\n}\n\nTEST_P(Http2CodecImplTest, ManyLargeRequestHeadersUnderPerHeaderLimit) {\n  max_request_headers_kb_ = 81;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  std::string long_string = std::string(1024, 'q');\n  for (int i = 0; i < 80; i++) {\n    request_headers.addCopy(std::to_string(i), long_string);\n  }\n\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1);\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  request_encoder_->encodeHeaders(request_headers, true);\n}\n\nTEST_P(Http2CodecImplTest, LargeRequestHeadersAtMaxConfigurable) {\n  // Raising the limit past this triggers some unexpected nghttp2 error.\n  // Further debugging required to increase past ~96 KiB.\n  max_request_headers_kb_ = 96;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  std::string long_string = std::string(1024, 'q');\n  for (int i = 0; i < 95; i++) {\n    request_headers.addCopy(std::to_string(i), long_string);\n  }\n\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1);\n  EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);\n  request_encoder_->encodeHeaders(request_headers, true);\n}\n\n// Note this is Http2CodecImplTestAll not Http2CodecImplTest, to test\n// compression with min and max 
HPACK table size.\nTEST_P(Http2CodecImplTestAll, TestCodecHeaderCompression) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));\n  request_encoder_->encodeHeaders(request_headers, true);\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}, {\"compression\", \"test\"}};\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, true));\n  response_encoder_->encodeHeaders(response_headers, true);\n\n  // Sanity check to verify that state of encoders and decoders matches.\n  EXPECT_EQ(nghttp2_session_get_hd_deflate_dynamic_table_size(server_->session()),\n            nghttp2_session_get_hd_inflate_dynamic_table_size(client_->session()));\n  EXPECT_EQ(nghttp2_session_get_hd_deflate_dynamic_table_size(client_->session()),\n            nghttp2_session_get_hd_inflate_dynamic_table_size(server_->session()));\n\n  // Verify that headers are compressed only when both client and server advertise table size\n  // > 0:\n  if (client_http2_options_.hpack_table_size().value() &&\n      server_http2_options_.hpack_table_size().value()) {\n    EXPECT_NE(0, nghttp2_session_get_hd_deflate_dynamic_table_size(client_->session()));\n    EXPECT_NE(0, nghttp2_session_get_hd_deflate_dynamic_table_size(server_->session()));\n  } else {\n    EXPECT_EQ(0, nghttp2_session_get_hd_deflate_dynamic_table_size(client_->session()));\n    EXPECT_EQ(0, nghttp2_session_get_hd_deflate_dynamic_table_size(server_->session()));\n  }\n}\n\n// Verify that codec detects PING flood\nTEST_P(Http2CodecImplTest, PingFlood) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  // Send one frame above the outbound control queue size limit\n  for (uint32_t i = 0; i < 
CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1;\n       ++i) {\n    EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr));\n  }\n\n  int ack_count = 0;\n  Buffer::OwnedImpl buffer;\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(Invoke([&buffer, &ack_count](Buffer::Instance& frame, bool) {\n        ++ack_count;\n        buffer.move(frame);\n      }));\n\n  // Legacy codec does not propagate error details and uses generic error message\n  EXPECT_THROW_WITH_MESSAGE(\n      client_->sendPendingFrames().IgnoreError(), ServerCodecError,\n      Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")\n          ? \"Too many control frames in the outbound queue.\"\n          : \"Too many frames in the outbound queue.\");\n  EXPECT_EQ(ack_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES);\n  EXPECT_EQ(1, server_stats_store_.counter(\"http2.outbound_control_flood\").value());\n}\n\n// Verify that codec allows PING flood when mitigation is disabled\nTEST_P(Http2CodecImplTest, PingFloodMitigationDisabled) {\n  max_outbound_control_frames_ = 2147483647;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  // Send one frame above the outbound control queue size limit\n  for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1;\n       ++i) {\n    EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr));\n  }\n\n  EXPECT_CALL(server_connection_, write(_, _))\n      .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1);\n  EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError());\n}\n\n// Verify that outbound control frame counter decreases when send buffer is 
drained\nTEST_P(Http2CodecImplTest, PingFloodCounterReset) {\n  // Ping frames are 17 bytes each so 237 full frames and a partial frame fit in the current min\n  // size for buffer slices. Setting the limit to 2x+1 the number that fits in a single slice allows\n  // the logic below that verifies drain and overflow thresholds.\n  static const int kMaxOutboundControlFrames = 475;\n  max_outbound_control_frames_ = kMaxOutboundControlFrames;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  for (int i = 0; i < kMaxOutboundControlFrames; ++i) {\n    EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr));\n  }\n\n  int ack_count = 0;\n  Buffer::OwnedImpl buffer;\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(Invoke([&buffer, &ack_count](Buffer::Instance& frame, bool) {\n        ++ack_count;\n        buffer.move(frame);\n      }));\n\n  // We should be 1 frame under the control frame flood mitigation threshold.\n  EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError());\n  EXPECT_EQ(ack_count, kMaxOutboundControlFrames);\n\n  // Drain floor(kMaxOutboundFrames / 2) slices from the send buffer\n  buffer.drain(buffer.length() / 2);\n\n  // Send floor(kMaxOutboundFrames / 2) more pings.\n  for (int i = 0; i < kMaxOutboundControlFrames / 2; ++i) {\n    EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr));\n  }\n  // The number of outbound frames should be half of max so the connection should not be\n  // terminated.\n  EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError());\n  EXPECT_EQ(ack_count, kMaxOutboundControlFrames + kMaxOutboundControlFrames / 2);\n\n  // 1 more ping frame should overflow the outbound frame limit.\n  EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, 
nullptr));\n  // Legacy codec does not propagate error details and uses generic error message\n  EXPECT_THROW_WITH_MESSAGE(\n      client_->sendPendingFrames().IgnoreError(), ServerCodecError,\n      Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")\n          ? \"Too many control frames in the outbound queue.\"\n          : \"Too many frames in the outbound queue.\");\n}\n\n// Verify that codec detects flood of outbound HEADER frames\nTEST_P(Http2CodecImplTest, ResponseHeadersFlood) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  int frame_count = 0;\n  Buffer::OwnedImpl buffer;\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) {\n        ++frame_count;\n        buffer.move(frame);\n      }));\n\n  auto* violation_callback =\n      new NiceMock<Event::MockSchedulableCallback>(&server_connection_.dispatcher_);\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1; ++i) {\n    EXPECT_NO_THROW(response_encoder_->encodeHeaders(response_headers, false));\n  }\n\n  EXPECT_TRUE(violation_callback->enabled_);\n  EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush));\n  violation_callback->invokeCallback();\n\n  EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1);\n  EXPECT_EQ(1, server_stats_store_.counter(\"http2.outbound_flood\").value());\n}\n\n// Verify that codec detects flood of outbound DATA frames\nTEST_P(Http2CodecImplTest, ResponseDataFlood) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  
EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  int frame_count = 0;\n  Buffer::OwnedImpl buffer;\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) {\n        ++frame_count;\n        buffer.move(frame);\n      }));\n\n  auto* violation_callback =\n      new NiceMock<Event::MockSchedulableCallback>(&server_connection_.dispatcher_);\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  response_encoder_->encodeHeaders(response_headers, false);\n  // Account for the single HEADERS frame above\n  for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES; ++i) {\n    Buffer::OwnedImpl data(\"0\");\n    EXPECT_NO_THROW(response_encoder_->encodeData(data, false));\n  }\n\n  EXPECT_TRUE(violation_callback->enabled_);\n  EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush));\n  violation_callback->invokeCallback();\n\n  EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1);\n  EXPECT_EQ(1, server_stats_store_.counter(\"http2.outbound_flood\").value());\n}\n\n// Verify that codec allows outbound DATA flood when mitigation is disabled\nTEST_P(Http2CodecImplTest, ResponseDataFloodMitigationDisabled) {\n  max_outbound_control_frames_ = 2147483647;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  // +2 is to account for HEADERS and PING ACK, that is used to trigger mitigation\n  EXPECT_CALL(server_connection_, write(_, _))\n      .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 2);\n  EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)).Times(1);\n  EXPECT_CALL(response_decoder_, decodeData(_, false))\n      
.Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES);\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  response_encoder_->encodeHeaders(response_headers, false);\n  // Account for the single HEADERS frame above\n  for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES; ++i) {\n    Buffer::OwnedImpl data(\"0\");\n    EXPECT_NO_THROW(response_encoder_->encodeData(data, false));\n  }\n  // Presently flood mitigation is done only when processing downstream data\n  // So we need to send stream from downstream client to trigger mitigation\n  EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr));\n  EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError());\n}\n\n// Verify that outbound frame counter decreases when send buffer is drained\nTEST_P(Http2CodecImplTest, ResponseDataFloodCounterReset) {\n  static const int kMaxOutboundFrames = 100;\n  max_outbound_frames_ = kMaxOutboundFrames;\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  int frame_count = 0;\n  Buffer::OwnedImpl buffer;\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) {\n        ++frame_count;\n        buffer.move(frame);\n      }));\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  response_encoder_->encodeHeaders(response_headers, false);\n  // Account for the single HEADERS frame above\n  for (uint32_t i = 0; i < kMaxOutboundFrames - 1; ++i) {\n    Buffer::OwnedImpl data(\"0\");\n    EXPECT_NO_THROW(response_encoder_->encodeData(data, false));\n  }\n\n  EXPECT_EQ(frame_count, kMaxOutboundFrames);\n  // Drain kMaxOutboundFrames / 2 slices from the send buffer\n  buffer.drain(buffer.length() / 2);\n\n  auto* 
violation_callback =\n      new NiceMock<Event::MockSchedulableCallback>(&server_connection_.dispatcher_);\n\n  for (uint32_t i = 0; i < kMaxOutboundFrames / 2 + 1; ++i) {\n    Buffer::OwnedImpl data(\"0\");\n    EXPECT_NO_THROW(response_encoder_->encodeData(data, false));\n  }\n\n  EXPECT_TRUE(violation_callback->enabled_);\n  EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush));\n  violation_callback->invokeCallback();\n}\n\n// Verify that control frames are added to the counter of outbound frames of all types.\nTEST_P(Http2CodecImplTest, PingStacksWithDataFlood) {\n  initialize();\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  EXPECT_CALL(request_decoder_, decodeHeaders_(_, false));\n  request_encoder_->encodeHeaders(request_headers, false);\n\n  int frame_count = 0;\n  Buffer::OwnedImpl buffer;\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) {\n        ++frame_count;\n        buffer.move(frame);\n      }));\n\n  TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  response_encoder_->encodeHeaders(response_headers, false);\n  // Account for the single HEADERS frame above\n  for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 1; ++i) {\n    Buffer::OwnedImpl data(\"0\");\n    EXPECT_NO_THROW(response_encoder_->encodeData(data, false));\n  }\n  // Send one PING frame above the outbound queue size limit\n  EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr));\n  EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError,\n                            \"Too many frames in the outbound queue.\");\n\n  EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES);\n  EXPECT_EQ(1, server_stats_store_.counter(\"http2.outbound_flood\").value());\n}\n\nTEST_P(Http2CodecImplTest, 
PriorityFlood) {\n  priorityFlood();\n  // Legacy codec does not propagate error details and uses generic error message\n  EXPECT_THROW_WITH_MESSAGE(\n      client_->sendPendingFrames().IgnoreError(), ServerCodecError,\n      Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")\n          ? \"Too many PRIORITY frames\"\n          : \"Flooding was detected in this HTTP/2 session, and it must be closed\");\n}\n\nTEST_P(Http2CodecImplTest, PriorityFloodOverride) {\n  max_inbound_priority_frames_per_stream_ = 2147483647;\n\n  priorityFlood();\n  EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError());\n}\n\nTEST_P(Http2CodecImplTest, WindowUpdateFlood) {\n  windowUpdateFlood();\n  // Legacy codec does not propagate error details and uses generic error message\n  EXPECT_THROW_WITH_MESSAGE(\n      client_->sendPendingFrames().IgnoreError(), ServerCodecError,\n      Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")\n          ? \"Too many WINDOW_UPDATE frames\"\n          : \"Flooding was detected in this HTTP/2 session, and it must be closed\");\n}\n\nTEST_P(Http2CodecImplTest, WindowUpdateFloodOverride) {\n  max_inbound_window_update_frames_per_data_frame_sent_ = 2147483647;\n  windowUpdateFlood();\n  EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError());\n}\n\nTEST_P(Http2CodecImplTest, EmptyDataFlood) {\n  Buffer::OwnedImpl data;\n  emptyDataFlood(data);\n  EXPECT_CALL(request_decoder_, decodeData(_, false));\n  auto status = server_wrapper_.dispatch(data, *server_);\n  EXPECT_FALSE(status.ok());\n  if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")) {\n    EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(status));\n    EXPECT_EQ(\"Too many consecutive frames with an empty payload\", status.message());\n  } else {\n    // Legacy codec does not propagate error details and uses generic error message\n    EXPECT_TRUE(isBufferFloodError(status));\n    
EXPECT_EQ(\"Flooding was detected in this HTTP/2 session, and it must be closed\",\n              status.message());\n  }\n}\n\nTEST_P(Http2CodecImplTest, EmptyDataFloodOverride) {\n  max_consecutive_inbound_frames_with_empty_payload_ = 2147483647;\n  Buffer::OwnedImpl data;\n  emptyDataFlood(data);\n  EXPECT_CALL(request_decoder_, decodeData(_, false))\n      .Times(\n          CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD +\n          1);\n  auto status = server_wrapper_.dispatch(data, *server_);\n  EXPECT_TRUE(status.ok());\n}\n\n// CONNECT without upgrade type gets tagged with \"bytestream\"\nTEST_P(Http2CodecImplTest, ConnectTest) {\n  client_http2_options_.set_allow_connect(true);\n  server_http2_options_.set_allow_connect(true);\n  initialize();\n  MockStreamCallbacks callbacks;\n  request_encoder_->getStream().addCallbacks(callbacks);\n\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  request_headers.setReferenceKey(Headers::get().Method, Http::Headers::get().MethodValues.Connect);\n  TestRequestHeaderMapImpl expected_headers;\n  HttpTestUtility::addDefaultHeaders(expected_headers);\n  expected_headers.setReferenceKey(Headers::get().Method,\n                                   Http::Headers::get().MethodValues.Connect);\n  expected_headers.setReferenceKey(Headers::get().Protocol, \"bytestream\");\n  EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false));\n  request_encoder_->encodeHeaders(request_headers, false);\n}\n\ntemplate <typename, typename> class TestNghttp2SessionFactory;\n\n// Test client for H/2 METADATA frame edge cases.\ntemplate <typename TestClientConnectionImplType>\nclass MetadataTestClientConnectionImpl : public TestClientConnectionImplType {\npublic:\n  MetadataTestClientConnectionImpl(\n      Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope,\n      const 
envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n      uint32_t max_request_headers_kb, uint32_t max_request_headers_count,\n      typename TestClientConnectionImplType::SessionFactory& http2_session_factory)\n      : TestClientConnectionImplType(connection, callbacks, scope, http2_options,\n                                     max_request_headers_kb, max_request_headers_count,\n                                     http2_session_factory) {}\n\n  // Overrides TestClientConnectionImpl::submitMetadata().\n  bool submitMetadata(const MetadataMapVector& metadata_map_vector, int32_t stream_id) override {\n    // Creates metadata payload.\n    encoder_.createPayload(metadata_map_vector);\n    for (uint8_t flags : encoder_.payloadFrameFlagBytes()) {\n      int result =\n          nghttp2_submit_extension(TestClientConnectionImplType::session(),\n                                   ::Envoy::Http::METADATA_FRAME_TYPE, flags, stream_id, nullptr);\n      if (result != 0) {\n        return false;\n      }\n    }\n    // Triggers nghttp2 to populate the payloads of the METADATA frames.\n    int result = nghttp2_session_send(TestClientConnectionImplType::session());\n    return result == 0;\n  }\n\nprotected:\n  template <typename, typename> friend class TestNghttp2SessionFactory;\n\n  MetadataEncoder encoder_;\n};\n\nusing MetadataTestClientConnectionImplNew =\n    MetadataTestClientConnectionImpl<TestClientConnectionImplNew>;\nusing MetadataTestClientConnectionImplLegacy =\n    MetadataTestClientConnectionImpl<TestClientConnectionImplLegacy>;\n\nstruct Nghttp2SessionFactoryDeleter {\n  virtual ~Nghttp2SessionFactoryDeleter() = default;\n};\n\ntemplate <typename Nghttp2SessionFactoryType, typename TestClientConnectionImplType>\nclass TestNghttp2SessionFactory : public Nghttp2SessionFactoryType,\n                                  public Nghttp2SessionFactoryDeleter {\npublic:\n  ~TestNghttp2SessionFactory() override {\n    nghttp2_session_callbacks_del(callbacks_);\n 
   nghttp2_option_del(options_);\n  }\n\n  nghttp2_session* create(const nghttp2_session_callbacks*,\n                          typename Nghttp2SessionFactoryType::ConnectionImplType* connection,\n                          const nghttp2_option*) override;\n\n  void init(nghttp2_session*, typename Nghttp2SessionFactoryType::ConnectionImplType*,\n            const envoy::config::core::v3::Http2ProtocolOptions&) override {}\n\nprivate:\n  nghttp2_session_callbacks* callbacks_;\n  nghttp2_option* options_;\n};\n\ntemplate <typename Nghttp2SessionFactoryType, typename TestClientConnectionImplType>\nnghttp2_session*\nTestNghttp2SessionFactory<Nghttp2SessionFactoryType, TestClientConnectionImplType>::create(\n    const nghttp2_session_callbacks*,\n    typename Nghttp2SessionFactoryType::ConnectionImplType* connection, const nghttp2_option*) {\n  // Only need to provide callbacks required to send METADATA frames.\n  nghttp2_session_callbacks_new(&callbacks_);\n  nghttp2_session_callbacks_set_pack_extension_callback(\n      callbacks_,\n      [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*,\n         void* user_data) -> ssize_t {\n        // Double cast required due to multiple inheritance.\n        return static_cast<MetadataTestClientConnectionImpl<TestClientConnectionImplType>*>(\n                   static_cast<typename Nghttp2SessionFactoryType::ConnectionImplType*>(user_data))\n            ->encoder_.packNextFramePayload(data, length);\n      });\n  nghttp2_session_callbacks_set_send_callback(\n      callbacks_,\n      [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t {\n        // Cast down to MetadataTestClientConnectionImpl to leverage friendship.\n        auto status_or_len =\n            static_cast<MetadataTestClientConnectionImpl<TestClientConnectionImplType>*>(\n                static_cast<typename Nghttp2SessionFactoryType::ConnectionImplType*>(user_data))\n                ->onSend(data, 
length);\n        if (status_or_len.ok()) {\n          return status_or_len.value();\n        }\n        return NGHTTP2_ERR_CALLBACK_FAILURE;\n      });\n  nghttp2_option_new(&options_);\n  nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE);\n  nghttp2_session* session;\n  nghttp2_session_client_new2(&session, callbacks_, connection, options_);\n  return session;\n}\n\ntemplate <>\nnghttp2_session* TestNghttp2SessionFactory<Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory,\n                                           TestClientConnectionImplLegacy>::\n    create(const nghttp2_session_callbacks*,\n           Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory::ConnectionImplType* connection,\n           const nghttp2_option*) {\n  // Only need to provide callbacks required to send METADATA frames.\n  nghttp2_session_callbacks_new(&callbacks_);\n  nghttp2_session_callbacks_set_pack_extension_callback(\n      callbacks_,\n      [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*,\n         void* user_data) -> ssize_t {\n        // Double cast required due to multiple inheritance.\n        return static_cast<MetadataTestClientConnectionImpl<TestClientConnectionImplLegacy>*>(\n                   static_cast<\n                       Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory::ConnectionImplType*>(\n                       user_data))\n            ->encoder_.packNextFramePayload(data, length);\n      });\n  nghttp2_session_callbacks_set_send_callback(\n      callbacks_,\n      [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t {\n        // Cast down to MetadataTestClientConnectionImpl to leverage friendship.\n        return static_cast<MetadataTestClientConnectionImpl<TestClientConnectionImplLegacy>*>(\n                   static_cast<typename Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory::\n                                   ConnectionImplType*>(user_data))\n        
    ->onSend(data, length);\n      });\n  nghttp2_option_new(&options_);\n  nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE);\n  nghttp2_session* session;\n  nghttp2_session_client_new2(&session, callbacks_, connection, options_);\n  return session;\n}\n\nusing TestNghttp2SessionFactoryNew =\n    TestNghttp2SessionFactory<ProdNghttp2SessionFactory, TestClientConnectionImplNew>;\nusing TestNghttp2SessionFactoryLegacy =\n    TestNghttp2SessionFactory<Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory,\n                              TestClientConnectionImplLegacy>;\n\nclass Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testing::Test {\npublic:\n  Http2CodecMetadataTest() = default;\n\nprotected:\n  void initialize() override {\n    allow_metadata_ = true;\n    http2OptionsFromTuple(client_http2_options_, client_settings_);\n    http2OptionsFromTuple(server_http2_options_, server_settings_);\n    if (Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.new_codec_behavior\")) {\n      std::unique_ptr<TestNghttp2SessionFactoryNew> session_factory =\n          std::make_unique<TestNghttp2SessionFactoryNew>();\n      client_ = std::make_unique<MetadataTestClientConnectionImplNew>(\n          client_connection_, client_callbacks_, client_stats_store_, client_http2_options_,\n          max_request_headers_kb_, max_response_headers_count_, *session_factory);\n      server_ = std::make_unique<TestServerConnectionImplNew>(\n          server_connection_, server_callbacks_, server_stats_store_, server_http2_options_,\n          max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_);\n      http2_session_factory_ = std::move(session_factory);\n    } else {\n      std::unique_ptr<TestNghttp2SessionFactoryLegacy> session_factory =\n          std::make_unique<TestNghttp2SessionFactoryLegacy>();\n      client_ = std::make_unique<MetadataTestClientConnectionImplLegacy>(\n          
client_connection_, client_callbacks_, client_stats_store_, client_http2_options_,\n          max_request_headers_kb_, max_response_headers_count_, *session_factory);\n      server_ = std::make_unique<TestServerConnectionImplLegacy>(\n          server_connection_, server_callbacks_, server_stats_store_, server_http2_options_,\n          max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_);\n      http2_session_factory_ = std::move(session_factory);\n    }\n    ON_CALL(client_connection_, write(_, _))\n        .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {\n          ASSERT_TRUE(server_wrapper_.dispatch(data, *server_).ok());\n        }));\n    ON_CALL(server_connection_, write(_, _))\n        .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {\n          ASSERT_TRUE(client_wrapper_.dispatch(data, *client_).ok());\n        }));\n  }\n\nprivate:\n  std::unique_ptr<Nghttp2SessionFactoryDeleter> http2_session_factory_;\n};\n\n// Validates noop handling of METADATA frames without a known stream ID.\n// This is required per RFC 7540, section 5.1.1, which states that stream ID = 0 can be used for\n// \"connection control\" messages, and per the H2 METADATA spec (source/docs/h2_metadata.md), which\n// states that these frames can be received prior to the headers.\nTEST_F(Http2CodecMetadataTest, UnknownStreamId) {\n  initialize();\n  MetadataMap metadata_map = {{\"key\", \"value\"}};\n  MetadataMapVector metadata_vector;\n  metadata_vector.emplace_back(std::make_unique<MetadataMap>(metadata_map));\n  // SETTINGS are required as part of the preface.\n  ASSERT_EQ(nghttp2_submit_settings(client_->session(), NGHTTP2_FLAG_NONE, nullptr, 0), 0);\n  // Validate both the ID = 0 special case and a non-zero ID not already bound to a stream (any ID >\n  // 0 for this test).\n  EXPECT_TRUE(client_->submitMetadata(metadata_vector, 0));\n  EXPECT_TRUE(client_->submitMetadata(metadata_vector, 1000));\n}\n\n} // namespace 
Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/codec_impl_test_util.h",
    "content": "#pragma once\n\n#include \"envoy/http/codec.h\"\n\n#include \"common/http/http2/codec_impl.h\"\n#include \"common/http/http2/codec_impl_legacy.h\"\n#include \"common/http/utility.h\"\n\n#include \"test/mocks/common.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nclass TestCodecStatsProvider {\npublic:\n  TestCodecStatsProvider(Stats::Scope& scope) : scope_(scope) {}\n\n  Http::Http2::CodecStats& http2CodecStats() {\n    return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, scope_);\n  }\n\n  Stats::Scope& scope_;\n  Http::Http2::CodecStats::AtomicPtr http2_codec_stats_;\n};\n\nclass TestCodecSettingsProvider {\npublic:\n  // Returns the value of the SETTINGS parameter keyed by |identifier| sent by the remote endpoint.\n  absl::optional<uint32_t> getRemoteSettingsParameterValue(int32_t identifier) const {\n    const auto it = settings_.find(identifier);\n    if (it == settings_.end()) {\n      return absl::nullopt;\n    }\n    return it->second;\n  }\n\n  // protected:\n  // Stores SETTINGS parameters contained in |settings_frame| to make them available via\n  // getRemoteSettingsParameterValue().\n  void onSettingsFrame(const nghttp2_settings& settings_frame) {\n    for (uint32_t i = 0; i < settings_frame.niv; ++i) {\n      auto result = settings_.insert(\n          std::make_pair(settings_frame.iv[i].settings_id, settings_frame.iv[i].value));\n      // It is possible to have duplicate settings parameters, each new parameter replaces any\n      // existing value.\n      // https://tools.ietf.org/html/rfc7540#section-6.5\n      if (!result.second) {\n        ENVOY_LOG_MISC(debug, \"Duplicated settings parameter {} with value {}\",\n                       settings_frame.iv[i].settings_id, settings_frame.iv[i].value);\n        settings_.erase(result.first);\n        // Guaranteed success here.\n        settings_.insert(\n            std::make_pair(settings_frame.iv[i].settings_id, settings_frame.iv[i].value));\n      }\n    }\n 
 }\n\nprivate:\n  absl::node_hash_map<int32_t, uint32_t> settings_;\n};\n\nstruct ServerCodecFacade : public virtual Connection {\n  virtual nghttp2_session* session() PURE;\n  virtual Http::Stream* getStream(int32_t stream_id) PURE;\n  virtual uint32_t getStreamUnconsumedBytes(int32_t stream_id) PURE;\n  virtual void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark,\n                                              uint32_t high_watermark) PURE;\n};\n\nclass TestServerConnection : public TestCodecStatsProvider,\n                             public TestCodecSettingsProvider,\n                             public ServerCodecFacade {\npublic:\n  TestServerConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {}\n};\n\ntemplate <typename CodecImplType>\nclass TestServerConnectionImpl : public TestServerConnection, public CodecImplType {\npublic:\n  TestServerConnectionImpl(\n      Network::Connection& connection, ServerConnectionCallbacks& callbacks, Stats::Scope& scope,\n      const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n      uint32_t max_request_headers_kb, uint32_t max_request_headers_count,\n      envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n          headers_with_underscores_action)\n      : TestServerConnection(scope),\n        CodecImplType(connection, callbacks, http2CodecStats(), random_, http2_options,\n                      max_request_headers_kb, max_request_headers_count,\n                      headers_with_underscores_action) {}\n\n  // ServerCodecFacade\n  nghttp2_session* session() override { return CodecImplType::session_; }\n  Http::Stream* getStream(int32_t stream_id) override {\n    return CodecImplType::getStream(stream_id);\n  }\n  uint32_t getStreamUnconsumedBytes(int32_t stream_id) override {\n    return CodecImplType::getStream(stream_id)->unconsumed_bytes_;\n  }\n  void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark,\n               
                       uint32_t high_watermark) override {\n    CodecImplType::getStream(stream_id)->setWriteBufferWatermarks(low_watermark, high_watermark);\n  }\n\nprotected:\n  // Overrides ServerConnectionImpl::onSettingsForTest().\n  void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); }\n\n  testing::NiceMock<Random::MockRandomGenerator> random_;\n};\n\nusing TestServerConnectionImplLegacy =\n    TestServerConnectionImpl<Envoy::Http::Legacy::Http2::ServerConnectionImpl>;\nusing TestServerConnectionImplNew =\n    TestServerConnectionImpl<Envoy::Http::Http2::ServerConnectionImpl>;\n\nstruct ClientCodecFacade : public ClientConnection {\n  virtual nghttp2_session* session() PURE;\n  virtual Http::Stream* getStream(int32_t stream_id) PURE;\n  virtual uint64_t getStreamPendingSendDataLength(int32_t stream_id) PURE;\n  virtual Status sendPendingFrames() PURE;\n  virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) PURE;\n};\n\nclass TestClientConnection : public TestCodecStatsProvider,\n                             public TestCodecSettingsProvider,\n                             public ClientCodecFacade {\npublic:\n  TestClientConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {}\n\n  testing::NiceMock<Random::MockRandomGenerator> random_generator_;\n};\n\ntemplate <typename CodecImplType>\nclass TestClientConnectionImpl : public TestClientConnection, public CodecImplType {\npublic:\n  TestClientConnectionImpl(Network::Connection& connection, Http::ConnectionCallbacks& callbacks,\n                           Stats::Scope& scope,\n                           const envoy::config::core::v3::Http2ProtocolOptions& http2_options,\n                           uint32_t max_request_headers_kb, uint32_t max_request_headers_count,\n                           typename CodecImplType::SessionFactory& http2_session_factory)\n      : TestClientConnection(scope),\n        CodecImplType(connection, 
callbacks, http2CodecStats(), random_generator_, http2_options,\n                      max_request_headers_kb, max_request_headers_count, http2_session_factory) {}\n\n  // ClientCodecFacade\n  RequestEncoder& newStream(ResponseDecoder& response_decoder) override {\n    return CodecImplType::newStream(response_decoder);\n  }\n  nghttp2_session* session() override { return CodecImplType::session_; }\n  Http::Stream* getStream(int32_t stream_id) override {\n    return CodecImplType::getStream(stream_id);\n  }\n  uint64_t getStreamPendingSendDataLength(int32_t stream_id) override {\n    return CodecImplType::getStream(stream_id)->pending_send_data_.length();\n  }\n  Status sendPendingFrames() override;\n  // Submits an H/2 METADATA frame to the peer.\n  // Returns true on success, false otherwise.\n  bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) override {\n    UNREFERENCED_PARAMETER(mm_vector);\n    UNREFERENCED_PARAMETER(stream_id);\n    return false;\n  }\n\nprotected:\n  // Overrides ClientConnectionImpl::onSettingsForTest().\n  void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); }\n};\n\ntemplate <typename CodecImplType>\nStatus TestClientConnectionImpl<CodecImplType>::sendPendingFrames() {\n  return CodecImplType::sendPendingFrames();\n}\n\ntemplate <>\nStatus\nTestClientConnectionImpl<Envoy::Http::Legacy::Http2::ClientConnectionImpl>::sendPendingFrames() {\n  Envoy::Http::Legacy::Http2::ClientConnectionImpl::sendPendingFrames();\n  return okStatus();\n}\n\nusing TestClientConnectionImplLegacy =\n    TestClientConnectionImpl<Envoy::Http::Legacy::Http2::ClientConnectionImpl>;\nusing TestClientConnectionImplNew =\n    TestClientConnectionImpl<Envoy::Http::Http2::ClientConnectionImpl>;\n\nusing ProdNghttp2SessionFactoryLegacy = Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory;\nusing ProdNghttp2SessionFactoryNew = Envoy::Http::Http2::ProdNghttp2SessionFactory;\n\n} // namespace Http2\n} // 
namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/conn_pool_test.cc",
    "content": "#include <cstdint>\n#include <memory>\n#include <vector>\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/http/http2/conn_pool.h\"\n#include \"common/network/raw_buffer_socket.h\"\n#include \"common/network/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/transport_socket_match.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::DoAll;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::Property;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nclass TestConnPoolImpl : public ConnPoolImpl {\npublic:\n  using ConnPoolImpl::ConnPoolImpl;\n\n  CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override {\n    // We expect to own the connection, but already have it, so just release it to prevent it from\n    // getting deleted.\n    data.connection_.release();\n    return CodecClientPtr{createCodecClient_(data)};\n  }\n\n  MOCK_METHOD(CodecClient*, createCodecClient_, (Upstream::Host::CreateConnectionData & data));\n};\n\nclass ActiveTestRequest;\n\nclass Http2ConnPoolImplTest : public testing::Test {\npublic:\n  struct TestCodecClient {\n    Http::MockClientConnection* codec_;\n    Network::MockClientConnection* connection_;\n    CodecClientForTest* codec_client_;\n    Event::MockTimer* connect_timer_;\n    Event::DispatcherPtr client_dispatcher_;\n  };\n\n  Http2ConnPoolImplTest()\n      : 
api_(Api::createApiForTest(stats_store_)),\n        pool_(std::make_unique<TestConnPoolImpl>(\n            dispatcher_, random_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr)) {\n    // Default connections to 1024 because the tests shouldn't be relying on the\n    // connection resource limit for most tests.\n    cluster_->resetResourceManager(1024, 1024, 1024, 1, 1);\n  }\n\n  ~Http2ConnPoolImplTest() override {\n    EXPECT_EQ(\"\", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges()));\n  }\n\n  void createTestClients(int num_clients) {\n    // Create N clients.\n    for (int i = 0; i < num_clients; ++i) {\n      test_clients_.emplace_back();\n      TestCodecClient& test_client = test_clients_.back();\n      test_client.connection_ = new NiceMock<Network::MockClientConnection>();\n      test_client.codec_ = new NiceMock<Http::MockClientConnection>();\n      test_client.connect_timer_ = new NiceMock<Event::MockTimer>();\n      test_client.client_dispatcher_ = api_->allocateDispatcher(\"test_thread\");\n    }\n\n    // Outside the for loop, set the createTimer expectations.\n    EXPECT_CALL(dispatcher_, createTimer_(_))\n        .Times(num_clients)\n        .WillRepeatedly(Invoke([this](Event::TimerCb cb) {\n          test_clients_[timer_index_].connect_timer_->callback_ = cb;\n          return test_clients_[timer_index_++].connect_timer_;\n        }));\n    // Loop again through the last num_clients entries to set enableTimer expectations.\n    // Ideally this could be done in the loop above but it breaks InSequence\n    // assertions.\n    for (size_t i = test_clients_.size() - num_clients; i < test_clients_.size(); ++i) {\n      TestCodecClient& test_client = test_clients_[i];\n      EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _));\n    }\n  }\n\n  void expectConnectionSetupForClient(int num_clients,\n                                      absl::optional<uint32_t> buffer_limits = {}) {\n    // Set the createClientConnection 
mocks. The createCodecClient_ invoke\n    // below takes care of making sure connection_index_ is updated.\n    EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _))\n        .Times(num_clients)\n        .WillRepeatedly(InvokeWithoutArgs([this]() -> Network::ClientConnection* {\n          return test_clients_[connection_index_].connection_;\n        }));\n\n    // Loop through the last num_clients clients, setting up codec clients and\n    // per-client mocks.\n    for (size_t i = test_clients_.size() - num_clients; i < test_clients_.size(); ++i) {\n      TestCodecClient& test_client = test_clients_[i];\n      auto cluster = std::make_shared<NiceMock<Upstream::MockClusterInfo>>();\n      Network::ClientConnectionPtr connection{test_client.connection_};\n      test_client.codec_client_ = new CodecClientForTest(\n          CodecClient::Type::HTTP1, std::move(connection), test_client.codec_,\n          [this](CodecClient*) -> void { onClientDestroy(); },\n          Upstream::makeTestHost(cluster, \"tcp://127.0.0.1:9000\"), *test_client.client_dispatcher_);\n      if (buffer_limits) {\n        EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes())\n            .Times(num_clients)\n            .WillRepeatedly(Return(*buffer_limits));\n        EXPECT_CALL(*test_client.connection_, setBufferLimits(*buffer_limits)).Times(1);\n      }\n    }\n    // Finally (for InSequence tests) set up createCodecClient and make sure the\n    // index is incremented to avoid returning the same client more than once.\n    EXPECT_CALL(*pool_, createCodecClient_(_))\n        .Times(num_clients)\n        .WillRepeatedly(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* {\n          return test_clients_[connection_index_++].codec_client_;\n        }));\n  }\n\n  // Creates a new test client, expecting a new connection to be created and associated\n  // with the new client.\n  void expectClientCreate(absl::optional<uint32_t> buffer_limits = {}) {\n    
createTestClients(1);\n    expectConnectionSetupForClient(1, buffer_limits);\n  }\n  void expectClientsCreate(int num_clients) {\n    createTestClients(num_clients);\n    expectConnectionSetupForClient(num_clients, absl::nullopt);\n  }\n\n  // Connects a pending connection for client with the given index.\n  void expectClientConnect(size_t index);\n  // Connects a pending connection for client with the given index, asserting\n  // that the provided request receives onPoolReady.\n  void expectClientConnect(size_t index, ActiveTestRequest& r);\n  // Asserts that onPoolReady is called on the request.\n  void expectStreamConnect(size_t index, ActiveTestRequest& r);\n\n  // Resets the connection belonging to the provided index, asserting that the\n  // provided request receives onPoolFailure.\n  void expectClientReset(size_t index, ActiveTestRequest& r, bool local_failure);\n  // Asserts that the provided requests receives onPoolFailure.\n  void expectStreamReset(ActiveTestRequest& r);\n\n  /**\n   * Closes a test client.\n   */\n  void closeClient(size_t index);\n\n  /**\n   * Closes all test clients.\n   */\n  void closeAllClients();\n\n  /**\n   * Completes an active request. Useful when this flow is not part of the main test assertions.\n   */\n  void completeRequest(ActiveTestRequest& r);\n\n  /**\n   * Completes an active request and closes the upstream connection. 
Useful when this flow is\n   * not part of the main test assertions.\n   */\n  void completeRequestCloseUpstream(size_t index, ActiveTestRequest& r);\n\n  MOCK_METHOD(void, onClientDestroy, ());\n\n  int timer_index_{};\n  int connection_index_{};\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr api_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};\n  Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, \"tcp://127.0.0.1:80\")};\n  std::unique_ptr<TestConnPoolImpl> pool_;\n  std::vector<TestCodecClient> test_clients_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  Random::MockRandomGenerator random_;\n};\n\nclass ActiveTestRequest {\npublic:\n  ActiveTestRequest(Http2ConnPoolImplTest& test, size_t client_index, bool expect_connected) {\n    if (expect_connected) {\n      EXPECT_CALL(*test.test_clients_[client_index].codec_, newStream(_))\n          .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(inner_encoder_)));\n      EXPECT_CALL(callbacks_.pool_ready_, ready());\n      EXPECT_EQ(nullptr, test.pool_->newStream(decoder_, callbacks_));\n    } else {\n      handle_ = test.pool_->newStream(decoder_, callbacks_);\n      EXPECT_NE(nullptr, handle_);\n    }\n  }\n\n  MockResponseDecoder decoder_;\n  ConnPoolCallbacks callbacks_;\n  ResponseDecoder* inner_decoder_{};\n  NiceMock<MockRequestEncoder> inner_encoder_;\n  ConnectionPool::Cancellable* handle_{};\n};\n\nvoid Http2ConnPoolImplTest::expectClientConnect(size_t index) {\n  EXPECT_CALL(*test_clients_[index].connect_timer_, disableTimer());\n  test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n}\n\nvoid Http2ConnPoolImplTest::expectClientConnect(size_t index, ActiveTestRequest& r) {\n  expectStreamConnect(index, r);\n  expectClientConnect(index);\n}\n\nvoid Http2ConnPoolImplTest::expectStreamConnect(size_t index, ActiveTestRequest& r) {\n  
EXPECT_CALL(*test_clients_[index].codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&r.inner_decoder_), ReturnRef(r.inner_encoder_)));\n  EXPECT_CALL(r.callbacks_.pool_ready_, ready());\n}\n\nvoid Http2ConnPoolImplTest::expectClientReset(size_t index, ActiveTestRequest& r,\n                                              bool local_failure) {\n  expectStreamReset(r);\n  EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer());\n  if (local_failure) {\n    test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::LocalClose);\n    EXPECT_EQ(r.callbacks_.reason_, ConnectionPool::PoolFailureReason::LocalConnectionFailure);\n  } else {\n    test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n    EXPECT_EQ(r.callbacks_.reason_, ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n  }\n}\n\nvoid Http2ConnPoolImplTest::expectStreamReset(ActiveTestRequest& r) {\n  EXPECT_CALL(r.callbacks_.pool_failure_, ready());\n}\n\nvoid Http2ConnPoolImplTest::closeClient(size_t index) {\n  test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n}\n\nvoid Http2ConnPoolImplTest::closeAllClients() {\n  for (auto& test_client : test_clients_) {\n    test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  }\n  EXPECT_CALL(*this, onClientDestroy()).Times(test_clients_.size());\n  dispatcher_.clearDeferredDeleteList();\n}\n\nvoid Http2ConnPoolImplTest::completeRequest(ActiveTestRequest& r) {\n  EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true));\n  r.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_CALL(r.decoder_, decodeHeaders_(_, true));\n  r.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n}\n\nvoid 
Http2ConnPoolImplTest::completeRequestCloseUpstream(size_t index, ActiveTestRequest& r) {\n  completeRequest(r);\n  closeClient(index);\n}\n\n/**\n * Verify that the pool retains and returns the host it was constructed with.\n */\nTEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_->host()); }\n\n/**\n * Verify that idle connections are closed immediately when draining.\n */\nTEST_F(Http2ConnPoolImplTest, DrainConnectionIdle) {\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r(*this, 0, false);\n  expectClientConnect(0, r);\n  completeRequest(r);\n\n  EXPECT_CALL(*this, onClientDestroy());\n  pool_->drainConnections();\n}\n\n/**\n * Verify that we set the ALPN fallback.\n */\nTEST_F(Http2ConnPoolImplTest, VerifyAlpnFallback) {\n  InSequence s;\n\n  // Override the TransportSocketFactory with a mock version we can add expectations to.\n  auto factory = std::make_unique<Network::MockTransportSocketFactory>();\n  auto factory_ptr = factory.get();\n  cluster_->transport_socket_matcher_ =\n      std::make_unique<NiceMock<Upstream::MockTransportSocketMatcher>>(std::move(factory));\n\n  // Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at\n  // our test transport socket factory.\n  host_ = Upstream::makeTestHost(cluster_, \"tcp://127.0.0.1:80\");\n  pool_ = std::make_unique<TestConnPoolImpl>(dispatcher_, random_, host_,\n                                             Upstream::ResourcePriority::Default, nullptr, nullptr);\n\n  // This requires some careful set up of expectations ordering: the call to createTransportSocket\n  // happens before all the connection set up but after the test client is created (due to some)\n  // of the mocks that are constructed as part of the test client.\n  createTestClients(1);\n  EXPECT_CALL(*factory_ptr, createTransportSocket(_))\n      .WillOnce(Invoke(\n          [](Network::TransportSocketOptionsSharedPtr options) -> Network::TransportSocketPtr {\n            
EXPECT_TRUE(options != nullptr);\n            EXPECT_EQ(options->applicationProtocolFallback(),\n                      Http::Utility::AlpnNames::get().Http2);\n            return std::make_unique<Network::RawBufferSocket>();\n          }));\n  expectConnectionSetupForClient(1);\n  ActiveTestRequest r(*this, 0, false);\n  expectClientConnect(0, r);\n  EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true));\n  r.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  EXPECT_CALL(r.decoder_, decodeHeaders_(_, true));\n  EXPECT_CALL(*this, onClientDestroy());\n  r.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  // Close connections.\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Verify that a ready connection with a request in progress is moved to\n * draining and closes when the request completes.\n */\nTEST_F(Http2ConnPoolImplTest, DrainConnectionReadyWithRequest) {\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r(*this, 0, false);\n  expectClientConnect(0, r);\n  EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true));\n  r.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  pool_->drainConnections();\n\n  EXPECT_CALL(r.decoder_, decodeHeaders_(_, true));\n  EXPECT_CALL(*this, onClientDestroy());\n  r.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n}\n\n/**\n * Verify that a busy connection is moved to draining and closes when all requests\n * complete.\n */\nTEST_F(Http2ConnPoolImplTest, DrainConnectionBusy) {\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1);\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r(*this, 
0, false);\n  expectClientConnect(0, r);\n  EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true));\n  r.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  pool_->drainConnections();\n\n  EXPECT_CALL(r.decoder_, decodeHeaders_(_, true));\n  EXPECT_CALL(*this, onClientDestroy());\n  r.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n}\n\n/**\n * Verify that draining connections with a pending request does not\n * close the connection, but draining without a pending request does close\n * the connection.\n */\nTEST_F(Http2ConnPoolImplTest, DrainConnectionConnecting) {\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r(*this, 0, false);\n\n  // Pending request prevents the connection from being drained\n  pool_->drainConnections();\n\n  // Cancel the pending request, and then the connection can be closed.\n  r.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default);\n  EXPECT_CALL(*this, onClientDestroy());\n  pool_->drainConnections();\n}\n\n/**\n * Verify that on CloseExcess, the connection is destroyed immediately.\n */\nTEST_F(Http2ConnPoolImplTest, CloseExcess) {\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r(*this, 0, false);\n\n  // Pending request prevents the connection from being drained\n  pool_->drainConnections();\n\n  EXPECT_CALL(*this, onClientDestroy());\n  r.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n}\n\n/**\n * Verify that on CloseExcess connections are destroyed when they can be.\n */\nTEST_F(Http2ConnPoolImplTest, CloseExcessTwo) {\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1);\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n\n  expectClientCreate();\n  ActiveTestRequest r2(*this, 0, false);\n  {\n    EXPECT_CALL(*this, onClientDestroy());\n    
r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n\n  {\n    EXPECT_CALL(*this, onClientDestroy());\n    r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n}\n\n/**\n * Verify that on CloseExcess, the connections are destroyed iff they are actually excess.\n */\nTEST_F(Http2ConnPoolImplTest, CloseExcessMultipleRequests) {\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(3);\n  InSequence s;\n\n  // With 3 requests per connection, the first request will result in a client\n  // connection, and the next two will be queued for that connection.\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  ActiveTestRequest r2(*this, 0, false);\n  ActiveTestRequest r3(*this, 0, false);\n\n  // The fourth request will kick off a second connection, and the fifth will plan to share it.\n  expectClientCreate();\n  ActiveTestRequest r4(*this, 0, false);\n  ActiveTestRequest r5(*this, 0, false);\n\n  // The section below cancels the active requests in fairly random order, to\n  // ensure there's no association between the requests and the clients created\n  // for them.\n\n  // The first cancel will not destroy any clients, as there are still four pending\n  // requests and they can not all share the first connection.\n  {\n    EXPECT_CALL(*this, onClientDestroy()).Times(0);\n    r5.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n  // The second cancel will destroy one client, as there will be three pending requests\n  // remaining, and they only need one connection.\n  {\n    EXPECT_CALL(*this, onClientDestroy());\n    r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n\n  // The next two calls will not destroy the final client, as there are two other\n  // pending requests waiting on it.\n  {\n    EXPECT_CALL(*this, onClientDestroy()).Times(0);\n    r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n    
r4.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n  // Finally with the last request gone, the final client is destroyed.\n  {\n    EXPECT_CALL(*this, onClientDestroy());\n    r3.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n}\n\nTEST_F(Http2ConnPoolImplTest, CloseExcessMixedMultiplexing) {\n  InSequence s;\n\n  // Create clients with in-order capacity:\n  // 3  2  6\n  // Connection capacity is min(max requests per connection, max concurrent streams).\n  // Use maxRequestsPerConnection here since max requests is tested above.\n  EXPECT_CALL(*cluster_, maxRequestsPerConnection).WillOnce(Return(3));\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  ActiveTestRequest r2(*this, 0, false);\n  ActiveTestRequest r3(*this, 0, false);\n\n  EXPECT_CALL(*cluster_, maxRequestsPerConnection).WillOnce(Return(2));\n  expectClientCreate();\n  ActiveTestRequest r4(*this, 0, false);\n  ActiveTestRequest r5(*this, 0, false);\n\n  EXPECT_CALL(*cluster_, maxRequestsPerConnection).WillOnce(Return(6));\n  expectClientCreate();\n  ActiveTestRequest r6(*this, 0, false);\n\n  // 6 requests, capacity [3, 2, 6] - the first cancel should tear down the client with [3]\n  // since we destroy oldest first and [3, 2] can handle the remaining 5 requests.\n  {\n    EXPECT_CALL(*this, onClientDestroy());\n    r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n\n  // 5 requests, capacity [3, 2] - no teardown\n  {\n    EXPECT_CALL(*this, onClientDestroy()).Times(0);\n    r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n  // 4 requests, capacity [3, 2] - canceling one destroys the client with [2]\n  {\n    EXPECT_CALL(*this, onClientDestroy());\n    r3.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n\n  // 3 requests, capacity [3]. 
Tear down the last channel when all 3 are canceled.\n  {\n    EXPECT_CALL(*this, onClientDestroy()).Times(0);\n    r4.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n    r5.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n  {\n    EXPECT_CALL(*this, onClientDestroy());\n    r6.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  }\n}\n\n/**\n * Verify that connections are drained when requested.\n */\nTEST_F(Http2ConnPoolImplTest, DrainConnections) {\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n\n  InSequence s;\n  cluster_->max_requests_per_connection_ = 1;\n\n  // Test drain connections call prior to any connections being created.\n  pool_->drainConnections();\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // With max_streams == 1, the second request moves the first connection\n  // to draining.\n  expectClientCreate();\n  ActiveTestRequest r2(*this, 1, false);\n  expectClientConnect(1, r2);\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // This will move the second connection to draining.\n  pool_->drainConnections();\n\n  // This will destroy the 2 draining connections.\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy()).Times(2);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\n// Test that 
cluster.http2_protocol_options.max_concurrent_streams limits\n// concurrent requests and causes additional connections to be created.\nTEST_F(Http2ConnPoolImplTest, MaxConcurrentRequestsPerStream) {\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1);\n\n  InSequence s;\n\n  {\n    // Create request and complete it.\n    expectClientCreate();\n    ActiveTestRequest r(*this, 0, false);\n    expectClientConnect(0, r);\n    completeRequest(r);\n  }\n\n  // Previous request completed, so this one will re-use the connection.\n  {\n    ActiveTestRequest r(*this, 0, true);\n    completeRequest(r);\n  }\n\n  // Two concurrent requests causes one additional connection to be created.\n  {\n    ActiveTestRequest r1(*this, 0, true);\n    expectClientCreate();\n    ActiveTestRequest r2(*this, 1, false);\n    expectClientConnect(1, r2);\n\n    // Complete one of them, and create another, and it will re-use the connection.\n    completeRequest(r2);\n    ActiveTestRequest r3(*this, 1, true);\n\n    completeRequest(r1);\n    completeRequest(r3);\n  }\n\n  // Create two more requests; both should use existing connections.\n  {\n    ActiveTestRequest r1(*this, 1, true);\n    ActiveTestRequest r2(*this, 0, true);\n    completeRequest(r1);\n    completeRequest(r2);\n  }\n\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy()).Times(2);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_total_.value());\n}\n\n// Verifies that requests are queued up in the conn pool until the connection becomes ready.\nTEST_F(Http2ConnPoolImplTest, PendingStreams) {\n  InSequence s;\n\n  // Create three requests. 
These should be queued up.\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  ActiveTestRequest r2(*this, 0, false);\n  ActiveTestRequest r3(*this, 0, false);\n\n  // The connection now becomes ready. This should cause all the queued requests to be sent.\n  expectStreamConnect(0, r1);\n  expectStreamConnect(0, r2);\n  expectClientConnect(0, r3);\n\n  // Send a request through each stream.\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true));\n  r3.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // Since we now have an active connection, subsequent requests should connect immediately.\n  ActiveTestRequest r4(*this, 0, true);\n\n  // Clean up everything.\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\n// Verifies that the correct number of CONNECTING connections are created for\n// the pending requests, when the total requests per connection is limited\nTEST_F(Http2ConnPoolImplTest, PendingStreamsNumberConnectingTotalRequestsPerConnection) {\n  cluster_->max_requests_per_connection_ = 2;\n  InSequence s;\n\n  // Create three requests. 
The 3rd should create a 2nd connection due to the limit\n  // of 2 requests per connection.\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  ActiveTestRequest r2(*this, 0, false);\n  expectClientCreate();\n  ActiveTestRequest r3(*this, 1, false);\n\n  // The connection now becomes ready. This should cause all the queued requests to be sent.\n  expectStreamConnect(0, r1);\n  expectClientConnect(0, r2);\n  expectClientConnect(1, r3);\n\n  // Send a request through each stream.\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true));\n  r3.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // Clean up everything.\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy()).Times(2);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n// Verifies that the correct number of CONNECTING connections are created for\n// the pending requests, when the concurrent requests per connection is limited\nTEST_F(Http2ConnPoolImplTest, PendingStreamsNumberConnectingConcurrentRequestsPerConnection) {\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(2);\n  InSequence s;\n\n  // Create three requests. 
The 3rd should create a 2nd connection due to the limit\n  // of 2 requests per connection.\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  ActiveTestRequest r2(*this, 0, false);\n  expectClientCreate();\n  ActiveTestRequest r3(*this, 1, false);\n\n  // The connection now becomes ready. This should cause all the queued requests to be sent.\n  expectStreamConnect(0, r1);\n  expectClientConnect(0, r2);\n  expectClientConnect(1, r3);\n\n  // Send a request through each stream.\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true));\n  r3.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  // Clean up everything.\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy()).Times(2);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n// Verifies that requests are queued up in the conn pool and fail when the connection\n// fails to be established.\nTEST_F(Http2ConnPoolImplTest, PendingStreamsFailure) {\n  InSequence s;\n  cluster_->max_requests_per_connection_ = 10;\n\n  // Create three requests. These should be queued up.\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  ActiveTestRequest r2(*this, 0, false);\n  ActiveTestRequest r3(*this, 0, false);\n\n  // The connection now becomes ready. 
This should cause all the queued requests to be sent.\n  // Note that these occur in reverse order due to the order we purge pending requests in.\n  expectStreamReset(r3);\n  expectStreamReset(r2);\n  expectClientReset(0, r1, false);\n\n  expectClientCreate();\n  // Since we have no active connection, subsequence requests will queue until\n  // the new connection is established.\n  ActiveTestRequest r4(*this, 1, false);\n  expectClientConnect(1, r4);\n\n  // Clean up everything.\n  test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy()).Times(2);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\n// Verifies resets due to local connection closes are tracked correctly.\nTEST_F(Http2ConnPoolImplTest, LocalFailure) {\n  InSequence s;\n  cluster_->max_requests_per_connection_ = 10;\n\n  // Create three requests. These should be queued up.\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  ActiveTestRequest r2(*this, 0, false);\n  ActiveTestRequest r3(*this, 0, false);\n\n  // The connection now becomes ready. 
This should cause all the queued requests to be sent.\n  // Note that these occur in reverse order due to the order we purge pending requests in.\n  expectStreamReset(r3);\n  expectStreamReset(r2);\n  expectClientReset(0, r1, true);\n\n  EXPECT_CALL(*this, onClientDestroy());\n}\n\n// Verifies that requests are queued up in the conn pool and respect max request circuit breaking\n// when the connection is established.\nTEST_F(Http2ConnPoolImplTest, PendingStreamsRequestOverflow) {\n  InSequence s;\n\n  // Inflate the resource count to just under the limit.\n  auto& requests = host_->cluster().resourceManager(Upstream::ResourcePriority::Default).requests();\n  for (uint64_t i = 0; i < requests.max() - 1; ++i) {\n    requests.inc();\n  }\n\n  // Create three requests. These should be queued up.\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  ActiveTestRequest r2(*this, 0, false);\n  ActiveTestRequest r3(*this, 0, false);\n\n  // We queued up three requests, but we can only afford one before hitting the circuit\n  // breaker. 
Thus, we expect to see 2 resets and one successful connect.\n  expectStreamConnect(0, r1);\n  expectStreamReset(r2);\n  expectStreamReset(r3);\n  expectClientConnect(0);\n\n  // Clean up everything.\n  for (uint64_t i = 0; i < requests.max() - 1; ++i) {\n    requests.dec();\n  }\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\n// Verifies that we honor the max pending requests circuit breaker.\nTEST_F(Http2ConnPoolImplTest, PendingStreamsMaxPendingCircuitBreaker) {\n  InSequence s;\n\n  // Inflate the resource count to just under the limit.\n  auto& pending_reqs =\n      host_->cluster().resourceManager(Upstream::ResourcePriority::Default).pendingRequests();\n  for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) {\n    pending_reqs.inc();\n  }\n\n  // Create two requests. 
The first one should be enqueued, while the second one\n  // should fail fast due to us being above the max pending requests limit.\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n\n  MockResponseDecoder decoder;\n  ConnPoolCallbacks callbacks;\n  EXPECT_CALL(callbacks.pool_failure_, ready());\n  EXPECT_EQ(nullptr, pool_->newStream(decoder, callbacks));\n\n  expectStreamConnect(0, r1);\n  expectClientConnect(0);\n\n  // Clean up everything.\n  for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) {\n    pending_reqs.dec();\n  }\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, VerifyConnectionTimingStats) {\n  InSequence s;\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  EXPECT_CALL(cluster_->stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_cx_connect_ms\"), _));\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true));\n  r1.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  EXPECT_CALL(cluster_->stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_cx_length_ms\"), _));\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, 
cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\n/**\n * Test that buffer limits are set.\n */\nTEST_F(Http2ConnPoolImplTest, VerifyBufferLimits) {\n  InSequence s;\n  expectClientCreate(8192);\n  ActiveTestRequest r1(*this, 0, false);\n\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true));\n  r1.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, RequestAndResponse) {\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_active_.value());\n  EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true));\n  r1.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  ActiveTestRequest r2(*this, 0, true);\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true));\n  r2.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  
test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_active_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, LocalReset) {\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, false);\n  r1.callbacks_.outer_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset);\n\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_tx_reset_.value());\n  EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value());\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_active_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, RemoteReset) {\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, false);\n  r1.inner_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(1U, 
cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_rx_reset_.value());\n  EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value());\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_active_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) {\n  InSequence s;\n  cluster_->max_requests_per_connection_ = 1;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  ReadyWatcher drained;\n  pool_->addDrainedCallback([&]() -> void { drained.ready(); });\n\n  EXPECT_CALL(dispatcher_, deferredDelete_(_));\n  EXPECT_CALL(drained, ready());\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) {\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n\n  InSequence s;\n  cluster_->max_requests_per_connection_ = 1;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  expectClientCreate();\n  ActiveTestRequest r2(*this, 1, false);\n  expectClientConnect(1, r2);\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", 
\"GET\"}}, true);\n\n  ReadyWatcher drained;\n  pool_->addDrainedCallback([&]() -> void { drained.ready(); });\n\n  EXPECT_CALL(dispatcher_, deferredDelete_(_));\n  EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true));\n  r2.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_CALL(dispatcher_, deferredDelete_(_));\n  EXPECT_CALL(drained, ready());\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, DrainPrimary) {\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n\n  InSequence s;\n  cluster_->max_requests_per_connection_ = 1;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  expectClientCreate();\n  ActiveTestRequest r2(*this, 1, false);\n  expectClientConnect(1, r2);\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n\n  ReadyWatcher drained;\n  pool_->addDrainedCallback([&]() -> void { drained.ready(); });\n\n  EXPECT_CALL(dispatcher_, deferredDelete_(_));\n  EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true));\n  r2.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  
EXPECT_CALL(dispatcher_, deferredDelete_(_));\n  EXPECT_CALL(drained, ready());\n  EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true));\n  r1.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n}\n\nTEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) {\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n\n  InSequence s;\n  cluster_->max_requests_per_connection_ = 1;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_CALL(dispatcher_, deferredDelete_(_));\n  EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true));\n  r1.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  expectClientCreate();\n  ActiveTestRequest r2(*this, 1, false);\n  expectClientConnect(1, r2);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_CALL(dispatcher_, deferredDelete_(_));\n  EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true));\n  r2.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  ReadyWatcher drained;\n  EXPECT_CALL(drained, ready());\n  pool_->addDrainedCallback([&]() -> void { drained.ready(); });\n\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n}\n\nTEST_F(Http2ConnPoolImplTest, ConnectTimeout) {\n  InSequence s;\n\n  EXPECT_EQ(0U, 
cluster_->circuit_breakers_stats_.rq_open_.value());\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  EXPECT_CALL(r1.callbacks_.pool_failure_, ready());\n  test_clients_[0].connect_timer_->invokeCallback();\n  EXPECT_EQ(r1.callbacks_.reason_, ConnectionPool::PoolFailureReason::Timeout);\n\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value());\n\n  expectClientCreate();\n  ActiveTestRequest r2(*this, 1, false);\n  expectClientConnect(1, r2);\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true));\n  r2.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_timeout_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value());\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_local_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, MaxGlobalRequests) {\n  cluster_->resetResourceManager(1024, 1024, 1, 1, 1);\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, 
{\":method\", \"GET\"}}, true);\n\n  ConnPoolCallbacks callbacks;\n  MockResponseDecoder decoder;\n  EXPECT_CALL(callbacks.pool_failure_, ready());\n  EXPECT_EQ(nullptr, pool_->newStream(decoder, callbacks));\n\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy());\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, GoAway) {\n  InSequence s;\n\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));\n  r1.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true));\n  r1.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  test_clients_[0].codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError);\n\n  expectClientCreate();\n  ActiveTestRequest r2(*this, 1, false);\n  expectClientConnect(1, r2);\n  EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));\n  r2.callbacks_.outer_encoder_->encodeHeaders(\n      TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, true);\n  EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true));\n  r2.inner_decoder_->decodeHeaders(\n      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, true);\n\n  test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_CALL(*this, onClientDestroy()).Times(2);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_close_notify_.value());\n}\n\nTEST_F(Http2ConnPoolImplTest, 
NoActiveConnectionsByDefault) {\n  EXPECT_FALSE(pool_->hasActiveConnections());\n}\n\n// Show that an active request on the primary connection is considered active.\nTEST_F(Http2ConnPoolImplTest, ActiveConnectionsHasActiveRequestsTrue) {\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n\n  EXPECT_TRUE(pool_->hasActiveConnections());\n\n  completeRequestCloseUpstream(0, r1);\n}\n\n// Show that pending requests are considered active.\nTEST_F(Http2ConnPoolImplTest, PendingStreamsConsideredActive) {\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n\n  EXPECT_TRUE(pool_->hasActiveConnections());\n\n  expectClientConnect(0, r1);\n  completeRequestCloseUpstream(0, r1);\n}\n\n// Show that even if there is a primary client still, if all of its requests have completed, then it\n// does not have any active connections.\nTEST_F(Http2ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnections) {\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  completeRequest(r1);\n\n  EXPECT_FALSE(pool_->hasActiveConnections());\n\n  closeClient(0);\n}\n\n// Show that if connections are draining, they're still considered active.\nTEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) {\n  cluster_->max_requests_per_connection_ = 1;\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  pool_->drainConnections();\n\n  EXPECT_TRUE(pool_->hasActiveConnections());\n\n  completeRequest(r1);\n  closeClient(0);\n}\n\n// Show that once we've drained all connections, there are no longer any active.\nTEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) {\n  cluster_->max_requests_per_connection_ = 1;\n  expectClientCreate();\n  ActiveTestRequest r1(*this, 0, false);\n  expectClientConnect(0, r1);\n  pool_->drainConnections();\n  completeRequest(r1);\n\n  EXPECT_FALSE(pool_->hasActiveConnections());\n\n  
closeClient(0);\n}\n\nTEST_F(Http2ConnPoolImplTest, PrefetchWithoutMultiplexing) {\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1);\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  // With one request per connection, and prefetch 1.5, the first request will\n  // kick off 2 connections.\n  expectClientsCreate(2);\n  ActiveTestRequest r1(*this, 0, false);\n\n  // With another incoming request, we'll have 2 in flight and want 1.5*2 so\n  // create one connection.\n  expectClientsCreate(1);\n  ActiveTestRequest r2(*this, 0, false);\n\n  // With a third request we'll have 3 in flight and want 1.5*3 -> 5 so kick off\n  // two again.\n  expectClientsCreate(2);\n  ActiveTestRequest r3(*this, 0, false);\n\n  r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  r3.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  pool_->drainConnections();\n\n  closeAllClients();\n}\n\nTEST_F(Http2ConnPoolImplTest, PrefetchOff) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.allow_prefetch\", \"false\"}});\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1);\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  // Despite the prefetch ratio, no prefetch will happen due to the runtime\n  // disable.\n  expectClientsCreate(1);\n  ActiveTestRequest r1(*this, 0, false);\n\n  // Clean up.\n  r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  pool_->drainConnections();\n  closeAllClients();\n}\n\nTEST_F(Http2ConnPoolImplTest, PrefetchWithMultiplexing) {\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(2);\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  // With two requests per connection, and prefetch 1.5, the first 
request will\n  // only kick off 1 connection.\n  expectClientsCreate(1);\n  ActiveTestRequest r1(*this, 0, false);\n\n  // With another incoming request, we'll have capacity(2) in flight and want 1.5*2 so\n  // create an additional connection.\n  expectClientsCreate(1);\n  ActiveTestRequest r2(*this, 0, false);\n\n  // Clean up.\n  r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  pool_->drainConnections();\n  closeAllClients();\n}\n\nTEST_F(Http2ConnPoolImplTest, PrefetchEvenWhenReady) {\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1);\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  // With one request per connection, and prefetch 1.5, the first request will\n  // kick off 2 connections.\n  expectClientsCreate(2);\n  ActiveTestRequest r1(*this, 0, false);\n\n  // When the first client connects, r1 will be assigned.\n  expectClientConnect(0, r1);\n  // When the second connects, there is no waiting stream request to assign.\n  expectClientConnect(1);\n\n  // The next incoming request will immediately be assigned a stream, and also\n  // kick off a prefetch.\n  expectClientsCreate(1);\n  ActiveTestRequest r2(*this, 1, true);\n\n  // Clean up.\n  completeRequest(r1);\n  completeRequest(r2);\n  pool_->drainConnections();\n  closeAllClients();\n}\n\nTEST_F(Http2ConnPoolImplTest, PrefetchAfterTimeout) {\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1);\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  expectClientsCreate(2);\n  ActiveTestRequest r1(*this, 0, false);\n\n  // When the first client connects, r1 will be assigned.\n  expectClientConnect(0, r1);\n\n  // Now cause the prefetched connection to fail. 
We should try to create\n  // another in its place.\n  expectClientsCreate(1);\n  test_clients_[1].connect_timer_->invokeCallback();\n\n  // Clean up.\n  completeRequest(r1);\n  pool_->drainConnections();\n  closeAllClients();\n}\n\nTEST_F(Http2ConnPoolImplTest, CloseExcessWithPrefetch) {\n  cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1);\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.00));\n\n  // First request prefetches an additional connection.\n  expectClientsCreate(1);\n  ActiveTestRequest r1(*this, 0, false);\n\n  // Second request does not prefetch.\n  expectClientsCreate(1);\n  ActiveTestRequest r2(*this, 0, false);\n\n  // Change the prefetch ratio to force the connection to no longer be excess.\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(2));\n  // Closing off the second request should bring us back to 1 request in queue,\n  // desired capacity 2, so will not close the connection.\n  EXPECT_CALL(*this, onClientDestroy()).Times(0);\n  r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n\n  // Clean up.\n  r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess);\n  pool_->drainConnections();\n  closeAllClients();\n}\n\n// Test that maybePrefetch is passed up to the base class implementation.\nTEST_F(Http2ConnPoolImplTest, MaybePrefetch) {\n  ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5));\n\n  EXPECT_FALSE(pool_->maybePrefetch(0));\n\n  expectClientsCreate(1);\n  EXPECT_TRUE(pool_->maybePrefetch(2));\n\n  pool_->drainConnections();\n  closeAllClients();\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/frame_replay.cc",
    "content": "#include \"test/common/http/http2/frame_replay.h\"\n\n#include \"common/common/hex.h\"\n#include \"common/common/macros.h\"\n#include \"common/http/utility.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/test_common/environment.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nFileFrame::FileFrame(absl::string_view path) : api_(Api::createApiForTest()) {\n  const std::string contents = api_->fileSystem().fileReadToEnd(\n      TestEnvironment::runfilesPath(\"test/common/http/http2/\" + std::string(path)));\n  frame_.resize(contents.size());\n  contents.copy(reinterpret_cast<char*>(frame_.data()), frame_.size());\n}\n\nstd::unique_ptr<std::istream> FileFrame::istream() {\n  const std::string frame_string{reinterpret_cast<char*>(frame_.data()), frame_.size()};\n  return std::make_unique<std::istringstream>(frame_string);\n}\n\nconst Frame& WellKnownFrames::clientConnectionPrefaceFrame() {\n  CONSTRUCT_ON_FIRST_USE(std::vector<uint8_t>,\n                         {0x50, 0x52, 0x49, 0x20, 0x2a, 0x20, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x32,\n                          0x2e, 0x30, 0x0d, 0x0a, 0x0d, 0x0a, 0x53, 0x4d, 0x0d, 0x0a, 0x0d, 0x0a});\n}\n\nconst Frame& WellKnownFrames::defaultSettingsFrame() {\n  CONSTRUCT_ON_FIRST_USE(std::vector<uint8_t>,\n                         {0x00, 0x00, 0x0c, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,\n                          0x7f, 0xff, 0xff, 0xff, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00});\n}\n\nconst Frame& WellKnownFrames::initialWindowUpdateFrame() {\n  CONSTRUCT_ON_FIRST_USE(std::vector<uint8_t>, {0x00, 0x00, 0x04, 0x08, 0x00, 0x00, 0x00, 0x00,\n                                                0x00, 0x0f, 0xff, 0x00, 0x01});\n}\n\nvoid FrameUtils::fixupHeaders(Frame& frame) {\n  constexpr size_t frame_header_len = 9; // from RFC 7540\n  while (frame.size() < frame_header_len) {\n    frame.emplace_back(0x00);\n  }\n  size_t headers_len = frame.size() - frame_header_len;\n  frame[2] = 
headers_len & 0xff;\n  headers_len >>= 8;\n  frame[1] = headers_len & 0xff;\n  headers_len >>= 8;\n  frame[0] = headers_len & 0xff;\n  // HEADERS frame with END_STREAM | END_HEADERS for stream 1.\n  size_t offset = 3;\n  for (const uint8_t b : {0x01, 0x05, 0x00, 0x00, 0x00, 0x01}) {\n    frame[offset++] = b;\n  }\n}\n\nCodecFrameInjector::CodecFrameInjector(const std::string& injector_name)\n    : options_(::Envoy::Http2::Utility::initializeAndValidateOptions(\n          envoy::config::core::v3::Http2ProtocolOptions())),\n      injector_name_(injector_name) {}\n\nClientCodecFrameInjector::ClientCodecFrameInjector() : CodecFrameInjector(\"server\") {\n  ON_CALL(client_connection_, write(_, _))\n      .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {\n        ENVOY_LOG_MISC(\n            trace, \"client write: {}\",\n            Hex::encode(static_cast<uint8_t*>(data.linearize(data.length())), data.length()));\n        data.drain(data.length());\n      }));\n}\n\nServerCodecFrameInjector::ServerCodecFrameInjector() : CodecFrameInjector(\"client\") {\n  EXPECT_CALL(server_callbacks_, newStream(_, _))\n      .WillRepeatedly(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {\n        encoder.getStream().addCallbacks(server_stream_callbacks_);\n        return request_decoder_;\n      }));\n\n  ON_CALL(server_connection_, write(_, _))\n      .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {\n        ENVOY_LOG_MISC(\n            trace, \"server write: {}\",\n            Hex::encode(static_cast<uint8_t*>(data.linearize(data.length())), data.length()));\n        data.drain(data.length());\n      }));\n}\n\nHttp::Status CodecFrameInjector::write(const Frame& frame, Http::Connection& connection) {\n  Buffer::OwnedImpl buffer;\n  buffer.add(frame.data(), frame.size());\n  ENVOY_LOG_MISC(trace, \"{} write: {}\", injector_name_, Hex::encode(frame.data(), frame.size()));\n  auto status = Http::okStatus();\n  while (buffer.length() > 0 
&& status.ok()) {\n    status = connection.dispatch(buffer);\n  }\n  ENVOY_LOG_MISC(trace, \"Status: {}\", status.message());\n  return status;\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/frame_replay.h",
    "content": "#include <cstdint>\n#include <memory>\n#include <vector>\n\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\n// A byte vector representation of an HTTP/2 frame.\nusing Frame = std::vector<uint8_t>;\n\n// An HTTP/2 frame derived from a file location.\nclass FileFrame {\npublic:\n  FileFrame(absl::string_view path);\n\n  Frame& frame() { return frame_; }\n  std::unique_ptr<std::istream> istream();\n\n  Frame frame_;\n  Api::ApiPtr api_;\n};\n\n// Some standards HTTP/2 frames for setting up a connection. The contents for these and the seed\n// corpus were captured via logging the hex bytes in codec_impl_test's write() connection mocks in\n// setupDefaultConnectionMocks().\nclass WellKnownFrames {\npublic:\n  static const Frame& clientConnectionPrefaceFrame();\n  static const Frame& defaultSettingsFrame();\n  static const Frame& initialWindowUpdateFrame();\n};\n\nclass FrameUtils {\npublic:\n  // Modify a given frame so that it has the HTTP/2 frame header for a valid\n  // HEADERS frame.\n  static void fixupHeaders(Frame& frame);\n};\n\nclass CodecFrameInjector {\npublic:\n  CodecFrameInjector(const std::string& injector_name);\n\n  // Writes the data using the Http::Connection's nghttp2 session.\n  Http::Status write(const Frame& frame, Http::Connection& connection);\n\n  envoy::config::core::v3::Http2ProtocolOptions options_;\n  Stats::IsolatedStoreImpl stats_store_;\n  const std::string injector_name_;\n};\n\n// Holds mock and environment placeholders for an HTTP/2 client codec. 
Sets up expectations for\n// the behavior of callbacks and the request decoder.\nclass ClientCodecFrameInjector : public CodecFrameInjector {\npublic:\n  ClientCodecFrameInjector();\n\n  ::testing::NiceMock<Network::MockConnection> client_connection_;\n  MockConnectionCallbacks client_callbacks_;\n  MockResponseDecoder response_decoder_;\n  RequestEncoder* request_encoder_;\n  MockStreamCallbacks client_stream_callbacks_;\n};\n\n// Holds mock and environment placeholders for an HTTP/2 server codec. Sets up expectations for\n// the behavior of callbacks and the request decoder.\nclass ServerCodecFrameInjector : public CodecFrameInjector {\npublic:\n  ServerCodecFrameInjector();\n\n  ::testing::NiceMock<Network::MockConnection> server_connection_;\n  MockServerConnectionCallbacks server_callbacks_;\n  MockRequestDecoder request_decoder_;\n  MockStreamCallbacks server_stream_callbacks_;\n};\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/frame_replay_test.cc",
    "content": "#include \"common/http/exception.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/common/http/http2/codec_impl_test_util.h\"\n#include \"test/common/http/http2/frame_replay.h\"\n\n#include \"gtest/gtest.h\"\n\n#define EXPECT_NEXT_BYTES(istream, ...)                                                            \\\n  do {                                                                                             \\\n    std::vector<uint8_t> expected_bytes{__VA_ARGS__};                                              \\\n    std::vector<uint8_t> actual_bytes(expected_bytes.size());                                      \\\n    istream->read(reinterpret_cast<char*>(actual_bytes.data()), expected_bytes.size());            \\\n    EXPECT_EQ(actual_bytes, expected_bytes);                                                       \\\n  } while (0)\n\nusing testing::AnyNumber;\nusing testing::InvokeWithoutArgs;\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\nnamespace {\n\n// For organizational purposes only.\nclass RequestFrameCommentTest : public ::testing::Test {};\nclass ResponseFrameCommentTest : public ::testing::Test {};\n\n// Creates and sets up a stream to reply to.\nvoid setupStream(ClientCodecFrameInjector& codec, TestClientConnectionImplNew& connection) {\n  codec.request_encoder_ = &connection.newStream(codec.response_decoder_);\n  codec.request_encoder_->getStream().addCallbacks(codec.client_stream_callbacks_);\n  // Setup a single stream to inject frames as a reply to.\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  codec.request_encoder_->encodeHeaders(request_headers, true);\n}\n\n// Validate that a simple Huffman encoded request HEADERS frame can be decoded.\nTEST_F(RequestFrameCommentTest, SimpleExampleHuffman) {\n  FileFrame header{\"request_header_corpus/simple_example_huffman\"};\n\n  // Validate HEADERS content matches intent.\n  auto header_bytes = 
header.istream();\n  // Payload size is 18 bytes.\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x00, 0x12);\n  // HEADERS frame with END_STREAM | END_HEADERS for stream 1.\n  EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01);\n  // Static table :scheme: http, :method: GET\n  EXPECT_NEXT_BYTES(header_bytes, 0x86, 0x82);\n  // Static table :authority, Huffman 'host'\n  EXPECT_NEXT_BYTES(header_bytes, 0x41, 0x83, 0x9c, 0xe8, 0x4f);\n  // Static table :path: /\n  EXPECT_NEXT_BYTES(header_bytes, 0x84);\n  // Huffman foo: barbaz\n  EXPECT_NEXT_BYTES(header_bytes, 0x40, 0x82, 0x94, 0xe7, 0x85, 0x8c, 0x76, 0x46, 0x3f, 0x7f);\n\n  // Validate HEADERS decode.\n  ServerCodecFrameInjector codec;\n  TestServerConnectionImplNew connection(\n      codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_,\n      Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n      envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok());\n  EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok());\n  EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok());\n  TestRequestHeaderMapImpl expected_headers;\n  HttpTestUtility::addDefaultHeaders(expected_headers);\n  expected_headers.addCopy(\"foo\", \"barbaz\");\n  EXPECT_CALL(codec.request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n  EXPECT_TRUE(codec.write(header.frame(), connection).ok());\n}\n\n// Validate that a simple Huffman encoded response HEADERS frame can be decoded.\nTEST_F(ResponseFrameCommentTest, SimpleExampleHuffman) {\n  FileFrame header{\"response_header_corpus/simple_example_huffman\"};\n\n  // Validate HEADERS content matches intent.\n  auto header_bytes = header.istream();\n\n  // Payload size is 15 bytes.\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x00, 0x0f);\n  // HEADERS frame with END_STREAM 
| END_HEADERS for stream 1.\n  EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01);\n  // Static table :status: 200\n  EXPECT_NEXT_BYTES(header_bytes, 0x88);\n  // Huffman compression: test\n  EXPECT_NEXT_BYTES(header_bytes, 0x40, 0x88, 0x21, 0xe9, 0xae, 0xc2, 0xa1, 0x06, 0x3d, 0x5f, 0x83,\n                    0x49, 0x50, 0x9f);\n\n  // Validate HEADERS decode.\n  ClientCodecFrameInjector codec;\n  TestClientConnectionImplNew connection(\n      codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_,\n      Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n      ProdNghttp2SessionFactory::get());\n  setupStream(codec, connection);\n\n  EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok());\n  EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok());\n  TestResponseHeaderMapImpl expected_headers;\n  expected_headers.addCopy(\":status\", \"200\");\n  expected_headers.addCopy(\"compression\", \"test\");\n  EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n  EXPECT_TRUE(codec.write(header.frame(), connection).ok());\n}\n\n// Validate that a simple non-Huffman request HEADERS frame with no static table user either can be\n// decoded.\nTEST_F(RequestFrameCommentTest, SimpleExamplePlain) {\n  FileFrame header{\"request_header_corpus/simple_example_plain\"};\n\n  // Validate HEADERS content matches intent.\n  auto header_bytes = header.istream();\n  // Payload size is 65 bytes.\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x00, 0x41);\n  // HEADERS frame with END_STREAM | END_HEADERS for stream 1.\n  EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01);\n  // Literal unindexed :scheme: http\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x07, 0x3A, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x65);\n  EXPECT_NEXT_BYTES(header_bytes, 0x04, 0x68, 0x74, 0x74, 0x70);\n  // Literal unindexed :method: GET\n  
EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x07, 0x3A, 0x6D, 0x65, 0x74, 0x68, 0x6F, 0x64);\n  EXPECT_NEXT_BYTES(header_bytes, 0x03, 0x47, 0x45, 0x54);\n  // Literal unindexed :authority: host\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x0A, 0x3A, 0x61, 0x75, 0x74, 0x68, 0x6F, 0x72, 0x69, 0x74,\n                    0x79);\n  EXPECT_NEXT_BYTES(header_bytes, 0x04, 0x68, 0x6F, 0x73, 0x74);\n  // Literal unindexed :path: /\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x05, 0x3A, 0x70, 0x61, 0x74, 0x68);\n  EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x2F);\n  // Literal unindexed foo: barbaz\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x03, 0x66, 0x6F, 0x6F);\n  EXPECT_NEXT_BYTES(header_bytes, 0x06, 0x62, 0x61, 0x72, 0x62, 0x61, 0x7A);\n\n  // Validate HEADERS decode.\n  ServerCodecFrameInjector codec;\n  TestServerConnectionImplNew connection(\n      codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_,\n      Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n      envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok());\n  EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok());\n  EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok());\n  TestRequestHeaderMapImpl expected_headers;\n  HttpTestUtility::addDefaultHeaders(expected_headers);\n  expected_headers.addCopy(\"foo\", \"barbaz\");\n  EXPECT_CALL(codec.request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n  EXPECT_TRUE(codec.write(header.frame(), connection).ok());\n}\n\n// Validate that a simple non-Huffman response HEADERS frame with no static table user either can be\n// decoded.\nTEST_F(ResponseFrameCommentTest, SimpleExamplePlain) {\n  FileFrame header{\"response_header_corpus/simple_example_plain\"};\n\n  // Validate HEADERS content matches intent.\n  auto header_bytes = header.istream();\n\n  // 
Payload size is 15 bytes.\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x00, 0x1F);\n  // HEADERS frame with END_STREAM | END_HEADERS for stream 1.\n  EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01);\n  // Literal unindexed :status: 200\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x07, 0x3A, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x03, 0x32,\n                    0x30, 0x30);\n  // Literal unindexed compression: test\n  EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x0B, 0x63, 0x6F, 0x6D, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69,\n                    0x6F, 0x6E, 0x04, 0x74, 0x65, 0x73, 0x74);\n\n  // Validate HEADERS decode.\n  ClientCodecFrameInjector codec;\n  TestClientConnectionImplNew connection(\n      codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_,\n      Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n      ProdNghttp2SessionFactory::get());\n  setupStream(codec, connection);\n\n  EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok());\n  EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok());\n  TestResponseHeaderMapImpl expected_headers;\n  expected_headers.addCopy(\":status\", \"200\");\n  expected_headers.addCopy(\"compression\", \"test\");\n  EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true));\n  EXPECT_TRUE(codec.write(header.frame(), connection).ok());\n}\n\n// Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS frame doesn't crash or\n// trigger ASSERTs. This is a litmus test for the HTTP/2 codec (nghttp2) to demonstrate that it\n// doesn't suffer from the issue reported for http-parser in CVE-2019-9900. See also\n// https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. 
We use a non-compressed frame with no\n// Huffman encoding to simplify.\nTEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderFrame) {\n  FileFrame header{\"request_header_corpus/simple_example_plain\"};\n\n  for (size_t offset = 0; offset < header.frame().size(); ++offset) {\n    for (const char c : {'\\0', '\\n', '\\r'}) {\n      // Corrupt a single byte in the HEADERS.\n      const char original = header.frame()[offset];\n      header.frame()[offset] = c;\n      // Play the frames back.\n      ServerCodecFrameInjector codec;\n      TestServerConnectionImplNew connection(\n          codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_,\n          Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n          envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n      EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok());\n      EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok());\n      EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok());\n      EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(AnyNumber());\n      EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber());\n      auto status = codec.write(header.frame(), connection);\n      if (isCodecProtocolError(status)) {\n        ENVOY_LOG_MISC(trace, \"CodecProtocolError: {}\", status.message());\n      }\n      header.frame()[offset] = original;\n    }\n  }\n}\n\n// Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS frame doesn't crash or\n// trigger ASSERTs. This is a litmus test for the HTTP/2 codec (nghttp2) to demonstrate that it\n// doesn't suffer from the issue reported for http-parser in CVE-2019-9900. See also\n// https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. 
We use a non-compressed frame with no\n// Huffman encoding to simplify.\nTEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderFrame) {\n  FileFrame header{\"response_header_corpus/simple_example_plain\"};\n\n  for (size_t offset = 0; offset < header.frame().size(); ++offset) {\n    for (const char c : {'\\0', '\\n', '\\r'}) {\n      // Corrupt a single byte in the HEADERS.\n      const char original = header.frame()[offset];\n      header.frame()[offset] = c;\n      // Play the frames back.\n      ClientCodecFrameInjector codec;\n      TestClientConnectionImplNew connection(\n          codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_,\n          Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n          ProdNghttp2SessionFactory::get());\n      setupStream(codec, connection);\n\n      EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok());\n      EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok());\n      EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(AnyNumber());\n      EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber());\n      auto status = codec.write(header.frame(), connection);\n      if (isCodecProtocolError(status)) {\n        ENVOY_LOG_MISC(trace, \"CodecProtocolError: {}\", status.message());\n      }\n      header.frame()[offset] = original;\n    }\n  }\n}\n\n// Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS field name or value\n// yields a CodecProtocolException or stream reset. This is a litmus test for the HTTP/2 codec\n// (nghttp2) to demonstrate that it doesn't suffer from the issue reported for http-parser in\n// CVE-2019-9900. See also https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. 
We use a\n// non-compressed frame with no Huffman encoding to simplify.\nTEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderField) {\n  FileFrame header{\"request_header_corpus/simple_example_plain\"};\n\n  for (size_t offset = header.frame().size() - 11 /* foo: offset */; offset < header.frame().size();\n       ++offset) {\n    for (const char c : {'\\0', '\\n', '\\r'}) {\n      // Corrupt a single byte in the HEADERS.\n      const char original = header.frame()[offset];\n      header.frame()[offset] = c;\n      // Play the frames back.\n      ServerCodecFrameInjector codec;\n      TestServerConnectionImplNew connection(\n          codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_,\n          Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n          envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n      EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok());\n      EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok());\n      EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok());\n      bool stream_reset = false;\n      EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(0);\n      EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _))\n          .WillRepeatedly(InvokeWithoutArgs([&stream_reset] { stream_reset = true; }));\n      bool codec_exception = false;\n      auto status = codec.write(header.frame(), connection);\n      if (isCodecProtocolError(status)) {\n        codec_exception = true;\n      }\n      EXPECT_TRUE(stream_reset || codec_exception);\n      header.frame()[offset] = original;\n    }\n  }\n}\n\n// Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS field name or value\n// yields a CodecProtocolException or stream reset. 
This is a litmus test for the HTTP/2 codec\n// (nghttp2) to demonstrate that it doesn't suffer from the issue reported for http-parser in\n// CVE-2019-9900. See also https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. We use a\n// non-compressed frame with no Huffman encoding to simplify.\nTEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderField) {\n  FileFrame header{\"response_header_corpus/simple_example_plain\"};\n\n  for (size_t offset = header.frame().size() - 17 /* test: offset */;\n       offset < header.frame().size(); ++offset) {\n    for (const char c : {'\\0', '\\n', '\\r'}) {\n      // Corrupt a single byte in the HEADERS.\n      const char original = header.frame()[offset];\n      header.frame()[offset] = c;\n      // Play the frames back.\n      ClientCodecFrameInjector codec;\n      TestClientConnectionImplNew connection(\n          codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_,\n          Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n          ProdNghttp2SessionFactory::get());\n      setupStream(codec, connection);\n\n      EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok());\n      EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok());\n      bool stream_reset = false;\n      EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(0);\n      EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _))\n          .WillRepeatedly(InvokeWithoutArgs([&stream_reset] { stream_reset = true; }));\n      bool codec_exception = false;\n      auto status = codec.write(header.frame(), connection);\n      if (isCodecProtocolError(status)) {\n        codec_exception = true;\n      }\n      EXPECT_TRUE(stream_reset || codec_exception);\n      header.frame()[offset] = original;\n    }\n  }\n}\n\n} // namespace\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/http2_frame.cc",
    "content": "#include \"test/common/http/http2/http2_frame.h\"\n\n#include <type_traits>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/hex.h\"\n\n#include \"nghttp2/nghttp2.h\"\n\nnamespace {\n\n// Converts stream ID to the network byte order. Supports all values in the range [0, 2^30).\nuint32_t makeNetworkOrderStreamId(uint32_t stream_id) { return htonl(stream_id); }\n\n// All this templatized stuff is for the typesafe constexpr bitwise ORing of the \"enum class\" values\ntemplate <typename First, typename... Rest> struct FirstArgType {\n  using type = First; // NOLINT(readability-identifier-naming)\n};\n\ntemplate <typename Flag> constexpr uint8_t orFlags(Flag flag) { return static_cast<uint8_t>(flag); }\n\ntemplate <typename Flag, typename... Flags> constexpr uint8_t orFlags(Flag first, Flags... rest) {\n  static_assert(std::is_same<Flag, typename FirstArgType<Flags...>::type>::value,\n                \"All flag types must be the same!\");\n  return static_cast<uint8_t>(first) | orFlags(rest...);\n}\n\n} // namespace\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nconst char Http2Frame::Preamble[25] = \"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n\";\n\nvoid Http2Frame::setHeader(absl::string_view header) {\n  ASSERT(header.size() >= HeaderSize);\n  data_.assign(HeaderSize, 0);\n  // TODO(adisuissa): memcpy is discouraged as it may be unsafe. 
This should be\n  // use a safer memcpy alternative (example: https://abseil.io/tips/93)\n  memcpy(data_.data(), header.data(), HeaderSize);\n  data_.resize(HeaderSize + payloadSize());\n}\n\nvoid Http2Frame::setPayload(absl::string_view payload) {\n  ASSERT(payload.size() >= payloadSize());\n  ASSERT(data_.capacity() >= HeaderSize + payloadSize());\n  memcpy(&data_[HeaderSize], payload.data(), payloadSize());\n}\n\nuint32_t Http2Frame::payloadSize() const {\n  return (uint32_t(data_[0]) << 16) + (uint32_t(data_[1]) << 8) + uint32_t(data_[2]);\n}\n\nHttp2Frame::ResponseStatus Http2Frame::responseStatus() const {\n  if (empty() || Type::Headers != type() || size() <= HeaderSize ||\n      ((data_[HeaderSize] & 0x80) == 0)) {\n    return ResponseStatus::Unknown;\n  }\n  // See https://tools.ietf.org/html/rfc7541#appendix-A for header values\n  switch (static_cast<StaticHeaderIndex>(data_[HeaderSize] & 0x7f)) {\n  case StaticHeaderIndex::Status200:\n    return ResponseStatus::Ok;\n  case StaticHeaderIndex::Status404:\n    return ResponseStatus::NotFound;\n  default:\n    break;\n  }\n  return ResponseStatus::Unknown;\n}\n\nvoid Http2Frame::buildHeader(Type type, uint32_t payload_size, uint8_t flags, uint32_t stream_id) {\n  data_.assign(payload_size + HeaderSize, 0);\n  setPayloadSize(payload_size);\n  data_[3] = static_cast<uint8_t>(type);\n  data_[4] = flags;\n  if (stream_id) {\n    memcpy(&data_[5], &stream_id, sizeof(stream_id));\n  }\n}\n\nvoid Http2Frame::setPayloadSize(uint32_t size) {\n  data_[0] = (size >> 16) & 0xff;\n  data_[1] = (size >> 8) & 0xff;\n  data_[2] = size & 0xff;\n}\n\nvoid Http2Frame::appendHpackInt(uint64_t value, unsigned char prefix_mask) {\n  if (value < prefix_mask) {\n    data_.push_back(value);\n  } else {\n    data_.push_back(prefix_mask);\n    value -= prefix_mask;\n\n    while (value >= 128) {\n      data_.push_back((value & 0x7f) | 0x80);\n      value >>= 7;\n    }\n    data_.push_back(value);\n  }\n}\n\n// See 
https://tools.ietf.org/html/rfc7541#section-6.1 for header representations\n\nvoid Http2Frame::appendStaticHeader(StaticHeaderIndex index) {\n  data_.push_back(0x80 | static_cast<uint8_t>(index));\n}\n\nvoid Http2Frame::appendHeaderWithoutIndexing(StaticHeaderIndex index, absl::string_view value) {\n  appendHpackInt(static_cast<uint8_t>(index), 0xf);\n  appendHpackInt(value.size(), 0x7f);\n  appendData(value);\n}\n\nvoid Http2Frame::appendHeaderWithoutIndexing(const Header& header) {\n  data_.push_back(0);\n  appendHpackInt(header.key_.size(), 0x7f);\n  appendData(header.key_);\n  appendHpackInt(header.value_.size(), 0x7f);\n  appendData(header.value_);\n}\n\nvoid Http2Frame::appendEmptyHeader() {\n  data_.push_back(0x40);\n  data_.push_back(0x00);\n  data_.push_back(0x00);\n}\n\nHttp2Frame Http2Frame::makePingFrame(absl::string_view data) {\n  static constexpr size_t kPingPayloadSize = 8;\n  Http2Frame frame;\n  frame.buildHeader(Type::Ping, kPingPayloadSize);\n  ASSERT(frame.data_.capacity() >= HeaderSize + std::min(kPingPayloadSize, data.size()));\n  if (!data.empty()) {\n    memcpy(&frame.data_[HeaderSize], data.data(), std::min(kPingPayloadSize, data.size()));\n  }\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeEmptySettingsFrame(SettingsFlags flags) {\n  Http2Frame frame;\n  frame.buildHeader(Type::Settings, 0, static_cast<uint8_t>(flags));\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeEmptyHeadersFrame(uint32_t stream_index, HeadersFlags flags) {\n  Http2Frame frame;\n  frame.buildHeader(Type::Headers, 0, static_cast<uint8_t>(flags),\n                    makeNetworkOrderStreamId(stream_index));\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeEmptyContinuationFrame(uint32_t stream_index, HeadersFlags flags) {\n  Http2Frame frame;\n  frame.buildHeader(Type::Continuation, 0, static_cast<uint8_t>(flags),\n                    makeNetworkOrderStreamId(stream_index));\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeEmptyDataFrame(uint32_t stream_index, 
DataFlags flags) {\n  Http2Frame frame;\n  frame.buildHeader(Type::Data, 0, static_cast<uint8_t>(flags),\n                    makeNetworkOrderStreamId(stream_index));\n  return frame;\n}\n\nHttp2Frame Http2Frame::makePriorityFrame(uint32_t stream_index, uint32_t dependent_index) {\n  static constexpr size_t kPriorityPayloadSize = 5;\n  Http2Frame frame;\n  frame.buildHeader(Type::Priority, kPriorityPayloadSize, 0,\n                    makeNetworkOrderStreamId(stream_index));\n  const uint32_t dependent_net = makeNetworkOrderStreamId(dependent_index);\n  ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t));\n  memcpy(&frame.data_[HeaderSize], reinterpret_cast<const void*>(&dependent_net), sizeof(uint32_t));\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeEmptyPushPromiseFrame(uint32_t stream_index,\n                                                 uint32_t promised_stream_index,\n                                                 HeadersFlags flags) {\n  static constexpr size_t kEmptyPushPromisePayloadSize = 4;\n  Http2Frame frame;\n  frame.buildHeader(Type::PushPromise, kEmptyPushPromisePayloadSize, static_cast<uint8_t>(flags),\n                    makeNetworkOrderStreamId(stream_index));\n  const uint32_t promised_stream_id = makeNetworkOrderStreamId(promised_stream_index);\n  ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t));\n  memcpy(&frame.data_[HeaderSize], reinterpret_cast<const void*>(&promised_stream_id),\n         sizeof(uint32_t));\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeResetStreamFrame(uint32_t stream_index, ErrorCode error_code) {\n  static constexpr size_t kResetStreamPayloadSize = 4;\n  Http2Frame frame;\n  frame.buildHeader(Type::RstStream, kResetStreamPayloadSize, 0,\n                    makeNetworkOrderStreamId(stream_index));\n  const uint32_t error = static_cast<uint32_t>(error_code);\n  ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t));\n  memcpy(&frame.data_[HeaderSize], reinterpret_cast<const 
void*>(&error), sizeof(uint32_t));\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeEmptyGoAwayFrame(uint32_t last_stream_index, ErrorCode error_code) {\n  static constexpr size_t kEmptyGoAwayPayloadSize = 8;\n  Http2Frame frame;\n  frame.buildHeader(Type::GoAway, kEmptyGoAwayPayloadSize, 0);\n  const uint32_t last_stream_id = makeNetworkOrderStreamId(last_stream_index);\n  ASSERT(frame.data_.capacity() >= HeaderSize + 4 + sizeof(uint32_t));\n  memcpy(&frame.data_[HeaderSize], reinterpret_cast<const void*>(&last_stream_id),\n         sizeof(uint32_t));\n  const uint32_t error = static_cast<uint32_t>(error_code);\n  memcpy(&frame.data_[HeaderSize + 4], reinterpret_cast<const void*>(&error), sizeof(uint32_t));\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeWindowUpdateFrame(uint32_t stream_index, uint32_t increment) {\n  static constexpr size_t kWindowUpdatePayloadSize = 4;\n  Http2Frame frame;\n  frame.buildHeader(Type::WindowUpdate, kWindowUpdatePayloadSize, 0,\n                    makeNetworkOrderStreamId(stream_index));\n  const uint32_t increment_net = htonl(increment);\n  ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t));\n  memcpy(&frame.data_[HeaderSize], reinterpret_cast<const void*>(&increment_net), sizeof(uint32_t));\n  return frame;\n}\n\n// Note: encoder in codebase persists multiple maps, with each map representing an individual frame.\nHttp2Frame Http2Frame::makeMetadataFrameFromMetadataMap(uint32_t stream_index,\n                                                        MetadataMap& metadata_map,\n                                                        MetadataFlags flags) {\n  const int numberOfNameValuePairs = metadata_map.size();\n  absl::FixedArray<nghttp2_nv> nameValues(numberOfNameValuePairs);\n  absl::FixedArray<nghttp2_nv>::iterator iterator = nameValues.begin();\n  for (const auto& metadata : metadata_map) {\n    *iterator = {const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(metadata.first.data())),\n                 
const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(metadata.second.data())),\n                 metadata.first.size(), metadata.second.size(), NGHTTP2_NV_FLAG_NO_INDEX};\n    ++iterator;\n  }\n\n  nghttp2_hd_deflater* deflater;\n  // Note: this has no effect, as metadata frames do not add onto Dynamic table.\n  const int maxDynamicTableSize = 4096;\n  nghttp2_hd_deflate_new(&deflater, maxDynamicTableSize);\n\n  const size_t upperBoundBufferLength =\n      nghttp2_hd_deflate_bound(deflater, nameValues.begin(), numberOfNameValuePairs);\n\n  uint8_t* buffer = new uint8_t[upperBoundBufferLength];\n\n  const size_t numberOfBytesInMetadataPayload = nghttp2_hd_deflate_hd(\n      deflater, buffer, upperBoundBufferLength, nameValues.begin(), numberOfNameValuePairs);\n\n  Http2Frame frame;\n  frame.buildHeader(Type::Metadata, numberOfBytesInMetadataPayload, static_cast<uint8_t>(flags),\n                    makeNetworkOrderStreamId(stream_index));\n  std::vector<uint8_t> bufferVector(buffer, buffer + numberOfBytesInMetadataPayload);\n  frame.appendDataAfterHeaders(bufferVector);\n  delete[] buffer;\n  nghttp2_hd_deflate_del(deflater);\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeMalformedRequest(uint32_t stream_index) {\n  Http2Frame frame;\n  frame.buildHeader(Type::Headers, 0, orFlags(HeadersFlags::EndStream, HeadersFlags::EndHeaders),\n                    makeNetworkOrderStreamId(stream_index));\n  frame.appendStaticHeader(\n      StaticHeaderIndex::Status200); // send :status as request header, which is invalid\n  frame.adjustPayloadSize();\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeMalformedRequestWithZerolenHeader(uint32_t stream_index,\n                                                             absl::string_view host,\n                                                             absl::string_view path) {\n  Http2Frame frame;\n  frame.buildHeader(Type::Headers, 0, orFlags(HeadersFlags::EndStream, HeadersFlags::EndHeaders),\n                    
makeNetworkOrderStreamId(stream_index));\n  frame.appendStaticHeader(StaticHeaderIndex::MethodGet);\n  frame.appendStaticHeader(StaticHeaderIndex::SchemeHttps);\n  frame.appendHeaderWithoutIndexing(StaticHeaderIndex::Path, path);\n  frame.appendHeaderWithoutIndexing(StaticHeaderIndex::Host, host);\n  frame.appendEmptyHeader();\n  frame.adjustPayloadSize();\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeRequest(uint32_t stream_index, absl::string_view host,\n                                   absl::string_view path) {\n  Http2Frame frame;\n  frame.buildHeader(Type::Headers, 0, orFlags(HeadersFlags::EndStream, HeadersFlags::EndHeaders),\n                    makeNetworkOrderStreamId(stream_index));\n  frame.appendStaticHeader(StaticHeaderIndex::MethodGet);\n  frame.appendStaticHeader(StaticHeaderIndex::SchemeHttps);\n  frame.appendHeaderWithoutIndexing(StaticHeaderIndex::Path, path);\n  frame.appendHeaderWithoutIndexing(StaticHeaderIndex::Host, host);\n  frame.adjustPayloadSize();\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeRequest(uint32_t stream_index, absl::string_view host,\n                                   absl::string_view path,\n                                   const std::vector<Header> extra_headers) {\n  auto frame = makeRequest(stream_index, host, path);\n  for (const auto& header : extra_headers) {\n    frame.appendHeaderWithoutIndexing(header);\n  }\n  frame.adjustPayloadSize();\n  return frame;\n}\n\nHttp2Frame Http2Frame::makePostRequest(uint32_t stream_index, absl::string_view host,\n                                       absl::string_view path) {\n  Http2Frame frame;\n  frame.buildHeader(Type::Headers, 0, orFlags(HeadersFlags::EndHeaders),\n                    makeNetworkOrderStreamId(stream_index));\n  frame.appendStaticHeader(StaticHeaderIndex::MethodPost);\n  frame.appendStaticHeader(StaticHeaderIndex::SchemeHttps);\n  frame.appendHeaderWithoutIndexing(StaticHeaderIndex::Path, path);\n  
frame.appendHeaderWithoutIndexing(StaticHeaderIndex::Host, host);\n  frame.adjustPayloadSize();\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeGenericFrame(absl::string_view contents) {\n  Http2Frame frame;\n  frame.appendData(contents);\n  return frame;\n}\n\nHttp2Frame Http2Frame::makeGenericFrameFromHexDump(absl::string_view contents) {\n  Http2Frame frame;\n  frame.appendData(Hex::decode(std::string(contents)));\n  return frame;\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/http2_frame.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/http/metadata_interface.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\n// Rudimentary facility for building and parsing of HTTP2 frames for unit tests\nclass Http2Frame {\n  using DataContainer = std::vector<uint8_t>;\n\npublic:\n  Http2Frame() = default;\n\n  using Iterator = DataContainer::iterator;\n  using ConstIterator = DataContainer::const_iterator;\n\n  static constexpr size_t HeaderSize = 9;\n  static const char Preamble[25];\n\n  enum class Type : uint8_t {\n    Data = 0,\n    Headers,\n    Priority,\n    RstStream,\n    Settings,\n    PushPromise,\n    Ping,\n    GoAway,\n    WindowUpdate,\n    Continuation,\n    Metadata = 77,\n  };\n\n  enum class SettingsFlags : uint8_t {\n    None = 0,\n    Ack = 1,\n  };\n\n  enum class HeadersFlags : uint8_t {\n    None = 0,\n    EndStream = 1,\n    EndHeaders = 4,\n  };\n\n  enum class DataFlags : uint8_t {\n    None = 0,\n    EndStream = 1,\n  };\n\n  enum class MetadataFlags : uint8_t {\n    None = 0,\n    EndMetadata = 4,\n  };\n\n  // See https://tools.ietf.org/html/rfc7541#appendix-A for static header indexes\n  enum class StaticHeaderIndex : uint8_t {\n    Unknown,\n    MethodGet = 2,\n    MethodPost = 3,\n    Path = 4,\n    Status200 = 8,\n    Status404 = 13,\n    SchemeHttps = 7,\n    Host = 38,\n  };\n\n  enum class ErrorCode : uint8_t {\n    NoError = 0,\n    ProtocolError,\n    InternalError,\n    FlowControlError,\n    SettingsTimeout,\n    StreamClosed,\n    FrameSizeError,\n    RefusedStream,\n    Cancel,\n    CompressionError,\n    ConnectError,\n    EnhanceYourCalm,\n    InadequateSecurity,\n    Http11Required\n  };\n\n  enum class ResponseStatus { Unknown, Ok, NotFound };\n\n  struct Header {\n    Header(absl::string_view key, absl::string_view value) : key_(key), value_(value) {}\n    
std::string key_;\n    std::string value_;\n  };\n\n  /**\n   * Make client stream ID out of the given ID in the host byte order, ensuring that the stream id\n   * is odd as required by https://tools.ietf.org/html/rfc7540#section-5.1.1\n   * Use this function to create client stream ids for methods creating HTTP/2 frames.\n   * @param stream_id some stream id that will be used to create the client stream id.\n   * @return an odd number client stream id.\n   */\n  static uint32_t makeClientStreamId(uint32_t stream_id) { return (stream_id << 1) | 1; }\n\n  // Methods for creating HTTP2 frames\n  static Http2Frame makePingFrame(absl::string_view data = {});\n  static Http2Frame makeEmptySettingsFrame(SettingsFlags flags = SettingsFlags::None);\n  static Http2Frame makeEmptyHeadersFrame(uint32_t stream_index,\n                                          HeadersFlags flags = HeadersFlags::None);\n  static Http2Frame makeEmptyContinuationFrame(uint32_t stream_index,\n                                               HeadersFlags flags = HeadersFlags::None);\n  static Http2Frame makeEmptyDataFrame(uint32_t stream_index, DataFlags flags = DataFlags::None);\n  static Http2Frame makePriorityFrame(uint32_t stream_index, uint32_t dependent_index);\n\n  static Http2Frame makeEmptyPushPromiseFrame(uint32_t stream_index, uint32_t promised_stream_index,\n                                              HeadersFlags flags = HeadersFlags::None);\n  static Http2Frame makeResetStreamFrame(uint32_t stream_index, ErrorCode error_code);\n  static Http2Frame makeEmptyGoAwayFrame(uint32_t last_stream_index, ErrorCode error_code);\n\n  static Http2Frame makeWindowUpdateFrame(uint32_t stream_index, uint32_t increment);\n  static Http2Frame makeMetadataFrameFromMetadataMap(uint32_t stream_index,\n                                                     MetadataMap& metadata_map,\n                                                     MetadataFlags flags);\n\n  static Http2Frame 
makeMalformedRequest(uint32_t stream_index);\n  static Http2Frame makeMalformedRequestWithZerolenHeader(uint32_t stream_index,\n                                                          absl::string_view host,\n                                                          absl::string_view path);\n  static Http2Frame makeRequest(uint32_t stream_index, absl::string_view host,\n                                absl::string_view path);\n  static Http2Frame makeRequest(uint32_t stream_index, absl::string_view host,\n                                absl::string_view path, const std::vector<Header> extra_headers);\n  static Http2Frame makePostRequest(uint32_t stream_index, absl::string_view host,\n                                    absl::string_view path);\n  /**\n   * Creates a frame with the given contents. This frame can be\n   * malformed/invalid depending on the given contents.\n   * @param contents the contents of the newly created frame.\n   * @return an Http2Frame that is comprised of the given contents.\n   */\n  static Http2Frame makeGenericFrame(absl::string_view contents);\n  static Http2Frame makeGenericFrameFromHexDump(absl::string_view contents);\n\n  Type type() const { return static_cast<Type>(data_[3]); }\n  ResponseStatus responseStatus() const;\n\n  // Copy HTTP2 header. The `header` parameter must at least be HeaderSize long.\n  // Allocates payload size based on the value in the header.\n  void setHeader(absl::string_view header);\n\n  // Copy payloadSize() bytes from the `payload`. 
The `payload` must be at least payloadSize() long.\n  void setPayload(absl::string_view payload);\n\n  // Convert to `std::string` for convenience.\n  explicit operator std::string() const {\n    if (data_.empty()) {\n      return {};\n    }\n    return std::string(reinterpret_cast<const char*>(data()), size());\n  }\n\n  uint32_t payloadSize() const;\n  // Total size of the frame\n  size_t size() const { return data_.size(); }\n  // Access to the raw frame bytes\n  const uint8_t* data() const { return data_.data(); }\n  Iterator begin() { return data_.begin(); }\n  Iterator end() { return data_.end(); }\n  ConstIterator begin() const { return data_.begin(); }\n  ConstIterator end() const { return data_.end(); }\n  bool empty() const { return data_.empty(); }\n\nprivate:\n  void buildHeader(Type type, uint32_t payload_size = 0, uint8_t flags = 0, uint32_t stream_id = 0);\n  void setPayloadSize(uint32_t size);\n\n  // This method appends HPACK encoded uint64_t to the payload. adjustPayloadSize() must be called\n  // after calling this method (possibly multiple times) to write new payload length to the HTTP2\n  // header.\n  void appendHpackInt(uint64_t value, unsigned char prefix_mask);\n  void appendData(absl::string_view data) { data_.insert(data_.end(), data.begin(), data.end()); }\n  void appendData(std::vector<uint8_t> data) {\n    data_.insert(data_.end(), data.begin(), data.end());\n  }\n  void appendDataAfterHeaders(std::vector<uint8_t> data) {\n    std::copy(data.begin(), data.end(), data_.begin() + 9);\n  }\n\n  // Headers are directly encoded\n  void appendStaticHeader(StaticHeaderIndex index);\n  void appendHeaderWithoutIndexing(StaticHeaderIndex index, absl::string_view value);\n  void appendHeaderWithoutIndexing(const Header& header);\n  void appendEmptyHeader();\n\n  // This method updates payload length in the HTTP2 header based on the size of the data_\n  void adjustPayloadSize() {\n    ASSERT(size() >= HeaderSize);\n    setPayloadSize(size() - 
HeaderSize);\n  }\n\n  DataContainer data_;\n};\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/http2_frame_test.cc",
    "content": "#include <string>\n\n#include \"envoy/http/metadata_interface.h\"\n\n#include \"common/http/http2/metadata_encoder.h\"\n\n#include \"test/common/http/http2/http2_frame.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n// From metadata map\nTEST(EqualityMetadataFrame, Http2FrameTest) {\n  MetadataMap metadataMap = {{\"Connections\", \"15\"}, {\"Timeout Seconds\", \"10\"}};\n  Http2Frame http2FrameFromUtility = Http2Frame::makeMetadataFrameFromMetadataMap(\n      1, metadataMap, Http2Frame::MetadataFlags::EndMetadata);\n  std::string payloadFromHttp2Frame(http2FrameFromUtility);\n  // Note: the actual encoding of the metadata map is non-deterministic and flaky. This is okay!\n  ASSERT_EQ(static_cast<int>(http2FrameFromUtility.type()), 0x4D); // type\n  ASSERT_EQ(payloadFromHttp2Frame[4], 4);                          // flags\n  ASSERT_EQ(std::to_string(payloadFromHttp2Frame[8]),\n            std::to_string(1)); // stream_id\n}\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/metadata_encoder_decoder_test.cc",
    "content": "#include \"envoy/http/metadata_interface.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/random_generator.h\"\n#include \"common/http/http2/metadata_decoder.h\"\n#include \"common/http/http2/metadata_encoder.h\"\n\n#include \"test/test_common/logging.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"http2_frame.h\"\n#include \"nghttp2/nghttp2.h\"\n\n// A global variable in nghttp2 to disable preface and initial settings for tests.\n// TODO(soya3129): Remove after issue https://github.com/nghttp2/nghttp2/issues/1246 is fixed.\nextern \"C\" {\nextern int nghttp2_enable_strict_preface;\n}\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\nnamespace {\n\nstatic const uint64_t STREAM_ID = 1;\n\n// The buffer stores data sent by encoder and received by decoder.\nstruct TestBuffer {\n  uint8_t buf[1024 * 1024] = {0};\n  size_t length = 0;\n};\n\n// The application data structure passes to nghttp2 session.\nstruct UserData {\n  MetadataEncoder* encoder;\n  MetadataDecoder* decoder;\n  // Stores data sent by encoder and received by the decoder.\n  TestBuffer* output_buffer;\n};\n\n// Nghttp2 callback function for sending extension frame.\nstatic ssize_t pack_extension_callback(nghttp2_session* session, uint8_t* buf, size_t len,\n                                       const nghttp2_frame*, void* user_data) {\n  EXPECT_NE(nullptr, session);\n\n  MetadataEncoder* encoder = reinterpret_cast<UserData*>(user_data)->encoder;\n  const uint64_t size_copied = encoder->packNextFramePayload(buf, len);\n\n  return static_cast<ssize_t>(size_copied);\n}\n\n// Nghttp2 callback function for receiving extension frame.\nstatic int on_extension_chunk_recv_callback(nghttp2_session* session, const nghttp2_frame_hd* hd,\n                                            const uint8_t* data, size_t len, void* user_data) {\n  EXPECT_NE(nullptr, session);\n  EXPECT_GE(hd->length, len);\n\n  
MetadataDecoder* decoder = reinterpret_cast<UserData*>(user_data)->decoder;\n  bool success = decoder->receiveMetadata(data, len);\n  return success ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE;\n}\n\n// Nghttp2 callback function for unpack extension frames.\nstatic int unpack_extension_callback(nghttp2_session* session, void** payload,\n                                     const nghttp2_frame_hd* hd, void* user_data) {\n  EXPECT_NE(nullptr, session);\n  EXPECT_NE(nullptr, hd);\n  EXPECT_NE(nullptr, payload);\n\n  MetadataDecoder* decoder = reinterpret_cast<UserData*>(user_data)->decoder;\n  bool result = decoder->onMetadataFrameComplete((hd->flags == END_METADATA_FLAG) ? true : false);\n  return result ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE;\n}\n\n// Nghttp2 callback function for sending data to peer.\nstatic ssize_t send_callback(nghttp2_session* session, const uint8_t* buf, size_t len, int flags,\n                             void* user_data) {\n  EXPECT_NE(nullptr, session);\n  EXPECT_LE(0, flags);\n\n  TestBuffer* buffer = (reinterpret_cast<UserData*>(user_data))->output_buffer;\n  memcpy(buffer->buf + buffer->length, buf, len);\n  buffer->length += len;\n  return len;\n}\n\n} // namespace\n\nclass MetadataEncoderDecoderTest : public testing::Test {\npublic:\n  void initialize(MetadataCallback cb) {\n    decoder_ = std::make_unique<MetadataDecoder>(cb);\n\n    // Enables extension frame.\n    nghttp2_option_new(&option_);\n    nghttp2_option_set_user_recv_extension_type(option_, METADATA_FRAME_TYPE);\n\n    // Sets callback functions.\n    nghttp2_session_callbacks_new(&callbacks_);\n    nghttp2_session_callbacks_set_pack_extension_callback(callbacks_, pack_extension_callback);\n    nghttp2_session_callbacks_set_send_callback(callbacks_, send_callback);\n    nghttp2_session_callbacks_set_on_extension_chunk_recv_callback(\n        callbacks_, on_extension_chunk_recv_callback);\n    nghttp2_session_callbacks_set_unpack_extension_callback(callbacks_, 
unpack_extension_callback);\n\n    // Sets application data to pass to nghttp2 session.\n    user_data_.encoder = &encoder_;\n    user_data_.decoder = decoder_.get();\n    user_data_.output_buffer = &output_buffer_;\n\n    // Creates new nghttp2 session.\n    nghttp2_enable_strict_preface = 0;\n    nghttp2_session_client_new2(&session_, callbacks_, &user_data_, option_);\n    nghttp2_enable_strict_preface = 1;\n  }\n\n  void cleanUp() {\n    nghttp2_session_del(session_);\n    nghttp2_session_callbacks_del(callbacks_);\n    nghttp2_option_del(option_);\n  }\n\n  void verifyMetadataMapVector(MetadataMapVector& expect, MetadataMapPtr&& metadata_map_ptr) {\n    for (const auto& metadata : *metadata_map_ptr) {\n      EXPECT_EQ(expect.front()->find(metadata.first)->second, metadata.second);\n    }\n    expect.erase(expect.begin());\n  }\n\n  void submitMetadata(const MetadataMapVector& metadata_map_vector) {\n    // Creates metadata payload.\n    encoder_.createPayload(metadata_map_vector);\n    for (uint8_t flags : encoder_.payloadFrameFlagBytes()) {\n      int result =\n          nghttp2_submit_extension(session_, METADATA_FRAME_TYPE, flags, STREAM_ID, nullptr);\n      EXPECT_EQ(0, result);\n    }\n    // Triggers nghttp2 to populate the payloads of the METADATA frames.\n    int result = nghttp2_session_send(session_);\n    EXPECT_EQ(0, result);\n  }\n\n  nghttp2_session* session_ = nullptr;\n  nghttp2_session_callbacks* callbacks_;\n  MetadataEncoder encoder_;\n  std::unique_ptr<MetadataDecoder> decoder_;\n  nghttp2_option* option_;\n  int count_ = 0;\n\n  // Stores data received by peer.\n  TestBuffer output_buffer_;\n\n  // Application data passed to nghttp2.\n  UserData user_data_;\n\n  Random::RandomGeneratorImpl random_generator_;\n};\n\nTEST_F(MetadataEncoderDecoderTest, TestMetadataSizeLimit) {\n  MetadataMap metadata_map = {\n      {\"header_key1\", std::string(1024 * 1024 + 1, 'a')},\n  };\n  MetadataMapPtr metadata_map_ptr = 
std::make_unique<MetadataMap>(metadata_map);\n  MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n\n  // Verifies the encoding/decoding result in decoder's callback functions.\n  initialize([this, &metadata_map_vector](MetadataMapPtr&& metadata_map_ptr) -> void {\n    this->verifyMetadataMapVector(metadata_map_vector, std::move(metadata_map_ptr));\n  });\n\n  // metadata_map exceeds size limit.\n  EXPECT_LOG_CONTAINS(\"error\", \"exceeds the max bound.\",\n                      EXPECT_FALSE(encoder_.createPayload(metadata_map_vector)));\n\n  std::string payload = std::string(1024 * 1024 + 1, 'a');\n  EXPECT_FALSE(\n      decoder_->receiveMetadata(reinterpret_cast<const uint8_t*>(payload.data()), payload.size()));\n\n  cleanUp();\n}\n\nTEST_F(MetadataEncoderDecoderTest, TestDecodeBadData) {\n  MetadataMap metadata_map = {\n      {\"header_key1\", \"header_value1\"},\n  };\n  MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);\n  MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n\n  // Verifies the encoding/decoding result in decoder's callback functions.\n  initialize([this, &metadata_map_vector](MetadataMapPtr&& metadata_map_ptr) -> void {\n    this->verifyMetadataMapVector(metadata_map_vector, std::move(metadata_map_ptr));\n  });\n  submitMetadata(metadata_map_vector);\n\n  // Messes up with the encoded payload, and passes it to the decoder.\n  output_buffer_.buf[10] |= 0xff;\n  decoder_->receiveMetadata(output_buffer_.buf, output_buffer_.length);\n  EXPECT_FALSE(decoder_->onMetadataFrameComplete(true));\n\n  cleanUp();\n}\n\n// Checks if accumulated metadata size reaches size limit, returns failure.\nTEST_F(MetadataEncoderDecoderTest, VerifyEncoderDecoderMultipleMetadataReachSizeLimit) {\n  MetadataMap metadata_map_empty = {};\n  MetadataCallback cb = [](std::unique_ptr<MetadataMap>) -> void {};\n  initialize(cb);\n\n  ssize_t 
result = 0;\n\n  for (int i = 0; i < 100; i++) {\n    // Cleans up the output buffer.\n    memset(output_buffer_.buf, 0, output_buffer_.length);\n    output_buffer_.length = 0;\n\n    MetadataMap metadata_map = {\n        {\"header_key1\", std::string(10000, 'a')},\n        {\"header_key2\", std::string(10000, 'b')},\n    };\n    MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);\n    MetadataMapVector metadata_map_vector;\n    metadata_map_vector.push_back(std::move(metadata_map_ptr));\n\n    // Encode and decode the second MetadataMap.\n    decoder_->callback_ = [this, &metadata_map_vector](MetadataMapPtr&& metadata_map_ptr) -> void {\n      this->verifyMetadataMapVector(metadata_map_vector, std::move(metadata_map_ptr));\n    };\n    submitMetadata(metadata_map_vector);\n\n    result = nghttp2_session_mem_recv(session_, output_buffer_.buf, output_buffer_.length);\n    if (result < 0) {\n      break;\n    }\n  }\n  // Verifies max metadata limit reached.\n  EXPECT_LT(result, 0);\n  EXPECT_LE(decoder_->max_payload_size_bound_, decoder_->total_payload_size_);\n\n  cleanUp();\n}\n\n// Tests encoding/decoding small metadata map vectors.\nTEST_F(MetadataEncoderDecoderTest, EncodeMetadataMapVectorSmall) {\n  MetadataMap metadata_map = {\n      {\"header_key1\", std::string(5, 'a')},\n      {\"header_key2\", std::string(5, 'b')},\n  };\n  MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);\n  MetadataMap metadata_map_2 = {\n      {\"header_key3\", std::string(5, 'a')},\n      {\"header_key4\", std::string(5, 'b')},\n  };\n  MetadataMapPtr metadata_map_ptr_2 = std::make_unique<MetadataMap>(metadata_map);\n  MetadataMap metadata_map_3 = {\n      {\"header_key1\", std::string(1, 'a')},\n      {\"header_key2\", std::string(1, 'b')},\n  };\n  MetadataMapPtr metadata_map_ptr_3 = std::make_unique<MetadataMap>(metadata_map);\n\n  MetadataMapVector metadata_map_vector;\n  
metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  metadata_map_vector.push_back(std::move(metadata_map_ptr_2));\n  metadata_map_vector.push_back(std::move(metadata_map_ptr_3));\n\n  // Verifies the encoding/decoding result in decoder's callback functions.\n  initialize([this, &metadata_map_vector](MetadataMapPtr&& metadata_map_ptr) -> void {\n    this->verifyMetadataMapVector(metadata_map_vector, std::move(metadata_map_ptr));\n  });\n  submitMetadata(metadata_map_vector);\n\n  // Verifies flag and payload are encoded correctly.\n  const uint64_t consume_size = random_generator_.random() % output_buffer_.length;\n  nghttp2_session_mem_recv(session_, output_buffer_.buf, consume_size);\n  nghttp2_session_mem_recv(session_, output_buffer_.buf + consume_size,\n                           output_buffer_.length - consume_size);\n\n  cleanUp();\n}\n\n// Tests encoding/decoding large metadata map vectors.\nTEST_F(MetadataEncoderDecoderTest, EncodeMetadataMapVectorLarge) {\n  MetadataMapVector metadata_map_vector;\n  for (int i = 0; i < 10; i++) {\n    MetadataMap metadata_map = {\n        {\"header_key1\", std::string(50000, 'a')},\n        {\"header_key2\", std::string(50000, 'b')},\n    };\n    MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);\n    metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  }\n  // Verifies the encoding/decoding result in decoder's callback functions.\n  initialize([this, &metadata_map_vector](MetadataMapPtr&& metadata_map_ptr) -> void {\n    this->verifyMetadataMapVector(metadata_map_vector, std::move(metadata_map_ptr));\n  });\n  submitMetadata(metadata_map_vector);\n  // Verifies flag and payload are encoded correctly.\n  const uint64_t consume_size = random_generator_.random() % output_buffer_.length;\n  nghttp2_session_mem_recv(session_, output_buffer_.buf, consume_size);\n  nghttp2_session_mem_recv(session_, output_buffer_.buf + consume_size,\n                           
output_buffer_.length - consume_size);\n  cleanUp();\n}\n\n// Tests encoding/decoding with fuzzed metadata size.\nTEST_F(MetadataEncoderDecoderTest, EncodeFuzzedMetadata) {\n  MetadataMapVector metadata_map_vector;\n  for (int i = 0; i < 10; i++) {\n    Random::RandomGeneratorImpl random;\n    int value_size_1 = random.random() % (2 * Http::METADATA_MAX_PAYLOAD_SIZE) + 1;\n    int value_size_2 = random.random() % (2 * Http::METADATA_MAX_PAYLOAD_SIZE) + 1;\n    MetadataMap metadata_map = {\n        {\"header_key1\", std::string(value_size_1, 'a')},\n        {\"header_key2\", std::string(value_size_2, 'a')},\n    };\n    MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);\n    metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  }\n\n  // Verifies the encoding/decoding result in decoder's callback functions.\n  initialize([this, &metadata_map_vector](MetadataMapPtr&& metadata_map_ptr) -> void {\n    this->verifyMetadataMapVector(metadata_map_vector, std::move(metadata_map_ptr));\n  });\n  submitMetadata(metadata_map_vector);\n\n  // Verifies flag and payload are encoded correctly.\n  nghttp2_session_mem_recv(session_, output_buffer_.buf, output_buffer_.length);\n\n  cleanUp();\n}\n\nTEST_F(MetadataEncoderDecoderTest, EncodeDecodeFrameTest) {\n  MetadataMap metadataMap = {\n      {\"Connections\", \"15\"},\n      {\"Timeout Seconds\", \"10\"},\n  };\n  MetadataMapPtr metadataMapPtr = std::make_unique<MetadataMap>(metadataMap);\n  MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadataMapPtr));\n  Http2Frame http2FrameFromUltility = Http2Frame::makeMetadataFrameFromMetadataMap(\n      1, metadataMap, Http2Frame::MetadataFlags::EndMetadata);\n  MetadataDecoder decoder([this, &metadata_map_vector](MetadataMapPtr&& metadata_map_ptr) -> void {\n    this->verifyMetadataMapVector(metadata_map_vector, std::move(metadata_map_ptr));\n  });\n  decoder.receiveMetadata(http2FrameFromUltility.data() + 9, 
http2FrameFromUltility.size() - 9);\n  decoder.onMetadataFrameComplete(true);\n}\n\nusing MetadataEncoderDecoderDeathTest = MetadataEncoderDecoderTest;\n\n// Crash if a caller tries to pack more frames than the encoder has data for.\nTEST_F(MetadataEncoderDecoderDeathTest, PackTooManyFrames) {\n  MetadataMap metadata_map = {\n      {\"header_key1\", std::string(5, 'a')},\n      {\"header_key2\", std::string(5, 'b')},\n  };\n  MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);\n  MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n\n  initialize([this, &metadata_map_vector](MetadataMapPtr&& metadata_map_ptr) -> void {\n    this->verifyMetadataMapVector(metadata_map_vector, std::move(metadata_map_ptr));\n  });\n  submitMetadata(metadata_map_vector);\n\n  // Try to send an extra METADATA frame. Submitting the frame to nghttp2 should succeed, but\n  // pack_extension_callback should fail, and that failure will propagate through\n  // nghttp2_session_send. How to handle the failure is up to the HTTP/2 codec (in practice, it will\n  // throw a CodecProtocolException).\n  int result = nghttp2_submit_extension(session_, METADATA_FRAME_TYPE, 0, STREAM_ID, nullptr);\n  EXPECT_EQ(0, result);\n  EXPECT_DEATH(nghttp2_session_send(session_),\n               \"No payload remaining to pack into a METADATA frame.\");\n\n  cleanUp();\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/protocol_constraints_test.cc",
    "content": "#include \"common/http/http2/protocol_constraints.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\n\nclass ProtocolConstraintsTest : public ::testing::Test {\nprotected:\n  Http::Http2::CodecStats& http2CodecStats() {\n    return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, stats_store_);\n  }\n\n  Stats::TestUtil::TestStore stats_store_;\n  Http::Http2::CodecStats::AtomicPtr http2_codec_stats_;\n  envoy::config::core::v3::Http2ProtocolOptions options_;\n};\n\nTEST_F(ProtocolConstraintsTest, DefaultStatusOk) {\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  EXPECT_TRUE(constraints.status().ok());\n}\n\nTEST_F(ProtocolConstraintsTest, OutboundControlFrameFlood) {\n  options_.mutable_max_outbound_frames()->set_value(20);\n  options_.mutable_max_outbound_control_frames()->set_value(2);\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  constraints.incrementOutboundFrameCount(true);\n  constraints.incrementOutboundFrameCount(true);\n  EXPECT_TRUE(constraints.checkOutboundFrameLimits().ok());\n  constraints.incrementOutboundFrameCount(true);\n  EXPECT_FALSE(constraints.checkOutboundFrameLimits().ok());\n  EXPECT_TRUE(isBufferFloodError(constraints.status()));\n  EXPECT_EQ(\"Too many control frames in the outbound queue.\", constraints.status().message());\n  EXPECT_EQ(1, stats_store_.counter(\"http2.outbound_control_flood\").value());\n}\n\nTEST_F(ProtocolConstraintsTest, OutboundFrameFlood) {\n  options_.mutable_max_outbound_frames()->set_value(5);\n  options_.mutable_max_outbound_control_frames()->set_value(2);\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  constraints.incrementOutboundFrameCount(false);\n  constraints.incrementOutboundFrameCount(false);\n  constraints.incrementOutboundFrameCount(false);\n  
EXPECT_TRUE(constraints.checkOutboundFrameLimits().ok());\n  constraints.incrementOutboundFrameCount(false);\n  constraints.incrementOutboundFrameCount(false);\n  constraints.incrementOutboundFrameCount(false);\n  EXPECT_FALSE(constraints.checkOutboundFrameLimits().ok());\n  EXPECT_TRUE(isBufferFloodError(constraints.status()));\n  EXPECT_EQ(\"Too many frames in the outbound queue.\", constraints.status().message());\n  EXPECT_EQ(1, stats_store_.counter(\"http2.outbound_flood\").value());\n}\n\n// Verify that the `status()` method reflects the first violation and is not modified by subsequent\n// violations of outbound flood limits\nTEST_F(ProtocolConstraintsTest, OutboundFrameFloodStatusIsIdempotent) {\n  options_.mutable_max_outbound_frames()->set_value(5);\n  options_.mutable_max_outbound_control_frames()->set_value(2);\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  // First trigger control frame flood\n  constraints.incrementOutboundFrameCount(true);\n  constraints.incrementOutboundFrameCount(true);\n  constraints.incrementOutboundFrameCount(true);\n  EXPECT_TRUE(isBufferFloodError(constraints.checkOutboundFrameLimits()));\n  EXPECT_EQ(\"Too many control frames in the outbound queue.\", constraints.status().message());\n  // Then trigger flood check for all frame types\n  constraints.incrementOutboundFrameCount(false);\n  constraints.incrementOutboundFrameCount(false);\n  constraints.incrementOutboundFrameCount(false);\n  EXPECT_FALSE(constraints.checkOutboundFrameLimits().ok());\n  EXPECT_TRUE(isBufferFloodError(constraints.status()));\n  // The status should still reflect the first violation\n  EXPECT_EQ(\"Too many control frames in the outbound queue.\", constraints.status().message());\n  EXPECT_EQ(1, stats_store_.counter(\"http2.outbound_control_flood\").value());\n  EXPECT_EQ(0, stats_store_.counter(\"http2.outbound_flood\").value());\n}\n\nTEST_F(ProtocolConstraintsTest, InboundZeroLenData) {\n  
options_.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(2);\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  nghttp2_frame_hd frame;\n  frame.type = NGHTTP2_DATA;\n  frame.length = 0;\n  frame.flags = 0;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.trackInboundFrames(&frame, 0)));\n  EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.status()));\n  EXPECT_EQ(1, stats_store_.counter(\"http2.inbound_empty_frames_flood\").value());\n}\n\n// Verify that the `status()` method reflects the first violation and is not modified by subsequent\n// violations of outbound or inbound flood limits\nTEST_F(ProtocolConstraintsTest, OutboundAndInboundFrameFloodStatusIsIdempotent) {\n  options_.mutable_max_outbound_frames()->set_value(5);\n  options_.mutable_max_outbound_control_frames()->set_value(2);\n  options_.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(2);\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  // First trigger inbound frame flood\n  nghttp2_frame_hd frame;\n  frame.type = NGHTTP2_DATA;\n  frame.length = 0;\n  frame.flags = 0;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.trackInboundFrames(&frame, 0)));\n\n  // Then trigger outbound control flood\n  constraints.incrementOutboundFrameCount(true);\n  constraints.incrementOutboundFrameCount(true);\n  constraints.incrementOutboundFrameCount(true);\n  EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.checkOutboundFrameLimits()));\n  EXPECT_EQ(1, stats_store_.counter(\"http2.inbound_empty_frames_flood\").value());\n  EXPECT_EQ(0, 
stats_store_.counter(\"http2.outbound_control_flood\").value());\n}\n\nTEST_F(ProtocolConstraintsTest, InboundZeroLenDataWithPadding) {\n  options_.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(2);\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  nghttp2_frame_hd frame;\n  frame.type = NGHTTP2_DATA;\n  frame.length = 8;\n  frame.flags = 0;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 8).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 8).ok());\n  EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.trackInboundFrames(&frame, 8)));\n  EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.status()));\n  EXPECT_EQ(1, stats_store_.counter(\"http2.inbound_empty_frames_flood\").value());\n}\n\nTEST_F(ProtocolConstraintsTest, InboundZeroLenDataEndStreamResetCounter) {\n  options_.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(2);\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  nghttp2_frame_hd frame;\n  frame.type = NGHTTP2_DATA;\n  frame.length = 0;\n  frame.flags = 0;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  frame.flags = NGHTTP2_FLAG_END_STREAM;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  frame.flags = 0;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.trackInboundFrames(&frame, 0)));\n  EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.status()));\n  EXPECT_EQ(1, stats_store_.counter(\"http2.inbound_empty_frames_flood\").value());\n}\n\nTEST_F(ProtocolConstraintsTest, Priority) {\n  options_.mutable_max_inbound_priority_frames_per_stream()->set_value(2);\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  // Create one stream\n  nghttp2_frame_hd frame;\n  frame.type 
= NGHTTP2_HEADERS;\n  frame.length = 1;\n  frame.flags = NGHTTP2_FLAG_END_HEADERS;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n\n  frame.type = NGHTTP2_PRIORITY;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(isBufferFloodError(constraints.trackInboundFrames(&frame, 0)));\n  EXPECT_TRUE(isBufferFloodError(constraints.status()));\n  EXPECT_EQ(\"Too many PRIORITY frames\", constraints.status().message());\n  EXPECT_EQ(1, stats_store_.counter(\"http2.inbound_priority_frames_flood\").value());\n}\n\nTEST_F(ProtocolConstraintsTest, WindowUpdate) {\n  options_.mutable_max_inbound_window_update_frames_per_data_frame_sent()->set_value(1);\n  ProtocolConstraints constraints(http2CodecStats(), options_);\n  // Create one stream\n  nghttp2_frame_hd frame;\n  frame.type = NGHTTP2_HEADERS;\n  frame.length = 1;\n  frame.flags = NGHTTP2_FLAG_END_HEADERS;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  // Send 2 DATA frames\n  constraints.incrementOutboundDataFrameCount();\n  constraints.incrementOutboundDataFrameCount();\n\n  frame.type = NGHTTP2_WINDOW_UPDATE;\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok());\n  EXPECT_TRUE(isBufferFloodError(constraints.trackInboundFrames(&frame, 0)));\n  EXPECT_TRUE(isBufferFloodError(constraints.status()));\n  EXPECT_EQ(\"Too many WINDOW_UPDATE frames\", constraints.status().message());\n  
EXPECT_EQ(1, stats_store_.counter(\"http2.inbound_window_update_frames_flood\").value());\n}\n\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/request_header_corpus/crash-da39a3ee5e6b4b0d3255bfef95601890afd80709",
    "content": ""
  },
  {
    "path": "test/common/http/http2/request_header_fuzz_test.cc",
    "content": "// Fuzzer for H2 request HEADERS frames. Unlike codec_impl_fuzz_test, this is\n// stateless and focuses only on HEADERS. This technique also plays well with\n// uncompressed HEADERS fuzzing.\n\n#include \"common/http/exception.h\"\n\n#include \"test/common/http/http2/codec_impl_test_util.h\"\n#include \"test/common/http/http2/frame_replay.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\nnamespace {\n\nvoid Replay(const Frame& frame, ServerCodecFrameInjector& codec) {\n  // Create the server connection containing the nghttp2 session.\n  TestServerConnectionImplNew connection(\n      codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_,\n      Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n      envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n  Http::Status status = Http::okStatus();\n  status = codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection);\n  status = codec.write(WellKnownFrames::defaultSettingsFrame(), connection);\n  status = codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection);\n  status = codec.write(frame, connection);\n}\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  // Create static objects.\n  static ServerCodecFrameInjector codec;\n  Frame frame;\n  frame.assign(buf, buf + len);\n  // Replay with the fuzzer bytes.\n  Replay(frame, codec);\n  // Try again, but fixup the HEADERS frame to make it a valid HEADERS.\n  FrameUtils::fixupHeaders(frame);\n  Replay(frame, codec);\n}\n\n} // namespace\n} // namespace Http2\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/http2/response_header_fuzz_test.cc",
    "content": "// Fuzzer for H2 response HEADERS frames. Unlike codec_impl_fuzz_test, this is\n// stateless and focuses only on HEADERS. This technique also plays well with\n// uncompressed HEADERS fuzzing.\n\n#include \"common/http/exception.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/common/http/http2/codec_impl_test_util.h\"\n#include \"test/common/http/http2/frame_replay.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace Http2 {\nnamespace {\n\nvoid Replay(const Frame& frame, ClientCodecFrameInjector& codec) {\n  // Create the client connection containing the nghttp2 session.\n  TestClientConnectionImplNew connection(\n      codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_,\n      Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n      ProdNghttp2SessionFactory::get());\n  // Create a new stream.\n  Http::Status status = Http::okStatus();\n  codec.request_encoder_ = &connection.newStream(codec.response_decoder_);\n  codec.request_encoder_->getStream().addCallbacks(codec.client_stream_callbacks_);\n  // Setup a single stream to inject frames as a reply to.\n  TestRequestHeaderMapImpl request_headers;\n  HttpTestUtility::addDefaultHeaders(request_headers);\n  codec.request_encoder_->encodeHeaders(request_headers, true);\n\n  // Send frames.\n  status = codec.write(WellKnownFrames::defaultSettingsFrame(), connection);\n  status = codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection);\n  status = codec.write(frame, connection);\n}\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  static ClientCodecFrameInjector codec;\n  Frame frame;\n  frame.assign(buf, buf + len);\n  // Replay with the fuzzer bytes.\n  Replay(frame, codec);\n  // Try again, but fixup the HEADERS frame to make it a valid HEADERS.\n  FrameUtils::fixupHeaders(frame);\n  Replay(frame, codec);\n}\n\n} // namespace\n} // namespace Http2\n} // namespace Http\n} // 
namespace Envoy\n"
  },
  {
    "path": "test/common/http/path_utility_corpus/Testcase_1",
    "content": "    canonical_path {\n        request_headers {\n            headers {\n                key: \":path\"\n                value: \"/\"\n            }\n            headers {\n                key: \":authority\"\n                value: \"foo.com\"\n            }\n        }\n    }\n"
  },
  {
    "path": "test/common/http/path_utility_corpus/Testcase_2",
    "content": "    merge_slashes {\n        request_headers {\n            headers {\n                key: \":path\"\n                value: \"/\"\n            }\n            headers {\n                key: \":authority\"\n                value: \"foo.com\"\n            }\n        }\n    }\n"
  },
  {
    "path": "test/common/http/path_utility_corpus/Testcase_3",
    "content": "    remove_query_and_fragment {\n        path : \"www.google.com/kitten?user=envoydev\"\n    }\n"
  },
  {
    "path": "test/common/http/path_utility_corpus/Testcase_4",
    "content": "    merge_slashes {\n        request_headers {\n            headers {\n                key: \":path\"\n                value: \"//path/to/file\"\n            }\n            headers {\n                key: \":authority\"\n                value: \"foo.com\"\n            }\n        }\n    }\n"
  },
  {
    "path": "test/common/http/path_utility_corpus/clusterfuzz-testcase-minimized-path_utility_fuzz_test-5770162224234496",
    "content": "merge_slashes {\n  request_headers {\n    headers {\n      key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/path_utility_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.http;\n\nimport \"validate/validate.proto\";\nimport \"test/fuzz/common.proto\";\n\nmessage CanonicalPath {\n  test.fuzz.Headers request_headers = 1;\n}\n\nmessage MergeSlashes {\n  test.fuzz.Headers request_headers = 1;\n}\n\nmessage RemoveQueryAndFragment {\n  string path = 1;\n}\n\nmessage PathUtilityTestCase {\n  oneof path_utility_selector {\n    CanonicalPath canonical_path = 1;\n    MergeSlashes merge_slashes = 2;\n    RemoveQueryAndFragment remove_query_and_fragment = 3;\n  }\n}\n"
  },
  {
    "path": "test/common/http/path_utility_fuzz_test.cc",
    "content": "#include \"common/http/path_utility.h\"\n\n#include \"test/common/http/path_utility_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\nnamespace {\nDEFINE_PROTO_FUZZER(const test::common::http::PathUtilityTestCase& input) {\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  }\n\n  switch (input.path_utility_selector_case()) {\n  case test::common::http::PathUtilityTestCase::kCanonicalPath: {\n    auto request_headers = fromHeaders<Http::TestRequestHeaderMapImpl>(\n        input.canonical_path().request_headers(), {},\n        {\":path\"}); // needs to have path header in order to be valid\n    Http::PathUtil::canonicalPath(request_headers);\n    ASSERT(!request_headers.getPathValue().empty());\n    break;\n  }\n  case test::common::http::PathUtilityTestCase::kMergeSlashes: {\n    auto request_headers = fromHeaders<Http::TestRequestHeaderMapImpl>(\n        input.merge_slashes().request_headers(), {}, {\":path\"});\n    Http::PathUtil::mergeSlashes(request_headers);\n    break;\n  }\n  case test::common::http::PathUtilityTestCase::kRemoveQueryAndFragment: {\n    auto path = input.remove_query_and_fragment().path();\n    auto sanitized_path = Http::PathUtil::removeQueryAndFragment(path);\n    ASSERT(path.find(sanitized_path) != std::string::npos);\n    break;\n  }\n  default:\n    break;\n  }\n}\n\n} // namespace\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/path_utility_test.cc",
    "content": "#include <utility>\n#include <vector>\n\n#include \"common/http/path_utility.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass PathUtilityTest : public testing::Test {\npublic:\n  // This is an indirect way to build a header entry for\n  // PathUtil::canonicalPath(), since we don't have direct access to the\n  // HeaderMapImpl constructor.\n  const HeaderEntry& pathHeaderEntry(const std::string& path_value) {\n    headers_.setPath(path_value);\n    return *headers_.Path();\n  }\n  const HeaderEntry& hostHeaderEntry(const std::string& host_value) {\n    headers_.setHost(host_value);\n    return *headers_.Host();\n  }\n  TestRequestHeaderMapImpl headers_;\n};\n\n// Already normalized path don't change.\nTEST_F(PathUtilityTest, AlreadyNormalPaths) {\n  const std::vector<std::string> normal_paths{\"/xyz\", \"/x/y/z\"};\n  for (const auto& path : normal_paths) {\n    auto& path_header = pathHeaderEntry(path);\n    const auto result = PathUtil::canonicalPath(headers_);\n    EXPECT_TRUE(result) << \"original path: \" << path;\n    EXPECT_EQ(path_header.value().getStringView(), absl::string_view(path));\n  }\n}\n\n// Invalid paths are rejected.\nTEST_F(PathUtilityTest, InvalidPaths) {\n  const std::vector<std::string> invalid_paths{\"/xyz/.%00../abc\", \"/xyz/%00.%00./abc\",\n                                               \"/xyz/AAAAA%%0000/abc\"};\n  for (const auto& path : invalid_paths) {\n    pathHeaderEntry(path);\n    EXPECT_FALSE(PathUtil::canonicalPath(headers_)) << \"original path: \" << path;\n  }\n}\n\n// Paths that are valid get normalized.\nTEST_F(PathUtilityTest, NormalizeValidPaths) {\n  const std::vector<std::pair<std::string, std::string>> non_normal_pairs{\n      {\"/a/b/../c\", \"/a/c\"},        // parent dir\n      {\"/a/b/./c\", \"/a/b/c\"},       // current dir\n      {\"a/b/../c\", \"/a/c\"},         // non / start\n      {\"/a/b/../../../../c\", \"/c\"}, // 
out number parent\n      {\"/a/..\\\\c\", \"/c\"},           // \"..\\\\\" canonicalization\n      {\"/%c0%af\", \"/%c0%af\"},       // 2 bytes unicode reserved characters\n      {\"/%5c%25\", \"/%5c%25\"},       // reserved characters\n      {\"/a/b/%2E%2E/c\", \"/a/c\"}     // %2E escape\n  };\n\n  for (const auto& path_pair : non_normal_pairs) {\n    auto& path_header = pathHeaderEntry(path_pair.first);\n    const auto result = PathUtil::canonicalPath(headers_);\n    EXPECT_TRUE(result) << \"original path: \" << path_pair.first;\n    EXPECT_EQ(path_header.value().getStringView(), path_pair.second)\n        << \"original path: \" << path_pair.second;\n  }\n}\n\n// Paths that are valid get normalized.\nTEST_F(PathUtilityTest, NormalizeCasePath) {\n  const std::vector<std::pair<std::string, std::string>> non_normal_pairs{\n      {\"/A/B/C\", \"/A/B/C\"},           // not normalize to lower case\n      {\"/a/b/%2E%2E/c\", \"/a/c\"},      // %2E can be normalized to .\n      {\"/a/b/%2e%2e/c\", \"/a/c\"},      // %2e can be normalized to .\n      {\"/a/%2F%2f/c\", \"/a/%2F%2f/c\"}, // %2F is not normalized to %2f\n  };\n\n  for (const auto& path_pair : non_normal_pairs) {\n    auto& path_header = pathHeaderEntry(path_pair.first);\n    const auto result = PathUtil::canonicalPath(headers_);\n    EXPECT_TRUE(result) << \"original path: \" << path_pair.first;\n    EXPECT_EQ(path_header.value().getStringView(), path_pair.second)\n        << \"original path: \" << path_pair.second;\n  }\n}\n// These test cases are explicitly not covered above:\n// \"/../c\\r\\n\\\"  '\\n' '\\r' should be excluded by http parser\n// \"/a/\\0c\",     '\\0' should be excluded by http parser\n\n// Paths that are valid get normalized.\nTEST_F(PathUtilityTest, MergeSlashes) {\n  auto mergeSlashes = [this](const std::string& path_value) {\n    auto& path_header = pathHeaderEntry(path_value);\n    PathUtil::mergeSlashes(headers_);\n    auto sanitized_path_value = 
path_header.value().getStringView();\n    return std::string(sanitized_path_value);\n  };\n  EXPECT_EQ(\"\", mergeSlashes(\"\"));                        // empty\n  EXPECT_EQ(\"a/b/c\", mergeSlashes(\"a//b/c\"));             // relative\n  EXPECT_EQ(\"/a/b/c/\", mergeSlashes(\"/a//b/c/\"));         // ends with slash\n  EXPECT_EQ(\"a/b/c/\", mergeSlashes(\"a//b/c/\"));           // relative ends with slash\n  EXPECT_EQ(\"/a\", mergeSlashes(\"/a\"));                    // no-op\n  EXPECT_EQ(\"/a/b/c\", mergeSlashes(\"//a/b/c\"));           // double / start\n  EXPECT_EQ(\"/a/b/c\", mergeSlashes(\"/a//b/c\"));           // double / in the middle\n  EXPECT_EQ(\"/a/b/c/\", mergeSlashes(\"/a/b/c//\"));         // double / end\n  EXPECT_EQ(\"/a/b/c\", mergeSlashes(\"/a///b/c\"));          // triple / in the middle\n  EXPECT_EQ(\"/a/b/c\", mergeSlashes(\"/a////b/c\"));         // quadruple / in the middle\n  EXPECT_EQ(\"/a/b?a=///c\", mergeSlashes(\"/a//b?a=///c\")); // slashes in the query are ignored\n  EXPECT_EQ(\"/a/b?\", mergeSlashes(\"/a//b?\"));             // empty query\n  EXPECT_EQ(\"/a/?b\", mergeSlashes(\"//a/?b\"));             // ends with slash + query\n}\n\nTEST_F(PathUtilityTest, RemoveQueryAndFragment) {\n  EXPECT_EQ(\"\", PathUtil::removeQueryAndFragment(\"\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc?\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc?param=value\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc?param=value1&param=value2\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc??\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc??param=value\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc#\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc#fragment\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc#fragment?param=value\"));\n  
EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc##\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc#?\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc#?param=value\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc?#\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc?#fragment\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc?param=value#\"));\n  EXPECT_EQ(\"/abc\", PathUtil::removeQueryAndFragment(\"/abc?param=value#fragment\"));\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/request_id_extension_uuid_impl_test.cc",
    "content": "#include <string>\n\n#include \"common/common/random_generator.h\"\n#include \"common/http/request_id_extension_uuid_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Http {\n\nTEST(UUIDRequestIDExtensionTest, SetRequestID) {\n  testing::StrictMock<Random::MockRandomGenerator> random;\n  UUIDRequestIDExtension uuid_utils(random);\n  TestRequestHeaderMapImpl request_headers;\n\n  EXPECT_CALL(random, uuid()).Times(1).WillOnce(Return(\"first-request-id\"));\n  uuid_utils.set(request_headers, true);\n  EXPECT_EQ(\"first-request-id\", request_headers.get_(Headers::get().RequestId));\n\n  EXPECT_CALL(random, uuid()).Times(1).WillOnce(Return(\"second-request-id\"));\n  uuid_utils.set(request_headers, true);\n  EXPECT_EQ(\"second-request-id\", request_headers.get_(Headers::get().RequestId));\n}\n\nTEST(UUIDRequestIDExtensionTest, EnsureRequestID) {\n  testing::StrictMock<Random::MockRandomGenerator> random;\n  UUIDRequestIDExtension uuid_utils(random);\n  TestRequestHeaderMapImpl request_headers;\n\n  EXPECT_CALL(random, uuid()).Times(1).WillOnce(Return(\"first-request-id\"));\n  uuid_utils.set(request_headers, false);\n  EXPECT_EQ(\"first-request-id\", request_headers.get_(Headers::get().RequestId));\n\n  EXPECT_CALL(random, uuid()).Times(0);\n  uuid_utils.set(request_headers, false);\n  EXPECT_EQ(\"first-request-id\", request_headers.get_(Headers::get().RequestId));\n}\n\nTEST(UUIDRequestIDExtensionTest, PreserveRequestIDInResponse) {\n  testing::StrictMock<Random::MockRandomGenerator> random;\n  UUIDRequestIDExtension uuid_utils(random);\n  TestRequestHeaderMapImpl request_headers;\n  TestResponseHeaderMapImpl response_headers;\n\n  uuid_utils.setInResponse(response_headers, request_headers);\n  EXPECT_EQ(nullptr, response_headers.get(Headers::get().RequestId));\n\n  request_headers.setRequestId(\"some-request-id\");\n  
uuid_utils.setInResponse(response_headers, request_headers);\n  EXPECT_EQ(\"some-request-id\", response_headers.get_(Headers::get().RequestId));\n\n  request_headers.removeRequestId();\n  response_headers.setRequestId(\"another-request-id\");\n  uuid_utils.setInResponse(response_headers, request_headers);\n  EXPECT_EQ(\"another-request-id\", response_headers.get_(Headers::get().RequestId));\n\n  request_headers.setRequestId(\"\");\n  uuid_utils.setInResponse(response_headers, request_headers);\n  EXPECT_EQ(\"\", response_headers.get_(Headers::get().RequestId));\n}\n\nTEST(UUIDRequestIDExtensionTest, ModRequestIDBy) {\n  Random::RandomGeneratorImpl random;\n  UUIDRequestIDExtension uuid_utils(random);\n  TestRequestHeaderMapImpl request_headers;\n\n  uint64_t result;\n  EXPECT_FALSE(uuid_utils.modBy(request_headers, result, 10000));\n\n  request_headers.setRequestId(\"fffffff\");\n  EXPECT_FALSE(uuid_utils.modBy(request_headers, result, 10000));\n\n  request_headers.setRequestId(\"fffffffz-0012-0110-00ff-0c00400600ff\");\n  EXPECT_FALSE(uuid_utils.modBy(request_headers, result, 10000));\n\n  request_headers.setRequestId(\"00000000-0000-0000-0000-000000000000\");\n  EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100));\n  EXPECT_EQ(0, result);\n\n  request_headers.setRequestId(\"00000001-0000-0000-0000-000000000000\");\n  EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100));\n  EXPECT_EQ(1, result);\n\n  request_headers.setRequestId(\"0000000f-0000-0000-0000-00000000000a\");\n  EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100));\n  EXPECT_EQ(15, result);\n\n  request_headers.setRequestId(\"\");\n  EXPECT_FALSE(uuid_utils.modBy(request_headers, result, 100));\n\n  request_headers.setRequestId(\"000000ff-0000-0000-0000-000000000000\");\n  EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100));\n  EXPECT_EQ(55, result);\n\n  request_headers.setRequestId(\"000000ff-0000-0000-0000-000000000000\");\n  
EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 10000));\n  EXPECT_EQ(255, result);\n\n  request_headers.setRequestId(\"a0090100-0012-0110-00ff-0c00400600ff\");\n  EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 137));\n  EXPECT_EQ(8, result);\n\n  request_headers.setRequestId(\"ffffffff-0012-0110-00ff-0c00400600ff\");\n  EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100));\n  EXPECT_EQ(95, result);\n\n  request_headers.setRequestId(\"ffffffff-0012-0110-00ff-0c00400600ff\");\n  EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 10000));\n  EXPECT_EQ(7295, result);\n}\n\nTEST(UUIDRequestIDExtensionTest, RequestIDModDistribution) {\n  Random::RandomGeneratorImpl random;\n  UUIDRequestIDExtension uuid_utils(random);\n  TestRequestHeaderMapImpl request_headers;\n\n  const int mod = 100;\n  const int required_percentage = 11;\n  int total_samples = 0;\n  int interesting_samples = 0;\n\n  for (int i = 0; i < 500000; ++i) {\n    std::string uuid = random.uuid();\n\n    const char c = uuid[19];\n    ASSERT_TRUE(uuid[14] == '4');                              // UUID version 4 (random)\n    ASSERT_TRUE(c == '8' || c == '9' || c == 'a' || c == 'b'); // UUID variant 1 (RFC4122)\n\n    uint64_t value;\n    request_headers.setRequestId(uuid);\n    ASSERT_TRUE(uuid_utils.modBy(request_headers, value, mod));\n\n    if (value < required_percentage) {\n      interesting_samples++;\n    }\n    total_samples++;\n  }\n\n  EXPECT_NEAR(required_percentage / 100.0, interesting_samples * 1.0 / total_samples, 0.002);\n}\n\nTEST(UUIDRequestIDExtensionTest, DISABLED_benchmark) {\n  Random::RandomGeneratorImpl random;\n\n  for (int i = 0; i < 100000000; ++i) {\n    random.uuid();\n  }\n}\n\nTEST(UUIDRequestIDExtensionTest, SetTraceStatus) {\n  Random::RandomGeneratorImpl random;\n  UUIDRequestIDExtension uuid_utils(random);\n  TestRequestHeaderMapImpl request_headers;\n  request_headers.setRequestId(random.uuid());\n\n  EXPECT_EQ(TraceStatus::NoTrace, 
uuid_utils.getTraceStatus(request_headers));\n\n  uuid_utils.setTraceStatus(request_headers, TraceStatus::Sampled);\n  EXPECT_EQ(TraceStatus::Sampled, uuid_utils.getTraceStatus(request_headers));\n\n  uuid_utils.setTraceStatus(request_headers, TraceStatus::Client);\n  EXPECT_EQ(TraceStatus::Client, uuid_utils.getTraceStatus(request_headers));\n\n  uuid_utils.setTraceStatus(request_headers, TraceStatus::Forced);\n  EXPECT_EQ(TraceStatus::Forced, uuid_utils.getTraceStatus(request_headers));\n\n  uuid_utils.setTraceStatus(request_headers, TraceStatus::NoTrace);\n  EXPECT_EQ(TraceStatus::NoTrace, uuid_utils.getTraceStatus(request_headers));\n\n  // Invalid request ID.\n  request_headers.setRequestId(\"\");\n  uuid_utils.setTraceStatus(request_headers, TraceStatus::Forced);\n  EXPECT_EQ(request_headers.getRequestIdValue(), \"\");\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/status_test.cc",
    "content": "#include \"common/http/status.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nTEST(Status, Ok) {\n  auto status = okStatus();\n  EXPECT_TRUE(status.ok());\n  EXPECT_TRUE(status.message().empty());\n  EXPECT_EQ(\"OK\", toString(status));\n  EXPECT_EQ(StatusCode::Ok, getStatusCode(status));\n  EXPECT_FALSE(isCodecProtocolError(status));\n  EXPECT_FALSE(isBufferFloodError(status));\n  EXPECT_FALSE(isPrematureResponseError(status));\n  EXPECT_FALSE(isCodecClientError(status));\n  EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status));\n}\n\nTEST(Status, CodecProtocolError) {\n  auto status = codecProtocolError(\"foobar\");\n  EXPECT_FALSE(status.ok());\n  EXPECT_EQ(\"foobar\", status.message());\n  EXPECT_EQ(\"CodecProtocolError: foobar\", toString(status));\n  EXPECT_EQ(StatusCode::CodecProtocolError, getStatusCode(status));\n  EXPECT_TRUE(isCodecProtocolError(status));\n  EXPECT_FALSE(isBufferFloodError(status));\n  EXPECT_FALSE(isPrematureResponseError(status));\n  EXPECT_FALSE(isCodecClientError(status));\n  EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status));\n}\n\nTEST(Status, BufferFloodError) {\n  auto status = bufferFloodError(\"foobar\");\n  EXPECT_FALSE(status.ok());\n  EXPECT_EQ(\"foobar\", status.message());\n  EXPECT_EQ(\"BufferFloodError: foobar\", toString(status));\n  EXPECT_EQ(StatusCode::BufferFloodError, getStatusCode(status));\n  EXPECT_FALSE(isCodecProtocolError(status));\n  EXPECT_TRUE(isBufferFloodError(status));\n  EXPECT_FALSE(isPrematureResponseError(status));\n  EXPECT_FALSE(isCodecClientError(status));\n  EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status));\n}\n\nTEST(Status, PrematureResponseError) {\n  auto status = prematureResponseError(\"foobar\", Http::Code::ProxyAuthenticationRequired);\n  EXPECT_FALSE(status.ok());\n  EXPECT_EQ(\"foobar\", status.message());\n  EXPECT_EQ(\"PrematureResponseError: HTTP code: 407: foobar\", toString(status));\n  
EXPECT_EQ(StatusCode::PrematureResponseError, getStatusCode(status));\n  EXPECT_FALSE(isCodecProtocolError(status));\n  EXPECT_FALSE(isBufferFloodError(status));\n  EXPECT_TRUE(isPrematureResponseError(status));\n  EXPECT_EQ(Http::Code::ProxyAuthenticationRequired, getPrematureResponseHttpCode(status));\n  EXPECT_FALSE(isCodecClientError(status));\n  EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status));\n}\n\nTEST(Status, CodecClientError) {\n  auto status = codecClientError(\"foobar\");\n  EXPECT_FALSE(status.ok());\n  EXPECT_EQ(\"foobar\", status.message());\n  EXPECT_EQ(\"CodecClientError: foobar\", toString(status));\n  EXPECT_EQ(StatusCode::CodecClientError, getStatusCode(status));\n  EXPECT_FALSE(isCodecProtocolError(status));\n  EXPECT_FALSE(isBufferFloodError(status));\n  EXPECT_FALSE(isPrematureResponseError(status));\n  EXPECT_TRUE(isCodecClientError(status));\n  EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status));\n}\n\nTEST(Status, InboundFramesWithEmptyPayload) {\n  auto status = inboundFramesWithEmptyPayloadError();\n  EXPECT_FALSE(status.ok());\n  EXPECT_EQ(\"Too many consecutive frames with an empty payload\", status.message());\n  EXPECT_EQ(\"InboundFramesWithEmptyPayloadError: Too many consecutive frames with an empty payload\",\n            toString(status));\n  EXPECT_EQ(StatusCode::InboundFramesWithEmptyPayload, getStatusCode(status));\n  EXPECT_FALSE(isCodecProtocolError(status));\n  EXPECT_FALSE(isBufferFloodError(status));\n  EXPECT_FALSE(isPrematureResponseError(status));\n  EXPECT_FALSE(isCodecClientError(status));\n  EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(status));\n}\n\nTEST(Status, ReturnIfError) {\n\n  auto outer = [](Status (*inner)()) {\n    RETURN_IF_ERROR(inner());\n    return bufferFloodError(\"boom\");\n  };\n\n  auto result = outer([]() { return okStatus(); });\n  EXPECT_FALSE(result.ok());\n  EXPECT_EQ(\"boom\", result.message());\n  EXPECT_TRUE(isBufferFloodError(result));\n  result = outer([]() { return 
codecClientError(\"foobar\"); });\n  EXPECT_FALSE(result.ok());\n  EXPECT_TRUE(isCodecClientError(result));\n  EXPECT_EQ(\"foobar\", result.message());\n\n  // Check that passing a `Status` object directly into the RETURN_IF_ERROR works.\n  auto direct_status = [](const Status& status) {\n    RETURN_IF_ERROR(status);\n    return bufferFloodError(\"baz\");\n  };\n  result = direct_status(codecClientError(\"foobar\"));\n  EXPECT_FALSE(result.ok());\n  EXPECT_TRUE(isCodecClientError(result));\n  EXPECT_EQ(\"foobar\", result.message());\n\n  result = direct_status(okStatus());\n  EXPECT_FALSE(result.ok());\n  EXPECT_EQ(\"baz\", result.message());\n  EXPECT_TRUE(isBufferFloodError(result));\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/user_agent_test.cc",
    "content": "#include \"common/http/header_map_impl.h\"\n#include \"common/http/user_agent.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Property;\n\nnamespace Envoy {\nnamespace Http {\nnamespace {\n\nTEST(UserAgentTest, All) {\n  Stats::MockStore stat_store;\n  NiceMock<Stats::MockHistogram> original_histogram;\n  original_histogram.unit_ = Stats::Histogram::Unit::Milliseconds;\n  Event::SimulatedTimeSystem time_system;\n  Stats::HistogramCompletableTimespanImpl span(original_histogram, time_system);\n\n  EXPECT_CALL(stat_store.counter_, inc()).Times(5);\n  EXPECT_CALL(stat_store, counter(\"test.user_agent.ios.downstream_cx_total\"));\n  EXPECT_CALL(stat_store, counter(\"test.user_agent.ios.downstream_rq_total\"));\n  EXPECT_CALL(stat_store, counter(\"test.user_agent.ios.downstream_cx_destroy_remote_active_rq\"));\n  EXPECT_CALL(stat_store, histogram(\"test.user_agent.ios.downstream_cx_length_ms\",\n                                    Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(\n      stat_store,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"test.user_agent.ios.downstream_cx_length_ms\"), _));\n\n  UserAgentContext context(stat_store.symbolTable());\n  Stats::StatNamePool pool(stat_store.symbolTable());\n  Stats::StatName prefix = pool.add(\"test\");\n  {\n    UserAgent ua(context);\n    ua.initializeFromHeaders(TestRequestHeaderMapImpl{{\"user-agent\", \"aaa iOS bbb\"}}, prefix,\n                             stat_store);\n    ua.initializeFromHeaders(TestRequestHeaderMapImpl{{\"user-agent\", \"aaa android bbb\"}}, prefix,\n                             stat_store);\n    ua.completeConnectionLength(span);\n  }\n\n  EXPECT_CALL(stat_store, 
counter(\"test.user_agent.android.downstream_cx_total\"));\n  EXPECT_CALL(stat_store, counter(\"test.user_agent.android.downstream_rq_total\"));\n  EXPECT_CALL(stat_store,\n              counter(\"test.user_agent.android.downstream_cx_destroy_remote_active_rq\"));\n  EXPECT_CALL(stat_store, histogram(\"test.user_agent.android.downstream_cx_length_ms\",\n                                    Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(\n      stat_store,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"test.user_agent.android.downstream_cx_length_ms\"), _));\n\n  {\n    UserAgent ua(context);\n    ua.initializeFromHeaders(TestRequestHeaderMapImpl{{\"user-agent\", \"aaa android bbb\"}}, prefix,\n                             stat_store);\n    ua.completeConnectionLength(span);\n    ua.onConnectionDestroy(Network::ConnectionEvent::RemoteClose, true);\n  }\n\n  {\n    UserAgent ua(context);\n    ua.initializeFromHeaders(TestRequestHeaderMapImpl{{\"user-agent\", \"aaa bbb\"}}, prefix,\n                             stat_store);\n    ua.initializeFromHeaders(TestRequestHeaderMapImpl{{\"user-agent\", \"aaa android bbb\"}}, prefix,\n                             stat_store);\n    ua.completeConnectionLength(span);\n    ua.onConnectionDestroy(Network::ConnectionEvent::RemoteClose, false);\n  }\n\n  {\n    UserAgent ua(context);\n    ua.initializeFromHeaders(TestRequestHeaderMapImpl{}, prefix, stat_store);\n    ua.completeConnectionLength(span);\n  }\n}\n\n} // namespace\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/utility_corpus/clusterfuzz-testcase-minimized-utility_fuzz_test-5091558495092736",
    "content": "initialize_and_validate {\n  custom_settings_parameters {\n    identifier {\n      value: 11008\n    }\n    value {\n      value: 65536\n    }\n  }\n  custom_settings_parameters {\n    identifier {\n      value: 11008\n    }\n    value {\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/clusterfuzz-testcase-minimized-utility_fuzz_test-5653272551751680",
    "content": "parse_cookie_value {\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"~\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \":\\\\3\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \")\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"?\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"?\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"q;q;q;q;q;q;q;q;q;q;q;p;q;q;q;q;q;q; q;q;q;q;q;q;q;p;q;q;q;q;q;q; \"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"~\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"~\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"?\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"~\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"~\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"q;q;q;q;q;q;q;q;q;q;q;p;q;q;q;q;q;q; q;q;q;q;q;q;q;p;q;q;q;q;q;q; \"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\\033\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"q;q;q;q;q;q;q;q;q;q;q;p;q;q;q;q;q;q; q;q;q;q;q;q;q;p;q;q;q;q;q;q; \"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"X\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"u\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: 
\"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  
cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n  cookies: \"\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/clusterfuzz-testcase-utility_fuzz_test-5206456636276736",
    "content": "get_last_address_from_xff {\n  xff: \" \\000\\000\\000\\000\\000\\000\\000\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/clusterfuzz-testcase-utility_fuzz_test-5735325211557888",
    "content": "parse_cookie_value {\n  cookies: \"\\027\\000\\000\\000\\000\\000\\000\\000\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/extract_host_path_from_uri_0",
    "content": "extract_host_path_from_uri: \"scheme://dns.name/x/y/z\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/extract_host_path_from_uri_1",
    "content": "extract_host_path_from_uri: \"dns.name\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/extract_host_path_from_uri_2",
    "content": "extract_host_path_from_uri: \"dns.name/x/y/z\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/extract_host_path_from_uri_3",
    "content": "extract_host_path_from_uri: \"/x/y/z\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/extract_host_path_from_uri_4",
    "content": "extract_host_path_from_uri: \"scheme://adf-scheme://adf\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/extract_host_path_from_uri_5",
    "content": "extract_host_path_from_uri: \"://\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/extract_host_path_from_uri_6",
    "content": "extract_host_path_from_uri: \"/:/adsf\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/find_query_string_0",
    "content": "find_query_string: \"/hello?world\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/find_query_string_1",
    "content": "find_query_string: \"/hello?w\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/find_query_string_2",
    "content": "find_query_string: \"/hello?world=\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/find_query_string_3",
    "content": "find_query_string: \"/hello?world==\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/get_last_address_from_xff_0",
    "content": "get_last_address_from_xff {\n  xff: \"192.0.2.10, 192.0.2.1, 10.0.0.1\"\n  num_to_skip: 0\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/get_last_address_from_xff_1",
    "content": "get_last_address_from_xff {\n  xff: \"192.0.2.10, 192.0.2.1 ,10.0.0.1,10.0.0.2\"\n  num_to_skip: 2\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/get_last_address_from_xff_2",
    "content": "get_last_address_from_xff {\n  xff: \", bad\"\n  num_to_skip: 0\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/get_last_address_from_xff_3",
    "content": "get_last_address_from_xff {\n  xff: \"\"\n  num_to_skip: 0\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/get_last_address_from_xff_4",
    "content": "get_last_address_from_xff {\n  xff: \",\"\n  num_to_skip: 0\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/get_last_address_from_xff_5",
    "content": "get_last_address_from_xff {\n  xff: \", \"\n  num_to_skip: 0\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/has_set_cookie_0",
    "content": "has_set_cookie {\n  cookies: \"somekey=somevalue\"\n  cookies: \"abc=def; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"\n  cookies: \"key2=value2; Secure\"\n  key: \"abc\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/has_set_cookie_1",
    "content": "has_set_cookie {\n  cookies: \"somekey=somevalue\"\n  cookies: \"abc=def; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"\n  cookies: \"key2=value2; Secure\"\n  key: \"somekey\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/has_set_cookie_2",
    "content": "has_set_cookie {\n  cookies: \"somekey=somevalue\"\n  cookies: \"abc=def; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"\n  cookies: \"key2=value2; Secure\"\n  key: \"ghi\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/has_set_cookie_3",
    "content": "has_set_cookie {\n  cookies: \"somekey =somevalue\"\n  cookies: \"abc\"\n  cookies: \"key2=value2; Secure\"\n  key: \"abc\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/has_set_cookie_4",
    "content": "has_set_cookie {\n  cookies: \"somekey =somevalue\"\n  cookies: \"abc\"\n  cookies: \"key2=value2; Secure\"\n  key: \"key2\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/make_set_cookie_value_0",
    "content": "make_set_cookie_value {\n  key: \"token1\"\n  value: \"abc\"\n  path: \"/hello\"\n  max_age: 30\n  httponly: false\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/make_set_cookie_value_1",
    "content": "make_set_cookie_value {\n  key: \"token1\"\n  value: \"world\"\n  path: \"/hello?=\"\n  max_age: 10\n  httponly: false\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/make_set_cookie_value_2",
    "content": "make_set_cookie_value {\n  key: \"token2\"\n  value: \"abcde\"\n  path: \"/hello?a\"\n  max_age: 30\n  httponly: false\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/make_set_cookie_value_3",
    "content": "make_set_cookie_value {\n  key: \"token1\"\n  value: \"abc\"\n  path: \"/hello\"\n  max_age: 10\n  httponly: true\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_authority_string_0",
    "content": "parse_authority_string: \"1.2.3.4\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_authority_string_1",
    "content": "parse_authority_string: \"[a:b:c:d::]:0\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_authority_string_2",
    "content": "parse_authority_string: \"example.com\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_authority_string_3",
    "content": "parse_authority_string: \"localhost:10000\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_authority_string_4",
    "content": "parse_authority_string: \"0.0.0.0:4000\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_cookie_value_0",
    "content": "parse_cookie_value {\n  cookies: \"token1=abc123; = \"\n  cookies: \"token2=abc123;   \"\n  cookies: \"; token3=abc123;\"\n  cookies: \"=; token4=\\\"abc123\\\"\"\n  key: \"token1\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_cookie_value_1",
    "content": "parse_cookie_value {\n  cookies: \"token1=abc123; = \"\n  cookies: \"token2=abc123;   \"\n  cookies: \"; token3=abc123;\"\n  cookies: \"=; token4=\\\"abc123\\\"\"\n  key: \"token2\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_cookie_value_2",
    "content": "parse_cookie_value {\n  cookies: \"token1=abc123; = \"\n  cookies: \"token2=abc123;   \"\n  cookies: \"; token3=abc123;\"\n  cookies: \"=; token4=\\\"abc123\\\"\"\n  key: \"token3\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_cookie_value_3",
    "content": "parse_cookie_value {\n  cookies: \"token1=abc123; = \"\n  cookies: \"token2=abc123;   \"\n  cookies: \"; token3=abc123;\"\n  cookies: \"=; token4=\\\"abc123\\\"\"\n  key: \"token4\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_cookie_value_4",
    "content": "parse_cookie_value {\n  cookies: \"somekey=somevalue; someotherkey=someothervalue\"\n  cookies: \"abc=def; token=abc123; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"\n  cookies: \"key2=value2; key3=value3\"\n  key: \"token\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_cookie_value_5",
    "content": "parse_cookie_value {\n  cookies: \"dquote=\\\"; quoteddquote=\\\"\\\"\\\"\"\n  cookies: \"leadingdquote=\\\"foobar;\"\n  cookies: \"abc=def; token=\\\"abc123\\\"; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"\n  key: \"token\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_cookie_value_6",
    "content": "parse_cookie_value {\n  cookies: \"dquote=\\\"; quoteddquote=\\\"\\\"\\\"\"\n  cookies: \"leadingdquote=\\\"foobar;\"\n  cookies: \"abc=def; token=\\\"abc123\\\"; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"\n  key: \"dquote\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_cookie_value_7",
    "content": "parse_cookie_value {\n  cookies: \"dquote=\\\"; quoteddquote=\\\"\\\"\\\"\"\n  cookies: \"leadingdquote=\\\"foobar;\"\n  cookies: \"abc=def; token=\\\"abc123\\\"; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"\n  key: \"quoteddquote\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_cookie_value_8",
    "content": "parse_cookie_value {\n  cookies: \"dquote=\\\"; quoteddquote=\\\"\\\"\\\"\"\n  cookies: \"leadingdquote=\\\"foobar;\"\n  cookies: \"abc=def; token=\\\"abc123\\\"; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"\n  key: \"leadingdquote\"\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_parameters_0",
    "content": "parse_parameters {\n  data: \"/hello\"\n  start: 1\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_parameters_1",
    "content": "parse_parameters {\n  data: \"/hello?\"\n  start: 1\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_parameters_2",
    "content": "parse_parameters {\n  data: \"/hello?hello\"\n  start: 2\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_parameters_3",
    "content": "parse_parameters {\n  data: \"/hello?hello=\"\n  start: 0\n}\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_query_string_0",
    "content": "parse_query_string: \"/hello\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_query_string_1",
    "content": "parse_query_string: \"/hello?\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_query_string_2",
    "content": "parse_query_string: \"/hello?hello\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_query_string_3",
    "content": "parse_query_string: \"/hello?hello=world\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_query_string_4",
    "content": "parse_query_string: \"/hello?hello=\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_query_string_5",
    "content": "parse_query_string: \"/hello?hello=&\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_query_string_6",
    "content": "parse_query_string: \"/hello?hello=&hello2=world2\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/parse_query_string_7",
    "content": "parse_query_string: \"/logging?name=admin&level=trace\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/percent_decoding_string_0",
    "content": "percent_decoding_string: \"too%20lar%20\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/percent_decoding_string_1",
    "content": "percent_decoding_string: \"too%20larg%e\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/percent_decoding_string_2",
    "content": "percent_decoding_string: \"too%20large%\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/percent_encoding_string_0",
    "content": "percent_encoding_string: \"too lar \"\n"
  },
  {
    "path": "test/common/http/utility_corpus/percent_encoding_string_1",
    "content": "percent_encoding_string: \"too larg%e\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/percent_encoding_string_2",
    "content": "percent_encoding_string: \"too large%\"\n"
  },
  {
    "path": "test/common/http/utility_corpus/valid",
    "content": "find_query_string: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\7\\177\\177\\17U²@/177\\177N¿77\\177\"\n"
  },
  {
    "path": "test/common/http/utility_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.http;\n\nimport \"validate/validate.proto\";\nimport \"envoy/config/core/v3/protocol.proto\";\n\n// Structured input for utility_fuzz_test.\n\nmessage CookiesKey {\n  repeated string cookies = 1;\n  string key = 2;\n}\n\nmessage GetLastAddressFromXff {\n  string xff = 1;\n  uint32 num_to_skip = 2;\n}\n\nmessage Parameter {\n  string data = 1;\n  uint32 start = 2;\n}\n\nmessage CookieValue {\n  string key = 1;\n  string value = 2;\n  string path = 3;\n  int64 max_age = 4;\n  bool httponly = 5;\n}\n\nmessage UtilityTestCase {\n  reserved 3; // formerly has_set_cookie\n\n  oneof utility_selector {\n    string parse_query_string = 1;\n    CookiesKey parse_cookie_value = 2;\n    GetLastAddressFromXff get_last_address_from_xff = 4;\n    string extract_host_path_from_uri = 5;\n    string percent_encoding_string = 6;\n    string percent_decoding_string = 7;\n    Parameter parse_parameters = 8;\n    string find_query_string = 9\n        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE, strict: false}];\n    CookieValue make_set_cookie_value = 10;\n    string parse_authority_string = 11;\n    envoy.config.core.v3.Http2ProtocolOptions initialize_and_validate = 12;\n  }\n}\n"
  },
  {
    "path": "test/common/http/utility_fuzz_test.cc",
    "content": "#include \"common/http/utility.h\"\n\n#include \"test/common/http/utility_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\nnamespace {\n\nDEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) {\n  try {\n    TestUtility::validate(input);\n  } catch (ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  }\n  switch (input.utility_selector_case()) {\n  case test::common::http::UtilityTestCase::kParseQueryString: {\n    // TODO(dio): Add the case when using parseAndDecodeQueryString().\n    Http::Utility::parseQueryString(input.parse_query_string());\n    break;\n  }\n  case test::common::http::UtilityTestCase::kParseCookieValue: {\n    const auto& parse_cookie_value = input.parse_cookie_value();\n    // Use the production RequestHeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts.\n    auto headers = Http::RequestHeaderMapImpl::create();\n    for (const std::string& cookie : parse_cookie_value.cookies()) {\n      headers->addCopy(Http::LowerCaseString(\"cookie\"), replaceInvalidCharacters(cookie));\n    }\n    Http::Utility::parseCookieValue(*headers, parse_cookie_value.key());\n    break;\n  }\n  case test::common::http::UtilityTestCase::kGetLastAddressFromXff: {\n    const auto& get_last_address_from_xff = input.get_last_address_from_xff();\n    // Use the production RequestHeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts.\n    auto headers = Http::RequestHeaderMapImpl::create();\n    headers->addCopy(Http::LowerCaseString(\"x-forwarded-for\"),\n                     replaceInvalidCharacters(get_last_address_from_xff.xff()));\n    // Take num_to_skip modulo 32 to avoid wasting time in lala land.\n    Http::Utility::getLastAddressFromXFF(*headers, get_last_address_from_xff.num_to_skip() % 32);\n    break;\n  }\n  case 
test::common::http::UtilityTestCase::kExtractHostPathFromUri: {\n    absl::string_view host;\n    absl::string_view path;\n    Http::Utility::extractHostPathFromUri(input.extract_host_path_from_uri(), host, path);\n    break;\n  }\n  case test::common::http::UtilityTestCase::kPercentEncodingString: {\n    Http::Utility::PercentEncoding::encode(input.percent_encoding_string());\n    break;\n  }\n  case test::common::http::UtilityTestCase::kPercentDecodingString: {\n    Http::Utility::PercentEncoding::decode(input.percent_decoding_string());\n    break;\n  }\n  case test::common::http::UtilityTestCase::kParseParameters: {\n    const auto& parse_parameters = input.parse_parameters();\n    // TODO(dio): Add a case when doing parse_parameters with decode_params flag true.\n    Http::Utility::parseParameters(parse_parameters.data(), parse_parameters.start(),\n                                   /*decode_params*/ false);\n    break;\n  }\n  case test::common::http::UtilityTestCase::kFindQueryString: {\n    Http::HeaderString path(input.find_query_string());\n    Http::Utility::findQueryStringStart(path);\n    break;\n  }\n  case test::common::http::UtilityTestCase::kMakeSetCookieValue: {\n    const auto& cookie_value = input.make_set_cookie_value();\n    std::chrono::seconds max_age(cookie_value.max_age());\n    Http::Utility::makeSetCookieValue(cookie_value.key(), cookie_value.value(), cookie_value.path(),\n                                      max_age, cookie_value.httponly());\n    break;\n  }\n  case test::common::http::UtilityTestCase::kParseAuthorityString: {\n    const auto& authority_string = input.parse_authority_string();\n    Http::Utility::parseAuthority(authority_string);\n    break;\n  }\n  case test::common::http::UtilityTestCase::kInitializeAndValidate: {\n    const auto& options = input.initialize_and_validate();\n    try {\n      Http2::Utility::initializeAndValidateOptions(options);\n    } catch (EnvoyException& e) {\n      absl::string_view msg = 
e.what();\n      // initializeAndValidateOptions throws exceptions for 4 different reasons due to malformed\n      // settings, so check for them and allow any other exceptions through\n      if (absl::StartsWith(\n              msg, \"server push is not supported by Envoy and can not be enabled via a SETTINGS \"\n                   \"parameter.\") ||\n          absl::StartsWith(\n              msg, \"the \\\"allow_connect\\\" SETTINGS parameter must only be configured through the \"\n                   \"named field\") ||\n          absl::StartsWith(\n              msg, \"inconsistent HTTP/2 custom SETTINGS parameter(s) detected; identifiers =\") ||\n          absl::EndsWith(\n              msg, \"HTTP/2 SETTINGS parameter(s) can not be configured through both named and \"\n                   \"custom parameters\")) {\n        ENVOY_LOG_MISC(trace, \"Caught exception {} in initializeAndValidateOptions test\", e.what());\n      } else {\n        throw EnvoyException(e.what());\n      }\n    }\n    break;\n  }\n\n  default:\n    // Nothing to do.\n    break;\n  }\n}\n\n} // namespace\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/http/utility_test.cc",
    "content": "#include <array>\n#include <cstdint>\n#include <string>\n\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/config/core/v3/protocol.pb.validate.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/http/exception.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Http {\n\nTEST(HttpUtility, parseQueryString) {\n  EXPECT_EQ(Utility::QueryParams(), Utility::parseQueryString(\"/hello\"));\n  EXPECT_EQ(Utility::QueryParams(), Utility::parseAndDecodeQueryString(\"/hello\"));\n\n  EXPECT_EQ(Utility::QueryParams(), Utility::parseQueryString(\"/hello?\"));\n  EXPECT_EQ(Utility::QueryParams(), Utility::parseAndDecodeQueryString(\"/hello?\"));\n\n  EXPECT_EQ(Utility::QueryParams({{\"hello\", \"\"}}), Utility::parseQueryString(\"/hello?hello\"));\n  EXPECT_EQ(Utility::QueryParams({{\"hello\", \"\"}}),\n            Utility::parseAndDecodeQueryString(\"/hello?hello\"));\n\n  EXPECT_EQ(Utility::QueryParams({{\"hello\", \"world\"}}),\n            Utility::parseQueryString(\"/hello?hello=world\"));\n  EXPECT_EQ(Utility::QueryParams({{\"hello\", \"world\"}}),\n            Utility::parseAndDecodeQueryString(\"/hello?hello=world\"));\n\n  EXPECT_EQ(Utility::QueryParams({{\"hello\", \"\"}}), Utility::parseQueryString(\"/hello?hello=\"));\n  EXPECT_EQ(Utility::QueryParams({{\"hello\", \"\"}}),\n            Utility::parseAndDecodeQueryString(\"/hello?hello=\"));\n\n  EXPECT_EQ(Utility::QueryParams({{\"hello\", \"\"}}), Utility::parseQueryString(\"/hello?hello=&\"));\n  
EXPECT_EQ(Utility::QueryParams({{\"hello\", \"\"}}),\n            Utility::parseAndDecodeQueryString(\"/hello?hello=&\"));\n\n  EXPECT_EQ(Utility::QueryParams({{\"hello\", \"\"}, {\"hello2\", \"world2\"}}),\n            Utility::parseQueryString(\"/hello?hello=&hello2=world2\"));\n  EXPECT_EQ(Utility::QueryParams({{\"hello\", \"\"}, {\"hello2\", \"world2\"}}),\n            Utility::parseAndDecodeQueryString(\"/hello?hello=&hello2=world2\"));\n\n  EXPECT_EQ(Utility::QueryParams({{\"name\", \"admin\"}, {\"level\", \"trace\"}}),\n            Utility::parseQueryString(\"/logging?name=admin&level=trace\"));\n  EXPECT_EQ(Utility::QueryParams({{\"name\", \"admin\"}, {\"level\", \"trace\"}}),\n            Utility::parseAndDecodeQueryString(\"/logging?name=admin&level=trace\"));\n\n  EXPECT_EQ(Utility::QueryParams({{\"param_value_has_encoded_ampersand\", \"a%26b\"}}),\n            Utility::parseQueryString(\"/hello?param_value_has_encoded_ampersand=a%26b\"));\n  EXPECT_EQ(Utility::QueryParams({{\"param_value_has_encoded_ampersand\", \"a&b\"}}),\n            Utility::parseAndDecodeQueryString(\"/hello?param_value_has_encoded_ampersand=a%26b\"));\n\n  EXPECT_EQ(Utility::QueryParams({{\"params_has_encoded_%26\", \"a%26b\"}, {\"ok\", \"1\"}}),\n            Utility::parseQueryString(\"/hello?params_has_encoded_%26=a%26b&ok=1\"));\n  EXPECT_EQ(Utility::QueryParams({{\"params_has_encoded_&\", \"a&b\"}, {\"ok\", \"1\"}}),\n            Utility::parseAndDecodeQueryString(\"/hello?params_has_encoded_%26=a%26b&ok=1\"));\n\n  // A sample of request path with query strings by Prometheus:\n  // https://github.com/envoyproxy/envoy/issues/10926#issuecomment-651085261.\n  EXPECT_EQ(\n      Utility::QueryParams(\n          {{\"filter\",\n            \"%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_\"\n            \"bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29\"}}),\n      Utility::parseQueryString(\n          
\"/stats?filter=%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_\"\n          \"bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29\"));\n  EXPECT_EQ(\n      Utility::QueryParams(\n          {{\"filter\", \"(cluster.upstream_(rq_total|rq_time_sum|rq_time_count|rq_time_bucket|rq_xx|\"\n                      \"rq_complete|rq_active|cx_active))|(server.version)\"}}),\n      Utility::parseAndDecodeQueryString(\n          \"/stats?filter=%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_\"\n          \"bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29\"));\n}\n\nTEST(HttpUtility, getResponseStatus) {\n  EXPECT_THROW(Utility::getResponseStatus(TestResponseHeaderMapImpl{}), CodecClientException);\n  EXPECT_EQ(200U, Utility::getResponseStatus(TestResponseHeaderMapImpl{{\":status\", \"200\"}}));\n}\n\nTEST(HttpUtility, isWebSocketUpgradeRequest) {\n  EXPECT_FALSE(Utility::isWebSocketUpgradeRequest(TestRequestHeaderMapImpl{}));\n  EXPECT_FALSE(\n      Utility::isWebSocketUpgradeRequest(TestRequestHeaderMapImpl{{\"connection\", \"upgrade\"}}));\n  EXPECT_FALSE(\n      Utility::isWebSocketUpgradeRequest(TestRequestHeaderMapImpl{{\"upgrade\", \"websocket\"}}));\n  EXPECT_FALSE(Utility::isWebSocketUpgradeRequest(\n      TestRequestHeaderMapImpl{{\"Connection\", \"close\"}, {\"Upgrade\", \"websocket\"}}));\n  EXPECT_FALSE(Utility::isUpgrade(\n      TestRequestHeaderMapImpl{{\"Connection\", \"IsNotAnUpgrade\"}, {\"Upgrade\", \"websocket\"}}));\n\n  EXPECT_TRUE(Utility::isWebSocketUpgradeRequest(\n      TestRequestHeaderMapImpl{{\"Connection\", \"upgrade\"}, {\"Upgrade\", \"websocket\"}}));\n  EXPECT_TRUE(Utility::isWebSocketUpgradeRequest(\n      TestRequestHeaderMapImpl{{\"connection\", \"upgrade\"}, {\"upgrade\", \"websocket\"}}));\n  EXPECT_TRUE(Utility::isWebSocketUpgradeRequest(\n      TestRequestHeaderMapImpl{{\"connection\", \"Upgrade\"}, {\"upgrade\", 
\"WebSocket\"}}));\n}\n\nTEST(HttpUtility, isUpgrade) {\n  EXPECT_FALSE(Utility::isUpgrade(TestRequestHeaderMapImpl{}));\n  EXPECT_FALSE(Utility::isUpgrade(TestRequestHeaderMapImpl{{\"connection\", \"upgrade\"}}));\n  EXPECT_FALSE(Utility::isUpgrade(TestRequestHeaderMapImpl{{\"upgrade\", \"foo\"}}));\n  EXPECT_FALSE(\n      Utility::isUpgrade(TestRequestHeaderMapImpl{{\"Connection\", \"close\"}, {\"Upgrade\", \"foo\"}}));\n  EXPECT_FALSE(Utility::isUpgrade(\n      TestRequestHeaderMapImpl{{\"Connection\", \"IsNotAnUpgrade\"}, {\"Upgrade\", \"foo\"}}));\n  EXPECT_FALSE(Utility::isUpgrade(\n      TestRequestHeaderMapImpl{{\"Connection\", \"Is Not An Upgrade\"}, {\"Upgrade\", \"foo\"}}));\n\n  EXPECT_TRUE(\n      Utility::isUpgrade(TestRequestHeaderMapImpl{{\"Connection\", \"upgrade\"}, {\"Upgrade\", \"foo\"}}));\n  EXPECT_TRUE(\n      Utility::isUpgrade(TestRequestHeaderMapImpl{{\"connection\", \"upgrade\"}, {\"upgrade\", \"foo\"}}));\n  EXPECT_TRUE(\n      Utility::isUpgrade(TestRequestHeaderMapImpl{{\"connection\", \"Upgrade\"}, {\"upgrade\", \"FoO\"}}));\n  EXPECT_TRUE(Utility::isUpgrade(\n      TestRequestHeaderMapImpl{{\"connection\", \"keep-alive, Upgrade\"}, {\"upgrade\", \"FOO\"}}));\n}\n\n// Start with H1 style websocket request headers. 
Transform to H2 and back.\nTEST(HttpUtility, H1H2H1Request) {\n  TestRequestHeaderMapImpl converted_headers = {\n      {\":method\", \"GET\"}, {\"Upgrade\", \"foo\"}, {\"Connection\", \"upgrade\"}};\n  const TestRequestHeaderMapImpl original_headers(converted_headers);\n\n  ASSERT_TRUE(Utility::isUpgrade(converted_headers));\n  ASSERT_FALSE(Utility::isH2UpgradeRequest(converted_headers));\n  Utility::transformUpgradeRequestFromH1toH2(converted_headers);\n\n  ASSERT_FALSE(Utility::isUpgrade(converted_headers));\n  ASSERT_TRUE(Utility::isH2UpgradeRequest(converted_headers));\n  Utility::transformUpgradeRequestFromH2toH1(converted_headers);\n\n  ASSERT_TRUE(Utility::isUpgrade(converted_headers));\n  ASSERT_FALSE(Utility::isH2UpgradeRequest(converted_headers));\n  ASSERT_EQ(converted_headers, original_headers);\n}\n\n// Start with H2 style websocket request headers. Transform to H1 and back.\nTEST(HttpUtility, H2H1H2Request) {\n  TestRequestHeaderMapImpl converted_headers = {{\":method\", \"CONNECT\"}, {\":protocol\", \"websocket\"}};\n  const TestRequestHeaderMapImpl original_headers(converted_headers);\n\n  ASSERT_FALSE(Utility::isUpgrade(converted_headers));\n  ASSERT_TRUE(Utility::isH2UpgradeRequest(converted_headers));\n  Utility::transformUpgradeRequestFromH2toH1(converted_headers);\n\n  ASSERT_TRUE(Utility::isUpgrade(converted_headers));\n  ASSERT_FALSE(Utility::isH2UpgradeRequest(converted_headers));\n  Utility::transformUpgradeRequestFromH1toH2(converted_headers);\n\n  ASSERT_FALSE(Utility::isUpgrade(converted_headers));\n  ASSERT_TRUE(Utility::isH2UpgradeRequest(converted_headers));\n  converted_headers.removeContentLength();\n  ASSERT_EQ(converted_headers, original_headers);\n}\n\nTEST(HttpUtility, ConnectBytestreamSpecialCased) {\n  TestRequestHeaderMapImpl headers = {{\":method\", \"CONNECT\"}, {\":protocol\", \"bytestream\"}};\n  ASSERT_FALSE(Utility::isH2UpgradeRequest(headers));\n}\n\n// Start with H1 style websocket response headers. 
Transform to H2 and back.\nTEST(HttpUtility, H1H2H1Response) {\n  TestResponseHeaderMapImpl converted_headers = {\n      {\":status\", \"101\"}, {\"upgrade\", \"websocket\"}, {\"connection\", \"upgrade\"}};\n  const TestResponseHeaderMapImpl original_headers(converted_headers);\n\n  ASSERT_TRUE(Utility::isUpgrade(converted_headers));\n  Utility::transformUpgradeResponseFromH1toH2(converted_headers);\n\n  ASSERT_FALSE(Utility::isUpgrade(converted_headers));\n  Utility::transformUpgradeResponseFromH2toH1(converted_headers, \"websocket\");\n\n  ASSERT_TRUE(Utility::isUpgrade(converted_headers));\n  ASSERT_EQ(converted_headers, original_headers);\n}\n\n// Users of the transformation functions should not expect the results to be\n// identical. Because the headers are always added in a set order, the original\n// header order may not be preserved.\nTEST(HttpUtility, OrderNotPreserved) {\n  TestRequestHeaderMapImpl expected_headers = {\n      {\":method\", \"GET\"}, {\"Upgrade\", \"foo\"}, {\"Connection\", \"upgrade\"}};\n\n  TestRequestHeaderMapImpl converted_headers = {\n      {\":method\", \"GET\"}, {\"Connection\", \"upgrade\"}, {\"Upgrade\", \"foo\"}};\n\n  Utility::transformUpgradeRequestFromH1toH2(converted_headers);\n  Utility::transformUpgradeRequestFromH2toH1(converted_headers);\n  EXPECT_EQ(converted_headers, expected_headers);\n}\n\n// A more serious problem with using WebSocket help for general Upgrades, is that method for\n// WebSocket is always GET but the method for other upgrades is allowed to be a\n// POST. 
This is a documented weakness in Envoy docs and can be addressed with\n// a custom x-envoy-original-method header if it is ever needed.\nTEST(HttpUtility, MethodNotPreserved) {\n  TestRequestHeaderMapImpl expected_headers = {\n      {\":method\", \"GET\"}, {\"Upgrade\", \"foo\"}, {\"Connection\", \"upgrade\"}};\n\n  TestRequestHeaderMapImpl converted_headers = {\n      {\":method\", \"POST\"}, {\"Upgrade\", \"foo\"}, {\"Connection\", \"upgrade\"}};\n\n  Utility::transformUpgradeRequestFromH1toH2(converted_headers);\n  Utility::transformUpgradeRequestFromH2toH1(converted_headers);\n  EXPECT_EQ(converted_headers, expected_headers);\n}\n\nTEST(HttpUtility, ContentLengthMangling) {\n  // Content-Length of 0 is removed on the request path.\n  {\n    TestRequestHeaderMapImpl request_headers = {\n        {\":method\", \"GET\"}, {\"Upgrade\", \"foo\"}, {\"Connection\", \"upgrade\"}, {\"content-length\", \"0\"}};\n    Utility::transformUpgradeRequestFromH1toH2(request_headers);\n    EXPECT_TRUE(request_headers.ContentLength() == nullptr);\n  }\n\n  // Non-zero Content-Length is not removed on the request path.\n  {\n    TestRequestHeaderMapImpl request_headers = {\n        {\":method\", \"GET\"}, {\"Upgrade\", \"foo\"}, {\"Connection\", \"upgrade\"}, {\"content-length\", \"1\"}};\n    Utility::transformUpgradeRequestFromH1toH2(request_headers);\n    EXPECT_FALSE(request_headers.ContentLength() == nullptr);\n  }\n\n  // Content-Length of 0 is removed on the response path.\n  {\n    TestResponseHeaderMapImpl response_headers = {{\":status\", \"101\"},\n                                                  {\"upgrade\", \"websocket\"},\n                                                  {\"connection\", \"upgrade\"},\n                                                  {\"content-length\", \"0\"}};\n    Utility::transformUpgradeResponseFromH1toH2(response_headers);\n    EXPECT_TRUE(response_headers.ContentLength() == nullptr);\n  }\n\n  // Non-zero Content-Length is not removed on 
the response path.\n  {\n    TestResponseHeaderMapImpl response_headers = {{\":status\", \"101\"},\n                                                  {\"upgrade\", \"websocket\"},\n                                                  {\"connection\", \"upgrade\"},\n                                                  {\"content-length\", \"1\"}};\n    Utility::transformUpgradeResponseFromH1toH2(response_headers);\n    EXPECT_FALSE(response_headers.ContentLength() == nullptr);\n  }\n}\n\nTEST(HttpUtility, appendXff) {\n  {\n    TestRequestHeaderMapImpl headers;\n    Network::Address::Ipv4Instance address(\"127.0.0.1\");\n    Utility::appendXff(headers, address);\n    EXPECT_EQ(\"127.0.0.1\", headers.get_(\"x-forwarded-for\"));\n  }\n\n  {\n    TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"10.0.0.1\"}};\n    Network::Address::Ipv4Instance address(\"127.0.0.1\");\n    Utility::appendXff(headers, address);\n    EXPECT_EQ(\"10.0.0.1,127.0.0.1\", headers.get_(\"x-forwarded-for\"));\n  }\n\n  {\n    TestRequestHeaderMapImpl headers{{\"x-forwarded-for\", \"10.0.0.1\"}};\n    Network::Address::PipeInstance address(\"/foo\");\n    Utility::appendXff(headers, address);\n    EXPECT_EQ(\"10.0.0.1\", headers.get_(\"x-forwarded-for\"));\n  }\n}\n\nTEST(HttpUtility, appendVia) {\n  {\n    TestResponseHeaderMapImpl headers;\n    Utility::appendVia(headers, \"foo\");\n    EXPECT_EQ(\"foo\", headers.get_(\"via\"));\n  }\n\n  {\n    TestResponseHeaderMapImpl headers{{\"via\", \"foo\"}};\n    Utility::appendVia(headers, \"bar\");\n    EXPECT_EQ(\"foo, bar\", headers.get_(\"via\"));\n  }\n}\n\nTEST(HttpUtility, createSslRedirectPath) {\n  {\n    TestRequestHeaderMapImpl headers{{\":authority\", \"www.lyft.com\"}, {\":path\", \"/hello\"}};\n    EXPECT_EQ(\"https://www.lyft.com/hello\", Utility::createSslRedirectPath(headers));\n  }\n}\n\nnamespace {\n\nenvoy::config::core::v3::Http2ProtocolOptions\nparseHttp2OptionsFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) {\n  
envoy::config::core::v3::Http2ProtocolOptions http2_options;\n  TestUtility::loadFromYamlAndValidate(yaml, http2_options, false, avoid_boosting);\n  return ::Envoy::Http2::Utility::initializeAndValidateOptions(http2_options);\n}\n\n} // namespace\n\nTEST(HttpUtility, parseHttp2Settings) {\n  {\n    using ::Envoy::Http2::Utility::OptionsLimits;\n    auto http2_options = parseHttp2OptionsFromV3Yaml(\"{}\");\n    EXPECT_EQ(OptionsLimits::DEFAULT_HPACK_TABLE_SIZE, http2_options.hpack_table_size().value());\n    EXPECT_EQ(OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS,\n              http2_options.max_concurrent_streams().value());\n    EXPECT_EQ(OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE,\n              http2_options.initial_stream_window_size().value());\n    EXPECT_EQ(OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE,\n              http2_options.initial_connection_window_size().value());\n    EXPECT_EQ(OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES,\n              http2_options.max_outbound_frames().value());\n    EXPECT_EQ(OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES,\n              http2_options.max_outbound_control_frames().value());\n    EXPECT_EQ(OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD,\n              http2_options.max_consecutive_inbound_frames_with_empty_payload().value());\n    EXPECT_EQ(OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM,\n              http2_options.max_inbound_priority_frames_per_stream().value());\n    EXPECT_EQ(OptionsLimits::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT,\n              http2_options.max_inbound_window_update_frames_per_data_frame_sent().value());\n  }\n\n  {\n    const std::string yaml = R\"EOF(\nhpack_table_size: 1\nmax_concurrent_streams: 2\ninitial_stream_window_size: 65535\ninitial_connection_window_size: 65535\n    )EOF\";\n    auto http2_options = parseHttp2OptionsFromV3Yaml(yaml);\n    EXPECT_EQ(1U, http2_options.hpack_table_size().value());\n 
   EXPECT_EQ(2U, http2_options.max_concurrent_streams().value());\n    EXPECT_EQ(65535U, http2_options.initial_stream_window_size().value());\n    EXPECT_EQ(65535U, http2_options.initial_connection_window_size().value());\n  }\n}\n\nTEST(HttpUtility, ValidateStreamErrors) {\n  // Both false, the result should be false.\n  envoy::config::core::v3::Http2ProtocolOptions http2_options;\n  EXPECT_FALSE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options)\n                   .override_stream_error_on_invalid_http_message()\n                   .value());\n\n  // If the new value is not present, the legacy value is respected.\n  http2_options.set_stream_error_on_invalid_http_messaging(true);\n  EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options)\n                  .override_stream_error_on_invalid_http_message()\n                  .value());\n\n  // If the new value is present, it is used.\n  http2_options.mutable_override_stream_error_on_invalid_http_message()->set_value(true);\n  http2_options.set_stream_error_on_invalid_http_messaging(false);\n  EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options)\n                  .override_stream_error_on_invalid_http_message()\n                  .value());\n\n  // Invert values - the new value should still be used.\n  http2_options.mutable_override_stream_error_on_invalid_http_message()->set_value(false);\n  http2_options.set_stream_error_on_invalid_http_messaging(true);\n  EXPECT_FALSE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options)\n                   .override_stream_error_on_invalid_http_message()\n                   .value());\n}\n\nTEST(HttpUtility, ValidateStreamErrorsWithHcm) {\n  envoy::config::core::v3::Http2ProtocolOptions http2_options;\n  http2_options.set_stream_error_on_invalid_http_messaging(true);\n  EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options)\n                  
.override_stream_error_on_invalid_http_message()\n                  .value());\n\n  // If the HCM value is present it will take precedence over the old value.\n  Protobuf::BoolValue hcm_value;\n  hcm_value.set_value(false);\n  EXPECT_FALSE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, true, hcm_value)\n                   .override_stream_error_on_invalid_http_message()\n                   .value());\n  // The HCM value will be ignored if initializeAndValidateOptions is told it is not present.\n  EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, false, hcm_value)\n                  .override_stream_error_on_invalid_http_message()\n                  .value());\n\n  // The override_stream_error_on_invalid_http_message takes precedence over the\n  // global one.\n  http2_options.mutable_override_stream_error_on_invalid_http_message()->set_value(true);\n  EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, true, hcm_value)\n                  .override_stream_error_on_invalid_http_message()\n                  .value());\n\n  {\n    // With runtime flipped, override is ignored.\n    TestScopedRuntime scoped_runtime;\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.hcm_stream_error_on_invalid_message\", \"false\"}});\n    EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, true, hcm_value)\n                    .override_stream_error_on_invalid_http_message()\n                    .value());\n  }\n}\n\nTEST(HttpUtility, ValidateStreamErrorConfigurationForHttp1) {\n  envoy::config::core::v3::Http1ProtocolOptions http1_options;\n  Protobuf::BoolValue hcm_value;\n\n  // nothing explicitly configured, default to false (i.e. 
default stream error behavior for HCM)\n  EXPECT_FALSE(\n      Utility::parseHttp1Settings(http1_options, hcm_value).stream_error_on_invalid_http_message_);\n\n  // http1_options.stream_error overrides HCM.stream_error\n  http1_options.mutable_override_stream_error_on_invalid_http_message()->set_value(true);\n  hcm_value.set_value(false);\n  EXPECT_TRUE(\n      Utility::parseHttp1Settings(http1_options, hcm_value).stream_error_on_invalid_http_message_);\n\n  // http1_options.stream_error overrides HCM.stream_error (flip boolean value)\n  http1_options.mutable_override_stream_error_on_invalid_http_message()->set_value(false);\n  hcm_value.set_value(true);\n  EXPECT_FALSE(\n      Utility::parseHttp1Settings(http1_options, hcm_value).stream_error_on_invalid_http_message_);\n\n  http1_options.clear_override_stream_error_on_invalid_http_message();\n\n  // fallback to HCM.stream_error\n  hcm_value.set_value(true);\n  EXPECT_TRUE(\n      Utility::parseHttp1Settings(http1_options, hcm_value).stream_error_on_invalid_http_message_);\n\n  // fallback to HCM.stream_error (flip boolean value)\n  hcm_value.set_value(false);\n  EXPECT_FALSE(\n      Utility::parseHttp1Settings(http1_options, hcm_value).stream_error_on_invalid_http_message_);\n}\n\nTEST(HttpUtility, getLastAddressFromXFF) {\n  {\n    const std::string first_address = \"192.0.2.10\";\n    const std::string second_address = \"192.0.2.1\";\n    const std::string third_address = \"10.0.0.1\";\n    TestRequestHeaderMapImpl request_headers{\n        {\"x-forwarded-for\", \"192.0.2.10, 192.0.2.1, 10.0.0.1\"}};\n    auto ret = Utility::getLastAddressFromXFF(request_headers);\n    EXPECT_EQ(third_address, ret.address_->ip()->addressAsString());\n    EXPECT_FALSE(ret.single_address_);\n    ret = Utility::getLastAddressFromXFF(request_headers, 1);\n    EXPECT_EQ(second_address, ret.address_->ip()->addressAsString());\n    EXPECT_FALSE(ret.single_address_);\n    ret = Utility::getLastAddressFromXFF(request_headers, 2);\n    
EXPECT_EQ(first_address, ret.address_->ip()->addressAsString());\n    EXPECT_FALSE(ret.single_address_);\n    ret = Utility::getLastAddressFromXFF(request_headers, 3);\n    EXPECT_EQ(nullptr, ret.address_);\n    EXPECT_FALSE(ret.single_address_);\n  }\n  {\n    const std::string first_address = \"192.0.2.10\";\n    const std::string second_address = \"192.0.2.1\";\n    const std::string third_address = \"10.0.0.1\";\n    const std::string fourth_address = \"10.0.0.2\";\n    TestRequestHeaderMapImpl request_headers{\n        {\"x-forwarded-for\", \"192.0.2.10, 192.0.2.1 ,10.0.0.1,10.0.0.2\"}};\n\n    // No space on the left.\n    auto ret = Utility::getLastAddressFromXFF(request_headers);\n    EXPECT_EQ(fourth_address, ret.address_->ip()->addressAsString());\n    EXPECT_FALSE(ret.single_address_);\n\n    // No space on either side.\n    ret = Utility::getLastAddressFromXFF(request_headers, 1);\n    EXPECT_EQ(third_address, ret.address_->ip()->addressAsString());\n    EXPECT_FALSE(ret.single_address_);\n\n    // Exercise rtrim() and ltrim().\n    ret = Utility::getLastAddressFromXFF(request_headers, 2);\n    EXPECT_EQ(second_address, ret.address_->ip()->addressAsString());\n    EXPECT_FALSE(ret.single_address_);\n\n    // No space trimming.\n    ret = Utility::getLastAddressFromXFF(request_headers, 3);\n    EXPECT_EQ(first_address, ret.address_->ip()->addressAsString());\n    EXPECT_FALSE(ret.single_address_);\n\n    // No address found.\n    ret = Utility::getLastAddressFromXFF(request_headers, 4);\n    EXPECT_EQ(nullptr, ret.address_);\n    EXPECT_FALSE(ret.single_address_);\n  }\n  {\n    TestRequestHeaderMapImpl request_headers{{\"x-forwarded-for\", \"\"}};\n    auto ret = Utility::getLastAddressFromXFF(request_headers);\n    EXPECT_EQ(nullptr, ret.address_);\n    EXPECT_FALSE(ret.single_address_);\n  }\n  {\n    TestRequestHeaderMapImpl request_headers{{\"x-forwarded-for\", \",\"}};\n    auto ret = Utility::getLastAddressFromXFF(request_headers);\n    
EXPECT_EQ(nullptr, ret.address_);\n    EXPECT_FALSE(ret.single_address_);\n  }\n  {\n    TestRequestHeaderMapImpl request_headers{{\"x-forwarded-for\", \", \"}};\n    auto ret = Utility::getLastAddressFromXFF(request_headers);\n    EXPECT_EQ(nullptr, ret.address_);\n    EXPECT_FALSE(ret.single_address_);\n  }\n  {\n    TestRequestHeaderMapImpl request_headers{{\"x-forwarded-for\", \", bad\"}};\n    auto ret = Utility::getLastAddressFromXFF(request_headers);\n    EXPECT_EQ(nullptr, ret.address_);\n    EXPECT_FALSE(ret.single_address_);\n  }\n  {\n    TestRequestHeaderMapImpl request_headers;\n    auto ret = Utility::getLastAddressFromXFF(request_headers);\n    EXPECT_EQ(nullptr, ret.address_);\n    EXPECT_FALSE(ret.single_address_);\n  }\n  {\n    const std::string first_address = \"34.0.0.1\";\n    TestRequestHeaderMapImpl request_headers{{\"x-forwarded-for\", first_address}};\n    auto ret = Utility::getLastAddressFromXFF(request_headers);\n    EXPECT_EQ(first_address, ret.address_->ip()->addressAsString());\n    EXPECT_TRUE(ret.single_address_);\n  }\n}\n\nTEST(HttpUtility, TestParseCookie) {\n  TestRequestHeaderMapImpl headers{\n      {\"someheader\", \"10.0.0.1\"},\n      {\"cookie\", \"somekey=somevalue; someotherkey=someothervalue\"},\n      {\"cookie\", \"abc=def; token=abc123; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"},\n      {\"cookie\", \"key2=value2; key3=value3\"}};\n\n  std::string key{\"token\"};\n  std::string value = Utility::parseCookieValue(headers, key);\n  EXPECT_EQ(value, \"abc123\");\n}\n\nTEST(HttpUtility, TestParseCookieBadValues) {\n  TestRequestHeaderMapImpl headers{{\"cookie\", \"token1=abc123; = \"},\n                                   {\"cookie\", \"token2=abc123;   \"},\n                                   {\"cookie\", \"; token3=abc123;\"},\n                                   {\"cookie\", \"=; token4=\\\"abc123\\\"\"}};\n\n  EXPECT_EQ(Utility::parseCookieValue(headers, \"token1\"), \"abc123\");\n  
EXPECT_EQ(Utility::parseCookieValue(headers, \"token2\"), \"abc123\");\n  EXPECT_EQ(Utility::parseCookieValue(headers, \"token3\"), \"abc123\");\n  EXPECT_EQ(Utility::parseCookieValue(headers, \"token4\"), \"abc123\");\n}\n\nTEST(HttpUtility, TestParseCookieWithQuotes) {\n  TestRequestHeaderMapImpl headers{\n      {\"someheader\", \"10.0.0.1\"},\n      {\"cookie\", \"dquote=\\\"; quoteddquote=\\\"\\\"\\\"\"},\n      {\"cookie\", \"leadingdquote=\\\"foobar;\"},\n      {\"cookie\", \"abc=def; token=\\\"abc123\\\"; Expires=Wed, 09 Jun 2021 10:18:14 GMT\"}};\n\n  EXPECT_EQ(Utility::parseCookieValue(headers, \"token\"), \"abc123\");\n  EXPECT_EQ(Utility::parseCookieValue(headers, \"dquote\"), \"\\\"\");\n  EXPECT_EQ(Utility::parseCookieValue(headers, \"quoteddquote\"), \"\\\"\");\n  EXPECT_EQ(Utility::parseCookieValue(headers, \"leadingdquote\"), \"\\\"foobar\");\n}\n\nTEST(HttpUtility, TestMakeSetCookieValue) {\n  EXPECT_EQ(\"name=\\\"value\\\"; Max-Age=10\",\n            Utility::makeSetCookieValue(\"name\", \"value\", \"\", std::chrono::seconds(10), false));\n  EXPECT_EQ(\"name=\\\"value\\\"\",\n            Utility::makeSetCookieValue(\"name\", \"value\", \"\", std::chrono::seconds::zero(), false));\n  EXPECT_EQ(\"name=\\\"value\\\"; Max-Age=10; HttpOnly\",\n            Utility::makeSetCookieValue(\"name\", \"value\", \"\", std::chrono::seconds(10), true));\n  EXPECT_EQ(\"name=\\\"value\\\"; HttpOnly\",\n            Utility::makeSetCookieValue(\"name\", \"value\", \"\", std::chrono::seconds::zero(), true));\n\n  EXPECT_EQ(\"name=\\\"value\\\"; Max-Age=10; Path=/\",\n            Utility::makeSetCookieValue(\"name\", \"value\", \"/\", std::chrono::seconds(10), false));\n  EXPECT_EQ(\"name=\\\"value\\\"; Path=/\",\n            Utility::makeSetCookieValue(\"name\", \"value\", \"/\", std::chrono::seconds::zero(), false));\n  EXPECT_EQ(\"name=\\\"value\\\"; Max-Age=10; Path=/; HttpOnly\",\n            Utility::makeSetCookieValue(\"name\", \"value\", \"/\", 
std::chrono::seconds(10), true));\n  EXPECT_EQ(\"name=\\\"value\\\"; Path=/; HttpOnly\",\n            Utility::makeSetCookieValue(\"name\", \"value\", \"/\", std::chrono::seconds::zero(), true));\n}\n\nTEST(HttpUtility, SendLocalReply) {\n  MockStreamDecoderFilterCallbacks callbacks;\n  bool is_reset = false;\n\n  EXPECT_CALL(callbacks, encodeHeaders_(_, false));\n  EXPECT_CALL(callbacks, encodeData(_, true));\n  EXPECT_CALL(callbacks, streamInfo());\n  Utility::sendLocalReply(\n      is_reset, callbacks,\n      Utility::LocalReplyData{false, Http::Code::PayloadTooLarge, \"large\", absl::nullopt, false});\n}\n\nTEST(HttpUtility, SendLocalGrpcReply) {\n  MockStreamDecoderFilterCallbacks callbacks;\n  bool is_reset = false;\n\n  EXPECT_CALL(callbacks, streamInfo());\n  EXPECT_CALL(callbacks, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.getStatusValue(), \"200\");\n        EXPECT_NE(headers.GrpcStatus(), nullptr);\n        EXPECT_EQ(headers.getGrpcStatusValue(),\n                  std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unknown)));\n        EXPECT_NE(headers.GrpcMessage(), nullptr);\n        EXPECT_EQ(headers.getGrpcMessageValue(), \"large\");\n      }));\n  Utility::sendLocalReply(\n      is_reset, callbacks,\n      Utility::LocalReplyData{true, Http::Code::PayloadTooLarge, \"large\", absl::nullopt, false});\n}\n\nTEST(HttpUtility, SendLocalGrpcReplyWithUpstreamJsonPayload) {\n  MockStreamDecoderFilterCallbacks callbacks;\n  bool is_reset = false;\n\n  const std::string json = R\"EOF(\n{\n    \"error\": {\n        \"code\": 401,\n        \"message\": \"Unauthorized\"\n    }\n}\n  )EOF\";\n\n  EXPECT_CALL(callbacks, streamInfo());\n  EXPECT_CALL(callbacks, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.getStatusValue(), \"200\");\n        EXPECT_NE(headers.GrpcStatus(), nullptr);\n   
     EXPECT_EQ(headers.getGrpcStatusValue(),\n                  std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unauthenticated)));\n        EXPECT_NE(headers.GrpcMessage(), nullptr);\n        const auto& encoded = Utility::PercentEncoding::encode(json);\n        EXPECT_EQ(headers.getGrpcMessageValue(), encoded);\n      }));\n  Utility::sendLocalReply(\n      is_reset, callbacks,\n      Utility::LocalReplyData{true, Http::Code::Unauthorized, json, absl::nullopt, false});\n}\n\nTEST(HttpUtility, RateLimitedGrpcStatus) {\n  MockStreamDecoderFilterCallbacks callbacks;\n\n  EXPECT_CALL(callbacks, streamInfo()).Times(testing::AnyNumber());\n  EXPECT_CALL(callbacks, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_NE(headers.GrpcStatus(), nullptr);\n        EXPECT_EQ(headers.getGrpcStatusValue(),\n                  std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unavailable)));\n      }));\n  Utility::sendLocalReply(\n      false, callbacks,\n      Utility::LocalReplyData{true, Http::Code::TooManyRequests, \"\", absl::nullopt, false});\n\n  EXPECT_CALL(callbacks, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_NE(headers.GrpcStatus(), nullptr);\n        EXPECT_EQ(headers.getGrpcStatusValue(),\n                  std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted)));\n      }));\n  Utility::sendLocalReply(\n      false, callbacks,\n      Utility::LocalReplyData{true, Http::Code::TooManyRequests, \"\",\n                              absl::make_optional<Grpc::Status::GrpcStatus>(\n                                  Grpc::Status::WellKnownGrpcStatus::ResourceExhausted),\n                              false});\n}\n\nTEST(HttpUtility, SendLocalReplyDestroyedEarly) {\n  MockStreamDecoderFilterCallbacks callbacks;\n  bool is_reset = false;\n\n  EXPECT_CALL(callbacks, streamInfo());\n  
EXPECT_CALL(callbacks, encodeHeaders_(_, false)).WillOnce(InvokeWithoutArgs([&]() -> void {\n    is_reset = true;\n  }));\n  EXPECT_CALL(callbacks, encodeData(_, true)).Times(0);\n  Utility::sendLocalReply(\n      is_reset, callbacks,\n      Utility::LocalReplyData{false, Http::Code::PayloadTooLarge, \"large\", absl::nullopt, false});\n}\n\nTEST(HttpUtility, SendLocalReplyHeadRequest) {\n  MockStreamDecoderFilterCallbacks callbacks;\n  bool is_reset = false;\n  EXPECT_CALL(callbacks, streamInfo());\n  EXPECT_CALL(callbacks, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.getContentLengthValue(), fmt::format(\"{}\", strlen(\"large\")));\n      }));\n  Utility::sendLocalReply(\n      is_reset, callbacks,\n      Utility::LocalReplyData{false, Http::Code::PayloadTooLarge, \"large\", absl::nullopt, true});\n}\n\nTEST(HttpUtility, TestExtractHostPathFromUri) {\n  absl::string_view host, path;\n\n  // FQDN\n  Utility::extractHostPathFromUri(\"scheme://dns.name/x/y/z\", host, path);\n  EXPECT_EQ(host, \"dns.name\");\n  EXPECT_EQ(path, \"/x/y/z\");\n\n  // Just the host part\n  Utility::extractHostPathFromUri(\"dns.name\", host, path);\n  EXPECT_EQ(host, \"dns.name\");\n  EXPECT_EQ(path, \"/\");\n\n  // Just host and path\n  Utility::extractHostPathFromUri(\"dns.name/x/y/z\", host, path);\n  EXPECT_EQ(host, \"dns.name\");\n  EXPECT_EQ(path, \"/x/y/z\");\n\n  // Just the path\n  Utility::extractHostPathFromUri(\"/x/y/z\", host, path);\n  EXPECT_EQ(host, \"\");\n  EXPECT_EQ(path, \"/x/y/z\");\n\n  // Some invalid URI\n  Utility::extractHostPathFromUri(\"scheme://adf-scheme://adf\", host, path);\n  EXPECT_EQ(host, \"adf-scheme:\");\n  EXPECT_EQ(path, \"//adf\");\n\n  Utility::extractHostPathFromUri(\"://\", host, path);\n  EXPECT_EQ(host, \"\");\n  EXPECT_EQ(path, \"/\");\n\n  Utility::extractHostPathFromUri(\"/:/adsf\", host, path);\n  EXPECT_EQ(host, \"\");\n  EXPECT_EQ(path, 
\"/:/adsf\");\n}\n\nTEST(HttpUtility, LocalPathFromFilePath) {\n  EXPECT_EQ(\"/\", Utility::localPathFromFilePath(\"\"));\n  EXPECT_EQ(\"c:/\", Utility::localPathFromFilePath(\"c:/\"));\n  EXPECT_EQ(\"Z:/foo/bar\", Utility::localPathFromFilePath(\"Z:/foo/bar\"));\n  EXPECT_EQ(\"/foo/bar\", Utility::localPathFromFilePath(\"foo/bar\"));\n}\n\nTEST(HttpUtility, TestPrepareHeaders) {\n  envoy::config::core::v3::HttpUri http_uri;\n  http_uri.set_uri(\"scheme://dns.name/x/y/z\");\n\n  Http::RequestMessagePtr message = Utility::prepareHeaders(http_uri);\n\n  EXPECT_EQ(\"/x/y/z\", message->headers().getPathValue());\n  EXPECT_EQ(\"dns.name\", message->headers().getHostValue());\n}\n\nTEST(HttpUtility, QueryParamsToString) {\n  EXPECT_EQ(\"\", Utility::queryParamsToString(Utility::QueryParams({})));\n  EXPECT_EQ(\"?a=1\", Utility::queryParamsToString(Utility::QueryParams({{\"a\", \"1\"}})));\n  EXPECT_EQ(\"?a=1&b=2\",\n            Utility::queryParamsToString(Utility::QueryParams({{\"a\", \"1\"}, {\"b\", \"2\"}})));\n}\n\nTEST(HttpUtility, ResetReasonToString) {\n  EXPECT_EQ(\"connection failure\",\n            Utility::resetReasonToString(Http::StreamResetReason::ConnectionFailure));\n  EXPECT_EQ(\"connection termination\",\n            Utility::resetReasonToString(Http::StreamResetReason::ConnectionTermination));\n  EXPECT_EQ(\"local reset\", Utility::resetReasonToString(Http::StreamResetReason::LocalReset));\n  EXPECT_EQ(\"local refused stream reset\",\n            Utility::resetReasonToString(Http::StreamResetReason::LocalRefusedStreamReset));\n  EXPECT_EQ(\"overflow\", Utility::resetReasonToString(Http::StreamResetReason::Overflow));\n  EXPECT_EQ(\"remote reset\", Utility::resetReasonToString(Http::StreamResetReason::RemoteReset));\n  EXPECT_EQ(\"remote refused stream reset\",\n            Utility::resetReasonToString(Http::StreamResetReason::RemoteRefusedStreamReset));\n}\n\n// Verify that it resolveMostSpecificPerFilterConfigGeneric works with nil 
routes.\nTEST(HttpUtility, ResolveMostSpecificPerFilterConfigNilRoute) {\n  EXPECT_EQ(nullptr, Utility::resolveMostSpecificPerFilterConfigGeneric(\"envoy.filter\", nullptr));\n}\n\nclass TestConfig : public Router::RouteSpecificFilterConfig {\npublic:\n  int state_;\n  void merge(const TestConfig& other) { state_ += other.state_; }\n};\n\n// Verify that resolveMostSpecificPerFilterConfig works and we get back the original type.\nTEST(HttpUtility, ResolveMostSpecificPerFilterConfig) {\n  TestConfig testConfig;\n\n  const std::string filter_name = \"envoy.filter\";\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> filter_callbacks;\n\n  // make the file callbacks return our test config\n  ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name))\n      .WillByDefault(Return(&testConfig));\n\n  // test the we get the same object back (as this goes through the dynamic_cast)\n  auto resolved_filter_config = Utility::resolveMostSpecificPerFilterConfig<TestConfig>(\n      filter_name, filter_callbacks.route());\n  EXPECT_EQ(&testConfig, resolved_filter_config);\n}\n\n// Verify that resolveMostSpecificPerFilterConfigGeneric indeed returns the most specific per\n// filter config.\nTEST(HttpUtility, ResolveMostSpecificPerFilterConfigGeneric) {\n  const std::string filter_name = \"envoy.filter\";\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> filter_callbacks;\n\n  const Router::RouteSpecificFilterConfig one;\n  const Router::RouteSpecificFilterConfig two;\n  const Router::RouteSpecificFilterConfig three;\n\n  // Test when there's nothing on the route\n  EXPECT_EQ(nullptr, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name,\n                                                                        filter_callbacks.route()));\n\n  // Testing in reverse order, so that the method always returns the last object.\n  ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name))\n      .WillByDefault(Return(&one));\n  
EXPECT_EQ(&one, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name,\n                                                                     filter_callbacks.route()));\n\n  ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)).WillByDefault(Return(&two));\n  EXPECT_EQ(&two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name,\n                                                                     filter_callbacks.route()));\n\n  ON_CALL(filter_callbacks.route_->route_entry_, perFilterConfig(filter_name))\n      .WillByDefault(Return(&three));\n  EXPECT_EQ(&three, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name,\n                                                                       filter_callbacks.route()));\n\n  // Cover the case of no route entry\n  ON_CALL(*filter_callbacks.route_, routeEntry()).WillByDefault(Return(nullptr));\n  EXPECT_EQ(&two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name,\n                                                                     filter_callbacks.route()));\n}\n\n// Verify that traversePerFilterConfigGeneric traverses in the order of specificity.\nTEST(HttpUtility, TraversePerFilterConfigIteratesInOrder) {\n  const std::string filter_name = \"envoy.filter\";\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> filter_callbacks;\n\n  // Create configs to test; to ease of testing instead of using real objects\n  // we will use pointers that are actually indexes.\n  const std::vector<Router::RouteSpecificFilterConfig> nullconfigs(5);\n  size_t num_configs = 1;\n  ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name))\n      .WillByDefault(Return(&nullconfigs[num_configs]));\n  num_configs++;\n  ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name))\n      .WillByDefault(Return(&nullconfigs[num_configs]));\n  num_configs++;\n  ON_CALL(filter_callbacks.route_->route_entry_, perFilterConfig(filter_name))\n      
.WillByDefault(Return(&nullconfigs[num_configs]));\n\n  // a vector to save which configs are visited by the traversePerFilterConfigGeneric\n  std::vector<size_t> visited_configs(num_configs, 0);\n\n  // Iterate; save the retrieved config index in the iteration index in visited_configs.\n  size_t index = 0;\n  Utility::traversePerFilterConfigGeneric(filter_name, filter_callbacks.route(),\n                                          [&](const Router::RouteSpecificFilterConfig& cfg) {\n                                            int cfg_index = &cfg - nullconfigs.data();\n                                            visited_configs[index] = cfg_index - 1;\n                                            index++;\n                                          });\n\n  // Make sure all methods were called, and in order.\n  for (size_t i = 0; i < visited_configs.size(); i++) {\n    EXPECT_EQ(i, visited_configs[i]);\n  }\n}\n\n// Verify that traversePerFilterConfig works and we get back the original type.\nTEST(HttpUtility, TraversePerFilterConfigTyped) {\n  TestConfig testConfig;\n\n  const std::string filter_name = \"envoy.filter\";\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> filter_callbacks;\n\n  // make the file callbacks return our test config\n  ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name))\n      .WillByDefault(Return(&testConfig));\n\n  // iterate the configs\n  size_t index = 0;\n  Utility::traversePerFilterConfig<TestConfig>(filter_name, filter_callbacks.route(),\n                                               [&](const TestConfig&) { index++; });\n\n  // make sure that the callback was called (which means that the dynamic_cast worked.)\n  EXPECT_EQ(1, index);\n}\n\n// Verify that merging works as expected and we get back the merged result.\nTEST(HttpUtility, GetMergedPerFilterConfig) {\n  TestConfig baseTestConfig, routeTestConfig;\n\n  baseTestConfig.state_ = 1;\n  routeTestConfig.state_ = 1;\n\n  const std::string filter_name = 
\"envoy.filter\";\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> filter_callbacks;\n\n  // make the file callbacks return our test config\n  ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name))\n      .WillByDefault(Return(&baseTestConfig));\n  ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name))\n      .WillByDefault(Return(&routeTestConfig));\n\n  // merge the configs\n  auto merged_cfg = Utility::getMergedPerFilterConfig<TestConfig>(\n      filter_name, filter_callbacks.route(),\n      [&](TestConfig& base_cfg, const TestConfig& route_cfg) { base_cfg.merge(route_cfg); });\n\n  // make sure that the callback was called (which means that the dynamic_cast worked.)\n  ASSERT_TRUE(merged_cfg.has_value());\n  EXPECT_EQ(2, merged_cfg.value().state_);\n}\n\nTEST(HttpUtility, CheckIsIpAddress) {\n  std::array<std::tuple<bool, std::string, std::string, absl::optional<uint32_t>>, 15> patterns{\n      std::make_tuple(true, \"1.2.3.4\", \"1.2.3.4\", absl::nullopt),\n      std::make_tuple(true, \"1.2.3.4:0\", \"1.2.3.4\", 0),\n      std::make_tuple(true, \"0.0.0.0:4000\", \"0.0.0.0\", 4000),\n      std::make_tuple(true, \"127.0.0.1:0\", \"127.0.0.1\", 0),\n      std::make_tuple(true, \"[::]:0\", \"::\", 0),\n      std::make_tuple(true, \"[::]\", \"::\", absl::nullopt),\n      std::make_tuple(true, \"[1::2:3]:0\", \"1::2:3\", 0),\n      std::make_tuple(true, \"[a::1]:0\", \"a::1\", 0),\n      std::make_tuple(true, \"[a:b:c:d::]:0\", \"a:b:c:d::\", 0),\n      std::make_tuple(false, \"example.com\", \"example.com\", absl::nullopt),\n      std::make_tuple(false, \"example.com:8000\", \"example.com\", 8000),\n      std::make_tuple(false, \"example.com:abc\", \"example.com:abc\", absl::nullopt),\n      std::make_tuple(false, \"localhost:10000\", \"localhost\", 10000),\n      std::make_tuple(false, \"localhost\", \"localhost\", absl::nullopt),\n      std::make_tuple(false, \"\", \"\", absl::nullopt)};\n\n  for (const auto& 
pattern : patterns) {\n    bool status_pattern = std::get<0>(pattern);\n    const auto& try_host = std::get<1>(pattern);\n    const auto& expect_host = std::get<2>(pattern);\n    const auto& expect_port = std::get<3>(pattern);\n\n    const auto host_attributes = Utility::parseAuthority(try_host);\n\n    EXPECT_EQ(status_pattern, host_attributes.is_ip_address_);\n    EXPECT_EQ(expect_host, host_attributes.host_);\n    EXPECT_EQ(expect_port, host_attributes.port_);\n  }\n}\n\n// Validates TE header is stripped if it contains an unsupported value\n// Also validate the behavior if a nominated header does not exist\nTEST(HttpUtility, TestTeHeaderGzipTrailersSanitized) {\n  TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, mike, sam, will, close\"},\n      {\"te\", \"gzip, trailers\"},\n      {\"sam\", \"bar\"},\n      {\"will\", \"baz\"},\n  };\n\n  // Expect that the set of headers is valid and can be sanitized\n  EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers));\n\n  Http::TestRequestHeaderMapImpl sanitized_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te,close\"},\n      {\"te\", \"trailers\"},\n  };\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\n// Validates that if the connection header is nominated, the\n// true connection header is not removed\nTEST(HttpUtility, TestNominatedConnectionHeader) {\n  TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, mike, sam, will, connection, 
close\"},\n      {\"te\", \"gzip\"},\n      {\"sam\", \"bar\"},\n      {\"will\", \"baz\"},\n  };\n  EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers));\n\n  TestRequestHeaderMapImpl sanitized_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"close\"},\n  };\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\n// Validate that if the connection header is nominated, we\n// sanitize correctly preserving other nominated headers with\n// supported values\nTEST(HttpUtility, TestNominatedConnectionHeader2) {\n  Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, mike, sam, will, connection, close\"},\n      {\"te\", \"trailers\"},\n      {\"sam\", \"bar\"},\n      {\"will\", \"baz\"},\n  };\n  EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers));\n\n  Http::TestRequestHeaderMapImpl sanitized_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te,close\"},\n      {\"te\", \"trailers\"},\n  };\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\n// Validate that connection is rejected if pseudo headers are nominated\n// This includes an extra comma to ensure that the resulting\n// header is still correct\nTEST(HttpUtility, TestNominatedPseudoHeader) {\n  Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, :path,, 
:method, :authority, connection, close\"},\n      {\"te\", \"trailers\"},\n  };\n\n  // Headers remain unchanged since there are nominated pseudo headers\n  Http::TestRequestHeaderMapImpl sanitized_headers(request_headers);\n\n  EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers));\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\n// Validate that we can sanitize the headers when splitting\n// the Connection header results in empty tokens\nTEST(HttpUtility, TestSanitizeEmptyTokensFromHeaders) {\n  Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, foo,, bar, close\"},\n      {\"te\", \"trailers\"},\n      {\"foo\", \"monday\"},\n      {\"bar\", \"friday\"},\n  };\n  EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers));\n\n  Http::TestRequestHeaderMapImpl sanitized_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te,close\"},\n      {\"te\", \"trailers\"},\n  };\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\n// Validate that we fail the request if there are too many\n// nominated headers\nTEST(HttpUtility, TestTooManyNominatedHeaders) {\n  Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, connection, close, seahawks, niners, chargers, rams, raiders, \"\n                     \"cardinals, eagles, giants, ravens\"},\n      {\"te\", \"trailers\"},\n  };\n\n  // Headers remain unchanged because there are too many nominated headers\n  
Http::TestRequestHeaderMapImpl sanitized_headers(request_headers);\n\n  EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers));\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\nTEST(HttpUtility, TestRejectNominatedXForwardedFor) {\n  Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, x-forwarded-for\"},\n      {\"te\", \"trailers\"},\n  };\n\n  // Headers remain unchanged due to nominated X-Forwarded* header\n  Http::TestRequestHeaderMapImpl sanitized_headers(request_headers);\n\n  EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers));\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\nTEST(HttpUtility, TestRejectNominatedXForwardedHost) {\n  Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, x-forwarded-host\"},\n      {\"te\", \"trailers\"},\n  };\n\n  // Headers remain unchanged due to nominated X-Forwarded* header\n  Http::TestRequestHeaderMapImpl sanitized_headers(request_headers);\n\n  EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers));\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\nTEST(HttpUtility, TestRejectNominatedXForwardedProto) {\n  Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, x-forwarded-proto\"},\n      {\"te\", \"TrAiLeRs\"},\n  };\n\n  // Headers are not sanitized due to nominated X-Forwarded* header\n  EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers));\n\n  
Http::TestRequestHeaderMapImpl sanitized_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, x-forwarded-proto\"},\n      {\"te\", \"trailers\"},\n  };\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\nTEST(HttpUtility, TestRejectTrailersSubString) {\n  Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, close\"},\n      {\"te\", \"SemisWithTripleTrailersAreAthing\"},\n  };\n  EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers));\n\n  Http::TestRequestHeaderMapImpl sanitized_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"close\"},\n  };\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\nTEST(HttpUtility, TestRejectTeHeaderTooLong) {\n  Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"no-headers.com\"},\n      {\"x-request-foo\", \"downstram\"},\n      {\"connection\", \"te, close\"},\n      {\"te\", \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             \"1234567890abcdef\"\n             
\"1234567890abcdef\"\n             \"1234567890abcdef\"},\n  };\n\n  // Headers remain unchanged because the TE value is too long\n  Http::TestRequestHeaderMapImpl sanitized_headers(request_headers);\n\n  EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers));\n  EXPECT_EQ(sanitized_headers, request_headers);\n}\n\nTEST(Url, ParsingFails) {\n  Utility::Url url;\n  EXPECT_FALSE(url.initialize(\"\", false));\n  EXPECT_FALSE(url.initialize(\"foo\", false));\n  EXPECT_FALSE(url.initialize(\"http://\", false));\n  EXPECT_FALSE(url.initialize(\"random_scheme://host.com/path\", false));\n  EXPECT_FALSE(url.initialize(\"http://www.foo.com\", true));\n  EXPECT_FALSE(url.initialize(\"foo.com\", true));\n}\n\nvoid validateUrl(absl::string_view raw_url, absl::string_view expected_scheme,\n                 absl::string_view expected_host_port, absl::string_view expected_path) {\n  Utility::Url url;\n  ASSERT_TRUE(url.initialize(raw_url, false)) << \"Failed to initialize \" << raw_url;\n  EXPECT_EQ(url.scheme(), expected_scheme);\n  EXPECT_EQ(url.hostAndPort(), expected_host_port);\n  EXPECT_EQ(url.pathAndQueryParams(), expected_path);\n}\n\nvoid validateConnectUrl(absl::string_view raw_url, absl::string_view expected_host_port) {\n  Utility::Url url;\n  ASSERT_TRUE(url.initialize(raw_url, true)) << \"Failed to initialize \" << raw_url;\n  EXPECT_TRUE(url.scheme().empty());\n  EXPECT_TRUE(url.pathAndQueryParams().empty());\n  EXPECT_EQ(url.hostAndPort(), expected_host_port);\n}\n\nTEST(Url, ParsingTest) {\n  // Test url with no explicit path (with and without port)\n  validateUrl(\"http://www.host.com\", \"http\", \"www.host.com\", \"/\");\n  validateUrl(\"http://www.host.com:80\", \"http\", \"www.host.com:80\", \"/\");\n\n  // Test url with \"/\" path.\n  validateUrl(\"http://www.host.com:80/\", \"http\", \"www.host.com:80\", \"/\");\n  validateUrl(\"http://www.host.com/\", \"http\", \"www.host.com\", \"/\");\n\n  // Test url with \"?\".\n  
validateUrl(\"http://www.host.com:80/?\", \"http\", \"www.host.com:80\", \"/?\");\n  validateUrl(\"http://www.host.com/?\", \"http\", \"www.host.com\", \"/?\");\n\n  // Test url with \"?\" but without slash.\n  validateUrl(\"http://www.host.com:80?\", \"http\", \"www.host.com:80\", \"?\");\n  validateUrl(\"http://www.host.com?\", \"http\", \"www.host.com\", \"?\");\n\n  // Test url with multi-character path\n  validateUrl(\"http://www.host.com:80/path\", \"http\", \"www.host.com:80\", \"/path\");\n  validateUrl(\"http://www.host.com/path\", \"http\", \"www.host.com\", \"/path\");\n\n  // Test url with multi-character path and ? at the end\n  validateUrl(\"http://www.host.com:80/path?\", \"http\", \"www.host.com:80\", \"/path?\");\n  validateUrl(\"http://www.host.com/path?\", \"http\", \"www.host.com\", \"/path?\");\n\n  // Test https scheme\n  validateUrl(\"https://www.host.com\", \"https\", \"www.host.com\", \"/\");\n\n  // Test url with query parameter\n  validateUrl(\"http://www.host.com:80/?query=param\", \"http\", \"www.host.com:80\", \"/?query=param\");\n  validateUrl(\"http://www.host.com/?query=param\", \"http\", \"www.host.com\", \"/?query=param\");\n\n  // Test url with query parameter but without slash\n  validateUrl(\"http://www.host.com:80?query=param\", \"http\", \"www.host.com:80\", \"?query=param\");\n  validateUrl(\"http://www.host.com?query=param\", \"http\", \"www.host.com\", \"?query=param\");\n\n  // Test url with multi-character path and query parameter\n  validateUrl(\"http://www.host.com:80/path?query=param\", \"http\", \"www.host.com:80\",\n              \"/path?query=param\");\n  validateUrl(\"http://www.host.com/path?query=param\", \"http\", \"www.host.com\", \"/path?query=param\");\n\n  // Test url with multi-character path and more than one query parameter\n  validateUrl(\"http://www.host.com:80/path?query=param&query2=param2\", \"http\", \"www.host.com:80\",\n              \"/path?query=param&query2=param2\");\n  
validateUrl(\"http://www.host.com/path?query=param&query2=param2\", \"http\", \"www.host.com\",\n              \"/path?query=param&query2=param2\");\n  // Test url with multi-character path, more than one query parameter and fragment\n  validateUrl(\"http://www.host.com:80/path?query=param&query2=param2#fragment\", \"http\",\n              \"www.host.com:80\", \"/path?query=param&query2=param2#fragment\");\n  validateUrl(\"http://www.host.com/path?query=param&query2=param2#fragment\", \"http\", \"www.host.com\",\n              \"/path?query=param&query2=param2#fragment\");\n}\n\nTEST(Url, ParsingForConnectTest) {\n  validateConnectUrl(\"host.com:443\", \"host.com:443\");\n  validateConnectUrl(\"host.com:80\", \"host.com:80\");\n}\n\nvoid validatePercentEncodingEncodeDecode(absl::string_view source,\n                                         absl::string_view expected_encoded) {\n  EXPECT_EQ(Utility::PercentEncoding::encode(source), expected_encoded);\n  EXPECT_EQ(Utility::PercentEncoding::decode(expected_encoded), source);\n}\n\nTEST(PercentEncoding, EncodeDecode) {\n  const std::string json = R\"EOF(\n{\n    \"error\": {\n        \"code\": 401,\n        \"message\": \"Unauthorized\"\n    }\n}\n  )EOF\";\n  validatePercentEncodingEncodeDecode(json, \"%0A{%0A    \\\"error\\\": {%0A        \\\"code\\\": 401,%0A   \"\n                                            \"     \\\"message\\\": \\\"Unauthorized\\\"%0A    }%0A}%0A  \");\n  validatePercentEncodingEncodeDecode(\"too large\", \"too large\");\n  validatePercentEncodingEncodeDecode(\"_-ok-_\", \"_-ok-_\");\n}\n\nTEST(PercentEncoding, Decoding) {\n  EXPECT_EQ(Utility::PercentEncoding::decode(\"a%26b\"), \"a&b\");\n  EXPECT_EQ(Utility::PercentEncoding::decode(\"hello%20world\"), \"hello world\");\n  EXPECT_EQ(Utility::PercentEncoding::decode(\"upstream%7Cdownstream\"), \"upstream|downstream\");\n  EXPECT_EQ(\n      Utility::PercentEncoding::decode(\n          
\"filter=%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_bucket%\"\n          \"7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29\"),\n      \"filter=(cluster.upstream_(rq_total|rq_time_sum|rq_time_count|rq_time_bucket|rq_xx|rq_\"\n      \"complete|rq_active|cx_active))|(server.version)\");\n}\n\nTEST(PercentEncoding, DecodingWithTrailingInput) {\n  EXPECT_EQ(Utility::PercentEncoding::decode(\"too%20lar%20\"), \"too lar \");\n  EXPECT_EQ(Utility::PercentEncoding::decode(\"too%20larg%e\"), \"too larg%e\");\n  EXPECT_EQ(Utility::PercentEncoding::decode(\"too%20large%\"), \"too large%\");\n}\n\nTEST(PercentEncoding, Encoding) {\n  EXPECT_EQ(Utility::PercentEncoding::encode(\"too%large\"), \"too%25large\");\n  EXPECT_EQ(Utility::PercentEncoding::encode(\"too%!large/\"), \"too%25!large/\");\n  EXPECT_EQ(Utility::PercentEncoding::encode(\"too%!large/\", \"%!/\"), \"too%25%21large%2F\");\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/init/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"watcher_impl_test\",\n    srcs = [\"watcher_impl_test.cc\"],\n    deps = [\n        \"//test/mocks/init:init_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"target_impl_test\",\n    srcs = [\"target_impl_test.cc\"],\n    deps = [\n        \"//test/mocks/init:init_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"manager_impl_test\",\n    srcs = [\"manager_impl_test.cc\"],\n    deps = [\n        \"//source/common/init:manager_lib\",\n        \"//test/mocks/init:init_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/common/init/manager_impl_test.cc",
    "content": "#include \"common/init/manager_impl.h\"\n\n#include \"test/mocks/init/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nusing ::testing::InSequence;\n\nnamespace Envoy {\nnamespace Init {\nnamespace {\n\nvoid expectUninitialized(const Manager& m) { EXPECT_EQ(Manager::State::Uninitialized, m.state()); }\nvoid expectInitializing(const Manager& m) { EXPECT_EQ(Manager::State::Initializing, m.state()); }\nvoid expectInitialized(const Manager& m) { EXPECT_EQ(Manager::State::Initialized, m.state()); }\n\nTEST(InitManagerImplTest, AddImmediateTargetsWhenUninitialized) {\n  InSequence s;\n\n  ManagerImpl m(\"test\");\n  expectUninitialized(m);\n\n  ExpectableTargetImpl t1(\"t1\");\n  m.add(t1);\n\n  ExpectableTargetImpl t2(\"t2\");\n  m.add(t2);\n\n  ExpectableWatcherImpl w;\n\n  // initialization should complete immediately\n  t1.expectInitializeWillCallReady();\n  t2.expectInitializeWillCallReady();\n  w.expectReady();\n  m.initialize(w);\n  expectInitialized(m);\n}\n\nTEST(InitManagerImplTest, AddAsyncTargetsWhenUninitialized) {\n  InSequence s;\n\n  ManagerImpl m(\"test\");\n  expectUninitialized(m);\n\n  ExpectableTargetImpl t1(\"t1\");\n  m.add(t1);\n\n  ExpectableTargetImpl t2(\"t2\");\n  m.add(t2);\n\n  ExpectableWatcherImpl w;\n\n  // initialization should begin\n  t1.expectInitialize();\n  t2.expectInitialize();\n  m.initialize(w);\n  expectInitializing(m);\n\n  // should still be initializing after first target initializes\n  t1.ready();\n  expectInitializing(m);\n\n  // initialization should finish after second target initializes\n  w.expectReady();\n  t2.ready();\n  expectInitialized(m);\n}\n\nTEST(InitManagerImplTest, AddMixedTargetsWhenUninitialized) {\n  InSequence s;\n\n  ManagerImpl m(\"test\");\n  expectUninitialized(m);\n\n  ExpectableTargetImpl t1(\"t1\");\n  m.add(t1);\n\n  ExpectableTargetImpl t2(\"t2\");\n  m.add(t2);\n\n  ExpectableWatcherImpl w;\n\n  // initialization should begin, and first target will initialize immediately\n  
t1.expectInitializeWillCallReady();\n  t2.expectInitialize();\n  m.initialize(w);\n  expectInitializing(m);\n\n  // initialization should finish after second target initializes\n  w.expectReady();\n  t2.ready();\n  expectInitialized(m);\n}\n\nTEST(InitManagerImplTest, AddImmediateTargetWhenInitializing) {\n  InSequence s;\n\n  ManagerImpl m(\"test\");\n  expectUninitialized(m);\n\n  ExpectableTargetImpl t1(\"t1\");\n  m.add(t1);\n\n  ExpectableWatcherImpl w;\n\n  // initialization should begin\n  t1.expectInitialize();\n  m.initialize(w);\n  expectInitializing(m);\n\n  // adding an immediate target shouldn't finish initialization\n  ExpectableTargetImpl t2(\"t2\");\n  t2.expectInitializeWillCallReady();\n  m.add(t2);\n  expectInitializing(m);\n\n  // initialization should finish after original target initializes\n  w.expectReady();\n  t1.ready();\n  expectInitialized(m);\n}\n\nTEST(InitManagerImplTest, UnavailableTarget) {\n  InSequence s;\n\n  ManagerImpl m(\"test\");\n  expectUninitialized(m);\n\n  // add a target and destroy it\n  {\n    ExpectableTargetImpl t(\"t\");\n    m.add(t);\n    t.expectInitialize().Times(0);\n  }\n\n  ExpectableWatcherImpl w;\n\n  // initialization should complete despite the destroyed target\n  w.expectReady();\n  m.initialize(w);\n  expectInitialized(m);\n}\n\nTEST(InitManagerImplTest, UnavailableManager) {\n  InSequence s;\n\n  ExpectableTargetImpl t(\"t\");\n  ExpectableWatcherImpl w;\n\n  {\n    ManagerImpl m(\"test\");\n    expectUninitialized(m);\n\n    m.add(t);\n\n    // initialization should begin before destroying the manager\n    t.expectInitialize();\n    m.initialize(w);\n    expectInitializing(m);\n  }\n\n  // the watcher should not be notified when the target is initialized\n  w.expectReady().Times(0);\n  t.ready();\n}\n\nTEST(InitManagerImplTest, UnavailableWatcher) {\n  InSequence s;\n\n  ManagerImpl m(\"test\");\n  expectUninitialized(m);\n\n  ExpectableTargetImpl t(\"t\");\n  m.add(t);\n\n  {\n    
ExpectableWatcherImpl w;\n\n    // initialization should begin before destroying the watcher\n    t.expectInitialize();\n    m.initialize(w);\n    expectInitializing(m);\n\n    w.expectReady().Times(0);\n  }\n\n  // initialization should finish without notifying the watcher\n  t.ready();\n}\n\n} // namespace\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/init/target_impl_test.cc",
    "content": "#include \"test/mocks/init/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nusing ::testing::InSequence;\n\nnamespace Envoy {\nnamespace Init {\nnamespace {\n\n// Testing common cases for all the target implementation.\ntemplate <typename T> class TargetImplTest : public ::testing::Test {};\nTYPED_TEST_SUITE_P(TargetImplTest);\n\ntemplate <typename T> std::string getName() { return \"\"; }\ntemplate <> std::string getName<ExpectableTargetImpl>() { return \"target test\"; }\ntemplate <> std::string getName<ExpectableSharedTargetImpl>() { return \"shared target test\"; }\nTYPED_TEST_P(TargetImplTest, Name) {\n  TypeParam target;\n  EXPECT_EQ(getName<TypeParam>(), target.name());\n}\n\nTYPED_TEST_P(TargetImplTest, InitializeWhenAvailable) {\n  InSequence s;\n\n  TypeParam target;\n  ExpectableWatcherImpl watcher;\n\n  // initializing the target through its handle should invoke initialize()...\n  target.expectInitialize();\n  EXPECT_TRUE(target.createHandle(\"test\")->initialize(watcher));\n\n  // calling ready() on the target should invoke the saved watcher handle...\n  watcher.expectReady();\n  EXPECT_TRUE(target.ready());\n\n  // calling ready() a second time should have no effect.\n  watcher.expectReady().Times(0);\n  EXPECT_FALSE(target.ready());\n}\n\n// Initializing TargetHandle return false if uninitialized SharedTarget is destroyed.\nTYPED_TEST_P(TargetImplTest, InitializeWhenUnavailable) {\n  InSequence s;\n  ExpectableWatcherImpl watcher;\n  TargetHandlePtr handle;\n  {\n    TypeParam target;\n\n    // initializing the target after it's been destroyed should do nothing.\n    handle = target.createHandle(\"test\");\n    target.expectInitialize().Times(0);\n    // target destroyed here\n  }\n  EXPECT_FALSE(handle->initialize(watcher));\n}\n\nTYPED_TEST_P(TargetImplTest, ReadyWhenWatcherUnavailable) {\n  TypeParam target;\n  {\n    ExpectableWatcherImpl watcher;\n\n    // initializing the target through its handle should invoke initialize()...\n    
target.expectInitialize();\n    EXPECT_TRUE(target.createHandle(\"test\")->initialize(watcher));\n\n    // calling ready() on the target after the watcher has been destroyed should do nothing.\n    watcher.expectReady().Times(0);\n    // watcher destroyed here\n  }\n  EXPECT_FALSE(target.ready());\n}\n\nREGISTER_TYPED_TEST_SUITE_P(TargetImplTest, Name, InitializeWhenAvailable,\n                            InitializeWhenUnavailable, ReadyWhenWatcherUnavailable);\nusing TargetImplTypes = ::testing::Types<ExpectableTargetImpl, ExpectableSharedTargetImpl>;\nINSTANTIATE_TYPED_TEST_SUITE_P(Init, TargetImplTest, TargetImplTypes);\n\nTYPED_TEST_SUITE(TargetImplTest, TargetImplTypes);\n\n// Below are the specialized tests for different implementations of Target\n\n// Initializing TargetHandle return false if initialized SharedTarget is destroyed.\nTEST(SharedTargetImplTest, ReInitializeWhenUnavailable) {\n  InSequence s;\n  ExpectableWatcherImpl w;\n  TargetHandlePtr handle;\n  {\n    ExpectableSharedTargetImpl target;\n\n    target.expectInitialize();\n    TargetHandlePtr handle1 = target.createHandle(\"m1\");\n    ExpectableWatcherImpl w1;\n    EXPECT_TRUE(handle1->initialize(w1));\n\n    // initializing the target after it's been destroyed should do nothing.\n    handle = target.createHandle(\"m2\");\n    target.expectInitialize().Times(0);\n    // target destroyed\n  }\n  EXPECT_FALSE(handle->initialize(w));\n}\n\n// SharedTarget notifies multiple watchers.\nTEST(SharedTargetImplTest, NotifyAllWatcherWhenInitialization) {\n  InSequence s;\n  ExpectableWatcherImpl w1;\n  ExpectableSharedTargetImpl target;\n\n  target.expectInitialize();\n  TargetHandlePtr handle1 = target.createHandle(\"m1\");\n  EXPECT_TRUE(handle1->initialize(w1));\n\n  ExpectableWatcherImpl w2;\n  target.expectInitialize().Times(0);\n  TargetHandlePtr handle2 = target.createHandle(\"m2\");\n  // calling ready() on the target should invoke all the saved watchers.\n  w1.expectReady();\n  
EXPECT_TRUE(target.ready());\n  w2.expectReady();\n  EXPECT_TRUE(handle2->initialize(w2));\n}\n\n// Initialized SharedTarget notifies further watcher immediately at second initialization attempt.\nTEST(SharedTargetImplTest, InitializedSharedTargetNotifyWatcherWhenAddedAgain) {\n  InSequence s;\n  ExpectableWatcherImpl w1;\n  ExpectableSharedTargetImpl target;\n\n  target.expectInitialize();\n  TargetHandlePtr handle1 = target.createHandle(\"m1\");\n  EXPECT_TRUE(handle1->initialize(w1));\n\n  // calling ready() on the target should invoke the saved watcher handle(s).\n  w1.expectReady();\n  EXPECT_TRUE(target.ready());\n\n  ExpectableWatcherImpl w2;\n  target.expectInitialize().Times(0);\n  TargetHandlePtr handle2 = target.createHandle(\"m2\");\n  // w2 is notified with no further target.ready().\n  w2.expectReady();\n  EXPECT_TRUE(handle2->initialize(w2));\n}\n\nTEST(SharedTargetImplTest, EarlySharedTargetReadyNotifyWatchers) {\n  InSequence s;\n\n  ExpectableSharedTargetImpl target;\n\n  // No watcher yet. Nothing will be notified at this moment.\n  EXPECT_FALSE(target.ready());\n\n  // It's arguable if the shared target should be initialized after ready()\n  // is already invoked.\n  target.expectInitialize().Times(0);\n\n  ExpectableWatcherImpl w1;\n  TargetHandlePtr handle1 = target.createHandle(\"m1\");\n  // w1 is notified with no further target.ready().\n  w1.expectReady();\n  EXPECT_TRUE(handle1->initialize(w1));\n\n  ExpectableWatcherImpl w2;\n  target.expectInitialize().Times(0);\n  TargetHandlePtr handle2 = target.createHandle(\"m2\");\n  // w2 is notified with no further target.ready().\n  w2.expectReady();\n  EXPECT_TRUE(handle2->initialize(w2));\n}\n} // namespace\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/init/watcher_impl_test.cc",
    "content": "#include \"test/mocks/init/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Init {\nnamespace {\n\nTEST(InitWatcherImplTest, Name) {\n  ExpectableWatcherImpl watcher;\n  EXPECT_EQ(\"test\", watcher.name());\n}\n\nTEST(InitWatcherImplTest, ReadyWhenAvailable) {\n  ExpectableWatcherImpl watcher;\n\n  // notifying the watcher through its handle should invoke ready().\n  watcher.expectReady();\n  EXPECT_TRUE(watcher.createHandle(\"test\")->ready());\n}\n\nTEST(InitWatcherImplTest, ReadyWhenUnavailable) {\n  WatcherHandlePtr handle;\n  {\n    ExpectableWatcherImpl watcher;\n\n    // notifying the watcher after it's been destroyed should do nothing.\n    handle = watcher.createHandle(\"test\");\n    watcher.expectReady().Times(0);\n  }\n  EXPECT_FALSE(handle->ready());\n}\n\n} // namespace\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/json/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_fuzz_test(\n    name = \"json_fuzz_test\",\n    srcs = [\"json_fuzz_test.cc\"],\n    corpus = \"json_corpus\",\n    deps = [\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"json_loader_test\",\n    srcs = [\"json_loader_test.cc\"],\n    deps = [\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n    \"envoy_py_test_binary\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_py_test_binary(\n    name = \"generate_test_data\",\n    srcs = [\n        \"generate_test_data.py\",\n        \"util.py\",\n    ] + glob([\"test_*.py\"]),\n)\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/README.md",
    "content": "# Schema Testing\n\nSchema testing works by using Python to generate JSON files as input for a parameterized test in the C++ framework. This was done since it is far simpler to manipulate JSON objects in Python than it is in C++.\n\nOn each test run, Bazel will execute `generate_test_data.py`. This will write a JSON file per test. Each file contains the name of the schema to test against, the blob of data to validate, and whether or not the validation should throw an error.\n\nEach schema gets its own Python file in `test_data/`. The file must be named `test_*.py` for it to be executed. It must contain the function `def test(writer)`.\n\nIf the schema you want to test does not have a file, please create one. See other files for the boilerplate of writing a suite of tests.\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/generate_test_data.py",
    "content": "#!/usr/bin/env python\n\nimport glob\nimport os\nimport shutil\nimport util\n\n\ndef main():\n  test_dir = os.path.join(os.environ['TEST_TMPDIR'], 'config_schemas_test')\n  # Clean after previous run. This might happen e.g. with \"threadsafe\" Death Tests,\n  # where child process re-executes the unit test binary in the same workspace.\n  if os.path.isdir(test_dir):\n    shutil.rmtree(test_dir)\n  os.mkdir(test_dir)\n  writer = util.TestWriter(test_dir)\n\n  # test discovery and execution\n  test_files = glob.glob(os.path.join(os.path.dirname(__file__), \"test_*.py\"))\n  for test_file in test_files:\n    module_name = os.path.splitext(os.path.basename(test_file))[0]\n    __import__(module_name).test(writer)\n\n\nif __name__ == '__main__':\n  main()\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/test_access_log_schema.py",
    "content": "from util import get_blob\nfrom util import true, false\n\nACCESS_LOG_BLOB = {\n    \"access_log\": [{\n        \"filter\": {\n            \"type\":\n                \"logical_and\",\n            \"filters\": [{\n                \"type\": \"not_healthcheck\"\n            }, {\n                \"type\": \"runtime\",\n                \"key\": \"access_log.front_access_log\"\n            }]\n        },\n        \"path\": \"/var/log/envoy/access.log\"\n    }, {\n        \"filter\": {\n            \"type\":\n                \"logical_or\",\n            \"filters\": [{\n                \"runtime_key\": \"access_log.access_error.status\",\n                \"type\": \"status_code\",\n                \"value\": 500,\n                \"op\": \">=\"\n            }, {\n                \"type\": \"status_code\",\n                \"value\": 429,\n                \"op\": \"=\"\n            }, {\n                \"runtime_key\": \"access_log.access_error.duration\",\n                \"type\": \"duration\",\n                \"value\": 1000,\n                \"op\": \">=\"\n            }, {\n                \"type\": \"traceable_request\"\n            }]\n        },\n        \"path\": \"/var/log/envoy/access_error.log\"\n    }]\n}\n\n\ndef test(writer):\n  for idx, item in enumerate(ACCESS_LOG_BLOB[\"access_log\"]):\n    writer.write_test_file(\n        'Valid_idx_' + str(idx),\n        schema='ACCESS_LOG_SCHEMA',\n        data=get_blob(item),\n        throws=False,\n    )\n\n  blob = get_blob(ACCESS_LOG_BLOB)['access_log'][1]\n  blob['filter']['filters'][0]['op'] = '<'\n  writer.write_test_file(\n      'FilterOperatorIsNotSupportedLessThan',\n      schema='ACCESS_LOG_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n\n  blob = get_blob(ACCESS_LOG_BLOB)['access_log'][1]\n  blob['filter']['filters'][0]['op'] = '<='\n  writer.write_test_file(\n      'FilterOperatorIsNotSupportedLessThanEqual',\n      schema='ACCESS_LOG_SCHEMA',\n      data=blob,\n      
throws=True,\n  )\n\n  blob = get_blob(ACCESS_LOG_BLOB)['access_log'][1]\n  blob['filter']['filters'][0]['op'] = '>'\n  writer.write_test_file(\n      'FilterOperatorIsNotSupportedGreaterThan',\n      schema='ACCESS_LOG_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n\n  blob = {\"path\": \"/dev/null\", \"filter\": {\"type\": \"unknown\"}}\n  writer.write_test_file(\n      'FilterTypeIsNotSupported',\n      schema='ACCESS_LOG_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n\n  blob = {\"path\": \"/dev/null\", \"filter\": {\"type\": \"logical_or\", \"filters\": []}}\n  writer.write_test_file(\n      'LessThanTwoFiltersInListNoneLogicalOrThrows',\n      schema='ACCESS_LOG_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n\n  blob = {\"path\": \"/dev/null\", \"filter\": {\"type\": \"logical_and\", \"filters\": []}}\n  writer.write_test_file(\n      'LessThanTwoFiltersInListNoneLogicalAndThrows',\n      schema='ACCESS_LOG_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n\n  blob = {\n      \"path\": \"/dev/null\",\n      \"filter\": {\n          \"type\": \"logical_or\",\n          \"filters\": [{\n              \"type\": \"not_healthcheck\"\n          }]\n      }\n  }\n  writer.write_test_file(\n      'LessThanTwoFiltersInListOneLogicalOrThrows',\n      schema='ACCESS_LOG_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n\n  blob = {\n      \"path\": \"/dev/null\",\n      \"filter\": {\n          \"type\": \"logical_and\",\n          \"filters\": [{\n              \"type\": \"not_healthcheck\"\n          }]\n      }\n  }\n  writer.write_test_file(\n      'LessThanTwoFiltersInListOneLogicalAndThrows',\n      schema='ACCESS_LOG_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/test_cluster_schema.py",
    "content": "from util import get_blob\nfrom util import true, false\n\nCLUSTER_BLOB = {\n    \"name\": \"foo\",\n    \"connect_timeout_ms\": 250,\n    \"type\": \"sds\",\n    \"lb_type\": \"least_request\",\n    \"features\": \"http2\",\n    \"service_name\": \"foo\",\n    \"health_check\": {\n        \"type\": \"http\",\n        \"timeout_ms\": 2000,\n        \"interval_ms\": 10000,\n        \"interval_jitter_ms\": 10000,\n        \"unhealthy_threshold\": 2,\n        \"healthy_threshold\": 2,\n        \"path\": \"/healthcheck\",\n        \"service_name\": \"foo\"\n    },\n    \"outlier_detection\": {}\n}\n\n\ndef test(writer):\n\n  writer.write_test_file(\n      'Valid',\n      schema='CLUSTER_SCHEMA',\n      data=get_blob(CLUSTER_BLOB),\n      throws=False,\n  )\n\n  blob = get_blob(CLUSTER_BLOB)\n  blob['features'] = \"nonexistentfeature\"\n  writer.write_test_file(\n      'UnsupportedFeature',\n      schema='CLUSTER_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/test_http_conn_network_filter_schema.py",
    "content": "from util import get_blob\nfrom util import true, false\n\nHTTP_CONN_NETWORK_FILTER_BLOB = {\n    \"idle_timeout_s\": 300,\n    \"stat_prefix\": \"router\",\n    \"use_remote_address\": true,\n    \"server_name\": \"envoy-123\",\n    \"access_log\": [],\n    \"tracing\": {\n        \"request_headers_for_tags\": [\"x-source\"],\n        \"operation_name\": \"ingress\"\n    },\n    \"filters\": [{\n        \"config\": {\n            \"endpoint\": \"/healthcheck\",\n            \"pass_through_mode\": false\n        },\n        \"name\": \"health_check\"\n    }, {\n        \"config\": {},\n        \"name\": \"router\"\n    }],\n    \"route_config\": {},\n    \"add_user_agent\": true,\n    \"codec_type\": \"auto\"\n}\n\n\ndef test(writer):\n\n  writer.write_test_file(\n      'Valid',\n      schema='HTTP_CONN_NETWORK_FILTER_SCHEMA',\n      data=get_blob(HTTP_CONN_NETWORK_FILTER_BLOB),\n      throws=False,\n  )\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/test_http_router_schema.py",
    "content": "from util import get_blob\nfrom util import true, false\n\nROUTER_HTTP_FILTER_BLOB = {\"dynamic_stats\": true}\n\n\ndef test(writer):\n  writer.write_test_file(\n      'Valid',\n      schema='ROUTER_HTTP_FILTER_SCHEMA',\n      data=get_blob(ROUTER_HTTP_FILTER_BLOB),\n      throws=False,\n  )\n\n  writer.write_test_file(\n      'ValidDefaults',\n      schema='ROUTER_HTTP_FILTER_SCHEMA',\n      data={},\n      throws=False,\n  )\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/test_listener_schema.py",
    "content": "from util import get_blob\nfrom util import true, false\n\nLISTENER_BLOB = {\n    \"address\": \"tcp://0.0.0.0:9300\",\n    \"ssl_context\": {\n        \"alpn_protocols\": \"h2,http/1.1\",\n        \"cert_chain_file\": \"/etc/cert.pem\",\n        \"private_key_file\": \"/etc/key.pem\"\n    },\n    \"use_proxy_proto\": true,\n    \"filters\": []\n}\n\n\ndef test(writer):\n\n  writer.write_test_file(\n      'Valid',\n      schema='LISTENER_SCHEMA',\n      data=get_blob(LISTENER_BLOB),\n      throws=False,\n  )\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/test_route_configuration_schema.py",
    "content": "from util import get_blob\nfrom util import true, false\n\nROUTE_CONFIGURATION_BLOB = {\n    \"virtual_hosts\": [{\n        \"domains\": [\"production.example.com\"],\n        \"require_ssl\": \"all\",\n        \"routes\": [{\n            \"host_redirect\": \"example.com\",\n            \"prefix\": \"/\"\n        },],\n        \"name\": \"production_redirect\"\n    }],\n    \"internal_only_headers\": [\"x-role\", \"x-source\"],\n    \"response_headers_to_remove\": [\"x-powered-by\"]\n}\n\n\ndef test(writer):\n\n  writer.write_test_file(\n      'Valid',\n      schema='ROUTE_CONFIGURATION_SCHEMA',\n      data=get_blob(ROUTE_CONFIGURATION_BLOB),\n      throws=False,\n  )\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/test_route_entry_schema.py",
    "content": "from util import get_blob\nfrom util import true, false\n\nROUTE_ENTRY_CONFIGURATION_BLOB = {\n    \"prefix\": \"/route\",\n    \"cluster\": \"local_service_grpc\",\n    \"priority\": \"high\"\n}\n\n\ndef test(writer):\n\n  writer.write_test_file(\n      'Valid',\n      schema='ROUTE_ENTRY_CONFIGURATION_SCHEMA',\n      data=get_blob(ROUTE_ENTRY_CONFIGURATION_BLOB),\n      throws=False,\n  )\n\n  blob = {\"prefix\": \"/foo\", \"cluster\": \"local_service_grpc\", \"priority\": \"foo\"}\n  writer.write_test_file(\n      'InvalidPriority',\n      schema='ROUTE_ENTRY_CONFIGURATION_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/test_top_level_config_schema.py",
    "content": "from util import get_blob\nfrom util import true, false\n\nTOP_LEVEL_CONFIG_BLOB = {\n    \"listeners\": [{\n        \"address\": \"tcp://127.0.0.1:1234\",\n        \"filters\": []\n    }],\n    \"cluster_manager\": {\n        \"clusters\": []\n    },\n    \"admin\": {\n        \"access_log_path\": \"/var/log/envoy/admin_access.log\",\n        \"address\": \"tcp://0.0.0.0:9901\"\n    },\n    \"watchdog_miss_timeout_ms\": 100,\n    \"watchdog_megamiss_timeout_ms\": 200,\n    \"watchdog_kill_timeout_ms\": 300,\n    \"watchdog_multikill_timeout_ms\": 400,\n    \"tracing\": {\n        \"http\": {\n            \"driver\": {\n                \"type\": \"lightstep\",\n                \"config\": {\n                    \"access_token_file\": \"/etc/envoy/envoy.cfg\",\n                    \"collector_cluster\": \"foo\"\n                }\n            }\n        }\n    }\n}\n\n\ndef test(writer):\n  writer.write_test_file(\n      'Valid',\n      schema='TOP_LEVEL_CONFIG_SCHEMA',\n      data=get_blob(TOP_LEVEL_CONFIG_BLOB),\n      throws=False,\n  )\n\n  blob = get_blob(TOP_LEVEL_CONFIG_BLOB)\n  blob['tracing']['http']['driver']['type'] = 'unknown'\n  writer.write_test_file(\n      'UnsupportedTracingDriver',\n      schema='TOP_LEVEL_CONFIG_SCHEMA',\n      data=blob,\n      throws=True,\n  )\n"
  },
  {
    "path": "test/common/json/config_schemas_test_data/util.py",
    "content": "import copy\nimport json\nimport os\n\n# convenience when dealing with json\ntrue, false = True, False\n\n\ndef get_blob(blob):\n  return copy.deepcopy(blob)\n\n\nclass TestWriter(object):\n\n  def __init__(self, test_dir):\n    self.test_dir = test_dir\n\n  def write_test_file(self, name, schema, data, throws):\n    test_filename = os.path.join(self.test_dir, 'schematest-%s-%s.json' % (schema, name))\n    if os.path.isfile(test_filename):\n      raise ValueError('Test with that name and schema already exists: {}'.format(test_filename))\n\n    with open(test_filename, 'w+') as fh:\n      json.dump({\"schema\": schema, \"throws\": throws, \"data\": data}, fh, indent=True)\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_descriptors.json",
    "content": "    {\n      \"descriptors\": [\n         [{\"key\": \"hello\", \"value\": \"world\"}, {\"key\": \"foo\", \"value\": \"bar\"}],\n         [{\"key\": \"foo2\", \"value\": \"bar2\"}]\n       ]\n    }\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_double.json",
    "content": "{\"hello\": \n[2.0]}\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_double_null.json",
    "content": "{\"hello\": [null]}\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_empty.json",
    "content": "[]\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_empty_braces.json",
    "content": "{}\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_empty_inner.json",
    "content": "{\"foo\": []}\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_failure.json",
    "content": "{\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_hello_bool.json",
    "content": "{\"hello\":true}\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_hello_int.json",
    "content": "{\"hello\":123}\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_nested_int_list.json",
    "content": "{\"1\":{\"11\":\"111\"},\"2\":{\"22\":\"222\"}}\n"
  },
  {
    "path": "test/common/json/json_corpus/basic_unterminated.json",
    "content": "{\"hello\": \n        \n\"world\"\n"
  },
  {
    "path": "test/common/json/json_corpus/clusterfuzz-testcase-minimized-json_fuzz_test-5724109283786752",
    "content": "{s:\"08955690052\"}\n"
  },
  {
    "path": "test/common/json/json_corpus/deep_recursion.json",
    "content": "[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n"
  },
  {
    "path": "test/common/json/json_corpus/double_some_values.json",
    "content": "{\"value1\": 10.5, \"value2\": -12.3}\n"
  },
  {
    "path": "test/common/json/json_corpus/int_max_min.json",
    "content": "{\"max\":9223372036854775807, \"min\":-9223372036854775808}\n"
  },
  {
    "path": "test/common/json/json_corpus/int_too_high.json",
    "content": "{\"val\":9223372036854775808}\n"
  },
  {
    "path": "test/common/json/json_corpus/int_too_low.json",
    "content": "{\"val\":-9223372036854775809}\n"
  },
  {
    "path": "test/common/json/json_corpus/missing_enclosing_document.json",
    "content": "\"listeners\" : [\n  {\n    \"address\": \"tcp://127.0.0.1:1234\",\n    \"filters\": []\n  }\n]\n"
  },
  {
    "path": "test/common/json/json_corpus/some_complex_example.json",
    "content": "{\n    \"env\": \"production\",\n    \"hosts\": [\n        {\n            \"ip_address\": \"10.0.14.27\",\n            \"last_check_in\": \"2015-12-10 22:05:30.286993+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f6647241\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.14.29\",\n            \"last_check_in\": \"2015-12-10 22:05:17.258545+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f0647247\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.205\",\n            \"last_check_in\": \"2015-12-10 22:05:19.921349+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-95fc6525\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.208\",\n            \"last_check_in\": \"2015-12-10 22:05:16.312951+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": 
false,\n                \"instance_id\": \"i-d6ec7266\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.209\",\n            \"last_check_in\": \"2015-12-10 22:05:22.075746+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-d5ec7265\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.43\",\n            \"last_check_in\": \"2015-12-10 22:05:16.808453+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": true,\n                \"instance_id\": \"i-11e726a1\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\",\n                \"load_balancing_weight\": 50\n            }\n        },\n        {\n            \"ip_address\": \"10.0.26.143\",\n            \"last_check_in\": \"2015-12-10 22:06:08.487769+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-9f7aee2f\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.42.185\",\n            \"last_check_in\": \"2015-12-10 22:05:03.713194+00:00\",\n            \"port\": 80,\n            \"revision\": 
\"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-3edbb280\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.141\",\n            \"last_check_in\": \"2015-12-10 22:05:15.724613+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-dd7b1063\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.142\",\n            \"last_check_in\": \"2015-12-10 22:06:04.331018+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-de7b1060\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.143\",\n            \"last_check_in\": \"2015-12-10 22:05:24.151133+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-df7b1061\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.5.0\",\n            
\"last_check_in\": \"2015-12-10 22:06:09.605907+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-497f7efe\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.5.1\",\n            \"last_check_in\": \"2015-12-10 22:06:09.036531+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-4e7f7ef9\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        }\n    ],\n    \"service\": \"fare\"\n}\n"
  },
  {
    "path": "test/common/json/json_fuzz_test.cc",
    "content": "#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\n// We have multiple third party JSON parsers in Envoy, both RapidJSON and Protobuf.\n// We only fuzz protobuf today, since RapidJSON is deprecated and has known\n// limitations when we have deeply nested structures. Do not use RapidJSON for\n// anything new in Envoy! See https://github.com/envoyproxy/envoy/issues/4705.\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  std::string json_string{reinterpret_cast<const char*>(buf), len};\n\n  // Load via Protobuf JSON parsing, if we can.\n  ProtobufWkt::Struct message;\n  try {\n    MessageUtil::loadFromJson(json_string, message);\n    // We should be able to serialize, parse again and get the same result.\n    ProtobufWkt::Struct message2;\n    MessageUtil::loadFromJson(MessageUtil::getJsonStringFromMessage(message), message2);\n    FUZZ_ASSERT(TestUtility::protoEqual(message, message2));\n\n    // MessageUtil::getYamlStringFromMessage automatically convert types, so we have to do another\n    // round-trip.\n    std::string yaml = MessageUtil::getYamlStringFromMessage(message);\n    ProtobufWkt::Struct yaml_message;\n    MessageUtil::loadFromYaml(yaml, yaml_message);\n\n    ProtobufWkt::Struct message3;\n    MessageUtil::loadFromYaml(MessageUtil::getYamlStringFromMessage(yaml_message), message3);\n    FUZZ_ASSERT(TestUtility::protoEqual(yaml_message, message3));\n  } catch (const Envoy::EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"Failed due to {}\", e.what());\n  }\n}\n\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/json/json_loader_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"common/json/json_loader.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Json {\nnamespace {\n\nclass JsonLoaderTest : public testing::Test {\nprotected:\n  JsonLoaderTest() : api_(Api::createApiForTest()) {}\n\n  Api::ApiPtr api_;\n};\n\nTEST_F(JsonLoaderTest, Basic) {\n  EXPECT_THROW(Factory::loadFromString(\"{\"), Exception);\n\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"hello\\\":123}\");\n    EXPECT_TRUE(json->hasObject(\"hello\"));\n    EXPECT_FALSE(json->hasObject(\"world\"));\n    EXPECT_FALSE(json->empty());\n    EXPECT_THROW(json->getObject(\"world\"), Exception);\n    EXPECT_THROW(json->getObject(\"hello\"), Exception);\n    EXPECT_THROW(json->getBoolean(\"hello\"), Exception);\n    EXPECT_THROW(json->getObjectArray(\"hello\"), Exception);\n    EXPECT_THROW(json->getString(\"hello\"), Exception);\n\n    EXPECT_THROW_WITH_MESSAGE(json->getString(\"hello\"), Exception,\n                              \"key 'hello' missing or not a string from lines 1-1\");\n  }\n\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"hello\\\":\\\"123\\\"\\n}\");\n    EXPECT_THROW_WITH_MESSAGE(json->getInteger(\"hello\"), Exception,\n                              \"key 'hello' missing or not an integer from lines 1-2\");\n  }\n\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"hello\\\":true}\");\n    EXPECT_TRUE(json->getBoolean(\"hello\"));\n    EXPECT_TRUE(json->getBoolean(\"hello\", false));\n    EXPECT_FALSE(json->getBoolean(\"world\", false));\n  }\n\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"hello\\\": [\\\"a\\\", \\\"b\\\", 3]}\");\n    EXPECT_THROW(json->getStringArray(\"hello\"), Exception);\n    EXPECT_THROW(json->getStringArray(\"world\"), Exception);\n  }\n\n  {\n    ObjectSharedPtr json = 
Factory::loadFromString(\"{\\\"hello\\\":123}\");\n    EXPECT_EQ(123, json->getInteger(\"hello\", 456));\n    EXPECT_EQ(456, json->getInteger(\"world\", 456));\n  }\n\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"hello\\\": \\n[123]}\");\n\n    EXPECT_THROW_WITH_MESSAGE(\n        json->getObjectArray(\"hello\").at(0)->getString(\"hello\"), Exception,\n        \"JSON field from line 2 accessed with type 'Object' does not match actual type 'Integer'.\");\n  }\n\n  {\n    EXPECT_THROW_WITH_MESSAGE(\n        Factory::loadFromString(\"{\\\"hello\\\": \\n\\n\\\"world\\\"\"), Exception,\n        \"JSON supplied is not valid. Error(offset 19, line 3): Missing a comma or \"\n        \"'}' after an object member.\\n\");\n  }\n\n  {\n    ObjectSharedPtr json_object = Factory::loadFromString(\"[\\\"foo\\\",\\\"bar\\\"]\");\n    EXPECT_FALSE(json_object->empty());\n  }\n\n  {\n    ObjectSharedPtr json_object = Factory::loadFromString(\"[]\");\n    EXPECT_TRUE(json_object->empty());\n  }\n\n  {\n    ObjectSharedPtr json =\n        Factory::loadFromString(\"{\\\"1\\\":{\\\"11\\\":\\\"111\\\"},\\\"2\\\":{\\\"22\\\":\\\"222\\\"}}\");\n    int pos = 0;\n    json->iterate([&pos](const std::string& key, const Json::Object& value) {\n      EXPECT_TRUE(key == \"1\" || key == \"2\");\n\n      if (key == \"1\") {\n        EXPECT_EQ(\"111\", value.getString(\"11\"));\n      } else {\n        EXPECT_EQ(\"222\", value.getString(\"22\"));\n      }\n\n      pos++;\n      return true;\n    });\n\n    EXPECT_EQ(2, pos);\n  }\n\n  {\n    ObjectSharedPtr json =\n        Factory::loadFromString(\"{\\\"1\\\":{\\\"11\\\":\\\"111\\\"},\\\"2\\\":{\\\"22\\\":\\\"222\\\"}}\");\n    int pos = 0;\n    json->iterate([&pos](const std::string& key, const Json::Object& value) {\n      EXPECT_TRUE(key == \"1\" || key == \"2\");\n\n      if (key == \"1\") {\n        EXPECT_EQ(\"111\", value.getString(\"11\"));\n      } else {\n        EXPECT_EQ(\"222\", value.getString(\"22\"));\n      }\n\n    
  pos++;\n      return false;\n    });\n\n    EXPECT_EQ(1, pos);\n  }\n\n  {\n    std::string json = R\"EOF(\n    {\n      \"descriptors\": [\n         [{\"key\": \"hello\", \"value\": \"world\"}, {\"key\": \"foo\", \"value\": \"bar\"}],\n         [{\"key\": \"foo2\", \"value\": \"bar2\"}]\n       ]\n    }\n    )EOF\";\n\n    ObjectSharedPtr config = Factory::loadFromString(json);\n    EXPECT_EQ(2U, config->getObjectArray(\"descriptors\")[0]->asObjectArray().size());\n    EXPECT_EQ(1U, config->getObjectArray(\"descriptors\")[1]->asObjectArray().size());\n  }\n\n  {\n    std::string json = R\"EOF(\n    {\n      \"descriptors\": [\"hello\", \"world\"]\n    }\n    )EOF\";\n\n    ObjectSharedPtr config = Factory::loadFromString(json);\n    std::vector<ObjectSharedPtr> array = config->getObjectArray(\"descriptors\");\n    EXPECT_THROW(array[0]->asObjectArray(), Exception);\n  }\n\n  {\n    std::string json = R\"EOF({})EOF\";\n    ObjectSharedPtr config = Factory::loadFromString(json);\n    ObjectSharedPtr object = config->getObject(\"foo\", true);\n    EXPECT_EQ(2, object->getInteger(\"bar\", 2));\n    EXPECT_TRUE(object->empty());\n  }\n\n  {\n    std::string json = R\"EOF({\"foo\": []})EOF\";\n    ObjectSharedPtr config = Factory::loadFromString(json);\n    EXPECT_TRUE(config->getStringArray(\"foo\").empty());\n  }\n\n  {\n    std::string json = R\"EOF({\"foo\": [\"bar\", \"baz\"]})EOF\";\n    ObjectSharedPtr config = Factory::loadFromString(json);\n    EXPECT_FALSE(config->getStringArray(\"foo\").empty());\n  }\n\n  {\n    std::string json = R\"EOF({})EOF\";\n    ObjectSharedPtr config = Factory::loadFromString(json);\n    EXPECT_THROW(config->getStringArray(\"foo\"), EnvoyException);\n  }\n\n  {\n    std::string json = R\"EOF({})EOF\";\n    ObjectSharedPtr config = Factory::loadFromString(json);\n    EXPECT_TRUE(config->getStringArray(\"foo\", true).empty());\n  }\n\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"hello\\\": \\n[2.0]}\");\n    
EXPECT_THROW(json->getObjectArray(\"hello\").at(0)->getDouble(\"foo\"), Exception);\n  }\n\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"hello\\\": \\n[null]}\");\n    EXPECT_THROW(json->getObjectArray(\"hello\").at(0)->getDouble(\"foo\"), Exception);\n  }\n\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{}\");\n    EXPECT_THROW((void)json->getObjectArray(\"hello\").empty(), Exception);\n  }\n\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{}\");\n    EXPECT_TRUE(json->getObjectArray(\"hello\", true).empty());\n  }\n}\n\nTEST_F(JsonLoaderTest, Integer) {\n  {\n    ObjectSharedPtr json =\n        Factory::loadFromString(\"{\\\"max\\\":9223372036854775807, \\\"min\\\":-9223372036854775808}\");\n    EXPECT_EQ(std::numeric_limits<int64_t>::max(), json->getInteger(\"max\"));\n    EXPECT_EQ(std::numeric_limits<int64_t>::min(), json->getInteger(\"min\"));\n  }\n  {\n    EXPECT_THROW(Factory::loadFromString(\"{\\\"val\\\":9223372036854775808}\"), EnvoyException);\n\n    // I believe this is a bug with rapidjson.\n    // It silently eats numbers below min int64_t with no exception.\n    // Fail when reading key instead of on parse.\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"val\\\":-9223372036854775809}\");\n    EXPECT_THROW(json->getInteger(\"val\"), EnvoyException);\n  }\n}\n\nTEST_F(JsonLoaderTest, Double) {\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"value1\\\": 10.5, \\\"value2\\\": -12.3}\");\n    EXPECT_EQ(10.5, json->getDouble(\"value1\"));\n    EXPECT_EQ(-12.3, json->getDouble(\"value2\"));\n  }\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"foo\\\": 13.22}\");\n    EXPECT_EQ(13.22, json->getDouble(\"foo\", 0));\n    EXPECT_EQ(0, json->getDouble(\"bar\", 0));\n  }\n  {\n    ObjectSharedPtr json = Factory::loadFromString(\"{\\\"foo\\\": \\\"bar\\\"}\");\n    EXPECT_THROW(json->getDouble(\"foo\"), Exception);\n  }\n}\n\nTEST_F(JsonLoaderTest, Hash) {\n  ObjectSharedPtr 
json1 = Factory::loadFromString(\"{\\\"value1\\\": 10.5, \\\"value2\\\": -12.3}\");\n  ObjectSharedPtr json2 = Factory::loadFromString(\"{\\\"value2\\\": -12.3, \\\"value1\\\": 10.5}\");\n  ObjectSharedPtr json3 = Factory::loadFromString(\"  {  \\\"value2\\\":  -12.3, \\\"value1\\\":  10.5} \");\n  ObjectSharedPtr json4 = Factory::loadFromString(\"{\\\"value1\\\": 10.5}\");\n\n  // Objects with keys in different orders should be the same\n  EXPECT_EQ(json1->hash(), json2->hash());\n  // Whitespace is ignored\n  EXPECT_EQ(json2->hash(), json3->hash());\n  // Ensure different hash is computed for different objects\n  EXPECT_NE(json1->hash(), json4->hash());\n}\n\nTEST_F(JsonLoaderTest, Schema) {\n  {\n    std::string invalid_json_schema = R\"EOF(\n    {\n      \"properties\": {\"value1\"}\n    }\n    )EOF\";\n\n    std::string invalid_schema = R\"EOF(\n    {\n      \"properties\" : {\n        \"value1\": {\"type\" : \"faketype\"}\n      }\n    }\n    )EOF\";\n\n    std::string different_schema = R\"EOF(\n    {\n      \"properties\" : {\n        \"value1\" : {\"type\" : \"number\"}\n      },\n      \"additionalProperties\" : false\n    }\n    )EOF\";\n\n    std::string valid_schema = R\"EOF(\n    {\n      \"properties\": {\n        \"value1\": {\"type\" : \"number\"},\n        \"value2\": {\"type\": \"string\"}\n      },\n      \"additionalProperties\": false\n    }\n    )EOF\";\n\n    std::string json_string = R\"EOF(\n    {\n      \"value1\": 10,\n      \"value2\" : \"test\"\n    }\n    )EOF\";\n\n    ObjectSharedPtr json = Factory::loadFromString(json_string);\n    EXPECT_THROW(json->validateSchema(invalid_json_schema), std::invalid_argument);\n    EXPECT_THROW(json->validateSchema(invalid_schema), Exception);\n    EXPECT_THROW(json->validateSchema(different_schema), Exception);\n    EXPECT_NO_THROW(json->validateSchema(valid_schema));\n  }\n\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"value1\": [false, 2.01, 3, null],\n      \"value2\" : \"test\"\n 
   }\n    )EOF\";\n\n    std::string empty_schema = R\"EOF({})EOF\";\n\n    ObjectSharedPtr json = Factory::loadFromString(json_string);\n    EXPECT_NO_THROW(json->validateSchema(empty_schema));\n  }\n}\n\nTEST_F(JsonLoaderTest, NestedSchema) {\n\n  std::string schema = R\"EOF(\n  {\n    \"properties\": {\n      \"value1\": {\"type\" : \"number\"},\n      \"value2\": {\"type\": \"string\"}\n    },\n    \"additionalProperties\": false\n  }\n  )EOF\";\n\n  std::string json_string = R\"EOF(\n  {\n    \"bar\": \"baz\",\n    \"foo\": {\n      \"value1\": \"should have been a number\",\n      \"value2\" : \"test\"\n    }\n  }\n  )EOF\";\n\n  ObjectSharedPtr json = Factory::loadFromString(json_string);\n\n  EXPECT_THROW_WITH_MESSAGE(json->getObject(\"foo\")->validateSchema(schema), Exception,\n                            \"JSON at lines 4-7 does not conform to schema.\\n Invalid schema: \"\n                            \"#/properties/value1\\n Schema violation: type\\n Offending document \"\n                            \"key: #/value1\");\n}\n\nTEST_F(JsonLoaderTest, MissingEnclosingDocument) {\n\n  std::string json_string = R\"EOF(\n  \"listeners\" : [\n    {\n      \"address\": \"tcp://127.0.0.1:1234\",\n      \"filters\": []\n    }\n  ]\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(Factory::loadFromString(json_string), Exception,\n                            \"JSON supplied is not valid. 
Error(offset 14, line 2): Terminate \"\n                            \"parsing due to Handler error.\\n\");\n}\n\nTEST_F(JsonLoaderTest, AsString) {\n  ObjectSharedPtr json = Factory::loadFromString(\"{\\\"name1\\\": \\\"value1\\\", \\\"name2\\\": true}\");\n  json->iterate([&](const std::string& key, const Json::Object& value) {\n    EXPECT_TRUE(key == \"name1\" || key == \"name2\");\n\n    if (key == \"name1\") {\n      EXPECT_EQ(\"value1\", value.asString());\n    } else {\n      EXPECT_THROW(value.asString(), Exception);\n    }\n    return true;\n  });\n}\n\n} // namespace\n} // namespace Json\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/local_reply/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"local_reply_test\",\n    srcs = [\"local_reply_test.cc\"],\n    deps = [\n        \"//source/common/local_reply:local_reply_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/common/local_reply/local_reply_test.cc",
    "content": "#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h\"\n#include \"envoy/http/codes.h\"\n\n#include \"common/http/header_utility.h\"\n#include \"common/local_reply/local_reply.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace LocalReply {\nnamespace {\n\nconst Http::Code TestInitCode = Http::Code::OK;\nconst std::string TestInitBody = \"Init body text\";\nconst absl::string_view TestInitContentType = \"content-type\";\n} // namespace\n\nclass LocalReplyTest : public testing::Test {\npublic:\n  LocalReplyTest() : stream_info_(time_system_.timeSystem()) { resetData(TestInitCode); }\n\n  void resetData(Http::Code code) {\n    code_ = code;\n    body_ = TestInitBody;\n    content_type_ = TestInitContentType;\n  }\n  void resetData(uint32_t code) { resetData(static_cast<Http::Code>(code)); }\n\n  Http::Code code_;\n  std::string body_;\n  absl::string_view content_type_;\n\n  Http::TestRequestHeaderMapImpl request_headers_{{\":method\", \"GET\"}, {\":path\", \"/bar/foo\"}};\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Event::SimulatedTimeSystem time_system_;\n  StreamInfo::StreamInfoImpl stream_info_;\n\n  envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig config_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n};\n\nTEST_F(LocalReplyTest, TestEmptyConfig) {\n  // Empty LocalReply config.\n  auto local = Factory::create(config_, context_);\n\n  local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, TestInitCode);\n  EXPECT_EQ(stream_info_.response_code_, static_cast<uint32_t>(TestInitCode));\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(),\n    
        std::to_string(enumToInt(TestInitCode)));\n  EXPECT_EQ(body_, TestInitBody);\n  EXPECT_EQ(content_type_, \"text/plain\");\n}\n\nTEST_F(LocalReplyTest, TestDefaultLocalReply) {\n  // Default LocalReply should be the same as empty config.\n  auto local = Factory::createDefault();\n\n  local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, TestInitCode);\n  EXPECT_EQ(stream_info_.response_code_, static_cast<uint32_t>(TestInitCode));\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(),\n            std::to_string(enumToInt(TestInitCode)));\n  EXPECT_EQ(body_, TestInitBody);\n  EXPECT_EQ(content_type_, \"text/plain\");\n}\n\nTEST_F(LocalReplyTest, TestInvalidConfigEmptyFilter) {\n  // Invalid config: a mapper should have a valid filter\n  const std::string yaml = R\"(\n    mappers:\n    - status_code: 401\n)\";\n  TestUtility::loadFromYaml(yaml, config_);\n\n  std::string err;\n  EXPECT_FALSE(Validate(config_, &err));\n}\n\nTEST_F(LocalReplyTest, TestInvalidConfigStatusCode) {\n  // Invalid config: status_code should be at range [200, 600)\n  const std::string yaml = R\"(\n    mappers:\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 400\n              runtime_key: key_b\n      status_code: 100\n)\";\n  TestUtility::loadFromYaml(yaml, config_);\n\n  std::string err;\n  EXPECT_FALSE(Validate(config_, &err));\n}\n\nTEST_F(LocalReplyTest, TestDefaultTextFormatter) {\n  // Default text formatter without any mappers\n  const std::string yaml = R\"(\n  body_format:\n     text_format: \"%LOCAL_REPLY_BODY% %RESPONSE_CODE%\"\n)\";\n  TestUtility::loadFromYaml(yaml, config_);\n  auto local = Factory::create(config_, context_);\n\n  local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, TestInitCode);\n  EXPECT_EQ(stream_info_.response_code_, 
static_cast<uint32_t>(TestInitCode));\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(),\n            std::to_string(enumToInt(TestInitCode)));\n  EXPECT_EQ(body_, \"Init body text 200\");\n  EXPECT_EQ(content_type_, \"text/plain\");\n}\n\nTEST_F(LocalReplyTest, TestDefaultJsonFormatter) {\n  // Default json formatter without any mappers\n  const std::string yaml = R\"(\n  body_format:\n    json_format:\n      text: \"plain text\"\n      path: \"%REQ(:path)%\"\n      code: \"%RESPONSE_CODE%\"\n      body: \"%LOCAL_REPLY_BODY%\"\n)\";\n  TestUtility::loadFromYaml(yaml, config_);\n  auto local = Factory::create(config_, context_);\n\n  local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, TestInitCode);\n  EXPECT_EQ(stream_info_.response_code_, static_cast<uint32_t>(TestInitCode));\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(),\n            std::to_string(enumToInt(TestInitCode)));\n  EXPECT_EQ(content_type_, \"application/json\");\n\n  const std::string expected = R\"({\n    \"text\": \"plain text\",\n    \"path\": \"/bar/foo\",\n    \"code\": 200,\n    \"body\": \"Init body text\"\n})\";\n  EXPECT_TRUE(TestUtility::jsonStringEqual(body_, expected));\n}\n\nTEST_F(LocalReplyTest, TestMapperRewrite) {\n  // Match with response_code, and rewrite the code and body.\n  const std::string yaml = R\"(\n    mappers:\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 400\n              runtime_key: key_b\n      status_code: 401\n      body:\n        inline_string: \"400 body text\"\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 410\n              runtime_key: key_b\n      body:\n        inline_string: \"410 body text\"\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n   
         value:\n              default_value: 420\n              runtime_key: key_b\n      status_code: 421\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 430\n              runtime_key: key_b\n)\";\n  TestUtility::loadFromYaml(yaml, config_);\n  auto local = Factory::create(config_, context_);\n\n  // code=400 matches the first filter; rewrite code and body\n  resetData(400);\n  local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, static_cast<Http::Code>(401));\n  EXPECT_EQ(stream_info_.response_code_, 401U);\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(), \"401\");\n  EXPECT_EQ(body_, \"400 body text\");\n  EXPECT_EQ(content_type_, \"text/plain\");\n\n  // code=410 matches the second filter; rewrite body only\n  resetData(410);\n  local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, static_cast<Http::Code>(410));\n  EXPECT_EQ(stream_info_.response_code_, 410U);\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(), \"410\");\n  EXPECT_EQ(body_, \"410 body text\");\n  EXPECT_EQ(content_type_, \"text/plain\");\n\n  // code=420 matches the third filter; rewrite code only\n  resetData(420);\n  local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, static_cast<Http::Code>(421));\n  EXPECT_EQ(stream_info_.response_code_, 421U);\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(), \"421\");\n  EXPECT_EQ(body_, TestInitBody);\n  EXPECT_EQ(content_type_, \"text/plain\");\n\n  // code=430 matches the fourth filter; rewrite nothing\n  resetData(430);\n  local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, static_cast<Http::Code>(430));\n  EXPECT_EQ(stream_info_.response_code_, 430U);\n  
EXPECT_EQ(response_headers_.Status()->value().getStringView(), \"430\");\n  EXPECT_EQ(body_, TestInitBody);\n  EXPECT_EQ(content_type_, \"text/plain\");\n}\n\nTEST_F(LocalReplyTest, TestMapperFormat) {\n  // Match with response_code, and rewrite the code and body.\n  const std::string yaml = R\"(\n    mappers:\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 400\n              runtime_key: key_b\n      status_code: 401\n      body:\n        inline_string: \"401 body text\"\n      body_format_override:\n        json_format:\n          text: \"401 filter formatter\"\n          path: \"%REQ(:path)%\"\n          code: \"%RESPONSE_CODE%\"\n          body: \"%LOCAL_REPLY_BODY%\"\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 410\n              runtime_key: key_b\n      status_code: 411\n      body:\n        inline_string: \"411 body text\"\n    body_format:\n      text_format: \"%LOCAL_REPLY_BODY% %RESPONSE_CODE% default formatter\"\n)\";\n  TestUtility::loadFromYaml(yaml, config_);\n  auto local = Factory::create(config_, context_);\n\n  // code=400 matches the first filter; rewrite code and body\n  // has its own formatter\n  resetData(400);\n  local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, static_cast<Http::Code>(401));\n  EXPECT_EQ(stream_info_.response_code_, 401U);\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(), \"401\");\n  EXPECT_EQ(content_type_, \"application/json\");\n\n  const std::string expected = R\"({\n    \"text\": \"401 filter formatter\",\n    \"path\": \"/bar/foo\",\n    \"code\": 401,\n    \"body\": \"401 body text\"\n})\";\n  EXPECT_TRUE(TestUtility::jsonStringEqual(body_, expected));\n\n  // code=410 matches the second filter; rewrite code and body\n  // but using default formatter\n  
resetData(410);\n  local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, static_cast<Http::Code>(411));\n  EXPECT_EQ(stream_info_.response_code_, 411U);\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(), \"411\");\n  EXPECT_EQ(body_, \"411 body text 411 default formatter\");\n  EXPECT_EQ(content_type_, \"text/plain\");\n}\n\nTEST_F(LocalReplyTest, TestHeaderAddition) {\n  // Default text formatter without any mappers\n  const std::string yaml = R\"(\n    mappers:\n    - filter:\n        status_code_filter:\n          comparison:\n            op: GE\n            value:\n              default_value: 0\n              runtime_key: key_b\n      headers_to_add:\n        - header:\n            key: foo-1\n            value: bar1\n          append: true\n        - header:\n            key: foo-2\n            value: override-bar2\n          append: false\n        - header:\n            key: foo-3\n            value: append-bar3\n          append: true\n)\";\n  TestUtility::loadFromYaml(yaml, config_);\n  auto local = Factory::create(config_, context_);\n\n  response_headers_.addCopy(\"foo-2\", \"bar2\");\n  response_headers_.addCopy(\"foo-3\", \"bar3\");\n  local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, TestInitCode);\n  EXPECT_EQ(stream_info_.response_code_, static_cast<uint32_t>(TestInitCode));\n  EXPECT_EQ(content_type_, \"text/plain\");\n\n  EXPECT_EQ(response_headers_.get_(\"foo-1\"), \"bar1\");\n  EXPECT_EQ(response_headers_.get_(\"foo-2\"), \"override-bar2\");\n  std::vector<absl::string_view> out;\n  Http::HeaderUtility::getAllOfHeader(response_headers_, \"foo-3\", out);\n  ASSERT_EQ(out.size(), 2);\n  ASSERT_EQ(out[0], \"bar3\");\n  ASSERT_EQ(out[1], \"append-bar3\");\n}\n\nTEST_F(LocalReplyTest, TestMapperWithContentType) {\n  // Match with response_code, and rewrite the code and body.\n  const std::string yaml = R\"(\n    
mappers:\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 400\n              runtime_key: key_b\n      status_code: 401\n      body:\n        inline_string: \"401 body text\"\n      body_format_override:\n        text_format: \"<h1>%LOCAL_REPLY_BODY%</h1>\"\n        content_type: \"text/html; charset=UTF-8\"\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 410\n              runtime_key: key_b\n      status_code: 411\n      body:\n        inline_string: \"411 body text\"\n    - filter:\n        status_code_filter:\n          comparison:\n            op: EQ\n            value:\n              default_value: 420\n              runtime_key: key_b\n      status_code: 421\n      body:\n        inline_string: \"421 body text\"\n      body_format_override:\n        text_format: \"%LOCAL_REPLY_BODY%\"\n    body_format:\n      text_format: \"<h1>%LOCAL_REPLY_BODY%</h1> %RESPONSE_CODE% default formatter\"\n      content_type: \"text/html; charset=UTF-8\"\n)\";\n  TestUtility::loadFromYaml(yaml, config_);\n  auto local = Factory::create(config_, context_);\n\n  // code=400 matches the first filter; rewrite code and body\n  // has its own formatter.\n  // content-type is explicitly set to text/html; charset=UTF-8.\n  resetData(400);\n  local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, static_cast<Http::Code>(401));\n  EXPECT_EQ(stream_info_.response_code_, 401U);\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(), \"401\");\n  EXPECT_EQ(body_, \"<h1>401 body text</h1>\");\n  EXPECT_EQ(content_type_, \"text/html; charset=UTF-8\");\n\n  // code=410 matches the second filter; rewrite code and body\n  // but using default formatter.\n  // content-type is explicitly set to text/html; charset=UTF-8.\n  resetData(410);\n  
local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, static_cast<Http::Code>(411));\n  EXPECT_EQ(stream_info_.response_code_, 411U);\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(), \"411\");\n  EXPECT_EQ(body_, \"<h1>411 body text</h1> 411 default formatter\");\n  EXPECT_EQ(content_type_, \"text/html; charset=UTF-8\");\n\n  // code=420 matches the third filter; rewrite code and body\n  // has its own formatter.\n  // default content-type is set based on reply format type.\n  resetData(420);\n  local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_);\n  EXPECT_EQ(code_, static_cast<Http::Code>(421));\n  EXPECT_EQ(stream_info_.response_code_, 421U);\n  EXPECT_EQ(response_headers_.Status()->value().getStringView(), \"421\");\n  EXPECT_EQ(body_, \"421 body text\");\n  EXPECT_EQ(content_type_, \"text/plain\");\n}\n\n} // namespace LocalReply\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/memory/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"debug_test\",\n    srcs = [\"debug_test.cc\"],\n    deps = [\"//source/common/memory:stats_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"heap_shrinker_test\",\n    srcs = [\"heap_shrinker_test.cc\"],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/memory:heap_shrinker_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/server:overload_manager_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/memory/debug_test.cc",
    "content": "#include \"common/memory/stats.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Memory {\nnamespace {\n\n#ifdef ENVOY_MEMORY_DEBUG_ENABLED\n\nconstexpr int ArraySize = 10;\n\nstruct MyStruct {\n  MyStruct() : x_(0) {} // words_ is uninitialized; will have whatever allocator left there.\n  uint64_t x_;\n  uint64_t words_[ArraySize];\n};\n\nTEST(MemoryDebug, ByteSize) {\n  uint64_t before = Stats::totalCurrentlyAllocated();\n  auto ptr = std::make_unique<MyStruct>();\n  uint64_t after = Stats::totalCurrentlyAllocated();\n  EXPECT_LE(sizeof(MyStruct), after - before);\n}\n\nTEST(MemoryDebug, ScribbleOnNew) {\n  auto ptr = std::make_unique<MyStruct>();\n  for (int i = 0; i < ArraySize; ++i) {\n    // This is the pattern written by tcmalloc's debug library.\n    EXPECT_EQ(0xabababababababab, ptr->words_[i]);\n  }\n}\n\nTEST(MemoryDebug, ScribbleOnDelete) {\n  uint64_t* words;\n  {\n    auto ptr = std::make_unique<MyStruct>();\n    words = ptr->words_;\n  }\n  for (int i = 0; i < ArraySize; ++i) {\n    // This is the pattern written by tcmalloc's debug library on destruction.\n    // Note: this test cannot be run under valgrind or asan.\n    EXPECT_EQ(0xcdcdcdcdcdcdcdcd, words[i]);\n  }\n}\n\nTEST(MemoryDebug, ZeroByteAlloc) { auto ptr = std::make_unique<uint8_t[]>(0); }\n\n#endif // ENVOY_MEMORY_DEBUG_ENABLED\n\n} // namespace\n} // namespace Memory\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/memory/heap_shrinker_test.cc",
    "content": "#include \"common/event/dispatcher_impl.h\"\n#include \"common/memory/heap_shrinker.h\"\n#include \"common/memory/stats.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/server/overload_manager.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Memory {\nnamespace {\n\nclass HeapShrinkerTest : public testing::Test {\nprotected:\n  HeapShrinkerTest()\n      : api_(Api::createApiForTest(stats_, time_system_)),\n        dispatcher_(\"test_thread\", *api_, time_system_) {}\n\n  void step() {\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(10000), dispatcher_,\n                                   Event::Dispatcher::RunType::NonBlock);\n  }\n\n  Envoy::Stats::TestUtil::TestStore stats_;\n  Event::SimulatedTimeSystem time_system_;\n  Api::ApiPtr api_;\n  Event::DispatcherImpl dispatcher_;\n  NiceMock<Server::MockOverloadManager> overload_manager_;\n  Event::TimerCb timer_cb_;\n};\n\nTEST_F(HeapShrinkerTest, DoNotShrinkWhenNotConfigured) {\n  NiceMock<Event::MockDispatcher> dispatcher;\n  EXPECT_CALL(overload_manager_, registerForAction(_, _, _)).WillOnce(Return(false));\n  EXPECT_CALL(dispatcher, createTimer_(_)).Times(0);\n  HeapShrinker h(dispatcher, overload_manager_, stats_);\n}\n\nTEST_F(HeapShrinkerTest, ShrinkWhenTriggered) {\n  Server::OverloadActionCb action_cb;\n  EXPECT_CALL(overload_manager_, registerForAction(_, _, _))\n      .WillOnce(Invoke([&](const std::string&, Event::Dispatcher&, Server::OverloadActionCb cb) {\n        action_cb = cb;\n        return true;\n      }));\n\n  HeapShrinker h(dispatcher_, overload_manager_, stats_);\n\n  auto data = std::make_unique<char[]>(5000000);\n  const uint64_t physical_mem_before_shrink =\n      Stats::totalCurrentlyReserved() - 
Stats::totalPageHeapUnmapped();\n  data.reset();\n\n  Envoy::Stats::Counter& shrink_count =\n      stats_.counter(\"overload.envoy.overload_actions.shrink_heap.shrink_count\");\n  action_cb(Server::OverloadActionState::saturated());\n  step();\n  EXPECT_EQ(1, shrink_count.value());\n\n  const uint64_t physical_mem_after_shrink =\n      Stats::totalCurrentlyReserved() - Stats::totalPageHeapUnmapped();\n#if defined(TCMALLOC) || defined(GPERFTOOLS_TCMALLOC)\n  EXPECT_GE(physical_mem_before_shrink, physical_mem_after_shrink);\n#else\n  EXPECT_EQ(physical_mem_before_shrink, physical_mem_after_shrink);\n#endif\n  Stats::dumpStatsToLog();\n\n  step();\n  EXPECT_EQ(2, shrink_count.value());\n\n  action_cb(Server::OverloadActionState::inactive());\n  step();\n  step();\n  EXPECT_EQ(2, shrink_count.value());\n}\n\n} // namespace\n} // namespace Memory\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"listener_impl_test_base_lib\",\n    hdrs = [\"listener_impl_test_base.h\"],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:listener_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"address_impl_test\",\n    srcs = [\"address_impl_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"address_impl_speed_test\",\n    srcs = [\"address_impl_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/network:address_lib\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"address_impl_speed_test_benchmark_test\",\n    benchmark_binary = \"address_impl_speed_test\",\n)\n\nenvoy_cc_test(\n    name = 
\"cidr_range_test\",\n    srcs = [\"cidr_range_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:cidr_range_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"connection_impl_test\",\n    srcs = [\"connection_impl_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:connection_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"apple_dns_impl_test\",\n    srcs = select({\n        \"//bazel:apple\": [\"apple_dns_impl_test.cc\"],\n        \"//conditions:default\": [],\n    }),\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        
\"//source/common/stats:stats_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n    ] + select({\n        \"//bazel:apple\": [\"//source/common/network:dns_lib\"],\n        \"//conditions:default\": [],\n    }),\n)\n\nenvoy_cc_test(\n    name = \"dns_impl_test\",\n    srcs = [\"dns_impl_test.cc\"],\n    args = [\n        # Used in createDnsResolver to force creation of DnsResolverImpl when running test on macOS.\n        \"--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups\",\n    ],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:dns_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"filter_manager_impl_test\",\n    srcs = 
[\"filter_manager_impl_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:filter_manager_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/tcp_proxy\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/filters/network/ratelimit:ratelimit_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/extensions/filters/common/ratelimit:ratelimit_mocks\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/ratelimit:ratelimit_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"lc_trie_test\",\n    srcs = [\"lc_trie_test.cc\"],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:cidr_range_lib\",\n        \"//source/common/network:lc_trie_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"listen_socket_impl_test\",\n    srcs = [\"listen_socket_impl_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//test/mocks/network:io_handle_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        
\"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"listener_impl_test\",\n    srcs = [\"listener_impl_test.cc\"],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:listener_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//test/common/network:listener_impl_test_base_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"udp_listener_impl_test_base_lib\",\n    hdrs = [\"udp_listener_impl_test_base.h\"],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:listener_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"udp_listener_impl_test\",\n    srcs = [\"udp_listener_impl_test.cc\"],\n    deps = [\n        \":udp_listener_impl_test_base_lib\",\n        
\"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:listener_lib\",\n        \"//source/common/network:socket_option_lib\",\n        \"//source/common/network:udp_packet_writer_handler_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/common/network:listener_impl_test_base_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"udp_listener_impl_batch_writer_test\",\n    srcs = [\"udp_listener_impl_batch_writer_test.cc\"],\n    tags = [\n        # Skipping as quiche quic_gso_batch_writer.h does not exist on Windows\n        \"skip_on_windows\",\n    ],\n    deps = [\n        \":udp_listener_impl_test_base_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:listener_lib\",\n        \"//source/common/network:socket_option_lib\",\n        \"//source/common/network:udp_packet_writer_handler_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/quic_listeners/quiche:udp_gso_batch_writer_lib\",\n        \"//test/common/network:listener_impl_test_base_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@com_googlesource_quiche//:quic_test_tools_mock_syscall_wrapper_lib\",\n       
 \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"resolver_test\",\n    srcs = [\"resolver_impl_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/common/protobuf\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:registry_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"socket_option_test\",\n    srcs = [\"socket_option_test.h\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:socket_option_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"socket_option_impl_test\",\n    srcs = [\"socket_option_impl_test.cc\"],\n    deps = [\n        \":socket_option_test\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"socket_option_factory_test\",\n    srcs = [\"socket_option_factory_test.cc\"],\n    external_deps = [\"abseil_str_format\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:socket_option_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name 
= \"addr_family_aware_socket_option_impl_test\",\n    srcs = [\"addr_family_aware_socket_option_impl_test.cc\"],\n    deps = [\n        \":socket_option_test\",\n        \"//source/common/network:addr_family_aware_socket_option_lib\",\n        \"//source/common/network:address_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"utility_fuzz_test\",\n    srcs = [\"utility_fuzz_test.cc\"],\n    corpus = \"utility_corpus\",\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"lc_trie_speed_test\",\n    srcs = [\"lc_trie_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/network:lc_trie_lib\",\n        \"//source/common/network:utility_lib\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"lc_trie_speed_test_benchmark_test\",\n    benchmark_binary = \"lc_trie_speed_test\",\n)\n\nenvoy_cc_test(\n    name = \"io_socket_handle_impl_test\",\n    srcs = [\"io_socket_handle_impl_test.cc\"],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"transport_socket_options_impl_test\",\n    srcs = [\"transport_socket_options_impl_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        
\"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/stream_info:filter_state_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"filter_matcher_test\",\n    srcs = [\"filter_matcher_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:filter_matcher_lib\",\n        \"//test/mocks/network:network_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/common/network/addr_family_aware_socket_option_impl_test.cc",
    "content": "#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/network/addr_family_aware_socket_option_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/socket_interface.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/common/network/socket_option_test.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nclass AddrFamilyAwareSocketOptionImplTest : public SocketOptionTest {\nprotected:\n  void SetUp() override {\n    EXPECT_CALL(os_sys_calls_, socket)\n        .WillRepeatedly(Invoke([this](int domain, int type, int protocol) {\n          return os_sys_calls_actual_.socket(domain, type, protocol);\n        }));\n    EXPECT_CALL(os_sys_calls_, close(_)).Times(testing::AnyNumber());\n  }\n};\n\n// We fail to set the option when the underlying setsockopt syscall fails.\nTEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionFailure) {\n  EXPECT_CALL(socket_, ipVersion).WillRepeatedly(testing::Return(absl::nullopt));\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND,\n      ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10),\n      {},\n      1};\n  EXPECT_LOG_CONTAINS(\"warning\", \"Failed to set IP socket option on non-IP socket\",\n                      EXPECT_FALSE(socket_option.setOption(\n                          socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND)));\n}\n\n// If a platform supports IPv4 socket option variant for an IPv4 address, it works\nTEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) {\n  EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4));\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND,\n      ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10),\n      {},\n      1};\n  testSetSocketOptionSuccess(socket_option, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1,\n                          
   {envoy::config::core::v3::SocketOption::STATE_PREBIND});\n}\n\n// If a platform doesn't support IPv4 socket option variant for an IPv4 address we fail\nTEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) {\n  EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4));\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1};\n  EXPECT_LOG_CONTAINS(\"warning\", \"Failed to set unsupported option on socket\",\n                      EXPECT_FALSE(socket_option.setOption(\n                          socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND)));\n}\n\n// If a platform doesn't support IPv4 and IPv6 socket option variants for an IPv4 address, we fail\nTEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) {\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1};\n  EXPECT_LOG_CONTAINS(\"warning\", \"Failed to set unsupported option on socket\",\n                      EXPECT_FALSE(socket_option.setOption(\n                          socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND)));\n}\n\n// If a platform supports IPv4 and IPv6 socket option variants for an IPv4 address, we apply the\n// IPv4 variant\nTEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) {\n  EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4));\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10),\n      ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1};\n  testSetSocketOptionSuccess(socket_option, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1,\n                             {envoy::config::core::v3::SocketOption::STATE_PREBIND});\n}\n\n// If a platform supports IPv6 socket option variant for an IPv6 address it works\nTEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) {\n  
EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6));\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND,\n      {},\n      ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11),\n      1};\n  testSetSocketOptionSuccess(socket_option, ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1,\n                             {envoy::config::core::v3::SocketOption::STATE_PREBIND});\n}\n\n// If a platform supports only the IPv4 variant for an IPv6 address,\n// we apply the IPv4 variant.\nTEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) {\n  EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6));\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND,\n      ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10),\n      {},\n      1};\n  testSetSocketOptionSuccess(socket_option, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1,\n                             {envoy::config::core::v3::SocketOption::STATE_PREBIND});\n}\n\n// If a platform supports IPv4 and IPv6 socket option variants for an IPv6 address,\n// AddrFamilyAwareSocketOptionImpl::setIpSocketOption() works with the IPv6 variant.\nTEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) {\n  EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6));\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10),\n      ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1};\n  testSetSocketOptionSuccess(socket_option, ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1,\n                             {envoy::config::core::v3::SocketOption::STATE_PREBIND});\n}\n\n// GetSocketOptionName returns the v4 information for a v4 address\nTEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) {\n  EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4));\n  
AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10),\n      ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1};\n  auto result =\n      socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  ASSERT_TRUE(result.has_value());\n  EXPECT_EQ(result.value(), makeDetails(ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1));\n}\n\n// GetSocketOptionName returns the v4 information for a v6 address\nTEST_F(AddrFamilyAwareSocketOptionImplTest, V6GetSocketOptionName) {\n  EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6));\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10),\n      ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 5};\n  auto result =\n      socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  ASSERT_TRUE(result.has_value());\n  EXPECT_EQ(result.value(), makeDetails(ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 5));\n}\n\n// GetSocketOptionName returns nullopt if the state is wrong\nTEST_F(AddrFamilyAwareSocketOptionImplTest, GetSocketOptionWrongState) {\n  EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6));\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10),\n      ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 5};\n  auto result =\n      socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_BOUND);\n  EXPECT_FALSE(result.has_value());\n}\n\n// GetSocketOptionName returns nullopt if the version could not be determined\nTEST_F(AddrFamilyAwareSocketOptionImplTest, GetSocketOptionCannotDetermineVersion) {\n  AddrFamilyAwareSocketOptionImpl socket_option{\n      envoy::config::core::v3::SocketOption::STATE_PREBIND, 
ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10),\n      ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 5};\n\n  IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>();\n  EXPECT_CALL(socket_, ipVersion).WillOnce(testing::Return(absl::nullopt));\n  auto result =\n      socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  EXPECT_FALSE(result.has_value());\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/address_impl_speed_test.cc",
    "content": "#include \"common/common/fmt.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\n\nstatic void Ipv4InstanceCreate(benchmark::State& state) {\n  sockaddr_in addr;\n  addr.sin_family = AF_INET;\n  addr.sin_port = htons(443);\n  static constexpr uint32_t Addr = 0xc00002ff; // From the RFC 5737 example range.\n  addr.sin_addr.s_addr = htonl(Addr);\n  for (auto _ : state) {\n    Ipv4Instance address(&addr);\n    benchmark::DoNotOptimize(address.ip());\n  }\n}\nBENCHMARK(Ipv4InstanceCreate);\n\nstatic void Ipv6InstanceCreate(benchmark::State& state) {\n  sockaddr_in6 addr;\n  addr.sin6_family = AF_INET6;\n  addr.sin6_port = htons(443);\n  static const char* Addr = \"2001:DB8::1234\"; // From the RFC 3849 example range.\n  inet_pton(AF_INET6, Addr, &addr.sin6_addr);\n  for (auto _ : state) {\n    Ipv6Instance address(addr);\n    benchmark::DoNotOptimize(address.ip());\n  }\n}\nBENCHMARK(Ipv6InstanceCreate);\n\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/address_impl_test.cc",
    "content": "#include <iostream>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\nnamespace {\n\nbool addressesEqual(const InstanceConstSharedPtr& a, const Instance& b) {\n  if (a == nullptr || a->type() != Type::Ip || b.type() != Type::Ip) {\n    return false;\n  } else {\n    return a->ip()->addressAsString() == b.ip()->addressAsString();\n  }\n}\n\nvoid testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6only) {\n  auto addr_port = Network::Utility::parseInternetAddressAndPort(\n      fmt::format(\"{}:0\", Network::Test::getAnyAddressUrlString(ip_version)), v6only);\n  ASSERT_NE(addr_port, nullptr);\n\n  if (addr_port->ip()->port() == 0) {\n    addr_port = Network::Test::findOrCheckFreePort(addr_port, Socket::Type::Stream);\n  }\n  ASSERT_NE(addr_port, nullptr);\n  ASSERT_NE(addr_port->ip(), nullptr);\n\n  // Create a socket on which we'll listen for connections from clients.\n  SocketImpl sock(Socket::Type::Stream, addr_port);\n  EXPECT_TRUE(sock.ioHandle().isOpen()) << addr_port->asString();\n\n  // Check that IPv6 sockets accept IPv6 connections only.\n  if (addr_port->ip()->version() == IpVersion::v6) {\n    int socket_v6only = 0;\n    socklen_t size_int = sizeof(socket_v6only);\n    ASSERT_GE(sock.getSocketOption(IPPROTO_IPV6, 
IPV6_V6ONLY, &socket_v6only, &size_int).rc_, 0);\n    EXPECT_EQ(v6only, socket_v6only != 0);\n  }\n\n  // Bind the socket to the desired address and port.\n  const Api::SysCallIntResult result = sock.bind(addr_port);\n  ASSERT_EQ(result.rc_, 0) << addr_port->asString() << \"\\nerror: \" << errorDetails(result.errno_)\n                           << \"\\nerrno: \" << result.errno_;\n\n  // Do a bare listen syscall. Not bothering to accept connections as that would\n  // require another thread.\n  ASSERT_EQ(sock.listen(128).rc_, 0);\n\n  auto client_connect = [](Address::InstanceConstSharedPtr addr_port) {\n    // Create a client socket and connect to the server.\n    SocketImpl client_sock(Socket::Type::Stream, addr_port);\n\n    EXPECT_TRUE(client_sock.ioHandle().isOpen()) << addr_port->asString();\n\n    // Instance::socket creates a non-blocking socket, which that extends all the way to the\n    // operation of ::connect(), so connect returns with errno==EWOULDBLOCK before the tcp\n    // handshake can complete. 
For testing convenience, re-enable blocking on the socket\n    // so that connect will wait for the handshake to complete.\n    ASSERT_EQ(client_sock.setBlockingForTest(true).rc_, 0);\n\n    // Connect to the server.\n    const Api::SysCallIntResult result = client_sock.connect(addr_port);\n    ASSERT_EQ(result.rc_, 0) << addr_port->asString() << \"\\nerror: \" << errorDetails(result.errno_)\n                             << \"\\nerrno: \" << result.errno_;\n  };\n\n  auto client_addr_port = Network::Utility::parseInternetAddressAndPort(\n      fmt::format(\"{}:{}\", Network::Test::getLoopbackAddressUrlString(ip_version),\n                  addr_port->ip()->port()),\n      v6only);\n  ASSERT_NE(client_addr_port, nullptr);\n  client_connect(client_addr_port);\n\n  if (!v6only) {\n    ASSERT_EQ(IpVersion::v6, addr_port->ip()->version());\n    auto v4_addr_port = Network::Utility::parseInternetAddress(\n        Network::Test::getLoopbackAddressUrlString(Network::Address::IpVersion::v4),\n        addr_port->ip()->port(), true);\n    ASSERT_NE(v4_addr_port, nullptr);\n    client_connect(v4_addr_port);\n  }\n}\n} // namespace\n\nclass AddressImplSocketTest : public testing::TestWithParam<IpVersion> {};\nINSTANTIATE_TEST_SUITE_P(IpVersions, AddressImplSocketTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AddressImplSocketTest, SocketBindAndConnect) {\n  // Test listening on and connecting to an unused port with an IP loopback address.\n  testSocketBindAndConnect(GetParam(), true);\n}\n\nTEST(Ipv4CompatAddressImplSocktTest, SocketBindAndConnect) {\n  if (TestEnvironment::shouldRunTestForIpVersion(Network::Address::IpVersion::v6)) {\n    testSocketBindAndConnect(Network::Address::IpVersion::v6, false);\n  }\n}\n\nTEST(Ipv4InstanceTest, SockaddrToString) {\n  // Test addresses from various RFC 5735 reserved ranges\n  static const char* addresses[] = 
{\"0.0.0.0\",        \"0.0.0.255\",       \"0.0.255.255\",\n                                    \"0.255.255.255\",  \"192.0.2.0\",       \"198.151.100.1\",\n                                    \"198.151.100.10\", \"198.151.100.100\", \"10.0.0.1\",\n                                    \"10.0.20.1\",      \"10.3.201.1\",      \"255.255.255.255\"};\n\n  for (const auto address : addresses) {\n    sockaddr_in addr4;\n    addr4.sin_family = AF_INET;\n    EXPECT_EQ(1, inet_pton(AF_INET, address, &addr4.sin_addr));\n    addr4.sin_port = 0;\n    EXPECT_STREQ(address, Ipv4Instance::sockaddrToString(addr4).c_str());\n  }\n}\n\nTEST(Ipv4InstanceTest, SocketAddress) {\n  sockaddr_in addr4;\n  addr4.sin_family = AF_INET;\n  EXPECT_EQ(1, inet_pton(AF_INET, \"1.2.3.4\", &addr4.sin_addr));\n  addr4.sin_port = htons(6502);\n\n  Ipv4Instance address(&addr4);\n  EXPECT_EQ(\"1.2.3.4:6502\", address.asString());\n  EXPECT_EQ(\"1.2.3.4:6502\", address.asStringView());\n  EXPECT_EQ(\"1.2.3.4:6502\", address.logicalName());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"1.2.3.4\", address.ip()->addressAsString());\n  EXPECT_EQ(6502U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v4, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress(\"1.2.3.4\"), address));\n  EXPECT_EQ(nullptr, address.ip()->ipv6());\n  EXPECT_TRUE(address.ip()->isUnicastAddress());\n  EXPECT_EQ(nullptr, address.pipe());\n  EXPECT_EQ(nullptr, address.envoyInternalAddress());\n}\n\nTEST(Ipv4InstanceTest, AddressOnly) {\n  Ipv4Instance address(\"3.4.5.6\");\n  EXPECT_EQ(\"3.4.5.6:0\", address.asString());\n  EXPECT_EQ(\"3.4.5.6:0\", address.asStringView());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"3.4.5.6\", address.ip()->addressAsString());\n  EXPECT_EQ(0U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v4, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress(\"3.4.5.6\"), address));\n  
EXPECT_TRUE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv4InstanceTest, AddressAndPort) {\n  Ipv4Instance address(\"127.0.0.1\", 80);\n  EXPECT_EQ(\"127.0.0.1:80\", address.asString());\n  EXPECT_EQ(\"127.0.0.1:80\", address.asStringView());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"127.0.0.1\", address.ip()->addressAsString());\n  EXPECT_FALSE(address.ip()->isAnyAddress());\n  EXPECT_EQ(80U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v4, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress(\"127.0.0.1\"), address));\n  EXPECT_TRUE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv4InstanceTest, PortOnly) {\n  Ipv4Instance address(443);\n  EXPECT_EQ(\"0.0.0.0:443\", address.asString());\n  EXPECT_EQ(\"0.0.0.0:443\", address.asStringView());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"0.0.0.0\", address.ip()->addressAsString());\n  EXPECT_TRUE(address.ip()->isAnyAddress());\n  EXPECT_EQ(443U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v4, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress(\"0.0.0.0\"), address));\n  EXPECT_FALSE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv4InstanceTest, Multicast) {\n  Ipv4Instance address(\"230.0.0.1\");\n  EXPECT_EQ(\"230.0.0.1:0\", address.asString());\n  EXPECT_EQ(\"230.0.0.1:0\", address.asStringView());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"230.0.0.1\", address.ip()->addressAsString());\n  EXPECT_FALSE(address.ip()->isAnyAddress());\n  EXPECT_EQ(0U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v4, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress(\"230.0.0.1\"), address));\n  EXPECT_FALSE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv4InstanceTest, Broadcast) {\n  Ipv4Instance address(\"255.255.255.255\");\n  EXPECT_EQ(\"255.255.255.255:0\", address.asString());\n  EXPECT_EQ(\"255.255.255.255:0\", address.asStringView());\n  
EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"255.255.255.255\", address.ip()->addressAsString());\n  EXPECT_EQ(0U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v4, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress(\"255.255.255.255\"), address));\n  EXPECT_FALSE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv4InstanceTest, BadAddress) {\n  EXPECT_THROW(Ipv4Instance(\"foo\"), EnvoyException);\n  EXPECT_THROW(Ipv4Instance(\"bar\", 1), EnvoyException);\n}\n\nTEST(Ipv6InstanceTest, SocketAddress) {\n  sockaddr_in6 addr6;\n  addr6.sin6_family = AF_INET6;\n  EXPECT_EQ(1, inet_pton(AF_INET6, \"01:023::00Ef\", &addr6.sin6_addr));\n  addr6.sin6_port = htons(32000);\n\n  Ipv6Instance address(addr6);\n  EXPECT_EQ(\"[1:23::ef]:32000\", address.asString());\n  EXPECT_EQ(\"[1:23::ef]:32000\", address.asStringView());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"1:23::ef\", address.ip()->addressAsString());\n  EXPECT_FALSE(address.ip()->isAnyAddress());\n  EXPECT_EQ(32000U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v6, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress(\"1:0023::0Ef\"), address));\n  EXPECT_EQ(nullptr, address.ip()->ipv4());\n  EXPECT_TRUE(address.ip()->isUnicastAddress());\n  EXPECT_EQ(nullptr, address.pipe());\n  EXPECT_EQ(nullptr, address.envoyInternalAddress());\n}\n\nTEST(Ipv6InstanceTest, AddressOnly) {\n  Ipv6Instance address(\"2001:0db8:85a3:0000:0000:8a2e:0370:7334\");\n  EXPECT_EQ(\"[2001:db8:85a3::8a2e:370:7334]:0\", address.asString());\n  EXPECT_EQ(\"[2001:db8:85a3::8a2e:370:7334]:0\", address.asStringView());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"2001:db8:85a3::8a2e:370:7334\", address.ip()->addressAsString());\n  EXPECT_EQ(0U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v6, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(\n      Network::Utility::parseInternetAddress(\"2001:db8:85a3::8a2e:0370:7334\"), 
address));\n  EXPECT_TRUE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv6InstanceTest, AddressAndPort) {\n  Ipv6Instance address(\"::0001\", 80);\n  EXPECT_EQ(\"[::1]:80\", address.asString());\n  EXPECT_EQ(\"[::1]:80\", address.asStringView());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"::1\", address.ip()->addressAsString());\n  EXPECT_EQ(80U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v6, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress(\"0:0:0:0:0:0:0:1\"), address));\n  EXPECT_TRUE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv6InstanceTest, PortOnly) {\n  Ipv6Instance address(443);\n  EXPECT_EQ(\"[::]:443\", address.asString());\n  EXPECT_EQ(\"[::]:443\", address.asStringView());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"::\", address.ip()->addressAsString());\n  EXPECT_TRUE(address.ip()->isAnyAddress());\n  EXPECT_EQ(443U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v6, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress(\"::0000\"), address));\n  EXPECT_FALSE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv6InstanceTest, Multicast) {\n  Ipv6Instance address(\"FF00::\");\n  EXPECT_EQ(\"[ff00::]:0\", address.asString());\n  EXPECT_EQ(\"[ff00::]:0\", address.asStringView());\n  EXPECT_EQ(Type::Ip, address.type());\n  EXPECT_EQ(\"ff00::\", address.ip()->addressAsString());\n  EXPECT_FALSE(address.ip()->isAnyAddress());\n  EXPECT_EQ(0U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v6, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(\n      Network::Utility::parseInternetAddress(\"FF00:0000:0000:0000:0000:0000:0000:0000\"), address));\n  EXPECT_FALSE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv6InstanceTest, Broadcast) {\n  Ipv6Instance address(\"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF\");\n  EXPECT_EQ(\"[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff]:0\", address.asString());\n  EXPECT_EQ(Type::Ip, address.type());\n  
EXPECT_EQ(\"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\", address.ip()->addressAsString());\n  EXPECT_EQ(0U, address.ip()->port());\n  EXPECT_EQ(IpVersion::v6, address.ip()->version());\n  EXPECT_TRUE(addressesEqual(\n      Network::Utility::parseInternetAddress(\"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF\"), address));\n  EXPECT_FALSE(address.ip()->isUnicastAddress());\n}\n\nTEST(Ipv6InstanceTest, BadAddress) {\n  EXPECT_THROW(Ipv6Instance(\"foo\"), EnvoyException);\n  EXPECT_THROW(Ipv6Instance(\"bar\", 1), EnvoyException);\n}\n\nTEST(PipeInstanceTest, Basic) {\n  PipeInstance address(\"/foo\");\n  EXPECT_EQ(\"/foo\", address.asString());\n  EXPECT_EQ(Type::Pipe, address.type());\n  EXPECT_EQ(nullptr, address.ip());\n  EXPECT_EQ(nullptr, address.envoyInternalAddress());\n}\n\nTEST(InteralInstanceTest, Basic) {\n  EnvoyInternalInstance address(\"listener_foo\");\n  EXPECT_EQ(\"envoy://listener_foo\", address.asString());\n  EXPECT_EQ(Type::EnvoyInternal, address.type());\n  EXPECT_EQ(nullptr, address.ip());\n  EXPECT_EQ(nullptr, address.pipe());\n  EXPECT_NE(nullptr, address.envoyInternalAddress());\n  EXPECT_EQ(nullptr, address.sockAddr());\n  EXPECT_EQ(static_cast<decltype(address.sockAddrLen())>(0), address.sockAddrLen());\n}\n\n#ifndef WIN32\nTEST(PipeInstanceTest, BasicPermission) {\n  std::string path = TestEnvironment::unixDomainSocketPath(\"foo.sock\");\n\n  const mode_t mode = 0777;\n  PipeInstance pipe(path, mode);\n  InstanceConstSharedPtr address = std::make_shared<PipeInstance>(pipe);\n  SocketImpl sock(Socket::Type::Stream, address);\n\n  EXPECT_TRUE(sock.ioHandle().isOpen()) << pipe.asString();\n\n  Api::SysCallIntResult result = sock.bind(address);\n  ASSERT_EQ(result.rc_, 0) << pipe.asString() << \"\\nerror: \" << errorDetails(result.errno_)\n                           << \"\\terrno: \" << result.errno_;\n\n  Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get();\n  struct stat stat_buf;\n  result = os_sys_calls.stat(path.c_str(), 
&stat_buf);\n  EXPECT_EQ(result.rc_, 0);\n  // Get file permissions bits\n  ASSERT_EQ(stat_buf.st_mode & 07777, mode)\n      << path << std::oct << \"\\t\" << (stat_buf.st_mode & 07777) << std::dec << \"\\t\"\n      << (stat_buf.st_mode) << errorDetails(result.errno_);\n}\n#endif\n\nTEST(PipeInstanceTest, PermissionFail) {\n  NiceMock<Api::MockOsSysCalls> os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n  std::string path = TestEnvironment::unixDomainSocketPath(\"foo.sock\");\n\n  const mode_t mode = 0777;\n  PipeInstance pipe(path, mode);\n  InstanceConstSharedPtr address = std::make_shared<PipeInstance>(pipe);\n  SocketImpl sock(Socket::Type::Stream, address);\n\n  EXPECT_TRUE(sock.ioHandle().isOpen()) << pipe.asString();\n\n  EXPECT_CALL(os_sys_calls, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{0, 0}));\n  EXPECT_CALL(os_sys_calls, chmod(_, _)).WillOnce(Return(Api::SysCallIntResult{-1, 0}));\n  EXPECT_THROW_WITH_REGEX(sock.bind(address), EnvoyException, \"Failed to create socket with mode\");\n}\n\nTEST(PipeInstanceTest, AbstractNamespacePermission) {\n#if defined(__linux__)\n  std::string path = \"@/foo\";\n  const mode_t mode = 0777;\n  EXPECT_THROW_WITH_REGEX(PipeInstance address(path, mode), EnvoyException,\n                          \"Cannot set mode for Abstract AF_UNIX sockets\");\n\n  sockaddr_un sun;\n  sun.sun_family = AF_UNIX;\n  StringUtil::strlcpy(&sun.sun_path[1], path.data(), path.size());\n  sun.sun_path[0] = '\\0';\n  socklen_t ss_len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sun.sun_path);\n\n  EXPECT_THROW_WITH_REGEX(PipeInstance address(&sun, ss_len, mode), EnvoyException,\n                          \"Cannot set mode for Abstract AF_UNIX sockets\");\n#endif\n}\n\nTEST(PipeInstanceTest, AbstractNamespace) {\n#if defined(__linux__)\n  PipeInstance address(\"@/foo\");\n  EXPECT_EQ(\"@/foo\", address.asString());\n  EXPECT_EQ(\"@/foo\", address.asStringView());\n  
EXPECT_EQ(Type::Pipe, address.type());\n  EXPECT_EQ(nullptr, address.ip());\n#else\n  EXPECT_THROW(PipeInstance address(\"@/foo\"), EnvoyException);\n#endif\n}\n\nTEST(PipeInstanceTest, BadAddress) {\n  std::string long_address(1000, 'X');\n  EXPECT_THROW_WITH_REGEX(PipeInstance address(long_address), EnvoyException,\n                          \"exceeds maximum UNIX domain socket path size\");\n}\n\n// Validate that embedded nulls in abstract socket addresses are included and represented with '@'.\nTEST(PipeInstanceTest, EmbeddedNullAbstractNamespace) {\n  std::string embedded_null(\"@/foo/bar\");\n  embedded_null[5] = '\\0'; // Set embedded null.\n#if defined(__linux__)\n  PipeInstance address(embedded_null);\n  EXPECT_EQ(\"@/foo@bar\", address.asString());\n  EXPECT_EQ(\"@/foo@bar\", address.asStringView());\n  EXPECT_EQ(Type::Pipe, address.type());\n  EXPECT_EQ(nullptr, address.ip());\n#else\n  EXPECT_THROW(PipeInstance address(embedded_null), EnvoyException);\n#endif\n}\n\n// Reject embedded nulls in filesystem pathname addresses.\nTEST(PipeInstanceTest, EmbeddedNullPathError) {\n  std::string embedded_null(\"/foo/bar\");\n  embedded_null[4] = '\\0'; // Set embedded null.\n  EXPECT_THROW_WITH_REGEX(PipeInstance address(embedded_null), EnvoyException,\n                          \"contains embedded null characters\");\n}\n\nTEST(PipeInstanceTest, UnlinksExistingFile) {\n  const auto bind_uds_socket = [](const std::string& path) {\n    PipeInstance pipe(path);\n    InstanceConstSharedPtr address = std::make_shared<PipeInstance>(pipe);\n    SocketImpl sock(Socket::Type::Stream, address);\n\n    EXPECT_TRUE(sock.ioHandle().isOpen()) << pipe.asString();\n\n    const Api::SysCallIntResult result = sock.bind(address);\n\n    ASSERT_EQ(result.rc_, 0) << pipe.asString() << \"\\nerror: \" << errorDetails(result.errno_)\n                             << \"\\nerrno: \" << result.errno_;\n  };\n\n  const std::string path = 
TestEnvironment::unixDomainSocketPath(\"UnlinksExistingFile.sock\");\n  bind_uds_socket(path);\n  bind_uds_socket(path); // after closing, second bind to the same path should succeed.\n}\n\nTEST(AddressFromSockAddrDeathTest, IPv4) {\n  sockaddr_storage ss;\n  auto& sin = reinterpret_cast<sockaddr_in&>(ss);\n\n  sin.sin_family = AF_INET;\n  EXPECT_EQ(1, inet_pton(AF_INET, \"1.2.3.4\", &sin.sin_addr));\n  sin.sin_port = htons(6502);\n\n  EXPECT_DEATH(addressFromSockAddr(ss, 1), \"ss_len\");\n  EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in) - 1), \"ss_len\");\n  EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in) + 1), \"ss_len\");\n\n  EXPECT_EQ(\"1.2.3.4:6502\", addressFromSockAddr(ss, sizeof(sockaddr_in))->asString());\n\n  // Invalid family.\n  sin.sin_family = AF_UNSPEC;\n  EXPECT_THROW(addressFromSockAddr(ss, sizeof(sockaddr_in)), EnvoyException);\n}\n\nTEST(AddressFromSockAddrDeathTest, IPv6) {\n  sockaddr_storage ss;\n  auto& sin6 = reinterpret_cast<sockaddr_in6&>(ss);\n\n  sin6.sin6_family = AF_INET6;\n  EXPECT_EQ(1, inet_pton(AF_INET6, \"01:023::00Ef\", &sin6.sin6_addr));\n  sin6.sin6_port = htons(32000);\n\n  EXPECT_DEATH(addressFromSockAddr(ss, 1), \"ss_len\");\n  EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in6) - 1), \"ss_len\");\n  EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in6) + 1), \"ss_len\");\n\n  EXPECT_EQ(\"[1:23::ef]:32000\", addressFromSockAddr(ss, sizeof(sockaddr_in6))->asString());\n\n  // Test that IPv4-mapped IPv6 address is returned as an Ipv4Instance when 'v6only' parameter is\n  // 'false', but not otherwise.\n  EXPECT_EQ(1, inet_pton(AF_INET6, \"::ffff:192.0.2.128\", &sin6.sin6_addr));\n  EXPECT_EQ(IpVersion::v4, addressFromSockAddr(ss, sizeof(sockaddr_in6), false)->ip()->version());\n  EXPECT_EQ(\"192.0.2.128:32000\", addressFromSockAddr(ss, sizeof(sockaddr_in6), false)->asString());\n  EXPECT_EQ(IpVersion::v6, addressFromSockAddr(ss, sizeof(sockaddr_in6), true)->ip()->version());\n  
EXPECT_EQ(\"[::ffff:192.0.2.128]:32000\",\n            addressFromSockAddr(ss, sizeof(sockaddr_in6), true)->asString());\n}\n\nTEST(AddressFromSockAddrDeathTest, Pipe) {\n  sockaddr_storage ss;\n  auto& sun = reinterpret_cast<sockaddr_un&>(ss);\n  sun.sun_family = AF_UNIX;\n\n  StringUtil::strlcpy(sun.sun_path, \"/some/path\", sizeof sun.sun_path);\n\n  EXPECT_DEATH(addressFromSockAddr(ss, 1), \"ss_len\");\n  EXPECT_DEATH(addressFromSockAddr(ss, offsetof(struct sockaddr_un, sun_path)), \"ss_len\");\n\n  socklen_t ss_len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sun.sun_path);\n  EXPECT_EQ(\"/some/path\", addressFromSockAddr(ss, ss_len)->asString());\n\n  // Abstract socket namespace.\n  StringUtil::strlcpy(&sun.sun_path[1], \"/some/abstract/path\", sizeof sun.sun_path);\n  sun.sun_path[0] = '\\0';\n  ss_len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(\"/some/abstract/path\");\n#if defined(__linux__)\n  EXPECT_EQ(\"@/some/abstract/path\", addressFromSockAddr(ss, ss_len)->asString());\n#else\n  EXPECT_THROW(addressFromSockAddr(ss, ss_len), EnvoyException);\n#endif\n}\n\n// Test comparisons between all the different (known) test classes.\nstruct TestCase {\n  enum InstanceType { Ipv4, Ipv6, Pipe, Internal };\n\n  TestCase() = default;\n  TestCase(enum InstanceType type, const std::string& address, uint32_t port)\n      : address_(address), type_(type), port_(port) {}\n  TestCase(const TestCase& rhs) = default;\n\n  bool operator==(const TestCase& rhs) {\n    return (type_ == rhs.type_ && address_ == rhs.address_ && port_ == rhs.port_);\n  }\n\n  std::string address_;\n  enum InstanceType type_ { Ipv4 };\n  uint32_t port_ = 0; // Ignored for Pipe\n};\n\nclass MixedAddressTest : public testing::TestWithParam<::testing::tuple<TestCase, TestCase>> {\npublic:\nprotected:\n  InstanceConstSharedPtr testCaseToInstance(const struct TestCase& test_case) {\n    // Catch default construction.\n    if (test_case.address_.empty()) {\n      return nullptr;\n    
}\n    switch (test_case.type_) {\n    case TestCase::Ipv4:\n      return std::make_shared<Ipv4Instance>(test_case.address_, test_case.port_);\n      break;\n    case TestCase::Ipv6:\n      return std::make_shared<Ipv6Instance>(test_case.address_, test_case.port_);\n      break;\n    case TestCase::Pipe:\n      return std::make_shared<PipeInstance>(test_case.address_);\n      break;\n    case TestCase::Internal:\n      return std::make_shared<EnvoyInternalInstance>(test_case.address_);\n      break;\n    }\n    return nullptr;\n  }\n};\n\nTEST_P(MixedAddressTest, Equality) {\n  TestCase lhs_case = ::testing::get<0>(GetParam());\n  const TestCase& rhs_case = ::testing::get<1>(GetParam());\n  InstanceConstSharedPtr lhs = testCaseToInstance(lhs_case);\n  InstanceConstSharedPtr rhs = testCaseToInstance(rhs_case);\n  if (lhs_case == rhs_case) {\n    EXPECT_EQ(*lhs, *rhs) << lhs->asString() << \" != \" << rhs->asString();\n  } else {\n    EXPECT_NE(*lhs, *rhs) << lhs->asString() << \" == \" << rhs->asString();\n  }\n}\n\nstruct TestCase test_cases[] = {\n    {TestCase::Ipv4, \"1.2.3.4\", 1},          {TestCase::Ipv4, \"1.2.3.4\", 2},\n    {TestCase::Ipv4, \"1.2.3.5\", 1},          {TestCase::Ipv6, \"01:023::00ef\", 1},\n    {TestCase::Ipv6, \"01:023::00ef\", 2},     {TestCase::Ipv6, \"01:023::00ed\", 1},\n    {TestCase::Pipe, \"/path/to/pipe/1\", 0},  {TestCase::Pipe, \"/path/to/pipe/2\", 0},\n    {TestCase::Internal, \"listener_foo\", 0}, {TestCase::Internal, \"listener_bar\", 0}};\n\nINSTANTIATE_TEST_SUITE_P(AddressCrossProduct, MixedAddressTest,\n                         ::testing::Combine(::testing::ValuesIn(test_cases),\n                                            ::testing::ValuesIn(test_cases)));\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/apple_dns_impl_test.cc",
    "content": "#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/dns.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/apple_dns_impl.h\"\n#include \"common/network/filter_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/container/node_hash_map.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Contains;\nusing testing::InSequence;\nusing testing::IsSupersetOf;\nusing testing::NiceMock;\nusing testing::Not;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\n// Note: this test suite is, unfortunately, not hermetic. Apple's APIs do not allow overriding the\n// IP address used for resolution via API calls (only in system settings), and worse\n// yet does not allow overriding the port number used _at all_. Therefore, the tests do not use a\n// test DNS server like in dns_impl_test, and thus affords less flexibility in testing scenarios: no\n// concurrent requests, no expressive error responses, etc. 
Further experiments could be done in\n// order to create a test connection that is reachable locally (potentially by binding port 53 --\n// default for DNS). However, @junr03's initial attempts were not successful.\nclass AppleDnsImplTest : public testing::Test {\npublic:\n  AppleDnsImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  void SetUp() override { resolver_ = dispatcher_->createDnsResolver({}, false); }\n\n  ActiveDnsQuery* resolveWithExpectations(const std::string& address,\n                                          const DnsLookupFamily lookup_family,\n                                          const DnsResolver::ResolutionStatus expected_status,\n                                          const bool expected_results) {\n    return resolver_->resolve(\n        address, lookup_family,\n        [=](DnsResolver::ResolutionStatus status, std::list<DnsResponse>&& results) -> void {\n          EXPECT_EQ(expected_status, status);\n          if (expected_results) {\n            EXPECT_FALSE(results.empty());\n            for (const auto& result : results) {\n              if (lookup_family == DnsLookupFamily::V4Only) {\n                EXPECT_NE(nullptr, result.address_->ip()->ipv4());\n              } else if (lookup_family == DnsLookupFamily::V6Only) {\n                EXPECT_NE(nullptr, result.address_->ip()->ipv6());\n              }\n            }\n          }\n          dispatcher_->exit();\n        });\n  }\n\n  ActiveDnsQuery* resolveWithUnreferencedParameters(const std::string& address,\n                                                    const DnsLookupFamily lookup_family,\n                                                    bool expected_to_execute) {\n    return resolver_->resolve(address, lookup_family,\n                              [expected_to_execute](DnsResolver::ResolutionStatus status,\n                                                    std::list<DnsResponse>&& results) -> void 
{\n                                if (!expected_to_execute) {\n                                  FAIL();\n                                }\n                                UNREFERENCED_PARAMETER(status);\n                                UNREFERENCED_PARAMETER(results);\n                              });\n  }\n\n  template <typename T>\n  ActiveDnsQuery* resolveWithException(const std::string& address,\n                                       const DnsLookupFamily lookup_family, T exception_object) {\n    return resolver_->resolve(address, lookup_family,\n                              [exception_object](DnsResolver::ResolutionStatus status,\n                                                 std::list<DnsResponse>&& results) -> void {\n                                UNREFERENCED_PARAMETER(status);\n                                UNREFERENCED_PARAMETER(results);\n                                throw exception_object;\n                              });\n  }\n\nprotected:\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  DnsResolverSharedPtr resolver_;\n};\n\nTEST_F(AppleDnsImplTest, InvalidConfigOptions) {\n  EXPECT_DEATH(\n      dispatcher_->createDnsResolver({}, true),\n      \"using TCP for DNS lookups is not possible when using Apple APIs for DNS resolution\");\n  EXPECT_DEATH(\n      dispatcher_->createDnsResolver({nullptr}, false),\n      \"defining custom resolvers is not possible when using Apple APIs for DNS resolution\");\n}\n\n// Validate that when AppleDnsResolverImpl is destructed with outstanding requests,\n// that we don't invoke any callbacks if the query was cancelled. 
This is a regression test from\n// development, where segfaults were encountered due to callback invocations on\n// destruction.\nTEST_F(AppleDnsImplTest, DestructPending) {\n  ActiveDnsQuery* query = resolveWithUnreferencedParameters(\"\", DnsLookupFamily::V4Only, 0);\n  ASSERT_NE(nullptr, query);\n  query->cancel();\n}\n\nTEST_F(AppleDnsImplTest, LocalLookup) {\n  EXPECT_NE(nullptr, resolveWithExpectations(\"localhost\", DnsLookupFamily::Auto,\n                                             DnsResolver::ResolutionStatus::Success, true));\n}\n\nTEST_F(AppleDnsImplTest, DnsIpAddressVersion) {\n  EXPECT_NE(nullptr, resolveWithExpectations(\"google.com\", DnsLookupFamily::Auto,\n                                             DnsResolver::ResolutionStatus::Success, true));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_NE(nullptr, resolveWithExpectations(\"google.com\", DnsLookupFamily::V4Only,\n                                             DnsResolver::ResolutionStatus::Success, true));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_NE(nullptr, resolveWithExpectations(\"google.com\", DnsLookupFamily::V6Only,\n                                             DnsResolver::ResolutionStatus::Success, true));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_F(AppleDnsImplTest, CallbackException) {\n  EXPECT_NE(nullptr, resolveWithException<EnvoyException>(\"1.2.3.4\", DnsLookupFamily::V4Only,\n                                                          EnvoyException(\"Envoy exception\")));\n  EXPECT_THROW_WITH_MESSAGE(dispatcher_->run(Event::Dispatcher::RunType::Block), EnvoyException,\n                            \"Envoy exception\");\n}\n\nTEST_F(AppleDnsImplTest, CallbackException2) {\n  EXPECT_NE(nullptr, resolveWithException<std::runtime_error>(\"1.2.3.4\", DnsLookupFamily::V4Only,\n                                                              std::runtime_error(\"runtime error\")));\n  
EXPECT_THROW_WITH_MESSAGE(dispatcher_->run(Event::Dispatcher::RunType::Block), EnvoyException,\n                            \"runtime error\");\n}\n\nTEST_F(AppleDnsImplTest, CallbackException3) {\n  EXPECT_NE(nullptr,\n            resolveWithException<std::string>(\"1.2.3.4\", DnsLookupFamily::V4Only, std::string()));\n  EXPECT_THROW_WITH_MESSAGE(dispatcher_->run(Event::Dispatcher::RunType::Block), EnvoyException,\n                            \"unknown\");\n}\n\n// Validate working of cancellation provided by ActiveDnsQuery return.\nTEST_F(AppleDnsImplTest, Cancel) {\n  ActiveDnsQuery* query =\n      resolveWithUnreferencedParameters(\"some.domain\", DnsLookupFamily::Auto, false);\n\n  EXPECT_NE(nullptr, resolveWithExpectations(\"google.com\", DnsLookupFamily::Auto,\n                                             DnsResolver::ResolutionStatus::Success, true));\n\n  ASSERT_NE(nullptr, query);\n  query->cancel();\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_F(AppleDnsImplTest, Timeout) {\n  EXPECT_NE(nullptr, resolveWithExpectations(\"some.domain\", DnsLookupFamily::V6Only,\n                                             DnsResolver::ResolutionStatus::Failure, false));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/cidr_range_test.cc",
    "content": "#include <iostream>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/cidr_range.h\"\n#include \"common/network/utility.h\"\n\n#include \"gtest/gtest.h\"\n\n// We are adding things into the std namespace.\n// Note that this is technically undefined behavior!\nnamespace std {\n\n// Pair\ntemplate <typename First, typename Second>\nstd::ostream& operator<<(std::ostream& out, const std::pair<First, Second>& p) {\n  return out << '(' << p.first << \", \" << p.second << ')';\n}\n\n} // namespace std\n\nnamespace Envoy {\n\nnamespace Network {\nnamespace Address {\nnamespace {\n\nTEST(TruncateIpAddressAndLength, Various) {\n  std::map<std::pair<std::string, int>, std::pair<std::string, int>> test_cases = {\n      // IPv4\n      {{\"1.2.3.5\", -100}, {\"\", -1}},\n      {{\"1.2.3.5\", 0}, {\"0.0.0.0\", 0}},\n      {{\"1.2.3.5\", 1}, {\"0.0.0.0\", 1}},\n      {{\"1.2.3.5\", 7}, {\"0.0.0.0\", 7}},\n      {{\"1.2.3.5\", 8}, {\"1.0.0.0\", 8}},\n      {{\"1.2.3.5\", 14}, {\"1.0.0.0\", 14}},\n      {{\"1.2.3.5\", 15}, {\"1.2.0.0\", 15}},\n      {{\"1.2.3.5\", 22}, {\"1.2.0.0\", 22}},\n      {{\"1.2.3.5\", 23}, {\"1.2.2.0\", 23}},\n      {{\"1.2.3.5\", 24}, {\"1.2.3.0\", 24}},\n      {{\"1.2.3.5\", 29}, {\"1.2.3.0\", 29}},\n      {{\"1.2.3.5\", 30}, {\"1.2.3.4\", 30}},\n      {{\"1.2.3.5\", 31}, {\"1.2.3.4\", 31}},\n      {{\"1.2.3.5\", 32}, {\"1.2.3.5\", 32}},\n      {{\"1.2.3.5\", 33}, {\"1.2.3.5\", 32}},\n      // IPv6\n      {{\"::\", -100}, {\"\", -1}},\n      {{\"ffff::ffff\", 0}, {\"::\", 0}},\n      {{\"ffff::ffff\", 1}, {\"8000::\", 1}},\n      {{\"ffff::ffff\", 7}, {\"fe00::\", 7}},\n      {{\"ffff::ffff\", 8}, {\"ff00::\", 8}},\n      {{\"ffff::ffff\", 9}, {\"ff80::\", 9}},\n      {{\"ffff::ffff\", 10}, {\"ffc0::\", 10}},\n      {{\"ffff::ffff\", 15}, 
{\"fffe::\", 15}},\n      {{\"ffff::ffff\", 16}, {\"ffff::\", 16}},\n      {{\"ffff::ffff\", 17}, {\"ffff::\", 17}},\n      {{\"ffff::ffff\", 112}, {\"ffff::\", 112}},\n      {{\"ffff::ffff\", 113}, {\"ffff::8000\", 113}},\n      {{\"ffff::ffff\", 119}, {\"ffff::fe00\", 119}},\n      {{\"ffff::ffff\", 120}, {\"ffff::ff00\", 120}},\n      {{\"ffff::ffff\", 121}, {\"ffff::ff80\", 121}},\n      {{\"ffff::ffff\", 127}, {\"ffff::fffe\", 127}},\n      {{\"ffff::ffff\", 128}, {\"ffff::ffff\", 128}},\n      {{\"ffff::ffff\", 999}, {\"ffff::ffff\", 128}},\n  };\n  test_cases.size();\n  for (const auto& kv : test_cases) {\n    InstanceConstSharedPtr inPtr = Utility::parseInternetAddress(kv.first.first);\n    EXPECT_NE(inPtr, nullptr) << kv.first.first;\n    int length_io = kv.first.second;\n    InstanceConstSharedPtr outPtr = CidrRange::truncateIpAddressAndLength(inPtr, &length_io);\n    if (kv.second.second == -1) {\n      EXPECT_EQ(outPtr, nullptr) << outPtr->asString() << \"\\n\" << kv;\n      EXPECT_EQ(length_io, -1) << kv;\n    } else {\n      ASSERT_NE(outPtr, nullptr) << kv;\n      EXPECT_EQ(outPtr->ip()->addressAsString(), kv.second.first) << kv;\n      EXPECT_EQ(length_io, kv.second.second) << kv;\n    }\n  }\n}\n\nTEST(IsInRange, Various) {\n  {\n    CidrRange rng = CidrRange::create(\"foo\");\n    EXPECT_FALSE(rng.isValid());\n    EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"0.0.0.0\")));\n  }\n\n  {\n    CidrRange rng = CidrRange::create(\"10.255.255.255/0\");\n    EXPECT_TRUE(rng.isValid());\n    EXPECT_EQ(rng.asString(), \"0.0.0.0/0\");\n    EXPECT_EQ(rng.length(), 0);\n    EXPECT_EQ(rng.ip()->version(), IpVersion::v4);\n    EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"10.255.255.255\")));\n    EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"9.255.255.255\")));\n    EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"0.0.0.0\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"::\")));\n    EXPECT_FALSE(rng.isInRange(PipeInstance(\"foo\")));\n  }\n\n  {\n    CidrRange rng = 
CidrRange::create(\"10.255.255.255/10\");\n    EXPECT_TRUE(rng.isValid());\n    EXPECT_EQ(rng.asString(), \"10.192.0.0/10\");\n    EXPECT_EQ(rng.length(), 10);\n    EXPECT_EQ(rng.ip()->version(), IpVersion::v4);\n    EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"10.255.255.255\")));\n    EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"9.255.255.255\")));\n    EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"0.0.0.0\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"::\")));\n  }\n\n  {\n    CidrRange rng = CidrRange::create(\"::/0\");\n    EXPECT_TRUE(rng.isValid());\n    EXPECT_EQ(rng.asString(), \"::/0\");\n    EXPECT_EQ(rng.length(), 0);\n    EXPECT_EQ(rng.ip()->version(), IpVersion::v6);\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"::\")));\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"::1\")));\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"2001::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"0.0.0.0\")));\n    EXPECT_FALSE(rng.isInRange(PipeInstance(\"foo\")));\n  }\n\n  {\n    CidrRange rng = CidrRange::create(\"::1/128\");\n    EXPECT_TRUE(rng.isValid());\n    EXPECT_EQ(rng.asString(), \"::1/128\");\n    EXPECT_EQ(rng.length(), 128);\n    EXPECT_EQ(rng.ip()->version(), IpVersion::v6);\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"::1\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"0.0.0.0\")));\n  }\n\n  {\n    CidrRange rng = CidrRange::create(\"2001:abcd:ef01:2345:6789:abcd:ef01:234/64\");\n    EXPECT_TRUE(rng.isValid());\n    EXPECT_EQ(rng.asString(), \"2001:abcd:ef01:2345::/64\");\n    EXPECT_EQ(rng.length(), 64);\n    EXPECT_EQ(rng.ip()->version(), IpVersion::v6);\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"2001:abcd:ef01:2345::1\")));\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"2001:abcd:ef01:2345::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001:abcd::\")));\n    
EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001:abcd:ef01:2340::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2002::\")));\n  }\n\n  {\n    CidrRange rng = CidrRange::create(\"2001:abcd:ef01:2345:6789:abcd:ef01:234/60\");\n    EXPECT_TRUE(rng.isValid());\n    EXPECT_EQ(rng.asString(), \"2001:abcd:ef01:2340::/60\");\n    EXPECT_EQ(rng.length(), 60);\n    EXPECT_EQ(rng.ip()->version(), IpVersion::v6);\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"2001:abcd:ef01:2345::\")));\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"2001:abcd:ef01:2340::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001:abcd:ef01:2330::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001:abcd::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001:abcd:ef00::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001::\")));\n    EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2002::\")));\n  }\n}\n\nTEST(CidrRangeTest, OperatorIsEqual) {\n  {\n    CidrRange rng1 = CidrRange::create(\"192.0.0.0/8\");\n    CidrRange rng2 = CidrRange::create(\"192.168.0.0/16\");\n    EXPECT_FALSE(rng1 == rng2);\n  }\n\n  {\n    CidrRange rng1 = CidrRange::create(\"192.0.0.0/8\");\n    CidrRange rng2 = CidrRange::create(\"192.168.0.0/8\");\n    EXPECT_TRUE(rng1 == rng2);\n  }\n\n  {\n    CidrRange rng1 = CidrRange::create(\"192.0.0.0/8\");\n    CidrRange rng2 = CidrRange::create(\"2001::/8\");\n    EXPECT_FALSE(rng1 == rng2);\n  }\n\n  {\n    CidrRange rng1 = CidrRange::create(\"2002::/16\");\n    CidrRange rng2 = CidrRange::create(\"2001::/16\");\n    EXPECT_FALSE(rng1 == rng2);\n  }\n\n  {\n    CidrRange rng1 = CidrRange::create(\"2002::/16\");\n    CidrRange rng2 = CidrRange::create(\"192.168.0.1/16\");\n    EXPECT_FALSE(rng1 == rng2);\n  }\n\n  {\n    CidrRange rng1 = CidrRange::create(\"2002::/16\");\n    CidrRange rng2 = CidrRange::create(\"2002::1/16\");\n    EXPECT_TRUE(rng1 == rng2);\n  }\n}\n\nTEST(CidrRangeTest, InvalidCidrRange) {\n  CidrRange rng1 = CidrRange::create(\"foo\");\n  
EXPECT_EQ(nullptr, rng1.ip());\n  EXPECT_EQ(\"/-1\", rng1.asString());\n  // Not equal due to invalid CidrRange.\n  EXPECT_FALSE(rng1 == rng1);\n\n  CidrRange rng2 = CidrRange::create(\"192.0.0.0/8\");\n  EXPECT_FALSE(rng1 == rng2);\n}\n\nTEST(Ipv4CidrRangeTest, InstanceConstSharedPtrAndLengthCtor) {\n  InstanceConstSharedPtr ptr = Utility::parseInternetAddress(\"1.2.3.5\");\n  CidrRange rng(CidrRange::create(ptr, 31)); // Copy ctor.\n  EXPECT_TRUE(rng.isValid());\n  EXPECT_EQ(rng.length(), 31);\n  EXPECT_EQ(rng.ip()->version(), IpVersion::v4);\n  EXPECT_EQ(rng.asString(), \"1.2.3.4/31\");\n  EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"1.2.3.3\")));\n  EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"1.2.3.4\")));\n  EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"1.2.3.5\")));\n  EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"1.2.3.6\")));\n\n  CidrRange rng2(CidrRange::create(ptr, -1)); // Invalid length.\n  EXPECT_FALSE(rng2.isValid());\n\n  ptr.reset();\n  CidrRange rng3(CidrRange::create(ptr, 10)); // Invalid address.\n  EXPECT_FALSE(rng3.isValid());\n}\n\nTEST(Ipv4CidrRangeTest, StringAndLengthCtor) {\n  CidrRange rng;\n  rng = CidrRange::create(\"1.2.3.4\", 31); // Assignment operator.\n  EXPECT_TRUE(rng.isValid());\n  EXPECT_EQ(rng.asString(), \"1.2.3.4/31\");\n  EXPECT_EQ(rng.length(), 31);\n  EXPECT_EQ(rng.ip()->version(), IpVersion::v4);\n  EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"1.2.3.3\")));\n  EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"1.2.3.4\")));\n  EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"1.2.3.5\")));\n  EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"1.2.3.6\")));\n\n  rng = CidrRange::create(\"1.2.3.4\", -10); // Invalid length.\n  EXPECT_FALSE(rng.isValid());\n\n  EXPECT_THROW(CidrRange::create(\"bogus\", 31), EnvoyException); // Invalid address.\n}\n\nTEST(Ipv4CidrRangeTest, StringCtor) {\n  CidrRange rng = CidrRange::create(\"1.2.3.4/31\");\n  EXPECT_TRUE(rng.isValid());\n  EXPECT_EQ(rng.asString(), \"1.2.3.4/31\");\n  EXPECT_EQ(rng.length(), 31);\n  
EXPECT_EQ(rng.ip()->version(), IpVersion::v4);\n  EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"1.2.3.3\")));\n  EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"1.2.3.4\")));\n  EXPECT_TRUE(rng.isInRange(Ipv4Instance(\"1.2.3.5\")));\n  EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"1.2.3.6\")));\n\n  CidrRange rng2 = CidrRange::create(\"1.2.3.4/-10\"); // Invalid length.\n  EXPECT_FALSE(rng2.isValid());\n\n  EXPECT_THROW(CidrRange::create(\"bogus/31\"), EnvoyException); // Invalid address.\n\n  CidrRange rng4 = CidrRange::create(\"/31\"); // Missing address.\n  EXPECT_FALSE(rng4.isValid());\n\n  CidrRange rng5 = CidrRange::create(\"1.2.3.4/\"); // Missing length.\n  EXPECT_FALSE(rng5.isValid());\n}\n\nTEST(Ipv4CidrRangeTest, BigRange) {\n  CidrRange rng = CidrRange::create(\"10.255.255.255/8\");\n  EXPECT_TRUE(rng.isValid());\n  EXPECT_EQ(rng.asString(), \"10.0.0.0/8\");\n  EXPECT_EQ(rng.length(), 8);\n  EXPECT_EQ(rng.ip()->version(), IpVersion::v4);\n  EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"9.255.255.255\")));\n  std::string addr;\n  for (int i = 0; i < 256; ++i) {\n    addr = fmt::format(\"10.{}.0.1\", i);\n    EXPECT_TRUE(rng.isInRange(Ipv4Instance(addr))) << addr;\n    addr = fmt::format(\"10.{}.255.255\", i);\n    EXPECT_TRUE(rng.isInRange(Ipv4Instance(addr))) << addr;\n  }\n  EXPECT_FALSE(rng.isInRange(Ipv4Instance(\"11.0.0.0\")));\n}\n\nTEST(Ipv6CidrRange, InstanceConstSharedPtrAndLengthCtor) {\n  InstanceConstSharedPtr ptr = Utility::parseInternetAddress(\"abcd::0345\");\n  CidrRange rng(CidrRange::create(ptr, 127)); // Copy ctor.\n  EXPECT_TRUE(rng.isValid());\n  EXPECT_EQ(rng.length(), 127);\n  EXPECT_EQ(rng.ip()->version(), IpVersion::v6);\n  EXPECT_EQ(rng.asString(), \"abcd::344/127\");\n  EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"abcd::343\")));\n  EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"abcd::344\")));\n  EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"abcd::345\")));\n  EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"abcd::346\")));\n\n  CidrRange rng2(CidrRange::create(ptr, 
-1)); // Invalid length.\n  EXPECT_FALSE(rng2.isValid());\n\n  ptr.reset();\n  CidrRange rng3(CidrRange::create(ptr, 127)); // Invalid address.\n  EXPECT_FALSE(rng3.isValid());\n}\n\nTEST(Ipv6CidrRange, StringAndLengthCtor) {\n  CidrRange rng;\n  rng = CidrRange::create(\"ff::ffff\", 122); // Assignment operator.\n  EXPECT_TRUE(rng.isValid());\n  EXPECT_EQ(rng.asString(), \"ff::ffc0/122\");\n  EXPECT_EQ(rng.length(), 122);\n  EXPECT_EQ(rng.ip()->version(), IpVersion::v6);\n  EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"ff::ffbf\")));\n  EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"ff::ffc0\")));\n  EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"ff::ffff\")));\n  EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"::1:0\")));\n\n  rng = CidrRange::create(\"::ffff\", -2); // Invalid length.\n  EXPECT_FALSE(rng.isValid());\n\n  EXPECT_THROW(CidrRange::create(\"bogus\", 122), EnvoyException); // Invalid address.\n}\n\nTEST(Ipv6CidrRange, StringCtor) {\n  CidrRange rng = CidrRange::create(\"ff::fc1f/118\");\n  EXPECT_TRUE(rng.isValid());\n  EXPECT_EQ(rng.asString(), \"ff::fc00/118\");\n  EXPECT_EQ(rng.length(), 118);\n  EXPECT_EQ(rng.ip()->version(), IpVersion::v6);\n  EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"ff::fbff\")));\n  EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"ff::fc00\")));\n  EXPECT_TRUE(rng.isInRange(Ipv6Instance(\"ff::ffff\")));\n  EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"::1:00\")));\n\n  CidrRange rng2 = CidrRange::create(\"::fc1f/-10\"); // Invalid length.\n  EXPECT_FALSE(rng2.isValid());\n\n  EXPECT_THROW(CidrRange::create(\"::fc1f00/118\"), EnvoyException); // Invalid address.\n\n  CidrRange rng4 = CidrRange::create(\"/118\"); // Missing address.\n  EXPECT_FALSE(rng4.isValid());\n\n  CidrRange rng5 = CidrRange::create(\"::fc1f/\"); // Missing length.\n  EXPECT_FALSE(rng5.isValid());\n}\n\nTEST(Ipv6CidrRange, BigRange) {\n  std::string prefix = \"2001:0db8:85a3:0000\";\n  CidrRange rng = CidrRange::create(prefix + \"::/64\");\n  EXPECT_TRUE(rng.isValid());\n  
EXPECT_EQ(rng.asString(), \"2001:db8:85a3::/64\");\n  EXPECT_EQ(rng.length(), 64);\n  EXPECT_EQ(rng.ip()->version(), IpVersion::v6);\n  EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001:0db8:85a2:ffff:ffff:ffff:ffff:ffff\")));\n  std::string addr;\n  for (char c : std::string(\"0123456789abcdef\")) {\n    addr = fmt::format(\"{}:000{}::\", prefix, std::string(1, c));\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(addr))) << addr << \" not in \" << rng.asString();\n    addr = fmt::format(\"{}:fff{}:ffff:ffff:ffff\", prefix, std::string(1, c));\n    EXPECT_TRUE(rng.isInRange(Ipv6Instance(addr))) << addr << \" not in \" << rng.asString();\n  }\n  EXPECT_FALSE(rng.isInRange(Ipv6Instance(\"2001:0db8:85a4::\")));\n}\n\nProtobuf::RepeatedPtrField<envoy::config::core::v3::CidrRange>\nmakeCidrRangeList(const std::vector<std::pair<std::string, uint32_t>>& ranges) {\n  Protobuf::RepeatedPtrField<envoy::config::core::v3::CidrRange> ret;\n  for (auto& range : ranges) {\n    auto new_element = ret.Add();\n    new_element->set_address_prefix(range.first);\n    new_element->mutable_prefix_len()->set_value(range.second);\n  }\n  return ret;\n}\n\nTEST(IpListTest, Errors) {\n  {\n    EXPECT_THROW({ IpList list(makeCidrRangeList({{\"foo\", 0}})); }, EnvoyException);\n  }\n}\n\nTEST(IpListTest, SpecificAddressAllowed) {\n  IpList list(makeCidrRangeList({{\"192.168.1.1\", 24}}));\n\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.1.0\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.1.3\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.1.255\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"192.168.3.0\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"192.168.0.0\")));\n}\n\nTEST(IpListTest, Normal) {\n  IpList list(makeCidrRangeList({{\"192.168.3.0\", 24}, {\"50.1.2.3\", 32}, {\"10.15.0.0\", 16}}));\n\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.0\")));\n  
EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.3\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.255\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"192.168.2.255\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"192.168.4.0\")));\n\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"50.1.2.3\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"50.1.2.2\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"50.1.2.4\")));\n\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"10.15.0.0\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"10.15.90.90\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"10.15.255.255\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"10.14.255.255\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"10.16.0.0\")));\n\n  EXPECT_FALSE(list.contains(Address::Ipv6Instance(\"::1\")));\n  EXPECT_FALSE(list.contains(Address::PipeInstance(\"foo\")));\n}\n\nTEST(IpListTest, AddressVersionMix) {\n  IpList list(makeCidrRangeList({{\"192.168.3.0\", 24}, {\"2001:db8:85a3::\", 64}, {\"::1\", 128}}));\n\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.0\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.3\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.255\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"192.168.2.255\")));\n  EXPECT_FALSE(list.contains(Address::Ipv4Instance(\"192.168.4.0\")));\n\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"2001:db8:85a3::\")));\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"2001:db8:85a3:0:1::\")));\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"2001:db8:85a3::ffff:ffff:ffff:ffff\")));\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"2001:db8:85a3::ffff\")));\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"2001:db8:85a3::1\")));\n  EXPECT_FALSE(list.contains(Address::Ipv6Instance(\"2001:db8:85a3:1::\")));\n  
EXPECT_FALSE(list.contains(Address::Ipv6Instance(\"2002:db8:85a3::\")));\n\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"::1\")));\n  EXPECT_FALSE(list.contains(Address::Ipv6Instance(\"::\")));\n\n  EXPECT_FALSE(list.contains(Address::PipeInstance(\"foo\")));\n}\n\nTEST(IpListTest, MatchAny) {\n  IpList list(makeCidrRangeList({{\"0.0.0.0\", 0}}));\n\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.3\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.0\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.255\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.0.0\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.0.0.0\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"1.1.1.1\")));\n\n  EXPECT_FALSE(list.contains(Address::Ipv6Instance(\"::1\")));\n  EXPECT_FALSE(list.contains(Address::PipeInstance(\"foo\")));\n}\n\nTEST(IpListTest, MatchAnyAll) {\n  IpList list(makeCidrRangeList({{\"0.0.0.0\", 0}, {\"::\", 0}}));\n\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.3\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.0\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.3.255\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.168.0.0\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"192.0.0.0\")));\n  EXPECT_TRUE(list.contains(Address::Ipv4Instance(\"1.1.1.1\")));\n\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"::1\")));\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"::\")));\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"2001:db8:85a3::\")));\n  EXPECT_TRUE(list.contains(Address::Ipv6Instance(\"ffee::\")));\n\n  EXPECT_FALSE(list.contains(Address::PipeInstance(\"foo\")));\n}\n\n} // namespace\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/connection_impl_test.cc",
    "content": "#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/fmt.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/connection_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::DoAll;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::Return;\nusing testing::SaveArg;\nusing testing::Sequence;\nusing testing::StrictMock;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nTEST(RawBufferSocket, TestBasics) {\n  TransportSocketPtr raw_buffer_socket(Network::Test::createRawBufferSocket());\n  EXPECT_FALSE(raw_buffer_socket->ssl());\n  EXPECT_TRUE(raw_buffer_socket->canFlushClose());\n  EXPECT_EQ(\"\", raw_buffer_socket->protocol());\n}\n\nTEST(ConnectionImplUtility, updateBufferStats) {\n  StrictMock<Stats::MockCounter> counter;\n  StrictMock<Stats::MockGauge> gauge;\n  uint64_t previous_total = 0;\n\n  InSequence s;\n  
EXPECT_CALL(counter, add(5));\n  EXPECT_CALL(gauge, add(5));\n  ConnectionImplUtility::updateBufferStats(5, 5, previous_total, counter, gauge);\n  EXPECT_EQ(5UL, previous_total);\n\n  EXPECT_CALL(counter, add(1));\n  EXPECT_CALL(gauge, sub(1));\n  ConnectionImplUtility::updateBufferStats(1, 4, previous_total, counter, gauge);\n\n  EXPECT_CALL(gauge, sub(4));\n  ConnectionImplUtility::updateBufferStats(0, 0, previous_total, counter, gauge);\n\n  EXPECT_CALL(counter, add(3));\n  EXPECT_CALL(gauge, add(3));\n  ConnectionImplUtility::updateBufferStats(3, 3, previous_total, counter, gauge);\n}\n\nTEST(ConnectionImplBaseUtility, addIdToHashKey) {\n  uint64_t connection_id = 0x0123456789abcdef;\n  std::vector<uint8_t> hash{{0xff, 0xfe, 0xfd, 0xfc}};\n  ConnectionImplBase::addIdToHashKey(hash, connection_id);\n  ASSERT_EQ(12, hash.size());\n  EXPECT_EQ(0xff, hash[0]);\n  EXPECT_EQ(0xfe, hash[1]);\n  EXPECT_EQ(0xfd, hash[2]);\n  EXPECT_EQ(0xfc, hash[3]);\n  EXPECT_EQ(0xef, hash[4]);\n  EXPECT_EQ(0xcd, hash[5]);\n  EXPECT_EQ(0xab, hash[6]);\n  EXPECT_EQ(0x89, hash[7]);\n  EXPECT_EQ(0x67, hash[8]);\n  EXPECT_EQ(0x45, hash[9]);\n  EXPECT_EQ(0x23, hash[10]);\n  EXPECT_EQ(0x01, hash[11]);\n}\n\nclass ConnectionImplDeathTest : public testing::TestWithParam<Address::IpVersion> {};\nINSTANTIATE_TEST_SUITE_P(IpVersions, ConnectionImplDeathTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ConnectionImplDeathTest, BadFd) {\n  Api::ApiPtr api = Api::createApiForTest();\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"test_thread\"));\n  IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>();\n  StreamInfo::StreamInfoImpl stream_info(dispatcher->timeSource());\n  EXPECT_DEATH(\n      ConnectionImpl(*dispatcher,\n                     std::make_unique<ConnectionSocketImpl>(std::move(io_handle), nullptr, nullptr),\n                     
Network::Test::createRawBufferSocket(), stream_info, false),\n      \".*assert failure: SOCKET_VALID\\\\(fd\\\\)\");\n}\n\nclass TestClientConnectionImpl : public Network::ClientConnectionImpl {\npublic:\n  using ClientConnectionImpl::ClientConnectionImpl;\n  Buffer::WatermarkBuffer& readBuffer() { return read_buffer_; }\n};\n\nclass ConnectionImplTest : public testing::TestWithParam<Address::IpVersion> {\nprotected:\n  ConnectionImplTest() : api_(Api::createApiForTest(time_system_)), stream_info_(time_system_) {}\n\n  void setUpBasicConnection() {\n    if (dispatcher_.get() == nullptr) {\n      dispatcher_ = api_->allocateDispatcher(\"test_thread\");\n    }\n    socket_ = std::make_shared<Network::TcpListenSocket>(\n        Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n    listener_ =\n        dispatcher_->createListener(socket_, listener_callbacks_, true, ENVOY_TCP_BACKLOG_SIZE);\n#if defined(__clang__) && defined(__has_feature) && __has_feature(address_sanitizer)\n    // There is a bug in clang with AddressSanitizer on the CI such that the code below reports:\n    //\n    //   runtime error: constructor call on address 0x6190000b4a80 with insufficient space for\n    //   an object of type 'Envoy::Network::(anonymous namespace)::TestClientConnectionImpl'\n    //   0x6190000b4a80: note: pointer points here\n    //   05 01 80 39  be be be be be be be be  be be be be be be be be  be be be be be be be be\n    //   be be be be\n    //\n    // However, the workaround below trips gcc on the CI, which reports:\n    //\n    //   size check failed 2304 1280 38\n    //   CorrectSize(p, size, tcmalloc::DefaultAlignPolicy())\n    //\n    // so we only use it for clang with AddressSanitizer builds.\n    auto x = malloc(sizeof(TestClientConnectionImpl) + 1024);\n    new (x) TestClientConnectionImpl(*dispatcher_, socket_->localAddress(), source_address_,\n                                     Network::Test::createRawBufferSocket(), socket_options_);\n   
 client_connection_.reset(reinterpret_cast<TestClientConnectionImpl*>(x));\n#else\n    client_connection_ = std::make_unique<Network::TestClientConnectionImpl>(\n        *dispatcher_, socket_->localAddress(), source_address_,\n        Network::Test::createRawBufferSocket(), socket_options_);\n#endif\n    client_connection_->addConnectionCallbacks(client_callbacks_);\n    EXPECT_EQ(nullptr, client_connection_->ssl());\n    const Network::ClientConnection& const_connection = *client_connection_;\n    EXPECT_EQ(nullptr, const_connection.ssl());\n    EXPECT_FALSE(client_connection_->localAddressRestored());\n  }\n\n  void connect() {\n    int expected_callbacks = 2;\n    client_connection_->connect();\n    read_filter_ = std::make_shared<NiceMock<MockReadFilter>>();\n    EXPECT_CALL(listener_callbacks_, onAccept_(_))\n        .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n          server_connection_ = dispatcher_->createServerConnection(\n              std::move(socket), Network::Test::createRawBufferSocket(), stream_info_);\n          server_connection_->addConnectionCallbacks(server_callbacks_);\n          server_connection_->addReadFilter(read_filter_);\n\n          expected_callbacks--;\n          if (expected_callbacks == 0) {\n            dispatcher_->exit();\n          }\n        }));\n    EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n          expected_callbacks--;\n          if (expected_callbacks == 0) {\n            dispatcher_->exit();\n          }\n        }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  void disconnect(bool wait_for_remote_close) {\n    if (client_write_buffer_) {\n      EXPECT_CALL(*client_write_buffer_, drain(_))\n          .Times(AnyNumber())\n          .WillRepeatedly(\n              Invoke([&](uint64_t size) -> void { client_write_buffer_->baseDrain(size); }));\n    }\n    
EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose));\n    client_connection_->close(ConnectionCloseType::NoFlush);\n    if (wait_for_remote_close) {\n      EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n          .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n      dispatcher_->run(Event::Dispatcher::RunType::Block);\n    } else {\n      dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n    }\n  }\n\n  void useMockBuffer() {\n    // This needs to be called before the dispatcher is created.\n    ASSERT(dispatcher_.get() == nullptr);\n\n    MockBufferFactory* factory = new StrictMock<MockBufferFactory>;\n    dispatcher_ = api_->allocateDispatcher(\"test_thread\", Buffer::WatermarkFactoryPtr{factory});\n    // The first call to create a client session will get a MockBuffer.\n    // Other calls for server sessions will by default get a normal OwnedImpl.\n    EXPECT_CALL(*factory, create_(_, _, _))\n        .Times(AnyNumber())\n        .WillOnce(Invoke([&](std::function<void()> below_low, std::function<void()> above_high,\n                             std::function<void()> above_overflow) -> Buffer::Instance* {\n          client_write_buffer_ = new MockWatermarkBuffer(below_low, above_high, above_overflow);\n          return client_write_buffer_;\n        }))\n        .WillRepeatedly(Invoke([](std::function<void()> below_low, std::function<void()> above_high,\n                                  std::function<void()> above_overflow) -> Buffer::Instance* {\n          return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow);\n        }));\n  }\n\nprotected:\n  struct ConnectionMocks {\n    std::unique_ptr<NiceMock<Event::MockDispatcher>> dispatcher_;\n    Event::MockTimer* timer_;\n    std::unique_ptr<NiceMock<MockTransportSocket>> transport_socket_;\n    NiceMock<Event::MockFileEvent>* file_event_;\n    Event::FileReadyCb* file_ready_cb_;\n  };\n\n  ConnectionMocks 
createConnectionMocks(bool create_timer = true) {\n    auto dispatcher = std::make_unique<NiceMock<Event::MockDispatcher>>();\n    EXPECT_CALL(dispatcher->buffer_factory_, create_(_, _, _))\n        .WillRepeatedly(Invoke([](std::function<void()> below_low, std::function<void()> above_high,\n                                  std::function<void()> above_overflow) -> Buffer::Instance* {\n          // ConnectionImpl calls Envoy::MockBufferFactory::create(), which calls create_() and\n          // wraps the returned raw pointer below with a unique_ptr.\n          return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow);\n        }));\n\n    Event::MockTimer* timer = nullptr;\n    if (create_timer) {\n      // This timer will be returned (transferring ownership) to the ConnectionImpl when\n      // createTimer() is called to allocate the delayed close timer.\n      timer = new Event::MockTimer(dispatcher.get());\n    }\n\n    NiceMock<Event::MockFileEvent>* file_event = new NiceMock<Event::MockFileEvent>;\n    EXPECT_CALL(*dispatcher, createFileEvent_(0, _, _, _))\n        .WillOnce(DoAll(SaveArg<1>(&file_ready_cb_), Return(file_event)));\n\n    auto transport_socket = std::make_unique<NiceMock<MockTransportSocket>>();\n    EXPECT_CALL(*transport_socket, canFlushClose()).WillRepeatedly(Return(true));\n\n    return ConnectionMocks{std::move(dispatcher), timer, std::move(transport_socket), file_event,\n                           &file_ready_cb_};\n  }\n  Network::TestClientConnectionImpl* testClientConnection() {\n    return dynamic_cast<Network::TestClientConnectionImpl*>(client_connection_.get());\n  }\n\n  Event::FileReadyCb file_ready_cb_;\n  Event::SimulatedTimeSystem time_system_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  std::shared_ptr<Network::TcpListenSocket> socket_{nullptr};\n  Network::MockTcpListenerCallbacks listener_callbacks_;\n  Network::MockConnectionHandler connection_handler_;\n  Network::ListenerPtr listener_;\n  
Network::ClientConnectionPtr client_connection_;\n  StrictMock<MockConnectionCallbacks> client_callbacks_;\n  Network::ConnectionPtr server_connection_;\n  StrictMock<Network::MockConnectionCallbacks> server_callbacks_;\n  std::shared_ptr<MockReadFilter> read_filter_;\n  MockWatermarkBuffer* client_write_buffer_ = nullptr;\n  Address::InstanceConstSharedPtr source_address_;\n  Socket::OptionsSharedPtr socket_options_;\n  StreamInfo::StreamInfoImpl stream_info_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ConnectionImplTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ConnectionImplTest, UniqueId) {\n  setUpBasicConnection();\n  disconnect(false);\n  uint64_t first_id = client_connection_->id();\n  setUpBasicConnection();\n  EXPECT_NE(first_id, client_connection_->id());\n  disconnect(false);\n}\n\nTEST_P(ConnectionImplTest, CloseDuringConnectCallback) {\n  setUpBasicConnection();\n\n  Buffer::OwnedImpl buffer(\"hello world\");\n  client_connection_->write(buffer, false);\n  client_connection_->connect();\n\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        client_connection_->close(ConnectionCloseType::NoFlush);\n      }));\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose));\n\n  read_filter_ = std::make_shared<NiceMock<MockReadFilter>>();\n\n  EXPECT_CALL(listener_callbacks_, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection_ = dispatcher_->createServerConnection(\n            std::move(socket), Network::Test::createRawBufferSocket(), stream_info_);\n        server_connection_->addConnectionCallbacks(server_callbacks_);\n        server_connection_->addReadFilter(read_filter_);\n      }));\n\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      
.WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_P(ConnectionImplTest, ImmediateConnectError) {\n  dispatcher_ = api_->allocateDispatcher(\"test_thread\");\n\n  // Using a broadcast/multicast address as the connection destinations address causes an\n  // immediate error return from connect().\n  Address::InstanceConstSharedPtr broadcast_address;\n  socket_ = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n  if (socket_->localAddress()->ip()->version() == Address::IpVersion::v4) {\n    broadcast_address = std::make_shared<Address::Ipv4Instance>(\"224.0.0.1\", 0);\n  } else {\n    broadcast_address = std::make_shared<Address::Ipv6Instance>(\"ff02::1\", 0);\n  }\n\n  client_connection_ = dispatcher_->createClientConnection(\n      broadcast_address, source_address_, Network::Test::createRawBufferSocket(), nullptr);\n  client_connection_->addConnectionCallbacks(client_callbacks_);\n  client_connection_->connect();\n\n  // Verify that also the immediate connect errors generate a remote close event.\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_P(ConnectionImplTest, SocketOptions) {\n  Network::ClientConnectionPtr upstream_connection_;\n\n  setUpBasicConnection();\n\n  Buffer::OwnedImpl buffer(\"hello world\");\n  client_connection_->write(buffer, false);\n  client_connection_->connect();\n\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        client_connection_->close(ConnectionCloseType::NoFlush);\n      }));\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose));\n\n  read_filter_ = 
std::make_shared<NiceMock<MockReadFilter>>();\n\n  auto option = std::make_shared<MockSocketOption>();\n\n  EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_PREBIND))\n      .WillOnce(Return(true));\n  EXPECT_CALL(listener_callbacks_, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        socket->addOption(option);\n        server_connection_ = dispatcher_->createServerConnection(\n            std::move(socket), Network::Test::createRawBufferSocket(), stream_info_);\n        server_connection_->addConnectionCallbacks(server_callbacks_);\n        server_connection_->addReadFilter(read_filter_);\n\n        upstream_connection_ = dispatcher_->createClientConnection(\n            socket_->localAddress(), source_address_, Network::Test::createRawBufferSocket(),\n            server_connection_->socketOptions());\n      }));\n\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        upstream_connection_->close(ConnectionCloseType::NoFlush);\n        dispatcher_->exit();\n      }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  // Assert that upstream connection gets the socket options\n  ASSERT(upstream_connection_ != nullptr);\n  ASSERT(upstream_connection_->socketOptions() != nullptr);\n  ASSERT(upstream_connection_->socketOptions()->front() == option);\n}\n\nTEST_P(ConnectionImplTest, SocketOptionsFailureTest) {\n  Network::ClientConnectionPtr upstream_connection_;\n  StrictMock<Network::MockConnectionCallbacks> upstream_callbacks_;\n\n  setUpBasicConnection();\n\n  Buffer::OwnedImpl buffer(\"hello world\");\n  client_connection_->write(buffer, false);\n  client_connection_->connect();\n\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        client_connection_->close(ConnectionCloseType::NoFlush);\n      
}));\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose));\n\n  read_filter_ = std::make_shared<NiceMock<MockReadFilter>>();\n\n  auto option = std::make_shared<MockSocketOption>();\n\n  EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_PREBIND))\n      .WillOnce(Return(false));\n  EXPECT_CALL(listener_callbacks_, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        socket->addOption(option);\n        server_connection_ = dispatcher_->createServerConnection(\n            std::move(socket), Network::Test::createRawBufferSocket(), stream_info_);\n        server_connection_->addConnectionCallbacks(server_callbacks_);\n        server_connection_->addReadFilter(read_filter_);\n\n        upstream_connection_ = dispatcher_->createClientConnection(\n            socket_->localAddress(), source_address_, Network::Test::createRawBufferSocket(),\n            server_connection_->socketOptions());\n        upstream_connection_->addConnectionCallbacks(upstream_callbacks_);\n      }));\n\n  EXPECT_CALL(upstream_callbacks_, onEvent(ConnectionEvent::LocalClose));\n\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        upstream_connection_->close(ConnectionCloseType::NoFlush);\n        dispatcher_->exit();\n      }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nstruct MockConnectionStats {\n  Connection::ConnectionStats toBufferStats() {\n    return {rx_total_,   rx_current_,   tx_total_,\n            tx_current_, &bind_errors_, &delayed_close_timeouts_};\n  }\n\n  StrictMock<Stats::MockCounter> rx_total_;\n  StrictMock<Stats::MockGauge> rx_current_;\n  StrictMock<Stats::MockCounter> tx_total_;\n  StrictMock<Stats::MockGauge> tx_current_;\n  StrictMock<Stats::MockCounter> bind_errors_;\n  StrictMock<Stats::MockCounter> delayed_close_timeouts_;\n};\n\nstruct NiceMockConnectionStats 
{\n  Connection::ConnectionStats toBufferStats() {\n    return {rx_total_,   rx_current_,   tx_total_,\n            tx_current_, &bind_errors_, &delayed_close_timeouts_};\n  }\n\n  NiceMock<Stats::MockCounter> rx_total_;\n  NiceMock<Stats::MockGauge> rx_current_;\n  NiceMock<Stats::MockCounter> tx_total_;\n  NiceMock<Stats::MockGauge> tx_current_;\n  NiceMock<Stats::MockCounter> bind_errors_;\n  NiceMock<Stats::MockCounter> delayed_close_timeouts_;\n};\n\nTEST_P(ConnectionImplTest, ConnectionStats) {\n  setUpBasicConnection();\n\n  MockConnectionStats client_connection_stats;\n  client_connection_->setConnectionStats(client_connection_stats.toBufferStats());\n  client_connection_->connect();\n\n  std::shared_ptr<MockWriteFilter> write_filter(new MockWriteFilter());\n  std::shared_ptr<MockFilter> filter(new MockFilter());\n  client_connection_->addFilter(filter);\n  client_connection_->addWriteFilter(write_filter);\n\n  Sequence s1;\n  EXPECT_CALL(*write_filter, onWrite(_, _))\n      .InSequence(s1)\n      .WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_CALL(*write_filter, onWrite(_, _)).InSequence(s1).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*filter, onWrite(_, _)).InSequence(s1).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)).InSequence(s1);\n  EXPECT_CALL(client_connection_stats.tx_total_, add(4)).InSequence(s1);\n\n  read_filter_ = std::make_shared<NiceMock<MockReadFilter>>();\n  MockConnectionStats server_connection_stats;\n  EXPECT_CALL(listener_callbacks_, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection_ = dispatcher_->createServerConnection(\n            std::move(socket), Network::Test::createRawBufferSocket(), stream_info_);\n        server_connection_->addConnectionCallbacks(server_callbacks_);\n        server_connection_->setConnectionStats(server_connection_stats.toBufferStats());\n        
server_connection_->addReadFilter(read_filter_);\n        EXPECT_EQ(\"\", server_connection_->nextProtocol());\n      }));\n\n  Sequence s2;\n  EXPECT_CALL(server_connection_stats.rx_total_, add(4)).InSequence(s2);\n  EXPECT_CALL(server_connection_stats.rx_current_, add(4)).InSequence(s2);\n  EXPECT_CALL(server_connection_stats.rx_current_, sub(4)).InSequence(s2);\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)).InSequence(s2);\n\n  EXPECT_CALL(*read_filter_, onNewConnection());\n  EXPECT_CALL(*read_filter_, onData(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> FilterStatus {\n        data.drain(data.length());\n        server_connection_->close(ConnectionCloseType::FlushWrite);\n        return FilterStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n  Buffer::OwnedImpl data(\"1234\");\n  client_connection_->write(data, false);\n  client_connection_->write(data, false);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Ensure the new counter logic in ReadDisable avoids tripping asserts in ReadDisable guarding\n// against actual enabling twice in a row.\nTEST_P(ConnectionImplTest, ReadDisable) {\n  ConnectionMocks mocks = createConnectionMocks(false);\n  IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>(0);\n  auto connection = std::make_unique<Network::ConnectionImpl>(\n      *mocks.dispatcher_,\n      std::make_unique<ConnectionSocketImpl>(std::move(io_handle), nullptr, nullptr),\n      std::move(mocks.transport_socket_), stream_info_, true);\n\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_));\n  connection->readDisable(true);\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_));\n  connection->readDisable(false);\n\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_));\n  connection->readDisable(true);\n  EXPECT_CALL(*mocks.file_event_, 
setEnabled(_)).Times(0);\n  connection->readDisable(true);\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0);\n  connection->readDisable(false);\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_));\n  connection->readDisable(false);\n\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_));\n  connection->readDisable(true);\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0);\n  connection->readDisable(true);\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0);\n  connection->readDisable(false);\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0);\n  connection->readDisable(true);\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0);\n  connection->readDisable(false);\n  EXPECT_CALL(*mocks.file_event_, setEnabled(_));\n  connection->readDisable(false);\n\n  connection->close(ConnectionCloseType::NoFlush);\n}\n\n// The HTTP/1 codec handles pipelined connections by relying on readDisable(false) resulting in the\n// subsequent request being dispatched. Regression test this behavior.\nTEST_P(ConnectionImplTest, ReadEnableDispatches) {\n  setUpBasicConnection();\n  connect();\n\n  std::shared_ptr<MockReadFilter> client_read_filter(new NiceMock<MockReadFilter>());\n  client_connection_->addReadFilter(client_read_filter);\n\n  {\n    Buffer::OwnedImpl buffer(\"data\");\n    server_connection_->write(buffer, false);\n    EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"data\"), false))\n        .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus {\n          dispatcher_->exit();\n          return FilterStatus::StopIteration;\n        }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  {\n    client_connection_->readDisable(true);\n    EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"data\"), false))\n        .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> FilterStatus {\n          buffer.drain(buffer.length());\n          dispatcher_->exit();\n          return 
FilterStatus::StopIteration;\n        }));\n    client_connection_->readDisable(false);\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  disconnect(true);\n}\n\n// Make sure if we readDisable(true) and schedule a 'kick' and then\n// readDisable(false) the kick doesn't happen.\nTEST_P(ConnectionImplTest, KickUndone) {\n  setUpBasicConnection();\n  connect();\n\n  std::shared_ptr<MockReadFilter> client_read_filter(new NiceMock<MockReadFilter>());\n  client_connection_->addReadFilter(client_read_filter);\n  Buffer::Instance* connection_buffer = nullptr;\n\n  {\n    Buffer::OwnedImpl buffer(\"data\");\n    server_connection_->write(buffer, false);\n    EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"data\"), false))\n        .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> FilterStatus {\n          dispatcher_->exit();\n          connection_buffer = &buffer;\n          return FilterStatus::StopIteration;\n        }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  {\n    // Like ReadEnableDispatches above, read disable and read enable to kick off\n    // an extra read. 
But then readDisable again and make sure the kick doesn't\n    // happen.\n    client_connection_->readDisable(true);\n    client_connection_->readDisable(false); // Sets dispatch_buffered_data_\n    client_connection_->readDisable(true);\n    EXPECT_CALL(*client_read_filter, onData(_, _)).Times(0);\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  // Now drain the connection's buffer and try to do a read which should _not_\n  // pass up the stack (no data is read)\n  {\n    connection_buffer->drain(connection_buffer->length());\n    client_connection_->readDisable(false);\n    EXPECT_CALL(*client_read_filter, onData(_, _)).Times(0);\n    // Data no longer buffered - even if dispatch_buffered_data_ lingered it should have no effect.\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  disconnect(true);\n}\n\n// Ensure that calls to readDisable on a closed connection are handled gracefully. Known past issues\n// include a crash on https://github.com/envoyproxy/envoy/issues/3639, and ASSERT failure followed\n// by infinite loop in https://github.com/envoyproxy/envoy/issues/9508\nTEST_P(ConnectionImplTest, ReadDisableAfterCloseHandledGracefully) {\n  setUpBasicConnection();\n\n  client_connection_->readDisable(true);\n  client_connection_->readDisable(false);\n\n  client_connection_->readDisable(true);\n  client_connection_->readDisable(true);\n  client_connection_->readDisable(false);\n  client_connection_->readDisable(false);\n\n  client_connection_->readDisable(true);\n  client_connection_->readDisable(true);\n  disconnect(false);\n#ifndef NDEBUG\n  // When running in debug mode, verify that calls to readDisable and readEnabled on a closed socket\n  // trigger ASSERT failures.\n  EXPECT_DEBUG_DEATH(client_connection_->readEnabled(), \"\");\n  EXPECT_DEBUG_DEATH(client_connection_->readDisable(true), \"\");\n  EXPECT_DEBUG_DEATH(client_connection_->readDisable(false), \"\");\n#else\n  // When running in release mode, verify 
that calls to readDisable change the readEnabled state.\n  client_connection_->readDisable(false);\n  client_connection_->readDisable(true);\n  client_connection_->readDisable(false);\n  EXPECT_FALSE(client_connection_->readEnabled());\n  client_connection_->readDisable(false);\n  EXPECT_TRUE(client_connection_->readEnabled());\n#endif\n}\n\n// On our current macOS build, the client connection does not get the early\n// close notification and instead gets the close after reading the FIN.\n// The Windows backend in libevent does not support the EV_CLOSED flag\n// so it won't detect the early close\n#if !defined(__APPLE__) && !defined(WIN32)\nTEST_P(ConnectionImplTest, EarlyCloseOnReadDisabledConnection) {\n  setUpBasicConnection();\n  connect();\n\n  client_connection_->readDisable(true);\n\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); }));\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose));\n  server_connection_->close(ConnectionCloseType::FlushWrite);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n#endif\n\nTEST_P(ConnectionImplTest, CloseOnReadDisableWithoutCloseDetection) {\n  setUpBasicConnection();\n  connect();\n\n  client_connection_->detectEarlyCloseWhenReadDisabled(false);\n  client_connection_->readDisable(true);\n\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)).Times(0);\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose))\n      .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); }));\n  server_connection_->close(ConnectionCloseType::FlushWrite);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  client_connection_->readDisable(false);\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Test that 
connection half-close is sent and received properly.\nTEST_P(ConnectionImplTest, HalfClose) {\n  setUpBasicConnection();\n  connect();\n\n  std::shared_ptr<MockReadFilter> client_read_filter(new NiceMock<MockReadFilter>());\n  server_connection_->enableHalfClose(true);\n  client_connection_->enableHalfClose(true);\n  client_connection_->addReadFilter(client_read_filter);\n\n  EXPECT_CALL(*read_filter_, onData(_, true)).WillOnce(InvokeWithoutArgs([&]() -> FilterStatus {\n    dispatcher_->exit();\n    return FilterStatus::StopIteration;\n  }));\n\n  Buffer::OwnedImpl empty_buffer;\n  client_connection_->write(empty_buffer, true);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  Buffer::OwnedImpl buffer(\"data\");\n  server_connection_->write(buffer, false);\n  EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"data\"), false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> FilterStatus {\n        buffer.drain(buffer.length());\n        dispatcher_->exit();\n        return FilterStatus::StopIteration;\n      }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose));\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose));\n  server_connection_->write(empty_buffer, true);\n  EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"\"), true))\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterStatus {\n        dispatcher_->exit();\n        return FilterStatus::StopIteration;\n      }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Test that connections do not detect early close when half-close is enabled\nTEST_P(ConnectionImplTest, HalfCloseNoEarlyCloseDetection) {\n  setUpBasicConnection();\n  connect();\n\n  server_connection_->enableHalfClose(true);\n  server_connection_->readDisable(true);\n\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)).Times(0);\n  EXPECT_CALL(*read_filter_, onData(_, 
_)).Times(0);\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose))\n      .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); }));\n  client_connection_->close(ConnectionCloseType::FlushWrite);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  server_connection_->readDisable(false);\n  EXPECT_CALL(*read_filter_, onData(_, _)).WillOnce(InvokeWithoutArgs([&]() -> FilterStatus {\n    dispatcher_->exit();\n    return FilterStatus::StopIteration;\n  }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose));\n  server_connection_->close(ConnectionCloseType::NoFlush);\n}\n\n// Test that as watermark levels are changed, the appropriate callbacks are triggered.\nTEST_P(ConnectionImplTest, WriteWatermarks) {\n  useMockBuffer();\n\n  setUpBasicConnection();\n  EXPECT_FALSE(client_connection_->aboveHighWatermark());\n\n  // Stick 5 bytes in the connection buffer.\n  std::unique_ptr<Buffer::OwnedImpl> buffer(new Buffer::OwnedImpl(\"hello\"));\n  int buffer_len = buffer->length();\n  EXPECT_CALL(*client_write_buffer_, move(_));\n  client_write_buffer_->move(*buffer);\n\n  {\n    // Go from watermarks being off to being above the high watermark.\n    EXPECT_CALL(client_callbacks_, onAboveWriteBufferHighWatermark());\n    EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark()).Times(0);\n    client_connection_->setBufferLimits(buffer_len - 3);\n    EXPECT_TRUE(client_connection_->aboveHighWatermark());\n  }\n\n  {\n    // Go from above the high watermark to in between both.\n    EXPECT_CALL(client_callbacks_, onAboveWriteBufferHighWatermark()).Times(0);\n    EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark()).Times(0);\n    client_connection_->setBufferLimits(buffer_len + 1);\n    EXPECT_TRUE(client_connection_->aboveHighWatermark());\n  }\n\n  {\n    // Go from above the high watermark to below the low watermark.\n    
EXPECT_CALL(client_callbacks_, onAboveWriteBufferHighWatermark()).Times(0);\n    EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark());\n    client_connection_->setBufferLimits(buffer_len * 3);\n    EXPECT_FALSE(client_connection_->aboveHighWatermark());\n  }\n\n  {\n    // Go back in between and verify neither callback is called.\n    EXPECT_CALL(client_callbacks_, onAboveWriteBufferHighWatermark()).Times(0);\n    EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark()).Times(0);\n    client_connection_->setBufferLimits(buffer_len * 2);\n    EXPECT_FALSE(client_connection_->aboveHighWatermark());\n  }\n\n  disconnect(false);\n}\n\n// Test that as watermark levels are changed, the appropriate callbacks are triggered.\nTEST_P(ConnectionImplTest, ReadWatermarks) {\n\n  setUpBasicConnection();\n  client_connection_->setBufferLimits(2);\n  std::shared_ptr<MockReadFilter> client_read_filter(new NiceMock<MockReadFilter>());\n  client_connection_->addReadFilter(client_read_filter);\n  connect();\n\n  auto on_filter_data_exit = [&](Buffer::Instance&, bool) -> FilterStatus {\n    dispatcher_->exit();\n    return FilterStatus::StopIteration;\n  };\n\n  EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered());\n  EXPECT_TRUE(client_connection_->readEnabled());\n  // Add 4 bytes to the buffer and verify the connection becomes read disabled.\n  {\n    Buffer::OwnedImpl buffer(\"data\");\n    server_connection_->write(buffer, false);\n    EXPECT_CALL(*client_read_filter, onData(_, false)).WillOnce(Invoke(on_filter_data_exit));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered());\n    EXPECT_FALSE(client_connection_->readEnabled());\n  }\n\n  // Drain 3 bytes from the buffer. 
This bring sit below the low watermark, and\n  // read enables, as well as triggering a kick for the remaining byte.\n  {\n    testClientConnection()->readBuffer().drain(3);\n    EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered());\n    EXPECT_TRUE(client_connection_->readEnabled());\n\n    EXPECT_CALL(*client_read_filter, onData(_, false));\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  // Add 3 bytes to the buffer and verify the connection becomes read disabled\n  // again.\n  {\n    Buffer::OwnedImpl buffer(\"bye\");\n    server_connection_->write(buffer, false);\n    EXPECT_CALL(*client_read_filter, onData(_, false)).WillOnce(Invoke(on_filter_data_exit));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered());\n    EXPECT_FALSE(client_connection_->readEnabled());\n  }\n\n  // Now have the consumer read disable.\n  // This time when the buffer is drained, there will be no kick as the consumer\n  // does not want to read.\n  {\n    client_connection_->readDisable(true);\n    testClientConnection()->readBuffer().drain(3);\n    EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered());\n    EXPECT_FALSE(client_connection_->readEnabled());\n\n    EXPECT_CALL(*client_read_filter, onData(_, false)).Times(0);\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  // Now read enable again.\n  // Inside the onData call, readDisable and readEnable. 
This should trigger\n  // another kick on the next dispatcher loop, so onData gets called twice.\n  {\n    client_connection_->readDisable(false);\n    EXPECT_CALL(*client_read_filter, onData(_, false))\n        .Times(2)\n        .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus {\n          client_connection_->readDisable(true);\n          client_connection_->readDisable(false);\n          return FilterStatus::StopIteration;\n        }))\n        .WillRepeatedly(Invoke(on_filter_data_exit));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  // Test the same logic for dispatched_buffered_data from the\n  // onReadReady() (read_disable_count_ != 0) path.\n  {\n    // Fill the buffer and verify the socket is read disabled.\n    Buffer::OwnedImpl buffer(\"bye\");\n    server_connection_->write(buffer, false);\n    EXPECT_CALL(*client_read_filter, onData(_, false))\n        .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus {\n          dispatcher_->exit();\n          return FilterStatus::StopIteration;\n        }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n    EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered());\n    EXPECT_FALSE(client_connection_->readEnabled());\n\n    // Read disable and read enable, to set dispatch_buffered_data_ true.\n    client_connection_->readDisable(true);\n    client_connection_->readDisable(false);\n    // Now event loop. This hits the early on-Read path. 
As above, read\n    // disable and read enable from inside the stack of onData, to ensure that\n    // dispatch_buffered_data_ works correctly.\n    EXPECT_CALL(*client_read_filter, onData(_, false))\n        .Times(2)\n        .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus {\n          client_connection_->readDisable(true);\n          client_connection_->readDisable(false);\n          return FilterStatus::StopIteration;\n        }))\n        .WillRepeatedly(Invoke(on_filter_data_exit));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  disconnect(true);\n}\n\n// Write some data to the connection. It will automatically attempt to flush\n// it to the upstream file descriptor via a write() call to buffer_, which is\n// configured to succeed and accept all bytes read.\nTEST_P(ConnectionImplTest, BasicWrite) {\n  useMockBuffer();\n\n  setUpBasicConnection();\n\n  connect();\n\n  // Send the data to the connection and verify it is sent upstream.\n  std::string data_to_write = \"hello world\";\n  Buffer::OwnedImpl buffer_to_write(data_to_write);\n  std::string data_written;\n  EXPECT_CALL(*client_write_buffer_, move(_))\n      .WillRepeatedly(DoAll(AddBufferToStringWithoutDraining(&data_written),\n                            Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove)));\n  EXPECT_CALL(*client_write_buffer_, drain(_))\n      .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackDrains));\n  client_connection_->write(buffer_to_write, false);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(data_to_write, data_written);\n\n  disconnect(true);\n}\n\n// Similar to BasicWrite, only with watermarks set.\nTEST_P(ConnectionImplTest, WriteWithWatermarks) {\n  useMockBuffer();\n\n  setUpBasicConnection();\n\n  connect();\n\n  client_connection_->setBufferLimits(2);\n\n  std::string data_to_write = \"hello world\";\n  Buffer::OwnedImpl first_buffer_to_write(data_to_write);\n  std::string 
data_written;\n  EXPECT_CALL(*client_write_buffer_, move(_))\n      .WillRepeatedly(DoAll(AddBufferToStringWithoutDraining(&data_written),\n                            Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove)));\n  EXPECT_CALL(*client_write_buffer_, drain(_))\n      .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackDrains));\n  // The write() call on the connection will buffer enough data to bring the connection above the\n  // high watermark but the subsequent drain immediately brings it back below.\n  // A nice future performance optimization would be to latch if the socket is writable in the\n  // connection_impl, and try an immediate drain inside of write() to avoid thrashing here.\n  EXPECT_CALL(client_callbacks_, onAboveWriteBufferHighWatermark());\n  EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark());\n\n  client_connection_->write(first_buffer_to_write, false);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(data_to_write, data_written);\n\n  // Now do the write again, but this time configure os_sys_calls to reject the write\n  // with errno set to EAGAIN. 
This should result in going above the high\n  // watermark and not returning.\n  Buffer::OwnedImpl second_buffer_to_write(data_to_write);\n  EXPECT_CALL(*client_write_buffer_, move(_))\n      .WillRepeatedly(DoAll(AddBufferToStringWithoutDraining(&data_written),\n                            Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove)));\n  NiceMock<Api::MockOsSysCalls> os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n  EXPECT_CALL(os_sys_calls, writev(_, _, _))\n      .WillOnce(Invoke([&](os_fd_t, const iovec*, int) -> Api::SysCallSizeResult {\n        dispatcher_->exit();\n        // Return to default os_sys_calls implementation\n        os_calls.~TestThreadsafeSingletonInjector();\n        return {-1, SOCKET_ERROR_AGAIN};\n      }));\n  // The write() call on the connection will buffer enough data to bring the connection above the\n  // high watermark and as the data will not flush it should not return below the watermark.\n  EXPECT_CALL(client_callbacks_, onAboveWriteBufferHighWatermark());\n  EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark()).Times(0);\n  client_connection_->write(second_buffer_to_write, false);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  // Clean up the connection. The close() (called via disconnect) will attempt to flush. 
The\n  // call to write() will succeed, bringing the connection back under the low watermark.\n  EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark()).Times(1);\n\n  disconnect(true);\n}\n\n// Read and write random bytes and ensure we don't encounter issues.\nTEST_P(ConnectionImplTest, WatermarkFuzzing) {\n  useMockBuffer();\n  setUpBasicConnection();\n\n  connect();\n  client_connection_->setBufferLimits(10);\n\n  TestRandomGenerator rand;\n  int bytes_buffered = 0;\n  int new_bytes_buffered = 0;\n\n  bool is_below = true;\n  bool is_above = false;\n\n  NiceMock<Api::MockOsSysCalls> os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n  ON_CALL(os_sys_calls, writev(_, _, _))\n      .WillByDefault(Invoke([&](os_fd_t, const iovec*, int) -> Api::SysCallSizeResult {\n        return {-1, SOCKET_ERROR_AGAIN};\n      }));\n  ON_CALL(*client_write_buffer_, drain(_))\n      .WillByDefault(testing::Invoke(client_write_buffer_, &MockWatermarkBuffer::baseDrain));\n  EXPECT_CALL(*client_write_buffer_, drain(_)).Times(AnyNumber());\n\n  // Randomly write 1-20 bytes and read 1-30 bytes per loop.\n  for (int i = 0; i < 50; ++i) {\n    // The bytes to read this loop.\n    int bytes_to_write = rand.random() % 20 + 1;\n    // The bytes buffered at the beginning of this loop.\n    bytes_buffered = new_bytes_buffered;\n    // Bytes to flush upstream.\n    int bytes_to_flush = std::min<int>(rand.random() % 30 + 1, bytes_to_write + bytes_buffered);\n    // The number of bytes buffered at the end of this loop.\n    new_bytes_buffered = bytes_buffered + bytes_to_write - bytes_to_flush;\n    ENVOY_LOG_MISC(trace,\n                   \"Loop iteration {} bytes_to_write {} bytes_to_flush {} bytes_buffered is {} and \"\n                   \"will be be {}\",\n                   i, bytes_to_write, bytes_to_flush, bytes_buffered, new_bytes_buffered);\n\n    std::string data(bytes_to_write, 'a');\n    Buffer::OwnedImpl 
buffer_to_write(data);\n\n    // If the current bytes buffered plus the bytes we write this loop go over\n    // the watermark and we're not currently above, we will get a callback for\n    // going above.\n    if (bytes_to_write + bytes_buffered > 11 && is_below) {\n      ENVOY_LOG_MISC(trace, \"Expect onAboveWriteBufferHighWatermark\");\n      EXPECT_CALL(client_callbacks_, onAboveWriteBufferHighWatermark());\n      is_below = false;\n      is_above = true;\n    }\n    // If after the bytes are flushed upstream the number of bytes remaining is\n    // below the low watermark and the bytes were not previously below the low\n    // watermark, expect the callback for going below.\n    if (new_bytes_buffered <= 5 && is_above) {\n      ENVOY_LOG_MISC(trace, \"Expect onBelowWriteBufferLowWatermark\");\n      EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark());\n      is_below = true;\n      is_above = false;\n    }\n\n    // Do the actual work. Write |buffer_to_write| bytes to the connection and\n    // drain |bytes_to_flush| before having writev syscall fail with EAGAIN\n    EXPECT_CALL(*client_write_buffer_, move(_))\n        .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove));\n    EXPECT_CALL(os_sys_calls, writev(_, _, _))\n        .WillOnce(Invoke([&](os_fd_t, const iovec*, int) -> Api::SysCallSizeResult {\n          client_write_buffer_->drain(bytes_to_flush);\n          return {-1, SOCKET_ERROR_AGAIN};\n        }))\n        .WillRepeatedly(Invoke([&](os_fd_t, const iovec*, int) -> Api::SysCallSizeResult {\n          return {-1, SOCKET_ERROR_AGAIN};\n        }));\n\n    client_connection_->write(buffer_to_write, false);\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark()).Times(AnyNumber());\n  disconnect(true);\n}\n\nTEST_P(ConnectionImplTest, BindTest) {\n  std::string address_string = TestUtility::getIpv4Loopback();\n  if (GetParam() == 
Network::Address::IpVersion::v4) {\n    source_address_ = Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv4Instance(address_string, 0, nullptr)};\n  } else {\n    address_string = \"::1\";\n    source_address_ = Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv6Instance(address_string, 0, nullptr)};\n  }\n  setUpBasicConnection();\n  connect();\n  EXPECT_EQ(address_string, server_connection_->remoteAddress()->ip()->addressAsString());\n\n  disconnect(true);\n}\n\nTEST_P(ConnectionImplTest, BindFromSocketTest) {\n  std::string address_string = TestUtility::getIpv4Loopback();\n  Address::InstanceConstSharedPtr new_source_address;\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    new_source_address = Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv4Instance(address_string, 0, nullptr)};\n  } else {\n    address_string = \"::1\";\n    new_source_address = Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv6Instance(address_string, 0, nullptr)};\n  }\n  auto option = std::make_shared<NiceMock<MockSocketOption>>();\n  EXPECT_CALL(*option, setOption(_, Eq(envoy::config::core::v3::SocketOption::STATE_PREBIND)))\n      .WillOnce(Invoke([&](Socket& socket, envoy::config::core::v3::SocketOption::SocketState) {\n        socket.setLocalAddress(new_source_address);\n        return true;\n      }));\n\n  socket_options_ = std::make_shared<Socket::Options>();\n  socket_options_->emplace_back(std::move(option));\n  setUpBasicConnection();\n  connect();\n  EXPECT_EQ(address_string, server_connection_->remoteAddress()->ip()->addressAsString());\n\n  disconnect(true);\n}\nTEST_P(ConnectionImplTest, BindFailureTest) {\n  // Swap the constraints from BindTest to create an address family mismatch.\n  if (GetParam() == Network::Address::IpVersion::v6) {\n    const std::string address_string = TestUtility::getIpv4Loopback();\n    source_address_ = 
Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv4Instance(address_string, 0, nullptr)};\n  } else {\n    const std::string address_string = \"::1\";\n    source_address_ = Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv6Instance(address_string, 0, nullptr)};\n  }\n  dispatcher_ = api_->allocateDispatcher(\"test_thread\");\n  socket_ = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n  listener_ =\n      dispatcher_->createListener(socket_, listener_callbacks_, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  client_connection_ = dispatcher_->createClientConnection(\n      socket_->localAddress(), source_address_, Network::Test::createRawBufferSocket(), nullptr);\n\n  MockConnectionStats connection_stats;\n  client_connection_->setConnectionStats(connection_stats.toBufferStats());\n  client_connection_->addConnectionCallbacks(client_callbacks_);\n  EXPECT_CALL(connection_stats.bind_errors_, inc());\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n}\n\n// ReadOnCloseTest verifies that the read filter's onData function is invoked with available data\n// when the connection is closed.\nTEST_P(ConnectionImplTest, ReadOnCloseTest) {\n  setUpBasicConnection();\n  connect();\n\n  // Close without flush immediately invokes this callback.\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose));\n\n  const int buffer_size = 32;\n  Buffer::OwnedImpl data(std::string(buffer_size, 'a'));\n  client_connection_->write(data, false);\n  client_connection_->close(ConnectionCloseType::NoFlush);\n\n  EXPECT_CALL(*read_filter_, onNewConnection());\n  EXPECT_CALL(*read_filter_, onData(_, _))\n      .Times(1)\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> FilterStatus {\n        EXPECT_EQ(buffer_size, data.length());\n        return 
FilterStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// EmptyReadOnCloseTest verifies that the read filter's onData function is not invoked on empty\n// read events due to connection closure.\nTEST_P(ConnectionImplTest, EmptyReadOnCloseTest) {\n  setUpBasicConnection();\n  connect();\n\n  // Write some data and verify that the read filter's onData callback is invoked exactly once.\n  const int buffer_size = 32;\n  Buffer::OwnedImpl data(std::string(buffer_size, 'a'));\n  EXPECT_CALL(*read_filter_, onNewConnection());\n  EXPECT_CALL(*read_filter_, onData(_, _))\n      .Times(1)\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> FilterStatus {\n        EXPECT_EQ(buffer_size, data.length());\n        dispatcher_->exit();\n        return FilterStatus::StopIteration;\n      }));\n  client_connection_->write(data, false);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  disconnect(true);\n}\n\n// Test that a FlushWrite close immediately triggers a close after the write buffer is flushed.\nTEST_P(ConnectionImplTest, FlushWriteCloseTest) {\n  setUpBasicConnection();\n  connect();\n\n  InSequence s1;\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  server_connection_->setDelayedCloseTimeout(std::chrono::milliseconds(100));\n\n  std::shared_ptr<MockReadFilter> client_read_filter(new NiceMock<MockReadFilter>());\n  client_connection_->addReadFilter(client_read_filter);\n\n  NiceMockConnectionStats stats;\n  server_connection_->setConnectionStats(stats.toBufferStats());\n\n  Buffer::OwnedImpl data(\"data\");\n  server_connection_->write(data, false);\n\n  // Server connection flushes the write and immediately closes the socket.\n  // There shouldn't be a read/close race here (see issue #2929), since the client is blocked 
on\n  // reading and the connection should close gracefully via FIN.\n\n  EXPECT_CALL(stats.delayed_close_timeouts_, inc()).Times(0);\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)).Times(1);\n  EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"data\"), false))\n      .Times(1)\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterStatus {\n        time_system_.setMonotonicTime(std::chrono::milliseconds(50));\n        dispatcher_->exit();\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)).Times(1);\n  server_connection_->close(ConnectionCloseType::FlushWrite);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Test that a FlushWriteAndDelay close causes Envoy to flush the write and wait for the\n// client/peer to close (until a configured timeout which is not expected to trigger in this\n// test).\n//\n// libevent does not provide early close notifications on the currently supported non-Linux\n// builds, so the server connection is never notified of the close. 
For now, we have chosen to\n// disable tests that rely on this behavior on macOS and Windows (see\n// https://github.com/envoyproxy/envoy/pull/4299).\n#if !defined(__APPLE__) && !defined(WIN32)\nTEST_P(ConnectionImplTest, FlushWriteAndDelayCloseTest) {\n  setUpBasicConnection();\n  connect();\n\n  InSequence s1;\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  server_connection_->setDelayedCloseTimeout(std::chrono::milliseconds(100));\n\n  std::shared_ptr<MockReadFilter> client_read_filter(new NiceMock<MockReadFilter>());\n  client_connection_->addReadFilter(client_read_filter);\n\n  NiceMockConnectionStats stats;\n  server_connection_->setConnectionStats(stats.toBufferStats());\n\n  Buffer::OwnedImpl data(\"Connection: Close\");\n  server_connection_->write(data, false);\n\n  EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"Connection: Close\"), false))\n      .Times(1)\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterStatus {\n        // Advance time by 50ms; delayed close timer should _not_ trigger.\n        time_system_.setMonotonicTime(std::chrono::milliseconds(50));\n        client_connection_->close(ConnectionCloseType::NoFlush);\n        return FilterStatus::StopIteration;\n      }));\n\n  // Client closes the connection so delayed close timer on the server conn should not fire.\n  EXPECT_CALL(stats.delayed_close_timeouts_, inc()).Times(0);\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)).Times(1);\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .Times(1)\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n  server_connection_->close(ConnectionCloseType::FlushWriteAndDelay);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n#endif\n\n// Test that a FlushWriteAndDelay close triggers a timeout which forces Envoy to close the\n// connection when a client has not issued a close within the configured 
interval.\nTEST_P(ConnectionImplTest, FlushWriteAndDelayCloseTimerTriggerTest) {\n  setUpBasicConnection();\n  connect();\n\n  InSequence s1;\n\n  // This timer will be forced to trigger by ensuring time advances by >50ms during the test.\n  server_connection_->setDelayedCloseTimeout(std::chrono::milliseconds(50));\n\n  std::shared_ptr<MockReadFilter> client_read_filter(new NiceMock<MockReadFilter>());\n  client_connection_->addReadFilter(client_read_filter);\n\n  NiceMockConnectionStats stats;\n  server_connection_->setConnectionStats(stats.toBufferStats());\n\n  Buffer::OwnedImpl data(\"Connection: Close\");\n  server_connection_->write(data, false);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n\n  // The client _will not_ close the connection. Instead, expect the delayed close timer to\n  // trigger on the server connection.\n  EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"Connection: Close\"), false))\n      .Times(1)\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterStatus {\n        time_system_.setMonotonicTime(std::chrono::milliseconds(100));\n        return FilterStatus::StopIteration;\n      }));\n  server_connection_->close(ConnectionCloseType::FlushWriteAndDelay);\n  EXPECT_CALL(stats.delayed_close_timeouts_, inc()).Times(1);\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)).Times(1);\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .Times(1)\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Test that a close(FlushWrite) after a delayed close timer has been enabled via\n// close(FlushWriteAndDelay) will trigger a socket close after the flush is complete.\nTEST_P(ConnectionImplTest, FlushWriteAfterFlushWriteAndDelayWithPendingWrite) {\n  setUpBasicConnection();\n  connect();\n\n  InSequence s1;\n  // The actual timeout is insignificant, we just need to enable 
delayed close processing by\n  // setting it to > 0.\n  server_connection_->setDelayedCloseTimeout(std::chrono::milliseconds(50));\n\n  std::shared_ptr<MockReadFilter> client_read_filter(new NiceMock<MockReadFilter>());\n  client_connection_->addReadFilter(client_read_filter);\n  NiceMockConnectionStats stats;\n  server_connection_->setConnectionStats(stats.toBufferStats());\n\n  Buffer::OwnedImpl data(\"Connection: Close\");\n  server_connection_->write(data, false);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n\n  // The delayed close timer will be enabled by this call. Data in the write buffer hasn't been\n  // flushed yet since the dispatcher has not run.\n  server_connection_->close(ConnectionCloseType::FlushWriteAndDelay);\n  // The timer won't be disabled but this close() overwrites the delayed close state such that a\n  // successful flush will immediately close the socket.\n  server_connection_->close(ConnectionCloseType::FlushWrite);\n\n  // The socket close will happen as a result of the write flush and not due to the delayed close\n  // timer triggering.\n  EXPECT_CALL(stats.delayed_close_timeouts_, inc()).Times(0);\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)).Times(1);\n  EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"Connection: Close\"), false))\n      .Times(1)\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterStatus {\n        time_system_.setMonotonicTime(std::chrono::milliseconds(100));\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .Times(1)\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Test that a close(FlushWrite) triggers an immediate close when a delayed close timer has been\n// enabled via a prior close(FlushWriteAndDelay).\nTEST_P(ConnectionImplTest, 
FlushWriteAfterFlushWriteAndDelayWithoutPendingWrite) {\n  setUpBasicConnection();\n  connect();\n\n  InSequence s1;\n  // The actual timeout is insignificant, we just need to enable delayed close processing by\n  // setting it to > 0.\n  server_connection_->setDelayedCloseTimeout(std::chrono::milliseconds(50));\n\n  std::shared_ptr<MockReadFilter> client_read_filter(new NiceMock<MockReadFilter>());\n  client_connection_->addReadFilter(client_read_filter);\n  NiceMockConnectionStats stats;\n  server_connection_->setConnectionStats(stats.toBufferStats());\n\n  Buffer::OwnedImpl data(\"Connection: Close\");\n  server_connection_->write(data, false);\n\n  server_connection_->close(ConnectionCloseType::FlushWriteAndDelay);\n  EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"Connection: Close\"), false))\n      .Times(1)\n      .WillOnce(InvokeWithoutArgs([&]() -> FilterStatus {\n        dispatcher_->exit();\n        return FilterStatus::StopIteration;\n      }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  // The write buffer has been flushed and a delayed close timer has been set. 
The socket close\n  // will happen as part of the close() since the timeout is no longer required.\n  EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)).Times(1);\n  server_connection_->close(ConnectionCloseType::FlushWrite);\n  EXPECT_CALL(stats.delayed_close_timeouts_, inc()).Times(0);\n  EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n      .Times(1)\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Test that delayed close processing can be disabled by setting the delayed close timeout\n// interval to 0.\nTEST_P(ConnectionImplTest, FlushWriteAndDelayConfigDisabledTest) {\n  InSequence s1;\n\n  NiceMock<MockConnectionCallbacks> callbacks;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  EXPECT_CALL(dispatcher.buffer_factory_, create_(_, _, _))\n      .WillRepeatedly(Invoke([](std::function<void()> below_low, std::function<void()> above_high,\n                                std::function<void()> above_overflow) -> Buffer::Instance* {\n        return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow);\n      }));\n  IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>(0);\n  std::unique_ptr<Network::ConnectionImpl> server_connection(new Network::ConnectionImpl(\n      dispatcher, std::make_unique<ConnectionSocketImpl>(std::move(io_handle), nullptr, nullptr),\n      std::make_unique<NiceMock<MockTransportSocket>>(), stream_info_, true));\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n\n  // Ensure the delayed close timer is not created when the delayedCloseTimeout config value is\n  // set to 0.\n  server_connection->setDelayedCloseTimeout(std::chrono::milliseconds(0));\n  EXPECT_CALL(dispatcher, createTimer_(_)).Times(0);\n\n  NiceMockConnectionStats stats;\n  server_connection->setConnectionStats(stats.toBufferStats());\n\n  EXPECT_CALL(stats.delayed_close_timeouts_, 
inc()).Times(0);\n  server_connection->close(ConnectionCloseType::FlushWriteAndDelay);\n  // Advance time by a value larger than the delayed close timeout default (1000ms). This would\n  // trigger the delayed close timer callback if set.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10000));\n\n  // Since the delayed close timer never triggers, the connection never closes. Close it here to\n  // end the test cleanly due to the (fd == -1) assert in ~ConnectionImpl().\n  server_connection->close(ConnectionCloseType::NoFlush);\n}\n\n// Test that the delayed close timer is reset while write flushes are happening when a connection\n// is in delayed close mode.\nTEST_P(ConnectionImplTest, DelayedCloseTimerResetWithPendingWriteBufferFlushes) {\n  ConnectionMocks mocks = createConnectionMocks();\n  MockTransportSocket* transport_socket = mocks.transport_socket_.get();\n  IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>(0);\n  auto server_connection = std::make_unique<Network::ConnectionImpl>(\n      *mocks.dispatcher_,\n      std::make_unique<ConnectionSocketImpl>(std::move(io_handle), nullptr, nullptr),\n      std::move(mocks.transport_socket_), stream_info_, true);\n\n#ifndef NDEBUG\n  // Ignore timer enabled() calls used to check timer state in ASSERTs.\n  EXPECT_CALL(*mocks.timer_, enabled()).Times(AnyNumber());\n#endif\n\n  InSequence s1;\n  // The actual timeout is insignificant, we just need to enable delayed close processing by\n  // setting it to > 0.\n  auto timeout = std::chrono::milliseconds(100);\n  server_connection->setDelayedCloseTimeout(timeout);\n\n  EXPECT_CALL(*mocks.file_event_, activate(Event::FileReadyType::Write))\n      .WillOnce(Invoke(*mocks.file_ready_cb_));\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"data\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult {\n        // Do not drain the buffer and return 0 bytes processed to simulate backpressure.\n        return 
IoResult{PostIoAction::KeepOpen, 0, false};\n      }));\n  Buffer::OwnedImpl data(\"data\");\n  server_connection->write(data, false);\n\n  EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1);\n  server_connection->close(ConnectionCloseType::FlushWriteAndDelay);\n\n  // The write ready event cb (ConnectionImpl::onWriteReady()) will reset the timer to its\n  // original timeout value to avoid triggering while the write buffer is being actively flushed.\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"data\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult {\n        // Partial flush.\n        uint64_t bytes_drained = 1;\n        buffer.drain(bytes_drained);\n        return IoResult{PostIoAction::KeepOpen, bytes_drained, false};\n      }));\n  EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1);\n  (*mocks.file_ready_cb_)(Event::FileReadyType::Write);\n\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"ata\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult {\n        // Flush the entire buffer.\n        uint64_t bytes_drained = buffer.length();\n        buffer.drain(buffer.length());\n        return IoResult{PostIoAction::KeepOpen, bytes_drained, false};\n      }));\n  EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1);\n  (*mocks.file_ready_cb_)(Event::FileReadyType::Write);\n\n  // Force the delayed close timeout to trigger so the connection is cleaned up.\n  mocks.timer_->invokeCallback();\n}\n\n// Test that the delayed close timer is not reset by spurious fd Write events that either consume 0\n// bytes from the output buffer or are delivered after close(FlushWriteAndDelay).\nTEST_P(ConnectionImplTest, IgnoreSpuriousFdWriteEventsDuringFlushWriteAndDelay) {\n  ConnectionMocks mocks = createConnectionMocks();\n  MockTransportSocket* transport_socket = mocks.transport_socket_.get();\n  IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>(0);\n  auto 
server_connection = std::make_unique<Network::ConnectionImpl>(\n      *mocks.dispatcher_,\n      std::make_unique<ConnectionSocketImpl>(std::move(io_handle), nullptr, nullptr),\n      std::move(mocks.transport_socket_), stream_info_, true);\n\n#ifndef NDEBUG\n  // Ignore timer enabled() calls used to check timer state in ASSERTs.\n  EXPECT_CALL(*mocks.timer_, enabled()).Times(AnyNumber());\n#endif\n\n  InSequence s1;\n  // The actual timeout is insignificant, we just need to enable delayed close processing by\n  // setting it to > 0.\n  auto timeout = std::chrono::milliseconds(100);\n  server_connection->setDelayedCloseTimeout(timeout);\n\n  EXPECT_CALL(*mocks.file_event_, activate(Event::FileReadyType::Write))\n      .WillOnce(Invoke(*mocks.file_ready_cb_));\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"data\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult {\n        // Do not drain the buffer and return 0 bytes processed to simulate backpressure.\n        return IoResult{PostIoAction::KeepOpen, 0, false};\n      }));\n  Buffer::OwnedImpl data(\"data\");\n  server_connection->write(data, false);\n\n  EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1);\n  server_connection->close(ConnectionCloseType::FlushWriteAndDelay);\n\n  // The write ready event cb (ConnectionImpl::onWriteReady()) will reset the timer to its\n  // original timeout value to avoid triggering while the write buffer is being actively flushed.\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"data\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult {\n        // Partial flush.\n        uint64_t bytes_drained = 1;\n        buffer.drain(bytes_drained);\n        return IoResult{PostIoAction::KeepOpen, bytes_drained, false};\n      }));\n  EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1);\n  (*mocks.file_ready_cb_)(Event::FileReadyType::Write);\n\n  // Handle a write event and drain 0 bytes from the 
buffer. Verify that the timer is not reset.\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"ata\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult {\n        // Don't consume any bytes.\n        return IoResult{PostIoAction::KeepOpen, 0, false};\n      }));\n  EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(0);\n  (*mocks.file_ready_cb_)(Event::FileReadyType::Write);\n\n  // Handle a write event and drain the remainder of the buffer. Verify that the timer is reset.\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"ata\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult {\n        // Flush the entire buffer.\n        ASSERT(buffer.length() > 0);\n        uint64_t bytes_drained = buffer.length();\n        buffer.drain(buffer.length());\n        EXPECT_EQ(server_connection->state(), Connection::State::Closing);\n        return IoResult{PostIoAction::KeepOpen, bytes_drained, false};\n      }));\n  EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1);\n  (*mocks.file_ready_cb_)(Event::FileReadyType::Write);\n\n  // Handle a write event after entering the half-closed state. Verify that the timer is not reset\n  // because write consumed 0 bytes from the empty buffer.\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult {\n        EXPECT_EQ(server_connection->state(), Connection::State::Closing);\n        return IoResult{PostIoAction::KeepOpen, 0, false};\n      }));\n  EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(0);\n  (*mocks.file_ready_cb_)(Event::FileReadyType::Write);\n\n  // Handle a write event that somehow drains bytes from an empty output buffer. 
Since\n  // some bytes were consumed, the timer is reset.\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult {\n        EXPECT_EQ(server_connection->state(), Connection::State::Closing);\n        return IoResult{PostIoAction::KeepOpen, 1, false};\n      }));\n  EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1);\n  (*mocks.file_ready_cb_)(Event::FileReadyType::Write);\n\n  // Force the delayed close timeout to trigger so the connection is cleaned up.\n  mocks.timer_->invokeCallback();\n}\n\n// Test that tearing down the connection will disable the delayed close timer.\nTEST_P(ConnectionImplTest, DelayedCloseTimeoutDisableOnSocketClose) {\n  ConnectionMocks mocks = createConnectionMocks();\n  MockTransportSocket* transport_socket = mocks.transport_socket_.get();\n  IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>(0);\n  auto server_connection = std::make_unique<Network::ConnectionImpl>(\n      *mocks.dispatcher_,\n      std::make_unique<ConnectionSocketImpl>(std::move(io_handle), nullptr, nullptr),\n      std::move(mocks.transport_socket_), stream_info_, true);\n\n  InSequence s1;\n\n  // The actual timeout is insignificant, we just need to enable delayed close processing by\n  // setting it to > 0.\n  server_connection->setDelayedCloseTimeout(std::chrono::milliseconds(100));\n\n  Buffer::OwnedImpl data(\"data\");\n  EXPECT_CALL(*mocks.file_event_, activate(Event::FileReadyType::Write))\n      .WillOnce(Invoke(*mocks.file_ready_cb_));\n  // The buffer must be drained when write() is called on the connection to allow the close() to\n  // enable the timer.\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"data\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult {\n        buffer.drain(buffer.length());\n        return IoResult{PostIoAction::KeepOpen, buffer.length(), false};\n      }));\n  server_connection->write(data, 
false);\n  EXPECT_CALL(*mocks.timer_, enableTimer(_, _)).Times(1);\n  // Enable the delayed close timer.\n  server_connection->close(ConnectionCloseType::FlushWriteAndDelay);\n  EXPECT_CALL(*mocks.timer_, disableTimer()).Times(1);\n  // This close() will call closeSocket(), which should disable the timer to avoid triggering it\n  // after the connection's data structures have been reset.\n  server_connection->close(ConnectionCloseType::NoFlush);\n}\n\n// Test that the delayed close timeout callback is resilient to connection teardown edge cases.\nTEST_P(ConnectionImplTest, DelayedCloseTimeoutNullStats) {\n  ConnectionMocks mocks = createConnectionMocks();\n  MockTransportSocket* transport_socket = mocks.transport_socket_.get();\n  IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>(0);\n  auto server_connection = std::make_unique<Network::ConnectionImpl>(\n      *mocks.dispatcher_,\n      std::make_unique<ConnectionSocketImpl>(std::move(io_handle), nullptr, nullptr),\n      std::move(mocks.transport_socket_), stream_info_, true);\n\n  InSequence s1;\n\n  // The actual timeout is insignificant, we just need to enable delayed close processing by\n  // setting it to > 0.\n  server_connection->setDelayedCloseTimeout(std::chrono::milliseconds(100));\n\n  // NOTE: Avoid providing stats storage to the connection via setConnectionStats(). 
This\n  // guarantees that connection_stats_ is a nullptr and that the callback resiliency validation\n  // below tests that edge case.\n\n  Buffer::OwnedImpl data(\"data\");\n  EXPECT_CALL(*mocks.file_event_, activate(Event::FileReadyType::Write))\n      .WillOnce(Invoke(*mocks.file_ready_cb_));\n  // The buffer must be drained when write() is called on the connection to allow the close() to\n  // enable the timer.\n  EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(\"data\"), _))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult {\n        buffer.drain(buffer.length());\n        return IoResult{PostIoAction::KeepOpen, buffer.length(), false};\n      }));\n  server_connection->write(data, false);\n\n  EXPECT_CALL(*mocks.timer_, enableTimer(_, _)).Times(1);\n  server_connection->close(ConnectionCloseType::FlushWriteAndDelay);\n  EXPECT_CALL(*mocks.timer_, disableTimer()).Times(1);\n  // The following close() will call closeSocket() and reset internal data structures such as\n  // stats.\n  server_connection->close(ConnectionCloseType::NoFlush);\n}\n\nclass FakeReadFilter : public Network::ReadFilter {\npublic:\n  FakeReadFilter() = default;\n  ~FakeReadFilter() override {\n    EXPECT_TRUE(callbacks_ != nullptr);\n    // The purpose is to verify that when FilterManger is destructed, ConnectionSocketImpl is not\n    // destructed, and ConnectionSocketImpl can still be accessed via ReadFilterCallbacks.\n    EXPECT_TRUE(callbacks_->connection().state() != Network::Connection::State::Open);\n  }\n\n  Network::FilterStatus onData(Buffer::Instance& data, bool) override {\n    data.drain(data.length());\n    return Network::FilterStatus::Continue;\n  }\n\n  Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; }\n\n  void initializeReadFilterCallbacks(ReadFilterCallbacks& callbacks) override {\n    callbacks_ = &callbacks;\n  }\n\nprivate:\n  ReadFilterCallbacks* callbacks_{nullptr};\n};\n\nclass 
MockTransportConnectionImplTest : public testing::Test {\npublic:\n  MockTransportConnectionImplTest() : stream_info_(dispatcher_.timeSource()) {\n    EXPECT_CALL(dispatcher_.buffer_factory_, create_(_, _, _))\n        .WillRepeatedly(Invoke([](std::function<void()> below_low, std::function<void()> above_high,\n                                  std::function<void()> above_overflow) -> Buffer::Instance* {\n          return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow);\n        }));\n\n    file_event_ = new Event::MockFileEvent;\n    EXPECT_CALL(dispatcher_, createFileEvent_(0, _, _, _))\n        .WillOnce(DoAll(SaveArg<1>(&file_ready_cb_), Return(file_event_)));\n    transport_socket_ = new NiceMock<MockTransportSocket>;\n    EXPECT_CALL(*transport_socket_, setTransportSocketCallbacks(_))\n        .WillOnce(Invoke([this](TransportSocketCallbacks& callbacks) {\n          transport_socket_callbacks_ = &callbacks;\n        }));\n    IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>(0);\n    connection_ = std::make_unique<ConnectionImpl>(\n        dispatcher_, std::make_unique<ConnectionSocketImpl>(std::move(io_handle), nullptr, nullptr),\n        TransportSocketPtr(transport_socket_), stream_info_, true);\n    connection_->addConnectionCallbacks(callbacks_);\n  }\n\n  ~MockTransportConnectionImplTest() override { connection_->close(ConnectionCloseType::NoFlush); }\n\n  // This may be invoked for doWrite() on the transport to simulate all the data\n  // being written.\n  static IoResult SimulateSuccessfulWrite(Buffer::Instance& buffer, bool) {\n    uint64_t size = buffer.length();\n    buffer.drain(size);\n    return {PostIoAction::KeepOpen, size, false};\n  }\n\n  std::unique_ptr<ConnectionImpl> connection_;\n  Event::MockDispatcher dispatcher_;\n  NiceMock<MockConnectionCallbacks> callbacks_;\n  MockTransportSocket* transport_socket_;\n  Event::MockFileEvent* file_event_;\n  Event::FileReadyCb file_ready_cb_;\n  
TransportSocketCallbacks* transport_socket_callbacks_;\n  StreamInfo::StreamInfoImpl stream_info_;\n};\n\n// The purpose of this case is to verify the destructor order of the object.\n// FilterManager relies on ConnectionSocketImpl, so the FilterManager can be\n// destructed after the ConnectionSocketImpl is destructed.\n//\n// Ref: https://github.com/envoyproxy/envoy/issues/5313\nTEST_F(MockTransportConnectionImplTest, ObjectDestructOrder) {\n  connection_->addReadFilter(std::make_shared<Network::FakeReadFilter>());\n  connection_->enableHalfClose(true);\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .Times(2)\n      .WillRepeatedly(Return(IoResult{PostIoAction::KeepOpen, 0, true}));\n  file_ready_cb_(Event::FileReadyType::Read);\n  file_ready_cb_(Event::FileReadyType::Read);\n}\n\n// Test that BytesSentCb is invoked at the correct times\nTEST_F(MockTransportConnectionImplTest, BytesSentCallback) {\n  uint64_t bytes_sent = 0;\n  uint64_t cb_called = 0;\n  connection_->addBytesSentCallback([&](uint64_t arg) {\n    cb_called++;\n    bytes_sent = arg;\n  });\n\n  // 100 bytes were sent; expect BytesSent event\n  EXPECT_CALL(*transport_socket_, doWrite(_, _))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 100, false}));\n  file_ready_cb_(Event::FileReadyType::Write);\n  EXPECT_EQ(cb_called, 1);\n  EXPECT_EQ(bytes_sent, 100);\n  cb_called = false;\n  bytes_sent = 0;\n\n  // 0 bytes were sent; no event\n  EXPECT_CALL(*transport_socket_, doWrite(_, _))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  file_ready_cb_(Event::FileReadyType::Write);\n  EXPECT_EQ(cb_called, 0);\n\n  // Reading should not cause BytesSent\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 1, false}));\n  file_ready_cb_(Event::FileReadyType::Read);\n  EXPECT_EQ(cb_called, 0);\n\n  // Closed event should not raise a BytesSent event (but does raise RemoteClose)\n  EXPECT_CALL(callbacks_, 
onEvent(ConnectionEvent::RemoteClose));\n  file_ready_cb_(Event::FileReadyType::Closed);\n  EXPECT_EQ(cb_called, 0);\n}\n\n// Make sure that multiple registered callbacks all get called\nTEST_F(MockTransportConnectionImplTest, BytesSentMultiple) {\n  uint64_t cb_called1 = 0;\n  uint64_t cb_called2 = 0;\n  uint64_t bytes_sent1 = 0;\n  uint64_t bytes_sent2 = 0;\n  connection_->addBytesSentCallback([&](uint64_t arg) {\n    cb_called1++;\n    bytes_sent1 = arg;\n  });\n\n  connection_->addBytesSentCallback([&](uint64_t arg) {\n    cb_called2++;\n    bytes_sent2 = arg;\n  });\n\n  EXPECT_CALL(*transport_socket_, doWrite(_, _))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 100, false}));\n  file_ready_cb_(Event::FileReadyType::Write);\n  EXPECT_EQ(cb_called1, 1);\n  EXPECT_EQ(cb_called2, 1);\n  EXPECT_EQ(bytes_sent1, 100);\n  EXPECT_EQ(bytes_sent2, 100);\n}\n\n// Test that if a callback closes the connection, further callbacks are not called.\nTEST_F(MockTransportConnectionImplTest, BytesSentCloseInCallback) {\n  // Order is not defined, so register two callbacks that both close the connection. 
Only\n  // one of them should be called.\n  uint64_t cb_called = 0;\n  Connection::BytesSentCb cb = [&](uint64_t) {\n    cb_called++;\n    connection_->close(ConnectionCloseType::NoFlush);\n  };\n  connection_->addBytesSentCallback(cb);\n  connection_->addBytesSentCallback(cb);\n\n  EXPECT_CALL(*transport_socket_, doWrite(_, _))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 100, false}));\n  file_ready_cb_(Event::FileReadyType::Write);\n\n  EXPECT_EQ(cb_called, 1);\n  EXPECT_EQ(connection_->state(), Connection::State::Closed);\n}\n\n// Test that onWrite does not have end_stream set, with half-close disabled\nTEST_F(MockTransportConnectionImplTest, FullCloseWrite) {\n  const std::string val(\"some data\");\n  Buffer::OwnedImpl buffer(val);\n  EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write)).WillOnce(Invoke(file_ready_cb_));\n  EXPECT_CALL(*transport_socket_, doWrite(BufferStringEqual(val), false))\n      .WillOnce(Invoke(SimulateSuccessfulWrite));\n  connection_->write(buffer, false);\n}\n\n// Test that onWrite has end_stream set correctly, with half-close enabled\nTEST_F(MockTransportConnectionImplTest, HalfCloseWrite) {\n  connection_->enableHalfClose(true);\n  EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write))\n      .WillRepeatedly(Invoke(file_ready_cb_));\n\n  const std::string val(\"some data\");\n  Buffer::OwnedImpl buffer(val);\n  EXPECT_CALL(*transport_socket_, doWrite(BufferStringEqual(val), false))\n      .WillOnce(Invoke(SimulateSuccessfulWrite));\n  connection_->write(buffer, false);\n\n  EXPECT_CALL(*transport_socket_, doWrite(_, true)).WillOnce(Invoke(SimulateSuccessfulWrite));\n  connection_->write(buffer, true);\n}\n\nTEST_F(MockTransportConnectionImplTest, ReadMultipleEndStream) {\n  std::shared_ptr<MockReadFilter> read_filter(new NiceMock<MockReadFilter>());\n  connection_->enableHalfClose(true);\n  connection_->addReadFilter(read_filter);\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .Times(2)\n     
 .WillRepeatedly(Return(IoResult{PostIoAction::KeepOpen, 0, true}));\n  EXPECT_CALL(*read_filter, onData(_, true)).Times(1);\n  file_ready_cb_(Event::FileReadyType::Read);\n  file_ready_cb_(Event::FileReadyType::Read);\n}\n\n// Test that if both sides half-close, the connection is closed, with the read half-close coming\n// first.\nTEST_F(MockTransportConnectionImplTest, BothHalfCloseReadFirst) {\n  std::shared_ptr<MockReadFilter> read_filter(new NiceMock<MockReadFilter>());\n  connection_->enableHalfClose(true);\n  connection_->addReadFilter(read_filter);\n\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, true}));\n  file_ready_cb_(Event::FileReadyType::Read);\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_CALL(*transport_socket_, doWrite(_, true))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write)).WillOnce(Invoke(file_ready_cb_));\n  EXPECT_CALL(callbacks_, onEvent(ConnectionEvent::LocalClose));\n  connection_->write(buffer, true);\n}\n\n// Test that if both sides half-close, the connection is closed, with the write half-close coming\n// first.\nTEST_F(MockTransportConnectionImplTest, BothHalfCloseWriteFirst) {\n  std::shared_ptr<MockReadFilter> read_filter(new NiceMock<MockReadFilter>());\n  connection_->enableHalfClose(true);\n  connection_->addReadFilter(read_filter);\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_CALL(*transport_socket_, doWrite(_, true))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write)).WillOnce(Invoke(file_ready_cb_));\n  connection_->write(buffer, true);\n\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, true}));\n  EXPECT_CALL(callbacks_, onEvent(ConnectionEvent::RemoteClose));\n  file_ready_cb_(Event::FileReadyType::Read);\n}\n\n// Test that if both sides 
half-close, but writes have not yet been written to the Transport, that\n// the connection closes only when the writes complete flushing. The write half-close happens\n// first.\nTEST_F(MockTransportConnectionImplTest, BothHalfCloseWritesNotFlushedWriteFirst) {\n  std::shared_ptr<MockReadFilter> read_filter(new NiceMock<MockReadFilter>());\n  connection_->enableHalfClose(true);\n  connection_->addReadFilter(read_filter);\n\n  Buffer::OwnedImpl buffer(\"data\");\n  EXPECT_CALL(*transport_socket_, doWrite(_, true))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write)).WillOnce(Invoke(file_ready_cb_));\n  connection_->write(buffer, true);\n\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, true}));\n  file_ready_cb_(Event::FileReadyType::Read);\n\n  EXPECT_CALL(callbacks_, onEvent(ConnectionEvent::LocalClose));\n  EXPECT_CALL(*transport_socket_, doWrite(_, true)).WillOnce(Invoke(SimulateSuccessfulWrite));\n  file_ready_cb_(Event::FileReadyType::Write);\n}\n\n// Test that if both sides half-close, but writes have not yet been written to the Transport, that\n// the connection closes only when the writes complete flushing. 
The read half-close happens\n// first.\nTEST_F(MockTransportConnectionImplTest, BothHalfCloseWritesNotFlushedReadFirst) {\n  std::shared_ptr<MockReadFilter> read_filter(new NiceMock<MockReadFilter>());\n  connection_->enableHalfClose(true);\n  connection_->addReadFilter(read_filter);\n\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, true}));\n  file_ready_cb_(Event::FileReadyType::Read);\n\n  Buffer::OwnedImpl buffer(\"data\");\n  EXPECT_CALL(*transport_socket_, doWrite(_, true))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write)).WillOnce(Invoke(file_ready_cb_));\n  connection_->write(buffer, true);\n\n  EXPECT_CALL(*transport_socket_, doWrite(_, true))\n      .WillOnce(Invoke([](Buffer::Instance& data, bool) -> IoResult {\n        uint64_t len = data.length();\n        data.drain(len);\n        return {PostIoAction::KeepOpen, len, false};\n      }));\n  EXPECT_CALL(callbacks_, onEvent(ConnectionEvent::LocalClose));\n  file_ready_cb_(Event::FileReadyType::Write);\n}\n\n// Test that if end_stream is raised, but a filter stops iteration, that end_stream\n// propagates correctly.\nTEST_F(MockTransportConnectionImplTest, ReadEndStreamStopIteration) {\n  const std::string val(\"a\");\n  std::shared_ptr<MockReadFilter> read_filter1(new StrictMock<MockReadFilter>());\n  std::shared_ptr<MockReadFilter> read_filter2(new StrictMock<MockReadFilter>());\n  connection_->enableHalfClose(true);\n  connection_->addReadFilter(read_filter1);\n  connection_->addReadFilter(read_filter2);\n\n  EXPECT_CALL(*read_filter1, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*read_filter2, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .WillOnce(Invoke([val](Buffer::Instance& buffer) -> IoResult {\n        buffer.add(val.c_str(), val.size());\n        return 
{PostIoAction::KeepOpen, val.size(), true};\n      }));\n\n  EXPECT_CALL(*read_filter1, onData(BufferStringEqual(val), true))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  file_ready_cb_(Event::FileReadyType::Read);\n\n  EXPECT_CALL(*read_filter2, onData(BufferStringEqual(val), true))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  read_filter1->callbacks_->continueReading();\n}\n\n// Test that if end_stream is written, but a filter stops iteration, that end_stream\n// propagates correctly.\nTEST_F(MockTransportConnectionImplTest, WriteEndStreamStopIteration) {\n  const std::string val(\"a\");\n  std::shared_ptr<MockWriteFilter> write_filter1(new StrictMock<MockWriteFilter>());\n  std::shared_ptr<MockWriteFilter> write_filter2(new StrictMock<MockWriteFilter>());\n  connection_->enableHalfClose(true);\n  connection_->addWriteFilter(write_filter2);\n  connection_->addWriteFilter(write_filter1);\n\n  EXPECT_CALL(*write_filter1, onWrite(BufferStringEqual(val), true))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  Buffer::OwnedImpl buffer(val);\n  connection_->write(buffer, true);\n\n  EXPECT_CALL(*write_filter1, onWrite(BufferStringEqual(val), true))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*write_filter2, onWrite(BufferStringEqual(val), true))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write));\n  connection_->write(buffer, true);\n}\n\n// Validate that when the transport signals ConnectionEvent::Connected, that we\n// check for pending write buffer content.\nTEST_F(MockTransportConnectionImplTest, WriteReadyOnConnected) {\n  InSequence s;\n\n  // Queue up some data in write buffer, simulating what happens in SSL handshake.\n  const std::string val(\"some data\");\n  Buffer::OwnedImpl buffer(val);\n  EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write)).WillOnce(Invoke(file_ready_cb_));\n  EXPECT_CALL(*transport_socket_, 
doWrite(BufferStringEqual(val), false))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  connection_->write(buffer, false);\n\n  // A read event happens, resulting in handshake completion and\n  // raiseEvent(Network::ConnectionEvent::Connected). Since we have data queued\n  // in the write buffer, we should see a doWrite with this data.\n  EXPECT_CALL(*transport_socket_, doRead(_)).WillOnce(InvokeWithoutArgs([this] {\n    transport_socket_callbacks_->raiseEvent(Network::ConnectionEvent::Connected);\n    return IoResult{PostIoAction::KeepOpen, 0, false};\n  }));\n  EXPECT_CALL(*transport_socket_, doWrite(BufferStringEqual(val), false))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  file_ready_cb_(Event::FileReadyType::Read);\n  EXPECT_CALL(*transport_socket_, doWrite(_, true))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, true}));\n}\n\n// Test the interface used by external consumers.\nTEST_F(MockTransportConnectionImplTest, FlushWriteBuffer) {\n  InSequence s;\n\n  // Queue up some data in write buffer.\n  const std::string val(\"some data\");\n  Buffer::OwnedImpl buffer(val);\n  EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write)).WillOnce(Invoke(file_ready_cb_));\n  EXPECT_CALL(*transport_socket_, doWrite(BufferStringEqual(val), false))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  connection_->write(buffer, false);\n\n  // A read event triggers underlying socket to ask for more data.\n  EXPECT_CALL(*transport_socket_, doRead(_)).WillOnce(InvokeWithoutArgs([this] {\n    transport_socket_callbacks_->flushWriteBuffer();\n    return IoResult{PostIoAction::KeepOpen, 0, false};\n  }));\n  EXPECT_CALL(*transport_socket_, doWrite(BufferStringEqual(val), false))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  file_ready_cb_(Event::FileReadyType::Read);\n  EXPECT_CALL(*transport_socket_, doWrite(_, true))\n      
.WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, true}));\n}\n\n// Fixture for validating behavior after a connection is closed.\nclass PostCloseConnectionImplTest : public MockTransportConnectionImplTest {\nprotected:\n  // Setup connection, single read event.\n  void initialize() {\n    connection_->addReadFilter(read_filter_);\n    connection_->setDelayedCloseTimeout(std::chrono::milliseconds(100));\n\n    EXPECT_CALL(*transport_socket_, doRead(_))\n        .WillOnce(Invoke([this](Buffer::Instance& buffer) -> IoResult {\n          buffer.add(val_.c_str(), val_.size());\n          return {PostIoAction::KeepOpen, val_.size(), false};\n        }));\n    EXPECT_CALL(*read_filter_, onNewConnection());\n    EXPECT_CALL(*read_filter_, onData(_, _));\n    file_ready_cb_(Event::FileReadyType::Read);\n  }\n\n  void writeSomeData() {\n    Buffer::OwnedImpl buffer(\"data\");\n    EXPECT_CALL(*file_event_, activate(Event::FileReadyType::Write));\n    connection_->write(buffer, false);\n  }\n\n  const std::string val_{\"a\"};\n  std::shared_ptr<MockReadFilter> read_filter_{new StrictMock<MockReadFilter>()};\n};\n\n// Test that if a read event occurs after\n// close(ConnectionCloseType::FlushWriteAndDelay), the read is not propagated to\n// a read filter.\nTEST_F(PostCloseConnectionImplTest, ReadAfterCloseFlushWriteDelayIgnored) {\n  InSequence s;\n  initialize();\n\n  // Delayed connection close.\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n  EXPECT_CALL(*file_event_, setEnabled(Event::FileReadyType::Closed));\n  connection_->close(ConnectionCloseType::FlushWriteAndDelay);\n\n  // Read event, doRead() happens on connection but no filter onData().\n  EXPECT_CALL(*read_filter_, onData(_, _)).Times(0);\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .WillOnce(Invoke([this](Buffer::Instance& buffer) -> IoResult {\n        buffer.add(val_.c_str(), val_.size());\n        return {PostIoAction::KeepOpen, val_.size(), false};\n      }));\n  
file_ready_cb_(Event::FileReadyType::Read);\n  // Deferred close.\n  EXPECT_CALL(*transport_socket_, closeSocket(_));\n}\n\n// Test that if a read event occurs after\n// close(ConnectionCloseType::FlushWriteAndDelay) with pending write data, the\n// read is not propagated to a read filter.\nTEST_F(PostCloseConnectionImplTest, ReadAfterCloseFlushWriteDelayIgnoredWithWriteData) {\n  InSequence s;\n  initialize();\n  writeSomeData();\n\n  // Delayed connection close.\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n  // With half-close semantics enabled we will not wait for early close notification.\n  // See the `Envoy::Network::ConnectionImpl::readDisable()' method for more details.\n  EXPECT_CALL(*file_event_, setEnabled(0));\n  connection_->enableHalfClose(true);\n  connection_->close(ConnectionCloseType::FlushWriteAndDelay);\n\n  // Read event, doRead() happens on connection but no filter onData().\n  EXPECT_CALL(*read_filter_, onData(_, _)).Times(0);\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .WillOnce(Invoke([this](Buffer::Instance& buffer) -> IoResult {\n        buffer.add(val_.c_str(), val_.size());\n        return {PostIoAction::KeepOpen, val_.size(), false};\n      }));\n  file_ready_cb_(Event::FileReadyType::Read);\n  // We have data written above in writeSomeData(), it will be flushed here.\n  EXPECT_CALL(*transport_socket_, doWrite(_, true))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  // Deferred close.\n  EXPECT_CALL(*transport_socket_, closeSocket(_));\n}\n\n// Test that if a read event occurs after\n// close(ConnectionCloseType::FlushWriteAndDelay) with pending write data and a\n// transport socket that canFlushClose(), the read is not propagated to a read\n// filter.\nTEST_F(PostCloseConnectionImplTest, ReadAfterCloseFlushWriteDelayIgnoredCanFlushClose) {\n  InSequence s;\n  initialize();\n  writeSomeData();\n\n  // The path of interest is when the transport socket canFlushClose().\n  ON_CALL(*transport_socket_, 
canFlushClose()).WillByDefault(Return(true));\n\n  // Delayed connection close.\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n  EXPECT_CALL(*file_event_, setEnabled(Event::FileReadyType::Write | Event::FileReadyType::Closed));\n  connection_->close(ConnectionCloseType::FlushWriteAndDelay);\n\n  // Read event, doRead() happens on connection but no filter onData().\n  EXPECT_CALL(*read_filter_, onData(_, _)).Times(0);\n  EXPECT_CALL(*transport_socket_, doRead(_))\n      .WillOnce(Invoke([this](Buffer::Instance& buffer) -> IoResult {\n        buffer.add(val_.c_str(), val_.size());\n        return {PostIoAction::KeepOpen, val_.size(), false};\n      }));\n  file_ready_cb_(Event::FileReadyType::Read);\n\n  // Deferred close.\n  EXPECT_CALL(*transport_socket_, closeSocket(_));\n}\n\n// Test that if a read event occurs after close(ConnectionCloseType::NoFlush),\n// then no read is attempted from the transport socket and hence the read is not\n// propagated to a read filter.\nTEST_F(PostCloseConnectionImplTest, NoReadAfterCloseNoFlush) {\n  InSequence s;\n  initialize();\n\n  // Immediate connection close.\n  EXPECT_CALL(*transport_socket_, closeSocket(_));\n  connection_->close(ConnectionCloseType::NoFlush);\n\n  // We don't even see a doRead(), let alone an onData() callback.\n  EXPECT_CALL(*read_filter_, onData(_, _)).Times(0);\n  EXPECT_CALL(*transport_socket_, doRead(_)).Times(0);\n  file_ready_cb_(Event::FileReadyType::Read);\n}\n\n// Test that if a read event occurs after close(ConnectionCloseType::FlushWrite),\n// then no read is attempted from the transport socket and hence the read is not\n// propagated to a read filter.\nTEST_F(PostCloseConnectionImplTest, NoReadAfterCloseFlushWrite) {\n  InSequence s;\n  initialize();\n\n  // Connection flush and close.\n  EXPECT_CALL(*transport_socket_, closeSocket(_));\n  connection_->close(ConnectionCloseType::FlushWrite);\n\n  // We don't even see a doRead(), let alone an onData() callback.\n  EXPECT_CALL(*read_filter_, 
onData(_, _)).Times(0);\n  EXPECT_CALL(*transport_socket_, doRead(_)).Times(0);\n  file_ready_cb_(Event::FileReadyType::Read);\n}\n\n// Test that if a read event occurs after close(ConnectionCloseType::FlushWrite)\n// with pending write data, then no read is attempted from the transport socket\n// and hence the read is not propagated to a read filter.\nTEST_F(PostCloseConnectionImplTest, NoReadAfterCloseFlushWriteWriteData) {\n  InSequence s;\n  initialize();\n  writeSomeData();\n\n  // Connection flush and close. We have data written above in writeSomeData(),\n  // it will be flushed here.\n  EXPECT_CALL(*transport_socket_, doWrite(_, true))\n      .WillOnce(Return(IoResult{PostIoAction::KeepOpen, 0, false}));\n  EXPECT_CALL(*transport_socket_, closeSocket(_));\n  connection_->close(ConnectionCloseType::FlushWrite);\n\n  // We don't even see a doRead(), let alone an onData() callback.\n  EXPECT_CALL(*read_filter_, onData(_, _)).Times(0);\n  EXPECT_CALL(*transport_socket_, doRead(_)).Times(0);\n  file_ready_cb_(Event::FileReadyType::Read);\n}\n\nclass ReadBufferLimitTest : public ConnectionImplTest {\npublic:\n  void readBufferLimitTest(uint32_t read_buffer_limit, uint32_t expected_chunk_size) {\n    const uint32_t buffer_size = 256 * 1024;\n    dispatcher_ = api_->allocateDispatcher(\"test_thread\");\n    socket_ = std::make_shared<Network::TcpListenSocket>(\n        Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n    listener_ =\n        dispatcher_->createListener(socket_, listener_callbacks_, true, ENVOY_TCP_BACKLOG_SIZE);\n\n    client_connection_ = dispatcher_->createClientConnection(\n        socket_->localAddress(), Network::Address::InstanceConstSharedPtr(),\n        Network::Test::createRawBufferSocket(), nullptr);\n    client_connection_->addConnectionCallbacks(client_callbacks_);\n    client_connection_->connect();\n\n    read_filter_ = std::make_shared<NiceMock<MockReadFilter>>();\n    EXPECT_CALL(listener_callbacks_, 
onAccept_(_))\n        .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n          server_connection_ = dispatcher_->createServerConnection(\n              std::move(socket), Network::Test::createRawBufferSocket(), stream_info_);\n          server_connection_->setBufferLimits(read_buffer_limit);\n          server_connection_->addReadFilter(read_filter_);\n          EXPECT_EQ(\"\", server_connection_->nextProtocol());\n          EXPECT_EQ(read_buffer_limit, server_connection_->bufferLimit());\n        }));\n\n    uint32_t filter_seen = 0;\n\n    EXPECT_CALL(*read_filter_, onNewConnection());\n    EXPECT_CALL(*read_filter_, onData(_, _))\n        .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) -> FilterStatus {\n          EXPECT_GE(expected_chunk_size, data.length());\n          filter_seen += data.length();\n          data.drain(data.length());\n          if (filter_seen == buffer_size) {\n            server_connection_->close(ConnectionCloseType::FlushWrite);\n          }\n          return FilterStatus::StopIteration;\n        }));\n\n    EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n          EXPECT_EQ(buffer_size, filter_seen);\n          dispatcher_->exit();\n        }));\n\n    Buffer::OwnedImpl data(std::string(buffer_size, 'a'));\n    client_connection_->write(data, false);\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ReadBufferLimitTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ReadBufferLimitTest, NoLimit) { readBufferLimitTest(0, 256 * 1024); 
}\n\nTEST_P(ReadBufferLimitTest, SomeLimit) {\n  const uint32_t read_buffer_limit = 32 * 1024;\n  // Envoy has soft limits, so as long as the first read is <= read_buffer_limit - 1 it will do a\n  // second read. The effective chunk size is then read_buffer_limit - 1 + MaxReadSize,\n  // which is currently 16384.\n  readBufferLimitTest(read_buffer_limit, read_buffer_limit - 1 + 16384);\n}\n\nclass TcpClientConnectionImplTest : public testing::TestWithParam<Address::IpVersion> {\nprotected:\n  TcpClientConnectionImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n};\nINSTANTIATE_TEST_SUITE_P(IpVersions, TcpClientConnectionImplTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(TcpClientConnectionImplTest, BadConnectNotConnRefused) {\n  Address::InstanceConstSharedPtr address;\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    // Connecting to 255.255.255.255 will cause a perm error and not ECONNREFUSED which is a\n    // different path in libevent. Make sure this doesn't crash.\n    address = Utility::resolveUrl(\"tcp://255.255.255.255:1\");\n  } else {\n    // IPv6 reserved multicast address.\n    address = Utility::resolveUrl(\"tcp://[ff00::]:1\");\n  }\n  ClientConnectionPtr connection =\n      dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(),\n                                          Network::Test::createRawBufferSocket(), nullptr);\n  connection->connect();\n  connection->noDelay(true);\n  connection->close(ConnectionCloseType::NoFlush);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_P(TcpClientConnectionImplTest, BadConnectConnRefused) {\n  // Connecting to an invalid port on localhost will cause ECONNREFUSED which is a different code\n  // path from other errors. 
Test this also.\n  ClientConnectionPtr connection = dispatcher_->createClientConnection(\n      Utility::resolveUrl(\n          fmt::format(\"tcp://{}:1\", Network::Test::getLoopbackAddressUrlString(GetParam()))),\n      Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr);\n  connection->connect();\n  connection->noDelay(true);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nclass PipeClientConnectionImplTest : public testing::Test {\nprotected:\n  PipeClientConnectionImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  const std::string path_{TestEnvironment::unixDomainSocketPath(\"foo\")};\n};\n\n// Validate we skip setting socket options on UDS.\nTEST_F(PipeClientConnectionImplTest, SkipSocketOptions) {\n  auto option = std::make_shared<MockSocketOption>();\n  EXPECT_CALL(*option, setOption(_, _)).Times(0);\n  auto options = std::make_shared<Socket::Options>();\n  options->emplace_back(option);\n  ClientConnectionPtr connection = dispatcher_->createClientConnection(\n      Utility::resolveUrl(\"unix://\" + path_), Network::Address::InstanceConstSharedPtr(),\n      Network::Test::createRawBufferSocket(), options);\n  connection->close(ConnectionCloseType::NoFlush);\n}\n\n// Validate we skip setting source address.\nTEST_F(PipeClientConnectionImplTest, SkipSourceAddress) {\n  ClientConnectionPtr connection = dispatcher_->createClientConnection(\n      Utility::resolveUrl(\"unix://\" + path_), Utility::resolveUrl(\"tcp://1.2.3.4:5\"),\n      Network::Test::createRawBufferSocket(), nullptr);\n  connection->close(ConnectionCloseType::NoFlush);\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/dns_impl_test.cc",
    "content": "#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/dns.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/dns_impl.h\"\n#include \"common/network/filter_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/container/node_hash_map.h\"\n#include \"ares.h\"\n#include \"ares_dns.h\"\n#include \"gtest/gtest.h\"\n\n#if !defined(WIN32)\n#include <arpa/nameser.h>\n#include <arpa/nameser_compat.h>\n#else\n#include \"nameser.h\"\n#endif\n\nusing testing::_;\nusing testing::Contains;\nusing testing::InSequence;\nusing testing::IsSupersetOf;\nusing testing::NiceMock;\nusing testing::Not;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\n// List of IP address (in human readable format).\nusing IpList = std::list<std::string>;\n// Map from hostname to IpList.\nusing HostMap = absl::node_hash_map<std::string, IpList>;\n// Map from hostname to CNAME\nusing CNameMap = absl::node_hash_map<std::string, std::string>;\n// Represents a single TestDnsServer query state and lifecycle. 
This implements\n// just enough of RFC 1035 to handle queries we generate in the tests below.\nenum class RecordType { A, AAAA };\n\nclass TestDnsServerQuery {\npublic:\n  TestDnsServerQuery(ConnectionPtr connection, const HostMap& hosts_a, const HostMap& hosts_aaaa,\n                     const CNameMap& cnames, const std::chrono::seconds& record_ttl, bool refused)\n      : connection_(std::move(connection)), hosts_a_(hosts_a), hosts_aaaa_(hosts_aaaa),\n        cnames_(cnames), record_ttl_(record_ttl), refused_(refused) {\n    connection_->addReadFilter(Network::ReadFilterSharedPtr{new ReadFilter(*this)});\n  }\n\n  ~TestDnsServerQuery() { connection_->close(ConnectionCloseType::NoFlush); }\n\n  // Utility to encode a dns string in the rfc format. Example: \\004some\\004good\\006domain\n  // RFC link: https://www.ietf.org/rfc/rfc1035.txt\n  static std::string encodeDnsName(const std::string& input) {\n    auto name_split = StringUtil::splitToken(input, \".\");\n    std::string res;\n    for (const auto& it : name_split) {\n      res += static_cast<char>(it.size());\n      const std::string part{it};\n      res.append(part);\n    }\n    return res;\n  }\n\nprivate:\n  struct ReadFilter : public Network::ReadFilterBaseImpl {\n    ReadFilter(TestDnsServerQuery& parent) : parent_(parent) {}\n\n    // Network::ReadFilter\n    Network::FilterStatus onData(Buffer::Instance& data, bool) override {\n      onDataInternal(data);\n      return Network::FilterStatus::StopIteration;\n    }\n\n    // Hack: void returning variation of onData to allow gtest assertions.\n    void onDataInternal(Buffer::Instance& data) {\n      buffer_.add(data);\n      while (true) {\n        if (size_ == 0) {\n          uint16_t size_n;\n          if (buffer_.length() < sizeof(size_n)) {\n            // If we don't have enough bytes to determine size, wait until we do.\n            return;\n          }\n          void* mem = buffer_.linearize(sizeof(size_n));\n          
std::memcpy(reinterpret_cast<void*>(&size_n), mem, sizeof(size_n));\n          buffer_.drain(sizeof(size_n));\n          size_ = ntohs(size_n);\n        }\n\n        if (buffer_.length() < size_) {\n          // If we don't have enough bytes to read the complete query, wait until\n          // we do.\n          return;\n        }\n\n        // Expect requests to be small, so stack allocation is fine for test code.\n        unsigned char* request = static_cast<unsigned char*>(buffer_.linearize(size_));\n        // Only expecting a single question.\n        ASSERT_EQ(1, DNS_HEADER_QDCOUNT(request));\n        // Decode the question and perform lookup.\n        const unsigned char* question = request + HFIXEDSZ;\n        // The number of bytes the encoded question name takes up in the request.\n        // Useful in the response when generating resource records containing the\n        // name.\n        long name_len;\n        // Get host name from query and use the name to lookup a record\n        // in a host map. If the query type is of type A, then perform the lookup in\n        // the hosts_a_ host map. If the query type is of type AAAA, then perform the\n        // lookup in the `hosts_aaaa_` host map.\n        char* name;\n        ASSERT_EQ(ARES_SUCCESS, ares_expand_name(question, request, size_, &name, &name_len));\n        const std::list<std::string>* ips = nullptr;\n        // We only expect resources of type A or AAAA.\n        const int q_type = DNS_QUESTION_TYPE(question + name_len);\n        std::string cname;\n        // check if we have a cname. 
If so, we will need to send a response element with the cname\n        // and lookup the ips of the cname and send back those ips (if any) too\n        auto cit = parent_.cnames_.find(name);\n        if (cit != parent_.cnames_.end()) {\n          cname = cit->second;\n        }\n        const char* hostLookup = name;\n        const unsigned char* ip_question = question;\n        long ip_name_len = name_len;\n        std::string encodedCname;\n        if (!cname.empty()) {\n          ASSERT_TRUE(cname.size() <= 253);\n          hostLookup = cname.c_str();\n          encodedCname = TestDnsServerQuery::encodeDnsName(cname);\n          ip_question = reinterpret_cast<const unsigned char*>(encodedCname.c_str());\n          ip_name_len =\n              encodedCname.size() + 1; //+1 as we need to include the final null terminator\n        }\n        ASSERT_TRUE(q_type == T_A || q_type == T_AAAA);\n        if (q_type == T_A) {\n          auto it = parent_.hosts_a_.find(hostLookup);\n          if (it != parent_.hosts_a_.end()) {\n            ips = &it->second;\n          }\n        } else {\n          auto it = parent_.hosts_aaaa_.find(hostLookup);\n          if (it != parent_.hosts_aaaa_.end()) {\n            ips = &it->second;\n          }\n        }\n        ares_free_string(name);\n\n        int answer_size = ips != nullptr ? ips->size() : 0;\n        answer_size += !encodedCname.empty() ? 
1 : 0;\n\n        // The response begins with the initial part of the request\n        // (including the question section).\n        const size_t response_base_len = HFIXEDSZ + name_len + QFIXEDSZ;\n        absl::FixedArray<unsigned char> response_buf(response_base_len);\n        unsigned char* response_base = response_buf.begin();\n        memcpy(response_base, request, response_base_len);\n        DNS_HEADER_SET_QR(response_base, 1);\n        DNS_HEADER_SET_AA(response_base, 0);\n        if (parent_.refused_) {\n          DNS_HEADER_SET_RCODE(response_base, REFUSED);\n        } else {\n          DNS_HEADER_SET_RCODE(response_base, answer_size > 0 ? NOERROR : NXDOMAIN);\n        }\n        DNS_HEADER_SET_ANCOUNT(response_base, answer_size);\n        DNS_HEADER_SET_NSCOUNT(response_base, 0);\n        DNS_HEADER_SET_ARCOUNT(response_base, 0);\n        // Total response size will be computed according to cname response size + ip response sizes\n        size_t response_ip_rest_len;\n        if (q_type == T_A) {\n          response_ip_rest_len =\n              ips != nullptr ? ips->size() * (ip_name_len + RRFIXEDSZ + sizeof(in_addr)) : 0;\n        } else {\n          response_ip_rest_len =\n              ips != nullptr ? ips->size() * (ip_name_len + RRFIXEDSZ + sizeof(in6_addr)) : 0;\n        }\n        size_t response_cname_len =\n            !encodedCname.empty() ? 
name_len + RRFIXEDSZ + encodedCname.size() + 1 : 0;\n        const uint16_t response_size_n =\n            htons(response_base_len + response_ip_rest_len + response_cname_len);\n        Buffer::OwnedImpl write_buffer;\n        // Write response header\n        write_buffer.add(&response_size_n, sizeof(response_size_n));\n        write_buffer.add(response_base, response_base_len);\n\n        // if we have a cname, create a resource record\n        if (!encodedCname.empty()) {\n          unsigned char cname_rr_fixed[RRFIXEDSZ];\n          DNS_RR_SET_TYPE(cname_rr_fixed, T_CNAME);\n          DNS_RR_SET_LEN(cname_rr_fixed, encodedCname.size() + 1);\n          DNS_RR_SET_CLASS(cname_rr_fixed, C_IN);\n          DNS_RR_SET_TTL(cname_rr_fixed, parent_.record_ttl_.count());\n          write_buffer.add(question, name_len);\n          write_buffer.add(cname_rr_fixed, RRFIXEDSZ);\n          write_buffer.add(encodedCname.c_str(), encodedCname.size() + 1);\n        }\n\n        // Create a resource record for each IP found in the host map.\n        unsigned char response_rr_fixed[RRFIXEDSZ];\n        if (q_type == T_A) {\n          DNS_RR_SET_TYPE(response_rr_fixed, T_A);\n          DNS_RR_SET_LEN(response_rr_fixed, sizeof(in_addr));\n        } else {\n          DNS_RR_SET_TYPE(response_rr_fixed, T_AAAA);\n          DNS_RR_SET_LEN(response_rr_fixed, sizeof(in6_addr));\n        }\n        DNS_RR_SET_CLASS(response_rr_fixed, C_IN);\n        DNS_RR_SET_TTL(response_rr_fixed, parent_.record_ttl_.count());\n        if (ips != nullptr) {\n          for (const auto& it : *ips) {\n            write_buffer.add(ip_question, ip_name_len);\n            write_buffer.add(response_rr_fixed, RRFIXEDSZ);\n            if (q_type == T_A) {\n              in_addr addr;\n              ASSERT_EQ(1, inet_pton(AF_INET, it.c_str(), &addr));\n              write_buffer.add(&addr, sizeof(addr));\n            } else {\n              in6_addr addr;\n              ASSERT_EQ(1, inet_pton(AF_INET6, it.c_str(), 
&addr));\n              write_buffer.add(&addr, sizeof(addr));\n            }\n          }\n        }\n        parent_.connection_->write(write_buffer, false);\n\n        // Reset query state, time for the next one.\n        buffer_.drain(size_);\n        size_ = 0;\n      }\n    }\n\n    TestDnsServerQuery& parent_;\n    // The expected size of the current DNS query to read. If zero, indicates that\n    // no DNS query is in progress and that a 2 byte size is expected from the\n    // client to indicate the next DNS query size.\n    uint16_t size_ = 0;\n    Buffer::OwnedImpl buffer_;\n  };\n\nprivate:\n  ConnectionPtr connection_;\n  const HostMap& hosts_a_;\n  const HostMap& hosts_aaaa_;\n  const CNameMap& cnames_;\n  const std::chrono::seconds& record_ttl_;\n  bool refused_{};\n};\n\nclass TestDnsServer : public TcpListenerCallbacks {\npublic:\n  TestDnsServer(Event::Dispatcher& dispatcher)\n      : dispatcher_(dispatcher), record_ttl_(0), stream_info_(dispatcher.timeSource()) {}\n\n  void onAccept(ConnectionSocketPtr&& socket) override {\n    Network::ConnectionPtr new_connection = dispatcher_.createServerConnection(\n        std::move(socket), Network::Test::createRawBufferSocket(), stream_info_);\n    TestDnsServerQuery* query = new TestDnsServerQuery(std::move(new_connection), hosts_a_,\n                                                       hosts_aaaa_, cnames_, record_ttl_, refused_);\n    queries_.emplace_back(query);\n  }\n\n  void onReject() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  void addHosts(const std::string& hostname, const IpList& ip, const RecordType& type) {\n    if (type == RecordType::A) {\n      hosts_a_[hostname] = ip;\n    } else if (type == RecordType::AAAA) {\n      hosts_aaaa_[hostname] = ip;\n    }\n  }\n\n  void addCName(const std::string& hostname, const std::string& cname) {\n    cnames_[hostname] = cname;\n  }\n\n  void setRecordTtl(const std::chrono::seconds& ttl) { record_ttl_ = ttl; }\n  void setRefused(bool refused) { 
refused_ = refused; }\n\nprivate:\n  Event::Dispatcher& dispatcher_;\n\n  HostMap hosts_a_;\n  HostMap hosts_aaaa_;\n  CNameMap cnames_;\n  std::chrono::seconds record_ttl_;\n  bool refused_{};\n  // All queries are tracked so we can do resource reclamation when the test is\n  // over.\n  std::vector<std::unique_ptr<TestDnsServerQuery>> queries_;\n  StreamInfo::StreamInfoImpl stream_info_;\n};\n\n} // namespace\n\nclass DnsResolverImplPeer {\npublic:\n  DnsResolverImplPeer(DnsResolverImpl* resolver) : resolver_(resolver) {}\n\n  ares_channel channel() const { return resolver_->channel_; }\n  bool isChannelDirty() const { return resolver_->dirty_channel_; }\n  const absl::node_hash_map<int, Event::FileEventPtr>& events() { return resolver_->events_; }\n  // Reset the channel state for a DnsResolverImpl such that it will only use\n  // TCP and optionally has a zero timeout (for validating timeout behavior).\n  void resetChannelTcpOnly(bool zero_timeout) {\n    ares_destroy(resolver_->channel_);\n    ares_options options;\n    // TCP-only connections to TestDnsServer, since even loopback UDP can be\n    // lossy with a server under load.\n    options.flags = ARES_FLAG_USEVC;\n    // Avoid host-specific domain search behavior when testing to improve\n    // determinism.\n    options.ndomains = 0;\n    options.timeout = 0;\n    resolver_->initializeChannel(&options, ARES_OPT_FLAGS | ARES_OPT_DOMAINS |\n                                               (zero_timeout ? 
ARES_OPT_TIMEOUTMS : 0));
  }

private:
  DnsResolverImpl* resolver_;
};

// Fixture for DnsResolverImpl construction-time behavior (custom resolver
// address handling); no TestDnsServer is involved here.
class DnsImplConstructor : public testing::Test {
protected:
  DnsImplConstructor()
      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {}

  Api::ApiPtr api_;
  Event::DispatcherPtr dispatcher_;
};

// Verify that custom IPv4/IPv6 resolver addresses passed at construction are
// installed into the underlying c-ares channel, in order.
TEST_F(DnsImplConstructor, SupportsCustomResolvers) {
  char addr4str[INET_ADDRSTRLEN];
  // we pick a port that isn't 53 as the default resolve.conf might be
  // set to point to localhost.
  auto addr4 = Network::Utility::parseInternetAddressAndPort("127.0.0.1:54");
  char addr6str[INET6_ADDRSTRLEN];
  auto addr6 = Network::Utility::parseInternetAddressAndPort("[::1]:54");
  auto resolver = dispatcher_->createDnsResolver({addr4, addr6}, false);
  auto peer = std::make_unique<DnsResolverImplPeer>(dynamic_cast<DnsResolverImpl*>(resolver.get()));
  ares_addr_port_node* resolvers;
  int result = ares_get_servers_ports(peer->channel(), &resolvers);
  EXPECT_EQ(result, ARES_SUCCESS);
  EXPECT_EQ(resolvers->family, AF_INET);
  EXPECT_EQ(resolvers->udp_port, 54);
  EXPECT_STREQ(inet_ntop(AF_INET, &resolvers->addr.addr4, addr4str, INET_ADDRSTRLEN), "127.0.0.1");
  EXPECT_EQ(resolvers->next->family, AF_INET6);
  EXPECT_EQ(resolvers->next->udp_port, 54);
  EXPECT_STREQ(inet_ntop(AF_INET6, &resolvers->next->addr.addr6, addr6str, INET6_ADDRSTRLEN),
               "::1");
  // ares_get_servers_ports allocates the list; release it here.
  ares_free_data(resolvers);
}

// Custom instance that dispatches everything to a regular instance except for asString(), where
// it borks the port.
class CustomInstance : public Address::Instance {
public:
  CustomInstance(const std::string& address, uint32_t port) : instance_(address, port) {
    antagonistic_name_ = fmt::format("{}:borked_port_{}", address, port);
  }
  ~CustomInstance() override = default;

  // Address::Instance
  bool operator==(const Address::Instance& rhs) const override {
    return asString() == rhs.asString();
 }
  const std::string& asString() const override { return antagonistic_name_; }
  absl::string_view asStringView() const override { return antagonistic_name_; }
  const std::string& logicalName() const override { return antagonistic_name_; }
  const Address::Ip* ip() const override { return instance_.ip(); }
  const Address::Pipe* pipe() const override { return instance_.pipe(); }
  const Address::EnvoyInternalAddress* envoyInternalAddress() const override {
    return instance_.envoyInternalAddress();
  }
  const sockaddr* sockAddr() const override { return instance_.sockAddr(); }
  socklen_t sockAddrLen() const override { return instance_.sockAddrLen(); }
  Address::Type type() const override { return instance_.type(); }
  const SocketInterface& socketInterface() const override {
    return SocketInterfaceSingleton::get();
  }

private:
  std::string antagonistic_name_;
  Address::Ipv4Instance instance_;
};

// Verify that a custom Address::Instance implementation is accepted and that
// the c-ares channel ends up with the correct IP and port even though the
// instance's asString() name is deliberately malformed.
TEST_F(DnsImplConstructor, SupportCustomAddressInstances) {
  auto test_instance(std::make_shared<CustomInstance>("127.0.0.1", 45));
  EXPECT_EQ(test_instance->asString(), "127.0.0.1:borked_port_45");
  auto resolver = dispatcher_->createDnsResolver({test_instance}, false);
  auto peer = std::make_unique<DnsResolverImplPeer>(dynamic_cast<DnsResolverImpl*>(resolver.get()));
  ares_addr_port_node* resolvers;
  int result = ares_get_servers_ports(peer->channel(), &resolvers);
  EXPECT_EQ(result, ARES_SUCCESS);
  EXPECT_EQ(resolvers->family, AF_INET);
  EXPECT_EQ(resolvers->udp_port, 45);
  char addr4str[INET_ADDRSTRLEN];
  EXPECT_STREQ(inet_ntop(AF_INET, &resolvers->addr.addr4, addr4str, INET_ADDRSTRLEN), "127.0.0.1");
  ares_free_data(resolvers);
}

// Verify that a non-IP (pipe) resolver address is rejected at construction.
TEST_F(DnsImplConstructor, BadCustomResolvers) {
  envoy::config::core::v3::Address pipe_address;
  pipe_address.mutable_pipe()->set_path("foo");
  auto pipe_instance = Network::Utility::protobufAddressToAddress(pipe_address);
EXPECT_THROW_WITH_MESSAGE(dispatcher_->createDnsResolver({pipe_instance}, false), EnvoyException,\n                            \"DNS resolver 'foo' is not an IP address\");\n}\n\nclass DnsImplTest : public testing::TestWithParam<Address::IpVersion> {\npublic:\n  DnsImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  void SetUp() override {\n    resolver_ = dispatcher_->createDnsResolver({}, use_tcp_for_dns_lookups());\n\n    // Instantiate TestDnsServer and listen on a random port on the loopback address.\n    server_ = std::make_unique<TestDnsServer>(*dispatcher_);\n    socket_ = std::make_shared<Network::TcpListenSocket>(\n        Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n    listener_ = dispatcher_->createListener(socket_, *server_, true, ENVOY_TCP_BACKLOG_SIZE);\n\n    // Point c-ares at the listener with no search domains and TCP-only.\n    peer_ = std::make_unique<DnsResolverImplPeer>(dynamic_cast<DnsResolverImpl*>(resolver_.get()));\n    if (tcp_only()) {\n      peer_->resetChannelTcpOnly(zero_timeout());\n    }\n    ares_set_servers_ports_csv(peer_->channel(), socket_->localAddress()->asString().c_str());\n  }\n\n  void TearDown() override {\n    // Make sure we clean this up before dispatcher destruction.\n    listener_.reset();\n    server_.reset();\n  }\n\n  static std::list<Address::InstanceConstSharedPtr>\n  getAddressList(const std::list<DnsResponse>& response) {\n    std::list<Address::InstanceConstSharedPtr> address;\n\n    for_each(response.begin(), response.end(),\n             [&](DnsResponse resp) { address.emplace_back(resp.address_); });\n    return address;\n  }\n\n  static std::list<std::string> getAddressAsStringList(const std::list<DnsResponse>& response) {\n    std::list<std::string> address;\n\n    for_each(response.begin(), response.end(), [&](DnsResponse resp) {\n      address.emplace_back(resp.address_->ip()->addressAsString());\n    });\n    
return address;\n  }\n\n  ActiveDnsQuery* resolveWithExpectations(const std::string& address,\n                                          const DnsLookupFamily lookup_family,\n                                          const DnsResolver::ResolutionStatus expected_status,\n                                          const std::list<std::string>& expected_results,\n                                          const std::list<std::string>& expected_absent_results,\n                                          const absl::optional<std::chrono::seconds> expected_ttl) {\n    return resolver_->resolve(\n        address, lookup_family,\n        [=](DnsResolver::ResolutionStatus status, std::list<DnsResponse>&& results) -> void {\n          EXPECT_EQ(expected_status, status);\n\n          std::list<std::string> address_as_string_list = getAddressAsStringList(results);\n          // Note localhost is getting a special treatment here due to circle ci's hosts file.\n          // If the coverage job is moved from circle, this can be simplified to only the exact\n          // list match.\n          // https://github.com/envoyproxy/envoy/pull/10137#issuecomment-592525544\n          if (address == \"localhost\" && lookup_family == DnsLookupFamily::V4Only) {\n            EXPECT_THAT(address_as_string_list, IsSupersetOf(expected_results));\n          } else {\n            EXPECT_EQ(expected_results, address_as_string_list);\n          }\n\n          for (const auto& expected_absent_result : expected_absent_results) {\n            EXPECT_THAT(address_as_string_list, Not(Contains(expected_absent_result)));\n          }\n\n          if (expected_ttl) {\n            std::list<Address::InstanceConstSharedPtr> address_list = getAddressList(results);\n            for (const auto& address : results) {\n              EXPECT_EQ(address.ttl_, expected_ttl.value());\n            }\n          }\n\n          dispatcher_->exit();\n        });\n  }\n\n  ActiveDnsQuery* resolveWithUnreferencedParameters(const 
std::string& address,\n                                                    const DnsLookupFamily lookup_family,\n                                                    bool expected_to_execute) {\n    return resolver_->resolve(address, lookup_family,\n                              [expected_to_execute](DnsResolver::ResolutionStatus status,\n                                                    std::list<DnsResponse>&& results) -> void {\n                                if (!expected_to_execute) {\n                                  FAIL();\n                                }\n                                UNREFERENCED_PARAMETER(status);\n                                UNREFERENCED_PARAMETER(results);\n                              });\n  }\n\n  template <typename T>\n  ActiveDnsQuery* resolveWithException(const std::string& address,\n                                       const DnsLookupFamily lookup_family, T exception_object) {\n    return resolver_->resolve(address, lookup_family,\n                              [exception_object](DnsResolver::ResolutionStatus status,\n                                                 std::list<DnsResponse>&& results) -> void {\n                                UNREFERENCED_PARAMETER(status);\n                                UNREFERENCED_PARAMETER(results);\n                                throw exception_object;\n                              });\n  }\n\nprotected:\n  // Should the DnsResolverImpl use a zero timeout for c-ares queries?\n  virtual bool zero_timeout() const { return false; }\n  virtual bool tcp_only() const { return true; }\n  virtual bool use_tcp_for_dns_lookups() const { return false; }\n  std::unique_ptr<TestDnsServer> server_;\n  std::unique_ptr<DnsResolverImplPeer> peer_;\n  Network::MockConnectionHandler connection_handler_;\n  std::shared_ptr<Network::TcpListenSocket> socket_;\n  std::unique_ptr<Network::Listener> listener_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  DnsResolverSharedPtr 
resolver_;\n};\n\n// Parameterize the DNS test server socket address.\nINSTANTIATE_TEST_SUITE_P(IpVersions, DnsImplTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Validate that when DnsResolverImpl is destructed with outstanding requests,\n// that we don't invoke any callbacks if the query was cancelled. This is a regression test from\n// development, where segfaults were encountered due to callback invocations on\n// destruction.\nTEST_P(DnsImplTest, DestructPending) {\n  ActiveDnsQuery* query = resolveWithUnreferencedParameters(\"\", DnsLookupFamily::V4Only, false);\n  ASSERT_NE(nullptr, query);\n  query->cancel();\n  // Also validate that pending events are around to exercise the resource\n  // reclamation path.\n  EXPECT_GT(peer_->events().size(), 0U);\n}\n\nTEST_P(DnsImplTest, DestructCallback) {\n  server_->addHosts(\"some.good.domain\", {\"201.134.56.7\"}, RecordType::A);\n\n  EXPECT_NE(nullptr,\n            resolveWithExpectations(\"some.domain\", DnsLookupFamily::Auto,\n                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));\n\n  // This simulates destruction thanks to another query setting the dirty_channel_ bit, thus causing\n  // a subsequent result to call ares_destroy.\n  peer_->resetChannelTcpOnly(zero_timeout());\n  ares_set_servers_ports_csv(peer_->channel(), socket_->localAddress()->asString().c_str());\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Validate basic success/fail lookup behavior. 
The empty request will connect\n// to TestDnsServer, but localhost should resolve via the hosts file with no\n// asynchronous behavior or network events.\nTEST_P(DnsImplTest, LocalLookup) {\n  std::list<Address::InstanceConstSharedPtr> address_list;\n  EXPECT_NE(nullptr,\n            resolveWithExpectations(\"\", DnsLookupFamily::V4Only,\n                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  if (GetParam() == Address::IpVersion::v4) {\n    EXPECT_EQ(nullptr, resolveWithExpectations(\"localhost\", DnsLookupFamily::V4Only,\n                                               DnsResolver::ResolutionStatus::Success,\n                                               {\"127.0.0.1\"}, {\"::1\"}, absl::nullopt));\n  }\n\n  if (GetParam() == Address::IpVersion::v6) {\n    const std::string error_msg =\n        \"Synchronous DNS IPv6 localhost resolution failed. Please verify localhost resolves to ::1 \"\n        \"in /etc/hosts, since this misconfiguration is a common cause of these failures.\";\n    EXPECT_EQ(nullptr, resolveWithExpectations(\"localhost\", DnsLookupFamily::V6Only,\n                                               DnsResolver::ResolutionStatus::Success, {\"::1\"},\n                                               {\"127.0.0.1\"}, absl::nullopt))\n        << error_msg;\n\n    EXPECT_EQ(nullptr, resolveWithExpectations(\"localhost\", DnsLookupFamily::Auto,\n                                               DnsResolver::ResolutionStatus::Success, {\"::1\"},\n                                               {\"127.0.0.1\"}, absl::nullopt))\n        << error_msg;\n  }\n}\n\nTEST_P(DnsImplTest, DnsIpAddressVersion) {\n  server_->addHosts(\"some.good.domain\", {\"1.2.3.4\"}, RecordType::A);\n  EXPECT_NE(nullptr, resolveWithExpectations(\"some.good.domain\", DnsLookupFamily::Auto,\n                                             DnsResolver::ResolutionStatus::Success, 
{\"1.2.3.4\"},\n                                             {}, absl::nullopt));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_NE(nullptr, resolveWithExpectations(\"some.good.domain\", DnsLookupFamily::V4Only,\n                                             DnsResolver::ResolutionStatus::Success, {\"1.2.3.4\"},\n                                             {}, absl::nullopt));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_NE(nullptr,\n            resolveWithExpectations(\"some.good.domain\", DnsLookupFamily::V6Only,\n                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_P(DnsImplTest, DnsIpAddressVersionV6) {\n  server_->addHosts(\"some.good.domain\", {\"1::2\"}, RecordType::AAAA);\n  EXPECT_NE(nullptr, resolveWithExpectations(\"some.good.domain\", DnsLookupFamily::Auto,\n                                             DnsResolver::ResolutionStatus::Success, {\"1::2\"}, {},\n                                             absl::nullopt));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_NE(nullptr,\n            resolveWithExpectations(\"some.good.domain\", DnsLookupFamily::V4Only,\n                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_NE(nullptr, resolveWithExpectations(\"some.good.domain\", DnsLookupFamily::V6Only,\n                                             DnsResolver::ResolutionStatus::Success, {\"1::2\"}, {},\n                                             absl::nullopt));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Validate exception behavior during c-ares callbacks.\nTEST_P(DnsImplTest, CallbackException) {\n  // Force immediate resolution, which will trigger a c-ares exception unsafe\n  // state providing regression coverage for #4307.\n  EXPECT_EQ(nullptr, 
resolveWithException<EnvoyException>("1.2.3.4", DnsLookupFamily::V4Only,
                                                          EnvoyException("Envoy exception")));
  EXPECT_THROW_WITH_MESSAGE(dispatcher_->run(Event::Dispatcher::RunType::Block), EnvoyException,
                            "Envoy exception");
  EXPECT_EQ(nullptr, resolveWithException<std::runtime_error>("1.2.3.4", DnsLookupFamily::V4Only,
                                                              std::runtime_error("runtime error")));
  EXPECT_THROW_WITH_MESSAGE(dispatcher_->run(Event::Dispatcher::RunType::Block), EnvoyException,
                            "runtime error");
  // A thrown non-std::exception type surfaces as "unknown".
  EXPECT_EQ(nullptr,
            resolveWithException<std::string>("1.2.3.4", DnsLookupFamily::V4Only, std::string()));
  EXPECT_THROW_WITH_MESSAGE(dispatcher_->run(Event::Dispatcher::RunType::Block), EnvoyException,
                            "unknown");
}

// Validate that the c-ares channel is destroyed and re-initialized when c-ares returns
// ARES_ECONNREFUSED as its callback status.
TEST_P(DnsImplTest, DestroyChannelOnRefused) {
  ASSERT_FALSE(peer_->isChannelDirty());
  server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A);
  server_->setRefused(true);

  EXPECT_NE(nullptr,
            resolveWithExpectations("", DnsLookupFamily::V4Only,
                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);
  // The c-ares channel should be dirty because the TestDnsServer replied with return code REFUSED;
  // This test, and the way the TestDnsServerQuery is setup, relies on the fact that Envoy's
  // c-ares channel is configured **without** the ARES_FLAG_NOCHECKRESP flag. This causes c-ares to
  // discard packets with REFUSED, and thus Envoy receives ARES_ECONNREFUSED due to the code here:
  // https://github.com/c-ares/c-ares/blob/d7e070e7283f822b1d2787903cce3615536c5610/ares_process.c#L654
  // If that flag needs to be set, or c-ares changes its handling this test will need to be updated
  // to create another condition where c-ares invokes onAresGetAddrInfoCallback with status ==
  // ARES_ECONNREFUSED.
  EXPECT_TRUE(peer_->isChannelDirty());

  server_->setRefused(false);

  // Resolve will destroy the original channel and create a new one.
  EXPECT_NE(nullptr,
            resolveWithExpectations("some.good.domain", DnsLookupFamily::V4Only,
                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);
  // However, the fresh channel initialized by production code does not point to the TestDnsServer.
  // This means that resolution will return ARES_ENOTFOUND. This should not dirty the channel.
  EXPECT_FALSE(peer_->isChannelDirty());

  // Reset the channel to point to the TestDnsServer, and make sure resolution is healthy.
  if (tcp_only()) {
    peer_->resetChannelTcpOnly(zero_timeout());
  }
  ares_set_servers_ports_csv(peer_->channel(), socket_->localAddress()->asString().c_str());

  EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto,
                                             DnsResolver::ResolutionStatus::Success,
                                             {"201.134.56.7"}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);
  EXPECT_FALSE(peer_->isChannelDirty());
}

// Validate success/fail lookup behavior via TestDnsServer.
This exercises the
// network event handling in DnsResolverImpl.
TEST_P(DnsImplTest, RemoteAsyncLookup) {
  server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A);

  EXPECT_NE(nullptr,
            resolveWithExpectations("some.bad.domain", DnsLookupFamily::Auto,
                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);

  EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto,
                                             DnsResolver::ResolutionStatus::Success,
                                             {"201.134.56.7"}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);
}

// Validate that multiple A records are correctly passed to the callback.
TEST_P(DnsImplTest, MultiARecordLookup) {
  server_->addHosts("some.good.domain", {"201.134.56.7", "123.4.5.6", "6.5.4.3"}, RecordType::A);

  EXPECT_NE(nullptr,
            resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto,
                                    DnsResolver::ResolutionStatus::Success,
                                    {"201.134.56.7", "123.4.5.6", "6.5.4.3"}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);
}

// Validate CNAME resolution to an A record for a V4Only lookup.
TEST_P(DnsImplTest, CNameARecordLookupV4) {
  server_->addCName("root.cnam.domain", "result.cname.domain");
  server_->addHosts("result.cname.domain", {"201.134.56.7"}, RecordType::A);

  EXPECT_NE(nullptr, resolveWithExpectations("root.cnam.domain", DnsLookupFamily::V4Only,
                                             DnsResolver::ResolutionStatus::Success,
                                             {"201.134.56.7"}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);
}

// Validate CNAME resolution to an A record when the lookup family is Auto.
TEST_P(DnsImplTest, CNameARecordLookupWithV6) {
  server_->addCName("root.cnam.domain", "result.cname.domain");
  server_->addHosts("result.cname.domain", {"201.134.56.7"}, RecordType::A);

  EXPECT_NE(nullptr, resolveWithExpectations("root.cnam.domain", DnsLookupFamily::Auto,
                                             DnsResolver::ResolutionStatus::Success,
                                             {"201.134.56.7"}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);
}

// Validate family selection when both A and AAAA records exist for a host.
TEST_P(DnsImplTest, MultiARecordLookupWithV6) {
  server_->addHosts("some.good.domain", {"201.134.56.7", "123.4.5.6", "6.5.4.3"}, RecordType::A);
  server_->addHosts("some.good.domain", {"1::2", "1::2:3", "1::2:3:4"}, RecordType::AAAA);

  EXPECT_NE(nullptr,
            resolveWithExpectations("some.good.domain", DnsLookupFamily::V4Only,
                                    DnsResolver::ResolutionStatus::Success,
                                    {"201.134.56.7", "123.4.5.6", "6.5.4.3"}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);

  EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto,
                                             DnsResolver::ResolutionStatus::Success,
                                             {{"1::2", "1::2:3", "1::2:3:4"}}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);

  EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::V6Only,
                                             DnsResolver::ResolutionStatus::Success,
                                             {{"1::2", "1::2:3", "1::2:3:4"}}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);
}

// Validate working of cancellation provided by ActiveDnsQuery return.
TEST_P(DnsImplTest, Cancel) {
  server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A);

  ActiveDnsQuery* query =
      resolveWithUnreferencedParameters("some.domain", DnsLookupFamily::Auto,
false);

  EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto,
                                             DnsResolver::ResolutionStatus::Success,
                                             {"201.134.56.7"}, {}, absl::nullopt));

  ASSERT_NE(nullptr, query);
  query->cancel();

  dispatcher_->run(Event::Dispatcher::RunType::Block);
}

// Validate working of querying ttl of resource record.
TEST_P(DnsImplTest, RecordTtlLookup) {
  // Hosts-file results carry a zero TTL.
  if (GetParam() == Address::IpVersion::v4) {
    EXPECT_EQ(nullptr, resolveWithExpectations("localhost", DnsLookupFamily::V4Only,
                                               DnsResolver::ResolutionStatus::Success,
                                               {"127.0.0.1"}, {}, std::chrono::seconds(0)));
    dispatcher_->run(Event::Dispatcher::RunType::Block);
  }

  if (GetParam() == Address::IpVersion::v6) {
    EXPECT_EQ(nullptr, resolveWithExpectations("localhost", DnsLookupFamily::V6Only,
                                               DnsResolver::ResolutionStatus::Success, {"::1"}, {},
                                               std::chrono::seconds(0)));
    dispatcher_->run(Event::Dispatcher::RunType::Block);

    EXPECT_EQ(nullptr, resolveWithExpectations("localhost", DnsLookupFamily::Auto,
                                               DnsResolver::ResolutionStatus::Success, {"::1"}, {},
                                               std::chrono::seconds(0)));
    dispatcher_->run(Event::Dispatcher::RunType::Block);
  }

  // Server-provided records must carry the TTL configured on the server.
  server_->addHosts("some.good.domain", {"201.134.56.7", "123.4.5.6", "6.5.4.3"}, RecordType::A);
  server_->addHosts("some.good.domain", {"1::2", "1::2:3", "1::2:3:4"}, RecordType::AAAA);
  server_->setRecordTtl(std::chrono::seconds(300));

  EXPECT_NE(nullptr,
            resolveWithExpectations("some.good.domain", DnsLookupFamily::V4Only,
                                    DnsResolver::ResolutionStatus::Success,
                                    {"201.134.56.7", "123.4.5.6", "6.5.4.3"},
                                    {"1::2", "1::2:3", "1::2:3:4"}, std::chrono::seconds(300)));
  dispatcher_->run(Event::Dispatcher::RunType::Block);

  EXPECT_NE(nullptr, resolveWithExpectations(
                         "some.good.domain", DnsLookupFamily::Auto,
                         DnsResolver::ResolutionStatus::Success, {"1::2", "1::2:3", "1::2:3:4"},
                         {"201.134.56.7", "123.4.5.6", "6.5.4.3"}, std::chrono::seconds(300)));
  dispatcher_->run(Event::Dispatcher::RunType::Block);

  EXPECT_NE(nullptr, resolveWithExpectations(
                         "some.good.domain", DnsLookupFamily::V6Only,
                         DnsResolver::ResolutionStatus::Success, {"1::2", "1::2:3", "1::2:3:4"},
                         {"201.134.56.7", "123.4.5.6", "6.5.4.3"}, std::chrono::seconds(300)));
  dispatcher_->run(Event::Dispatcher::RunType::Block);

  server_->addHosts("domain.onion", {"1.2.3.4"}, RecordType::A);
  server_->addHosts("domain.onion.", {"2.3.4.5"}, RecordType::A);

  // test onion domain
  EXPECT_EQ(nullptr,
            resolveWithExpectations("domain.onion", DnsLookupFamily::V4Only,
                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);

  EXPECT_EQ(nullptr,
            resolveWithExpectations("domain.onion.", DnsLookupFamily::V4Only,
                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));
  dispatcher_->run(Event::Dispatcher::RunType::Block);
}

// Validate that the resolution timeout timer is enabled if we don't resolve
// immediately.
TEST_P(DnsImplTest, PendingTimerEnable) {
  InSequence s;
  std::vector<Network::Address::InstanceConstSharedPtr> vec{};
  Event::MockDispatcher
dispatcher;\n  Event::MockTimer* timer = new NiceMock<Event::MockTimer>();\n  EXPECT_CALL(dispatcher, createTimer_(_)).WillOnce(Return(timer));\n  resolver_ = std::make_shared<DnsResolverImpl>(dispatcher, vec, false);\n  Event::FileEvent* file_event = new NiceMock<Event::MockFileEvent>();\n  EXPECT_CALL(dispatcher, createFileEvent_(_, _, _, _)).WillOnce(Return(file_event));\n  EXPECT_CALL(*timer, enableTimer(_, _));\n  EXPECT_NE(nullptr, resolveWithUnreferencedParameters(\"some.bad.domain.invalid\",\n                                                       DnsLookupFamily::V4Only, true));\n}\n\nclass DnsImplZeroTimeoutTest : public DnsImplTest {\nprotected:\n  bool zero_timeout() const override { return true; }\n};\n\n// Parameterize the DNS test server socket address.\nINSTANTIATE_TEST_SUITE_P(IpVersions, DnsImplZeroTimeoutTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Validate that timeouts result in an empty callback.\nTEST_P(DnsImplZeroTimeoutTest, Timeout) {\n  server_->addHosts(\"some.good.domain\", {\"201.134.56.7\"}, RecordType::A);\n\n  EXPECT_NE(nullptr,\n            resolveWithExpectations(\"some.good.domain\", DnsLookupFamily::V4Only,\n                                    DnsResolver::ResolutionStatus::Failure, {}, {}, absl::nullopt));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nclass DnsImplAresFlagsForTcpTest : public DnsImplTest {\nprotected:\n  bool tcp_only() const override { return false; }\n  bool use_tcp_for_dns_lookups() const override { return true; }\n};\n\n// Parameterize the DNS test server socket address.\nINSTANTIATE_TEST_SUITE_P(IpVersions, DnsImplAresFlagsForTcpTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Validate that c_ares flag `ARES_FLAG_USEVC` is set when boolean property\n// 
`use_tcp_for_dns_lookups` is enabled.\nTEST_P(DnsImplAresFlagsForTcpTest, TcpLookupsEnabled) {\n  server_->addCName(\"root.cnam.domain\", \"result.cname.domain\");\n  server_->addHosts(\"result.cname.domain\", {\"201.134.56.7\"}, RecordType::A);\n  ares_options opts{};\n  int optmask = 0;\n  EXPECT_EQ(ARES_SUCCESS, ares_save_options(peer_->channel(), &opts, &optmask));\n  EXPECT_TRUE((opts.flags & ARES_FLAG_USEVC) == ARES_FLAG_USEVC);\n  EXPECT_NE(nullptr,\n            resolveWithUnreferencedParameters(\"root.cnam.domain\", DnsLookupFamily::Auto, true));\n  ares_destroy_options(&opts);\n}\n\nclass DnsImplAresFlagsForUdpTest : public DnsImplTest {\nprotected:\n  bool tcp_only() const override { return false; }\n};\n\n// Parameterize the DNS test server socket address.\nINSTANTIATE_TEST_SUITE_P(IpVersions, DnsImplAresFlagsForUdpTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Validate that c_ares flag `ARES_FLAG_USEVC` is not set when boolean property\n// `use_tcp_for_dns_lookups` is disabled.\nTEST_P(DnsImplAresFlagsForUdpTest, UdpLookupsEnabled) {\n  server_->addCName(\"root.cnam.domain\", \"result.cname.domain\");\n  server_->addHosts(\"result.cname.domain\", {\"201.134.56.7\"}, RecordType::A);\n  ares_options opts{};\n  int optmask = 0;\n  EXPECT_EQ(ARES_SUCCESS, ares_save_options(peer_->channel(), &opts, &optmask));\n  EXPECT_FALSE((opts.flags & ARES_FLAG_USEVC) == ARES_FLAG_USEVC);\n  EXPECT_NE(nullptr,\n            resolveWithUnreferencedParameters(\"root.cnam.domain\", DnsLookupFamily::Auto, true));\n  ares_destroy_options(&opts);\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/filter_manager_impl_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/filter_manager_impl.h\"\n#include \"common/tcp_proxy/tcp_proxy.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/filters/network/ratelimit/ratelimit.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/extensions/filters/common/ratelimit/mocks.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/ratelimit/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::WithArgs;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nclass NetworkFilterManagerTest : public testing::Test {\npublic:\n  void SetUp() override {\n    EXPECT_CALL(connection_, getReadBuffer).WillRepeatedly(Invoke([this]() {\n      return StreamBuffer{read_buffer_, read_end_stream_};\n    }));\n    EXPECT_CALL(connection_, getWriteBuffer).WillRepeatedly(Invoke([this]() {\n      return StreamBuffer{write_buffer_, write_end_stream_};\n    }));\n  }\n\n  NiceMock<MockFilterManagerConnection> connection_;\n\n  Buffer::OwnedImpl read_buffer_;\n  Buffer::OwnedImpl write_buffer_;\n  bool read_end_stream_{};\n  bool write_end_stream_{};\n};\n\nclass LocalMockFilter : public MockFilter {\npublic:\n  ~LocalMockFilter() override {\n    // Make sure the upstream host is still valid in the filter 
destructor.\n    callbacks_->upstreamHost()->address();\n  }\n};\n\nTEST_F(NetworkFilterManagerTest, All) {\n  InSequence s;\n\n  Upstream::HostDescription* host_description(new NiceMock<Upstream::MockHostDescription>());\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockWriteFilter* write_filter(new MockWriteFilter());\n  MockFilter* filter(new LocalMockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addWriteFilter(WriteFilterSharedPtr{write_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  read_filter->callbacks_->upstreamHost(Upstream::HostDescriptionConstSharedPtr{host_description});\n  EXPECT_EQ(read_filter->callbacks_->upstreamHost(), filter->callbacks_->upstreamHost());\n\n  EXPECT_CALL(*read_filter, onNewConnection()).WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_EQ(manager.initializeReadFilters(), true);\n\n  EXPECT_CALL(*filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  read_filter->callbacks_->continueReading();\n\n  read_buffer_.add(\"hello\");\n  read_end_stream_ = false;\n  EXPECT_CALL(*read_filter, onData(BufferStringEqual(\"hello\"), false))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  manager.onRead();\n\n  read_buffer_.add(\"world\");\n  EXPECT_CALL(*filter, onData(BufferStringEqual(\"helloworld\"), false))\n      .WillOnce(Return(FilterStatus::Continue));\n  read_filter->callbacks_->continueReading();\n\n  write_buffer_.add(\"foo\");\n  write_end_stream_ = false;\n  EXPECT_CALL(*filter, onWrite(BufferStringEqual(\"foo\"), false))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  manager.onWrite();\n\n  write_buffer_.add(\"bar\");\n  EXPECT_CALL(*filter, onWrite(BufferStringEqual(\"foobar\"), false))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*write_filter, onWrite(BufferStringEqual(\"foobar\"), false))\n      .WillOnce(Return(FilterStatus::Continue));\n  
manager.onWrite();\n}\n\nTEST_F(NetworkFilterManagerTest, ConnectionClosedBeforeRunningFilter) {\n  InSequence s;\n\n  Upstream::HostDescription* host_description(new NiceMock<Upstream::MockHostDescription>());\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockFilter* filter(new LocalMockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  read_filter->callbacks_->upstreamHost(Upstream::HostDescriptionConstSharedPtr{host_description});\n  EXPECT_EQ(read_filter->callbacks_->upstreamHost(), filter->callbacks_->upstreamHost());\n\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Closing));\n  EXPECT_CALL(*read_filter, onNewConnection()).Times(0);\n  EXPECT_CALL(*read_filter, onData(_, _)).Times(0);\n  EXPECT_CALL(*filter, onNewConnection()).Times(0);\n  EXPECT_CALL(*filter, onData(_, _)).Times(0);\n  manager.onRead();\n\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Closed));\n  EXPECT_CALL(*filter, onWrite(_, _)).Times(0);\n  manager.onWrite();\n}\n\nTEST_F(NetworkFilterManagerTest, FilterReturnStopAndNoCallback) {\n  InSequence s;\n\n  Upstream::HostDescription* host_description(new NiceMock<Upstream::MockHostDescription>());\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockWriteFilter* write_filter(new MockWriteFilter());\n  MockFilter* filter(new LocalMockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addWriteFilter(WriteFilterSharedPtr{write_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  read_filter->callbacks_->upstreamHost(Upstream::HostDescriptionConstSharedPtr{host_description});\n  EXPECT_EQ(read_filter->callbacks_->upstreamHost(), filter->callbacks_->upstreamHost());\n\n  read_buffer_.add(\"hello\");\n  EXPECT_CALL(*read_filter, 
onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*read_filter, onData(BufferStringEqual(\"hello\"), _))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_CALL(*filter, onNewConnection()).Times(0);\n  EXPECT_CALL(*filter, onData(_, _)).Times(0);\n  manager.onRead();\n\n  EXPECT_CALL(*filter, onWrite(_, _)).WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_CALL(*write_filter, onWrite(_, _)).Times(0);\n  manager.onWrite();\n}\n\nTEST_F(NetworkFilterManagerTest, ReadFilterCloseConnectionAndReturnContinue) {\n  InSequence s;\n\n  Upstream::HostDescription* host_description(new NiceMock<Upstream::MockHostDescription>());\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockFilter* filter(new LocalMockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  read_filter->callbacks_->upstreamHost(Upstream::HostDescriptionConstSharedPtr{host_description});\n  EXPECT_EQ(read_filter->callbacks_->upstreamHost(), filter->callbacks_->upstreamHost());\n\n  EXPECT_CALL(*read_filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_EQ(manager.initializeReadFilters(), true);\n\n  read_buffer_.add(\"hello\");\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Open));\n  EXPECT_CALL(*read_filter, onData(BufferStringEqual(\"hello\"), _))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Closing));\n  EXPECT_CALL(*filter, onData(_, _)).Times(0);\n  manager.onRead();\n\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Closed));\n  EXPECT_CALL(*filter, onWrite(_, _)).Times(0);\n  manager.onWrite();\n}\n\nTEST_F(NetworkFilterManagerTest, WriteFilterCloseConnectionAndReturnContinue) {\n  InSequence s;\n\n  
Upstream::HostDescription* host_description(new NiceMock<Upstream::MockHostDescription>());\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockWriteFilter* write_filter(new MockWriteFilter());\n  MockFilter* filter(new LocalMockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addWriteFilter(WriteFilterSharedPtr{write_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  read_filter->callbacks_->upstreamHost(Upstream::HostDescriptionConstSharedPtr{host_description});\n  EXPECT_EQ(read_filter->callbacks_->upstreamHost(), filter->callbacks_->upstreamHost());\n\n  EXPECT_CALL(*read_filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_EQ(manager.initializeReadFilters(), true);\n\n  read_buffer_.add(\"hello\");\n  EXPECT_CALL(*read_filter, onData(BufferStringEqual(\"hello\"), _))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  manager.onRead();\n\n  read_buffer_.add(\"world\");\n  EXPECT_CALL(*filter, onData(BufferStringEqual(\"helloworld\"), _))\n      .WillOnce(Return(FilterStatus::Continue));\n  read_filter->callbacks_->continueReading();\n\n  write_buffer_.add(\"foo\");\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Open));\n  EXPECT_CALL(*filter, onWrite(BufferStringEqual(\"foo\"), _))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Closing));\n  EXPECT_CALL(*write_filter, onWrite(_, _)).Times(0);\n  manager.onWrite();\n}\n\nTEST_F(NetworkFilterManagerTest, ReadCloseConnectionReturnStopAndCallback) {\n  InSequence s;\n\n  Upstream::HostDescription* host_description(new NiceMock<Upstream::MockHostDescription>());\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockWriteFilter* write_filter(new MockWriteFilter());\n  MockFilter* filter(new 
LocalMockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addWriteFilter(WriteFilterSharedPtr{write_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  read_filter->callbacks_->upstreamHost(Upstream::HostDescriptionConstSharedPtr{host_description});\n  EXPECT_EQ(read_filter->callbacks_->upstreamHost(), filter->callbacks_->upstreamHost());\n\n  EXPECT_CALL(*read_filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_EQ(manager.initializeReadFilters(), true);\n\n  read_buffer_.add(\"hello\");\n  EXPECT_CALL(*read_filter, onData(BufferStringEqual(\"hello\"), _))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  manager.onRead();\n\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Closing));\n  EXPECT_CALL(*filter, onData(_, _)).Times(0);\n  read_filter->callbacks_->continueReading();\n\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Closed));\n  EXPECT_CALL(*filter, onWrite(_, _)).Times(0);\n  manager.onWrite();\n}\n\nTEST_F(NetworkFilterManagerTest, WriteCloseConnectionReturnStopAndCallback) {\n  InSequence s;\n\n  Upstream::HostDescription* host_description(new NiceMock<Upstream::MockHostDescription>());\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockWriteFilter* write_filter(new MockWriteFilter());\n  MockFilter* filter(new LocalMockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addWriteFilter(WriteFilterSharedPtr{write_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  read_filter->callbacks_->upstreamHost(Upstream::HostDescriptionConstSharedPtr{host_description});\n  EXPECT_EQ(read_filter->callbacks_->upstreamHost(), filter->callbacks_->upstreamHost());\n\n  EXPECT_CALL(*read_filter, 
onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_EQ(manager.initializeReadFilters(), true);\n\n  read_buffer_.add(\"hello\");\n  EXPECT_CALL(*read_filter, onData(BufferStringEqual(\"hello\"), _))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*filter, onData(BufferStringEqual(\"hello\"), _))\n      .WillOnce(Return(FilterStatus::Continue));\n  manager.onRead();\n\n  write_buffer_.add(\"foo\");\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Open));\n  EXPECT_CALL(*filter, onWrite(BufferStringEqual(\"foo\"), _))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  manager.onWrite();\n\n  EXPECT_CALL(connection_, state()).WillOnce(Return(Connection::State::Closed));\n  EXPECT_CALL(*filter, onWrite(_, _)).Times(0);\n  EXPECT_CALL(*write_filter, onWrite(_, _)).Times(0);\n  manager.onWrite();\n}\n\n// Test that end_stream is delivered in the correct order with the data, even\n// if FilterStatus::StopIteration occurs.\nTEST_F(NetworkFilterManagerTest, EndStream) {\n  InSequence s;\n\n  Upstream::HostDescription* host_description(new NiceMock<Upstream::MockHostDescription>());\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockWriteFilter* write_filter(new MockWriteFilter());\n  MockFilter* filter(new LocalMockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addWriteFilter(WriteFilterSharedPtr{write_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  read_filter->callbacks_->upstreamHost(Upstream::HostDescriptionConstSharedPtr{host_description});\n  EXPECT_EQ(read_filter->callbacks_->upstreamHost(), filter->callbacks_->upstreamHost());\n\n  EXPECT_CALL(*read_filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  
EXPECT_EQ(manager.initializeReadFilters(), true);\n\n  read_buffer_.add(\"hello\");\n  read_end_stream_ = true;\n  EXPECT_CALL(*read_filter, onData(BufferStringEqual(\"hello\"), true))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  manager.onRead();\n\n  read_buffer_.add(\"world\");\n  EXPECT_CALL(*filter, onData(BufferStringEqual(\"helloworld\"), true))\n      .WillOnce(Return(FilterStatus::Continue));\n  read_filter->callbacks_->continueReading();\n\n  write_buffer_.add(\"foo\");\n  write_end_stream_ = true;\n  EXPECT_CALL(*filter, onWrite(BufferStringEqual(\"foo\"), true))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  manager.onWrite();\n\n  write_buffer_.add(\"bar\");\n  EXPECT_CALL(*filter, onWrite(BufferStringEqual(\"foobar\"), true))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*write_filter, onWrite(BufferStringEqual(\"foobar\"), true))\n      .WillOnce(Return(FilterStatus::Continue));\n  manager.onWrite();\n}\n\n// This is a very important flow so make sure it works correctly in aggregate.\nTEST_F(NetworkFilterManagerTest, RateLimitAndTcpProxy) {\n  InSequence s;\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  NiceMock<MockClientConnection> upstream_connection;\n  NiceMock<Tcp::ConnectionPool::MockInstance> conn_pool;\n  FilterManagerImpl manager(connection_);\n\n  std::string rl_yaml = R\"EOF(\ndomain: foo\ndescriptors:\n- entries:\n  - key: hello\n    value: world\nstat_prefix: name\n    )EOF\";\n\n  ON_CALL(factory_context.runtime_loader_.snapshot_,\n          featureEnabled(\"ratelimit.tcp_filter_enabled\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(factory_context.runtime_loader_.snapshot_,\n          featureEnabled(\"ratelimit.tcp_filter_enforcing\", 100))\n      .WillByDefault(Return(true));\n\n  envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{};\n  TestUtility::loadFromYaml(rl_yaml, proto_config);\n\n  
Extensions::NetworkFilters::RateLimitFilter::ConfigSharedPtr rl_config(\n      new Extensions::NetworkFilters::RateLimitFilter::Config(proto_config, factory_context.scope_,\n                                                              factory_context.runtime_loader_));\n  Extensions::Filters::Common::RateLimit::MockClient* rl_client =\n      new Extensions::Filters::Common::RateLimit::MockClient();\n  manager.addReadFilter(std::make_shared<Extensions::NetworkFilters::RateLimitFilter::Filter>(\n      rl_config, Extensions::Filters::Common::RateLimit::ClientPtr{rl_client}));\n\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy;\n  tcp_proxy.set_stat_prefix(\"name\");\n  tcp_proxy.set_cluster(\"fake_cluster\");\n  TcpProxy::ConfigSharedPtr tcp_proxy_config(new TcpProxy::Config(tcp_proxy, factory_context));\n  manager.addReadFilter(\n      std::make_shared<TcpProxy::Filter>(tcp_proxy_config, factory_context.cluster_manager_));\n\n  Extensions::Filters::Common::RateLimit::RequestCallbacks* request_callbacks{};\n  EXPECT_CALL(*rl_client, limit(_, \"foo\",\n                                testing::ContainerEq(\n                                    std::vector<RateLimit::Descriptor>{{{{\"hello\", \"world\"}}}}),\n                                testing::A<Tracing::Span&>()))\n      .WillOnce(WithArgs<0>(\n          Invoke([&](Extensions::Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks = &callbacks;\n          })));\n\n  EXPECT_EQ(manager.initializeReadFilters(), true);\n\n  EXPECT_CALL(factory_context.cluster_manager_, tcpConnPoolForCluster(\"fake_cluster\", _, _))\n      .WillOnce(Return(&conn_pool));\n\n  request_callbacks->complete(Extensions::Filters::Common::RateLimit::LimitStatus::OK, nullptr,\n                              nullptr, nullptr);\n\n  conn_pool.poolReady(upstream_connection);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(upstream_connection, write(BufferEqual(&buffer), 
_));\n  read_buffer_.add(\"hello\");\n  manager.onRead();\n\n  connection_.raiseEvent(ConnectionEvent::RemoteClose);\n}\n\nTEST_F(NetworkFilterManagerTest, InjectReadDataToFilterChain) {\n  InSequence s;\n\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockWriteFilter* write_filter(new MockWriteFilter());\n  MockFilter* filter(new MockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addWriteFilter(WriteFilterSharedPtr{write_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  EXPECT_CALL(*read_filter, onNewConnection()).WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_EQ(manager.initializeReadFilters(), true);\n\n  EXPECT_CALL(*filter, onNewConnection()).WillOnce(Return(FilterStatus::Continue));\n  read_filter->callbacks_->continueReading();\n\n  read_buffer_.add(\"hello\");\n  read_end_stream_ = true;\n\n  Buffer::OwnedImpl injected_buffer(\"greetings\");\n  EXPECT_CALL(*filter, onData(BufferStringEqual(\"greetings\"), false))\n      .WillOnce(Return(FilterStatus::Continue));\n  read_filter->callbacks_->injectReadDataToFilterChain(injected_buffer, false);\n\n  injected_buffer.add(\" everyone\");\n  EXPECT_CALL(*filter, onData(BufferStringEqual(\"greetings everyone\"), true))\n      .WillOnce(Return(FilterStatus::Continue));\n  read_filter->callbacks_->injectReadDataToFilterChain(injected_buffer, true);\n}\n\nTEST_F(NetworkFilterManagerTest, InjectWriteDataToFilterChain) {\n  InSequence s;\n\n  MockReadFilter* read_filter(new MockReadFilter());\n  MockWriteFilter* write_filter(new MockWriteFilter());\n  MockFilter* filter(new MockFilter());\n\n  FilterManagerImpl manager(connection_);\n  manager.addReadFilter(ReadFilterSharedPtr{read_filter});\n  manager.addWriteFilter(WriteFilterSharedPtr{write_filter});\n  manager.addFilter(FilterSharedPtr{filter});\n\n  Buffer::OwnedImpl injected_buffer(\"greetings\");\n  EXPECT_CALL(*write_filter, 
onWrite(BufferStringEqual(\"greetings\"), false))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(connection_, rawWrite(BufferStringEqual(\"greetings\"), false));\n  filter->write_callbacks_->injectWriteDataToFilterChain(injected_buffer, false);\n\n  injected_buffer.add(\" everyone!\");\n  EXPECT_CALL(*write_filter, onWrite(BufferStringEqual(\" everyone!\"), true))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(connection_, rawWrite(BufferStringEqual(\" everyone!\"), true));\n  filter->write_callbacks_->injectWriteDataToFilterChain(injected_buffer, true);\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/filter_matcher_test.cc",
    "content": "#include \"common/network/address_impl.h\"\n#include \"common/network/filter_matcher.h\"\n\n#include \"test/mocks/network/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\nstruct CallbackHandle {\n  std::unique_ptr<Network::MockListenerFilterCallbacks> callback_;\n  std::unique_ptr<Network::MockConnectionSocket> socket_;\n  Address::InstanceConstSharedPtr address_;\n};\n} // namespace\nclass ListenerFilterMatcherTest : public testing::Test {\npublic:\n  std::unique_ptr<CallbackHandle> createCallbackOnPort(int port) {\n    auto handle = std::make_unique<CallbackHandle>();\n    handle->address_ = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", port);\n    handle->socket_ = std::make_unique<MockConnectionSocket>();\n    handle->callback_ = std::make_unique<MockListenerFilterCallbacks>();\n    EXPECT_CALL(*(handle->socket_), localAddress()).WillRepeatedly(ReturnRef(handle->address_));\n    EXPECT_CALL(*(handle->callback_), socket()).WillRepeatedly(ReturnRef(*(handle->socket_)));\n    return handle;\n  }\n  envoy::config::listener::v3::ListenerFilterChainMatchPredicate createPortPredicate(int port_start,\n                                                                                     int port_end) {\n    envoy::config::listener::v3::ListenerFilterChainMatchPredicate pred;\n    auto ports = pred.mutable_destination_port_range();\n    ports->set_start(port_start);\n    ports->set_end(port_end);\n    return pred;\n  }\n};\n\nTEST_F(ListenerFilterMatcherTest, DstPortMatcher) {\n  auto pred = createPortPredicate(80, 81);\n  auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(pred);\n  auto handle79 = createCallbackOnPort(79);\n  auto handle80 = createCallbackOnPort(80);\n  auto handle81 = createCallbackOnPort(81);\n  EXPECT_FALSE(matcher->matches(*(handle79->callback_)));\n  
EXPECT_TRUE(matcher->matches(*(handle80->callback_)));\n  EXPECT_FALSE(matcher->matches(*(handle81->callback_)));\n}\n\nTEST_F(ListenerFilterMatcherTest, AnyMatdcher) {\n  envoy::config::listener::v3::ListenerFilterChainMatchPredicate pred;\n  pred.set_any_match(true);\n  auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(pred);\n  auto handle79 = createCallbackOnPort(79);\n  auto handle80 = createCallbackOnPort(80);\n  auto handle81 = createCallbackOnPort(81);\n  EXPECT_TRUE(matcher->matches(*(handle79->callback_)));\n  EXPECT_TRUE(matcher->matches(*(handle80->callback_)));\n  EXPECT_TRUE(matcher->matches(*(handle81->callback_)));\n}\n\nTEST_F(ListenerFilterMatcherTest, NotMatcher) {\n  auto pred = createPortPredicate(80, 81);\n  envoy::config::listener::v3::ListenerFilterChainMatchPredicate not_pred;\n  not_pred.mutable_not_match()->MergeFrom(pred);\n  auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(not_pred);\n  auto handle79 = createCallbackOnPort(79);\n  auto handle80 = createCallbackOnPort(80);\n  auto handle81 = createCallbackOnPort(81);\n  EXPECT_TRUE(matcher->matches(*(handle79->callback_)));\n  EXPECT_FALSE(matcher->matches(*(handle80->callback_)));\n  EXPECT_TRUE(matcher->matches(*(handle81->callback_)));\n}\n\nTEST_F(ListenerFilterMatcherTest, OrMatcher) {\n  auto pred80 = createPortPredicate(80, 81);\n  auto pred443 = createPortPredicate(443, 444);\n\n  envoy::config::listener::v3::ListenerFilterChainMatchPredicate pred;\n  pred.mutable_or_match()->mutable_rules()->Add()->MergeFrom(pred80);\n  pred.mutable_or_match()->mutable_rules()->Add()->MergeFrom(pred443);\n\n  auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(pred);\n  auto handle80 = createCallbackOnPort(80);\n  auto handle443 = createCallbackOnPort(443);\n  auto handle3306 = createCallbackOnPort(3306);\n\n  EXPECT_FALSE(matcher->matches(*(handle3306->callback_)));\n  EXPECT_TRUE(matcher->matches(*(handle80->callback_)));\n  
EXPECT_TRUE(matcher->matches(*(handle443->callback_)));\n}\n\nTEST_F(ListenerFilterMatcherTest, AndMatcher) {\n  auto pred80_3306 = createPortPredicate(80, 3306);\n  auto pred443_3306 = createPortPredicate(443, 3306);\n\n  envoy::config::listener::v3::ListenerFilterChainMatchPredicate pred;\n  pred.mutable_and_match()->mutable_rules()->Add()->MergeFrom(pred80_3306);\n  pred.mutable_and_match()->mutable_rules()->Add()->MergeFrom(pred443_3306);\n\n  auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(pred);\n  auto handle80 = createCallbackOnPort(80);\n  auto handle443 = createCallbackOnPort(443);\n  auto handle3306 = createCallbackOnPort(3306);\n\n  EXPECT_FALSE(matcher->matches(*(handle3306->callback_)));\n  EXPECT_FALSE(matcher->matches(*(handle80->callback_)));\n  EXPECT_TRUE(matcher->matches(*(handle443->callback_)));\n}\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/io_socket_handle_impl_test.cc",
    "content": "#include \"common/common/utility.h\"\n#include \"common/network/io_socket_error_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::DoAll;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::WithArg;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nTEST(IoSocketHandleImplTest, TestIoSocketError) {\n  IoSocketError error1(SOCKET_ERROR_AGAIN);\n  EXPECT_DEBUG_DEATH(error1.getErrorCode(),\n                     \".*assert failure: .* Details: Didn't use getIoSocketEagainInstance.*\");\n  EXPECT_EQ(errorDetails(SOCKET_ERROR_AGAIN),\n            IoSocketError::getIoSocketEagainInstance()->getErrorDetails());\n\n  IoSocketError error2(SOCKET_ERROR_NOT_SUP);\n  EXPECT_EQ(IoSocketError::IoErrorCode::NoSupport, error2.getErrorCode());\n  EXPECT_EQ(errorDetails(SOCKET_ERROR_NOT_SUP), error2.getErrorDetails());\n\n  IoSocketError error3(SOCKET_ERROR_AF_NO_SUP);\n  EXPECT_EQ(IoSocketError::IoErrorCode::AddressFamilyNoSupport, error3.getErrorCode());\n  EXPECT_EQ(errorDetails(SOCKET_ERROR_AF_NO_SUP), error3.getErrorDetails());\n\n  IoSocketError error4(SOCKET_ERROR_IN_PROGRESS);\n  EXPECT_EQ(IoSocketError::IoErrorCode::InProgress, error4.getErrorCode());\n  EXPECT_EQ(errorDetails(SOCKET_ERROR_IN_PROGRESS), error4.getErrorDetails());\n\n  IoSocketError error5(SOCKET_ERROR_PERM);\n  EXPECT_EQ(IoSocketError::IoErrorCode::Permission, error5.getErrorCode());\n  EXPECT_EQ(errorDetails(SOCKET_ERROR_PERM), error5.getErrorDetails());\n\n  IoSocketError error6(SOCKET_ERROR_MSG_SIZE);\n  EXPECT_EQ(IoSocketError::IoErrorCode::MessageTooBig, error6.getErrorCode());\n  EXPECT_EQ(errorDetails(SOCKET_ERROR_MSG_SIZE), error6.getErrorDetails());\n\n  IoSocketError error7(SOCKET_ERROR_INTR);\n  
EXPECT_EQ(IoSocketError::IoErrorCode::Interrupt, error7.getErrorCode());\n  EXPECT_EQ(errorDetails(SOCKET_ERROR_INTR), error7.getErrorDetails());\n\n  IoSocketError error8(SOCKET_ERROR_ADDR_NOT_AVAIL);\n  EXPECT_EQ(IoSocketError::IoErrorCode::AddressNotAvailable, error8.getErrorCode());\n  EXPECT_EQ(errorDetails(SOCKET_ERROR_ADDR_NOT_AVAIL), error8.getErrorDetails());\n\n  // Random unknown error\n  IoSocketError error9(123);\n  EXPECT_EQ(IoSocketError::IoErrorCode::UnknownError, error9.getErrorCode());\n  EXPECT_EQ(errorDetails(123), error9.getErrorDetails());\n}\n\n#ifdef TCP_INFO\n\nTEST(IoSocketHandleImpl, LastRoundTripTimeReturnsEmptyOptionalIfGetSocketFails) {\n  NiceMock<Envoy::Api::MockOsSysCalls> os_sys_calls;\n  auto os_calls =\n      std::make_unique<Envoy::TestThreadsafeSingletonInjector<Envoy::Api::OsSysCallsImpl>>(\n          &os_sys_calls);\n  EXPECT_CALL(os_sys_calls, getsockopt_(_, _, _, _, _)).WillOnce(Return(-1));\n\n  IoSocketHandleImpl io_handle;\n  EXPECT_THAT(io_handle.lastRoundTripTime(), Eq(absl::optional<std::chrono::milliseconds>{}));\n}\n\nTEST(IoSocketHandleImpl, LastRoundTripTimeReturnsRttIfSuccessful) {\n  NiceMock<Envoy::Api::MockOsSysCalls> os_sys_calls;\n  auto os_calls =\n      std::make_unique<Envoy::TestThreadsafeSingletonInjector<Envoy::Api::OsSysCallsImpl>>(\n          &os_sys_calls);\n  EXPECT_CALL(os_sys_calls, getsockopt_(_, _, _, _, _))\n      .WillOnce(DoAll(WithArg<3>(Invoke([](void* optval) {\n                        static_cast<struct tcp_info*>(optval)->tcpi_rtt = 35;\n                      })),\n                      Return(0)));\n\n  IoSocketHandleImpl io_handle;\n  EXPECT_THAT(io_handle.lastRoundTripTime(), Eq(absl::optional<std::chrono::milliseconds>{35}));\n}\n\n#endif\n\n#ifndef TCP_INFO\n\nTEST(IoSocketHandleImpl, LastRoundTripTimeAlwaysReturnsEmptyOptional) {\n  IoSocketHandleImpl io_handle;\n  EXPECT_THAT(io_handle.lastRoundTripTime(), Eq(absl::optional<std::chrono::milliseconds>{}));\n}\n\n#endif\n\n} // 
namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/lc_trie_speed_test.cc",
    "content": "#include \"common/network/lc_trie.h\"\n#include \"common/network/utility.h\"\n\n#include \"benchmark/benchmark.h\"\n\nnamespace {\n\nstruct AddressInputs {\n  AddressInputs() {\n    // Random test addresses from RFC 5737 netblocks\n    static const std::string test_addresses[] = {\n        \"192.0.2.225\",   \"198.51.100.55\", \"198.51.100.105\", \"192.0.2.150\",   \"203.0.113.162\",\n        \"203.0.113.110\", \"203.0.113.99\",  \"198.51.100.23\",  \"198.51.100.24\", \"203.0.113.12\"};\n    for (const auto& address : test_addresses) {\n      addresses_.push_back(Envoy::Network::Utility::parseInternetAddress(address));\n    }\n  }\n\n  std::vector<Envoy::Network::Address::InstanceConstSharedPtr> addresses_;\n};\n\nstruct CidrInputs {\n  CidrInputs() {\n    // Construct three sets of prefixes: one consisting of 1,024 addresses in an\n    // RFC 5737 netblock, another consisting of those same addresses plus\n    // 0.0.0.0/0 (to exercise the LC Trie's support for nested prefixes),\n    // and finally a set containing only 0.0.0.0/0.\n    for (int i = 0; i < 32; i++) {\n      for (int j = 0; j < 32; j++) {\n        tag_data_.emplace_back(\n            std::pair<std::string, std::vector<Envoy::Network::Address::CidrRange>>(\n                {\"tag_1\",\n                 {Envoy::Network::Address::CidrRange::create(\n                     fmt::format(\"192.0.{}.{}/32\", i, j))}}));\n      }\n    }\n    tag_data_nested_prefixes_ = tag_data_;\n    tag_data_nested_prefixes_.emplace_back(\n        std::pair<std::string, std::vector<Envoy::Network::Address::CidrRange>>(\n            {\"tag_0\", {Envoy::Network::Address::CidrRange::create(\"0.0.0.0/0\")}}));\n    tag_data_minimal_.emplace_back(\n        std::pair<std::string, std::vector<Envoy::Network::Address::CidrRange>>(\n            {\"tag_1\", {Envoy::Network::Address::CidrRange::create(\"0.0.0.0/0\")}}));\n  }\n\n  std::vector<std::pair<std::string, std::vector<Envoy::Network::Address::CidrRange>>> 
tag_data_;\n  std::vector<std::pair<std::string, std::vector<Envoy::Network::Address::CidrRange>>>\n      tag_data_nested_prefixes_;\n  std::vector<std::pair<std::string, std::vector<Envoy::Network::Address::CidrRange>>>\n      tag_data_minimal_;\n};\n\n} // namespace\n\nnamespace Envoy {\n\nstatic void lcTrieConstruct(benchmark::State& state) {\n  CidrInputs inputs;\n\n  std::unique_ptr<Envoy::Network::LcTrie::LcTrie<std::string>> trie;\n  for (auto _ : state) {\n    trie = std::make_unique<Envoy::Network::LcTrie::LcTrie<std::string>>(inputs.tag_data_);\n  }\n  benchmark::DoNotOptimize(trie);\n}\n\nBENCHMARK(lcTrieConstruct);\n\nstatic void lcTrieConstructNested(benchmark::State& state) {\n  CidrInputs inputs;\n\n  std::unique_ptr<Envoy::Network::LcTrie::LcTrie<std::string>> trie;\n  for (auto _ : state) {\n    trie = std::make_unique<Envoy::Network::LcTrie::LcTrie<std::string>>(\n        inputs.tag_data_nested_prefixes_);\n  }\n  benchmark::DoNotOptimize(trie);\n}\n\nBENCHMARK(lcTrieConstructNested);\n\nstatic void lcTrieConstructMinimal(benchmark::State& state) {\n  CidrInputs inputs;\n\n  std::unique_ptr<Envoy::Network::LcTrie::LcTrie<std::string>> trie;\n  for (auto _ : state) {\n    trie = std::make_unique<Envoy::Network::LcTrie::LcTrie<std::string>>(inputs.tag_data_minimal_);\n  }\n  benchmark::DoNotOptimize(trie);\n}\n\nBENCHMARK(lcTrieConstructMinimal);\n\nstatic void lcTrieLookup(benchmark::State& state) {\n  CidrInputs cidr_inputs;\n  AddressInputs address_inputs;\n  std::unique_ptr<Envoy::Network::LcTrie::LcTrie<std::string>> lc_trie =\n      std::make_unique<Envoy::Network::LcTrie::LcTrie<std::string>>(cidr_inputs.tag_data_);\n\n  static size_t i = 0;\n  size_t output_tags = 0;\n  for (auto _ : state) {\n    i++;\n    i %= address_inputs.addresses_.size();\n    output_tags += lc_trie->getData(address_inputs.addresses_[i]).size();\n  }\n  benchmark::DoNotOptimize(output_tags);\n}\n\nBENCHMARK(lcTrieLookup);\n\nstatic void 
lcTrieLookupWithNestedPrefixes(benchmark::State& state) {\n  CidrInputs cidr_inputs;\n  AddressInputs address_inputs;\n  std::unique_ptr<Envoy::Network::LcTrie::LcTrie<std::string>> lc_trie_nested_prefixes =\n      std::make_unique<Envoy::Network::LcTrie::LcTrie<std::string>>(\n          cidr_inputs.tag_data_nested_prefixes_);\n\n  static size_t i = 0;\n  size_t output_tags = 0;\n  for (auto _ : state) {\n    i++;\n    i %= address_inputs.addresses_.size();\n    output_tags += lc_trie_nested_prefixes->getData(address_inputs.addresses_[i]).size();\n  }\n  benchmark::DoNotOptimize(output_tags);\n}\n\nBENCHMARK(lcTrieLookupWithNestedPrefixes);\n\nstatic void lcTrieLookupMinimal(benchmark::State& state) {\n  CidrInputs cidr_inputs;\n  AddressInputs address_inputs;\n  std::unique_ptr<Envoy::Network::LcTrie::LcTrie<std::string>> lc_trie_minimal =\n      std::make_unique<Envoy::Network::LcTrie::LcTrie<std::string>>(cidr_inputs.tag_data_minimal_);\n\n  static size_t i = 0;\n  size_t output_tags = 0;\n  for (auto _ : state) {\n    i++;\n    i %= address_inputs.addresses_.size();\n    output_tags += lc_trie_minimal->getData(address_inputs.addresses_[i]).size();\n  }\n  benchmark::DoNotOptimize(output_tags);\n}\n\nBENCHMARK(lcTrieLookupMinimal);\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/lc_trie_test.cc",
    "content": "#include <memory>\n\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/cidr_range.h\"\n#include \"common/network/lc_trie.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace LcTrie {\n\nclass LcTrieTest : public testing::Test {\npublic:\n  void setup(const std::vector<std::vector<std::string>>& cidr_range_strings,\n             bool exclusive = false, double fill_factor = 0, uint32_t root_branch_factor = 0) {\n    std::vector<std::pair<std::string, std::vector<Address::CidrRange>>> output;\n    for (size_t i = 0; i < cidr_range_strings.size(); i++) {\n      std::pair<std::string, std::vector<Address::CidrRange>> ip_tags;\n      ip_tags.first = fmt::format(\"tag_{0}\", i);\n      for (const auto& j : cidr_range_strings[i]) {\n        ip_tags.second.push_back(Address::CidrRange::create(j));\n      }\n      output.push_back(ip_tags);\n    }\n    // Use custom fill factors and root branch factors if they are in the valid range.\n    if ((fill_factor > 0) && (fill_factor <= 1) && (root_branch_factor > 0)) {\n      trie_ =\n          std::make_unique<LcTrie<std::string>>(output, exclusive, fill_factor, root_branch_factor);\n    } else {\n      trie_ = std::make_unique<LcTrie<std::string>>(output, exclusive);\n    }\n  }\n\n  void expectIPAndTags(\n      const std::vector<std::pair<std::string, std::vector<std::string>>>& test_output) {\n    for (const auto& kv : test_output) {\n      std::vector<std::string> expected(kv.second);\n      std::sort(expected.begin(), expected.end());\n      std::vector<std::string> actual(trie_->getData(Utility::parseInternetAddress(kv.first)));\n      std::sort(actual.begin(), actual.end());\n      EXPECT_EQ(expected, actual);\n    }\n  }\n\n  std::unique_ptr<LcTrie<std::string>> trie_;\n};\n\n// Use the default constructor values.\nTEST_F(LcTrieTest, IPv4Defaults) {\n  
std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"0.0.0.0/4\"},   // tag_0\n      {\"16.0.0.0/4\"},  // tag_1\n      {\"40.0.0.0/5\"},  // tag_2\n      {\"64.0.0.0/3\"},  // tag_3\n      {\"96.0.0.0/4\"},  // tag_4\n      {\"112.0.0.0/4\"}, // tag_5\n      {\"128.0.0.0/3\"}, // tag_6\n      {\"160.0.0.0/6\"}, // tag_7\n      {\"164.0.0.0/6\"}, // tag_8\n      {\"168.0.0.0/5\"}, // tag_9\n      {\"176.0.0.0/5\"}, // tag_10\n      {\"184.0.0.0/5\"}, // tag_11\n      {\"192.0.0.0/3\"}, // tag_12\n      {\"232.0.0.0/8\"}, // tag_13\n      {\"233.0.0.0/8\"}, // tag_14\n  };\n  setup(cidr_range_strings);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"0.0.0.0\", {\"tag_0\"}},     {\"16.0.0.1\", {\"tag_1\"}},\n      {\"40.0.0.255\", {\"tag_2\"}},  {\"64.0.130.0\", {\"tag_3\"}},\n      {\"96.0.0.10\", {\"tag_4\"}},   {\"112.0.0.0\", {\"tag_5\"}},\n      {\"128.0.0.1\", {\"tag_6\"}},   {\"160.0.0.1\", {\"tag_7\"}},\n      {\"164.255.0.0\", {\"tag_8\"}}, {\"168.0.0.0\", {\"tag_9\"}},\n      {\"176.0.0.1\", {\"tag_10\"}},  {\"184.0.0.1\", {\"tag_11\"}},\n      {\"192.0.0.0\", {\"tag_12\"}},  {\"232.0.80.0\", {\"tag_13\"}},\n      {\"233.0.0.1\", {\"tag_14\"}},  {\"::1\", {}},\n  };\n  expectIPAndTags(test_case);\n}\n\n// There was a bug in the C++ port that didn't update the index for the next address in the trie.\n// For the data set below, the address \"164.255.0.0\" returned no tag instead of \"tag_8\".\nTEST_F(LcTrieTest, RootBranchingFactor) {\n  double fill_factor = 0.75;\n  uint32_t root_branching_factor = 16;\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"0.0.0.0/4\"},   // tag_0\n      {\"16.0.0.0/4\"},  // tag_1\n      {\"40.0.0.0/5\"},  // tag_2\n      {\"64.0.0.0/3\"},  // tag_3\n      {\"96.0.0.0/4\"},  // tag_4\n      {\"112.0.0.0/4\"}, // tag_5\n      {\"128.0.0.0/3\"}, // tag_6\n      {\"160.0.0.0/6\"}, // tag_7\n      {\"164.0.0.0/6\"}, // tag_8\n      
{\"168.0.0.0/5\"}, // tag_9\n      {\"176.0.0.0/5\"}, // tag_10\n      {\"184.0.0.0/5\"}, // tag_11\n      {\"192.0.0.0/3\"}, // tag_12\n      {\"232.0.0.0/8\"}, // tag_13\n      {\"233.0.0.0/8\"}, // tag_14\n  };\n  setup(cidr_range_strings, false, fill_factor, root_branching_factor);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"0.0.0.0\", {\"tag_0\"}},     {\"16.0.0.1\", {\"tag_1\"}},\n      {\"40.0.0.255\", {\"tag_2\"}},  {\"64.0.130.0\", {\"tag_3\"}},\n      {\"96.0.0.10\", {\"tag_4\"}},   {\"112.0.0.0\", {\"tag_5\"}},\n      {\"128.0.0.1\", {\"tag_6\"}},   {\"160.0.0.1\", {\"tag_7\"}},\n      {\"164.255.0.0\", {\"tag_8\"}}, {\"168.0.0.0\", {\"tag_9\"}},\n      {\"176.0.0.1\", {\"tag_10\"}},  {\"184.0.0.1\", {\"tag_11\"}},\n      {\"192.0.0.0\", {\"tag_12\"}},  {\"232.0.80.0\", {\"tag_13\"}},\n      {\"233.0.0.1\", {\"tag_14\"}},  {\"::1\", {}},\n  };\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, IPv4AddressSizeBoundaries) {\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"1.2.3.4/24\", \"10.255.255.255/32\"},                           // tag_0\n      {\"54.233.128.0/17\", \"205.251.192.100/26\", \"52.220.191.10/30\"}, // tag_1\n      {\"10.255.255.254/32\"}                                          // tag_2\n  };\n\n  setup(cidr_range_strings);\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"205.251.192.100\", {\"tag_1\"}},\n      {\"10.255.255.255\", {\"tag_0\"}},\n      {\"52.220.191.10\", {\"tag_1\"}},\n      {\"10.255.255.254\", {\"tag_2\"}},\n      {\"18.232.0.255\", {}}};\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, IPv4Boundaries) {\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"0.0.0.0/1\"},                                 // tag_0\n      {\"2001:abcd:ef01:2345:6789:abcd:ef01:234/64\"}, // tag_1\n      {\"128.0.0.0/1\"},                               // tag_2\n  };\n\n  
setup(cidr_range_strings);\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"10.255.255.255\", {\"tag_0\"}},\n      {\"205.251.192.100\", {\"tag_2\"}},\n  };\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, IPv6) {\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"2406:da00:2000::/40\", \"::1/128\"},            // tag_0\n      {\"2001:abcd:ef01:2345:6789:abcd:ef01:234/64\"}, // tag_1\n  };\n  setup(cidr_range_strings);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"2406:da00:2000::1\", {\"tag_0\"}},\n      {\"2001:abcd:ef01:2345::1\", {\"tag_1\"}},\n      {\"::1\", {\"tag_0\"}},\n      {\"1.2.3.4\", {}},\n      {\"2400:ffff:ff00::\", {}},\n  };\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, IPv6AddressSizeBoundaries) {\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"2406:da00:2000::/40\", \"::1/128\"},            // tag_0\n      {\"2001:abcd:ef01:2345:6789:abcd:ef01:234/64\"}, // tag_1\n      {\"::/128\"},                                    // tag_2\n  };\n  setup(cidr_range_strings);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"::1\", {\"tag_0\"}},\n      {\"2406:da00:2000::1\", {\"tag_0\"}},\n      {\"2001:abcd:ef01:2345::1\", {\"tag_1\"}},\n      {\"::\", {\"tag_2\"}},\n      {\"::2\", {}},\n  };\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, IPv6Boundaries) {\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"8000::/1\"},   // tag_0\n      {\"1.2.3.4/24\"}, // tag_1\n      {\"::/1\"},       // tag_2\n  };\n  setup(cidr_range_strings);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"::1\", {\"tag_2\"}},\n      {\"::2\", {\"tag_2\"}},\n      {\"8000::1\", {\"tag_0\"}},\n  };\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, CatchAllIPv4Prefix) {\n  std::vector<std::vector<std::string>> 
cidr_range_strings = {\n      {\"1.2.3.4/0\"},                                // tag_0\n      {\"2001:abcd:ef01:2345:6789:abcd:ef01:234/64\"} // tag_1\n  };\n  setup(cidr_range_strings);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"2001:abcd:ef01:2345::1\", {\"tag_1\"}},\n      {\"1.2.3.4\", {\"tag_0\"}},\n      {\"255.255.255.255\", {\"tag_0\"}},\n      {\"2400:ffff:ff00::\", {}},\n  };\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, CatchAllIPv6Prefix) {\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"::/0\"},      // tag_0\n      {\"1.2.3.4/24\"} // tag_1\n  };\n  setup(cidr_range_strings);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"2001:abcd:ef01:2345::1\", {\"tag_0\"}},\n      {\"1.2.3.4\", {\"tag_1\"}},\n      {\"abcd::343\", {\"tag_0\"}},\n      {\"255.255.255.255\", {}}};\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, BothIpvVersions) {\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"2406:da00:2000::/40\", \"::1/128\"},                            // tag_0\n      {\"2001:abcd:ef01:2345:6789:abcd:ef01:234/64\"},                 // tag_1\n      {\"1.2.3.4/24\", \"10.255.255.255/32\"},                           // tag_2\n      {\"54.233.128.0/17\", \"205.251.192.100/26\", \"52.220.191.10/30\"}, // tag_3\n  };\n  setup(cidr_range_strings);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"205.251.192.100\", {\"tag_3\"}},\n      {\"10.255.255.255\", {\"tag_2\"}},\n      {\"52.220.191.10\", {\"tag_3\"}},\n      {\"2406:da00:2000::1\", {\"tag_0\"}},\n      {\"2001:abcd:ef01:2345::1\", {\"tag_1\"}},\n      {\"::1\", {\"tag_0\"}},\n      {\"18.232.0.255\", {}},\n      {\"2400:ffff:ff00::\", {}},\n  };\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, NestedPrefixes) {\n  const std::vector<std::vector<std::string>> cidr_range_strings = {\n      
{\"203.0.113.0/24\", \"203.0.113.128/25\"}, // tag_0\n      {\"203.0.113.255/32\"},                   // tag_1\n      {\"198.51.100.0/24\"},                    // tag_2\n      {\"2001:db8::/96\", \"2001:db8::8000/97\"}, // tag_3\n      {\"2001:db8::ffff/128\"},                 // tag_4\n      {\"2001:db8:1::/48\"},                    // tag_5\n      {\"2001:db8:1::/128\", \"2001:db8:1::/48\"} // tag_6\n  };\n  setup(cidr_range_strings);\n\n  const std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"203.0.113.0\", {\"tag_0\"}},\n      {\"203.0.113.192\", {\"tag_0\"}},\n      {\"203.0.113.255\", {\"tag_0\", \"tag_1\"}},\n      {\"198.51.100.1\", {\"tag_2\"}},\n      {\"2001:db8::ffff\", {\"tag_3\", \"tag_4\"}},\n      {\"2001:db8:1::ffff\", {\"tag_5\", \"tag_6\"}}};\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, NestedPrefixesWithCatchAll) {\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"0.0.0.0/0\"},                          // tag_0\n      {\"203.0.113.0/24\"},                     // tag_1\n      {\"203.0.113.128/25\"},                   // tag_2\n      {\"198.51.100.0/24\"},                    // tag_3\n      {\"::0/0\"},                              // tag_4\n      {\"2001:db8::/96\", \"2001:db8::8000/97\"}, // tag_5\n      {\"2001:db8::ffff/128\"},                 // tag_6\n      {\"2001:db8:1::/48\"},                    // tag_7\n      {\"203.0.113.0/24\"}                      // tag_8 (same subnet as tag_1)\n  };\n  setup(cidr_range_strings);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"203.0.0.0\", {\"tag_0\"}},\n      {\"203.0.113.0\", {\"tag_0\", \"tag_1\", \"tag_8\"}},\n      {\"203.0.113.192\", {\"tag_0\", \"tag_1\", \"tag_2\", \"tag_8\"}},\n      {\"203.0.113.255\", {\"tag_0\", \"tag_1\", \"tag_2\", \"tag_8\"}},\n      {\"198.51.100.1\", {\"tag_0\", \"tag_3\"}},\n      {\"2001:db8::ffff\", {\"tag_4\", \"tag_5\", \"tag_6\"}},\n      
{\"2001:db8:1::ffff\", {\"tag_4\", \"tag_7\"}}};\n  expectIPAndTags(test_case);\n}\n\nTEST_F(LcTrieTest, ExclusiveNestedPrefixesWithCatchAll) {\n  std::vector<std::vector<std::string>> cidr_range_strings = {\n      {\"0.0.0.0/0\"},                          // tag_0\n      {\"203.0.113.0/24\"},                     // tag_1\n      {\"203.0.113.128/25\"},                   // tag_2\n      {\"198.51.100.0/24\"},                    // tag_3\n      {\"::0/0\"},                              // tag_4\n      {\"2001:db8::/96\", \"2001:db8::8000/97\"}, // tag_5\n      {\"2001:db8::ffff/128\"},                 // tag_6\n      {\"2001:db8:1::/48\"},                    // tag_7\n      {\"203.0.113.0/24\"}                      // tag_8 (same subnet as tag_1)\n  };\n  setup(cidr_range_strings, true);\n\n  std::vector<std::pair<std::string, std::vector<std::string>>> test_case = {\n      {\"203.0.0.0\", {\"tag_0\"}},       {\"203.0.113.0\", {\"tag_1\", \"tag_8\"}},\n      {\"203.0.113.192\", {\"tag_2\"}},   {\"203.0.113.255\", {\"tag_2\"}},\n      {\"198.51.100.1\", {\"tag_3\"}},    {\"2001:db8::ffff\", {\"tag_6\"}},\n      {\"2001:db8:1::ffff\", {\"tag_7\"}}};\n  expectIPAndTags(test_case);\n}\n\n// Ensure the trie will reject inputs that would cause it to exceed the maximum 2^20 nodes\n// when using the default fill factor.\nTEST_F(LcTrieTest, MaximumEntriesExceptionDefault) {\n  static const size_t num_prefixes = 1 << 19;\n  Address::CidrRange address = Address::CidrRange::create(\"10.0.0.1/8\");\n  std::vector<Address::CidrRange> prefixes;\n  prefixes.reserve(num_prefixes);\n  for (size_t i = 0; i < num_prefixes; i++) {\n    prefixes.push_back(address);\n  }\n  EXPECT_EQ(num_prefixes, prefixes.size());\n\n  std::pair<std::string, std::vector<Address::CidrRange>> ip_tag =\n      std::make_pair(\"bad_tag\", prefixes);\n  std::vector<std::pair<std::string, std::vector<Address::CidrRange>>> ip_tags_input{ip_tag};\n  EXPECT_THROW_WITH_MESSAGE(new LcTrie<std::string>(ip_tags_input), 
EnvoyException,\n                            \"The input vector has '524288' CIDR range entries. \"\n                            \"LC-Trie can only support '262144' CIDR ranges with \"\n                            \"the specified fill factor.\");\n}\n\n// Ensure the trie will reject inputs that would cause it to exceed the maximum 2^20 nodes\n// when using a fill factor override.\nTEST_F(LcTrieTest, MaximumEntriesExceptionOverride) {\n  static const size_t num_prefixes = 8192;\n  std::vector<Address::CidrRange> prefixes;\n  prefixes.reserve(num_prefixes);\n  for (size_t i = 0; i < 16; i++) {\n    for (size_t j = 0; j < 16; j++) {\n      for (size_t k = 0; k < 32; k++) {\n        prefixes.emplace_back(Address::CidrRange::create(fmt::format(\"10.{}.{}.{}/8\", i, j, k)));\n      }\n    }\n  }\n  EXPECT_EQ(num_prefixes, prefixes.size());\n\n  std::pair<std::string, std::vector<Address::CidrRange>> ip_tag =\n      std::make_pair(\"bad_tag\", prefixes);\n  std::vector<std::pair<std::string, std::vector<Address::CidrRange>>> ip_tags_input{ip_tag};\n  EXPECT_THROW_WITH_MESSAGE(new LcTrie<std::string>(ip_tags_input, false, 0.01), EnvoyException,\n                            \"The input vector has '8192' CIDR range entries. \"\n                            \"LC-Trie can only support '5242' CIDR ranges with \"\n                            \"the specified fill factor.\");\n}\n\n} // namespace LcTrie\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/listen_socket_impl_test.cc",
    "content": "#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/exception.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\ntemplate <Network::Socket::Type Type>\nclass ListenSocketImplTest : public testing::TestWithParam<Address::IpVersion> {\nprotected:\n  ListenSocketImplTest() : version_(GetParam()) {}\n  const Address::IpVersion version_;\n\n  template <typename... Args>\n  std::unique_ptr<ListenSocketImpl> createListenSocketPtr(Args&&... args) {\n    using NetworkSocketTraitType = NetworkSocketTrait<Type>;\n\n    return std::make_unique<NetworkListenSocket<NetworkSocketTraitType>>(\n        std::forward<Args>(args)...);\n  }\n\n  void testBindSpecificPort() {\n    // This test has a small but real risk of flaky behavior if another thread or process should\n    // bind to our assigned port during the interval between closing the fd and re-binding. 
In an\n    // attempt to avoid this, we allow for retrying by placing the core of the test in a loop with\n    // a catch of the SocketBindException, indicating we couldn't bind, at which point we retry.\n    const int kLoopLimit = 20;\n    int loop_number = 0;\n    while (true) {\n      ++loop_number;\n\n      auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Socket::Type::Stream);\n      auto addr = addr_fd.first;\n      SocketPtr& sock = addr_fd.second;\n      EXPECT_TRUE(sock->ioHandle().isOpen());\n\n      // Confirm that we got a reasonable address and port.\n      ASSERT_EQ(Address::Type::Ip, addr->type());\n      ASSERT_EQ(version_, addr->ip()->version());\n      ASSERT_LT(0U, addr->ip()->port());\n\n      // Release the socket and re-bind it.\n      EXPECT_TRUE(sock->isOpen());\n      sock->close();\n\n      auto option = std::make_unique<MockSocketOption>();\n      auto options = std::make_shared<std::vector<Network::Socket::OptionConstSharedPtr>>();\n      EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_PREBIND))\n          .WillOnce(Return(true));\n      options->emplace_back(std::move(option));\n      std::unique_ptr<ListenSocketImpl> socket1;\n      try {\n        socket1 = createListenSocketPtr(addr, options, true);\n      } catch (SocketBindException& e) {\n        if (e.errorNumber() != EADDRINUSE) {\n          ADD_FAILURE() << \"Unexpected failure (\" << e.errorNumber()\n                        << \") to bind a free port: \" << e.what();\n          throw;\n        } else if (loop_number >= kLoopLimit) {\n          ADD_FAILURE() << \"Too many failures (\" << loop_number\n                        << \") to bind a specific port: \" << e.what();\n          return;\n        }\n        continue;\n      }\n\n      // TODO (conqerAtapple): This is unfortunate. 
We should be able to templatize this\n      // instead of if block.\n      auto os_sys_calls = Api::OsSysCallsSingleton::get();\n      if (NetworkSocketTrait<Type>::type == Socket::Type::Stream) {\n        EXPECT_EQ(0, socket1->listen(0).rc_);\n      }\n\n      EXPECT_EQ(addr->ip()->port(), socket1->localAddress()->ip()->port());\n      EXPECT_EQ(addr->ip()->addressAsString(), socket1->localAddress()->ip()->addressAsString());\n      EXPECT_EQ(Type, socket1->socketType());\n\n      auto option2 = std::make_unique<MockSocketOption>();\n      auto options2 = std::make_shared<std::vector<Network::Socket::OptionConstSharedPtr>>();\n      EXPECT_CALL(*option2, setOption(_, envoy::config::core::v3::SocketOption::STATE_PREBIND))\n          .WillOnce(Return(true));\n      options2->emplace_back(std::move(option2));\n      // The address and port are bound already, should throw exception.\n      EXPECT_THROW(createListenSocketPtr(addr, options2, true), SocketBindException);\n\n      // Release socket and re-bind it.\n      socket1->close();\n\n      // Test createListenSocketPtr from IoHandlePtr's os_fd_t constructor\n      int domain = version_ == Address::IpVersion::v4 ? 
AF_INET : AF_INET6;\n      auto socket_result = os_sys_calls.socket(domain, SOCK_STREAM, 0);\n      EXPECT_TRUE(SOCKET_VALID(socket_result.rc_));\n      Network::IoHandlePtr io_handle = std::make_unique<IoSocketHandleImpl>(socket_result.rc_);\n      auto socket3 = createListenSocketPtr(std::move(io_handle), addr, nullptr);\n      EXPECT_EQ(socket3->localAddress()->asString(), addr->asString());\n\n      // Test successful.\n      return;\n    }\n  }\n\n  void testBindPortZero() {\n    auto loopback = Network::Test::getCanonicalLoopbackAddress(version_);\n    auto socket = createListenSocketPtr(loopback, nullptr, true);\n    EXPECT_EQ(Address::Type::Ip, socket->localAddress()->type());\n    EXPECT_EQ(version_, socket->localAddress()->ip()->version());\n    EXPECT_EQ(loopback->ip()->addressAsString(), socket->localAddress()->ip()->addressAsString());\n    EXPECT_GT(socket->localAddress()->ip()->port(), 0U);\n    EXPECT_EQ(Type, socket->socketType());\n  }\n};\n\nusing ListenSocketImplTestTcp = ListenSocketImplTest<Network::Socket::Type::Stream>;\nusing ListenSocketImplTestUdp = ListenSocketImplTest<Network::Socket::Type::Datagram>;\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ListenSocketImplTestTcp,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ListenSocketImplTestUdp,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ListenSocketImplTestTcp, BindSpecificPort) { testBindSpecificPort(); }\n\n/*\n * A simple implementation to test some of ListenSocketImpl's accessors without requiring\n * stack interaction.\n */\nclass TestListenSocket : public ListenSocketImpl {\npublic:\n  TestListenSocket(Address::InstanceConstSharedPtr address)\n      : ListenSocketImpl(std::make_unique<Network::IoSocketHandleImpl>(), address) {}\n  
Socket::Type socketType() const override { return Socket::Type::Stream; }\n};\n\nTEST_P(ListenSocketImplTestTcp, SetLocalAddress) {\n  std::string address_str = \"10.1.2.3\";\n  if (version_ == Address::IpVersion::v6) {\n    address_str = \"1::2\";\n  }\n\n  Address::InstanceConstSharedPtr address = Network::Utility::parseInternetAddress(address_str);\n\n  TestListenSocket socket(Utility::getIpv4AnyAddress());\n\n  socket.setLocalAddress(address);\n\n  EXPECT_EQ(socket.localAddress(), address);\n}\n\nTEST_P(ListenSocketImplTestUdp, BindSpecificPort) { testBindSpecificPort(); }\n\n// Validate that we get port allocation when binding to port zero.\nTEST_P(ListenSocketImplTestTcp, BindPortZero) { testBindPortZero(); }\n\nTEST_P(ListenSocketImplTestUdp, BindPortZero) { testBindPortZero(); }\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/listener_impl_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/exception.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/network/tcp_listener_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"test/common/network/listener_impl_test_base.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nstatic void errorCallbackTest(Address::IpVersion version) {\n  // Force the error callback to fire by closing the socket under the listener. We run this entire\n  // test in the forked process to avoid confusion when the fork happens.\n  Api::ApiPtr api = Api::createApiForTest();\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"test_thread\"));\n\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(version), nullptr, true);\n  Network::MockTcpListenerCallbacks listener_callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Network::ListenerPtr listener =\n      dispatcher->createListener(socket, listener_callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection(\n      socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      Network::Test::createRawBufferSocket(), nullptr);\n  client_connection->connect();\n\n  StreamInfo::StreamInfoImpl stream_info(dispatcher->timeSource());\n  EXPECT_CALL(listener_callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void {\n        Network::ConnectionPtr conn = 
dispatcher->createServerConnection(\n            std::move(accepted_socket), Network::Test::createRawBufferSocket(), stream_info);\n        client_connection->close(ConnectionCloseType::NoFlush);\n        conn->close(ConnectionCloseType::NoFlush);\n        socket->close();\n      }));\n\n  dispatcher->run(Event::Dispatcher::RunType::Block);\n}\n\nclass ListenerImplDeathTest : public testing::TestWithParam<Address::IpVersion> {};\nINSTANTIATE_TEST_SUITE_P(IpVersions, ListenerImplDeathTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\nTEST_P(ListenerImplDeathTest, ErrorCallback) {\n  EXPECT_DEATH(errorCallbackTest(GetParam()), \".*listener accept failure.*\");\n}\n\nclass TestTcpListenerImpl : public TcpListenerImpl {\npublic:\n  TestTcpListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket,\n                      TcpListenerCallbacks& cb, bool bind_to_port,\n                      uint32_t tcp_backlog = ENVOY_TCP_BACKLOG_SIZE)\n      : TcpListenerImpl(dispatcher, std::move(socket), cb, bind_to_port, tcp_backlog) {}\n\n  MOCK_METHOD(Address::InstanceConstSharedPtr, getLocalAddress, (os_fd_t fd));\n};\n\nusing TcpListenerImplTest = ListenerImplTestBase;\nINSTANTIATE_TEST_SUITE_P(IpVersions, TcpListenerImplTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Test that socket options are set after the listener is setup.\nTEST_P(TcpListenerImplTest, SetListeningSocketOptionsSuccess) {\n  Network::MockTcpListenerCallbacks listener_callbacks;\n  Network::MockConnectionHandler connection_handler;\n\n  auto socket = std::make_shared<TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true);\n  std::shared_ptr<MockSocketOption> option = std::make_shared<MockSocketOption>();\n  socket->addOption(option);\n  
EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_LISTENING))\n      .WillOnce(Return(true));\n  TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true);\n}\n\n// Test that an exception is thrown if there is an error setting socket options.\nTEST_P(TcpListenerImplTest, SetListeningSocketOptionsError) {\n  Network::MockTcpListenerCallbacks listener_callbacks;\n  Network::MockConnectionHandler connection_handler;\n\n  auto socket = std::make_shared<TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true);\n  std::shared_ptr<MockSocketOption> option = std::make_shared<MockSocketOption>();\n  socket->addOption(option);\n  EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_LISTENING))\n      .WillOnce(Return(false));\n  EXPECT_THROW_WITH_MESSAGE(TestTcpListenerImpl(dispatcherImpl(), socket, listener_callbacks, true),\n                            CreateListenerException,\n                            fmt::format(\"cannot set post-listen socket option on socket: {}\",\n                                        socket->localAddress()->asString()));\n}\n\nTEST_P(TcpListenerImplTest, UseActualDst) {\n  auto socket = std::make_shared<TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true);\n  auto socketDst = std::make_shared<TcpListenSocket>(alt_address_, nullptr, false);\n  Network::MockTcpListenerCallbacks listener_callbacks1;\n  Network::MockConnectionHandler connection_handler;\n  // Do not redirect since use_original_dst is false.\n  Network::TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks1, true);\n  Network::MockTcpListenerCallbacks listener_callbacks2;\n  Network::TestTcpListenerImpl listenerDst(dispatcherImpl(), socketDst, listener_callbacks2, false);\n\n  Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection(\n      socket->localAddress(), 
Network::Address::InstanceConstSharedPtr(),\n      Network::Test::createRawBufferSocket(), nullptr);\n  client_connection->connect();\n\n  EXPECT_CALL(listener, getLocalAddress(_)).Times(0);\n\n  StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource());\n  EXPECT_CALL(listener_callbacks2, onAccept_(_)).Times(0);\n  EXPECT_CALL(listener_callbacks1, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void {\n        Network::ConnectionPtr conn = dispatcher_->createServerConnection(\n            std::move(accepted_socket), Network::Test::createRawBufferSocket(), stream_info);\n        EXPECT_EQ(*conn->localAddress(), *socket->localAddress());\n        client_connection->close(ConnectionCloseType::NoFlush);\n        conn->close(ConnectionCloseType::NoFlush);\n        dispatcher_->exit();\n      }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_P(TcpListenerImplTest, GlobalConnectionLimitEnforcement) {\n  // Required to manipulate runtime values when there is no test server.\n  TestScopedRuntime scoped_runtime;\n\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"overload.global_downstream_max_connections\", \"2\"}});\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true);\n  Network::MockTcpListenerCallbacks listener_callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Network::ListenerPtr listener =\n      dispatcher_->createListener(socket, listener_callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  std::vector<Network::ClientConnectionPtr> client_connections;\n  std::vector<Network::ConnectionPtr> server_connections;\n  StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource());\n  EXPECT_CALL(listener_callbacks, onAccept_(_))\n      .WillRepeatedly(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void {\n        
server_connections.emplace_back(dispatcher_->createServerConnection(\n            std::move(accepted_socket), Network::Test::createRawBufferSocket(), stream_info));\n        dispatcher_->exit();\n      }));\n\n  auto initiate_connections = [&](const int count) {\n    for (int i = 0; i < count; ++i) {\n      client_connections.emplace_back(dispatcher_->createClientConnection(\n          socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n          Network::Test::createRawBufferSocket(), nullptr));\n      client_connections.back()->connect();\n    }\n  };\n\n  initiate_connections(5);\n  EXPECT_CALL(listener_callbacks, onReject()).Times(3);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  // We expect any server-side connections that get created to populate 'server_connections'.\n  EXPECT_EQ(2, server_connections.size());\n\n  // Let's increase the allowed connections and try sending more connections.\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"overload.global_downstream_max_connections\", \"3\"}});\n  initiate_connections(5);\n  EXPECT_CALL(listener_callbacks, onReject()).Times(4);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(3, server_connections.size());\n\n  // Clear the limit and verify there's no longer a limit.\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"overload.global_downstream_max_connections\", \"\"}});\n  initiate_connections(10);\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(13, server_connections.size());\n\n  for (const auto& conn : client_connections) {\n    conn->close(ConnectionCloseType::NoFlush);\n  }\n  for (const auto& conn : server_connections) {\n    conn->close(ConnectionCloseType::NoFlush);\n  }\n\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"overload.global_downstream_max_connections\", \"\"}});\n}\n\nTEST_P(TcpListenerImplTest, WildcardListenerUseActualDst) {\n  auto socket = 
std::make_shared<TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true);\n  Network::MockTcpListenerCallbacks listener_callbacks;\n  Network::MockConnectionHandler connection_handler;\n  // Do not redirect since use_original_dst is false.\n  Network::TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true);\n\n  auto local_dst_address = Network::Utility::getAddressWithPort(\n      *Network::Test::getCanonicalLoopbackAddress(version_), socket->localAddress()->ip()->port());\n  Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection(\n      local_dst_address, Network::Address::InstanceConstSharedPtr(),\n      Network::Test::createRawBufferSocket(), nullptr);\n  client_connection->connect();\n\n  StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource());\n  EXPECT_CALL(listener_callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        Network::ConnectionPtr conn = dispatcher_->createServerConnection(\n            std::move(socket), Network::Test::createRawBufferSocket(), stream_info);\n        EXPECT_EQ(*conn->localAddress(), *local_dst_address);\n        client_connection->close(ConnectionCloseType::NoFlush);\n        conn->close(ConnectionCloseType::NoFlush);\n        dispatcher_->exit();\n      }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Test for the correct behavior when a listener is configured with an ANY address that allows\n// receiving IPv4 connections on an IPv6 socket. 
In this case the address instances of both\n// local and remote addresses of the connection should be IPv4 instances, as the connection really\n// is an IPv4 connection.\nTEST_P(TcpListenerImplTest, WildcardListenerIpv4Compat) {\n  auto option = std::make_unique<MockSocketOption>();\n  auto options = std::make_shared<std::vector<Network::Socket::OptionConstSharedPtr>>();\n  EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_PREBIND))\n      .WillOnce(Return(true));\n  options->emplace_back(std::move(option));\n\n  auto socket = std::make_shared<TcpListenSocket>(Network::Test::getAnyAddress(version_, true),\n                                                  options, true);\n  Network::MockTcpListenerCallbacks listener_callbacks;\n  Network::MockConnectionHandler connection_handler;\n\n  ASSERT_TRUE(socket->localAddress()->ip()->isAnyAddress());\n\n  // Do not redirect since use_original_dst is false.\n  Network::TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true);\n\n  auto listener_address = Network::Utility::getAddressWithPort(\n      *Network::Test::getCanonicalLoopbackAddress(version_), socket->localAddress()->ip()->port());\n  auto local_dst_address = Network::Utility::getAddressWithPort(\n      *Network::Utility::getCanonicalIpv4LoopbackAddress(), socket->localAddress()->ip()->port());\n  Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection(\n      local_dst_address, Network::Address::InstanceConstSharedPtr(),\n      Network::Test::createRawBufferSocket(), nullptr);\n  client_connection->connect();\n\n  StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource());\n  EXPECT_CALL(listener_callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        Network::ConnectionPtr conn = dispatcher_->createServerConnection(\n            std::move(socket), Network::Test::createRawBufferSocket(), stream_info);\n        
EXPECT_EQ(conn->localAddress()->ip()->version(), conn->remoteAddress()->ip()->version());\n        EXPECT_EQ(conn->localAddress()->asString(), local_dst_address->asString());\n        EXPECT_EQ(*conn->localAddress(), *local_dst_address);\n        client_connection->close(ConnectionCloseType::NoFlush);\n        conn->close(ConnectionCloseType::NoFlush);\n        dispatcher_->exit();\n      }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_P(TcpListenerImplTest, DisableAndEnableListener) {\n  testing::InSequence s1;\n\n  auto socket = std::make_shared<TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true);\n  MockTcpListenerCallbacks listener_callbacks;\n  MockConnectionCallbacks connection_callbacks;\n  TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true);\n\n  // When listener is disabled, the timer should fire before any connection is accepted.\n  listener.disable();\n\n  ClientConnectionPtr client_connection =\n      dispatcher_->createClientConnection(socket->localAddress(), Address::InstanceConstSharedPtr(),\n                                          Network::Test::createRawBufferSocket(), nullptr);\n  client_connection->addConnectionCallbacks(connection_callbacks);\n  client_connection->connect();\n\n  EXPECT_CALL(listener_callbacks, onAccept_(_)).Times(0);\n  EXPECT_CALL(connection_callbacks, onEvent(_))\n      .WillOnce(Invoke([&](Network::ConnectionEvent event) -> void {\n        EXPECT_EQ(event, Network::ConnectionEvent::Connected);\n        dispatcher_->exit();\n      }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  // When the listener is re-enabled, the pending connection should be accepted.\n  listener.enable();\n\n  EXPECT_CALL(listener_callbacks, onAccept_(_)).WillOnce(Invoke([&](ConnectionSocketPtr&) -> void {\n    client_connection->close(ConnectionCloseType::NoFlush);\n  }));\n  EXPECT_CALL(connection_callbacks, onEvent(_))\n      
.WillOnce(Invoke([&](Network::ConnectionEvent event) -> void {\n        EXPECT_NE(event, Network::ConnectionEvent::Connected);\n        dispatcher_->exit();\n      }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/listener_impl_test_base.h",
    "content": "#pragma once\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Network {\n\n// Captures common infrastructure needed by both ListenerImplTest and UdpListenerImplTest.\nclass ListenerImplTestBase : public testing::TestWithParam<Address::IpVersion> {\nprotected:\n  ListenerImplTestBase()\n      : version_(GetParam()),\n        alt_address_(Network::Test::findOrCheckFreePort(\n            Network::Test::getCanonicalLoopbackAddress(version_), Socket::Type::Stream)),\n        api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  Event::DispatcherImpl& dispatcherImpl() {\n    // We need access to the concrete impl type in order to instantiate a\n    // Test[Udp]Listener, which instantiates a [Udp]ListenerImpl, which requires\n    // a DispatcherImpl to access DispatcherImpl::base_, which is not part of\n    // the Dispatcher API.\n    Event::DispatcherImpl* impl = dynamic_cast<Event::DispatcherImpl*>(dispatcher_.get());\n    RELEASE_ASSERT(impl, \"dispatcher dynamic-cast to DispatcherImpl failed\");\n    return *impl;\n  }\n\n  const Address::IpVersion version_;\n  const Address::InstanceConstSharedPtr alt_address_;\n  Event::SimulatedTimeSystem time_system_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/resolver_impl_test.cc",
    "content": "#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/network/resolver.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/resolver_impl.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Address {\nnamespace {\n\nclass IpResolverTest : public testing::Test {\npublic:\n  Resolver* resolver_{Registry::FactoryRegistry<Resolver>::getFactory(\"envoy.ip\")};\n};\n\nTEST_F(IpResolverTest, Basic) {\n  envoy::config::core::v3::SocketAddress socket_address;\n  socket_address.set_address(\"1.2.3.4\");\n  socket_address.set_port_value(443);\n  auto address = resolver_->resolve(socket_address);\n  EXPECT_EQ(address->ip()->addressAsString(), \"1.2.3.4\");\n  EXPECT_EQ(address->ip()->port(), 443);\n}\n\nTEST_F(IpResolverTest, DisallowsNamedPort) {\n  envoy::config::core::v3::SocketAddress socket_address;\n  socket_address.set_address(\"1.2.3.4\");\n  socket_address.set_named_port(\"http\");\n  EXPECT_THROW_WITH_MESSAGE(\n      resolver_->resolve(socket_address), EnvoyException,\n      fmt::format(\"IP resolver can't handle port specifier type {}\",\n                  envoy::config::core::v3::SocketAddress::PortSpecifierCase::kNamedPort));\n}\n\nTEST(ResolverTest, FromProtoAddress) {\n  envoy::config::core::v3::Address ipv4_address;\n  ipv4_address.mutable_socket_address()->set_address(\"1.2.3.4\");\n  ipv4_address.mutable_socket_address()->set_port_value(5);\n  EXPECT_EQ(\"1.2.3.4:5\", resolveProtoAddress(ipv4_address)->asString());\n\n  envoy::config::core::v3::Address ipv6_address;\n  ipv6_address.mutable_socket_address()->set_address(\"1::1\");\n  
ipv6_address.mutable_socket_address()->set_port_value(2);\n  EXPECT_EQ(\"[1::1]:2\", resolveProtoAddress(ipv6_address)->asString());\n\n  envoy::config::core::v3::Address pipe_address;\n  pipe_address.mutable_pipe()->set_path(\"/foo/bar\");\n  EXPECT_EQ(\"/foo/bar\", resolveProtoAddress(pipe_address)->asString());\n}\n\nTEST(ResolverTest, InternalListenerNameFromProtoAddress) {\n  envoy::config::core::v3::Address internal_listener_address;\n  internal_listener_address.mutable_envoy_internal_address()->set_server_listener_name(\n      \"internal_listener_foo\");\n  EXPECT_EQ(\"envoy://internal_listener_foo\",\n            resolveProtoAddress(internal_listener_address)->asString());\n}\n\nTEST(ResolverTest, UninitializedInternalAddressFromProtoAddress) {\n  envoy::config::core::v3::Address internal_address;\n  internal_address.mutable_envoy_internal_address();\n  EXPECT_DEATH(resolveProtoAddress(internal_address), \"panic\");\n}\n\n// Validate correct handling of ipv4_compat field.\nTEST(ResolverTest, FromProtoAddressV4Compat) {\n  {\n    envoy::config::core::v3::Address ipv6_address;\n    ipv6_address.mutable_socket_address()->set_address(\"1::1\");\n    ipv6_address.mutable_socket_address()->set_port_value(2);\n    auto resolved_addr = resolveProtoAddress(ipv6_address);\n    EXPECT_EQ(\"[1::1]:2\", resolved_addr->asString());\n  }\n  {\n    envoy::config::core::v3::Address ipv6_address;\n    ipv6_address.mutable_socket_address()->set_address(\"1::1\");\n    ipv6_address.mutable_socket_address()->set_port_value(2);\n    ipv6_address.mutable_socket_address()->set_ipv4_compat(true);\n    auto resolved_addr = resolveProtoAddress(ipv6_address);\n    EXPECT_EQ(\"[1::1]:2\", resolved_addr->asString());\n  }\n}\n\nclass TestResolver : public Resolver {\npublic:\n  InstanceConstSharedPtr\n  resolve(const envoy::config::core::v3::SocketAddress& socket_address) override {\n    const std::string& logical = socket_address.address();\n    const std::string physical = 
getPhysicalName(logical);\n    const std::string port = getPort(socket_address);\n    return InstanceConstSharedPtr{new MockResolvedAddress(fmt::format(\"{}:{}\", logical, port),\n                                                          fmt::format(\"{}:{}\", physical, port))};\n  }\n\n  void addMapping(const std::string& logical, const std::string& physical) {\n    name_mappings_[logical] = physical;\n  }\n\n  std::string name() const override { return \"envoy.test.resolver\"; }\n\nprivate:\n  std::string getPhysicalName(const std::string& logical) {\n    auto it = name_mappings_.find(logical);\n    if (it == name_mappings_.end()) {\n      throw EnvoyException(\"no such mapping exists\");\n    }\n    return it->second;\n  }\n\n  std::string getPort(const envoy::config::core::v3::SocketAddress& socket_address) {\n    switch (socket_address.port_specifier_case()) {\n    case envoy::config::core::v3::SocketAddress::PortSpecifierCase::kNamedPort:\n      return socket_address.named_port();\n    case envoy::config::core::v3::SocketAddress::PortSpecifierCase::kPortValue:\n    // default to port 0 if no port value is specified\n    case envoy::config::core::v3::SocketAddress::PortSpecifierCase::PORT_SPECIFIER_NOT_SET:\n      return absl::StrCat(\"\", socket_address.port_value());\n\n    default:\n      throw EnvoyException(\n          absl::StrCat(\"Unknown port specifier type \", socket_address.port_specifier_case()));\n    }\n  }\n\n  std::map<std::string, std::string> name_mappings_;\n};\n\nTEST(ResolverTest, NonStandardResolver) {\n  TestResolver test_resolver;\n  test_resolver.addMapping(\"foo\", \"1.2.3.4\");\n  test_resolver.addMapping(\"bar\", \"4.3.2.1\");\n  Registry::InjectFactory<Resolver> register_resolver(test_resolver);\n\n  {\n    envoy::config::core::v3::Address address;\n    auto socket = address.mutable_socket_address();\n    socket->set_address(\"foo\");\n    socket->set_port_value(5);\n    socket->set_resolver_name(\"envoy.test.resolver\");\n    auto 
instance = resolveProtoAddress(address);\n    EXPECT_EQ(\"1.2.3.4:5\", instance->asString());\n    EXPECT_EQ(\"foo:5\", instance->logicalName());\n  }\n  {\n    envoy::config::core::v3::Address address;\n    auto socket = address.mutable_socket_address();\n    socket->set_address(\"bar\");\n    socket->set_named_port(\"http\");\n    socket->set_resolver_name(\"envoy.test.resolver\");\n    auto instance = resolveProtoAddress(address);\n    EXPECT_EQ(\"4.3.2.1:http\", instance->asString());\n    EXPECT_EQ(\"bar:http\", instance->logicalName());\n  }\n}\n\nTEST(ResolverTest, UninitializedAddress) {\n  envoy::config::core::v3::Address address;\n  EXPECT_THROW_WITH_MESSAGE(resolveProtoAddress(address), EnvoyException, \"Address must be set: \");\n}\n\nTEST(ResolverTest, NoSuchResolver) {\n  envoy::config::core::v3::Address address;\n  auto socket = address.mutable_socket_address();\n  socket->set_address(\"foo\");\n  socket->set_port_value(5);\n  socket->set_resolver_name(\"envoy.test.resolver\");\n  EXPECT_THROW_WITH_MESSAGE(resolveProtoAddress(address), EnvoyException,\n                            \"Unknown address resolver: envoy.test.resolver\");\n}\n\n} // namespace\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/socket_option_factory_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/socket_option_impl.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"absl/strings/str_format.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nclass SocketOptionFactoryTest : public testing::Test {\npublic:\n  SocketOptionFactoryTest() = default;\n\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls_{[this]() {\n    // Before injecting OsSysCallsImpl, make sure validateIpv{4,6}Supported is called so the static\n    // bool is initialized without requiring to mock ::socket and ::close. :( :(\n    std::make_unique<Address::Ipv4Instance>(\"1.2.3.4\", 5678);\n    std::make_unique<Address::Ipv6Instance>(\"::1:2:3:4\", 5678);\n    return &os_sys_calls_mock_;\n  }()};\n\nprotected:\n  testing::NiceMock<MockListenSocket> socket_mock_;\n  Api::MockOsSysCalls os_sys_calls_mock_;\n\n  void SetUp() override { socket_mock_.local_address_.reset(); }\n  void makeSocketV4() {\n    socket_mock_.local_address_ = std::make_unique<Address::Ipv4Instance>(\"1.2.3.4\", 5678);\n  }\n  void makeSocketV6() {\n    socket_mock_.local_address_ = std::make_unique<Address::Ipv6Instance>(\"::1:2:3:4\", 5678);\n  }\n};\n\n#define CHECK_OPTION_SUPPORTED(option)                                                             \\\n  if (!option.hasValue()) {                                                                        \\\n    return;                                                                                        \\\n  }\n\n// TODO(klarose): Simplify these tests once https://github.com/envoyproxy/envoy/pull/5351 is merged.\n\nTEST_F(SocketOptionFactoryTest, TestBuildSocketMarkOptions) {\n\n  // use a shared_ptr due to 
applyOptions requiring one\n  std::shared_ptr<Socket::Options> options = SocketOptionFactory::buildSocketMarkOptions(100);\n\n  const auto expected_option = ENVOY_SOCKET_SO_MARK;\n  CHECK_OPTION_SUPPORTED(expected_option);\n\n  const int type = expected_option.level();\n  const int option = expected_option.option();\n  EXPECT_CALL(socket_mock_, setSocketOption(_, _, _, sizeof(int)))\n      .WillOnce(Invoke([type, option](int input_type, int input_option, const void* optval,\n                                      socklen_t) -> Api::SysCallIntResult {\n        EXPECT_EQ(100, *static_cast<const int*>(optval));\n        EXPECT_EQ(type, input_type);\n        EXPECT_EQ(option, input_option);\n        return {0, 0};\n      }));\n\n  EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_,\n                                            envoy::config::core::v3::SocketOption::STATE_PREBIND));\n}\n\nTEST_F(SocketOptionFactoryTest, TestBuildIpv4TransparentOptions) {\n  makeSocketV4();\n\n  // use a shared_ptr due to applyOptions requiring one\n  std::shared_ptr<Socket::Options> options = SocketOptionFactory::buildIpTransparentOptions();\n\n  const auto expected_option = ENVOY_SOCKET_IP_TRANSPARENT;\n  CHECK_OPTION_SUPPORTED(expected_option);\n\n  const int type = expected_option.level();\n  const int option = expected_option.option();\n  EXPECT_CALL(socket_mock_, setSocketOption(_, _, _, sizeof(int)))\n      .Times(2)\n      .WillRepeatedly(Invoke([type, option](int input_type, int input_option, const void* optval,\n                                            socklen_t) -> Api::SysCallIntResult {\n        EXPECT_EQ(type, input_type);\n        EXPECT_EQ(option, input_option);\n        EXPECT_EQ(1, *static_cast<const int*>(optval));\n        return {0, 0};\n      }));\n  EXPECT_CALL(socket_mock_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4));\n  EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_,\n                                         
   envoy::config::core::v3::SocketOption::STATE_PREBIND));\n  EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_,\n                                            envoy::config::core::v3::SocketOption::STATE_BOUND));\n}\n\nTEST_F(SocketOptionFactoryTest, TestBuildIpv6TransparentOptions) {\n  makeSocketV6();\n\n  // use a shared_ptr due to applyOptions requiring one\n  std::shared_ptr<Socket::Options> options = SocketOptionFactory::buildIpTransparentOptions();\n\n  const auto expected_option = ENVOY_SOCKET_IPV6_TRANSPARENT;\n  CHECK_OPTION_SUPPORTED(expected_option);\n\n  const int type = expected_option.level();\n  const int option = expected_option.option();\n  EXPECT_CALL(socket_mock_, setSocketOption(_, _, _, sizeof(int)))\n      .Times(2)\n      .WillRepeatedly(Invoke([type, option](int input_type, int input_option, const void* optval,\n                                            socklen_t) -> Api::SysCallIntResult {\n        EXPECT_EQ(type, input_type);\n        EXPECT_EQ(option, input_option);\n        EXPECT_EQ(1, *static_cast<const int*>(optval));\n        return {0, 0};\n      }));\n\n  EXPECT_CALL(socket_mock_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6));\n  EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_,\n                                            envoy::config::core::v3::SocketOption::STATE_PREBIND));\n  EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_,\n                                            envoy::config::core::v3::SocketOption::STATE_BOUND));\n}\n\nTEST_F(SocketOptionFactoryTest, TestBuildLiteralOptions) {\n  Protobuf::RepeatedPtrField<envoy::config::core::v3::SocketOption> socket_options_proto;\n  Envoy::Protobuf::TextFormat::Parser parser;\n  envoy::config::core::v3::SocketOption socket_option_proto;\n  struct linger expected_linger;\n  expected_linger.l_onoff = 1;\n  expected_linger.l_linger = 3456;\n  absl::string_view linger_bstr{reinterpret_cast<const 
char*>(&expected_linger),\n                                sizeof(struct linger)};\n  std::string linger_bstr_formatted = testing::PrintToString(linger_bstr);\n  static const char linger_option_format[] = R\"proto(\n    state: STATE_PREBIND\n    level: %d\n    name: %d\n    buf_value: %s\n  )proto\";\n  auto linger_option =\n      absl::StrFormat(linger_option_format, SOL_SOCKET, SO_LINGER, linger_bstr_formatted);\n  ASSERT_TRUE(parser.ParseFromString(linger_option, &socket_option_proto));\n  *socket_options_proto.Add() = socket_option_proto;\n  static const char keepalive_option_format[] = R\"proto(\n    state: STATE_PREBIND\n    level: %d\n    name: %d\n    int_value: 1\n  )proto\";\n  auto keepalive_option = absl::StrFormat(keepalive_option_format, SOL_SOCKET, SO_KEEPALIVE);\n  ASSERT_TRUE(parser.ParseFromString(keepalive_option, &socket_option_proto));\n  *socket_options_proto.Add() = socket_option_proto;\n\n  auto socket_options = SocketOptionFactory::buildLiteralOptions(socket_options_proto);\n  EXPECT_EQ(2, socket_options->size());\n  auto option_details = socket_options->at(0)->getOptionDetails(\n      socket_mock_, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  EXPECT_TRUE(option_details.has_value());\n  EXPECT_EQ(SOL_SOCKET, option_details->name_.level());\n  EXPECT_EQ(SO_LINGER, option_details->name_.option());\n  EXPECT_EQ(linger_bstr, option_details->value_);\n\n  option_details = socket_options->at(1)->getOptionDetails(\n      socket_mock_, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  EXPECT_TRUE(option_details.has_value());\n  EXPECT_EQ(SOL_SOCKET, option_details->name_.level());\n  EXPECT_EQ(SO_KEEPALIVE, option_details->name_.option());\n  int value = 1;\n  absl::string_view value_bstr{reinterpret_cast<const char*>(&value), sizeof(int)};\n  EXPECT_EQ(value_bstr, option_details->value_);\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/socket_option_impl_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"test/common/network/socket_option_test.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nclass SocketOptionImplTest : public SocketOptionTest {};\n\nTEST_F(SocketOptionImplTest, BadFd) {\n  absl::string_view zero(\"\\0\\0\\0\\0\", 4);\n  Api::SysCallIntResult result =\n      SocketOptionImpl::setSocketOption(socket_, {}, zero.data(), zero.size());\n  EXPECT_EQ(-1, result.rc_);\n  EXPECT_EQ(SOCKET_ERROR_NOT_SUP, result.errno_);\n}\n\nTEST_F(SocketOptionImplTest, HasName) {\n  auto optname = ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_SNDBUF);\n\n  // Verify that the constructor macro sets all the fields correctly.\n  EXPECT_TRUE(optname.hasValue());\n  EXPECT_EQ(SOL_SOCKET, optname.level());\n  EXPECT_EQ(SO_SNDBUF, optname.option());\n  EXPECT_EQ(\"SOL_SOCKET/SO_SNDBUF\", optname.name());\n\n  // The default constructor should not have a value, i.e. should\n  // be unsupported.\n  EXPECT_FALSE(SocketOptionName().hasValue());\n\n  // If we fail to set an option, verify that the log message\n  // contains the option name so the operator can debug.\n  SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_PREBIND, optname, 1};\n  EXPECT_CALL(socket_, setSocketOption(_, _, _, _))\n      .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult {\n        EXPECT_EQ(1, *static_cast<const int*>(optval));\n        return {-1, 0};\n      }));\n\n  EXPECT_LOG_CONTAINS(\n      \"warning\", \"Setting SOL_SOCKET/SO_SNDBUF option on socket failed\",\n      socket_option.setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND));\n}\n\nTEST_F(SocketOptionImplTest, SetOptionSuccessTrue) {\n  SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_PREBIND,\n                                 ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1};\n  EXPECT_CALL(socket_, setSocketOption(5, 10, _, sizeof(int)))\n      
.WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult {\n        EXPECT_EQ(1, *static_cast<const int*>(optval));\n        return {0, 0};\n      }));\n  EXPECT_TRUE(\n      socket_option.setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND));\n}\n\nTEST_F(SocketOptionImplTest, GetOptionDetailsCorrectState) {\n  SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_PREBIND,\n                                 ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1};\n\n  auto result =\n      socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  ASSERT_TRUE(result.has_value());\n  EXPECT_EQ(*result, makeDetails(ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1));\n}\n\nTEST_F(SocketOptionImplTest, GetMoreOptionDetailsCorrectState) {\n  SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_LISTENING,\n                                 ENVOY_MAKE_SOCKET_OPTION_NAME(7, 9), 5};\n\n  auto result = socket_option.getOptionDetails(\n      socket_, envoy::config::core::v3::SocketOption::STATE_LISTENING);\n  ASSERT_TRUE(result.has_value());\n  EXPECT_EQ(*result, makeDetails(ENVOY_MAKE_SOCKET_OPTION_NAME(7, 9), 5));\n}\n\nTEST_F(SocketOptionImplTest, GetOptionDetailsFailureWrongState) {\n  SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_LISTENING,\n                                 ENVOY_MAKE_SOCKET_OPTION_NAME(7, 9), 5};\n\n  auto result =\n      socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_BOUND);\n  EXPECT_FALSE(result.has_value());\n}\n\nTEST_F(SocketOptionImplTest, GetUnsupportedOptReturnsNullopt) {\n  SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_LISTENING,\n                                 Network::SocketOptionName(), 5};\n\n  auto result = socket_option.getOptionDetails(\n      socket_, envoy::config::core::v3::SocketOption::STATE_LISTENING);\n  
EXPECT_FALSE(result.has_value());\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/socket_option_test.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_option_impl.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::Invoke;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nclass SocketOptionTest : public testing::Test {\npublic:\n  SocketOptionTest() {\n    socket_.local_address_.reset();\n\n    EXPECT_CALL(os_sys_calls_, socket(_, _, _))\n        .Times(AnyNumber())\n        .WillRepeatedly(\n            Invoke([this](int domain, int type, int protocol) -> Api::SysCallSocketResult {\n              return os_sys_calls_actual_.socket(domain, type, protocol);\n            }));\n    EXPECT_CALL(os_sys_calls_, setsocketblocking(_, _))\n        .Times(AnyNumber())\n        .WillRepeatedly(Invoke([this](os_fd_t sockfd, bool block) -> Api::SysCallIntResult {\n          return os_sys_calls_actual_.setsocketblocking(sockfd, block);\n        }));\n    EXPECT_CALL(os_sys_calls_, setsockopt_(_, IPPROTO_IPV6, IPV6_V6ONLY, _, _))\n        .Times(AnyNumber())\n        .WillRepeatedly(Invoke([this](os_fd_t sockfd, int level, int optname, const void* optval,\n                                      socklen_t optlen) -> int {\n          return os_sys_calls_actual_.setsockopt(sockfd, level, optname, optval, optlen).rc_;\n        }));\n    EXPECT_CALL(os_sys_calls_, getsockopt_(_, _, _, _, _))\n        .Times(AnyNumber())\n        .WillRepeatedly(Invoke(\n            [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int {\n              return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_;\n            }));\n    
EXPECT_CALL(os_sys_calls_, getsockname(_, _, _))\n        .Times(AnyNumber())\n        .WillRepeatedly(Invoke(\n            [this](os_fd_t sockfd, sockaddr* name, socklen_t* namelen) -> Api::SysCallIntResult {\n              return os_sys_calls_actual_.getsockname(sockfd, name, namelen);\n            }));\n  }\n\n  NiceMock<MockListenSocket> socket_;\n  Api::MockOsSysCalls os_sys_calls_;\n  Api::OsSysCallsImpl os_sys_calls_actual_;\n\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls_{[this]() {\n    // Before injecting OsSysCallsImpl, make sure validateIpv{4,6}Supported is called so the static\n    // bool is initialized without requiring to mock ::socket and ::close.\n    std::make_unique<Address::Ipv4Instance>(\"1.2.3.4\", 5678);\n    std::make_unique<Address::Ipv6Instance>(\"::1:2:3:4\", 5678);\n    return &os_sys_calls_;\n  }()};\n\n  void testSetSocketOptionSuccess(\n      Socket::Option& socket_option, Network::SocketOptionName option_name, int option_val,\n      const std::set<envoy::config::core::v3::SocketOption::SocketState>& when) {\n    for (auto state : when) {\n      if (option_name.hasValue()) {\n        EXPECT_CALL(socket_,\n                    setSocketOption(option_name.level(), option_name.option(), _, sizeof(int)))\n            .WillOnce(Invoke(\n                [option_val](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult {\n                  EXPECT_EQ(option_val, *static_cast<const int*>(optval));\n                  return {0, 0};\n                }));\n        EXPECT_TRUE(socket_option.setOption(socket_, state));\n      } else {\n        EXPECT_FALSE(socket_option.setOption(socket_, state));\n      }\n    }\n\n    // The set of SocketOption::SocketState for which this option should not be set.\n    // Initialize to all the states, and remove states that are passed in.\n    std::list<envoy::config::core::v3::SocketOption::SocketState> unset_socketstates{\n        
envoy::config::core::v3::SocketOption::STATE_PREBIND,\n        envoy::config::core::v3::SocketOption::STATE_BOUND,\n        envoy::config::core::v3::SocketOption::STATE_LISTENING,\n    };\n    unset_socketstates.remove_if(\n        [&](envoy::config::core::v3::SocketOption::SocketState state) -> bool {\n          return when.find(state) != when.end();\n        });\n    for (auto state : unset_socketstates) {\n      EXPECT_CALL(os_sys_calls_, setsockopt_(_, _, _, _, _)).Times(0);\n      EXPECT_TRUE(socket_option.setOption(socket_, state));\n    }\n  }\n\n  Socket::Option::Details makeDetails(Network::SocketOptionName name, int value) {\n    absl::string_view value_as_bstr(reinterpret_cast<const char*>(&value), sizeof(value));\n\n    Socket::Option::Details expected_info;\n    expected_info.name_ = name;\n    expected_info.value_ = std::string(value_as_bstr);\n\n    return expected_info;\n  }\n};\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/transport_socket_options_impl_test.cc",
    "content": "#include \"common/http/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/application_protocol.h\"\n#include \"common/network/proxy_protocol_filter_state.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/network/upstream_server_name.h\"\n#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nclass TransportSocketOptionsImplTest : public testing::Test {\npublic:\n  TransportSocketOptionsImplTest()\n      : filter_state_(StreamInfo::FilterState::LifeSpan::FilterChain) {}\n\nprotected:\n  StreamInfo::FilterStateImpl filter_state_;\n};\n\nTEST_F(TransportSocketOptionsImplTest, Nullptr) {\n  EXPECT_EQ(nullptr, TransportSocketOptionsUtility::fromFilterState(filter_state_));\n  filter_state_.setData(\n      \"random_key_has_no_effect\", std::make_unique<UpstreamServerName>(\"www.example.com\"),\n      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain);\n  EXPECT_EQ(nullptr, TransportSocketOptionsUtility::fromFilterState(filter_state_));\n}\n\nTEST_F(TransportSocketOptionsImplTest, UpstreamServer) {\n  filter_state_.setData(\n      UpstreamServerName::key(), std::make_unique<UpstreamServerName>(\"www.example.com\"),\n      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain);\n  filter_state_.setData(ProxyProtocolFilterState::key(),\n                        std::make_unique<ProxyProtocolFilterState>(Network::ProxyProtocolData{\n                            Network::Address::InstanceConstSharedPtr(\n                                new Network::Address::Ipv4Instance(\"202.168.0.13\", 52000)),\n                            Network::Address::InstanceConstSharedPtr(\n                                new Network::Address::Ipv4Instance(\"174.2.2.222\", 80))}),\n                        StreamInfo::FilterState::StateType::ReadOnly,\n      
                  StreamInfo::FilterState::LifeSpan::FilterChain);\n  auto transport_socket_options = TransportSocketOptionsUtility::fromFilterState(filter_state_);\n  EXPECT_EQ(absl::make_optional<std::string>(\"www.example.com\"),\n            transport_socket_options->serverNameOverride());\n  EXPECT_EQ(\"202.168.0.13:52000\",\n            transport_socket_options->proxyProtocolOptions()->src_addr_->asStringView());\n  EXPECT_TRUE(transport_socket_options->applicationProtocolListOverride().empty());\n}\n\nTEST_F(TransportSocketOptionsImplTest, ApplicationProtocols) {\n  std::vector<std::string> http_alpns{Http::Utility::AlpnNames::get().Http2,\n                                      Http::Utility::AlpnNames::get().Http11};\n  filter_state_.setData(\n      ApplicationProtocols::key(), std::make_unique<ApplicationProtocols>(http_alpns),\n      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain);\n  auto transport_socket_options = TransportSocketOptionsUtility::fromFilterState(filter_state_);\n  EXPECT_EQ(absl::nullopt, transport_socket_options->serverNameOverride());\n  EXPECT_EQ(http_alpns, transport_socket_options->applicationProtocolListOverride());\n}\n\nTEST_F(TransportSocketOptionsImplTest, Both) {\n  std::vector<std::string> http_alpns{Http::Utility::AlpnNames::get().Http2,\n                                      Http::Utility::AlpnNames::get().Http11};\n  filter_state_.setData(\n      UpstreamServerName::key(), std::make_unique<UpstreamServerName>(\"www.example.com\"),\n      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain);\n  filter_state_.setData(\n      ApplicationProtocols::key(), std::make_unique<ApplicationProtocols>(http_alpns),\n      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain);\n  auto transport_socket_options = TransportSocketOptionsUtility::fromFilterState(filter_state_);\n  
EXPECT_EQ(absl::make_optional<std::string>(\"www.example.com\"),\n            transport_socket_options->serverNameOverride());\n  EXPECT_EQ(http_alpns, transport_socket_options->applicationProtocolListOverride());\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/udp_listener_impl_batch_writer_test.cc",
    "content": "#include <cstddef>\n#include <iterator>\n#include <memory>\n#include <string>\n#include <vector>\n\n#ifdef __GNUC__\n#pragma GCC diagnostic push\n// QUICHE allows unused parameters.\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n// QUICHE uses offsetof().\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#pragma GCC diagnostic ignored \"-Wtype-limits\"\n\n#include \"quiche/quic/test_tools/quic_mock_syscall_wrapper.h\"\n\n#pragma GCC diagnostic pop\n#else\n#include \"quiche/quic/test_tools/quic_mock_syscall_wrapper.h\"\n#endif\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/socket_option_impl.h\"\n#include \"common/network/udp_listener_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/quic_listeners/quiche/udp_gso_batch_writer.h\"\n\n#include \"test/common/network/udp_listener_impl_test_base.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/time/time.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nsize_t getPacketLength(const msghdr* msg) {\n  size_t length = 0;\n  for (size_t i = 0; i < msg->msg_iovlen; ++i) {\n    length += msg->msg_iov[i].iov_len;\n  }\n  return length;\n}\n\nclass UdpListenerImplBatchWriterTest : public UdpListenerImplTestBase {\npublic:\n  void SetUp() override {\n    // Set listening socket options and set UdpGsoBatchWriter\n    server_socket_->addOptions(SocketOptionFactory::buildIpPacketInfoOptions());\n    
server_socket_->addOptions(SocketOptionFactory::buildRxQueueOverFlowOptions());\n    listener_ = std::make_unique<UdpListenerImpl>(\n        dispatcherImpl(), server_socket_, listener_callbacks_, dispatcherImpl().timeSource());\n    udp_packet_writer_ = std::make_unique<Quic::UdpGsoBatchWriter>(\n        server_socket_->ioHandle(), listener_config_.listenerScope());\n    ON_CALL(listener_callbacks_, udpPacketWriter()).WillByDefault(ReturnRef(*udp_packet_writer_));\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, UdpListenerImplBatchWriterTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n/**\n * Tests UDP Packet Writer To Send packets in Batches to a client\n *  1. Setup a udp listener and client socket\n *  2. Send different sized payloads to client.\n *     - Verify that the packets are buffered as long as payload\n *       length matches gso_size.\n *     - When payload size > gso_size verify that the new payload is\n *       buffered and already buffered packets are sent to client\n *     - When payload size < gso_size verify that the new payload is\n *       sent along with the already buffered payloads.\n *  3. 
Call UdpPacketWriter's External Flush\n *     - Verify that the internal buffer is emptied and the\n *       total_bytes_sent counter is updated accordingly.\n */\nTEST_P(UdpListenerImplBatchWriterTest, SendData) {\n  EXPECT_TRUE(udp_packet_writer_->isBatchMode());\n  Address::InstanceConstSharedPtr send_from_addr = getNonDefaultSourceAddress();\n\n  absl::FixedArray<std::string> payloads{\"length7\", \"length7\", \"len<7\",\n                                         \"length7\", \"length7\", \"length>7\"};\n  std::string internal_buffer(\"\");\n  std::string last_buffered(\"\");\n  std::list<std::string> pkts_to_send;\n  bool send_buffered_pkts = false;\n\n  // Get initial value of total_bytes_sent\n  uint64_t total_bytes_sent =\n      listener_config_.listenerScope().counterFromString(\"total_bytes_sent\").value();\n\n  for (const auto& payload : payloads) {\n    Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n    buffer->add(payload);\n    UdpSendData send_data{send_from_addr->ip(), *client_.localAddress(), *buffer};\n\n    auto send_result = listener_->send(send_data);\n    EXPECT_TRUE(send_result.ok()) << \"send() failed : \" << send_result.err_->getErrorDetails();\n    EXPECT_EQ(send_result.rc_, payload.length());\n\n    // Verify udp_packet_writer stats for batch writing\n    if (internal_buffer.length() == 0 ||       /* internal buffer is empty*/\n        payload.compare(last_buffered) == 0) { /*len(payload) == gso_size*/\n      pkts_to_send.emplace_back(payload);\n      internal_buffer.append(payload);\n      last_buffered = payload;\n    } else if (payload.compare(last_buffered) < 0) { /*len(payload) < gso_size*/\n      pkts_to_send.emplace_back(payload);\n      internal_buffer.clear();\n      last_buffered.clear();\n      send_buffered_pkts = true;\n    } else { /*len(payload) > gso_size*/\n      internal_buffer = payload;\n      last_buffered = payload;\n      send_buffered_pkts = true;\n    }\n\n    EXPECT_EQ(listener_config_.listenerScope()\n     
             .gaugeFromString(\"internal_buffer_size\", Stats::Gauge::ImportMode::NeverImport)\n                  .value(),\n              internal_buffer.length());\n\n    // Verify that the total_bytes_sent is only updated when the packets\n    // are actually sent to the client, and not on being buffered.\n    if (send_buffered_pkts) {\n      for (const auto& pkt : pkts_to_send) {\n        total_bytes_sent += pkt.length();\n      }\n      pkts_to_send.clear();\n      if (last_buffered.length() != 0) {\n        pkts_to_send.emplace_back(last_buffered);\n      }\n      send_buffered_pkts = false;\n    }\n    EXPECT_EQ(listener_config_.listenerScope().counterFromString(\"total_bytes_sent\").value(),\n              total_bytes_sent);\n  }\n\n  // Test External Flush\n  auto flush_result = udp_packet_writer_->flush();\n  EXPECT_TRUE(flush_result.ok());\n  EXPECT_EQ(listener_config_.listenerScope()\n                .gaugeFromString(\"internal_buffer_size\", Stats::Gauge::ImportMode::NeverImport)\n                .value(),\n            0);\n  total_bytes_sent += payloads.back().length();\n\n  EXPECT_EQ(listener_config_.listenerScope().counterFromString(\"total_bytes_sent\").value(),\n            total_bytes_sent);\n}\n\n/**\n * Tests UDP Packet writer behavior when socket is write-blocked.\n * 1. Setup the udp_listener and have a payload buffered in the internal buffer.\n * 2. 
Then set the socket to return EWOULDBLOCK error on sendmsg and write a\n *    different sized buffer to the packet writer.\n *    - Ensure that a buffer shorter than the initial buffer is added to the\n *      Internal Buffer.\n *    - A buffer longer than the initial buffer should not get appended to the\n *      Internal Buffer.\n */\nTEST_P(UdpListenerImplBatchWriterTest, WriteBlocked) {\n  // Quic Mock Objects\n  quic::test::MockQuicSyscallWrapper os_sys_calls;\n  quic::ScopedGlobalSyscallWrapperOverride os_calls(&os_sys_calls);\n\n  // The initial payload to be buffered\n  std::string initial_payload(\"length7\");\n\n  // Get initial value of total_bytes_sent\n  uint64_t total_bytes_sent =\n      listener_config_.listenerScope().counterFromString(\"total_bytes_sent\").value();\n\n  // Possible following payloads to be sent after the initial payload\n  absl::FixedArray<std::string> following_payloads{\"length<7\", \"len<7\"};\n\n  for (const auto& following_payload : following_payloads) {\n    std::string internal_buffer(\"\");\n\n    // First have initial payload added to the udp_packet_writer's internal buffer.\n    Buffer::InstancePtr initial_buffer(new Buffer::OwnedImpl());\n    initial_buffer->add(initial_payload);\n    UdpSendData initial_send_data{send_to_addr_->ip(), *server_socket_->localAddress(),\n                                  *initial_buffer};\n    auto send_result = listener_->send(initial_send_data);\n    internal_buffer.append(initial_payload);\n    EXPECT_TRUE(send_result.ok());\n    EXPECT_EQ(send_result.rc_, initial_payload.length());\n    EXPECT_FALSE(udp_packet_writer_->isWriteBlocked());\n    EXPECT_EQ(listener_config_.listenerScope()\n                  .gaugeFromString(\"internal_buffer_size\", Stats::Gauge::ImportMode::NeverImport)\n                  .value(),\n              initial_payload.length());\n    EXPECT_EQ(listener_config_.listenerScope().counterFromString(\"total_bytes_sent\").value(),\n              total_bytes_sent);\n\n  
  // Mock the socket to be write blocked on sendmsg syscall\n    EXPECT_CALL(os_sys_calls, Sendmsg(_, _, _))\n        .WillOnce(Invoke([](int /*sockfd*/, const msghdr* /*msg*/, int /*flags*/) {\n          errno = EWOULDBLOCK;\n          return -1;\n        }));\n\n    // Now send the following payload\n    Buffer::InstancePtr following_buffer(new Buffer::OwnedImpl());\n    following_buffer->add(following_payload);\n    UdpSendData following_send_data{send_to_addr_->ip(), *server_socket_->localAddress(),\n                                    *following_buffer};\n    send_result = listener_->send(following_send_data);\n\n    if (following_payload.length() < initial_payload.length()) {\n      // The following payload should get buffered if it is\n      // shorter than initial payload\n      EXPECT_TRUE(send_result.ok());\n      EXPECT_EQ(send_result.rc_, following_payload.length());\n      EXPECT_FALSE(udp_packet_writer_->isWriteBlocked());\n      internal_buffer.append(following_payload);\n      // Send another packet and verify that writer gets blocked later\n      EXPECT_CALL(os_sys_calls, Sendmsg(_, _, _))\n          .WillOnce(Invoke([](int /*sockfd*/, const msghdr* /*msg*/, int /*flags*/) {\n            errno = EWOULDBLOCK;\n            return -1;\n          }));\n      following_buffer->add(following_payload);\n      UdpSendData final_send_data{send_to_addr_->ip(), *server_socket_->localAddress(),\n                                  *following_buffer};\n      send_result = listener_->send(final_send_data);\n    }\n\n    EXPECT_FALSE(send_result.ok());\n    EXPECT_EQ(send_result.rc_, 0);\n    EXPECT_TRUE(udp_packet_writer_->isWriteBlocked());\n    EXPECT_EQ(listener_config_.listenerScope().counterFromString(\"total_bytes_sent\").value(),\n              total_bytes_sent);\n    EXPECT_EQ(listener_config_.listenerScope()\n                  .gaugeFromString(\"internal_buffer_size\", Stats::Gauge::ImportMode::NeverImport)\n                  .value(),\n              
internal_buffer.length());\n\n    // Reset write blocked status and verify correct buffer is flushed\n    udp_packet_writer_->setWritable();\n    EXPECT_CALL(os_sys_calls, Sendmsg(_, _, _))\n        .WillOnce(Invoke([&](int /*sockfd*/, const msghdr* msg, int /*flags*/) {\n          EXPECT_EQ(internal_buffer.length(), getPacketLength(msg));\n          return internal_buffer.length();\n        }));\n    auto flush_result = udp_packet_writer_->flush();\n    EXPECT_TRUE(flush_result.ok());\n    EXPECT_EQ(flush_result.rc_, 0);\n    EXPECT_FALSE(udp_packet_writer_->isWriteBlocked());\n    EXPECT_EQ(listener_config_.listenerScope()\n                  .gaugeFromString(\"internal_buffer_size\", Stats::Gauge::ImportMode::NeverImport)\n                  .value(),\n              0);\n    total_bytes_sent += internal_buffer.length();\n    EXPECT_EQ(listener_config_.listenerScope().counterFromString(\"total_bytes_sent\").value(),\n              total_bytes_sent);\n  }\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/udp_listener_impl_test.cc",
    "content": "#include <cstddef>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/api/os_sys_calls.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/socket_option_impl.h\"\n#include \"common/network/udp_listener_impl.h\"\n#include \"common/network/udp_packet_writer_handler_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/common/network/udp_listener_impl_test_base.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/time/time.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\n// UdpGro is only supported on Linux versions >= 5.0. Also, the\n// underlying platform only performs the payload concatenation when\n// packets are sent from a network namespace different to that of\n// the client. 
Currently, the testing framework does not support\n// this behavior.\n// This helper allows to intercept the supportsUdpGro syscall and\n// toggle the gro behavior as per individual test requirements.\nclass MockSupportsUdpGro : public Api::OsSysCallsImpl {\npublic:\n  MOCK_METHOD(bool, supportsUdpGro, (), (const));\n};\n\nclass UdpListenerImplTest : public UdpListenerImplTestBase {\npublic:\n  void SetUp() override {\n    ON_CALL(udp_gro_syscall_, supportsUdpGro()).WillByDefault(Return(false));\n\n    // Set listening socket options.\n    server_socket_->addOptions(SocketOptionFactory::buildIpPacketInfoOptions());\n    server_socket_->addOptions(SocketOptionFactory::buildRxQueueOverFlowOptions());\n    if (Api::OsSysCallsSingleton::get().supportsUdpGro()) {\n      server_socket_->addOptions(SocketOptionFactory::buildUdpGroOptions());\n    }\n    listener_ = std::make_unique<UdpListenerImpl>(\n        dispatcherImpl(), server_socket_, listener_callbacks_, dispatcherImpl().timeSource());\n    udp_packet_writer_ = std::make_unique<Network::UdpDefaultWriter>(server_socket_->ioHandle());\n    ON_CALL(listener_callbacks_, udpPacketWriter()).WillByDefault(ReturnRef(*udp_packet_writer_));\n  }\n\n  NiceMock<MockSupportsUdpGro> udp_gro_syscall_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls{&udp_gro_syscall_};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, UdpListenerImplTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Test that socket options are set after the listener is setup.\nTEST_P(UdpListenerImplTest, UdpSetListeningSocketOptionsSuccess) {\n  MockUdpListenerCallbacks listener_callbacks;\n  auto socket = std::make_shared<Network::UdpListenSocket>(Network::Test::getAnyAddress(version_),\n                                                           nullptr, true);\n  std::shared_ptr<MockSocketOption> option = 
std::make_shared<MockSocketOption>();\n  socket->addOption(option);\n  EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_BOUND))\n      .WillOnce(Return(true));\n  UdpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks,\n                           dispatcherImpl().timeSource());\n\n#ifdef SO_RXQ_OVFL\n  // Verify that overflow detection is enabled.\n  int get_overflow = 0;\n  socklen_t int_size = static_cast<socklen_t>(sizeof(get_overflow));\n  const Api::SysCallIntResult result =\n      server_socket_->getSocketOption(SOL_SOCKET, SO_RXQ_OVFL, &get_overflow, &int_size);\n  EXPECT_EQ(0, result.rc_);\n  EXPECT_EQ(1, get_overflow);\n#endif\n}\n\n/**\n * Tests UDP listener for actual destination and data.\n */\nTEST_P(UdpListenerImplTest, UseActualDstUdp) {\n  // We send 2 packets\n  const std::string first(\"first\");\n  client_.write(first, *send_to_addr_);\n  const std::string second(\"second\");\n  client_.write(second, *send_to_addr_);\n\n  EXPECT_CALL(listener_callbacks_, onReadReady());\n  EXPECT_CALL(listener_callbacks_, onData(_))\n      .WillOnce(Invoke([&](const UdpRecvData& data) -> void {\n        validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u);\n        EXPECT_EQ(data.buffer_->toString(), first);\n      }))\n      .WillOnce(Invoke([&](const UdpRecvData& data) -> void {\n        validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 
16u : 1u);\n        EXPECT_EQ(data.buffer_->toString(), second);\n\n        dispatcher_->exit();\n      }));\n\n  EXPECT_CALL(listener_callbacks_, onWriteReady(_))\n      .WillRepeatedly(Invoke([&](const Socket& socket) {\n        EXPECT_EQ(&socket.ioHandle(), &server_socket_->ioHandle());\n      }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n/**\n * Tests UDP listener for read and write callbacks with actual data.\n */\nTEST_P(UdpListenerImplTest, UdpEcho) {\n  // We send 17 packets and expect it to echo.\n  absl::FixedArray<std::string> client_data({\"first\", \"second\", \"third\", \"forth\", \"fifth\", \"sixth\",\n                                             \"seventh\", \"eighth\", \"ninth\", \"tenth\", \"eleventh\",\n                                             \"twelveth\", \"thirteenth\", \"fourteenth\", \"fifteenth\",\n                                             \"sixteenth\", \"seventeenth\"});\n  for (const auto& i : client_data) {\n    client_.write(i, *send_to_addr_);\n  }\n\n  // For unit test purposes, we assume that the data was received in order.\n  Address::InstanceConstSharedPtr test_peer_address;\n\n  std::vector<std::string> server_received_data;\n\n  EXPECT_CALL(listener_callbacks_, onReadReady());\n  EXPECT_CALL(listener_callbacks_, onData(_))\n      .WillOnce(Invoke([&](const UdpRecvData& data) -> void {\n        validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u);\n\n        test_peer_address = data.addresses_.peer_;\n\n        const std::string data_str = data.buffer_->toString();\n        EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]);\n\n        server_received_data.push_back(data_str);\n      }))\n      .WillRepeatedly(Invoke([&](const UdpRecvData& data) -> void {\n        validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 
16u : 1u);\n\n        const std::string data_str = data.buffer_->toString();\n        EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]);\n\n        server_received_data.push_back(data_str);\n      }));\n\n  EXPECT_CALL(listener_callbacks_, onWriteReady(_)).WillOnce(Invoke([&](const Socket& socket) {\n    EXPECT_EQ(&socket.ioHandle(), &server_socket_->ioHandle());\n    ASSERT_NE(test_peer_address, nullptr);\n\n    for (const auto& data : server_received_data) {\n      const std::string::size_type data_size = data.length() + 1;\n      uint64_t total_sent = 0;\n      const void* void_data = static_cast<const void*>(data.c_str() + total_sent);\n      Buffer::RawSlice slice{const_cast<void*>(void_data), data_size - total_sent};\n\n      Api::IoCallUint64Result send_rc = Api::ioCallUint64ResultNoError();\n      do {\n        send_rc = Network::Utility::writeToSocket(const_cast<Socket*>(&socket)->ioHandle(), &slice,\n                                                  1, nullptr, *test_peer_address);\n\n        if (send_rc.ok()) {\n          total_sent += send_rc.rc_;\n          if (total_sent >= data_size) {\n            break;\n          }\n        } else if (send_rc.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) {\n          break;\n        }\n      } while (((send_rc.rc_ == 0) &&\n                (send_rc.err_->getErrorCode() == Api::IoError::IoErrorCode::Again)) ||\n               (total_sent < data_size));\n\n      EXPECT_EQ(total_sent, data_size);\n    }\n\n    server_received_data.clear();\n    dispatcher_->exit();\n  }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n/**\n * Tests UDP listener's `enable` and `disable` APIs.\n */\nTEST_P(UdpListenerImplTest, UdpListenerEnableDisable) {\n  auto const* server_ip = server_socket_->localAddress()->ip();\n  ASSERT_NE(server_ip, nullptr);\n\n  // We first disable the listener and then send two packets.\n  // - With the listener disabled, we expect that none of the 
callbacks will be\n  // called.\n  // - When the listener is enabled back, we expect the callbacks to be called\n  listener_->disable();\n  const std::string first(\"first\");\n  client_.write(first, *send_to_addr_);\n  const std::string second(\"second\");\n  client_.write(second, *send_to_addr_);\n\n  EXPECT_CALL(listener_callbacks_, onReadReady()).Times(0);\n  EXPECT_CALL(listener_callbacks_, onData(_)).Times(0);\n\n  EXPECT_CALL(listener_callbacks_, onWriteReady(_)).Times(0);\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  listener_->enable();\n\n  EXPECT_CALL(listener_callbacks_, onReadReady());\n  EXPECT_CALL(listener_callbacks_, onData(_))\n      .Times(2)\n      .WillOnce(Return())\n      .WillOnce(Invoke([&](const UdpRecvData& data) -> void {\n        validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u);\n\n        EXPECT_EQ(data.buffer_->toString(), second);\n\n        dispatcher_->exit();\n      }));\n\n  EXPECT_CALL(listener_callbacks_, onWriteReady(_))\n      .WillRepeatedly(Invoke([&](const Socket& socket) {\n        EXPECT_EQ(&socket.ioHandle(), &server_socket_->ioHandle());\n      }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n/**\n * Tests UDP listener's error callback.\n */\nTEST_P(UdpListenerImplTest, UdpListenerRecvMsgError) {\n  auto const* server_ip = server_socket_->localAddress()->ip();\n  ASSERT_NE(server_ip, nullptr);\n\n  // When the `receive` system call returns an error, we expect the `onReceiveError`\n  // callback called with `SyscallError` parameter.\n  const std::string first(\"first\");\n  client_.write(first, *send_to_addr_);\n\n  EXPECT_CALL(listener_callbacks_, onData(_)).Times(0);\n\n  EXPECT_CALL(listener_callbacks_, onWriteReady(_)).WillOnce(Invoke([&](const Socket& socket) {\n    EXPECT_EQ(&socket.ioHandle(), &server_socket_->ioHandle());\n  }));\n\n  EXPECT_CALL(listener_callbacks_, onReadReady());\n  EXPECT_CALL(listener_callbacks_, 
onReceiveError(_))\n      .WillOnce(Invoke([&](Api::IoError::IoErrorCode err) -> void {\n        ASSERT_EQ(Api::IoError::IoErrorCode::NoSupport, err);\n\n        dispatcher_->exit();\n      }));\n  // Inject mocked OsSysCalls implementation to mock a read failure.\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n  EXPECT_CALL(os_sys_calls, supportsUdpGro());\n  EXPECT_CALL(os_sys_calls, supportsMmsg());\n  EXPECT_CALL(os_sys_calls, recvmsg(_, _, _))\n      .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP}));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n/**\n * Tests UDP listener for sending datagrams to destination.\n *  1. Setup a udp listener and client socket\n *  2. Send the data from the udp listener to the client socket and validate the contents and source\n * address.\n */\nTEST_P(UdpListenerImplTest, SendData) {\n  EXPECT_FALSE(udp_packet_writer_->isBatchMode());\n  const std::string payload(\"hello world\");\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  buffer->add(payload);\n\n  Address::InstanceConstSharedPtr send_from_addr = getNonDefaultSourceAddress();\n\n  UdpSendData send_data{send_from_addr->ip(), *client_.localAddress(), *buffer};\n\n  auto send_result = listener_->send(send_data);\n\n  EXPECT_TRUE(send_result.ok()) << \"send() failed : \" << send_result.err_->getErrorDetails();\n\n  const uint64_t bytes_to_read = payload.length();\n  UdpRecvData data;\n  client_.recv(data);\n  EXPECT_EQ(bytes_to_read, data.buffer_->length());\n  EXPECT_EQ(send_from_addr->asString(), data.addresses_.peer_->asString());\n  EXPECT_EQ(data.buffer_->toString(), payload);\n\n  // Verify External Flush is a No-op\n  auto flush_result = udp_packet_writer_->flush();\n  EXPECT_TRUE(flush_result.ok());\n  EXPECT_EQ(0, flush_result.rc_);\n}\n\n/**\n * The send fails because the server_socket is created with bind=false.\n */\nTEST_P(UdpListenerImplTest, 
SendDataError) {\n  const std::string payload(\"hello world\");\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  buffer->add(payload);\n  // send data to itself\n  UdpSendData send_data{send_to_addr_->ip(), *server_socket_->localAddress(), *buffer};\n\n  // Inject mocked OsSysCalls implementation to mock a write failure.\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n\n  EXPECT_CALL(os_sys_calls, sendmsg(_, _, _))\n      .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN}));\n  auto send_result = listener_->send(send_data);\n  EXPECT_FALSE(send_result.ok());\n  EXPECT_EQ(send_result.err_->getErrorCode(), Api::IoError::IoErrorCode::Again);\n  // Failed write shouldn't drain the data.\n  EXPECT_EQ(payload.length(), buffer->length());\n  // Verify the writer is set to blocked\n  EXPECT_TRUE(udp_packet_writer_->isWriteBlocked());\n\n  // Reset write_blocked status\n  udp_packet_writer_->setWritable();\n  EXPECT_FALSE(udp_packet_writer_->isWriteBlocked());\n\n  EXPECT_CALL(os_sys_calls, sendmsg(_, _, _))\n      .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP}));\n  send_result = listener_->send(send_data);\n  EXPECT_FALSE(send_result.ok());\n  EXPECT_EQ(send_result.err_->getErrorCode(), Api::IoError::IoErrorCode::NoSupport);\n  // Failed write shouldn't drain the data.\n  EXPECT_EQ(payload.length(), buffer->length());\n\n  ON_CALL(os_sys_calls, sendmsg(_, _, _))\n      .WillByDefault(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_INVAL}));\n  // EINVAL should cause RELEASE_ASSERT.\n  EXPECT_DEATH(listener_->send(send_data), \"Invalid argument passed in\");\n}\n\n/**\n * Test that multiple stacked packets of the same size are properly segmented\n * when UDP GRO is enabled on the platform.\n */\n#ifdef UDP_GRO\nTEST_P(UdpListenerImplTest, UdpGroBasic) {\n  // We send 4 packets (3 of equal length and 1 as a trail), which are concatenated together by\n  // kernel 
supporting udp gro. Verify the concatenated packet is transformed back into individual\n  // packets\n  absl::FixedArray<std::string> client_data({\"Equal!!!\", \"Length!!\", \"Messages\", \"trail\"});\n\n  for (const auto& i : client_data) {\n    client_.write(i, *send_to_addr_);\n  }\n\n  // The concatenated payload received from kernel supporting udp_gro\n  std::string stacked_message = absl::StrJoin(client_data, \"\");\n\n  // Mock OsSysCalls to mimic kernel behavior for packet concatenation\n  // based on udp_gro. supportsUdpGro should return true and recvmsg should\n  // return the concatenated payload with the gso_size set appropriately.\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n  EXPECT_CALL(os_sys_calls, supportsUdpGro).WillRepeatedly(Return(true));\n  EXPECT_CALL(os_sys_calls, supportsMmsg).Times(0);\n\n  EXPECT_CALL(os_sys_calls, recvmsg(_, _, _))\n      .WillOnce(Invoke([&](os_fd_t, msghdr* msg, int) {\n        // Set msg_name and msg_namelen\n        if (client_.localAddress()->ip()->version() == Address::IpVersion::v4) {\n          sockaddr_storage ss;\n          auto ipv4_addr = reinterpret_cast<sockaddr_in*>(&ss);\n          memset(ipv4_addr, 0, sizeof(sockaddr_in));\n          ipv4_addr->sin_family = AF_INET;\n          ipv4_addr->sin_addr.s_addr = htonl(INADDR_LOOPBACK);\n          ipv4_addr->sin_port = client_.localAddress()->ip()->port();\n          msg->msg_namelen = sizeof(sockaddr_in);\n          *reinterpret_cast<sockaddr_in*>(msg->msg_name) = *ipv4_addr;\n        } else if (client_.localAddress()->ip()->version() == Address::IpVersion::v6) {\n          sockaddr_storage ss;\n          auto ipv6_addr = reinterpret_cast<sockaddr_in6*>(&ss);\n          memset(ipv6_addr, 0, sizeof(sockaddr_in6));\n          ipv6_addr->sin6_family = AF_INET6;\n          ipv6_addr->sin6_addr = in6addr_loopback;\n          ipv6_addr->sin6_port = client_.localAddress()->ip()->port();\n      
    *reinterpret_cast<sockaddr_in6*>(msg->msg_name) = *ipv6_addr;\n          msg->msg_namelen = sizeof(sockaddr_in6);\n        }\n\n        // Set msg_iovec\n        EXPECT_EQ(msg->msg_iovlen, 1);\n        memcpy(msg->msg_iov[0].iov_base, stacked_message.data(), stacked_message.length());\n        msg->msg_iov[0].iov_len = stacked_message.length();\n\n        // Set control headers\n        memset(msg->msg_control, 0, msg->msg_controllen);\n        cmsghdr* cmsg = CMSG_FIRSTHDR(msg);\n        if (send_to_addr_->ip()->version() == Address::IpVersion::v4) {\n          cmsg->cmsg_level = IPPROTO_IP;\n#ifndef IP_RECVDSTADDR\n          cmsg->cmsg_type = IP_PKTINFO;\n          cmsg->cmsg_len = CMSG_LEN(sizeof(in_pktinfo));\n          reinterpret_cast<in_pktinfo*>(CMSG_DATA(cmsg))->ipi_addr.s_addr =\n              send_to_addr_->ip()->ipv4()->address();\n#else\n          cmsg.cmsg_type = IP_RECVDSTADDR;\n          cmsg->cmsg_len = CMSG_LEN(sizeof(in_addr));\n          *reinterpret_cast<in_addr*>(CMSG_DATA(cmsg)) = send_to_addr_->ip()->ipv4()->address();\n#endif\n        } else if (send_to_addr_->ip()->version() == Address::IpVersion::v6) {\n          cmsg->cmsg_len = CMSG_LEN(sizeof(in6_pktinfo));\n          cmsg->cmsg_level = IPPROTO_IPV6;\n          cmsg->cmsg_type = IPV6_PKTINFO;\n          auto pktinfo = reinterpret_cast<in6_pktinfo*>(CMSG_DATA(cmsg));\n          pktinfo->ipi6_ifindex = 0;\n          *(reinterpret_cast<absl::uint128*>(pktinfo->ipi6_addr.s6_addr)) =\n              send_to_addr_->ip()->ipv6()->address();\n        }\n\n        // Set gso_size\n        cmsg = CMSG_NXTHDR(msg, cmsg);\n        cmsg->cmsg_level = SOL_UDP;\n        cmsg->cmsg_type = UDP_GRO;\n        cmsg->cmsg_len = CMSG_LEN(sizeof(uint16_t));\n        const uint16_t gso_size = 8;\n        *reinterpret_cast<uint16_t*>(CMSG_DATA(cmsg)) = gso_size;\n\n#ifdef SO_RXQ_OVFL\n        // Set SO_RXQ_OVFL\n        cmsg = CMSG_NXTHDR(msg, cmsg);\n        EXPECT_NE(cmsg, nullptr);\n        
cmsg->cmsg_level = SOL_SOCKET;\n        cmsg->cmsg_type = SO_RXQ_OVFL;\n        cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));\n        const uint32_t overflow = 0;\n        *reinterpret_cast<uint32_t*>(CMSG_DATA(cmsg)) = overflow;\n#endif\n        return Api::SysCallSizeResult{static_cast<long>(stacked_message.length()), 0};\n      }))\n      .WillRepeatedly(Return(Api::SysCallSizeResult{-1, EAGAIN}));\n\n  EXPECT_CALL(listener_callbacks_, onReadReady());\n  EXPECT_CALL(listener_callbacks_, onData(_))\n      .WillOnce(Invoke([&](const UdpRecvData& data) -> void {\n        validateRecvCallbackParams(data, client_data.size());\n\n        const std::string data_str = data.buffer_->toString();\n        EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]);\n      }))\n      .WillRepeatedly(Invoke([&](const UdpRecvData& data) -> void {\n        validateRecvCallbackParams(data, client_data.size());\n\n        const std::string data_str = data.buffer_->toString();\n        EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]);\n      }));\n\n  EXPECT_CALL(listener_callbacks_, onWriteReady(_)).WillOnce(Invoke([&](const Socket& socket) {\n    EXPECT_EQ(&socket.ioHandle(), &server_socket_->ioHandle());\n    dispatcher_->exit();\n  }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n#endif\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/udp_listener_impl_test_base.h",
    "content": "#include <cstddef>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/socket_option_impl.h\"\n#include \"common/network/udp_listener_impl.h\"\n#include \"common/network/udp_packet_writer_handler_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/common/network/listener_impl_test_base.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/time/time.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass UdpListenerImplTestBase : public ListenerImplTestBase {\npublic:\n  UdpListenerImplTestBase()\n      : server_socket_(createServerSocket(true)), send_to_addr_(getServerLoopbackAddress()) {\n    time_system_.advanceTimeWait(std::chrono::milliseconds(100));\n  }\n\nprotected:\n  Address::Instance* getServerLoopbackAddress() {\n    if (version_ == Address::IpVersion::v4) {\n      return new Address::Ipv4Instance(Network::Test::getLoopbackAddressString(version_),\n                                       server_socket_->localAddress()->ip()->port());\n    }\n    return new Address::Ipv6Instance(Network::Test::getLoopbackAddressString(version_),\n                                     server_socket_->localAddress()->ip()->port());\n  }\n\n  SocketSharedPtr createServerSocket(bool bind) {\n    // Set IP_FREEBIND to allow sendmsg to send with non-local IPv6 source address.\n    return std::make_shared<UdpListenSocket>(Network::Test::getCanonicalLoopbackAddress(version_),\n#ifdef IP_FREEBIND\n                                             
SocketOptionFactory::buildIpFreebindOptions(),\n#else\n                                             nullptr,\n#endif\n                                             bind);\n  }\n\n  Address::InstanceConstSharedPtr getNonDefaultSourceAddress() {\n    // Use a self address that is unlikely to be picked by source address discovery\n    // algorithm if not specified in recvmsg/recvmmsg. Port is not taken into\n    // consideration.\n    Address::InstanceConstSharedPtr send_from_addr;\n    if (version_ == Address::IpVersion::v4) {\n      // Linux kernel regards any 127.x.x.x as local address. But Mac OS doesn't.\n      send_from_addr = std::make_shared<Address::Ipv4Instance>(\n          \"127.0.0.1\", server_socket_->localAddress()->ip()->port());\n    } else {\n      // Only use non-local v6 address if IP_FREEBIND is supported. Otherwise use\n      // ::1 to avoid EINVAL error. Unfortunately this can't verify that sendmsg with\n      // customized source address is doing the work because kernel also picks ::1\n      // if it's not specified in cmsghdr.\n      send_from_addr = std::make_shared<Address::Ipv6Instance>(\n#ifdef IP_FREEBIND\n          \"::9\",\n#else\n          \"::1\",\n#endif\n          server_socket_->localAddress()->ip()->port());\n    }\n    return send_from_addr;\n  }\n\n  // Validates receive data, source/destination address and received time.\n  void validateRecvCallbackParams(const UdpRecvData& data, size_t num_packet_per_recv) {\n    ASSERT_NE(data.addresses_.local_, nullptr);\n\n    ASSERT_NE(data.addresses_.peer_, nullptr);\n    ASSERT_NE(data.addresses_.peer_->ip(), nullptr);\n\n    EXPECT_EQ(data.addresses_.local_->asString(), send_to_addr_->asString());\n\n    EXPECT_EQ(data.addresses_.peer_->ip()->addressAsString(),\n              client_.localAddress()->ip()->addressAsString());\n\n    EXPECT_EQ(*data.addresses_.local_, *send_to_addr_);\n\n    EXPECT_EQ(time_system_.monotonicTime(),\n              data.receive_time_ +\n                  
std::chrono::milliseconds(\n                      (num_packets_received_by_listener_ % num_packet_per_recv) * 100));\n    // Advance time so that next onData() should have different received time.\n    time_system_.advanceTimeWait(std::chrono::milliseconds(100));\n    ++num_packets_received_by_listener_;\n  }\n\n  SocketSharedPtr server_socket_;\n  Network::Test::UdpSyncPeer client_{GetParam()};\n  Address::InstanceConstSharedPtr send_to_addr_;\n  NiceMock<MockUdpListenerCallbacks> listener_callbacks_;\n  NiceMock<MockListenerConfig> listener_config_;\n  std::unique_ptr<UdpListenerImpl> listener_;\n  size_t num_packets_received_by_listener_{0};\n  Network::UdpPacketWriterPtr udp_packet_writer_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/utility_corpus/test",
    "content": "127.0.0.1:0"
  },
  {
    "path": "test/common/network/utility_fuzz_test.cc",
    "content": "#include \"envoy/config/core/v3/address.pb.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  const std::string string_buffer(reinterpret_cast<const char*>(buf), len);\n\n  try {\n    Network::Utility::parseInternetAddress(string_buffer);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n\n  try {\n    Network::Utility::parseInternetAddressAndPort(string_buffer);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n\n  try {\n    std::list<Network::PortRange> port_range_list;\n    Network::Utility::parsePortRangeList(string_buffer, port_range_list);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n\n  try {\n    envoy::config::core::v3::Address proto_address;\n    proto_address.mutable_pipe()->set_path(string_buffer);\n    Network::Utility::protobufAddressToAddress(proto_address);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n\n  try {\n    FuzzedDataProvider provider(buf, len);\n    envoy::config::core::v3::Address proto_address;\n    const auto port_value = provider.ConsumeIntegral<uint16_t>();\n    const std::string address_value = provider.ConsumeRemainingBytesAsString();\n    proto_address.mutable_socket_address()->set_address(address_value);\n    proto_address.mutable_socket_address()->set_port_value(port_value);\n    Network::Utility::protobufAddressToAddress(proto_address);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n\n  try {\n    envoy::config::core::v3::Address proto_address;\n    Network::Address::Ipv4Instance address(string_buffer);\n    
Network::Utility::addressToProtobufAddress(address, proto_address);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n\n  try {\n    envoy::config::core::v3::Address proto_address;\n    Network::Address::PipeInstance address(string_buffer);\n    Network::Utility::addressToProtobufAddress(address, proto_address);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n}\n\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/network/utility_test.cc",
    "content": "#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace {\n\nTEST(NetworkUtility, Url) {\n  EXPECT_EQ(\"foo\", Utility::hostFromTcpUrl(\"tcp://foo:1234\"));\n  EXPECT_EQ(1234U, Utility::portFromTcpUrl(\"tcp://foo:1234\"));\n  EXPECT_THROW(Utility::hostFromTcpUrl(\"bogus://foo:1234\"), EnvoyException);\n  EXPECT_THROW(Utility::portFromTcpUrl(\"bogus://foo:1234\"), EnvoyException);\n  EXPECT_THROW(Utility::hostFromTcpUrl(\"abc://foo\"), EnvoyException);\n  EXPECT_THROW(Utility::portFromTcpUrl(\"abc://foo\"), EnvoyException);\n  EXPECT_THROW(Utility::hostFromTcpUrl(\"tcp://foo\"), EnvoyException);\n  EXPECT_THROW(Utility::portFromTcpUrl(\"tcp://foo\"), EnvoyException);\n  EXPECT_THROW(Utility::portFromTcpUrl(\"tcp://foo:bar\"), EnvoyException);\n  EXPECT_THROW(Utility::portFromTcpUrl(\"tcp://https://foo:1234\"), EnvoyException);\n  EXPECT_THROW(Utility::hostFromTcpUrl(\"\"), EnvoyException);\n  EXPECT_THROW(Utility::portFromTcpUrl(\"tcp://foo:999999999999\"), EnvoyException);\n}\n\nTEST(NetworkUtility, udpUrl) {\n  EXPECT_EQ(\"foo\", Utility::hostFromUdpUrl(\"udp://foo:1234\"));\n  EXPECT_EQ(1234U, Utility::portFromUdpUrl(\"udp://foo:1234\"));\n  EXPECT_THROW(Utility::hostFromUdpUrl(\"bogus://foo:1234\"), EnvoyException);\n  EXPECT_THROW(Utility::portFromUdpUrl(\"bogus://foo:1234\"), EnvoyException);\n  EXPECT_THROW(Utility::hostFromUdpUrl(\"tcp://foo\"), EnvoyException);\n  EXPECT_THROW(Utility::portFromUdpUrl(\"tcp://foo:1234\"), EnvoyException);\n  
EXPECT_THROW(Utility::portFromUdpUrl(\"udp://https://foo:1234\"), EnvoyException);\n  EXPECT_THROW(Utility::hostFromUdpUrl(\"\"), EnvoyException);\n  EXPECT_THROW(Utility::portFromUdpUrl(\"udp://foo:999999999999\"), EnvoyException);\n}\n\nTEST(NetworkUtility, resolveUrl) {\n  EXPECT_THROW(Utility::resolveUrl(\"foo\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"abc://foo\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://1.2.3.4:1234/\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://127.0.0.1:8001/\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://127.0.0.1:0/foo\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://127.0.0.1:\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://192.168.3.3\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://192.168.3.3.3:0\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://192.168.3:0\"), EnvoyException);\n\n  EXPECT_THROW(Utility::resolveUrl(\"udp://1.2.3.4:1234/\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"udp://127.0.0.1:8001/\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"udp://127.0.0.1:0/foo\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"udp://127.0.0.1:\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"udp://192.168.3.3\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"udp://192.168.3.3.3:0\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"udp://192.168.3:0\"), EnvoyException);\n\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://[::1]\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://[:::1]:1\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"tcp://foo:0\"), EnvoyException);\n\n  EXPECT_THROW(Utility::resolveUrl(\"udp://[::1]\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"udp://[:::1]:1\"), EnvoyException);\n  EXPECT_THROW(Utility::resolveUrl(\"udp://foo:0\"), EnvoyException);\n\n  EXPECT_EQ(\"\", 
Utility::resolveUrl(\"unix://\")->asString());\n  EXPECT_EQ(\"foo\", Utility::resolveUrl(\"unix://foo\")->asString());\n  EXPECT_EQ(\"tmp\", Utility::resolveUrl(\"unix://tmp\")->asString());\n  EXPECT_EQ(\"tmp/server\", Utility::resolveUrl(\"unix://tmp/server\")->asString());\n\n  EXPECT_EQ(\"1.2.3.4:1234\", Utility::resolveUrl(\"tcp://1.2.3.4:1234\")->asString());\n  EXPECT_EQ(\"0.0.0.0:0\", Utility::resolveUrl(\"tcp://0.0.0.0:0\")->asString());\n  EXPECT_EQ(\"127.0.0.1:0\", Utility::resolveUrl(\"tcp://127.0.0.1:0\")->asString());\n\n  EXPECT_EQ(\"[::1]:1\", Utility::resolveUrl(\"tcp://[::1]:1\")->asString());\n  EXPECT_EQ(\"[::]:0\", Utility::resolveUrl(\"tcp://[::]:0\")->asString());\n  EXPECT_EQ(\"[1::2:3]:4\", Utility::resolveUrl(\"tcp://[1::2:3]:4\")->asString());\n  EXPECT_EQ(\"[a::1]:0\", Utility::resolveUrl(\"tcp://[a::1]:0\")->asString());\n  EXPECT_EQ(\"[a:b:c:d::]:0\", Utility::resolveUrl(\"tcp://[a:b:c:d::]:0\")->asString());\n\n  EXPECT_EQ(\"1.2.3.4:1234\", Utility::resolveUrl(\"udp://1.2.3.4:1234\")->asString());\n  EXPECT_EQ(\"0.0.0.0:0\", Utility::resolveUrl(\"udp://0.0.0.0:0\")->asString());\n  EXPECT_EQ(\"127.0.0.1:0\", Utility::resolveUrl(\"udp://127.0.0.1:0\")->asString());\n\n  EXPECT_EQ(\"[::1]:1\", Utility::resolveUrl(\"udp://[::1]:1\")->asString());\n  EXPECT_EQ(\"[::]:0\", Utility::resolveUrl(\"udp://[::]:0\")->asString());\n  EXPECT_EQ(\"[1::2:3]:4\", Utility::resolveUrl(\"udp://[1::2:3]:4\")->asString());\n  EXPECT_EQ(\"[a::1]:0\", Utility::resolveUrl(\"udp://[a::1]:0\")->asString());\n  EXPECT_EQ(\"[a:b:c:d::]:0\", Utility::resolveUrl(\"udp://[a:b:c:d::]:0\")->asString());\n}\n\nTEST(NetworkUtility, ParseInternetAddress) {\n  EXPECT_THROW(Utility::parseInternetAddress(\"\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddress(\"1.2.3\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddress(\"1.2.3.4.5\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddress(\"1.2.3.256\"), EnvoyException);\n  
EXPECT_THROW(Utility::parseInternetAddress(\"foo\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddress(\"0:0:0:0\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddress(\"fffff::\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddress(\"/foo\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddress(\"[::]\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddress(\"[::1]:1\"), EnvoyException);\n\n  EXPECT_EQ(\"1.2.3.4:0\", Utility::parseInternetAddress(\"1.2.3.4\")->asString());\n  EXPECT_EQ(\"0.0.0.0:0\", Utility::parseInternetAddress(\"0.0.0.0\")->asString());\n  EXPECT_EQ(\"127.0.0.1:0\", Utility::parseInternetAddress(\"127.0.0.1\")->asString());\n\n  EXPECT_EQ(\"[::1]:0\", Utility::parseInternetAddress(\"::1\")->asString());\n  EXPECT_EQ(\"[::]:0\", Utility::parseInternetAddress(\"::\")->asString());\n  EXPECT_EQ(\"[1::2:3]:0\", Utility::parseInternetAddress(\"1::2:3\")->asString());\n  EXPECT_EQ(\"[a::1]:0\", Utility::parseInternetAddress(\"a::1\")->asString());\n  EXPECT_EQ(\"[a:b:c:d::]:0\", Utility::parseInternetAddress(\"a:b:c:d::\")->asString());\n}\n\nTEST(NetworkUtility, ParseInternetAddressAndPort) {\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"1.2.3.4\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"1.2.3.4:\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"1.2.3.4::1\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"1.2.3.4:-1\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\":1\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\" :1\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"1.2.3:1\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"1.2.3.4]:2\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"1.2.3.4:65536\"), EnvoyException);\n  
EXPECT_THROW(Utility::parseInternetAddressAndPort(\"1.2.3.4:8008/\"), EnvoyException);\n\n  EXPECT_EQ(\"0.0.0.0:0\", Utility::parseInternetAddressAndPort(\"0.0.0.0:0\")->asString());\n  EXPECT_EQ(\"255.255.255.255:65535\",\n            Utility::parseInternetAddressAndPort(\"255.255.255.255:65535\")->asString());\n  EXPECT_EQ(\"127.0.0.1:0\", Utility::parseInternetAddressAndPort(\"127.0.0.1:0\")->asString());\n\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"::1\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"::\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"[[::]]:1\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"[::]:1]:2\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"]:[::1]:2\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"[1.2.3.4:0\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"[1.2.3.4]:0\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"[::]:\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"[::]:-1\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"[::]:bogus\"), EnvoyException);\n  EXPECT_THROW(Utility::parseInternetAddressAndPort(\"[1::1]:65536\"), EnvoyException);\n\n  EXPECT_EQ(\"[::]:0\", Utility::parseInternetAddressAndPort(\"[::]:0\")->asString());\n  EXPECT_EQ(\"[1::1]:65535\", Utility::parseInternetAddressAndPort(\"[1::1]:65535\")->asString());\n  EXPECT_EQ(\"[::1]:0\", Utility::parseInternetAddressAndPort(\"[::1]:0\")->asString());\n}\n\nclass NetworkUtilityGetLocalAddress : public testing::TestWithParam<Address::IpVersion> {};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, NetworkUtilityGetLocalAddress,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         
TestUtility::ipTestParamsToString);\n\nTEST_P(NetworkUtilityGetLocalAddress, GetLocalAddress) {\n  EXPECT_NE(nullptr, Utility::getLocalAddress(GetParam()));\n}\n\nTEST(NetworkUtility, GetOriginalDst) {\n  testing::NiceMock<Network::MockConnectionSocket> socket;\n#ifdef SOL_IP\n  EXPECT_CALL(socket, ipVersion()).WillOnce(testing::Return(absl::nullopt));\n#endif\n  EXPECT_EQ(nullptr, Utility::getOriginalDst(socket));\n}\n\nTEST(NetworkUtility, LocalConnection) {\n  Network::Address::InstanceConstSharedPtr local_addr;\n  Network::Address::InstanceConstSharedPtr remote_addr;\n\n  testing::NiceMock<Network::MockConnectionSocket> socket;\n\n  EXPECT_CALL(socket, localAddress()).WillRepeatedly(testing::ReturnRef(local_addr));\n  EXPECT_CALL(socket, remoteAddress()).WillRepeatedly(testing::ReturnRef(remote_addr));\n\n  local_addr = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\");\n  remote_addr = std::make_shared<Network::Address::PipeInstance>(\"/pipe/path\");\n  EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  local_addr = std::make_shared<Network::Address::PipeInstance>(\"/pipe/path\");\n  remote_addr = std::make_shared<Network::Address::PipeInstance>(\"/pipe/path\");\n  EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  local_addr = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\");\n  remote_addr = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\");\n  EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  local_addr = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.2\");\n  EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  local_addr = std::make_shared<Network::Address::Ipv4Instance>(\"4.4.4.4\");\n  remote_addr = std::make_shared<Network::Address::Ipv4Instance>(\"8.8.8.8\");\n  EXPECT_FALSE(Utility::isSameIpOrLoopback(socket));\n\n  local_addr = std::make_shared<Network::Address::Ipv4Instance>(\"4.4.4.4\");\n  remote_addr = std::make_shared<Network::Address::Ipv4Instance>(\"4.4.4.4\");\n  
EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  local_addr = std::make_shared<Network::Address::Ipv4Instance>(\"4.4.4.4\", 1234);\n  remote_addr = std::make_shared<Network::Address::Ipv4Instance>(\"4.4.4.4\", 4321);\n  EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  local_addr = std::make_shared<Network::Address::Ipv6Instance>(\"::1\");\n  remote_addr = std::make_shared<Network::Address::Ipv6Instance>(\"::1\");\n  EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  local_addr = std::make_shared<Network::Address::Ipv6Instance>(\"::2\");\n  remote_addr = std::make_shared<Network::Address::Ipv6Instance>(\"::1\");\n  EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  remote_addr = std::make_shared<Network::Address::Ipv6Instance>(\"::3\");\n  EXPECT_FALSE(Utility::isSameIpOrLoopback(socket));\n\n  remote_addr = std::make_shared<Network::Address::Ipv6Instance>(\"::2\");\n  EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  remote_addr = std::make_shared<Network::Address::Ipv6Instance>(\"::2\", 4321);\n  local_addr = std::make_shared<Network::Address::Ipv6Instance>(\"::2\", 1234);\n  EXPECT_TRUE(Utility::isSameIpOrLoopback(socket));\n\n  remote_addr = std::make_shared<Network::Address::Ipv6Instance>(\"fd00::\");\n  EXPECT_FALSE(Utility::isSameIpOrLoopback(socket));\n}\n\nTEST(NetworkUtility, InternalAddress) {\n  EXPECT_TRUE(Utility::isInternalAddress(Address::Ipv4Instance(\"127.0.0.1\")));\n  EXPECT_TRUE(Utility::isInternalAddress(Address::Ipv4Instance(\"10.0.0.1\")));\n  EXPECT_TRUE(Utility::isInternalAddress(Address::Ipv4Instance(\"192.168.0.0\")));\n  EXPECT_TRUE(Utility::isInternalAddress(Address::Ipv4Instance(\"172.16.0.0\")));\n  EXPECT_TRUE(Utility::isInternalAddress(Address::Ipv4Instance(\"172.30.2.1\")));\n  EXPECT_FALSE(Utility::isInternalAddress(Address::Ipv4Instance(\"192.167.0.0\")));\n  EXPECT_FALSE(Utility::isInternalAddress(Address::Ipv4Instance(\"172.32.0.0\")));\n  
EXPECT_FALSE(Utility::isInternalAddress(Address::Ipv4Instance(\"11.0.0.1\")));\n\n  EXPECT_TRUE(Utility::isInternalAddress(Address::Ipv6Instance(\"fd00::\")));\n  EXPECT_TRUE(Utility::isInternalAddress(Address::Ipv6Instance(\"::1\")));\n  EXPECT_TRUE(Utility::isInternalAddress(Address::Ipv6Instance(\"fdff::\")));\n  EXPECT_TRUE(Utility::isInternalAddress(Address::Ipv6Instance(\"fd01::\")));\n  EXPECT_TRUE(\n      Utility::isInternalAddress(Address::Ipv6Instance(\"fd12:3456:7890:1234:5678:9012:3456:7890\")));\n  EXPECT_FALSE(Utility::isInternalAddress(Address::Ipv6Instance(\"fd::\")));\n  EXPECT_FALSE(Utility::isInternalAddress(Address::Ipv6Instance(\"::\")));\n  EXPECT_FALSE(Utility::isInternalAddress(Address::Ipv6Instance(\"fc00::\")));\n  EXPECT_FALSE(Utility::isInternalAddress(Address::Ipv6Instance(\"fe00::\")));\n\n  EXPECT_FALSE(Utility::isInternalAddress(Address::PipeInstance(\"/hello\")));\n}\n\nTEST(NetworkUtility, LoopbackAddress) {\n  {\n    Address::Ipv4Instance address(\"127.0.0.1\");\n    EXPECT_TRUE(Utility::isLoopbackAddress(address));\n  }\n  {\n    Address::Ipv4Instance address(\"10.0.0.1\");\n    EXPECT_FALSE(Utility::isLoopbackAddress(address));\n  }\n  {\n    Address::PipeInstance address(\"/foo\");\n    EXPECT_FALSE(Utility::isLoopbackAddress(address));\n  }\n  {\n    Address::Ipv6Instance address(\"::1\");\n    EXPECT_TRUE(Utility::isLoopbackAddress(address));\n  }\n  {\n    Address::Ipv6Instance address(\"::\");\n    EXPECT_FALSE(Utility::isLoopbackAddress(address));\n  }\n  EXPECT_EQ(\"127.0.0.1:0\", Utility::getCanonicalIpv4LoopbackAddress()->asString());\n  EXPECT_EQ(\"[::1]:0\", Utility::getIpv6LoopbackAddress()->asString());\n}\n\nTEST(NetworkUtility, AnyAddress) {\n  {\n    Address::InstanceConstSharedPtr any = Utility::getIpv4AnyAddress();\n    ASSERT_NE(any, nullptr);\n    EXPECT_EQ(any->type(), Address::Type::Ip);\n    EXPECT_EQ(any->ip()->version(), Address::IpVersion::v4);\n    EXPECT_EQ(any->asString(), \"0.0.0.0:0\");\n    
EXPECT_EQ(any, Utility::getIpv4AnyAddress());\n  }\n  {\n    Address::InstanceConstSharedPtr any = Utility::getIpv6AnyAddress();\n    ASSERT_NE(any, nullptr);\n    EXPECT_EQ(any->type(), Address::Type::Ip);\n    EXPECT_EQ(any->ip()->version(), Address::IpVersion::v6);\n    EXPECT_EQ(any->asString(), \"[::]:0\");\n    EXPECT_EQ(any, Utility::getIpv6AnyAddress());\n  }\n}\n\nTEST(NetworkUtility, ParseProtobufAddress) {\n  {\n    envoy::config::core::v3::Address proto_address;\n    proto_address.mutable_socket_address()->set_address(\"127.0.0.1\");\n    proto_address.mutable_socket_address()->set_port_value(1234);\n    EXPECT_EQ(\"127.0.0.1:1234\", Utility::protobufAddressToAddress(proto_address)->asString());\n  }\n  {\n    envoy::config::core::v3::Address proto_address;\n    proto_address.mutable_socket_address()->set_address(\"::1\");\n    proto_address.mutable_socket_address()->set_port_value(1234);\n    EXPECT_EQ(\"[::1]:1234\", Utility::protobufAddressToAddress(proto_address)->asString());\n  }\n  {\n    envoy::config::core::v3::Address proto_address;\n    proto_address.mutable_pipe()->set_path(\"/tmp/unix-socket\");\n    EXPECT_EQ(\"/tmp/unix-socket\", Utility::protobufAddressToAddress(proto_address)->asString());\n  }\n#if defined(__linux__)\n  {\n    envoy::config::core::v3::Address proto_address;\n    proto_address.mutable_pipe()->set_path(\"@/tmp/abstract-unix-socket\");\n    EXPECT_EQ(\"@/tmp/abstract-unix-socket\",\n              Utility::protobufAddressToAddress(proto_address)->asString());\n  }\n#endif\n}\n\nTEST(NetworkUtility, AddressToProtobufAddress) {\n  {\n    envoy::config::core::v3::Address proto_address;\n    Address::Ipv4Instance address(\"127.0.0.1\");\n    Utility::addressToProtobufAddress(address, proto_address);\n    EXPECT_EQ(true, proto_address.has_socket_address());\n    EXPECT_EQ(\"127.0.0.1\", proto_address.socket_address().address());\n    EXPECT_EQ(0, proto_address.socket_address().port_value());\n  }\n  {\n    
envoy::config::core::v3::Address proto_address;\n    Address::PipeInstance address(\"/hello\");\n    Utility::addressToProtobufAddress(address, proto_address);\n    EXPECT_EQ(true, proto_address.has_pipe());\n    EXPECT_EQ(\"/hello\", proto_address.pipe().path());\n  }\n}\n\nTEST(NetworkUtility, ProtobufAddressSocketType) {\n  {\n    envoy::config::core::v3::Address proto_address;\n    proto_address.mutable_socket_address();\n    EXPECT_EQ(Socket::Type::Stream, Utility::protobufAddressSocketType(proto_address));\n  }\n  {\n    envoy::config::core::v3::Address proto_address;\n    proto_address.mutable_socket_address()->set_protocol(\n        envoy::config::core::v3::SocketAddress::TCP);\n    EXPECT_EQ(Socket::Type::Stream, Utility::protobufAddressSocketType(proto_address));\n  }\n  {\n    envoy::config::core::v3::Address proto_address;\n    proto_address.mutable_socket_address()->set_protocol(\n        envoy::config::core::v3::SocketAddress::UDP);\n    EXPECT_EQ(Socket::Type::Datagram, Utility::protobufAddressSocketType(proto_address));\n  }\n  {\n    envoy::config::core::v3::Address proto_address;\n    proto_address.mutable_pipe();\n    EXPECT_EQ(Socket::Type::Stream, Utility::protobufAddressSocketType(proto_address));\n  }\n}\n\nTEST(PortRangeListTest, Errors) {\n  {\n    std::string port_range_str = \"a1\";\n    std::list<PortRange> port_range_list;\n    EXPECT_THROW(Utility::parsePortRangeList(port_range_str, port_range_list), EnvoyException);\n  }\n\n  {\n    std::string port_range_str = \"1A\";\n    std::list<PortRange> port_range_list;\n    EXPECT_THROW(Utility::parsePortRangeList(port_range_str, port_range_list), EnvoyException);\n  }\n\n  {\n    std::string port_range_str = \"1_1\";\n    std::list<PortRange> port_range_list;\n    EXPECT_THROW(Utility::parsePortRangeList(port_range_str, port_range_list), EnvoyException);\n  }\n\n  {\n    std::string port_range_str = \"1,1X1\";\n    std::list<PortRange> port_range_list;\n    
EXPECT_THROW(Utility::parsePortRangeList(port_range_str, port_range_list), EnvoyException);\n  }\n\n  {\n    std::string port_range_str = \"1,1*1\";\n    std::list<PortRange> port_range_list;\n    EXPECT_THROW(Utility::parsePortRangeList(port_range_str, port_range_list), EnvoyException);\n  }\n}\n\nstatic Address::Ipv4Instance makeFromPort(uint32_t port) {\n  return Address::Ipv4Instance(\"0.0.0.0\", port);\n}\n\nTEST(PortRangeListTest, Normal) {\n  {\n    std::string port_range_str = \"1\";\n    std::list<PortRange> port_range_list;\n\n    Utility::parsePortRangeList(port_range_str, port_range_list);\n    EXPECT_TRUE(Utility::portInRangeList(makeFromPort(1), port_range_list));\n    EXPECT_FALSE(Utility::portInRangeList(makeFromPort(2), port_range_list));\n    EXPECT_FALSE(Utility::portInRangeList(Address::PipeInstance(\"/foo\"), port_range_list));\n  }\n\n  {\n    std::string port_range_str = \"1024-2048\";\n    std::list<PortRange> port_range_list;\n\n    Utility::parsePortRangeList(port_range_str, port_range_list);\n    EXPECT_TRUE(Utility::portInRangeList(makeFromPort(1024), port_range_list));\n    EXPECT_TRUE(Utility::portInRangeList(makeFromPort(2048), port_range_list));\n    EXPECT_TRUE(Utility::portInRangeList(makeFromPort(1536), port_range_list));\n    EXPECT_FALSE(Utility::portInRangeList(makeFromPort(1023), port_range_list));\n    EXPECT_FALSE(Utility::portInRangeList(makeFromPort(2049), port_range_list));\n    EXPECT_FALSE(Utility::portInRangeList(makeFromPort(0), port_range_list));\n  }\n\n  {\n    std::string port_range_str = \"1,10-100,1000-10000,65535\";\n    std::list<PortRange> port_range_list;\n\n    Utility::parsePortRangeList(port_range_str, port_range_list);\n    EXPECT_TRUE(Utility::portInRangeList(makeFromPort(1), port_range_list));\n    EXPECT_TRUE(Utility::portInRangeList(makeFromPort(50), port_range_list));\n    EXPECT_TRUE(Utility::portInRangeList(makeFromPort(5000), port_range_list));\n    
EXPECT_TRUE(Utility::portInRangeList(makeFromPort(65535), port_range_list));\n    EXPECT_FALSE(Utility::portInRangeList(makeFromPort(2), port_range_list));\n    EXPECT_FALSE(Utility::portInRangeList(makeFromPort(200), port_range_list));\n    EXPECT_FALSE(Utility::portInRangeList(makeFromPort(20000), port_range_list));\n  }\n}\n\n// TODO(ccaraman): Support big-endian. These tests operate under the assumption that the machine\n// byte order is little-endian.\nTEST(AbslUint128, TestByteOrder) {\n  {\n    Address::Ipv6Instance address(\"::1\");\n    uint64_t high = 0x100000000000000;\n    EXPECT_EQ(absl::MakeUint128(high, 0), address.ip()->ipv6()->address());\n    EXPECT_EQ(absl::MakeUint128(high, 0),\n              Utility::Ip6htonl(Utility::Ip6ntohl(address.ip()->ipv6()->address())));\n\n    EXPECT_EQ(absl::uint128(1), Utility::Ip6ntohl(address.ip()->ipv6()->address()));\n  }\n  {\n    Address::Ipv6Instance address(\"1::\");\n    EXPECT_EQ(absl::uint128(256), address.ip()->ipv6()->address());\n    EXPECT_EQ(absl::uint128(256),\n              Utility::Ip6htonl(Utility::Ip6ntohl(address.ip()->ipv6()->address())));\n\n    uint64_t high = 0x001000000000000;\n    EXPECT_EQ(absl::MakeUint128(high, 0), Utility::Ip6ntohl(address.ip()->ipv6()->address()));\n  }\n  {\n    Address::Ipv6Instance address(\"2001:abcd:ef01:2345:6789:abcd:ef01:234\");\n    uint64_t low = 0x452301EFCDAB0120;\n    uint64_t high = 0x340201EFCDAB8967;\n    EXPECT_EQ(absl::MakeUint128(high, low), address.ip()->ipv6()->address());\n    EXPECT_EQ(absl::MakeUint128(high, low),\n              Utility::Ip6htonl(Utility::Ip6ntohl(address.ip()->ipv6()->address())));\n  }\n  {\n    Address::Ipv6Instance address(\"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\");\n    EXPECT_EQ(absl::Uint128Max(), address.ip()->ipv6()->address());\n    EXPECT_EQ(absl::Uint128Max(), Utility::Ip6ntohl(address.ip()->ipv6()->address()));\n  }\n  {\n    TestRandomGenerator rand;\n    absl::uint128 random_number = 
absl::MakeUint128(rand.random(), rand.random());\n    EXPECT_EQ(random_number, Utility::Ip6htonl(Utility::Ip6ntohl(random_number)));\n  }\n}\n\n} // namespace\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/protobuf/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"message_validator_impl_test\",\n    srcs = [\"message_validator_impl_test.cc\"],\n    deps = [\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    deps = [\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/proto:deprecated_proto_cc_proto\",\n        \"//test/proto:sensitive_proto_cc_proto\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2/core:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"type_util_test\",\n    srcs = [\"type_util_test.cc\"],\n    deps = [\n        \"//source/common/protobuf:type_util_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"value_util_fuzz_test\",\n    srcs = [\"value_util_fuzz_test.cc\"],\n    corpus = \"value_util_corpus\",\n    # Fuzzer is 
stable, no bugs, simple test target; avoid emitting CO2.\n    tags = [\"no_fuzz\"],\n    deps = [\"//source/common/protobuf:utility_lib\"],\n)\n"
  },
  {
    "path": "test/common/protobuf/message_validator_impl_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace ProtobufMessage {\nnamespace {\n\n// The null validation visitor doesn't do anything on unknown fields.\nTEST(NullValidationVisitorImpl, UnknownField) {\n  NullValidationVisitorImpl null_validation_visitor;\n  EXPECT_TRUE(null_validation_visitor.skipValidation());\n  EXPECT_NO_THROW(null_validation_visitor.onUnknownField(\"foo\"));\n}\n\n// The warning validation visitor logs and bumps stats on unknown fields\nTEST(WarningValidationVisitorImpl, UnknownField) {\n  Stats::TestUtil::TestStore stats;\n  Stats::Counter& unknown_counter = stats.counter(\"counter\");\n  WarningValidationVisitorImpl warning_validation_visitor;\n  // we want to be executed.\n  EXPECT_FALSE(warning_validation_visitor.skipValidation());\n  // First time around we should log.\n  EXPECT_LOG_CONTAINS(\"warn\", \"Unknown field: foo\",\n                      warning_validation_visitor.onUnknownField(\"foo\"));\n  // Duplicate descriptions don't generate a log the second time around.\n  EXPECT_LOG_NOT_CONTAINS(\"warn\", \"Unknown field: foo\",\n                          warning_validation_visitor.onUnknownField(\"foo\"));\n  // Unrelated variable increments.\n  EXPECT_LOG_CONTAINS(\"warn\", \"Unknown field: bar\",\n                      warning_validation_visitor.onUnknownField(\"bar\"));\n  // When we set the stats counter, the above increments are transferred.\n  EXPECT_EQ(0, unknown_counter.value());\n  warning_validation_visitor.setUnknownCounter(unknown_counter);\n  EXPECT_EQ(2, unknown_counter.value());\n  // A third unknown field is tracked in stats post-initialization.\n  EXPECT_LOG_CONTAINS(\"warn\", \"Unknown field: baz\",\n              
        warning_validation_visitor.onUnknownField(\"baz\"));\n  EXPECT_EQ(3, unknown_counter.value());\n}\n\n// The strict validation visitor throws on unknown fields.\nTEST(StrictValidationVisitorImpl, UnknownField) {\n  StrictValidationVisitorImpl strict_validation_visitor;\n  EXPECT_FALSE(strict_validation_visitor.skipValidation());\n  EXPECT_THROW_WITH_MESSAGE(strict_validation_visitor.onUnknownField(\"foo\"),\n                            UnknownProtoFieldException,\n                            \"Protobuf message (foo) has unknown fields\");\n}\n\n} // namespace\n} // namespace ProtobufMessage\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/protobuf/type_util_test.cc",
    "content": "#include \"common/protobuf/type_util.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Config {\nnamespace {\nTEST(TypeUtilTest, TypeUrlHelperFunction) {\n  EXPECT_EQ(\"envoy.config.filter.http.ip_tagging.v2.IPTagging\",\n            TypeUtil::typeUrlToDescriptorFullName(\n                \"type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging\"));\n  EXPECT_EQ(\n      \"type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging\",\n      TypeUtil::descriptorFullNameToTypeUrl(\"envoy.config.filter.http.ip_tagging.v2.IPTagging\"));\n}\n} // namespace\n} // namespace Config\n} // namespace Envoy"
  },
  {
    "path": "test/common/protobuf/utility_test.cc",
    "content": "#include \"envoy/api/v2/cluster.pb.h\"\n#include \"envoy/api/v2/core/base.pb.h\"\n#include \"envoy/config/bootstrap/v2/bootstrap.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.validate.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.validate.h\"\n#include \"envoy/config/cluster/v3/filter.pb.h\"\n#include \"envoy/config/cluster/v3/filter.pb.validate.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/config/api_version.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/proto/deprecated.pb.h\"\n#include \"test/proto/sensitive.pb.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/container/node_hash_set.h\"\n#include \"gtest/gtest.h\"\n#include \"udpa/type/v1/typed_struct.pb.h\"\n\nusing namespace std::chrono_literals;\n\nnamespace Envoy {\n\nclass RuntimeStatsHelper {\npublic:\n  RuntimeStatsHelper()\n      : api_(Api::createApiForTest(store_)),\n        runtime_deprecated_feature_use_(store_.counter(\"runtime.deprecated_feature_use\")),\n        deprecated_feature_seen_since_process_start_(\n            store_.gauge(\"runtime.deprecated_feature_seen_since_process_start\",\n                         Stats::Gauge::ImportMode::NeverImport)) {\n    envoy::config::bootstrap::v3::LayeredRuntime config;\n    config.add_layers()->mutable_admin_layer();\n    loader_ = 
std::make_unique<Runtime::ScopedLoaderSingleton>(\n        Runtime::LoaderPtr{new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, store_,\n                                                   generator_, validation_visitor_, *api_)});\n  }\n\n  Event::MockDispatcher dispatcher_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Stats::TestUtil::TestStore store_;\n  Random::MockRandomGenerator generator_;\n  Api::ApiPtr api_;\n  std::unique_ptr<Runtime::ScopedLoaderSingleton> loader_;\n  Stats::Counter& runtime_deprecated_feature_use_;\n  Stats::Gauge& deprecated_feature_seen_since_process_start_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n};\n\nclass ProtobufUtilityTest : public testing::Test, protected RuntimeStatsHelper {};\n\nTEST_F(ProtobufUtilityTest, ConvertPercentNaNDouble) {\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n  common_config_.mutable_healthy_panic_threshold()->set_value(\n      std::numeric_limits<double>::quiet_NaN());\n  EXPECT_THROW(PROTOBUF_PERCENT_TO_DOUBLE_OR_DEFAULT(common_config_, healthy_panic_threshold, 0.5),\n               EnvoyException);\n}\n\nTEST_F(ProtobufUtilityTest, ConvertPercentNaN) {\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n  common_config_.mutable_healthy_panic_threshold()->set_value(\n      std::numeric_limits<double>::quiet_NaN());\n  EXPECT_THROW(PROTOBUF_PERCENT_TO_ROUNDED_INTEGER_OR_DEFAULT(common_config_,\n                                                              healthy_panic_threshold, 100, 50),\n               EnvoyException);\n}\n\nnamespace ProtobufPercentHelper {\n\nTEST_F(ProtobufUtilityTest, EvaluateFractionalPercent) {\n  { // 0/100 (default)\n    envoy::type::v3::FractionalPercent percent;\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 0));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 50));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 
100));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 1000));\n  }\n  { // 5/100\n    envoy::type::v3::FractionalPercent percent;\n    percent.set_numerator(5);\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 0));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 4));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 5));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 50));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 100));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 104));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 105));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 204));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 1000));\n  }\n  { // 75/100\n    envoy::type::v3::FractionalPercent percent;\n    percent.set_numerator(75);\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 0));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 4));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 5));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 74));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 80));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 100));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 104));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 105));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 200));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 274));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 280));\n  }\n  { // 5/10000\n    envoy::type::v3::FractionalPercent percent;\n    percent.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n    percent.set_numerator(5);\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 0));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 4));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 5));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 50));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 100));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 9000));\n    
EXPECT_TRUE(evaluateFractionalPercent(percent, 10000));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 10004));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 10005));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 20004));\n  }\n  { // 5/MILLION\n    envoy::type::v3::FractionalPercent percent;\n    percent.set_denominator(envoy::type::v3::FractionalPercent::MILLION);\n    percent.set_numerator(5);\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 0));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 4));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 5));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 50));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 100));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 9000));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 10000));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 10004));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 10005));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 900005));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 900000));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 1000000));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 1000004));\n    EXPECT_FALSE(evaluateFractionalPercent(percent, 1000005));\n    EXPECT_TRUE(evaluateFractionalPercent(percent, 2000004));\n  }\n}\n\n} // namespace ProtobufPercentHelper\n\nTEST_F(ProtobufUtilityTest, MessageUtilHash) {\n  ProtobufWkt::Struct s;\n  (*s.mutable_fields())[\"ab\"].set_string_value(\"fgh\");\n  (*s.mutable_fields())[\"cde\"].set_string_value(\"ij\");\n\n  ProtobufWkt::Any a1;\n  a1.PackFrom(s);\n  // The two base64 encoded Struct to test map is identical to the struct above, this tests whether\n  // a map is deterministically serialized and hashed.\n  ProtobufWkt::Any a2 = a1;\n  a2.set_value(Base64::decode(\"CgsKA2NkZRIEGgJpagoLCgJhYhIFGgNmZ2g=\"));\n  ProtobufWkt::Any a3 = a1;\n  a3.set_value(Base64::decode(\"CgsKAmFiEgUaA2ZnaAoLCgNjZGUSBBoCaWo=\"));\n\n  
EXPECT_EQ(MessageUtil::hash(a1), MessageUtil::hash(a2));\n  EXPECT_EQ(MessageUtil::hash(a2), MessageUtil::hash(a3));\n  EXPECT_NE(0, MessageUtil::hash(a1));\n  EXPECT_NE(MessageUtil::hash(s), MessageUtil::hash(a1));\n}\n\nTEST_F(ProtobufUtilityTest, MessageUtilHashAndEqualToIgnoreOriginalTypeField) {\n  ProtobufWkt::Struct s;\n  (*s.mutable_fields())[\"ab\"].set_string_value(\"fgh\");\n  EXPECT_EQ(1, s.fields_size());\n  envoy::api::v2::core::Metadata mv2;\n  mv2.mutable_filter_metadata()->insert({\"xyz\", s});\n  EXPECT_EQ(1, mv2.filter_metadata_size());\n\n  // Add the OriginalTypeFieldNumber as unknown field.\n  envoy::config::core::v3::Metadata mv3;\n  Config::VersionConverter::upgrade(mv2, mv3);\n\n  // Add another unknown field.\n  {\n    const Protobuf::Reflection* reflection = mv3.GetReflection();\n    auto* unknown_field_set = reflection->MutableUnknownFields(&mv3);\n    auto set_size = unknown_field_set->field_count();\n    // 183412668 is the magic number OriginalTypeFieldNumber. 
The successor number should not be\n    // occupied.\n    unknown_field_set->AddFixed32(183412668 + 1, 1);\n    EXPECT_EQ(set_size + 1, unknown_field_set->field_count()) << \"Fail to add an unknown field\";\n  }\n\n  envoy::config::core::v3::Metadata mv3dup = mv3;\n  ASSERT_EQ(MessageUtil::hash(mv3), MessageUtil::hash(mv3dup));\n  ASSERT(MessageUtil()(mv3, mv3dup));\n}\n\nTEST_F(ProtobufUtilityTest, RepeatedPtrUtilDebugString) {\n  Protobuf::RepeatedPtrField<ProtobufWkt::UInt32Value> repeated;\n  EXPECT_EQ(\"[]\", RepeatedPtrUtil::debugString(repeated));\n  repeated.Add()->set_value(10);\n  EXPECT_EQ(\"[value: 10\\n]\", RepeatedPtrUtil::debugString(repeated));\n  repeated.Add()->set_value(20);\n  EXPECT_EQ(\"[value: 10\\n, value: 20\\n]\", RepeatedPtrUtil::debugString(repeated));\n}\n\n// Validated exception thrown when downcastAndValidate observes a PGV failures.\nTEST_F(ProtobufUtilityTest, DowncastAndValidateFailedValidation) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  bootstrap.mutable_static_resources()->add_clusters();\n  EXPECT_THROW(TestUtility::validate(bootstrap), ProtoValidationException);\n  EXPECT_THROW(\n      TestUtility::downcastAndValidate<const envoy::config::bootstrap::v3::Bootstrap&>(bootstrap),\n      ProtoValidationException);\n}\n\n// Validated exception thrown when downcastAndValidate observes a unknown field.\nTEST_F(ProtobufUtilityTest, DowncastAndValidateUnknownFields) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  bootstrap.GetReflection()->MutableUnknownFields(&bootstrap)->AddVarint(1, 0);\n  EXPECT_THROW_WITH_MESSAGE(TestUtility::validate(bootstrap), EnvoyException,\n                            \"Protobuf message (type envoy.config.bootstrap.v3.Bootstrap with \"\n                            \"unknown field set {1}) has unknown fields\");\n  EXPECT_THROW_WITH_MESSAGE(TestUtility::validate(bootstrap), EnvoyException,\n                            \"Protobuf message (type envoy.config.bootstrap.v3.Bootstrap with 
\"\n                            \"unknown field set {1}) has unknown fields\");\n}\n\n// Validated exception thrown when downcastAndValidate observes a nested unknown field.\nTEST_F(ProtobufUtilityTest, DowncastAndValidateUnknownFieldsNested) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  auto* cluster = bootstrap.mutable_static_resources()->add_clusters();\n  cluster->GetReflection()->MutableUnknownFields(cluster)->AddVarint(1, 0);\n  EXPECT_THROW_WITH_MESSAGE(TestUtility::validate(*cluster), EnvoyException,\n                            \"Protobuf message (type envoy.config.cluster.v3.Cluster with \"\n                            \"unknown field set {1}) has unknown fields\");\n  EXPECT_THROW_WITH_MESSAGE(TestUtility::validate(bootstrap), EnvoyException,\n                            \"Protobuf message (type envoy.config.cluster.v3.Cluster with \"\n                            \"unknown field set {1}) has unknown fields\");\n}\n\nTEST_F(ProtobufUtilityTest, LoadBinaryProtoFromFile) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  bootstrap.mutable_cluster_manager()\n      ->mutable_upstream_bind_config()\n      ->mutable_source_address()\n      ->set_address(\"1.1.1.1\");\n\n  const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb\", bootstrap.SerializeAsString());\n\n  envoy::config::bootstrap::v3::Bootstrap proto_from_file;\n  TestUtility::loadFromFile(filename, proto_from_file, *api_);\n  EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n  EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file));\n}\n\nTEST_F(ProtobufUtilityTest, DEPRECATED_FEATURE_TEST(LoadBinaryV2ProtoFromFile)) {\n  // Allow the use of v2.Bootstrap.runtime.\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.deprecated_features:envoy.config.bootstrap.v2.Bootstrap.runtime\", \"True \"}});\n  envoy::config::bootstrap::v2::Bootstrap bootstrap;\n  bootstrap.mutable_runtime()->set_symlink_root(\"/\");\n\n  
const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb\", bootstrap.SerializeAsString());\n\n  envoy::config::bootstrap::v3::Bootstrap proto_from_file;\n  TestUtility::loadFromFile(filename, proto_from_file, *api_);\n  EXPECT_EQ(\"/\", proto_from_file.hidden_envoy_deprecated_runtime().symlink_root());\n  EXPECT_GT(runtime_deprecated_feature_use_.value(), 0);\n}\n\n// An unknown field (or with wrong type) in a message is rejected.\nTEST_F(ProtobufUtilityTest, LoadBinaryProtoUnknownFieldFromFile) {\n  ProtobufWkt::Duration source_duration;\n  source_duration.set_seconds(42);\n  const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb\", source_duration.SerializeAsString());\n  // Verify without boosting\n  envoy::config::bootstrap::v3::Bootstrap proto_from_file;\n  EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_, false),\n                            EnvoyException,\n                            \"Protobuf message (type envoy.config.bootstrap.v3.Bootstrap with \"\n                            \"unknown field set {1}) has unknown fields\");\n\n  // Verify with boosting\n  EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_, true),\n                            EnvoyException,\n                            \"Protobuf message (type envoy.config.bootstrap.v3.Bootstrap with \"\n                            \"unknown field set {1}) has unknown fields\");\n}\n\n// Multiple unknown fields (or with wrong type) in a message are rejected.\nTEST_F(ProtobufUtilityTest, LoadBinaryProtoUnknownMultipleFieldsFromFile) {\n  ProtobufWkt::Duration source_duration;\n  source_duration.set_seconds(42);\n  source_duration.set_nanos(42);\n  const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb\", source_duration.SerializeAsString());\n  envoy::config::bootstrap::v3::Bootstrap proto_from_file;\n  
EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_),\n                            EnvoyException,\n                            \"Protobuf message (type envoy.config.bootstrap.v3.Bootstrap with \"\n                            \"unknown field set {1, 2}) has unknown fields\");\n}\n\nTEST_F(ProtobufUtilityTest, LoadTextProtoFromFile) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  bootstrap.mutable_cluster_manager()\n      ->mutable_upstream_bind_config()\n      ->mutable_source_address()\n      ->set_address(\"1.1.1.1\");\n\n  std::string bootstrap_text;\n  ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text));\n  const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb_text\", bootstrap_text);\n\n  envoy::config::bootstrap::v3::Bootstrap proto_from_file;\n  TestUtility::loadFromFile(filename, proto_from_file, *api_);\n  EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n  EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file));\n}\n\nTEST_F(ProtobufUtilityTest, LoadJsonFromFileNoBoosting) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  bootstrap.mutable_cluster_manager()\n      ->mutable_upstream_bind_config()\n      ->mutable_source_address()\n      ->set_address(\"1.1.1.1\");\n\n  std::string bootstrap_text;\n  ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text));\n  const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb_text\", bootstrap_text);\n\n  envoy::config::bootstrap::v3::Bootstrap proto_from_file;\n  TestUtility::loadFromFile(filename, proto_from_file, *api_);\n  EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n  EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file));\n}\n\nTEST_F(ProtobufUtilityTest, DEPRECATED_FEATURE_TEST(LoadV2TextProtoFromFile)) {\n  API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) bootstrap;\n  
bootstrap.mutable_node()->set_build_version(\"foo\");\n\n  std::string bootstrap_text;\n  ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text));\n  const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb_text\", bootstrap_text);\n\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_from_file;\n  TestUtility::loadFromFile(filename, proto_from_file, *api_);\n  EXPECT_GT(runtime_deprecated_feature_use_.value(), 0);\n  EXPECT_EQ(\"foo\", proto_from_file.node().hidden_envoy_deprecated_build_version());\n}\n\nTEST_F(ProtobufUtilityTest, LoadTextProtoFromFile_Failure) {\n  const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb_text\", \"invalid {\");\n\n  envoy::config::bootstrap::v3::Bootstrap proto_from_file;\n  EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_),\n                            EnvoyException,\n                            \"Unable to parse file \\\"\" + filename +\n                                \"\\\" as a text protobuf (type envoy.config.bootstrap.v3.Bootstrap)\");\n}\n\n// String fields annotated as sensitive should be converted to the string \"[redacted]\". 
String\n// fields that are neither annotated as sensitive nor contained in a sensitive message should be\n// left alone.\nTEST_F(ProtobufUtilityTest, RedactString) {\n  envoy::test::Sensitive actual, expected;\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_string: This field should be redacted.\nsensitive_repeated_string:\n  - This field should be redacted (1 of 2).\n  - This field should be redacted (2 of 2).\ninsensitive_string: This field should not be redacted.\ninsensitive_repeated_string:\n  - This field should not be redacted (1 of 2).\n  - This field should not be redacted (2 of 2).\n)EOF\",\n                            actual);\n\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_string: '[redacted]'\nsensitive_repeated_string:\n  - '[redacted]'\n  - '[redacted]'\ninsensitive_string: This field should not be redacted.\ninsensitive_repeated_string:\n  - This field should not be redacted (1 of 2).\n  - This field should not be redacted (2 of 2).\n)EOF\",\n                            expected);\n\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Bytes fields annotated as sensitive should be converted to the ASCII / UTF-8 encoding of the\n// string \"[redacted]\". 
Bytes fields that are neither annotated as sensitive nor contained in a\n// sensitive message should be left alone.\nTEST_F(ProtobufUtilityTest, RedactBytes) {\n  envoy::test::Sensitive actual, expected;\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_bytes: VGhlc2UgYnl0ZXMgc2hvdWxkIGJlIHJlZGFjdGVkLg==\nsensitive_repeated_bytes:\n  - VGhlc2UgYnl0ZXMgc2hvdWxkIGJlIHJlZGFjdGVkICgxIG9mIDIpLg==\n  - VGhlc2UgYnl0ZXMgc2hvdWxkIGJlIHJlZGFjdGVkICgyIG9mIDIpLg==\ninsensitive_bytes: VGhlc2UgYnl0ZXMgc2hvdWxkIG5vdCBiZSByZWRhY3RlZC4=\ninsensitive_repeated_bytes:\n  - VGhlc2UgYnl0ZXMgc2hvdWxkIG5vdCBiZSByZWRhY3RlZCAoMSBvZiAyKS4=\n  - VGhlc2UgYnl0ZXMgc2hvdWxkIG5vdCBiZSByZWRhY3RlZCAoMiBvZiAyKS4=\n)EOF\",\n                            actual);\n\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_bytes: W3JlZGFjdGVkXQ==\nsensitive_repeated_bytes:\n  - W3JlZGFjdGVkXQ==\n  - W3JlZGFjdGVkXQ==\ninsensitive_bytes: VGhlc2UgYnl0ZXMgc2hvdWxkIG5vdCBiZSByZWRhY3RlZC4=\ninsensitive_repeated_bytes:\n  - VGhlc2UgYnl0ZXMgc2hvdWxkIG5vdCBiZSByZWRhY3RlZCAoMSBvZiAyKS4=\n  - VGhlc2UgYnl0ZXMgc2hvdWxkIG5vdCBiZSByZWRhY3RlZCAoMiBvZiAyKS4=\n)EOF\",\n                            expected);\n\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Ints annotated as sensitive should be cleared. Ints that are neither annotated as sensitive nor\n// contained in a sensitive message should be left alone. 
Note that the same logic should apply to\n// any primitive type other than strings and bytes, although we omit tests for that here.\nTEST_F(ProtobufUtilityTest, RedactInts) {\n  envoy::test::Sensitive actual, expected;\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_int: 1\nsensitive_repeated_int:\n  - 1\n  - 2\ninsensitive_int: 1\ninsensitive_repeated_int:\n  - 1\n  - 2\n)EOF\",\n                            actual);\n\n  TestUtility::loadFromYaml(R\"EOF(\ninsensitive_int: 1\ninsensitive_repeated_int:\n  - 1\n  - 2\n)EOF\",\n                            expected);\n\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Messages annotated as sensitive should have all their fields redacted recursively. Messages that\n// are neither annotated as sensitive nor contained in a sensitive message should be left alone.\nTEST_F(ProtobufUtilityTest, RedactMessage) {\n  envoy::test::Sensitive actual, expected;\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_message:\n  insensitive_string: This field should be redacted because of its parent.\n  insensitive_repeated_string:\n    - This field should be redacted because of its parent (1 of 2).\n    - This field should be redacted because of its parent (2 of 2).\n  insensitive_int: 1\n  insensitive_repeated_int:\n    - 1\n    - 2\nsensitive_repeated_message:\n  - insensitive_string: This field should be redacted because of its parent (1 of 2).\n    insensitive_repeated_string:\n      - This field should be redacted because of its parent (1 of 4).\n      - This field should be redacted because of its parent (2 of 4).\n    insensitive_int: 1\n    insensitive_repeated_int:\n      - 1\n      - 2\n  - insensitive_string: This field should be redacted because of its parent (2 of 2).\n    insensitive_repeated_string:\n      - This field should be redacted because of its parent (3 of 4).\n      - This field should be redacted because of its parent (4 of 4).\n    insensitive_int: 2\n    
insensitive_repeated_int:\n      - 3\n      - 4\ninsensitive_message:\n  insensitive_string: This field should not be redacted.\n  insensitive_repeated_string:\n    - This field should not be redacted (1 of 2).\n    - This field should not be redacted (2 of 2).\n  insensitive_int: 1\n  insensitive_repeated_int:\n    - 1\n    - 2\ninsensitive_repeated_message:\n  - insensitive_string: This field should not be redacted (1 of 2).\n    insensitive_repeated_string:\n      - This field should not be redacted (1 of 4).\n      - This field should not be redacted (2 of 4).\n    insensitive_int: 1\n    insensitive_repeated_int:\n      - 1\n      - 2\n  - insensitive_string: This field should not be redacted (2 of 2).\n    insensitive_repeated_string:\n      - This field should not be redacted (3 of 4).\n      - This field should not be redacted (4 of 4).\n    insensitive_int: 2\n    insensitive_repeated_int:\n      - 3\n      - 4\n)EOF\",\n                            actual);\n\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_message:\n  insensitive_string: '[redacted]'\n  insensitive_repeated_string:\n    - '[redacted]'\n    - '[redacted]'\nsensitive_repeated_message:\n  - insensitive_string: '[redacted]'\n    insensitive_repeated_string:\n      - '[redacted]'\n      - '[redacted]'\n  - insensitive_string: '[redacted]'\n    insensitive_repeated_string:\n      - '[redacted]'\n      - '[redacted]'\ninsensitive_message:\n  insensitive_string: This field should not be redacted.\n  insensitive_repeated_string:\n    - This field should not be redacted (1 of 2).\n    - This field should not be redacted (2 of 2).\n  insensitive_int: 1\n  insensitive_repeated_int:\n    - 1\n    - 2\ninsensitive_repeated_message:\n  - insensitive_string: This field should not be redacted (1 of 2).\n    insensitive_repeated_string:\n      - This field should not be redacted (1 of 4).\n      - This field should not be redacted (2 of 4).\n    insensitive_int: 1\n    insensitive_repeated_int:\n      - 1\n 
     - 2\n  - insensitive_string: This field should not be redacted (2 of 2).\n    insensitive_repeated_string:\n      - This field should not be redacted (3 of 4).\n      - This field should not be redacted (4 of 4).\n    insensitive_int: 2\n    insensitive_repeated_int:\n      - 3\n      - 4\n)EOF\",\n                            expected);\n\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Messages packed into `Any` should be treated the same as normal messages.\nTEST_F(ProtobufUtilityTest, RedactAny) {\n  envoy::test::Sensitive actual, expected;\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_any:\n  '@type': type.googleapis.com/envoy.test.Sensitive\n  insensitive_string: This field should be redacted because of its parent.\n  insensitive_repeated_string:\n    - This field should be redacted because of its parent (1 of 2).\n    - This field should be redacted because of its parent (2 of 2).\n  insensitive_int: 1\n  insensitive_repeated_int:\n    - 1\n    - 2\nsensitive_repeated_any:\n  - '@type': type.googleapis.com/envoy.test.Sensitive\n    insensitive_string: This field should be redacted because of its parent (1 of 2).\n    insensitive_repeated_string:\n      - This field should be redacted because of its parent (1 of 4).\n      - This field should be redacted because of its parent (2 of 4).\n    insensitive_int: 1\n    insensitive_repeated_int:\n      - 1\n      - 2\n  - '@type': type.googleapis.com/envoy.test.Sensitive\n    insensitive_string: This field should be redacted because of its parent (2 of 2).\n    insensitive_repeated_string:\n      - This field should be redacted because of its parent (3 of 4).\n      - This field should be redacted because of its parent (4 of 4).\n    insensitive_int: 2\n    insensitive_repeated_int:\n      - 3\n      - 4\ninsensitive_any:\n  '@type': type.googleapis.com/envoy.test.Sensitive\n  sensitive_string: This field should be redacted.\n  sensitive_repeated_string:\n    
- This field should be redacted (1 of 2).\n    - This field should be redacted (2 of 2).\n  sensitive_int: 1\n  sensitive_repeated_int:\n    - 1\n    - 2\n  insensitive_string: This field should not be redacted.\n  insensitive_repeated_string:\n    - This field should not be redacted (1 of 2).\n    - This field should not be redacted (2 of 2).\n  insensitive_int: 1\n  insensitive_repeated_int:\n    - 1\n    - 2\ninsensitive_repeated_any:\n  - '@type': type.googleapis.com/envoy.test.Sensitive\n    sensitive_string: This field should be redacted (1 of 2).\n    sensitive_repeated_string:\n      - This field should be redacted (1 of 4).\n      - This field should be redacted (2 of 4).\n    sensitive_int: 1\n    sensitive_repeated_int:\n      - 1\n      - 2\n    insensitive_string: This field should not be redacted.\n    insensitive_repeated_string:\n      - This field should not be redacted (1 of 4).\n      - This field should not be redacted (2 of 4).\n    insensitive_int: 1\n    insensitive_repeated_int:\n      - 1\n      - 2\n  - '@type': type.googleapis.com/envoy.test.Sensitive\n    sensitive_string: This field should be redacted (2 of 2).\n    sensitive_repeated_string:\n      - This field should be redacted (3 of 4).\n      - This field should be redacted (4 of 4).\n    sensitive_int: 2\n    sensitive_repeated_int:\n      - 3\n      - 4\n    insensitive_string: This field should not be redacted.\n    insensitive_repeated_string:\n      - This field should not be redacted (3 of 4).\n      - This field should not be redacted (4 of 4).\n    insensitive_int: 2\n    insensitive_repeated_int:\n      - 3\n      - 4\n)EOF\",\n                            actual);\n\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_any:\n  '@type': type.googleapis.com/envoy.test.Sensitive\n  insensitive_string: '[redacted]'\n  insensitive_repeated_string:\n    - '[redacted]'\n    - '[redacted]'\nsensitive_repeated_any:\n  - '@type': type.googleapis.com/envoy.test.Sensitive\n    
insensitive_string: '[redacted]'\n    insensitive_repeated_string:\n      - '[redacted]'\n      - '[redacted]'\n  - '@type': type.googleapis.com/envoy.test.Sensitive\n    insensitive_string: '[redacted]'\n    insensitive_repeated_string:\n      - '[redacted]'\n      - '[redacted]'\ninsensitive_any:\n  '@type': type.googleapis.com/envoy.test.Sensitive\n  sensitive_string: '[redacted]'\n  sensitive_repeated_string:\n    - '[redacted]'\n    - '[redacted]'\n  insensitive_string: This field should not be redacted.\n  insensitive_repeated_string:\n    - This field should not be redacted (1 of 2).\n    - This field should not be redacted (2 of 2).\n  insensitive_int: 1\n  insensitive_repeated_int:\n    - 1\n    - 2\ninsensitive_repeated_any:\n  - '@type': type.googleapis.com/envoy.test.Sensitive\n    sensitive_string: '[redacted]'\n    sensitive_repeated_string:\n      - '[redacted]'\n      - '[redacted]'\n    insensitive_string: This field should not be redacted.\n    insensitive_repeated_string:\n      - This field should not be redacted (1 of 4).\n      - This field should not be redacted (2 of 4).\n    insensitive_int: 1\n    insensitive_repeated_int:\n      - 1\n      - 2\n  - '@type': type.googleapis.com/envoy.test.Sensitive\n    sensitive_string: '[redacted]'\n    sensitive_repeated_string:\n      - '[redacted]'\n      - '[redacted]'\n    insensitive_string: This field should not be redacted.\n    insensitive_repeated_string:\n      - This field should not be redacted (3 of 4).\n      - This field should not be redacted (4 of 4).\n    insensitive_int: 2\n    insensitive_repeated_int:\n      - 3\n      - 4\n)EOF\",\n                            expected);\n\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Empty `Any` can be trivially redacted.\nTEST_F(ProtobufUtilityTest, RedactEmptyAny) {\n  ProtobufWkt::Any actual;\n  TestUtility::loadFromYaml(R\"EOF(\n'@type': type.googleapis.com/envoy.test.Sensitive\n)EOF\",\n    
                        actual);\n\n  ProtobufWkt::Any expected = actual;\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Messages packed into `Any` with unknown type URLs are skipped.\nTEST_F(ProtobufUtilityTest, RedactAnyWithUnknownTypeUrl) {\n  ProtobufWkt::Any actual;\n  // Note, `loadFromYaml` validates the type when populating `Any`, so we have to pass the real type\n  // first and substitute an unknown message type after loading.\n  TestUtility::loadFromYaml(R\"EOF(\n'@type': type.googleapis.com/envoy.test.Sensitive\nsensitive_string: This field is sensitive, but we have no way of knowing.\n)EOF\",\n                            actual);\n  actual.set_type_url(\"type.googleapis.com/envoy.unknown.Message\");\n\n  ProtobufWkt::Any expected = actual;\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Messages packed into `TypedStruct` should be treated the same as normal messages. Note that\n// ints are quoted as strings here because that's what happens in the JSON conversion.\nTEST_F(ProtobufUtilityTest, RedactTypedStruct) {\n  envoy::test::Sensitive actual, expected;\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_typed_struct:\n  type_url: type.googleapis.com/envoy.test.Sensitive\n  value:\n    insensitive_string: This field should be redacted because of its parent.\n    insensitive_repeated_string:\n      - This field should be redacted because of its parent (1 of 2).\n      - This field should be redacted because of its parent (2 of 2).\n    insensitive_int: '1'\n    insensitive_repeated_int:\n      - '1'\n      - '2'\nsensitive_repeated_typed_struct:\n  - type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      insensitive_string: This field should be redacted because of its parent (1 of 2).\n      insensitive_repeated_string:\n        - This field should be redacted because of its parent (1 of 4).\n        - This field should be redacted 
because of its parent (2 of 4).\n      insensitive_int: '1'\n      insensitive_repeated_int:\n        - '1'\n        - '2'\n  - type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      insensitive_string: This field should be redacted because of its parent (2 of 2).\n      insensitive_repeated_string:\n        - This field should be redacted because of its parent (3 of 4).\n        - This field should be redacted because of its parent (4 of 4).\n      insensitive_int: '2'\n      insensitive_repeated_int:\n        - '3'\n        - '4'\ninsensitive_typed_struct:\n  type_url: type.googleapis.com/envoy.test.Sensitive\n  value:\n    sensitive_string: This field should be redacted.\n    sensitive_repeated_string:\n      - This field should be redacted (1 of 2).\n      - This field should be redacted (2 of 2).\n    sensitive_int: '1'\n    sensitive_repeated_int:\n      - '1'\n      - '2'\n    insensitive_string: This field should not be redacted.\n    insensitive_repeated_string:\n      - This field should not be redacted (1 of 2).\n      - This field should not be redacted (2 of 2).\n    insensitive_int: '1'\n    insensitive_repeated_int:\n      - '1'\n      - '2'\ninsensitive_repeated_typed_struct:\n  - type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      sensitive_string: This field should be redacted (1 of 2).\n      sensitive_repeated_string:\n        - This field should be redacted (1 of 4).\n        - This field should be redacted (2 of 4).\n      sensitive_int: '1'\n      sensitive_repeated_int:\n        - '1'\n        - '2'\n      insensitive_string: This field should not be redacted.\n      insensitive_repeated_string:\n        - This field should not be redacted (1 of 4).\n        - This field should not be redacted (2 of 4).\n      insensitive_int: '1'\n      insensitive_repeated_int:\n        - '1'\n        - '2'\n  - type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      sensitive_string: This field should be 
redacted (2 of 2).\n      sensitive_repeated_string:\n        - This field should be redacted (3 of 4).\n        - This field should be redacted (4 of 4).\n      sensitive_int: '2'\n      sensitive_repeated_int:\n        - '3'\n        - '4'\n      insensitive_string: This field should not be redacted.\n      insensitive_repeated_string:\n        - This field should not be redacted (3 of 4).\n        - This field should not be redacted (4 of 4).\n      insensitive_int: '2'\n      insensitive_repeated_int:\n        - '3'\n        - '4'\n)EOF\",\n                            actual);\n\n  TestUtility::loadFromYaml(R\"EOF(\nsensitive_typed_struct:\n  type_url: type.googleapis.com/envoy.test.Sensitive\n  value:\n    insensitive_string: '[redacted]'\n    insensitive_repeated_string:\n      - '[redacted]'\n      - '[redacted]'\nsensitive_repeated_typed_struct:\n  - type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      insensitive_string: '[redacted]'\n      insensitive_repeated_string:\n        - '[redacted]'\n        - '[redacted]'\n  - type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      insensitive_string: '[redacted]'\n      insensitive_repeated_string:\n        - '[redacted]'\n        - '[redacted]'\ninsensitive_typed_struct:\n  type_url: type.googleapis.com/envoy.test.Sensitive\n  value:\n    sensitive_string: '[redacted]'\n    sensitive_repeated_string:\n      - '[redacted]'\n      - '[redacted]'\n    insensitive_string: This field should not be redacted.\n    insensitive_repeated_string:\n      - This field should not be redacted (1 of 2).\n      - This field should not be redacted (2 of 2).\n    insensitive_int: '1'\n    insensitive_repeated_int:\n      - '1'\n      - '2'\ninsensitive_repeated_typed_struct:\n  - type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      sensitive_string: '[redacted]'\n      sensitive_repeated_string:\n        - '[redacted]'\n        - '[redacted]'\n      insensitive_string: This field 
should not be redacted.\n      insensitive_repeated_string:\n        - This field should not be redacted (1 of 4).\n        - This field should not be redacted (2 of 4).\n      insensitive_int: '1'\n      insensitive_repeated_int:\n        - '1'\n        - '2'\n  - type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      sensitive_string: '[redacted]'\n      sensitive_repeated_string:\n        - '[redacted]'\n        - '[redacted]'\n      insensitive_string: This field should not be redacted.\n      insensitive_repeated_string:\n        - This field should not be redacted (3 of 4).\n        - This field should not be redacted (4 of 4).\n      insensitive_int: '2'\n      insensitive_repeated_int:\n        - '3'\n        - '4'\n)EOF\",\n                            expected);\n\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Empty `TypedStruct` can be trivially redacted.\nTEST_F(ProtobufUtilityTest, RedactEmptyTypedStruct) {\n  udpa::type::v1::TypedStruct actual;\n  TestUtility::loadFromYaml(R\"EOF(\ntype_url: type.googleapis.com/envoy.test.Sensitive\n)EOF\",\n                            actual);\n\n  udpa::type::v1::TypedStruct expected = actual;\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Messages packed into `TypedStruct` with unknown type URLs are skipped.\nTEST_F(ProtobufUtilityTest, RedactTypedStructWithUnknownTypeUrl) {\n  udpa::type::v1::TypedStruct actual;\n  TestUtility::loadFromYaml(R\"EOF(\ntype_url: type.googleapis.com/envoy.unknown.Message\nvalue:\n  sensitive_string: This field is sensitive, but we have no way of knowing.\n)EOF\",\n                            actual);\n\n  udpa::type::v1::TypedStruct expected = actual;\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\n// Deeply-nested opaque protos (`Any` and `TypedStruct`), which are reified using the\n// `DynamicMessageFactory`, should 
be redacted correctly.\nTEST_F(ProtobufUtilityTest, RedactDeeplyNestedOpaqueProtos) {\n  envoy::test::Sensitive actual, expected;\n  TestUtility::loadFromYaml(R\"EOF(\ninsensitive_any:\n  '@type': type.googleapis.com/envoy.test.Sensitive\n  insensitive_any:\n    '@type': type.googleapis.com/envoy.test.Sensitive\n    sensitive_string: This field should be redacted (1 of 4).\n  insensitive_typed_struct:\n    type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      sensitive_string: This field should be redacted (2 of 4).\ninsensitive_typed_struct:\n  type_url: type.googleapis.com/envoy.test.Sensitive\n  value:\n    insensitive_any:\n      '@type': type.googleapis.com/envoy.test.Sensitive\n      sensitive_string: This field should be redacted (3 of 4).\n    insensitive_typed_struct:\n      type_url: type.googleapis.com/envoy.test.Sensitive\n      value:\n        sensitive_string: This field should be redacted (4 of 4).\n)EOF\",\n                            actual);\n  TestUtility::loadFromYaml(R\"EOF(\ninsensitive_any:\n  '@type': type.googleapis.com/envoy.test.Sensitive\n  insensitive_any:\n    '@type': type.googleapis.com/envoy.test.Sensitive\n    sensitive_string: '[redacted]'\n  insensitive_typed_struct:\n    type_url: type.googleapis.com/envoy.test.Sensitive\n    value:\n      sensitive_string: '[redacted]'\ninsensitive_typed_struct:\n  type_url: type.googleapis.com/envoy.test.Sensitive\n  value:\n    insensitive_any:\n      '@type': type.googleapis.com/envoy.test.Sensitive\n      sensitive_string: '[redacted]'\n    insensitive_typed_struct:\n      type_url: type.googleapis.com/envoy.test.Sensitive\n      value:\n        sensitive_string: '[redacted]'\n)EOF\",\n                            expected);\n  MessageUtil::redact(actual);\n  EXPECT_TRUE(TestUtility::protoEqual(expected, actual));\n}\n\nTEST_F(ProtobufUtilityTest, KeyValueStruct) {\n  const ProtobufWkt::Struct obj = MessageUtil::keyValueStruct(\"test_key\", \"test_value\");\n  
EXPECT_EQ(obj.fields_size(), 1);\n  EXPECT_EQ(obj.fields().at(\"test_key\").kind_case(), ProtobufWkt::Value::KindCase::kStringValue);\n  EXPECT_EQ(obj.fields().at(\"test_key\").string_value(), \"test_value\");\n}\n\nTEST_F(ProtobufUtilityTest, KeyValueStructMap) {\n  const ProtobufWkt::Struct obj = MessageUtil::keyValueStruct(\n      {{\"test_key\", \"test_value\"}, {\"test_another_key\", \"test_another_value\"}});\n  EXPECT_EQ(obj.fields_size(), 2);\n  EXPECT_EQ(obj.fields().at(\"test_key\").kind_case(), ProtobufWkt::Value::KindCase::kStringValue);\n  EXPECT_EQ(obj.fields().at(\"test_key\").string_value(), \"test_value\");\n  EXPECT_EQ(obj.fields().at(\"test_another_key\").kind_case(),\n            ProtobufWkt::Value::KindCase::kStringValue);\n  EXPECT_EQ(obj.fields().at(\"test_another_key\").string_value(), \"test_another_value\");\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilEqual_NullValues) {\n  ProtobufWkt::Value v1, v2;\n  v1.set_null_value(ProtobufWkt::NULL_VALUE);\n  v2.set_null_value(ProtobufWkt::NULL_VALUE);\n\n  ProtobufWkt::Value other;\n  other.set_string_value(\"s\");\n\n  EXPECT_TRUE(ValueUtil::equal(v1, v2));\n  EXPECT_FALSE(ValueUtil::equal(v1, other));\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilEqual_StringValues) {\n  ProtobufWkt::Value v1, v2, v3;\n  v1.set_string_value(\"s\");\n  v2.set_string_value(\"s\");\n  v3.set_string_value(\"not_s\");\n\n  EXPECT_TRUE(ValueUtil::equal(v1, v2));\n  EXPECT_FALSE(ValueUtil::equal(v1, v3));\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilEqual_NumberValues) {\n  ProtobufWkt::Value v1, v2, v3;\n  v1.set_number_value(1.0);\n  v2.set_number_value(1.0);\n  v3.set_number_value(100.0);\n\n  EXPECT_TRUE(ValueUtil::equal(v1, v2));\n  EXPECT_FALSE(ValueUtil::equal(v1, v3));\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilEqual_BoolValues) {\n  ProtobufWkt::Value v1, v2, v3;\n  v1.set_bool_value(true);\n  v2.set_bool_value(true);\n  v3.set_bool_value(false);\n\n  EXPECT_TRUE(ValueUtil::equal(v1, v2));\n  
EXPECT_FALSE(ValueUtil::equal(v1, v3));\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilEqual_StructValues) {\n  ProtobufWkt::Value string_val1, string_val2, bool_val;\n\n  string_val1.set_string_value(\"s1\");\n  string_val2.set_string_value(\"s2\");\n  bool_val.set_bool_value(true);\n\n  ProtobufWkt::Value v1, v2, v3, v4;\n  v1.mutable_struct_value()->mutable_fields()->insert({\"f1\", string_val1});\n  v1.mutable_struct_value()->mutable_fields()->insert({\"f2\", bool_val});\n\n  v2.mutable_struct_value()->mutable_fields()->insert({\"f1\", string_val1});\n  v2.mutable_struct_value()->mutable_fields()->insert({\"f2\", bool_val});\n\n  v3.mutable_struct_value()->mutable_fields()->insert({\"f1\", string_val2});\n  v3.mutable_struct_value()->mutable_fields()->insert({\"f2\", bool_val});\n\n  v4.mutable_struct_value()->mutable_fields()->insert({\"f1\", string_val1});\n\n  EXPECT_TRUE(ValueUtil::equal(v1, v2));\n  EXPECT_FALSE(ValueUtil::equal(v1, v3));\n  EXPECT_FALSE(ValueUtil::equal(v1, v4));\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilEqual_ListValues) {\n  ProtobufWkt::Value v1, v2, v3, v4;\n  v1.mutable_list_value()->add_values()->set_string_value(\"s\");\n  v1.mutable_list_value()->add_values()->set_bool_value(true);\n\n  v2.mutable_list_value()->add_values()->set_string_value(\"s\");\n  v2.mutable_list_value()->add_values()->set_bool_value(true);\n\n  v3.mutable_list_value()->add_values()->set_bool_value(true);\n  v3.mutable_list_value()->add_values()->set_string_value(\"s\");\n\n  v4.mutable_list_value()->add_values()->set_string_value(\"s\");\n\n  EXPECT_TRUE(ValueUtil::equal(v1, v2));\n  EXPECT_FALSE(ValueUtil::equal(v1, v3));\n  EXPECT_FALSE(ValueUtil::equal(v1, v4));\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilHash) {\n  ProtobufWkt::Value v;\n  v.set_string_value(\"s1\");\n\n  EXPECT_NE(ValueUtil::hash(v), 0);\n}\n\nTEST_F(ProtobufUtilityTest, MessageUtilLoadYamlDouble) {\n  ProtobufWkt::DoubleValue v;\n  MessageUtil::loadFromYaml(\"value: 1.0\", v, 
ProtobufMessage::getNullValidationVisitor());\n  EXPECT_DOUBLE_EQ(1.0, v.value());\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilLoadFromYamlScalar) {\n  EXPECT_EQ(ValueUtil::loadFromYaml(\"null\").ShortDebugString(), \"null_value: NULL_VALUE\");\n  EXPECT_EQ(ValueUtil::loadFromYaml(\"true\").ShortDebugString(), \"bool_value: true\");\n  EXPECT_EQ(ValueUtil::loadFromYaml(\"1\").ShortDebugString(), \"number_value: 1\");\n  EXPECT_EQ(ValueUtil::loadFromYaml(\"9223372036854775807\").ShortDebugString(),\n            \"string_value: \\\"9223372036854775807\\\"\");\n  EXPECT_EQ(ValueUtil::loadFromYaml(\"\\\"foo\\\"\").ShortDebugString(), \"string_value: \\\"foo\\\"\");\n  EXPECT_EQ(ValueUtil::loadFromYaml(\"foo\").ShortDebugString(), \"string_value: \\\"foo\\\"\");\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilLoadFromYamlObject) {\n  EXPECT_EQ(ValueUtil::loadFromYaml(\"[foo, bar]\").ShortDebugString(),\n            \"list_value { values { string_value: \\\"foo\\\" } values { string_value: \\\"bar\\\" } }\");\n  EXPECT_EQ(ValueUtil::loadFromYaml(\"foo: bar\").ShortDebugString(),\n            \"struct_value { fields { key: \\\"foo\\\" value { string_value: \\\"bar\\\" } } }\");\n}\n\nTEST_F(ProtobufUtilityTest, ValueUtilLoadFromYamlException) {\n  std::string bad_yaml = R\"EOF(\nadmin:\n  access_log_path: /dev/null\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n)EOF\";\n\n  EXPECT_THROW_WITH_REGEX(ValueUtil::loadFromYaml(bad_yaml), EnvoyException, \"bad conversion\");\n  EXPECT_THROW_WITHOUT_REGEX(ValueUtil::loadFromYaml(bad_yaml), EnvoyException,\n                             \"Unexpected YAML exception\");\n}\n\nTEST_F(ProtobufUtilityTest, HashedValue) {\n  ProtobufWkt::Value v1, v2, v3;\n  v1.set_string_value(\"s\");\n  v2.set_string_value(\"s\");\n  v3.set_string_value(\"not_s\");\n\n  HashedValue hv1(v1), hv2(v2), hv3(v3);\n\n  EXPECT_EQ(hv1, hv2);\n  EXPECT_NE(hv1, hv3);\n\n  HashedValue copy(hv1); // 
NOLINT(performance-unnecessary-copy-initialization)\n  EXPECT_EQ(hv1, copy);\n}\n\nTEST_F(ProtobufUtilityTest, HashedValueStdHash) {\n  ProtobufWkt::Value v1, v2, v3;\n  v1.set_string_value(\"s\");\n  v2.set_string_value(\"s\");\n  v3.set_string_value(\"not_s\");\n\n  HashedValue hv1(v1), hv2(v2), hv3(v3);\n\n  absl::node_hash_set<HashedValue> set;\n  set.emplace(hv1);\n  set.emplace(hv2);\n  set.emplace(hv3);\n\n  EXPECT_EQ(set.size(), 2); // hv1 == hv2\n  EXPECT_NE(set.find(hv1), set.end());\n  EXPECT_NE(set.find(hv3), set.end());\n}\n\n// MessageUtility::anyConvert() with the wrong type throws.\nTEST_F(ProtobufUtilityTest, AnyConvertWrongType) {\n  ProtobufWkt::Duration source_duration;\n  source_duration.set_seconds(42);\n  ProtobufWkt::Any source_any;\n  source_any.PackFrom(source_duration);\n  EXPECT_THROW_WITH_REGEX(\n      TestUtility::anyConvert<ProtobufWkt::Timestamp>(source_any), EnvoyException,\n      R\"(Unable to unpack as google.protobuf.Timestamp: \\[type.googleapis.com/google.protobuf.Duration\\] .*)\");\n}\n\n// Validated exception thrown when anyConvertAndValidate observes a PGV failures.\nTEST_F(ProtobufUtilityTest, AnyConvertAndValidateFailedValidation) {\n  envoy::config::cluster::v3::Filter filter;\n  ProtobufWkt::Any source_any;\n  source_any.PackFrom(filter);\n  EXPECT_THROW(MessageUtil::anyConvertAndValidate<envoy::config::cluster::v3::Filter>(\n                   source_any, ProtobufMessage::getStrictValidationVisitor()),\n               ProtoValidationException);\n}\n\n// MessageUtility::unpackTo() with the wrong type throws.\nTEST_F(ProtobufUtilityTest, UnpackToWrongType) {\n  ProtobufWkt::Duration source_duration;\n  source_duration.set_seconds(42);\n  ProtobufWkt::Any source_any;\n  source_any.PackFrom(source_duration);\n  ProtobufWkt::Timestamp dst;\n  EXPECT_THROW_WITH_REGEX(\n      MessageUtil::unpackTo(source_any, dst), EnvoyException,\n      R\"(Unable to unpack as google.protobuf.Timestamp: 
\\[type.googleapis.com/google.protobuf.Duration\\] .*)\");\n}\n\n// MessageUtility::unpackTo() with API message works at same version.\nTEST_F(ProtobufUtilityTest, UnpackToSameVersion) {\n  {\n    API_NO_BOOST(envoy::api::v2::Cluster) source;\n    source.set_drain_connections_on_host_removal(true);\n    ProtobufWkt::Any source_any;\n    source_any.PackFrom(source);\n    API_NO_BOOST(envoy::api::v2::Cluster) dst;\n    MessageUtil::unpackTo(source_any, dst);\n    EXPECT_TRUE(dst.drain_connections_on_host_removal());\n  }\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) source;\n    source.set_ignore_health_on_host_removal(true);\n    ProtobufWkt::Any source_any;\n    source_any.PackFrom(source);\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n    MessageUtil::unpackTo(source_any, dst);\n    EXPECT_TRUE(dst.ignore_health_on_host_removal());\n  }\n}\n\n// MessageUtility::unpackTo() with API message works across version.\nTEST_F(ProtobufUtilityTest, UnpackToNextVersion) {\n  API_NO_BOOST(envoy::api::v2::Cluster) source;\n  source.set_drain_connections_on_host_removal(true);\n  ProtobufWkt::Any source_any;\n  source_any.PackFrom(source);\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n  MessageUtil::unpackTo(source_any, dst);\n  EXPECT_GT(runtime_deprecated_feature_use_.value(), 0);\n  EXPECT_TRUE(dst.ignore_health_on_host_removal());\n}\n\n// Validate warning messages on v2 upgrades.\nTEST_F(ProtobufUtilityTest, V2UpgradeWarningLogs) {\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n  // First attempt works.\n  EXPECT_LOG_CONTAINS(\"warn\", \"Configuration does not parse cleanly as v3\",\n                      MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: true}\", dst,\n                                                ProtobufMessage::getNullValidationVisitor()));\n  // Second attempt immediately after fails.\n  EXPECT_LOG_NOT_CONTAINS(\"warn\", \"Configuration does not parse cleanly as v3\",\n               
           MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: true}\",\n                                                    dst,\n                                                    ProtobufMessage::getNullValidationVisitor()));\n  // Third attempt works, since this is a different log message.\n  EXPECT_LOG_CONTAINS(\"warn\", \"Configuration does not parse cleanly as v3\",\n                      MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: false}\", dst,\n                                                ProtobufMessage::getNullValidationVisitor()));\n  // This is kind of terrible, but it's hard to do dependency injection at onVersionUpgradeWarn().\n  std::this_thread::sleep_for(5s); // NOLINT\n  // We can log the original warning again.\n  EXPECT_LOG_CONTAINS(\"warn\", \"Configuration does not parse cleanly as v3\",\n                      MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: true}\", dst,\n                                                ProtobufMessage::getNullValidationVisitor()));\n}\n\n// MessageUtility::loadFromJson() throws on garbage JSON.\nTEST_F(ProtobufUtilityTest, LoadFromJsonGarbage) {\n  envoy::config::cluster::v3::Cluster dst;\n  EXPECT_THROW_WITH_REGEX(MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: true\", dst,\n                                                    ProtobufMessage::getNullValidationVisitor()),\n                          EnvoyException, \"Unable to parse JSON as proto.*after key:value pair.\");\n}\n\n// MessageUtility::loadFromJson() with API message works at same version.\nTEST_F(ProtobufUtilityTest, LoadFromJsonSameVersion) {\n  {\n    API_NO_BOOST(envoy::api::v2::Cluster) dst;\n    MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: true}\", dst,\n                              ProtobufMessage::getNullValidationVisitor());\n    EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n    EXPECT_TRUE(dst.drain_connections_on_host_removal());\n  }\n  
{\n    API_NO_BOOST(envoy::api::v2::Cluster) dst;\n    MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: true}\", dst,\n                              ProtobufMessage::getStrictValidationVisitor());\n    EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n    EXPECT_TRUE(dst.drain_connections_on_host_removal());\n  }\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n    MessageUtil::loadFromJson(\"{ignore_health_on_host_removal: true}\", dst,\n                              ProtobufMessage::getNullValidationVisitor());\n    EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n    EXPECT_TRUE(dst.ignore_health_on_host_removal());\n  }\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n    MessageUtil::loadFromJson(\"{ignore_health_on_host_removal: true}\", dst,\n                              ProtobufMessage::getStrictValidationVisitor());\n    EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n    EXPECT_TRUE(dst.ignore_health_on_host_removal());\n  }\n}\n\n// MessageUtility::loadFromJson() avoids boosting when version specified.\nTEST_F(ProtobufUtilityTest, LoadFromJsonNoBoosting) {\n  envoy::config::cluster::v3::Cluster dst;\n  EXPECT_THROW_WITH_REGEX(\n      MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: true}\", dst,\n                                ProtobufMessage::getStrictValidationVisitor(), false),\n      EnvoyException, \"INVALID_ARGUMENT:drain_connections_on_host_removal: Cannot find field.\");\n}\n\n// MessageUtility::loadFromJson() with API message works across version.\nTEST_F(ProtobufUtilityTest, LoadFromJsonNextVersion) {\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n    MessageUtil::loadFromJson(\"{use_tcp_for_dns_lookups: true}\", dst,\n                              ProtobufMessage::getNullValidationVisitor());\n    EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n    EXPECT_TRUE(dst.use_tcp_for_dns_lookups());\n  }\n  {\n    
API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n    MessageUtil::loadFromJson(\"{use_tcp_for_dns_lookups: true}\", dst,\n                              ProtobufMessage::getStrictValidationVisitor());\n    EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n    EXPECT_TRUE(dst.use_tcp_for_dns_lookups());\n  }\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n    MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: true}\", dst,\n                              ProtobufMessage::getNullValidationVisitor());\n    EXPECT_GT(runtime_deprecated_feature_use_.value(), 0);\n    EXPECT_TRUE(dst.ignore_health_on_host_removal());\n  }\n  {\n    API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst;\n    MessageUtil::loadFromJson(\"{drain_connections_on_host_removal: true}\", dst,\n                              ProtobufMessage::getStrictValidationVisitor());\n    EXPECT_GT(runtime_deprecated_feature_use_.value(), 0);\n    EXPECT_TRUE(dst.ignore_health_on_host_removal());\n  }\n}\n\nTEST_F(ProtobufUtilityTest, JsonConvertSuccess) {\n  envoy::config::bootstrap::v3::Bootstrap source;\n  source.set_flags_path(\"foo\");\n  ProtobufWkt::Struct tmp;\n  envoy::config::bootstrap::v3::Bootstrap dest;\n  TestUtility::jsonConvert(source, tmp);\n  TestUtility::jsonConvert(tmp, dest);\n  EXPECT_EQ(\"foo\", dest.flags_path());\n}\n\nTEST_F(ProtobufUtilityTest, JsonConvertUnknownFieldSuccess) {\n  const ProtobufWkt::Struct obj = MessageUtil::keyValueStruct(\"test_key\", \"test_value\");\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  EXPECT_NO_THROW(\n      MessageUtil::jsonConvert(obj, ProtobufMessage::getNullValidationVisitor(), bootstrap));\n}\n\nTEST_F(ProtobufUtilityTest, JsonConvertFail) {\n  ProtobufWkt::Duration source_duration;\n  source_duration.set_seconds(-281474976710656);\n  ProtobufWkt::Struct dest_struct;\n  EXPECT_THROW_WITH_REGEX(TestUtility::jsonConvert(source_duration, dest_struct), EnvoyException,\n                          
\"Unable to convert protobuf message to JSON string.*\"\n                          \"seconds exceeds limit for field:  seconds: -281474976710656\\n\");\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/3665.\nTEST_F(ProtobufUtilityTest, JsonConvertCamelSnake) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  // Make sure we use a field eligible for snake/camel case translation.\n  bootstrap.mutable_cluster_manager()->set_local_cluster_name(\"foo\");\n  ProtobufWkt::Struct json;\n  TestUtility::jsonConvert(bootstrap, json);\n  // Verify we can round-trip. This didn't cause the #3665 regression, but useful as a sanity check.\n  TestUtility::loadFromJson(MessageUtil::getJsonStringFromMessage(json, false), bootstrap);\n  // Verify we don't do a camel case conversion.\n  EXPECT_EQ(\"foo\", json.fields()\n                       .at(\"cluster_manager\")\n                       .struct_value()\n                       .fields()\n                       .at(\"local_cluster_name\")\n                       .string_value());\n}\n\n// Test the jsonConvertValue happy path. 
Failure modes are converted by jsonConvert tests.\nTEST_F(ProtobufUtilityTest, JsonConvertValueSuccess) {\n  {\n    envoy::config::bootstrap::v3::Bootstrap source;\n    source.set_flags_path(\"foo\");\n    ProtobufWkt::Value tmp;\n    envoy::config::bootstrap::v3::Bootstrap dest;\n    MessageUtil::jsonConvertValue(source, tmp);\n    TestUtility::jsonConvert(tmp, dest);\n    EXPECT_EQ(\"foo\", dest.flags_path());\n  }\n\n  {\n    ProtobufWkt::StringValue source;\n    source.set_value(\"foo\");\n    ProtobufWkt::Value dest;\n    MessageUtil::jsonConvertValue(source, dest);\n\n    ProtobufWkt::Value expected;\n    expected.set_string_value(\"foo\");\n    EXPECT_THAT(dest, ProtoEq(expected));\n  }\n}\n\nTEST_F(ProtobufUtilityTest, YamlLoadFromStringFail) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  // Verify loadFromYaml can parse valid YAML string.\n  TestUtility::loadFromYaml(\"node: { id: node1 }\", bootstrap);\n  // Verify loadFromYaml throws error when the input is an invalid YAML string.\n  EXPECT_THROW_WITH_MESSAGE(\n      TestUtility::loadFromYaml(\"not_a_yaml_that_can_be_converted_to_json\", bootstrap),\n      EnvoyException, \"Unable to convert YAML as JSON: not_a_yaml_that_can_be_converted_to_json\");\n  // When wrongly inputted by a file path, loadFromYaml throws an error.\n  EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromYaml(\"/home/configs/config.yaml\", bootstrap),\n                            EnvoyException,\n                            \"Unable to convert YAML as JSON: /home/configs/config.yaml\");\n  // Verify loadFromYaml throws error when the input leads to an Array. 
This error message is\n  // arguably more useful than only \"Unable to convert YAML as JSON\".\n  EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(\"- node: { id: node1 }\", bootstrap),\n                          EnvoyException,\n                          \"Unable to parse JSON as proto.*Root element must be a message.*\");\n}\n\nTEST_F(ProtobufUtilityTest, GetFlowYamlStringFromMessage) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  bootstrap.set_flags_path(\"foo\");\n  EXPECT_EQ(\"{flags_path: foo}\", MessageUtil::getYamlStringFromMessage(bootstrap, false, false));\n}\n\nTEST_F(ProtobufUtilityTest, GetBlockYamlStringFromMessage) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  bootstrap.set_flags_path(\"foo\");\n  EXPECT_EQ(\"flags_path: foo\", MessageUtil::getYamlStringFromMessage(bootstrap, true, false));\n}\n\nTEST_F(ProtobufUtilityTest, GetBlockYamlStringFromRecursiveMessage) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  bootstrap.set_flags_path(\"foo\");\n  bootstrap.mutable_node();\n  bootstrap.mutable_static_resources()->add_listeners()->set_name(\"http\");\n\n  const std::string expected_yaml = R\"EOF(\nnode:\n  {}\nstatic_resources:\n  listeners:\n    - name: http\nflags_path: foo)EOF\";\n  EXPECT_EQ(expected_yaml, \"\\n\" + MessageUtil::getYamlStringFromMessage(bootstrap, true, false));\n}\n\nTEST(DurationUtilTest, OutOfRange) {\n  {\n    ProtobufWkt::Duration duration;\n    duration.set_seconds(-1);\n    EXPECT_THROW(DurationUtil::durationToMilliseconds(duration), DurationUtil::OutOfRangeException);\n  }\n  {\n    ProtobufWkt::Duration duration;\n    duration.set_nanos(-1);\n    EXPECT_THROW(DurationUtil::durationToMilliseconds(duration), DurationUtil::OutOfRangeException);\n  }\n  {\n    ProtobufWkt::Duration duration;\n    duration.set_nanos(1000000000);\n    EXPECT_THROW(DurationUtil::durationToMilliseconds(duration), DurationUtil::OutOfRangeException);\n  }\n  {\n    ProtobufWkt::Duration duration;\n    
duration.set_seconds(Protobuf::util::TimeUtil::kDurationMaxSeconds + 1);\n    EXPECT_THROW(DurationUtil::durationToMilliseconds(duration), DurationUtil::OutOfRangeException);\n  }\n}\n\nclass DeprecatedFieldsTest : public testing::TestWithParam<bool>, protected RuntimeStatsHelper {\nprotected:\n  DeprecatedFieldsTest() : with_upgrade_(GetParam()) {}\n\n  void checkForDeprecation(const Protobuf::Message& message) {\n    if (with_upgrade_) {\n      envoy::test::deprecation_test::UpgradedBase upgraded_message;\n      Config::VersionConverter::upgrade(message, upgraded_message);\n      MessageUtil::checkForUnexpectedFields(upgraded_message,\n                                            ProtobufMessage::getStrictValidationVisitor());\n    } else {\n      MessageUtil::checkForUnexpectedFields(message, ProtobufMessage::getStrictValidationVisitor());\n    }\n  }\n\n  const bool with_upgrade_;\n};\n\nINSTANTIATE_TEST_SUITE_P(Versions, DeprecatedFieldsTest, testing::Values(false, true));\n\nTEST_P(DeprecatedFieldsTest, NoCrashIfRuntimeMissing) {\n  loader_.reset();\n\n  envoy::test::deprecation_test::Base base;\n  base.set_not_deprecated(\"foo\");\n  // Fatal checks for a non-deprecated field should cause no problem.\n  checkForDeprecation(base);\n}\n\nTEST_P(DeprecatedFieldsTest, NoErrorWhenDeprecatedFieldsUnused) {\n  envoy::test::deprecation_test::Base base;\n  base.set_not_deprecated(\"foo\");\n  // Fatal checks for a non-deprecated field should cause no problem.\n  checkForDeprecation(base);\n  EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n  EXPECT_EQ(0, deprecated_feature_seen_since_process_start_.value());\n}\n\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDeprecated)) {\n  envoy::test::deprecation_test::Base base;\n  base.set_is_deprecated(\"foo\");\n  // Non-fatal checks for a deprecated field should log rather than throw an exception.\n  EXPECT_LOG_CONTAINS(\"warning\",\n                      \"Using deprecated option 
'envoy.test.deprecation_test.Base.is_deprecated'\",\n                      checkForDeprecation(base));\n  EXPECT_EQ(1, runtime_deprecated_feature_use_.value());\n  EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value());\n}\n\n// Use of a deprecated and disallowed field should result in an exception.\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowed)) {\n  envoy::test::deprecation_test::Base base;\n  base.set_is_deprecated_fatal(\"foo\");\n  EXPECT_THROW_WITH_REGEX(\n      checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException,\n      \"Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'\");\n}\n\nTEST_P(DeprecatedFieldsTest,\n       DEPRECATED_FEATURE_TEST(IndividualFieldDisallowedWithRuntimeOverride)) {\n  envoy::test::deprecation_test::Base base;\n  base.set_is_deprecated_fatal(\"foo\");\n\n  // Make sure this is set up right.\n  EXPECT_THROW_WITH_REGEX(\n      checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException,\n      \"Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'\");\n  // The config will be rejected, so the feature will not be used.\n  EXPECT_EQ(0, runtime_deprecated_feature_use_.value());\n\n  // Now create a new snapshot with this feature allowed.\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.deprecated_features:envoy.test.deprecation_test.Base.is_deprecated_fatal\",\n        \"True \"}});\n\n  // Now the same deprecation check should only trigger a warning.\n  EXPECT_LOG_CONTAINS(\n      \"warning\",\n      \"Using runtime overrides to continue using now fatal-by-default deprecated option \"\n      \"'envoy.test.deprecation_test.Base.is_deprecated_fatal'\",\n      checkForDeprecation(base));\n  EXPECT_EQ(1, runtime_deprecated_feature_use_.value());\n}\n\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(DisallowViaRuntime)) {\n  envoy::test::deprecation_test::Base 
base;\n  base.set_is_deprecated(\"foo\");\n\n  EXPECT_LOG_CONTAINS(\"warning\",\n                      \"Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'\",\n                      checkForDeprecation(base));\n  EXPECT_EQ(1, runtime_deprecated_feature_use_.value());\n\n  // Now create a new snapshot with this feature disallowed.\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.deprecated_features:envoy.test.deprecation_test.Base.is_deprecated\", \" false\"}});\n\n  EXPECT_THROW_WITH_REGEX(\n      checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException,\n      \"Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'\");\n  EXPECT_EQ(1, runtime_deprecated_feature_use_.value());\n}\n\n// Note that given how Envoy config parsing works, the first time we hit a\n// 'fatal' error and throw, we won't log future warnings. That said, this tests\n// the case of the warning occurring before the fatal error.\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MixOfFatalAndWarnings)) {\n  envoy::test::deprecation_test::Base base;\n  base.set_is_deprecated(\"foo\");\n  base.set_is_deprecated_fatal(\"foo\");\n  EXPECT_LOG_CONTAINS(\n      \"warning\", \"Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'\", {\n        EXPECT_THROW_WITH_REGEX(\n            checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException,\n            \"Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'\");\n      });\n}\n\n// Present (unused) deprecated messages should be detected as deprecated.\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MessageDeprecated)) {\n  envoy::test::deprecation_test::Base base;\n  base.mutable_deprecated_message();\n  EXPECT_LOG_CONTAINS(\n      \"warning\", \"Using deprecated option 'envoy.test.deprecation_test.Base.deprecated_message'\",\n      checkForDeprecation(base));\n  EXPECT_EQ(1, 
runtime_deprecated_feature_use_.value());\n}\n\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(InnerMessageDeprecated)) {\n  envoy::test::deprecation_test::Base base;\n  base.mutable_not_deprecated_message()->set_inner_not_deprecated(\"foo\");\n  // Checks for a non-deprecated field shouldn't trigger warnings\n  EXPECT_LOG_NOT_CONTAINS(\"warning\", \"Using deprecated option\", checkForDeprecation(base));\n\n  base.mutable_not_deprecated_message()->set_inner_deprecated(\"bar\");\n  // Checks for a deprecated sub-message should result in a warning.\n  EXPECT_LOG_CONTAINS(\n      \"warning\",\n      \"Using deprecated option 'envoy.test.deprecation_test.Base.InnerMessage.inner_deprecated'\",\n      checkForDeprecation(base));\n}\n\n// Check that repeated sub-messages get validated.\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(SubMessageDeprecated)) {\n  envoy::test::deprecation_test::Base base;\n  base.add_repeated_message();\n  base.add_repeated_message()->set_inner_deprecated(\"foo\");\n  base.add_repeated_message();\n\n  // Fatal checks for a repeated deprecated sub-message should result in an exception.\n  EXPECT_LOG_CONTAINS(\"warning\",\n                      \"Using deprecated option \"\n                      \"'envoy.test.deprecation_test.Base.InnerMessage.inner_deprecated'\",\n                      checkForDeprecation(base));\n}\n\n// Check that deprecated repeated messages trigger\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RepeatedMessageDeprecated)) {\n  envoy::test::deprecation_test::Base base;\n  base.add_deprecated_repeated_message();\n\n  // Fatal checks for a repeated deprecated sub-message should result in an exception.\n  EXPECT_LOG_CONTAINS(\"warning\",\n                      \"Using deprecated option \"\n                      \"'envoy.test.deprecation_test.Base.deprecated_repeated_message'\",\n                      checkForDeprecation(base));\n}\n\n// Check that deprecated enum values trigger for default 
values\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecatedDefault)) {\n  envoy::test::deprecation_test::Base base;\n  base.mutable_enum_container();\n\n  EXPECT_LOG_CONTAINS(\n      \"warning\",\n      \"Using the default now-deprecated value DEPRECATED_DEFAULT for enum \"\n      \"'envoy.test.deprecation_test.Base.InnerMessageWithDeprecationEnum.deprecated_enum' from \"\n      \"file deprecated.proto. This enum value will be removed from Envoy soon so a non-default \"\n      \"value must now be explicitly set.\",\n      checkForDeprecation(base));\n}\n\n// Check that deprecated enum values trigger for non-default values\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecated)) {\n  envoy::test::deprecation_test::Base base;\n  base.mutable_enum_container()->set_deprecated_enum(\n      envoy::test::deprecation_test::Base::DEPRECATED_NOT_DEFAULT);\n\n  EXPECT_LOG_CONTAINS(\n      \"warning\",\n      \"Using deprecated value DEPRECATED_NOT_DEFAULT for enum \"\n      \"'envoy.test.deprecation_test.Base.InnerMessageWithDeprecationEnum.deprecated_enum' \"\n      \"from file deprecated.proto. 
This enum value will be removed from Envoy soon.\",\n      checkForDeprecation(base));\n}\n\n// Make sure the runtime overrides for protos work, by checking the non-fatal to\n// fatal option.\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RuntimeOverrideEnumDefault)) {\n  envoy::test::deprecation_test::Base base;\n  base.mutable_enum_container();\n\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.deprecated_features:envoy.test.deprecation_test.Base.DEPRECATED_DEFAULT\", \"false\"}});\n\n  // Make sure this is set up right.\n  EXPECT_THROW_WITH_REGEX(checkForDeprecation(base),\n                          Envoy::ProtobufMessage::DeprecatedProtoFieldException,\n                          \"Using the default now-deprecated value DEPRECATED_DEFAULT\");\n}\n\n// Make sure the runtime overrides for allowing fatal enums work.\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnum)) {\n  envoy::test::deprecation_test::Base base;\n  base.mutable_enum_container()->set_deprecated_enum(\n      envoy::test::deprecation_test::Base::DEPRECATED_FATAL);\n  EXPECT_THROW_WITH_REGEX(checkForDeprecation(base),\n                          Envoy::ProtobufMessage::DeprecatedProtoFieldException,\n                          \"Using deprecated value DEPRECATED_FATAL\");\n\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.deprecated_features:envoy.test.deprecation_test.Base.DEPRECATED_FATAL\", \"true\"}});\n\n  EXPECT_LOG_CONTAINS(\n      \"warning\",\n      \"Using runtime overrides to continue using now fatal-by-default deprecated value \"\n      \"DEPRECATED_FATAL for enum \"\n      \"'envoy.test.deprecation_test.Base.InnerMessageWithDeprecationEnum.deprecated_enum' \"\n      \"from file deprecated.proto. 
This enum value will be removed from Envoy soon.\",\n      checkForDeprecation(base));\n}\n\n// Verify that direct use of a hidden_envoy_deprecated field fails, but upgrade\n// succeeds\nTEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(ManualDeprecatedFieldAddition)) {\n  // Create a base message and insert a deprecated field. When upgrading the\n  // deprecated field should be set as deprecated, and a warning should be logged\n  envoy::test::deprecation_test::Base base_should_warn =\n      TestUtility::parseYaml<envoy::test::deprecation_test::Base>(R\"EOF(\n      not_deprecated: field1\n      is_deprecated: hidden_field1\n      not_deprecated_message:\n        inner_not_deprecated: subfield1\n      repeated_message:\n        - inner_not_deprecated: subfield2\n    )EOF\");\n\n  // Non-fatal checks for a deprecated field should log rather than throw an exception.\n  EXPECT_LOG_CONTAINS(\"warning\",\n                      \"Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'\",\n                      checkForDeprecation(base_should_warn));\n  EXPECT_EQ(1, runtime_deprecated_feature_use_.value());\n  EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value());\n\n  // Create an upgraded message and insert a deprecated field. 
This is a bypass\n  // of the upgrading procedure validation, and should fail\n  envoy::test::deprecation_test::UpgradedBase base_should_fail =\n      TestUtility::parseYaml<envoy::test::deprecation_test::UpgradedBase>(R\"EOF(\n      not_deprecated: field1\n      hidden_envoy_deprecated_is_deprecated: hidden_field1\n      not_deprecated_message:\n        inner_not_deprecated: subfield1\n      repeated_message:\n        - inner_not_deprecated: subfield2\n    )EOF\");\n\n  EXPECT_THROW_WITH_REGEX(\n      MessageUtil::checkForUnexpectedFields(base_should_fail,\n                                            ProtobufMessage::getStrictValidationVisitor()),\n      ProtoValidationException,\n      \"Illegal use of hidden_envoy_deprecated_ V2 field \"\n      \"'envoy.test.deprecation_test.UpgradedBase.hidden_envoy_deprecated_is_deprecated'\");\n  // The config will be rejected, so the feature will not be used.\n  EXPECT_EQ(1, runtime_deprecated_feature_use_.value());\n  EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value());\n}\n\nclass TimestampUtilTest : public testing::Test, public ::testing::WithParamInterface<int64_t> {};\n\nTEST_P(TimestampUtilTest, SystemClockToTimestampTest) {\n  // Generate an input time_point<system_clock>,\n  std::chrono::time_point<std::chrono::system_clock> epoch_time;\n  auto time_original = epoch_time + std::chrono::milliseconds(GetParam());\n\n  // And convert that to Timestamp.\n  ProtobufWkt::Timestamp timestamp;\n  TimestampUtil::systemClockToTimestamp(time_original, timestamp);\n\n  // Then convert that Timestamp back into a time_point<system_clock>,\n  std::chrono::time_point<std::chrono::system_clock> time_reflected =\n      epoch_time +\n      std::chrono::milliseconds(Protobuf::util::TimeUtil::TimestampToMilliseconds(timestamp));\n\n  EXPECT_EQ(time_original, time_reflected);\n}\n\nINSTANTIATE_TEST_SUITE_P(TimestampUtilTestAcrossRange, TimestampUtilTest,\n                         ::testing::Values(-1000 * 60 * 60 * 24 * 7, 
// week\n                                           -1000 * 60 * 60 * 24,     // day\n                                           -1000 * 60 * 60,          // hour\n                                           -1000 * 60,               // minute\n                                           -1000,                    // second\n                                           -1,                       // millisecond\n                                           0,\n                                           1,                      // millisecond\n                                           1000,                   // second\n                                           1000 * 60,              // minute\n                                           1000 * 60 * 60,         // hour\n                                           1000 * 60 * 60 * 24,    // day\n                                           1000 * 60 * 60 * 24 * 7 // week\n                                           ));\n\nTEST(StatusCode, Strings) {\n  int last_code = static_cast<int>(ProtobufUtil::error::UNAUTHENTICATED);\n  for (int i = 0; i < last_code; ++i) {\n    EXPECT_NE(MessageUtil::CodeEnumToString(static_cast<ProtobufUtil::error::Code>(i)), \"\");\n  }\n  ASSERT_EQ(\"UNKNOWN\",\n            MessageUtil::CodeEnumToString(static_cast<ProtobufUtil::error::Code>(last_code + 1)));\n  ASSERT_EQ(\"OK\", MessageUtil::CodeEnumToString(ProtobufUtil::error::OK));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/protobuf/value_util_corpus/empty",
    "content": ""
  },
  {
    "path": "test/common/protobuf/value_util_corpus/string_value",
    "content": "string_value: \"foo\"\n"
  },
  {
    "path": "test/common/protobuf/value_util_corpus/struct_value",
    "content": "struct_value {\n  fields {\n    key: \"null\"\n    value {\n      null_value: NULL_VALUE\n    }\n  }\n  fields {\n    key: \"number\"\n    value {\n      number_value: 3.14159265358979323\n    }\n  }\n  fields {\n    key: \"string\"\n    value {\n      string_value: \"string\"\n    }\n  }\n  fields {\n    key: \"bool\"\n    value {\n      bool_value: true\n    }\n  }\n  fields {\n    key: \"list\"\n    value {\n      list_value {\n        values {\n          string_value: \"some\"\n        }\n        values {\n          string_value: \"thing\"\n        }\n      }\n    }\n  }\n  fields {\n    key: \"struct\"\n    value {\n      struct_value {\n        fields {\n          key: \"foo\"\n          value {\n            number_value: 42\n          }\n        }\n        fields {\n          key: \"bar\"\n          value {\n            number_value: 37\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/protobuf/value_util_fuzz_test.cc",
    "content": "#include \"common/protobuf/utility.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\nDEFINE_PROTO_FUZZER(const ProtobufWkt::Value& input) { ValueUtil::equal(input, input); }\n\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_binary\",\n    \"envoy_cc_test_library\",\n    \"envoy_directory_genrule\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"config_impl_test\",\n    deps = [\":config_impl_test_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"config_impl_test_lib\",\n    srcs = [\"config_impl_test.cc\"],\n    deps = [\n        \":route_fuzz_proto_cc_proto\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/router:config_lib\",\n        \"//source/common/stream_info:filter_state_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/upstream:retry_priority_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"config_impl_headermap_benchmark_test\",\n    srcs = [\"config_impl_headermap_benchmark_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/router:config_lib\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"header_parser_fuzz_proto\",\n    srcs = 
[\"header_parser_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"header_parser_fuzz_test\",\n    srcs = [\"header_parser_fuzz_test.cc\"],\n    corpus = \"header_parser_corpus\",\n    deps = [\n        \":header_parser_fuzz_proto_cc_proto\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/router:header_parser_lib\",\n        \"//test/fuzz:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"reset_header_parser_test\",\n    srcs = [\"reset_header_parser_test.cc\"],\n    deps = [\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/router:reset_header_parser_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"rds_impl_test\",\n    srcs = [\"rds_impl_test.cc\"],\n    deps = [\n        \"//source/common/config:utility_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/router:rds_lib\",\n        \"//source/server/admin:admin_lib\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"scoped_config_impl_test\",\n    srcs = [\"scoped_config_impl_test.cc\"],\n    external_deps = [\n        \"abseil_strings\",\n    ],\n    deps 
= [\n        \"//source/common/router:scoped_config_lib\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"scoped_rds_test\",\n    srcs = [\"scoped_rds_test.cc\"],\n    external_deps = [\n        \"abseil_strings\",\n    ],\n    deps = [\n        \"//include/envoy/config:subscription_interface\",\n        \"//include/envoy/init:manager_interface\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/router:scoped_rds_lib\",\n        \"//source/server/admin:admin_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"vhds_test\",\n    srcs = [\"vhds_test.cc\"],\n    deps = [\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/router:rds_lib\",\n        \"//source/common/router:vhds_lib\",\n        \"//source/server/admin:admin_lib\",\n        
\"//test/mocks/config:config_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"retry_state_impl_test\",\n    srcs = [\"retry_state_impl_test.cc\"],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/router:reset_header_parser_lib\",\n        \"//source/common/router:retry_state_lib\",\n        \"//source/common/upstream:resource_manager_lib\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"route_fuzz_proto\",\n    srcs = [\"route_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg\",\n    ],\n)\n\n# envoy_cc_test_binary is generating mostly static binary regardless of config\nenvoy_cc_test_binary(\n    name = \"config_impl_test_static\",\n    deps = [\n        \":config_impl_test_lib\",\n        \"//test:main\",\n    ],\n)\n\nsh_binary(\n    name = \"corpus_from_config_impl_sh\",\n    srcs = [\"corpus_from_config_impl.sh\"],\n)\n\nenvoy_directory_genrule(\n    name = \"corpus_from_config_impl\",\n    testonly = 1,\n    srcs = [\n        # This is deliberately in srcs, since we run into host/target confusion\n        # otherwise in oss-fuzz builds.\n        \":config_impl_test_static\",\n    ],\n    cmd = \" 
\".join([\n        \"$(location corpus_from_config_impl_sh)\",\n        \"$(location //test/common/router:config_impl_test_static)\",\n    ]),\n    tools = [\":corpus_from_config_impl_sh\"],\n)\n\nfilegroup(\n    name = \"route_corpus\",\n    testonly = 1,\n    srcs = select({\n        # TODO(asraa): Clean this up for cross-compilation. Right now we assume\n        # the host and target are the same on x86 builds, so we only execute the\n        # corpus generation binary on x86 platforms.\n        \"//bazel:x86\": [\":corpus_from_config_impl\"],\n        \"//conditions:default\": [],\n    }) + glob([\n        \"route_corpus/**\",\n    ]),\n)\n\nenvoy_cc_fuzz_test(\n    name = \"route_fuzz_test\",\n    srcs = [\"route_fuzz_test.cc\"],\n    corpus = \":route_corpus\",\n    deps = [\n        \":route_fuzz_proto_cc_proto\",\n        \"//source/common/router:config_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"router_ratelimit_test\",\n    srcs = [\"router_ratelimit_test.cc\"],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/router:config_lib\",\n        \"//source/common/router:router_ratelimit_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/ratelimit:ratelimit_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"router_test\",\n    srcs = [\"router_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/network:application_protocol_lib\",\n        \"//source/common/network:utility_lib\",\n        
\"//source/common/router:router_lib\",\n        \"//source/common/stream_info:uint32_accessor_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/upstreams/http/generic:config\",\n        \"//source/extensions/upstreams/http/http:config\",\n        \"//source/extensions/upstreams/http/tcp:config\",\n        \"//test/common/http:common_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/upstreams/http/http/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/upstreams/http/tcp/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"router_upstream_log_test\",\n    srcs = [\"router_upstream_log_test.cc\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/router:router_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/access_loggers/file:config\",\n        \"//source/extensions/upstreams/http/generic:config\",\n        \"//test/common/http:common_lib\",\n        
\"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"shadow_writer_impl_test\",\n    srcs = [\"shadow_writer_impl_test.cc\"],\n    deps = [\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/router:shadow_writer_lib\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"header_formatter_test\",\n    srcs = [\"header_formatter_test.cc\"],\n    deps = [\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/router:header_formatter_lib\",\n        \"//source/common/router:header_parser_lib\",\n        \"//source/common/router:string_accessor_lib\",\n        \"//source/common/stream_info:filter_state_lib\",\n        \"//test/common/stream_info:test_int_accessor_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:utility_lib\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"string_accessor_impl_test\",\n    srcs = [\"string_accessor_impl_test.cc\"],\n    deps = [\n        \"//source/common/router:string_accessor_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"upstream_request_test\",\n    srcs = [\"upstream_request_test.cc\"],\n    deps = [\n        \"//source/common/router:router_lib\",\n        \"//test/mocks/router:router_filter_interface\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"router_fuzz_proto\",\n    srcs = [\"router_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/router/v3:pkg\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"config_impl_speed_test\",\n    srcs = [\"config_impl_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/router:config_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"config_impl_benchmark_test\",\n    benchmark_binary = \"config_impl_speed_test\",\n)\n"
  },
  {
    "path": "test/common/router/config_impl_headermap_benchmark_test.cc",
    "content": "#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route.pb.validate.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/router/config_impl.h\"\n\n#include \"test/mocks/server/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"benchmark/benchmark.h\"\n\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Router {\n\n/**\n * Measure the time it takes to iterate over country route configurations until\n * the default route is taken. This emulates a case where the router has 250\n * different configuration (for 250 countries), and multiple requests that\n * aren't matched are tested against. The test allows the performance comparison\n * of different header map implementations.\n *\n * Note: the benchmark includes the time to setup the config routes and add all\n * the request headers once.\n * */\nstatic void manyCountryRoutesLongHeaders(benchmark::State& state) {\n  // Add a route configuration with multiple route, each has a different\n  // x-country<N> header required to that route.\n  const size_t countries_num = 250;\n  const Http::LowerCaseString country_header_name(\"x-country\");\n  envoy::config::route::v3::RouteConfiguration proto_config;\n  auto main_virtual_host = proto_config.mutable_virtual_hosts()->Add();\n  main_virtual_host->set_name(\"default\");\n  main_virtual_host->mutable_domains()->Add(\"*\");\n  // Add countries routes.\n  std::vector<std::string> countries;\n  for (size_t i = 0; i < countries_num; i++) {\n    auto country_name = absl::StrCat(\"country\", i);\n    countries.push_back(country_name);\n    // Add the country route.\n    auto new_routes = main_virtual_host->mutable_routes()->Add();\n    new_routes->mutable_match()->set_prefix(\"/\");\n    new_routes->mutable_route()->set_cluster(country_name);\n    auto headers_matcher = new_routes->mutable_match()->mutable_headers()->Add();\n    
headers_matcher->set_name(country_header_name.get());\n    headers_matcher->set_exact_match(country_name);\n  }\n  // Add the default route.\n  auto new_routes = main_virtual_host->mutable_routes()->Add();\n  new_routes->mutable_match()->set_prefix(\"/\");\n  new_routes->mutable_route()->set_cluster(\"default\");\n\n  // Setup the config parsing.\n  Api::ApiPtr api(Api::createApiForTest());\n  NiceMock<Server::Configuration::MockServerFactoryContext> factory_context;\n  ON_CALL(factory_context, api()).WillByDefault(ReturnRef(*api));\n  ConfigImpl config(proto_config, factory_context, ProtobufMessage::getNullValidationVisitor(),\n                    true);\n\n  const auto stream_info = NiceMock<Envoy::StreamInfo::MockStreamInfo>();\n  auto req_headers = Http::TestRequestHeaderMapImpl{{\":authority\", \"www.lyft.com\"},\n                                                    {\":path\", \"/\"},\n                                                    {\":method\", \"GET\"},\n                                                    {\"x-forwarded-proto\", \"http\"}};\n  // Add dummy headers to reach ~100 headers (limit per request).\n  for (int i = 0; i < 90; i++) {\n    req_headers.addCopy(Http::LowerCaseString(absl::StrCat(\"dummyheader\", i)), \"some_value\");\n  }\n  req_headers.addReferenceKey(country_header_name, absl::StrCat(\"country\", countries_num));\n  for (auto _ : state) { // NOLINT\n    auto& result = config.route(req_headers, stream_info, 0)->routeEntry()->clusterName();\n    benchmark::DoNotOptimize(result);\n  }\n}\nBENCHMARK(manyCountryRoutesLongHeaders)\n    ->Arg(0)\n    ->Arg(1)\n    ->Arg(5)\n    ->Arg(10)\n    ->Arg(100)\n    ->Arg(1000)\n    ->Arg(5000)\n    ->Arg(10000);\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/config_impl_speed_test.cc",
    "content": "#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route.pb.validate.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/router/config_impl.h\"\n\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"benchmark/benchmark.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nusing envoy::config::route::v3::DirectResponseAction;\nusing envoy::config::route::v3::Route;\nusing envoy::config::route::v3::RouteConfiguration;\nusing envoy::config::route::v3::RouteMatch;\nusing envoy::config::route::v3::VirtualHost;\nusing testing::NiceMock;\nusing testing::ReturnRef;\n\n/**\n * Generates a request with the path:\n * - /shelves/shelf_x/route_x\n */\nstatic Http::TestRequestHeaderMapImpl genRequestHeaders(int route_num) {\n  return Http::TestRequestHeaderMapImpl{\n      {\":authority\", \"www.google.com\"},\n      {\":method\", \"GET\"},\n      {\":path\", absl::StrCat(\"/shelves/shelf_\", route_num, \"/route_\", route_num)},\n      {\"x-forwarded-proto\", \"http\"}};\n}\n\n/**\n * Generates the route config for the type of matcher being tested.\n */\nstatic RouteConfiguration genRouteConfig(benchmark::State& state,\n                                         RouteMatch::PathSpecifierCase match_type) {\n  // Create the base route config.\n  RouteConfiguration route_config;\n  VirtualHost* v_host = route_config.add_virtual_hosts();\n  v_host->set_name(\"default\");\n  v_host->add_domains(\"*\");\n\n  // Create `n` regex routes. 
The last route will be the only one matched.\n  for (int i = 0; i < state.range(0); ++i) {\n    Route* route = v_host->add_routes();\n    DirectResponseAction* direct_response = route->mutable_direct_response();\n    direct_response->set_status(200);\n    RouteMatch* match = route->mutable_match();\n\n    switch (match_type) {\n    case RouteMatch::PathSpecifierCase::kPrefix: {\n      match->set_prefix(absl::StrCat(\"/shelves/shelf_\", i, \"/\"));\n      break;\n    }\n    case RouteMatch::PathSpecifierCase::kPath: {\n      match->set_prefix(absl::StrCat(\"/shelves/shelf_\", i, \"/route_\", i));\n      break;\n    }\n    case RouteMatch::PathSpecifierCase::kSafeRegex: {\n      envoy::type::matcher::v3::RegexMatcher* regex = match->mutable_safe_regex();\n      regex->mutable_google_re2();\n      regex->set_regex(absl::StrCat(\"^/shelves/[^\\\\\\\\/]+/route_\", i, \"$\"));\n      break;\n    }\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  return route_config;\n}\n\n/**\n * Measure the speed of doing a route match against a route table of varying sizes.\n * Why? Currently, route matching is linear in first-to-win ordering.\n *\n * We construct the first `n - 1` items in the route table so they are not\n * matched by the incoming request. 
Only the last route will be matched.\n * We then time how long it takes for the request to be matched against the\n * last route.\n */\nstatic void bmRouteTableSize(benchmark::State& state, RouteMatch::PathSpecifierCase match_type) {\n  // Setup router for benchmarking.\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.preserve_query_string_in_path_redirects\", \"false\"}});\n  Api::ApiPtr api = Api::createApiForTest();\n  NiceMock<Server::Configuration::MockServerFactoryContext> factory_context;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  ON_CALL(factory_context, api()).WillByDefault(ReturnRef(*api));\n\n  // Create router config.\n  ConfigImpl config(genRouteConfig(state, match_type), factory_context,\n                    ProtobufMessage::getNullValidationVisitor(), true);\n\n  for (auto _ : state) { // NOLINT\n    // Do the actual timing here.\n    // Single request that will match the last route in the config.\n    int last_route_num = state.range(0) - 1;\n    config.route(genRequestHeaders(last_route_num), stream_info, 0);\n  }\n}\n\n/**\n * Benchmark a route table with path prefix matchers in the form of:\n * - /shelves/shelf_1/...\n * - /shelves/shelf_2/...\n * - etc.\n */\nstatic void bmRouteTableSizeWithPathPrefixMatch(benchmark::State& state) {\n  bmRouteTableSize(state, RouteMatch::PathSpecifierCase::kPrefix);\n}\n\n/**\n * Benchmark a route table with exact path matchers in the form of:\n * - /shelves/shelf_1/route_1\n * - /shelves/shelf_2/route_2\n * - etc.\n */\nstatic void bmRouteTableSizeWithExactPathMatch(benchmark::State& state) {\n  bmRouteTableSize(state, RouteMatch::PathSpecifierCase::kPath);\n}\n\n/**\n * Benchmark a route table with regex path matchers in the form of:\n * - /shelves/{shelf_id}/route_1\n * - /shelves/{shelf_id}/route_2\n * - etc.\n *\n * This represents common OpenAPI path templating.\n */\nstatic void 
bmRouteTableSizeWithRegexMatch(benchmark::State& state) {\n  bmRouteTableSize(state, RouteMatch::PathSpecifierCase::kSafeRegex);\n}\n\nBENCHMARK(bmRouteTableSizeWithPathPrefixMatch)->RangeMultiplier(2)->Ranges({{1, 2 << 13}});\nBENCHMARK(bmRouteTableSizeWithExactPathMatch)->RangeMultiplier(2)->Ranges({{1, 2 << 13}});\nBENCHMARK(bmRouteTableSizeWithRegexMatch)->RangeMultiplier(2)->Ranges({{1, 2 << 13}});\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/config_impl_test.cc",
    "content": "#include <chrono>\n#include <fstream>\n#include <list>\n#include <map>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route.pb.validate.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/router/config_impl.h\"\n#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"test/common/router/route_fuzz.pb.h\"\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/upstream/retry_priority.h\"\n#include \"test/mocks/upstream/retry_priority_factory.h\"\n#include \"test/mocks/upstream/test_retry_host_predicate_factory.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ContainerEq;\nusing testing::Eq;\nusing testing::Matcher;\nusing testing::MockFunction;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\n// Wrap ConfigImpl, the target of tests to allow us to regenerate the route_fuzz_test\n// corpus when run with:\n//   bazel run //test/common/router:config_impl_test\n//     --test_env=\"ROUTE_CORPUS_PATH=$PWD/test/common/router/route_corpus\"\nclass TestConfigImpl : public ConfigImpl {\npublic:\n  TestConfigImpl(const envoy::config::route::v3::RouteConfiguration& config,\n        
         Server::Configuration::ServerFactoryContext& factory_context,\n                 bool validate_clusters_default)\n      : ConfigImpl(config, factory_context, ProtobufMessage::getNullValidationVisitor(),\n                   validate_clusters_default),\n        config_(config) {}\n\n  void setupRouteConfig(const Http::RequestHeaderMap& headers, uint64_t random_value) const {\n    absl::optional<std::string> corpus_path =\n        TestEnvironment::getOptionalEnvVar(\"GENRULE_OUTPUT_DIR\");\n    if (corpus_path) {\n      static uint32_t n;\n      test::common::router::RouteTestCase route_test_case;\n      route_test_case.mutable_config()->MergeFrom(config_);\n      route_test_case.mutable_headers()->MergeFrom(Fuzz::toHeaders(headers));\n      route_test_case.set_random_value(random_value);\n      const std::string path = fmt::format(\"{}/generated_corpus_{}\", corpus_path.value(), n++);\n      const std::string corpus = route_test_case.DebugString();\n      {\n        std::ofstream corpus_file(path);\n        ENVOY_LOG_MISC(debug, \"Writing {} to {}\", corpus, path);\n        corpus_file << corpus;\n      }\n    }\n  }\n\n  RouteConstSharedPtr route(const Http::RequestHeaderMap& headers,\n                            const Envoy::StreamInfo::StreamInfo& stream_info,\n                            uint64_t random_value) const override {\n\n    setupRouteConfig(headers, random_value);\n    return ConfigImpl::route(headers, stream_info, random_value);\n  }\n\n  RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers,\n                            const StreamInfo::StreamInfo& stream_info,\n                            uint64_t random_value) const override {\n\n    setupRouteConfig(headers, random_value);\n    return ConfigImpl::route(cb, headers, stream_info, random_value);\n  }\n\n  RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers) const {\n    return route(cb, headers, 
NiceMock<Envoy::StreamInfo::MockStreamInfo>(), 0);\n  }\n\n  RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, uint64_t random_value) const {\n    return route(headers, NiceMock<Envoy::StreamInfo::MockStreamInfo>(), random_value);\n  }\n\n  const envoy::config::route::v3::RouteConfiguration config_;\n};\n\nHttp::TestRequestHeaderMapImpl genPathlessHeaders(const std::string& host,\n                                                  const std::string& method) {\n  return Http::TestRequestHeaderMapImpl{{\":authority\", host},         {\":method\", method},\n                                        {\"x-safe\", \"safe\"},           {\"x-global-nope\", \"global\"},\n                                        {\"x-vhost-nope\", \"vhost\"},    {\"x-route-nope\", \"route\"},\n                                        {\"x-forwarded-proto\", \"http\"}};\n}\n\nHttp::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::string& path,\n                                          const std::string& method,\n                                          const std::string& forwarded_proto) {\n  auto hdrs = Http::TestRequestHeaderMapImpl{\n      {\":authority\", host},        {\":path\", path},\n      {\":method\", method},         {\"x-safe\", \"safe\"},\n      {\"x-global-nope\", \"global\"}, {\"x-vhost-nope\", \"vhost\"},\n      {\"x-route-nope\", \"route\"},   {\"x-forwarded-proto\", forwarded_proto}};\n\n  if (forwarded_proto.empty()) {\n    hdrs.remove(\"x-forwarded-proto\");\n  }\n\n  return hdrs;\n}\n\nHttp::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::string& path,\n                                          const std::string& method) {\n  return genHeaders(host, path, method, \"http\");\n}\n\n// Loads a V3 RouteConfiguration yaml\nenvoy::config::route::v3::RouteConfiguration\nparseRouteConfigurationFromYaml(const std::string& yaml) {\n  envoy::config::route::v3::RouteConfiguration route_config;\n  // Load the file and keep 
the annotations (in case of an upgrade) to make sure\n  // validate() observes the upgrade\n  TestUtility::loadFromYaml(yaml, route_config, true);\n  TestUtility::validate(route_config);\n  return route_config;\n}\n\nclass ConfigImplTestBase {\nprotected:\n  ConfigImplTestBase() : api_(Api::createApiForTest()) {\n    ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_));\n  }\n\n  std::string virtualHostName(const RouteEntry* route) {\n    Stats::StatName name = route->virtualHost().statName();\n    return factory_context_.scope().symbolTable().toString(name);\n  }\n\n  std::string virtualClusterName(const RouteEntry* route, Http::TestRequestHeaderMapImpl& headers) {\n    Stats::StatName name = route->virtualCluster(headers)->statName();\n    return factory_context_.scope().symbolTable().toString(name);\n  }\n\n  std::string responseHeadersConfig(const bool most_specific_wins, const bool append) const {\n    const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: www2\n    domains: [\"www.lyft.com\"]\n    response_headers_to_add:\n      - header:\n          key: x-global-header1\n          value: vhost-override\n        append: {1}\n      - header:\n          key: x-vhost-header1\n          value: vhost1-www2\n        append: {1}\n    response_headers_to_remove: [\"x-vhost-remove\"]\n    routes:\n      - match:\n          prefix: \"/new_endpoint\"\n        route:\n          prefix_rewrite: \"/api/new_endpoint\"\n          cluster: www2\n        response_headers_to_add:\n          - header:\n              key: x-route-header\n              value: route-override\n            append: {1}\n          - header:\n              key: x-global-header1\n              value: route-override\n            append: {1}\n          - header:\n              key: x-vhost-header1\n              value: route-override\n            append: {1}\n      - match:\n          path: \"/\"\n        route:\n          cluster: root_www2\n        response_headers_to_add:\n          - 
header:\n              key: x-route-header\n              value: route-allpath\n            append: {1}\n        response_headers_to_remove: [\"x-route-remove\"]\n      - match:\n          prefix: \"/\"\n        route:\n          cluster: \"www2\"\n  - name: www2_staging\n    domains: [\"www-staging.lyft.net\"]\n    response_headers_to_add:\n      - header:\n          key: x-vhost-header1\n          value: vhost1-www2_staging\n        append: {1}\n    routes:\n      - match:\n          prefix: \"/\"\n        route:\n          cluster: www2_staging\n        response_headers_to_add:\n          - header:\n              key: x-route-header\n              value: route-allprefix\n            append: {1}\n  - name: default\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/\"\n        route:\n          cluster: \"instant-server\"\ninternal_only_headers: [\"x-lyft-user-id\"]\nresponse_headers_to_add:\n  - header:\n      key: x-global-header1\n      value: global1\n    append: {1}\nresponse_headers_to_remove: [\"x-global-remove\"]\nmost_specific_header_mutations_wins: {0}\n)EOF\";\n\n    return fmt::format(yaml, most_specific_wins, append);\n  }\n\n  std::string requestHeadersConfig(const bool most_specific_wins) {\n    const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: www2\n    domains: [\"www.lyft.com\"]\n    request_headers_to_add:\n      - header:\n          key: x-global-header\n          value: vhost-www2\n        append: false\n      - header:\n          key: x-vhost-header\n          value: vhost-www2\n        append: false\n    request_headers_to_remove: [\"x-vhost-nope\"]\n    routes:\n      - match:\n          prefix: \"/endpoint\"\n        request_headers_to_add:\n          - header:\n              key: x-global-header\n              value: route-endpoint\n            append: false\n          - header:\n              key: x-vhost-header\n              value: route-endpoint\n            append: false\n          - header:\n          
    key: x-route-header\n              value: route-endpoint\n            append: false\n        request_headers_to_remove: [\"x-route-nope\"]\n        route:\n          cluster: www2\n      - match:\n          prefix: \"/\"\n        route:\n          cluster: www2\n  - name: default\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/\"\n        route:\n          cluster: default\nrequest_headers_to_add:\n  - header:\n      key: x-global-header\n      value: global\n    append: false\nrequest_headers_to_remove: [\"x-global-nope\"]\nmost_specific_header_mutations_wins: {0}\n)EOF\";\n\n    return fmt::format(yaml, most_specific_wins);\n  }\n\n  Stats::TestSymbolTable symbol_table_;\n  Api::ApiPtr api_;\n  NiceMock<Server::Configuration::MockServerFactoryContext> factory_context_;\n  Event::SimulatedTimeSystem test_time_;\n};\n\nclass RouteMatcherTest : public testing::Test, public ConfigImplTestBase {};\n\n// When removing legacy fields this test can be removed.\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestLegacyRoutes)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: regex\n  domains:\n  - bat.com\n  routes:\n  - match:\n      regex: \"/t[io]c\"\n    route:\n      cluster: clock\n  - match:\n      safe_regex:\n        google_re2: {}\n        regex: \"/baa+\"\n    route:\n      cluster: sheep\n  - match:\n      regex: \".*/\\\\d{3}$\"\n    route:\n      cluster: three_numbers\n      prefix_rewrite: \"/rewrote\"\n  - match:\n      regex: \".*\"\n    route:\n      cluster: regex_default\n- name: regex2\n  domains:\n  - bat2.com\n  routes:\n  - match:\n      regex: ''\n    route:\n      cluster: nothingness\n  - match:\n      regex: \".*\"\n    route:\n      cluster: regex_default\n- name: default\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: instant-server\n      timeout: 30s\n  virtual_clusters:\n  - pattern: \"^/rides$\"\n    method: POST\n    name: ride_request\n  - 
pattern: \"^/rides/\\\\d+$\"\n    method: PUT\n    name: update_ride\n  - pattern: \"^/users/\\\\d+/chargeaccounts$\"\n    method: POST\n    name: cc_add\n  - pattern: \"^/users/\\\\d+/chargeaccounts/(?!validate)\\\\w+$\"\n    method: PUT\n    name: cc_add\n  - pattern: \"^/users$\"\n    method: POST\n    name: create_user_login\n  - pattern: \"^/users/\\\\d+$\"\n    method: PUT\n    name: update_user\n  )EOF\";\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // Regular Expression matching\n  EXPECT_EQ(\"clock\",\n            config.route(genHeaders(\"bat.com\", \"/tic\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"clock\",\n            config.route(genHeaders(\"bat.com\", \"/toc\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"/tac\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"/tick\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"/tic/toc\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"sheep\",\n            config.route(genHeaders(\"bat.com\", \"/baa\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\n      \"sheep\",\n      config.route(genHeaders(\"bat.com\", \"/baaaaaaaaaaaa\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"/ba\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"nothingness\",\n            config.route(genHeaders(\"bat2.com\", \"\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            
config.route(genHeaders(\"bat2.com\", \"/foo\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat2.com\", \" \", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_TRUE(config.route(genPathlessHeaders(\"bat2.com\", \"GET\"), 0) == nullptr);\n\n  // Regular Expression matching with query string params\n  EXPECT_EQ(\n      \"clock\",\n      config.route(genHeaders(\"bat.com\", \"/tic?tac=true\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\n      \"regex_default\",\n      config.route(genHeaders(\"bat.com\", \"/tac?tic=true\", \"GET\"), 0)->routeEntry()->clusterName());\n\n  // Virtual cluster testing.\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides\", \"GET\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides/blah\", \"POST\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides\", \"POST\");\n    EXPECT_EQ(\"ride_request\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides/123\", \"PUT\");\n    EXPECT_EQ(\"update_ride\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides/123/456\", \"POST\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/users/123/chargeaccounts\", \"POST\");\n    EXPECT_EQ(\"cc_add\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    
Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/users/123/chargeaccounts/hello123\", \"PUT\");\n    EXPECT_EQ(\"cc_add\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/users/123/chargeaccounts/validate\", \"PUT\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/foo/bar\", \"PUT\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/users\", \"POST\");\n    EXPECT_EQ(\"create_user_login\",\n              virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/users/123\", \"PUT\");\n    EXPECT_EQ(\"update_user\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/something/else\", \"GET\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n}\n\nTEST_F(RouteMatcherTest, TestConnectRoutes) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: connect\n  domains:\n  - bat3.com\n  routes:\n  - match:\n      safe_regex:\n        google_re2: {}\n        regex: \"foobar\"\n    route:\n      cluster: connect_break\n  - match:\n      connect_matcher:\n        {}\n    route:\n      cluster: connect_match\n      prefix_rewrite: \"/rewrote\"\n  - match:\n      safe_regex:\n        google_re2: {}\n        regex: \".*\"\n    route:\n      cluster: connect_fallthrough\n- name: connect2\n  domains:\n  - bat4.com\n  routes:\n  - match:\n      connect_matcher:\n        {}\n    redirect: { 
path_redirect: /new_path }\n- name: connect3\n  domains:\n  - bat5.com\n  routes:\n  - match:\n      connect_matcher:\n        {}\n      headers:\n      - name: x-safe\n        exact_match: \"safe\"\n    route:\n      cluster: connect_header_match\n- name: default\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: instant-server\n      timeout: 30s\n  virtual_clusters:\n  - headers:\n    - name: \":path\"\n      safe_regex_match:\n        google_re2: {}\n        regex: \"^/users/\\\\d+/location$\"\n    - name: \":method\"\n      exact_match: POST\n    name: ulu\n  )EOF\";\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // Connect matching\n  EXPECT_EQ(\"connect_match\",\n            config.route(genHeaders(\"bat3.com\", \" \", \"CONNECT\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\n      \"connect_match\",\n      config.route(genPathlessHeaders(\"bat3.com\", \"CONNECT\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"connect_fallthrough\",\n            config.route(genHeaders(\"bat3.com\", \" \", \"GET\"), 0)->routeEntry()->clusterName());\n\n  // Prefix rewrite for CONNECT with path (for HTTP/2)\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"bat3.com\", \"/api/locations?works=true\", \"CONNECT\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/rewrote?works=true\", headers.get_(Http::Headers::get().Path));\n  }\n  // Prefix rewrite for CONNECT without path (for non-crashing)\n  {\n    Http::TestRequestHeaderMapImpl headers = genPathlessHeaders(\"bat4.com\", \"CONNECT\");\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    ASSERT(redirect != nullptr);\n    redirect->rewritePathHeader(headers, true);\n    
EXPECT_EQ(\"http://bat4.com/new_path\", redirect->newPath(headers));\n  }\n\n  // Header matching (for HTTP/1.1)\n  EXPECT_EQ(\n      \"connect_header_match\",\n      config.route(genPathlessHeaders(\"bat5.com\", \"CONNECT\"), 0)->routeEntry()->clusterName());\n\n  // Header matching (for HTTP/2)\n  EXPECT_EQ(\"connect_header_match\",\n            config.route(genHeaders(\"bat5.com\", \" \", \"CONNECT\"), 0)->routeEntry()->clusterName());\n}\n\nTEST_F(RouteMatcherTest, TestRoutes) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - lyft.com\n  - www.lyft.com\n  - w.lyft.com\n  - ww.lyft.com\n  - wwww.lyft.com\n  routes:\n  - match:\n      prefix: \"/new_endpoint\"\n    route:\n      prefix_rewrite: \"/api/new_endpoint\"\n      cluster: www2\n  - match:\n      prefix: \"/newforreg1_endpoint\"\n    route:\n      regex_rewrite:\n        pattern:\n          google_re2: {}\n          regex: \"^/new(.*?)_endpoint(.*)$\"\n        substitution: /\\1_rewritten_endpoint\\2\n      cluster: www2\n  - match:\n      prefix: \"/newforreg2_endpoint\"\n    route:\n      regex_rewrite:\n        pattern:\n          google_re2: {}\n          regex: \"e\"\n        substitution: \"X\"\n      cluster: www2\n  - match:\n      path: \"/exact/path/for/regex1\"\n      case_sensitive: true\n    route:\n      cluster: www2\n      regex_rewrite:\n        pattern:\n          google_re2: {}\n          regex: \"[aeioe]\"\n        substitution: \"V\"\n  - match:\n      path: \"/\"\n    route:\n      cluster: root_www2\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n- name: www2_staging\n  domains:\n  - www-staging.lyft.net\n  - www-staging-orca.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2_staging\n- name: wildcard\n  domains:\n  - \"*.foo.com\"\n  - \"*-bar.baz.com\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: wildcard\n- name: wildcard2\n  domains:\n  - \"*.baz.com\"\n  
routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: wildcard2\n- name: regex\n  domains:\n  - bat.com\n  routes:\n  - match:\n      safe_regex:\n        google_re2: {}\n        regex: \"/t[io]c\"\n    route:\n      cluster: clock\n  - match:\n      safe_regex:\n        google_re2: {}\n        regex: \"/baa+\"\n    route:\n      cluster: sheep\n  - match:\n      safe_regex:\n        google_re2: {}\n        regex: \".*/\\\\d{3}$\"\n    route:\n      cluster: three_numbers\n      prefix_rewrite: \"/rewrote\"\n  - match:\n      safe_regex:\n        google_re2: {}\n        regex: \".*/\\\\d{4}$\"\n    route:\n      cluster: four_numbers\n      regex_rewrite:\n        pattern:\n          google_re2: {}\n          regex: \"(^.*)/(\\\\d{4})$\"\n        substitution: /four/\\2/endpoint\\1\n  - match:\n      safe_regex:\n        google_re2: {}\n        regex: \".*\"\n    route:\n      cluster: regex_default\n- name: regex2\n  domains:\n  - bat2.com\n  routes:\n  - match:\n      safe_regex:\n        google_re2: {}\n        regex: \".*\"\n    route:\n      cluster: regex_default\n- name: default\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/api/application_data\"\n    route:\n      cluster: ats\n  - match:\n      path: \"/api/locations\"\n      case_sensitive: false\n    route:\n      cluster: locations\n      prefix_rewrite: \"/rewrote\"\n  - match:\n      prefix: \"/api/leads/me\"\n    route:\n      cluster: ats\n  - match:\n      prefix: \"/host/rewrite/me\"\n    route:\n      cluster: ats\n      host_rewrite_literal: new_host\n  - match:\n      prefix: \"/oldhost/rewrite/me\"\n    route:\n      cluster: ats\n      host_rewrite_literal: new_oldhost\n  - match:\n      path: \"/foo\"\n      case_sensitive: true\n    route:\n      prefix_rewrite: \"/bar\"\n      cluster: instant-server\n  - match:\n      path: \"/tar\"\n      case_sensitive: false\n    route:\n      prefix_rewrite: \"/car\"\n      cluster: instant-server\n  - match:\n      
prefix: \"/newhost/rewrite/me\"\n      case_sensitive: false\n    route:\n      cluster: ats\n      host_rewrite_literal: new_host\n  - match:\n      path: \"/FOOD\"\n      case_sensitive: false\n    route:\n      prefix_rewrite: \"/cAndy\"\n      cluster: ats\n  - match:\n      path: \"/ApplEs\"\n      case_sensitive: true\n    route:\n      prefix_rewrite: \"/oranGES\"\n      cluster: instant-server\n  - match:\n      path: \"/rewrite-host-with-header-value\"\n    request_headers_to_add:\n    - header:\n        key: x-rewrite-host\n        value: rewrote\n    route:\n      cluster: ats\n      host_rewrite_header: x-rewrite-host\n  - match:\n      path: \"/do-not-rewrite-host-with-header-value\"\n    route:\n      cluster: ats\n      host_rewrite_header: x-rewrite-host\n  - match:\n      path: \"/rewrite-host-with-path-regex/envoyproxy.io\"\n    route:\n      cluster: ats\n      host_rewrite_path_regex:\n        pattern:\n          google_re2: {}\n          regex: \"^/.+/(.+)$\"\n        substitution: \\1\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: instant-server\n      timeout: 30s\n  virtual_clusters:\n  - headers:\n    - name: \":path\"\n      safe_regex_match:\n        google_re2: {}\n        regex: \"^/rides$\"\n    - name: \":method\"\n      exact_match: POST\n    name: ride_request\n  - headers:\n    - name: \":path\"\n      safe_regex_match:\n        google_re2: {}\n        regex: \"^/rides/\\\\d+$\"\n    - name: \":method\"\n      exact_match: PUT\n    name: update_ride\n  - headers:\n    - name: \":path\"\n      safe_regex_match:\n        google_re2: {}\n        regex: \"^/users/\\\\d+/chargeaccounts$\"\n    - name: \":method\"\n      exact_match: POST\n    name: cc_add\n  - headers:\n    - name: \":path\"\n      safe_regex_match:\n        google_re2: {}\n        regex: \"^/users$\"\n    - name: \":method\"\n      exact_match: POST\n    name: create_user_login\n  - headers:\n    - name: \":path\"\n      safe_regex_match:\n        
google_re2: {}\n        regex: \"^/users/\\\\d+$\"\n    - name: \":method\"\n      exact_match: PUT\n    name: update_user\n  - headers:\n    - name: \":path\"\n      safe_regex_match:\n        google_re2: {}\n        regex: \"^/users/\\\\d+/location$\"\n    - name: \":method\"\n      exact_match: POST\n    name: ulu\n  )EOF\";\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // No host header, no x-forwarded-proto and no path header testing.\n  EXPECT_EQ(nullptr,\n            config.route(Http::TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\":method\", \"GET\"}}, 0));\n  EXPECT_EQ(nullptr, config.route(Http::TestRequestHeaderMapImpl{{\":authority\", \"foo\"},\n                                                                 {\":path\", \"/\"},\n                                                                 {\":method\", \"GET\"}},\n                                  0));\n  EXPECT_EQ(nullptr, config.route(Http::TestRequestHeaderMapImpl{{\":authority\", \"foo\"},\n                                                                 {\":method\", \"CONNECT\"},\n                                                                 {\"x-forwarded-proto\", \"http\"}},\n                                  0));\n\n  // Base routing testing.\n  EXPECT_EQ(\"instant-server\",\n            config.route(genHeaders(\"api.lyft.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"ats\", config.route(genHeaders(\"api.lyft.com\", \"/api/leads/me\", \"GET\"), 0)\n                       ->routeEntry()\n                       ->clusterName());\n  EXPECT_EQ(\"ats\", config.route(genHeaders(\"api.lyft.com\", \"/api/application_data\", \"GET\"), 0)\n                       ->routeEntry()\n                       ->clusterName());\n\n  EXPECT_EQ(\"locations\",\n            config.route(genHeaders(\"api.lyft.com\", \"/api/locations?works=true\", \"GET\"), 0)\n                
->routeEntry()\n                ->clusterName());\n  EXPECT_EQ(\"locations\", config.route(genHeaders(\"api.lyft.com\", \"/api/locations\", \"GET\"), 0)\n                             ->routeEntry()\n                             ->clusterName());\n  EXPECT_EQ(\"www2\",\n            config.route(genHeaders(\"lyft.com\", \"/foo\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"root_www2\",\n            config.route(genHeaders(\"wwww.lyft.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"www2\",\n            config.route(genHeaders(\"LYFT.COM\", \"/foo\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"root_www2\",\n            config.route(genHeaders(\"wWww.LyfT.coM\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n\n  // Wildcards\n  EXPECT_EQ(\"wildcard\",\n            config.route(genHeaders(\"www.foo.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\n      \"wildcard\",\n      config.route(genHeaders(\"foo-bar.baz.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"wildcard2\",\n            config.route(genHeaders(\"-bar.baz.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"wildcard2\",\n            config.route(genHeaders(\"bar.baz.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"instant-server\",\n            config.route(genHeaders(\".foo.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"instant-server\",\n            config.route(genHeaders(\"foo.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n\n  // Regular Expression matching\n  EXPECT_EQ(\"clock\",\n            config.route(genHeaders(\"bat.com\", \"/tic\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"clock\",\n            config.route(genHeaders(\"bat.com\", \"/toc\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"/tac\", \"GET\"), 
0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"/tick\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"/tic/toc\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"sheep\",\n            config.route(genHeaders(\"bat.com\", \"/baa\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\n      \"sheep\",\n      config.route(genHeaders(\"bat.com\", \"/baaaaaaaaaaaa\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat.com\", \"/ba\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat2.com\", \"/foo\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"regex_default\",\n            config.route(genHeaders(\"bat2.com\", \" \", \"GET\"), 0)->routeEntry()->clusterName());\n\n  // Regular Expression matching with query string params\n  EXPECT_EQ(\n      \"clock\",\n      config.route(genHeaders(\"bat.com\", \"/tic?tac=true\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\n      \"regex_default\",\n      config.route(genHeaders(\"bat.com\", \"/tac?tic=true\", \"GET\"), 0)->routeEntry()->clusterName());\n\n  // Timeout testing.\n  EXPECT_EQ(std::chrono::milliseconds(30000),\n            config.route(genHeaders(\"api.lyft.com\", \"/\", \"GET\"), 0)->routeEntry()->timeout());\n  EXPECT_EQ(\n      std::chrono::milliseconds(15000),\n      config.route(genHeaders(\"api.lyft.com\", \"/api/leads/me\", \"GET\"), 0)->routeEntry()->timeout());\n\n  // Prefix rewrite testing.\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/new_endpoint/foo\", \"GET\");\n    const RouteEntry* route = config.route(headers, 
0)->routeEntry();\n    EXPECT_EQ(\"www2\", route->clusterName());\n    EXPECT_EQ(\"www2\", virtualHostName(route));\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/api/new_endpoint/foo\", headers.get_(Http::Headers::get().Path));\n    EXPECT_EQ(\"/new_endpoint/foo\", headers.get_(Http::Headers::get().EnvoyOriginalPath));\n  }\n\n  // Prefix rewrite testing (x-envoy-* headers suppressed).\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/new_endpoint/foo\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"www2\", route->clusterName());\n    EXPECT_EQ(\"www2\", virtualHostName(route));\n    route->finalizeRequestHeaders(headers, stream_info, false);\n    EXPECT_EQ(\"/api/new_endpoint/foo\", headers.get_(Http::Headers::get().Path));\n    EXPECT_FALSE(headers.has(Http::Headers::get().EnvoyOriginalPath));\n  }\n\n  // Prefix rewrite on path match with query string params\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/api/locations?works=true\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/rewrote?works=true\", headers.get_(Http::Headers::get().Path));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/foo\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/bar\", headers.get_(Http::Headers::get().Path));\n  }\n\n  // Regular expression path rewrite after prefix match testing.\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/newforreg1_endpoint/foo\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"www2\", route->clusterName());\n    EXPECT_EQ(\"www2\", 
virtualHostName(route));\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/forreg1_rewritten_endpoint/foo\", headers.get_(Http::Headers::get().Path));\n    EXPECT_EQ(\"/newforreg1_endpoint/foo\", headers.get_(Http::Headers::get().EnvoyOriginalPath));\n  }\n\n  // Regular expression path rewrite after prefix match testing, replace every\n  // occurrence, excluding query parameters.\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/newforreg2_endpoint/tee?test=me\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"www2\", route->clusterName());\n    EXPECT_EQ(\"www2\", virtualHostName(route));\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/nXwforrXg2_Xndpoint/tXX?test=me\", headers.get_(Http::Headers::get().Path));\n    EXPECT_EQ(\"/newforreg2_endpoint/tee?test=me\",\n              headers.get_(Http::Headers::get().EnvoyOriginalPath));\n  }\n\n  // Regular expression path rewrite after exact path match testing.\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/exact/path/for/regex1\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"www2\", route->clusterName());\n    EXPECT_EQ(\"www2\", virtualHostName(route));\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/VxVct/pVth/fVr/rVgVx1\", headers.get_(Http::Headers::get().Path));\n    EXPECT_EQ(\"/exact/path/for/regex1\", headers.get_(Http::Headers::get().EnvoyOriginalPath));\n  }\n\n  // Regular expression path rewrite after exact path match testing,\n  // with query parameters.\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/exact/path/for/regex1?test=aeiou\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"www2\", route->clusterName());\n    
EXPECT_EQ(\"www2\", virtualHostName(route));\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/VxVct/pVth/fVr/rVgVx1?test=aeiou\", headers.get_(Http::Headers::get().Path));\n    EXPECT_EQ(\"/exact/path/for/regex1?test=aeiou\",\n              headers.get_(Http::Headers::get().EnvoyOriginalPath));\n  }\n\n  // Host rewrite testing.\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/host/rewrite/me\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"new_host\", headers.get_(Http::Headers::get().Host));\n  }\n\n  // Rewrites host using supplied header.\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/rewrite-host-with-header-value\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"rewrote\", headers.get_(Http::Headers::get().Host));\n  }\n\n  // Does not rewrite host because of missing header.\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/do-not-rewrite-host-with-header-value\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"api.lyft.com\", headers.get_(Http::Headers::get().Host));\n  }\n\n  // Rewrites host using path.\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/rewrite-host-with-path-regex/envoyproxy.io\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"envoyproxy.io\", headers.get_(Http::Headers::get().Host));\n  }\n\n  // Rewrites host using path, removes query parameters\n  {\n    Http::TestRequestHeaderMapImpl headers 
= genHeaders(\n        \"api.lyft.com\", \"/rewrite-host-with-path-regex/envoyproxy.io?query=query\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"envoyproxy.io\", headers.get_(Http::Headers::get().Host));\n  }\n\n  // Case sensitive rewrite matching test.\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/API/locations?works=true\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/rewrote?works=true\", headers.get_(Http::Headers::get().Path));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/fooD\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/cAndy\", headers.get_(Http::Headers::get().Path));\n  }\n\n  // Case sensitive is set to true and will not rewrite\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/FOO\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/FOO\", headers.get_(Http::Headers::get().Path));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/ApPles\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/ApPles\", headers.get_(Http::Headers::get().Path));\n  }\n\n  // Case insensitive set to false so there is no rewrite\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/oLDhost/rewrite/me\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    
route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"api.lyft.com\", headers.get_(Http::Headers::get().Host));\n  }\n\n  // Case sensitive is set to false and will not rewrite\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/Tart\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/Tart\", headers.get_(Http::Headers::get().Path));\n  }\n\n  // Case sensitive is set to false and will not rewrite\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/newhost/rewrite/me\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"new_host\", headers.get_(Http::Headers::get().Host));\n  }\n\n  // Prefix rewrite for regular expression matching\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"bat.com\", \"/647\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/rewrote\", headers.get_(Http::Headers::get().Path));\n  }\n\n  // Prefix rewrite for regular expression matching with query string\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"bat.com\", \"/970?foo=true\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/rewrote?foo=true\", headers.get_(Http::Headers::get().Path));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"bat.com\", \"/foo/bar/238?bar=true\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/rewrote?bar=true\", headers.get_(Http::Headers::get().Path));\n  }\n\n  
// Regular expression rewrite for regular expression matching\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"bat.com\", \"/xx/yy/6472\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/four/6472/endpoint/xx/yy\", headers.get_(Http::Headers::get().Path));\n    EXPECT_EQ(\"/xx/yy/6472\", headers.get_(Http::Headers::get().EnvoyOriginalPath));\n  }\n\n  // Regular expression rewrite for regular expression matching, with query parameters.\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"bat.com\", \"/xx/yy/6472?test=foo\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"/four/6472/endpoint/xx/yy?test=foo\", headers.get_(Http::Headers::get().Path));\n    EXPECT_EQ(\"/xx/yy/6472?test=foo\", headers.get_(Http::Headers::get().EnvoyOriginalPath));\n  }\n\n  // Virtual cluster testing.\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides\", \"GET\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides/blah\", \"POST\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides\", \"POST\");\n    EXPECT_EQ(\"ride_request\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides/123\", \"PUT\");\n    EXPECT_EQ(\"update_ride\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/rides/123/456\", \"POST\");\n    
EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/foo/bar\", \"PUT\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/users\", \"POST\");\n    EXPECT_EQ(\"create_user_login\",\n              virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/users/123\", \"PUT\");\n    EXPECT_EQ(\"update_user\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"api.lyft.com\", \"/users/123/location\", \"POST\");\n    EXPECT_EQ(\"ulu\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/something/else\", \"GET\");\n    EXPECT_EQ(\"other\", virtualClusterName(config.route(headers, 0)->routeEntry(), headers));\n  }\n}\n\nTEST_F(RouteMatcherTest, TestRoutesWithWildcardAndDefaultOnly) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: wildcard\n    domains: [\"*.solo.io\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: \"wildcard\" }\n  - name: default\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: \"default\" }\n  )EOF\";\n\n  const auto proto_config = parseRouteConfigurationFromYaml(yaml);\n  TestConfigImpl config(proto_config, factory_context_, true);\n\n  EXPECT_EQ(\"wildcard\",\n            config.route(genHeaders(\"gloo.solo.io\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"default\",\n            config.route(genHeaders(\"example.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n}\n\n// When 
deprecating regex: this test can be removed.\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutesWithInvalidRegexLegacy)) {\n  std::string invalid_route = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [\"*\"]\n    routes:\n      - match: { regex: \"/(+invalid)\" }\n        route: { cluster: \"regex\" }\n  )EOF\";\n\n  std::string invalid_virtual_cluster = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: \"regex\" }\n    virtual_clusters:\n      - pattern: \"^/(+invalid)\"\n        name: \"invalid\"\n  )EOF\";\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(invalid_route), factory_context_, true),\n      EnvoyException, \"Invalid regex '/\\\\(\\\\+invalid\\\\)':\");\n\n  EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromYaml(invalid_virtual_cluster),\n                                         factory_context_, true),\n                          EnvoyException, \"Invalid regex '\\\\^/\\\\(\\\\+invalid\\\\)':\");\n}\n\nTEST_F(RouteMatcherTest, TestRoutesWithInvalidRegex) {\n  std::string invalid_route = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [\"*\"]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/(+invalid)\"\n        route: { cluster: \"regex\" }\n  )EOF\";\n\n  std::string invalid_virtual_cluster = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: \"regex\" }\n    virtual_clusters:\n      name: \"invalid\"\n      headers:\n        name: \"invalid\"\n        safe_regex_match:\n          google_re2: {}\n          regex: \"^/(+invalid)\"\n  )EOF\";\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  EXPECT_THROW_WITH_REGEX(\n      
TestConfigImpl(parseRouteConfigurationFromYaml(invalid_route), factory_context_, true),\n      EnvoyException, \"no argument for repetition operator:\");\n\n  EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromYaml(invalid_virtual_cluster),\n                                         factory_context_, true),\n                          EnvoyException, \"no argument for repetition operator\");\n}\n\n// Virtual cluster that contains neither pattern nor regex. This must be checked while pattern is\n// deprecated.\nTEST_F(RouteMatcherTest, TestRoutesWithInvalidVirtualCluster) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: \"regex\" }\n    virtual_clusters:\n      - name: \"invalid\"\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"virtual clusters must define either 'pattern' or 'headers'\");\n}\n\n// Validates behavior of request_headers_to_add at router, vhost, and route levels.\nTEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - lyft.com\n  - www.lyft.com\n  - w.lyft.com\n  - ww.lyft.com\n  - wwww.lyft.com\n  request_headers_to_add:\n  - header:\n      key: x-global-header1\n      value: vhost-override\n  - header:\n      key: x-vhost-header1\n      value: vhost1-www2\n  routes:\n  - match:\n      prefix: \"/new_endpoint\"\n    route:\n      prefix_rewrite: \"/api/new_endpoint\"\n      cluster: www2\n    request_headers_to_add:\n    - header:\n        key: x-global-header1\n        value: route-override\n    - header:\n        key: x-vhost-header1\n        value: route-override\n    - header:\n        key: x-route-header\n        value: route-new_endpoint\n  - match:\n      path: \"/\"\n    route:\n      cluster: root_www2\n    
request_headers_to_add:\n    - header:\n        key: x-route-header\n        value: route-allpath\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n- name: www2_staging\n  domains:\n  - www-staging.lyft.net\n  - www-staging-orca.lyft.com\n  request_headers_to_add:\n  - header:\n      key: x-vhost-header1\n      value: vhost1-www2_staging\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2_staging\n    request_headers_to_add:\n    - header:\n        key: x-route-header\n        value: route-allprefix\n- name: default\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: instant-server\n      timeout: 3s\ninternal_only_headers:\n- x-lyft-user-id\nresponse_headers_to_add:\n- header:\n    key: x-envoy-upstream-canary\n    value: 'true'\nresponse_headers_to_remove:\n- x-envoy-upstream-canary\n- x-envoy-virtual-cluster\nrequest_headers_to_add:\n- header:\n    key: x-global-header1\n    value: global1\n  )EOF\";\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // Request header manipulation testing.\n  {\n    {\n      Http::TestRequestHeaderMapImpl headers =\n          genHeaders(\"www.lyft.com\", \"/new_endpoint/foo\", \"GET\");\n      const RouteEntry* route = config.route(headers, 0)->routeEntry();\n      route->finalizeRequestHeaders(headers, stream_info, true);\n      EXPECT_EQ(\"route-override\", headers.get_(\"x-global-header1\"));\n      EXPECT_EQ(\"route-override\", headers.get_(\"x-vhost-header1\"));\n      EXPECT_EQ(\"route-new_endpoint\", headers.get_(\"x-route-header\"));\n    }\n\n    // Multiple routes can have same route-level headers with different values.\n    {\n      Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n      const RouteEntry* route = config.route(headers, 0)->routeEntry();\n      
route->finalizeRequestHeaders(headers, stream_info, true);\n      EXPECT_EQ(\"vhost-override\", headers.get_(\"x-global-header1\"));\n      EXPECT_EQ(\"vhost1-www2\", headers.get_(\"x-vhost-header1\"));\n      EXPECT_EQ(\"route-allpath\", headers.get_(\"x-route-header\"));\n    }\n\n    // Multiple virtual hosts can have same virtual host level headers with different values.\n    {\n      Http::TestRequestHeaderMapImpl headers = genHeaders(\"www-staging.lyft.net\", \"/foo\", \"GET\");\n      const RouteEntry* route = config.route(headers, 0)->routeEntry();\n      route->finalizeRequestHeaders(headers, stream_info, true);\n      EXPECT_EQ(\"global1\", headers.get_(\"x-global-header1\"));\n      EXPECT_EQ(\"vhost1-www2_staging\", headers.get_(\"x-vhost-header1\"));\n      EXPECT_EQ(\"route-allprefix\", headers.get_(\"x-route-header\"));\n    }\n\n    // Global headers.\n    {\n      Http::TestRequestHeaderMapImpl headers = genHeaders(\"api.lyft.com\", \"/\", \"GET\");\n      const RouteEntry* route = config.route(headers, 0)->routeEntry();\n      route->finalizeRequestHeaders(headers, stream_info, true);\n      EXPECT_EQ(\"global1\", headers.get_(\"x-global-header1\"));\n    }\n  }\n}\n\n// Validates behavior of request_headers_to_add at router, vhost, and route levels when append is\n// disabled.\nTEST_F(RouteMatcherTest, TestRequestHeadersToAddWithAppendFalse) {\n  const std::string yaml = requestHeadersConfig(false);\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  envoy::config::route::v3::RouteConfiguration route_config = parseRouteConfigurationFromYaml(yaml);\n\n  TestConfigImpl config(route_config, factory_context_, true);\n\n  // Request header manipulation testing.\n  {\n    // Global and virtual host override route, route overrides route action.\n    {\n      Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/endpoint\", \"GET\");\n      const RouteEntry* route = config.route(headers, 0)->routeEntry();\n      
route->finalizeRequestHeaders(headers, stream_info, true);\n      // Added headers.\n      EXPECT_EQ(\"global\", headers.get_(\"x-global-header\"));\n      EXPECT_EQ(\"vhost-www2\", headers.get_(\"x-vhost-header\"));\n      EXPECT_EQ(\"route-endpoint\", headers.get_(\"x-route-header\"));\n      // Removed headers.\n      EXPECT_FALSE(headers.has(\"x-global-nope\"));\n      EXPECT_FALSE(headers.has(\"x-vhost-nope\"));\n      EXPECT_FALSE(headers.has(\"x-route-nope\"));\n    }\n\n    // Global overrides virtual host.\n    {\n      Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n      const RouteEntry* route = config.route(headers, 0)->routeEntry();\n      route->finalizeRequestHeaders(headers, stream_info, true);\n      // Added headers.\n      EXPECT_EQ(\"global\", headers.get_(\"x-global-header\"));\n      EXPECT_EQ(\"vhost-www2\", headers.get_(\"x-vhost-header\"));\n      EXPECT_FALSE(headers.has(\"x-route-header\"));\n      // Removed headers.\n      EXPECT_FALSE(headers.has(\"x-global-nope\"));\n      EXPECT_FALSE(headers.has(\"x-vhost-nope\"));\n      EXPECT_TRUE(headers.has(\"x-route-nope\"));\n    }\n\n    // Global only.\n    {\n      Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.example.com\", \"/\", \"GET\");\n      const RouteEntry* route = config.route(headers, 0)->routeEntry();\n      route->finalizeRequestHeaders(headers, stream_info, true);\n      // Added headers.\n      EXPECT_EQ(\"global\", headers.get_(\"x-global-header\"));\n      EXPECT_FALSE(headers.has(\"x-vhost-header\"));\n      EXPECT_FALSE(headers.has(\"x-route-header\"));\n      // Removed headers.\n      EXPECT_FALSE(headers.has(\"x-global-nope\"));\n      EXPECT_TRUE(headers.has(\"x-vhost-nope\"));\n      EXPECT_TRUE(headers.has(\"x-route-nope\"));\n    }\n  }\n}\n\nTEST_F(RouteMatcherTest, TestRequestHeadersToAddWithAppendFalseMostSpecificWins) {\n  const std::string yaml = requestHeadersConfig(true);\n  
NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // Route overrides vhost and global.\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/endpoint\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    // Added headers.\n    EXPECT_EQ(\"route-endpoint\", headers.get_(\"x-global-header\"));\n    EXPECT_EQ(\"route-endpoint\", headers.get_(\"x-vhost-header\"));\n    EXPECT_EQ(\"route-endpoint\", headers.get_(\"x-route-header\"));\n    // Removed headers.\n    EXPECT_FALSE(headers.has(\"x-global-nope\"));\n    EXPECT_FALSE(headers.has(\"x-vhost-nope\"));\n    EXPECT_FALSE(headers.has(\"x-route-nope\"));\n  }\n\n  // Virtual overrides global.\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    // Added headers.\n    EXPECT_EQ(\"vhost-www2\", headers.get_(\"x-global-header\"));\n    EXPECT_EQ(\"vhost-www2\", headers.get_(\"x-vhost-header\"));\n    EXPECT_FALSE(headers.has(\"x-route-header\"));\n    // Removed headers.\n    EXPECT_FALSE(headers.has(\"x-global-nope\"));\n    EXPECT_FALSE(headers.has(\"x-vhost-nope\"));\n    EXPECT_TRUE(headers.has(\"x-route-nope\"));\n  }\n}\n\n// Validates behavior of response_headers_to_add and response_headers_to_remove at router, vhost,\n// and route levels.\nTEST_F(RouteMatcherTest, TestAddRemoveResponseHeaders) {\n  const std::string yaml = responseHeadersConfig(false, true);\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // Response header manipulation testing.\n  {\n    {\n      Http::TestRequestHeaderMapImpl 
req_headers =\n          genHeaders(\"www.lyft.com\", \"/new_endpoint/foo\", \"GET\");\n      const RouteEntry* route = config.route(req_headers, 0)->routeEntry();\n      Http::TestResponseHeaderMapImpl headers;\n      route->finalizeResponseHeaders(headers, stream_info);\n      EXPECT_EQ(\"route-override\", headers.get_(\"x-global-header1\"));\n      EXPECT_EQ(\"route-override\", headers.get_(\"x-vhost-header1\"));\n      EXPECT_EQ(\"route-override\", headers.get_(\"x-route-header\"));\n    }\n\n    // Multiple routes can have same route-level headers with different values.\n    {\n      Http::TestRequestHeaderMapImpl req_headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n      const RouteEntry* route = config.route(req_headers, 0)->routeEntry();\n      Http::TestResponseHeaderMapImpl headers;\n      route->finalizeResponseHeaders(headers, stream_info);\n      EXPECT_EQ(\"vhost-override\", headers.get_(\"x-global-header1\"));\n      EXPECT_EQ(\"vhost1-www2\", headers.get_(\"x-vhost-header1\"));\n      EXPECT_EQ(\"route-allpath\", headers.get_(\"x-route-header\"));\n    }\n\n    // Multiple virtual hosts can have same virtual host level headers with different values.\n    {\n      Http::TestRequestHeaderMapImpl req_headers =\n          genHeaders(\"www-staging.lyft.net\", \"/foo\", \"GET\");\n      const RouteEntry* route = config.route(req_headers, 0)->routeEntry();\n      Http::TestResponseHeaderMapImpl headers;\n      route->finalizeResponseHeaders(headers, stream_info);\n      EXPECT_EQ(\"global1\", headers.get_(\"x-global-header1\"));\n      EXPECT_EQ(\"vhost1-www2_staging\", headers.get_(\"x-vhost-header1\"));\n      EXPECT_EQ(\"route-allprefix\", headers.get_(\"x-route-header\"));\n    }\n\n    // Global headers.\n    {\n      Http::TestRequestHeaderMapImpl req_headers = genHeaders(\"api.lyft.com\", \"/\", \"GET\");\n      const RouteEntry* route = config.route(req_headers, 0)->routeEntry();\n      Http::TestResponseHeaderMapImpl headers;\n      
route->finalizeResponseHeaders(headers, stream_info);\n      EXPECT_EQ(\"global1\", headers.get_(\"x-global-header1\"));\n    }\n  }\n\n  EXPECT_THAT(std::list<Http::LowerCaseString>{Http::LowerCaseString(\"x-lyft-user-id\")},\n              ContainerEq(config.internalOnlyHeaders()));\n}\n\nTEST_F(RouteMatcherTest, TestAddRemoveResponseHeadersAppendFalse) {\n  const std::string yaml = responseHeadersConfig(false, false);\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  Http::TestRequestHeaderMapImpl req_headers =\n      genHeaders(\"www.lyft.com\", \"/new_endpoint/foo\", \"GET\");\n  const RouteEntry* route = config.route(req_headers, 0)->routeEntry();\n  Http::TestResponseHeaderMapImpl headers;\n  route->finalizeResponseHeaders(headers, stream_info);\n  EXPECT_EQ(\"global1\", headers.get_(\"x-global-header1\"));\n  EXPECT_EQ(\"vhost1-www2\", headers.get_(\"x-vhost-header1\"));\n  EXPECT_EQ(\"route-override\", headers.get_(\"x-route-header\"));\n}\n\nTEST_F(RouteMatcherTest, TestAddRemoveResponseHeadersAppendMostSpecificWins) {\n  const std::string yaml = responseHeadersConfig(true, false);\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  Http::TestRequestHeaderMapImpl req_headers =\n      genHeaders(\"www.lyft.com\", \"/new_endpoint/foo\", \"GET\");\n  const RouteEntry* route = config.route(req_headers, 0)->routeEntry();\n  Http::TestResponseHeaderMapImpl headers;\n  route->finalizeResponseHeaders(headers, stream_info);\n  EXPECT_EQ(\"route-override\", headers.get_(\"x-global-header1\"));\n  EXPECT_EQ(\"route-override\", headers.get_(\"x-vhost-header1\"));\n  EXPECT_EQ(\"route-override\", headers.get_(\"x-route-header\"));\n}\n\nTEST_F(RouteMatcherTest, TestAddGlobalResponseHeaderRemoveFromRoute) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - 
name: www2\n    domains: [\"www.lyft.com\"]\n    routes:\n      - match:\n          prefix: \"/cacheable\"\n        route:\n          cluster: www2\n        response_headers_to_remove: [\"cache-control\"]\n      - match:\n          prefix: \"/\"\n        route:\n          cluster: \"www2\"\nresponse_headers_to_add:\n  - header:\n      key: cache-control\n      value: private\nmost_specific_header_mutations_wins: true\n)EOF\";\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    Http::TestRequestHeaderMapImpl req_headers = genHeaders(\"www.lyft.com\", \"/cacheable\", \"GET\");\n    const RouteEntry* route = config.route(req_headers, 0)->routeEntry();\n    Http::TestResponseHeaderMapImpl headers;\n    route->finalizeResponseHeaders(headers, stream_info);\n    EXPECT_FALSE(headers.has(\"cache-control\"));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl req_headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    const RouteEntry* route = config.route(req_headers, 0)->routeEntry();\n    Http::TestResponseHeaderMapImpl headers;\n    route->finalizeResponseHeaders(headers, stream_info);\n    EXPECT_EQ(\"private\", headers.get_(\"cache-control\"));\n  }\n}\n\n// Validate that we can't add :-prefixed request headers.\nTEST_F(RouteMatcherTest, TestRequestHeadersToAddNoPseudoHeader) {\n  for (const std::string& header :\n       {\":path\", \":authority\", \":method\", \":scheme\", \":status\", \":protocol\"}) {\n    const std::string yaml = fmt::format(R\"EOF(\nvirtual_hosts:\n  - name: www2\n    domains: [\"*\"]\n    request_headers_to_add:\n      - header:\n          key: {}\n          value: vhost-www2\n        append: false\n)EOF\",\n                                         header);\n\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n    envoy::config::route::v3::RouteConfiguration route_config =\n        
parseRouteConfigurationFromYaml(yaml);\n\n    EXPECT_THROW_WITH_MESSAGE(TestConfigImpl config(route_config, factory_context_, true),\n                              EnvoyException, \":-prefixed headers may not be modified\");\n  }\n}\n\n// Validate that we can't remove :-prefixed request headers.\nTEST_F(RouteMatcherTest, TestRequestHeadersToRemoveNoPseudoHeader) {\n  for (const std::string& header :\n       {\":path\", \":authority\", \":method\", \":scheme\", \":status\", \":protocol\", \"host\"}) {\n    const std::string yaml = fmt::format(R\"EOF(\nvirtual_hosts:\n  - name: www2\n    domains: [\"*\"]\n    request_headers_to_remove:\n      - {}\n)EOF\",\n                                         header);\n\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n    envoy::config::route::v3::RouteConfiguration route_config =\n        parseRouteConfigurationFromYaml(yaml);\n\n    EXPECT_THROW_WITH_MESSAGE(TestConfigImpl config(route_config, factory_context_, true),\n                              EnvoyException, \":-prefixed or host headers may not be removed\");\n  }\n}\n\nTEST_F(RouteMatcherTest, Priority) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: local_service_grpc\n      priority: high\n  - match:\n      prefix: \"/bar\"\n    route:\n      cluster: local_service_grpc\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(Upstream::ResourcePriority::High,\n            config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)->routeEntry()->priority());\n  EXPECT_EQ(Upstream::ResourcePriority::Default,\n            config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)->routeEntry()->priority());\n}\n\nTEST_F(RouteMatcherTest, NoHostRewriteAndAutoRewrite) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - 
\"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: local_service\n      host_rewrite: foo\n      auto_host_rewrite: true\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, NoHostRewriteAndAutoRewriteHeader) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: local_service\n      host_rewrite: foo\n      auto_host_rewrite_header: \"dummy-header\"\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, NoAutoRewriteAndAutoRewriteHeader) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: local_service\n      auto_host_rewrite: true\n      auto_host_rewrite_header: \"dummy-header\"\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, HeaderMatchedRouting) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n      headers:\n      - name: test_header\n        exact_match: test\n    route:\n      cluster: local_service_with_headers\n  - match:\n      prefix: \"/\"\n      headers:\n      - name: test_header_multiple1\n        exact_match: test1\n      - name: test_header_multiple2\n        exact_match: test2\n    route:\n      cluster: local_service_with_multiple_headers\n  - match:\n      prefix: \"/\"\n      headers:\n      - name: test_header_presence\n        present_match: true\n    route:\n      cluster: local_service_with_empty_headers\n  - match:\n      prefix: 
\"/\"\n      headers:\n      - name: test_header_pattern\n        safe_regex_match:\n          google_re2: {}\n          regex: \"^user=test-\\\\d+$\"\n    route:\n      cluster: local_service_with_header_pattern_set_regex\n  - match:\n      prefix: \"/\"\n      headers:\n      - name: test_header_pattern\n        exact_match: \"^customer=test-\\\\d+$\"\n    route:\n      cluster: local_service_with_header_pattern_unset_regex\n  - match:\n      prefix: \"/\"\n      headers:\n      - name: test_header_range\n        range_match:\n          start: 1\n          end: 10\n    route:\n      cluster: local_service_with_header_range\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: local_service_without_headers\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    EXPECT_EQ(\"local_service_without_headers\",\n              config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header\", \"test\");\n    EXPECT_EQ(\"local_service_with_headers\", config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_multiple1\", \"test1\");\n    headers.addCopy(\"test_header_multiple2\", \"test2\");\n    EXPECT_EQ(\"local_service_with_multiple_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"non_existent_header\", \"foo\");\n    EXPECT_EQ(\"local_service_without_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    
headers.addCopy(\"test_header_presence\", \"test\");\n    EXPECT_EQ(\"local_service_with_empty_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_pattern\", \"user=test-1223\");\n    EXPECT_EQ(\"local_service_with_header_pattern_set_regex\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_pattern\", \"customer=test-1223\");\n    EXPECT_EQ(\"local_service_without_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_range\", \"9\");\n    EXPECT_EQ(\"local_service_with_header_range\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_range\", \"19\");\n    EXPECT_EQ(\"local_service_without_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n}\n\n// Verify the fixes for https://github.com/envoyproxy/envoy/issues/2406\n// When removing regex_match this test can be removed entirely.\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidHeaderMatchedRoutingConfigLegacy)) {\n  std::string value_with_regex_chars = R\"EOF(\nvirtual_hosts:\n  - name: local_service\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header\n              exact_match: \"(+not a regex)\"\n        route: { cluster: \"local_service\" }\n  )EOF\";\n\n  std::string invalid_regex = R\"EOF(\nvirtual_hosts:\n  - name: local_service\n    domains: 
[\"*\"]\n    routes:\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header\n              regex_match: \"(+invalid regex)\"\n        route: { cluster: \"local_service\" }\n  )EOF\";\n\n  EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(value_with_regex_chars),\n                                 factory_context_, true));\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(invalid_regex), factory_context_, true),\n      EnvoyException, \"Invalid regex\");\n}\n\n// Verify the fixes for https://github.com/envoyproxy/envoy/issues/2406\nTEST_F(RouteMatcherTest, InvalidHeaderMatchedRoutingConfig) {\n  std::string value_with_regex_chars = R\"EOF(\nvirtual_hosts:\n  - name: local_service\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header\n              exact_match: \"(+not a regex)\"\n        route: { cluster: \"local_service\" }\n  )EOF\";\n\n  std::string invalid_regex = R\"EOF(\nvirtual_hosts:\n  - name: local_service\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header\n              safe_regex_match:\n                google_re2: {}\n                regex: \"(+invalid regex)\"\n        route: { cluster: \"local_service\" }\n  )EOF\";\n\n  EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(value_with_regex_chars),\n                                 factory_context_, true));\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(invalid_regex), factory_context_, true),\n      EnvoyException, \"no argument for repetition operator\");\n}\n\n// When removing value: simply remove that section of the config and the relevant test.\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(QueryParamMatchedRouting)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - 
\"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n      query_parameters:\n      - name: id\n        value: \"\\\\d+[02468]\"\n        regex: true\n      - name: debug\n    route:\n      cluster: local_service_with_multiple_query_parameters\n  - match:\n      prefix: \"/\"\n      query_parameters:\n      - name: param\n        value: test\n    route:\n      cluster: local_service_with_query_parameter\n  - match:\n      prefix: \"/\"\n      query_parameters:\n      - name: debug\n    route:\n      cluster: local_service_with_valueless_query_parameter\n  - match:\n      prefix: \"/\"\n      query_parameters:\n      - name: debug2\n        present_match: true\n    route:\n      cluster: local_service_with_present_match_query_parameter\n  - match:\n      prefix: \"/\"\n      query_parameters:\n      - name: debug3\n        string_match:\n          exact: foo\n    route:\n      cluster: local_service_with_string_match_query_parameter\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: local_service_without_query_parameters\n\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"example.com\", \"/\", \"GET\");\n    EXPECT_EQ(\"local_service_without_query_parameters\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"example.com\", \"/?\", \"GET\");\n    EXPECT_EQ(\"local_service_without_query_parameters\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"example.com\", \"/?param=testing\", \"GET\");\n    EXPECT_EQ(\"local_service_without_query_parameters\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"example.com\", \"/?param=test\", \"GET\");\n    
EXPECT_EQ(\"local_service_with_query_parameter\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"example.com\", \"/?debug\", \"GET\");\n    EXPECT_EQ(\"local_service_with_valueless_query_parameter\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"example.com\", \"/?debug2\", \"GET\");\n    EXPECT_EQ(\"local_service_with_present_match_query_parameter\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"example.com\", \"/?debug3=foo\", \"GET\");\n    EXPECT_EQ(\"local_service_with_string_match_query_parameter\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"example.com\", \"/?debug=2\", \"GET\");\n    EXPECT_EQ(\"local_service_with_valueless_query_parameter\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"example.com\", \"/?param=test&debug&id=01\", \"GET\");\n    EXPECT_EQ(\"local_service_with_query_parameter\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"example.com\", \"/?param=test&debug&id=02\", \"GET\");\n    EXPECT_EQ(\"local_service_with_multiple_query_parameters\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n}\n\n// When removing value: this test can be removed.\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidQueryParamMatchedRoutingConfig)) {\n  std::string value_with_regex_chars = R\"EOF(\nvirtual_hosts:\n  - name: local_service\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/\"\n          query_parameters:\n    
        - name: test_param\n              value: \"(+not a regex)\"\n        route: { cluster: \"local_service\" }\n  )EOF\";\n\n  std::string invalid_regex = R\"EOF(\nvirtual_hosts:\n  - name: local_service\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/\"\n          query_parameters:\n            - name: test_param\n              value: \"(+invalid regex)\"\n              regex: true\n        route: { cluster: \"local_service\" }\n  )EOF\";\n\n  EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(value_with_regex_chars),\n                                 factory_context_, true));\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(invalid_regex), factory_context_, true),\n      EnvoyException, \"Invalid regex\");\n}\n\nclass RouterMatcherHashPolicyTest : public testing::Test, public ConfigImplTestBase {\nprotected:\n  RouterMatcherHashPolicyTest()\n      : add_cookie_nop_(\n            [](const std::string&, const std::string&, std::chrono::seconds) { return \"\"; }) {\n    const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: foo\n  - match:\n      prefix: \"/bar\"\n    route:\n      cluster: bar\n  )EOF\";\n    route_config_ = parseRouteConfigurationFromYaml(yaml);\n  }\n\n  envoy::config::route::v3::RouteAction::HashPolicy* firstRouteHashPolicy() {\n    auto hash_policies = route_config_.mutable_virtual_hosts(0)\n                             ->mutable_routes(0)\n                             ->mutable_route()\n                             ->mutable_hash_policy();\n    if (!hash_policies->empty()) {\n      return hash_policies->Mutable(0);\n    } else {\n      return hash_policies->Add();\n    }\n  }\n\n  TestConfigImpl& config() {\n    if (config_ == nullptr) {\n      config_ = std::make_unique<TestConfigImpl>(route_config_, factory_context_, true);\n    }\n    return *config_;\n 
 }\n\n  envoy::config::route::v3::RouteConfiguration route_config_;\n  Http::HashPolicy::AddCookieCallback add_cookie_nop_;\n\nprivate:\n  std::unique_ptr<TestConfigImpl> config_;\n};\n\nTEST_F(RouterMatcherHashPolicyTest, HashHeaders) {\n  firstRouteHashPolicy()->mutable_header()->set_header_name(\"foo_header\");\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_FALSE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie_nop_,\n                                                                 nullptr));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    headers.addCopy(\"foo_header\", \"bar\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_TRUE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie_nop_,\n                                                                nullptr));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/bar\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_EQ(nullptr, route->routeEntry()->hashPolicy());\n  }\n}\n\nTEST_F(RouterMatcherHashPolicyTest, HashHeadersRegexSubstitution) {\n  // Apply a regex substitution before hashing.\n  auto* header = firstRouteHashPolicy()->mutable_header();\n  header->set_header_name(\":path\");\n  auto* regex_spec = header->mutable_regex_rewrite();\n  regex_spec->set_substitution(\"\\\\1\");\n  auto* pattern = regex_spec->mutable_pattern();\n  pattern->mutable_google_re2();\n  pattern->set_regex(\"^/(\\\\w+)$\");\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    const auto foo_hash_value = 3728699739546630719;\n    
// Fixture whose first hash policy is a cookie policy named "hash"; shared by
// the cookie-hashing tests below.
class RouterMatcherCookieHashPolicyTest : public RouterMatcherHashPolicyTest {
public:
  RouterMatcherCookieHashPolicyTest() {
    // Hash on the value of the "hash" cookie; individual tests set TTL/path.
    firstRouteHashPolicy()->mutable_cookie()->set_name("hash");
  }
};
0);\n    EXPECT_EQ(nullptr, route->routeEntry()->hashPolicy());\n  }\n}\n\nTEST_F(RouterMatcherCookieHashPolicyTest, DifferentCookies) {\n  // Different cookies produce different hashes.\n  uint64_t hash_1, hash_2;\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    headers.addCopy(\"Cookie\", \"hash=brown\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    hash_1 = route->routeEntry()\n                 ->hashPolicy()\n                 ->generateHash(nullptr, headers, add_cookie_nop_, nullptr)\n                 .value();\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    headers.addCopy(\"Cookie\", \"hash=green\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    hash_2 = route->routeEntry()\n                 ->hashPolicy()\n                 ->generateHash(nullptr, headers, add_cookie_nop_, nullptr)\n                 .value();\n  }\n  EXPECT_NE(hash_1, hash_2);\n}\n\nTEST_F(RouterMatcherCookieHashPolicyTest, TtlSet) {\n  firstRouteHashPolicy()->mutable_cookie()->mutable_ttl()->set_seconds(42);\n\n  MockFunction<std::string(const std::string&, const std::string&, long)> mock_cookie_cb;\n  auto add_cookie = [&mock_cookie_cb](const std::string& name, const std::string& path,\n                                      std::chrono::seconds ttl) -> std::string {\n    return mock_cookie_cb.Call(name, path, ttl.count());\n  };\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_CALL(mock_cookie_cb, Call(\"hash\", \"\", 42));\n    EXPECT_TRUE(\n        route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie, nullptr));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    headers.addCopy(\"Cookie\", \"choco=late; 
su=gar\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_CALL(mock_cookie_cb, Call(\"hash\", \"\", 42));\n    EXPECT_TRUE(\n        route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie, nullptr));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    headers.addCopy(\"Cookie\", \"choco=late; hash=brown\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_TRUE(\n        route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie, nullptr));\n  }\n  {\n    uint64_t hash_1, hash_2;\n    {\n      Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n      Router::RouteConstSharedPtr route = config().route(headers, 0);\n      EXPECT_CALL(mock_cookie_cb, Call(\"hash\", \"\", 42)).WillOnce(Return(\"AAAAAAA\"));\n      hash_1 = route->routeEntry()\n                   ->hashPolicy()\n                   ->generateHash(nullptr, headers, add_cookie, nullptr)\n                   .value();\n    }\n    {\n      Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n      Router::RouteConstSharedPtr route = config().route(headers, 0);\n      EXPECT_CALL(mock_cookie_cb, Call(\"hash\", \"\", 42)).WillOnce(Return(\"BBBBBBB\"));\n      hash_2 = route->routeEntry()\n                   ->hashPolicy()\n                   ->generateHash(nullptr, headers, add_cookie, nullptr)\n                   .value();\n    }\n    EXPECT_NE(hash_1, hash_2);\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/bar\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_EQ(nullptr, route->routeEntry()->hashPolicy());\n  }\n}\n\nTEST_F(RouterMatcherCookieHashPolicyTest, SetSessionCookie) {\n  firstRouteHashPolicy()->mutable_cookie()->mutable_ttl()->set_seconds(0);\n\n  MockFunction<std::string(const 
std::string&, const std::string&, long)> mock_cookie_cb;\n  auto add_cookie = [&mock_cookie_cb](const std::string& name, const std::string& path,\n                                      std::chrono::seconds ttl) -> std::string {\n    return mock_cookie_cb.Call(name, path, ttl.count());\n  };\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_CALL(mock_cookie_cb, Call(\"hash\", \"\", 0));\n    EXPECT_TRUE(\n        route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie, nullptr));\n  }\n}\n\nTEST_F(RouterMatcherCookieHashPolicyTest, SetCookiePath) {\n  firstRouteHashPolicy()->mutable_cookie()->mutable_ttl()->set_seconds(0);\n  firstRouteHashPolicy()->mutable_cookie()->set_path(\"/\");\n\n  MockFunction<std::string(const std::string&, const std::string&, long)> mock_cookie_cb;\n  auto add_cookie = [&mock_cookie_cb](const std::string& name, const std::string& path,\n                                      std::chrono::seconds ttl) -> std::string {\n    return mock_cookie_cb.Call(name, path, ttl.count());\n  };\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_CALL(mock_cookie_cb, Call(\"hash\", \"/\", 0));\n    EXPECT_TRUE(\n        route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie, nullptr));\n  }\n}\n\nTEST_F(RouterMatcherHashPolicyTest, HashIp) {\n  Network::Address::Ipv4Instance valid_address(\"1.2.3.4\");\n  firstRouteHashPolicy()->mutable_connection_properties()->set_source_ip(true);\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_FALSE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie_nop_,\n        
                                                         nullptr));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_TRUE(route->routeEntry()->hashPolicy()->generateHash(&valid_address, headers,\n                                                                add_cookie_nop_, nullptr));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    uint64_t old_hash = config()\n                            .route(headers, 0)\n                            ->routeEntry()\n                            ->hashPolicy()\n                            ->generateHash(&valid_address, headers, add_cookie_nop_, nullptr)\n                            .value();\n    headers.addCopy(\"foo_header\", \"bar\");\n    EXPECT_EQ(old_hash, config()\n                            .route(headers, 0)\n                            ->routeEntry()\n                            ->hashPolicy()\n                            ->generateHash(&valid_address, headers, add_cookie_nop_, nullptr)\n                            .value());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/bar\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_EQ(nullptr, route->routeEntry()->hashPolicy());\n  }\n}\n\nTEST_F(RouterMatcherHashPolicyTest, HashIpNonIpAddress) {\n  NiceMock<Network::MockIp> bad_ip;\n  NiceMock<Network::MockResolvedAddress> bad_ip_address(\"\", \"\");\n  firstRouteHashPolicy()->mutable_connection_properties()->set_source_ip(true);\n  {\n    ON_CALL(bad_ip_address, ip()).WillByDefault(Return(nullptr));\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_FALSE(route->routeEntry()->hashPolicy()->generateHash(&bad_ip_address, 
headers,\n                                                                 add_cookie_nop_, nullptr));\n  }\n  {\n    const std::string empty;\n    ON_CALL(bad_ip_address, ip()).WillByDefault(Return(&bad_ip));\n    ON_CALL(bad_ip, addressAsString()).WillByDefault(ReturnRef(empty));\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_FALSE(route->routeEntry()->hashPolicy()->generateHash(&bad_ip_address, headers,\n                                                                 add_cookie_nop_, nullptr));\n  }\n}\n\nTEST_F(RouterMatcherHashPolicyTest, HashIpv4DifferentAddresses) {\n  firstRouteHashPolicy()->mutable_connection_properties()->set_source_ip(true);\n  {\n    // Different addresses should produce different hashes.\n    Network::Address::Ipv4Instance first_ip(\"1.2.3.4\");\n    Network::Address::Ipv4Instance second_ip(\"4.3.2.1\");\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    const auto hash_policy = config().route(headers, 0)->routeEntry()->hashPolicy();\n    const uint64_t hash_1 =\n        hash_policy->generateHash(&first_ip, headers, add_cookie_nop_, nullptr).value();\n    const uint64_t hash_2 =\n        hash_policy->generateHash(&second_ip, headers, add_cookie_nop_, nullptr).value();\n    EXPECT_NE(hash_1, hash_2);\n  }\n  {\n    // Same IP addresses but different ports should produce the same hash.\n    Network::Address::Ipv4Instance first_ip(\"1.2.3.4\", 8081);\n    Network::Address::Ipv4Instance second_ip(\"1.2.3.4\", 1331);\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    const auto hash_policy = config().route(headers, 0)->routeEntry()->hashPolicy();\n    const uint64_t hash_1 =\n        hash_policy->generateHash(&first_ip, headers, add_cookie_nop_, nullptr).value();\n    const uint64_t hash_2 =\n        
hash_policy->generateHash(&second_ip, headers, add_cookie_nop_, nullptr).value();\n    EXPECT_EQ(hash_1, hash_2);\n  }\n}\n\nTEST_F(RouterMatcherHashPolicyTest, HashIpv6DifferentAddresses) {\n  firstRouteHashPolicy()->mutable_connection_properties()->set_source_ip(true);\n  {\n    // Different addresses should produce different hashes.\n    Network::Address::Ipv6Instance first_ip(\"2001:0db8:85a3:0000:0000::\");\n    Network::Address::Ipv6Instance second_ip(\"::1\");\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    const auto hash_policy = config().route(headers, 0)->routeEntry()->hashPolicy();\n    const uint64_t hash_1 =\n        hash_policy->generateHash(&first_ip, headers, add_cookie_nop_, nullptr).value();\n    const uint64_t hash_2 =\n        hash_policy->generateHash(&second_ip, headers, add_cookie_nop_, nullptr).value();\n    EXPECT_NE(hash_1, hash_2);\n  }\n  {\n    // Same IP addresses but different ports should produce the same hash.\n    Network::Address::Ipv6Instance first_ip(\"1:2:3:4:5::\", 8081);\n    Network::Address::Ipv6Instance second_ip(\"1:2:3:4:5::\", 1331);\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    const auto hash_policy = config().route(headers, 0)->routeEntry()->hashPolicy();\n    const uint64_t hash_1 =\n        hash_policy->generateHash(&first_ip, headers, add_cookie_nop_, nullptr).value();\n    const uint64_t hash_2 =\n        hash_policy->generateHash(&second_ip, headers, add_cookie_nop_, nullptr).value();\n    EXPECT_EQ(hash_1, hash_2);\n  }\n}\n\nTEST_F(RouterMatcherHashPolicyTest, HashQueryParameters) {\n  firstRouteHashPolicy()->mutable_query_parameter()->set_name(\"param\");\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_FALSE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, 
add_cookie_nop_,\n                                                                 nullptr));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo?param=xyz\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_TRUE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie_nop_,\n                                                                nullptr));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/bar?param=xyz\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_FALSE(route->routeEntry()->hashPolicy());\n  }\n}\n\nclass RouterMatcherFilterStateHashPolicyTest : public RouterMatcherHashPolicyTest {\npublic:\n  RouterMatcherFilterStateHashPolicyTest()\n      : filter_state_(std::make_shared<StreamInfo::FilterStateImpl>(\n            StreamInfo::FilterState::LifeSpan::FilterChain)) {\n\n    filter_state_->setData(\"null-value\", nullptr, StreamInfo::FilterState::StateType::ReadOnly,\n                           StreamInfo::FilterState::LifeSpan::FilterChain);\n    filter_state_->setData(\"nonhashable\", std::make_unique<NonHashable>(),\n                           StreamInfo::FilterState::StateType::ReadOnly,\n                           StreamInfo::FilterState::LifeSpan::FilterChain);\n    filter_state_->setData(\"hashable\", std::make_unique<HashableObj>(),\n                           StreamInfo::FilterState::StateType::ReadOnly,\n                           StreamInfo::FilterState::LifeSpan::FilterChain);\n  }\n  class NonHashable : public StreamInfo::FilterState::Object {};\n  class HashableObj : public StreamInfo::FilterState::Object, public Http::Hashable {\n    absl::optional<uint64_t> hash() const override { return 12345; };\n  };\n\nprotected:\n  StreamInfo::FilterStateSharedPtr filter_state_;\n  Http::TestRequestHeaderMapImpl headers_{genHeaders(\"www.lyft.com\", \"/foo\", 
\"GET\")};\n};\n\n// No such key.\nTEST_F(RouterMatcherFilterStateHashPolicyTest, KeyNotFound) {\n  firstRouteHashPolicy()->mutable_filter_state()->set_key(\"not-in-filterstate\");\n  Router::RouteConstSharedPtr route = config().route(headers_, 0);\n  EXPECT_FALSE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers_, add_cookie_nop_,\n                                                               filter_state_));\n}\n// Key has no value.\nTEST_F(RouterMatcherFilterStateHashPolicyTest, NullValue) {\n  firstRouteHashPolicy()->mutable_filter_state()->set_key(\"null-value\");\n  Router::RouteConstSharedPtr route = config().route(headers_, 0);\n  EXPECT_FALSE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers_, add_cookie_nop_,\n                                                               filter_state_));\n}\n// Nonhashable.\nTEST_F(RouterMatcherFilterStateHashPolicyTest, ValueNonHashable) {\n  firstRouteHashPolicy()->mutable_filter_state()->set_key(\"nonhashable\");\n  Router::RouteConstSharedPtr route = config().route(headers_, 0);\n  EXPECT_FALSE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers_, add_cookie_nop_,\n                                                               filter_state_));\n}\n// Hashable Key.\nTEST_F(RouterMatcherFilterStateHashPolicyTest, Hashable) {\n  firstRouteHashPolicy()->mutable_filter_state()->set_key(\"hashable\");\n  Router::RouteConstSharedPtr route = config().route(headers_, 0);\n  const auto h = route->routeEntry()->hashPolicy()->generateHash(nullptr, headers_, add_cookie_nop_,\n                                                                 filter_state_);\n  EXPECT_TRUE(h);\n  EXPECT_EQ(h, 12345UL);\n}\n\nTEST_F(RouterMatcherHashPolicyTest, HashMultiple) {\n  auto route = route_config_.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_route();\n  route->add_hash_policy()->mutable_header()->set_header_name(\"foo_header\");\n  
route->add_hash_policy()->mutable_connection_properties()->set_source_ip(true);\n  Network::Address::Ipv4Instance address(\"4.3.2.1\");\n\n  uint64_t hash_h, hash_ip, hash_both;\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    EXPECT_FALSE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie_nop_,\n                                                                 nullptr));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    headers.addCopy(\"foo_header\", \"bar\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    hash_h = route->routeEntry()\n                 ->hashPolicy()\n                 ->generateHash(nullptr, headers, add_cookie_nop_, nullptr)\n                 .value();\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    hash_ip = route->routeEntry()\n                  ->hashPolicy()\n                  ->generateHash(&address, headers, add_cookie_nop_, nullptr)\n                  .value();\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    headers.addCopy(\"foo_header\", \"bar\");\n    hash_both = route->routeEntry()\n                    ->hashPolicy()\n                    ->generateHash(&address, headers, add_cookie_nop_, nullptr)\n                    .value();\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    headers.addCopy(\"foo_header\", \"bar\");\n    // stability\n    EXPECT_EQ(hash_both, route->routeEntry()\n                             
->hashPolicy()\n                             ->generateHash(&address, headers, add_cookie_nop_, nullptr)\n                             .value());\n  }\n  EXPECT_NE(hash_ip, hash_h);\n  EXPECT_NE(hash_ip, hash_both);\n  EXPECT_NE(hash_h, hash_both);\n}\n\nTEST_F(RouterMatcherHashPolicyTest, HashTerminal) {\n  // Hash policy list: cookie, header [terminal=true], user_ip.\n  auto route = route_config_.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_route();\n  route->add_hash_policy()->mutable_cookie()->set_name(\"cookie_hash\");\n  auto* header_hash = route->add_hash_policy();\n  header_hash->mutable_header()->set_header_name(\"foo_header\");\n  header_hash->set_terminal(true);\n  route->add_hash_policy()->mutable_connection_properties()->set_source_ip(true);\n  Network::Address::Ipv4Instance address1(\"4.3.2.1\");\n  Network::Address::Ipv4Instance address2(\"1.2.3.4\");\n\n  uint64_t hash_1, hash_2;\n  // Test terminal works when there is hash computed, the rest of the policy\n  // list is ignored.\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    headers.addCopy(\"Cookie\", \"cookie_hash=foo;\");\n    headers.addCopy(\"foo_header\", \"bar\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    hash_1 = route->routeEntry()\n                 ->hashPolicy()\n                 ->generateHash(&address1, headers, add_cookie_nop_, nullptr)\n                 .value();\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    headers.addCopy(\"Cookie\", \"cookie_hash=foo;\");\n    headers.addCopy(\"foo_header\", \"bar\");\n    Router::RouteConstSharedPtr route = config().route(headers, 0);\n    hash_2 = route->routeEntry()\n                 ->hashPolicy()\n                 ->generateHash(&address2, headers, add_cookie_nop_, nullptr)\n                 .value();\n  }\n  EXPECT_EQ(hash_1, hash_2);\n\n  // If no hash computed after evaluating a 
// A hash policy entry whose policy specifier is unset must cause config
// construction to throw, whether it is the only entry or follows valid ones.
TEST_F(RouterMatcherHashPolicyTest, InvalidHashPolicies) {
  {
    // firstRouteHashPolicy() adds an empty policy entry to the first route.
    auto hash_policy = firstRouteHashPolicy();
    EXPECT_EQ(envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::
                  POLICY_SPECIFIER_NOT_SET,
              hash_policy->policy_specifier_case());
    // Instantiating the config from route_config_ rejects the unset specifier.
    EXPECT_THROW(config(), EnvoyException);
  }
  {
    // Even with valid header and source-IP policies ahead of it, a trailing
    // empty policy entry still fails validation.
    auto route = route_config_.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_route();
    route->add_hash_policy()->mutable_header()->set_header_name("foo_header");
    route->add_hash_policy()->mutable_connection_properties()->set_source_ip(true);
    auto hash_policy = route->add_hash_policy();
    EXPECT_EQ(envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::
                  POLICY_SPECIFIER_NOT_SET,
              hash_policy->policy_specifier_case());
    EXPECT_THROW(config(), EnvoyException);
  }
}
route:\n      cluster_header: \":authority\"\n  - match:\n      prefix: \"/bar\"\n    route:\n      cluster_header: some_header\n      timeout: 0s\n  )EOF\";\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(\n      \"some_cluster\",\n      config.route(genHeaders(\"some_cluster\", \"/foo\", \"GET\"), 0)->routeEntry()->clusterName());\n\n  EXPECT_EQ(\n      \"\", config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)->routeEntry()->clusterName());\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/bar\", \"GET\");\n    headers.addCopy(\"some_header\", \"some_cluster\");\n    Router::RouteConstSharedPtr route = config.route(headers, 0);\n    EXPECT_EQ(\"some_cluster\", route->routeEntry()->clusterName());\n\n    // Make sure things forward and don't crash.\n    // TODO(mattklein123): Make this a real test of behavior.\n    EXPECT_EQ(std::chrono::milliseconds(0), route->routeEntry()->timeout());\n    route->routeEntry()->finalizeRequestHeaders(headers, stream_info, true);\n    route->routeEntry()->priority();\n    route->routeEntry()->rateLimitPolicy();\n    route->routeEntry()->retryPolicy();\n    route->routeEntry()->shadowPolicies();\n    route->routeEntry()->virtualCluster(headers);\n    route->routeEntry()->virtualHost();\n    route->routeEntry()->virtualHost().rateLimitPolicy();\n    route->routeEntry()->pathMatchCriterion();\n    route->routeEntry()->hedgePolicy();\n    route->routeEntry()->maxGrpcTimeout();\n    route->routeEntry()->grpcTimeoutOffset();\n    route->routeEntry()->upgradeMap();\n    route->routeEntry()->internalRedirectPolicy();\n  }\n}\n\nTEST_F(RouteMatcherTest, ContentType) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n      headers:\n      - name: content-type\n        exact_match: 
application/grpc\n    route:\n      cluster: local_service_grpc\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: local_service\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    EXPECT_EQ(\"local_service\",\n              config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"content-type\", \"application/grpc\");\n    EXPECT_EQ(\"local_service_grpc\", config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"content-type\", \"foo\");\n    EXPECT_EQ(\"local_service\", config.route(headers, 0)->routeEntry()->clusterName());\n  }\n}\n\nTEST_F(RouteMatcherTest, DurationTimeouts) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: local_service_grpc\n  - match:\n      prefix: \"/\"\n    route:\n      max_stream_duration:\n        max_stream_duration: 0.01s\n        grpc_timeout_header_max: 0.02s\n        grpc_timeout_header_offset: 0.03s\n      cluster: local_service_grpc\n      )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    auto entry = config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)->routeEntry();\n    EXPECT_EQ(std::chrono::milliseconds(10), entry->maxStreamDuration());\n    EXPECT_EQ(std::chrono::milliseconds(20), entry->grpcTimeoutHeaderMax());\n    EXPECT_EQ(std::chrono::milliseconds(30), entry->grpcTimeoutHeaderOffset());\n  }\n}\n\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(GrpcTimeoutOffset)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  
routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: local_service_grpc\n  - match:\n      prefix: \"/\"\n    route:\n      grpc_timeout_offset: 0.01s\n      cluster: local_service_grpc\n      )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    EXPECT_EQ(\n        absl::make_optional(std::chrono::milliseconds(10)),\n        config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)->routeEntry()->grpcTimeoutOffset());\n  }\n  EXPECT_EQ(absl::nullopt, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                               ->routeEntry()\n                               ->grpcTimeoutOffset());\n}\n\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(GrpcTimeoutOffsetOfDynamicRoute)) {\n  // A DynamicRouteEntry will be created when 'cluster_header' is set.\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: local_service_grpc\n      max_grpc_timeout: 0.1s\n      grpc_timeout_offset: 0.01s\n  - match:\n      prefix: \"/\"\n    route:\n      max_grpc_timeout: 0.2s\n      grpc_timeout_offset: 0.02s\n      cluster_header: request_to\n      )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    Http::TestRequestHeaderMapImpl reqeust_headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    reqeust_headers.addCopy(Http::LowerCaseString(\"reqeust_to\"), \"dynamic_grpc_service\");\n    EXPECT_EQ(absl::make_optional(std::chrono::milliseconds(20)),\n              config.route(reqeust_headers, 0)->routeEntry()->grpcTimeoutOffset());\n    EXPECT_EQ(absl::make_optional(std::chrono::milliseconds(200)),\n              config.route(reqeust_headers, 0)->routeEntry()->maxGrpcTimeout());\n  }\n  {\n\n    EXPECT_EQ(absl::make_optional(std::chrono::milliseconds(10)),\n              
config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                  ->routeEntry()\n                  ->grpcTimeoutOffset());\n    EXPECT_EQ(\n        absl::make_optional(std::chrono::milliseconds(100)),\n        config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)->routeEntry()->maxGrpcTimeout());\n  }\n}\n\nTEST_F(RouteMatcherTest, FractionalRuntime) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: \"www2\"\n    domains: [\"www.lyft.com\"]\n    routes:\n      - match:\n          prefix: \"/\"\n          runtime_fraction:\n            default_value:\n              numerator: 50\n              denominator: MILLION\n            runtime_key: \"bogus_key\"\n        route:\n          cluster: \"something_else\"\n      - match:\n          prefix: \"/\"\n        route:\n          cluster: \"www2\"\n  )EOF\";\n\n  Runtime::MockSnapshot snapshot;\n  ON_CALL(factory_context_.runtime_loader_, snapshot()).WillByDefault(ReturnRef(snapshot));\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false);\n\n  EXPECT_CALL(snapshot, featureEnabled(\"bogus_key\",\n                                       Matcher<const envoy::type::v3::FractionalPercent&>(_), 41))\n      .WillRepeatedly(Return(true));\n  EXPECT_EQ(\n      \"something_else\",\n      config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 41)->routeEntry()->clusterName());\n\n  EXPECT_CALL(snapshot, featureEnabled(\"bogus_key\",\n                                       Matcher<const envoy::type::v3::FractionalPercent&>(_), 43))\n      .WillRepeatedly(Return(false));\n  EXPECT_EQ(\n      \"www2\",\n      config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 43)->routeEntry()->clusterName());\n}\n\nTEST_F(RouteMatcherTest, ShadowClusterNotFound) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      request_mirror_policy:\n       
 cluster: some_cluster\n      cluster: www2\n  )EOF\";\n\n  EXPECT_CALL(factory_context_.cluster_manager_, get(Eq(\"www2\")))\n      .WillRepeatedly(Return(&factory_context_.cluster_manager_.thread_local_cluster_));\n  EXPECT_CALL(factory_context_.cluster_manager_, get(Eq(\"some_cluster\")))\n      .WillRepeatedly(Return(nullptr));\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, ClusterNotFound) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: www2\n  )EOF\";\n\n  EXPECT_CALL(factory_context_.cluster_manager_, get(Eq(\"www2\"))).WillRepeatedly(Return(nullptr));\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, ClusterNotFoundNotChecking) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: www2\n  )EOF\";\n\n  EXPECT_CALL(factory_context_.cluster_manager_, get(Eq(\"www2\"))).WillRepeatedly(Return(nullptr));\n\n  TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, false);\n}\n\nTEST_F(RouteMatcherTest, ClusterNotFoundNotCheckingViaConfig) {\n  const std::string yaml = R\"EOF(\nvalidate_clusters: false\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: www\n  )EOF\";\n\n  EXPECT_CALL(factory_context_.cluster_manager_, get(Eq(\"www2\"))).WillRepeatedly(Return(nullptr));\n\n  TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n}\n\nTEST_F(RouteMatcherTest, AttemptCountHeader) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: \"www2\"\n    domains: 
[\"www.lyft.com\"]\n    include_request_attempt_count: true\n    include_attempt_count_in_response: true\n    routes:\n      - match: { prefix: \"/\"}\n        route:\n          cluster: \"whatever\"\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_TRUE(config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                  ->routeEntry()\n                  ->includeAttemptCountInRequest());\n\n  EXPECT_TRUE(config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                  ->routeEntry()\n                  ->includeAttemptCountInResponse());\n}\n\nTEST_F(RouteMatcherTest, ClusterNotFoundResponseCode) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: \"www2\"\n    domains: [\"www.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\"}\n        route:\n          cluster: \"not_found\"\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false);\n\n  Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n\n  EXPECT_EQ(\"not_found\", config.route(headers, 0)->routeEntry()->clusterName());\n  EXPECT_EQ(Http::Code::ServiceUnavailable,\n            config.route(headers, 0)->routeEntry()->clusterNotFoundResponseCode());\n}\n\nTEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig503) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: \"www2\"\n    domains: [\"www.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\"}\n        route:\n          cluster: \"not_found\"\n          cluster_not_found_response_code: SERVICE_UNAVAILABLE\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false);\n\n  Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n\n  EXPECT_EQ(\"not_found\", config.route(headers, 0)->routeEntry()->clusterName());\n  EXPECT_EQ(Http::Code::ServiceUnavailable,\n            
config.route(headers, 0)->routeEntry()->clusterNotFoundResponseCode());\n}\n\nTEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig404) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: \"www2\"\n    domains: [\"www.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\"}\n        route:\n          cluster: \"not_found\"\n          cluster_not_found_response_code: NOT_FOUND\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false);\n\n  Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n\n  EXPECT_EQ(\"not_found\", config.route(headers, 0)->routeEntry()->clusterName());\n  EXPECT_EQ(Http::Code::NotFound,\n            config.route(headers, 0)->routeEntry()->clusterNotFoundResponseCode());\n}\n\n// TODO(dereka) DEPRECATED_FEATURE_TEST can be removed when `request_mirror_policy` is removed.\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      request_mirror_policy:\n        cluster: some_cluster\n      cluster: www2\n  - match:\n      prefix: \"/bar\"\n    route:\n      request_mirror_policy:\n        cluster: some_cluster2\n        runtime_fraction:\n          default_value:\n            numerator: 20\n            denominator: HUNDRED\n          runtime_key: foo\n      cluster: www2\n  - match:\n      prefix: \"/baz\"\n    route:\n      cluster: www2\n  - match:\n      prefix: \"/boz\"\n    route:\n      request_mirror_policies:\n        - cluster: some_cluster\n        - cluster: some_cluster2\n          runtime_fraction:\n            default_value:\n              numerator: 20\n              denominator: HUNDRED\n            runtime_key: foo\n      cluster: www2\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  const auto& foo_shadow_policies 
=\n      config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)->routeEntry()->shadowPolicies();\n  EXPECT_EQ(1, foo_shadow_policies.size());\n  EXPECT_EQ(\"some_cluster\", foo_shadow_policies[0]->cluster());\n  EXPECT_EQ(\"\", foo_shadow_policies[0]->runtimeKey());\n\n  const auto& bar_shadow_policies =\n      config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)->routeEntry()->shadowPolicies();\n  EXPECT_EQ(1, bar_shadow_policies.size());\n  EXPECT_EQ(\"some_cluster2\", bar_shadow_policies[0]->cluster());\n  EXPECT_EQ(\"foo\", bar_shadow_policies[0]->runtimeKey());\n\n  EXPECT_EQ(0, config.route(genHeaders(\"www.lyft.com\", \"/baz\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->shadowPolicies()\n                   .size());\n\n  const auto& boz_shadow_policies =\n      config.route(genHeaders(\"www.lyft.com\", \"/boz\", \"GET\"), 0)->routeEntry()->shadowPolicies();\n  EXPECT_EQ(2, boz_shadow_policies.size());\n  EXPECT_EQ(\"some_cluster\", boz_shadow_policies[0]->cluster());\n  EXPECT_EQ(\"\", boz_shadow_policies[0]->runtimeKey());\n  EXPECT_EQ(\"some_cluster2\", boz_shadow_policies[1]->cluster());\n  EXPECT_EQ(\"foo\", boz_shadow_policies[1]->runtimeKey());\n}\n\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(ShadowPolicyAndPolicies)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      request_mirror_policy:\n        cluster: some_cluster\n      request_mirror_policies:\n      - cluster: some_other_cluster\n      cluster: www2\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Cannot specify both request_mirror_policy and request_mirror_policies\");\n}\n\nclass RouteConfigurationV2 : public testing::Test, public ConfigImplTestBase {};\n\n// When removing runtime_key: this test can be 
removed.\nTEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RequestMirrorPolicy)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: mirror\n    domains: [mirror.lyft.com]\n    routes:\n      - match: { prefix: \"/\"}\n        route:\n          cluster: foo\n          request_mirror_policy:\n            cluster: foo_mirror\n            runtime_key: will_be_ignored\n            runtime_fraction:\n               default_value:\n                 numerator: 20\n                 denominator: HUNDRED\n               runtime_key: mirror_key\n\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(\"foo_mirror\", config.route(genHeaders(\"mirror.lyft.com\", \"/foo\", \"GET\"), 0)\n                              ->routeEntry()\n                              ->shadowPolicies()[0]\n                              ->cluster());\n\n  // `runtime_fraction` takes precedence over the deprecated `runtime_key` field.\n  EXPECT_EQ(\"mirror_key\", config.route(genHeaders(\"mirror.lyft.com\", \"/foo\", \"GET\"), 0)\n                              ->routeEntry()\n                              ->shadowPolicies()[0]\n                              ->runtimeKey());\n\n  const auto& default_value = config.route(genHeaders(\"mirror.lyft.com\", \"/foo\", \"GET\"), 0)\n                                  ->routeEntry()\n                                  ->shadowPolicies()[0]\n                                  ->defaultValue();\n  EXPECT_EQ(20, default_value.numerator());\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED, default_value.denominator());\n}\n\nTEST_F(RouteMatcherTest, Retry) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: www2\n      retry_policy:\n        retry_on: connect-failure\n  - match:\n      prefix: \"/bar\"\n    route:\n      cluster: www2\n  - match:\n      prefix: 
\"/\"\n    route:\n      cluster: www2\n      retry_policy:\n        per_try_timeout: 1s\n        num_retries: 3\n        retry_on: 5xx,gateway-error,connect-failure,reset\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(std::chrono::milliseconds(0),\n            config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .perTryTimeout());\n  EXPECT_EQ(1U, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryPolicy()\n                    .numRetries());\n  EXPECT_EQ(RetryPolicy::RETRY_ON_CONNECT_FAILURE,\n            config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .retryOn());\n\n  EXPECT_EQ(std::chrono::milliseconds(0),\n            config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .perTryTimeout());\n  EXPECT_EQ(1, config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->retryPolicy()\n                   .numRetries());\n  EXPECT_EQ(0U, config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryPolicy()\n                    .retryOn());\n\n  EXPECT_EQ(std::chrono::milliseconds(1000),\n            config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .perTryTimeout());\n  EXPECT_EQ(3U, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryPolicy()\n                    .numRetries());\n  EXPECT_EQ(RetryPolicy::RETRY_ON_CONNECT_FAILURE | 
RetryPolicy::RETRY_ON_5XX |\n                RetryPolicy::RETRY_ON_GATEWAY_ERROR | RetryPolicy::RETRY_ON_RESET,\n            config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .retryOn());\n}\n\nTEST_F(RouteMatcherTest, RetryVirtualHostLevel) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- domains: [www.lyft.com]\n  per_request_buffer_limit_bytes: 8\n  name: www\n  retry_policy: {num_retries: 3, per_try_timeout: 1s, retry_on: '5xx,gateway-error,connect-failure,reset'}\n  routes:\n  - match: {prefix: /foo}\n    per_request_buffer_limit_bytes: 7\n    route:\n      cluster: www\n      retry_policy: {retry_on: connect-failure}\n  - match: {prefix: /bar}\n    route: {cluster: www}\n  - match: {prefix: /}\n    route: {cluster: www}\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // Route level retry policy takes precedence.\n  EXPECT_EQ(std::chrono::milliseconds(0),\n            config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .perTryTimeout());\n  EXPECT_EQ(1U, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryPolicy()\n                    .numRetries());\n  EXPECT_EQ(RetryPolicy::RETRY_ON_CONNECT_FAILURE,\n            config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .retryOn());\n  EXPECT_EQ(7U, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryShadowBufferLimit());\n\n  // Virtual Host level retry policy kicks in.\n  EXPECT_EQ(std::chrono::milliseconds(1000),\n            config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                
->routeEntry()\n                ->retryPolicy()\n                .perTryTimeout());\n  EXPECT_EQ(3U, config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryPolicy()\n                    .numRetries());\n  EXPECT_EQ(RetryPolicy::RETRY_ON_CONNECT_FAILURE | RetryPolicy::RETRY_ON_5XX |\n                RetryPolicy::RETRY_ON_GATEWAY_ERROR | RetryPolicy::RETRY_ON_RESET,\n            config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .retryOn());\n  EXPECT_EQ(std::chrono::milliseconds(1000),\n            config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .perTryTimeout());\n  EXPECT_EQ(3U, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryPolicy()\n                    .numRetries());\n  EXPECT_EQ(RetryPolicy::RETRY_ON_CONNECT_FAILURE | RetryPolicy::RETRY_ON_5XX |\n                RetryPolicy::RETRY_ON_GATEWAY_ERROR | RetryPolicy::RETRY_ON_RESET,\n            config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .retryOn());\n  EXPECT_EQ(8U, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryShadowBufferLimit());\n}\n\nTEST_F(RouteMatcherTest, GrpcRetry) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: www2\n      retry_policy:\n        retry_on: connect-failure\n  - match:\n      prefix: \"/bar\"\n    route:\n      cluster: www2\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n      retry_policy:\n        per_try_timeout: 1s\n        
num_retries: 3\n        retry_on: 5xx,deadline-exceeded,resource-exhausted\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(std::chrono::milliseconds(0),\n            config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .perTryTimeout());\n  EXPECT_EQ(1U, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryPolicy()\n                    .numRetries());\n  EXPECT_EQ(RetryPolicy::RETRY_ON_CONNECT_FAILURE,\n            config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .retryOn());\n\n  EXPECT_EQ(std::chrono::milliseconds(0),\n            config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .perTryTimeout());\n  EXPECT_EQ(1, config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->retryPolicy()\n                   .numRetries());\n  EXPECT_EQ(0U, config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryPolicy()\n                    .retryOn());\n\n  EXPECT_EQ(std::chrono::milliseconds(1000),\n            config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .perTryTimeout());\n  EXPECT_EQ(3U, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                    ->routeEntry()\n                    ->retryPolicy()\n                    .numRetries());\n  EXPECT_EQ(RetryPolicy::RETRY_ON_5XX | RetryPolicy::RETRY_ON_GRPC_DEADLINE_EXCEEDED |\n                RetryPolicy::RETRY_ON_GRPC_RESOURCE_EXHAUSTED,\n            
config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .retryOn());\n}\n\n// Test route-specific retry back-off intervals.\nTEST_F(RouteMatcherTest, RetryBackOffIntervals) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: www2\n      retry_policy:\n        retry_back_off:\n          base_interval: 0.050s\n  - match:\n      prefix: \"/bar\"\n    route:\n      cluster: www2\n      retry_policy:\n        retry_back_off:\n          base_interval: 0.100s\n          max_interval: 0.500s\n  - match:\n      prefix: \"/baz\"\n    route:\n      cluster: www2\n      retry_policy:\n        retry_back_off:\n          base_interval: 0.0001s # < 1 ms\n          max_interval: 0.0001s\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n      retry_policy:\n        retry_on: connect-failure\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(absl::optional<std::chrono::milliseconds>(50),\n            config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .baseInterval());\n\n  EXPECT_EQ(absl::nullopt, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                               ->routeEntry()\n                               ->retryPolicy()\n                               .maxInterval());\n\n  EXPECT_EQ(absl::optional<std::chrono::milliseconds>(100),\n            config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .baseInterval());\n\n  EXPECT_EQ(absl::optional<std::chrono::milliseconds>(500),\n            config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                
->routeEntry()\n                ->retryPolicy()\n                .maxInterval());\n\n  // Sub-millisecond interval converted to 1 ms.\n  EXPECT_EQ(absl::optional<std::chrono::milliseconds>(1),\n            config.route(genHeaders(\"www.lyft.com\", \"/baz\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .baseInterval());\n\n  EXPECT_EQ(absl::optional<std::chrono::milliseconds>(1),\n            config.route(genHeaders(\"www.lyft.com\", \"/baz\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .maxInterval());\n\n  EXPECT_EQ(absl::nullopt, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                               ->routeEntry()\n                               ->retryPolicy()\n                               .baseInterval());\n\n  EXPECT_EQ(absl::nullopt, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                               ->routeEntry()\n                               ->retryPolicy()\n                               .maxInterval());\n}\n\n// Test invalid route-specific retry back-off configs.\nTEST_F(RouteMatcherTest, InvalidRetryBackOff) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: backoff\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          cluster: backoff\n          retry_policy:\n            retry_back_off:\n              base_interval: 10s\n              max_interval: 5s\n)EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"retry_policy.max_interval must greater than or equal to the base_interval\");\n}\n\nTEST_F(RouteMatcherTest, RateLimitedRetryBackOff) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/no-backoff\"\n    route:\n      cluster: www\n  - match:\n      prefix: 
\"/sub-ms-interval\"\n    route:\n      cluster: www\n      retry_policy:\n        rate_limited_retry_back_off:\n          reset_headers:\n          - name: Retry-After\n            format: SECONDS\n          max_interval: 0.0001s # < 1 ms\n  - match:\n      prefix: \"/typical-backoff\"\n    route:\n      cluster: www\n      retry_policy:\n        rate_limited_retry_back_off:\n          reset_headers:\n          - name: Retry-After\n            format: SECONDS\n          - name: RateLimit-Reset\n            format: UNIX_TIMESTAMP\n          max_interval: 0.050s\n  )EOF\";\n\n  const time_t known_date_time = 1000000000;\n  test_time_.setSystemTime(std::chrono::system_clock::from_time_t(known_date_time));\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // has no ratelimit retry back off\n  EXPECT_EQ(true, config.route(genHeaders(\"www.lyft.com\", \"/no-backoff\", \"GET\"), 0)\n                      ->routeEntry()\n                      ->retryPolicy()\n                      .resetHeaders()\n                      .empty());\n  EXPECT_EQ(std::chrono::milliseconds(300000),\n            config.route(genHeaders(\"www.lyft.com\", \"/no-backoff\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .resetMaxInterval());\n\n  // has sub millisecond interval\n  EXPECT_EQ(1, config.route(genHeaders(\"www.lyft.com\", \"/sub-ms-interval\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->retryPolicy()\n                   .resetHeaders()\n                   .size());\n  EXPECT_EQ(std::chrono::milliseconds(1),\n            config.route(genHeaders(\"www.lyft.com\", \"/sub-ms-interval\", \"GET\"), 0)\n                ->routeEntry()\n                ->retryPolicy()\n                .resetMaxInterval());\n\n  // a typical configuration\n  Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/typical-backoff\", \"GET\");\n  const auto& retry_policy = 
config.route(headers, 0)->routeEntry()->retryPolicy();\n  EXPECT_EQ(2, retry_policy.resetHeaders().size());\n\n  Http::TestResponseHeaderMapImpl expected_0{{\"Retry-After\", \"2\"}};\n  Http::TestResponseHeaderMapImpl expected_1{{\"RateLimit-Reset\", \"1000000005\"}};\n\n  EXPECT_EQ(std::chrono::milliseconds(2000),\n            retry_policy.resetHeaders()[0]->parseInterval(test_time_.timeSystem(), expected_0));\n  EXPECT_EQ(std::chrono::milliseconds(5000),\n            retry_policy.resetHeaders()[1]->parseInterval(test_time_.timeSystem(), expected_1));\n\n  EXPECT_EQ(std::chrono::milliseconds(50), retry_policy.resetMaxInterval());\n}\n\nTEST_F(RouteMatcherTest, HedgeRouteLevel) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- domains: [www.lyft.com]\n  name: www\n  routes:\n  - match: {prefix: /foo}\n    route:\n      cluster: www\n      hedge_policy:\n        initial_requests: 3\n        additional_request_chance:\n          numerator: 4\n          denominator: HUNDRED\n  - match: {prefix: /bar}\n    route: {cluster: www}\n  - match: {prefix: /}\n    route:\n      cluster: www\n      hedge_policy:\n        hedge_on_per_try_timeout: true\n        initial_requests: 5\n        additional_request_chance:\n          numerator: 40\n          denominator: HUNDRED\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(3, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->hedgePolicy()\n                   .initialRequests());\n  EXPECT_EQ(false, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                       ->routeEntry()\n                       ->hedgePolicy()\n                       .hedgeOnPerTryTimeout());\n  envoy::type::v3::FractionalPercent percent =\n      config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n          ->routeEntry()\n          ->hedgePolicy()\n          
.additionalRequestChance();\n  EXPECT_EQ(4, percent.numerator());\n  EXPECT_EQ(100, ProtobufPercentHelper::fractionalPercentDenominatorToInt(percent.denominator()));\n\n  EXPECT_EQ(1, config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->hedgePolicy()\n                   .initialRequests());\n  EXPECT_EQ(false, config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                       ->routeEntry()\n                       ->hedgePolicy()\n                       .hedgeOnPerTryTimeout());\n  percent = config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                ->routeEntry()\n                ->hedgePolicy()\n                .additionalRequestChance();\n  EXPECT_EQ(0, percent.numerator());\n\n  EXPECT_EQ(5, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->hedgePolicy()\n                   .initialRequests());\n  EXPECT_EQ(true, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                      ->routeEntry()\n                      ->hedgePolicy()\n                      .hedgeOnPerTryTimeout());\n  percent = config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                ->routeEntry()\n                ->hedgePolicy()\n                .additionalRequestChance();\n  EXPECT_EQ(40, percent.numerator());\n  EXPECT_EQ(100, ProtobufPercentHelper::fractionalPercentDenominatorToInt(percent.denominator()));\n}\n\nTEST_F(RouteMatcherTest, HedgeVirtualHostLevel) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- domains: [www.lyft.com]\n  name: www\n  hedge_policy: {initial_requests: 3}\n  routes:\n  - match: {prefix: /foo}\n    route:\n      cluster: www\n      hedge_policy: {hedge_on_per_try_timeout: true}\n  - match: {prefix: /bar}\n    route:\n      hedge_policy: {additional_request_chance: {numerator: 30, denominator: HUNDRED}}\n      cluster: www\n  - match: 
{prefix: /}\n    route: {cluster: www}\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // Route level hedge policy takes precedence.\n  EXPECT_EQ(1, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->hedgePolicy()\n                   .initialRequests());\n  EXPECT_EQ(true, config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n                      ->routeEntry()\n                      ->hedgePolicy()\n                      .hedgeOnPerTryTimeout());\n  envoy::type::v3::FractionalPercent percent =\n      config.route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), 0)\n          ->routeEntry()\n          ->hedgePolicy()\n          .additionalRequestChance();\n  EXPECT_EQ(0, percent.numerator());\n\n  // Virtual Host level hedge policy kicks in.\n  EXPECT_EQ(1, config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->hedgePolicy()\n                   .initialRequests());\n  EXPECT_EQ(false, config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                       ->routeEntry()\n                       ->hedgePolicy()\n                       .hedgeOnPerTryTimeout());\n  percent = config.route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), 0)\n                ->routeEntry()\n                ->hedgePolicy()\n                .additionalRequestChance();\n  EXPECT_EQ(30, percent.numerator());\n  EXPECT_EQ(100, ProtobufPercentHelper::fractionalPercentDenominatorToInt(percent.denominator()));\n\n  EXPECT_EQ(3, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                   ->routeEntry()\n                   ->hedgePolicy()\n                   .initialRequests());\n  EXPECT_EQ(false, config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                       ->routeEntry()\n                       ->hedgePolicy()\n                 
      .hedgeOnPerTryTimeout());\n  percent = config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)\n                ->routeEntry()\n                ->hedgePolicy()\n                .additionalRequestChance();\n  EXPECT_EQ(0, percent.numerator());\n}\n\nTEST_F(RouteMatcherTest, TestBadDefaultConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n- name: www2_staging\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2_staging\ninternal_only_headers:\n- x-lyft-user-id\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, TestDuplicateDomainConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n- name: www2_staging\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2_staging\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\n// Test to detect if hostname matches are case-insensitive\nTEST_F(RouteMatcherTest, TestCaseSensitiveDomainConfig) {\n  std::string yaml = R\"EOF(\nname: foo\nvirtual_hosts:\n  - name: www2\n    domains: [www.lyft.com]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: www2 }\n  - name: www2_staging\n    domains: [www.LYFt.cOM]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: www2_staging }\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Only unique values for domains are permitted. 
Duplicate entry of domain www.lyft.com in \"\n      \"route foo\");\n}\n\nTEST_F(RouteMatcherTest, TestDuplicateWildcardDomainConfig) {\n  const std::string yaml = R\"EOF(\nname: foo\nvirtual_hosts:\n- name: www2\n  domains: [\"*\"]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: www2 }\n- name: www2_staging\n  domains: [\"*\"]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: www2_staging }\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Only a single wildcard domain is permitted in route foo\");\n}\n\nTEST_F(RouteMatcherTest, TestDuplicateSuffixWildcardDomainConfig) {\n  const std::string yaml = R\"EOF(\nname: foo\nvirtual_hosts:\n- name: www2\n  domains: [\"*.lyft.com\"]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: www2 }\n- name: www2_staging\n  domains: [\"*.LYFT.COM\"]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: www2_staging }\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Only unique values for domains are permitted. Duplicate entry of domain *.lyft.com in route \"\n      \"foo\");\n}\n\nTEST_F(RouteMatcherTest, TestDuplicatePrefixWildcardDomainConfig) {\n  const std::string yaml = R\"EOF(\nname: foo\nvirtual_hosts:\n- name: www2\n  domains: [\"bar.*\"]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: www2 }\n- name: www2_staging\n  domains: [\"BAR.*\"]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: www2_staging }\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Only unique values for domains are permitted. 
Duplicate entry of domain bar.* in route foo\");\n}\n\nTEST_F(RouteMatcherTest, TestInvalidCharactersInPrefixRewrites) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www\n  domains: [\"*\"]\n  routes:\n  - match: { prefix: \"/foo\" }\n    route:\n      prefix_rewrite: \"/\\ndroptable\"\n      cluster: www\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"RouteActionValidationError.PrefixRewrite:.*value does not match regex pattern\");\n}\n\nTEST_F(RouteMatcherTest, TestInvalidCharactersInHostRewrites) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www\n  domains: [\"*\"]\n  routes:\n  - match: { prefix: \"/foo\" }\n    route:\n      host_rewrite: \"new_host\\ndroptable\"\n      cluster: www\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"RouteActionValidationError.HostRewriteLiteral:.*value does not match regex pattern\");\n}\n\nTEST_F(RouteMatcherTest, TestInvalidCharactersInAutoHostRewrites) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www\n  domains: [\"*\"]\n  routes:\n  - match: { prefix: \"/foo\" }\n    route:\n      auto_host_rewrite_header: \"x-host\\ndroptable\"\n      cluster: www\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"RouteActionValidationError.HostRewriteHeader:.*value does not match regex pattern\");\n}\n\nTEST_F(RouteMatcherTest, TestInvalidCharactersInHostRedirect) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www\n  domains: [\"*\"]\n  routes:\n  - match: { prefix: \"/foo\" }\n    redirect: { host_redirect: \"new.host\\ndroptable\" }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), 
EnvoyException,\n      \"RedirectActionValidationError.HostRedirect:.*value does not match regex pattern\");\n}\n\nTEST_F(RouteMatcherTest, TestInvalidCharactersInPathRedirect) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www\n  domains: [\"*\"]\n  routes:\n  - match: { prefix: \"/foo\" }\n    redirect: { path_redirect: \"/new_path\\ndroptable\" }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"RedirectActionValidationError.PathRedirect:.*value does not match regex pattern\");\n}\n\nTEST_F(RouteMatcherTest, TestInvalidCharactersInPrefixRewriteRedirect) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www\n  domains: [\"*\"]\n  routes:\n  - match: { prefix: \"/foo\" }\n    redirect: { prefix_rewrite: \"/new/prefix\\ndroptable\"}\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"RedirectActionValidationError.PrefixRewrite:.*value does not match regex pattern\");\n}\n\nTEST_F(RouteMatcherTest, TestPrefixAndRegexRewrites) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains: [\"bar.*\"]\n  routes:\n  - match: { prefix: \"/foo\" }\n    route:\n      prefix_rewrite: /\n      regex_rewrite:\n        pattern:\n          google_re2: {}\n          regex: foo\n        substitution: bar\n      cluster: www2\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Cannot specify both prefix_rewrite and regex_rewrite\");\n}\n\nTEST_F(RouteMatcherTest, TestDomainMatchOrderConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: exact\n  domains: [\"www.example.com\", \"www.example.cc\", \"wwww.example.com\" ]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: exact }\n- name: suffix\n  domains: 
[\"*w.example.com\" ]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: suffix }\n- name: prefix\n  domains: [\"www.example.c*\", \"ww.example.c*\"]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: prefix }\n- name: default\n  domains: [\"*\"]\n  routes:\n  - match: { prefix: \"/\" }\n    route: { cluster: default }\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(\n      \"exact\",\n      config.route(genHeaders(\"www.example.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\n      \"exact\",\n      config.route(genHeaders(\"wwww.example.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"exact\",\n            config.route(genHeaders(\"www.example.cc\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"suffix\",\n            config.route(genHeaders(\"ww.example.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"prefix\",\n            config.route(genHeaders(\"www.example.co\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"default\",\n            config.route(genHeaders(\"w.example.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  EXPECT_EQ(\"default\",\n            config.route(genHeaders(\"www.example.c\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n}\n\nTEST_F(RouteMatcherTest, NoProtocolInHeadersWhenTlsIsRequired) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www\n  require_tls: all\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  // route may be called early in some edge cases and \"x-forwarded-proto\" will not be set.\n  Http::TestRequestHeaderMapImpl headers{{\":authority\", \"www.lyft.com\"}, {\":path\", \"/\"}};\n  EXPECT_EQ(nullptr, config.route(headers, 
0));\n}\n\n/**\n * @brief  Generate headers for testing\n * @param ssl set true to insert \"x-forwarded-proto: https\", else \"x-forwarded-proto: http\"\n * @param internal nullopt for no such \"x-envoy-internal\" header, or explicit \"true/false\"\n * @return Http::TestRequestHeaderMapImpl\n */\nstatic Http::TestRequestHeaderMapImpl genRedirectHeaders(const std::string& host,\n                                                         const std::string& path, bool ssl,\n                                                         absl::optional<bool> internal) {\n  Http::TestRequestHeaderMapImpl headers{\n      {\":authority\", host}, {\":path\", path}, {\"x-forwarded-proto\", ssl ? \"https\" : \"http\"}};\n  if (internal.has_value()) {\n    headers.addCopy(\"x-envoy-internal\", internal.value() ? \"true\" : \"false\");\n  }\n\n  return headers;\n}\n\nTEST_F(RouteMatcherTest, RouteName) {\n  std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: \"www2\"\n    domains: [\"www.lyft.com\"]\n    routes:\n      - name: \"route-test\"\n        match: { prefix: \"/\"}\n        route:\n          cluster: \"ufesservice\"\n  - name: redirect\n    domains: [redirect.lyft.com]\n    routes:\n      - name: \"route-test-2\"\n        match: { path: /host }\n        redirect: { host_redirect: new.lyft.com }\n  )EOF\";\n  NiceMock<Server::Configuration::MockServerFactoryContext> factory_context;\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context, false);\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    EXPECT_EQ(\"route-test\", config.route(headers, 0)->routeEntry()->routeName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/host\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    EXPECT_EQ(\"route-test-2\", redirect->routeName());\n  }\n}\n\nTEST_F(RouteMatcherTest, 
DirectResponse) {\n  const auto pathname =\n      TestEnvironment::writeStringToFileForTest(\"direct_response_body\", \"Example text 3\");\n\n  static const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: www2\n    domains: [www.lyft.com]\n    require_tls: all\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: www2 }\n  - name: api\n    domains: [api.lyft.com]\n    require_tls: external_only\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: www2 }\n  - name: redirect\n    domains: [redirect.lyft.com]\n    routes:\n      - match: { path: /host }\n        redirect: { host_redirect: new.lyft.com }\n      - match: { path: /path }\n        redirect: { path_redirect: /new_path }\n      - match: { path: /https }\n        redirect: { https_redirect: true }\n      - match: { path: /host_path }\n        redirect: { host_redirect: new.lyft.com, path_redirect: /new_path }\n      - match: { path: /host_https }\n        redirect: { host_redirect: new.lyft.com, https_redirect: true }\n      - match: { path: /path_https }\n        redirect: { path_redirect: /new_path, https_redirect: true }\n      - match: { path: /host_path_https }\n        redirect: { host_redirect: new.lyft.com, path_redirect: /new_path, https_redirect: true }\n      - match: { path: /port }\n        redirect: { port_redirect: 8080 }\n      - match: { path: /host_port }\n        redirect: { host_redirect: new.lyft.com, port_redirect: 8080 }\n      - match: { path: /scheme_host_port }\n        redirect: { scheme_redirect: ws, host_redirect: new.lyft.com, port_redirect: 8080 }\n  - name: redirect_domain_port_80\n    domains: [redirect.lyft.com:80]\n    routes:\n      - match: { path: /ws }\n        redirect: { scheme_redirect: ws }\n      - match: { path: /host_path_https }\n        redirect: { host_redirect: new.lyft.com, path_redirect: /new_path, https_redirect: true }\n      - match: { path: /scheme_host_port }\n        redirect: { scheme_redirect: ws, 
host_redirect: new.lyft.com, port_redirect: 8080 }\n  - name: redirect_domain_port_443\n    domains: [redirect.lyft.com:443]\n    routes:\n      - match: { path: /ws }\n        redirect: { scheme_redirect: ws }\n      - match: { path: /host_path_http }\n        redirect: { scheme_redirect: http, host_redirect: new.lyft.com, path_redirect: /new_path}\n      - match: { path: /scheme_host_port }\n        redirect: { scheme_redirect: ws, host_redirect: new.lyft.com, port_redirect: 8080 }\n  - name: redirect_domain_port_8080\n    domains: [redirect.lyft.com:8080]\n    routes:\n      - match: { path: /port }\n        redirect: { port_redirect: 8181 }\n  - name: redirect_ipv4\n    domains: [10.0.0.1]\n    routes:\n      - match: { path: /port }\n        redirect: { port_redirect: 8080 }\n      - match: { path: /host_port }\n        redirect: { host_redirect: 20.0.0.2, port_redirect: 8080 }\n      - match: { path: /scheme_host_port }\n        redirect: { scheme_redirect: ws, host_redirect: 20.0.0.2, port_redirect: 8080 }\n  - name: redirect_ipv4_port_8080\n    domains: [10.0.0.1:8080]\n    routes:\n      - match: { path: /port }\n        redirect: { port_redirect: 8181 }\n  - name: redirect_ipv4_port_80\n    domains: [10.0.0.1:80]\n    routes:\n      - match: { path: /ws }\n        redirect: { scheme_redirect: ws }\n      - match: { path: /host_path_https }\n        redirect: { host_redirect: 20.0.0.2, path_redirect: /new_path, https_redirect: true }\n      - match: { path: /scheme_host_port }\n        redirect: { scheme_redirect: ws, host_redirect: 20.0.0.2, port_redirect: 8080 }\n  - name: redirect_ipv4_port_443\n    domains: [10.0.0.1:443]\n    routes:\n      - match: { path: /ws }\n        redirect: { scheme_redirect: ws }\n      - match: { path: /host_path_http }\n        redirect: { scheme_redirect: http, host_redirect: 20.0.0.2, path_redirect: /new_path}\n      - match: { path: /scheme_host_port }\n        redirect: { scheme_redirect: ws, host_redirect: 20.0.0.2, 
port_redirect: 8080 }\n  - name: redirect_ipv6\n    domains: [\"[fe80::1]\"]\n    routes:\n      - match: { path: /port }\n        redirect: { port_redirect: 8080 }\n      - match: { path: /host_port }\n        redirect: { host_redirect: \"[fe80::2]\", port_redirect: 8080 }\n      - match: { path: /scheme_host_port }\n        redirect: { scheme_redirect: ws, host_redirect: \"[fe80::2]\", port_redirect: 8080 }\n  - name: redirect_ipv6_port_8080\n    domains: [\"[fe80::1]:8080\"]\n    routes:\n      - match: { path: /port }\n        redirect: { port_redirect: 8181 }\n  - name: redirect_ipv6_port_80\n    domains: [\"[fe80::1]:80\"]\n    routes:\n      - match: { path: /ws }\n        redirect: { scheme_redirect: ws }\n      - match: { path: /host_path_https }\n        redirect: { host_redirect: \"[fe80::2]\", path_redirect: /new_path, https_redirect: true }\n      - match: { path: /scheme_host_port }\n        redirect: { scheme_redirect: ws, host_redirect: \"[fe80::2]\", port_redirect: 8080 }\n  - name: redirect_ipv6_port_443\n    domains: [\"[fe80::1]:443\"]\n    routes:\n      - match: { path: /ws }\n        redirect: { scheme_redirect: ws }\n      - match: { path: /host_path_http }\n        redirect: { scheme_redirect: http, host_redirect: \"[fe80::2]\", path_redirect: /new_path}\n      - match: { path: /scheme_host_port }\n        redirect: { scheme_redirect: ws, host_redirect: \"[fe80::2]\", port_redirect: 8080 }\n  - name: direct\n    domains: [direct.example.com]\n    routes:\n    - match: { prefix: /gone }\n      direct_response:\n        status: 410\n        body: { inline_bytes: \"RXhhbXBsZSB0ZXh0IDE=\" }\n    - match: { prefix: /error }\n      direct_response:\n        status: 500\n        body: { inline_string: \"Example text 2\" }\n    - match: { prefix: /no_body }\n      direct_response:\n        status: 200\n    - match: { prefix: /static }\n      direct_response:\n        status: 200\n        body: { filename: )EOF\" + pathname +\n                       
           R\"EOF(}\n    - match: { prefix: / }\n      route: { cluster: www2 }\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  EXPECT_EQ(nullptr, config.route(genRedirectHeaders(\"www.foo.com\", \"/foo\", true, true), 0));\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\"www.lyft.com\", \"/foo\", true, true);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"www.lyft.com\", \"/foo\", false, false);\n    EXPECT_EQ(\"https://www.lyft.com/foo\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n    EXPECT_EQ(nullptr, config.route(headers, 0)->decorator());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"api.lyft.com\", \"/foo\", false, true);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"api.lyft.com\", \"/foo\", false, false);\n    EXPECT_EQ(\"https://api.lyft.com/foo\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\n        \"api.lyft.com\", \"/foo\", false, absl::nullopt /* no x-envoy-internal header */);\n    EXPECT_EQ(\"https://api.lyft.com/foo\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/host\", false, false);\n    EXPECT_EQ(\"http://new.lyft.com/host\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new_path\",\n              
config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/host_path\", true, false);\n    EXPECT_EQ(\"https://new.lyft.com/new_path\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"direct.example.com\", \"/gone\", true, false);\n    EXPECT_EQ(Http::Code::Gone, config.route(headers, 0)->directResponseEntry()->responseCode());\n    EXPECT_EQ(\"Example text 1\", config.route(headers, 0)->directResponseEntry()->responseBody());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"direct.example.com\", \"/error\", true, false);\n    EXPECT_EQ(Http::Code::InternalServerError,\n              config.route(headers, 0)->directResponseEntry()->responseCode());\n    EXPECT_EQ(\"Example text 2\", config.route(headers, 0)->directResponseEntry()->responseBody());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"direct.example.com\", \"/no_body\", true, false);\n    EXPECT_EQ(Http::Code::OK, config.route(headers, 0)->directResponseEntry()->responseCode());\n    EXPECT_TRUE(config.route(headers, 0)->directResponseEntry()->responseBody().empty());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"direct.example.com\", \"/static\", true, false);\n    EXPECT_EQ(Http::Code::OK, config.route(headers, 0)->directResponseEntry()->responseCode());\n    EXPECT_EQ(\"Example text 3\", config.route(headers, 0)->directResponseEntry()->responseBody());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"direct.example.com\", \"/other\", true, false);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        
genRedirectHeaders(\"redirect.lyft.com\", \"/https\", false, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/https\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n    EXPECT_EQ(nullptr, config.route(headers, 0)->perFilterConfig(\"bar\"));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/host_https\", false, false);\n    EXPECT_EQ(\"https://new.lyft.com/host_https\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path_https\", false, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new_path\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/host_path_https\", false, false);\n    EXPECT_EQ(\"https://new.lyft.com/new_path\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/port\", false, false);\n    EXPECT_EQ(\"http://redirect.lyft.com:8080/port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com:8080\", \"/port\", false, false);\n    EXPECT_EQ(\"http://redirect.lyft.com:8181/port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/host_port\", false, false);\n    EXPECT_EQ(\"http://new.lyft.com:8080/host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        
genRedirectHeaders(\"redirect.lyft.com\", \"/scheme_host_port\", false, false);\n    EXPECT_EQ(\"ws://new.lyft.com:8080/scheme_host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com:80\", \"/ws\", true, false);\n    EXPECT_EQ(\"ws://redirect.lyft.com:80/ws\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com:80\", \"/host_path_https\", false, false);\n    EXPECT_EQ(\"https://new.lyft.com/new_path\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com:80\", \"/scheme_host_port\", false, false);\n    EXPECT_EQ(\"ws://new.lyft.com:8080/scheme_host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com:443\", \"/ws\", false, false);\n    EXPECT_EQ(\"ws://redirect.lyft.com:443/ws\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com:443\", \"/host_path_http\", true, false);\n    EXPECT_EQ(\"http://new.lyft.com/new_path\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com:443\", \"/scheme_host_port\", true, false);\n    EXPECT_EQ(\"ws://new.lyft.com:8080/scheme_host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\"10.0.0.1\", \"/port\", 
false, false);\n    EXPECT_EQ(\"http://10.0.0.1:8080/port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"10.0.0.1:8080\", \"/port\", false, false);\n    EXPECT_EQ(\"http://10.0.0.1:8181/port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"10.0.0.1\", \"/host_port\", false, false);\n    EXPECT_EQ(\"http://20.0.0.2:8080/host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"10.0.0.1\", \"/scheme_host_port\", false, false);\n    EXPECT_EQ(\"ws://20.0.0.2:8080/scheme_host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\"10.0.0.1:80\", \"/ws\", true, false);\n    EXPECT_EQ(\"ws://10.0.0.1:80/ws\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"10.0.0.1:80\", \"/host_path_https\", false, false);\n    EXPECT_EQ(\"https://20.0.0.2/new_path\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"10.0.0.1:80\", \"/scheme_host_port\", false, false);\n    EXPECT_EQ(\"ws://20.0.0.2:8080/scheme_host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"10.0.0.1:443\", \"/ws\", false, false);\n    EXPECT_EQ(\"ws://10.0.0.1:443/ws\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    
Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"10.0.0.1:443\", \"/host_path_http\", true, false);\n    EXPECT_EQ(\"http://20.0.0.2/new_path\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"10.0.0.1:443\", \"/scheme_host_port\", true, false);\n    EXPECT_EQ(\"ws://20.0.0.2:8080/scheme_host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\"[fe80::1]\", \"/port\", false, false);\n\n    EXPECT_EQ(\"http://[fe80::1]:8080/port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"[fe80::1]:8080\", \"/port\", false, false);\n    EXPECT_EQ(\"http://[fe80::1]:8181/port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"[fe80::1]\", \"/host_port\", false, false);\n    EXPECT_EQ(\"http://[fe80::2]:8080/host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"[fe80::1]\", \"/scheme_host_port\", false, false);\n    EXPECT_EQ(\"ws://[fe80::2]:8080/scheme_host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\"[fe80::1]:80\", \"/ws\", true, false);\n    EXPECT_EQ(\"ws://[fe80::1]:80/ws\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"[fe80::1]:80\", \"/host_path_https\", false, false);\n    
EXPECT_EQ(\"https://[fe80::2]/new_path\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"[fe80::1]:80\", \"/scheme_host_port\", false, false);\n    EXPECT_EQ(\"ws://[fe80::2]:8080/scheme_host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"[fe80::1]:443\", \"/ws\", false, false);\n    EXPECT_EQ(\"ws://[fe80::1]:443/ws\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"[fe80::1]:443\", \"/host_path_http\", true, false);\n    EXPECT_EQ(\"http://[fe80::2]/new_path\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"[fe80::1]:443\", \"/scheme_host_port\", true, false);\n    EXPECT_EQ(\"ws://[fe80::2]:8080/scheme_host_port\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n}\n\nTEST_F(RouteMatcherTest, ExclusiveRouteEntryOrDirectResponseEntry) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n- name: redirect\n  domains:\n  - redirect.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    redirect:\n      host_redirect: new.lyft.com\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\"www.lyft.com\", \"/foo\", true, true);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n    EXPECT_EQ(\"www2\", config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    
Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/foo\", false, false);\n    EXPECT_EQ(\"http://new.lyft.com/foo\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n    EXPECT_EQ(nullptr, config.route(headers, 0)->routeEntry());\n  }\n}\n\nTEST_F(RouteMatcherTest, ExclusiveWeightedClustersEntryOrDirectResponseEntry) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      weighted_clusters:\n        clusters:\n        - name: www2\n          weight: 100\n- name: redirect\n  domains:\n  - redirect.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    redirect:\n      host_redirect: new.lyft.com\n  - match:\n      prefix: \"/foo1\"\n    redirect:\n      host_redirect: \"[fe80::1]\"\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\"www.lyft.com\", \"/foo\", true, true);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n    EXPECT_EQ(\"www2\", config.route(headers, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/foo\", false, false);\n    EXPECT_EQ(\"http://new.lyft.com/foo\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n    EXPECT_EQ(nullptr, config.route(headers, 0)->routeEntry());\n  }\n}\n\nstruct Foo : public Envoy::Config::TypedMetadata::Object {};\nstruct Baz : public Envoy::Config::TypedMetadata::Object {\n  Baz(std::string n) : name(n) {}\n  std::string name;\n};\nclass BazFactory : public HttpRouteTypedMetadataFactory {\npublic:\n  std::string name() const override { return \"baz\"; }\n  // Returns nullptr (conversion failure) if d is empty.\n  std::unique_ptr<const 
Envoy::Config::TypedMetadata::Object>\n  parse(const ProtobufWkt::Struct& d) const override {\n    if (d.fields().find(\"name\") != d.fields().end()) {\n      return std::make_unique<Baz>(d.fields().at(\"name\").string_value());\n    }\n    throw EnvoyException(\"Cannot create a Baz when metadata is empty.\");\n  }\n};\n\nTEST_F(RouteMatcherTest, WeightedClusters) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: www1\n    domains: [\"www1.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\" }\n        metadata: { filter_metadata: { com.bar.foo: { baz: test_value }, baz: {name: meh} } }\n        decorator:\n          operation: hello\n        route:\n          weighted_clusters:\n            clusters:\n              - name: cluster1\n                weight: 30\n              - name: cluster2\n                weight: 30\n              - name: cluster3\n                weight: 40\n  - name: www2\n    domains: [\"www2.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            clusters:\n              - name: cluster1\n                weight: 2000\n              - name: cluster2\n                weight: 3000\n              - name: cluster3\n                weight: 5000\n            total_weight: 10000\n  - name: www3\n    domains: [\"www3.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            runtime_key_prefix: www3_weights\n            clusters:\n              - name: cluster1\n                weight: 30\n              - name: cluster2\n                weight: 30\n              - name: cluster3\n                weight: 40\n  - name: www4\n    domains: [\"www4.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            runtime_key_prefix: www4_weights\n            clusters:\n              - name: cluster1\n                weight: 2000\n              - name: cluster2\n      
          weight: 3000\n              - name: cluster3\n                weight: 5000\n            total_weight: 10000\n  )EOF\";\n\n  BazFactory baz_factory;\n  Registry::InjectFactory<HttpRouteTypedMetadataFactory> registered_factory(baz_factory);\n  auto& runtime = factory_context_.runtime_loader_;\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"www1.lyft.com\", \"/foo\", true, true);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n  }\n\n  // Weighted Cluster with no runtime, default total weight\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www1.lyft.com\", \"/foo\", \"GET\");\n    EXPECT_EQ(\"cluster1\", config.route(headers, 115)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", config.route(headers, 445)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", config.route(headers, 560)->routeEntry()->clusterName());\n  }\n\n  // Make sure weighted cluster entries call through to the parent when needed.\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www1.lyft.com\", \"/foo\", \"GET\");\n    auto route = config.route(headers, 115);\n    const RouteEntry* route_entry = route->routeEntry();\n    EXPECT_EQ(nullptr, route_entry->hashPolicy());\n    EXPECT_TRUE(route_entry->opaqueConfig().empty());\n    EXPECT_FALSE(route_entry->autoHostRewrite());\n    // Default behavior when include_vh_rate_limits is not set, similar to\n    // VhRateLimitOptions::Override\n    EXPECT_FALSE(route_entry->includeVirtualHostRateLimits());\n    EXPECT_EQ(Http::Code::ServiceUnavailable, route_entry->clusterNotFoundResponseCode());\n    EXPECT_EQ(nullptr, route_entry->corsPolicy());\n    EXPECT_EQ(\"test_value\",\n              Envoy::Config::Metadata::metadataValue(&route_entry->metadata(), \"com.bar.foo\", \"baz\")\n                  .string_value());\n    EXPECT_EQ(nullptr, 
route_entry->typedMetadata().get<Foo>(baz_factory.name()));\n    EXPECT_EQ(\"meh\", route_entry->typedMetadata().get<Baz>(baz_factory.name())->name);\n    EXPECT_EQ(\"hello\", route->decorator()->getOperation());\n\n    Http::TestResponseHeaderMapImpl response_headers;\n    StreamInfo::MockStreamInfo stream_info;\n    route_entry->finalizeResponseHeaders(response_headers, stream_info);\n    EXPECT_EQ(response_headers, Http::TestResponseHeaderMapImpl{});\n  }\n\n  // Weighted Cluster with no runtime, total weight = 10000\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www2.lyft.com\", \"/foo\", \"GET\");\n    EXPECT_EQ(\"cluster1\", config.route(headers, 1150)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", config.route(headers, 4500)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", config.route(headers, 8900)->routeEntry()->clusterName());\n  }\n\n  // Weighted Cluster with valid runtime values, default total weight\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www3.lyft.com\", \"/foo\", \"GET\");\n    EXPECT_CALL(runtime.snapshot_, featureEnabled(\"www3\", 100, _)).WillRepeatedly(Return(true));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www3_weights.cluster1\", 30))\n        .WillRepeatedly(Return(80));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www3_weights.cluster2\", 30))\n        .WillRepeatedly(Return(10));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www3_weights.cluster3\", 40))\n        .WillRepeatedly(Return(10));\n\n    EXPECT_EQ(\"cluster1\", config.route(headers, 45)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", config.route(headers, 82)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", config.route(headers, 92)->routeEntry()->clusterName());\n  }\n\n  // Weighted Cluster with invalid runtime values, default total weight\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www3.lyft.com\", \"/foo\", \"GET\");\n    EXPECT_CALL(runtime.snapshot_, 
featureEnabled(\"www3\", 100, _)).WillRepeatedly(Return(true));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www3_weights.cluster1\", 30))\n        .WillRepeatedly(Return(10));\n\n    // We return an invalid value here, one that is greater than 100\n    // Expect any random value > 10 to always land in cluster2.\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www3_weights.cluster2\", 30))\n        .WillRepeatedly(Return(120));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www3_weights.cluster3\", 40))\n        .WillRepeatedly(Return(10));\n\n    EXPECT_EQ(\"cluster1\", config.route(headers, 1005)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", config.route(headers, 82)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", config.route(headers, 92)->routeEntry()->clusterName());\n  }\n\n  // Weighted Cluster with runtime values, total weight = 10000\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www4.lyft.com\", \"/foo\", \"GET\");\n    EXPECT_CALL(runtime.snapshot_, featureEnabled(\"www4\", 100, _)).WillRepeatedly(Return(true));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www4_weights.cluster1\", 2000))\n        .WillRepeatedly(Return(8000));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www4_weights.cluster2\", 3000))\n        .WillRepeatedly(Return(1000));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www4_weights.cluster3\", 5000))\n        .WillRepeatedly(Return(1000));\n\n    EXPECT_EQ(\"cluster1\", config.route(headers, 1150)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", config.route(headers, 8100)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", config.route(headers, 9200)->routeEntry()->clusterName());\n  }\n\n  // Weighted Cluster with invalid runtime values, total weight = 10000\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www4.lyft.com\", \"/foo\", \"GET\");\n    EXPECT_CALL(runtime.snapshot_, featureEnabled(\"www4\", 100, _)).WillRepeatedly(Return(true));\n 
   EXPECT_CALL(runtime.snapshot_, getInteger(\"www4_weights.cluster1\", 2000))\n        .WillRepeatedly(Return(1000));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www4_weights.cluster2\", 3000))\n        .WillRepeatedly(Return(12000));\n    EXPECT_CALL(runtime.snapshot_, getInteger(\"www4_weights.cluster3\", 5000))\n        .WillRepeatedly(Return(1000));\n\n    EXPECT_EQ(\"cluster1\", config.route(headers, 500)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", config.route(headers, 1500)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", config.route(headers, 9999)->routeEntry()->clusterName());\n  }\n}\n\nTEST_F(RouteMatcherTest, ExclusiveWeightedClustersOrClusterConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      weighted_clusters:\n        clusters:\n        - name: cluster1\n          weight: 30\n        - name: cluster2\n          weight: 30\n        - name: cluster3\n          weight: 40\n      cluster: www2\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, WeightedClustersMissingClusterList) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      weighted_clusters:\n        runtime_key_prefix: www2\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, WeightedClustersEmptyClustersList) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      weighted_clusters:\n        runtime_key_prefix: www2\n        clusters: []\n  )EOF\";\n\n  
EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, WeightedClustersSumOFWeightsNotEqualToMax) {\n  std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: www2\n    domains: [\"www.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            clusters:\n              - name: cluster1\n                weight: 3\n              - name: cluster2\n                weight: 3\n              - name: cluster3\n                weight: 3\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Sum of weights in the weighted_cluster should add up to 100\");\n\n  yaml = R\"EOF(\nvirtual_hosts:\n  - name: www2\n    domains: [\"www.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            total_weight: 99\n            clusters:\n              - name: cluster1\n                weight: 3\n              - name: cluster2\n                weight: 3\n              - name: cluster3\n                weight: 3\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Sum of weights in the weighted_cluster should add up to 99\");\n}\n\nTEST_F(RouteMatcherTest, TestWeightedClusterWithMissingWeights) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      weighted_clusters:\n        clusters:\n        - name: cluster1\n          weight: 50\n        - name: cluster2\n          weight: 50\n        - name: cluster3\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, 
TestWeightedClusterInvalidClusterName) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      weighted_clusters:\n        clusters:\n        - name: cluster1\n          weight: 33\n        - name: cluster2\n          weight: 33\n        - name: cluster3-invalid\n          weight: 34\n  )EOF\";\n\n  EXPECT_CALL(factory_context_.cluster_manager_, get(Eq(\"cluster1\")))\n      .WillRepeatedly(Return(&factory_context_.cluster_manager_.thread_local_cluster_));\n  EXPECT_CALL(factory_context_.cluster_manager_, get(Eq(\"cluster2\")))\n      .WillRepeatedly(Return(&factory_context_.cluster_manager_.thread_local_cluster_));\n  EXPECT_CALL(factory_context_.cluster_manager_, get(Eq(\"cluster3-invalid\")))\n      .WillRepeatedly(Return(nullptr));\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(RouteMatcherTest, TestWeightedClusterHeaderManipulation) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: www2\n    domains: [\"www.lyft.com\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            clusters:\n              - name: cluster1\n                weight: 50\n                request_headers_to_add:\n                  - header:\n                      key: x-req-cluster\n                      value: cluster1\n                response_headers_to_add:\n                  - header:\n                      key: x-resp-cluster\n                      value: cluster1\n                response_headers_to_remove: [ \"x-remove-cluster1\" ]\n              - name: cluster2\n                weight: 50\n                request_headers_to_add:\n                  - header:\n                      key: x-req-cluster\n                      value: cluster2\n                response_headers_to_add:\n                  - header:\n         
             key: x-resp-cluster\n                      value: cluster2\n                response_headers_to_remove: [ \"x-remove-cluster2\" ]\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Http::TestResponseHeaderMapImpl resp_headers({{\"x-remove-cluster1\", \"value\"}});\n    const RouteEntry* route = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"cluster1\", route->clusterName());\n\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"cluster1\", headers.get_(\"x-req-cluster\"));\n\n    route->finalizeResponseHeaders(resp_headers, stream_info);\n    EXPECT_EQ(\"cluster1\", resp_headers.get_(\"x-resp-cluster\"));\n    EXPECT_FALSE(resp_headers.has(\"x-remove-cluster1\"));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Http::TestResponseHeaderMapImpl resp_headers({{\"x-remove-cluster2\", \"value\"}});\n    const RouteEntry* route = config.route(headers, 55)->routeEntry();\n    EXPECT_EQ(\"cluster2\", route->clusterName());\n\n    route->finalizeRequestHeaders(headers, stream_info, true);\n    EXPECT_EQ(\"cluster2\", headers.get_(\"x-req-cluster\"));\n\n    route->finalizeResponseHeaders(resp_headers, stream_info);\n    EXPECT_EQ(\"cluster2\", resp_headers.get_(\"x-resp-cluster\"));\n    EXPECT_FALSE(resp_headers.has(\"x-remove-cluster2\"));\n  }\n}\n\nTEST(NullConfigImplTest, All) {\n  NullConfigImpl config;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"redirect.lyft.com\", \"/baz\", true, false);\n  EXPECT_EQ(nullptr, config.route(headers, stream_info, 0));\n  EXPECT_EQ(0UL, config.internalOnlyHeaders().size());\n  EXPECT_EQ(\"\", config.name());\n}\n\nclass 
BadHttpRouteConfigurationsTest : public testing::Test, public ConfigImplTestBase {};\n\nTEST_F(BadHttpRouteConfigurationsTest, BadRouteConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\nfake_entry: fake_type\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(BadHttpRouteConfigurationsTest, BadVirtualHostConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  router:\n    cluster: my_cluster\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n    timeout: 1234s\n  )EOF\";\n\n  EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n               EnvoyException);\n}\n\nTEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPath) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n      path: \"/foo\"\n    route:\n      cluster: www2\n  )EOF\";\n\n#ifndef GTEST_USES_SIMPLE_RE\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"invalid value oneof field 'path_specifier' is already set. 
Cannot set '(prefix|path)' for \"\n      \"type oneof\");\n#else\n  EXPECT_THAT_THROWS_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      ::testing::AnyOf(\n          ::testing::ContainsRegex(\n              \"invalid value oneof field 'path_specifier' is already set. Cannot set 'prefix' for \"\n              \"type oneof\"),\n          ::testing::ContainsRegex(\n              \"invalid value oneof field 'path_specifier' is already set. Cannot set 'path' for \"\n              \"type oneof\")));\n#endif\n}\n\nTEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigMissingPathSpecifier) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - route:\n      cluster: www2\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"RouteValidationError.Match: \\\\[\\\"value is required\\\"\\\\]\");\n}\n\nTEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndRegex) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n      regex: \"/[bc]at\"\n    route:\n      cluster: www2\n  )EOF\";\n\n#ifndef GTEST_USES_SIMPLE_RE\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"invalid value oneof field 'path_specifier' is already set. Cannot set '(prefix|regex)' for \"\n      \"type oneof\");\n#else\n  EXPECT_THAT_THROWS_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      ::testing::AnyOf(\n          ::testing::ContainsRegex(\n              \"invalid value oneof field 'path_specifier' is already set. 
Cannot set 'prefix' for \"\n              \"type oneof\"),\n          ::testing::ContainsRegex(\n              \"invalid value oneof field 'path_specifier' is already set. Cannot set 'regex' for \"\n              \"type oneof\")));\n#endif\n}\n\nTEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigNoAction) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/api\"\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"caused by field: \\\"action\\\", reason: is required\");\n}\n\nTEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPathAndRegex) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      path: \"/foo\"\n      regex: \"/[bc]at\"\n    route:\n      cluster: www2\n  )EOF\";\n\n#ifndef GTEST_USES_SIMPLE_RE\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"invalid value oneof field 'path_specifier' is already set. Cannot set '(path|regex)' for \"\n      \"type oneof\");\n#else\n  EXPECT_THAT_THROWS_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      ::testing::AnyOf(\n          ::testing::ContainsRegex(\n              \"invalid value oneof field 'path_specifier' is already set. Cannot set 'path' for \"\n              \"type oneof\"),\n          ::testing::ContainsRegex(\n              \"invalid value oneof field 'path_specifier' is already set. 
Cannot set 'regex' for \"\n              \"type oneof\")));\n#endif\n}\n\nTEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPathAndRegex) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n      path: \"/foo\"\n      regex: \"/[bc]at\"\n    route:\n      cluster: www2\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"invalid value oneof field 'path_specifier' is already set.\");\n}\n\nTEST_F(RouteMatcherTest, TestOpaqueConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: default\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/api\"\n    route:\n      cluster: ats\n    metadata:\n      filter_metadata:\n        envoy.filters.http.router:\n          name1: value1\n          name2: value2\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  const std::multimap<std::string, std::string>& opaque_config =\n      config.route(genHeaders(\"api.lyft.com\", \"/api\", \"GET\"), 0)->routeEntry()->opaqueConfig();\n\n  EXPECT_EQ(opaque_config.find(\"name1\")->second, \"value1\");\n  EXPECT_EQ(opaque_config.find(\"name2\")->second, \"value2\");\n}\n\n// Test that the deprecated name works for opaque configs.\nTEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestOpaqueConfigUsingDeprecatedName)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: default\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/api\"\n    route:\n      cluster: ats\n    metadata:\n      filter_metadata:\n        envoy.router:\n          name1: value1\n          name2: value2\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  const std::multimap<std::string, std::string>& opaque_config =\n      config.route(genHeaders(\"api.lyft.com\", 
\"/api\", \"GET\"), 0)->routeEntry()->opaqueConfig();\n\n  EXPECT_EQ(opaque_config.find(\"name1\")->second, \"value1\");\n  EXPECT_EQ(opaque_config.find(\"name2\")->second, \"value2\");\n}\n\nclass RoutePropertyTest : public testing::Test, public ConfigImplTestBase {};\n\nTEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(ExcludeVHRateLimits)) {\n  std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n  )EOF\";\n\n  Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n  std::unique_ptr<TestConfigImpl> config_ptr;\n\n  config_ptr = std::make_unique<TestConfigImpl>(parseRouteConfigurationFromYaml(yaml),\n                                                factory_context_, true);\n  // Default behavior when include_vh_rate_limits is not set, similar to\n  // VhRateLimitOptions::Override\n  EXPECT_FALSE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits());\n\n  yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n      rate_limits:\n      - actions:\n        - remote_address: {}\n  )EOF\";\n\n  config_ptr = std::make_unique<TestConfigImpl>(parseRouteConfigurationFromYaml(yaml),\n                                                factory_context_, true);\n  EXPECT_FALSE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits());\n\n  yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n      include_vh_rate_limits: true\n      rate_limits:\n      - actions:\n        - remote_address: {}\n  )EOF\";\n\n  config_ptr = std::make_unique<TestConfigImpl>(parseRouteConfigurationFromYaml(yaml),\n                                                factory_context_, true);\n  EXPECT_TRUE(config_ptr->route(headers, 
0)->routeEntry()->includeVirtualHostRateLimits());\n}\n\n// When allow_origin: and allow_origin_regex: are removed, simply remove them\n// and the relevant checks below.\nTEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestVHostCorsConfig)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: \"default\"\n    domains: [\"*\"]\n    cors:\n      allow_origin: [\"test-origin\"]\n      allow_origin_regex:\n      - .*\\.envoyproxy\\.io\n      allow_origin_string_match:\n      - safe_regex:\n          google_re2: {}\n          regex: .*\\.envoyproxy\\.io\n      allow_methods: \"test-methods\"\n      allow_headers: \"test-headers\"\n      expose_headers: \"test-expose-headers\"\n      max_age: \"test-max-age\"\n      allow_credentials: true\n      filter_enabled:\n        runtime_key: \"cors.www.enabled\"\n        default_value:\n          numerator: 0\n          denominator: \"HUNDRED\"\n      shadow_enabled:\n        runtime_key: \"cors.www.shadow_enabled\"\n        default_value:\n          numerator: 100\n          denominator: \"HUNDRED\"\n    routes:\n      - match:\n          prefix: \"/api\"\n        route:\n          cluster: \"ats\"\n)EOF\";\n\n  Runtime::MockSnapshot snapshot;\n  EXPECT_CALL(snapshot, featureEnabled(\"cors.www.enabled\",\n                                       Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .WillOnce(Return(false));\n  EXPECT_CALL(snapshot, featureEnabled(\"cors.www.shadow_enabled\",\n                                       Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .WillOnce(Return(true));\n  EXPECT_CALL(factory_context_.runtime_loader_, snapshot()).WillRepeatedly(ReturnRef(snapshot));\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false);\n\n  const Router::CorsPolicy* cors_policy =\n      config.route(genHeaders(\"api.lyft.com\", \"/api\", \"GET\"), 0)\n          ->routeEntry()\n          ->virtualHost()\n          .corsPolicy();\n\n  
EXPECT_EQ(cors_policy->enabled(), false);\n  EXPECT_EQ(cors_policy->shadowEnabled(), true);\n  EXPECT_EQ(3, cors_policy->allowOrigins().size());\n  EXPECT_EQ(cors_policy->allowMethods(), \"test-methods\");\n  EXPECT_EQ(cors_policy->allowHeaders(), \"test-headers\");\n  EXPECT_EQ(cors_policy->exposeHeaders(), \"test-expose-headers\");\n  EXPECT_EQ(cors_policy->maxAge(), \"test-max-age\");\n  EXPECT_EQ(cors_policy->allowCredentials(), true);\n}\n\nTEST_F(RoutePropertyTest, TestRouteCorsConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: \"default\"\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/api\"\n        route:\n          cluster: \"ats\"\n          cors:\n            allow_origin_string_match:\n            - exact: \"test-origin\"\n            allow_methods: \"test-methods\"\n            allow_headers: \"test-headers\"\n            expose_headers: \"test-expose-headers\"\n            max_age: \"test-max-age\"\n            allow_credentials: true\n            filter_enabled:\n              runtime_key: \"cors.www.enabled\"\n              default_value:\n                numerator: 0\n                denominator: \"HUNDRED\"\n            shadow_enabled:\n              runtime_key: \"cors.www.shadow_enabled\"\n              default_value:\n                numerator: 100\n                denominator: \"HUNDRED\"\n)EOF\";\n\n  Runtime::MockSnapshot snapshot;\n  EXPECT_CALL(snapshot, featureEnabled(\"cors.www.enabled\",\n                                       Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .WillOnce(Return(false));\n  EXPECT_CALL(snapshot, featureEnabled(\"cors.www.shadow_enabled\",\n                                       Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .WillOnce(Return(true));\n  EXPECT_CALL(factory_context_.runtime_loader_, snapshot()).WillRepeatedly(ReturnRef(snapshot));\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, 
false);\n\n  const Router::CorsPolicy* cors_policy =\n      config.route(genHeaders(\"api.lyft.com\", \"/api\", \"GET\"), 0)->routeEntry()->corsPolicy();\n\n  EXPECT_EQ(cors_policy->enabled(), false);\n  EXPECT_EQ(cors_policy->shadowEnabled(), true);\n  EXPECT_EQ(1, cors_policy->allowOrigins().size());\n  EXPECT_EQ(cors_policy->allowMethods(), \"test-methods\");\n  EXPECT_EQ(cors_policy->allowHeaders(), \"test-headers\");\n  EXPECT_EQ(cors_policy->exposeHeaders(), \"test-expose-headers\");\n  EXPECT_EQ(cors_policy->maxAge(), \"test-max-age\");\n  EXPECT_EQ(cors_policy->allowCredentials(), true);\n}\n\n// When allow-origin: is removed, this test can be removed.\nTEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TTestVHostCorsLegacyConfig)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: default\n  domains:\n  - \"*\"\n  cors:\n    allow_origin:\n    - test-origin\n    allow_methods: test-methods\n    allow_headers: test-headers\n    expose_headers: test-expose-headers\n    max_age: test-max-age\n    allow_credentials: true\n  routes:\n  - match:\n      prefix: \"/api\"\n    route:\n      cluster: ats\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  const Router::CorsPolicy* cors_policy =\n      config.route(genHeaders(\"api.lyft.com\", \"/api\", \"GET\"), 0)\n          ->routeEntry()\n          ->virtualHost()\n          .corsPolicy();\n\n  EXPECT_EQ(cors_policy->enabled(), true);\n  EXPECT_EQ(cors_policy->shadowEnabled(), false);\n  EXPECT_EQ(1, cors_policy->allowOrigins().size());\n  EXPECT_EQ(cors_policy->allowMethods(), \"test-methods\");\n  EXPECT_EQ(cors_policy->allowHeaders(), \"test-headers\");\n  EXPECT_EQ(cors_policy->exposeHeaders(), \"test-expose-headers\");\n  EXPECT_EQ(cors_policy->maxAge(), \"test-max-age\");\n  EXPECT_EQ(cors_policy->allowCredentials(), true);\n}\n\n// When allow-origin: is removed, this test can be removed.\nTEST_F(RoutePropertyTest, 
DEPRECATED_FEATURE_TEST(TestRouteCorsLegacyConfig)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: default\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/api\"\n    route:\n      cluster: ats\n      cors:\n        allow_origin:\n        - test-origin\n        allow_methods: test-methods\n        allow_headers: test-headers\n        expose_headers: test-expose-headers\n        max_age: test-max-age\n        allow_credentials: true\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  const Router::CorsPolicy* cors_policy =\n      config.route(genHeaders(\"api.lyft.com\", \"/api\", \"GET\"), 0)->routeEntry()->corsPolicy();\n\n  EXPECT_EQ(cors_policy->enabled(), true);\n  EXPECT_EQ(cors_policy->shadowEnabled(), false);\n  EXPECT_EQ(1, cors_policy->allowOrigins().size());\n  EXPECT_EQ(cors_policy->allowMethods(), \"test-methods\");\n  EXPECT_EQ(cors_policy->allowHeaders(), \"test-headers\");\n  EXPECT_EQ(cors_policy->exposeHeaders(), \"test-expose-headers\");\n  EXPECT_EQ(cors_policy->maxAge(), \"test-max-age\");\n  EXPECT_EQ(cors_policy->allowCredentials(), true);\n}\n\nTEST_F(RoutePropertyTest, TestBadCorsConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: default\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/api\"\n    route:\n      cluster: ats\n      cors:\n        enabled: 0\n)EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Unable to parse JSON as proto .*: invalid value 0 for type TYPE_BOOL\");\n}\n\nTEST_F(RouteMatcherTest, Decorator) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: local_service\n  domains:\n  - \"*\"\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: foo\n    decorator:\n      operation: myFoo\n      propagate: false\n  - match:\n      prefix: \"/bar\"\n    route:\n      cluster: bar\n  
)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/foo\", \"GET\");\n    Router::RouteConstSharedPtr route = config.route(headers, 0);\n    Tracing::MockSpan span;\n    EXPECT_CALL(span, setOperation(Eq(\"myFoo\")));\n    route->decorator()->apply(span);\n    EXPECT_EQ(false, route->decorator()->propagate());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/bar\", \"GET\");\n    Router::RouteConstSharedPtr route = config.route(headers, 0);\n    EXPECT_EQ(nullptr, route->decorator());\n  }\n}\n\nclass CustomRequestHeadersTest : public testing::Test, public ConfigImplTestBase {};\n\nTEST_F(CustomRequestHeadersTest, AddNewHeader) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - lyft.com\n  - www.lyft.com\n  - w.lyft.com\n  - ww.lyft.com\n  - wwww.lyft.com\n  request_headers_to_add:\n  - header:\n      key: x-client-ip\n      value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  routes:\n  - match:\n      prefix: \"/new_endpoint\"\n    route:\n      prefix_rewrite: \"/api/new_endpoint\"\n      cluster: www2\n    request_headers_to_add:\n    - header:\n        key: x-client-ip\n        value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\nrequest_headers_to_add:\n- header:\n    key: x-client-ip\n    value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  )EOF\";\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/new_endpoint/foo\", \"GET\");\n  const RouteEntry* route = config.route(headers, 0)->routeEntry();\n  route->finalizeRequestHeaders(headers, stream_info, true);\n  EXPECT_EQ(\"127.0.0.1\", headers.get_(\"x-client-ip\"));\n}\n\nTEST_F(CustomRequestHeadersTest, CustomHeaderWrongFormat) {\n  
const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - lyft.com\n  - www.lyft.com\n  - w.lyft.com\n  - ww.lyft.com\n  - wwww.lyft.com\n  request_headers_to_add:\n  - header:\n      key: x-client-ip\n      value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  routes:\n  - match:\n      prefix: \"/new_endpoint\"\n    route:\n      prefix_rewrite: \"/api/new_endpoint\"\n      cluster: www2\n    request_headers_to_add:\n    - header:\n        key: x-client-ip\n        value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT\"\nrequest_headers_to_add:\n- header:\n    key: x-client-ip\n    value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT\"\n  )EOF\";\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n      EnvoyException,\n      \"Invalid header configuration. Un-terminated variable expression \"\n      \"'DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT'\");\n}\n\nTEST(MetadataMatchCriteriaImpl, Create) {\n  auto v1 = ProtobufWkt::Value();\n  v1.set_string_value(\"v1\");\n  auto v2 = ProtobufWkt::Value();\n  v2.set_number_value(2.0);\n  auto v3 = ProtobufWkt::Value();\n  v3.set_bool_value(true);\n\n  auto metadata_struct = ProtobufWkt::Struct();\n  auto mutable_fields = metadata_struct.mutable_fields();\n  mutable_fields->insert({\"a\", v1});\n  mutable_fields->insert({\"b\", v2});\n  mutable_fields->insert({\"c\", v3});\n\n  auto matches = MetadataMatchCriteriaImpl(metadata_struct);\n\n  EXPECT_EQ(matches.metadataMatchCriteria().size(), 3);\n  auto it = matches.metadataMatchCriteria().begin();\n  EXPECT_EQ((*it)->name(), \"a\");\n  EXPECT_EQ((*it)->value().value().string_value(), \"v1\");\n  it++;\n\n  EXPECT_EQ((*it)->name(), \"b\");\n  EXPECT_EQ((*it)->value().value().number_value(), 2.0);\n  it++;\n\n  EXPECT_EQ((*it)->name(), \"c\");\n  EXPECT_EQ((*it)->value().value().bool_value(), true);\n}\n\nTEST(MetadataMatchCriteriaImpl, 
Merge) {\n  auto pv1 = ProtobufWkt::Value();\n  pv1.set_string_value(\"v1\");\n  auto pv2 = ProtobufWkt::Value();\n  pv2.set_number_value(2.0);\n  auto pv3 = ProtobufWkt::Value();\n  pv3.set_bool_value(true);\n\n  auto parent_struct = ProtobufWkt::Struct();\n  auto parent_fields = parent_struct.mutable_fields();\n  parent_fields->insert({\"a\", pv1});\n  parent_fields->insert({\"b\", pv2});\n  parent_fields->insert({\"c\", pv3});\n\n  auto parent_matches = MetadataMatchCriteriaImpl(parent_struct);\n\n  auto v1 = ProtobufWkt::Value();\n  v1.set_string_value(\"override1\");\n  auto v2 = ProtobufWkt::Value();\n  v2.set_string_value(\"v2\");\n  auto v3 = ProtobufWkt::Value();\n  v3.set_string_value(\"override3\");\n\n  auto metadata_struct = ProtobufWkt::Struct();\n  auto mutable_fields = metadata_struct.mutable_fields();\n  mutable_fields->insert({\"a\", v1});\n  mutable_fields->insert({\"b++\", v2});\n  mutable_fields->insert({\"c\", v3});\n\n  MetadataMatchCriteriaConstPtr matches = parent_matches.mergeMatchCriteria(metadata_struct);\n\n  EXPECT_EQ(matches->metadataMatchCriteria().size(), 4);\n  auto it = matches->metadataMatchCriteria().begin();\n  EXPECT_EQ((*it)->name(), \"a\");\n  EXPECT_EQ((*it)->value().value().string_value(), \"override1\");\n  it++;\n\n  EXPECT_EQ((*it)->name(), \"b\");\n  EXPECT_EQ((*it)->value().value().number_value(), 2.0);\n  it++;\n\n  EXPECT_EQ((*it)->name(), \"b++\");\n  EXPECT_EQ((*it)->value().value().string_value(), \"v2\");\n  it++;\n\n  EXPECT_EQ((*it)->name(), \"c\");\n  EXPECT_EQ((*it)->value().value().string_value(), \"override3\");\n}\n\nTEST(MetadataMatchCriteriaImpl, Filter) {\n  auto pv1 = ProtobufWkt::Value();\n  pv1.set_string_value(\"v1\");\n  auto pv2 = ProtobufWkt::Value();\n  pv2.set_number_value(2.0);\n  auto pv3 = ProtobufWkt::Value();\n  pv3.set_bool_value(true);\n\n  auto metadata_matches = ProtobufWkt::Struct();\n  auto parent_fields = metadata_matches.mutable_fields();\n  parent_fields->insert({\"a\", pv1});\n  
parent_fields->insert({\"b\", pv2});\n  parent_fields->insert({\"c\", pv3});\n\n  auto matches = MetadataMatchCriteriaImpl(metadata_matches);\n  auto filtered_matches1 = matches.filterMatchCriteria({\"b\", \"c\"});\n  auto filtered_matches2 = matches.filterMatchCriteria({\"a\"});\n\n  EXPECT_EQ(matches.metadataMatchCriteria().size(), 3);\n  EXPECT_EQ(filtered_matches1->metadataMatchCriteria().size(), 2);\n  EXPECT_EQ(filtered_matches2->metadataMatchCriteria().size(), 1);\n\n  EXPECT_EQ(filtered_matches1->metadataMatchCriteria()[0]->name(), \"b\");\n  EXPECT_EQ(filtered_matches1->metadataMatchCriteria()[0]->value().value().number_value(), 2.0);\n  EXPECT_EQ(filtered_matches1->metadataMatchCriteria()[1]->name(), \"c\");\n  EXPECT_EQ(filtered_matches1->metadataMatchCriteria()[1]->value().value().bool_value(), true);\n\n  EXPECT_EQ(filtered_matches2->metadataMatchCriteria()[0]->name(), \"a\");\n  EXPECT_EQ(filtered_matches2->metadataMatchCriteria()[0]->value().value().string_value(), \"v1\");\n}\n\nclass RouteEntryMetadataMatchTest : public testing::Test, public ConfigImplTestBase {};\n\nTEST_F(RouteEntryMetadataMatchTest, ParsesMetadata) {\n  auto route_config = envoy::config::route::v3::RouteConfiguration();\n  auto* vhost = route_config.add_virtual_hosts();\n  vhost->set_name(\"vhost\");\n  vhost->add_domains(\"www.lyft.com\");\n\n  // route provides metadata matches combined from RouteAction and WeightedCluster\n  auto* route = vhost->add_routes();\n  route->mutable_match()->set_prefix(\"/both\");\n  auto* route_action = route->mutable_route();\n  auto* weighted_cluster = route_action->mutable_weighted_clusters()->add_clusters();\n  weighted_cluster->set_name(\"www1\");\n  weighted_cluster->mutable_weight()->set_value(100);\n  Envoy::Config::Metadata::mutableMetadataValue(*weighted_cluster->mutable_metadata_match(),\n                                                Envoy::Config::MetadataFilters::get().ENVOY_LB,\n                                                
\"r1_wc_key\")\n      .set_string_value(\"r1_wc_value\");\n  Envoy::Config::Metadata::mutableMetadataValue(*route_action->mutable_metadata_match(),\n                                                Envoy::Config::MetadataFilters::get().ENVOY_LB,\n                                                \"r1_key\")\n      .set_string_value(\"r1_value\");\n\n  // route provides metadata matches from WeightedCluster only\n  route = vhost->add_routes();\n  route->mutable_match()->set_prefix(\"/cluster-only\");\n  route_action = route->mutable_route();\n  weighted_cluster = route_action->mutable_weighted_clusters()->add_clusters();\n  weighted_cluster->set_name(\"www2\");\n  weighted_cluster->mutable_weight()->set_value(100);\n  Envoy::Config::Metadata::mutableMetadataValue(*weighted_cluster->mutable_metadata_match(),\n                                                Envoy::Config::MetadataFilters::get().ENVOY_LB,\n                                                \"r2_wc_key\")\n      .set_string_value(\"r2_wc_value\");\n\n  // route provides metadata matches from RouteAction only\n  route = vhost->add_routes();\n  route->mutable_match()->set_prefix(\"/route-only\");\n  route_action = route->mutable_route();\n  route_action->set_cluster(\"www3\");\n  Envoy::Config::Metadata::mutableMetadataValue(*route_action->mutable_metadata_match(),\n                                                Envoy::Config::MetadataFilters::get().ENVOY_LB,\n                                                \"r3_key\")\n      .set_string_value(\"r3_value\");\n\n  // route provides metadata matches from RouteAction (but WeightedCluster exists)\n  route = vhost->add_routes();\n  route->mutable_match()->set_prefix(\"/cluster-passthrough\");\n  route_action = route->mutable_route();\n  weighted_cluster = route_action->mutable_weighted_clusters()->add_clusters();\n  weighted_cluster->set_name(\"www4\");\n  weighted_cluster->mutable_weight()->set_value(100);\n  
Envoy::Config::Metadata::mutableMetadataValue(*route_action->mutable_metadata_match(),\n                                                Envoy::Config::MetadataFilters::get().ENVOY_LB,\n                                                \"r4_key\")\n      .set_string_value(\"r4_value\");\n\n  TestConfigImpl config(route_config, factory_context_, true);\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"www.lyft.com\", \"/both\", true, true);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n\n    auto* route_entry = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"www1\", route_entry->clusterName());\n    auto* matches = route_entry->metadataMatchCriteria();\n    EXPECT_NE(matches, nullptr);\n    EXPECT_EQ(matches->metadataMatchCriteria().size(), 2);\n    EXPECT_EQ(matches->metadataMatchCriteria().at(0)->name(), \"r1_key\");\n    EXPECT_EQ(matches->metadataMatchCriteria().at(1)->name(), \"r1_wc_key\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"www.lyft.com\", \"/cluster-only\", true, true);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n\n    auto* route_entry = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"www2\", route_entry->clusterName());\n    auto* matches = route_entry->metadataMatchCriteria();\n    EXPECT_NE(matches, nullptr);\n    EXPECT_EQ(matches->metadataMatchCriteria().size(), 1);\n    EXPECT_EQ(matches->metadataMatchCriteria().at(0)->name(), \"r2_wc_key\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"www.lyft.com\", \"/route-only\", true, true);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n\n    auto* route_entry = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"www3\", route_entry->clusterName());\n    auto* matches = route_entry->metadataMatchCriteria();\n    EXPECT_NE(matches, nullptr);\n    
EXPECT_EQ(matches->metadataMatchCriteria().size(), 1);\n    EXPECT_EQ(matches->metadataMatchCriteria().at(0)->name(), \"r3_key\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"www.lyft.com\", \"/cluster-passthrough\", true, true);\n    EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry());\n\n    auto* route_entry = config.route(headers, 0)->routeEntry();\n    EXPECT_EQ(\"www4\", route_entry->clusterName());\n    auto* matches = route_entry->metadataMatchCriteria();\n    EXPECT_NE(matches, nullptr);\n    EXPECT_EQ(matches->metadataMatchCriteria().size(), 1);\n    EXPECT_EQ(matches->metadataMatchCriteria().at(0)->name(), \"r4_key\");\n  }\n}\n\nclass ConfigUtilityTest : public testing::Test, public ConfigImplTestBase {};\n\nTEST_F(ConfigUtilityTest, ParseResponseCode) {\n  const std::vector<\n      std::pair<envoy::config::route::v3::RedirectAction::RedirectResponseCode, Http::Code>>\n      test_set = {\n          std::make_pair(envoy::config::route::v3::RedirectAction::MOVED_PERMANENTLY,\n                         Http::Code::MovedPermanently),\n          std::make_pair(envoy::config::route::v3::RedirectAction::FOUND, Http::Code::Found),\n          std::make_pair(envoy::config::route::v3::RedirectAction::SEE_OTHER, Http::Code::SeeOther),\n          std::make_pair(envoy::config::route::v3::RedirectAction::TEMPORARY_REDIRECT,\n                         Http::Code::TemporaryRedirect),\n          std::make_pair(envoy::config::route::v3::RedirectAction::PERMANENT_REDIRECT,\n                         Http::Code::PermanentRedirect)};\n  for (const auto& test_case : test_set) {\n    EXPECT_EQ(test_case.second, ConfigUtility::parseRedirectResponseCode(test_case.first));\n  }\n}\n\nTEST_F(ConfigUtilityTest, ParseDirectResponseBody) {\n  envoy::config::route::v3::Route route;\n  EXPECT_EQ(EMPTY_STRING, ConfigUtility::parseDirectResponseBody(route, *api_));\n\n  
route.mutable_direct_response()->mutable_body()->set_filename(\"missing_file\");\n  EXPECT_THROW_WITH_MESSAGE(ConfigUtility::parseDirectResponseBody(route, *api_), EnvoyException,\n                            \"response body file missing_file does not exist\");\n\n  std::string body(4097, '*');\n  auto filename = TestEnvironment::writeStringToFileForTest(\"body\", body);\n  route.mutable_direct_response()->mutable_body()->set_filename(filename);\n  std::string expected_message(\"response body file \" + filename +\n                               \" size is 4097 bytes; maximum is 4096\");\n  EXPECT_THROW_WITH_MESSAGE(ConfigUtility::parseDirectResponseBody(route, *api_), EnvoyException,\n                            expected_message);\n}\n\nTEST_F(RouteConfigurationV2, RedirectCode) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: redirect\n    domains: [redirect.lyft.com]\n    routes:\n      - match: { prefix: \"/\"}\n        redirect: { host_redirect: new.lyft.com, response_code: TEMPORARY_REDIRECT }\n\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(nullptr, config.route(genRedirectHeaders(\"www.foo.com\", \"/foo\", true, true), 0));\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/foo\", false, false);\n    EXPECT_EQ(\"http://new.lyft.com/foo\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n    EXPECT_EQ(Http::Code::TemporaryRedirect,\n              config.route(headers, 0)->directResponseEntry()->responseCode());\n  }\n}\n\n// Test the parsing of direct response configurations within routes.\nTEST_F(RouteConfigurationV2, DirectResponse) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: direct\n    domains: [example.com]\n    routes:\n      - match: { prefix: \"/\"}\n        direct_response: { status: 200, body: { inline_string: \"content\" } }\n  )EOF\";\n\n  
TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  const auto* direct_response =\n      config.route(genHeaders(\"example.com\", \"/\", \"GET\"), 0)->directResponseEntry();\n  EXPECT_NE(nullptr, direct_response);\n  EXPECT_EQ(Http::Code::OK, direct_response->responseCode());\n  EXPECT_STREQ(\"content\", direct_response->responseBody().c_str());\n}\n\n// Test the parsing of a direct response configuration where the response body is too large.\nTEST_F(RouteConfigurationV2, DirectResponseTooLarge) {\n  std::string response_body(4097, 'A');\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: direct\n    domains: [example.com]\n    routes:\n      - match: { prefix: \"/\"}\n        direct_response:\n          status: 200\n          body:\n            inline_string: )EOF\" +\n                           response_body + \"\\n\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl invalid_config(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n      EnvoyException, \"response body size is 4097 bytes; maximum is 4096\");\n}\n\nvoid checkPathMatchCriterion(const Route* route, const std::string& expected_matcher,\n                             PathMatchType expected_type) {\n  ASSERT_NE(nullptr, route);\n  const auto route_entry = route->routeEntry();\n  ASSERT_NE(nullptr, route_entry);\n  const auto& match_criterion = route_entry->pathMatchCriterion();\n  EXPECT_EQ(expected_matcher, match_criterion.matcher());\n  EXPECT_EQ(expected_type, match_criterion.matchType());\n}\n\n// Test loading broken config throws EnvoyException.\nTEST_F(RouteConfigurationV2, BrokenTypedMetadata) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\"}\n        route: { cluster: www2 }\n        metadata: { filter_metadata: { com.bar.foo: { baz: test_value },\n                                       baz: {} } }\n  )EOF\";\n  BazFactory baz_factory;\n  
Registry::InjectFactory<HttpRouteTypedMetadataFactory> registered_factory(baz_factory);\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n      Envoy::EnvoyException, \"Cannot create a Baz when metadata is empty.\");\n}\n\nTEST_F(RouteConfigurationV2, RouteConfigGetters) {\n  const std::string yaml = R\"EOF(\nname: foo\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/rege[xy]\"\n        route: { cluster: ww2 }\n      - match: { path: \"/exact-path\" }\n        route: { cluster: ww2 }\n      - match: { prefix: \"/\"}\n        route: { cluster: www2 }\n        metadata: { filter_metadata: { com.bar.foo: { baz: test_value }, baz: {name: bluh} } }\n  )EOF\";\n  BazFactory baz_factory;\n  Registry::InjectFactory<HttpRouteTypedMetadataFactory> registered_factory(baz_factory);\n  const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  checkPathMatchCriterion(config.route(genHeaders(\"www.foo.com\", \"/regex\", \"GET\"), 0).get(),\n                          \"/rege[xy]\", PathMatchType::Regex);\n  checkPathMatchCriterion(config.route(genHeaders(\"www.foo.com\", \"/exact-path\", \"GET\"), 0).get(),\n                          \"/exact-path\", PathMatchType::Exact);\n  const auto route = config.route(genHeaders(\"www.foo.com\", \"/\", \"GET\"), 0);\n  checkPathMatchCriterion(route.get(), \"/\", PathMatchType::Prefix);\n\n  const auto route_entry = route->routeEntry();\n  const auto& metadata = route_entry->metadata();\n  const auto& typed_metadata = route_entry->typedMetadata();\n\n  EXPECT_EQ(\"test_value\",\n            Envoy::Config::Metadata::metadataValue(&metadata, \"com.bar.foo\", \"baz\").string_value());\n  EXPECT_NE(nullptr, typed_metadata.get<Baz>(baz_factory.name()));\n  EXPECT_EQ(\"bluh\", typed_metadata.get<Baz>(baz_factory.name())->name);\n\n  
EXPECT_EQ(\"bar\", symbol_table_->toString(route_entry->virtualHost().statName()));\n  EXPECT_EQ(\"foo\", route_entry->virtualHost().routeConfig().name());\n}\n\nTEST_F(RouteConfigurationV2, RouteTracingConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/first\"\n        tracing:\n          client_sampling:\n            numerator: 1\n        route: { cluster: ww2 }\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/second\"\n        tracing:\n          overall_sampling:\n            numerator: 1\n        route: { cluster: ww2 }\n      - match: { path: \"/third\" }\n        tracing:\n          client_sampling:\n            numerator: 1\n          random_sampling:\n            numerator: 200\n            denominator: 1\n          overall_sampling:\n            numerator: 3\n          custom_tags:\n          - tag: ltag\n            literal:\n              value: lvalue\n          - tag: etag\n            environment:\n              name: E_TAG\n          - tag: rtag\n            request_header:\n              name: X-Tag\n          - tag: mtag\n            metadata:\n              kind: { route: {} }\n              metadata_key:\n                key: com.bar.foo\n                path: [ { key: xx }, { key: yy } ]\n        route: { cluster: ww2 }\n  )EOF\";\n  BazFactory baz_factory;\n  Registry::InjectFactory<HttpRouteTypedMetadataFactory> registered_factory(baz_factory);\n  const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  const auto route1 = config.route(genHeaders(\"www.foo.com\", \"/first\", \"GET\"), 0);\n  const auto route2 = config.route(genHeaders(\"www.foo.com\", \"/second\", \"GET\"), 0);\n  const auto route3 = config.route(genHeaders(\"www.foo.com\", \"/third\", \"GET\"), 0);\n\n  // Check default values for random and 
overall sampling\n  EXPECT_EQ(100, route1->tracingConfig()->getRandomSampling().numerator());\n  EXPECT_EQ(0, route1->tracingConfig()->getRandomSampling().denominator());\n  EXPECT_EQ(100, route1->tracingConfig()->getOverallSampling().numerator());\n  EXPECT_EQ(0, route1->tracingConfig()->getOverallSampling().denominator());\n\n  // Check default values for client sampling\n  EXPECT_EQ(100, route2->tracingConfig()->getClientSampling().numerator());\n  EXPECT_EQ(0, route2->tracingConfig()->getClientSampling().denominator());\n\n  EXPECT_EQ(1, route3->tracingConfig()->getClientSampling().numerator());\n  EXPECT_EQ(0, route3->tracingConfig()->getClientSampling().denominator());\n  EXPECT_EQ(200, route3->tracingConfig()->getRandomSampling().numerator());\n  EXPECT_EQ(1, route3->tracingConfig()->getRandomSampling().denominator());\n  EXPECT_EQ(3, route3->tracingConfig()->getOverallSampling().numerator());\n  EXPECT_EQ(0, route3->tracingConfig()->getOverallSampling().denominator());\n\n  std::vector<std::string> custom_tags{\"ltag\", \"etag\", \"rtag\", \"mtag\"};\n  const Tracing::CustomTagMap& map = route3->tracingConfig()->getCustomTags();\n  for (const std::string& custom_tag : custom_tags) {\n    EXPECT_NE(map.find(custom_tag), map.end());\n  }\n}\n\n// Test to check Prefix Rewrite for redirects\nTEST_F(RouteConfigurationV2, RedirectPrefixRewrite) {\n  std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: redirect\n    domains: [redirect.lyft.com]\n    routes:\n      - match: { prefix: \"/prefix\"}\n        redirect: { prefix_rewrite: \"/new/prefix\" }\n      - match: { path: \"/path/\" }\n        redirect: { prefix_rewrite: \"/new/path/\" }\n      - match: { prefix: \"/host/prefix\" }\n        redirect: { host_redirect: new.lyft.com, prefix_rewrite: \"/new/prefix\"}\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/[r][e][g][e][x].*\"\n        redirect: { prefix_rewrite: \"/new/regex-prefix/\" }\n      - match: { prefix: 
\"/http/prefix\"}\n        redirect: { prefix_rewrite: \"/https/prefix\" , https_redirect: true }\n      - match: { prefix: \"/ignore-this/\"}\n        redirect: { prefix_rewrite: \"/\" }\n      - match: { prefix: \"/ignore-this\"}\n        redirect: { prefix_rewrite: \"/\" }\n      - match: { prefix: \"/ignore-substring\"}\n        redirect: { prefix_rewrite: \"/\" }\n      - match: { prefix: \"/service-hello/\"}\n        redirect: { prefix_rewrite: \"/\" }\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(nullptr, config.route(genRedirectHeaders(\"www.foo.com\", \"/foo\", true, true), 0));\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/prefix/some/path/?lang=eng&con=US\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"http://redirect.lyft.com/new/prefix/some/path/?lang=eng&con=US\",\n              redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path/\", true, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path/\", redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/host/prefix/1\", true, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"https://new.lyft.com/new/prefix/1\", redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/regex/hello/\", false, false);\n    const DirectResponseEntry* redirect 
= config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"http://redirect.lyft.com/new/regex-prefix/\", redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/http/prefix/\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"https://redirect.lyft.com/https/prefix/\", redirect->newPath(headers));\n  }\n  {\n    // The following matches to the redirect action match value equals to `/ignore-this` instead of\n    // `/ignore-this/`.\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/ignore-this\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"http://redirect.lyft.com/\", redirect->newPath(headers));\n  }\n  {\n    // The following matches to the redirect action match value equals to `/ignore-this/` instead of\n    // `/ignore-this`.\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/ignore-this/\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"http://redirect.lyft.com/\", redirect->newPath(headers));\n  }\n  {\n    // The same as previous test request, the following matches to the redirect action match value\n    // equals to `/ignore-this/` instead of `/ignore-this`.\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\n        \"redirect.lyft.com\", \"/ignore-this/however/use/the/rest/of/this/path\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, 
true);\n    EXPECT_EQ(\"http://redirect.lyft.com/however/use/the/rest/of/this/path\",\n              redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/ignore-this/use/\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"http://redirect.lyft.com/use/\", redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/ignore-substringto/use/\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"http://redirect.lyft.com/to/use/\", redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/ignore-substring-to/use/\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"http://redirect.lyft.com/-to/use/\", redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/service-hello/a/b/c\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"http://redirect.lyft.com/a/b/c\", redirect->newPath(headers));\n  }\n}\n\nTEST_F(RouteConfigurationV2, PathRedirectQueryNotPreserved) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.preserve_query_string_in_path_redirects\", \"false\"}});\n\n  std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: redirect\n    domains: [redirect.lyft.com]\n    routes:\n      
- match: { path: \"/path/redirect/\"}\n        redirect: { path_redirect: \"/new/path-redirect/\" }\n      - match: { path: \"/path/redirect/strip-query/true\"}\n        redirect: { path_redirect: \"/new/path-redirect/\", strip_query: \"true\" }\n      - match: { path: \"/path/redirect/query\"}\n        redirect: { path_redirect: \"/new/path-redirect?foo=1\" }\n      - match: { path: \"/path/redirect/query-with-strip\"}\n        redirect: { path_redirect: \"/new/path-redirect?foo=2\", strip_query: \"true\" }\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  EXPECT_EQ(nullptr, config.route(genRedirectHeaders(\"www.foo.com\", \"/foo\", true, true), 0));\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path/redirect/?lang=eng&con=US\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect/\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\n        \"redirect.lyft.com\", \"/path/redirect/strip-query/true?lang=eng&con=US\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect/\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path/redirect/query\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect?foo=1\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path/redirect/query?bar=1\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect?foo=1\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl 
headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path/redirect/query-with-strip\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect?foo=2\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\n        \"redirect.lyft.com\", \"/path/redirect/query-with-strip?bar=1\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect?foo=2\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n}\n\n// Test to check Strip Query for redirect messages\nTEST_F(RouteConfigurationV2, RedirectStripQuery) {\n  std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: redirect\n    domains: [redirect.lyft.com]\n    routes:\n      - match: { prefix: \"/query/true\"}\n        redirect: { prefix_rewrite: \"/new/prefix\", strip_query: \"true\" }\n      - match: { prefix: \"/query/false\" }\n        redirect: { prefix_rewrite: \"/new/prefix\", strip_query: \"false\" }\n      - match: { path: \"/host/query-default\" }\n        redirect: { host_redirect: new.lyft.com }\n      - match: { path: \"/path/redirect/\"}\n        redirect: { path_redirect: \"/new/path-redirect/\" }\n      - match: { path: \"/path/redirect/strip-query/true\"}\n        redirect: { path_redirect: \"/new/path-redirect/\", strip_query: \"true\" }\n      - match: { path: \"/path/redirect/query\"}\n        redirect: { path_redirect: \"/new/path-redirect?foo=1\" }\n      - match: { path: \"/path/redirect/query-with-strip\"}\n        redirect: { path_redirect: \"/new/path-redirect?foo=2\", strip_query: \"true\" }\n      - match: { prefix: \"/all/combinations\"}\n        redirect: { host_redirect: \"new.lyft.com\", prefix_rewrite: \"/new/prefix\" , https_redirect: \"true\", strip_query: \"true\" }\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  EXPECT_EQ(nullptr, 
config.route(genRedirectHeaders(\"www.foo.com\", \"/foo\", true, true), 0));\n\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/query/true?lang=eng&con=US\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"http://redirect.lyft.com/new/prefix\", redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\n        \"redirect.lyft.com\", \"/query/false/some/path?lang=eng&con=US\", true, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/prefix/some/path?lang=eng&con=US\",\n              redirect->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/host/query-default?lang=eng&con=US\", true, false);\n    EXPECT_EQ(\"https://new.lyft.com/host/query-default?lang=eng&con=US\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path/redirect/?lang=eng&con=US\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect/?lang=eng&con=US\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\n        \"redirect.lyft.com\", \"/path/redirect/strip-query/true?lang=eng&con=US\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect/\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path/redirect/query\", true, 
false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect?foo=1\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path/redirect/query?bar=1\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect?foo=1\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"redirect.lyft.com\", \"/path/redirect/query-with-strip\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect?foo=2\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\n        \"redirect.lyft.com\", \"/path/redirect/query-with-strip?bar=1\", true, false);\n    EXPECT_EQ(\"https://redirect.lyft.com/new/path-redirect?foo=2\",\n              config.route(headers, 0)->directResponseEntry()->newPath(headers));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genRedirectHeaders(\n        \"redirect.lyft.com\", \"/all/combinations/here/we/go?key=value\", false, false);\n    const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry();\n    redirect->rewritePathHeader(headers, true);\n    EXPECT_EQ(\"https://new.lyft.com/new/prefix/here/we/go\", redirect->newPath(headers));\n  }\n}\n\nTEST_F(RouteMatcherTest, HeaderMatchedRoutingV2) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: local_service\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header\n              exact_match: test\n        route:\n          cluster: local_service_with_headers\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header_multiple1\n              exact_match: test1\n 
           - name: test_header_multiple2\n              exact_match: test2\n        route:\n          cluster: local_service_with_multiple_headers\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header_presence\n        route:\n          cluster: local_service_with_empty_headers\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header_pattern\n              safe_regex_match:\n                google_re2: {}\n                regex: \"^user=test-\\\\d+$\"\n        route:\n          cluster: local_service_with_header_pattern_set_regex\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header_pattern\n              exact_match: \"^customer=test-\\\\d+$\"\n        route:\n          cluster: local_service_with_header_pattern_unset_regex\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header_range\n              range_match:\n                 start: -9223372036854775808\n                 end: -10\n        route:\n          cluster: local_service_with_header_range_test1\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header_multiple_range\n              range_match:\n                 start: -10\n                 end: 1\n            - name: test_header_multiple_exact\n              exact_match: test\n        route:\n          cluster: local_service_with_header_range_test2\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header_range\n              range_match:\n                 start: 1\n                 end: 10\n        route:\n          cluster: local_service_with_header_range_test3\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header_range\n              range_match:\n                 start: 9223372036854775801\n                 end: 9223372036854775807\n        route:\n          cluster: 
local_service_with_header_range_test4\n      - match:\n          prefix: \"/\"\n          headers:\n            - name: test_header_range\n              exact_match: \"9223372036854775807\"\n        route:\n          cluster: local_service_with_header_range_test5\n      - match:\n          prefix: \"/\"\n        route:\n          cluster: local_service_without_headers\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    EXPECT_EQ(\"local_service_without_headers\",\n              config.route(genHeaders(\"www.lyft.com\", \"/\", \"GET\"), 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header\", \"test\");\n    EXPECT_EQ(\"local_service_with_headers\", config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_multiple1\", \"test1\");\n    headers.addCopy(\"test_header_multiple2\", \"test2\");\n    EXPECT_EQ(\"local_service_with_multiple_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"non_existent_header\", \"foo\");\n    EXPECT_EQ(\"local_service_without_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_presence\", \"test\");\n    EXPECT_EQ(\"local_service_with_empty_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_pattern\", \"user=test-1223\");\n    
EXPECT_EQ(\"local_service_with_header_pattern_set_regex\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_pattern\", \"customer=test-1223\");\n    EXPECT_EQ(\"local_service_without_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_range\", \"-9223372036854775808\");\n    EXPECT_EQ(\"local_service_with_header_range_test1\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_multiple_range\", \"-9\");\n    headers.addCopy(\"test_header_multiple_exact\", \"test\");\n    EXPECT_EQ(\"local_service_with_header_range_test2\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_range\", \"9\");\n    EXPECT_EQ(\"local_service_with_header_range_test3\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_range\", \"9223372036854775807\");\n    EXPECT_EQ(\"local_service_with_header_range_test5\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_multiple_range\", \"-9\");\n    EXPECT_EQ(\"local_service_without_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n  {\n    Http::TestRequestHeaderMapImpl 
headers = genHeaders(\"www.lyft.com\", \"/\", \"GET\");\n    headers.addCopy(\"test_header_range\", \"19\");\n    EXPECT_EQ(\"local_service_without_headers\",\n              config.route(headers, 0)->routeEntry()->clusterName());\n  }\n}\n\n// Test Route Matching based on connection Tls Context.\n// Validate configured and default settings are routed to the correct cluster.\nTEST_F(RouteMatcherTest, TlsContextMatching) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: local_service\n    domains: [\"*\"]\n    routes:\n      - match:\n          prefix: \"/peer-cert-test\"\n          tls_context:\n            presented: true\n        route:\n          cluster: server_peer-cert-presented\n      - match:\n          prefix: \"/peer-cert-test\"\n          tls_context:\n            presented: false\n        route:\n          cluster: server_peer-cert-not-presented\n      - match:\n          prefix: \"/peer-validated-cert-test\"\n          tls_context:\n            validated: true\n        route:\n          cluster: server_peer-cert-validated\n      - match:\n          prefix: \"/peer-validated-cert-test\"\n          tls_context:\n            validated: false\n        route:\n          cluster: server_peer-cert-not-validated\n      - match:\n          prefix: \"/peer-cert-no-tls-context-match\"\n        route:\n          cluster: server_peer-cert-no-tls-context-match\n      - match:\n          prefix: \"/\"\n        route:\n          cluster: local_service_without_headers\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true));\n    EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true));\n    EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/peer-cert-test\", \"GET\");\n    EXPECT_EQ(\"server_peer-cert-presented\",\n              config.route(headers, stream_info, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(false));\n    EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n    Http::TestRequestHeaderMapImpl headers = genHeaders(\"www.lyft.com\", \"/peer-cert-test\", \"GET\");\n    EXPECT_EQ(\"server_peer-cert-not-presented\",\n              config.route(headers, stream_info, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(false));\n    EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/peer-cert-no-tls-context-match\", \"GET\");\n    EXPECT_EQ(\"server_peer-cert-no-tls-context-match\",\n              config.route(headers, stream_info, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true));\n    EXPECT_CALL(*connection_info, 
peerCertificateValidated()).WillRepeatedly(Return(true));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/peer-cert-no-tls-context-match\", \"GET\");\n    EXPECT_EQ(\"server_peer-cert-no-tls-context-match\",\n              config.route(headers, stream_info, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true));\n    EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/peer-validated-cert-test\", \"GET\");\n    EXPECT_EQ(\"server_peer-cert-validated\",\n              config.route(headers, stream_info, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true));\n    EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(false));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/peer-validated-cert-test\", \"GET\");\n    EXPECT_EQ(\"server_peer-cert-not-validated\",\n              config.route(headers, stream_info, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, 
peerCertificatePresented()).WillRepeatedly(Return(true));\n    EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(false));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/peer-cert-no-tls-context-match\", \"GET\");\n    EXPECT_EQ(\"server_peer-cert-no-tls-context-match\",\n              config.route(headers, stream_info, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n    EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true));\n    EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true));\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/peer-cert-no-tls-context-match\", \"GET\");\n    EXPECT_EQ(\"server_peer-cert-no-tls-context-match\",\n              config.route(headers, stream_info, 0)->routeEntry()->clusterName());\n  }\n\n  {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    std::shared_ptr<Ssl::MockConnectionInfo> connection_info;\n    EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n    Http::TestRequestHeaderMapImpl headers =\n        genHeaders(\"www.lyft.com\", \"/peer-cert-no-tls-context-match\", \"GET\");\n    EXPECT_EQ(\"server_peer-cert-no-tls-context-match\",\n              config.route(headers, stream_info, 0)->routeEntry()->clusterName());\n  }\n}\n\nTEST_F(RouteConfigurationV2, RegexPrefixWithNoRewriteWorksWhenPathChanged) {\n\n  // Setup regex route entry. 
the regex is trivial, that's ok as we only want to test that\n  // path change works.\n  std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [regex.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route: { cluster: some-cluster }\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n  {\n    // Get our regex route entry\n    Http::TestRequestHeaderMapImpl headers =\n        genRedirectHeaders(\"regex.lyft.com\", \"/regex\", true, false);\n    const RouteEntry* route_entry = config.route(headers, 0)->routeEntry();\n\n    // simulate a filter changing the path\n    headers.remove(\":path\");\n    headers.addCopy(\":path\", \"/not-the-original-regex\");\n\n    // no re-write was specified; so this should not throw\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    EXPECT_NO_THROW(route_entry->finalizeRequestHeaders(headers, stream_info, false));\n  }\n}\n\nTEST_F(RouteConfigurationV2, NoIdleTimeout) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const RouteEntry* route_entry = config.route(headers, 0)->routeEntry();\n  EXPECT_EQ(absl::nullopt, route_entry->idleTimeout());\n}\n\nTEST_F(RouteConfigurationV2, ZeroIdleTimeout) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: 
some-cluster\n          idle_timeout: 0s\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const RouteEntry* route_entry = config.route(headers, 0)->routeEntry();\n  EXPECT_EQ(0, route_entry->idleTimeout().value().count());\n}\n\nTEST_F(RouteConfigurationV2, ExplicitIdleTimeout) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          idle_timeout: 7s\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const RouteEntry* route_entry = config.route(headers, 0)->routeEntry();\n  EXPECT_EQ(7 * 1000, route_entry->idleTimeout().value().count());\n}\n\nTEST_F(RouteConfigurationV2, RetriableStatusCodes) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          retry_policy:\n            retriable_status_codes: [100, 200]\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy();\n  const std::vector<uint32_t> expected_codes{100, 200};\n  EXPECT_EQ(expected_codes, retry_policy.retriableStatusCodes());\n}\n\nTEST_F(RouteConfigurationV2, RetriableHeaders) {\n  const std::string yaml = 
R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          retry_policy:\n            retriable_headers:\n            - name: \":status\"\n              exact_match: \"500\"\n            - name: X-Upstream-Pushback\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy();\n  ASSERT_EQ(2, retry_policy.retriableHeaders().size());\n\n  Http::TestResponseHeaderMapImpl expected_0{{\":status\", \"500\"}};\n  Http::TestResponseHeaderMapImpl unexpected_0{{\":status\", \"200\"}};\n  Http::TestResponseHeaderMapImpl expected_1{{\"x-upstream-pushback\", \"bar\"}};\n  Http::TestResponseHeaderMapImpl unexpected_1{{\"x-test\", \"foo\"}};\n\n  EXPECT_TRUE(retry_policy.retriableHeaders()[0]->matchesHeaders(expected_0));\n  EXPECT_FALSE(retry_policy.retriableHeaders()[0]->matchesHeaders(unexpected_0));\n  EXPECT_TRUE(retry_policy.retriableHeaders()[1]->matchesHeaders(expected_1));\n  EXPECT_FALSE(retry_policy.retriableHeaders()[1]->matchesHeaders(unexpected_1));\n}\n\nTEST_F(RouteConfigurationV2, UpgradeConfigs) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          upgrade_configs:\n            - upgrade_type: Websocket\n            - upgrade_type: disabled\n              enabled: false\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      
genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const RouteEntry::UpgradeMap& upgrade_map = config.route(headers, 0)->routeEntry()->upgradeMap();\n  EXPECT_TRUE(upgrade_map.find(\"websocket\")->second);\n  EXPECT_TRUE(upgrade_map.find(\"foo\") == upgrade_map.end());\n  EXPECT_FALSE(upgrade_map.find(\"disabled\")->second);\n}\n\nTEST_F(RouteConfigurationV2, DuplicateUpgradeConfigs) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          upgrade_configs:\n            - upgrade_type: Websocket\n            - upgrade_type: WebSocket\n              enabled: false\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Duplicate upgrade WebSocket\");\n}\n\nTEST_F(RouteConfigurationV2, BadConnectConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          upgrade_configs:\n            - upgrade_type: Websocket\n              connect_config: {}\n              enabled: false\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Non-CONNECT upgrade type Websocket has ConnectConfig\");\n}\n\n// Verifies that we're creating a new instance of the retry plugins on each call instead of always\n// returning the same one.\nTEST_F(RouteConfigurationV2, RetryPluginsAreNotReused) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            
google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          retry_policy:\n            retry_host_predicate:\n            - name: envoy.test_host_predicate\n            retry_priority:\n              name: envoy.test_retry_priority\n  )EOF\";\n\n  Upstream::MockRetryPriority priority{{}, {}};\n  Upstream::MockRetryPriorityFactory priority_factory(priority);\n  Registry::InjectFactory<Upstream::RetryPriorityFactory> inject_priority_factory(priority_factory);\n\n  Upstream::TestRetryHostPredicateFactory host_predicate_factory;\n  Registry::InjectFactory<Upstream::RetryHostPredicateFactory> inject_predicate_factory(\n      host_predicate_factory);\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy();\n  const auto priority1 = retry_policy.retryPriority();\n  const auto priority2 = retry_policy.retryPriority();\n  EXPECT_NE(priority1, priority2);\n  const auto predicates1 = retry_policy.retryHostPredicates();\n  const auto predicates2 = retry_policy.retryHostPredicates();\n  EXPECT_NE(predicates1, predicates2);\n}\n\nTEST_F(RouteConfigurationV2, InternalRedirectIsDisabledWhenNotSpecifiedInRouteAction) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const auto& internal_redirect_policy =\n      config.route(headers, 0)->routeEntry()->internalRedirectPolicy();\n  
EXPECT_FALSE(internal_redirect_policy.enabled());\n}\n\nTEST_F(RouteConfigurationV2, DefaultInternalRedirectPolicyIsSensible) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          internal_redirect_policy: {}\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const auto& internal_redirect_policy =\n      config.route(headers, 0)->routeEntry()->internalRedirectPolicy();\n  EXPECT_TRUE(internal_redirect_policy.enabled());\n  EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(302)));\n  EXPECT_FALSE(\n      internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(200)));\n  EXPECT_EQ(1, internal_redirect_policy.maxInternalRedirects());\n  EXPECT_TRUE(internal_redirect_policy.predicates().empty());\n  EXPECT_FALSE(internal_redirect_policy.isCrossSchemeRedirectAllowed());\n}\n\nTEST_F(RouteConfigurationV2, InternalRedirectPolicyDropsInvalidRedirectCode) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          internal_redirect_policy:\n            redirect_response_codes: [301, 302, 303, 304]\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const auto& internal_redirect_policy =\n      config.route(headers, 
0)->routeEntry()->internalRedirectPolicy();\n  EXPECT_TRUE(internal_redirect_policy.enabled());\n  EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(301)));\n  EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(302)));\n  EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(303)));\n  EXPECT_FALSE(\n      internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(304)));\n  EXPECT_FALSE(\n      internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(305)));\n  EXPECT_FALSE(\n      internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(306)));\n  EXPECT_FALSE(\n      internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(307)));\n}\n\nTEST_F(RouteConfigurationV2, InternalRedirectPolicyDropsInvalidRedirectCodeCauseEmptySet) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: regex\n    domains: [idle.lyft.com]\n    routes:\n      - match:\n          safe_regex:\n            google_re2: {}\n            regex: \"/regex\"\n        route:\n          cluster: some-cluster\n          internal_redirect_policy:\n            redirect_response_codes: [200, 304]\n  )EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  Http::TestRequestHeaderMapImpl headers =\n      genRedirectHeaders(\"idle.lyft.com\", \"/regex\", true, false);\n  const auto& internal_redirect_policy =\n      config.route(headers, 0)->routeEntry()->internalRedirectPolicy();\n  EXPECT_TRUE(internal_redirect_policy.enabled());\n  EXPECT_FALSE(\n      internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(302)));\n  EXPECT_FALSE(\n      internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(304)));\n  EXPECT_FALSE(\n      
internal_redirect_policy.shouldRedirectForResponseCode(static_cast<Http::Code>(200)));\n}\n\nclass PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase {\npublic:\n  PerFilterConfigsTest()\n      : registered_factory_(factory_), registered_default_factory_(default_factory_) {}\n\n  struct DerivedFilterConfig : public RouteSpecificFilterConfig {\n    ProtobufWkt::Timestamp config_;\n  };\n  class TestFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\n  public:\n    TestFilterConfig() : EmptyHttpFilterConfig(\"test.filter\") {}\n\n    Http::FilterFactoryCb createFilter(const std::string&,\n                                       Server::Configuration::FactoryContext&) override {\n      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    }\n    ProtobufTypes::MessagePtr createEmptyRouteConfigProto() override {\n      return ProtobufTypes::MessagePtr{new ProtobufWkt::Timestamp()};\n    }\n    ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n      // Override this to guarantee that we have a different factory mapping by-type.\n      return ProtobufTypes::MessagePtr{new ProtobufWkt::Timestamp()};\n    }\n    Router::RouteSpecificFilterConfigConstSharedPtr\n    createRouteSpecificFilterConfig(const Protobuf::Message& message,\n                                    Server::Configuration::ServerFactoryContext&,\n                                    ProtobufMessage::ValidationVisitor&) override {\n      auto obj = std::make_shared<DerivedFilterConfig>();\n      obj->config_.MergeFrom(message);\n      return obj;\n    }\n  };\n  class DefaultTestFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\n  public:\n    DefaultTestFilterConfig() : EmptyHttpFilterConfig(\"test.default.filter\") {}\n\n    Http::FilterFactoryCb createFilter(const std::string&,\n                                       Server::Configuration::FactoryContext&) override {\n      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n    }\n    
ProtobufTypes::MessagePtr createEmptyRouteConfigProto() override {\n      return ProtobufTypes::MessagePtr{new ProtobufWkt::Struct()};\n    }\n  };\n\n  void checkEach(const std::string& yaml, uint32_t expected_entry, uint32_t expected_route,\n                 uint32_t expected_vhost) {\n    const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n    const auto route = config.route(genHeaders(\"www.foo.com\", \"/\", \"GET\"), 0);\n    const auto* route_entry = route->routeEntry();\n    const auto& vhost = route_entry->virtualHost();\n\n    check(route_entry->perFilterConfigTyped<DerivedFilterConfig>(factory_.name()), expected_entry,\n          \"route entry\");\n    check(route->perFilterConfigTyped<DerivedFilterConfig>(factory_.name()), expected_route,\n          \"route\");\n    check(vhost.perFilterConfigTyped<DerivedFilterConfig>(factory_.name()), expected_vhost,\n          \"virtual host\");\n  }\n\n  void check(const DerivedFilterConfig* cfg, uint32_t expected_seconds, std::string source) {\n    EXPECT_NE(nullptr, cfg) << \"config should not be null for source: \" << source;\n    EXPECT_EQ(expected_seconds, cfg->config_.seconds())\n        << \"config value does not match expected for source: \" << source;\n  }\n\n  void checkNoPerFilterConfig(const std::string& yaml) {\n    const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n\n    const auto route = config.route(genHeaders(\"www.foo.com\", \"/\", \"GET\"), 0);\n    const auto* route_entry = route->routeEntry();\n    const auto& vhost = route_entry->virtualHost();\n\n    EXPECT_EQ(nullptr,\n              route_entry->perFilterConfigTyped<DerivedFilterConfig>(default_factory_.name()));\n    EXPECT_EQ(nullptr, route->perFilterConfigTyped<DerivedFilterConfig>(default_factory_.name()));\n    EXPECT_EQ(nullptr, vhost.perFilterConfigTyped<DerivedFilterConfig>(default_factory_.name()));\n  }\n\n  TestFilterConfig factory_;\n  
Registry::InjectFactory<Server::Configuration::NamedHttpFilterConfigFactory> registered_factory_;\n  DefaultTestFilterConfig default_factory_;\n  Registry::InjectFactory<Server::Configuration::NamedHttpFilterConfigFactory>\n      registered_default_factory_;\n};\n\nTEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(TypedConfigFilterError)) {\n  {\n    const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n    per_filter_config: { unknown.filter: {} }\n    typed_per_filter_config:\n      unknown.filter:\n        \"@type\": type.googleapis.com/google.protobuf.Timestamp\n)EOF\";\n\n    EXPECT_THROW_WITH_MESSAGE(\n        TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n        EnvoyException, \"Only one of typed_configs or configs can be specified\");\n  }\n\n  {\n    const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n        per_filter_config: { unknown.filter: {} }\n        typed_per_filter_config:\n          unknown.filter:\n            \"@type\": type.googleapis.com/google.protobuf.Timestamp\n)EOF\";\n\n    EXPECT_THROW_WITH_MESSAGE(\n        TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true),\n        EnvoyException, \"Only one of typed_configs or configs can be specified\");\n  }\n}\n\nTEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(UnknownFilterStruct)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n    per_filter_config: { unknown.filter: {} }\n)EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Didn't find a registered implementation for name: 
'unknown.filter'\");\n}\n\nTEST_F(PerFilterConfigsTest, UnknownFilterAny) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n    typed_per_filter_config:\n      unknown.filter:\n        \"@type\": type.googleapis.com/google.protobuf.Timestamp\n)EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException,\n      \"Didn't find a registered implementation for name: 'unknown.filter'\");\n}\n\n// Test that a trivially specified NamedHttpFilterConfigFactory ignores per_filter_config without\n// error.\nTEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(DefaultFilterImplementationStruct)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n    per_filter_config: { test.default.filter: { seconds: 123} }\n)EOF\";\n\n  checkNoPerFilterConfig(yaml);\n}\n\nTEST_F(PerFilterConfigsTest, DefaultFilterImplementationAny) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n    typed_per_filter_config:\n      test.default.filter:\n        \"@type\": type.googleapis.com/google.protobuf.Struct\n        value:\n          seconds: 123\n)EOF\";\n\n  checkNoPerFilterConfig(yaml);\n}\n\nTEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(RouteLocalConfig)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n        per_filter_config: { test.filter: { seconds: 123 } }\n    per_filter_config: { test.filter: { seconds: 456 } }\n)EOF\";\n\n  checkEach(yaml, 123, 123, 456);\n}\n\nTEST_F(PerFilterConfigsTest, RouteLocalTypedConfig) {\n  const 
std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n        typed_per_filter_config:\n          test.filter:\n            \"@type\": type.googleapis.com/google.protobuf.Timestamp\n            value:\n              seconds: 123\n    typed_per_filter_config:\n      test.filter:\n        \"@type\": type.googleapis.com/google.protobuf.Struct\n        value:\n          seconds: 456\n)EOF\";\n\n  checkEach(yaml, 123, 123, 456);\n}\n\nTEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(WeightedClusterConfig)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            clusters:\n              - name: baz\n                weight: 100\n                per_filter_config: { test.filter: { seconds: 789 } }\n    per_filter_config: { test.filter: { seconds: 1011 } }\n)EOF\";\n\n  checkEach(yaml, 789, 789, 1011);\n}\n\nTEST_F(PerFilterConfigsTest, WeightedClusterTypedConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            clusters:\n              - name: baz\n                weight: 100\n                typed_per_filter_config:\n                  test.filter:\n                    \"@type\": type.googleapis.com/google.protobuf.Timestamp\n                    value:\n                      seconds: 789\n    typed_per_filter_config:\n      test.filter:\n        \"@type\": type.googleapis.com/google.protobuf.Timestamp\n        value:\n          seconds: 1011\n)EOF\";\n\n  checkEach(yaml, 789, 789, 1011);\n}\n\nTEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(WeightedClusterFallthroughConfig)) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    
routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            clusters:\n              - name: baz\n                weight: 100\n        per_filter_config: { test.filter: { seconds: 1213 } }\n    per_filter_config: { test.filter: { seconds: 1415 } }\n)EOF\";\n\n  checkEach(yaml, 1213, 1213, 1415);\n}\n\nTEST_F(PerFilterConfigsTest, WeightedClusterFallthroughTypedConfig) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route:\n          weighted_clusters:\n            clusters:\n              - name: baz\n                weight: 100\n        typed_per_filter_config:\n          test.filter:\n            \"@type\": type.googleapis.com/google.protobuf.Timestamp\n            value:\n              seconds: 1213\n    typed_per_filter_config:\n      test.filter:\n        \"@type\": type.googleapis.com/google.protobuf.Timestamp\n        value:\n          seconds: 1415\n)EOF\";\n\n  checkEach(yaml, 1213, 1213, 1415);\n}\n\nclass RouteMatchOverrideTest : public testing::Test, public ConfigImplTestBase {};\n\nTEST_F(RouteMatchOverrideTest, VerifyAllMatchableRoutes) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/foo/bar/baz\" }\n        route:\n          cluster: foo_bar_baz\n      - match: { prefix: \"/foo/bar\" }\n        route:\n          cluster: foo_bar\n      - match: { prefix: \"/foo\" }\n        route:\n          cluster: foo\n      - match: { prefix: \"/\" }\n        route:\n          cluster: default\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  std::vector<std::string> clusters{\"default\", \"foo\", \"foo_bar\", \"foo_bar_baz\"};\n\n  RouteConstSharedPtr accepted_route = config.route(\n      [&clusters](RouteConstSharedPtr route,\n                  RouteEvalStatus route_eval_status) -> 
RouteMatchStatus {\n        EXPECT_FALSE(clusters.empty());\n        EXPECT_EQ(clusters[clusters.size() - 1], route->routeEntry()->clusterName());\n        clusters.pop_back();\n        if (clusters.empty()) {\n          EXPECT_EQ(route_eval_status, RouteEvalStatus::NoMoreRoutes);\n          return RouteMatchStatus::Accept;\n        }\n        EXPECT_EQ(route_eval_status, RouteEvalStatus::HasMoreRoutes);\n        return RouteMatchStatus::Continue;\n      },\n      genHeaders(\"bat.com\", \"/foo/bar/baz\", \"GET\"));\n  EXPECT_EQ(accepted_route->routeEntry()->clusterName(), \"default\");\n}\n\nTEST_F(RouteMatchOverrideTest, VerifyRouteOverrideStops) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/foo/bar/baz\" }\n        route:\n          cluster: foo_bar_baz\n      - match: { prefix: \"/foo/bar\" }\n        route:\n          cluster: foo_bar\n      - match: { prefix: \"/foo\" }\n        route:\n          cluster: foo\n      - match: { prefix: \"/\" }\n        route:\n          cluster: default\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  std::vector<std::string> clusters{\"foo\", \"foo_bar\"};\n\n  RouteConstSharedPtr accepted_route = config.route(\n      [&clusters](RouteConstSharedPtr route,\n                  RouteEvalStatus route_eval_status) -> RouteMatchStatus {\n        EXPECT_FALSE(clusters.empty());\n        EXPECT_EQ(clusters[clusters.size() - 1], route->routeEntry()->clusterName());\n        clusters.pop_back();\n        EXPECT_EQ(route_eval_status, RouteEvalStatus::HasMoreRoutes);\n\n        if (clusters.empty()) {\n          return RouteMatchStatus::Accept; // Do not match default route\n        }\n        return RouteMatchStatus::Continue;\n      },\n      genHeaders(\"bat.com\", \"/foo/bar\", \"GET\"));\n  EXPECT_EQ(accepted_route->routeEntry()->clusterName(), \"foo\");\n}\n\nTEST_F(RouteMatchOverrideTest, 
StopWhenNoMoreRoutes) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/foo/bar/baz\" }\n        route:\n          cluster: foo_bar_baz\n      - match: { prefix: \"/foo/bar\" }\n        route:\n          cluster: foo_bar\n      - match: { prefix: \"/foo\" }\n        route:\n          cluster: foo\n      - match: { prefix: \"/\" }\n        route:\n          cluster: default\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  std::vector<std::string> clusters{\"default\", \"foo\", \"foo_bar\", \"foo_bar_baz\"};\n\n  RouteConstSharedPtr accepted_route = config.route(\n      [&clusters](RouteConstSharedPtr route,\n                  RouteEvalStatus route_eval_status) -> RouteMatchStatus {\n        EXPECT_FALSE(clusters.empty());\n        EXPECT_EQ(clusters[clusters.size() - 1], route->routeEntry()->clusterName());\n        clusters.pop_back();\n\n        if (clusters.empty()) {\n          EXPECT_EQ(route_eval_status, RouteEvalStatus::NoMoreRoutes);\n        } else {\n          EXPECT_EQ(route_eval_status, RouteEvalStatus::HasMoreRoutes);\n        }\n        // Returning continue when no more routes are available will be ignored by ConfigImpl::route\n        return RouteMatchStatus::Continue;\n      },\n      genHeaders(\"bat.com\", \"/foo/bar/baz\", \"GET\"));\n  EXPECT_EQ(accepted_route, nullptr);\n}\n\nTEST_F(RouteMatchOverrideTest, NullRouteOnNoRouteMatch) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/foo/bar/baz\" }\n        route:\n          cluster: foo_bar_baz\n      - match: { prefix: \"/foo/bar\" }\n        route:\n          cluster: foo_bar\n      - match: { prefix: \"/foo\" }\n        route:\n          cluster: foo\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  RouteConstSharedPtr 
accepted_route = config.route(\n      [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus {\n        ADD_FAILURE()\n            << \"RouteCallback should not be invoked since there are no matching route to override\";\n        return RouteMatchStatus::Continue;\n      },\n      genHeaders(\"bat.com\", \"/\", \"GET\"));\n  EXPECT_EQ(accepted_route, nullptr);\n}\n\nTEST_F(RouteMatchOverrideTest, NullRouteOnNoHostMatch) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"www.acme.com\"]\n    routes:\n      - match: { prefix: \"/foo/bar/baz\" }\n        route:\n          cluster: foo_bar_baz\n      - match: { prefix: \"/foo/bar\" }\n        route:\n          cluster: foo_bar\n      - match: { prefix: \"/\" }\n        route:\n          cluster: default\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  RouteConstSharedPtr accepted_route = config.route(\n      [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus {\n        ADD_FAILURE()\n            << \"RouteCallback should not be invoked since there are no matching route to override\";\n        return RouteMatchStatus::Continue;\n      },\n      genHeaders(\"bat.com\", \"/\", \"GET\"));\n  EXPECT_EQ(accepted_route, nullptr);\n}\n\nTEST_F(RouteMatchOverrideTest, NullRouteOnNullXForwardedProto) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/foo/bar/baz\" }\n        route:\n          cluster: foo_bar_baz\n      - match: { prefix: \"/foo/bar\" }\n        route:\n          cluster: foo_bar\n      - match: { prefix: \"/\" }\n        route:\n          cluster: default\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  RouteConstSharedPtr accepted_route = config.route(\n      [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus {\n        ADD_FAILURE()\n            << 
\"RouteCallback should not be invoked since there are no matching route to override\";\n        return RouteMatchStatus::Continue;\n      },\n      genHeaders(\"bat.com\", \"/\", \"GET\", \"\"));\n  EXPECT_EQ(accepted_route, nullptr);\n}\n\nTEST_F(RouteMatchOverrideTest, NullRouteOnRequireTlsAll) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/foo/bar/baz\" }\n        route:\n          cluster: foo_bar_baz\n      - match: { prefix: \"/foo/bar\" }\n        route:\n          cluster: foo_bar\n      - match: { prefix: \"/\" }\n        route:\n          cluster: default\n    require_tls: ALL\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  RouteConstSharedPtr accepted_route = config.route(\n      [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus {\n        ADD_FAILURE()\n            << \"RouteCallback should not be invoked since there are no matching route to override\";\n        return RouteMatchStatus::Continue;\n      },\n      genHeaders(\"bat.com\", \"/\", \"GET\"));\n  EXPECT_NE(nullptr, dynamic_cast<const SslRedirectRoute*>(accepted_route.get()));\n}\n\nTEST_F(RouteMatchOverrideTest, NullRouteOnRequireTlsInternal) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/foo/bar/baz\" }\n        route:\n          cluster: foo_bar_baz\n      - match: { prefix: \"/foo/bar\" }\n        route:\n          cluster: foo_bar\n      - match: { prefix: \"/\" }\n        route:\n          cluster: default\n    require_tls: EXTERNAL_ONLY\n)EOF\";\n\n  TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true);\n  RouteConstSharedPtr accepted_route = config.route(\n      [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus {\n        ADD_FAILURE()\n            << \"RouteCallback should not be invoked since there are no 
matching route to override\";\n        return RouteMatchStatus::Continue;\n      },\n      genHeaders(\"bat.com\", \"/\", \"GET\"));\n  EXPECT_NE(nullptr, dynamic_cast<const SslRedirectRoute*>(accepted_route.get()));\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/corpus_from_config_impl.sh",
    "content": "#!/bin/sh\n\n# Helper shell script for :corpus_from_config_impl genrule in BUILD.\n\nset -e\n\n# Set NORUNFILES so test/main doesn't fail when runfiles manifest is not found.\nNORUNFILES=1 \"$@\"\n\n# Verify at least one entry is actually generated\n[ -e \"${GENRULE_OUTPUT_DIR}\"/generated_corpus_0 ]\n"
  },
  {
    "path": "test/common/router/header_formatter_test.cc",
    "content": "#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route.pb.validate.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/protocol.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/router/header_formatter.h\"\n#include \"common/router/header_parser.h\"\n#include \"common/router/string_accessor_impl.h\"\n#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"test/common/stream_info/test_int_accessor.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nstatic envoy::config::route::v3::Route parseRouteFromV3Yaml(const std::string& yaml,\n                                                            bool avoid_boosting = true) {\n  envoy::config::route::v3::Route route;\n  TestUtility::loadFromYaml(yaml, route, false, avoid_boosting);\n  return route;\n}\n\nclass StreamInfoHeaderFormatterTest : public testing::Test {\npublic:\n  void testFormatting(const Envoy::StreamInfo::MockStreamInfo& stream_info,\n                      const std::string& variable, const std::string& expected_output) {\n    {\n      auto f = StreamInfoHeaderFormatter(variable, false);\n      const std::string formatted_string = f.format(stream_info);\n      EXPECT_EQ(expected_output, formatted_string);\n    }\n  }\n\n  void testFormatting(const 
std::string& variable, const std::string& expected_output) {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    testFormatting(stream_info, variable, expected_output);\n  }\n\n  void testInvalidFormat(const std::string& variable) {\n    EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(variable, false), EnvoyException,\n                              fmt::format(\"field '{}' not supported as custom header\", variable));\n  }\n};\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamRemoteAddressVariable) {\n  testFormatting(\"DOWNSTREAM_REMOTE_ADDRESS\", \"127.0.0.1:0\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamRemoteAddressWithoutPortVariable) {\n  testFormatting(\"DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT\", \"127.0.0.1\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalAddressVariable) {\n  testFormatting(\"DOWNSTREAM_LOCAL_ADDRESS\", \"127.0.0.2:0\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalPortVariable) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  // Validate for IPv4 address\n  auto address = Network::Address::InstanceConstSharedPtr{\n      new Network::Address::Ipv4Instance(\"127.1.2.3\", 8443)};\n  EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_PORT\", \"8443\");\n\n  // Validate for IPv6 address\n  address =\n      Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance(\"::1\", 9443)};\n  EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_PORT\", \"9443\");\n\n  // Validate for Pipe\n  address = Network::Address::InstanceConstSharedPtr{new Network::Address::PipeInstance(\"/foo\")};\n  EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_PORT\", 
\"\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalAddressWithoutPortVariable) {\n  testFormatting(\"DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT\", \"127.0.0.2\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestformatWithUpstreamRemoteAddressVariable) {\n  testFormatting(\"UPSTREAM_REMOTE_ADDRESS\", \"10.0.0.1:443\");\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  stream_info.host_.reset();\n  testFormatting(stream_info, \"UPSTREAM_REMOTE_ADDRESS\", \"\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestformatWithHostnameVariable) {\n  {\n    NiceMock<Api::MockOsSysCalls> os_sys_calls;\n    TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n    EXPECT_CALL(os_sys_calls, gethostname(_, _))\n        .WillOnce(Invoke([](char*, size_t) -> Api::SysCallIntResult {\n          return {-1, ENAMETOOLONG};\n        }));\n    testFormatting(\"HOSTNAME\", \"-\");\n  }\n\n  {\n    NiceMock<Api::MockOsSysCalls> os_sys_calls;\n    TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n    EXPECT_CALL(os_sys_calls, gethostname(_, _))\n        .WillOnce(Invoke([](char* name, size_t) -> Api::SysCallIntResult {\n          StringUtil::strlcpy(name, \"myhostname\", 11);\n          return {0, 0};\n        }));\n    testFormatting(\"HOSTNAME\", \"myhostname\");\n  }\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithProtocolVariable) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  absl::optional<Envoy::Http::Protocol> protocol = Envoy::Http::Protocol::Http11;\n  ON_CALL(stream_info, protocol()).WillByDefault(ReturnPointee(&protocol));\n\n  testFormatting(stream_info, \"PROTOCOL\", \"HTTP/1.1\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerUriSanVariableSingleSan) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::vector<std::string> 
sans{\"san\"};\n  ON_CALL(*connection_info, uriSanPeerCertificate()).WillByDefault(Return(sans));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_URI_SAN\", \"san\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerUriSanVariableMultipleSans) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::vector<std::string> sans{\"san1\", \"san2\"};\n  ON_CALL(*connection_info, uriSanPeerCertificate()).WillByDefault(Return(sans));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_URI_SAN\", \"san1,san2\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerUriSanEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, uriSanPeerCertificate())\n      .WillByDefault(Return(std::vector<std::string>()));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_URI_SAN\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_URI_SAN\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalUriSanVariableSingleSan) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::vector<std::string> sans{\"san\"};\n  ON_CALL(*connection_info, 
uriSanLocalCertificate()).WillByDefault(Return(sans));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_URI_SAN\", \"san\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalUriSanVariableMultipleSans) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::vector<std::string> sans{\"san1\", \"san2\"};\n  ON_CALL(*connection_info, uriSanLocalCertificate()).WillByDefault(Return(sans));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_URI_SAN\", \"san1,san2\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalUriSanVariableNoSans) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, uriSanLocalCertificate())\n      .WillByDefault(Return(std::vector<std::string>()));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_URI_SAN\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalUriSanNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_URI_SAN\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalSubject) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string subject = \"subject\";\n  ON_CALL(*connection_info, subjectLocalCertificate()).WillByDefault(ReturnRef(subject));\n  
EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_SUBJECT\", \"subject\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalSubjectEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string subject;\n  ON_CALL(*connection_info, subjectLocalCertificate()).WillByDefault(ReturnRef(subject));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_SUBJECT\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalSubjectNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_LOCAL_SUBJECT\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsSessionId) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string session_id = \"deadbeef\";\n  ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(session_id));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_TLS_SESSION_ID\", \"deadbeef\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsSessionIdEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string session_id;\n  ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(session_id));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, 
\"DOWNSTREAM_TLS_SESSION_ID\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsSessionIdNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_TLS_SESSION_ID\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsCipher) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, ciphersuiteString())\n      .WillByDefault(Return(\"TLS_DHE_RSA_WITH_AES_256_GCM_SHA384\"));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_TLS_CIPHER\", \"TLS_DHE_RSA_WITH_AES_256_GCM_SHA384\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsCipherEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, ciphersuiteString()).WillByDefault(Return(\"\"));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_TLS_CIPHER\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsCipherNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_TLS_CIPHER\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsVersion) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string tls_version = \"TLSv1.2\";\n  ON_CALL(*connection_info, 
tlsVersion()).WillByDefault(ReturnRef(tls_version));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_TLS_VERSION\", \"TLSv1.2\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsVersionEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(EMPTY_STRING));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_TLS_VERSION\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsVersionNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_TLS_VERSION\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256Fingerprint) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string expected_sha = \"685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f\";\n  ON_CALL(*connection_info, sha256PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_FINGERPRINT_256\",\n                 \"685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256FingerprintEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string expected_sha;\n  ON_CALL(*connection_info, 
sha256PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_FINGERPRINT_256\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256FingerprintNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_FINGERPRINT_256\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1Fingerprint) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string expected_sha = \"685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f\";\n  ON_CALL(*connection_info, sha1PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_FINGERPRINT_1\",\n                 \"685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1FingerprintEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string expected_sha;\n  ON_CALL(*connection_info, sha1PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_FINGERPRINT_1\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1FingerprintNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_FINGERPRINT_1\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSerial) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::string serial_number = \"b8b5ecc898f2124a\";\n  ON_CALL(*connection_info, serialNumberPeerCertificate()).WillByDefault(ReturnRef(serial_number));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_SERIAL\", \"b8b5ecc898f2124a\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSerialEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::string serial_number;\n  ON_CALL(*connection_info, serialNumberPeerCertificate()).WillByDefault(ReturnRef(serial_number));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_SERIAL\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSerialNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_SERIAL\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerIssuer) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::string issuer_peer =\n      \"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\";\n  ON_CALL(*connection_info, issuerPeerCertificate()).WillByDefault(ReturnRef(issuer_peer));\n  
EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_ISSUER\",\n                 \"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerIssuerEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::string issuer_peer;\n  ON_CALL(*connection_info, issuerPeerCertificate()).WillByDefault(ReturnRef(issuer_peer));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_ISSUER\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerIssuerNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_ISSUER\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSubject) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::string subject_peer =\n      \"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\";\n  ON_CALL(*connection_info, subjectPeerCertificate()).WillByDefault(ReturnRef(subject_peer));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_SUBJECT\",\n                 \"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSubjectEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = 
std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  const std::string subject_peer;\n  ON_CALL(*connection_info, subjectPeerCertificate()).WillByDefault(ReturnRef(subject_peer));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_SUBJECT\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSubjectNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_SUBJECT\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCert) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string expected_cert = \"<some cert>\";\n  ON_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate())\n      .WillByDefault(ReturnRef(expected_cert));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_CERT\", expected_cert);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  std::string expected_cert;\n  ON_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate())\n      .WillByDefault(ReturnRef(expected_cert));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_CERT\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  
testFormatting(stream_info, \"DOWNSTREAM_PEER_CERT\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVStart) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  absl::Time abslStartTime =\n      TestUtility::parseTime(\"Dec 18 01:50:34 2018 GMT\", \"%b %e %H:%M:%S %Y GMT\");\n  SystemTime startTime = absl::ToChronoTime(abslStartTime);\n  ON_CALL(*connection_info, validFromPeerCertificate()).WillByDefault(Return(startTime));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_CERT_V_START\", \"2018-12-18T01:50:34.000Z\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVStartEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, validFromPeerCertificate()).WillByDefault(Return(absl::nullopt));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_CERT_V_START\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVStartNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_CERT_V_START\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVEnd) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  absl::Time abslStartTime =\n      TestUtility::parseTime(\"Dec 17 01:50:34 2020 GMT\", \"%b %e %H:%M:%S %Y GMT\");\n  SystemTime startTime = absl::ToChronoTime(abslStartTime);\n  
ON_CALL(*connection_info, expirationPeerCertificate()).WillByDefault(Return(startTime));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_CERT_V_END\", \"2020-12-17T01:50:34.000Z\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVEndEmpty) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, expirationPeerCertificate()).WillByDefault(Return(absl::nullopt));\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_CERT_V_END\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVEndNoTls) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n  testFormatting(stream_info, \"DOWNSTREAM_PEER_CERT_V_END\", EMPTY_STRING);\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithUpstreamMetadataVariable) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  std::shared_ptr<NiceMock<Envoy::Upstream::MockHostDescription>> host(\n      new NiceMock<Envoy::Upstream::MockHostDescription>());\n\n  auto metadata = std::make_shared<envoy::config::core::v3::Metadata>(\n      TestUtility::parseYaml<envoy::config::core::v3::Metadata>(\n          R\"EOF(\n        filter_metadata:\n          namespace:\n            key: value\n            nested:\n              str_key: str_value\n              \"escaped,key\": escaped_key_value\n              bool_key1: true\n              bool_key2: false\n              num_key1: 1\n              num_key2: 3.14\n              null_key: null\n              list_key: [ list_element ]\n              struct_key:\n                deep_key: deep_value\n      )EOF\"));\n\n  // Prove 
we're testing the expected types.\n  const auto& nested_struct =\n      Envoy::Config::Metadata::metadataValue(metadata.get(), \"namespace\", \"nested\").struct_value();\n  EXPECT_EQ(nested_struct.fields().at(\"str_key\").kind_case(), ProtobufWkt::Value::kStringValue);\n  EXPECT_EQ(nested_struct.fields().at(\"bool_key1\").kind_case(), ProtobufWkt::Value::kBoolValue);\n  EXPECT_EQ(nested_struct.fields().at(\"bool_key2\").kind_case(), ProtobufWkt::Value::kBoolValue);\n  EXPECT_EQ(nested_struct.fields().at(\"num_key1\").kind_case(), ProtobufWkt::Value::kNumberValue);\n  EXPECT_EQ(nested_struct.fields().at(\"num_key1\").kind_case(), ProtobufWkt::Value::kNumberValue);\n  EXPECT_EQ(nested_struct.fields().at(\"null_key\").kind_case(), ProtobufWkt::Value::kNullValue);\n  EXPECT_EQ(nested_struct.fields().at(\"list_key\").kind_case(), ProtobufWkt::Value::kListValue);\n  EXPECT_EQ(nested_struct.fields().at(\"struct_key\").kind_case(), ProtobufWkt::Value::kStructValue);\n\n  ON_CALL(stream_info, upstreamHost()).WillByDefault(Return(host));\n  ON_CALL(*host, metadata()).WillByDefault(Return(metadata));\n\n  // Top-level value.\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"key\\\"])\", \"value\");\n\n  // Nested string value.\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"nested\\\", \\\"str_key\\\"])\",\n                 \"str_value\");\n\n  // Boolean values.\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"nested\\\", \\\"bool_key1\\\"])\",\n                 \"true\");\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"nested\\\", \\\"bool_key2\\\"])\",\n                 \"false\");\n\n  // Number values.\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"nested\\\", \\\"num_key1\\\"])\", \"1\");\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"nested\\\", \\\"num_key2\\\"])\",\n                 \"3.14\");\n\n  // Deeply 
nested value.\n  testFormatting(stream_info,\n                 \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"nested\\\", \\\"struct_key\\\", \\\"deep_key\\\"])\",\n                 \"deep_value\");\n\n  // Initial metadata lookup fails.\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"wrong_namespace\\\", \\\"key\\\"])\", \"\");\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"not_found\\\"])\", \"\");\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"not_found\\\", \\\"key\\\"])\", \"\");\n\n  // Nested metadata lookup fails.\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"nested\\\", \\\"not_found\\\"])\", \"\");\n\n  // Nested metadata lookup returns non-struct intermediate value.\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"key\\\", \\\"invalid\\\"])\", \"\");\n\n  // Struct values are not rendered.\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"nested\\\", \\\"struct_key\\\"])\", \"\");\n\n  // List values are not rendered.\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"nested\\\", \\\"list_key\\\"])\", \"\");\n}\n\n// Replaces the test of user-defined-headers acting as a Query of Death with\n// size checks on user defined headers.\nTEST_F(StreamInfoHeaderFormatterTest, ValidateLimitsOnUserDefinedHeaders) {\n  {\n    envoy::config::route::v3::RouteConfiguration route;\n    envoy::config::core::v3::HeaderValueOption* header =\n        route.mutable_request_headers_to_add()->Add();\n    std::string long_string(16385, 'a');\n    header->mutable_header()->set_key(\"header_name\");\n    header->mutable_header()->set_value(long_string);\n    header->mutable_append()->set_value(true);\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(route), ProtoValidationException,\n                            \"Proto constraint validation failed.*\");\n  }\n  {\n    envoy::config::route::v3::RouteConfiguration 
route;\n    for (int i = 0; i < 1001; ++i) {\n      envoy::config::core::v3::HeaderValueOption* header =\n          route.mutable_request_headers_to_add()->Add();\n      header->mutable_header()->set_key(\"header_name\");\n      header->mutable_header()->set_value(\"value\");\n    }\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(route), ProtoValidationException,\n                            \"Proto constraint validation failed.*\");\n  }\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithUpstreamMetadataVariableMissingHost) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  std::shared_ptr<NiceMock<Envoy::Upstream::MockHostDescription>> host;\n  ON_CALL(stream_info, upstreamHost()).WillByDefault(Return(host));\n\n  testFormatting(stream_info, \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"key\\\"])\", \"\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithRequestMetadata) {\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  envoy::config::core::v3::Metadata metadata;\n  ProtobufWkt::Struct struct_obj;\n\n  auto& fields_map = *struct_obj.mutable_fields();\n  fields_map[\"foo\"] = ValueUtil::stringValue(\"bar\");\n  (*metadata.mutable_filter_metadata())[\"envoy.lb\"] = struct_obj;\n\n  EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n  EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n\n  testFormatting(stream_info, \"DYNAMIC_METADATA([\\\"envoy.lb\\\", \\\"foo\\\"])\", \"bar\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithPerRequestStateVariable) {\n  Envoy::StreamInfo::FilterStateSharedPtr filter_state(\n      std::make_shared<Envoy::StreamInfo::FilterStateImpl>(\n          Envoy::StreamInfo::FilterState::LifeSpan::FilterChain));\n  filter_state->setData(\"testing\", std::make_unique<StringAccessorImpl>(\"test_value\"),\n                        StreamInfo::FilterState::StateType::ReadOnly,\n                        
StreamInfo::FilterState::LifeSpan::FilterChain);\n  EXPECT_EQ(\"test_value\", filter_state->getDataReadOnly<StringAccessor>(\"testing\").asString());\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  ON_CALL(stream_info, filterState()).WillByDefault(ReturnRef(filter_state));\n  ON_CALL(Const(stream_info), filterState()).WillByDefault(ReturnRef(*filter_state));\n\n  testFormatting(stream_info, \"PER_REQUEST_STATE(testing)\", \"test_value\");\n  testFormatting(stream_info, \"PER_REQUEST_STATE(testing2)\", \"\");\n  EXPECT_EQ(\"test_value\", filter_state->getDataReadOnly<StringAccessor>(\"testing\").asString());\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, TestFormatWithNonStringPerRequestStateVariable) {\n  Envoy::StreamInfo::FilterStateSharedPtr filter_state(\n      std::make_shared<Envoy::StreamInfo::FilterStateImpl>(\n          Envoy::StreamInfo::FilterState::LifeSpan::FilterChain));\n  filter_state->setData(\"testing\", std::make_unique<StreamInfo::TestIntAccessor>(1),\n                        StreamInfo::FilterState::StateType::ReadOnly,\n                        StreamInfo::FilterState::LifeSpan::FilterChain);\n  EXPECT_EQ(1, filter_state->getDataReadOnly<StreamInfo::TestIntAccessor>(\"testing\").access());\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  ON_CALL(stream_info, filterState()).WillByDefault(ReturnRef(filter_state));\n  ON_CALL(Const(stream_info), filterState()).WillByDefault(ReturnRef(*filter_state));\n\n  testFormatting(stream_info, \"PER_REQUEST_STATE(testing)\", \"\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, WrongFormatOnPerRequestStateVariable) {\n  // No parameters\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"PER_REQUEST_STATE()\", false), EnvoyException,\n                            \"Invalid header configuration. 
Expected format \"\n                            \"PER_REQUEST_STATE(<data_name>), actual format \"\n                            \"PER_REQUEST_STATE()\");\n\n  // Missing single parens\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"PER_REQUEST_STATE(testing\", false),\n                            EnvoyException,\n                            \"Invalid header configuration. Expected format \"\n                            \"PER_REQUEST_STATE(<data_name>), actual format \"\n                            \"PER_REQUEST_STATE(testing\");\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"PER_REQUEST_STATE testing)\", false),\n                            EnvoyException,\n                            \"Invalid header configuration. Expected format \"\n                            \"PER_REQUEST_STATE(<data_name>), actual format \"\n                            \"PER_REQUEST_STATE testing)\");\n}\n\nTEST_F(StreamInfoHeaderFormatterTest, UnknownVariable) { testInvalidFormat(\"INVALID_VARIABLE\"); }\n\nTEST_F(StreamInfoHeaderFormatterTest, WrongFormatOnUpstreamMetadataVariable) {\n  // Invalid JSON.\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"UPSTREAM_METADATA(abcd)\", false),\n                            EnvoyException,\n                            \"Invalid header configuration. Expected format \"\n                            \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n                            \"UPSTREAM_METADATA(abcd), because JSON supplied is not valid. \"\n                            \"Error(offset 0, line 1): Invalid value.\\n\");\n\n  // No parameters.\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"UPSTREAM_METADATA\", false), EnvoyException,\n                            \"Invalid header configuration. 
Expected format \"\n                            \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n                            \"UPSTREAM_METADATA\");\n\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"UPSTREAM_METADATA()\", false), EnvoyException,\n                            \"Invalid header configuration. Expected format \"\n                            \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n                            \"UPSTREAM_METADATA(), because JSON supplied is not valid. \"\n                            \"Error(offset 0, line 1): The document is empty.\\n\");\n\n  // One parameter.\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"UPSTREAM_METADATA([\\\"ns\\\"])\", false),\n                            EnvoyException,\n                            \"Invalid header configuration. Expected format \"\n                            \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n                            \"UPSTREAM_METADATA([\\\"ns\\\"])\");\n\n  // Missing close paren.\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"UPSTREAM_METADATA(\", false), EnvoyException,\n                            \"Invalid header configuration. Expected format \"\n                            \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n                            \"UPSTREAM_METADATA(\");\n\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"UPSTREAM_METADATA([a,b,c,d]\", false),\n                            EnvoyException,\n                            \"Invalid header configuration. 
Expected format \"\n                            \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n                            \"UPSTREAM_METADATA([a,b,c,d]\");\n\n  EXPECT_THROW_WITH_MESSAGE(StreamInfoHeaderFormatter(\"UPSTREAM_METADATA([\\\"a\\\",\\\"b\\\"]\", false),\n                            EnvoyException,\n                            \"Invalid header configuration. Expected format \"\n                            \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n                            \"UPSTREAM_METADATA([\\\"a\\\",\\\"b\\\"]\");\n\n  // Non-string elements.\n  EXPECT_THROW_WITH_MESSAGE(\n      StreamInfoHeaderFormatter(\"UPSTREAM_METADATA([\\\"a\\\", 1])\", false), EnvoyException,\n      \"Invalid header configuration. Expected format \"\n      \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n      \"UPSTREAM_METADATA([\\\"a\\\", 1]), because JSON field from line 1 accessed with type 'String' \"\n      \"does not match actual type 'Integer'.\");\n\n  // Invalid string elements.\n  EXPECT_THROW_WITH_MESSAGE(\n      StreamInfoHeaderFormatter(\"UPSTREAM_METADATA([\\\"a\\\", \\\"\\\\unothex\\\"])\", false), EnvoyException,\n      \"Invalid header configuration. Expected format \"\n      \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n      \"UPSTREAM_METADATA([\\\"a\\\", \\\"\\\\unothex\\\"]), because JSON supplied is not valid. \"\n      \"Error(offset 7, line 1): Incorrect hex digit after \\\\u escape in string.\\n\");\n\n  // Non-array parameters.\n  EXPECT_THROW_WITH_MESSAGE(\n      StreamInfoHeaderFormatter(\"UPSTREAM_METADATA({\\\"a\\\":1})\", false), EnvoyException,\n      \"Invalid header configuration. 
Expected format \"\n      \"UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", ...]), actual format \"\n      \"UPSTREAM_METADATA({\\\"a\\\":1}), because JSON field from line 1 accessed with type 'Array' \"\n      \"does not match actual type 'Object'.\");\n}\n\nTEST(HeaderParserTest, TestParseInternal) {\n  struct TestCase {\n    std::string input_;\n    absl::optional<std::string> expected_output_;\n    absl::optional<std::string> expected_exception_;\n  };\n\n  static const TestCase test_cases[] = {\n      // Valid inputs\n      {\"\", {}, {}},\n      {\"%PROTOCOL%\", {\"HTTP/1.1\"}, {}},\n      {\"[%PROTOCOL%\", {\"[HTTP/1.1\"}, {}},\n      {\"%PROTOCOL%]\", {\"HTTP/1.1]\"}, {}},\n      {\"[%PROTOCOL%]\", {\"[HTTP/1.1]\"}, {}},\n      {\"%%%PROTOCOL%\", {\"%HTTP/1.1\"}, {}},\n      {\"%PROTOCOL%%%\", {\"HTTP/1.1%\"}, {}},\n      {\"%%%PROTOCOL%%%\", {\"%HTTP/1.1%\"}, {}},\n      {\"%DOWNSTREAM_REMOTE_ADDRESS%\", {\"127.0.0.1:0\"}, {}},\n      {\"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\", {\"127.0.0.1\"}, {}},\n      {\"%DOWNSTREAM_LOCAL_ADDRESS%\", {\"127.0.0.2:0\"}, {}},\n      {\"%DOWNSTREAM_LOCAL_PORT%\", {\"0\"}, {}},\n      {\"%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%\", {\"127.0.0.2\"}, {}},\n      {\"%UPSTREAM_METADATA([\\\"ns\\\", \\\"key\\\"])%\", {\"value\"}, {}},\n      {\"[%UPSTREAM_METADATA([\\\"ns\\\", \\\"key\\\"])%\", {\"[value\"}, {}},\n      {\"%UPSTREAM_METADATA([\\\"ns\\\", \\\"key\\\"])%]\", {\"value]\"}, {}},\n      {\"[%UPSTREAM_METADATA([\\\"ns\\\", \\\"key\\\"])%]\", {\"[value]\"}, {}},\n      {\"%UPSTREAM_METADATA([\\\"ns\\\", \\t \\\"key\\\"])%\", {\"value\"}, {}},\n      {\"%UPSTREAM_METADATA([\\\"ns\\\", \\n \\\"key\\\"])%\", {\"value\"}, {}},\n      {\"%UPSTREAM_METADATA( \\t [ \\t \\\"ns\\\" \\t , \\t \\\"key\\\" \\t ] \\t )%\", {\"value\"}, {}},\n      {R\"EOF(%UPSTREAM_METADATA([\"\\\"quoted\\\"\", \"\\\"key\\\"\"])%)EOF\", {\"value\"}, {}},\n      {\"%UPSTREAM_REMOTE_ADDRESS%\", {\"10.0.0.1:443\"}, {}},\n      
{\"%PER_REQUEST_STATE(testing)%\", {\"test_value\"}, {}},\n      {\"%REQ(x-request-id)%\", {\"123\"}, {}},\n      {\"%START_TIME%\", {\"2018-04-03T23:06:09.123Z\"}, {}},\n      {\"%RESPONSE_FLAGS%\", {\"LR\"}, {}},\n      {\"%RESPONSE_CODE_DETAILS%\", {\"via_upstream\"}, {}},\n\n      // Unescaped %\n      {\"%\", {}, {\"Invalid header configuration. Un-escaped % at position 0\"}},\n      {\"before %\", {}, {\"Invalid header configuration. Un-escaped % at position 7\"}},\n      {\"%% infix %\", {}, {\"Invalid header configuration. Un-escaped % at position 9\"}},\n\n      // Unknown variable names\n      {\"%INVALID%\", {}, {\"field 'INVALID' not supported as custom header\"}},\n      {\"before %INVALID%\", {}, {\"field 'INVALID' not supported as custom header\"}},\n      {\"%INVALID% after\", {}, {\"field 'INVALID' not supported as custom header\"}},\n      {\"before %INVALID% after\", {}, {\"field 'INVALID' not supported as custom header\"}},\n\n      // Un-terminated variable expressions.\n      {\"%VAR\", {}, {\"Invalid header configuration. Un-terminated variable expression 'VAR'\"}},\n      {\"%%%VAR\", {}, {\"Invalid header configuration. Un-terminated variable expression 'VAR'\"}},\n      {\"before %VAR\",\n       {},\n       {\"Invalid header configuration. Un-terminated variable expression 'VAR'\"}},\n      {\"before %%%VAR\",\n       {},\n       {\"Invalid header configuration. Un-terminated variable expression 'VAR'\"}},\n      {\"before %VAR after\",\n       {},\n       {\"Invalid header configuration. Un-terminated variable expression 'VAR after'\"}},\n      {\"before %%%VAR after\",\n       {},\n       {\"Invalid header configuration. Un-terminated variable expression 'VAR after'\"}},\n      {\"% \", {}, {\"Invalid header configuration. 
Un-terminated variable expression ' '\"}},\n\n      // Parsing errors in variable expressions that take a JSON-array parameter.\n      {\"%UPSTREAM_METADATA(no array)%\",\n       {},\n       {\"Invalid header configuration. Expected format UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", \"\n        \"...]), actual format UPSTREAM_METADATA(no array), because JSON supplied is not valid. \"\n        \"Error(offset 1, line 1): Invalid value.\\n\"}},\n      {\"%UPSTREAM_METADATA( no array)%\",\n       {},\n       {\"Invalid header configuration. Expected format UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", \"\n        \"...]), actual format UPSTREAM_METADATA( no array), because JSON supplied is not valid. \"\n        \"Error(offset 2, line 1): Invalid value.\\n\"}},\n      {\"%UPSTREAM_METADATA([\\\"unterminated array\\\")%\",\n       {},\n       {\"Invalid header configuration. Expecting ',', ']', or whitespace after \"\n        \"'UPSTREAM_METADATA([\\\"unterminated array\\\"', but found ')'\"}},\n      {\"%UPSTREAM_METADATA([not-a-string])%\",\n       {},\n       {\"Invalid header configuration. Expecting '\\\"' or whitespace after 'UPSTREAM_METADATA([', \"\n        \"but found 'n'\"}},\n      {\"%UPSTREAM_METADATA([\\\"\\\\\",\n       {},\n       {\"Invalid header configuration. Un-terminated backslash in JSON string after \"\n        \"'UPSTREAM_METADATA([\\\"'\"}},\n      {\"%UPSTREAM_METADATA([\\\"ns\\\", \\\"key\\\"]x\",\n       {},\n       {\"Invalid header configuration. Expecting ')' or whitespace after \"\n        \"'UPSTREAM_METADATA([\\\"ns\\\", \\\"key\\\"]', but found 'x'\"}},\n      {\"%UPSTREAM_METADATA([\\\"ns\\\", \\\"key\\\"])x\",\n       {},\n       {\"Invalid header configuration. Expecting '%' or whitespace after \"\n        \"'UPSTREAM_METADATA([\\\"ns\\\", \\\"key\\\"])', but found 'x'\"}},\n\n      {\"%PER_REQUEST_STATE no parens%\",\n       {},\n       {\"Invalid header configuration. 
Expected format PER_REQUEST_STATE(<data_name>), \"\n        \"actual format PER_REQUEST_STATE no parens\"}},\n\n      {\"%REQ%\",\n       {},\n       {\"Invalid header configuration. Expected format REQ(<header-name>), \"\n        \"actual format REQ\"}},\n      {\"%REQ no parens%\",\n       {},\n       {\"Invalid header configuration. Expected format REQ(<header-name>), \"\n        \"actual format REQno parens\"}},\n\n      // Invalid arguments\n      {\"%UPSTREAM_METADATA%\",\n       {},\n       {\"Invalid header configuration. Expected format UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", \"\n        \"...]), actual format UPSTREAM_METADATA\"}},\n      {\"%UPSTREAM_METADATA([\\\"ns\\\"])%\",\n       {},\n       {\"Invalid header configuration. Expected format UPSTREAM_METADATA([\\\"namespace\\\", \\\"k\\\", \"\n        \"...]), actual format UPSTREAM_METADATA([\\\"ns\\\"])\"}},\n      {\"%START_TIME(%85n)%\", {}, {\"Invalid header configuration. Format string contains newline.\"}},\n\n  };\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  absl::optional<Envoy::Http::Protocol> protocol = Envoy::Http::Protocol::Http11;\n  ON_CALL(stream_info, protocol()).WillByDefault(ReturnPointee(&protocol));\n\n  std::shared_ptr<NiceMock<Envoy::Upstream::MockHostDescription>> host(\n      new NiceMock<Envoy::Upstream::MockHostDescription>());\n  ON_CALL(stream_info, upstreamHost()).WillByDefault(Return(host));\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  request_headers.addCopy(Http::LowerCaseString(std::string(\"x-request-id\")), 123);\n  ON_CALL(stream_info, getRequestHeaders()).WillByDefault(Return(&request_headers));\n\n  // Upstream metadata with percent signs in the key.\n  auto metadata = std::make_shared<envoy::config::core::v3::Metadata>(\n      TestUtility::parseYaml<envoy::config::core::v3::Metadata>(\n          R\"EOF(\n        filter_metadata:\n          ns:\n            key: value\n          '\"quoted\"':\n            '\"key\"': value\n 
     )EOF\"));\n  ON_CALL(*host, metadata()).WillByDefault(Return(metadata));\n\n  // \"2018-04-03T23:06:09.123Z\".\n  const SystemTime start_time(std::chrono::milliseconds(1522796769123));\n  ON_CALL(stream_info, startTime()).WillByDefault(Return(start_time));\n\n  Envoy::StreamInfo::FilterStateSharedPtr filter_state(\n      std::make_shared<Envoy::StreamInfo::FilterStateImpl>(\n          Envoy::StreamInfo::FilterState::LifeSpan::FilterChain));\n  filter_state->setData(\"testing\", std::make_unique<StringAccessorImpl>(\"test_value\"),\n                        StreamInfo::FilterState::StateType::ReadOnly,\n                        StreamInfo::FilterState::LifeSpan::FilterChain);\n  ON_CALL(stream_info, filterState()).WillByDefault(ReturnRef(filter_state));\n  ON_CALL(Const(stream_info), filterState()).WillByDefault(ReturnRef(*filter_state));\n\n  ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::LocalReset))\n      .WillByDefault(Return(true));\n\n  absl::optional<std::string> rc_details{\"via_upstream\"};\n  ON_CALL(stream_info, responseCodeDetails()).WillByDefault(ReturnRef(rc_details));\n\n  for (const auto& test_case : test_cases) {\n    Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValueOption> to_add;\n    envoy::config::core::v3::HeaderValueOption* header = to_add.Add();\n    header->mutable_header()->set_key(\"x-header\");\n    header->mutable_header()->set_value(test_case.input_);\n\n    if (test_case.expected_exception_) {\n      EXPECT_FALSE(test_case.expected_output_);\n      EXPECT_THROW_WITH_MESSAGE(HeaderParser::configure(to_add), EnvoyException,\n                                test_case.expected_exception_.value());\n      continue;\n    }\n\n    HeaderParserPtr req_header_parser = HeaderParser::configure(to_add);\n\n    Http::TestRequestHeaderMapImpl header_map{{\":method\", \"POST\"}};\n    req_header_parser->evaluateHeaders(header_map, stream_info);\n\n    std::string descriptor = fmt::format(\"for test case input: {}\", 
test_case.input_);\n\n    if (!test_case.expected_output_) {\n      EXPECT_FALSE(header_map.has(\"x-header\")) << descriptor;\n      continue;\n    }\n\n    EXPECT_TRUE(header_map.has(\"x-header\")) << descriptor;\n    EXPECT_EQ(test_case.expected_output_.value(), header_map.get_(\"x-header\")) << descriptor;\n  }\n}\n\nTEST(HeaderParserTest, EvaluateHeaders) {\n  const std::string yaml = R\"EOF(\nmatch: { prefix: \"/new_endpoint\" }\nroute:\n  cluster: \"www2\"\n  prefix_rewrite: \"/api/new_endpoint\"\nrequest_headers_to_add:\n  - header:\n      key: \"x-client-ip\"\n      value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n    append: true\n  - header:\n      key: \"x-client-ip-port\"\n      value: \"%DOWNSTREAM_REMOTE_ADDRESS%\"\n    append: true\n)EOF\";\n\n  HeaderParserPtr req_header_parser =\n      HeaderParser::configure(parseRouteFromV3Yaml(yaml).request_headers_to_add());\n  Http::TestRequestHeaderMapImpl header_map{{\":method\", \"POST\"}};\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  req_header_parser->evaluateHeaders(header_map, stream_info);\n  EXPECT_TRUE(header_map.has(\"x-client-ip\"));\n  EXPECT_TRUE(header_map.has(\"x-client-ip-port\"));\n}\n\nTEST(HeaderParserTest, EvaluateEmptyHeaders) {\n  const std::string yaml = R\"EOF(\nmatch: { prefix: \"/new_endpoint\" }\nroute:\n  cluster: \"www2\"\n  prefix_rewrite: \"/api/new_endpoint\"\nrequest_headers_to_add:\n  - header:\n      key: \"x-key\"\n      value: \"%UPSTREAM_METADATA([\\\"namespace\\\", \\\"key\\\"])%\"\n    append: true\n)EOF\";\n\n  HeaderParserPtr req_header_parser =\n      HeaderParser::configure(parseRouteFromV3Yaml(yaml).request_headers_to_add());\n  Http::TestRequestHeaderMapImpl header_map{{\":method\", \"POST\"}};\n  std::shared_ptr<NiceMock<Envoy::Upstream::MockHostDescription>> host(\n      new NiceMock<Envoy::Upstream::MockHostDescription>());\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  auto metadata = 
std::make_shared<envoy::config::core::v3::Metadata>();\n  ON_CALL(stream_info, upstreamHost()).WillByDefault(Return(host));\n  ON_CALL(*host, metadata()).WillByDefault(Return(metadata));\n  req_header_parser->evaluateHeaders(header_map, stream_info);\n  EXPECT_FALSE(header_map.has(\"x-key\"));\n}\n\nTEST(HeaderParserTest, EvaluateStaticHeaders) {\n  const std::string yaml = R\"EOF(\nmatch: { prefix: \"/new_endpoint\" }\nroute:\n  cluster: \"www2\"\n  prefix_rewrite: \"/api/new_endpoint\"\nrequest_headers_to_add:\n  - header:\n      key: \"static-header\"\n      value: \"static-value\"\n    append: true\n)EOF\";\n\n  HeaderParserPtr req_header_parser =\n      HeaderParser::configure(parseRouteFromV3Yaml(yaml).request_headers_to_add());\n  Http::TestRequestHeaderMapImpl header_map{{\":method\", \"POST\"}};\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  req_header_parser->evaluateHeaders(header_map, stream_info);\n  EXPECT_TRUE(header_map.has(\"static-header\"));\n  EXPECT_EQ(\"static-value\", header_map.get_(\"static-header\"));\n}\n\nTEST(HeaderParserTest, EvaluateCompoundHeaders) {\n  const std::string yaml = R\"EOF(\nmatch: { prefix: \"/new_endpoint\" }\nroute:\n  cluster: www2\nrequest_headers_to_add:\n  - header:\n      key: \"x-prefix\"\n      value: \"prefix-%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  - header:\n      key: \"x-suffix\"\n      value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%-suffix\"\n  - header:\n      key: \"x-both\"\n      value: \"prefix-%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%-suffix\"\n  - header:\n      key: \"x-escaping-1\"\n      value: \"%%%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%%%\"\n  - header:\n      key: \"x-escaping-2\"\n      value: \"%%%%%%\"\n  - header:\n      key: \"x-multi\"\n      value: \"%PROTOCOL% from %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  - header:\n      key: \"x-multi-back-to-back\"\n      value: \"%PROTOCOL%%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  - header:\n      key: 
\"x-metadata\"\n      value: \"%UPSTREAM_METADATA([\\\"namespace\\\", \\\"%key%\\\"])%\"\n  - header:\n      key: \"x-per-request\"\n      value: \"%PER_REQUEST_STATE(testing)%\"\nrequest_headers_to_remove: [\"x-nope\"]\n  )EOF\";\n\n  const auto route = parseRouteFromV3Yaml(yaml);\n  HeaderParserPtr req_header_parser =\n      HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove());\n  Http::TestRequestHeaderMapImpl header_map{\n      {\":method\", \"POST\"}, {\"x-safe\", \"safe\"}, {\"x-nope\", \"nope\"}};\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  absl::optional<Envoy::Http::Protocol> protocol = Envoy::Http::Protocol::Http11;\n  ON_CALL(stream_info, protocol()).WillByDefault(ReturnPointee(&protocol));\n\n  std::shared_ptr<NiceMock<Envoy::Upstream::MockHostDescription>> host(\n      new NiceMock<Envoy::Upstream::MockHostDescription>());\n  ON_CALL(stream_info, upstreamHost()).WillByDefault(Return(host));\n\n  // Metadata with percent signs in the key.\n  auto metadata = std::make_shared<envoy::config::core::v3::Metadata>(\n      TestUtility::parseYaml<envoy::config::core::v3::Metadata>(\n          R\"EOF(\n        filter_metadata:\n          namespace:\n            \"%key%\": value\n      )EOF\"));\n  ON_CALL(*host, metadata()).WillByDefault(Return(metadata));\n\n  Envoy::StreamInfo::FilterStateSharedPtr filter_state(\n      std::make_shared<Envoy::StreamInfo::FilterStateImpl>(\n          Envoy::StreamInfo::FilterState::LifeSpan::FilterChain));\n  filter_state->setData(\"testing\", std::make_unique<StringAccessorImpl>(\"test_value\"),\n                        StreamInfo::FilterState::StateType::ReadOnly,\n                        StreamInfo::FilterState::LifeSpan::FilterChain);\n  ON_CALL(stream_info, filterState()).WillByDefault(ReturnRef(filter_state));\n  ON_CALL(Const(stream_info), filterState()).WillByDefault(ReturnRef(*filter_state));\n\n  req_header_parser->evaluateHeaders(header_map, stream_info);\n\n  
EXPECT_TRUE(header_map.has(\"x-prefix\"));\n  EXPECT_EQ(\"prefix-127.0.0.1\", header_map.get_(\"x-prefix\"));\n\n  EXPECT_TRUE(header_map.has(\"x-suffix\"));\n  EXPECT_EQ(\"127.0.0.1-suffix\", header_map.get_(\"x-suffix\"));\n\n  EXPECT_TRUE(header_map.has(\"x-both\"));\n  EXPECT_EQ(\"prefix-127.0.0.1-suffix\", header_map.get_(\"x-both\"));\n\n  EXPECT_TRUE(header_map.has(\"x-escaping-1\"));\n  EXPECT_EQ(\"%127.0.0.1%\", header_map.get_(\"x-escaping-1\"));\n\n  EXPECT_TRUE(header_map.has(\"x-escaping-2\"));\n  EXPECT_EQ(\"%%%\", header_map.get_(\"x-escaping-2\"));\n\n  EXPECT_TRUE(header_map.has(\"x-multi\"));\n  EXPECT_EQ(\"HTTP/1.1 from 127.0.0.1\", header_map.get_(\"x-multi\"));\n\n  EXPECT_TRUE(header_map.has(\"x-multi-back-to-back\"));\n  EXPECT_EQ(\"HTTP/1.1127.0.0.1\", header_map.get_(\"x-multi-back-to-back\"));\n\n  EXPECT_TRUE(header_map.has(\"x-metadata\"));\n  EXPECT_EQ(\"value\", header_map.get_(\"x-metadata\"));\n\n  EXPECT_TRUE(header_map.has(\"x-per-request\"));\n  EXPECT_EQ(\"test_value\", header_map.get_(\"x-per-request\"));\n\n  EXPECT_TRUE(header_map.has(\"x-safe\"));\n  EXPECT_FALSE(header_map.has(\"x-nope\"));\n}\n\nTEST(HeaderParserTest, EvaluateHeadersWithAppendFalse) {\n  const std::string yaml = R\"EOF(\nmatch: { prefix: \"/new_endpoint\" }\nroute:\n  cluster: \"www2\"\n  prefix_rewrite: \"/api/new_endpoint\"\nrequest_headers_to_add:\n  - header:\n      key: \"static-header\"\n      value: \"static-value\"\n    append: true\n  - header:\n      key: \"x-client-ip\"\n      value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n    append: true\n  - header:\n      key: \"x-request-start\"\n      value: \"%START_TIME(%s%3f)%\"\n    append: true\n  - header:\n      key: \"x-request-start-default\"\n      value: \"%START_TIME%\"\n    append: true\n  - header:\n      key: \"x-request-start-range\"\n      value: \"%START_TIME(%f, %1f, %2f, %3f, %4f, %5f, %6f, %7f, %8f, %9f)%\"\n    append: true\n)EOF\";\n\n  // Disable append mode.\n  
envoy::config::route::v3::Route route = parseRouteFromV3Yaml(yaml);\n  route.mutable_request_headers_to_add(0)->mutable_append()->set_value(false);\n  route.mutable_request_headers_to_add(1)->mutable_append()->set_value(false);\n  route.mutable_request_headers_to_add(2)->mutable_append()->set_value(false);\n\n  HeaderParserPtr req_header_parser =\n      Router::HeaderParser::configure(route.request_headers_to_add());\n  Http::TestRequestHeaderMapImpl header_map{\n      {\":method\", \"POST\"}, {\"static-header\", \"old-value\"}, {\"x-client-ip\", \"0.0.0.0\"}};\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  const SystemTime start_time(std::chrono::microseconds(1522796769123456));\n  EXPECT_CALL(stream_info, startTime()).Times(3).WillRepeatedly(Return(start_time));\n\n  req_header_parser->evaluateHeaders(header_map, stream_info);\n  EXPECT_TRUE(header_map.has(\"static-header\"));\n  EXPECT_EQ(\"static-value\", header_map.get_(\"static-header\"));\n  EXPECT_TRUE(header_map.has(\"x-client-ip\"));\n  EXPECT_EQ(\"127.0.0.1\", header_map.get_(\"x-client-ip\"));\n  EXPECT_TRUE(header_map.has(\"x-request-start\"));\n  EXPECT_EQ(\"1522796769123\", header_map.get_(\"x-request-start\"));\n  EXPECT_TRUE(header_map.has(\"x-request-start-default\"));\n  EXPECT_EQ(\"2018-04-03T23:06:09.123Z\", header_map.get_(\"x-request-start-default\"));\n  EXPECT_TRUE(header_map.has(\"x-request-start-range\"));\n  EXPECT_EQ(\"123456000, 1, 12, 123, 1234, 12345, 123456, 1234560, 12345600, 123456000\",\n            header_map.get_(\"x-request-start-range\"));\n\n  using CountMap = absl::flat_hash_map<std::string, int>;\n  CountMap counts;\n  header_map.iterate([&counts](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    absl::string_view key = header.key().getStringView();\n    CountMap::iterator i = counts.find(key);\n    if (i == counts.end()) {\n      counts.insert({std::string(key), 1});\n    } else {\n      i->second++;\n    }\n    return 
Http::HeaderMap::Iterate::Continue;\n  });\n\n  EXPECT_EQ(1, counts[\"static-header\"]);\n  EXPECT_EQ(1, counts[\"x-client-ip\"]);\n  EXPECT_EQ(1, counts[\"x-request-start\"]);\n}\n\nTEST(HeaderParserTest, EvaluateResponseHeaders) {\n  const std::string yaml = R\"EOF(\nmatch: { prefix: \"/new_endpoint\" }\nroute:\n  cluster: \"www2\"\nresponse_headers_to_add:\n  - header:\n      key: \"x-client-ip\"\n      value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n    append: true\n  - header:\n      key: \"x-client-ip-port\"\n      value: \"%DOWNSTREAM_REMOTE_ADDRESS%\"\n    append: true\n  - header:\n      key: \"x-request-start\"\n      value: \"%START_TIME(%s.%3f)%\"\n    append: true\n  - header:\n      key: \"x-request-start-multiple\"\n      value: \"%START_TIME(%s.%3f)% %START_TIME% %START_TIME(%s)%\"\n    append: true\n  - header:\n      key: \"x-request-start-f\"\n      value: \"%START_TIME(f)%\"\n    append: true\n  - header:\n      key: \"x-request-start-range\"\n      value: \"%START_TIME(%f, %1f, %2f, %3f, %4f, %5f, %6f, %7f, %8f, %9f)%\"\n    append: true\n  - header:\n      key: \"x-request-start-default\"\n      value: \"%START_TIME%\"\n    append: true\n  - header:\n      key: \"set-cookie\"\n      value: \"foo\"\n  - header:\n      key: \"set-cookie\"\n      value: \"bar\"\n    append: true\n\nresponse_headers_to_remove: [\"x-nope\"]\n)EOF\";\n\n  const auto route = parseRouteFromV3Yaml(yaml);\n  HeaderParserPtr resp_header_parser =\n      HeaderParser::configure(route.response_headers_to_add(), route.response_headers_to_remove());\n  Http::TestRequestHeaderMapImpl header_map{\n      {\":method\", \"POST\"}, {\"x-safe\", \"safe\"}, {\"x-nope\", \"nope\"}};\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  // Initialize start_time as 2018-04-03T23:06:09.123Z in microseconds.\n  const SystemTime start_time(std::chrono::microseconds(1522796769123456));\n  EXPECT_CALL(stream_info, startTime()).Times(7).WillRepeatedly(Return(start_time));\n\n 
 resp_header_parser->evaluateHeaders(header_map, stream_info);\n  EXPECT_TRUE(header_map.has(\"x-client-ip\"));\n  EXPECT_TRUE(header_map.has(\"x-client-ip-port\"));\n  EXPECT_TRUE(header_map.has(\"x-request-start-multiple\"));\n  EXPECT_TRUE(header_map.has(\"x-safe\"));\n  EXPECT_FALSE(header_map.has(\"x-nope\"));\n  EXPECT_TRUE(header_map.has(\"x-request-start\"));\n  EXPECT_EQ(\"1522796769.123\", header_map.get_(\"x-request-start\"));\n  EXPECT_EQ(\"1522796769.123 2018-04-03T23:06:09.123Z 1522796769\",\n            header_map.get_(\"x-request-start-multiple\"));\n  EXPECT_TRUE(header_map.has(\"x-request-start-f\"));\n  EXPECT_EQ(\"f\", header_map.get_(\"x-request-start-f\"));\n  EXPECT_TRUE(header_map.has(\"x-request-start-default\"));\n  EXPECT_EQ(\"2018-04-03T23:06:09.123Z\", header_map.get_(\"x-request-start-default\"));\n  EXPECT_TRUE(header_map.has(\"x-request-start-range\"));\n  EXPECT_EQ(\"123456000, 1, 12, 123, 1234, 12345, 123456, 1234560, 12345600, 123456000\",\n            header_map.get_(\"x-request-start-range\"));\n  EXPECT_EQ(\"foo\", header_map.get_(\"set-cookie\"));\n\n  // Per https://github.com/envoyproxy/envoy/issues/7488 make sure we don't\n  // combine set-cookie headers\n  std::vector<absl::string_view> out;\n  Http::HeaderUtility::getAllOfHeader(header_map, \"set-cookie\", out);\n  ASSERT_EQ(out.size(), 2);\n  ASSERT_EQ(out[0], \"foo\");\n  ASSERT_EQ(out[1], \"bar\");\n}\n\nTEST(HeaderParserTest, EvaluateRequestHeadersRemoveBeforeAdd) {\n  const std::string yaml = R\"EOF(\nmatch: { prefix: \"/new_endpoint\" }\nroute:\n  cluster: www2\nrequest_headers_to_add:\n  - header:\n      key: \"x-foo-header\"\n      value: \"bar\"\nrequest_headers_to_remove: [\"x-foo-header\"]\n)EOF\";\n\n  const auto route = parseRouteFromV3Yaml(yaml);\n  HeaderParserPtr req_header_parser =\n      HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove());\n  Http::TestRequestHeaderMapImpl header_map{{\"x-foo-header\", \"foo\"}};\n  
NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  req_header_parser->evaluateHeaders(header_map, stream_info);\n  EXPECT_EQ(\"bar\", header_map.get_(\"x-foo-header\"));\n}\n\nTEST(HeaderParserTest, EvaluateResponseHeadersRemoveBeforeAdd) {\n  const std::string yaml = R\"EOF(\nmatch: { prefix: \"/new_endpoint\" }\nroute:\n  cluster: www2\nresponse_headers_to_add:\n  - header:\n      key: \"x-foo-header\"\n      value: \"bar\"\nresponse_headers_to_remove: [\"x-foo-header\"]\n)EOF\";\n\n  const auto route = parseRouteFromV3Yaml(yaml);\n  HeaderParserPtr resp_header_parser =\n      HeaderParser::configure(route.response_headers_to_add(), route.response_headers_to_remove());\n  Http::TestResponseHeaderMapImpl header_map{{\"x-foo-header\", \"foo\"}};\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  resp_header_parser->evaluateHeaders(header_map, stream_info);\n  EXPECT_EQ(\"bar\", header_map.get_(\"x-foo-header\"));\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/address_0",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/address_1",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%DOWNSTREAM_LOCAL_ADDRESS%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/address_2",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/address_3",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%DOWNSTREAM_REMOTE_ADDRESS%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-header_parser_fuzz_test-5107723602493440",
    "content": "headers_to_add {\n}\nheaders_to_add {\n  header {\n    key: \"te\"\n    value: \"l\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"te\"\n    value: \"@\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-header_parser_fuzz_test-5163306626580480",
    "content": "headers_to_add {\n  header {\n    key: \"P\"\n    value: \"%PER_REQUEST_STATE(oB]$T)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(B%444ssa4%s4%>TME(B%128sY$T_)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%PER_REQUEST_STATE(dB]$T)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n    value: \"%PER_REQUEST_STATE(dB]$T)%\"\n  }\n  append {\n    value: true\n  }\n}\nheaders_to_add {\n  header {\n    key: \"]\"\n    value: \"%UPSTREAM_METADATA([\\\"\\\", \\\"\\\"])%\"\n  }\n}\nstream_info {\n  dynamic_metadata {\n    filter_metadata {\n      key: \"\\000\\000{\\000+p\"\n      value {\n      }\n    }\n    filter_metadata {\n      key: \"\\000}\"\n      value {\n      }\n    }\n    filter_metadata {\n      key: \"\\000}\"\n      value {\n      }\n    }\n    filter_metadata {\n      key: \"K\"\n      value {\n        fields {\n          key: \"\"\n          value {\n          }\n        }\n      }\n    }\n  }\n  upstream_metadata {\n    filter_metadata {\n      key: \"\"\n      value {\n        fields {\n          key: \"\"\n          value {\n            string_value: \"c\\000\\000\\000\\000\\000\\000\\000\"\n          }\n        }\n      }\n    }\n    filter_metadata {\n      key: \"-\"\n      value {\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-header_parser_fuzz_test-5648325682921472",
    "content": "headers_to_add {\n  header {\n    key: \"Q\"\n    value: \"%START_TIME(%TART_TIME(%f%f%f\\000%f%f%fTE(%f%f%f\\305\\257E11111(%f%f%ff;%f%f%f%f%f%f%%f%f%f%f%f%f%f%f%f%f%f%f%f111(%f%f%ff;%f%f%f%f%f%f%%f%f%f%f%f%f%f%f%f%f%f%f%f\\000%f%f%fTE(%f%f%f%E11f%f%f%f%f%f%f\\000%f%f%fTf%%f%f%f%f%f%f%f%f%f%f%f%f%f\\000%f%f%fTE(%f%f%f%E11f%f%f%f%f%f%f\\000%f%f%fTE(%f%f%f%E11111(%E111%f%ff%f%ff;%f%f%f%f%\\000%f%f%fTE(%f%f%f%E11f%f%f%f%f%f%f\\000%f%f%fTf%%f%f%f%f%f%f%f%f%f%f%f%f%f\\000%f%f%fTE(%f%f%f%E11f%f%f%f%f%f%f\\000%f%f%fTE(%f%f%f%E11111(%E111%f%ff%f%ff;%f%f%f%f%f%f%f%f%f%f%ff%f%)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-header_parser_fuzz_test-5702537941876736",
    "content": "headers_to_add {\n  header {\n    key: \"m\"\n    value: \"%DOWNSTREAM_PEER_SUBJECT%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-header_parser_fuzz_test-5710655463620608",
    "content": "headers_to_add {\n  header {\n    key: \"P\\000\\000\\000\\000\\006b|H\"\n    value: \"%START_TIMEt_in(%?f,%%%% %5f%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%UPSTREAM_HOST%%%%%%%%%%[ZZZZZZ_%START_TIMEt_in(%?f,%%%% %5f%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%UPSTREAM_HOST%%%%%%%%DOWNSTREAM_PEER_CERT%%[ZZZZZZ_%START_TIMEt_in(%?f,%%%% %5f%%%%%%%%%%\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177%DOWNSTREAM_PEER_URI_SAN%\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177%DOWNSTREAM_LOCAL_URI_SAN%\\177\\177\\177%DOWNSTREAM_PEER_CERT_V_START%\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%UPSTREAM_HOST%%%%%%%%%%[ZZZZZZ_%START_TIME(%)%\"\n  }\n}\nstream_info {\n  start_time: 63\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-header_parser_fuzz_test-6195059702628352",
    "content": "headers_to_add {\n  header {\n    value: \"%START_TIMEY()%T      %START_TIME(f, %�{{{{{_�����������85request_i:\t1�227 f55555_n555555555555%85nfo 5#555.55f, %1f,  %85/5_inf %8,,,,,,,,,,,,,,,,,,,,,,,,,55555 start_timefo 5#5555#555.55f, %1f,  %85/55ime:\t15227 f %1f,  %8555555555  %85/5555Fme:\t15227 f-5555_inf 965L5559f)%\"\n  }\n}\nstream_info {\n  start_time:\t1522796769123\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-4709439954485248",
    "content": "headers_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%f%f%f%E92E1%f%f%E4/1f%E1%f%f%E4/1f%E46E%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768E4/1f%E47E768f%\\305\\2231%f%f%E461f%Eff%f%E916%0f%6f%)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%f%f%f%E92E1%f%f%E4/1f%E1%f%f%E4/1f%E3E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E46E768f%\\305\\2231%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E%%Eff%f%E0f%6f%)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%f%f%f%E92E1%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%EE768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f461f%Enf%f%f%E768-5517521057234699755%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E46E768f%\\305\\2231%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%EE47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E11f%E1%f%f%E%%Eff%f%E0f%6f%)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5191408676241408",
    "content": "headers_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%Qf{pbot\\204N{{{{{B%)%%START_TIME(kB\\377\\177?Be{{{{{{{{{{{{{f{{\\377\\377{{{B%)%%START_TIME({B%)%%%%%%%%%%START_TIME(%Qf{prot\\2043{%@%%START_TIME8%)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"1\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%:f{prot\\002 %2\\003%%%043{{{[{B%)%%START_TIME(kB\\377\\177?BB{{--------------------------------------------%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\\017%%%%%%G%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\\271%%%%%%%%%%%%%%%%%%%%%%%%\\377\\377%%%%%%%%%%%%%%+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%START_TIME(%)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"1\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(`QfdBB, %5f, %6f, %4294967295f, %8f%1f, %2f, %3f,f, %1f, %2f, %3f,%6f,, %6f,(%7f, %8f, % %68f, % %4f, %5f, %6f, %4294967295f, %8f, 9f)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%f,%)%%START_TIME(kB\\177\\177?BB{{{{B%)%%START_TIME(%)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%Qf{pbot\\204N{{{{{B%)%%START_TIME(kB\\377\\177?Be{{{{{{{{{{{{{f{{\\377\\377{{{B%)%%START_TIME({B%)%%%%%%%%%%START_TIME(%Qf{prot\\2043{%@%%START_TIME8%)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"1\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"?\"\n    value: \"%START_TIME(`QfdBB, %5f, %6f, %4294967295f, %8f%1f, %2f, %3f,f, %1f, %2f, %3f,%6f,, %6f,(%7f, %8f, % %68f, % %4f, %5f, %6f, %4294967295f, %8f, 9f)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"1\"\n    value: \"1\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"0\"\n    value: \"%START_TIMEY()%5+5555FmehWNSTRSAM_LOCAL_ADDRESS%%DOWNSTREAM_LOCAL_ADDRESS%\\002DO\\024f,f,  
+89fCOL%6020\\002COL%\\200\\377\\377\\377\\20020\\220\\022\\220%%%PROT5COL%\\2003J0\\220\\2220\\222\\220%%%PROTOeOL%220%%%PR\\200\\03360\\\\23J0\\220\\2220}222\\002\\002N0\\2220}222\\220%%%\\020R%\\200;60m220\\220%%%PROTOC220\\002\\220%%%55555555  %85+3555Fme:\\37105227 f-55S5_inf    %START_TIME(f)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(`QfdBB, %5f, %6f, %4294967295f, %8f%1f, %2f, %3f,f, %1f, %2f, %3f,%6f,, %6f,(%7f, %8f, % %68f, % %4f, %5f, %6f, %4294967295f, %8f, 9f)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(`QfdBB, %5f, %6f, %4294967295f, %8f%1f, %2f, %3f,f, %1f, %2f, %3f,%6f,, %6f,(%7f, %8f, % %68f, % %4f, %5f, %6f, %429f, %6f, %4294969f)%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(`QfdBB, %5f, %6f, %4294967295f, %f, %2f, %3f,f, %1f, %2f, %3f,%6f,, %6f,(%7f, %8f, % %68f, % %4f, %5f, %6f, %4294967295f, %8f, 9f)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5201773654704128",
    "content": "headers_to_remove: \">\\000\\000\\000\\000\\000\\000\\000\"\nstream_info {\n  dynamic_metadata {\n    filter_metadata {\n      key: \"\"\n      value {\n        fields {\n          key: \"\"\n          value {\n            list_value {\n              values {\n                null_value: NULL_VALUE\n              }\n              values {\n              }\n              values {\n                null_value: NULL_VALUE\n              }\n              values {\n                string_value: \"\"\n              }\n              values {\n                null_value: NULL_VALUE\n              }\n            }\n          }\n        }\n        fields {\n          key: \"!\"\n          value {\n          }\n        }\n        fields {\n          key: \"$headers_to_add {\\n  header {\\n    key: \\\"Q(Q(\\\"\\n  }\\n}\\nheaders_to_add {\\n  header {\\n    key: \\\"A\\\"\\n    value: \\\"%START_TIMEt_ig(%?f,d%DYNAMIC_METAg(%?f, %DYNAMIC_MET_AEtig(%?f,d%DYNAMIC_METAg(%?f, %DYNAMIC_METAEt_ig(%?f,d%DYNbMIC_METAg(%?f, %DYNAMIG_METAEt_ig(%?f,d%DYNAMIC_METAg(%?ME9Ag(%?f, %DYNAMIC_METAEt_?f, %DYNAMIC_MEIC_METAg(%?f, %DYDYNAMIC_METAg(%?f, %DYNAMIC_METAEt_ig(%?f,d%DYNAMIC_METAg(%?f, %DYNAMIC_METAEt_ig(%?f,d%DYNAMIC_METAg(%?f, %DYNAMIC_METAEt_ig(%?f,d%DYNAMIC_METAg(%?f, %DYNAMIC_METAEt_ig(%?f,d%DYNAMIC_METAg(%?f, %DYxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxNAMIC_METAEt_ig(%?f,d%DYNAMIC_METAg(%?f, %DYNAMIC_METAEt_ig(%?f,d%DYNAMIC_METAg(%?f, %DYNAMI5_METAEt_ig(%?f,d%IC_METAg(%?f, %DYNAMIC_METAEt_ig(%?f,d%DYNAMIC_METAg(%?f, %DYNAMIC_MEVAEt_ig(%?f,d%DYNAMIC_METAg(%?f, %DYNAMIC_METAEt_ig(%?f,d%DYNAMIC_METAg(%?f,f, %DYNAMIC_METADATA()%\\\"\\n  }\\n}\\nstream_info {\\n  address {\\n    pipe {\\n      path: \\\"p\\\"\\n      mode: 1\\n    }\\n  }\\n}\\n\"\n          value {\n          }\n        }\n      }\n    }\n    filter_metadata {\n      key: \">\\000\\000\\000\\000\\000\\000\\000\"\n      value {\n      }\n    }\n  }\n  
requested_server_name: \" \"\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5630125928873984",
    "content": "headers_to_add {   header {     key: \" \"     value: \"%START_TIME(�)%\"   } } stream_info {   start_time: 72059116831228591 }\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5647641023610880",
    "content": "headers_to_add {   header {     key: \" \"     value: \"%START_TIME()%\"   } } request_info {   start_time: 9799832698963886077 } \n"
  },
  {
    "path": "test/common/router/header_parser_corpus/compound_headers",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"x-prefix\"\n    value: \"prefix-%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"x-suffix\"\n    value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%-suffix\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"x-both\"\n    value: \"prefix-%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%-suffix\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"x-escaping-1\"\n    value: \"%%%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%%%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"x-escaping-2\"\n    value: \"%%%%%%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"x-multi\"\n    value: \"%PROTOCOL% from %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"x-multi-back-to-back\"\n    value: \"%PROTOCOL%%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n  }\n}\nheaders_to_add {\n  header {\n    key: \"x-metadata\"\n    value: \"%UPSTREAM_METADATA([\\\"namespace\\\", \\\"%key%\\\"])%\"\n  }\n}\nstream_info {\n  upstream_metadata {\n    filter_metadata {\n      key: \"namespace\"\n      value: {\n        fields {\n          key: \"%key%\"\n          value: { string_value: \"value\" }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/crash-af52fc744a3a7d7c9fe632ca457830ca323023bd",
    "content": "headers_to_add {\n  header {\n    key: \"%START_TIMEY()%T      %START_TIME(f, %\\357\\277\\275{{{{{_\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\27585request_i:\\t1\\357\\277\\275227 f55555_n555555555555%85nfo 5#555.55f, %1f,  %85/5_inf %8,,,,,,,,,,,,,,,,,,,,,,,,,55555 start_timefo 5#5555#555.55f, %1f,  %85/55ime:\\t15227 f %1f,  %8555555555  %85/5555Fme:\\t15227 f-5555_inf 965L5559f)%\"\n    value: \"%START_TIMEY()%T      %START_TIME(f, %\\357\\277\\275{{{{{_\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\275\\357\\277\\27585request_i:\\t1\\357\\277\\275227 f55555_n555555555555%85nfo 5#555.55f, %1f,  %85/5_inf %8,,,,,,,,,,,,,,,,,,,,,,,,,55555 start_timefo 5#5555#555.55f, %1f,  %85/55ime:\\t15227 f %1f,  %8555555555  %85/5555Fme:\\t15227 f-5555_inf 965L5559f)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/foo",
    "content": "headers_to_add {\n  header {\n    key: \"foo\"\n    value: \"%START_TIME(%0\\240&&&&&&&&zzzzzzzzzzzzzamA(24d\\240\\240\\240\\240\\240\\240\\240\\240Q240\\240\\240\\240\\240\\020\\240^240&&7&&&&&&&&&&\\006&val\\177\\377\\376&&aenam\\001s %1f,  %85/5_inf %8,,,,,,,,,,,,t_timefo 5#5555#555.5,  %85/5_inf %8,,,,,,,,,,,,t_t  %85555555\\2005  %85/5555Fme:\\t15227 f-5555_inf 965L5$59f)%\"\n  }\n}\nheaders_to_remove: \"7\"\nstream_info {\n  upstream_metadata {\n    filter_metadata {\n      key: \"\"\n      value {\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_0",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_1",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%% infix %\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_10",
    "content": "headers_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%4On%)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_11",
    "content": "headers_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%4En%)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_2",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%INVALID%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_3",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%VAR\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_4",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%%%VAR\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_5",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"before %%%VAR after\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_6",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%UPSTREAM_METADATA(no array)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_7",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%UPSTREAM_METADATA%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_8",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%UPSTREAM_METADATA([\\\"ns\\\"])%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/invalid_9",
    "content": "headers_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%En%)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/protocol",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%%%PROTOCOL%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/start_time",
    "content": "headers_to_add {\n  header {\n    key: \"x-request-start\"\n    value: \"%START_TIME(%s.%3f)%\"\n  }\n  append: { value: true }\n}\nheaders_to_add {\n  header {\n    key: \"x-request-start-f\"\n    value: \"%START_TIME(f)%\"\n  }\n  append: { value: true }\n}\nheaders_to_add {\n  header {\n    key: \"x-request-start-range\"\n    value: \"%START_TIME(%f, %1f, %2f, %3f, %4f, %5f, %6f, %7f, %8f, %9f)%\"\n  }\n  append: { value: true }\n}\nheaders_to_add {\n  header {\n    key: \"x-request-start-default\"\n    value: \"%START_TIME%\"\n  }\n}\nstream_info {\n  start_time: 1522796769123\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/timeout_test_case",
    "content": "headers_to_add {\n  header {\n    key: \"  \"\n    value: \"%START_TIME(`Qf;BBBBB)%%START_TIME(%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %2f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, 
%2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 
%3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, 
%2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 
%3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, 
%2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1%3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, 
%2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 
%3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f,\\016~\\177 %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3  , ,,2, fff%f%13%(%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, 
(%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, % %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %25f, %6f, %4294967295f, %8f, 9f)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/upstream_metadata_0",
    "content": "headers_to_add {\n  header {\n    key: \"x-header\"\n    value: \"%UPSTREAM_METADATA([\\\"ns\\\", \\\"key\\\"])%\"\n  }\n}\nstream_info {\n  start_time: 1522796769123\n  upstream_metadata {\n    filter_metadata {\n      key: \"ns\"\n      value: {\n        fields {\n          key: \"key\"\n          value: { string_value: \"value\" }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/upstream_metadata_1",
    "content": "headers_to_add {\n  header {\n    key: \"x-header\"\n    value: \"%UPSTREAM_METADATA( \\t [ \\t \\\"ns\\\" \\t , \\t \\\"key\\\" \\t ] \\t )%\"\n  }\n}\nstream_info {\n  start_time: 1522796769123\n  upstream_metadata {\n    filter_metadata {\n      key: \"ns\"\n      value: {\n        fields {\n          key: \"key\"\n          value: { string_value: \"value\" }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_corpus/valid",
    "content": "headers_to_add {\n  header {\n    key: \"A\"\n    value: \"%START_TIME(%E4n%)%\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/header_parser_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.router;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"test/fuzz/common.proto\";\n\nimport \"validate/validate.proto\";\n\n// Structured input for header_parser_fuzz_test.\n\nmessage TestCase {\n  repeated envoy.config.core.v3.HeaderValueOption headers_to_add = 1;\n  repeated string headers_to_remove = 2\n      [(validate.rules).repeated .items.string.well_known_regex = HTTP_HEADER_NAME];\n  test.fuzz.StreamInfo stream_info = 3;\n}\n"
  },
  {
    "path": "test/common/router/header_parser_fuzz_test.cc",
    "content": "#include \"common/http/header_map_impl.h\"\n#include \"common/router/header_parser.h\"\n\n#include \"test/common/router/header_parser_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\nnamespace {\n\nDEFINE_PROTO_FUZZER(const test::common::router::TestCase& input) {\n  try {\n    TestUtility::validate(input);\n    Router::HeaderParserPtr parser =\n        Router::HeaderParser::configure(input.headers_to_add(), input.headers_to_remove());\n    Http::TestRequestHeaderMapImpl header_map;\n    std::unique_ptr<TestStreamInfo> test_stream_info = fromStreamInfo(input.stream_info());\n    parser->evaluateHeaders(header_map, *test_stream_info);\n    ENVOY_LOG_MISC(trace, \"Success\");\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n}\n\n} // namespace\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/rds_impl_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/admin/v3/config_dump.pb.validate.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/router/rds_impl.h\"\n\n#include \"server/admin/admin.h\"\n\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nenvoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\nparseHttpConnectionManagerFromYaml(const std::string& yaml_string) {\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      http_connection_manager;\n  TestUtility::loadFromYaml(yaml_string, http_connection_manager);\n  return http_connection_manager;\n}\n\nclass RdsTestBase : public testing::Test {\npublic:\n  RdsTestBase() {\n    // For server_factory_context\n    ON_CALL(server_factory_context_, scope()).WillByDefault(ReturnRef(scope_));\n    ON_CALL(server_factory_context_, messageValidationContext())\n        .WillByDefault(ReturnRef(validation_context_));\n    EXPECT_CALL(validation_context_, dynamicValidationVisitor())\n        
.WillRepeatedly(ReturnRef(validation_visitor_));\n\n    ON_CALL(outer_init_manager_, add(_)).WillByDefault(Invoke([this](const Init::Target& target) {\n      init_target_handle_ = target.createHandle(\"test\");\n    }));\n    ON_CALL(outer_init_manager_, initialize(_))\n        .WillByDefault(Invoke(\n            [this](const Init::Watcher& watcher) { init_target_handle_->initialize(watcher); }));\n  }\n\n  Event::SimulatedTimeSystem& timeSystem() { return time_system_; }\n\n  Event::SimulatedTimeSystem time_system_;\n  NiceMock<ProtobufMessage::MockValidationContext> validation_context_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  NiceMock<Init::MockManager> outer_init_manager_;\n  NiceMock<Server::Configuration::MockServerFactoryContext> server_factory_context_;\n  Init::ExpectableWatcherImpl init_watcher_;\n  Init::TargetHandlePtr init_target_handle_;\n  Envoy::Config::SubscriptionCallbacks* rds_callbacks_{};\n  NiceMock<Stats::MockIsolatedStatsStore> scope_;\n};\n\nclass RdsImplTest : public RdsTestBase {\npublic:\n  RdsImplTest() {\n    EXPECT_CALL(server_factory_context_.admin_.config_tracker_, add_(\"routes\", _));\n    route_config_provider_manager_ =\n        std::make_unique<RouteConfigProviderManagerImpl>(server_factory_context_.admin_);\n  }\n  ~RdsImplTest() override { server_factory_context_.thread_local_.shutdownThread(); }\n\n  void setup() {\n    const std::string config_yaml = R\"EOF(\nrds:\n  config_source:\n    api_config_source:\n      api_type: REST\n      cluster_names:\n      - foo_cluster\n      refresh_delay: 1s\n  route_config_name: foo_route_config\ncodec_type: auto\nstat_prefix: foo\nhttp_filters:\n- name: http_dynamo_filter\n  config: {}\n    )EOF\";\n\n    EXPECT_CALL(outer_init_manager_, add(_));\n    rds_ = RouteConfigProviderUtil::create(\n        parseHttpConnectionManagerFromYaml(config_yaml), server_factory_context_,\n        validation_visitor_, outer_init_manager_, \"foo.\", 
*route_config_provider_manager_);\n    rds_callbacks_ = server_factory_context_.cluster_manager_.subscription_factory_.callbacks_;\n    EXPECT_CALL(*server_factory_context_.cluster_manager_.subscription_factory_.subscription_,\n                start(_, _));\n    outer_init_manager_.initialize(init_watcher_);\n  }\n\n  RouteConstSharedPtr route(Http::TestRequestHeaderMapImpl headers) {\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    headers.addCopy(\"x-forwarded-proto\", \"http\");\n    return rds_->config()->route(headers, stream_info, 0);\n  }\n\n  NiceMock<Server::MockInstance> server_;\n  RouteConfigProviderManagerImplPtr route_config_provider_manager_;\n  RouteConfigProviderSharedPtr rds_;\n};\n\nTEST_F(RdsImplTest, RdsAndStatic) {\n  const std::string config_yaml = R\"EOF(\nrds: {}\nroute_config: {}\ncodec_type: auto\nstat_prefix: foo\nhttp_filters:\n- name: http_dynamo_filter\n  config: {}\n    )EOF\";\n\n  EXPECT_THROW(RouteConfigProviderUtil::create(parseHttpConnectionManagerFromYaml(config_yaml),\n                                               server_factory_context_, validation_visitor_,\n                                               outer_init_manager_, \"foo.\",\n                                               *route_config_provider_manager_),\n               EnvoyException);\n}\n\nTEST_F(RdsImplTest, DestroyDuringInitialize) {\n  InSequence s;\n  setup();\n  // EXPECT_CALL(server_factory_context_, scope());\n  EXPECT_CALL(init_watcher_, ready());\n  rds_.reset();\n}\n\nTEST_F(RdsImplTest, Basic) {\n  InSequence s;\n  Buffer::OwnedImpl empty;\n  Buffer::OwnedImpl data;\n\n  setup();\n\n  // Make sure the initial empty route table works.\n  EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{\":authority\", \"foo\"}}));\n\n  // Initial request.\n  const std::string response1_json = R\"EOF(\n{\n  \"version_info\": \"1\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n      
\"name\": \"foo_route_config\",\n      \"virtual_hosts\": null\n    }\n  ]\n}\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_json);\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::route::v3::RouteConfiguration>(response1);\n\n  EXPECT_CALL(init_watcher_, ready());\n  rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info());\n  EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{\":authority\", \"foo\"}}));\n\n  // 2nd request with same response. Based on hash should not reload config.\n  rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info());\n  EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{\":authority\", \"foo\"}}));\n\n  // Load the config and verified shared count.\n  ConfigConstSharedPtr config = rds_->config();\n  EXPECT_EQ(2, config.use_count());\n\n  // Third request.\n  const std::string response2_json = R\"EOF(\n{\n  \"version_info\": \"2\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n      \"name\": \"foo_route_config\",\n      \"virtual_hosts\": [\n        {\n          \"name\": \"integration\",\n          \"domains\": [\n            \"*\"\n          ],\n          \"routes\": [\n            {\n              \"match\": {\n                \"prefix\": \"/foo\"\n              },\n              \"route\": {\n                \"cluster_header\": \":authority\"\n              }\n            }\n          ]\n        }\n      ]\n    }\n  ]\n}\n  )EOF\";\n  auto response2 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response2_json);\n  const auto decoded_resources_2 =\n      TestUtility::decodeResources<envoy::config::route::v3::RouteConfiguration>(response2);\n\n  // Make sure we don't lookup/verify clusters.\n  EXPECT_CALL(server_factory_context_.cluster_manager_, 
get(Eq(\"bar\"))).Times(0);\n  rds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info());\n  EXPECT_EQ(\"foo\", route(Http::TestRequestHeaderMapImpl{{\":authority\", \"foo\"}, {\":path\", \"/foo\"}})\n                       ->routeEntry()\n                       ->clusterName());\n\n  // Old config use count should be 1 now.\n  EXPECT_EQ(1, config.use_count());\n  EXPECT_EQ(2UL, scope_.counter(\"foo.rds.foo_route_config.config_reload\").value());\n}\n\n// Validate behavior when the config is delivered but it fails PGV validation.\nTEST_F(RdsImplTest, FailureInvalidConfig) {\n  InSequence s;\n\n  setup();\n\n  const std::string response1_json = R\"EOF(\n{\n  \"version_info\": \"1\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n      \"name\": \"INVALID_NAME_FOR_route_config\",\n      \"virtual_hosts\": null\n    }\n  ]\n}\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_json);\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::route::v3::RouteConfiguration>(response1);\n\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_THROW_WITH_MESSAGE(\n      rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()),\n      EnvoyException,\n      \"Unexpected RDS configuration (expecting foo_route_config): INVALID_NAME_FOR_route_config\");\n}\n\n// rds and vhds configurations change together\nTEST_F(RdsImplTest, VHDSandRDSupdateTogether) {\n  setup();\n\n  const std::string response1_json = R\"EOF(\n{\n  \"version_info\": \"1\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n      \"name\": \"foo_route_config\",\n      \"virtual_hosts\": [\n        {\n          \"name\": \"foo\",\n          \"domains\": [\n            \"foo\"\n          ],\n          \"routes\": [\n            {\n              
\"match\": {\n                \"prefix\": \"/foo\"\n              },\n              \"route\": {\n                \"cluster\": \"foo\"\n              }\n            }\n          ]\n        }\n      ],\n      \"vhds\": {\n        \"config_source\": {\n          \"api_config_source\": {\n            \"api_type\": \"DELTA_GRPC\",\n            \"grpc_services\": {\n              \"envoy_grpc\": {\n                \"cluster_name\": \"xds_cluster\"\n              }\n            }\n          }\n        }\n      }\n    }\n  ]\n}\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_json);\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::route::v3::RouteConfiguration>(response1);\n\n  EXPECT_CALL(init_watcher_, ready());\n  rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info());\n  EXPECT_TRUE(rds_->config()->usesVhds());\n\n  EXPECT_EQ(\"foo\", route(Http::TestRequestHeaderMapImpl{{\":authority\", \"foo\"}, {\":path\", \"/foo\"}})\n                       ->routeEntry()\n                       ->clusterName());\n}\n\n// Validate behavior when the config fails delivery at the subscription level.\nTEST_F(RdsImplTest, FailureSubscription) {\n  InSequence s;\n\n  setup();\n\n  EXPECT_CALL(init_watcher_, ready());\n  // onConfigUpdateFailed() should not be called for gRPC stream connection failure\n  rds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, {});\n}\n\n// Verifies that a queued up request for a virtual host update doesn't crash if\n// RdsRouteConfigProvider is deallocated\nTEST_F(RdsImplTest, VirtualHostUpdateWhenProviderHasBeenDeallocated) {\n  const std::string rds_config = R\"EOF(\nrds:\n  route_config_name: my_route\n  config_source:\n    api_config_source:\n      api_type: GRPC\n      grpc_services:\n        envoy_grpc:\n          cluster_name: xds_cluster\n)EOF\";\n\n  Event::PostCb post_cb;\n  
testing::NiceMock<Event::MockDispatcher> local_thread_dispatcher;\n  testing::MockFunction<void(bool)> mock_callback;\n  {\n    auto rds = RouteConfigProviderUtil::create(\n        parseHttpConnectionManagerFromYaml(rds_config), server_factory_context_,\n        validation_visitor_, outer_init_manager_, \"foo.\", *route_config_provider_manager_);\n\n    EXPECT_CALL(server_factory_context_.dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n    rds->requestVirtualHostsUpdate(\n        \"testing\", local_thread_dispatcher,\n        std::make_shared<Http::RouteConfigUpdatedCallback>(\n            Http::RouteConfigUpdatedCallback(mock_callback.AsStdFunction())));\n  }\n\n  // Invoke the callback that was scheduled on the main thread\n  // RdsRouteConfigProvider in rds is out of scope and callback's captured parameters are no longer\n  // valid\n  EXPECT_CALL(mock_callback, Call(_)).Times(0);\n  EXPECT_NO_THROW(post_cb());\n}\n\nclass RdsRouteConfigSubscriptionTest : public RdsTestBase {\npublic:\n  RdsRouteConfigSubscriptionTest() {\n    EXPECT_CALL(server_factory_context_.admin_.config_tracker_, add_(\"routes\", _));\n    route_config_provider_manager_ =\n        std::make_unique<RouteConfigProviderManagerImpl>(server_factory_context_.admin_);\n  }\n\n  ~RdsRouteConfigSubscriptionTest() override {\n    server_factory_context_.thread_local_.shutdownThread();\n  }\n\n  RouteConfigProviderManagerImplPtr route_config_provider_manager_;\n};\n\n// Verifies that maybeCreateInitManager() creates a noop init manager if the main init manager is in\n// Initialized state already\nTEST_F(RdsRouteConfigSubscriptionTest, CreatesNoopInitManager) {\n  const std::string rds_config = R\"EOF(\n  route_config_name: my_route\n  config_source:\n    api_config_source:\n      api_type: GRPC\n      grpc_services:\n        envoy_grpc:\n          cluster_name: xds_cluster\n)EOF\";\n  const auto rds =\n      
TestUtility::parseYaml<envoy::extensions::filters::network::http_connection_manager::v3::Rds>(\n          rds_config);\n  const auto route_config_provider = route_config_provider_manager_->createRdsRouteConfigProvider(\n      rds, server_factory_context_, \"stat_prefix\", outer_init_manager_);\n  RdsRouteConfigSubscription& subscription =\n      (dynamic_cast<RdsRouteConfigProviderImpl*>(route_config_provider.get()))->subscription();\n  init_watcher_.expectReady().Times(1); // The parent_init_target_ will call once.\n  outer_init_manager_.initialize(init_watcher_);\n  std::unique_ptr<Init::ManagerImpl> noop_init_manager;\n  std::unique_ptr<Cleanup> init_vhds;\n  subscription.maybeCreateInitManager(\"version_info\", noop_init_manager, init_vhds);\n  // local_init_manager_ is not ready yet as the local_init_target_ is not ready.\n  EXPECT_EQ(init_vhds, nullptr);\n  EXPECT_EQ(noop_init_manager, nullptr);\n  // Now mark local_init_target_ ready by forcing an update failure.\n  auto* rds_callbacks_ = server_factory_context_.cluster_manager_.subscription_factory_.callbacks_;\n  EnvoyException e(\"test\");\n  rds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected,\n                                       &e);\n  // Now noop init manager will be created as local_init_manager_ is initialized.\n  subscription.maybeCreateInitManager(\"version_info\", noop_init_manager, init_vhds);\n  EXPECT_NE(init_vhds, nullptr);\n  EXPECT_NE(noop_init_manager, nullptr);\n}\n\nclass RouteConfigProviderManagerImplTest : public RdsTestBase {\npublic:\n  void setup() {\n    // Get a RouteConfigProvider. 
This one should create an entry in the RouteConfigProviderManager.\n    rds_.set_route_config_name(\"foo_route_config\");\n    rds_.mutable_config_source()->set_path(\"foo_path\");\n    provider_ = route_config_provider_manager_->createRdsRouteConfigProvider(\n        rds_, server_factory_context_, \"foo_prefix.\", outer_init_manager_);\n    rds_callbacks_ = server_factory_context_.cluster_manager_.subscription_factory_.callbacks_;\n  }\n\n  RouteConfigProviderManagerImplTest() {\n    EXPECT_CALL(server_factory_context_.admin_.config_tracker_, add_(\"routes\", _));\n    route_config_provider_manager_ =\n        std::make_unique<RouteConfigProviderManagerImpl>(server_factory_context_.admin_);\n  }\n\n  ~RouteConfigProviderManagerImplTest() override {\n    server_factory_context_.thread_local_.shutdownThread();\n  }\n\n  envoy::extensions::filters::network::http_connection_manager::v3::Rds rds_;\n  RouteConfigProviderManagerImplPtr route_config_provider_manager_;\n  RouteConfigProviderSharedPtr provider_;\n};\n\nenvoy::config::route::v3::RouteConfiguration\nparseRouteConfigurationFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) {\n  envoy::config::route::v3::RouteConfiguration route_config;\n  TestUtility::loadFromYaml(yaml, route_config, true, avoid_boosting);\n  return route_config;\n}\n\nTEST_F(RouteConfigProviderManagerImplTest, ConfigDump) {\n  auto message_ptr =\n      server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"routes\"]();\n  const auto& route_config_dump =\n      TestUtility::downcastAndValidate<const envoy::admin::v3::RoutesConfigDump&>(*message_ptr);\n\n  // No routes at all, no last_updated timestamp\n  envoy::admin::v3::RoutesConfigDump expected_route_config_dump;\n  TestUtility::loadFromYaml(R\"EOF(\nstatic_route_configs:\ndynamic_route_configs:\n)EOF\",\n                            expected_route_config_dump);\n  EXPECT_EQ(expected_route_config_dump.DebugString(), route_config_dump.DebugString());\n\n  
const std::string config_yaml = R\"EOF(\nname: foo\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n)EOF\";\n\n  timeSystem().setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  // Only static route.\n  RouteConfigProviderPtr static_config =\n      route_config_provider_manager_->createStaticRouteConfigProvider(\n          parseRouteConfigurationFromV3Yaml(config_yaml), server_factory_context_,\n          validation_visitor_);\n  message_ptr =\n      server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"routes\"]();\n  const auto& route_config_dump2 =\n      TestUtility::downcastAndValidate<const envoy::admin::v3::RoutesConfigDump&>(*message_ptr);\n  TestUtility::loadFromYaml(R\"EOF(\nstatic_route_configs:\n  - route_config:\n      \"@type\": type.googleapis.com/envoy.config.route.v3.RouteConfiguration\n      name: foo\n      virtual_hosts:\n        - name: bar\n          domains: [\"*\"]\n          routes:\n            - match: { prefix: \"/\" }\n              route: { cluster: baz }\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\ndynamic_route_configs:\n)EOF\",\n                            expected_route_config_dump);\n  EXPECT_EQ(expected_route_config_dump.DebugString(), route_config_dump2.DebugString());\n\n  // Static + dynamic.\n  setup();\n  EXPECT_CALL(*server_factory_context_.cluster_manager_.subscription_factory_.subscription_,\n              start(_, _));\n  outer_init_manager_.initialize(init_watcher_);\n\n  const std::string response1_json = R\"EOF(\n{\n  \"version_info\": \"1\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\",\n      \"name\": \"foo_route_config\",\n      \"virtual_hosts\": null\n    }\n  ]\n}\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_json);\n  const auto 
decoded_resources =\n      TestUtility::decodeResources<envoy::config::route::v3::RouteConfiguration>(response1);\n\n  EXPECT_CALL(init_watcher_, ready());\n  rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info());\n  message_ptr =\n      server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"routes\"]();\n  const auto& route_config_dump3 =\n      TestUtility::downcastAndValidate<const envoy::admin::v3::RoutesConfigDump&>(*message_ptr);\n  TestUtility::loadFromYaml(R\"EOF(\nstatic_route_configs:\n  - route_config:\n      \"@type\": type.googleapis.com/envoy.config.route.v3.RouteConfiguration\n      name: foo\n      virtual_hosts:\n        - name: bar\n          domains: [\"*\"]\n          routes:\n            - match: { prefix: \"/\" }\n              route: { cluster: baz }\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\ndynamic_route_configs:\n  - version_info: \"1\"\n    route_config:\n      \"@type\": type.googleapis.com/envoy.config.route.v3.RouteConfiguration\n      name: foo_route_config\n      virtual_hosts:\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\n)EOF\",\n                            expected_route_config_dump);\n  EXPECT_EQ(expected_route_config_dump.DebugString(), route_config_dump3.DebugString());\n}\n\nTEST_F(RouteConfigProviderManagerImplTest, Basic) {\n  Buffer::OwnedImpl data;\n\n  // Get a RouteConfigProvider. 
This one should create an entry in the RouteConfigProviderManager.\n  setup();\n\n  EXPECT_FALSE(provider_->configInfo().has_value());\n\n  const auto route_config = parseRouteConfigurationFromV3Yaml(R\"EOF(\nname: foo_route_config\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n)EOF\");\n  const auto decoded_resources = TestUtility::decodeResources({route_config});\n\n  server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources.refvec_, \"1\");\n\n  RouteConfigProviderSharedPtr provider2 =\n      route_config_provider_manager_->createRdsRouteConfigProvider(\n          rds_, server_factory_context_, \"foo_prefix\", outer_init_manager_);\n\n  // provider2 should have route config immediately after create\n  EXPECT_TRUE(provider2->configInfo().has_value());\n\n  EXPECT_EQ(provider_, provider2) << \"fail to obtain the same rds config provider object\";\n\n  // So this means that both provider have same subscription.\n  EXPECT_EQ(&dynamic_cast<RdsRouteConfigProviderImpl&>(*provider_).subscription(),\n            &dynamic_cast<RdsRouteConfigProviderImpl&>(*provider2).subscription());\n  EXPECT_EQ(&provider_->configInfo().value().config_, &provider2->configInfo().value().config_);\n\n  envoy::extensions::filters::network::http_connection_manager::v3::Rds rds2;\n  rds2.set_route_config_name(\"foo_route_config\");\n  rds2.mutable_config_source()->set_path(\"bar_path\");\n  RouteConfigProviderSharedPtr provider3 =\n      route_config_provider_manager_->createRdsRouteConfigProvider(\n          rds2, server_factory_context_, \"foo_prefix\", outer_init_manager_);\n  EXPECT_NE(provider3, provider_);\n  server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources.refvec_, \"provider3\");\n  EXPECT_EQ(2UL,\n            
route_config_provider_manager_->dumpRouteConfigs()->dynamic_route_configs().size());\n\n  provider_.reset();\n  provider2.reset();\n\n  // All shared_ptrs to the provider pointed at by provider1, and provider2 have been deleted, so\n  // now we should only have the provider pointed at by provider3.\n  auto dynamic_route_configs =\n      route_config_provider_manager_->dumpRouteConfigs()->dynamic_route_configs();\n  EXPECT_EQ(1UL, dynamic_route_configs.size());\n\n  // Make sure the left one is provider3\n  EXPECT_EQ(\"provider3\", dynamic_route_configs[0].version_info());\n\n  provider3.reset();\n\n  EXPECT_EQ(0UL,\n            route_config_provider_manager_->dumpRouteConfigs()->dynamic_route_configs().size());\n}\n\nTEST_F(RouteConfigProviderManagerImplTest, SameProviderOnTwoInitManager) {\n  Buffer::OwnedImpl data;\n  // Get a RouteConfigProvider. This one should create an entry in the RouteConfigProviderManager.\n  setup();\n\n  EXPECT_FALSE(provider_->configInfo().has_value());\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> mock_factory_context2;\n\n  Init::WatcherImpl real_watcher(\"real\", []() {});\n  Init::ManagerImpl real_init_manager(\"real\");\n\n  RouteConfigProviderSharedPtr provider2 =\n      route_config_provider_manager_->createRdsRouteConfigProvider(rds_, mock_factory_context2,\n                                                                   \"foo_prefix\", real_init_manager);\n\n  EXPECT_FALSE(provider2->configInfo().has_value());\n\n  EXPECT_EQ(provider_, provider2) << \"fail to obtain the same rds config provider object\";\n  real_init_manager.initialize(real_watcher);\n  EXPECT_EQ(Init::Manager::State::Initializing, real_init_manager.state());\n\n  {\n    const auto route_config = parseRouteConfigurationFromV3Yaml(R\"EOF(\nname: foo_route_config\nvirtual_hosts:\n  - name: bar\n    domains: [\"*\"]\n    routes:\n      - match: { prefix: \"/\" }\n        route: { cluster: baz }\n)EOF\");\n    const auto decoded_resources = 
TestUtility::decodeResources({route_config});\n\n    server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n        decoded_resources.refvec_, \"1\");\n\n    EXPECT_TRUE(provider_->configInfo().has_value());\n    EXPECT_TRUE(provider2->configInfo().has_value());\n    EXPECT_EQ(Init::Manager::State::Initialized, real_init_manager.state());\n  }\n}\n\nTEST_F(RouteConfigProviderManagerImplTest, OnConfigUpdateEmpty) {\n  setup();\n  EXPECT_CALL(*server_factory_context_.cluster_manager_.subscription_factory_.subscription_,\n              start(_, _));\n  outer_init_manager_.initialize(init_watcher_);\n  EXPECT_CALL(init_watcher_, ready());\n  server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate({}, \"\");\n}\n\nTEST_F(RouteConfigProviderManagerImplTest, OnConfigUpdateWrongSize) {\n  setup();\n  EXPECT_CALL(*server_factory_context_.cluster_manager_.subscription_factory_.subscription_,\n              start(_, _));\n  outer_init_manager_.initialize(init_watcher_);\n  envoy::config::route::v3::RouteConfiguration route_config;\n  const auto decoded_resources = TestUtility::decodeResources({route_config, route_config});\n  EXPECT_CALL(init_watcher_, ready());\n  EXPECT_THROW_WITH_MESSAGE(\n      server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n          decoded_resources.refvec_, \"\"),\n      EnvoyException, \"Unexpected RDS resource length: 2\");\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/7939\nTEST_F(RouteConfigProviderManagerImplTest, ConfigDumpAfterConfigRejected) {\n  auto message_ptr =\n      server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"routes\"]();\n  const auto& route_config_dump =\n      TestUtility::downcastAndValidate<const envoy::admin::v3::RoutesConfigDump&>(*message_ptr);\n\n  // No routes at all, no last_updated timestamp\n  envoy::admin::v3::RoutesConfigDump 
expected_route_config_dump;\n  TestUtility::loadFromYaml(R\"EOF(\nstatic_route_configs:\ndynamic_route_configs:\n)EOF\",\n                            expected_route_config_dump);\n  EXPECT_EQ(expected_route_config_dump.DebugString(), route_config_dump.DebugString());\n\n  timeSystem().setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  // dynamic.\n  setup();\n  EXPECT_CALL(*server_factory_context_.cluster_manager_.subscription_factory_.subscription_,\n              start(_, _));\n  outer_init_manager_.initialize(init_watcher_);\n\n  const std::string response1_yaml = R\"EOF(\nversion_info: '1'\nresources:\n- \"@type\": type.googleapis.com/envoy.api.v2.RouteConfiguration\n  name: foo_route_config\n  virtual_hosts:\n  - name: integration\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/foo\"\n      route:\n        cluster_header: \":authority\"\n  - name: duplicate\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/foo\"\n      route:\n        cluster_header: \":authority\"\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_yaml);\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::route::v3::RouteConfiguration>(response1);\n\n  EXPECT_CALL(init_watcher_, ready());\n\n  EXPECT_THROW_WITH_MESSAGE(\n      rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()),\n      EnvoyException, \"Only a single wildcard domain is permitted in route foo_route_config\");\n\n  message_ptr =\n      server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"routes\"]();\n  const auto& route_config_dump3 =\n      TestUtility::downcastAndValidate<const envoy::admin::v3::RoutesConfigDump&>(*message_ptr);\n  TestUtility::loadFromYaml(R\"EOF(\nstatic_route_configs:\ndynamic_route_configs:\n)EOF\",\n                            expected_route_config_dump);\n  
EXPECT_EQ(expected_route_config_dump.DebugString(), route_config_dump3.DebugString());\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/reset_header_parser_test.cc",
    "content": "#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/protocol.h\"\n#include \"envoy/json/json_object.h\"\n\n#include \"common/json/json_loader.h\"\n#include \"common/router/reset_header_parser.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nenvoy::config::route::v3::RetryPolicy::ResetHeader\nparseResetHeaderParserFromYaml(const std::string& yaml) {\n  envoy::config::route::v3::RetryPolicy::ResetHeader reset_header;\n  TestUtility::loadFromYaml(yaml, reset_header);\n  return reset_header;\n}\n\nTEST(ResetHeaderParserConstructorTest, FormatUnset) {\n  const std::string yaml = R\"EOF(\nname: retry-after\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  EXPECT_EQ(\"retry-after\", reset_header_parser.name().get());\n  EXPECT_EQ(ResetHeaderFormat::Seconds, reset_header_parser.format());\n}\n\nTEST(ResetHeaderParserConstructorTest, FormatSeconds) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: SECONDS\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  EXPECT_EQ(\"retry-after\", reset_header_parser.name().get());\n  EXPECT_EQ(ResetHeaderFormat::Seconds, reset_header_parser.format());\n}\n\nTEST(ResetHeaderParserConstructorTest, FormatUnixTimestamp) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: UNIX_TIMESTAMP\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  EXPECT_EQ(\"retry-after\", reset_header_parser.name().get());\n  EXPECT_EQ(ResetHeaderFormat::UnixTimestamp, reset_header_parser.format());\n}\n\nclass ResetHeaderParserParseIntervalTest : public testing::Test {\npublic:\n  
ResetHeaderParserParseIntervalTest() {\n    const time_t known_date_time = 1000000000;\n    test_time_.setSystemTime(std::chrono::system_clock::from_time_t(known_date_time));\n  }\n\n  Event::SimulatedTimeSystem test_time_;\n};\n\nTEST_F(ResetHeaderParserParseIntervalTest, NoHeaderMatches) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: SECONDS\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n\n  EXPECT_EQ(absl::nullopt,\n            reset_header_parser.parseInterval(test_time_.timeSystem(), response_headers));\n}\n\nTEST_F(ResetHeaderParserParseIntervalTest, HeaderMatchesButUnsupportedFormatDate) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: SECONDS\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\"retry-after\", \"Fri, 17 Jul 2020 11:59:51 GMT\"}};\n\n  EXPECT_EQ(absl::nullopt,\n            reset_header_parser.parseInterval(test_time_.timeSystem(), response_headers));\n}\n\nTEST_F(ResetHeaderParserParseIntervalTest, HeaderMatchesButUnsupportedFormatFloat) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: SECONDS\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"retry-after\", \"2.5\"}};\n\n  EXPECT_EQ(absl::nullopt,\n            reset_header_parser.parseInterval(test_time_.timeSystem(), response_headers));\n}\n\nTEST_F(ResetHeaderParserParseIntervalTest, HeaderMatchesSupportedFormatSeconds) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: SECONDS\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      
ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"retry-after\", \"5\"}};\n\n  EXPECT_EQ(absl::optional<std::chrono::milliseconds>(5000),\n            reset_header_parser.parseInterval(test_time_.timeSystem(), response_headers));\n}\n\nTEST_F(ResetHeaderParserParseIntervalTest, HeaderMatchesSupportedFormatSecondsCaseInsensitive) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: SECONDS\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"Retry-After\", \"5\"}};\n\n  EXPECT_EQ(absl::optional<std::chrono::milliseconds>(5000),\n            reset_header_parser.parseInterval(test_time_.timeSystem(), response_headers));\n}\n\nTEST_F(ResetHeaderParserParseIntervalTest, HeaderMatchesButUnsupportedFormatTimestampFloat) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: UNIX_TIMESTAMP\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"retry-after\", \"1595320702.1234\"}};\n\n  EXPECT_EQ(absl::nullopt,\n            reset_header_parser.parseInterval(test_time_.timeSystem(), response_headers));\n}\n\nTEST_F(ResetHeaderParserParseIntervalTest, HeaderMatchesSupportedFormatTimestampButInThePast) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: UNIX_TIMESTAMP\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"retry-after\", \"999999999\"}};\n\n  EXPECT_EQ(absl::nullopt,\n            reset_header_parser.parseInterval(test_time_.timeSystem(), response_headers));\n}\n\nTEST_F(ResetHeaderParserParseIntervalTest, HeaderMatchesSupportedFormatTimestampEmptyInterval) {\n  const 
std::string yaml = R\"EOF(\nname: retry-after\nformat: UNIX_TIMESTAMP\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"retry-after\", \"1000000000\"}};\n\n  EXPECT_EQ(absl::optional<std::chrono::milliseconds>(0),\n            reset_header_parser.parseInterval(test_time_.timeSystem(), response_headers));\n}\n\nTEST_F(ResetHeaderParserParseIntervalTest, HeaderMatchesSupportedFormatTimestampNonEmptyInterval) {\n  const std::string yaml = R\"EOF(\nname: retry-after\nformat: UNIX_TIMESTAMP\n  )EOF\";\n\n  ResetHeaderParserImpl reset_header_parser =\n      ResetHeaderParserImpl(parseResetHeaderParserFromYaml(yaml));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"retry-after\", \"1000000007\"}};\n\n  EXPECT_EQ(absl::optional<std::chrono::milliseconds>(7000),\n            reset_header_parser.parseInterval(test_time_.timeSystem(), response_headers));\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/retry_state_impl_test.cc",
// Test fixture for RetryStateImpl: wires a retry policy, a mock cluster, mock
// runtime/random sources and a mock dispatcher together so each test can drive
// shouldRetryHeaders()/shouldRetryReset() and observe the resulting retry
// decisions, back-off timers and stats.
class RouterRetryStateImplTest : public testing::Test {
public:
  // Resource kinds tracked by the cluster's resource manager; used by
  // incrOutstandingResource() to simulate circuit-breaker pressure.
  enum TestResourceType { Connection, Request, PendingRequest, Retry };

  RouterRetryStateImplTest() : callback_([this]() -> void { callback_ready_.ready(); }) {
    // Retries are gated on the "upstream.use_retry" runtime feature; default it
    // to enabled so the retry paths are exercised (RuntimeGuard overrides this).
    ON_CALL(runtime_.snapshot_, featureEnabled("upstream.use_retry", 100))
        .WillByDefault(Return(true));
  }

  // Convenience overload: create the retry state from empty request headers.
  void setup() {
    Http::TestRequestHeaderMapImpl headers;
    setup(headers);
  }

  // Creates the RetryState under test from the given request headers plus the
  // fixture's policy/cluster/runtime mocks. create() may return nullptr when no
  // retry policy is active (see the PolicyNoneRemoteReset test).
  void setup(Http::RequestHeaderMap& request_headers) {
    state_ = RetryStateImpl::create(policy_, request_headers, cluster_, &virtual_cluster_, runtime_,
                                    random_, dispatcher_, test_time_.timeSystem(),
                                    Upstream::ResourcePriority::Default);
  }

  // Expects the retry state to create the back-off timer on the dispatcher and
  // arm it exactly once. The raw pointer is managed by the MockDispatcher
  // machinery, not by this fixture.
  void expectTimerCreateAndEnable() {
    retry_timer_ = new Event::MockTimer(&dispatcher_);
    EXPECT_CALL(*retry_timer_, enableTimer(_, _));
  }

  // Bumps the selected resource gauge `num` times, recording a matching
  // decrement task each time so TearDown() restores the resource manager.
  void incrOutstandingResource(TestResourceType resource, uint32_t num) {
    for (uint32_t i = 0; i < num; ++i) {
      switch (resource) {
      case TestResourceType::Retry:
        cluster_.resourceManager(Upstream::ResourcePriority::Default).retries().inc();
        resource_manager_cleanup_tasks_.emplace_back([this]() {
          cluster_.resourceManager(Upstream::ResourcePriority::Default).retries().dec();
        });
        break;
      case TestResourceType::Connection:
        cluster_.resourceManager(Upstream::ResourcePriority::Default).connections().inc();
        resource_manager_cleanup_tasks_.emplace_back([this]() {
          cluster_.resourceManager(Upstream::ResourcePriority::Default).connections().dec();
        });
        break;
      case TestResourceType::Request:
        cluster_.resourceManager(Upstream::ResourcePriority::Default).requests().inc();
        resource_manager_cleanup_tasks_.emplace_back([this]() {
          cluster_.resourceManager(Upstream::ResourcePriority::Default).requests().dec();
        });
        break;
      case TestResourceType::PendingRequest:
        cluster_.resourceManager(Upstream::ResourcePriority::Default).pendingRequests().inc();
        resource_manager_cleanup_tasks_.emplace_back([this]() {
          cluster_.resourceManager(Upstream::ResourcePriority::Default).pendingRequests().dec();
        });
        break;
      }
    }
  }

  // Runs and clears every decrement task recorded by incrOutstandingResource().
  void cleanupOutstandingResources() {
    for (auto& task : resource_manager_cleanup_tasks_) {
      task();
    }
    resource_manager_cleanup_tasks_.clear();
  }

  // Shared scenario: with `retry_on` enabled via request header (HTTP or gRPC
  // flavor), a matching remote response triggers exactly one retry and the
  // second attempt exhausts the retry limit; verifies both cluster and
  // virtual-cluster retry stats end at 1.
  void verifyPolicyWithRemoteResponse(const std::string& retry_on,
                                      const std::string& response_status, const bool is_grpc) {
    Http::TestRequestHeaderMapImpl request_headers;
    if (is_grpc) {
      request_headers.setEnvoyRetryGrpcOn(retry_on);
    } else {
      request_headers.setEnvoyRetryOn(retry_on);
    }
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    Http::TestResponseHeaderMapImpl response_headers;
    if (is_grpc) {
      // gRPC failures arrive as HTTP 200 with the error in grpc-status.
      response_headers.setStatus("200");
      response_headers.setGrpcStatus(response_status);
    } else {
      response_headers.setStatus(response_status);
    }

    expectTimerCreateAndEnable();
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
    EXPECT_CALL(callback_ready_, ready());
    retry_timer_->invokeCallback();

    EXPECT_EQ(RetryStatus::NoRetryLimitExceeded,
              state_->shouldRetryHeaders(response_headers, callback_));

    EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
    EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
    EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value());
    EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value());
  }

  void TearDown() override { cleanupOutstandingResources(); }

  Event::SimulatedTimeSystem test_time_;
  NiceMock<TestRetryPolicy> policy_;
  NiceMock<Upstream::MockClusterInfo> cluster_;
  TestVirtualCluster virtual_cluster_;
  NiceMock<Runtime::MockLoader> runtime_;
  NiceMock<Random::MockRandomGenerator> random_;
  Event::MockDispatcher dispatcher_;
  // Owned by the dispatcher mock machinery; re-pointed by
  // expectTimerCreateAndEnable() for each expected timer creation.
  Event::MockTimer* retry_timer_{};
  RetryStatePtr state_;
  ReadyWatcher callback_ready_;
  RetryState::DoRetryCallback callback_;
  std::vector<std::function<void()>> resource_manager_cleanup_tasks_;

  // Commonly used stream reset reasons, named for readability in the tests.
  const Http::StreamResetReason remote_reset_{Http::StreamResetReason::RemoteReset};
  const Http::StreamResetReason remote_refused_stream_reset_{
      Http::StreamResetReason::RemoteRefusedStreamReset};
  const Http::StreamResetReason overflow_reset_{Http::StreamResetReason::Overflow};
  const Http::StreamResetReason connect_failure_{Http::StreamResetReason::ConnectionFailure};
};

// With no retry policy configured anywhere, create() yields no retry state.
TEST_F(RouterRetryStateImplTest, PolicyNoneRemoteReset) {
  Http::TestRequestHeaderMapImpl request_headers;
  setup(request_headers);
  EXPECT_EQ(nullptr, state_);
}
// "refused-stream" retries once on a remote refused-stream reset, then hits
// the retry limit; stats reflect one retry and one limit-exceeded event.
TEST_F(RouterRetryStateImplTest, PolicyRefusedStream) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "refused-stream"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  expectTimerCreateAndEnable();
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(remote_refused_stream_reset_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded,
            state_->shouldRetryReset(remote_refused_stream_reset_, callback_));

  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value());
}

// An overflow reset is never retried under "5xx".
TEST_F(RouterRetryStateImplTest, Policy5xxResetOverflow) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "5xx"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());
  EXPECT_EQ(RetryStatus::No, state_->shouldRetryReset(overflow_reset_, callback_));
}

// "5xx" retries a remote reset once, then exhausts the retry limit.
TEST_F(RouterRetryStateImplTest, Policy5xxRemoteReset) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "5xx"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  expectTimerCreateAndEnable();
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(remote_reset_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_reset_, callback_));

  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value());
}

// "5xx" retries a remote 503 response (shared scenario helper).
TEST_F(RouterRetryStateImplTest, Policy5xxRemote503) {
  verifyPolicyWithRemoteResponse("5xx" /* retry_on */, "503" /* response_status */,
                                 false /* is_grpc */);
}

// A 503 carrying x-envoy-overloaded is still retriable under "5xx".
TEST_F(RouterRetryStateImplTest, Policy5xxRemote503Overloaded) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "5xx"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  Http::TestResponseHeaderMapImpl response_headers{{":status", "503"},
                                                   {"x-envoy-overloaded", "true"}};
  expectTimerCreateAndEnable();
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
}

// A rate-limited response (x-envoy-ratelimited) suppresses the
// "resource-exhausted" gRPC retry policy.
TEST_F(RouterRetryStateImplTest, PolicyResourceExhaustedRemoteRateLimited) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-grpc-on", "resource-exhausted"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "200"}, {"grpc-status", "8"}, {"x-envoy-ratelimited", "true"}};
  EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
}

// "envoy-ratelimited" explicitly retries responses marked x-envoy-ratelimited,
// once, then exhausts the retry limit.
TEST_F(RouterRetryStateImplTest, PolicyEnvoyRateLimitedRemoteRateLimited) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "envoy-ratelimited"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  expectTimerCreateAndEnable();
  Http::TestResponseHeaderMapImpl response_headers{{":status", "429"},
                                                   {"x-envoy-ratelimited", "true"}};
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded,
            state_->shouldRetryHeaders(response_headers, callback_));
}

// "gateway-error" retries each of 502/503/504.
TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemote502) {
  verifyPolicyWithRemoteResponse("gateway-error" /* retry_on */, "502" /* response_status */,
                                 false /* is_grpc */);
}

TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemote503) {
  verifyPolicyWithRemoteResponse("gateway-error" /* retry_on */, "503" /* response_status */,
                                 false /* is_grpc */);
}

TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemote504) {
  verifyPolicyWithRemoteResponse("gateway-error" /* retry_on */, "504" /* response_status */,
                                 false /* is_grpc */);
}

// An overflow reset is never retried under "gateway-error".
TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorResetOverflow) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "gateway-error"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());
  EXPECT_EQ(RetryStatus::No, state_->shouldRetryReset(overflow_reset_, callback_));
}

// "gateway-error" retries a remote reset once, then exhausts the retry limit.
TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemoteReset) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "gateway-error"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  expectTimerCreateAndEnable();
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(remote_reset_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_reset_, callback_));

  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value());
}

// gRPC retry-on policies, keyed by grpc-status code ("1" = CANCELLED,
// "4" = DEADLINE_EXCEEDED).
TEST_F(RouterRetryStateImplTest, PolicyGrpcCancelled) {
  verifyPolicyWithRemoteResponse("cancelled" /* retry_on */, "1" /* response_status */,
                                 true /* is_grpc */);
}

TEST_F(RouterRetryStateImplTest, PolicyGrpcDeadlineExceeded) {
  verifyPolicyWithRemoteResponse("deadline-exceeded" /* retry_on */, "4" /* response_status */,
                                 true /* is_grpc */);
}
verifyPolicyWithRemoteResponse(\"deadline-exceeded\" /* retry_on */, \"4\" /* response_status */,\n                                 true /* is_grpc */);\n}\n\nTEST_F(RouterRetryStateImplTest, PolicyGrpcResourceExhausted) {\n  verifyPolicyWithRemoteResponse(\"resource-exhausted\" /* retry_on */, \"8\" /* response_status */,\n                                 true /* is_grpc */);\n}\n\nTEST_F(RouterRetryStateImplTest, PolicyGrpcUnavilable) {\n  verifyPolicyWithRemoteResponse(\"unavailable\" /* retry_on */, \"14\" /* response_status */,\n                                 true /* is_grpc */);\n}\n\nTEST_F(RouterRetryStateImplTest, PolicyGrpcInternal) {\n  verifyPolicyWithRemoteResponse(\"internal\" /* retry_on */, \"13\" /* response_status */,\n                                 true /* is_grpc */);\n}\n\nTEST_F(RouterRetryStateImplTest, Policy5xxRemote200RemoteReset) {\n  // Don't retry after reply start.\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n  setup(request_headers);\n  EXPECT_TRUE(state_->enabled());\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));\n  expectTimerCreateAndEnable();\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(remote_reset_, callback_));\n  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_reset_, callback_));\n\n  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());\n  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());\n  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value());\n  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value());\n}\n\nTEST_F(RouterRetryStateImplTest, RuntimeGuard) {\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.use_retry\", 100))\n      .WillOnce(Return(false));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", 
// When the "upstream.use_retry" runtime feature is off, the state still
// reports enabled() but never actually retries.
TEST_F(RouterRetryStateImplTest, RuntimeGuard) {
  EXPECT_CALL(runtime_.snapshot_, featureEnabled("upstream.use_retry", 100))
      .WillOnce(Return(false));

  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "5xx"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());
  EXPECT_EQ(RetryStatus::No, state_->shouldRetryReset(remote_reset_, callback_));
}

// "connect-failure" ignores other reset reasons.
TEST_F(RouterRetryStateImplTest, PolicyConnectFailureOtherReset) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "connect-failure"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());
  EXPECT_EQ(RetryStatus::No, state_->shouldRetryReset(remote_reset_, callback_));
}

// "connect-failure" retries a connection-failure reset.
TEST_F(RouterRetryStateImplTest, PolicyConnectFailureResetConnectFailure) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "connect-failure"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  expectTimerCreateAndEnable();
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();
}

// "retriable-4xx" retries a 409 but no other 4xx, and never retries resets.
TEST_F(RouterRetryStateImplTest, PolicyRetriable4xxRetry) {
  verifyPolicyWithRemoteResponse("retriable-4xx", "409", false /* is_grpc */);
}

TEST_F(RouterRetryStateImplTest, PolicyRetriable4xxNoRetry) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "retriable-4xx"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  Http::TestResponseHeaderMapImpl response_headers{{":status", "400"}};
  EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
}

TEST_F(RouterRetryStateImplTest, PolicyRetriable4xxReset) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "retriable-4xx"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  EXPECT_EQ(RetryStatus::No, state_->shouldRetryReset(remote_reset_, callback_));
}

// "retriable-status-codes" honors codes configured in the route policy.
TEST_F(RouterRetryStateImplTest, RetriableStatusCodes) {
  policy_.retriable_status_codes_.push_back(409);
  verifyPolicyWithRemoteResponse("retriable-status-codes", "409", false /* is_grpc */);
}

// Status-code matching applies to response headers only, not to resets.
TEST_F(RouterRetryStateImplTest, RetriableStatusCodesUpstreamReset) {
  policy_.retriable_status_codes_.push_back(409);
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "retriable-status-codes"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());
  EXPECT_EQ(RetryStatus::No, state_->shouldRetryReset(remote_reset_, callback_));
}

// Parsing of x-envoy-retriable-status-codes: single code, comma list, and
// lists containing unparseable entries (valid entries still apply; an entry
// with a non-numeric prefix is dropped entirely).
TEST_F(RouterRetryStateImplTest, RetriableStatusCodesHeader) {
  {
    Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "retriable-status-codes"},
                                                   {"x-envoy-retriable-status-codes", "200"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    expectTimerCreateAndEnable();

    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "retriable-status-codes"},
                                                   {"x-envoy-retriable-status-codes", "418,200"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    expectTimerCreateAndEnable();

    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"x-envoy-retry-on", "retriable-status-codes"},
        {"x-envoy-retriable-status-codes", "   418 junk,200"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    expectTimerCreateAndEnable();

    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"x-envoy-retry-on", "retriable-status-codes"},
        {"x-envoy-retriable-status-codes", "   418 junk,xxx200"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }
}

// Test that when 'retriable-headers' policy is set via request header, certain configured headers
// trigger retries.
TEST_F(RouterRetryStateImplTest, RetriableHeadersPolicySetViaRequestHeader) {
  policy_.retry_on_ = RetryPolicy::RETRY_ON_5XX;

  Protobuf::RepeatedPtrField<envoy::config::route::v3::HeaderMatcher> matchers;
  auto* matcher = matchers.Add();
  matcher->set_name("X-Upstream-Pushback");

  policy_.retriable_headers_ = Http::HeaderUtility::buildHeaderMatcherVector(matchers);

  // No retries based on response headers: retry mode isn't enabled.
  {
    Http::TestRequestHeaderMapImpl request_headers;
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"},
                                                     {"x-upstream-pushback", "true"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }

  // Retries based on response headers: retry mode enabled via request header.
  {
    Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "retriable-headers"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());
    expectTimerCreateAndEnable();

    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"},
                                                     {"x-upstream-pushback", "true"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
}
// Test that when 'retriable-headers' policy is set via retry policy configuration,
// configured header matcher conditions trigger retries.
TEST_F(RouterRetryStateImplTest, RetriableHeadersPolicyViaRetryPolicyConfiguration) {
  policy_.retry_on_ = RetryPolicy::RETRY_ON_RETRIABLE_HEADERS;

  Protobuf::RepeatedPtrField<envoy::config::route::v3::HeaderMatcher> matchers;

  // matcher1: presence match (any value).
  auto* matcher1 = matchers.Add();
  matcher1->set_name("X-Upstream-Pushback");

  // matcher2: exact value match.
  auto* matcher2 = matchers.Add();
  matcher2->set_name("should-retry");
  matcher2->set_exact_match("yes");

  // matcher3: prefix match.
  auto* matcher3 = matchers.Add();
  matcher3->set_name("X-Verdict");
  matcher3->set_prefix_match("retry");

  // matcher4: numeric range match on :status.
  auto* matcher4 = matchers.Add();
  matcher4->set_name(":status");
  matcher4->mutable_range_match()->set_start(500);
  matcher4->mutable_range_match()->set_end(505);

  policy_.retriable_headers_ = Http::HeaderUtility::buildHeaderMatcherVector(matchers);

  auto setup_request = [this]() {
    Http::TestRequestHeaderMapImpl request_headers;
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());
  };

  // matcher1: header presence (any value).
  {
    setup_request();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    setup_request();
    expectTimerCreateAndEnable();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"},
                                                     {"x-upstream-pushback", "true"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    setup_request();
    expectTimerCreateAndEnable();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"},
                                                     {"x-upstream-pushback", "false"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }

  // matcher2: exact header value match.
  {
    setup_request();
    expectTimerCreateAndEnable();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"should-retry", "yes"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    setup_request();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"should-retry", "no"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }

  // matcher3: prefix match.
  {
    setup_request();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"},
                                                     {"x-verdict", "retry-please"}};
    expectTimerCreateAndEnable();
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    setup_request();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"},
                                                     {"x-verdict", "dont-retry-please"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }

  // matcher4: status code range (note half-open semantics: [start, end)).
  {
    setup_request();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "499"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    setup_request();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "500"}};
    expectTimerCreateAndEnable();
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    setup_request();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "503"}};
    expectTimerCreateAndEnable();
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    setup_request();
    Http::TestResponseHeaderMapImpl response_headers{{":status", "504"}};
    expectTimerCreateAndEnable();
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    setup_request();
    // 505 is excluded: range end is exclusive.
    Http::TestResponseHeaderMapImpl response_headers{{":status", "505"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }
}

// Test various combinations of retry headers set via request headers.
// x-envoy-retriable-header-names is a comma-separated list; surrounding
// whitespace is trimmed, but an empty list entry invalidates the header.
TEST_F(RouterRetryStateImplTest, RetriableHeadersSetViaRequestHeader) {
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"x-envoy-retry-on", "retriable-headers"},
        {"x-envoy-retriable-header-names", "X-Upstream-Pushback,FOOBAR"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    expectTimerCreateAndEnable();

    Http::TestResponseHeaderMapImpl response_headers{{"x-upstream-pushback", "yes"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"x-envoy-retry-on", "retriable-headers"},
        {"x-envoy-retriable-header-names", "X-Upstream-Pushback,  FOOBAR  "}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    expectTimerCreateAndEnable();

    Http::TestResponseHeaderMapImpl response_headers{{"foobar", "false"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"x-envoy-retry-on", "retriable-headers"},
        {"x-envoy-retriable-header-names", "X-Upstream-Pushback,,FOOBAR"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }
}
// Test merging retriable headers set via request headers and via config file.
TEST_F(RouterRetryStateImplTest, RetriableHeadersMergedConfigAndRequestHeaders) {
  policy_.retry_on_ = RetryPolicy::RETRY_ON_RETRIABLE_HEADERS;

  Protobuf::RepeatedPtrField<envoy::config::route::v3::HeaderMatcher> matchers;

  // Config says: retry if response is not 200.
  auto* matcher = matchers.Add();
  matcher->set_name(":status");
  matcher->set_exact_match("200");
  matcher->set_invert_match(true);

  policy_.retriable_headers_ = Http::HeaderUtility::buildHeaderMatcherVector(matchers);

  // No retries according to config.
  {
    Http::TestRequestHeaderMapImpl request_headers;
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }

  // Request header supplements the config: as a result we retry on 200.
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"x-envoy-retriable-header-names", "  :status,  FOOBAR  "}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());

    expectTimerCreateAndEnable();

    Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }
}

// "reset" retries any remote reset once, then exhausts the retry limit.
TEST_F(RouterRetryStateImplTest, PolicyResetRemoteReset) {
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "reset"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  expectTimerCreateAndEnable();
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(remote_reset_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_reset_, callback_));

  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value());
}

// retriable_request_headers_ gates whether retries are enabled at all:
// only GET or HEAD requests may retry here (matchers are OR-ed).
TEST_F(RouterRetryStateImplTest, PolicyLimitedByRequestHeaders) {
  Protobuf::RepeatedPtrField<envoy::config::route::v3::HeaderMatcher> matchers;
  auto* matcher = matchers.Add();
  matcher->set_name(":method");
  matcher->set_exact_match("GET");

  auto* matcher2 = matchers.Add();
  matcher2->set_name(":method");
  matcher2->set_exact_match("HEAD");

  policy_.retriable_request_headers_ = Http::HeaderUtility::buildHeaderMatcherVector(matchers);

  // No :method header present at all: retries disabled.
  {
    Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "5xx"}};
    setup(request_headers);
    EXPECT_FALSE(state_->enabled());
  }

  {
    Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"},
                                                   {"x-envoy-retry-on", "retriable-4xx"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());
    Http::TestResponseHeaderMapImpl response_headers{{":status", "409"}};
    expectTimerCreateAndEnable();
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }

  {
    Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {"x-envoy-retry-on", "5xx"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());
    Http::TestResponseHeaderMapImpl response_headers{{":status", "500"}};
    expectTimerCreateAndEnable();
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }

  {
    Http::TestRequestHeaderMapImpl request_headers{{":method", "HEAD"},
                                                   {"x-envoy-retry-on", "5xx"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());
    Http::TestResponseHeaderMapImpl response_headers{{":status", "500"}};
    expectTimerCreateAndEnable();
    EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));
  }

  // Sanity check that we're only enabling retries for the configured retry-on.
  {
    Http::TestRequestHeaderMapImpl request_headers{{":method", "HEAD"},
                                                   {"x-envoy-retry-on", "retriable-4xx"}};
    setup(request_headers);
    EXPECT_TRUE(state_->enabled());
    Http::TestResponseHeaderMapImpl response_headers{{":status", "500"}};
    EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));
  }

  {
    Http::TestRequestHeaderMapImpl request_headers{{":method", "POST"},
                                                   {"x-envoy-retry-on", "5xx"}};
    setup(request_headers);
    EXPECT_FALSE(state_->enabled());
  }
}

// num_retries == 0: the first attempt immediately reports limit exceeded and
// no retry is ever counted.
TEST_F(RouterRetryStateImplTest, RouteConfigNoRetriesAllowed) {
  policy_.num_retries_ = 0;
  policy_.retry_on_ = RetryPolicy::RETRY_ON_CONNECT_FAILURE;
  setup();

  EXPECT_TRUE(state_->enabled());
  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded,
            state_->shouldRetryReset(connect_failure_, callback_));

  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(0UL, cluster_.stats().upstream_rq_retry_.value());
  EXPECT_EQ(0UL, virtual_cluster_.stats().upstream_rq_retry_.value());
}

// Retry policy taken purely from route config; no x-envoy-* request headers.
TEST_F(RouterRetryStateImplTest, RouteConfigNoHeaderConfig) {
  policy_.num_retries_ = 1;
  policy_.retry_on_ = RetryPolicy::RETRY_ON_CONNECT_FAILURE;
  Http::TestRequestHeaderMapImpl request_headers;
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  expectTimerCreateAndEnable();
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();
}
// With the cluster's retry resource budget set to zero, the attempt overflows
// instead of retrying, and the overflow stat is incremented.
TEST_F(RouterRetryStateImplTest, NoAvailableRetries) {
  cluster_.resetResourceManager(0, 0, 0, 0, 0);

  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "connect-failure"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  EXPECT_EQ(RetryStatus::NoOverflow, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_overflow_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_overflow_.value());
}

TEST_F(RouterRetryStateImplTest, MaxRetriesHeader) {
  // The max retries header will take precedence over the policy
  policy_.num_retries_ = 4;
  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "connect-failure"},
                                                 {"x-envoy-retry-grpc-on", "cancelled"},
                                                 {"x-envoy-max-retries", "3"}};
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  // First retry creates the timer; subsequent ones only re-arm it.
  expectTimerCreateAndEnable();
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(*retry_timer_, enableTimer(_, _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(*retry_timer_, enableTimer(_, _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  // Third retry consumed the header-supplied budget of 3; the retry circuit
  // breaker reports open and the fourth attempt is refused.
  EXPECT_EQ(1UL, cluster_.circuit_breakers_stats_.rq_retry_open_.value());
  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded,
            state_->shouldRetryReset(connect_failure_, callback_));

  EXPECT_EQ(3UL, cluster_.stats().upstream_rq_retry_.value());
  EXPECT_EQ(0UL, cluster_.stats().upstream_rq_retry_success_.value());
  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
  EXPECT_EQ(3UL, virtual_cluster_.stats().upstream_rq_retry_.value());
  EXPECT_EQ(0UL, virtual_cluster_.stats().upstream_rq_retry_success_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());
}

// Exponential back-off with the default base interval: with random() fixed at
// 190, successive retries are armed at 15ms, 40ms and 90ms; a final 200
// response counts as retry success.
TEST_F(RouterRetryStateImplTest, Backoff) {
  policy_.num_retries_ = 3;
  policy_.retry_on_ = RetryPolicy::RETRY_ON_CONNECT_FAILURE;
  Http::TestRequestHeaderMapImpl request_headers;
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  EXPECT_CALL(random_, random()).WillOnce(Return(190));
  retry_timer_ = new Event::MockTimer(&dispatcher_);
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(15), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(random_, random()).WillOnce(Return(190));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(40), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(random_, random()).WillOnce(Return(190));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(90), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
  EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_));

  EXPECT_EQ(3UL, cluster_.stats().upstream_rq_retry_.value());
  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_success_.value());
  EXPECT_EQ(3UL, virtual_cluster_.stats().upstream_rq_retry_.value());
  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_success_.value());
  EXPECT_EQ(0UL, cluster_.circuit_breakers_stats_.rq_retry_open_.value());
}
// Test customized retry back-off intervals.
// base_interval_=100ms, max_interval_=1200ms: each expected enableTimer value
// is derived from the mocked random() result, with the interval capped at the
// configured maximum on the later retries.
TEST_F(RouterRetryStateImplTest, CustomBackOffInterval) {
  policy_.num_retries_ = 10;
  policy_.retry_on_ = RetryPolicy::RETRY_ON_CONNECT_FAILURE;
  policy_.base_interval_ = std::chrono::milliseconds(100);
  policy_.max_interval_ = std::chrono::milliseconds(1200);
  Http::TestRequestHeaderMapImpl request_headers;
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  EXPECT_CALL(random_, random()).WillOnce(Return(149));
  retry_timer_ = new Event::MockTimer(&dispatcher_);
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(49), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(random_, random()).WillOnce(Return(350));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(150), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(random_, random()).WillOnce(Return(751));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(351), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(random_, random()).WillOnce(Return(2399));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(799), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  // Same random value, but the growing interval is now clamped by
  // max_interval_ (1200ms), giving a different modulus.
  EXPECT_CALL(random_, random()).WillOnce(Return(2399));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(1199), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();
}

// Test the default maximum retry back-off interval.
// Only base_interval_ (100ms) is set; the later retries show the interval
// being clamped by the default maximum rather than the configured one above.
TEST_F(RouterRetryStateImplTest, CustomBackOffIntervalDefaultMax) {
  policy_.num_retries_ = 10;
  policy_.retry_on_ = RetryPolicy::RETRY_ON_CONNECT_FAILURE;
  policy_.base_interval_ = std::chrono::milliseconds(100);
  Http::TestRequestHeaderMapImpl request_headers;
  setup(request_headers);
  EXPECT_TRUE(state_->enabled());

  EXPECT_CALL(random_, random()).WillOnce(Return(149));
  retry_timer_ = new Event::MockTimer(&dispatcher_);
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(49), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(random_, random()).WillOnce(Return(350));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(150), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(random_, random()).WillOnce(Return(751));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(351), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(random_, random()).WillOnce(Return(2999));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(599), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();

  EXPECT_CALL(random_, random()).WillOnce(Return(2999));
  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(999), _));
  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));
  EXPECT_CALL(callback_ready_, ready());
  retry_timer_->invokeCallback();
}
ParseRateLimitedResetInterval) {\n  // Set a fixed system time to be used for all these tests\n  const time_t known_date_time = 1000000000;\n  test_time_.setSystemTime(std::chrono::system_clock::from_time_t(known_date_time));\n\n  Protobuf::RepeatedPtrField<envoy::config::route::v3::RetryPolicy::ResetHeader> reset_headers;\n  auto* reset_header_1 = reset_headers.Add();\n  reset_header_1->set_name(\"Retry-After\");\n  reset_header_1->set_format(envoy::config::route::v3::RetryPolicy::SECONDS);\n\n  auto* reset_header_2 = reset_headers.Add();\n  reset_header_2->set_name(\"X-RateLimit-Reset\");\n  reset_header_2->set_format(envoy::config::route::v3::RetryPolicy::UNIX_TIMESTAMP);\n\n  policy_.reset_headers_ = ResetHeaderParserImpl::buildResetHeaderParserVector(reset_headers);\n\n  // Failure case: Matches reset header (seconds) but exceeds max_interval (>5min)\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n    setup(request_headers);\n    EXPECT_TRUE(state_->enabled());\n\n    Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"429\"}, {\"Retry-After\", \"301\"}};\n    EXPECT_EQ(absl::nullopt, state_->parseResetInterval(response_headers));\n  }\n\n  // Failure case: Matches reset header (timestamp) but exceeds max_interval (>5min)\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n    setup(request_headers);\n    EXPECT_TRUE(state_->enabled());\n\n    Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"429\"},\n                                                     {\"X-RateLimit-Reset\", \"1000000301\"}};\n    EXPECT_EQ(absl::nullopt, state_->parseResetInterval(response_headers));\n  }\n\n  // The only reset header matches (seconds) and the header value is in within range\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n    setup(request_headers);\n    EXPECT_TRUE(state_->enabled());\n\n    
Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"429\"}, {\"Retry-After\", \"300\"}};\n    EXPECT_EQ(absl::optional<std::chrono::milliseconds>(300000),\n              state_->parseResetInterval(response_headers));\n  }\n\n  // The only reset header matches (timestamp) and the header value is in within range\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n    setup(request_headers);\n    EXPECT_TRUE(state_->enabled());\n\n    Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"429\"},\n                                                     {\"x-ratelimit-reset\", \"1000000300\"}};\n    EXPECT_EQ(absl::optional<std::chrono::milliseconds>(300000),\n              state_->parseResetInterval(response_headers));\n  }\n\n  // The second (timestamp) and third (seconds) reset headers match but Retry-After comes first in\n  // reset_headers so it is used\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n    setup(request_headers);\n    EXPECT_TRUE(state_->enabled());\n\n    Http::TestResponseHeaderMapImpl response_headers{\n        {\":status\", \"429\"}, {\"x-ratelimit-reset\", \"1000000002\"}, {\"retry-after\", \"3\"}};\n    EXPECT_EQ(absl::optional<std::chrono::milliseconds>(3000),\n              state_->parseResetInterval(response_headers));\n  }\n}\n\nTEST_F(RouterRetryStateImplTest, RateLimitedRetryBackoffStrategy) {\n  Protobuf::RepeatedPtrField<envoy::config::route::v3::RetryPolicy::ResetHeader> reset_headers;\n  auto* reset_header = reset_headers.Add();\n  reset_header->set_name(\"Retry-After\");\n  reset_header->set_format(envoy::config::route::v3::RetryPolicy::SECONDS);\n\n  policy_.num_retries_ = 4;\n  policy_.reset_headers_ = ResetHeaderParserImpl::buildResetHeaderParserVector(reset_headers);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n  setup(request_headers);\n  EXPECT_TRUE(state_->enabled());\n\n  
retry_timer_ = new Event::MockTimer(&dispatcher_);\n  Http::TestResponseHeaderMapImpl response_headers_reset_1{{\":status\", \"500\"},\n                                                           {\"retry-after\", \"2\"}};\n  Http::TestResponseHeaderMapImpl response_headers_plain{{\":status\", \"500\"}};\n  Http::TestResponseHeaderMapImpl response_headers_reset_2{{\":status\", \"500\"},\n                                                           {\"retry-after\", \"5\"}};\n  Http::TestResponseHeaderMapImpl response_headers_reset_invalid{{\":status\", \"500\"},\n                                                                 {\"retry-after\", \"0\"}};\n\n  // reset header present -> ratelimit backoff used\n  EXPECT_CALL(random_, random()).WillOnce(Return(190));\n  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(2190), _));\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers_reset_1, callback_));\n  EXPECT_CALL(callback_ready_, ready());\n  retry_timer_->invokeCallback();\n\n  // reset header not present -> exponential backoff used\n  EXPECT_CALL(random_, random()).WillOnce(Return(190));\n  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(15), _));\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers_plain, callback_));\n  EXPECT_CALL(callback_ready_, ready());\n  retry_timer_->invokeCallback();\n\n  // reset header present -> ratelimit backoff used\n  EXPECT_CALL(random_, random()).WillOnce(Return(2190));\n  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(7190), _));\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers_reset_2, callback_));\n  EXPECT_CALL(callback_ready_, ready());\n  retry_timer_->invokeCallback();\n\n  // reset header present but invalid -> exponential backoff used\n  EXPECT_CALL(random_, random()).WillOnce(Return(190));\n  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(40), _));\n  EXPECT_EQ(RetryStatus::Yes,\n    
        state_->shouldRetryHeaders(response_headers_reset_invalid, callback_));\n  EXPECT_CALL(callback_ready_, ready());\n  retry_timer_->invokeCallback();\n\n  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded,\n            state_->shouldRetryHeaders(response_headers_reset_2, callback_));\n\n  EXPECT_EQ(2UL, cluster_.stats().upstream_rq_retry_backoff_ratelimited_.value());\n  EXPECT_EQ(2UL, cluster_.stats().upstream_rq_retry_backoff_exponential_.value());\n}\n\nTEST_F(RouterRetryStateImplTest, HostSelectionAttempts) {\n  policy_.host_selection_max_attempts_ = 2;\n  policy_.retry_on_ = RetryPolicy::RETRY_ON_CONNECT_FAILURE;\n\n  setup();\n\n  EXPECT_EQ(2, state_->hostSelectionMaxAttempts());\n}\n\nTEST_F(RouterRetryStateImplTest, Cancel) {\n  // Cover the case where we start a retry, and then we get destructed. This is how the router\n  // uses the implementation in the cancel case.\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"connect-failure\"}};\n  setup(request_headers);\n  EXPECT_TRUE(state_->enabled());\n\n  expectTimerCreateAndEnable();\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_));\n}\n\nTEST_F(RouterRetryStateImplTest, ZeroMaxRetriesHeader) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"connect-failure\"},\n                                                 {\"x-envoy-retry-grpc-on\", \"cancelled\"},\n                                                 {\"x-envoy-max-retries\", \"0\"}};\n  setup(request_headers);\n  EXPECT_TRUE(state_->enabled());\n\n  EXPECT_EQ(RetryStatus::NoRetryLimitExceeded,\n            state_->shouldRetryReset(connect_failure_, callback_));\n\n  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());\n  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());\n  EXPECT_EQ(0UL, cluster_.stats().upstream_rq_retry_.value());\n  EXPECT_EQ(0UL, 
virtual_cluster_.stats().upstream_rq_retry_.value());\n}\n\n// Check that if there are 0 remaining retries available but we get\n// non-retriable headers, we return No rather than NoRetryLimitExceeded.\nTEST_F(RouterRetryStateImplTest, NoPreferredOverLimitExceeded) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"},\n                                                 {\"x-envoy-max-retries\", \"1\"}};\n  setup(request_headers);\n\n  Http::TestResponseHeaderMapImpl bad_response_headers{{\":status\", \"503\"}};\n  expectTimerCreateAndEnable();\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(bad_response_headers, callback_));\n\n  Http::TestResponseHeaderMapImpl good_response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(good_response_headers, callback_));\n\n  EXPECT_EQ(0UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value());\n  EXPECT_EQ(0UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value());\n  EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value());\n  EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value());\n}\n\nTEST_F(RouterRetryStateImplTest, BudgetAvailableRetries) {\n  // Expect no available retries from resource manager and override the max_retries CB via retry\n  // budget. 
As configured, there are no allowed retries via max_retries CB.\n  cluster_.resetResourceManagerWithRetryBudget(\n      0 /* cx */, 0 /* rq_pending */, 0 /* rq */, 0 /* rq_retry */, 0 /* conn_pool */,\n      20.0 /* budget_percent */, 3 /* min_retry_concurrency */);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n\n  setup(request_headers);\n  EXPECT_TRUE(state_->enabled());\n\n  expectTimerCreateAndEnable();\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"500\"}};\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));\n}\n\nTEST_F(RouterRetryStateImplTest, BudgetNoAvailableRetries) {\n  // Expect no available retries from resource manager. Override the max_retries CB via a retry\n  // budget that won't let any retries. As configured, there are 5 allowed retries via max_retries\n  // CB.\n  cluster_.resetResourceManagerWithRetryBudget(\n      0 /* cx */, 0 /* rq_pending */, 20 /* rq */, 5 /* rq_retry */, 0 /* conn_pool */,\n      0 /* budget_percent */, 0 /* min_retry_concurrency */);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n\n  setup(request_headers);\n  EXPECT_TRUE(state_->enabled());\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"500\"}};\n  EXPECT_EQ(RetryStatus::NoOverflow, state_->shouldRetryHeaders(response_headers, callback_));\n}\n\nTEST_F(RouterRetryStateImplTest, BudgetVerifyMinimumConcurrency) {\n  // Expect no available retries from resource manager.\n  cluster_.resetResourceManagerWithRetryBudget(\n      0 /* cx */, 0 /* rq_pending */, 0 /* rq */, 0 /* rq_retry */, 0 /* conn_pool */,\n      20.0 /* budget_percent */, 3 /* min_retry_concurrency */);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"},\n                                                 {\"x-envoy-max-retries\", \"42\"}};\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", 
\"500\"}};\n\n  setup(request_headers);\n  EXPECT_TRUE(state_->enabled());\n\n  // Load up 2 outstanding retries and verify the 3rd one is allowed when there are no outstanding\n  // requests. This verifies the minimum allowed outstanding retries before the budget is scaled\n  // with the request concurrency.\n  incrOutstandingResource(TestResourceType::Retry, 2);\n\n  expectTimerCreateAndEnable();\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));\n\n  // 3 outstanding retries.\n  incrOutstandingResource(TestResourceType::Retry, 1);\n\n  EXPECT_EQ(RetryStatus::NoOverflow, state_->shouldRetryHeaders(response_headers, callback_));\n\n  incrOutstandingResource(TestResourceType::Request, 20);\n\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _));\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));\n\n  // 4 outstanding retries.\n  incrOutstandingResource(TestResourceType::Retry, 1);\n\n  EXPECT_EQ(RetryStatus::NoOverflow, state_->shouldRetryHeaders(response_headers, callback_));\n\n  // Override via runtime and expect successful retry.\n  std::string value(\"100\");\n  EXPECT_CALL(cluster_.runtime_.snapshot_, get(\"fake_clusterretry_budget.budget_percent\"))\n      .WillRepeatedly(Return(value));\n  EXPECT_CALL(cluster_.runtime_.snapshot_, getDouble(\"fake_clusterretry_budget.budget_percent\", _))\n      .WillRepeatedly(Return(100.0));\n\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _));\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));\n}\n\nTEST_F(RouterRetryStateImplTest, BudgetRuntimeSetOnly) {\n  // Expect no available retries from resource manager, so no retries allowed according to\n  // max_retries CB. Don't configure retry budgets. 
We'll rely on runtime config only.\n  cluster_.resetResourceManager(0 /* cx */, 0 /* rq_pending */, 0 /* rq */, 0 /* rq_retry */,\n                                0 /* conn_pool */);\n\n  std::string value(\"20\");\n  EXPECT_CALL(cluster_.runtime_.snapshot_, get(\"fake_clusterretry_budget.min_retry_concurrency\"))\n      .WillRepeatedly(Return(value));\n  EXPECT_CALL(cluster_.runtime_.snapshot_, get(\"fake_clusterretry_budget.budget_percent\"))\n      .WillRepeatedly(Return(value));\n  EXPECT_CALL(cluster_.runtime_.snapshot_, getDouble(\"fake_clusterretry_budget.budget_percent\", _))\n      .WillRepeatedly(Return(20.0));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-retry-on\", \"5xx\"}};\n\n  setup(request_headers);\n  EXPECT_TRUE(state_->enabled());\n\n  incrOutstandingResource(TestResourceType::Retry, 2);\n\n  expectTimerCreateAndEnable();\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"500\"}};\n  EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_));\n}\n\nTEST_F(RouterRetryStateImplTest, ParseRetryOn) {\n  // RETRY_ON_5XX             0x1\n  // RETRY_ON_GATEWAY_ERROR   0x2\n  // RETRY_ON_CONNECT_FAILURE 0x4\n  std::string config = \"5xx,gateway-error,connect-failure\";\n  auto result = RetryStateImpl::parseRetryOn(config);\n  EXPECT_EQ(result.first, 7);\n  EXPECT_TRUE(result.second);\n\n  config = \"xxx,gateway-error,connect-failure\";\n  result = RetryStateImpl::parseRetryOn(config);\n  EXPECT_EQ(result.first, 6);\n  EXPECT_FALSE(result.second);\n\n  config = \" 5xx,gateway-error ,  connect-failure   \";\n  result = RetryStateImpl::parseRetryOn(config);\n  EXPECT_EQ(result.first, 7);\n  EXPECT_TRUE(result.second);\n\n  config = \" 5 xx,gateway-error ,  connect-failure   \";\n  result = RetryStateImpl::parseRetryOn(config);\n  EXPECT_EQ(result.first, 6);\n  EXPECT_FALSE(result.second);\n}\n\nTEST_F(RouterRetryStateImplTest, ParseRetryGrpcOn) {\n  // RETRY_ON_GRPC_CANCELLED             0x20\n  
// RETRY_ON_GRPC_DEADLINE_EXCEEDED     0x40\n  // RETRY_ON_GRPC_RESOURCE_EXHAUSTED    0x80\n  std::string config = \"cancelled,deadline-exceeded,resource-exhausted\";\n  auto result = RetryStateImpl::parseRetryGrpcOn(config);\n  EXPECT_EQ(result.first, 224);\n  EXPECT_TRUE(result.second);\n\n  config = \"cancelled,deadline-exceeded,resource-exhaust\";\n  result = RetryStateImpl::parseRetryGrpcOn(config);\n  EXPECT_EQ(result.first, 96);\n  EXPECT_FALSE(result.second);\n\n  config = \"   cancelled,deadline-exceeded   ,   resource-exhausted   \";\n  result = RetryStateImpl::parseRetryGrpcOn(config);\n  EXPECT_EQ(result.first, 224);\n  EXPECT_TRUE(result.second);\n\n  config = \"   cancelled,deadline-exceeded   ,   resource- exhausted   \";\n  result = RetryStateImpl::parseRetryGrpcOn(config);\n  EXPECT_EQ(result.first, 96);\n  EXPECT_FALSE(result.second);\n}\n\nTEST_F(RouterRetryStateImplTest, RemoveAllRetryHeaders) {\n  // Make sure retry related headers are removed when the policy is enabled.\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"x-envoy-retry-on\", \"5xx,retriable-header-names,retriable-status-codes\"},\n        {\"x-envoy-retry-grpc-on\", \"resource-exhausted\"},\n        {\"x-envoy-retriable-header-names\", \"X-Upstream-Pushback\"},\n        {\"x-envoy-retriable-status-codes\", \"418,420\"},\n        {\"x-envoy-max-retries\", \"7\"},\n        {\"x-envoy-hedge-on-per-try-timeout\", \"true\"},\n        {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"2\"},\n    };\n    setup(request_headers);\n    EXPECT_TRUE(state_->enabled());\n\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retry-on\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retry-grpc-on\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-max-retries\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retriable-header-names\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retriable-status-codes\"));\n    
EXPECT_FALSE(request_headers.has(\"x-envoy-hedge-on-per-try-timeout\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n  }\n\n  // Make sure retry related headers are removed even if the policy is disabled.\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"x-envoy-retriable-header-names\", \"X-Upstream-Pushback\"},\n        {\"x-envoy-retriable-status-codes\", \"418,420\"},\n        {\"x-envoy-max-retries\", \"7\"},\n        {\"x-envoy-hedge-on-per-try-timeout\", \"true\"},\n        {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"2\"},\n    };\n    setup(request_headers);\n    EXPECT_EQ(nullptr, state_);\n\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retry-on\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retry-grpc-on\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-max-retries\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retriable-header-names\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retriable-status-codes\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-hedge-on-per-try-timeout\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n  }\n\n  // Repeat policy is enabled case with runtime flag disabled.\n  {\n    TestScopedRuntime scoped_runtime;\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.consume_all_retry_headers\", \"false\"}});\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"x-envoy-retry-on\", \"5xx,retriable-header-names,retriable-status-codes\"},\n        {\"x-envoy-retry-grpc-on\", \"resource-exhausted\"},\n        {\"x-envoy-retriable-header-names\", \"X-Upstream-Pushback\"},\n        {\"x-envoy-retriable-status-codes\", \"418,420\"},\n        {\"x-envoy-max-retries\", \"7\"},\n        {\"x-envoy-hedge-on-per-try-timeout\", \"true\"},\n        {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"2\"},\n    };\n    setup(request_headers);\n    
EXPECT_TRUE(state_->enabled());\n\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retry-on\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retry-grpc-on\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-max-retries\"));\n    EXPECT_TRUE(request_headers.has(\"x-envoy-retriable-header-names\"));\n    EXPECT_TRUE(request_headers.has(\"x-envoy-retriable-status-codes\"));\n    EXPECT_TRUE(request_headers.has(\"x-envoy-hedge-on-per-try-timeout\"));\n    EXPECT_TRUE(request_headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n  }\n\n  // Repeat policy is disabled case with runtime flag disabled.\n  {\n    TestScopedRuntime scoped_runtime;\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.consume_all_retry_headers\", \"false\"}});\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"x-envoy-retriable-header-names\", \"X-Upstream-Pushback\"},\n        {\"x-envoy-retriable-status-codes\", \"418,420\"},\n        {\"x-envoy-max-retries\", \"7\"},\n        {\"x-envoy-hedge-on-per-try-timeout\", \"true\"},\n        {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"2\"},\n    };\n    setup(request_headers);\n    EXPECT_EQ(nullptr, state_);\n\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retry-on\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-retry-grpc-on\"));\n    EXPECT_FALSE(request_headers.has(\"x-envoy-max-retries\"));\n    EXPECT_TRUE(request_headers.has(\"x-envoy-retriable-header-names\"));\n    EXPECT_TRUE(request_headers.has(\"x-envoy-retriable-status-codes\"));\n    EXPECT_TRUE(request_headers.has(\"x-envoy-hedge-on-per-try-timeout\"));\n    EXPECT_TRUE(request_headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n  }\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/route_corpus/Response_headers_to_remove",
    "content": "config {\n  virtual_hosts {\n    name: \"j\"\n    domains: \"$\"\n    routes {\n      match {\n        path: \")\"\n      }\n      route {\n        weighted_clusters {\n          clusters {\n            name: \"$$\"\n            weight {\n              value: 870\n            }\n            metadata_match {\n              filter_metadata {\n                key: \"envoy.lb\"\n                value {\n                }\n              }\n            }\n            request_headers_to_remove: \"&\"\n          }\n          clusters {\n            name: \"$\"\n            weight {\n              value: 1868759072\n            }\n            metadata_match {\n              filter_metadata {\n                key: \"envoy.lb\"\n                value {\n                  fields {\n                    key: \"\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"  \"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \")\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"-209\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"1\"\n                    value {\n                      string_value: \"z\"\n                    }\n                  }\n                  fields {\n                    key: \"2\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"5\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"8\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"@\"\n                    value {\n                    }\n         
         }\n                  fields {\n                    key: \"Q\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"]\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"^\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"i\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"j\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"p\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"x\"\n                    value {\n                    }\n                  }\n                }\n              }\n            }\n          }\n          runtime_key_prefix: \"\\177\"\n        }\n        metadata_match {\n          filter_metadata {\n            key: \"envoy.lb\"\n            value {\n              fields {\n                key: \"9\"\n                value {\n                }\n              }\n            }\n          }\n        }\n        upgrade_configs {\n        }\n      }\n    }\n    response_headers_to_remove: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n    include_attempt_count_in_response: true\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4592245302362112",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \" \"     routes {       match {         prefix: \"\"         grpc {         }       }       route {         cluster: \" \"       }       response_headers_to_remove: \"    \"     }     routes {       match {         path: \" \"       }       route {         cluster: \" \"       }       request_headers_to_add {         header {           key: \"       0\\000\\000\\000\\000\\000\\000\"\n        }\n      }\n      response_headers_to_remove: \"J\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n      }\n      route {\n        cluster: \"#\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"\"\n      }\n      route {\n        cluster: \"W\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"\"\n      }\n      redirect {\n      }\n    }\n    routes {\n      match {\n        path: \"?\"\n      }\n      route {\n        cluster: \"2\"\n      }\n    }\n    routes {\n      match {\n        path: \"/\"\n      }\n      redirect {\n      }\n    }\n    routes {\n      match {\n        path: \"\"\n      }\n      route {\n        cluster: \"$\"\n      }\n    }\n    routes {\n      match {\n        path: \"?\"\n      }\n      route {\n        cluster: \"W\"\n      }\n    }\n    routes {\n      match {\n        path: \"\"\n      }\n      route {\n        cluster: \"n\"\n      }\n    }\n    routes {\n      match {\n        path: \"\"\n      }\n      route {\n        cluster_header: \"J\"\n      }\n    }\n    routes {\n      match {\n        path: \"W\"\n      }\n      route {\n        cluster: \"$\"\n      }\n      response_headers_to_remove: \"\\022\"\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        path: \"?\"\n      }\n      route {\n        cluster: \"\\037\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"\"\n      }\n      redirect {\n      }\n    }\n    routes {\n      match {\n        path: \"?\"\n      }\n      
redirect {\n      }\n    }\n    routes {\n      match {\n        prefix: \"\"\n      }\n      route {\n        cluster: \"$\"\n      }\n      response_headers_to_remove: \"\"\n    }\n  }\n  vhds {\n    config_source {\n      path: \"2\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4701452596674560.fuzz",
    "content": "config {\n  virtual_hosts {\n    name: \"&\\006\\000\\000\\000\"\n    domains: \"-\"\n    require_tls: ALL\n    response_headers_to_remove: \"\\0Ï3\\022\\362\\211\\245\\247V\\036\"\n    request_headers_to_remove: \"\\003\\022\\360\\234\\254\\265V\\036\"\n  }\n}\nrandom_value: 67070975\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4803620674732032.fuzz",
    "content": "config {\n  virtual_hosts {\n    name: \"/\"\n    domains: \"\"\n    domains: \"\"\n    domains: \"*\"\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n      name: \"J\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n            value: true\n          }\n        }\n      }\n      response_headers_to_remove: \"\\021\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      
response_headers_to_remove: \"\\25537\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"W\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\020\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: 
\"J\"\n        upgrade_configs {\n          enabled {\n            value: true\n          }\n        }\n      }\n      response_headers_to_remove: \"\\021\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n      filter_action {\n      }\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n            value: true\n          }\n        }\n      }\n      response_headers_to_remove: \"\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n    
    grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n            value: true\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n        }\n      }\n      response_headers_to_remove: \"\\021\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\001\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\0s#\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n      request_headers_to_remove: \"J\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n  
        enabled {\n                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                       }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n    routes {\n      match {\n        prefix: \"\"\n        grpc {\n        }\n      }\n      route {\n        cluster_header: \"J\"\n        upgrade_configs {\n          enabled {\n          }\n        }\n      }\n      response_headers_to_remove: \"\\022\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5074413991231488.fuzz",
    "content": "config {\n  virtual_hosts {\n    name: \"P\"\n    domains: \"m\"\n    routes {\n      match {\n        path: \"\"\n      }\n      route {\n        cluster: \"i\"\n        upgrade_configs {\n        }\n        upgrade_configs {\n          upgrade_type: \"5\"\n        }\n        upgrade_configs {\n          upgrade_type: \"2\"\n        }\n        upgrade_configs {\n          upgrade_type: \"6\"\n        }\n        upgrade_configs {\n          upgrade_type: \"m\"\n        }\n        upgrade_configs {\n          upgrade_type: \"Q\"\n        }\n        upgrade_configs {\n          upgrade_type: \"\\361\\214\\273\\215+\\003\\0\u0012#\u001534\\360\\231\\250\\222\\026\"\n        }\n        upgrade_configs {\n          upgrade_type: \"i\"\n        }\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"P\"\n    domains: \"m\"\n    routes {\n      match {\n        path: \"\"\n      }\n      route {\n        cluster: \"i\"\n        upgrade_configs {\n        }\n        upgrade_configs {\n          upgrade_type: \"5\"\n        }\n        upgrade_configs {\n          upgrade_type: \"2\"\n        }\n        upgrade_configs {\n          upgrade_type: \"6\"\n        }\n        upgrade_configs {\n          upgrade_type: \"m\"\n        }\n        upgrade_configs {\n          upgrade_type: \"Q\"\n        }\n        upgrade_configs {\n          upgrade_type: \"\\361\\214\\273\\215+\\003\\001\\234\\360\\231\\250\\222\\026\"\n        }\n        upgrade_configs {\n          upgrade_type: \"i\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5077190058704896",
    "content": "config {   internal_only_headers: \"\\0  \" } "
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5118898564497408.fuzz",
    "content": "config {\n  virtual_hosts {\n    name: \"*\"\n    domains: \"*\"\n    routes {\n      match {\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 1868323924\n            }\n          }\n          regex: \"\\001\\000\\000\\000\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177?\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177
\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\1||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||t||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||.||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||.....................................................................................................................................................-3489372105301376950.n......................................................................................................................................................................|..............................................................................................
....................................................................................................................................................\\016..............................................................................................................................................................................................................................................................................................................................................................................................................\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\17
7\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\17
7\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177|177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177
\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177
\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177w\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177          }\\n        }\\n     nnnnnnnnnnnnnnnnnnnnn   hash_policy {\\n          header {\\n            header_name: \\\"n\\\"\\n          }\\n        }\\n        hash_policy {\\n          header {\\n            header_name: \\\"e\\\"\\n          }\\n        }\\nnnnnnnn      }\\n    }\\n  }\\n}\\n\"\n        }\n      }\n      redirect {\n        strip_query: true\n      }\n    }\n    routes {\n      match {\n        case_sensitive {\n          value: true\n        }\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 1868323924\n            }\n          }\n          regex: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnennnnnnnnnnnnnnnnnnnnnnnnn\"\n        }\n      }\n      redirect {\n        strip_query: true\n      }\n    }\n  }\n  response_headers_to_remove: \"\"\n  response_headers_to_remove: \"\"\n}\nheaders {\n  headers {\n  }\n  headers {\n    key: \"x-e`voy-iuternal\"\n  }\n  headers {\n  }\n  headers {\n    key: \"date\"\n    value: \"*\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  
headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    value: \"=\"\n  }\n  headers {\n    key: \":method\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"x-e`voy-iuternal\"\n  }\n  headers {\n    value: \"\\177\\177\\177\\177\\177\\177\\177\\025\"\n  }\n  headers {\n    key: \":method\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"date\"\n  }\n  headers {\n    key: \"TE\"\n    value: \"?\"\n  }\n  headers {\n    value: \"api.lyft.c?m\"\n  }\n  headers {\n    key: \":method\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    value: \"*\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    value: \"*\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    value: \"*\"\n  }\n  headers {\n  }\n  headers {\n    key: \"x-forwarded-proto\"\n  }\n  headers {\n    key: \"date\"\n  }\n  headers {\n  }\n  headers {\n    key: \"date\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"host\"\n  }\n  headers {\n    key: \"date\"\n  }\n  headers {\n  }\n  headers {\n    key: \":method\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    value: \"=\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"host\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"host\"\n  }\n  headers {\n  }\n  headers {\n    key: \"host\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \":method\"\n  }\n  headers {\n    key: \"TE\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    value: \"=\"\n  }\n  headers {\n  }\n  headers {\n  }\n  
headers {\n  }\n  headers {\n    key: \":path\"\n  }\n  headers {\n  }\n  headers {\n    value: \"\\001\\000\\000\\000\\000\\000\\000?\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \":path\"\n  }\n  headers {\n    key: \"x-forwarded-proto\"\n  }\n  headers {\n    key: \":path\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"host\"\n  }\n  headers {\n    key: \"x-forwarded-proto\"\n  }\n  headers {\n    value: \"api.lyft.c?m\"\n  }\n  headers {\n  }\n  headers {\n    value: \"api.lyft.c?m\"\n  }\n  headers {\n  }\n  headers {\n    value: \"date\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"date\"\n    value: \"*\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \":method\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"x-forwarded-proto\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"TE\"\n    value: \"?\"\n  }\n  headers {\n    value: \"*\"\n  }\n  headers {\n    value: \"\\177\\177\\177\\177\\177\\177\\177\\025\"\n  }\n  headers {\n  }\n  headers {\n    key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    value: \"date\"\n  
}\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    value: \"*\"\n  }\n  headers {\n  }\n  headers {\n    key: \"TE\"\n    value: \"?\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n    key: \"date\"\n  }\n  headers {\n  }\n  headers {\n  }\n  headers {\n  }\n}"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5142800207708160",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \"*\"     routes {       match {         path: \"/\"       }       route {         cluster: \" \"         prefix_rewrite: \" \"       }     }   }   request_headers_to_remove: \":path\" }\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5198208916520960",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \"*\"     routes {       match {         path: \"\"       }       route {         cluster: \"\"       }     }   } } headers {   headers {     key: \":path\"   } } \n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5206842068697088",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \"\"     rate_limits {       actions {         request_headers {           header_name: \"\\n\"           descriptor_key: \" \"         }       }     }   } } "
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5634743613259776",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \"\"     routes {       match {         path: \"\"       }       route {         cluster: \" \"         hash_policy {           header {             header_name: \"\\n\"           }         }       }     }   } } "
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5635252339343360",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \"\"     routes {       match {         path: \"\"         headers {           name: \"\\r\"         }       }       redirect {       }     }   } } "
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5650952886943744",
    "content": "config {\n  vhds {\n    config_source {\n      api_config_source {\n        request_timeout {\n          nanos: -2147483648\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5654717359718400",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \"*\"     routes {       match {         path: \"/\"       }       route {         cluster: \" \"         host_rewrite: \" \"       }     }   }   request_headers_to_remove: \"host\" }\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5661762636742656",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \"\"     routes {       match {         path: \"\"       }       redirect {       }       tracing {         custom_tags {           tag: \" \"           request_header {             name: \"\\n\"           }         }       }     }   } } "
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5699465522970624",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \"\\r\"   } } "
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5731276071370752",
    "content": "config {\n  virtual_hosts {\n    name: \"e\"\n    domains: \"e\"\n    cors {\n      filter_enabled {\n        default_value {\n        }\n      }\n      shadow_enabled {\n        default_value {\n        }\n      }\n    }\n    response_headers_to_add {\n      header {\n        key: \"A\\000\\000\\000\\000\\000\\000\\000\"\n      }\n    }\n  }\n  vhds {\n    config_source {\n      ads {\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5748492233605120",
    "content": "config {\n  virtual_hosts {\n    name: \"[\"\n    domains: \"bat.com\"\n    routes {\n      match {\n        safe_regex {\n          google_re2 {\n          }\n          regex: \".\"\n        }\n      }\n      filter_action {\n      }\n    }\n  }\n}\nheaders {\n  headers {\n    key: \":authority\"\n    value: \"bat.com\"\n  }\n  headers {\n    key: \":path\"\n    value: \"b\"\n  }\n  headers {\n    key: \"x-forwarded-proto\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5750746072481792",
    "content": "config {   virtual_hosts {     name: \" \"     domains: \"\"     routes {       match {         path: \"\"       }       route {         cluster: \" \"         upgrade_configs {           upgrade_type: \"\\n\"         }       }     }   } } "
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-6249350586171392",
    "content": "config {\n  virtual_hosts {\n    name: \"7ard0\" require_tls: 8  domains: \"\"\n    routes {\n      match {\n        case_sensitive {\n          value: true\n        }\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 1868323924\n            }\n          }\n          regex: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn\"\n        }\n      }\n      redirect {\n        host_redirect: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn\"\n        strip_query: true\n      }\n      name: \"\\020\"\n    }\n    routes {\n      match {\n        case_sensitive {\n          value: true\n        }\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 1868323924\n            }\n          }\n          regex: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn.nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn\"\n        }\n      }\n      redirect {\n        path_redirect: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\1%7\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n        strip_query: true\n      }\n    }\n    routes {\n      match {\n        path: \"\"\n      }\n      direct_response {\n        status: 246\n        body {\n          inline_bytes: \".\"\n        }\n      }\n    }\n    routes {\n      match {\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 1868323924\n      
      }\n          }\n          regex: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn?nnnnnnnnnnnnnnnnnnnnnnnnnnnnn^nnnnnnnnnnnnnnn\"\n        }\n      }\n      redirect {\n        host_redirect: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn\"\n        path_redirect: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\u001c177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\17\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n        strip_query: true\n      }\n    }\n    routes {\n      match {\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 1868323924\n            }\n          }\n          regex: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn?nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn\"\n        }\n      }\n      redirect {\n        host_redirect: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn.nnnnnnnnnnnnnnnnnnnnnnnn\"\n        path_redirect: 
\"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177K177\\177\\177\\177\\177\\177\\177\\177\"\n        strip_query: true\n      }\n    }\n    routes {\n      match {\n        case_sensitive {\n          value: true\n        }\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 1868323924\n            }\n          }\n          regex: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn?nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnXnnnnnnn\"\n        }\n      }\n      redirect {\n        strip_query: true\n      }\n    }\n    routes {\n      match {\n        case_sensitive {\n          value: true\n        }\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 1868323924\n            }\n          }\n          regex: \".\"\n        }\n      }\n      redirect {\n        path_redirect: \"\\177\\177\\177\\177\\177\\177\\177\\177\\17%\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n        
strip_query: true\n      }\n    }\n    routes {\n      match {\n        case_sensitive {\n          value: true\n        }\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 1868323924\n            }\n          }\n          regex: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn\"\n        }\n      }\n      redirect {\n        host_redirect: \"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn\"\n        strip_query: true\n      }\n      name: \"\\020\"\n    }\n    routes {\n      match {\n        path: \"\"\n      }\n      direct_response {\n        status: 246\n        body {\n          inline_bytes: \".\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-route_fuzz_test-5084150522707968.fuzz",
    "content": "config {\n  virtual_hosts {\n    name: \"*.c\"\n    domains: \"**c\"\n    domains: \"\\001\\000\\000\\000\\0005}T\"\n    cors {\n      filter_enabled {\n        default_value {\n          numerator: 2\n        }\n        runtime_key: \"jconfig {\\n  virtual_hosts {\\n  }\\n  virtual_hosts {\\n    name: \\\".*?\\\"\\n    domains: \\\"*.c\\\"\\n    routes {\\n      match {\\n        path: \\\" \\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n            metadata_match {\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"6\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"@\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||x|\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value {\\n                  bool_value: true\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        path: \\\" \\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          
clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n          }\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"6\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"@\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|[|||||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||x|\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value {\\n                  bool_value: true\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n      }\\n      route {\\n        cluster: \\\"A\\\"\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n            }\\n          }\\n        }\\n      }\\n      request_headers_to_remove: \\\"X\\\"\\n    }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n 
     }\\n      route {\\n        cluster: \\\"Q\\\"\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"6\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"@\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|[|||||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||x|\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value {\\n                  bool_value: true\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        path: \\\"Q\\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        path: \\\" \\\"\\n      }\\n      route {\\n        weighted_clusters {\\n        }\\n        metadata_match {\\n        }\\n      }\\n  
  }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n      }\\n      route {\\n        metadata_match {\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n            metadata_match {\\n              filter_metadata {\\n                key: \\\"envoy.lb\\\"\\n                value {\\n                  fields {\\n                    key: \\\"\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\" \\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"255\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"8\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"@\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"|[|||||\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"||x|\\\"\\n                    value {\\n                      bool_value: true\\n                    }\\n                  }\\n                }\\n              }\\n            }\\n          }\\n          runtime_key_prefix: \\\".*?\\\"\\n        }\\n        metadata_match {\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        path: \\\" \\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n            
metadata_match {\\n              filter_metadata {\\n                key: \\\"envoy.lb\\\"\\n                value {\\n                  fields {\\n                    key: \\\"\\\"\\n                    value {\\n                      bool_value: true\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"$\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"255\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"6\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"@\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"|[|||||||\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"|t||||\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"||x|\\\"\\n                    value {\\n                    }\\n                  }\\n                }\\n              }\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"6\\\"\\n                value {\\n                }\\n              }\\n   
           fields {\\n                key: \\\"@\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|[|||||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||x|\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value {\\n                  bool_value: true\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"0\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|[|||||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n                value {\\n                  struct_value {\\n                  }\\n      
          }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value {\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    cors {\\n      allow_origin: \\\"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\\\"\\n      allow_origin: \\\"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\\\"\\n      filter_enabled {\\n        default_value {\\n          numerator: 2\\n        }\\n        runtime_key: \\\"jjjjjjjjjjjjjjjjjjjjjjjjjjjj\\\"\\n      }\\n    }\\n  }\\n  virtual_hosts {\\n    name: \\\"*.c\\\"\\n    domains: \\\"&\\\"\\n    domains: \\\"regex\\\"\\n    domains: \\\"*-bar.bcard\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"lyft.com\\\"\\n    domains: \\\"K\\\"\\n    domains: \\\"*?foo.com\\\"\\n    domains: \\\"e\\\"\\n    domains: \\\"*.fio.co~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~m\\\"\\n    domains: \\\"*.foo.ko~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~m\\\"\\n    domains: \\\"*.\\\\342\\\\237\\\\205\\\\321\\\\234om\\\"\\n    domains: \\\"*.Woo.com\\\"\\n    domains: \\\"*.cm\\\"\\n    domains: \\\"*.*.bacom\\\"\\n    domains: \\\"*.cm\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"**.oo.com\\\"\\n    domains: \\\"*..c\\\"\\n    domains: \\\"*.f.oocom\\\"\\n    domains: \\\"*.cm\\\"\\n    domains: \\\"*-bar.b~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~)))))))))\\\\017))))))))))))))>))))))))))))))))))))))))\\\\023)))))))))))))))))))))))))))))))))))))))))\\\\032))))))))))))))))))))))))))))))))))))))d\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"*.foo.co~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\\\240~m\\\"\\n    domains: \\\"1*\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\";\\\"\\n    domains: \\\">\\\"\\n    domains: \\\"*%foo.com\\\"\\n    domains: \\\"wwww.lyft.com\\\"\\n    domains: \\\"**c\\\"\\n    domains: \\\"*-bar.baz.\\\\177om))))))))))))))4))T\\\"\\n    domains: \\\"m*.c\\\"\\n    
domains: \\\"Z\\\"\\n    domains: \\\"*.foo.co_\\\"\\n    domains: \\\"o\\\"\\n    domains: \\\"*.#m\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"\\\\\\\"\\\"\\n    domains: \\\"[\\\"\\n    domains: \\\"v\\\"\\n    domains: \\\"x-forwarde\\\"\\n    domains: \\\"x\\\"\\n    domains: \\\"(\\\"\\n    domains: \\\"o.com\\\"\\n    domains: \\\"*.foo.oom\\\"\\n    domains: \\\"?\\\"\\n    domains: \\\"N\\\"\\n    domains: \\\"*.cc\\\"\\n    domains: \\\"*.#t\\\"\\n    domains: \\\"*-bar.baz.\\\\177om)))))))\\\\033))))))4))T\\\"\\n    domains: \\\"*.foo.Oo~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~m\\\"\\n    domains: \\\"d{3}\\\"\\n    domains: \\\"*-bar.baz))))))))\\\\010))))))))))))))))))))))d\\\"\\n    domains: \\\"*-baz.com\\\"\\n    domains: \\\"**.oo.com\\\"\\n    domains: \\\"*-bar.baz)))))))))))))))))))))))))))))))d\\\"\\n    domains: \\\"\\\\177\\\\177\\\\177+\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\" \\\"\\n    domains: \\\"l?\\\"\\n    domains: \\\"~\\\"\\n    cors {\\n      enabled {\\n        value: true\\n      }\\n    }\\n  }\\n  virtual_hosts {\\n    name: \\\"*.foo.com\\\"\\n    cors {\\n      max_age: \\\"_\\\"\\n      filter_enabled {\\n        default_value {\\n          numerator: 2\\n        }\\n        runtime_key: \\\"jjjjjjjjjjjjjjjjjjjjjjjjjjjj\\\"\\n      }\\n    }\\n  }\\n  virtual_hosts {\\n  }\\n  virtual_hosts {\\n    domains: \\\"*-bar.baz))))))))\\\\010))))))))))))))))))))))d\\\"\\n    domains: \\\"*.foo.co~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\\\240~m\\\"\\n    domains: \\\"o\\\"\\n    domains: \\\"*.foo.ko~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~m\\\"\\n    domains: \\\"*.\\\\342\\\\237\\\\205\\\\321\\\\234om\\\"\\n    domains: \\\"regex\\\"\\n    domains: \\\"*.*.bacom\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"*.cm\\\"\\n    domains: \\\"*.fio.co~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~m\\\"\\n    domains: \\\"x\\\"\\n    domains: \\\"*.foo.cnm\\\"\\n    domains: \\\"~\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: 
\\\"lyft.com\\\"\\n    domains: \\\"*.c*\\\"\\n    domains: \\\"e\\\"\\n    domains: \\\"m*.c\\\"\\n    domains: \\\"*-bar.baz)))))))))))))))))))))))))))))))d\\\"\\n    domains: \\\"\\\\\\\"\\\"\\n    domains: \\\"(\\\"\\n    domains: \\\"*-bar.b~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~)))))))))\\\\017))))))))))))))>))))))))))))))))))))))))\\\\023)))))))))))))))))))))))))))))))))))))))))\\\\032))))))))))))))))))))))))))))))))))))))d\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"*%foo.com\\\"\\n    domains: \\\"K\\\"\\n    domains: \\\"?\\\"\\n    domains: \\\">\\\"\\n    domains: \\\"v\\\"\\n    domains: \\\"*.Woo.com\\\"\\n    domains: \\\"**.oo.com\\\"\\n    domains: \\\"d{3}\\\"\\n    domains: \\\"**.oo.com\\\"\\n    domains: \\\"o.com\\\"\\n    domains: \\\"*-bar.bcard\\\"\\n    domains: \\\"*.cm\\\"\\n    domains: \\\"N\\\"\\n    domains: \\\"[\\\"\\n    domains: \\\"*.cm\\\"\\n    domains: \\\";\\\"\\n    domains: \\\" \\\"\\n    domains: \\\"*?foo.com\\\"\\n    domains: \\\"&\\\"\\n    domains: \\\"*.cm\\\"\\n    domains: \\\"*.foo.oom\\\"\\n    domains: \\\"Z\\\"\\n    domains: \\\"*.#t\\\"\\n    domains: \\\"*.foo.Oo~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~m\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"x-forwarde\\\"\\n    domains: \\\"*.f.oocom\\\"\\n    domains: \\\"*.#m\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"*c.m\\\"\\n  }\\n  virtual_hosts {\\n    domains: \\\"*.c\\\"\\n    hedge_policy {\\n      additional_request_chance {\\n        numerator: 2\\n        denominator: TEN_THOUSAND\\n      }\\n    }\\n  }\\n  virtual_hosts {\\n    name: \\\"*.foo.com\\\"\\n    cors {\\n      max_age: \\\"_\\\"\\n      filter_enabled {\\n        default_value {\\n          numerator: 2\\n        }\\n        runtime_key: \\\"jjjjjjjjjjjjjjjjjjjjjjjjjjjj\\\"\\n      }\\n    }\\n  }\\n  virtual_hosts {\\n  }\\n  virtual_hosts {\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\"**c\\\"\\n    domains: 
\\\";\\\"\\n    domains: \\\"*.cm\\\"\\n    domains: \\\"*-bar.bcard\\\"\\n    domains: \\\"*.f.oocom\\\"\\n    domains: \\\"\\\\\\\"\\\"\\n    domains: \\\"o.com\\\"\\n    domains: \\\"\\\\177\\\\177\\\\177+\\\"\\n    domains: \\\"[\\\"\\n    domains: \\\"*.foo.com\\\"\\n    domains: \\\" \\\"\\n    domains: \\\"*.*.bacom\\\"\\n    domains: \\\"regex\\\"\\n    cors {\\n      enabled {\\n        value: true\\n      }\\n    }\\n  }\\n  virtual_hosts {\\n    name: \\\"*.foo.com\\\"\\n    cors {\\n      allow_origin: \\\"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\\\"\\n      filter_enabled {\\n        default_value {\\n          numerator: 2\\n        }\\n        runtime_key: \\\"jjjjjjjjjjjjjjjjjjjjjjjjjjjj\\\"\\n      }\\n    }\\n  }\\n  virtual_hosts {\\n  }\\n  virtual_hosts {\\n    name: \\\".*?\\\"\\n    domains: \\\"*.c\\\"\\n    routes {\\n      match {\\n        path: \\\" \\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n      jjjjjjjjjjjjjj      }\\n            metadata_match {\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"6\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"@\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n           
     value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||x|\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value {\\n                  bool_value: true\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        path: \\\" \\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n          }\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"6\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"@\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|[|||||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||x|\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value 
{\\n                  bool_value: true\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n      }\\n      route {\\n        cluster: \\\"A\\\"\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n            }\\n          }\\n        }\\n      }\\n      request_headers_to_remove: \\\"X\\\"\\n    }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n      }\\n      route {\\n        cluster: \\\"Q\\\"\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"6\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"@\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|[|||||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||x|\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value {\\n                  bool_value: true\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        path: \\\"Q\\\"\\n      }\\n      route 
{\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        path: \\\" \\\"\\n      }\\n      route {\\n        weighted_clusters {\\n        }\\n        metadata_match {\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n      }\\n      route {\\n        metadata_match {\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n            metadata_match {\\n              filter_metadata {\\n                key: \\\"envoy.lb\\\"\\n                value {\\n                  fields {\\n                    key: \\\"\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\" \\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"255\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"8\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"@\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"|[|||||\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"||x|\\\"\\n                    
value {\\n                      bool_value: true\\n                    }\\n                  }\\n                }\\n              }\\n            }\\n          }\\n          runtime_key_prefix: \\\".*?\\\"\\n        }\\n        metadata_match {\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        path: \\\" \\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n            metadata_match {\\n              filter_metadata {\\n                key: \\\"envoy.lb\\\"\\n                value {\\n                  fields {\\n                    key: \\\"\\\"\\n                    value {\\n                      bool_value: true\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"$\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"255\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"6\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"@\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"|[|||||||\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"|t||||\\\"\\n                    value {\\n                    }\\n                  }\\n                  fields {\\n                    key: \\\"||x|\\\"\\n                    value {\\n                    }\\n                  }\\n                }\\n              }\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: 
\\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"6\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"@\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|[|||||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||x|\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value {\\n                  bool_value: true\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    routes {\\n      match {\\n        regex: \\\"U\\\"\\n      }\\n      route {\\n        weighted_clusters {\\n          clusters {\\n            name: \\\"www1\\\"\\n            weight {\\n              value: 100\\n            }\\n          }\\n        }\\n        metadata_match {\\n          filter_metadata {\\n            key: \\\"envoy.lb\\\"\\n            value {\\n              fields {\\n                key: \\\"\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\" \\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"0\\\"\\n                value {\\n                }\\n              
}\\n              fields {\\n                key: \\\"255\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|[|||||||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||\\\"\\n                value {\\n                }\\n              }\\n              fields {\\n                key: \\\"|t||||\\\"\\n   jjjjjjjjjjjj             value {\\n                  struct_value {\\n                  }\\n                }\\n              }\\n              fields {\\n                key: \\\"||||||||\\\"\\n                value {\\n                }\\n              }\\n            }\\n          }\\n        }\\n      }\\n    }\\n    cors {\\n      allow_origin: \\\"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\\\"\\n      allow_origin: \\\"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\\\"\\n      filter_enabled {\\n        default_value {\\n          numerator: 2\\n        }\\n        runtime_key: \\\"jjjjjjjjjjjjjjjjjjjjjjjjjjjj\\\"\\n      }\\n    }\\n  }\\n  internal_only_headers: \\\"\\\\000\\\\000\\\\000\\\\000\\\\j000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\"\\n}\\n\"\n      }\n    }\n    hedge_policy {\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-route_fuzz_test-5088096376324096",
    "content": "config {\n  virtual_hosts {\n    name: \"p\"\n    domains: \"b\"\n    routes {\n      match {\n        path: \"z\"\n      }\n      route {\n        cluster_header: \"\\000\\000\\001\\003\"\n        prefix_rewrite: \" \"\n        cors {\n          allow_origin: \"\"\n          allow_headers: \"b\"\n        }\n      }\n    }\n    cors {\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: 
\"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"e\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"j\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: 
\"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: 
\"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"e\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n      allow_origin: \"b\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-route_fuzz_test-5137346677178368",
    "content": "config {\n  virtual_hosts {\n    name: \" \"\n    domains: \"*\"\n    routes {\n      match {\n        regex: \".*\"\n      }\n      route {\n        cluster: \"}\"\n        prefix_rewrite: \"%\"\n      }\n    }\n  }\n}\nheaders {\n  headers {\n    key: \":path\"\n    value: \"\\001\\000\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-route_fuzz_test-5647162250625024",
    "content": "config {\n  virtual_hosts {\n    name: \"regex\"\n    domains: \"bat.com\"\n    domains: \"*\"\n    domains: \"w.lyft.com\"\n    routes {\n      match {\n        regex: \"/\"\n      }\n      route {\n        cluster: \"regex\"\n        prefix_rewrite: \"ewrote\"\n      }\n      request_headers_to_add {\n        header {\n          key: \"\\177\\177\\177\\177\\177\\177\\177\\177\"\n        }\n      }\n    }\n  }\n  request_headers_to_add {\n    header {\n      key: \":path\"\n      value: \"`\"\n    }\n    append {\n    }\n  }\n  request_headers_to_add {\n    header {\n      key: \"`\"\n      value: \"`\"\n    }\n    append {\n    }\n  }\n  validate_clusters {\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/clusterfuzz-testcase-route_fuzz_test-5671270751141888",
    "content": "config {\n  virtual_hosts {\n    name: \"j\"\n    domains: \"$\"\n    routes {\n      match {\n        path: \")\"\n      }\n      route {\n        weighted_clusters {\n          clusters {\n            name: \"$$\"\n            weight {\n              value: 870\n            }\n            metadata_match {\n              filter_metadata {\n                key: \"envoy.lb\"\n                value {\n                }\n              }\n            }\n            request_headers_to_remove: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n          }\n          clusters {\n            name: \"$\"\n            weight {\n              value: 1868759072\n            }\n            metadata_match {\n              filter_metadata {\n                key: \"envoy.lb\"\n                value {\n                  fields {\n                    key: \"\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"  \"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \")\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"-209\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"1\"\n                    value {\n                      string_value: \"z\"\n                    }\n                  }\n                  fields {\n                    key: \"2\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"5\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"8\"\n                    value {\n                    }\n                  }\n                  fields {\n              
      key: \"@\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"Q\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"]\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"^\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"i\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"j\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"p\"\n                    value {\n                    }\n                  }\n                  fields {\n                    key: \"x\"\n                    value {\n                    }\n                  }\n                }\n              }\n            }\n          }\n          runtime_key_prefix: \"\\177\"\n        }\n        metadata_match {\n          filter_metadata {\n            key: \"envoy.lb\"\n            value {\n              fields {\n                key: \"9\"\n                value {\n                }\n              }\n            }\n          }\n        }\n        upgrade_configs {\n        }\n      }\n    }\n    response_headers_to_remove: \"&\"\n    include_attempt_count_in_response: true\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/config_impl_test_0",
    "content": "config {\n  virtual_hosts {\n    name: \"www2\"\n    domains: \"lyft.com\"\n    domains: \"www.lyft.com\"\n    domains: \"w.lyft.com\"\n    domains: \"ww.lyft.com\"\n    domains: \"wwww.lyft.com\"\n    routes {\n      match {\n        prefix: \"/new_endpoint\"\n      }\n      route {\n        cluster: \"www2\"\n        prefix_rewrite: \"/api/new_endpoint\"\n      }\n    }\n    routes {\n      match {\n        path: \"/\"\n      }\n      route {\n        cluster: \"root_www2\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/\"\n      }\n      route {\n        cluster: \"www2\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"www2_staging\"\n    domains: \"www-staging.lyft.net\"\n    domains: \"www-staging-orca.lyft.com\"\n    routes {\n      match {\n        prefix: \"/\"\n      }\n      route {\n        cluster: \"www2_staging\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"wildcard\"\n    domains: \"*.foo.com\"\n    domains: \"*-bar.baz.com\"\n    routes {\n      match {\n        prefix: \"/\"\n      }\n      route {\n        cluster: \"wildcard\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"wildcard2\"\n    domains: \"*.baz.com\"\n    routes {\n      match {\n        prefix: \"/\"\n      }\n      route {\n        cluster: \"wildcard2\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"regex\"\n    domains: \"bat.com\"\n    routes {\n      match {\n        regex: \"/t[io]c\"\n      }\n      route {\n        cluster: \"clock\"\n      }\n    }\n    routes {\n      match {\n        regex: \"/baa+\"\n      }\n      route {\n        cluster: \"sheep\"\n      }\n    }\n    routes {\n      match {\n        regex: \".*/\\\\d{3}$\"\n      }\n      route {\n        cluster: \"three_numbers\"\n        prefix_rewrite: \"/rewrote\"\n      }\n    }\n    routes {\n      match {\n        regex: \".*\"\n      }\n      route {\n        cluster: \"regex_default\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"regex2\"\n    
domains: \"bat2.com\"\n    routes {\n      match {\n        regex: \"\"\n      }\n      route {\n        cluster: \"nothingness\"\n      }\n    }\n    routes {\n      match {\n        regex: \".*\"\n      }\n      route {\n        cluster: \"regex_default\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"default\"\n    domains: \"*\"\n    routes {\n      match {\n        prefix: \"/api/application_data\"\n      }\n      route {\n        cluster: \"ats\"\n      }\n    }\n    routes {\n      match {\n        path: \"/api/locations\"\n        case_sensitive {\n        }\n      }\n      route {\n        cluster: \"locations\"\n        prefix_rewrite: \"/rewrote\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/api/leads/me\"\n      }\n      route {\n        cluster: \"ats\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/host/rewrite/me\"\n      }\n      route {\n        cluster: \"ats\"\n        host_rewrite: \"new_host\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/oldhost/rewrite/me\"\n      }\n      route {\n        cluster: \"ats\"\n        host_rewrite: \"new_oldhost\"\n      }\n    }\n    routes {\n      match {\n        path: \"/foo\"\n        case_sensitive {\n          value: true\n        }\n      }\n      route {\n        cluster: \"instant-server\"\n        prefix_rewrite: \"/bar\"\n      }\n    }\n    routes {\n      match {\n        path: \"/tar\"\n        case_sensitive {\n        }\n      }\n      route {\n        cluster: \"instant-server\"\n        prefix_rewrite: \"/car\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/newhost/rewrite/me\"\n        case_sensitive {\n        }\n      }\n      route {\n        cluster: \"ats\"\n        host_rewrite: \"new_host\"\n      }\n    }\n    routes {\n      match {\n        path: \"/FOOD\"\n        case_sensitive {\n        }\n      }\n      route {\n        cluster: \"ats\"\n        prefix_rewrite: \"/cAndy\"\n      }\n    }\n    
routes {\n      match {\n        path: \"/ApplEs\"\n        case_sensitive {\n          value: true\n        }\n      }\n      route {\n        cluster: \"instant-server\"\n        prefix_rewrite: \"/oranGES\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/\"\n      }\n      route {\n        cluster: \"instant-server\"\n        timeout {\n          seconds: 30\n        }\n      }\n    }\n    virtual_clusters {\n      pattern: \"^/rides$\"\n      name: \"ride_request\"\n      method: POST\n    }\n    virtual_clusters {\n      pattern: \"^/rides/\\\\d+$\"\n      name: \"update_ride\"\n      method: PUT\n    }\n    virtual_clusters {\n      pattern: \"^/users/\\\\d+/chargeaccounts$\"\n      name: \"cc_add\"\n      method: POST\n    }\n    virtual_clusters {\n      pattern: \"^/users/\\\\d+/chargeaccounts/(?!validate)\\\\w+$\"\n      name: \"cc_add\"\n      method: PUT\n    }\n    virtual_clusters {\n      pattern: \"^/users$\"\n      name: \"create_user_login\"\n      method: POST\n    }\n    virtual_clusters {\n      pattern: \"^/users/\\\\d+$\"\n      name: \"update_user\"\n      method: PUT\n    }\n    virtual_clusters {\n      pattern: \"^/users/\\\\d+/location$\"\n      name: \"ulu\"\n      method: POST\n    }\n  }\n}\nheaders {\n  headers {\n    key: \":authority\"\n    value: \"api.lyft.com\"\n  }\n  headers {\n    key: \":path\"\n    value: \"/\"\n  }\n  headers {\n    key: \":method\"\n    value: \"GET\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/empty",
    "content": ""
  },
  {
    "path": "test/common/router/route_corpus/internal_redirect_nullderef",
    "content": "config {\n  virtual_hosts {\n    name: \"q\"\n    domains: \"\"\n    routes {\n      match {\n        path: \"\"\n      }\n      route {\n        cluster: \".\"\n        internal_redirect_policy {\n          predicates {\n            name: \":\"\n            typed_config {\n              value: \"-\"\n            }\n          }\n        }\n      }\n    }\n  }\n}\nrandom_value: 1"
  },
  {
    "path": "test/common/router/route_corpus/regex",
    "content": "config {\n  virtual_hosts {\n    name: \"www2\"\n    domains: \"lyft.com\"\n    domains: \"www.lyft.com\"\n    domains: \"w.lyft.com\"\n    domains: \"ww.lyft.com\"\n    domains: \"wwww.lyft.com\"\n    routes {\n      name: \"regex-matcher\"\n      match {\n        regex: \"/a/.*\"\n      }\n      route {\n        cluster: \"www2\"\n        prefix_rewrite: \"/api/new_endpoint\"\n      }\n    }\n    routes {\n      match {\n        path: \"/\"\n      }\n      route {\n        cluster: \"root_www2\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/\"\n      }\n      route {\n        cluster: \"www2\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"www2_staging\"\n    domains: \"www-staging.lyft.net\"\n    domains: \"www-staging-orca.lyft.com\"\n    routes {\n      match {\n        prefix: \"/\"\n      }\n      route {\n        cluster: \"www2_staging\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"wildcard\"\n    domains: \"*.foo.com\"\n    domains: \"*-bar.baz.com\"\n    routes {\n      match {\n        prefix: \"/\"\n      }\n      route {\n        cluster: \"wildcard\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"wildcard2\"\n    domains: \"*.baz.com\"\n    routes {\n      match {\n        prefix: \"/\"\n      }\n      route {\n        cluster: \"wildcard2\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"regex\"\n    domains: \"bat.com\"\n    routes {\n      match {\n        regex: \"/t[io]c\"\n      }\n      route {\n        cluster: \"clock\"\n      }\n    }\n    routes {\n      match {\n        regex: \"/baa+\"\n      }\n      route {\n        cluster: \"sheep\"\n      }\n    }\n    routes {\n      match {\n        regex: \".*/\\\\d{3}$\"\n      }\n      route {\n        cluster: \"three_numbers\"\n        prefix_rewrite: \"/rewrote\"\n      }\n    }\n    routes {\n      match {\n        regex: \".*\"\n      }\n      route {\n        cluster: \"regex_default\"\n      }\n    }\n  }\n  virtual_hosts {\n    
name: \"regex2\"\n    domains: \"bat2.com\"\n    routes {\n      match {\n        regex: \"\"\n      }\n      route {\n        cluster: \"nothingness\"\n      }\n    }\n    routes {\n      match {\n        regex: \".*\"\n      }\n      route {\n        cluster: \"regex_default\"\n      }\n    }\n  }\n  virtual_hosts {\n    name: \"default\"\n    domains: \"*\"\n    routes {\n     name: \"default wildcard\"\n      match {\n        regex: \"/a.*\"\n      }\n      route {\n        cluster: \"ats\"\n      }\n    }\n    routes {\n      match {\n        path: \"/api/locations\"\n        case_sensitive {\n        }\n      }\n      route {\n        cluster: \"locations\"\n        prefix_rewrite: \"/rewrote\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/api/leads/me\"\n      }\n      route {\n        cluster: \"ats\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/host/rewrite/me\"\n      }\n      route {\n        cluster: \"ats\"\n        host_rewrite: \"new_host\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/oldhost/rewrite/me\"\n      }\n      route {\n        cluster: \"ats\"\n        host_rewrite: \"new_oldhost\"\n      }\n    }\n    routes {\n      match {\n        path: \"/foo\"\n        case_sensitive {\n          value: true\n        }\n      }\n      route {\n        cluster: \"instant-server\"\n        prefix_rewrite: \"/bar\"\n      }\n    }\n    routes {\n      match {\n        path: \"/tar\"\n        case_sensitive {\n        }\n      }\n      route {\n        cluster: \"instant-server\"\n        prefix_rewrite: \"/car\"\n      }\n    }\n    routes {\n      match {\n        prefix: \"/newhost/rewrite/me\"\n        case_sensitive {\n        }\n      }\n      route {\n        cluster: \"ats\"\n        host_rewrite: \"new_host\"\n      }\n    }\n    routes {\n      match {\n        path: \"/FOOD\"\n        case_sensitive {\n        }\n      }\n      route {\n        cluster: \"ats\"\n        prefix_rewrite: 
\"/cAndy\"\n      }\n    }\n    routes {\n      match {\n        path: \"/ApplEs\"\n        case_sensitive {\n          value: true\n        }\n      }\n      route {\n        cluster: \"instant-server\"\n        prefix_rewrite: \"/oranGES\"\n      }\n    }\n    routes {\n      name: \"empty regex\"\n      match {\n        regex: \".*\"\n      }\n      route {\n        cluster: \"instant-server\"\n        timeout {\n          seconds: 30\n        }\n      }\n    }\n    virtual_clusters {\n      pattern: \"^/rides$\"\n      name: \"ride_request\"\n      method: POST\n    }\n    virtual_clusters {\n      pattern: \"^/rides/\\\\d+$\"\n      name: \"update_ride\"\n      method: PUT\n    }\n    virtual_clusters {\n      pattern: \"^/users/\\\\d+/chargeaccounts$\"\n      name: \"cc_add\"\n      method: POST\n    }\n    virtual_clusters {\n      pattern: \"^/users/\\\\d+/chargeaccounts/(?!validate)\\\\w+$\"\n      name: \"cc_add\"\n      method: PUT\n    }\n    virtual_clusters {\n      pattern: \"^/users$\"\n      name: \"create_user_login\"\n      method: POST\n    }\n    virtual_clusters {\n      pattern: \"^/users/\\\\d+$\"\n      name: \"update_user\"\n      method: PUT\n    }\n    virtual_clusters {\n      pattern: \"^/users/\\\\d+/location$\"\n      name: \"ulu\"\n      method: POST\n    }\n  }\n}\nheaders {\n  headers {\n    key: \":authority\"\n    value: \"lyft.com\"\n  }\n  headers {\n    key: \":path\"\n    value: 
\"/a/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n  }\n  headers {\n    key: \":method\"\n    value: \"GET\"\n  }\n}\n"
  },
  {
    "path": "test/common/router/route_corpus/valid_headers_to_remove",
    "content": "config {\n  virtual_hosts {\n    name: \"e\"\n    domains: \"*\"\n    require_tls: ALL\n    cors {\n      allow_methods: \"?\"\n      allow_headers: \"\\000\\000\\000\\000\\000\\000\\000\\000\"\n      max_age: \"e\"\n      allow_credentials {\n        value: true\n      }\n      allow_origin_string_match {\n        safe_regex {\n          google_re2 {\n          }\n          regex: \"\\000\\000\\000-\\000\\000\"\n        }\n      }\n      allow_origin_string_match {\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 543382106\n            }\n          }\n          regex: \"\\000\\000\\000\\000\\000\\000\\000\\000\"\n        }\n      }\n      allow_origin_string_match {\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 543382106\n            }\n          }\n          regex: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\363\\273\\271\\260\\362\\215\\217\\270\\362\\275\\246\\217\\361\\250\\252\\235\\361\\233\\273\\226\\363\\232\\203\\254\\363\\223\\243\\222\\364\\200\\265\\232\\362\\216\\240\\256\\363\\253\\252\\201\\361\\205\\214\\273\\363\\211\\262\\206\\364\\217\\217\\236\\360\\250\\216\\235\\360\\237\\265\\217\\361\\234\\223\\251\\361\\271\\210\\201\\361\\241\\200\\254\\361\\247\\235\\276\\364\\200\\247\\204\\361\\215\\222\\221\\364\\210\\204\\246\\360\\262\\231\\222\\362\\230\\220\\274\\364\\205\\217\\245\\363\\237\\271\\236\\364\\217\\245\\255\\362\\251\\200\\224\\362\\206\\221\\261\\361\\244\\251\\215\\361\\266\\223\\223\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177|35142047902481845977\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177?\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\
177\\177\\177\\177\\177\\177\\177\\177\\177?\\177\\177\\177\\177\\177\"\n        }\n      }\n      allow_origin_string_match {\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 543382106\n            }\n          }\n          regex: \"^\"\n        }\n      }\n      allow_origin_string_match {\n        safe_regex {\n          google_re2 {\n            max_program_size {\n              value: 543382106\n            }\n          }\n          regex: \"\\000\\000\\000\\000\\000\\000\\000\\000\"\n        }\n      }\n    }\n    request_headers_to_remove: \"\\361`>ùHöxj/^Ò\nž|èL1\\251\\251\\361\\251\\251\\251\\361\\251\\251\\251\\361\\251\\251\\251\\361\\251\\251\\251\\361\\251\\251\\251\\361\\251\\251\\251\\361\\251\\251\\251\\361\\251\\251\\251)\"\n  }\n}"
  },
  {
    "path": "test/common/router/route_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.router;\n\nimport \"envoy/config/route/v3/route.proto\";\nimport \"test/fuzz/common.proto\";\n\n// Structured input for route_fuzz_test.\n\nmessage RouteTestCase {\n  envoy.config.route.v3.RouteConfiguration config = 1;\n  test.fuzz.Headers headers = 2;\n  uint32 random_value = 3;\n}\n"
  },
  {
    "path": "test/common/router/route_fuzz_test.cc",
    "content": "#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route.pb.validate.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/router/config_impl.h\"\n\n#include \"test/common/router/route_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/mocks/server/instance.h\"\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\n// Remove regex matching route configs.\nenvoy::config::route::v3::RouteConfiguration\ncleanRouteConfig(envoy::config::route::v3::RouteConfiguration route_config) {\n  envoy::config::route::v3::RouteConfiguration clean_config = route_config;\n  auto virtual_hosts = clean_config.mutable_virtual_hosts();\n  std::for_each(virtual_hosts->begin(), virtual_hosts->end(),\n                [](envoy::config::route::v3::VirtualHost& virtual_host) {\n                  auto routes = virtual_host.mutable_routes();\n                  for (int i = 0; i < routes->size();) {\n                    if (routes->Get(i).has_filter_action()) {\n                      routes->erase(routes->begin() + i);\n                    } else {\n                      ++i;\n                    }\n                  }\n                });\n\n  return clean_config;\n}\n\n// TODO(htuch): figure out how to generate via a genrule from config_impl_test the full corpus.\nDEFINE_PROTO_FUZZER(const test::common::router::RouteTestCase& input) {\n  static NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  static NiceMock<Server::Configuration::MockServerFactoryContext> factory_context;\n  try {\n    TestUtility::validate(input);\n    ConfigImpl config(cleanRouteConfig(input.config()), factory_context,\n                      ProtobufMessage::getNullValidationVisitor(), true);\n    auto headers = Fuzz::fromHeaders<Http::TestRequestHeaderMapImpl>(input.headers());\n    auto route = config.route(headers, stream_info, input.random_value());\n    if (route != nullptr && 
route->routeEntry() != nullptr) {\n      route->routeEntry()->finalizeRequestHeaders(headers, stream_info, true);\n    }\n    ENVOY_LOG_MISC(trace, \"Success\");\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n  }\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/router_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.router;\n\nimport \"validate/validate.proto\";\n\nimport \"google/protobuf/empty.proto\";\n\nimport \"test/fuzz/common.proto\";\n\nimport \"envoy/extensions/filters/http/router/v3/router.proto\";\n\nmessage DirectionalAction {\n  oneof response_action_selector {\n    test.fuzz.Headers headers = 1;\n    uint32 data = 2;\n    test.fuzz.Headers trailers = 3;\n  }\n  bool end_stream = 4;\n}\n\nmessage StreamAction {\n  oneof stream_action_selector {\n    DirectionalAction request = 1;\n    DirectionalAction response = 2;\n  }\n}\n\nmessage Action {\n  oneof action_selector {\n    StreamAction stream_action = 1;\n    google.protobuf.Empty advance_time = 2;\n    google.protobuf.Empty force_retry = 3;\n  }\n}\n\nmessage RouterTestCase {\n  repeated Action actions = 1;\n  envoy.extensions.filters.http.router.v3.Router config = 2;\n}\n"
  },
  {
    "path": "test/common/router/router_ratelimit_test.cc",
    "content": "#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.validate.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/config_impl.h\"\n#include \"common/router/router_ratelimit.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/ratelimit/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nenvoy::config::route::v3::RateLimit parseRateLimitFromV3Yaml(const std::string& yaml_string,\n                                                             bool avoid_boosting = true) {\n  envoy::config::route::v3::RateLimit rate_limit;\n  TestUtility::loadFromYaml(yaml_string, rate_limit, false, avoid_boosting);\n  TestUtility::validate(rate_limit);\n  return rate_limit;\n}\n\nTEST(BadRateLimitConfiguration, MissingActions) {\n  EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(\"{}\"), EnvoyException,\n                          \"value must contain at least\");\n}\n\nTEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) {\n  const std::string yaml_one = R\"EOF(\nactions:\n- request_headers: {}\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(yaml_one), EnvoyException,\n                          \"value length must be at least\");\n\n  const std::string yaml_two = R\"EOF(\nactions:\n- request_headers:\n    header_name: test\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(yaml_two), EnvoyException,\n                          \"value length must be at least\");\n\n  const std::string 
yaml_three = R\"EOF(\nactions:\n- request_headers:\n    descriptor_key: test\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(yaml_three), EnvoyException,\n                          \"value length must be at least\");\n}\n\nstatic Http::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::string& path,\n                                                 const std::string& method) {\n  return Http::TestRequestHeaderMapImpl{\n      {\":authority\", host}, {\":path\", path}, {\":method\", method}, {\"x-forwarded-proto\", \"http\"}};\n}\n\nclass RateLimitConfiguration : public testing::Test {\npublic:\n  void setupTest(const std::string& yaml) {\n    envoy::config::route::v3::RouteConfiguration route_config;\n    TestUtility::loadFromYaml(yaml, route_config);\n    config_ =\n        std::make_unique<ConfigImpl>(route_config, factory_context_, any_validation_visitor_, true);\n  }\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> factory_context_;\n  ProtobufMessage::NullValidationVisitorImpl any_validation_visitor_;\n  std::unique_ptr<ConfigImpl> config_;\n  Http::TestRequestHeaderMapImpl header_;\n  const RouteEntry* route_;\n  Network::Address::Ipv4Instance default_remote_address_{\"10.0.0.1\"};\n  const envoy::config::core::v3::Metadata* dynamic_metadata_;\n};\n\nTEST_F(RateLimitConfiguration, NoApplicableRateLimit) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: www2\n      rate_limits:\n      - actions:\n        - remote_address: {}\n  - match:\n      prefix: \"/bar\"\n    route:\n      cluster: www2\n  )EOF\";\n\n  setupTest(yaml);\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_EQ(0U, config_->route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), stream_info, 0)\n                    ->routeEntry()\n                    ->rateLimitPolicy()\n                    
.getApplicableRateLimit(0)\n                    .size());\n}\n\nTEST_F(RateLimitConfiguration, NoRateLimitPolicy) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: www2\n  )EOF\";\n\n  setupTest(yaml);\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  route_ = config_->route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), stream_info, 0)->routeEntry();\n  EXPECT_EQ(0U, route_->rateLimitPolicy().getApplicableRateLimit(0).size());\n  EXPECT_TRUE(route_->rateLimitPolicy().empty());\n}\n\nTEST_F(RateLimitConfiguration, TestGetApplicationRateLimit) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: www2\n      rate_limits:\n      - actions:\n        - remote_address: {}\n  )EOF\";\n\n  setupTest(yaml);\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  route_ = config_->route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), stream_info, 0)->routeEntry();\n  EXPECT_FALSE(route_->rateLimitPolicy().empty());\n  std::vector<std::reference_wrapper<const RateLimitPolicyEntry>> rate_limits =\n      route_->rateLimitPolicy().getApplicableRateLimit(0);\n  EXPECT_EQ(1U, rate_limits.size());\n\n  std::vector<Envoy::RateLimit::Descriptor> descriptors;\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route_, descriptors, \"\", header_, default_remote_address_,\n                                   dynamic_metadata_);\n  }\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"remote_address\", \"10.0.0.1\"}}}}),\n              testing::ContainerEq(descriptors));\n}\n\nTEST_F(RateLimitConfiguration, TestVirtualHost) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/\"\n    
route:\n      cluster: www2test\n  rate_limits:\n  - actions:\n    - destination_cluster: {}\n  )EOF\";\n\n  setupTest(yaml);\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  route_ = config_->route(genHeaders(\"www.lyft.com\", \"/bar\", \"GET\"), stream_info, 0)->routeEntry();\n  std::vector<std::reference_wrapper<const RateLimitPolicyEntry>> rate_limits =\n      route_->virtualHost().rateLimitPolicy().getApplicableRateLimit(0);\n  EXPECT_EQ(1U, rate_limits.size());\n\n  std::vector<Envoy::RateLimit::Descriptor> descriptors;\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route_, descriptors, \"service_cluster\", header_,\n                                   default_remote_address_, dynamic_metadata_);\n  }\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"destination_cluster\", \"www2test\"}}}}),\n              testing::ContainerEq(descriptors));\n}\n\nTEST_F(RateLimitConfiguration, Stages) {\n  const std::string yaml = R\"EOF(\nvirtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n  - match:\n      prefix: \"/foo\"\n    route:\n      cluster: www2test\n      rate_limits:\n      - stage: 1\n        actions:\n        - remote_address: {}\n      - actions:\n        - destination_cluster: {}\n      - actions:\n        - destination_cluster: {}\n        - source_cluster: {}\n  )EOF\";\n\n  setupTest(yaml);\n\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n  route_ = config_->route(genHeaders(\"www.lyft.com\", \"/foo\", \"GET\"), stream_info, 0)->routeEntry();\n  std::vector<std::reference_wrapper<const RateLimitPolicyEntry>> rate_limits =\n      route_->rateLimitPolicy().getApplicableRateLimit(0);\n  EXPECT_EQ(2U, rate_limits.size());\n\n  std::vector<Envoy::RateLimit::Descriptor> descriptors;\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route_, descriptors, \"service_cluster\", header_,\n               
                    default_remote_address_, dynamic_metadata_);\n  }\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>(\n                  {{{{\"destination_cluster\", \"www2test\"}}},\n                   {{{\"destination_cluster\", \"www2test\"}, {\"source_cluster\", \"service_cluster\"}}}}),\n              testing::ContainerEq(descriptors));\n\n  descriptors.clear();\n  rate_limits = route_->rateLimitPolicy().getApplicableRateLimit(1UL);\n  EXPECT_EQ(1U, rate_limits.size());\n\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route_, descriptors, \"service_cluster\", header_,\n                                   default_remote_address_, dynamic_metadata_);\n  }\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"remote_address\", \"10.0.0.1\"}}}}),\n              testing::ContainerEq(descriptors));\n\n  rate_limits = route_->rateLimitPolicy().getApplicableRateLimit(10UL);\n  EXPECT_TRUE(rate_limits.empty());\n}\n\nclass RateLimitPolicyEntryTest : public testing::Test {\npublic:\n  void setupTest(const std::string& yaml) {\n    rate_limit_entry_ = std::make_unique<RateLimitPolicyEntryImpl>(parseRateLimitFromV3Yaml(yaml));\n    descriptors_.clear();\n  }\n\n  std::unique_ptr<RateLimitPolicyEntryImpl> rate_limit_entry_;\n  Http::TestRequestHeaderMapImpl header_;\n  NiceMock<MockRouteEntry> route_;\n  std::vector<Envoy::RateLimit::Descriptor> descriptors_;\n  Network::Address::Ipv4Instance default_remote_address_{\"10.0.0.1\"};\n  const envoy::config::core::v3::Metadata* dynamic_metadata_;\n};\n\nTEST_F(RateLimitPolicyEntryTest, RateLimitPolicyEntryMembers) {\n  const std::string yaml = R\"EOF(\nstage: 2\ndisable_key: no_ratelimit\nactions:\n- remote_address: {}\n  )EOF\";\n\n  setupTest(yaml);\n\n  EXPECT_EQ(2UL, rate_limit_entry_->stage());\n  EXPECT_EQ(\"no_ratelimit\", rate_limit_entry_->disableKey());\n}\n\nTEST_F(RateLimitPolicyEntryTest, RemoteAddress) {\n  const std::string yaml = 
R\"EOF(\nactions:\n- remote_address: {}\n  )EOF\";\n\n  setupTest(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         dynamic_metadata_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"remote_address\", \"10.0.0.1\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\n// Verify no descriptor is emitted if remote is a pipe.\nTEST_F(RateLimitPolicyEntryTest, PipeAddress) {\n  const std::string yaml = R\"EOF(\nactions:\n- remote_address: {}\n  )EOF\";\n\n  setupTest(yaml);\n\n  Network::Address::PipeInstance pipe_address(\"/hello\");\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, pipe_address,\n                                         dynamic_metadata_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(RateLimitPolicyEntryTest, SourceService) {\n  const std::string yaml = R\"EOF(\nactions:\n- source_cluster: {}\n  )EOF\";\n\n  setupTest(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", header_,\n                                         default_remote_address_, dynamic_metadata_);\n  EXPECT_THAT(\n      std::vector<Envoy::RateLimit::Descriptor>({{{{\"source_cluster\", \"service_cluster\"}}}}),\n      testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, DestinationService) {\n  const std::string yaml = R\"EOF(\nactions:\n- destination_cluster: {}\n  )EOF\";\n\n  setupTest(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", header_,\n                                         default_remote_address_, dynamic_metadata_);\n  EXPECT_THAT(\n      std::vector<Envoy::RateLimit::Descriptor>({{{{\"destination_cluster\", \"fake_cluster\"}}}}),\n      testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, RequestHeaders) {\n  const std::string yaml = R\"EOF(\nactions:\n- request_headers:\n    
header_name: x-header-name\n    descriptor_key: my_header_name\n  )EOF\";\n\n  setupTest(yaml);\n  Http::TestRequestHeaderMapImpl header{{\"x-header-name\", \"test_value\"}};\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", header,\n                                         default_remote_address_, dynamic_metadata_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"my_header_name\", \"test_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\n// Validate that a descriptor is added if the missing request header\n// has skip_if_absent set to true\nTEST_F(RateLimitPolicyEntryTest, RequestHeadersWithSkipIfAbsent) {\n  const std::string yaml = R\"EOF(\nactions:\n- request_headers:\n    header_name: x-header-name\n    descriptor_key: my_header_name\n    skip_if_absent: false\n- request_headers:\n    header_name: x-header\n    descriptor_key: my_header\n    skip_if_absent: true\n  )EOF\";\n\n  setupTest(yaml);\n  Http::TestRequestHeaderMapImpl header{{\"x-header-name\", \"test_value\"}};\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", header,\n                                         default_remote_address_, dynamic_metadata_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"my_header_name\", \"test_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\n// Tests if the descriptors are added if one of the headers is missing\n// and skip_if_absent is set to default value which is false\nTEST_F(RateLimitPolicyEntryTest, RequestHeadersWithDefaultSkipIfAbsent) {\n  const std::string yaml = R\"EOF(\nactions:\n- request_headers:\n    header_name: x-header-name\n    descriptor_key: my_header_name\n    skip_if_absent: false\n- request_headers:\n    header_name: x-header\n    descriptor_key: my_header\n    skip_if_absent: false\n  )EOF\";\n\n  setupTest(yaml);\n  Http::TestRequestHeaderMapImpl header{{\"x-header-test\", \"test_value\"}};\n\n  
rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", header,\n                                         default_remote_address_, dynamic_metadata_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(RateLimitPolicyEntryTest, RequestHeadersNoMatch) {\n  const std::string yaml = R\"EOF(\nactions:\n- request_headers:\n    header_name: x-header\n    descriptor_key: my_header_name\n  )EOF\";\n\n  setupTest(yaml);\n  Http::TestRequestHeaderMapImpl header{{\"x-header-name\", \"test_value\"}};\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", header,\n                                         default_remote_address_, dynamic_metadata_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(RateLimitPolicyEntryTest, RateLimitKey) {\n  const std::string yaml = R\"EOF(\nactions:\n- generic_key:\n    descriptor_value: fake_key\n  )EOF\";\n\n  setupTest(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         dynamic_metadata_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"generic_key\", \"fake_key\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, GenericKeyWithSetDescriptorKey) {\n  const std::string yaml = R\"EOF(\nactions:\n- generic_key:\n    descriptor_key: fake_key\n    descriptor_value: fake_value\n  )EOF\";\n\n  setupTest(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         dynamic_metadata_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"fake_key\", \"fake_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, GenericKeyWithEmptyDescriptorKey) {\n  const std::string yaml = R\"EOF(\nactions:\n- generic_key:\n    descriptor_key: \"\"\n    descriptor_value: fake_value\n  )EOF\";\n\n  
setupTest(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         dynamic_metadata_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"generic_key\", \"fake_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, DynamicMetaDataMatch) {\n  const std::string yaml = R\"EOF(\nactions:\n- dynamic_metadata:\n    descriptor_key: fake_key\n    default_value: fake_value\n    metadata_key:\n      key: 'envoy.xxx'\n      path:\n      - key: test\n      - key: prop\n  )EOF\";\n\n  setupTest(yaml);\n\n  std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  envoy.xxx:\n    test:\n      prop: foo\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, metadata);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"fake_key\", \"foo\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\n// Tests that the default_value is used in the descriptor when the metadata_key is empty.\nTEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatchWithDefaultValue) {\n  const std::string yaml = R\"EOF(\nactions:\n- dynamic_metadata:\n    descriptor_key: fake_key\n    default_value: fake_value\n    metadata_key:\n      key: 'envoy.xxx'\n      path:\n      - key: test\n      - key: prop\n  )EOF\";\n\n  setupTest(yaml);\n\n  std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  envoy.xxx:\n    another_key:\n      prop: foo\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, metadata);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n\n  
EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"fake_key\", \"fake_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatch) {\n  const std::string yaml = R\"EOF(\nactions:\n- dynamic_metadata:\n    descriptor_key: fake_key\n    metadata_key:\n      key: 'envoy.xxx'\n      path:\n      - key: test\n      - key: prop\n  )EOF\";\n\n  setupTest(yaml);\n\n  std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  envoy.xxx:\n    another_key:\n      prop: foo\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, metadata);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(RateLimitPolicyEntryTest, DynamicMetaDataEmptyValue) {\n  const std::string yaml = R\"EOF(\nactions:\n- dynamic_metadata:\n    descriptor_key: fake_key\n    metadata_key:\n      key: 'envoy.xxx'\n      path:\n      - key: test\n      - key: prop\n  )EOF\";\n\n  setupTest(yaml);\n\n  std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  envoy.xxx:\n    test:\n      prop: \"\"\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, metadata);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n\n  EXPECT_TRUE(descriptors_.empty());\n}\n// Tests that no descriptor is generated when both the metadata_key and default_value are empty.\nTEST_F(RateLimitPolicyEntryTest, DynamicMetaDataAndDefaultValueEmpty) {\n  const std::string yaml = R\"EOF(\nactions:\n- dynamic_metadata:\n    descriptor_key: fake_key\n    default_value: \"\"\n    metadata_key:\n      key: 'envoy.xxx'\n      path:\n      - key: test\n      - key: prop\n  )EOF\";\n\n  setupTest(yaml);\n\n  
std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  envoy.xxx:\n    another_key:\n      prop: \"\"\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, metadata);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNonStringMatch) {\n  const std::string yaml = R\"EOF(\nactions:\n- dynamic_metadata:\n    descriptor_key: fake_key\n    metadata_key:\n      key: 'envoy.xxx'\n      path:\n      - key: test\n      - key: prop\n  )EOF\";\n\n  setupTest(yaml);\n\n  std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  envoy.xxx:\n    test:\n      prop:\n        foo: bar\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, metadata);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(RateLimitPolicyEntryTest, HeaderValueMatch) {\n  const std::string yaml = R\"EOF(\nactions:\n- header_value_match:\n    descriptor_value: fake_value\n    headers:\n    - name: x-header-name\n      exact_match: test_value\n  )EOF\";\n\n  setupTest(yaml);\n  Http::TestRequestHeaderMapImpl header{{\"x-header-name\", \"test_value\"}};\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header, default_remote_address_,\n                                         dynamic_metadata_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"header_match\", \"fake_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, HeaderValueMatchNoMatch) {\n  const std::string yaml = R\"EOF(\nactions:\n- header_value_match:\n    descriptor_value: fake_value\n    headers:\n    - 
name: x-header-name\n      exact_match: test_value\n  )EOF\";\n\n  setupTest(yaml);\n  Http::TestRequestHeaderMapImpl header{{\"x-header-name\", \"not_same_value\"}};\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header, default_remote_address_,\n                                         dynamic_metadata_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersNotPresent) {\n  const std::string yaml = R\"EOF(\nactions:\n- header_value_match:\n    descriptor_value: fake_value\n    expect_match: false\n    headers:\n    - name: x-header-name\n      exact_match: test_value\n  )EOF\";\n\n  setupTest(yaml);\n  Http::TestRequestHeaderMapImpl header{{\"x-header-name\", \"not_same_value\"}};\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header, default_remote_address_,\n                                         dynamic_metadata_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"header_match\", \"fake_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersPresent) {\n  const std::string yaml = R\"EOF(\nactions:\n- header_value_match:\n    descriptor_value: fake_value\n    expect_match: false\n    headers:\n    - name: x-header-name\n      exact_match: test_value\n  )EOF\";\n\n  setupTest(yaml);\n  Http::TestRequestHeaderMapImpl header{{\"x-header-name\", \"test_value\"}};\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header, default_remote_address_,\n                                         dynamic_metadata_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(RateLimitPolicyEntryTest, CompoundActions) {\n  const std::string yaml = R\"EOF(\nactions:\n- destination_cluster: {}\n- source_cluster: {}\n  )EOF\";\n\n  setupTest(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", header_,\n                                         
default_remote_address_, dynamic_metadata_);\n  EXPECT_THAT(\n      std::vector<Envoy::RateLimit::Descriptor>(\n          {{{{\"destination_cluster\", \"fake_cluster\"}, {\"source_cluster\", \"service_cluster\"}}}}),\n      testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, CompoundActionsNoDescriptor) {\n  const std::string yaml = R\"EOF(\nactions:\n- destination_cluster: {}\n- header_value_match:\n    descriptor_value: fake_value\n    headers:\n    - name: x-header-name\n      exact_match: test_value\n  )EOF\";\n\n  setupTest(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", header_,\n                                         default_remote_address_, dynamic_metadata_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverride) {\n  const std::string yaml = R\"EOF(\nactions:\n- generic_key:\n    descriptor_value: limited_fake_key\nlimit:\n dynamic_metadata:\n   metadata_key:\n     key: test.filter.key\n     path:\n      - key: test\n  )EOF\";\n\n  setupTest(yaml);\n\n  std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  test.filter.key:\n    test:\n      requests_per_unit: 42\n      unit: HOUR\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, metadata);\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n  EXPECT_THAT(\n      std::vector<Envoy::RateLimit::Descriptor>(\n          {{{{\"generic_key\", \"limited_fake_key\"}}, {{42, envoy::type::v3::RateLimitUnit::HOUR}}}}),\n      testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverrideNotFound) {\n  const std::string yaml = R\"EOF(\nactions:\n- generic_key:\n    descriptor_value: limited_fake_key\nlimit:\n dynamic_metadata:\n   metadata_key:\n     key: unknown.key\n     path:\n      
- key: test\n  )EOF\";\n\n  setupTest(yaml);\n\n  std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  test.filter.key:\n    test:\n      requests_per_unit: 42\n      unit: HOUR\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, metadata);\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"generic_key\", \"limited_fake_key\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverrideWrongType) {\n  const std::string yaml = R\"EOF(\nactions:\n- generic_key:\n    descriptor_value: limited_fake_key\nlimit:\n dynamic_metadata:\n   metadata_key:\n     key: test.filter.key\n     path:\n      - key: test\n  )EOF\";\n\n  setupTest(yaml);\n\n  std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  test.filter.key:\n    test: some_string\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, metadata);\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"generic_key\", \"limited_fake_key\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverrideWrongUnit) {\n  const std::string yaml = R\"EOF(\nactions:\n- generic_key:\n    descriptor_value: limited_fake_key\nlimit:\n dynamic_metadata:\n   metadata_key:\n     key: test.filter.key\n     path:\n      - key: test\n  )EOF\";\n\n  setupTest(yaml);\n\n  std::string metadata_yaml = R\"EOF(\nfilter_metadata:\n  test.filter.key:\n    test:\n      requests_per_unit: 42\n      unit: NOT_A_UNIT\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  
TestUtility::loadFromYaml(metadata_yaml, metadata);\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", header_, default_remote_address_,\n                                         &metadata);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"generic_key\", \"limited_fake_key\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/router_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/extensions/upstreams/http/http/v3/http_connection_pool.pb.h\"\n#include \"envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/config/metadata.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/network/application_protocol.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/upstream_server_name.h\"\n#include \"common/network/upstream_subject_alt_names.h\"\n#include \"common/network/utility.h\"\n#include \"common/router/config_impl.h\"\n#include \"common/router/debug_config.h\"\n#include \"common/router/router.h\"\n#include \"common/stream_info/uint32_accessor_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::AssertionFailure;\nusing testing::AssertionResult;\nusing 
testing::AssertionSuccess;\nusing testing::AtLeast;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::Matcher;\nusing testing::MockFunction;\nusing testing::NiceMock;\nusing testing::Property;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::StartsWith;\n\nnamespace Envoy {\nnamespace Router {\n\nclass RouterTestFilter : public Filter {\npublic:\n  using Filter::Filter;\n  // Filter\n  RetryStatePtr createRetryState(const RetryPolicy&, Http::RequestHeaderMap&,\n                                 const Upstream::ClusterInfo&, const VirtualCluster*,\n                                 Runtime::Loader&, Random::RandomGenerator&, Event::Dispatcher&,\n                                 TimeSource&, Upstream::ResourcePriority) override {\n    EXPECT_EQ(nullptr, retry_state_);\n    retry_state_ = new NiceMock<MockRetryState>();\n    if (reject_all_hosts_) {\n      // Set up RetryState to always reject the host\n      ON_CALL(*retry_state_, shouldSelectAnotherHost(_)).WillByDefault(Return(true));\n    }\n    return RetryStatePtr{retry_state_};\n  }\n\n  const Network::Connection* downstreamConnection() const override {\n    return &downstream_connection_;\n  }\n\n  NiceMock<Network::MockConnection> downstream_connection_;\n  MockRetryState* retry_state_{};\n  bool reject_all_hosts_ = false;\n};\n\nclass RouterTestBase : public testing::Test {\npublic:\n  RouterTestBase(bool start_child_span, bool suppress_envoy_headers,\n                 Protobuf::RepeatedPtrField<std::string> strict_headers_to_check)\n      : http_context_(stats_store_.symbolTable()), shadow_writer_(new MockShadowWriter()),\n        config_(\"test.\", local_info_, stats_store_, cm_, runtime_, random_,\n                ShadowWriterPtr{shadow_writer_}, true, start_child_span, suppress_envoy_headers,\n                false, std::move(strict_headers_to_check), test_time_.timeSystem(), http_context_),\n        router_(config_) {\n    
router_.setDecoderFilterCallbacks(callbacks_);\n    upstream_locality_.set_zone(\"to_az\");\n\n    ON_CALL(*cm_.conn_pool_.host_, address()).WillByDefault(Return(host_address_));\n    ON_CALL(*cm_.conn_pool_.host_, locality()).WillByDefault(ReturnRef(upstream_locality_));\n    router_.downstream_connection_.local_address_ = host_address_;\n    router_.downstream_connection_.remote_address_ =\n        Network::Utility::parseInternetAddressAndPort(\"1.2.3.4:80\");\n\n    // Make the \"system time\" non-zero, because 0 is considered invalid by DateUtil.\n    test_time_.setMonotonicTime(std::chrono::milliseconds(50));\n\n    // Allow any number of setTrackedObject calls for the dispatcher strict mock.\n    EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(AnyNumber());\n  }\n\n  void expectResponseTimerCreate() {\n    response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);\n    EXPECT_CALL(*response_timeout_, enableTimer(_, _));\n    EXPECT_CALL(*response_timeout_, disableTimer());\n  }\n\n  void expectPerTryTimerCreate() {\n    per_try_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);\n    EXPECT_CALL(*per_try_timeout_, enableTimer(_, _));\n    EXPECT_CALL(*per_try_timeout_, disableTimer());\n  }\n\n  void expectMaxStreamDurationTimerCreate() {\n    max_stream_duration_timer_ = new Event::MockTimer(&callbacks_.dispatcher_);\n    EXPECT_CALL(*max_stream_duration_timer_, enableTimer(_, _));\n    EXPECT_CALL(*max_stream_duration_timer_, disableTimer());\n  }\n\n  AssertionResult verifyHostUpstreamStats(uint64_t success, uint64_t error) {\n    if (success != cm_.conn_pool_.host_->stats_.rq_success_.value()) {\n      return AssertionFailure() << fmt::format(\"rq_success {} does not match expected {}\",\n                                               cm_.conn_pool_.host_->stats_.rq_success_.value(),\n                                               success);\n    }\n    if (error != cm_.conn_pool_.host_->stats_.rq_error_.value()) {\n      
return AssertionFailure() << fmt::format(\"rq_error {} does not match expected {}\",\n                                               cm_.conn_pool_.host_->stats_.rq_error_.value(),\n                                               error);\n    }\n    return AssertionSuccess();\n  }\n\n  void verifyMetadataMatchCriteriaFromRequest(bool route_entry_has_match) {\n    ProtobufWkt::Struct request_struct, route_struct;\n    ProtobufWkt::Value val;\n\n    // Populate metadata like StreamInfo.setDynamicMetadata() would.\n    auto& fields_map = *request_struct.mutable_fields();\n    val.set_string_value(\"v3.1\");\n    fields_map[\"version\"] = val;\n    val.set_string_value(\"devel\");\n    fields_map[\"stage\"] = val;\n    (*callbacks_.stream_info_.metadata_\n          .mutable_filter_metadata())[Envoy::Config::MetadataFilters::get().ENVOY_LB] =\n        request_struct;\n\n    // Populate route entry's metadata which will be overridden.\n    val.set_string_value(\"v3.0\");\n    fields_map = *request_struct.mutable_fields();\n    fields_map[\"version\"] = val;\n    MetadataMatchCriteriaImpl route_entry_matches(route_struct);\n\n    if (route_entry_has_match) {\n      ON_CALL(callbacks_.route_->route_entry_, metadataMatchCriteria())\n          .WillByDefault(Return(&route_entry_matches));\n    } else {\n      ON_CALL(callbacks_.route_->route_entry_, metadataMatchCriteria())\n          .WillByDefault(Return(nullptr));\n    }\n\n    EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _))\n        .WillOnce(Invoke(\n            [&](const std::string&, Upstream::ResourcePriority, absl::optional<Http::Protocol>,\n                Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {\n              auto match = context->metadataMatchCriteria()->metadataMatchCriteria();\n              EXPECT_EQ(match.size(), 2);\n              auto it = match.begin();\n\n              // Note: metadataMatchCriteria() keeps its entries sorted, so the order for checks\n              
// below matters.\n\n              // `stage` was only set by the request, not by the route entry.\n              EXPECT_EQ((*it)->name(), \"stage\");\n              EXPECT_EQ((*it)->value().value().string_value(), \"devel\");\n              it++;\n\n              // `version` should be what came from the request, overriding the route entry.\n              EXPECT_EQ((*it)->name(), \"version\");\n              EXPECT_EQ((*it)->value().value().string_value(), \"v3.1\");\n\n              // When metadataMatchCriteria() is computed from dynamic metadata, the result should\n              // be cached.\n              EXPECT_EQ(context->metadataMatchCriteria(), context->metadataMatchCriteria());\n\n              return &cm_.conn_pool_;\n            }));\n    EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n    expectResponseTimerCreate();\n\n    Http::TestRequestHeaderMapImpl headers;\n    HttpTestUtility::addDefaultHeaders(headers);\n    router_.decodeHeaders(headers, true);\n\n    // When the router filter gets reset we should cancel the pool request.\n    EXPECT_CALL(cancellable_, cancel(_));\n    router_.onDestroy();\n  }\n\n  void verifyAttemptCountInRequestBasic(bool set_include_attempt_count_in_request,\n                                        absl::optional<int> preset_count, int expected_count) {\n    setIncludeAttemptCountInRequest(set_include_attempt_count_in_request);\n\n    EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n    expectResponseTimerCreate();\n\n    Http::TestRequestHeaderMapImpl headers;\n    HttpTestUtility::addDefaultHeaders(headers);\n    if (preset_count) {\n      headers.setEnvoyAttemptCount(preset_count.value());\n    }\n    router_.decodeHeaders(headers, true);\n\n    EXPECT_EQ(expected_count, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));\n\n    // When the router filter gets reset we should cancel the pool request.\n    EXPECT_CALL(cancellable_, cancel(_));\n   
 router_.onDestroy();\n    EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n    EXPECT_EQ(0U,\n              callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n    EXPECT_EQ(0U,\n              callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  }\n\n  void verifyAttemptCountInResponseBasic(bool set_include_attempt_count_in_response,\n                                         absl::optional<int> preset_count, int expected_count) {\n    setIncludeAttemptCountInResponse(set_include_attempt_count_in_response);\n\n    NiceMock<Http::MockRequestEncoder> encoder1;\n    Http::ResponseDecoder* response_decoder = nullptr;\n    EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n        .WillOnce(Invoke(\n            [&](Http::ResponseDecoder& decoder,\n                Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n              response_decoder = &decoder;\n              callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n              return nullptr;\n            }));\n    expectResponseTimerCreate();\n\n    Http::TestRequestHeaderMapImpl headers;\n    HttpTestUtility::addDefaultHeaders(headers);\n    router_.decodeHeaders(headers, true);\n\n    Http::ResponseHeaderMapPtr response_headers(\n        new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n    if (preset_count) {\n      response_headers->setEnvoyAttemptCount(preset_count.value());\n    }\n\n    EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n    EXPECT_CALL(callbacks_, encodeHeaders_(_, true))\n        .WillOnce(Invoke([expected_count](Http::ResponseHeaderMap& headers, bool) {\n          EXPECT_EQ(expected_count, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));\n        }));\n    response_decoder->decodeHeaders(std::move(response_headers), true);\n    EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n    EXPECT_EQ(1U,\n              
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  }\n\n  void sendRequest(bool end_stream = true) {\n    if (end_stream) {\n      EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(1);\n    }\n    EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n        .WillOnce(Invoke(\n            [&](Http::ResponseDecoder& decoder,\n                Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n              response_decoder_ = &decoder;\n              EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(testing::AtLeast(2));\n              callbacks.onPoolReady(original_encoder_, cm_.conn_pool_.host_, upstream_stream_info_);\n              return nullptr;\n            }));\n    HttpTestUtility::addDefaultHeaders(default_request_headers_);\n    router_.decodeHeaders(default_request_headers_, end_stream);\n  }\n\n  void enableRedirects(uint32_t max_internal_redirects = 1) {\n    ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, enabled())\n        .WillByDefault(Return(true));\n    ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_,\n            shouldRedirectForResponseCode(_))\n        .WillByDefault(Return(true));\n    ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, maxInternalRedirects())\n        .WillByDefault(Return(max_internal_redirects));\n    ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_,\n            isCrossSchemeRedirectAllowed())\n        .WillByDefault(Return(false));\n    ON_CALL(callbacks_, connection()).WillByDefault(Return(&connection_));\n  }\n\n  void setNumPreviousRedirect(uint32_t num_previous_redirects) {\n    callbacks_.streamInfo().filterState()->setData(\n        \"num_internal_redirects\",\n        std::make_shared<StreamInfo::UInt32AccessorImpl>(num_previous_redirects),\n        StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Request);\n  }\n\n  void 
setIncludeAttemptCountInRequest(bool include) {\n    ON_CALL(callbacks_.route_->route_entry_, includeAttemptCountInRequest())\n        .WillByDefault(Return(include));\n  }\n\n  void setIncludeAttemptCountInResponse(bool include) {\n    ON_CALL(callbacks_.route_->route_entry_, includeAttemptCountInResponse())\n        .WillByDefault(Return(include));\n  }\n\n  void setUpstreamMaxStreamDuration(uint32_t seconds) {\n    common_http_protocol_options_.mutable_max_stream_duration()->MergeFrom(\n        ProtobufUtil::TimeUtil::MillisecondsToDuration(seconds));\n    ON_CALL(cm_.conn_pool_.host_->cluster_, commonHttpProtocolOptions())\n        .WillByDefault(ReturnRef(common_http_protocol_options_));\n  }\n\n  void enableHedgeOnPerTryTimeout() {\n    callbacks_.route_->route_entry_.hedge_policy_.hedge_on_per_try_timeout_ = true;\n    callbacks_.route_->route_entry_.hedge_policy_.additional_request_chance_ =\n        envoy::type::v3::FractionalPercent{};\n    callbacks_.route_->route_entry_.hedge_policy_.additional_request_chance_.set_numerator(0);\n    callbacks_.route_->route_entry_.hedge_policy_.additional_request_chance_.set_denominator(\n        envoy::type::v3::FractionalPercent::HUNDRED);\n  }\n\n  void testAppendCluster(absl::optional<Http::LowerCaseString> cluster_header_name);\n  void testAppendUpstreamHost(absl::optional<Http::LowerCaseString> hostname_header_name,\n                              absl::optional<Http::LowerCaseString> host_address_header_name);\n  void testDoNotForward(absl::optional<Http::LowerCaseString> not_forwarded_header_name);\n\n  Event::SimulatedTimeSystem test_time_;\n  std::string upstream_zone_{\"to_az\"};\n  envoy::config::core::v3::Locality upstream_locality_;\n  envoy::config::core::v3::HttpProtocolOptions common_http_protocol_options_;\n  NiceMock<Stats::MockIsolatedStatsStore> stats_store_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  
Envoy::ConnectionPool::MockCancellable cancellable_;\n  Http::ContextImpl http_context_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  MockShadowWriter* shadow_writer_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  FilterConfig config_;\n  RouterTestFilter router_;\n  Event::MockTimer* response_timeout_{};\n  Event::MockTimer* per_try_timeout_{};\n  Event::MockTimer* max_stream_duration_timer_{};\n  Network::Address::InstanceConstSharedPtr host_address_{\n      Network::Utility::resolveUrl(\"tcp://10.0.0.5:9211\")};\n  NiceMock<Http::MockRequestEncoder> original_encoder_;\n  NiceMock<Http::MockRequestEncoder> second_encoder_;\n  NiceMock<Network::MockConnection> connection_;\n  Http::ResponseDecoder* response_decoder_ = nullptr;\n  Http::TestRequestHeaderMapImpl default_request_headers_;\n  Http::ResponseHeaderMapPtr redirect_headers_{\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"302\"}, {\"location\", \"http://www.foo.com\"}}};\n  NiceMock<Tracing::MockSpan> span_;\n  NiceMock<StreamInfo::MockStreamInfo> upstream_stream_info_;\n};\n\nclass RouterTest : public RouterTestBase {\npublic:\n  RouterTest() : RouterTestBase(false, false, Protobuf::RepeatedPtrField<std::string>{}) {\n    EXPECT_CALL(callbacks_, activeSpan()).WillRepeatedly(ReturnRef(span_));\n  };\n};\n\nclass RouterTestSuppressEnvoyHeaders : public RouterTestBase {\npublic:\n  RouterTestSuppressEnvoyHeaders()\n      : RouterTestBase(false, true, Protobuf::RepeatedPtrField<std::string>{}) {}\n};\n\nTEST_F(RouterTest, UpdateServerNameFilterState) {\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  auto dummy_option = absl::make_optional<envoy::config::core::v3::UpstreamHttpProtocolOptions>();\n  dummy_option.value().set_auto_sni(true);\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocolOptions())\n      .WillByDefault(ReturnRef(dummy_option));\n  ON_CALL(callbacks_.stream_info_, filterState())\n      
.WillByDefault(ReturnRef(stream_info.filterState()));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  stream_info.filterState()->setData(Network::UpstreamServerName::key(),\n                                     std::make_unique<Network::UpstreamServerName>(\"dummy\"),\n                                     StreamInfo::FilterState::StateType::Mutable);\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(\"host\",\n            stream_info.filterState()\n                ->getDataReadOnly<Network::UpstreamServerName>(Network::UpstreamServerName::key())\n                .value());\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, UpdateSubjectAltNamesFilterState) {\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  auto dummy_option = absl::make_optional<envoy::config::core::v3::UpstreamHttpProtocolOptions>();\n  dummy_option.value().set_auto_san_validation(true);\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocolOptions())\n      .WillByDefault(ReturnRef(dummy_option));\n  ON_CALL(callbacks_.stream_info_, filterState())\n      .WillByDefault(ReturnRef(stream_info.filterState()));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(\"host\", stream_info.filterState()\n                        ->getDataReadOnly<Network::UpstreamSubjectAltNames>(\n                  
          Network::UpstreamSubjectAltNames::key())\n                        .value()[0]);\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, RouteNotFound) {\n  EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(callbacks_, route()).WillOnce(Return(nullptr));\n\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1UL, stats_store_.counter(\"test.no_route\").value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_EQ(callbacks_.details(), \"route_not_found\");\n}\n\nTEST_F(RouterTest, ClusterNotFound) {\n  EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  ON_CALL(cm_, get(_)).WillByDefault(Return(nullptr));\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1UL, stats_store_.counter(\"test.no_cluster\").value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_EQ(callbacks_.details(), \"cluster_not_found\");\n}\n\nTEST_F(RouterTest, PoolFailureWithPriority) {\n  ON_CALL(callbacks_.route_->route_entry_, priority())\n      .WillByDefault(Return(Upstream::ResourcePriority::High));\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, Upstream::ResourcePriority::High, _, &router_));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks)\n                    
       -> Http::ConnectionPool::Cancellable* {\n        callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure,\n                                \"tls version mismatch\", cm_.conn_pool_.host_);\n        return nullptr;\n      }));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"503\"}, {\"content-length\", \"139\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure));\n  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))\n      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {\n        EXPECT_EQ(host_address_, host->address());\n      }));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n  // Pool failure, so upstream request was not initiated.\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_EQ(callbacks_.details(),\n            \"upstream_reset_before_response_started{connection failure,tls version mismatch}\");\n}\n\nTEST_F(RouterTest, Http1Upstream) {\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, absl::optional<Http::Protocol>(), _));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(callbacks_.route_->route_entry_, finalizeRequestHeaders(_, _, true));\n  EXPECT_CALL(span_, injectContext(_));\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(\"10\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n\n  // When the router filter gets 
reset we should cancel the pool request.\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\n// We don't get x-envoy-expected-rq-timeout-ms or an indication to insert\n// x-envoy-original-path in the basic upstream test when Envoy header\n// suppression is configured.\nTEST_F(RouterTestSuppressEnvoyHeaders, Http1Upstream) {\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, absl::optional<Http::Protocol>(), _));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(callbacks_.route_->route_entry_, finalizeRequestHeaders(_, _, false));\n  router_.decodeHeaders(headers, true);\n  EXPECT_FALSE(headers.has(\"x-envoy-expected-rq-timeout-ms\"));\n\n  // When the router filter gets reset we should cancel the pool request.\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, Http2Upstream) {\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, absl::optional<Http::Protocol>(), _));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(span_, injectContext(_));\n  router_.decodeHeaders(headers, true);\n\n  // When the router filter gets reset we should cancel the pool request.\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, HashPolicy) {\n  ON_CALL(callbacks_.route_->route_entry_, hashPolicy())\n      .WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));\n  EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))\n      .WillOnce(Return(absl::optional<uint64_t>(10)));\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional<Http::Protocol>,\n                     Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {\n            EXPECT_EQ(10UL, context->computeHashKey().value());\n            return &cm_.conn_pool_;\n          }));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  // When the router filter gets reset we should cancel the pool request.\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, HashPolicyNoHash) {\n  ON_CALL(callbacks_.route_->route_entry_, hashPolicy())\n      .WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));\n  EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))\n      .WillOnce(Return(absl::optional<uint64_t>()));\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, &router_))\n      .WillOnce(\n          Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional<Http::Protocol>,\n                     Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {\n            
EXPECT_FALSE(context->computeHashKey());\n            return &cm_.conn_pool_;\n          }));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  // When the router filter gets reset we should cancel the pool request.\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, HashKeyNoHashPolicy) {\n  ON_CALL(callbacks_.route_->route_entry_, hashPolicy()).WillByDefault(Return(nullptr));\n  EXPECT_FALSE(router_.computeHashKey().has_value());\n}\n\nTEST_F(RouterTest, AddCookie) {\n  ON_CALL(callbacks_.route_->route_entry_, hashPolicy())\n      .WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return &cancellable_;\n          }));\n\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional<Http::Protocol>,\n                     Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {\n            EXPECT_EQ(10UL, context->computeHashKey().value());\n            return &cm_.conn_pool_;\n          }));\n\n  std::string cookie_value;\n  
EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))\n      .WillOnce(Invoke([&](const Network::Address::Instance*, const Http::HeaderMap&,\n                           const Http::HashPolicy::AddCookieCallback add_cookie,\n                           const StreamInfo::FilterStateSharedPtr) {\n        cookie_value = add_cookie(\"foo\", \"\", std::chrono::seconds(1337));\n        return absl::optional<uint64_t>(10);\n      }));\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void {\n        EXPECT_EQ(std::string{headers.get(Http::Headers::get().SetCookie)->value().getStringView()},\n                  \"foo=\\\"\" + cookie_value + \"\\\"; Max-Age=1337; HttpOnly\");\n      }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_EQ(callbacks_.details(), \"via_upstream\");\n  // When the router filter gets reset we should cancel the pool request.\n  router_.onDestroy();\n}\n\nTEST_F(RouterTest, AddCookieNoDuplicate) {\n  ON_CALL(callbacks_.route_->route_entry_, hashPolicy())\n      .WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return &cancellable_;\n          }));\n\n  
EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional<Http::Protocol>,\n                     Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {\n            EXPECT_EQ(10UL, context->computeHashKey().value());\n            return &cm_.conn_pool_;\n          }));\n\n  EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))\n      .WillOnce(Invoke([&](const Network::Address::Instance*, const Http::HeaderMap&,\n                           const Http::HashPolicy::AddCookieCallback add_cookie,\n                           const StreamInfo::FilterStateSharedPtr) {\n        // this should be ignored\n        add_cookie(\"foo\", \"\", std::chrono::seconds(1337));\n        return absl::optional<uint64_t>(10);\n      }));\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void {\n        EXPECT_EQ(std::string{headers.get(Http::Headers::get().SetCookie)->value().getStringView()},\n                  \"foo=baz\");\n      }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"set-cookie\", \"foo=baz\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  // When the router filter gets reset we should cancel the pool request.\n  router_.onDestroy();\n}\n\nTEST_F(RouterTest, AddMultipleCookies) {\n  ON_CALL(callbacks_.route_->route_entry_, hashPolicy())\n      .WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      
.WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return &cancellable_;\n          }));\n\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional<Http::Protocol>,\n                     Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {\n            EXPECT_EQ(10UL, context->computeHashKey().value());\n            return &cm_.conn_pool_;\n          }));\n\n  std::string choco_c, foo_c;\n  EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))\n      .WillOnce(Invoke([&](const Network::Address::Instance*, const Http::HeaderMap&,\n                           const Http::HashPolicy::AddCookieCallback add_cookie,\n                           const StreamInfo::FilterStateSharedPtr) {\n        choco_c = add_cookie(\"choco\", \"\", std::chrono::seconds(15));\n        foo_c = add_cookie(\"foo\", \"/path\", std::chrono::seconds(1337));\n        return absl::optional<uint64_t>(10);\n      }));\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void {\n        MockFunction<void(const std::string&)> cb;\n        EXPECT_CALL(cb, Call(\"foo=\\\"\" + foo_c + \"\\\"; Max-Age=1337; Path=/path; HttpOnly\"));\n        EXPECT_CALL(cb, Call(\"choco=\\\"\" + choco_c + \"\\\"; Max-Age=15; HttpOnly\"));\n\n        headers.iterate([&cb](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n          if (header.key() == Http::Headers::get().SetCookie.get()) {\n            cb.Call(std::string(header.value().getStringView()));\n          }\n          return Http::HeaderMap::Iterate::Continue;\n        
});\n      }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  router_.onDestroy();\n}\n\nTEST_F(RouterTest, MetadataNoOp) { EXPECT_EQ(nullptr, router_.metadataMatchCriteria()); }\n\nTEST_F(RouterTest, MetadataMatchCriteria) {\n  ON_CALL(callbacks_.route_->route_entry_, metadataMatchCriteria())\n      .WillByDefault(Return(&callbacks_.route_->route_entry_.metadata_matches_criteria_));\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _))\n      .WillOnce(\n          Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional<Http::Protocol>,\n                     Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {\n            EXPECT_EQ(context->metadataMatchCriteria(),\n                      &callbacks_.route_->route_entry_.metadata_matches_criteria_);\n            return &cm_.conn_pool_;\n          }));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  // When the router filter gets reset we should cancel the pool request.\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n}\n\nTEST_F(RouterTest, MetadataMatchCriteriaFromRequest) {\n  verifyMetadataMatchCriteriaFromRequest(true);\n}\n\nTEST_F(RouterTest, MetadataMatchCriteriaFromRequestNoRouteEntryMatch) {\n  verifyMetadataMatchCriteriaFromRequest(false);\n}\n\nTEST_F(RouterTest, NoMetadataMatchCriteria) {\n  ON_CALL(callbacks_.route_->route_entry_, metadataMatchCriteria()).WillByDefault(Return(nullptr));\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, 
_, _))\n      .WillOnce(\n          Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional<Http::Protocol>,\n                     Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {\n            EXPECT_EQ(context->metadataMatchCriteria(), nullptr);\n            return &cm_.conn_pool_;\n          }));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  // When the router filter gets reset we should cancel the pool request.\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n}\n\nTEST_F(RouterTest, CancelBeforeBoundToPool) {\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  // When the router filter gets reset we should cancel the pool request.\n  EXPECT_CALL(cancellable_, cancel(_));\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, NoHost) {\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)).WillOnce(Return(nullptr));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"503\"}, {\"content-length\", \"19\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, 
true);\n  EXPECT_EQ(0U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_rq_maintenance_mode\")\n                    .value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_EQ(callbacks_.details(), \"no_healthy_upstream\");\n}\n\nTEST_F(RouterTest, MaintenanceMode) {\n  EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, maintenanceMode()).WillOnce(Return(true));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"503\"},\n                                                   {\"content-length\", \"16\"},\n                                                   {\"content-type\", \"text/plain\"},\n                                                   {\"x-envoy-overloaded\", \"true\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow));\n  EXPECT_CALL(span_, injectContext(_)).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_rq_maintenance_mode\")\n                    .value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->load_report_stats_store_\n                    .counter(\"upstream_rq_dropped\")\n                    .value());\n  EXPECT_EQ(callbacks_.details(), \"maintenance_mode\");\n}\n\n// Validate that we don't set x-envoy-overloaded when Envoy header suppression\n// is 
enabled.\nTEST_F(RouterTestSuppressEnvoyHeaders, MaintenanceMode) {\n  EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, maintenanceMode()).WillOnce(Return(true));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"503\"}, {\"content-length\", \"16\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n}\n\nTEST_F(RouterTest, ResponseCodeDetailsSetByUpstream) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\n// Validate that x-envoy-upstream-service-time is added on a regular\n// request/response path.\nTEST_F(RouterTest, EnvoyUpstreamServiceTime) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          
[&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, true))\n      .WillOnce(Invoke([](Http::HeaderMap& headers, bool) {\n        EXPECT_NE(nullptr, headers.get(Http::Headers::get().EnvoyUpstreamServiceTime));\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\n// Validate that x-envoy-attempt-count is added to request headers when the option is true.\nTEST_F(RouterTest, EnvoyAttemptCountInRequest) {\n  verifyAttemptCountInRequestBasic(\n      /* set_include_attempt_count_in_request */ true,\n      /* preset_count*/ absl::nullopt,\n      /* expected_count */ 1);\n}\n\n// Validate that x-envoy-attempt-count is overwritten by the router on request headers, if the\n// header is sent from the downstream and the option is set to true.\nTEST_F(RouterTest, EnvoyAttemptCountInRequestOverwritten) {\n  verifyAttemptCountInRequestBasic(\n      /* set_include_attempt_count_in_request */ true,\n      /* preset_count*/ 123,\n      /* expected_count */ 1);\n}\n\n// Validate that x-envoy-attempt-count is not overwritten by the router on request headers, if the\n// header is sent from the downstream and the option is set to false.\nTEST_F(RouterTest, EnvoyAttemptCountInRequestNotOverwritten) {\n  verifyAttemptCountInRequestBasic(\n      /* 
set_include_attempt_count_in_request */ false,\n      /* preset_count*/ 123,\n      /* expected_count */ 123);\n}\n\nTEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) {\n  setIncludeAttemptCountInRequest(true);\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Initial request has 1 attempt.\n  EXPECT_EQ(1, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the 5xx response to kick off a new request.\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            
response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // The retry should cause the header to increase to 2.\n  EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(cm_.conn_pool_.host_->health_checker_, setUnhealthy()).Times(0);\n  Http::ResponseHeaderMapPtr response_headers2(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers2), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Validate that x-envoy-attempt-count is added when option is true.\nTEST_F(RouterTest, EnvoyAttemptCountInResponse) {\n  verifyAttemptCountInResponseBasic(\n      /* set_include_attempt_count_in_response */ true,\n      /* preset_count */ absl::nullopt,\n      /* expected_count */ 1);\n}\n\n// Validate that x-envoy-attempt-count is overwritten by the router on response headers, if the\n// header is sent from the upstream and the option is set to true.\nTEST_F(RouterTest, EnvoyAttemptCountInResponseOverwritten) {\n  verifyAttemptCountInResponseBasic(\n      /* set_include_attempt_count_in_response */ true,\n      /* preset_count */ 123,\n      /* expected_count */ 1);\n}\n\n// Validate that x-envoy-attempt-count is not overwritten by the router on response headers, if the\n// header is sent from the upstream and the option is not set to true.\nTEST_F(RouterTest, EnvoyAttemptCountInResponseNotOverwritten) {\n  verifyAttemptCountInResponseBasic(\n      /* set_include_attempt_count_in_response */ 
false,\n      /* preset_count */ 123,\n      /* expected_count */ 123);\n}\n\n// Validate that we don't set x-envoy-attempt-count in responses before an upstream attempt is made.\nTEST_F(RouterTestSuppressEnvoyHeaders, EnvoyAttemptCountInResponseNotPresent) {\n  setIncludeAttemptCountInResponse(true);\n\n  EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, maintenanceMode()).WillOnce(Return(true));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"503\"}, {\"content-length\", \"16\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n}\n\n// Validate that x-envoy-attempt-count is present in local replies after an upstream attempt is\n// made.\nTEST_F(RouterTest, EnvoyAttemptCountInResponsePresentWithLocalReply) {\n  setIncludeAttemptCountInResponse(true);\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks)\n                           -> Http::ConnectionPool::Cancellable* {\n        callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure,\n                                absl::string_view(), cm_.conn_pool_.host_);\n        return nullptr;\n      }));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"503\"},\n                                                   {\"content-length\", \"91\"},\n                                                   {\"content-type\", \"text/plain\"},\n                                                   {\"x-envoy-attempt-count\", \"1\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), 
false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure));\n  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))\n      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {\n        EXPECT_EQ(host_address_, host->address());\n      }));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  // Pool failure, so upstream request was never initiated.\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n  EXPECT_EQ(callbacks_.details(), \"upstream_reset_before_response_started{connection failure}\");\n}\n\n// Validate that the x-envoy-attempt-count header in the downstream response reflects the number of\n// of upstream requests that occurred when retries take place.\nTEST_F(RouterTest, EnvoyAttemptCountInResponseWithRetries) {\n  setIncludeAttemptCountInResponse(true);\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // 5xx 
response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the 5xx response to kick off a new request.\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(cm_.conn_pool_.host_->health_checker_, setUnhealthy()).Times(0);\n  Http::ResponseHeaderMapPtr response_headers2(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, true))\n      .WillOnce(Invoke([](Http::ResponseHeaderMap& headers, bool) {\n        // Because a retry happened the number of attempts in the response headers should be 2.\n        EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers2), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Validate that the cluster is appended to the 
response when configured.\nvoid RouterTestBase::testAppendCluster(absl::optional<Http::LowerCaseString> cluster_header_name) {\n  auto debug_config = std::make_unique<DebugConfig>(\n      /* append_cluster */ true,\n      /* cluster_header */ cluster_header_name,\n      /* append_upstream_host */ false,\n      /* hostname_header */ absl::nullopt,\n      /* host_address_header */ absl::nullopt,\n      /* do_not_forward */ false,\n      /* not_forwarded_header */ absl::nullopt);\n  callbacks_.streamInfo().filterState()->setData(DebugConfig::key(), std::move(debug_config),\n                                                 StreamInfo::FilterState::StateType::ReadOnly,\n                                                 StreamInfo::FilterState::LifeSpan::FilterChain);\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&cluster_header_name](Http::HeaderMap& headers, bool) {\n        const Http::HeaderEntry* cluster_header =\n            
headers.get(cluster_header_name.value_or(Http::Headers::get().EnvoyCluster));\n        EXPECT_NE(nullptr, cluster_header);\n        EXPECT_EQ(\"fake_cluster\", cluster_header->value().getStringView());\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\n// Append cluster with default header name.\nTEST_F(RouterTest, AppendCluster0) { testAppendCluster(absl::nullopt); }\n\n// Append cluster with custom header name.\nTEST_F(RouterTest, AppendCluster1) {\n  testAppendCluster(absl::make_optional(Http::LowerCaseString(\"x-custom-cluster\")));\n}\n\n// Validate that the upstream hostname and address are appended to the response when configured.\nvoid RouterTestBase::testAppendUpstreamHost(\n    absl::optional<Http::LowerCaseString> hostname_header_name,\n    absl::optional<Http::LowerCaseString> host_address_header_name) {\n  auto debug_config = std::make_unique<DebugConfig>(\n      /* append_cluster */ false,\n      /* cluster_header */ absl::nullopt,\n      /* append_upstream_host */ true,\n      /* hostname_header */ hostname_header_name,\n      /* host_address_header */ host_address_header_name,\n      /* do_not_forward */ false,\n      /* not_forwarded_header */ absl::nullopt);\n  callbacks_.streamInfo().filterState()->setData(DebugConfig::key(), std::move(debug_config),\n                                                 StreamInfo::FilterState::StateType::ReadOnly,\n                                                 StreamInfo::FilterState::LifeSpan::FilterChain);\n  cm_.conn_pool_.host_->hostname_ = \"scooby.doo\";\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            
callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&hostname_header_name, &host_address_header_name](Http::HeaderMap& headers,\n                                                                          bool) {\n        const Http::HeaderEntry* hostname_header =\n            headers.get(hostname_header_name.value_or(Http::Headers::get().EnvoyUpstreamHostname));\n        EXPECT_NE(nullptr, hostname_header);\n        EXPECT_EQ(\"scooby.doo\", hostname_header->value().getStringView());\n\n        const Http::HeaderEntry* host_address_header = headers.get(\n            host_address_header_name.value_or(Http::Headers::get().EnvoyUpstreamHostAddress));\n        EXPECT_NE(nullptr, host_address_header);\n        EXPECT_EQ(\"10.0.0.5:9211\", host_address_header->value().getStringView());\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\n// Append hostname and address with default header names.\nTEST_F(RouterTest, AppendUpstreamHost00) { testAppendUpstreamHost(absl::nullopt, absl::nullopt); }\n\n// Append hostname and address with custom host address header name.\nTEST_F(RouterTest, AppendUpstreamHost01) {\n  testAppendUpstreamHost(\n      absl::nullopt, absl::make_optional(Http::LowerCaseString(\"x-custom-upstream-host-address\")));\n}\n\n// Append hostname and address 
with custom hostname header name.\nTEST_F(RouterTest, AppendUpstreamHost10) {\n  testAppendUpstreamHost(absl::make_optional(Http::LowerCaseString(\"x-custom-upstream-hostname\")),\n                         absl::nullopt);\n}\n\n// Append hostname and address with custom header names.\nTEST_F(RouterTest, AppendUpstreamHost11) {\n  testAppendUpstreamHost(\n      absl::make_optional(Http::LowerCaseString(\"x-custom-upstream-hostname\")),\n      absl::make_optional(Http::LowerCaseString(\"x-custom-upstream-host-address\")));\n}\n\n// Validate that the request is not forwarded upstream when configured.\nvoid RouterTestBase::testDoNotForward(\n    absl::optional<Http::LowerCaseString> not_forwarded_header_name) {\n  auto debug_config = std::make_unique<DebugConfig>(\n      /* append_cluster */ false,\n      /* cluster_header */ absl::nullopt,\n      /* append_upstream_host */ false,\n      /* hostname_header */ absl::nullopt,\n      /* host_address_header */ absl::nullopt,\n      /* do_not_forward */ true,\n      /* not_forwarded_header */ not_forwarded_header_name);\n  callbacks_.streamInfo().filterState()->setData(DebugConfig::key(), std::move(debug_config),\n                                                 StreamInfo::FilterState::StateType::ReadOnly,\n                                                 StreamInfo::FilterState::LifeSpan::FilterChain);\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"204\"},\n      {not_forwarded_header_name.value_or(Http::Headers::get().EnvoyNotForwarded).get(), \"true\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n}\n\n// Do not forward, with default not-forwarded header 
name\nTEST_F(RouterTest, DoNotForward0) { testDoNotForward(absl::nullopt); }\n\n// Do not forward, with custom not-forwarded header name\nTEST_F(RouterTest, DoNotForward1) {\n  testDoNotForward(absl::make_optional(Http::LowerCaseString(\"x-custom-not-forwarded\")));\n}\n\n// Validate that all DebugConfig options play nicely with each other.\nTEST_F(RouterTest, AllDebugConfig) {\n  auto debug_config = std::make_unique<DebugConfig>(\n      /* append_cluster */ true,\n      /* cluster_header */ absl::nullopt,\n      /* append_upstream_host */ true,\n      /* hostname_header */ absl::nullopt,\n      /* host_address_header */ absl::nullopt,\n      /* do_not_forward */ true,\n      /* not_forwarded_header */ absl::nullopt);\n  callbacks_.streamInfo().filterState()->setData(DebugConfig::key(), std::move(debug_config),\n                                                 StreamInfo::FilterState::StateType::ReadOnly,\n                                                 StreamInfo::FilterState::LifeSpan::FilterChain);\n  cm_.conn_pool_.host_->hostname_ = \"scooby.doo\";\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"204\"},\n      {\"x-envoy-cluster\", \"fake_cluster\"},\n      {\"x-envoy-upstream-hostname\", \"scooby.doo\"},\n      {\"x-envoy-upstream-host-address\", \"10.0.0.5:9211\"},\n      {\"x-envoy-not-forwarded\", \"true\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n}\n\n// Validate that x-envoy-upstream-service-time is not added when Envoy header\n// suppression is enabled.\n// TODO(htuch): Probably should be TEST_P with\n// RouterTest.EnvoyUpstreamServiceTime, this is getting 
verbose..\nTEST_F(RouterTestSuppressEnvoyHeaders, EnvoyUpstreamServiceTime) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  Http::TestResponseHeaderMapImpl downstream_response_headers{\n      {\":status\", \"200\"}, {\"x-envoy-upstream-service-time\", \"0\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, true))\n      .WillOnce(Invoke([](Http::HeaderMap& headers, bool) {\n        EXPECT_EQ(nullptr, headers.get(Http::Headers::get().EnvoyUpstreamServiceTime));\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\nTEST_F(RouterTest, NoRetriesOverflow) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, 
cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the 5xx response to kick off a new request.\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // RetryOverflow kicks in.\n  EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _))\n      .WillOnce(Return(RetryStatus::NoOverflow));\n  EXPECT_CALL(cm_.conn_pool_.host_->health_checker_, setUnhealthy()).Times(0);\n  Http::ResponseHeaderMapPtr response_headers2(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  
EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers2), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));\n}\n\nTEST_F(RouterTest, ResetDuringEncodeHeaders) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  EXPECT_CALL(callbacks_, removeDownstreamWatermarkCallbacks(_));\n  EXPECT_CALL(callbacks_, addDownstreamWatermarkCallbacks(_));\n  EXPECT_CALL(encoder, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::HeaderMap&, bool) -> void {\n        encoder.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n      }));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  // First connection is successful and reset happens later on.\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        absl::optional<uint64_t>(absl::nullopt)));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\nTEST_F(RouterTest, UpstreamTimeout) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      
.WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))\n      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {\n        EXPECT_EQ(host_address_, host->address());\n      }));\n\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _)).Times(0);\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  response_timeout_->invokeCallback();\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_timeout\")\n                .value());\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_timeout_.value());\n  
EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\n// Verify the timeout budget histograms are filled out correctly when using a\n// global and per-try timeout in a successful request.\nTEST_F(RouterTest, TimeoutBudgetHistogramStat) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"400\"},\n                                         {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"200\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Global timeout budget used.\n  EXPECT_CALL(\n      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_percent_used\"), 20ull));\n  // Per-try budget used.\n  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_per_try_percent_used\"),\n                  40ull));\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  
response_decoder->decodeHeaders(std::move(response_headers), false);\n  test_time_.advanceTimeWait(std::chrono::milliseconds(80));\n  response_decoder->decodeData(data, true);\n}\n\n// Verify the timeout budget histograms are filled out correctly when using a\n// global and per-try timeout in a failed request.\nTEST_F(RouterTest, TimeoutBudgetHistogramStatFailure) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"400\"},\n                                         {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"200\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Global timeout budget used.\n  EXPECT_CALL(\n      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_percent_used\"), 20ull));\n  // Per-try budget used.\n  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_per_try_percent_used\"),\n                  40ull));\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new 
Http::TestResponseHeaderMapImpl{{\":status\", \"500\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n  test_time_.advanceTimeWait(std::chrono::milliseconds(80));\n  response_decoder->decodeData(data, true);\n}\n\n// Verify the timeout budget histograms are filled out correctly when only using a global timeout.\nTEST_F(RouterTest, TimeoutBudgetHistogramStatOnlyGlobal) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"200\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Global timeout budget used.\n  EXPECT_CALL(\n      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_percent_used\"), 40ull));\n  // Per-try budget used is zero out of an infinite timeout.\n  EXPECT_CALL(\n      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_per_try_percent_used\"), 0ull));\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  
response_decoder->decodeHeaders(std::move(response_headers), false);\n  test_time_.advanceTimeWait(std::chrono::milliseconds(80));\n  response_decoder->decodeData(data, true);\n}\n\n// Verify the timeout budget histograms are filled out correctly across retries.\nTEST_F(RouterTest, TimeoutBudgetHistogramStatDuringRetries) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder1 = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"},\n                                         {\"x-envoy-upstream-rq-timeout-ms\", \"400\"},\n                                         {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"100\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Per-try budget used on the first request.\n  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_per_try_percent_used\"),\n                  100ull));\n  // Global timeout histogram does not fire on the first request.\n  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, 
\"upstream_rq_timeout_budget_percent_used\"), _))\n      .Times(0);\n\n  // Per-try timeout.\n  test_time_.advanceTimeWait(std::chrono::milliseconds(100));\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"504\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(504));\n  response_decoder1->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the 5xx response to kick off a new request.\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder2 = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Per-try budget exhausted on the second try.\n  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_per_try_percent_used\"),\n                  100ull));\n  // Global timeout percentage used across both tries.\n  EXPECT_CALL(\n      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_percent_used\"), 50ull));\n\n  // Trigger second request failure.\n  
EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n  EXPECT_CALL(encoder2.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  test_time_.advanceTimeWait(std::chrono::milliseconds(100));\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _)).Times(1);\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  per_try_timeout_->invokeCallback();\n\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_rq_per_try_timeout\")\n                    .value());\n  EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));\n}\n\n// Verify the timeout budget histograms are filled out correctly when the global timeout occurs\n// during a retry.\nTEST_F(RouterTest, TimeoutBudgetHistogramStatDuringGlobalTimeout) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder1 = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"},\n                                         {\"x-envoy-upstream-rq-timeout-ms\", 
\"400\"},\n                                         {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"320\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Per-try budget used on the first request.\n  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_per_try_percent_used\"),\n                  50ull));\n  // Global timeout histogram does not fire on the first request.\n  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_percent_used\"), _))\n      .Times(0);\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  test_time_.advanceTimeWait(std::chrono::milliseconds(160));\n  response_decoder1->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the 5xx response to kick off a new request.\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder2 = &decoder;\n            callbacks.onPoolReady(encoder2, 
cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Global timeout was hit, fires 100.\n  EXPECT_CALL(\n      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_percent_used\"), 100ull));\n  // Per-try budget used on the second request won't fire because the global timeout was hit.\n  EXPECT_CALL(\n      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"upstream_rq_timeout_budget_per_try_percent_used\"), _))\n      .Times(0);\n\n  // Trigger global timeout.\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n  EXPECT_CALL(encoder2.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  test_time_.advanceTimeWait(std::chrono::milliseconds(240));\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _)).Times(0);\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  response_timeout_->invokeCallback();\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_timeout\")\n                .value());\n  EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));\n}\n\n// Validate 
gRPC OK response stats are sane when response is trailers only.\nTEST_F(RouterTest, GrpcOkTrailersOnly) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                         {\"grpc-timeout\", \"20S\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"grpc-status\", \"0\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\n// Validate gRPC AlreadyExists response stats are sane when response is trailers only.\nTEST_F(RouterTest, GrpcAlreadyExistsTrailersOnly) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  
expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                         {\"grpc-timeout\", \"20S\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"grpc-status\", \"6\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(409));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\n// Validate gRPC Unavailable response stats are sane when response is trailers only.\nTEST_F(RouterTest, GrpcOutlierDetectionUnavailableStatusCode) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                         {\"grpc-timeout\", \"20S\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"grpc-status\", \"14\"}});\n  // Outlier detector will use the gRPC response status code.\n  
EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\n// Validate gRPC Internal response stats are sane when response is trailers only.\nTEST_F(RouterTest, GrpcInternalTrailersOnly) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                         {\"grpc-timeout\", \"20S\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"grpc-status\", \"13\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(500));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\n// Validate gRPC response stats are sane when response is ended in a DATA\n// frame.\nTEST_F(RouterTest, GrpcDataEndStream) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> 
Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                         {\"grpc-timeout\", \"20S\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  Buffer::OwnedImpl data;\n  response_decoder->decodeData(data, true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\n// Validate gRPC response stats are sane when response is reset after initial\n// response HEADERS.\nTEST_F(RouterTest, GrpcReset) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                         {\"grpc-timeout\", \"20S\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n  EXPECT_EQ(1UL, stats_store_.counter(\"test.rq_reset_after_downstream_response_started\").value());\n}\n\n// Validate gRPC OK response stats are sane when response is not trailers only.\nTEST_F(RouterTest, GrpcOk) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                         {\"grpc-timeout\", \"20S\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(2);\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  
EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n\n  EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(2);\n  Http::ResponseTrailerMapPtr response_trailers(\n      new Http::TestResponseTrailerMapImpl{{\"grpc-status\", \"0\"}});\n  response_decoder->decodeTrailers(std::move(response_trailers));\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\n// Validate gRPC Internal response stats are sane when response is not trailers only.\nTEST_F(RouterTest, GrpcInternal) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                         {\"grpc-timeout\", \"20S\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  Http::ResponseTrailerMapPtr response_trailers(\n      new Http::TestResponseTrailerMapImpl{{\"grpc-status\", \"13\"}});\n  
response_decoder->decodeTrailers(std::move(response_trailers));\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\nTEST_F(RouterTest, UpstreamTimeoutWithAltResponse) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))\n      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {\n        EXPECT_EQ(host_address_, host->address());\n      }));\n\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-alt-response\", \"204\"},\n                                         {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"204\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _)).Times(0);\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(204)));\n  
response_timeout_->invokeCallback();\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_timeout\")\n                .value());\n  EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\n// Verifies that the per try timeout is initialized once the downstream request has been read.\nTEST_F(RouterTest, UpstreamPerTryTimeout) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))\n      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {\n        EXPECT_EQ(host_address_, host->address());\n      }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-internal\", \"true\"},\n                                         {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  // We verify that both timeouts are started after decodeData(_, true) is called. 
This\n  // verifies that we are not starting the initial per try timeout on the first onPoolReady.\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  per_try_timeout_->invokeCallback();\n\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_rq_per_try_timeout\")\n                    .value());\n  EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\n// Verifies that the per try timeout starts when onPoolReady is called when it occurs\n// after the downstream request has been read.\nTEST_F(RouterTest, UpstreamPerTryTimeoutDelayedPoolReady) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  Http::ConnectionPool::Callbacks* pool_callbacks;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            pool_callbacks = &callbacks;\n            
return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-internal\", \"true\"},\n                                         {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  // Global timeout starts when decodeData(_, true) is called.\n  expectResponseTimerCreate();\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n\n  // Per try timeout starts when onPoolReady is called.\n  expectPerTryTimerCreate();\n  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))\n      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {\n        EXPECT_EQ(host_address_, host->address());\n      }));\n\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  pool_callbacks->onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  per_try_timeout_->invokeCallback();\n\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_rq_per_try_timeout\")\n                    .value());\n  EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value());\n  
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\n// Ensures that the per try callback is not set until the stream becomes available.\nTEST_F(RouterTest, UpstreamPerTryTimeoutExcludesNewStream) {\n  InSequence s;\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  Http::ConnectionPool::Callbacks* pool_callbacks;\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            pool_callbacks = &callbacks;\n            return nullptr;\n          }));\n\n  response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);\n  EXPECT_CALL(*response_timeout_, enableTimer(_, _));\n\n  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))\n      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {\n        EXPECT_EQ(host_address_, host->address());\n      }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-internal\", \"true\"},\n                                         {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n\n  per_try_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);\n  EXPECT_CALL(*per_try_timeout_, enableTimer(_, _));\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  // The per try timeout timer should not be started yet.\n  pool_callbacks->onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  
EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  EXPECT_CALL(*per_try_timeout_, disableTimer());\n  EXPECT_CALL(*response_timeout_, disableTimer());\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  per_try_timeout_->invokeCallback();\n\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_rq_per_try_timeout\")\n                    .value());\n  EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\n// Tests that a retry is sent after the first request hits the per try timeout, but then\n// headers received in response to the first request are still used (and the 2nd request\n// canceled).\nTEST_F(RouterTest, HedgedPerTryTimeoutFirstRequestSucceeds) {\n  enableHedgeOnPerTryTimeout();\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder1 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        
absl::optional<uint64_t>(absl::nullopt)))\n      .Times(2);\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  router_.retry_state_->expectHedgedPerTryTimeoutRetry();\n  per_try_timeout_->invokeCallback();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder2 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // We should not have updated any stats yet because no requests have been\n  // canceled\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n\n  // Now write a 200 back. 
We expect the 2nd stream to be reset and stats to be\n  // incremented properly.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  EXPECT_CALL(encoder2.stream_, resetStream(_));\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n        EXPECT_TRUE(end_stream);\n      }));\n  response_decoder1->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n\n  // TODO: Verify hedge stats here once they are implemented.\n}\n\n// Tests that an upstream request is reset even if it can't be retried as long as there is\n// another in-flight request we're waiting on.\n// Sequence:\n// 1) first upstream request per try timeout\n// 2) second upstream request sent\n// 3) second upstream request gets 5xx, retries exhausted, assert it's reset\n// 4) first upstream request gets 2xx\nTEST_F(RouterTest, HedgedPerTryTimeoutResetsOnBadHeaders) {\n  enableHedgeOnPerTryTimeout();\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder1 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        
absl::optional<uint64_t>(absl::nullopt)))\n      .Times(2);\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  router_.retry_state_->expectHedgedPerTryTimeoutRetry();\n  per_try_timeout_->invokeCallback();\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder2 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // We should not have updated any stats yet because no requests have been\n  // canceled\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n\n  // Now write a 5xx back on the 2nd request with no retries remaining. 
The 2nd request\n  // should be reset immediately.\n  Http::ResponseHeaderMapPtr bad_response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"500\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(500));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  EXPECT_CALL(encoder2.stream_, resetStream(_));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _))\n      .WillOnce(Return(RetryStatus::NoOverflow));\n  // Not end_stream, otherwise we wouldn't need to reset.\n  response_decoder2->decodeHeaders(std::move(bad_response_headers), false);\n\n  // Now write a 200 back. We expect the 2nd stream to be reset and stats to be\n  // incremented properly.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n        EXPECT_TRUE(end_stream);\n      }));\n  response_decoder1->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n\n  // TODO: Verify hedge stats here once they are implemented.\n}\n\n// Three requests sent: 1) 5xx error, 2) per try timeout, 3) gets good response\n// headers.\nTEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) {\n  enableHedgeOnPerTryTimeout();\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder1 = &decoder;\n            
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n  expectPerTryTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"500\"}});\n  // Local origin connect success happens for first and third try.\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        absl::optional<uint64_t>(absl::nullopt)))\n      .Times(2);\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(500));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);\n  router_.retry_state_->expectHeadersRetry();\n  response_decoder1->decodeHeaders(std::move(response_headers1), true);\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder2 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n  
EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Now trigger a per try timeout on the 2nd request, expect a 3rd\n  router_.retry_state_->expectHedgedPerTryTimeoutRetry();\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  NiceMock<Http::MockRequestEncoder> encoder3;\n  Http::ResponseDecoder* response_decoder3 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder3 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder3, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);\n  per_try_timeout_->invokeCallback();\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n  EXPECT_EQ(3U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Now write a 200 back. 
We expect the 2nd stream to be reset and stats to be\n  // incremented properly.\n  Http::ResponseHeaderMapPtr response_headers2(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  EXPECT_CALL(encoder2.stream_, resetStream(_));\n  EXPECT_CALL(encoder3.stream_, resetStream(_)).Times(0);\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n        EXPECT_TRUE(end_stream);\n      }));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  response_decoder3->decodeHeaders(std::move(response_headers2), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n\n  // TODO: Verify hedge stats here once they are implemented.\n}\n\n// First request times out and is retried, and then a response is received.\n// Make sure we don't attempt to retry because we already retried for timeout.\nTEST_F(RouterTest, RetryOnlyOnceForSameUpstreamRequest) {\n  enableHedgeOnPerTryTimeout();\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder1 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        
absl::optional<uint64_t>(absl::nullopt)))\n      .Times(2);\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  router_.retry_state_->expectHedgedPerTryTimeoutRetry();\n  per_try_timeout_->invokeCallback();\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder2 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n\n  // Now send a 5xx back and make sure we don't ask whether we should retry it.\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"500\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(500));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).Times(0);\n  EXPECT_CALL(*router_.retry_state_, wouldRetryFromHeaders(_)).WillOnce(Return(true));\n  response_decoder1->decodeHeaders(std::move(response_headers1), true);\n\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n\n  
response_timeout_->invokeCallback();\n}\n\n// Sequence: upstream request hits soft per try timeout and is retried, and\n// then \"bad\" response headers come back before the retry has been scheduled.\n// Ensures that the \"bad\" headers are not sent downstream because there is\n// still an attempt pending.\nTEST_F(RouterTest, BadHeadersDroppedIfPreviousRetryScheduled) {\n  enableHedgeOnPerTryTimeout();\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder1 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        absl::optional<uint64_t>(absl::nullopt)))\n      .Times(2);\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  router_.retry_state_->expectHedgedPerTryTimeoutRetry();\n  per_try_timeout_->invokeCallback();\n\n  expectPerTryTimerCreate();\n\n  // Now send a 5xx back and make sure we don't ask whether we should retry it\n  // and also that we don't respond downstream with it.\n  Http::ResponseHeaderMapPtr response_headers1(\n      new 
Http::TestResponseHeaderMapImpl{{\":status\", \"500\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(500));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).Times(0);\n  EXPECT_CALL(*router_.retry_state_, wouldRetryFromHeaders(_)).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);\n  response_decoder1->decodeHeaders(std::move(response_headers1), true);\n\n  // Now trigger the retry for the per try timeout earlier.\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder2 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  router_.retry_state_->callback_();\n\n  Http::ResponseHeaderMapPtr response_headers2(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n        EXPECT_TRUE(end_stream);\n      }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder2->decodeHeaders(std::move(response_headers2), true);\n}\n\n// Test retrying a request, when the first attempt fails before the client\n// has sent any of the body.\nTEST_F(RouterTest, RetryRequestBeforeBody) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  
EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}, {\"myheader\", \"present\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  router_.retry_state_->expectResetRetry();\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef(\"myheader\", \"present\"), false));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Complete request. 
Ensure original headers are present.\n  const std::string body(\"body\");\n  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body), true));\n  Buffer::OwnedImpl buf(body);\n  router_.decodeData(buf, true);\n\n  // Send successful response, verify success.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}}));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Test retrying a request, when the first attempt fails while the client\n// is sending the body.\nTEST_F(RouterTest, RetryRequestDuringBody) {\n  Buffer::OwnedImpl decoding_buffer;\n  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));\n  EXPECT_CALL(callbacks_, addDecodedData(_, true))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}, {\"myheader\", \"present\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  const std::string body1(\"body1\");\n  Buffer::OwnedImpl buf1(body1);\n  
EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));\n  router_.decodeData(buf1, false);\n\n  router_.retry_state_->expectResetRetry();\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef(\"myheader\", \"present\"), false));\n  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body1), false));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Complete request. 
Ensure original headers are present.\n  const std::string body2(\"body2\");\n  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body2), true));\n  Buffer::OwnedImpl buf2(body2);\n  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));\n  router_.decodeData(buf2, true);\n\n  // Send successful response, verify success.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}}));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Test retrying a request, when the first attempt fails while the client\n// is sending the body, with more data arriving in between upstream attempts\n// (which would normally happen during the backoff timer interval), but not end_stream.\nTEST_F(RouterTest, RetryRequestDuringBodyDataBetweenAttemptsNotEndStream) {\n  Buffer::OwnedImpl decoding_buffer;\n  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));\n  EXPECT_CALL(callbacks_, addDecodedData(_, true))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"x-envoy-retry-on\", \"5xx\"}, 
{\"x-envoy-internal\", \"true\"}, {\"myheader\", \"present\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  const std::string body1(\"body1\");\n  Buffer::OwnedImpl buf1(body1);\n  EXPECT_CALL(*router_.retry_state_, enabled()).Times(3).WillRepeatedly(Return(true));\n  router_.decodeData(buf1, false);\n\n  router_.retry_state_->expectResetRetry();\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  const std::string body2(\"body2\");\n  Buffer::OwnedImpl buf2(body2);\n  router_.decodeData(buf2, false);\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef(\"myheader\", \"present\"), false));\n  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body1 + body2), false));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Complete request. 
Ensure original headers are present.\n  const std::string body3(\"body3\");\n  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body3), true));\n  Buffer::OwnedImpl buf3(body3);\n  router_.decodeData(buf3, true);\n\n  // Send successful response, verify success.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}}));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Test retrying a request, when the first attempt fails while the client\n// is sending the body, with the rest of the request arriving in between upstream\n// request attempts.\nTEST_F(RouterTest, RetryRequestDuringBodyCompleteBetweenAttempts) {\n  Buffer::OwnedImpl decoding_buffer;\n  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));\n  EXPECT_CALL(callbacks_, addDecodedData(_, true))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}, {\"myheader\", \"present\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  const std::string 
body1(\"body1\");\n  Buffer::OwnedImpl buf1(body1);\n  EXPECT_CALL(*router_.retry_state_, enabled()).Times(2).WillRepeatedly(Return(true));\n  router_.decodeData(buf1, false);\n\n  router_.retry_state_->expectResetRetry();\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  // Complete request while there is no upstream request.\n  const std::string body2(\"body2\");\n  Buffer::OwnedImpl buf2(body2);\n  router_.decodeData(buf2, true);\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef(\"myheader\", \"present\"), false));\n  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body1 + body2), true));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Send successful response, verify success.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}}));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Test retrying a request, when the first attempt fails while the client\n// is sending the body, with the trailers arriving in between upstream\n// request attempts.\nTEST_F(RouterTest, 
RetryRequestDuringBodyTrailerBetweenAttempts) {\n  Buffer::OwnedImpl decoding_buffer;\n  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));\n  EXPECT_CALL(callbacks_, addDecodedData(_, true))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}, {\"myheader\", \"present\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  const std::string body1(\"body1\");\n  Buffer::OwnedImpl buf1(body1);\n  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));\n  router_.decodeData(buf1, false);\n\n  router_.retry_state_->expectResetRetry();\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  // Complete request while there is no upstream request.\n  Http::TestRequestTrailerMapImpl trailers{{\"some\", \"trailer\"}};\n  router_.decodeTrailers(trailers);\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  
EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef(\"myheader\", \"present\"), false));\n  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body1), false));\n  EXPECT_CALL(encoder2, encodeTrailers(HeaderMapEqualRef(&trailers)));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Send successful response, verify success.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}}));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n      }));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Test retrying a request, when the first attempt fails while the client\n// is sending the body, with the rest of the request arriving in between upstream\n// request attempts, but exceeding the buffer limit causing a downstream request abort.\nTEST_F(RouterTest, RetryRequestDuringBodyBufferLimitExceeded) {\n  Buffer::OwnedImpl decoding_buffer;\n  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));\n  EXPECT_CALL(callbacks_, addDecodedData(_, true))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));\n  EXPECT_CALL(callbacks_.route_->route_entry_, retryShadowBufferLimit()).WillOnce(Return(10));\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            
callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}, {\"myheader\", \"present\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  const std::string body1(\"body1\");\n  Buffer::OwnedImpl buf1(body1);\n  EXPECT_CALL(*router_.retry_state_, enabled()).Times(2).WillRepeatedly(Return(true));\n  router_.decodeData(buf1, false);\n\n  router_.retry_state_->expectResetRetry();\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  // Complete request while there is no upstream request.\n  const std::string body2(50, 'a');\n  Buffer::OwnedImpl buf2(body2);\n  router_.decodeData(buf2, false);\n\n  EXPECT_EQ(callbacks_.details(), \"request_payload_exceeded_retry_buffer_limit\");\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"retry_or_shadow_abandoned\")\n                    .value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\n// Two requests are sent (slow request + hedged retry) and then global timeout\n// is hit. 
Verify everything gets cleaned up.\nTEST_F(RouterTest, HedgedPerTryTimeoutGlobalTimeout) {\n  enableHedgeOnPerTryTimeout();\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder1 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        absl::optional<uint64_t>(absl::nullopt)))\n      .Times(2);\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);\n  router_.retry_state_->expectHedgedPerTryTimeoutRetry();\n  per_try_timeout_->invokeCallback();\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            
response_decoder2 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n\n  // Now trigger global timeout, expect everything to be reset\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(1);\n  EXPECT_CALL(encoder2.stream_, resetStream(_)).Times(1);\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"504\");\n      }));\n  response_timeout_->invokeCallback();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));\n  EXPECT_EQ(2, cm_.conn_pool_.host_->stats_.rq_timeout_.value());\n  // TODO: Verify hedge stats here once they are implemented.\n}\n\n// Sequence: 1) per try timeout w/ hedge retry, 2) second request gets a 5xx\n// response, no retries remaining 3) first request gets a 5xx response.\nTEST_F(RouterTest, HedgingRetriesExhaustedBadResponse) {\n  enableHedgeOnPerTryTimeout();\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder1 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, 
upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        absl::optional<uint64_t>(absl::nullopt)))\n      .Times(1);\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);\n  router_.retry_state_->expectHedgedPerTryTimeoutRetry();\n  per_try_timeout_->invokeCallback();\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder2 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        absl::optional<uint64_t>(absl::nullopt)))\n      .Times(1);\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n\n  // Now trigger a 503 in response to the second request.\n  Http::ResponseHeaderMapPtr bad_response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _))\n      .WillOnce(Return(RetryStatus::NoRetryLimitExceeded));\n  response_decoder2->decodeHeaders(std::move(bad_response_headers1), true);\n\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Now trigger a 502 in response to the first request.\n  Http::ResponseHeaderMapPtr bad_response_headers2(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"502\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(502));\n\n  // We should not call shouldRetryHeaders() because you never retry the same\n  // request twice.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).Times(0);\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"502\");\n      }));\n  response_decoder1->decodeHeaders(std::move(bad_response_headers2), true);\n\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));\n}\n\n// Sequence: 1) per try timeout w/ hedge retry, 2) first request gets reset by upstream,\n// 3) 2nd request gets a 200 which should be sent downstream.\nTEST_F(RouterTest, HedgingRetriesProceedAfterReset) {\n  enableHedgeOnPerTryTimeout();\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder1 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n           
 response_decoder1 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  // First is reset\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _))\n      .Times(1);\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        absl::optional<uint64_t>(absl::nullopt)))\n      .Times(2);\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);\n  router_.retry_state_->expectHedgedPerTryTimeoutRetry();\n  per_try_timeout_->invokeCallback();\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Http::ResponseDecoder* response_decoder2 = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder2 = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  
router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n\n  // Now trigger an upstream reset in response to the first request.\n  EXPECT_CALL(encoder1.stream_, resetStream(_));\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We should not call shouldRetryReset() because you never retry the same\n  // request twice.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _)).Times(0);\n\n  // Now trigger a 200 in response to the second request.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n      }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder2->decodeHeaders(std::move(response_headers), true);\n\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Sequence: 1) request with data hits per try timeout w/ hedge retry, 2)\n// second request is immediately reset 3) 1st request gets a 200.\n// The goal of this test is to ensure that the router can properly detect that an immediate\n// reset happens and that we don't accidentally write data twice on the first request.\nTEST_F(RouterTest, HedgingRetryImmediatelyReset) {\n  enableHedgeOnPerTryTimeout();\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> 
Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                        absl::optional<uint64_t>(absl::nullopt)))\n      .Times(1);\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n  Buffer::OwnedImpl body(\"test body\");\n  EXPECT_CALL(encoder, encodeData(_, _)).Times(1);\n  Buffer::InstancePtr body_data(new Buffer::OwnedImpl(\"hello\"));\n  router_.retry_state_->expectHedgedPerTryTimeoutRetry();\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, true));\n\n  EXPECT_CALL(\n      cm_.conn_pool_.host_->outlier_detector_,\n      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));\n  EXPECT_CALL(encoder.stream_, resetStream(_)).Times(0);\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);\n  per_try_timeout_->invokeCallback();\n\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks)\n                           -> Http::ConnectionPool::Cancellable* {\n        EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n        EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n                    putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n        callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure,\n                                
absl::string_view(), cm_.conn_pool_.host_);\n        return nullptr;\n      }));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _))\n      .WillOnce(Return(RetryStatus::NoRetryLimitExceeded));\n  ON_CALL(callbacks_, decodingBuffer()).WillByDefault(Return(body_data.get()));\n  router_.retry_state_->callback_();\n\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Now trigger a 200 in response to the first request.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n\n  // The request was already retried when the per try timeout occurred so it\n  // should't even consult the retry state.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).Times(0);\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.Status()->value(), \"200\");\n      }));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n  // Pool failure for the first try, so only 1 upstream request was made.\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, RetryNoneHealthy) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  expectResponseTimerCreate();\n  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))\n      
.WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {\n        EXPECT_EQ(host_address_, host->address());\n      }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  router_.retry_state_->expectResetRetry();\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  encoder1.stream_.resetStream(Http::StreamResetReason::LocalReset);\n\n  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)).WillOnce(Return(nullptr));\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"503\"}, {\"content-length\", \"19\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream));\n  router_.retry_state_->callback_();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n  // Pool failure for the first try, so only 1 upstream request was made.\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, RetryUpstreamReset) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl 
headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, addDecodedData(_, _)).Times(1);\n  Buffer::OwnedImpl body(\"test body\");\n  router_.decodeData(body, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  router_.retry_state_->expectResetRetry();\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  // We expect this reset to kick off a new request.\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n                        putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                                  absl::optional<uint64_t>(absl::nullopt)));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, 
putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\nTEST_F(RouterTest, NoRetryWithBodyLimit) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  // Set a per route body limit which disallows any buffering.\n  EXPECT_CALL(callbacks_.route_->route_entry_, retryShadowBufferLimit()).WillOnce(Return(0));\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  // Unlike RetryUpstreamReset above the data won't be buffered as the body exceeds the buffer limit\n  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, addDecodedData(_, _)).Times(0);\n  Buffer::OwnedImpl body(\"t\");\n  router_.decodeData(body, false);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n}\n\n// Verifies that when the request fails with an upstream reset (per try timeout in this case)\n// before an upstream host has been established, then the onHostAttempted function will not be\n// invoked. 
This ensures that we're not passing a null host to the retry plugins.\nTEST_F(RouterTest, RetryUpstreamPerTryTimeout) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"},\n                                         {\"x-envoy-internal\", \"true\"},\n                                         {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  router_.retry_state_->expectResetRetry();\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  per_try_timeout_->invokeCallback();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect this reset to kick off a new request.\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n                        
putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,\n                                  absl::optional<uint64_t>(absl::nullopt)));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Asserts that onHostAttempted is *not* called when the upstream connection fails in such\n// a way that no host is present.\nTEST_F(RouterTest, RetryUpstreamConnectionFailure) {\n  Http::ConnectionPool::Callbacks* conn_pool_callbacks;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks)\n                           -> Http::ConnectionPool::Cancellable* {\n        conn_pool_callbacks = &callbacks;\n        return nullptr;\n      }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)).Times(0);\n\n  router_.retry_state_->expectResetRetry();\n\n  conn_pool_callbacks->onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure,\n                                     absl::string_view(), nullptr);\n  // Pool failure, so no upstream request 
was made.\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseDecoder* response_decoder = nullptr;\n  // We expect this reset to kick off a new request.\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\nTEST_F(RouterTest, DontResetStartedResponseOnUpstreamPerTryTimeout) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectPerTryTimerCreate();\n  expectResponseTimerCreate();\n\n  
Http::TestRequestHeaderMapImpl headers{{\"x-envoy-internal\", \"true\"},\n                                         {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Since the response is already started we don't retry.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  Buffer::OwnedImpl body(\"test body\");\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n  per_try_timeout_->invokeCallback();\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  response_decoder->decodeData(body, true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n  EXPECT_EQ(0U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_rq_per_try_timeout\")\n                    .value());\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, RetryUpstreamResetResponseStarted) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl 
headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Since the response is already started we don't retry.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  // Normally, sendLocalReply will actually send the reply, but in this case the\n  // HCM will detect the headers have already been sent and not route through\n  // the encoder again.\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _, _, _, _)).WillOnce(testing::InvokeWithoutArgs([] {\n  }));\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n  // For normal HTTP, once we have a 200 we consider this a success, even if a\n  // later reset occurs.\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\n// The router filter is responsible for not propagating 100-continue headers after the initial 100.\nTEST_F(RouterTest, Coalesce100ContinueHeaders) {\n  // Setup.\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> 
Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Initial 100-continue, this is processed normally.\n  EXPECT_CALL(callbacks_, encode100ContinueHeaders_(_));\n  {\n    Http::ResponseHeaderMapPtr continue_headers(\n        new Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n    response_decoder->decode100ContinueHeaders(std::move(continue_headers));\n  }\n  EXPECT_EQ(\n      1U,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_100\").value());\n\n  // No encode100ContinueHeaders() invocation for the second 100-continue (but we continue to track\n  // stats from upstream).\n  EXPECT_CALL(callbacks_, encode100ContinueHeaders_(_)).Times(0);\n  {\n    Http::ResponseHeaderMapPtr continue_headers(\n        new Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n    response_decoder->decode100ContinueHeaders(std::move(continue_headers));\n  }\n  EXPECT_EQ(\n      2U,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_100\").value());\n\n  // Reset stream and cleanup.\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, RetryUpstreamReset100ContinueResponseStarted) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* 
response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // The 100-continue will result in resetting retry_state_, so when the stream\n  // is reset we won't even check shouldRetryReset() (or shouldRetryHeaders()).\n  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _)).Times(0);\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).Times(0);\n  EXPECT_CALL(callbacks_, encode100ContinueHeaders_(_));\n  Http::ResponseHeaderMapPtr continue_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n  response_decoder->decode100ContinueHeaders(std::move(continue_headers));\n  EXPECT_EQ(\n      1U,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_100\").value());\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, RetryUpstream5xx) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      
.WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the 5xx response to kick off a new request.\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(cm_.conn_pool_.host_->health_checker_, 
setUnhealthy()).Times(0);\n  Http::ResponseHeaderMapPtr response_headers2(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers2), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\nTEST_F(RouterTest, RetryTimeoutDuringRetryDelay) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // Fire timeout.\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResponseTime(_)).Times(0);\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, 
{\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  response_timeout_->invokeCallback();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n}\n\nTEST_F(RouterTest, MaxStreamDurationValidlyConfiguredWithoutRetryPolicy) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  setUpstreamMaxStreamDuration(500);\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectMaxStreamDurationTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  max_stream_duration_timer_->invokeCallback();\n\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n}\n\nTEST_F(RouterTest, MaxStreamDurationDisabledIfSetToZero) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  setUpstreamMaxStreamDuration(0);\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  // not to be called timer creation.\n  EXPECT_CALL(callbacks_.dispatcher_, createTimer_).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  
router_.decodeHeaders(headers, false);\n\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n}\n\nTEST_F(RouterTest, MaxStreamDurationCallbackNotCalled) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  setUpstreamMaxStreamDuration(5000);\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectMaxStreamDurationTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n}\n\nTEST_F(RouterTest, MaxStreamDurationWhenDownstreamAlreadyStartedWithoutRetryPolicy) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  setUpstreamMaxStreamDuration(500);\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectMaxStreamDurationTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n  
max_stream_duration_timer_->invokeCallback();\n\n  router_.onDestroy();\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\nTEST_F(RouterTest, MaxStreamDurationWithRetryPolicy) {\n  // First upstream request\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  setUpstreamMaxStreamDuration(500);\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectMaxStreamDurationTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"reset\"},\n                                         {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  router_.retry_state_->expectResetRetry();\n  max_stream_duration_timer_->invokeCallback();\n\n  // Second upstream request\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  setUpstreamMaxStreamDuration(500);\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectMaxStreamDurationTimerCreate();\n  router_.retry_state_->callback_();\n\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  
response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\nTEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  Envoy::ConnectionPool::MockCancellable cancellable;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](Http::ResponseDecoder& decoder,\n                           Http::ConnectionPool::Callbacks&) -> Http::ConnectionPool::Cancellable* {\n        response_decoder = &decoder;\n        return &cancellable;\n      }));\n  router_.retry_state_->callback_();\n\n  // Fire timeout.\n  EXPECT_CALL(cancellable, cancel(_));\n  EXPECT_CALL(callbacks_.stream_info_,\n              
setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResponseTime(_)).Times(0);\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  response_timeout_->invokeCallback();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n  // Timeout fired so no retry was done.\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\n// Retry timeout during a retry delay leading to no upstream host, as well as an alt response code.\nTEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHostAltResponseCode) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"},\n                                         {\"x-envoy-internal\", \"true\"},\n                                         {\"x-envoy-upstream-rq-timeout-alt-response\", \"204\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n    
  new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  Envoy::ConnectionPool::MockCancellable cancellable;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](Http::ResponseDecoder& decoder,\n                           Http::ConnectionPool::Callbacks&) -> Http::ConnectionPool::Cancellable* {\n        response_decoder = &decoder;\n        return &cancellable;\n      }));\n  router_.retry_state_->callback_();\n\n  // Fire timeout.\n  EXPECT_CALL(cancellable, cancel(_));\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));\n\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResponseTime(_)).Times(0);\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"204\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  response_timeout_->invokeCallback();\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n  // no retry was done.\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n}\n\nTEST_F(RouterTest, RetryUpstream5xxNotComplete) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, 
{\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  Buffer::InstancePtr body_data(new Buffer::OwnedImpl(\"hello\"));\n  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, addDecodedData(_, true));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, false));\n\n  Http::TestRequestTrailerMapImpl trailers{{\"some\", \"trailer\"}};\n  router_.decodeTrailers(trailers);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(encoder1.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers1), false);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the 5xx response to kick off a new request.\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  ON_CALL(callbacks_, decodingBuffer()).WillByDefault(Return(body_data.get()));\n  EXPECT_CALL(encoder2, encodeHeaders(_, false));\n  EXPECT_CALL(encoder2, encodeData(_, false));\n  EXPECT_CALL(encoder2, encodeTrailers(_));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResponseTime(_));\n  EXPECT_CALL(cm_.conn_pool_.host_->health_checker_, setUnhealthy());\n  Http::ResponseHeaderMapPtr response_headers2(new Http::TestResponseHeaderMapImpl{\n      {\":status\", \"200\"}, {\"x-envoy-immediate-health-check-fail\", \"true\"}});\n  response_decoder->decodeHeaders(std::move(response_headers2), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"retry.upstream_rq_503\")\n                .value());\n  EXPECT_EQ(\n      1U,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"upstream_rq_200\").value());\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"zone.zone_name.to_az.upstream_rq_200\")\n                    .value());\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"zone.zone_name.to_az.upstream_rq_2xx\")\n                    .value());\n}\n\n// Validate gRPC Cancelled response stats are sane when retry is taking effect.\nTEST_F(RouterTest, RetryUpstreamGrpcCancelled) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  
expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-grpc-on\", \"cancelled\"},\n                                         {\"x-envoy-internal\", \"true\"},\n                                         {\"content-type\", \"application/grpc\"},\n                                         {\"grpc-timeout\", \"20S\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // gRPC with status \"cancelled\" (1)\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"grpc-status\", \"1\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(499));\n  response_decoder->decodeHeaders(std::move(response_headers1), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the grpc-status to result in a retried request.\n  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"grpc-status\", \"0\"}});\n  
EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Verifies that the initial host is select with max host count of one, but during retries\n// RetryPolicy will be consulted.\nTEST_F(RouterTest, RetryRespsectsMaxHostSelectionCount) {\n  router_.reject_all_hosts_ = true;\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  ON_CALL(*router_.retry_state_, hostSelectionMaxAttempts()).WillByDefault(Return(3));\n  // The router should accept any host at this point, since we're not in a retry.\n  EXPECT_EQ(1, router_.hostSelectionRetryCount());\n\n  Buffer::InstancePtr body_data(new Buffer::OwnedImpl(\"hello\"));\n  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, addDecodedData(_, true));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, false));\n\n  Http::TestRequestTrailerMapImpl trailers{{\"some\", \"trailer\"}};\n  router_.decodeTrailers(trailers);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  
Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(encoder1.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  response_decoder->decodeHeaders(std::move(response_headers1), false);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the 5xx response to kick off a new request.\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  ON_CALL(callbacks_, decodingBuffer()).WillByDefault(Return(body_data.get()));\n  EXPECT_CALL(encoder2, encodeHeaders(_, false));\n  EXPECT_CALL(encoder2, encodeData(_, false));\n  EXPECT_CALL(encoder2, encodeTrailers(_));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Now that we're triggered a retry, we should see the configured number of host selections.\n  EXPECT_EQ(3, router_.hostSelectionRetryCount());\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(cm_.conn_pool_.host_->health_checker_, setUnhealthy()).Times(0);\n  Http::ResponseHeaderMapPtr response_headers2(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers2), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\n// Verifies that the initial 
request accepts any host, but during retries\n// RetryPolicy will be consulted.\nTEST_F(RouterTest, RetryRespectsRetryHostPredicate) {\n  router_.reject_all_hosts_ = true;\n\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  NiceMock<Upstream::MockHost> host;\n  // The router should accept any host at this point, since we're not in a retry.\n  EXPECT_FALSE(router_.shouldSelectAnotherHost(host));\n\n  Buffer::InstancePtr body_data(new Buffer::OwnedImpl(\"hello\"));\n  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, addDecodedData(_, true));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, false));\n\n  Http::TestRequestTrailerMapImpl trailers{{\"some\", \"trailer\"}};\n  router_.decodeTrailers(trailers);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // 5xx response.\n  router_.retry_state_->expectHeadersRetry();\n  Http::ResponseHeaderMapPtr response_headers1(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  EXPECT_CALL(encoder1.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503));\n  
response_decoder->decodeHeaders(std::move(response_headers1), false);\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));\n\n  // We expect the 5xx response to kick off a new request.\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  ON_CALL(callbacks_, decodingBuffer()).WillByDefault(Return(body_data.get()));\n  EXPECT_CALL(encoder2, encodeHeaders(_, false));\n  EXPECT_CALL(encoder2, encodeData(_, false));\n  EXPECT_CALL(encoder2, encodeTrailers(_));\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Now that we're triggered a retry, we should see the router reject hosts.\n  EXPECT_TRUE(router_.shouldSelectAnotherHost(host));\n\n  // Normal response.\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  EXPECT_CALL(cm_.conn_pool_.host_->health_checker_, setUnhealthy()).Times(0);\n  Http::ResponseHeaderMapPtr response_headers2(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  response_decoder->decodeHeaders(std::move(response_headers2), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));\n}\n\nTEST_F(RouterTest, InternalRedirectRejectedWhenReachingMaxInternalRedirect) {\n  enableRedirects(3);\n  setNumPreviousRedirect(3);\n  sendRequest();\n\n  EXPECT_CALL(callbacks_, recreateStream()).Times(0);\n\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);\n\n  Buffer::OwnedImpl data(\"1234567890\");\n  
response_decoder_->decodeData(data, true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_failed_total\")\n                    .value());\n  EXPECT_EQ(1UL,\n            stats_store_.counter(\"test.passthrough_internal_redirect_too_many_redirects\").value());\n}\n\nTEST_F(RouterTest, InternalRedirectRejectedWithEmptyLocation) {\n  enableRedirects();\n  sendRequest();\n\n  redirect_headers_->setLocation(\"\");\n\n  EXPECT_CALL(callbacks_, recreateStream()).Times(0);\n\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);\n\n  Buffer::OwnedImpl data(\"1234567890\");\n  response_decoder_->decodeData(data, true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_failed_total\")\n                    .value());\n  EXPECT_EQ(1UL, stats_store_.counter(\"test.passthrough_internal_redirect_bad_location\").value());\n}\n\nTEST_F(RouterTest, InternalRedirectRejectedWithInvalidLocation) {\n  enableRedirects();\n  sendRequest();\n\n  redirect_headers_->setLocation(\"h\");\n\n  EXPECT_CALL(callbacks_, recreateStream()).Times(0);\n\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);\n\n  Buffer::OwnedImpl data(\"1234567890\");\n  response_decoder_->decodeData(data, true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_failed_total\")\n                    .value());\n  EXPECT_EQ(1UL, stats_store_.counter(\"test.passthrough_internal_redirect_bad_location\").value());\n}\n\nTEST_F(RouterTest, InternalRedirectRejectedWithoutCompleteRequest) {\n  enableRedirects();\n\n  sendRequest(false);\n\n  EXPECT_CALL(callbacks_, recreateStream()).Times(0);\n\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);\n\n  Buffer::OwnedImpl data(\"1234567890\");\n  
response_decoder_->decodeData(data, true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_failed_total\")\n                    .value());\n}\n\nTEST_F(RouterTest, InternalRedirectRejectedWithoutLocation) {\n  enableRedirects();\n\n  sendRequest();\n\n  redirect_headers_->removeLocation();\n\n  EXPECT_CALL(callbacks_, recreateStream()).Times(0);\n\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);\n  Buffer::OwnedImpl data(\"1234567890\");\n  response_decoder_->decodeData(data, true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_failed_total\")\n                    .value());\n}\n\nTEST_F(RouterTest, InternalRedirectRejectedWithBody) {\n  enableRedirects();\n\n  sendRequest();\n\n  Buffer::InstancePtr body_data(new Buffer::OwnedImpl(\"random_fake_data\"));\n  EXPECT_CALL(callbacks_, decodingBuffer()).WillOnce(Return(body_data.get()));\n  EXPECT_CALL(callbacks_, recreateStream()).Times(0);\n\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);\n  Buffer::OwnedImpl data(\"1234567890\");\n  response_decoder_->decodeData(data, true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_failed_total\")\n                    .value());\n}\n\nTEST_F(RouterTest, CrossSchemeRedirectRejectedByPolicy) {\n  enableRedirects();\n\n  sendRequest();\n\n  redirect_headers_->setLocation(\"https://www.foo.com\");\n\n  EXPECT_CALL(callbacks_, decodingBuffer()).Times(1);\n  EXPECT_CALL(callbacks_, recreateStream()).Times(0);\n\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_failed_total\")\n                    .value());\n  EXPECT_EQ(1UL, 
stats_store_.counter(\"test.passthrough_internal_redirect_unsafe_scheme\").value());\n}\n\nTEST_F(RouterTest, InternalRedirectRejectedByPredicate) {\n  enableRedirects();\n\n  sendRequest();\n\n  redirect_headers_->setLocation(\"http://www.foo.com/some/path\");\n\n  auto mock_predicate = std::make_shared<NiceMock<MockInternalRedirectPredicate>>();\n\n  EXPECT_CALL(callbacks_, decodingBuffer()).Times(1);\n  EXPECT_CALL(callbacks_, clearRouteCache()).Times(1);\n  EXPECT_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, predicates())\n      .WillOnce(Return(std::vector<InternalRedirectPredicateSharedPtr>({mock_predicate})));\n  EXPECT_CALL(*mock_predicate, acceptTargetRoute(_, _, _, _)).WillOnce(Return(false));\n  ON_CALL(*mock_predicate, name()).WillByDefault(Return(\"mock_predicate\"));\n  EXPECT_CALL(callbacks_, recreateStream()).Times(0);\n\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_failed_total\")\n                    .value());\n  EXPECT_EQ(1UL, stats_store_.counter(\"test.passthrough_internal_redirect_predicate\").value());\n\n  // Make sure the original host/path is preserved.\n  EXPECT_EQ(\"host\", default_request_headers_.getHostValue());\n  EXPECT_EQ(\"/\", default_request_headers_.getPathValue());\n  // Make sure x-envoy-original-url is not set for unsuccessful redirect.\n  EXPECT_EQ(nullptr, default_request_headers_.EnvoyOriginalUrl());\n}\n\nTEST_F(RouterTest, HttpInternalRedirectSucceeded) {\n  enableRedirects(3);\n  setNumPreviousRedirect(2);\n  default_request_headers_.setForwardedProto(\"http\");\n  sendRequest();\n\n  EXPECT_CALL(callbacks_, decodingBuffer()).Times(1);\n  EXPECT_CALL(callbacks_, clearRouteCache()).Times(1);\n  EXPECT_CALL(callbacks_, recreateStream()).Times(1).WillOnce(Return(true));\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);\n  
EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_succeeded_total\")\n                    .value());\n\n  // In production, the HCM recreateStream would have called this.\n  router_.onDestroy();\n  EXPECT_EQ(3, callbacks_.streamInfo()\n                   .filterState()\n                   ->getDataMutable<StreamInfo::UInt32Accessor>(\"num_internal_redirects\")\n                   .value());\n}\n\nTEST_F(RouterTest, HttpsInternalRedirectSucceeded) {\n  auto ssl_connection = std::make_shared<Ssl::MockConnectionInfo>();\n  enableRedirects(3);\n  setNumPreviousRedirect(1);\n\n  sendRequest();\n\n  redirect_headers_->setLocation(\"https://www.foo.com\");\n  EXPECT_CALL(connection_, ssl()).Times(1).WillOnce(Return(ssl_connection));\n  EXPECT_CALL(callbacks_, decodingBuffer()).Times(1);\n  EXPECT_CALL(callbacks_, clearRouteCache()).Times(1);\n  EXPECT_CALL(callbacks_, recreateStream()).Times(1).WillOnce(Return(true));\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_succeeded_total\")\n                    .value());\n\n  // In production, the HCM recreateStream would have called this.\n  router_.onDestroy();\n}\n\nTEST_F(RouterTest, CrossSchemeRedirectAllowedByPolicy) {\n  auto ssl_connection = std::make_shared<Ssl::MockConnectionInfo>();\n  enableRedirects();\n\n  sendRequest();\n\n  redirect_headers_->setLocation(\"http://www.foo.com\");\n  EXPECT_CALL(connection_, ssl()).Times(1).WillOnce(Return(ssl_connection));\n  EXPECT_CALL(callbacks_, decodingBuffer()).Times(1);\n  EXPECT_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_,\n              isCrossSchemeRedirectAllowed())\n      .WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, clearRouteCache()).Times(1);\n  EXPECT_CALL(callbacks_, 
recreateStream()).Times(1).WillOnce(Return(true));\n  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_internal_redirect_succeeded_total\")\n                    .value());\n\n  // In production, the HCM recreateStream would have called this.\n  router_.onDestroy();\n}\n\nTEST_F(RouterTest, Shadow) {\n  ShadowPolicyPtr policy = std::make_unique<TestShadowPolicy>(\"foo\", \"bar\");\n  callbacks_.route_->route_entry_.shadow_policies_.push_back(std::move(policy));\n  policy = std::make_unique<TestShadowPolicy>(\"fizz\", \"buzz\", envoy::type::v3::FractionalPercent(),\n                                              false);\n  callbacks_.route_->route_entry_.shadow_policies_.push_back(std::move(policy));\n  ON_CALL(callbacks_, streamId()).WillByDefault(Return(43));\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"bar\", 0, 43, 10000)).WillOnce(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"buzz\", 0, 43, 10000)).WillOnce(Return(true));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  Buffer::InstancePtr body_data(new Buffer::OwnedImpl(\"hello\"));\n  EXPECT_CALL(callbacks_, addDecodedData(_, true));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, false));\n\n  
Http::TestRequestTrailerMapImpl trailers{{\"some\", \"trailer\"}};\n  EXPECT_CALL(callbacks_, decodingBuffer())\n      .Times(AtLeast(2))\n      .WillRepeatedly(Return(body_data.get()));\n  EXPECT_CALL(*shadow_writer_, shadow_(\"foo\", _, _))\n      .WillOnce(Invoke([](const std::string&, Http::RequestMessagePtr& request,\n                          const Http::AsyncClient::RequestOptions& options) -> void {\n        EXPECT_NE(request->body().length(), 0);\n        EXPECT_NE(nullptr, request->trailers());\n        EXPECT_EQ(absl::optional<std::chrono::milliseconds>(10), options.timeout);\n        EXPECT_TRUE(options.sampled_);\n      }));\n  EXPECT_CALL(*shadow_writer_, shadow_(\"fizz\", _, _))\n      .WillOnce(Invoke([](const std::string&, Http::RequestMessagePtr& request,\n                          const Http::AsyncClient::RequestOptions& options) -> void {\n        EXPECT_NE(request->body().length(), 0);\n        EXPECT_NE(nullptr, request->trailers());\n        EXPECT_EQ(absl::optional<std::chrono::milliseconds>(10), options.timeout);\n        EXPECT_FALSE(options.sampled_);\n      }));\n  router_.decodeTrailers(trailers);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n}\n\nTEST_F(RouterTest, AltStatName) {\n  // Also test no upstream timeout here.\n  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())\n      .WillOnce(Return(std::chrono::milliseconds(0)));\n  EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              
Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-alt-stat-name\", \"alt_stat\"},\n                                         {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200));\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResponseTime(_));\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                          {\"x-envoy-upstream-canary\", \"true\"},\n                                          {\"x-envoy-virtual-cluster\", \"hello\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n\n  EXPECT_EQ(1U,\n            stats_store_.counter(\"vhost.fake_vhost.vcluster.fake_virtual_cluster.upstream_rq_200\")\n                .value());\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"canary.upstream_rq_200\")\n                .value());\n  EXPECT_EQ(\n      1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"alt_stat.upstream_rq_200\")\n              .value());\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"alt_stat.zone.zone_name.to_az.upstream_rq_200\")\n                    .value());\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"alt_stat.zone.zone_name.to_az.upstream_rq_200\")\n            
        .value());\n}\n\nTEST_F(RouterTest, Redirect) {\n  MockDirectResponseEntry direct_response;\n  std::string route_name(\"route-test-name\");\n  EXPECT_CALL(direct_response, newPath(_)).WillOnce(Return(\"hello\"));\n  EXPECT_CALL(direct_response, routeName()).WillOnce(ReturnRef(route_name));\n  EXPECT_CALL(direct_response, rewritePathHeader(_, _));\n  EXPECT_CALL(direct_response, responseCode()).WillRepeatedly(Return(Http::Code::MovedPermanently));\n  EXPECT_CALL(direct_response, responseBody()).WillOnce(ReturnRef(EMPTY_STRING));\n  EXPECT_CALL(direct_response, finalizeResponseHeaders(_, _));\n  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_response));\n  absl::string_view route_name_view(route_name);\n  EXPECT_CALL(callbacks_.stream_info_, setRouteName(route_name_view));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"301\"}, {\"location\", \"hello\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n}\n\nTEST_F(RouterTest, RedirectFound) {\n  MockDirectResponseEntry direct_response;\n  std::string route_name(\"route-test-name\");\n  EXPECT_CALL(direct_response, newPath(_)).WillOnce(Return(\"hello\"));\n  EXPECT_CALL(direct_response, routeName()).WillOnce(ReturnRef(route_name));\n  EXPECT_CALL(direct_response, rewritePathHeader(_, _));\n  EXPECT_CALL(direct_response, responseCode()).WillRepeatedly(Return(Http::Code::Found));\n  EXPECT_CALL(direct_response, responseBody()).WillOnce(ReturnRef(EMPTY_STRING));\n  EXPECT_CALL(direct_response, finalizeResponseHeaders(_, _));\n  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_response));\n 
 absl::string_view route_name_view(route_name);\n  EXPECT_CALL(callbacks_.stream_info_, setRouteName(route_name_view));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"302\"}, {\"location\", \"hello\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n}\n\nTEST_F(RouterTest, DirectResponse) {\n  NiceMock<MockDirectResponseEntry> direct_response;\n  std::string route_name(\"route-test-name\");\n  EXPECT_CALL(direct_response, routeName()).WillOnce(ReturnRef(route_name));\n  EXPECT_CALL(direct_response, responseCode()).WillRepeatedly(Return(Http::Code::OK));\n  EXPECT_CALL(direct_response, responseBody()).WillRepeatedly(ReturnRef(EMPTY_STRING));\n  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_response));\n  absl::string_view route_name_view(route_name);\n  EXPECT_CALL(callbacks_.stream_info_, setRouteName(route_name_view));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  EXPECT_CALL(span_, injectContext(_)).Times(0);\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value());\n}\n\nTEST_F(RouterTest, DirectResponseWithBody) {\n  NiceMock<MockDirectResponseEntry> direct_response;\n  std::string route_name(\"route-test-name\");\n  EXPECT_CALL(direct_response, 
routeName()).WillOnce(ReturnRef(route_name));\n  EXPECT_CALL(direct_response, responseCode()).WillRepeatedly(Return(Http::Code::OK));\n  const std::string response_body(\"static response\");\n  EXPECT_CALL(direct_response, responseBody()).WillRepeatedly(ReturnRef(response_body));\n  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_response));\n  absl::string_view route_name_view(route_name);\n  EXPECT_CALL(callbacks_.stream_info_, setRouteName(route_name_view));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"}, {\"content-length\", \"15\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value());\n}\n\nTEST_F(RouterTest, DirectResponseWithLocation) {\n  NiceMock<MockDirectResponseEntry> direct_response;\n  std::string route_name(\"route-test-name\");\n  EXPECT_CALL(direct_response, newPath(_)).WillOnce(Return(\"http://host/\"));\n  EXPECT_CALL(direct_response, routeName()).WillOnce(ReturnRef(route_name));\n  EXPECT_CALL(direct_response, responseCode()).WillRepeatedly(Return(Http::Code::Created));\n  EXPECT_CALL(direct_response, responseBody()).WillRepeatedly(ReturnRef(EMPTY_STRING));\n  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_response));\n  absl::string_view route_name_view(route_name);\n  EXPECT_CALL(callbacks_.stream_info_, setRouteName(route_name_view));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"201\"},\n                                                  
 {\"location\", \"http://host/\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  EXPECT_CALL(span_, injectContext(_)).Times(0);\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value());\n}\n\nTEST_F(RouterTest, DirectResponseWithoutLocation) {\n  NiceMock<MockDirectResponseEntry> direct_response;\n  std::string route_name(\"route-test-name\");\n  EXPECT_CALL(direct_response, newPath(_)).WillOnce(Return(\"http://host/\"));\n  EXPECT_CALL(direct_response, routeName()).WillOnce(ReturnRef(route_name));\n  EXPECT_CALL(direct_response, responseCode()).WillRepeatedly(Return(Http::Code::OK));\n  EXPECT_CALL(direct_response, responseBody()).WillRepeatedly(ReturnRef(EMPTY_STRING));\n  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_response));\n  absl::string_view route_name_view(route_name);\n  EXPECT_CALL(callbacks_.stream_info_, setRouteName(route_name_view));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  EXPECT_CALL(span_, injectContext(_)).Times(0);\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(0U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));\n  EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value());\n}\n\n// Allows verifying the state of the upstream StreamInfo\nclass TestAccessLog : public AccessLog::Instance {\npublic:\n  explicit TestAccessLog(std::function<void(const 
StreamInfo::StreamInfo&)> func) : func_(func) {}\n\n  void log(const Http::RequestHeaderMap*, const Http::ResponseHeaderMap*,\n           const Http::ResponseTrailerMap*, const StreamInfo::StreamInfo& info) override {\n    func_(info);\n  }\n\nprivate:\n  std::function<void(const StreamInfo::StreamInfo&)> func_;\n};\n\n// Verifies that we propagate the upstream connection filter state to the upstream request filter\n// state.\nTEST_F(RouterTest, PropagatesUpstreamFilterState) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n\n  // This pattern helps ensure that we're actually invoking the callback.\n  bool filter_state_verified = false;\n  router_.config().upstream_logs_.push_back(\n      std::make_shared<TestAccessLog>([&](const auto& stream_info) {\n        filter_state_verified = stream_info.upstreamFilterState()->hasDataWithName(\"upstream data\");\n      }));\n\n  upstream_stream_info_.filterState()->setData(\n      \"upstream data\", std::make_unique<StreamInfo::UInt32AccessorImpl>(123),\n      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Connection);\n  expectResponseTimerCreate();\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n\n  
EXPECT_TRUE(filter_state_verified);\n}\n\nTEST_F(RouterTest, UpstreamSSLConnection) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n\n  std::string session_id = \"D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B\";\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(session_id));\n  upstream_stream_info_.setDownstreamSslConnection(connection_info);\n\n  expectResponseTimerCreate();\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n\n  ASSERT_NE(nullptr, callbacks_.streamInfo().upstreamSslConnection());\n  EXPECT_EQ(session_id, callbacks_.streamInfo().upstreamSslConnection()->sessionId());\n}\n\n// Verify that upstream timing information is set into the StreamInfo after the upstream\n// request completes.\nTEST_F(RouterTest, UpstreamTimingSingleRequest) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n          
    Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  StreamInfo::StreamInfoImpl stream_info(test_time_.timeSystem());\n  ON_CALL(callbacks_, streamInfo()).WillByDefault(ReturnRef(stream_info));\n  EXPECT_FALSE(stream_info.firstUpstreamTxByteSent().has_value());\n  EXPECT_FALSE(stream_info.lastUpstreamTxByteSent().has_value());\n  EXPECT_FALSE(stream_info.firstUpstreamRxByteReceived().has_value());\n  EXPECT_FALSE(stream_info.lastUpstreamRxByteReceived().has_value());\n\n  Http::TestRequestHeaderMapImpl headers{};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  test_time_.advanceTimeWait(std::chrono::milliseconds(32));\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n  test_time_.advanceTimeWait(std::chrono::milliseconds(43));\n\n  // Confirm we still have no upstream timing data. 
It won't be set until after the\n  // stream has ended.\n  EXPECT_FALSE(stream_info.firstUpstreamTxByteSent().has_value());\n  EXPECT_FALSE(stream_info.lastUpstreamTxByteSent().has_value());\n  EXPECT_FALSE(stream_info.firstUpstreamRxByteReceived().has_value());\n  EXPECT_FALSE(stream_info.lastUpstreamRxByteReceived().has_value());\n\n  response_decoder->decodeData(data, true);\n\n  // Now these should be set.\n  EXPECT_TRUE(stream_info.firstUpstreamTxByteSent().has_value());\n  EXPECT_TRUE(stream_info.lastUpstreamTxByteSent().has_value());\n  EXPECT_TRUE(stream_info.firstUpstreamRxByteReceived().has_value());\n  EXPECT_TRUE(stream_info.lastUpstreamRxByteReceived().has_value());\n\n  // Timings should match our sleep() calls.\n  EXPECT_EQ(stream_info.lastUpstreamRxByteReceived().value() -\n                stream_info.firstUpstreamRxByteReceived().value(),\n            std::chrono::milliseconds(43));\n  EXPECT_EQ(stream_info.lastUpstreamTxByteSent().value() -\n                stream_info.firstUpstreamTxByteSent().value(),\n            std::chrono::milliseconds(32));\n}\n\n// Verify that upstream timing information is set into the StreamInfo when a\n// retry occurs (and not before).\nTEST_F(RouterTest, UpstreamTimingRetry) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  StreamInfo::StreamInfoImpl stream_info(test_time_);\n  ON_CALL(callbacks_, streamInfo()).WillByDefault(ReturnRef(stream_info));\n\n  // Check that upstream timing is updated after the first request.\n  
Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n\n  router_.retry_state_->expectHeadersRetry();\n\n  test_time_.advanceTimeWait(std::chrono::milliseconds(32));\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  test_time_.advanceTimeWait(std::chrono::milliseconds(43));\n\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  // Check that upstream timing is not set when a retry will occur.\n  Http::ResponseHeaderMapPtr bad_response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}});\n  response_decoder->decodeHeaders(std::move(bad_response_headers), true);\n  EXPECT_FALSE(stream_info.firstUpstreamTxByteSent().has_value());\n  EXPECT_FALSE(stream_info.lastUpstreamTxByteSent().has_value());\n  EXPECT_FALSE(stream_info.firstUpstreamRxByteReceived().has_value());\n  EXPECT_FALSE(stream_info.lastUpstreamRxByteReceived().has_value());\n\n  router_.retry_state_->callback_();\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n  MonotonicTime retry_time = test_time_.monotonicTime();\n\n  Http::ResponseHeaderMapPtr good_response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(good_response_headers), false);\n\n  test_time_.advanceTimeWait(std::chrono::milliseconds(153));\n\n  response_decoder->decodeData(data, true);\n\n  
EXPECT_TRUE(stream_info.firstUpstreamTxByteSent().has_value());\n  EXPECT_TRUE(stream_info.lastUpstreamTxByteSent().has_value());\n  EXPECT_TRUE(stream_info.firstUpstreamRxByteReceived().has_value());\n  EXPECT_TRUE(stream_info.lastUpstreamRxByteReceived().has_value());\n\n  EXPECT_EQ(stream_info.lastUpstreamRxByteReceived().value() -\n                stream_info.firstUpstreamRxByteReceived().value(),\n            std::chrono::milliseconds(153));\n\n  // Time spent in upstream tx is 0 because we're using simulated time and\n  // don't have a good way to insert a \"sleep\" there, but values being present\n  // and equal to the time the retry was sent is good enough of a test.\n  EXPECT_EQ(stream_info.lastUpstreamTxByteSent().value() -\n                stream_info.firstUpstreamTxByteSent().value(),\n            std::chrono::milliseconds(0));\n  EXPECT_EQ(stream_info.lastUpstreamTxByteSent().value() +\n                stream_info.startTimeMonotonic().time_since_epoch(),\n            retry_time.time_since_epoch());\n  EXPECT_EQ(stream_info.firstUpstreamTxByteSent().value() +\n                stream_info.startTimeMonotonic().time_since_epoch(),\n            retry_time.time_since_epoch());\n}\n\n// Verify that upstream timing information is set into the StreamInfo when a\n// global timeout occurs.\nTEST_F(RouterTest, UpstreamTimingTimeout) {\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  StreamInfo::StreamInfoImpl stream_info(test_time_);\n  ON_CALL(callbacks_, streamInfo()).WillByDefault(ReturnRef(stream_info));\n\n  
expectResponseTimerCreate();\n  test_time_.advanceTimeWait(std::chrono::milliseconds(10));\n\n  // Check that upstream timing is updated after the first request.\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"50\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  EXPECT_FALSE(stream_info.lastUpstreamRxByteReceived().has_value());\n\n  test_time_.advanceTimeWait(std::chrono::milliseconds(13));\n  Buffer::OwnedImpl data;\n  router_.decodeData(data, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  test_time_.advanceTimeWait(std::chrono::milliseconds(33));\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n\n  test_time_.advanceTimeWait(std::chrono::milliseconds(99));\n  response_timeout_->invokeCallback();\n\n  EXPECT_TRUE(stream_info.firstUpstreamTxByteSent().has_value());\n  EXPECT_TRUE(stream_info.lastUpstreamTxByteSent().has_value());\n  EXPECT_TRUE(stream_info.firstUpstreamRxByteReceived().has_value());\n  EXPECT_FALSE(stream_info.lastUpstreamRxByteReceived()\n                   .has_value()); // False because no end_stream was seen.\n  EXPECT_EQ(stream_info.firstUpstreamTxByteSent().value(), std::chrono::milliseconds(10));\n  EXPECT_EQ(stream_info.lastUpstreamTxByteSent().value(), std::chrono::milliseconds(23));\n  EXPECT_EQ(stream_info.firstUpstreamRxByteReceived().value(), std::chrono::milliseconds(56));\n}\n\nTEST(RouterFilterUtilityTest, FinalHedgingParamsHedgeOnPerTryTimeout) {\n  Http::TestRequestHeaderMapImpl empty_headers;\n  { // route says true, header not present, expect true.\n    NiceMock<MockRouteEntry> route;\n    route.hedge_policy_.hedge_on_per_try_timeout_ = true;\n    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));\n  
  FilterUtility::HedgingParams hedgingParams =\n        FilterUtility::finalHedgingParams(route, empty_headers);\n    EXPECT_TRUE(hedgingParams.hedge_on_per_try_timeout_);\n  }\n  { // route says false, header not present, expect false.\n    NiceMock<MockRouteEntry> route;\n    route.hedge_policy_.hedge_on_per_try_timeout_ = false;\n    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));\n    FilterUtility::HedgingParams hedgingParams =\n        FilterUtility::finalHedgingParams(route, empty_headers);\n    EXPECT_FALSE(hedgingParams.hedge_on_per_try_timeout_);\n  }\n  { // route says false, header says true, expect true.\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-hedge-on-per-try-timeout\", \"true\"}};\n    NiceMock<MockRouteEntry> route;\n    route.hedge_policy_.hedge_on_per_try_timeout_ = false;\n    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));\n    FilterUtility::HedgingParams hedgingParams = FilterUtility::finalHedgingParams(route, headers);\n    EXPECT_TRUE(hedgingParams.hedge_on_per_try_timeout_);\n  }\n  { // route says false, header says false, expect false.\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-hedge-on-per-try-timeout\", \"false\"}};\n    NiceMock<MockRouteEntry> route;\n    route.hedge_policy_.hedge_on_per_try_timeout_ = false;\n    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));\n    FilterUtility::HedgingParams hedgingParams = FilterUtility::finalHedgingParams(route, headers);\n    EXPECT_FALSE(hedgingParams.hedge_on_per_try_timeout_);\n  }\n  { // route says true, header says false, expect false.\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-hedge-on-per-try-timeout\", \"false\"}};\n    NiceMock<MockRouteEntry> route;\n    route.hedge_policy_.hedge_on_per_try_timeout_ = true;\n    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));\n    FilterUtility::HedgingParams hedgingParams = 
FilterUtility::finalHedgingParams(route, headers);\n    EXPECT_FALSE(hedgingParams.hedge_on_per_try_timeout_);\n  }\n  { // route says true, header says true, expect true.\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-hedge-on-per-try-timeout\", \"true\"}};\n    NiceMock<MockRouteEntry> route;\n    route.hedge_policy_.hedge_on_per_try_timeout_ = true;\n    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));\n    FilterUtility::HedgingParams hedgingParams = FilterUtility::finalHedgingParams(route, headers);\n    EXPECT_TRUE(hedgingParams.hedge_on_per_try_timeout_);\n  }\n  { // route says true, header is invalid, expect true.\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-hedge-on-per-try-timeout\", \"bad\"}};\n    NiceMock<MockRouteEntry> route;\n    route.hedge_policy_.hedge_on_per_try_timeout_ = true;\n    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));\n    FilterUtility::HedgingParams hedgingParams = FilterUtility::finalHedgingParams(route, headers);\n    EXPECT_TRUE(hedgingParams.hedge_on_per_try_timeout_);\n  }\n  { // route says false, header is invalid, expect false.\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-hedge-on-per-try-timeout\", \"bad\"}};\n    NiceMock<MockRouteEntry> route;\n    route.hedge_policy_.hedge_on_per_try_timeout_ = false;\n    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));\n    FilterUtility::HedgingParams hedgingParams = FilterUtility::finalHedgingParams(route, headers);\n    EXPECT_FALSE(hedgingParams.hedge_on_per_try_timeout_);\n  }\n}\n\nTEST(RouterFilterUtilityTest, FinalTimeout) {\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers;\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, false, false);\n    
EXPECT_EQ(std::chrono::milliseconds(10), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"15\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_EQ(\"15\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"bad\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(10), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_EQ(\"10\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"15\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"15\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), 
timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"15\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"15\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"5\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"15\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, true, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"15\", 
headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"15\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, true, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"15\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_EQ(\"15m\", headers.get_(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(7);\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"15\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(7), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"7\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(10);\n    EXPECT_CALL(route, 
timeout()).WillOnce(Return(std::chrono::milliseconds(0)));\n    Http::TestRequestHeaderMapImpl headers;\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(10), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"10\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(7);\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"15\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"5\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(0), 
timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout()).WillRepeatedly(Return(absl::nullopt));\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(10), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"1000m\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(1000), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_EQ(\"1000m\", headers.get_(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(999)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"1000m\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(999), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), 
timeout.per_try_timeout_);\n    EXPECT_EQ(\"999m\", headers.get_(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(999)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"0m\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(999), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_EQ(\"999m\", headers.get_(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(999)));\n    EXPECT_CALL(route, grpcTimeoutOffset())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"100m\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(90), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(999)));\n    EXPECT_CALL(route, grpcTimeoutOffset())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"1m\"}};\n    FilterUtility::TimeoutData timeout =\n        
FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(1), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"1000m\"},\n                                           {\"x-envoy-upstream-rq-timeout-ms\", \"15\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_EQ(\"15\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_EQ(\"15m\", headers.get_(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"1000m\"},\n                                           {\"x-envoy-upstream-rq-timeout-ms\", \"bad\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(1000), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_EQ(\"1000\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_EQ(\"1000m\", headers.get_(\"grpc-timeout\"));\n  }\n 
 {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"1000m\"},\n                                           {\"x-envoy-upstream-rq-timeout-ms\", \"15\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"15\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"15\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_EQ(\"15m\", headers.get_(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"1000m\"},\n                                           {\"x-envoy-upstream-rq-timeout-ms\", \"15\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    
EXPECT_EQ(\"5\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_EQ(\"5m\", headers.get_(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));\n    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(7);\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"1000m\"},\n                                           {\"x-envoy-upstream-rq-timeout-ms\", \"15\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(7), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"7\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_EQ(\"7m\", headers.get_(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, maxGrpcTimeout())\n        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));\n    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(7);\n    Http::TestRequestHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                           {\"grpc-timeout\", \"1000m\"},\n                                           {\"x-envoy-upstream-rq-timeout-ms\", \"15\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, true, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(5), 
timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"5\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_EQ(\"5m\", headers.get_(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-expected-rq-timeout-ms\", \"8\"}};\n    // Make ingress envoy respect `x-envoy-expected-rq-timeout-ms` header.\n    bool respect_expected_rq_timeout = true;\n    FilterUtility::TimeoutData timeout = FilterUtility::finalTimeout(\n        route, headers, true, false, false, respect_expected_rq_timeout);\n    EXPECT_EQ(std::chrono::milliseconds(8), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_EQ(\"8\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-expected-rq-timeout-ms\", \"8\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"4\"}};\n    // Make ingress envoy respect `x-envoy-expected-rq-timeout-ms` header.\n    bool respect_expected_rq_timeout = true;\n    FilterUtility::TimeoutData timeout = FilterUtility::finalTimeout(\n        route, headers, true, false, false, respect_expected_rq_timeout);\n    EXPECT_EQ(std::chrono::milliseconds(8), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(4), timeout.per_try_timeout_);\n    EXPECT_EQ(\"4\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, 
timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"8\"}};\n    // Test that ingress envoy populates `x-envoy-expected-rq-timeout-ms` header if it has not been\n    // set by egress envoy.\n    bool respect_expected_rq_timeout = true;\n    FilterUtility::TimeoutData timeout = FilterUtility::finalTimeout(\n        route, headers, true, false, false, respect_expected_rq_timeout);\n    EXPECT_EQ(std::chrono::milliseconds(8), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"8\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"8\"}};\n    // Make envoy override `x-envoy-expected-rq-timeout-ms` header.\n    // Test that ingress envoy sets `x-envoy-expected-rq-timeout-ms` header.\n    bool respect_expected_rq_timeout = false;\n    FilterUtility::TimeoutData timeout = FilterUtility::finalTimeout(\n        route, headers, true, false, false, respect_expected_rq_timeout);\n    EXPECT_EQ(std::chrono::milliseconds(8), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-per-try-timeout-ms\"));\n    EXPECT_EQ(\"8\", headers.get_(\"x-envoy-expected-rq-timeout-ms\"));\n    EXPECT_FALSE(headers.has(\"grpc-timeout\"));\n  }\n}\n\nTEST(RouterFilterUtilityTest, FinalTimeoutSupressEnvoyHeaders) {\n  {\n    NiceMock<MockRouteEntry> route;\n    EXPECT_CALL(route, 
timeout()).WillOnce(Return(std::chrono::milliseconds(10)));\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-rq-timeout-ms\", \"15\"}};\n    FilterUtility::TimeoutData timeout =\n        FilterUtility::finalTimeout(route, headers, true, false, false, false);\n    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);\n    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);\n    EXPECT_FALSE(headers.has(\"x-envoy-upstream-rq-timeout-ms\"));\n  }\n}\n\nTEST(RouterFilterUtilityTest, SetUpstreamScheme) {\n  {\n    Http::TestRequestHeaderMapImpl headers;\n    FilterUtility::setUpstreamScheme(headers, false);\n    EXPECT_EQ(\"http\", headers.get_(\":scheme\"));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers;\n    FilterUtility::setUpstreamScheme(headers, true);\n    EXPECT_EQ(\"https\", headers.get_(\":scheme\"));\n  }\n}\n\nTEST(RouterFilterUtilityTest, ShouldShadow) {\n  {\n    TestShadowPolicy policy;\n    NiceMock<Runtime::MockLoader> runtime;\n    EXPECT_CALL(runtime.snapshot_, featureEnabled(_, _, _, _)).Times(0);\n    EXPECT_FALSE(FilterUtility::shouldShadow(policy, runtime, 5));\n  }\n  {\n    TestShadowPolicy policy(\"cluster\");\n    NiceMock<Runtime::MockLoader> runtime;\n    EXPECT_CALL(runtime.snapshot_, featureEnabled(_, _, _, _)).Times(0);\n    EXPECT_TRUE(FilterUtility::shouldShadow(policy, runtime, 5));\n  }\n  {\n    TestShadowPolicy policy(\"cluster\", \"foo\");\n    NiceMock<Runtime::MockLoader> runtime;\n    EXPECT_CALL(runtime.snapshot_, featureEnabled(\"foo\", 0, 5, 10000)).WillOnce(Return(false));\n    EXPECT_FALSE(FilterUtility::shouldShadow(policy, runtime, 5));\n  }\n  {\n    TestShadowPolicy policy(\"cluster\", \"foo\");\n    NiceMock<Runtime::MockLoader> runtime;\n    EXPECT_CALL(runtime.snapshot_, featureEnabled(\"foo\", 0, 5, 10000)).WillOnce(Return(true));\n    EXPECT_TRUE(FilterUtility::shouldShadow(policy, runtime, 5));\n  }\n  // Use default value instead of runtime key.\n  {\n    
envoy::type::v3::FractionalPercent fractional_percent;\n    fractional_percent.set_numerator(5);\n    fractional_percent.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n    TestShadowPolicy policy(\"cluster\", \"foo\", fractional_percent);\n    NiceMock<Runtime::MockLoader> runtime;\n    EXPECT_CALL(runtime.snapshot_,\n                featureEnabled(\"foo\", Matcher<const envoy::type::v3::FractionalPercent&>(_), 3))\n        .WillOnce(Return(true));\n    EXPECT_TRUE(FilterUtility::shouldShadow(policy, runtime, 3));\n  }\n}\n\nTEST_F(RouterTest, CanaryStatusTrue) {\n  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())\n      .WillOnce(Return(std::chrono::milliseconds(0)));\n  EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-alt-stat-name\", \"alt_stat\"},\n                                         {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                          {\"x-envoy-upstream-canary\", \"false\"},\n                                          {\"x-envoy-virtual-cluster\", \"hello\"}});\n  ON_CALL(*cm_.conn_pool_.host_, 
canary()).WillByDefault(Return(true));\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"canary.upstream_rq_200\")\n                .value());\n}\n\nTEST_F(RouterTest, CanaryStatusFalse) {\n  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())\n      .WillOnce(Return(std::chrono::milliseconds(0)));\n  EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-upstream-alt-stat-name\", \"alt_stat\"},\n                                         {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                          {\"x-envoy-upstream-canary\", \"false\"},\n                                          {\"x-envoy-virtual-cluster\", \"hello\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));\n\n  EXPECT_EQ(0U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"canary.upstream_rq_200\")\n                .value());\n}\n\nTEST_F(RouterTest, 
AutoHostRewriteEnabled) {
  NiceMock<Http::MockRequestEncoder> encoder;
  std::string req_host{"foo.bar.com"};

  Http::TestRequestHeaderMapImpl incoming_headers;
  HttpTestUtility::addDefaultHeaders(incoming_headers);
  incoming_headers.setHost(req_host);

  // The expected outgoing headers carry the upstream host's DNS name.
  cm_.conn_pool_.host_->hostname_ = "scooby.doo";
  Http::TestRequestHeaderMapImpl outgoing_headers;
  HttpTestUtility::addDefaultHeaders(outgoing_headers);
  outgoing_headers.setHost(cm_.conn_pool_.host_->hostname_);

  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())
      .WillOnce(Return(std::chrono::milliseconds(0)));

  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))
      .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks)
                           -> Http::ConnectionPool::Cancellable* {
        callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);
        return nullptr;
      }));

  // :authority header in the outgoing request should match the DNS name of
  // the selected upstream host
  EXPECT_CALL(encoder, encodeHeaders(HeaderMapEqualRef(&outgoing_headers), true))
      .WillOnce(Invoke([&](const Http::HeaderMap&, bool) -> void {
        encoder.stream_.resetStream(Http::StreamResetReason::RemoteReset);
      }));

  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))
      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {
        EXPECT_EQ(host_address_, host->address());
      }));
  EXPECT_CALL(callbacks_.route_->route_entry_, autoHostRewrite()).WillOnce(Return(true));
  router_.decodeHeaders(incoming_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}

// With autoHostRewrite() returning false, the incoming :authority is forwarded unchanged.
TEST_F(RouterTest, AutoHostRewriteDisabled) {
  NiceMock<Http::MockRequestEncoder> encoder;
  std::string req_host{"foo.bar.com"};

  Http::TestRequestHeaderMapImpl incoming_headers;
  HttpTestUtility::addDefaultHeaders(incoming_headers);
  incoming_headers.setHost(req_host);

  cm_.conn_pool_.host_->hostname_ = "scooby.doo";

  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())
      .WillOnce(Return(std::chrono::milliseconds(0)));

  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))
      .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks)
                           -> Http::ConnectionPool::Cancellable* {
        callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);
        return nullptr;
      }));

  // :authority header in the outgoing request should match the :authority header of
  // the incoming request
  EXPECT_CALL(encoder, encodeHeaders(HeaderMapEqualRef(&incoming_headers), true))
      .WillOnce(Invoke([&](const Http::HeaderMap&, bool) -> void {
        encoder.stream_.resetStream(Http::StreamResetReason::RemoteReset);
      }));

  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))
      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {
        EXPECT_EQ(host_address_, host->address());
      }));
  EXPECT_CALL(callbacks_.route_->route_entry_, autoHostRewrite()).WillOnce(Return(false));
  router_.decodeHeaders(incoming_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}

// upstreamSocketOptions() returns null when the callbacks provide no options.
TEST_F(RouterTest, UpstreamSocketOptionsReturnedEmpty) {
  EXPECT_CALL(callbacks_, getUpstreamSocketOptions())
      .WillOnce(Return(Network::Socket::OptionsSharedPtr()));

  auto options = router_.upstreamSocketOptions();

  EXPECT_EQ(options.get(), nullptr);
}

// upstreamSocketOptions() passes through a non-empty options pointer unchanged.
TEST_F(RouterTest, UpstreamSocketOptionsReturnedNonEmpty) {
  Network::Socket::OptionsSharedPtr to_return =
      Network::SocketOptionFactory::buildIpTransparentOptions();
  EXPECT_CALL(callbacks_, getUpstreamSocketOptions()).WillOnce(Return(to_return));

  auto options =
router_.upstreamSocketOptions();

  EXPECT_EQ(to_return, options);
}

// Application protocols placed in filter state are surfaced to the connection
// pool via the load balancer context's transport socket options.
TEST_F(RouterTest, ApplicationProtocols) {
  callbacks_.streamInfo().filterState()->setData(
      Network::ApplicationProtocols::key(),
      std::make_unique<Network::ApplicationProtocols>(std::vector<std::string>{"foo", "bar"}),
      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain);

  EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _))
      .WillOnce(
          Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional<Http::Protocol>,
                     Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* {
            // The filter-state protocols must appear, in order, in the ALPN override list.
            Network::TransportSocketOptionsSharedPtr transport_socket_options =
                context->upstreamTransportSocketOptions();
            EXPECT_NE(transport_socket_options, nullptr);
            EXPECT_FALSE(transport_socket_options->applicationProtocolListOverride().empty());
            EXPECT_EQ(transport_socket_options->applicationProtocolListOverride().size(), 2);
            EXPECT_EQ(transport_socket_options->applicationProtocolListOverride()[0], "foo");
            EXPECT_EQ(transport_socket_options->applicationProtocolListOverride()[1], "bar");
            return &cm_.conn_pool_;
          }));
  EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_));

  expectResponseTimerCreate();

  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  EXPECT_CALL(span_, injectContext(_));
  router_.decodeHeaders(headers, true);

  // When the router filter gets reset we should cancel the pool request.
  EXPECT_CALL(cancellable_, cancel(_));
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}

// Verify that CONNECT payload is not sent upstream until :200 response headers
// are received.
TEST_F(RouterTest, ConnectPauseAndResume) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))
      .WillOnce(Invoke(
          [&](Http::ResponseDecoder& decoder,
              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);
            return nullptr;
          }));
  expectResponseTimerCreate();

  EXPECT_CALL(encoder, encodeHeaders(_, false));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  headers.setMethod("CONNECT");
  router_.decodeHeaders(headers, false);

  // Make sure any early data does not go upstream.
  EXPECT_CALL(encoder, encodeData(_, _)).Times(0);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);

  // Now send the response headers, and ensure the deferred payload is proxied.
  EXPECT_CALL(encoder, encodeData(_, _));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
}

// Verify that CONNECT payload is not sent upstream if non-200 response headers are received.
TEST_F(RouterTest, ConnectPauseNoResume) {
  // Explicitly configure an HTTP upstream, to test factory creation.
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_ =
      absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>();
  envoy::extensions::upstreams::http::http::v3::HttpConnectionPoolProto http_config;
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value()
      .mutable_typed_config()
      ->PackFrom(http_config);

  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
EXPECT_CALL(cm_.conn_pool_, newStream(_, _))
      .WillOnce(Invoke(
          [&](Http::ResponseDecoder& decoder,
              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);
            return nullptr;
          }));
  expectResponseTimerCreate();

  EXPECT_CALL(encoder, encodeHeaders(_, false));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  headers.setMethod("CONNECT");
  router_.decodeHeaders(headers, false);

  // Make sure any early data does not go upstream.
  EXPECT_CALL(encoder, encodeData(_, _)).Times(0);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);

  // Now send the response headers, and ensure the deferred payload is not proxied.
  EXPECT_CALL(encoder, encodeData(_, _)).Times(0);
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "400"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
}

TEST_F(RouterTest, ConnectExplicitTcpUpstream) {
  // Explicitly configure a TCP upstream, to test factory creation.
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_ =
      absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>();
  envoy::extensions::upstreams::http::tcp::v3::TcpConnectionPoolProto tcp_config;
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value()
      .mutable_typed_config()
      ->PackFrom(tcp_config);
  callbacks_.route_->route_entry_.connect_config_ =
      absl::make_optional<RouteEntry::ConnectConfig>();

  // Make sure newConnection is called on the TCP pool, not newStream on the HTTP pool.
  EXPECT_CALL(cm_.tcp_conn_pool_, newConnection(_));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  headers.setMethod("CONNECT");
  router_.decodeHeaders(headers, false);

  router_.onDestroy();
}

// Fixture with helpers to start an upstream request (optionally deferring pool
// readiness) and to send a canned 200 response through the captured decoder.
class WatermarkTest : public RouterTest {
public:
  // Sends default request headers through the router; when `pool_ready` is
  // false, pool readiness is withheld so the caller can trigger it later via
  // `pool_callbacks_`.
  void sendRequest(bool header_only_request = true, bool pool_ready = true) {
    EXPECT_CALL(callbacks_.route_->route_entry_, timeout())
        .WillOnce(Return(std::chrono::milliseconds(0)));
    EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);

    EXPECT_CALL(stream_, addCallbacks(_))
        .Times(num_add_callbacks_)
        .WillOnce(
            Invoke([&](Http::StreamCallbacks& callbacks) { stream_callbacks_ = &callbacks; }));
    EXPECT_CALL(encoder_, getStream()).WillRepeatedly(ReturnRef(stream_));
    EXPECT_CALL(cm_.conn_pool_, newStream(_, _))
        .WillOnce(Invoke(
            [&](Http::ResponseDecoder& decoder,
                Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
              response_decoder_ = &decoder;
              pool_callbacks_ = &callbacks;
              if (pool_ready) {
                callbacks.onPoolReady(encoder_, cm_.conn_pool_.host_, upstream_stream_info_);
              }
              return nullptr;
            }));
    HttpTestUtility::addDefaultHeaders(headers_);
    router_.decodeHeaders(headers_, header_only_request);
    if (pool_ready) {
      EXPECT_EQ(
          1U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
    }
  }
  // Completes the exchange with a headers-only 200 response.
  void sendResponse() {
    response_decoder_->decodeHeaders(
        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}}, true);
  }

  NiceMock<Http::MockRequestEncoder> encoder_;
  NiceMock<Http::MockStream> stream_;
  Http::StreamCallbacks* stream_callbacks_;
  Http::ResponseDecoder* response_decoder_ = nullptr;
  Http::TestRequestHeaderMapImpl headers_;
  Http::ConnectionPool::Callbacks* pool_callbacks_{nullptr};
  int num_add_callbacks_{1};
};

TEST_F(WatermarkTest,
DownstreamWatermarks) {\n  sendRequest();\n\n  stream_callbacks_->onAboveWriteBufferHighWatermark();\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_backed_up_total\")\n                    .value());\n  stream_callbacks_->onBelowWriteBufferLowWatermark();\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_drained_total\")\n                    .value());\n\n  sendResponse();\n}\n\nTEST_F(WatermarkTest, UpstreamWatermarks) {\n  sendRequest(false);\n\n  response_decoder_->decodeHeaders(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}, false);\n\n  ASSERT(callbacks_.callbacks_.begin() != callbacks_.callbacks_.end());\n  Envoy::Http::DownstreamWatermarkCallbacks* watermark_callbacks = *callbacks_.callbacks_.begin();\n\n  EXPECT_CALL(encoder_, getStream()).WillOnce(ReturnRef(stream_));\n  EXPECT_CALL(stream_, readDisable(_));\n  watermark_callbacks->onAboveWriteBufferHighWatermark();\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_paused_reading_total\")\n                    .value());\n\n  EXPECT_CALL(encoder_, getStream()).WillOnce(ReturnRef(stream_));\n  EXPECT_CALL(stream_, readDisable(_));\n  watermark_callbacks->onBelowWriteBufferLowWatermark();\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_resumed_reading_total\")\n                    .value());\n\n  Buffer::OwnedImpl data;\n  EXPECT_CALL(encoder_, getStream()).Times(2).WillRepeatedly(ReturnRef(stream_));\n  response_decoder_->decodeData(data, true);\n}\n\nTEST_F(WatermarkTest, FilterWatermarks) {\n  EXPECT_CALL(callbacks_, decoderBufferLimit()).Times(3).WillRepeatedly(Return(10));\n  router_.setDecoderFilterCallbacks(callbacks_);\n  // Send the headers sans-fin, 
and don't flag the pool as ready.\n  sendRequest(false, false);\n\n  // Send 10 bytes of body to fill the 10 byte buffer.\n  Buffer::OwnedImpl data(\"1234567890\");\n  router_.decodeData(data, false);\n  EXPECT_EQ(0u, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_backed_up_total\")\n                    .value());\n\n  // Send one extra byte. This should cause the buffer to go over the limit and pause downstream\n  // data.\n  Buffer::OwnedImpl last_byte(\"!\");\n  router_.decodeData(last_byte, true);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_backed_up_total\")\n                    .value());\n\n  // Now set up the downstream connection. The encoder will be given the buffered request body,\n  // The mock invocation below drains it, and the buffer will go under the watermark limit again.\n  EXPECT_EQ(0U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_drained_total\")\n                    .value());\n  EXPECT_CALL(encoder_, encodeData(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> void { data.drain(data.length()); }));\n  pool_callbacks_->onPoolReady(encoder_, cm_.conn_pool_.host_, upstream_stream_info_);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_drained_total\")\n                    .value());\n\n  sendResponse();\n}\n\nTEST_F(WatermarkTest, FilterWatermarksUnwound) {\n  num_add_callbacks_ = 0;\n  EXPECT_CALL(callbacks_, decoderBufferLimit()).Times(3).WillRepeatedly(Return(10));\n  router_.setDecoderFilterCallbacks(callbacks_);\n  // Send the headers sans-fin, and don't flag the pool as ready.\n  sendRequest(false, false);\n\n  // Send 11 bytes of body to fill the 10 byte buffer.\n  Buffer::OwnedImpl data(\"1234567890!\");\n  router_.decodeData(data, 
false);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_backed_up_total\")\n                    .value());\n\n  // Set up a pool failure, and make sure the flow control blockage is undone.\n  pool_callbacks_->onPoolFailure(Http::ConnectionPool::PoolFailureReason::RemoteConnectionFailure,\n                                 absl::string_view(), nullptr);\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_flow_control_drained_total\")\n                    .value());\n}\n\n// Same as RetryRequestNotComplete but with decodeData larger than the buffer\n// limit, no retry will occur.\nTEST_F(WatermarkTest, RetryRequestNotComplete) {\n  EXPECT_CALL(callbacks_, decoderBufferLimit()).Times(2).WillRepeatedly(Return(10));\n  router_.setDecoderFilterCallbacks(callbacks_);\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillRepeatedly(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRemoteReset));\n  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))\n      .WillRepeatedly(Invoke([&](const Upstream::HostDescriptionConstSharedPtr& host) -> void {\n        EXPECT_EQ(host_address_, host->address());\n      }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  router_.decodeHeaders(headers, false);\n  Buffer::OwnedImpl 
data(\"1234567890123\");\n  EXPECT_CALL(*router_.retry_state_, enabled()).Times(1).WillOnce(Return(true));\n  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).Times(0);\n  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _)).Times(0);\n  // This will result in retry_state_ being deleted.\n  router_.decodeData(data, false);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // This should not trigger a retry as the retry state has been deleted.\n  EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n  EXPECT_EQ(callbacks_.details(), \"upstream_reset_before_response_started{remote reset}\");\n}\n\nclass RouterTestChildSpan : public RouterTestBase {\npublic:\n  RouterTestChildSpan() : RouterTestBase(true, false, Protobuf::RepeatedPtrField<std::string>{}) {}\n};\n\n// Make sure child spans start/inject/finish with a normal flow.\n// An upstream request succeeds and a single span is created.\nTEST_F(RouterTestChildSpan, BasicFlow) {\n  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())\n      .WillOnce(Return(std::chrono::milliseconds(0)));\n  EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            EXPECT_CALL(*child_span, injectContext(_));\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  
Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(callbacks_.active_span_, spawnChild_(_, \"router fake_cluster egress\", _))\n      .WillOnce(Return(child_span));\n  EXPECT_CALL(callbacks_, tracingConfig());\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.0\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.5:9211\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"200\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(*child_span, finishSpan());\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n}\n\n// Make sure child spans start/inject/finish with a reset flow.\n// The upstream responds back to envoy before the reset, so the span has fields that represent a\n// response and reset.\nTEST_F(RouterTestChildSpan, ResetFlow) {\n  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())\n      .WillOnce(Return(std::chrono::milliseconds(0)));\n  EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& 
callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            EXPECT_CALL(*child_span, injectContext(_));\n            callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(callbacks_.active_span_, spawnChild_(_, \"router fake_cluster egress\", _))\n      .WillOnce(Return(child_span));\n  EXPECT_CALL(callbacks_, tracingConfig());\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Upstream responds back to envoy.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  response_decoder->decodeHeaders(std::move(response_headers), false);\n\n  // The reset occurs after the upstream response, so the span has a valid status code but also an\n  // error.\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.0\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.5:9211\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"200\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"UR\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ErrorReason), Eq(\"remote reset\")));\n  EXPECT_CALL(*child_span, finishSpan());\n  encoder.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n}\n\n// Make sure child spans 
start/inject/finish with a cancellation flow.\n// An upstream request is created but is then cancelled before. The resulting span has the\n// cancellation fields.\nTEST_F(RouterTestChildSpan, CancelFlow) {\n  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())\n      .WillOnce(Return(std::chrono::milliseconds(0)));\n  EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);\n\n  NiceMock<Http::MockRequestEncoder> encoder;\n  Tracing::MockSpan* child_span{new Tracing::MockSpan()};\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks)\n                           -> Http::ConnectionPool::Cancellable* {\n        EXPECT_CALL(*child_span, injectContext(_));\n        callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_);\n        return nullptr;\n      }));\n\n  Http::TestRequestHeaderMapImpl headers;\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(callbacks_.active_span_, spawnChild_(_, \"router fake_cluster egress\", _))\n      .WillOnce(Return(child_span));\n  EXPECT_CALL(callbacks_, tracingConfig());\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Destroy the router, causing the upstream request to be cancelled.\n  // Response code on span is 0 because the upstream never sent a response.\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.0\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.5:9211\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"0\")));\n  EXPECT_CALL(*child_span, 
setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(*child_span,\n              setTag(Eq(Tracing::Tags::get().Canceled), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(*child_span, finishSpan());\n  router_.onDestroy();\n}\n\n// Make sure child spans start/inject/finish with retry flow.\n// The first request will fail because of an upstream reset, so the span will be annotated with the\n// reset reason. The second request will succeed, so the span will be annotated with 200 OK.\nTEST_F(RouterTestChildSpan, ResetRetryFlow) {\n  NiceMock<Http::MockRequestEncoder> encoder1;\n  Http::ResponseDecoder* response_decoder = nullptr;\n  Tracing::MockSpan* child_span_1{new Tracing::MockSpan()};\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            EXPECT_CALL(*child_span_1, injectContext(_));\n            callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n  expectResponseTimerCreate();\n\n  // Upstream responds back to envoy simulating an upstream reset.\n  Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"}, {\"x-envoy-internal\", \"true\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n  EXPECT_CALL(callbacks_.active_span_, spawnChild_(_, \"router fake_cluster egress\", _))\n      .WillOnce(Return(child_span_1));\n  EXPECT_CALL(callbacks_, tracingConfig());\n  router_.decodeHeaders(headers, true);\n  EXPECT_EQ(1U,\n            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // The span should be annotated with the reset-related fields.\n  EXPECT_CALL(*child_span_1,\n              
setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.0\")));\n  EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.5:9211\")));\n  EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"0\")));\n  EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"UR\")));\n  EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)))\n      .Times(2);\n  EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().ErrorReason), Eq(\"remote reset\")));\n  EXPECT_CALL(*child_span_1, finishSpan());\n\n  router_.retry_state_->expectResetRetry();\n  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n\n  // We expect this reset to kick off a new request.\n  NiceMock<Http::MockRequestEncoder> encoder2;\n  Tracing::MockSpan* child_span_2{new Tracing::MockSpan()};\n  EXPECT_CALL(cm_.conn_pool_, newStream(_, _))\n      .WillOnce(Invoke(\n          [&](Http::ResponseDecoder& decoder,\n              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n            response_decoder = &decoder;\n            EXPECT_CALL(*child_span_2, injectContext(_));\n            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));\n            callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_);\n            return nullptr;\n          }));\n\n  EXPECT_CALL(callbacks_.active_span_, spawnChild_(_, \"router fake_cluster egress\", _))\n      .WillOnce(Return(child_span_2));\n  EXPECT_CALL(callbacks_, tracingConfig());\n  EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().RetryCount), Eq(\"1\")));\n\n  router_.retry_state_->callback_();\n  EXPECT_EQ(2U,\n            
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());\n\n  // Upstream responds back with a normal response. Span should be annotated as usual.\n  Http::ResponseHeaderMapPtr response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  EXPECT_CALL(*child_span_2,\n              setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.0\")));\n  EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq(\"10.0.0.5:9211\")));\n  EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"200\")));\n  EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(*child_span_2, finishSpan());\n  response_decoder->decodeHeaders(std::move(response_headers), true);\n}\n\nProtobuf::RepeatedPtrField<std::string> protobufStrList(const std::vector<std::string>& v) {\n  Protobuf::RepeatedPtrField<std::string> res;\n  for (auto& field : v) {\n    *res.Add() = field;\n  }\n\n  return res;\n}\n\nclass RouterTestStrictCheckOneHeader : public RouterTestBase,\n                                       public testing::WithParamInterface<std::string> {\npublic:\n  RouterTestStrictCheckOneHeader() : RouterTestBase(false, false, protobufStrList({GetParam()})){};\n};\n\nINSTANTIATE_TEST_SUITE_P(StrictHeaderCheck, RouterTestStrictCheckOneHeader,\n                         testing::Values(\"x-envoy-upstream-rq-timeout-ms\",\n                                         \"x-envoy-upstream-rq-per-try-timeout-ms\",\n                                         \"x-envoy-max-retries\", \"x-envoy-retry-on\",\n                                         \"x-envoy-retry-grpc-on\"));\n\n// Each test param instantiates a router that strict-checks one particular 
header.\n// This test decodes a set of headers with invalid values and asserts that the\n// strict header check only fails for the single header specified by the test param\nTEST_P(RouterTestStrictCheckOneHeader, SingleInvalidHeader) {\n  Http::TestRequestHeaderMapImpl req_headers{\n      {\"X-envoy-Upstream-rq-timeout-ms\", \"10.0\"},\n      {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"1.0\"},\n      {\"x-envoy-max-retries\", \"2.0\"},\n      {\"x-envoy-retry-on\", \"5xx,cancelled\"},                // 'cancelled' is an invalid entry\n      {\"x-envoy-retry-grpc-on\", \"5xx,cancelled, internal\"}, // '5xx' is an invalid entry\n  };\n  HttpTestUtility::addDefaultHeaders(req_headers);\n  auto checked_header = GetParam();\n\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::InvalidEnvoyRequestHeaders));\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& response_headers, bool end_stream) -> void {\n        EXPECT_EQ(enumToInt(Http::Code::BadRequest),\n                  Envoy::Http::Utility::getResponseStatus(response_headers));\n        EXPECT_FALSE(end_stream);\n      }));\n\n  EXPECT_CALL(callbacks_, encodeData(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool end_stream) -> void {\n        EXPECT_THAT(data.toString(),\n                    StartsWith(fmt::format(\"invalid header '{}' with value \", checked_header)));\n        EXPECT_TRUE(end_stream);\n      }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, router_.decodeHeaders(req_headers, true));\n  EXPECT_EQ(callbacks_.details(),\n            fmt::format(\"request_headers_failed_strict_check{{{}}}\", checked_header));\n}\n\nclass RouterTestStrictCheckSomeHeaders\n    : public RouterTestBase,\n      public testing::WithParamInterface<std::vector<std::string>> {\npublic:\n  RouterTestStrictCheckSomeHeaders() : RouterTestBase(false, false, 
protobufStrList(GetParam())){};\n};\n\nINSTANTIATE_TEST_SUITE_P(StrictHeaderCheck, RouterTestStrictCheckSomeHeaders,\n                         testing::Values(std::vector<std::string>{\"x-envoy-upstream-rq-timeout-ms\",\n                                                                  \"x-envoy-max-retries\"},\n                                         std::vector<std::string>{}));\n\n// Request has headers with invalid values, but headers are *excluded* from the\n// set to which strict-checks apply. Assert that these headers are not rejected.\nTEST_P(RouterTestStrictCheckSomeHeaders, IgnoreOmittedHeaders) {\n  // Invalid, but excluded from the configured set of headers to strictly-check\n  Http::TestRequestHeaderMapImpl headers{\n      {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"1.0\"},\n      {\"x-envoy-upstream-rq-timeout-ms\", \"5000\"},\n      {\"x-envoy-retry-on\", \"5xx,cancelled\"},\n  };\n  HttpTestUtility::addDefaultHeaders(headers);\n\n  expectResponseTimerCreate();\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, router_.decodeHeaders(headers, true));\n  router_.onDestroy();\n}\n\nconst std::vector<std::string> SUPPORTED_STRICT_CHECKED_HEADERS = {\n    \"x-envoy-upstream-rq-timeout-ms\", \"x-envoy-upstream-rq-per-try-timeout-ms\", \"x-envoy-retry-on\",\n    \"x-envoy-retry-grpc-on\", \"x-envoy-max-retries\"};\n\nclass RouterTestStrictCheckAllHeaders\n    : public RouterTestBase,\n      public testing::WithParamInterface<std::tuple<std::string, std::string>> {\npublic:\n  RouterTestStrictCheckAllHeaders()\n      : RouterTestBase(false, false, protobufStrList(SUPPORTED_STRICT_CHECKED_HEADERS)){};\n};\n\nINSTANTIATE_TEST_SUITE_P(StrictHeaderCheck, RouterTestStrictCheckAllHeaders,\n                         testing::Combine(testing::ValuesIn(SUPPORTED_STRICT_CHECKED_HEADERS),\n                                          testing::ValuesIn(SUPPORTED_STRICT_CHECKED_HEADERS)));\n\n// Each instance of this test configures a router to strict-validate 
all\n// supported headers and asserts that a request with invalid values set for some\n// *pair* of headers is rejected.\nTEST_P(RouterTestStrictCheckAllHeaders, MultipleInvalidHeaders) {\n  const auto& header1 = std::get<0>(GetParam());\n  const auto& header2 = std::get<1>(GetParam());\n  Http::TestRequestHeaderMapImpl headers{{header1, \"invalid\"}, {header2, \"invalid\"}};\n  HttpTestUtility::addDefaultHeaders(headers);\n\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::InvalidEnvoyRequestHeaders));\n\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))\n      .WillOnce(Invoke([&](Http::ResponseHeaderMap& response_headers, bool end_stream) -> void {\n        EXPECT_EQ(enumToInt(Http::Code::BadRequest),\n                  Envoy::Http::Utility::getResponseStatus(response_headers));\n        EXPECT_FALSE(end_stream);\n      }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, router_.decodeHeaders(headers, true));\n  EXPECT_THAT(callbacks_.details(),\n              StartsWith(fmt::format(\"request_headers_failed_strict_check{{\")));\n  router_.onDestroy();\n}\n\n// Request has headers with invalid values, but headers are *excluded* from the\n// set to which strict-checks apply. 
Assert that these headers are not rejected.\nTEST(RouterFilterUtilityTest, StrictCheckValidHeaders) {\n  Http::TestRequestHeaderMapImpl headers{\n      {\"X-envoy-Upstream-rq-timeout-ms\", \"100\"},\n      {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"100\"},\n      {\"x-envoy-max-retries\", \"2\"},\n      {\"not-checked\", \"always passes\"},\n      {\"x-envoy-retry-on\", \"5xx,gateway-error,retriable-4xx,refused-stream,connect-failure,\"\n                           \"retriable-status-codes , reset\"}, // space is allowed\n      {\"x-envoy-retry-grpc-on\",\n       \"cancelled,internal,deadline-exceeded,resource-exhausted , unavailable\"}, // space is allowed\n  };\n\n  for (const auto& target : SUPPORTED_STRICT_CHECKED_HEADERS) {\n    EXPECT_TRUE(\n        FilterUtility::StrictHeaderChecker::checkHeader(headers, Http::LowerCaseString(target))\n            .valid_)\n        << fmt::format(\"'{}' should have passed strict validation\", target);\n  }\n\n  Http::TestRequestHeaderMapImpl failing_headers{\n      {\"X-envoy-Upstream-rq-timeout-ms\", \"10.0\"},\n      {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"1.0\"},\n      {\"x-envoy-max-retries\", \"2.0\"},\n      {\"x-envoy-retry-on\", \"5xx,cancelled\"},                // 'cancelled' is an invalid entry\n      {\"x-envoy-retry-grpc-on\", \"5xx,cancelled, internal\"}, // '5xx' is an invalid entry\n  };\n\n  for (const auto& target : SUPPORTED_STRICT_CHECKED_HEADERS) {\n    EXPECT_FALSE(FilterUtility::StrictHeaderChecker::checkHeader(failing_headers,\n                                                                 Http::LowerCaseString(target))\n                     .valid_)\n        << fmt::format(\"'{}' should have failed strict validation\", target);\n  }\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/router_upstream_log_test.cc",
    "content": "#include <ctime>\n#include <memory>\n#include <regex>\n\n#include \"envoy/config/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/http/router/v3/router.pb.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/router/router.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/filesystem/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/types/optional.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nabsl::optional<envoy::config::accesslog::v3::AccessLog> testUpstreamLog() {\n  // Custom format without timestamps or durations.\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  format: \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% %RESPONSE_CODE%\n    %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %REQ(:AUTHORITY)% %UPSTREAM_HOST%\n    %UPSTREAM_LOCAL_ADDRESS% %RESP(X-UPSTREAM-HEADER)% %TRAILER(X-TRAILER)%\\n\"\n  path: \"/dev/null\"\n  )EOF\";\n\n  envoy::config::accesslog::v3::AccessLog upstream_log;\n  TestUtility::loadFromYaml(yaml, upstream_log);\n\n  return absl::optional<envoy::config::accesslog::v3::AccessLog>(upstream_log);\n}\n\n} // namespace\n\nclass TestFilter : public Filter {\npublic:\n  using Filter::Filter;\n\n  // Filter\n  RetryStatePtr createRetryState(const RetryPolicy&, 
Http::RequestHeaderMap&,\n                                 const Upstream::ClusterInfo&, const VirtualCluster*,\n                                 Runtime::Loader&, Random::RandomGenerator&, Event::Dispatcher&,\n                                 TimeSource&, Upstream::ResourcePriority) override {\n    EXPECT_EQ(nullptr, retry_state_);\n    retry_state_ = new NiceMock<MockRetryState>();\n    return RetryStatePtr{retry_state_};\n  }\n\n  const Network::Connection* downstreamConnection() const override {\n    return &downstream_connection_;\n  }\n\n  NiceMock<Network::MockConnection> downstream_connection_;\n  MockRetryState* retry_state_{};\n};\n\nclass RouterUpstreamLogTest : public testing::Test {\npublic:\n  void init(absl::optional<envoy::config::accesslog::v3::AccessLog> upstream_log) {\n    envoy::extensions::filters::http::router::v3::Router router_proto;\n\n    if (upstream_log) {\n      ON_CALL(*context_.access_log_manager_.file_, write(_))\n          .WillByDefault(\n              Invoke([&](absl::string_view data) { output_.push_back(std::string(data)); }));\n\n      envoy::config::accesslog::v3::AccessLog* current_upstream_log =\n          router_proto.add_upstream_log();\n      current_upstream_log->CopyFrom(upstream_log.value());\n    }\n\n    config_ = std::make_shared<FilterConfig>(\"prefix.\", context_,\n                                             ShadowWriterPtr(new MockShadowWriter()), router_proto);\n    router_ = std::make_shared<TestFilter>(*config_);\n    router_->setDecoderFilterCallbacks(callbacks_);\n    EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(testing::AnyNumber());\n\n    upstream_locality_.set_zone(\"to_az\");\n\n    ON_CALL(*context_.cluster_manager_.conn_pool_.host_, address())\n        .WillByDefault(Return(host_address_));\n    ON_CALL(*context_.cluster_manager_.conn_pool_.host_, locality())\n        .WillByDefault(ReturnRef(upstream_locality_));\n    router_->downstream_connection_.local_address_ = 
host_address_;\n    router_->downstream_connection_.remote_address_ =\n        Network::Utility::parseInternetAddressAndPort(\"1.2.3.4:80\");\n  }\n\n  void expectResponseTimerCreate() {\n    response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);\n    EXPECT_CALL(*response_timeout_, enableTimer(_, _));\n    EXPECT_CALL(*response_timeout_, disableTimer());\n  }\n\n  void expectPerTryTimerCreate() {\n    per_try_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);\n    EXPECT_CALL(*per_try_timeout_, enableTimer(_, _));\n    EXPECT_CALL(*per_try_timeout_, disableTimer());\n  }\n\n  void\n  run(uint64_t response_code,\n      const std::initializer_list<std::pair<std::string, std::string>>& request_headers_init,\n      const std::initializer_list<std::pair<std::string, std::string>>& response_headers_init,\n      const std::initializer_list<std::pair<std::string, std::string>>& response_trailers_init) {\n    NiceMock<Http::MockRequestEncoder> encoder;\n    Http::ResponseDecoder* response_decoder = nullptr;\n\n    EXPECT_CALL(context_.cluster_manager_.conn_pool_, newStream(_, _))\n        .WillOnce(Invoke(\n            [&](Http::ResponseDecoder& decoder,\n                Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n              response_decoder = &decoder;\n              EXPECT_CALL(encoder.stream_, connectionLocalAddress())\n                  .WillRepeatedly(ReturnRef(upstream_local_address1_));\n              callbacks.onPoolReady(encoder, context_.cluster_manager_.conn_pool_.host_,\n                                    stream_info_);\n              return nullptr;\n            }));\n    expectResponseTimerCreate();\n\n    Http::TestRequestHeaderMapImpl headers(request_headers_init);\n    HttpTestUtility::addDefaultHeaders(headers);\n    router_->decodeHeaders(headers, true);\n\n    EXPECT_CALL(*router_->retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n\n    Http::ResponseHeaderMapPtr 
response_headers(\n        new Http::TestResponseHeaderMapImpl(response_headers_init));\n    response_headers->setStatus(response_code);\n\n    EXPECT_CALL(context_.cluster_manager_.conn_pool_.host_->outlier_detector_,\n                putHttpResponseCode(response_code));\n    response_decoder->decodeHeaders(std::move(response_headers), false);\n\n    Http::ResponseTrailerMapPtr response_trailers(\n        new Http::TestResponseTrailerMapImpl(response_trailers_init));\n    response_decoder->decodeTrailers(std::move(response_trailers));\n  }\n\n  void run() { run(200, {}, {}, {}); }\n\n  void runWithRetry() {\n    NiceMock<Http::MockRequestEncoder> encoder1;\n    Http::ResponseDecoder* response_decoder = nullptr;\n\n    EXPECT_CALL(context_.cluster_manager_.conn_pool_, newStream(_, _))\n        .WillOnce(Invoke(\n            [&](Http::ResponseDecoder& decoder,\n                Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n              response_decoder = &decoder;\n              EXPECT_CALL(encoder1.stream_, connectionLocalAddress())\n                  .WillRepeatedly(ReturnRef(upstream_local_address1_));\n              callbacks.onPoolReady(encoder1, context_.cluster_manager_.conn_pool_.host_,\n                                    stream_info_);\n              return nullptr;\n            }));\n    expectPerTryTimerCreate();\n    expectResponseTimerCreate();\n\n    Http::TestRequestHeaderMapImpl headers{{\"x-envoy-retry-on\", \"5xx\"},\n                                           {\"x-envoy-internal\", \"true\"},\n                                           {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"5\"}};\n    HttpTestUtility::addDefaultHeaders(headers);\n    router_->decodeHeaders(headers, true);\n\n    router_->retry_state_->expectResetRetry();\n    EXPECT_CALL(context_.cluster_manager_.conn_pool_.host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n    
per_try_timeout_->invokeCallback();\n\n    // We expect this reset to kick off a new request.\n    NiceMock<Http::MockRequestEncoder> encoder2;\n    EXPECT_CALL(context_.cluster_manager_.conn_pool_, newStream(_, _))\n        .WillOnce(Invoke(\n            [&](Http::ResponseDecoder& decoder,\n                Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {\n              response_decoder = &decoder;\n              EXPECT_CALL(context_.cluster_manager_.conn_pool_.host_->outlier_detector_,\n                          putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess, _));\n              EXPECT_CALL(encoder2.stream_, connectionLocalAddress())\n                  .WillRepeatedly(ReturnRef(upstream_local_address2_));\n              callbacks.onPoolReady(encoder2, context_.cluster_manager_.conn_pool_.host_,\n                                    stream_info_);\n              return nullptr;\n            }));\n    expectPerTryTimerCreate();\n    router_->retry_state_->callback_();\n\n    // Normal response.\n    EXPECT_CALL(*router_->retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No));\n    Http::ResponseHeaderMapPtr response_headers(\n        new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n    EXPECT_CALL(context_.cluster_manager_.conn_pool_.host_->outlier_detector_,\n                putHttpResponseCode(200));\n    response_decoder->decodeHeaders(std::move(response_headers), true);\n  }\n\n  std::vector<std::string> output_;\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n\n  envoy::config::core::v3::Locality upstream_locality_;\n  Network::Address::InstanceConstSharedPtr host_address_{\n      Network::Utility::resolveUrl(\"tcp://10.0.0.5:9211\")};\n  Network::Address::InstanceConstSharedPtr upstream_local_address1_{\n      Network::Utility::resolveUrl(\"tcp://10.0.0.5:10211\")};\n  Network::Address::InstanceConstSharedPtr upstream_local_address2_{\n      
Network::Utility::resolveUrl(\"tcp://10.0.0.5:10212\")};\n  Event::MockTimer* response_timeout_{};\n  Event::MockTimer* per_try_timeout_{};\n\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  std::shared_ptr<FilterConfig> config_;\n  std::shared_ptr<TestFilter> router_;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n};\n\nTEST_F(RouterUpstreamLogTest, NoLogConfigured) {\n  init({});\n  run();\n\n  EXPECT_TRUE(output_.empty());\n}\n\nTEST_F(RouterUpstreamLogTest, LogSingleTry) {\n  init(testUpstreamLog());\n  run();\n\n  EXPECT_EQ(output_.size(), 1U);\n  EXPECT_EQ(output_.front(), \"GET / HTTP/1.0 200 - 0 0 host 10.0.0.5:9211 10.0.0.5:10211 - -\\n\");\n}\n\nTEST_F(RouterUpstreamLogTest, LogRetries) {\n  init(testUpstreamLog());\n  runWithRetry();\n\n  EXPECT_EQ(output_.size(), 2U);\n  EXPECT_EQ(output_.front(), \"GET / HTTP/1.0 0 UT 0 0 host 10.0.0.5:9211 10.0.0.5:10211 - -\\n\");\n  EXPECT_EQ(output_.back(), \"GET / HTTP/1.0 200 - 0 0 host 10.0.0.5:9211 10.0.0.5:10212 - -\\n\");\n}\n\nTEST_F(RouterUpstreamLogTest, LogFailure) {\n  init(testUpstreamLog());\n  run(503, {}, {}, {});\n\n  EXPECT_EQ(output_.size(), 1U);\n  EXPECT_EQ(output_.front(), \"GET / HTTP/1.0 503 - 0 0 host 10.0.0.5:9211 10.0.0.5:10211 - -\\n\");\n}\n\nTEST_F(RouterUpstreamLogTest, LogHeaders) {\n  init(testUpstreamLog());\n  run(200, {{\"x-envoy-original-path\", \"/foo\"}}, {{\"x-upstream-header\", \"abcdef\"}},\n      {{\"x-trailer\", \"value\"}});\n\n  EXPECT_EQ(output_.size(), 1U);\n  EXPECT_EQ(output_.front(),\n            \"GET /foo HTTP/1.0 200 - 0 0 host 10.0.0.5:9211 10.0.0.5:10211 abcdef value\\n\");\n}\n\n// Test timestamps and durations are emitted.\nTEST_F(RouterUpstreamLogTest, LogTimestampsAndDurations) {\n  const std::string yaml = R\"EOF(\nname: accesslog\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n  format: \"[%START_TIME%] %REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\n    %DURATION% 
%RESPONSE_DURATION% %REQUEST_DURATION%\"\n  path: \"/dev/null\"\n  )EOF\";\n\n  envoy::config::accesslog::v3::AccessLog upstream_log;\n  TestUtility::loadFromYaml(yaml, upstream_log);\n\n  init(absl::optional<envoy::config::accesslog::v3::AccessLog>(upstream_log));\n  run(200, {{\"x-envoy-original-path\", \"/foo\"}}, {}, {});\n\n  EXPECT_EQ(output_.size(), 1U);\n\n  // REQUEST_DURATION is \"-\" because it represents how long it took to receive the downstream\n  // request which is not known to the upstream request.\n  std::regex log_regex(\n      R\"EOF(^\\[(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2})\\.\\d{3}Z\\] GET /foo HTTP/1.0 \\d+ \\d+ -$)EOF\");\n  std::smatch matches;\n  EXPECT_TRUE(std::regex_match(output_.front(), matches, log_regex));\n\n  const absl::Time timestamp = TestUtility::parseTime(matches[1].str(), \"%Y-%m-%dT%H:%M:%S\");\n\n  std::time_t log_time = absl::ToTimeT(timestamp);\n  std::time_t now = std::time(nullptr);\n\n  // Check that timestamp is close enough.\n  EXPECT_LE(std::abs(std::difftime(log_time, now)), 300);\n}\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/scoped_config_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/scoped_route.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/router/scoped_config_impl.h\"\n\n#include \"test/mocks/router/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nusing ::Envoy::Http::TestRequestHeaderMapImpl;\nusing ::testing::NiceMock;\n\nclass FooFragment : public ScopeKeyFragmentBase {\npublic:\n  uint64_t hash() const override { return 1; }\n};\n\nTEST(ScopeKeyFragmentBaseTest, EqualSign) {\n  FooFragment foo;\n  StringKeyFragment bar(\"a random string\");\n\n  EXPECT_NE(foo, bar);\n}\n\nTEST(ScopeKeyFragmentBaseTest, HashStable) {\n  FooFragment foo1;\n  FooFragment foo2;\n\n  // Two FooFragments equal because their hash equals.\n  EXPECT_EQ(foo1, foo2);\n  EXPECT_EQ(foo1.hash(), foo2.hash());\n\n  // Hash value doesn't change.\n  StringKeyFragment a(\"abcdefg\");\n  auto hash_value = a.hash();\n  for (int i = 0; i < 100; ++i) {\n    EXPECT_EQ(hash_value, a.hash());\n    EXPECT_EQ(StringKeyFragment(\"abcdefg\").hash(), hash_value);\n  }\n}\n\nTEST(StringKeyFragmentTest, Empty) {\n  StringKeyFragment a(\"\");\n  StringKeyFragment b(\"\");\n  EXPECT_EQ(a, b);\n  EXPECT_EQ(a.hash(), b.hash());\n\n  StringKeyFragment non_empty(\"ABC\");\n\n  EXPECT_NE(a, non_empty);\n  EXPECT_NE(a.hash(), non_empty.hash());\n}\n\nTEST(StringKeyFragmentTest, Normal) {\n  StringKeyFragment str(\"Abc\");\n\n  StringKeyFragment same_str(\"Abc\");\n  EXPECT_EQ(str, same_str);\n\n  StringKeyFragment upper_cased_str(\"ABC\");\n  EXPECT_NE(str, upper_cased_str);\n\n  StringKeyFragment another_str(\"DEF\");\n  EXPECT_NE(str, another_str);\n}\n\nTEST(HeaderValueExtractorImplDeathTest, InvalidConfig) {\n  ScopedRoutes::ScopeKeyBuilder::FragmentBuilder config;\n  // Type not set, ASSERT only fails 
in debug mode.\n#if !defined(NDEBUG)\n  EXPECT_DEATH(HeaderValueExtractorImpl(std::move(config)), \"header_value_extractor is not set.\");\n#else\n  EXPECT_THROW_WITH_REGEX(HeaderValueExtractorImpl(std::move(config)), ProtoValidationException,\n                          \"HeaderValueExtractor extract_type not set.+\");\n#endif // !defined(NDEBUG)\n\n  // Index non-zero when element separator is an empty string.\n  std::string yaml_plain = R\"EOF(\n  header_value_extractor:\n   name: 'foo_header'\n   element_separator: ''\n   index: 1\n)EOF\";\n  TestUtility::loadFromYaml(yaml_plain, config);\n\n  EXPECT_THROW_WITH_REGEX(HeaderValueExtractorImpl(std::move(config)), ProtoValidationException,\n                          \"Index > 0 for empty string element separator.\");\n  // extract_type not set.\n  yaml_plain = R\"EOF(\n  header_value_extractor:\n   name: 'foo_header'\n   element_separator: ''\n)EOF\";\n  TestUtility::loadFromYaml(yaml_plain, config);\n\n  EXPECT_THROW_WITH_REGEX(HeaderValueExtractorImpl(std::move(config)), ProtoValidationException,\n                          \"HeaderValueExtractor extract_type not set.+\");\n}\n\nTEST(HeaderValueExtractorImplTest, HeaderExtractionByIndex) {\n  ScopedRoutes::ScopeKeyBuilder::FragmentBuilder config;\n  std::string yaml_plain = R\"EOF(\n  header_value_extractor:\n   name: 'foo_header'\n   element_separator: ','\n   index: 1\n)EOF\";\n\n  TestUtility::loadFromYaml(yaml_plain, config);\n  HeaderValueExtractorImpl extractor(std::move(config));\n  std::unique_ptr<ScopeKeyFragmentBase> fragment = extractor.computeFragment(\n      TestRequestHeaderMapImpl{{\"foo_header\", \"part-0,part-1:value_bluh\"}});\n\n  EXPECT_NE(fragment, nullptr);\n  EXPECT_EQ(*fragment, StringKeyFragment{\"part-1:value_bluh\"});\n\n  // No such header.\n  fragment = extractor.computeFragment(TestRequestHeaderMapImpl{{\"bar_header\", \"part-0\"}});\n  EXPECT_EQ(fragment, nullptr);\n\n  // Empty header value.\n  fragment = 
extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"\"},\n  });\n  EXPECT_EQ(fragment, nullptr);\n\n  // Index out of bound.\n  fragment = extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"part-0\"},\n  });\n  EXPECT_EQ(fragment, nullptr);\n\n  // Element is empty.\n  fragment = extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"part-0,,,bluh\"},\n  });\n  EXPECT_NE(fragment, nullptr);\n  EXPECT_EQ(*fragment, StringKeyFragment(\"\"));\n}\n\nTEST(HeaderValueExtractorImplTest, HeaderExtractionByKey) {\n  ScopedRoutes::ScopeKeyBuilder::FragmentBuilder config;\n  std::string yaml_plain = R\"EOF(\n  header_value_extractor:\n   name: 'foo_header'\n   element_separator: ';'\n   element:\n    key: 'bar'\n    separator: '=>'\n)EOF\";\n\n  TestUtility::loadFromYaml(yaml_plain, config);\n  HeaderValueExtractorImpl extractor(std::move(config));\n  std::unique_ptr<ScopeKeyFragmentBase> fragment =\n      extractor.computeFragment(TestRequestHeaderMapImpl{\n          {\"foo_header\", \"part-0;bar=>bluh;foo=>foo_value\"},\n      });\n\n  EXPECT_NE(fragment, nullptr);\n  EXPECT_EQ(*fragment, StringKeyFragment{\"bluh\"});\n\n  // No such header.\n  fragment = extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"bluh\", \"part-0;\"},\n  });\n  EXPECT_EQ(fragment, nullptr);\n\n  // Empty header value.\n  fragment = extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"\"},\n  });\n  EXPECT_EQ(fragment, nullptr);\n\n  // No such key.\n  fragment = extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"part-0\"},\n  });\n  EXPECT_EQ(fragment, nullptr);\n\n  // Empty value.\n  fragment = extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"bluh;;bar=>;foo=>last_value\"},\n  });\n  EXPECT_NE(fragment, nullptr);\n  EXPECT_EQ(*fragment, StringKeyFragment{\"\"});\n\n  // Duplicate values, the first value returned.\n  fragment = 
extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"bluh;;bar=>value1;bar=>value2;bluh;;bar=>last_value\"},\n  });\n  EXPECT_NE(fragment, nullptr);\n  EXPECT_EQ(*fragment, StringKeyFragment{\"value1\"});\n\n  // No separator in the element, value is set to empty string.\n  fragment = extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"bluh;;bar;bar=>value2;bluh;;bar=>last_value\"},\n  });\n  EXPECT_NE(fragment, nullptr);\n  EXPECT_EQ(*fragment, StringKeyFragment{\"\"});\n}\n\nTEST(HeaderValueExtractorImplTest, ElementSeparatorEmpty) {\n  ScopedRoutes::ScopeKeyBuilder::FragmentBuilder config;\n  std::string yaml_plain = R\"EOF(\n  header_value_extractor:\n   name: 'foo_header'\n   element_separator: ''\n   element:\n    key: 'bar'\n    separator: '='\n)EOF\";\n\n  TestUtility::loadFromYaml(yaml_plain, config);\n  HeaderValueExtractorImpl extractor(std::move(config));\n  std::unique_ptr<ScopeKeyFragmentBase> fragment =\n      extractor.computeFragment(TestRequestHeaderMapImpl{\n          {\"foo_header\", \"bar=b;c=d;e=f\"},\n      });\n  EXPECT_NE(fragment, nullptr);\n  EXPECT_EQ(*fragment, StringKeyFragment{\"b;c=d;e=f\"});\n\n  fragment = extractor.computeFragment(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"a=b;bar=d;e=f\"},\n  });\n  EXPECT_EQ(fragment, nullptr);\n}\n\n// Helper function which makes a ScopeKey from a list of strings.\nScopeKey makeKey(const std::vector<const char*>& parts) {\n  ScopeKey key;\n  for (const auto& part : parts) {\n    key.addFragment(std::make_unique<StringKeyFragment>(part));\n  }\n  return key;\n}\n\nTEST(ScopeKeyDeathTest, AddNullFragment) {\n  ScopeKey key;\n#if !defined(NDEBUG)\n  EXPECT_DEBUG_DEATH(key.addFragment(nullptr), \"null fragment not allowed in ScopeKey.\");\n#endif\n}\n\nTEST(ScopeKeyTest, Unmatches) {\n  ScopeKey key1;\n  ScopeKey key2;\n  // Empty key != empty key.\n  EXPECT_NE(key1, key2);\n\n  // Empty key != non-empty key.\n  EXPECT_NE(key1, 
makeKey({\"\"}));\n\n  EXPECT_EQ(makeKey({\"a\", \"b\", \"c\"}), makeKey({\"a\", \"b\", \"c\"}));\n\n  // Order matters.\n  EXPECT_EQ(makeKey({\"a\", \"b\", \"c\"}), makeKey({\"a\", \"b\", \"c\"}));\n  EXPECT_NE(makeKey({\"a\", \"c\", \"b\"}), makeKey({\"a\", \"b\", \"c\"}));\n\n  // Two keys of different length won't match.\n  EXPECT_NE(makeKey({\"a\", \"b\"}), makeKey({\"a\", \"b\", \"c\"}));\n\n  // Case sensitive.\n  EXPECT_NE(makeKey({\"a\", \"b\"}), makeKey({\"A\", \"b\"}));\n}\n\nTEST(ScopeKeyTest, Matches) {\n  // An empty string fragment equals another.\n  EXPECT_EQ(makeKey({\"\", \"\"}), makeKey({\"\", \"\"}));\n  EXPECT_EQ(makeKey({\"a\", \"\", \"\"}), makeKey({\"a\", \"\", \"\"}));\n\n  // Non empty fragments comparison.\n  EXPECT_EQ(makeKey({\"A\", \"b\"}), makeKey({\"A\", \"b\"}));\n}\n\nTEST(ScopeKeyBuilderImplTest, Parse) {\n  std::string yaml_plain = R\"EOF(\n  fragments:\n  - header_value_extractor:\n      name: 'foo_header'\n      element_separator: ','\n      element:\n        key: 'bar'\n        separator: '='\n  - header_value_extractor:\n      name: 'bar_header'\n      element_separator: ';'\n      index: 2\n)EOF\";\n\n  ScopedRoutes::ScopeKeyBuilder config;\n  TestUtility::loadFromYaml(yaml_plain, config);\n  ScopeKeyBuilderImpl key_builder(std::move(config));\n\n  ScopeKeyPtr key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"a=b,bar=bar_value,e=f\"},\n      {\"bar_header\", \"a=b;bar=bar_value;index2\"},\n  });\n  EXPECT_NE(key, nullptr);\n  EXPECT_EQ(*key, makeKey({\"bar_value\", \"index2\"}));\n\n  // Empty string fragment is fine.\n  key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"a=b,bar,e=f\"},\n      {\"bar_header\", \"a=b;bar=bar_value;\"},\n  });\n  EXPECT_NE(key, nullptr);\n  EXPECT_EQ(*key, makeKey({\"\", \"\"}));\n\n  // Key not found.\n  key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"a=b,meh,e=f\"},\n      
{\"bar_header\", \"a=b;bar=bar_value;\"},\n  });\n  EXPECT_EQ(key, nullptr);\n\n  // Index out of bound.\n  key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"a=b,bar=bar_value,e=f\"},\n      {\"bar_header\", \"a=b;bar=bar_value\"},\n  });\n  EXPECT_EQ(key, nullptr);\n\n  // Header missing.\n  key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"a=b,bar=bar_value,e=f\"},\n      {\"foobar_header\", \"a=b;bar=bar_value;index2\"},\n  });\n  EXPECT_EQ(key, nullptr);\n\n  // Header value empty.\n  key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"\"},\n      {\"bar_header\", \"a=b;bar=bar_value;index2\"},\n  });\n  EXPECT_EQ(key, nullptr);\n\n  // Case sensitive.\n  key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{\n      {\"foo_header\", \"a=b,Bar=bar_value,e=f\"},\n      {\"bar_header\", \"a=b;bar=bar_value;index2\"},\n  });\n  EXPECT_EQ(key, nullptr);\n}\n\nclass ScopedRouteInfoTest : public testing::Test {\npublic:\n  void SetUp() override {\n    std::string yaml_plain = R\"EOF(\n    name: foo_scope\n    route_configuration_name: foo_route\n    key:\n      fragments:\n        - string_key: foo\n        - string_key: bar\n)EOF\";\n    TestUtility::loadFromYaml(yaml_plain, scoped_route_config_);\n\n    route_config_ = std::make_shared<NiceMock<MockConfig>>();\n    route_config_->name_ = \"foo_route\";\n  }\n\n  envoy::config::route::v3::RouteConfiguration route_configuration_;\n  envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config_;\n  std::shared_ptr<MockConfig> route_config_;\n  std::unique_ptr<ScopedRouteInfo> info_;\n};\n\nTEST_F(ScopedRouteInfoTest, Creation) {\n  envoy::config::route::v3::ScopedRouteConfiguration config_copy = scoped_route_config_;\n  info_ = std::make_unique<ScopedRouteInfo>(std::move(scoped_route_config_), route_config_);\n  EXPECT_EQ(info_->routeConfig().get(), route_config_.get());\n  
EXPECT_TRUE(TestUtility::protoEqual(info_->configProto(), config_copy));\n  EXPECT_EQ(info_->scopeName(), \"foo_scope\");\n  EXPECT_EQ(info_->scopeKey(), makeKey({\"foo\", \"bar\"}));\n}\n\nclass ScopedConfigImplTest : public testing::Test {\npublic:\n  void SetUp() override {\n    std::string yaml_plain = R\"EOF(\n  fragments:\n  - header_value_extractor:\n      name: 'foo_header'\n      element_separator: ','\n      element:\n        key: 'bar'\n        separator: '='\n  - header_value_extractor:\n      name: 'bar_header'\n      element_separator: ';'\n      index: 2\n)EOF\";\n    TestUtility::loadFromYaml(yaml_plain, key_builder_config_);\n\n    scope_info_a_ = makeScopedRouteInfo(R\"EOF(\n    name: foo_scope\n    route_configuration_name: foo_route\n    key:\n      fragments:\n        - string_key: foo\n        - string_key: bar\n)EOF\");\n    scope_info_a_v2_ = makeScopedRouteInfo(R\"EOF(\n    name: foo_scope\n    route_configuration_name: foo_route\n    key:\n      fragments:\n        - string_key: xyz\n        - string_key: xyz\n)EOF\");\n    scope_info_b_ = makeScopedRouteInfo(R\"EOF(\n    name: bar_scope\n    route_configuration_name: bar_route\n    key:\n      fragments:\n        - string_key: bar\n        - string_key: baz\n)EOF\");\n  }\n  std::shared_ptr<ScopedRouteInfo> makeScopedRouteInfo(const std::string& route_config_yaml) {\n    envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config;\n    TestUtility::loadFromYaml(route_config_yaml, scoped_route_config);\n\n    std::shared_ptr<MockConfig> route_config = std::make_shared<NiceMock<MockConfig>>();\n    route_config->name_ = scoped_route_config.route_configuration_name();\n    return std::make_shared<ScopedRouteInfo>(std::move(scoped_route_config),\n                                             std::move(route_config));\n  }\n\n  std::shared_ptr<ScopedRouteInfo> scope_info_a_;\n  std::shared_ptr<ScopedRouteInfo> scope_info_a_v2_;\n  std::shared_ptr<ScopedRouteInfo> scope_info_b_;\n  
ScopedRoutes::ScopeKeyBuilder key_builder_config_;\n  std::unique_ptr<ScopedConfigImpl> scoped_config_impl_;\n};\n\n// Test a ScopedConfigImpl returns the correct route Config.\nTEST_F(ScopedConfigImplTest, PickRoute) {\n  scoped_config_impl_ = std::make_unique<ScopedConfigImpl>(std::move(key_builder_config_));\n  scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_a_});\n  scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_b_});\n\n  // Key (foo, bar) maps to scope_info_a_.\n  ConfigConstSharedPtr route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{\n      {\"foo_header\", \",,key=value,bar=foo,\"},\n      {\"bar_header\", \";val1;bar;val3\"},\n  });\n  EXPECT_EQ(route_config, scope_info_a_->routeConfig());\n\n  // Key (bar, baz) maps to scope_info_b_.\n  route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{\n      {\"foo_header\", \",,key=value,bar=bar,\"},\n      {\"bar_header\", \";val1;baz;val3\"},\n  });\n  EXPECT_EQ(route_config, scope_info_b_->routeConfig());\n\n  // No such key (bar, NOT_BAZ).\n  route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{\n      {\"foo_header\", \",key=value,bar=bar,\"},\n      {\"bar_header\", \";val1;NOT_BAZ;val3\"},\n  });\n  EXPECT_EQ(route_config, nullptr);\n}\n\n// Test a ScopedConfigImpl returns the correct route Config before and after scope config update.\nTEST_F(ScopedConfigImplTest, Update) {\n  scoped_config_impl_ = std::make_unique<ScopedConfigImpl>(std::move(key_builder_config_));\n\n  TestRequestHeaderMapImpl headers{\n      {\"foo_header\", \",,key=value,bar=foo,\"},\n      {\"bar_header\", \";val1;bar;val3\"},\n  };\n  // Empty ScopeConfig.\n  EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr);\n\n  // Add scope_key (bar, baz).\n  scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_b_});\n  // scope_info_a_ not found\n  EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr);\n  // scope_info_b_ 
found\n  EXPECT_EQ(scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{\n                {\"foo_header\", \",,key=v,bar=bar,\"}, {\"bar_header\", \";val1;baz\"}}),\n            scope_info_b_->routeConfig());\n\n  // Add scope_key (foo, bar).\n  scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_a_});\n  // Found scope_info_a_.\n  EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), scope_info_a_->routeConfig());\n\n  // Update scope foo_scope.\n  scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_a_v2_});\n  EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr);\n\n  // foo_scope now is keyed by (xyz, xyz).\n  EXPECT_EQ(scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{\n                {\"foo_header\", \",bar=xyz,foo=bar\"}, {\"bar_header\", \";;xyz\"}}),\n            scope_info_a_v2_->routeConfig());\n\n  // Remove scope \"foo_scope\".\n  scoped_config_impl_->removeRoutingScopes({\"foo_scope\"});\n  // scope_info_a_ is gone.\n  EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr);\n\n  // Now delete some non-existent scopes.\n  EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScopes(\n      {\"foo_scope1\", \"base_scope\", \"bluh_scope\", \"xyz_scope\"}));\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/scoped_rds_test.cc",
    "content": "#include <string>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/admin/v3/config_dump.pb.validate.h\"\n#include \"envoy/api/v2/route.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/scoped_route.pb.h\"\n#include \"envoy/config/route/v3/scoped_route.pb.validate.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/init/manager.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/config/grpc_mux_impl.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/router/scoped_rds.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/strings/substitute.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::AnyNumber;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::IsNull;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nusing ::Envoy::Http::TestRequestHeaderMapImpl;\n\nenvoy::config::route::v3::ScopedRouteConfiguration\nparseScopedRouteConfigurationFromYaml(const std::string& yaml) {\n  envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config;\n  TestUtility::loadFromYaml(yaml, scoped_route_config, true);\n  return scoped_route_config;\n}\n\nenvoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\nparseHttpConnectionManagerFromYaml(const std::string& 
config_yaml) {\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      http_connection_manager;\n  TestUtility::loadFromYaml(config_yaml, http_connection_manager, true);\n  return http_connection_manager;\n}\n\nclass ScopedRoutesTestBase : public testing::Test {\nprotected:\n  ScopedRoutesTestBase() {\n    ON_CALL(server_factory_context_, messageValidationContext())\n        .WillByDefault(ReturnRef(validation_context_));\n    EXPECT_CALL(validation_context_, dynamicValidationVisitor())\n        .WillRepeatedly(ReturnRef(ProtobufMessage::getStrictValidationVisitor()));\n\n    EXPECT_CALL(server_factory_context_.admin_.config_tracker_, add_(\"routes\", _));\n    route_config_provider_manager_ =\n        std::make_unique<RouteConfigProviderManagerImpl>(server_factory_context_.admin_);\n\n    EXPECT_CALL(server_factory_context_.admin_.config_tracker_, add_(\"route_scopes\", _));\n    config_provider_manager_ = std::make_unique<ScopedRoutesConfigProviderManager>(\n        server_factory_context_.admin_, *route_config_provider_manager_);\n  }\n\n  ~ScopedRoutesTestBase() override { server_factory_context_.thread_local_.shutdownThread(); }\n\n  // The delta style API helper.\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>\n  anyToResource(Protobuf::RepeatedPtrField<ProtobufWkt::Any>& resources,\n                const std::string& version) {\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> added_resources;\n    for (const auto& resource_any : resources) {\n      auto config =\n          TestUtility::anyConvert<envoy::config::route::v3::ScopedRouteConfiguration>(resource_any);\n      auto* to_add = added_resources.Add();\n      to_add->set_name(config.name());\n      to_add->set_version(version);\n      to_add->mutable_resource()->PackFrom(config);\n    }\n    return added_resources;\n  }\n\n  Event::SimulatedTimeSystem& timeSystem() { return time_system_; }\n\n  
NiceMock<Init::MockManager> context_init_manager_;\n  NiceMock<ProtobufMessage::MockValidationContext> validation_context_;\n  // server_factory_context_ is used by rds\n  NiceMock<Server::Configuration::MockServerFactoryContext> server_factory_context_;\n  RouteConfigProviderManagerPtr route_config_provider_manager_;\n  ScopedRoutesConfigProviderManagerPtr config_provider_manager_;\n\n  Event::SimulatedTimeSystem time_system_;\n\n  NiceMock<Event::MockDispatcher> event_dispatcher_;\n};\n\nclass ScopedRdsTest : public ScopedRoutesTestBase {\nprotected:\n  void setup() {\n    ON_CALL(server_factory_context_.cluster_manager_, adsMux())\n        .WillByDefault(Return(std::make_shared<::Envoy::Config::NullGrpcMuxImpl>()));\n\n    InSequence s;\n    // Since server_factory_context_.cluster_manager_.subscription_factory_.callbacks_ is taken by\n    // the SRDS subscription. We need to return a different MockSubscription here for each RDS\n    // subscription. To build the map from RDS route_config_name to the RDS subscription, we need to\n    // get the route_config_name by mocking start() on the Config::Subscription.\n\n    // srds subscription\n    EXPECT_CALL(server_factory_context_.cluster_manager_.subscription_factory_,\n                subscriptionFromConfigSource(_, _, _, _, _))\n        .Times(AnyNumber());\n    // rds subscription\n    EXPECT_CALL(\n        server_factory_context_.cluster_manager_.subscription_factory_,\n        subscriptionFromConfigSource(\n            _,\n            Eq(Grpc::Common::typeUrl(\n                API_NO_BOOST(envoy::api::v2::RouteConfiguration)().GetDescriptor()->full_name())),\n            _, _, _))\n        .Times(AnyNumber())\n        .WillRepeatedly(\n            Invoke([this](const envoy::config::core::v3::ConfigSource&, absl::string_view,\n                          Stats::Scope&, Envoy::Config::SubscriptionCallbacks& callbacks,\n                          Envoy::Config::OpaqueResourceDecoder&) {\n              auto ret = 
std::make_unique<NiceMock<Envoy::Config::MockSubscription>>();\n              rds_subscription_by_config_subscription_[ret.get()] = &callbacks;\n              EXPECT_CALL(*ret, start(_, _))\n                  .WillOnce(Invoke([this, config_sub_addr = ret.get()](\n                                       const std::set<std::string>& resource_names, const bool) {\n                    EXPECT_EQ(resource_names.size(), 1);\n                    auto iter = rds_subscription_by_config_subscription_.find(config_sub_addr);\n                    EXPECT_NE(iter, rds_subscription_by_config_subscription_.end());\n                    rds_subscription_by_name_[*resource_names.begin()] = iter->second;\n                  }));\n              return ret;\n            }));\n\n    ON_CALL(context_init_manager_, add(_)).WillByDefault(Invoke([this](const Init::Target& target) {\n      target_handles_.push_back(target.createHandle(\"test\"));\n    }));\n    ON_CALL(context_init_manager_, initialize(_))\n        .WillByDefault(Invoke([this](const Init::Watcher& watcher) {\n          for (auto& handle_ : target_handles_) {\n            handle_->initialize(watcher);\n          }\n        }));\n\n    const std::string config_yaml = R\"EOF(\nname: foo_scoped_routes\nscope_key_builder:\n  fragments:\n    - header_value_extractor:\n        name: Addr\n        element:\n          key: x-foo-key\n          separator: ;\n)EOF\";\n    envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes\n        scoped_routes_config;\n    TestUtility::loadFromYaml(config_yaml, scoped_routes_config);\n    provider_ = config_provider_manager_->createXdsConfigProvider(\n        scoped_routes_config.scoped_rds(), server_factory_context_, context_init_manager_, \"foo.\",\n        ScopedRoutesConfigProviderManagerOptArg(scoped_routes_config.name(),\n                                                scoped_routes_config.rds_config_source(),\n                                                
scoped_routes_config.scope_key_builder()));\n    srds_subscription_ = server_factory_context_.cluster_manager_.subscription_factory_.callbacks_;\n  }\n\n  void srdsUpdateWithYaml(std::vector<std::string> const& config_yamls,\n                          std::string const& version) {\n    std::vector<envoy::config::route::v3::ScopedRouteConfiguration> resources;\n    resources.reserve(config_yamls.size());\n    for (std::string const& config_yaml : config_yamls) {\n      resources.push_back(parseScopedRouteConfigurationFromYaml(config_yaml));\n    }\n    const auto decoded_resources = TestUtility::decodeResources(resources);\n    EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, version));\n  }\n\n  // Helper function which pushes an update to given RDS subscription, the start(_) of the\n  // subscription must have been called.\n  void pushRdsConfig(const std::vector<std::string>& route_config_names,\n                     const std::string& version) {\n    const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: test\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/\" }}\n          route: {{ cluster: bluh }}\n)EOF\";\n    for (const std::string& name : route_config_names) {\n      const auto route_config =\n          TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(\n              fmt::format(route_config_tmpl, name));\n      const auto decoded_resources = TestUtility::decodeResources({route_config});\n      if (rds_subscription_by_name_.find(name) == rds_subscription_by_name_.end()) {\n        continue;\n      }\n      rds_subscription_by_name_[name]->onConfigUpdate(decoded_resources.refvec_, version);\n    }\n  }\n\n  ScopedRdsConfigProvider* getScopedRdsProvider() const {\n    return dynamic_cast<ScopedRdsConfigProvider*>(provider_.get());\n  }\n  // Helper function which returns the ScopedRouteMap of the subscription.\n  const ScopedRouteMap& 
getScopedRouteMap() const {\n    return getScopedRdsProvider()->subscription().scopedRouteMap();\n  }\n\n  Envoy::Config::SubscriptionCallbacks* srds_subscription_{};\n  Envoy::Config::ConfigProviderPtr provider_;\n  std::list<Init::TargetHandlePtr> target_handles_;\n  Init::ExpectableWatcherImpl init_watcher_;\n\n  // RDS mocks.\n  absl::flat_hash_map<Envoy::Config::Subscription*, Envoy::Config::SubscriptionCallbacks*>\n      rds_subscription_by_config_subscription_;\n  absl::flat_hash_map<std::string, Envoy::Config::SubscriptionCallbacks*> rds_subscription_by_name_;\n\n  Envoy::Stats::Gauge& all_scopes_{server_factory_context_.scope_.gauge(\n      \"foo.scoped_rds.foo_scoped_routes.all_scopes\", Stats::Gauge::ImportMode::Accumulate)};\n  Envoy::Stats::Gauge& active_scopes_{server_factory_context_.scope_.gauge(\n      \"foo.scoped_rds.foo_scoped_routes.active_scopes\", Stats::Gauge::ImportMode::Accumulate)};\n  Envoy::Stats::Gauge& on_demand_scopes_{server_factory_context_.scope_.gauge(\n      \"foo.scoped_rds.foo_scoped_routes.on_demand_scopes\", Stats::Gauge::ImportMode::Accumulate)};\n};\n\n// Tests that multiple uniquely named non-conflict resources are allowed in config updates.\nTEST_F(ScopedRdsTest, MultipleResourcesSotw) {\n  setup();\n\n  const std::string config_yaml = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const std::string config_yaml2 = R\"EOF(\nname: foo_scope2\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n  const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2);\n  init_watcher_.expectReady(); // Only the SRDS parent_init_target_.\n  context_init_manager_.initialize(init_watcher_);\n  const auto decoded_resources = TestUtility::decodeResources({resource, resource_2});\n  
EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, \"1\"));\n  EXPECT_EQ(1UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  EXPECT_EQ(2UL, all_scopes_.value());\n  EXPECT_EQ(2UL, active_scopes_.value());\n\n  // Verify the config is a ScopedConfigImpl instance, both scopes point to \"\" as RDS hasn't kicked\n  // in yet(NullConfigImpl returned).\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}})\n                ->name(),\n            \"\");\n  // RDS updates foo_routes.\n  pushRdsConfig({\"foo_routes\"}, \"111\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}})\n                ->name(),\n            \"foo_routes\");\n\n  // Delete foo_scope2.\n  const auto decoded_resources_2 = TestUtility::decodeResources({resource});\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, \"3\"));\n  EXPECT_EQ(1UL, all_scopes_.value());\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope\"), 1);\n  EXPECT_EQ(2UL,\n            
server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  // now scope key \"x-bar-key\" points to nowhere.\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}}),\n              IsNull());\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n}\n\n// Tests that multiple uniquely named non-conflict resources are allowed in config updates.\nTEST_F(ScopedRdsTest, MultipleResourcesDelta) {\n  setup();\n  init_watcher_.expectReady();\n  const std::string config_yaml = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const std::string config_yaml2 = R\"EOF(\nname: foo_scope2\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n  const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2);\n\n  // Delta API.\n  const auto decoded_resources = TestUtility::decodeResources({resource, resource_2});\n  context_init_manager_.initialize(init_watcher_);\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, \"1\"));\n  EXPECT_EQ(1UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  EXPECT_EQ(2UL, all_scopes_.value());\n\n  // Verify the config is a ScopedConfigImpl instance, both scopes point to \"\" as RDS hasn't kicked\n  // in yet(NullConfigImpl returned).\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  
EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}})\n                ->name(),\n            \"\");\n  // RDS updates foo_routes.\n  pushRdsConfig({\"foo_routes\"}, \"111\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}})\n                ->name(),\n            \"foo_routes\");\n\n  // Delete foo_scope2.\n  Protobuf::RepeatedPtrField<std::string> deletes;\n  *deletes.Add() = \"foo_scope2\";\n  const auto decoded_resources_2 = TestUtility::decodeResources({resource});\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, deletes, \"2\"));\n  EXPECT_EQ(1UL, all_scopes_.value());\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope\"), 1);\n  EXPECT_EQ(2UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  // now scope key \"x-bar-key\" points to nowhere.\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}}),\n              IsNull());\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n         
   \"foo_routes\");\n}\n\n// Tests that conflict resources in the same push are detected.\nTEST_F(ScopedRdsTest, MultipleResourcesWithKeyConflictSotW) {\n  setup();\n\n  const std::string config_yaml = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const std::string config_yaml2 = R\"EOF(\nname: foo_scope2\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2);\n  init_watcher_.expectReady().Times(0); // The onConfigUpdate will simply throw an exception.\n  context_init_manager_.initialize(init_watcher_);\n  const auto decoded_resources = TestUtility::decodeResources({resource, resource_2});\n  EXPECT_THROW_WITH_REGEX(\n      srds_subscription_->onConfigUpdate(decoded_resources.refvec_, \"1\"), EnvoyException,\n      \".*scope key conflict found, first scope is 'foo_scope', second scope is 'foo_scope2'\");\n  EXPECT_EQ(\n      // Fully rejected.\n      0UL, server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n               .value());\n  // Scope key \"x-foo-key\" points to nowhere.\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}}),\n              IsNull());\n  EXPECT_EQ(server_factory_context_.scope_.counter(\"foo.rds.foo_routes.config_reload\").value(),\n            0UL);\n}\n\n// Tests that conflict resources in the same push are detected in delta api form.\nTEST_F(ScopedRdsTest, MultipleResourcesWithKeyConflictDelta) {\n  setup();\n\n  const std::string config_yaml = R\"EOF(\nname: 
foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const std::string config_yaml2 = R\"EOF(\nname: foo_scope2\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2);\n  init_watcher_.expectReady().Times(0); // The onConfigUpdate will simply throw an exception.\n  context_init_manager_.initialize(init_watcher_);\n\n  const auto decoded_resources = TestUtility::decodeResources({resource, resource_2});\n  EXPECT_THROW_WITH_REGEX(\n      srds_subscription_->onConfigUpdate(decoded_resources.refvec_, \"1\"), EnvoyException,\n      \".*scope key conflict found, first scope is 'foo_scope', second scope is 'foo_scope2'\");\n  EXPECT_EQ(\n      // Fully rejected.\n      0UL, server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n               .value());\n  // Scope key \"x-foo-key\" points to nowhere.\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}}),\n              IsNull());\n  EXPECT_EQ(server_factory_context_.scope_.counter(\"foo.rds.foo_routes.config_reload\").value(),\n            0UL);\n}\n\n// Tests that scope-key conflict resources in different config updates are handled correctly.\nTEST_F(ScopedRdsTest, ScopeKeyReuseInDifferentPushes) {\n  setup();\n\n  const std::string config_yaml1 = R\"EOF(\nname: foo_scope1\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const std::string config_yaml2 = R\"EOF(\nname: foo_scope2\nroute_configuration_name: bar_routes\nkey:\n  fragments:\n    - 
string_key: x-bar-key\n)EOF\";\n  const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml1);\n  const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2);\n  const auto decoded_resources = TestUtility::decodeResources({resource, resource_2});\n  init_watcher_.expectReady();\n  context_init_manager_.initialize(init_watcher_);\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, \"1\"));\n  EXPECT_EQ(1UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  // Scope key \"x-foo-key\" points to nowhere.\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  // No RDS \"foo_routes\" config push happened yet, Router::NullConfig is returned.\n  EXPECT_THAT(getScopedRdsProvider()\n                  ->config<ScopedConfigImpl>()\n                  ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                  ->name(),\n              \"\");\n  pushRdsConfig({\"foo_routes\", \"bar_routes\"}, \"111\");\n  EXPECT_EQ(server_factory_context_.scope_.counter(\"foo.rds.foo_routes.config_reload\").value(),\n            1UL);\n  EXPECT_EQ(server_factory_context_.scope_.counter(\"foo.rds.bar_routes.config_reload\").value(),\n            1UL);\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n\n  const std::string config_yaml3 = R\"EOF(\nname: foo_scope3\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n\n  // Remove foo_scope1 and add a new scope3 reuses the same scope_key.\n  const auto resource_3 = parseScopedRouteConfigurationFromYaml(config_yaml3);\n  const auto decoded_resources_2 = 
TestUtility::decodeResources({resource_2, resource_3});\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, \"2\"));\n  EXPECT_EQ(2UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  // foo_scope is deleted, and foo_scope2 is added.\n  EXPECT_EQ(all_scopes_.value(), 2UL);\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope1\"), 0);\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope2\"), 1);\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope3\"), 1);\n  // The same scope-key now points to the same route table.\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n\n  // Push a new scope foo_scope4 with the same key as foo_scope2 but a different route-table, this\n  // ends in an exception.\n  const std::string config_yaml4 = R\"EOF(\nname: foo_scope4\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n  const auto resource_4 = parseScopedRouteConfigurationFromYaml(config_yaml4);\n  const auto decoded_resources_3 =\n      TestUtility::decodeResources({resource_2, resource_3, resource_4});\n  EXPECT_THROW_WITH_REGEX(\n      srds_subscription_->onConfigUpdate(decoded_resources_3.refvec_, \"3\"), EnvoyException,\n      \"scope key conflict found, first scope is 'foo_scope2', second scope is 'foo_scope4'\");\n  EXPECT_EQ(2UL, all_scopes_.value());\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope1\"), 0);\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope2\"), 1);\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope3\"), 1);\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}})\n                ->name(),\n      
      \"bar_routes\");\n\n  // Delete foo_scope2, and push a new foo_scope4 with the same scope key but different route-table.\n  const auto decoded_resources_4 = TestUtility::decodeResources({resource_3, resource_4});\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_4.refvec_, \"4\"));\n  EXPECT_EQ(server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value(),\n            3UL);\n  EXPECT_EQ(2UL, all_scopes_.value());\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope3\"), 1);\n  EXPECT_EQ(getScopedRouteMap().count(\"foo_scope4\"), 1);\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}})\n                ->name(),\n            \"foo_routes\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n}\n\n// Tests that only one resource is provided during a config update.\nTEST_F(ScopedRdsTest, InvalidDuplicateResourceSotw) {\n  setup();\n  init_watcher_.expectReady().Times(\n      0); // parent_init_target_ ready will be called by onConfigUpdateFailed\n  context_init_manager_.initialize(init_watcher_);\n\n  const std::string config_yaml = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const auto decoded_resources = TestUtility::decodeResources({resource, resource});\n  EXPECT_THROW_WITH_MESSAGE(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, \"1\"),\n                            EnvoyException,\n                            \"Error adding/updating scoped route(s): duplicate scoped route \"\n                            
\"configuration 'foo_scope' found\");\n}\n\n// Tests duplicate resources in the same update, should be fully rejected.\nTEST_F(ScopedRdsTest, InvalidDuplicateResourceDelta) {\n  setup();\n  init_watcher_.expectReady().Times(0);\n  context_init_manager_.initialize(init_watcher_);\n\n  const std::string config_yaml = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const auto decoded_resources = TestUtility::decodeResources({resource, resource});\n  EXPECT_THROW_WITH_MESSAGE(\n      srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, \"1\"), EnvoyException,\n      \"Error adding/updating scoped route(s): duplicate scoped route configuration 'foo_scope' \"\n      \"found\");\n  EXPECT_EQ(\n      // Fully rejected.\n      0UL, server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n               .value());\n  // Scope key \"x-foo-key\" points to nowhere.\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}}),\n              IsNull());\n  EXPECT_EQ(server_factory_context_.scope_.counter(\"foo.rds.foo_routes.config_reload\").value(),\n            0UL);\n}\n\n// Tests a config update failure.\nTEST_F(ScopedRdsTest, ConfigUpdateFailure) {\n  setup();\n\n  const auto time = std::chrono::milliseconds(1234567891234);\n  timeSystem().setSystemTime(time);\n  const EnvoyException ex(fmt::format(\"config failure\"));\n  // Verify the failure updates the lastUpdated() timestamp.\n  srds_subscription_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected,\n                                           &ex);\n  
EXPECT_EQ(std::chrono::time_point_cast<std::chrono::milliseconds>(provider_->lastUpdated())\n                .time_since_epoch(),\n            time);\n}\n\n// Tests that the /config_dump handler returns the corresponding scoped routing\n// config.\nTEST_F(ScopedRdsTest, ConfigDump) {\n  setup();\n  init_watcher_.expectReady();\n  context_init_manager_.initialize(init_watcher_);\n  auto message_ptr =\n      server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"route_scopes\"]();\n  const auto& scoped_routes_config_dump =\n      TestUtility::downcastAndValidate<const envoy::admin::v3::ScopedRoutesConfigDump&>(\n          *message_ptr);\n\n  // No routes at all(no SRDS push yet), no last_updated timestamp\n  envoy::admin::v3::ScopedRoutesConfigDump expected_config_dump;\n  TestUtility::loadFromYaml(R\"EOF(\ninline_scoped_route_configs:\ndynamic_scoped_route_configs:\n)EOF\",\n                            expected_config_dump);\n  EXPECT_TRUE(TestUtility::protoEqual(expected_config_dump, scoped_routes_config_dump));\n\n  timeSystem().setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  const std::string hcm_base_config_yaml = R\"EOF(\ncodec_type: auto\nstat_prefix: foo\nhttp_filters:\n  - name: http_dynamo_filter\n    config:\nscoped_routes:\n  name: $0\n  scope_key_builder:\n    fragments:\n      - header_value_extractor:\n          name: Addr\n          index: 0\n$1\n)EOF\";\n  const std::string inline_scoped_route_configs_yaml = R\"EOF(\n  scoped_route_configurations_list:\n    scoped_route_configurations:\n      - name: foo\n        route_configuration_name: foo-route-config\n        key:\n          fragments: { string_key: \"172.10.10.10\" }\n      - name: foo2\n        route_configuration_name: foo-route-config2\n        key:\n          fragments: { string_key: \"172.10.10.20\" }\n)EOF\";\n  // Only load the inline scopes.\n  Envoy::Config::ConfigProviderPtr inline_config = ScopedRoutesConfigProviderUtil::create(\n      
parseHttpConnectionManagerFromYaml(absl::Substitute(hcm_base_config_yaml, \"foo-scoped-routes\",\n                                                          inline_scoped_route_configs_yaml)),\n      server_factory_context_, context_init_manager_, \"foo.\", *config_provider_manager_);\n  message_ptr =\n      server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"route_scopes\"]();\n  const auto& scoped_routes_config_dump2 =\n      TestUtility::downcastAndValidate<const envoy::admin::v3::ScopedRoutesConfigDump&>(\n          *message_ptr);\n  TestUtility::loadFromYaml(R\"EOF(\ninline_scoped_route_configs:\n  - name: foo-scoped-routes\n    scoped_route_configs:\n     - name: foo\n       \"@type\": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration\n       route_configuration_name: foo-route-config\n       key:\n         fragments: { string_key: \"172.10.10.10\" }\n     - name: foo2\n       \"@type\": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration\n       route_configuration_name: foo-route-config2\n       key:\n         fragments: { string_key: \"172.10.10.20\" }\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\ndynamic_scoped_route_configs:\n)EOF\",\n                            expected_config_dump);\n  EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump2));\n\n  // Now SRDS kicks off.\n  Protobuf::RepeatedPtrField<ProtobufWkt::Any> resources;\n  const auto resource = parseScopedRouteConfigurationFromYaml(R\"EOF(\nname: dynamic-foo\nroute_configuration_name: dynamic-foo-route-config\nkey:\n  fragments: { string_key: \"172.30.30.10\" }\n)EOF\");\n\n  timeSystem().setSystemTime(std::chrono::milliseconds(1234567891567));\n  const auto decoded_resources = TestUtility::decodeResources({resource});\n  srds_subscription_->onConfigUpdate(decoded_resources.refvec_, \"1\");\n\n  TestUtility::loadFromYaml(R\"EOF(\ninline_scoped_route_configs:\n  - name: foo-scoped-routes\n    scoped_route_configs:\n     - 
name: foo\n       \"@type\": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration\n       route_configuration_name: foo-route-config\n       key:\n         fragments: { string_key: \"172.10.10.10\" }\n     - name: foo2\n       \"@type\": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration\n       route_configuration_name: foo-route-config2\n       key:\n         fragments: { string_key: \"172.10.10.20\" }\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\ndynamic_scoped_route_configs:\n  - name: foo_scoped_routes\n    scoped_route_configs:\n      - name: dynamic-foo\n        \"@type\": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration\n        route_configuration_name: dynamic-foo-route-config\n        key:\n          fragments: { string_key: \"172.30.30.10\" }\n    last_updated:\n      seconds: 1234567891\n      nanos: 567000000\n    version_info: \"1\"\n)EOF\",\n                            expected_config_dump);\n  message_ptr =\n      server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"route_scopes\"]();\n  const auto& scoped_routes_config_dump3 =\n      TestUtility::downcastAndValidate<const envoy::admin::v3::ScopedRoutesConfigDump&>(\n          *message_ptr);\n  EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump3));\n\n  srds_subscription_->onConfigUpdate({}, \"2\");\n  TestUtility::loadFromYaml(R\"EOF(\ninline_scoped_route_configs:\n  - name: foo-scoped-routes\n    scoped_route_configs:\n     - name: foo\n       \"@type\": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration\n       route_configuration_name: foo-route-config\n       key:\n         fragments: { string_key: \"172.10.10.10\" }\n     - name: foo2\n       \"@type\": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration\n       route_configuration_name: foo-route-config2\n       key:\n         fragments: { string_key: \"172.10.10.20\" }\n    last_updated:\n      seconds: 1234567891\n      nanos: 
234000000\ndynamic_scoped_route_configs:\n  - name: foo_scoped_routes\n    last_updated:\n      seconds: 1234567891\n      nanos: 567000000\n    version_info: \"2\"\n)EOF\",\n                            expected_config_dump);\n  message_ptr =\n      server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_[\"route_scopes\"]();\n  const auto& scoped_routes_config_dump4 =\n      TestUtility::downcastAndValidate<const envoy::admin::v3::ScopedRoutesConfigDump&>(\n          *message_ptr);\n  EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump4));\n}\n\n// Tests that SRDS only allows creation of delta static config providers.\nTEST_F(ScopedRdsTest, DeltaStaticConfigProviderOnly) {\n  // Use match all regex due to lack of distinctive matchable output for\n  // coverage test.\n  EXPECT_DEATH(config_provider_manager_->createStaticConfigProvider(\n                   parseScopedRouteConfigurationFromYaml(R\"EOF(\nname: dynamic-foo\nroute_configuration_name: static-foo-route-config\nkey:\n  fragments: { string_key: \"172.30.30.10\" }\n)EOF\"),\n                   server_factory_context_,\n                   Envoy::Config::ConfigProviderManager::NullOptionalArg()),\n               \".*\");\n}\n\n// Tests whether scope key conflict with updated scopes is ignored.\nTEST_F(ScopedRdsTest, IgnoreConflictWithUpdatedScopeDelta) {\n  setup();\n  const std::string config_yaml = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const std::string config_yaml2 = R\"EOF(\nname: bar_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n  const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2);\n\n  // Delta API.\n  const auto decoded_resources = TestUtility::decodeResources({resource, resource_2});\n  
context_init_manager_.initialize(init_watcher_);\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, \"1\"));\n  EXPECT_EQ(1UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  EXPECT_EQ(2UL, all_scopes_.value());\n\n  const std::string config_yaml3 = R\"EOF(\nname: bar_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource_3 = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const std::string config_yaml4 = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n  const auto resource_4 = parseScopedRouteConfigurationFromYaml(config_yaml2);\n  const auto decoded_resources_2 = TestUtility::decodeResources({resource_3, resource_4});\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, {}, \"2\"));\n  EXPECT_EQ(2UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  EXPECT_EQ(2UL, all_scopes_.value());\n}\n\n// Tests whether scope key conflict with updated scopes is ignored.\nTEST_F(ScopedRdsTest, IgnoreConflictWithUpdatedScopeSotW) {\n  setup();\n  const std::string config_yaml = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const std::string config_yaml2 = R\"EOF(\nname: bar_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n  const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2);\n\n  // Delta API.\n  const auto decoded_resources = TestUtility::decodeResources({resource, resource_2});\n  context_init_manager_.initialize(init_watcher_);\n  
EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, \"1\"));\n  EXPECT_EQ(1UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  EXPECT_EQ(2UL, all_scopes_.value());\n\n  const std::string config_yaml3 = R\"EOF(\nname: bar_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  const auto resource_3 = parseScopedRouteConfigurationFromYaml(config_yaml);\n  const std::string config_yaml4 = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n  const auto resource_4 = parseScopedRouteConfigurationFromYaml(config_yaml2);\n  const auto decoded_resources_2 = TestUtility::decodeResources({resource_3, resource_4});\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, \"2\"));\n  EXPECT_EQ(2UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  EXPECT_EQ(2UL, all_scopes_.value());\n}\n\n// Compare behavior of a lazy scope and an eager scope scopes that share that same route\n// configuration. 
Route config of on demand scope shouldn't be loaded.\nTEST_F(ScopedRdsTest, OnDemandScopeNotLoadedWithoutRequest) {\n  setup();\n  init_watcher_.expectReady();\n  context_init_manager_.initialize(init_watcher_);\n  // Scope should be loaded eagerly by default.\n  const std::string eager_resource = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n\n  // On demand scope should be loaded lazily.\n  const std::string lazy_resource = R\"EOF(\nname: foo_scope2\nroute_configuration_name: foo_routes\non_demand: true\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n\n  srdsUpdateWithYaml({lazy_resource, eager_resource}, \"1\");\n  EXPECT_EQ(1UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  EXPECT_EQ(2UL, all_scopes_.value());\n\n  // Verify the config is a ScopedConfigImpl instance, both scopes point to \"\" as RDS hasn't kicked\n  // in yet(NullConfigImpl returned).\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  // Route config for foo key is NullConfigImpl and route config for bar key is nullptr\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"\");\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}}),\n              IsNull());\n  pushRdsConfig({\"foo_routes\"}, \"111\");\n  // Scope foo now have route config but route config for scope bar is still nullptr.\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", 
\"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}}),\n              IsNull());\n  EXPECT_EQ(2UL, all_scopes_.value());\n  EXPECT_EQ(1UL, active_scopes_.value());\n  EXPECT_EQ(1UL, on_demand_scopes_.value());\n}\n\n// Push Rds update after on demand request, route configuration should be initialized.\nTEST_F(ScopedRdsTest, PushRdsAfterOndemandRequest) {\n  setup();\n  init_watcher_.expectReady();\n  context_init_manager_.initialize(init_watcher_);\n  // Scope should be loaded eagerly by default.\n  const std::string eager_resource = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n\n  // On demand scope should be loaded lazily.\n  const std::string lazy_resource = R\"EOF(\nname: foo_scope2\nroute_configuration_name: foo_routes\non_demand: true\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n\n  srdsUpdateWithYaml({eager_resource, lazy_resource}, \"1\");\n  EXPECT_EQ(1UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  EXPECT_EQ(2UL, all_scopes_.value());\n\n  // Verify the config is a ScopedConfigImpl instance, both scopes point to \"\" as RDS hasn't kicked\n  // in yet(NullConfigImpl returned).\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"\");\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", 
\"x-foo-key;x-bar-key\"}}),\n              IsNull());\n  EXPECT_EQ(1UL, active_scopes_.value());\n\n  ScopeKeyPtr scope_key = getScopedRdsProvider()->config<ScopedConfigImpl>()->computeScopeKey(\n      TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}});\n  EXPECT_CALL(event_dispatcher_, post(_)).Times(1);\n  std::function<void(bool)> route_config_updated_cb = [](bool) {};\n  getScopedRdsProvider()->onDemandRdsUpdate(std::move(scope_key), event_dispatcher_,\n                                            std::move(route_config_updated_cb));\n  // After on demand request, push rds update, both scopes should find the route configuration.\n  pushRdsConfig({\"foo_routes\"}, \"111\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}})\n                ->name(),\n            \"foo_routes\");\n  // Now we have 1 active on demand scope and 1 eager loading scope.\n  EXPECT_EQ(2UL, all_scopes_.value());\n  EXPECT_EQ(2UL, active_scopes_.value());\n  EXPECT_EQ(1UL, on_demand_scopes_.value());\n}\n\nTEST_F(ScopedRdsTest, PushRdsBeforeOndemandRequest) {\n  setup();\n  init_watcher_.expectReady();\n  context_init_manager_.initialize(init_watcher_);\n  // Scope should be loaded eagerly by default.\n  const std::string eager_resource = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n\n  // On demand scope should be loaded lazily.\n  const std::string lazy_resource = R\"EOF(\nname: foo_scope2\nroute_configuration_name: foo_routes\non_demand: true\nkey:\n  fragments:\n    - string_key: x-bar-key\n)EOF\";\n\n  srdsUpdateWithYaml({eager_resource, 
lazy_resource}, \"1\");\n  EXPECT_EQ(1UL,\n            server_factory_context_.scope_.counter(\"foo.scoped_rds.foo_scoped_routes.config_reload\")\n                .value());\n  EXPECT_EQ(2UL, all_scopes_.value());\n\n  // Verify the config is a ScopedConfigImpl instance, both scopes point to \"\" as RDS hasn't kicked\n  // in yet(NullConfigImpl returned).\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"\");\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}}),\n              IsNull());\n  // Push rds update before on demand srds request.\n  pushRdsConfig({\"foo_routes\"}, \"111\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n  ScopeKeyPtr scope_key = getScopedRdsProvider()->config<ScopedConfigImpl>()->computeScopeKey(\n      TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}});\n  EXPECT_CALL(server_factory_context_.dispatcher_, post(_)).Times(1);\n  EXPECT_CALL(event_dispatcher_, post(_)).Times(1);\n  std::function<void(bool)> route_config_updated_cb = [](bool) {};\n  getScopedRdsProvider()->onDemandRdsUpdate(std::move(scope_key), event_dispatcher_,\n                                            std::move(route_config_updated_cb));\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-bar-key\"}})\n                ->name(),\n            
\"foo_routes\");\n}\n\n// Change a scope from lazy to eager will enable eager loading.\nTEST_F(ScopedRdsTest, UpdateOnDemandScopeToEagerScope) {\n  setup();\n  init_watcher_.expectReady();\n  context_init_manager_.initialize(init_watcher_);\n  // On demand scope should be loaded lazily.\n  const std::string lazy_resource = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\non_demand: true\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n\n  srdsUpdateWithYaml({lazy_resource}, \"1\");\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}}),\n              IsNull());\n  EXPECT_EQ(0UL, active_scopes_.value());\n  EXPECT_EQ(1UL, on_demand_scopes_.value());\n  // The on demand scope will be overwritten.\n  const std::string eager_resource = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  srdsUpdateWithYaml({eager_resource}, \"2\");\n  EXPECT_EQ(1UL, all_scopes_.value());\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"\");\n  pushRdsConfig({\"foo_routes\"}, \"111\");\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n  // Now we have 1 eager scope.\n  EXPECT_EQ(1UL, active_scopes_.value());\n  EXPECT_EQ(0UL, on_demand_scopes_.value());\n  EXPECT_EQ(1UL, all_scopes_.value());\n}\n\n// Change a scope from eager to lazy will delete the route table.\nTEST_F(ScopedRdsTest, 
UpdateEagerScopeToOnDemandScope) {\n  setup();\n  init_watcher_.expectReady();\n  context_init_manager_.initialize(init_watcher_);\n\n  const std::string eager_resource = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n\n  srdsUpdateWithYaml({eager_resource}, \"1\");\n  EXPECT_EQ(1UL, active_scopes_.value());\n  EXPECT_EQ(0UL, on_demand_scopes_.value());\n  // The scope is eager loading and rds update will be accepted.\n  pushRdsConfig({\"foo_routes\"}, \"111\");\n  ASSERT_THAT(getScopedRdsProvider(), Not(IsNull()));\n  ASSERT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>(), Not(IsNull()));\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n  // Update the scope to on demand, rds provider and the route config will be deleted.\n  const std::string lazy_resource = R\"EOF(\n  name: foo_scope\n  route_configuration_name: foo_routes\n  on_demand: true\n  key:\n    fragments:\n      - string_key: x-bar-key\n  )EOF\";\n  srdsUpdateWithYaml({lazy_resource}, \"2\");\n  EXPECT_THAT(getScopedRdsProvider()->config<ScopedConfigImpl>()->getRouteConfig(\n                  TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}}),\n              IsNull());\n  // The new scope will be on demand and inactive after srds update.\n  EXPECT_EQ(0UL, active_scopes_.value());\n  EXPECT_EQ(1UL, on_demand_scopes_.value());\n  EXPECT_EQ(1UL, all_scopes_.value());\n} // namespace\n\n// Post on demand callbacks multiple times, all should be executed after rds update.\nTEST_F(ScopedRdsTest, MultipleOnDemandUpdatedCallback) {\n  setup();\n  init_watcher_.expectReady();\n  context_init_manager_.initialize(init_watcher_);\n  // On demand scope should be loaded lazily.\n  const std::string lazy_resource = R\"EOF(\nname: 
foo_scope\nroute_configuration_name: foo_routes\non_demand: true\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n  srdsUpdateWithYaml({lazy_resource}, \"1\");\n\n  EXPECT_EQ(0UL, active_scopes_.value());\n  EXPECT_EQ(1UL, on_demand_scopes_.value());\n  // All the on demand updated callbacks will be executed when the route table comes.\n  for (int i = 0; i < 5; i++) {\n    ScopeKeyPtr scope_key = getScopedRdsProvider()->config<ScopedConfigImpl>()->computeScopeKey(\n        TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}});\n    std::function<void(bool)> route_config_updated_cb = [](bool) {};\n    getScopedRdsProvider()->onDemandRdsUpdate(std::move(scope_key), event_dispatcher_,\n                                              std::move(route_config_updated_cb));\n  }\n  // After on demand request, push rds update, the callbacks will be executed.\n  EXPECT_CALL(event_dispatcher_, post(_)).Times(5);\n  pushRdsConfig({\"foo_routes\"}, \"111\");\n  // Route table have been fetched, callbacks will be executed immediately.\n  for (int i = 0; i < 5; i++) {\n    EXPECT_CALL(event_dispatcher_, post(_)).Times(1);\n    ScopeKeyPtr scope_key = getScopedRdsProvider()->config<ScopedConfigImpl>()->computeScopeKey(\n        TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}});\n    std::function<void(bool)> route_config_updated_cb = [](bool) {};\n    getScopedRdsProvider()->onDemandRdsUpdate(std::move(scope_key), event_dispatcher_,\n                                              std::move(route_config_updated_cb));\n  }\n  // Activating the same on_demand scope multiple times, active_scopes is still 1.\n  EXPECT_EQ(getScopedRdsProvider()\n                ->config<ScopedConfigImpl>()\n                ->getRouteConfig(TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}})\n                ->name(),\n            \"foo_routes\");\n  EXPECT_EQ(1UL, active_scopes_.value());\n  EXPECT_EQ(1UL, on_demand_scopes_.value());\n}\n\nTEST_F(ScopedRdsTest, 
DanglingSubscriptionOnDemandUpdate) {\n  setup();\n  std::function<void(bool)> route_config_updated_cb = [](bool) {};\n  Event::PostCb temp_post_cb;\n  EXPECT_CALL(server_factory_context_.dispatcher_, post(_))\n      .WillOnce(testing::SaveArg<0>(&temp_post_cb));\n  std::shared_ptr<ScopeKey> scope_key =\n      getScopedRdsProvider()->config<ScopedConfigImpl>()->computeScopeKey(\n          TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}});\n  getScopedRdsProvider()->onDemandRdsUpdate(scope_key, event_dispatcher_,\n                                            std::move(route_config_updated_cb));\n  // Destroy the scoped_rds subscription by destroying its only config provider.\n  provider_.reset();\n  EXPECT_CALL(event_dispatcher_, post(_)).Times(1);\n  EXPECT_NO_THROW(temp_post_cb());\n}\n\n// Delete the on demand scope before on demand update in main thread.\nTEST_F(ScopedRdsTest, OnDemandScopeDeleted) {\n  setup();\n  init_watcher_.expectReady();\n  context_init_manager_.initialize(init_watcher_);\n  // On demand scope should be loaded lazily.\n  const std::string lazy_resource = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_routes\non_demand: true\nkey:\n  fragments:\n    - string_key: x-foo-key\n)EOF\";\n\n  srdsUpdateWithYaml({lazy_resource}, \"1\");\n  EXPECT_EQ(0UL, active_scopes_.value());\n  EXPECT_EQ(1UL, on_demand_scopes_.value());\n  // All the on demand updated callbacks will be executed when the route table comes.\n  {\n    ScopeKeyPtr scope_key = getScopedRdsProvider()->config<ScopedConfigImpl>()->computeScopeKey(\n        TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}});\n    std::function<void(bool)> route_config_updated_cb = [](bool scope_exist) {\n      EXPECT_TRUE(scope_exist);\n    };\n    getScopedRdsProvider()->onDemandRdsUpdate(std::move(scope_key), event_dispatcher_,\n                                              std::move(route_config_updated_cb));\n  }\n  // After on demand request, push rds update, the 
callbacks will be executed.\n  EXPECT_CALL(event_dispatcher_, post(_)).Times(1);\n  pushRdsConfig({\"foo_routes\"}, \"111\");\n\n  ScopeKeyPtr scope_key = getScopedRdsProvider()->config<ScopedConfigImpl>()->computeScopeKey(\n      TestRequestHeaderMapImpl{{\"Addr\", \"x-foo-key;x-foo-key\"}});\n  // Delete the scope route.\n  EXPECT_NO_THROW(srds_subscription_->onConfigUpdate({}, \"2\"));\n  EXPECT_EQ(0UL, all_scopes_.value());\n  EXPECT_CALL(event_dispatcher_, post(_)).Times(1);\n  // Scope no longer exists after srds update.\n  std::function<void(bool)> route_config_updated_cb = [](bool scope_exist) {\n    EXPECT_FALSE(scope_exist);\n  };\n  getScopedRdsProvider()->onDemandRdsUpdate(std::move(scope_key), event_dispatcher_,\n                                            std::move(route_config_updated_cb));\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/shadow_writer_impl_test.cc",
    "content": "#include <chrono>\n#include <string>\n\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/router/shadow_writer_impl.h\"\n\n#include \"test/mocks/upstream/cluster_manager.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nclass ShadowWriterImplTest : public testing::Test {\npublic:\n  void expectShadowWriter(absl::string_view host, absl::string_view shadowed_host) {\n    Http::RequestMessagePtr message(new Http::RequestMessageImpl());\n    message->headers().setHost(host);\n    EXPECT_CALL(cm_, get(Eq(\"foo\")));\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"foo\")).WillOnce(ReturnRef(cm_.async_client_));\n    auto options = Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(5));\n    EXPECT_CALL(cm_.async_client_, send_(_, _, options))\n        .WillOnce(Invoke(\n            [&](Http::RequestMessagePtr& inner_message, Http::AsyncClient::Callbacks& callbacks,\n                const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n              EXPECT_EQ(message, inner_message);\n              EXPECT_EQ(shadowed_host, message->headers().getHostValue());\n              callback_ = &callbacks;\n              return &request_;\n            }));\n    writer_.shadow(\"foo\", std::move(message), options);\n  }\n\n  Upstream::MockClusterManager cm_;\n  ShadowWriterImpl writer_{cm_};\n  Http::MockAsyncClientRequest request_{&cm_.async_client_};\n  Http::AsyncClient::Callbacks* callback_{};\n};\n\nTEST_F(ShadowWriterImplTest, Success) {\n  InSequence s;\n\n  expectShadowWriter(\"cluster1\", \"cluster1-shadow\");\n  Http::ResponseMessagePtr response(new Http::ResponseMessageImpl());\n  callback_->onSuccess(request_, std::move(response));\n}\n\nTEST_F(ShadowWriterImplTest, Failure) {\n  
InSequence s;\n\n  expectShadowWriter(\"cluster1:8000\", \"cluster1-shadow:8000\");\n  callback_->onFailure(request_, Http::AsyncClient::FailureReason::Reset);\n}\n\nTEST_F(ShadowWriterImplTest, NoCluster) {\n  InSequence s;\n\n  Http::RequestMessagePtr message(new Http::RequestMessageImpl());\n  EXPECT_CALL(cm_, get(Eq(\"foo\"))).WillOnce(Return(nullptr));\n  EXPECT_CALL(cm_, httpAsyncClientForCluster(\"foo\")).Times(0);\n  auto options = Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(5));\n  writer_.shadow(\"foo\", std::move(message), options);\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/string_accessor_impl_test.cc",
    "content": "#include \"common/router/string_accessor_impl.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nTEST(StringAccessorImplTest, Storage) {\n  const char* const TestString = \"test string 1\";\n  StringAccessorImpl accessor(TestString);\n\n  EXPECT_EQ(TestString, accessor.asString());\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/upstream_request_test.cc",
    "content": "#include \"common/router/upstream_request.h\"\n\n#include \"test/mocks/router/router_filter_interface.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nclass UpstreamRequestTest : public testing::Test {\npublic:\n  NiceMock<MockRouterFilterInterface> router_filter_interface_;\n  UpstreamRequest upstream_request_{router_filter_interface_,\n                                    std::make_unique<NiceMock<Router::MockGenericConnPool>>()};\n};\n\n// UpstreamRequest is responsible processing for passing 101 upgrade headers to onUpstreamHeaders.\nTEST_F(UpstreamRequestTest, Decode101UpgradeHeaders) {\n  auto upgrade_headers = std::make_unique<Http::TestResponseHeaderMapImpl>(\n      Http::TestResponseHeaderMapImpl({{\":status\", \"101\"}}));\n  EXPECT_CALL(router_filter_interface_, onUpstreamHeaders(_, _, _, _));\n  upstream_request_.decodeHeaders(std::move(upgrade_headers), false);\n}\n\n// UpstreamRequest is responsible for ignoring non-{100,101} 1xx headers.\nTEST_F(UpstreamRequestTest, IgnoreOther1xxHeaders) {\n  auto other_headers = std::make_unique<Http::TestResponseHeaderMapImpl>(\n      Http::TestResponseHeaderMapImpl({{\":status\", \"102\"}}));\n  EXPECT_CALL(router_filter_interface_, onUpstreamHeaders(_, _, _, _)).Times(0);\n  upstream_request_.decodeHeaders(std::move(other_headers), false);\n}\n\n// UpstreamRequest is responsible processing for passing 200 upgrade headers to onUpstreamHeaders.\nTEST_F(UpstreamRequestTest, Decode200UpgradeHeaders) {\n  auto response_headers = std::make_unique<Http::TestResponseHeaderMapImpl>(\n      Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}}));\n  EXPECT_CALL(router_filter_interface_, onUpstreamHeaders(_, _, _, _));\n  upstream_request_.decodeHeaders(std::move(response_headers), false);\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/router/vhds_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/router/rds_impl.h\"\n\n#include \"server/admin/admin.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Router {\nnamespace {\n\nclass VhdsTest : public testing::Test {\npublic:\n  void SetUp() override {\n    default_vhds_config_ = R\"EOF(\nname: my_route\nvhds:\n  config_source:\n    api_config_source:\n      api_type: DELTA_GRPC\n      grpc_services:\n        envoy_grpc:\n          cluster_name: xds_cluster\n)EOF\";\n  }\n\n  envoy::config::route::v3::VirtualHost buildVirtualHost(const std::string& name,\n                                                         const std::string& domain) {\n    return TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(fmt::format(R\"EOF(\n      name: {}\n      domains: [{}]\n      routes:\n      - match: {{ prefix: \"/\" }}\n        route: {{ cluster: \"my_service\" }}\n    )EOF\",\n                                                                                     name, domain));\n  }\n\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>\n  buildAddedResources(const std::vector<envoy::config::route::v3::VirtualHost>& added_or_updated) {\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> to_ret;\n\n    for (const auto& vhost : added_or_updated) {\n      auto* resource = 
to_ret.Add();\n      resource->set_name(vhost.name());\n      resource->set_version(\"1\");\n      resource->mutable_resource()->PackFrom(vhost);\n    }\n\n    return to_ret;\n  }\n\n  Protobuf::RepeatedPtrField<std::string>\n  buildRemovedResources(const std::vector<std::string>& removed) {\n    return Protobuf::RepeatedPtrField<std::string>{removed.begin(), removed.end()};\n  }\n  RouteConfigUpdatePtr\n  makeRouteConfigUpdate(const envoy::config::route::v3::RouteConfiguration& rc) {\n    RouteConfigUpdatePtr config_update_info =\n        std::make_unique<RouteConfigUpdateReceiverImpl>(factory_context_.timeSource());\n    config_update_info->onRdsUpdate(rc, \"1\");\n    return config_update_info;\n  }\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> factory_context_;\n  Init::ExpectableWatcherImpl init_watcher_;\n  Init::TargetHandlePtr init_target_handle_;\n  const std::string context_ = \"vhds_test\";\n  absl::node_hash_set<Envoy::Router::RouteConfigProvider*> providers_;\n  Protobuf::util::MessageDifferencer messageDifferencer_;\n  std::string default_vhds_config_;\n  NiceMock<Envoy::Config::MockSubscriptionFactory> subscription_factory_;\n};\n\n// verify that api_type: DELTA_GRPC passes validation\nTEST_F(VhdsTest, VhdsInstantiationShouldSucceedWithDELTA_GRPC) {\n  const auto route_config =\n      TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(default_vhds_config_);\n  RouteConfigUpdatePtr config_update_info = makeRouteConfigUpdate(route_config);\n\n  EXPECT_NO_THROW(VhdsSubscription(config_update_info, factory_context_, context_, providers_));\n}\n\n// verify that api_type: GRPC fails validation\nTEST_F(VhdsTest, VhdsInstantiationShouldFailWithoutDELTA_GRPC) {\n  const auto route_config =\n      TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(R\"EOF(\nname: my_route\nvhds:\n  config_source:\n    api_config_source:\n      api_type: GRPC\n      grpc_services:\n        envoy_grpc:\n          cluster_name: 
xds_cluster\n  )EOF\");\n  RouteConfigUpdatePtr config_update_info = makeRouteConfigUpdate(route_config);\n\n  EXPECT_THROW(VhdsSubscription(config_update_info, factory_context_, context_, providers_),\n               EnvoyException);\n}\n\n// verify addition/updating of virtual hosts\nTEST_F(VhdsTest, VhdsAddsVirtualHosts) {\n  const auto route_config =\n      TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(default_vhds_config_);\n  RouteConfigUpdatePtr config_update_info = makeRouteConfigUpdate(route_config);\n\n  VhdsSubscription subscription(config_update_info, factory_context_, context_, providers_);\n  EXPECT_EQ(0UL, config_update_info->routeConfiguration().virtual_hosts_size());\n\n  auto vhost = buildVirtualHost(\"vhost1\", \"vhost.first\");\n  const auto& added_resources = buildAddedResources({vhost});\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::route::v3::VirtualHost>(added_resources);\n  const Protobuf::RepeatedPtrField<std::string> removed_resources;\n  factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources.refvec_, removed_resources, \"1\");\n\n  EXPECT_EQ(1UL, config_update_info->routeConfiguration().virtual_hosts_size());\n  EXPECT_TRUE(\n      messageDifferencer_.Equals(vhost, config_update_info->routeConfiguration().virtual_hosts(0)));\n}\n\n// verify that an RDS update of virtual hosts leaves VHDS virtual hosts intact\nTEST_F(VhdsTest, RdsUpdatesVirtualHosts) {\n  const auto route_config =\n      TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(R\"EOF(\nname: my_route\nvirtual_hosts:\n- name: vhost_rds1\n  domains: [\"vhost.rds.first\"]\n  routes:\n  - match: { prefix: \"/rdsone\" }\n    route: { cluster: my_service }\nvhds:\n  config_source:\n    api_config_source:\n      api_type: DELTA_GRPC\n      grpc_services:\n        envoy_grpc:\n          cluster_name: xds_cluster\n  )EOF\");\n  const auto 
updated_route_config =\n      TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(R\"EOF(\nname: my_route\nvirtual_hosts:\n- name: vhost_rds1\n  domains: [\"vhost.rds.first\"]\n  routes:\n  - match: { prefix: \"/rdsone\" }\n    route: { cluster: my_service }\n- name: vhost_rds2\n  domains: [\"vhost.rds.second\"]\n  routes:\n  - match: { prefix: \"/rdstwo\" }\n    route: { cluster: my_other_service }\nvhds:\n  config_source:\n    api_config_source:\n      api_type: DELTA_GRPC\n      grpc_services:\n        envoy_grpc:\n          cluster_name: xds_cluster\n  )EOF\");\n  RouteConfigUpdatePtr config_update_info = makeRouteConfigUpdate(route_config);\n\n  VhdsSubscription subscription(config_update_info, factory_context_, context_, providers_);\n  EXPECT_EQ(1UL, config_update_info->routeConfiguration().virtual_hosts_size());\n  EXPECT_EQ(\"vhost_rds1\", config_update_info->routeConfiguration().virtual_hosts(0).name());\n\n  auto vhost = buildVirtualHost(\"vhost_vhds1\", \"vhost.first\");\n  const auto& added_resources = buildAddedResources({vhost});\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::route::v3::VirtualHost>(added_resources);\n  const Protobuf::RepeatedPtrField<std::string> removed_resources;\n  factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources.refvec_, removed_resources, \"1\");\n  EXPECT_EQ(2UL, config_update_info->routeConfiguration().virtual_hosts_size());\n\n  config_update_info->onRdsUpdate(updated_route_config, \"2\");\n\n  EXPECT_EQ(3UL, config_update_info->routeConfiguration().virtual_hosts_size());\n  auto actual_vhost_0 = config_update_info->routeConfiguration().virtual_hosts(0);\n  auto actual_vhost_1 = config_update_info->routeConfiguration().virtual_hosts(1);\n  auto actual_vhost_2 = config_update_info->routeConfiguration().virtual_hosts(2);\n  EXPECT_TRUE(\"vhost_rds1\" == actual_vhost_0.name() || \"vhost_rds1\" == 
actual_vhost_1.name() ||\n              \"vhost_rds1\" == actual_vhost_2.name());\n  EXPECT_TRUE(\"vhost_rds2\" == actual_vhost_0.name() || \"vhost_rds2\" == actual_vhost_1.name() ||\n              \"vhost_rds2\" == actual_vhost_2.name());\n  EXPECT_TRUE(\"vhost_vhds1\" == actual_vhost_0.name() || \"vhost_vhds1\" == actual_vhost_1.name() ||\n              \"vhost_vhds1\" == actual_vhost_2.name());\n}\n\n} // namespace\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/runtime/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nexports_files([\"filesystem_setup.sh\"])\n\nfilegroup(\n    name = \"filesystem_test_data\",\n    srcs = glob([\"test_data/**\"]),\n)\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    hdrs = [\n        \"utility.h\",\n    ],\n    deps = [\n        \"//source/common/runtime:runtime_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"runtime_protos_test\",\n    srcs = [\"runtime_protos_test.cc\"],\n    deps = [\n        \"//source/common/runtime:runtime_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"runtime_impl_test\",\n    srcs = [\"runtime_impl_test.cc\"],\n    data = glob([\"test_data/**\"]) + [\"filesystem_setup.sh\"],\n    deps = [\n        \"//source/common/config:runtime_utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:logging_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        
\"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/runtime/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"runtime_flag_override_test\",\n    srcs = [\"runtime_flag_override_test.cc\"],\n    args = [\n        \"--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false\",\n        \"--runtime-feature-disable-for-tests=envoy.reloadable_features.test_feature_true\",\n    ],\n    coverage = False,\n    deps = [\n        \"//source/common/runtime:runtime_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"runtime_flag_override_noop_test\",\n    srcs = [\"runtime_flag_override_noop_test.cc\"],\n    args = [\n        \"--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_true\",\n        \"--runtime-feature-disable-for-tests=envoy.reloadable_features.test_feature_false\",\n    ],\n    coverage = False,\n    deps = [\n        \"//source/common/runtime:runtime_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/runtime/filesystem_setup.sh",
    "content": "#!/bin/bash\n\nset -e\n\nTEST_DATA=test/common/runtime/test_data\n\n# Regular runtime tests.\ncd \"${TEST_SRCDIR}/envoy\"\nrm -rf \"${TEST_TMPDIR:?}/${TEST_DATA}\"\nmkdir -p \"${TEST_TMPDIR}/${TEST_DATA}\"\ncp -RfL \"${TEST_DATA}\"/* \"${TEST_TMPDIR}/${TEST_DATA}\"\nchmod -R u+rwX \"${TEST_TMPDIR}/${TEST_DATA}\"\n# Verify text value is treated as a binary blob regardless of source line-ending settings\nprintf \"hello\\nworld\" > \"${TEST_TMPDIR}/${TEST_DATA}/root/envoy/file_lf\"\nprintf \"hello\\r\\nworld\" > \"${TEST_TMPDIR}/${TEST_DATA}/root/envoy/file_crlf\"\n\n# Deliberate symlink of doom.\nLOOP_PATH=\"${TEST_TMPDIR}/${TEST_DATA}/loop\"\nmkdir -p \"${LOOP_PATH}\"\n\n# the ln in MSYS2 doesn't handle recursive symlinks correctly,\n# so use the cmd built in mklink instead on Windows\nif [[ -z \"${WINDIR}\" ]]; then\n  ln -sf \"${TEST_TMPDIR}/${TEST_DATA}/root\" \"${TEST_TMPDIR}/${TEST_DATA}/current\"\n  ln -sf \"${TEST_TMPDIR}/${TEST_DATA}/root/envoy/subdir\" \"${TEST_TMPDIR}/${TEST_DATA}/root/envoy/badlink\"\n  ln -sf \"${LOOP_PATH}\" \"${LOOP_PATH}\"/loop\nelse\n  # see https://github.com/koalaman/shellcheck/issues/861\n  # shellcheck disable=SC1003\n  win_test_root=\"$(echo \"${TEST_TMPDIR}/${TEST_DATA}\" | tr '/' '\\\\')\"\n  cmd.exe /C \"mklink /D ${win_test_root}\\\\current ${win_test_root}\\\\root\"\n  cmd.exe /C \"mklink /D ${win_test_root}\\\\root\\\\envoy\\\\badlink ${win_test_root}\\\\root\\\\envoy\\\\subdir\"\n  # see https://github.com/koalaman/shellcheck/issues/861\n  # shellcheck disable=SC1003\n  win_loop_path=\"$(echo \"$LOOP_PATH\" | tr '/' '\\\\')\"\n  cmd.exe /C \"mklink /D ${win_loop_path}\\\\loop ${win_loop_path}\"\nfi\n"
  },
  {
    "path": "test/common/runtime/runtime_flag_override_noop_test.cc",
    "content": "#include \"common/runtime/runtime_features.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Runtime {\n\n// Features not in runtime_features.cc are false by default (and this particular one is verified to\n// be false in runtime_impl_test.cc). However, in the envoy_cc_test declaration, the flag is set\n// \"--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false\"\n// to override the return value of runtimeFeatureEnabled to true.\nTEST(RuntimeFlagOverrideNoopTest, OverridesNoop) {\n  EXPECT_FALSE(Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_false\"));\n}\n\n// For features in runtime_features.cc that are true by default, this flag\n// \"--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false\" is set in the\n// envoy_cc_test declaration to override the return value of runtimeFeatureEnabled to false.\nTEST(RuntimeFlagOverrideNoopTest, OverrideDisableFeatureNoop) {\n  EXPECT_TRUE(Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_true\"));\n}\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/runtime/runtime_flag_override_test.cc",
    "content": "#include \"common/runtime/runtime_features.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Runtime {\n\n// Features not in runtime_features.cc are false by default (and this particular one is verified to\n// be false in runtime_impl_test.cc). However, in the envoy_cc_test declaration, the flag is set\n// \"--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false\"\n// to override the return value of runtimeFeatureEnabled to true.\nTEST(RuntimeFlagOverrideTest, OverridesWork) {\n  EXPECT_TRUE(Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_false\"));\n}\n\n// For features in runtime_features.cc that are true by default, this flag\n// \"--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false\" is set in the\n// envoy_cc_test declaration to override the return value of runtimeFeatureEnabled to false.\nTEST(RuntimeFlagOverrideTest, OverrideDisableFeatureWork) {\n  EXPECT_FALSE(Runtime::runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_true\"));\n}\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/runtime/runtime_impl_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/service/runtime/v3/rtds.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/config/runtime_utility.h\"\n#include \"common/runtime/runtime_features.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/filesystem/mocks.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/logging.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::MockFunction;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Runtime {\nnamespace {\n\nclass LoaderImplTest : public testing::Test {\nprotected:\n  LoaderImplTest() : api_(Api::createApiForTest(store_)) { local_info_.node_.set_cluster(\"\"); }\n\n  virtual void setup() {\n    EXPECT_CALL(dispatcher_, createFilesystemWatcher_()).WillRepeatedly(InvokeWithoutArgs([this] {\n      Filesystem::MockWatcher* mock_watcher = new NiceMock<Filesystem::MockWatcher>();\n      EXPECT_CALL(*mock_watcher, addWatch(_, Filesystem::Watcher::Events::MovedTo, _))\n          .WillRepeatedly(\n              Invoke([this](absl::string_view path, uint32_t, Filesystem::Watcher::OnChangedCb cb) {\n                EXPECT_EQ(path, expected_watch_root_);\n                on_changed_cbs_.emplace_back(cb);\n              
}));\n      return mock_watcher;\n    }));\n  }\n\n  Event::MockDispatcher dispatcher_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Stats::TestUtil::TestStore store_;\n  Random::MockRandomGenerator generator_;\n  std::unique_ptr<LoaderImpl> loader_;\n  Api::ApiPtr api_;\n  Upstream::MockClusterManager cm_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  std::vector<Filesystem::Watcher::OnChangedCb> on_changed_cbs_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  std::string expected_watch_root_;\n};\n\nclass DiskLoaderImplTest : public LoaderImplTest {\npublic:\n  void SetUp() override {\n    TestEnvironment::exec(\n        {TestEnvironment::runfilesPath(\"test/common/runtime/filesystem_setup.sh\")});\n  }\n\n  void TearDown() override {\n    TestEnvironment::removePath(TestEnvironment::temporaryPath(\"test/common/runtime/test_data\"));\n  }\n\n  void run(const std::string& primary_dir, const std::string& override_dir) {\n    envoy::config::bootstrap::v3::Runtime runtime;\n    runtime.mutable_base()->MergeFrom(base_);\n    expected_watch_root_ = TestEnvironment::temporaryPath(primary_dir);\n    runtime.set_symlink_root(expected_watch_root_);\n    runtime.set_subdirectory(\"envoy\");\n    runtime.set_override_subdirectory(override_dir);\n\n    envoy::config::bootstrap::v3::LayeredRuntime layered_runtime;\n    Config::translateRuntime(runtime, layered_runtime);\n    loader_ = std::make_unique<LoaderImpl>(dispatcher_, tls_, layered_runtime, local_info_, store_,\n                                           generator_, validation_visitor_, *api_);\n  }\n\n  void write(const std::string& path, const std::string& value) {\n    TestEnvironment::writeStringToFileForTest(path, value);\n  }\n\n  void updateDiskLayer(uint32_t layer) {\n    ASSERT_LT(layer, on_changed_cbs_.size());\n    on_changed_cbs_[layer](Filesystem::Watcher::Events::MovedTo);\n  }\n\n  ProtobufWkt::Struct base_;\n};\n\nTEST_F(DiskLoaderImplTest, EmptyKeyTest) {\n  
setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override\");\n\n  EXPECT_FALSE(loader_->snapshot().get(\"\").has_value());\n  EXPECT_EQ(11, loader_->snapshot().getInteger(\"\", 11));\n  EXPECT_EQ(1.1, loader_->snapshot().getDouble(\"\", 1.1));\n  EXPECT_EQ(false, loader_->snapshot().featureEnabled(\"\", 0));\n  EXPECT_EQ(true, loader_->snapshot().featureEnabled(\"\", 100));\n  EXPECT_EQ(true, loader_->snapshot().getBoolean(\"\", true));\n  EXPECT_EQ(false, loader_->snapshot().getBoolean(\"\", false));\n}\n\nTEST_F(DiskLoaderImplTest, DoubleUintInteraction) {\n  setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override\");\n\n  EXPECT_EQ(2UL, loader_->snapshot().getInteger(\"file3\", 1));\n  EXPECT_EQ(2.0, loader_->snapshot().getDouble(\"file3\", 1.1));\n}\n\nTEST_F(DiskLoaderImplTest, DoubleUintInteractionNegatives) {\n  setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override\");\n\n  EXPECT_EQ(1, loader_->snapshot().getInteger(\"file_with_negative_double\", 1));\n  EXPECT_EQ(-4.2, loader_->snapshot().getDouble(\"file_with_negative_double\", 1.1));\n}\n\nTEST_F(DiskLoaderImplTest, All) {\n  setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override\");\n\n  // Basic string getting.\n  EXPECT_EQ(\"world\", loader_->snapshot().get(\"file2\").value().get());\n  EXPECT_EQ(\"hello\", loader_->snapshot().get(\"subdir.file\").value().get());\n  EXPECT_EQ(\"hello\\nworld\", loader_->snapshot().get(\"file_lf\").value().get());\n  EXPECT_EQ(\"hello\\r\\nworld\", loader_->snapshot().get(\"file_crlf\").value().get());\n  EXPECT_FALSE(loader_->snapshot().get(\"invalid\").has_value());\n\n  // Existence checking.\n  EXPECT_EQ(true, loader_->snapshot().get(\"file2\").has_value());\n  EXPECT_EQ(true, loader_->snapshot().get(\"subdir.file\").has_value());\n  EXPECT_EQ(false, loader_->snapshot().get(\"invalid\").has_value());\n\n  // Integer getting.\n  EXPECT_EQ(1UL, loader_->snapshot().getInteger(\"file1\", 
1));\n  EXPECT_EQ(2UL, loader_->snapshot().getInteger(\"file3\", 1));\n  EXPECT_EQ(123UL, loader_->snapshot().getInteger(\"file4\", 1));\n\n  // Double getting.\n  // Bogus string, expect default.\n  EXPECT_EQ(42.1, loader_->snapshot().getDouble(\"file_with_words\", 42.1));\n  // Valid float string.\n  EXPECT_EQ(23.2, loader_->snapshot().getDouble(\"file_with_double\", 1.1));\n  // Valid float string followed by newlines.\n  EXPECT_EQ(3.141, loader_->snapshot().getDouble(\"file_with_double_newlines\", 1.1));\n\n  const auto snapshot = reinterpret_cast<const SnapshotImpl*>(&loader_->snapshot());\n\n  // Validate that the layer name is set properly for static layers.\n  EXPECT_EQ(\"base\", snapshot->getLayers()[0]->name());\n  EXPECT_EQ(\"root\", snapshot->getLayers()[1]->name());\n  EXPECT_EQ(\"override\", snapshot->getLayers()[2]->name());\n  EXPECT_EQ(\"admin\", snapshot->getLayers()[3]->name());\n\n  // Boolean getting.\n  // Lower-case boolean specification.\n  EXPECT_EQ(true, snapshot->getBoolean(\"file11\", false));\n  EXPECT_EQ(true, snapshot->getBoolean(\"file11\", true));\n  // Mixed-case boolean specification.\n  EXPECT_EQ(false, snapshot->getBoolean(\"file12\", true));\n  EXPECT_EQ(false, snapshot->getBoolean(\"file12\", false));\n  // Lower-case boolean specification with leading whitespace.\n  EXPECT_EQ(true, snapshot->getBoolean(\"file13\", true));\n  EXPECT_EQ(true, snapshot->getBoolean(\"file13\", false));\n  // File1 is not a boolean. 
Should take default.\n  EXPECT_EQ(true, snapshot->getBoolean(\"file1\", true));\n  EXPECT_EQ(false, snapshot->getBoolean(\"file1\", false));\n\n  // Feature defaults.\n  // test_feature_true is explicitly set true in runtime_features.cc\n  EXPECT_EQ(true, snapshot->runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_true\"));\n  // test_feature_false is not in runtime_features.cc and so is false by default.\n  EXPECT_EQ(false, snapshot->runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_false\"));\n\n  // Deprecation\n  EXPECT_EQ(false, snapshot->deprecatedFeatureEnabled(\n                       \"envoy.deprecated_features.deprecated.proto:is_deprecated_fatal\", false));\n\n  // Feature defaults via helper function.\n  EXPECT_EQ(false, runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_false\"));\n  EXPECT_EQ(true, runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_true\"));\n\n  // Files with comments.\n  EXPECT_EQ(123UL, loader_->snapshot().getInteger(\"file5\", 1));\n  EXPECT_EQ(2.718, loader_->snapshot().getDouble(\"file_with_double_comment\", 1.1));\n  EXPECT_EQ(\"/home#about-us\", loader_->snapshot().get(\"file6\").value().get());\n  EXPECT_EQ(\"\", loader_->snapshot().get(\"file7\").value().get());\n\n  // Feature enablement.\n  EXPECT_CALL(generator_, random()).WillOnce(Return(1));\n  EXPECT_TRUE(loader_->snapshot().featureEnabled(\"file3\", 1));\n\n  EXPECT_CALL(generator_, random()).WillOnce(Return(2));\n  EXPECT_FALSE(loader_->snapshot().featureEnabled(\"file3\", 1));\n\n  // Fractional percent feature enablement\n  envoy::type::v3::FractionalPercent fractional_percent;\n  fractional_percent.set_numerator(5);\n  fractional_percent.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n\n  EXPECT_CALL(generator_, random()).WillOnce(Return(50));\n  EXPECT_TRUE(loader_->snapshot().featureEnabled(\"file8\", fractional_percent)); // valid data\n\n  EXPECT_CALL(generator_, 
random()).WillOnce(Return(60));\n  EXPECT_FALSE(loader_->snapshot().featureEnabled(\"file8\", fractional_percent)); // valid data\n\n  // We currently expect that runtime values represented as fractional percents that are provided as\n  // integers are parsed simply as percents (denominator of 100).\n  EXPECT_CALL(generator_, random()).WillOnce(Return(53));\n  EXPECT_FALSE(loader_->snapshot().featureEnabled(\"file10\", fractional_percent)); // valid int data\n  EXPECT_CALL(generator_, random()).WillOnce(Return(51));\n  EXPECT_TRUE(loader_->snapshot().featureEnabled(\"file10\", fractional_percent)); // valid int data\n\n  EXPECT_CALL(generator_, random()).WillOnce(Return(4));\n  EXPECT_TRUE(\n      loader_->snapshot().featureEnabled(\"file9\", fractional_percent)); // invalid proto data\n\n  EXPECT_CALL(generator_, random()).WillOnce(Return(6));\n  EXPECT_FALSE(\n      loader_->snapshot().featureEnabled(\"file9\", fractional_percent)); // invalid proto data\n\n  EXPECT_CALL(generator_, random()).WillOnce(Return(4));\n  EXPECT_TRUE(loader_->snapshot().featureEnabled(\"file1\", fractional_percent)); // invalid data\n\n  EXPECT_CALL(generator_, random()).WillOnce(Return(6));\n  EXPECT_FALSE(loader_->snapshot().featureEnabled(\"file1\", fractional_percent)); // invalid data\n\n  // Check stable value\n  EXPECT_TRUE(loader_->snapshot().featureEnabled(\"file3\", 1, 1));\n  EXPECT_FALSE(loader_->snapshot().featureEnabled(\"file3\", 1, 3));\n\n  // Check stable value and num buckets.\n  EXPECT_FALSE(loader_->snapshot().featureEnabled(\"file4\", 1, 200, 300));\n  EXPECT_TRUE(loader_->snapshot().featureEnabled(\"file4\", 1, 122, 300));\n\n  // Overrides from override dir\n  EXPECT_EQ(\"hello override\", loader_->snapshot().get(\"file1\").value().get());\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(25, store_.gauge(\"runtime.num_keys\", 
Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(4, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n}\n\nTEST_F(DiskLoaderImplTest, UintLargeIntegerConversion) {\n  setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override\");\n\n  EXPECT_EQ(1, loader_->snapshot().getInteger(\"file_with_large_integer\", 1));\n}\n\nTEST_F(DiskLoaderImplTest, GetLayers) {\n  base_ = TestUtility::parseYaml<ProtobufWkt::Struct>(R\"EOF(\n    foo: whatevs\n  )EOF\");\n  setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override\");\n  const auto& layers = loader_->snapshot().getLayers();\n  EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(4, layers.size());\n  EXPECT_EQ(4, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(\"whatevs\", layers[0]->values().find(\"foo\")->second.raw_string_value_);\n  EXPECT_EQ(\"hello\", layers[1]->values().find(\"file1\")->second.raw_string_value_);\n  EXPECT_EQ(\"hello override\", layers[2]->values().find(\"file1\")->second.raw_string_value_);\n  // Admin should be last\n  EXPECT_NE(nullptr, dynamic_cast<const AdminLayer*>(layers.back().get()));\n  EXPECT_TRUE(layers[3]->values().empty());\n\n  loader_->mergeValues({{\"foo\", \"bar\"}});\n  // The old snapshot and its layers should have been invalidated. 
Refetch.\n  const auto& new_layers = loader_->snapshot().getLayers();\n  EXPECT_EQ(\"bar\", new_layers[3]->values().find(\"foo\")->second.raw_string_value_);\n  EXPECT_EQ(2, store_.counter(\"runtime.load_success\").value());\n}\n\nTEST_F(DiskLoaderImplTest, BadDirectory) {\n  setup();\n  run(\"/baddir\", \"/baddir\");\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(0, store_.counter(\"runtime.override_dir_exists\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.override_dir_not_exists\").value());\n}\n\n// Validate that an error in a layer will results in appropriate stats tracking.\nTEST_F(DiskLoaderImplTest, DiskLayerFailure) {\n  setup();\n  // Symlink loopy configuration will result in an error.\n  run(\"test/common/runtime/test_data\", \"loop\");\n  EXPECT_EQ(1, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(0, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(0, store_.counter(\"runtime.override_dir_exists\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.override_dir_not_exists\").value());\n}\n\nTEST_F(DiskLoaderImplTest, OverrideFolderDoesNotExist) {\n  setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override_does_not_exist\");\n\n  EXPECT_EQ(\"hello\", loader_->snapshot().get(\"file1\").value().get());\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(3, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(0, store_.counter(\"runtime.override_dir_exists\").value());\n  EXPECT_EQ(1, 
store_.counter(\"runtime.override_dir_not_exists\").value());\n}\n\nTEST_F(DiskLoaderImplTest, PercentHandling) {\n  setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override\");\n\n  envoy::type::v3::FractionalPercent default_value;\n\n  // Smoke test integer value of 0, should be interpreted as 0%\n  {\n    loader_->mergeValues({{\"foo\", \"0\"}});\n\n    EXPECT_FALSE(loader_->snapshot().featureEnabled(\"foo\", default_value, 0));\n    EXPECT_FALSE(loader_->snapshot().featureEnabled(\"foo\", default_value, 5));\n  }\n\n  // Smoke test integer value of 5, should be interpreted as 5%\n  {\n    loader_->mergeValues({{\"foo\", \"5\"}});\n    EXPECT_TRUE(loader_->snapshot().featureEnabled(\"foo\", default_value, 0));\n    EXPECT_TRUE(loader_->snapshot().featureEnabled(\"foo\", default_value, 4));\n    EXPECT_FALSE(loader_->snapshot().featureEnabled(\"foo\", default_value, 5));\n    EXPECT_TRUE(loader_->snapshot().featureEnabled(\"foo\", default_value, 100));\n  }\n\n  // Verify uint64 -> uint32 conversion by using a runtime value with all 0s in\n  // the bottom 32 bits. If it were to be naively treated as a uint32_t then it\n  // would appear as 0%, but it should be 100% because we assume the\n  // denominator is 100\n  {\n    // NOTE: high_value has to have the property that the lowest 32 bits % 100\n    // is less than 100. 
If it's greater than 100 the test will pass whether or\n    // not the uint32 conversion is handled properly.\n    uint64_t high_value = 1ULL << 60;\n    std::string high_value_str = std::to_string(high_value);\n    loader_->mergeValues({{\"foo\", high_value_str}});\n    EXPECT_TRUE(loader_->snapshot().featureEnabled(\"foo\", default_value, 0));\n    EXPECT_TRUE(loader_->snapshot().featureEnabled(\"foo\", default_value, 50));\n    EXPECT_TRUE(loader_->snapshot().featureEnabled(\"foo\", default_value, 100));\n    EXPECT_TRUE(loader_->snapshot().featureEnabled(\"foo\", default_value, 12389));\n    EXPECT_TRUE(loader_->snapshot().featureEnabled(\"foo\", default_value, 23859235));\n  }\n}\n\nvoid testNewOverrides(Loader& loader, Stats::TestUtil::TestStore& store) {\n  Stats::Gauge& admin_overrides_active =\n      store.gauge(\"runtime.admin_overrides_active\", Stats::Gauge::ImportMode::NeverImport);\n\n  // New string.\n  loader.mergeValues({{\"foo\", \"bar\"}});\n  EXPECT_EQ(\"bar\", loader.snapshot().get(\"foo\").value().get());\n  EXPECT_EQ(1, admin_overrides_active.value());\n\n  // Remove new string.\n  loader.mergeValues({{\"foo\", \"\"}});\n  EXPECT_FALSE(loader.snapshot().get(\"foo\").has_value());\n  EXPECT_EQ(0, admin_overrides_active.value());\n\n  // New integer.\n  loader.mergeValues({{\"baz\", \"42\"}});\n  EXPECT_EQ(42, loader.snapshot().getInteger(\"baz\", 0));\n  EXPECT_EQ(1, admin_overrides_active.value());\n\n  // Remove new integer.\n  loader.mergeValues({{\"baz\", \"\"}});\n  EXPECT_EQ(0, loader.snapshot().getInteger(\"baz\", 0));\n  EXPECT_EQ(0, admin_overrides_active.value());\n\n  // New double.\n  loader.mergeValues({{\"beep\", \"42.1\"}});\n  EXPECT_EQ(42.1, loader.snapshot().getDouble(\"beep\", 1.2));\n  EXPECT_EQ(1, admin_overrides_active.value());\n\n  // Remove new double.\n  loader.mergeValues({{\"beep\", \"\"}});\n  EXPECT_EQ(1.2, loader.snapshot().getDouble(\"beep\", 1.2));\n  EXPECT_EQ(0, 
admin_overrides_active.value());\n}\n\nTEST_F(DiskLoaderImplTest, MergeValues) {\n  setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override\");\n  testNewOverrides(*loader_, store_);\n  Stats::Gauge& admin_overrides_active =\n      store_.gauge(\"runtime.admin_overrides_active\", Stats::Gauge::ImportMode::NeverImport);\n\n  // Override string\n  loader_->mergeValues({{\"file2\", \"new world\"}});\n  EXPECT_EQ(\"new world\", loader_->snapshot().get(\"file2\").value().get());\n  EXPECT_EQ(1, admin_overrides_active.value());\n\n  // Remove overridden string\n  loader_->mergeValues({{\"file2\", \"\"}});\n  EXPECT_EQ(\"world\", loader_->snapshot().get(\"file2\").value().get());\n  EXPECT_EQ(0, admin_overrides_active.value());\n\n  // Override integer\n  loader_->mergeValues({{\"file3\", \"42\"}});\n  EXPECT_EQ(42, loader_->snapshot().getInteger(\"file3\", 1));\n  EXPECT_EQ(1, admin_overrides_active.value());\n\n  // Remove overridden integer\n  loader_->mergeValues({{\"file3\", \"\"}});\n  EXPECT_EQ(2, loader_->snapshot().getInteger(\"file3\", 1));\n  EXPECT_EQ(0, admin_overrides_active.value());\n\n  // Override double\n  loader_->mergeValues({{\"file_with_double\", \"42.1\"}});\n  EXPECT_EQ(42.1, loader_->snapshot().getDouble(\"file_with_double\", 1.1));\n  EXPECT_EQ(1, admin_overrides_active.value());\n\n  // Remove overridden double\n  loader_->mergeValues({{\"file_with_double\", \"\"}});\n  EXPECT_EQ(23.2, loader_->snapshot().getDouble(\"file_with_double\", 1.1));\n  EXPECT_EQ(0, admin_overrides_active.value());\n\n  // Override override string\n  loader_->mergeValues({{\"file1\", \"hello overridden override\"}});\n  EXPECT_EQ(\"hello overridden override\", loader_->snapshot().get(\"file1\").value().get());\n  EXPECT_EQ(1, admin_overrides_active.value());\n\n  // Remove overridden override string\n  loader_->mergeValues({{\"file1\", \"\"}});\n  EXPECT_EQ(\"hello override\", loader_->snapshot().get(\"file1\").value().get());\n  EXPECT_EQ(0, 
admin_overrides_active.value());\n  EXPECT_EQ(0, store_.gauge(\"runtime.admin_overrides_active\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n\n  EXPECT_EQ(15, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(4, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n}\n\n// Validate that admin overrides disk, disk overrides bootstrap.\nTEST_F(DiskLoaderImplTest, LayersOverride) {\n  base_ = TestUtility::parseYaml<ProtobufWkt::Struct>(R\"EOF(\n    some: thing\n    other: thang\n    file2: whatevs\n  )EOF\");\n  setup();\n  run(\"test/common/runtime/test_data/current\", \"envoy_override\");\n  // Disk overrides bootstrap.\n  EXPECT_EQ(\"world\", loader_->snapshot().get(\"file2\").value().get());\n  EXPECT_EQ(\"thing\", loader_->snapshot().get(\"some\").value().get());\n  EXPECT_EQ(\"thang\", loader_->snapshot().get(\"other\").value().get());\n  // Admin overrides disk and bootstrap.\n  loader_->mergeValues({{\"file2\", \"pluto\"}, {\"some\", \"day soon\"}});\n  EXPECT_EQ(\"pluto\", loader_->snapshot().get(\"file2\").value().get());\n  EXPECT_EQ(\"day soon\", loader_->snapshot().get(\"some\").value().get());\n  EXPECT_EQ(\"thang\", loader_->snapshot().get(\"other\").value().get());\n  // Admin overrides stick over filesystem updates.\n  EXPECT_EQ(\"Layer cake\", loader_->snapshot().get(\"file14\").value().get());\n  EXPECT_EQ(\"Cheese cake\", loader_->snapshot().get(\"file15\").value().get());\n  loader_->mergeValues({{\"file14\", \"Mega layer cake\"}});\n  EXPECT_EQ(\"Mega layer cake\", loader_->snapshot().get(\"file14\").value().get());\n  EXPECT_EQ(\"Cheese cake\", loader_->snapshot().get(\"file15\").value().get());\n  write(\"test/common/runtime/test_data/current/envoy/file14\", \"Sad cake\");\n  write(\"test/common/runtime/test_data/current/envoy/file15\", \"Happy cake\");\n  updateDiskLayer(0);\n  EXPECT_EQ(\"Mega layer cake\", loader_->snapshot().get(\"file14\").value().get());\n  
EXPECT_EQ(\"Happy cake\", loader_->snapshot().get(\"file15\").value().get());\n}\n\n// Validate that multiple admin layers leads to a configuration load failure.\nTEST_F(DiskLoaderImplTest, MultipleAdminLayersFail) {\n  setup();\n  envoy::config::bootstrap::v3::LayeredRuntime layered_runtime;\n  {\n    auto* layer = layered_runtime.add_layers();\n    layer->set_name(\"admin_0\");\n    layer->mutable_admin_layer();\n  }\n  {\n    auto* layer = layered_runtime.add_layers();\n    layer->set_name(\"admin_1\");\n    layer->mutable_admin_layer();\n  }\n  EXPECT_THROW_WITH_MESSAGE(\n      std::make_unique<LoaderImpl>(dispatcher_, tls_, layered_runtime, local_info_, store_,\n                                   generator_, validation_visitor_, *api_),\n      EnvoyException,\n      \"Too many admin layers specified in LayeredRuntime, at most one may be specified\");\n}\n\nclass StaticLoaderImplTest : public LoaderImplTest {\nprotected:\n  void setup() override {\n    LoaderImplTest::setup();\n    envoy::config::bootstrap::v3::LayeredRuntime layered_runtime;\n    {\n      auto* layer = layered_runtime.add_layers();\n      layer->set_name(\"base\");\n      layer->mutable_static_layer()->MergeFrom(base_);\n    }\n    {\n      auto* layer = layered_runtime.add_layers();\n      layer->set_name(\"admin\");\n      layer->mutable_admin_layer();\n    }\n    loader_ = std::make_unique<LoaderImpl>(dispatcher_, tls_, layered_runtime, local_info_, store_,\n                                           generator_, validation_visitor_, *api_);\n  }\n\n  ProtobufWkt::Struct base_;\n};\n\nTEST_F(StaticLoaderImplTest, All) {\n  setup();\n  EXPECT_FALSE(loader_->snapshot().get(\"foo\").has_value());\n  EXPECT_EQ(1UL, loader_->snapshot().getInteger(\"foo\", 1));\n  EXPECT_EQ(1.1, loader_->snapshot().getDouble(\"foo\", 1.1));\n  EXPECT_CALL(generator_, random()).WillOnce(Return(49));\n  EXPECT_TRUE(loader_->snapshot().featureEnabled(\"foo\", 50));\n  testNewOverrides(*loader_, store_);\n}\n\n// 
Validate proto parsing sanity.\nTEST_F(StaticLoaderImplTest, ProtoParsing) {\n  base_ = TestUtility::parseYaml<ProtobufWkt::Struct>(R\"EOF(\n    file1: hello override\n    file2: world\n    file3: 2\n    file4: 123\n    file8:\n      numerator: 52\n      denominator: HUNDRED\n    file9:\n      numerator: 100\n      denominator: NONSENSE\n    file10: 52\n    file11: true\n    file12: FaLSe\n    file13: false\n    subdir:\n      file: \"hello\"\n    numerator_only:\n      numerator: 52\n    denominator_only:\n      denominator: HUNDRED\n    false_friend:\n      numerator: 100\n      foo: bar\n    empty: {}\n    file_with_words: \"some words\"\n    file_with_double: 23.2\n    file_lf: \"hello\\nworld\"\n    file_crlf: \"hello\\r\\nworld\"\n    bool_as_int0: 0\n    bool_as_int1: 1\n  )EOF\");\n  setup();\n\n  // Basic string getting.\n  EXPECT_EQ(\"world\", loader_->snapshot().get(\"file2\").value().get());\n  EXPECT_EQ(\"hello\", loader_->snapshot().get(\"subdir.file\").value().get());\n  EXPECT_EQ(\"hello\\nworld\", loader_->snapshot().get(\"file_lf\").value().get());\n  EXPECT_EQ(\"hello\\r\\nworld\", loader_->snapshot().get(\"file_crlf\").value().get());\n  EXPECT_FALSE(loader_->snapshot().get(\"invalid\").has_value());\n\n  // Integer getting.\n  EXPECT_EQ(1UL, loader_->snapshot().getInteger(\"file1\", 1));\n  EXPECT_EQ(2UL, loader_->snapshot().getInteger(\"file3\", 1));\n  EXPECT_EQ(123UL, loader_->snapshot().getInteger(\"file4\", 1));\n\n  // Double getting.\n  EXPECT_EQ(1.1, loader_->snapshot().getDouble(\"file_with_words\", 1.1));\n  EXPECT_EQ(23.2, loader_->snapshot().getDouble(\"file_with_double\", 1.1));\n  EXPECT_EQ(2.0, loader_->snapshot().getDouble(\"file3\", 3.3));\n\n  // Boolean getting.\n  const auto snapshot = reinterpret_cast<const SnapshotImpl*>(&loader_->snapshot());\n\n  EXPECT_EQ(true, snapshot->getBoolean(\"file11\", true));\n  EXPECT_EQ(true, snapshot->getBoolean(\"file11\", false));\n\n  EXPECT_EQ(false, snapshot->getBoolean(\"file12\", 
true));\n  EXPECT_EQ(false, snapshot->getBoolean(\"file12\", false));\n\n  EXPECT_EQ(false, snapshot->getBoolean(\"file13\", true));\n  EXPECT_EQ(false, snapshot->getBoolean(\"file13\", false));\n\n  EXPECT_EQ(0, snapshot->getInteger(\"bool_as_int0\", 333));\n  EXPECT_EQ(1, snapshot->getInteger(\"bool_as_int1\", 333));\n\n  EXPECT_EQ(false, snapshot->getBoolean(\"bool_as_int0\", true));\n  EXPECT_EQ(false, snapshot->getBoolean(\"bool_as_int0\", false));\n  EXPECT_EQ(true, snapshot->getBoolean(\"bool_as_int1\", false));\n  EXPECT_EQ(true, snapshot->getBoolean(\"bool_as_int1\", true));\n  EXPECT_EQ(true, snapshot->getBoolean(\"file11\", false));\n  EXPECT_EQ(true, snapshot->getBoolean(\"file11\", true));\n\n  // Test that a double value is not parsed as a boolean even though integers are fine.\n  EXPECT_EQ(true, snapshot->getBoolean(\"file_with_double\", true));\n  EXPECT_EQ(false, snapshot->getBoolean(\"file_with_double\", false));\n\n  // Not a boolean. Expect the default.\n  EXPECT_EQ(true, snapshot->getBoolean(\"file1\", true));\n  EXPECT_EQ(false, snapshot->getBoolean(\"file1\", false));\n  EXPECT_EQ(true, snapshot->getBoolean(\"blah.blah\", true));\n  EXPECT_EQ(false, snapshot->getBoolean(\"blah.blah\", false));\n\n  // Fractional percent feature enablement\n  envoy::type::v3::FractionalPercent fractional_percent;\n  fractional_percent.set_numerator(5);\n  fractional_percent.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n\n  EXPECT_CALL(generator_, random()).WillOnce(Return(50));\n  EXPECT_TRUE(loader_->snapshot().featureEnabled(\"file8\", fractional_percent)); // valid data\n  EXPECT_CALL(generator_, random()).WillOnce(Return(60));\n  EXPECT_FALSE(loader_->snapshot().featureEnabled(\"file8\", fractional_percent)); // valid data\n\n  // We currently expect that runtime values represented as fractional percents that are provided as\n  // integers are parsed simply as percents (denominator of 100).\n  EXPECT_CALL(generator_, 
random()).WillOnce(Return(53));\n  EXPECT_FALSE(loader_->snapshot().featureEnabled(\"file10\", fractional_percent)); // valid int data\n  EXPECT_CALL(generator_, random()).WillOnce(Return(51));\n  EXPECT_TRUE(loader_->snapshot().featureEnabled(\"file10\", fractional_percent)); // valid int data\n\n  // Invalid fractional percent is ignored.\n  EXPECT_CALL(generator_, random()).WillOnce(Return(4));\n  EXPECT_TRUE(\n      loader_->snapshot().featureEnabled(\"file9\", fractional_percent)); // invalid proto data\n  EXPECT_CALL(generator_, random()).WillOnce(Return(6));\n  EXPECT_FALSE(\n      loader_->snapshot().featureEnabled(\"file9\", fractional_percent)); // invalid proto data\n  EXPECT_CALL(generator_, random()).WillOnce(Return(4));\n  EXPECT_TRUE(\n      loader_->snapshot().featureEnabled(\"false_friend\", fractional_percent)); // invalid proto data\n  EXPECT_CALL(generator_, random()).WillOnce(Return(6));\n  EXPECT_FALSE(\n      loader_->snapshot().featureEnabled(\"false_friend\", fractional_percent)); // invalid proto data\n\n  // Numerator only FractionalPercent is handled.\n  EXPECT_CALL(generator_, random()).WillOnce(Return(50));\n  EXPECT_TRUE(\n      loader_->snapshot().featureEnabled(\"numerator_only\", fractional_percent)); // valid data\n  EXPECT_CALL(generator_, random()).WillOnce(Return(60));\n  EXPECT_FALSE(\n      loader_->snapshot().featureEnabled(\"numerator_only\", fractional_percent)); // valid data\n\n  // Denominator only FractionalPercent is handled.\n  EXPECT_CALL(generator_, random()).WillOnce(Return(4));\n  EXPECT_FALSE(\n      loader_->snapshot().featureEnabled(\"denominator_only\", fractional_percent)); // valid data\n  EXPECT_CALL(generator_, random()).WillOnce(Return(6));\n  EXPECT_FALSE(\n      loader_->snapshot().featureEnabled(\"denominator_only\", fractional_percent)); // valid data\n\n  // Empty message is handled.\n  EXPECT_CALL(generator_, random()).WillOnce(Return(4));\n  
EXPECT_FALSE(loader_->snapshot().featureEnabled(\"empty\", fractional_percent)); // valid data\n  EXPECT_CALL(generator_, random()).WillOnce(Return(6));\n  EXPECT_FALSE(loader_->snapshot().featureEnabled(\"empty\", fractional_percent)); // valid data\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(21, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n}\n\nTEST_F(StaticLoaderImplTest, InvalidNumerator) {\n  base_ = TestUtility::parseYaml<ProtobufWkt::Struct>(R\"EOF(\n    invalid_numerator:\n      numerator: 111\n      denominator: HUNDRED\n  )EOF\");\n  setup();\n\n  envoy::type::v3::FractionalPercent fractional_percent;\n\n  // There is no assertion here - when numerator is invalid\n  // featureEnabled() will just drop debug log line.\n  EXPECT_CALL(generator_, random()).WillOnce(Return(500000));\n  EXPECT_LOG_CONTAINS(\"debug\",\n                      \"runtime key 'invalid_numerator': numerator (111) > denominator (100), \"\n                      \"condition always evaluates to true\",\n                      loader_->snapshot().featureEnabled(\"invalid_numerator\", fractional_percent));\n}\n\nTEST_F(StaticLoaderImplTest, RuntimeFromNonWorkerThreads) {\n  // Force the thread to be considered a non-worker thread.\n  tls_.registered_ = false;\n  setup();\n\n  // Set up foo -> bar\n  loader_->mergeValues({{\"foo\", \"bar\"}});\n  EXPECT_EQ(\"bar\", loader_->threadsafeSnapshot()->get(\"foo\").value().get());\n  const Snapshot* original_snapshot_pointer = loader_->threadsafeSnapshot().get();\n\n  // Now set up a test thread which verifies foo -> bar\n  //\n  // Then change foo and make sure the test thread picks up the change.\n  bool read_bar = false;\n  bool updated_eep = false;\n  Thread::MutexBasicLockable mutex;\n  Thread::CondVar 
foo_read;\n  Thread::CondVar foo_changed;\n  const Snapshot* original_thread_snapshot_pointer = nullptr;\n  auto thread = Thread::threadFactoryForTest().createThread([&]() {\n    {\n      Thread::LockGuard lock(mutex);\n      EXPECT_EQ(\"bar\", loader_->threadsafeSnapshot()->get(\"foo\").value().get());\n      read_bar = true;\n      original_thread_snapshot_pointer = loader_->threadsafeSnapshot().get();\n      EXPECT_EQ(original_thread_snapshot_pointer, loader_->threadsafeSnapshot().get());\n      foo_read.notifyOne();\n    }\n\n    {\n      Thread::LockGuard lock(mutex);\n      if (!updated_eep) {\n        foo_changed.wait(mutex);\n      }\n      EXPECT_EQ(\"eep\", loader_->threadsafeSnapshot()->get(\"foo\").value().get());\n    }\n  });\n\n  {\n    Thread::LockGuard lock(mutex);\n    if (!read_bar) {\n      foo_read.wait(mutex);\n    }\n    loader_->mergeValues({{\"foo\", \"eep\"}});\n    updated_eep = true;\n  }\n\n  {\n    Thread::LockGuard lock(mutex);\n    foo_changed.notifyOne();\n    EXPECT_EQ(\"eep\", loader_->threadsafeSnapshot()->get(\"foo\").value().get());\n  }\n\n  thread->join();\n  EXPECT_EQ(original_thread_snapshot_pointer, original_snapshot_pointer);\n}\n\nclass DiskLayerTest : public testing::Test {\nprotected:\n  DiskLayerTest() : api_(Api::createApiForTest()) {}\n\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    TestEnvironment::exec(\n        {TestEnvironment::runfilesPath(\"test/common/runtime/filesystem_setup.sh\")});\n  }\n\n  static void TearDownTestSuite() {\n    TestEnvironment::removePath(TestEnvironment::temporaryPath(\"test/common/runtime/test_data\"));\n  }\n\n  Api::ApiPtr api_;\n};\n\nTEST_F(DiskLayerTest, IllegalPath) {\n#ifdef WIN32\n  EXPECT_THROW_WITH_MESSAGE(DiskLayer(\"test\", R\"EOF(\\\\.\\)EOF\", *api_), EnvoyException,\n                            R\"EOF(Invalid path: \\\\.\\)EOF\");\n#else\n  EXPECT_THROW_WITH_MESSAGE(DiskLayer(\"test\", \"/dev\", *api_), EnvoyException, \"Invalid path: 
/dev\");\n#endif\n}\n\n// Validate that we catch recursion that goes too deep in the runtime filesystem\n// walk.\nTEST_F(DiskLayerTest, Loop) {\n  EXPECT_THROW_WITH_MESSAGE(\n      DiskLayer(\"test\", TestEnvironment::temporaryPath(\"test/common/runtime/test_data/loop\"),\n                *api_),\n      EnvoyException, \"Walk recursion depth exceeded 16\");\n}\n\nTEST(NoRuntime, FeatureEnabled) {\n  // Make sure the registry is not set up.\n  ASSERT_TRUE(Runtime::LoaderSingleton::getExisting() == nullptr);\n\n  // Feature defaults should still work.\n  EXPECT_EQ(false, runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_false\"));\n  EXPECT_EQ(true, runtimeFeatureEnabled(\"envoy.reloadable_features.test_feature_true\"));\n}\n\nTEST(NoRuntime, DefaultIntValues) {\n  // Make sure the registry is not set up.\n  ASSERT_TRUE(Runtime::LoaderSingleton::getExisting() == nullptr);\n\n  // Feature defaults should still work.\n  EXPECT_EQ(0x1230000ABCDULL,\n            getInteger(\"envoy.reloadable_features.test_int_feature_default\", 0x1230000ABCDULL));\n  EXPECT_EQ(0, getInteger(\"envoy.reloadable_features.test_int_feature_zero\", 0));\n}\n\n// Test RTDS layer(s).\nclass RtdsLoaderImplTest : public LoaderImplTest {\npublic:\n  void setup() override {\n    LoaderImplTest::setup();\n\n    envoy::config::bootstrap::v3::LayeredRuntime config;\n    *config.add_layers()->mutable_static_layer() =\n        TestUtility::parseYaml<ProtobufWkt::Struct>(R\"EOF(\n    foo: whatevs\n    bar: yar\n  )EOF\");\n    for (const auto& layer_resource_name : layers_) {\n      auto* layer = config.add_layers();\n      layer->set_name(layer_resource_name);\n      auto* rtds_layer = layer->mutable_rtds_layer();\n      rtds_layer->set_name(layer_resource_name);\n      rtds_layer->mutable_rtds_config();\n    }\n    EXPECT_CALL(cm_, subscriptionFactory()).Times(layers_.size());\n    ON_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _))\n        .WillByDefault(\n    
        testing::Invoke([this](const envoy::config::core::v3::ConfigSource&, absl::string_view,\n                                   Stats::Scope&, Config::SubscriptionCallbacks& callbacks,\n                                   Config::OpaqueResourceDecoder&) -> Config::SubscriptionPtr {\n              auto ret = std::make_unique<testing::NiceMock<Config::MockSubscription>>();\n              rtds_subscriptions_.push_back(ret.get());\n              rtds_callbacks_.push_back(&callbacks);\n              return ret;\n            }));\n    loader_ = std::make_unique<LoaderImpl>(dispatcher_, tls_, config, local_info_, store_,\n                                           generator_, validation_visitor_, *api_);\n    loader_->initialize(cm_);\n    for (auto* sub : rtds_subscriptions_) {\n      EXPECT_CALL(*sub, start(_, _));\n    }\n\n    loader_->startRtdsSubscriptions(rtds_init_callback_.AsStdFunction());\n\n    // Validate that the layer name is set properly for dynamic layers.\n    EXPECT_EQ(layers_[0], loader_->snapshot().getLayers()[1]->name());\n\n    EXPECT_EQ(\"whatevs\", loader_->snapshot().get(\"foo\").value().get());\n    EXPECT_EQ(\"yar\", loader_->snapshot().get(\"bar\").value().get());\n    EXPECT_FALSE(loader_->snapshot().get(\"baz\").has_value());\n\n    EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n    EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n    EXPECT_EQ(2, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n    EXPECT_EQ(1 + layers_.size(),\n              store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n  }\n\n  void addLayer(absl::string_view name) { layers_.emplace_back(name); }\n\n  void doOnConfigUpdateVerifyNoThrow(const envoy::service::runtime::v3::Runtime& runtime,\n                                     uint32_t callback_index = 0) {\n    const auto decoded_resources = TestUtility::decodeResources({runtime});\n    VERBOSE_EXPECT_NO_THROW(\n 
       rtds_callbacks_[callback_index]->onConfigUpdate(decoded_resources.refvec_, \"\"));\n  }\n\n  void doDeltaOnConfigUpdateVerifyNoThrow(const envoy::service::runtime::v3::Runtime& runtime) {\n    const auto decoded_resources = TestUtility::decodeResources({runtime});\n    VERBOSE_EXPECT_NO_THROW(rtds_callbacks_[0]->onConfigUpdate(decoded_resources.refvec_, {}, \"\"));\n  }\n\n  std::vector<std::string> layers_{\"some_resource\"};\n  std::vector<Config::SubscriptionCallbacks*> rtds_callbacks_;\n  std::vector<Config::MockSubscription*> rtds_subscriptions_;\n  MockFunction<void()> rtds_init_callback_;\n};\n\n// Empty resource lists are rejected.\nTEST_F(RtdsLoaderImplTest, UnexpectedSizeEmpty) {\n  setup();\n\n  EXPECT_CALL(rtds_init_callback_, Call());\n  EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate({}, \"\"), EnvoyException,\n                            \"Unexpected RTDS resource length: 0\");\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n}\n\n// > 1 length lists are rejected.\nTEST_F(RtdsLoaderImplTest, UnexpectedSizeTooMany) {\n  setup();\n\n  const envoy::service::runtime::v3::Runtime runtime;\n  const auto decoded_resources = TestUtility::decodeResources({runtime, runtime});\n\n  EXPECT_CALL(rtds_init_callback_, Call());\n  EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(decoded_resources.refvec_, \"\"),\n                            EnvoyException, \"Unexpected RTDS resource length: 2\");\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(2, 
store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n}\n\n// Validate behavior when the config fails delivery at the subscription level.\nTEST_F(RtdsLoaderImplTest, FailureSubscription) {\n  setup();\n\n  EXPECT_CALL(rtds_init_callback_, Call());\n  // onConfigUpdateFailed() should not be called for gRPC stream connection failure\n  rtds_callbacks_[0]->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout,\n                                           {});\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n}\n\n// Unexpected runtime resource name.\nTEST_F(RtdsLoaderImplTest, WrongResourceName) {\n  setup();\n\n  auto runtime = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: other_resource\n    layer:\n      foo: bar\n      baz: meh\n  )EOF\");\n  const auto decoded_resources = TestUtility::decodeResources({runtime});\n  EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(decoded_resources.refvec_, \"\"),\n                            EnvoyException,\n                            \"Unexpected RTDS runtime (expecting some_resource): other_resource\");\n\n  EXPECT_EQ(\"whatevs\", loader_->snapshot().get(\"foo\").value().get());\n  EXPECT_EQ(\"yar\", loader_->snapshot().get(\"bar\").value().get());\n  EXPECT_FALSE(loader_->snapshot().get(\"baz\").has_value());\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(1, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", 
Stats::Gauge::ImportMode::NeverImport).value());\n}\n\n// Successful update.\nTEST_F(RtdsLoaderImplTest, OnConfigUpdateSuccess) {\n  setup();\n\n  auto runtime = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: some_resource\n    layer:\n      foo: bar\n      baz: meh\n  )EOF\");\n  EXPECT_CALL(rtds_init_callback_, Call());\n  doOnConfigUpdateVerifyNoThrow(runtime);\n\n  EXPECT_EQ(\"bar\", loader_->snapshot().get(\"foo\").value().get());\n  EXPECT_EQ(\"yar\", loader_->snapshot().get(\"bar\").value().get());\n  EXPECT_EQ(\"meh\", loader_->snapshot().get(\"baz\").value().get());\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(2, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(3, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n\n  runtime = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: some_resource\n    layer:\n      baz: saz\n  )EOF\");\n  doOnConfigUpdateVerifyNoThrow(runtime);\n\n  EXPECT_EQ(\"whatevs\", loader_->snapshot().get(\"foo\").value().get());\n  EXPECT_EQ(\"yar\", loader_->snapshot().get(\"bar\").value().get());\n  EXPECT_EQ(\"saz\", loader_->snapshot().get(\"baz\").value().get());\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(3, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(3, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n}\n\n// Delta style successful update.\nTEST_F(RtdsLoaderImplTest, DeltaOnConfigUpdateSuccess) {\n  setup();\n\n  auto runtime = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: some_resource\n    layer:\n      foo: bar\n      baz: meh\n  )EOF\");\n  
EXPECT_CALL(rtds_init_callback_, Call());\n  doDeltaOnConfigUpdateVerifyNoThrow(runtime);\n\n  EXPECT_EQ(\"bar\", loader_->snapshot().get(\"foo\").value().get());\n  EXPECT_EQ(\"yar\", loader_->snapshot().get(\"bar\").value().get());\n  EXPECT_EQ(\"meh\", loader_->snapshot().get(\"baz\").value().get());\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(2, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(3, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n\n  runtime = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: some_resource\n    layer:\n      baz: saz\n  )EOF\");\n  doDeltaOnConfigUpdateVerifyNoThrow(runtime);\n\n  EXPECT_EQ(\"whatevs\", loader_->snapshot().get(\"foo\").value().get());\n  EXPECT_EQ(\"yar\", loader_->snapshot().get(\"bar\").value().get());\n  EXPECT_EQ(\"saz\", loader_->snapshot().get(\"baz\").value().get());\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(3, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(3, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(2, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n}\n\n// Updates with multiple RTDS layers.\nTEST_F(RtdsLoaderImplTest, MultipleRtdsLayers) {\n  addLayer(\"another_resource\");\n  setup();\n\n  EXPECT_EQ(\"whatevs\", loader_->snapshot().get(\"foo\").value().get());\n  EXPECT_EQ(\"yar\", loader_->snapshot().get(\"bar\").value().get());\n  EXPECT_FALSE(loader_->snapshot().get(\"baz\").has_value());\n\n  auto runtime = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: some_resource\n    layer:\n      foo: bar\n      baz: meh\n  )EOF\");\n  EXPECT_CALL(rtds_init_callback_, Call()).Times(1);\n  
doOnConfigUpdateVerifyNoThrow(runtime, 0);\n\n  EXPECT_EQ(\"bar\", loader_->snapshot().get(\"foo\").value().get());\n  EXPECT_EQ(\"yar\", loader_->snapshot().get(\"bar\").value().get());\n  EXPECT_EQ(\"meh\", loader_->snapshot().get(\"baz\").value().get());\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(2, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(3, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(3, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n\n  runtime = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: another_resource\n    layer:\n      baz: saz\n  )EOF\");\n  doOnConfigUpdateVerifyNoThrow(runtime, 1);\n\n  // Unlike in OnConfigUpdateSuccess, foo latches onto bar as the some_resource\n  // layer still applies.\n  EXPECT_EQ(\"bar\", loader_->snapshot().get(\"foo\").value().get());\n  EXPECT_EQ(\"yar\", loader_->snapshot().get(\"bar\").value().get());\n  EXPECT_EQ(\"saz\", loader_->snapshot().get(\"baz\").value().get());\n\n  EXPECT_EQ(0, store_.counter(\"runtime.load_error\").value());\n  EXPECT_EQ(3, store_.counter(\"runtime.load_success\").value());\n  EXPECT_EQ(3, store_.gauge(\"runtime.num_keys\", Stats::Gauge::ImportMode::NeverImport).value());\n  EXPECT_EQ(3, store_.gauge(\"runtime.num_layers\", Stats::Gauge::ImportMode::NeverImport).value());\n}\n\nTEST_F(RtdsLoaderImplTest, BadConfigSource) {\n  Upstream::MockClusterManager cm_;\n  EXPECT_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _))\n      .WillOnce(InvokeWithoutArgs([]() -> Config::SubscriptionPtr {\n        throw EnvoyException(\"bad config\");\n        return nullptr;\n      }));\n\n  envoy::config::bootstrap::v3::LayeredRuntime config;\n  auto* layer = config.add_layers();\n  layer->set_name(\"some_other_resource\");\n  auto* rtds_layer = layer->mutable_rtds_layer();\n  
rtds_layer->set_name(\"some_resource\");\n  rtds_layer->mutable_rtds_config();\n\n  EXPECT_CALL(cm_, subscriptionFactory()).Times(1);\n  LoaderImpl loader(dispatcher_, tls_, config, local_info_, store_, generator_, validation_visitor_,\n                    *api_);\n\n  EXPECT_THROW_WITH_MESSAGE(loader.initialize(cm_), EnvoyException, \"bad config\");\n}\n\n} // namespace\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/runtime/runtime_protos_test.cc",
    "content": "#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/base.pb.validate.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/runtime/runtime_protos.h\"\n\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Runtime {\nnamespace {\n\nclass RuntimeProtosTest : public testing::Test {\nprotected:\n  NiceMock<MockLoader> runtime_;\n};\n\nTEST_F(RuntimeProtosTest, PercentBasicTest) {\n  envoy::config::core::v3::RuntimePercent percent_proto;\n  std::string yaml(R\"EOF(\nruntime_key: \"foo.bar\"\ndefault_value:\n  value: 4.2\n)EOF\");\n  TestUtility::loadFromYamlAndValidate(yaml, percent_proto);\n  Percentage test_percent(percent_proto, runtime_);\n\n  // Basic double values and overrides.\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 4.2));\n  EXPECT_EQ(4.2, test_percent.value());\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 4.2)).WillOnce(Return(1.337));\n  EXPECT_EQ(1.337, test_percent.value());\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 4.2)).WillOnce(Return(1));\n  EXPECT_EQ(1.0, test_percent.value());\n\n  // Verify handling of bogus percentages (outside [0,100]).\n  yaml = R\"EOF(\nruntime_key: \"foo.bar\"\ndefault_value:\n  value: -20\n)EOF\";\n  EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, percent_proto), EnvoyException);\n\n  yaml = R\"EOF(\nruntime_key: \"foo.bar\"\ndefault_value:\n  value: 400\n)EOF\";\n  EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, percent_proto), EnvoyException);\n\n  yaml = R\"EOF(\nruntime_key: \"foo.bar\"\ndefault_value:\n  value: 23.0\n)EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, percent_proto);\n  Percentage test_percent2(percent_proto, runtime_);\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 23.0));\n  
EXPECT_EQ(23.0, test_percent2.value());\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 23.0)).WillOnce(Return(1.337));\n  EXPECT_EQ(1.337, test_percent2.value());\n\n  // Return default value if bogus runtime values given.\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 23.0)).WillOnce(Return(-10.0));\n  EXPECT_EQ(23.0, test_percent2.value());\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 23.0)).WillOnce(Return(160.0));\n  EXPECT_EQ(23.0, test_percent2.value());\n}\n\nTEST_F(RuntimeProtosTest, DoubleBasicTest) {\n  envoy::config::core::v3::RuntimeDouble double_proto;\n  std::string yaml(R\"EOF(\nruntime_key: \"foo.bar\"\ndefault_value: 4.2\n)EOF\");\n  TestUtility::loadFromYamlAndValidate(yaml, double_proto);\n  Double test_double(double_proto, runtime_);\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 4.2));\n  EXPECT_EQ(4.2, test_double.value());\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 4.2)).WillOnce(Return(1.337));\n  EXPECT_EQ(1.337, test_double.value());\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.bar\", 4.2)).WillOnce(Return(1));\n  EXPECT_EQ(1.0, test_double.value());\n}\n\nTEST_F(RuntimeProtosTest, FeatureFlagBasicTest) {\n  envoy::config::core::v3::RuntimeFeatureFlag feature_flag_proto;\n  std::string yaml(R\"EOF(\nruntime_key: \"foo.bar\"\ndefault_value: true\n)EOF\");\n  TestUtility::loadFromYamlAndValidate(yaml, feature_flag_proto);\n  FeatureFlag test_feature(feature_flag_proto, runtime_);\n\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"foo.bar\", true));\n  EXPECT_EQ(true, test_feature.enabled());\n\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"foo.bar\", true)).WillOnce(Return(false));\n  EXPECT_EQ(false, test_feature.enabled());\n\n  envoy::config::core::v3::RuntimeFeatureFlag feature_flag_proto2;\n  yaml = R\"EOF(\nruntime_key: \"bar.foo\"\ndefault_value: false\n)EOF\";\n  TestUtility::loadFromYamlAndValidate(yaml, feature_flag_proto2);\n  FeatureFlag 
test_feature2(feature_flag_proto2, runtime_);\n\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"bar.foo\", false));\n  EXPECT_EQ(false, test_feature2.enabled());\n\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"bar.foo\", false)).WillOnce(Return(true));\n  EXPECT_EQ(true, test_feature2.enabled());\n}\n\nTEST_F(RuntimeProtosTest, FeatureFlagEmptyProtoTest) {\n  envoy::config::core::v3::RuntimeFeatureFlag empty_proto;\n  FeatureFlag test(empty_proto, runtime_);\n\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"\", true));\n  EXPECT_EQ(true, test.enabled());\n}\n\nTEST_F(RuntimeProtosTest, FractionalPercentBasicTest) {\n  envoy::config::core::v3::RuntimeFractionalPercent runtime_fractional_percent_proto;\n  std::string yaml(R\"EOF(\nruntime_key: \"foo.bar\"\ndefault_value:\n  numerator: 100\n  denominator: HUNDRED\n)EOF\");\n  TestUtility::loadFromYamlAndValidate(yaml, runtime_fractional_percent_proto);\n  FractionalPercent test_fractional_percent(runtime_fractional_percent_proto, runtime_);\n\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"foo.bar\", testing::Matcher<const envoy::type::v3::FractionalPercent&>(\n                                            Percent(100))))\n      .WillOnce(Return(true));\n  EXPECT_EQ(true, test_fractional_percent.enabled());\n\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"foo.bar\", testing::Matcher<const envoy::type::v3::FractionalPercent&>(\n                                            Percent(100))))\n      .WillOnce(Return(false));\n  EXPECT_EQ(false, test_fractional_percent.enabled());\n\n  envoy::config::core::v3::RuntimeFractionalPercent runtime_fractional_percent_proto2;\n  yaml = (R\"EOF(\nruntime_key: \"foo.bar\"\ndefault_value:\n  numerator: 0\n  denominator: HUNDRED\n)EOF\");\n  TestUtility::loadFromYamlAndValidate(yaml, runtime_fractional_percent_proto2);\n  FractionalPercent test_fractional_percent2(runtime_fractional_percent_proto2, runtime_);\n\n  EXPECT_CALL(runtime_.snapshot_,\n  
            featureEnabled(\"foo.bar\", testing::Matcher<const envoy::type::v3::FractionalPercent&>(\n                                            Percent(0))))\n      .WillOnce(Return(true));\n  EXPECT_EQ(true, test_fractional_percent2.enabled());\n\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"foo.bar\", testing::Matcher<const envoy::type::v3::FractionalPercent&>(\n                                            Percent(0))))\n      .WillOnce(Return(false));\n  EXPECT_EQ(false, test_fractional_percent2.enabled());\n}\n\n} // namespace\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file1",
    "content": "hello\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file10",
    "content": "52\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file11",
    "content": "true\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file12",
    "content": "FaLSe\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file13",
    "content": " true \n\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file14",
    "content": "Layer cake\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file15",
    "content": "Cheese cake\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file2",
    "content": "world\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file3",
    "content": "2"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file4",
    "content": "123\n\n\n\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file5",
    "content": "# This is a comment in a file with an integer.\n123\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file6",
    "content": "# This is a comment in a file with a string.\n/home#about-us\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file7",
    "content": "# This is a comment in an empty file.\n# This file was intentionally left blank.\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file8",
    "content": "numerator: 52\ndenominator: HUNDRED\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file9",
    "content": "numerator: 100\ndenominator: NONSENSE\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file_with_double",
    "content": "23.2\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file_with_double_comment",
    "content": "# Here's a comment!\n2.718\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file_with_double_newlines",
    "content": "3.141\n\n\n\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file_with_large_integer",
    "content": "# 2^64 * 10\n184467440737095516160\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file_with_negative_double",
    "content": "-4.2\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/file_with_words",
    "content": "bogus string\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy/subdir/file",
    "content": "hello\n"
  },
  {
    "path": "test/common/runtime/test_data/root/envoy_override/file1",
    "content": "hello override\n"
  },
  {
    "path": "test/common/runtime/utility.h",
    "content": "#pragma once\n\n#include \"common/runtime/runtime_features.h\"\n#include \"common/runtime/runtime_impl.h\"\n\nnamespace Envoy {\nnamespace Runtime {\n\nclass RuntimeFeaturesPeer {\npublic:\n  static bool enableFeature(const std::string& feature) {\n    // Remove from disabled features and add to enabled features.\n    const_cast<Runtime::RuntimeFeatures*>(&Runtime::RuntimeFeaturesDefaults::get())\n        ->disabled_features_.erase(feature);\n    return const_cast<Runtime::RuntimeFeatures*>(&Runtime::RuntimeFeaturesDefaults::get())\n        ->enabled_features_.insert(feature)\n        .second;\n  }\n  static bool disableFeature(const std::string& feature) {\n    // Remove from enabled features and add to disabled features.\n    const_cast<Runtime::RuntimeFeatures*>(&Runtime::RuntimeFeaturesDefaults::get())\n        ->enabled_features_.erase(feature);\n    return const_cast<Runtime::RuntimeFeatures*>(&Runtime::RuntimeFeaturesDefaults::get())\n        ->disabled_features_.insert(feature)\n        .second;\n  }\n};\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/secret/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"secret_manager_impl_test\",\n    srcs = [\"secret_manager_impl_test.cc\"],\n    data = [\n        \"//test/extensions/transport_sockets/tls/test_data:certs\",\n    ],\n    deps = [\n        \"//source/common/secret:sds_api_lib\",\n        \"//source/common/secret:secret_manager_impl_lib\",\n        \"//source/common/ssl:certificate_validation_context_config_impl_lib\",\n        \"//source/common/ssl:tls_certificate_config_impl_lib\",\n        \"//test/mocks/server:config_tracker_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/grpc_credential/v2alpha:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"sds_api_test\",\n    srcs = [\"sds_api_test.cc\"],\n    data = [\n        \"//test/extensions/transport_sockets/tls/test_data:certs\",\n    ],\n    deps = [\n        \"//source/common/secret:sds_api_lib\",\n        \"//source/common/ssl:certificate_validation_context_config_impl_lib\",\n        \"//source/common/ssl:tls_certificate_config_impl_lib\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:environment_lib\",\n        
\"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/secret/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/common/secret/sds_api_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/service/secret/v3/sds.pb.h\"\n\n#include \"common/config/datasource.h\"\n#include \"common/config/filesystem_subscription_impl.h\"\n#include \"common/secret/sds_api.h\"\n#include \"common/ssl/certificate_validation_context_config_impl.h\"\n#include \"common/ssl/tls_certificate_config_impl.h\"\n\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/secret/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\nusing ::testing::Invoke;\nusing ::testing::InvokeWithoutArgs;\n\nnamespace Envoy {\nnamespace Secret {\nnamespace {\n\nclass SdsApiTest : public testing::Test {\nprotected:\n  SdsApiTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  void initialize() { init_target_handle_->initialize(init_watcher_); }\n  void setupMocks() {\n    EXPECT_CALL(init_manager_, add(_)).WillOnce(Invoke([this](const Init::Target& target) {\n      init_target_handle_ = target.createHandle(\"test\");\n    }));\n  }\n\n  Api::ApiPtr api_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  NiceMock<Config::MockSubscriptionFactory> subscription_factory_;\n  NiceMock<Init::MockManager> init_manager_;\n  NiceMock<Init::ExpectableWatcherImpl> init_watcher_;\n  Event::GlobalTimeSystem time_system_;\n  Init::TargetHandlePtr init_target_handle_;\n  Event::DispatcherPtr dispatcher_;\n};\n\n// Validate that SdsApi object is created and initialized successfully.\nTEST_F(SdsApiTest, BasicTest) {\n  
::testing::InSequence s;\n  const envoy::service::secret::v3::SdsDummy dummy;\n  NiceMock<Server::MockInstance> server;\n\n  envoy::config::core::v3::ConfigSource config_source;\n  setupMocks();\n  TlsCertificateSdsApi sds_api(\n      config_source, \"abc.com\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  sds_api.registerInitTarget(init_manager_);\n  initialize();\n}\n\n// Validate that a noop init manager is used if the InitManger passed into the constructor\n// has been already initialized. This is a regression test for\n// https://github.com/envoyproxy/envoy/issues/12013\nTEST_F(SdsApiTest, InitManagerInitialised) {\n  NiceMock<Server::MockInstance> server;\n  std::string sds_config =\n      R\"EOF(\n  resources:\n    - \"@type\": \"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\"\n      name: \"abc.com\"\n      tls_certificate:\n        certificate_chain:\n          filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n        private_key:\n          filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n     )EOF\";\n\n  const std::string sds_config_path = TestEnvironment::writeStringToFileForTest(\n      \"sds.yaml\", TestEnvironment::substitute(sds_config), false);\n  NiceMock<Config::MockSubscriptionCallbacks> callbacks;\n  TestUtility::TestOpaqueResourceDecoderImpl<envoy::extensions::transport_sockets::tls::v3::Secret>\n      resource_decoder(\"name\");\n  Config::SubscriptionStats stats(Config::Utility::generateStats(server.stats()));\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor;\n  envoy::config::core::v3::ConfigSource config_source;\n\n  EXPECT_CALL(subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _))\n      .WillOnce(Invoke([this, &sds_config_path, &resource_decoder,\n                        &stats](const 
envoy::config::core::v3::ConfigSource&, absl::string_view,\n                                Stats::Scope&, Config::SubscriptionCallbacks& cbs,\n                                Config::OpaqueResourceDecoder&) -> Config::SubscriptionPtr {\n        return std::make_unique<Config::FilesystemSubscriptionImpl>(*dispatcher_, sds_config_path,\n                                                                    cbs, resource_decoder, stats,\n                                                                    validation_visitor_, *api_);\n      }));\n\n  auto init_manager = Init::ManagerImpl(\"testing\");\n  auto noop_init_target =\n      Init::TargetImpl(fmt::format(\"noop test init target\"), [] { /*Do nothing.*/ });\n  init_manager.add(noop_init_target);\n  auto noop_watcher = Init::WatcherImpl(fmt::format(\"noop watcher\"), []() { /*Do nothing.*/ });\n  init_manager.initialize(noop_watcher);\n\n  EXPECT_EQ(Init::Manager::State::Initializing, init_manager.state());\n  TlsCertificateSdsApi sds_api(\n      config_source, \"abc.com\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  EXPECT_NO_THROW(sds_api.registerInitTarget(init_manager));\n}\n\n// Validate that bad ConfigSources are caught at construction time. 
This is a regression test for\n// https://github.com/envoyproxy/envoy/issues/10976.\nTEST_F(SdsApiTest, BadConfigSource) {\n  ::testing::InSequence s;\n  NiceMock<Server::MockInstance> server;\n  envoy::config::core::v3::ConfigSource config_source;\n  EXPECT_CALL(subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _))\n      .WillOnce(InvokeWithoutArgs([]() -> Config::SubscriptionPtr {\n        throw EnvoyException(\"bad config\");\n        return nullptr;\n      }));\n  EXPECT_THROW_WITH_MESSAGE(TlsCertificateSdsApi(\n                                config_source, \"abc.com\", subscription_factory_, time_system_,\n                                validation_visitor_, server.stats(), []() {}, *dispatcher_, *api_),\n                            EnvoyException, \"bad config\");\n}\n\n// Validate that TlsCertificateSdsApi updates secrets successfully if a good secret\n// is passed to onConfigUpdate().\nTEST_F(SdsApiTest, DynamicTlsCertificateUpdateSuccess) {\n  NiceMock<Server::MockInstance> server;\n  envoy::config::core::v3::ConfigSource config_source;\n  setupMocks();\n  TlsCertificateSdsApi sds_api(\n      config_source, \"abc.com\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  sds_api.registerInitTarget(init_manager_);\n  initialize();\n  NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  auto handle =\n      sds_api.addUpdateCallback([&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n\n  std::string yaml =\n      R\"EOF(\n  name: \"abc.com\"\n  tls_certificate:\n    certificate_chain:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n    private_key:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n    )EOF\";\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), 
typed_secret);\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret});\n\n  EXPECT_CALL(secret_callback, onAddOrUpdateSecret());\n  subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\");\n\n  Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), nullptr, *api_);\n  const std::string cert_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)),\n            tls_config.certificateChain());\n\n  const std::string key_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(key_pem)),\n            tls_config.privateKey());\n\n  handle->remove();\n}\n\nclass PartialMockSds : public SdsApi {\npublic:\n  PartialMockSds(NiceMock<Server::MockInstance>& server, NiceMock<Init::MockManager>& init_manager,\n                 envoy::config::core::v3::ConfigSource& config_source,\n                 Config::SubscriptionFactory& subscription_factory, TimeSource& time_source,\n                 Event::Dispatcher& dispatcher, Api::Api& api)\n      : SdsApi(\n            config_source, \"abc.com\", subscription_factory, time_source, validation_visitor_,\n            server.stats(), []() {}, dispatcher, api) {\n    registerInitTarget(init_manager);\n  }\n\n  MOCK_METHOD(void, onConfigUpdate,\n              (const std::vector<Config::DecodedResourceRef>&, const std::string&));\n  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added,\n                      const Protobuf::RepeatedPtrField<std::string>& removed,\n                      const std::string& version) override {\n    SdsApi::onConfigUpdate(added, removed, version);\n  }\n  void setSecret(const envoy::extensions::transport_sockets::tls::v3::Secret&) override {}\n  void validateConfig(const 
envoy::extensions::transport_sockets::tls::v3::Secret&) override {}\n  std::vector<std::string> getDataSourceFilenames() override { return {}; }\n\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n};\n\n// Basic test of delta's passthrough call to the state-of-the-world variant, to\n// increase coverage.\nTEST_F(SdsApiTest, Delta) {\n  auto secret = std::make_unique<envoy::extensions::transport_sockets::tls::v3::Secret>();\n  secret->set_name(\"secret_1\");\n  Config::DecodedResourceImpl resource(std::move(secret), \"name\", {}, \"version1\");\n  std::vector<Config::DecodedResourceRef> resources{resource};\n\n  NiceMock<Server::MockInstance> server;\n  envoy::config::core::v3::ConfigSource config_source;\n  Event::GlobalTimeSystem time_system;\n  setupMocks();\n  PartialMockSds sds(server, init_manager_, config_source, subscription_factory_, time_system,\n                     *dispatcher_, *api_);\n  initialize();\n  EXPECT_CALL(sds, onConfigUpdate(DecodedResourcesEq(resources), \"version1\"));\n  subscription_factory_.callbacks_->onConfigUpdate(resources, {}, \"ignored\");\n\n  // An attempt to remove a resource logs an error, but otherwise just carries on (ignoring the\n  // removal attempt).\n  auto secret_again = std::make_unique<envoy::extensions::transport_sockets::tls::v3::Secret>();\n  secret_again->set_name(\"secret_1\");\n  Config::DecodedResourceImpl resource_v2(std::move(secret_again), \"name\", {}, \"version2\");\n  std::vector<Config::DecodedResourceRef> resources_v2{resource_v2};\n  EXPECT_CALL(sds, onConfigUpdate(DecodedResourcesEq(resources_v2), \"version2\"));\n  Protobuf::RepeatedPtrField<std::string> removals;\n  *removals.Add() = \"route_0\";\n  subscription_factory_.callbacks_->onConfigUpdate(resources_v2, removals, \"ignored\");\n}\n\n// Tests SDS's use of the delta variant of onConfigUpdate().\nTEST_F(SdsApiTest, DeltaUpdateSuccess) {\n  NiceMock<Server::MockInstance> server;\n  envoy::config::core::v3::ConfigSource 
config_source;\n  setupMocks();\n  TlsCertificateSdsApi sds_api(\n      config_source, \"abc.com\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  sds_api.registerInitTarget(init_manager_);\n\n  NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  auto handle =\n      sds_api.addUpdateCallback([&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n\n  std::string yaml =\n      R\"EOF(\n  name: \"abc.com\"\n  tls_certificate:\n    certificate_chain:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n    private_key:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n    )EOF\";\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret);\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret});\n\n  EXPECT_CALL(secret_callback, onAddOrUpdateSecret());\n  initialize();\n  subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, \"\");\n\n  Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), nullptr, *api_);\n  const std::string cert_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)),\n            tls_config.certificateChain());\n\n  const std::string key_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(key_pem)),\n            tls_config.privateKey());\n\n  handle->remove();\n}\n\n// Validate that CertificateValidationContextSdsApi updates secrets successfully if\n// a good secret is passed to onConfigUpdate().\nTEST_F(SdsApiTest, 
DynamicCertificateValidationContextUpdateSuccess) {\n  NiceMock<Server::MockInstance> server;\n  envoy::config::core::v3::ConfigSource config_source;\n  setupMocks();\n  CertificateValidationContextSdsApi sds_api(\n      config_source, \"abc.com\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  sds_api.registerInitTarget(init_manager_);\n\n  NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  auto handle =\n      sds_api.addUpdateCallback([&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n\n  std::string yaml =\n      R\"EOF(\n  name: \"abc.com\"\n  validation_context:\n    trusted_ca: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\" }\n    allow_expired_certificate: true\n  )EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret);\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret});\n  EXPECT_CALL(secret_callback, onAddOrUpdateSecret());\n  initialize();\n  subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\");\n\n  Ssl::CertificateValidationContextConfigImpl cvc_config(*sds_api.secret(), *api_);\n  const std::string ca_cert =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(ca_cert)),\n            cvc_config.caCert());\n\n  handle->remove();\n}\n\nclass CvcValidationCallback {\npublic:\n  virtual ~CvcValidationCallback() = default;\n  virtual void validateCvc(\n      const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&) PURE;\n};\n\nclass MockCvcValidationCallback : public CvcValidationCallback {\npublic:\n  MockCvcValidationCallback() = default;\n  ~MockCvcValidationCallback() override = default;\n  MOCK_METHOD(void, 
validateCvc,\n              (const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&));\n};\n\n// Validate that CertificateValidationContextSdsApi updates secrets successfully if\n// a good secret is passed to onConfigUpdate(), and that merged CertificateValidationContext\n// provides correct information.\nTEST_F(SdsApiTest, DefaultCertificateValidationContextTest) {\n  NiceMock<Server::MockInstance> server;\n  envoy::config::core::v3::ConfigSource config_source;\n  setupMocks();\n  CertificateValidationContextSdsApi sds_api(\n      config_source, \"abc.com\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  sds_api.registerInitTarget(init_manager_);\n\n  NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  auto handle =\n      sds_api.addUpdateCallback([&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n  NiceMock<MockCvcValidationCallback> validation_callback;\n  auto validation_handle = sds_api.addValidationCallback(\n      [&validation_callback](\n          const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext& cvc) {\n        validation_callback.validateCvc(cvc);\n      });\n\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  typed_secret.set_name(\"abc.com\");\n  auto* dynamic_cvc = typed_secret.mutable_validation_context();\n  dynamic_cvc->set_allow_expired_certificate(false);\n  dynamic_cvc->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  dynamic_cvc->add_match_subject_alt_names()->set_exact(\"second san\");\n  const std::string dynamic_verify_certificate_spki =\n      \"QGJRPdmx/r5EGOFLb2MTiZp2isyC0Whht7iazhzXaCM=\";\n  dynamic_cvc->add_verify_certificate_spki(dynamic_verify_certificate_spki);\n  EXPECT_CALL(secret_callback, onAddOrUpdateSecret());\n  EXPECT_CALL(validation_callback, 
validateCvc(_));\n\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret});\n  initialize();\n  subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\");\n\n  const std::string default_verify_certificate_hash =\n      \"0000000000000000000000000000000000000000000000000000000000000000\";\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext default_cvc;\n  default_cvc.set_allow_expired_certificate(true);\n  default_cvc.mutable_trusted_ca()->set_inline_bytes(\"fake trusted ca\");\n  default_cvc.add_match_subject_alt_names()->set_exact(\"first san\");\n  default_cvc.add_verify_certificate_hash(default_verify_certificate_hash);\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext merged_cvc =\n      default_cvc;\n  merged_cvc.MergeFrom(*sds_api.secret());\n  Ssl::CertificateValidationContextConfigImpl cvc_config(merged_cvc, *api_);\n  // Verify that merging CertificateValidationContext applies logical OR to bool\n  // field.\n  EXPECT_TRUE(cvc_config.allowExpiredCertificate());\n  // Verify that singular fields are overwritten.\n  const std::string ca_cert =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(ca_cert)),\n            cvc_config.caCert());\n  // Verify that repeated fields are concatenated.\n  EXPECT_EQ(2, cvc_config.subjectAltNameMatchers().size());\n  EXPECT_EQ(\"first san\", cvc_config.subjectAltNameMatchers()[0].exact());\n  EXPECT_EQ(\"second san\", cvc_config.subjectAltNameMatchers()[1].exact());\n  // Verify that if dynamic CertificateValidationContext does not set certificate hash list, the new\n  // secret contains hash list from default CertificateValidationContext.\n  EXPECT_EQ(1, cvc_config.verifyCertificateHashList().size());\n  EXPECT_EQ(default_verify_certificate_hash, cvc_config.verifyCertificateHashList()[0]);\n  // Verify 
that if default CertificateValidationContext does not set certificate SPKI list, the new\n  // secret contains SPKI list from dynamic CertificateValidationContext.\n  EXPECT_EQ(1, cvc_config.verifyCertificateSpkiList().size());\n  EXPECT_EQ(dynamic_verify_certificate_spki, cvc_config.verifyCertificateSpkiList()[0]);\n\n  handle->remove();\n  validation_handle->remove();\n}\n\nclass GenericSecretValidationCallback {\npublic:\n  virtual ~GenericSecretValidationCallback() = default;\n  virtual void\n  validateGenericSecret(const envoy::extensions::transport_sockets::tls::v3::GenericSecret&) PURE;\n};\n\nclass MockGenericSecretValidationCallback : public GenericSecretValidationCallback {\npublic:\n  MockGenericSecretValidationCallback() = default;\n  ~MockGenericSecretValidationCallback() override = default;\n  MOCK_METHOD(void, validateGenericSecret,\n              (const envoy::extensions::transport_sockets::tls::v3::GenericSecret&));\n};\n\n// Validate that GenericSecretSdsApi updates secrets successfully if\n// a good secret is passed to onConfigUpdate().\nTEST_F(SdsApiTest, GenericSecretSdsApiTest) {\n  NiceMock<Server::MockInstance> server;\n  envoy::config::core::v3::ConfigSource config_source;\n  setupMocks();\n  GenericSecretSdsApi sds_api(\n      config_source, \"encryption_key\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  sds_api.registerInitTarget(init_manager_);\n\n  NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  auto handle =\n      sds_api.addUpdateCallback([&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n  NiceMock<MockGenericSecretValidationCallback> validation_callback;\n  auto validation_handle = sds_api.addValidationCallback(\n      [&validation_callback](\n          const envoy::extensions::transport_sockets::tls::v3::GenericSecret& secret) {\n        validation_callback.validateGenericSecret(secret);\n      });\n\n  std::string yaml =\n      
R\"EOF(\nname: \"encryption_key\"\ngeneric_secret:\n  secret:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/aes_128_key\"\n)EOF\";\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret);\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret});\n  EXPECT_CALL(secret_callback, onAddOrUpdateSecret());\n  EXPECT_CALL(validation_callback, validateGenericSecret(_));\n  initialize();\n  subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\");\n\n  const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret(\n      *sds_api.secret());\n  const std::string secret_path =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/aes_128_key\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(secret_path)),\n            Config::DataSource::read(generic_secret.secret(), true, *api_));\n\n  handle->remove();\n  validation_handle->remove();\n}\n\n// Validate that SdsApi throws exception if an empty secret is passed to onConfigUpdate().\nTEST_F(SdsApiTest, EmptyResource) {\n  NiceMock<Server::MockInstance> server;\n  envoy::config::core::v3::ConfigSource config_source;\n  setupMocks();\n  TlsCertificateSdsApi sds_api(\n      config_source, \"abc.com\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  sds_api.registerInitTarget(init_manager_);\n\n  initialize();\n  EXPECT_THROW_WITH_MESSAGE(subscription_factory_.callbacks_->onConfigUpdate({}, \"\"),\n                            EnvoyException,\n                            \"Missing SDS resources for abc.com in onConfigUpdate()\");\n}\n\n// Validate that SdsApi throws exception if multiple secrets are passed to onConfigUpdate().\nTEST_F(SdsApiTest, SecretUpdateWrongSize) {\n  NiceMock<Server::MockInstance> server;\n  
envoy::config::core::v3::ConfigSource config_source;\n  setupMocks();\n  TlsCertificateSdsApi sds_api(\n      config_source, \"abc.com\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  sds_api.registerInitTarget(init_manager_);\n\n  std::string yaml =\n      R\"EOF(\n    name: \"abc.com\"\n    tls_certificate:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n      )EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret);\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret, typed_secret});\n\n  initialize();\n  EXPECT_THROW_WITH_MESSAGE(\n      subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\"),\n      EnvoyException, \"Unexpected SDS secrets length: 2\");\n}\n\n// Validate that SdsApi throws exception if secret name passed to onConfigUpdate()\n// does not match configured name.\nTEST_F(SdsApiTest, SecretUpdateWrongSecretName) {\n  NiceMock<Server::MockInstance> server;\n  envoy::config::core::v3::ConfigSource config_source;\n  setupMocks();\n  TlsCertificateSdsApi sds_api(\n      config_source, \"abc.com\", subscription_factory_, time_system_, validation_visitor_,\n      server.stats(), []() {}, *dispatcher_, *api_);\n  sds_api.registerInitTarget(init_manager_);\n\n  std::string yaml =\n      R\"EOF(\n      name: \"wrong.name.com\"\n      tls_certificate:\n        certificate_chain:\n          filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n        private_key:\n          filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n        
)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret);\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret});\n\n  initialize();\n  EXPECT_THROW_WITH_MESSAGE(\n      subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\"),\n      EnvoyException, \"Unexpected SDS secret (expecting abc.com): wrong.name.com\");\n}\n\n} // namespace\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/secret/secret_manager_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/grpc_credential/v2alpha/file_based_metadata.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/common/logger.h\"\n#include \"common/config/api_version.h\"\n#include \"common/secret/sds_api.h\"\n#include \"common/secret/secret_manager_impl.h\"\n#include \"common/ssl/certificate_validation_context_config_impl.h\"\n#include \"common/ssl/tls_certificate_config_impl.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/server/config_tracker.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Secret {\nnamespace {\n\nclass SecretManagerImplTest : public testing::Test, public Logger::Loggable<Logger::Id::secret> {\nprotected:\n  SecretManagerImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  void checkConfigDump(const std::string& expected_dump_yaml) {\n    auto message_ptr = config_tracker_.config_tracker_callbacks_[\"secrets\"]();\n    const auto& secrets_config_dump =\n        dynamic_cast<const envoy::admin::v3::SecretsConfigDump&>(*message_ptr);\n    envoy::admin::v3::SecretsConfigDump expected_secrets_config_dump;\n    TestUtility::loadFromYaml(expected_dump_yaml, expected_secrets_config_dump);\n    EXPECT_THAT(secrets_config_dump,\n                ProtoEqIgnoreRepeatedFieldOrdering(expected_secrets_config_dump));\n  }\n\n  void setupSecretProviderContext() {}\n\n  Api::ApiPtr 
api_;\n  testing::NiceMock<Server::MockConfigTracker> config_tracker_;\n  Event::SimulatedTimeSystem time_system_;\n  Event::DispatcherPtr dispatcher_;\n};\n\n// Validate that secret manager adds static TLS certificate secret successfully.\nTEST_F(SecretManagerImplTest, TlsCertificateSecretLoadSuccess) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n  const std::string yaml =\n      R\"EOF(\nname: \"abc.com\"\ntls_certificate:\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n)EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config);\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n  secret_manager->addStaticSecret(secret_config);\n\n  ASSERT_EQ(secret_manager->findStaticTlsCertificateProvider(\"undefined\"), nullptr);\n  ASSERT_NE(secret_manager->findStaticTlsCertificateProvider(\"abc.com\"), nullptr);\n\n  Ssl::TlsCertificateConfigImpl tls_config(\n      *secret_manager->findStaticTlsCertificateProvider(\"abc.com\")->secret(), nullptr, *api_);\n  const std::string cert_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)),\n            tls_config.certificateChain());\n\n  const std::string key_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(key_pem)),\n            tls_config.privateKey());\n}\n\n// Validate that secret manager throws an exception when adding duplicated static TLS certificate\n// secret.\nTEST_F(SecretManagerImplTest, DuplicateStaticTlsCertificateSecret) {\n  
envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n  const std::string yaml =\n      R\"EOF(\n    name: \"abc.com\"\n    tls_certificate:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n    )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config);\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n  secret_manager->addStaticSecret(secret_config);\n\n  ASSERT_NE(secret_manager->findStaticTlsCertificateProvider(\"abc.com\"), nullptr);\n  EXPECT_THROW_WITH_MESSAGE(secret_manager->addStaticSecret(secret_config), EnvoyException,\n                            \"Duplicate static TlsCertificate secret name abc.com\");\n}\n\n// Validate that secret manager adds static certificate validation context secret successfully.\nTEST_F(SecretManagerImplTest, CertificateValidationContextSecretLoadSuccess) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n  const std::string yaml =\n      R\"EOF(\n      name: \"abc.com\"\n      validation_context:\n        trusted_ca: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\" }\n        allow_expired_certificate: true\n      )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config);\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n  secret_manager->addStaticSecret(secret_config);\n\n  ASSERT_EQ(secret_manager->findStaticCertificateValidationContextProvider(\"undefined\"), nullptr);\n  ASSERT_NE(secret_manager->findStaticCertificateValidationContextProvider(\"abc.com\"), nullptr);\n  Ssl::CertificateValidationContextConfigImpl cvc_config(\n      
*secret_manager->findStaticCertificateValidationContextProvider(\"abc.com\")->secret(), *api_);\n  const std::string cert_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)),\n            cvc_config.caCert());\n}\n\n// Validate that secret manager throws an exception when adding duplicated static certificate\n// validation context secret.\nTEST_F(SecretManagerImplTest, DuplicateStaticCertificateValidationContextSecret) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n  const std::string yaml =\n      R\"EOF(\n    name: \"abc.com\"\n    validation_context:\n      trusted_ca: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\" }\n      allow_expired_certificate: true\n    )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config);\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n  secret_manager->addStaticSecret(secret_config);\n\n  ASSERT_NE(secret_manager->findStaticCertificateValidationContextProvider(\"abc.com\"), nullptr);\n  EXPECT_THROW_WITH_MESSAGE(secret_manager->addStaticSecret(secret_config), EnvoyException,\n                            \"Duplicate static CertificateValidationContext secret name abc.com\");\n}\n\n// Validate that secret manager adds static STKs secret successfully.\nTEST_F(SecretManagerImplTest, SessionTicketKeysLoadSuccess) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n\n  const std::string yaml =\n      R\"EOF(\nname: \"abc.com\"\nsession_ticket_keys:\n  keys:\n    - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/keys.bin\"\n)EOF\";\n\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config);\n\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n\n  
secret_manager->addStaticSecret(secret_config);\n\n  ASSERT_EQ(secret_manager->findStaticTlsSessionTicketKeysContextProvider(\"undefined\"), nullptr);\n  ASSERT_NE(secret_manager->findStaticTlsSessionTicketKeysContextProvider(\"abc.com\"), nullptr);\n\n  const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys session_ticket_keys(\n      *secret_manager->findStaticTlsSessionTicketKeysContextProvider(\"abc.com\")->secret());\n  const std::string keys_path =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/keys.bin\";\n  EXPECT_EQ(session_ticket_keys.keys_size(), 1);\n  EXPECT_EQ(session_ticket_keys.keys()[0].filename(), TestEnvironment::substitute(keys_path));\n}\n\n// Validate that secret manager throws an exception when adding duplicated static STKs secret.\nTEST_F(SecretManagerImplTest, DuplicateSessionTicketKeysSecret) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n\n  const std::string yaml =\n      R\"EOF(\nname: \"abc.com\"\nsession_ticket_keys:\n  keys:\n    - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/keys.bin\"\n)EOF\";\n\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config);\n\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n\n  secret_manager->addStaticSecret(secret_config);\n\n  ASSERT_NE(secret_manager->findStaticTlsSessionTicketKeysContextProvider(\"abc.com\"), nullptr);\n  EXPECT_THROW_WITH_MESSAGE(secret_manager->addStaticSecret(secret_config), EnvoyException,\n                            \"Duplicate static TlsSessionTicketKeys secret name abc.com\");\n}\n\n// Validate that secret manager adds static generic secret successfully.\nTEST_F(SecretManagerImplTest, GenericSecretLoadSuccess) {\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n\n  envoy::extensions::transport_sockets::tls::v3::Secret secret;\n  const std::string yaml =\n      
R\"EOF(\nname: \"encryption_key\"\ngeneric_secret:\n  secret:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/aes_128_key\"\n)EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret);\n  secret_manager->addStaticSecret(secret);\n\n  ASSERT_EQ(secret_manager->findStaticGenericSecretProvider(\"undefined\"), nullptr);\n  ASSERT_NE(secret_manager->findStaticGenericSecretProvider(\"encryption_key\"), nullptr);\n\n  const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret(\n      *secret_manager->findStaticGenericSecretProvider(\"encryption_key\")->secret());\n  const std::string secret_path =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/aes_128_key\";\n  EXPECT_EQ(generic_secret.secret().filename(), TestEnvironment::substitute(secret_path));\n}\n\n// Validate that secret manager throws an exception when adding duplicated static generic secret.\nTEST_F(SecretManagerImplTest, DuplicateGenericSecret) {\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n\n  envoy::extensions::transport_sockets::tls::v3::Secret secret;\n  const std::string yaml =\n      R\"EOF(\nname: \"encryption_key\"\ngeneric_secret:\n  secret:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/aes_128_key\"\n)EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret);\n  secret_manager->addStaticSecret(secret);\n\n  ASSERT_NE(secret_manager->findStaticGenericSecretProvider(\"encryption_key\"), nullptr);\n  EXPECT_THROW_WITH_MESSAGE(secret_manager->addStaticSecret(secret), EnvoyException,\n                            \"Duplicate static GenericSecret secret name encryption_key\");\n}\n\n// Validate that secret manager deduplicates dynamic TLS certificate secret provider.\n// Regression test of https://github.com/envoyproxy/envoy/issues/5744\nTEST_F(SecretManagerImplTest, 
DeduplicateDynamicTlsCertificateSecretProvider) {\n  Server::MockInstance server;\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> secret_context;\n\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Random::MockRandomGenerator> random;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Init::ExpectableWatcherImpl> init_watcher;\n  Init::TargetHandlePtr init_target_handle;\n  EXPECT_CALL(init_manager, add(_))\n      .WillRepeatedly(Invoke([&init_target_handle](const Init::Target& target) {\n        init_target_handle = target.createHandle(\"test\");\n      }));\n  EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats));\n  EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n\n  envoy::config::core::v3::ConfigSource config_source;\n  TestUtility::loadFromYaml(R\"(\napi_config_source:\n  api_type: GRPC\n  grpc_services:\n  - google_grpc:\n      call_credentials:\n      - from_plugin:\n          name: file_based_metadata\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig\n      stat_prefix: sdsstat\n      credentials_factory_name: envoy.grpc_credentials.file_based_metadata\n  )\",\n                            config_source);\n  config_source.mutable_api_config_source()\n      ->mutable_grpc_services(0)\n      ->mutable_google_grpc()\n      ->mutable_call_credentials(0)\n      ->mutable_from_plugin()\n      ->mutable_typed_config()\n      ->set_value(Base64::decode(\"CjUKMy92YXIvcnVuL3NlY3JldHMva3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3Vud\"\n                                 
\"C90b2tlbhILeC10b2tlbi1iaW4=\"));\n  auto secret_provider1 =\n      secret_manager->findOrCreateTlsCertificateProvider(config_source, \"abc.com\", secret_context);\n\n  // The base64 encoded proto binary is identical to the one above, but in different field order.\n  // It is also identical to the YAML below.\n  config_source.mutable_api_config_source()\n      ->mutable_grpc_services(0)\n      ->mutable_google_grpc()\n      ->mutable_call_credentials(0)\n      ->mutable_from_plugin()\n      ->mutable_typed_config()\n      ->set_value(Base64::decode(\"Egt4LXRva2VuLWJpbgo1CjMvdmFyL3J1bi9zZWNyZXRzL2t1YmVybmV0ZXMuaW8vc\"\n                                 \"2VydmljZWFjY291bnQvdG9rZW4=\"));\n  auto secret_provider2 =\n      secret_manager->findOrCreateTlsCertificateProvider(config_source, \"abc.com\", secret_context);\n\n  API_NO_BOOST(envoy::config::grpc_credential::v2alpha::FileBasedMetadataConfig)\n  file_based_metadata_config;\n  TestUtility::loadFromYaml(R\"(\nheader_key: x-token-bin\nsecret_data:\n  filename: \"/var/run/secrets/kubernetes.io/serviceaccount/token\"\n  )\",\n                            file_based_metadata_config);\n  config_source.mutable_api_config_source()\n      ->mutable_grpc_services(0)\n      ->mutable_google_grpc()\n      ->mutable_call_credentials(0)\n      ->mutable_from_plugin()\n      ->mutable_typed_config()\n      ->PackFrom(file_based_metadata_config);\n  auto secret_provider3 =\n      secret_manager->findOrCreateTlsCertificateProvider(config_source, \"abc.com\", secret_context);\n\n  EXPECT_EQ(secret_provider1, secret_provider2);\n  EXPECT_EQ(secret_provider2, secret_provider3);\n}\n\nTEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) {\n  Server::MockInstance server;\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> secret_context;\n\n  envoy::config::core::v3::ConfigSource config_source;\n  
NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Random::MockRandomGenerator> random;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Init::ExpectableWatcherImpl> init_watcher;\n  Init::TargetHandlePtr init_target_handle;\n  EXPECT_CALL(init_manager, add(_))\n      .WillOnce(Invoke([&init_target_handle](const Init::Target& target) {\n        init_target_handle = target.createHandle(\"test\");\n      }));\n  EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats));\n  EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_));\n  EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info));\n  EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_));\n\n  auto secret_provider =\n      secret_manager->findOrCreateTlsCertificateProvider(config_source, \"abc.com\", secret_context);\n  const std::string yaml =\n      R\"EOF(\nname: \"abc.com\"\ntls_certificate:\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n)EOF\";\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret);\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret});\n  init_target_handle->initialize(init_watcher);\n  secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources.refvec_, \"\");\n  Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), nullptr, *api_);\n  const std::string cert_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\";\n  
EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)),\n            tls_config.certificateChain());\n  const std::string key_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(key_pem)),\n            tls_config.privateKey());\n}\n\nTEST_F(SecretManagerImplTest, SdsDynamicGenericSecret) {\n  Server::MockInstance server;\n  std::unique_ptr<SecretManager> secret_manager(new SecretManagerImpl(config_tracker_));\n  envoy::config::core::v3::ConfigSource config_source;\n\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> secret_context;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  Init::TargetHandlePtr init_target_handle;\n  NiceMock<Init::ExpectableWatcherImpl> init_watcher;\n\n  EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_));\n  EXPECT_CALL(secret_context, messageValidationVisitor()).WillOnce(ReturnRef(validation_visitor));\n  EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats));\n  EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info));\n  EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_));\n  EXPECT_CALL(init_manager, add(_))\n      .WillOnce(Invoke([&init_target_handle](const Init::Target& target) {\n        init_target_handle = target.createHandle(\"test\");\n      }));\n\n  auto secret_provider = secret_manager->findOrCreateGenericSecretProvider(\n      config_source, \"encryption_key\", secret_context);\n\n  const std::string yaml = R\"EOF(\nname: \"encryption_key\"\ngeneric_secret:\n  secret:\n    inline_string: \"DUMMY_AES_128_KEY\"\n)EOF\";\n  
envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret);\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret});\n  init_target_handle->initialize(init_watcher);\n  secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources.refvec_, \"\");\n\n  const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret(\n      *secret_provider->secret());\n  EXPECT_EQ(\"DUMMY_AES_128_KEY\", generic_secret.secret().inline_string());\n}\n\nTEST_F(SecretManagerImplTest, ConfigDumpHandler) {\n  Server::MockInstance server;\n  auto secret_manager = std::make_unique<SecretManagerImpl>(config_tracker_);\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> secret_context;\n\n  envoy::config::core::v3::ConfigSource config_source;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Random::MockRandomGenerator> random;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Init::ExpectableWatcherImpl> init_watcher;\n  Init::TargetHandlePtr init_target_handle;\n  EXPECT_CALL(init_manager, add(_))\n      .WillRepeatedly(Invoke([&init_target_handle](const Init::Target& target) {\n        init_target_handle = target.createHandle(\"test\");\n      }));\n  EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats));\n  EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n\n  auto secret_provider =\n      secret_manager->findOrCreateTlsCertificateProvider(config_source, \"abc.com\", secret_context);\n  const std::string yaml =\n   
   R\"EOF(\nname: \"abc.com\"\ntls_certificate:\n  certificate_chain:\n    inline_string: \"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\"\n  private_key:\n    inline_string: \"DUMMY_INLINE_BYTES_FOR_PRIVATE_KEY\"\n  password:\n    inline_string: \"DUMMY_PASSWORD\"\n)EOF\";\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret);\n  const auto decoded_resources = TestUtility::decodeResources({typed_secret});\n  init_target_handle->initialize(init_watcher);\n  secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources.refvec_, \"keycert-v1\");\n  Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), nullptr, *api_);\n  EXPECT_EQ(\"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\", tls_config.certificateChain());\n  EXPECT_EQ(\"DUMMY_INLINE_BYTES_FOR_PRIVATE_KEY\", tls_config.privateKey());\n  EXPECT_EQ(\"DUMMY_PASSWORD\", tls_config.password());\n\n  // Private key and password are removed.\n  const std::string expected_secrets_config_dump = R\"EOF(\ndynamic_active_secrets:\n- name: \"abc.com\"\n  version_info: \"keycert-v1\"\n  last_updated:\n    seconds: 1234567891\n    nanos: 234000000\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com\"\n    tls_certificate:\n      certificate_chain:\n        inline_string: \"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\"\n      private_key:\n        inline_string: \"[redacted]\"\n      password:\n        inline_string: \"[redacted]\"\n)EOF\";\n  checkConfigDump(expected_secrets_config_dump);\n\n  // Add a dynamic tls validation context provider.\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567899000));\n  auto context_secret_provider = secret_manager->findOrCreateCertificateValidationContextProvider(\n      config_source, \"abc.com.validation\", secret_context);\n  const std::string validation_yaml = R\"EOF(\nname: 
\"abc.com.validation\"\nvalidation_context:\n  trusted_ca:\n    inline_string: \"DUMMY_INLINE_STRING_TRUSTED_CA\"\n)EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(validation_yaml), typed_secret);\n  const auto decoded_resources_2 = TestUtility::decodeResources({typed_secret});\n\n  init_target_handle->initialize(init_watcher);\n  secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources_2.refvec_, \"validation-context-v1\");\n  Ssl::CertificateValidationContextConfigImpl cert_validation_context(\n      *context_secret_provider->secret(), *api_);\n  EXPECT_EQ(\"DUMMY_INLINE_STRING_TRUSTED_CA\", cert_validation_context.caCert());\n  const std::string updated_config_dump = R\"EOF(\ndynamic_active_secrets:\n- name: \"abc.com\"\n  version_info: \"keycert-v1\"\n  last_updated:\n    seconds: 1234567891\n    nanos: 234000000\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com\"\n    tls_certificate:\n      certificate_chain:\n        inline_string: \"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\"\n      private_key:\n        inline_string: \"[redacted]\"\n      password:\n        inline_string: \"[redacted]\"\n- name: \"abc.com.validation\"\n  version_info: \"validation-context-v1\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.validation\"\n    validation_context:\n      trusted_ca:\n        inline_string: \"DUMMY_INLINE_STRING_TRUSTED_CA\"\n)EOF\";\n  checkConfigDump(updated_config_dump);\n\n  // Add a dynamic tls session ticket encryption keys context provider.\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567899000));\n  auto stek_secret_provider = secret_manager->findOrCreateTlsSessionTicketKeysContextProvider(\n      config_source, \"abc.com.stek\", secret_context);\n  const std::string stek_yaml = R\"EOF(\nname: 
\"abc.com.stek\"\nsession_ticket_keys:\n  keys:\n    - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n    - inline_string: \"DUMMY_INLINE_STRING\"\n    - inline_bytes: \"RFVNTVlfSU5MSU5FX0JZVEVT\"\n)EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(stek_yaml), typed_secret);\n  const auto decoded_resources_3 = TestUtility::decodeResources({typed_secret});\n\n  init_target_handle->initialize(init_watcher);\n  secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources_3.refvec_, \"stek-context-v1\");\n  EXPECT_EQ(stek_secret_provider->secret()->keys()[1].inline_string(), \"DUMMY_INLINE_STRING\");\n\n  const std::string updated_once_more_config_dump = R\"EOF(\ndynamic_active_secrets:\n- name: \"abc.com\"\n  version_info: \"keycert-v1\"\n  last_updated:\n    seconds: 1234567891\n    nanos: 234000000\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com\"\n    tls_certificate:\n      certificate_chain:\n        inline_string: \"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\"\n      private_key:\n        inline_string: \"[redacted]\"\n      password:\n        inline_string: \"[redacted]\"\n- name: \"abc.com.validation\"\n  version_info: \"validation-context-v1\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.validation\"\n    validation_context:\n      trusted_ca:\n        inline_string: \"DUMMY_INLINE_STRING_TRUSTED_CA\"\n- name: \"abc.com.stek\"\n  version_info: \"stek-context-v1\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.stek\"\n    session_ticket_keys:\n      keys:\n        - filename: \"[redacted]\"\n        - inline_string: \"[redacted]\"\n        - inline_bytes: 
\"W3JlZGFjdGVkXQ==\"\n)EOF\";\n  checkConfigDump(TestEnvironment::substitute(updated_once_more_config_dump));\n\n  // Add a dynamic generic secret provider.\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567900000));\n  auto generic_secret_provider = secret_manager->findOrCreateGenericSecretProvider(\n      config_source, \"signing_key\", secret_context);\n\n  const std::string generic_secret_yaml = R\"EOF(\nname: \"signing_key\"\ngeneric_secret:\n  secret:\n    inline_string: \"DUMMY_ECDSA_KEY\"\n)EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(generic_secret_yaml), typed_secret);\n  const auto decoded_resources_4 = TestUtility::decodeResources({typed_secret});\n  init_target_handle->initialize(init_watcher);\n  secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(\n      decoded_resources_4.refvec_, \"signing-key-v1\");\n\n  const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret(\n      *generic_secret_provider->secret());\n  EXPECT_EQ(\"DUMMY_ECDSA_KEY\", generic_secret.secret().inline_string());\n\n  const std::string config_dump_with_generic_secret = R\"EOF(\ndynamic_active_secrets:\n- name: \"abc.com\"\n  version_info: \"keycert-v1\"\n  last_updated:\n    seconds: 1234567891\n    nanos: 234000000\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com\"\n    tls_certificate:\n      certificate_chain:\n        inline_string: \"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\"\n      private_key:\n        inline_string: \"[redacted]\"\n      password:\n        inline_string: \"[redacted]\"\n- name: \"abc.com.validation\"\n  version_info: \"validation-context-v1\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.validation\"\n    validation_context:\n      trusted_ca:\n        inline_string: 
\"DUMMY_INLINE_STRING_TRUSTED_CA\"\n- name: \"abc.com.stek\"\n  version_info: \"stek-context-v1\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.stek\"\n    session_ticket_keys:\n      keys:\n        - filename: \"[redacted]\"\n        - inline_string: \"[redacted]\"\n        - inline_bytes: \"W3JlZGFjdGVkXQ==\"\n- name: \"signing_key\"\n  version_info: \"signing-key-v1\"\n  last_updated:\n    seconds: 1234567900\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"signing_key\"\n    generic_secret:\n      secret:\n        inline_string: \"[redacted]\"\n)EOF\";\n  checkConfigDump(TestEnvironment::substitute(config_dump_with_generic_secret));\n}\n\nTEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) {\n  Server::MockInstance server;\n  auto secret_manager = std::make_unique<SecretManagerImpl>(config_tracker_);\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> secret_context;\n\n  envoy::config::core::v3::ConfigSource config_source;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Random::MockRandomGenerator> random;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Init::ExpectableWatcherImpl> init_watcher;\n  Init::TargetHandlePtr init_target_handle;\n  EXPECT_CALL(init_manager, add(_))\n      .WillRepeatedly(Invoke([&init_target_handle](const Init::Target& target) {\n        init_target_handle = target.createHandle(\"test\");\n      }));\n  EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats));\n  EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  
EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n\n  auto secret_provider =\n      secret_manager->findOrCreateTlsCertificateProvider(config_source, \"abc.com\", secret_context);\n  const std::string expected_secrets_config_dump = R\"EOF(\ndynamic_warming_secrets:\n- name: \"abc.com\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567891\n    nanos: 234000000\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com\"\n  )EOF\";\n  checkConfigDump(expected_secrets_config_dump);\n\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567899000));\n  auto context_secret_provider = secret_manager->findOrCreateCertificateValidationContextProvider(\n      config_source, \"abc.com.validation\", secret_context);\n  init_target_handle->initialize(init_watcher);\n  const std::string updated_config_dump = R\"EOF(\ndynamic_warming_secrets:\n- name: \"abc.com\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567891\n    nanos: 234000000\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com\"\n- name: \"abc.com.validation\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.validation\"\n)EOF\";\n  checkConfigDump(updated_config_dump);\n\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567899000));\n  auto stek_secret_provider = secret_manager->findOrCreateTlsSessionTicketKeysContextProvider(\n      config_source, \"abc.com.stek\", secret_context);\n  init_target_handle->initialize(init_watcher);\n  const std::string updated_once_more_config_dump = R\"EOF(\ndynamic_warming_secrets:\n- name: \"abc.com\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567891\n    nanos: 234000000\n  secret:\n    \"@type\": 
type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com\"\n- name: \"abc.com.validation\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.validation\"\n- name: \"abc.com.stek\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.stek\"\n)EOF\";\n  checkConfigDump(updated_once_more_config_dump);\n\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567900000));\n  auto generic_secret_provider = secret_manager->findOrCreateGenericSecretProvider(\n      config_source, \"signing_key\", secret_context);\n  init_target_handle->initialize(init_watcher);\n  const std::string config_dump_with_generic_secret = R\"EOF(\ndynamic_warming_secrets:\n- name: \"abc.com\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567891\n    nanos: 234000000\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com\"\n- name: \"abc.com.validation\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.validation\"\n- name: \"abc.com.stek\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567899\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.stek\"\n- name: \"signing_key\"\n  version_info: \"uninitialized\"\n  last_updated:\n    seconds: 1234567900\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"signing_key\"\n)EOF\";\n  checkConfigDump(config_dump_with_generic_secret);\n}\n\nTEST_F(SecretManagerImplTest, 
ConfigDumpHandlerStaticSecrets) {\n  Server::MockInstance server;\n  auto secret_manager = std::make_unique<SecretManagerImpl>(config_tracker_);\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> secret_context;\n\n  envoy::config::core::v3::ConfigSource config_source;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Random::MockRandomGenerator> random;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Init::ExpectableWatcherImpl> init_watcher;\n  Init::TargetHandlePtr init_target_handle;\n  EXPECT_CALL(init_manager, add(_))\n      .WillRepeatedly(Invoke([&init_target_handle](const Init::Target& target) {\n        init_target_handle = target.createHandle(\"test\");\n      }));\n  EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats));\n  EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n\n  const std::string tls_certificate =\n      R\"EOF(\nname: \"abc.com\"\ntls_certificate:\n  certificate_chain:\n    inline_string: \"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\"\n  private_key:\n    inline_string: \"DUMMY_INLINE_BYTES_FOR_PRIVATE_KEY\"\n  password:\n    inline_string: \"DUMMY_PASSWORD\"\n)EOF\";\n  envoy::extensions::transport_sockets::tls::v3::Secret tls_cert_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate), tls_cert_secret);\n  secret_manager->addStaticSecret(tls_cert_secret);\n  TestUtility::loadFromYaml(TestEnvironment::substitute(R\"EOF(\nname: \"abc.com.nopassword\"\ntls_certificate:\n  certificate_chain:\n    inline_string: \"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\"\n  private_key:\n    inline_string: 
\"DUMMY_INLINE_BYTES_FOR_PRIVATE_KEY\"\n)EOF\"),\n                            tls_cert_secret);\n  secret_manager->addStaticSecret(tls_cert_secret);\n  const std::string expected_config_dump = R\"EOF(\nstatic_secrets:\n- name: \"abc.com.nopassword\"\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.nopassword\"\n    tls_certificate:\n      certificate_chain:\n        inline_string: \"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\"\n      private_key:\n        inline_string: \"[redacted]\"\n- name: \"abc.com\"\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com\"\n    tls_certificate:\n      certificate_chain:\n        inline_string: \"DUMMY_INLINE_BYTES_FOR_CERT_CHAIN\"\n      private_key:\n        inline_string: \"[redacted]\"\n      password:\n        inline_string: \"[redacted]\"\n)EOF\";\n  checkConfigDump(expected_config_dump);\n}\n\nTEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticValidationContext) {\n  Server::MockInstance server;\n  auto secret_manager = std::make_unique<SecretManagerImpl>(config_tracker_);\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567891234));\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> secret_context;\n  envoy::config::core::v3::ConfigSource config_source;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Random::MockRandomGenerator> random;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Init::ExpectableWatcherImpl> init_watcher;\n  Init::TargetHandlePtr init_target_handle;\n  EXPECT_CALL(init_manager, add(_))\n      .WillRepeatedly(Invoke([&init_target_handle](const Init::Target& target) {\n        init_target_handle = target.createHandle(\"test\");\n      }));\n  EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats));\n  EXPECT_CALL(secret_context, 
initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n\n  const std::string validation_context =\n      R\"EOF(\nname: \"abc.com.validation\"\nvalidation_context:\n  trusted_ca:\n    inline_string: \"DUMMY_INLINE_STRING_TRUSTED_CA\"\n)EOF\";\n  envoy::extensions::transport_sockets::tls::v3::Secret validation_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(validation_context), validation_secret);\n  secret_manager->addStaticSecret(validation_secret);\n  const std::string expected_config_dump = R\"EOF(\nstatic_secrets:\n- name: \"abc.com.validation\"\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.validation\"\n    validation_context:\n      trusted_ca:\n        inline_string: \"DUMMY_INLINE_STRING_TRUSTED_CA\"\n)EOF\";\n  checkConfigDump(expected_config_dump);\n}\n\nTEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSessionTicketsContext) {\n  Server::MockInstance server;\n  auto secret_manager = std::make_unique<SecretManagerImpl>(config_tracker_);\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567891234));\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> secret_context;\n  envoy::config::core::v3::ConfigSource config_source;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Random::MockRandomGenerator> random;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Init::ExpectableWatcherImpl> init_watcher;\n  Init::TargetHandlePtr init_target_handle;\n  EXPECT_CALL(init_manager, add(_))\n      .WillRepeatedly(Invoke([&init_target_handle](const Init::Target& target) {\n        init_target_handle = target.createHandle(\"test\");\n      }));\n  EXPECT_CALL(secret_context, 
stats()).WillRepeatedly(ReturnRef(stats));\n  EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n\n  const std::string stek_context =\n      R\"EOF(\nname: \"abc.com.stek\"\nsession_ticket_keys:\n  keys:\n    - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n    - inline_string: \"DUMMY_INLINE_STRING\"\n    - inline_bytes: \"RFVNTVlfSU5MSU5FX0JZVEVT\"\n)EOF\";\n  envoy::extensions::transport_sockets::tls::v3::Secret stek_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(stek_context), stek_secret);\n  secret_manager->addStaticSecret(stek_secret);\n  const std::string expected_config_dump = R\"EOF(\nstatic_secrets:\n- name: \"abc.com.stek\"\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"abc.com.stek\"\n    session_ticket_keys:\n      keys:\n        - filename: \"[redacted]\"\n        - inline_string: \"[redacted]\"\n        - inline_bytes: \"W3JlZGFjdGVkXQ==\"\n)EOF\";\n  checkConfigDump(TestEnvironment::substitute(expected_config_dump));\n}\n\nTEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticGenericSecret) {\n  auto secret_manager = std::make_unique<SecretManagerImpl>(config_tracker_);\n\n  const std::string yaml = R\"EOF(\nname: \"signing_key\"\ngeneric_secret:\n  secret:\n    inline_bytes: \"DUMMY_ECDSA_KEY\"\n)EOF\";\n  envoy::extensions::transport_sockets::tls::v3::Secret typed_secret;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret);\n  secret_manager->addStaticSecret(typed_secret);\n\n  const std::string expected_config_dump = R\"EOF(\nstatic_secrets:\n- name: \"signing_key\"\n  secret:\n    \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\n    name: \"signing_key\"\n    
generic_secret:\n      secret:\n        inline_bytes: \"W3JlZGFjdGVkXQ==\"\n)EOF\";\n  checkConfigDump(TestEnvironment::substitute(expected_config_dump));\n}\n\n} // namespace\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/shared_pool/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"shared_pool_test\",\n    srcs = [\"shared_pool_test.cc\"],\n    deps = [\n        \"//source/common/event:timer_lib\",\n        \"//source/common/shared_pool:shared_pool_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/test_common:thread_factory_for_test_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/shared_pool/shared_pool_test.cc",
    "content": "#include <thread>\n\n#include \"common/event/timer_impl.h\"\n#include \"common/shared_pool/shared_pool.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/test_common/thread_factory_for_test.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/notification.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace SharedPool {\n\nclass SharedPoolTest : public testing::Test {\nprotected:\n  SharedPoolTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")) {\n    dispatcher_thread_ = api_->threadFactory().createThread([this]() {\n      // Must create a keepalive timer to keep the dispatcher from exiting.\n      std::chrono::milliseconds time_interval(500);\n      keepalive_timer_ = dispatcher_->createTimer(\n          [this, time_interval]() { keepalive_timer_->enableTimer(time_interval); });\n      keepalive_timer_->enableTimer(time_interval);\n      dispatcher_->run(Event::Dispatcher::RunType::Block);\n    });\n  }\n\n  ~SharedPoolTest() override {\n    dispatcher_->exit();\n    dispatcher_thread_->join();\n  }\n\n  void deferredDeleteSharedPoolOnMainThread(std::shared_ptr<ObjectSharedPool<int>>& pool) {\n    absl::Notification go;\n    dispatcher_->post([&pool, &go]() {\n      pool.reset();\n      go.Notify();\n    });\n    go.WaitForNotification();\n  }\n\n  void createObjectSharedPool(std::shared_ptr<ObjectSharedPool<int>>& pool) {\n    absl::Notification go;\n    dispatcher_->post([&pool, &go, this]() {\n      pool = std::make_shared<ObjectSharedPool<int>>(*dispatcher_);\n      go.Notify();\n    });\n    go.WaitForNotification();\n  }\n\n  void getObjectFromObjectSharedPool(std::shared_ptr<ObjectSharedPool<int>>& pool,\n                                     std::shared_ptr<int>& o, int value) {\n    absl::Notification go;\n    dispatcher_->post([&pool, &o, &go, value]() {\n      o = pool->getObject(value);\n      go.Notify();\n    });\n    
go.WaitForNotification();\n  }\n\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  Thread::ThreadPtr dispatcher_thread_;\n  Event::TimerPtr keepalive_timer_;\n  absl::Notification go_;\n};\n\nTEST_F(SharedPoolTest, Basic) {\n  Event::MockDispatcher dispatcher;\n  auto pool = std::make_shared<ObjectSharedPool<int>>(dispatcher);\n  {\n    auto o = pool->getObject(4);\n    auto o1 = pool->getObject(4);\n    ASSERT_EQ(1, pool->poolSize());\n\n    auto o2 = pool->getObject(5);\n    ASSERT_EQ(o.get(), o1.get());\n    ASSERT_EQ(2, pool->poolSize());\n    ASSERT_TRUE(o.get() != o2.get());\n  }\n\n  ASSERT_EQ(0, pool->poolSize());\n}\n\nTEST_F(SharedPoolTest, NonThreadSafeForGetObjectDeathTest) {\n  std::shared_ptr<ObjectSharedPool<int>> pool;\n  createObjectSharedPool(pool);\n  EXPECT_DEBUG_DEATH(pool->getObject(4), \".*\");\n}\n\nTEST_F(SharedPoolTest, ThreadSafeForDeleteObject) {\n  std::shared_ptr<ObjectSharedPool<int>> pool;\n  {\n    // same thread\n    createObjectSharedPool(pool);\n    dispatcher_->post([&pool, this]() {\n      pool->deleteObject(std::hash<int>{}(4));\n      go_.Notify();\n    });\n    go_.WaitForNotification();\n  }\n\n  {\n    // different threads\n    createObjectSharedPool(pool);\n    Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest();\n    auto thread =\n        thread_factory.createThread([&pool]() { pool->deleteObject(std::hash<int>{}(4)); });\n    thread->join();\n  }\n}\n\nTEST_F(SharedPoolTest, NonThreadSafeForPoolSizeDeathTest) {\n  std::shared_ptr<ObjectSharedPool<int>> pool;\n  createObjectSharedPool(pool);\n  EXPECT_DEBUG_DEATH(pool->poolSize(), \".*\");\n}\n\nTEST_F(SharedPoolTest, GetObjectAndDeleteObjectRaceForSameHashValue) {\n  std::shared_ptr<ObjectSharedPool<int>> pool;\n  std::shared_ptr<int> o1;\n  createObjectSharedPool(pool);\n  getObjectFromObjectSharedPool(pool, o1, 4);\n  Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest();\n  pool->sync().enable();\n  
pool->sync().waitOn(ObjectSharedPool<int>::DeleteObjectOnMainThread);\n  auto thread = thread_factory.createThread([&o1]() {\n    // simulation of shared objects destructing in other threads\n    o1.reset(); // Blocks in thread synchronizer waiting on DeleteObjectOnMainThread\n  });\n  pool->sync().barrierOn(ObjectSharedPool<int>::DeleteObjectOnMainThread);\n  // The deleteObject method has not been executed yet, when it is switched to the main thread and\n  // called getObject again to get an object with the same hash value.\n  std::shared_ptr<int> o2;\n  getObjectFromObjectSharedPool(pool, o2, 4);\n  pool->sync().signal(ObjectSharedPool<int>::DeleteObjectOnMainThread);\n  thread->join();\n\n  // deleteObject will to release older weak_ptr objects\n  // Because the storage is actually a new weak_ptr and the reference count is not zero, it is not\n  // deleted\n  dispatcher_->post([&pool, this]() {\n    EXPECT_EQ(1, pool->poolSize());\n    go_.Notify();\n  });\n  go_.WaitForNotification();\n  deferredDeleteSharedPoolOnMainThread(pool);\n}\n\nTEST_F(SharedPoolTest, RaceCondtionForGetObjectWithObjectDeleter) {\n  std::shared_ptr<ObjectSharedPool<int>> pool;\n  std::shared_ptr<int> o1;\n  createObjectSharedPool(pool);\n  getObjectFromObjectSharedPool(pool, o1, 4);\n  Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest();\n  pool->sync().enable();\n  pool->sync().waitOn(ObjectSharedPool<int>::ObjectDeleterEntry);\n  auto thread = thread_factory.createThread([&o1]() {\n    // simulation of shared objects destructing in other threads\n    o1.reset(); // Blocks in thread synchronizer waiting on ObjectDeleterEntry\n  });\n  pool->sync().barrierOn(ObjectSharedPool<int>::ObjectDeleterEntry);\n\n  // Object is destructing, no memory has been released,\n  // at this time the object obtained through getObject is newly created, not the old object,\n  // so the object memory is released when the destruct is complete, and o2 is still valid.\n  std::shared_ptr<int> 
o2;\n  getObjectFromObjectSharedPool(pool, o2, 4);\n  pool->sync().signal(ObjectSharedPool<int>::ObjectDeleterEntry);\n  thread->join();\n  EXPECT_EQ(4, *o2);\n  deferredDeleteSharedPoolOnMainThread(pool);\n}\n\n} // namespace SharedPool\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/signal/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"signals_test\",\n    srcs = [\"signals_test.cc\"],\n    # Posix signal tests are irrelevant to Windows\n    tags = [\n        \"backtrace\",\n        \"skip_on_windows\",\n    ],\n    deps = [\n        \"//source/common/signal:fatal_error_handler_lib\",\n        \"//source/common/signal:sigaction_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/signal/signals_test.cc",
    "content": "#include <sys/mman.h>\n\n#include <csignal>\n\n#include \"common/signal/fatal_error_handler.h\"\n#include \"common/signal/signal_action.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n#if defined(__has_feature)\n#if __has_feature(address_sanitizer)\n#define ASANITIZED /* Sanitized by Clang */\n#endif\n#endif\n\n#if defined(__SANITIZE_ADDRESS__)\n#define ASANITIZED /* Sanitized by GCC */\n#endif\n\n// Use this test handler instead of a mock, because fatal error handlers must be\n// signal-safe and a mock might allocate memory.\nclass TestFatalErrorHandler : public FatalErrorHandlerInterface {\n  void onFatalError(std::ostream& os) const override { os << \"HERE!\"; }\n};\n\n// Death tests that expect a particular output are disabled under address sanitizer.\n// The sanitizer does its own special signal handling and prints messages that are\n// not ours instead of what this test expects. 
As of latest Clang this appears\n// to include abort() as well.\n#ifndef ASANITIZED\nTEST(SignalsDeathTest, InvalidAddressDeathTest) {\n  SignalAction actions;\n  EXPECT_DEATH(\n      []() -> void {\n        // Oops!\n        volatile int* nasty_ptr = reinterpret_cast<int*>(0x0);\n        *(nasty_ptr) = 0; // NOLINT(clang-analyzer-core.NullDereference)\n      }(),\n      \"backtrace.*Segmentation fault\");\n}\n\nTEST(SignalsDeathTest, RegisteredHandlerTest) {\n  TestFatalErrorHandler handler;\n  FatalErrorHandler::registerFatalErrorHandler(handler);\n  SignalAction actions;\n  // Make sure the fatal error log \"HERE\" registered above is logged on fatal error.\n  EXPECT_DEATH(\n      []() -> void {\n        // Oops!\n        volatile int* nasty_ptr = reinterpret_cast<int*>(0x0);\n        *(nasty_ptr) = 0; // NOLINT(clang-analyzer-core.NullDereference)\n      }(),\n      \"HERE\");\n  FatalErrorHandler::removeFatalErrorHandler(handler);\n}\n\nTEST(SignalsDeathTest, BusDeathTest) {\n  SignalAction actions;\n  EXPECT_DEATH(\n      []() -> void {\n        // Bus error is tricky. There's one way that can work on POSIX systems\n        // described below but it depends on mmaping a file. Just make it easy and\n        // raise a bus.\n        //\n        // FILE *f = tmpfile();\n        // int *p = mmap(0, 4, PROT_WRITE, MAP_PRIVATE, fileno(f), 0);\n        // *p = 0;\n        raise(SIGBUS);\n      }(),\n      \"backtrace.*Bus\");\n}\n\nTEST(SignalsDeathTest, BadMathDeathTest) {\n  SignalAction actions;\n  EXPECT_DEATH(\n      []() -> void {\n        // It turns out to be really hard to not have the optimizer get rid of a\n        // division by zero. 
Just raise the signal for this test.\n        raise(SIGFPE);\n      }(),\n      \"backtrace.*Floating point\");\n}\n\n#if defined(__x86_64__) || defined(__i386__)\n// Unfortunately we don't have a reliable way to do this on other platforms\nTEST(SignalsDeathTest, IllegalInstructionDeathTest) {\n  SignalAction actions;\n  EXPECT_DEATH(\n      []() -> void {\n        // Intel defines the \"ud2\" opcode to be an invalid instruction:\n        __asm__(\"ud2\");\n      }(),\n      \"backtrace.*Illegal\");\n}\n#endif\n\nTEST(SignalsDeathTest, AbortDeathTest) {\n  SignalAction actions;\n  EXPECT_DEATH([]() -> void { abort(); }(), \"backtrace.*Abort(ed)?\");\n}\n\nTEST(SignalsDeathTest, RestoredPreviousHandlerDeathTest) {\n  SignalAction action;\n  {\n    SignalAction inner_action;\n    // Test case for a previously encountered misfeature:\n    // We should restore the previous SignalAction when the inner action\n    // goes out of scope, NOT the default.\n  }\n  // Outer SignalAction should be active again:\n  EXPECT_DEATH([]() -> void { abort(); }(), \"backtrace.*Abort(ed)?\");\n}\n\n#endif\n\nTEST(SignalsDeathTest, IllegalStackAccessDeathTest) {\n  SignalAction actions;\n  EXPECT_DEATH(actions.tryEvilAccessForTest(false), \"\");\n  EXPECT_DEATH(actions.tryEvilAccessForTest(true), \"\");\n}\n\nTEST(Signals, LegalTest) {\n  // Don't do anything wrong.\n  { SignalAction actions; }\n  // Nothing should happen...\n}\n\nTEST(Signals, RaiseNonFatalTest) {\n  {\n    SignalAction actions;\n    // I urgently request that you do nothing please!\n    raise(SIGURG);\n  }\n  // Nothing should happen...\n}\n\nTEST(Signals, LegalStackAccessTest) {\n  SignalAction actions;\n  actions.doGoodAccessForTest();\n}\n\nTEST(Signals, HandlerTest) {\n  siginfo_t fake_si;\n  fake_si.si_addr = nullptr;\n  SignalAction::sigHandler(SIGURG, &fake_si, nullptr);\n}\n\nTEST(FatalErrorHandler, CallHandler) {\n  // Reserve space in advance so that the handler doesn't allocate memory.\n  std::string s;\n  
s.reserve(1024);\n  std::ostringstream os(std::move(s));\n\n  TestFatalErrorHandler handler;\n  FatalErrorHandler::registerFatalErrorHandler(handler);\n\n  FatalErrorHandler::callFatalErrorHandlers(os);\n  EXPECT_EQ(os.str(), \"HERE!\");\n\n  // callFatalErrorHandlers() will unregister the handler, so this isn't\n  // necessary for cleanup. Call it anyway, to simulate the case when one thread\n  // tries to remove the handler while another thread crashes.\n  FatalErrorHandler::removeFatalErrorHandler(handler);\n}\n\n// Use this specialized test handler instead of a mock, because fatal error\n// handlers must be signal-safe and a mock might allocate memory.\nclass MemoryCheckingFatalErrorHandler : public FatalErrorHandlerInterface {\npublic:\n  MemoryCheckingFatalErrorHandler(const Stats::TestUtil::MemoryTest& memory_test,\n                                  uint64_t& allocated_after_call)\n      : memory_test_(memory_test), allocated_after_call_(allocated_after_call) {}\n  void onFatalError(std::ostream& os) const override {\n    UNREFERENCED_PARAMETER(os);\n    allocated_after_call_ = memory_test_.consumedBytes();\n  }\n\nprivate:\n  const Stats::TestUtil::MemoryTest& memory_test_;\n  uint64_t& allocated_after_call_;\n};\n\n// FatalErrorHandler::callFatalErrorHandlers shouldn't allocate any heap memory,\n// so that it's safe to call from a signal handler. 
Test by comparing the\n// allocated memory before a call with the allocated memory during a handler.\nTEST(FatalErrorHandler, DontAllocateMemory) {\n  // Reserve space in advance so that the handler doesn't allocate memory.\n  std::string s;\n  s.reserve(1024);\n  std::ostringstream os(std::move(s));\n\n  Stats::TestUtil::MemoryTest memory_test;\n\n  uint64_t allocated_after_call;\n  MemoryCheckingFatalErrorHandler handler(memory_test, allocated_after_call);\n  FatalErrorHandler::registerFatalErrorHandler(handler);\n\n  uint64_t allocated_before_call = memory_test.consumedBytes();\n  FatalErrorHandler::callFatalErrorHandlers(os);\n\n  EXPECT_MEMORY_EQ(allocated_after_call, allocated_before_call);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/singleton/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"manager_impl_test\",\n    srcs = [\"manager_impl_test.cc\"],\n    deps = [\n        \"//source/common/singleton:manager_impl_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"threadsafe_singleton_test\",\n    srcs = [\"threadsafe_singleton_test.cc\"],\n    deps = [\n        \"//source/common/common:thread_lib\",\n        \"//source/common/singleton:threadsafe_singleton\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/singleton/manager_impl_test.cc",
    "content": "#include \"envoy/registry/registry.h\"\n\n#include \"common/singleton/manager_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Singleton {\nnamespace {\n\n// Must be a dedicated function so that TID is within the death test.\nstatic void deathTestWorker() {\n  ManagerImpl manager(Thread::threadFactoryForTest());\n\n  manager.get(\"foo\", [] { return nullptr; });\n}\n\nTEST(SingletonManagerImplDeathTest, NotRegistered) {\n  EXPECT_DEATH(deathTestWorker(), \"invalid singleton name 'foo'. Make sure it is registered.\");\n}\n\nSINGLETON_MANAGER_REGISTRATION(test);\n\nclass TestSingleton : public Instance {\npublic:\n  ~TestSingleton() override { onDestroy(); }\n\n  MOCK_METHOD(void, onDestroy, ());\n};\n\nTEST(SingletonManagerImplTest, Basic) {\n  ManagerImpl manager(Thread::threadFactoryForTest());\n\n  std::shared_ptr<TestSingleton> singleton = std::make_shared<TestSingleton>();\n  EXPECT_EQ(singleton, manager.get(\"test_singleton\", [singleton] { return singleton; }));\n  EXPECT_EQ(1UL, singleton.use_count());\n  EXPECT_EQ(singleton, manager.get(\"test_singleton\", [] { return nullptr; }));\n\n  EXPECT_CALL(*singleton, onDestroy());\n  singleton.reset();\n}\n\n} // namespace\n} // namespace Singleton\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/singleton/threadsafe_singleton_test.cc",
    "content": "#include <memory>\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n#include \"common/singleton/threadsafe_singleton.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass TestSingleton {\npublic:\n  virtual ~TestSingleton() = default;\n\n  virtual void addOne() {\n    Thread::LockGuard lock(lock_);\n    ++value_;\n  }\n\n  virtual int value() {\n    Thread::LockGuard lock(lock_);\n    return value_;\n  }\n\nprotected:\n  Thread::MutexBasicLockable lock_;\n  int value_{0};\n};\n\nclass EvilMathSingleton : public TestSingleton {\npublic:\n  EvilMathSingleton() { value_ = -50; }\n  void addOne() override {\n    Thread::LockGuard lock(lock_);\n    ++value_;\n    ++value_;\n  }\n};\n\nclass AddTen {\npublic:\n  AddTen() {\n    thread_ = Thread::threadFactoryForTest().createThread([this]() -> void { threadRoutine(); });\n  }\n  ~AddTen() {\n    thread_->join();\n    thread_.reset();\n  }\n\nprivate:\n  void threadRoutine() {\n    auto& singleton = ThreadSafeSingleton<TestSingleton>::get();\n    for (int i = 0; i < 10; ++i) {\n      singleton.addOne();\n    }\n  }\n  Thread::ThreadPtr thread_;\n};\n\nTEST(ThreadSafeSingleton, BasicCreationAndMutation) {\n  auto& singleton = ThreadSafeSingleton<TestSingleton>::get();\n  EXPECT_EQ(&singleton, &ThreadSafeSingleton<TestSingleton>::get());\n  EXPECT_EQ(0, singleton.value());\n  singleton.addOne();\n  EXPECT_EQ(1, singleton.value());\n\n  {\n    AddTen ten;\n    AddTen twenty;\n    AddTen thirty;\n  }\n  EXPECT_EQ(31, singleton.value());\n}\n\nTEST(ThreadSafeSingleton, Injection) {\n  EvilMathSingleton evil_singleton;\n\n  // Sanity check that other tests didn't cause the main singleton to overflow.\n  int latched_value = ThreadSafeSingleton<TestSingleton>::get().value();\n  ASSERT_GE(latched_value, 0);\n\n  {\n    
TestThreadsafeSingletonInjector<TestSingleton> override(&evil_singleton);\n    auto& evil_math_reference = ThreadSafeSingleton<TestSingleton>::get();\n    EXPECT_NE(latched_value, evil_math_reference.value());\n    EXPECT_EQ(-50, evil_math_reference.value());\n    evil_math_reference.addOne();\n    EXPECT_EQ(-48, evil_math_reference.value());\n  }\n  EXPECT_EQ(latched_value, ThreadSafeSingleton<TestSingleton>::get().value());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_binary\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"allocator_impl_test\",\n    srcs = [\"allocator_impl_test.cc\"],\n    deps = [\n        \"//source/common/stats:allocator_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:thread_factory_for_test_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"isolated_store_impl_test\",\n    srcs = [\"isolated_store_impl_test.cc\"],\n    deps = [\n        \"//source/common/stats:isolated_store_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"histogram_impl_test\",\n    srcs = [\"histogram_impl_test.cc\"],\n    deps = [\n        \"//source/common/stats:histogram_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"metric_impl_test\",\n    srcs = [\"metric_impl_test.cc\"],\n    deps = [\n        \"//source/common/stats:allocator_lib\",\n        \"//source/common/stats:utility_lib\",\n        \"//test/test_common:logging_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"recent_lookups_test\",\n    srcs = [\"recent_lookups_test.cc\"],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        \"//source/common/stats:recent_lookups_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_cc_test_binary(\n    name = \"recent_lookups_speed_test\",\n    srcs = [\"recent_lookups_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/common:random_generator_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        
\"//source/common/stats:recent_lookups_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"stat_merger_test\",\n    srcs = [\"stat_merger_test.cc\"],\n    deps = [\n        \":stat_test_utility_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stat_merger_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"stat_test_utility_lib\",\n    srcs = [\"stat_test_utility.cc\"],\n    hdrs = [\"stat_test_utility.h\"],\n    external_deps = [\n        \"abseil_strings\",\n    ],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"stat_test_utility_test\",\n    srcs = [\"stat_test_utility_test.cc\"],\n    deps = [\n        \":stat_test_utility_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"stats_matcher_impl_test\",\n    srcs = [\"stats_matcher_impl_test.cc\"],\n    deps = [\n        \"//source/common/memory:stats_lib\",\n        \"//source/common/stats:stats_matcher_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"refcount_ptr_test\",\n    srcs = [\"refcount_ptr_test.cc\"],\n    deps = [\"//include/envoy/stats:refcount_ptr_interface\"],\n)\n\nenvoy_cc_test(\n    name = \"symbol_table_impl_test\",\n    srcs = [\"symbol_table_impl_test.cc\"],\n    external_deps = [\"abseil_hash_testing\"],\n    deps = [\n        \":stat_test_utility_lib\",\n        \"//source/common/common:mutex_tracer_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        
\"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"stat_merger_fuzz_test\",\n    srcs = [\"stat_merger_fuzz_test.cc\"],\n    corpus = \"stat_merger_corpus\",\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//test/fuzz:utility_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"symbol_table_fuzz_test\",\n    srcs = [\"symbol_table_fuzz_test.cc\"],\n    corpus = \"symbol_table_corpus\",\n    deps = [\n        \":stat_test_utility_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//test/fuzz:utility_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"utility_fuzz_test\",\n    srcs = [\"utility_fuzz_test.cc\"],\n    corpus = \"utility_corpus\",\n    deps = [\n        \"//source/common/stats:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_binary(\n    name = \"symbol_table_speed_test\",\n    srcs = [\n        \"make_elements_helper.cc\",\n        \"make_elements_helper.h\",\n        \"symbol_table_speed_test.cc\",\n    ],\n    external_deps = [\n        \"abseil_strings\",\n        \"benchmark\",\n    ],\n    deps = [\n        \":stat_test_utility_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/common/stats:utility_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"tag_extractor_impl_test\",\n    srcs = [\"tag_extractor_impl_test.cc\"],\n    deps = [\n        \"//source/common/stats:tag_extractor_lib\",\n        \"//source/common/stats:tag_producer_lib\",\n        
\"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"tag_producer_impl_test\",\n    srcs = [\"tag_producer_impl_test.cc\"],\n    deps = [\n        \"//source/common/stats:tag_producer_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"thread_local_store_test\",\n    srcs = [\"thread_local_store_test.cc\"],\n    deps = [\n        \":stat_test_utility_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//source/common/stats:stats_matcher_lib\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//source/common/thread_local:thread_local_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"thread_local_store_speed_test\",\n    srcs = [\"thread_local_store_speed_test.cc\"],\n    external_deps = [\n        \"abseil_strings\",\n        \"benchmark\",\n    ],\n    deps = [\n        \":stat_test_utility_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//source/common/thread_local:thread_local_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = 
\"thread_local_store_speed_test_benchmark_test\",\n    benchmark_binary = \"thread_local_store_speed_test\",\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    deps = [\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//source/common/stats:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/stats/allocator_impl_test.cc",
    "content": "#include <string>\n\n#include \"common/stats/allocator_impl.h\"\n\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/thread_factory_for_test.h\"\n\n#include \"absl/synchronization/notification.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace {\n\nclass AllocatorImplTest : public testing::Test {\nprotected:\n  AllocatorImplTest() : alloc_(symbol_table_), pool_(symbol_table_) {}\n  ~AllocatorImplTest() override { clearStorage(); }\n\n  StatNameStorage makeStatStorage(absl::string_view name) {\n    return StatNameStorage(name, symbol_table_);\n  }\n\n  StatName makeStat(absl::string_view name) { return pool_.add(name); }\n\n  void clearStorage() {\n    pool_.clear();\n    EXPECT_EQ(0, symbol_table_.numSymbols());\n  }\n\n  SymbolTableImpl symbol_table_;\n  AllocatorImpl alloc_;\n  StatNamePool pool_;\n};\n\n// Allocate 2 counters of the same name, and you'll get the same object.\nTEST_F(AllocatorImplTest, CountersWithSameName) {\n  StatName counter_name = makeStat(\"counter.name\");\n  CounterSharedPtr c1 = alloc_.makeCounter(counter_name, StatName(), {});\n  EXPECT_EQ(1, c1->use_count());\n  CounterSharedPtr c2 = alloc_.makeCounter(counter_name, StatName(), {});\n  EXPECT_EQ(2, c1->use_count());\n  EXPECT_EQ(2, c2->use_count());\n  EXPECT_EQ(c1.get(), c2.get());\n  EXPECT_FALSE(c1->used());\n  EXPECT_FALSE(c2->used());\n  c1->inc();\n  EXPECT_TRUE(c1->used());\n  EXPECT_TRUE(c2->used());\n  c2->inc();\n  EXPECT_EQ(2, c1->value());\n  EXPECT_EQ(2, c2->value());\n}\n\nTEST_F(AllocatorImplTest, GaugesWithSameName) {\n  StatName gauge_name = makeStat(\"gauges.name\");\n  GaugeSharedPtr g1 = alloc_.makeGauge(gauge_name, StatName(), {}, Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(1, g1->use_count());\n  GaugeSharedPtr g2 = alloc_.makeGauge(gauge_name, StatName(), {}, Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(2, g1->use_count());\n  EXPECT_EQ(2, g2->use_count());\n  EXPECT_EQ(g1.get(), g2.get());\n  
EXPECT_FALSE(g1->used());\n  EXPECT_FALSE(g2->used());\n  g1->inc();\n  EXPECT_TRUE(g1->used());\n  EXPECT_TRUE(g2->used());\n  EXPECT_EQ(1, g1->value());\n  EXPECT_EQ(1, g2->value());\n  g2->dec();\n  EXPECT_EQ(0, g1->value());\n  EXPECT_EQ(0, g2->value());\n}\n\n// Test for a race-condition where we may decrement the ref-count of a stat to\n// zero at the same time as we are allocating another instance of that\n// stat. This test reproduces that race organically by having a 12 threads each\n// iterate 10k times.\nTEST_F(AllocatorImplTest, RefCountDecAllocRaceOrganic) {\n  StatName counter_name = makeStat(\"counter.name\");\n  StatName gauge_name = makeStat(\"gauge.name\");\n  Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest();\n\n  const uint32_t num_threads = 12;\n  const uint32_t iters = 10000;\n  std::vector<Thread::ThreadPtr> threads;\n  absl::Notification go;\n  for (uint32_t i = 0; i < num_threads; ++i) {\n    threads.push_back(thread_factory.createThread([&]() {\n      go.WaitForNotification();\n      for (uint32_t i = 0; i < iters; ++i) {\n        alloc_.makeCounter(counter_name, StatName(), {});\n        alloc_.makeGauge(gauge_name, StatName(), {}, Gauge::ImportMode::NeverImport);\n      }\n    }));\n  }\n  go.Notify();\n  for (uint32_t i = 0; i < num_threads; ++i) {\n    threads[i]->join();\n  }\n}\n\n// Tests the same scenario as RefCountDecAllocRaceOrganic, but using just two\n// threads and the ThreadSynchronizer, in one iteration. Note that if the code\n// has the bug in it, this test fails fast as expected. However, if the bug is\n// fixed, the allocator's mutex will cause the second thread to block in\n// makeCounter() until the first thread finishes destructing the object. 
Thus\n// the test gives thread2 5 seconds to complete before releasing thread 1 to\n// complete its destruction of the counter.\nTEST_F(AllocatorImplTest, RefCountDecAllocRaceSynchronized) {\n  StatName counter_name = makeStat(\"counter.name\");\n  Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest();\n  alloc_.sync().enable();\n  alloc_.sync().waitOn(AllocatorImpl::DecrementToZeroSyncPoint);\n  Thread::ThreadPtr thread = thread_factory.createThread([&]() {\n    CounterSharedPtr counter = alloc_.makeCounter(counter_name, StatName(), {});\n    counter->inc();\n    counter->reset(); // Blocks in thread synchronizer waiting on DecrementToZeroSyncPoint\n  });\n\n  alloc_.sync().barrierOn(AllocatorImpl::DecrementToZeroSyncPoint);\n  EXPECT_TRUE(alloc_.isMutexLockedForTest());\n  alloc_.sync().signal(AllocatorImpl::DecrementToZeroSyncPoint);\n  thread->join();\n  EXPECT_FALSE(alloc_.isMutexLockedForTest());\n}\n\n} // namespace\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/histogram_impl_test.cc",
    "content": "#include \"envoy/config/metrics/v3/stats.pb.h\"\n\n#include \"common/stats/histogram_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass HistogramSettingsImplTest : public testing::Test {\npublic:\n  void initialize() {\n    envoy::config::metrics::v3::StatsConfig config;\n    auto& bucket_settings = *config.mutable_histogram_bucket_settings();\n    for (auto& item : buckets_configs_) {\n      bucket_settings.Add(std::move(item));\n    }\n    settings_ = std::make_unique<HistogramSettingsImpl>(config);\n  }\n\n  std::vector<envoy::config::metrics::v3::HistogramBucketSettings> buckets_configs_;\n  std::unique_ptr<HistogramSettingsImpl> settings_;\n};\n\n// Test that a matching stat returns the configured buckets, and a non-matching\n// stat returns the defaults.\nTEST_F(HistogramSettingsImplTest, Basic) {\n  envoy::config::metrics::v3::HistogramBucketSettings setting;\n  setting.mutable_match()->set_prefix(\"a\");\n  setting.mutable_buckets()->Add(0.1);\n  setting.mutable_buckets()->Add(2);\n  buckets_configs_.push_back(setting);\n\n  initialize();\n  EXPECT_EQ(settings_->buckets(\"test\"), settings_->defaultBuckets());\n  EXPECT_EQ(settings_->buckets(\"abcd\"), ConstSupportedBuckets({0.1, 2}));\n}\n\n// Test that buckets are correctly sorted.\nTEST_F(HistogramSettingsImplTest, Sorted) {\n  envoy::config::metrics::v3::HistogramBucketSettings setting;\n  setting.mutable_match()->set_exact(\"a\");\n  setting.mutable_buckets()->Add(0.1);\n  setting.mutable_buckets()->Add(2);\n  setting.mutable_buckets()->Add(1); // Out-of-order\n  buckets_configs_.push_back(setting);\n\n  initialize();\n  EXPECT_EQ(settings_->buckets(\"a\"), ConstSupportedBuckets({0.1, 1, 2}));\n}\n\n// Test that only matching configurations are applied.\nTEST_F(HistogramSettingsImplTest, Matching) {\n  {\n    envoy::config::metrics::v3::HistogramBucketSettings setting;\n    
setting.mutable_match()->set_prefix(\"a\");\n    setting.mutable_buckets()->Add(1);\n    setting.mutable_buckets()->Add(2);\n    buckets_configs_.push_back(setting);\n  }\n\n  {\n    envoy::config::metrics::v3::HistogramBucketSettings setting;\n    setting.mutable_match()->set_prefix(\"b\");\n    setting.mutable_buckets()->Add(3);\n    setting.mutable_buckets()->Add(4);\n    buckets_configs_.push_back(setting);\n  }\n\n  initialize();\n  EXPECT_EQ(settings_->buckets(\"abcd\"), ConstSupportedBuckets({1, 2}));\n  EXPECT_EQ(settings_->buckets(\"bcde\"), ConstSupportedBuckets({3, 4}));\n}\n\n// Test that earlier configs take precedence over later configs when both match.\nTEST_F(HistogramSettingsImplTest, Priority) {\n  {\n    envoy::config::metrics::v3::HistogramBucketSettings setting;\n    setting.mutable_match()->set_prefix(\"a\");\n    setting.mutable_buckets()->Add(1);\n    setting.mutable_buckets()->Add(2);\n    buckets_configs_.push_back(setting);\n  }\n\n  {\n    envoy::config::metrics::v3::HistogramBucketSettings setting;\n    setting.mutable_match()->set_prefix(\"ab\");\n    setting.mutable_buckets()->Add(3);\n    setting.mutable_buckets()->Add(4);\n  }\n\n  initialize();\n  EXPECT_EQ(settings_->buckets(\"abcd\"), ConstSupportedBuckets({1, 2}));\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/isolated_store_impl_test.cc",
    "content": "#include <string>\n\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/stats/null_counter.h\"\n#include \"common/stats/null_gauge.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass StatsIsolatedStoreImplTest : public testing::Test {\nprotected:\n  StatsIsolatedStoreImplTest()\n      : store_(std::make_unique<IsolatedStoreImpl>(symbol_table_)), pool_(symbol_table_) {}\n  ~StatsIsolatedStoreImplTest() override {\n    pool_.clear();\n    store_.reset();\n    EXPECT_EQ(0, symbol_table_.numSymbols());\n  }\n\n  StatName makeStatName(absl::string_view name) { return pool_.add(name); }\n\n  SymbolTableImpl symbol_table_;\n  std::unique_ptr<IsolatedStoreImpl> store_;\n  StatNamePool pool_;\n};\n\nTEST_F(StatsIsolatedStoreImplTest, All) {\n  ScopePtr scope1 = store_->createScope(\"scope1.\");\n  Counter& c1 = store_->counterFromString(\"c1\");\n  Counter& c2 = scope1->counterFromString(\"c2\");\n  EXPECT_EQ(\"c1\", c1.name());\n  EXPECT_EQ(\"scope1.c2\", c2.name());\n  EXPECT_EQ(\"c1\", c1.tagExtractedName());\n  EXPECT_EQ(\"scope1.c2\", c2.tagExtractedName());\n  EXPECT_EQ(0, c1.tags().size());\n  EXPECT_EQ(0, c1.tags().size());\n  CounterOptConstRef opt_counter = scope1->findCounter(c2.statName());\n  ASSERT_TRUE(opt_counter);\n  EXPECT_EQ(&c2, &opt_counter->get());\n  StatName not_found = pool_.add(\"not_found\");\n  EXPECT_FALSE(scope1->findCounter(not_found));\n\n  StatNameManagedStorage c1_name(\"c1\", store_->symbolTable());\n  c1.add(100);\n  auto found_counter = store_->findCounter(c1_name.statName());\n  ASSERT_TRUE(found_counter.has_value());\n  EXPECT_EQ(&c1, &found_counter->get());\n  EXPECT_EQ(100, found_counter->get().value());\n  c1.add(100);\n  EXPECT_EQ(200, found_counter->get().value());\n\n  Gauge& g1 = store_->gaugeFromString(\"g1\", Gauge::ImportMode::Accumulate);\n  
Gauge& g2 = scope1->gaugeFromString(\"g2\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(\"g1\", g1.name());\n  EXPECT_EQ(\"scope1.g2\", g2.name());\n  EXPECT_EQ(\"g1\", g1.tagExtractedName());\n  EXPECT_EQ(\"scope1.g2\", g2.tagExtractedName());\n  EXPECT_EQ(0, g1.tags().size());\n  EXPECT_EQ(0, g2.tags().size());\n  GaugeOptConstRef opt_gauge = scope1->findGauge(g2.statName());\n  ASSERT_TRUE(opt_gauge);\n  EXPECT_EQ(&g2, &opt_gauge->get());\n  EXPECT_FALSE(scope1->findGauge(not_found));\n  // TODO(jmarantz): There may be a bug with\n  // scope1->findGauge(h1.statName()), which finds the histogram added to\n  // the store, which is arguably not in the scope. Investigate what the\n  // behavior should be.\n\n  StatNameManagedStorage g1_name(\"g1\", store_->symbolTable());\n  g1.set(100);\n  auto found_gauge = store_->findGauge(g1_name.statName());\n  ASSERT_TRUE(found_gauge.has_value());\n  EXPECT_EQ(&g1, &found_gauge->get());\n  EXPECT_EQ(100, found_gauge->get().value());\n  g1.set(0);\n  EXPECT_EQ(0, found_gauge->get().value());\n\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_TRUE(h1.used()); // hardcoded in impl to be true always.\n  EXPECT_TRUE(h1.use_count() == 1);\n  Histogram& h2 = scope1->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n  scope1->deliverHistogramToSinks(h2, 0);\n  EXPECT_EQ(\"h1\", h1.name());\n  EXPECT_EQ(\"scope1.h2\", h2.name());\n  EXPECT_EQ(\"h1\", h1.tagExtractedName());\n  EXPECT_EQ(\"scope1.h2\", h2.tagExtractedName());\n  EXPECT_EQ(0, h1.tags().size());\n  EXPECT_EQ(0, h2.tags().size());\n  h1.recordValue(200);\n  h2.recordValue(200);\n  HistogramOptConstRef opt_histogram = scope1->findHistogram(h2.statName());\n  ASSERT_TRUE(opt_histogram);\n  EXPECT_EQ(&h2, &opt_histogram->get());\n  EXPECT_FALSE(scope1->findHistogram(not_found));\n  // TODO(jmarantz): There may be a bug with\n  // scope1->findHistogram(h1.statName()), which finds the histogram added to\n  
// the store, which is arguably not in the scope. Investigate what the\n  // behavior should be.\n\n  StatNameManagedStorage h1_name(\"h1\", store_->symbolTable());\n  auto found_histogram = store_->findHistogram(h1_name.statName());\n  ASSERT_TRUE(found_histogram.has_value());\n  EXPECT_EQ(&h1, &found_histogram->get());\n\n  ScopePtr scope2 = scope1->createScope(\"foo.\");\n  EXPECT_EQ(\"scope1.foo.bar\", scope2->counterFromString(\"bar\").name());\n\n  // Validate that we sanitize away bad characters in the stats prefix.\n  ScopePtr scope3 = scope1->createScope(std::string(\"foo:\\0:.\", 7));\n  EXPECT_EQ(\"scope1.foo___.bar\", scope3->counterFromString(\"bar\").name());\n\n  EXPECT_EQ(4UL, store_->counters().size());\n  EXPECT_EQ(2UL, store_->gauges().size());\n\n  StatNameManagedStorage nonexistent_name(\"nonexistent\", store_->symbolTable());\n  EXPECT_EQ(store_->findCounter(nonexistent_name.statName()), absl::nullopt);\n  EXPECT_EQ(store_->findGauge(nonexistent_name.statName()), absl::nullopt);\n  EXPECT_EQ(store_->findHistogram(nonexistent_name.statName()), absl::nullopt);\n}\n\nTEST_F(StatsIsolatedStoreImplTest, PrefixIsStatName) {\n  ScopePtr scope1 = store_->createScope(\"scope1\");\n  ScopePtr scope2 = scope1->createScope(\"scope2\");\n  Counter& c1 = scope2->counterFromString(\"c1\");\n  EXPECT_EQ(\"scope1.scope2.c1\", c1.name());\n}\n\nTEST_F(StatsIsolatedStoreImplTest, AllWithSymbolTable) {\n  ScopePtr scope1 = store_->createScope(\"scope1.\");\n  Counter& c1 = store_->counterFromStatName(makeStatName(\"c1\"));\n  Counter& c2 = scope1->counterFromStatName(makeStatName(\"c2\"));\n  EXPECT_EQ(\"c1\", c1.name());\n  EXPECT_EQ(\"scope1.c2\", c2.name());\n  EXPECT_EQ(\"c1\", c1.tagExtractedName());\n  EXPECT_EQ(\"scope1.c2\", c2.tagExtractedName());\n  EXPECT_EQ(0, c1.tags().size());\n  EXPECT_EQ(0, c1.tags().size());\n\n  Gauge& g1 = store_->gaugeFromStatName(makeStatName(\"g1\"), Gauge::ImportMode::Accumulate);\n  Gauge& g2 = 
scope1->gaugeFromStatName(makeStatName(\"g2\"), Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(\"g1\", g1.name());\n  EXPECT_EQ(\"scope1.g2\", g2.name());\n  EXPECT_EQ(\"g1\", g1.tagExtractedName());\n  EXPECT_EQ(\"scope1.g2\", g2.tagExtractedName());\n  EXPECT_EQ(0, g1.tags().size());\n  EXPECT_EQ(0, g2.tags().size());\n\n  TextReadout& b1 = store_->textReadoutFromStatName(makeStatName(\"b1\"));\n  TextReadout& b2 = scope1->textReadoutFromStatName(makeStatName(\"b2\"));\n  EXPECT_NE(&b1, &b2);\n  EXPECT_EQ(\"b1\", b1.name());\n  EXPECT_EQ(\"scope1.b2\", b2.name());\n  EXPECT_EQ(\"b1\", b1.tagExtractedName());\n  EXPECT_EQ(\"scope1.b2\", b2.tagExtractedName());\n  EXPECT_EQ(0, b1.tags().size());\n  EXPECT_EQ(0, b2.tags().size());\n  Histogram& h1 =\n      store_->histogramFromStatName(makeStatName(\"h1\"), Stats::Histogram::Unit::Unspecified);\n  Histogram& h2 =\n      scope1->histogramFromStatName(makeStatName(\"h2\"), Stats::Histogram::Unit::Unspecified);\n  scope1->deliverHistogramToSinks(h2, 0);\n  EXPECT_EQ(\"h1\", h1.name());\n  EXPECT_EQ(\"scope1.h2\", h2.name());\n  EXPECT_EQ(\"h1\", h1.tagExtractedName());\n  EXPECT_EQ(\"scope1.h2\", h2.tagExtractedName());\n  EXPECT_EQ(0, h1.tags().size());\n  EXPECT_EQ(0, h2.tags().size());\n  h1.recordValue(200);\n  h2.recordValue(200);\n\n  ScopePtr scope2 = scope1->createScope(\"foo.\");\n  EXPECT_EQ(\"scope1.foo.bar\", scope2->counterFromStatName(makeStatName(\"bar\")).name());\n\n  // Validate that we sanitize away bad characters in the stats prefix.\n  ScopePtr scope3 = scope1->createScope(std::string(\"foo:\\0:.\", 7));\n  EXPECT_EQ(\"scope1.foo___.bar\", scope3->counterFromString(\"bar\").name());\n\n  EXPECT_EQ(4UL, store_->counters().size());\n  EXPECT_EQ(2UL, store_->gauges().size());\n  EXPECT_EQ(2UL, store_->textReadouts().size());\n}\n\nTEST_F(StatsIsolatedStoreImplTest, ConstSymtabAccessor) {\n  ScopePtr scope = store_->createScope(\"scope.\");\n  const Scope& cscope = *scope;\n  const SymbolTable& 
const_symbol_table = cscope.constSymbolTable();\n  SymbolTable& symbol_table = scope->symbolTable();\n  EXPECT_EQ(&const_symbol_table, &symbol_table);\n}\n\nTEST_F(StatsIsolatedStoreImplTest, LongStatName) {\n  const std::string long_string(128, 'A');\n\n  ScopePtr scope = store_->createScope(\"scope.\");\n  Counter& counter = scope->counterFromString(long_string);\n  EXPECT_EQ(absl::StrCat(\"scope.\", long_string), counter.name());\n}\n\n/**\n * Test stats macros. @see stats_macros.h\n */\n#define ALL_TEST_STATS(COUNTER, GAUGE, HISTOGRAM, TEXT_READOUT)                                    \\\n  COUNTER(test_counter)                                                                            \\\n  GAUGE(test_gauge, Accumulate)                                                                    \\\n  HISTOGRAM(test_histogram, Microseconds)                                                          \\\n  TEXT_READOUT(test_text_readout)\n\nstruct TestStats {\n  ALL_TEST_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT,\n                 GENERATE_TEXT_READOUT_STRUCT)\n};\n\nTEST_F(StatsIsolatedStoreImplTest, StatsMacros) {\n  TestStats test_stats{ALL_TEST_STATS(\n      POOL_COUNTER_PREFIX(*store_, \"test.\"), POOL_GAUGE_PREFIX(*store_, \"test.\"),\n      POOL_HISTOGRAM_PREFIX(*store_, \"test.\"), POOL_TEXT_READOUT_PREFIX(*store_, \"test.\"))};\n\n  Counter& counter = test_stats.test_counter_;\n  EXPECT_EQ(\"test.test_counter\", counter.name());\n\n  Gauge& gauge = test_stats.test_gauge_;\n  EXPECT_EQ(\"test.test_gauge\", gauge.name());\n\n  TextReadout& textReadout = test_stats.test_text_readout_;\n  EXPECT_EQ(\"test.test_text_readout\", textReadout.name());\n\n  Histogram& histogram = test_stats.test_histogram_;\n  EXPECT_EQ(\"test.test_histogram\", histogram.name());\n  EXPECT_EQ(Histogram::Unit::Microseconds, histogram.unit());\n}\n\nTEST_F(StatsIsolatedStoreImplTest, NullImplCoverage) {\n  NullCounterImpl& c = store_->nullCounter();\n  
c.inc();\n  EXPECT_EQ(0, c.value());\n  NullGaugeImpl& g = store_->nullGauge(\"\");\n  g.inc();\n  EXPECT_EQ(0, g.value());\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/make_elements_helper.cc",
    "content": "#include \"common/stats/utility.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nElementVec makeElements(Element a, Element b, Element c, Element d, Element e) {\n  return ElementVec{a, b, c, d, e};\n}\n\nStatNameVec makeStatNames(StatName a, StatName b, StatName c, StatName d, StatName e) {\n  return StatNameVec{a, b, c, d, e};\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/make_elements_helper.h",
    "content": "#pragma once\n\n#include \"common/stats/utility.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\n// These two trivial functions are broken out into a separate compilation unit\n// to make sure the optimizer cannot hoist vector-creation out of the loop. They\n// simply create vectors based on their 5 inputs.\nElementVec makeElements(Element a, Element b, Element c, Element d, Element e);\nStatNameVec makeStatNames(StatName a, StatName b, StatName c, StatName d, StatName e);\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/metric_impl_test.cc",
    "content": "#include <string>\n\n#include \"common/stats/allocator_impl.h\"\n#include \"common/stats/utility.h\"\n\n#include \"test/test_common/logging.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace {\n\nclass MetricImplTest : public testing::Test {\nprotected:\n  MetricImplTest() : alloc_(symbol_table_), pool_(symbol_table_) {}\n  ~MetricImplTest() override { clearStorage(); }\n\n  StatName makeStat(absl::string_view name) { return pool_.add(name); }\n\n  void clearStorage() {\n    pool_.clear();\n    EXPECT_EQ(0, symbol_table_.numSymbols());\n  }\n\n  SymbolTableImpl symbol_table_;\n  AllocatorImpl alloc_;\n  StatNamePool pool_;\n};\n\n// No truncation occurs in the implementation of HeapStatData.\nTEST_F(MetricImplTest, NoTags) {\n  CounterSharedPtr counter = alloc_.makeCounter(makeStat(\"counter\"), StatName(), {});\n  EXPECT_EQ(0, counter->tags().size());\n}\n\nTEST_F(MetricImplTest, OneTag) {\n  CounterSharedPtr counter = alloc_.makeCounter(makeStat(\"counter.name.value\"), makeStat(\"counter\"),\n                                                {{makeStat(\"name\"), makeStat(\"value\")}});\n  TagVector tags = counter->tags();\n  ASSERT_EQ(1, tags.size());\n  EXPECT_EQ(\"name\", tags[0].name_);\n  EXPECT_EQ(\"value\", tags[0].value_);\n  EXPECT_EQ(\"counter.name.value\", counter->name());\n  EXPECT_EQ(\"counter\", counter->tagExtractedName());\n  EXPECT_EQ(makeStat(\"counter\"), counter->tagExtractedStatName());\n}\n\nTEST_F(MetricImplTest, TwoTagsIterOnce) {\n  CounterSharedPtr counter = alloc_.makeCounter(\n      makeStat(\"counter.name.value\"), makeStat(\"counter\"),\n      {{makeStat(\"name1\"), makeStat(\"value1\")}, {makeStat(\"name2\"), makeStat(\"value2\")}});\n  StatName name1 = makeStat(\"name1\");\n  StatName value1 = makeStat(\"value1\");\n  int count = 0;\n  counter->iterateTagStatNames([&name1, &value1, &count](StatName name, StatName value) -> bool {\n    EXPECT_EQ(name1, name);\n    EXPECT_EQ(value1, 
value);\n    ++count;\n    return false; // Abort the iteration at first tag.\n  });\n  EXPECT_EQ(1, count);\n}\n\nTEST_F(MetricImplTest, FindTag) {\n  CounterSharedPtr counter = alloc_.makeCounter(\n      makeStat(\"counter.name.value\"), makeStat(\"counter\"),\n      {{makeStat(\"name1\"), makeStat(\"value1\")}, {makeStat(\"name2\"), makeStat(\"value2\")}});\n  EXPECT_EQ(makeStat(\"value1\"), Utility::findTag(*counter, makeStat(\"name1\")));\n  EXPECT_EQ(makeStat(\"value2\"), Utility::findTag(*counter, makeStat(\"name2\")));\n  EXPECT_FALSE(Utility::findTag(*counter, makeStat(\"name3\")));\n}\n\n} // namespace\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/recent_lookups_speed_test.cc",
    "content": "// Note: this should be run with --compilation_mode=opt, and would benefit from a\n// quiescent system with disabled cstate power management.\n//\n// NOLINT(namespace-envoy)\n//\n// Running bazel-bin/test/common/stats/recent_lookups_speed_test\n// Run on (12 X 4500 MHz CPU s)\n// CPU Caches:\n//   L1 Data 32K (x6)\n//   L1 Instruction 32K (x6)\n//   L2 Unified 1024K (x6)\n//   L3 Unified 8448K (x1)\n// Load Average: 1.32, 7.40, 10.21\n// ***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will\n// incur extra overhead.\n// -----------------------------------------------------------------\n// Benchmark                       Time             CPU   Iterations\n// -----------------------------------------------------------------\n// BM_LookupsMixed             87068 ns        87068 ns         6955\n// BM_LookupsNoEvictions       45662 ns        45662 ns        15329\n// BM_LookupsAllEvictions      83015 ns        83015 ns         8435\n\n#include \"common/common/random_generator.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/stats/recent_lookups.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"benchmark/benchmark.h\"\n\nclass RecentLookupsSpeedTest {\npublic:\n  RecentLookupsSpeedTest(uint64_t lookup_variants, uint64_t capacity) {\n    recent_lookups_.setCapacity(capacity);\n    Envoy::Random::RandomGeneratorImpl random;\n    lookups_.reserve(lookup_variants);\n    for (size_t i = 0; i < lookup_variants; ++i) {\n      lookups_.push_back(absl::StrCat(\"lookup #\", random.random()));\n    }\n  }\n\n  void test(benchmark::State& state) {\n    for (auto _ : state) {\n      Envoy::Random::RandomGeneratorImpl random;\n      for (uint64_t i = 0; i < lookups_.size(); ++i) {\n        recent_lookups_.lookup(lookups_[random.random() % lookups_.size()]);\n      }\n    }\n  }\n\nprivate:\n  std::vector<std::string> lookups_;\n  Envoy::Stats::RecentLookups recent_lookups_;\n};\n\nstatic void 
BM_LookupsMixed(benchmark::State& state) {\n  RecentLookupsSpeedTest speed_test(1000, 500);\n  speed_test.test(state);\n}\nBENCHMARK(BM_LookupsMixed);\n\nstatic void BM_LookupsNoEvictions(benchmark::State& state) {\n  RecentLookupsSpeedTest speed_test(1000, 1000);\n  speed_test.test(state);\n}\nBENCHMARK(BM_LookupsNoEvictions);\n\nstatic void BM_LookupsAllEvictions(benchmark::State& state) {\n  RecentLookupsSpeedTest speed_test(1000, 10);\n  speed_test.test(state);\n}\nBENCHMARK(BM_LookupsAllEvictions);\n\nint main(int argc, char** argv) {\n  Envoy::Thread::MutexBasicLockable lock;\n  Envoy::Logger::Context logger_context(spdlog::level::warn,\n                                        Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false);\n  benchmark::Initialize(&argc, argv);\n\n  if (benchmark::ReportUnrecognizedArguments(argc, argv)) {\n    return 1;\n  }\n  benchmark::RunSpecifiedBenchmarks();\n}\n"
  },
  {
    "path": "test/common/stats/recent_lookups_test.cc",
    "content": "#include <algorithm>\n#include <string>\n\n#include \"common/common/utility.h\"\n#include \"common/stats/recent_lookups.h\"\n\n#include \"test/test_common/logging.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace {\n\nclass RecentLookupsTest : public testing::Test {\nprotected:\n  std::string joinLookups() {\n    using ItemCount = std::pair<std::string, uint64_t>;\n    std::vector<ItemCount> items;\n    recent_lookups_.forEach([&items](absl::string_view item, uint64_t count) {\n      items.emplace_back(ItemCount(std::string(item), count));\n    });\n    std::sort(items.begin(), items.end(), [](const ItemCount& a, const ItemCount& b) -> bool {\n      if (a.second == b.second) {\n        return a.first < b.first;\n      }\n      return a.second < b.second;\n    });\n    std::vector<std::string> accum;\n    accum.reserve(items.size());\n    for (const auto& item : items) {\n      accum.push_back(absl::StrCat(item.second, \": \", item.first));\n    }\n    return absl::StrJoin(accum, \" \");\n  }\n\n  RecentLookups recent_lookups_;\n};\n\nTEST_F(RecentLookupsTest, Empty) { EXPECT_EQ(\"\", joinLookups()); }\n\nTEST_F(RecentLookupsTest, One) {\n  recent_lookups_.lookup(\"Hello\");\n  EXPECT_EQ(\"\", joinLookups());\n  recent_lookups_.setCapacity(10);\n  EXPECT_EQ(1, recent_lookups_.total());\n  recent_lookups_.lookup(\"Hello\");\n  EXPECT_EQ(2, recent_lookups_.total());\n  EXPECT_EQ(\"1: Hello\", joinLookups());\n\n  recent_lookups_.clear();\n  EXPECT_EQ(\"\", joinLookups());\n  EXPECT_EQ(0, recent_lookups_.total());\n  recent_lookups_.lookup(\"Hello\");\n  EXPECT_EQ(1, recent_lookups_.total());\n  EXPECT_EQ(\"1: Hello\", joinLookups());\n  recent_lookups_.setCapacity(0);\n  EXPECT_EQ(\"\", joinLookups());\n  EXPECT_EQ(1, recent_lookups_.total());\n}\n\nTEST_F(RecentLookupsTest, DropOne) {\n  recent_lookups_.setCapacity(10);\n  for (int i = 0; i < 
11; ++i) {\n    recent_lookups_.lookup(absl::StrCat(\"lookup\", i));\n  }\n  EXPECT_EQ(\"1: lookup1 \"\n            \"1: lookup10 \"\n            \"1: lookup2 \"\n            \"1: lookup3 \"\n            \"1: lookup4 \"\n            \"1: lookup5 \"\n            \"1: lookup6 \"\n            \"1: lookup7 \"\n            \"1: lookup8 \"\n            \"1: lookup9\",\n            joinLookups());\n  recent_lookups_.clear();\n  EXPECT_EQ(\"\", joinLookups());\n}\n\nTEST_F(RecentLookupsTest, RepeatDrop) {\n  recent_lookups_.setCapacity(10);\n  recent_lookups_.lookup(\"drop_early\");\n  for (int i = 0; i < 11; ++i) {\n    recent_lookups_.lookup(absl::StrCat(\"lookup\", i));\n    recent_lookups_.lookup(absl::StrCat(\"lookup\", i));\n  }\n  recent_lookups_.lookup(\"add_late\");\n  EXPECT_EQ(\"1: add_late \"\n            \"2: lookup10 \"\n            \"2: lookup2 \"\n            \"2: lookup3 \"\n            \"2: lookup4 \"\n            \"2: lookup5 \"\n            \"2: lookup6 \"\n            \"2: lookup7 \"\n            \"2: lookup8 \"\n            \"2: lookup9\",\n            joinLookups());\n  recent_lookups_.clear();\n  EXPECT_EQ(\"\", joinLookups());\n}\n\n} // namespace\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/refcount_ptr_test.cc",
    "content": "#include <string>\n\n#include \"envoy/stats/refcount_ptr.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass RefcountedString : public std::string, public RefcountHelper {\npublic:\n  explicit RefcountedString(const std::string& s) : std::string(s) {}\n};\nusing SharedString = RefcountPtr<RefcountedString>;\n\nclass DerivedRefcountedString : public RefcountedString {};\nusing DerivedSharedString = RefcountPtr<RefcountedString>;\n\nTEST(RefcountPtr, Constructors) {\n  SharedString rp1; // Default constructor.\n  EXPECT_FALSE(rp1);\n  rp1 = new RefcountedString(\"Hello\"); // Assign from pointer.\n  EXPECT_EQ(1, rp1.use_count());\n  SharedString rp2(rp1); // Copy-constructor.\n  EXPECT_EQ(2, rp1.use_count());\n  EXPECT_EQ(2, rp2.use_count());\n  EXPECT_EQ(rp1, rp2);\n  EXPECT_EQ(*rp1, *rp2);\n  *rp1 += \", World!\"; // Object is shared, so mutations are shared.\n  EXPECT_EQ(rp1, rp2);\n  EXPECT_EQ(*rp1, *rp2);\n  EXPECT_EQ(\"Hello, World!\", *rp2);\n  SharedString rp3(std::move(rp2)); // Move-constructor.\n  EXPECT_EQ(2, rp3.use_count());\n  EXPECT_EQ(\"Hello, World!\", *rp3);\n  EXPECT_NE(rp2, rp3);     // NOLINT -- intentionally testing what happens to a variable post-move.\n  EXPECT_EQ(nullptr, rp2); // NOLINT -- ditto\n  EXPECT_NE(rp1, rp2);     // NOLINT -- ditto\n  EXPECT_EQ(rp1, rp3);\n  EXPECT_FALSE(rp2); // NOLINT -- ditto\n  EXPECT_TRUE(rp3);\n  EXPECT_TRUE(rp1);\n  SharedString rp4(new RefcountedString(\"Hello, World!\")); // Construct from pointer.\n  EXPECT_EQ(*rp4, *rp3);\n  EXPECT_NE(rp4, rp3);\n  DerivedSharedString rp5(rp4); // Construct across hierarchies.\n  EXPECT_EQ(rp5, rp4);\n  EXPECT_EQ(*rp5, *rp4);\n  SharedString rp6;\n  rp6 = std::move(rp4);    // move-assign.\n  EXPECT_EQ(nullptr, rp4); // NOLINT -- intentionally testing what happens to a variable post-move.\n  EXPECT_EQ(rp5, rp6);\n}\n\nTEST(RefcountPtr, Operators) {\n  RefcountedString* ptr = new RefcountedString(\"Hello, World!\");\n  
SharedString shared(ptr);\n  EXPECT_TRUE(shared);\n  EXPECT_EQ(13, shared->size());\n  RefcountedString& ref = *shared;\n  EXPECT_EQ(&ref, ptr);\n  SharedString shared2(new RefcountedString(\"Hello, World!\"));\n  EXPECT_NE(&ref, shared2.get());\n  SharedString shared3(shared2.get());\n  EXPECT_EQ(shared2, shared3);\n  EXPECT_EQ(2, shared2.use_count());\n  shared2.reset();\n  EXPECT_EQ(nullptr, shared2);\n  EXPECT_EQ(1, shared3.use_count());\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/stat_merger_corpus/clusterfuzz-testcase-minimized-stat_merger_fuzz_test-4800677542100992.fuzz",
    "content": "aVa.b"
  },
  {
    "path": "test/common/stats/stat_merger_corpus/example1",
    "content": "8098ausd0f8jaspodijf poa\n"
  },
  {
    "path": "test/common/stats/stat_merger_corpus/example2",
    "content": "-asdfoija 0psd89jf8098ausd0f8jaspodijf poa\n"
  },
  {
    "path": "test/common/stats/stat_merger_corpus/example3",
    "content": "-asdfoija 0psd89jf-09anuis-aspodijfd9fu 98098ausd0f8j poa\n"
  },
  {
    "path": "test/common/stats/stat_merger_corpus/example5",
    "content": "ไ-asdsdfoj pa098ausd0f8nuis-a1foj pa098ausd-asdsdfoj pa098ausd0f8nuis-a1foj pa098ausd0f214748364ʶ8󠀴nuis-a9.2233720368547󠀡75709j p"
  },
  {
    "path": "test/common/stats/stat_merger_fuzz_test.cc",
    "content": "#include <algorithm>\n\n#include \"common/stats/stat_merger.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n\n#include \"absl/strings/str_replace.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace Fuzz {\n\nvoid testDynamicEncoding(absl::string_view data, SymbolTable& symbol_table) {\n  StatNameDynamicPool dynamic_pool(symbol_table);\n  StatNamePool symbolic_pool(symbol_table);\n  StatNameVec stat_names;\n\n  // This local string is write-only; it's used to help when debugging\n  // a crash. If a crash is found, you can print the unit_test_encoding\n  // in the debugger and then add that as a test-case in stat_merger_text.cc,\n  // in StatMergerDynamicTest.DynamicsWithFakeSymbolTable and\n  // StatMergerDynamicTest.DynamicsWithRealSymbolTable.\n  std::string unit_test_encoding;\n\n  for (uint32_t index = 0; index < data.size();) {\n    // Select component lengths between 1 and 8 bytes inclusive, and ensure it\n    // doesn't overrun our buffer.\n    //\n    // TODO(#10008): We should remove the \"1 +\" below, so we can get empty\n    // segments, which trigger some inconsistent handling as described in that\n    // bug.\n    uint32_t num_bytes = (1 + data[index]) & 0x7;\n    num_bytes = std::min(static_cast<uint32_t>(data.size() - 1),\n                         num_bytes); // restrict number up to the size of data\n\n    // Carve out the segment and use the 4th bit from the control-byte to\n    // determine whether to treat this segment symbolic or not.\n    absl::string_view segment = data.substr(index, num_bytes);\n    bool is_symbolic = (data[index] & 0x8) == 0x0;\n    if (index != 0) {\n      unit_test_encoding += \".\";\n    }\n    index += num_bytes + 1;\n    if (is_symbolic) {\n      absl::StrAppend(&unit_test_encoding, segment);\n      stat_names.push_back(symbolic_pool.add(segment));\n    } else {\n      absl::StrAppend(&unit_test_encoding, \"D:\", 
absl::StrReplaceAll(segment, {{\".\", \",\"}}));\n      stat_names.push_back(dynamic_pool.add(segment));\n    }\n  }\n\n  SymbolTable::StoragePtr joined = symbol_table.join(stat_names);\n  StatName stat_name(joined.get());\n\n  StatMerger::DynamicContext dynamic_context(symbol_table);\n  std::string name = symbol_table.toString(stat_name);\n  StatMerger::DynamicsMap dynamic_map;\n  DynamicSpans spans = symbol_table.getDynamicSpans(stat_name);\n  if (!spans.empty()) {\n    dynamic_map[name] = spans;\n  }\n  StatName decoded = dynamic_context.makeDynamicStatName(name, dynamic_map);\n  FUZZ_ASSERT(name == symbol_table.toString(decoded));\n  FUZZ_ASSERT(stat_name == decoded);\n}\n\n// Fuzzer for symbol tables.\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  SymbolTableImpl symbol_table;\n\n  absl::string_view data(reinterpret_cast<const char*>(buf), len);\n  testDynamicEncoding(data, symbol_table);\n}\n\n} // namespace Fuzz\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/stat_merger_test.cc",
    "content": "#include <memory>\n\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/stats/stat_merger.h\"\n#include \"common/stats/thread_local_store.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace {\n\nclass StatMergerTest : public testing::Test {\npublic:\n  StatMergerTest()\n      : stat_merger_(store_), whywassixafraidofseven_(store_.gaugeFromString(\n                                  \"whywassixafraidofseven\", Gauge::ImportMode::Accumulate)) {\n    whywassixafraidofseven_.set(678);\n  }\n\n  void mergeTest(const std::string& name, Gauge::ImportMode initial, Gauge::ImportMode merge) {\n    Gauge& g1 = store_.gaugeFromString(name, initial);\n    EXPECT_EQ(initial, g1.importMode()) << name;\n    g1.mergeImportMode(merge);\n    EXPECT_EQ(merge, g1.importMode()) << name;\n  }\n\n  void dynamicEncodeDecodeTest(absl::string_view input_name) {\n    SymbolTable& symbol_table = store_.symbolTable();\n\n    // Encode the input name into a joined StatName, using \"D:\" to indicate\n    // a dynamic component.\n    StatNameVec components;\n    StatNamePool symbolic_pool(symbol_table);\n    StatNameDynamicPool dynamic_pool(symbol_table);\n\n    for (absl::string_view segment : absl::StrSplit(input_name, '.')) {\n      if (absl::StartsWith(segment, \"D:\")) {\n        std::string hacked = absl::StrReplaceAll(segment.substr(2), {{\",\", \".\"}});\n        components.push_back(dynamic_pool.add(hacked));\n      } else {\n        components.push_back(symbolic_pool.add(segment));\n      }\n    }\n    SymbolTable::StoragePtr joined = symbol_table.join(components);\n    StatName stat_name(joined.get());\n\n    std::string name = symbol_table.toString(stat_name);\n    StatMerger::DynamicsMap dynamic_map;\n    dynamic_map[name] = symbol_table.getDynamicSpans(stat_name);\n    StatMerger::DynamicContext dynamic_context(symbol_table);\n    
StatName decoded = dynamic_context.makeDynamicStatName(name, dynamic_map);\n    EXPECT_EQ(stat_name, decoded) << name;\n  }\n\n  IsolatedStoreImpl store_;\n  StatMerger stat_merger_;\n  Gauge& whywassixafraidofseven_;\n  Protobuf::Map<std::string, uint64_t> empty_counter_deltas_;\n  Protobuf::Map<std::string, uint64_t> empty_gauges_;\n};\n\nTEST_F(StatMergerTest, CounterMerge) {\n  // Child's value of the counter might already be non-zero by the first merge.\n  store_.counterFromString(\"draculaer\").inc();\n  EXPECT_EQ(1, store_.counterFromString(\"draculaer\").latch());\n\n  Protobuf::Map<std::string, uint64_t> counter_deltas;\n  counter_deltas[\"draculaer\"] = 1;\n  stat_merger_.mergeStats(counter_deltas, empty_gauges_);\n  // Initial combined value: 1+1.\n  EXPECT_EQ(2, store_.counterFromString(\"draculaer\").value());\n  EXPECT_EQ(1, store_.counterFromString(\"draculaer\").latch());\n\n  // The parent's counter increases by 1.\n  counter_deltas[\"draculaer\"] = 1;\n  stat_merger_.mergeStats(counter_deltas, empty_gauges_);\n  EXPECT_EQ(3, store_.counterFromString(\"draculaer\").value());\n  EXPECT_EQ(1, store_.counterFromString(\"draculaer\").latch());\n\n  // Our own counter increases by 4, while the parent's stays constant. 
Total increase of 4.\n  store_.counterFromString(\"draculaer\").add(4);\n  counter_deltas[\"draculaer\"] = 0;\n  stat_merger_.mergeStats(counter_deltas, empty_gauges_);\n  EXPECT_EQ(7, store_.counterFromString(\"draculaer\").value());\n  EXPECT_EQ(4, store_.counterFromString(\"draculaer\").latch());\n\n  // Our counter and the parent's counter both increase by 2, total increase of 4.\n  store_.counterFromString(\"draculaer\").add(2);\n  counter_deltas[\"draculaer\"] = 2;\n  stat_merger_.mergeStats(counter_deltas, empty_gauges_);\n  EXPECT_EQ(11, store_.counterFromString(\"draculaer\").value());\n  EXPECT_EQ(4, store_.counterFromString(\"draculaer\").latch());\n}\n\nTEST_F(StatMergerTest, BasicDefaultAccumulationImport) {\n  Protobuf::Map<std::string, uint64_t> gauges;\n  gauges[\"whywassixafraidofseven\"] = 111;\n  stat_merger_.mergeStats(empty_counter_deltas_, gauges);\n  EXPECT_EQ(789, whywassixafraidofseven_.value());\n}\n\nTEST_F(StatMergerTest, MultipleImportsWithAccumulationLogic) {\n  {\n    Protobuf::Map<std::string, uint64_t> gauges;\n    gauges[\"whywassixafraidofseven\"] = 100;\n    stat_merger_.mergeStats(empty_counter_deltas_, gauges);\n    // Initial combined values: 678+100 and 1+2.\n    EXPECT_EQ(778, whywassixafraidofseven_.value());\n  }\n  {\n    Protobuf::Map<std::string, uint64_t> gauges;\n    // The parent's gauge drops by 1, and its counter increases by 1.\n    gauges[\"whywassixafraidofseven\"] = 99;\n    stat_merger_.mergeStats(empty_counter_deltas_, gauges);\n    EXPECT_EQ(777, whywassixafraidofseven_.value());\n  }\n  {\n    Protobuf::Map<std::string, uint64_t> gauges;\n    // Our own gauge increases by 12, while the parent's stays constant. Total increase of 12.\n    // Our own counter increases by 4, while the parent's stays constant. 
Total increase of 4.\n    whywassixafraidofseven_.add(12);\n    stat_merger_.mergeStats(empty_counter_deltas_, gauges);\n    EXPECT_EQ(789, whywassixafraidofseven_.value());\n  }\n  {\n    Protobuf::Map<std::string, uint64_t> gauges;\n    // Our gauge decreases by 5, parent's increases by 5. Net zero change.\n    // Our counter and the parent's counter both increase by 1, total increase of 2.\n    whywassixafraidofseven_.sub(5);\n    gauges[\"whywassixafraidofseven\"] = 104;\n    stat_merger_.mergeStats(empty_counter_deltas_, gauges);\n    EXPECT_EQ(789, whywassixafraidofseven_.value());\n  }\n}\n\n// Stat names that have NoImport logic should leave the child gauge value alone upon import, even if\n// the child has that gauge undefined.\nTEST_F(StatMergerTest, ExclusionsNotImported) {\n  Gauge& some_sort_of_version =\n      store_.gaugeFromString(\"some.sort.of.version\", Gauge::ImportMode::NeverImport);\n  some_sort_of_version.set(12345);\n\n  Protobuf::Map<std::string, uint64_t> gauges;\n  gauges[\"some.sort.of.version\"] = 67890;\n  gauges[\"child.doesnt.have.this.version\"] = 111; // This should never be populated.\n\n  // Check defined values are not changed, and undefined remain undefined.\n  stat_merger_.mergeStats(empty_counter_deltas_, gauges);\n  EXPECT_EQ(12345, some_sort_of_version.value());\n  EXPECT_FALSE(\n      store_.gaugeFromString(\"child.doesnt.have.this.version\", Gauge::ImportMode::NeverImport)\n          .used());\n\n  // Check the \"undefined remains undefined\" behavior for a bunch of other names.\n  gauges[\"runtime.admin_overrides_active\"] = 111;\n  gauges[\"runtime.num_keys\"] = 111;\n  gauges[\"runtime.num_layers\"] = 111;\n  gauges[\"listener_manager.total_listeners_draining\"] = 111;\n  gauges[\"listener_manager.total_listeners_warming\"] = 111;\n  gauges[\"server.hot_restart_epoch\"] = 111;\n  gauges[\"server.live\"] = 1;\n  gauges[\"server.concurrency\"] = 1;\n  gauges[\"some.control_plane.connected_state\"] = 1;\n  
gauges[\"cluster_manager.active_clusters\"] = 33;\n  gauges[\"cluster_manager.warming_clusters\"] = 33;\n  gauges[\"cluster.rds.membership_total\"] = 33;\n  gauges[\"cluster.rds.membership_healthy\"] = 33;\n  gauges[\"cluster.rds.membership_degraded\"] = 33;\n  gauges[\"cluster.rds.max_host_weight\"] = 33;\n  gauges[\"anything.total_principals\"] = 33;\n  gauges[\"listener_manager.total_listeners_active\"] = 33;\n  gauges[\"overload.something.pressure\"] = 33;\n\n  stat_merger_.mergeStats(empty_counter_deltas_, gauges);\n#define EXPECT_GAUGE_NOT_USED(name)                                                                \\\n  EXPECT_FALSE(store_.gaugeFromString(name, Gauge::ImportMode::NeverImport).used())\n\n  EXPECT_GAUGE_NOT_USED(\"child.doesnt.have.this.version\");\n  EXPECT_GAUGE_NOT_USED(\"runtime.admin_overrides_active\");\n  EXPECT_GAUGE_NOT_USED(\"runtime.num_keys\");\n  EXPECT_GAUGE_NOT_USED(\"runtime.num_layers\");\n  EXPECT_GAUGE_NOT_USED(\"listener_manager.total_listeners_draining\");\n  EXPECT_GAUGE_NOT_USED(\"listener_manager.total_listeners_warming\");\n  EXPECT_GAUGE_NOT_USED(\"server.hot_restart_epoch\");\n  EXPECT_GAUGE_NOT_USED(\"server.live\");\n  EXPECT_GAUGE_NOT_USED(\"server.concurrency\");\n  EXPECT_GAUGE_NOT_USED(\"some.control_plane.connected_state\");\n  EXPECT_GAUGE_NOT_USED(\"cluster_manager.active_clusters\");\n  EXPECT_GAUGE_NOT_USED(\"cluster_manager.warming_clusters\");\n  EXPECT_GAUGE_NOT_USED(\"cluster.rds.membership_total\");\n  EXPECT_GAUGE_NOT_USED(\"cluster.rds.membership_healthy\");\n  EXPECT_GAUGE_NOT_USED(\"cluster.rds.membership_degraded\");\n  EXPECT_GAUGE_NOT_USED(\"cluster.rds.max_host_weight\");\n  EXPECT_GAUGE_NOT_USED(\"anything.total_principals\");\n  EXPECT_GAUGE_NOT_USED(\"listener_manager.total_listeners_active\");\n  EXPECT_GAUGE_NOT_USED(\"overload.something.pressure\");\n#undef EXPECT_GAUGE_NOT_USED\n}\n\n// Targeted test of GaugeImpl::mergeImportMode().\nTEST_F(StatMergerTest, GaugeMergeImportMode) {\n  
mergeTest(\"newgauge1\", Gauge::ImportMode::Accumulate, Gauge::ImportMode::Accumulate);\n  mergeTest(\"s1.version\", Gauge::ImportMode::NeverImport, Gauge::ImportMode::NeverImport);\n  mergeTest(\"newgauge2\", Gauge::ImportMode::Uninitialized, Gauge::ImportMode::Accumulate);\n  mergeTest(\"s2.version\", Gauge::ImportMode::Uninitialized, Gauge::ImportMode::NeverImport);\n}\n\nclass StatMergerDynamicTest : public testing::Test {\npublic:\n  void init(SymbolTablePtr&& symbol_table) { symbol_table_ = std::move(symbol_table); }\n\n  /**\n   * Test helper function takes an input_descriptor. And input_descriptor is\n   * mostly like the stringified StatName, but each segment that is prefixed by\n   * \"D:\" is dynamic, and within a segment, we map \",\" to \".\". The \"D:\" hack\n   * restricts the stat names we can test by making a prefix special. The \",\"\n   * hack does that too, allowing us to represent a single multi-segment dynamic\n   * token in the tests. These hacks were easy to implement (~ 3 lines of code)\n   * and provide a reasonably concise way to make a few test-cases.\n   *\n   * The test-helper ensures that a StatName created from a descriptor can\n   * be encoded into a DynamicsMap, and also decoded back into a StatName\n   * that compares as expected.\n   *\n   * @param a pattern describing a stat-name with dynamic and symbolic components.\n   * @return the number of elements in the dynamic map.\n   */\n  uint32_t dynamicEncodeDecodeTest(absl::string_view input_descriptor) {\n    // Encode the input name into a joined StatName, using \"D:\" to indicate\n    // a dynamic component.\n    StatNameVec components;\n    StatNamePool symbolic_pool(*symbol_table_);\n    StatNameDynamicPool dynamic_pool(*symbol_table_);\n\n    for (absl::string_view segment : absl::StrSplit(input_descriptor, '.')) {\n      if (absl::StartsWith(segment, \"D:\")) {\n        std::string hacked = absl::StrReplaceAll(segment.substr(2), {{\",\", \".\"}});\n        
components.push_back(dynamic_pool.add(hacked));\n      } else {\n        components.push_back(symbolic_pool.add(segment));\n      }\n    }\n    StatName stat_name;\n    SymbolTable::StoragePtr joined;\n\n    if (components.size() == 1) {\n      stat_name = components[0];\n    } else {\n      joined = symbol_table_->join(components);\n      stat_name = StatName(joined.get());\n    }\n\n    std::string name = symbol_table_->toString(stat_name);\n    StatMerger::DynamicsMap dynamic_map;\n    DynamicSpans spans = symbol_table_->getDynamicSpans(stat_name);\n    uint32_t size = 0;\n    if (!spans.empty()) {\n      dynamic_map[name] = spans;\n      size = spans.size();\n    }\n    StatMerger::DynamicContext dynamic_context(*symbol_table_);\n    StatName decoded = dynamic_context.makeDynamicStatName(name, dynamic_map);\n    EXPECT_EQ(name, symbol_table_->toString(decoded)) << \"input=\" << input_descriptor;\n    EXPECT_TRUE(stat_name == decoded) << \"input=\" << input_descriptor << \", name=\" << name;\n\n    return size;\n  }\n\n  SymbolTablePtr symbol_table_;\n};\n\nTEST_F(StatMergerDynamicTest, DynamicsWithRealSymbolTable) {\n  init(std::make_unique<SymbolTableImpl>());\n\n  for (uint32_t i = 1; i < 256; ++i) {\n    char ch = static_cast<char>(i);\n    absl::string_view one_char(&ch, 1);\n    EXPECT_EQ(1, dynamicEncodeDecodeTest(absl::StrCat(\"D:\", one_char))) << \"dynamic=\" << one_char;\n    EXPECT_EQ(0, dynamicEncodeDecodeTest(one_char)) << \"symbolic=\" << one_char;\n  }\n  EXPECT_EQ(0, dynamicEncodeDecodeTest(\"normal\"));\n  EXPECT_EQ(1, dynamicEncodeDecodeTest(\"D:dynamic\"));\n  EXPECT_EQ(0, dynamicEncodeDecodeTest(\"hello.world\"));\n  EXPECT_EQ(0, dynamicEncodeDecodeTest(\"hello..world\"));\n  EXPECT_EQ(0, dynamicEncodeDecodeTest(\"hello...world\"));\n  EXPECT_EQ(1, dynamicEncodeDecodeTest(\"D:hello.world\"));\n  EXPECT_EQ(1, dynamicEncodeDecodeTest(\"hello.D:world\"));\n  EXPECT_EQ(2, dynamicEncodeDecodeTest(\"D:hello.D:world\"));\n  EXPECT_EQ(1, 
dynamicEncodeDecodeTest(\"D:hello,world\"));\n  EXPECT_EQ(4, dynamicEncodeDecodeTest(\"one.D:two.three.D:four.D:five.six.D:seven,eight.nine\"));\n  EXPECT_EQ(1, dynamicEncodeDecodeTest(\"D:one,two,three\"));\n  EXPECT_EQ(0, dynamicEncodeDecodeTest(\"hello..world\"));\n  EXPECT_EQ(1, dynamicEncodeDecodeTest(\"D:hello..world\"));\n  EXPECT_EQ(1, dynamicEncodeDecodeTest(\"hello..D:world\"));\n  EXPECT_EQ(2, dynamicEncodeDecodeTest(\"D:hello..D:world\"));\n  EXPECT_EQ(3, dynamicEncodeDecodeTest(\"D:hello.D:.D:world\"));\n  EXPECT_EQ(1, dynamicEncodeDecodeTest(\"D:hello,,world\"));\n  EXPECT_EQ(1, dynamicEncodeDecodeTest(\"D:hello,,,world\"));\n}\n\nclass StatMergerThreadLocalTest : public testing::Test {\nprotected:\n  SymbolTableImpl symbol_table_;\n  AllocatorImpl alloc_{symbol_table_};\n  ThreadLocalStoreImpl store_{alloc_};\n};\n\nTEST_F(StatMergerThreadLocalTest, FilterOutUninitializedGauges) {\n  Gauge& g1 = store_.gaugeFromString(\"newgauge1\", Gauge::ImportMode::Uninitialized);\n  Gauge& g2 = store_.gaugeFromString(\"newgauge2\", Gauge::ImportMode::Accumulate);\n  std::vector<GaugeSharedPtr> gauges = store_.gauges();\n  ASSERT_EQ(1, gauges.size());\n  EXPECT_EQ(&g2, gauges[0].get());\n\n  // We don't get \"newgauge1\" in the aggregated list, but we *do* get it if we try to\n  // find it by name.\n  GaugeOptConstRef find = store_.findGauge(g1.statName());\n  ASSERT_TRUE(find);\n  EXPECT_EQ(&g1, &(find->get()));\n}\n\n// When the parent sends us counters we haven't ourselves instantiated, they should be stored\n// temporarily, but then uninstantiated if hot restart ends without the child accessing them.\nTEST_F(StatMergerThreadLocalTest, NewStatFromParent) {\n  {\n    StatMerger stat_merger(store_);\n\n    Protobuf::Map<std::string, uint64_t> counter_deltas;\n    Protobuf::Map<std::string, uint64_t> gauges;\n    counter_deltas[\"newcounter0\"] = 0;\n    counter_deltas[\"newcounter1\"] = 1;\n    counter_deltas[\"newcounter2\"] = 2;\n    gauges[\"newgauge1\"] = 
1;\n    gauges[\"newgauge2\"] = 2;\n    stat_merger.mergeStats(counter_deltas, gauges);\n    EXPECT_EQ(0, store_.counterFromString(\"newcounter0\").value());\n    EXPECT_EQ(0, store_.counterFromString(\"newcounter0\").latch());\n    EXPECT_EQ(1, store_.counterFromString(\"newcounter1\").value());\n    EXPECT_EQ(1, store_.counterFromString(\"newcounter1\").latch());\n    EXPECT_EQ(1, store_.gaugeFromString(\"newgauge1\", Gauge::ImportMode::Accumulate).value());\n  }\n  // We accessed 0 and 1 above, but not 2. Now that StatMerger has been destroyed,\n  // 2 should be gone.\n  EXPECT_TRUE(TestUtility::findCounter(store_, \"newcounter0\"));\n  EXPECT_TRUE(TestUtility::findCounter(store_, \"newcounter1\"));\n  EXPECT_FALSE(TestUtility::findCounter(store_, \"newcounter2\"));\n  EXPECT_TRUE(TestUtility::findGauge(store_, \"newgauge1\"));\n  EXPECT_FALSE(TestUtility::findGauge(store_, \"newgauge2\"));\n}\n\n// Verify that if we create a stat in the child process which then gets merged\n// from the parent, that we retain the import-mode, accumulating the updated\n// value. https://github.com/envoyproxy/envoy/issues/7227\nTEST_F(StatMergerThreadLocalTest, RetainImportModeAfterMerge) {\n  Gauge& gauge = store_.gaugeFromString(\"mygauge\", Gauge::ImportMode::Accumulate);\n  gauge.set(42);\n  EXPECT_EQ(Gauge::ImportMode::Accumulate, gauge.importMode());\n  EXPECT_EQ(42, gauge.value());\n  {\n    StatMerger stat_merger(store_);\n    Protobuf::Map<std::string, uint64_t> counter_deltas;\n    Protobuf::Map<std::string, uint64_t> gauges;\n    gauges[\"mygauge\"] = 789;\n    stat_merger.mergeStats(counter_deltas, gauges);\n    EXPECT_EQ(789 + 42, gauge.value());\n  }\n  EXPECT_EQ(42, gauge.value());\n  EXPECT_EQ(Gauge::ImportMode::Accumulate, gauge.importMode());\n}\n\n// Verify that if we create a never import stat in the child process which then gets merged\n// from the parent, that we retain the import-mode, and don't accumulate the updated\n// value. 
https://github.com/envoyproxy/envoy/issues/7227\nTEST_F(StatMergerThreadLocalTest, RetainNeverImportModeAfterMerge) {\n  Gauge& gauge = store_.gaugeFromString(\"mygauge\", Gauge::ImportMode::NeverImport);\n  gauge.set(42);\n  EXPECT_EQ(Gauge::ImportMode::NeverImport, gauge.importMode());\n  EXPECT_EQ(42, gauge.value());\n  {\n    StatMerger stat_merger(store_);\n    Protobuf::Map<std::string, uint64_t> counter_deltas;\n    Protobuf::Map<std::string, uint64_t> gauges;\n    gauges[\"mygauge\"] = 789;\n    stat_merger.mergeStats(counter_deltas, gauges);\n  }\n  EXPECT_EQ(Gauge::ImportMode::NeverImport, gauge.importMode());\n  EXPECT_EQ(42, gauge.value());\n}\n\n} // namespace\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/stat_test_utility.cc",
    "content": "#include \"test/common/stats/stat_test_utility.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/memory/stats.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace TestUtil {\n\nvoid forEachSampleStat(int num_clusters, std::function<void(absl::string_view)> fn) {\n  // These are stats that are repeated for each cluster as of Oct 2018, with a\n  // very basic configuration with no traffic.\n  static const char* cluster_stats[] = {\"bind_errors\",\n                                        \"lb_healthy_panic\",\n                                        \"lb_local_cluster_not_ok\",\n                                        \"lb_recalculate_zone_structures\",\n                                        \"lb_subsets_active\",\n                                        \"lb_subsets_created\",\n                                        \"lb_subsets_fallback\",\n                                        \"lb_subsets_removed\",\n                                        \"lb_subsets_selected\",\n                                        \"lb_zone_cluster_too_small\",\n                                        \"lb_zone_no_capacity_left\",\n                                        \"lb_zone_number_differs\",\n                                        \"lb_zone_routing_all_directly\",\n                                        \"lb_zone_routing_cross_zone\",\n                                        \"lb_zone_routing_sampled\",\n                                        \"max_host_weight\",\n                                        \"membership_change\",\n                                        \"membership_healthy\",\n                                        \"membership_total\",\n                                        \"original_dst_host_invalid\",\n                                        \"retry_or_shadow_abandoned\",\n                                        \"update_attempt\",\n                                        \"update_empty\",\n                                    
    \"update_failure\",\n                                        \"update_no_rebuild\",\n                                        \"update_success\",\n                                        \"upstream_cx_active\",\n                                        \"upstream_cx_close_notify\",\n                                        \"upstream_cx_connect_attempts_exceeded\",\n                                        \"upstream_cx_connect_fail\",\n                                        \"upstream_cx_connect_timeout\",\n                                        \"upstream_cx_destroy\",\n                                        \"upstream_cx_destroy_local\",\n                                        \"upstream_cx_destroy_local_with_active_rq\",\n                                        \"upstream_cx_destroy_remote\",\n                                        \"upstream_cx_destroy_remote_with_active_rq\",\n                                        \"upstream_cx_destroy_with_active_rq\",\n                                        \"upstream_cx_http1_total\",\n                                        \"upstream_cx_http2_total\",\n                                        \"upstream_cx_idle_timeout\",\n                                        \"upstream_cx_max_requests\",\n                                        \"upstream_cx_none_healthy\",\n                                        \"upstream_cx_overflow\",\n                                        \"upstream_cx_protocol_error\",\n                                        \"upstream_cx_rx_bytes_buffered\",\n                                        \"upstream_cx_rx_bytes_total\",\n                                        \"upstream_cx_total\",\n                                        \"upstream_cx_tx_bytes_buffered\",\n                                        \"upstream_cx_tx_bytes_total\",\n                                        \"upstream_flow_control_backed_up_total\",\n                                        
\"upstream_flow_control_drained_total\",\n                                        \"upstream_flow_control_paused_reading_total\",\n                                        \"upstream_flow_control_resumed_reading_total\",\n                                        \"upstream_rq_active\",\n                                        \"upstream_rq_cancelled\",\n                                        \"upstream_rq_completed\",\n                                        \"upstream_rq_maintenance_mode\",\n                                        \"upstream_rq_pending_active\",\n                                        \"upstream_rq_pending_failure_eject\",\n                                        \"upstream_rq_pending_overflow\",\n                                        \"upstream_rq_pending_total\",\n                                        \"upstream_rq_per_try_timeout\",\n                                        \"upstream_rq_retry\",\n                                        \"upstream_rq_retry_overflow\",\n                                        \"upstream_rq_retry_success\",\n                                        \"upstream_rq_rx_reset\",\n                                        \"upstream_rq_timeout\",\n                                        \"upstream_rq_total\",\n                                        \"upstream_rq_tx_reset\",\n                                        \"version\"};\n\n  // These are the other stats that appear in the admin /stats request when made\n  // prior to any requests.\n  static const char* other_stats[] = {\"http.admin.downstream_cx_length_ms\",\n                                      \"http.admin.downstream_rq_time\",\n                                      \"http.ingress_http.downstream_cx_length_ms\",\n                                      \"http.ingress_http.downstream_rq_time\",\n                                      \"listener.0.0.0.0_40000.downstream_cx_length_ms\",\n                                      
\"listener.admin.downstream_cx_length_ms\"};\n\n  for (int cluster = 0; cluster <= num_clusters; ++cluster) {\n    for (const auto& cluster_stat : cluster_stats) {\n      fn(absl::StrCat(\"cluster.service_\", cluster, \".\", cluster_stat));\n    }\n  }\n  for (const auto& other_stat : other_stats) {\n    fn(other_stat);\n  }\n}\n\nMemoryTest::Mode MemoryTest::mode() {\n#if !(defined(TCMALLOC) || defined(GPERFTOOLS_TCMALLOC)) || defined(ENVOY_MEMORY_DEBUG_ENABLED)\n  // We can only test absolute memory usage if the malloc library is a known\n  // quantity. This decision is centralized here. As the preferred malloc\n  // library for Envoy is TCMALLOC that's what we test for here. If we switch\n  // to a different malloc library than we'd have to re-evaluate all the\n  // thresholds in the tests referencing MemoryTest.\n  return Mode::Disabled;\n#else\n  // Even when using TCMALLOC is defined, it appears that\n  // Memory::Stats::totalCurrentlyAllocated() does not work as expected\n  // on some platforms, so try to force-allocate some heap memory\n  // and determine whether we can measure it.\n  const size_t start_mem = Memory::Stats::totalCurrentlyAllocated();\n  volatile std::unique_ptr<std::string> long_string = std::make_unique<std::string>(\n      \"more than 22 chars to exceed libc++ short-string optimization\");\n  const size_t end_mem = Memory::Stats::totalCurrentlyAllocated();\n  bool can_measure_memory = end_mem > start_mem;\n\n  if (getenv(\"ENVOY_MEMORY_TEST_EXACT\") != nullptr) { // Set in \"ci/do_ci.sh\" for 'release' tests.\n    RELEASE_ASSERT(can_measure_memory,\n                   \"$ENVOY_MEMORY_TEST_EXACT is set for canonical memory measurements, \"\n                   \"but memory measurement looks broken\");\n    return Mode::Canonical;\n  }\n\n  // Different versions of STL and other compiler/architecture differences may\n  // also impact memory usage, so when not compiling with MEMORY_TEST_EXACT,\n  // memory comparisons must be given some 
slack. There have recently emerged\n  // some memory-allocation differences between development and Envoy CI and\n  // Bazel CI (which compiles Envoy as a test of Bazel).\n  return can_measure_memory ? Mode::Approximate : Mode::Disabled;\n#endif\n}\n\nCounter& TestStore::counterFromString(const std::string& name) {\n  Counter*& counter_ref = counter_map_[name];\n  if (counter_ref == nullptr) {\n    counter_ref = &IsolatedStoreImpl::counterFromString(name);\n  }\n  return *counter_ref;\n}\n\nCounter& TestStore::counterFromStatNameWithTags(const StatName& stat_name,\n                                                StatNameTagVectorOptConstRef tags) {\n  std::string name = symbolTable().toString(stat_name);\n  Counter*& counter_ref = counter_map_[name];\n  if (counter_ref == nullptr) {\n    counter_ref = &IsolatedStoreImpl::counterFromStatNameWithTags(stat_name, tags);\n  } else {\n    // Ensures StatNames with the same string representation are specified\n    // consistently using symbolic/dynamic components on every access.\n    ASSERT(counter_ref->statName() == stat_name, \"Inconsistent dynamic vs symbolic \"\n                                                 \"stat name specification\");\n  }\n  return *counter_ref;\n}\n\nGauge& TestStore::gaugeFromString(const std::string& name, Gauge::ImportMode mode) {\n  Gauge*& gauge_ref = gauge_map_[name];\n  if (gauge_ref == nullptr) {\n    gauge_ref = &IsolatedStoreImpl::gaugeFromString(name, mode);\n  }\n  return *gauge_ref;\n}\n\nGauge& TestStore::gaugeFromStatNameWithTags(const StatName& stat_name,\n                                            StatNameTagVectorOptConstRef tags,\n                                            Gauge::ImportMode mode) {\n  std::string name = symbolTable().toString(stat_name);\n  Gauge*& gauge_ref = gauge_map_[name];\n  if (gauge_ref == nullptr) {\n    gauge_ref = &IsolatedStoreImpl::gaugeFromStatNameWithTags(stat_name, tags, mode);\n  } else {\n    ASSERT(gauge_ref->statName() == stat_name, 
\"Inconsistent dynamic vs symbolic \"\n                                               \"stat name specification\");\n  }\n  return *gauge_ref;\n}\n\nHistogram& TestStore::histogramFromString(const std::string& name, Histogram::Unit unit) {\n  Histogram*& histogram_ref = histogram_map_[name];\n  if (histogram_ref == nullptr) {\n    histogram_ref = &IsolatedStoreImpl::histogramFromString(name, unit);\n  }\n  return *histogram_ref;\n}\n\nHistogram& TestStore::histogramFromStatNameWithTags(const StatName& stat_name,\n                                                    StatNameTagVectorOptConstRef tags,\n                                                    Histogram::Unit unit) {\n  std::string name = symbolTable().toString(stat_name);\n  Histogram*& histogram_ref = histogram_map_[name];\n  if (histogram_ref == nullptr) {\n    histogram_ref = &IsolatedStoreImpl::histogramFromStatNameWithTags(stat_name, tags, unit);\n  } else {\n    ASSERT(histogram_ref->statName() == stat_name, \"Inconsistent dynamic vs symbolic \"\n                                                   \"stat name specification\");\n  }\n  return *histogram_ref;\n}\n\ntemplate <class StatType>\nusing StatTypeOptConstRef = absl::optional<std::reference_wrapper<const StatType>>;\n\ntemplate <class StatType>\nstatic StatTypeOptConstRef<StatType>\nfindByString(const std::string& name, const absl::flat_hash_map<std::string, StatType*>& map) {\n  StatTypeOptConstRef<StatType> ret;\n  auto iter = map.find(name);\n  if (iter != map.end()) {\n    ret = *iter->second;\n  }\n  return ret;\n}\n\nCounterOptConstRef TestStore::findCounterByString(const std::string& name) const {\n  return findByString<Counter>(name, counter_map_);\n}\n\nGaugeOptConstRef TestStore::findGaugeByString(const std::string& name) const {\n  return findByString<Gauge>(name, gauge_map_);\n}\n\nHistogramOptConstRef TestStore::findHistogramByString(const std::string& name) const {\n  return findByString<Histogram>(name, histogram_map_);\n}\n\n// 
TODO(jmarantz): this utility is intended to be used both for unit tests\n// and fuzz tests. But those have different checking macros, e.g. EXPECT_EQ vs\n// FUZZ_ASSERT.\nstd::vector<uint8_t> serializeDeserializeNumber(uint64_t number) {\n  uint64_t num_bytes = SymbolTableImpl::Encoding::encodingSizeBytes(number);\n  const uint64_t block_size = 10;\n  MemBlockBuilder<uint8_t> mem_block(block_size);\n  SymbolTableImpl::Encoding::appendEncoding(number, mem_block);\n  num_bytes += mem_block.capacityRemaining();\n  RELEASE_ASSERT(block_size == num_bytes, absl::StrCat(\"Encoding size issue: block_size=\",\n                                                       block_size, \" num_bytes=\", num_bytes));\n  absl::Span<uint8_t> span = mem_block.span();\n  RELEASE_ASSERT(number == SymbolTableImpl::Encoding::decodeNumber(span.data()).first, \"\");\n  return std::vector<uint8_t>(span.data(), span.data() + span.size());\n}\n\nvoid serializeDeserializeString(absl::string_view in) {\n  MemBlockBuilder<uint8_t> mem_block(SymbolTableImpl::Encoding::totalSizeBytes(in.size()));\n  SymbolTableImpl::Encoding::appendEncoding(in.size(), mem_block);\n  const uint8_t* data = reinterpret_cast<const uint8_t*>(in.data());\n  mem_block.appendData(absl::MakeSpan(data, data + in.size()));\n  RELEASE_ASSERT(mem_block.capacityRemaining() == 0, \"\");\n  absl::Span<uint8_t> span = mem_block.span();\n  const std::pair<uint64_t, uint64_t> number_consumed =\n      SymbolTableImpl::Encoding::decodeNumber(span.data());\n  RELEASE_ASSERT(number_consumed.first == in.size(), absl::StrCat(\"size matches: \", in));\n  span.remove_prefix(number_consumed.second);\n  const absl::string_view out(reinterpret_cast<const char*>(span.data()), span.size());\n  RELEASE_ASSERT(in == out, absl::StrCat(\"'\", in, \"' != '\", out, \"'\"));\n}\n\n} // namespace TestUtil\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/stat_test_utility.h",
    "content": "#pragma once\n\n#include \"envoy/stats/store.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/memory/stats.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace TestUtil {\n\n/**\n * Calls fn for a sampling of plausible stat names given a number of clusters.\n * This is intended for memory and performance benchmarking, where the syntax of\n * the names may be material to the measurements. Here we are deliberately not\n * claiming this is a complete stat set, which will change over time. Instead we\n * are aiming for consistency over time in order to create unit tests against\n * fixed memory budgets.\n *\n * @param num_clusters the number of clusters for which to generate stats.\n * @param fn the function to call with every stat name.\n */\nvoid forEachSampleStat(int num_clusters, std::function<void(absl::string_view)> fn);\n\n// Tracks memory consumption over a span of time. Test classes instantiate a\n// MemoryTest object to start measuring heap memory, and call consumedBytes() to\n// determine how many bytes have been consumed since the class was instantiated.\n//\n// That value should then be passed to EXPECT_MEMORY_EQ and EXPECT_MEMORY_LE,\n// defined below, as the interpretation of this value can differ based on\n// platform and compilation mode.\nclass MemoryTest {\npublic:\n  // There are 3 cases:\n  //   1. Memory usage API is available, and is built using with a canonical\n  //      toolchain, enabling exact comparisons against an expected number of\n  //      bytes consumed. The canonical environment is Envoy CI release builds.\n  //   2. Memory usage API is available, but the current build may subtly differ\n  //      in memory consumption from #1. We'd still like to track memory usage\n  //      but it needs to be approximate.\n  //   3. Memory usage API is not available. 
In this case, the code is executed\n  //      but no testing occurs.\n  enum class Mode {\n    Disabled,    // No memory usage data available on platform.\n    Canonical,   // Memory usage is available, and current platform is canonical.\n    Approximate, // Memory usage is available, but variances form canonical expected.\n  };\n\n  MemoryTest() : memory_at_construction_(Memory::Stats::totalCurrentlyAllocated()) {}\n\n  /**\n   * @return the memory execution testability mode for the current compiler, architecture,\n   *         and compile flags.\n   */\n  static Mode mode();\n\n  size_t consumedBytes() const {\n    // Note that this subtraction of two unsigned numbers will yield a very\n    // large number if memory has actually shrunk since construction. In that\n    // case, the EXPECT_MEMORY_EQ and EXPECT_MEMORY_LE macros will both report\n    // failures, as desired, though the failure log may look confusing.\n    //\n    // Note also that tools like ubsan may report this as an unsigned integer\n    // underflow, if run with -fsanitize=unsigned-integer-overflow, though\n    // strictly speaking this is legal and well-defined for unsigned integers.\n    return Memory::Stats::totalCurrentlyAllocated() - memory_at_construction_;\n  }\n\nprivate:\n  const size_t memory_at_construction_;\n};\n\n// Helper class to use in lieu of an actual Stats::Store for doing lookups by\n// name. The intent is to remove the deprecated Scope::counter(const\n// std::string&) methods, and always use this class for accessing stats by\n// name.\n//\n// This string-based lookup wrapper is needed because the underlying name\n// representation, StatName, has multiple ways to represent the same string,\n// depending on which name segments are symbolic (known at compile time), and\n// which are dynamic (e.g. based on the request, e.g. request-headers, ssl\n// cipher, grpc method, etc). 
While the production Store implementations\n// use the StatName as a key, we must use strings in tests to avoid forcing\n// the tests to construct the StatName using the same pattern of dynamic\n// and symbol strings as production.\nclass TestStore : public IsolatedStoreImpl {\npublic:\n  TestStore() = default;\n\n  // Constructs a store using a symbol table, allowing for explicit sharing.\n  explicit TestStore(SymbolTable& symbol_table) : IsolatedStoreImpl(symbol_table) {}\n\n  Counter& counter(const std::string& name) { return counterFromString(name); }\n  Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) {\n    return gaugeFromString(name, import_mode);\n  }\n  Histogram& histogram(const std::string& name, Histogram::Unit unit) {\n    return histogramFromString(name, unit);\n  }\n  TextReadout& textReadout(const std::string& name) { return textReadoutFromString(name); }\n\n  // Override the Stats::Store methods for name-based lookup of stats, to use\n  // and update the string-maps in this class. 
Note that IsolatedStoreImpl\n  // does not support deletion of stats, so we only have to track additions\n  // to keep the maps up-to-date.\n  //\n  // Stats::Scope\n  Counter& counterFromString(const std::string& name) override;\n  Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override;\n  Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override;\n  Counter& counterFromStatNameWithTags(const StatName& name,\n                                       StatNameTagVectorOptConstRef tags) override;\n  Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                   Gauge::ImportMode import_mode) override;\n  Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                           Histogram::Unit unit) override;\n\n  // New APIs available for tests.\n  CounterOptConstRef findCounterByString(const std::string& name) const;\n  GaugeOptConstRef findGaugeByString(const std::string& name) const;\n  HistogramOptConstRef findHistogramByString(const std::string& name) const;\n\nprivate:\n  absl::flat_hash_map<std::string, Counter*> counter_map_;\n  absl::flat_hash_map<std::string, Gauge*> gauge_map_;\n  absl::flat_hash_map<std::string, Histogram*> histogram_map_;\n};\n\n// Compares the memory consumed against an exact expected value, but only on\n// canonical platforms, or when the expected value is zero. Canonical platforms\n// currently include only for 'release' tests in ci. 
On other platforms an info\n// log is emitted, indicating that the test is being skipped.\n#define EXPECT_MEMORY_EQ(consumed_bytes, expected_value)                                           \\\n  do {                                                                                             \\\n    if (expected_value == 0 ||                                                                     \\\n        Stats::TestUtil::MemoryTest::mode() == Stats::TestUtil::MemoryTest::Mode::Canonical) {     \\\n      EXPECT_EQ(consumed_bytes, expected_value);                                                   \\\n    } else {                                                                                       \\\n      ENVOY_LOG_MISC(info,                                                                         \\\n                     \"Skipping exact memory test of actual={} versus expected={} \"                 \\\n                     \"bytes as platform is non-canonical\",                                         \\\n                     consumed_bytes, expected_value);                                              \\\n    }                                                                                              \\\n  } while (false)\n\n// Compares the memory consumed against an expected upper bound, but only\n// on platforms where memory consumption can be measured via API. This is\n// currently enabled only for builds with TCMALLOC. 
On other platforms, an info\n// log is emitted, indicating that the test is being skipped.\n#define EXPECT_MEMORY_LE(consumed_bytes, upper_bound)                                              \\\n  do {                                                                                             \\\n    if (Stats::TestUtil::MemoryTest::mode() != Stats::TestUtil::MemoryTest::Mode::Disabled) {      \\\n      EXPECT_LE(consumed_bytes, upper_bound);                                                      \\\n      EXPECT_GT(consumed_bytes, 0);                                                                \\\n    } else {                                                                                       \\\n      ENVOY_LOG_MISC(                                                                              \\\n          info, \"Skipping upper-bound memory test against {} bytes as platform lacks tcmalloc\",    \\\n          upper_bound);                                                                            \\\n    }                                                                                              \\\n  } while (false)\n\n// Serializes a number into a uint8_t array, and check that it de-serializes to\n// the same number. The serialized number is also returned, which can be\n// checked in unit tests, but ignored in fuzz tests.\nstd::vector<uint8_t> serializeDeserializeNumber(uint64_t number);\n\n// Serializes a string into a MemBlock and then decodes it.\nvoid serializeDeserializeString(absl::string_view in);\n\n} // namespace TestUtil\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/stat_test_utility_test.cc",
    "content": "#include <string>\n\n#include \"test/common/stats/stat_test_utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace {\n\nclass StatTestUtilityTest : public testing::Test {\nprotected:\n  StatTestUtilityTest()\n      : test_store_(symbol_table_), dynamic_(symbol_table_), symbolic_(symbol_table_) {}\n\n  SymbolTableImpl symbol_table_;\n  TestUtil::TestStore test_store_;\n  StatNameDynamicPool dynamic_;\n  StatNamePool symbolic_;\n};\n\nTEST_F(StatTestUtilityTest, Counters) {\n  test_store_.counterFromStatName(dynamic_.add(\"dynamic.stat\")).inc();\n  test_store_.counterFromStatName(symbolic_.add(\"symbolic.stat\")).inc();\n  EXPECT_EQ(1, test_store_.counter(\"dynamic.stat\").value());\n  EXPECT_FALSE(test_store_.findCounterByString(\"dynamic.stat2\"));\n  EXPECT_EQ(1, test_store_.counter(\"symbolic.stat\").value());\n  EXPECT_FALSE(test_store_.findCounterByString(\"symbolic.stat2\"));\n}\n\nTEST_F(StatTestUtilityTest, Gauges) {\n  test_store_.counterFromStatName(dynamic_.add(\"dynamic.stat\")).inc();\n  test_store_.counterFromStatName(symbolic_.add(\"symbolic.stat\")).inc();\n  EXPECT_EQ(1, test_store_.counter(\"dynamic.stat\").value());\n  EXPECT_FALSE(test_store_.findGaugeByString(\"dynamic.stat2\"));\n  EXPECT_EQ(1, test_store_.counter(\"symbolic.stat\").value());\n  EXPECT_FALSE(test_store_.findGaugeByString(\"symbolic.stat2\"));\n}\n\nTEST_F(StatTestUtilityTest, Histograms) {\n  test_store_.counterFromStatName(dynamic_.add(\"dynamic.stat\")).inc();\n  test_store_.counterFromStatName(symbolic_.add(\"symbolic.stat\")).inc();\n  EXPECT_EQ(1, test_store_.counter(\"dynamic.stat\").value());\n  EXPECT_FALSE(test_store_.findHistogramByString(\"dynamic.stat2\"));\n  EXPECT_EQ(1, test_store_.counter(\"symbolic.stat\").value());\n  EXPECT_FALSE(test_store_.findHistogramByString(\"symbolic.stat2\"));\n}\n\n} // namespace\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/stats_matcher_impl_test.cc",
    "content": "#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n\n#include \"common/stats/stats_matcher_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass StatsMatcherTest : public testing::Test {\nprotected:\n  envoy::type::matcher::v3::StringMatcher* inclusionList() {\n    return stats_config_.mutable_stats_matcher()->mutable_inclusion_list()->add_patterns();\n  }\n  envoy::type::matcher::v3::StringMatcher* exclusionList() {\n    return stats_config_.mutable_stats_matcher()->mutable_exclusion_list()->add_patterns();\n  }\n  void rejectAll(const bool should_reject) {\n    stats_config_.mutable_stats_matcher()->set_reject_all(should_reject);\n  }\n  void initMatcher() { stats_matcher_impl_ = std::make_unique<StatsMatcherImpl>(stats_config_); }\n  void expectAccepted(std::vector<std::string> expected_to_pass) {\n    for (const auto& stat_name : expected_to_pass) {\n      EXPECT_FALSE(stats_matcher_impl_->rejects(stat_name)) << \"Accepted: \" << stat_name;\n    }\n  }\n  void expectDenied(std::vector<std::string> expected_to_fail) {\n    for (const auto& stat_name : expected_to_fail) {\n      EXPECT_TRUE(stats_matcher_impl_->rejects(stat_name)) << \"Rejected: \" << stat_name;\n    }\n  }\n\n  std::unique_ptr<StatsMatcherImpl> stats_matcher_impl_;\n\nprivate:\n  envoy::config::metrics::v3::StatsConfig stats_config_;\n};\n\nTEST_F(StatsMatcherTest, CheckDefault) {\n  // With no set fields, everything should be allowed through.\n  initMatcher();\n  expectAccepted({\"foo\", \"bar\", \"foo.bar\", \"foo.bar.baz\", \"foobarbaz\"});\n  EXPECT_TRUE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Across-the-board matchers.\n\nTEST_F(StatsMatcherTest, CheckRejectAll) {\n  // With reject_all, nothing should be allowed through.\n  rejectAll(true);\n  initMatcher();\n  expectDenied({\"foo\", \"bar\", 
\"foo.bar\", \"foo.bar.baz\", \"foobarbaz\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_TRUE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckNotRejectAll) {\n  // With !reject_all, everything should be allowed through.\n  rejectAll(false);\n  initMatcher();\n  expectAccepted({\"foo\", \"bar\", \"foo.bar\", \"foo.bar.baz\", \"foobarbaz\"});\n  EXPECT_TRUE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckIncludeAll) {\n  inclusionList()->set_hidden_envoy_deprecated_regex(\".*\");\n  initMatcher();\n  expectAccepted({\"foo\", \"bar\", \"foo.bar\", \"foo.bar.baz\"});\n  // It really does accept all, but the impl doesn't know it.\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckExcludeAll) {\n  exclusionList()->set_hidden_envoy_deprecated_regex(\".*\");\n  initMatcher();\n  expectDenied({\"foo\", \"bar\", \"foo.bar\", \"foo.bar.baz\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Single exact matchers.\n\nTEST_F(StatsMatcherTest, CheckIncludeExact) {\n  inclusionList()->set_exact(\"abc\");\n  initMatcher();\n  expectAccepted({\"abc\"});\n  expectDenied({\"abcd\", \"abc.d\", \"d.abc\", \"dabc\", \"ab\", \"ac\", \"abcc\", \"Abc\", \"aBc\", \"abC\", \"abc.\",\n                \".abc\", \"ABC\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckExcludeExact) {\n  exclusionList()->set_exact(\"abc\");\n  initMatcher();\n  expectAccepted({\"abcd\", \"abc.d\", \"d.abc\", \"dabc\", \"ab\", \"ac\", \"abcc\", \"Abc\", \"aBc\", \"abC\", \"abc.\",\n                  \".abc\", \"ABC\"});\n  expectDenied({\"abc\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  
EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Single prefix matchers.\n\nTEST_F(StatsMatcherTest, CheckIncludePrefix) {\n  inclusionList()->set_prefix(\"abc\");\n  initMatcher();\n  expectAccepted({\"abc\", \"abc.foo\", \"abcfoo\"});\n  expectDenied({\"ABC\", \"ABC.foo\", \"ABCfoo\", \"foo\", \"abb\", \"a.b.c\", \"_abc\", \"foo.abc\", \"fooabc\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckExcludePrefix) {\n  exclusionList()->set_prefix(\"abc\");\n  initMatcher();\n  expectAccepted({\"ABC\", \"ABC.foo\", \"ABCfoo\", \"foo\", \"abb\", \"a.b.c\", \"_abc\", \"foo.abc\", \"fooabc\"});\n  expectDenied({\"abc\", \"abc.foo\", \"abcfoo\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Single suffix matchers.\n\nTEST_F(StatsMatcherTest, CheckIncludeSuffix) {\n  inclusionList()->set_suffix(\"abc\");\n  initMatcher();\n  expectAccepted({\"abc\", \"foo.abc\", \"fooabc\"});\n  expectDenied({\"ABC\", \"foo.ABC\", \"fooABC\", \"foo\", \"abb\", \"a.b.c\", \"abc_\", \"abc.foo\", \"abcfoo\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckExcludeSuffix) {\n  exclusionList()->set_suffix(\"abc\");\n  initMatcher();\n  expectAccepted({\"ABC\", \"foo.ABC\", \"fooABC\", \"foo\", \"abb\", \"a.b.c\", \"abc_\", \"abc.foo\", \"abcfoo\"});\n  expectDenied({\"abc\", \"foo.abc\", \"fooabc\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Single regex matchers.\n\nTEST_F(StatsMatcherTest, CheckIncludeRegex) {\n  inclusionList()->set_hidden_envoy_deprecated_regex(\".*envoy.*\");\n  initMatcher();\n  expectAccepted({\"envoy.matchers.requests\", \"stats.envoy.2xx\", \"regex.envoy.matchers\"});\n  expectDenied({\"foo\", \"Envoy\", \"EnvoyProxy\"});\n  
EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckExcludeRegex) {\n  exclusionList()->set_hidden_envoy_deprecated_regex(\".*envoy.*\");\n  initMatcher();\n  expectAccepted({\"foo\", \"Envoy\", \"EnvoyProxy\"});\n  expectDenied({\"envoy.matchers.requests\", \"stats.envoy.2xx\", \"regex.envoy.matchers\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Multiple exact matchers.\n\nTEST_F(StatsMatcherTest, CheckMultipleIncludeExact) {\n  inclusionList()->set_exact(\"foo\");\n  inclusionList()->set_exact(\"bar\");\n  initMatcher();\n  expectAccepted({\"foo\", \"bar\"});\n  expectDenied({\"foobar\", \"barfoo\", \"fo\", \"ba\", \"foo.bar\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckMultipleExcludeExact) {\n  exclusionList()->set_exact(\"foo\");\n  exclusionList()->set_exact(\"bar\");\n  initMatcher();\n  expectAccepted({\"foobar\", \"barfoo\", \"fo\", \"ba\", \"foo.bar\"});\n  expectDenied({\"foo\", \"bar\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Multiple prefix matchers.\n\nTEST_F(StatsMatcherTest, CheckMultipleIncludePrefix) {\n  inclusionList()->set_prefix(\"foo\");\n  inclusionList()->set_prefix(\"bar\");\n  initMatcher();\n  expectAccepted({\"foo\", \"foo.abc\", \"bar\", \"bar.abc\"});\n  expectDenied({\".foo\", \"abc.foo\", \"BAR\", \"_bar\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckMultipleExcludePrefix) {\n  exclusionList()->set_prefix(\"foo\");\n  exclusionList()->set_prefix(\"bar\");\n  initMatcher();\n  expectAccepted({\".foo\", \"abc.foo\", \"BAR\", \"_bar\"});\n  expectDenied({\"foo\", \"foo.abc\", \"bar\", \"bar.abc\"});\n  
EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Multiple suffix matchers.\n\nTEST_F(StatsMatcherTest, CheckMultipleIncludeSuffix) {\n  inclusionList()->set_suffix(\"spam\");\n  inclusionList()->set_suffix(\"eggs\");\n  initMatcher();\n  expectAccepted(\n      {\"requests.for.spam\", \"requests.for.eggs\", \"spam\", \"eggs\", \"cannedspam\", \"fresheggs\"});\n  expectDenied({\"Spam\", \"EGGS\", \"spam_\", \"eggs_\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckMultipleExcludeSuffix) {\n  exclusionList()->set_suffix(\"spam\");\n  exclusionList()->set_suffix(\"eggs\");\n  initMatcher();\n  expectAccepted({\"Spam\", \"EGGS\", \"spam_\", \"eggs_\"});\n  expectDenied(\n      {\"requests.for.spam\", \"requests.for.eggs\", \"spam\", \"eggs\", \"cannedspam\", \"fresheggs\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Multiple regex matchers.\n\nTEST_F(StatsMatcherTest, CheckMultipleIncludeRegex) {\n  inclusionList()->set_hidden_envoy_deprecated_regex(\".*envoy.*\");\n  inclusionList()->set_hidden_envoy_deprecated_regex(\".*absl.*\");\n  initMatcher();\n  expectAccepted({\"envoy.matchers.requests\", \"stats.absl.2xx\", \"absl.envoy.matchers\"});\n  expectDenied({\"Abseil\", \"EnvoyProxy\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckMultipleExcludeRegex) {\n  exclusionList()->set_hidden_envoy_deprecated_regex(\".*envoy.*\");\n  exclusionList()->set_hidden_envoy_deprecated_regex(\".*absl.*\");\n  initMatcher();\n  expectAccepted({\"Abseil\", \"EnvoyProxy\"});\n  expectDenied({\"envoy.matchers.requests\", \"stats.absl.2xx\", \"absl.envoy.matchers\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  
EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n// Multiple prefix/suffix/regex matchers.\n//\n// Matchers are \"any_of\", so strings matching any of the rules are expected to pass or fail,\n// whichever the case may be.\n\nTEST_F(StatsMatcherTest, CheckMultipleAssortedInclusionMatchers) {\n  inclusionList()->set_hidden_envoy_deprecated_regex(\".*envoy.*\");\n  inclusionList()->set_suffix(\"requests\");\n  inclusionList()->set_exact(\"regex\");\n  initMatcher();\n  expectAccepted({\"envoy.matchers.requests\", \"requests.for.envoy\", \"envoyrequests\", \"regex\"});\n  expectDenied({\"requestsEnvoy\", \"EnvoyProxy\", \"foo\", \"regex_etc\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\nTEST_F(StatsMatcherTest, CheckMultipleAssortedExclusionMatchers) {\n  exclusionList()->set_hidden_envoy_deprecated_regex(\".*envoy.*\");\n  exclusionList()->set_suffix(\"requests\");\n  exclusionList()->set_exact(\"regex\");\n  initMatcher();\n  expectAccepted({\"requestsEnvoy\", \"EnvoyProxy\", \"foo\", \"regex_etc\"});\n  expectDenied({\"envoy.matchers.requests\", \"requests.for.envoy\", \"envoyrequests\", \"regex\"});\n  EXPECT_FALSE(stats_matcher_impl_->acceptsAll());\n  EXPECT_FALSE(stats_matcher_impl_->rejectsAll());\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/symbol_table_corpus/clusterfuzz-testcase-minimized-symbol_table_fuzz_test-5645970620809216",
    "content": " "
  },
  {
    "path": "test/common/stats/symbol_table_corpus/example1",
    "content": "one\n\n"
  },
  {
    "path": "test/common/stats/symbol_table_corpus/example2",
    "content": "one.two\n\n\n"
  },
  {
    "path": "test/common/stats/symbol_table_corpus/example3",
    "content": "one.two.three\n"
  },
  {
    "path": "test/common/stats/symbol_table_fuzz_test.cc",
    "content": "#include <algorithm>\n\n#include \"common/common/assert.h\"\n#include \"common/common/base64.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n\nnamespace Envoy {\nnamespace Stats {\nnamespace Fuzz {\n\n// Fuzzer for symbol tables.\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  FuzzedDataProvider provider(buf, len);\n  SymbolTableImpl symbol_table;\n  StatNamePool pool(symbol_table);\n  StatNameDynamicPool dynamic_pool(symbol_table);\n\n  while (provider.remaining_bytes() != 0) {\n    std::string next_data = provider.ConsumeRandomLengthString(provider.remaining_bytes());\n    StatName stat_name = pool.add(next_data);\n    StatName dynamic_stat_name = dynamic_pool.add(next_data);\n\n    // Encode the string directly first.\n    TestUtil::serializeDeserializeString(next_data);\n\n    // Grab the first few bytes from next_data to synthesize together a random uint64_t.\n    if (next_data.size() > 1) {\n      uint32_t num_bytes = (next_data[0] % 8) + 1; // random number between 1 and 8 inclusive.\n      num_bytes = std::min(static_cast<uint32_t>(next_data.size() - 1),\n                           num_bytes); // restrict number up to the size of next_data\n      uint64_t number = 0;\n      for (uint32_t i = 0; i < num_bytes; ++i) {\n        number = 256 * number + next_data[i + 1];\n      }\n      TestUtil::serializeDeserializeNumber(number);\n    }\n\n    // We can add stat-names with trailing dots, but note that they will be\n    // trimmed by the Symbol Table implementation, so we must trim the input\n    // string before comparing.\n    absl::string_view trimmed_fuzz_data = StringUtil::removeTrailingCharacters(next_data, '.');\n    FUZZ_ASSERT(trimmed_fuzz_data == symbol_table.toString(stat_name));\n    FUZZ_ASSERT(trimmed_fuzz_data == symbol_table.toString(dynamic_stat_name));\n\n    // The 'join' tests only work if the 
trimmed fuzz data is not empty.\n    if (trimmed_fuzz_data.empty()) {\n      continue;\n    }\n\n    std::string joined = absl::StrCat(trimmed_fuzz_data, \".\", trimmed_fuzz_data);\n    auto join = [joined](SymbolTable& table, StatName name1, StatName name2) -> bool {\n      bool ok = true;\n      SymbolTable::StoragePtr storage = table.join({name1, name2});\n      StatName stat_name(storage.get());\n      std::string name1_name2 = table.toString(stat_name);\n      if (joined.size() != name1_name2.size()) {\n        std::cerr << \"lengths don't match: \" << joined.size() << \" != \" << name1_name2.size()\n                  << std::endl;\n        ok = false;\n      } else {\n        for (uint32_t i = 0; i < joined.size(); ++i) {\n          if (joined[i] != name1_name2[i]) {\n            ENVOY_LOG_MISC(error, \"char [{}] mismatch: {}({}) != {}({})\", i, joined[i],\n                           static_cast<uint32_t>(joined[i]), name1_name2[i],\n                           static_cast<uint32_t>(name1_name2[i]));\n            ok = false;\n          }\n        }\n      }\n      return ok;\n    };\n    FUZZ_ASSERT(join(symbol_table, stat_name, stat_name));\n    FUZZ_ASSERT(join(symbol_table, stat_name, dynamic_stat_name));\n    FUZZ_ASSERT(join(symbol_table, dynamic_stat_name, dynamic_stat_name));\n    FUZZ_ASSERT(join(symbol_table, dynamic_stat_name, stat_name));\n  }\n}\n\n} // namespace Fuzz\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/symbol_table_impl_test.cc",
    "content": "#include <string>\n\n#include \"common/common/macros.h\"\n#include \"common/common/mutex_tracer_impl.h\"\n#include \"common/memory/stats.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/hash/hash_testing.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/synchronization/blocking_counter.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass StatNameTest : public testing::Test {\nprotected:\n  StatNameTest() : pool_(table_) {}\n  ~StatNameTest() override { clearStorage(); }\n\n  void clearStorage() {\n    pool_.clear();\n    EXPECT_EQ(0, table_.numSymbols());\n  }\n\n  SymbolVec getSymbols(StatName stat_name) {\n    return SymbolTableImpl::Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize());\n  }\n  Symbol monotonicCounter() { return table_.monotonicCounter(); }\n  std::string encodeDecode(absl::string_view stat_name) {\n    return table_.toString(makeStat(stat_name));\n  }\n\n  StatName makeStat(absl::string_view name) { return pool_.add(name); }\n\n  std::vector<uint8_t> serializeDeserialize(uint64_t number) {\n    return TestUtil::serializeDeserializeNumber(number);\n  }\n\n  SymbolTableImpl table_;\n  StatNamePool pool_;\n};\n\nTEST_F(StatNameTest, SerializeBytes) {\n  EXPECT_EQ(std::vector<uint8_t>{1}, serializeDeserialize(1));\n  EXPECT_EQ(std::vector<uint8_t>{127}, serializeDeserialize(127));\n  EXPECT_EQ((std::vector<uint8_t>{128, 1}), serializeDeserialize(128));\n  EXPECT_EQ((std::vector<uint8_t>{129, 1}), serializeDeserialize(129));\n  EXPECT_EQ((std::vector<uint8_t>{255, 1}), serializeDeserialize(255));\n\n  // This is the example from the image in stats.md.\n  EXPECT_EQ((std::vector<uint8_t>{0x80 + 5, 2}), serializeDeserialize(261));\n\n  EXPECT_EQ((std::vector<uint8_t>{255, 127}), serializeDeserialize(16383));\n  
EXPECT_EQ((std::vector<uint8_t>{128, 128, 1}), serializeDeserialize(16384));\n  EXPECT_EQ((std::vector<uint8_t>{129, 128, 1}), serializeDeserialize(16385));\n\n  auto power2 = [](uint32_t exp) -> uint64_t {\n    uint64_t one = 1;\n    return one << exp;\n  };\n  EXPECT_EQ((std::vector<uint8_t>{255, 255, 127}), serializeDeserialize(power2(21) - 1));\n  EXPECT_EQ((std::vector<uint8_t>{128, 128, 128, 1}), serializeDeserialize(power2(21)));\n  EXPECT_EQ((std::vector<uint8_t>{129, 128, 128, 1}), serializeDeserialize(power2(21) + 1));\n  EXPECT_EQ((std::vector<uint8_t>{255, 255, 255, 127}), serializeDeserialize(power2(28) - 1));\n  EXPECT_EQ((std::vector<uint8_t>{128, 128, 128, 128, 1}), serializeDeserialize(power2(28)));\n  EXPECT_EQ((std::vector<uint8_t>{129, 128, 128, 128, 1}), serializeDeserialize(power2(28) + 1));\n  EXPECT_EQ((std::vector<uint8_t>{255, 255, 255, 255, 127}), serializeDeserialize(power2(35) - 1));\n  EXPECT_EQ((std::vector<uint8_t>{128, 128, 128, 128, 128, 1}), serializeDeserialize(power2(35)));\n  EXPECT_EQ((std::vector<uint8_t>{129, 128, 128, 128, 128, 1}),\n            serializeDeserialize(power2(35) + 1));\n\n  for (uint32_t i = 0; i < 17000; ++i) {\n    serializeDeserialize(i);\n  }\n}\n\nTEST_F(StatNameTest, SerializeStrings) {\n  TestUtil::serializeDeserializeString(\"\");\n  TestUtil::serializeDeserializeString(\"Hello, world!\");\n  TestUtil::serializeDeserializeString(\"embedded\\0\\nul\");\n  TestUtil::serializeDeserializeString(std::string(200, 'a'));\n  TestUtil::serializeDeserializeString(std::string(2000, 'a'));\n  TestUtil::serializeDeserializeString(std::string(20000, 'a'));\n  TestUtil::serializeDeserializeString(std::string(200000, 'a'));\n  TestUtil::serializeDeserializeString(std::string(2000000, 'a'));\n  TestUtil::serializeDeserializeString(std::string(20000000, 'a'));\n}\n\nTEST_F(StatNameTest, AllocFree) { encodeDecode(\"hello.world\"); }\n\nTEST_F(StatNameTest, TestArbitrarySymbolRoundtrip) {\n  const 
std::vector<std::string> stat_names = {\"\", \" \", \"  \", \",\", \"\\t\", \"$\", \"%\", \"`\", \".x\"};\n  for (auto& stat_name : stat_names) {\n    EXPECT_EQ(stat_name, encodeDecode(stat_name));\n  }\n}\n\nTEST_F(StatNameTest, TestEmpty) {\n  EXPECT_TRUE(makeStat(\"\").empty());\n  EXPECT_FALSE(makeStat(\"x\").empty());\n  EXPECT_TRUE(StatName().empty());\n}\n\nTEST_F(StatNameTest, TestDynamic100k) {\n  // Tests a variety different sizes of dynamic stat ranging to 500k, covering\n  // potential corner cases of spilling over into multi-byte lengths.\n  std::string stat_str(\"dyn.x\");\n  char ch = '\\001';\n  StatName ab = makeStat(\"a.b\");\n  StatName cd = makeStat(\"c.d\");\n  auto test_at_size = [this, &stat_str, &ch, ab, cd](uint32_t size) {\n    if (size > stat_str.size()) {\n      // Add rotating characters to stat_str until we hit size.\n      for (uint32_t i = stat_str.size(); i < size; ++i, ++ch) {\n        stat_str += (ch == '.') ? 'x' : ch;\n      }\n      StatNameDynamicStorage storage(stat_str, table_);\n      StatName dynamic = storage.statName();\n      EXPECT_EQ(stat_str, table_.toString(dynamic));\n      SymbolTable::StoragePtr joined = table_.join({ab, dynamic, cd});\n      EXPECT_EQ(absl::StrCat(\"a.b.\", stat_str, \".c.d\"), table_.toString(StatName(joined.get())));\n    }\n  };\n\n  // The outer-loop hits powers of 2 from 8 to 512k.\n  for (uint32_t i = 3; i < 20; ++i) {\n    int32_t pow_2 = 1 << i;\n\n    // The inner-loop covers every offset from the power of 2, between offsets of\n    // -10 and +10.\n    for (int32_t j = std::max(0, pow_2 - 10); j < pow_2 + 10; ++j) {\n      test_at_size(j);\n    }\n  }\n}\n\nTEST_F(StatNameTest, TestDynamicPools) {\n  // Same test for a dynamically allocated name. The only difference between\n  // the behavior with a remembered vs dynamic name is that when looking\n  // up a remembered name, a mutex is not taken. But we have no easy way\n  // to test for that. 
So we'll at least cover the code.\n  StatNameDynamicPool d1(table_);\n  const StatName dynamic = d1.add(\"dynamic\");\n  EXPECT_EQ(\"dynamic\", table_.toString(dynamic));\n\n  // The nature of the StatNameDynamicPool is that there is no sharing (and also no locks).\n  EXPECT_NE(dynamic.data(), d1.add(\"dynamic\").data());\n\n  // Make sure blanks are always the same.\n  const StatName blank = d1.add(\"\");\n  EXPECT_EQ(\"\", table_.toString(blank));\n  EXPECT_NE(blank.data(), d1.add(\"\").data());\n  EXPECT_NE(blank.data(), d1.add(\"\").data());\n  EXPECT_NE(blank.data(), d1.add(absl::string_view()).data());\n\n  // There's another corner case for the same \"dynamic\" name from a\n  // different set. Here we will get a different StatName object\n  // out of the second set, though it will share the same underlying\n  // symbol-table symbol.\n  StatNameDynamicPool d2(table_);\n  const StatName dynamic2 = d2.add(\"dynamic\");\n  EXPECT_EQ(\"dynamic\", table_.toString(dynamic2));\n  EXPECT_NE(dynamic2.data(), d2.add(\"dynamic\").data()); // No storage sharing.\n  EXPECT_NE(dynamic2.data(), dynamic.data());\n}\n\nTEST_F(StatNameTest, TestDynamicHash) {\n  StatNameDynamicPool dynamic(table_);\n  const StatName d1 = dynamic.add(\"dynamic\");\n  const StatName d2 = dynamic.add(\"dynamic\");\n  EXPECT_EQ(d1, d2);\n  EXPECT_EQ(d1.hash(), d2.hash());\n}\n\nTEST_F(StatNameTest, Test100KSymbolsRoundtrip) {\n  for (int i = 0; i < 100 * 1000; ++i) {\n    const std::string stat_name = absl::StrCat(\"symbol_\", i);\n    EXPECT_EQ(stat_name, encodeDecode(stat_name));\n  }\n}\n\nTEST_F(StatNameTest, TwoHundredTwoLevel) {\n  for (int i = 0; i < 200; ++i) {\n    const std::string stat_name = absl::StrCat(\"symbol_\", i);\n    EXPECT_EQ(stat_name, encodeDecode(stat_name));\n  }\n  EXPECT_EQ(\"http.foo\", encodeDecode(\"http.foo\"));\n}\n\nTEST_F(StatNameTest, TestLongSymbolName) {\n  std::string long_name(100000, 'a');\n  EXPECT_EQ(long_name, 
encodeDecode(long_name));\n}\n\nTEST_F(StatNameTest, TestLongSequence) {\n  std::string long_name(\"a\");\n  for (int i = 0; i < 100000; ++i) {\n    absl::StrAppend(&long_name, \".a\");\n  }\n\n  EXPECT_EQ(long_name, encodeDecode(long_name));\n}\n\nTEST_F(StatNameTest, TestUnusualDelimitersRoundtrip) {\n  const std::vector<std::string> stat_names = {\".x\",   \"..x\",    \"...x\",    \"foo\",     \"foo.x\",\n                                               \".foo\", \".foo.x\", \".foo..x\", \"..foo.x\", \"..foo..x\"};\n  for (auto& stat_name : stat_names) {\n    EXPECT_EQ(stat_name, encodeDecode(stat_name));\n  }\n}\n\nTEST_F(StatNameTest, TestSuccessfulDoubleLookup) {\n  StatName stat_name_1(makeStat(\"foo.bar.baz\"));\n  StatName stat_name_2(makeStat(\"foo.bar.baz\"));\n  EXPECT_EQ(stat_name_1, stat_name_2);\n}\n\nTEST_F(StatNameTest, TestSuccessfulDecode) {\n  std::string stat_name = \"foo.bar.baz\";\n  StatName stat_name_1(makeStat(stat_name));\n  StatName stat_name_2(makeStat(stat_name));\n  EXPECT_EQ(table_.toString(stat_name_1), table_.toString(stat_name_2));\n  EXPECT_EQ(table_.toString(stat_name_1), stat_name);\n}\n\nclass StatNameDeathTest : public StatNameTest {\npublic:\n  void decodeSymbolVec(const SymbolVec& symbol_vec) {\n    Thread::LockGuard lock(table_.lock_);\n    for (Symbol symbol : symbol_vec) {\n      table_.fromSymbol(symbol);\n    }\n  }\n};\n\nTEST_F(StatNameDeathTest, TestBadDecodes) {\n  {\n    // If a symbol doesn't exist, decoding it should trigger an ASSERT() and crash.\n    SymbolVec bad_symbol_vec = {1}; // symbol 0 is the empty symbol.\n    EXPECT_DEATH(decodeSymbolVec(bad_symbol_vec), \"\");\n  }\n\n  {\n    StatName stat_name_1 = makeStat(\"foo\");\n    SymbolVec vec_1 = getSymbols(stat_name_1);\n    // Decoding a symbol vec that exists is perfectly normal...\n    EXPECT_NO_THROW(decodeSymbolVec(vec_1));\n    clearStorage();\n    // But when the StatName is destroyed, its symbols are as well.\n    
EXPECT_DEATH(decodeSymbolVec(vec_1), \"\");\n  }\n}\n\nTEST_F(StatNameTest, TestDifferentStats) {\n  StatName stat_name_1(makeStat(\"foo.bar\"));\n  StatName stat_name_2(makeStat(\"bar.foo\"));\n  EXPECT_NE(table_.toString(stat_name_1), table_.toString(stat_name_2));\n  EXPECT_NE(stat_name_1, stat_name_2);\n}\n\nTEST_F(StatNameTest, TestSymbolConsistency) {\n  StatName stat_name_1(makeStat(\"foo.bar\"));\n  StatName stat_name_2(makeStat(\"bar.foo\"));\n  // We expect the encoding of \"foo\" in one context to be the same as another.\n  SymbolVec vec_1 = getSymbols(stat_name_1);\n  SymbolVec vec_2 = getSymbols(stat_name_2);\n  EXPECT_EQ(vec_1[0], vec_2[1]);\n  EXPECT_EQ(vec_2[0], vec_1[1]);\n}\n\nTEST_F(StatNameTest, TestIgnoreTrailingDots) {\n  EXPECT_EQ(\"foo.bar\", encodeDecode(\"foo.bar.\"));\n  EXPECT_EQ(\"foo.bar\", encodeDecode(\"foo.bar...\"));\n  EXPECT_EQ(\"\", encodeDecode(\".\"));\n  EXPECT_EQ(\"\", encodeDecode(\"..\"));\n}\n\nTEST_F(StatNameTest, TestSameValueOnPartialFree) {\n  // This should hold true for components as well. 
Since \"foo\" persists even when \"foo.bar\" is\n  // freed, we expect both instances of \"foo\" to have the same symbol.\n  makeStat(\"foo\");\n  StatNameStorage stat_foobar_1(\"foo.bar\", table_);\n  SymbolVec stat_foobar_1_symbols = getSymbols(stat_foobar_1.statName());\n  stat_foobar_1.free(table_);\n  StatName stat_foobar_2(makeStat(\"foo.bar\"));\n  SymbolVec stat_foobar_2_symbols = getSymbols(stat_foobar_2);\n\n  EXPECT_EQ(stat_foobar_1_symbols[0],\n            stat_foobar_2_symbols[0]); // Both \"foo\" components have the same symbol,\n  // And we have no expectation for the \"bar\" components, because of the free pool.\n}\n\nTEST_F(StatNameTest, FreePoolTest) {\n  // To ensure that the free pool is being used, we should be able to cycle through a large number\n  // of stats while validating that:\n  //   a) the size of the table has not increased, and\n  //   b) the monotonically increasing counter has not risen to more than the maximum number of\n  //   coexisting symbols during the life of the table.\n\n  {\n    makeStat(\"1a\");\n    makeStat(\"2a\");\n    makeStat(\"3a\");\n    makeStat(\"4a\");\n    makeStat(\"5a\");\n    EXPECT_EQ(monotonicCounter(), 6);\n    EXPECT_EQ(table_.numSymbols(), 5);\n    clearStorage();\n  }\n  EXPECT_EQ(monotonicCounter(), 6);\n  EXPECT_EQ(table_.numSymbols(), 0);\n\n  // These are different strings being encoded, but they should recycle through the same symbols as\n  // the stats above.\n  makeStat(\"1b\");\n  makeStat(\"2b\");\n  makeStat(\"3b\");\n  makeStat(\"4b\");\n  makeStat(\"5b\");\n  EXPECT_EQ(monotonicCounter(), 6);\n  EXPECT_EQ(table_.numSymbols(), 5);\n\n  makeStat(\"6\");\n  EXPECT_EQ(monotonicCounter(), 7);\n  EXPECT_EQ(table_.numSymbols(), 6);\n}\n\nTEST_F(StatNameTest, TestShrinkingExpectation) {\n  // We expect that as we free stat names, the memory used to store those underlying symbols will\n  // be freed.\n  // ::size() is a public function, but should only be used for testing.\n  size_t table_size_0 
= table_.numSymbols();\n\n  auto make_stat_storage = [this](absl::string_view name) -> StatNameStorage {\n    return StatNameStorage(name, table_);\n  };\n\n  StatNameStorage stat_a(make_stat_storage(\"a\"));\n  size_t table_size_1 = table_.numSymbols();\n\n  StatNameStorage stat_aa(make_stat_storage(\"a.a\"));\n  EXPECT_EQ(table_size_1, table_.numSymbols());\n\n  StatNameStorage stat_ab(make_stat_storage(\"a.b\"));\n  size_t table_size_2 = table_.numSymbols();\n\n  StatNameStorage stat_ac(make_stat_storage(\"a.c\"));\n  size_t table_size_3 = table_.numSymbols();\n\n  StatNameStorage stat_acd(make_stat_storage(\"a.c.d\"));\n  size_t table_size_4 = table_.numSymbols();\n\n  StatNameStorage stat_ace(make_stat_storage(\"a.c.e\"));\n  size_t table_size_5 = table_.numSymbols();\n  EXPECT_GE(table_size_5, table_size_4);\n\n  stat_ace.free(table_);\n  EXPECT_EQ(table_size_4, table_.numSymbols());\n\n  stat_acd.free(table_);\n  EXPECT_EQ(table_size_3, table_.numSymbols());\n\n  stat_ac.free(table_);\n  EXPECT_EQ(table_size_2, table_.numSymbols());\n\n  stat_ab.free(table_);\n  EXPECT_EQ(table_size_1, table_.numSymbols());\n\n  stat_aa.free(table_);\n  EXPECT_EQ(table_size_1, table_.numSymbols());\n\n  stat_a.free(table_);\n  EXPECT_EQ(table_size_0, table_.numSymbols());\n}\n\n// In the tests above we use the StatNameStorage abstraction which is not the\n// most space-efficient strategy in all cases. To use memory more effectively\n// you may want to store bytes in a larger structure. For example, you might\n// want to allocate two different StatName objects in contiguous memory. The\n// safety-net here in terms of leaks is that SymbolTable will assert-fail if\n// you don't free all the StatNames you've allocated bytes for. 
StatNameList\n// provides this capability.\nTEST_F(StatNameTest, List) {\n  StatName names[] = {makeStat(\"hello.world\"), makeStat(\"goodbye.world\")};\n  StatNameList name_list;\n  EXPECT_FALSE(name_list.populated());\n  table_.populateList(names, ARRAY_SIZE(names), name_list);\n  EXPECT_TRUE(name_list.populated());\n\n  // First, decode only the first name.\n  name_list.iterate([this](StatName stat_name) -> bool {\n    EXPECT_EQ(\"hello.world\", table_.toString(stat_name));\n    return false;\n  });\n\n  // Decode all the names.\n  std::vector<std::string> decoded_strings;\n  name_list.iterate([this, &decoded_strings](StatName stat_name) -> bool {\n    decoded_strings.push_back(table_.toString(stat_name));\n    return true;\n  });\n  ASSERT_EQ(2, decoded_strings.size());\n  EXPECT_EQ(\"hello.world\", decoded_strings[0]);\n  EXPECT_EQ(\"goodbye.world\", decoded_strings[1]);\n  name_list.clear(table_);\n  EXPECT_FALSE(name_list.populated());\n}\n\nTEST_F(StatNameTest, HashTable) {\n  StatName ac = makeStat(\"a.c\");\n  StatName ab = makeStat(\"a.b\");\n  StatName de = makeStat(\"d.e\");\n  StatName da = makeStat(\"d.a\");\n\n  StatNameHashMap<int> name_int_map;\n  name_int_map[ac] = 1;\n  name_int_map[ab] = 0;\n  name_int_map[de] = 3;\n  name_int_map[da] = 2;\n\n  EXPECT_EQ(0, name_int_map[ab]);\n  EXPECT_EQ(1, name_int_map[ac]);\n  EXPECT_EQ(2, name_int_map[da]);\n  EXPECT_EQ(3, name_int_map[de]);\n}\n\nTEST_F(StatNameTest, Sort) {\n  StatNameVec names{makeStat(\"a.c\"),   makeStat(\"a.b\"), makeStat(\"d.e\"),\n                    makeStat(\"d.a.a\"), makeStat(\"d.a\"), makeStat(\"a.c\")};\n  const StatNameVec sorted_names{makeStat(\"a.b\"), makeStat(\"a.c\"),   makeStat(\"a.c\"),\n                                 makeStat(\"d.a\"), makeStat(\"d.a.a\"), makeStat(\"d.e\")};\n  EXPECT_NE(names, sorted_names);\n  std::sort(names.begin(), names.end(), StatNameLessThan(table_));\n  EXPECT_EQ(names, sorted_names);\n}\n\nTEST_F(StatNameTest, Concat2) {\n  
SymbolTable::StoragePtr joined = table_.join({makeStat(\"a.b\"), makeStat(\"c.d\")});\n  EXPECT_EQ(\"a.b.c.d\", table_.toString(StatName(joined.get())));\n}\n\nTEST_F(StatNameTest, ConcatFirstEmpty) {\n  SymbolTable::StoragePtr joined = table_.join({makeStat(\"\"), makeStat(\"c.d\")});\n  EXPECT_EQ(\"c.d\", table_.toString(StatName(joined.get())));\n}\n\nTEST_F(StatNameTest, ConcatSecondEmpty) {\n  SymbolTable::StoragePtr joined = table_.join({makeStat(\"a.b\"), makeStat(\"\")});\n  EXPECT_EQ(\"a.b\", table_.toString(StatName(joined.get())));\n}\n\nTEST_F(StatNameTest, ConcatAllEmpty) {\n  SymbolTable::StoragePtr joined = table_.join({makeStat(\"\"), makeStat(\"\")});\n  EXPECT_EQ(\"\", table_.toString(StatName(joined.get())));\n}\n\nTEST_F(StatNameTest, Join3) {\n  SymbolTable::StoragePtr joined = table_.join({makeStat(\"a.b\"), makeStat(\"c.d\"), makeStat(\"e.f\")});\n  EXPECT_EQ(\"a.b.c.d.e.f\", table_.toString(StatName(joined.get())));\n}\n\nTEST_F(StatNameTest, Join3FirstEmpty) {\n  SymbolTable::StoragePtr joined = table_.join({makeStat(\"\"), makeStat(\"c.d\"), makeStat(\"e.f\")});\n  EXPECT_EQ(\"c.d.e.f\", table_.toString(StatName(joined.get())));\n}\n\nTEST_F(StatNameTest, Join3SecondEmpty) {\n  SymbolTable::StoragePtr joined = table_.join({makeStat(\"a.b\"), makeStat(\"\"), makeStat(\"e.f\")});\n  EXPECT_EQ(\"a.b.e.f\", table_.toString(StatName(joined.get())));\n}\n\nTEST_F(StatNameTest, Join3ThirdEmpty) {\n  SymbolTable::StoragePtr joined = table_.join({makeStat(\"a.b\"), makeStat(\"c.d\"), makeStat(\"\")});\n  EXPECT_EQ(\"a.b.c.d\", table_.toString(StatName(joined.get())));\n}\n\nTEST_F(StatNameTest, JoinAllEmpty) {\n  SymbolTable::StoragePtr joined = table_.join({makeStat(\"\"), makeStat(\"\"), makeStat(\"\")});\n  EXPECT_EQ(\"\", table_.toString(StatName(joined.get())));\n}\n\n// Validates that we don't get tsan or other errors when concurrently creating\n// a large number of stats.\nTEST_F(StatNameTest, RacingSymbolCreation) {\n  
Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest();\n  MutexTracerImpl& mutex_tracer = MutexTracerImpl::getOrCreateTracer();\n\n  // Make 100 threads, each of which will race to encode an overlapping set of\n  // symbols, triggering corner-cases in SymbolTable::toSymbol.\n  constexpr int num_threads = 100;\n  std::vector<Thread::ThreadPtr> threads;\n  threads.reserve(num_threads);\n  ConditionalInitializer creation, access, wait;\n  absl::BlockingCounter creates(num_threads), accesses(num_threads);\n  for (int i = 0; i < num_threads; ++i) {\n    threads.push_back(\n        thread_factory.createThread([this, i, &creation, &access, &wait, &creates, &accesses]() {\n          // Rotate between 20 different symbols to try to get some\n          // contention. Based on a logging print statement in\n          // SymbolTable::toSymbol(), this appears to trigger creation-races,\n          // even when compiled with optimization.\n          std::string stat_name_string = absl::StrCat(\"symbol\", i % 20);\n\n          // Block each thread on waking up a common condition variable,\n          // so we make it likely to race on creation.\n          creation.wait();\n          StatNameManagedStorage initial(stat_name_string, table_);\n          creates.DecrementCount();\n\n          access.wait();\n          StatNameManagedStorage second(stat_name_string, table_);\n          accesses.DecrementCount();\n\n          wait.wait();\n        }));\n  }\n  creation.setReady();\n  creates.Wait();\n\n  int64_t create_contentions = mutex_tracer.numContentions();\n  ENVOY_LOG_MISC(info, \"Number of contentions: {}\", create_contentions);\n\n  access.setReady();\n  accesses.Wait();\n\n  // In a perfect world, we could use reader-locks in the SymbolTable\n  // implementation, and there should be zero additional contentions\n  // after latching 'create_contentions' above. 
And we can definitely\n  // have this world, but this slows down BM_CreateRace in\n  // symbol_table_speed_test.cc, even on a 72-core machine.\n  //\n  // Thus it is better to avoid symbol-table contention by refactoring\n  // all stat-creation code to symbolize all stat string elements at\n  // construction, as composition does not require a lock.\n  //\n  // See this commit\n  // https://github.com/envoyproxy/envoy/pull/5321/commits/ef712d0f5a11ff49831c1935e8a2ef8a0a935bc9\n  // for a working reader-lock implementation, which would pass this EXPECT:\n  //     EXPECT_EQ(create_contentions, mutex_tracer.numContentions());\n  //\n  // Note also that we cannot guarantee there *will* be contentions\n  // as a machine or OS is free to run all threads serially.\n\n  wait.setReady();\n  for (auto& thread : threads) {\n    thread->join();\n  }\n}\n\nTEST_F(StatNameTest, MutexContentionOnExistingSymbols) {\n  Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest();\n  MutexTracerImpl& mutex_tracer = MutexTracerImpl::getOrCreateTracer();\n\n  // Make 100 threads, each of which will race to encode an overlapping set of\n  // symbols, triggering corner-cases in SymbolTable::toSymbol.\n  constexpr int num_threads = 100;\n  std::vector<Thread::ThreadPtr> threads;\n  threads.reserve(num_threads);\n  ConditionalInitializer creation, access, wait;\n  absl::BlockingCounter creates(num_threads), accesses(num_threads);\n  for (int i = 0; i < num_threads; ++i) {\n    threads.push_back(\n        thread_factory.createThread([this, i, &creation, &access, &wait, &creates, &accesses]() {\n          // Rotate between 20 different symbols to try to get some\n          // contention. 
Based on a logging print statement in\n          // SymbolTable::toSymbol(), this appears to trigger creation-races,\n          // even when compiled with optimization.\n          std::string stat_name_string = absl::StrCat(\"symbol\", i % 20);\n\n          // Block each thread on waking up a common condition variable,\n          // so we make it likely to race on creation.\n          creation.wait();\n          StatNameManagedStorage initial(stat_name_string, table_);\n          creates.DecrementCount();\n\n          access.wait();\n          StatNameManagedStorage second(stat_name_string, table_);\n          accesses.DecrementCount();\n\n          wait.wait();\n        }));\n  }\n  creation.setReady();\n  creates.Wait();\n\n  int64_t create_contentions = mutex_tracer.numContentions();\n  ENVOY_LOG_MISC(info, \"Number of contentions: {}\", create_contentions);\n\n  // But when we access the already-existing symbols, we guarantee that no\n  // further mutex contentions occur.\n  access.setReady();\n  accesses.Wait();\n\n  // In a perfect world, we could use reader-locks in the SymbolTable\n  // implementation, and there should be zero additional contentions\n  // after latching 'create_contentions' above. 
And we can definitely\n  // have this world, but this slows down BM_CreateRace in\n  // symbol_table_speed_test.cc, even on a 72-core machine.\n  //\n  // Thus it is better to avoid symbol-table contention by refactoring\n  // all stat-creation code to symbolize all stat string elements at\n  // construction, as composition does not require a lock.\n  //\n  // See this commit\n  // https://github.com/envoyproxy/envoy/pull/5321/commits/ef712d0f5a11ff49831c1935e8a2ef8a0a935bc9\n  // for a working reader-lock implementation, which would pass this EXPECT:\n  //     EXPECT_EQ(create_contentions, mutex_tracer.numContentions());\n  //\n  // Note also that we cannot guarantee there *will* be contentions\n  // as a machine or OS is free to run all threads serially.\n\n  wait.setReady();\n  for (auto& thread : threads) {\n    thread->join();\n  }\n}\n\nTEST_F(StatNameTest, SharedStatNameStorageSetInsertAndFind) {\n  StatNameStorageSet set;\n  const int iters = 10;\n  for (int i = 0; i < iters; ++i) {\n    std::string foo = absl::StrCat(\"foo\", i);\n    auto insertion = set.insert(StatNameStorage(foo, table_));\n    StatNameManagedStorage temp_foo(foo, table_);\n    auto found = set.find(temp_foo.statName());\n    EXPECT_EQ(found->statName().data(), insertion.first->statName().data());\n  }\n  StatNameManagedStorage bar(\"bar\", table_);\n  EXPECT_EQ(set.end(), set.find(bar.statName()));\n  EXPECT_EQ(iters, set.size());\n  set.free(table_);\n}\n\nTEST_F(StatNameTest, StatNameSet) {\n  StatNameSetPtr set(table_.makeSet(\"set\"));\n\n  // Test that we get a consistent StatName object from a remembered name.\n  set->rememberBuiltin(\"remembered\");\n  const StatName fallback = set->add(\"fallback\");\n  const Stats::StatName remembered = set->getBuiltin(\"remembered\", fallback);\n  EXPECT_EQ(\"remembered\", table_.toString(remembered));\n  EXPECT_EQ(remembered.data(), set->getBuiltin(\"remembered\", fallback).data());\n  EXPECT_EQ(fallback.data(), 
set->getBuiltin(\"not_remembered\", fallback).data());\n}\n\nTEST_F(StatNameTest, StorageCopy) {\n  StatName a = pool_.add(\"stat.name\");\n  StatNameStorage b_storage(a, table_);\n  StatName b = b_storage.statName();\n  EXPECT_EQ(a, b);\n  EXPECT_NE(a.data(), b.data());\n  b_storage.free(table_);\n}\n\nTEST_F(StatNameTest, RecentLookups) {\n  StatNameSetPtr set1(table_.makeSet(\"set1\"));\n  table_.setRecentLookupCapacity(10);\n  StatNameSetPtr set2(table_.makeSet(\"set2\"));\n  StatNameDynamicPool d1(table_);\n  d1.add(\"dynamic.stat1\");\n  StatNameDynamicPool d2(table_);\n  d2.add(\"dynamic.stat2\");\n  encodeDecode(\"direct.stat\");\n\n  std::vector<std::string> accum;\n  uint64_t total = table_.getRecentLookups([&accum](absl::string_view name, uint64_t count) {\n    accum.emplace_back(absl::StrCat(count, \": \", name));\n  });\n  EXPECT_EQ(1, total); // Dynamic pool adds don't count as recent lookups.\n  std::string recent_lookups_str = absl::StrJoin(accum, \" \");\n\n  EXPECT_EQ(\"1: direct.stat\", recent_lookups_str); // No dynamic-pool lookups take locks.\n\n  table_.clearRecentLookups();\n  uint32_t num_calls = 0;\n  EXPECT_EQ(0, table_.getRecentLookups([&num_calls](absl::string_view, uint64_t) { ++num_calls; }));\n  EXPECT_EQ(0, num_calls);\n}\n\nTEST_F(StatNameTest, StatNameEmptyEquivalent) {\n  StatName empty1;\n  StatName empty2 = makeStat(\"\");\n  StatName non_empty = makeStat(\"a\");\n  EXPECT_EQ(empty1, empty2);\n  EXPECT_EQ(empty1.hash(), empty2.hash());\n  EXPECT_NE(empty1, non_empty);\n  EXPECT_NE(empty2, non_empty);\n  EXPECT_NE(empty1.hash(), non_empty.hash());\n  EXPECT_NE(empty2.hash(), non_empty.hash());\n}\n\nTEST_F(StatNameTest, SupportsAbslHash) {\n  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({\n      StatName(),\n      makeStat(\"\"),\n      makeStat(\"hello.world\"),\n  }));\n}\n\n// Tests the memory savings realized from using symbol tables with 1k\n// clusters. 
This test shows the memory drops from almost 8M to less than\n// 2M. Note that only SymbolTableImpl is tested for memory consumption,\n// and not FakeSymbolTableImpl.\nTEST(SymbolTableTest, Memory) {\n  // Tests a stat-name allocation strategy.\n  auto test_memory_usage = [](std::function<void(absl::string_view)> fn) -> size_t {\n    TestUtil::MemoryTest memory_test;\n    TestUtil::forEachSampleStat(1000, fn);\n    return memory_test.consumedBytes();\n  };\n\n  size_t string_mem_used, symbol_table_mem_used;\n  {\n    std::vector<std::string> names;\n    auto record_stat = [&names](absl::string_view stat) { names.push_back(std::string(stat)); };\n    string_mem_used = test_memory_usage(record_stat);\n  }\n  {\n    SymbolTableImpl table;\n    std::vector<StatNameStorage> names;\n    auto record_stat = [&names, &table](absl::string_view stat) {\n      names.emplace_back(StatNameStorage(stat, table));\n    };\n    symbol_table_mem_used = test_memory_usage(record_stat);\n    for (StatNameStorage& name : names) {\n      name.free(table);\n    }\n  }\n\n  // Make sure we don't regress.\n  // Data as of 2019/05/29:\n  // symbol_table_mem_used:  1726056 (3.9x) -- does not seem to depend on STL sizes.\n  EXPECT_MEMORY_LE(symbol_table_mem_used, string_mem_used / 3);\n  EXPECT_MEMORY_EQ(symbol_table_mem_used, 1726056);\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/symbol_table_speed_test.cc",
    "content": "// Note: this should be run with --compilation_mode=opt, and would benefit from a\n// quiescent system with disabled cstate power management.\n//\n// NOLINT(namespace-envoy)\n\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/utility.h\"\n\n#include \"test/common/stats/make_elements_helper.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/blocking_counter.h\"\n#include \"benchmark/benchmark.h\"\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_CreateRace(benchmark::State& state) {\n  Envoy::Thread::ThreadFactory& thread_factory = Envoy::Thread::threadFactoryForTest();\n\n  // Make 100 threads, each of which will race to encode an overlapping set of\n  // symbols, triggering corner-cases in SymbolTable::toSymbol.\n  constexpr int num_threads = 36;\n  std::vector<Envoy::Thread::ThreadPtr> threads;\n  threads.reserve(num_threads);\n  Envoy::ConditionalInitializer access, wait;\n  absl::BlockingCounter accesses(num_threads);\n  Envoy::Stats::SymbolTableImpl table;\n  const absl::string_view stat_name_string = \"here.is.a.stat.name\";\n  Envoy::Stats::StatNameStorage initial(stat_name_string, table);\n\n  for (int i = 0; i < num_threads; ++i) {\n    threads.push_back(\n        thread_factory.createThread([&access, &accesses, &state, &table, &stat_name_string]() {\n          // Block each thread on waking up a common condition variable,\n          // so we make it likely to race on access.\n          access.wait();\n\n          for (auto _ : state) {\n            Envoy::Stats::StatNameStorage second(stat_name_string, table);\n            second.free(table);\n          }\n          accesses.DecrementCount();\n        }));\n  }\n\n  // But when we access the already-existing symbols, we guarantee that no\n  // further mutex contentions occur.\n  access.setReady();\n  
accesses.Wait();\n\n  for (auto& thread : threads) {\n    thread->join();\n  }\n\n  initial.free(table);\n}\nBENCHMARK(BM_CreateRace);\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_JoinStatNames(benchmark::State& state) {\n  Envoy::Stats::SymbolTableImpl symbol_table;\n  Envoy::Stats::IsolatedStoreImpl store(symbol_table);\n  Envoy::Stats::StatNamePool pool(symbol_table);\n  Envoy::Stats::StatName a = pool.add(\"a\");\n  Envoy::Stats::StatName b = pool.add(\"b\");\n  Envoy::Stats::StatName c = pool.add(\"c\");\n  Envoy::Stats::StatName d = pool.add(\"d\");\n  Envoy::Stats::StatName e = pool.add(\"e\");\n  for (auto _ : state) {\n    Envoy::Stats::Utility::counterFromStatNames(store, Envoy::Stats::makeStatNames(a, b, c, d, e));\n  }\n}\nBENCHMARK(BM_JoinStatNames);\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic void BM_JoinElements(benchmark::State& state) {\n  Envoy::Stats::SymbolTableImpl symbol_table;\n  Envoy::Stats::IsolatedStoreImpl store(symbol_table);\n  Envoy::Stats::StatNamePool pool(symbol_table);\n  Envoy::Stats::StatName a = pool.add(\"a\");\n  Envoy::Stats::StatName b = pool.add(\"b\");\n  Envoy::Stats::StatName c = pool.add(\"c\");\n  Envoy::Stats::StatName e = pool.add(\"e\");\n  for (auto _ : state) {\n    Envoy::Stats::Utility::counterFromElements(\n        store, Envoy::Stats::makeElements(a, b, c, Envoy::Stats::DynamicName(\"d\"), e));\n  }\n}\nBENCHMARK(BM_JoinElements);\n\nint main(int argc, char** argv) {\n  Envoy::Thread::MutexBasicLockable lock;\n  Envoy::Logger::Context logger_context(spdlog::level::warn,\n                                        Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false);\n  benchmark::Initialize(&argc, argv);\n\n  if (benchmark::ReportUnrecognizedArguments(argc, argv)) {\n    return 1;\n  }\n  benchmark::RunSpecifiedBenchmarks();\n}\n"
  },
  {
    "path": "test/common/stats/tag_extractor_impl_test.cc",
    "content": "#include <string>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n\n#include \"common/config/well_known_names.h\"\n#include \"common/stats/tag_extractor_impl.h\"\n#include \"common/stats/tag_producer_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nTEST(TagExtractorTest, TwoSubexpressions) {\n  TagExtractorImpl tag_extractor(\"cluster_name\", \"^cluster\\\\.((.+?)\\\\.)\");\n  EXPECT_EQ(\"cluster_name\", tag_extractor.name());\n  std::string name = \"cluster.test_cluster.upstream_cx_total\";\n  TagVector tags;\n  IntervalSetImpl<size_t> remove_characters;\n  ASSERT_TRUE(tag_extractor.extractTag(name, tags, remove_characters));\n  std::string tag_extracted_name = StringUtil::removeCharacters(name, remove_characters);\n  EXPECT_EQ(\"cluster.upstream_cx_total\", tag_extracted_name);\n  ASSERT_EQ(1, tags.size());\n  EXPECT_EQ(\"test_cluster\", tags.at(0).value_);\n  EXPECT_EQ(\"cluster_name\", tags.at(0).name_);\n}\n\nTEST(TagExtractorTest, SingleSubexpression) {\n  TagExtractorImpl tag_extractor(\"listner_port\", \"^listener\\\\.(\\\\d+?\\\\.)\");\n  std::string name = \"listener.80.downstream_cx_total\";\n  TagVector tags;\n  IntervalSetImpl<size_t> remove_characters;\n  ASSERT_TRUE(tag_extractor.extractTag(name, tags, remove_characters));\n  std::string tag_extracted_name = StringUtil::removeCharacters(name, remove_characters);\n  EXPECT_EQ(\"listener.downstream_cx_total\", tag_extracted_name);\n  ASSERT_EQ(1, tags.size());\n  EXPECT_EQ(\"80.\", tags.at(0).value_);\n  EXPECT_EQ(\"listner_port\", tags.at(0).name_);\n}\n\nTEST(TagExtractorTest, substrMismatch) {\n  TagExtractorImpl tag_extractor(\"listner_port\", \"^listener\\\\.(\\\\d+?\\\\.)\\\\.foo\\\\.\", \".foo.\");\n  EXPECT_TRUE(tag_extractor.substrMismatch(\"listener.80.downstream_cx_total\"));\n  
EXPECT_FALSE(tag_extractor.substrMismatch(\"listener.80.downstream_cx_total.foo.bar\"));\n}\n\nTEST(TagExtractorTest, noSubstrMismatch) {\n  TagExtractorImpl tag_extractor(\"listner_port\", \"^listener\\\\.(\\\\d+?\\\\.)\\\\.foo\\\\.\");\n  EXPECT_FALSE(tag_extractor.substrMismatch(\"listener.80.downstream_cx_total\"));\n  EXPECT_FALSE(tag_extractor.substrMismatch(\"listener.80.downstream_cx_total.foo.bar\"));\n}\n\nTEST(TagExtractorTest, EmptyName) {\n  EXPECT_THROW_WITH_MESSAGE(TagExtractorImpl::createTagExtractor(\"\", \"^listener\\\\.(\\\\d+?\\\\.)\"),\n                            EnvoyException, \"tag_name cannot be empty\");\n}\n\nTEST(TagExtractorTest, BadRegex) {\n  EXPECT_THROW_WITH_REGEX(TagExtractorImpl::createTagExtractor(\"cluster_name\", \"+invalid\"),\n                          EnvoyException, \"Invalid regex '\\\\+invalid':\");\n}\n\nclass DefaultTagRegexTester {\npublic:\n  DefaultTagRegexTester() : tag_extractors_(envoy::config::metrics::v3::StatsConfig()) {}\n\n  void testRegex(const std::string& stat_name, const std::string& expected_tag_extracted_name,\n                 const TagVector& expected_tags) {\n\n    // Test forward iteration through the regexes\n    TagVector tags;\n    const std::string tag_extracted_name = tag_extractors_.produceTags(stat_name, tags);\n\n    auto cmp = [](const Tag& lhs, const Tag& rhs) {\n      return lhs.name_ == rhs.name_ && lhs.value_ == rhs.value_;\n    };\n\n    EXPECT_EQ(expected_tag_extracted_name, tag_extracted_name);\n    ASSERT_EQ(expected_tags.size(), tags.size())\n        << fmt::format(\"Stat name '{}' did not produce the expected number of tags\", stat_name);\n    EXPECT_TRUE(std::is_permutation(expected_tags.begin(), expected_tags.end(), tags.begin(), cmp))\n        << fmt::format(\"Stat name '{}' did not produce the expected tags\", stat_name);\n\n    // Reverse iteration through regexes to ensure ordering invariance\n    TagVector rev_tags;\n    const std::string rev_tag_extracted_name = 
produceTagsReverse(stat_name, rev_tags);\n\n    EXPECT_EQ(expected_tag_extracted_name, rev_tag_extracted_name);\n    ASSERT_EQ(expected_tags.size(), rev_tags.size())\n        << fmt::format(\"Stat name '{}' did not produce the expected number of tags when regexes \"\n                       \"were run in reverse order\",\n                       stat_name);\n    EXPECT_TRUE(\n        std::is_permutation(expected_tags.begin(), expected_tags.end(), rev_tags.begin(), cmp))\n        << fmt::format(\"Stat name '{}' did not produce the expected tags when regexes were run in \"\n                       \"reverse order\",\n                       stat_name);\n  }\n\n  /**\n   * Reimplements TagProducerImpl::produceTags, but extracts the tags in reverse order.\n   * This helps demonstrate that the order of extractors does not matter to the end result,\n   * assuming we don't care about tag-order. This is in large part correct by design because\n   * stat_name is not mutated until all the extraction is done.\n   * @param metric_name std::string a name of Stats::Metric (Counter, Gauge, Histogram).\n   * @param tags TagVector& a set of Stats::Tag.\n   * @return std::string the metric_name with tags removed.\n   */\n  std::string produceTagsReverse(const std::string& metric_name, TagVector& tags) const {\n    // TODO(jmarantz): Skip the creation of string-based tags, creating a StatNameTagVector instead.\n\n    // Note: one discrepancy between this and TagProducerImpl::produceTags is that this\n    // version does not add in tag_extractors_.default_tags_ into tags. 
That doesn't matter\n    // for this test, however.\n    std::list<const TagExtractor*> extractors; // Note push-front is used to reverse order.\n    tag_extractors_.forEachExtractorMatching(metric_name,\n                                             [&extractors](const TagExtractorPtr& tag_extractor) {\n                                               extractors.push_front(tag_extractor.get());\n                                             });\n\n    IntervalSetImpl<size_t> remove_characters;\n    for (const TagExtractor* tag_extractor : extractors) {\n      tag_extractor->extractTag(metric_name, tags, remove_characters);\n    }\n    return StringUtil::removeCharacters(metric_name, remove_characters);\n  }\n\n  TagProducerImpl tag_extractors_;\n};\n\nTEST(TagExtractorTest, DefaultTagExtractors) {\n  const auto& tag_names = Config::TagNames::get();\n\n  // General cluster name\n  DefaultTagRegexTester regex_tester;\n\n  // Cluster name\n  Tag cluster_tag;\n  cluster_tag.name_ = tag_names.CLUSTER_NAME;\n  cluster_tag.value_ = \"ratelimit\";\n\n  regex_tester.testRegex(\"cluster.ratelimit.upstream_rq_timeout\", \"cluster.upstream_rq_timeout\",\n                         {cluster_tag});\n\n  // Listener SSL\n  Tag listener_address;\n  listener_address.name_ = tag_names.LISTENER_ADDRESS;\n\n  // ipv6 loopback address\n  listener_address.value_ = \"[__1]_0\";\n\n  // Cipher\n  Tag cipher_name;\n  cipher_name.name_ = tag_names.SSL_CIPHER;\n  cipher_name.value_ = \"AES256-SHA\";\n\n  regex_tester.testRegex(\"listener.[__1]_0.ssl.cipher.AES256-SHA\", \"listener.ssl.cipher\",\n                         {listener_address, cipher_name});\n\n  // Cipher suite\n  Tag cipher_suite;\n  cipher_suite.name_ = tag_names.SSL_CIPHER_SUITE;\n  cipher_suite.value_ = \"ECDHE-RSA-AES128-GCM-SHA256\";\n\n  regex_tester.testRegex(\"cluster.ratelimit.ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256\",\n                         \"cluster.ssl.ciphers\", {cluster_tag, cipher_suite});\n\n  // ipv6 non-loopback 
(for alphabetical chars)\n  listener_address.value_ = \"[2001_0db8_85a3_0000_0000_8a2e_0370_7334]_3543\";\n  regex_tester.testRegex(\n      \"listener.[2001_0db8_85a3_0000_0000_8a2e_0370_7334]_3543.ssl.cipher.AES256-SHA\",\n      \"listener.ssl.cipher\", {listener_address, cipher_name});\n\n  // ipv4 address\n  listener_address.value_ = \"127.0.0.1_0\";\n  regex_tester.testRegex(\"listener.127.0.0.1_0.ssl.cipher.AES256-SHA\", \"listener.ssl.cipher\",\n                         {listener_address, cipher_name});\n\n  // Mongo\n  Tag mongo_prefix;\n  mongo_prefix.name_ = tag_names.MONGO_PREFIX;\n  mongo_prefix.value_ = \"mongo_filter\";\n\n  Tag mongo_command;\n  mongo_command.name_ = tag_names.MONGO_CMD;\n  mongo_command.value_ = \"foo_cmd\";\n\n  Tag mongo_collection;\n  mongo_collection.name_ = tag_names.MONGO_COLLECTION;\n  mongo_collection.value_ = \"bar_collection\";\n\n  Tag mongo_callsite;\n  mongo_callsite.name_ = tag_names.MONGO_CALLSITE;\n  mongo_callsite.value_ = \"baz_callsite\";\n\n  regex_tester.testRegex(\"mongo.mongo_filter.op_reply\", \"mongo.op_reply\", {mongo_prefix});\n  regex_tester.testRegex(\"mongo.mongo_filter.cmd.foo_cmd.reply_size\", \"mongo.cmd.reply_size\",\n                         {mongo_prefix, mongo_command});\n  regex_tester.testRegex(\"mongo.mongo_filter.collection.bar_collection.query.multi_get\",\n                         \"mongo.collection.query.multi_get\", {mongo_prefix, mongo_collection});\n  regex_tester.testRegex(\n      \"mongo.mongo_filter.collection.bar_collection.callsite.baz_callsite.query.scatter_get\",\n      \"mongo.collection.callsite.query.scatter_get\",\n      {mongo_prefix, mongo_collection, mongo_callsite});\n\n  // Ratelimit\n  Tag ratelimit_prefix;\n  ratelimit_prefix.name_ = tag_names.RATELIMIT_PREFIX;\n  ratelimit_prefix.value_ = \"foo_ratelimiter\";\n  regex_tester.testRegex(\"ratelimit.foo_ratelimiter.over_limit\", \"ratelimit.over_limit\",\n                         {ratelimit_prefix});\n\n  // Dynamo\n  Tag 
dynamo_http_prefix;\n  dynamo_http_prefix.name_ = tag_names.HTTP_CONN_MANAGER_PREFIX;\n  dynamo_http_prefix.value_ = \"egress_dynamodb_iad\";\n\n  Tag dynamo_operation;\n  dynamo_operation.name_ = tag_names.DYNAMO_OPERATION;\n  dynamo_operation.value_ = \"Query\";\n\n  Tag dynamo_table;\n  dynamo_table.name_ = tag_names.DYNAMO_TABLE;\n  dynamo_table.value_ = \"bar_table\";\n\n  Tag dynamo_partition;\n  dynamo_partition.name_ = tag_names.DYNAMO_PARTITION_ID;\n  dynamo_partition.value_ = \"ABC1234\";\n\n  regex_tester.testRegex(\"http.egress_dynamodb_iad.downstream_cx_total\", \"http.downstream_cx_total\",\n                         {dynamo_http_prefix});\n  regex_tester.testRegex(\"http.egress_dynamodb_iad.dynamodb.operation.Query.upstream_rq_time\",\n                         \"http.dynamodb.operation.upstream_rq_time\",\n                         {dynamo_http_prefix, dynamo_operation});\n  regex_tester.testRegex(\"http.egress_dynamodb_iad.dynamodb.table.bar_table.upstream_rq_time\",\n                         \"http.dynamodb.table.upstream_rq_time\",\n                         {dynamo_http_prefix, dynamo_table});\n  regex_tester.testRegex(\n      \"http.egress_dynamodb_iad.dynamodb.table.bar_table.capacity.Query.__partition_id=ABC1234\",\n      \"http.dynamodb.table.capacity\",\n      {dynamo_http_prefix, dynamo_table, dynamo_operation, dynamo_partition});\n\n  // GRPC Http1.1 Bridge\n  Tag grpc_cluster;\n  grpc_cluster.name_ = tag_names.CLUSTER_NAME;\n  grpc_cluster.value_ = \"grpc_cluster\";\n\n  Tag grpc_service;\n  grpc_service.name_ = tag_names.GRPC_BRIDGE_SERVICE;\n  grpc_service.value_ = \"grpc_service_1\";\n\n  Tag grpc_method;\n  grpc_method.name_ = tag_names.GRPC_BRIDGE_METHOD;\n  grpc_method.value_ = \"grpc_method_1\";\n\n  regex_tester.testRegex(\"cluster.grpc_cluster.grpc.grpc_service_1.grpc_method_1.success\",\n                         \"cluster.grpc.success\", {grpc_cluster, grpc_method, grpc_service});\n\n  // Virtual host and cluster\n  Tag vhost;\n  
vhost.name_ = tag_names.VIRTUAL_HOST;\n  vhost.value_ = \"vhost_1\";\n\n  Tag vcluster;\n  vcluster.name_ = tag_names.VIRTUAL_CLUSTER;\n  vcluster.value_ = \"vcluster_1\";\n\n  Tag response_code_class;\n  response_code_class.name_ = tag_names.RESPONSE_CODE_CLASS;\n  response_code_class.value_ = \"2\";\n\n  Tag response_code;\n  response_code.name_ = tag_names.RESPONSE_CODE;\n  response_code.value_ = \"200\";\n\n  regex_tester.testRegex(\"vhost.vhost_1.vcluster.vcluster_1.upstream_rq_2xx\",\n                         \"vhost.vcluster.upstream_rq_xx\", {vhost, vcluster, response_code_class});\n  regex_tester.testRegex(\"vhost.vhost_1.vcluster.vcluster_1.upstream_rq_200\",\n                         \"vhost.vcluster.upstream_rq\", {vhost, vcluster, response_code});\n\n  // Listener http prefix\n  Tag listener_http_prefix;\n  listener_http_prefix.name_ = tag_names.HTTP_CONN_MANAGER_PREFIX;\n  listener_http_prefix.value_ = \"http_prefix\";\n\n  listener_address.value_ = \"127.0.0.1_3012\";\n  response_code_class.value_ = \"5\";\n\n  regex_tester.testRegex(\"listener.127.0.0.1_3012.http.http_prefix.downstream_rq_5xx\",\n                         \"listener.http.downstream_rq_xx\",\n                         {listener_http_prefix, listener_address, response_code_class});\n\n  // User agent\n  Tag user_agent;\n  user_agent.name_ = tag_names.HTTP_USER_AGENT;\n  user_agent.value_ = \"ios\";\n\n  regex_tester.testRegex(\"http.egress_dynamodb_iad.user_agent.ios.downstream_cx_total\",\n                         \"http.user_agent.downstream_cx_total\", {user_agent, dynamo_http_prefix});\n\n  // Client SSL Prefix\n  Tag client_ssl;\n  client_ssl.name_ = tag_names.CLIENTSSL_PREFIX;\n  client_ssl.value_ = \"clientssl_prefix\";\n\n  regex_tester.testRegex(\"auth.clientssl.clientssl_prefix.auth_ip_allowlist\",\n                         \"auth.clientssl.auth_ip_allowlist\", {client_ssl});\n\n  // TCP Prefix\n  Tag tcp_prefix;\n  tcp_prefix.name_ = tag_names.TCP_PREFIX;\n  tcp_prefix.value_ 
= \"tcp_prefix\";\n\n  regex_tester.testRegex(\"tcp.tcp_prefix.downstream_flow_control_resumed_reading_total\",\n                         \"tcp.downstream_flow_control_resumed_reading_total\", {tcp_prefix});\n\n  // UDP Prefix\n  Tag udp_prefix;\n  udp_prefix.name_ = tag_names.UDP_PREFIX;\n  udp_prefix.value_ = \"udp_prefix\";\n\n  regex_tester.testRegex(\"udp.udp_prefix.downstream_flow_control_resumed_reading_total\",\n                         \"udp.downstream_flow_control_resumed_reading_total\", {udp_prefix});\n\n  // Fault Downstream Cluster\n  Tag fault_connection_manager;\n  fault_connection_manager.name_ = tag_names.HTTP_CONN_MANAGER_PREFIX;\n  fault_connection_manager.value_ = \"fault_connection_manager\";\n\n  Tag fault_downstream_cluster;\n  fault_downstream_cluster.name_ = tag_names.FAULT_DOWNSTREAM_CLUSTER;\n  fault_downstream_cluster.value_ = \"fault_cluster\";\n\n  regex_tester.testRegex(\"http.fault_connection_manager.fault.fault_cluster.aborts_injected\",\n                         \"http.fault.aborts_injected\",\n                         {fault_connection_manager, fault_downstream_cluster});\n\n  Tag rds_hcm;\n  rds_hcm.name_ = tag_names.HTTP_CONN_MANAGER_PREFIX;\n  rds_hcm.value_ = \"rds_connection_manager\";\n\n  Tag rds_route_config;\n  rds_route_config.name_ = tag_names.RDS_ROUTE_CONFIG;\n  rds_route_config.value_ = \"route_config.123\";\n\n  regex_tester.testRegex(\"http.rds_connection_manager.rds.route_config.123.update_success\",\n                         \"http.rds.update_success\", {rds_hcm, rds_route_config});\n\n  // Listener manager worker id\n  Tag worker_id;\n  worker_id.name_ = tag_names.WORKER_ID;\n  worker_id.value_ = \"worker_123\";\n\n  regex_tester.testRegex(\"listener_manager.worker_123.dispatcher.loop_duration_us\",\n                         \"listener_manager.dispatcher.loop_duration_us\", {worker_id});\n}\n\nTEST(TagExtractorTest, ExtractRegexPrefix) {\n  TagExtractorPtr tag_extractor; // Keep tag_extractor in this scope to 
prolong prefix lifetime.\n  auto extractRegexPrefix = [&tag_extractor](const std::string& regex) -> absl::string_view {\n    tag_extractor = TagExtractorImpl::createTagExtractor(\"foo\", regex);\n    return tag_extractor->prefixToken();\n  };\n\n  EXPECT_EQ(\"\", extractRegexPrefix(\"^prefix(foo).\"));\n  EXPECT_EQ(\"prefix\", extractRegexPrefix(\"^prefix\\\\.foo\"));\n  EXPECT_EQ(\"prefix_optional\", extractRegexPrefix(\"^prefix_optional(?=\\\\.)\"));\n  EXPECT_EQ(\"\", extractRegexPrefix(\"^notACompleteToken\"));   //\n  EXPECT_EQ(\"onlyToken\", extractRegexPrefix(\"^onlyToken$\")); //\n  EXPECT_EQ(\"\", extractRegexPrefix(\"(prefix)\"));\n  EXPECT_EQ(\"\", extractRegexPrefix(\"^(prefix)\"));\n  EXPECT_EQ(\"\", extractRegexPrefix(\"prefix(foo)\"));\n}\n\nTEST(TagExtractorTest, CreateTagExtractorNoRegex) {\n  EXPECT_THROW_WITH_REGEX(TagExtractorImpl::createTagExtractor(\"no such default tag\", \"\"),\n                          EnvoyException, \"^No regex specified for tag specifier and no default\");\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/tag_producer_impl_test.cc",
    "content": "#include \"envoy/config/metrics/v3/stats.pb.h\"\n\n#include \"common/config/well_known_names.h\"\n#include \"common/stats/tag_producer_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nTEST(TagProducerTest, CheckConstructor) {\n  envoy::config::metrics::v3::StatsConfig stats_config;\n\n  // Should pass there were no tag name conflict.\n  auto& tag_specifier1 = *stats_config.mutable_stats_tags()->Add();\n  tag_specifier1.set_tag_name(\"test.x\");\n  tag_specifier1.set_fixed_value(\"xxx\");\n  EXPECT_NO_THROW(TagProducerImpl{stats_config});\n\n  // Should raise an error when duplicate tag names are specified.\n  auto& tag_specifier2 = *stats_config.mutable_stats_tags()->Add();\n  tag_specifier2.set_tag_name(\"test.x\");\n  tag_specifier2.set_fixed_value(\"yyy\");\n  EXPECT_THROW_WITH_MESSAGE(TagProducerImpl{stats_config}, EnvoyException,\n                            fmt::format(\"Tag name '{}' specified twice.\", \"test.x\"));\n\n  // Also should raise an error when user defined tag name conflicts with Envoy's default tag names.\n  stats_config.clear_stats_tags();\n  stats_config.mutable_use_all_default_tags()->set_value(true);\n  auto& custom_tag_extractor = *stats_config.mutable_stats_tags()->Add();\n  custom_tag_extractor.set_tag_name(Config::TagNames::get().CLUSTER_NAME);\n  EXPECT_THROW_WITH_MESSAGE(\n      TagProducerImpl{stats_config}, EnvoyException,\n      fmt::format(\"Tag name '{}' specified twice.\", Config::TagNames::get().CLUSTER_NAME));\n\n  // Non-default custom name without regex should throw\n  stats_config.mutable_use_all_default_tags()->set_value(true);\n  stats_config.clear_stats_tags();\n  custom_tag_extractor = *stats_config.mutable_stats_tags()->Add();\n  custom_tag_extractor.set_tag_name(\"test_extractor\");\n  EXPECT_THROW_WITH_MESSAGE(\n      TagProducerImpl{stats_config}, EnvoyException,\n      \"No regex specified for tag specifier and no default 
regex for name: 'test_extractor'\");\n\n  // Also empty regex should throw\n  stats_config.mutable_use_all_default_tags()->set_value(true);\n  stats_config.clear_stats_tags();\n  custom_tag_extractor = *stats_config.mutable_stats_tags()->Add();\n  custom_tag_extractor.set_tag_name(\"test_extractor\");\n  custom_tag_extractor.set_regex(\"\");\n  EXPECT_THROW_WITH_MESSAGE(\n      TagProducerImpl{stats_config}, EnvoyException,\n      \"No regex specified for tag specifier and no default regex for name: 'test_extractor'\");\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/thread_local_store_speed_test.cc",
    "content": "// Note: this should be run with --compilation_mode=opt, and would benefit from a\n// quiescent system with disabled cstate power management.\n\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/stats/allocator_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/tag_producer_impl.h\"\n#include \"common/stats/thread_local_store.h\"\n#include \"common/thread_local/thread_local_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\n\nclass ThreadLocalStorePerf {\npublic:\n  ThreadLocalStorePerf()\n      : heap_alloc_(symbol_table_), store_(heap_alloc_),\n        api_(Api::createApiForTest(store_, time_system_)) {\n    store_.setTagProducer(std::make_unique<Stats::TagProducerImpl>(stats_config_));\n\n    Stats::TestUtil::forEachSampleStat(1000, [this](absl::string_view name) {\n      stat_names_.push_back(std::make_unique<Stats::StatNameStorage>(name, symbol_table_));\n    });\n  }\n\n  ~ThreadLocalStorePerf() {\n    for (auto& stat_name_storage : stat_names_) {\n      stat_name_storage->free(symbol_table_);\n    }\n    store_.shutdownThreading();\n    if (tls_) {\n      tls_->shutdownGlobalThreading();\n      tls_->shutdownThread();\n    }\n    if (dispatcher_) {\n      dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n    }\n  }\n\n  void accessCounters() {\n    for (auto& stat_name_storage : stat_names_) {\n      store_.counterFromStatName(stat_name_storage->statName());\n    }\n  }\n\n  void initThreading() {\n    if (!Envoy::Event::Libevent::Global::initialized()) {\n      Envoy::Event::Libevent::Global::initialize();\n    }\n    dispatcher_ = 
api_->allocateDispatcher(\"test_thread\");\n    tls_ = std::make_unique<ThreadLocal::InstanceImpl>();\n    tls_->registerThread(*dispatcher_, true);\n    store_.initializeThreading(*dispatcher_, *tls_);\n  }\n\nprivate:\n  Stats::SymbolTableImpl symbol_table_;\n  Event::SimulatedTimeSystem time_system_;\n  Stats::AllocatorImpl heap_alloc_;\n  Event::DispatcherPtr dispatcher_;\n  ThreadLocal::InstanceImplPtr tls_;\n  Stats::ThreadLocalStoreImpl store_;\n  Api::ApiPtr api_;\n  envoy::config::metrics::v3::StatsConfig stats_config_;\n  std::vector<std::unique_ptr<Stats::StatNameStorage>> stat_names_;\n};\n\n} // namespace Envoy\n\n// Tests the single-threaded performance of the thread-local-store stats caches\n// without having initialized tls.\nstatic void BM_StatsNoTls(benchmark::State& state) {\n  Envoy::ThreadLocalStorePerf context;\n\n  for (auto _ : state) {\n    context.accessCounters();\n  }\n}\nBENCHMARK(BM_StatsNoTls);\n\n// Tests the single-threaded performance of the thread-local-store stats caches\n// with tls. Note that this test is still single-threaded, and so there's only\n// one replica of the tls cache.\nstatic void BM_StatsWithTls(benchmark::State& state) {\n  Envoy::ThreadLocalStorePerf context;\n  context.initThreading();\n\n  for (auto _ : state) {\n    context.accessCounters();\n  }\n}\nBENCHMARK(BM_StatsWithTls);\n\n// TODO(jmarantz): add multi-threaded variant of this test, that aggressively\n// looks up stats in multiple threads to try to trigger contention issues.\n"
  },
  {
    "path": "test/common/stats/thread_local_store_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/stats/histogram.h\"\n\n#include \"common/common/c_smart_ptr.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/memory/stats.h\"\n#include \"common/stats/stats_matcher_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/tag_producer_impl.h\"\n#include \"common/stats/thread_local_store.h\"\n#include \"common/thread_local/thread_local_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_split.h\"\n#include \"absl/synchronization/blocking_counter.h\"\n#include \"absl/synchronization/notification.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::HasSubstr;\nusing testing::InSequence;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Stats {\n\nconst uint64_t MaxStatNameLength = 127;\n\nclass ThreadLocalStoreTestingPeer {\npublic:\n  // Calculates the number of TLS histograms across all threads. This requires\n  // dispatching to all threads and blocking on their completion, and is exposed\n  // as a testing peer to enable tests that ensure that TLS histograms don't\n  // leak.\n  //\n  // Note that this must be called from the \"main thread\", which has different\n  // implications for unit tests that use real threads vs mocks. 
The easiest way\n  // to capture this in a general purpose helper is to use a callback to convey\n  // the resultant sum.\n  static void numTlsHistograms(ThreadLocalStoreImpl& thread_local_store_impl,\n                               const std::function<void(uint32_t)>& num_tls_hist_cb) {\n    auto num_tls_histograms = std::make_shared<std::atomic<uint32_t>>(0);\n    thread_local_store_impl.tls_->runOnAllThreads(\n        [num_tls_histograms](ThreadLocal::ThreadLocalObjectSharedPtr object)\n            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n          auto& tls_cache = object->asType<ThreadLocalStoreImpl::TlsCache>();\n          *num_tls_histograms += tls_cache.tls_histogram_cache_.size();\n          return object;\n        },\n        [num_tls_hist_cb, num_tls_histograms]() { num_tls_hist_cb(*num_tls_histograms); });\n  }\n};\n\nclass StatsThreadLocalStoreTest : public testing::Test {\npublic:\n  StatsThreadLocalStoreTest()\n      : alloc_(symbol_table_), store_(std::make_unique<ThreadLocalStoreImpl>(alloc_)) {\n    store_->addSink(sink_);\n  }\n\n  void resetStoreWithAlloc(Allocator& alloc) {\n    store_ = std::make_unique<ThreadLocalStoreImpl>(alloc);\n    store_->addSink(sink_);\n  }\n\n  uint32_t numTlsHistograms() {\n    uint32_t num_tls_histograms;\n    absl::Mutex mutex;\n    bool done = false;\n    ThreadLocalStoreTestingPeer::numTlsHistograms(\n        *store_, [&mutex, &done, &num_tls_histograms](uint32_t num) {\n          absl::MutexLock lock(&mutex);\n          num_tls_histograms = num;\n          done = true;\n        });\n    absl::MutexLock lock(&mutex);\n    mutex.Await(absl::Condition(&done));\n    return num_tls_histograms;\n  }\n\n  SymbolTableImpl symbol_table_;\n  NiceMock<Event::MockDispatcher> main_thread_dispatcher_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  AllocatorImpl alloc_;\n  MockSink sink_;\n  ThreadLocalStoreImplPtr store_;\n};\n\nclass HistogramWrapper {\npublic:\n  HistogramWrapper() : histogram_(hist_alloc()) {}\n\n  
~HistogramWrapper() { hist_free(histogram_); }\n\n  const histogram_t* getHistogram() { return histogram_; }\n\n  void setHistogramValues(const std::vector<uint64_t>& values) {\n    for (uint64_t value : values) {\n      hist_insert_intscale(histogram_, value, 0, 1);\n    }\n  }\n\nprivate:\n  histogram_t* histogram_;\n};\n\nclass HistogramTest : public testing::Test {\npublic:\n  using NameHistogramMap = std::map<std::string, ParentHistogramSharedPtr>;\n\n  HistogramTest() : alloc_(symbol_table_) {}\n\n  void SetUp() override {\n    store_ = std::make_unique<ThreadLocalStoreImpl>(alloc_);\n    store_->addSink(sink_);\n    store_->initializeThreading(main_thread_dispatcher_, tls_);\n  }\n\n  void TearDown() override {\n    store_->shutdownThreading();\n    tls_.shutdownThread();\n  }\n\n  NameHistogramMap makeHistogramMap(const std::vector<ParentHistogramSharedPtr>& hist_list) {\n    NameHistogramMap name_histogram_map;\n    for (const ParentHistogramSharedPtr& histogram : hist_list) {\n      // Exclude the scope part of the name.\n      const std::vector<std::string>& split_vector = absl::StrSplit(histogram->name(), '.');\n      name_histogram_map.insert(std::make_pair(split_vector.back(), histogram));\n    }\n    return name_histogram_map;\n  }\n\n  /**\n   * Validates that Histogram merge happens as desired and returns the processed histogram count\n   * that can be asserted later.\n   */\n  uint64_t validateMerge() {\n    bool merge_called = false;\n    store_->mergeHistograms([&merge_called]() -> void { merge_called = true; });\n\n    EXPECT_TRUE(merge_called);\n\n    std::vector<ParentHistogramSharedPtr> histogram_list = store_->histograms();\n\n    HistogramWrapper hist1_cumulative;\n    HistogramWrapper hist2_cumulative;\n    HistogramWrapper hist1_interval;\n    HistogramWrapper hist2_interval;\n\n    hist1_cumulative.setHistogramValues(h1_cumulative_values_);\n    hist2_cumulative.setHistogramValues(h2_cumulative_values_);\n    
hist1_interval.setHistogramValues(h1_interval_values_);\n    hist2_interval.setHistogramValues(h2_interval_values_);\n\n    HistogramStatisticsImpl h1_cumulative_statistics(hist1_cumulative.getHistogram());\n    HistogramStatisticsImpl h2_cumulative_statistics(hist2_cumulative.getHistogram());\n    HistogramStatisticsImpl h1_interval_statistics(hist1_interval.getHistogram());\n    HistogramStatisticsImpl h2_interval_statistics(hist2_interval.getHistogram());\n\n    NameHistogramMap name_histogram_map = makeHistogramMap(histogram_list);\n    const ParentHistogramSharedPtr& h1 = name_histogram_map[\"h1\"];\n    EXPECT_EQ(h1->cumulativeStatistics().quantileSummary(),\n              h1_cumulative_statistics.quantileSummary());\n    EXPECT_EQ(h1->intervalStatistics().quantileSummary(), h1_interval_statistics.quantileSummary());\n    EXPECT_EQ(h1->cumulativeStatistics().bucketSummary(), h1_cumulative_statistics.bucketSummary());\n    EXPECT_EQ(h1->intervalStatistics().bucketSummary(), h1_interval_statistics.bucketSummary());\n\n    if (histogram_list.size() > 1) {\n      const ParentHistogramSharedPtr& h2 = name_histogram_map[\"h2\"];\n      EXPECT_EQ(h2->cumulativeStatistics().quantileSummary(),\n                h2_cumulative_statistics.quantileSummary());\n      EXPECT_EQ(h2->intervalStatistics().quantileSummary(),\n                h2_interval_statistics.quantileSummary());\n      EXPECT_EQ(h2->cumulativeStatistics().bucketSummary(),\n                h2_cumulative_statistics.bucketSummary());\n      EXPECT_EQ(h2->intervalStatistics().bucketSummary(), h2_interval_statistics.bucketSummary());\n    }\n\n    h1_interval_values_.clear();\n    h2_interval_values_.clear();\n\n    return histogram_list.size();\n  }\n\n  void expectCallAndAccumulate(Histogram& histogram, uint64_t record_value) {\n    EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), record_value));\n    histogram.recordValue(record_value);\n\n    if (histogram.name() == \"h1\") {\n      
h1_cumulative_values_.push_back(record_value);\n      h1_interval_values_.push_back(record_value);\n    } else {\n      h2_cumulative_values_.push_back(record_value);\n      h2_interval_values_.push_back(record_value);\n    }\n  }\n\n  SymbolTableImpl symbol_table_;\n  NiceMock<Event::MockDispatcher> main_thread_dispatcher_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  AllocatorImpl alloc_;\n  MockSink sink_;\n  ThreadLocalStoreImplPtr store_;\n  InSequence s;\n  std::vector<uint64_t> h1_cumulative_values_, h2_cumulative_values_, h1_interval_values_,\n      h2_interval_values_;\n};\n\nTEST_F(StatsThreadLocalStoreTest, NoTls) {\n  InSequence s;\n\n  Counter& c1 = store_->counterFromString(\"c1\");\n  EXPECT_EQ(&c1, &store_->counterFromString(\"c1\"));\n  StatNameManagedStorage c1_name(\"c1\", symbol_table_);\n  c1.add(100);\n  auto found_counter = store_->findCounter(c1_name.statName());\n  ASSERT_TRUE(found_counter.has_value());\n  EXPECT_EQ(&c1, &found_counter->get());\n  EXPECT_EQ(100, found_counter->get().value());\n  c1.add(100);\n  EXPECT_EQ(200, found_counter->get().value());\n\n  Gauge& g1 = store_->gaugeFromString(\"g1\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(&g1, &store_->gaugeFromString(\"g1\", Gauge::ImportMode::Accumulate));\n  StatNameManagedStorage g1_name(\"g1\", symbol_table_);\n  g1.set(100);\n  auto found_gauge = store_->findGauge(g1_name.statName());\n  ASSERT_TRUE(found_gauge.has_value());\n  EXPECT_EQ(&g1, &found_gauge->get());\n  EXPECT_EQ(100, found_gauge->get().value());\n  g1.set(0);\n  EXPECT_EQ(0, found_gauge->get().value());\n\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(&h1, &store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified));\n  StatNameManagedStorage h1_name(\"h1\", symbol_table_);\n  auto found_histogram = store_->findHistogram(h1_name.statName());\n  ASSERT_TRUE(found_histogram.has_value());\n  EXPECT_EQ(&h1, &found_histogram->get());\n\n  
TextReadout& t1 = store_->textReadoutFromString(\"t1\");\n  EXPECT_EQ(&t1, &store_->textReadoutFromString(\"t1\"));\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 200));\n  h1.recordValue(200);\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 100));\n  store_->deliverHistogramToSinks(h1, 100);\n\n  EXPECT_EQ(1UL, store_->counters().size());\n  EXPECT_EQ(&c1, TestUtility::findCounter(*store_, \"c1\").get());\n  EXPECT_EQ(2L, TestUtility::findCounter(*store_, \"c1\").use_count());\n  EXPECT_EQ(1UL, store_->gauges().size());\n  EXPECT_EQ(&g1, store_->gauges().front().get()); // front() ok when size()==1\n  EXPECT_EQ(2L, store_->gauges().front().use_count());\n  EXPECT_EQ(1UL, store_->textReadouts().size());\n  EXPECT_EQ(&t1, store_->textReadouts().front().get()); // front() ok when size()==1\n  EXPECT_EQ(2L, store_->textReadouts().front().use_count());\n\n  store_->shutdownThreading();\n}\n\nTEST_F(StatsThreadLocalStoreTest, Tls) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  Counter& c1 = store_->counterFromString(\"c1\");\n  EXPECT_EQ(&c1, &store_->counterFromString(\"c1\"));\n  StatNameManagedStorage c1_name(\"c1\", symbol_table_);\n  c1.add(100);\n  auto found_counter = store_->findCounter(c1_name.statName());\n  ASSERT_TRUE(found_counter.has_value());\n  EXPECT_EQ(&c1, &found_counter->get());\n  EXPECT_EQ(100, found_counter->get().value());\n  c1.add(100);\n  EXPECT_EQ(200, found_counter->get().value());\n\n  Gauge& g1 = store_->gaugeFromString(\"g1\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(&g1, &store_->gaugeFromString(\"g1\", Gauge::ImportMode::Accumulate));\n  StatNameManagedStorage g1_name(\"g1\", symbol_table_);\n  g1.set(100);\n  auto found_gauge = store_->findGauge(g1_name.statName());\n  ASSERT_TRUE(found_gauge.has_value());\n  EXPECT_EQ(&g1, &found_gauge->get());\n  EXPECT_EQ(100, found_gauge->get().value());\n  g1.set(0);\n  EXPECT_EQ(0, found_gauge->get().value());\n\n  Histogram& h1 = 
store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(&h1, &store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified));\n  StatNameManagedStorage h1_name(\"h1\", symbol_table_);\n  auto found_histogram = store_->findHistogram(h1_name.statName());\n  ASSERT_TRUE(found_histogram.has_value());\n  EXPECT_EQ(&h1, &found_histogram->get());\n\n  TextReadout& t1 = store_->textReadoutFromString(\"t1\");\n  EXPECT_EQ(&t1, &store_->textReadoutFromString(\"t1\"));\n\n  EXPECT_EQ(1UL, store_->counters().size());\n\n  EXPECT_EQ(&c1, TestUtility::findCounter(*store_, \"c1\").get());\n  EXPECT_EQ(2L, TestUtility::findCounter(*store_, \"c1\").use_count());\n  EXPECT_EQ(1UL, store_->gauges().size());\n  EXPECT_EQ(&g1, store_->gauges().front().get()); // front() ok when size()==1\n  EXPECT_EQ(2L, store_->gauges().front().use_count());\n  EXPECT_EQ(1UL, store_->textReadouts().size());\n  EXPECT_EQ(&t1, store_->textReadouts().front().get()); // front() ok when size()==1\n  EXPECT_EQ(2UL, store_->textReadouts().front().use_count());\n\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n\n  EXPECT_EQ(1UL, store_->counters().size());\n  EXPECT_EQ(&c1, TestUtility::findCounter(*store_, \"c1\").get());\n  EXPECT_EQ(2L, TestUtility::findCounter(*store_, \"c1\").use_count());\n  EXPECT_EQ(1UL, store_->gauges().size());\n  EXPECT_EQ(&g1, store_->gauges().front().get()); // front() ok when size()==1\n  EXPECT_EQ(2L, store_->gauges().front().use_count());\n  EXPECT_EQ(1UL, store_->textReadouts().size());\n  EXPECT_EQ(&t1, store_->textReadouts().front().get()); // front() ok when size()==1\n  EXPECT_EQ(2L, store_->textReadouts().front().use_count());\n}\n\nTEST_F(StatsThreadLocalStoreTest, BasicScope) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  ScopePtr scope1 = store_->createScope(\"scope1.\");\n  Counter& c1 = store_->counterFromString(\"c1\");\n  Counter& c2 = scope1->counterFromString(\"c2\");\n  
EXPECT_EQ(\"c1\", c1.name());\n  EXPECT_EQ(\"scope1.c2\", c2.name());\n  StatNameManagedStorage c1_name(\"c1\", symbol_table_);\n  auto found_counter = store_->findCounter(c1_name.statName());\n  ASSERT_TRUE(found_counter.has_value());\n  EXPECT_EQ(&c1, &found_counter->get());\n  StatNameManagedStorage c2_name(\"scope1.c2\", symbol_table_);\n  auto found_counter2 = store_->findCounter(c2_name.statName());\n  ASSERT_TRUE(found_counter2.has_value());\n  EXPECT_EQ(&c2, &found_counter2->get());\n\n  Gauge& g1 = store_->gaugeFromString(\"g1\", Gauge::ImportMode::Accumulate);\n  Gauge& g2 = scope1->gaugeFromString(\"g2\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(\"g1\", g1.name());\n  EXPECT_EQ(\"scope1.g2\", g2.name());\n  StatNameManagedStorage g1_name(\"g1\", symbol_table_);\n  auto found_gauge = store_->findGauge(g1_name.statName());\n  ASSERT_TRUE(found_gauge.has_value());\n  EXPECT_EQ(&g1, &found_gauge->get());\n  StatNameManagedStorage g2_name(\"scope1.g2\", symbol_table_);\n  auto found_gauge2 = store_->findGauge(g2_name.statName());\n  ASSERT_TRUE(found_gauge2.has_value());\n  EXPECT_EQ(&g2, &found_gauge2->get());\n\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  Histogram& h2 = scope1->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(\"h1\", h1.name());\n  EXPECT_EQ(\"scope1.h2\", h2.name());\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 100));\n  h1.recordValue(100);\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h2), 200));\n  h2.recordValue(200);\n  StatNameManagedStorage h1_name(\"h1\", symbol_table_);\n  auto found_histogram = store_->findHistogram(h1_name.statName());\n  ASSERT_TRUE(found_histogram.has_value());\n  EXPECT_EQ(&h1, &found_histogram->get());\n  StatNameManagedStorage h2_name(\"scope1.h2\", symbol_table_);\n  auto found_histogram2 = store_->findHistogram(h2_name.statName());\n  ASSERT_TRUE(found_histogram2.has_value());\n  EXPECT_EQ(&h2, 
&found_histogram2->get());\n\n  TextReadout& t1 = store_->textReadoutFromString(\"t1\");\n  TextReadout& t2 = scope1->textReadoutFromString(\"t2\");\n  EXPECT_EQ(\"t1\", t1.name());\n  EXPECT_EQ(\"scope1.t2\", t2.name());\n\n  StatNameManagedStorage tag_key(\"a\", symbol_table_);\n  StatNameManagedStorage tag_value(\"b\", symbol_table_);\n  StatNameTagVector tags{{StatName(tag_key.statName()), StatName(tag_value.statName())}};\n\n  const TagVector expectedTags = {Tag{\"a\", \"b\"}};\n\n  {\n    StatNameManagedStorage storage(\"c3\", symbol_table_);\n    Counter& counter = scope1->counterFromStatNameWithTags(StatName(storage.statName()), tags);\n    EXPECT_EQ(expectedTags, counter.tags());\n    EXPECT_EQ(&counter, &scope1->counterFromStatNameWithTags(StatName(storage.statName()), tags));\n  }\n  {\n    StatNameManagedStorage storage(\"g3\", symbol_table_);\n    Gauge& gauge = scope1->gaugeFromStatNameWithTags(StatName(storage.statName()), tags,\n                                                     Gauge::ImportMode::Accumulate);\n    EXPECT_EQ(expectedTags, gauge.tags());\n    EXPECT_EQ(&gauge, &scope1->gaugeFromStatNameWithTags(StatName(storage.statName()), tags,\n                                                         Gauge::ImportMode::Accumulate));\n  }\n  {\n    StatNameManagedStorage storage(\"h3\", symbol_table_);\n    Histogram& histogram = scope1->histogramFromStatNameWithTags(\n        StatName(storage.statName()), tags, Stats::Histogram::Unit::Unspecified);\n    EXPECT_EQ(expectedTags, histogram.tags());\n    EXPECT_EQ(&histogram,\n              &scope1->histogramFromStatNameWithTags(StatName(storage.statName()), tags,\n                                                     Stats::Histogram::Unit::Unspecified));\n  }\n\n  store_->shutdownThreading();\n  scope1->deliverHistogramToSinks(h1, 100);\n  scope1->deliverHistogramToSinks(h2, 200);\n  scope1.reset();\n  tls_.shutdownThread();\n}\n\nTEST_F(StatsThreadLocalStoreTest, HistogramScopeOverlap) {\n  
InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  // Creating two scopes with the same name gets you two distinct scope objects.\n  ScopePtr scope1 = store_->createScope(\"scope.\");\n  ScopePtr scope2 = store_->createScope(\"scope.\");\n  EXPECT_NE(scope1, scope2);\n\n  EXPECT_EQ(0, store_->histograms().size());\n  EXPECT_EQ(0, numTlsHistograms());\n\n  // However, stats created in the two same-named scopes will be the same objects.\n  Counter& counter = scope1->counterFromString(\"counter\");\n  EXPECT_EQ(&counter, &scope2->counterFromString(\"counter\"));\n  Gauge& gauge = scope1->gaugeFromString(\"gauge\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(&gauge, &scope2->gaugeFromString(\"gauge\", Gauge::ImportMode::Accumulate));\n  TextReadout& text_readout = scope1->textReadoutFromString(\"tr\");\n  EXPECT_EQ(&text_readout, &scope2->textReadoutFromString(\"tr\"));\n  Histogram& histogram = scope1->histogramFromString(\"histogram\", Histogram::Unit::Unspecified);\n  EXPECT_EQ(&histogram, &scope2->histogramFromString(\"histogram\", Histogram::Unit::Unspecified));\n\n  // The histogram was created in scope1, which can now be destroyed. 
But the\n  // histogram is kept alive by scope2.\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), 100));\n  histogram.recordValue(100);\n  EXPECT_EQ(1, store_->histograms().size());\n  EXPECT_EQ(1, numTlsHistograms());\n  scope1.reset();\n  EXPECT_EQ(1, store_->histograms().size());\n  EXPECT_EQ(1, numTlsHistograms());\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), 200));\n  histogram.recordValue(200);\n  EXPECT_EQ(&histogram, &scope2->histogramFromString(\"histogram\", Histogram::Unit::Unspecified));\n  scope2.reset();\n  EXPECT_EQ(0, store_->histograms().size());\n  EXPECT_EQ(0, numTlsHistograms());\n\n  store_->shutdownThreading();\n\n  store_->histogramFromString(\"histogram_after_shutdown\", Histogram::Unit::Unspecified);\n\n  tls_.shutdownThread();\n}\n\n// Validate that we sanitize away bad characters in the stats prefix.\nTEST_F(StatsThreadLocalStoreTest, SanitizePrefix) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  ScopePtr scope1 = store_->createScope(std::string(\"scope1:\\0:foo.\", 13));\n  Counter& c1 = scope1->counterFromString(\"c1\");\n  EXPECT_EQ(\"scope1___foo.c1\", c1.name());\n\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n}\n\nTEST_F(StatsThreadLocalStoreTest, ConstSymtabAccessor) {\n  ScopePtr scope = store_->createScope(\"scope.\");\n  const Scope& cscope = *scope;\n  const SymbolTable& const_symbol_table = cscope.constSymbolTable();\n  SymbolTable& symbol_table = scope->symbolTable();\n  EXPECT_EQ(&const_symbol_table, &symbol_table);\n}\n\nTEST_F(StatsThreadLocalStoreTest, ScopeDelete) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  ScopePtr scope1 = store_->createScope(\"scope1.\");\n  scope1->counterFromString(\"c1\");\n  EXPECT_EQ(1UL, store_->counters().size());\n  CounterSharedPtr c1 = TestUtility::findCounter(*store_, \"scope1.c1\");\n  EXPECT_EQ(\"scope1.c1\", c1->name());\n\n  EXPECT_CALL(main_thread_dispatcher_, 
post(_));\n  EXPECT_CALL(tls_, runOnAllThreads(_, _));\n  scope1.reset();\n  EXPECT_EQ(0UL, store_->counters().size());\n\n  EXPECT_EQ(1L, c1.use_count());\n  c1.reset();\n\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n}\n\nTEST_F(StatsThreadLocalStoreTest, NestedScopes) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  ScopePtr scope1 = store_->createScope(\"scope1.\");\n  Counter& c1 = scope1->counterFromString(\"foo.bar\");\n  EXPECT_EQ(\"scope1.foo.bar\", c1.name());\n  StatNameManagedStorage c1_name(\"scope1.foo.bar\", symbol_table_);\n  auto found_counter = store_->findCounter(c1_name.statName());\n  ASSERT_TRUE(found_counter.has_value());\n  EXPECT_EQ(&c1, &found_counter->get());\n\n  ScopePtr scope2 = scope1->createScope(\"foo.\");\n  Counter& c2 = scope2->counterFromString(\"bar\");\n  EXPECT_EQ(&c1, &c2);\n  EXPECT_EQ(\"scope1.foo.bar\", c2.name());\n  StatNameManagedStorage c2_name(\"scope1.foo.bar\", symbol_table_);\n  auto found_counter2 = store_->findCounter(c2_name.statName());\n  ASSERT_TRUE(found_counter2.has_value());\n\n  // Different allocations point to the same referenced counted backing memory.\n  c1.inc();\n  EXPECT_EQ(1UL, c1.value());\n  EXPECT_EQ(c1.value(), c2.value());\n\n  Gauge& g1 = scope2->gaugeFromString(\"some_gauge\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(\"scope1.foo.some_gauge\", g1.name());\n\n  TextReadout& t1 = scope2->textReadoutFromString(\"some_string\");\n  EXPECT_EQ(\"scope1.foo.some_string\", t1.name());\n\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n}\n\nTEST_F(StatsThreadLocalStoreTest, OverlappingScopes) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  // Both scopes point to the same namespace. 
This can happen during reload of a cluster for\n  // example.\n  ScopePtr scope1 = store_->createScope(\"scope1.\");\n  ScopePtr scope2 = store_->createScope(\"scope1.\");\n\n  // We will call alloc twice, but they should point to the same backing storage.\n  Counter& c1 = scope1->counterFromString(\"c\");\n  Counter& c2 = scope2->counterFromString(\"c\");\n  EXPECT_EQ(&c1, &c2);\n  c1.inc();\n  EXPECT_EQ(1UL, c1.value());\n  EXPECT_EQ(1UL, c2.value());\n  c2.inc();\n  EXPECT_EQ(2UL, c1.value());\n  EXPECT_EQ(2UL, c2.value());\n\n  // We should dedup when we fetch all counters to handle the overlapping case.\n  EXPECT_EQ(1UL, store_->counters().size());\n\n  // Gauges should work the same way.\n  Gauge& g1 = scope1->gaugeFromString(\"g\", Gauge::ImportMode::Accumulate);\n  Gauge& g2 = scope2->gaugeFromString(\"g\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(&g1, &g2);\n  g1.set(5);\n  EXPECT_EQ(5UL, g1.value());\n  EXPECT_EQ(5UL, g2.value());\n  g2.set(1);\n  EXPECT_EQ(1UL, g1.value());\n  EXPECT_EQ(1UL, g2.value());\n  EXPECT_EQ(1UL, store_->gauges().size());\n\n  // TextReadouts should work just like gauges.\n  TextReadout& t1 = scope1->textReadoutFromString(\"b\");\n  TextReadout& t2 = scope2->textReadoutFromString(\"b\");\n  EXPECT_EQ(&t1, &t2);\n\n  t1.set(\"hello\");\n  EXPECT_EQ(\"hello\", t1.value());\n  EXPECT_EQ(\"hello\", t2.value());\n  t2.set(\"goodbye\");\n  EXPECT_EQ(\"goodbye\", t1.value());\n  EXPECT_EQ(\"goodbye\", t2.value());\n  EXPECT_EQ(1UL, store_->textReadouts().size());\n\n  // Deleting scope 1 will call free but will be reference counted. 
It still leaves scope 2 valid.\n  scope1.reset();\n  c2.inc();\n  EXPECT_EQ(3UL, c2.value());\n  EXPECT_EQ(1UL, store_->counters().size());\n  g2.set(10);\n  EXPECT_EQ(10UL, g2.value());\n  EXPECT_EQ(1UL, store_->gauges().size());\n  t2.set(\"abc\");\n  EXPECT_EQ(\"abc\", t2.value());\n  EXPECT_EQ(1UL, store_->textReadouts().size());\n\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n}\n\nTEST_F(StatsThreadLocalStoreTest, TextReadoutAllLengths) {\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  TextReadout& t = store_->textReadoutFromString(\"t\");\n  EXPECT_EQ(\"\", t.value());\n  std::string str;\n  // ASCII\n  for (int i = 0; i < 15; i++) {\n    str += ('a' + i);\n    t.set(std::string(str));\n    EXPECT_EQ(str, t.value());\n  }\n\n  // Non-ASCII\n  str = \"\";\n  for (int i = 0; i < 15; i++) {\n    str += ('\\xEE' + i);\n    t.set(std::string(str));\n    EXPECT_EQ(str, t.value());\n  }\n\n  // Null bytes ok; the TextReadout implementation doesn't use null termination in its storage\n  t.set(std::string(\"\\x00\", 1));\n  EXPECT_EQ(std::string(\"\\x00\", 1), t.value());\n  t.set(std::string(\"\\x00\\x00\\x00\", 3));\n  EXPECT_EQ(std::string(\"\\x00\\x00\\x00\", 3), t.value());\n  EXPECT_NE(std::string(\"\\x00\", 1), t.value());\n  EXPECT_NE(std::string(\"\", 0), t.value());\n\n  // No Truncation to 15\n  t.set(\"aaaabbbbccccdddX\");\n  EXPECT_EQ(\"aaaabbbbccccdddX\", t.value());\n  t.set(\"aaaabbbbccccdddXX\");\n  EXPECT_EQ(\"aaaabbbbccccdddXX\", t.value());\n  t.set(\"aaaabbbbccccdddXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\");\n  // EXPECT_EQ(\"aaaabbbbccccddd\", t.value());\n\n  // Can set back to empty\n  t.set(\"\");\n  EXPECT_EQ(\"\", t.value());\n\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n}\n\nclass ThreadLocalStoreNoMocksTestBase : public testing::Test {\npublic:\n  ThreadLocalStoreNoMocksTestBase()\n      : alloc_(symbol_table_), store_(std::make_unique<ThreadLocalStoreImpl>(alloc_)),\n        
pool_(symbol_table_) {}\n  ~ThreadLocalStoreNoMocksTestBase() override {\n    if (store_ != nullptr) {\n      store_->shutdownThreading();\n    }\n  }\n\n  StatName makeStatName(absl::string_view name) { return pool_.add(name); }\n\n  SymbolTableImpl symbol_table_;\n  AllocatorImpl alloc_;\n  ThreadLocalStoreImplPtr store_;\n  StatNamePool pool_;\n};\n\nclass LookupWithStatNameTest : public ThreadLocalStoreNoMocksTestBase {};\n\nTEST_F(LookupWithStatNameTest, All) {\n  ScopePtr scope1 = store_->createScope(\"scope1.\");\n  Counter& c1 = store_->Store::counterFromStatName(makeStatName(\"c1\"));\n  Counter& c2 = scope1->counterFromStatName(makeStatName(\"c2\"));\n  EXPECT_EQ(\"c1\", c1.name());\n  EXPECT_EQ(\"scope1.c2\", c2.name());\n  EXPECT_EQ(\"c1\", c1.tagExtractedName());\n  EXPECT_EQ(\"scope1.c2\", c2.tagExtractedName());\n  EXPECT_EQ(0, c1.tags().size());\n  EXPECT_EQ(0, c1.tags().size());\n\n  Gauge& g1 = store_->Store::gaugeFromStatName(makeStatName(\"g1\"), Gauge::ImportMode::Accumulate);\n  Gauge& g2 = scope1->gaugeFromStatName(makeStatName(\"g2\"), Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(\"g1\", g1.name());\n  EXPECT_EQ(\"scope1.g2\", g2.name());\n  EXPECT_EQ(\"g1\", g1.tagExtractedName());\n  EXPECT_EQ(\"scope1.g2\", g2.tagExtractedName());\n  EXPECT_EQ(0, g1.tags().size());\n  EXPECT_EQ(0, g1.tags().size());\n\n  Histogram& h1 =\n      store_->Store::histogramFromStatName(makeStatName(\"h1\"), Stats::Histogram::Unit::Unspecified);\n  Histogram& h2 =\n      scope1->histogramFromStatName(makeStatName(\"h2\"), Stats::Histogram::Unit::Unspecified);\n  scope1->deliverHistogramToSinks(h2, 0);\n  EXPECT_EQ(\"h1\", h1.name());\n  EXPECT_EQ(\"scope1.h2\", h2.name());\n  EXPECT_EQ(\"h1\", h1.tagExtractedName());\n  EXPECT_EQ(\"scope1.h2\", h2.tagExtractedName());\n  EXPECT_EQ(0, h1.tags().size());\n  EXPECT_EQ(0, h2.tags().size());\n  h1.recordValue(200);\n  h2.recordValue(200);\n\n  ScopePtr scope2 = scope1->createScope(\"foo.\");\n  
EXPECT_EQ(\"scope1.foo.bar\", scope2->counterFromStatName(makeStatName(\"bar\")).name());\n\n  // Validate that we sanitize away bad characters in the stats prefix.\n  ScopePtr scope3 = scope1->createScope(std::string(\"foo:\\0:.\", 7));\n  EXPECT_EQ(\"scope1.foo___.bar\", scope3->counterFromString(\"bar\").name());\n\n  EXPECT_EQ(4UL, store_->counters().size());\n  EXPECT_EQ(2UL, store_->gauges().size());\n}\n\nTEST_F(LookupWithStatNameTest, NotFound) {\n  StatName not_found(makeStatName(\"not_found\"));\n  EXPECT_FALSE(store_->findCounter(not_found));\n  EXPECT_FALSE(store_->findGauge(not_found));\n  EXPECT_FALSE(store_->findHistogram(not_found));\n  EXPECT_FALSE(store_->findTextReadout(not_found));\n}\n\nclass StatsMatcherTLSTest : public StatsThreadLocalStoreTest {\npublic:\n  envoy::config::metrics::v3::StatsConfig stats_config_;\n};\n\nTEST_F(StatsMatcherTLSTest, TestNoOpStatImpls) {\n  InSequence s;\n\n  stats_config_.mutable_stats_matcher()->mutable_exclusion_list()->add_patterns()->set_prefix(\n      \"noop\");\n  store_->setStatsMatcher(std::make_unique<StatsMatcherImpl>(stats_config_));\n\n  // Testing No-op counters, gauges, histograms which match the prefix \"noop\".\n\n  // Counter\n  Counter& noop_counter = store_->counterFromString(\"noop_counter\");\n  EXPECT_EQ(noop_counter.name(), \"\");\n  EXPECT_EQ(noop_counter.value(), 0);\n  noop_counter.add(1);\n  EXPECT_EQ(noop_counter.value(), 0);\n  noop_counter.inc();\n  EXPECT_EQ(noop_counter.value(), 0);\n  noop_counter.reset();\n  EXPECT_EQ(noop_counter.value(), 0);\n  Counter& noop_counter_2 = store_->counterFromString(\"noop_counter_2\");\n  EXPECT_EQ(&noop_counter, &noop_counter_2);\n  EXPECT_FALSE(noop_counter.used());      // hardcoded to return false in NullMetricImpl.\n  EXPECT_EQ(0, noop_counter.latch());     // hardcoded to 0.\n  EXPECT_EQ(0, noop_counter.use_count()); // null counter is contained in ThreadLocalStoreImpl.\n\n  // Gauge\n  Gauge& noop_gauge = 
store_->gaugeFromString(\"noop_gauge\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(noop_gauge.name(), \"\");\n  EXPECT_EQ(noop_gauge.value(), 0);\n  noop_gauge.add(1);\n  EXPECT_EQ(noop_gauge.value(), 0);\n  noop_gauge.inc();\n  EXPECT_EQ(noop_gauge.value(), 0);\n  noop_gauge.dec();\n  EXPECT_EQ(noop_gauge.value(), 0);\n  noop_gauge.set(2);\n  EXPECT_EQ(noop_gauge.value(), 0);\n  noop_gauge.sub(2);\n  EXPECT_EQ(noop_gauge.value(), 0);\n  EXPECT_EQ(Gauge::ImportMode::NeverImport, noop_gauge.importMode());\n  EXPECT_FALSE(noop_gauge.used());      // null gauge is contained in ThreadLocalStoreImpl.\n  EXPECT_EQ(0, noop_gauge.use_count()); // null gauge is contained in ThreadLocalStoreImpl.\n\n  Gauge& noop_gauge_2 = store_->gaugeFromString(\"noop_gauge_2\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(&noop_gauge, &noop_gauge_2);\n\n  // TextReadout\n  TextReadout& noop_string = store_->textReadoutFromString(\"noop_string\");\n  EXPECT_EQ(noop_string.name(), \"\");\n  EXPECT_EQ(\"\", noop_string.value());\n  noop_string.set(\"hello\");\n  EXPECT_EQ(\"\", noop_string.value());\n  noop_string.set(\"hello\");\n  EXPECT_EQ(\"\", noop_string.value());\n  noop_string.set(\"goodbye\");\n  EXPECT_EQ(\"\", noop_string.value());\n  noop_string.set(\"hello\");\n  EXPECT_EQ(\"\", noop_string.value());\n  TextReadout& noop_string_2 = store_->textReadoutFromString(\"noop_string_2\");\n  EXPECT_EQ(&noop_string, &noop_string_2);\n\n  // Histogram\n  Histogram& noop_histogram =\n      store_->histogramFromString(\"noop_histogram\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(noop_histogram.name(), \"\");\n  EXPECT_FALSE(noop_histogram.used());\n  EXPECT_EQ(Stats::Histogram::Unit::Null, noop_histogram.unit());\n  Histogram& noop_histogram_2 =\n      store_->histogramFromString(\"noop_histogram_2\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(&noop_histogram, &noop_histogram_2);\n\n  store_->shutdownThreading();\n}\n\n// We only test the exclusion list -- the inclusion 
list is the inverse, and both are tested in\n// test/common/stats:stats_matcher_test.\nTEST_F(StatsMatcherTLSTest, TestExclusionRegex) {\n  InSequence s;\n\n  // Expected to alloc lowercase_counter, lowercase_gauge, valid_counter, valid_gauge\n\n  // Will block all stats containing any capital alphanumeric letter.\n  stats_config_.mutable_stats_matcher()\n      ->mutable_exclusion_list()\n      ->add_patterns()\n      ->set_hidden_envoy_deprecated_regex(\".*[A-Z].*\");\n  store_->setStatsMatcher(std::make_unique<StatsMatcherImpl>(stats_config_));\n\n  // The creation of counters/gauges/histograms which have no uppercase letters should succeed.\n  Counter& lowercase_counter = store_->counterFromString(\"lowercase_counter\");\n  EXPECT_EQ(lowercase_counter.name(), \"lowercase_counter\");\n  Gauge& lowercase_gauge =\n      store_->gaugeFromString(\"lowercase_gauge\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(lowercase_gauge.name(), \"lowercase_gauge\");\n  Histogram& lowercase_histogram =\n      store_->histogramFromString(\"lowercase_histogram\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(lowercase_histogram.name(), \"lowercase_histogram\");\n\n  TextReadout& lowercase_string = store_->textReadoutFromString(\"lowercase_string\");\n  EXPECT_EQ(lowercase_string.name(), \"lowercase_string\");\n  // And the creation of counters/gauges/histograms which have uppercase letters should fail.\n  Counter& uppercase_counter = store_->counterFromString(\"UPPERCASE_counter\");\n  EXPECT_EQ(uppercase_counter.name(), \"\");\n  uppercase_counter.inc();\n  EXPECT_EQ(uppercase_counter.value(), 0);\n  uppercase_counter.inc();\n  EXPECT_EQ(uppercase_counter.value(), 0);\n\n  Gauge& uppercase_gauge =\n      store_->gaugeFromString(\"uppercase_GAUGE\", Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(uppercase_gauge.name(), \"\");\n  uppercase_gauge.inc();\n  EXPECT_EQ(uppercase_gauge.value(), 0);\n  uppercase_gauge.inc();\n  EXPECT_EQ(uppercase_gauge.value(), 0);\n\n  TextReadout& 
uppercase_string = store_->textReadoutFromString(\"uppercase_STRING\");\n  EXPECT_EQ(uppercase_string.name(), \"\");\n  uppercase_string.set(\"A STRING VALUE\");\n  EXPECT_EQ(\"\", uppercase_string.value());\n\n  // Histograms are harder to query and test, so we resort to testing that name() returns the empty\n  // string.\n  Histogram& uppercase_histogram =\n      store_->histogramFromString(\"upperCASE_histogram\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(uppercase_histogram.name(), \"\");\n\n  // Adding another exclusion rule -- now we reject not just uppercase stats but those starting with\n  // the string \"invalid\".\n  stats_config_.mutable_stats_matcher()->mutable_exclusion_list()->add_patterns()->set_prefix(\n      \"invalid\");\n  store_->setStatsMatcher(std::make_unique<StatsMatcherImpl>(stats_config_));\n\n  Counter& valid_counter = store_->counterFromString(\"valid_counter\");\n  valid_counter.inc();\n  EXPECT_EQ(valid_counter.value(), 1);\n\n  Counter& invalid_counter = store_->counterFromString(\"invalid_counter\");\n  invalid_counter.inc();\n  EXPECT_EQ(invalid_counter.value(), 0);\n\n  // But the old exclusion rule still holds.\n  Counter& invalid_counter_2 = store_->counterFromString(\"also_INVALID_counter\");\n  invalid_counter_2.inc();\n  EXPECT_EQ(invalid_counter_2.value(), 0);\n\n  // And we expect the same behavior from gauges and histograms.\n  Gauge& valid_gauge = store_->gaugeFromString(\"valid_gauge\", Gauge::ImportMode::Accumulate);\n  valid_gauge.set(2);\n  EXPECT_EQ(valid_gauge.value(), 2);\n\n  Gauge& invalid_gauge_1 = store_->gaugeFromString(\"invalid_gauge\", Gauge::ImportMode::Accumulate);\n  invalid_gauge_1.inc();\n  EXPECT_EQ(invalid_gauge_1.value(), 0);\n\n  Gauge& invalid_gauge_2 =\n      store_->gaugeFromString(\"also_INVALID_gauge\", Gauge::ImportMode::Accumulate);\n  invalid_gauge_2.inc();\n  EXPECT_EQ(invalid_gauge_2.value(), 0);\n\n  Histogram& valid_histogram =\n      
store_->histogramFromString(\"valid_histogram\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(valid_histogram.name(), \"valid_histogram\");\n\n  Histogram& invalid_histogram_1 =\n      store_->histogramFromString(\"invalid_histogram\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(invalid_histogram_1.name(), \"\");\n\n  Histogram& invalid_histogram_2 =\n      store_->histogramFromString(\"also_INVALID_histogram\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(invalid_histogram_2.name(), \"\");\n\n  TextReadout& valid_string = store_->textReadoutFromString(\"valid_string\");\n  valid_string.set(\"i'm valid\");\n  EXPECT_EQ(\"i'm valid\", valid_string.value());\n\n  TextReadout& invalid_string_1 = store_->textReadoutFromString(\"invalid_string\");\n  invalid_string_1.set(\"nope\");\n  EXPECT_EQ(\"\", invalid_string_1.value());\n\n  TextReadout& invalid_string_2 = store_->textReadoutFromString(\"also_INVLD_string\");\n  invalid_string_2.set(\"still no\");\n  EXPECT_EQ(\"\", invalid_string_2.value());\n\n  // Expected to free lowercase_counter, lowercase_gauge, valid_counter, valid_gauge\n  store_->shutdownThreading();\n}\n\n// Tests the logic for caching the stats-matcher results, and in particular the\n// private impl method checkAndRememberRejection(). That method behaves\n// differently depending on whether TLS is enabled or not, so we parameterize\n// the test accordingly; GetParam()==true means we want a TLS cache. 
In either\n// case, we should never be calling the stats-matcher rejection logic more than\n// once on given stat name.\nclass RememberStatsMatcherTest : public testing::TestWithParam<bool> {\npublic:\n  RememberStatsMatcherTest()\n      : heap_alloc_(symbol_table_), store_(heap_alloc_), scope_(store_.createScope(\"scope.\")) {\n    if (GetParam()) {\n      store_.initializeThreading(main_thread_dispatcher_, tls_);\n    }\n  }\n\n  ~RememberStatsMatcherTest() override {\n    store_.shutdownThreading();\n    tls_.shutdownThread();\n  }\n\n  using LookupStatFn = std::function<std::string(const std::string&)>;\n\n  // Helper function to test the rejection cache. The goal here is to use\n  // mocks to ensure that we don't call rejects() more than once on any of the\n  // stats, even with 5 name-based lookups.\n  void testRememberMatcher(const LookupStatFn lookup_stat) {\n    InSequence s;\n\n    MockStatsMatcher* matcher = new MockStatsMatcher;\n    StatsMatcherPtr matcher_ptr(matcher);\n    store_.setStatsMatcher(std::move(matcher_ptr));\n\n    EXPECT_CALL(*matcher, rejects(\"scope.reject\")).WillOnce(Return(true));\n    EXPECT_CALL(*matcher, rejects(\"scope.ok\")).WillOnce(Return(false));\n\n    for (int j = 0; j < 5; ++j) {\n      EXPECT_EQ(\"\", lookup_stat(\"reject\"));\n      EXPECT_EQ(\"scope.ok\", lookup_stat(\"ok\"));\n    }\n  }\n\n  void testRejectsAll(const LookupStatFn lookup_stat) {\n    InSequence s;\n\n    MockStatsMatcher* matcher = new MockStatsMatcher;\n    matcher->rejects_all_ = true;\n    StatsMatcherPtr matcher_ptr(matcher);\n    store_.setStatsMatcher(std::move(matcher_ptr));\n\n    ScopePtr scope = store_.createScope(\"scope.\");\n\n    for (int j = 0; j < 5; ++j) {\n      // Note: zero calls to reject() are made, as reject-all should short-circuit.\n      EXPECT_EQ(\"\", lookup_stat(\"reject\"));\n    }\n  }\n\n  void testAcceptsAll(const LookupStatFn lookup_stat) {\n    InSequence s;\n\n    auto* matcher = new MockStatsMatcher;\n    
matcher->accepts_all_ = true;\n    StatsMatcherPtr matcher_ptr(matcher);\n    store_.setStatsMatcher(std::move(matcher_ptr));\n\n    for (int j = 0; j < 5; ++j) {\n      // Note: zero calls to reject() are made, as accept-all should short-circuit.\n      EXPECT_EQ(\"scope.ok\", lookup_stat(\"ok\"));\n    }\n  }\n\n  LookupStatFn lookupCounterFn() {\n    return [this](const std::string& stat_name) -> std::string {\n      return scope_->counterFromString(stat_name).name();\n    };\n  }\n\n  LookupStatFn lookupGaugeFn() {\n    return [this](const std::string& stat_name) -> std::string {\n      return scope_->gaugeFromString(stat_name, Gauge::ImportMode::Accumulate).name();\n    };\n  }\n\n// TODO(jmarantz): restore BoolIndicator tests when https://github.com/envoyproxy/envoy/pull/6280\n// is reverted.\n#define HAS_BOOL_INDICATOR 0\n#if HAS_BOOL_INDICATOR\n  LookupStatFn lookupBoolIndicator() {\n    return [this](const std::string& stat_name) -> std::string {\n      return scope_->boolIndicator(stat_name).name();\n    };\n  }\n#endif\n\n  LookupStatFn lookupHistogramFn() {\n    return [this](const std::string& stat_name) -> std::string {\n      return scope_->histogramFromString(stat_name, Stats::Histogram::Unit::Unspecified).name();\n    };\n  }\n\n  LookupStatFn lookupTextReadoutFn() {\n    return [this](const std::string& stat_name) -> std::string {\n      return scope_->textReadoutFromString(stat_name).name();\n    };\n  }\n\n  SymbolTableImpl symbol_table_;\n  NiceMock<Event::MockDispatcher> main_thread_dispatcher_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  AllocatorImpl heap_alloc_;\n  ThreadLocalStoreImpl store_;\n  ScopePtr scope_;\n};\n\nINSTANTIATE_TEST_SUITE_P(RememberStatsMatcherTest, RememberStatsMatcherTest,\n                         testing::ValuesIn({false, true}));\n\n// Tests that the logic for remembering rejected stats works properly, both\n// with and without threading.\nTEST_P(RememberStatsMatcherTest, CounterRejectOne) { 
testRememberMatcher(lookupCounterFn()); }\n\nTEST_P(RememberStatsMatcherTest, CounterRejectsAll) { testRejectsAll(lookupCounterFn()); }\n\nTEST_P(RememberStatsMatcherTest, CounterAcceptsAll) { testAcceptsAll(lookupCounterFn()); }\n\nTEST_P(RememberStatsMatcherTest, GaugeRejectOne) { testRememberMatcher(lookupGaugeFn()); }\n\nTEST_P(RememberStatsMatcherTest, GaugeRejectsAll) { testRejectsAll(lookupGaugeFn()); }\n\nTEST_P(RememberStatsMatcherTest, GaugeAcceptsAll) { testAcceptsAll(lookupGaugeFn()); }\n\n#if HAS_BOOL_INDICATOR\nTEST_P(RememberStatsMatcherTest, BoolIndicatorRejectOne) {\n  testRememberMatcher(lookupBoolIndicator());\n}\n\nTEST_P(RememberStatsMatcherTest, BoolIndicatorRejectsAll) { testRejectsAll(lookupBoolIndicator()); }\n\nTEST_P(RememberStatsMatcherTest, BoolIndicatorAcceptsAll) { testAcceptsAll(lookupBoolIndicator()); }\n#endif\n\nTEST_P(RememberStatsMatcherTest, HistogramRejectOne) { testRememberMatcher(lookupHistogramFn()); }\n\nTEST_P(RememberStatsMatcherTest, HistogramRejectsAll) { testRejectsAll(lookupHistogramFn()); }\n\nTEST_P(RememberStatsMatcherTest, HistogramAcceptsAll) { testAcceptsAll(lookupHistogramFn()); }\n\nTEST_P(RememberStatsMatcherTest, TextReadoutRejectOne) {\n  testRememberMatcher(lookupTextReadoutFn());\n}\n\nTEST_P(RememberStatsMatcherTest, TextReadoutRejectsAll) { testRejectsAll(lookupTextReadoutFn()); }\n\nTEST_P(RememberStatsMatcherTest, TextReadoutAcceptsAll) { testAcceptsAll(lookupTextReadoutFn()); }\n\nTEST_F(StatsThreadLocalStoreTest, RemoveRejectedStats) {\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n  Counter& counter = store_->counterFromString(\"c1\");\n  Gauge& gauge = store_->gaugeFromString(\"g1\", Gauge::ImportMode::Accumulate);\n  Histogram& histogram = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  TextReadout& textReadout = store_->textReadoutFromString(\"t1\");\n  ASSERT_EQ(1, store_->counters().size()); // \"c1\".\n  EXPECT_TRUE(&counter == 
store_->counters()[0].get() ||\n              &counter == store_->counters()[1].get()); // counters() order is non-deterministic.\n  ASSERT_EQ(1, store_->gauges().size());\n  EXPECT_EQ(\"g1\", store_->gauges()[0]->name());\n  ASSERT_EQ(1, store_->histograms().size());\n  EXPECT_EQ(\"h1\", store_->histograms()[0]->name());\n  ASSERT_EQ(1, store_->textReadouts().size());\n  EXPECT_EQ(\"t1\", store_->textReadouts()[0]->name());\n\n  // Will effectively block all stats, and remove all the non-matching stats.\n  envoy::config::metrics::v3::StatsConfig stats_config;\n  stats_config.mutable_stats_matcher()->mutable_inclusion_list()->add_patterns()->set_exact(\n      \"no-such-stat\");\n  store_->setStatsMatcher(std::make_unique<StatsMatcherImpl>(stats_config));\n\n  // They can no longer be found.\n  EXPECT_EQ(0, store_->counters().size());\n  EXPECT_EQ(0, store_->gauges().size());\n  EXPECT_EQ(0, store_->histograms().size());\n  EXPECT_EQ(0, store_->textReadouts().size());\n\n  // However, referencing the previously allocated stats will not crash.\n  counter.inc();\n  gauge.inc();\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), 42));\n  histogram.recordValue(42);\n  textReadout.set(\"fortytwo\");\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n}\n\nTEST_F(StatsThreadLocalStoreTest, NonHotRestartNoTruncation) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  // Allocate a stat greater than the max name length.\n  const std::string name_1(MaxStatNameLength + 1, 'A');\n\n  store_->counterFromString(name_1);\n\n  // This works fine, and we can find it by its long name because heap-stats do not\n  // get truncated.\n  EXPECT_NE(nullptr, TestUtility::findCounter(*store_, name_1).get());\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n}\n\nclass StatsThreadLocalStoreTestNoFixture : public testing::Test {\nprotected:\n  StatsThreadLocalStoreTestNoFixture() : alloc_(symbol_table_), store_(alloc_) {\n    
store_.addSink(sink_);\n\n    // Use a tag producer that will produce tags.\n    envoy::config::metrics::v3::StatsConfig stats_config;\n    store_.setTagProducer(std::make_unique<TagProducerImpl>(stats_config));\n  }\n\n  ~StatsThreadLocalStoreTestNoFixture() override {\n    if (threading_enabled_) {\n      store_.shutdownThreading();\n      tls_.shutdownThread();\n    }\n  }\n\n  void initThreading() {\n    threading_enabled_ = true;\n    store_.initializeThreading(main_thread_dispatcher_, tls_);\n  }\n\n  static constexpr size_t million_ = 1000 * 1000;\n\n  MockSink sink_;\n  SymbolTableImpl symbol_table_;\n  AllocatorImpl alloc_;\n  ThreadLocalStoreImpl store_;\n  NiceMock<Event::MockDispatcher> main_thread_dispatcher_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  bool threading_enabled_{false};\n};\n\n// Tests how much memory is consumed allocating 100k stats.\nTEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithoutTlsRealSymbolTable) {\n  TestUtil::MemoryTest memory_test;\n  TestUtil::forEachSampleStat(\n      100, [this](absl::string_view name) { store_.counterFromString(std::string(name)); });\n  EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 688080); // July 2, 2020\n  EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.75 * million_);\n}\n\nTEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsRealSymbolTable) {\n  initThreading();\n  TestUtil::MemoryTest memory_test;\n  TestUtil::forEachSampleStat(\n      100, [this](absl::string_view name) { store_.counterFromString(std::string(name)); });\n  EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 827616); // Sep 25, 2020\n  EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.9 * million_);\n}\n\nTEST_F(StatsThreadLocalStoreTest, ShuttingDown) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  store_->counterFromString(\"c1\");\n  store_->gaugeFromString(\"g1\", Gauge::ImportMode::Accumulate);\n  store_->textReadoutFromString(\"t1\");\n  store_->shutdownThreading();\n  
store_->counterFromString(\"c2\");\n  store_->gaugeFromString(\"g2\", Gauge::ImportMode::Accumulate);\n  store_->textReadoutFromString(\"t2\");\n\n  // We do not keep ref-counts for counters and gauges in the TLS cache, so\n  // all these stats should have a ref-count of 2: one for the SharedPtr\n  // returned from find*(), and one for the central cache.\n  EXPECT_EQ(2L, TestUtility::findCounter(*store_, \"c1\").use_count());\n  EXPECT_EQ(2L, TestUtility::findGauge(*store_, \"g1\").use_count());\n\n  // c1, g1, t1 should have a thread local ref, but c2, g2, t2 should not.\n  EXPECT_EQ(2L, TestUtility::findCounter(*store_, \"c1\").use_count());\n  EXPECT_EQ(2L, TestUtility::findGauge(*store_, \"g1\").use_count());\n  EXPECT_EQ(2L, TestUtility::findTextReadout(*store_, \"t1\").use_count());\n  EXPECT_EQ(2L, TestUtility::findCounter(*store_, \"c2\").use_count());\n  EXPECT_EQ(2L, TestUtility::findGauge(*store_, \"g2\").use_count());\n  EXPECT_EQ(2L, TestUtility::findTextReadout(*store_, \"t2\").use_count());\n\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n}\n\nTEST_F(StatsThreadLocalStoreTest, MergeDuringShutDown) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(\"h1\", h1.name());\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 1));\n  h1.recordValue(1);\n\n  store_->shutdownThreading();\n\n  // Validate that merge callback is called during shutdown and there is no ASSERT.\n  bool merge_called = false;\n  store_->mergeHistograms([&merge_called]() -> void { merge_called = true; });\n\n  EXPECT_TRUE(merge_called);\n  store_->shutdownThreading();\n  tls_.shutdownThread();\n}\n\nTEST(ThreadLocalStoreThreadTest, ConstructDestruct) {\n  SymbolTableImpl symbol_table;\n  Api::ApiPtr api = Api::createApiForTest();\n  Event::DispatcherPtr dispatcher = api->allocateDispatcher(\"test_thread\");\n  
NiceMock<ThreadLocal::MockInstance> tls;\n  AllocatorImpl alloc(symbol_table);\n  ThreadLocalStoreImpl store(alloc);\n\n  store.initializeThreading(*dispatcher, tls);\n  { ScopePtr scope1 = store.createScope(\"scope1.\"); }\n  store.shutdownThreading();\n}\n\n// Histogram tests\nTEST_F(HistogramTest, BasicSingleHistogramMerge) {\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(\"h1\", h1.name());\n\n  expectCallAndAccumulate(h1, 0);\n  expectCallAndAccumulate(h1, 43);\n  expectCallAndAccumulate(h1, 41);\n  expectCallAndAccumulate(h1, 415);\n  expectCallAndAccumulate(h1, 2201);\n  expectCallAndAccumulate(h1, 3201);\n  expectCallAndAccumulate(h1, 125);\n  expectCallAndAccumulate(h1, 13);\n\n  EXPECT_EQ(1, validateMerge());\n}\n\nTEST_F(HistogramTest, BasicMultiHistogramMerge) {\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  Histogram& h2 = store_->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(\"h1\", h1.name());\n  EXPECT_EQ(\"h2\", h2.name());\n\n  expectCallAndAccumulate(h1, 1);\n  expectCallAndAccumulate(h2, 1);\n  expectCallAndAccumulate(h2, 2);\n\n  EXPECT_EQ(2, validateMerge());\n}\n\nTEST_F(HistogramTest, MultiHistogramMultipleMerges) {\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  Histogram& h2 = store_->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(\"h1\", h1.name());\n  EXPECT_EQ(\"h2\", h2.name());\n\n  // Insert one value in to one histogram and validate\n  expectCallAndAccumulate(h1, 1);\n  EXPECT_EQ(2, validateMerge());\n\n  // Insert value into second histogram and validate that it is merged properly.\n  expectCallAndAccumulate(h2, 1);\n  EXPECT_EQ(2, validateMerge());\n\n  // Insert more values into both the histograms and validate that it is merged properly.\n  expectCallAndAccumulate(h1, 2);\n  EXPECT_EQ(2, validateMerge());\n\n  
expectCallAndAccumulate(h2, 3);\n  EXPECT_EQ(2, validateMerge());\n\n  expectCallAndAccumulate(h2, 2);\n  EXPECT_EQ(2, validateMerge());\n\n  // Do not insert any value and validate that intervalSummary is empty for both the histograms and\n  // cumulativeSummary has right values.\n  EXPECT_EQ(2, validateMerge());\n}\n\nTEST_F(HistogramTest, BasicScopeHistogramMerge) {\n  ScopePtr scope1 = store_->createScope(\"scope1.\");\n\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  Histogram& h2 = scope1->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(\"h1\", h1.name());\n  EXPECT_EQ(\"scope1.h2\", h2.name());\n\n  expectCallAndAccumulate(h1, 2);\n  expectCallAndAccumulate(h2, 2);\n  EXPECT_EQ(2, validateMerge());\n}\n\nTEST_F(HistogramTest, BasicHistogramSummaryValidate) {\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  Histogram& h2 = store_->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n\n  expectCallAndAccumulate(h1, 1);\n\n  EXPECT_EQ(2, validateMerge());\n\n  const std::string h1_expected_summary =\n      \"P0: 1, P25: 1.025, P50: 1.05, P75: 1.075, P90: 1.09, P95: 1.095, \"\n      \"P99: 1.099, P99.5: 1.0995, P99.9: 1.0999, P100: 1.1\";\n  const std::string h2_expected_summary =\n      \"P0: 0, P25: 25, P50: 50, P75: 75, P90: 90, P95: 95, P99: 99, \"\n      \"P99.5: 99.5, P99.9: 99.9, P100: 100\";\n\n  const std::string h1_expected_buckets =\n      \"B0.5: 0, B1: 0, B5: 1, B10: 1, B25: 1, B50: 1, B100: 1, B250: 1, \"\n      \"B500: 1, B1000: 1, B2500: 1, B5000: 1, B10000: 1, B30000: 1, B60000: 1, \"\n      \"B300000: 1, B600000: 1, B1.8e+06: 1, B3.6e+06: 1\";\n  const std::string h2_expected_buckets =\n      \"B0.5: 1, B1: 1, B5: 5, B10: 10, B25: 25, B50: 50, B100: 100, B250: 100, \"\n      \"B500: 100, B1000: 100, B2500: 100, B5000: 100, B10000: 100, B30000: 100, \"\n      \"B60000: 100, B300000: 100, B600000: 100, 
B1.8e+06: 100, B3.6e+06: 100\";\n\n  for (size_t i = 0; i < 100; ++i) {\n    expectCallAndAccumulate(h2, i);\n  }\n\n  EXPECT_EQ(2, validateMerge());\n\n  NameHistogramMap name_histogram_map = makeHistogramMap(store_->histograms());\n  EXPECT_EQ(h1_expected_summary,\n            name_histogram_map[\"h1\"]->cumulativeStatistics().quantileSummary());\n  EXPECT_EQ(h2_expected_summary,\n            name_histogram_map[\"h2\"]->cumulativeStatistics().quantileSummary());\n  EXPECT_EQ(h1_expected_buckets, name_histogram_map[\"h1\"]->cumulativeStatistics().bucketSummary());\n  EXPECT_EQ(h2_expected_buckets, name_histogram_map[\"h2\"]->cumulativeStatistics().bucketSummary());\n}\n\n// Validates the summary after known value merge in to same histogram.\nTEST_F(HistogramTest, BasicHistogramMergeSummary) {\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n\n  for (size_t i = 0; i < 50; ++i) {\n    expectCallAndAccumulate(h1, i);\n  }\n  EXPECT_EQ(1, validateMerge());\n\n  for (size_t i = 50; i < 100; ++i) {\n    expectCallAndAccumulate(h1, i);\n  }\n  EXPECT_EQ(1, validateMerge());\n\n  const std::string expected_summary = \"P0: 0, P25: 25, P50: 50, P75: 75, P90: 90, P95: 95, P99: \"\n                                       \"99, P99.5: 99.5, P99.9: 99.9, P100: 100\";\n  const std::string expected_bucket_summary =\n      \"B0.5: 1, B1: 1, B5: 5, B10: 10, B25: 25, B50: 50, B100: 100, B250: 100, \"\n      \"B500: 100, B1000: 100, B2500: 100, B5000: 100, B10000: 100, B30000: 100, \"\n      \"B60000: 100, B300000: 100, B600000: 100, B1.8e+06: 100, B3.6e+06: 100\";\n\n  NameHistogramMap name_histogram_map = makeHistogramMap(store_->histograms());\n  EXPECT_EQ(expected_summary, name_histogram_map[\"h1\"]->cumulativeStatistics().quantileSummary());\n  EXPECT_EQ(expected_bucket_summary,\n            name_histogram_map[\"h1\"]->cumulativeStatistics().bucketSummary());\n}\n\nTEST_F(HistogramTest, BasicHistogramUsed) {\n  ScopePtr scope1 = 
store_->createScope(\"scope1.\");\n\n  Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  Histogram& h2 = scope1->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n  EXPECT_EQ(\"h1\", h1.name());\n  EXPECT_EQ(\"scope1.h2\", h2.name());\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 1));\n  h1.recordValue(1);\n\n  NameHistogramMap name_histogram_map = makeHistogramMap(store_->histograms());\n  EXPECT_FALSE(name_histogram_map[\"h1\"]->used());\n  EXPECT_FALSE(name_histogram_map[\"h2\"]->used());\n\n  // Merge the histograms and validate that h1 is considered used.\n  store_->mergeHistograms([]() -> void {});\n  EXPECT_TRUE(name_histogram_map[\"h1\"]->used());\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h2), 2));\n  h2.recordValue(2);\n  EXPECT_FALSE(name_histogram_map[\"h2\"]->used());\n\n  // Merge histograms again and validate that both h1 and h2 are used.\n  store_->mergeHistograms([]() -> void {});\n\n  for (const ParentHistogramSharedPtr& histogram : store_->histograms()) {\n    EXPECT_TRUE(histogram->used());\n  }\n}\n\nTEST_F(HistogramTest, ParentHistogramBucketSummary) {\n  ScopePtr scope1 = store_->createScope(\"scope1.\");\n  Histogram& histogram =\n      store_->histogramFromString(\"histogram\", Stats::Histogram::Unit::Unspecified);\n  store_->mergeHistograms([]() -> void {});\n  ASSERT_EQ(1, store_->histograms().size());\n  ParentHistogramSharedPtr parent_histogram = store_->histograms()[0];\n  EXPECT_EQ(\"No recorded values\", parent_histogram->bucketSummary());\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), 10));\n  histogram.recordValue(10);\n  store_->mergeHistograms([]() -> void {});\n  EXPECT_EQ(\"B0.5(0,0) B1(0,0) B5(0,0) B10(0,0) B25(1,1) B50(1,1) B100(1,1) \"\n            \"B250(1,1) B500(1,1) B1000(1,1) B2500(1,1) B5000(1,1) B10000(1,1) \"\n            \"B30000(1,1) B60000(1,1) B300000(1,1) B600000(1,1) B1.8e+06(1,1) \"\n            \"B3.6e+06(1,1)\",\n            
parent_histogram->bucketSummary());\n}\n\nclass ThreadLocalRealThreadsTestBase : public ThreadLocalStoreNoMocksTestBase {\nprotected:\n  static constexpr uint32_t NumScopes = 1000;\n  static constexpr uint32_t NumIters = 35;\n\n  // Helper class to block on a number of multi-threaded operations occurring.\n  class BlockingBarrier {\n  public:\n    explicit BlockingBarrier(uint32_t count) : blocking_counter_(count) {}\n    ~BlockingBarrier() { blocking_counter_.Wait(); }\n\n    /**\n     * Returns a function that first executes 'f', and then decrements the count\n     * toward unblocking the scope. This is intended to be used as a post() callback.\n     *\n     * @param f the function to run prior to decrementing the count.\n     */\n    std::function<void()> run(std::function<void()> f) {\n      return [this, f]() {\n        f();\n        decrementCount();\n      };\n    }\n\n    /**\n     * @return a function that, when run, decrements the count, intended for passing to post().\n     */\n    std::function<void()> decrementCountFn() {\n      return [this] { decrementCount(); };\n    }\n\n    void decrementCount() { blocking_counter_.DecrementCount(); }\n\n  private:\n    absl::BlockingCounter blocking_counter_;\n  };\n\n  ThreadLocalRealThreadsTestBase(uint32_t num_threads)\n      : num_threads_(num_threads), start_time_(time_system_.monotonicTime()),\n        api_(Api::createApiForTest()), thread_factory_(api_->threadFactory()),\n        pool_(store_->symbolTable()) {\n    // This is the same order as InstanceImpl::initialize in source/server/server.cc.\n    thread_dispatchers_.resize(num_threads_);\n    {\n      BlockingBarrier blocking_barrier(num_threads_ + 1);\n      main_thread_ = thread_factory_.createThread(\n          [this, &blocking_barrier]() { mainThreadFn(blocking_barrier); });\n      for (uint32_t i = 0; i < num_threads_; ++i) {\n        threads_.emplace_back(thread_factory_.createThread(\n            [this, i, &blocking_barrier]() { 
workerThreadFn(i, blocking_barrier); }));\n      }\n    }\n\n    {\n      BlockingBarrier blocking_barrier(1);\n      main_dispatcher_->post(blocking_barrier.run([this]() {\n        tls_ = std::make_unique<ThreadLocal::InstanceImpl>();\n        tls_->registerThread(*main_dispatcher_, true);\n        for (Event::DispatcherPtr& dispatcher : thread_dispatchers_) {\n          // Worker threads must be registered from the main thread, per assert in registerThread().\n          tls_->registerThread(*dispatcher, false);\n        }\n        store_->initializeThreading(*main_dispatcher_, *tls_);\n      }));\n    }\n  }\n\n  ~ThreadLocalRealThreadsTestBase() override {\n    {\n      BlockingBarrier blocking_barrier(1);\n      main_dispatcher_->post(blocking_barrier.run([this]() {\n        store_->shutdownThreading();\n        tls_->shutdownGlobalThreading();\n        tls_->shutdownThread();\n      }));\n    }\n\n    for (Event::DispatcherPtr& dispatcher : thread_dispatchers_) {\n      dispatcher->post([&dispatcher]() { dispatcher->exit(); });\n    }\n\n    for (Thread::ThreadPtr& thread : threads_) {\n      thread->join();\n    }\n\n    main_dispatcher_->post([this]() {\n      store_.reset();\n      tls_.reset();\n      main_dispatcher_->exit();\n    });\n    main_thread_->join();\n  }\n\n  void workerThreadFn(uint32_t thread_index, BlockingBarrier& blocking_barrier) {\n    thread_dispatchers_[thread_index] =\n        api_->allocateDispatcher(absl::StrCat(\"test_worker_\", thread_index));\n    blocking_barrier.decrementCount();\n    thread_dispatchers_[thread_index]->run(Event::Dispatcher::RunType::RunUntilExit);\n  }\n\n  void mainThreadFn(BlockingBarrier& blocking_barrier) {\n    main_dispatcher_ = api_->allocateDispatcher(\"test_main_thread\");\n    blocking_barrier.decrementCount();\n    main_dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit);\n  }\n\n  void mainDispatchBlock() {\n    // To ensure all stats are freed we have to wait for a few posts() to clear.\n  
  // First, wait for the main-dispatcher to initiate the cross-thread TLS cleanup.\n    BlockingBarrier blocking_barrier(1);\n    main_dispatcher_->post(blocking_barrier.run([]() {}));\n  }\n\n  void tlsBlock() {\n    BlockingBarrier blocking_barrier(num_threads_);\n    for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) {\n      thread_dispatcher->post(blocking_barrier.run([]() {}));\n    }\n  }\n\n  const uint32_t num_threads_;\n  Event::TestRealTimeSystem time_system_;\n  MonotonicTime start_time_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr main_dispatcher_;\n  std::vector<Event::DispatcherPtr> thread_dispatchers_;\n  Thread::ThreadFactory& thread_factory_;\n  ThreadLocal::InstanceImplPtr tls_;\n  Thread::ThreadPtr main_thread_;\n  std::vector<Thread::ThreadPtr> threads_;\n  StatNamePool pool_;\n};\n\nclass ClusterShutdownCleanupStarvationTest : public ThreadLocalRealThreadsTestBase {\nprotected:\n  static constexpr uint32_t NumThreads = 2;\n\n  ClusterShutdownCleanupStarvationTest()\n      : ThreadLocalRealThreadsTestBase(NumThreads), my_counter_name_(pool_.add(\"my_counter\")),\n        my_counter_scoped_name_(pool_.add(\"scope.my_counter\")) {}\n\n  void createScopesIncCountersAndCleanup() {\n    for (uint32_t i = 0; i < NumScopes; ++i) {\n      ScopePtr scope = store_->createScope(\"scope.\");\n      Counter& counter = scope->counterFromStatName(my_counter_name_);\n      counter.inc();\n    }\n  }\n\n  void createScopesIncCountersAndCleanupAllThreads() {\n    BlockingBarrier blocking_barrier(NumThreads);\n    for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) {\n      thread_dispatcher->post(\n          blocking_barrier.run([this]() { createScopesIncCountersAndCleanup(); }));\n    }\n  }\n\n  std::chrono::seconds elapsedTime() {\n    return std::chrono::duration_cast<std::chrono::seconds>(time_system_.monotonicTime() -\n                                                            start_time_);\n  }\n\n  StatName 
my_counter_name_;\n  StatName my_counter_scoped_name_;\n};\n\n// Tests the scenario where a cluster and stat are allocated in multiple\n// concurrent threads, but after each round of allocation/free we post() an\n// empty callback to main to ensure that cross-scope thread cleanups complete.\n// In this test, we don't expect the use-count of the stat to get very high.\nTEST_F(ClusterShutdownCleanupStarvationTest, TwelveThreadsWithBlockade) {\n  for (uint32_t i = 0; i < NumIters && elapsedTime() < std::chrono::seconds(5); ++i) {\n    createScopesIncCountersAndCleanupAllThreads();\n\n    // First, wait for the main-dispatcher to initiate the cross-thread TLS cleanup.\n    mainDispatchBlock();\n\n    // Next, wait for all the worker threads to complete their TLS cleanup.\n    tlsBlock();\n\n    // Finally, wait for the final central-cache cleanup, which occurs on the main thread.\n    mainDispatchBlock();\n\n    // Here we show that the counter cleanups have finished, because the use-count is 1.\n    CounterSharedPtr counter =\n        alloc_.makeCounter(my_counter_scoped_name_, StatName(), StatNameTagVector{});\n    EXPECT_EQ(1, counter->use_count()) << \"index=\" << i;\n  }\n}\n\n// In this test, we don't run the main-callback post() in between each\n// iteration, and we use a thread synchronizer to block the cross-thread\n// cleanup. 
Thus no stat references in the caches get freed and the use_count()\n// grows without bound.\nTEST_F(ClusterShutdownCleanupStarvationTest, TwelveThreadsWithoutBlockade) {\n  store_->sync().enable();\n  store_->sync().waitOn(ThreadLocalStoreImpl::MainDispatcherCleanupSync);\n  for (uint32_t i = 0; i < NumIters && elapsedTime() < std::chrono::seconds(5); ++i) {\n    createScopesIncCountersAndCleanupAllThreads();\n    // As we have blocked the main dispatcher cleanup function above, nothing\n    // gets cleaned up and use-counts grow without bound as we recreate scopes,\n    // recreating the same counter in each one.\n\n    // Compute the use-count of one of the counters. This shows that by blocking\n    // the main-dispatcher cleanup thread, the use-counts grow without bound.\n    // We set our parameters so we attempt to exceed a use-count of 64k when\n    // running the test: NumScopes*NumThreads*NumIters == 70000, We use a timer\n    // so we don't time out on asan/tsan tests, In opt builds this test takes\n    // less than a second, and in fastbuild it takes less than 5.\n    CounterSharedPtr counter =\n        alloc_.makeCounter(my_counter_scoped_name_, StatName(), StatNameTagVector{});\n    uint32_t use_count = counter->use_count() - 1; // Subtract off this instance.\n    EXPECT_EQ((i + 1) * NumScopes * NumThreads, use_count);\n  }\n  EXPECT_EQ(70000, NumThreads * NumScopes * NumIters);\n  store_->sync().signal(ThreadLocalStoreImpl::MainDispatcherCleanupSync);\n}\n\nclass HistogramThreadTest : public ThreadLocalRealThreadsTestBase {\nprotected:\n  static constexpr uint32_t NumThreads = 10;\n\n  HistogramThreadTest() : ThreadLocalRealThreadsTestBase(NumThreads) {}\n\n  void mergeHistograms() {\n    BlockingBarrier blocking_barrier(1);\n    main_dispatcher_->post([this, &blocking_barrier]() {\n      store_->mergeHistograms(blocking_barrier.decrementCountFn());\n    });\n  }\n\n  uint32_t numTlsHistograms() {\n    uint32_t num;\n    {\n      BlockingBarrier 
blocking_barrier(1);\n      main_dispatcher_->post([this, &num, &blocking_barrier]() {\n        ThreadLocalStoreTestingPeer::numTlsHistograms(*store_,\n                                                      [&num, &blocking_barrier](uint32_t num_hist) {\n                                                        num = num_hist;\n                                                        blocking_barrier.decrementCount();\n                                                      });\n      });\n    }\n    return num;\n  }\n\n  // Executes a function on every worker thread dispatcher.\n  void foreachThread(const std::function<void()>& fn) {\n    BlockingBarrier blocking_barrier(NumThreads);\n    for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) {\n      thread_dispatcher->post(blocking_barrier.run(fn));\n    }\n  }\n};\n\nTEST_F(HistogramThreadTest, MakeHistogramsAndRecordValues) {\n  foreachThread([this]() {\n    Histogram& histogram =\n        store_->histogramFromString(\"my_hist\", Stats::Histogram::Unit::Unspecified);\n    histogram.recordValue(42);\n  });\n\n  mergeHistograms();\n\n  auto histograms = store_->histograms();\n  ASSERT_EQ(1, histograms.size());\n  ParentHistogramSharedPtr hist = histograms[0];\n  EXPECT_THAT(hist->bucketSummary(),\n              HasSubstr(absl::StrCat(\" B25(0,0) B50(\", NumThreads, \",\", NumThreads, \") \")));\n}\n\nTEST_F(HistogramThreadTest, ScopeOverlap) {\n  // Creating two scopes with the same name gets you two distinct scope objects.\n  ScopePtr scope1 = store_->createScope(\"scope.\");\n  ScopePtr scope2 = store_->createScope(\"scope.\");\n  EXPECT_NE(scope1, scope2);\n\n  EXPECT_EQ(0, store_->histograms().size());\n  EXPECT_EQ(0, numTlsHistograms());\n\n  // Histograms created in the two same-named scopes will be the same objects.\n  foreachThread([&scope1, &scope2]() {\n    Histogram& histogram = scope1->histogramFromString(\"histogram\", Histogram::Unit::Unspecified);\n    EXPECT_EQ(&histogram, 
&scope2->histogramFromString(\"histogram\", Histogram::Unit::Unspecified));\n    histogram.recordValue(100);\n  });\n\n  mergeHistograms();\n\n  // Verify that we have the expected number of TLS histograms since we accessed\n  // the histogram on every thread.\n  std::vector<ParentHistogramSharedPtr> histograms = store_->histograms();\n  ASSERT_EQ(1, histograms.size());\n  EXPECT_EQ(NumThreads, numTlsHistograms());\n\n  // There's no convenient API to pull data out of the histogram, except as\n  // a string. This expectation captures the bucket transition to indicate\n  // 0 samples at less than 100, and 10 between 100 and 249 inclusive.\n  EXPECT_THAT(histograms[0]->bucketSummary(),\n              HasSubstr(absl::StrCat(\" B100(0,0) B250(\", NumThreads, \",\", NumThreads, \") \")));\n\n  // The histogram was created in scope1, which can now be destroyed. But the\n  // histogram is kept alive by scope2.\n  scope1.reset();\n  histograms = store_->histograms();\n  EXPECT_EQ(1, histograms.size());\n  EXPECT_EQ(NumThreads, numTlsHistograms());\n\n  // We can continue to accumulate samples at the scope2's view of the same\n  // histogram, and they will combine with the existing data, despite the\n  // fact that scope1 has been deleted.\n  foreachThread([&scope2]() {\n    Histogram& histogram = scope2->histogramFromString(\"histogram\", Histogram::Unit::Unspecified);\n    histogram.recordValue(300);\n  });\n\n  mergeHistograms();\n\n  // Shows the bucket summary with 10 samples at >=100, and 20 at >=250.\n  EXPECT_THAT(histograms[0]->bucketSummary(),\n              HasSubstr(absl::StrCat(\" B100(0,0) B250(0,\", NumThreads, \") B500(\", NumThreads, \",\",\n                                     2 * NumThreads, \") \")));\n\n  // Now clear everything, and synchronize the system by calling mergeHistograms().\n  // THere should be no more ParentHistograms or TlsHistograms.\n  scope2.reset();\n  histograms.clear();\n  mergeHistograms();\n\n  EXPECT_EQ(0, 
store_->histograms().size());\n  EXPECT_EQ(0, numTlsHistograms());\n\n  store_->shutdownThreading();\n\n  store_->histogramFromString(\"histogram_after_shutdown\", Histogram::Unit::Unspecified);\n}\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/utility_corpus/test",
    "content": "hello world"
  },
  {
    "path": "test/common/stats/utility_fuzz_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/stats/utility.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\nnamespace {\n\n// The maximum number of iterations the fuzz test can run until stopped. This is\n// to avoid lengthy tests and timeouts.\nconstexpr size_t MaxIterations = 1000;\n\n} // namespace\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n\n  Stats::Utility::sanitizeStatsName(absl::string_view(reinterpret_cast<const char*>(buf), len));\n\n  if (len < 4) {\n    return;\n  }\n\n  // Create a greater scope vector to store the string to prevent the string memory from being free\n  std::list<std::string> string_list;\n  auto make_string = [&string_list](absl::string_view str) -> absl::string_view {\n    string_list.push_back(std::string(str));\n    return string_list.back();\n  };\n\n  // generate a random number as the maximum length of the stat name\n  const size_t max_len = *reinterpret_cast<const uint8_t*>(buf) % (len - 3);\n  FuzzedDataProvider provider(buf, len);\n\n  // model common/stats/utility_test.cc, initialize those objects to create random elements as\n  // input\n  Stats::SymbolTableImpl symbol_table;\n  std::unique_ptr<Stats::IsolatedStoreImpl> store =\n      std::make_unique<Stats::IsolatedStoreImpl>(symbol_table);\n  Stats::StatNamePool pool(symbol_table);\n  Stats::ScopePtr scope = store->createScope(provider.ConsumeRandomLengthString(max_len));\n  Stats::ElementVec ele_vec;\n  Stats::StatNameVec sn_vec;\n  Stats::StatNameTagVector tags;\n  Stats::StatName key, val;\n\n  if (provider.remaining_bytes() == 0) {\n    Stats::Utility::counterFromStatNames(*scope, {});\n    Stats::Utility::counterFromElements(*scope, {});\n  } else {\n    // Run until either running out of strings to process or a maximal number of\n    // iterations is reached.\n    for (size_t iter = 0; iter < 
MaxIterations && provider.remaining_bytes() > 3; iter++) {\n      // add random length string in each loop\n      if (provider.ConsumeBool()) {\n        absl::string_view str = make_string(\n            provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes())));\n        ele_vec.push_back(Stats::DynamicName(str));\n        sn_vec.push_back(pool.add(str));\n      } else {\n        key = pool.add(\n            provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes() / 2)));\n        val = pool.add(\n            provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes())));\n        tags.push_back({key, val});\n      }\n      Stats::Utility::counterFromStatNames(*scope, sn_vec, tags);\n      Stats::Utility::counterFromElements(*scope, ele_vec, tags);\n    }\n  }\n}\n\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stats/utility_test.cc",
    "content": "#include <string>\n\n#include \"envoy/stats/stats_macros.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/stats/null_counter.h\"\n#include \"common/stats/null_gauge.h\"\n#include \"common/stats/thread_local_store.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::UnorderedElementsAre;\n\nnamespace Envoy {\nnamespace Stats {\nnamespace {\n\n// All the tests should be run for both IsolatedStore and ThreadLocalStore.\nenum class StoreType {\n  ThreadLocal,\n  Isolated,\n};\n\nclass StatsUtilityTest : public testing::TestWithParam<StoreType> {\nprotected:\n  template <class StatType>\n  using IterateFn = std::function<bool(const RefcountPtr<StatType>& stat)>;\n  using MakeStatFn = std::function<void(Scope& scope, const ElementVec& elements)>;\n\n  StatsUtilityTest()\n      : symbol_table_(std::make_unique<SymbolTableImpl>()), pool_(*symbol_table_),\n        tags_(\n            {{pool_.add(\"tag1\"), pool_.add(\"value1\")}, {pool_.add(\"tag2\"), pool_.add(\"value2\")}}) {\n    switch (GetParam()) {\n    case StoreType::ThreadLocal:\n      alloc_ = std::make_unique<AllocatorImpl>(*symbol_table_),\n      store_ = std::make_unique<ThreadLocalStoreImpl>(*alloc_);\n      break;\n    case StoreType::Isolated:\n      store_ = std::make_unique<IsolatedStoreImpl>(*symbol_table_);\n      break;\n    }\n    scope_ = store_->createScope(\"scope\");\n  }\n\n  ~StatsUtilityTest() override {\n    scope_.reset();\n    pool_.clear();\n    store_.reset();\n    EXPECT_EQ(0, symbol_table_->numSymbols());\n  }\n\n  void init(MakeStatFn make_stat) {\n    make_stat(*store_, {pool_.add(\"symbolic1\")});\n    make_stat(*store_, {Stats::DynamicName(\"dynamic1\")});\n    make_stat(*scope_, {pool_.add(\"symbolic2\")});\n    make_stat(*scope_, {Stats::DynamicName(\"dynamic2\")});\n  }\n\n  template <class StatType> IterateFn<StatType> iterOnce() {\n    
return [this](const RefcountPtr<StatType>& stat) -> bool {\n      results_.insert(stat->name());\n      return false;\n    };\n  }\n\n  template <class StatType> IterateFn<StatType> iterAll() {\n    return [this](const RefcountPtr<StatType>& stat) -> bool {\n      results_.insert(stat->name());\n      return true;\n    };\n  }\n\n  static MakeStatFn makeCounter() {\n    return [](Scope& scope, const ElementVec& elements) {\n      Utility::counterFromElements(scope, elements).inc();\n    };\n  }\n\n  static bool checkValue(const Counter& counter) { return counter.value() == 1; }\n\n  static MakeStatFn makeGauge() {\n    return [](Scope& scope, const ElementVec& elements) {\n      Utility::gaugeFromElements(scope, elements, Gauge::ImportMode::Accumulate).inc();\n    };\n  }\n\n  static bool checkValue(const Gauge& gauge) { return gauge.value() == 1; }\n\n  static MakeStatFn makeHistogram() {\n    return [](Scope& scope, const ElementVec& elements) {\n      Utility::histogramFromElements(scope, elements, Histogram::Unit::Milliseconds);\n    };\n  }\n\n  static bool checkValue(const Histogram& histogram) {\n    return histogram.unit() == Histogram::Unit::Milliseconds;\n  }\n\n  static MakeStatFn makeTextReadout() {\n    return [](Scope& scope, const ElementVec& elements) {\n      Utility::textReadoutFromElements(scope, elements).set(\"my-value\");\n    };\n  }\n\n  static bool checkValue(const TextReadout& text_readout) {\n    return text_readout.value() == \"my-value\";\n  }\n\n  template <class StatType> void storeOnce(const MakeStatFn make_stat) {\n    CachedReference<StatType> symbolic1_ref(*store_, \"symbolic1\");\n    CachedReference<StatType> dynamic1_ref(*store_, \"dynamic1\");\n    EXPECT_FALSE(symbolic1_ref.get());\n    EXPECT_FALSE(dynamic1_ref.get());\n\n    init(make_stat);\n\n    ASSERT_TRUE(symbolic1_ref.get());\n    ASSERT_TRUE(dynamic1_ref.get());\n    EXPECT_FALSE(store_->iterate(iterOnce<StatType>()));\n    EXPECT_EQ(1, results_.size());\n    
EXPECT_TRUE(checkValue(*symbolic1_ref.get()));\n    EXPECT_TRUE(checkValue(*dynamic1_ref.get()));\n  }\n\n  template <class StatType> void storeAll(const MakeStatFn make_stat) {\n    init(make_stat);\n    EXPECT_TRUE(store_->iterate(iterAll<StatType>()));\n    EXPECT_THAT(results_,\n                UnorderedElementsAre(\"symbolic1\", \"dynamic1\", \"scope.symbolic2\", \"scope.dynamic2\"));\n  }\n\n  template <class StatType> void scopeOnce(const MakeStatFn make_stat) {\n    CachedReference<StatType> symbolic2_ref(*store_, \"scope.symbolic2\");\n    CachedReference<StatType> dynamic2_ref(*store_, \"scope.dynamic2\");\n    EXPECT_FALSE(symbolic2_ref.get());\n    EXPECT_FALSE(dynamic2_ref.get());\n\n    init(make_stat);\n\n    ASSERT_TRUE(symbolic2_ref.get());\n    ASSERT_TRUE(dynamic2_ref.get());\n    EXPECT_FALSE(scope_->iterate(iterOnce<StatType>()));\n    EXPECT_EQ(1, results_.size());\n    EXPECT_TRUE(checkValue(*symbolic2_ref.get()));\n    EXPECT_TRUE(checkValue(*dynamic2_ref.get()));\n  }\n\n  template <class StatType> void scopeAll(const MakeStatFn make_stat) {\n    init(make_stat);\n    EXPECT_TRUE(scope_->iterate(iterAll<StatType>()));\n    EXPECT_THAT(results_, UnorderedElementsAre(\"scope.symbolic2\", \"scope.dynamic2\"));\n  }\n\n  SymbolTablePtr symbol_table_;\n  StatNamePool pool_;\n  std::unique_ptr<AllocatorImpl> alloc_;\n  std::unique_ptr<Store> store_;\n  ScopePtr scope_;\n  absl::flat_hash_set<std::string> results_;\n  StatNameTagVector tags_;\n};\n\nINSTANTIATE_TEST_SUITE_P(StatsUtilityTest, StatsUtilityTest,\n                         testing::ValuesIn({StoreType::ThreadLocal, StoreType::Isolated}));\n\nTEST_P(StatsUtilityTest, Counters) {\n  ScopePtr scope = store_->createScope(\"scope.\");\n  Counter& c1 = Utility::counterFromElements(*scope, {DynamicName(\"a\"), DynamicName(\"b\")});\n  EXPECT_EQ(\"scope.a.b\", c1.name());\n  StatName token = pool_.add(\"token\");\n  Counter& c2 = Utility::counterFromElements(*scope, {DynamicName(\"a\"), token, 
DynamicName(\"b\")});\n  EXPECT_EQ(\"scope.a.token.b\", c2.name());\n  StatName suffix = pool_.add(\"suffix\");\n  Counter& c3 = Utility::counterFromElements(*scope, {token, suffix});\n  EXPECT_EQ(\"scope.token.suffix\", c3.name());\n  Counter& c4 = Utility::counterFromStatNames(*scope, {token, suffix});\n  EXPECT_EQ(\"scope.token.suffix\", c4.name());\n  EXPECT_EQ(&c3, &c4);\n\n  Counter& ctags =\n      Utility::counterFromElements(*scope, {DynamicName(\"x\"), token, DynamicName(\"y\")}, tags_);\n  EXPECT_EQ(\"scope.x.token.y.tag1.value1.tag2.value2\", ctags.name());\n}\n\nTEST_P(StatsUtilityTest, Gauges) {\n  ScopePtr scope = store_->createScope(\"scope.\");\n  Gauge& g1 = Utility::gaugeFromElements(*scope, {DynamicName(\"a\"), DynamicName(\"b\")},\n                                         Gauge::ImportMode::NeverImport);\n  EXPECT_EQ(\"scope.a.b\", g1.name());\n  EXPECT_EQ(Gauge::ImportMode::NeverImport, g1.importMode());\n  StatName token = pool_.add(\"token\");\n  Gauge& g2 = Utility::gaugeFromElements(*scope, {DynamicName(\"a\"), token, DynamicName(\"b\")},\n                                         Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(\"scope.a.token.b\", g2.name());\n  EXPECT_EQ(Gauge::ImportMode::Accumulate, g2.importMode());\n  StatName suffix = pool_.add(\"suffix\");\n  Gauge& g3 = Utility::gaugeFromElements(*scope, {token, suffix}, Gauge::ImportMode::NeverImport);\n  EXPECT_EQ(\"scope.token.suffix\", g3.name());\n  Gauge& g4 = Utility::gaugeFromStatNames(*scope, {token, suffix}, Gauge::ImportMode::NeverImport);\n  EXPECT_EQ(\"scope.token.suffix\", g4.name());\n  EXPECT_EQ(&g3, &g4);\n}\n\nTEST_P(StatsUtilityTest, Histograms) {\n  ScopePtr scope = store_->createScope(\"scope.\");\n  Histogram& h1 = Utility::histogramFromElements(*scope, {DynamicName(\"a\"), DynamicName(\"b\")},\n                                                 Histogram::Unit::Milliseconds);\n  EXPECT_EQ(\"scope.a.b\", h1.name());\n  EXPECT_EQ(Histogram::Unit::Milliseconds, 
h1.unit());\n  StatName token = pool_.add(\"token\");\n  Histogram& h2 = Utility::histogramFromElements(\n      *scope, {DynamicName(\"a\"), token, DynamicName(\"b\")}, Histogram::Unit::Microseconds);\n  EXPECT_EQ(\"scope.a.token.b\", h2.name());\n  EXPECT_EQ(Histogram::Unit::Microseconds, h2.unit());\n  StatName suffix = pool_.add(\"suffix\");\n  Histogram& h3 = Utility::histogramFromElements(*scope, {token, suffix}, Histogram::Unit::Bytes);\n  EXPECT_EQ(\"scope.token.suffix\", h3.name());\n  EXPECT_EQ(Histogram::Unit::Bytes, h3.unit());\n  Histogram& h4 = Utility::histogramFromStatNames(*scope, {token, suffix}, Histogram::Unit::Bytes);\n  EXPECT_EQ(&h3, &h4);\n}\n\nTEST_P(StatsUtilityTest, TextReadouts) {\n  ScopePtr scope = store_->createScope(\"scope.\");\n  TextReadout& t1 = Utility::textReadoutFromElements(*scope, {DynamicName(\"a\"), DynamicName(\"b\")});\n  EXPECT_EQ(\"scope.a.b\", t1.name());\n  StatName token = pool_.add(\"token\");\n  TextReadout& t2 =\n      Utility::textReadoutFromElements(*scope, {DynamicName(\"a\"), token, DynamicName(\"b\")});\n  EXPECT_EQ(\"scope.a.token.b\", t2.name());\n  StatName suffix = pool_.add(\"suffix\");\n  TextReadout& t3 = Utility::textReadoutFromElements(*scope, {token, suffix});\n  EXPECT_EQ(\"scope.token.suffix\", t3.name());\n  TextReadout& t4 = Utility::textReadoutFromStatNames(*scope, {token, suffix});\n  EXPECT_EQ(&t3, &t4);\n}\n\nTEST_P(StatsUtilityTest, StoreCounterOnce) { storeOnce<Counter>(makeCounter()); }\n\nTEST_P(StatsUtilityTest, StoreCounterAll) { storeAll<Counter>(makeCounter()); }\n\nTEST_P(StatsUtilityTest, ScopeCounterOnce) { scopeOnce<Counter>(makeCounter()); }\n\nTEST_P(StatsUtilityTest, ScopeCounterAll) { scopeAll<Counter>(makeCounter()); }\n\nTEST_P(StatsUtilityTest, StoreGaugeOnce) { storeOnce<Gauge>(makeGauge()); }\n\nTEST_P(StatsUtilityTest, StoreGaugeAll) { storeAll<Gauge>(makeGauge()); }\n\nTEST_P(StatsUtilityTest, ScopeGaugeOnce) { scopeOnce<Gauge>(makeGauge()); 
}\n\nTEST_P(StatsUtilityTest, ScopeGaugeAll) { scopeAll<Gauge>(makeGauge()); }\n\nTEST_P(StatsUtilityTest, StoreHistogramOnce) { storeOnce<Histogram>(makeHistogram()); }\n\nTEST_P(StatsUtilityTest, StoreHistogramAll) { storeAll<Histogram>(makeHistogram()); }\n\nTEST_P(StatsUtilityTest, ScopeHistogramOnce) { scopeOnce<Histogram>(makeHistogram()); }\n\nTEST_P(StatsUtilityTest, ScopeHistogramAll) { scopeAll<Histogram>(makeHistogram()); }\n\nTEST_P(StatsUtilityTest, StoreTextReadoutOnce) { storeOnce<TextReadout>(makeTextReadout()); }\n\nTEST_P(StatsUtilityTest, StoreTextReadoutAll) { storeAll<TextReadout>(makeTextReadout()); }\n\nTEST_P(StatsUtilityTest, ScopeTextReadoutOnce) { scopeOnce<TextReadout>(makeTextReadout()); }\n\nTEST_P(StatsUtilityTest, ScopeTextReadoutAll) { scopeAll<TextReadout>(makeTextReadout()); }\n\n} // namespace\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stream_info/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"filter_state_impl_test\",\n    srcs = [\"filter_state_impl_test.cc\"],\n    deps = [\n        \"//source/common/stream_info:filter_state_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"stream_info_impl_test\",\n    srcs = [\"stream_info_impl_test.cc\"],\n    deps = [\n        \":test_int_accessor_lib\",\n        \"//include/envoy/http:protocol_interface\",\n        \"//include/envoy/upstream:host_description_interface\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_int_accessor_lib\",\n    hdrs = [\"test_int_accessor.h\"],\n    deps = [\n        \"//include/envoy/stream_info:filter_state_interface\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_util\",\n    hdrs = [\"test_util.h\"],\n    deps = [\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/http:request_id_extension_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/stream_info:filter_state_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    deps = [\n        \"//source/common/stream_info:utility_lib\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = 
\"uint32_accessor_impl_test\",\n    srcs = [\"uint32_accessor_impl_test.cc\"],\n    deps = [\n        \"//source/common/stream_info:uint32_accessor_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/stream_info/filter_state_impl_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\nnamespace {\n\nclass TestStoredTypeTracking : public FilterState::Object {\npublic:\n  TestStoredTypeTracking(int value, size_t* access_count, size_t* destruction_count)\n      : value_(value), access_count_(access_count), destruction_count_(destruction_count) {}\n  ~TestStoredTypeTracking() override {\n    if (destruction_count_) {\n      ++*destruction_count_;\n    }\n  }\n\n  int access() const {\n    if (access_count_) {\n      ++*access_count_;\n    }\n    return value_;\n  }\n\nprivate:\n  int value_;\n  size_t* access_count_;\n  size_t* destruction_count_;\n};\n\nclass SimpleType : public FilterState::Object {\npublic:\n  SimpleType(int value) : value_(value) {}\n\n  int access() const { return value_; }\n  void set(int value) { value_ = value; }\n\nprivate:\n  int value_;\n};\n\nclass FilterStateImplTest : public testing::Test {\npublic:\n  FilterStateImplTest() { resetFilterState(); }\n\n  void resetFilterState() {\n    filter_state_ = std::make_unique<FilterStateImpl>(FilterState::LifeSpan::FilterChain);\n  }\n  FilterState& filter_state() { return *filter_state_; }\n\nprivate:\n  std::unique_ptr<FilterStateImpl> filter_state_;\n};\n\n} // namespace\n\nTEST_F(FilterStateImplTest, Simple) {\n  size_t access_count = 0u;\n  size_t destruction_count = 0u;\n  filter_state().setData(\n      \"test_name\", std::make_unique<TestStoredTypeTracking>(5, &access_count, &destruction_count),\n      FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  EXPECT_EQ(0u, access_count);\n  EXPECT_EQ(0u, destruction_count);\n\n  EXPECT_EQ(5, filter_state().getDataReadOnly<TestStoredTypeTracking>(\"test_name\").access());\n  EXPECT_EQ(1u, access_count);\n  EXPECT_EQ(0u, destruction_count);\n\n  resetFilterState();\n  EXPECT_EQ(1u, 
access_count);\n  EXPECT_EQ(1u, destruction_count);\n}\n\nTEST_F(FilterStateImplTest, SameTypes) {\n  size_t access_count_1 = 0u;\n  size_t access_count_2 = 0u;\n  size_t destruction_count = 0u;\n  static const int ValueOne = 5;\n  static const int ValueTwo = 6;\n\n  filter_state().setData(\n      \"test_1\",\n      std::make_unique<TestStoredTypeTracking>(ValueOne, &access_count_1, &destruction_count),\n      FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\n      \"test_2\",\n      std::make_unique<TestStoredTypeTracking>(ValueTwo, &access_count_2, &destruction_count),\n      FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  EXPECT_EQ(0u, access_count_1);\n  EXPECT_EQ(0u, access_count_2);\n  EXPECT_EQ(0u, destruction_count);\n\n  EXPECT_EQ(ValueOne, filter_state().getDataReadOnly<TestStoredTypeTracking>(\"test_1\").access());\n  EXPECT_EQ(1u, access_count_1);\n  EXPECT_EQ(0u, access_count_2);\n  EXPECT_EQ(ValueTwo, filter_state().getDataReadOnly<TestStoredTypeTracking>(\"test_2\").access());\n  EXPECT_EQ(1u, access_count_1);\n  EXPECT_EQ(1u, access_count_2);\n  resetFilterState();\n  EXPECT_EQ(2u, destruction_count);\n}\n\nTEST_F(FilterStateImplTest, SimpleTypeReadOnly) {\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\"test_2\", std::make_unique<SimpleType>(2),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n\n  EXPECT_EQ(1, filter_state().getDataReadOnly<SimpleType>(\"test_1\").access());\n  EXPECT_EQ(2, filter_state().getDataReadOnly<SimpleType>(\"test_2\").access());\n}\n\nTEST_F(FilterStateImplTest, SimpleTypeMutable) {\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::FilterChain);\n  
filter_state().setData(\"test_2\", std::make_unique<SimpleType>(2), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::FilterChain);\n\n  EXPECT_EQ(1, filter_state().getDataReadOnly<SimpleType>(\"test_1\").access());\n  EXPECT_EQ(2, filter_state().getDataReadOnly<SimpleType>(\"test_2\").access());\n\n  filter_state().getDataMutable<SimpleType>(\"test_1\").set(100);\n  filter_state().getDataMutable<SimpleType>(\"test_2\").set(200);\n  EXPECT_EQ(100, filter_state().getDataReadOnly<SimpleType>(\"test_1\").access());\n  EXPECT_EQ(200, filter_state().getDataReadOnly<SimpleType>(\"test_2\").access());\n}\n\nTEST_F(FilterStateImplTest, NameConflictReadOnly) {\n  // read only data cannot be overwritten (by any state type)\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  EXPECT_THROW_WITH_MESSAGE(\n      filter_state().setData(\"test_1\", std::make_unique<SimpleType>(2),\n                             FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain),\n      EnvoyException, \"FilterState::setData<T> called twice on same ReadOnly state.\");\n  EXPECT_THROW_WITH_MESSAGE(\n      filter_state().setData(\"test_1\", std::make_unique<SimpleType>(2),\n                             FilterState::StateType::Mutable, FilterState::LifeSpan::FilterChain),\n      EnvoyException, \"FilterState::setData<T> called twice on same ReadOnly state.\");\n  EXPECT_EQ(1, filter_state().getDataReadOnly<SimpleType>(\"test_1\").access());\n}\n\nTEST_F(FilterStateImplTest, NameConflictDifferentTypesReadOnly) {\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  EXPECT_THROW_WITH_MESSAGE(\n      filter_state().setData(\"test_1\",\n                             std::make_unique<TestStoredTypeTracking>(2, nullptr, 
nullptr),\n                             FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain),\n      EnvoyException, \"FilterState::setData<T> called twice on same ReadOnly state.\");\n}\n\nTEST_F(FilterStateImplTest, NameConflictMutableAndReadOnly) {\n  // Mutable data cannot be overwritten by read only data.\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::FilterChain);\n  EXPECT_THROW_WITH_MESSAGE(\n      filter_state().setData(\"test_1\", std::make_unique<SimpleType>(2),\n                             FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain),\n      EnvoyException, \"FilterState::setData<T> called twice with different state types.\");\n}\n\nTEST_F(FilterStateImplTest, NoNameConflictMutableAndMutable) {\n  // Mutable data can be overwritten by another mutable data of same or different type.\n\n  // mutable + mutable - same type\n  filter_state().setData(\"test_2\", std::make_unique<SimpleType>(3), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\"test_2\", std::make_unique<SimpleType>(4), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::FilterChain);\n  EXPECT_EQ(4, filter_state().getDataMutable<SimpleType>(\"test_2\").access());\n\n  // mutable + mutable - different types\n  filter_state().setData(\"test_4\", std::make_unique<SimpleType>(7), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\"test_4\", std::make_unique<TestStoredTypeTracking>(8, nullptr, nullptr),\n                         FilterState::StateType::Mutable, FilterState::LifeSpan::FilterChain);\n  EXPECT_EQ(8, filter_state().getDataReadOnly<TestStoredTypeTracking>(\"test_4\").access());\n}\n\nTEST_F(FilterStateImplTest, UnknownName) {\n  
EXPECT_THROW_WITH_MESSAGE(filter_state().getDataReadOnly<SimpleType>(\"test_1\"), EnvoyException,\n                            \"FilterState::getDataReadOnly<T> called for unknown data name.\");\n  EXPECT_THROW_WITH_MESSAGE(filter_state().getDataMutable<SimpleType>(\"test_1\"), EnvoyException,\n                            \"FilterState::getDataMutable<T> called for unknown data name.\");\n}\n\nTEST_F(FilterStateImplTest, WrongTypeGet) {\n  filter_state().setData(\"test_name\", std::make_unique<TestStoredTypeTracking>(5, nullptr, nullptr),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  EXPECT_EQ(5, filter_state().getDataReadOnly<TestStoredTypeTracking>(\"test_name\").access());\n  EXPECT_THROW_WITH_MESSAGE(filter_state().getDataReadOnly<SimpleType>(\"test_name\"), EnvoyException,\n                            \"Data stored under test_name cannot be coerced to specified type\");\n}\n\nTEST_F(FilterStateImplTest, ErrorAccessingReadOnlyAsMutable) {\n  // Accessing read only data as mutable should throw error\n  filter_state().setData(\"test_name\", std::make_unique<TestStoredTypeTracking>(5, nullptr, nullptr),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  EXPECT_THROW_WITH_MESSAGE(\n      filter_state().getDataMutable<TestStoredTypeTracking>(\"test_name\"), EnvoyException,\n      \"FilterState::getDataMutable<T> tried to access immutable data as mutable.\");\n}\n\nnamespace {\n\nclass A : public FilterState::Object {};\n\nclass B : public A {};\n\nclass C : public B {};\n\n} // namespace\n\nTEST_F(FilterStateImplTest, FungibleInheritance) {\n  filter_state().setData(\"testB\", std::make_unique<B>(), FilterState::StateType::ReadOnly,\n                         FilterState::LifeSpan::FilterChain);\n  EXPECT_TRUE(filter_state().hasData<B>(\"testB\"));\n  EXPECT_TRUE(filter_state().hasData<A>(\"testB\"));\n  EXPECT_FALSE(filter_state().hasData<C>(\"testB\"));\n\n  
filter_state().setData(\"testC\", std::make_unique<C>(), FilterState::StateType::ReadOnly,\n                         FilterState::LifeSpan::FilterChain);\n  EXPECT_TRUE(filter_state().hasData<B>(\"testC\"));\n  EXPECT_TRUE(filter_state().hasData<A>(\"testC\"));\n  EXPECT_TRUE(filter_state().hasData<C>(\"testC\"));\n}\n\nTEST_F(FilterStateImplTest, HasData) {\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  EXPECT_TRUE(filter_state().hasData<SimpleType>(\"test_1\"));\n  EXPECT_FALSE(filter_state().hasData<SimpleType>(\"test_2\"));\n  EXPECT_FALSE(filter_state().hasData<TestStoredTypeTracking>(\"test_1\"));\n  EXPECT_FALSE(filter_state().hasData<TestStoredTypeTracking>(\"test_2\"));\n  EXPECT_TRUE(filter_state().hasDataWithName(\"test_1\"));\n  EXPECT_FALSE(filter_state().hasDataWithName(\"test_2\"));\n}\n\nTEST_F(FilterStateImplTest, LifeSpanInitFromParent) {\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\"test_2\", std::make_unique<SimpleType>(2), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\"test_3\", std::make_unique<SimpleType>(3),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::Request);\n  filter_state().setData(\"test_4\", std::make_unique<SimpleType>(4), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Request);\n  filter_state().setData(\"test_5\", std::make_unique<SimpleType>(5),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::Connection);\n  filter_state().setData(\"test_6\", std::make_unique<SimpleType>(6), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Connection);\n\n  
FilterStateImpl new_filter_state(filter_state().parent(), FilterState::LifeSpan::FilterChain);\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_1\"));\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_2\"));\n  EXPECT_TRUE(new_filter_state.hasDataWithName(\"test_3\"));\n  EXPECT_TRUE(new_filter_state.hasDataWithName(\"test_4\"));\n  EXPECT_TRUE(new_filter_state.hasDataWithName(\"test_5\"));\n  EXPECT_TRUE(new_filter_state.hasDataWithName(\"test_6\"));\n  EXPECT_THROW_WITH_MESSAGE(\n      new_filter_state.getDataMutable<SimpleType>(\"test_3\"), EnvoyException,\n      \"FilterState::getDataMutable<T> tried to access immutable data as mutable.\");\n  EXPECT_EQ(4, new_filter_state.getDataMutable<SimpleType>(\"test_4\").access());\n  EXPECT_THROW_WITH_MESSAGE(\n      new_filter_state.getDataMutable<SimpleType>(\"test_5\"), EnvoyException,\n      \"FilterState::getDataMutable<T> tried to access immutable data as mutable.\");\n  EXPECT_EQ(6, new_filter_state.getDataMutable<SimpleType>(\"test_6\").access());\n}\n\nTEST_F(FilterStateImplTest, LifeSpanInitFromGrandparent) {\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\"test_2\", std::make_unique<SimpleType>(2), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\"test_3\", std::make_unique<SimpleType>(3),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::Request);\n  filter_state().setData(\"test_4\", std::make_unique<SimpleType>(4), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Request);\n  filter_state().setData(\"test_5\", std::make_unique<SimpleType>(5),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::Connection);\n  filter_state().setData(\"test_6\", std::make_unique<SimpleType>(6), 
FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Connection);\n\n  FilterStateImpl new_filter_state(filter_state().parent()->parent(),\n                                   FilterState::LifeSpan::FilterChain);\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_1\"));\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_2\"));\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_3\"));\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_4\"));\n  EXPECT_TRUE(new_filter_state.hasDataWithName(\"test_5\"));\n  EXPECT_TRUE(new_filter_state.hasDataWithName(\"test_6\"));\n  EXPECT_THROW_WITH_MESSAGE(\n      new_filter_state.getDataMutable<SimpleType>(\"test_5\"), EnvoyException,\n      \"FilterState::getDataMutable<T> tried to access immutable data as mutable.\");\n  EXPECT_EQ(6, new_filter_state.getDataMutable<SimpleType>(\"test_6\").access());\n}\n\nTEST_F(FilterStateImplTest, LifeSpanInitFromNonParent) {\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\"test_2\", std::make_unique<SimpleType>(2), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::FilterChain);\n  filter_state().setData(\"test_3\", std::make_unique<SimpleType>(3),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::Request);\n  filter_state().setData(\"test_4\", std::make_unique<SimpleType>(4), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Request);\n  filter_state().setData(\"test_5\", std::make_unique<SimpleType>(5),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::Connection);\n  filter_state().setData(\"test_6\", std::make_unique<SimpleType>(6), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Connection);\n\n  FilterStateImpl 
new_filter_state(filter_state().parent(), FilterState::LifeSpan::Request);\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_1\"));\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_2\"));\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_3\"));\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_4\"));\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_5\"));\n  EXPECT_FALSE(new_filter_state.hasDataWithName(\"test_6\"));\n}\n\nTEST_F(FilterStateImplTest, HasDataAtOrAboveLifeSpan) {\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::FilterChain);\n  EXPECT_TRUE(filter_state().hasDataAtOrAboveLifeSpan(FilterState::LifeSpan::FilterChain));\n  EXPECT_FALSE(filter_state().hasDataAtOrAboveLifeSpan(FilterState::LifeSpan::Request));\n  EXPECT_FALSE(filter_state().hasDataAtOrAboveLifeSpan(FilterState::LifeSpan::Connection));\n\n  filter_state().setData(\"test_2\", std::make_unique<SimpleType>(2),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::Request);\n  EXPECT_TRUE(filter_state().hasDataAtOrAboveLifeSpan(FilterState::LifeSpan::FilterChain));\n  EXPECT_TRUE(filter_state().hasDataAtOrAboveLifeSpan(FilterState::LifeSpan::Request));\n  EXPECT_FALSE(filter_state().hasDataAtOrAboveLifeSpan(FilterState::LifeSpan::Connection));\n\n  filter_state().setData(\"test_3\", std::make_unique<SimpleType>(3),\n                         FilterState::StateType::ReadOnly, FilterState::LifeSpan::Connection);\n  EXPECT_TRUE(filter_state().hasDataAtOrAboveLifeSpan(FilterState::LifeSpan::FilterChain));\n  EXPECT_TRUE(filter_state().hasDataAtOrAboveLifeSpan(FilterState::LifeSpan::Request));\n  EXPECT_TRUE(filter_state().hasDataAtOrAboveLifeSpan(FilterState::LifeSpan::Connection));\n}\n\nTEST_F(FilterStateImplTest, SetSameDataWithDifferentLifeSpan) {\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(1), 
FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Connection);\n  // Test reset on smaller LifeSpan\n  EXPECT_THROW_WITH_MESSAGE(\n      filter_state().setData(\"test_1\", std::make_unique<SimpleType>(2),\n                             FilterState::StateType::Mutable, FilterState::LifeSpan::FilterChain),\n      EnvoyException,\n      \"FilterState::setData<T> called twice with conflicting life_span on the same data_name.\");\n  EXPECT_THROW_WITH_MESSAGE(\n      filter_state().setData(\"test_1\", std::make_unique<SimpleType>(2),\n                             FilterState::StateType::Mutable, FilterState::LifeSpan::Request),\n      EnvoyException,\n      \"FilterState::setData<T> called twice with conflicting life_span on the same data_name.\");\n\n  // Still mutable on the correct LifeSpan.\n  filter_state().setData(\"test_1\", std::make_unique<SimpleType>(2), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Connection);\n  EXPECT_EQ(2, filter_state().getDataMutable<SimpleType>(\"test_1\").access());\n\n  filter_state().setData(\"test_2\", std::make_unique<SimpleType>(1), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Request);\n  // Test reset on smaller and greater LifeSpan\n  EXPECT_THROW_WITH_MESSAGE(\n      filter_state().setData(\"test_2\", std::make_unique<SimpleType>(2),\n                             FilterState::StateType::Mutable, FilterState::LifeSpan::FilterChain),\n      EnvoyException,\n      \"FilterState::setData<T> called twice with conflicting life_span on the same data_name.\");\n  EXPECT_THROW_WITH_MESSAGE(\n      filter_state().setData(\"test_2\", std::make_unique<SimpleType>(2),\n                             FilterState::StateType::Mutable, FilterState::LifeSpan::Connection),\n      EnvoyException,\n      \"FilterState::setData<T> called twice with conflicting life_span on the same data_name.\");\n\n  // Still mutable on the correct LifeSpan.\n  
filter_state().setData(\"test_2\", std::make_unique<SimpleType>(2), FilterState::StateType::Mutable,\n                         FilterState::LifeSpan::Request);\n  EXPECT_EQ(2, filter_state().getDataMutable<SimpleType>(\"test_2\").access());\n}\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stream_info/stream_info_impl_test.cc",
    "content": "#include <chrono>\n#include <functional>\n\n#include \"envoy/http/protocol.h\"\n#include \"envoy/stream_info/filter_state.h\"\n#include \"envoy/upstream/host_description.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"test/common/stream_info/test_int_accessor.h\"\n#include \"test/test_common/utility.h\"\n\n//#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/test_time.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\nnamespace {\n\nstd::chrono::nanoseconds checkDuration(std::chrono::nanoseconds last,\n                                       absl::optional<std::chrono::nanoseconds> timing) {\n  EXPECT_TRUE(timing);\n  EXPECT_LE(last, timing.value());\n  return timing.value();\n}\n\nclass StreamInfoImplTest : public testing::Test {\nprotected:\n  DangerousDeprecatedTestTime test_time_;\n};\n\nTEST_F(StreamInfoImplTest, TimingTest) {\n  MonotonicTime pre_start = test_time_.timeSystem().monotonicTime();\n  StreamInfoImpl info(Http::Protocol::Http2, test_time_.timeSystem());\n  Envoy::StreamInfo::UpstreamTiming upstream_timing;\n  MonotonicTime post_start = test_time_.timeSystem().monotonicTime();\n\n  const MonotonicTime& start = info.startTimeMonotonic();\n\n  EXPECT_LE(pre_start, start) << \"Start time was lower than expected\";\n  EXPECT_GE(post_start, start) << \"Start time was higher than expected\";\n\n  EXPECT_FALSE(info.lastDownstreamRxByteReceived());\n  info.onLastDownstreamRxByteReceived();\n  std::chrono::nanoseconds dur =\n      checkDuration(std::chrono::nanoseconds{0}, info.lastDownstreamRxByteReceived());\n\n  EXPECT_FALSE(info.firstUpstreamTxByteSent());\n  upstream_timing.onFirstUpstreamTxByteSent(test_time_.timeSystem());\n  
info.setUpstreamTiming(upstream_timing);\n  dur = checkDuration(dur, info.firstUpstreamTxByteSent());\n\n  EXPECT_FALSE(info.lastUpstreamTxByteSent());\n  upstream_timing.onLastUpstreamTxByteSent(test_time_.timeSystem());\n  info.setUpstreamTiming(upstream_timing);\n  dur = checkDuration(dur, info.lastUpstreamTxByteSent());\n\n  EXPECT_FALSE(info.firstUpstreamRxByteReceived());\n  upstream_timing.onFirstUpstreamRxByteReceived(test_time_.timeSystem());\n  info.setUpstreamTiming(upstream_timing);\n  dur = checkDuration(dur, info.firstUpstreamRxByteReceived());\n\n  EXPECT_FALSE(info.lastUpstreamRxByteReceived());\n  upstream_timing.onLastUpstreamRxByteReceived(test_time_.timeSystem());\n  info.setUpstreamTiming(upstream_timing);\n  dur = checkDuration(dur, info.lastUpstreamRxByteReceived());\n\n  EXPECT_FALSE(info.firstDownstreamTxByteSent());\n  info.onFirstDownstreamTxByteSent();\n  dur = checkDuration(dur, info.firstDownstreamTxByteSent());\n\n  EXPECT_FALSE(info.lastDownstreamTxByteSent());\n  info.onLastDownstreamTxByteSent();\n  dur = checkDuration(dur, info.lastDownstreamTxByteSent());\n\n  EXPECT_FALSE(info.requestComplete());\n  info.onRequestComplete();\n  dur = checkDuration(dur, info.requestComplete());\n}\n\nTEST_F(StreamInfoImplTest, BytesTest) {\n  StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n\n  const uint64_t bytes_sent = 7;\n  const uint64_t bytes_received = 12;\n\n  stream_info.addBytesSent(bytes_sent);\n  stream_info.addBytesReceived(bytes_received);\n\n  EXPECT_EQ(bytes_sent, stream_info.bytesSent());\n  EXPECT_EQ(bytes_received, stream_info.bytesReceived());\n}\n\nTEST_F(StreamInfoImplTest, ResponseFlagTest) {\n  const std::vector<ResponseFlag> responseFlags = {FailedLocalHealthCheck,\n                                                   NoHealthyUpstream,\n                                                   UpstreamRequestTimeout,\n                                                   LocalReset,\n                      
                             UpstreamRemoteReset,\n                                                   UpstreamConnectionFailure,\n                                                   UpstreamConnectionTermination,\n                                                   UpstreamOverflow,\n                                                   NoRouteFound,\n                                                   DelayInjected,\n                                                   FaultInjected,\n                                                   RateLimited};\n\n  StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n\n  EXPECT_FALSE(stream_info.hasAnyResponseFlag());\n  EXPECT_FALSE(stream_info.intersectResponseFlags(0));\n  for (ResponseFlag flag : responseFlags) {\n    // Test cumulative setting of response flags.\n    EXPECT_FALSE(stream_info.hasResponseFlag(flag))\n        << fmt::format(\"Flag: {} was already set\", flag);\n    stream_info.setResponseFlag(flag);\n    EXPECT_TRUE(stream_info.hasResponseFlag(flag))\n        << fmt::format(\"Flag: {} was expected to be set\", flag);\n  }\n  EXPECT_TRUE(stream_info.hasAnyResponseFlag());\n  EXPECT_EQ(0xFFF, stream_info.responseFlags());\n\n  StreamInfoImpl stream_info2(Http::Protocol::Http2, test_time_.timeSystem());\n  stream_info2.setResponseFlag(FailedLocalHealthCheck);\n\n  EXPECT_TRUE(stream_info2.intersectResponseFlags(FailedLocalHealthCheck));\n}\n\nTEST_F(StreamInfoImplTest, MiscSettersAndGetters) {\n  {\n    StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n\n    EXPECT_EQ(Http::Protocol::Http2, stream_info.protocol().value());\n\n    stream_info.protocol(Http::Protocol::Http10);\n    EXPECT_EQ(Http::Protocol::Http10, stream_info.protocol().value());\n\n    EXPECT_FALSE(stream_info.responseCode());\n    stream_info.response_code_ = 200;\n    ASSERT_TRUE(stream_info.responseCode());\n    EXPECT_EQ(200, stream_info.responseCode().value());\n\n    
EXPECT_FALSE(stream_info.responseCodeDetails().has_value());\n    stream_info.setResponseCodeDetails(ResponseCodeDetails::get().ViaUpstream);\n    ASSERT_TRUE(stream_info.responseCodeDetails().has_value());\n    EXPECT_EQ(ResponseCodeDetails::get().ViaUpstream, stream_info.responseCodeDetails().value());\n\n    EXPECT_FALSE(stream_info.connectionTerminationDetails().has_value());\n    stream_info.setConnectionTerminationDetails(\"access_denied\");\n    ASSERT_TRUE(stream_info.connectionTerminationDetails().has_value());\n    EXPECT_EQ(\"access_denied\", stream_info.connectionTerminationDetails().value());\n\n    EXPECT_EQ(nullptr, stream_info.upstreamHost());\n    Upstream::HostDescriptionConstSharedPtr host(new NiceMock<Upstream::MockHostDescription>());\n    stream_info.onUpstreamHostSelected(host);\n    EXPECT_EQ(host, stream_info.upstreamHost());\n\n    EXPECT_FALSE(stream_info.healthCheck());\n    stream_info.healthCheck(true);\n    EXPECT_TRUE(stream_info.healthCheck());\n\n    EXPECT_EQ(nullptr, stream_info.routeEntry());\n    NiceMock<Router::MockRouteEntry> route_entry;\n    stream_info.route_entry_ = &route_entry;\n    EXPECT_EQ(&route_entry, stream_info.routeEntry());\n\n    stream_info.filterState()->setData(\"test\", std::make_unique<TestIntAccessor>(1),\n                                       FilterState::StateType::ReadOnly,\n                                       FilterState::LifeSpan::FilterChain);\n    EXPECT_EQ(1, stream_info.filterState()->getDataReadOnly<TestIntAccessor>(\"test\").access());\n\n    stream_info.setUpstreamFilterState(stream_info.filterState());\n    EXPECT_EQ(1,\n              stream_info.upstreamFilterState()->getDataReadOnly<TestIntAccessor>(\"test\").access());\n\n    EXPECT_EQ(\"\", stream_info.requestedServerName());\n    absl::string_view sni_name = \"stubserver.org\";\n    stream_info.setRequestedServerName(sni_name);\n    EXPECT_EQ(std::string(sni_name), stream_info.requestedServerName());\n\n    EXPECT_EQ(absl::nullopt, 
stream_info.upstreamClusterInfo());\n    Upstream::ClusterInfoConstSharedPtr cluster_info(new NiceMock<Upstream::MockClusterInfo>());\n    stream_info.setUpstreamClusterInfo(cluster_info);\n    EXPECT_NE(absl::nullopt, stream_info.upstreamClusterInfo());\n    EXPECT_EQ(\"fake_cluster\", stream_info.upstreamClusterInfo().value()->name());\n  }\n}\n\nTEST_F(StreamInfoImplTest, DynamicMetadataTest) {\n  StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n\n  EXPECT_EQ(0, stream_info.dynamicMetadata().filter_metadata_size());\n  stream_info.setDynamicMetadata(\"com.test\", MessageUtil::keyValueStruct(\"test_key\", \"test_value\"));\n  EXPECT_EQ(\"test_value\",\n            Config::Metadata::metadataValue(&stream_info.dynamicMetadata(), \"com.test\", \"test_key\")\n                .string_value());\n  ProtobufWkt::Struct struct_obj2;\n  ProtobufWkt::Value val2;\n  val2.set_string_value(\"another_value\");\n  (*struct_obj2.mutable_fields())[\"another_key\"] = val2;\n  stream_info.setDynamicMetadata(\"com.test\", struct_obj2);\n  EXPECT_EQ(\"another_value\", Config::Metadata::metadataValue(&stream_info.dynamicMetadata(),\n                                                             \"com.test\", \"another_key\")\n                                 .string_value());\n  // make sure \"test_key:test_value\" still exists\n  EXPECT_EQ(\"test_value\",\n            Config::Metadata::metadataValue(&stream_info.dynamicMetadata(), \"com.test\", \"test_key\")\n                .string_value());\n  std::string json;\n  const auto test_struct = stream_info.dynamicMetadata().filter_metadata().at(\"com.test\");\n  const auto status = Protobuf::util::MessageToJsonString(test_struct, &json);\n  EXPECT_TRUE(status.ok());\n  // check json contains the key and values we set\n  EXPECT_TRUE(json.find(\"\\\"test_key\\\":\\\"test_value\\\"\") != std::string::npos);\n  EXPECT_TRUE(json.find(\"\\\"another_key\\\":\\\"another_value\\\"\") != 
std::string::npos);\n}\n\nTEST_F(StreamInfoImplTest, DumpStateTest) {\n  StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n  std::string prefix = \"\";\n\n  for (int i = 0; i < 7; ++i) {\n    std::stringstream out;\n    stream_info.dumpState(out, i);\n    std::string state = out.str();\n    EXPECT_TRUE(absl::StartsWith(state, prefix));\n    EXPECT_THAT(state, testing::HasSubstr(\"protocol_: 2\"));\n    prefix = prefix + \"  \";\n  }\n}\n\nTEST_F(StreamInfoImplTest, RequestHeadersTest) {\n  StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n  EXPECT_FALSE(stream_info.getRequestHeaders());\n\n  Http::TestRequestHeaderMapImpl headers;\n  stream_info.setRequestHeaders(headers);\n  EXPECT_EQ(&headers, stream_info.getRequestHeaders());\n}\n\nTEST_F(StreamInfoImplTest, DefaultRequestIDExtensionTest) {\n  StreamInfoImpl stream_info(test_time_.timeSystem());\n  EXPECT_TRUE(stream_info.getRequestIDExtension());\n\n  auto rid_extension = stream_info.getRequestIDExtension();\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  rid_extension->set(request_headers, false);\n  rid_extension->set(request_headers, true);\n  rid_extension->setInResponse(response_headers, request_headers);\n  uint64_t out = 123;\n  EXPECT_FALSE(rid_extension->modBy(request_headers, out, 10000));\n  EXPECT_EQ(out, 123);\n  rid_extension->setTraceStatus(request_headers, Http::TraceStatus::Forced);\n  EXPECT_EQ(rid_extension->getTraceStatus(request_headers), Http::TraceStatus::NoTrace);\n}\n\nTEST_F(StreamInfoImplTest, ConnectionID) {\n  StreamInfoImpl stream_info(test_time_.timeSystem());\n  EXPECT_FALSE(stream_info.connectionID().has_value());\n  uint64_t id = 123;\n  stream_info.setConnectionID(id);\n  EXPECT_EQ(id, stream_info.connectionID());\n}\n\n} // namespace\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stream_info/test_int_accessor.h",
    "content": "#pragma once\n\n#include \"envoy/stream_info/filter_state.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\n\nclass TestIntAccessor : public FilterState::Object {\npublic:\n  TestIntAccessor(int value) : value_(value) {}\n\n  int access() const { return value_; }\n\nprivate:\n  int value_;\n};\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stream_info/test_util.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/random_generator.h\"\n#include \"common/http/request_id_extension_impl.h\"\n#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n\nnamespace Envoy {\n\nclass TestStreamInfo : public StreamInfo::StreamInfo {\npublic:\n  TestStreamInfo()\n      : filter_state_(std::make_shared<Envoy::StreamInfo::FilterStateImpl>(\n            Envoy::StreamInfo::FilterState::LifeSpan::FilterChain)) {\n    // Use 1999-01-01 00:00:00 +0\n    time_t fake_time = 915148800;\n    start_time_ = std::chrono::system_clock::from_time_t(fake_time);\n    request_id_extension_ = Http::RequestIDExtensionFactory::defaultInstance(random_);\n\n    MonotonicTime now = timeSystem().monotonicTime();\n    start_time_monotonic_ = now;\n    end_time_ = now + std::chrono::milliseconds(3);\n  }\n\n  SystemTime startTime() const override { return start_time_; }\n  MonotonicTime startTimeMonotonic() const override { return start_time_monotonic_; }\n\n  void addBytesReceived(uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  uint64_t bytesReceived() const override { return 1; }\n  absl::optional<Http::Protocol> protocol() const override { return protocol_; }\n  void protocol(Http::Protocol protocol) override { protocol_ = protocol; }\n  absl::optional<uint32_t> responseCode() const override { return response_code_; }\n  const absl::optional<std::string>& responseCodeDetails() const override {\n    return response_code_details_;\n  }\n  void setResponseCodeDetails(absl::string_view rc_details) override {\n    response_code_details_.emplace(rc_details);\n  }\n  const absl::optional<std::string>& connectionTerminationDetails() const override {\n    return connection_termination_details_;\n  }\n  void setConnectionTerminationDetails(absl::string_view details) override 
{\n    connection_termination_details_.emplace(details);\n  }\n  void addBytesSent(uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  uint64_t bytesSent() const override { return 2; }\n  bool intersectResponseFlags(uint64_t response_flags) const override {\n    return (response_flags_ & response_flags) != 0;\n  }\n  bool hasResponseFlag(Envoy::StreamInfo::ResponseFlag response_flag) const override {\n    return response_flags_ & response_flag;\n  }\n  bool hasAnyResponseFlag() const override { return response_flags_ != 0; }\n  void setResponseFlag(Envoy::StreamInfo::ResponseFlag response_flag) override {\n    response_flags_ |= response_flag;\n  }\n  uint64_t responseFlags() const override { return response_flags_; }\n  void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) override {\n    upstream_host_ = host;\n  }\n  Upstream::HostDescriptionConstSharedPtr upstreamHost() const override { return upstream_host_; }\n  void setUpstreamLocalAddress(\n      const Network::Address::InstanceConstSharedPtr& upstream_local_address) override {\n    upstream_local_address_ = upstream_local_address;\n  }\n  const Network::Address::InstanceConstSharedPtr& upstreamLocalAddress() const override {\n    return upstream_local_address_;\n  }\n  bool healthCheck() const override { return health_check_request_; }\n  void healthCheck(bool is_health_check) override { health_check_request_ = is_health_check; }\n\n  void setDownstreamLocalAddress(\n      const Network::Address::InstanceConstSharedPtr& downstream_local_address) override {\n    downstream_local_address_ = downstream_local_address;\n  }\n  const Network::Address::InstanceConstSharedPtr& downstreamLocalAddress() const override {\n    return downstream_local_address_;\n  }\n  void setDownstreamDirectRemoteAddress(\n      const Network::Address::InstanceConstSharedPtr& downstream_direct_remote_address) override {\n    downstream_direct_remote_address_ = downstream_direct_remote_address;\n  }\n  const 
Network::Address::InstanceConstSharedPtr& downstreamDirectRemoteAddress() const override {\n    return downstream_direct_remote_address_;\n  }\n  void setDownstreamRemoteAddress(\n      const Network::Address::InstanceConstSharedPtr& downstream_remote_address) override {\n    downstream_remote_address_ = downstream_remote_address;\n  }\n  const Network::Address::InstanceConstSharedPtr& downstreamRemoteAddress() const override {\n    return downstream_remote_address_;\n  }\n\n  void\n  setDownstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& connection_info) override {\n    downstream_connection_info_ = connection_info;\n  }\n\n  Ssl::ConnectionInfoConstSharedPtr downstreamSslConnection() const override {\n    return downstream_connection_info_;\n  }\n\n  void setUpstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& connection_info) override {\n    upstream_connection_info_ = connection_info;\n  }\n\n  Ssl::ConnectionInfoConstSharedPtr upstreamSslConnection() const override {\n    return upstream_connection_info_;\n  }\n  void setRouteName(absl::string_view route_name) override {\n    route_name_ = std::string(route_name);\n  }\n  const std::string& getRouteName() const override { return route_name_; }\n\n  const Router::RouteEntry* routeEntry() const override { return route_entry_; }\n\n  absl::optional<std::chrono::nanoseconds>\n  duration(const absl::optional<MonotonicTime>& time) const {\n    if (!time) {\n      return {};\n    }\n\n    return std::chrono::duration_cast<std::chrono::nanoseconds>(time.value() -\n                                                                start_time_monotonic_);\n  }\n\n  absl::optional<std::chrono::nanoseconds> lastDownstreamRxByteReceived() const override {\n    return duration(last_rx_byte_received_);\n  }\n\n  void onLastDownstreamRxByteReceived() override {\n    last_rx_byte_received_ = timeSystem().monotonicTime();\n  }\n\n  absl::optional<std::chrono::nanoseconds> firstUpstreamTxByteSent() const 
override {\n    return duration(upstream_timing_.first_upstream_tx_byte_sent_);\n  }\n\n  absl::optional<std::chrono::nanoseconds> lastUpstreamTxByteSent() const override {\n    return duration(upstream_timing_.last_upstream_tx_byte_sent_);\n  }\n  absl::optional<std::chrono::nanoseconds> firstUpstreamRxByteReceived() const override {\n    return duration(upstream_timing_.first_upstream_rx_byte_received_);\n  }\n\n  absl::optional<std::chrono::nanoseconds> lastUpstreamRxByteReceived() const override {\n    return duration(upstream_timing_.last_upstream_rx_byte_received_);\n  }\n\n  absl::optional<std::chrono::nanoseconds> firstDownstreamTxByteSent() const override {\n    return duration(first_downstream_tx_byte_sent_);\n  }\n\n  void onFirstDownstreamTxByteSent() override {\n    first_downstream_tx_byte_sent_ = timeSystem().monotonicTime();\n  }\n\n  absl::optional<std::chrono::nanoseconds> lastDownstreamTxByteSent() const override {\n    return duration(last_downstream_tx_byte_sent_);\n  }\n\n  void onLastDownstreamTxByteSent() override {\n    last_downstream_tx_byte_sent_ = timeSystem().monotonicTime();\n  }\n\n  void onRequestComplete() override { end_time_ = timeSystem().monotonicTime(); }\n\n  void setUpstreamTiming(const Envoy::StreamInfo::UpstreamTiming& upstream_timing) override {\n    upstream_timing_ = upstream_timing;\n  }\n\n  absl::optional<std::chrono::nanoseconds> requestComplete() const override {\n    return duration(end_time_);\n  }\n\n  envoy::config::core::v3::Metadata& dynamicMetadata() override { return metadata_; };\n  const envoy::config::core::v3::Metadata& dynamicMetadata() const override { return metadata_; };\n\n  void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) override {\n    (*metadata_.mutable_filter_metadata())[name].MergeFrom(value);\n  };\n\n  const Envoy::StreamInfo::FilterStateSharedPtr& filterState() override { return filter_state_; }\n  const Envoy::StreamInfo::FilterState& filterState() const 
override { return *filter_state_; }\n\n  const Envoy::StreamInfo::FilterStateSharedPtr& upstreamFilterState() const override {\n    return upstream_filter_state_;\n  }\n  void\n  setUpstreamFilterState(const Envoy::StreamInfo::FilterStateSharedPtr& filter_state) override {\n    upstream_filter_state_ = filter_state;\n  }\n\n  void setRequestedServerName(const absl::string_view requested_server_name) override {\n    requested_server_name_ = std::string(requested_server_name);\n  }\n\n  const std::string& requestedServerName() const override { return requested_server_name_; }\n\n  void setUpstreamTransportFailureReason(absl::string_view failure_reason) override {\n    upstream_transport_failure_reason_ = std::string(failure_reason);\n  }\n\n  const std::string& upstreamTransportFailureReason() const override {\n    return upstream_transport_failure_reason_;\n  }\n\n  void setRequestHeaders(const Http::RequestHeaderMap& headers) override {\n    request_headers_ = &headers;\n  }\n\n  const Http::RequestHeaderMap* getRequestHeaders() const override { return request_headers_; }\n\n  void setRequestIDExtension(Http::RequestIDExtensionSharedPtr request_id_extension) override {\n    request_id_extension_ = request_id_extension;\n  }\n  Http::RequestIDExtensionSharedPtr getRequestIDExtension() const override {\n    return request_id_extension_;\n  }\n\n  Event::TimeSystem& timeSystem() { return test_time_.timeSystem(); }\n\n  void setUpstreamClusterInfo(\n      const Upstream::ClusterInfoConstSharedPtr& upstream_cluster_info) override {\n    upstream_cluster_info_ = upstream_cluster_info;\n  }\n  absl::optional<Upstream::ClusterInfoConstSharedPtr> upstreamClusterInfo() const override {\n    return upstream_cluster_info_;\n  }\n\n  void setConnectionID(uint64_t id) override { connection_id_ = id; }\n\n  absl::optional<uint64_t> connectionID() const override { return connection_id_; }\n\n  Random::RandomGeneratorImpl random_;\n  SystemTime start_time_;\n  MonotonicTime 
start_time_monotonic_;\n\n  absl::optional<MonotonicTime> last_rx_byte_received_;\n  absl::optional<MonotonicTime> first_upstream_tx_byte_sent_;\n  absl::optional<MonotonicTime> last_upstream_tx_byte_sent_;\n  absl::optional<MonotonicTime> first_upstream_rx_byte_received_;\n  absl::optional<MonotonicTime> last_upstream_rx_byte_received_;\n  absl::optional<MonotonicTime> first_downstream_tx_byte_sent_;\n  absl::optional<MonotonicTime> last_downstream_tx_byte_sent_;\n  absl::optional<MonotonicTime> end_time_;\n\n  absl::optional<Http::Protocol> protocol_{Http::Protocol::Http11};\n  absl::optional<uint32_t> response_code_;\n  absl::optional<std::string> response_code_details_;\n  absl::optional<std::string> connection_termination_details_;\n  uint64_t response_flags_{};\n  Upstream::HostDescriptionConstSharedPtr upstream_host_{};\n  bool health_check_request_{};\n  std::string route_name_;\n  Network::Address::InstanceConstSharedPtr upstream_local_address_;\n  Network::Address::InstanceConstSharedPtr downstream_local_address_;\n  Network::Address::InstanceConstSharedPtr downstream_direct_remote_address_;\n  Network::Address::InstanceConstSharedPtr downstream_remote_address_;\n  Ssl::ConnectionInfoConstSharedPtr downstream_connection_info_;\n  Ssl::ConnectionInfoConstSharedPtr upstream_connection_info_;\n  const Router::RouteEntry* route_entry_{};\n  envoy::config::core::v3::Metadata metadata_{};\n  Envoy::StreamInfo::FilterStateSharedPtr filter_state_{\n      std::make_shared<Envoy::StreamInfo::FilterStateImpl>(\n          Envoy::StreamInfo::FilterState::LifeSpan::FilterChain)};\n  Envoy::StreamInfo::FilterStateSharedPtr upstream_filter_state_;\n  Envoy::StreamInfo::UpstreamTiming upstream_timing_;\n  std::string requested_server_name_;\n  std::string upstream_transport_failure_reason_;\n  const Http::RequestHeaderMap* request_headers_{};\n  Envoy::Event::SimulatedTimeSystem test_time_;\n  absl::optional<Upstream::ClusterInfoConstSharedPtr> upstream_cluster_info_{};\n 
 Http::RequestIDExtensionSharedPtr request_id_extension_;\n  absl::optional<uint64_t> connection_id_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stream_info/uint32_accessor_impl_test.cc",
    "content": "#include \"common/stream_info/uint32_accessor_impl.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\nnamespace {\n\nTEST(UInt32AccessorImplTest, ConstructorInitsValue) {\n  uint32_t init_value = 0xdeadbeef;\n  UInt32AccessorImpl accessor(init_value);\n  EXPECT_EQ(init_value, accessor.value());\n}\n\nTEST(UInt32AccessorImplTest, IncrementValue) {\n  uint32_t init_value = 0xdeadbeef;\n  UInt32AccessorImpl accessor(init_value);\n  accessor.increment();\n  EXPECT_EQ(0xdeadbef0, accessor.value());\n}\n\n} // namespace\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/stream_info/utility_test.cc",
    "content": "#include \"common/network/address_impl.h\"\n#include \"common/stream_info/utility.h\"\n\n#include \"test/mocks/stream_info/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace StreamInfo {\nnamespace {\n\nTEST(ResponseFlagUtilsTest, toShortStringConversion) {\n  static_assert(ResponseFlag::LastFlag == 0x400000, \"A flag has been added. Fix this code.\");\n\n  std::vector<std::pair<ResponseFlag, std::string>> expected = {\n      std::make_pair(ResponseFlag::FailedLocalHealthCheck, \"LH\"),\n      std::make_pair(ResponseFlag::NoHealthyUpstream, \"UH\"),\n      std::make_pair(ResponseFlag::UpstreamRequestTimeout, \"UT\"),\n      std::make_pair(ResponseFlag::LocalReset, \"LR\"),\n      std::make_pair(ResponseFlag::UpstreamRemoteReset, \"UR\"),\n      std::make_pair(ResponseFlag::UpstreamConnectionFailure, \"UF\"),\n      std::make_pair(ResponseFlag::UpstreamConnectionTermination, \"UC\"),\n      std::make_pair(ResponseFlag::UpstreamOverflow, \"UO\"),\n      std::make_pair(ResponseFlag::NoRouteFound, \"NR\"),\n      std::make_pair(ResponseFlag::DelayInjected, \"DI\"),\n      std::make_pair(ResponseFlag::FaultInjected, \"FI\"),\n      std::make_pair(ResponseFlag::RateLimited, \"RL\"),\n      std::make_pair(ResponseFlag::UnauthorizedExternalService, \"UAEX\"),\n      std::make_pair(ResponseFlag::RateLimitServiceError, \"RLSE\"),\n      std::make_pair(ResponseFlag::DownstreamConnectionTermination, \"DC\"),\n      std::make_pair(ResponseFlag::UpstreamRetryLimitExceeded, \"URX\"),\n      std::make_pair(ResponseFlag::StreamIdleTimeout, \"SI\"),\n      std::make_pair(ResponseFlag::InvalidEnvoyRequestHeaders, \"IH\"),\n      std::make_pair(ResponseFlag::DownstreamProtocolError, \"DPE\"),\n      std::make_pair(ResponseFlag::UpstreamMaxStreamDurationReached, \"UMSDR\"),\n      std::make_pair(ResponseFlag::ResponseFromCacheFilter, \"RFCF\"),\n      
std::make_pair(ResponseFlag::NoFilterConfigFound, \"NFCF\"),\n      std::make_pair(ResponseFlag::DurationTimeout, \"DT\")};\n\n  for (const auto& test_case : expected) {\n    NiceMock<MockStreamInfo> stream_info;\n    ON_CALL(stream_info, hasResponseFlag(test_case.first)).WillByDefault(Return(true));\n    EXPECT_EQ(test_case.second, ResponseFlagUtils::toShortString(stream_info));\n  }\n\n  // No flag is set.\n  {\n    NiceMock<MockStreamInfo> stream_info;\n    ON_CALL(stream_info, hasResponseFlag(_)).WillByDefault(Return(false));\n    EXPECT_EQ(\"-\", ResponseFlagUtils::toShortString(stream_info));\n  }\n\n  // Test combinations.\n  // These are not real use cases, but are used to cover multiple response flags case.\n  {\n    NiceMock<MockStreamInfo> stream_info;\n    ON_CALL(stream_info, hasResponseFlag(ResponseFlag::DelayInjected)).WillByDefault(Return(true));\n    ON_CALL(stream_info, hasResponseFlag(ResponseFlag::FaultInjected)).WillByDefault(Return(true));\n    ON_CALL(stream_info, hasResponseFlag(ResponseFlag::UpstreamRequestTimeout))\n        .WillByDefault(Return(true));\n    EXPECT_EQ(\"UT,DI,FI\", ResponseFlagUtils::toShortString(stream_info));\n  }\n}\n\nTEST(ResponseFlagsUtilsTest, toResponseFlagConversion) {\n  static_assert(ResponseFlag::LastFlag == 0x400000, \"A flag has been added. 
Fix this code.\");\n\n  std::vector<std::pair<std::string, ResponseFlag>> expected = {\n      std::make_pair(\"LH\", ResponseFlag::FailedLocalHealthCheck),\n      std::make_pair(\"UH\", ResponseFlag::NoHealthyUpstream),\n      std::make_pair(\"UT\", ResponseFlag::UpstreamRequestTimeout),\n      std::make_pair(\"LR\", ResponseFlag::LocalReset),\n      std::make_pair(\"UR\", ResponseFlag::UpstreamRemoteReset),\n      std::make_pair(\"UF\", ResponseFlag::UpstreamConnectionFailure),\n      std::make_pair(\"UC\", ResponseFlag::UpstreamConnectionTermination),\n      std::make_pair(\"UO\", ResponseFlag::UpstreamOverflow),\n      std::make_pair(\"NR\", ResponseFlag::NoRouteFound),\n      std::make_pair(\"DI\", ResponseFlag::DelayInjected),\n      std::make_pair(\"FI\", ResponseFlag::FaultInjected),\n      std::make_pair(\"RL\", ResponseFlag::RateLimited),\n      std::make_pair(\"UAEX\", ResponseFlag::UnauthorizedExternalService),\n      std::make_pair(\"RLSE\", ResponseFlag::RateLimitServiceError),\n      std::make_pair(\"DC\", ResponseFlag::DownstreamConnectionTermination),\n      std::make_pair(\"URX\", ResponseFlag::UpstreamRetryLimitExceeded),\n      std::make_pair(\"SI\", ResponseFlag::StreamIdleTimeout),\n      std::make_pair(\"IH\", ResponseFlag::InvalidEnvoyRequestHeaders),\n      std::make_pair(\"DPE\", ResponseFlag::DownstreamProtocolError),\n      std::make_pair(\"UMSDR\", ResponseFlag::UpstreamMaxStreamDurationReached),\n      std::make_pair(\"RFCF\", ResponseFlag::ResponseFromCacheFilter),\n      std::make_pair(\"NFCF\", ResponseFlag::NoFilterConfigFound),\n      std::make_pair(\"DT\", ResponseFlag::DurationTimeout)};\n\n  EXPECT_FALSE(ResponseFlagUtils::toResponseFlag(\"NonExistentFlag\").has_value());\n\n  for (const auto& test_case : expected) {\n    absl::optional<ResponseFlag> response_flag = ResponseFlagUtils::toResponseFlag(test_case.first);\n    EXPECT_TRUE(response_flag.has_value());\n    EXPECT_EQ(test_case.second, response_flag.value());\n  
}\n}\n\nTEST(UtilityTest, formatDownstreamAddressNoPort) {\n  EXPECT_EQ(\"1.2.3.4\",\n            Utility::formatDownstreamAddressNoPort(Network::Address::Ipv4Instance(\"1.2.3.4\")));\n  EXPECT_EQ(\"/hello\",\n            Utility::formatDownstreamAddressNoPort(Network::Address::PipeInstance(\"/hello\")));\n}\n\n} // namespace\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/tcp/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"conn_pool_test\",\n    srcs = [\"conn_pool_test.cc\"],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/tcp:conn_pool_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/tcp:tcp_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/tcp/conn_pool_test.cc",
    "content": "#include <memory>\n#include <vector>\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/tcp/conn_pool.h\"\n#include \"common/tcp/original_conn_pool.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/tcp/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::Property;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Tcp {\nnamespace {\n\nstruct TestConnectionState : public ConnectionPool::ConnectionState {\n  TestConnectionState(int id, std::function<void()> on_destructor)\n      : id_(id), on_destructor_(on_destructor) {}\n  ~TestConnectionState() override { on_destructor_(); }\n\n  int id_;\n  std::function<void()> on_destructor_;\n};\n\n} // namespace\n\n/**\n * Mock callbacks used for conn pool testing.\n */\nstruct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks {\n  void onPoolReady(ConnectionPool::ConnectionDataPtr&& conn,\n                   Upstream::HostDescriptionConstSharedPtr host) override {\n    conn_data_ = std::move(conn);\n    host_ = host;\n    pool_ready_.ready();\n  }\n\n  void onPoolFailure(ConnectionPool::PoolFailureReason reason,\n                     Upstream::HostDescriptionConstSharedPtr host) override {\n    reason_ = reason;\n    host_ = host;\n    pool_failure_.ready();\n  }\n\n  ReadyWatcher pool_failure_;\n  ReadyWatcher pool_ready_;\n  ConnectionPool::ConnectionDataPtr conn_data_{};\n  absl::optional<ConnectionPool::PoolFailureReason> reason_;\n  
Upstream::HostDescriptionConstSharedPtr host_;\n};\n\n/**\n * A wrapper around a ConnectionPoolImpl which tracks when the bridge between\n * the pool and the consumer of the connection is released and destroyed.\n */\nclass ConnPoolBase : public Tcp::ConnectionPool::Instance {\npublic:\n  ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host,\n               NiceMock<Event::MockSchedulableCallback>* upstream_ready_cb,\n               bool test_new_connection_pool);\n\n  void addDrainedCallback(DrainedCb cb) override { conn_pool_->addDrainedCallback(cb); }\n  void drainConnections() override { conn_pool_->drainConnections(); }\n  void closeConnections() override { conn_pool_->closeConnections(); }\n  ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override {\n    return conn_pool_->newConnection(callbacks);\n  }\n  Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); }\n\n  MOCK_METHOD(void, onConnReleasedForTest, ());\n  MOCK_METHOD(void, onConnDestroyedForTest, ());\n  bool maybePrefetch(float ratio) override {\n    if (!test_new_connection_pool_) {\n      return false;\n    }\n    ASSERT(dynamic_cast<ConnPoolImplForTest*>(conn_pool_.get()) != nullptr);\n    return dynamic_cast<ConnPoolImplForTest*>(conn_pool_.get())->maybePrefetch(ratio);\n  }\n\n  struct TestConnection {\n    Network::MockClientConnection* connection_;\n    Event::MockTimer* connect_timer_;\n    Network::ReadFilterSharedPtr filter_;\n  };\n\n  void expectConnCreate() {\n    test_conns_.emplace_back();\n    TestConnection& test_conn = test_conns_.back();\n    test_conn.connection_ = new NiceMock<Network::MockClientConnection>();\n    test_conn.connect_timer_ = new NiceMock<Event::MockTimer>(&mock_dispatcher_);\n\n    EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(Return(test_conn.connection_));\n    EXPECT_CALL(*test_conn.connection_, addReadFilter(_))\n        
.WillOnce(Invoke(\n            [&](Network::ReadFilterSharedPtr filter) -> void { test_conn.filter_ = filter; }));\n    EXPECT_CALL(*test_conn.connection_, connect());\n    EXPECT_CALL(*test_conn.connect_timer_, enableTimer(_, _));\n\n    ON_CALL(*test_conn.connection_, close(Network::ConnectionCloseType::NoFlush))\n        .WillByDefault(InvokeWithoutArgs([test_conn]() -> void {\n          test_conn.connection_->raiseEvent(Network::ConnectionEvent::LocalClose);\n        }));\n  }\n\n  void expectEnableUpstreamReady(bool run);\n\n  std::unique_ptr<Tcp::ConnectionPool::Instance> conn_pool_;\n  Event::MockDispatcher& mock_dispatcher_;\n  NiceMock<Event::MockSchedulableCallback>* mock_upstream_ready_cb_;\n  std::vector<TestConnection> test_conns_;\n  Network::ConnectionCallbacks* callbacks_ = nullptr;\n  bool test_new_connection_pool_;\n\nprotected:\n  class ConnPoolImplForTest : public ConnPoolImpl {\n  public:\n    ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host,\n                        ConnPoolBase& parent)\n        : ConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, nullptr),\n          parent_(parent) {}\n\n    void onConnReleased(Envoy::ConnectionPool::ActiveClient& client) override {\n      ConnPoolImpl::onConnReleased(client);\n      parent_.onConnReleasedForTest();\n    }\n\n    void onConnDestroyed() override { parent_.onConnDestroyedForTest(); }\n    ConnPoolBase& parent_;\n  };\n\n  class OriginalConnPoolImplForTest : public OriginalConnPoolImpl {\n  public:\n    OriginalConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host,\n                                ConnPoolBase& parent)\n        : OriginalConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr,\n                               nullptr),\n          parent_(parent) {}\n\n    ~OriginalConnPoolImplForTest() override {\n      EXPECT_EQ(0U, ready_conns_.size());\n      EXPECT_EQ(0U, 
busy_conns_.size());\n      EXPECT_EQ(0U, pending_requests_.size());\n    }\n\n    void onConnReleased(OriginalConnPoolImpl::ActiveConn& conn) override {\n      parent_.onConnReleasedForTest();\n      OriginalConnPoolImpl::onConnReleased(conn);\n    }\n\n    void onConnDestroyed(OriginalConnPoolImpl::ActiveConn& conn) override {\n      parent_.onConnDestroyedForTest();\n      OriginalConnPoolImpl::onConnDestroyed(conn);\n    }\n    void expectEnableUpstreamReady(bool run) {\n      if (!run) {\n        EXPECT_FALSE(upstream_ready_enabled_);\n        EXPECT_CALL(*parent_.mock_upstream_ready_cb_, scheduleCallbackCurrentIteration())\n            .Times(1)\n            .RetiresOnSaturation();\n      } else {\n        EXPECT_TRUE(upstream_ready_enabled_);\n        parent_.mock_upstream_ready_cb_->invokeCallback();\n        EXPECT_FALSE(upstream_ready_enabled_);\n      }\n    }\n    ConnPoolBase& parent_;\n  };\n};\n\nConnPoolBase::ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host,\n                           NiceMock<Event::MockSchedulableCallback>* upstream_ready_cb,\n                           bool test_new_connection_pool)\n    : mock_dispatcher_(dispatcher), mock_upstream_ready_cb_(upstream_ready_cb),\n      test_new_connection_pool_(test_new_connection_pool) {\n  if (test_new_connection_pool_) {\n    conn_pool_ = std::make_unique<ConnPoolImplForTest>(dispatcher, host, *this);\n  } else {\n    conn_pool_ = std::make_unique<OriginalConnPoolImplForTest>(dispatcher, host, *this);\n  }\n}\n\nvoid ConnPoolBase::expectEnableUpstreamReady(bool run) {\n  if (!test_new_connection_pool_) {\n    dynamic_cast<OriginalConnPoolImplForTest*>(conn_pool_.get())->expectEnableUpstreamReady(run);\n  } else {\n    if (!run) {\n      EXPECT_CALL(*mock_upstream_ready_cb_, scheduleCallbackCurrentIteration())\n          .Times(1)\n          .RetiresOnSaturation();\n    } else {\n      mock_upstream_ready_cb_->invokeCallback();\n    }\n  }\n}\n\n/**\n * Test fixture 
for connection pool tests.\n */\nclass TcpConnPoolImplTest : public testing::TestWithParam<bool> {\npublic:\n  TcpConnPoolImplTest()\n      : test_new_connection_pool_(GetParam()),\n        upstream_ready_cb_(new NiceMock<Event::MockSchedulableCallback>(&dispatcher_)),\n        host_(Upstream::makeTestHost(cluster_, \"tcp://127.0.0.1:9000\")),\n        conn_pool_(dispatcher_, host_, upstream_ready_cb_, test_new_connection_pool_) {}\n\n  ~TcpConnPoolImplTest() override {\n    EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges()))\n        << TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges());\n  }\n\n  bool test_new_connection_pool_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};\n  NiceMock<Event::MockSchedulableCallback>* upstream_ready_cb_;\n  Upstream::HostSharedPtr host_;\n  ConnPoolBase conn_pool_;\n  NiceMock<Runtime::MockLoader> runtime_;\n};\n\n/**\n * Test fixture for connection pool destructor tests.\n */\nclass TcpConnPoolImplDestructorTest : public testing::TestWithParam<bool> {\npublic:\n  TcpConnPoolImplDestructorTest()\n      : test_new_connection_pool_(GetParam()),\n        upstream_ready_cb_(new NiceMock<Event::MockSchedulableCallback>(&dispatcher_)) {\n    host_ = Upstream::makeTestHost(cluster_, \"tcp://127.0.0.1:9000\");\n    if (test_new_connection_pool_) {\n      conn_pool_ = std::make_unique<ConnPoolImpl>(\n          dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr);\n    } else {\n      conn_pool_ = std::make_unique<OriginalConnPoolImpl>(\n          dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr);\n    }\n  }\n  ~TcpConnPoolImplDestructorTest() override = default;\n\n  void prepareConn() {\n    connection_ = new NiceMock<Network::MockClientConnection>();\n    connect_timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n    EXPECT_CALL(dispatcher_, 
createClientConnection_(_, _, _, _)).WillOnce(Return(connection_));\n    EXPECT_CALL(*connect_timer_, enableTimer(_, _));\n\n    callbacks_ = std::make_unique<ConnPoolCallbacks>();\n    ConnectionPool::Cancellable* handle = conn_pool_->newConnection(*callbacks_);\n    EXPECT_NE(nullptr, handle);\n\n    EXPECT_CALL(*connect_timer_, disableTimer());\n    EXPECT_CALL(callbacks_->pool_ready_, ready());\n    connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  }\n\n  bool test_new_connection_pool_;\n  Upstream::HostConstSharedPtr host_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};\n  NiceMock<Event::MockSchedulableCallback>* upstream_ready_cb_;\n  NiceMock<Event::MockTimer>* connect_timer_;\n  NiceMock<Network::MockClientConnection>* connection_;\n  std::unique_ptr<Tcp::ConnectionPool::Instance> conn_pool_;\n  std::unique_ptr<ConnPoolCallbacks> callbacks_;\n};\n\n/**\n * Helper for dealing with an active test connection.\n */\nstruct ActiveTestConn {\n  enum class Type {\n    Pending,          // pending request, waiting for free connection\n    InProgress,       // connection created, no callback\n    CreateConnection, // connection callback occurs after newConnection\n    Immediate,        // connection callback occurs during newConnection\n  };\n\n  ActiveTestConn(TcpConnPoolImplTest& parent, size_t conn_index, Type type)\n      : parent_(parent), conn_index_(conn_index) {\n    if (type == Type::CreateConnection || type == Type::InProgress) {\n      parent.conn_pool_.expectConnCreate();\n    }\n\n    if (type == Type::Immediate) {\n      expectNewConn();\n    }\n    handle_ = parent.conn_pool_.newConnection(callbacks_);\n\n    if (type == Type::Immediate) {\n      EXPECT_EQ(nullptr, handle_);\n      verifyConn();\n    } else {\n      EXPECT_NE(nullptr, handle_);\n    }\n\n    if (type == Type::CreateConnection) {\n      completeConnection();\n    }\n  
}\n\n  void completeConnection() {\n    ASSERT_FALSE(completed_);\n\n    EXPECT_CALL(*parent_.conn_pool_.test_conns_[conn_index_].connect_timer_, disableTimer());\n    expectNewConn();\n    parent_.conn_pool_.test_conns_[conn_index_].connection_->raiseEvent(\n        Network::ConnectionEvent::Connected);\n    verifyConn();\n    completed_ = true;\n  }\n\n  void expectNewConn() { EXPECT_CALL(callbacks_.pool_ready_, ready()); }\n\n  void releaseConn() { callbacks_.conn_data_.reset(); }\n\n  void verifyConn() {\n    EXPECT_EQ(&callbacks_.conn_data_->connection(),\n              parent_.conn_pool_.test_conns_[conn_index_].connection_);\n  }\n\n  TcpConnPoolImplTest& parent_;\n  size_t conn_index_;\n  Tcp::ConnectionPool::Cancellable* handle_{};\n  ConnPoolCallbacks callbacks_;\n  bool completed_{};\n};\n\nTEST_P(TcpConnPoolImplTest, HostAccessor) { EXPECT_EQ(conn_pool_.host(), host_); }\n\n/**\n * Verify that connections are drained when requested.\n */\nTEST_P(TcpConnPoolImplTest, DrainConnections) {\n  cluster_->resetResourceManager(3, 1024, 1024, 1, 1);\n\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n  ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection);\n  ActiveTestConn c3(*this, 2, ActiveTestConn::Type::InProgress);\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  c1.releaseConn();\n\n  {\n    // This will destroy the ready connection and set requests remaining to 1 on the busy and\n    // pending connections.\n    EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n    conn_pool_.drainConnections();\n    dispatcher_.clearDeferredDeleteList();\n  }\n  {\n    // This will destroy the busy connection when the response finishes.\n    EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n    EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n    c2.releaseConn();\n    dispatcher_.clearDeferredDeleteList();\n  }\n  {\n    // This will destroy the pending connection when the response finishes.\n    
c3.completeConnection();\n\n    EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n    EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n    c3.releaseConn();\n    dispatcher_.clearDeferredDeleteList();\n  }\n}\n\n/**\n * Test all timing stats are set.\n */\nTEST_P(TcpConnPoolImplTest, VerifyTimingStats) {\n  EXPECT_CALL(cluster_->stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_cx_connect_ms\"), _));\n  EXPECT_CALL(cluster_->stats_store_,\n              deliverHistogramToSinks(Property(&Stats::Metric::name, \"upstream_cx_length_ms\"), _));\n\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  c1.releaseConn();\n\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test that buffer limits are set.\n */\nTEST_P(TcpConnPoolImplTest, VerifyBufferLimits) {\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192));\n  EXPECT_CALL(*conn_pool_.test_conns_.back().connection_, setBufferLimits(8192));\n\n  EXPECT_CALL(callbacks.pool_failure_, ready());\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test that upstream callback fire for assigned connections.\n */\nTEST_P(TcpConnPoolImplTest, UpstreamCallbacks) {\n  Buffer::OwnedImpl buffer;\n\n  ConnectionPool::MockUpstreamCallbacks callbacks;\n\n  // Create connection, set UpstreamCallbacks\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n  
c1.callbacks_.conn_data_->addUpstreamCallbacks(callbacks);\n\n  // Expect invocation when connection's ReadFilter::onData is invoked\n  EXPECT_CALL(callbacks, onUpstreamData(_, _));\n  EXPECT_EQ(Network::FilterStatus::StopIteration,\n            conn_pool_.test_conns_[0].filter_->onData(buffer, false));\n\n  EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark());\n  for (auto* cb : conn_pool_.test_conns_[0].connection_->callbacks_) {\n    cb->onAboveWriteBufferHighWatermark();\n  }\n\n  EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark());\n  for (auto* cb : conn_pool_.test_conns_[0].connection_->callbacks_) {\n    cb->onBelowWriteBufferLowWatermark();\n  }\n\n  // Shutdown normally.\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  c1.releaseConn();\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test that upstream callback close event fires for assigned connections.\n */\nTEST_P(TcpConnPoolImplTest, UpstreamCallbacksCloseEvent) {\n  Buffer::OwnedImpl buffer;\n\n  ConnectionPool::MockUpstreamCallbacks callbacks;\n\n  // Create connection, set UpstreamCallbacks\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n  c1.callbacks_.conn_data_->addUpstreamCallbacks(callbacks);\n\n  EXPECT_CALL(callbacks, onEvent(Network::ConnectionEvent::RemoteClose));\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test that a connection pool functions without upstream callbacks.\n */\nTEST_P(TcpConnPoolImplTest, NoUpstreamCallbacks) {\n  Buffer::OwnedImpl buffer;\n\n  // Create connection.\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n\n  // Trigger connection's ReadFilter::onData -- connection pool closes 
connection.\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  EXPECT_EQ(Network::FilterStatus::StopIteration,\n            conn_pool_.test_conns_[0].filter_->onData(buffer, false));\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Tests a request that generates a new connection, completes, and then a second request that uses\n * the same connection.\n */\nTEST_P(TcpConnPoolImplTest, MultipleRequestAndResponse) {\n\n  // Request 1 should kick off a new connection.\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  c1.releaseConn();\n\n  // Request 2 should not.\n  ActiveTestConn c2(*this, 0, ActiveTestConn::Type::Immediate);\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  c2.releaseConn();\n\n  // Cause the connection to go away.\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Tests ConnectionState assignment, lookup and destruction.\n */\nTEST_P(TcpConnPoolImplTest, ConnectionStateLifecycle) {\n\n  bool state_destroyed = false;\n\n  // Request 1 should kick off a new connection.\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n\n  auto* state = new TestConnectionState(1, [&]() -> void { state_destroyed = true; });\n  c1.callbacks_.conn_data_->setConnectionState(std::unique_ptr<TestConnectionState>(state));\n\n  EXPECT_EQ(state, c1.callbacks_.conn_data_->connectionStateTyped<TestConnectionState>());\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  c1.releaseConn();\n\n  EXPECT_FALSE(state_destroyed);\n\n  // Request 2 should not.\n  ActiveTestConn c2(*this, 0, ActiveTestConn::Type::Immediate);\n\n  EXPECT_EQ(state, c2.callbacks_.conn_data_->connectionStateTyped<TestConnectionState>());\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  c2.releaseConn();\n\n  
EXPECT_FALSE(state_destroyed);\n\n  // Cause the connection to go away.\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_TRUE(state_destroyed);\n}\n\n/**\n * Test when we overflow max pending requests.\n */\nTEST_P(TcpConnPoolImplTest, MaxPendingRequests) {\n  cluster_->resetResourceManager(1, 1, 1024, 1, 1);\n\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  ConnPoolCallbacks callbacks2;\n  EXPECT_CALL(callbacks2.pool_failure_, ready());\n  Tcp::ConnectionPool::Cancellable* handle2 = conn_pool_.newConnection(callbacks2);\n  EXPECT_EQ(nullptr, handle2);\n\n  handle->cancel(ConnectionPool::CancelPolicy::Default);\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(ConnectionPool::PoolFailureReason::Overflow, callbacks2.reason_);\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_overflow_.value());\n}\n\n/**\n * Tests a connection failure before a request is bound which should result in the pending request\n * getting purged.\n */\nTEST_P(TcpConnPoolImplTest, RemoteConnectFailure) {\n\n  // Request 1 should kick off a new connection.\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(callbacks.pool_failure_, ready());\n  EXPECT_CALL(*conn_pool_.test_conns_[0].connect_timer_, disableTimer());\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  
EXPECT_EQ(ConnectionPool::PoolFailureReason::RemoteConnectionFailure, callbacks.reason_);\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value());\n}\n\n/**\n * Tests a local connection failure before a request is bound which should result in the pending\n * request getting purged.\n */\nTEST_P(TcpConnPoolImplTest, LocalConnectFailure) {\n\n  // Request 1 should kick off a new connection.\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(callbacks.pool_failure_, ready());\n  EXPECT_CALL(*conn_pool_.test_conns_[0].connect_timer_, disableTimer());\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::LocalClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(ConnectionPool::PoolFailureReason::LocalConnectionFailure, callbacks.reason_);\n\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value());\n}\n\n/**\n * Tests a connect timeout. 
Also test that we can add a new request during ejection processing.\n */\nTEST_P(TcpConnPoolImplTest, ConnectTimeout) {\n\n  // Request 1 should kick off a new connection.\n  ConnPoolCallbacks callbacks1;\n  conn_pool_.expectConnCreate();\n  EXPECT_NE(nullptr, conn_pool_.newConnection(callbacks1));\n\n  ConnPoolCallbacks callbacks2;\n  EXPECT_CALL(callbacks1.pool_failure_, ready()).WillOnce(Invoke([&]() -> void {\n    conn_pool_.expectConnCreate();\n    EXPECT_NE(nullptr, conn_pool_.newConnection(callbacks2));\n  }));\n\n  conn_pool_.test_conns_[0].connect_timer_->invokeCallback();\n\n  EXPECT_CALL(callbacks2.pool_failure_, ready());\n  conn_pool_.test_conns_[1].connect_timer_->invokeCallback();\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest()).Times(2);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(ConnectionPool::PoolFailureReason::Timeout, callbacks1.reason_);\n  EXPECT_EQ(ConnectionPool::PoolFailureReason::Timeout, callbacks2.reason_);\n\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_fail_.value());\n  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_timeout_.value());\n}\n\n/**\n * Test cancelling before the request is bound to a connection.\n */\nTEST_P(TcpConnPoolImplTest, CancelBeforeBound) {\n\n  // Request 1 should kick off a new connection.\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  handle->cancel(ConnectionPool::CancelPolicy::Default);\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Cause the connection to go away.\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test cancelling before the request is bound to a connection, with connection close.\n */\nTEST_P(TcpConnPoolImplTest, 
CancelAndCloseBeforeBound) {\n\n  // Request 1 should kick off a new connection.\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  // Expect the connection is closed.\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  handle->cancel(ConnectionPool::CancelPolicy::CloseExcess);\n\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test an upstream disconnection while there is a bound request.\n */\nTEST_P(TcpConnPoolImplTest, DisconnectWhileBound) {\n\n  // Request 1 should kick off a new connection.\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Kill the connection while it has an active request.\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test upstream disconnection of one request while another is pending.\n */\nTEST_P(TcpConnPoolImplTest, DisconnectWhilePending) {\n  cluster_->resetResourceManager(1, 1024, 1024, 1, 1);\n\n  // First request connected.\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(*conn_pool_.test_conns_[0].connect_timer_, disableTimer());\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Second request pending.\n  ConnPoolCallbacks callbacks2;\n  ConnectionPool::Cancellable* handle2 = conn_pool_.newConnection(callbacks2);\n  
EXPECT_NE(nullptr, handle2);\n\n  // Connection closed, triggering new connection for pending request.\n  conn_pool_.expectConnCreate();\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::LocalClose);\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  dispatcher_.clearDeferredDeleteList();\n\n  // test_conns_[1] is the new connection\n  EXPECT_CALL(*conn_pool_.test_conns_[1].connect_timer_, disableTimer());\n  EXPECT_CALL(callbacks2.pool_ready_, ready());\n  conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  callbacks2.conn_data_.reset();\n\n  // Disconnect\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test that we correctly handle reaching max connections.\n */\nTEST_P(TcpConnPoolImplTest, MaxConnections) {\n  // Request 1 should kick off a new connection.\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  // Request 2 should not kick off a new connection.\n  ConnPoolCallbacks callbacks2;\n  handle = conn_pool_.newConnection(callbacks2);\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value());\n\n  EXPECT_NE(nullptr, handle);\n\n  // Connect event will bind to request 1.\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Finishing request 1 will immediately bind to request 2.\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  conn_pool_.expectEnableUpstreamReady(false);\n  EXPECT_CALL(callbacks2.pool_ready_, ready());\n  callbacks.conn_data_.reset();\n\n  conn_pool_.expectEnableUpstreamReady(true);\n  EXPECT_CALL(conn_pool_, 
onConnReleasedForTest());\n  callbacks2.conn_data_.reset();\n\n  // Cause the connection to go away.\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test when we reach max requests per connection.\n */\nTEST_P(TcpConnPoolImplTest, MaxRequestsPerConnection) {\n\n  cluster_->max_requests_per_connection_ = 1;\n\n  // Request 1 should kick off a new connection.\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(callbacks.pool_ready_, ready());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  callbacks.conn_data_.reset();\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value());\n  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_max_requests_.value());\n}\n\n/*\n * Test that multiple connections can be assigned at once.\n */\nTEST_P(TcpConnPoolImplTest, ConcurrentConnections) {\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n  ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection);\n  ActiveTestConn c3(*this, 0, ActiveTestConn::Type::Pending);\n\n  // Finish c1, which gets c3 going.\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  conn_pool_.expectEnableUpstreamReady(false);\n  c3.expectNewConn();\n  c1.releaseConn();\n\n  conn_pool_.expectEnableUpstreamReady(true);\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest()).Times(2);\n  c2.releaseConn();\n  c3.releaseConn();\n\n  // Disconnect both connections.\n  EXPECT_CALL(conn_pool_, 
onConnDestroyedForTest()).Times(2);\n  conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Tests ConnectionState lifecycle with multiple concurrent connections.\n */\nTEST_P(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) {\n\n  int state_destroyed = 0;\n  auto* s1 = new TestConnectionState(1, [&]() -> void { state_destroyed |= 1; });\n  auto* s2 = new TestConnectionState(2, [&]() -> void { state_destroyed |= 2; });\n  auto* s3 = new TestConnectionState(2, [&]() -> void { state_destroyed |= 4; });\n\n  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n  c1.callbacks_.conn_data_->setConnectionState(std::unique_ptr<TestConnectionState>(s1));\n  ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection);\n  c2.callbacks_.conn_data_->setConnectionState(std::unique_ptr<TestConnectionState>(s2));\n  ActiveTestConn c3(*this, 0, ActiveTestConn::Type::Pending);\n\n  EXPECT_EQ(0, state_destroyed);\n\n  // Finish c1, which gets c3 going.\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  conn_pool_.expectEnableUpstreamReady(false);\n  c3.expectNewConn();\n  c1.releaseConn();\n\n  conn_pool_.expectEnableUpstreamReady(true);\n\n  // c3 now has the state set by c1.\n  EXPECT_EQ(s1, c3.callbacks_.conn_data_->connectionStateTyped<TestConnectionState>());\n  EXPECT_EQ(s2, c2.callbacks_.conn_data_->connectionStateTyped<TestConnectionState>());\n\n  // replace c3's state\n  c3.callbacks_.conn_data_->setConnectionState(std::unique_ptr<TestConnectionState>(s3));\n  EXPECT_EQ(1, state_destroyed);\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest()).Times(2);\n  c2.releaseConn();\n  c3.releaseConn();\n\n  EXPECT_EQ(1, state_destroyed);\n\n  // Disconnect both connections.\n  EXPECT_CALL(conn_pool_, 
onConnDestroyedForTest()).Times(2);\n  conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(7, state_destroyed);\n}\n\n/**\n * Tests that the DrainCallback is invoked when the number of connections goes to zero.\n */\nTEST_P(TcpConnPoolImplTest, DrainCallback) {\n  ReadyWatcher drained;\n\n  EXPECT_CALL(drained, ready());\n  conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); });\n\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n  ActiveTestConn c2(*this, 0, ActiveTestConn::Type::Pending);\n  c2.handle_->cancel(ConnectionPool::CancelPolicy::Default);\n\n  EXPECT_CALL(conn_pool_, onConnReleasedForTest());\n  EXPECT_CALL(drained, ready());\n  c1.releaseConn();\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test draining a connection pool that has a pending connection.\n */\nTEST_P(TcpConnPoolImplTest, DrainWhileConnecting) {\n  ReadyWatcher drained;\n\n  ConnPoolCallbacks callbacks;\n  conn_pool_.expectConnCreate();\n  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);\n  EXPECT_NE(nullptr, handle);\n\n  conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); });\n  if (test_new_connection_pool_) {\n    // The shared connection pool removes and closes connecting clients if there are no\n    // pending requests.\n    EXPECT_CALL(drained, ready());\n    handle->cancel(ConnectionPool::CancelPolicy::Default);\n  } else {\n    handle->cancel(ConnectionPool::CancelPolicy::Default);\n    EXPECT_CALL(*conn_pool_.test_conns_[0].connection_,\n                close(Network::ConnectionCloseType::NoFlush));\n    EXPECT_CALL(drained, ready());\n    
conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  }\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test that the DrainCallback is invoked when a connection is closed.\n */\nTEST_P(TcpConnPoolImplTest, DrainOnClose) {\n  ReadyWatcher drained;\n  EXPECT_CALL(drained, ready());\n  conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); });\n\n  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);\n\n  ConnectionPool::MockUpstreamCallbacks callbacks;\n  c1.callbacks_.conn_data_->addUpstreamCallbacks(callbacks);\n\n  EXPECT_CALL(drained, ready());\n  EXPECT_CALL(callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent event) -> void {\n        EXPECT_EQ(Network::ConnectionEvent::RemoteClose, event);\n        c1.releaseConn();\n      }));\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());\n  dispatcher_.clearDeferredDeleteList();\n}\n\n/**\n * Test connecting_request_capacity logic.\n */\nTEST_P(TcpConnPoolImplTest, RequestCapacity) {\n  if (!test_new_connection_pool_) {\n    return;\n  }\n  cluster_->resetResourceManager(5, 1024, 1024, 1, 1);\n  cluster_->max_requests_per_connection_ = 100;\n\n  ConnPoolCallbacks callbacks1;\n  ConnPoolCallbacks callbacks2;\n  Tcp::ConnectionPool::Cancellable* handle1;\n  Tcp::ConnectionPool::Cancellable* handle2;\n  {\n    // Request 1 should kick off a new connection.\n    conn_pool_.expectConnCreate();\n    handle1 = conn_pool_.newConnection(callbacks1);\n    EXPECT_NE(nullptr, handle1);\n  }\n  {\n    // Request 2 should kick off a new connection.\n    conn_pool_.expectConnCreate();\n    handle2 = conn_pool_.newConnection(callbacks2);\n    EXPECT_NE(nullptr, handle2);\n  }\n\n  // This should set the number of requests remaining to 1 on the active\n  // 
connections, and the connecting_request_capacity to 2 as well.\n  conn_pool_.drainConnections();\n\n  // Cancel the connections. Because neither used CloseExcess, the two connections should persist.\n  handle1->cancel(ConnectionPool::CancelPolicy::Default);\n  handle2->cancel(ConnectionPool::CancelPolicy::Default);\n\n  Tcp::ConnectionPool::Cancellable* handle3;\n  Tcp::ConnectionPool::Cancellable* handle4;\n  Tcp::ConnectionPool::Cancellable* handle5;\n  ConnPoolCallbacks callbacks3;\n  ConnPoolCallbacks callbacks4;\n  ConnPoolCallbacks callbacks5;\n\n  {\n    // The next two requests will use the connections in progress, bringing\n    // connecting_request_capacity to zero.\n    handle3 = conn_pool_.newConnection(callbacks3);\n    EXPECT_NE(nullptr, handle3);\n\n    handle4 = conn_pool_.newConnection(callbacks4);\n    EXPECT_NE(nullptr, handle4);\n  }\n  {\n    // With connecting_request_capacity zero, a request for a new connection\n    // will kick off connection #3.\n    conn_pool_.expectConnCreate();\n    handle5 = conn_pool_.newConnection(callbacks5);\n    EXPECT_NE(nullptr, handle5);\n  }\n\n  // Clean up remaining connections.\n  handle3->cancel(ConnectionPool::CancelPolicy::Default);\n  handle4->cancel(ConnectionPool::CancelPolicy::Default);\n  handle5->cancel(ConnectionPool::CancelPolicy::Default);\n  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  conn_pool_.test_conns_[2].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Test that maybePrefetch is passed up to the base class implementation.\nTEST_P(TcpConnPoolImplTest, TestPrefetch) {\n  if (!test_new_connection_pool_) {\n    return;\n  }\n  EXPECT_FALSE(conn_pool_.maybePrefetch(0));\n\n  conn_pool_.expectConnCreate();\n  ASSERT_TRUE(conn_pool_.maybePrefetch(2));\n\n  
conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n/**\n * Test that pending connections are closed when the connection pool is destroyed.\n */\nTEST_P(TcpConnPoolImplDestructorTest, TestPendingConnectionsAreClosed) {\n  connection_ = new NiceMock<Network::MockClientConnection>();\n  connect_timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillOnce(Return(connection_));\n  EXPECT_CALL(*connect_timer_, enableTimer(_, _));\n\n  callbacks_ = std::make_unique<ConnPoolCallbacks>();\n  ConnectionPool::Cancellable* handle = conn_pool_->newConnection(*callbacks_);\n  EXPECT_NE(nullptr, handle);\n\n  EXPECT_CALL(callbacks_->pool_failure_, ready());\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(dispatcher_, clearDeferredDeleteList());\n  conn_pool_.reset();\n}\n\n/**\n * Test that busy connections are closed when the connection pool is destroyed.\n */\nTEST_P(TcpConnPoolImplDestructorTest, TestBusyConnectionsAreClosed) {\n  prepareConn();\n\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(dispatcher_, clearDeferredDeleteList());\n  conn_pool_.reset();\n}\n\n/**\n * Test that ready connections are closed when the connection pool is destroyed.\n */\nTEST_P(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) {\n  prepareConn();\n\n  // Transition connection to ready list\n  callbacks_->conn_data_.reset();\n\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(dispatcher_, clearDeferredDeleteList());\n  conn_pool_.reset();\n}\n\nINSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplTest, testing::Bool());\nINSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplDestructorTest, testing::Bool());\n\n} // namespace Tcp\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/tcp_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"tcp_proxy_test\",\n    srcs = [\"tcp_proxy_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:application_protocol_lib\",\n        \"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/network:upstream_server_name_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/tcp_proxy\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/access_loggers:well_known_names\",\n        \"//source/extensions/access_loggers/file:config\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"upstream_test\",\n    srcs = [\"upstream_test.cc\"],\n    deps = [\n        \"//source/common/tcp_proxy\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/tcp:tcp_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/common/tcp_proxy/tcp_proxy_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/config/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/file/v3/file.pb.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.validate.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/application_protocol.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/network/upstream_server_name.h\"\n#include \"common/router/metadatamatchcriteria_impl.h\"\n#include \"common/tcp_proxy/tcp_proxy.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/access_loggers/well_known_names.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/mocks/tcp/mocks.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace TcpProxy {\nnamespace {\n\nusing ::Envoy::Network::UpstreamServerName;\nusing ::testing::_;\nusing ::testing::DoAll;\nusing ::testing::Invoke;\nusing ::testing::InvokeWithoutArgs;\nusing ::testing::NiceMock;\nusing ::testing::Return;\nusing ::testing::ReturnPointee;\nusing ::testing::ReturnRef;\nusing ::testing::SaveArg;\n\nnamespace {\nConfig constructConfigFromYaml(const std::string& yaml,\n                               Server::Configuration::FactoryContext& context,\n                               bool avoid_boosting = true) {\n  
envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy;\n  TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy, false, avoid_boosting);\n  return Config(tcp_proxy, context);\n}\n\nConfig constructConfigFromV3Yaml(const std::string& yaml,\n                                 Server::Configuration::FactoryContext& context,\n                                 bool avoid_boosting = true) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy;\n  TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy, false, avoid_boosting);\n  return Config(tcp_proxy, context);\n}\n\n} // namespace\n\nTEST(ConfigTest, DefaultTimeout) {\n  const std::string yaml = R\"EOF(\nstat_prefix: name\ncluster: foo\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n  EXPECT_EQ(std::chrono::hours(1), config_obj.sharedConfig()->idleTimeout().value());\n}\n\nTEST(ConfigTest, DisabledTimeout) {\n  const std::string yaml = R\"EOF(\nstat_prefix: name\ncluster: foo\nidle_timeout: 0s\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n  EXPECT_FALSE(config_obj.sharedConfig()->idleTimeout().has_value());\n}\n\nTEST(ConfigTest, CustomTimeout) {\n  const std::string yaml = R\"EOF(\nstat_prefix: name\ncluster: foo\nidle_timeout: 1s\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n  EXPECT_EQ(std::chrono::seconds(1), config_obj.sharedConfig()->idleTimeout().value());\n}\n\nTEST(ConfigTest, MaxDownstreamConnectionDuration) {\n  const std::string yaml = R\"EOF(\nstat_prefix: name\ncluster: foo\nmax_downstream_connection_duration: 10s\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, 
factory_context));\n  EXPECT_EQ(std::chrono::seconds(10), config_obj.maxDownstreamConnectionDuration().value());\n}\n\nTEST(ConfigTest, NoRouteConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  )EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  EXPECT_THROW(constructConfigFromYaml(yaml, factory_context), EnvoyException);\n}\n\nTEST(ConfigTest, DEPRECATED_FEATURE_TEST(BadConfig)) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: 1\n  cluster: cluster\n  deprecated_v1:\n    routes:\n    - cluster: fake_cluster\n  )EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  EXPECT_THROW(constructConfigFromYaml(yaml_string, factory_context, false), EnvoyException);\n}\n\nTEST(ConfigTest, DEPRECATED_FEATURE_TEST(EmptyRouteConfig)) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  cluster: cluster\n  deprecated_v1:\n    routes: []\n  )EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  EXPECT_THROW(constructConfigFromYaml(yaml, factory_context_, false), EnvoyException);\n}\n\nTEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  cluster: cluster\n  deprecated_v1:\n    routes:\n    - destination_ip_list:\n      - address_prefix: 10.10.10.10\n        prefix_len: 32\n      - address_prefix: 10.10.11.0\n        prefix_len: 24\n      - address_prefix: 10.11.0.0\n        prefix_len: 16\n      - address_prefix: 11.0.0.0\n        prefix_len: 8\n      - address_prefix: 128.0.0.0\n        prefix_len: 1\n      cluster: with_destination_ip_list\n    - destination_ip_list:\n      - address_prefix: \"::1\"\n        prefix_len: 128\n      - address_prefix: \"2001:abcd::\"\n        prefix_len: 64\n      cluster: with_v6_destination\n    - destination_ports: 1-1024,2048-4096,12345\n      cluster: with_destination_ports\n    - source_ports: '23457,23459'\n      cluster: with_source_ports\n    - 
destination_ip_list:\n      - address_prefix: \"2002::\"\n        prefix_len: 32\n      source_ip_list:\n      - address_prefix: \"2003::\"\n        prefix_len: 64\n      cluster: with_v6_source_and_destination\n    - destination_ip_list:\n      - address_prefix: 10.0.0.0\n        prefix_len: 24\n      source_ip_list:\n      - address_prefix: 20.0.0.0\n        prefix_len: 24\n      destination_ports: '10000'\n      source_ports: '20000'\n      cluster: with_everything\n    - cluster: catch_all\n    )EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  Config config_obj(constructConfigFromYaml(yaml, factory_context_, false));\n\n  {\n    // hit route with destination_ip (10.10.10.10/32)\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.10.10.10\");\n    EXPECT_EQ(std::string(\"with_destination_ip_list\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // fall-through\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.10.10.11\");\n    connection.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"0.0.0.0\");\n    EXPECT_EQ(std::string(\"catch_all\"), config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // hit route with destination_ip (10.10.11.0/24)\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.10.11.11\");\n    EXPECT_EQ(std::string(\"with_destination_ip_list\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // fall-through\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.10.12.12\");\n    connection.remote_address_ = 
std::make_shared<Network::Address::Ipv4Instance>(\"0.0.0.0\");\n    EXPECT_EQ(std::string(\"catch_all\"), config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // hit route with destination_ip (10.11.0.0/16)\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.11.11.11\");\n    EXPECT_EQ(std::string(\"with_destination_ip_list\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // fall-through\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.12.12.12\");\n    connection.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"0.0.0.0\");\n    EXPECT_EQ(std::string(\"catch_all\"), config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // hit route with destination_ip (11.0.0.0/8)\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"11.11.11.11\");\n    EXPECT_EQ(std::string(\"with_destination_ip_list\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // fall-through\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"12.12.12.12\");\n    connection.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"0.0.0.0\");\n    EXPECT_EQ(std::string(\"catch_all\"), config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // hit route with destination_ip (128.0.0.0/8)\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"128.255.255.255\");\n    EXPECT_EQ(std::string(\"with_destination_ip_list\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    
// hit route with destination port range\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 12345);\n    EXPECT_EQ(std::string(\"with_destination_ports\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // fall through\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 23456);\n    connection.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"0.0.0.0\");\n    EXPECT_EQ(std::string(\"catch_all\"), config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // hit route with source port range\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 23456);\n    connection.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"0.0.0.0\", 23459);\n    EXPECT_EQ(std::string(\"with_source_ports\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // fall through\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 23456);\n    connection.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"0.0.0.0\", 23458);\n    EXPECT_EQ(std::string(\"catch_all\"), config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // hit the route with all criteria present\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.0\", 10000);\n    connection.remote_address_ =\n        std::make_shared<Network::Address::Ipv4Instance>(\"20.0.0.0\", 20000);\n    EXPECT_EQ(std::string(\"with_everything\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  
}\n\n  {\n    // fall through\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.0\", 10000);\n    connection.remote_address_ =\n        std::make_shared<Network::Address::Ipv4Instance>(\"30.0.0.0\", 20000);\n    EXPECT_EQ(std::string(\"catch_all\"), config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // hit route with destination_ip (::1/128)\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv6Instance>(\"::1\");\n    EXPECT_EQ(std::string(\"with_v6_destination\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // hit route with destination_ip (\"2001:abcd/64\")\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ =\n        std::make_shared<Network::Address::Ipv6Instance>(\"2001:abcd:0:0:1::\");\n    EXPECT_EQ(std::string(\"with_v6_destination\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // hit route with destination_ip (\"2002::/32\") and source_ip (\"2003::/64\")\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ =\n        std::make_shared<Network::Address::Ipv6Instance>(\"2002:0:0:0:0:0::1\");\n    connection.remote_address_ =\n        std::make_shared<Network::Address::Ipv6Instance>(\"2003:0:0:0:0::5\");\n    EXPECT_EQ(std::string(\"with_v6_source_and_destination\"),\n              config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n\n  {\n    // fall through\n    NiceMock<Network::MockConnection> connection;\n    connection.local_address_ = std::make_shared<Network::Address::Ipv6Instance>(\"2004::\");\n    connection.remote_address_ = std::make_shared<Network::Address::Ipv6Instance>(\"::\");\n    EXPECT_EQ(std::string(\"catch_all\"), config_obj.getRouteFromEntries(connection)->clusterName());\n  }\n}\n\n// 
Tests that a deprecated_v1 route gets the top-level endpoint selector.\nTEST(ConfigTest, DEPRECATED_FEATURE_TEST(RouteWithTopLevelMetadataMatchConfig)) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  cluster: cluster\n  deprecated_v1:\n    routes:\n    - cluster: catch_all\n  metadata_match:\n    filter_metadata:\n      envoy.lb:\n        k1: v1\n        k2: v2\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  Config config_obj(constructConfigFromYaml(yaml, factory_context_, false));\n\n  ProtobufWkt::Value v1, v2;\n  v1.set_string_value(\"v1\");\n  v2.set_string_value(\"v2\");\n  HashedValue hv1(v1), hv2(v2);\n\n  NiceMock<Network::MockConnection> connection;\n  const auto route = config_obj.getRouteFromEntries(connection);\n  EXPECT_NE(nullptr, route);\n\n  EXPECT_EQ(\"catch_all\", route->clusterName());\n\n  const auto* criteria = route->metadataMatchCriteria();\n  EXPECT_NE(nullptr, criteria);\n\n  const auto& criterions = criteria->metadataMatchCriteria();\n  EXPECT_EQ(2, criterions.size());\n\n  EXPECT_EQ(\"k1\", criterions[0]->name());\n  EXPECT_EQ(hv1, criterions[0]->value());\n\n  EXPECT_EQ(\"k2\", criterions[1]->name());\n  EXPECT_EQ(hv2, criterions[1]->value());\n}\n\n// Tests that it's not possible to define a weighted cluster with 0 weight.\nTEST(ConfigTest, WeightedClusterWithZeroWeightConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  weighted_clusters:\n    clusters:\n    - name: cluster1\n      weight: 1\n    - name: cluster2\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  EXPECT_THROW(constructConfigFromV3Yaml(yaml, factory_context), EnvoyException);\n}\n\n// Tests that it is possible to define a list of weighted clusters.\nTEST(ConfigTest, WeightedClustersConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  weighted_clusters:\n    clusters:\n    - name: cluster1\n      weight: 1\n    - name: cluster2\n      weight: 
2\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n\n  NiceMock<Network::MockConnection> connection;\n  EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(0));\n  EXPECT_EQ(std::string(\"cluster1\"), config_obj.getRouteFromEntries(connection)->clusterName());\n\n  EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(2));\n  EXPECT_EQ(std::string(\"cluster2\"), config_obj.getRouteFromEntries(connection)->clusterName());\n}\n\n// Tests that it is possible to define a list of weighted clusters with independent endpoint\n// selectors.\nTEST(ConfigTest, WeightedClustersWithMetadataMatchConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  weighted_clusters:\n    clusters:\n    - name: cluster1\n      weight: 1\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k1: v1\n            k2: v2\n    - name: cluster2\n      weight: 2\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k3: v3\n            k4: v4\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n\n  {\n    ProtobufWkt::Value v1, v2;\n    v1.set_string_value(\"v1\");\n    v2.set_string_value(\"v2\");\n    HashedValue hv1(v1), hv2(v2);\n\n    NiceMock<Network::MockConnection> connection;\n    EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(0));\n\n    const auto route = config_obj.getRouteFromEntries(connection);\n    EXPECT_NE(nullptr, route);\n\n    EXPECT_EQ(\"cluster1\", route->clusterName());\n\n    const auto* criteria = route->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n\n    const auto& criterions = criteria->metadataMatchCriteria();\n    EXPECT_EQ(2, criterions.size());\n\n    EXPECT_EQ(\"k1\", criterions[0]->name());\n    EXPECT_EQ(hv1, 
criterions[0]->value());\n\n    EXPECT_EQ(\"k2\", criterions[1]->name());\n    EXPECT_EQ(hv2, criterions[1]->value());\n  }\n\n  {\n    ProtobufWkt::Value v3, v4;\n    v3.set_string_value(\"v3\");\n    v4.set_string_value(\"v4\");\n    HashedValue hv3(v3), hv4(v4);\n\n    NiceMock<Network::MockConnection> connection;\n    EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(2));\n\n    const auto route = config_obj.getRouteFromEntries(connection);\n    EXPECT_NE(nullptr, route);\n\n    EXPECT_EQ(\"cluster2\", route->clusterName());\n\n    const auto* criteria = route->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n\n    const auto& criterions = criteria->metadataMatchCriteria();\n    EXPECT_EQ(2, criterions.size());\n\n    EXPECT_EQ(\"k3\", criterions[0]->name());\n    EXPECT_EQ(hv3, criterions[0]->value());\n\n    EXPECT_EQ(\"k4\", criterions[1]->name());\n    EXPECT_EQ(hv4, criterions[1]->value());\n  }\n}\n\n// Tests that an individual endpoint selector of a weighted cluster gets merged with the top-level\n// endpoint selector.\nTEST(ConfigTest, WeightedClustersWithMetadataMatchAndTopLevelMetadataMatchConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  weighted_clusters:\n    clusters:\n    - name: cluster1\n      weight: 1\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k1: v1\n            k2: v2\n    - name: cluster2\n      weight: 2\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k3: v3\n            k4: v4\n  metadata_match:\n    filter_metadata:\n      envoy.lb:\n        k0: v00\n        k1: v01\n        k4: v04\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n\n  ProtobufWkt::Value v00, v01, v04;\n  v00.set_string_value(\"v00\");\n  v01.set_string_value(\"v01\");\n  v04.set_string_value(\"v04\");\n  HashedValue hv00(v00), hv01(v01), 
hv04(v04);\n\n  {\n    ProtobufWkt::Value v1, v2;\n    v1.set_string_value(\"v1\");\n    v2.set_string_value(\"v2\");\n    HashedValue hv1(v1), hv2(v2);\n\n    NiceMock<Network::MockConnection> connection;\n    EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(0));\n\n    const auto route = config_obj.getRouteFromEntries(connection);\n    EXPECT_NE(nullptr, route);\n\n    EXPECT_EQ(\"cluster1\", route->clusterName());\n\n    const auto* criteria = route->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n\n    const auto& criterions = criteria->metadataMatchCriteria();\n    EXPECT_EQ(4, criterions.size());\n\n    EXPECT_EQ(\"k0\", criterions[0]->name());\n    EXPECT_EQ(hv00, criterions[0]->value());\n\n    EXPECT_EQ(\"k1\", criterions[1]->name());\n    EXPECT_EQ(hv1, criterions[1]->value());\n\n    EXPECT_EQ(\"k2\", criterions[2]->name());\n    EXPECT_EQ(hv2, criterions[2]->value());\n\n    EXPECT_EQ(\"k4\", criterions[3]->name());\n    EXPECT_EQ(hv04, criterions[3]->value());\n  }\n\n  {\n    ProtobufWkt::Value v3, v4;\n    v3.set_string_value(\"v3\");\n    v4.set_string_value(\"v4\");\n    HashedValue hv3(v3), hv4(v4);\n\n    NiceMock<Network::MockConnection> connection;\n    EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(2));\n\n    const auto route = config_obj.getRouteFromEntries(connection);\n    EXPECT_NE(nullptr, route);\n\n    EXPECT_EQ(\"cluster2\", route->clusterName());\n\n    const auto* criteria = route->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n\n    const auto& criterions = criteria->metadataMatchCriteria();\n    EXPECT_EQ(4, criterions.size());\n\n    EXPECT_EQ(\"k0\", criterions[0]->name());\n    EXPECT_EQ(hv00, criterions[0]->value());\n\n    EXPECT_EQ(\"k1\", criterions[1]->name());\n    EXPECT_EQ(hv01, criterions[1]->value());\n\n    EXPECT_EQ(\"k3\", criterions[2]->name());\n    EXPECT_EQ(hv3, criterions[2]->value());\n\n    EXPECT_EQ(\"k4\", criterions[3]->name());\n    
EXPECT_EQ(hv4, criterions[3]->value());\n  }\n}\n\n// Tests that a weighted cluster gets the top-level endpoint selector.\nTEST(ConfigTest, WeightedClustersWithTopLevelMetadataMatchConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  weighted_clusters:\n    clusters:\n    - name: cluster1\n      weight: 1\n  metadata_match:\n    filter_metadata:\n      envoy.lb:\n        k1: v1\n        k2: v2\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n\n  ProtobufWkt::Value v1, v2;\n  v1.set_string_value(\"v1\");\n  v2.set_string_value(\"v2\");\n  HashedValue hv1(v1), hv2(v2);\n\n  NiceMock<Network::MockConnection> connection;\n  const auto route = config_obj.getRouteFromEntries(connection);\n  EXPECT_NE(nullptr, route);\n\n  EXPECT_EQ(\"cluster1\", route->clusterName());\n\n  const auto* criteria = route->metadataMatchCriteria();\n  EXPECT_NE(nullptr, criteria);\n\n  const auto& criterions = criteria->metadataMatchCriteria();\n  EXPECT_EQ(2, criterions.size());\n\n  EXPECT_EQ(\"k1\", criterions[0]->name());\n  EXPECT_EQ(hv1, criterions[0]->value());\n\n  EXPECT_EQ(\"k2\", criterions[1]->name());\n  EXPECT_EQ(hv2, criterions[1]->value());\n}\n\n// Tests that it is possible to define the top-level endpoint selector.\nTEST(ConfigTest, TopLevelMetadataMatchConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  cluster: foo\n  metadata_match:\n    filter_metadata:\n      envoy.lb:\n        k1: v1\n        k2: v2\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n\n  ProtobufWkt::Value v1, v2;\n  v1.set_string_value(\"v1\");\n  v2.set_string_value(\"v2\");\n  HashedValue hv1(v1), hv2(v2);\n\n  const auto* criteria = config_obj.metadataMatchCriteria();\n  EXPECT_NE(nullptr, criteria);\n\n  const auto& criterions = 
criteria->metadataMatchCriteria();\n  EXPECT_EQ(2, criterions.size());\n\n  EXPECT_EQ(\"k1\", criterions[0]->name());\n  EXPECT_EQ(hv1, criterions[0]->value());\n\n  EXPECT_EQ(\"k2\", criterions[1]->name());\n  EXPECT_EQ(hv2, criterions[1]->value());\n}\n\n// Tests that a regular cluster gets the top-level endpoint selector.\nTEST(ConfigTest, ClusterWithTopLevelMetadataMatchConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  cluster: foo\n  metadata_match:\n    filter_metadata:\n      envoy.lb:\n        k1: v1\n        k2: v2\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n\n  ProtobufWkt::Value v1, v2;\n  v1.set_string_value(\"v1\");\n  v2.set_string_value(\"v2\");\n  HashedValue hv1(v1), hv2(v2);\n\n  NiceMock<Network::MockConnection> connection;\n  const auto route = config_obj.getRouteFromEntries(connection);\n  EXPECT_NE(nullptr, route);\n\n  EXPECT_EQ(\"foo\", route->clusterName());\n\n  const auto* criteria = route->metadataMatchCriteria();\n  EXPECT_NE(nullptr, criteria);\n\n  const auto& criterions = criteria->metadataMatchCriteria();\n  EXPECT_EQ(2, criterions.size());\n\n  EXPECT_EQ(\"k1\", criterions[0]->name());\n  EXPECT_EQ(hv1, criterions[0]->value());\n\n  EXPECT_EQ(\"k2\", criterions[1]->name());\n  EXPECT_EQ(hv2, criterions[1]->value());\n}\n\n// Tests that a per connection cluster gets the top-level endpoint selector.\nTEST(ConfigTest, PerConnectionClusterWithTopLevelMetadataMatchConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  cluster: foo\n  metadata_match:\n    filter_metadata:\n      envoy.lb:\n        k1: v1\n        k2: v2\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n\n  ProtobufWkt::Value v1, v2;\n  v1.set_string_value(\"v1\");\n  v2.set_string_value(\"v2\");\n  HashedValue hv1(v1), 
hv2(v2);\n\n  NiceMock<Network::MockConnection> connection;\n  connection.stream_info_.filterState()->setData(\n      \"envoy.tcp_proxy.cluster\", std::make_unique<PerConnectionCluster>(\"filter_state_cluster\"),\n      StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Connection);\n\n  const auto route = config_obj.getRouteFromEntries(connection);\n  EXPECT_NE(nullptr, route);\n\n  EXPECT_EQ(\"filter_state_cluster\", route->clusterName());\n\n  const auto* criteria = route->metadataMatchCriteria();\n  EXPECT_NE(nullptr, criteria);\n\n  const auto& criterions = criteria->metadataMatchCriteria();\n  EXPECT_EQ(2, criterions.size());\n\n  EXPECT_EQ(\"k1\", criterions[0]->name());\n  EXPECT_EQ(hv1, criterions[0]->value());\n\n  EXPECT_EQ(\"k2\", criterions[1]->name());\n  EXPECT_EQ(hv2, criterions[1]->value());\n}\n\nTEST(ConfigTest, HashWithSourceIpConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  cluster: foo\n  hash_policy:\n  - source_ip: {}\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n  EXPECT_NE(nullptr, config_obj.hashPolicy());\n}\n\nTEST(ConfigTest, HashWithSourceIpDefaultConfig) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  cluster: foo\n)EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  Config config_obj(constructConfigFromV3Yaml(yaml, factory_context));\n  EXPECT_EQ(nullptr, config_obj.hashPolicy());\n}\n\nTEST(ConfigTest, AccessLogConfig) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config;\n  envoy::config::accesslog::v3::AccessLog* log = config.mutable_access_log()->Add();\n  log->set_name(Extensions::AccessLoggers::AccessLogNames::get().File);\n  {\n    envoy::extensions::access_loggers::file::v3::FileAccessLog file_access_log;\n    file_access_log.set_path(\"some_path\");\n    
file_access_log.mutable_log_format()->set_text_format(\"the format specifier\");\n    log->mutable_typed_config()->PackFrom(file_access_log);\n  }\n\n  log = config.mutable_access_log()->Add();\n  log->set_name(Extensions::AccessLoggers::AccessLogNames::get().File);\n  {\n    envoy::extensions::access_loggers::file::v3::FileAccessLog file_access_log;\n    file_access_log.set_path(\"another path\");\n    log->mutable_typed_config()->PackFrom(file_access_log);\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  Config config_obj(config, factory_context_);\n\n  EXPECT_EQ(2, config_obj.accessLogs().size());\n}\n\nclass TcpProxyTest : public testing::Test {\npublic:\n  TcpProxyTest() {\n    ON_CALL(*factory_context_.access_log_manager_.file_, write(_))\n        .WillByDefault(SaveArg<0>(&access_log_data_));\n    ON_CALL(filter_callbacks_.connection_.stream_info_, onUpstreamHostSelected(_))\n        .WillByDefault(Invoke(\n            [this](Upstream::HostDescriptionConstSharedPtr host) { upstream_host_ = host; }));\n    ON_CALL(filter_callbacks_.connection_.stream_info_, upstreamHost())\n        .WillByDefault(ReturnPointee(&upstream_host_));\n  }\n\n  ~TcpProxyTest() override {\n    if (filter_ != nullptr) {\n      filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n    }\n  }\n\n  void configure(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& config) {\n    config_ = std::make_shared<Config>(config, factory_context_);\n  }\n\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy defaultConfig() {\n    envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config;\n    config.set_stat_prefix(\"name\");\n    auto* route = config.mutable_hidden_envoy_deprecated_deprecated_v1()->mutable_routes()->Add();\n    route->set_cluster(\"fake_cluster\");\n\n    return config;\n  }\n\n  // Return the default config, plus one file access log with the specified format\n  
envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy\n  accessLogConfig(const std::string& access_log_format) {\n    envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n    envoy::config::accesslog::v3::AccessLog* access_log = config.mutable_access_log()->Add();\n    access_log->set_name(Extensions::AccessLoggers::AccessLogNames::get().File);\n    envoy::extensions::access_loggers::file::v3::FileAccessLog file_access_log;\n    file_access_log.set_path(\"unused\");\n    file_access_log.mutable_log_format()->set_text_format(access_log_format);\n    access_log->mutable_typed_config()->PackFrom(file_access_log);\n    return config;\n  }\n\n  void setup(uint32_t connections,\n             const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& config) {\n    configure(config);\n    upstream_local_address_ = Network::Utility::resolveUrl(\"tcp://2.2.2.2:50000\");\n    upstream_remote_address_ = Network::Utility::resolveUrl(\"tcp://127.0.0.1:80\");\n    for (uint32_t i = 0; i < connections; i++) {\n      upstream_connections_.push_back(std::make_unique<NiceMock<Network::MockClientConnection>>());\n      upstream_connection_data_.push_back(\n          std::make_unique<NiceMock<Tcp::ConnectionPool::MockConnectionData>>());\n      ON_CALL(*upstream_connection_data_.back(), connection())\n          .WillByDefault(ReturnRef(*upstream_connections_.back()));\n      upstream_hosts_.push_back(std::make_shared<NiceMock<Upstream::MockHost>>());\n      conn_pool_handles_.push_back(\n          std::make_unique<NiceMock<Envoy::ConnectionPool::MockCancellable>>());\n\n      ON_CALL(*upstream_hosts_.at(i), cluster())\n          .WillByDefault(ReturnPointee(\n              factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_));\n      ON_CALL(*upstream_hosts_.at(i), address()).WillByDefault(Return(upstream_remote_address_));\n      upstream_connections_.at(i)->local_address_ = upstream_local_address_;\n      
EXPECT_CALL(*upstream_connections_.at(i), dispatcher())\n          .WillRepeatedly(ReturnRef(filter_callbacks_.connection_.dispatcher_));\n    }\n\n    {\n      testing::InSequence sequence;\n      for (uint32_t i = 0; i < connections; i++) {\n        EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(\"fake_cluster\", _, _))\n            .WillOnce(Return(&conn_pool_))\n            .RetiresOnSaturation();\n        EXPECT_CALL(conn_pool_, newConnection(_))\n            .WillOnce(Invoke(\n                [=](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* {\n                  conn_pool_callbacks_.push_back(&cb);\n\n                  return onNewConnection(conn_pool_handles_.at(i).get());\n                }))\n            .RetiresOnSaturation();\n      }\n      EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(\"fake_cluster\", _, _))\n          .WillRepeatedly(Return(nullptr));\n    }\n\n    {\n      filter_ = std::make_unique<Filter>(config_, factory_context_.cluster_manager_);\n      EXPECT_CALL(filter_callbacks_.connection_, enableHalfClose(true));\n      EXPECT_CALL(filter_callbacks_.connection_, readDisable(true));\n      filter_->initializeReadFilterCallbacks(filter_callbacks_);\n      filter_callbacks_.connection_.streamInfo().setDownstreamSslConnection(\n          filter_callbacks_.connection_.ssl());\n      filter_callbacks_.connection_.streamInfo().setDownstreamLocalAddress(\n          filter_callbacks_.connection_.localAddress());\n      filter_callbacks_.connection_.streamInfo().setDownstreamRemoteAddress(\n          filter_callbacks_.connection_.remoteAddress());\n      EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n\n      EXPECT_EQ(absl::optional<uint64_t>(), filter_->computeHashKey());\n      EXPECT_EQ(&filter_callbacks_.connection_, filter_->downstreamConnection());\n      EXPECT_EQ(nullptr, filter_->metadataMatchCriteria());\n    }\n  }\n\n  void 
setup(uint32_t connections) { setup(connections, defaultConfig()); }\n\n  void raiseEventUpstreamConnected(uint32_t conn_index) {\n    EXPECT_CALL(filter_callbacks_.connection_, readDisable(false));\n    EXPECT_CALL(*upstream_connection_data_.at(conn_index), addUpstreamCallbacks(_))\n        .WillOnce(Invoke([=](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void {\n          upstream_callbacks_ = &cb;\n\n          // Simulate TCP conn pool upstream callbacks. This is safe because the TCP proxy never\n          // releases a connection so all events go to the same UpstreamCallbacks instance.\n          upstream_connections_.at(conn_index)->addConnectionCallbacks(cb);\n        }));\n    EXPECT_CALL(*upstream_connections_.at(conn_index), enableHalfClose(true));\n    conn_pool_callbacks_.at(conn_index)\n        ->onPoolReady(std::move(upstream_connection_data_.at(conn_index)),\n                      upstream_hosts_.at(conn_index));\n  }\n\n  void raiseEventUpstreamConnectFailed(uint32_t conn_index,\n                                       ConnectionPool::PoolFailureReason reason) {\n    conn_pool_callbacks_.at(conn_index)->onPoolFailure(reason, upstream_hosts_.at(conn_index));\n  }\n\n  Tcp::ConnectionPool::Cancellable* onNewConnection(Tcp::ConnectionPool::Cancellable* connection) {\n    if (!new_connection_functions_.empty()) {\n      auto fn = new_connection_functions_.front();\n      new_connection_functions_.pop_front();\n      return fn(connection);\n    }\n    return connection;\n  }\n\n  Event::TestTimeSystem& timeSystem() { return factory_context_.timeSystem(); }\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  ConfigSharedPtr config_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  std::unique_ptr<Filter> filter_;\n  std::vector<std::shared_ptr<NiceMock<Upstream::MockHost>>> upstream_hosts_{};\n  std::vector<std::unique_ptr<NiceMock<Network::MockClientConnection>>> upstream_connections_{};\n  
std::vector<std::unique_ptr<NiceMock<Tcp::ConnectionPool::MockConnectionData>>>\n      upstream_connection_data_{};\n  std::vector<Tcp::ConnectionPool::Callbacks*> conn_pool_callbacks_;\n  std::vector<std::unique_ptr<NiceMock<Envoy::ConnectionPool::MockCancellable>>> conn_pool_handles_;\n  NiceMock<Tcp::ConnectionPool::MockInstance> conn_pool_;\n  Tcp::ConnectionPool::UpstreamCallbacks* upstream_callbacks_;\n  StringViewSaver access_log_data_;\n  Network::Address::InstanceConstSharedPtr upstream_local_address_;\n  Network::Address::InstanceConstSharedPtr upstream_remote_address_;\n  std::list<std::function<Tcp::ConnectionPool::Cancellable*(Tcp::ConnectionPool::Cancellable*)>>\n      new_connection_functions_;\n  Upstream::HostDescriptionConstSharedPtr upstream_host_{};\n};\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(DefaultRoutes)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::WeightedCluster::ClusterWeight*\n      ignored_cluster = config.mutable_weighted_clusters()->mutable_clusters()->Add();\n  ignored_cluster->set_name(\"ignored_cluster\");\n  ignored_cluster->set_weight(10);\n\n  configure(config);\n\n  NiceMock<Network::MockConnection> connection;\n  EXPECT_EQ(std::string(\"fake_cluster\"), config_->getRouteFromEntries(connection)->clusterName());\n}\n\n// Tests that half-closes are proxied and don't themselves cause any connection to be closed.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(HalfCloseProxy)) {\n  setup(1);\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(_)).Times(0);\n  EXPECT_CALL(*upstream_connections_.at(0), close(_)).Times(0);\n\n  raiseEventUpstreamConnected(0);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(*upstream_connections_.at(0), write(BufferEqual(&buffer), true));\n  filter_->onData(buffer, true);\n\n  Buffer::OwnedImpl response(\"world\");\n  EXPECT_CALL(filter_callbacks_.connection_, 
write(BufferEqual(&response), true));\n  upstream_callbacks_->onUpstreamData(response, true);\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(_));\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Test that downstream is closed after an upstream LocalClose.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamLocalDisconnect)) {\n  setup(1);\n\n  raiseEventUpstreamConnected(0);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(*upstream_connections_.at(0), write(BufferEqual(&buffer), false));\n  filter_->onData(buffer, false);\n\n  Buffer::OwnedImpl response(\"world\");\n  EXPECT_CALL(filter_callbacks_.connection_, write(BufferEqual(&response), _));\n  upstream_callbacks_->onUpstreamData(response, false);\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(_));\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::LocalClose);\n}\n\n// Test that downstream is closed after an upstream RemoteClose.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamRemoteDisconnect)) {\n  setup(1);\n\n  raiseEventUpstreamConnected(0);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(*upstream_connections_.at(0), write(BufferEqual(&buffer), false));\n  filter_->onData(buffer, false);\n\n  Buffer::OwnedImpl response(\"world\");\n  EXPECT_CALL(filter_callbacks_.connection_, write(BufferEqual(&response), _));\n  upstream_callbacks_->onUpstreamData(response, false);\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Test that reconnect is attempted after a local connect failure\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(ConnectAttemptsUpstreamLocalFail)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_max_connect_attempts()->set_value(2);\n\n  setup(2, config);\n\n  raiseEventUpstreamConnectFailed(0, 
ConnectionPool::PoolFailureReason::LocalConnectionFailure);\n  raiseEventUpstreamConnected(1);\n\n  EXPECT_EQ(0U, factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_cx_connect_attempts_exceeded\")\n                    .value());\n}\n\n// Make sure that the tcp proxy code handles reentrant calls to onPoolFailure.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(ConnectAttemptsUpstreamLocalFailReentrant)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_max_connect_attempts()->set_value(2);\n\n  // Set up a call to onPoolFailure from inside the first newConnection call.\n  // This simulates a connection failure from under the stack of newStream.\n  new_connection_functions_.push_back(\n      [&](Tcp::ConnectionPool::Cancellable*) -> Tcp::ConnectionPool::Cancellable* {\n        raiseEventUpstreamConnectFailed(0,\n                                        ConnectionPool::PoolFailureReason::LocalConnectionFailure);\n        return nullptr;\n      });\n\n  setup(2, config);\n\n  // Make sure the last connection pool to be created is the one which gets the\n  // cancellation call.\n  EXPECT_CALL(*conn_pool_handles_.at(0), cancel(Tcp::ConnectionPool::CancelPolicy::CloseExcess))\n      .Times(0);\n  EXPECT_CALL(*conn_pool_handles_.at(1), cancel(Tcp::ConnectionPool::CancelPolicy::CloseExcess))\n      .Times(1);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Test that reconnect is attempted after a remote connect failure\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(ConnectAttemptsUpstreamRemoteFail)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_max_connect_attempts()->set_value(2);\n  setup(2, config);\n\n  raiseEventUpstreamConnectFailed(0, ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n  
raiseEventUpstreamConnected(1);\n\n  EXPECT_EQ(0U, factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_cx_connect_attempts_exceeded\")\n                    .value());\n}\n\n// Test that reconnect is attempted after a connect timeout\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(ConnectAttemptsUpstreamTimeout)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_max_connect_attempts()->set_value(2);\n  setup(2, config);\n\n  raiseEventUpstreamConnectFailed(0, ConnectionPool::PoolFailureReason::Timeout);\n  raiseEventUpstreamConnected(1);\n\n  EXPECT_EQ(0U, factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"upstream_cx_connect_attempts_exceeded\")\n                    .value());\n}\n\n// Test that only the configured number of connect attempts occur\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(ConnectAttemptsLimit)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config =\n      accessLogConfig(\"%RESPONSE_FLAGS%\");\n  config.mutable_max_connect_attempts()->set_value(3);\n  setup(3, config);\n\n  EXPECT_CALL(upstream_hosts_.at(0)->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  EXPECT_CALL(upstream_hosts_.at(1)->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  EXPECT_CALL(upstream_hosts_.at(2)->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n\n  // Try both failure modes\n  raiseEventUpstreamConnectFailed(0, ConnectionPool::PoolFailureReason::Timeout);\n  raiseEventUpstreamConnectFailed(1, ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n  raiseEventUpstreamConnectFailed(2, 
ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"UF,URX\");\n}\n\nTEST_F(TcpProxyTest, ConnectedNoOp) {\n  setup(1);\n  raiseEventUpstreamConnected(0);\n\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::Connected);\n\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Test that the tcp proxy sends the correct notifications to the outlier detector\nTEST_F(TcpProxyTest, OutlierDetection) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_max_connect_attempts()->set_value(3);\n  setup(3, config);\n\n  EXPECT_CALL(upstream_hosts_.at(0)->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  raiseEventUpstreamConnectFailed(0, ConnectionPool::PoolFailureReason::Timeout);\n\n  EXPECT_CALL(upstream_hosts_.at(1)->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  raiseEventUpstreamConnectFailed(1, ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n\n  EXPECT_CALL(upstream_hosts_.at(2)->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccessFinal, _));\n  raiseEventUpstreamConnected(2);\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamDisconnectDownstreamFlowControl)) {\n  setup(1);\n\n  raiseEventUpstreamConnected(0);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(*upstream_connections_.at(0), write(BufferEqual(&buffer), _));\n  filter_->onData(buffer, false);\n\n  Buffer::OwnedImpl response(\"world\");\n  EXPECT_CALL(filter_callbacks_.connection_, write(BufferEqual(&response), _));\n  upstream_callbacks_->onUpstreamData(response, false);\n\n  EXPECT_CALL(*upstream_connections_.at(0), readDisable(true));\n  filter_callbacks_.connection_.runHighWatermarkCallbacks();\n\n  EXPECT_CALL(filter_callbacks_.connection_, 
close(Network::ConnectionCloseType::FlushWrite));\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::RemoteClose);\n\n  filter_callbacks_.connection_.runLowWatermarkCallbacks();\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(DownstreamDisconnectRemote)) {\n  setup(1);\n\n  raiseEventUpstreamConnected(0);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(*upstream_connections_.at(0), write(BufferEqual(&buffer), _));\n  filter_->onData(buffer, false);\n\n  Buffer::OwnedImpl response(\"world\");\n  EXPECT_CALL(filter_callbacks_.connection_, write(BufferEqual(&response), _));\n  upstream_callbacks_->onUpstreamData(response, false);\n\n  EXPECT_CALL(*upstream_connections_.at(0), close(Network::ConnectionCloseType::FlushWrite));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(DownstreamDisconnectLocal)) {\n  setup(1);\n\n  raiseEventUpstreamConnected(0);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(*upstream_connections_.at(0), write(BufferEqual(&buffer), _));\n  filter_->onData(buffer, false);\n\n  Buffer::OwnedImpl response(\"world\");\n  EXPECT_CALL(filter_callbacks_.connection_, write(BufferEqual(&response), _));\n  upstream_callbacks_->onUpstreamData(response, false);\n\n  EXPECT_CALL(*upstream_connections_.at(0), close(Network::ConnectionCloseType::NoFlush));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamConnectTimeout)) {\n  setup(1, accessLogConfig(\"%RESPONSE_FLAGS%\"));\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  raiseEventUpstreamConnectFailed(0, ConnectionPool::PoolFailureReason::Timeout);\n\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"UF,URX\");\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(NoHost)) {\n  EXPECT_CALL(filter_callbacks_.connection_, 
close(Network::ConnectionCloseType::NoFlush));\n  setup(0, accessLogConfig(\"%RESPONSE_FLAGS%\"));\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"UH\");\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(RouteWithMetadataMatch)) {\n  auto v1 = ProtobufWkt::Value();\n  v1.set_string_value(\"v1\");\n  auto v2 = ProtobufWkt::Value();\n  v2.set_number_value(2.0);\n  auto v3 = ProtobufWkt::Value();\n  v3.set_bool_value(true);\n\n  std::vector<Router::MetadataMatchCriterionImpl> criteria = {{\"a\", v1}, {\"b\", v2}, {\"c\", v3}};\n\n  auto metadata_struct = ProtobufWkt::Struct();\n  auto mutable_fields = metadata_struct.mutable_fields();\n\n  for (const auto& criterion : criteria) {\n    mutable_fields->insert({criterion.name(), criterion.value().value()});\n  }\n\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_metadata_match()->mutable_filter_metadata()->insert(\n      {Envoy::Config::MetadataFilters::get().ENVOY_LB, metadata_struct});\n\n  configure(config);\n  filter_ = std::make_unique<Filter>(config_, factory_context_.cluster_manager_);\n  filter_->initializeReadFilterCallbacks(filter_callbacks_);\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n\n  const auto effective_criteria = filter_->metadataMatchCriteria();\n  EXPECT_NE(nullptr, effective_criteria);\n\n  const auto& effective_criterions = effective_criteria->metadataMatchCriteria();\n  EXPECT_EQ(effective_criterions.size(), criteria.size());\n  for (size_t i = 0; i < criteria.size(); ++i) {\n    EXPECT_EQ(effective_criterions[i]->name(), criteria[i].name());\n    EXPECT_EQ(effective_criterions[i]->value(), criteria[i].value());\n  }\n}\n\n// Tests that the endpoint selector of a weighted cluster gets included into the\n// LoadBalancerContext.\nTEST_F(TcpProxyTest, WeightedClusterWithMetadataMatch) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  weighted_clusters:\n    clusters:\n    - name: 
cluster1\n      weight: 1\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k1: v1\n    - name: cluster2\n      weight: 2\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k2: v2\n  metadata_match:\n    filter_metadata:\n      envoy.lb:\n        k0: v0\n)EOF\";\n\n  config_ = std::make_shared<Config>(constructConfigFromYaml(yaml, factory_context_));\n\n  ProtobufWkt::Value v0, v1, v2;\n  v0.set_string_value(\"v0\");\n  v1.set_string_value(\"v1\");\n  v2.set_string_value(\"v2\");\n  HashedValue hv0(v0), hv1(v1), hv2(v2);\n\n  filter_ = std::make_unique<Filter>(config_, factory_context_.cluster_manager_);\n  filter_->initializeReadFilterCallbacks(filter_callbacks_);\n\n  // Expect filter to try to open a connection to cluster1.\n  {\n    Upstream::LoadBalancerContext* context;\n\n    EXPECT_CALL(factory_context_.api_.random_, random()).WillOnce(Return(0));\n    EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(\"cluster1\", _, _))\n        .WillOnce(DoAll(SaveArg<2>(&context), Return(nullptr)));\n    EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n\n    EXPECT_NE(nullptr, context);\n\n    const auto effective_criteria = context->metadataMatchCriteria();\n    EXPECT_NE(nullptr, effective_criteria);\n\n    const auto& effective_criterions = effective_criteria->metadataMatchCriteria();\n    EXPECT_EQ(2, effective_criterions.size());\n\n    EXPECT_EQ(\"k0\", effective_criterions[0]->name());\n    EXPECT_EQ(hv0, effective_criterions[0]->value());\n\n    EXPECT_EQ(\"k1\", effective_criterions[1]->name());\n    EXPECT_EQ(hv1, effective_criterions[1]->value());\n  }\n\n  // Expect filter to try to open a connection to cluster2.\n  {\n    Upstream::LoadBalancerContext* context;\n\n    EXPECT_CALL(factory_context_.api_.random_, random()).WillOnce(Return(2));\n    EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(\"cluster2\", _, _))\n        
.WillOnce(DoAll(SaveArg<2>(&context), Return(nullptr)));\n    EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n\n    EXPECT_NE(nullptr, context);\n\n    const auto effective_criteria = context->metadataMatchCriteria();\n    EXPECT_NE(nullptr, effective_criteria);\n\n    const auto& effective_criterions = effective_criteria->metadataMatchCriteria();\n    EXPECT_EQ(2, effective_criterions.size());\n\n    EXPECT_EQ(\"k0\", effective_criterions[0]->name());\n    EXPECT_EQ(hv0, effective_criterions[0]->value());\n\n    EXPECT_EQ(\"k2\", effective_criterions[1]->name());\n    EXPECT_EQ(hv2, effective_criterions[1]->value());\n  }\n}\n\n// Test that metadata match criteria provided on the StreamInfo is used.\nTEST_F(TcpProxyTest, StreamInfoDynamicMetadata) {\n  configure(defaultConfig());\n\n  ProtobufWkt::Value val;\n  val.set_string_value(\"val\");\n\n  envoy::config::core::v3::Metadata metadata;\n  ProtobufWkt::Struct& map =\n      (*metadata.mutable_filter_metadata())[Envoy::Config::MetadataFilters::get().ENVOY_LB];\n  (*map.mutable_fields())[\"test\"] = val;\n  EXPECT_CALL(filter_callbacks_.connection_.stream_info_, dynamicMetadata())\n      .WillOnce(ReturnRef(metadata));\n\n  filter_ = std::make_unique<Filter>(config_, factory_context_.cluster_manager_);\n  filter_->initializeReadFilterCallbacks(filter_callbacks_);\n\n  Upstream::LoadBalancerContext* context;\n\n  EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(_, _, _))\n      .WillOnce(DoAll(SaveArg<2>(&context), Return(nullptr)));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n\n  EXPECT_NE(nullptr, context);\n\n  const auto effective_criteria = context->metadataMatchCriteria();\n  EXPECT_NE(nullptr, effective_criteria);\n\n  const auto& effective_criterions = effective_criteria->metadataMatchCriteria();\n  EXPECT_EQ(1, effective_criterions.size());\n\n  EXPECT_EQ(\"test\", effective_criterions[0]->name());\n  
EXPECT_EQ(HashedValue(val), effective_criterions[0]->value());\n}\n\n// Test that if both streamInfo and configuration add metadata match criteria, they\n// are merged.\nTEST_F(TcpProxyTest, StreamInfoDynamicMetadataAndConfigMerged) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: name\n  weighted_clusters:\n    clusters:\n    - name: cluster1\n      weight: 1\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k0: v0\n            k1: from_config\n)EOF\";\n\n  config_ = std::make_shared<Config>(constructConfigFromYaml(yaml, factory_context_));\n\n  ProtobufWkt::Value v0, v1, v2;\n  v0.set_string_value(\"v0\");\n  v1.set_string_value(\"from_streaminfo\"); // 'v1' is overridden with this value by streamInfo.\n  v2.set_string_value(\"v2\");\n  HashedValue hv0(v0), hv1(v1), hv2(v2);\n\n  envoy::config::core::v3::Metadata metadata;\n  ProtobufWkt::Struct& map =\n      (*metadata.mutable_filter_metadata())[Envoy::Config::MetadataFilters::get().ENVOY_LB];\n  (*map.mutable_fields())[\"k1\"] = v1;\n  (*map.mutable_fields())[\"k2\"] = v2;\n  EXPECT_CALL(filter_callbacks_.connection_.stream_info_, dynamicMetadata())\n      .WillOnce(ReturnRef(metadata));\n\n  filter_ = std::make_unique<Filter>(config_, factory_context_.cluster_manager_);\n  filter_->initializeReadFilterCallbacks(filter_callbacks_);\n\n  Upstream::LoadBalancerContext* context;\n\n  EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(_, _, _))\n      .WillOnce(DoAll(SaveArg<2>(&context), Return(nullptr)));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n\n  EXPECT_NE(nullptr, context);\n\n  const auto effective_criteria = context->metadataMatchCriteria();\n  EXPECT_NE(nullptr, effective_criteria);\n\n  const auto& effective_criterions = effective_criteria->metadataMatchCriteria();\n  EXPECT_EQ(3, effective_criterions.size());\n\n  EXPECT_EQ(\"k0\", effective_criterions[0]->name());\n  EXPECT_EQ(hv0, 
effective_criterions[0]->value());\n\n  EXPECT_EQ(\"k1\", effective_criterions[1]->name());\n  EXPECT_EQ(hv1, effective_criterions[1]->value());\n\n  EXPECT_EQ(\"k2\", effective_criterions[2]->name());\n  EXPECT_EQ(hv2, effective_criterions[2]->value());\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(DisconnectBeforeData)) {\n  configure(defaultConfig());\n  filter_ = std::make_unique<Filter>(config_, factory_context_.cluster_manager_);\n  filter_->initializeReadFilterCallbacks(filter_callbacks_);\n\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Test that if the downstream connection is closed before the upstream connection\n// is established, the upstream connection is cancelled.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(RemoteClosedBeforeUpstreamConnected)) {\n  setup(1);\n  EXPECT_CALL(*conn_pool_handles_.at(0), cancel(Tcp::ConnectionPool::CancelPolicy::CloseExcess));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Test that if the downstream connection is closed before the upstream connection\n// is established, the upstream connection is cancelled.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(LocalClosetBeforeUpstreamConnected)) {\n  setup(1);\n  EXPECT_CALL(*conn_pool_handles_.at(0), cancel(Tcp::ConnectionPool::CancelPolicy::CloseExcess));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamConnectFailure)) {\n  setup(1, accessLogConfig(\"%RESPONSE_FLAGS%\"));\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  raiseEventUpstreamConnectFailed(0, ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"UF,URX\");\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamConnectionLimit)) {\n  configure(accessLogConfig(\"%RESPONSE_FLAGS%\"));\n  
factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->resetResourceManager(\n      0, 0, 0, 0, 0);\n\n  // setup sets up expectation for tcpConnForCluster but this test is expected to NOT call that\n  filter_ = std::make_unique<Filter>(config_, factory_context_.cluster_manager_);\n  // The downstream connection closes if the proxy can't make an upstream connection.\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  filter_->initializeReadFilterCallbacks(filter_callbacks_);\n  filter_->onNewConnection();\n\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"UO\");\n}\n\n// Tests that the idle timer closes both connections, and gets updated when either\n// connection has activity.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(IdleTimeout)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_idle_timeout()->set_seconds(1);\n  setup(1, config);\n\n  Event::MockTimer* idle_timer = new Event::MockTimer(&filter_callbacks_.connection_.dispatcher_);\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  raiseEventUpstreamConnected(0);\n\n  Buffer::OwnedImpl buffer(\"hello\");\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  filter_->onData(buffer, false);\n\n  buffer.add(\"hello2\");\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  upstream_callbacks_->onUpstreamData(buffer, false);\n\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  filter_callbacks_.connection_.raiseBytesSentCallbacks(1);\n\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  upstream_connections_.at(0)->raiseBytesSentCallbacks(2);\n\n  EXPECT_CALL(*upstream_connections_.at(0), close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  
EXPECT_CALL(*idle_timer, disableTimer());\n  idle_timer->invokeCallback();\n}\n\n// Tests that the idle timer is disabled when the downstream connection is closed.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(IdleTimerDisabledDownstreamClose)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_idle_timeout()->set_seconds(1);\n  setup(1, config);\n\n  Event::MockTimer* idle_timer = new Event::MockTimer(&filter_callbacks_.connection_.dispatcher_);\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  raiseEventUpstreamConnected(0);\n\n  EXPECT_CALL(*idle_timer, disableTimer());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Tests that the idle timer is disabled when the upstream connection is closed.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(IdleTimerDisabledUpstreamClose)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_idle_timeout()->set_seconds(1);\n  setup(1, config);\n\n  Event::MockTimer* idle_timer = new Event::MockTimer(&filter_callbacks_.connection_.dispatcher_);\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  raiseEventUpstreamConnected(0);\n\n  EXPECT_CALL(*idle_timer, disableTimer());\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Tests that flushing data during an idle timeout doesn't cause problems.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(IdleTimeoutWithOutstandingDataFlushed)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_idle_timeout()->set_seconds(1);\n  setup(1, config);\n\n  Event::MockTimer* idle_timer = new Event::MockTimer(&filter_callbacks_.connection_.dispatcher_);\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  raiseEventUpstreamConnected(0);\n\n  Buffer::OwnedImpl 
buffer(\"hello\");\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  filter_->onData(buffer, false);\n\n  buffer.add(\"hello2\");\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  upstream_callbacks_->onUpstreamData(buffer, false);\n\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  filter_callbacks_.connection_.raiseBytesSentCallbacks(1);\n\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  upstream_connections_.at(0)->raiseBytesSentCallbacks(2);\n\n  // Mark the upstream connection as blocked.\n  // This should read-disable the downstream connection.\n  EXPECT_CALL(filter_callbacks_.connection_, readDisable(_));\n  upstream_connections_.at(0)->runHighWatermarkCallbacks();\n\n  // When Envoy has an idle timeout, the following happens.\n  // Envoy closes the downstream connection\n  // Envoy closes the upstream connection.\n  // When closing the upstream connection with ConnectionCloseType::NoFlush,\n  // if there is data in the buffer, Envoy does a best-effort flush.\n  // If the write succeeds, Envoy may go under the flow control limit and start\n  // the callbacks to read-enable the already-closed downstream connection.\n  //\n  // In this case we expect readDisable to not be called on the already closed\n  // connection.\n  EXPECT_CALL(filter_callbacks_.connection_, readDisable(true)).Times(0);\n  EXPECT_CALL(*upstream_connections_.at(0), close(Network::ConnectionCloseType::NoFlush))\n      .WillOnce(InvokeWithoutArgs(\n          [&]() -> void { upstream_connections_.at(0)->runLowWatermarkCallbacks(); }));\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*idle_timer, disableTimer());\n  idle_timer->invokeCallback();\n}\n\n// Test that access log fields %UPSTREAM_HOST% and %UPSTREAM_CLUSTER% are correctly logged.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(AccessLogUpstreamHost)) 
{\n  setup(1, accessLogConfig(\"%UPSTREAM_HOST% %UPSTREAM_CLUSTER%\"));\n  raiseEventUpstreamConnected(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"127.0.0.1:80 fake_cluster\");\n}\n\n// Test that access log field %UPSTREAM_LOCAL_ADDRESS% is correctly logged.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(AccessLogUpstreamLocalAddress)) {\n  setup(1, accessLogConfig(\"%UPSTREAM_LOCAL_ADDRESS%\"));\n  raiseEventUpstreamConnected(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"2.2.2.2:50000\");\n}\n\n// Test that access log fields %DOWNSTREAM_PEER_URI_SAN% is correctly logged.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(AccessLogPeerUriSan)) {\n  filter_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://1.1.1.2:20000\");\n  filter_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://1.1.1.1:40000\");\n\n  const std::vector<std::string> uriSan{\"someSan\"};\n  auto mockConnectionInfo = std::make_shared<Ssl::MockConnectionInfo>();\n  EXPECT_CALL(*mockConnectionInfo, uriSanPeerCertificate()).WillOnce(Return(uriSan));\n  EXPECT_CALL(filter_callbacks_.connection_, ssl()).WillRepeatedly(Return(mockConnectionInfo));\n\n  setup(1, accessLogConfig(\"%DOWNSTREAM_PEER_URI_SAN%\"));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"someSan\");\n}\n\n// Test that access log fields %DOWNSTREAM_TLS_SESSION_ID% is correctly logged.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(AccessLogTlsSessionId)) {\n  filter_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://1.1.1.2:20000\");\n  filter_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://1.1.1.1:40000\");\n\n  const std::string 
tlsSessionId{\n      \"D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B\"};\n  auto mockConnectionInfo = std::make_shared<Ssl::MockConnectionInfo>();\n  EXPECT_CALL(*mockConnectionInfo, sessionId()).WillOnce(ReturnRef(tlsSessionId));\n  EXPECT_CALL(filter_callbacks_.connection_, ssl()).WillRepeatedly(Return(mockConnectionInfo));\n\n  setup(1, accessLogConfig(\"%DOWNSTREAM_TLS_SESSION_ID%\"));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B\");\n}\n\n// Test that access log fields %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% and\n// %DOWNSTREAM_LOCAL_ADDRESS% are correctly logged.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(AccessLogDownstreamAddress)) {\n  filter_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://1.1.1.2:20000\");\n  filter_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://1.1.1.1:40000\");\n  setup(1, accessLogConfig(\"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% %DOWNSTREAM_LOCAL_ADDRESS%\"));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  filter_.reset();\n  EXPECT_EQ(access_log_data_, \"1.1.1.1 1.1.1.2:20000\");\n}\n\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(AccessLogUpstreamSSLConnection)) {\n  setup(1);\n\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  const std::string session_id = \"D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B\";\n  auto ssl_info = std::make_shared<Ssl::MockConnectionInfo>();\n  EXPECT_CALL(*ssl_info, sessionId()).WillRepeatedly(ReturnRef(session_id));\n  stream_info.setDownstreamSslConnection(ssl_info);\n  EXPECT_CALL(*upstream_connections_.at(0), streamInfo()).WillRepeatedly(ReturnRef(stream_info));\n\n  raiseEventUpstreamConnected(0);\n  ASSERT_NE(nullptr, filter_->getStreamInfo().upstreamSslConnection());\n  
EXPECT_EQ(session_id, filter_->getStreamInfo().upstreamSslConnection()->sessionId());\n}\n\n// Tests that upstream flush works properly with no idle timeout configured.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamFlushNoTimeout)) {\n  setup(1);\n  raiseEventUpstreamConnected(0);\n\n  EXPECT_CALL(*upstream_connections_.at(0),\n              close(Network::ConnectionCloseType::FlushWrite))\n      .WillOnce(Return()); // Cancel default action of raising LocalClose\n  EXPECT_CALL(*upstream_connections_.at(0), state())\n      .WillOnce(Return(Network::Connection::State::Closing));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  filter_.reset();\n\n  EXPECT_EQ(1U, config_->stats().upstream_flush_active_.value());\n\n  // Send some bytes; no timeout configured so this should be a no-op (not a crash).\n  upstream_connections_.at(0)->raiseBytesSentCallbacks(1);\n\n  // Simulate flush complete.\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::LocalClose);\n  EXPECT_EQ(1U, config_->stats().upstream_flush_total_.value());\n  EXPECT_EQ(0U, config_->stats().upstream_flush_active_.value());\n}\n\n// Tests that upstream flush works with an idle timeout configured, but the connection\n// finishes draining before the timer expires.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamFlushTimeoutConfigured)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_idle_timeout()->set_seconds(1);\n  setup(1, config);\n\n  NiceMock<Event::MockTimer>* idle_timer =\n      new NiceMock<Event::MockTimer>(&filter_callbacks_.connection_.dispatcher_);\n  EXPECT_CALL(*idle_timer, enableTimer(_, _));\n  raiseEventUpstreamConnected(0);\n\n  EXPECT_CALL(*upstream_connections_.at(0),\n              close(Network::ConnectionCloseType::FlushWrite))\n      .WillOnce(Return()); // Cancel default action of raising LocalClose\n  EXPECT_CALL(*upstream_connections_.at(0), state())\n      
.WillOnce(Return(Network::Connection::State::Closing));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  filter_.reset();\n  EXPECT_EQ(1U, config_->stats().upstream_flush_active_.value());\n\n  EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  upstream_connections_.at(0)->raiseBytesSentCallbacks(1);\n\n  // Simulate flush complete.\n  EXPECT_CALL(*idle_timer, disableTimer());\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::LocalClose);\n  EXPECT_EQ(1U, config_->stats().upstream_flush_total_.value());\n  EXPECT_EQ(0U, config_->stats().upstream_flush_active_.value());\n  EXPECT_EQ(0U, config_->stats().idle_timeout_.value());\n}\n\n// Tests that upstream flush closes the connection when the idle timeout fires.\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamFlushTimeoutExpired)) {\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig();\n  config.mutable_idle_timeout()->set_seconds(1);\n  setup(1, config);\n\n  NiceMock<Event::MockTimer>* idle_timer =\n      new NiceMock<Event::MockTimer>(&filter_callbacks_.connection_.dispatcher_);\n  EXPECT_CALL(*idle_timer, enableTimer(_, _));\n  raiseEventUpstreamConnected(0);\n\n  EXPECT_CALL(*upstream_connections_.at(0),\n              close(Network::ConnectionCloseType::FlushWrite))\n      .WillOnce(Return()); // Cancel default action of raising LocalClose\n  EXPECT_CALL(*upstream_connections_.at(0), state())\n      .WillOnce(Return(Network::Connection::State::Closing));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  filter_.reset();\n  EXPECT_EQ(1U, config_->stats().upstream_flush_active_.value());\n\n  EXPECT_CALL(*upstream_connections_.at(0), close(Network::ConnectionCloseType::NoFlush));\n  idle_timer->invokeCallback();\n  EXPECT_EQ(1U, config_->stats().upstream_flush_total_.value());\n  EXPECT_EQ(0U, config_->stats().upstream_flush_active_.value());\n  EXPECT_EQ(1U, 
config_->stats().idle_timeout_.value());\n}\n\n// Tests that upstream flush will close a connection if it reads data from the upstream\n// connection after the downstream connection is closed (nowhere to send it).\nTEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(UpstreamFlushReceiveUpstreamData)) {\n  setup(1);\n  raiseEventUpstreamConnected(0);\n\n  EXPECT_CALL(*upstream_connections_.at(0),\n              close(Network::ConnectionCloseType::FlushWrite))\n      .WillOnce(Return()); // Cancel default action of raising LocalClose\n  EXPECT_CALL(*upstream_connections_.at(0), state())\n      .WillOnce(Return(Network::Connection::State::Closing));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  filter_.reset();\n\n  EXPECT_EQ(1U, config_->stats().upstream_flush_active_.value());\n\n  // Send some bytes; no timeout configured so this should be a no-op (not a crash).\n  Buffer::OwnedImpl buffer(\"a\");\n  EXPECT_CALL(*upstream_connections_.at(0), close(Network::ConnectionCloseType::NoFlush));\n  upstream_callbacks_->onUpstreamData(buffer, false);\n}\n\n// Tests that downstream connection can access upstream connections filter state.\nTEST_F(TcpProxyTest, ShareFilterState) {\n  setup(1);\n\n  upstream_connections_.at(0)->streamInfo().filterState()->setData(\n      \"envoy.tcp_proxy.cluster\", std::make_unique<PerConnectionCluster>(\"filter_state_cluster\"),\n      StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Connection);\n  raiseEventUpstreamConnected(0);\n  EXPECT_EQ(\"filter_state_cluster\",\n            filter_callbacks_.connection_.streamInfo()\n                .upstreamFilterState()\n                ->getDataReadOnly<PerConnectionCluster>(\"envoy.tcp_proxy.cluster\")\n                .value());\n}\n\n// Tests that filter callback can access downstream and upstream address and ssl properties.\nTEST_F(TcpProxyTest, AccessDownstreamAndUpstreamProperties) {\n  setup(1);\n\n  
raiseEventUpstreamConnected(0);\n  EXPECT_EQ(filter_callbacks_.connection().streamInfo().downstreamLocalAddress(),\n            filter_callbacks_.connection().localAddress());\n  EXPECT_EQ(filter_callbacks_.connection().streamInfo().downstreamRemoteAddress(),\n            filter_callbacks_.connection().remoteAddress());\n  EXPECT_EQ(filter_callbacks_.connection().streamInfo().downstreamSslConnection(),\n            filter_callbacks_.connection().ssl());\n  EXPECT_EQ(filter_callbacks_.connection().streamInfo().upstreamLocalAddress(),\n            upstream_connections_.at(0)->localAddress());\n  EXPECT_EQ(filter_callbacks_.connection().streamInfo().upstreamSslConnection(),\n            upstream_connections_.at(0)->streamInfo().downstreamSslConnection());\n}\n\nclass TcpProxyRoutingTest : public testing::Test {\npublic:\n  TcpProxyRoutingTest() = default;\n\n  void setup(bool avoid_boosting = true) {\n    const std::string yaml = R\"EOF(\n    stat_prefix: name\n    cluster: fallback_cluster\n    deprecated_v1:\n      routes:\n      - destination_ports: 1-9999\n        cluster: fake_cluster\n    )EOF\";\n\n    config_ =\n        std::make_shared<Config>(constructConfigFromYaml(yaml, factory_context_, avoid_boosting));\n  }\n\n  void initializeFilter() {\n    EXPECT_CALL(filter_callbacks_, connection()).WillRepeatedly(ReturnRef(connection_));\n\n    filter_ = std::make_unique<Filter>(config_, factory_context_.cluster_manager_);\n    filter_->initializeReadFilterCallbacks(filter_callbacks_);\n  }\n\n  Event::TestTimeSystem& timeSystem() { return factory_context_.timeSystem(); }\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  ConfigSharedPtr config_;\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  std::unique_ptr<Filter> filter_;\n};\n\nTEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(NonRoutableConnection)) {\n  setup(false);\n\n  const uint32_t total_cx = 
config_->stats().downstream_cx_total_.value();\n  const uint32_t non_routable_cx = config_->stats().downstream_cx_no_route_.value();\n\n  initializeFilter();\n\n  // Port 10000 is outside the specified destination port range.\n  connection_.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 10000);\n\n  // Expect filter to try to open a connection to the fallback cluster.\n  EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(\"fallback_cluster\", _, _))\n      .WillOnce(Return(nullptr));\n\n  filter_->onNewConnection();\n\n  EXPECT_EQ(total_cx + 1, config_->stats().downstream_cx_total_.value());\n  EXPECT_EQ(non_routable_cx, config_->stats().downstream_cx_no_route_.value());\n}\n\nTEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(RoutableConnection)) {\n  setup(false);\n\n  const uint32_t total_cx = config_->stats().downstream_cx_total_.value();\n  const uint32_t non_routable_cx = config_->stats().downstream_cx_no_route_.value();\n\n  initializeFilter();\n\n  // Port 9999 is within the specified destination port range.\n  connection_.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 9999);\n\n  // Expect filter to try to open a connection to specified cluster.\n  EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(\"fake_cluster\", _, _))\n      .WillOnce(Return(nullptr));\n\n  filter_->onNewConnection();\n\n  EXPECT_EQ(total_cx + 1, config_->stats().downstream_cx_total_.value());\n  EXPECT_EQ(non_routable_cx, config_->stats().downstream_cx_no_route_.value());\n}\n\n// Test that the tcp proxy uses the cluster from FilterState if set\nTEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UseClusterFromPerConnectionCluster)) {\n  setup(false);\n  initializeFilter();\n\n  connection_.streamInfo().filterState()->setData(\n      \"envoy.tcp_proxy.cluster\", std::make_unique<PerConnectionCluster>(\"filter_state_cluster\"),\n      StreamInfo::FilterState::StateType::Mutable, 
StreamInfo::FilterState::LifeSpan::Connection);\n\n  // Expect filter to try to open a connection to specified cluster.\n  EXPECT_CALL(factory_context_.cluster_manager_,\n              tcpConnPoolForCluster(\"filter_state_cluster\", _, _))\n      .WillOnce(Return(nullptr));\n\n  filter_->onNewConnection();\n}\n\n// Test that the tcp proxy forwards the requested server name from FilterState if set\nTEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UpstreamServerName)) {\n  setup(false);\n  initializeFilter();\n\n  connection_.streamInfo().filterState()->setData(\n      \"envoy.network.upstream_server_name\", std::make_unique<UpstreamServerName>(\"www.example.com\"),\n      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Connection);\n\n  // Expect filter to try to open a connection to a cluster with the transport socket options with\n  // override-server-name\n  EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(_, _, _))\n      .WillOnce(\n          Invoke([](const std::string& cluster, Upstream::ResourcePriority,\n                    Upstream::LoadBalancerContext* context) -> Tcp::ConnectionPool::Instance* {\n            EXPECT_EQ(cluster, \"fake_cluster\");\n            Network::TransportSocketOptionsSharedPtr transport_socket_options =\n                context->upstreamTransportSocketOptions();\n            EXPECT_NE(transport_socket_options, nullptr);\n            EXPECT_TRUE(transport_socket_options->serverNameOverride().has_value());\n            EXPECT_EQ(transport_socket_options->serverNameOverride().value(), \"www.example.com\");\n            return nullptr;\n          }));\n\n  // Port 9999 is within the specified destination port range.\n  connection_.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 9999);\n\n  filter_->onNewConnection();\n}\n\n// Test that the tcp proxy override ALPN from FilterState if set\nTEST_F(TcpProxyRoutingTest, 
DEPRECATED_FEATURE_TEST(ApplicationProtocols)) {\n  setup(false);\n  initializeFilter();\n\n  connection_.streamInfo().filterState()->setData(\n      Network::ApplicationProtocols::key(),\n      std::make_unique<Network::ApplicationProtocols>(std::vector<std::string>{\"foo\", \"bar\"}),\n      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Connection);\n\n  // Expect filter to try to open a connection to a cluster with the transport socket options with\n  // override-application-protocol\n  EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(_, _, _))\n      .WillOnce(\n          Invoke([](const std::string& cluster, Upstream::ResourcePriority,\n                    Upstream::LoadBalancerContext* context) -> Tcp::ConnectionPool::Instance* {\n            EXPECT_EQ(cluster, \"fake_cluster\");\n            Network::TransportSocketOptionsSharedPtr transport_socket_options =\n                context->upstreamTransportSocketOptions();\n            EXPECT_NE(transport_socket_options, nullptr);\n            EXPECT_FALSE(transport_socket_options->applicationProtocolListOverride().empty());\n            EXPECT_EQ(transport_socket_options->applicationProtocolListOverride().size(), 2);\n            EXPECT_EQ(transport_socket_options->applicationProtocolListOverride()[0], \"foo\");\n            EXPECT_EQ(transport_socket_options->applicationProtocolListOverride()[1], \"bar\");\n            return nullptr;\n          }));\n\n  // Port 9999 is within the specified destination port range.\n  connection_.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 9999);\n\n  filter_->onNewConnection();\n}\n\nclass TcpProxyNonDeprecatedConfigRoutingTest : public TcpProxyRoutingTest {\npublic:\n  TcpProxyNonDeprecatedConfigRoutingTest() = default;\n\n  void setup() {\n    const std::string yaml = R\"EOF(\n    stat_prefix: name\n    cluster: fake_cluster\n    )EOF\";\n\n    config_ = 
std::make_shared<Config>(constructConfigFromYaml(yaml, factory_context_));\n  }\n};\n\nTEST_F(TcpProxyNonDeprecatedConfigRoutingTest, ClusterNameSet) {\n  setup();\n\n  initializeFilter();\n\n  // Port 9999 is within the specified destination port range.\n  connection_.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 9999);\n\n  // Expect filter to try to open a connection to specified cluster.\n  EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(\"fake_cluster\", _, _))\n      .WillOnce(Return(nullptr));\n  absl::optional<Upstream::ClusterInfoConstSharedPtr> cluster_info;\n  EXPECT_CALL(connection_.stream_info_, setUpstreamClusterInfo(_))\n      .WillOnce(\n          Invoke([&cluster_info](const Upstream::ClusterInfoConstSharedPtr& upstream_cluster_info) {\n            cluster_info = upstream_cluster_info;\n          }));\n  EXPECT_CALL(connection_.stream_info_, upstreamClusterInfo())\n      .WillOnce(ReturnPointee(&cluster_info));\n\n  filter_->onNewConnection();\n\n  EXPECT_EQ(connection_.stream_info_.upstreamClusterInfo().value()->name(), \"fake_cluster\");\n}\n\nclass TcpProxyHashingTest : public testing::Test {\npublic:\n  TcpProxyHashingTest() = default;\n\n  void setup() {\n    const std::string yaml = R\"EOF(\n    stat_prefix: name\n    cluster: fake_cluster\n    hash_policy:\n    - source_ip: {}\n    )EOF\";\n\n    config_ = std::make_shared<Config>(constructConfigFromYaml(yaml, factory_context_));\n  }\n\n  void initializeFilter() {\n    EXPECT_CALL(filter_callbacks_, connection()).WillRepeatedly(ReturnRef(connection_));\n\n    filter_ = std::make_unique<Filter>(config_, factory_context_.cluster_manager_);\n    filter_->initializeReadFilterCallbacks(filter_callbacks_);\n  }\n\n  Event::TestTimeSystem& timeSystem() { return factory_context_.timeSystem(); }\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  ConfigSharedPtr config_;\n  NiceMock<Network::MockConnection> 
connection_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  std::unique_ptr<Filter> filter_;\n};\n\n// Test TCP proxy use source IP to hash.\nTEST_F(TcpProxyHashingTest, HashWithSourceIp) {\n  setup();\n  initializeFilter();\n  EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster(_, _, _))\n      .WillOnce(\n          Invoke([](const std::string& cluster, Upstream::ResourcePriority,\n                    Upstream::LoadBalancerContext* context) -> Tcp::ConnectionPool::Instance* {\n            EXPECT_EQ(cluster, \"fake_cluster\");\n            EXPECT_TRUE(context->computeHashKey().has_value());\n            return nullptr;\n          }));\n\n  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 1111);\n  connection_.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"2.3.4.5\", 2222);\n\n  filter_->onNewConnection();\n}\n\n} // namespace\n} // namespace TcpProxy\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/tcp_proxy/upstream_test.cc",
    "content": "#include <memory>\n\n#include \"common/tcp_proxy/upstream.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/http/stream_encoder.h\"\n#include \"test/mocks/tcp/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\n\nnamespace Envoy {\nnamespace TcpProxy {\nnamespace {\n\nclass HttpUpstreamTest : public testing::Test {\npublic:\n  HttpUpstreamTest() {\n    EXPECT_CALL(encoder_, getStream()).Times(AnyNumber());\n    EXPECT_CALL(encoder_, encodeHeaders(_, false));\n    upstream_ = std::make_unique<HttpUpstream>(callbacks_, hostname_);\n    upstream_->setRequestEncoder(encoder_, true);\n  }\n\n  Http::MockRequestEncoder encoder_;\n  NiceMock<Tcp::ConnectionPool::MockUpstreamCallbacks> callbacks_;\n  std::unique_ptr<HttpUpstream> upstream_;\n  std::string hostname_{\"default.host.com\"};\n};\n\nTEST_F(HttpUpstreamTest, WriteUpstream) {\n  EXPECT_CALL(encoder_, encodeData(BufferStringEqual(\"foo\"), false));\n  Buffer::OwnedImpl buffer1(\"foo\");\n  upstream_->encodeData(buffer1, false);\n\n  EXPECT_CALL(encoder_, encodeData(BufferStringEqual(\"bar\"), true));\n  Buffer::OwnedImpl buffer2(\"bar\");\n  upstream_->encodeData(buffer2, true);\n\n  // New upstream with no encoder\n  upstream_ = std::make_unique<HttpUpstream>(callbacks_, hostname_);\n  upstream_->encodeData(buffer2, true);\n}\n\nTEST_F(HttpUpstreamTest, WriteDownstream) {\n  EXPECT_CALL(callbacks_, onUpstreamData(BufferStringEqual(\"foo\"), false));\n  Buffer::OwnedImpl buffer1(\"foo\");\n  upstream_->responseDecoder().decodeData(buffer1, false);\n\n  EXPECT_CALL(callbacks_, onUpstreamData(BufferStringEqual(\"bar\"), true));\n  Buffer::OwnedImpl buffer2(\"bar\");\n  upstream_->responseDecoder().decodeData(buffer2, true);\n}\n\nTEST_F(HttpUpstreamTest, InvalidUpgradeWithEarlyFin) {\n  EXPECT_CALL(callbacks_, onEvent(_));\n  Http::ResponseHeaderMapPtr headers{new Http::TestResponseHeaderMapImpl{{\":status\", 
\"200\"}}};\n  upstream_->responseDecoder().decodeHeaders(std::move(headers), true);\n}\n\nTEST_F(HttpUpstreamTest, InvalidUpgradeWithNon200) {\n  EXPECT_CALL(callbacks_, onEvent(_));\n  Http::ResponseHeaderMapPtr headers{new Http::TestResponseHeaderMapImpl{{\":status\", \"301\"}}};\n  upstream_->responseDecoder().decodeHeaders(std::move(headers), false);\n}\n\nTEST_F(HttpUpstreamTest, ReadDisable) {\n  EXPECT_CALL(encoder_.stream_, readDisable(true));\n  EXPECT_TRUE(upstream_->readDisable(true));\n\n  EXPECT_CALL(encoder_.stream_, readDisable(false));\n  EXPECT_TRUE(upstream_->readDisable(false));\n\n  // New upstream with no encoder\n  upstream_ = std::make_unique<HttpUpstream>(callbacks_, hostname_);\n  EXPECT_FALSE(upstream_->readDisable(true));\n}\n\nTEST_F(HttpUpstreamTest, AddBytesSentCallbackForCoverage) {\n  upstream_->addBytesSentCallback([&](uint64_t) {});\n}\n\nTEST_F(HttpUpstreamTest, DownstreamDisconnect) {\n  EXPECT_CALL(encoder_.stream_, resetStream(Http::StreamResetReason::LocalReset));\n  EXPECT_CALL(callbacks_, onEvent(_)).Times(0);\n  EXPECT_TRUE(upstream_->onDownstreamEvent(Network::ConnectionEvent::LocalClose) == nullptr);\n}\n\nTEST_F(HttpUpstreamTest, UpstreamReset) {\n  EXPECT_CALL(encoder_.stream_, resetStream(_)).Times(0);\n  EXPECT_CALL(callbacks_, onEvent(_));\n  upstream_->onResetStream(Http::StreamResetReason::ConnectionTermination, \"\");\n}\n\nTEST_F(HttpUpstreamTest, UpstreamWatermarks) {\n  EXPECT_CALL(callbacks_, onAboveWriteBufferHighWatermark());\n  upstream_->onAboveWriteBufferHighWatermark();\n\n  EXPECT_CALL(callbacks_, onBelowWriteBufferLowWatermark());\n  upstream_->onBelowWriteBufferLowWatermark();\n}\n\n} // namespace\n} // namespace TcpProxy\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/thread_local/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"thread_local_impl_test\",\n    srcs = [\"thread_local_impl_test.cc\"],\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/thread_local:thread_local_lib\",\n        \"//test/mocks/event:event_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/common/thread_local/thread_local_impl_test.cc",
    "content": "#include \"common/common/thread.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/thread_local/thread_local_impl.h\"\n\n#include \"test/mocks/event/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Ref;\nusing testing::ReturnPointee;\n\nnamespace Envoy {\nnamespace ThreadLocal {\n\nclass TestThreadLocalObject : public ThreadLocalObject {\npublic:\n  ~TestThreadLocalObject() override { onDestroy(); }\n\n  MOCK_METHOD(void, onDestroy, ());\n};\n\nclass ThreadLocalInstanceImplTest : public testing::Test {\npublic:\n  ThreadLocalInstanceImplTest() {\n    tls_.registerThread(main_dispatcher_, true);\n    EXPECT_EQ(&main_dispatcher_, &tls_.dispatcher());\n    EXPECT_CALL(thread_dispatcher_, post(_));\n    tls_.registerThread(thread_dispatcher_, false);\n  }\n\n  MOCK_METHOD(ThreadLocalObjectSharedPtr, createThreadLocal, (Event::Dispatcher & dispatcher));\n\n  TestThreadLocalObject& setObject(Slot& slot) {\n    std::shared_ptr<TestThreadLocalObject> object(new TestThreadLocalObject());\n    TestThreadLocalObject& object_ref = *object;\n    EXPECT_CALL(thread_dispatcher_, post(_));\n    EXPECT_CALL(*this, createThreadLocal(Ref(thread_dispatcher_))).WillOnce(ReturnPointee(&object));\n    EXPECT_CALL(*this, createThreadLocal(Ref(main_dispatcher_))).WillOnce(ReturnPointee(&object));\n    slot.set([this](Event::Dispatcher& dispatcher) -> ThreadLocalObjectSharedPtr {\n      return createThreadLocal(dispatcher);\n    });\n    object.reset();\n    return object_ref;\n  }\n  int freeSlotIndexesListSize() { return tls_.free_slot_indexes_.size(); }\n  InstanceImpl tls_;\n\n  Event::MockDispatcher main_dispatcher_{\"test_main_thread\"};\n  Event::MockDispatcher thread_dispatcher_{\"test_worker_thread\"};\n};\n\nTEST_F(ThreadLocalInstanceImplTest, All) {\n  InSequence s;\n\n  // Free a slot without ever calling set.\n  
EXPECT_CALL(thread_dispatcher_, post(_));\n  SlotPtr slot1 = tls_.allocateSlot();\n  slot1.reset();\n  EXPECT_EQ(freeSlotIndexesListSize(), 1);\n\n  // Create a new slot which should take the place of the old slot. ReturnPointee() is used to\n  // avoid \"leaks\" when using InSequence and shared_ptr.\n  SlotPtr slot2 = tls_.allocateSlot();\n  TestThreadLocalObject& object_ref2 = setObject(*slot2);\n  EXPECT_EQ(freeSlotIndexesListSize(), 0);\n\n  EXPECT_CALL(thread_dispatcher_, post(_));\n  EXPECT_CALL(object_ref2, onDestroy());\n  EXPECT_EQ(freeSlotIndexesListSize(), 0);\n  slot2.reset();\n  EXPECT_EQ(freeSlotIndexesListSize(), 1);\n\n  // Make two new slots, shutdown global threading, and delete them. We should not see any\n  // cross-thread posts at this point. We should also see destruction in reverse order.\n  SlotPtr slot3 = tls_.allocateSlot();\n  TestThreadLocalObject& object_ref3 = setObject(*slot3);\n  SlotPtr slot4 = tls_.allocateSlot();\n  TestThreadLocalObject& object_ref4 = setObject(*slot4);\n\n  tls_.shutdownGlobalThreading();\n  slot3.reset();\n  slot4.reset();\n  EXPECT_EQ(freeSlotIndexesListSize(), 0);\n\n  EXPECT_CALL(object_ref4, onDestroy());\n  EXPECT_CALL(object_ref3, onDestroy());\n  tls_.shutdownThread();\n}\n\nstruct ThreadStatus {\n  uint64_t thread_local_calls_{0};\n  bool all_threads_complete_ = false;\n};\n\nTEST_F(ThreadLocalInstanceImplTest, CallbackNotInvokedAfterDeletion) {\n  InSequence s;\n\n  // Allocate a slot and invoke all callback variants. 
Hold all callbacks and destroy the slot.\n  // Make sure that recycling happens appropriately.\n  SlotPtr slot = tls_.allocateSlot();\n\n  std::list<Event::PostCb> holder;\n  EXPECT_CALL(thread_dispatcher_, post(_)).Times(4).WillRepeatedly(Invoke([&](Event::PostCb cb) {\n    // Holds the posted callback.\n    holder.push_back(cb);\n  }));\n\n  uint32_t total_callbacks = 0;\n  slot->set([&total_callbacks](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    // Callbacks happen on the main thread but not the workers, so track the total.\n    total_callbacks++;\n    return nullptr;\n  });\n  slot->runOnAllThreads([&total_callbacks](ThreadLocal::ThreadLocalObjectSharedPtr)\n                            -> ThreadLocal::ThreadLocalObjectSharedPtr {\n    // Callbacks happen on the main thread but not the workers, so track the total.\n    total_callbacks++;\n    return nullptr;\n  });\n  ThreadStatus thread_status;\n  slot->runOnAllThreads(\n      [&thread_status](\n          ThreadLocal::ThreadLocalObjectSharedPtr) -> ThreadLocal::ThreadLocalObjectSharedPtr {\n        ++thread_status.thread_local_calls_;\n        return nullptr;\n      },\n      [&thread_status]() -> void {\n        // Callbacks happen on the main thread but not the workers.\n        EXPECT_EQ(thread_status.thread_local_calls_, 1);\n        thread_status.all_threads_complete_ = true;\n      });\n  EXPECT_FALSE(thread_status.all_threads_complete_);\n\n  EXPECT_EQ(2, total_callbacks);\n  slot.reset();\n  EXPECT_EQ(freeSlotIndexesListSize(), 1);\n\n  EXPECT_CALL(main_dispatcher_, post(_));\n  while (!holder.empty()) {\n    holder.front()();\n    holder.pop_front();\n  }\n  EXPECT_EQ(2, total_callbacks);\n  EXPECT_TRUE(thread_status.all_threads_complete_);\n\n  tls_.shutdownGlobalThreading();\n}\n\n// Test that the config passed into the update callback is the previous version stored in the slot.\nTEST_F(ThreadLocalInstanceImplTest, UpdateCallback) {\n  InSequence s;\n\n  SlotPtr slot = 
tls_.allocateSlot();\n\n  auto newer_version = std::make_shared<TestThreadLocalObject>();\n  bool update_called = false;\n\n  TestThreadLocalObject& object_ref = setObject(*slot);\n  auto update_cb = [&object_ref, &update_called,\n                    newer_version](ThreadLocalObjectSharedPtr obj) -> ThreadLocalObjectSharedPtr {\n    // The unit test setup have two dispatchers registered, but only one thread, this lambda will be\n    // called twice in the same thread.\n    if (!update_called) {\n      EXPECT_EQ(obj.get(), &object_ref);\n      update_called = true;\n    } else {\n      EXPECT_EQ(obj.get(), newer_version.get());\n    }\n\n    return newer_version;\n  };\n  EXPECT_CALL(thread_dispatcher_, post(_));\n  EXPECT_CALL(object_ref, onDestroy());\n  EXPECT_CALL(*newer_version, onDestroy());\n  slot->runOnAllThreads(update_cb);\n\n  EXPECT_EQ(newer_version.get(), &slot->getTyped<TestThreadLocalObject>());\n\n  tls_.shutdownGlobalThreading();\n  tls_.shutdownThread();\n}\n\n// TODO(ramaraochavali): Run this test with real threads. 
The current issue in the unit\n// testing environment is, the post to main_dispatcher is not working as expected.\n\n// Validate ThreadLocal::runOnAllThreads behavior with all_thread_complete call back.\nTEST_F(ThreadLocalInstanceImplTest, RunOnAllThreads) {\n  SlotPtr tlsptr = tls_.allocateSlot();\n  TestThreadLocalObject& object_ref = setObject(*tlsptr);\n\n  EXPECT_CALL(thread_dispatcher_, post(_));\n  EXPECT_CALL(main_dispatcher_, post(_));\n\n  // Ensure that the thread local call back and all_thread_complete call back are called.\n  ThreadStatus thread_status;\n  tlsptr->runOnAllThreads(\n      [&thread_status](ThreadLocal::ThreadLocalObjectSharedPtr object)\n          -> ThreadLocal::ThreadLocalObjectSharedPtr {\n        ++thread_status.thread_local_calls_;\n        return object;\n      },\n      [&thread_status]() -> void {\n        EXPECT_EQ(thread_status.thread_local_calls_, 2);\n        thread_status.all_threads_complete_ = true;\n      });\n  EXPECT_TRUE(thread_status.all_threads_complete_);\n\n  tls_.shutdownGlobalThreading();\n  tlsptr.reset();\n  EXPECT_EQ(freeSlotIndexesListSize(), 0);\n  EXPECT_CALL(object_ref, onDestroy());\n  tls_.shutdownThread();\n}\n\n// Validate ThreadLocal::InstanceImpl's dispatcher() behavior.\nTEST(ThreadLocalInstanceImplDispatcherTest, Dispatcher) {\n  InstanceImpl tls;\n\n  Api::ApiPtr api = Api::createApiForTest();\n  Event::DispatcherPtr main_dispatcher(api->allocateDispatcher(\"test_main_thread\"));\n  Event::DispatcherPtr thread_dispatcher(api->allocateDispatcher(\"test_worker_thread\"));\n\n  tls.registerThread(*main_dispatcher, true);\n  tls.registerThread(*thread_dispatcher, false);\n\n  // Ensure that the dispatcher update in tls posted during the above registerThread happens.\n  main_dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n  // Verify we have the expected dispatcher for the main thread.\n  EXPECT_EQ(main_dispatcher.get(), &tls.dispatcher());\n\n  Thread::ThreadPtr thread =\n      
Thread::threadFactoryForTest().createThread([&thread_dispatcher, &tls]() {\n        // Ensure that the dispatcher update in tls posted during the above registerThread happens.\n        thread_dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n        // Verify we have the expected dispatcher for the new thread thread.\n        EXPECT_EQ(thread_dispatcher.get(), &tls.dispatcher());\n      });\n  thread->join();\n\n  // Verify we still have the expected dispatcher for the main thread.\n  EXPECT_EQ(main_dispatcher.get(), &tls.dispatcher());\n\n  tls.shutdownGlobalThreading();\n  tls.shutdownThread();\n}\n\n} // namespace ThreadLocal\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/tracing/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"http_tracer_impl_test\",\n    srcs = [\n        \"http_tracer_impl_test.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//source/common/common:base64_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/http:request_id_extension_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/tracing/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http_tracer_manager_impl_test\",\n    srcs = [\n        \"http_tracer_manager_impl_test.cc\",\n    ],\n    deps = [\n        \"//source/common/tracing:http_tracer_config_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/common/tracing:http_tracer_manager_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:tracer_factory_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/test_common:registry_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/common/tracing/http_tracer_impl_test.cc",
    "content": "#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/type/tracing/v3/custom_tag.pb.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/common/random_generator.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/request_id_extension_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::Eq;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnPointee;\n\nnamespace Envoy {\nnamespace Tracing {\nnamespace {\n\nTEST(HttpTracerUtilityTest, IsTracing) {\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  NiceMock<Stats::MockStore> stats;\n  Random::RandomGeneratorImpl random;\n  std::string not_traceable_guid = random.uuid();\n\n  auto rid_extension = Http::RequestIDExtensionFactory::defaultInstance(random);\n  ON_CALL(stream_info, getRequestIDExtension()).WillByDefault(Return(rid_extension));\n\n  std::string forced_guid = random.uuid();\n  Http::TestRequestHeaderMapImpl forced_header{{\"x-request-id\", forced_guid}};\n  rid_extension->setTraceStatus(forced_header, Http::TraceStatus::Forced);\n\n  std::string sampled_guid = random.uuid();\n  Http::TestRequestHeaderMapImpl 
sampled_header{{\"x-request-id\", sampled_guid}};\n  rid_extension->setTraceStatus(sampled_header, Http::TraceStatus::Sampled);\n\n  std::string client_guid = random.uuid();\n  Http::TestRequestHeaderMapImpl client_header{{\"x-request-id\", client_guid}};\n  rid_extension->setTraceStatus(client_header, Http::TraceStatus::Client);\n\n  Http::TestRequestHeaderMapImpl not_traceable_header{{\"x-request-id\", not_traceable_guid}};\n  Http::TestRequestHeaderMapImpl empty_header{};\n\n  // Force traced.\n  {\n    EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(false));\n\n    Decision result = HttpTracerUtility::isTracing(stream_info, forced_header);\n    EXPECT_EQ(Reason::ServiceForced, result.reason);\n    EXPECT_TRUE(result.traced);\n  }\n\n  // Sample traced.\n  {\n    EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(false));\n\n    Decision result = HttpTracerUtility::isTracing(stream_info, sampled_header);\n    EXPECT_EQ(Reason::Sampling, result.reason);\n    EXPECT_TRUE(result.traced);\n  }\n\n  // Health Check request.\n  {\n    Http::TestRequestHeaderMapImpl traceable_header_hc{{\"x-request-id\", forced_guid}};\n    EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(true));\n\n    Decision result = HttpTracerUtility::isTracing(stream_info, traceable_header_hc);\n    EXPECT_EQ(Reason::HealthCheck, result.reason);\n    EXPECT_FALSE(result.traced);\n  }\n\n  // Client traced.\n  {\n    EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(false));\n\n    Decision result = HttpTracerUtility::isTracing(stream_info, client_header);\n    EXPECT_EQ(Reason::ClientForced, result.reason);\n    EXPECT_TRUE(result.traced);\n  }\n\n  // No request id.\n  {\n    Http::TestRequestHeaderMapImpl headers;\n    EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(false));\n    Decision result = HttpTracerUtility::isTracing(stream_info, headers);\n    EXPECT_EQ(Reason::NotTraceableRequestId, result.reason);\n    EXPECT_FALSE(result.traced);\n  }\n\n  // 
Broken request id.\n  {\n    Http::TestRequestHeaderMapImpl headers{{\"x-request-id\", \"not-real-x-request-id\"}};\n    EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(false));\n    Decision result = HttpTracerUtility::isTracing(stream_info, headers);\n    EXPECT_EQ(Reason::NotTraceableRequestId, result.reason);\n    EXPECT_FALSE(result.traced);\n  }\n}\n\nclass HttpConnManFinalizerImplTest : public testing::Test {\nprotected:\n  struct CustomTagCase {\n    std::string custom_tag;\n    bool set;\n    std::string value;\n  };\n\n  void expectSetCustomTags(const std::vector<CustomTagCase>& cases) {\n    for (const CustomTagCase& cas : cases) {\n      envoy::type::tracing::v3::CustomTag custom_tag;\n      TestUtility::loadFromYaml(cas.custom_tag, custom_tag);\n      config.custom_tags_.emplace(custom_tag.tag(), HttpTracerUtility::createCustomTag(custom_tag));\n      if (cas.set) {\n        EXPECT_CALL(span, setTag(Eq(custom_tag.tag()), Eq(cas.value)));\n      } else {\n        EXPECT_CALL(span, setTag(Eq(custom_tag.tag()), _)).Times(0);\n      }\n    }\n  }\n\n  NiceMock<MockSpan> span;\n  NiceMock<MockConfig> config;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n};\n\nTEST_F(HttpConnManFinalizerImplTest, OriginalAndLongPath) {\n  const std::string path(300, 'a');\n  const std::string path_prefix = \"http://\";\n  const std::string expected_path(256, 'a');\n  const std::string expected_ip = \"10.0.0.100\";\n  const auto remote_address = Network::Address::InstanceConstSharedPtr{\n      new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)};\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"id\"},\n                                                 {\"x-envoy-original-path\", path},\n                                                 {\":method\", \"GET\"},\n                                                 {\":path\", \"\"},\n                                                 {\"x-forwarded-proto\", \"http\"}};\n  
Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11));\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));\n  absl::optional<uint32_t> response_code;\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, downstreamDirectRemoteAddress())\n      .WillRepeatedly(ReturnPointee(&remote_address));\n\n  EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(path_prefix + expected_path)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq(\"GET\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/2\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip)));\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,\n                                            &response_trailers, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, NoGeneratedId) {\n  const std::string path(300, 'a');\n  const std::string path_prefix = \"http://\";\n  const std::string expected_path(256, 'a');\n  const std::string expected_ip = \"10.0.0.100\";\n  const auto remote_address = Network::Address::InstanceConstSharedPtr{\n      new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)};\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"\"},\n                                                 {\"x-envoy-original-path\", path},\n                                                 {\":method\", \"GET\"},\n                                                 {\"x-forwarded-proto\", \"http\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  
Http::TestResponseTrailerMapImpl response_trailers;\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11));\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));\n  absl::optional<uint32_t> response_code;\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, downstreamDirectRemoteAddress())\n      .WillRepeatedly(ReturnPointee(&remote_address));\n\n  EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(path_prefix + expected_path)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq(\"GET\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/2\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip)));\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,\n                                            &response_trailers, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, Connect) {\n  const std::string path(300, 'a');\n  const std::string path_prefix = \"http://\";\n  const std::string expected_path(256, 'a');\n  const std::string expected_ip = \"10.0.0.100\";\n  const auto remote_address = Network::Address::InstanceConstSharedPtr{\n      new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)};\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"CONNECT\"},\n                                                 {\"x-forwarded-proto\", \"http\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, 
bytesSent()).WillOnce(Return(11));\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));\n  absl::optional<uint32_t> response_code;\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, downstreamDirectRemoteAddress())\n      .WillRepeatedly(ReturnPointee(&remote_address));\n\n  EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(\"\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq(\"CONNECT\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/2\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip)));\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,\n                                            &response_trailers, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, NullRequestHeadersAndNullRouteEntry) {\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11));\n  absl::optional<uint32_t> response_code;\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(stream_info, routeEntry()).WillRepeatedly(Return(nullptr));\n\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"0\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseSize), Eq(\"11\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().RequestSize), Eq(\"10\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(span, 
setTag(Eq(Tracing::Tags::get().UpstreamAddress), _)).Times(0);\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), _)).Times(0);\n\n  expectSetCustomTags({{\"{ tag: a, request_header: { name: X-Ax } }\", false, \"\"},\n                       {R\"EOF(\ntag: b\nmetadata:\n  kind: { route: {} }\n  metadata_key: { key: m.rot, path: [ {key: not-found } ] }\n  default_value: _c)EOF\",\n                        true, \"_c\"},\n                       {R\"EOF(\ntag: c\nmetadata:\n  kind: { cluster: {} }\n  metadata_key: { key: m.cluster, path: [ {key: not-found } ] })EOF\",\n                        false, \"\"},\n                       {R\"EOF(\ntag: d\nmetadata:\n  kind: { host: {} }\n  metadata_key: { key: m.host, path: [ {key: not-found } ] })EOF\",\n                        false, \"\"}});\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, nullptr, nullptr, nullptr, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, StreamInfoLogs) {\n  stream_info.host_->cluster_.name_ = \"my_upstream_cluster\";\n\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11));\n  absl::optional<uint32_t> response_code;\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, upstreamHost()).Times(2);\n  const auto start_timestamp =\n      SystemTime{std::chrono::duration_cast<SystemTime::duration>(std::chrono::hours{123})};\n  EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(start_timestamp));\n\n  const absl::optional<std::chrono::nanoseconds> nanoseconds = std::chrono::nanoseconds{10};\n  EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillRepeatedly(Return(nanoseconds));\n  EXPECT_CALL(stream_info, firstUpstreamTxByteSent()).WillRepeatedly(Return(nanoseconds));\n  EXPECT_CALL(stream_info, lastUpstreamTxByteSent()).WillRepeatedly(Return(nanoseconds));\n  EXPECT_CALL(stream_info, 
firstUpstreamRxByteReceived()).WillRepeatedly(Return(nanoseconds));\n  EXPECT_CALL(stream_info, lastUpstreamRxByteReceived()).WillRepeatedly(Return(nanoseconds));\n  EXPECT_CALL(stream_info, firstDownstreamTxByteSent()).WillRepeatedly(Return(nanoseconds));\n  EXPECT_CALL(stream_info, lastDownstreamTxByteSent()).WillRepeatedly(Return(nanoseconds));\n\n  const auto log_timestamp =\n      start_timestamp + std::chrono::duration_cast<SystemTime::duration>(*nanoseconds);\n  EXPECT_CALL(span, log(log_timestamp, Tracing::Logs::get().LastDownstreamRxByteReceived));\n  EXPECT_CALL(span, log(log_timestamp, Tracing::Logs::get().FirstUpstreamTxByteSent));\n  EXPECT_CALL(span, log(log_timestamp, Tracing::Logs::get().LastUpstreamTxByteSent));\n  EXPECT_CALL(span, log(log_timestamp, Tracing::Logs::get().FirstUpstreamRxByteReceived));\n  EXPECT_CALL(span, log(log_timestamp, Tracing::Logs::get().LastUpstreamRxByteReceived));\n  EXPECT_CALL(span, log(log_timestamp, Tracing::Logs::get().FirstDownstreamTxByteSent));\n  EXPECT_CALL(span, log(log_timestamp, Tracing::Logs::get().LastDownstreamTxByteSent));\n\n  EXPECT_CALL(config, verbose).WillOnce(Return(true));\n  HttpTracerUtility::finalizeDownstreamSpan(span, nullptr, nullptr, nullptr, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, UpstreamClusterTagSet) {\n  stream_info.host_->cluster_.name_ = \"my_upstream_cluster\";\n\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11));\n  absl::optional<uint32_t> response_code;\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, upstreamHost()).Times(2);\n\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"my_upstream_cluster\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"0\")));\n  
EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseSize), Eq(\"11\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().RequestSize), Eq(\"10\")));\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, nullptr, nullptr, nullptr, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, SpanOptionalHeaders) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"id\"},\n                                                 {\":path\", \"/test\"},\n                                                 {\":method\", \"GET\"},\n                                                 {\"x-forwarded-proto\", \"https\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  const std::string expected_ip = \"10.0.0.100\";\n  const auto remote_address = Network::Address::InstanceConstSharedPtr{\n      new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)};\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http10;\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));\n  EXPECT_CALL(stream_info, downstreamDirectRemoteAddress())\n      .WillRepeatedly(ReturnPointee(&remote_address));\n\n  // Check that span is populated correctly.\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GuidXRequestId), Eq(\"id\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(\"https:///test\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq(\"GET\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UserAgent), Eq(\"-\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.0\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().DownstreamCluster), Eq(\"-\")));\n 
 EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().RequestSize), Eq(\"10\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip)));\n\n  absl::optional<uint32_t> response_code;\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(100));\n  EXPECT_CALL(stream_info, upstreamHost()).WillOnce(Return(nullptr));\n\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"0\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseSize), Eq(\"100\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), _)).Times(0);\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), _)).Times(0);\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,\n                                            &response_trailers, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, UnixDomainSocketPeerAddressTag) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"id\"},\n                                                 {\":path\", \"/test\"},\n                                                 {\":method\", \"GET\"},\n                                                 {\"x-forwarded-proto\", \"https\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  const std::string path_{TestEnvironment::unixDomainSocketPath(\"foo\")};\n  const auto remote_address = Network::Utility::resolveUrl(\"unix://\" + path_);\n\n  EXPECT_CALL(stream_info, downstreamDirectRemoteAddress())\n      .WillRepeatedly(ReturnPointee(&remote_address));\n\n  // 
Check that the PeerAddress is populated correctly for Unix domain sockets.\n  EXPECT_CALL(span, setTag(_, _)).Times(AnyNumber());\n  EXPECT_CALL(span,\n              setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(remote_address->logicalName())));\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,\n                                            &response_trailers, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, SpanCustomTags) {\n  TestEnvironment::setEnvVar(\"E_CC\", \"c\", 1);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"id\"},\n                                                 {\":path\", \"/test\"},\n                                                 {\":method\", \"GET\"},\n                                                 {\"x-forwarded-proto\", \"https\"},\n                                                 {\"x-bb\", \"b\"}};\n\n  ProtobufWkt::Struct fake_struct;\n  std::string yaml = R\"EOF(\nree:\n  foo: bar\n  nuu: 1\n  boo: true\n  poo: false\n  stt: { some: thing }\n  lii: [ something ]\n  emp: \"\")EOF\";\n  TestUtility::loadFromYaml(yaml, fake_struct);\n  (*stream_info.metadata_.mutable_filter_metadata())[\"m.req\"].MergeFrom(fake_struct);\n  NiceMock<Router::MockRouteEntry> route_entry;\n  EXPECT_CALL(stream_info, routeEntry()).WillRepeatedly(Return(&route_entry));\n  (*route_entry.metadata_.mutable_filter_metadata())[\"m.rot\"].MergeFrom(fake_struct);\n  std::shared_ptr<envoy::config::core::v3::Metadata> host_metadata =\n      std::make_shared<envoy::config::core::v3::Metadata>();\n  (*host_metadata->mutable_filter_metadata())[\"m.host\"].MergeFrom(fake_struct);\n  (*stream_info.host_->cluster_.metadata_.mutable_filter_metadata())[\"m.cluster\"].MergeFrom(\n      fake_struct);\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http10;\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, 
protocol()).WillRepeatedly(ReturnPointee(&protocol));\n  absl::optional<uint32_t> response_code;\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(100));\n  EXPECT_CALL(*stream_info.host_, metadata()).WillRepeatedly(Return(host_metadata));\n\n  EXPECT_CALL(config, customTags());\n  EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber());\n\n  expectSetCustomTags(\n      {{\"{ tag: aa, literal: { value: a } }\", true, \"a\"},\n       {\"{ tag: bb-1, request_header: { name: X-Bb, default_value: _b } }\", true, \"b\"},\n       {\"{ tag: bb-2, request_header: { name: X-Bb-Not-Found, default_value: b2 } }\", true, \"b2\"},\n       {\"{ tag: bb-3, request_header: { name: X-Bb-Not-Found } }\", false, \"\"},\n       {\"{ tag: cc-1, environment: { name: E_CC } }\", true, \"c\"},\n       {\"{ tag: cc-1-a, environment: { name: E_CC, default_value: _c } }\", true, \"c\"},\n       {\"{ tag: cc-2, environment: { name: E_CC_NOT_FOUND, default_value: c2 } }\", true, \"c2\"},\n       {\"{ tag: cc-3, environment: { name: E_CC_NOT_FOUND} }\", false, \"\"},\n       {R\"EOF(\ntag: dd-1,\nmetadata:\n  kind: { request: {} }\n  metadata_key: { key: m.req, path: [ { key: ree }, { key: foo } ] })EOF\",\n        true, \"bar\"},\n       {R\"EOF(\ntag: dd-2,\nmetadata:\n  kind: { request: {} }\n  metadata_key: { key: m.req, path: [ { key: not-found } ] }\n  default_value: d2)EOF\",\n        true, \"d2\"},\n       {R\"EOF(\ntag: dd-3,\nmetadata:\n  kind: { request: {} }\n  metadata_key: { key: m.req, path: [ { key: not-found } ] })EOF\",\n        false, \"\"},\n       {R\"EOF(\ntag: dd-4,\nmetadata:\n  kind: { request: {} }\n  metadata_key: { key: m.req, path: [ { key: ree }, { key: nuu } ] }\n  default_value: _d)EOF\",\n        true, \"1\"},\n       {R\"EOF(\ntag: dd-5,\nmetadata:\n  kind: { route: {} }\n  metadata_key: { key: m.rot, path: [ { key: ree }, { key: boo } ] })EOF\",\n        true, 
\"true\"},\n       {R\"EOF(\ntag: dd-6,\nmetadata:\n  kind: { route: {} }\n  metadata_key: { key: m.rot, path: [ { key: ree }, { key: poo } ] })EOF\",\n        true, \"false\"},\n       {R\"EOF(\ntag: dd-7,\nmetadata:\n  kind: { cluster: {} }\n  metadata_key: { key: m.cluster, path: [ { key: ree }, { key: emp } ] }\n  default_value: _d)EOF\",\n        true, \"\"},\n       {R\"EOF(\ntag: dd-8,\nmetadata:\n  kind: { cluster: {} }\n  metadata_key: { key: m.cluster, path: [ { key: ree }, { key: lii } ] }\n  default_value: _d)EOF\",\n        true, \"[\\\"something\\\"]\"},\n       {R\"EOF(\ntag: dd-9,\nmetadata:\n  kind: { host: {} }\n  metadata_key: { key: m.host, path: [ { key: ree }, { key: stt } ] })EOF\",\n        true, R\"({\"some\":\"thing\"})\"},\n       {R\"EOF(\ntag: dd-10,\nmetadata:\n  kind: { host: {} }\n  metadata_key: { key: m.host, path: [ { key: not-found } ] })EOF\",\n        false, \"\"}});\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, nullptr, nullptr, stream_info,\n                                            config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, SpanPopulatedFailureResponse) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-request-id\", \"id\"},\n                                                 {\":path\", \"/test\"},\n                                                 {\":method\", \"GET\"},\n                                                 {\"x-forwarded-proto\", \"http\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  const std::string expected_ip = \"10.0.0.100\";\n  const auto remote_address = Network::Address::InstanceConstSharedPtr{\n      new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)};\n\n  request_headers.setHost(\"api\");\n  request_headers.setUserAgent(\"agent\");\n  request_headers.setEnvoyDownstreamServiceCluster(\"downstream_cluster\");\n  request_headers.setClientTraceId(\"client_trace_id\");\n\n  
absl::optional<Http::Protocol> protocol = Http::Protocol::Http10;\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, downstreamDirectRemoteAddress())\n      .WillRepeatedly(ReturnPointee(&remote_address));\n\n  // Check that span is populated correctly.\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GuidXRequestId), Eq(\"id\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(\"http://api/test\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq(\"GET\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UserAgent), Eq(\"agent\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/1.0\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().DownstreamCluster), Eq(\"downstream_cluster\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().RequestSize), Eq(\"10\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GuidXClientTraceId), Eq(\"client_trace_id\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip)));\n\n  EXPECT_CALL(config, verbose).WillOnce(Return(false));\n  EXPECT_CALL(config, maxPathTagLength).WillOnce(Return(256));\n\n  absl::optional<uint32_t> response_code(503);\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(100));\n  ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout))\n      .WillByDefault(Return(true));\n  EXPECT_CALL(stream_info, upstreamHost()).WillOnce(Return(nullptr));\n\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"503\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseSize), Eq(\"100\")));\n  EXPECT_CALL(span, 
setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"UT\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), _)).Times(0);\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), _)).Times(0);\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,\n                                            &response_trailers, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, GrpcOkStatus) {\n  const std::string path_prefix = \"http://\";\n  const std::string expected_ip = \"10.0.0.100\";\n  const auto remote_address = Network::Address::InstanceConstSharedPtr{\n      new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)};\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":path\", \"/pb.Foo/Bar\"},\n                                                 {\":authority\", \"example.com:80\"},\n                                                 {\"content-type\", \"application/grpc\"},\n                                                 {\"x-forwarded-proto\", \"http\"},\n                                                 {\"te\", \"trailers\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-type\", \"application/grpc\"}};\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"0\"}, {\"grpc-message\", \"\"}};\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;\n  absl::optional<uint32_t> response_code(200);\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11));\n  
EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));\n  EXPECT_CALL(stream_info, downstreamDirectRemoteAddress())\n      .WillRepeatedly(ReturnPointee(&remote_address));\n\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().DownstreamCluster), Eq(\"-\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(\"fake_cluster\")));\n  EXPECT_CALL(span,\n              setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(\"http://example.com:80/pb.Foo/Bar\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UserAgent), Eq(\"-\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().RequestSize), Eq(\"10\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseSize), Eq(\"11\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq(\"-\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq(\"POST\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/2\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"200\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcPath), Eq(\"/pb.Foo/Bar\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcAuthority), Eq(\"example.com:80\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcContentType), Eq(\"application/grpc\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq(\"0\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcMessage), Eq(\"\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip)));\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,\n                                            &response_trailers, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, GrpcErrorTag) {\n  const std::string path_prefix = \"http://\";\n  const std::string 
expected_ip = \"10.0.0.100\";\n  const auto remote_address = Network::Address::InstanceConstSharedPtr{\n      new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)};\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":path\", \"/pb.Foo/Bar\"},\n                                                 {\":authority\", \"example.com:80\"},\n                                                 {\"content-type\", \"application/grpc\"},\n                                                 {\"grpc-timeout\", \"10s\"},\n                                                 {\"x-forwarded-proto\", \"http\"},\n                                                 {\"te\", \"trailers\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-type\", \"application/grpc\"}};\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"7\"},\n                                                     {\"grpc-message\", \"permission denied\"}};\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;\n  absl::optional<uint32_t> response_code(200);\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11));\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));\n  EXPECT_CALL(stream_info, downstreamDirectRemoteAddress())\n      .WillRepeatedly(ReturnPointee(&remote_address));\n\n  EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq(\"POST\")));\n  EXPECT_CALL(span, 
setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/2\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"200\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcPath), Eq(\"/pb.Foo/Bar\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcAuthority), Eq(\"example.com:80\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcContentType), Eq(\"application/grpc\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcTimeout), Eq(\"10s\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq(\"7\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcMessage), Eq(\"permission denied\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip)));\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,\n                                            &response_trailers, stream_info, config);\n}\n\nTEST_F(HttpConnManFinalizerImplTest, GrpcTrailersOnly) {\n  const std::string path_prefix = \"http://\";\n  const std::string expected_ip = \"10.0.0.100\";\n  const auto remote_address = Network::Address::InstanceConstSharedPtr{\n      new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)};\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":path\", \"/pb.Foo/Bar\"},\n                                                 {\":authority\", \"example.com:80\"},\n                                                 {\"content-type\", \"application/grpc\"},\n                                                 {\"x-forwarded-proto\", \"http\"},\n                                                 {\"te\", \"trailers\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-type\", \"application/grpc\"},\n     
                                              {\"grpc-status\", \"7\"},\n                                                   {\"grpc-message\", \"permission denied\"}};\n  Http::TestResponseTrailerMapImpl response_trailers;\n\n  absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;\n  absl::optional<uint32_t> response_code(200);\n  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));\n  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));\n  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11));\n  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));\n  EXPECT_CALL(stream_info, downstreamDirectRemoteAddress())\n      .WillRepeatedly(ReturnPointee(&remote_address));\n\n  EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq(\"POST\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq(\"HTTP/2\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq(\"200\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcPath), Eq(\"/pb.Foo/Bar\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcAuthority), Eq(\"example.com:80\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcContentType), Eq(\"application/grpc\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq(\"7\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcMessage), Eq(\"permission denied\")));\n  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip)));\n\n  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,\n                                            &response_trailers, stream_info, config);\n}\n\nTEST(HttpTracerUtilityTest, operationTypeToString) {\n  EXPECT_EQ(\"ingress\", 
HttpTracerUtility::toString(OperationName::Ingress));\n  EXPECT_EQ(\"egress\", HttpTracerUtility::toString(OperationName::Egress));\n}\n\nTEST(HttpNullTracerTest, BasicFunctionality) {\n  HttpNullTracer null_tracer;\n  MockConfig config;\n  StreamInfo::MockStreamInfo stream_info;\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n\n  SpanPtr span_ptr =\n      null_tracer.startSpan(config, request_headers, stream_info, {Reason::Sampling, true});\n  EXPECT_TRUE(dynamic_cast<NullSpan*>(span_ptr.get()) != nullptr);\n\n  span_ptr->setOperation(\"foo\");\n  span_ptr->setTag(\"foo\", \"bar\");\n  span_ptr->setBaggage(\"key\", \"value\");\n  ASSERT_EQ(\"\", span_ptr->getBaggage(\"baggage_key\"));\n  span_ptr->injectContext(request_headers);\n\n  EXPECT_NE(nullptr, span_ptr->spawnChild(config, \"foo\", SystemTime()));\n}\n\nclass HttpTracerImplTest : public testing::Test {\npublic:\n  HttpTracerImplTest() {\n    driver_ = new MockDriver();\n    DriverPtr driver_ptr(driver_);\n    tracer_ = std::make_shared<HttpTracerImpl>(std::move(driver_ptr), local_info_);\n  }\n\n  Http::TestRequestHeaderMapImpl request_headers_{\n      {\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-request-id\", \"foo\"}, {\":authority\", \"test\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  Http::TestResponseTrailerMapImpl response_trailers;\n  StreamInfo::MockStreamInfo stream_info_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  MockConfig config_;\n  MockDriver* driver_;\n  HttpTracerSharedPtr tracer_;\n};\n\nTEST_F(HttpTracerImplTest, BasicFunctionalityNullSpan) {\n  EXPECT_CALL(config_, operationName()).Times(2);\n  EXPECT_CALL(stream_info_, startTime());\n  const std::string operation_name = \"ingress\";\n  EXPECT_CALL(*driver_, startSpan_(_, _, operation_name, stream_info_.start_time_, _))\n      .WillOnce(Return(nullptr));\n  tracer_->startSpan(config_, 
request_headers_, stream_info_, {Reason::Sampling, true});\n}\n\nTEST_F(HttpTracerImplTest, BasicFunctionalityNodeSet) {\n  EXPECT_CALL(stream_info_, startTime());\n  EXPECT_CALL(local_info_, nodeName());\n  EXPECT_CALL(config_, operationName()).Times(2).WillRepeatedly(Return(OperationName::Egress));\n\n  NiceMock<MockSpan>* span = new NiceMock<MockSpan>();\n  const std::string operation_name = \"egress test\";\n  EXPECT_CALL(*driver_, startSpan_(_, _, operation_name, stream_info_.start_time_, _))\n      .WillOnce(Return(span));\n  EXPECT_CALL(*span, setTag(_, _)).Times(testing::AnyNumber());\n  EXPECT_CALL(*span, setTag(Eq(Tracing::Tags::get().NodeId), Eq(\"node_name\")));\n\n  tracer_->startSpan(config_, request_headers_, stream_info_, {Reason::Sampling, true});\n}\n\n} // namespace\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/tracing/http_tracer_manager_impl_test.cc",
    "content": "#include \"common/tracing/http_tracer_config_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n#include \"common/tracing/http_tracer_manager_impl.h\"\n\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/tracer_factory.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::NotNull;\nusing testing::SizeIs;\nusing testing::WhenDynamicCastTo;\n\nnamespace Envoy {\nnamespace Tracing {\nnamespace {\n\nclass SampleTracer : public HttpTracer {\npublic:\n  SpanPtr startSpan(const Config&, Http::RequestHeaderMap&, const StreamInfo::StreamInfo&,\n                    const Tracing::Decision) override {\n    return nullptr;\n  }\n};\n\nclass SampleTracerFactory : public Server::Configuration::TracerFactory {\npublic:\n  Tracing::HttpTracerSharedPtr\n  createHttpTracer(const Protobuf::Message&,\n                   Server::Configuration::TracerFactoryContext&) override {\n    return std::make_shared<SampleTracer>();\n  }\n\n  std::string name() const override { return \"envoy.tracers.sample\"; }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ProtobufWkt::Struct>();\n  }\n};\n\nclass HttpTracerManagerImplTest : public testing::Test {\npublic:\n  NiceMock<Server::Configuration::MockServerFactoryContext> server_factory_context_;\n  HttpTracerManagerImpl http_tracer_manager_{std::make_unique<TracerFactoryContextImpl>(\n      server_factory_context_, ProtobufMessage::getStrictValidationVisitor())};\n\nprivate:\n  SampleTracerFactory sample_tracer_factory_;\n  Registry::InjectFactory<Server::Configuration::TracerFactory> registered_sample_tracer_factory_{\n      sample_tracer_factory_};\n};\n\nTEST_F(HttpTracerManagerImplTest,\n       ShouldReturnHttpNullTracerWhenNoTracingProviderHasBeenConfigured) {\n  auto http_tracer = 
http_tracer_manager_.getOrCreateHttpTracer(nullptr);\n\n  // Should return a null object (Tracing::HttpNullTracer) rather than nullptr.\n  EXPECT_THAT(http_tracer.get(), WhenDynamicCastTo<Tracing::HttpNullTracer*>(NotNull()));\n}\n\nTEST_F(HttpTracerManagerImplTest, ShouldUseProperTracerFactory) {\n  envoy::config::trace::v3::Tracing_Http tracing_config;\n  tracing_config.set_name(\"envoy.tracers.sample\");\n\n  auto http_tracer = http_tracer_manager_.getOrCreateHttpTracer(&tracing_config);\n\n  // Should use proper TracerFactory.\n  EXPECT_THAT(http_tracer.get(), WhenDynamicCastTo<SampleTracer*>(NotNull()));\n}\n\nTEST_F(HttpTracerManagerImplTest, ShouldCacheAndReuseTracers) {\n  envoy::config::trace::v3::Tracing_Http tracing_config;\n  tracing_config.set_name(\"envoy.tracers.sample\");\n  tracing_config.mutable_typed_config()->PackFrom(MessageUtil::keyValueStruct(\"key1\", \"value1\"));\n\n  auto http_tracer_one = http_tracer_manager_.getOrCreateHttpTracer(&tracing_config);\n  // Expect a new HttpTracer to be added to the cache.\n  EXPECT_THAT(http_tracer_manager_.peekCachedTracersForTest(), SizeIs(1));\n\n  auto http_tracer_two = http_tracer_manager_.getOrCreateHttpTracer(&tracing_config);\n  // Expect no changes to the cache.\n  EXPECT_THAT(http_tracer_manager_.peekCachedTracersForTest(), SizeIs(1));\n\n  // Should reuse previously created HttpTracer instance.\n  EXPECT_EQ(http_tracer_two, http_tracer_one);\n}\n\nTEST_F(HttpTracerManagerImplTest, ShouldCacheTracersBasedOnFullConfig) {\n  envoy::config::trace::v3::Tracing_Http tracing_config_one;\n  tracing_config_one.set_name(\"envoy.tracers.sample\");\n  tracing_config_one.mutable_typed_config()->PackFrom(\n      MessageUtil::keyValueStruct(\"key1\", \"value1\"));\n\n  auto http_tracer_one = http_tracer_manager_.getOrCreateHttpTracer(&tracing_config_one);\n  // Expect a new HttpTracer to be added to the cache.\n  EXPECT_THAT(http_tracer_manager_.peekCachedTracersForTest(), SizeIs(1));\n\n  
envoy::config::trace::v3::Tracing_Http tracing_config_two;\n  tracing_config_two.set_name(\"envoy.tracers.sample\");\n  tracing_config_two.mutable_typed_config()->PackFrom(\n      MessageUtil::keyValueStruct(\"key2\", \"value2\"));\n\n  auto http_tracer_two = http_tracer_manager_.getOrCreateHttpTracer(&tracing_config_two);\n  // Expect a new HttpTracer to be added to the cache.\n  EXPECT_THAT(http_tracer_manager_.peekCachedTracersForTest(), SizeIs(2));\n\n  // Any changes to config must result in a new HttpTracer instance.\n  EXPECT_NE(http_tracer_two, http_tracer_one);\n}\n\nTEST_F(HttpTracerManagerImplTest, ShouldFailIfTracerProviderIsUnknown) {\n  envoy::config::trace::v3::Tracing_Http tracing_config;\n  tracing_config.set_name(\"invalid\");\n\n  EXPECT_THROW_WITH_MESSAGE(http_tracer_manager_.getOrCreateHttpTracer(&tracing_config),\n                            EnvoyException,\n                            \"Didn't find a registered implementation for name: 'invalid'\");\n}\n\nTEST_F(HttpTracerManagerImplTest, ShouldFailIfProviderSpecificConfigIsNotValid) {\n  envoy::config::trace::v3::Tracing_Http tracing_config;\n  tracing_config.set_name(\"envoy.tracers.sample\");\n  tracing_config.mutable_typed_config()->PackFrom(ValueUtil::stringValue(\"value\"));\n\n  EXPECT_THROW_WITH_MESSAGE(\n      http_tracer_manager_.getOrCreateHttpTracer(&tracing_config), EnvoyException,\n      R\"(Unable to unpack as google.protobuf.Struct: [type.googleapis.com/google.protobuf.Value] {\n  string_value: \"value\"\n}\n)\");\n}\n\nclass HttpTracerManagerImplCacheTest : public testing::Test {\npublic:\n  HttpTracerManagerImplCacheTest() {\n    tracing_config_one_.set_name(\"envoy.tracers.mock\");\n    tracing_config_one_.mutable_typed_config()->PackFrom(\n        MessageUtil::keyValueStruct(\"key1\", \"value1\"));\n\n    tracing_config_two_.set_name(\"envoy.tracers.mock\");\n    tracing_config_two_.mutable_typed_config()->PackFrom(\n        MessageUtil::keyValueStruct(\"key2\", 
\"value2\"));\n  }\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server_factory_context_;\n  HttpTracerManagerImpl http_tracer_manager_{std::make_unique<TracerFactoryContextImpl>(\n      server_factory_context_, ProtobufMessage::getStrictValidationVisitor())};\n\n  NiceMock<Server::Configuration::MockTracerFactory> tracer_factory_{\"envoy.tracers.mock\"};\n\n  envoy::config::trace::v3::Tracing_Http tracing_config_one_;\n  envoy::config::trace::v3::Tracing_Http tracing_config_two_;\n\nprivate:\n  Registry::InjectFactory<Server::Configuration::TracerFactory> registered_tracer_factory_{\n      tracer_factory_};\n};\n\nTEST_F(HttpTracerManagerImplCacheTest, ShouldCacheHttpTracersUsingWeakReferences) {\n  HttpTracer* expected_tracer = new NiceMock<MockHttpTracer>();\n\n  // Expect HttpTracerManager to create a new HttpTracer.\n  EXPECT_CALL(tracer_factory_, createHttpTracer(_, _))\n      .WillOnce(InvokeWithoutArgs(\n          [expected_tracer] { return std::shared_ptr<HttpTracer>(expected_tracer); }));\n\n  auto actual_tracer_one = http_tracer_manager_.getOrCreateHttpTracer(&tracing_config_one_);\n\n  EXPECT_EQ(actual_tracer_one.get(), expected_tracer);\n  // Expect a new HttpTracer to be added to the cache.\n  EXPECT_THAT(http_tracer_manager_.peekCachedTracersForTest(), SizeIs(1));\n\n  // Expect HttpTracerManager to re-use cached value.\n  auto actual_tracer_two = http_tracer_manager_.getOrCreateHttpTracer(&tracing_config_one_);\n\n  EXPECT_EQ(actual_tracer_two.get(), expected_tracer);\n  // Expect no changes to the cache.\n  EXPECT_THAT(http_tracer_manager_.peekCachedTracersForTest(), SizeIs(1));\n\n  // Expect HttpTracerManager to use weak references under the hood and release HttpTracer as soon\n  // as it's no longer in use.\n  std::weak_ptr<HttpTracer> weak_pointer{actual_tracer_one};\n\n  actual_tracer_one.reset();\n  // Expect one strong reference still to be left.\n  EXPECT_NE(weak_pointer.lock(), nullptr);\n\n  actual_tracer_two.reset();\n  
// Expect no more strong references to be left.\n  EXPECT_EQ(weak_pointer.lock(), nullptr);\n\n  HttpTracer* expected_another_tracer = new NiceMock<MockHttpTracer>();\n\n  // Expect HttpTracerManager to create a new HttpTracer once again.\n  EXPECT_CALL(tracer_factory_, createHttpTracer(_, _))\n      .WillOnce(InvokeWithoutArgs([expected_another_tracer] {\n        return std::shared_ptr<HttpTracer>(expected_another_tracer);\n      }));\n\n  // Use a different config to guarantee that a new cache entry will be added anyway.\n  auto actual_tracer_three = http_tracer_manager_.getOrCreateHttpTracer(&tracing_config_two_);\n\n  EXPECT_EQ(actual_tracer_three.get(), expected_another_tracer);\n  // Expect expired cache entries to be removed and a new HttpTracer to be added to the cache.\n  EXPECT_THAT(http_tracer_manager_.peekCachedTracersForTest(), SizeIs(1));\n\n  // Expect HttpTracerManager to keep the right value in the cache.\n  auto actual_tracer_four = http_tracer_manager_.getOrCreateHttpTracer(&tracing_config_two_);\n\n  EXPECT_EQ(actual_tracer_four.get(), expected_another_tracer);\n  // Expect no changes to the cache.\n  EXPECT_THAT(http_tracer_manager_.peekCachedTracersForTest(), SizeIs(1));\n}\n\n} // namespace\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"cds_api_impl_test\",\n    srcs = [\"cds_api_impl_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/upstream:cds_api_lib\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"cluster_manager_impl_test\",\n    srcs = [\"cluster_manager_impl_test.cc\"],\n    external_deps = [\n        \"abseil_optional\",\n    ],\n    deps = [\n        \":test_cluster_manager\",\n        \"//test/mocks/upstream:cds_api_mocks\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/mocks/upstream:cluster_real_priority_set_mocks\",\n        \"//test/mocks/upstream:cluster_update_callbacks_mocks\",\n        \"//test/mocks/upstream:health_checker_mocks\",\n        \"//test/mocks/upstream:load_balancer_context_mock\",\n        \"//test/mocks/upstream:thread_aware_load_balancer_mocks\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = 
\"cluster_update_tracker_test\",\n    srcs = [\"cluster_update_tracker_test.cc\"],\n    deps = [\n        \"//source/common/upstream:cluster_update_tracker_lib\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:thread_local_cluster_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"conn_pool_map_impl_test\",\n    srcs = [\"conn_pool_map_impl_test.cc\"],\n    deps = [\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//source/common/upstream:conn_pool_map_impl_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:conn_pool_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"edf_scheduler_test\",\n    srcs = [\"edf_scheduler_test.cc\"],\n    deps = [\"//source/common/upstream:edf_scheduler_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"eds_test\",\n    srcs = [\"eds_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/upstream:eds_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:transport_socket_config_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:health_checker_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        
\"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"eds_speed_test\",\n    srcs = [\"eds_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/config:grpc_mux_lib\",\n        \"//source/common/config:grpc_subscription_lib\",\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/upstream:eds_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:transport_socket_config_lib\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"eds_speed_test_benchmark_test\",\n    benchmark_binary = \"eds_speed_test\",\n)\n\nenvoy_cc_test_library(\n    name = \"health_checker_impl_test_lib\",\n    srcs = [\n        \"health_checker_impl_test_utils.cc\",\n    ],\n    hdrs = [\n        \"health_checker_impl_test_utils.h\",\n    ],\n    deps = [\n        \":utility_lib\",\n        \"//test/common/http:common_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/mocks/upstream:health_check_event_logger_mocks\",\n    
],\n)\n\nenvoy_cc_test(\n    name = \"health_checker_impl_test\",\n    srcs = [\n        \"health_checker_impl_test.cc\",\n    ],\n    deps = [\n        \":health_checker_impl_test_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/upstream:health_checker_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/common/http:common_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/mocks/upstream:health_check_event_logger_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/mocks/upstream:transport_socket_match_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"host_stats_test\",\n    srcs = [\"host_stats_test.cc\"],\n    deps = [\n        \"//include/envoy/upstream:host_description_interface\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"host_utility_test\",\n    srcs = [\"host_utility_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/network:utility_lib\",\n        
\"//source/common/upstream:host_utility_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"load_balancer_impl_test\",\n    srcs = [\"load_balancer_impl_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/mocks/upstream:load_balancer_context_mock\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"load_balancer_simulation_test\",\n    srcs = [\"load_balancer_simulation_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"load_stats_reporter_test\",\n    srcs = 
[\"load_stats_reporter_test.cc\"],\n    deps = [\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/upstream:load_stats_reporter_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/load_stats/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"hds_test\",\n    srcs = [\"hds_test.cc\"],\n    deps = [\n        \"//source/common/upstream:health_discovery_service_lib\",\n        \"//source/common/upstream:transport_socket_match_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/upstream:cluster_info_factory_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/health/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    
],\n)\n\nenvoy_cc_test(\n    name = \"logical_dns_cluster_test\",\n    srcs = [\"logical_dns_cluster_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:logical_dns_cluster_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:transport_socket_config_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"original_dst_cluster_test\",\n    srcs = [\"original_dst_cluster_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:original_dst_cluster_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        
\"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"outlier_detection_impl_test\",\n    srcs = [\"outlier_detection_impl_test.cc\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/common:time_interface\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:outlier_detection_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/cluster/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"priority_conn_pool_map_impl_test\",\n    srcs = [\"priority_conn_pool_map_impl_test.cc\"],\n    deps = [\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//source/common/upstream:priority_conn_pool_map_impl_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:conn_pool_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"resource_manager_impl_test\",\n    srcs = [\"resource_manager_impl_test.cc\"],\n    deps = [\n        \"//include/envoy/upstream:upstream_interface\",\n        
\"//source/common/upstream:resource_manager_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"ring_hash_lb_test\",\n    srcs = [\"ring_hash_lb_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/router:router_interface\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:ring_hash_lb_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"maglev_lb_test\",\n    srcs = [\"maglev_lb_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/upstream:maglev_lb_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"bounded_load_hlb_test\",\n    srcs = [\"bounded_load_hlb_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/upstream:thread_aware_lb_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"load_balancer_benchmark\",\n    srcs = [\"load_balancer_benchmark.cc\"],\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/memory:stats_lib\",\n        
\"//source/common/upstream:maglev_lb_lib\",\n        \"//source/common/upstream:ring_hash_lb_lib\",\n        \"//source/common/upstream:subset_lb_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"load_balancer_benchmark_test\",\n    timeout = \"long\",\n    benchmark_binary = \"load_balancer_benchmark\",\n)\n\nenvoy_cc_test(\n    name = \"subset_lb_test\",\n    srcs = [\"subset_lb_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//source/common/upstream:subset_lb_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/mocks/upstream:load_balancer_mocks\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"transport_socket_matcher_test\",\n    srcs = [\"transport_socket_matcher_test.cc\"],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/upstream:transport_socket_match_lib\",\n        
\"//source/server:transport_socket_config_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"upstream_impl_test\",\n    srcs = [\"upstream_impl_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        # TODO(mattklein123): Split this into 2 tests for each cluster.\n        \"//source/common/upstream:static_cluster_lib\",\n        \"//source/common/upstream:strict_dns_cluster_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:transport_socket_config_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:health_checker_mocks\",\n        
\"//test/mocks/upstream:priority_set_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_cluster_manager\",\n    hdrs = [\"test_cluster_manager.h\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/api:api_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:socket_option_lib\",\n        \"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:cluster_manager_lib\",\n        \"//source/common/upstream:subset_lb_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/integration/clusters:custom_static_cluster\",\n        
\"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/tcp:tcp_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"cluster_factory_impl_test\",\n    srcs = [\"cluster_factory_impl_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/upstream:cluster_factory_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:transport_socket_config_lib\",\n        \"//test/integration/clusters:custom_static_cluster\",\n        \"//test/mocks:common_lib\",\n        
\"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"health_check_fuzz_lib\",\n    srcs = [\"health_check_fuzz.cc\"],\n    hdrs = [\"health_check_fuzz.h\"],\n    deps = [\n        \":health_check_fuzz_proto_cc_proto\",\n        \":health_checker_impl_test_lib\",\n        \":utility_lib\",\n        \"//test/fuzz:utility_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"health_check_fuzz_proto\",\n    srcs = [\"health_check_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"health_check_fuzz_test\",\n    srcs = [\"health_check_fuzz_test.cc\"],\n    corpus = \"//test/common/upstream:health_check_corpus\",\n    deps = [\n        \":health_check_fuzz_lib\",\n        \":health_check_fuzz_proto_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/common/upstream/bounded_load_hlb_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/upstream/thread_aware_lb_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/upstream/mocks.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass TestHashingLoadBalancer : public ThreadAwareLoadBalancerBase::HashingLoadBalancer {\npublic:\n  explicit TestHashingLoadBalancer(NormalizedHostWeightVector ring) : ring_(std::move(ring)) {}\n  HostConstSharedPtr chooseHost(uint64_t hash, uint32_t /* attempt */) const override {\n    if (ring_.empty()) {\n      return nullptr;\n    }\n    return ring_.at(hash).first;\n  }\n\nprivate:\n  const NormalizedHostWeightVector ring_;\n};\n\nusing HostOverloadFactorPredicate = std::function<double(const Host& host, double weight)>;\nclass TestBoundedLoadHashingLoadBalancer\n    : public ThreadAwareLoadBalancerBase::BoundedLoadHashingLoadBalancer {\npublic:\n  TestBoundedLoadHashingLoadBalancer(\n      ThreadAwareLoadBalancerBase::HashingLoadBalancerSharedPtr hlb_ptr,\n      const NormalizedHostWeightVector& normalized_host_weights, uint32_t hash_balance_factor,\n      HostOverloadFactorPredicate host_overload_factor)\n      : ThreadAwareLoadBalancerBase::BoundedLoadHashingLoadBalancer(\n            hlb_ptr, normalized_host_weights, hash_balance_factor),\n        host_overload_factor_(host_overload_factor) {}\n\nprivate:\n  HostOverloadFactorPredicate host_overload_factor_;\n  double hostOverloadFactor(const Host& host, double weight) const override {\n    return host_overload_factor_(host, weight);\n  }\n};\n\nclass BoundedLoadHashingLoadBalancerTest : public testing::Test {\npublic:\n  HostOverloadFactorPredicate\n  getHostOverloadFactorPredicate(const std::vector<std::string>& addresses) {\n    return [addresses](const Host& h, double) -> double {\n      // Each host in the vector gets an increasing overload factor.\n      int index = 1;\n      for (const std::string& host : 
addresses) {\n        index = index + 1;\n        if (host == h.address()->asString()) {\n          return (100.0 + index) / 100;\n        }\n      }\n      return 0.5;\n    };\n  }\n\n  void createHosts(uint32_t num_hosts, NormalizedHostWeightVector& normalized_host_weights) {\n    const double equal_weight = static_cast<double>(1.0 / num_hosts);\n    for (uint32_t i = 0; i < num_hosts; i++) {\n      normalized_host_weights.push_back(\n          {makeTestHost(info_, fmt::format(\"tcp://127.0.0.1{}:90\", i)), equal_weight});\n    }\n  }\n\n  // creates hosts and also puts them in a ring so that 2 hashes map to the same host.\n  void createHosts(uint32_t num_hosts, NormalizedHostWeightVector& hosts,\n                   NormalizedHostWeightVector& ring) {\n    const double equal_weight = static_cast<double>(1.0 / num_hosts);\n    for (uint32_t i = 0; i < num_hosts; i++) {\n      HostConstSharedPtr h = makeTestHost(info_, fmt::format(\"tcp://127.0.0.1{}:90\", i));\n      ring.push_back({h, equal_weight});\n      ring.push_back({h, equal_weight});\n      hosts.push_back({h, equal_weight});\n    }\n  }\n\n  ThreadAwareLoadBalancerBase::HashingLoadBalancerSharedPtr hlb_;\n  std::unique_ptr<ThreadAwareLoadBalancerBase::BoundedLoadHashingLoadBalancer> lb_;\n  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};\n\n  HostOverloadFactorPredicate host_overload_factor_predicate_;\n};\n\n// Works correctly without any hosts.\nTEST_F(BoundedLoadHashingLoadBalancerTest, NoHosts) {\n  NormalizedHostWeightVector normalized_host_weights;\n  hlb_ = std::make_shared<TestHashingLoadBalancer>(normalized_host_weights);\n  lb_ = std::make_unique<TestBoundedLoadHashingLoadBalancer>(hlb_, normalized_host_weights, 1,\n                                                             nullptr);\n  EXPECT_EQ(lb_->chooseHost(1, 1), nullptr);\n};\n\n// Works correctly for the case when no host is ever overloaded.\nTEST_F(BoundedLoadHashingLoadBalancerTest, NoHostEverOverloaded) {\n 
 // setup: 5 hosts, none ever overloaded.\n  std::vector<std::string> addresses;\n  host_overload_factor_predicate_ = getHostOverloadFactorPredicate(addresses);\n\n  NormalizedHostWeightVector normalized_host_weights;\n  createHosts(5, normalized_host_weights);\n\n  NormalizedHostWeightVector ring(normalized_host_weights);\n  hlb_ = std::make_shared<TestHashingLoadBalancer>(ring);\n\n  lb_ = std::make_unique<TestBoundedLoadHashingLoadBalancer>(hlb_, normalized_host_weights, 1,\n                                                             host_overload_factor_predicate_);\n\n  for (uint32_t i = 0; i < 5; i++) {\n    HostConstSharedPtr host = lb_->chooseHost(i, 1);\n    EXPECT_NE(host, nullptr);\n    EXPECT_EQ(host->address()->asString(), fmt::format(\"127.0.0.1{}:90\", i));\n  }\n};\n\n// Works correctly for the case one host is overloaded.\nTEST_F(BoundedLoadHashingLoadBalancerTest, OneHostOverloaded) {\n  // In this test host 2 is overloaded. The random shuffle sequence of 5\n  // elements with seed 2 is 2 1 0 4 3. 
When the host picked up for\n  // hash 2 (which is 127.0.0.12) is overloaded, host 0 (127.0.0.10)\n  // is picked up.\n\n  // setup: 5 hosts, one of them is overloaded.\n  std::vector<std::string> addresses;\n  addresses.push_back(\"127.0.0.12:90\");\n  host_overload_factor_predicate_ = getHostOverloadFactorPredicate(addresses);\n\n  NormalizedHostWeightVector normalized_host_weights;\n  createHosts(5, normalized_host_weights);\n\n  NormalizedHostWeightVector ring(normalized_host_weights);\n  hlb_ = std::make_shared<TestHashingLoadBalancer>(ring);\n\n  lb_ = std::make_unique<TestBoundedLoadHashingLoadBalancer>(hlb_, normalized_host_weights, 1,\n                                                             host_overload_factor_predicate_);\n\n  HostConstSharedPtr host = lb_->chooseHost(2, 1);\n  EXPECT_NE(host, nullptr);\n  EXPECT_EQ(host->address()->asString(), \"127.0.0.11:90\");\n};\n\n// Works correctly for the case a few hosts are overloaded.\nTEST_F(BoundedLoadHashingLoadBalancerTest, MultipleHostOverloaded) {\n  // In this test hosts 0, 1 & 2 are overloaded. The random shuffle\n  // sequence of 5 elements with seed 2 is 2 1 0 4 3. 
When the host\n  // picked up for hash 2 (which is 127.0.0.12) is overloaded, the\n  // method passes over host 0 and picks host 3 (127.0.0.13) up.\n\n  // setup: 5 hosts, few of them are overloaded.\n  std::vector<std::string> addresses;\n  addresses.push_back(\"127.0.0.10:90\");\n  addresses.push_back(\"127.0.0.11:90\");\n  addresses.push_back(\"127.0.0.12:90\");\n  host_overload_factor_predicate_ = getHostOverloadFactorPredicate(addresses);\n\n  NormalizedHostWeightVector normalized_host_weights;\n  createHosts(5, normalized_host_weights);\n\n  NormalizedHostWeightVector ring(normalized_host_weights);\n  hlb_ = std::make_shared<TestHashingLoadBalancer>(ring);\n\n  lb_ = std::make_unique<TestBoundedLoadHashingLoadBalancer>(hlb_, normalized_host_weights, 1,\n                                                             host_overload_factor_predicate_);\n\n  HostConstSharedPtr host = lb_->chooseHost(2, 1);\n  EXPECT_NE(host, nullptr);\n  EXPECT_EQ(host->address()->asString(), \"127.0.0.14:90\");\n};\n\n// Works correctly for the case when requests with different hash map to the same\n// overloaded host.\nTEST_F(BoundedLoadHashingLoadBalancerTest, MultipleHashSameHostOverloaded) {\n  // In this case host 3 is overloaded and the CH ring has same host repeated on\n  // consecutive indices (0 0 1 1 2 2 3 3 4 4). The hashes 6 and 7 map to same host\n  // 3 which is overloaded. The random shuffle sequence of 5 elements with seed 6 is\n  // 4 0 2 3 1 and with 7 it is 0 1 4 3 2. 
Hence hosts 4 and 0 are picked up for these\n  // hashes.\n\n  // setup: 5 hosts, one of them is overloaded.\n  std::vector<std::string> addresses;\n  addresses.push_back(\"127.0.0.13:90\");\n  host_overload_factor_predicate_ = getHostOverloadFactorPredicate(addresses);\n\n  NormalizedHostWeightVector normalized_host_weights, hosts_on_ring;\n  createHosts(5, normalized_host_weights, hosts_on_ring);\n\n  hlb_ = std::make_shared<TestHashingLoadBalancer>(hosts_on_ring);\n\n  lb_ = std::make_unique<TestBoundedLoadHashingLoadBalancer>(hlb_, normalized_host_weights, 1,\n                                                             host_overload_factor_predicate_);\n\n  HostConstSharedPtr host1 = lb_->chooseHost(6, 1);\n  EXPECT_NE(host1, nullptr);\n  HostConstSharedPtr host2 = lb_->chooseHost(7, 1);\n  EXPECT_NE(host2, nullptr);\n\n  EXPECT_NE(host1->address()->asString(), host2->address()->asString());\n\n  // sequence for 4 is 40231, 4 is the first host not overloaded\n  EXPECT_EQ(host1->address()->asString(), \"127.0.0.14:90\");\n  // sequence for 5 is 01432, 0 is the first host not overloaded\n  EXPECT_EQ(host2->address()->asString(), \"127.0.0.10:90\");\n};\n\n// Works correctly for the case when all hosts are overloaded\nTEST_F(BoundedLoadHashingLoadBalancerTest, AllHostsOverloaded) {\n  std::vector<std::string> addresses;\n  addresses.push_back(\"127.0.0.11:90\");\n  addresses.push_back(\"127.0.0.10:90\");\n  addresses.push_back(\"127.0.0.12:90\");\n  host_overload_factor_predicate_ = getHostOverloadFactorPredicate(addresses);\n\n  NormalizedHostWeightVector normalized_host_weights;\n  createHosts(3, normalized_host_weights);\n\n  NormalizedHostWeightVector ring(normalized_host_weights);\n  hlb_ = std::make_shared<TestHashingLoadBalancer>(ring);\n\n  lb_ = std::make_unique<TestBoundedLoadHashingLoadBalancer>(hlb_, normalized_host_weights, 1,\n                                                             host_overload_factor_predicate_);\n\n  HostConstSharedPtr host 
= lb_->chooseHost(0, 1);\n  EXPECT_NE(host, nullptr);\n  EXPECT_EQ(host->address()->asString(), \"127.0.0.11:90\");\n};\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/cds_api_impl_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/upstream/cds_api_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Return;\nusing testing::StrEq;\nusing testing::Throw;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nMATCHER_P(WithName, expectedName, \"\") { return arg.name() == expectedName; }\n\nclass CdsApiImplTest : public testing::Test {\nprotected:\n  void setup() {\n    envoy::config::core::v3::ConfigSource cds_config;\n    cds_ = CdsApiImpl::create(cds_config, cm_, store_, validation_visitor_);\n    cds_->setInitializedCb([this]() -> void { initialized_.ready(); });\n\n    EXPECT_CALL(*cm_.subscription_factory_.subscription_, start(_, _));\n    cds_->initialize();\n    cds_callbacks_ = cm_.subscription_factory_.callbacks_;\n  }\n\n  void expectAdd(const std::string& cluster_name, const std::string& version = std::string(\"\")) {\n    EXPECT_CALL(cm_, addOrUpdateCluster(WithName(cluster_name), version)).WillOnce(Return(true));\n  }\n\n  void expectAddToThrow(const std::string& cluster_name, const std::string& exception_msg) {\n    EXPECT_CALL(cm_, addOrUpdateCluster(WithName(cluster_name), _))\n        .WillOnce(Throw(EnvoyException(exception_msg)));\n  }\n\n  ClusterManager::ClusterInfoMap makeClusterMap(const std::vector<std::string>& clusters) {\n    ClusterManager::ClusterInfoMap map;\n   
 for (const auto& cluster : clusters) {\n      map.emplace(cluster, cm_.thread_local_cluster_.cluster_);\n    }\n    return map;\n  }\n\n  NiceMock<MockClusterManager> cm_;\n  Upstream::ClusterManager::ClusterInfoMap cluster_map_;\n  Upstream::MockClusterMockPrioritySet mock_cluster_;\n  Stats::IsolatedStoreImpl store_;\n  CdsApiPtr cds_;\n  Config::SubscriptionCallbacks* cds_callbacks_{};\n  ReadyWatcher initialized_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n};\n\n// Regression test against only updating versionInfo() if at least one cluster\n// is are added/updated even if one or more are removed.\nTEST_F(CdsApiImplTest, UpdateVersionOnClusterRemove) {\n  InSequence s;\n\n  setup();\n\n  const std::string response1_yaml = R\"EOF(\nversion_info: '0'\nresources:\n- \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n  name: cluster1\n  type: EDS\n  eds_cluster_config:\n    eds_config:\n      path: eds path\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_yaml);\n\n  EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{}));\n  expectAdd(\"cluster1\", \"0\");\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_EQ(\"\", cds_->versionInfo());\n\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::cluster::v3::Cluster>(response1);\n  cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info());\n  EXPECT_EQ(\"0\", cds_->versionInfo());\n\n  const std::string response2_yaml = R\"EOF(\nversion_info: '1'\nresources:\n)EOF\";\n  auto response2 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response2_yaml);\n  EXPECT_CALL(cm_, clusters()).WillOnce(Return(makeClusterMap({\"cluster1\"})));\n  EXPECT_CALL(cm_, removeCluster(\"cluster1\")).WillOnce(Return(true));\n  const auto decoded_resources_2 =\n      
TestUtility::decodeResources<envoy::config::cluster::v3::Cluster>(response2);\n  cds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info());\n  EXPECT_EQ(\"1\", cds_->versionInfo());\n}\n\n// Validate onConfigUpdate throws EnvoyException with duplicate clusters.\nTEST_F(CdsApiImplTest, ValidateDuplicateClusters) {\n  InSequence s;\n\n  setup();\n\n  envoy::config::cluster::v3::Cluster cluster_1;\n  cluster_1.set_name(\"duplicate_cluster\");\n  const auto decoded_resources = TestUtility::decodeResources({cluster_1, cluster_1});\n\n  EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_));\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_THROW_WITH_MESSAGE(cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\"),\n                            EnvoyException,\n                            \"Error adding/updating cluster(s) duplicate_cluster: duplicate cluster \"\n                            \"duplicate_cluster found\");\n}\n\nTEST_F(CdsApiImplTest, EmptyConfigUpdate) {\n  InSequence s;\n\n  setup();\n\n  EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{}));\n  EXPECT_CALL(initialized_, ready());\n\n  cds_callbacks_->onConfigUpdate({}, \"\");\n}\n\nTEST_F(CdsApiImplTest, ConfigUpdateWith2ValidClusters) {\n  {\n    InSequence s;\n    setup();\n  }\n\n  EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{}));\n  EXPECT_CALL(initialized_, ready());\n\n  envoy::config::cluster::v3::Cluster cluster_1;\n  cluster_1.set_name(\"cluster_1\");\n  expectAdd(\"cluster_1\");\n\n  envoy::config::cluster::v3::Cluster cluster_2;\n  cluster_2.set_name(\"cluster_2\");\n  expectAdd(\"cluster_2\");\n\n  const auto decoded_resources = TestUtility::decodeResources({cluster_1, cluster_2});\n  cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\");\n}\n\nTEST_F(CdsApiImplTest, DeltaConfigUpdate) {\n  {\n    InSequence s;\n    setup();\n  }\n  EXPECT_CALL(initialized_, ready());\n\n  {\n    
Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> resources;\n    {\n      envoy::config::cluster::v3::Cluster cluster;\n      cluster.set_name(\"cluster_1\");\n      expectAdd(\"cluster_1\", \"v1\");\n      auto* resource = resources.Add();\n      resource->mutable_resource()->PackFrom(cluster);\n      resource->set_name(\"cluster_1\");\n      resource->set_version(\"v1\");\n    }\n    {\n      envoy::config::cluster::v3::Cluster cluster;\n      cluster.set_name(\"cluster_2\");\n      expectAdd(\"cluster_2\", \"v1\");\n      auto* resource = resources.Add();\n      resource->mutable_resource()->PackFrom(cluster);\n      resource->set_name(\"cluster_2\");\n      resource->set_version(\"v1\");\n    }\n    const auto decoded_resources =\n        TestUtility::decodeResources<envoy::config::cluster::v3::Cluster>(resources);\n    cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, \"v1\");\n  }\n\n  {\n    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> resources;\n    {\n      envoy::config::cluster::v3::Cluster cluster;\n      cluster.set_name(\"cluster_3\");\n      expectAdd(\"cluster_3\", \"v2\");\n      auto* resource = resources.Add();\n      resource->mutable_resource()->PackFrom(cluster);\n      resource->set_name(\"cluster_3\");\n      resource->set_version(\"v2\");\n    }\n    Protobuf::RepeatedPtrField<std::string> removed;\n    *removed.Add() = \"cluster_1\";\n    EXPECT_CALL(cm_, removeCluster(StrEq(\"cluster_1\"))).WillOnce(Return(true));\n    const auto decoded_resources =\n        TestUtility::decodeResources<envoy::config::cluster::v3::Cluster>(resources);\n    cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, removed, \"v2\");\n  }\n}\n\nTEST_F(CdsApiImplTest, ConfigUpdateAddsSecondClusterEvenIfFirstThrows) {\n  {\n    InSequence s;\n    setup();\n  }\n\n  EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{}));\n  EXPECT_CALL(initialized_, ready());\n\n  
envoy::config::cluster::v3::Cluster cluster_1;\n  cluster_1.set_name(\"cluster_1\");\n  expectAddToThrow(\"cluster_1\", \"An exception\");\n\n  envoy::config::cluster::v3::Cluster cluster_2;\n  cluster_2.set_name(\"cluster_2\");\n  expectAdd(\"cluster_2\");\n\n  envoy::config::cluster::v3::Cluster cluster_3;\n  cluster_3.set_name(\"cluster_3\");\n  expectAddToThrow(\"cluster_3\", \"Another exception\");\n\n  const auto decoded_resources = TestUtility::decodeResources({cluster_1, cluster_2, cluster_3});\n  EXPECT_THROW_WITH_MESSAGE(\n      cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\"), EnvoyException,\n      \"Error adding/updating cluster(s) cluster_1: An exception, cluster_3: Another exception\");\n}\n\nTEST_F(CdsApiImplTest, Basic) {\n  InSequence s;\n\n  setup();\n\n  const std::string response1_yaml = R\"EOF(\nversion_info: '0'\nresources:\n- \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n  name: cluster1\n  type: EDS\n  eds_cluster_config:\n    eds_config:\n      path: eds path\n- \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n  name: cluster2\n  type: EDS\n  eds_cluster_config:\n    eds_config:\n      path: eds path\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_yaml);\n\n  EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{}));\n  expectAdd(\"cluster1\", \"0\");\n  expectAdd(\"cluster2\", \"0\");\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_EQ(\"\", cds_->versionInfo());\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::cluster::v3::Cluster>(response1);\n  cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info());\n  EXPECT_EQ(\"0\", cds_->versionInfo());\n\n  const std::string response2_yaml = R\"EOF(\nversion_info: '1'\nresources:\n- \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n  name: cluster1\n  type: EDS\n  
eds_cluster_config:\n    eds_config:\n      path: eds path\n- \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n  name: cluster3\n  type: EDS\n  eds_cluster_config:\n    eds_config:\n      path: eds path\n)EOF\";\n  auto response2 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response2_yaml);\n\n  EXPECT_CALL(cm_, clusters()).WillOnce(Return(makeClusterMap({\"cluster1\", \"cluster2\"})));\n  expectAdd(\"cluster1\", \"1\");\n  expectAdd(\"cluster3\", \"1\");\n  EXPECT_CALL(cm_, removeCluster(\"cluster2\"));\n  const auto decoded_resources_2 =\n      TestUtility::decodeResources<envoy::config::cluster::v3::Cluster>(response2);\n  cds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info());\n\n  EXPECT_EQ(\"1\", cds_->versionInfo());\n}\n\n// Validate behavior when the config is delivered but it fails PGV validation.\nTEST_F(CdsApiImplTest, FailureInvalidConfig) {\n  InSequence s;\n\n  setup();\n\n  const std::string response1_yaml = R\"EOF(\nversion_info: '0'\nresources:\n- \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n  name: cluster1\n  type: EDS\n  eds_cluster_config:\n    eds_config:\n      path: eds path\n- \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n  name: cluster1\n  type: EDS\n  eds_cluster_config:\n    eds_config:\n      path: eds path\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_yaml);\n\n  EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_));\n  EXPECT_CALL(initialized_, ready());\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::cluster::v3::Cluster>(response1);\n  EXPECT_THROW(cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()),\n               EnvoyException);\n  EXPECT_EQ(\"\", cds_->versionInfo());\n}\n\n// Validate behavior when the config fails delivery at the subscription 
level.\nTEST_F(CdsApiImplTest, FailureSubscription) {\n  InSequence s;\n\n  setup();\n\n  EXPECT_CALL(initialized_, ready());\n  // onConfigUpdateFailed() should not be called for gRPC stream connection failure\n  cds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, {});\n  EXPECT_EQ(\"\", cds_->versionInfo());\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/cluster_factory_impl_test.cc",
    "content": "#include <chrono>\n#include <list>\n#include <memory>\n#include <string>\n#include <tuple>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/cluster_factory_impl.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/integration/clusters/cluster_factory_config.pb.validate.h\"\n#include \"test/integration/clusters/custom_static_cluster.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\n// Test Cluster Factory without custom configuration\nclass TestStaticClusterFactory : public ClusterFactoryImplBase {\npublic:\n  TestStaticClusterFactory() : ClusterFactoryImplBase(\"envoy.clusters.test_static\") {}\n\n  std::pair<ClusterImplBaseSharedPtr, ThreadAwareLoadBalancerPtr> createClusterImpl(\n      const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override {\n    return std::make_pair(std::make_shared<CustomStaticCluster>(\n                              cluster, context.runtime(), socket_factory_context,\n                              std::move(stats_scope), context.addedViaApi(), 1, \"127.0.0.1\", 80),\n                          nullptr);\n  }\n};\n\nclass ClusterFactoryTestBase {\nprotected:\n  ClusterFactoryTestBase() : api_(Api::createApiForTest(stats_)) {\n    
outlier_event_logger_ = std::make_shared<Outlier::MockEventLogger>();\n    dns_resolver_ = std::make_shared<Network::MockDnsResolver>();\n  }\n\n  NiceMock<Server::MockAdmin> admin_;\n  Ssl::MockContextManager ssl_context_manager_;\n  NiceMock<MockClusterManager> cm_;\n  const NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  Stats::IsolatedStoreImpl stats_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n  Network::DnsResolverSharedPtr dns_resolver_;\n  AccessLog::MockAccessLogManager log_manager_;\n  Outlier::EventLoggerSharedPtr outlier_event_logger_;\n};\n\nclass TestStaticClusterImplTest : public testing::Test, public ClusterFactoryTestBase {};\n\nTEST_F(TestStaticClusterImplTest, CreateWithoutConfig) {\n  const std::string yaml = R\"EOF(\n      name: staticcluster\n      connect_timeout: 0.25s\n      lb_policy: ROUND_ROBIN\n      load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 443\n      cluster_type:\n        name: envoy.clusters.test_static\n    )EOF\";\n\n  TestStaticClusterFactory factory;\n  Registry::InjectFactory<ClusterFactory> registered_factory(factory);\n\n  const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  auto create_result = ClusterFactoryImplBase::create(\n      cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, dispatcher_,\n      log_manager_, local_info_, admin_, singleton_manager_, std::move(outlier_event_logger_),\n      false, validation_visitor_, *api_);\n  auto cluster = create_result.first;\n  cluster->initialize([] {});\n\n  
EXPECT_EQ(1UL, cluster->prioritySet().hostSetsPerPriority()[1]->healthyHosts().size());\n  EXPECT_EQ(\"\", cluster->prioritySet().hostSetsPerPriority()[1]->hosts()[0]->hostname());\n  // the hosts field override by values hardcoded in the factory\n  EXPECT_EQ(\"127.0.0.1\", cluster->prioritySet()\n                             .hostSetsPerPriority()[1]\n                             ->hosts()[0]\n                             ->address()\n                             ->ip()\n                             ->addressAsString());\n  EXPECT_EQ(80,\n            cluster->prioritySet().hostSetsPerPriority()[1]->hosts()[0]->address()->ip()->port());\n  EXPECT_FALSE(cluster->info()->addedViaApi());\n}\n\nTEST_F(TestStaticClusterImplTest, CreateWithStructConfig) {\n  const std::string yaml = R\"EOF(\n      name: staticcluster\n      connect_timeout: 0.25s\n      lb_policy: ROUND_ROBIN\n      load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 443\n      cluster_type:\n          name: envoy.clusters.custom_static\n          typed_config:\n            \"@type\": type.googleapis.com/google.protobuf.Struct\n            value:\n              priority: 10\n              address: 127.0.0.1\n              port_value: 80\n    )EOF\";\n\n  const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  auto create_result = ClusterFactoryImplBase::create(\n      cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, dispatcher_,\n      log_manager_, local_info_, admin_, singleton_manager_, std::move(outlier_event_logger_),\n      false, validation_visitor_, *api_);\n  auto cluster = create_result.first;\n  cluster->initialize([] {});\n\n  EXPECT_EQ(1UL, cluster->prioritySet().hostSetsPerPriority()[10]->healthyHosts().size());\n  EXPECT_EQ(\"\", 
cluster->prioritySet().hostSetsPerPriority()[10]->hosts()[0]->hostname());\n  EXPECT_EQ(\"127.0.0.1\", cluster->prioritySet()\n                             .hostSetsPerPriority()[10]\n                             ->hosts()[0]\n                             ->address()\n                             ->ip()\n                             ->addressAsString());\n  EXPECT_EQ(80,\n            cluster->prioritySet().hostSetsPerPriority()[10]->hosts()[0]->address()->ip()->port());\n  EXPECT_FALSE(cluster->info()->addedViaApi());\n}\n\nTEST_F(TestStaticClusterImplTest, CreateWithTypedConfig) {\n  const std::string yaml = R\"EOF(\n      name: staticcluster\n      connect_timeout: 0.25s\n      lb_policy: ROUND_ROBIN\n      load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 443\n      cluster_type:\n          name: envoy.clusters.custom_static\n          typed_config:\n            \"@type\": type.googleapis.com/test.integration.clusters.CustomStaticConfig\n            priority: 10\n            address: 127.0.0.1\n            port_value: 80\n    )EOF\";\n\n  const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  auto create_result = ClusterFactoryImplBase::create(\n      cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, dispatcher_,\n      log_manager_, local_info_, admin_, singleton_manager_, std::move(outlier_event_logger_),\n      false, validation_visitor_, *api_);\n  auto cluster = create_result.first;\n  cluster->initialize([] {});\n\n  EXPECT_EQ(1UL, cluster->prioritySet().hostSetsPerPriority()[10]->healthyHosts().size());\n  EXPECT_EQ(\"\", cluster->prioritySet().hostSetsPerPriority()[10]->hosts()[0]->hostname());\n  EXPECT_EQ(\"127.0.0.1\", cluster->prioritySet()\n                             .hostSetsPerPriority()[10]\n                 
            ->hosts()[0]\n                             ->address()\n                             ->ip()\n                             ->addressAsString());\n  EXPECT_EQ(80,\n            cluster->prioritySet().hostSetsPerPriority()[10]->hosts()[0]->address()->ip()->port());\n  EXPECT_FALSE(cluster->info()->addedViaApi());\n}\n\nTEST_F(TestStaticClusterImplTest, UnsupportedClusterType) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 443\n    cluster_type:\n        name: envoy.clusters.bad_cluster_name\n        typed_config:\n          \"@type\": type.googleapis.com/test.integration.clusters.CustomStaticConfig\n          priority: 10\n  )EOF\";\n  // the factory is not registered, expect to throw\n  EXPECT_THROW_WITH_MESSAGE(\n      {\n        const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n        ClusterFactoryImplBase::create(\n            cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_,\n            dispatcher_, log_manager_, local_info_, admin_, singleton_manager_,\n            std::move(outlier_event_logger_), false, validation_visitor_, *api_);\n      },\n      EnvoyException,\n      \"Didn't find a registered cluster factory implementation for name: \"\n      \"'envoy.clusters.bad_cluster_name'\");\n}\n\nTEST_F(TestStaticClusterImplTest, HostnameWithoutDNS) {\n  const std::string yaml = R\"EOF(\n      name: staticcluster\n      connect_timeout: 0.25s\n      lb_policy: ROUND_ROBIN\n      common_lb_config:\n        consistent_hashing_lb_config:\n          use_hostname_for_hashing: true\n      load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                
address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 443\n      cluster_type:\n        name: envoy.clusters.test_static\n    )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      {\n        const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n        ClusterFactoryImplBase::create(\n            cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_,\n            dispatcher_, log_manager_, local_info_, admin_, singleton_manager_,\n            std::move(outlier_event_logger_), false, validation_visitor_, *api_);\n      },\n      EnvoyException,\n      \"Cannot use hostname for consistent hashing loadbalancing for cluster of type: \"\n      \"'envoy.clusters.test_static'\");\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/cluster_manager_impl_test.cc",
    "content": "#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.validate.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"test/common/upstream/test_cluster_manager.h\"\n#include \"test/mocks/upstream/cds_api.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/mocks/upstream/cluster_real_priority_set.h\"\n#include \"test/mocks/upstream/cluster_update_callbacks.h\"\n#include \"test/mocks/upstream/health_checker.h\"\n#include \"test/mocks/upstream/load_balancer_context.h\"\n#include \"test/mocks/upstream/thread_aware_load_balancer.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nusing ::testing::_;\nusing ::testing::DoAll;\nusing ::testing::Eq;\nusing ::testing::InSequence;\nusing ::testing::Invoke;\nusing ::testing::Mock;\nusing ::testing::NiceMock;\nusing ::testing::Return;\nusing ::testing::ReturnNew;\nusing ::testing::ReturnRef;\nusing ::testing::SaveArg;\n\nenvoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV3Yaml(const std::string& yaml,\n                                                                 bool avoid_boosting = true) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  TestUtility::loadFromYaml(yaml, bootstrap, true, avoid_boosting);\n  return bootstrap;\n}\n\nstd::string clustersJson(const std::vector<std::string>& clusters) {\n  return fmt::sprintf(\"\\\"clusters\\\": [%s]\", absl::StrJoin(clusters, \",\"));\n}\n\nclass ClusterManagerImplTest : public testing::Test {\npublic:\n  ClusterManagerImplTest()\n      : http_context_(factory_.stats_.symbolTable()), grpc_context_(factory_.stats_.symbolTable()) {\n  }\n\n  void create(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    cluster_manager_ = std::make_unique<TestClusterManagerImpl>(\n        bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_,\n      
  factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_,\n        *factory_.api_, http_context_, grpc_context_);\n    cluster_manager_->setPrimaryClustersInitializedCb(\n        [this, bootstrap]() { cluster_manager_->initializeSecondaryClusters(bootstrap); });\n  }\n\n  void createWithLocalClusterUpdate(const bool enable_merge_window = true) {\n    std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      type: STATIC\n      lb_policy: ROUND_ROBIN\n      load_assignment:\n        cluster_name: cluster_1\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11002\n  )EOF\";\n    const std::string merge_window_enabled = R\"EOF(\n      common_lb_config:\n        update_merge_window: 3s\n  )EOF\";\n    const std::string merge_window_disabled = R\"EOF(\n      common_lb_config:\n        update_merge_window: 0s\n  )EOF\";\n\n    yaml += enable_merge_window ? 
merge_window_enabled : merge_window_disabled;\n\n    const auto& bootstrap = parseBootstrapFromV3Yaml(yaml);\n\n    cluster_manager_ = std::make_unique<MockedUpdatedClusterManagerImpl>(\n        bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_,\n        factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_,\n        *factory_.api_, local_cluster_update_, local_hosts_removed_, http_context_, grpc_context_);\n  }\n\n  void checkStats(uint64_t added, uint64_t modified, uint64_t removed, uint64_t active,\n                  uint64_t warming) {\n    EXPECT_EQ(added, factory_.stats_.counter(\"cluster_manager.cluster_added\").value());\n    EXPECT_EQ(modified, factory_.stats_.counter(\"cluster_manager.cluster_modified\").value());\n    EXPECT_EQ(removed, factory_.stats_.counter(\"cluster_manager.cluster_removed\").value());\n    EXPECT_EQ(active,\n              factory_.stats_\n                  .gauge(\"cluster_manager.active_clusters\", Stats::Gauge::ImportMode::NeverImport)\n                  .value());\n    EXPECT_EQ(warming,\n              factory_.stats_\n                  .gauge(\"cluster_manager.warming_clusters\", Stats::Gauge::ImportMode::NeverImport)\n                  .value());\n  }\n\n  void checkConfigDump(const std::string& expected_dump_yaml) {\n    auto message_ptr = admin_.config_tracker_.config_tracker_callbacks_[\"clusters\"]();\n    const auto& clusters_config_dump =\n        dynamic_cast<const envoy::admin::v3::ClustersConfigDump&>(*message_ptr);\n\n    envoy::admin::v3::ClustersConfigDump expected_clusters_config_dump;\n    TestUtility::loadFromYaml(expected_dump_yaml, expected_clusters_config_dump);\n    EXPECT_EQ(expected_clusters_config_dump.DebugString(), clusters_config_dump.DebugString());\n  }\n\n  MetadataConstSharedPtr buildMetadata(const std::string& version) const {\n    envoy::config::core::v3::Metadata metadata;\n\n    if (!version.empty()) {\n      
Envoy::Config::Metadata::mutableMetadataValue(\n          metadata, Config::MetadataFilters::get().ENVOY_LB, \"version\")\n          .set_string_value(version);\n    }\n\n    return std::make_shared<const envoy::config::core::v3::Metadata>(metadata);\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  NiceMock<TestClusterManagerFactory> factory_;\n  NiceMock<ProtobufMessage::MockValidationContext> validation_context_;\n  std::unique_ptr<TestClusterManagerImpl> cluster_manager_;\n  AccessLog::MockAccessLogManager log_manager_;\n  NiceMock<Server::MockAdmin> admin_;\n  MockLocalClusterUpdate local_cluster_update_;\n  MockLocalHostsRemoved local_hosts_removed_;\n  Http::ContextImpl http_context_;\n  Grpc::ContextImpl grpc_context_;\n};\n\nenvoy::config::bootstrap::v3::Bootstrap defaultConfig() {\n  const std::string yaml = R\"EOF(\nstatic_resources:\n  clusters: []\n  )EOF\";\n\n  return parseBootstrapFromV3Yaml(yaml);\n}\n\nTEST_F(ClusterManagerImplTest, MultipleProtocolClusterFail) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: http12_cluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      http2_protocol_options: {}\n      http_protocol_options: {}\n  )EOF\";\n  EXPECT_THROW_WITH_MESSAGE(\n      create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n      \"cluster: Both HTTP1 and HTTP2 options may only be configured with non-default \"\n      \"'protocol_selection' values\");\n}\n\nTEST_F(ClusterManagerImplTest, MultipleHealthCheckFail) {\n  const std::string yaml = R\"EOF(\n static_resources:\n  clusters:\n  - name: service_google\n    connect_timeout: 0.25s\n    health_checks:\n      - timeout: 1s\n        interval: 1s\n        http_health_check:\n          path: \"/blah\"\n      - timeout: 1s\n        interval: 1s\n        http_health_check:\n          path: \"/\"\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n                            \"Multiple 
health checks not supported\");\n}\n\nTEST_F(ClusterManagerImplTest, MultipleProtocolCluster) {\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: http12_cluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      http2_protocol_options: {}\n      http_protocol_options: {}\n      protocol_selection: USE_DOWNSTREAM_PROTOCOL\n  )EOF\";\n  create(parseBootstrapFromV3Yaml(yaml));\n  checkConfigDump(R\"EOF(\nstatic_clusters:\n  - cluster:\n      \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n      name: http12_cluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      http2_protocol_options: {}\n      http_protocol_options: {}\n      protocol_selection: USE_DOWNSTREAM_PROTOCOL\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\ndynamic_active_clusters:\ndynamic_warming_clusters:\n)EOF\");\n}\n\nTEST_F(ClusterManagerImplTest, OutlierEventLog) {\n  const std::string json = R\"EOF(\n  {\n    \"cluster_manager\": {\n      \"outlier_detection\": {\n        \"event_log_path\": \"foo\"\n      }\n    },\n    \"static_resources\": {\n      \"clusters\": []\n    }\n  }\n  )EOF\";\n\n  EXPECT_CALL(log_manager_, createAccessLog(\"foo\"));\n  create(parseBootstrapFromV3Json(json));\n}\n\nTEST_F(ClusterManagerImplTest, NoSdsConfig) {\n  const std::string yaml = R\"EOF(\nstatic_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    type: eds\n    lb_policy: round_robin\n  )EOF\";\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n                            \"cannot create an EDS cluster without an EDS config\");\n}\n\nTEST_F(ClusterManagerImplTest, UnknownClusterType) {\n  const std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"clusters\": [\n        {\n          \"name\": \"cluster_1\",\n          \"connect_timeout\": 
\"0.250s\",\n          \"type\": \"foo\",\n          \"lb_policy\": \"round_robin\"\n        }]\n      }\n    }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV3Json(json)), EnvoyException,\n                          \"invalid value \\\"foo\\\" for type TYPE_ENUM\");\n}\n\nTEST_F(ClusterManagerImplTest, LocalClusterNotDefined) {\n  const std::string json = fmt::sprintf(\n      R\"EOF(\n  {\n    \"cluster_manager\": {\n      \"local_cluster_name\": \"new_cluster\",\n    },\n    \"static_resources\": {\n      %s\n    }\n  }\n  )EOF\",\n      clustersJson({defaultStaticClusterJson(\"cluster_1\"), defaultStaticClusterJson(\"cluster_2\")}));\n\n  EXPECT_THROW(create(parseBootstrapFromV3Json(json)), EnvoyException);\n}\n\nTEST_F(ClusterManagerImplTest, BadClusterManagerConfig) {\n  const std::string json = R\"EOF(\n  {\n    \"cluster_manager\": {\n      \"outlier_detection\": {\n        \"event_log_path\": \"foo\"\n      },\n      \"fake_property\" : \"fake_property\"\n    },\n    \"static_resources\": {\n      \"clusters\": []\n    }\n  }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV3Json(json)), EnvoyException,\n                          \"fake_property: Cannot find field\");\n}\n\nTEST_F(ClusterManagerImplTest, LocalClusterDefined) {\n  const std::string json = fmt::sprintf(\n      R\"EOF(\n  {\n    \"cluster_manager\": {\n      \"local_cluster_name\": \"new_cluster\",\n    },\n    \"static_resources\": {\n      %s\n    }\n  }\n  )EOF\",\n      clustersJson({defaultStaticClusterJson(\"cluster_1\"), defaultStaticClusterJson(\"cluster_2\"),\n                    defaultStaticClusterJson(\"new_cluster\")}));\n\n  create(parseBootstrapFromV3Json(json));\n  checkStats(3 /*added*/, 0 /*modified*/, 0 /*removed*/, 3 /*active*/, 0 /*warming*/);\n\n  factory_.tls_.shutdownThread();\n}\n\nTEST_F(ClusterManagerImplTest, DuplicateCluster) {\n  const std::string json = fmt::sprintf(\n      \"{\\\"static_resources\\\":{%s}}\",\n      
clustersJson({defaultStaticClusterJson(\"cluster_1\"), defaultStaticClusterJson(\"cluster_1\")}));\n  const auto config = parseBootstrapFromV3Json(json);\n  EXPECT_THROW(create(config), EnvoyException);\n}\n\nTEST_F(ClusterManagerImplTest, ValidClusterName) {\n  const std::string yaml = R\"EOF(\nstatic_resources:\n  clusters:\n  - name: cluster:name\n    connect_timeout: 0.250s\n    type: static\n    lb_policy: round_robin\n    load_assignment:\n      cluster_name: foo\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 11001\n  )EOF\";\n\n  create(parseBootstrapFromV3Yaml(yaml));\n  cluster_manager_->clusters()\n      .find(\"cluster:name\")\n      ->second.get()\n      .info()\n      ->statsScope()\n      .counterFromString(\"foo\")\n      .inc();\n  EXPECT_EQ(1UL, factory_.stats_.counter(\"cluster.cluster_name.foo\").value());\n}\n\n// Validate that the primary clusters are derived from the bootstrap and don't\n// include EDS.\nTEST_F(ClusterManagerImplTest, PrimaryClusters) {\n  const std::string yaml = R\"EOF(\nstatic_resources:\n  clusters:\n  - name: static_cluster\n    connect_timeout: 0.250s\n    type: static\n  - name: logical_dns_cluster\n    connect_timeout: 0.250s\n    type: logical_dns\n    load_assignment:\n      endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: foo.com\n                  port_value: 11001\n  - name: strict_dns_cluster\n    connect_timeout: 0.250s\n    type: strict_dns\n    load_assignment:\n      endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: foo.com\n                  port_value: 11001\n  - name: rest_eds_cluster\n    connect_timeout: 0.250s\n    type: eds\n    eds_cluster_config:\n      eds_config:\n        
api_config_source:\n          api_type: GRPC\n          grpc_services:\n            envoy_grpc:\n              cluster_name: static_cluster\n  )EOF\";\n  create(parseBootstrapFromV3Yaml(yaml));\n  const auto& primary_clusters = cluster_manager_->primaryClusters();\n  EXPECT_THAT(primary_clusters, testing::UnorderedElementsAre(\n                                    \"static_cluster\", \"strict_dns_cluster\", \"logical_dns_cluster\"));\n}\n\nTEST_F(ClusterManagerImplTest, OriginalDstLbRestriction) {\n  const std::string yaml = R\"EOF(\nstatic_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    type: original_dst\n    lb_policy: round_robin\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n      \"cluster: LB policy ROUND_ROBIN is not valid for Cluster type ORIGINAL_DST. Only \"\n      \"'CLUSTER_PROVIDED' or 'ORIGINAL_DST_LB' is allowed with cluster type 'ORIGINAL_DST'\");\n}\n\nTEST_F(ClusterManagerImplTest, OriginalDstLbRestriction2) {\n  const std::string yaml = R\"EOF(\n static_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    type: static\n    lb_policy: original_dst_lb\n    load_assignment:\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 11001\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml, false)), EnvoyException,\n                            \"cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB is not \"\n                            \"valid for Cluster type STATIC. 
\"\n                            \"'ORIGINAL_DST_LB' is allowed only with cluster type 'ORIGINAL_DST'\");\n}\n\nclass ClusterManagerSubsetInitializationTest\n    : public ClusterManagerImplTest,\n      public testing::WithParamInterface<envoy::config::cluster::v3::Cluster::LbPolicy> {\npublic:\n  ClusterManagerSubsetInitializationTest() = default;\n\n  static std::vector<envoy::config::cluster::v3::Cluster::LbPolicy> lbPolicies() {\n    int first = static_cast<int>(envoy::config::cluster::v3::Cluster::LbPolicy_MIN);\n    int last = static_cast<int>(envoy::config::cluster::v3::Cluster::LbPolicy_MAX);\n    ASSERT(first < last);\n\n    std::vector<envoy::config::cluster::v3::Cluster::LbPolicy> policies;\n    for (int i = first; i <= last; i++) {\n      if (envoy::config::cluster::v3::Cluster::LbPolicy_IsValid(i)) {\n        auto policy = static_cast<envoy::config::cluster::v3::Cluster::LbPolicy>(i);\n        if (policy != envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG) {\n          policies.push_back(policy);\n        }\n      }\n    }\n    return policies;\n  }\n\n  static std::string paramName(const testing::TestParamInfo<ParamType>& info) {\n    const std::string& name = envoy::config::cluster::v3::Cluster::LbPolicy_Name(info.param);\n    return absl::StrReplaceAll(name, {{\"_\", \"\"}});\n  }\n};\n\n// Test initialization of subset load balancer with every possible load balancer policy.\nTEST_P(ClusterManagerSubsetInitializationTest, SubsetLoadBalancerInitialization) {\n  const std::string yamlPattern = R\"EOF(\n static_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    {}\n    lb_policy: \"{}\"\n    lb_subset_config:\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors:\n        - keys: [ \"x\" ]\n    load_assignment:\n      cluster_name: cluster_1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n             
   port_value: 8000\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8001\n  )EOF\";\n\n  const std::string& policy_name = envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam());\n\n  std::string cluster_type = \"type: STATIC\";\n  if (GetParam() == envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB) {\n    cluster_type = \"type: ORIGINAL_DST\";\n  } else if (GetParam() == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) {\n    // This custom cluster type is registered by linking test/integration/custom/static_cluster.cc.\n    cluster_type = \"cluster_type: { name: envoy.clusters.custom_static_with_lb }\";\n  }\n  const std::string yaml = fmt::format(yamlPattern, cluster_type, policy_name);\n\n  if (GetParam() == envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB ||\n      GetParam() == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) {\n    EXPECT_THROW_WITH_MESSAGE(\n        create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n        fmt::format(\"cluster: LB policy {} cannot be combined with lb_subset_config\",\n                    envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam())));\n\n  } else {\n    create(parseBootstrapFromV3Yaml(yaml));\n    checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 1 /*active*/, 0 /*warming*/);\n\n    Upstream::ThreadLocalCluster* tlc = cluster_manager_->get(\"cluster_1\");\n    EXPECT_NE(nullptr, tlc);\n\n    if (tlc) {\n      Upstream::LoadBalancer& lb = tlc->loadBalancer();\n      EXPECT_NE(nullptr, dynamic_cast<Upstream::SubsetLoadBalancer*>(&lb));\n    }\n\n    factory_.tls_.shutdownThread();\n  }\n}\n\nINSTANTIATE_TEST_SUITE_P(ClusterManagerSubsetInitializationTest,\n                         ClusterManagerSubsetInitializationTest,\n                         testing::ValuesIn(ClusterManagerSubsetInitializationTest::lbPolicies()),\n                
         ClusterManagerSubsetInitializationTest::paramName);\n\nTEST_F(ClusterManagerImplTest, SubsetLoadBalancerOriginalDstRestriction) {\n  const std::string yaml = R\"EOF(\n static_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    type: original_dst\n    lb_policy: original_dst_lb\n    lb_subset_config:\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors:\n        - keys: [ \"x\" ]\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml, false)), EnvoyException,\n                            \"cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB cannot be \"\n                            \"combined with lb_subset_config\");\n}\n\nTEST_F(ClusterManagerImplTest, SubsetLoadBalancerClusterProvidedLbRestriction) {\n  const std::string yaml = R\"EOF(\n static_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    type: static\n    lb_policy: cluster_provided\n    lb_subset_config:\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors:\n        - keys: [ \"x\" ]\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n      \"cluster: LB policy CLUSTER_PROVIDED cannot be combined with lb_subset_config\");\n}\n\nTEST_F(ClusterManagerImplTest, SubsetLoadBalancerLocalityAware) {\n  const std::string yaml = R\"EOF(\n static_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    lb_subset_config:\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors:\n        - keys: [ \"x\" ]\n      locality_weight_aware: true\n    load_assignment:\n      cluster_name: cluster_1\n      endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 8000\n          - endpoint:\n              address:\n                socket_address:\n                  
address: 127.0.0.1\n                  port_value: 8001\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n                            \"Locality weight aware subset LB requires that a \"\n                            \"locality_weighted_lb_config be set in cluster_1\");\n}\n\nTEST_F(ClusterManagerImplTest, RingHashLoadBalancerInitialization) {\n  const std::string yaml = R\"EOF(\n static_resources:\n  clusters:\n  - name: redis_cluster\n    lb_policy: RING_HASH\n    ring_hash_lb_config:\n      minimum_ring_size: 125\n    connect_timeout: 0.250s\n    type: STATIC\n    load_assignment:\n      cluster_name: redis_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8000\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8001\n  )EOF\";\n  create(parseBootstrapFromV3Yaml(yaml));\n}\n\nTEST_F(ClusterManagerImplTest, RingHashLoadBalancerV2Initialization) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: redis_cluster\n      connect_timeout: 0.250s\n      lb_policy: RING_HASH\n      load_assignment:\n        cluster_name: redis_cluster\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 8000\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 8001\n      dns_lookup_family: V4_ONLY\n      ring_hash_lb_config:\n        minimum_ring_size: 125\n  )EOF\";\n  create(parseBootstrapFromV3Yaml(yaml));\n}\n\n// Verify EDS clusters have EDS config.\nTEST_F(ClusterManagerImplTest, EdsClustersRequireEdsConfig) {\n  const std::string yaml = R\"EOF(\n 
 static_resources:\n    clusters:\n    - name: cluster_0\n      type: EDS\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n                            \"cannot create an EDS cluster without an EDS config\");\n}\n\n// Verify that specifying a cluster provided LB, but the cluster doesn't provide one is an error.\nTEST_F(ClusterManagerImplTest, ClusterProvidedLbNoLb) {\n  const std::string json = fmt::sprintf(\"{\\\"static_resources\\\":{%s}}\",\n                                        clustersJson({defaultStaticClusterJson(\"cluster_0\")}));\n\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  cluster1->info_->name_ = \"cluster_0\";\n  cluster1->info_->lb_type_ = LoadBalancerType::ClusterProvided;\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Json(json)), EnvoyException,\n                            \"cluster manager: cluster provided LB specified but cluster \"\n                            \"'cluster_0' did not provide one. 
Check cluster documentation.\");\n}\n\n// Verify that not specifying a cluster provided LB, but the cluster does provide one is an error.\nTEST_F(ClusterManagerImplTest, ClusterProvidedLbNotConfigured) {\n  const std::string json = fmt::sprintf(\"{\\\"static_resources\\\":{%s}}\",\n                                        clustersJson({defaultStaticClusterJson(\"cluster_0\")}));\n\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  cluster1->info_->name_ = \"cluster_0\";\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster1, new MockThreadAwareLoadBalancer())));\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Json(json)), EnvoyException,\n                            \"cluster manager: cluster provided LB not specified but cluster \"\n                            \"'cluster_0' provided one. Check cluster documentation.\");\n}\n\nclass ClusterManagerImplThreadAwareLbTest : public ClusterManagerImplTest {\npublic:\n  void doTest(LoadBalancerType lb_type) {\n    const std::string json = fmt::sprintf(\"{\\\"static_resources\\\":{%s}}\",\n                                          clustersJson({defaultStaticClusterJson(\"cluster_0\")}));\n\n    std::shared_ptr<MockClusterMockPrioritySet> cluster1(\n        new NiceMock<MockClusterMockPrioritySet>());\n    cluster1->info_->name_ = \"cluster_0\";\n    cluster1->info_->lb_type_ = lb_type;\n\n    InSequence s;\n    EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n        .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n    ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n    create(parseBootstrapFromV3Json(json));\n\n    EXPECT_EQ(nullptr, cluster_manager_->get(\"cluster_0\")->loadBalancer().chooseHost(nullptr));\n\n    cluster1->prioritySet().getMockHostSet(0)->hosts_ = {\n        makeTestHost(cluster1->info_, \"tcp://127.0.0.1:80\")};\n    
cluster1->prioritySet().getMockHostSet(0)->runCallbacks(\n        cluster1->prioritySet().getMockHostSet(0)->hosts_, {});\n    cluster1->initialize_callback_();\n    EXPECT_EQ(cluster1->prioritySet().getMockHostSet(0)->hosts_[0],\n              cluster_manager_->get(\"cluster_0\")->loadBalancer().chooseHost(nullptr));\n  }\n};\n\n// Test that the cluster manager correctly re-creates the worker local LB when there is a host\n// set change.\nTEST_F(ClusterManagerImplThreadAwareLbTest, RingHashLoadBalancerThreadAwareUpdate) {\n  doTest(LoadBalancerType::RingHash);\n}\n\n// Test that the cluster manager correctly re-creates the worker local LB when there is a host\n// set change.\nTEST_F(ClusterManagerImplThreadAwareLbTest, MaglevLoadBalancerThreadAwareUpdate) {\n  doTest(LoadBalancerType::Maglev);\n}\n\nTEST_F(ClusterManagerImplTest, TcpHealthChecker) {\n  const std::string yaml = R\"EOF(\n static_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: cluster_1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 11001\n    health_checks:\n    - timeout: 1s\n      interval: 1s\n      unhealthy_threshold: 2\n      healthy_threshold: 2\n      tcp_health_check:\n        send:\n          text: '01'\n        receive:\n          - text: '02'\n  )EOF\";\n\n  Network::MockClientConnection* connection = new NiceMock<Network::MockClientConnection>();\n  EXPECT_CALL(factory_.dispatcher_,\n              createClientConnection_(\n                  PointeesEq(Network::Utility::resolveUrl(\"tcp://127.0.0.1:11001\")), _, _, _))\n      .WillOnce(Return(connection));\n  create(parseBootstrapFromV3Yaml(yaml));\n  factory_.tls_.shutdownThread();\n}\n\nTEST_F(ClusterManagerImplTest, HttpHealthChecker) {\n  const std::string yaml = R\"EOF(\n 
static_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: cluster_1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 11001\n    health_checks:\n    - timeout: 1s\n      interval: 1s\n      unhealthy_threshold: 2\n      healthy_threshold: 2\n      http_health_check:\n        path: \"/healthcheck\"\n  )EOF\";\n\n  Network::MockClientConnection* connection = new NiceMock<Network::MockClientConnection>();\n  EXPECT_CALL(factory_.dispatcher_,\n              createClientConnection_(\n                  PointeesEq(Network::Utility::resolveUrl(\"tcp://127.0.0.1:11001\")), _, _, _))\n      .WillOnce(Return(connection));\n  create(parseBootstrapFromV3Yaml(yaml));\n  factory_.tls_.shutdownThread();\n}\n\nTEST_F(ClusterManagerImplTest, UnknownCluster) {\n  const std::string json = fmt::sprintf(\"{\\\"static_resources\\\":{%s}}\",\n                                        clustersJson({defaultStaticClusterJson(\"cluster_1\")}));\n\n  create(parseBootstrapFromV3Json(json));\n  EXPECT_EQ(nullptr, cluster_manager_->get(\"hello\"));\n  EXPECT_EQ(nullptr, cluster_manager_->httpConnPoolForCluster(\"hello\", ResourcePriority::Default,\n                                                              Http::Protocol::Http2, nullptr));\n  EXPECT_EQ(nullptr,\n            cluster_manager_->tcpConnPoolForCluster(\"hello\", ResourcePriority::Default, nullptr));\n  EXPECT_THROW(cluster_manager_->tcpConnForCluster(\"hello\", nullptr), EnvoyException);\n\n  NiceMock<MockLoadBalancerContext> example_com_context;\n  ON_CALL(example_com_context, upstreamTransportSocketOptions())\n      .WillByDefault(Return(std::make_shared<Network::TransportSocketOptionsImpl>(\"example.com\")));\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnPoolForCluster(\"hello\", 
ResourcePriority::Default,\n                                                             &example_com_context));\n  EXPECT_THROW(cluster_manager_->tcpConnForCluster(\"hello\", &example_com_context), EnvoyException);\n\n  EXPECT_THROW(cluster_manager_->httpAsyncClientForCluster(\"hello\"), EnvoyException);\n  factory_.tls_.shutdownThread();\n}\n\n/**\n * Test that buffer limits are set on new TCP connections.\n */\nTEST_F(ClusterManagerImplTest, VerifyBufferLimits) {\n  const std::string yaml = R\"EOF(\n static_resources:\n  clusters:\n  - name: cluster_1\n    connect_timeout: 0.250s\n    type: static\n    lb_policy: round_robin\n    per_connection_buffer_limit_bytes: 8192\n    load_assignment:\n      cluster_name: cluster_1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 11001\n  )EOF\";\n\n  create(parseBootstrapFromV3Yaml(yaml));\n  Network::MockClientConnection* connection = new NiceMock<Network::MockClientConnection>();\n  EXPECT_CALL(*connection, setBufferLimits(8192));\n  EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n      .WillOnce(Return(connection));\n  auto conn_data = cluster_manager_->tcpConnForCluster(\"cluster_1\", nullptr);\n  EXPECT_EQ(connection, conn_data.connection_.get());\n  factory_.tls_.shutdownThread();\n}\n\nTEST_F(ClusterManagerImplTest, ShutdownOrder) {\n  const std::string json = fmt::sprintf(\"{\\\"static_resources\\\":{%s}}\",\n                                        clustersJson({defaultStaticClusterJson(\"cluster_1\")}));\n\n  create(parseBootstrapFromV3Json(json));\n  Cluster& cluster = cluster_manager_->activeClusters().begin()->second;\n  EXPECT_EQ(\"cluster_1\", cluster.info()->name());\n  EXPECT_EQ(cluster.info(), cluster_manager_->get(\"cluster_1\")->info());\n  EXPECT_EQ(\n      1UL,\n      
cluster_manager_->get(\"cluster_1\")->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0],\n            cluster_manager_->get(\"cluster_1\")->loadBalancer().chooseHost(nullptr));\n\n  // Local reference, primary reference, thread local reference, host reference, async client\n  // reference.\n  EXPECT_EQ(5U, cluster.info().use_count());\n\n  // Thread local reference should be gone.\n  factory_.tls_.shutdownThread();\n  EXPECT_EQ(3U, cluster.info().use_count());\n}\n\nTEST_F(ClusterManagerImplTest, InitializeOrder) {\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  const std::string json = fmt::sprintf(\n      R\"EOF(\n  {\n    \"dynamic_resources\": {\n      \"cds_config\": {\n        \"api_config_source\": {\n          \"api_type\": \"0\",\n          \"refresh_delay\": \"30s\",\n          \"cluster_names\": [\"cds_cluster\"]\n        }\n      }\n    },\n    \"static_resources\": {\n      %s\n    }\n  }\n  )EOF\",\n      clustersJson({defaultStaticClusterJson(\"cds_cluster\"),\n                    defaultStaticClusterJson(\"fake_cluster\"),\n                    defaultStaticClusterJson(\"fake_cluster2\")}));\n\n  MockCdsApi* cds = new MockCdsApi();\n  std::shared_ptr<MockClusterMockPrioritySet> cds_cluster(\n      new NiceMock<MockClusterMockPrioritySet>());\n  cds_cluster->info_->name_ = \"cds_cluster\";\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  std::shared_ptr<MockClusterMockPrioritySet> cluster2(new NiceMock<MockClusterMockPrioritySet>());\n  cluster2->info_->name_ = \"fake_cluster2\";\n  cluster2->info_->lb_type_ = LoadBalancerType::RingHash;\n\n  // This part tests static init.\n  InSequence s;\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cds_cluster, nullptr)));\n  ON_CALL(*cds_cluster, 
initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster2, nullptr)));\n  ON_CALL(*cluster2, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary));\n  EXPECT_CALL(factory_, createCds_()).WillOnce(Return(cds));\n  EXPECT_CALL(*cds, setInitializedCb(_));\n  EXPECT_CALL(*cds_cluster, initialize(_));\n  EXPECT_CALL(*cluster1, initialize(_));\n\n  create(parseBootstrapFromV3Json(json));\n\n  ReadyWatcher initialized;\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  EXPECT_CALL(*cluster2, initialize(_));\n  cds_cluster->initialize_callback_();\n  cluster1->initialize_callback_();\n\n  EXPECT_CALL(*cds, initialize());\n  cluster2->initialize_callback_();\n\n  // This part tests CDS init.\n  std::shared_ptr<MockClusterMockPrioritySet> cluster3(new NiceMock<MockClusterMockPrioritySet>());\n  cluster3->info_->name_ = \"cluster3\";\n  std::shared_ptr<MockClusterMockPrioritySet> cluster4(new NiceMock<MockClusterMockPrioritySet>());\n  cluster4->info_->name_ = \"cluster4\";\n  std::shared_ptr<MockClusterMockPrioritySet> cluster5(new NiceMock<MockClusterMockPrioritySet>());\n  cluster5->info_->name_ = \"cluster5\";\n\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster3, nullptr)));\n  ON_CALL(*cluster3, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary));\n  cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"cluster3\"), \"version1\");\n\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster4, nullptr)));\n  ON_CALL(*cluster4, 
initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(*cluster4, initialize(_));\n  cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"cluster4\"), \"version2\");\n\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster5, nullptr)));\n  ON_CALL(*cluster5, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary));\n  cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"cluster5\"), \"version3\");\n\n  cds->initialized_callback_();\n  EXPECT_CALL(*cds, versionInfo()).WillOnce(Return(\"version3\"));\n  checkConfigDump(R\"EOF(\n version_info: version3\n static_clusters:\n  - cluster:\n      \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n      name: \"cds_cluster\"\n      type: \"STATIC\"\n      connect_timeout: 0.25s\n      load_assignment:\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\n  - cluster:\n      \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n      name: \"fake_cluster\"\n      type: \"STATIC\"\n      connect_timeout: 0.25s\n      load_assignment:\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\n  - cluster:\n      \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n      name: \"fake_cluster2\"\n      type: \"STATIC\"\n      connect_timeout: 0.25s\n      load_assignment:\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  
port_value: 11001\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\n dynamic_active_clusters:\n  - version_info: \"version1\"\n    cluster:\n      \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n      name: \"cluster3\"\n      type: \"STATIC\"\n      connect_timeout: 0.25s\n      load_assignment:\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\n  - version_info: \"version2\"\n    cluster:\n      \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n      name: \"cluster4\"\n      type: \"STATIC\"\n      connect_timeout: 0.25s\n      load_assignment:\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\n  - version_info: \"version3\"\n    cluster:\n      \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n      name: \"cluster5\"\n      type: \"STATIC\"\n      connect_timeout: 0.25s\n      load_assignment:\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\n dynamic_warming_clusters:\n)EOF\");\n\n  EXPECT_CALL(*cluster3, initialize(_));\n  cluster4->initialize_callback_();\n\n  // Test cluster 5 getting removed before everything is initialized.\n  cluster_manager_->removeCluster(\"cluster5\");\n\n  EXPECT_CALL(initialized, ready());\n  cluster3->initialize_callback_();\n\n  factory_.tls_.shutdownThread();\n\n  
EXPECT_TRUE(Mock::VerifyAndClearExpectations(cds_cluster.get()));\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster2.get()));\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster3.get()));\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster4.get()));\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster5.get()));\n}\n\nTEST_F(ClusterManagerImplTest, DynamicRemoveWithLocalCluster) {\n  InSequence s;\n\n  // Setup a cluster manager with a static local cluster.\n  const std::string json = fmt::sprintf(R\"EOF(\n  {\n    \"cluster_manager\": {\n      \"local_cluster_name\": \"foo\"\n    },\n    \"static_resources\": {\n      %s\n    }\n  }\n  )EOF\",\n                                        clustersJson({defaultStaticClusterJson(\"fake\")}));\n\n  std::shared_ptr<MockClusterMockPrioritySet> foo(new NiceMock<MockClusterMockPrioritySet>());\n  foo->info_->name_ = \"foo\";\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, false))\n      .WillOnce(Return(std::make_pair(foo, nullptr)));\n  ON_CALL(*foo, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(*foo, initialize(_));\n\n  create(parseBootstrapFromV3Json(json));\n  foo->initialize_callback_();\n\n  // Now add a dynamic cluster. 
This cluster will have a member update callback from the local\n  // cluster in its load balancer.\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  cluster1->info_->name_ = \"cluster1\";\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, true))\n      .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(*cluster1, initialize(_));\n  cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"cluster1\"), \"\");\n\n  // Add another update callback on foo so we make sure callbacks keep working.\n  ReadyWatcher membership_updated;\n  foo->prioritySet().addPriorityUpdateCb(\n      [&membership_updated](uint32_t, const HostVector&, const HostVector&) -> void {\n        membership_updated.ready();\n      });\n\n  // Remove the new cluster.\n  cluster_manager_->removeCluster(\"cluster1\");\n\n  // Fire a member callback on the local cluster, which should not call any update callbacks on\n  // the deleted cluster.\n  foo->prioritySet().getMockHostSet(0)->hosts_ = {makeTestHost(foo->info_, \"tcp://127.0.0.1:80\")};\n  EXPECT_CALL(membership_updated, ready());\n  foo->prioritySet().getMockHostSet(0)->runCallbacks(foo->prioritySet().getMockHostSet(0)->hosts_,\n                                                     {});\n\n  factory_.tls_.shutdownThread();\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(foo.get()));\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n}\n\nTEST_F(ClusterManagerImplTest, RemoveWarmingCluster) {\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567891234));\n  create(defaultConfig());\n\n  InSequence s;\n  ReadyWatcher initialized;\n  EXPECT_CALL(initialized, ready());\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  
EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  EXPECT_CALL(*cluster1, initializePhase()).Times(0);\n  EXPECT_CALL(*cluster1, initialize(_));\n  EXPECT_TRUE(\n      cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"fake_cluster\"), \"version3\"));\n  checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/);\n  EXPECT_EQ(nullptr, cluster_manager_->get(\"fake_cluster\"));\n  checkConfigDump(R\"EOF(\ndynamic_warming_clusters:\n  - version_info: \"version3\"\n    cluster:\n      \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n      name: \"fake_cluster\"\n      type: STATIC\n      connect_timeout: 0.25s\n      load_assignment:\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n    last_updated:\n      seconds: 1234567891\n      nanos: 234000000\n)EOF\");\n\n  EXPECT_TRUE(cluster_manager_->removeCluster(\"fake_cluster\"));\n  checkStats(1 /*added*/, 0 /*modified*/, 1 /*removed*/, 0 /*active*/, 0 /*warming*/);\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n}\n\nTEST_F(ClusterManagerImplTest, ModifyWarmingCluster) {\n  time_system_.setSystemTime(std::chrono::milliseconds(1234567891234));\n  create(defaultConfig());\n\n  InSequence s;\n  ReadyWatcher initialized;\n  EXPECT_CALL(initialized, ready());\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  // Add a \"fake_cluster\" in warming state.\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1 =\n      std::make_shared<NiceMock<MockClusterMockPrioritySet>>();\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  EXPECT_CALL(*cluster1, initializePhase()).Times(0);\n  EXPECT_CALL(*cluster1, initialize(_));\n  EXPECT_TRUE(\n      
cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"fake_cluster\"), \"version3\"));\n  checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/);\n  EXPECT_EQ(nullptr, cluster_manager_->get(\"fake_cluster\"));\n  checkConfigDump(R\"EOF(\n dynamic_warming_clusters:\n   - version_info: \"version3\"\n     cluster:\n       \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n       name: \"fake_cluster\"\n       type: STATIC\n       connect_timeout: 0.25s\n       load_assignment:\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n     last_updated:\n       seconds: 1234567891\n       nanos: 234000000\n )EOF\");\n\n  // Update the warming cluster that was just added.\n  std::shared_ptr<MockClusterMockPrioritySet> cluster2 =\n      std::make_shared<NiceMock<MockClusterMockPrioritySet>>();\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster2, nullptr)));\n  EXPECT_CALL(*cluster2, initializePhase()).Times(0);\n  EXPECT_CALL(*cluster2, initialize(_));\n  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(\n      parseClusterFromV3Json(fmt::sprintf(kDefaultStaticClusterTmpl, \"fake_cluster\",\n                                          R\"EOF(\n\"socket_address\": {\n  \"address\": \"127.0.0.1\",\n  \"port_value\": 11002\n})EOF\")),\n      \"version3\"));\n  checkStats(1 /*added*/, 1 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/);\n  checkConfigDump(R\"EOF(\n dynamic_warming_clusters:\n   - version_info: \"version3\"\n     cluster:\n       \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n       name: \"fake_cluster\"\n       type: STATIC\n       connect_timeout: 0.25s\n       load_assignment:\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                
socket_address:\n                  address: 127.0.0.1\n                  port_value: 11002\n     last_updated:\n       seconds: 1234567891\n       nanos: 234000000\n )EOF\");\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster2.get()));\n}\n\n// Verify that shutting down the cluster manager destroys warming clusters.\nTEST_F(ClusterManagerImplTest, ShutdownWithWarming) {\n  create(defaultConfig());\n\n  InSequence s;\n  ReadyWatcher initialized;\n  EXPECT_CALL(initialized, ready());\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  EXPECT_CALL(*cluster1, initializePhase()).Times(0);\n  EXPECT_CALL(*cluster1, initialize(_));\n  EXPECT_TRUE(\n      cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"fake_cluster\"), \"version1\"));\n  checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/);\n  cluster_manager_->shutdown();\n  checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 0 /*warming*/);\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n}\n\nTEST_F(ClusterManagerImplTest, DynamicAddRemove) {\n  create(defaultConfig());\n\n  InSequence s;\n  ReadyWatcher initialized;\n  EXPECT_CALL(initialized, ready());\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  std::unique_ptr<MockClusterUpdateCallbacks> callbacks(new NiceMock<MockClusterUpdateCallbacks>());\n  ClusterUpdateCallbacksHandlePtr cb =\n      cluster_manager_->addThreadLocalClusterUpdateCallbacks(*callbacks);\n\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      
.WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  EXPECT_CALL(*cluster1, initializePhase()).Times(0);\n  EXPECT_CALL(*cluster1, initialize(_));\n  EXPECT_CALL(*callbacks, onClusterAddOrUpdate(_)).Times(1);\n  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"fake_cluster\"), \"\"));\n  checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/);\n  EXPECT_EQ(1, cluster_manager_->warmingClusterCount());\n  EXPECT_EQ(nullptr, cluster_manager_->get(\"fake_cluster\"));\n  cluster1->initialize_callback_();\n\n  EXPECT_EQ(cluster1->info_, cluster_manager_->get(\"fake_cluster\")->info());\n  checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 1 /*active*/, 0 /*warming*/);\n  EXPECT_EQ(0, cluster_manager_->warmingClusterCount());\n\n  // Now try to update again but with the same hash.\n  EXPECT_FALSE(cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"fake_cluster\"), \"\"));\n\n  // Now do it again with a different hash.\n  auto update_cluster = defaultStaticCluster(\"fake_cluster\");\n  update_cluster.mutable_per_connection_buffer_limit_bytes()->set_value(12345);\n\n  std::shared_ptr<MockClusterMockPrioritySet> cluster2(new NiceMock<MockClusterMockPrioritySet>());\n  cluster2->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster2->info_, \"tcp://127.0.0.1:80\")};\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster2, nullptr)));\n  EXPECT_CALL(*cluster2, initializePhase()).Times(0);\n  EXPECT_CALL(*cluster2, initialize(_))\n      .WillOnce(Invoke([cluster1](std::function<void()> initialize_callback) {\n        // Test inline init.\n        initialize_callback();\n      }));\n  EXPECT_CALL(*callbacks, onClusterAddOrUpdate(_)).Times(1);\n  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(update_cluster, \"\"));\n\n  EXPECT_EQ(cluster2->info_, cluster_manager_->get(\"fake_cluster\")->info());\n  EXPECT_EQ(1UL, 
cluster_manager_->clusters().size());\n  Http::ConnectionPool::MockInstance* cp = new Http::ConnectionPool::MockInstance();\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(cp));\n  EXPECT_EQ(cp, cluster_manager_->httpConnPoolForCluster(\"fake_cluster\", ResourcePriority::Default,\n                                                         Http::Protocol::Http11, nullptr));\n\n  Tcp::ConnectionPool::MockInstance* cp2 = new Tcp::ConnectionPool::MockInstance();\n  EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(cp2));\n  EXPECT_EQ(cp2, cluster_manager_->tcpConnPoolForCluster(\"fake_cluster\", ResourcePriority::Default,\n                                                         nullptr));\n\n  Network::MockClientConnection* connection = new Network::MockClientConnection();\n  ON_CALL(*cluster2->info_, features())\n      .WillByDefault(Return(ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE));\n  EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n      .WillOnce(Return(connection));\n  EXPECT_CALL(*connection, setBufferLimits(_));\n  EXPECT_CALL(*connection, addConnectionCallbacks(_));\n  auto conn_info = cluster_manager_->tcpConnForCluster(\"fake_cluster\", nullptr);\n  EXPECT_EQ(conn_info.connection_.get(), connection);\n\n  // Now remove the cluster. This should drain the connection pools, but not affect\n  // tcp connections.\n  Http::ConnectionPool::Instance::DrainedCb drained_cb;\n  Tcp::ConnectionPool::Instance::DrainedCb drained_cb2;\n  EXPECT_CALL(*callbacks, onClusterRemoval(_)).Times(1);\n  EXPECT_CALL(*cp, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb));\n  EXPECT_CALL(*cp2, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb2));\n  EXPECT_TRUE(cluster_manager_->removeCluster(\"fake_cluster\"));\n  EXPECT_EQ(nullptr, cluster_manager_->get(\"fake_cluster\"));\n  EXPECT_EQ(0UL, cluster_manager_->clusters().size());\n\n  // Close the TCP connection. 
Success is no ASSERT or crash due to referencing\n  // the removed cluster.\n  EXPECT_CALL(*connection, dispatcher());\n  connection->raiseEvent(Network::ConnectionEvent::LocalClose);\n\n  // Remove an unknown cluster.\n  EXPECT_FALSE(cluster_manager_->removeCluster(\"foo\"));\n\n  drained_cb();\n  drained_cb2();\n\n  checkStats(1 /*added*/, 1 /*modified*/, 1 /*removed*/, 0 /*active*/, 0 /*warming*/);\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster2.get()));\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(callbacks.get()));\n}\n\nTEST_F(ClusterManagerImplTest, AddOrUpdateClusterStaticExists) {\n  const std::string json = fmt::sprintf(\"{\\\"static_resources\\\":{%s}}\",\n                                        clustersJson({defaultStaticClusterJson(\"fake_cluster\")}));\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  InSequence s;\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(*cluster1, initialize(_));\n\n  create(parseBootstrapFromV3Json(json));\n\n  ReadyWatcher initialized;\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  EXPECT_CALL(initialized, ready());\n  cluster1->initialize_callback_();\n\n  EXPECT_FALSE(cluster_manager_->addOrUpdateCluster(defaultStaticCluster(\"fake_cluster\"), \"\"));\n\n  // Attempt to remove a static cluster.\n  EXPECT_FALSE(cluster_manager_->removeCluster(\"fake_cluster\"));\n\n  factory_.tls_.shutdownThread();\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n}\n\n// Verifies that we correctly propagate the host_set state to the TLS clusters.\nTEST_F(ClusterManagerImplTest, HostsPostedToTlsCluster) {\n  const std::string json = 
fmt::sprintf(\"{\\\"static_resources\\\":{%s}}\",\n                                        clustersJson({defaultStaticClusterJson(\"fake_cluster\")}));\n  std::shared_ptr<MockClusterRealPrioritySet> cluster1(new NiceMock<MockClusterRealPrioritySet>());\n  InSequence s;\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(*cluster1, initialize(_));\n\n  create(parseBootstrapFromV3Json(json));\n\n  ReadyWatcher initialized;\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  EXPECT_CALL(initialized, ready());\n  cluster1->initialize_callback_();\n\n  // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy.\n  HostSharedPtr host1 = makeTestHost(cluster1->info_, \"tcp://127.0.0.1:80\");\n  host1->healthFlagSet(HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);\n  HostSharedPtr host2 = makeTestHost(cluster1->info_, \"tcp://127.0.0.1:80\");\n  host2->healthFlagSet(HostImpl::HealthFlag::FAILED_ACTIVE_HC);\n  HostSharedPtr host3 = makeTestHost(cluster1->info_, \"tcp://127.0.0.1:80\");\n\n  HostVector hosts{host1, host2, host3};\n  auto hosts_ptr = std::make_shared<HostVector>(hosts);\n\n  cluster1->priority_set_.updateHosts(\n      0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, hosts, {},\n      100);\n\n  auto* tls_cluster = cluster_manager_->get(cluster1->info_->name());\n\n  EXPECT_EQ(1, tls_cluster->prioritySet().hostSetsPerPriority().size());\n  EXPECT_EQ(1, tls_cluster->prioritySet().hostSetsPerPriority()[0]->degradedHosts().size());\n  EXPECT_EQ(host1, tls_cluster->prioritySet().hostSetsPerPriority()[0]->degradedHosts()[0]);\n  EXPECT_EQ(1, tls_cluster->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(host3, tls_cluster->prioritySet().hostSetsPerPriority()[0]->healthyHosts()[0]);\n  EXPECT_EQ(3, 
tls_cluster->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(100, tls_cluster->prioritySet().hostSetsPerPriority()[0]->overprovisioningFactor());\n\n  factory_.tls_.shutdownThread();\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n}\n\n// Test that we close all HTTP connection pool connections when there is a host health failure.\nTEST_F(ClusterManagerImplTest, CloseHttpConnectionsOnHealthFailure) {\n  const std::string json = fmt::sprintf(\"{\\\"static_resources\\\":{%s}}\",\n                                        clustersJson({defaultStaticClusterJson(\"some_cluster\")}));\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  cluster1->info_->name_ = \"some_cluster\";\n  HostSharedPtr test_host = makeTestHost(cluster1->info_, \"tcp://127.0.0.1:80\");\n  cluster1->prioritySet().getMockHostSet(0)->hosts_ = {test_host};\n  ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n\n  MockHealthChecker health_checker;\n  ON_CALL(*cluster1, healthChecker()).WillByDefault(Return(&health_checker));\n\n  Outlier::MockDetector outlier_detector;\n  ON_CALL(*cluster1, outlierDetector()).WillByDefault(Return(&outlier_detector));\n\n  Http::ConnectionPool::MockInstance* cp1 = new Http::ConnectionPool::MockInstance();\n  Http::ConnectionPool::MockInstance* cp2 = new Http::ConnectionPool::MockInstance();\n\n  {\n    InSequence s;\n\n    EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n        .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n    EXPECT_CALL(health_checker, addHostCheckCompleteCb(_));\n    EXPECT_CALL(outlier_detector, addChangedStateCb(_));\n    EXPECT_CALL(*cluster1, initialize(_))\n        .WillOnce(Invoke([cluster1](std::function<void()> initialize_callback) {\n          // Test inline init.\n          initialize_callback();\n        }));\n    create(parseBootstrapFromV3Json(json));\n\n    EXPECT_CALL(factory_, 
allocateConnPool_(_, _, _)).WillOnce(Return(cp1));\n    cluster_manager_->httpConnPoolForCluster(\"some_cluster\", ResourcePriority::Default,\n                                             Http::Protocol::Http11, nullptr);\n\n    outlier_detector.runCallbacks(test_host);\n    health_checker.runCallbacks(test_host, HealthTransition::Unchanged);\n\n    EXPECT_CALL(*cp1, drainConnections());\n    test_host->healthFlagSet(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n    outlier_detector.runCallbacks(test_host);\n\n    EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(cp2));\n    cluster_manager_->httpConnPoolForCluster(\"some_cluster\", ResourcePriority::High,\n                                             Http::Protocol::Http11, nullptr);\n  }\n\n  // Order of these calls is implementation dependent, so can't sequence them!\n  EXPECT_CALL(*cp1, drainConnections());\n  EXPECT_CALL(*cp2, drainConnections());\n  test_host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker.runCallbacks(test_host, HealthTransition::Changed);\n\n  test_host->healthFlagClear(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  outlier_detector.runCallbacks(test_host);\n  test_host->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker.runCallbacks(test_host, HealthTransition::Changed);\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n}\n\n// Test that we close all TCP connection pool connections when there is a host health failure.\nTEST_F(ClusterManagerImplTest, CloseTcpConnectionPoolsOnHealthFailure) {\n  const std::string json = fmt::sprintf(\"{\\\"static_resources\\\":{%s}}\",\n                                        clustersJson({defaultStaticClusterJson(\"some_cluster\")}));\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  cluster1->info_->name_ = \"some_cluster\";\n  HostSharedPtr test_host = makeTestHost(cluster1->info_, \"tcp://127.0.0.1:80\");\n  
cluster1->prioritySet().getMockHostSet(0)->hosts_ = {test_host};\n  ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n\n  MockHealthChecker health_checker;\n  ON_CALL(*cluster1, healthChecker()).WillByDefault(Return(&health_checker));\n\n  Outlier::MockDetector outlier_detector;\n  ON_CALL(*cluster1, outlierDetector()).WillByDefault(Return(&outlier_detector));\n\n  Tcp::ConnectionPool::MockInstance* cp1 = new Tcp::ConnectionPool::MockInstance();\n  Tcp::ConnectionPool::MockInstance* cp2 = new Tcp::ConnectionPool::MockInstance();\n\n  {\n    InSequence s;\n\n    EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n        .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n    EXPECT_CALL(health_checker, addHostCheckCompleteCb(_));\n    EXPECT_CALL(outlier_detector, addChangedStateCb(_));\n    EXPECT_CALL(*cluster1, initialize(_))\n        .WillOnce(Invoke([cluster1](std::function<void()> initialize_callback) {\n          // Test inline init.\n          initialize_callback();\n        }));\n    create(parseBootstrapFromV3Json(json));\n\n    EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(cp1));\n    cluster_manager_->tcpConnPoolForCluster(\"some_cluster\", ResourcePriority::Default, nullptr);\n\n    outlier_detector.runCallbacks(test_host);\n    health_checker.runCallbacks(test_host, HealthTransition::Unchanged);\n\n    EXPECT_CALL(*cp1, drainConnections());\n    test_host->healthFlagSet(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n    outlier_detector.runCallbacks(test_host);\n\n    EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(cp2));\n    cluster_manager_->tcpConnPoolForCluster(\"some_cluster\", ResourcePriority::High, nullptr);\n  }\n\n  // Order of these calls is implementation dependent, so can't sequence them!\n  EXPECT_CALL(*cp1, drainConnections());\n  EXPECT_CALL(*cp2, drainConnections());\n  test_host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  
health_checker.runCallbacks(test_host, HealthTransition::Changed);\n\n  test_host->healthFlagClear(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  outlier_detector.runCallbacks(test_host);\n  test_host->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker.runCallbacks(test_host, HealthTransition::Changed);\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n}\n\n// Test that we close all TCP connection pool connections when there is a host health failure,\n// when configured to do so.\nTEST_F(ClusterManagerImplTest, CloseTcpConnectionsOnHealthFailure) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: some_cluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      close_connections_on_host_health_failure: true\n  )EOF\";\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  EXPECT_CALL(*cluster1->info_, features())\n      .WillRepeatedly(Return(ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE));\n  cluster1->info_->name_ = \"some_cluster\";\n  HostSharedPtr test_host = makeTestHost(cluster1->info_, \"tcp://127.0.0.1:80\");\n  cluster1->prioritySet().getMockHostSet(0)->hosts_ = {test_host};\n  ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n\n  MockHealthChecker health_checker;\n  ON_CALL(*cluster1, healthChecker()).WillByDefault(Return(&health_checker));\n\n  Outlier::MockDetector outlier_detector;\n  ON_CALL(*cluster1, outlierDetector()).WillByDefault(Return(&outlier_detector));\n\n  Network::MockClientConnection* connection1 = new NiceMock<Network::MockClientConnection>();\n  Network::MockClientConnection* connection2 = new NiceMock<Network::MockClientConnection>();\n  Host::CreateConnectionData conn_info1, conn_info2;\n\n  {\n    InSequence s;\n\n    EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n        .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n   
 EXPECT_CALL(health_checker, addHostCheckCompleteCb(_));\n    EXPECT_CALL(outlier_detector, addChangedStateCb(_));\n    EXPECT_CALL(*cluster1, initialize(_))\n        .WillOnce(Invoke([cluster1](std::function<void()> initialize_callback) {\n          // Test inline init.\n          initialize_callback();\n        }));\n    create(parseBootstrapFromV3Yaml(yaml));\n\n    EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(Return(connection1));\n    conn_info1 = cluster_manager_->tcpConnForCluster(\"some_cluster\", nullptr);\n\n    outlier_detector.runCallbacks(test_host);\n    health_checker.runCallbacks(test_host, HealthTransition::Unchanged);\n\n    EXPECT_CALL(*connection1, close(Network::ConnectionCloseType::NoFlush));\n    test_host->healthFlagSet(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n    outlier_detector.runCallbacks(test_host);\n\n    connection1 = new NiceMock<Network::MockClientConnection>();\n    EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(Return(connection1));\n    conn_info1 = cluster_manager_->tcpConnForCluster(\"some_cluster\", nullptr);\n\n    EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(Return(connection2));\n    conn_info2 = cluster_manager_->tcpConnForCluster(\"some_cluster\", nullptr);\n  }\n\n  // Order of these calls is implementation dependent, so can't sequence them!\n  EXPECT_CALL(*connection1, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connection2, close(Network::ConnectionCloseType::NoFlush));\n  test_host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker.runCallbacks(test_host, HealthTransition::Changed);\n\n  test_host->healthFlagClear(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  outlier_detector.runCallbacks(test_host);\n  test_host->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker.runCallbacks(test_host, 
HealthTransition::Changed);\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n}\n\n// Test that we do not close TCP connection pool connections when there is a host health failure,\n// when not configured to do so.\nTEST_F(ClusterManagerImplTest, DoNotCloseTcpConnectionsOnHealthFailure) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: some_cluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      close_connections_on_host_health_failure: false\n  )EOF\";\n  std::shared_ptr<MockClusterMockPrioritySet> cluster1(new NiceMock<MockClusterMockPrioritySet>());\n  EXPECT_CALL(*cluster1->info_, features()).WillRepeatedly(Return(0));\n  cluster1->info_->name_ = \"some_cluster\";\n  HostSharedPtr test_host = makeTestHost(cluster1->info_, \"tcp://127.0.0.1:80\");\n  cluster1->prioritySet().getMockHostSet(0)->hosts_ = {test_host};\n  ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n\n  MockHealthChecker health_checker;\n  ON_CALL(*cluster1, healthChecker()).WillByDefault(Return(&health_checker));\n\n  Outlier::MockDetector outlier_detector;\n  ON_CALL(*cluster1, outlierDetector()).WillByDefault(Return(&outlier_detector));\n\n  Network::MockClientConnection* connection1 = new NiceMock<Network::MockClientConnection>();\n  Host::CreateConnectionData conn_info1;\n\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _))\n      .WillOnce(Return(std::make_pair(cluster1, nullptr)));\n  EXPECT_CALL(health_checker, addHostCheckCompleteCb(_));\n  EXPECT_CALL(outlier_detector, addChangedStateCb(_));\n  EXPECT_CALL(*cluster1, initialize(_))\n      .WillOnce(Invoke([cluster1](std::function<void()> initialize_callback) {\n        // Test inline init.\n        initialize_callback();\n      }));\n  create(parseBootstrapFromV3Yaml(yaml));\n\n  EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n      .WillOnce(Return(connection1));\n  conn_info1 = 
cluster_manager_->tcpConnForCluster(\"some_cluster\", nullptr);\n\n  outlier_detector.runCallbacks(test_host);\n  health_checker.runCallbacks(test_host, HealthTransition::Unchanged);\n\n  EXPECT_CALL(*connection1, close(_)).Times(0);\n  test_host->healthFlagSet(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  outlier_detector.runCallbacks(test_host);\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));\n}\n\nTEST_F(ClusterManagerImplTest, DynamicHostRemove) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      type: STRICT_DNS\n      lb_policy: ROUND_ROBIN\n      dns_resolvers:\n        - socket_address:\n            address: 1.2.3.4\n            port_value: 80\n      load_assignment:\n        cluster_name: cluster_1\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n  )EOF\";\n\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());\n  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver));\n\n  Network::DnsResolver::ResolveCb dns_callback;\n  Event::MockTimer* dns_timer_ = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);\n  Network::MockActiveDnsQuery active_dns_query;\n  EXPECT_CALL(*dns_resolver, resolve(_, _, _))\n      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));\n  create(parseBootstrapFromV3Yaml(yaml));\n  EXPECT_FALSE(cluster_manager_->get(\"cluster_1\")->info()->addedViaApi());\n\n  // Test for no hosts returning the correct values before we have hosts.\n  EXPECT_EQ(nullptr, cluster_manager_->httpConnPoolForCluster(\n                         \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", 
ResourcePriority::Default,\n                                                             nullptr));\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnForCluster(\"cluster_1\", nullptr).connection_);\n  EXPECT_EQ(3UL, factory_.stats_.counter(\"cluster.cluster_1.upstream_cx_none_healthy\").value());\n\n  // Set up for an initialize callback.\n  ReadyWatcher initialized;\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n  EXPECT_CALL(initialized, ready());\n\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success,\n               TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}));\n\n  // After we are initialized, we should immediately get called back if someone asks for an\n  // initialize callback.\n  EXPECT_CALL(initialized, ready());\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _))\n      .Times(4)\n      .WillRepeatedly(ReturnNew<Http::ConnectionPool::MockInstance>());\n\n  // This should provide us a CP for each of the above hosts.\n  Http::ConnectionPool::MockInstance* cp1 =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  Http::ConnectionPool::MockInstance* cp2 =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  Http::ConnectionPool::MockInstance* cp1_high =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::High, Http::Protocol::Http11, nullptr));\n  Http::ConnectionPool::MockInstance* cp2_high =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::High, 
Http::Protocol::Http11, nullptr));\n\n  EXPECT_NE(cp1, cp2);\n  EXPECT_NE(cp1_high, cp2_high);\n  EXPECT_NE(cp1, cp1_high);\n\n  Http::ConnectionPool::Instance::DrainedCb drained_cb;\n  EXPECT_CALL(*cp1, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb));\n  Http::ConnectionPool::Instance::DrainedCb drained_cb_high;\n  EXPECT_CALL(*cp1_high, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb_high));\n\n  EXPECT_CALL(factory_, allocateTcpConnPool_(_))\n      .Times(4)\n      .WillRepeatedly(ReturnNew<Tcp::ConnectionPool::MockInstance>());\n\n  // This should provide us a CP for each of the above hosts.\n  Tcp::ConnectionPool::MockInstance* tcp1 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n  Tcp::ConnectionPool::MockInstance* tcp2 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n  Tcp::ConnectionPool::MockInstance* tcp1_high = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::High, nullptr));\n  Tcp::ConnectionPool::MockInstance* tcp2_high = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::High, nullptr));\n\n  EXPECT_NE(tcp1, tcp2);\n  EXPECT_NE(tcp1_high, tcp2_high);\n  EXPECT_NE(tcp1, tcp1_high);\n\n  Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb;\n  EXPECT_CALL(*tcp1, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb));\n  Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb_high;\n  EXPECT_CALL(*tcp1_high, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb_high));\n\n  // Remove the first host, this should lead to the first cp being drained.\n  dns_timer_->invokeCallback();\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success,\n               
TestUtility::makeDnsResponse({\"127.0.0.2\"}));\n  drained_cb();\n  drained_cb = nullptr;\n  tcp_drained_cb();\n  tcp_drained_cb = nullptr;\n  EXPECT_CALL(factory_.tls_.dispatcher_, deferredDelete_(_)).Times(4);\n  drained_cb_high();\n  drained_cb_high = nullptr;\n  tcp_drained_cb_high();\n  tcp_drained_cb_high = nullptr;\n\n  // Make sure we get back the same connection pool for the 2nd host as we did before the change.\n  Http::ConnectionPool::MockInstance* cp3 =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  Http::ConnectionPool::MockInstance* cp3_high =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::High, Http::Protocol::Http11, nullptr));\n  EXPECT_EQ(cp2, cp3);\n  EXPECT_EQ(cp2_high, cp3_high);\n\n  Tcp::ConnectionPool::MockInstance* tcp3 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n  Tcp::ConnectionPool::MockInstance* tcp3_high = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::High, nullptr));\n  EXPECT_EQ(tcp2, tcp3);\n  EXPECT_EQ(tcp2_high, tcp3_high);\n\n  // Now add and remove a host that we never have a conn pool to. 
This should not lead to any\n  // drain callbacks, etc.\n  dns_timer_->invokeCallback();\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success,\n               TestUtility::makeDnsResponse({\"127.0.0.2\", \"127.0.0.3\"}));\n  factory_.tls_.shutdownThread();\n}\n\nTEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      type: STRICT_DNS\n      dns_resolvers:\n      - socket_address:\n          address: 1.2.3.4\n          port_value: 80\n      lb_policy: ROUND_ROBIN\n      load_assignment:\n        cluster_name: cluster_1\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n  )EOF\";\n\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());\n  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver));\n\n  Network::DnsResolver::ResolveCb dns_callback;\n  Event::MockTimer* dns_timer_ = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);\n  Network::MockActiveDnsQuery active_dns_query;\n  EXPECT_CALL(*dns_resolver, resolve(_, _, _))\n      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));\n  create(parseBootstrapFromV3Yaml(yaml));\n  EXPECT_FALSE(cluster_manager_->get(\"cluster_1\")->info()->addedViaApi());\n\n  NiceMock<MockLoadBalancerContext> example_com_context;\n  ON_CALL(example_com_context, upstreamTransportSocketOptions())\n      .WillByDefault(Return(std::make_shared<Network::TransportSocketOptionsImpl>(\"example.com\")));\n\n  NiceMock<MockLoadBalancerContext> example_com_context_with_san;\n  ON_CALL(example_com_context_with_san, upstreamTransportSocketOptions())\n      .WillByDefault(Return(std::make_shared<Network::TransportSocketOptionsImpl>(\n          
\"example.com\", std::vector<std::string>{\"example.com\"})));\n\n  NiceMock<MockLoadBalancerContext> example_com_context_with_san2;\n  ON_CALL(example_com_context_with_san2, upstreamTransportSocketOptions())\n      .WillByDefault(Return(std::make_shared<Network::TransportSocketOptionsImpl>(\n          \"example.com\", std::vector<std::string>{\"example.net\"})));\n\n  NiceMock<MockLoadBalancerContext> ibm_com_context;\n  ON_CALL(ibm_com_context, upstreamTransportSocketOptions())\n      .WillByDefault(Return(std::make_shared<Network::TransportSocketOptionsImpl>(\"ibm.com\")));\n\n  // Test for no hosts returning the correct values before we have hosts.\n  EXPECT_EQ(nullptr, cluster_manager_->httpConnPoolForCluster(\n                         \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default,\n                                                             nullptr));\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnForCluster(\"cluster_1\", nullptr).connection_);\n\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default,\n                                                             &example_com_context));\n  EXPECT_EQ(nullptr,\n            cluster_manager_->tcpConnForCluster(\"cluster_1\", &ibm_com_context).connection_);\n\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default,\n                                                             &ibm_com_context));\n  EXPECT_EQ(nullptr,\n            cluster_manager_->tcpConnForCluster(\"cluster_1\", &ibm_com_context).connection_);\n\n  EXPECT_EQ(7UL, factory_.stats_.counter(\"cluster.cluster_1.upstream_cx_none_healthy\").value());\n\n  // Set up for an initialize callback.\n  ReadyWatcher initialized;\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n  EXPECT_CALL(initialized, 
ready());\n\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success,\n               TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}));\n\n  // After we are initialized, we should immediately get called back if someone asks for an\n  // initialize callback.\n  EXPECT_CALL(initialized, ready());\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _))\n      .Times(4)\n      .WillRepeatedly(ReturnNew<Http::ConnectionPool::MockInstance>());\n\n  // This should provide us a CP for each of the above hosts.\n  Http::ConnectionPool::MockInstance* cp1 =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  Http::ConnectionPool::MockInstance* cp2 =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  Http::ConnectionPool::MockInstance* cp1_high =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::High, Http::Protocol::Http11, nullptr));\n  Http::ConnectionPool::MockInstance* cp2_high =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::High, Http::Protocol::Http11, nullptr));\n\n  EXPECT_NE(cp1, cp2);\n  EXPECT_NE(cp1_high, cp2_high);\n  EXPECT_NE(cp1, cp1_high);\n\n  Http::ConnectionPool::Instance::DrainedCb drained_cb;\n  EXPECT_CALL(*cp1, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb));\n  Http::ConnectionPool::Instance::DrainedCb drained_cb_high;\n  EXPECT_CALL(*cp1_high, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb_high));\n\n  EXPECT_CALL(factory_, allocateTcpConnPool_(_))\n      .Times(10)\n      
.WillRepeatedly(ReturnNew<Tcp::ConnectionPool::MockInstance>());\n\n  // This should provide us a CP for each of the above hosts, and for different SNIs\n  Tcp::ConnectionPool::MockInstance* tcp1 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n  Tcp::ConnectionPool::MockInstance* tcp2 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n  Tcp::ConnectionPool::MockInstance* tcp1_high = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::High, nullptr));\n  Tcp::ConnectionPool::MockInstance* tcp2_high = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::High, nullptr));\n\n  Tcp::ConnectionPool::MockInstance* tcp1_example_com =\n      dynamic_cast<Tcp::ConnectionPool::MockInstance*>(cluster_manager_->tcpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, &example_com_context));\n  Tcp::ConnectionPool::MockInstance* tcp2_example_com =\n      dynamic_cast<Tcp::ConnectionPool::MockInstance*>(cluster_manager_->tcpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, &example_com_context));\n\n  Tcp::ConnectionPool::MockInstance* tcp1_ibm_com =\n      dynamic_cast<Tcp::ConnectionPool::MockInstance*>(cluster_manager_->tcpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, &ibm_com_context));\n  Tcp::ConnectionPool::MockInstance* tcp2_ibm_com =\n      dynamic_cast<Tcp::ConnectionPool::MockInstance*>(cluster_manager_->tcpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, &ibm_com_context));\n\n  EXPECT_NE(tcp1, tcp2);\n  EXPECT_NE(tcp1_high, tcp2_high);\n  EXPECT_NE(tcp1, tcp1_high);\n\n  EXPECT_NE(tcp1_ibm_com, tcp2_ibm_com);\n  
EXPECT_NE(tcp1_ibm_com, tcp1);\n  EXPECT_NE(tcp1_ibm_com, tcp2);\n  EXPECT_NE(tcp1_ibm_com, tcp1_high);\n  EXPECT_NE(tcp1_ibm_com, tcp2_high);\n  EXPECT_NE(tcp1_ibm_com, tcp1_example_com);\n  EXPECT_NE(tcp1_ibm_com, tcp2_example_com);\n\n  EXPECT_NE(tcp2_ibm_com, tcp1);\n  EXPECT_NE(tcp2_ibm_com, tcp2);\n  EXPECT_NE(tcp2_ibm_com, tcp1_high);\n  EXPECT_NE(tcp2_ibm_com, tcp2_high);\n  EXPECT_NE(tcp2_ibm_com, tcp1_example_com);\n  EXPECT_NE(tcp2_ibm_com, tcp2_example_com);\n\n  EXPECT_NE(tcp1_example_com, tcp1);\n  EXPECT_NE(tcp1_example_com, tcp2);\n  EXPECT_NE(tcp1_example_com, tcp1_high);\n  EXPECT_NE(tcp1_example_com, tcp2_high);\n  EXPECT_NE(tcp1_example_com, tcp2_example_com);\n\n  EXPECT_NE(tcp2_example_com, tcp1);\n  EXPECT_NE(tcp2_example_com, tcp2);\n  EXPECT_NE(tcp2_example_com, tcp1_high);\n  EXPECT_NE(tcp2_example_com, tcp2_high);\n\n  EXPECT_CALL(factory_.tls_.dispatcher_, deferredDelete_(_)).Times(6);\n\n  Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb;\n  EXPECT_CALL(*tcp1, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb));\n  Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb_high;\n  EXPECT_CALL(*tcp1_high, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb_high));\n\n  Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb_example_com;\n  EXPECT_CALL(*tcp1_example_com, addDrainedCallback(_))\n      .WillOnce(SaveArg<0>(&tcp_drained_cb_example_com));\n  Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb_ibm_com;\n  EXPECT_CALL(*tcp1_ibm_com, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb_ibm_com));\n\n  // Remove the first host, this should lead to the first cp being drained.\n  dns_timer_->invokeCallback();\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success,\n               TestUtility::makeDnsResponse({\"127.0.0.2\"}));\n  drained_cb();\n  drained_cb = nullptr;\n  tcp_drained_cb();\n  tcp_drained_cb = nullptr;\n  drained_cb_high();\n  drained_cb_high = nullptr;\n  
tcp_drained_cb_high();\n  tcp_drained_cb_high = nullptr;\n  tcp_drained_cb_example_com();\n  tcp_drained_cb_example_com = nullptr;\n  tcp_drained_cb_ibm_com();\n  tcp_drained_cb_ibm_com = nullptr;\n\n  // Make sure we get back the same connection pool for the 2nd host as we did before the change.\n  Http::ConnectionPool::MockInstance* cp3 =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  Http::ConnectionPool::MockInstance* cp3_high =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::High, Http::Protocol::Http11, nullptr));\n  EXPECT_EQ(cp2, cp3);\n  EXPECT_EQ(cp2_high, cp3_high);\n\n  Tcp::ConnectionPool::MockInstance* tcp3 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n  Tcp::ConnectionPool::MockInstance* tcp3_high = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::High, nullptr));\n\n  Tcp::ConnectionPool::MockInstance* tcp3_example_com =\n      dynamic_cast<Tcp::ConnectionPool::MockInstance*>(cluster_manager_->tcpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, &example_com_context));\n  Tcp::ConnectionPool::MockInstance* tcp3_example_com_with_san =\n      dynamic_cast<Tcp::ConnectionPool::MockInstance*>(cluster_manager_->tcpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, &example_com_context_with_san));\n  Tcp::ConnectionPool::MockInstance* tcp3_example_com_with_san2 =\n      dynamic_cast<Tcp::ConnectionPool::MockInstance*>(cluster_manager_->tcpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, &example_com_context_with_san2));\n  Tcp::ConnectionPool::MockInstance* tcp3_ibm_com 
=\n      dynamic_cast<Tcp::ConnectionPool::MockInstance*>(cluster_manager_->tcpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, &ibm_com_context));\n\n  EXPECT_EQ(tcp2, tcp3);\n  EXPECT_EQ(tcp2_high, tcp3_high);\n\n  EXPECT_EQ(tcp2_example_com, tcp3_example_com);\n  EXPECT_EQ(tcp2_ibm_com, tcp3_ibm_com);\n\n  EXPECT_NE(tcp3_example_com, tcp3_example_com_with_san);\n  EXPECT_NE(tcp3_example_com, tcp3_example_com_with_san2);\n  EXPECT_NE(tcp3_example_com_with_san, tcp3_example_com_with_san2);\n\n  // Now add and remove a host that we never have a conn pool to. This should not lead to any\n  // drain callbacks, etc.\n  dns_timer_->invokeCallback();\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success,\n               TestUtility::makeDnsResponse({\"127.0.0.2\", \"127.0.0.3\"}));\n  factory_.tls_.shutdownThread();\n}\n\n// Test that default DNS resolver with TCP lookups is used, when there are no DNS custom resolvers\n// configured per cluster and `use_tcp_for_dns_lookups` is set in bootstrap config.\nTEST_F(ClusterManagerImplTest, UseTcpInDefaultDnsResolver) {\n  const std::string yaml = R\"EOF(\n  use_tcp_for_dns_lookups: true\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      type: STRICT_DNS\n  )EOF\";\n\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());\n  // As custom resolvers are not specified in config, this method should not be called,\n  // resolver from context should be used instead.\n  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).Times(0);\n\n  Network::DnsResolver::ResolveCb dns_callback;\n  Network::MockActiveDnsQuery active_dns_query;\n  EXPECT_CALL(*dns_resolver, resolve(_, _, _))\n      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));\n  create(parseBootstrapFromV3Yaml(yaml));\n  factory_.tls_.shutdownThread();\n}\n\n// Test that custom DNS resolver with UDP lookups is used, when custom 
resolver is configured\n// per cluster and `use_tcp_for_dns_lookups` is not specified.\nTEST_F(ClusterManagerImplTest, UseUdpWithCustomDnsResolver) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      type: STRICT_DNS\n      dns_resolvers:\n      - socket_address:\n          address: 1.2.3.4\n          port_value: 80\n  )EOF\";\n\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());\n  // `false` here stands for using udp\n  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, false)).WillOnce(Return(dns_resolver));\n\n  Network::DnsResolver::ResolveCb dns_callback;\n  Network::MockActiveDnsQuery active_dns_query;\n  EXPECT_CALL(*dns_resolver, resolve(_, _, _))\n      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));\n  create(parseBootstrapFromV3Yaml(yaml));\n  factory_.tls_.shutdownThread();\n}\n\n// Test that custom DNS resolver with TCP lookups is used, when custom resolver is configured\n// per cluster and `use_tcp_for_dns_lookups` is enabled for that cluster.\nTEST_F(ClusterManagerImplTest, UseTcpWithCustomDnsResolver) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      use_tcp_for_dns_lookups: true\n      connect_timeout: 0.250s\n      type: STRICT_DNS\n      dns_resolvers:\n      - socket_address:\n          address: 1.2.3.4\n          port_value: 80\n  )EOF\";\n\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());\n  // `true` here stands for using tcp\n  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, true)).WillOnce(Return(dns_resolver));\n\n  Network::DnsResolver::ResolveCb dns_callback;\n  Network::MockActiveDnsQuery active_dns_query;\n  EXPECT_CALL(*dns_resolver, resolve(_, _, _))\n      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));\n  create(parseBootstrapFromV3Yaml(yaml));\n  
factory_.tls_.shutdownThread();\n}\n\n// This is a regression test for a use-after-free in\n// ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools(), where a removal at one\n// priority from the ConnPoolsContainer would delete the ConnPoolsContainer mid-iteration over the\n// pool.\nTEST_F(ClusterManagerImplTest, DynamicHostRemoveDefaultPriority) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      type: STRICT_DNS\n      dns_resolvers:\n      - socket_address:\n          address: 1.2.3.4\n          port_value: 80\n      lb_policy: ROUND_ROBIN\n      load_assignment:\n        cluster_name: cluster_1\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n  )EOF\";\n\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());\n  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver));\n\n  Network::DnsResolver::ResolveCb dns_callback;\n  Event::MockTimer* dns_timer_ = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);\n  Network::MockActiveDnsQuery active_dns_query;\n  EXPECT_CALL(*dns_resolver, resolve(_, _, _))\n      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));\n  create(parseBootstrapFromV3Yaml(yaml));\n  EXPECT_FALSE(cluster_manager_->get(\"cluster_1\")->info()->addedViaApi());\n\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success,\n               TestUtility::makeDnsResponse({\"127.0.0.2\"}));\n\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _))\n      .WillOnce(ReturnNew<Http::ConnectionPool::MockInstance>());\n\n  EXPECT_CALL(factory_, allocateTcpConnPool_(_))\n      .WillOnce(ReturnNew<Tcp::ConnectionPool::MockInstance>());\n\n  Http::ConnectionPool::MockInstance* cp =\n      
dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n\n  Tcp::ConnectionPool::MockInstance* tcp = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n\n  // Immediate drain, since this can happen with the HTTP codecs.\n  EXPECT_CALL(*cp, addDrainedCallback(_))\n      .WillOnce(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  EXPECT_CALL(*tcp, addDrainedCallback(_))\n      .WillOnce(Invoke([](Tcp::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  // Remove the first host, this should lead to the cp being drained, without\n  // crash.\n  dns_timer_->invokeCallback();\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({}));\n\n  factory_.tls_.shutdownThread();\n}\n\nclass MockConnPoolWithDestroy : public Http::ConnectionPool::MockInstance {\npublic:\n  ~MockConnPoolWithDestroy() override { onDestroy(); }\n\n  MOCK_METHOD(void, onDestroy, ());\n};\n\nclass MockTcpConnPoolWithDestroy : public Tcp::ConnectionPool::MockInstance {\npublic:\n  ~MockTcpConnPoolWithDestroy() override { onDestroy(); }\n\n  MOCK_METHOD(void, onDestroy, ());\n};\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/3518. 
Make sure we handle a\n// drain callback during CP destroy.\nTEST_F(ClusterManagerImplTest, ConnPoolDestroyWithDraining) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      type: STRICT_DNS\n      dns_resolvers:\n      - socket_address:\n          address: 1.2.3.4\n          port_value: 80\n      lb_policy: ROUND_ROBIN\n      load_assignment:\n        cluster_name: cluster_1\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n  )EOF\";\n\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());\n  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver));\n\n  Network::DnsResolver::ResolveCb dns_callback;\n  Event::MockTimer* dns_timer_ = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);\n  Network::MockActiveDnsQuery active_dns_query;\n  EXPECT_CALL(*dns_resolver, resolve(_, _, _))\n      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));\n  create(parseBootstrapFromV3Yaml(yaml));\n  EXPECT_FALSE(cluster_manager_->get(\"cluster_1\")->info()->addedViaApi());\n\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success,\n               TestUtility::makeDnsResponse({\"127.0.0.2\"}));\n\n  MockConnPoolWithDestroy* mock_cp = new MockConnPoolWithDestroy();\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(mock_cp));\n\n  MockTcpConnPoolWithDestroy* mock_tcp = new MockTcpConnPoolWithDestroy();\n  EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(mock_tcp));\n\n  Http::ConnectionPool::MockInstance* cp =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n\n  
Tcp::ConnectionPool::MockInstance* tcp = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n\n  // Remove the first host, this should lead to the cp being drained.\n  Http::ConnectionPool::Instance::DrainedCb drained_cb;\n  EXPECT_CALL(*cp, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb));\n  Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb;\n  EXPECT_CALL(*tcp, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb));\n  dns_timer_->invokeCallback();\n  dns_callback(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({}));\n\n  // The drained callback might get called when the CP is being destroyed.\n  EXPECT_CALL(*mock_cp, onDestroy()).WillOnce(Invoke(drained_cb));\n  EXPECT_CALL(*mock_tcp, onDestroy()).WillOnce(Invoke(tcp_drained_cb));\n  factory_.tls_.shutdownThread();\n}\n\nTEST_F(ClusterManagerImplTest, OriginalDstInitialization) {\n  const std::string yaml = R\"EOF(\n  {\n    \"static_resources\": {\n      \"clusters\": [\n        {\n          \"name\": \"cluster_1\",\n          \"connect_timeout\": \"0.250s\",\n          \"type\": \"original_dst\",\n          \"lb_policy\": \"cluster_provided\"\n        }\n      ]\n    }\n  }\n  )EOF\";\n\n  ReadyWatcher initialized;\n  EXPECT_CALL(initialized, ready());\n\n  create(parseBootstrapFromV3Yaml(yaml));\n\n  // Set up for an initialize callback.\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  EXPECT_FALSE(cluster_manager_->get(\"cluster_1\")->info()->addedViaApi());\n\n  // Test for no hosts returning the correct values before we have hosts.\n  EXPECT_EQ(nullptr, cluster_manager_->httpConnPoolForCluster(\n                         \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default,\n                           
                                  nullptr));\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnForCluster(\"cluster_1\", nullptr).connection_);\n  EXPECT_EQ(3UL, factory_.stats_.counter(\"cluster.cluster_1.upstream_cx_none_healthy\").value());\n\n  factory_.tls_.shutdownThread();\n}\n\n// Tests that all the HC/weight/metadata changes are delivered in one go, as long as\n// there's no hosts changes in between.\n// Also tests that if hosts are added/removed between mergeable updates, delivery will\n// happen and the scheduled update will be cancelled.\nTEST_F(ClusterManagerImplTest, MergedUpdates) {\n  createWithLocalClusterUpdate();\n\n  // Ensure we see the right set of added/removed hosts on every call.\n  EXPECT_CALL(local_cluster_update_, post(_, _, _))\n      .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added,\n                          const HostVector& hosts_removed) -> void {\n        // 1st removal.\n        EXPECT_EQ(0, priority);\n        EXPECT_EQ(0, hosts_added.size());\n        EXPECT_EQ(1, hosts_removed.size());\n      }))\n      .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added,\n                          const HostVector& hosts_removed) -> void {\n        // Triggered by the 2 HC updates, it's a merged update so no added/removed\n        // hosts.\n        EXPECT_EQ(0, priority);\n        EXPECT_EQ(0, hosts_added.size());\n        EXPECT_EQ(0, hosts_removed.size());\n      }))\n      .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added,\n                          const HostVector& hosts_removed) -> void {\n        // 1st removed host added back.\n        EXPECT_EQ(0, priority);\n        EXPECT_EQ(1, hosts_added.size());\n        EXPECT_EQ(0, hosts_removed.size());\n      }))\n      .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added,\n                          const HostVector& hosts_removed) -> void {\n        // 1st removed host removed again, plus the 3 HC/weight/metadata updates 
that were\n        // waiting for delivery.\n        EXPECT_EQ(0, priority);\n        EXPECT_EQ(0, hosts_added.size());\n        EXPECT_EQ(1, hosts_removed.size());\n      }));\n\n  EXPECT_CALL(local_hosts_removed_, post(_))\n      .Times(2)\n      .WillRepeatedly(\n          Invoke([](const auto& hosts_removed) { EXPECT_EQ(1, hosts_removed.size()); }));\n\n  Event::MockTimer* timer = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);\n  Cluster& cluster = cluster_manager_->activeClusters().begin()->second;\n  HostVectorSharedPtr hosts(\n      new HostVector(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()));\n  HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared<HostsPerLocalityImpl>();\n  HostVector hosts_added;\n  HostVector hosts_removed;\n\n  // The first update should be applied immediately, since it's not mergeable.\n  hosts_removed.push_back((*hosts)[0]);\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n\n  // These calls should be merged, since there are no added/removed hosts.\n  hosts_removed.clear();\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, 
hosts_added, hosts_removed, absl::nullopt);\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n\n  // Ensure the merged updates were applied.\n  timer->invokeCallback();\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n\n  // Add the host back, the update should be immediately applied.\n  hosts_removed.clear();\n  hosts_added.push_back((*hosts)[0]);\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n  EXPECT_EQ(2, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n\n  // Now emit 3 updates that should be scheduled: metadata, HC, and weight.\n  hosts_added.clear();\n\n  (*hosts)[0]->metadata(buildMetadata(\"v1\"));\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n\n  (*hosts)[0]->healthFlagSet(Host::HealthFlag::FAILED_EDS_HEALTH);\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), 
hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n\n  (*hosts)[0]->weight(100);\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n\n  // Updates not delivered yet.\n  EXPECT_EQ(2, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n\n  // Remove the host again, should cancel the scheduled update and be delivered immediately.\n  hosts_removed.push_back((*hosts)[0]);\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n\n  EXPECT_EQ(3, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n}\n\n// Tests that mergeable updates outside of a window get applied immediately.\nTEST_F(ClusterManagerImplTest, MergedUpdatesOutOfWindow) {\n  createWithLocalClusterUpdate();\n\n  // Ensure we see the right set of added/removed hosts on every call.\n  EXPECT_CALL(local_cluster_update_, post(_, _, _))\n      .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added,\n                          const HostVector& hosts_removed) -> void {\n        // HC update, immediately delivered.\n        EXPECT_EQ(0, priority);\n        EXPECT_EQ(0, hosts_added.size());\n        EXPECT_EQ(0, hosts_removed.size());\n      }));\n\n  Cluster& cluster = 
cluster_manager_->activeClusters().begin()->second;\n  HostVectorSharedPtr hosts(\n      new HostVector(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()));\n  HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared<HostsPerLocalityImpl>();\n  HostVector hosts_added;\n  HostVector hosts_removed;\n\n  // The first update should be applied immediately, because even though it's mergeable\n  // it's outside the default merge window of 3 seconds (found in debugger as value of\n  // cluster.info()->lbConfig().update_merge_window() in ClusterManagerImpl::scheduleUpdate.\n  time_system_.advanceTimeWait(std::chrono::seconds(60));\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.update_out_of_merge_window\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n}\n\n// Tests that mergeable updates inside of a window are not applied immediately.\nTEST_F(ClusterManagerImplTest, MergedUpdatesInsideWindow) {\n  createWithLocalClusterUpdate();\n\n  Cluster& cluster = cluster_manager_->activeClusters().begin()->second;\n  HostVectorSharedPtr hosts(\n      new HostVector(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()));\n  HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared<HostsPerLocalityImpl>();\n  HostVector hosts_added;\n  HostVector hosts_removed;\n\n  // The first update will not be applied, as we make it inside the default mergeable window of\n  // 3 seconds (found in debugger as value of cluster.info()->lbConfig().update_merge_window()\n  // in 
ClusterManagerImpl::scheduleUpdate. Note that initially the update-time is\n  // default-initialized to a monotonic time of 0, as is SimulatedTimeSystem::monotonic_time_.\n  time_system_.advanceTimeWait(std::chrono::seconds(2));\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_out_of_merge_window\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n}\n\n// Tests that mergeable updates outside of a window get applied immediately when\n// merging is disabled, and that the counters are correct.\nTEST_F(ClusterManagerImplTest, MergedUpdatesOutOfWindowDisabled) {\n  createWithLocalClusterUpdate(false);\n\n  // Ensure we see the right set of added/removed hosts on every call.\n  EXPECT_CALL(local_cluster_update_, post(_, _, _))\n      .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added,\n                          const HostVector& hosts_removed) -> void {\n        // HC update, immediately delivered.\n        EXPECT_EQ(0, priority);\n        EXPECT_EQ(0, hosts_added.size());\n        EXPECT_EQ(0, hosts_removed.size());\n      }));\n\n  Cluster& cluster = cluster_manager_->activeClusters().begin()->second;\n  HostVectorSharedPtr hosts(\n      new HostVector(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()));\n  HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared<HostsPerLocalityImpl>();\n  HostVector hosts_added;\n  HostVector hosts_removed;\n\n  // The first update should be applied immediately, because even though it's mergeable\n  // 
and outside a merge window, merging is disabled.\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_out_of_merge_window\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n}\n\nTEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) {\n  // We create the default cluster, although for this test we won't use it since\n  // we can only update dynamic clusters.\n  createWithLocalClusterUpdate();\n\n  // Ensure we see the right set of added/removed hosts on every call, for the\n  // dynamically added/updated cluster.\n  EXPECT_CALL(local_cluster_update_, post(_, _, _))\n      .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added,\n                          const HostVector& hosts_removed) -> void {\n        // 1st add, when the cluster is added.\n        EXPECT_EQ(0, priority);\n        EXPECT_EQ(1, hosts_added.size());\n        EXPECT_EQ(0, hosts_removed.size());\n      }))\n      .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added,\n                          const HostVector& hosts_removed) -> void {\n        // 1st removal.\n        EXPECT_EQ(0, priority);\n        EXPECT_EQ(0, hosts_added.size());\n        EXPECT_EQ(1, hosts_removed.size());\n      }));\n\n  EXPECT_CALL(local_hosts_removed_, post(_)).WillOnce(Invoke([](const auto& hosts_removed) {\n    // 1st removal.\n    EXPECT_EQ(1, hosts_removed.size());\n  }));\n\n  Event::MockTimer* timer = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);\n\n  // We can't used the 
bootstrap cluster, so add one dynamically.\n  const std::string yaml = R\"EOF(\n  name: new_cluster\n  connect_timeout: 0.250s\n  type: STATIC\n  lb_policy: ROUND_ROBIN\n  load_assignment:\n    cluster_name: new_cluster\n    endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 12001\n  common_lb_config:\n    update_merge_window: 3s\n  )EOF\";\n  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(parseClusterFromV3Yaml(yaml), \"version1\"));\n\n  Cluster& cluster = cluster_manager_->activeClusters().find(\"new_cluster\")->second;\n  HostVectorSharedPtr hosts(\n      new HostVector(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()));\n  HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared<HostsPerLocalityImpl>();\n  HostVector hosts_added;\n  HostVector hosts_removed;\n\n  // The first update should be applied immediately, since it's not mergeable.\n  hosts_removed.push_back((*hosts)[0]);\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n\n  // These calls should be merged, since there are no added/removed hosts.\n  hosts_removed.clear();\n  cluster.prioritySet().updateHosts(\n      0,\n      updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n  cluster.prioritySet().updateHosts(\n      0,\n      
updateHostsParams(hosts, hosts_per_locality,\n                        std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n      {}, hosts_added, hosts_removed, absl::nullopt);\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n\n  // Update the cluster, which should cancel the pending updates.\n  std::shared_ptr<MockClusterMockPrioritySet> updated(new NiceMock<MockClusterMockPrioritySet>());\n  updated->info_->name_ = \"new_cluster\";\n  EXPECT_CALL(factory_, clusterFromProto_(_, _, _, true))\n      .WillOnce(Return(std::make_pair(updated, nullptr)));\n\n  const std::string yaml_updated = R\"EOF(\n  name: new_cluster\n  connect_timeout: 0.250s\n  type: STATIC\n  lb_policy: ROUND_ROBIN\n  load_assignment:\n    cluster_name: new_cluster\n    endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 12001\n  common_lb_config:\n    update_merge_window: 4s\n  )EOF\";\n\n  // Add the updated cluster.\n  EXPECT_EQ(2, factory_.stats_\n                   .gauge(\"cluster_manager.active_clusters\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_modified\").value());\n  EXPECT_EQ(0, factory_.stats_\n                   .gauge(\"cluster_manager.warming_clusters\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n  EXPECT_TRUE(\n      cluster_manager_->addOrUpdateCluster(parseClusterFromV3Yaml(yaml_updated), \"version2\"));\n  EXPECT_EQ(2, factory_.stats_\n                   .gauge(\"cluster_manager.active_clusters\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n  EXPECT_EQ(1, 
factory_.stats_.counter(\"cluster_manager.cluster_modified\").value());\n  EXPECT_EQ(1, factory_.stats_\n                   .gauge(\"cluster_manager.warming_clusters\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n\n  // Promote the updated cluster from warming to active & assert the old timer was disabled\n  // and it won't be called on version1 of new_cluster.\n  EXPECT_CALL(*timer, disableTimer());\n  updated->initialize_callback_();\n\n  EXPECT_EQ(2, factory_.stats_\n                   .gauge(\"cluster_manager.active_clusters\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n  EXPECT_EQ(0, factory_.stats_\n                   .gauge(\"cluster_manager.warming_clusters\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n}\n\nTEST_F(ClusterManagerImplTest, UpstreamSocketOptionsPassedToConnPool) {\n  createWithLocalClusterUpdate();\n  NiceMock<MockLoadBalancerContext> context;\n\n  Http::ConnectionPool::MockInstance* to_create = new Http::ConnectionPool::MockInstance();\n  Network::Socket::OptionsSharedPtr options_to_return =\n      Network::SocketOptionFactory::buildIpTransparentOptions();\n\n  EXPECT_CALL(context, upstreamSocketOptions()).WillOnce(Return(options_to_return));\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(to_create));\n\n  Http::ConnectionPool::Instance* cp = cluster_manager_->httpConnPoolForCluster(\n      \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, &context);\n\n  EXPECT_NE(nullptr, cp);\n}\n\nTEST_F(ClusterManagerImplTest, UpstreamSocketOptionsUsedInConnPoolHash) {\n  createWithLocalClusterUpdate();\n  NiceMock<MockLoadBalancerContext> context1;\n  NiceMock<MockLoadBalancerContext> context2;\n\n  Http::ConnectionPool::MockInstance* to_create1 = new Http::ConnectionPool::MockInstance();\n  Http::ConnectionPool::MockInstance* to_create2 = new Http::ConnectionPool::MockInstance();\n  Network::Socket::OptionsSharedPtr options1 =\n      
Network::SocketOptionFactory::buildIpTransparentOptions();\n  Network::Socket::OptionsSharedPtr options2 =\n      Network::SocketOptionFactory::buildSocketMarkOptions(3);\n\n  EXPECT_CALL(context1, upstreamSocketOptions()).WillRepeatedly(Return(options1));\n  EXPECT_CALL(context2, upstreamSocketOptions()).WillRepeatedly(Return(options2));\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(to_create1));\n\n  Http::ConnectionPool::Instance* cp1 = cluster_manager_->httpConnPoolForCluster(\n      \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, &context1);\n\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(to_create2));\n  Http::ConnectionPool::Instance* cp2 = cluster_manager_->httpConnPoolForCluster(\n      \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, &context2);\n\n  Http::ConnectionPool::Instance* should_be_cp1 = cluster_manager_->httpConnPoolForCluster(\n      \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, &context1);\n  Http::ConnectionPool::Instance* should_be_cp2 = cluster_manager_->httpConnPoolForCluster(\n      \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, &context2);\n\n  // The different upstream options should lead to different hashKeys, thus different pools.\n  EXPECT_NE(cp1, cp2);\n\n  // Reusing the same options should lead to the same connection pools.\n  EXPECT_EQ(cp1, should_be_cp1);\n  EXPECT_EQ(cp2, should_be_cp2);\n}\n\nTEST_F(ClusterManagerImplTest, UpstreamSocketOptionsNullIsOkay) {\n  createWithLocalClusterUpdate();\n  NiceMock<MockLoadBalancerContext> context;\n\n  Http::ConnectionPool::MockInstance* to_create = new Http::ConnectionPool::MockInstance();\n  Network::Socket::OptionsSharedPtr options_to_return = nullptr;\n\n  EXPECT_CALL(context, upstreamSocketOptions()).WillOnce(Return(options_to_return));\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(to_create));\n\n  Http::ConnectionPool::Instance* cp = 
cluster_manager_->httpConnPoolForCluster(\n      \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, &context);\n\n  EXPECT_NE(nullptr, cp);\n}\n\nclass TestUpstreamNetworkFilter : public Network::WriteFilter {\npublic:\n  Network::FilterStatus onWrite(Buffer::Instance&, bool) override {\n    return Network::FilterStatus::Continue;\n  }\n};\n\nclass TestUpstreamNetworkFilterConfigFactory\n    : public Server::Configuration::NamedUpstreamNetworkFilterConfigFactory {\npublic:\n  Network::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message&,\n                               Server::Configuration::CommonFactoryContext&) override {\n    return [](Network::FilterManager& filter_manager) -> void {\n      filter_manager.addWriteFilter(std::make_shared<TestUpstreamNetworkFilter>());\n    };\n  }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return std::make_unique<Envoy::ProtobufWkt::Struct>();\n  }\n  std::string name() const override { return \"envoy.test.filter\"; }\n};\n\n// Verify that configured upstream filters are added to client connections.\nTEST_F(ClusterManagerImplTest, AddUpstreamFilters) {\n  TestUpstreamNetworkFilterConfigFactory factory;\n  Registry::InjectFactory<Server::Configuration::NamedUpstreamNetworkFilterConfigFactory> registry(\n      factory);\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: cluster_1\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n      filters:\n      - name: envoy.test.filter\n  )EOF\";\n\n  create(parseBootstrapFromV3Yaml(yaml));\n  
Network::MockClientConnection* connection = new NiceMock<Network::MockClientConnection>();\n  EXPECT_CALL(*connection, addReadFilter(_)).Times(0);\n  EXPECT_CALL(*connection, addWriteFilter(_)).Times(1);\n  EXPECT_CALL(*connection, addFilter(_)).Times(0);\n  EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n      .WillOnce(Return(connection));\n  auto conn_data = cluster_manager_->tcpConnForCluster(\"cluster_1\", nullptr);\n  EXPECT_EQ(connection, conn_data.connection_.get());\n  factory_.tls_.shutdownThread();\n}\n\nclass ClusterManagerInitHelperTest : public testing::Test {\npublic:\n  MOCK_METHOD(void, onClusterInit, (Cluster & cluster));\n\n  NiceMock<MockClusterManager> cm_;\n  ClusterManagerInitHelper init_helper_{cm_, [this](Cluster& cluster) { onClusterInit(cluster); }};\n};\n\nTEST_F(ClusterManagerInitHelperTest, ImmediateInitialize) {\n  InSequence s;\n\n  NiceMock<MockClusterMockPrioritySet> cluster1;\n  ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(cluster1, initialize(_));\n  init_helper_.addCluster(cluster1);\n  EXPECT_CALL(*this, onClusterInit(Ref(cluster1)));\n  cluster1.initialize_callback_();\n\n  init_helper_.onStaticLoadComplete();\n  init_helper_.startInitializingSecondaryClusters();\n\n  ReadyWatcher cm_initialized;\n  EXPECT_CALL(cm_initialized, ready());\n  init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); });\n}\n\nTEST_F(ClusterManagerInitHelperTest, StaticSdsInitialize) {\n  InSequence s;\n\n  NiceMock<MockClusterMockPrioritySet> sds;\n  ON_CALL(sds, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(sds, initialize(_));\n  init_helper_.addCluster(sds);\n  EXPECT_CALL(*this, onClusterInit(Ref(sds)));\n  sds.initialize_callback_();\n\n  NiceMock<MockClusterMockPrioritySet> cluster1;\n  ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary));\n  
init_helper_.addCluster(cluster1);\n\n  init_helper_.onStaticLoadComplete();\n\n  EXPECT_CALL(cluster1, initialize(_));\n  init_helper_.startInitializingSecondaryClusters();\n\n  ReadyWatcher cm_initialized;\n  init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); });\n\n  EXPECT_CALL(*this, onClusterInit(Ref(cluster1)));\n  EXPECT_CALL(cm_initialized, ready());\n  cluster1.initialize_callback_();\n}\n\nTEST_F(ClusterManagerInitHelperTest, UpdateAlreadyInitialized) {\n  InSequence s;\n\n  ReadyWatcher primary_clusters_initialized;\n  init_helper_.setPrimaryClustersInitializedCb(\n      [&]() -> void { primary_clusters_initialized.ready(); });\n  ReadyWatcher cm_initialized;\n  init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); });\n\n  NiceMock<MockClusterMockPrioritySet> cluster1;\n  ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(cluster1, initialize(_));\n  init_helper_.addCluster(cluster1);\n\n  NiceMock<MockClusterMockPrioritySet> cluster2;\n  ON_CALL(cluster2, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(cluster2, initialize(_));\n  init_helper_.addCluster(cluster2);\n\n  init_helper_.onStaticLoadComplete();\n\n  EXPECT_CALL(*this, onClusterInit(Ref(cluster1)));\n  cluster1.initialize_callback_();\n  init_helper_.removeCluster(cluster1);\n\n  EXPECT_CALL(*this, onClusterInit(Ref(cluster2)));\n  EXPECT_CALL(primary_clusters_initialized, ready());\n  cluster2.initialize_callback_();\n\n  EXPECT_CALL(cm_initialized, ready());\n  init_helper_.startInitializingSecondaryClusters();\n}\n\n// If secondary clusters initialization triggered outside of CdsApiImpl::onConfigUpdate()'s\n// callback flows, sending ClusterLoadAssignment should not be paused before calling\n// ClusterManagerInitHelper::maybeFinishInitialize(). 
This case tests that\n// ClusterLoadAssignment request is paused and resumed properly.\nTEST_F(ClusterManagerInitHelperTest, InitSecondaryWithoutEdsPaused) {\n  InSequence s;\n\n  ReadyWatcher primary_clusters_initialized;\n  init_helper_.setPrimaryClustersInitializedCb(\n      [&]() -> void { primary_clusters_initialized.ready(); });\n  ReadyWatcher cm_initialized;\n  init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); });\n\n  NiceMock<MockClusterMockPrioritySet> cluster1;\n  ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary));\n  init_helper_.addCluster(cluster1);\n\n  EXPECT_CALL(primary_clusters_initialized, ready());\n  init_helper_.onStaticLoadComplete();\n  EXPECT_CALL(cluster1, initialize(_));\n  init_helper_.startInitializingSecondaryClusters();\n\n  EXPECT_CALL(*this, onClusterInit(Ref(cluster1)));\n  EXPECT_CALL(cm_initialized, ready());\n  cluster1.initialize_callback_();\n}\n\n// If secondary clusters initialization triggered inside of CdsApiImpl::onConfigUpdate()'s\n// callback flows, that's, the CDS response didn't have any primary cluster, sending\n// ClusterLoadAssignment should be already paused by CdsApiImpl::onConfigUpdate().\n// This case tests that ClusterLoadAssignment request isn't paused again.\nTEST_F(ClusterManagerInitHelperTest, InitSecondaryWithEdsPaused) {\n  InSequence s;\n\n  ReadyWatcher primary_clusters_initialized;\n  init_helper_.setPrimaryClustersInitializedCb(\n      [&]() -> void { primary_clusters_initialized.ready(); });\n  ReadyWatcher cm_initialized;\n  init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); });\n\n  NiceMock<MockClusterMockPrioritySet> cluster1;\n  ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary));\n  init_helper_.addCluster(cluster1);\n\n  EXPECT_CALL(primary_clusters_initialized, ready());\n  init_helper_.onStaticLoadComplete();\n\n  EXPECT_CALL(cluster1, initialize(_));\n  
init_helper_.startInitializingSecondaryClusters();\n\n  EXPECT_CALL(*this, onClusterInit(Ref(cluster1)));\n  EXPECT_CALL(cm_initialized, ready());\n  cluster1.initialize_callback_();\n}\n\nTEST_F(ClusterManagerInitHelperTest, AddSecondaryAfterSecondaryInit) {\n  InSequence s;\n\n  ReadyWatcher primary_clusters_initialized;\n  init_helper_.setPrimaryClustersInitializedCb(\n      [&]() -> void { primary_clusters_initialized.ready(); });\n  ReadyWatcher cm_initialized;\n  init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); });\n\n  NiceMock<MockClusterMockPrioritySet> cluster1;\n  ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary));\n  EXPECT_CALL(cluster1, initialize(_));\n  init_helper_.addCluster(cluster1);\n\n  NiceMock<MockClusterMockPrioritySet> cluster2;\n  ON_CALL(cluster2, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary));\n  init_helper_.addCluster(cluster2);\n\n  init_helper_.onStaticLoadComplete();\n\n  EXPECT_CALL(*this, onClusterInit(Ref(cluster1)));\n  EXPECT_CALL(primary_clusters_initialized, ready());\n  EXPECT_CALL(cluster2, initialize(_));\n  cluster1.initialize_callback_();\n  init_helper_.startInitializingSecondaryClusters();\n\n  NiceMock<MockClusterMockPrioritySet> cluster3;\n  ON_CALL(cluster3, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary));\n  EXPECT_CALL(cluster3, initialize(_));\n  init_helper_.addCluster(cluster3);\n\n  EXPECT_CALL(*this, onClusterInit(Ref(cluster3)));\n  cluster3.initialize_callback_();\n  EXPECT_CALL(*this, onClusterInit(Ref(cluster2)));\n  EXPECT_CALL(cm_initialized, ready());\n  cluster2.initialize_callback_();\n}\n\n// Tests the scenario encountered in Issue 903: The cluster was removed from\n// the secondary init list while traversing the list.\nTEST_F(ClusterManagerInitHelperTest, RemoveClusterWithinInitLoop) {\n  InSequence s;\n  NiceMock<MockClusterMockPrioritySet> cluster;\n  ON_CALL(cluster, 
initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary));\n  init_helper_.addCluster(cluster);\n\n  // onStaticLoadComplete() must not initialize secondary clusters\n  init_helper_.onStaticLoadComplete();\n\n  // Set up the scenario seen in Issue 903 where initialize() ultimately results\n  // in the removeCluster() call. In the real bug this was a long and complex call\n  // chain.\n  EXPECT_CALL(cluster, initialize(_)).WillOnce(Invoke([&](std::function<void()>) -> void {\n    init_helper_.removeCluster(cluster);\n  }));\n\n  // Now call initializeSecondaryClusters which will exercise maybeFinishInitialize()\n  // which calls initialize() on the members of the secondary init list.\n  init_helper_.startInitializingSecondaryClusters();\n}\n\nusing NameVals = std::vector<std::pair<Network::SocketOptionName, int>>;\n\n// Validate that when options are set in the ClusterManager and/or Cluster, we see the socket option\n// propagated to setsockopt(). This is as close to an end-to-end test as we have for this feature,\n// due to the complexity of creating an integration test involving the network stack. 
We only test\n// the IPv4 case here, as the logic around IPv4/IPv6 handling is tested generically in\n// socket_option_impl_test.cc.\nclass SockoptsTest : public ClusterManagerImplTest {\npublic:\n  void initialize(const std::string& yaml) { create(parseBootstrapFromV3Yaml(yaml)); }\n\n  void TearDown() override { factory_.tls_.shutdownThread(); }\n\n  // TODO(tschroed): Extend this to support socket state as well.\n  void expectSetsockopts(const NameVals& names_vals) {\n    NiceMock<Api::MockOsSysCalls> os_sys_calls;\n    TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n    NiceMock<Network::MockConnectionSocket> socket;\n    bool expect_success = true;\n    for (const auto& name_val : names_vals) {\n      if (!name_val.first.hasValue()) {\n        expect_success = false;\n        continue;\n      }\n      EXPECT_CALL(socket,\n                  setSocketOption(name_val.first.level(), name_val.first.option(), _, sizeof(int)))\n          .WillOnce(\n              Invoke([&name_val](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult {\n                EXPECT_EQ(name_val.second, *static_cast<const int*>(optval));\n                return {0, 0};\n              }));\n    }\n    EXPECT_CALL(socket, ipVersion())\n        .WillRepeatedly(testing::Return(Network::Address::IpVersion::v4));\n    EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(Invoke([this, &names_vals, expect_success, &socket](\n                             Network::Address::InstanceConstSharedPtr,\n                             Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&,\n                             const Network::ConnectionSocket::OptionsSharedPtr& options)\n                             -> Network::ClientConnection* {\n          EXPECT_NE(nullptr, options.get()) << \"Unexpected null options\";\n          if (options.get() != nullptr) { // Don't crash the entire test.\n            
EXPECT_EQ(names_vals.size(), options->size());\n          }\n          if (expect_success) {\n            EXPECT_TRUE((Network::Socket::applyOptions(\n                options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND)));\n          } else {\n            EXPECT_FALSE((Network::Socket::applyOptions(\n                options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND)));\n          }\n          return connection_;\n        }));\n    cluster_manager_->tcpConnForCluster(\"SockoptsCluster\", nullptr);\n  }\n\n  void expectSetsockoptFreebind() {\n    NameVals names_vals{{ENVOY_SOCKET_IP_FREEBIND, 1}};\n    if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) {\n      names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1));\n    }\n    expectSetsockopts(names_vals);\n  }\n\n  void expectOnlyNoSigpipeOptions() {\n    NameVals names_vals{{std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)}};\n    expectSetsockopts(names_vals);\n  }\n\n  void expectNoSocketOptions() {\n    EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(\n            Invoke([this](Network::Address::InstanceConstSharedPtr,\n                          Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&,\n                          const Network::ConnectionSocket::OptionsSharedPtr& options)\n                       -> Network::ClientConnection* {\n              EXPECT_EQ(nullptr, options.get());\n              return connection_;\n            }));\n    auto conn_data = cluster_manager_->tcpConnForCluster(\"SockoptsCluster\", nullptr);\n    EXPECT_EQ(connection_, conn_data.connection_.get());\n  }\n\n  Network::MockClientConnection* connection_ = new NiceMock<Network::MockClientConnection>();\n};\n\nTEST_F(SockoptsTest, SockoptsUnset) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: SockoptsCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: 
STATIC\n      load_assignment:\n        cluster_name: SockoptsCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n  )EOF\";\n  initialize(yaml);\n  if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) {\n    expectOnlyNoSigpipeOptions();\n  } else {\n    expectNoSocketOptions();\n  }\n}\n\nTEST_F(SockoptsTest, FreebindClusterOnly) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: SockoptsCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: SockoptsCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n      upstream_bind_config:\n        freebind: true\n  )EOF\";\n  initialize(yaml);\n  expectSetsockoptFreebind();\n}\n\nTEST_F(SockoptsTest, FreebindClusterManagerOnly) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: SockoptsCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: SockoptsCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n  cluster_manager:\n    upstream_bind_config:\n      freebind: true\n  )EOF\";\n  initialize(yaml);\n  expectSetsockoptFreebind();\n}\n\nTEST_F(SockoptsTest, FreebindClusterOverride) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: SockoptsCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: 
SockoptsCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n      upstream_bind_config:\n        freebind: true\n  cluster_manager:\n    upstream_bind_config:\n      freebind: false\n  )EOF\";\n  initialize(yaml);\n  expectSetsockoptFreebind();\n}\n\nTEST_F(SockoptsTest, SockoptsClusterOnly) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: SockoptsCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: SockoptsCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n      upstream_bind_config:\n        socket_options: [\n          { level: 1, name: 2, int_value: 3, state: STATE_PREBIND },\n          { level: 4, name: 5, int_value: 6, state: STATE_PREBIND }]\n\n  )EOF\";\n  initialize(yaml);\n  NameVals names_vals{{ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3},\n                      {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}};\n  if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) {\n    names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1));\n  }\n  expectSetsockopts(names_vals);\n}\n\nTEST_F(SockoptsTest, SockoptsClusterManagerOnly) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: SockoptsCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: SockoptsCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n  cluster_manager:\n    
upstream_bind_config:\n      socket_options: [\n        { level: 1, name: 2, int_value: 3, state: STATE_PREBIND },\n        { level: 4, name: 5, int_value: 6, state: STATE_PREBIND }]\n  )EOF\";\n  initialize(yaml);\n  NameVals names_vals{{ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3},\n                      {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}};\n  if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) {\n    names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1));\n  }\n  expectSetsockopts(names_vals);\n}\n\nTEST_F(SockoptsTest, SockoptsClusterOverride) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: SockoptsCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: SockoptsCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n      upstream_bind_config:\n        socket_options: [\n          { level: 1, name: 2, int_value: 3, state: STATE_PREBIND },\n          { level: 4, name: 5, int_value: 6, state: STATE_PREBIND }]\n  cluster_manager:\n    upstream_bind_config:\n      socket_options: [{ level: 7, name: 8, int_value: 9, state: STATE_PREBIND }]\n  )EOF\";\n  initialize(yaml);\n  NameVals names_vals{{ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3},\n                      {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}};\n  if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) {\n    names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1));\n  }\n  expectSetsockopts(names_vals);\n}\n\n// Validate that when tcp keepalives are set in the Cluster, we see the socket\n// option propagated to setsockopt(). This is as close to an end-to-end test as we have for this\n// feature, due to the complexity of creating an integration test involving the network stack. 
We\n// only test the IPv4 case here, as the logic around IPv4/IPv6 handling is tested generically in\n// tcp_keepalive_option_impl_test.cc.\nclass TcpKeepaliveTest : public ClusterManagerImplTest {\npublic:\n  void initialize(const std::string& yaml) { create(parseBootstrapFromV3Yaml(yaml)); }\n\n  void TearDown() override { factory_.tls_.shutdownThread(); }\n\n  void expectSetsockoptSoKeepalive(absl::optional<int> keepalive_probes,\n                                   absl::optional<int> keepalive_time,\n                                   absl::optional<int> keepalive_interval) {\n    if (!ENVOY_SOCKET_SO_KEEPALIVE.hasValue()) {\n      EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n          .WillOnce(\n              Invoke([this](Network::Address::InstanceConstSharedPtr,\n                            Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&,\n                            const Network::ConnectionSocket::OptionsSharedPtr& options)\n                         -> Network::ClientConnection* {\n                EXPECT_NE(nullptr, options.get());\n                EXPECT_EQ(1, options->size());\n                NiceMock<Network::MockConnectionSocket> socket;\n                EXPECT_FALSE((Network::Socket::applyOptions(\n                    options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND)));\n                return connection_;\n              }));\n      cluster_manager_->tcpConnForCluster(\"TcpKeepaliveCluster\", nullptr);\n      return;\n    }\n    NiceMock<Api::MockOsSysCalls> os_sys_calls;\n    TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n    NiceMock<Network::MockConnectionSocket> socket;\n    EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(Invoke([this, &socket](Network::Address::InstanceConstSharedPtr,\n                                         Network::Address::InstanceConstSharedPtr,\n                         
                Network::TransportSocketPtr&,\n                                         const Network::ConnectionSocket::OptionsSharedPtr& options)\n                             -> Network::ClientConnection* {\n          EXPECT_NE(nullptr, options.get());\n          EXPECT_TRUE((Network::Socket::applyOptions(\n              options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND)));\n          return connection_;\n        }));\n    if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) {\n      EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_NOSIGPIPE.level(),\n                                          ENVOY_SOCKET_SO_NOSIGPIPE.option(), _, sizeof(int)))\n          .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult {\n            EXPECT_EQ(1, *static_cast<const int*>(optval));\n            return {0, 0};\n          }));\n    }\n    EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_KEEPALIVE.level(),\n                                        ENVOY_SOCKET_SO_KEEPALIVE.option(), _, sizeof(int)))\n        .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult {\n          EXPECT_EQ(1, *static_cast<const int*>(optval));\n          return {0, 0};\n        }));\n    if (keepalive_probes.has_value()) {\n      EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_TCP_KEEPCNT.level(),\n                                          ENVOY_SOCKET_TCP_KEEPCNT.option(), _, sizeof(int)))\n          .WillOnce(Invoke([&keepalive_probes](int, int, const void* optval,\n                                               socklen_t) -> Api::SysCallIntResult {\n            EXPECT_EQ(keepalive_probes.value(), *static_cast<const int*>(optval));\n            return {0, 0};\n          }));\n    }\n    if (keepalive_time.has_value()) {\n      EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_TCP_KEEPIDLE.level(),\n                                          ENVOY_SOCKET_TCP_KEEPIDLE.option(), _, sizeof(int)))\n          
.WillOnce(Invoke(\n              [&keepalive_time](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult {\n                EXPECT_EQ(keepalive_time.value(), *static_cast<const int*>(optval));\n                return {0, 0};\n              }));\n    }\n    if (keepalive_interval.has_value()) {\n      EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_TCP_KEEPINTVL.level(),\n                                          ENVOY_SOCKET_TCP_KEEPINTVL.option(), _, sizeof(int)))\n          .WillOnce(Invoke([&keepalive_interval](int, int, const void* optval,\n                                                 socklen_t) -> Api::SysCallIntResult {\n            EXPECT_EQ(keepalive_interval.value(), *static_cast<const int*>(optval));\n            return {0, 0};\n          }));\n    }\n    auto conn_data = cluster_manager_->tcpConnForCluster(\"TcpKeepaliveCluster\", nullptr);\n    EXPECT_EQ(connection_, conn_data.connection_.get());\n  }\n\n  void expectOnlyNoSigpipeOptions() {\n    NiceMock<Network::MockConnectionSocket> socket;\n    EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(Invoke([this, &socket](Network::Address::InstanceConstSharedPtr,\n                                         Network::Address::InstanceConstSharedPtr,\n                                         Network::TransportSocketPtr&,\n                                         const Network::ConnectionSocket::OptionsSharedPtr& options)\n                             -> Network::ClientConnection* {\n          EXPECT_NE(nullptr, options.get());\n          EXPECT_TRUE((Network::Socket::applyOptions(\n              options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND)));\n          return connection_;\n        }));\n    EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_NOSIGPIPE.level(),\n                                        ENVOY_SOCKET_SO_NOSIGPIPE.option(), _, sizeof(int)))\n        .WillOnce(Invoke([](int, int, const void* optval, socklen_t) 
-> Api::SysCallIntResult {\n          EXPECT_EQ(1, *static_cast<const int*>(optval));\n          return {0, 0};\n        }));\n    auto conn_data = cluster_manager_->tcpConnForCluster(\"TcpKeepaliveCluster\", nullptr);\n    EXPECT_EQ(connection_, conn_data.connection_.get());\n  }\n\n  void expectNoSocketOptions() {\n    EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _))\n        .WillOnce(\n            Invoke([this](Network::Address::InstanceConstSharedPtr,\n                          Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&,\n                          const Network::ConnectionSocket::OptionsSharedPtr& options)\n                       -> Network::ClientConnection* {\n              EXPECT_EQ(nullptr, options.get());\n              return connection_;\n            }));\n    auto conn_data = cluster_manager_->tcpConnForCluster(\"TcpKeepaliveCluster\", nullptr);\n    EXPECT_EQ(connection_, conn_data.connection_.get());\n  }\n\n  Network::MockClientConnection* connection_ = new NiceMock<Network::MockClientConnection>();\n};\n\nTEST_F(TcpKeepaliveTest, TcpKeepaliveUnset) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: TcpKeepaliveCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: TcpKeepaliveCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n  )EOF\";\n  initialize(yaml);\n  if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) {\n    expectOnlyNoSigpipeOptions();\n  } else {\n    expectNoSocketOptions();\n  }\n}\n\nTEST_F(TcpKeepaliveTest, TcpKeepaliveCluster) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: TcpKeepaliveCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: 
STATIC\n      load_assignment:\n        cluster_name: TcpKeepaliveCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n      upstream_connection_options:\n        tcp_keepalive: {}\n  )EOF\";\n  initialize(yaml);\n  expectSetsockoptSoKeepalive({}, {}, {});\n}\n\nTEST_F(TcpKeepaliveTest, TcpKeepaliveClusterProbes) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: TcpKeepaliveCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: TcpKeepaliveCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n      upstream_connection_options:\n        tcp_keepalive:\n          keepalive_probes: 7\n  )EOF\";\n  initialize(yaml);\n  expectSetsockoptSoKeepalive(7, {}, {});\n}\n\nTEST_F(TcpKeepaliveTest, TcpKeepaliveWithAllOptions) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: TcpKeepaliveCluster\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      load_assignment:\n        cluster_name: TcpKeepaliveCluster\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 11001\n      upstream_connection_options:\n        tcp_keepalive:\n          keepalive_probes: 7\n          keepalive_time: 4\n          keepalive_interval: 1\n  )EOF\";\n  initialize(yaml);\n  expectSetsockoptSoKeepalive(7, 4, 1);\n}\n\nTEST_F(ClusterManagerImplTest, ConnPoolsDrainedOnHostSetChange) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n  
  clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      common_lb_config:\n        close_connections_on_host_set_change: true\n  )EOF\";\n\n  ReadyWatcher initialized;\n  EXPECT_CALL(initialized, ready());\n\n  create(parseBootstrapFromV3Yaml(yaml));\n\n  // Set up for an initialize callback.\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  std::unique_ptr<MockClusterUpdateCallbacks> callbacks(new NiceMock<MockClusterUpdateCallbacks>());\n  ClusterUpdateCallbacksHandlePtr cb =\n      cluster_manager_->addThreadLocalClusterUpdateCallbacks(*callbacks);\n\n  EXPECT_FALSE(cluster_manager_->get(\"cluster_1\")->info()->addedViaApi());\n\n  // Verify that we get no hosts when the HostSet is empty.\n  EXPECT_EQ(nullptr, cluster_manager_->httpConnPoolForCluster(\n                         \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default,\n                                                             nullptr));\n  EXPECT_EQ(nullptr, cluster_manager_->tcpConnForCluster(\"cluster_1\", nullptr).connection_);\n\n  Cluster& cluster = cluster_manager_->activeClusters().begin()->second;\n\n  // Set up the HostSet.\n  HostSharedPtr host1 = makeTestHost(cluster.info(), \"tcp://127.0.0.1:80\");\n  HostSharedPtr host2 = makeTestHost(cluster.info(), \"tcp://127.0.0.1:81\");\n\n  HostVector hosts{host1, host2};\n  auto hosts_ptr = std::make_shared<HostVector>(hosts);\n\n  // Sending non-mergeable updates.\n  cluster.prioritySet().updateHosts(\n      0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, hosts, {},\n      100);\n\n  EXPECT_EQ(1, factory_.stats_.counter(\"cluster_manager.cluster_updated\").value());\n  EXPECT_EQ(0, factory_.stats_.counter(\"cluster_manager.cluster_updated_via_merge\").value());\n  EXPECT_EQ(0, 
factory_.stats_.counter(\"cluster_manager.update_merge_cancelled\").value());\n\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _))\n      .Times(3)\n      .WillRepeatedly(ReturnNew<Http::ConnectionPool::MockInstance>());\n\n  EXPECT_CALL(factory_, allocateTcpConnPool_(_))\n      .Times(3)\n      .WillRepeatedly(ReturnNew<Tcp::ConnectionPool::MockInstance>());\n\n  // This should provide us a CP for each of the above hosts.\n  Http::ConnectionPool::MockInstance* cp1 =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n  // Create persistent connection for host2.\n  Http::ConnectionPool::MockInstance* cp2 =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http2, nullptr));\n\n  Tcp::ConnectionPool::MockInstance* tcp1 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n\n  Tcp::ConnectionPool::MockInstance* tcp2 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n\n  EXPECT_NE(cp1, cp2);\n  EXPECT_NE(tcp1, tcp2);\n\n  EXPECT_CALL(*cp2, addDrainedCallback(_))\n      .WillOnce(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  EXPECT_CALL(*cp1, addDrainedCallback(_))\n      .WillOnce(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  EXPECT_CALL(*tcp1, addDrainedCallback(_))\n      .WillOnce(Invoke([](Tcp::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  EXPECT_CALL(*tcp2, addDrainedCallback(_))\n      .WillOnce(Invoke([](Tcp::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  HostVector hosts_removed;\n  hosts_removed.push_back(host2);\n\n  // This update 
should drain all connection pools (host1, host2).\n  cluster.prioritySet().updateHosts(\n      0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, {},\n      hosts_removed, 100);\n\n  // Recreate connection pool for host1.\n  cp1 = dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n      \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n\n  tcp1 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n\n  HostSharedPtr host3 = makeTestHost(cluster.info(), \"tcp://127.0.0.1:82\");\n\n  HostVector hosts_added;\n  hosts_added.push_back(host3);\n\n  EXPECT_CALL(*cp1, addDrainedCallback(_))\n      .WillOnce(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  EXPECT_CALL(*tcp1, addDrainedCallback(_))\n      .WillOnce(Invoke([](Tcp::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  // Adding host3 should drain connection pool for host1.\n  cluster.prioritySet().updateHosts(\n      0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr,\n      hosts_added, {}, 100);\n}\n\nTEST_F(ClusterManagerImplTest, ConnPoolsNotDrainedOnHostSetChange) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n  )EOF\";\n\n  ReadyWatcher initialized;\n  EXPECT_CALL(initialized, ready());\n  create(parseBootstrapFromV3Yaml(yaml));\n\n  // Set up for an initialize callback.\n  cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n  std::unique_ptr<MockClusterUpdateCallbacks> callbacks(new NiceMock<MockClusterUpdateCallbacks>());\n  ClusterUpdateCallbacksHandlePtr cb =\n      cluster_manager_->addThreadLocalClusterUpdateCallbacks(*callbacks);\n\n  Cluster& cluster = 
cluster_manager_->activeClusters().begin()->second;\n\n  // Set up the HostSet.\n  HostSharedPtr host1 = makeTestHost(cluster.info(), \"tcp://127.0.0.1:80\");\n\n  HostVector hosts{host1};\n  auto hosts_ptr = std::make_shared<HostVector>(hosts);\n\n  // Sending non-mergeable updates.\n  cluster.prioritySet().updateHosts(\n      0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, hosts, {},\n      100);\n\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _))\n      .Times(1)\n      .WillRepeatedly(ReturnNew<Http::ConnectionPool::MockInstance>());\n\n  EXPECT_CALL(factory_, allocateTcpConnPool_(_))\n      .Times(1)\n      .WillRepeatedly(ReturnNew<Tcp::ConnectionPool::MockInstance>());\n\n  // This should provide us a CP for each of the above hosts.\n  Http::ConnectionPool::MockInstance* cp1 =\n      dynamic_cast<Http::ConnectionPool::MockInstance*>(cluster_manager_->httpConnPoolForCluster(\n          \"cluster_1\", ResourcePriority::Default, Http::Protocol::Http11, nullptr));\n\n  Tcp::ConnectionPool::MockInstance* tcp1 = dynamic_cast<Tcp::ConnectionPool::MockInstance*>(\n      cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr));\n\n  HostSharedPtr host2 = makeTestHost(cluster.info(), \"tcp://127.0.0.1:82\");\n  HostVector hosts_added;\n  hosts_added.push_back(host2);\n\n  // No connection pools should be drained.\n  EXPECT_CALL(*cp1, drainConnections()).Times(0);\n  EXPECT_CALL(*tcp1, drainConnections()).Times(0);\n\n  // No connection pools should be drained.\n  cluster.prioritySet().updateHosts(\n      0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr,\n      hosts_added, {}, 100);\n}\n\nTEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameStatic) {\n  std::string yaml = R\"EOF(\nstatic_resources:\n  clusters:\n  - name: new_cluster\n    connect_timeout: 4s\n    type: STATIC\n    load_assignment:\n      cluster_name: \"domains\"\n      endpoints:\n       
 - priority: 10\n          lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.2\n                  port_value: 11001\ncluster_manager:\n  local_cluster_name: new_cluster\n)EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n                            \"Unexpected non-zero priority for local cluster 'new_cluster'.\");\n}\n\nTEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameStrictDns) {\n  std::string yaml = R\"EOF(\nstatic_resources:\n  clusters:\n  - name: new_cluster\n    connect_timeout: 4s\n    type: STRICT_DNS\n    load_assignment:\n      cluster_name: \"domains\"\n      endpoints:\n        - priority: 10\n          lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.2\n                  port_value: 11001\ncluster_manager:\n  local_cluster_name: new_cluster\n)EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException,\n                            \"Unexpected non-zero priority for local cluster 'new_cluster'.\");\n}\n\nTEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameLogicalDns) {\n  std::string yaml = R\"EOF(\nstatic_resources:\n  clusters:\n  - name: new_cluster\n    connect_timeout: 4s\n    type: LOGICAL_DNS\n    load_assignment:\n      cluster_name: \"domains\"\n      endpoints:\n        - priority: 10\n          lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.2\n                  port_value: 11001\ncluster_manager:\n  local_cluster_name: new_cluster\n)EOF\";\n\n  // The priority for LOGICAL_DNS endpoints are written, so we just verify that there is only a\n  // single priority even if the endpoint was configured to be priority 10.\n  create(parseBootstrapFromV3Yaml(yaml));\n  const auto cluster = 
cluster_manager_->get(\"new_cluster\");\n  EXPECT_EQ(1, cluster->prioritySet().hostSetsPerPriority().size());\n}\n\nTEST_F(ClusterManagerImplTest, ConnectionPoolPerDownstreamConnection) {\n  const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n      connection_pool_per_downstream_connection: true\n      load_assignment:\n        cluster_name: cluster_1\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 11001\n  )EOF\";\n  create(parseBootstrapFromV3Yaml(yaml));\n  NiceMock<MockLoadBalancerContext> lb_context;\n  NiceMock<Network::MockConnection> downstream_connection;\n  Network::Socket::OptionsSharedPtr options_to_return = nullptr;\n  ON_CALL(lb_context, downstreamConnection()).WillByDefault(Return(&downstream_connection));\n  ON_CALL(downstream_connection, socketOptions()).WillByDefault(ReturnRef(options_to_return));\n\n  std::vector<Http::ConnectionPool::MockInstance*> conn_pool_vector;\n  for (size_t i = 0; i < 3; ++i) {\n    conn_pool_vector.push_back(new Http::ConnectionPool::MockInstance());\n    EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(conn_pool_vector.back()));\n    EXPECT_CALL(downstream_connection, hashKey)\n        .WillOnce(Invoke([i](std::vector<uint8_t>& hash_key) { hash_key.push_back(i); }));\n    EXPECT_EQ(conn_pool_vector.back(),\n              cluster_manager_->httpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default,\n                                                       Http::Protocol::Http11, &lb_context));\n  }\n\n  // Check that the first entry is still in the pool map\n  EXPECT_CALL(downstream_connection, hashKey).WillOnce(Invoke([](std::vector<uint8_t>& hash_key) {\n    hash_key.push_back(0);\n  }));\n  EXPECT_EQ(conn_pool_vector.front(),\n         
   cluster_manager_->httpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default,\n                                                     Http::Protocol::Http11, &lb_context));\n}\n\nclass PrefetchTest : public ClusterManagerImplTest {\npublic:\n  void initialize(float ratio) {\n    const std::string yaml = R\"EOF(\n  static_resources:\n    clusters:\n    - name: cluster_1\n      connect_timeout: 0.250s\n      lb_policy: ROUND_ROBIN\n      type: STATIC\n  )EOF\";\n\n    ReadyWatcher initialized;\n    EXPECT_CALL(initialized, ready());\n    envoy::config::bootstrap::v3::Bootstrap config = parseBootstrapFromV3Yaml(yaml);\n    if (ratio != 0) {\n      config.mutable_static_resources()\n          ->mutable_clusters(0)\n          ->mutable_prefetch_policy()\n          ->mutable_predictive_prefetch_ratio()\n          ->set_value(ratio);\n    }\n    create(config);\n\n    // Set up for an initialize callback.\n    cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); });\n\n    std::unique_ptr<MockClusterUpdateCallbacks> callbacks(\n        new NiceMock<MockClusterUpdateCallbacks>());\n    ClusterUpdateCallbacksHandlePtr cb =\n        cluster_manager_->addThreadLocalClusterUpdateCallbacks(*callbacks);\n\n    cluster_ = &cluster_manager_->activeClusters().begin()->second.get();\n\n    // Set up the HostSet.\n    host1_ = makeTestHost(cluster_->info(), \"tcp://127.0.0.1:80\");\n    host2_ = makeTestHost(cluster_->info(), \"tcp://127.0.0.1:80\");\n\n    HostVector hosts{host1_, host2_};\n    auto hosts_ptr = std::make_shared<HostVector>(hosts);\n\n    // Sending non-mergeable updates.\n    cluster_->prioritySet().updateHosts(\n        0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, hosts,\n        {}, 100);\n  }\n\n  Cluster* cluster_{};\n  HostSharedPtr host1_;\n  HostSharedPtr host2_;\n};\n\nTEST_F(PrefetchTest, PrefetchOff) {\n  // With prefetch set to 0, each request for a connection pool will only\n  // allocate 
that conn pool.\n  initialize(0);\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _))\n      .Times(1)\n      .WillRepeatedly(ReturnNew<Http::ConnectionPool::MockInstance>());\n  cluster_manager_->httpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default,\n                                           Http::Protocol::Http11, nullptr);\n\n  EXPECT_CALL(factory_, allocateTcpConnPool_(_))\n      .Times(1)\n      .WillRepeatedly(ReturnNew<Tcp::ConnectionPool::MockInstance>());\n  cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr);\n}\n\nTEST_F(PrefetchTest, PrefetchOn) {\n  // With prefetch set to 1.1, each request for a connection pool will kick off\n  // prefetching, so create the pool for both the current connection and the\n  // anticipated one.\n  initialize(1.1);\n  EXPECT_CALL(factory_, allocateConnPool_(_, _, _))\n      .Times(2)\n      .WillRepeatedly(ReturnNew<NiceMock<Http::ConnectionPool::MockInstance>>());\n  cluster_manager_->httpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default,\n                                           Http::Protocol::Http11, nullptr);\n\n  EXPECT_CALL(factory_, allocateTcpConnPool_(_))\n      .Times(2)\n      .WillRepeatedly(ReturnNew<NiceMock<Tcp::ConnectionPool::MockInstance>>());\n  cluster_manager_->tcpConnPoolForCluster(\"cluster_1\", ResourcePriority::Default, nullptr);\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/cluster_update_tracker_test.cc",
    "content": "#include \"common/upstream/cluster_update_tracker.h\"\n\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/thread_local_cluster.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass ClusterUpdateTrackerTest : public testing::Test {\npublic:\n  ClusterUpdateTrackerTest() {\n    expected_.cluster_.info_->name_ = cluster_name_;\n    irrelevant_.cluster_.info_->name_ = \"unrelated_cluster\";\n  }\n\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<Upstream::MockThreadLocalCluster> expected_;\n  NiceMock<Upstream::MockThreadLocalCluster> irrelevant_;\n  const std::string cluster_name_{\"fake_cluster\"};\n};\n\nTEST_F(ClusterUpdateTrackerTest, ClusterDoesNotExistAtConstructionTime) {\n  EXPECT_CALL(cm_, get(cluster_name_)).WillOnce(Return(nullptr));\n\n  ClusterUpdateTracker cluster_tracker(cm_, cluster_name_);\n\n  EXPECT_FALSE(cluster_tracker.exists());\n  EXPECT_EQ(cluster_tracker.info(), nullptr);\n}\n\nTEST_F(ClusterUpdateTrackerTest, ClusterDoesExistAtConstructionTime) {\n  EXPECT_CALL(cm_, get(cluster_name_)).WillOnce(Return(&expected_));\n\n  ClusterUpdateTracker cluster_tracker(cm_, cluster_name_);\n\n  EXPECT_TRUE(cluster_tracker.exists());\n  EXPECT_EQ(cluster_tracker.info(), expected_.cluster_.info_);\n}\n\nTEST_F(ClusterUpdateTrackerTest, ShouldProperlyHandleUpdateCallbacks) {\n  EXPECT_CALL(cm_, get(cluster_name_)).WillOnce(Return(nullptr));\n\n  ClusterUpdateTracker cluster_tracker(cm_, cluster_name_);\n\n  {\n    EXPECT_FALSE(cluster_tracker.exists());\n    EXPECT_EQ(cluster_tracker.info(), nullptr);\n  }\n\n  {\n    // Simulate addition of an irrelevant cluster.\n    cluster_tracker.onClusterAddOrUpdate(irrelevant_);\n\n    EXPECT_FALSE(cluster_tracker.exists());\n    EXPECT_EQ(cluster_tracker.info(), nullptr);\n  }\n\n  {\n    // Simulate addition of the relevant cluster.\n 
   cluster_tracker.onClusterAddOrUpdate(expected_);\n\n    EXPECT_TRUE(cluster_tracker.exists());\n    EXPECT_EQ(cluster_tracker.info(), expected_.cluster_.info_);\n  }\n\n  {\n    // Simulate removal of an irrelevant cluster.\n    cluster_tracker.onClusterRemoval(irrelevant_.cluster_.info_->name_);\n\n    EXPECT_TRUE(cluster_tracker.exists());\n    EXPECT_EQ(cluster_tracker.info(), expected_.cluster_.info_);\n  }\n\n  {\n    // Simulate removal of the relevant cluster.\n    cluster_tracker.onClusterRemoval(cluster_name_);\n\n    EXPECT_FALSE(cluster_tracker.exists());\n    EXPECT_EQ(cluster_tracker.info(), nullptr);\n  }\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/conn_pool_map_impl_test.cc",
    "content": "#include <memory>\n#include <vector>\n\n#include \"envoy/http/conn_pool.h\"\n\n#include \"common/upstream/conn_pool_map_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/conn_pool.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::AtLeast;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass ConnPoolMapImplTest : public testing::Test {\npublic:\n  // Note, we could test with Http::ConnectionPool::MockInstance here, which would simplify the\n  // test. However, it's nice to test against an actual interface we'll be using.\n  using TestMap = ConnPoolMap<int, Http::ConnectionPool::Instance>;\n  using TestMapPtr = std::unique_ptr<TestMap>;\n\n  TestMapPtr makeTestMap() {\n    return std::make_unique<TestMap>(dispatcher_, host_, ResourcePriority::Default);\n  }\n\n  TestMapPtr makeTestMapWithLimit(uint64_t limit) {\n    return makeTestMapWithLimitAtPriority(limit, ResourcePriority::Default);\n  }\n\n  TestMapPtr makeTestMapWithLimitAtPriority(uint64_t limit, ResourcePriority priority) {\n    host_->cluster_.resetResourceManager(1024, 1024, 1024, 1024, limit);\n    return std::make_unique<TestMap>(dispatcher_, host_, priority);\n  }\n\n  TestMap::PoolFactory getBasicFactory() {\n    return [&]() {\n      auto pool = std::make_unique<NiceMock<Http::ConnectionPool::MockInstance>>();\n      ON_CALL(*pool, hasActiveConnections).WillByDefault(Return(false));\n      mock_pools_.push_back(pool.get());\n      return pool;\n    };\n  }\n\n  // Returns a pool which claims it has active connections.\n  TestMap::PoolFactory getActivePoolFactory() {\n    return [&]() {\n      auto pool = std::make_unique<NiceMock<Http::ConnectionPool::MockInstance>>();\n      ON_CALL(*pool, 
hasActiveConnections).WillByDefault(Return(true));\n      mock_pools_.push_back(pool.get());\n      return pool;\n    };\n  }\n  TestMap::PoolFactory getNeverCalledFactory() {\n    return []() {\n      EXPECT_TRUE(false);\n      return nullptr;\n    };\n  }\n\n  TestMap::PoolFactory getFactoryExpectDrainedCb(Http::ConnectionPool::Instance::DrainedCb* cb) {\n    return [this, cb]() {\n      auto pool = std::make_unique<NiceMock<Http::ConnectionPool::MockInstance>>();\n      EXPECT_CALL(*pool, addDrainedCallback(_)).WillOnce(SaveArg<0>(cb));\n      mock_pools_.push_back(pool.get());\n      return pool;\n    };\n  }\n\nprotected:\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  std::vector<NiceMock<Http::ConnectionPool::MockInstance>*> mock_pools_;\n  std::shared_ptr<NiceMock<MockHost>> host_ = std::make_shared<NiceMock<MockHost>>();\n};\n\nTEST_F(ConnPoolMapImplTest, TestMapIsEmptyOnConstruction) {\n  TestMapPtr test_map = makeTestMap();\n\n  EXPECT_EQ(test_map->size(), 0);\n}\n\nTEST_F(ConnPoolMapImplTest, TestAddingAConnPoolIncreasesSize) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  EXPECT_EQ(test_map->size(), 1);\n}\n\nTEST_F(ConnPoolMapImplTest, TestAddingTwoConnPoolsIncreasesSize) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getBasicFactory());\n  EXPECT_EQ(test_map->size(), 2);\n}\n\nTEST_F(ConnPoolMapImplTest, TestConnPoolReturnedMatchesCreated) {\n  TestMapPtr test_map = makeTestMap();\n\n  TestMap::PoolOptRef pool = test_map->getPool(1, getBasicFactory());\n  EXPECT_EQ(&(pool.value().get()), mock_pools_[0]);\n}\n\nTEST_F(ConnPoolMapImplTest, TestConnSecondPoolReturnedMatchesCreated) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  TestMap::PoolOptRef pool = test_map->getPool(2, getBasicFactory());\n  EXPECT_EQ(&(pool.value().get()), mock_pools_[1]);\n}\n\nTEST_F(ConnPoolMapImplTest, 
TestMultipleOfSameKeyReturnsOriginal) {\n  TestMapPtr test_map = makeTestMap();\n\n  TestMap::PoolOptRef pool1 = test_map->getPool(1, getBasicFactory());\n  TestMap::PoolOptRef pool2 = test_map->getPool(2, getBasicFactory());\n\n  EXPECT_EQ(&(pool1.value().get()), &(test_map->getPool(1, getBasicFactory()).value().get()));\n  EXPECT_EQ(&(pool2.value().get()), &(test_map->getPool(2, getBasicFactory()).value().get()));\n  EXPECT_EQ(test_map->size(), 2);\n}\n\nTEST_F(ConnPoolMapImplTest, TestEmptyClearWorks) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->clear();\n  EXPECT_EQ(test_map->size(), 0);\n}\n\nTEST_F(ConnPoolMapImplTest, TestClearEmptiesOutMap) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getBasicFactory());\n\n  test_map->clear();\n  EXPECT_EQ(test_map->size(), 0);\n}\n\nTEST_F(ConnPoolMapImplTest, CallbacksPassedToPools) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getBasicFactory());\n  Http::ConnectionPool::Instance::DrainedCb cb1;\n  EXPECT_CALL(*mock_pools_[0], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cb1));\n  Http::ConnectionPool::Instance::DrainedCb cb2;\n  EXPECT_CALL(*mock_pools_[1], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cb2));\n\n  ReadyWatcher watcher;\n  test_map->addDrainedCallback([&watcher] { watcher.ready(); });\n\n  EXPECT_CALL(watcher, ready()).Times(2);\n  cb1();\n  cb2();\n}\n\n// Tests that if we add the callback first, it is passed along when pools are created later.\nTEST_F(ConnPoolMapImplTest, CallbacksCachedAndPassedOnCreation) {\n  TestMapPtr test_map = makeTestMap();\n\n  ReadyWatcher watcher;\n  test_map->addDrainedCallback([&watcher] { watcher.ready(); });\n\n  Http::ConnectionPool::Instance::DrainedCb cb1;\n  test_map->getPool(1, getFactoryExpectDrainedCb(&cb1));\n\n  Http::ConnectionPool::Instance::DrainedCb cb2;\n  test_map->getPool(2, getFactoryExpectDrainedCb(&cb2));\n\n  
EXPECT_CALL(watcher, ready()).Times(2);\n  cb1();\n  cb2();\n}\n\n// Tests that if we drain connections on an empty map, nothing happens.\nTEST_F(ConnPoolMapImplTest, EmptyMapDrainConnectionsNop) {\n  TestMapPtr test_map = makeTestMap();\n  test_map->drainConnections();\n}\n\n// Tests that we forward drainConnections to the pools.\nTEST_F(ConnPoolMapImplTest, DrainConnectionsForwarded) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getBasicFactory());\n  EXPECT_CALL(*mock_pools_[0], drainConnections());\n  EXPECT_CALL(*mock_pools_[1], drainConnections());\n\n  test_map->drainConnections();\n}\n\nTEST_F(ConnPoolMapImplTest, ClearDefersDelete) {\n  TestMapPtr test_map = makeTestMap();\n\n  Http::ConnectionPool::Instance::DrainedCb cb1;\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getBasicFactory());\n  test_map->clear();\n\n  EXPECT_EQ(dispatcher_.to_delete_.size(), 2);\n}\n\nTEST_F(ConnPoolMapImplTest, GetPoolHittingLimitFails) {\n  TestMapPtr test_map = makeTestMapWithLimit(1);\n\n  test_map->getPool(1, getBasicFactory());\n  ON_CALL(*mock_pools_[0], hasActiveConnections()).WillByDefault(Return(true));\n  auto opt_pool = test_map->getPool(2, getNeverCalledFactory());\n\n  EXPECT_FALSE(opt_pool.has_value());\n  EXPECT_EQ(test_map->size(), 1);\n}\n\nTEST_F(ConnPoolMapImplTest, GetPoolHittingLimitIncrementsFailureCounter) {\n  TestMapPtr test_map = makeTestMapWithLimit(1);\n\n  test_map->getPool(1, getBasicFactory());\n  ON_CALL(*mock_pools_[0], hasActiveConnections()).WillByDefault(Return(true));\n  test_map->getPool(2, getNeverCalledFactory());\n\n  EXPECT_EQ(host_->cluster_.stats_.upstream_cx_pool_overflow_.value(), 1);\n}\n\nTEST_F(ConnPoolMapImplTest, GetPoolHittingLimitIncrementsFailureMultiple) {\n  TestMapPtr test_map = makeTestMapWithLimit(1);\n\n  test_map->getPool(1, getBasicFactory());\n  ON_CALL(*mock_pools_[0], hasActiveConnections()).WillByDefault(Return(true));\n  
test_map->getPool(2, getNeverCalledFactory());\n  test_map->getPool(2, getNeverCalledFactory());\n  test_map->getPool(2, getNeverCalledFactory());\n\n  EXPECT_EQ(host_->cluster_.stats_.upstream_cx_pool_overflow_.value(), 3);\n}\n\nTEST_F(ConnPoolMapImplTest, GetPoolHittingLimitGreaterThan1Fails) {\n  TestMapPtr test_map = makeTestMapWithLimit(2);\n\n  test_map->getPool(1, getActivePoolFactory());\n  test_map->getPool(2, getActivePoolFactory());\n  auto opt_pool = test_map->getPool(3, getNeverCalledFactory());\n\n  EXPECT_FALSE(opt_pool.has_value());\n  EXPECT_EQ(test_map->size(), 2);\n}\n\nTEST_F(ConnPoolMapImplTest, GetPoolLimitHitThenOneFreesUpNextCallSucceeds) {\n  TestMapPtr test_map = makeTestMapWithLimit(1);\n\n  test_map->getPool(1, getActivePoolFactory());\n  test_map->getPool(2, getNeverCalledFactory());\n\n  ON_CALL(*mock_pools_[0], hasActiveConnections()).WillByDefault(Return(false));\n\n  auto opt_pool = test_map->getPool(2, getBasicFactory());\n\n  EXPECT_TRUE(opt_pool.has_value());\n  EXPECT_EQ(test_map->size(), 1);\n}\n\nTEST_F(ConnPoolMapImplTest, GetPoolLimitHitFollowedBySuccessDoesNotClearFailure) {\n  TestMapPtr test_map = makeTestMapWithLimit(1);\n\n  test_map->getPool(1, getActivePoolFactory());\n  test_map->getPool(2, getNeverCalledFactory());\n\n  ON_CALL(*mock_pools_[0], hasActiveConnections()).WillByDefault(Return(false));\n\n  test_map->getPool(2, getBasicFactory());\n  EXPECT_EQ(host_->cluster_.stats_.upstream_cx_pool_overflow_.value(), 1);\n}\n\n// Test that only the pool which are idle are actually cleared\nTEST_F(ConnPoolMapImplTest, GetOnePoolIdleOnlyClearsThatOne) {\n  TestMapPtr test_map = makeTestMapWithLimit(2);\n\n  // Get a pool which says it's not active.\n  test_map->getPool(1, getBasicFactory());\n\n  // Get one that *is* active.\n  auto opt_pool = test_map->getPool(2, getActivePoolFactory());\n\n  // this should force out #1\n  auto new_pool = test_map->getPool(3, getBasicFactory());\n\n  // Get 2 again. 
It should succeed, but not invoke the factory.\n  auto opt_pool2 = test_map->getPool(2, getNeverCalledFactory());\n\n  EXPECT_TRUE(opt_pool.has_value());\n  EXPECT_TRUE(new_pool.has_value());\n  EXPECT_EQ(&(opt_pool.value().get()), &(opt_pool2.value().get()));\n  EXPECT_EQ(test_map->size(), 2);\n}\n\n// Show that even if all pools are idle, we only free up one as necessary\nTEST_F(ConnPoolMapImplTest, GetPoolLimitHitManyIdleOnlyOneFreed) {\n  TestMapPtr test_map = makeTestMapWithLimit(3);\n\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getBasicFactory());\n  test_map->getPool(3, getBasicFactory());\n  auto opt_pool = test_map->getPool(4, getBasicFactory());\n\n  ASSERT_TRUE(opt_pool.has_value());\n  EXPECT_EQ(test_map->size(), 3);\n}\n\n// Show that if we hit the limit once, then again with the same keys, we don't clean out the\n// previously cleaned entries. Essentially, ensure we clean up any state related to being full.\nTEST_F(ConnPoolMapImplTest, GetPoolFailStateIsCleared) {\n  TestMapPtr test_map = makeTestMapWithLimit(2);\n\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getActivePoolFactory());\n  test_map->getPool(3, getBasicFactory());\n\n  // At this point, 1 should be cleared out. Let's get it again, then trigger a full condition.\n  auto opt_pool = test_map->getPool(1, getActivePoolFactory());\n  EXPECT_TRUE(opt_pool.has_value());\n\n  // We're full. 
Because pool 1  and 2 are busy, the next call should fail.\n  auto opt_pool_failed = test_map->getPool(4, getNeverCalledFactory());\n  EXPECT_FALSE(opt_pool_failed.has_value());\n\n  EXPECT_EQ(test_map->size(), 2);\n}\n\nTEST_F(ConnPoolMapImplTest, CircuitBreakerNotSetOnClear) {\n  TestMapPtr test_map = makeTestMapWithLimit(1);\n\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getBasicFactory());\n  test_map->getPool(3, getBasicFactory());\n\n  test_map->clear();\n\n  EXPECT_EQ(host_->cluster_.circuit_breakers_stats_.cx_pool_open_.value(), 0);\n}\n\nTEST_F(ConnPoolMapImplTest, CircuitBreakerSetAtLimit) {\n  TestMapPtr test_map = makeTestMapWithLimit(2);\n\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getBasicFactory());\n\n  EXPECT_EQ(host_->cluster_.circuit_breakers_stats_.cx_pool_open_.value(), 1);\n}\n\nTEST_F(ConnPoolMapImplTest, CircuitBreakerClearedOnDestroy) {\n  {\n    TestMapPtr test_map = makeTestMapWithLimit(2);\n\n    test_map->getPool(1, getBasicFactory());\n    test_map->getPool(2, getBasicFactory());\n  }\n\n  EXPECT_EQ(host_->cluster_.circuit_breakers_stats_.cx_pool_open_.value(), 0);\n}\n\nTEST_F(ConnPoolMapImplTest, CircuitBreakerUsesProvidedPriorityDefault) {\n  TestMapPtr test_map = makeTestMapWithLimitAtPriority(2, ResourcePriority::Default);\n\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(0);\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(AtLeast(1));\n\n  test_map->getPool(1, getBasicFactory());\n  test_map->getPool(2, getBasicFactory());\n}\n\nTEST_F(ConnPoolMapImplTest, CircuitBreakerUsesProvidedPriorityHigh) {\n  TestMapPtr test_map = makeTestMapWithLimitAtPriority(2, ResourcePriority::High);\n\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(AtLeast(1));\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(0);\n\n  test_map->getPool(1, getBasicFactory());\n  
test_map->getPool(2, getBasicFactory());\n}\n\n// The following tests only die in debug builds, so don't run them if this isn't one.\n#if !defined(NDEBUG)\nclass ConnPoolMapImplDeathTest : public ConnPoolMapImplTest {};\n\nTEST_F(ConnPoolMapImplDeathTest, ReentryClearTripsAssert) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  ON_CALL(*mock_pools_[0], addDrainedCallback(_))\n      .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->clear(); }),\n               \".*Details: A resource should only be entered once\");\n}\n\nTEST_F(ConnPoolMapImplDeathTest, ReentryGetPoolTripsAssert) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  ON_CALL(*mock_pools_[0], addDrainedCallback(_))\n      .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  EXPECT_DEATH(\n      test_map->addDrainedCallback([&test_map, this] { test_map->getPool(2, getBasicFactory()); }),\n      \".*Details: A resource should only be entered once\");\n}\n\nTEST_F(ConnPoolMapImplDeathTest, ReentryDrainConnectionsTripsAssert) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  ON_CALL(*mock_pools_[0], addDrainedCallback(_))\n      .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->drainConnections(); }),\n               \".*Details: A resource should only be entered once\");\n}\n\nTEST_F(ConnPoolMapImplDeathTest, ReentryAddDrainedCallbackTripsAssert) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(1, getBasicFactory());\n  ON_CALL(*mock_pools_[0], addDrainedCallback(_))\n      .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); }));\n\n  EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { 
test_map->addDrainedCallback([]() {}); }),\n               \".*Details: A resource should only be entered once\");\n}\n#endif // !defined(NDEBUG)\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/edf_scheduler_test.cc",
    "content": "#include \"common/upstream/edf_scheduler.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nTEST(EdfSchedulerTest, Empty) {\n  EdfScheduler<uint32_t> sched;\n  EXPECT_EQ(nullptr, sched.peekAgain([](const double&) { return 0; }));\n  EXPECT_EQ(nullptr, sched.pickAndAdd([](const double&) { return 0; }));\n}\n\n// Validate we get regular RR behavior when all weights are the same.\nTEST(EdfSchedulerTest, Unweighted) {\n  EdfScheduler<uint32_t> sched;\n  constexpr uint32_t num_entries = 128;\n  std::shared_ptr<uint32_t> entries[num_entries];\n\n  for (uint32_t i = 0; i < num_entries; ++i) {\n    entries[i] = std::make_shared<uint32_t>(i);\n    sched.add(1, entries[i]);\n  }\n\n  for (uint32_t rounds = 0; rounds < 128; ++rounds) {\n    for (uint32_t i = 0; i < num_entries; ++i) {\n      auto peek = sched.peekAgain([](const double&) { return 1; });\n      auto p = sched.pickAndAdd([](const double&) { return 1; });\n      EXPECT_EQ(i, *p);\n      EXPECT_EQ(*peek, *p);\n    }\n  }\n}\n\n// Validate we get weighted RR behavior when weights are distinct.\nTEST(EdfSchedulerTest, Weighted) {\n  EdfScheduler<uint32_t> sched;\n  constexpr uint32_t num_entries = 128;\n  std::shared_ptr<uint32_t> entries[num_entries];\n  uint32_t pick_count[num_entries];\n\n  for (uint32_t i = 0; i < num_entries; ++i) {\n    entries[i] = std::make_shared<uint32_t>(i);\n    sched.add(i + 1, entries[i]);\n    pick_count[i] = 0;\n  }\n\n  for (uint32_t i = 0; i < (num_entries * (1 + num_entries)) / 2; ++i) {\n    auto peek = sched.peekAgain([](const double& orig) { return orig + 1; });\n    auto p = sched.pickAndAdd([](const double& orig) { return orig + 1; });\n    EXPECT_EQ(*p, *peek);\n    ++pick_count[*p];\n  }\n\n  for (uint32_t i = 0; i < num_entries; ++i) {\n    EXPECT_EQ(i + 1, pick_count[i]);\n  }\n}\n\n// Validate that expired entries are ignored.\nTEST(EdfSchedulerTest, Expired) {\n  EdfScheduler<uint32_t> sched;\n\n  auto 
second_entry = std::make_shared<uint32_t>(42);\n  {\n    auto first_entry = std::make_shared<uint32_t>(37);\n    sched.add(2, first_entry);\n    sched.add(1, second_entry);\n  }\n\n  auto peek = sched.peekAgain([](const double&) { return 1; });\n  auto p = sched.pickAndAdd([](const double&) { return 1; });\n  EXPECT_EQ(*peek, *p);\n  EXPECT_EQ(*second_entry, *p);\n  EXPECT_EQ(*second_entry, *p);\n}\n\n// Validate that expired entries are not peeked.\nTEST(EdfSchedulerTest, ExpiredPeek) {\n  EdfScheduler<uint32_t> sched;\n\n  {\n    auto second_entry = std::make_shared<uint32_t>(42);\n    auto first_entry = std::make_shared<uint32_t>(37);\n    sched.add(2, first_entry);\n    sched.add(1, second_entry);\n  }\n  auto third_entry = std::make_shared<uint32_t>(37);\n  sched.add(3, third_entry);\n\n  EXPECT_EQ(37, *sched.peekAgain([](const double&) { return 1; }));\n}\n\n// Validate that expired entries are ignored.\nTEST(EdfSchedulerTest, ExpiredPeekedIsNotPicked) {\n  EdfScheduler<uint32_t> sched;\n\n  {\n    auto second_entry = std::make_shared<uint32_t>(42);\n    auto first_entry = std::make_shared<uint32_t>(37);\n    sched.add(2, first_entry);\n    sched.add(1, second_entry);\n    for (int i = 0; i < 3; ++i) {\n      EXPECT_TRUE(sched.peekAgain([](const double&) { return 1; }) != nullptr);\n    }\n  }\n\n  EXPECT_TRUE(sched.peekAgain([](const double&) { return 1; }) == nullptr);\n  EXPECT_TRUE(sched.pickAndAdd([](const double&) { return 1; }) == nullptr);\n}\n\nTEST(EdfSchedulerTest, ManyPeekahead) {\n  EdfScheduler<uint32_t> sched1;\n  EdfScheduler<uint32_t> sched2;\n  constexpr uint32_t num_entries = 128;\n  std::shared_ptr<uint32_t> entries[num_entries];\n\n  for (uint32_t i = 0; i < num_entries; ++i) {\n    entries[i] = std::make_shared<uint32_t>(i);\n    sched1.add(1, entries[i]);\n    sched2.add(1, entries[i]);\n  }\n\n  std::vector<uint32_t> picks;\n  for (uint32_t rounds = 0; rounds < 10; ++rounds) {\n    picks.push_back(*sched1.peekAgain([](const double&) { 
return 1; }));\n  }\n  for (uint32_t rounds = 0; rounds < 10; ++rounds) {\n    auto p1 = sched1.pickAndAdd([](const double&) { return 1; });\n    auto p2 = sched2.pickAndAdd([](const double&) { return 1; });\n    EXPECT_EQ(picks[rounds], *p1);\n    EXPECT_EQ(*p2, *p1);\n  }\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/eds_speed_test.cc",
    "content": "// Note: this should be run with --compilation_mode=opt, and would benefit from a\n// quiescent system with disabled cstate power management.\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/grpc_mux_impl.h\"\n#include \"common/config/grpc_subscription_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/eds.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"test/benchmark/main.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"benchmark/benchmark.h\"\n\nusing ::benchmark::State;\nusing Envoy::benchmark::skipExpensiveBenchmarks;\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass EdsSpeedTest {\npublic:\n  EdsSpeedTest(State& state, bool v2_config)\n      : state_(state), v2_config_(v2_config),\n        type_url_(v2_config_\n                      ? 
\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\"\n                      : \"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\"),\n        subscription_stats_(Config::Utility::generateStats(stats_)),\n        api_(Api::createApiForTest(stats_)), async_client_(new Grpc::MockAsyncClient()),\n        grpc_mux_(new Config::GrpcMuxImpl(\n            local_info_, std::unique_ptr<Grpc::MockAsyncClient>(async_client_), dispatcher_,\n            *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(\n                \"envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints\"),\n            envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, {}, true)) {\n    resetCluster(R\"EOF(\n      name: name\n      connect_timeout: 0.25s\n      type: EDS\n      eds_cluster_config:\n        service_name: fare\n        eds_config:\n          api_config_source:\n            cluster_names:\n            - eds\n            refresh_delay: 1s\n    )EOF\",\n                 Envoy::Upstream::Cluster::InitializePhase::Secondary);\n\n    EXPECT_CALL(*cm_.subscription_factory_.subscription_, start(_, _));\n    cluster_->initialize([this] { initialized_ = true; });\n    EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(testing::Return(&async_stream_));\n    subscription_->start({\"fare\"});\n  }\n\n  void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) {\n    local_info_.node_.mutable_locality()->set_zone(\"us-east-1a\");\n    eds_cluster_ = parseClusterFromV3Yaml(yaml_config);\n    Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n        \"cluster.{}.\",\n        eds_cluster_.alt_stat_name().empty() ? 
eds_cluster_.name() : eds_cluster_.alt_stat_name()));\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n    cluster_ = std::make_shared<EdsClusterImpl>(eds_cluster_, runtime_, factory_context,\n                                                std::move(scope), false);\n    EXPECT_EQ(initialize_phase, cluster_->initializePhase());\n    eds_callbacks_ = cm_.subscription_factory_.callbacks_;\n    subscription_ = std::make_unique<Config::GrpcSubscriptionImpl>(\n        grpc_mux_, *eds_callbacks_, resource_decoder_, subscription_stats_, type_url_, dispatcher_,\n        std::chrono::milliseconds(), false);\n  }\n\n  // Set up an EDS config with multiple priorities, localities, weights and make sure\n  // they are loaded as expected.\n  void priorityAndLocalityWeightedHelper(bool ignore_unknown_dynamic_fields, size_t num_hosts,\n                                         bool healthy) {\n    state_.PauseTiming();\n\n    envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n    cluster_load_assignment.set_cluster_name(\"fare\");\n\n    // Add a whole bunch of hosts in a single place:\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    endpoints->set_priority(1);\n    auto* locality = endpoints->mutable_locality();\n    locality->set_region(\"region\");\n    locality->set_zone(\"zone\");\n    locality->set_sub_zone(\"sub_zone\");\n    endpoints->mutable_load_balancing_weight()->set_value(1);\n\n    uint32_t port = 1000;\n    for (size_t i = 0; i < num_hosts; ++i) {\n      auto* lb_endpoint = endpoints->add_lb_endpoints();\n      if (healthy) {\n        lb_endpoint->set_health_status(envoy::config::core::v3::HEALTHY);\n      } else {\n        lb_endpoint->set_health_status(envoy::config::core::v3::UNHEALTHY);\n      }\n      auto* socket_address =\n       
   lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address();\n      socket_address->set_address(\"10.0.1.\" + std::to_string(i / 60000));\n      socket_address->set_port_value((port + i) % 60000);\n    }\n\n    // this is what we're actually testing:\n    validation_visitor_.setSkipValidation(ignore_unknown_dynamic_fields);\n\n    auto response = std::make_unique<envoy::service::discovery::v3::DiscoveryResponse>();\n    response->set_type_url(type_url_);\n    auto* resource = response->mutable_resources()->Add();\n    resource->PackFrom(cluster_load_assignment);\n    if (v2_config_) {\n      RELEASE_ASSERT(resource->type_url() ==\n                         \"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\",\n                     \"\");\n      resource->set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    }\n    state_.ResumeTiming();\n    grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response));\n    ASSERT(cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality().get()[0].size() ==\n           num_hosts);\n  }\n\n  State& state_;\n  const bool v2_config_;\n  const std::string type_url_;\n  bool initialized_{};\n  Stats::IsolatedStoreImpl stats_;\n  Config::SubscriptionStats subscription_stats_;\n  Ssl::MockContextManager ssl_context_manager_;\n  envoy::config::cluster::v3::Cluster eds_cluster_;\n  NiceMock<MockClusterManager> cm_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  EdsClusterImplSharedPtr cluster_;\n  Config::SubscriptionCallbacks* eds_callbacks_{};\n  Config::OpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>\n      resource_decoder_{validation_visitor_, \"cluster_name\"};\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  
NiceMock<ThreadLocal::MockInstance> tls_;\n  ProtobufMessage::MockValidationVisitor validation_visitor_;\n  Api::ApiPtr api_;\n  Grpc::MockAsyncClient* async_client_;\n  NiceMock<Grpc::MockAsyncStream> async_stream_;\n  Config::GrpcMuxImplSharedPtr grpc_mux_;\n  Config::GrpcSubscriptionImplPtr subscription_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n\nstatic void priorityAndLocalityWeighted(State& state) {\n  Envoy::Thread::MutexBasicLockable lock;\n  Envoy::Logger::Context logging_state(spdlog::level::warn,\n                                       Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false);\n  for (auto _ : state) {\n    Envoy::Upstream::EdsSpeedTest speed_test(state, state.range(0));\n    // if we've been instructed to skip tests, only run once no matter the argument:\n    uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(2);\n\n    speed_test.priorityAndLocalityWeightedHelper(state.range(1), endpoints, true);\n  }\n}\n\nBENCHMARK(priorityAndLocalityWeighted)\n    ->Ranges({{false, true}, {false, true}, {1, 100000}})\n    ->Unit(benchmark::kMillisecond);\n\nstatic void duplicateUpdate(State& state) {\n  Envoy::Thread::MutexBasicLockable lock;\n  Envoy::Logger::Context logging_state(spdlog::level::warn,\n                                       Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false);\n\n  for (auto _ : state) {\n    Envoy::Upstream::EdsSpeedTest speed_test(state, false);\n    uint32_t endpoints = skipExpensiveBenchmarks() ? 
1 : state.range(0);\n\n    speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true);\n    speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true);\n  }\n}\n\nBENCHMARK(duplicateUpdate)->Range(1, 100000)->Unit(benchmark::kMillisecond);\n\nstatic void healthOnlyUpdate(State& state) {\n  Envoy::Thread::MutexBasicLockable lock;\n  Envoy::Logger::Context logging_state(spdlog::level::warn,\n                                       Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false);\n  for (auto _ : state) {\n    Envoy::Upstream::EdsSpeedTest speed_test(state, false);\n    uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(0);\n\n    speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true);\n    speed_test.priorityAndLocalityWeightedHelper(true, endpoints, false);\n  }\n}\n\nBENCHMARK(healthOnlyUpdate)->Range(1, 100000)->Unit(benchmark::kMillisecond);\n"
  },
  {
    "path": "test/common/upstream/eds_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/eds.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/health_checker.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass EdsTest : public testing::Test {\nprotected:\n  EdsTest() : api_(Api::createApiForTest(stats_)) { resetCluster(); }\n\n  void resetCluster() {\n    resetCluster(R\"EOF(\n      name: name\n      connect_timeout: 0.25s\n      type: EDS\n      lb_policy: ROUND_ROBIN\n      eds_cluster_config:\n        service_name: fare\n        eds_config:\n          api_config_source:\n            api_type: REST\n            cluster_names:\n            - eds\n            refresh_delay: 1s\n    )EOF\",\n                 Cluster::InitializePhase::Secondary);\n  }\n\n  void resetClusterDrainOnHostRemoval() {\n    resetCluster(R\"EOF(\n        name: name\n        connect_timeout: 0.25s\n        type: EDS\n        lb_policy: ROUND_ROBIN\n        ignore_health_on_host_removal: true\n        eds_cluster_config:\n          service_name: fare\n          
eds_config:\n            api_config_source:\n              api_type: REST\n              cluster_names:\n              - eds\n              refresh_delay: 1s\n    )EOF\",\n                 Cluster::InitializePhase::Secondary);\n  }\n\n  void resetClusterLoadedFromFile() {\n    resetCluster(R\"EOF(\n      name: name\n      connect_timeout: 0.25s\n      type: EDS\n      lb_policy: ROUND_ROBIN\n      eds_cluster_config:\n        eds_config:\n          path: \"eds path\"\n    )EOF\",\n                 Cluster::InitializePhase::Primary);\n  }\n\n  void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) {\n    local_info_.node_.mutable_locality()->set_zone(\"us-east-1a\");\n    eds_cluster_ = parseClusterFromV3Yaml(yaml_config);\n    Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n        \"cluster.{}.\",\n        eds_cluster_.alt_stat_name().empty() ? eds_cluster_.name() : eds_cluster_.alt_stat_name()));\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n    cluster_ = std::make_shared<EdsClusterImpl>(eds_cluster_, runtime_, factory_context,\n                                                std::move(scope), false);\n    EXPECT_EQ(initialize_phase, cluster_->initializePhase());\n    eds_callbacks_ = cm_.subscription_factory_.callbacks_;\n  }\n\n  void initialize() {\n    EXPECT_CALL(*cm_.subscription_factory_.subscription_, start(_, _));\n    cluster_->initialize([this] { initialized_ = true; });\n  }\n\n  void doOnConfigUpdateVerifyNoThrow(\n      const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment) {\n    const auto decoded_resources =\n        TestUtility::decodeResources({cluster_load_assignment}, \"cluster_name\");\n    VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, 
\"\"));\n  }\n\n  bool initialized_{};\n  Stats::TestUtil::TestStore stats_;\n  Ssl::MockContextManager ssl_context_manager_;\n  envoy::config::cluster::v3::Cluster eds_cluster_;\n  NiceMock<MockClusterManager> cm_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  EdsClusterImplSharedPtr cluster_;\n  Config::SubscriptionCallbacks* eds_callbacks_{};\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n};\n\nclass EdsWithHealthCheckUpdateTest : public EdsTest {\nprotected:\n  EdsWithHealthCheckUpdateTest() = default;\n\n  // Build the initial cluster with some endpoints.\n  void initializeCluster(const std::vector<uint32_t> endpoint_ports,\n                         const bool ignore_health_on_host_removal) {\n    resetCluster(ignore_health_on_host_removal);\n\n    auto health_checker = std::make_shared<MockHealthChecker>();\n    EXPECT_CALL(*health_checker, start());\n    EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);\n    cluster_->setHealthChecker(health_checker);\n\n    cluster_load_assignment_.set_cluster_name(\"fare\");\n\n    for (const auto& port : endpoint_ports) {\n      addEndpoint(port);\n    }\n\n    doOnConfigUpdateVerifyNoThrow(cluster_load_assignment_);\n\n    // Make sure the cluster is rebuilt.\n    EXPECT_EQ(0UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n    {\n      auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n      EXPECT_EQ(hosts.size(), 2);\n\n      EXPECT_TRUE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n      EXPECT_TRUE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n\n      // Remove the pending HC 
flag. This is normally done by the health checker.\n      hosts[0]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n      hosts[1]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n\n      // Mark the hosts as healthy\n      hosts[0]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n      hosts[1]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n    }\n  }\n\n  void resetCluster(const bool ignore_health_on_host_removal) {\n    const std::string config = R\"EOF(\n      name: name\n      connect_timeout: 0.25s\n      type: EDS\n      lb_policy: ROUND_ROBIN\n      ignore_health_on_host_removal: {}\n      eds_cluster_config:\n        service_name: fare\n        eds_config:\n          api_config_source:\n            api_type: REST\n            cluster_names:\n            - eds\n            refresh_delay: 1s\n      )EOF\";\n    EdsTest::resetCluster(fmt::format(config, ignore_health_on_host_removal),\n                          Cluster::InitializePhase::Secondary);\n  }\n\n  void addEndpoint(const uint32_t port) {\n    auto* endpoints = cluster_load_assignment_.add_endpoints();\n    auto* socket_address = endpoints->add_lb_endpoints()\n                               ->mutable_endpoint()\n                               ->mutable_address()\n                               ->mutable_socket_address();\n    socket_address->set_address(\"1.2.3.4\");\n    socket_address->set_port_value(port);\n  }\n\n  void updateEndpointHealthCheckPortAtIndex(const uint32_t index, const uint32_t port) {\n    cluster_load_assignment_.mutable_endpoints(index)\n        ->mutable_lb_endpoints(0)\n        ->mutable_endpoint()\n        ->mutable_health_check_config()\n        ->set_port_value(port);\n\n    doOnConfigUpdateVerifyNoThrow(cluster_load_assignment_);\n\n    // Always rebuild if health check config is changed.\n    EXPECT_EQ(0UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n  }\n\n  envoy::config::endpoint::v3::ClusterLoadAssignment 
cluster_load_assignment_;\n};\n\n// Validate that onConfigUpdate() with unexpected cluster names rejects config.\nTEST_F(EdsTest, OnConfigUpdateWrongName) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"wrong name\");\n  const auto decoded_resources =\n      TestUtility::decodeResources({cluster_load_assignment}, \"cluster_name\");\n  initialize();\n  try {\n    eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\");\n  } catch (const EnvoyException& e) {\n    eds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected,\n                                         &e);\n  }\n  EXPECT_TRUE(initialized_);\n}\n\n// Validate that onConfigUpdate() with empty cluster vector size ignores config.\nTEST_F(EdsTest, OnConfigUpdateEmpty) {\n  initialize();\n  eds_callbacks_->onConfigUpdate({}, \"\");\n  Protobuf::RepeatedPtrField<std::string> removed_resources;\n  eds_callbacks_->onConfigUpdate({}, removed_resources, \"\");\n  EXPECT_EQ(2UL, stats_.counter(\"cluster.name.update_empty\").value());\n  EXPECT_TRUE(initialized_);\n}\n\n// Validate that onConfigUpdate() with unexpected cluster vector size rejects config.\nTEST_F(EdsTest, OnConfigUpdateWrongSize) {\n  initialize();\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  const auto decoded_resources = TestUtility::decodeResources(\n      {cluster_load_assignment, cluster_load_assignment}, \"cluster_name\");\n  try {\n    eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\");\n  } catch (const EnvoyException& e) {\n    eds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected,\n                                         &e);\n  }\n  EXPECT_TRUE(initialized_);\n}\n\n// Validate that onConfigUpdate() with the expected cluster accepts config.\nTEST_F(EdsTest, OnConfigUpdateSuccess) 
{\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n  EXPECT_EQ(1UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n}\n\n// Validate that delta-style onConfigUpdate() with the expected cluster accepts config.\nTEST_F(EdsTest, DeltaOnConfigUpdateSuccess) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  initialize();\n\n  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> resources;\n  auto* resource = resources.Add();\n  resource->mutable_resource()->PackFrom(cluster_load_assignment);\n  resource->set_version(\"v1\");\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n          resources, \"cluster_name\");\n  VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, \"v1\"));\n\n  EXPECT_TRUE(initialized_);\n  EXPECT_EQ(1UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n}\n\n// Validate that onConfigUpdate() with no service name accepts config.\nTEST_F(EdsTest, NoServiceNameOnSuccessConfigUpdate) {\n  resetCluster(R\"EOF(\n      name: name\n      connect_timeout: 0.25s\n      type: EDS\n      lb_policy: ROUND_ROBIN\n      eds_cluster_config:\n        eds_config:\n          api_config_source:\n            api_type: REST\n            cluster_names:\n            - eds\n            refresh_delay: 1s\n    )EOF\",\n               Cluster::InitializePhase::Secondary);\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"name\");\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n}\n\n// Validate that EDS cluster loaded from 
file as primary cluster\nTEST_F(EdsTest, EdsClusterFromFileIsPrimaryCluster) {\n  resetClusterLoadedFromFile();\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"name\");\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n}\n\n// Validate that onConfigUpdate() updates the endpoint metadata.\nTEST_F(EdsTest, EndpointMetadata) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  auto* endpoints = cluster_load_assignment.add_endpoints();\n  auto* endpoint = endpoints->add_lb_endpoints();\n  auto* canary = endpoints->add_lb_endpoints();\n\n  endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address(\"1.2.3.4\");\n  endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80);\n  Config::Metadata::mutableMetadataValue(*endpoint->mutable_metadata(),\n                                         Config::MetadataFilters::get().ENVOY_LB, \"string_key\")\n      .set_string_value(\"string_value\");\n  Config::Metadata::mutableMetadataValue(*endpoint->mutable_metadata(), \"custom_namespace\",\n                                         \"num_key\")\n      .set_number_value(1.1);\n\n  canary->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address(\"2.3.4.5\");\n  canary->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80);\n  Config::Metadata::mutableMetadataValue(*canary->mutable_metadata(),\n                                         Config::MetadataFilters::get().ENVOY_LB,\n                                         Config::MetadataEnvoyLbKeys::get().CANARY)\n      .set_bool_value(true);\n  Config::Metadata::mutableMetadataValue(*canary->mutable_metadata(),\n                                         Config::MetadataFilters::get().ENVOY_LB, \"version\")\n      
.set_string_value(\"v1\");\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n  EXPECT_EQ(0UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n  EXPECT_EQ(hosts.size(), 2);\n  EXPECT_EQ(hosts[0]->metadata()->filter_metadata_size(), 2);\n  EXPECT_EQ(Config::Metadata::metadataValue(hosts[0]->metadata().get(),\n                                            Config::MetadataFilters::get().ENVOY_LB, \"string_key\")\n                .string_value(),\n            std::string(\"string_value\"));\n  EXPECT_EQ(\n      Config::Metadata::metadataValue(hosts[0]->metadata().get(), \"custom_namespace\", \"num_key\")\n          .number_value(),\n      1.1);\n  EXPECT_FALSE(Config::Metadata::metadataValue(hosts[0]->metadata().get(),\n                                               Config::MetadataFilters::get().ENVOY_LB,\n                                               Config::MetadataEnvoyLbKeys::get().CANARY)\n                   .bool_value());\n  EXPECT_FALSE(hosts[0]->canary());\n\n  EXPECT_EQ(hosts[1]->metadata()->filter_metadata_size(), 1);\n  EXPECT_TRUE(Config::Metadata::metadataValue(hosts[1]->metadata().get(),\n                                              Config::MetadataFilters::get().ENVOY_LB,\n                                              Config::MetadataEnvoyLbKeys::get().CANARY)\n                  .bool_value());\n  EXPECT_TRUE(hosts[1]->canary());\n  EXPECT_EQ(Config::Metadata::metadataValue(hosts[1]->metadata().get(),\n                                            Config::MetadataFilters::get().ENVOY_LB, \"version\")\n                .string_value(),\n            \"v1\");\n\n  // We don't rebuild with the exact same config.\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_EQ(1UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  // New resources with Metadata updated.\n  
Config::Metadata::mutableMetadataValue(*canary->mutable_metadata(),\n                                         Config::MetadataFilters::get().ENVOY_LB, \"version\")\n      .set_string_value(\"v2\");\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  auto& nhosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n  EXPECT_EQ(nhosts.size(), 2);\n  EXPECT_EQ(Config::Metadata::metadataValue(nhosts[1]->metadata().get(),\n                                            Config::MetadataFilters::get().ENVOY_LB, \"version\")\n                .string_value(),\n            \"v2\");\n}\n\n// Validate that onConfigUpdate() updates endpoint health status.\nTEST_F(EdsTest, EndpointHealthStatus) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  auto* endpoints = cluster_load_assignment.add_endpoints();\n\n  // First check that EDS is correctly mapping\n  // envoy::api::v2::core::HealthStatus values to the expected health() status.\n  const std::vector<std::pair<envoy::config::core::v3::HealthStatus, Host::Health>>\n      health_status_expected = {\n          {envoy::config::core::v3::UNKNOWN, Host::Health::Healthy},\n          {envoy::config::core::v3::HEALTHY, Host::Health::Healthy},\n          {envoy::config::core::v3::UNHEALTHY, Host::Health::Unhealthy},\n          {envoy::config::core::v3::DRAINING, Host::Health::Unhealthy},\n          {envoy::config::core::v3::TIMEOUT, Host::Health::Unhealthy},\n          {envoy::config::core::v3::DEGRADED, Host::Health::Degraded},\n      };\n\n  int port = 80;\n  for (auto hs : health_status_expected) {\n    auto* endpoint = endpoints->add_lb_endpoints();\n    auto* socket_address =\n        endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address();\n    socket_address->set_address(\"1.2.3.4\");\n    socket_address->set_port_value(port++);\n    endpoint->set_health_status(hs.first);\n  }\n\n  initialize();\n  
doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), health_status_expected.size());\n\n    for (uint32_t i = 0; i < hosts.size(); ++i) {\n      EXPECT_EQ(health_status_expected[i].second, hosts[i]->health());\n    }\n  }\n\n  // Perform an update in which we don't change the host set, but flip some host\n  // to unhealthy, check we have the expected change in status.\n  endpoints->mutable_lb_endpoints(0)->set_health_status(envoy::config::core::v3::UNHEALTHY);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), health_status_expected.size());\n    EXPECT_EQ(Host::Health::Unhealthy, hosts[0]->health());\n\n    for (uint32_t i = 1; i < hosts.size(); ++i) {\n      EXPECT_EQ(health_status_expected[i].second, hosts[i]->health());\n    }\n  }\n\n  // Perform an update in which we don't change the host set, but flip some host\n  // to healthy, check we have the expected change in status.\n  endpoints->mutable_lb_endpoints(health_status_expected.size() - 1)\n      ->set_health_status(envoy::config::core::v3::HEALTHY);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), health_status_expected.size());\n    EXPECT_EQ(Host::Health::Healthy, hosts[hosts.size() - 1]->health());\n\n    for (uint32_t i = 1; i < hosts.size() - 1; ++i) {\n      EXPECT_EQ(health_status_expected[i].second, hosts[i]->health());\n    }\n  }\n\n  // Mark host 0 unhealthy from active health checking as well, we should have\n  // the same situation as above.\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    hosts[0]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  }\n  
doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(Host::Health::Unhealthy, hosts[0]->health());\n  }\n\n  // Now mark host 0 healthy via EDS, it should still be unhealthy due to the\n  // active health check failure.\n  endpoints->mutable_lb_endpoints(0)->set_health_status(envoy::config::core::v3::HEALTHY);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(Host::Health::Unhealthy, hosts[0]->health());\n  }\n\n  // Finally, mark host 0 healthy again via active health check. It should be\n  // immediately healthy again.\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    hosts[0]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n    EXPECT_EQ(Host::Health::Healthy, hosts[0]->health());\n  }\n\n  const auto rebuild_container = stats_.counter(\"cluster.name.update_no_rebuild\").value();\n  // Now mark host 0 degraded via EDS, it should be degraded.\n  endpoints->mutable_lb_endpoints(0)->set_health_status(envoy::config::core::v3::DEGRADED);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(Host::Health::Degraded, hosts[0]->health());\n  }\n\n  // We should rebuild the cluster since we went from healthy -> degraded.\n  EXPECT_EQ(rebuild_container, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  // Now mark the host as having been degraded through active hc.\n  cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthFlagSet(\n      Host::HealthFlag::DEGRADED_ACTIVE_HC);\n\n  // Now mark host 0 healthy via EDS, it should still be degraded.\n  endpoints->mutable_lb_endpoints(0)->set_health_status(envoy::config::core::v3::HEALTHY);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  
{\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(Host::Health::Degraded, hosts[0]->health());\n  }\n\n  // Since the host health didn't change, expect no rebuild.\n  EXPECT_EQ(rebuild_container + 1, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n}\n\n// Validate that onConfigUpdate() updates the hostname.\nTEST_F(EdsTest, Hostname) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  auto* endpoint = cluster_load_assignment.add_endpoints()->add_lb_endpoints()->mutable_endpoint();\n  auto* socket_address = endpoint->mutable_address()->mutable_socket_address();\n  socket_address->set_address(\"1.2.3.4\");\n  socket_address->set_port_value(1234);\n  endpoint->set_hostname(\"foo\");\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n  EXPECT_EQ(hosts.size(), 1);\n  EXPECT_EQ(hosts[0]->hostname(), \"foo\");\n}\n\nTEST_F(EdsTest, UseHostnameForHealthChecks) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  auto* endpoint = cluster_load_assignment.add_endpoints()->add_lb_endpoints()->mutable_endpoint();\n  auto* socket_address = endpoint->mutable_address()->mutable_socket_address();\n  socket_address->set_address(\"1.2.3.4\");\n  socket_address->set_port_value(1234);\n  endpoint->mutable_health_check_config()->set_hostname(\"foo\");\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n  EXPECT_EQ(hosts.size(), 1);\n  EXPECT_EQ(hosts[0]->hostnameForHealthChecks(), \"foo\");\n}\n\n// Verify that a host is removed if it is removed from discovery, stabilized, and then later\n// fails active HC.\nTEST_F(EdsTest, EndpointRemovalAfterHcFail) {\n  
envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n\n  auto health_checker = std::make_shared<MockHealthChecker>();\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);\n  cluster_->setHealthChecker(health_checker);\n\n  auto add_endpoint = [&cluster_load_assignment](int port) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n\n    auto* socket_address = endpoints->add_lb_endpoints()\n                               ->mutable_endpoint()\n                               ->mutable_address()\n                               ->mutable_socket_address();\n    socket_address->set_address(\"1.2.3.4\");\n    socket_address->set_port_value(port);\n  };\n\n  add_endpoint(80);\n  add_endpoint(81);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n\n    // Remove the pending HC flag. This is normally done by the health checker.\n    hosts[0]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n    hosts[1]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n\n    // Mark the hosts as healthy\n    hosts[0]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n    hosts[1]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n  }\n\n  // Remove endpoints and add back the port 80 one. 
Both hosts should be present due to\n  // being stabilized, but one of them should be marked pending removal.\n  cluster_load_assignment.clear_endpoints();\n  add_endpoint(80);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));\n    EXPECT_TRUE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));\n  }\n\n  // Add both hosts back, make sure pending removal is gone.\n  cluster_load_assignment.clear_endpoints();\n  add_endpoint(80);\n  add_endpoint(81);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));\n    EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));\n  }\n\n  // Remove endpoints and add back the port 80 one. 
Both hosts should be present due to\n  // being stabilized, but one of them should be marked pending removal.\n  cluster_load_assignment.clear_endpoints();\n  add_endpoint(80);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  HostSharedPtr not_removed_host;\n  HostSharedPtr removed_host;\n  {\n    EXPECT_EQ(2,\n              cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get()[0].size());\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));\n    EXPECT_TRUE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));\n\n    // Mark the host is failing active HC and then run callbacks.\n    not_removed_host = hosts[0];\n    removed_host = hosts[1];\n    hosts[1]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n    health_checker->runCallbacks(hosts[1], HealthTransition::Changed);\n  }\n\n  {\n    EXPECT_EQ(1,\n              cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get()[0].size());\n    EXPECT_EQ(1, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  }\n\n  // Add back 81. Verify that we have a new host. 
This will show that the all_hosts_ was updated\n  // correctly.\n  cluster_load_assignment.clear_endpoints();\n  add_endpoint(80);\n  add_endpoint(81);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n    EXPECT_EQ(not_removed_host, hosts[0]);\n    EXPECT_EQ(removed_host->address()->asString(), hosts[1]->address()->asString());\n    EXPECT_NE(removed_host, hosts[1]);\n  }\n}\n\n// Verify that a host is removed when it is still passing active HC, but has been previously\n// told by the EDS server to fail health check.\nTEST_F(EdsTest, EndpointRemovalEdsFailButActiveHcSuccess) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  auto* endpoints = cluster_load_assignment.add_endpoints();\n\n  auto health_checker = std::make_shared<MockHealthChecker>();\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);\n  cluster_->setHealthChecker(health_checker);\n\n  auto add_endpoint = [endpoints](int port) {\n    auto* socket_address = endpoints->add_lb_endpoints()\n                               ->mutable_endpoint()\n                               ->mutable_address()\n                               ->mutable_socket_address();\n    socket_address->set_address(\"1.2.3.4\");\n    socket_address->set_port_value(port);\n  };\n\n  add_endpoint(80);\n  add_endpoint(81);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n\n    // Remove the pending HC flag. 
This is normally done by the health checker.\n    hosts[0]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n    hosts[1]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n\n    // Mark the hosts as healthy\n    hosts[0]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n    hosts[1]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n  }\n\n  // Mark the first endpoint as unhealthy from EDS.\n  endpoints->mutable_lb_endpoints(0)->set_health_status(envoy::config::core::v3::UNHEALTHY);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n\n    EXPECT_EQ(hosts[0]->health(), Host::Health::Unhealthy);\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n    EXPECT_TRUE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_EDS_HEALTH));\n    EXPECT_EQ(hosts[1]->health(), Host::Health::Healthy);\n  }\n\n  // Now remove the first host. 
Even though it is still passing active HC, since EDS has\n  // previously explicitly failed it, we won't stabilize it anymore.\n  endpoints->mutable_lb_endpoints()->erase(endpoints->mutable_lb_endpoints()->begin());\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 1);\n  }\n}\n\n// Validate that onConfigUpdate() removes endpoints that are marked as healthy\n// when configured to drain on host removal.\nTEST_F(EdsTest, EndpointRemovalClusterDrainOnHostRemoval) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  resetClusterDrainOnHostRemoval();\n\n  auto health_checker = std::make_shared<MockHealthChecker>();\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);\n  cluster_->setHealthChecker(health_checker);\n\n  auto add_endpoint = [&cluster_load_assignment](int port) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n\n    auto* socket_address = endpoints->add_lb_endpoints()\n                               ->mutable_endpoint()\n                               ->mutable_address()\n                               ->mutable_socket_address();\n    socket_address->set_address(\"1.2.3.4\");\n    socket_address->set_port_value(port);\n  };\n\n  add_endpoint(80);\n  add_endpoint(81);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n\n    EXPECT_TRUE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n    EXPECT_TRUE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n\n    // Remove the pending HC flag. 
This is normally done by the health checker.\n    hosts[0]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n    hosts[1]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n\n    // Mark the hosts as healthy\n    hosts[0]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n    hosts[1]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n  }\n\n  // Remove endpoints and add back the port 80 one\n  cluster_load_assignment.clear_endpoints();\n  add_endpoint(80);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 1);\n  }\n}\n\n// Verifies that if an endpoint is moved to a new priority, the active hc status is preserved.\nTEST_F(EdsTest, EndpointMovedToNewPriority) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  resetClusterDrainOnHostRemoval();\n\n  auto health_checker = std::make_shared<MockHealthChecker>();\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);\n  cluster_->setHealthChecker(health_checker);\n\n  auto add_endpoint = [&cluster_load_assignment](int port, int priority) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    endpoints->set_priority(priority);\n\n    auto* socket_address = endpoints->add_lb_endpoints()\n                               ->mutable_endpoint()\n                               ->mutable_address()\n                               ->mutable_socket_address();\n    socket_address->set_address(\"1.2.3.4\");\n    socket_address->set_port_value(port);\n  };\n\n  add_endpoint(80, 0);\n  add_endpoint(81, 0);\n\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n\n    // Mark the hosts as healthy\n    for (auto& host : 
hosts) {\n      EXPECT_TRUE(host->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n      host->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n      host->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n    }\n  }\n\n  // Moves the endpoints between priorities\n  cluster_load_assignment.clear_endpoints();\n  add_endpoint(81, 0);\n  add_endpoint(80, 1);\n\n  // Verify that no hosts gets added or removed to/from the PrioritySet.\n  cluster_->prioritySet().addMemberUpdateCb([&](const auto& added, const auto& removed) {\n    EXPECT_TRUE(added.empty());\n    EXPECT_TRUE(removed.empty());\n  });\n\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 1);\n\n    // assert that it didn't move\n    EXPECT_EQ(hosts[0]->address()->asString(), \"1.2.3.4:81\");\n\n    // The endpoint was healthy in the original priority, so moving it\n    // around should preserve that.\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n  }\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();\n    EXPECT_EQ(hosts.size(), 1);\n\n    // assert that it moved\n    EXPECT_EQ(hosts[0]->address()->asString(), \"1.2.3.4:80\");\n\n    // The endpoint was healthy in the original priority, so moving it\n    // around should preserve that.\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n  }\n\n  // Moves all the endpoints to priority 1.\n  cluster_load_assignment.clear_endpoints();\n  add_endpoint(80, 1);\n  add_endpoint(81, 1);\n\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    // Priority 0 should now be empty.\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 0);\n  }\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n\n    // The 
endpoints were healthy, so moving them around should preserve that.\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n    EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n  }\n}\n\n// Verifies that if an endpoint is moved between priorities, the health check value\n// of the host is preserved\nTEST_F(EdsTest, EndpointMoved) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  resetClusterDrainOnHostRemoval();\n\n  auto health_checker = std::make_shared<MockHealthChecker>();\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);\n  cluster_->setHealthChecker(health_checker);\n\n  auto add_endpoint = [&cluster_load_assignment](int port, int priority) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    endpoints->set_priority(priority);\n\n    auto* socket_address = endpoints->add_lb_endpoints()\n                               ->mutable_endpoint()\n                               ->mutable_address()\n                               ->mutable_socket_address();\n    socket_address->set_address(\"1.2.3.4\");\n    socket_address->set_port_value(port);\n  };\n\n  add_endpoint(80, 0);\n  add_endpoint(81, 1);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 1);\n\n    EXPECT_TRUE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n    EXPECT_EQ(0, hosts[0]->priority());\n    // Mark the host as healthy and remove the pending active hc flag.\n    hosts[0]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n    hosts[0]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n  }\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();\n    EXPECT_EQ(hosts.size(), 1);\n\n    
EXPECT_TRUE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n    EXPECT_EQ(1, hosts[0]->priority());\n    // Mark the host as healthy and remove the pending active hc flag.\n    hosts[0]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n    hosts[0]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n  }\n\n  // Moves the endpoints between priorities\n  cluster_load_assignment.clear_endpoints();\n  add_endpoint(81, 0);\n  add_endpoint(80, 1);\n  // Verify that no hosts gets added or removed to/from the PrioritySet.\n  cluster_->prioritySet().addMemberUpdateCb([&](const auto& added, const auto& removed) {\n    EXPECT_TRUE(added.empty());\n    EXPECT_TRUE(removed.empty());\n  });\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 1);\n\n    // assert that it moved\n    EXPECT_EQ(hosts[0]->address()->asString(), \"1.2.3.4:81\");\n    EXPECT_EQ(0, hosts[0]->priority());\n\n    // The endpoint was healthy in the original priority, so moving it\n    // around should preserve that.\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n  }\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();\n    EXPECT_EQ(hosts.size(), 1);\n\n    // assert that it moved\n    EXPECT_EQ(hosts[0]->address()->asString(), \"1.2.3.4:80\");\n    EXPECT_EQ(1, hosts[0]->priority());\n\n    // The endpoint was healthy in the original priority, so moving it\n    // around should preserve that.\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n  }\n}\n\n// Validates that we correctly update the host list when a new overprovisioning factor is set.\nTEST_F(EdsTest, EndpointAddedWithNewOverprovisioningFactor) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  
cluster_load_assignment.mutable_policy()->mutable_overprovisioning_factor()->set_value(1000);\n\n  {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    auto* locality = endpoints->mutable_locality();\n    locality->set_region(\"oceania\");\n    locality->set_zone(\"hello\");\n    locality->set_sub_zone(\"world\");\n    endpoints->mutable_load_balancing_weight()->set_value(42);\n\n    auto* endpoint_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n    endpoint_address->set_address(\"1.2.3.4\");\n    endpoint_address->set_port_value(80);\n  }\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n\n  EXPECT_EQ(1, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(\"1.2.3.4:80\",\n            cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->address()->asString());\n}\n\n// Validate that onConfigUpdate() updates the endpoint locality.\nTEST_F(EdsTest, EndpointLocality) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  auto* endpoints = cluster_load_assignment.add_endpoints();\n  auto* locality = endpoints->mutable_locality();\n  locality->set_region(\"oceania\");\n  locality->set_zone(\"hello\");\n  locality->set_sub_zone(\"world\");\n\n  {\n    auto* endpoint_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n    endpoint_address->set_address(\"1.2.3.4\");\n    endpoint_address->set_port_value(80);\n  }\n  {\n    auto* endpoint_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                              
   ->mutable_address()\n                                 ->mutable_socket_address();\n    endpoint_address->set_address(\"2.3.4.5\");\n    endpoint_address->set_port_value(80);\n  }\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n\n  auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n  EXPECT_EQ(hosts.size(), 2);\n  for (int i = 0; i < 2; ++i) {\n    EXPECT_EQ(0, hosts[i]->priority());\n    const auto& locality = hosts[i]->locality();\n    EXPECT_EQ(\"oceania\", locality.region());\n    EXPECT_EQ(\"hello\", locality.zone());\n    EXPECT_EQ(\"world\", locality.sub_zone());\n  }\n  EXPECT_EQ(nullptr, cluster_->prioritySet().hostSetsPerPriority()[0]->localityWeights());\n}\n\n// Validate that onConfigUpdate() does not propagate locality weights to the host set when\n// locality weighted balancing isn't configured.\nTEST_F(EdsTest, EndpointLocalityWeightsIgnored) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n\n  {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    auto* locality = endpoints->mutable_locality();\n    locality->set_region(\"oceania\");\n    locality->set_zone(\"hello\");\n    locality->set_sub_zone(\"world\");\n    endpoints->mutable_load_balancing_weight()->set_value(42);\n\n    auto* endpoint_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n    endpoint_address->set_address(\"1.2.3.4\");\n    endpoint_address->set_port_value(80);\n  }\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n\n  EXPECT_EQ(nullptr, cluster_->prioritySet().hostSetsPerPriority()[0]->localityWeights());\n}\n\n// Validate that onConfigUpdate() propagates locality weights to 
the host set when locality\n// weighted balancing is configured.\nTEST_F(EdsTest, EndpointLocalityWeights) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  resetCluster(R\"EOF(\n      name: name\n      connect_timeout: 0.25s\n      type: EDS\n      lb_policy: ROUND_ROBIN\n      common_lb_config:\n        locality_weighted_lb_config: {}\n      eds_cluster_config:\n        service_name: fare\n        eds_config:\n          api_config_source:\n            api_type: REST\n            cluster_names:\n            - eds\n            refresh_delay: 1s\n    )EOF\",\n               Cluster::InitializePhase::Secondary);\n\n  {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    auto* locality = endpoints->mutable_locality();\n    locality->set_region(\"oceania\");\n    locality->set_zone(\"hello\");\n    locality->set_sub_zone(\"world\");\n    endpoints->mutable_load_balancing_weight()->set_value(42);\n\n    auto* endpoint_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n    endpoint_address->set_address(\"1.2.3.4\");\n    endpoint_address->set_port_value(80);\n  }\n\n  {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    auto* locality = endpoints->mutable_locality();\n    locality->set_region(\"space\");\n    locality->set_zone(\"station\");\n    locality->set_sub_zone(\"international\");\n\n    auto* endpoint_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n    endpoint_address->set_address(\"1.2.3.5\");\n    endpoint_address->set_port_value(80);\n  }\n\n  {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    
auto* locality = endpoints->mutable_locality();\n    locality->set_region(\"sugar\");\n    locality->set_zone(\"candy\");\n    locality->set_sub_zone(\"mountain\");\n    endpoints->mutable_load_balancing_weight()->set_value(37);\n\n    auto* endpoint_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n    endpoint_address->set_address(\"1.2.3.6\");\n    endpoint_address->set_port_value(80);\n  }\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n\n  const auto& locality_weights =\n      *cluster_->prioritySet().hostSetsPerPriority()[0]->localityWeights();\n  EXPECT_EQ(3, locality_weights.size());\n  EXPECT_EQ(42, locality_weights[0]);\n  EXPECT_EQ(0, locality_weights[1]);\n  EXPECT_EQ(37, locality_weights[2]);\n}\n\n// Validate that onConfigUpdate() removes any locality not referenced in the\n// config update in each priority.\nTEST_F(EdsTest, RemoveUnreferencedLocalities) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  uint32_t port = 1000;\n  auto add_hosts_to_locality = [&cluster_load_assignment,\n                                &port](const std::string& region, const std::string& zone,\n                                       const std::string& sub_zone, uint32_t n, uint32_t priority) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    endpoints->set_priority(priority);\n    auto* locality = endpoints->mutable_locality();\n    locality->set_region(region);\n    locality->set_zone(zone);\n    locality->set_sub_zone(sub_zone);\n\n    for (uint32_t i = 0; i < n; ++i) {\n      auto* socket_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n        
                         ->mutable_socket_address();\n      socket_address->set_address(\"1.2.3.4\");\n      socket_address->set_port_value(port++);\n    }\n  };\n\n  // Add two localities to each of priority 0 and 1\n  add_hosts_to_locality(\"oceania\", \"koala\", \"ingsoc\", 2, 0);\n  add_hosts_to_locality(\"\", \"us-east-1a\", \"\", 1, 0);\n\n  add_hosts_to_locality(\"oceania\", \"bear\", \"best\", 4, 1);\n  add_hosts_to_locality(\"\", \"us-west-1a\", \"\", 2, 1);\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n\n  {\n    auto& hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get();\n    EXPECT_EQ(2, hosts_per_locality.size());\n  }\n\n  {\n    auto& hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality().get();\n    EXPECT_EQ(2, hosts_per_locality.size());\n  }\n\n  // Reset the ClusterLoadAssignment to only contain one of the locality per priority.\n  // This should leave us with only one locality.\n  cluster_load_assignment.clear_endpoints();\n  add_hosts_to_locality(\"oceania\", \"koala\", \"ingsoc\", 4, 0);\n  add_hosts_to_locality(\"oceania\", \"bear\", \"best\", 2, 1);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get();\n    EXPECT_EQ(1, hosts_per_locality.size());\n  }\n\n  {\n    auto& hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality().get();\n    EXPECT_EQ(1, hosts_per_locality.size());\n  }\n\n  // Clear out the new ClusterLoadAssignment. 
This should leave us with 0 localities per priority.\n  cluster_load_assignment.clear_endpoints();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get();\n    EXPECT_EQ(0, hosts_per_locality.size());\n  }\n\n  {\n    auto& hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality().get();\n    EXPECT_EQ(0, hosts_per_locality.size());\n  }\n}\n\n// Validate that onConfigUpdate() updates bins hosts per locality as expected.\nTEST_F(EdsTest, EndpointHostsPerLocality) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  uint32_t port = 1000;\n  auto add_hosts_to_locality = [&cluster_load_assignment,\n                                &port](const std::string& region, const std::string& zone,\n                                       const std::string& sub_zone, uint32_t n) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    auto* locality = endpoints->mutable_locality();\n    locality->set_region(region);\n    locality->set_zone(zone);\n    locality->set_sub_zone(sub_zone);\n\n    for (uint32_t i = 0; i < n; ++i) {\n      auto* socket_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n      socket_address->set_address(\"1.2.3.4\");\n      socket_address->set_port_value(port++);\n    }\n  };\n\n  add_hosts_to_locality(\"oceania\", \"koala\", \"ingsoc\", 2);\n  add_hosts_to_locality(\"\", \"us-east-1a\", \"\", 1);\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n\n  {\n    auto& hosts_per_locality = cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality();\n    EXPECT_EQ(2, 
hosts_per_locality.get().size());\n    EXPECT_EQ(1, hosts_per_locality.get()[0].size());\n    EXPECT_THAT(Locality(\"\", \"us-east-1a\", \"\"),\n                ProtoEq(hosts_per_locality.get()[0][0]->locality()));\n    EXPECT_EQ(2, hosts_per_locality.get()[1].size());\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"ingsoc\"),\n                ProtoEq(hosts_per_locality.get()[1][0]->locality()));\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"ingsoc\"),\n                ProtoEq(hosts_per_locality.get()[1][1]->locality()));\n  }\n\n  add_hosts_to_locality(\"oceania\", \"koala\", \"eucalyptus\", 3);\n  add_hosts_to_locality(\"general\", \"koala\", \"ingsoc\", 5);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts_per_locality = cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality();\n    EXPECT_EQ(4, hosts_per_locality.get().size());\n    EXPECT_EQ(1, hosts_per_locality.get()[0].size());\n    EXPECT_THAT(Locality(\"\", \"us-east-1a\", \"\"),\n                ProtoEq(hosts_per_locality.get()[0][0]->locality()));\n    EXPECT_EQ(5, hosts_per_locality.get()[1].size());\n    EXPECT_THAT(Locality(\"general\", \"koala\", \"ingsoc\"),\n                ProtoEq(hosts_per_locality.get()[1][0]->locality()));\n    EXPECT_EQ(3, hosts_per_locality.get()[2].size());\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"eucalyptus\"),\n                ProtoEq(hosts_per_locality.get()[2][0]->locality()));\n    EXPECT_EQ(2, hosts_per_locality.get()[3].size());\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"ingsoc\"),\n                ProtoEq(hosts_per_locality.get()[3][0]->locality()));\n  }\n}\n\n// Validate that onConfigUpdate() updates all priorities in the prioritySet\nTEST_F(EdsTest, EndpointHostPerPriority) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  uint32_t port = 1000;\n  auto add_hosts_to_locality = 
[&cluster_load_assignment,\n                                &port](const std::string& region, const std::string& zone,\n                                       const std::string& sub_zone, uint32_t n, uint32_t priority) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    endpoints->set_priority(priority);\n    auto* locality = endpoints->mutable_locality();\n    locality->set_region(region);\n    locality->set_zone(zone);\n    locality->set_sub_zone(sub_zone);\n\n    for (uint32_t i = 0; i < n; ++i) {\n      auto* socket_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n      socket_address->set_address(\"1.2.3.4\");\n      socket_address->set_port_value(port++);\n    }\n  };\n\n  add_hosts_to_locality(\"oceania\", \"koala\", \"ingsoc\", 2, 0);\n  add_hosts_to_locality(\"\", \"us-east-1a\", \"\", 1, 1);\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(2, hosts.size());\n  }\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();\n    EXPECT_EQ(1, hosts.size());\n  }\n\n  cluster_load_assignment.clear_endpoints();\n\n  add_hosts_to_locality(\"oceania\", \"koala\", \"ingsoc\", 4, 0);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(4, hosts.size());\n  }\n\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[1]->hosts();\n    EXPECT_EQ(0, hosts.size());\n  }\n}\n\n// Validate that onConfigUpdate() updates bins hosts per priority as expected.\nTEST_F(EdsTest, EndpointHostsPerPriority) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  
cluster_load_assignment.set_cluster_name(\"fare\");\n  uint32_t port = 1000;\n  auto add_hosts_to_priority = [&cluster_load_assignment, &port](uint32_t priority, uint32_t n) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    endpoints->set_priority(priority);\n\n    for (uint32_t i = 0; i < n; ++i) {\n      auto* socket_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n      socket_address->set_address(\"1.2.3.4\");\n      socket_address->set_port_value(port++);\n    }\n  };\n\n  // Set up the priority levels so 0 has two hosts and 1 has one host.\n  add_hosts_to_priority(0, 2);\n  add_hosts_to_priority(1, 1);\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n\n  ASSERT_EQ(2, cluster_->prioritySet().hostSetsPerPriority().size());\n  EXPECT_EQ(2, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(1, cluster_->prioritySet().hostSetsPerPriority()[1]->hosts().size());\n\n  // Add 2 more hosts to priority 0, and add five hosts to priority 3.\n  // Note the (illegal) gap (no priority 2.)  Until we have config validation,\n  // make sure bad config does no harm.\n  add_hosts_to_priority(0, 2);\n  add_hosts_to_priority(3, 5);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  ASSERT_EQ(4, cluster_->prioritySet().hostSetsPerPriority().size());\n  EXPECT_EQ(4, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(1, cluster_->prioritySet().hostSetsPerPriority()[1]->hosts().size());\n  EXPECT_EQ(0, cluster_->prioritySet().hostSetsPerPriority()[2]->hosts().size());\n  EXPECT_EQ(5, cluster_->prioritySet().hostSetsPerPriority()[3]->hosts().size());\n\n  // Update the number of hosts in priority 3. 
Make sure we clear out the priorities previously\n  // occupied by hosts.\n  cluster_load_assignment.clear_endpoints();\n  add_hosts_to_priority(3, 4);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  ASSERT_EQ(4, cluster_->prioritySet().hostSetsPerPriority().size());\n  EXPECT_EQ(0, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0, cluster_->prioritySet().hostSetsPerPriority()[1]->hosts().size());\n  EXPECT_EQ(0, cluster_->prioritySet().hostSetsPerPriority()[2]->hosts().size());\n  EXPECT_EQ(4, cluster_->prioritySet().hostSetsPerPriority()[3]->hosts().size());\n}\n\n// Make sure config updates with P!=0 are rejected for the local cluster.\nTEST_F(EdsTest, NoPriorityForLocalCluster) {\n  cm_.local_cluster_name_ = \"name\";\n  resetCluster();\n\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  uint32_t port = 1000;\n  auto add_hosts_to_priority = [&cluster_load_assignment, &port](uint32_t priority, uint32_t n) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n    endpoints->set_priority(priority);\n\n    for (uint32_t i = 0; i < n; ++i) {\n      auto* socket_address = endpoints->add_lb_endpoints()\n                                 ->mutable_endpoint()\n                                 ->mutable_address()\n                                 ->mutable_socket_address();\n      socket_address->set_address(\"1.2.3.4\");\n      socket_address->set_port_value(port++);\n    }\n  };\n\n  // Set up the priority levels so 0 has two hosts and 1 has one host. 
Update\n  // should fail.\n  add_hosts_to_priority(0, 2);\n  add_hosts_to_priority(1, 1);\n  initialize();\n  const auto decoded_resources =\n      TestUtility::decodeResources({cluster_load_assignment}, \"cluster_name\");\n  EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\"),\n                            EnvoyException,\n                            \"Unexpected non-zero priority for local cluster 'name'.\");\n\n  // Try an update which only has endpoints with P=0. This should go through.\n  cluster_load_assignment.clear_endpoints();\n  add_hosts_to_priority(0, 2);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n}\n\n// Set up an EDS config with multiple priorities and localities and make sure\n// they are loaded and reloaded as expected.\nTEST_F(EdsTest, PriorityAndLocality) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  uint32_t port = 1000;\n  auto add_hosts_to_locality_and_priority =\n      [&cluster_load_assignment, &port](const std::string& region, const std::string& zone,\n                                        const std::string& sub_zone, uint32_t priority,\n                                        uint32_t n) {\n        auto* endpoints = cluster_load_assignment.add_endpoints();\n        endpoints->set_priority(priority);\n        auto* locality = endpoints->mutable_locality();\n        locality->set_region(region);\n        locality->set_zone(zone);\n        locality->set_sub_zone(sub_zone);\n\n        for (uint32_t i = 0; i < n; ++i) {\n          auto* socket_address = endpoints->add_lb_endpoints()\n                                     ->mutable_endpoint()\n                                     ->mutable_address()\n                                     ->mutable_socket_address();\n          socket_address->set_address(\"1.2.3.4\");\n          socket_address->set_port_value(port++);\n        }\n      };\n\n  // Set up 
both priority 0 and priority 1 with 2 localities.\n  add_hosts_to_locality_and_priority(\"oceania\", \"koala\", \"ingsoc\", 0, 2);\n  add_hosts_to_locality_and_priority(\"\", \"us-east-1a\", \"\", 0, 1);\n  add_hosts_to_locality_and_priority(\"\", \"us-east-1a\", \"\", 1, 8);\n  add_hosts_to_locality_and_priority(\"foo\", \"bar\", \"eep\", 1, 2);\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n\n  {\n    auto& first_hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality();\n    EXPECT_EQ(2, first_hosts_per_locality.get().size());\n    EXPECT_EQ(1, first_hosts_per_locality.get()[0].size());\n    EXPECT_THAT(Locality(\"\", \"us-east-1a\", \"\"),\n                ProtoEq(first_hosts_per_locality.get()[0][0]->locality()));\n    EXPECT_EQ(2, first_hosts_per_locality.get()[1].size());\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"ingsoc\"),\n                ProtoEq(first_hosts_per_locality.get()[1][0]->locality()));\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"ingsoc\"),\n                ProtoEq(first_hosts_per_locality.get()[1][1]->locality()));\n\n    auto& second_hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality();\n    ASSERT_EQ(2, second_hosts_per_locality.get().size());\n    EXPECT_EQ(8, second_hosts_per_locality.get()[0].size());\n    EXPECT_EQ(2, second_hosts_per_locality.get()[1].size());\n  }\n\n  // Add one more locality to both priority 0 and priority 1.\n  add_hosts_to_locality_and_priority(\"oceania\", \"koala\", \"eucalyptus\", 0, 3);\n  add_hosts_to_locality_and_priority(\"general\", \"koala\", \"ingsoc\", 1, 5);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n\n  {\n    auto& first_hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality();\n    EXPECT_EQ(3, first_hosts_per_locality.get().size());\n    EXPECT_EQ(1, 
first_hosts_per_locality.get()[0].size());\n    EXPECT_THAT(Locality(\"\", \"us-east-1a\", \"\"),\n                ProtoEq(first_hosts_per_locality.get()[0][0]->locality()));\n    EXPECT_EQ(3, first_hosts_per_locality.get()[1].size());\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"eucalyptus\"),\n                ProtoEq(first_hosts_per_locality.get()[1][0]->locality()));\n    EXPECT_EQ(2, first_hosts_per_locality.get()[2].size());\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"ingsoc\"),\n                ProtoEq(first_hosts_per_locality.get()[2][0]->locality()));\n\n    auto& second_hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality();\n    EXPECT_EQ(3, second_hosts_per_locality.get().size());\n    EXPECT_EQ(8, second_hosts_per_locality.get()[0].size());\n    EXPECT_THAT(Locality(\"\", \"us-east-1a\", \"\"),\n                ProtoEq(second_hosts_per_locality.get()[0][0]->locality()));\n    EXPECT_EQ(2, second_hosts_per_locality.get()[1].size());\n    EXPECT_THAT(Locality(\"foo\", \"bar\", \"eep\"),\n                ProtoEq(second_hosts_per_locality.get()[1][0]->locality()));\n    EXPECT_EQ(5, second_hosts_per_locality.get()[2].size());\n    EXPECT_THAT(Locality(\"general\", \"koala\", \"ingsoc\"),\n                ProtoEq(second_hosts_per_locality.get()[2][0]->locality()));\n  }\n}\n\n// Set up an EDS config with multiple priorities, localities, weights and make sure\n// they are loaded and reloaded as expected.\nTEST_F(EdsTest, PriorityAndLocalityWeighted) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  resetCluster(R\"EOF(\n      name: name\n      connect_timeout: 0.25s\n      type: EDS\n      lb_policy: ROUND_ROBIN\n      common_lb_config:\n        locality_weighted_lb_config: {}\n      eds_cluster_config:\n        service_name: fare\n        eds_config:\n          api_config_source:\n            api_type: REST\n     
       cluster_names:\n            - eds\n            refresh_delay: 1s\n    )EOF\",\n               Cluster::InitializePhase::Secondary);\n\n  uint32_t port = 1000;\n  auto add_hosts_to_locality_and_priority =\n      [&cluster_load_assignment, &port](const std::string& region, const std::string& zone,\n                                        const std::string& sub_zone, uint32_t priority, uint32_t n,\n                                        uint32_t weight) {\n        auto* endpoints = cluster_load_assignment.add_endpoints();\n        endpoints->set_priority(priority);\n        auto* locality = endpoints->mutable_locality();\n        locality->set_region(region);\n        locality->set_zone(zone);\n        locality->set_sub_zone(sub_zone);\n        endpoints->mutable_load_balancing_weight()->set_value(weight);\n\n        for (uint32_t i = 0; i < n; ++i) {\n          auto* socket_address = endpoints->add_lb_endpoints()\n                                     ->mutable_endpoint()\n                                     ->mutable_address()\n                                     ->mutable_socket_address();\n          socket_address->set_address(\"1.2.3.4\");\n          socket_address->set_port_value(port++);\n        }\n      };\n\n  // Set up both priority 0 and priority 1 with 2 localities.\n  add_hosts_to_locality_and_priority(\"oceania\", \"koala\", \"ingsoc\", 0, 2, 25);\n  add_hosts_to_locality_and_priority(\"\", \"us-east-1a\", \"\", 0, 1, 75);\n  add_hosts_to_locality_and_priority(\"\", \"us-east-1a\", \"\", 1, 8, 60);\n  add_hosts_to_locality_and_priority(\"foo\", \"bar\", \"eep\", 1, 2, 40);\n\n  initialize();\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_TRUE(initialized_);\n  EXPECT_EQ(0UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  {\n    auto& first_hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality();\n    auto& first_locality_weights =\n        
*cluster_->prioritySet().hostSetsPerPriority()[0]->localityWeights();\n    EXPECT_EQ(2, first_hosts_per_locality.get().size());\n    EXPECT_EQ(1, first_hosts_per_locality.get()[0].size());\n    EXPECT_THAT(Locality(\"\", \"us-east-1a\", \"\"),\n                ProtoEq(first_hosts_per_locality.get()[0][0]->locality()));\n    EXPECT_EQ(75, first_locality_weights[0]);\n    EXPECT_EQ(2, first_hosts_per_locality.get()[1].size());\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"ingsoc\"),\n                ProtoEq(first_hosts_per_locality.get()[1][0]->locality()));\n    EXPECT_THAT(Locality(\"oceania\", \"koala\", \"ingsoc\"),\n                ProtoEq(first_hosts_per_locality.get()[1][1]->locality()));\n    EXPECT_EQ(25, first_locality_weights[1]);\n\n    auto& second_hosts_per_locality =\n        cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality();\n    auto& second_locality_weights =\n        *cluster_->prioritySet().hostSetsPerPriority()[1]->localityWeights();\n    ASSERT_EQ(2, second_hosts_per_locality.get().size());\n    EXPECT_EQ(8, second_hosts_per_locality.get()[0].size());\n    EXPECT_EQ(60, second_locality_weights[0]);\n    EXPECT_EQ(2, second_hosts_per_locality.get()[1].size());\n    EXPECT_EQ(40, second_locality_weights[1]);\n  }\n\n  // This should noop (regression test for earlier bug where we would still\n  // rebuild).\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_EQ(1UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  // Adjust locality weights, validate that we observe an update.\n  cluster_load_assignment.mutable_endpoints(0)->mutable_load_balancing_weight()->set_value(60);\n  cluster_load_assignment.mutable_endpoints(1)->mutable_load_balancing_weight()->set_value(40);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  EXPECT_EQ(1UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n}\n\nTEST_F(EdsWithHealthCheckUpdateTest, EndpointUpdateHealthCheckConfig) {\n  const 
std::vector<uint32_t> endpoint_ports = {80, 81};\n  const uint32_t new_health_check_port = 8000;\n\n  // Initialize the cluster with two endpoints without draining connections on host removal.\n  initializeCluster(endpoint_ports, false);\n\n  updateEndpointHealthCheckPortAtIndex(0, new_health_check_port);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 3);\n    // Make sure the first endpoint health check port is updated.\n    EXPECT_EQ(new_health_check_port, hosts[0]->healthCheckAddress()->ip()->port());\n\n    EXPECT_NE(new_health_check_port, hosts[1]->healthCheckAddress()->ip()->port());\n    EXPECT_NE(new_health_check_port, hosts[2]->healthCheckAddress()->ip()->port());\n    EXPECT_EQ(endpoint_ports[1], hosts[1]->healthCheckAddress()->ip()->port());\n    EXPECT_EQ(endpoint_ports[0], hosts[2]->healthCheckAddress()->ip()->port());\n\n    EXPECT_TRUE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n\n    // The old hosts are still active. 
The health checker continues to do health checking to these\n    // hosts, until they are removed.\n    EXPECT_FALSE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n    EXPECT_FALSE(hosts[2]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n  }\n\n  updateEndpointHealthCheckPortAtIndex(1, new_health_check_port);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 4);\n    EXPECT_EQ(new_health_check_port, hosts[0]->healthCheckAddress()->ip()->port());\n\n    // Make sure the second endpoint health check port is updated.\n    EXPECT_EQ(new_health_check_port, hosts[1]->healthCheckAddress()->ip()->port());\n\n    EXPECT_EQ(endpoint_ports[1], hosts[2]->healthCheckAddress()->ip()->port());\n    EXPECT_EQ(endpoint_ports[0], hosts[3]->healthCheckAddress()->ip()->port());\n\n    EXPECT_TRUE(hosts[0]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n    EXPECT_TRUE(hosts[1]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n\n    // The old hosts are still active.\n    EXPECT_FALSE(hosts[2]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n    EXPECT_FALSE(hosts[3]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n  }\n}\n\nTEST_F(EdsWithHealthCheckUpdateTest, EndpointUpdateHealthCheckConfigWithDrainConnectionsOnRemoval) {\n  const std::vector<uint32_t> endpoint_ports = {80, 81};\n  const uint32_t new_health_check_port = 8000;\n\n  // Initialize the cluster with two endpoints with draining connections on host removal.\n  initializeCluster(endpoint_ports, true);\n\n  updateEndpointHealthCheckPortAtIndex(0, new_health_check_port);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    // Since drain_connections_on_host_removal is set to true, the old hosts are removed\n    // immediately.\n    EXPECT_EQ(hosts.size(), 2);\n    // Make sure the first endpoint health check port is updated.\n    EXPECT_EQ(new_health_check_port, 
hosts[0]->healthCheckAddress()->ip()->port());\n\n    EXPECT_NE(new_health_check_port, hosts[1]->healthCheckAddress()->ip()->port());\n  }\n\n  updateEndpointHealthCheckPortAtIndex(1, new_health_check_port);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n    EXPECT_EQ(new_health_check_port, hosts[0]->healthCheckAddress()->ip()->port());\n\n    // Make sure the second endpoint health check port is updated.\n    EXPECT_EQ(new_health_check_port, hosts[1]->healthCheckAddress()->ip()->port());\n  }\n}\n\n// Throw on adding a new resource with an invalid endpoint (since the given address is invalid).\nTEST_F(EdsTest, MalformedIP) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  auto* endpoints = cluster_load_assignment.add_endpoints();\n\n  auto* endpoint = endpoints->add_lb_endpoints();\n  endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address(\n      \"foo.bar.com\");\n  endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80);\n\n  initialize();\n  const auto decoded_resources =\n      TestUtility::decodeResources({cluster_load_assignment}, \"cluster_name\");\n  EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\"),\n                            EnvoyException,\n                            \"malformed IP address: foo.bar.com. 
Consider setting resolver_name or \"\n                            \"setting cluster type to 'STRICT_DNS' or 'LOGICAL_DNS'\");\n}\n\nclass EdsAssignmentTimeoutTest : public EdsTest {\npublic:\n  EdsAssignmentTimeoutTest() {\n    EXPECT_CALL(dispatcher_, createTimer_(_))\n        .WillOnce(Invoke([this](Event::TimerCb cb) {\n          timer_cb_ = cb;\n          EXPECT_EQ(nullptr, interval_timer_);\n          interval_timer_ = new Event::MockTimer();\n          return interval_timer_;\n        }))\n        .WillRepeatedly(Invoke([](Event::TimerCb) { return new Event::MockTimer(); }));\n\n    resetCluster();\n  }\n\n  Event::MockTimer* interval_timer_{nullptr};\n  Event::TimerCb timer_cb_;\n};\n\n// Test that assignment timeout is enabled and disabled correctly.\nTEST_F(EdsAssignmentTimeoutTest, AssignmentTimeoutEnableDisable) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  auto* endpoints = cluster_load_assignment.add_endpoints();\n\n  auto health_checker = std::make_shared<MockHealthChecker>();\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);\n  cluster_->setHealthChecker(health_checker);\n\n  auto* socket_address = endpoints->add_lb_endpoints()\n                             ->mutable_endpoint()\n                             ->mutable_address()\n                             ->mutable_socket_address();\n  socket_address->set_address(\"1.2.3.4\");\n  socket_address->set_port_value(80);\n\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment_lease =\n      cluster_load_assignment;\n  cluster_load_assignment_lease.mutable_policy()->mutable_endpoint_stale_after()->MergeFrom(\n      Protobuf::util::TimeUtil::SecondsToDuration(1));\n\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _)).Times(2); // Timer enabled twice.\n  EXPECT_CALL(*interval_timer_, disableTimer()).Times(1);    // Timer disabled 
once.\n  EXPECT_CALL(*interval_timer_, enabled()).Times(6);         // Includes calls by test.\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment_lease);\n  // Check that the timer is enabled.\n  EXPECT_EQ(interval_timer_->enabled(), true);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  // Check that the timer is disabled.\n  EXPECT_EQ(interval_timer_->enabled(), false);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment_lease);\n  // Check that the timer is enabled.\n  EXPECT_EQ(interval_timer_->enabled(), true);\n}\n\n// Test that assignment timeout is called and removes all the endpoints.\nTEST_F(EdsAssignmentTimeoutTest, AssignmentLeaseExpired) {\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"fare\");\n  cluster_load_assignment.mutable_policy()->mutable_endpoint_stale_after()->MergeFrom(\n      Protobuf::util::TimeUtil::SecondsToDuration(1));\n\n  auto health_checker = std::make_shared<MockHealthChecker>();\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);\n  cluster_->setHealthChecker(health_checker);\n\n  auto add_endpoint = [&cluster_load_assignment](int port) {\n    auto* endpoints = cluster_load_assignment.add_endpoints();\n\n    auto* socket_address = endpoints->add_lb_endpoints()\n                               ->mutable_endpoint()\n                               ->mutable_address()\n                               ->mutable_socket_address();\n    socket_address->set_address(\"1.2.3.4\");\n    socket_address->set_port_value(port);\n  };\n\n  // Add two endpoints to the cluster assignment.\n  add_endpoint(80);\n  add_endpoint(81);\n\n  // Expect the timer to be enabled once.\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  // Expect the timer to be disabled when stale assignments are removed.\n  EXPECT_CALL(*interval_timer_, disableTimer());\n  
EXPECT_CALL(*interval_timer_, enabled()).Times(2);\n  doOnConfigUpdateVerifyNoThrow(cluster_load_assignment);\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 2);\n  }\n  // Call the timer callback to indicate timeout.\n  timer_cb_();\n  // Test that stale endpoints are removed.\n  {\n    auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(hosts.size(), 0);\n  }\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/hds_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/service/health/v3/hds.pb.h\"\n#include \"envoy/type/v3/http.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/health_discovery_service.h\"\n#include \"common/upstream/transport_socket_match_impl.h\"\n\n#include \"extensions/transport_sockets/raw_buffer/config.h\"\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/cluster_info_factory.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_format.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AtLeast;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnNew;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Upstream {\n\n// Friend class of HdsDelegate, making it easier to access private fields\nclass HdsDelegateFriend {\npublic:\n  // Allows access to private function processMessage\n  void processPrivateMessage(\n      HdsDelegate& hd,\n      std::unique_ptr<envoy::service::health::v3::HealthCheckSpecifier>&& message) {\n    hd.processMessage(std::move(message));\n  };\n  HdsDelegateStats getStats(HdsDelegate& hd) { return 
hd.stats_; };\n};\n\nclass HdsTest : public testing::Test {\nprotected:\n  HdsTest()\n      : retry_timer_(new Event::MockTimer()), server_response_timer_(new Event::MockTimer()),\n        async_client_(new Grpc::MockAsyncClient()),\n        api_(Api::createApiForTest(stats_store_, random_)),\n        ssl_context_manager_(api_->timeSource()) {\n    node_.set_id(\"hds-node\");\n  }\n\n  // Creates an HdsDelegate\n  void createHdsDelegate() {\n    InSequence s;\n    EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n      retry_timer_cb_ = timer_cb;\n      return retry_timer_;\n    }));\n    // First call will set up the response timer for assertions, all other future calls\n    // just return a new timer that we won't keep track of.\n    EXPECT_CALL(dispatcher_, createTimer_(_))\n        .Times(AtLeast(1))\n        .WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n          server_response_timer_cb_ = timer_cb;\n          return server_response_timer_;\n        }))\n        .WillRepeatedly(testing::ReturnNew<NiceMock<Event::MockTimer>>());\n\n    hds_delegate_ = std::make_unique<HdsDelegate>(\n        stats_store_, Grpc::RawAsyncClientPtr(async_client_),\n        envoy::config::core::v3::ApiVersion::AUTO, dispatcher_, runtime_, stats_store_,\n        ssl_context_manager_, test_factory_, log_manager_, cm_, local_info_, admin_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n  }\n\n  // Creates a HealthCheckSpecifier message that contains one endpoint and one\n  // healthcheck\n  envoy::service::health::v3::HealthCheckSpecifier* createSimpleMessage() {\n    envoy::service::health::v3::HealthCheckSpecifier* msg =\n        new envoy::service::health::v3::HealthCheckSpecifier;\n    msg->mutable_interval()->set_seconds(1);\n\n    auto* health_check = msg->add_cluster_health_checks();\n    health_check->set_cluster_name(\"anna\");\n    health_check->add_health_checks()->mutable_timeout()->set_seconds(1);\n    
health_check->mutable_health_checks(0)->mutable_interval()->set_seconds(1);\n    health_check->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2);\n    health_check->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2);\n    health_check->mutable_health_checks(0)->mutable_grpc_health_check();\n    health_check->mutable_health_checks(0)->mutable_http_health_check()->set_codec_client_type(\n        envoy::type::v3::HTTP1);\n    health_check->mutable_health_checks(0)->mutable_http_health_check()->set_path(\"/healthcheck\");\n\n    auto* locality_endpoints = health_check->add_locality_endpoints();\n    // add locality information to this endpoint set of one endpoint.\n    auto* locality = locality_endpoints->mutable_locality();\n    locality->set_region(\"middle_earth\");\n    locality->set_zone(\"shire\");\n    locality->set_sub_zone(\"hobbiton\");\n\n    // add one endpoint to this locality grouping.\n    auto* socket_address =\n        locality_endpoints->add_endpoints()->mutable_address()->mutable_socket_address();\n    socket_address->set_address(\"127.0.0.0\");\n    socket_address->set_port_value(1234);\n\n    return msg;\n  }\n\n  // Creates a HealthCheckSpecifier message that contains several clusters, endpoints, localities,\n  // with only one health check type.\n  std::unique_ptr<envoy::service::health::v3::HealthCheckSpecifier>\n  createComplexSpecifier(uint32_t n_clusters, uint32_t n_localities, uint32_t n_endpoints) {\n    // Final specifier to return.\n    std::unique_ptr<envoy::service::health::v3::HealthCheckSpecifier> msg =\n        std::make_unique<envoy::service::health::v3::HealthCheckSpecifier>();\n\n    // set interval.\n    msg->mutable_interval()->set_seconds(1);\n\n    for (uint32_t cluster_num = 0; cluster_num < n_clusters; cluster_num++) {\n      // add a cluster with a name by iteration, with path /healthcheck\n      auto* health_check = msg->add_cluster_health_checks();\n      
health_check->set_cluster_name(absl::StrCat(\"anna\", cluster_num));\n      health_check->add_health_checks()->mutable_timeout()->set_seconds(1);\n\n      auto* health_check_info = health_check->mutable_health_checks(0);\n      health_check_info->mutable_interval()->set_seconds(1);\n      health_check_info->mutable_unhealthy_threshold()->set_value(2);\n      health_check_info->mutable_healthy_threshold()->set_value(2);\n\n      auto* health_check_http = health_check_info->mutable_http_health_check();\n      health_check_http->set_codec_client_type(envoy::type::v3::HTTP1);\n      health_check_http->set_path(\"/healthcheck\");\n\n      // add some locality groupings with iterative names for verification.\n      for (uint32_t loc_num = 0; loc_num < n_localities; loc_num++) {\n        auto* locality_endpoints = health_check->add_locality_endpoints();\n\n        // set the locality information for this group.\n        auto* locality = locality_endpoints->mutable_locality();\n        locality->set_region(absl::StrCat(\"region\", cluster_num));\n        locality->set_zone(absl::StrCat(\"zone\", loc_num));\n        locality->set_sub_zone(absl::StrCat(\"subzone\", loc_num));\n\n        // add some endpoints to the locality group with iterative naming for verification.\n        for (uint32_t endpoint_num = 0; endpoint_num < n_endpoints; endpoint_num++) {\n          auto* socket_address =\n              locality_endpoints->add_endpoints()->mutable_address()->mutable_socket_address();\n          socket_address->set_address(\n              absl::StrCat(\"127.\", cluster_num, \".\", loc_num, \".\", endpoint_num));\n          socket_address->set_port_value(1234);\n        }\n      }\n    }\n\n    return msg;\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  envoy::config::core::v3::Node node_;\n  Event::MockDispatcher dispatcher_;\n  Stats::IsolatedStoreImpl stats_store_;\n  MockClusterInfoFactory test_factory_;\n\n  std::unique_ptr<Upstream::HdsDelegate> hds_delegate_;\n  
HdsDelegateFriend hds_delegate_friend_;\n\n  Event::MockTimer* retry_timer_;\n  Event::TimerCb retry_timer_cb_;\n  Event::MockTimer* server_response_timer_;\n  Event::TimerCb server_response_timer_cb_;\n\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_info_{\n      new NiceMock<Upstream::MockClusterInfo>()};\n  std::unique_ptr<envoy::service::health::v3::HealthCheckSpecifier> message;\n  Grpc::MockAsyncStream async_stream_;\n  Grpc::MockAsyncClient* async_client_;\n  Runtime::MockLoader runtime_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n  Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Envoy::AccessLog::MockAccessLogManager> log_manager_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ThreadLocal::MockInstance> tls_;\n};\n\n// Test that HdsDelegate builds and sends initial message correctly\nTEST_F(HdsTest, HealthCheckRequest) {\n  envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse request;\n  request.mutable_health_check_request()->mutable_node()->set_id(\"hds-node\");\n  request.mutable_health_check_request()->mutable_capability()->add_health_check_protocols(\n      envoy::service::health::v3::Capability::HTTP);\n  request.mutable_health_check_request()->mutable_capability()->add_health_check_protocols(\n      envoy::service::health::v3::Capability::TCP);\n\n  EXPECT_CALL(local_info_, node()).WillOnce(ReturnRef(node_));\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(Grpc::ProtoBufferEq(request), false));\n  createHdsDelegate();\n}\n\n// Test if processMessage processes endpoints from a HealthCheckSpecifier\n// message 
correctly\nTEST_F(HdsTest, TestProcessMessageEndpoints) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message\n  // - Cluster \"anna0\" with 3 endpoints\n  // - Cluster \"anna1\" with 3 endpoints\n  message = std::make_unique<envoy::service::health::v3::HealthCheckSpecifier>();\n  message->mutable_interval()->set_seconds(1);\n\n  for (int i = 0; i < 2; i++) {\n    auto* health_check = message->add_cluster_health_checks();\n    health_check->set_cluster_name(\"anna\" + std::to_string(i));\n    for (int j = 0; j < 3; j++) {\n      auto* address = health_check->add_locality_endpoints()->add_endpoints()->mutable_address();\n      address->mutable_socket_address()->set_address(\"127.0.0.\" + std::to_string(i));\n      address->mutable_socket_address()->set_port_value(1234 + j);\n    }\n  }\n\n  // Process message\n  EXPECT_CALL(test_factory_, createClusterInfo(_)).Times(2).WillRepeatedly(Return(cluster_info_));\n  hds_delegate_friend_.processPrivateMessage(*hds_delegate_, std::move(message));\n\n  // Check Correctness\n  for (int i = 0; i < 2; i++) {\n    for (int j = 0; j < 3; j++) {\n      auto& host =\n          hds_delegate_->hdsClusters()[i]->prioritySet().hostSetsPerPriority()[0]->hosts()[j];\n      EXPECT_EQ(host->address()->ip()->addressAsString(), \"127.0.0.\" + std::to_string(i));\n      EXPECT_EQ(host->address()->ip()->port(), 1234 + j);\n    }\n  }\n}\n\n// Test if processMessage processes health checks from a HealthCheckSpecifier\n// message correctly\nTEST_F(HdsTest, TestProcessMessageHealthChecks) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message\n  // - Cluster \"minkowski0\" with 2 health_checks\n  // - Cluster \"minkowski1\" with 3 health_checks\n  message = 
std::make_unique<envoy::service::health::v3::HealthCheckSpecifier>();\n  message->mutable_interval()->set_seconds(1);\n\n  for (int i = 0; i < 2; i++) {\n    auto* health_check = message->add_cluster_health_checks();\n    health_check->set_cluster_name(\"minkowski\" + std::to_string(i));\n    for (int j = 0; j < i + 2; j++) {\n      auto hc = health_check->add_health_checks();\n      hc->mutable_timeout()->set_seconds(i);\n      hc->mutable_interval()->set_seconds(j);\n      hc->mutable_unhealthy_threshold()->set_value(j + 1);\n      hc->mutable_healthy_threshold()->set_value(j + 1);\n      hc->mutable_grpc_health_check();\n      hc->mutable_http_health_check()->set_codec_client_type(envoy::type::v3::HTTP1);\n      hc->mutable_http_health_check()->set_path(\"/healthcheck\");\n    }\n  }\n\n  // Process message\n  EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_));\n\n  hds_delegate_friend_.processPrivateMessage(*hds_delegate_, std::move(message));\n\n  // Check Correctness\n  EXPECT_EQ(hds_delegate_->hdsClusters()[0]->healthCheckers().size(), 2);\n  EXPECT_EQ(hds_delegate_->hdsClusters()[1]->healthCheckers().size(), 3);\n}\n\n// Test if processMessage exits gracefully upon receiving a malformed message\nTEST_F(HdsTest, TestProcessMessageMissingFields) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message\n  message.reset(createSimpleMessage());\n  // remove healthy threshold field to create an error\n  message->mutable_cluster_health_checks(0)->mutable_health_checks(0)->clear_healthy_threshold();\n\n  // call onReceiveMessage function for testing. 
Should increment stat_ errors upon\n  // getting a bad message\n  hds_delegate_->onReceiveMessage(std::move(message));\n\n  // Ensure that we never enabled the response timer that would start health checks,\n  // since this config was invalid.\n  EXPECT_FALSE(server_response_timer_->enabled_);\n\n  // ensure that no partial information was stored in hds_clusters_\n  EXPECT_TRUE(hds_delegate_->hdsClusters().empty());\n\n  // Check Correctness by verifying one request and one error has been generated in stat_\n  EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).errors_.value(), 1);\n  EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).requests_.value(), 1);\n}\n\n// Test if processMessage exits gracefully upon receiving a malformed message\n// There was a previous valid config, so we go back to that.\nTEST_F(HdsTest, TestProcessMessageMissingFieldsWithFallback) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message\n  message.reset(createSimpleMessage());\n\n  Network::MockClientConnection* connection = new NiceMock<Network::MockClientConnection>();\n  EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillRepeatedly(Return(connection));\n  EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(2);\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, false));\n  EXPECT_CALL(test_factory_, createClusterInfo(_)).WillOnce(Return(cluster_info_));\n  EXPECT_CALL(*connection, setBufferLimits(_));\n  EXPECT_CALL(dispatcher_, deferredDelete_(_));\n  // Process message\n  hds_delegate_->onReceiveMessage(std::move(message));\n  connection->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Create a invalid message\n  message.reset(createSimpleMessage());\n\n  // set this address to be distinguishable from the previous message in sendResponse()\n  message->mutable_cluster_health_checks(0)\n      
->mutable_locality_endpoints(0)\n      ->mutable_endpoints(0)\n      ->mutable_address()\n      ->mutable_socket_address()\n      ->set_address(\"9.9.9.9\");\n\n  // remove healthy threshold field to create an error\n  message->mutable_cluster_health_checks(0)->mutable_health_checks(0)->clear_healthy_threshold();\n\n  // Pass invalid message through. Should increment stat_ errors upon\n  // getting a bad message.\n  hds_delegate_->onReceiveMessage(std::move(message));\n\n  // Ensure that the timer is enabled since there was a previous valid specifier.\n  EXPECT_TRUE(server_response_timer_->enabled_);\n\n  // read the response and check that it is pinging the old\n  // address 127.0.0.0 instead of the new 9.9.9.9\n  auto response = hds_delegate_->sendResponse();\n  EXPECT_EQ(response.endpoint_health_response()\n                .endpoints_health(0)\n                .endpoint()\n                .address()\n                .socket_address()\n                .address(),\n            \"127.0.0.0\");\n\n  // Check Correctness by verifying one request and one error has been generated in stat_\n  EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).errors_.value(), 1);\n  EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).requests_.value(), 2);\n}\n\n// Test if sendResponse() retains the structure of all endpoints ingested in the specifier\n// from onReceiveMessage(). 
This verifies that all endpoints are grouped by the correct\n// cluster and the correct locality.\nTEST_F(HdsTest, TestSendResponseMultipleEndpoints) {\n  // number of clusters, localities by cluster, and endpoints by locality\n  // to build and verify off of.\n  const uint32_t NumClusters = 2;\n  const uint32_t NumLocalities = 2;\n  const uint32_t NumEndpoints = 2;\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message\n  message = createComplexSpecifier(NumClusters, NumLocalities, NumEndpoints);\n\n  // Create a new active connection on request, setting its status to connected\n  // to mock a found endpoint.\n  EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _))\n      .WillRepeatedly(Invoke(\n          [](Network::Address::InstanceConstSharedPtr, Network::Address::InstanceConstSharedPtr,\n             Network::TransportSocketPtr&, const Network::ConnectionSocket::OptionsSharedPtr&) {\n            Network::MockClientConnection* connection =\n                new NiceMock<Network::MockClientConnection>();\n\n            // pretend our endpoint was connected to.\n            connection->raiseEvent(Network::ConnectionEvent::Connected);\n\n            // return this new, connected endpoint.\n            return connection;\n          }));\n\n  EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(2);\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, false));\n\n  // Carry over cluster name on a call to createClusterInfo,\n  // in the same way that the prod factory does.\n  EXPECT_CALL(test_factory_, createClusterInfo(_))\n      .WillRepeatedly(Invoke([](const ClusterInfoFactory::CreateClusterInfoParams& params) {\n        std::shared_ptr<Upstream::MockClusterInfo> cluster_info{\n            new NiceMock<Upstream::MockClusterInfo>()};\n        // copy name for use in sendResponse() in HdsCluster\n\n        
cluster_info->name_ = params.cluster_.name();\n        return cluster_info;\n      }));\n  EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(NumClusters * NumLocalities * NumEndpoints);\n\n  // Process message\n  hds_delegate_->onReceiveMessage(std::move(message));\n\n  // read response and verify fields\n  const auto response = hds_delegate_->sendResponse().endpoint_health_response();\n\n  ASSERT_EQ(response.cluster_endpoints_health_size(), NumClusters);\n\n  for (uint32_t i = 0; i < NumClusters; i++) {\n    const auto& cluster = response.cluster_endpoints_health(i);\n\n    // Expect the correct cluster name by index\n    EXPECT_EQ(cluster.cluster_name(), absl::StrCat(\"anna\", i));\n\n    // Every cluster should have two locality groupings\n    ASSERT_EQ(cluster.locality_endpoints_health_size(), NumLocalities);\n\n    for (uint32_t j = 0; j < NumLocalities; j++) {\n      // Every locality should have a number based on its index\n      const auto& loc_group = cluster.locality_endpoints_health(j);\n      EXPECT_EQ(loc_group.locality().region(), absl::StrCat(\"region\", i));\n      EXPECT_EQ(loc_group.locality().zone(), absl::StrCat(\"zone\", j));\n      EXPECT_EQ(loc_group.locality().sub_zone(), absl::StrCat(\"subzone\", j));\n\n      // Every locality should have two endpoints.\n      ASSERT_EQ(loc_group.endpoints_health_size(), NumEndpoints);\n\n      for (uint32_t k = 0; k < NumEndpoints; k++) {\n\n        // every endpoint's address is based on all 3 index values.\n        const auto& endpoint_health = loc_group.endpoints_health(k);\n        EXPECT_EQ(endpoint_health.endpoint().address().socket_address().address(),\n                  absl::StrCat(\"127.\", i, \".\", j, \".\", k));\n        EXPECT_EQ(endpoint_health.health_status(), envoy::config::core::v3::UNHEALTHY);\n      }\n    }\n  }\n  EXPECT_EQ(response.endpoints_health_size(), NumClusters * NumLocalities * NumEndpoints);\n}\n\n// Tests OnReceiveMessage given a minimal HealthCheckSpecifier 
message\nTEST_F(HdsTest, TestMinimalOnReceiveMessage) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message\n  message = std::make_unique<envoy::service::health::v3::HealthCheckSpecifier>();\n  message->mutable_interval()->set_seconds(1);\n\n  // Process message\n  EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1));\n  hds_delegate_->onReceiveMessage(std::move(message));\n}\n\n// Test that a transport_socket_matches and transport_socket_match_criteria filter as expected to\n// build the correct TransportSocketFactory based on these fields.\nTEST_F(HdsTest, TestSocketContext) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message.\n  message.reset(createSimpleMessage());\n\n  // Add transport socket matches to message.\n  const std::string match_yaml = absl::StrFormat(\n      R\"EOF(\ntransport_socket_matches:\n- name: \"test_socket\"\n  match:\n    test_match: \"true\"\n  transport_socket:\n    name: \"envoy.transport_sockets.raw_buffer\"\n)EOF\");\n  auto* cluster_health_check = message->mutable_cluster_health_checks(0);\n  cluster_health_check->MergeFrom(\n      TestUtility::parseYaml<envoy::service::health::v3::ClusterHealthCheck>(match_yaml));\n\n  // Add transport socket match criteria to our health check, for filtering matches.\n  const std::string criteria_yaml = absl::StrFormat(\n      R\"EOF(\ntransport_socket_match_criteria:\n  test_match: \"true\"\n)EOF\");\n  cluster_health_check->mutable_health_checks(0)->MergeFrom(\n      TestUtility::parseYaml<envoy::config::core::v3::HealthCheck>(criteria_yaml));\n\n  Network::MockClientConnection* connection = new NiceMock<Network::MockClientConnection>();\n  EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, 
_)).WillRepeatedly(Return(connection));\n\n  // Pull out socket_matcher object normally internal to createClusterInfo, to test that a matcher\n  // would match the expected socket.\n  std::unique_ptr<TransportSocketMatcherImpl> socket_matcher;\n  EXPECT_CALL(test_factory_, createClusterInfo(_))\n      .WillRepeatedly(Invoke([&](const ClusterInfoFactory::CreateClusterInfoParams& params) {\n        // Build scope, factory_context as does ProdClusterInfoFactory.\n        Envoy::Stats::ScopePtr scope =\n            params.stats_.createScope(fmt::format(\"cluster.{}.\", params.cluster_.name()));\n        Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n            params.admin_, params.ssl_context_manager_, *scope, params.cm_, params.local_info_,\n            params.dispatcher_, params.stats_, params.singleton_manager_, params.tls_,\n            params.validation_visitor_, params.api_);\n\n        // Create a mock socket_factory for the scope of this unit test.\n        std::unique_ptr<Envoy::Network::TransportSocketFactory> socket_factory =\n            std::make_unique<Network::MockTransportSocketFactory>();\n\n        // set socket_matcher object in test scope.\n        socket_matcher = std::make_unique<Envoy::Upstream::TransportSocketMatcherImpl>(\n            params.cluster_.transport_socket_matches(), factory_context, socket_factory, *scope);\n\n        // But still use the fake cluster_info_.\n        return cluster_info_;\n      }));\n\n  EXPECT_CALL(*connection, setBufferLimits(_));\n  EXPECT_CALL(dispatcher_, deferredDelete_(_));\n\n  // Process message.\n  EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1));\n  hds_delegate_->onReceiveMessage(std::move(message));\n\n  // pretend our endpoint was connected to.\n  connection->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Get our health checker to match against.\n  const auto clusters = hds_delegate_->hdsClusters();\n  ASSERT_EQ(clusters.size(), 
1);\n  const auto hcs = clusters[0]->healthCheckers();\n  ASSERT_EQ(hcs.size(), 1);\n\n  // Check that our match hits.\n  HealthCheckerImplBase* health_checker_base = dynamic_cast<HealthCheckerImplBase*>(hcs[0].get());\n  const auto match =\n      socket_matcher->resolve(health_checker_base->transportSocketMatchMetadata().get());\n  EXPECT_EQ(match.name_, \"test_socket\");\n}\n\n// Tests OnReceiveMessage given a HealthCheckSpecifier message without interval field\nTEST_F(HdsTest, TestDefaultIntervalOnReceiveMessage) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message\n  message = std::make_unique<envoy::service::health::v3::HealthCheckSpecifier>();\n  // notice that interval field is intentionally left undefined\n\n  // Process message\n  EXPECT_CALL(*server_response_timer_, enableTimer(std::chrono::milliseconds(1000), _))\n      .Times(AtLeast(1));\n  hds_delegate_->onReceiveMessage(std::move(message));\n}\n\n// Tests that SendResponse responds to the server in a timely fashion\n// given a minimal HealthCheckSpecifier message\nTEST_F(HdsTest, TestMinimalSendResponse) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message\n  message = std::make_unique<envoy::service::health::v3::HealthCheckSpecifier>();\n  message->mutable_interval()->set_seconds(1);\n\n  // Process message and send 2 responses\n  EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)).Times(2);\n  hds_delegate_->onReceiveMessage(std::move(message));\n  hds_delegate_->sendResponse();\n  server_response_timer_cb_();\n}\n\nTEST_F(HdsTest, TestStreamConnectionFailure) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _))\n      .WillOnce(Return(nullptr))\n      
.WillOnce(Return(nullptr))\n      .WillOnce(Return(nullptr))\n      .WillOnce(Return(nullptr))\n      .WillOnce(Return(nullptr))\n      .WillOnce(Return(&async_stream_));\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(1000005)).WillRepeatedly(Return(654321));\n  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(5), _));\n  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(321), _));\n  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(2321), _));\n  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(6321), _));\n  EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(14321), _));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n\n  // Test connection failure and retry\n  createHdsDelegate();\n  retry_timer_cb_();\n  retry_timer_cb_();\n  retry_timer_cb_();\n  retry_timer_cb_();\n  retry_timer_cb_();\n}\n\n// TODO(lilika): Add unit tests for HdsDelegate::sendResponse() with healthy and\n// unhealthy endpoints.\n\n// Tests that SendResponse responds to the server correctly given\n// a HealthCheckSpecifier message that contains a single endpoint\n// which times out\nTEST_F(HdsTest, TestSendResponseOneEndpointTimeout) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createHdsDelegate();\n\n  // Create Message\n  message.reset(createSimpleMessage());\n\n  Network::MockClientConnection* connection_ = new NiceMock<Network::MockClientConnection>();\n  EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillRepeatedly(Return(connection_));\n  EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(2);\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, false));\n  EXPECT_CALL(test_factory_, createClusterInfo(_)).WillOnce(Return(cluster_info_));\n  EXPECT_CALL(*connection_, setBufferLimits(_));\n  EXPECT_CALL(dispatcher_, deferredDelete_(_));\n  // Process message\n  
hds_delegate_->onReceiveMessage(std::move(message));\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Send Response\n  auto msg = hds_delegate_->sendResponse();\n\n  // Correctness\n  EXPECT_EQ(msg.endpoint_health_response().endpoints_health(0).health_status(),\n            envoy::config::core::v3::UNHEALTHY);\n  EXPECT_EQ(msg.endpoint_health_response()\n                .endpoints_health(0)\n                .endpoint()\n                .address()\n                .socket_address()\n                .address(),\n            \"127.0.0.0\");\n  EXPECT_EQ(msg.endpoint_health_response()\n                .endpoints_health(0)\n                .endpoint()\n                .address()\n                .socket_address()\n                .port_value(),\n            1234);\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/clusterfuzz-testcase-minimized-health_check_fuzz_test-5678121129607168",
    "content": "health_check_config {\n  timeout {\n    seconds: 8960\n  }\n  interval {\n    seconds: 26624\n  }\n  unhealthy_threshold {\n    value: 524288\n  }\n  healthy_threshold {\n    value: 2147483652\n  }\n  http_health_check {\n    path: \"\\003\"\n  }\n  event_log_path: \"(\"\n  interval_jitter_percent: 654311422\n}\nactions {\n  respond {\n    http_respond {\n      status: 11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111\n    }\n  }\n}\nhttp_verify_cluster: true\nstart_failed: true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/clusterfuzz-testcase-minimized-health_check_fuzz_test-5748071634567168",
    "content": "health_check_config {\n  timeout {\n    nanos: 9\n  }\n  interval {\n    seconds: 32768\n    nanos: 426\n  }\n  unhealthy_threshold {\n    value: 491516\n  }\n  healthy_threshold {\n    value: 524284\n  }\n  http_health_check {\n    path: \"(\"\n  }\n  event_log_path: \"(\"\n}\nactions {\n  raise_event: 355888746\n}\nhttp_verify_cluster: true\nstart_failed: true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/custom_health_check",
    "content": "health_check_config {\n  timeout {\n    seconds: 26624\n  }\n  interval {\n    seconds: 8960\n    nanos: 65530\n  }\n  interval_jitter {\n    seconds: 8960\n    nanos: 7\n  }\n  unhealthy_threshold {\n    value: 641007614\n  }\n  healthy_threshold {\n    value: 1024\n  }\n  alt_port {\n    value: 16777216\n  }\n  reuse_connection {\n    value: true\n  }\n  no_traffic_interval {\n    nanos: 2097152\n  }\n  custom_health_check {\n    name: \"ssssssssssssssssssssssssssssssssssssssssss\"\n  }\n  unhealthy_edge_interval {\n    seconds: 131072\n    nanos: 4104\n  }\n  healthy_edge_interval {\n    seconds: 131072\n    nanos: 128\n  }\n  event_log_path: \"A(\"\n  interval_jitter_percent: 641007544\n  initial_jitter {\n    seconds: 8960\n    nanos: 7\n  }\n  tls_options {\n  }\n}\nactions {\n  raise_event: REMOTE_CLOSE\n}\nactions {\n  raise_event: REMOTE_CLOSE\n}\nactions {\n  trigger_interval_timer {\n  }\n}\nactions {\n  raise_event: CONNECTED\n}\nhttp_verify_cluster: true\nstart_failed: true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/grpc_Success",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    grpc_health_check {\n        service_name: \"service\"\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n                headers {\n                    key: \"content-type\"\n                    value: \"application/grpc\"\n                }\n            }\n            status: 200\n            }\n            grpc_respond_bytes {\n                status: SERVING\n                chunk_size_for_structured_response: 3\n            }\n            grpc_respond_trailers {\n            trailers {\n                headers {\n                    key: \"grpc-status\"\n                    value: \"0\"\n                }\n            }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/grpc_SuccessWithAuthority",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    grpc_health_check {\n        service_name: \"service\"\n        authority: \"www.envoyproxy.io\"\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n                headers {\n                    key: \"content-type\"\n                    value: \"application/grpc\"\n                }\n            }\n            status: 200\n            chunk_size_for_structured_response: 3\n            }\n            grpc_respond_bytes {\n                status: SERVING\n            }\n            grpc_respond_trailers {\n            trailers {\n                headers {\n                    key: \"grpc-status\"\n                    value: \"0\"\n                }\n            }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/grpc_crash-33da964bf71e02e3324ceee47fbb204532817e61",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    grpc_health_check {\n        service_name: \"service\"\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n                headers {\n                    key: \"content-type\"\n                    value: \"application/grpc\"\n                }\n            }\n            status: 200\n            }\n            grpc_respond_bytes {\n                status: SERVING\n                chunk_size_for_structured_response: 3\n            }\n            }\n        }\n    }\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/grpc_crash-50b2ffbcf518e8f078ad8ed1f9801feb89a4d158",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 1\n  }\n  healthy_threshold {\n    value: 2\n  }\n  alt_port {\n    value: 544435713\n  }\n  grpc_health_check {\n  }\n  no_traffic_interval {\n    seconds: 5\n  }\n}\nactions {\n  trigger_timeout_timer {\n  }\n}\nactions {\n  raise_event: REMOTE_CLOSE\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/grpc_crash-5747b3523c44ce0a228a8d8884ed7aeea2608341",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  grpc_health_check {\n    service_name: \"service\"\n    authority: \"www.envoyproxy.io\"\n  }\n}\nactions {\n  trigger_interval_timer {\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n    }\n    grpc_respond {\n      grpc_respond_bytes {\n      }\n      grpc_respond_trailers {\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/grpc_crash-5d27a3a5fc4fa384c9cbd76f0e7a3d841083396a",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  alt_port {\n    value: 2\n  }\n  grpc_health_check {\n    service_name: \"service\"\n  }\n  initial_jitter {\n    seconds: 1\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n      }\n      status: 200\n    }\n    tcp_respond {\n    }\n    grpc_respond {\n      grpc_respond_headers {\n        headers {\n          headers {\n            key: \":status\"\n            value: \"200\"\n          }\n          headers {\n            key: \"content-type\"\n            value: \"application/grpc\"\n          }\n          headers {\n            key: \":status\"\n            value: \"200\"\n          }\n        }\n        status: 200\n      }\n      grpc_respond_bytes {\n        grpc_respond_unstructured_bytes {\n          data: \"\\005\\000\\000\\000\"\n        }\n        chunk_size_for_structured_response: 3\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/grpc_crash-d9287189542575619bdf21886dd396334fded9c6",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  grpc_health_check {\n    service_name: \"service\"\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n    }\n    grpc_respond {\n      grpc_respond_headers {\n        headers {\n          headers {\n            key: \"content-type\"\n            value: \"application/grpc\"\n          }\n          headers {\n            key: \":status\"\n            value: \"200\"\n          }\n          headers {\n            key: \"content-type\"\n            value: \"application/grpc\"\n          }\n        }\n        status: 200\n        chunk_size_for_structured_response: 3\n      }\n      grpc_respond_bytes {\n        status: SERVICE_UNKNOWN\n      }\n      grpc_respond_trailers {\n        trailers {\n          headers {\n            key: \"grpc-status\"\n            value: \"0\"\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/grpc_no-trailers",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    grpc_health_check {\n        service_name: \"service\"\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n                headers {\n                    key: \"content-type\"\n                    value: \"application/grpc\"\n                }\n            }\n            status: 200\n            }\n            grpc_respond_bytes {\n                status: SERVING\n                chunk_size_for_structured_response: 3\n            }\n            }\n        }\n    }\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_ConnectionClose",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        path: \"/healthcheck\"\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\nactions {\n  trigger_interval_timer {\n      \n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_Degraded",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        path: \"/healthcheck\"\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n                headers {\n                    key: \"x-envoy-degraded\"\n                    value: \"true\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            \n        }\n    }\n}\nactions {\n    trigger_interval_timer {\n\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_Disconnect",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 5\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        path: \"/healthcheck\"\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n    }\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_LargeNanos",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n    nanos: 1929379840\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  http_health_check {\n    path: \"/healthcheck\"\n    service_name_matcher {\n      prefix: \"locations\"\n    }\n  }\n  no_traffic_interval {\n    seconds: 1\n  }\n}\nactions {\n  trigger_timeout_timer {\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n            \n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_RemoteCloseBetweenChecks",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 5\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        path: \"/healthcheck\"\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n\n            }\n        }\n    }\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_Success",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        path: \"/healthcheck\"\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_SuccessStartFailedSuccessFirst",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 5\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        path: \"/healthcheck\"\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\nstart_failed: true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_Timeout",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 5\n    }\n    unhealthy_threshold {\n        value: 1\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n        path: \"/healthcheck\"\n    }\n}\nactions {\n    trigger_timeout_timer {\n\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_TimeoutThenRemoteClose",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        path: \"/healthcheck\"\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n    }\n}\nactions {\n    trigger_timeout_timer {\n\n    }\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_TimeoutThenSuccess",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        path: \"/healthcheck\"\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n    }\n}\nactions {\n    trigger_timeout_timer {\n\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_ZeroRetryInterval",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n        path: \"/healthcheck\"\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n                headers {\n                    key: \"-upstream-healthchecked-cluster\"\n                    value: \"locations-production-iad\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\nhttp_verify_cluster : true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_crash-daebc8c8bcb985b777d6fa462a265ba5cdd8b06e",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  http_health_check {\n    path: \"/healthcheck\"\n    service_name_matcher {\n      prefix: \"locations\"\n    }\n  }\n  no_traffic_interval {\n    seconds: 5\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n    \n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\nactions {\n  raise_event: REMOTE_CLOSE\n}\nactions {\n  trigger_interval_timer {\n      \n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_crash-test",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  http_health_check {\n    path: \"/healthcheck\"\n    service_name_matcher {\n      prefix: \"locations\"\n    }\n  }\n  no_traffic_interval {\n    seconds: 5\n  }\n}\nactions {\n  trigger_interval_timer {\n\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_crash_1",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  http_health_check {\n    path: \"/healthcheck\"\n    service_name_matcher {\n      prefix: \"locations\"\n    }\n  }\n  initial_jitter {\n    seconds: 1\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"-upstream-healthchecked-cluster\"\n          value: \"locations-production-iad\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n            \n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\nhttp_verify_cluster: true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_crash_2",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n    nanos: 7544832\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  http_health_check {\n    path: \"/healthcheck\"\n    service_name_matcher {\n      prefix: \"locations\"\n    }\n  }\n}\nactions {\n  respond {\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_crash_3",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n  }\n  alt_port {\n    value: 2\n  }\n  reuse_connection {\n  }\n  http_health_check {\n    path: \"/healthcheck\"\n    service_name_matcher {\n      prefix: \"locations\"\n    }\n  }\n  no_traffic_interval {\n    seconds: 1\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"x-envoy-degraded\"\n          value: \"true\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n\n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\nactions {\n  trigger_interval_timer {\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \"200\"\n          value: \"200\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n            \n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_crash_4",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n        path: \"/healthcheck\"\n    }\n    initial_jitter {\n    seconds: 1\n  }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n                headers {\n                    key: \"-upstream-healthchecked-cluster\"\n                    value: \"locations-production-iad\"\n                }\n            }\n            status: 200\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\nhttp_verify_cluster : true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_crash_5",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n    nanos: 42496\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n    nanos: 620756992\n  }\n  unhealthy_threshold {\n    value: 2752514\n  }\n  healthy_threshold {\n    value: 2\n  }\n  http_health_check {\n    path: \"/healthcheck\"\n    service_name_matcher {\n      prefix: \"200\"\n      ignore_case: true\n    }\n  }\n  no_traffic_interval {\n    seconds: 1\n  }\n  always_log_health_check_failures: true\n  initial_jitter {\n    nanos: 16\n  }\n}\nactions {\n  trigger_timeout_timer {\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n            \n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_out_of_range_status",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        path: \"/healthcheck\"\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n    }\n}\nactions {\n    respond {\n        http_respond {\n            headers {\n                headers {\n                    key: \":status\"\n                    value: \"200\"\n                }\n            }\n            status: 1500\n        }\n        tcp_respond {\n            \n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/http_test-something",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    interval_jitter {\n        seconds: 1\n    }\n    no_traffic_interval {\n        seconds: 5\n    }\n    unhealthy_threshold {\n        value: 1\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    http_health_check {\n        service_name_matcher {\n            prefix: \"locations\"\n        }\n        path: \"/healthcheck\"\n    }\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp-expect_close_test",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    reuse_connection {\n        value: false\n    }\n    tcp_health_check {\n        send {\n            text: \"01\"\n        }\n        receive [{\n            text: \"02\"\n        }]\n    }\n}\nactions {\n    respond {\n        http_respond {\n            status: \"1\"\n        }\n        tcp_respond {\n            data: \"\\x02\"\n        }\n    }\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\nactions {\n    respond {\n        http_respond {\n            status: \"1\"\n        }\n        tcp_respond {\n            data: \"\\x02\"\n        }\n    }\n}\nactions {\n    trigger_interval_timer {\n        \n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_DataWithoutReusingConnection",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    reuse_connection {\n        value: false\n    }\n    tcp_health_check {\n        send {\n            text: \"01\"\n        }\n        receive [{\n            text: \"02\"\n        }]\n    }\n}\nactions {\n    respond {\n        http_respond {\n            status: 1\n        }\n        tcp_respond {\n            data: \"\\x02\"\n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_Success",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    tcp_health_check {\n        send {\n            text: \"01\"\n        }\n        receive [{\n            text: \"02\"\n        }]\n    }\n}\nactions {\n    raise_event: CONNECTED\n}\nactions {\n    respond {\n        http_respond {\n            status: 1\n        }\n        tcp_respond {\n            data: \"\\x02\"\n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_Timeout",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 1\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    tcp_health_check {\n        send {\n            text: \"01\"\n        }\n        receive [{\n            text: \"02\"\n        }]\n    }\n}\nactions {\n    raise_event: CONNECTED\n}\nactions {\n    respond {\n        http_respond {\n            status: 1\n        }\n        tcp_respond {\n            data: \"\\x01\"\n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_TimeoutThenRemoteClose",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    tcp_health_check {\n        send {\n            text: \"01\"\n        }\n        receive [{\n            text: \"02\"\n        }]\n    }\n}\nactions {\n    raise_event: CONNECTED\n}\nactions {\n    respond {\n        http_respond {\n            status: 1\n        }\n        tcp_respond {\n            data: \"\\x01\"\n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\nactions {\n    trigger_timeout_timer {\n\n    }\n}\nactions {\n    raise_event: CONNECTED\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\nactions {\n    raise_event: CONNECTED\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_WrongData",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    reuse_connection {\n        value: false\n    }\n    tcp_health_check {\n        send {\n            text: \"01\"\n        }\n        receive [{\n            text: \"02\"\n        }]\n    }\n}\nactions {\n    raise_event: CONNECTED\n}\nactions {\n    respond {\n        http_respond {\n            status: 1\n        }\n        tcp_respond {\n            data: \"\\x03\"\n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_crash-3596e4a310a1c131312ba869578be28a86a0439b",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n    nanos: 2097152\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  reuse_connection {\n  }\n  tcp_health_check {\n    send {\n      binary: \"\\001\\000\\000\\r\"\n    }\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"x-envoy-degraded\"\n          value: \"true\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n      data: \"0\"\n    }\n  }\n}\nactions {\n  trigger_interval_timer {\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_crash-449c4bf2d000d6e56b782fdd26a86e20a7f87b4f",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  tcp_health_check {\n  }\n  no_traffic_interval {\n    seconds: 1\n  }\n  event_log_path: \"200\"\n  initial_jitter {\n    seconds: 1\n  }\n  transport_socket_match_criteria {\n    fields {\n      key: \"\"\n      value {\n        bool_value: true\n      }\n    }\n  }\n}\nactions {\n  trigger_interval_timer {\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"-upstream-healthchecked-cluster\"\n          value: \"locations-production-iad\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\nhttp_verify_cluster: true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_crash-e899b54d3e39838939bdde4000acbe8bcc8c37b9",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n    nanos: 196608\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 16580610\n  }\n  alt_port {\n    value: 16580610\n  }\n  tcp_health_check {\n  }\n  no_traffic_interval {\n    seconds: 5\n  }\n  initial_jitter {\n    nanos: 1701314560\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n      }\n      status: 200\n    }\n    tcp_respond {\n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_crash-test",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  tcp_health_check {\n  }\n  no_traffic_interval {\n    seconds: 1\n  }\n  event_log_path: \"200\"\n  initial_jitter {\n    seconds: 1\n  }\n  transport_socket_match_criteria {\n    fields {\n      key: \"\"\n      value {\n        bool_value: true\n      }\n    }\n  }\n}\nactions {\n  trigger_interval_timer {\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"-upstream-healthchecked-cluster\"\n          value: \"locations-production-iad\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\nhttp_verify_cluster: true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_crash-test-1",
    "content": "health_check_config {\n  timeout {\n    seconds: 1\n  }\n  interval {\n    seconds: 1\n  }\n  interval_jitter {\n    seconds: 1\n  }\n  unhealthy_threshold {\n    value: 2\n  }\n  healthy_threshold {\n    value: 2\n  }\n  tcp_health_check {\n      send {\n            text: \"01\"\n        }\n        receive [{\n            text: \"02\"\n        }]\n  }\n  no_traffic_interval {\n    seconds: 1\n  }\n  event_log_path: \"200\"\n  initial_jitter {\n    seconds: 1\n  }\n  transport_socket_match_criteria {\n    fields {\n      key: \"\"\n      value {\n        bool_value: true\n      }\n    }\n  }\n}\nactions {\n  trigger_interval_timer {\n  }\n}\nactions {\n  respond {\n    http_respond {\n      headers {\n        headers {\n          key: \":status\"\n          value: \"200\"\n        }\n        headers {\n          key: \"-upstream-healthchecked-cluster\"\n          value: \"locations-production-iad\"\n        }\n      }\n      status: 200\n    }\n    tcp_respond {\n    }\n    grpc_respond {\n      grpc_respond_headers {\n        \n      }\n    }\n  }\n}\nhttp_verify_cluster: true\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_crash_test",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    tcp_health_check {\n        send {\n            text: \"01\"\n        }\n        receive [{\n            text: \"02\"\n        }]\n    }\n}\nactions {\n    trigger_interval_timer {\n\n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_expect_close_test",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    reuse_connection {\n        value: false\n    }\n    tcp_health_check {\n        send {\n            text: \"01\"\n        }\n        receive [{\n            text: \"02\"\n        }]\n    }\n}\nactions {\n    respond {\n        http_respond {\n            status: \"1\"\n        }\n        tcp_respond {\n            data: \"\\x02\"\n        }\n        grpc_respond {\n            grpc_respond_headers {\n\n            }\n        }\n    }\n}\nactions {\n    raise_event: REMOTE_CLOSE\n}\nactions {\n    respond {\n        http_respond {\n            status: 1\n        }\n        tcp_respond {\n            data: \"\\x02\"\n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\nactions {\n    trigger_interval_timer {\n        \n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_corpus/tcp_expect_close_test_2",
    "content": "health_check_config {\n    timeout {\n        seconds: 1\n    }\n    interval {\n        seconds: 1\n    }\n    unhealthy_threshold {\n        value: 2\n    }\n    healthy_threshold: {\n        value: 2\n    }\n    reuse_connection {\n        value: false\n    }\n    tcp_health_check {\n        send {\n            text: \"01\"\n        }\n    }\n}\nactions {\n    raise_event: CONNECTED\n}\nactions {\n    respond {\n        http_respond {\n            status: 1\n        }\n        tcp_respond {\n            data: \"\\x02\"\n        }\n        grpc_respond {\n            grpc_respond_headers {\n                \n            }\n        }\n    }\n}\nactions {\n    trigger_interval_timer {\n        \n    }\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_fuzz.cc",
    "content": "#include \"test/common/upstream/health_check_fuzz.h\"\n\n#include <chrono>\n#include <memory>\n\n#include \"common/grpc/common.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/fuzz/utility.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace { // gRPC helper methods\n// From unit tests\nstd::vector<std::vector<uint8_t>>\nserializeResponseToBufferList(grpc::health::v1::HealthCheckResponse::ServingStatus status,\n                              uint64_t chunk_size_from_fuzzer) {\n  grpc::health::v1::HealthCheckResponse response;\n  response.set_status(status);\n  const auto data = Grpc::Common::serializeToGrpcFrame(response);\n  uint64_t chunk_size = chunk_size_from_fuzzer % data->length();\n  if (chunk_size == 0) {\n    ++chunk_size;\n  }\n  std::vector<std::vector<uint8_t>> bufferList;\n  for (size_t i = 0; i < data->length(); i += chunk_size) {\n    if (i >= data->length() - chunk_size) {\n      // The length of the last chunk\n      chunk_size = data->length() - i;\n    }\n    auto buffer = std::vector<uint8_t>(chunk_size, 0);\n    data->copyOut(i, chunk_size, &buffer[0]);\n    bufferList.push_back(buffer);\n  }\n  return bufferList;\n}\n\ngrpc::health::v1::HealthCheckResponse::ServingStatus\nconvertToGrpcServingStatus(test::common::upstream::ServingStatus status) {\n  switch (status) {\n  case test::common::upstream::ServingStatus::UNKNOWN: {\n    return grpc::health::v1::HealthCheckResponse::UNKNOWN;\n  }\n  case test::common::upstream::ServingStatus::SERVING: {\n    return grpc::health::v1::HealthCheckResponse::SERVING;\n  }\n  case test::common::upstream::ServingStatus::NOT_SERVING: {\n    return grpc::health::v1::HealthCheckResponse::NOT_SERVING;\n  }\n  case test::common::upstream::ServingStatus::SERVICE_UNKNOWN: {\n    return grpc::health::v1::HealthCheckResponse::SERVICE_UNKNOWN;\n  }\n  default: // shouldn't hit\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  
}\n}\n\nstd::vector<std::vector<uint8_t>>\nmakeBufferListToRespondWith(test::common::upstream::GrpcRespondBytes grpc_respond_bytes) {\n  switch (grpc_respond_bytes.grpc_respond_bytes_selector_case()) {\n  case test::common::upstream::GrpcRespondBytes::kStatus: {\n    // Structured Response\n    grpc::health::v1::HealthCheckResponse::ServingStatus servingStatus =\n        convertToGrpcServingStatus(grpc_respond_bytes.status());\n    ENVOY_LOG_MISC(trace, \"Will respond with a serialized frame with status: {}\",\n                   grpc_respond_bytes.status());\n    return serializeResponseToBufferList(servingStatus,\n                                         grpc_respond_bytes.chunk_size_for_structured_response());\n  }\n  case test::common::upstream::GrpcRespondBytes::kGrpcRespondUnstructuredBytes: {\n    std::vector<std::vector<uint8_t>> bufferList;\n    // Arbitrarily Generated Bytes\n    constexpr auto max_chunks = 128;\n    for (int i = 0;\n         i <\n         std::min(max_chunks, grpc_respond_bytes.grpc_respond_unstructured_bytes().data().size());\n         ++i) {\n      std::vector<uint8_t> chunk(\n          grpc_respond_bytes.grpc_respond_unstructured_bytes().data(i).begin(),\n          grpc_respond_bytes.grpc_respond_unstructured_bytes().data(i).end());\n      bufferList.push_back(chunk);\n    }\n    ENVOY_LOG_MISC(trace, \"Will respond with arbitrarily generated bytes which have no structure.\");\n    return bufferList;\n  }\n  default: // shouldn't hit\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace\n\nvoid HttpHealthCheckFuzz::allocHttpHealthCheckerFromProto(\n    const envoy::config::core::v3::HealthCheck& config) {\n  health_checker_ = std::make_shared<TestHttpHealthCheckerImpl>(\n      *cluster_, config, dispatcher_, runtime_, random_,\n      HealthCheckEventLoggerPtr(event_logger_storage_.release()));\n  ENVOY_LOG_MISC(trace, \"Created Test Http Health Checker\");\n}\n\nvoid 
HttpHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase input) {\n  allocHttpHealthCheckerFromProto(input.health_check_config());\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillByDefault(testing::Return(input.http_verify_cluster()));\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  // This sets up the possibility of testing hosts that never become healthy\n  if (input.start_failed()) {\n    cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet(\n        Host::HealthFlag::FAILED_ACTIVE_HC);\n  }\n  health_checker_->start();\n  ON_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillByDefault(testing::Return(45000));\n  // If has an initial jitter, this calls onIntervalBase and finishes startup\n  if (DurationUtil::durationToMilliseconds(input.health_check_config().initial_jitter()) != 0) {\n    test_sessions_[0]->interval_timer_->invokeCallback();\n  }\n  reuse_connection_ =\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(input.health_check_config(), reuse_connection, true);\n}\n\nvoid HttpHealthCheckFuzz::respond(test::common::upstream::Respond respond, bool last_action) {\n  // Timeout timer needs to be explicitly enabled, usually by onIntervalBase() (Callback on interval\n  // timer).\n  if (!test_sessions_[0]->timeout_timer_->enabled_) {\n    ENVOY_LOG_MISC(trace, \"Timeout timer is disabled. 
Skipping response.\");\n    return;\n  }\n\n  const test::fuzz::Headers& headers = respond.http_respond().headers();\n  uint64_t status = respond.http_respond().status();\n\n  std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers =\n      std::make_unique<Http::TestResponseHeaderMapImpl>(\n          Fuzz::fromHeaders<Http::TestResponseHeaderMapImpl>(headers, {}, {}));\n\n  response_headers->setStatus(status);\n\n  // Responding with http can cause client to close, if so create a new one.\n  bool client_will_close = false;\n  if (response_headers->Connection()) {\n    client_will_close =\n        absl::EqualsIgnoreCase(response_headers->Connection()->value().getStringView(),\n                               Http::Headers::get().ConnectionValues.Close);\n  } else if (response_headers->ProxyConnection()) {\n    client_will_close =\n        absl::EqualsIgnoreCase(response_headers->ProxyConnection()->value().getStringView(),\n                               Http::Headers::get().ConnectionValues.Close);\n  }\n\n  ENVOY_LOG_MISC(trace, \"Responded headers {}\", *response_headers.get());\n  test_sessions_[0]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), true);\n\n  // Interval timer gets turned on from decodeHeaders()\n  if ((!reuse_connection_ || client_will_close) && !last_action) {\n    ENVOY_LOG_MISC(trace, \"Creating client and stream because shouldClose() is true\");\n    triggerIntervalTimer(true);\n  }\n}\n\nvoid HttpHealthCheckFuzz::triggerIntervalTimer(bool expect_client_create) {\n  // Interval timer needs to be explicitly enabled, usually by decodeHeaders.\n  if (!test_sessions_[0]->interval_timer_->enabled_) {\n    ENVOY_LOG_MISC(trace, \"Interval timer is disabled. 
Skipping trigger interval timer.\");\n    return;\n  }\n  if (expect_client_create) {\n    expectClientCreate(0);\n  }\n  expectStreamCreate(0);\n  ENVOY_LOG_MISC(trace, \"Triggered interval timer\");\n  test_sessions_[0]->interval_timer_->invokeCallback();\n}\n\nvoid HttpHealthCheckFuzz::triggerTimeoutTimer(bool last_action) {\n  // Timeout timer needs to be explicitly enabled, usually by a call to onIntervalBase().\n  if (!test_sessions_[0]->timeout_timer_->enabled_) {\n    ENVOY_LOG_MISC(trace, \"Timeout timer is disabled. Skipping trigger timeout timer.\");\n    return;\n  }\n  ENVOY_LOG_MISC(trace, \"Triggered timeout timer\");\n  test_sessions_[0]->timeout_timer_->invokeCallback(); // This closes the client, turns off timeout\n                                                       // and enables interval\n  if (!last_action) {\n    ENVOY_LOG_MISC(trace, \"Creating client and stream from network timeout\");\n    triggerIntervalTimer(true);\n  }\n}\n\nvoid HttpHealthCheckFuzz::raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) {\n  test_sessions_[0]->client_connection_->raiseEvent(event_type);\n  if (!last_action && event_type != Network::ConnectionEvent::Connected) {\n    ENVOY_LOG_MISC(trace, \"Creating client and stream from close event\");\n    triggerIntervalTimer(\n        true); // Interval timer is guaranteed to be enabled from a close event - calls\n               // onResetStream which handles failure, turning interval timer on and timeout off\n  }\n}\n\nvoid TcpHealthCheckFuzz::allocTcpHealthCheckerFromProto(\n    const envoy::config::core::v3::HealthCheck& config) {\n  health_checker_ = std::make_shared<TcpHealthCheckerImpl>(\n      *cluster_, config, dispatcher_, runtime_, random_,\n      HealthCheckEventLoggerPtr(event_logger_storage_.release()));\n  ENVOY_LOG_MISC(trace, \"Created Tcp Health Checker\");\n}\n\nvoid TcpHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase input) {\n  
allocTcpHealthCheckerFromProto(input.health_check_config());\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  health_checker_->start();\n  reuse_connection_ =\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(input.health_check_config(), reuse_connection, true);\n  // The Receive proto message has a validation that if there is a receive field, the text field, a\n  // string representing the hex encoded payload has a least one byte.\n  if (input.health_check_config().tcp_health_check().receive_size() != 0) {\n    ENVOY_LOG_MISC(trace, \"Health Checker is only testing to connect\");\n    empty_response_ = false;\n  }\n  // Clang tidy throws an error here in regards to a potential leak. It seems to have something to\n  // do with shared_ptr and possible cycles in regards to the clusters host objects. Since all this\n  // test class directly uses the unit test class that has been in master for a long time, this is\n  // likely a false positive.\n  if (DurationUtil::durationToMilliseconds(input.health_check_config().initial_jitter()) != 0) {\n    interval_timer_->invokeCallback();\n  }\n} // NOLINT(clang-analyzer-cplusplus.NewDeleteLeaks)\n\nvoid TcpHealthCheckFuzz::respond(test::common::upstream::Respond respond, bool last_action) {\n  std::string data = respond.tcp_respond().data();\n  if (!timeout_timer_->enabled_) {\n    ENVOY_LOG_MISC(trace, \"Timeout timer is disabled. Skipping response.\");\n    return;\n  }\n  Buffer::OwnedImpl response;\n  response.add(data);\n\n  ENVOY_LOG_MISC(trace, \"Responded with {}. Length (in bytes) = {}. This is the string passed in.\",\n                 data, data.length());\n  read_filter_->onData(response, true);\n\n  // The interval timer may not be on. If it's not on, return. An http response will automatically\n  // turn on interval and turn off timeout, but for tcp it doesn't if the data doesn't match. 
If the\n  // response doesn't match, it only sets the host to unhealthy. If it does match, it will turn\n  // timeout off and interval on.\n  if (!reuse_connection_ && interval_timer_->enabled_ && !last_action) {\n    triggerIntervalTimer(true);\n  }\n}\n\nvoid TcpHealthCheckFuzz::triggerIntervalTimer(bool expect_client_create) {\n  if (!interval_timer_->enabled_) {\n    ENVOY_LOG_MISC(trace, \"Interval timer is disabled. Skipping trigger interval timer.\");\n    return;\n  }\n  if (expect_client_create) {\n    ENVOY_LOG_MISC(trace, \"Creating client\");\n    expectClientCreate();\n  }\n  ENVOY_LOG_MISC(trace, \"Triggered interval timer\");\n  interval_timer_->invokeCallback();\n}\n\nvoid TcpHealthCheckFuzz::triggerTimeoutTimer(bool last_action) {\n  if (!timeout_timer_->enabled_) {\n    ENVOY_LOG_MISC(trace, \"Timeout timer is disabled. Skipping trigger timeout timer.\");\n    return;\n  }\n  ENVOY_LOG_MISC(trace, \"Triggered timeout timer\");\n  timeout_timer_->invokeCallback(); // This closes the client, turns off timeout\n                                    // and enables interval\n  if (!last_action) {\n    ENVOY_LOG_MISC(trace, \"Will create client and stream from network timeout\");\n    triggerIntervalTimer(true);\n  }\n}\n\nvoid TcpHealthCheckFuzz::raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) {\n  // On a close event, the health checker will call handleFailure if expect_close_ is false. This is\n  // set by multiple code paths. handleFailure() turns on interval and turns off timeout. 
However,\n  // other action of the fuzzer account for this by explicitly invoking a client after\n  // expect_close_ gets set to true, turning expect_close_ back to false.\n  connection_->raiseEvent(event_type);\n  if (!last_action && event_type != Network::ConnectionEvent::Connected) {\n    if (!interval_timer_->enabled_) {\n      return;\n    }\n    ENVOY_LOG_MISC(trace, \"Will create client from close event\");\n    triggerIntervalTimer(true);\n  }\n\n  // In the specific case of:\n  // https://github.com/envoyproxy/envoy/blob/master/source/common/upstream/health_checker_impl.cc#L489\n  // This blows away client, should create a new one\n  if (event_type == Network::ConnectionEvent::Connected && empty_response_) {\n    ENVOY_LOG_MISC(trace, \"Will create client from connected event and empty response.\");\n    triggerIntervalTimer(true);\n  }\n}\n\nvoid GrpcHealthCheckFuzz::allocGrpcHealthCheckerFromProto(\n    const envoy::config::core::v3::HealthCheck& config) {\n  health_checker_ = std::make_shared<TestGrpcHealthCheckerImpl>(\n      *cluster_, config, dispatcher_, runtime_, random_,\n      HealthCheckEventLoggerPtr(event_logger_storage_.release()));\n  ENVOY_LOG_MISC(trace, \"Created Test Grpc Health Checker\");\n}\n\nvoid GrpcHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase input) {\n  allocGrpcHealthCheckerFromProto(input.health_check_config());\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  health_checker_->start();\n  ON_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillByDefault(testing::Return(45000));\n\n  if (DurationUtil::durationToMilliseconds(input.health_check_config().initial_jitter()) != 0) {\n    test_sessions_[0]->interval_timer_->invokeCallback();\n  }\n\n  reuse_connection_ =\n      PROTOBUF_GET_WRAPPED_OR_DEFAULT(input.health_check_config(), reuse_connection, 
true);\n}\n\n// Logic from respondResponseSpec() in unit tests\nvoid GrpcHealthCheckFuzz::respond(test::common::upstream::Respond respond, bool last_action) {\n  const test::common::upstream::GrpcRespond& grpc_respond = respond.grpc_respond();\n  if (!test_sessions_[0]->timeout_timer_->enabled_) {\n    ENVOY_LOG_MISC(trace, \"Timeout timer is disabled. Skipping response.\");\n    return;\n  }\n  // These booleans help figure out when to end the stream\n  const bool has_data = grpc_respond.has_grpc_respond_bytes();\n  // Didn't hard code grpc-status to fully explore search space provided by codecs.\n\n  // If the fuzzing engine generates a grpc_respond_trailers message, there is a validation\n  // that trailers (test.fuzz.Headers) must be present. If it is present, that means there is\n  // trailers that will be passed to decodeTrailers(). An empty trailer map counts as having\n  // trailers.\n  const bool has_trailers = grpc_respond.has_grpc_respond_trailers();\n\n  ENVOY_LOG_MISC(trace, \"Has data: {}. Has trailers: {}.\", has_data, has_trailers);\n\n  const bool end_stream_on_headers = !has_data && !has_trailers;\n\n  std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers =\n      std::make_unique<Http::TestResponseHeaderMapImpl>(\n          Fuzz::fromHeaders<Http::TestResponseHeaderMapImpl>(\n              grpc_respond.grpc_respond_headers().headers(), {}, {}));\n\n  response_headers->setStatus(grpc_respond.grpc_respond_headers().status());\n\n  ENVOY_LOG_MISC(trace, \"Responded headers {}\", *response_headers.get());\n  test_sessions_[0]->stream_response_callbacks_->decodeHeaders(std::move(response_headers),\n                                                               end_stream_on_headers);\n\n  // If the interval timer is enabled, that means that the rpc is complete, as decodeHeaders hit a\n  // certain branch that called onRpcComplete(), logically representing a completed rpc call. 
Thus,\n  // skip the next responses until explicitly invoking interval timer as cleanup.\n  if (has_data && !test_sessions_[0]->interval_timer_->enabled_) {\n    std::vector<std::vector<uint8_t>> bufferList =\n        makeBufferListToRespondWith(grpc_respond.grpc_respond_bytes());\n    // If the interval timer is enabled, that means that the rpc is complete, as decodeData hit a\n    // certain branch that called onRpcComplete(), logically representing a completed rpc call.\n    // Thus, skip the next responses until explicitly invoking interval timer as cleanup.\n    for (size_t i = 0; i < bufferList.size() && !test_sessions_[0]->interval_timer_->enabled_;\n         ++i) {\n      const bool end_stream_on_data = !has_trailers && i == bufferList.size() - 1;\n      const auto data =\n          std::make_unique<Buffer::OwnedImpl>(bufferList[i].data(), bufferList[i].size());\n      ENVOY_LOG_MISC(trace, \"Responded with data\");\n      test_sessions_[0]->stream_response_callbacks_->decodeData(*data, end_stream_on_data);\n    }\n  }\n\n  // If the interval timer is enabled, that means that the rpc is complete, as decodeData hit a\n  // certain branch that called onRpcComplete(), logically representing a completed rpc call. 
Thus,\n  // skip responding with trailers until explicitly invoking interval timer as cleanup.\n  if (has_trailers && !test_sessions_[0]->interval_timer_->enabled_) {\n    std::unique_ptr<Http::TestResponseTrailerMapImpl> response_trailers =\n        std::make_unique<Http::TestResponseTrailerMapImpl>(\n            Fuzz::fromHeaders<Http::TestResponseTrailerMapImpl>(\n                grpc_respond.grpc_respond_trailers().trailers(), {}, {}));\n\n    ENVOY_LOG_MISC(trace, \"Responded trailers {}\", *response_trailers.get());\n\n    test_sessions_[0]->stream_response_callbacks_->decodeTrailers(std::move(response_trailers));\n  }\n\n  // This means that the response did not represent a full rpc response.\n  if (!test_sessions_[0]->interval_timer_->enabled_) {\n    return;\n  }\n\n  // Once it gets here the health checker will have called onRpcComplete(), logically representing a\n  // completed rpc call, which blows away client if reuse connection is set to false or the health\n  // checker had a goaway event with no error flag.\n  if (!last_action) {\n    ENVOY_LOG_MISC(trace, \"Triggering interval timer after response\");\n    triggerIntervalTimer(!reuse_connection_ || received_no_error_goaway_);\n    received_no_error_goaway_ = false; // from resetState()\n  }\n}\n\nvoid GrpcHealthCheckFuzz::triggerIntervalTimer(bool expect_client_create) {\n  if (!test_sessions_[0]->interval_timer_->enabled_) {\n    ENVOY_LOG_MISC(trace, \"Interval timer is disabled. Skipping trigger interval timer.\");\n    return;\n  }\n  if (expect_client_create) {\n    expectClientCreate(0);\n    ENVOY_LOG_MISC(trace, \"Created client\");\n  }\n  expectStreamCreate(0);\n  ENVOY_LOG_MISC(trace, \"Created stream\");\n  test_sessions_[0]->interval_timer_->invokeCallback();\n}\n\nvoid GrpcHealthCheckFuzz::triggerTimeoutTimer(bool last_action) {\n  if (!test_sessions_[0]->timeout_timer_->enabled_) {\n    ENVOY_LOG_MISC(trace, \"Timeout timer is disabled. 
Skipping trigger timeout timer.\");\n    return;\n  }\n  ENVOY_LOG_MISC(trace, \"Triggered timeout timer\");\n  test_sessions_[0]->timeout_timer_->invokeCallback(); // This closes the client, turns off\n                                                       // timeout and enables interval\n\n  if ((!reuse_connection_ || received_no_error_goaway_) && !last_action) {\n    ENVOY_LOG_MISC(trace, \"Triggering interval timer after timeout.\");\n    triggerIntervalTimer(true);\n  } else {\n    received_no_error_goaway_ = false; // from resetState()\n  }\n}\n\nvoid GrpcHealthCheckFuzz::raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) {\n  test_sessions_[0]->client_connection_->raiseEvent(event_type);\n  if (!last_action && event_type != Network::ConnectionEvent::Connected) {\n    // Close events will always blow away the client\n    ENVOY_LOG_MISC(trace, \"Triggering interval timer after close event\");\n    // Interval timer is guaranteed to be enabled from a close event - calls\n    // onResetStream which handles failure, turning interval timer on and timeout off\n    triggerIntervalTimer(true);\n  }\n}\n\nvoid GrpcHealthCheckFuzz::raiseGoAway(bool no_error) {\n  if (no_error) {\n    test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError);\n    // Will cause other events to blow away client, because this is a \"graceful\" go away\n    received_no_error_goaway_ = true;\n  } else {\n    // go away events without no error flag explicitly blow away client\n    test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::Other);\n    triggerIntervalTimer(true);\n  }\n}\n\nNetwork::ConnectionEvent\nHealthCheckFuzz::getEventTypeFromProto(const test::common::upstream::RaiseEvent& event) {\n  switch (event) {\n  case test::common::upstream::RaiseEvent::CONNECTED: {\n    return Network::ConnectionEvent::Connected;\n  }\n  case test::common::upstream::RaiseEvent::REMOTE_CLOSE: {\n    return 
Network::ConnectionEvent::RemoteClose;\n  }\n  case test::common::upstream::RaiseEvent::LOCAL_CLOSE: {\n    return Network::ConnectionEvent::LocalClose;\n  }\n  default: // shouldn't hit\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nvoid HealthCheckFuzz::initializeAndReplay(test::common::upstream::HealthCheckTestCase input) {\n  try {\n    initialize(input);\n  } catch (EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n    return;\n  }\n  replay(input);\n}\n\nvoid HealthCheckFuzz::replay(const test::common::upstream::HealthCheckTestCase& input) {\n  constexpr auto max_actions = 64;\n  for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) {\n    const auto& event = input.actions(i);\n    // The last_action boolean prevents final actions from creating a client and stream that will\n    // never be used.\n    const bool last_action = i == std::min(max_actions, input.actions().size()) - 1;\n    ENVOY_LOG_MISC(trace, \"Action: {}\", event.DebugString());\n    switch (event.action_selector_case()) {\n    case test::common::upstream::Action::kRespond: {\n      respond(event.respond(), last_action);\n      break;\n    }\n    case test::common::upstream::Action::kTriggerIntervalTimer: {\n      triggerIntervalTimer(false);\n      break;\n    }\n    case test::common::upstream::Action::kTriggerTimeoutTimer: {\n      triggerTimeoutTimer(last_action);\n      break;\n    }\n    case test::common::upstream::Action::kRaiseEvent: {\n      raiseEvent(getEventTypeFromProto(event.raise_event()), last_action);\n      break;\n    }\n    default:\n      break;\n    }\n  }\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/health_check_fuzz.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"test/common/upstream/health_check_fuzz.pb.validate.h\"\n#include \"test/common/upstream/health_checker_impl_test_utils.h\"\n#include \"test/fuzz/common.pb.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass HealthCheckFuzz {\npublic:\n  HealthCheckFuzz() = default;\n  // This will delegate to the specific classes\n  void initializeAndReplay(test::common::upstream::HealthCheckTestCase input);\n  enum class Type {\n    HTTP,\n    TCP,\n    GRPC,\n  };\n\n  // The specific implementations of respond look into the respond proto, which has all three types\n  // of response\n  virtual void respond(test::common::upstream::Respond respond, bool last_action) PURE;\n\n  virtual void initialize(test::common::upstream::HealthCheckTestCase input) PURE;\n  virtual void triggerIntervalTimer(bool expect_client_create) PURE;\n  virtual void triggerTimeoutTimer(bool last_action) PURE;\n  virtual void raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) PURE;\n\n  virtual ~HealthCheckFuzz() = default;\n\nprivate:\n  Network::ConnectionEvent getEventTypeFromProto(const test::common::upstream::RaiseEvent& event);\n\n  void replay(const test::common::upstream::HealthCheckTestCase& input);\n};\n\nclass HttpHealthCheckFuzz : public HealthCheckFuzz, HttpHealthCheckerImplTestBase {\npublic:\n  void allocHttpHealthCheckerFromProto(const envoy::config::core::v3::HealthCheck& config);\n  void initialize(test::common::upstream::HealthCheckTestCase input) override;\n  void respond(test::common::upstream::Respond respond, bool last_action) override;\n  void triggerIntervalTimer(bool expect_client_create) override;\n  void triggerTimeoutTimer(bool last_action) override;\n  void raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) override;\n  ~HttpHealthCheckFuzz() override = default;\n\n  // Determines whether the client gets reused or not after response\n  bool reuse_connection_ = 
true;\n};\n\nclass TcpHealthCheckFuzz : public HealthCheckFuzz, TcpHealthCheckerImplTestBase {\npublic:\n  void allocTcpHealthCheckerFromProto(const envoy::config::core::v3::HealthCheck& config);\n  void initialize(test::common::upstream::HealthCheckTestCase input) override;\n  void respond(test::common::upstream::Respond respond, bool last_action) override;\n  void triggerIntervalTimer(bool expect_client_create) override;\n  void triggerTimeoutTimer(bool last_action) override;\n  void raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) override;\n  ~TcpHealthCheckFuzz() override = default;\n\n  // Determines whether the client gets reused or not after response\n  bool reuse_connection_ = true;\n\n  // Empty response induces a specific codepath in raiseEvent in case of connected, ignores the\n  // binary field and only uses text.\n  bool empty_response_ = true;\n};\n\nclass GrpcHealthCheckFuzz : public HealthCheckFuzz, GrpcHealthCheckerImplTestBaseUtils {\npublic:\n  void allocGrpcHealthCheckerFromProto(const envoy::config::core::v3::HealthCheck& config);\n  void initialize(test::common::upstream::HealthCheckTestCase input) override;\n  // This has three components, headers, raw bytes, and trailers\n  void respond(test::common::upstream::Respond respond, bool last_action) override;\n  void triggerIntervalTimer(bool expect_client_create) override;\n  void triggerTimeoutTimer(bool last_action) override;\n  void raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) override;\n  void raiseGoAway(bool no_error);\n  ~GrpcHealthCheckFuzz() override = default;\n\n  // Determines whether the client gets reused or not after response\n  bool reuse_connection_ = true;\n\n  // Determines whether a client closes after responds and timeouts. Exactly maps to\n  // received_no_error_goaway_ in source code.\n  bool received_no_error_goaway_ = false;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/health_check_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.common.upstream;\n\nimport \"validate/validate.proto\";\nimport \"test/fuzz/common.proto\";\nimport \"envoy/config/core/v3/health_check.proto\";\nimport \"google/protobuf/empty.proto\";\n\nmessage HttpRespond {\n  test.fuzz.Headers headers = 1;\n  uint64 status = 2 [(validate.rules).uint64.lt = 1000];\n}\n\nmessage TcpRespond {\n  bytes data = 1;\n}\n\nenum ServingStatus {\n  UNKNOWN = 0;\n  SERVING = 1;\n  NOT_SERVING = 2;\n  SERVICE_UNKNOWN = 3; // Used only by the Watch method.\n}\n\nmessage GrpcRespondHeaders {\n  test.fuzz.Headers headers = 1;\n  uint64 status = 2 [(validate.rules).uint64.lt = 1000];\n}\n\nmessage GrpcRespondUnstructuredBytes {\n  repeated bytes data = 1;\n}\n\nmessage GrpcRespondBytes {\n  oneof grpc_respond_bytes_selector {\n    option (validate.required) = true;\n    //Structured response, which will get converted to raw bytes\n    ServingStatus status = 1 [(validate.rules).enum.defined_only = true];\n    GrpcRespondUnstructuredBytes grpc_respond_unstructured_bytes = 2;\n  }\n  //This value will determine how many fixed bytes will make up a structured response\n  //It will be moded against the byte size of the serialized response.\n  uint64 chunk_size_for_structured_response = 3;\n}\n\nmessage GrpcRespondTrailers {\n  test.fuzz.Headers trailers = 1 [(validate.rules).message.required = true];\n}\n\nmessage GrpcRespond {\n  GrpcRespondHeaders grpc_respond_headers = 1 [(validate.rules).message.required = true];\n  GrpcRespondBytes grpc_respond_bytes = 2;\n  //Having this as a message allows the scenario with no trailers\n  GrpcRespondTrailers grpc_respond_trailers = 3;\n}\n\n/*\nThe three types of health checkers (HTTP, TCP, and gRPC) share a lot of logic, thus allowing the fuzzer to use a single action\nstream across all three. However, the main difference comes from the type of data parsed as a response. 
Switching across the\nhealth checker type allows the fuzzer to choose the correct action sequence.\n*/\nmessage Respond {\n  HttpRespond http_respond = 1 [(validate.rules).message.required = true];\n  TcpRespond tcp_respond = 2 [(validate.rules).message.required = true];\n  GrpcRespond grpc_respond = 3 [(validate.rules).message.required = true];\n}\n\nenum RaiseEvent {\n  CONNECTED = 0;\n  REMOTE_CLOSE = 1;\n  LOCAL_CLOSE = 2;\n}\n\nmessage Action {\n  oneof action_selector {\n    option (validate.required) = true;\n    Respond respond = 1;\n    google.protobuf.Empty trigger_interval_timer = 2;\n    //TODO: respondBody, respondTrailers\n    google.protobuf.Empty trigger_timeout_timer = 3;\n    RaiseEvent raise_event = 4 [(validate.rules).enum.defined_only = true];\n  }\n}\n\nmessage HealthCheckTestCase {\n  envoy.config.core.v3.HealthCheck health_check_config = 1\n      [(validate.rules).message.required = true];\n  repeated Action actions = 2;\n  bool http_verify_cluster = 3; //Determines if verify cluster setting is on\n  bool start_failed = 4;\n}\n"
  },
  {
    "path": "test/common/upstream/health_check_fuzz_test.cc",
    "content": "#include \"envoy/config/core/v3/health_check.pb.validate.h\"\n\n#include \"test/common/upstream/health_check_fuzz.h\"\n#include \"test/common/upstream/health_check_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nDEFINE_PROTO_FUZZER(const test::common::upstream::HealthCheckTestCase input) {\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  }\n\n  std::unique_ptr<HealthCheckFuzz> health_check_fuzz;\n\n  switch (input.health_check_config().health_checker_case()) {\n  case envoy::config::core::v3::HealthCheck::kHttpHealthCheck: {\n    health_check_fuzz = std::make_unique<HttpHealthCheckFuzz>();\n    break;\n  }\n  case envoy::config::core::v3::HealthCheck::kTcpHealthCheck: {\n    health_check_fuzz = std::make_unique<TcpHealthCheckFuzz>();\n    break;\n  }\n  case envoy::config::core::v3::HealthCheck::kGrpcHealthCheck: {\n    health_check_fuzz = std::make_unique<GrpcHealthCheckFuzz>();\n    break;\n  }\n  default: // Handles custom health checker\n    ENVOY_LOG_MISC(trace, \"Custom Health Checker currently unsupported, skipping\");\n    return;\n  }\n\n  health_check_fuzz->initializeAndReplay(input);\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/health_checker_impl_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.validate.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/headers.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/common/upstream/health_checker_impl_test_utils.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/transport_socket_match.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::DoAll;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nenvoy::config::core::v3::HealthCheck createGrpcHealthCheckConfig() {\n  envoy::config::core::v3::HealthCheck health_check;\n  
health_check.mutable_timeout()->set_seconds(1);\n  health_check.mutable_interval()->set_seconds(1);\n  health_check.mutable_unhealthy_threshold()->set_value(2);\n  health_check.mutable_healthy_threshold()->set_value(2);\n  health_check.mutable_grpc_health_check();\n  return health_check;\n}\n\nTEST(HealthCheckerFactoryTest, GrpcHealthCheckHTTP2NotConfiguredException) {\n  NiceMock<Upstream::MockClusterMockPrioritySet> cluster;\n  EXPECT_CALL(*cluster.info_, features()).WillRepeatedly(Return(0));\n\n  Runtime::MockLoader runtime;\n  Event::MockDispatcher dispatcher;\n  AccessLog::MockAccessLogManager log_manager;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor;\n  Api::MockApi api;\n\n  EXPECT_THROW_WITH_MESSAGE(\n      HealthCheckerFactory::create(createGrpcHealthCheckConfig(), cluster, runtime, dispatcher,\n                                   log_manager, validation_visitor, api),\n      EnvoyException, \"fake_cluster cluster must support HTTP/2 for gRPC healthchecking\");\n}\n\nTEST(HealthCheckerFactoryTest, CreateGrpc) {\n\n  NiceMock<Upstream::MockClusterMockPrioritySet> cluster;\n  EXPECT_CALL(*cluster.info_, features())\n      .WillRepeatedly(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n  Runtime::MockLoader runtime;\n  Event::MockDispatcher dispatcher;\n  AccessLog::MockAccessLogManager log_manager;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor;\n  NiceMock<Api::MockApi> api;\n\n  EXPECT_NE(nullptr,\n            dynamic_cast<GrpcHealthCheckerImpl*>(\n                HealthCheckerFactory::create(createGrpcHealthCheckConfig(), cluster, runtime,\n                                             dispatcher, log_manager, validation_visitor, api)\n                    .get()));\n}\n\nclass HttpHealthCheckerImplTest : public testing::Test, public HttpHealthCheckerImplTestBase {\npublic:\n  void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) {\n    health_checker_ = 
std::make_shared<TestHttpHealthCheckerImpl>(\n        *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_,\n        HealthCheckEventLoggerPtr(event_logger_storage_.release()));\n  }\n\n  void addCompletionCallback() {\n    health_checker_->addHostCheckCompleteCb(\n        [this](HostSharedPtr host, HealthTransition changed_state) -> void {\n          onHostStatus(host, changed_state);\n        });\n  }\n\n  void setupNoServiceValidationHCWithHttp2() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n      codec_client_type: Http2\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupInitialJitter() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    initial_jitter: 5s\n    interval_jitter_percent: 40\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupIntervalJitterPercent() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter_percent: 40\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupNoServiceValidationHC() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      
service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupNoServiceValidationHCOneUnhealthy() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupNoServiceValidationHCAlwaysLogFailure() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    always_log_health_check_failures: true\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupNoServiceValidationNoReuseConnectionHC() {\n    std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    reuse_connection: false\n    http_health_check:\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupHealthCheckIntervalOverridesHC() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    unhealthy_interval: 2s\n    unhealthy_edge_interval: 3s\n    healthy_edge_interval: 4s\n    no_traffic_interval: 5s\n    interval_jitter: 0s\n    unhealthy_threshold: 3\n    healthy_threshold: 3\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupServiceValidationHC() {\n    std::string yaml = R\"EOF(\n    
timeout: 1s\n    interval: 1s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupDeprecatedServiceNameValidationHC(const std::string& prefix) {\n    std::string yaml = fmt::format(R\"EOF(\n    timeout: 1s\n    interval: 1s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: {0}\n      path: /healthcheck\n    )EOF\",\n                                   prefix);\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupServicePrefixPatternValidationHC() {\n    std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupServiceExactPatternValidationHC() {\n    std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        exact: locations-production-iad\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupServiceRegexPatternValidationHC() {\n    std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        safe_regex:\n          google_re2: {}\n          regex: 'locations-.*-.*$'\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void 
setupServiceValidationWithCustomHostValueHC(const std::string& host) {\n    std::string yaml = fmt::format(R\"EOF(\n    timeout: 1s\n    interval: 1s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n      host: {0}\n    )EOF\",\n                                   host);\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig\n  makeHealthCheckConfig(const uint32_t port_value) {\n    envoy::config::endpoint::v3::Endpoint::HealthCheckConfig config;\n    config.set_port_value(port_value);\n    return config;\n  }\n\n  void appendTestHosts(std::shared_ptr<MockClusterMockPrioritySet> cluster,\n                       const HostWithHealthCheckMap& hosts, const std::string& protocol = \"tcp://\",\n                       const uint32_t priority = 0) {\n    for (const auto& host : hosts) {\n      cluster->prioritySet().getMockHostSet(priority)->hosts_.emplace_back(\n          makeTestHost(cluster->info_, fmt::format(\"{}{}\", protocol, host.first), host.second));\n    }\n  }\n\n  void setupServiceValidationWithAdditionalHeaders() {\n    std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n      host: \"www.envoyproxy.io\"\n      request_headers_to_add:\n        - header:\n            key: x-envoy-ok\n            value: ok\n        - header:\n            key: x-envoy-cool\n            value: cool\n        - header:\n            key: x-envoy-awesome\n            value: awesome\n        # The following entry replaces the current user-agent.\n        - header:\n            key: user-agent\n            value: CoolEnvoy/HC\n          append: false\n        - header:\n          
  key: x-protocol\n            value: \"%PROTOCOL%\"\n        - header:\n            key: x-upstream-metadata\n            value: \"%UPSTREAM_METADATA([\\\"namespace\\\", \\\"key\\\"])%\"\n        - header:\n            key: x-downstream-remote-address\n            value: \"%DOWNSTREAM_REMOTE_ADDRESS%\"\n        - header:\n            key: x-downstream-remote-address-without-port\n            value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n        - header:\n            key: x-downstream-local-address\n            value: \"%DOWNSTREAM_LOCAL_ADDRESS%\"\n        - header:\n            key: x-downstream-local-address-without-port\n            value: \"%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%\"\n        - header:\n            key: x-start-time\n            value: \"%START_TIME(%s.%9f)%\"\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupServiceValidationWithoutUserAgent() {\n    std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n      host: \"www.envoyproxy.io\"\n      # The following entry removes the default \"user-agent\" header.\n      request_headers_to_remove: [\"user-agent\"]\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void respond(size_t index, const std::string& code, bool conn_close, bool proxy_close = false,\n               bool body = false, bool trailers = false,\n               const absl::optional<std::string>& service_cluster = absl::optional<std::string>(),\n               bool degraded = false) {\n    std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers(\n        new Http::TestResponseHeaderMapImpl{{\":status\", code}});\n\n    if (degraded) {\n      response_headers->setEnvoyDegraded(1);\n    }\n\n    if (service_cluster) {\n      
response_headers->addCopy(Http::Headers::get().EnvoyUpstreamHealthCheckedCluster,\n                                service_cluster.value());\n    }\n    if (conn_close) {\n      response_headers->addCopy(\"connection\", \"close\");\n    }\n    if (proxy_close) {\n      response_headers->addCopy(\"proxy-connection\", \"close\");\n    }\n\n    test_sessions_[index]->stream_response_callbacks_->decodeHeaders(std::move(response_headers),\n                                                                     !body && !trailers);\n    if (body) {\n      Buffer::OwnedImpl response_data;\n      test_sessions_[index]->stream_response_callbacks_->decodeData(response_data, !trailers);\n    }\n\n    if (trailers) {\n      test_sessions_[index]->stream_response_callbacks_->decodeTrailers(\n          Http::ResponseTrailerMapPtr{new Http::TestResponseTrailerMapImpl{{\"some\", \"trailer\"}}});\n    }\n  }\n\n  void expectSuccessStartFailedFailFirst(\n      const absl::optional<std::string>& health_checked_cluster = absl::optional<std::string>()) {\n    cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n        makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n    cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet(\n        Host::HealthFlag::FAILED_ACTIVE_HC);\n    expectSessionCreate();\n    expectStreamCreate(0);\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n    health_checker_->start();\n\n    // Test that failing first disables fast success.\n    EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n    EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n    EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n    respond(0, \"503\", false, false, false, false, health_checked_cluster);\n    EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n        Host::HealthFlag::FAILED_ACTIVE_HC));\n   
 EXPECT_EQ(Host::Health::Unhealthy,\n              cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n    expectStreamCreate(0);\n    test_sessions_[0]->interval_timer_->invokeCallback();\n\n    EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n    EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n    respond(0, \"200\", false, false, false, false, health_checked_cluster);\n    EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n        Host::HealthFlag::FAILED_ACTIVE_HC));\n    EXPECT_EQ(Host::Health::Unhealthy,\n              cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n    expectStreamCreate(0);\n    test_sessions_[0]->interval_timer_->invokeCallback();\n\n    EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n    EXPECT_CALL(event_logger_, logAddHealthy(_, _, false));\n    EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n    respond(0, \"200\", false, false, false, false, health_checked_cluster);\n    EXPECT_EQ(Host::Health::Healthy,\n              cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n  }\n\n  MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state));\n};\n\nTEST_F(HttpHealthCheckerImplTest, Success) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, Degraded) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(2);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillRepeatedly(Return(45000));\n\n  // We start off as healthy, and should go degraded after receiving the degraded health response.\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  EXPECT_CALL(event_logger_, logDegraded(_, _));\n  respond(0, \"200\", false, false, true, false, {}, true);\n  EXPECT_EQ(Host::Health::Degraded, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  // Then, after receiving a regular health check response we should go back to healthy.\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectStreamCreate(0);\n  EXPECT_CALL(runtime_.snapshot_, 
getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->interval_timer_->invokeCallback();\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(event_logger_, logNoLongerDegraded(_, _));\n  respond(0, \"200\", false, false, true, false, {}, false);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessIntervalJitter) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(testing::AnyNumber());\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  for (int i = 0; i < 50000; i += 239) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n    expectStreamCreate(0);\n    test_sessions_[0]->interval_timer_->invokeCallback();\n    // the jitter is 1000ms here\n    EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n                enableTimer(std::chrono::milliseconds(5000 + i % 1000), _));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n    respond(0, \"200\", false, false, true, true);\n  }\n}\n\nTEST_F(HttpHealthCheckerImplTest, InitialJitterNoTraffic) {\n  setupInitialJitter();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(testing::AnyNumber());\n\n  
cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  for (int i = 0; i < 2; i += 1) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n    expectStreamCreate(0);\n    test_sessions_[0]->interval_timer_->invokeCallback();\n    // the jitter is 40% of 5000, so should be 2000\n    EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n                enableTimer(std::chrono::milliseconds(5000 + i % 2000), _));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n    respond(0, \"200\", false, false, true, true);\n  }\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessIntervalJitterPercentNoTraffic) {\n  setupIntervalJitterPercent();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(testing::AnyNumber());\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true, true);\n  EXPECT_EQ(Host::Health::Healthy, 
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  for (int i = 0; i < 50000; i += 239) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n    expectStreamCreate(0);\n    test_sessions_[0]->interval_timer_->invokeCallback();\n    // the jitter is 40% of 5000, so should be 2000\n    EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n                enableTimer(std::chrono::milliseconds(5000 + i % 2000), _));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n    respond(0, \"200\", false, false, true, true);\n  }\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessIntervalJitterPercent) {\n  setupIntervalJitterPercent();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(testing::AnyNumber());\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  for (int i = 0; i < 50000; i += 239) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n    expectStreamCreate(0);\n    test_sessions_[0]->interval_timer_->invokeCallback();\n    // the jitter is 40% of 1000, so should be 400\n    EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n                enableTimer(std::chrono::milliseconds(1000 + i % 400), _));\n    EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n    respond(0, \"200\", 
false, false, true, true);\n  }\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessWithSpurious100Continue) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n\n  std::unique_ptr<Http::TestResponseHeaderMapImpl> continue_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n  test_sessions_[0]->stream_response_callbacks_->decode100ContinueHeaders(\n      std::move(continue_headers));\n\n  respond(0, \"200\", false, false, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessWithSpuriousMetadata) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, 
getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n\n  std::unique_ptr<Http::MetadataMap> metadata_map(new Http::MetadataMap());\n  metadata_map->insert(std::make_pair<std::string, std::string>(\"key\", \"value\"));\n  test_sessions_[0]->stream_response_callbacks_->decodeMetadata(std::move(metadata_map));\n\n  respond(0, \"200\", false, false, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\n// Test host check success with multiple hosts.\nTEST_F(HttpHealthCheckerImplTest, SuccessWithMultipleHosts) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(2);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\"),\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:81\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectSessionCreate();\n  expectStreamCreate(1);\n  EXPECT_CALL(*test_sessions_[1]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _)).Times(2);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .Times(2)\n      .WillRepeatedly(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  EXPECT_CALL(*test_sessions_[1]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n 
 EXPECT_CALL(*test_sessions_[1]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true);\n  respond(1, \"200\", false, false, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[1]->health());\n}\n\n// Test host check success with multiple hosts across multiple priorities.\nTEST_F(HttpHealthCheckerImplTest, SuccessWithMultipleHostSets) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(2);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->prioritySet().getMockHostSet(1)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:81\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectSessionCreate();\n  expectStreamCreate(1);\n  EXPECT_CALL(*test_sessions_[1]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _)).Times(2);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .Times(2)\n      .WillRepeatedly(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  EXPECT_CALL(*test_sessions_[1]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[1]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true);\n  respond(1, \"200\", false, false, true);\n  EXPECT_EQ(Host::Health::Healthy, 
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(1)->hosts_[0]->health());\n}\n\n// Validate that runtime settings can't force a zero lengthy retry duration (and hence livelock).\nTEST_F(HttpHealthCheckerImplTest, ZeroRetryInterval) {\n  const std::string host = \"fake_cluster\";\n  const std::string path = \"/healthcheck\";\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 1s\n    interval_jitter_percent: 40\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n  allocHealthChecker(yaml);\n  addCompletionCallback();\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n        EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _)).WillOnce(Return(0));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _)).WillOnce(Return(0));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1), _));\n  
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nMATCHER_P(ApplicationProtocolListEq, expected, \"\") {\n  const Network::TransportSocketOptionsSharedPtr& options = arg;\n  EXPECT_EQ(options->applicationProtocolListOverride(), std::vector<std::string>{expected});\n  return true;\n}\n\nTEST_F(HttpHealthCheckerImplTest, TlsOptions) {\n  const std::string host = \"fake_cluster\";\n  const std::string path = \"/healthcheck\";\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 1s\n    interval_jitter_percent: 40\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    tls_options:\n      alpn_protocols:\n      - http1\n    )EOF\";\n\n  auto socket_factory = new Network::MockTransportSocketFactory();\n  EXPECT_CALL(*socket_factory, implementsSecureTransport()).WillOnce(Return(true));\n  auto transport_socket_match = new NiceMock<Upstream::MockTransportSocketMatcher>(\n      Network::TransportSocketFactoryPtr(socket_factory));\n  cluster_->info_->transport_socket_matcher_.reset(transport_socket_match);\n\n  EXPECT_CALL(*socket_factory, createTransportSocket(ApplicationProtocolListEq(\"http1\")));\n\n  allocHealthChecker(yaml);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessServiceCheck) {\n  const std::string host = 
\"fake_cluster\";\n  const std::string path = \"/healthcheck\";\n  setupServiceValidationHC();\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n        EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessServicePrefixPatternCheck) {\n  const std::string host = \"fake_cluster\";\n  const std::string path = \"/healthcheck\";\n  setupServicePrefixPatternValidationHC();\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, 
HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n        EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessServiceExactPatternCheck) {\n  const std::string host = \"fake_cluster\";\n  const std::string path = \"/healthcheck\";\n  setupServiceExactPatternValidationHC();\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  
expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n        EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessServiceRegexPatternCheck) {\n  const std::string host = \"fake_cluster\";\n  const std::string path = \"/healthcheck\";\n  setupServiceRegexPatternValidationHC();\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        
EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n        EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\n// This test verifies that when a hostname is set in the endpoint's HealthCheckConfig, it is used in\n// the health check request.\nTEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValueOnTheHost) {\n  const std::string host = \"www.envoyproxy.io\";\n  envoy::config::endpoint::v3::Endpoint::HealthCheckConfig health_check_config;\n  health_check_config.set_hostname(host);\n  auto test_host = std::make_shared<HostImpl>(\n      cluster_->info_, \"\", Network::Utility::resolveUrl(\"tcp://127.0.0.1:80\"), nullptr, 1,\n      envoy::config::core::v3::Locality(), health_check_config, 0,\n      envoy::config::core::v3::UNKNOWN);\n  const std::string path = \"/healthcheck\";\n  setupServiceValidationHC();\n  // Requires non-empty `service_name` in config.\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  
expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\n// This test verifies that when a hostname is set in the endpoint's HealthCheckConfig and in the\n// cluster level configuration, the one in the endpoint takes priority.\nTEST_F(HttpHealthCheckerImplTest,\n       SuccessServiceCheckWithCustomHostValueOnTheHostThatOverridesConfigValue) {\n  const std::string host = \"www.envoyproxy.io\";\n  envoy::config::endpoint::v3::Endpoint::HealthCheckConfig health_check_config;\n  health_check_config.set_hostname(host);\n  auto test_host = std::make_shared<HostImpl>(\n      cluster_->info_, \"\", Network::Utility::resolveUrl(\"tcp://127.0.0.1:80\"), nullptr, 1,\n      envoy::config::core::v3::Locality(), health_check_config, 0,\n      envoy::config::core::v3::UNKNOWN);\n  const std::string path = \"/healthcheck\";\n  // Setup health check config with a different host, to check that we still get the host configured\n  // on the endpoint.\n  setupServiceValidationWithCustomHostValueHC(\"foo.com\");\n  // 
Requires non-empty `service_name` in config.\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValue) {\n  const std::string host = \"www.envoyproxy.io\";\n  const std::string path = \"/healthcheck\";\n  setupServiceValidationWithCustomHostValueHC(host);\n  // Requires non-empty `service_name` in config.\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      
makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAdditionalHeaders) {\n  const Http::LowerCaseString header_ok(\"x-envoy-ok\");\n  const Http::LowerCaseString header_cool(\"x-envoy-cool\");\n  const Http::LowerCaseString header_awesome(\"x-envoy-awesome\");\n  const Http::LowerCaseString upstream_metadata(\"x-upstream-metadata\");\n  const Http::LowerCaseString protocol(\"x-protocol\");\n  const Http::LowerCaseString downstream_remote_address(\"x-downstream-remote-address\");\n  const Http::LowerCaseString downstream_remote_address_without_port(\n      \"x-downstream-remote-address-without-port\");\n  const Http::LowerCaseString downstream_local_address(\"x-downstream-local-address\");\n  const Http::LowerCaseString downstream_local_address_without_port(\n      
\"x-downstream-local-address-without-port\");\n  const Http::LowerCaseString start_time(\"x-start-time\");\n\n  const std::string value_ok = \"ok\";\n  const std::string value_cool = \"cool\";\n  const std::string value_awesome = \"awesome\";\n\n  const std::string value_user_agent = \"CoolEnvoy/HC\";\n  const std::string value_upstream_metadata = \"value\";\n  const std::string value_protocol = \"HTTP/1.1\";\n  const std::string value_downstream_remote_address = \"127.0.0.1:0\";\n  const std::string value_downstream_remote_address_without_port = \"127.0.0.1\";\n  const std::string value_downstream_local_address = \"127.0.0.1:0\";\n  const std::string value_downstream_local_address_without_port = \"127.0.0.1\";\n\n  setupServiceValidationWithAdditionalHeaders();\n  // Requires non-empty `service_name` in config.\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n  auto metadata = TestUtility::parseYaml<envoy::config::core::v3::Metadata>(\n      R\"EOF(\n        filter_metadata:\n          namespace:\n            key: value\n      )EOF\");\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\", metadata)};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillRepeatedly(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.get(header_ok)->value().getStringView(), value_ok);\n        EXPECT_EQ(headers.get(header_cool)->value().getStringView(), value_cool);\n        EXPECT_EQ(headers.get(header_awesome)->value().getStringView(), value_awesome);\n\n        EXPECT_EQ(headers.getUserAgentValue(), value_user_agent);\n       
 EXPECT_EQ(headers.get(upstream_metadata)->value().getStringView(), value_upstream_metadata);\n\n        EXPECT_EQ(headers.get(protocol)->value().getStringView(), value_protocol);\n        EXPECT_EQ(headers.get(downstream_remote_address)->value().getStringView(),\n                  value_downstream_remote_address);\n        EXPECT_EQ(headers.get(downstream_remote_address_without_port)->value().getStringView(),\n                  value_downstream_remote_address_without_port);\n        EXPECT_EQ(headers.get(downstream_local_address)->value().getStringView(),\n                  value_downstream_local_address);\n        EXPECT_EQ(headers.get(downstream_local_address_without_port)->value().getStringView(),\n                  value_downstream_local_address_without_port);\n\n        Envoy::DateFormatter date_formatter(\"%s.%9f\");\n        std::string current_start_time =\n            date_formatter.fromTime(dispatcher_.timeSource().systemTime());\n        EXPECT_EQ(headers.get(start_time)->value().getStringView(), current_start_time);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithoutUserAgent) {\n  
setupServiceValidationWithoutUserAgent();\n  // Requires non-empty `service_name` in config.\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n  auto metadata = TestUtility::parseYaml<envoy::config::core::v3::Metadata>(\n      R\"EOF(\n        filter_metadata:\n          namespace:\n            key: value\n      )EOF\");\n\n  std::string current_start_time;\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\", metadata)};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillRepeatedly(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.UserAgent(), nullptr);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n}\n\nTEST_F(HttpHealthCheckerImplTest, ServiceDoesNotMatchFail) {\n  setupServiceValidationHC();\n  EXPECT_CALL(event_logger_, 
logUnhealthy(_, _, _, true));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1);\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"api-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, ServicePatternDoesNotMatchFail) {\n  setupServiceRegexPatternValidationHC();\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1);\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  
cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"api-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, ServiceNotPresentInResponseFail) {\n  setupServiceValidationHC();\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1);\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n           
   enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true, false);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, ServiceCheckRuntimeOff) {\n  setupServiceValidationHC();\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"api-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, ServiceCheckRuntimeOffWithStringPattern) {\n  setupServicePrefixPatternValidationHC();\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(*this, onHostStatus(_, 
HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"api-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessStartFailedFailFirstServiceCheck) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillRepeatedly(Return(true));\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  expectSuccessStartFailedFailFirst(health_checked_cluster);\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessNoTraffic) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(5000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, 
disableTimer());\n  respond(0, \"200\", false, false, true, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessStartFailedSuccessFirst) {\n  setupNoServiceValidationHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  // Test fast success immediately moves us to healthy.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1);\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _)).WillOnce(Return(500));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(500), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessStartFailedFailFirst) {\n  setupNoServiceValidationHC();\n  expectSuccessStartFailedFailFirst();\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessStartFailedFailFirstLogError) {\n  setupNoServiceValidationHCAlwaysLogFailure();\n  expectSuccessStartFailedFailFirst();\n}\n\n// Verify that removal during a failure callback works.\nTEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackNoClose) {\n  setupNoServiceValidationHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed))\n      .WillOnce(Invoke([&](HostSharedPtr host, HealthTransition) {\n        cluster_->prioritySet().getMockHostSet(0)->hosts_ = {};\n        cluster_->prioritySet().runUpdateCallbacks(0, {}, {host});\n      }));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)).Times(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()).Times(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  respond(0, \"503\", false);\n}\n\n// Verify that removal during a failure callback works with connection close.\nTEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackClose) {\n  setupNoServiceValidationHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed))\n      .WillOnce(Invoke([&](HostSharedPtr host, HealthTransition) {\n        cluster_->prioritySet().getMockHostSet(0)->hosts_ = {};\n        cluster_->prioritySet().runUpdateCallbacks(0, {}, {host});\n      }));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)).Times(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()).Times(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  respond(0, \"503\", true);\n}\n\nTEST_F(HttpHealthCheckerImplTest, HttpFail) {\n  setupNoServiceValidationHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  respond(0, \"503\", false);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::UNHEALTHY);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, false));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n  EXPECT_EQ(Host::Health::Healthy, 
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, HttpFailLogError) {\n  setupNoServiceValidationHCAlwaysLogFailure();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  respond(0, \"503\", false);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::UNHEALTHY);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // logUnhealthy is called with first_check == false\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, false));\n  respond(0, \"503\", false);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  
EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::UNHEALTHY);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, false));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, Disconnect) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  
test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate(0);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(cluster_->prioritySet().getMockHostSet(0)->hosts_[0],\n                                  HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, Timeout) {\n  setupNoServiceValidationHCOneUnhealthy();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n  EXPECT_EQ(Host::Health::Unhealthy,\n            
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::TIMEOUT);\n}\n\n// Make sure that a timeout during a partial response works correctly.\nTEST_F(HttpHealthCheckerImplTest, TimeoutThenSuccess) {\n  setupNoServiceValidationHC();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  // Do a response that is not complete but includes headers.\n  std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers(\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}});\n  test_sessions_[0]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), false);\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate(0);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true);\n  EXPECT_EQ(Host::Health::Healthy, 
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, TimeoutThenRemoteClose) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate(0);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::TIMEOUT);\n}\n\nTEST_F(HttpHealthCheckerImplTest, TimeoutAfterDisconnect) {\n  setupNoServiceValidationHC();\n  
cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)).Times(2);\n  health_checker_->start();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)).Times(1);\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)).Times(2);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  for (auto& session : test_sessions_) {\n    session->client_connection_->close(Network::ConnectionCloseType::NoFlush);\n  }\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1);\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n\n  test_sessions_[0]->timeout_timer_->enableTimer(std::chrono::seconds(10), nullptr);\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, DynamicAddAndRemove) {\n  setupNoServiceValidationHC();\n  health_checker_->start();\n\n  expectSessionCreate();\n  expectStreamCreate(0);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks(\n      {cluster_->prioritySet().getMockHostSet(0)->hosts_.back()}, {});\n\n  HostVector removed{cluster_->prioritySet().getMockHostSet(0)->hosts_.back()};\n  cluster_->prioritySet().getMockHostSet(0)->hosts_.clear();\n  EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_));\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks({}, removed);\n}\n\nTEST_F(HttpHealthCheckerImplTest, 
ConnectionClose) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate(0);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  test_sessions_[0]->interval_timer_->invokeCallback();\n}\n\nTEST_F(HttpHealthCheckerImplTest, ProxyConnectionClose) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate(0);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  test_sessions_[0]->interval_timer_->invokeCallback();\n}\n\nTEST_F(HttpHealthCheckerImplTest, ConnectionCloseLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fixed_connection_close\", \"false\"}});\n  
setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate(0);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  test_sessions_[0]->interval_timer_->invokeCallback();\n}\n\nTEST_F(HttpHealthCheckerImplTest, ProxyConnectionCloseLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fixed_connection_close\", \"false\"}});\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate(0);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  test_sessions_[0]->interval_timer_->invokeCallback();\n}\n\nTEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) {\n  
setupHealthCheckIntervalOverridesHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://128.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  // First check should respect no_traffic_interval setting.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(5000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n  cluster_->info_->stats().upstream_cx_total_.inc();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Follow up successful checks should respect interval setting.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Follow up successful checks should respect interval setting.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // A logical failure is not 
considered a network failure, therefore the unhealthy threshold is\n  // ignored and health state changes immediately. Since the threshold is ignored, next health\n  // check respects \"unhealthy_interval\".\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"503\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent failing checks should respect unhealthy_interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"503\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent failing checks should respect unhealthy_interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"503\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // When transitioning to a successful state, checks should respect healthy_edge_interval. 
Health\n  // state should be delayed pending healthy threshold.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // After the healthy threshold is reached, health state should change while checks should respect\n  // the default interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, false));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent checks shouldn't change the state.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n\n  
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // First failed check after a run o successful ones should respect unhealthy_edge_interval. A\n  // timeout, being a network type failure, should respect unhealthy threshold before changing the\n  // health state.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(3000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a network timeout.\n  expectClientCreate(0);\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(3000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a network timeout.\n  expectClientCreate(0);\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent failing checks should respect unhealthy_interval. 
As the unhealthy threshold is\n  // reached, health state should also change.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a network timeout.\n  expectClientCreate(0);\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Remaining failing checks shouldn't change the state.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a network timeout.\n  expectClientCreate(0);\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // When transitioning to a successful state, checks should respect healthy_edge_interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, 
enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // After the healthy threshold is reached, health state should change while checks should respect\n  // the default interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, false));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent checks shouldn't change the state.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n}\n\nTEST_F(HttpHealthCheckerImplTest, RemoteCloseBetweenChecks) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(2);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n  EXPECT_EQ(Host::Health::Healthy, 
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  expectClientCreate(0);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\n// Test that we close connections on a healthy check when reuse_connection is false.\nTEST_F(HttpHealthCheckerImplTest, DontReuseConnectionBetweenChecks) {\n  setupNoServiceValidationNoReuseConnectionHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(2);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  // A new client is created because we close the connection ourselves.\n  // See HttpHealthCheckerImplTest.RemoteCloseBetweenChecks for how this works when the remote end\n  // closes the connection.\n  expectClientCreate(0);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respond(0, 
\"200\", false);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, StreamReachesWatermarkDuringCheck) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n\n  test_sessions_[0]->request_encoder_.stream_.runHighWatermarkCallbacks();\n  test_sessions_[0]->request_encoder_.stream_.runLowWatermarkCallbacks();\n\n  respond(0, \"200\", true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, ConnectionReachesWatermarkDuringCheck) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n\n  test_sessions_[0]->client_connection_->runHighWatermarkCallbacks();\n  test_sessions_[0]->client_connection_->runLowWatermarkCallbacks();\n\n  respond(0, \"200\", true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAltPort) {\n  const std::string host = \"fake_cluster\";\n  
const std::string path = \"/healthcheck\";\n  setupServiceValidationHC();\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  // Prepares a host with its designated health check port.\n  const HostWithHealthCheckMap hosts{{\"127.0.0.1:80\", makeHealthCheckConfig(8000)}};\n  appendTestHosts(cluster_, hosts);\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate(hosts);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\n// Test host check success with multiple hosts by checking each host defined health check port.\nTEST_F(HttpHealthCheckerImplTest, SuccessWithMultipleHostsAndAltPort) {\n  setupNoServiceValidationHC();\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(2);\n\n  // Prepares a set of hosts along with its designated health check ports.\n  const HostWithHealthCheckMap hosts = 
{{\"127.0.0.1:80\", makeHealthCheckConfig(8000)},\n                                        {\"127.0.0.1:81\", makeHealthCheckConfig(8001)}};\n  appendTestHosts(cluster_, hosts);\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate(hosts);\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  expectSessionCreate(hosts);\n  expectStreamCreate(1);\n  EXPECT_CALL(*test_sessions_[1]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _)).Times(2);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .Times(2)\n      .WillRepeatedly(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  EXPECT_CALL(*test_sessions_[1]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[1]->timeout_timer_, disableTimer());\n  respond(0, \"200\", false, false, true);\n  respond(1, \"200\", false, false, true);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[1]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, Http2ClusterUseHttp2CodecClient) {\n  setupNoServiceValidationHCWithHttp2();\n  EXPECT_EQ(Http::CodecClient::Type::HTTP2, health_checker_->codecClientType());\n}\n\nMATCHER_P(MetadataEq, expected, \"\") {\n  const envoy::config::core::v3::Metadata* metadata = arg;\n  if (!metadata) {\n    return false;\n  }\n  EXPECT_TRUE(Envoy::Protobuf::util::MessageDifferencer::Equals(*metadata, expected));\n  return true;\n}\n\nTEST_F(HttpHealthCheckerImplTest, TransportSocketMatchCriteria) {\n  const 
std::string host = \"fake_cluster\";\n  const std::string path = \"/healthcheck\";\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 1s\n    interval_jitter_percent: 40\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    transport_socket_match_criteria:\n      key: value\n    )EOF\";\n\n  auto default_socket_factory = std::make_unique<Network::MockTransportSocketFactory>();\n  // We expect that this default_socket_factory will NOT be used to create a transport socket for\n  // the health check connection.\n  EXPECT_CALL(*default_socket_factory, createTransportSocket(_)).Times(0);\n  EXPECT_CALL(*default_socket_factory, implementsSecureTransport());\n  auto transport_socket_match =\n      std::make_unique<Upstream::MockTransportSocketMatcher>(std::move(default_socket_factory));\n\n  auto metadata = TestUtility::parseYaml<envoy::config::core::v3::Metadata>(\n      R\"EOF(\n    filter_metadata:\n      envoy.transport_socket_match:\n        key: value\n  )EOF\");\n\n  Stats::IsolatedStoreImpl stats_store;\n  auto health_transport_socket_stats = TransportSocketMatchStats{\n      ALL_TRANSPORT_SOCKET_MATCH_STATS(POOL_COUNTER_PREFIX(stats_store, \"test\"))};\n  auto health_check_only_socket_factory = std::make_unique<Network::MockTransportSocketFactory>();\n\n  // We expect resolve() to be called twice, once for endpoint socket matching (with no metadata in\n  // this test) and once for health check socket matching. 
In the latter we expect metadata that\n  // matches the above object.\n  EXPECT_CALL(*transport_socket_match, resolve(nullptr));\n  EXPECT_CALL(*transport_socket_match, resolve(MetadataEq(metadata)))\n      .WillOnce(Return(TransportSocketMatcher::MatchData(\n          *health_check_only_socket_factory, health_transport_socket_stats, \"health_check_only\")));\n  // The health_check_only_socket_factory should be used to create a transport socket for the health\n  // check connection.\n  EXPECT_CALL(*health_check_only_socket_factory, createTransportSocket(_));\n\n  cluster_->info_->transport_socket_matcher_ = std::move(transport_socket_match);\n\n  allocHealthChecker(yaml);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n  EXPECT_EQ(health_transport_socket_stats.total_match_count_.value(), 1);\n}\n\nTEST_F(HttpHealthCheckerImplTest, NoTransportSocketMatchCriteria) {\n  const std::string host = \"fake_cluster\";\n  const std::string path = \"/healthcheck\";\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 1s\n    interval_jitter_percent: 40\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n  auto default_socket_factory = std::make_unique<Network::MockTransportSocketFactory>();\n  // The default_socket_factory should be used to create a transport socket for the health check\n  // connection.\n  EXPECT_CALL(*default_socket_factory, createTransportSocket(_));\n  EXPECT_CALL(*default_socket_factory, implementsSecureTransport());\n  auto transport_socket_match =\n      
std::make_unique<Upstream::MockTransportSocketMatcher>(std::move(default_socket_factory));\n  // We expect resolve() to be called exactly once for endpoint socket matching. We should not\n  // attempt to match again for health checks since there is not match criteria in the config.\n  EXPECT_CALL(*transport_socket_match, resolve(nullptr));\n\n  cluster_->info_->transport_socket_matcher_ = std::move(transport_socket_match);\n\n  allocHealthChecker(yaml);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n}\n\nclass TestProdHttpHealthChecker : public ProdHttpHealthCheckerImpl {\npublic:\n  using ProdHttpHealthCheckerImpl::ProdHttpHealthCheckerImpl;\n\n  std::unique_ptr<Http::CodecClient>\n  createCodecClientForTest(std::unique_ptr<Network::ClientConnection>&& connection) {\n    Upstream::Host::CreateConnectionData data;\n    data.connection_ = std::move(connection);\n    data.host_description_ = std::make_shared<NiceMock<Upstream::MockHostDescription>>();\n    return std::unique_ptr<Http::CodecClient>(createCodecClient(data));\n  }\n};\n\nclass ProdHttpHealthCheckerTest : public testing::Test, public HealthCheckerTestBase {\npublic:\n  void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) {\n    health_checker_ = std::make_shared<TestProdHttpHealthChecker>(\n        *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_,\n        HealthCheckEventLoggerPtr(event_logger_storage_.release()));\n  }\n\n  void addCompletionCallback() {\n    health_checker_->addHostCheckCompleteCb(\n        [this](HostSharedPtr host, HealthTransition changed_state) -> void {\n          onHostStatus(host, changed_state);\n        });\n  }\n\n  void 
setupNoServiceValidationHCWithHttp2() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n      codec_client_type: Http2\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  void setupNoServiceValidationHC() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n\n    allocHealthChecker(yaml);\n    addCompletionCallback();\n  }\n\n  MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state));\n  std::unique_ptr<Network::MockClientConnection> connection_ =\n      std::make_unique<NiceMock<Network::MockClientConnection>>();\n  std::shared_ptr<TestProdHttpHealthChecker> health_checker_;\n};\n\nTEST_F(ProdHttpHealthCheckerTest, ProdHttpHealthCheckerH1HealthChecking) {\n  setupNoServiceValidationHC();\n  EXPECT_EQ(Http::CodecClient::Type::HTTP1,\n            health_checker_->createCodecClientForTest(std::move(connection_))->type());\n}\n\nTEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http1CodecClient)) {\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n      use_http2: false\n    )EOF\";\n\n  allocHealthChecker(yaml, false);\n  addCompletionCallback();\n  EXPECT_EQ(Http::CodecClient::Type::HTTP1, health_checker_->codecClientType());\n}\n\nTEST_F(HttpHealthCheckerImplTest, 
DEPRECATED_FEATURE_TEST(Http2CodecClient)) {\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n      use_http2: true\n    )EOF\";\n\n  allocHealthChecker(yaml, false);\n  addCompletionCallback();\n  EXPECT_EQ(Http::CodecClient::Type::HTTP2, health_checker_->codecClientType());\n}\n\nTEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMatch)) {\n  const std::string host = \"fake_cluster\";\n  const std::string path = \"/healthcheck\";\n  setupDeprecatedServiceNameValidationHC(\"locations\");\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true))\n      .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n        EXPECT_EQ(headers.getHostValue(), host);\n        EXPECT_EQ(headers.getPathValue(), path);\n        EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http);\n      }));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, 
disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"locations-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMismatch)) {\n  setupDeprecatedServiceNameValidationHC(\"locations\");\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"health_check.verify_cluster\", 100))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1);\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->info_->stats().upstream_cx_total_.inc();\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n      .WillOnce(Return(45000));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_,\n              enableTimer(std::chrono::milliseconds(45000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  absl::optional<std::string> health_checked_cluster(\"api-production-iad\");\n  respond(0, \"200\", false, false, true, false, health_checked_cluster);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(ProdHttpHealthCheckerTest, ProdHttpHealthCheckerH2HealthChecking) {\n  setupNoServiceValidationHCWithHttp2();\n  
EXPECT_EQ(Http::CodecClient::Type::HTTP2,\n            health_checker_->createCodecClientForTest(std::move(connection_))->type());\n}\n\nTEST(HttpStatusChecker, Default) {\n  const std::string yaml = R\"EOF(\n  timeout: 1s\n  interval: 1s\n  unhealthy_threshold: 2\n  healthy_threshold: 2\n  http_health_check:\n    service_name_matcher:\n        prefix: locations\n    path: /healthcheck\n  )EOF\";\n\n  HttpHealthCheckerImpl::HttpStatusChecker http_status_checker(\n      parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200);\n\n  EXPECT_TRUE(http_status_checker.inRange(200));\n  EXPECT_FALSE(http_status_checker.inRange(204));\n}\n\nTEST(HttpStatusChecker, Single100) {\n  const std::string yaml = R\"EOF(\n  timeout: 1s\n  interval: 1s\n  unhealthy_threshold: 2\n  healthy_threshold: 2\n  http_health_check:\n    service_name_matcher:\n        prefix: locations\n    path: /healthcheck\n    expected_statuses:\n      - start: 100\n        end: 101\n  )EOF\";\n\n  HttpHealthCheckerImpl::HttpStatusChecker http_status_checker(\n      parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200);\n\n  EXPECT_FALSE(http_status_checker.inRange(200));\n\n  EXPECT_FALSE(http_status_checker.inRange(99));\n  EXPECT_TRUE(http_status_checker.inRange(100));\n  EXPECT_FALSE(http_status_checker.inRange(101));\n}\n\nTEST(HttpStatusChecker, Single599) {\n  const std::string yaml = R\"EOF(\n  timeout: 1s\n  interval: 1s\n  unhealthy_threshold: 2\n  healthy_threshold: 2\n  http_health_check:\n    service_name_matcher:\n        prefix: locations\n    path: /healthcheck\n    expected_statuses:\n      - start: 599\n        end: 600\n  )EOF\";\n\n  HttpHealthCheckerImpl::HttpStatusChecker http_status_checker(\n      parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200);\n\n  EXPECT_FALSE(http_status_checker.inRange(200));\n\n  EXPECT_FALSE(http_status_checker.inRange(598));\n  EXPECT_TRUE(http_status_checker.inRange(599));\n  
EXPECT_FALSE(http_status_checker.inRange(600));\n}\n\nTEST(HttpStatusChecker, Ranges_204_304) {\n  const std::string yaml = R\"EOF(\n  timeout: 1s\n  interval: 1s\n  unhealthy_threshold: 2\n  healthy_threshold: 2\n  http_health_check:\n    service_name_matcher:\n        prefix: locations\n    path: /healthcheck\n    expected_statuses:\n      - start: 204\n        end: 205\n      - start: 304\n        end: 305\n  )EOF\";\n\n  HttpHealthCheckerImpl::HttpStatusChecker http_status_checker(\n      parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200);\n\n  EXPECT_FALSE(http_status_checker.inRange(200));\n\n  EXPECT_FALSE(http_status_checker.inRange(203));\n  EXPECT_TRUE(http_status_checker.inRange(204));\n  EXPECT_FALSE(http_status_checker.inRange(205));\n  EXPECT_FALSE(http_status_checker.inRange(303));\n  EXPECT_TRUE(http_status_checker.inRange(304));\n  EXPECT_FALSE(http_status_checker.inRange(305));\n}\n\nTEST(HttpStatusChecker, Below100) {\n  const std::string yaml = R\"EOF(\n  timeout: 1s\n  interval: 1s\n  unhealthy_threshold: 2\n  healthy_threshold: 2\n  http_health_check:\n    service_name_matcher:\n        prefix: locations\n    path: /healthcheck\n    expected_statuses:\n      - start: 99\n        end: 100\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      HttpHealthCheckerImpl::HttpStatusChecker http_status_checker(\n          parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200),\n      EnvoyException, \"Invalid http status range: expecting start >= 100, but found start=99\");\n}\n\nTEST(HttpStatusChecker, Above599) {\n  const std::string yaml = R\"EOF(\n  timeout: 1s\n  interval: 1s\n  unhealthy_threshold: 2\n  healthy_threshold: 2\n  http_health_check:\n    service_name_matcher:\n        prefix: locations\n    path: /healthchecka\n    expected_statuses:\n      - start: 600\n        end: 601\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      HttpHealthCheckerImpl::HttpStatusChecker http_status_checker(\n      
    parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200),\n      EnvoyException, \"Invalid http status range: expecting end <= 600, but found end=601\");\n}\n\nTEST(HttpStatusChecker, InvalidRange) {\n  const std::string yaml = R\"EOF(\n  timeout: 1s\n  interval: 1s\n  unhealthy_threshold: 2\n  healthy_threshold: 2\n  http_health_check:\n    service_name_matcher:\n        prefix: locations\n    path: /healthchecka\n    expected_statuses:\n      - start: 200\n        end: 200\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      HttpHealthCheckerImpl::HttpStatusChecker http_status_checker(\n          parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200),\n      EnvoyException,\n      \"Invalid http status range: expecting start < end, but found start=200 and end=200\");\n}\n\nTEST(HttpStatusChecker, InvalidRange2) {\n  const std::string yaml = R\"EOF(\n  timeout: 1s\n  interval: 1s\n  unhealthy_threshold: 2\n  healthy_threshold: 2\n  http_health_check:\n    service_name_matcher:\n        prefix: locations\n    path: /healthchecka\n    expected_statuses:\n      - start: 201\n        end: 200\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      HttpHealthCheckerImpl::HttpStatusChecker http_status_checker(\n          parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200),\n      EnvoyException,\n      \"Invalid http status range: expecting start < end, but found start=201 and end=200\");\n}\n\nTEST(TcpHealthCheckMatcher, loadJsonBytes) {\n  {\n    Protobuf::RepeatedPtrField<envoy::config::core::v3::HealthCheck::Payload> repeated_payload;\n    repeated_payload.Add()->set_text(\"39000000\");\n    repeated_payload.Add()->set_text(\"EEEEEEEE\");\n\n    TcpHealthCheckMatcher::MatchSegments segments =\n        TcpHealthCheckMatcher::loadProtoBytes(repeated_payload);\n    EXPECT_EQ(2U, segments.size());\n  }\n\n  {\n    Protobuf::RepeatedPtrField<envoy::config::core::v3::HealthCheck::Payload> 
repeated_payload;\n    repeated_payload.Add()->set_text(\"4\");\n\n    EXPECT_THROW(TcpHealthCheckMatcher::loadProtoBytes(repeated_payload), EnvoyException);\n  }\n\n  {\n    Protobuf::RepeatedPtrField<envoy::config::core::v3::HealthCheck::Payload> repeated_payload;\n    repeated_payload.Add()->set_text(\"gg\");\n\n    EXPECT_THROW(TcpHealthCheckMatcher::loadProtoBytes(repeated_payload), EnvoyException);\n  }\n}\n\nstatic void addUint8(Buffer::Instance& buffer, uint8_t addend) {\n  buffer.add(&addend, sizeof(addend));\n}\n\nTEST(TcpHealthCheckMatcher, match) {\n  Protobuf::RepeatedPtrField<envoy::config::core::v3::HealthCheck::Payload> repeated_payload;\n  repeated_payload.Add()->set_text(\"01\");\n  repeated_payload.Add()->set_text(\"02\");\n\n  TcpHealthCheckMatcher::MatchSegments segments =\n      TcpHealthCheckMatcher::loadProtoBytes(repeated_payload);\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_FALSE(TcpHealthCheckMatcher::match(segments, buffer));\n  addUint8(buffer, 1);\n  EXPECT_FALSE(TcpHealthCheckMatcher::match(segments, buffer));\n  addUint8(buffer, 2);\n  EXPECT_TRUE(TcpHealthCheckMatcher::match(segments, buffer));\n\n  buffer.drain(2);\n  addUint8(buffer, 1);\n  addUint8(buffer, 3);\n  addUint8(buffer, 2);\n  EXPECT_TRUE(TcpHealthCheckMatcher::match(segments, buffer));\n\n  buffer.drain(3);\n  addUint8(buffer, 0);\n  addUint8(buffer, 3);\n  addUint8(buffer, 1);\n  addUint8(buffer, 2);\n  EXPECT_TRUE(TcpHealthCheckMatcher::match(segments, buffer));\n}\n\nclass TcpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase {\npublic:\n  void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) {\n    health_checker_ = std::make_shared<TcpHealthCheckerImpl>(\n        *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_,\n        HealthCheckEventLoggerPtr(event_logger_storage_.release()));\n  }\n\n  void setupData(unsigned int unhealthy_threshold = 2) {\n    std::ostringstream yaml;\n    
yaml << R\"EOF(\n    timeout: 1s\n    interval: 1s\n    unhealthy_threshold: )EOF\"\n         << unhealthy_threshold << R\"EOF(\n    healthy_threshold: 2\n    tcp_health_check:\n      send:\n        text: \"01\"\n      receive:\n      - text: \"02\"\n    )EOF\";\n\n    allocHealthChecker(yaml.str());\n  }\n\n  void setupNoData() {\n    std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    tcp_health_check: {}\n    )EOF\";\n\n    allocHealthChecker(yaml);\n  }\n\n  void setupDataDontReuseConnection() {\n    std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    unhealthy_threshold: 2\n    healthy_threshold: 2\n    reuse_connection: false\n    tcp_health_check:\n      send:\n        text: \"01\"\n      receive:\n      - text: \"02\"\n    )EOF\";\n\n    allocHealthChecker(yaml);\n  }\n\n  void expectSessionCreate() {\n    interval_timer_ = new Event::MockTimer(&dispatcher_);\n    timeout_timer_ = new Event::MockTimer(&dispatcher_);\n  }\n\n  void expectClientCreate() {\n    connection_ = new NiceMock<Network::MockClientConnection>();\n    EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillOnce(Return(connection_));\n    EXPECT_CALL(*connection_, addReadFilter(_)).WillOnce(SaveArg<0>(&read_filter_));\n  }\n\n  std::shared_ptr<TcpHealthCheckerImpl> health_checker_;\n  Network::MockClientConnection* connection_{};\n  Event::MockTimer* timeout_timer_{};\n  Event::MockTimer* interval_timer_{};\n  Network::ReadFilterSharedPtr read_filter_;\n};\n\nTEST_F(TcpHealthCheckerImplTest, Success) {\n  InSequence s;\n\n  setupData();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  connection_->runHighWatermarkCallbacks();\n  
connection_->runLowWatermarkCallbacks();\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  Buffer::OwnedImpl response;\n  addUint8(response, 2);\n  read_filter_->onData(response, false);\n}\n\n// Tests that a successful healthcheck will disconnect the client when reuse_connection is false.\nTEST_F(TcpHealthCheckerImplTest, DataWithoutReusingConnection) {\n  InSequence s;\n\n  setupDataDontReuseConnection();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _)).Times(1);\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Expected execution flow when a healthcheck is successful and reuse_connection is false.\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)).Times(1);\n\n  Buffer::OwnedImpl response;\n  addUint8(response, 2);\n  read_filter_->onData(response, false);\n\n  // These are the expected metric results after testing.\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n}\n\n// Tests an unsuccessful healthcheck, where the endpoint sends wrong data\nTEST_F(TcpHealthCheckerImplTest, WrongData) {\n  InSequence s;\n\n  setupDataDontReuseConnection();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _)).Times(1);\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  
health_checker_->start();\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Not the expected response\n  Buffer::OwnedImpl response;\n  addUint8(response, 3);\n  read_filter_->onData(response, false);\n\n  // These are the expected metric results after testing.\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  // TODO(lilika): This should indicate a failure\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::UNHEALTHY);\n}\n\nTEST_F(TcpHealthCheckerImplTest, TimeoutThenRemoteClose) {\n  InSequence s;\n\n  setupData();\n  health_checker_->start();\n\n  expectSessionCreate();\n  expectClientCreate();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks(\n      {cluster_->prioritySet().getMockHostSet(0)->hosts_.back()}, {});\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  Buffer::OwnedImpl response;\n  addUint8(response, 1);\n  read_filter_->onData(response, false);\n\n  EXPECT_CALL(*connection_, close(_));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  timeout_timer_->invokeCallback();\n  EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::TIMEOUT);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  
interval_timer_->invokeCallback();\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  interval_timer_->invokeCallback();\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  HostVector removed{cluster_->prioritySet().getMockHostSet(0)->hosts_.back()};\n  cluster_->prioritySet().getMockHostSet(0)->hosts_.clear();\n  EXPECT_CALL(*connection_, close(_));\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks({}, removed);\n}\n\nTEST_F(TcpHealthCheckerImplTest, Timeout) {\n  InSequence s;\n\n  setupData(1);\n  health_checker_->start();\n\n  expectSessionCreate();\n  expectClientCreate();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks(\n      {cluster_->prioritySet().getMockHostSet(0)->hosts_.back()}, {});\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  Buffer::OwnedImpl response;\n  addUint8(response, 1);\n  read_filter_->onData(response, false);\n\n  EXPECT_CALL(*connection_, close(_));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, 
enableTimer(_, _));\n  timeout_timer_->invokeCallback();\n  EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::TIMEOUT);\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\nTEST_F(TcpHealthCheckerImplTest, DoubleTimeout) {\n  InSequence s;\n\n  setupData();\n  health_checker_->start();\n\n  expectSessionCreate();\n  expectClientCreate();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks(\n      {cluster_->prioritySet().getMockHostSet(0)->hosts_.back()}, {});\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  Buffer::OwnedImpl response;\n  addUint8(response, 1);\n  read_filter_->onData(response, false);\n\n  EXPECT_CALL(*connection_, close(_));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  timeout_timer_->invokeCallback();\n  EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::TIMEOUT);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  interval_timer_->invokeCallback();\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  EXPECT_CALL(*connection_, close(_));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  timeout_timer_->invokeCallback();\n  
EXPECT_EQ(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->getActiveHealthFailureType(),\n            Host::ActiveHealthFailureType::TIMEOUT);\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  interval_timer_->invokeCallback();\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  HostVector removed{cluster_->prioritySet().getMockHostSet(0)->hosts_.back()};\n  cluster_->prioritySet().getMockHostSet(0)->hosts_.clear();\n  EXPECT_CALL(*connection_, close(_));\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks({}, removed);\n}\n\n// Tests that when reuse_connection is false timeouts execute normally.\nTEST_F(TcpHealthCheckerImplTest, TimeoutWithoutReusingConnection) {\n  InSequence s;\n\n  setupDataDontReuseConnection();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _)).Times(1);\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Expected flow when a healthcheck is successful and reuse_connection is false.\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)).Times(1);\n\n  Buffer::OwnedImpl response;\n  addUint8(response, 2);\n  read_filter_->onData(response, false);\n\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n\n  // The healthcheck will run again.\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _));\n  
EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  interval_timer_->invokeCallback();\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Expected flow when a healthcheck times out.\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  // The healthcheck is not yet at the unhealthy threshold.\n  EXPECT_FALSE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  // The healthcheck metric results after first timeout block.\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n\n  // The healthcheck will run again, it should be failing after this attempt.\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  interval_timer_->invokeCallback();\n\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  // Expected flow when a healthcheck times out.\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  // The healthcheck metric results after the second timeout block.\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(2UL, 
cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n}\n\nTEST_F(TcpHealthCheckerImplTest, NoData) {\n  InSequence s;\n\n  setupNoData();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _)).Times(0);\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  EXPECT_CALL(*connection_, close(_));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _)).Times(0);\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  interval_timer_->invokeCallback();\n}\n\nTEST_F(TcpHealthCheckerImplTest, PassiveFailure) {\n  InSequence s;\n\n  setupNoData();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _)).Times(0);\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  // Do multiple passive failures. 
This will not reset the active HC timers.\n  cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthChecker().setUnhealthy();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthChecker().setUnhealthy();\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  // A single success should not bring us back to healthy.\n  EXPECT_CALL(*connection_, close(_));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.passive_failure\").value());\n}\n\nTEST_F(TcpHealthCheckerImplTest, PassiveFailureCrossThreadRemoveHostRace) {\n  InSequence s;\n\n  setupNoData();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _)).Times(0);\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  // Do a passive failure. 
This will not reset the active HC timers.\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthChecker().setUnhealthy();\n\n  // Remove before the cross thread event comes in.\n  EXPECT_CALL(*connection_, close(_));\n  HostVector old_hosts = std::move(cluster_->prioritySet().getMockHostSet(0)->hosts_);\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks({}, old_hosts);\n  post_cb();\n\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.passive_failure\").value());\n}\n\nTEST_F(TcpHealthCheckerImplTest, PassiveFailureCrossThreadRemoveClusterRace) {\n  InSequence s;\n\n  setupNoData();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _)).Times(0);\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  // Do a passive failure. 
This will not reset the active HC timers.\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthChecker().setUnhealthy();\n\n  // Remove before the cross thread event comes in.\n  EXPECT_CALL(*connection_, close(_));\n  health_checker_.reset();\n  post_cb();\n\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.passive_failure\").value());\n}\n\nTEST_F(TcpHealthCheckerImplTest, ConnectionLocalFailure) {\n  InSequence s;\n\n  setupData();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n  expectClientCreate();\n  EXPECT_CALL(*connection_, write(_, _));\n  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  // Expect the LocalClose to be handled as a health check failure\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n\n  // Raise a LocalClose that is not triggered by the health monitor itself.\n  // e.g. 
a failure to setsockopt().\n  connection_->raiseEvent(Network::ConnectionEvent::LocalClose);\n\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.passive_failure\").value());\n}\n\nclass GrpcHealthCheckerImplTestBase : public GrpcHealthCheckerImplTestBaseUtils {\npublic:\n  struct ResponseSpec {\n    struct ChunkSpec {\n      bool valid;\n      std::vector<uint8_t> data;\n    };\n    static ChunkSpec invalidChunk() {\n      ChunkSpec spec;\n      spec.valid = false;\n      return spec;\n    }\n    static ChunkSpec invalidPayload(uint8_t flags, bool valid_message) {\n      ChunkSpec spec;\n      spec.valid = true;\n      spec.data = serializeResponse(grpc::health::v1::HealthCheckResponse::SERVING);\n      spec.data[0] = flags;\n      if (!valid_message) {\n        const size_t kGrpcHeaderSize = 5;\n        for (size_t i = kGrpcHeaderSize; i < spec.data.size(); i++) {\n          // Fill payload with some random data.\n          spec.data[i] = i % 256;\n        }\n      }\n      return spec;\n    }\n    static ChunkSpec validChunk(grpc::health::v1::HealthCheckResponse::ServingStatus status) {\n      ChunkSpec spec;\n      spec.valid = true;\n      spec.data = serializeResponse(status);\n      return spec;\n    }\n\n    static ChunkSpec servingResponse() {\n      return validChunk(grpc::health::v1::HealthCheckResponse::SERVING);\n    }\n\n    static ChunkSpec notServingResponse() {\n      return validChunk(grpc::health::v1::HealthCheckResponse::NOT_SERVING);\n    }\n\n    static std::vector<uint8_t>\n    serializeResponse(grpc::health::v1::HealthCheckResponse::ServingStatus status) {\n      grpc::health::v1::HealthCheckResponse response;\n      response.set_status(status);\n     
 const auto data = Grpc::Common::serializeToGrpcFrame(response);\n      auto ret = std::vector<uint8_t>(data->length(), 0);\n      data->copyOut(0, data->length(), &ret[0]);\n      return ret;\n    }\n\n    std::vector<std::pair<std::string, std::string>>\n        response_headers; // Encapsulates all three types of responses\n    std::vector<ChunkSpec> body_chunks;\n    std::vector<std::pair<std::string, std::string>> trailers;\n  };\n\n  void allocHealthChecker(const envoy::config::core::v3::HealthCheck& config) {\n    health_checker_ = std::make_shared<TestGrpcHealthCheckerImpl>(\n        *cluster_, config, dispatcher_, runtime_, random_,\n        HealthCheckEventLoggerPtr(event_logger_storage_.release()));\n  }\n\n  void addCompletionCallback() {\n    health_checker_->addHostCheckCompleteCb(\n        [this](HostSharedPtr host, HealthTransition changed_state) -> void {\n          onHostStatus(host, changed_state);\n        });\n  }\n\n  void setupHC() {\n    const auto config = createGrpcHealthCheckConfig();\n    allocHealthChecker(config);\n    addCompletionCallback();\n  }\n\n  void setupHCWithUnhealthyThreshold(int value) {\n    auto config = createGrpcHealthCheckConfig();\n    config.mutable_unhealthy_threshold()->set_value(value);\n    allocHealthChecker(config);\n    addCompletionCallback();\n  }\n\n  void setupServiceNameHC(const absl::optional<std::string>& authority) {\n    auto config = createGrpcHealthCheckConfig();\n    config.mutable_grpc_health_check()->set_service_name(\"service\");\n    if (authority.has_value()) {\n      config.mutable_grpc_health_check()->set_authority(authority.value());\n    }\n    allocHealthChecker(config);\n    addCompletionCallback();\n  }\n\n  void setupNoReuseConnectionHC() {\n    auto config = createGrpcHealthCheckConfig();\n    config.mutable_reuse_connection()->set_value(false);\n    allocHealthChecker(config);\n    addCompletionCallback();\n  }\n\n  void setupHealthCheckIntervalOverridesHC() {\n    auto config = 
createGrpcHealthCheckConfig();\n    config.mutable_interval()->set_seconds(1);\n    config.mutable_unhealthy_interval()->set_seconds(2);\n    config.mutable_unhealthy_edge_interval()->set_seconds(3);\n    config.mutable_healthy_edge_interval()->set_seconds(4);\n    config.mutable_no_traffic_interval()->set_seconds(5);\n    config.mutable_interval_jitter()->set_seconds(0);\n    config.mutable_unhealthy_threshold()->set_value(3);\n    config.mutable_healthy_threshold()->set_value(3);\n    allocHealthChecker(config);\n    addCompletionCallback();\n  }\n\n  // Starts healthchecker and sets up timer expectations, leaving up future specification of\n  // healthcheck response for the caller. Useful when there is only one healthcheck attempt\n  // performed during test case (but possibly on many hosts).\n  void expectHealthchecks(HealthTransition host_changed_state, size_t num_healthchecks) {\n    for (size_t i = 0; i < num_healthchecks; i++) {\n      cluster_->info_->stats().upstream_cx_total_.inc();\n      expectSessionCreate();\n      expectHealthcheckStart(i);\n    }\n    health_checker_->start();\n\n    EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _))\n        .Times(num_healthchecks);\n    EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n        .Times(num_healthchecks)\n        .WillRepeatedly(Return(45000));\n    for (size_t i = 0; i < num_healthchecks; i++) {\n      expectHealthcheckStop(i, 45000);\n    }\n    EXPECT_CALL(*this, onHostStatus(_, host_changed_state)).Times(num_healthchecks);\n  }\n\n  void expectSingleHealthcheck(HealthTransition host_changed_state) {\n    cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n        makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n    expectHealthchecks(host_changed_state, 1);\n  }\n\n  // Hides timer/stream-related boilerplate of healthcheck start.\n  void expectHealthcheckStart(size_t index) {\n    expectStreamCreate(index);\n    
EXPECT_CALL(*test_sessions_[index]->timeout_timer_, enableTimer(_, _));\n  }\n\n  // Hides timer-related boilerplate of healthcheck stop.\n  void expectHealthcheckStop(size_t index, int interval_ms = 0) {\n    if (interval_ms > 0) {\n      EXPECT_CALL(*test_sessions_[index]->interval_timer_,\n                  enableTimer(std::chrono::milliseconds(interval_ms), _));\n    } else {\n      EXPECT_CALL(*test_sessions_[index]->interval_timer_, enableTimer(_, _));\n    }\n    EXPECT_CALL(*test_sessions_[index]->timeout_timer_, disableTimer());\n  }\n\n  // Hides host status checking boilerplate when only single host is used in test.\n  void expectHostHealthy(bool healthy) {\n    const auto host = cluster_->prioritySet().getMockHostSet(0)->hosts_[0];\n    if (!healthy) {\n      EXPECT_TRUE(host->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n      EXPECT_EQ(Host::Health::Unhealthy, host->health());\n    } else {\n      EXPECT_EQ(Host::Health::Healthy, host->health());\n    }\n  }\n\n  void respondServiceStatus(size_t index,\n                            grpc::health::v1::HealthCheckResponse::ServingStatus status) {\n    respondResponseSpec(index,\n                        ResponseSpec{{{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n                                     {ResponseSpec::validChunk(status)},\n                                     {{\"grpc-status\", \"0\"}}});\n  }\n\n  void respondResponseSpec(size_t index, ResponseSpec&& spec) {\n    const bool trailers_empty = spec.trailers.empty();\n    const bool end_stream_on_headers = spec.body_chunks.empty() && trailers_empty;\n    auto response_headers = std::make_unique<Http::TestResponseHeaderMapImpl>();\n    for (const auto& header : spec.response_headers) {\n      response_headers->addCopy(header.first, header.second);\n    }\n    test_sessions_[index]->stream_response_callbacks_->decodeHeaders(std::move(response_headers),\n                                                                     
end_stream_on_headers);\n    for (size_t i = 0; i < spec.body_chunks.size(); i++) {\n      const bool end_stream = i == spec.body_chunks.size() - 1 && trailers_empty;\n      const auto& chunk = spec.body_chunks[i];\n      if (chunk.valid) {\n        const auto data = std::make_unique<Buffer::OwnedImpl>(chunk.data.data(), chunk.data.size());\n        test_sessions_[index]->stream_response_callbacks_->decodeData(*data, end_stream);\n      } else {\n        Buffer::OwnedImpl incorrect_data(\"incorrect\");\n        test_sessions_[index]->stream_response_callbacks_->decodeData(incorrect_data, end_stream);\n      }\n    }\n    if (!trailers_empty) {\n      auto trailers = std::make_unique<Http::TestResponseTrailerMapImpl>();\n      for (const auto& header : spec.trailers) {\n        trailers->addCopy(header.first, header.second);\n      }\n      test_sessions_[index]->stream_response_callbacks_->decodeTrailers(std::move(trailers));\n    }\n  }\n\n  void testSingleHostSuccess(const absl::optional<std::string>& authority) {\n    std::string expected_host = cluster_->info_->name();\n    if (authority.has_value()) {\n      expected_host = authority.value();\n    }\n\n    setupServiceNameHC(authority);\n\n    cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n        makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n    runHealthCheck(expected_host);\n  }\n\n  void runHealthCheck(std::string expected_host) {\n\n    cluster_->info_->stats().upstream_cx_total_.inc();\n\n    expectSessionCreate();\n    expectHealthcheckStart(0);\n\n    EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, false))\n        .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) {\n          EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, headers.getContentTypeValue());\n          EXPECT_EQ(std::string(\"/grpc.health.v1.Health/Check\"), headers.getPathValue());\n          EXPECT_EQ(Http::Headers::get().SchemeValues.Http, headers.getSchemeValue());\n          
EXPECT_NE(nullptr, headers.Method());\n          EXPECT_EQ(expected_host, headers.getHostValue());\n          EXPECT_EQ(std::chrono::milliseconds(1000).count(),\n                    Envoy::Grpc::Common::getGrpcTimeout(headers).value().count());\n        }));\n    EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeData(_, true))\n        .WillOnce(Invoke([&](Buffer::Instance& data, bool) {\n          std::vector<Grpc::Frame> decoded_frames;\n          Grpc::Decoder decoder;\n          ASSERT_TRUE(decoder.decode(data, decoded_frames));\n          ASSERT_EQ(1U, decoded_frames.size());\n          auto& frame = decoded_frames[0];\n          Buffer::ZeroCopyInputStreamImpl stream(std::move(frame.data_));\n          grpc::health::v1::HealthCheckRequest request;\n          ASSERT_TRUE(request.ParseFromZeroCopyStream(&stream));\n          EXPECT_EQ(\"service\", request.service());\n        }));\n    health_checker_->start();\n\n    EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _));\n    EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _))\n        .WillOnce(Return(45000));\n    expectHealthcheckStop(0, 45000);\n\n    // Host state should not be changed (remains healthy).\n    EXPECT_CALL(*this, onHostStatus(cluster_->prioritySet().getMockHostSet(0)->hosts_[0],\n                                    HealthTransition::Unchanged));\n    respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n    expectHostHealthy(true);\n  }\n\n  MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state));\n};\n\nclass GrpcHealthCheckerImplTest : public testing::Test, public GrpcHealthCheckerImplTestBase {};\n\n// Test single host check success.\nTEST_F(GrpcHealthCheckerImplTest, Success) { testSingleHostSuccess(absl::nullopt); }\n\nTEST_F(GrpcHealthCheckerImplTest, SuccessWithHostname) {\n  std::string expected_host = \"www.envoyproxy.io\";\n\n  setupServiceNameHC(absl::nullopt);\n\n  
envoy::config::endpoint::v3::Endpoint::HealthCheckConfig health_check_config;\n  health_check_config.set_hostname(expected_host);\n  auto test_host = std::make_shared<HostImpl>(\n      cluster_->info_, \"\", Network::Utility::resolveUrl(\"tcp://127.0.0.1:80\"), nullptr, 1,\n      envoy::config::core::v3::Locality(), health_check_config, 0,\n      envoy::config::core::v3::UNKNOWN);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host};\n  runHealthCheck(expected_host);\n}\n\nTEST_F(GrpcHealthCheckerImplTest, SuccessWithHostnameOverridesConfig) {\n  std::string expected_host = \"www.envoyproxy.io\";\n\n  setupServiceNameHC(\"foo.com\");\n\n  envoy::config::endpoint::v3::Endpoint::HealthCheckConfig health_check_config;\n  health_check_config.set_hostname(expected_host);\n  auto test_host = std::make_shared<HostImpl>(\n      cluster_->info_, \"\", Network::Utility::resolveUrl(\"tcp://127.0.0.1:80\"), nullptr, 1,\n      envoy::config::core::v3::Locality(), health_check_config, 0,\n      envoy::config::core::v3::UNKNOWN);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host};\n  runHealthCheck(expected_host);\n}\n\n// Test single host check success with custom authority.\nTEST_F(GrpcHealthCheckerImplTest, SuccessWithCustomAuthority) {\n  const std::string authority = \"www.envoyproxy.io\";\n  testSingleHostSuccess(authority);\n}\n\n// Test host check success when gRPC response payload is split between several incoming data chunks.\nTEST_F(GrpcHealthCheckerImplTest, SuccessResponseSplitBetweenChunks) {\n  setupServiceNameHC(absl::nullopt);\n  expectSingleHealthcheck(HealthTransition::Unchanged);\n\n  auto response_headers = std::make_unique<Http::TestResponseHeaderMapImpl>(\n      std::initializer_list<std::pair<std::string, std::string>>{\n          {\":status\", \"200\"},\n          {\"content-type\", \"application/grpc\"},\n      });\n  test_sessions_[0]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), false);\n\n  
grpc::health::v1::HealthCheckResponse response;\n  response.set_status(grpc::health::v1::HealthCheckResponse::SERVING);\n  auto data = Grpc::Common::serializeToGrpcFrame(response);\n\n  const char* raw_data = static_cast<char*>(data->linearize(data->length()));\n  const uint64_t chunk_size = data->length() / 5;\n  for (uint64_t offset = 0; offset < data->length(); offset += chunk_size) {\n    const uint64_t effective_size = std::min(chunk_size, data->length() - offset);\n    const auto chunk = std::make_unique<Buffer::OwnedImpl>(raw_data + offset, effective_size);\n    test_sessions_[0]->stream_response_callbacks_->decodeData(*chunk, false);\n  }\n\n  auto trailers = std::make_unique<Http::TestResponseTrailerMapImpl>(\n      std::initializer_list<std::pair<std::string, std::string>>{{\"grpc-status\", \"0\"}});\n  test_sessions_[0]->stream_response_callbacks_->decodeTrailers(std::move(trailers));\n\n  expectHostHealthy(true);\n}\n\n// Test host check success with multiple hosts.\nTEST_F(GrpcHealthCheckerImplTest, SuccessWithMultipleHosts) {\n  setupHC();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\"),\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:81\")};\n\n  expectHealthchecks(HealthTransition::Unchanged, 2);\n\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  respondServiceStatus(1, grpc::health::v1::HealthCheckResponse::SERVING);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[1]->health());\n}\n\n// Test host check success with multiple hosts across multiple priorities.\nTEST_F(GrpcHealthCheckerImplTest, SuccessWithMultipleHostSets) {\n  setupHC();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  
cluster_->prioritySet().getMockHostSet(1)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:81\")};\n\n  expectHealthchecks(HealthTransition::Unchanged, 2);\n\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  respondServiceStatus(1, grpc::health::v1::HealthCheckResponse::SERVING);\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n  EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(1)->hosts_[0]->health());\n}\n\n// Test stream-level watermarks does not interfere with health check.\nTEST_F(GrpcHealthCheckerImplTest, StreamReachesWatermarkDuringCheck) {\n  setupHC();\n  expectSingleHealthcheck(HealthTransition::Unchanged);\n\n  test_sessions_[0]->request_encoder_.stream_.runHighWatermarkCallbacks();\n  test_sessions_[0]->request_encoder_.stream_.runLowWatermarkCallbacks();\n\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test connection-level watermarks does not interfere with health check.\nTEST_F(GrpcHealthCheckerImplTest, ConnectionReachesWatermarkDuringCheck) {\n  setupHC();\n  expectSingleHealthcheck(HealthTransition::Unchanged);\n\n  test_sessions_[0]->client_connection_->runHighWatermarkCallbacks();\n  test_sessions_[0]->client_connection_->runLowWatermarkCallbacks();\n\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test health check on host without traffic sets larger unconfigurable interval for the next check.\nTEST_F(GrpcHealthCheckerImplTest, SuccessNoTraffic) {\n  setupHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  health_checker_->start();\n\n  // Default healthcheck interval for hosts without traffic is 60 seconds.\n  expectHealthcheckStop(0, 60000);\n  // Host state 
should not be changed (remains healthy).\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test first successful check immediately makes failed host available (without 2nd probe).\nTEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedSuccessFirst) {\n  setupHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet(\n      Host::HealthFlag::PENDING_ACTIVE_HC);\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  health_checker_->start();\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.max_interval\", _)).WillOnce(Return(500));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"health_check.min_interval\", _));\n  expectHealthcheckStop(0, 500);\n  // Fast success immediately moves us to healthy.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, true));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n  EXPECT_FALSE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::PENDING_ACTIVE_HC));\n}\n\n// Test host recovery after first failed check requires several successful checks.\nTEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedFailFirst) {\n  setupHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet(\n      Host::HealthFlag::PENDING_ACTIVE_HC);\n\n  
expectSessionCreate();\n  expectHealthcheckStart(0);\n  health_checker_->start();\n\n  // Failing first disables fast success.\n  expectHealthcheckStop(0);\n  // Host was unhealthy from the start, but we expect a state change due to the pending active hc\n  // flag changing.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING);\n  expectHostHealthy(false);\n  EXPECT_FALSE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::PENDING_ACTIVE_HC));\n\n  // Next successful healthcheck does not move host int healthy state (because we configured\n  // healthchecker this way).\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Host still unhealthy, need yet another healthcheck.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(false);\n\n  // 2nd successful healthcheck renders host healthy.\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, false));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test host recovery after explicit check failure requires several successful checks.\nTEST_F(GrpcHealthCheckerImplTest, GrpcHealthFail) {\n  setupHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  // Explicit healthcheck failure 
immediately renders host unhealthy.\n  expectHealthcheckStop(0);\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING);\n  expectHostHealthy(false);\n\n  // Next, we need 2 successful checks for host to become available again.\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Host still considered unhealthy.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(false);\n\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Host should has become healthy.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, false));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test disconnects produce network-type failures which does not lead to immediate unhealthy state.\nTEST_F(GrpcHealthCheckerImplTest, Disconnect) {\n  setupHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  // Network-type healthcheck failure should make host unhealthy only after 2nd event in a row.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  expectHostHealthy(true);\n\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  
expectHealthcheckStop(0);\n  // Now, host should be unhealthy.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  expectHostHealthy(false);\n}\n\nTEST_F(GrpcHealthCheckerImplTest, Timeout) {\n  setupHCWithUnhealthyThreshold(1);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n\n  expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  // Unhealthy threshold is 1 so first timeout causes unhealthy\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n  expectHostHealthy(false);\n}\n\n// Test timeouts produce network-type failures which does not lead to immediate unhealthy state.\nTEST_F(GrpcHealthCheckerImplTest, DoubleTimeout) {\n  setupHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  expectSessionCreate();\n\n  expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  // Timeouts are considered network failures and make host unhealthy also after 2nd event.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n  expectHostHealthy(true);\n\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  // Close connection. 
Timeouts and connection closes counts together.\n  test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  expectHostHealthy(false);\n}\n\n// Test adding and removal of hosts starts and closes healthcheck sessions.\nTEST_F(GrpcHealthCheckerImplTest, DynamicAddAndRemove) {\n  setupHC();\n  health_checker_->start();\n\n  expectSessionCreate();\n  expectStreamCreate(0);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks(\n      {cluster_->prioritySet().getMockHostSet(0)->hosts_.back()}, {});\n\n  HostVector removed{cluster_->prioritySet().getMockHostSet(0)->hosts_.back()};\n  cluster_->prioritySet().getMockHostSet(0)->hosts_.clear();\n  EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_));\n  cluster_->prioritySet().getMockHostSet(0)->runCallbacks({}, removed);\n}\n\nTEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) {\n  setupHealthCheckIntervalOverridesHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://128.0.0.1:80\")};\n  expectSessionCreate();\n  expectStreamCreate(0);\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  health_checker_->start();\n\n  // First check should respect no_traffic_interval setting.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(5000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  cluster_->info_->stats().upstream_cx_total_.inc();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  
test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Follow up successful checks should respect interval setting.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Follow up successful checks should respect interval setting.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // A logical failure is not considered a network failure, therefore the unhealthy threshold is\n  // ignored and health state changes immediately. 
Since the threshold is ignored, next health\n  // check respects \"unhealthy_interval\".\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent failing checks should respect unhealthy_interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent failing checks should respect unhealthy_interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // When transitioning to a successful state, checks should respect healthy_edge_interval. 
Health\n  // state should be delayed pending healthy threshold.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // After the healthy threshold is reached, health state should change while checks should respect\n  // the default interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, false));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent checks shouldn't change the state.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, 
enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // First failed check after a run o successful ones should respect unhealthy_edge_interval. A\n  // timeout, being a network type failure, should respect unhealthy threshold before changing the\n  // health state.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(3000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(3000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent failing checks should respect unhealthy_interval. 
As the unhealthy threshold is\n  // reached, health state should also change.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Remaining failing checks shouldn't change the state.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // When transitioning to a successful state, checks should respect healthy_edge_interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // After the healthy threshold is reached, health state should change while checks should respect\n  // the default interval.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logAddHealthy(_, _, false));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));\n  // Needed after a response is sent.\n  expectStreamCreate(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  // Subsequent checks shouldn't change the state.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n}\n\n// Test connection close between checks affects nothing.\nTEST_F(GrpcHealthCheckerImplTest, RemoteCloseBetweenChecks) {\n  setupHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n\n  // Connection closed between checks - 
nothing happens, just re-create client.\n  test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Test host state haven't changed.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test that we close connections on a healthy check when reuse_connection is false.\nTEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionBetweenChecks) {\n  setupNoReuseConnectionHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n\n  // A new client is created because we close the connection ourselves.\n  // See GrpcHealthCheckerImplTest.RemoteCloseBetweenChecks for how this works when the remote end\n  // closes the connection.\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Test host state haven't changed.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test that we close connections when a timeout occurs and reuse_connection is false.\nTEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionTimeout) {\n  setupNoReuseConnectionHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  
expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  // Timeouts are considered network failures and make host unhealthy also after 2nd event.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n  expectHostHealthy(true);\n\n  // A new client is created because we close the connection\n  // when a timeout occurs and connection reuse is disabled.\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Test host state haven't changed.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test that we close connections when a stream reset occurs and reuse_connection is false.\nTEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionStreamReset) {\n  setupNoReuseConnectionHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  // Resets are considered network failures and make host unhealthy also after 2nd event.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  test_sessions_[0]->request_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n  expectHostHealthy(true);\n\n  // A new client is created because we close the connection\n  // when a stream reset occurs and connection reuse is disabled.\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Test host state haven't changed.\n  
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test UNKNOWN health status is considered unhealthy.\nTEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknown) {\n  setupHC();\n  expectSingleHealthcheck(HealthTransition::Changed);\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::UNKNOWN);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\n// Test SERVICE_UNKNOWN health status is considered unhealthy.\nTEST_F(GrpcHealthCheckerImplTest, GrpcFailServiceUnknown) {\n  setupHC();\n  expectSingleHealthcheck(HealthTransition::Changed);\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVICE_UNKNOWN);\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\n// Test non existent health status enum is considered unhealthy.\nTEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknownHealthStatus) {\n  setupHC();\n  expectSingleHealthcheck(HealthTransition::Changed);\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n\n  respondServiceStatus(0, static_cast<grpc::health::v1::HealthCheckResponse::ServingStatus>(999));\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      
Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\n// Test receiving GOAWAY (error) is interpreted as connection close event.\nTEST_F(GrpcHealthCheckerImplTest, GoAwayErrorProbeInProgress) {\n  // FailureType::Network will be issued, it will render host unhealthy only if unhealthy_threshold\n  // is reached.\n  setupHCWithUnhealthyThreshold(1);\n  expectSingleHealthcheck(HealthTransition::Changed);\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n\n  // GOAWAY with non-NO_ERROR code will result in a healthcheck failure\n  // and the connection closing.\n  test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::Other);\n\n  EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(\n      Host::HealthFlag::FAILED_ACTIVE_HC));\n  EXPECT_EQ(Host::Health::Unhealthy,\n            cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());\n}\n\n// Test receiving GOAWAY (no error) is handled gracefully while a check is in progress.\nTEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) {\n  setupHCWithUnhealthyThreshold(/*threshold=*/1);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n\n  // GOAWAY with NO_ERROR code during check should be handle gracefully.\n  test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError);\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n\n  // GOAWAY should cause a new connection to be created.\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  
test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Test host state haven't changed.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\n// Test receiving GOAWAY (no error) closes connection after an in progress probe times outs.\nTEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressTimeout) {\n  setupHCWithUnhealthyThreshold(/*threshold=*/1);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  // Unhealthy threshold is 1 so first timeout causes unhealthy\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n\n  // GOAWAY during check should be handled gracefully.\n  test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError);\n  expectHostHealthy(true);\n\n  test_sessions_[0]->timeout_timer_->invokeCallback();\n  expectHostHealthy(false);\n\n  // GOAWAY should cause a new connection to be created.\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Healthy threshold is 2, so the we'ere pending a state change.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(false);\n}\n\n// Test receiving GOAWAY (no error) closes connection after an unexpected stream reset.\nTEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressStreamReset) {\n  setupHCWithUnhealthyThreshold(/*threshold=*/1);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      
makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  // Unhealthy threshold is 1 so first stream reset causes unhealthy\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n\n  // GOAWAY during check should be handled gracefully.\n  test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError);\n  expectHostHealthy(true);\n\n  test_sessions_[0]->request_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset);\n  expectHostHealthy(false);\n\n  // GOAWAY should cause a new connection to be created.\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Healthy threshold is 2, so the we'ere pending a state change.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(false);\n}\n\n// Test receiving GOAWAY (no error) closes connection after a bad response.\nTEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressBadResponse) {\n  setupHCWithUnhealthyThreshold(/*threshold=*/1);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  // Unhealthy threshold is 1 so first bad response causes unhealthy\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n\n  // GOAWAY during check should be handled gracefully.\n  
test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError);\n  expectHostHealthy(true);\n\n  respondResponseSpec(0, ResponseSpec{{{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n                                      {ResponseSpec::invalidChunk()},\n                                      {}});\n  expectHostHealthy(false);\n\n  // GOAWAY should cause a new connection to be created.\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Healthy threshold is 2, so the we'ere pending a state change.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(false);\n}\n\n// Test receiving GOAWAY (no error) and a connection close.\nTEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressConnectionClose) {\n  setupHCWithUnhealthyThreshold(/*threshold=*/1);\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  // Unhealthy threshold is 1 so first bad response causes unhealthy\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n\n  // GOAWAY during check should be handled gracefully.\n  test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError);\n  expectHostHealthy(true);\n\n  test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n  expectHostHealthy(false);\n\n  // GOAWAY should cause a new connection to be created.\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // 
Healthy threshold is 2, so the we'ere pending a state change.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(false);\n}\n\n// Test receiving GOAWAY between checks affects nothing.\nTEST_F(GrpcHealthCheckerImplTest, GoAwayBetweenChecks) {\n  setupHC();\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectHealthcheckStart(0);\n  health_checker_->start();\n\n  expectHealthcheckStop(0);\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n\n  // GOAWAY between checks should go unnoticed.\n  test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError);\n\n  expectClientCreate(0);\n  expectHealthcheckStart(0);\n  test_sessions_[0]->interval_timer_->invokeCallback();\n\n  expectHealthcheckStop(0);\n  // Test host state haven't changed.\n  EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged));\n  respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING);\n  expectHostHealthy(true);\n}\n\nclass BadResponseGrpcHealthCheckerImplTest\n    : public testing::TestWithParam<GrpcHealthCheckerImplTest::ResponseSpec>,\n      public GrpcHealthCheckerImplTestBase {};\n\nINSTANTIATE_TEST_SUITE_P(\n    BadResponse, BadResponseGrpcHealthCheckerImplTest,\n    testing::ValuesIn(std::vector<GrpcHealthCheckerImplTest::ResponseSpec>{\n        // Non-200 response.\n        {\n            {{\":status\", \"500\"}},\n            {},\n            {},\n        },\n        // Non-200 response with gRPC status.\n        {\n            {{\":status\", \"500\"}, {\"grpc-status\", \"2\"}},\n            {},\n            {},\n        },\n        // Missing content-type.\n        {\n            {{\":status\", \"200\"}},\n     
       {},\n            {},\n        },\n        // End stream on response headers.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n            {},\n            {},\n        },\n        // Non-OK gRPC status in headers.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}, {\"grpc-status\", \"2\"}},\n            {},\n            {},\n        },\n        // Non-OK gRPC status\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n            {GrpcHealthCheckerImplTest::ResponseSpec::servingResponse()},\n            {{\"grpc-status\", \"2\"}},\n        },\n        // Missing body.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}, {\"grpc-status\", \"0\"}},\n            {},\n            {},\n        },\n        // Compressed body.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n            {GrpcHealthCheckerImplTest::ResponseSpec::invalidPayload(Grpc::GRPC_FH_COMPRESSED,\n                                                                     true)},\n            {},\n        },\n        // Invalid proto message.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n            {GrpcHealthCheckerImplTest::ResponseSpec::invalidPayload(Grpc::GRPC_FH_DEFAULT, false)},\n            {},\n        },\n        // Duplicate response.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n            {GrpcHealthCheckerImplTest::ResponseSpec::servingResponse(),\n             GrpcHealthCheckerImplTest::ResponseSpec::servingResponse()},\n            {},\n        },\n        // Invalid response.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n            {GrpcHealthCheckerImplTest::ResponseSpec::invalidChunk()},\n            {},\n        },\n        // No 
trailers.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n            {GrpcHealthCheckerImplTest::ResponseSpec::servingResponse()},\n            {},\n        },\n        // No gRPC status in trailer.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n            {GrpcHealthCheckerImplTest::ResponseSpec::servingResponse()},\n            {{\"some-header\", \"1\"}},\n        },\n        // Invalid gRPC status.\n        {\n            {{\":status\", \"200\"}, {\"content-type\", \"application/grpc\"}},\n            {GrpcHealthCheckerImplTest::ResponseSpec::servingResponse()},\n            {{\"grpc-status\", \"invalid\"}},\n        },\n    }));\n\n// Test different cases of invalid gRPC response makes host unhealthy.\nTEST_P(BadResponseGrpcHealthCheckerImplTest, GrpcBadResponse) {\n  setupHC();\n  expectSingleHealthcheck(HealthTransition::Changed);\n  EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));\n\n  ResponseSpec spec = GetParam();\n  respondResponseSpec(0, std::move(spec));\n  expectHostHealthy(false);\n}\n\nTEST(Printer, HealthStatePrinter) {\n  std::ostringstream healthy;\n  healthy << HealthState::Healthy;\n  EXPECT_EQ(\"Healthy\", healthy.str());\n\n  std::ostringstream unhealthy;\n  unhealthy << HealthState::Unhealthy;\n  EXPECT_EQ(\"Unhealthy\", unhealthy.str());\n}\n\nTEST(Printer, HealthTransitionPrinter) {\n  std::ostringstream changed;\n  changed << HealthTransition::Changed;\n  EXPECT_EQ(\"Changed\", changed.str());\n\n  std::ostringstream unchanged;\n  unchanged << HealthTransition::Unchanged;\n  EXPECT_EQ(\"Unchanged\", unchanged.str());\n}\n\nTEST(HealthCheckEventLoggerImplTest, All) {\n  AccessLog::MockAccessLogManager log_manager;\n  std::shared_ptr<AccessLog::MockAccessLogFile> file(new AccessLog::MockAccessLogFile());\n  EXPECT_CALL(log_manager, 
createAccessLog(\"foo\")).WillOnce(Return(file));\n\n  std::shared_ptr<MockHostDescription> host(new NiceMock<MockHostDescription>());\n  NiceMock<MockClusterInfo> cluster;\n  ON_CALL(*host, cluster()).WillByDefault(ReturnRef(cluster));\n\n  Event::SimulatedTimeSystem time_system;\n  // This is rendered as \"2009-02-13T23:31:31.234Z\".\n  time_system.setSystemTime(std::chrono::milliseconds(1234567891234));\n\n  HealthCheckEventLoggerImpl event_logger(log_manager, time_system, \"foo\");\n\n  EXPECT_CALL(*file, write(absl::string_view{\n                         \"{\\\"health_checker_type\\\":\\\"HTTP\\\",\\\"host\\\":{\\\"socket_address\\\":{\"\n                         \"\\\"protocol\\\":\\\"TCP\\\",\\\"address\\\":\\\"10.0.0.1\\\",\\\"resolver_name\\\":\\\"\\\",\"\n                         \"\\\"ipv4_compat\\\":false,\\\"port_value\\\":443}},\\\"cluster_name\\\":\\\"fake_\"\n                         \"cluster\\\",\\\"eject_unhealthy_event\\\":{\\\"failure_type\\\":\\\"ACTIVE\\\"},\"\n                         \"\\\"timestamp\\\":\\\"2009-02-13T23:31:31.234Z\\\"}\\n\"}));\n  event_logger.logEjectUnhealthy(envoy::data::core::v3::HTTP, host, envoy::data::core::v3::ACTIVE);\n\n  EXPECT_CALL(*file, write(absl::string_view{\n                         \"{\\\"health_checker_type\\\":\\\"HTTP\\\",\\\"host\\\":{\\\"socket_address\\\":{\"\n                         \"\\\"protocol\\\":\\\"TCP\\\",\\\"address\\\":\\\"10.0.0.1\\\",\\\"resolver_name\\\":\\\"\\\",\"\n                         \"\\\"ipv4_compat\\\":false,\\\"port_value\\\":443}},\\\"cluster_name\\\":\\\"fake_\"\n                         \"cluster\\\",\\\"add_healthy_event\\\":{\\\"first_check\\\":false},\\\"timestamp\\\":\"\n                         \"\\\"2009-02-13T23:31:31.234Z\\\"}\\n\"}));\n  event_logger.logAddHealthy(envoy::data::core::v3::HTTP, host, false);\n\n  EXPECT_CALL(*file, write(absl::string_view{\n                         
\"{\\\"health_checker_type\\\":\\\"HTTP\\\",\\\"host\\\":{\\\"socket_address\\\":{\"\n                         \"\\\"protocol\\\":\\\"TCP\\\",\\\"address\\\":\\\"10.0.0.1\\\",\\\"resolver_name\\\":\\\"\\\",\"\n                         \"\\\"ipv4_compat\\\":false,\\\"port_value\\\":443}},\\\"cluster_name\\\":\\\"fake_\"\n                         \"cluster\\\",\\\"health_check_failure_event\\\":{\\\"failure_type\\\":\\\"ACTIVE\\\",\"\n                         \"\\\"first_check\\\":false},\"\n                         \"\\\"timestamp\\\":\\\"2009-02-13T23:31:31.234Z\\\"}\\n\"}));\n  event_logger.logUnhealthy(envoy::data::core::v3::HTTP, host, envoy::data::core::v3::ACTIVE,\n                            false);\n\n  EXPECT_CALL(*file, write(absl::string_view{\n                         \"{\\\"health_checker_type\\\":\\\"HTTP\\\",\\\"host\\\":{\\\"socket_address\\\":{\"\n                         \"\\\"protocol\\\":\\\"TCP\\\",\\\"address\\\":\\\"10.0.0.1\\\",\\\"resolver_name\\\":\\\"\\\",\"\n                         \"\\\"ipv4_compat\\\":false,\\\"port_value\\\":443}},\\\"cluster_name\\\":\\\"fake_\"\n                         \"cluster\\\",\\\"degraded_healthy_host\\\":{},\"\n                         \"\\\"timestamp\\\":\\\"2009-02-13T23:31:31.234Z\\\"}\\n\"}));\n  event_logger.logDegraded(envoy::data::core::v3::HTTP, host);\n\n  EXPECT_CALL(*file, write(absl::string_view{\n                         \"{\\\"health_checker_type\\\":\\\"HTTP\\\",\\\"host\\\":{\\\"socket_address\\\":{\"\n                         \"\\\"protocol\\\":\\\"TCP\\\",\\\"address\\\":\\\"10.0.0.1\\\",\\\"resolver_name\\\":\\\"\\\",\"\n                         \"\\\"ipv4_compat\\\":false,\\\"port_value\\\":443}},\\\"cluster_name\\\":\\\"fake_\"\n                         \"cluster\\\",\\\"no_longer_degraded_host\\\":{},\"\n                         \"\\\"timestamp\\\":\\\"2009-02-13T23:31:31.234Z\\\"}\\n\"}));\n  event_logger.logNoLongerDegraded(envoy::data::core::v3::HTTP, host);\n}\n\n// Validate that 
the proto constraints don't allow zero length edge durations.\nTEST(HealthCheckProto, Validation) {\n  {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    healthy_threshold: 1\n    unhealthy_threshold: 1\n    no_traffic_interval: 0s\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n    envoy::config::core::v3::HealthCheck health_check_proto;\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException,\n                            \"Proto constraint validation failed.*value must be greater than.*\");\n  }\n  {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    healthy_threshold: 1\n    unhealthy_threshold: 1\n    unhealthy_interval: 0s\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n    envoy::config::core::v3::HealthCheck health_check_proto;\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException,\n                            \"Proto constraint validation failed.*value must be greater than.*\");\n  }\n  {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    healthy_threshold: 1\n    unhealthy_threshold: 1\n    unhealthy_edge_interval: 0s\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n    envoy::config::core::v3::HealthCheck health_check_proto;\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException,\n                            \"Proto constraint validation failed.*value must be greater than.*\");\n  }\n  {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    healthy_threshold: 1\n    unhealthy_threshold: 1\n    healthy_edge_interval: 0s\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      
path: /healthcheck\n    )EOF\";\n    envoy::config::core::v3::HealthCheck health_check_proto;\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException,\n                            \"Proto constraint validation failed.*value must be greater than.*\");\n  }\n  {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    unhealthy_threshold: 1\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n    envoy::config::core::v3::HealthCheck health_check_proto;\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException,\n                            \"Proto constraint validation failed.*value is required.*\");\n  }\n  {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    healthy_threshold: 1\n    http_health_check:\n      service_name_matcher:\n        prefix: locations\n      path: /healthcheck\n    )EOF\";\n    envoy::config::core::v3::HealthCheck health_check_proto;\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException,\n                            \"Proto constraint validation failed.*value is required.*\");\n  }\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/health_checker_impl_test_utils.cc",
    "content": "#include \"test/common/upstream/health_checker_impl_test_utils.h\"\n\n#include \"test/common/upstream/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nvoid HttpHealthCheckerImplTestBase::expectSessionCreate(\n    const HostWithHealthCheckMap& health_check_map) {\n  // Expectations are in LIFO order.\n  TestSessionPtr new_test_session(new TestSession());\n  test_sessions_.emplace_back(std::move(new_test_session));\n  TestSession& test_session = *test_sessions_.back();\n  test_session.timeout_timer_ = new Event::MockTimer(&dispatcher_);\n  test_session.interval_timer_ = new Event::MockTimer(&dispatcher_);\n  expectClientCreate(test_sessions_.size() - 1, health_check_map);\n}\n\nvoid HttpHealthCheckerImplTestBase::expectClientCreate(\n    size_t index, const HostWithHealthCheckMap& health_check_map) {\n  TestSession& test_session = *test_sessions_[index];\n  test_session.codec_ = new NiceMock<Http::MockClientConnection>();\n  ON_CALL(*test_session.codec_, protocol()).WillByDefault(testing::Return(Http::Protocol::Http11));\n  test_session.client_connection_ = new NiceMock<Network::MockClientConnection>();\n  connection_index_.push_back(index);\n  codec_index_.push_back(index);\n\n  EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _))\n      .Times(testing::AnyNumber())\n      .WillRepeatedly(testing::InvokeWithoutArgs([&]() -> Network::ClientConnection* {\n        uint32_t index = connection_index_.front();\n        connection_index_.pop_front();\n        return test_sessions_[index]->client_connection_;\n      }));\n  EXPECT_CALL(*health_checker_, createCodecClient_(_))\n      .WillRepeatedly(\n          Invoke([&](Upstream::Host::CreateConnectionData& conn_data) -> Http::CodecClient* {\n            if (!health_check_map.empty()) {\n              const auto& health_check_config =\n                  health_check_map.at(conn_data.host_description_->address()->asString());\n       
       // To make sure health checker checks the correct port.\n              EXPECT_EQ(health_check_config.port_value(),\n                        conn_data.host_description_->healthCheckAddress()->ip()->port());\n            }\n            uint32_t index = codec_index_.front();\n            codec_index_.pop_front();\n            TestSession& test_session = *test_sessions_[index];\n            std::shared_ptr<Upstream::MockClusterInfo> cluster{\n                new NiceMock<Upstream::MockClusterInfo>()};\n            Event::MockDispatcher dispatcher_;\n            return new CodecClientForTest(\n                Http::CodecClient::Type::HTTP1, std::move(conn_data.connection_),\n                test_session.codec_, nullptr,\n                Upstream::makeTestHost(cluster, \"tcp://127.0.0.1:9000\"), dispatcher_);\n          }));\n}\n\nvoid HttpHealthCheckerImplTestBase::expectStreamCreate(size_t index) {\n  test_sessions_[index]->request_encoder_.stream_.callbacks_.clear();\n  EXPECT_CALL(*test_sessions_[index]->codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&test_sessions_[index]->stream_response_callbacks_),\n                      ReturnRef(test_sessions_[index]->request_encoder_)));\n}\n\nvoid HttpHealthCheckerImplTestBase::expectSessionCreate() {\n  expectSessionCreate(health_checker_map_);\n}\nvoid HttpHealthCheckerImplTestBase::expectClientCreate(size_t index) {\n  expectClientCreate(index, health_checker_map_);\n}\n\n// This is needed to put expectations in LIFO order. 
The unit tests use inSequence, which makes\n// expectations FIFO.\nvoid TcpHealthCheckerImplTestBase::expectSessionCreate() {\n  timeout_timer_ = new Event::MockTimer(&dispatcher_);\n  interval_timer_ = new Event::MockTimer(&dispatcher_);\n}\n\nvoid TcpHealthCheckerImplTestBase::expectClientCreate() {\n  connection_ = new NiceMock<Network::MockClientConnection>();\n  EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _))\n      .WillOnce(testing::Return(connection_));\n  EXPECT_CALL(*connection_, addReadFilter(_)).WillOnce(testing::SaveArg<0>(&read_filter_));\n}\n\nGrpcHealthCheckerImplTestBaseUtils::GrpcHealthCheckerImplTestBaseUtils() {\n  EXPECT_CALL(*cluster_->info_, features())\n      .WillRepeatedly(testing::Return(Upstream::ClusterInfo::Features::HTTP2));\n}\n\nvoid GrpcHealthCheckerImplTestBaseUtils::expectSessionCreate() {\n  // Expectations are in LIFO order.\n  TestSessionPtr new_test_session(new TestSession());\n  test_sessions_.emplace_back(std::move(new_test_session));\n  TestSession& test_session = *test_sessions_.back();\n  test_session.timeout_timer_ = new Event::MockTimer(&dispatcher_);\n  test_session.interval_timer_ = new Event::MockTimer(&dispatcher_);\n  expectClientCreate(test_sessions_.size() - 1);\n}\n\nvoid GrpcHealthCheckerImplTestBaseUtils::expectClientCreate(size_t index) {\n  TestSession& test_session = *test_sessions_[index];\n  test_session.codec_ = new NiceMock<Http::MockClientConnection>();\n  test_session.client_connection_ = new NiceMock<Network::MockClientConnection>();\n  connection_index_.push_back(index);\n  codec_index_.push_back(index);\n\n  EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _))\n      .Times(testing::AnyNumber())\n      .WillRepeatedly(testing::InvokeWithoutArgs([&]() -> Network::ClientConnection* {\n        uint32_t index = connection_index_.front();\n        connection_index_.pop_front();\n        return test_sessions_[index]->client_connection_;\n      }));\n\n  
EXPECT_CALL(*health_checker_, createCodecClient_(_))\n      .WillRepeatedly(\n          Invoke([&](Upstream::Host::CreateConnectionData& conn_data) -> Http::CodecClient* {\n            uint32_t index = codec_index_.front();\n            codec_index_.pop_front();\n            TestSession& test_session = *test_sessions_[index];\n            std::shared_ptr<Upstream::MockClusterInfo> cluster{\n                new NiceMock<Upstream::MockClusterInfo>()};\n            Event::MockDispatcher dispatcher_;\n\n            test_session.codec_client_ = new CodecClientForTest(\n                Http::CodecClient::Type::HTTP1, std::move(conn_data.connection_),\n                test_session.codec_, nullptr,\n                Upstream::makeTestHost(cluster, \"tcp://127.0.0.1:9000\"), dispatcher_);\n            return test_session.codec_client_;\n          }));\n}\n\nvoid GrpcHealthCheckerImplTestBaseUtils::expectStreamCreate(size_t index) {\n  test_sessions_[index]->request_encoder_.stream_.callbacks_.clear();\n  EXPECT_CALL(*test_sessions_[index]->codec_, newStream(_))\n      .WillOnce(DoAll(SaveArgAddress(&test_sessions_[index]->stream_response_callbacks_),\n                      ReturnRef(test_sessions_[index]->request_encoder_)));\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/health_checker_impl_test_utils.h",
    "content": "#include <vector>\n\n#include \"common/upstream/health_checker_impl.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/mocks/upstream/health_check_event_logger.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass HealthCheckerTestBase {\npublic:\n  std::shared_ptr<MockClusterMockPrioritySet> cluster_{\n      std::make_shared<NiceMock<MockClusterMockPrioritySet>>()};\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  std::unique_ptr<MockHealthCheckEventLogger> event_logger_storage_{\n      std::make_unique<MockHealthCheckEventLogger>()};\n  MockHealthCheckEventLogger& event_logger_{*event_logger_storage_};\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Runtime::MockLoader> runtime_;\n};\n\nclass TestHttpHealthCheckerImpl : public HttpHealthCheckerImpl {\npublic:\n  using HttpHealthCheckerImpl::HttpHealthCheckerImpl;\n\n  Http::CodecClient* createCodecClient(Upstream::Host::CreateConnectionData& conn_data) override {\n    return createCodecClient_(conn_data);\n  };\n\n  // HttpHealthCheckerImpl\n  MOCK_METHOD(Http::CodecClient*, createCodecClient_, (Upstream::Host::CreateConnectionData&));\n\n  Http::CodecClient::Type codecClientType() { return codec_client_type_; }\n};\n\nclass HttpHealthCheckerImplTestBase : public HealthCheckerTestBase {\npublic:\n  struct TestSession {\n    Event::MockTimer* interval_timer_{};\n    Event::MockTimer* timeout_timer_{};\n    Http::MockClientConnection* codec_{};\n    Stats::IsolatedStoreImpl stats_store_;\n    Network::MockClientConnection* client_connection_{};\n    NiceMock<Http::MockRequestEncoder> request_encoder_;\n    Http::ResponseDecoder* stream_response_callbacks_{};\n  };\n\n  using TestSessionPtr = std::unique_ptr<TestSession>;\n  using HostWithHealthCheckMap =\n      absl::node_hash_map<std::string,\n         
                 const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig>;\n\n  void expectSessionCreate(const HostWithHealthCheckMap& health_check_map);\n\n  void expectClientCreate(size_t index, const HostWithHealthCheckMap& health_check_map);\n\n  void expectStreamCreate(size_t index);\n\n  void expectSessionCreate();\n  void expectClientCreate(size_t index);\n\n  std::vector<TestSessionPtr> test_sessions_;\n  std::shared_ptr<TestHttpHealthCheckerImpl> health_checker_;\n  std::list<uint32_t> connection_index_{};\n  std::list<uint32_t> codec_index_{};\n  const HostWithHealthCheckMap health_checker_map_{};\n};\n\n// TODO(zasweq): This class here isn't currently being used in the unit test class.\n// The class here expects the creates the timeout first, then the interval. This is due\n// to the normal expectation call to be opposite, or LIFO (Last in, First Out). The InSequence\n// object makes the tcp health checker unit tests FIFO (First in, First out). We should standardize\n// this amongst the three unit test classes.\nclass TcpHealthCheckerImplTestBase : public HealthCheckerTestBase {\npublic:\n  void expectSessionCreate();\n  void expectClientCreate();\n\n  std::shared_ptr<TcpHealthCheckerImpl> health_checker_;\n  Network::MockClientConnection* connection_{};\n  Event::MockTimer* timeout_timer_{};\n  Event::MockTimer* interval_timer_{};\n  Network::ReadFilterSharedPtr read_filter_;\n};\n\nclass TestGrpcHealthCheckerImpl : public GrpcHealthCheckerImpl {\npublic:\n  using GrpcHealthCheckerImpl::GrpcHealthCheckerImpl;\n\n  Http::CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& conn_data) override {\n    auto codec_client = createCodecClient_(conn_data);\n    return Http::CodecClientPtr(codec_client);\n  };\n\n  // GrpcHealthCheckerImpl\n  MOCK_METHOD(Http::CodecClient*, createCodecClient_, (Upstream::Host::CreateConnectionData&));\n};\n\nclass GrpcHealthCheckerImplTestBaseUtils : public HealthCheckerTestBase {\npublic:\n  struct 
TestSession {\n    TestSession() = default;\n\n    Event::MockTimer* interval_timer_{};\n    Event::MockTimer* timeout_timer_{};\n    Http::MockClientConnection* codec_{};\n    Stats::IsolatedStoreImpl stats_store_;\n    Network::MockClientConnection* client_connection_{};\n    NiceMock<Http::MockRequestEncoder> request_encoder_;\n    Http::ResponseDecoder* stream_response_callbacks_{};\n    CodecClientForTest* codec_client_{};\n  };\n\n  using TestSessionPtr = std::unique_ptr<TestSession>;\n\n  GrpcHealthCheckerImplTestBaseUtils();\n\n  void expectSessionCreate();\n  void expectClientCreate(size_t index);\n  void expectStreamCreate(size_t index);\n\n  std::vector<TestSessionPtr> test_sessions_;\n  std::shared_ptr<TestGrpcHealthCheckerImpl> health_checker_;\n  std::list<uint32_t> connection_index_{};\n  std::list<uint32_t> codec_index_{};\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/host_stats_test.cc",
    "content": "#include \"envoy/upstream/host_description.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\n// Verify that counters are sorted by name.\nTEST(HostStatsTest, CountersSortedByName) {\n  HostStats host_stats;\n  std::vector<std::pair<absl::string_view, Stats::PrimitiveCounterReference>> counters =\n      host_stats.counters();\n  EXPECT_FALSE(counters.empty());\n\n  for (size_t i = 1; i < counters.size(); ++i) {\n    EXPECT_LT(counters[i - 1].first, counters[i].first);\n  }\n}\n\n// Verify that gauges are sorted by name.\nTEST(HostStatsTest, GaugesSortedByName) {\n  HostStats host_stats;\n  std::vector<std::pair<absl::string_view, Stats::PrimitiveGaugeReference>> gauges =\n      host_stats.gauges();\n  EXPECT_FALSE(gauges.empty());\n\n  for (size_t i = 1; i < gauges.size(); ++i) {\n    EXPECT_LT(gauges[i - 1].first, gauges[i].first);\n  }\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/host_utility_test.cc",
    "content": "#include \"common/network/utility.h\"\n#include \"common/upstream/host_utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nTEST(HostUtilityTest, All) {\n  auto cluster = std::make_shared<NiceMock<MockClusterInfo>>();\n  HostSharedPtr host = makeTestHost(cluster, \"tcp://127.0.0.1:80\");\n  EXPECT_EQ(\"healthy\", HostUtility::healthFlagsToString(*host));\n\n  host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  EXPECT_EQ(\"/failed_active_hc\", HostUtility::healthFlagsToString(*host));\n\n  host->healthFlagSet(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  EXPECT_EQ(\"/failed_active_hc/failed_outlier_check\", HostUtility::healthFlagsToString(*host));\n\n  host->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n  EXPECT_EQ(\"/failed_outlier_check\", HostUtility::healthFlagsToString(*host));\n\n  host->healthFlagSet(Host::HealthFlag::FAILED_EDS_HEALTH);\n  EXPECT_EQ(\"/failed_outlier_check/failed_eds_health\", HostUtility::healthFlagsToString(*host));\n\n  host->healthFlagClear(Host::HealthFlag::FAILED_EDS_HEALTH);\n  EXPECT_EQ(\"/failed_outlier_check\", HostUtility::healthFlagsToString(*host));\n\n  // Invokes healthFlagSet for each health flag.\n#define SET_HEALTH_FLAG(name, notused) host->healthFlagSet(Host::HealthFlag::name);\n  HEALTH_FLAG_ENUM_VALUES(SET_HEALTH_FLAG)\n#undef SET_HEALTH_FLAG\n  EXPECT_EQ(\"/failed_active_hc/failed_outlier_check/failed_eds_health/degraded_active_hc/\"\n            \"degraded_eds_health/pending_dynamic_removal/pending_active_hc\",\n            HostUtility::healthFlagsToString(*host));\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/load_balancer_benchmark.cc",
    "content": "// Usage: bazel run //test/common/upstream:load_balancer_benchmark\n\n#include <memory>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/common/random_generator.h\"\n#include \"common/memory/stats.h\"\n#include \"common/upstream/maglev_lb.h\"\n#include \"common/upstream/ring_hash_lb.h\"\n#include \"common/upstream/subset_lb.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/benchmark/main.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass BaseTester {\npublic:\n  static constexpr absl::string_view metadata_key = \"key\";\n  // We weight the first weighted_subset_percent of hosts with weight.\n  BaseTester(uint64_t num_hosts, uint32_t weighted_subset_percent = 0, uint32_t weight = 0,\n             bool attach_metadata = false) {\n    HostVector hosts;\n    ASSERT(num_hosts < 65536);\n    for (uint64_t i = 0; i < num_hosts; i++) {\n      const bool should_weight = i < num_hosts * (weighted_subset_percent / 100.0);\n      const std::string url = fmt::format(\"tcp://10.0.{}.{}:6379\", i / 256, i % 256);\n      const auto effective_weight = should_weight ? 
weight : 1;\n      if (attach_metadata) {\n        envoy::config::core::v3::Metadata metadata;\n        ProtobufWkt::Value value;\n        value.set_number_value(i);\n        ProtobufWkt::Struct& map =\n            (*metadata.mutable_filter_metadata())[Config::MetadataFilters::get().ENVOY_LB];\n        (*map.mutable_fields())[std::string(metadata_key)] = value;\n\n        hosts.push_back(makeTestHost(info_, url, metadata, effective_weight));\n      } else {\n        hosts.push_back(makeTestHost(info_, url, effective_weight));\n      }\n    }\n\n    HostVectorConstSharedPtr updated_hosts = std::make_shared<HostVector>(hosts);\n    HostsPerLocalityConstSharedPtr hosts_per_locality = makeHostsPerLocality({hosts});\n    priority_set_.updateHosts(0, HostSetImpl::partitionHosts(updated_hosts, hosts_per_locality), {},\n                              hosts, {}, absl::nullopt);\n    local_priority_set_.updateHosts(0,\n                                    HostSetImpl::partitionHosts(updated_hosts, hosts_per_locality),\n                                    {}, hosts, {}, absl::nullopt);\n  }\n\n  Envoy::Thread::MutexBasicLockable lock_;\n  // Reduce default log level to warn while running this benchmark to avoid problems due to\n  // excessive debug logging in upstream_impl.cc\n  Envoy::Logger::Context logging_context_{spdlog::level::warn,\n                                          Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock_, false};\n\n  PrioritySetImpl priority_set_;\n  PrioritySetImpl local_priority_set_;\n  Stats::IsolatedStoreImpl stats_store_;\n  ClusterStats stats_{ClusterInfoImpl::generateStats(stats_store_)};\n  NiceMock<Runtime::MockLoader> runtime_;\n  Random::RandomGeneratorImpl random_;\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};\n};\n\nclass RoundRobinTester : public BaseTester {\npublic:\n  RoundRobinTester(uint64_t num_hosts, uint32_t 
weighted_subset_percent = 0, uint32_t weight = 0)\n      : BaseTester(num_hosts, weighted_subset_percent, weight) {}\n\n  void initialize() {\n    lb_ = std::make_unique<RoundRobinLoadBalancer>(priority_set_, &local_priority_set_, stats_,\n                                                   runtime_, random_, common_config_);\n  }\n\n  std::unique_ptr<RoundRobinLoadBalancer> lb_;\n};\n\nclass LeastRequestTester : public BaseTester {\npublic:\n  LeastRequestTester(uint64_t num_hosts, uint32_t choice_count) : BaseTester(num_hosts) {\n    envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config;\n    lr_lb_config.mutable_choice_count()->set_value(choice_count);\n    lb_ =\n        std::make_unique<LeastRequestLoadBalancer>(priority_set_, &local_priority_set_, stats_,\n                                                   runtime_, random_, common_config_, lr_lb_config);\n  }\n\n  std::unique_ptr<LeastRequestLoadBalancer> lb_;\n};\n\nvoid benchmarkRoundRobinLoadBalancerBuild(::benchmark::State& state) {\n  const uint64_t num_hosts = state.range(0);\n  const uint64_t weighted_subset_percent = state.range(1);\n  const uint64_t weight = state.range(2);\n\n  if (benchmark::skipExpensiveBenchmarks() && num_hosts > 10000) {\n    state.SkipWithError(\"Skipping expensive benchmark\");\n    return;\n  }\n\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    state.PauseTiming();\n    const size_t start_tester_mem = Memory::Stats::totalCurrentlyAllocated();\n    RoundRobinTester tester(num_hosts, weighted_subset_percent, weight);\n    const size_t end_tester_mem = Memory::Stats::totalCurrentlyAllocated();\n    const size_t start_mem = Memory::Stats::totalCurrentlyAllocated();\n\n    // We are only interested in timing the initial build.\n    state.ResumeTiming();\n    tester.initialize();\n    state.PauseTiming();\n    const size_t end_mem = Memory::Stats::totalCurrentlyAllocated();\n    state.counters[\"tester_memory\"] = end_tester_mem - 
start_tester_mem;\n    state.counters[\"memory\"] = end_mem - start_mem;\n    state.counters[\"memory_per_host\"] = (end_mem - start_mem) / num_hosts;\n    state.ResumeTiming();\n  }\n}\nBENCHMARK(benchmarkRoundRobinLoadBalancerBuild)\n    ->Args({1, 0, 1})\n    ->Args({500, 0, 1})\n    ->Args({500, 50, 50})\n    ->Args({500, 100, 50})\n    ->Args({2500, 0, 1})\n    ->Args({2500, 50, 50})\n    ->Args({2500, 100, 50})\n    ->Args({10000, 0, 1})\n    ->Args({10000, 50, 50})\n    ->Args({10000, 100, 50})\n    ->Args({25000, 0, 1})\n    ->Args({25000, 50, 50})\n    ->Args({25000, 100, 50})\n    ->Args({50000, 0, 1})\n    ->Args({50000, 50, 50})\n    ->Args({50000, 100, 50})\n    ->Unit(::benchmark::kMillisecond);\n\nclass RingHashTester : public BaseTester {\npublic:\n  RingHashTester(uint64_t num_hosts, uint64_t min_ring_size) : BaseTester(num_hosts) {\n    config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n    config_.value().mutable_minimum_ring_size()->set_value(min_ring_size);\n    ring_hash_lb_ = std::make_unique<RingHashLoadBalancer>(\n        priority_set_, stats_, stats_store_, runtime_, random_, config_, common_config_);\n  }\n\n  absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig> config_;\n  std::unique_ptr<RingHashLoadBalancer> ring_hash_lb_;\n};\n\nclass MaglevTester : public BaseTester {\npublic:\n  MaglevTester(uint64_t num_hosts, uint32_t weighted_subset_percent = 0, uint32_t weight = 0)\n      : BaseTester(num_hosts, weighted_subset_percent, weight) {\n    maglev_lb_ = std::make_unique<MaglevLoadBalancer>(priority_set_, stats_, stats_store_, runtime_,\n                                                      random_, config_, common_config_);\n  }\n\n  absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig> config_;\n  std::unique_ptr<MaglevLoadBalancer> maglev_lb_;\n};\n\nuint64_t hashInt(uint64_t i) {\n  // Hack to hash an integer.\n  return HashUtil::xxHash64(absl::string_view(reinterpret_cast<const 
char*>(&i), sizeof(i)));\n}\n\nvoid benchmarkRingHashLoadBalancerBuildRing(::benchmark::State& state) {\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    state.PauseTiming();\n    const uint64_t num_hosts = state.range(0);\n    const uint64_t min_ring_size = state.range(1);\n    RingHashTester tester(num_hosts, min_ring_size);\n\n    const size_t start_mem = Memory::Stats::totalCurrentlyAllocated();\n\n    // We are only interested in timing the initial ring build.\n    state.ResumeTiming();\n    tester.ring_hash_lb_->initialize();\n    state.PauseTiming();\n    const size_t end_mem = Memory::Stats::totalCurrentlyAllocated();\n    state.counters[\"memory\"] = end_mem - start_mem;\n    state.counters[\"memory_per_host\"] = (end_mem - start_mem) / num_hosts;\n    state.ResumeTiming();\n  }\n}\nBENCHMARK(benchmarkRingHashLoadBalancerBuildRing)\n    ->Args({100, 65536})\n    ->Args({200, 65536})\n    ->Args({500, 65536})\n    ->Args({100, 256000})\n    ->Args({200, 256000})\n    ->Args({500, 256000})\n    ->Unit(::benchmark::kMillisecond);\n\nvoid benchmarkMaglevLoadBalancerBuildTable(::benchmark::State& state) {\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    state.PauseTiming();\n    const uint64_t num_hosts = state.range(0);\n    MaglevTester tester(num_hosts);\n\n    const size_t start_mem = Memory::Stats::totalCurrentlyAllocated();\n\n    // We are only interested in timing the initial table build.\n    state.ResumeTiming();\n    tester.maglev_lb_->initialize();\n    state.PauseTiming();\n    const size_t end_mem = Memory::Stats::totalCurrentlyAllocated();\n    state.counters[\"memory\"] = end_mem - start_mem;\n    state.counters[\"memory_per_host\"] = (end_mem - start_mem) / num_hosts;\n    state.ResumeTiming();\n  }\n}\nBENCHMARK(benchmarkMaglevLoadBalancerBuildTable)\n    ->Arg(100)\n    ->Arg(200)\n    ->Arg(500)\n    ->Unit(::benchmark::kMillisecond);\n\nclass TestLoadBalancerContext : public 
LoadBalancerContextBase {\npublic:\n  // Upstream::LoadBalancerContext\n  absl::optional<uint64_t> computeHashKey() override { return hash_key_; }\n\n  absl::optional<uint64_t> hash_key_;\n};\n\nvoid computeHitStats(::benchmark::State& state,\n                     const absl::node_hash_map<std::string, uint64_t>& hit_counter) {\n  double mean = 0;\n  for (const auto& pair : hit_counter) {\n    mean += pair.second;\n  }\n  mean /= hit_counter.size();\n\n  double variance = 0;\n  for (const auto& pair : hit_counter) {\n    variance += std::pow(pair.second - mean, 2);\n  }\n  variance /= hit_counter.size();\n  const double stddev = std::sqrt(variance);\n\n  state.counters[\"mean_hits\"] = mean;\n  state.counters[\"stddev_hits\"] = stddev;\n  state.counters[\"relative_stddev_hits\"] = (stddev / mean);\n}\n\nvoid benchmarkLeastRequestLoadBalancerChooseHost(::benchmark::State& state) {\n  const uint64_t num_hosts = state.range(0);\n  const uint64_t choice_count = state.range(1);\n  const uint64_t keys_to_simulate = state.range(2);\n\n  if (benchmark::skipExpensiveBenchmarks() && keys_to_simulate > 1000) {\n    state.SkipWithError(\"Skipping expensive benchmark\");\n    return;\n  }\n\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    state.PauseTiming();\n    LeastRequestTester tester(num_hosts, choice_count);\n    absl::node_hash_map<std::string, uint64_t> hit_counter;\n    TestLoadBalancerContext context;\n    state.ResumeTiming();\n\n    for (uint64_t i = 0; i < keys_to_simulate; ++i) {\n      hit_counter[tester.lb_->chooseHost(&context)->address()->asString()] += 1;\n    }\n\n    // Do not time computation of mean, standard deviation, and relative standard deviation.\n    state.PauseTiming();\n    computeHitStats(state, hit_counter);\n    state.ResumeTiming();\n  }\n}\nBENCHMARK(benchmarkLeastRequestLoadBalancerChooseHost)\n    ->Args({100, 1, 1000})\n    ->Args({100, 2, 1000})\n    ->Args({100, 3, 1000})\n    ->Args({100, 10, 1000})\n    
->Args({100, 50, 1000})\n    ->Args({100, 100, 1000})\n    ->Args({100, 1, 1000000})\n    ->Args({100, 2, 1000000})\n    ->Args({100, 3, 1000000})\n    ->Args({100, 10, 1000000})\n    ->Args({100, 50, 1000000})\n    ->Args({100, 100, 1000000})\n    ->Unit(::benchmark::kMillisecond);\n\nvoid benchmarkRingHashLoadBalancerChooseHost(::benchmark::State& state) {\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    // Do not time the creation of the ring.\n    state.PauseTiming();\n    const uint64_t num_hosts = state.range(0);\n    const uint64_t min_ring_size = state.range(1);\n    const uint64_t keys_to_simulate = state.range(2);\n    RingHashTester tester(num_hosts, min_ring_size);\n    tester.ring_hash_lb_->initialize();\n    LoadBalancerPtr lb = tester.ring_hash_lb_->factory()->create();\n    absl::node_hash_map<std::string, uint64_t> hit_counter;\n    TestLoadBalancerContext context;\n    state.ResumeTiming();\n\n    // Note: To a certain extent this is benchmarking the performance of xxhash as well as\n    // absl::node_hash_map. 
However, it should be roughly equivalent to the work done when\n    // comparing different hashing algorithms.\n    // TODO(mattklein123): When Maglev is a real load balancer, further share code with the\n    //                     other test.\n    for (uint64_t i = 0; i < keys_to_simulate; i++) {\n      context.hash_key_ = hashInt(i);\n      hit_counter[lb->chooseHost(&context)->address()->asString()] += 1;\n    }\n\n    // Do not time computation of mean, standard deviation, and relative standard deviation.\n    state.PauseTiming();\n    computeHitStats(state, hit_counter);\n    state.ResumeTiming();\n  }\n}\nBENCHMARK(benchmarkRingHashLoadBalancerChooseHost)\n    ->Args({100, 65536, 100000})\n    ->Args({200, 65536, 100000})\n    ->Args({500, 65536, 100000})\n    ->Args({100, 256000, 100000})\n    ->Args({200, 256000, 100000})\n    ->Args({500, 256000, 100000})\n    ->Unit(::benchmark::kMillisecond);\n\nvoid benchmarkMaglevLoadBalancerChooseHost(::benchmark::State& state) {\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    // Do not time the creation of the table.\n    state.PauseTiming();\n    const uint64_t num_hosts = state.range(0);\n    const uint64_t keys_to_simulate = state.range(1);\n    MaglevTester tester(num_hosts);\n    tester.maglev_lb_->initialize();\n    LoadBalancerPtr lb = tester.maglev_lb_->factory()->create();\n    absl::node_hash_map<std::string, uint64_t> hit_counter;\n    TestLoadBalancerContext context;\n    state.ResumeTiming();\n\n    // Note: To a certain extent this is benchmarking the performance of xxhash as well as\n    // absl::node_hash_map. 
However, it should be roughly equivalent to the work done when\n    // comparing different hashing algorithms.\n    for (uint64_t i = 0; i < keys_to_simulate; i++) {\n      context.hash_key_ = hashInt(i);\n      hit_counter[lb->chooseHost(&context)->address()->asString()] += 1;\n    }\n\n    // Do not time computation of mean, standard deviation, and relative standard deviation.\n    state.PauseTiming();\n    computeHitStats(state, hit_counter);\n    state.ResumeTiming();\n  }\n}\nBENCHMARK(benchmarkMaglevLoadBalancerChooseHost)\n    ->Args({100, 100000})\n    ->Args({200, 100000})\n    ->Args({500, 100000})\n    ->Unit(::benchmark::kMillisecond);\n\nvoid benchmarkRingHashLoadBalancerHostLoss(::benchmark::State& state) {\n  const uint64_t num_hosts = state.range(0);\n  const uint64_t min_ring_size = state.range(1);\n  const uint64_t hosts_to_lose = state.range(2);\n  const uint64_t keys_to_simulate = state.range(3);\n\n  if (benchmark::skipExpensiveBenchmarks() && min_ring_size > 65536) {\n    state.SkipWithError(\"Skipping expensive benchmark\");\n    return;\n  }\n\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    RingHashTester tester(num_hosts, min_ring_size);\n    tester.ring_hash_lb_->initialize();\n    LoadBalancerPtr lb = tester.ring_hash_lb_->factory()->create();\n    std::vector<HostConstSharedPtr> hosts;\n    TestLoadBalancerContext context;\n    for (uint64_t i = 0; i < keys_to_simulate; i++) {\n      context.hash_key_ = hashInt(i);\n      hosts.push_back(lb->chooseHost(&context));\n    }\n\n    RingHashTester tester2(num_hosts - hosts_to_lose, min_ring_size);\n    tester2.ring_hash_lb_->initialize();\n    lb = tester2.ring_hash_lb_->factory()->create();\n    std::vector<HostConstSharedPtr> hosts2;\n    for (uint64_t i = 0; i < keys_to_simulate; i++) {\n      context.hash_key_ = hashInt(i);\n      hosts2.push_back(lb->chooseHost(&context));\n    }\n\n    ASSERT(hosts.size() == hosts2.size());\n    uint64_t num_different_hosts = 
0;\n    for (uint64_t i = 0; i < hosts.size(); i++) {\n      if (hosts[i]->address()->asString() != hosts2[i]->address()->asString()) {\n        num_different_hosts++;\n      }\n    }\n\n    state.counters[\"percent_different\"] =\n        (static_cast<double>(num_different_hosts) / hosts.size()) * 100;\n    state.counters[\"host_loss_over_N_optimal\"] =\n        (static_cast<double>(hosts_to_lose) / num_hosts) * 100;\n  }\n}\nBENCHMARK(benchmarkRingHashLoadBalancerHostLoss)\n    ->Args({500, 65536, 1, 10000})\n    ->Args({500, 65536, 2, 10000})\n    ->Args({500, 65536, 3, 10000})\n    ->Args({500, 256000, 1, 10000})\n    ->Args({500, 256000, 2, 10000})\n    ->Args({500, 256000, 3, 10000})\n    ->Unit(::benchmark::kMillisecond);\n\nvoid benchmarkMaglevLoadBalancerHostLoss(::benchmark::State& state) {\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    const uint64_t num_hosts = state.range(0);\n    const uint64_t hosts_to_lose = state.range(1);\n    const uint64_t keys_to_simulate = state.range(2);\n\n    MaglevTester tester(num_hosts);\n    tester.maglev_lb_->initialize();\n    LoadBalancerPtr lb = tester.maglev_lb_->factory()->create();\n    std::vector<HostConstSharedPtr> hosts;\n    TestLoadBalancerContext context;\n    for (uint64_t i = 0; i < keys_to_simulate; i++) {\n      context.hash_key_ = hashInt(i);\n      hosts.push_back(lb->chooseHost(&context));\n    }\n\n    MaglevTester tester2(num_hosts - hosts_to_lose);\n    tester2.maglev_lb_->initialize();\n    lb = tester2.maglev_lb_->factory()->create();\n    std::vector<HostConstSharedPtr> hosts2;\n    for (uint64_t i = 0; i < keys_to_simulate; i++) {\n      context.hash_key_ = hashInt(i);\n      hosts2.push_back(lb->chooseHost(&context));\n    }\n\n    ASSERT(hosts.size() == hosts2.size());\n    uint64_t num_different_hosts = 0;\n    for (uint64_t i = 0; i < hosts.size(); i++) {\n      if (hosts[i]->address()->asString() != hosts2[i]->address()->asString()) {\n        
num_different_hosts++;\n      }\n    }\n\n    state.counters[\"percent_different\"] =\n        (static_cast<double>(num_different_hosts) / hosts.size()) * 100;\n    state.counters[\"host_loss_over_N_optimal\"] =\n        (static_cast<double>(hosts_to_lose) / num_hosts) * 100;\n  }\n}\nBENCHMARK(benchmarkMaglevLoadBalancerHostLoss)\n    ->Args({500, 1, 10000})\n    ->Args({500, 2, 10000})\n    ->Args({500, 3, 10000})\n    ->Unit(::benchmark::kMillisecond);\n\nvoid benchmarkMaglevLoadBalancerWeighted(::benchmark::State& state) {\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    const uint64_t num_hosts = state.range(0);\n    const uint64_t weighted_subset_percent = state.range(1);\n    const uint64_t before_weight = state.range(2);\n    const uint64_t after_weight = state.range(3);\n    const uint64_t keys_to_simulate = state.range(4);\n\n    MaglevTester tester(num_hosts, weighted_subset_percent, before_weight);\n    tester.maglev_lb_->initialize();\n    LoadBalancerPtr lb = tester.maglev_lb_->factory()->create();\n    std::vector<HostConstSharedPtr> hosts;\n    TestLoadBalancerContext context;\n    for (uint64_t i = 0; i < keys_to_simulate; i++) {\n      context.hash_key_ = hashInt(i);\n      hosts.push_back(lb->chooseHost(&context));\n    }\n\n    MaglevTester tester2(num_hosts, weighted_subset_percent, after_weight);\n    tester2.maglev_lb_->initialize();\n    lb = tester2.maglev_lb_->factory()->create();\n    std::vector<HostConstSharedPtr> hosts2;\n    for (uint64_t i = 0; i < keys_to_simulate; i++) {\n      context.hash_key_ = hashInt(i);\n      hosts2.push_back(lb->chooseHost(&context));\n    }\n\n    ASSERT(hosts.size() == hosts2.size());\n    uint64_t num_different_hosts = 0;\n    for (uint64_t i = 0; i < hosts.size(); i++) {\n      if (hosts[i]->address()->asString() != hosts2[i]->address()->asString()) {\n        num_different_hosts++;\n      }\n    }\n\n    state.counters[\"percent_different\"] =\n        
(static_cast<double>(num_different_hosts) / hosts.size()) * 100;\n    const auto weighted_hosts_percent = [weighted_subset_percent](uint32_t weight) -> double {\n      const double weighted_hosts = weighted_subset_percent;\n      const double unweighted_hosts = 100.0 - weighted_hosts;\n      const double total_weight = weighted_hosts * weight + unweighted_hosts;\n      return 100.0 * (weighted_hosts * weight) / total_weight;\n    };\n    state.counters[\"optimal_percent_different\"] =\n        std::abs(weighted_hosts_percent(before_weight) - weighted_hosts_percent(after_weight));\n  }\n}\nBENCHMARK(benchmarkMaglevLoadBalancerWeighted)\n    ->Args({500, 5, 1, 1, 10000})\n    ->Args({500, 5, 1, 127, 1000})\n    ->Args({500, 5, 127, 1, 10000})\n    ->Args({500, 50, 1, 127, 1000})\n    ->Args({500, 50, 127, 1, 10000})\n    ->Args({500, 95, 1, 127, 1000})\n    ->Args({500, 95, 127, 1, 10000})\n    ->Args({500, 95, 25, 75, 1000})\n    ->Args({500, 95, 75, 25, 10000})\n    ->Unit(::benchmark::kMillisecond);\n\nclass SubsetLbTester : public BaseTester {\npublic:\n  SubsetLbTester(uint64_t num_hosts, bool single_host_per_subset)\n      : BaseTester(num_hosts, 0, 0, true /* attach metadata */) {\n    envoy::config::cluster::v3::Cluster::LbSubsetConfig subset_config;\n    subset_config.set_fallback_policy(\n        envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT);\n    auto* selector = subset_config.mutable_subset_selectors()->Add();\n    selector->set_single_host_per_subset(single_host_per_subset);\n    *selector->mutable_keys()->Add() = metadata_key;\n\n    subset_info_ = std::make_unique<LoadBalancerSubsetInfoImpl>(subset_config);\n    lb_ = std::make_unique<SubsetLoadBalancer>(LoadBalancerType::Random, priority_set_,\n                                               &local_priority_set_, stats_, stats_store_, runtime_,\n                                               random_, *subset_info_, absl::nullopt, absl::nullopt,\n                                     
          absl::nullopt, common_config_);\n\n    const HostVector& hosts = priority_set_.getOrCreateHostSet(0).hosts();\n    ASSERT(hosts.size() == num_hosts);\n    orig_hosts_ = std::make_shared<HostVector>(hosts);\n    smaller_hosts_ = std::make_shared<HostVector>(hosts.begin() + 1, hosts.end());\n    ASSERT(smaller_hosts_->size() + 1 == orig_hosts_->size());\n    orig_locality_hosts_ = makeHostsPerLocality({*orig_hosts_});\n    smaller_locality_hosts_ = makeHostsPerLocality({*smaller_hosts_});\n  }\n\n  // Remove a host and add it back.\n  void update() {\n    priority_set_.updateHosts(0,\n                              HostSetImpl::partitionHosts(smaller_hosts_, smaller_locality_hosts_),\n                              nullptr, {}, host_moved_, absl::nullopt);\n    priority_set_.updateHosts(0, HostSetImpl::partitionHosts(orig_hosts_, orig_locality_hosts_),\n                              nullptr, host_moved_, {}, absl::nullopt);\n  }\n\n  std::unique_ptr<LoadBalancerSubsetInfoImpl> subset_info_;\n  std::unique_ptr<SubsetLoadBalancer> lb_;\n  HostVectorConstSharedPtr orig_hosts_;\n  HostVectorConstSharedPtr smaller_hosts_;\n  HostsPerLocalitySharedPtr orig_locality_hosts_;\n  HostsPerLocalitySharedPtr smaller_locality_hosts_;\n  HostVector host_moved_;\n};\n\nvoid benchmarkSubsetLoadBalancerCreate(::benchmark::State& state) {\n  const bool single_host_per_subset = state.range(0);\n  const uint64_t num_hosts = state.range(1);\n\n  if (benchmark::skipExpensiveBenchmarks() && num_hosts > 100) {\n    state.SkipWithError(\"Skipping expensive benchmark\");\n    return;\n  }\n\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    SubsetLbTester tester(num_hosts, single_host_per_subset);\n  }\n}\n\nBENCHMARK(benchmarkSubsetLoadBalancerCreate)\n    ->Ranges({{false, true}, {50, 2500}})\n    ->Unit(::benchmark::kMillisecond);\n\nvoid benchmarkSubsetLoadBalancerUpdate(::benchmark::State& state) {\n  const bool single_host_per_subset = state.range(0);\n  
const uint64_t num_hosts = state.range(1);\n  if (benchmark::skipExpensiveBenchmarks() && num_hosts > 100) {\n    state.SkipWithError(\"Skipping expensive benchmark\");\n    return;\n  }\n\n  SubsetLbTester tester(num_hosts, single_host_per_subset);\n  for (auto _ : state) { // NOLINT: Silences warning about dead store\n    tester.update();\n  }\n}\n\nBENCHMARK(benchmarkSubsetLoadBalancerUpdate)\n    ->Ranges({{false, true}, {50, 2500}})\n    ->Unit(::benchmark::kMillisecond);\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/load_balancer_impl_test.cc",
    "content": "#include <memory>\n#include <set>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/load_balancer_context.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::ElementsAre;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass LoadBalancerTestBase : public testing::TestWithParam<bool> {\nprotected:\n  // Run all tests against both priority 0 and priority 1 host sets, to ensure\n  // all the load balancers have equivalent functionality for failover host sets.\n  MockHostSet& hostSet() { return GetParam() ? 
host_set_ : failover_host_set_; }\n\n  LoadBalancerTestBase() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {\n    least_request_lb_config_.mutable_choice_count()->set_value(2);\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  ClusterStats stats_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<MockPrioritySet> priority_set_;\n  MockHostSet& host_set_ = *priority_set_.getMockHostSet(0);\n  MockHostSet& failover_host_set_ = *priority_set_.getMockHostSet(1);\n  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n  envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config_;\n};\n\nclass TestLb : public LoadBalancerBase {\npublic:\n  TestLb(const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime,\n         Random::RandomGenerator& random,\n         const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)\n      : LoadBalancerBase(priority_set, stats, runtime, random, common_config) {}\n  using LoadBalancerBase::chooseHostSet;\n  using LoadBalancerBase::isInPanic;\n  using LoadBalancerBase::percentageDegradedLoad;\n  using LoadBalancerBase::percentageLoad;\n\n  HostConstSharedPtr chooseHostOnce(LoadBalancerContext*) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n};\n\nclass LoadBalancerBaseTest : public LoadBalancerTestBase {\npublic:\n  void updateHostSet(MockHostSet& host_set, uint32_t num_hosts, uint32_t num_healthy_hosts,\n                     uint32_t num_degraded_hosts = 0, uint32_t num_excluded_hosts = 0) {\n    ASSERT(num_healthy_hosts + num_degraded_hosts + num_excluded_hosts <= num_hosts);\n\n    host_set.hosts_.clear();\n    host_set.healthy_hosts_.clear();\n    host_set.degraded_hosts_.clear();\n    
host_set.excluded_hosts_.clear();\n    for (uint32_t i = 0; i < num_hosts; ++i) {\n      host_set.hosts_.push_back(makeTestHost(info_, \"tcp://127.0.0.1:80\"));\n    }\n    uint32_t i = 0;\n    for (; i < num_healthy_hosts; ++i) {\n      host_set.healthy_hosts_.push_back(host_set.hosts_[i]);\n    }\n    for (; i < (num_healthy_hosts + num_degraded_hosts); ++i) {\n      host_set.degraded_hosts_.push_back(host_set.hosts_[i]);\n    }\n\n    for (; i < (num_healthy_hosts + num_degraded_hosts + num_excluded_hosts); ++i) {\n      host_set.excluded_hosts_.push_back(host_set.hosts_[i]);\n    }\n    host_set.runCallbacks({}, {});\n  }\n\n  template <typename T, typename FUNC>\n  std::vector<T> aggregatePrioritySetsValues(TestLb& lb, FUNC func) {\n    std::vector<T> ret;\n\n    for (size_t i = 0; i < priority_set_.host_sets_.size(); ++i) {\n      ret.push_back((lb.*func)(i));\n    }\n\n    return ret;\n  }\n\n  std::vector<uint32_t> getLoadPercentage() {\n    return aggregatePrioritySetsValues<uint32_t>(lb_, &TestLb::percentageLoad);\n  }\n\n  std::vector<uint32_t> getDegradedLoadPercentage() {\n    return aggregatePrioritySetsValues<uint32_t>(lb_, &TestLb::percentageDegradedLoad);\n  }\n\n  std::vector<bool> getPanic() {\n    return aggregatePrioritySetsValues<bool>(lb_, &TestLb::isInPanic);\n  }\n\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n  TestLb lb_{priority_set_, stats_, runtime_, random_, common_config_};\n};\n\nINSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, LoadBalancerBaseTest, ::testing::Values(true));\n\n// Basic test of host set selection.\nTEST_P(LoadBalancerBaseTest, PrioritySelection) {\n  NiceMock<Upstream::MockLoadBalancerContext> context;\n  updateHostSet(host_set_, 1 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 1, 0);\n\n  HealthyAndDegradedLoad priority_load{Upstream::HealthyLoad({100, 0, 0}),\n                                       Upstream::DegradedLoad({0, 0, 0})};\n  EXPECT_CALL(context, 
determinePriorityLoad(_, _, _)).WillRepeatedly(ReturnRef(priority_load));\n  // Primary and failover are in panic mode. Load distribution is based\n  // on the number of hosts regardless of their health.\n  EXPECT_EQ(50, lb_.percentageLoad(0));\n  EXPECT_EQ(50, lb_.percentageLoad(1));\n  EXPECT_EQ(&host_set_, &lb_.chooseHostSet(&context, 0).first);\n\n  // Modify number of hosts in failover, but leave them in the unhealthy state\n  // primary and secondary are in panic mode, so load distribution is\n  // based on number of host regardless of their health.\n  updateHostSet(failover_host_set_, 2, 0);\n  EXPECT_EQ(34, lb_.percentageLoad(0));\n  EXPECT_EQ(66, lb_.percentageLoad(1));\n  EXPECT_EQ(&host_set_, &lb_.chooseHostSet(&context, 0).first);\n\n  // Update the priority set with a new priority level P=2 and ensure the host\n  // is chosen\n  MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2);\n  updateHostSet(tertiary_host_set_, 1 /* num_hosts */, 1 /* num_healthy_hosts */);\n  EXPECT_EQ(0, lb_.percentageLoad(0));\n  EXPECT_EQ(0, lb_.percentageLoad(1));\n  EXPECT_EQ(100, lb_.percentageLoad(2));\n  priority_load.healthy_priority_load_ = HealthyLoad({0u, 0u, 100});\n  EXPECT_EQ(&tertiary_host_set_, &lb_.chooseHostSet(&context, 0).first);\n\n  // Now add a healthy host in P=0 and make sure it is immediately selected.\n  updateHostSet(host_set_, 1 /* num_hosts */, 1 /* num_healthy_hosts */);\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(100, lb_.percentageLoad(0));\n  EXPECT_EQ(0, lb_.percentageLoad(2));\n  priority_load.healthy_priority_load_ = HealthyLoad({100u, 0u, 0u});\n  EXPECT_EQ(&host_set_, &lb_.chooseHostSet(&context, 0).first);\n\n  // Remove the healthy host and ensure we fail back over to tertiary_host_set_\n  updateHostSet(host_set_, 1 /* num_hosts */, 0 /* num_healthy_hosts */);\n  EXPECT_EQ(0, lb_.percentageLoad(0));\n  EXPECT_EQ(100, lb_.percentageLoad(2));\n  
priority_load.healthy_priority_load_ = HealthyLoad({0u, 0u, 100});\n  EXPECT_EQ(&tertiary_host_set_, &lb_.chooseHostSet(&context, 0).first);\n}\n\n// Tests host selection with a randomized number of healthy, degraded and unhealthy hosts.\nTEST_P(LoadBalancerBaseTest, PrioritySelectionFuzz) {\n  TestRandomGenerator rand;\n\n  // Determine total number of hosts.\n  const auto total_hosts = 1 + (rand.random() % 10);\n\n  NiceMock<Upstream::MockLoadBalancerContext> context;\n\n  const auto host_set_hosts = rand.random() % total_hosts;\n\n  if (host_set_hosts == 0) {\n    updateHostSet(host_set_, 0, 0);\n  } else {\n    // We get on average 50% healthy hosts, 25% degraded hosts and 25% unhealthy hosts.\n    const auto healthy_hosts = rand.random() % host_set_hosts;\n    const auto degraded_hosts = rand.random() % (host_set_hosts - healthy_hosts);\n    const auto unhealthy_hosts = host_set_hosts - healthy_hosts - degraded_hosts;\n\n    updateHostSet(host_set_, host_set_hosts, unhealthy_hosts, degraded_hosts);\n  }\n\n  const auto failover_set_hosts = total_hosts - host_set_hosts;\n\n  if (host_set_hosts == 0) {\n    updateHostSet(failover_host_set_, 0, 0);\n  } else {\n    // We get on average 50% healthy hosts, 25% degraded hosts and 25% unhealthy hosts.\n    const auto healthy_hosts = rand.random() % failover_set_hosts;\n    const auto degraded_hosts = rand.random() % (failover_set_hosts - healthy_hosts);\n    const auto unhealthy_hosts = failover_set_hosts - healthy_hosts - degraded_hosts;\n\n    updateHostSet(failover_host_set_, failover_set_hosts, unhealthy_hosts, degraded_hosts);\n  }\n\n  EXPECT_CALL(context, determinePriorityLoad(_, _, _))\n      .WillRepeatedly(\n          Invoke([](const auto&, const auto& original_load,\n                    const auto&) -> const HealthyAndDegradedLoad& { return original_load; }));\n\n  for (uint64_t i = 0; i < total_hosts; ++i) {\n    const auto hs = lb_.chooseHostSet(&context, 0);\n    switch (hs.second) {\n    case 
LoadBalancerBase::HostAvailability::Healthy:\n      // Either we selected one of the healthy hosts or we failed to select anything and defaulted\n      // to healthy.\n      EXPECT_TRUE(!hs.first.healthyHosts().empty() ||\n                  (hs.first.healthyHosts().empty() && hs.first.degradedHosts().empty()));\n      break;\n    case LoadBalancerBase::HostAvailability::Degraded:\n      EXPECT_FALSE(hs.first.degradedHosts().empty());\n      break;\n    }\n  }\n}\n\n// Test of host set selection with priority filter\nTEST_P(LoadBalancerBaseTest, PrioritySelectionWithFilter) {\n  NiceMock<Upstream::MockLoadBalancerContext> context;\n\n  HealthyAndDegradedLoad priority_load{Upstream::HealthyLoad({0u, 100u}),\n                                       Upstream::DegradedLoad({0, 0})};\n  // return a filter that excludes priority 0\n  EXPECT_CALL(context, determinePriorityLoad(_, _, _)).WillRepeatedly(ReturnRef(priority_load));\n\n  updateHostSet(host_set_, 1 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 1, 1);\n\n  // Since we've excluded P0, we should pick the failover host set\n  EXPECT_EQ(failover_host_set_.priority(), lb_.chooseHostSet(&context, 0).first.priority());\n\n  updateHostSet(host_set_, 1 /* num_hosts */, 0 /* num_healthy_hosts */,\n                1 /* num_degraded_hosts */);\n  updateHostSet(failover_host_set_, 1, 0, 1);\n\n  // exclude priority 0 for degraded hosts\n  priority_load.healthy_priority_load_ = Upstream::HealthyLoad({0, 0});\n  priority_load.degraded_priority_load_ = Upstream::DegradedLoad({0, 100});\n\n  // Since we've excluded P0, we should pick the failover host set\n  EXPECT_EQ(failover_host_set_.priority(), lb_.chooseHostSet(&context, 0).first.priority());\n}\n\nTEST_P(LoadBalancerBaseTest, OverProvisioningFactor) {\n  // Default overprovisioning factor 1.4 makes P0 receives 70% load.\n  updateHostSet(host_set_, 4, 2);\n  updateHostSet(failover_host_set_, 4, 2);\n  ASSERT_THAT(getLoadPercentage(), 
ElementsAre(70, 30));\n\n  // Set overprovisioning factor to 1, now it should be proportioned to healthy ratio.\n  host_set_.setOverprovisioningFactor(100);\n  updateHostSet(host_set_, 4, 2);\n  failover_host_set_.setOverprovisioningFactor(100);\n  updateHostSet(failover_host_set_, 4, 2);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(50, 50));\n}\n\nTEST_P(LoadBalancerBaseTest, GentleFailover) {\n  // With 100% of P=0 hosts healthy, P=0 gets all the load.\n  // None of the levels is in Panic mode\n  updateHostSet(host_set_, 1, 1);\n  updateHostSet(failover_host_set_, 1, 1);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(100, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(false, false));\n\n  // Health P=0 == 50*1.4 == 70\n  // Total health = 70 + 70 >= 100%. None of the levels should be in panic mode.\n  updateHostSet(host_set_, 2 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 2 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(70, 30));\n  ASSERT_THAT(getPanic(), ElementsAre(false, false));\n\n  // Health P=0 == 25*1.4 == 35   P=1 is healthy so takes all spillover.\n  // Total health = 35+100 >= 100%. P=0 is below Panic level but it is ignored, because\n  // Total health >= 100%.\n  updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 2 /* num_hosts */, 2 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(35, 65));\n  ASSERT_THAT(getPanic(), ElementsAre(false, false));\n\n  // Health P=0 == 25*1.4 == 35   P=1 == 35\n  // Health is then scaled up by (100 / (35 + 35) == 50)\n  // Total health = 35% + 35% is less than 100%. 
Panic levels per priority kick in.\n  updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(50, 50));\n  ASSERT_THAT(getPanic(), ElementsAre(true, true));\n\n  // Health P=0 == 100*1.4 == 35 P=1 == 35\n  // Since 3 hosts are excluded, P=0 should be considered fully healthy.\n  // Total health = 100% + 35% is greater than 100%. Panic should not trigger.\n  updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */, 0 /* num_degraded_hosts */,\n                3 /* num_excluded_hosts */);\n  updateHostSet(failover_host_set_, 5 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(100, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(false, false));\n\n  // Health P=0 == 100*1.4 == 35 P=1 == 35\n  // Total health = 35% is less than 100%.\n  // All priorities are in panic mode (situation called TotalPanic)\n  // Load is distributed based on number of hosts regardless of their health status.\n  // P=0 and P=1 have 4 hosts each so each priority will receive 50% of the traffic.\n  updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts */,\n                4 /* num_excluded_hosts */);\n  updateHostSet(failover_host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(50, 50));\n  ASSERT_THAT(getPanic(), ElementsAre(true, true));\n\n  // Make sure that in TotalPanic mode (all levels are in Panic),\n  // load distribution depends only on number of hosts.\n  // excluded_hosts should not be taken into account.\n  // P=0 has 4 hosts with 1 excluded, P=1 has 6 hosts with 2 excluded.\n  // P=0 should receive 4/(4+6)=40% of traffic\n  // P=1 should receive 6/(4+6)=60% of traffic\n  updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts */,\n                1 /* 
num_excluded_hosts */);\n  updateHostSet(failover_host_set_, 6 /* num_hosts */, 1 /* num_healthy_hosts */,\n                0 /* num_degraded_hosts */, 2 /* num_excluded_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(40, 60));\n  ASSERT_THAT(getPanic(), ElementsAre(true, true));\n}\n\nTEST_P(LoadBalancerBaseTest, GentleFailoverWithExtraLevels) {\n  // Add a third host set. Again with P=0 healthy, all traffic goes there.\n  MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2);\n  updateHostSet(host_set_, 1, 1);\n  updateHostSet(failover_host_set_, 1, 1);\n  updateHostSet(tertiary_host_set_, 1, 1);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(100, 0, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(false, false, false));\n\n  // Health P=0 == 50*1.4 == 70\n  // Health P=0 == 50, so can take the 30% spillover.\n  updateHostSet(host_set_, 2 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 2 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 2 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(70, 30, 0));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n\n  // Health P=0 == 25*1.4 == 35   P=1 is healthy so takes all spillover.\n  updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 2 /* num_hosts */, 2 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 2 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(35, 65, 0));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n\n  // This is the first test where health (P=0 + P=1 < 100)\n  // Health P=0 == 25*1.4 == 35   P=1 == 35  P=2 == 35\n  updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 4 /* num_hosts */, 1 /* 
num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(35, 35, 30));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n\n  // This is the first test where (health P=0 + P=1 < 100)\n  // Health P=0 == 25*1.4 == 35   P=1 == 35  P=2 == 35\n  updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(35, 35, 30));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n\n  // Now all health is (20% * 1.4 == 28). 28 * 3 < 100 so we have to scale.\n  // Each Priority level gets 33% of the load, with P=0 picking up the rounding error.\n  updateHostSet(host_set_, 5 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 5 /* num_hosts */, 1 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 5 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(34, 33, 33));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(true, true, true));\n\n  // Levels P=0 and P=1 are totally down. P=2 is totally healthy.\n  // 100% of the traffic should go to P=2 and P=0 and P=1 should\n  // not be in panic mode.\n  updateHostSet(host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 5 /* num_hosts */, 5 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(0, 0, 100));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(false, false, false));\n\n  // Levels P=0 and P=1 are totally down. 
P=2 is 80*1.4 >= 100% healthy.\n  // 100% of the traffic should go to P=2 and P=0 and P=1 should\n  // not be in panic mode.\n  updateHostSet(host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 5 /* num_hosts */, 4 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(0, 0, 100));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(false, false, false));\n\n  // Levels P=0 and P=1 are totally down. P=2 is 40*1.4=56% healthy.\n  // 100% of the traffic should go to P=2. All levels P=0, P=1 and P=2 should\n  // be in panic mode.\n  // Since all levels are in panic mode load distribution is based\n  // on number of hosts in each level.\n  updateHostSet(host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 5 /* num_hosts */, 2 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(34, 33, 33));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(true, true, true));\n\n  // Level P=0 is totally degraded. P=1 is 40*1.4=56% healthy and 40*1.4=56% degraded. P=2 is\n  // 40*1.4=56% healthy. 100% of the traffic should go to P=2. 
No priorities should be in panic\n  // mode.\n  updateHostSet(host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */,\n                5 /* num_degraded_hosts */);\n  updateHostSet(failover_host_set_, 5 /* num_hosts */, 2 /* num_healthy_hosts */,\n                2 /* num_degraded_hosts */);\n  updateHostSet(tertiary_host_set_, 5 /* num_hosts */, 2 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(0, 56, 44));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(false, false, false));\n\n  // All levels are completely down - situation called TotalPanic.\n  // Load is distributed based on the number\n  // of hosts in the priority in relation to the total number of hosts.\n  // Here the total number of hosts is 10.\n  // priority 0 will receive 5/10: 50% of the traffic\n  // priority 1 will receive 3/10: 30% of the traffic\n  // priority 2 will receive 2/10: 20% of the traffic\n  updateHostSet(host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 3 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 2 /* num_hosts */, 0 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(50, 30, 20));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(true, true, true));\n\n  // Rounding errors should be picked up by the first priority.\n  // All priorities are in panic mode - situation called TotalPanic.\n  // Load is distributed based on the number\n  // of hosts in the priority in relation to the total number of hosts.\n  // Total number of hosts is 5+6+3=14.\n  // priority 0 should receive 5/14=37% of traffic\n  // priority 1 should receive 6/14=42% of traffic\n  // priority 2 should receive 3/14=21% of traffic\n  updateHostSet(host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 6 /* num_hosts */, 2 /* 
num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 3 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(37, 42, 21));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n  ASSERT_THAT(getPanic(), ElementsAre(true, true, true));\n\n  // Load should spill over into degraded.\n  updateHostSet(host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */,\n                1 /* num_degraded_hosts */);\n  updateHostSet(failover_host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */,\n                5 /* num_degraded_hosts */);\n  updateHostSet(tertiary_host_set_, 5 /* num_hosts */, 1 /* num_healthy_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(0, 0, 28));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(28, 44, 0));\n\n  // Rounding errors should be picked up by the first priority with degraded hosts when\n  // there are no healthy priorities.\n  // Disable panic threshold to prevent total panic from kicking in.\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(0));\n  updateHostSet(host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */,\n                2 /* num_degraded_hosts */);\n  updateHostSet(tertiary_host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */,\n                1 /* num_degraded_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(0, 0, 0));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 67, 33));\n\n  // Simulate Total Panic mode. There is no healthy hosts, but there are\n  // degraded hosts. 
Because there is Total Panic, load is distributed\n  // based just on number of hosts in priorities regardless of its health.\n  // Rounding errors should be picked up by the first priority.\n  // Enable back panic threshold.\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(50));\n  updateHostSet(host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */,\n                2 /* num_degraded_hosts */);\n  updateHostSet(tertiary_host_set_, 5 /* num_hosts */, 0 /* num_healthy_hosts */,\n                1 /* num_degraded_hosts */);\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(34, 33, 33));\n  ASSERT_THAT(getDegradedLoadPercentage(), ElementsAre(0, 0, 0));\n\n  // Rounding error should be allocated to the first non-empty priority\n  // In this test P=0 is not empty.\n  updateHostSet(host_set_, 3 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 3 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 3 /* num_hosts */, 0 /* num_healthy_hosts */);\n  ASSERT_THAT(getPanic(), ElementsAre(true, true, true));\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(34, 33, 33));\n\n  // Rounding error should be allocated to the first non-empty priority\n  // In this test P=0 is empty and P=1 is not empty.\n  updateHostSet(host_set_, 0 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 6 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(tertiary_host_set_, 3 /* num_hosts */, 0 /* num_healthy_hosts */);\n  ASSERT_THAT(getPanic(), ElementsAre(true, true, true));\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(0, 67, 33));\n  // In this test P=1 is not empty.\n  updateHostSet(host_set_, 3 /* num_hosts */, 0 /* num_healthy_hosts */);\n  updateHostSet(failover_host_set_, 3 /* num_hosts */, 0 /* num_healthy_hosts */);\n  
updateHostSet(tertiary_host_set_, 3 /* num_hosts */, 0 /* num_healthy_hosts */);\n  ASSERT_THAT(getPanic(), ElementsAre(true, true, true));\n  ASSERT_THAT(getLoadPercentage(), ElementsAre(34, 33, 33));\n}\n\nTEST_P(LoadBalancerBaseTest, BoundaryConditions) {\n  TestRandomGenerator rand;\n  uint32_t num_priorities = rand.random() % 10;\n\n  for (uint32_t i = 0; i < num_priorities; ++i) {\n    uint32_t num_hosts = rand.random() % 100;\n    uint32_t healthy_hosts = std::min<uint32_t>(num_hosts, rand.random() % 100);\n    // Make sure random health situations don't trigger the assert in recalculatePerPriorityState\n    updateHostSet(*priority_set_.getMockHostSet(i), num_hosts, healthy_hosts);\n  }\n}\n\nclass RoundRobinLoadBalancerTest : public LoadBalancerTestBase {\npublic:\n  void init(bool need_local_cluster) {\n    if (need_local_cluster) {\n      local_priority_set_ = std::make_shared<PrioritySetImpl>();\n      local_priority_set_->getOrCreateHostSet(0);\n    }\n    lb_ = std::make_shared<RoundRobinLoadBalancer>(priority_set_, local_priority_set_.get(), stats_,\n                                                   runtime_, random_, common_config_);\n  }\n\n  // Updates priority 0 with the given hosts and hosts_per_locality.\n  void updateHosts(HostVectorConstSharedPtr hosts,\n                   HostsPerLocalityConstSharedPtr hosts_per_locality) {\n    local_priority_set_->updateHosts(\n        0,\n        updateHostsParams(hosts, hosts_per_locality,\n                          std::make_shared<const HealthyHostVector>(*hosts), hosts_per_locality),\n        {}, empty_host_vector_, empty_host_vector_, absl::nullopt);\n  }\n\n  void peekThenPick(std::vector<int> picks) {\n    for (auto i : picks) {\n      EXPECT_EQ(hostSet().healthy_hosts_[i], lb_->peekAnotherHost(nullptr));\n    }\n    for (auto i : picks) {\n      EXPECT_EQ(hostSet().healthy_hosts_[i], lb_->chooseHost(nullptr));\n    }\n  }\n\n  std::shared_ptr<PrioritySetImpl> local_priority_set_;\n  
std::shared_ptr<LoadBalancer> lb_;\n  HostsPerLocalityConstSharedPtr empty_locality_;\n  HostVector empty_host_vector_;\n};\n\n// For the tests which mutate primary and failover host sets explicitly, only\n// run once.\nusing FailoverTest = RoundRobinLoadBalancerTest;\n\n// Ensure if all the hosts with priority 0 unhealthy, the next priority hosts are used.\nTEST_P(FailoverTest, BasicFailover) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  failover_host_set_.healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:82\")};\n  failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_;\n  init(false);\n  EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb_->peekAnotherHost(nullptr));\n  EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb_->chooseHost(nullptr));\n}\n\n// Ensure if all the hosts with priority 0 degraded, the first priority degraded hosts are used.\nTEST_P(FailoverTest, BasicDegradedHosts) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  host_set_.degraded_hosts_ = host_set_.hosts_;\n  failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_;\n  init(false);\n  EXPECT_EQ(host_set_.degraded_hosts_[0], lb_->peekAnotherHost(nullptr));\n  EXPECT_EQ(host_set_.degraded_hosts_[0], lb_->chooseHost(nullptr));\n}\n\n// Ensure if all the hosts with priority 0 degraded, but healthy hosts in the failover, the healthy\n// hosts in the second priority are used.\nTEST_P(FailoverTest, BasicFailoverDegradedHosts) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  host_set_.degraded_hosts_ = host_set_.hosts_;\n  failover_host_set_.healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:82\")};\n  failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_;\n  init(false);\n  EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb_->chooseHost(nullptr));\n}\n\n// Test that extending the priority set with an existing LB causes the correct updates.\nTEST_P(FailoverTest, 
PriorityUpdatesWithLocalHostSet) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  failover_host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  init(false);\n  // With both the primary and failover hosts unhealthy, we should select an\n  // unhealthy primary host.\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  // Update the priority set with a new priority level P=2 and ensure the host\n  // is chosen\n  MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2);\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:82\")}));\n  tertiary_host_set_.hosts_ = *hosts;\n  tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_;\n  HostVector add_hosts;\n  add_hosts.push_back(tertiary_host_set_.hosts_[0]);\n  tertiary_host_set_.runCallbacks(add_hosts, {});\n  EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  // Now add a healthy host in P=0 and make sure it is immediately selected.\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.runCallbacks(add_hosts, {});\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  // Remove the healthy host and ensure we fail back over to tertiary_host_set_\n  host_set_.healthy_hosts_ = {};\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));\n}\n\n// Test that extending the priority set with an existing LB causes the correct updates when the\n// cluster is configured to disable on panic.\nTEST_P(FailoverTest, PriorityUpdatesWithLocalHostSetDisableOnPanic) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  failover_host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true);\n\n  init(false);\n  // With both the primary and failover hosts unhealthy, we should select no host.\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n\n  // 
Update the priority set with a new priority level P=2 and ensure the host\n  // is chosen\n  MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2);\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:82\")}));\n  tertiary_host_set_.hosts_ = *hosts;\n  tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_;\n  HostVector add_hosts;\n  add_hosts.push_back(tertiary_host_set_.hosts_[0]);\n  tertiary_host_set_.runCallbacks(add_hosts, {});\n  EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  // Now add a healthy host in P=0 and make sure it is immediately selected.\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.runCallbacks(add_hosts, {});\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  // Remove the healthy host and ensure we fail back over to tertiary_host_set_\n  host_set_.healthy_hosts_ = {};\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));\n}\n\n// Test extending the priority set.\nTEST_P(FailoverTest, ExtendPrioritiesUpdatingPrioritySet) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  failover_host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  init(true);\n  // With both the primary and failover hosts unhealthy, we should select an\n  // unhealthy primary host.\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  // Update the priority set with a new priority level P=2\n  // As it has healthy hosts, it should be selected.\n  MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2);\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:82\")}));\n  tertiary_host_set_.hosts_ = *hosts;\n  tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_;\n  HostVector add_hosts;\n  add_hosts.push_back(tertiary_host_set_.hosts_[0]);\n  tertiary_host_set_.runCallbacks(add_hosts, {});\n  
EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  // Now add a healthy host in P=0 and make sure it is immediately selected.\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.runCallbacks(add_hosts, {});\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n}\n\nTEST_P(FailoverTest, ExtendPrioritiesWithLocalPrioritySet) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  failover_host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  init(true);\n  // With both the primary and failover hosts unhealthy, we should select an\n  // unhealthy primary host.\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  // Update the host set with a new priority level. We should start selecting\n  // hosts from that level as it has viable hosts.\n  MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2);\n  HostVectorSharedPtr hosts2(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:84\")}));\n  tertiary_host_set_.hosts_ = *hosts2;\n  tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_;\n  HostVector add_hosts;\n  add_hosts.push_back(tertiary_host_set_.hosts_[0]);\n  tertiary_host_set_.runCallbacks(add_hosts, {});\n  EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  // Update the local hosts. 
We're not doing locality based routing in this\n  // test, but it should at least do no harm.\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:82\")}));\n  updateHosts(hosts, HostsPerLocalityImpl::empty());\n  EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr));\n}\n\n// Verifies that the number of warmed hosts is used to compute priority spillover.\nTEST_P(FailoverTest, PrioritiesWithNotAllWarmedHosts) {\n  // To begin with we set up the following:\n  // P0: 1 healthy, 1 unhealthy, 1 warmed.\n  // P1: 1 healthy.\n  // We then expect no spillover, since P0 is still overprovisioned.\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                      makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  host_set_.healthy_hosts_ = {host_set_.hosts_[0]};\n  failover_host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:82\")};\n  failover_host_set_.healthy_hosts_ = failover_host_set_.hosts_;\n  init(true);\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n}\n\n// Verifies that we handle zero warmed hosts.\nTEST_P(FailoverTest, PrioritiesWithZeroWarmedHosts) {\n  // To begin with we set up the following:\n  // P0: 2 unhealthy, 0 warmed.\n  // P1: 1 healthy.\n  // We then expect all the traffic to spill over to P1 since P0 has an effective load of zero.\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                      makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  failover_host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:82\")};\n  failover_host_set_.healthy_hosts_ = failover_host_set_.hosts_;\n\n  init(true);\n\n  EXPECT_EQ(failover_host_set_.hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(failover_host_set_.hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(failover_host_set_.hosts_[0], 
lb_->chooseHost(nullptr));\n}\n\nINSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, FailoverTest, ::testing::Values(true));\n\nTEST_P(RoundRobinLoadBalancerTest, NoHosts) {\n  init(false);\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, SingleHost) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  init(false);\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, Normal) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  init(false);\n\n  // Make sure the round robin pattern works for peeking.\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->peekAnotherHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->peekAnotherHost(nullptr));\n\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n\n  // Make sure that if picks get ahead of peeks, peeks resume at the next pick.\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->peekAnotherHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n\n  // Change host set with no peeks in progress\n  hostSet().healthy_hosts_.push_back(makeTestHost(info_, \"tcp://127.0.0.1:82\"));\n  hostSet().hosts_.push_back(hostSet().healthy_hosts_.back());\n  hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, {});\n  peekThenPick({2, 0, 1, 2});\n\n  // Now peek a few extra to push the index forward, alter the host set, and\n  // make sure the index is restored to 0.\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->peekAnotherHost(nullptr));\n  
EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->peekAnotherHost(nullptr));\n\n  hostSet().healthy_hosts_.push_back(makeTestHost(info_, \"tcp://127.0.0.1:83\"));\n  hostSet().hosts_.push_back(hostSet().healthy_hosts_.back());\n  hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, {hostSet().healthy_hosts_.front()});\n  peekThenPick({1, 2, 3});\n}\n\n// Validate that the RNG seed influences pick order.\nTEST_P(RoundRobinLoadBalancerTest, Seed) {\n  hostSet().healthy_hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:82\"),\n  };\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  EXPECT_CALL(random_, random()).WillRepeatedly(Return(1));\n  init(false);\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, Locality) {\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:82\")}));\n  HostsPerLocalitySharedPtr hosts_per_locality =\n      makeHostsPerLocality({{(*hosts)[1]}, {(*hosts)[0]}, {(*hosts)[2]}});\n  hostSet().hosts_ = *hosts;\n  hostSet().healthy_hosts_ = *hosts;\n  hostSet().healthy_hosts_per_locality_ = hosts_per_locality;\n  init(false);\n  // chooseHealthyLocality() return value determines which locality we use.\n  EXPECT_CALL(hostSet(), chooseHealthyLocality()).WillOnce(Return(0));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_CALL(hostSet(), chooseHealthyLocality()).WillOnce(Return(1));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], 
lb_->chooseHost(nullptr));\n  EXPECT_CALL(hostSet(), chooseHealthyLocality()).WillOnce(Return(0));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_CALL(hostSet(), chooseHealthyLocality()).WillOnce(Return(1));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_CALL(hostSet(), chooseHealthyLocality()).WillOnce(Return(0));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  // When there is no locality, we RR over all available hosts.\n  EXPECT_CALL(hostSet(), chooseHealthyLocality()).WillOnce(Return(absl::optional<uint32_t>()));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_CALL(hostSet(), chooseHealthyLocality()).WillOnce(Return(absl::optional<uint32_t>()));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_CALL(hostSet(), chooseHealthyLocality()).WillOnce(Return(absl::optional<uint32_t>()));\n  EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, DegradedLocality) {\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:84\")}));\n  HostVectorSharedPtr healthy_hosts(new HostVector({(*hosts)[0]}));\n  HostVectorSharedPtr degraded_hosts(new HostVector({(*hosts)[1], (*hosts)[2]}));\n  HostsPerLocalitySharedPtr hosts_per_locality =\n      makeHostsPerLocality({{(*hosts)[0]}, {(*hosts)[1], (*hosts)[2]}});\n  HostsPerLocalitySharedPtr healthy_hosts_per_locality = makeHostsPerLocality({{(*hosts)[0]}, {}});\n  HostsPerLocalitySharedPtr degraded_hosts_per_locality =\n      makeHostsPerLocality({{}, {(*hosts)[1], (*hosts)[2]}});\n\n  hostSet().hosts_ = *hosts;\n  hostSet().healthy_hosts_ = *healthy_hosts;\n  hostSet().degraded_hosts_ = *degraded_hosts;\n  
hostSet().hosts_per_locality_ = hosts_per_locality;\n  hostSet().healthy_hosts_per_locality_ = healthy_hosts_per_locality;\n  hostSet().degraded_hosts_per_locality_ = degraded_hosts_per_locality;\n  init(false);\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(50)).WillOnce(Return(0));\n  // Since we're split between healthy and degraded, the LB should call into both\n  // chooseHealthyLocality and chooseDegradedLocality.\n  EXPECT_CALL(hostSet(), chooseDegradedLocality()).WillOnce(Return(1));\n  EXPECT_EQ(hostSet().degraded_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_CALL(hostSet(), chooseHealthyLocality()).WillOnce(Return(0));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, Weighted) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\", 1),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\", 2)};\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  init(false);\n  // Initial weights respected.\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  // Modify weights, we converge on new weighting after one pick cycle.\n  hostSet().healthy_hosts_[0]->weight(2);\n  hostSet().healthy_hosts_[1]->weight(1);\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], 
lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  // Add a host, it should participate in next round of scheduling.\n  hostSet().healthy_hosts_.push_back(makeTestHost(info_, \"tcp://127.0.0.1:82\", 3));\n  hostSet().hosts_.push_back(hostSet().healthy_hosts_.back());\n  hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, {});\n  EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  // Remove last two hosts, add a new one with different weights.\n  HostVector removed_hosts = {hostSet().hosts_[1], hostSet().hosts_[2]};\n  hostSet().healthy_hosts_.pop_back();\n  hostSet().healthy_hosts_.pop_back();\n  hostSet().hosts_.pop_back();\n  hostSet().hosts_.pop_back();\n  hostSet().healthy_hosts_.push_back(makeTestHost(info_, \"tcp://127.0.0.1:83\", 4));\n  hostSet().hosts_.push_back(hostSet().healthy_hosts_.back());\n  hostSet().healthy_hosts_[0]->weight(1);\n  hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, removed_hosts);\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  
EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n}\n\n// Validate that the RNG seed influences pick order when weighted RR.\nTEST_P(RoundRobinLoadBalancerTest, WeightedSeed) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\", 1),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\", 2)};\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  EXPECT_CALL(random_, random()).WillRepeatedly(Return(1));\n  init(false);\n  // Initial weights respected.\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanic) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:82\"), makeTestHost(info_, \"tcp://127.0.0.1:83\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:84\"), makeTestHost(info_, \"tcp://127.0.0.1:85\")};\n\n  init(false);\n  EXPECT_EQ(hostSet().hosts_[0], 
lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().hosts_[2], lb_->chooseHost(nullptr));\n\n  // Take the threshold back above the panic threshold.\n  hostSet().healthy_hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:82\"), makeTestHost(info_, \"tcp://127.0.0.1:83\")};\n  hostSet().runCallbacks({}, {});\n\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n\n  EXPECT_EQ(3UL, stats_.lb_healthy_panic_.value());\n}\n\n// Test that no hosts are selected when fail_traffic_on_panic is enabled.\nTEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanicDisableOnPanic) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:82\"), makeTestHost(info_, \"tcp://127.0.0.1:83\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:84\"), makeTestHost(info_, \"tcp://127.0.0.1:85\")};\n\n  common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true);\n\n  init(false);\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n\n  // Take the threshold back above the panic threshold.\n  hostSet().healthy_hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:82\"), makeTestHost(info_, \"tcp://127.0.0.1:83\")};\n  hostSet().runCallbacks({}, {});\n\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n\n  EXPECT_EQ(1UL, stats_.lb_healthy_panic_.value());\n}\n\n// Ensure if the panic threshold is 0%, 
panic mode is disabled.\nTEST_P(RoundRobinLoadBalancerTest, DisablePanicMode) {\n  hostSet().healthy_hosts_ = {};\n  hostSet().hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n\n  common_config_.mutable_healthy_panic_threshold()->set_value(0);\n\n  init(false);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(0));\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n  EXPECT_EQ(0UL, stats_.lb_healthy_panic_.value());\n}\n\n// Test of host set selection with host filter\nTEST_P(RoundRobinLoadBalancerTest, HostSelectionWithFilter) {\n  NiceMock<Upstream::MockLoadBalancerContext> context;\n\n  HostVectorSharedPtr hosts(new HostVector(\n      {makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\")}));\n  HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality(\n      {{makeTestHost(info_, \"tcp://127.0.0.1:80\")}, {makeTestHost(info_, \"tcp://127.0.0.1:81\")}});\n\n  hostSet().hosts_ = *hosts;\n  hostSet().healthy_hosts_ = *hosts;\n  hostSet().healthy_hosts_per_locality_ = hosts_per_locality;\n\n  init(false);\n\n  // return a predicate that only accepts the first host\n  EXPECT_CALL(context, shouldSelectAnotherHost(_))\n      .WillRepeatedly(Invoke([&](const Host& host) -> bool {\n        return host.address()->asString() != hostSet().hosts_[0]->address()->asString();\n      }));\n  HealthyAndDegradedLoad priority_load{Upstream::HealthyLoad({0, 0}),\n                                       Upstream::DegradedLoad({0, 0})};\n\n  if (GetParam()) {\n    priority_load.healthy_priority_load_ = HealthyLoad({100u, 0u});\n  } else {\n    priority_load.healthy_priority_load_ = HealthyLoad({0u, 100u});\n  }\n  EXPECT_CALL(context, determinePriorityLoad(_, _, _)).WillRepeatedly(ReturnRef(priority_load));\n  EXPECT_CALL(context, hostSelectionRetryCount()).WillRepeatedly(Return(2));\n\n  // Calling chooseHost multiple times always returns host one, since the 
filter will reject\n  // the other host.\n  EXPECT_EQ(hostSet().hosts_[0], lb_->chooseHost(&context));\n  EXPECT_EQ(hostSet().hosts_[0], lb_->chooseHost(&context));\n  EXPECT_EQ(hostSet().hosts_[0], lb_->chooseHost(&context));\n\n  // By setting the retry counter to zero, we effectively disable the filter.\n  EXPECT_CALL(context, hostSelectionRetryCount()).WillRepeatedly(Return(0));\n\n  EXPECT_EQ(hostSet().hosts_[1], lb_->chooseHost(&context));\n  EXPECT_EQ(hostSet().hosts_[0], lb_->chooseHost(&context));\n  EXPECT_EQ(hostSet().hosts_[1], lb_->chooseHost(&context));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, ZoneAwareSmallCluster) {\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:82\")}));\n  HostsPerLocalitySharedPtr hosts_per_locality =\n      makeHostsPerLocality({{makeTestHost(info_, \"tcp://127.0.0.1:81\")},\n                            {makeTestHost(info_, \"tcp://127.0.0.1:80\")},\n                            {makeTestHost(info_, \"tcp://127.0.0.1:82\")}});\n\n  hostSet().hosts_ = *hosts;\n  hostSet().healthy_hosts_ = *hosts;\n  hostSet().healthy_hosts_per_locality_ = hosts_per_locality;\n  common_config_.mutable_healthy_panic_threshold()->set_value(0);\n  common_config_.mutable_zone_aware_lb_config()->mutable_routing_enabled()->set_value(98);\n  common_config_.mutable_zone_aware_lb_config()->mutable_min_cluster_size()->set_value(7);\n  init(true);\n  updateHosts(hosts, hosts_per_locality);\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 0))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 98))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 7))\n      
.WillRepeatedly(Return(7));\n\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr));\n\n  if (&hostSet() == &host_set_) {\n    // Cluster size is computed once at zone aware struct regeneration point.\n    EXPECT_EQ(1U, stats_.lb_zone_cluster_too_small_.value());\n  } else {\n    EXPECT_EQ(0U, stats_.lb_zone_cluster_too_small_.value());\n    return;\n  }\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 7))\n      .WillRepeatedly(Return(1));\n  // Trigger reload.\n  updateHosts(hosts, hosts_per_locality);\n  EXPECT_EQ(hostSet().healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, NoZoneAwareDifferentZoneSize) {\n  if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing.\n    return;\n  }\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:82\")}));\n  HostsPerLocalitySharedPtr upstream_hosts_per_locality =\n      makeHostsPerLocality({{makeTestHost(info_, \"tcp://127.0.0.1:81\")},\n                            {makeTestHost(info_, \"tcp://127.0.0.1:80\")},\n                            {makeTestHost(info_, \"tcp://127.0.0.1:82\")}});\n  HostsPerLocalitySharedPtr local_hosts_per_locality = makeHostsPerLocality(\n      {{makeTestHost(info_, \"tcp://127.0.0.1:81\")}, {makeTestHost(info_, \"tcp://127.0.0.1:80\")}});\n\n  hostSet().healthy_hosts_ = *hosts;\n  hostSet().hosts_ = *hosts;\n  hostSet().healthy_hosts_per_locality_ = upstream_hosts_per_locality;\n  common_config_.mutable_healthy_panic_threshold()->set_value(100);\n  
common_config_.mutable_zone_aware_lb_config()->mutable_routing_enabled()->set_value(98);\n  common_config_.mutable_zone_aware_lb_config()->mutable_min_cluster_size()->set_value(7);\n  init(true);\n  updateHosts(hosts, local_hosts_per_locality);\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 100))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 98))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 7))\n      .WillRepeatedly(Return(7));\n\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(1U, stats_.lb_zone_number_differs_.value());\n}\n\nTEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingLargeZoneSwitchOnOff) {\n  if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing.\n    return;\n  }\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n                                            makeTestHost(info_, \"tcp://127.0.0.1:82\")}));\n  HostsPerLocalitySharedPtr hosts_per_locality =\n      makeHostsPerLocality({{makeTestHost(info_, \"tcp://127.0.0.1:81\")},\n                            {makeTestHost(info_, \"tcp://127.0.0.1:80\")},\n                            {makeTestHost(info_, \"tcp://127.0.0.1:82\")}});\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillRepeatedly(Return(3));\n\n  hostSet().healthy_hosts_ = *hosts;\n  hostSet().hosts_ = *hosts;\n  hostSet().healthy_hosts_per_locality_ = 
hosts_per_locality;\n  init(true);\n  updateHosts(hosts, hosts_per_locality);\n\n  // There is only one host in the given zone for zone aware routing.\n  EXPECT_EQ(hostSet().healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(1U, stats_.lb_zone_routing_all_directly_.value());\n  EXPECT_EQ(hostSet().healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(2U, stats_.lb_zone_routing_all_directly_.value());\n\n  // Disable runtime global zone routing.\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(false));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingSmallZone) {\n  if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing.\n    return;\n  }\n  HostVectorSharedPtr upstream_hosts(new HostVector(\n      {makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n       makeTestHost(info_, \"tcp://127.0.0.1:82\"), makeTestHost(info_, \"tcp://127.0.0.1:83\"),\n       makeTestHost(info_, \"tcp://127.0.0.1:84\")}));\n  HostVectorSharedPtr local_hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:0\"),\n                                                  makeTestHost(info_, \"tcp://127.0.0.1:1\"),\n                                                  makeTestHost(info_, \"tcp://127.0.0.1:2\")}));\n\n  HostsPerLocalitySharedPtr upstream_hosts_per_locality = makeHostsPerLocality(\n      {{makeTestHost(info_, \"tcp://127.0.0.1:81\")},\n       {makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:82\")},\n       {makeTestHost(info_, \"tcp://127.0.0.1:83\"), makeTestHost(info_, \"tcp://127.0.0.1:84\")}});\n\n  HostsPerLocalitySharedPtr local_hosts_per_locality =\n      makeHostsPerLocality({{makeTestHost(info_, \"tcp://127.0.0.1:0\")},\n                            
{makeTestHost(info_, \"tcp://127.0.0.1:1\")},\n                            {makeTestHost(info_, \"tcp://127.0.0.1:2\")}});\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillRepeatedly(Return(5));\n\n  hostSet().healthy_hosts_ = *upstream_hosts;\n  hostSet().hosts_ = *upstream_hosts;\n  hostSet().healthy_hosts_per_locality_ = upstream_hosts_per_locality;\n  init(true);\n  updateHosts(local_hosts, local_hosts_per_locality);\n\n  // There is only one host in the given zone for zone aware routing.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(hostSet().healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(1U, stats_.lb_zone_routing_sampled_.value());\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(hostSet().healthy_hosts_per_locality_->get()[1][0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(1U, stats_.lb_zone_routing_cross_zone_.value());\n}\n\nTEST_P(RoundRobinLoadBalancerTest, LowPrecisionForDistribution) {\n  if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing.\n    return;\n  }\n  // upstream_hosts and local_hosts do not matter, zone aware routing is based on per zone hosts.\n  HostVectorSharedPtr upstream_hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:80\")}));\n  hostSet().healthy_hosts_ = *upstream_hosts;\n  hostSet().hosts_ = *upstream_hosts;\n  HostVectorSharedPtr local_hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:0\")}));\n\n  std::vector<HostVector> upstream_hosts_per_locality;\n  std::vector<HostVector> 
local_hosts_per_locality;\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillRepeatedly(Return(1));\n\n  // The following host distribution with current precision should lead to the no_capacity_left\n  // situation.\n  // Reuse the same host in all of the structures below to reduce time test takes and this does not\n  // impact load balancing logic.\n  HostSharedPtr host = makeTestHost(info_, \"tcp://127.0.0.1:80\");\n  HostVector current(45000);\n\n  for (int i = 0; i < 45000; ++i) {\n    current[i] = host;\n  }\n  local_hosts_per_locality.push_back(current);\n\n  current.resize(55000);\n  for (int i = 0; i < 55000; ++i) {\n    current[i] = host;\n  }\n  local_hosts_per_locality.push_back(current);\n\n  current.resize(44999);\n  for (int i = 0; i < 44999; ++i) {\n    current[i] = host;\n  }\n  upstream_hosts_per_locality.push_back(current);\n\n  current.resize(55001);\n  for (int i = 0; i < 55001; ++i) {\n    current[i] = host;\n  }\n  upstream_hosts_per_locality.push_back(current);\n\n  hostSet().healthy_hosts_per_locality_ =\n      makeHostsPerLocality(std::move(upstream_hosts_per_locality));\n  init(true);\n\n  // To trigger update callback.\n  auto local_hosts_per_locality_shared = makeHostsPerLocality(std::move(local_hosts_per_locality));\n  updateHosts(local_hosts, local_hosts_per_locality_shared);\n\n  // Force request out of small zone and to randomly select zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  lb_->chooseHost(nullptr);\n  EXPECT_EQ(1U, stats_.lb_zone_no_capacity_left_.value());\n}\n\nTEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingOneZone) {\n  if (&hostSet() == 
&failover_host_set_) { // P = 1 does not support zone-aware routing.\n    return;\n  }\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, \"tcp://127.0.0.1:80\")}));\n  HostsPerLocalitySharedPtr hosts_per_locality =\n      makeHostsPerLocality({{makeTestHost(info_, \"tcp://127.0.0.1:81\")}});\n\n  hostSet().healthy_hosts_ = *hosts;\n  hostSet().hosts_ = *hosts;\n  hostSet().healthy_hosts_per_locality_ = hosts_per_locality;\n  init(true);\n  updateHosts(hosts, hosts_per_locality);\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNotHealthy) {\n  HostVectorSharedPtr hosts(new HostVector(\n      {makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.2:80\")}));\n  HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality(\n      {{}, {makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.2:80\")}});\n\n  hostSet().healthy_hosts_ = *hosts;\n  hostSet().hosts_ = *hosts;\n  hostSet().healthy_hosts_per_locality_ = hosts_per_locality;\n  init(true);\n  updateHosts(hosts, hosts_per_locality);\n\n  // local zone has no healthy hosts, take from the all healthy hosts.\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingLocalEmpty) {\n  if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing.\n    return;\n  }\n  HostVectorSharedPtr upstream_hosts(new HostVector(\n      {makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\")}));\n  HostVectorSharedPtr local_hosts(new HostVector({}, {}));\n\n  HostsPerLocalitySharedPtr upstream_hosts_per_locality = makeHostsPerLocality(\n      {{makeTestHost(info_, \"tcp://127.0.0.1:80\")}, {makeTestHost(info_, \"tcp://127.0.0.1:81\")}});\n  HostsPerLocalitySharedPtr 
local_hosts_per_locality = makeHostsPerLocality({{}, {}});\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillOnce(Return(50))\n      .WillOnce(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillOnce(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillOnce(Return(1));\n\n  hostSet().healthy_hosts_ = *upstream_hosts;\n  hostSet().hosts_ = *upstream_hosts;\n  hostSet().healthy_hosts_per_locality_ = upstream_hosts_per_locality;\n  init(true);\n  updateHosts(local_hosts, local_hosts_per_locality);\n\n  // Local cluster is not OK, we'll do regular routing.\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(0U, stats_.lb_healthy_panic_.value());\n  EXPECT_EQ(1U, stats_.lb_local_cluster_not_ok_.value());\n}\n\nTEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingLocalEmptyFailTrafficOnPanic) {\n  common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true);\n\n  if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing.\n    return;\n  }\n  HostVectorSharedPtr upstream_hosts(new HostVector(\n      {makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\")}));\n  HostVectorSharedPtr local_hosts(new HostVector({}, {}));\n\n  HostsPerLocalitySharedPtr upstream_hosts_per_locality = makeHostsPerLocality(\n      {{makeTestHost(info_, \"tcp://127.0.0.1:80\")}, {makeTestHost(info_, \"tcp://127.0.0.1:81\")}});\n  HostsPerLocalitySharedPtr local_hosts_per_locality = makeHostsPerLocality({{}, {}});\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillOnce(Return(50))\n      .WillOnce(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillOnce(Return(true));\n  
EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillOnce(Return(1));\n\n  hostSet().healthy_hosts_ = *upstream_hosts;\n  hostSet().hosts_ = *upstream_hosts;\n  hostSet().healthy_hosts_per_locality_ = upstream_hosts_per_locality;\n  init(true);\n  updateHosts(local_hosts, local_hosts_per_locality);\n\n  // Local cluster is not OK, we'll do regular routing (and select no host, since we're in global\n  // panic).\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n  EXPECT_EQ(0U, stats_.lb_healthy_panic_.value());\n  EXPECT_EQ(1U, stats_.lb_local_cluster_not_ok_.value());\n}\n\n// Validate that if we have healthy host lists >= 2, but there is no local\n// locality included, that we skip zone aware routing and fallback.\nTEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNoLocalLocality) {\n  if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing.\n    return;\n  }\n  HostVectorSharedPtr upstream_hosts(new HostVector(\n      {makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\")}));\n  HostVectorSharedPtr local_hosts(new HostVector({}, {}));\n\n  HostsPerLocalitySharedPtr upstream_hosts_per_locality = makeHostsPerLocality(\n      {{makeTestHost(info_, \"tcp://127.0.0.1:80\")}, {makeTestHost(info_, \"tcp://127.0.0.1:81\")}},\n      true);\n  const HostsPerLocalitySharedPtr& local_hosts_per_locality = upstream_hosts_per_locality;\n\n  hostSet().healthy_hosts_ = *upstream_hosts;\n  hostSet().hosts_ = *upstream_hosts;\n  hostSet().healthy_hosts_per_locality_ = upstream_hosts_per_locality;\n  init(true);\n  updateHosts(local_hosts, local_hosts_per_locality);\n\n  // Local cluster is not OK, we'll do regular routing.\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(0U, stats_.lb_healthy_panic_.value());\n  EXPECT_EQ(1U, stats_.lb_local_cluster_not_ok_.value());\n}\n\nINSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, 
RoundRobinLoadBalancerTest,\n                         ::testing::Values(true, false));\n\nclass LeastRequestLoadBalancerTest : public LoadBalancerTestBase {\npublic:\n  LeastRequestLoadBalancer lb_{\n      priority_set_, nullptr, stats_, runtime_, random_, common_config_, least_request_lb_config_};\n};\n\nTEST_P(LeastRequestLoadBalancerTest, NoHosts) { EXPECT_EQ(nullptr, lb_.chooseHost(nullptr)); }\n\nTEST_P(LeastRequestLoadBalancerTest, SingleHost) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant.\n\n  // Host weight is 1.\n  {\n    EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3));\n    stats_.max_host_weight_.set(1UL);\n    EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n  }\n\n  // Host weight is 100.\n  {\n    EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3));\n    stats_.max_host_weight_.set(100UL);\n    EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n  }\n\n  HostVector empty;\n  {\n    hostSet().runCallbacks(empty, empty);\n    EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3));\n    EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n  }\n\n  {\n    HostVector remove_hosts;\n    remove_hosts.push_back(hostSet().hosts_[0]);\n    hostSet().healthy_hosts_.clear();\n    hostSet().hosts_.clear();\n    hostSet().runCallbacks(empty, remove_hosts);\n    EXPECT_CALL(random_, random()).WillOnce(Return(0));\n    EXPECT_EQ(nullptr, lb_.chooseHost(nullptr));\n  }\n}\n\nTEST_P(LeastRequestLoadBalancerTest, Normal) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  stats_.max_host_weight_.set(1UL);\n  hostSet().hosts_ = 
hostSet().healthy_hosts_;\n  hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant.\n\n  hostSet().healthy_hosts_[0]->stats().rq_active_.set(1);\n  hostSet().healthy_hosts_[1]->stats().rq_active_.set(2);\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n\n  hostSet().healthy_hosts_[0]->stats().rq_active_.set(2);\n  hostSet().healthy_hosts_[1]->stats().rq_active_.set(1);\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)).WillOnce(Return(3));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n}\n\nTEST_P(LeastRequestLoadBalancerTest, PNC) {\n  hostSet().healthy_hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:82\"), makeTestHost(info_, \"tcp://127.0.0.1:83\")};\n  stats_.max_host_weight_.set(1UL);\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  hostSet().runCallbacks({}, {}); // Trigger callbacks. 
The added/removed lists are not relevant.\n\n  hostSet().healthy_hosts_[0]->stats().rq_active_.set(4);\n  hostSet().healthy_hosts_[1]->stats().rq_active_.set(3);\n  hostSet().healthy_hosts_[2]->stats().rq_active_.set(2);\n  hostSet().healthy_hosts_[3]->stats().rq_active_.set(1);\n\n  // Creating various load balancer objects with different choice configs.\n  envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config;\n  lr_lb_config.mutable_choice_count()->set_value(2);\n  LeastRequestLoadBalancer lb_2{priority_set_, nullptr,        stats_,      runtime_,\n                                random_,       common_config_, lr_lb_config};\n  lr_lb_config.mutable_choice_count()->set_value(5);\n  LeastRequestLoadBalancer lb_5{priority_set_, nullptr,        stats_,      runtime_,\n                                random_,       common_config_, lr_lb_config};\n\n  // Verify correct number of choices.\n\n  // 0 choices configured should default to P2C.\n  EXPECT_CALL(random_, random()).Times(3).WillRepeatedly(Return(0));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n\n  // 2 choices configured results in P2C.\n  EXPECT_CALL(random_, random()).Times(3).WillRepeatedly(Return(0));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n\n  // 5 choices configured results in P5C.\n  EXPECT_CALL(random_, random()).Times(6).WillRepeatedly(Return(0));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_5.chooseHost(nullptr));\n\n  // Verify correct host chosen in P5C scenario.\n  EXPECT_CALL(random_, random())\n      .Times(6)\n      .WillOnce(Return(0))\n      .WillOnce(Return(3))\n      .WillOnce(Return(0))\n      .WillOnce(Return(3))\n      .WillOnce(Return(2))\n      .WillOnce(Return(1));\n  EXPECT_EQ(hostSet().healthy_hosts_[3], lb_5.chooseHost(nullptr));\n}\n\nTEST_P(LeastRequestLoadBalancerTest, WeightImbalance) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\", 1),\n                              
makeTestHost(info_, \"tcp://127.0.0.1:81\", 2)};\n  stats_.max_host_weight_.set(2UL);\n\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant.\n\n  EXPECT_CALL(random_, random()).WillRepeatedly(Return(0));\n\n  // We should see 2:1 ratio for hosts[1] to hosts[0].\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n\n  // Bringing hosts[1] to an active request should yield a 1:1 ratio.\n  hostSet().healthy_hosts_[1]->stats().rq_active_.set(1);\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n\n  // Settings hosts[0] to an active request and hosts[1] to no active requests should yield a 4:1\n  // ratio.\n  hostSet().healthy_hosts_[0]->stats().rq_active_.set(1);\n  hostSet().healthy_hosts_[1]->stats().rq_active_.set(0);\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], 
lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n}\n\n// Validate that the load balancer defaults to an active request bias value of 1.0 if the runtime\n// value is invalid (less than 0.0).\nTEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithInvalidActiveRequestBias) {\n  envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config;\n  lr_lb_config.mutable_active_request_bias()->set_runtime_key(\"ar_bias\");\n  lr_lb_config.mutable_active_request_bias()->set_default_value(1.0);\n  LeastRequestLoadBalancer lb_2{priority_set_, nullptr,        stats_,      runtime_,\n                                random_,       common_config_, lr_lb_config};\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"ar_bias\", 1.0)).WillRepeatedly(Return(-1.0));\n\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\", 1),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\", 2)};\n\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n\n  // Trigger callbacks. 
The added/removed lists are not relevant.\n  EXPECT_LOG_CONTAINS(\n      \"warn\", \"upstream: invalid active request bias supplied (runtime key ar_bias), using 1.0\",\n      hostSet().runCallbacks({}, {}));\n\n  EXPECT_CALL(random_, random()).WillRepeatedly(Return(0));\n\n  // We should see 2:1 ratio for hosts[1] to hosts[0].\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n\n  // Bringing hosts[1] to an active request should yield a 1:1 ratio.\n  hostSet().healthy_hosts_[1]->stats().rq_active_.set(1);\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n\n  // Settings hosts[0] to an active request and hosts[1] to no active requests should yield a 4:1\n  // ratio.\n  hostSet().healthy_hosts_[0]->stats().rq_active_.set(1);\n  hostSet().healthy_hosts_[1]->stats().rq_active_.set(0);\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], 
lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n}\n\nTEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithCustomActiveRequestBias) {\n  // Create a load balancer with a custom active request bias.\n  envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config;\n  lr_lb_config.mutable_active_request_bias()->set_runtime_key(\"ar_bias\");\n  lr_lb_config.mutable_active_request_bias()->set_default_value(1.0);\n  LeastRequestLoadBalancer lb_2{priority_set_, nullptr,        stats_,      runtime_,\n                                random_,       common_config_, lr_lb_config};\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"ar_bias\", 1.0)).WillRepeatedly(Return(0.0));\n\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\", 1),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\", 2)};\n\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  hostSet().runCallbacks({}, {}); // Trigger callbacks. 
The added/removed lists are not relevant.\n\n  EXPECT_CALL(random_, random()).WillRepeatedly(Return(0));\n\n  // We should see 2:1 ratio for hosts[1] to hosts[0], regardless of the active request count.\n  hostSet().healthy_hosts_[1]->stats().rq_active_.set(1);\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr));\n}\n\nTEST_P(LeastRequestLoadBalancerTest, WeightImbalanceCallbacks) {\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\", 1),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\", 2)};\n  stats_.max_host_weight_.set(2UL);\n\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  hostSet().runCallbacks({}, {}); // Trigger callbacks. 
The added/removed lists are not relevant.\n\n  EXPECT_CALL(random_, random()).WillRepeatedly(Return(0));\n\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_.chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n\n  // Remove and verify we get other host.\n  HostVector empty;\n  HostVector hosts_removed;\n  hosts_removed.push_back(hostSet().hosts_[1]);\n  hostSet().hosts_.erase(hostSet().hosts_.begin() + 1);\n  hostSet().healthy_hosts_.erase(hostSet().healthy_hosts_.begin() + 1);\n  hostSet().runCallbacks(empty, hosts_removed);\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr));\n}\n\nINSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, LeastRequestLoadBalancerTest,\n                         ::testing::Values(true, false));\n\nclass RandomLoadBalancerTest : public LoadBalancerTestBase {\npublic:\n  void init() {\n    lb_ = std::make_shared<RandomLoadBalancer>(priority_set_, nullptr, stats_, runtime_, random_,\n                                               common_config_);\n  }\n  std::shared_ptr<LoadBalancer> lb_;\n};\n\nTEST_P(RandomLoadBalancerTest, NoHosts) {\n  init();\n\n  EXPECT_EQ(nullptr, lb_->peekAnotherHost(nullptr));\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n}\n\nTEST_P(RandomLoadBalancerTest, Normal) {\n  init();\n  hostSet().healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                              makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  hostSet().hosts_ = hostSet().healthy_hosts_;\n  hostSet().runCallbacks({}, {}); // Trigger callbacks. 
The added/removed lists are not relevant.\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(2));\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->peekAnotherHost(nullptr));\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(3));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->peekAnotherHost(nullptr));\n\n  EXPECT_CALL(random_, random()).Times(0);\n  EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr));\n}\n\nTEST_P(RandomLoadBalancerTest, FailClusterOnPanic) {\n  common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true);\n  init();\n\n  hostSet().healthy_hosts_ = {};\n  hostSet().hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                      makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant.\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n}\n\nINSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, RandomLoadBalancerTest, ::testing::Values(true, false));\n\nTEST(LoadBalancerSubsetInfoImplTest, DefaultConfigIsDisabled) {\n  auto subset_info = LoadBalancerSubsetInfoImpl(\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::default_instance());\n\n  EXPECT_FALSE(subset_info.isEnabled());\n  EXPECT_TRUE(subset_info.fallbackPolicy() ==\n              envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK);\n  EXPECT_EQ(subset_info.defaultSubset().fields_size(), 0);\n  EXPECT_EQ(subset_info.subsetSelectors().size(), 0);\n}\n\nTEST(LoadBalancerSubsetInfoImplTest, SubsetConfig) {\n  auto subset_value = ProtobufWkt::Value();\n  subset_value.set_string_value(\"the value\");\n\n  auto subset_config = envoy::config::cluster::v3::Cluster::LbSubsetConfig::default_instance();\n  subset_config.set_fallback_policy(\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET);\n  subset_config.mutable_default_subset()->mutable_fields()->insert({\"key\", 
subset_value});\n  auto subset_selector1 = subset_config.mutable_subset_selectors()->Add();\n  subset_selector1->add_keys(\"selector_key1\");\n  auto subset_selector2 = subset_config.mutable_subset_selectors()->Add();\n  subset_selector2->add_keys(\"selector_key2\");\n  subset_selector2->set_fallback_policy(\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT);\n\n  auto subset_info = LoadBalancerSubsetInfoImpl(subset_config);\n\n  EXPECT_TRUE(subset_info.isEnabled());\n  EXPECT_TRUE(subset_info.fallbackPolicy() ==\n              envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET);\n  EXPECT_EQ(subset_info.defaultSubset().fields_size(), 1);\n  EXPECT_EQ(subset_info.defaultSubset().fields().at(\"key\").string_value(),\n            std::string(\"the value\"));\n  EXPECT_EQ(subset_info.subsetSelectors().size(), 2);\n  EXPECT_EQ(subset_info.subsetSelectors()[0]->selectorKeys(),\n            std::set<std::string>({\"selector_key1\"}));\n  EXPECT_EQ(subset_info.subsetSelectors()[0]->fallbackPolicy(),\n            envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED);\n  EXPECT_EQ(subset_info.subsetSelectors()[1]->selectorKeys(),\n            std::set<std::string>({\"selector_key2\"}));\n  EXPECT_EQ(subset_info.subsetSelectors()[1]->fallbackPolicy(),\n            envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT);\n}\n\nTEST(LoadBalancerSubsetInfoImplTest, KeysSubsetFallbackValid) {\n  auto subset_config = envoy::config::cluster::v3::Cluster::LbSubsetConfig::default_instance();\n  auto selector1 = subset_config.mutable_subset_selectors()->Add();\n  selector1->add_keys(\"key1\");\n  selector1->add_keys(\"key2\");\n  selector1->add_keys(\"key3\");\n  selector1->set_fallback_policy(\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET);\n  selector1->add_fallback_keys_subset(\"key1\");\n  
selector1->add_fallback_keys_subset(\"key3\");\n\n  auto selector2 = subset_config.mutable_subset_selectors()->Add();\n  selector2->add_keys(\"key1\");\n  selector2->add_keys(\"key3\");\n  selector2->add_keys(\"key4\");\n  selector2->set_fallback_policy(\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET);\n  selector2->add_fallback_keys_subset(\"key4\");\n\n  auto subset_info = LoadBalancerSubsetInfoImpl(subset_config);\n\n  EXPECT_EQ(subset_info.subsetSelectors()[0]->fallbackPolicy(),\n            envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET);\n  EXPECT_EQ(subset_info.subsetSelectors()[0]->selectorKeys(),\n            std::set<std::string>({\"key1\", \"key2\", \"key3\"}));\n  EXPECT_EQ(subset_info.subsetSelectors()[0]->fallbackKeysSubset(),\n            std::set<std::string>({\"key1\", \"key3\"}));\n\n  EXPECT_EQ(subset_info.subsetSelectors()[1]->fallbackPolicy(),\n            envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET);\n  EXPECT_EQ(subset_info.subsetSelectors()[1]->selectorKeys(),\n            std::set<std::string>({\"key1\", \"key3\", \"key4\"}));\n  EXPECT_EQ(subset_info.subsetSelectors()[1]->fallbackKeysSubset(),\n            std::set<std::string>({\"key4\"}));\n}\n\nTEST(LoadBalancerSubsetInfoImplTest, KeysSubsetForOtherPolicyInvalid) {\n  auto subset_config = envoy::config::cluster::v3::Cluster::LbSubsetConfig::default_instance();\n  auto selector = subset_config.mutable_subset_selectors()->Add();\n\n  selector->add_keys(\"key1\");\n  selector->add_keys(\"key2\");\n  selector->set_fallback_policy(\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT);\n  selector->add_fallback_keys_subset(\"key1\");\n\n  EXPECT_THROW_WITH_MESSAGE(LoadBalancerSubsetInfoImpl{subset_config}, EnvoyException,\n                            \"fallback_keys_subset can be set only for KEYS_SUBSET 
fallback_policy\");\n}\n\nTEST(LoadBalancerSubsetInfoImplTest, KeysSubsetNotASubsetInvalid) {\n  auto subset_config = envoy::config::cluster::v3::Cluster::LbSubsetConfig::default_instance();\n  auto selector = subset_config.mutable_subset_selectors()->Add();\n\n  selector->add_keys(\"key1\");\n  selector->add_keys(\"key2\");\n  selector->set_fallback_policy(\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET);\n  selector->add_fallback_keys_subset(\"key3\");\n\n  EXPECT_THROW_WITH_MESSAGE(LoadBalancerSubsetInfoImpl{subset_config}, EnvoyException,\n                            \"fallback_keys_subset must be a subset of selector keys\");\n}\n\nTEST(LoadBalancerSubsetInfoImplTest, KeysSubsetEmptyInvalid) {\n  auto subset_config = envoy::config::cluster::v3::Cluster::LbSubsetConfig::default_instance();\n  auto selector = subset_config.mutable_subset_selectors()->Add();\n\n  selector->add_keys(\"key1\");\n  selector->add_keys(\"key2\");\n  selector->set_fallback_policy(\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET);\n\n  EXPECT_THROW_WITH_MESSAGE(LoadBalancerSubsetInfoImpl{subset_config}, EnvoyException,\n                            \"fallback_keys_subset cannot be empty\");\n}\n\nTEST(LoadBalancerSubsetInfoImplTest, KeysSubsetEqualKeysInvalid) {\n  auto subset_config = envoy::config::cluster::v3::Cluster::LbSubsetConfig::default_instance();\n  auto selector = subset_config.mutable_subset_selectors()->Add();\n\n  selector->add_keys(\"key1\");\n  selector->add_keys(\"key2\");\n  selector->set_fallback_policy(\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET);\n  selector->add_fallback_keys_subset(\"key2\");\n  selector->add_fallback_keys_subset(\"key1\");\n\n  EXPECT_THROW_WITH_MESSAGE(LoadBalancerSubsetInfoImpl{subset_config}, EnvoyException,\n                            \"fallback_keys_subset cannot be equal to keys\");\n}\n\n} // namespace\n} // 
namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/load_balancer_simulation_test.cc",
    "content": "#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/random_generator.h\"\n#include \"common/network/utility.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nstatic HostSharedPtr newTestHost(Upstream::ClusterInfoConstSharedPtr cluster,\n                                 const std::string& url, uint32_t weight = 1,\n                                 const std::string& zone = \"\") {\n  envoy::config::core::v3::Locality locality;\n  locality.set_zone(zone);\n  return HostSharedPtr{\n      new HostImpl(cluster, \"\", Network::Utility::resolveUrl(url), nullptr, weight, locality,\n                   envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0,\n                   envoy::config::core::v3::UNKNOWN)};\n}\n\n// Simulate weighted LR load balancer.\nTEST(DISABLED_LeastRequestLoadBalancerWeightTest, Weight) {\n  const uint64_t num_hosts = 4;\n  const uint64_t weighted_subset_percent = 50;\n  const uint64_t weight = 2;          // weighted_subset_percent of hosts will have this weight.\n  const uint64_t active_requests = 3; // weighted_subset_percent will have this active requests.\n\n  PrioritySetImpl priority_set;\n  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};\n  HostVector 
hosts;\n  for (uint64_t i = 0; i < num_hosts; i++) {\n    const bool should_weight = i < num_hosts * (weighted_subset_percent / 100.0);\n    hosts.push_back(makeTestHost(info_, fmt::format(\"tcp://10.0.{}.{}:6379\", i / 256, i % 256),\n                                 should_weight ? weight : 1));\n    if (should_weight) {\n      hosts.back()->stats().rq_active_.set(active_requests);\n    }\n  }\n  HostVectorConstSharedPtr updated_hosts{new HostVector(hosts)};\n  HostsPerLocalitySharedPtr updated_locality_hosts{new HostsPerLocalityImpl(hosts)};\n  priority_set.updateHosts(\n      0,\n      updateHostsParams(updated_hosts, updated_locality_hosts,\n                        std::make_shared<const HealthyHostVector>(*updated_hosts),\n                        updated_locality_hosts),\n      {}, hosts, {}, absl::nullopt);\n\n  Stats::IsolatedStoreImpl stats_store;\n  ClusterStats stats{ClusterInfoImpl::generateStats(stats_store)};\n  stats.max_host_weight_.set(weight);\n  NiceMock<Runtime::MockLoader> runtime;\n  Random::RandomGeneratorImpl random;\n  envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config;\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config;\n  LeastRequestLoadBalancer lb_{\n      priority_set, nullptr, stats, runtime, random, common_config, least_request_lb_config};\n\n  absl::node_hash_map<HostConstSharedPtr, uint64_t> host_hits;\n  const uint64_t total_requests = 100;\n  for (uint64_t i = 0; i < total_requests; i++) {\n    host_hits[lb_.chooseHost(nullptr)]++;\n  }\n\n  absl::node_hash_map<uint64_t, double> weight_to_percent;\n  for (const auto& host : host_hits) {\n    std::cout << fmt::format(\"url:{}, weight:{}, hits:{}, percent_of_total:{}\\n\",\n                             host.first->address()->asString(), host.first->weight(), host.second,\n                             (static_cast<double>(host.second) / total_requests) * 100);\n    weight_to_percent[host.first->weight()] +=\n        
(static_cast<double>(host.second) / total_requests) * 100;\n  }\n\n  for (const auto& weight : weight_to_percent) {\n    std::cout << fmt::format(\"weight:{}, percent:{}\\n\", weight.first, weight.second);\n  }\n}\n\n/**\n * This test is for simulation only and should not be run as part of unit tests.\n */\nclass DISABLED_SimulationTest : public testing::Test {\npublic:\n  DISABLED_SimulationTest() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {\n    ON_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50U))\n        .WillByDefault(Return(50U));\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n        .WillByDefault(Return(6));\n  }\n\n  /**\n   * Run simulation with given parameters. Generate statistics on per host requests.\n   *\n   * @param originating_cluster total number of hosts in each zone in originating cluster.\n   * @param all_destination_cluster total number of hosts in each zone in upstream cluster.\n   * @param healthy_destination_cluster total number of healthy hosts in each zone in upstream\n   * cluster.\n   */\n  void run(std::vector<uint32_t> originating_cluster, std::vector<uint32_t> all_destination_cluster,\n           std::vector<uint32_t> healthy_destination_cluster) {\n    local_priority_set_ = new PrioritySetImpl;\n    // TODO(mattklein123): make load balancer per originating cluster host.\n    RandomLoadBalancer lb(priority_set_, local_priority_set_, stats_, runtime_, random_,\n                          common_config_);\n\n    HostsPerLocalitySharedPtr upstream_per_zone_hosts =\n        generateHostsPerZone(healthy_destination_cluster);\n    HostsPerLocalitySharedPtr local_per_zone_hosts = generateHostsPerZone(originating_cluster);\n\n    HostVectorSharedPtr originating_hosts = generateHostList(originating_cluster);\n    
HostVectorSharedPtr healthy_destination = generateHostList(healthy_destination_cluster);\n    host_set_.healthy_hosts_ = *healthy_destination;\n    HostVectorSharedPtr all_destination = generateHostList(all_destination_cluster);\n    host_set_.hosts_ = *all_destination;\n\n    std::map<std::string, uint32_t> hits;\n    for (uint32_t i = 0; i < total_number_of_requests; ++i) {\n      HostSharedPtr from_host = selectOriginatingHost(*originating_hosts);\n      uint32_t from_zone = atoi(from_host->locality().zone().c_str());\n\n      // Populate host set for upstream cluster.\n      std::vector<HostVector> per_zone_upstream;\n      per_zone_upstream.push_back(upstream_per_zone_hosts->get()[from_zone]);\n      for (size_t zone = 0; zone < upstream_per_zone_hosts->get().size(); ++zone) {\n        if (zone == from_zone) {\n          continue;\n        }\n\n        per_zone_upstream.push_back(upstream_per_zone_hosts->get()[zone]);\n      }\n      auto per_zone_upstream_shared = makeHostsPerLocality(std::move(per_zone_upstream));\n      host_set_.hosts_per_locality_ = per_zone_upstream_shared;\n      host_set_.healthy_hosts_per_locality_ = per_zone_upstream_shared;\n\n      // Populate host set for originating cluster.\n      std::vector<HostVector> per_zone_local;\n      per_zone_local.push_back(local_per_zone_hosts->get()[from_zone]);\n      for (size_t zone = 0; zone < local_per_zone_hosts->get().size(); ++zone) {\n        if (zone == from_zone) {\n          continue;\n        }\n\n        per_zone_local.push_back(local_per_zone_hosts->get()[zone]);\n      }\n      auto per_zone_local_shared = makeHostsPerLocality(std::move(per_zone_local));\n      local_priority_set_->updateHosts(\n          0,\n          updateHostsParams(originating_hosts, per_zone_local_shared,\n                            std::make_shared<const HealthyHostVector>(*originating_hosts),\n                            per_zone_local_shared),\n          {}, empty_vector_, empty_vector_, absl::nullopt);\n\n 
     HostConstSharedPtr selected = lb.chooseHost(nullptr);\n      hits[selected->address()->asString()]++;\n    }\n\n    double mean = total_number_of_requests * 1.0 / hits.size();\n    for (const auto& host_hit_num_pair : hits) {\n      double percent_diff = std::abs((mean - host_hit_num_pair.second) / mean) * 100;\n      std::cout << fmt::format(\"url:{}, hits:{}, {} % from mean\", host_hit_num_pair.first,\n                               host_hit_num_pair.second, percent_diff)\n                << std::endl;\n    }\n  }\n\n  HostSharedPtr selectOriginatingHost(const HostVector& hosts) {\n    // Originating cluster should have roughly the same per host request distribution.\n    return hosts[random_.random() % hosts.size()];\n  }\n\n  /**\n   * Generate list of hosts based on number of hosts in the given zone.\n   * @param hosts number of hosts per zone.\n   */\n  HostVectorSharedPtr generateHostList(const std::vector<uint32_t>& hosts) {\n    HostVectorSharedPtr ret(new HostVector());\n    for (size_t i = 0; i < hosts.size(); ++i) {\n      const std::string zone = std::to_string(i);\n      for (uint32_t j = 0; j < hosts[i]; ++j) {\n        const std::string url = fmt::format(\"tcp://host.{}.{}:80\", i, j);\n        ret->push_back(newTestHost(info_, url, 1, zone));\n      }\n    }\n\n    return ret;\n  }\n\n  /**\n   * Generate hosts by zone.\n   * @param hosts number of hosts per zone.\n   */\n  HostsPerLocalitySharedPtr generateHostsPerZone(const std::vector<uint32_t>& hosts) {\n    std::vector<HostVector> ret;\n    for (size_t i = 0; i < hosts.size(); ++i) {\n      const std::string zone = std::to_string(i);\n      HostVector zone_hosts;\n\n      for (uint32_t j = 0; j < hosts[i]; ++j) {\n        const std::string url = fmt::format(\"tcp://host.{}.{}:80\", i, j);\n        zone_hosts.push_back(newTestHost(info_, url, 1, zone));\n      }\n\n      ret.push_back(std::move(zone_hosts));\n    }\n\n    return makeHostsPerLocality(std::move(ret));\n  };\n\n  const 
uint32_t total_number_of_requests = 1000000;\n  HostVector empty_vector_;\n\n  PrioritySetImpl* local_priority_set_;\n  NiceMock<MockPrioritySet> priority_set_;\n  MockHostSet& host_set_ = *priority_set_.getMockHostSet(0);\n  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};\n  NiceMock<Runtime::MockLoader> runtime_;\n  Random::RandomGeneratorImpl random_;\n  Stats::IsolatedStoreImpl stats_store_;\n  ClusterStats stats_;\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n};\n\nTEST_F(DISABLED_SimulationTest, StrictlyEqualDistribution) {\n  run({1U, 1U, 1U}, {3U, 3U, 3U}, {3U, 3U, 3U});\n}\n\nTEST_F(DISABLED_SimulationTest, UnequalZoneDistribution) {\n  run({1U, 1U, 1U}, {2U, 5U, 5U}, {2U, 5U, 5U});\n}\n\nTEST_F(DISABLED_SimulationTest, UnequalZoneDistribution2) {\n  run({1U, 1U, 1U}, {5U, 5U, 6U}, {5U, 5U, 6U});\n}\n\nTEST_F(DISABLED_SimulationTest, UnequalZoneDistribution3) {\n  run({1U, 1U, 1U}, {10U, 10U, 10U}, {10U, 8U, 8U});\n}\n\nTEST_F(DISABLED_SimulationTest, UnequalZoneDistribution4) {\n  run({20U, 20U, 21U}, {4U, 5U, 5U}, {4U, 5U, 5U});\n}\n\nTEST_F(DISABLED_SimulationTest, UnequalZoneDistribution5) {\n  run({3U, 2U, 5U}, {4U, 5U, 5U}, {4U, 5U, 5U});\n}\n\nTEST_F(DISABLED_SimulationTest, UnequalZoneDistribution6) {\n  run({3U, 2U, 5U}, {3U, 4U, 5U}, {3U, 4U, 5U});\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/load_stats_reporter_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/endpoint/v3/load_report.pb.h\"\n#include \"envoy/service/load_stats/v3/lrs.pb.h\"\n\n#include \"common/upstream/load_stats_reporter.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\n// The tests in this file provide just coverage over some corner cases in error handling. The test\n// for the happy path for LoadStatsReporter is provided in //test/integration:load_stats_reporter.\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass LoadStatsReporterTest : public testing::Test {\npublic:\n  LoadStatsReporterTest()\n      : retry_timer_(new Event::MockTimer()), response_timer_(new Event::MockTimer()),\n        async_client_(new Grpc::MockAsyncClient()) {}\n\n  void createLoadStatsReporter() {\n    InSequence s;\n    EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n      retry_timer_cb_ = timer_cb;\n      return retry_timer_;\n    }));\n    EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n      response_timer_cb_ = timer_cb;\n      return response_timer_;\n    }));\n    load_stats_reporter_ = std::make_unique<LoadStatsReporter>(\n        local_info_, cm_, stats_store_, Grpc::RawAsyncClientPtr(async_client_),\n        envoy::config::core::v3::ApiVersion::AUTO, dispatcher_);\n  }\n\n  void expectSendMessage(\n      const std::vector<envoy::config::endpoint::v3::ClusterStats>& expected_cluster_stats) {\n    envoy::service::load_stats::v3::LoadStatsRequest 
expected_request;\n    expected_request.mutable_node()->MergeFrom(local_info_.node());\n    expected_request.mutable_node()->add_client_features(\"envoy.lrs.supports_send_all_clusters\");\n    std::copy(expected_cluster_stats.begin(), expected_cluster_stats.end(),\n              Protobuf::RepeatedPtrFieldBackInserter(expected_request.mutable_cluster_stats()));\n    EXPECT_CALL(\n        async_stream_,\n        sendMessageRaw_(Grpc::ProtoBufferEqIgnoreRepeatedFieldOrdering(expected_request), false));\n  }\n\n  void deliverLoadStatsResponse(const std::vector<std::string>& cluster_names) {\n    std::unique_ptr<envoy::service::load_stats::v3::LoadStatsResponse> response(\n        new envoy::service::load_stats::v3::LoadStatsResponse());\n    response->mutable_load_reporting_interval()->set_seconds(42);\n    std::copy(cluster_names.begin(), cluster_names.end(),\n              Protobuf::RepeatedPtrFieldBackInserter(response->mutable_clusters()));\n\n    EXPECT_CALL(*response_timer_, enableTimer(std::chrono::milliseconds(42000), _));\n    load_stats_reporter_->onReceiveMessage(std::move(response));\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  Event::MockDispatcher dispatcher_;\n  Stats::IsolatedStoreImpl stats_store_;\n  std::unique_ptr<LoadStatsReporter> load_stats_reporter_;\n  Event::MockTimer* retry_timer_;\n  Event::TimerCb retry_timer_cb_;\n  Event::MockTimer* response_timer_;\n  Event::TimerCb response_timer_cb_;\n  Grpc::MockAsyncStream async_stream_;\n  Grpc::MockAsyncClient* async_client_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n};\n\n// Validate that stream creation results in a timer based retry.\nTEST_F(LoadStatsReporterTest, StreamCreationFailure) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(nullptr));\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _));\n  createLoadStatsReporter();\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, 
_)).WillOnce(Return(&async_stream_));\n  expectSendMessage({});\n  retry_timer_cb_();\n}\n\nTEST_F(LoadStatsReporterTest, TestPubSub) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  createLoadStatsReporter();\n  deliverLoadStatsResponse({\"foo\"});\n\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  EXPECT_CALL(*response_timer_, enableTimer(std::chrono::milliseconds(42000), _));\n  response_timer_cb_();\n\n  deliverLoadStatsResponse({\"bar\"});\n\n  EXPECT_CALL(async_stream_, sendMessageRaw_(_, _));\n  EXPECT_CALL(*response_timer_, enableTimer(std::chrono::milliseconds(42000), _));\n  response_timer_cb_();\n}\n\n// Validate treatment of existing clusters across updates.\nTEST_F(LoadStatsReporterTest, ExistingClusters) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  // Initially, we have no clusters to report on.\n  expectSendMessage({});\n  createLoadStatsReporter();\n  time_system_.setMonotonicTime(std::chrono::microseconds(3));\n  // Start reporting on foo.\n  NiceMock<MockClusterMockPrioritySet> foo_cluster;\n  foo_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(2);\n  foo_cluster.info_->eds_service_name_ = \"bar\";\n  NiceMock<MockClusterMockPrioritySet> bar_cluster;\n  MockClusterManager::ClusterInfoMap cluster_info{{\"foo\", foo_cluster}, {\"bar\", bar_cluster}};\n  ON_CALL(cm_, clusters()).WillByDefault(Return(cluster_info));\n  deliverLoadStatsResponse({\"foo\"});\n  // Initial stats report for foo on timer tick.\n  foo_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(5);\n  time_system_.setMonotonicTime(std::chrono::microseconds(4));\n  {\n    envoy::config::endpoint::v3::ClusterStats foo_cluster_stats;\n    foo_cluster_stats.set_cluster_name(\"foo\");\n    foo_cluster_stats.set_cluster_service_name(\"bar\");\n    foo_cluster_stats.set_total_dropped_requests(5);\n    
foo_cluster_stats.mutable_load_report_interval()->MergeFrom(\n        Protobuf::util::TimeUtil::MicrosecondsToDuration(1));\n    expectSendMessage({foo_cluster_stats});\n  }\n  EXPECT_CALL(*response_timer_, enableTimer(std::chrono::milliseconds(42000), _));\n  response_timer_cb_();\n\n  // Some traffic on foo/bar in between previous request and next response.\n  foo_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n  bar_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n\n  // Start reporting on bar.\n  time_system_.setMonotonicTime(std::chrono::microseconds(6));\n  deliverLoadStatsResponse({\"foo\", \"bar\"});\n  // Stats report foo/bar on timer tick.\n  foo_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n  bar_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n  time_system_.setMonotonicTime(std::chrono::microseconds(28));\n  {\n    envoy::config::endpoint::v3::ClusterStats foo_cluster_stats;\n    foo_cluster_stats.set_cluster_name(\"foo\");\n    foo_cluster_stats.set_cluster_service_name(\"bar\");\n    foo_cluster_stats.set_total_dropped_requests(2);\n    foo_cluster_stats.mutable_load_report_interval()->MergeFrom(\n        Protobuf::util::TimeUtil::MicrosecondsToDuration(24));\n    envoy::config::endpoint::v3::ClusterStats bar_cluster_stats;\n    bar_cluster_stats.set_cluster_name(\"bar\");\n    bar_cluster_stats.set_total_dropped_requests(1);\n    bar_cluster_stats.mutable_load_report_interval()->MergeFrom(\n        Protobuf::util::TimeUtil::MicrosecondsToDuration(22));\n    expectSendMessage({bar_cluster_stats, foo_cluster_stats});\n  }\n  EXPECT_CALL(*response_timer_, enableTimer(std::chrono::milliseconds(42000), _));\n  response_timer_cb_();\n\n  // Some traffic on foo/bar in between previous request and next response.\n  foo_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n  bar_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n\n  // Stop reporting on foo.\n  
deliverLoadStatsResponse({\"bar\"});\n  // Stats report for bar on timer tick.\n  foo_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(5);\n  bar_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(5);\n  time_system_.setMonotonicTime(std::chrono::microseconds(33));\n  {\n    envoy::config::endpoint::v3::ClusterStats bar_cluster_stats;\n    bar_cluster_stats.set_cluster_name(\"bar\");\n    bar_cluster_stats.set_total_dropped_requests(6);\n    bar_cluster_stats.mutable_load_report_interval()->MergeFrom(\n        Protobuf::util::TimeUtil::MicrosecondsToDuration(5));\n    expectSendMessage({bar_cluster_stats});\n  }\n  EXPECT_CALL(*response_timer_, enableTimer(std::chrono::milliseconds(42000), _));\n  response_timer_cb_();\n\n  // Some traffic on foo/bar in between previous request and next response.\n  foo_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n  bar_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n\n  // Start tracking foo again, we should forget earlier history for foo.\n  time_system_.setMonotonicTime(std::chrono::microseconds(43));\n  deliverLoadStatsResponse({\"foo\", \"bar\"});\n  // Stats report foo/bar on timer tick.\n  foo_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n  bar_cluster.info_->load_report_stats_.upstream_rq_dropped_.add(1);\n  time_system_.setMonotonicTime(std::chrono::microseconds(47));\n  {\n    envoy::config::endpoint::v3::ClusterStats foo_cluster_stats;\n    foo_cluster_stats.set_cluster_name(\"foo\");\n    foo_cluster_stats.set_cluster_service_name(\"bar\");\n    foo_cluster_stats.set_total_dropped_requests(1);\n    foo_cluster_stats.mutable_load_report_interval()->MergeFrom(\n        Protobuf::util::TimeUtil::MicrosecondsToDuration(4));\n    envoy::config::endpoint::v3::ClusterStats bar_cluster_stats;\n    bar_cluster_stats.set_cluster_name(\"bar\");\n    bar_cluster_stats.set_total_dropped_requests(2);\n    
bar_cluster_stats.mutable_load_report_interval()->MergeFrom(\n        Protobuf::util::TimeUtil::MicrosecondsToDuration(14));\n    expectSendMessage({bar_cluster_stats, foo_cluster_stats});\n  }\n  EXPECT_CALL(*response_timer_, enableTimer(std::chrono::milliseconds(42000), _));\n  response_timer_cb_();\n}\n\n// Validate that the client can recover from a remote stream closure via retry.\nTEST_F(LoadStatsReporterTest, RemoteStreamClose) {\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage({});\n  createLoadStatsReporter();\n  EXPECT_CALL(*response_timer_, disableTimer());\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _));\n  load_stats_reporter_->onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, \"\");\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_));\n  expectSendMessage({});\n  retry_timer_cb_();\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/logical_dns_cluster_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n#include <tuple>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/logical_dns_cluster.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass LogicalDnsClusterTest : public testing::Test {\nprotected:\n  LogicalDnsClusterTest() : api_(Api::createApiForTest(stats_store_, random_)) {}\n\n  void setupFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) {\n    resolve_timer_ = new Event::MockTimer(&dispatcher_);\n    NiceMock<MockClusterManager> cm;\n    envoy::config::cluster::v3::Cluster cluster_config =\n        parseClusterFromV3Yaml(yaml, avoid_boosting);\n    Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format(\n        \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                              : cluster_config.alt_stat_name()));\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, stats_store_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n    cluster_ = std::make_shared<LogicalDnsCluster>(cluster_config, runtime_, dns_resolver_,\n                                                   factory_context, std::move(scope), false);\n    cluster_->prioritySet().addPriorityUpdateCb(\n        [&](uint32_t, const HostVector&, const HostVector&) -> void {\n          membership_updated_.ready();\n        });\n    cluster_->initialize([&]() -> void { initialized_.ready(); });\n  }\n\n  void expectResolve(Network::DnsLookupFamily dns_lookup_family,\n                     const std::string& expected_address) {\n    EXPECT_CALL(*dns_resolver_, resolve(expected_address, dns_lookup_family, _))\n        .WillOnce(Invoke([&](const std::string&, Network::DnsLookupFamily,\n                             Network::DnsResolver::ResolveCb cb) -> Network::ActiveDnsQuery* {\n          dns_callback_ = cb;\n          return &active_dns_query_;\n        }));\n  }\n\n  void testBasicSetup(const std::string& config, const std::string& expected_address,\n                      uint32_t expected_port, uint32_t expected_hc_port) {\n    expectResolve(Network::DnsLookupFamily::V4Only, expected_address);\n    setupFromV3Yaml(config);\n\n    EXPECT_CALL(membership_updated_, ready());\n    EXPECT_CALL(initialized_, ready());\n    EXPECT_CALL(*resolve_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n    dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                  TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}));\n\n    EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n    EXPECT_EQ(1UL, 
cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n    EXPECT_EQ(1UL,\n              cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n    EXPECT_EQ(\n        1UL,\n        cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n    EXPECT_EQ(cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0],\n              cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts()[0]);\n    HostSharedPtr logical_host = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0];\n\n    EXPECT_EQ(\"127.0.0.1:\" + std::to_string(expected_hc_port),\n              logical_host->healthCheckAddress()->asString());\n    EXPECT_EQ(\"127.0.0.1:\" + std::to_string(expected_port), logical_host->address()->asString());\n\n    EXPECT_CALL(dispatcher_,\n                createClientConnection_(\n                    PointeesEq(Network::Utility::resolveUrl(\"tcp://127.0.0.1:443\")), _, _, _))\n        .WillOnce(Return(new NiceMock<Network::MockClientConnection>()));\n    logical_host->createConnection(dispatcher_, nullptr, nullptr);\n    logical_host->outlierDetector().putHttpResponseCode(200);\n\n    expectResolve(Network::DnsLookupFamily::V4Only, expected_address);\n    resolve_timer_->invokeCallback();\n\n    // Should not cause any changes.\n    EXPECT_CALL(*resolve_timer_, enableTimer(_, _));\n    dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                  TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\", \"127.0.0.3\"}));\n\n    EXPECT_EQ(\"127.0.0.1:\" + std::to_string(expected_hc_port),\n              logical_host->healthCheckAddress()->asString());\n    EXPECT_EQ(\"127.0.0.1:\" + std::to_string(expected_port), logical_host->address()->asString());\n\n    EXPECT_EQ(logical_host, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n    EXPECT_CALL(dispatcher_,\n                createClientConnection_(\n                    
PointeesEq(Network::Utility::resolveUrl(\"tcp://127.0.0.1:443\")), _, _, _))\n        .WillOnce(Return(new NiceMock<Network::MockClientConnection>()));\n    Host::CreateConnectionData data = logical_host->createConnection(dispatcher_, nullptr, nullptr);\n    EXPECT_FALSE(data.host_description_->canary());\n    EXPECT_EQ(&cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->cluster(),\n              &data.host_description_->cluster());\n    EXPECT_EQ(&cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->stats(),\n              &data.host_description_->stats());\n    EXPECT_EQ(\"127.0.0.1:443\", data.host_description_->address()->asString());\n    EXPECT_EQ(\"\", data.host_description_->locality().region());\n    EXPECT_EQ(\"\", data.host_description_->locality().zone());\n    EXPECT_EQ(\"\", data.host_description_->locality().sub_zone());\n    EXPECT_EQ(\"foo.bar.com\", data.host_description_->hostname());\n    EXPECT_TRUE(TestUtility::protoEqual(envoy::config::core::v3::Metadata::default_instance(),\n                                        *data.host_description_->metadata()));\n    data.host_description_->outlierDetector().putHttpResponseCode(200);\n    data.host_description_->healthChecker().setUnhealthy();\n\n    expectResolve(Network::DnsLookupFamily::V4Only, expected_address);\n    resolve_timer_->invokeCallback();\n\n    // Should cause a change.\n    EXPECT_CALL(*resolve_timer_, enableTimer(_, _));\n    dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                  TestUtility::makeDnsResponse({\"127.0.0.3\", \"127.0.0.1\", \"127.0.0.2\"}));\n\n    EXPECT_EQ(\"127.0.0.3:\" + std::to_string(expected_hc_port),\n              logical_host->healthCheckAddress()->asString());\n    EXPECT_EQ(\"127.0.0.3:\" + std::to_string(expected_port), logical_host->address()->asString());\n\n    EXPECT_EQ(logical_host, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n    EXPECT_CALL(dispatcher_,\n                
createClientConnection_(\n                    PointeesEq(Network::Utility::resolveUrl(\"tcp://127.0.0.3:443\")), _, _, _))\n        .WillOnce(Return(new NiceMock<Network::MockClientConnection>()));\n    logical_host->createConnection(dispatcher_, nullptr, nullptr);\n\n    expectResolve(Network::DnsLookupFamily::V4Only, expected_address);\n    resolve_timer_->invokeCallback();\n\n    // Failure should not cause any change.\n    ON_CALL(random_, random()).WillByDefault(Return(6000));\n    EXPECT_CALL(*resolve_timer_, enableTimer(std::chrono::milliseconds(6000), _));\n    dns_callback_(Network::DnsResolver::ResolutionStatus::Failure, {});\n\n    EXPECT_EQ(logical_host, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n    EXPECT_CALL(dispatcher_,\n                createClientConnection_(\n                    PointeesEq(Network::Utility::resolveUrl(\"tcp://127.0.0.3:443\")), _, _, _))\n        .WillOnce(Return(new NiceMock<Network::MockClientConnection>()));\n    logical_host->createConnection(dispatcher_, nullptr, nullptr);\n\n    // Empty Success should not cause any change.\n    ON_CALL(random_, random()).WillByDefault(Return(6000));\n    EXPECT_CALL(*resolve_timer_, enableTimer(std::chrono::milliseconds(6000), _));\n    dns_callback_(Network::DnsResolver::ResolutionStatus::Success, {});\n\n    EXPECT_EQ(logical_host, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n    EXPECT_CALL(dispatcher_,\n                createClientConnection_(\n                    PointeesEq(Network::Utility::resolveUrl(\"tcp://127.0.0.3:443\")), _, _, _))\n        .WillOnce(Return(new NiceMock<Network::MockClientConnection>()));\n    logical_host->createConnection(dispatcher_, nullptr, nullptr);\n\n    // Make sure we cancel.\n    EXPECT_CALL(active_dns_query_, cancel());\n    expectResolve(Network::DnsLookupFamily::V4Only, expected_address);\n    resolve_timer_->invokeCallback();\n\n    tls_.shutdownThread();\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  
Ssl::MockContextManager ssl_context_manager_;\n  std::shared_ptr<NiceMock<Network::MockDnsResolver>> dns_resolver_{\n      new NiceMock<Network::MockDnsResolver>};\n  Network::MockActiveDnsQuery active_dns_query_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  Network::DnsResolver::ResolveCb dns_callback_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Event::MockTimer* resolve_timer_;\n  std::shared_ptr<LogicalDnsCluster> cluster_;\n  ReadyWatcher membership_updated_;\n  ReadyWatcher initialized_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n};\n\nusing LogicalDnsConfigTuple =\n    std::tuple<std::string, Network::DnsLookupFamily, std::list<std::string>>;\nstd::vector<LogicalDnsConfigTuple> generateLogicalDnsParams() {\n  std::vector<LogicalDnsConfigTuple> dns_config;\n  {\n    std::string family_yaml(\"\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::Auto);\n    std::list<std::string> dns_response{\"127.0.0.1\", \"127.0.0.2\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response));\n  }\n  {\n    std::string family_yaml(R\"EOF(dns_lookup_family: v4_only\n                            )EOF\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::V4Only);\n    std::list<std::string> dns_response{\"127.0.0.1\", \"127.0.0.2\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response));\n  }\n  {\n    std::string family_yaml(R\"EOF(dns_lookup_family: v6_only\n                            )EOF\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::V6Only);\n    std::list<std::string> dns_response{\"::1\", \"::2\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, 
dns_response));\n  }\n  {\n    std::string family_yaml(R\"EOF(dns_lookup_family: auto\n                            )EOF\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::Auto);\n    std::list<std::string> dns_response{\"::1\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response));\n  }\n  return dns_config;\n}\n\nclass LogicalDnsParamTest : public LogicalDnsClusterTest,\n                            public testing::WithParamInterface<LogicalDnsConfigTuple> {};\n\nINSTANTIATE_TEST_SUITE_P(DnsParam, LogicalDnsParamTest,\n                         testing::ValuesIn(generateLogicalDnsParams()));\n\n// Validate that if the DNS resolves immediately, during the LogicalDnsCluster\n// constructor, we have the expected host state and initialization callback\n// invocation.\nTEST_P(LogicalDnsParamTest, ImmediateResolve) {\n  const std::string yaml = R\"EOF(\n  name: name\n  connect_timeout: 0.25s\n  type: logical_dns\n  lb_policy: round_robin\n  )EOF\" + std::get<0>(GetParam()) +\n                           R\"EOF(\n  load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n  )EOF\";\n\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*dns_resolver_, resolve(\"foo.bar.com\", std::get<1>(GetParam()), _))\n      .WillOnce(Invoke([&](const std::string&, Network::DnsLookupFamily,\n                           Network::DnsResolver::ResolveCb cb) -> Network::ActiveDnsQuery* {\n        EXPECT_CALL(*resolve_timer_, enableTimer(_, _));\n        cb(Network::DnsResolver::ResolutionStatus::Success,\n           TestUtility::makeDnsResponse(std::get<2>(GetParam())));\n        return nullptr;\n      }));\n  setupFromV3Yaml(yaml);\n  EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(1UL, 
cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(\"foo.bar.com\",\n            cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname());\n  cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthChecker().setUnhealthy();\n  tls_.shutdownThread();\n}\n\nTEST_F(LogicalDnsParamTest, FailureRefreshRateBackoffResetsWhenSuccessHappens) {\n  const std::string yaml = R\"EOF(\n  name: name\n  type: LOGICAL_DNS\n  dns_refresh_rate: 4s\n  dns_failure_refresh_rate:\n    base_interval: 7s\n    max_interval: 10s\n  connect_timeout: 0.25s\n  lb_policy: ROUND_ROBIN\n  # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set\n  # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config.\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n  )EOF\";\n\n  expectResolve(Network::DnsLookupFamily::V4Only, \"foo.bar.com\");\n  setupFromV3Yaml(yaml);\n\n  // Failing response kicks the failure refresh backoff strategy.\n  ON_CALL(random_, random()).WillByDefault(Return(8000));\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*resolve_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  dns_callback_(Network::DnsResolver::ResolutionStatus::Failure, {});\n\n  // Successful call should reset the failure backoff strategy.\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(*resolve_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}));\n\n  // Therefore, a subsequent failure should get a [0,base * 1] refresh.\n  ON_CALL(random_, random()).WillByDefault(Return(8000));\n  EXPECT_CALL(*resolve_timer_, 
enableTimer(std::chrono::milliseconds(1000), _));\n  dns_callback_(Network::DnsResolver::ResolutionStatus::Failure, {});\n\n  tls_.shutdownThread();\n}\n\nTEST_F(LogicalDnsParamTest, TtlAsDnsRefreshRate) {\n  const std::string yaml = R\"EOF(\n  name: name\n  type: LOGICAL_DNS\n  dns_refresh_rate: 4s\n  respect_dns_ttl: true\n  connect_timeout: 0.25s\n  lb_policy: ROUND_ROBIN\n  # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set\n  # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config.\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                     address: foo.bar.com\n                     port_value: 443\n  )EOF\";\n\n  expectResolve(Network::DnsLookupFamily::V4Only, \"foo.bar.com\");\n  setupFromV3Yaml(yaml);\n\n  // TTL is recorded when the DNS response is successful and not empty\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*resolve_timer_, enableTimer(std::chrono::milliseconds(5000), _));\n  dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}, std::chrono::seconds(5)));\n\n  // If the response is successful but empty, the cluster uses the cluster configured refresh rate.\n  EXPECT_CALL(*resolve_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                TestUtility::makeDnsResponse({}, std::chrono::seconds(5)));\n\n  // On failure, the cluster uses the cluster configured refresh rate.\n  EXPECT_CALL(*resolve_timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  dns_callback_(Network::DnsResolver::ResolutionStatus::Failure,\n                TestUtility::makeDnsResponse({}, std::chrono::seconds(5)));\n\n  
tls_.shutdownThread();\n}\n\nTEST_F(LogicalDnsClusterTest, BadConfig) {\n  const std::string multiple_hosts_yaml = R\"EOF(\n  name: name\n  type: LOGICAL_DNS\n  dns_refresh_rate: 4s\n  connect_timeout: 0.25s\n  lb_policy: ROUND_ROBIN\n  load_assignment:\n        cluster_name: name\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443                     \n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo2.bar.com\n                    port_value: 443\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      setupFromV3Yaml(multiple_hosts_yaml), EnvoyException,\n      \"LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint\");\n\n  const std::string multiple_lb_endpoints_yaml = R\"EOF(\n  name: name\n  type: LOGICAL_DNS\n  dns_refresh_rate: 4s\n  connect_timeout: 0.25s\n  lb_policy: ROUND_ROBIN\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n    cluster_name: name\n    endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: foo.bar.com\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n        - endpoint:\n            address:\n              socket_address:\n                address: hello.world.com\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      setupFromV3Yaml(multiple_lb_endpoints_yaml), EnvoyException,\n      \"LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint\");\n\n  const std::string multiple_endpoints_yaml = R\"EOF(\n  name: name\n  type: LOGICAL_DNS\n  dns_refresh_rate: 4s\n  connect_timeout: 0.25s\n  lb_policy: ROUND_ROBIN\n  dns_lookup_family: 
V4_ONLY\n  load_assignment:\n    cluster_name: name\n    endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: foo.bar.com\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: hello.world.com\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      setupFromV3Yaml(multiple_endpoints_yaml), EnvoyException,\n      \"LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint\");\n\n  const std::string custom_resolver_yaml = R\"EOF(\n  name: name\n  type: LOGICAL_DNS\n  dns_refresh_rate: 4s\n  connect_timeout: 0.25s\n  lb_policy: ROUND_ROBIN\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n    cluster_name: name\n    endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: hello.world.com\n                port_value: 443\n                resolver_name: customresolver\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(setupFromV3Yaml(custom_resolver_yaml), EnvoyException,\n                            \"LOGICAL_DNS clusters must NOT have a custom resolver name set\");\n}\n\nTEST_F(LogicalDnsClusterTest, Basic) {\n  const std::string basic_yaml_hosts = R\"EOF(\n  name: name\n  type: LOGICAL_DNS\n  dns_refresh_rate: 4s\n  dns_failure_refresh_rate:\n    base_interval: 7s\n    max_interval: 10s\n  connect_timeout: 0.25s\n  lb_policy: ROUND_ROBIN\n  # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set\n  # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config.\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n        endpoints:\n   
       - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n  )EOF\";\n\n  const std::string basic_yaml_load_assignment = R\"EOF(\n  name: name\n  type: LOGICAL_DNS\n  dns_refresh_rate: 4s\n  dns_failure_refresh_rate:\n    base_interval: 7s\n    max_interval: 10s\n  connect_timeout: 0.25s\n  lb_policy: ROUND_ROBIN\n  # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set\n  # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config.\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n    cluster_name: name\n    endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: foo.bar.com\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  testBasicSetup(basic_yaml_hosts, \"foo.bar.com\", 443, 443);\n  // Expect to override the health check address port value.\n  testBasicSetup(basic_yaml_load_assignment, \"foo.bar.com\", 443, 8000);\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/maglev_lb_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/upstream/maglev_lb.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass TestLoadBalancerContext : public LoadBalancerContextBase {\npublic:\n  using HostPredicate = std::function<bool(const Host&)>;\n\n  TestLoadBalancerContext(uint64_t hash_key)\n      : TestLoadBalancerContext(hash_key, 0, [](const Host&) { return false; }) {}\n  TestLoadBalancerContext(uint64_t hash_key, uint32_t retry_count,\n                          HostPredicate should_select_another_host)\n      : hash_key_(hash_key), retry_count_(retry_count),\n        should_select_another_host_(should_select_another_host) {}\n\n  // Upstream::LoadBalancerContext\n  absl::optional<uint64_t> computeHashKey() override { return hash_key_; }\n  uint32_t hostSelectionRetryCount() const override { return retry_count_; };\n  bool shouldSelectAnotherHost(const Host& host) override {\n    return should_select_another_host_(host);\n  }\n\n  absl::optional<uint64_t> hash_key_;\n  uint32_t retry_count_;\n  HostPredicate should_select_another_host_;\n};\n\n// Note: ThreadAwareLoadBalancer base is heavily tested by RingHashLoadBalancerTest. 
Only basic\n//       functionality is covered here.\nclass MaglevLoadBalancerTest : public testing::Test {\npublic:\n  MaglevLoadBalancerTest() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {}\n\n  void createLb() {\n    lb_ = std::make_unique<MaglevLoadBalancer>(priority_set_, stats_, stats_store_, runtime_,\n                                               random_, config_, common_config_);\n  }\n\n  void init(uint64_t table_size) {\n    config_ = envoy::config::cluster::v3::Cluster::MaglevLbConfig();\n    config_.value().mutable_table_size()->set_value(table_size);\n\n    createLb();\n    lb_->initialize();\n  }\n\n  NiceMock<MockPrioritySet> priority_set_;\n  MockHostSet& host_set_ = *priority_set_.getMockHostSet(0);\n  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};\n  Stats::IsolatedStoreImpl stats_store_;\n  ClusterStats stats_;\n  absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig> config_;\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  std::unique_ptr<MaglevLoadBalancer> lb_;\n};\n\n// Works correctly without any hosts.\nTEST_F(MaglevLoadBalancerTest, NoHost) {\n  init(7);\n  EXPECT_EQ(nullptr, lb_->factory()->create()->chooseHost(nullptr));\n};\n\n// Throws an exception if table size is not a prime number.\nTEST_F(MaglevLoadBalancerTest, NoPrimeNumber) {\n  EXPECT_THROW_WITH_MESSAGE(init(8), EnvoyException,\n                            \"The table size of maglev must be prime number\");\n};\n\n// Check it has default table size if config is null or table size has invalid value.\nTEST_F(MaglevLoadBalancerTest, DefaultMaglevTableSize) {\n  const uint64_t defaultValue = MaglevTable::DefaultTableSize;\n\n  config_ = envoy::config::cluster::v3::Cluster::MaglevLbConfig();\n  createLb();\n  EXPECT_EQ(defaultValue, lb_->tableSize());\n\n  config_ = absl::nullopt;\n  createLb();\n  
EXPECT_EQ(defaultValue, lb_->tableSize());\n};\n\n// Basic sanity tests.\nTEST_F(MaglevLoadBalancerTest, Basic) {\n  host_set_.hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:90\"), makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:92\"), makeTestHost(info_, \"tcp://127.0.0.1:93\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:94\"), makeTestHost(info_, \"tcp://127.0.0.1:95\")};\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.runCallbacks({}, {});\n  init(7);\n\n  EXPECT_EQ(\"maglev_lb.min_entries_per_host\", lb_->stats().min_entries_per_host_.name());\n  EXPECT_EQ(\"maglev_lb.max_entries_per_host\", lb_->stats().max_entries_per_host_.name());\n  EXPECT_EQ(1, lb_->stats().min_entries_per_host_.value());\n  EXPECT_EQ(2, lb_->stats().max_entries_per_host_.value());\n\n  // maglev: i=0 host=127.0.0.1:92\n  // maglev: i=1 host=127.0.0.1:94\n  // maglev: i=2 host=127.0.0.1:90\n  // maglev: i=3 host=127.0.0.1:91\n  // maglev: i=4 host=127.0.0.1:95\n  // maglev: i=5 host=127.0.0.1:90\n  // maglev: i=6 host=127.0.0.1:93\n  LoadBalancerPtr lb = lb_->factory()->create();\n  const std::vector<uint32_t> expected_assignments{2, 4, 0, 1, 5, 0, 3};\n  for (uint32_t i = 0; i < 3 * expected_assignments.size(); ++i) {\n    TestLoadBalancerContext context(i);\n    EXPECT_EQ(host_set_.hosts_[expected_assignments[i % expected_assignments.size()]],\n              lb->chooseHost(&context));\n  }\n}\n\n// Basic with hostname.\nTEST_F(MaglevLoadBalancerTest, BasicWithHostName) {\n  host_set_.hosts_ = {makeTestHost(info_, \"90\", \"tcp://127.0.0.1:90\"),\n                      makeTestHost(info_, \"91\", \"tcp://127.0.0.1:91\"),\n                      makeTestHost(info_, \"92\", \"tcp://127.0.0.1:92\"),\n                      makeTestHost(info_, \"93\", \"tcp://127.0.0.1:93\"),\n                      makeTestHost(info_, \"94\", \"tcp://127.0.0.1:94\"),\n                      makeTestHost(info_, \"95\", \"tcp://127.0.0.1:95\")};\n  
host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.runCallbacks({}, {});\n  common_config_ = envoy::config::cluster::v3::Cluster::CommonLbConfig();\n  auto chc = envoy::config::cluster::v3::Cluster::CommonLbConfig::ConsistentHashingLbConfig();\n  chc.set_use_hostname_for_hashing(true);\n  common_config_.set_allocated_consistent_hashing_lb_config(&chc);\n  init(7);\n  common_config_.release_consistent_hashing_lb_config();\n\n  EXPECT_EQ(\"maglev_lb.min_entries_per_host\", lb_->stats().min_entries_per_host_.name());\n  EXPECT_EQ(\"maglev_lb.max_entries_per_host\", lb_->stats().max_entries_per_host_.name());\n  EXPECT_EQ(1, lb_->stats().min_entries_per_host_.value());\n  EXPECT_EQ(2, lb_->stats().max_entries_per_host_.value());\n\n  // maglev: i=0 host=92\n  // maglev: i=1 host=95\n  // maglev: i=2 host=90\n  // maglev: i=3 host=93\n  // maglev: i=4 host=94\n  // maglev: i=5 host=91\n  // maglev: i=6 host=90\n  LoadBalancerPtr lb = lb_->factory()->create();\n  const std::vector<uint32_t> expected_assignments{2, 5, 0, 3, 4, 1, 0};\n  for (uint32_t i = 0; i < 3 * expected_assignments.size(); ++i) {\n    TestLoadBalancerContext context(i);\n    EXPECT_EQ(host_set_.hosts_[expected_assignments[i % expected_assignments.size()]],\n              lb->chooseHost(&context));\n  }\n}\n\n// Same ring as the Basic test, but exercise retry host predicate behavior.\nTEST_F(MaglevLoadBalancerTest, BasicWithRetryHostPredicate) {\n  host_set_.hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:90\"), makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:92\"), makeTestHost(info_, \"tcp://127.0.0.1:93\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:94\"), makeTestHost(info_, \"tcp://127.0.0.1:95\")};\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.runCallbacks({}, {});\n  init(7);\n\n  EXPECT_EQ(\"maglev_lb.min_entries_per_host\", lb_->stats().min_entries_per_host_.name());\n  EXPECT_EQ(\"maglev_lb.max_entries_per_host\", 
lb_->stats().max_entries_per_host_.name());\n  EXPECT_EQ(1, lb_->stats().min_entries_per_host_.value());\n  EXPECT_EQ(2, lb_->stats().max_entries_per_host_.value());\n\n  // maglev: i=0 host=127.0.0.1:92\n  // maglev: i=1 host=127.0.0.1:94\n  // maglev: i=2 host=127.0.0.1:90\n  // maglev: i=3 host=127.0.0.1:91\n  // maglev: i=4 host=127.0.0.1:95\n  // maglev: i=5 host=127.0.0.1:90\n  // maglev: i=6 host=127.0.0.1:93\n  LoadBalancerPtr lb = lb_->factory()->create();\n  {\n    // Confirm that i=3 is selected by the hash.\n    TestLoadBalancerContext context(10);\n    EXPECT_EQ(host_set_.hosts_[1], lb->chooseHost(&context));\n  }\n  {\n    // First attempt succeeds even when retry count is > 0.\n    TestLoadBalancerContext context(10, 2, [](const Host&) { return false; });\n    EXPECT_EQ(host_set_.hosts_[1], lb->chooseHost(&context));\n  }\n  {\n    // Second attempt chooses a different host in the ring.\n    TestLoadBalancerContext context(\n        10, 2, [&](const Host& host) { return &host == host_set_.hosts_[1].get(); });\n    EXPECT_EQ(host_set_.hosts_[0], lb->chooseHost(&context));\n  }\n  {\n    // Exhausted retries return the last checked host.\n    TestLoadBalancerContext context(10, 2, [](const Host&) { return true; });\n    EXPECT_EQ(host_set_.hosts_[5], lb->chooseHost(&context));\n  }\n}\n\n// Weighted sanity test.\nTEST_F(MaglevLoadBalancerTest, Weighted) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:90\", 1),\n                      makeTestHost(info_, \"tcp://127.0.0.1:91\", 2)};\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.runCallbacks({}, {});\n  init(17);\n  EXPECT_EQ(6, lb_->stats().min_entries_per_host_.value());\n  EXPECT_EQ(11, lb_->stats().max_entries_per_host_.value());\n\n  // maglev: i=0 host=127.0.0.1:91\n  // maglev: i=1 host=127.0.0.1:90\n  // maglev: i=2 host=127.0.0.1:90\n  // maglev: i=3 host=127.0.0.1:91\n  // maglev: i=4 host=127.0.0.1:90\n  // maglev: i=5 host=127.0.0.1:91\n  // maglev: i=6 
host=127.0.0.1:91\n  // maglev: i=7 host=127.0.0.1:90\n  // maglev: i=8 host=127.0.0.1:91\n  // maglev: i=9 host=127.0.0.1:91\n  // maglev: i=10 host=127.0.0.1:91\n  // maglev: i=11 host=127.0.0.1:91\n  // maglev: i=12 host=127.0.0.1:91\n  // maglev: i=13 host=127.0.0.1:90\n  // maglev: i=14 host=127.0.0.1:91\n  // maglev: i=15 host=127.0.0.1:90\n  // maglev: i=16 host=127.0.0.1:91\n  LoadBalancerPtr lb = lb_->factory()->create();\n  const std::vector<uint32_t> expected_assignments{1, 0, 0, 1, 0, 1, 1, 0, 1,\n                                                   1, 1, 1, 1, 0, 1, 0, 1};\n  for (uint32_t i = 0; i < 3 * expected_assignments.size(); ++i) {\n    TestLoadBalancerContext context(i);\n    EXPECT_EQ(host_set_.hosts_[expected_assignments[i % expected_assignments.size()]],\n              lb->chooseHost(&context));\n  }\n}\n\n// Locality weighted sanity test when localities have the same weights. Host weights for hosts in\n// different localities shouldn't matter.\nTEST_F(MaglevLoadBalancerTest, LocalityWeightedSameLocalityWeights) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:90\", 1),\n                      makeTestHost(info_, \"tcp://127.0.0.1:91\", 2)};\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.hosts_per_locality_ =\n      makeHostsPerLocality({{host_set_.hosts_[0]}, {host_set_.hosts_[1]}});\n  host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_;\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1}};\n  host_set_.locality_weights_ = locality_weights;\n  host_set_.runCallbacks({}, {});\n  init(17);\n  EXPECT_EQ(8, lb_->stats().min_entries_per_host_.value());\n  EXPECT_EQ(9, lb_->stats().max_entries_per_host_.value());\n\n  // maglev: i=0 host=127.0.0.1:91\n  // maglev: i=1 host=127.0.0.1:90\n  // maglev: i=2 host=127.0.0.1:90\n  // maglev: i=3 host=127.0.0.1:91\n  // maglev: i=4 host=127.0.0.1:90\n  // maglev: i=5 host=127.0.0.1:91\n  // maglev: i=6 host=127.0.0.1:91\n  // maglev: 
i=7 host=127.0.0.1:90\n  // maglev: i=8 host=127.0.0.1:90\n  // maglev: i=9 host=127.0.0.1:91\n  // maglev: i=10 host=127.0.0.1:90\n  // maglev: i=11 host=127.0.0.1:91\n  // maglev: i=12 host=127.0.0.1:90\n  // maglev: i=13 host=127.0.0.1:90\n  // maglev: i=14 host=127.0.0.1:91\n  // maglev: i=15 host=127.0.0.1:90\n  // maglev: i=16 host=127.0.0.1:91\n  LoadBalancerPtr lb = lb_->factory()->create();\n  const std::vector<uint32_t> expected_assignments{1, 0, 0, 1, 0, 1, 1, 0, 0,\n                                                   1, 0, 1, 0, 0, 1, 0, 1};\n  for (uint32_t i = 0; i < 3 * expected_assignments.size(); ++i) {\n    TestLoadBalancerContext context(i);\n    EXPECT_EQ(host_set_.hosts_[expected_assignments[i % expected_assignments.size()]],\n              lb->chooseHost(&context));\n  }\n}\n\n// Locality weighted sanity test when localities have different weights. Host weights for hosts in\n// different localities shouldn't matter.\nTEST_F(MaglevLoadBalancerTest, LocalityWeightedDifferentLocalityWeights) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:90\", 1),\n                      makeTestHost(info_, \"tcp://127.0.0.1:91\", 2),\n                      makeTestHost(info_, \"tcp://127.0.0.1:92\", 3)};\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.hosts_per_locality_ =\n      makeHostsPerLocality({{host_set_.hosts_[0]}, {host_set_.hosts_[2]}, {host_set_.hosts_[1]}});\n  host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_;\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{8, 0, 2}};\n  host_set_.locality_weights_ = locality_weights;\n  host_set_.runCallbacks({}, {});\n  init(17);\n  EXPECT_EQ(4, lb_->stats().min_entries_per_host_.value());\n  EXPECT_EQ(13, lb_->stats().max_entries_per_host_.value());\n\n  // maglev: i=0 host=127.0.0.1:91\n  // maglev: i=1 host=127.0.0.1:90\n  // maglev: i=2 host=127.0.0.1:90\n  // maglev: i=3 host=127.0.0.1:90\n  // maglev: i=4 host=127.0.0.1:90\n  // maglev: 
i=5 host=127.0.0.1:90\n  // maglev: i=6 host=127.0.0.1:91\n  // maglev: i=7 host=127.0.0.1:90\n  // maglev: i=8 host=127.0.0.1:90\n  // maglev: i=9 host=127.0.0.1:91\n  // maglev: i=10 host=127.0.0.1:90\n  // maglev: i=11 host=127.0.0.1:91\n  // maglev: i=12 host=127.0.0.1:90\n  // maglev: i=13 host=127.0.0.1:90\n  // maglev: i=14 host=127.0.0.1:90\n  // maglev: i=15 host=127.0.0.1:90\n  // maglev: i=16 host=127.0.0.1:90\n  LoadBalancerPtr lb = lb_->factory()->create();\n  const std::vector<uint32_t> expected_assignments{1, 0, 0, 0, 0, 0, 1, 0, 0,\n                                                   1, 0, 1, 0, 0, 0, 0, 0};\n  for (uint32_t i = 0; i < 3 * expected_assignments.size(); ++i) {\n    TestLoadBalancerContext context(i);\n    EXPECT_EQ(host_set_.hosts_[expected_assignments[i % expected_assignments.size()]],\n              lb->chooseHost(&context));\n  }\n}\n\n// Locality weighted with all localities zero weighted.\nTEST_F(MaglevLoadBalancerTest, LocalityWeightedAllZeroLocalityWeights) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:90\", 1)};\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.hosts_per_locality_ = makeHostsPerLocality({{host_set_.hosts_[0]}});\n  host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_;\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{0}};\n  host_set_.locality_weights_ = locality_weights;\n  host_set_.runCallbacks({}, {});\n  init(17);\n  LoadBalancerPtr lb = lb_->factory()->create();\n  TestLoadBalancerContext context(0);\n  EXPECT_EQ(nullptr, lb->chooseHost(&context));\n}\n\n// Validate that when we are in global panic and have localities, we get sane\n// results (fall back to non-healthy hosts).\nTEST_F(MaglevLoadBalancerTest, LocalityWeightedGlobalPanic) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:90\", 1),\n                      makeTestHost(info_, \"tcp://127.0.0.1:91\", 2)};\n  host_set_.healthy_hosts_ = {};\n  
host_set_.hosts_per_locality_ =\n      makeHostsPerLocality({{host_set_.hosts_[0]}, {host_set_.hosts_[1]}});\n  host_set_.healthy_hosts_per_locality_ = makeHostsPerLocality({{}, {}});\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1}};\n  host_set_.locality_weights_ = locality_weights;\n  host_set_.runCallbacks({}, {});\n  init(17);\n  EXPECT_EQ(8, lb_->stats().min_entries_per_host_.value());\n  EXPECT_EQ(9, lb_->stats().max_entries_per_host_.value());\n\n  // maglev: i=0 host=127.0.0.1:91\n  // maglev: i=1 host=127.0.0.1:90\n  // maglev: i=2 host=127.0.0.1:90\n  // maglev: i=3 host=127.0.0.1:91\n  // maglev: i=4 host=127.0.0.1:90\n  // maglev: i=5 host=127.0.0.1:91\n  // maglev: i=6 host=127.0.0.1:91\n  // maglev: i=7 host=127.0.0.1:90\n  // maglev: i=8 host=127.0.0.1:90\n  // maglev: i=9 host=127.0.0.1:91\n  // maglev: i=10 host=127.0.0.1:90\n  // maglev: i=11 host=127.0.0.1:91\n  // maglev: i=12 host=127.0.0.1:90\n  // maglev: i=13 host=127.0.0.1:90\n  // maglev: i=14 host=127.0.0.1:91\n  // maglev: i=15 host=127.0.0.1:90\n  // maglev: i=16 host=127.0.0.1:91\n  LoadBalancerPtr lb = lb_->factory()->create();\n  const std::vector<uint32_t> expected_assignments{1, 0, 0, 1, 0, 1, 1, 0, 0,\n                                                   1, 0, 1, 0, 0, 1, 0, 1};\n  for (uint32_t i = 0; i < 3 * expected_assignments.size(); ++i) {\n    TestLoadBalancerContext context(i);\n    EXPECT_EQ(host_set_.hosts_[expected_assignments[i % expected_assignments.size()]],\n              lb->chooseHost(&context));\n  }\n}\n\n// Given extremely lopsided locality weights, and a table that isn't large enough to fit all hosts,\n// expect that the least-weighted hosts appear once, and the most-weighted host fills the remainder.\nTEST_F(MaglevLoadBalancerTest, LocalityWeightedLopsided) {\n  host_set_.hosts_.clear();\n  HostVector heavy_but_sparse, light_but_dense;\n  for (uint32_t i = 0; i < 1024; ++i) {\n    auto host(makeTestHost(info_, 
fmt::format(\"tcp://127.0.0.1:{}\", i)));\n    host_set_.hosts_.push_back(host);\n    (i == 0 ? heavy_but_sparse : light_but_dense).push_back(host);\n  }\n  host_set_.healthy_hosts_ = {};\n  host_set_.hosts_per_locality_ = makeHostsPerLocality({heavy_but_sparse, light_but_dense});\n  host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_;\n  host_set_.locality_weights_ = makeLocalityWeights({127, 1});\n  host_set_.runCallbacks({}, {});\n  init(MaglevTable::DefaultTableSize);\n  EXPECT_EQ(1, lb_->stats().min_entries_per_host_.value());\n  EXPECT_EQ(MaglevTable::DefaultTableSize - 1023, lb_->stats().max_entries_per_host_.value());\n\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // Populate a histogram of the number of table entries for each host...\n  uint32_t counts[1024] = {0};\n  for (uint32_t i = 0; i < MaglevTable::DefaultTableSize; ++i) {\n    TestLoadBalancerContext context(i);\n    uint32_t port = lb->chooseHost(&context)->address()->ip()->port();\n    ++counts[port];\n  }\n\n  // Each of the light_but_dense hosts should appear in the table once.\n  for (uint32_t i = 1; i < 1024; ++i) {\n    EXPECT_EQ(1, counts[i]);\n  }\n\n  // The heavy_but_sparse host should occupy the remainder of the table.\n  EXPECT_EQ(MaglevTable::DefaultTableSize - 1023, counts[0]);\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/original_dst_cluster_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n#include <tuple>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/original_dst_cluster.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass TestLoadBalancerContext : public LoadBalancerContextBase {\npublic:\n  TestLoadBalancerContext(const Network::Connection* connection) : connection_(connection) {}\n  TestLoadBalancerContext(const Network::Connection* connection, const std::string& key,\n                          const std::string& value)\n      : connection_(connection) {\n    downstream_headers_ =\n        Http::RequestHeaderMapPtr{new Http::TestRequestHeaderMapImpl{{key, value}}};\n  }\n\n  // Upstream::LoadBalancerContext\n  absl::optional<uint64_t> computeHashKey() override { return 0; }\n  const Network::Connection* downstreamConnection() const override { return connection_; }\n  const Http::RequestHeaderMap* downstreamHeaders() const override {\n    return downstream_headers_.get();\n  }\n\n  absl::optional<uint64_t> hash_key_;\n  const 
Network::Connection* connection_;\n  Http::RequestHeaderMapPtr downstream_headers_;\n};\n\nclass OriginalDstClusterTest : public testing::Test {\npublic:\n  // cleanup timer must be created before the cluster (in setup()), so that we can set expectations\n  // on it. Ownership is transferred to the cluster at the cluster constructor, so the cluster will\n  // take care of destructing it!\n  OriginalDstClusterTest()\n      : cleanup_timer_(new Event::MockTimer(&dispatcher_)),\n        api_(Api::createApiForTest(stats_store_)) {}\n\n  void setupFromYaml(const std::string& yaml, bool avoid_boosting = true) {\n    setup(parseClusterFromV3Yaml(yaml, avoid_boosting));\n  }\n\n  void setup(const envoy::config::cluster::v3::Cluster& cluster_config) {\n    NiceMock<MockClusterManager> cm;\n    Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format(\n        \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                              : cluster_config.alt_stat_name()));\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, stats_store_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n    cluster_ = std::make_shared<OriginalDstCluster>(cluster_config, runtime_, factory_context,\n                                                    std::move(scope), false);\n    cluster_->prioritySet().addPriorityUpdateCb(\n        [&](uint32_t, const HostVector&, const HostVector&) -> void {\n          membership_updated_.ready();\n        });\n    cluster_->initialize([&]() -> void { initialized_.ready(); });\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Ssl::MockContextManager ssl_context_manager_;\n  OriginalDstClusterSharedPtr cluster_;\n  ReadyWatcher membership_updated_;\n  ReadyWatcher initialized_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Event::MockDispatcher> 
dispatcher_;\n  Event::MockTimer* cleanup_timer_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n};\n\nTEST(OriginalDstClusterConfigTest, GoodConfig) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: original_dst\n    lb_policy: cluster_provided\n    cleanup_interval: 1s\n  )EOF\"; // Help Emacs balance quotation marks: \"\n\n  EXPECT_TRUE(parseClusterFromV3Yaml(yaml).has_cleanup_interval());\n}\n\nTEST_F(OriginalDstClusterTest, BadConfigWithLoadAssignment) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: ORIGINAL_DST\n    lb_policy: CLUSTER_PROVIDED\n    cleanup_interval: 1s\n    load_assignment:\n      cluster_name: name\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 8000\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      setupFromYaml(yaml), EnvoyException,\n      \"ORIGINAL_DST clusters must have no load assignment or hosts configured\");\n}\n\nTEST_F(OriginalDstClusterTest, BadConfigWithDeprecatedHosts) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: ORIGINAL_DST\n    lb_policy: ORIGINAL_DST_LB\n    cleanup_interval: 1s\n    hosts:\n      - socket_address:\n          address: 127.0.0.1\n          port_value: 8000\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      setupFromYaml(yaml, false), EnvoyException,\n      \"ORIGINAL_DST clusters must have no load assignment or hosts configured\");\n}\n\nTEST_F(OriginalDstClusterTest, CleanupInterval) {\n  std::string yaml = R\"EOF(\n    name: name\n    
connect_timeout: 1.250s\n    type: ORIGINAL_DST\n    lb_policy: CLUSTER_PROVIDED\n    cleanup_interval: 1s\n  )EOF\"; // Help Emacs balance quotation marks: \"\n\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(membership_updated_, ready()).Times(0);\n  EXPECT_CALL(*cleanup_timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  setupFromYaml(yaml);\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n}\n\nTEST_F(OriginalDstClusterTest, NoContext) {\n  std::string yaml = R\"EOF(\n    name: name,\n    connect_timeout: 0.125s\n    type: ORIGINAL_DST\n    lb_policy: CLUSTER_PROVIDED\n  )EOF\";\n\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(membership_updated_, ready()).Times(0);\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  setupFromYaml(yaml);\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(\n      0UL,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  // No downstream connection => no host.\n  {\n    TestLoadBalancerContext lb_context(nullptr);\n    OriginalDstCluster::LoadBalancer lb(cluster_);\n    EXPECT_CALL(dispatcher_, post(_)).Times(0);\n    HostConstSharedPtr host = lb.chooseHost(&lb_context);\n    EXPECT_EQ(host, nullptr);\n  }\n\n  // Downstream connection is not using original dst => no host.\n  {\n    NiceMock<Network::MockConnection> connection;\n    TestLoadBalancerContext lb_context(&connection);\n\n    EXPECT_CALL(connection, localAddressRestored()).WillOnce(Return(false));\n    // First argument is normally the reference to the ThreadLocalCluster's HostSet, but in these\n    // tests we do not have the 
thread local clusters, so we pass a reference to the HostSet of the\n    // primary cluster. The implementation handles both cases the same.\n    OriginalDstCluster::LoadBalancer lb(cluster_);\n    EXPECT_CALL(dispatcher_, post(_)).Times(0);\n    HostConstSharedPtr host = lb.chooseHost(&lb_context);\n    EXPECT_EQ(host, nullptr);\n  }\n\n  // No host for non-IP address\n  {\n    NiceMock<Network::MockConnection> connection;\n    TestLoadBalancerContext lb_context(&connection);\n    connection.local_address_ = std::make_shared<Network::Address::PipeInstance>(\"unix://foo\");\n    EXPECT_CALL(connection, localAddressRestored()).WillRepeatedly(Return(true));\n\n    OriginalDstCluster::LoadBalancer lb(cluster_);\n    EXPECT_CALL(dispatcher_, post(_)).Times(0);\n    HostConstSharedPtr host = lb.chooseHost(&lb_context);\n    EXPECT_EQ(host, nullptr);\n  }\n}\n\nTEST_F(OriginalDstClusterTest, Membership) {\n  std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 1.250s\n    type: ORIGINAL_DST\n    lb_policy: CLUSTER_PROVIDED\n  )EOF\";\n\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  setupFromYaml(yaml);\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(\n      0UL,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  EXPECT_CALL(membership_updated_, ready());\n\n  // Host gets the local address of the downstream connection.\n\n  NiceMock<Network::MockConnection> connection;\n  TestLoadBalancerContext lb_context(&connection);\n  connection.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.10.11.11\");\n  EXPECT_CALL(connection, localAddressRestored()).WillRepeatedly(Return(true));\n\n  Event::PostCb 
post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  // Mock the cluster manager by recreating the load balancer each time to get a fresh host map\n  HostConstSharedPtr host = OriginalDstCluster::LoadBalancer(cluster_).chooseHost(&lb_context);\n  post_cb();\n  auto cluster_hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n\n  ASSERT_NE(host, nullptr);\n  EXPECT_EQ(*connection.local_address_, *host->address());\n\n  EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(\n      0UL,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  EXPECT_EQ(host, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n  EXPECT_EQ(*connection.local_address_,\n            *cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->address());\n\n  // Same host is returned on the 2nd call\n  // Mock the cluster manager by recreating the load balancer with the new host map\n  HostConstSharedPtr host2 = OriginalDstCluster::LoadBalancer(cluster_).chooseHost(&lb_context);\n  EXPECT_EQ(host2, host);\n\n  // Make host time out, no membership changes happen on the first timeout.\n  ASSERT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(true, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->used());\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  cleanup_timer_->invokeCallback();\n  EXPECT_EQ(\n      cluster_hosts,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()); // hosts vector remains the same\n\n  // host gets removed on the 2nd timeout.\n  ASSERT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(false, 
cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->used());\n\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  EXPECT_CALL(membership_updated_, ready());\n  cleanup_timer_->invokeCallback();\n  EXPECT_NE(cluster_hosts,\n            cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()); // hosts vector changes\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  cluster_hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n\n  // New host gets created\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  // Mock the cluster manager by recreating the load balancer with the new host map\n  HostConstSharedPtr host3 = OriginalDstCluster::LoadBalancer(cluster_).chooseHost(&lb_context);\n  post_cb();\n  EXPECT_NE(host3, nullptr);\n  EXPECT_NE(host3, host);\n  EXPECT_NE(cluster_hosts,\n            cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()); // hosts vector changes\n\n  EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(host3, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n}\n\nTEST_F(OriginalDstClusterTest, Membership2) {\n  std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 1.250s\n    type: ORIGINAL_DST\n    lb_policy: CLUSTER_PROVIDED\n  )EOF\";\n\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  setupFromYaml(yaml);\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(\n      0UL,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  // Host gets the local address of the downstream connection.\n\n  
NiceMock<Network::MockConnection> connection1;\n  TestLoadBalancerContext lb_context1(&connection1);\n  connection1.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.10.11.11\");\n  EXPECT_CALL(connection1, localAddressRestored()).WillRepeatedly(Return(true));\n\n  NiceMock<Network::MockConnection> connection2;\n  TestLoadBalancerContext lb_context2(&connection2);\n  connection2.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.10.11.12\");\n  EXPECT_CALL(connection2, localAddressRestored()).WillRepeatedly(Return(true));\n\n  OriginalDstCluster::LoadBalancer lb(cluster_);\n  EXPECT_CALL(membership_updated_, ready());\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  HostConstSharedPtr host1 = lb.chooseHost(&lb_context1);\n  post_cb();\n  ASSERT_NE(host1, nullptr);\n  EXPECT_EQ(*connection1.local_address_, *host1->address());\n\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  HostConstSharedPtr host2 = lb.chooseHost(&lb_context2);\n  post_cb();\n  ASSERT_NE(host2, nullptr);\n  EXPECT_EQ(*connection2.local_address_, *host2->address());\n\n  EXPECT_EQ(2UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(2UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(\n      0UL,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  EXPECT_EQ(host1, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n  EXPECT_EQ(*connection1.local_address_,\n            *cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->address());\n\n  EXPECT_EQ(host2, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[1]);\n  EXPECT_EQ(*connection2.local_address_,\n            
*cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[1]->address());\n\n  auto cluster_hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n\n  // Make hosts time out, no membership changes happen on the first timeout.\n  ASSERT_EQ(2UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(true, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->used());\n  EXPECT_EQ(true, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[1]->used());\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  cleanup_timer_->invokeCallback();\n  EXPECT_EQ(\n      cluster_hosts,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()); // hosts vector remains the same\n\n  // both hosts get removed on the 2nd timeout.\n  ASSERT_EQ(2UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(false, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->used());\n  EXPECT_EQ(false, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[1]->used());\n\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  EXPECT_CALL(membership_updated_, ready());\n  cleanup_timer_->invokeCallback();\n  EXPECT_NE(cluster_hosts,\n            cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()); // hosts vector changes\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n}\n\nTEST_F(OriginalDstClusterTest, Connection) {\n  std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 1.250s\n    type: ORIGINAL_DST\n    lb_policy: CLUSTER_PROVIDED\n  )EOF\";\n\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  setupFromYaml(yaml);\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  
EXPECT_EQ(\n      0UL,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  EXPECT_CALL(membership_updated_, ready());\n\n  // Connection to the host is made to the downstream connection's local address.\n  NiceMock<Network::MockConnection> connection;\n  TestLoadBalancerContext lb_context(&connection);\n  connection.local_address_ = std::make_shared<Network::Address::Ipv6Instance>(\"FD00::1\");\n  EXPECT_CALL(connection, localAddressRestored()).WillRepeatedly(Return(true));\n\n  OriginalDstCluster::LoadBalancer lb(cluster_);\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  HostConstSharedPtr host = lb.chooseHost(&lb_context);\n  post_cb();\n  ASSERT_NE(host, nullptr);\n  EXPECT_EQ(*connection.local_address_, *host->address());\n\n  EXPECT_CALL(dispatcher_, createClientConnection_(PointeesEq(connection.local_address_), _, _, _))\n      .WillOnce(Return(new NiceMock<Network::MockClientConnection>()));\n  host->createConnection(dispatcher_, nullptr, nullptr);\n}\n\nTEST_F(OriginalDstClusterTest, MultipleClusters) {\n  std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 1.250s\n    type: ORIGINAL_DST\n    lb_policy: CLUSTER_PROVIDED\n  )EOF\";\n\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  setupFromYaml(yaml);\n\n  PrioritySetImpl second;\n  cluster_->prioritySet().addPriorityUpdateCb(\n      [&](uint32_t, const HostVector& added, const HostVector& removed) -> void {\n        // Update second hostset accordingly;\n        HostVectorSharedPtr new_hosts(\n            new HostVector(cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()));\n        auto healthy_hosts = std::make_shared<const HealthyHostVector>(\n            cluster_->prioritySet().hostSetsPerPriority()[0]->hosts());\n        const HostsPerLocalityConstSharedPtr empty_hosts_per_locality{new HostsPerLocalityImpl()};\n\n        
second.updateHosts(0,\n                           updateHostsParams(new_hosts, empty_hosts_per_locality, healthy_hosts,\n                                             empty_hosts_per_locality),\n                           {}, added, removed, absl::nullopt);\n      });\n\n  EXPECT_CALL(membership_updated_, ready());\n\n  // Connection to the host is made to the downstream connection's local address.\n  NiceMock<Network::MockConnection> connection;\n  TestLoadBalancerContext lb_context(&connection);\n  connection.local_address_ = std::make_shared<Network::Address::Ipv6Instance>(\"FD00::1\");\n  EXPECT_CALL(connection, localAddressRestored()).WillRepeatedly(Return(true));\n\n  OriginalDstCluster::LoadBalancer lb(cluster_);\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  HostConstSharedPtr host = lb.chooseHost(&lb_context);\n  post_cb();\n  ASSERT_NE(host, nullptr);\n  EXPECT_EQ(*connection.local_address_, *host->address());\n\n  EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  // Check that 'second' also gets updated\n  EXPECT_EQ(1UL, second.hostSetsPerPriority()[0]->hosts().size());\n\n  EXPECT_EQ(host, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n  EXPECT_EQ(host, second.hostSetsPerPriority()[0]->hosts()[0]);\n}\n\nTEST_F(OriginalDstClusterTest, UseHttpHeaderEnabled) {\n  std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 1.250s\n    type: ORIGINAL_DST\n    lb_policy: CLUSTER_PROVIDED\n    original_dst_lb_config:\n      use_http_header: true\n  )EOF\";\n\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  setupFromYaml(yaml);\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  
EXPECT_EQ(\n      0UL,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  OriginalDstCluster::LoadBalancer lb(cluster_);\n  Event::PostCb post_cb;\n\n  // HTTP header override.\n  TestLoadBalancerContext lb_context1(nullptr, Http::Headers::get().EnvoyOriginalDstHost.get(),\n                                      \"127.0.0.1:5555\");\n\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  HostConstSharedPtr host1 = lb.chooseHost(&lb_context1);\n  post_cb();\n  ASSERT_NE(host1, nullptr);\n  EXPECT_EQ(\"127.0.0.1:5555\", host1->address()->asString());\n\n  // HTTP header override on downstream connection which isn't using original_dst filter\n  // and/or is done over Unix Domain Socket. This works, because properties of the downstream\n  // connection are never checked when using HTTP header override.\n  NiceMock<Network::MockConnection> connection2;\n  EXPECT_CALL(connection2, localAddress()).Times(0);\n  EXPECT_CALL(connection2, localAddressRestored()).Times(0);\n  TestLoadBalancerContext lb_context2(&connection2, Http::Headers::get().EnvoyOriginalDstHost.get(),\n                                      \"127.0.0.1:5556\");\n\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  HostConstSharedPtr host2 = lb.chooseHost(&lb_context2);\n  post_cb();\n  ASSERT_NE(host2, nullptr);\n  EXPECT_EQ(\"127.0.0.1:5556\", host2->address()->asString());\n\n  // HTTP header override with empty header value.\n  TestLoadBalancerContext lb_context3(nullptr, Http::Headers::get().EnvoyOriginalDstHost.get(), \"\");\n\n  EXPECT_CALL(membership_updated_, ready()).Times(0);\n  EXPECT_CALL(dispatcher_, post(_)).Times(0);\n  HostConstSharedPtr host3 = lb.chooseHost(&lb_context3);\n  EXPECT_EQ(host3, nullptr);\n  EXPECT_EQ(\n      1, TestUtility::findCounter(stats_store_, 
\"cluster.name.original_dst_host_invalid\")->value());\n\n  // HTTP header override with invalid header value.\n  TestLoadBalancerContext lb_context4(nullptr, Http::Headers::get().EnvoyOriginalDstHost.get(),\n                                      \"a.b.c.d\");\n\n  EXPECT_CALL(membership_updated_, ready()).Times(0);\n  EXPECT_CALL(dispatcher_, post(_)).Times(0);\n  HostConstSharedPtr host4 = lb.chooseHost(&lb_context4);\n  EXPECT_EQ(host4, nullptr);\n  EXPECT_EQ(\n      2, TestUtility::findCounter(stats_store_, \"cluster.name.original_dst_host_invalid\")->value());\n}\n\nTEST_F(OriginalDstClusterTest, UseHttpHeaderDisabled) {\n  std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 1.250s\n    type: ORIGINAL_DST\n    lb_policy: CLUSTER_PROVIDED\n  )EOF\";\n\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*cleanup_timer_, enableTimer(_, _));\n  setupFromYaml(yaml);\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(\n      0UL,\n      cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  OriginalDstCluster::LoadBalancer lb(cluster_);\n  Event::PostCb post_cb;\n\n  // Downstream connection with original_dst filter, HTTP header override ignored.\n  NiceMock<Network::MockConnection> connection1;\n  connection1.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.10.11.11\");\n  EXPECT_CALL(connection1, localAddressRestored()).WillOnce(Return(true));\n  TestLoadBalancerContext lb_context1(&connection1, Http::Headers::get().EnvoyOriginalDstHost.get(),\n                                      \"127.0.0.1:5555\");\n\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  HostConstSharedPtr host1 = 
lb.chooseHost(&lb_context1);\n  post_cb();\n  ASSERT_NE(host1, nullptr);\n  EXPECT_EQ(*connection1.local_address_, *host1->address());\n\n  // Downstream connection without original_dst filter, HTTP header override ignored.\n  NiceMock<Network::MockConnection> connection2;\n  connection2.local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"10.10.11.11\");\n  EXPECT_CALL(connection2, localAddressRestored()).WillOnce(Return(false));\n  TestLoadBalancerContext lb_context2(&connection2, Http::Headers::get().EnvoyOriginalDstHost.get(),\n                                      \"127.0.0.1:5555\");\n\n  EXPECT_CALL(membership_updated_, ready()).Times(0);\n  EXPECT_CALL(dispatcher_, post(_)).Times(0);\n  HostConstSharedPtr host2 = lb.chooseHost(&lb_context2);\n  EXPECT_EQ(host2, nullptr);\n\n  // Downstream connection over Unix Domain Socket, HTTP header override ignored.\n  NiceMock<Network::MockConnection> connection3;\n  connection3.local_address_ = std::make_shared<Network::Address::PipeInstance>(\"unix://foo\");\n  TestLoadBalancerContext lb_context3(&connection3, Http::Headers::get().EnvoyOriginalDstHost.get(),\n                                      \"127.0.0.1:5555\");\n\n  EXPECT_CALL(membership_updated_, ready()).Times(0);\n  EXPECT_CALL(dispatcher_, post(_)).Times(0);\n  HostConstSharedPtr host3 = lb.chooseHost(&lb_context3);\n  EXPECT_EQ(host3, nullptr);\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/outlier_detection_impl_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/cluster/v3/outlier_detection.pb.h\"\n#include \"envoy/data/cluster/v2alpha/outlier_detection_event.pb.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/upstream/outlier_detection_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/types/optional.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace Outlier {\nnamespace {\n\nTEST(OutlierDetectorImplFactoryTest, NoDetector) {\n  NiceMock<MockClusterMockPrioritySet> cluster;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Runtime::MockLoader> runtime;\n  EXPECT_EQ(nullptr,\n            DetectorImplFactory::createForCluster(cluster, defaultStaticCluster(\"fake_cluster\"),\n                                                  dispatcher, runtime, nullptr));\n}\n\nTEST(OutlierDetectorImplFactoryTest, Detector) {\n  auto fake_cluster = defaultStaticCluster(\"fake_cluster\");\n  fake_cluster.mutable_outlier_detection();\n\n  NiceMock<MockClusterMockPrioritySet> cluster;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Runtime::MockLoader> runtime;\n  EXPECT_NE(nullptr, DetectorImplFactory::createForCluster(cluster, fake_cluster, 
dispatcher,\n                                                           runtime, nullptr));\n}\n\nclass CallbackChecker {\npublic:\n  MOCK_METHOD(void, check, (HostSharedPtr host));\n};\n\nclass OutlierDetectorImplTest : public testing::Test {\npublic:\n  OutlierDetectorImplTest()\n      : outlier_detection_ejections_active_(cluster_.info_->stats_store_.gauge(\n            \"outlier_detection.ejections_active\", Stats::Gauge::ImportMode::Accumulate)) {\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"outlier_detection.enforcing_consecutive_5xx\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"outlier_detection.enforcing_success_rate\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_,\n            featureEnabled(\"outlier_detection.enforcing_consecutive_local_origin_failure_\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_,\n            featureEnabled(\"outlier_detection.enforcing_local_origin_success_rate\", 100))\n        .WillByDefault(Return(true));\n\n    // Prepare separate config with split_external_local_origin_errors set to true.\n    // It will be used for tests with split external and local origin errors.\n    outlier_detection_split_.set_split_external_local_origin_errors(true);\n  }\n\n  void addHosts(std::vector<std::string> urls, bool primary = true) {\n    HostVector& hosts = primary ? 
hosts_ : failover_hosts_;\n    for (auto& url : urls) {\n      hosts.emplace_back(makeTestHost(cluster_.info_, url));\n    }\n  }\n\n  template <typename T> void loadRq(HostVector& hosts, int num_rq, T code) {\n    for (auto& host : hosts) {\n      loadRq(host, num_rq, code);\n    }\n  }\n\n  void loadRq(HostSharedPtr host, int num_rq, int http_code) {\n    for (int i = 0; i < num_rq; i++) {\n      host->outlierDetector().putHttpResponseCode(http_code);\n    }\n  }\n\n  void loadRq(HostSharedPtr host, int num_rq, Result result) {\n    for (int i = 0; i < num_rq; i++) {\n      host->outlierDetector().putResult(result);\n    }\n  }\n\n  NiceMock<MockClusterMockPrioritySet> cluster_;\n  HostVector& hosts_ = cluster_.prioritySet().getMockHostSet(0)->hosts_;\n  HostVector& failover_hosts_ = cluster_.prioritySet().getMockHostSet(1)->hosts_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  Event::MockTimer* interval_timer_ = new Event::MockTimer(&dispatcher_);\n  CallbackChecker checker_;\n  Event::SimulatedTimeSystem time_system_;\n  std::shared_ptr<MockEventLogger> event_logger_{new MockEventLogger()};\n  envoy::config::cluster::v3::OutlierDetection empty_outlier_detection_;\n  envoy::config::cluster::v3::OutlierDetection outlier_detection_split_;\n  Stats::Gauge& outlier_detection_ejections_active_;\n};\n\nTEST_F(OutlierDetectorImplTest, DetectorStaticConfig) {\n  const std::string yaml = R\"EOF(\ninterval: 0.1s\nbase_ejection_time: 10s\nconsecutive_5xx: 10\nmax_ejection_percent: 50\nenforcing_consecutive_5xx: 10\nenforcing_success_rate: 20\nsuccess_rate_minimum_hosts: 50\nsuccess_rate_request_volume: 200\nsuccess_rate_stdev_factor: 3000\nfailure_percentage_minimum_hosts: 10\nfailure_percentage_request_volume: 25\nfailure_percentage_threshold: 70\n  )EOF\";\n\n  envoy::config::cluster::v3::OutlierDetection outlier_detection;\n  TestUtility::loadFromYaml(yaml, outlier_detection);\n  EXPECT_CALL(*interval_timer_, 
enableTimer(std::chrono::milliseconds(100), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, outlier_detection, dispatcher_, runtime_, time_system_, event_logger_));\n\n  EXPECT_EQ(100UL, detector->config().intervalMs());\n  EXPECT_EQ(10000UL, detector->config().baseEjectionTimeMs());\n  EXPECT_EQ(10UL, detector->config().consecutive5xx());\n  EXPECT_EQ(5UL, detector->config().consecutiveGatewayFailure());\n  EXPECT_EQ(50UL, detector->config().maxEjectionPercent());\n  EXPECT_EQ(10UL, detector->config().enforcingConsecutive5xx());\n  EXPECT_EQ(0UL, detector->config().enforcingConsecutiveGatewayFailure());\n  EXPECT_EQ(20UL, detector->config().enforcingSuccessRate());\n  EXPECT_EQ(50UL, detector->config().successRateMinimumHosts());\n  EXPECT_EQ(200UL, detector->config().successRateRequestVolume());\n  EXPECT_EQ(3000UL, detector->config().successRateStdevFactor());\n  EXPECT_EQ(0UL, detector->config().enforcingFailurePercentage());\n  EXPECT_EQ(0UL, detector->config().enforcingFailurePercentageLocalOrigin());\n  EXPECT_EQ(10UL, detector->config().failurePercentageMinimumHosts());\n  EXPECT_EQ(25UL, detector->config().failurePercentageRequestVolume());\n  EXPECT_EQ(70UL, detector->config().failurePercentageThreshold());\n}\n\nTEST_F(OutlierDetectorImplTest, DestroyWithActive) {\n  ON_CALL(runtime_.snapshot_, getInteger(\"outlier_detection.max_ejection_percent\", _))\n      .WillByDefault(Return(100));\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"}, true);\n  addHosts({\"tcp://127.0.0.1:81\"}, false);\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  loadRq(hosts_[0], 4, 500);\n  
time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  loadRq(hosts_[0], 1, 500);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  loadRq(failover_hosts_[0], 4, 500);\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(failover_hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(failover_hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  loadRq(failover_hosts_[0], 1, 500);\n  EXPECT_TRUE(failover_hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(2UL, outlier_detection_ejections_active_.value());\n\n  detector.reset();\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n}\n\nTEST_F(OutlierDetectorImplTest, DestroyHostInUse) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  detector.reset();\n\n  loadRq(hosts_[0], 5, 500);\n}\n\n/*\n Tests scenario when connect errors are reported by Non-http codes and success is reported by\n http codes. 
(this happens in http router).\n*/\nTEST_F(OutlierDetectorImplTest, BasicFlow5xxViaHttpCodes) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  addHosts({\"tcp://127.0.0.1:81\"});\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({hosts_[1]}, {});\n\n  // Cause a consecutive 5xx error on host[0] by reporting HTTP codes.\n  loadRq(hosts_[0], 1, 500);\n  loadRq(hosts_[0], 1, 200);\n  hosts_[0]->outlierDetector().putResponseTime(std::chrono::milliseconds(5));\n  loadRq(hosts_[0], 4, 500);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  loadRq(hosts_[0], 1, 500);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that doesn't bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(9999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Interval that does bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(30001));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[0])));\n  
EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_TRUE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Eject host again to cause an ejection after an unejection has taken place\n  hosts_[0]->outlierDetector().putResponseTime(std::chrono::milliseconds(5));\n  loadRq(hosts_[0], 4, 500);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(40000));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  loadRq(hosts_[0], 1, 500);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, hosts_);\n\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n  EXPECT_EQ(2UL, cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_total\").value());\n  EXPECT_EQ(\n      2UL,\n      cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_consecutive_5xx\").value());\n  EXPECT_EQ(0UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_consecutive_gateway_failure\")\n                     .value());\n}\n\n/* Test verifies the LOCAL_ORIGIN_CONNECT_SUCCESS with optional HTTP code 200,\n   cancels LOCAL_ORIGIN_CONNECT_FAILED event.\n*/\nTEST_F(OutlierDetectorImplTest, ConnectSuccessWithOptionalHTTP_OK) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, 
dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  // Make sure that in non-split mode LOCAL_ORIGIN_CONNECT_SUCCESS with optional HTTP code 200\n  // cancels LOCAL_ORIGIN_CONNECT_FAILED.\n  // such scenario is used by tcp_proxy.\n  for (auto i = 0; i < 100; i++) {\n    hosts_[0]->outlierDetector().putResult(Result::LocalOriginConnectSuccess,\n                                           absl::optional<uint64_t>(enumToInt(Http::Code::OK)));\n    hosts_[0]->outlierDetector().putResult(Result::LocalOriginConnectFailed);\n  }\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n}\n\n/* Test verifies the EXT_ORIGIN_REQUEST_SUCCESS cancels EXT_ORIGIN_REQUEST_FAILED event in non-split\n * mode.\n * EXT_ORIGIN_REQUEST_FAILED is mapped to 5xx code and EXT_ORIGIN_REQUEST_SUCCESS is mapped to 200\n * code.\n */\nTEST_F(OutlierDetectorImplTest, ExternalOriginEventsNonSplit) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  // Make sure that EXT_ORIGIN_REQUEST_SUCCESS cancels EXT_ORIGIN_REQUEST_FAILED\n  // such scenario is used by redis filter.\n  for (auto i = 0; i < 100; i++) {\n    hosts_[0]->outlierDetector().putResult(Result::ExtOriginRequestFailed);\n    hosts_[0]->outlierDetector().putResult(Result::ExtOriginRequestSuccess);\n  }\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  // Now make sure that EXT_ORIGIN_REQUEST_FAILED ejects the host\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, 
logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  for (auto i = 0; i < 100; i++) {\n    hosts_[0]->outlierDetector().putResult(Result::ExtOriginRequestFailed);\n  }\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n}\n\nTEST_F(OutlierDetectorImplTest, BasicFlow5xxViaNonHttpCodes) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  addHosts({\"tcp://127.0.0.1:81\"});\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({hosts_[1]}, {});\n\n  // Cause a consecutive 5xx error on host[0] by reporting Non-HTTP codes.\n  loadRq(hosts_[0], 1, Result::LocalOriginConnectFailed);\n  loadRq(hosts_[0], 1, 200);\n  hosts_[0]->outlierDetector().putResponseTime(std::chrono::milliseconds(5));\n  loadRq(hosts_[0], 4, Result::LocalOriginConnectFailed);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  loadRq(hosts_[0], 1, Result::LocalOriginConnectFailed);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  EXPECT_EQ(1UL, 
outlier_detection_ejections_active_.value());\n\n  // Interval that doesn't bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(9999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Interval that does bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(30001));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[0])));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_TRUE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Eject host again to cause an ejection after an unejection has taken place\n  hosts_[0]->outlierDetector().putResponseTime(std::chrono::milliseconds(5));\n  loadRq(hosts_[0], 4, Result::LocalOriginConnectFailed);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(40000));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  loadRq(hosts_[0], 1, Result::LocalOriginConnectFailed);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, hosts_);\n\n  EXPECT_EQ(0UL, 
outlier_detection_ejections_active_.value());\n  EXPECT_EQ(2UL, cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_total\").value());\n  EXPECT_EQ(\n      2UL,\n      cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_consecutive_5xx\").value());\n  EXPECT_EQ(0UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_consecutive_gateway_failure\")\n                     .value());\n}\n\n/**\n * Test that the consecutive gateway failure detector correctly fires, and also successfully\n * retriggers after uneject. This will also ensure that the stats counters end up with the expected\n * values.\n */\nTEST_F(OutlierDetectorImplTest, BasicFlowGatewayFailure) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_consecutive_gateway_failure\", 0))\n      .WillByDefault(Return(true));\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"outlier_detection.enforcing_consecutive_5xx\", 100))\n      .WillByDefault(Return(false));\n\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  addHosts({\"tcp://127.0.0.1:81\"});\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({hosts_[1]}, {});\n\n  // Cause a consecutive 5xx error.\n  loadRq(hosts_[0], 1, 503);\n  loadRq(hosts_[0], 1, 500);\n  hosts_[0]->outlierDetector().putResponseTime(std::chrono::milliseconds(5));\n  loadRq(hosts_[0], 2, 503);\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, false));\n  
loadRq(hosts_[0], 2, 503);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, true));\n  loadRq(hosts_[0], 1, 503);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that doesn't bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(9999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Interval that does bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(30001));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[0])));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_TRUE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Eject host again to cause an ejection after an unejection has taken place\n  hosts_[0]->outlierDetector().putResponseTime(std::chrono::milliseconds(5));\n  loadRq(hosts_[0], 4, 503);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(40000));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, true));\n  loadRq(hosts_[0], 1, 503);\n  
EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, hosts_);\n\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n  // Check preserves deprecated counter behaviour\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_total\").value());\n  EXPECT_EQ(\n      2UL,\n      cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_enforced_total\").value());\n  EXPECT_EQ(2UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_detected_consecutive_gateway_failure\")\n                     .value());\n  EXPECT_EQ(2UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_enforced_consecutive_gateway_failure\")\n                     .value());\n\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_detected_consecutive_5xx\")\n                     .value());\n  EXPECT_EQ(0UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_enforced_consecutive_5xx\")\n                     .value());\n}\n\n/*\n * Test passing of optional HTTP code with Result:: LOCAL_ORIGIN_TIMEOUT\n */\nTEST_F(OutlierDetectorImplTest, TimeoutWithHttpCode) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\n      \"tcp://127.0.0.1:80\",\n      \"tcp://127.0.0.1:81\",\n      \"tcp://127.0.0.1:84\",\n  });\n\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  // Report several LOCAL_ORIGIN_TIMEOUT with optional Http code 500. 
Host should be ejected.\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  // Get the configured number of failures and simulate than number of connect failures.\n  uint32_t n = runtime_.snapshot_.getInteger(\"outlier_detection.consecutive_5xx\",\n                                             detector->config().consecutive5xx());\n  while (n--) {\n    hosts_[0]->outlierDetector().putResult(Result::LocalOriginTimeout,\n                                           absl::optional<uint64_t>(500));\n  }\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  // Wait until it is unejected\n  time_system_.setMonotonicTime(std::chrono::milliseconds(50001));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[0])));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  // Report several LOCAL_ORIGIN_TIMEOUT with HTTP code other that 500. 
Node should not be ejected.\n  EXPECT_CALL(checker_, check(hosts_[0])).Times(0);\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true))\n      .Times(0);\n  // Get the configured number of failures and simulate than number of connect failures.\n  n = runtime_.snapshot_.getInteger(\"outlier_detection.consecutive_5xx\",\n                                    detector->config().consecutive5xx());\n  while (n--) {\n    hosts_[0]->outlierDetector().putResult(Result::LocalOriginTimeout,\n                                           absl::optional<uint64_t>(200));\n  }\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  // Report LOCAL_ORIGIN_TIMEOUT without explicit HTTP code mapping. It should be implicitly mapped\n  // to 5xx code and the node should be ejected.\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false));\n  // Get the configured number of failures and simulate than number of connect failures.\n  n = runtime_.snapshot_.getInteger(\"outlier_detection.consecutive_gateway_failure\",\n                                    detector->config().consecutiveGatewayFailure());\n  while (n--) {\n    hosts_[0]->outlierDetector().putResult(Result::LocalOriginTimeout);\n  }\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n}\n\n/**\n * Set of tests to verify ejecting and unejecting nodes when local/connect failures are reported.\n */\nTEST_F(OutlierDetectorImplTest, 
BasicFlowLocalOriginFailure) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"}, true);\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, outlier_detection_split_, dispatcher_, runtime_, time_system_, event_logger_));\n\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_consecutive_local_origin_failure\", 100))\n      .WillByDefault(Return(true));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  // When connect failure is detected the following methods should be called.\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE, true));\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n\n  // Get the configured number of failures and simulate than number of connect failures.\n  uint32_t n = runtime_.snapshot_.getInteger(\"outlier_detection.consecutive_local_origin_failure\",\n                                             detector->config().consecutiveLocalOriginFailure());\n  while (n--) {\n    hosts_[0]->outlierDetector().putResult(Result::LocalOriginConnectFailed);\n  }\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Wait short time - not enough to be unejected\n  time_system_.setMonotonicTime(std::chrono::milliseconds(9999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Interval that does bring the host back in.\n  
time_system_.setMonotonicTime(std::chrono::milliseconds(30001));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[0])));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_TRUE(hosts_[0]->outlierDetector().lastUnejectionTime());\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n\n  // Simulate few connect failures, not enough for ejection and then simulate connect success\n  // and again few failures not enough for ejection.\n  n = runtime_.snapshot_.getInteger(\"outlier_detection.consecutive_local_origin_failure\",\n                                    detector->config().consecutiveLocalOriginFailure());\n  n--; // make sure that this is not enough for ejection.\n  while (n--) {\n    hosts_[0]->outlierDetector().putResult(Result::LocalOriginConnectFailed);\n  }\n  // now success and few failures\n  hosts_[0]->outlierDetector().putResult(Result::LocalOriginConnectSuccess);\n  hosts_[0]->outlierDetector().putResult(Result::LocalOriginConnectFailed);\n  hosts_[0]->outlierDetector().putResult(Result::LocalOriginConnectFailed);\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_TRUE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Check stats\n  EXPECT_EQ(\n      1UL,\n      cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_enforced_total\").value());\n  EXPECT_EQ(1UL,\n            cluster_.info_->stats_store_\n                .counter(\"outlier_detection.ejections_detected_consecutive_local_origin_failure\")\n                .value());\n  EXPECT_EQ(1UL,\n            cluster_.info_->stats_store_\n                .counter(\"outlier_detection.ejections_enforced_consecutive_local_origin_failure\")\n                
.value());\n}\n\n/**\n * Test the interaction between the consecutive gateway failure and 5xx detectors.\n * This will first trigger a consecutive gateway failure with 503s, and then trigger 5xx with a mix\n * of 503s and 500s. We expect the consecutive gateway failure to fire after 5 consecutive 503s, and\n * after an uneject the 5xx detector should require a further 5 consecutive 5xxs. The gateway\n * failure detector should not fire a second time since fewer than another 5x 503s are triggered.\n * This will also ensure that the stats counters end up with the expected values.\n */\nTEST_F(OutlierDetectorImplTest, BasicFlowGatewayFailureAnd5xx) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_consecutive_gateway_failure\", 0))\n      .WillByDefault(Return(true));\n\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  addHosts({\"tcp://127.0.0.1:81\"});\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({hosts_[1]}, {});\n\n  // Cause a consecutive 5xx error.\n  loadRq(hosts_[0], 1, 503);\n  loadRq(hosts_[0], 1, 200);\n  hosts_[0]->outlierDetector().putResponseTime(std::chrono::milliseconds(5));\n  loadRq(hosts_[0], 4, 503);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, true));\n  loadRq(hosts_[0], 1, 503);\n  
EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that doesn't bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(9999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Interval that does bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(30001));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[0])));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_TRUE(hosts_[0]->outlierDetector().lastUnejectionTime());\n\n  // Eject host again but with a mix of 500s and 503s to trigger 5xx ejection first\n  hosts_[0]->outlierDetector().putResponseTime(std::chrono::milliseconds(5));\n  loadRq(hosts_[0], 2, 503);\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  loadRq(hosts_[0], 2, 500);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(40000));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  loadRq(hosts_[0], 1, 500);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, hosts_);\n\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n  // Deprecated counter, check we're preserving old 
behaviour\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_total\").value());\n  EXPECT_EQ(\n      2UL,\n      cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_enforced_total\").value());\n  EXPECT_EQ(\n      1UL,\n      cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_consecutive_5xx\").value());\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_detected_consecutive_5xx\")\n                     .value());\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_enforced_consecutive_5xx\")\n                     .value());\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_detected_consecutive_gateway_failure\")\n                     .value());\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_enforced_consecutive_gateway_failure\")\n                     .value());\n}\n\n// Test mapping of Non-Http codes to Http. 
This happens when split between external and local\n// origin errors is turned off.\nTEST_F(OutlierDetectorImplTest, BasicFlowNonHttpCodesExternalOrigin) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  addHosts({\"tcp://127.0.0.1:81\"});\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({hosts_[1]}, {});\n\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"outlier_detection.enforcing_consecutive_5xx\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_consecutive_gateway_failure\", 0))\n      .WillByDefault(Return(false));\n\n  // Make sure that EXT_ORIGIN_REQUEST_SUCCESS cancels LOCAL_ORIGIN_CONNECT_FAILED\n  for (auto i = 0; i < 100; i++) {\n    loadRq(hosts_[0], 1, Result::LocalOriginConnectFailed);\n    loadRq(hosts_[0], 1, Result::ExtOriginRequestSuccess);\n  }\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  // Cause a consecutive 5xx error. 
This situation happens in router filter.\n  // Make sure that one CONNECT_SUCCESS with optional code zero, does not\n  // interrupt sequence of LOCAL_ORIGIN_CONNECT_FAILED.\n  loadRq(hosts_[0], 1, Result::LocalOriginConnectFailed);\n  hosts_[0]->outlierDetector().putResult(Result::LocalOriginConnectSuccess);\n  hosts_[0]->outlierDetector().putResponseTime(std::chrono::milliseconds(5));\n  loadRq(hosts_[0], 3, Result::LocalOriginConnectFailed);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  loadRq(hosts_[0], 1, Result::LocalOriginConnectFailed);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n}\n\nTEST_F(OutlierDetectorImplTest, BasicFlowSuccessRateExternalOrigin) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\n      \"tcp://127.0.0.1:80\",\n      \"tcp://127.0.0.1:81\",\n      \"tcp://127.0.0.1:82\",\n      \"tcp://127.0.0.1:83\",\n      \"tcp://127.0.0.1:84\",\n  });\n\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  // Turn off 5xx detection to test SR detection in isolation.\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"outlier_detection.enforcing_consecutive_5xx\", 100))\n     
 .WillByDefault(Return(false));\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_consecutive_gateway_failure\", 100))\n      .WillByDefault(Return(false));\n  // Expect non-enforcing logging to happen every time the consecutive_5xx_ counter\n  // gets saturated (every 5 times).\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, false))\n      .Times(40);\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false))\n      .Times(40);\n\n  // Cause a SR error on one host. First have 4 of the hosts have perfect SR.\n  loadRq(hosts_, 200, 200);\n  loadRq(hosts_[4], 200, 503);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10000));\n  EXPECT_CALL(checker_, check(hosts_[4]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]),\n                                       _, envoy::data::cluster::v2alpha::SUCCESS_RATE, true));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  ON_CALL(runtime_.snapshot_, getInteger(\"outlier_detection.success_rate_stdev_factor\", 1900))\n      .WillByDefault(Return(1900));\n  interval_timer_->invokeCallback();\n  EXPECT_EQ(50, hosts_[4]->outlierDetector().successRate(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(90, detector->successRateAverage(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(52, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  // Make sure that local origin success rate monitor is not affected\n  EXPECT_EQ(-1, 
hosts_[4]->outlierDetector().successRate(\n                    DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(-1,\n            detector->successRateAverage(DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(-1, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_TRUE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that doesn't bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(19999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_TRUE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that does bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(50001));\n  EXPECT_CALL(checker_, check(hosts_[4]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[4])));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n\n  // Expect non-enforcing logging to happen every time the consecutive_5xx_ counter\n  // gets saturated (every 5 times).\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, false))\n      .Times(5);\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       
envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false))\n      .Times(5);\n\n  // Give 4 hosts enough request volume but not to the 5th. Should not cause an ejection.\n  loadRq(hosts_, 25, 200);\n  loadRq(hosts_[4], 25, 503);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(60001));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  // The success rate should be *calculated* since the minimum request volume was met for failure\n  // percentage ejection, but the host should not be ejected.\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n  EXPECT_EQ(50UL, hosts_[4]->outlierDetector().successRate(\n                      DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(-1, detector->successRateAverage(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(-1, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n}\n\n// Test verifies that EXT_ORIGIN_REQUEST_FAILED and EXT_ORIGIN_REQUEST_SUCCESS cancel\n// each other in split mode.\nTEST_F(OutlierDetectorImplTest, ExternalOriginEventsWithSplit) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"}, true);\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, outlier_detection_split_, dispatcher_, runtime_, time_system_, event_logger_));\n\n  for (auto i = 0; i < 100; i++) {\n    hosts_[0]->outlierDetector().putResult(Result::ExtOriginRequestFailed);\n    hosts_[0]->outlierDetector().putResult(Result::ExtOriginRequestSuccess);\n  }\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  // Now make sure that EXT_ORIGIN_REQUEST_FAILED ejects the host\n  EXPECT_CALL(*event_logger_, 
logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false));\n  for (auto i = 0; i < 100; i++) {\n    hosts_[0]->outlierDetector().putResult(Result::ExtOriginRequestFailed);\n  }\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n}\n\nTEST_F(OutlierDetectorImplTest, BasicFlowSuccessRateLocalOrigin) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\n      \"tcp://127.0.0.1:80\",\n      \"tcp://127.0.0.1:81\",\n      \"tcp://127.0.0.1:82\",\n      \"tcp://127.0.0.1:83\",\n      \"tcp://127.0.0.1:84\",\n  });\n\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, outlier_detection_split_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  // Turn off detecting consecutive local origin failures.\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_consecutive_local_origin_failure\", 100))\n      .WillByDefault(Return(false));\n  // Expect non-enforcing logging to happen every time the consecutive_ counter\n  // gets saturated (every 5 times).\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE, false))\n      .Times(40);\n  // Cause a SR error on one host. 
First have 4 of the hosts have perfect SR.\n  loadRq(hosts_, 200, Result::LocalOriginConnectSuccess);\n  loadRq(hosts_[4], 200, Result::LocalOriginConnectFailed);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10000));\n  EXPECT_CALL(checker_, check(hosts_[4]));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN, true));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  ON_CALL(runtime_.snapshot_, getInteger(\"outlier_detection.success_rate_stdev_factor\", 1900))\n      .WillByDefault(Return(1900));\n  interval_timer_->invokeCallback();\n  EXPECT_EQ(50, hosts_[4]->outlierDetector().successRate(\n                    DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(90,\n            detector->successRateAverage(DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(52, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  // Make sure that external origin success rate monitor is not affected\n  EXPECT_EQ(-1, hosts_[4]->outlierDetector().successRate(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(-1, detector->successRateAverage(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(-1, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_TRUE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that doesn't bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(19999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  
interval_timer_->invokeCallback();\n  EXPECT_TRUE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that does bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(50001));\n  EXPECT_CALL(checker_, check(hosts_[4]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[4])));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n\n  // Expect non-enforcing logging to happen every time the consecutive_ counter\n  // gets saturated (every 5 times).\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE, false))\n      .Times(5);\n\n  // Give 4 hosts enough request volume but not to the 5th. 
Should not cause an ejection.\n  loadRq(hosts_, 25, Result::LocalOriginConnectSuccess);\n  loadRq(hosts_[4], 25, Result::LocalOriginConnectFailed);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(60001));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  // The success rate should be *calculated* since the minimum request volume was met for failure\n  // percentage ejection, but the host should not be ejected.\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n  EXPECT_EQ(50UL, hosts_[4]->outlierDetector().successRate(\n                      DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(-1,\n            detector->successRateAverage(DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(-1, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n}\n\n// Validate that empty hosts doesn't crash success rate handling when success_rate_minimum_hosts is\n// zero. 
This is a regression test for earlier divide-by-zero behavior.\nTEST_F(OutlierDetectorImplTest, EmptySuccessRate) {\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  loadRq(hosts_, 200, 503);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10000));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  ON_CALL(runtime_.snapshot_, getInteger(\"outlier_detection.success_rate_minimum_hosts\", 5))\n      .WillByDefault(Return(0));\n  interval_timer_->invokeCallback();\n}\n\nTEST_F(OutlierDetectorImplTest, BasicFlowFailurePercentageExternalOrigin) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\n      \"tcp://127.0.0.1:80\",\n      \"tcp://127.0.0.1:81\",\n      \"tcp://127.0.0.1:82\",\n      \"tcp://127.0.0.1:83\",\n      \"tcp://127.0.0.1:84\",\n  });\n\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  // Turn off 5xx detection and SR detection to test failure percentage detection in isolation.\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"outlier_detection.enforcing_consecutive_5xx\", 100))\n      .WillByDefault(Return(false));\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_consecutive_gateway_failure\", 100))\n      .WillByDefault(Return(false));\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"outlier_detection.enforcing_success_rate\", 100))\n      .WillByDefault(Return(false));\n  // Now turn on failure percentage detection.\n  ON_CALL(runtime_.snapshot_, 
featureEnabled(\"outlier_detection.enforcing_failure_percentage\", 0))\n      .WillByDefault(Return(true));\n  // Expect non-enforcing logging to happen every time the consecutive_5xx_ counter\n  // gets saturated (every 5 times).\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[3]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, false))\n      .Times(50);\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[3]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false))\n      .Times(50);\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, false))\n      .Times(60);\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false))\n      .Times(60);\n\n  // Cause a failure percentage error on one host. 
First 3 hosts have perfect failure percentage;\n  // fourth host has failure percentage slightly below threshold; fifth has failure percentage\n  // slightly above threshold.\n  loadRq(hosts_, 50, 200);\n  loadRq(hosts_[3], 250, 503);\n  loadRq(hosts_[4], 300, 503);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10000));\n  EXPECT_CALL(checker_, check(hosts_[4]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]),\n                                       _, envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE, true));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  ON_CALL(runtime_.snapshot_, getInteger(\"outlier_detection.success_rate_stdev_factor\", 1900))\n      .WillByDefault(Return(1900));\n  interval_timer_->invokeCallback();\n  EXPECT_FLOAT_EQ(100.0 * (50.0 / 300.0),\n                  hosts_[3]->outlierDetector().successRate(\n                      DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_FLOAT_EQ(100.0 * (50.0 / 350.0),\n                  hosts_[4]->outlierDetector().successRate(\n                      DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  // Make sure that local origin success rate monitor is not affected\n  EXPECT_EQ(-1, hosts_[4]->outlierDetector().successRate(\n                    DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(-1,\n            detector->successRateAverage(DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(-1, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_FALSE(hosts_[3]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_TRUE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that doesn't bring the host back in.\n  
time_system_.setMonotonicTime(std::chrono::milliseconds(19999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_TRUE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that does bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(50001));\n  EXPECT_CALL(checker_, check(hosts_[4]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[4])));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n\n  // Expect non-enforcing logging to happen every time the consecutive_5xx_ counter\n  // gets saturated (every 5 times).\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, false))\n      .Times(5);\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false))\n      .Times(5);\n\n  // Give 4 hosts enough request volume but not to the 5th. 
Should not cause an ejection.\n  loadRq(hosts_, 25, 200);\n  loadRq(hosts_[4], 25, 503);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(60001));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  // The success rate should be *calculated* since the minimum request volume was met for failure\n  // percentage ejection, but the host should not be ejected.\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n  EXPECT_EQ(50UL, hosts_[4]->outlierDetector().successRate(\n                      DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(-1, detector->successRateAverage(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(-1, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n}\n\nTEST_F(OutlierDetectorImplTest, BasicFlowFailurePercentageLocalOrigin) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\n      \"tcp://127.0.0.1:80\",\n      \"tcp://127.0.0.1:81\",\n      \"tcp://127.0.0.1:82\",\n      \"tcp://127.0.0.1:83\",\n      \"tcp://127.0.0.1:84\",\n  });\n\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, outlier_detection_split_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  // Turn off 5xx detection and SR detection to test failure percentage detection in isolation.\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_consecutive_local_origin_failure\", 100))\n      .WillByDefault(Return(false));\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_local_origin_success_rate\", 100))\n      
.WillByDefault(Return(false));\n  // Now turn on failure percentage detection.\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"outlier_detection.enforcing_failure_percentage_local_origin\", 0))\n      .WillByDefault(Return(true));\n  // Expect non-enforcing logging to happen every time the consecutive_ counter\n  // gets saturated (every 5 times).\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE, false))\n      .Times(40);\n  // Cause a failure percentage error on one host. First 4 of the hosts have perfect failure\n  // percentage.\n  loadRq(hosts_, 200, Result::LocalOriginConnectSuccess);\n  loadRq(hosts_[4], 200, Result::LocalOriginConnectFailed);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10000));\n  EXPECT_CALL(checker_, check(hosts_[4]));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE_LOCAL_ORIGIN, true));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN, false));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  ON_CALL(runtime_.snapshot_, getInteger(\"outlier_detection.failure_percentage_threshold\", 85))\n      .WillByDefault(Return(40));\n  interval_timer_->invokeCallback();\n  EXPECT_EQ(50, hosts_[4]->outlierDetector().successRate(\n                    DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(90,\n            detector->successRateAverage(DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(52, detector->successRateEjectionThreshold(\n                    
DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  // Make sure that external origin success rate monitor is not affected\n  EXPECT_EQ(-1, hosts_[4]->outlierDetector().successRate(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(-1, detector->successRateAverage(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_EQ(-1, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin));\n  EXPECT_TRUE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that doesn't bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(19999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_TRUE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n\n  // Interval that does bring the host back in.\n  time_system_.setMonotonicTime(std::chrono::milliseconds(50001));\n  EXPECT_CALL(checker_, check(hosts_[4]));\n  EXPECT_CALL(*event_logger_,\n              logUneject(std::static_pointer_cast<const HostDescription>(hosts_[4])));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  EXPECT_FALSE(hosts_[4]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n\n  // Expect non-enforcing logging to happen every time the consecutive_ counter\n  // gets saturated (every 5 times).\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[4]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE, false))\n      .Times(5);\n\n  // 
Give 4 hosts enough request volume but not to the 5th. Should not cause an ejection.\n  loadRq(hosts_, 25, Result::LocalOriginConnectSuccess);\n  loadRq(hosts_[4], 25, Result::LocalOriginConnectFailed);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(60001));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n  // The success rate should be *calculated* since the minimum request volume was met for failure\n  // percentage ejection, but the host should not be ejected.\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n  EXPECT_EQ(50UL, hosts_[4]->outlierDetector().successRate(\n                      DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(-1,\n            detector->successRateAverage(DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n  EXPECT_EQ(-1, detector->successRateEjectionThreshold(\n                    DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin));\n}\n\nTEST_F(OutlierDetectorImplTest, RemoveWhileEjected) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  loadRq(hosts_[0], 4, 500);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  loadRq(hosts_[0], 1, 500);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  EXPECT_EQ(1UL, 
outlier_detection_ejections_active_.value());\n\n  HostVector old_hosts = std::move(hosts_);\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, old_hosts);\n\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(9999));\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  interval_timer_->invokeCallback();\n}\n\nTEST_F(OutlierDetectorImplTest, Overflow) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\", \"tcp://127.0.0.1:81\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  ON_CALL(runtime_.snapshot_, getInteger(\"outlier_detection.max_ejection_percent\", _))\n      .WillByDefault(Return(1));\n\n  loadRq(hosts_[0], 4, 500);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  hosts_[0]->outlierDetector().putHttpResponseCode(500);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  loadRq(hosts_[1], 5, 500);\n  EXPECT_FALSE(hosts_[1]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n  EXPECT_EQ(1UL,\n            cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_overflow\").value());\n}\n\nTEST_F(OutlierDetectorImplTest, NotEnforcing) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n 
 EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  loadRq(hosts_[0], 4, 503);\n\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"outlier_detection.enforcing_consecutive_5xx\", 100))\n      .WillByDefault(Return(false));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, false));\n  EXPECT_CALL(*event_logger_,\n              logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]), _,\n                       envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false));\n  loadRq(hosts_[0], 1, 503);\n  EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_total\").value());\n  EXPECT_EQ(\n      0UL,\n      cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_enforced_total\").value());\n  EXPECT_EQ(\n      1UL,\n      cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_consecutive_5xx\").value());\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_detected_consecutive_5xx\")\n                     .value());\n  EXPECT_EQ(0UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_enforced_consecutive_5xx\")\n                     .value());\n  EXPECT_EQ(1UL, cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_detected_consecutive_gateway_failure\")\n                     .value());\n  EXPECT_EQ(0UL, 
cluster_.info_->stats_store_\n                     .counter(\"outlier_detection.ejections_enforced_consecutive_gateway_failure\")\n                     .value());\n}\n\nTEST_F(OutlierDetectorImplTest, EjectionActiveValueIsAccountedWithoutMetricStorage) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\", \"tcp://127.0.0.1:81\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  ON_CALL(runtime_.snapshot_, getInteger(\"outlier_detection.max_ejection_percent\", _))\n      .WillByDefault(Return(1));\n\n  loadRq(hosts_[0], 4, 500);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n\n  // Manually increase the gauge. From metric's perspective it's overflowed.\n  outlier_detection_ejections_active_.inc();\n\n  // Since the overflow is not determined by the metric. Host[0] can be ejected.\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  hosts_[0]->outlierDetector().putHttpResponseCode(500);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  // Expect active helper_ has the value 1. 
However, helper is private and it cannot be tested.\n  EXPECT_EQ(2UL, outlier_detection_ejections_active_.value());\n  EXPECT_EQ(0UL,\n            cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_overflow\").value());\n\n  // Now it starts to overflow.\n  loadRq(hosts_[1], 5, 500);\n  EXPECT_FALSE(hosts_[1]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  EXPECT_EQ(2UL, outlier_detection_ejections_active_.value());\n  EXPECT_EQ(1UL,\n            cluster_.info_->stats_store_.counter(\"outlier_detection.ejections_overflow\").value());\n}\n\nTEST_F(OutlierDetectorImplTest, CrossThreadRemoveRace) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  loadRq(hosts_[0], 4, 500);\n\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  loadRq(hosts_[0], 1, 500);\n\n  // Remove before the cross thread event comes in.\n  HostVector old_hosts = std::move(hosts_);\n  cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, old_hosts);\n  post_cb();\n\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n}\n\nTEST_F(OutlierDetectorImplTest, CrossThreadDestroyRace) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  
loadRq(hosts_[0], 4, 500);\n\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  loadRq(hosts_[0], 1, 500);\n\n  // Destroy before the cross thread event comes in.\n  std::weak_ptr<DetectorImpl> weak_detector = detector;\n  detector.reset();\n  EXPECT_EQ(nullptr, weak_detector.lock());\n  post_cb();\n\n  EXPECT_EQ(0UL, outlier_detection_ejections_active_.value());\n}\n\nTEST_F(OutlierDetectorImplTest, CrossThreadFailRace) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  loadRq(hosts_[0], 4, 500);\n\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  loadRq(hosts_[0], 1, 500);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n\n  // Fire the post callback twice. 
This should only result in a single ejection.\n  post_cb();\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n  post_cb();\n\n  EXPECT_EQ(1UL, outlier_detection_ejections_active_.value());\n}\n\nTEST_F(OutlierDetectorImplTest, Consecutive_5xxAlreadyEjected) {\n  EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_));\n  addHosts({\"tcp://127.0.0.1:80\"});\n  EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _));\n  std::shared_ptr<DetectorImpl> detector(DetectorImpl::create(\n      cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_));\n  detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); });\n\n  // Cause a consecutive 5xx error.\n  loadRq(hosts_[0], 4, 500);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(0));\n  EXPECT_CALL(checker_, check(hosts_[0]));\n  EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast<const HostDescription>(hosts_[0]),\n                                       _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true));\n  loadRq(hosts_[0], 1, 500);\n  EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK));\n\n  // Cause another consecutive 5xx error.\n  loadRq(hosts_[0], 1, 200);\n  loadRq(hosts_[0], 5, 500);\n}\n\nTEST(DetectorHostMonitorNullImplTest, All) {\n  DetectorHostMonitorNullImpl null_sink;\n\n  EXPECT_EQ(0UL, null_sink.numEjections());\n  EXPECT_FALSE(null_sink.lastEjectionTime());\n  EXPECT_FALSE(null_sink.lastUnejectionTime());\n}\n\nTEST(OutlierDetectionEventLoggerImplTest, All) {\n  AccessLog::MockAccessLogManager log_manager;\n  std::shared_ptr<AccessLog::MockAccessLogFile> file(new AccessLog::MockAccessLogFile());\n  NiceMock<MockClusterInfo> cluster;\n  std::shared_ptr<MockHostDescription> host(new NiceMock<MockHostDescription>());\n  ON_CALL(*host, cluster()).WillByDefault(ReturnRef(cluster));\n  Event::SimulatedTimeSystem time_system;\n  // This is 
rendered as \"2018-12-18T09:00:00Z\"\n  time_system.setSystemTime(std::chrono::milliseconds(1545123600000));\n  absl::optional<MonotonicTime> monotonic_time;\n  NiceMock<MockDetector> detector;\n\n  EXPECT_CALL(log_manager, createAccessLog(\"foo\")).WillOnce(Return(file));\n  EventLoggerImpl event_logger(log_manager, \"foo\", time_system);\n\n  StringViewSaver log1;\n  EXPECT_CALL(host->outlier_detector_, lastUnejectionTime()).WillOnce(ReturnRef(monotonic_time));\n\n  EXPECT_CALL(*file, write(absl::string_view(\n                         \"{\\\"type\\\":\\\"CONSECUTIVE_5XX\\\",\\\"cluster_name\\\":\\\"fake_cluster\\\",\"\n                         \"\\\"upstream_url\\\":\\\"10.0.0.1:443\\\",\\\"action\\\":\\\"EJECT\\\",\"\n                         \"\\\"num_ejections\\\":0,\\\"enforced\\\":true,\\\"eject_consecutive_event\\\":{}\"\n                         \",\\\"timestamp\\\":\\\"2018-12-18T09:00:00Z\\\"}\\n\")))\n      .WillOnce(SaveArg<0>(&log1));\n\n  event_logger.logEject(host, detector, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true);\n  Json::Factory::loadFromString(log1);\n\n  StringViewSaver log2;\n  EXPECT_CALL(host->outlier_detector_, lastEjectionTime()).WillOnce(ReturnRef(monotonic_time));\n\n  EXPECT_CALL(*file, write(absl::string_view(\n                         \"{\\\"type\\\":\\\"CONSECUTIVE_5XX\\\",\\\"cluster_name\\\":\\\"fake_cluster\\\",\"\n                         \"\\\"upstream_url\\\":\\\"10.0.0.1:443\\\",\\\"action\\\":\\\"UNEJECT\\\",\"\n                         \"\\\"num_ejections\\\":0,\\\"enforced\\\":false,\"\n                         \"\\\"timestamp\\\":\\\"2018-12-18T09:00:00Z\\\"}\\n\")))\n      .WillOnce(SaveArg<0>(&log2));\n\n  event_logger.logUneject(host);\n  Json::Factory::loadFromString(log2);\n\n  // now test with time since last action.\n  monotonic_time = (time_system.monotonicTime() - std::chrono::seconds(30));\n\n  StringViewSaver log3;\n  EXPECT_CALL(host->outlier_detector_, 
lastUnejectionTime()).WillOnce(ReturnRef(monotonic_time));\n  EXPECT_CALL(host->outlier_detector_,\n              successRate(DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))\n      .WillOnce(Return(0));\n  EXPECT_CALL(detector,\n              successRateAverage(DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))\n      .WillOnce(Return(0));\n  EXPECT_CALL(detector, successRateEjectionThreshold(\n                            DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))\n      .WillOnce(Return(0));\n  EXPECT_CALL(*file,\n              write(absl::string_view(\n                  \"{\\\"type\\\":\\\"SUCCESS_RATE\\\",\\\"cluster_name\\\":\\\"fake_cluster\\\",\"\n                  \"\\\"upstream_url\\\":\\\"10.0.0.1:443\\\",\\\"action\\\":\\\"EJECT\\\",\"\n                  \"\\\"num_ejections\\\":0,\\\"enforced\\\":false,\\\"eject_success_rate_event\\\":{\"\n                  \"\\\"host_success_rate\\\":0,\\\"cluster_average_success_rate\\\":0,\"\n                  \"\\\"cluster_success_rate_ejection_threshold\\\":0},\"\n                  \"\\\"timestamp\\\":\\\"2018-12-18T09:00:00Z\\\",\\\"secs_since_last_action\\\":\\\"30\\\"}\\n\")))\n      .WillOnce(SaveArg<0>(&log3));\n  event_logger.logEject(host, detector, envoy::data::cluster::v2alpha::SUCCESS_RATE, false);\n  Json::Factory::loadFromString(log3);\n\n  StringViewSaver log4;\n  EXPECT_CALL(host->outlier_detector_, lastEjectionTime()).WillOnce(ReturnRef(monotonic_time));\n  EXPECT_CALL(*file,\n              write(absl::string_view(\n                  \"{\\\"type\\\":\\\"CONSECUTIVE_5XX\\\",\\\"cluster_name\\\":\\\"fake_cluster\\\",\"\n                  \"\\\"upstream_url\\\":\\\"10.0.0.1:443\\\",\\\"action\\\":\\\"UNEJECT\\\",\"\n                  \"\\\"num_ejections\\\":0,\\\"enforced\\\":false,\\\"timestamp\\\":\\\"2018-12-18T09:00:00Z\\\",\"\n                  \"\\\"secs_since_last_action\\\":\\\"30\\\"}\\n\")))\n      .WillOnce(SaveArg<0>(&log4));\n  
event_logger.logUneject(host);\n  Json::Factory::loadFromString(log4);\n\n  StringViewSaver log5;\n  EXPECT_CALL(host->outlier_detector_, lastUnejectionTime()).WillOnce(ReturnRef(monotonic_time));\n  EXPECT_CALL(host->outlier_detector_,\n              successRate(DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))\n      .WillOnce(Return(0));\n  EXPECT_CALL(*file,\n              write(absl::string_view(\n                  \"{\\\"type\\\":\\\"FAILURE_PERCENTAGE\\\",\\\"cluster_name\\\":\\\"fake_cluster\\\",\"\n                  \"\\\"upstream_url\\\":\\\"10.0.0.1:443\\\",\\\"action\\\":\\\"EJECT\\\",\"\n                  \"\\\"num_ejections\\\":0,\\\"enforced\\\":false,\\\"eject_failure_percentage_event\\\":{\"\n                  \"\\\"host_success_rate\\\":0},\\\"timestamp\\\":\\\"2018-12-18T09:00:00Z\\\",\"\n                  \"\\\"secs_since_last_action\\\":\\\"30\\\"}\\n\")))\n      .WillOnce(SaveArg<0>(&log5));\n  event_logger.logEject(host, detector, envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE, false);\n  Json::Factory::loadFromString(log5);\n\n  StringViewSaver log6;\n  EXPECT_CALL(host->outlier_detector_, lastEjectionTime()).WillOnce(ReturnRef(monotonic_time));\n  EXPECT_CALL(*file,\n              write(absl::string_view(\n                  \"{\\\"type\\\":\\\"CONSECUTIVE_5XX\\\",\\\"cluster_name\\\":\\\"fake_cluster\\\",\"\n                  \"\\\"upstream_url\\\":\\\"10.0.0.1:443\\\",\\\"action\\\":\\\"UNEJECT\\\",\"\n                  \"\\\"num_ejections\\\":0,\\\"enforced\\\":false,\\\"timestamp\\\":\\\"2018-12-18T09:00:00Z\\\",\"\n                  \"\\\"secs_since_last_action\\\":\\\"30\\\"}\\n\")))\n      .WillOnce(SaveArg<0>(&log6));\n  event_logger.logUneject(host);\n  Json::Factory::loadFromString(log6);\n}\n\nTEST(OutlierUtility, SRThreshold) {\n  std::vector<HostSuccessRatePair> data = {\n      HostSuccessRatePair(nullptr, 50),  HostSuccessRatePair(nullptr, 100),\n      HostSuccessRatePair(nullptr, 100), 
HostSuccessRatePair(nullptr, 100),\n      HostSuccessRatePair(nullptr, 100),\n  };\n  double sum = 450;\n\n  DetectorImpl::EjectionPair success_rate_nums =\n      DetectorImpl::successRateEjectionThreshold(sum, data, 1.9);\n  EXPECT_EQ(90.0, success_rate_nums.success_rate_average_); // average success rate\n  EXPECT_EQ(52.0, success_rate_nums.ejection_threshold_);   // ejection threshold\n}\n\n} // namespace\n} // namespace Outlier\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/priority_conn_pool_map_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/http/conn_pool.h\"\n\n#include \"common/upstream/priority_conn_pool_map_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/conn_pool.h\"\n#include \"test/mocks/upstream/host.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::AtLeast;\nusing testing::Return;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass PriorityConnPoolMapImplTest : public testing::Test {\npublic:\n  using TestMap = PriorityConnPoolMap<int, Http::ConnectionPool::Instance>;\n  using TestMapPtr = std::unique_ptr<TestMap>;\n\n  TestMapPtr makeTestMap() { return std::make_unique<TestMap>(dispatcher_, host_); }\n\n  TestMap::PoolFactory getBasicFactory() {\n    return [&]() {\n      auto pool = std::make_unique<NiceMock<Http::ConnectionPool::MockInstance>>();\n      ON_CALL(*pool, hasActiveConnections).WillByDefault(Return(false));\n      mock_pools_.push_back(pool.get());\n      return pool;\n    };\n  }\n\nprotected:\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  std::vector<NiceMock<Http::ConnectionPool::MockInstance>*> mock_pools_;\n  std::shared_ptr<NiceMock<MockHost>> host_ = std::make_shared<NiceMock<MockHost>>();\n};\n\n// Show that we return a non-null value, and that we invoke the default resource manager\nTEST_F(PriorityConnPoolMapImplTest, DefaultPriorityProxiedThrough) {\n  TestMapPtr test_map = makeTestMap();\n\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(0);\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(AtLeast(1));\n\n  auto pool = test_map->getPool(ResourcePriority::Default, 0, getBasicFactory());\n  EXPECT_TRUE(pool.has_value());\n\n  // At this point, we may clean up/decrement by 0, etc, so allow any number.\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(AtLeast(1));\n}\n\n// Show that we return a 
non-null value, and that we invoke the high resource manager\nTEST_F(PriorityConnPoolMapImplTest, HighPriorityProxiedThrough) {\n  TestMapPtr test_map = makeTestMap();\n\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(0);\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(AtLeast(1));\n\n  auto pool = test_map->getPool(ResourcePriority::High, 0, getBasicFactory());\n  EXPECT_TRUE(pool.has_value());\n\n  // At this point, we may clean up/decrement by 0, etc, so allow any number.\n  EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(AtLeast(1));\n}\n\nTEST_F(PriorityConnPoolMapImplTest, TestSizeForSinglePriority) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(ResourcePriority::High, 0, getBasicFactory());\n  test_map->getPool(ResourcePriority::High, 1, getBasicFactory());\n\n  EXPECT_EQ(test_map->size(), 2);\n}\n\nTEST_F(PriorityConnPoolMapImplTest, TestSizeForMultiplePriorities) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(ResourcePriority::High, 0, getBasicFactory());\n  test_map->getPool(ResourcePriority::High, 1, getBasicFactory());\n  test_map->getPool(ResourcePriority::Default, 0, getBasicFactory());\n  test_map->getPool(ResourcePriority::Default, 1, getBasicFactory());\n  test_map->getPool(ResourcePriority::Default, 2, getBasicFactory());\n\n  EXPECT_EQ(test_map->size(), 5);\n}\n\nTEST_F(PriorityConnPoolMapImplTest, TestClearEmptiesOut) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(ResourcePriority::High, 0, getBasicFactory());\n  test_map->getPool(ResourcePriority::High, 1, getBasicFactory());\n  test_map->getPool(ResourcePriority::Default, 0, getBasicFactory());\n  test_map->getPool(ResourcePriority::Default, 1, getBasicFactory());\n  test_map->getPool(ResourcePriority::Default, 2, getBasicFactory());\n  test_map->clear();\n\n  EXPECT_EQ(test_map->size(), 0);\n}\n\n// Show that the drained callback is invoked once 
for the high priority pool, and once for\n// the default priority pool.\nTEST_F(PriorityConnPoolMapImplTest, TestAddDrainedCbProxiedThrough) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(ResourcePriority::High, 0, getBasicFactory());\n  test_map->getPool(ResourcePriority::Default, 0, getBasicFactory());\n\n  Http::ConnectionPool::Instance::DrainedCb cbHigh;\n  EXPECT_CALL(*mock_pools_[0], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cbHigh));\n  Http::ConnectionPool::Instance::DrainedCb cbDefault;\n  EXPECT_CALL(*mock_pools_[1], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cbDefault));\n\n  ReadyWatcher watcher;\n  test_map->addDrainedCallback([&watcher] { watcher.ready(); });\n\n  EXPECT_CALL(watcher, ready()).Times(2);\n  cbHigh();\n  cbDefault();\n}\n\nTEST_F(PriorityConnPoolMapImplTest, TestDrainConnectionsProxiedThrough) {\n  TestMapPtr test_map = makeTestMap();\n\n  test_map->getPool(ResourcePriority::High, 0, getBasicFactory());\n  test_map->getPool(ResourcePriority::Default, 0, getBasicFactory());\n\n  EXPECT_CALL(*mock_pools_[0], drainConnections());\n  EXPECT_CALL(*mock_pools_[1], drainConnections());\n\n  test_map->drainConnections();\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/resource_manager_impl_test.cc",
    "content": "#include \"envoy/stats/stats.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/upstream/resource_manager_impl.h\"\n\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nTEST(ResourceManagerImplTest, RuntimeResourceManager) {\n  NiceMock<Runtime::MockLoader> runtime;\n  NiceMock<Stats::MockGauge> gauge;\n  NiceMock<Stats::MockStore> store;\n\n  ON_CALL(store, gauge(_, _)).WillByDefault(ReturnRef(gauge));\n\n  ResourceManagerImpl resource_manager(\n      runtime, \"circuit_breakers.runtime_resource_manager_test.default.\", 0, 0, 0, 1, 0,\n      ClusterCircuitBreakersStats{\n          ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE(store), POOL_GAUGE(store))},\n      absl::nullopt, absl::nullopt);\n\n  EXPECT_CALL(\n      runtime.snapshot_,\n      getInteger(\"circuit_breakers.runtime_resource_manager_test.default.max_connections\", 0U))\n      .Times(2)\n      .WillRepeatedly(Return(1U));\n  EXPECT_EQ(1U, resource_manager.connections().max());\n  EXPECT_TRUE(resource_manager.connections().canCreate());\n\n  EXPECT_CALL(\n      runtime.snapshot_,\n      getInteger(\"circuit_breakers.runtime_resource_manager_test.default.max_pending_requests\", 0U))\n      .Times(2)\n      .WillRepeatedly(Return(2U));\n  EXPECT_EQ(2U, resource_manager.pendingRequests().max());\n  EXPECT_TRUE(resource_manager.pendingRequests().canCreate());\n\n  EXPECT_CALL(runtime.snapshot_,\n              getInteger(\"circuit_breakers.runtime_resource_manager_test.default.max_requests\", 0U))\n      .Times(2)\n      .WillRepeatedly(Return(3U));\n  EXPECT_EQ(3U, resource_manager.requests().max());\n  EXPECT_TRUE(resource_manager.requests().canCreate());\n\n  EXPECT_CALL(runtime.snapshot_,\n              
getInteger(\"circuit_breakers.runtime_resource_manager_test.default.max_retries\", 1U))\n      .Times(2)\n      .WillRepeatedly(Return(0U));\n  EXPECT_EQ(0U, resource_manager.retries().max());\n  EXPECT_FALSE(resource_manager.retries().canCreate());\n\n  EXPECT_CALL(\n      runtime.snapshot_,\n      getInteger(\"circuit_breakers.runtime_resource_manager_test.default.max_connection_pools\", 0U))\n      .Times(2)\n      .WillRepeatedly(Return(5U));\n  EXPECT_EQ(5U, resource_manager.connectionPools().max());\n  EXPECT_TRUE(resource_manager.connectionPools().canCreate());\n\n  // Verify retry budgets override max_retries.\n  std::string value;\n  EXPECT_CALL(runtime.snapshot_, get(_)).WillRepeatedly(Return(value));\n  EXPECT_CALL(runtime.snapshot_, getInteger(\"circuit_breakers.runtime_resource_manager_test.\"\n                                            \"default.retry_budget.min_retry_concurrency\",\n                                            _))\n      .WillRepeatedly(Return(5U));\n  EXPECT_EQ(5U, resource_manager.retries().max());\n  EXPECT_TRUE(resource_manager.retries().canCreate());\n}\n\nTEST(ResourceManagerImplTest, RemainingResourceGauges) {\n  NiceMock<Runtime::MockLoader> runtime;\n  Stats::IsolatedStoreImpl store;\n\n  auto stats = ClusterCircuitBreakersStats{\n      ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE(store), POOL_GAUGE(store))};\n  ResourceManagerImpl resource_manager(runtime,\n                                       \"circuit_breakers.runtime_resource_manager_test.default.\", 1,\n                                       2, 1, 0, 3, stats, absl::nullopt, absl::nullopt);\n\n  // Test remaining_cx_ gauge\n  EXPECT_EQ(1U, resource_manager.connections().max());\n  EXPECT_EQ(1U, stats.remaining_cx_.value());\n  EXPECT_EQ(0U, resource_manager.connections().count());\n  resource_manager.connections().inc();\n  EXPECT_EQ(1U, resource_manager.connections().count());\n  EXPECT_EQ(0U, stats.remaining_cx_.value());\n  resource_manager.connections().dec();\n  
EXPECT_EQ(1U, stats.remaining_cx_.value());\n\n  // Test remaining_pending_ gauge\n  EXPECT_EQ(2U, resource_manager.pendingRequests().max());\n  EXPECT_EQ(2U, stats.remaining_pending_.value());\n  EXPECT_EQ(0U, resource_manager.pendingRequests().count());\n  resource_manager.pendingRequests().inc();\n  EXPECT_EQ(1U, resource_manager.pendingRequests().count());\n  EXPECT_EQ(1U, stats.remaining_pending_.value());\n  resource_manager.pendingRequests().inc();\n  EXPECT_EQ(0U, stats.remaining_pending_.value());\n  resource_manager.pendingRequests().dec();\n  EXPECT_EQ(1U, stats.remaining_pending_.value());\n  resource_manager.pendingRequests().dec();\n  EXPECT_EQ(2U, stats.remaining_pending_.value());\n  EXPECT_EQ(2U, stats.remaining_pending_.value());\n\n  // Test remaining_rq_ gauge\n  EXPECT_EQ(1U, resource_manager.requests().max());\n  EXPECT_EQ(1U, stats.remaining_rq_.value());\n  EXPECT_EQ(0U, resource_manager.requests().count());\n  resource_manager.requests().inc();\n  EXPECT_EQ(1U, resource_manager.requests().count());\n  EXPECT_EQ(0U, stats.remaining_rq_.value());\n  resource_manager.requests().dec();\n  EXPECT_EQ(1U, stats.remaining_rq_.value());\n\n  // Test remaining_retries_ gauge. 
Confirm that the value will not be negative\n  // despite having more retries than the configured max\n  EXPECT_EQ(0U, resource_manager.retries().max());\n  EXPECT_EQ(0U, stats.remaining_retries_.value());\n  EXPECT_EQ(0U, resource_manager.retries().count());\n  resource_manager.retries().inc();\n  EXPECT_EQ(1U, resource_manager.retries().count());\n  EXPECT_EQ(0U, stats.remaining_retries_.value());\n  resource_manager.retries().dec();\n\n  // Test remaining_cx_pools gauge.\n  EXPECT_EQ(3U, resource_manager.connectionPools().max());\n  EXPECT_EQ(3U, stats.remaining_cx_pools_.value());\n  EXPECT_EQ(0U, resource_manager.connectionPools().count());\n  resource_manager.connectionPools().inc();\n  EXPECT_EQ(1U, resource_manager.connectionPools().count());\n  EXPECT_EQ(2U, stats.remaining_cx_pools_.value());\n  resource_manager.connectionPools().dec();\n  EXPECT_EQ(3U, stats.remaining_cx_pools_.value());\n}\n\nTEST(ResourceManagerImplTest, RetryBudgetOverrideGauge) {\n  NiceMock<Runtime::MockLoader> runtime;\n  Stats::IsolatedStoreImpl store;\n\n  auto stats = ClusterCircuitBreakersStats{\n      ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE(store), POOL_GAUGE(store))};\n\n  // Test retry budgets disable remaining_retries gauge (it should always be 0).\n  ResourceManagerImpl rm(runtime, \"circuit_breakers.runtime_resource_manager_test.default.\", 1, 2,\n                         1, 0, 3, stats, 20.0, 5);\n\n  EXPECT_EQ(5U, rm.retries().max());\n  EXPECT_EQ(0U, stats.remaining_retries_.value());\n  EXPECT_EQ(0U, rm.retries().count());\n  rm.retries().inc();\n  EXPECT_EQ(1U, rm.retries().count());\n  EXPECT_EQ(0U, stats.remaining_retries_.value());\n  rm.retries().dec();\n}\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/ring_hash_lb_test.cc",
    "content": "#include <cstdint>\n#include <limits>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/router/router.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/upstream/ring_hash_lb.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass TestLoadBalancerContext : public LoadBalancerContextBase {\npublic:\n  using HostPredicate = std::function<bool(const Host&)>;\n\n  TestLoadBalancerContext(uint64_t hash_key)\n      : TestLoadBalancerContext(hash_key, 0, [](const Host&) { return false; }) {}\n  TestLoadBalancerContext(uint64_t hash_key, uint32_t retry_count,\n                          HostPredicate should_select_another_host)\n      : hash_key_(hash_key), retry_count_(retry_count),\n        should_select_another_host_(should_select_another_host) {}\n\n  // Upstream::LoadBalancerContext\n  absl::optional<uint64_t> computeHashKey() override { return hash_key_; }\n  uint32_t hostSelectionRetryCount() const override { return retry_count_; };\n  bool shouldSelectAnotherHost(const Host& host) override {\n    return should_select_another_host_(host);\n  }\n\n  absl::optional<uint64_t> hash_key_;\n  uint32_t retry_count_;\n  HostPredicate should_select_another_host_;\n};\n\nclass RingHashLoadBalancerTest : public testing::TestWithParam<bool> {\npublic:\n  RingHashLoadBalancerTest() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {}\n\n  void init() {\n    lb_ = std::make_unique<RingHashLoadBalancer>(priority_set_, 
stats_, stats_store_, runtime_,\n                                                 random_, config_, common_config_);\n    lb_->initialize();\n  }\n\n  // Run all tests against both priority 0 and priority 1 host sets, to ensure\n  // all the load balancers have equivalent functionality for failover host sets.\n  MockHostSet& hostSet() { return GetParam() ? host_set_ : failover_host_set_; }\n\n  NiceMock<MockPrioritySet> priority_set_;\n  MockHostSet& host_set_ = *priority_set_.getMockHostSet(0);\n  MockHostSet& failover_host_set_ = *priority_set_.getMockHostSet(1);\n  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};\n  Stats::IsolatedStoreImpl stats_store_;\n  ClusterStats stats_;\n  absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig> config_;\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  std::unique_ptr<RingHashLoadBalancer> lb_;\n};\n\n// For tests which don't need to be run in both primary and failover modes.\nusing RingHashFailoverTest = RingHashLoadBalancerTest;\n\nINSTANTIATE_TEST_SUITE_P(RingHashPrimaryOrFailover, RingHashLoadBalancerTest,\n                         ::testing::Values(true, false));\nINSTANTIATE_TEST_SUITE_P(RingHashPrimaryOrFailover, RingHashFailoverTest, ::testing::Values(true));\n\n// Given no hosts, expect chooseHost to return null.\nTEST_P(RingHashLoadBalancerTest, NoHost) {\n  init();\n  EXPECT_EQ(nullptr, lb_->factory()->create()->chooseHost(nullptr));\n};\n\n// Given minimum_ring_size > maximum_ring_size, expect an exception.\nTEST_P(RingHashLoadBalancerTest, BadRingSizeBounds) {\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(20);\n  config_.value().mutable_maximum_ring_size()->set_value(10);\n  EXPECT_THROW_WITH_MESSAGE(init(), EnvoyException,\n                            \"ring hash: 
minimum_ring_size (20) > maximum_ring_size (10)\");\n}\n\nTEST_P(RingHashLoadBalancerTest, Basic) {\n  hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:90\"), makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:92\"), makeTestHost(info_, \"tcp://127.0.0.1:93\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:94\"), makeTestHost(info_, \"tcp://127.0.0.1:95\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(12);\n\n  init();\n  EXPECT_EQ(\"ring_hash_lb.size\", lb_->stats().size_.name());\n  EXPECT_EQ(\"ring_hash_lb.min_hashes_per_host\", lb_->stats().min_hashes_per_host_.name());\n  EXPECT_EQ(\"ring_hash_lb.max_hashes_per_host\", lb_->stats().max_hashes_per_host_.name());\n  EXPECT_EQ(12, lb_->stats().size_.value());\n  EXPECT_EQ(2, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(2, lb_->stats().max_hashes_per_host_.value());\n\n  // hash ring:\n  // port | position\n  // ---------------------------\n  // :94  | 833437586790550860\n  // :92  | 928266305478181108\n  // :90  | 1033482794131418490\n  // :95  | 3551244743356806947\n  // :93  | 3851675632748031481\n  // :91  | 5583722120771150861\n  // :91  | 6311230543546372928\n  // :93  | 7700377290971790572\n  // :95  | 13144177310400110813\n  // :92  | 13444792449719432967\n  // :94  | 15516499411664133160\n  // :90  | 16117243373044804889\n\n  LoadBalancerPtr lb = lb_->factory()->create();\n  {\n    TestLoadBalancerContext context(0);\n    EXPECT_EQ(hostSet().hosts_[4], lb->chooseHost(&context));\n  }\n  {\n    TestLoadBalancerContext context(std::numeric_limits<uint64_t>::max());\n    EXPECT_EQ(hostSet().hosts_[4], lb->chooseHost(&context));\n  }\n  {\n    TestLoadBalancerContext context(3551244743356806947);\n    EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context));\n  }\n  {\n    
TestLoadBalancerContext context(3551244743356806948);\n    EXPECT_EQ(hostSet().hosts_[3], lb->chooseHost(&context));\n  }\n  {\n    EXPECT_CALL(random_, random()).WillOnce(Return(16117243373044804880UL));\n    EXPECT_EQ(hostSet().hosts_[0], lb->chooseHost(nullptr));\n  }\n  EXPECT_EQ(0UL, stats_.lb_healthy_panic_.value());\n\n  hostSet().healthy_hosts_.clear();\n  hostSet().runCallbacks({}, {});\n  lb = lb_->factory()->create();\n  {\n    TestLoadBalancerContext context(0);\n    EXPECT_EQ(hostSet().hosts_[4], lb->chooseHost(&context));\n  }\n  EXPECT_EQ(1UL, stats_.lb_healthy_panic_.value());\n}\n\n// Ensure if all the hosts with priority 0 unhealthy, the next priority hosts are used.\nTEST_P(RingHashFailoverTest, BasicFailover) {\n  host_set_.hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\")};\n  failover_host_set_.healthy_hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:82\")};\n  failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_;\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(12);\n  init();\n  EXPECT_EQ(12, lb_->stats().size_.value());\n  EXPECT_EQ(12, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(12, lb_->stats().max_hashes_per_host_.value());\n\n  LoadBalancerPtr lb = lb_->factory()->create();\n  EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb->chooseHost(nullptr));\n\n  // Add a healthy host at P=0 and it will be chosen.\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.runCallbacks({}, {});\n  lb = lb_->factory()->create();\n  EXPECT_EQ(host_set_.healthy_hosts_[0], lb->chooseHost(nullptr));\n\n  // Remove the healthy host and ensure we fail back over to the failover_host_set_\n  host_set_.healthy_hosts_ = {};\n  host_set_.runCallbacks({}, {});\n  lb = lb_->factory()->create();\n  EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb->chooseHost(nullptr));\n\n  // Set up so P=0 gets 70% of the load, and P=1 gets 30%.\n  host_set_.hosts_ = 
{makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                      makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  host_set_.healthy_hosts_ = {host_set_.hosts_[0]};\n  host_set_.runCallbacks({}, {});\n  lb = lb_->factory()->create();\n  EXPECT_CALL(random_, random()).WillOnce(Return(69));\n  EXPECT_EQ(host_set_.healthy_hosts_[0], lb->chooseHost(nullptr));\n  EXPECT_CALL(random_, random()).WillOnce(Return(71));\n  EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb->chooseHost(nullptr));\n}\n\n// Expect reasonable results with Murmur2 hash.\nTEST_P(RingHashLoadBalancerTest, BasicWithMurmur2) {\n  hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:82\"), makeTestHost(info_, \"tcp://127.0.0.1:83\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:84\"), makeTestHost(info_, \"tcp://127.0.0.1:85\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().set_hash_function(\n      envoy::config::cluster::v3::Cluster::RingHashLbConfig::MURMUR_HASH_2);\n  config_.value().mutable_minimum_ring_size()->set_value(12);\n  init();\n  EXPECT_EQ(12, lb_->stats().size_.value());\n  EXPECT_EQ(2, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(2, lb_->stats().max_hashes_per_host_.value());\n\n  // This is the hash ring built using murmur2 hash.\n  // ring hash: host=127.0.0.1:85 hash=1358027074129602068\n  // ring hash: host=127.0.0.1:83 hash=4361834613929391114\n  // ring hash: host=127.0.0.1:84 hash=7224494972555149682\n  // ring hash: host=127.0.0.1:81 hash=7701421856454313576\n  // ring hash: host=127.0.0.1:82 hash=8649315368077433379\n  // ring hash: host=127.0.0.1:84 hash=8739448859063030639\n  // ring hash: host=127.0.0.1:81 hash=9887544217113020895\n  // ring hash: host=127.0.0.1:82 hash=10150910876324007731\n  // ring hash: 
host=127.0.0.1:83 hash=15168472011420622455\n  // ring hash: host=127.0.0.1:80 hash=15427156902705414897\n  // ring hash: host=127.0.0.1:85 hash=16375050414328759093\n  // ring hash: host=127.0.0.1:80 hash=17613279263364193813\n  LoadBalancerPtr lb = lb_->factory()->create();\n  {\n    TestLoadBalancerContext context(0);\n    EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context));\n  }\n  {\n    TestLoadBalancerContext context(std::numeric_limits<uint64_t>::max());\n    EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context));\n  }\n  {\n    TestLoadBalancerContext context(1358027074129602068);\n    EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context));\n  }\n  {\n    TestLoadBalancerContext context(1358027074129602069);\n    EXPECT_EQ(hostSet().hosts_[3], lb->chooseHost(&context));\n  }\n  {\n    EXPECT_CALL(random_, random()).WillOnce(Return(10150910876324007730UL));\n    EXPECT_EQ(hostSet().hosts_[2], lb->chooseHost(nullptr));\n  }\n  EXPECT_EQ(0UL, stats_.lb_healthy_panic_.value());\n}\n\n// Expect reasonable results with hostname.\nTEST_P(RingHashLoadBalancerTest, BasicWithHostname) {\n  hostSet().hosts_ = {makeTestHost(info_, \"90\", \"tcp://127.0.0.1:90\"),\n                      makeTestHost(info_, \"91\", \"tcp://127.0.0.1:91\"),\n                      makeTestHost(info_, \"92\", \"tcp://127.0.0.1:92\"),\n                      makeTestHost(info_, \"93\", \"tcp://127.0.0.1:93\"),\n                      makeTestHost(info_, \"94\", \"tcp://127.0.0.1:94\"),\n                      makeTestHost(info_, \"95\", \"tcp://127.0.0.1:95\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(12);\n\n  common_config_ = envoy::config::cluster::v3::Cluster::CommonLbConfig();\n  auto chc = envoy::config::cluster::v3::Cluster::CommonLbConfig::ConsistentHashingLbConfig();\n  
chc.set_use_hostname_for_hashing(true);\n  common_config_.set_allocated_consistent_hashing_lb_config(&chc);\n\n  init();\n  common_config_.release_consistent_hashing_lb_config();\n\n  EXPECT_EQ(\"ring_hash_lb.size\", lb_->stats().size_.name());\n  EXPECT_EQ(\"ring_hash_lb.min_hashes_per_host\", lb_->stats().min_hashes_per_host_.name());\n  EXPECT_EQ(\"ring_hash_lb.max_hashes_per_host\", lb_->stats().max_hashes_per_host_.name());\n  EXPECT_EQ(12, lb_->stats().size_.value());\n  EXPECT_EQ(2, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(2, lb_->stats().max_hashes_per_host_.value());\n\n  // hash ring:\n  // host | position\n  // ---------------------------\n  // 95 | 1975508444536362413\n  // 95 | 2376063919839173711\n  // 93 | 2386806903309390596\n  // 94 | 6749904478991551885\n  // 93 | 6803900775736438537\n  // 92 | 7225015537174310577\n  // 90 | 8787465352164086522\n  // 92 | 11282020843382717940\n  // 91 | 13723418369486627818\n  // 90 | 13776502110861797421\n  // 91 | 14338313586354474791\n  // 94 | 15364271037087512980\n\n  LoadBalancerPtr lb = lb_->factory()->create();\n  {\n    TestLoadBalancerContext context(0);\n    EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context));\n  }\n  {\n    TestLoadBalancerContext context(std::numeric_limits<uint64_t>::max());\n    EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context));\n  }\n  {\n    TestLoadBalancerContext context(7225015537174310577);\n    EXPECT_EQ(hostSet().hosts_[2], lb->chooseHost(&context));\n  }\n  {\n    TestLoadBalancerContext context(6803900775736438537);\n    EXPECT_EQ(hostSet().hosts_[3], lb->chooseHost(&context));\n  }\n  { EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(nullptr)); }\n  EXPECT_EQ(0UL, stats_.lb_healthy_panic_.value());\n\n  hostSet().healthy_hosts_.clear();\n  hostSet().runCallbacks({}, {});\n  lb = lb_->factory()->create();\n  {\n    TestLoadBalancerContext context(0);\n    EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context));\n  }\n  EXPECT_EQ(1UL, 
stats_.lb_healthy_panic_.value());\n}\n\n// Test the same ring as Basic but exercise retry host predicate behavior.\nTEST_P(RingHashLoadBalancerTest, BasicWithRetryHostPredicate) {\n  hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:90\"), makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:92\"), makeTestHost(info_, \"tcp://127.0.0.1:93\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:94\"), makeTestHost(info_, \"tcp://127.0.0.1:95\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(12);\n\n  init();\n  EXPECT_EQ(\"ring_hash_lb.size\", lb_->stats().size_.name());\n  EXPECT_EQ(\"ring_hash_lb.min_hashes_per_host\", lb_->stats().min_hashes_per_host_.name());\n  EXPECT_EQ(\"ring_hash_lb.max_hashes_per_host\", lb_->stats().max_hashes_per_host_.name());\n  EXPECT_EQ(12, lb_->stats().size_.value());\n  EXPECT_EQ(2, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(2, lb_->stats().max_hashes_per_host_.value());\n\n  // hash ring:\n  // port | position\n  // ---------------------------\n  // :94  | 833437586790550860\n  // :92  | 928266305478181108\n  // :90  | 1033482794131418490\n  // :95  | 3551244743356806947\n  // :93  | 3851675632748031481\n  // :91  | 5583722120771150861\n  // :91  | 6311230543546372928\n  // :93  | 7700377290971790572\n  // :95  | 13144177310400110813\n  // :92  | 13444792449719432967\n  // :94  | 15516499411664133160\n  // :90  | 16117243373044804889\n\n  LoadBalancerPtr lb = lb_->factory()->create();\n  {\n    // Proof that we know which host will be selected.\n    TestLoadBalancerContext context(0);\n    EXPECT_EQ(hostSet().hosts_[4], lb->chooseHost(&context));\n  }\n  {\n    // First attempt succeeds even when retry count is > 0.\n    TestLoadBalancerContext context(0, 2, [](const Host&) { return false; });\n    
EXPECT_EQ(hostSet().hosts_[4], lb->chooseHost(&context));\n  }\n  {\n    // Second attempt chooses the next host in the ring.\n    TestLoadBalancerContext context(\n        0, 2, [&](const Host& host) { return &host == hostSet().hosts_[4].get(); });\n    EXPECT_EQ(hostSet().hosts_[2], lb->chooseHost(&context));\n  }\n  {\n    // Exhausted retries return the last checked host.\n    TestLoadBalancerContext context(0, 2, [](const Host&) { return true; });\n    EXPECT_EQ(hostSet().hosts_[0], lb->chooseHost(&context));\n  }\n  {\n    // Retries wrap around the ring.\n    TestLoadBalancerContext context(0, 13, [](const Host&) { return true; });\n    EXPECT_EQ(hostSet().hosts_[2], lb->chooseHost(&context));\n  }\n}\n\n// Given 2 hosts and a minimum ring size of 3, expect 2 hashes per host and a ring size of 4.\nTEST_P(RingHashLoadBalancerTest, UnevenHosts) {\n  hostSet().hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:80\"),\n                      makeTestHost(info_, \"tcp://127.0.0.1:81\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(3);\n  init();\n  EXPECT_EQ(4, lb_->stats().size_.value());\n  EXPECT_EQ(2, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(2, lb_->stats().max_hashes_per_host_.value());\n\n  // hash ring:\n  // port | position\n  // ---------------------------\n  // :80  | 5454692015285649509\n  // :81  | 7859399908942313493\n  // :80  | 13838424394637650569\n  // :81  | 16064866803292627174\n\n  LoadBalancerPtr lb = lb_->factory()->create();\n  {\n    TestLoadBalancerContext context(0);\n    EXPECT_EQ(hostSet().hosts_[0], lb->chooseHost(&context));\n  }\n\n  hostSet().hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n                      makeTestHost(info_, \"tcp://127.0.0.1:82\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, {});\n\n  // 
hash ring:\n  // port | position\n  // ------------------\n  // :81  | 7859399908942313493\n  // :82  | 8241336090459785962\n  // :82  | 12882406409176325258\n  // :81  | 16064866803292627174\n\n  lb = lb_->factory()->create();\n  {\n    TestLoadBalancerContext context(0);\n    EXPECT_EQ(hostSet().hosts_[0], lb->chooseHost(&context));\n  }\n}\n\n// Given hosts with weights 1, 2 and 3, and a ring size of exactly 6, expect the correct number of\n// hashes for each host.\nTEST_P(RingHashLoadBalancerTest, HostWeightedTinyRing) {\n  hostSet().hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:90\", 1),\n                      makeTestHost(info_, \"tcp://127.0.0.1:91\", 2),\n                      makeTestHost(info_, \"tcp://127.0.0.1:92\", 3)};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, {});\n\n  // enforce a ring size of exactly six entries\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(6);\n  config_.value().mutable_maximum_ring_size()->set_value(6);\n  init();\n  EXPECT_EQ(6, lb_->stats().size_.value());\n  EXPECT_EQ(1, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(3, lb_->stats().max_hashes_per_host_.value());\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // :90 should appear once, :91 should appear twice and :92 should appear three times.\n  absl::node_hash_map<uint64_t, uint32_t> expected{\n      {928266305478181108UL, 2},  {4443673547860492590UL, 2},  {5583722120771150861UL, 1},\n      {6311230543546372928UL, 1}, {13444792449719432967UL, 2}, {16117243373044804889UL, 0}};\n  for (const auto& entry : expected) {\n    TestLoadBalancerContext context(entry.first);\n    EXPECT_EQ(hostSet().hosts_[entry.second], lb->chooseHost(&context));\n  }\n}\n\n// Given hosts with weights 1, 2 and 3, and a sufficiently large ring, expect that requests will\n// distribute to the hosts with approximately the right 
proportion.\nTEST_P(RingHashLoadBalancerTest, HostWeightedLargeRing) {\n  hostSet().hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:90\", 1),\n                      makeTestHost(info_, \"tcp://127.0.0.1:91\", 2),\n                      makeTestHost(info_, \"tcp://127.0.0.1:92\", 3)};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(6144);\n  init();\n  EXPECT_EQ(6144, lb_->stats().size_.value());\n  EXPECT_EQ(1024, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(3072, lb_->stats().max_hashes_per_host_.value());\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // Generate 6000 hashes around the ring and populate a histogram of which hosts they mapped to...\n  uint32_t counts[3] = {0};\n  for (uint32_t i = 0; i < 6000; ++i) {\n    TestLoadBalancerContext context(i * (std::numeric_limits<uint64_t>::max() / 6000));\n    uint32_t port = lb->chooseHost(&context)->address()->ip()->port();\n    ++counts[port - 90];\n  }\n\n  EXPECT_EQ(987, counts[0]);  // :90 | ~1000 expected hits\n  EXPECT_EQ(1932, counts[1]); // :91 | ~2000 expected hits\n  EXPECT_EQ(3081, counts[2]); // :92 | ~3000 expected hits\n}\n\n// Given locality weights all 0, expect the same behavior as if no hosts were provided at all.\nTEST_P(RingHashLoadBalancerTest, ZeroLocalityWeights) {\n  hostSet().hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n                      makeTestHost(info_, \"tcp://127.0.0.1:91\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().hosts_per_locality_ =\n      makeHostsPerLocality({{hostSet().hosts_[0]}, {hostSet().hosts_[1]}});\n  hostSet().healthy_hosts_per_locality_ = hostSet().hosts_per_locality_;\n  hostSet().locality_weights_ = makeLocalityWeights({0, 0});\n  hostSet().runCallbacks({}, {});\n\n  init();\n  EXPECT_EQ(nullptr, 
lb_->factory()->create()->chooseHost(nullptr));\n}\n\n// Given localities with weights 1, 2, 3 and 0, and a ring size of exactly 6, expect the correct\n// number of hashes for each host.\nTEST_P(RingHashLoadBalancerTest, LocalityWeightedTinyRing) {\n  hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:90\"), makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:92\"), makeTestHost(info_, \"tcp://127.0.0.1:93\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().hosts_per_locality_ = makeHostsPerLocality(\n      {{hostSet().hosts_[0]}, {hostSet().hosts_[1]}, {hostSet().hosts_[2]}, {hostSet().hosts_[3]}});\n  hostSet().healthy_hosts_per_locality_ = hostSet().hosts_per_locality_;\n  hostSet().locality_weights_ = makeLocalityWeights({1, 2, 3, 0});\n  hostSet().runCallbacks({}, {});\n\n  // enforce a ring size of exactly six entries\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(6);\n  config_.value().mutable_maximum_ring_size()->set_value(6);\n  init();\n  EXPECT_EQ(6, lb_->stats().size_.value());\n  EXPECT_EQ(1, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(3, lb_->stats().max_hashes_per_host_.value());\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // :90 should appear once, :91 should appear twice, :92 should appear three times,\n  // and :93 shouldn't appear at all.\n  absl::node_hash_map<uint64_t, uint32_t> expected{\n      {928266305478181108UL, 2},  {4443673547860492590UL, 2},  {5583722120771150861UL, 1},\n      {6311230543546372928UL, 1}, {13444792449719432967UL, 2}, {16117243373044804889UL, 0}};\n  for (const auto& entry : expected) {\n    TestLoadBalancerContext context(entry.first);\n    EXPECT_EQ(hostSet().hosts_[entry.second], lb->chooseHost(&context));\n  }\n}\n\n// Given localities with weights 1, 2, 3 and 0, and a sufficiently large ring, expect that requests\n// will distribute to the hosts 
with approximately the right proportion.\nTEST_P(RingHashLoadBalancerTest, LocalityWeightedLargeRing) {\n  hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:90\"), makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:92\"), makeTestHost(info_, \"tcp://127.0.0.1:93\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().hosts_per_locality_ = makeHostsPerLocality(\n      {{hostSet().hosts_[0]}, {hostSet().hosts_[1]}, {hostSet().hosts_[2]}, {hostSet().hosts_[3]}});\n  hostSet().healthy_hosts_per_locality_ = hostSet().hosts_per_locality_;\n  hostSet().locality_weights_ = makeLocalityWeights({1, 2, 3, 0});\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(6144);\n  init();\n  EXPECT_EQ(6144, lb_->stats().size_.value());\n  EXPECT_EQ(1024, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(3072, lb_->stats().max_hashes_per_host_.value());\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // Generate 6000 hashes around the ring and populate a histogram of which hosts they mapped to...\n  uint32_t counts[4] = {0};\n  for (uint32_t i = 0; i < 6000; ++i) {\n    TestLoadBalancerContext context(i * (std::numeric_limits<uint64_t>::max() / 6000));\n    uint32_t port = lb->chooseHost(&context)->address()->ip()->port();\n    ++counts[port - 90];\n  }\n\n  EXPECT_EQ(987, counts[0]);  // :90 | ~1000 expected hits\n  EXPECT_EQ(1932, counts[1]); // :91 | ~2000 expected hits\n  EXPECT_EQ(3081, counts[2]); // :92 | ~3000 expected hits\n  EXPECT_EQ(0, counts[3]);    // :93 |    =0 expected hits\n}\n\n// Given both host weights and locality weights, expect the correct number of hashes for each host.\nTEST_P(RingHashLoadBalancerTest, HostAndLocalityWeightedTinyRing) {\n  // :90 and :91 have a 1:2 ratio within the first locality, :92 and :93 have a 1:2 ratio within the\n  // second locality, and the two 
localities have a 1:2 ratio overall.\n  hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:90\", 1), makeTestHost(info_, \"tcp://127.0.0.1:91\", 2),\n      makeTestHost(info_, \"tcp://127.0.0.1:92\", 1), makeTestHost(info_, \"tcp://127.0.0.1:93\", 2)};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().hosts_per_locality_ = makeHostsPerLocality(\n      {{hostSet().hosts_[0], hostSet().hosts_[1]}, {hostSet().hosts_[2], hostSet().hosts_[3]}});\n  hostSet().healthy_hosts_per_locality_ = hostSet().hosts_per_locality_;\n  hostSet().locality_weights_ = makeLocalityWeights({1, 2});\n  hostSet().runCallbacks({}, {});\n\n  // enforce a ring size of exactly 9 entries\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(9);\n  config_.value().mutable_maximum_ring_size()->set_value(9);\n  init();\n  EXPECT_EQ(9, lb_->stats().size_.value());\n  EXPECT_EQ(1, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(4, lb_->stats().max_hashes_per_host_.value());\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // :90 should appear once, :91 and :92 should each appear two times, and :93 should appear four\n  // times, to get the correct overall proportions.\n  absl::node_hash_map<uint64_t, uint32_t> expected{\n      {928266305478181108UL, 2},   {3851675632748031481UL, 3},  {5583722120771150861UL, 1},\n      {6311230543546372928UL, 1},  {7700377290971790572UL, 3},  {12559126875973811811UL, 3},\n      {13444792449719432967UL, 2}, {13784988426630141778UL, 3}, {16117243373044804889UL, 0}};\n  for (const auto& entry : expected) {\n    TestLoadBalancerContext context(entry.first);\n    EXPECT_EQ(hostSet().hosts_[entry.second], lb->chooseHost(&context));\n  }\n}\n\n// Given both host weights and locality weights, and a sufficiently large ring, expect that requests\n// will distribute to the hosts with approximately the right proportion.\nTEST_P(RingHashLoadBalancerTest, 
HostAndLocalityWeightedLargeRing) {\n  // :90 and :91 have a 1:2 ratio within the first locality, :92 and :93 have a 1:2 ratio within the\n  // second locality, and the two localities have a 1:2 ratio overall.\n  hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:90\", 1), makeTestHost(info_, \"tcp://127.0.0.1:91\", 2),\n      makeTestHost(info_, \"tcp://127.0.0.1:92\", 1), makeTestHost(info_, \"tcp://127.0.0.1:93\", 2)};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().hosts_per_locality_ = makeHostsPerLocality(\n      {{hostSet().hosts_[0], hostSet().hosts_[1]}, {hostSet().hosts_[2], hostSet().hosts_[3]}});\n  hostSet().healthy_hosts_per_locality_ = hostSet().hosts_per_locality_;\n  hostSet().locality_weights_ = makeLocalityWeights({1, 2});\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(9216);\n  init();\n  EXPECT_EQ(9216, lb_->stats().size_.value());\n  EXPECT_EQ(1024, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(4096, lb_->stats().max_hashes_per_host_.value());\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // Generate 9000 hashes around the ring and populate a histogram of which hosts they mapped to...\n  uint32_t counts[4] = {0};\n  for (uint32_t i = 0; i < 9000; ++i) {\n    TestLoadBalancerContext context(i * (std::numeric_limits<uint64_t>::max() / 9000));\n    uint32_t port = lb->chooseHost(&context)->address()->ip()->port();\n    ++counts[port - 90];\n  }\n\n  EXPECT_EQ(924, counts[0]);  // :90 | ~1000 expected hits\n  EXPECT_EQ(2009, counts[1]); // :91 | ~2000 expected hits\n  EXPECT_EQ(2053, counts[2]); // :92 | ~2000 expected hits\n  EXPECT_EQ(4014, counts[3]); // :93 | ~4000 expected hits\n}\n\n// Given 4 hosts and a ring size of exactly 2, expect that 2 hosts will be present in the ring and\n// the other 2 hosts will be absent.\nTEST_P(RingHashLoadBalancerTest, SmallFractionalScale) {\n  
hostSet().hosts_ = {\n      makeTestHost(info_, \"tcp://127.0.0.1:90\"), makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:92\"), makeTestHost(info_, \"tcp://127.0.0.1:93\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(2);\n  config_.value().mutable_maximum_ring_size()->set_value(2);\n  init();\n  EXPECT_EQ(2, lb_->stats().size_.value());\n  EXPECT_EQ(0, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(1, lb_->stats().max_hashes_per_host_.value());\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // Generate some reasonable number of hashes around the ring and populate a histogram of which\n  // hosts they mapped to. Here we don't care about the distribution (because the scale is\n  // intentionally stupidly low), other than to verify that two of the hosts are absent.\n  uint32_t counts[4] = {0};\n  for (uint32_t i = 0; i < 1024; ++i) {\n    TestLoadBalancerContext context(i * (std::numeric_limits<uint64_t>::max() / 1024));\n    uint32_t port = lb->chooseHost(&context)->address()->ip()->port();\n    ++counts[port - 90];\n  }\n\n  uint32_t zeroes = 0;\n  uint32_t sum = 0;\n  for (auto count : counts) {\n    if (count == 0) {\n      ++zeroes;\n    } else {\n      sum += count;\n    }\n  }\n  EXPECT_EQ(2, zeroes); // two hosts (we don't care which ones) should get no traffic\n  EXPECT_EQ(1024, sum); // the other two hosts should get all the traffic\n}\n\n// Given 2 hosts and a ring size of exactly 1023, expect that one host will have 511 entries and the\n// other will have 512.\nTEST_P(RingHashLoadBalancerTest, LargeFractionalScale) {\n  hostSet().hosts_ = {makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n                      makeTestHost(info_, \"tcp://127.0.0.1:91\")};\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().runCallbacks({}, 
{});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(1023);\n  config_.value().mutable_maximum_ring_size()->set_value(1023);\n  init();\n  EXPECT_EQ(1023, lb_->stats().size_.value());\n  EXPECT_EQ(511, lb_->stats().min_hashes_per_host_.value());\n  EXPECT_EQ(512, lb_->stats().max_hashes_per_host_.value());\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // Generate 1023 hashes around the ring and populate a histogram of which hosts they mapped to...\n  uint32_t counts[2] = {0};\n  for (uint32_t i = 0; i < 1023; ++i) {\n    TestLoadBalancerContext context(i * (std::numeric_limits<uint64_t>::max() / 1023));\n    uint32_t port = lb->chooseHost(&context)->address()->ip()->port();\n    ++counts[port - 90];\n  }\n\n  EXPECT_EQ(526, counts[0]); // :90 | ~512 expected hits\n  EXPECT_EQ(497, counts[1]); // :91 | ~511 expected hits\n}\n\n// Given extremely lopsided locality weights, and a ring that isn't large enough to fit all hosts,\n// expect that the correct proportion of hosts will be present in the ring.\nTEST_P(RingHashLoadBalancerTest, LopsidedWeightSmallScale) {\n  hostSet().hosts_.clear();\n  HostVector heavy_but_sparse, light_but_dense;\n  for (uint32_t i = 0; i < 1024; ++i) {\n    auto host(makeTestHost(info_, fmt::format(\"tcp://127.0.0.1:{}\", i)));\n    hostSet().hosts_.push_back(host);\n    (i == 0 ? 
heavy_but_sparse : light_but_dense).push_back(host);\n  }\n  hostSet().healthy_hosts_ = hostSet().hosts_;\n  hostSet().hosts_per_locality_ = makeHostsPerLocality({heavy_but_sparse, light_but_dense});\n  hostSet().healthy_hosts_per_locality_ = hostSet().hosts_per_locality_;\n  hostSet().locality_weights_ = makeLocalityWeights({127, 1});\n  hostSet().runCallbacks({}, {});\n\n  config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig();\n  config_.value().mutable_minimum_ring_size()->set_value(1024);\n  config_.value().mutable_maximum_ring_size()->set_value(1024);\n  init();\n  EXPECT_EQ(1024, lb_->stats().size_.value());\n  EXPECT_EQ(0, lb_->stats().min_hashes_per_host_.value());\n  // Host :0, from the heavy-but-sparse locality, should have 1016 out of the 1024 entries on the\n  // ring, which gives us the right ratio of 127/128.\n  EXPECT_EQ(1016, lb_->stats().max_hashes_per_host_.value());\n  LoadBalancerPtr lb = lb_->factory()->create();\n\n  // Every 128th host in the light-but-dense locality should have an entry on the ring, for a total\n  // of 8 entries. This gives us the right ratio of 1/128.\n  absl::node_hash_map<uint64_t, uint32_t> expected{\n      {11664790346325243808UL, 1},   {15894554872961148518UL, 128}, {13958138884277627155UL, 256},\n      {15803774069438192949UL, 384}, {3829253010855396576UL, 512},  {17918147347826565154UL, 640},\n      {6442769608292299103UL, 768},  {5881074926069334434UL, 896}};\n  for (const auto& entry : expected) {\n    TestLoadBalancerContext context(entry.first);\n    EXPECT_EQ(hostSet().hosts_[entry.second], lb->chooseHost(&context));\n  }\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/subset_lb_test.cc",
    "content": "#include <algorithm>\n#include <initializer_list>\n#include <map>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/config/metadata.h\"\n#include \"common/upstream/subset_lb.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/filesystem/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/load_balancer.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n\n#include \"absl/types/optional.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass SubsetLoadBalancerDescribeMetadataTester {\npublic:\n  SubsetLoadBalancerDescribeMetadataTester(std::shared_ptr<SubsetLoadBalancer> lb) : lb_(lb) {}\n\n  using MetadataVector = std::vector<std::pair<std::string, ProtobufWkt::Value>>;\n\n  void test(std::string expected, const MetadataVector& metadata) {\n    const SubsetLoadBalancer::SubsetMetadata& subset_metadata(metadata);\n    EXPECT_EQ(expected, lb_.get()->describeMetadata(subset_metadata));\n  }\n\nprivate:\n  std::shared_ptr<SubsetLoadBalancer> lb_;\n};\n\nnamespace SubsetLoadBalancerTest {\n\nclass TestMetadataMatchCriterion : public Router::MetadataMatchCriterion {\npublic:\n  TestMetadataMatchCriterion(const std::string& name, const HashedValue& value)\n      : name_(name), value_(value) {}\n\n  const std::string& name() const override { return name_; }\n  const HashedValue& value() const override { return value_; }\n\nprivate:\n  std::string name_;\n  
HashedValue value_;\n};\n\nclass TestMetadataMatchCriteria : public Router::MetadataMatchCriteria {\npublic:\n  TestMetadataMatchCriteria(const std::map<std::string, std::string> matches) {\n    for (const auto& it : matches) {\n      ProtobufWkt::Value v;\n      v.set_string_value(it.second);\n\n      matches_.emplace_back(\n          std::make_shared<const TestMetadataMatchCriterion>(it.first, HashedValue(v)));\n    }\n  }\n\n  const std::vector<Router::MetadataMatchCriterionConstSharedPtr>&\n  metadataMatchCriteria() const override {\n    return matches_;\n  }\n\n  Router::MetadataMatchCriteriaConstPtr\n  mergeMatchCriteria(const ProtobufWkt::Struct&) const override {\n    return nullptr;\n  }\n\n  Router::MetadataMatchCriteriaConstPtr\n  filterMatchCriteria(const std::set<std::string>& names) const override {\n    auto new_criteria = std::make_unique<TestMetadataMatchCriteria>(*this);\n    for (auto it = new_criteria->matches_.begin(); it != new_criteria->matches_.end();) {\n      if (names.count(it->get()->name()) == 0) {\n        it = new_criteria->matches_.erase(it);\n      } else {\n        it++;\n      }\n    }\n    return new_criteria;\n  }\n\nprivate:\n  std::vector<Router::MetadataMatchCriterionConstSharedPtr> matches_;\n};\n\nclass TestLoadBalancerContext : public LoadBalancerContextBase {\npublic:\n  TestLoadBalancerContext(\n      std::initializer_list<std::map<std::string, std::string>::value_type> metadata_matches)\n      : matches_(\n            new TestMetadataMatchCriteria(std::map<std::string, std::string>(metadata_matches))) {}\n\n  // Upstream::LoadBalancerContext\n  absl::optional<uint64_t> computeHashKey() override { return {}; }\n  const Network::Connection* downstreamConnection() const override { return nullptr; }\n  const Router::MetadataMatchCriteria* metadataMatchCriteria() override { return matches_.get(); }\n  const Http::RequestHeaderMap* downstreamHeaders() const override { return nullptr; }\n\nprivate:\n  const 
std::shared_ptr<Router::MetadataMatchCriteria> matches_;\n};\n\nenum class UpdateOrder { RemovesFirst, Simultaneous };\n\nclass SubsetLoadBalancerTest : public testing::TestWithParam<UpdateOrder> {\npublic:\n  SubsetLoadBalancerTest()\n      : scope_(stats_store_.createScope(\"testprefix\")),\n        stats_(ClusterInfoImpl::generateStats(stats_store_)) {\n    stats_.max_host_weight_.set(1UL);\n    least_request_lb_config_.mutable_choice_count()->set_value(2);\n  }\n\n  using HostMetadata = std::map<std::string, std::string>;\n  using HostListMetadata = std::map<std::string, std::vector<std::string>>;\n  using HostURLMetadataMap = std::map<std::string, HostMetadata>;\n\n  void init() {\n    init({\n        {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n        {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}}},\n    });\n  }\n\n  void configureHostSet(const HostURLMetadataMap& host_metadata, MockHostSet& host_set) {\n    HostVector hosts;\n    for (const auto& it : host_metadata) {\n      hosts.emplace_back(makeHost(it.first, it.second));\n    }\n\n    host_set.hosts_ = hosts;\n    host_set.hosts_per_locality_ = makeHostsPerLocality({hosts});\n    host_set.healthy_hosts_ = host_set.hosts_;\n    host_set.healthy_hosts_per_locality_ = host_set.hosts_per_locality_;\n  }\n\n  void configureWeightedHostSet(const HostURLMetadataMap& first_locality_host_metadata,\n                                const HostURLMetadataMap& second_locality_host_metadata,\n                                MockHostSet& host_set, LocalityWeights locality_weights) {\n    HostVector first_locality;\n    HostVector all_hosts;\n    for (const auto& it : first_locality_host_metadata) {\n      auto host = makeHost(it.first, it.second);\n      first_locality.emplace_back(host);\n      all_hosts.emplace_back(host);\n    }\n\n    HostVector second_locality;\n    for (const auto& it : second_locality_host_metadata) {\n      auto host = makeHost(it.first, it.second);\n      
second_locality.emplace_back(host);\n      all_hosts.emplace_back(host);\n    }\n\n    host_set.hosts_ = all_hosts;\n    host_set.hosts_per_locality_ = makeHostsPerLocality({first_locality, second_locality});\n    host_set.healthy_hosts_ = host_set.hosts_;\n    host_set.healthy_hosts_per_locality_ = host_set.hosts_per_locality_;\n    host_set.locality_weights_ = std::make_shared<const LocalityWeights>(locality_weights);\n  }\n\n  void init(const HostURLMetadataMap& host_metadata) {\n    HostURLMetadataMap failover;\n    init(host_metadata, failover);\n  }\n\n  void init(const HostURLMetadataMap& host_metadata,\n            const HostURLMetadataMap& failover_host_metadata) {\n    EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true));\n\n    configureHostSet(host_metadata, host_set_);\n    if (!failover_host_metadata.empty()) {\n      configureHostSet(failover_host_metadata, *priority_set_.getMockHostSet(1));\n    }\n\n    lb_ = std::make_shared<SubsetLoadBalancer>(\n        lb_type_, priority_set_, nullptr, stats_, *scope_, runtime_, random_, subset_info_,\n        ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_);\n  }\n\n  void zoneAwareInit(const std::vector<HostURLMetadataMap>& host_metadata_per_locality,\n                     const std::vector<HostURLMetadataMap>& local_host_metadata_per_locality) {\n    EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true));\n\n    HostVector hosts;\n    std::vector<HostVector> hosts_per_locality;\n    for (const auto& host_metadata : host_metadata_per_locality) {\n      HostVector locality_hosts;\n      for (const auto& host_entry : host_metadata) {\n        HostSharedPtr host = makeHost(host_entry.first, host_entry.second);\n        hosts.emplace_back(host);\n        locality_hosts.emplace_back(host);\n      }\n      hosts_per_locality.emplace_back(locality_hosts);\n    }\n\n    host_set_.hosts_ = hosts;\n    host_set_.hosts_per_locality_ = 
makeHostsPerLocality(std::move(hosts_per_locality));\n\n    host_set_.healthy_hosts_ = host_set_.hosts_;\n    host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_;\n\n    local_hosts_ = std::make_shared<HostVector>();\n    std::vector<HostVector> local_hosts_per_locality_vector;\n    for (const auto& local_host_metadata : local_host_metadata_per_locality) {\n      HostVector local_locality_hosts;\n      for (const auto& host_entry : local_host_metadata) {\n        HostSharedPtr host = makeHost(host_entry.first, host_entry.second);\n        local_hosts_->emplace_back(host);\n        local_locality_hosts.emplace_back(host);\n      }\n      local_hosts_per_locality_vector.emplace_back(local_locality_hosts);\n    }\n    local_hosts_per_locality_ = makeHostsPerLocality(std::move(local_hosts_per_locality_vector));\n\n    local_priority_set_.updateHosts(\n        0,\n        HostSetImpl::updateHostsParams(\n            local_hosts_, local_hosts_per_locality_,\n            std::make_shared<HealthyHostVector>(*local_hosts_), local_hosts_per_locality_,\n            std::make_shared<DegradedHostVector>(), HostsPerLocalityImpl::empty(),\n            std::make_shared<ExcludedHostVector>(), HostsPerLocalityImpl::empty()),\n        {}, {}, {}, absl::nullopt);\n\n    lb_ = std::make_shared<SubsetLoadBalancer>(lb_type_, priority_set_, &local_priority_set_,\n                                               stats_, *scope_, runtime_, random_, subset_info_,\n                                               ring_hash_lb_config_, maglev_lb_config_,\n                                               least_request_lb_config_, common_config_);\n  }\n\n  HostSharedPtr makeHost(const std::string& url, const HostMetadata& metadata) {\n    envoy::config::core::v3::Metadata m;\n    for (const auto& m_it : metadata) {\n      Config::Metadata::mutableMetadataValue(m, Config::MetadataFilters::get().ENVOY_LB, m_it.first)\n          .set_string_value(m_it.second);\n    }\n\n    return 
makeTestHost(info_, url, m);\n  }\n  HostSharedPtr makeHost(const std::string& url, const HostListMetadata& metadata) {\n    envoy::config::core::v3::Metadata m;\n    for (const auto& m_it : metadata) {\n      auto& metadata = Config::Metadata::mutableMetadataValue(\n          m, Config::MetadataFilters::get().ENVOY_LB, m_it.first);\n      for (const auto& value : m_it.second) {\n        metadata.mutable_list_value()->add_values()->set_string_value(value);\n      }\n    }\n\n    return makeTestHost(info_, url, m);\n  }\n\n  ProtobufWkt::Struct makeDefaultSubset(HostMetadata metadata) {\n    ProtobufWkt::Struct default_subset;\n\n    auto* fields = default_subset.mutable_fields();\n    for (const auto& it : metadata) {\n      ProtobufWkt::Value v;\n      v.set_string_value(it.second);\n      fields->insert({it.first, v});\n    }\n\n    return default_subset;\n  }\n\n  SubsetSelectorPtr\n  makeSelector(const std::set<std::string>& selector_keys,\n               envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::\n                   LbSubsetSelectorFallbackPolicy fallback_policy,\n               const std::set<std::string>& fallback_keys_subset,\n               bool single_host_per_subset = false) {\n    Protobuf::RepeatedPtrField<std::string> selector_keys_mapped;\n    for (const auto& it : selector_keys) {\n      selector_keys_mapped.Add(std::string(it));\n    }\n\n    Protobuf::RepeatedPtrField<std::string> fallback_keys_subset_mapped;\n    for (const auto& it : fallback_keys_subset) {\n      fallback_keys_subset_mapped.Add(std::string(it));\n    }\n\n    return std::make_shared<SubsetSelectorImpl>(\n        selector_keys_mapped, fallback_policy, fallback_keys_subset_mapped, single_host_per_subset);\n  }\n\n  SubsetSelectorPtr makeSelector(\n      const std::set<std::string>& selector_keys,\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::\n          LbSubsetSelectorFallbackPolicy fallback_policy =\n              
envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED) {\n    return makeSelector(selector_keys, fallback_policy, {});\n  }\n\n  void modifyHosts(HostVector add, HostVector remove, absl::optional<uint32_t> add_in_locality = {},\n                   uint32_t priority = 0) {\n    MockHostSet& host_set = *priority_set_.getMockHostSet(priority);\n    for (const auto& host : remove) {\n      auto it = std::find(host_set.hosts_.begin(), host_set.hosts_.end(), host);\n      if (it != host_set.hosts_.end()) {\n        host_set.hosts_.erase(it);\n      }\n      host_set.healthy_hosts_ = host_set.hosts_;\n\n      std::vector<HostVector> locality_hosts_copy = host_set.hosts_per_locality_->get();\n      for (auto& locality_hosts : locality_hosts_copy) {\n        auto it = std::find(locality_hosts.begin(), locality_hosts.end(), host);\n        if (it != locality_hosts.end()) {\n          locality_hosts.erase(it);\n        }\n      }\n      host_set.hosts_per_locality_ = makeHostsPerLocality(std::move(locality_hosts_copy));\n      host_set.healthy_hosts_per_locality_ = host_set.hosts_per_locality_;\n    }\n\n    if (GetParam() == UpdateOrder::RemovesFirst && !remove.empty()) {\n      host_set.runCallbacks({}, remove);\n    }\n\n    for (const auto& host : add) {\n      host_set.hosts_.emplace_back(host);\n      host_set.healthy_hosts_ = host_set.hosts_;\n\n      if (add_in_locality) {\n        std::vector<HostVector> locality_hosts_copy = host_set.hosts_per_locality_->get();\n        locality_hosts_copy[add_in_locality.value()].emplace_back(host);\n        host_set.hosts_per_locality_ = makeHostsPerLocality(std::move(locality_hosts_copy));\n        host_set.healthy_hosts_per_locality_ = host_set.hosts_per_locality_;\n      }\n    }\n\n    if (GetParam() == UpdateOrder::RemovesFirst) {\n      if (!add.empty()) {\n        host_set_.runCallbacks(add, {});\n      }\n    } else if (!add.empty() || !remove.empty()) {\n      host_set_.runCallbacks(add, 
remove);\n    }\n  }\n\n  void modifyLocalHosts(HostVector add, HostVector remove, uint32_t add_in_locality) {\n    for (const auto& host : remove) {\n      auto it = std::find(local_hosts_->begin(), local_hosts_->end(), host);\n      if (it != local_hosts_->end()) {\n        local_hosts_->erase(it);\n      }\n\n      std::vector<HostVector> locality_hosts_copy = local_hosts_per_locality_->get();\n      for (auto& locality_hosts : locality_hosts_copy) {\n        auto it = std::find(locality_hosts.begin(), locality_hosts.end(), host);\n        if (it != locality_hosts.end()) {\n          locality_hosts.erase(it);\n        }\n      }\n      local_hosts_per_locality_ = makeHostsPerLocality(std::move(locality_hosts_copy));\n    }\n\n    if (GetParam() == UpdateOrder::RemovesFirst && !remove.empty()) {\n      local_priority_set_.updateHosts(\n          0,\n          updateHostsParams(local_hosts_, local_hosts_per_locality_,\n                            std::make_shared<HealthyHostVector>(*local_hosts_),\n                            local_hosts_per_locality_),\n          {}, {}, remove, absl::nullopt);\n    }\n\n    for (const auto& host : add) {\n      local_hosts_->emplace_back(host);\n      std::vector<HostVector> locality_hosts_copy = local_hosts_per_locality_->get();\n      locality_hosts_copy[add_in_locality].emplace_back(host);\n      local_hosts_per_locality_ = makeHostsPerLocality(std::move(locality_hosts_copy));\n    }\n\n    if (GetParam() == UpdateOrder::RemovesFirst) {\n      if (!add.empty()) {\n        local_priority_set_.updateHosts(\n            0,\n            updateHostsParams(local_hosts_, local_hosts_per_locality_,\n                              std::make_shared<HealthyHostVector>(*local_hosts_),\n                              local_hosts_per_locality_),\n            {}, add, {}, absl::nullopt);\n      }\n    } else if (!add.empty() || !remove.empty()) {\n      local_priority_set_.updateHosts(\n          0,\n          updateHostsParams(local_hosts_, 
local_hosts_per_locality_,\n                            std::make_shared<const HealthyHostVector>(*local_hosts_),\n                            local_hosts_per_locality_),\n          {}, add, remove, absl::nullopt);\n    }\n  }\n\n  void doLbTypeTest(LoadBalancerType type) {\n    EXPECT_CALL(subset_info_, fallbackPolicy())\n        .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n    lb_type_ = type;\n    init({{\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}}});\n\n    EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n    HostSharedPtr added_host = makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"1.0\"}});\n    modifyHosts({added_host}, {host_set_.hosts_.back()});\n\n    EXPECT_EQ(added_host, lb_->chooseHost(nullptr));\n  }\n\n  MetadataConstSharedPtr buildMetadata(const std::string& version, bool is_default = false) const {\n    envoy::config::core::v3::Metadata metadata;\n\n    if (!version.empty()) {\n      Envoy::Config::Metadata::mutableMetadataValue(\n          metadata, Config::MetadataFilters::get().ENVOY_LB, \"version\")\n          .set_string_value(version);\n    }\n\n    if (is_default) {\n      Envoy::Config::Metadata::mutableMetadataValue(\n          metadata, Config::MetadataFilters::get().ENVOY_LB, \"default\")\n          .set_string_value(\"true\");\n    }\n\n    return std::make_shared<const envoy::config::core::v3::Metadata>(metadata);\n  }\n\n  MetadataConstSharedPtr buildMetadataWithStage(const std::string& version,\n                                                const std::string& stage = \"\") const {\n    envoy::config::core::v3::Metadata metadata;\n\n    if (!version.empty()) {\n      Envoy::Config::Metadata::mutableMetadataValue(\n          metadata, Config::MetadataFilters::get().ENVOY_LB, \"version\")\n          .set_string_value(version);\n    }\n\n    if (!stage.empty()) {\n      Envoy::Config::Metadata::mutableMetadataValue(\n          metadata, 
Config::MetadataFilters::get().ENVOY_LB, \"stage\")\n          .set_string_value(stage);\n    }\n\n    return std::make_shared<const envoy::config::core::v3::Metadata>(metadata);\n  }\n\n  LoadBalancerType lb_type_{LoadBalancerType::RoundRobin};\n  NiceMock<MockPrioritySet> priority_set_;\n  MockHostSet& host_set_ = *priority_set_.getMockHostSet(0);\n  NiceMock<MockLoadBalancerSubsetInfo> subset_info_;\n  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};\n  envoy::config::cluster::v3::Cluster::RingHashLbConfig ring_hash_lb_config_;\n  envoy::config::cluster::v3::Cluster::MaglevLbConfig maglev_lb_config_;\n  envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config_;\n  envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  Stats::IsolatedStoreImpl stats_store_;\n  Stats::ScopePtr scope_;\n  ClusterStats stats_;\n  PrioritySetImpl local_priority_set_;\n  HostVectorSharedPtr local_hosts_;\n  HostsPerLocalitySharedPtr local_hosts_per_locality_;\n  std::shared_ptr<SubsetLoadBalancer> lb_;\n};\n\nTEST_F(SubsetLoadBalancerTest, NoFallback) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  init();\n\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n  EXPECT_EQ(0U, stats_.lb_subsets_fallback_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_selected_.value());\n}\n\n// Validate that SubsetLoadBalancer unregisters its priority set member update\n// callback. 
Regression for heap-use-after-free.\nTEST_F(SubsetLoadBalancerTest, DeregisterCallbacks) {\n  init();\n  lb_.reset();\n  host_set_.runCallbacks({}, {});\n}\n\nTEST_P(SubsetLoadBalancerTest, NoFallbackAfterUpdate) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  init();\n\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n\n  modifyHosts({makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"1.0\"}})}, {host_set_.hosts_.back()});\n\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n}\n\nTEST_F(SubsetLoadBalancerTest, FallbackAnyEndpoint) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  init();\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(1U, stats_.lb_subsets_fallback_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_selected_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, FallbackAnyEndpointAfterUpdate) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  init();\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n\n  HostSharedPtr added_host = makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"1.0\"}});\n  modifyHosts({added_host}, {host_set_.hosts_.back()});\n\n  EXPECT_EQ(added_host, lb_->chooseHost(nullptr));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n}\n\nTEST_F(SubsetLoadBalancerTest, FallbackDefaultSubset) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"version\", \"default\"}});\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", 
\"new\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"default\"}}},\n  });\n\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(1U, stats_.lb_subsets_fallback_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_selected_.value());\n}\n\nTEST_F(SubsetLoadBalancerTest, FallbackPanicMode) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n  EXPECT_CALL(subset_info_, panicModeAny()).WillRepeatedly(Return(true));\n\n  // The default subset will be empty.\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"version\", \"none\"}});\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"new\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"default\"}}},\n  });\n\n  EXPECT_TRUE(lb_->chooseHost(nullptr) != nullptr);\n  EXPECT_EQ(1U, stats_.lb_subsets_fallback_panic_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_fallback_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_selected_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, FallbackPanicModeWithUpdates) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n  EXPECT_CALL(subset_info_, panicModeAny()).WillRepeatedly(Return(true));\n\n  // The default subset will be empty.\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"version\", \"none\"}});\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n\n  init({{\"tcp://127.0.0.1:80\", {{\"version\", \"default\"}}}});\n  EXPECT_TRUE(lb_->chooseHost(nullptr) != nullptr);\n\n  // Removing current host, adding a new one.\n  HostSharedPtr added_host = makeHost(\"tcp://127.0.0.2:8000\", {{\"version\", \"new\"}});\n  modifyHosts({added_host}, {host_set_.hosts_[0]});\n\n  EXPECT_EQ(1, 
host_set_.hosts_.size());\n  EXPECT_EQ(added_host, lb_->chooseHost(nullptr));\n}\n\nTEST_P(SubsetLoadBalancerTest, FallbackDefaultSubsetAfterUpdate) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"version\", \"default\"}});\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"new\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"default\"}}},\n  });\n\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(nullptr));\n\n  HostSharedPtr added_host1 = makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"new\"}});\n  HostSharedPtr added_host2 = makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"default\"}});\n\n  modifyHosts({added_host1, added_host2}, {host_set_.hosts_.back()});\n\n  EXPECT_EQ(added_host2, lb_->chooseHost(nullptr));\n}\n\nTEST_F(SubsetLoadBalancerTest, FallbackEmptyDefaultSubsetConvertsToAnyEndpoint) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n\n  EXPECT_CALL(subset_info_, defaultSubset())\n      .WillRepeatedly(ReturnRef(ProtobufWkt::Struct::default_instance()));\n\n  init();\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(nullptr));\n  EXPECT_EQ(2U, stats_.lb_subsets_fallback_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_selected_.value());\n}\n\nTEST_F(SubsetLoadBalancerTest, FallbackOnUnknownMetadata) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  init();\n\n  TestLoadBalancerContext context_unknown_key({{\"unknown\", \"unknown\"}});\n  TestLoadBalancerContext context_unknown_value({{\"version\", 
\"unknown\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_unknown_key));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_unknown_value));\n}\n\nTEST_F(SubsetLoadBalancerTest, BalancesSubset) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"version\", \"1.1\"}}},\n      {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}}},\n  });\n\n  TestLoadBalancerContext context_10({{\"version\", \"1.0\"}});\n  TestLoadBalancerContext context_11({{\"version\", \"1.1\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_11));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_11));\n  EXPECT_EQ(0U, stats_.lb_subsets_fallback_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_selected_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, BalancesSubsetAfterUpdate) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n      
{\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"version\", \"1.1\"}}},\n      {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}}},\n  });\n\n  TestLoadBalancerContext context_10({{\"version\", \"1.0\"}});\n  TestLoadBalancerContext context_11({{\"version\", \"1.1\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_11));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_11));\n  EXPECT_EQ(2U, stats_.lb_subsets_created_.value());\n\n  modifyHosts({makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"1.2\"}}),\n               makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}})},\n              {host_set_.hosts_[1], host_set_.hosts_[2]});\n\n  TestLoadBalancerContext context_12({{\"version\", \"1.2\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_11));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_12));\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_created_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, ListAsAnyEnabled) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n  EXPECT_CALL(subset_info_, listAsAny()).WillRepeatedly(Return(true));\n\n  init({});\n  modifyHosts(\n      {makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", std::vector<std::string>{\"1.2.1\", \"1.2\"}}}),\n       
makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}})},\n      {}, {}, 0);\n\n  {\n    TestLoadBalancerContext context({{\"version\", \"1.0\"}});\n    EXPECT_TRUE(host_set_.hosts()[1] == lb_->chooseHost(&context));\n  }\n  {\n    TestLoadBalancerContext context({{\"version\", \"1.2\"}});\n    EXPECT_TRUE(host_set_.hosts()[0] == lb_->chooseHost(&context));\n  }\n  TestLoadBalancerContext context({{\"version\", \"1.2.1\"}});\n  EXPECT_TRUE(host_set_.hosts()[0] == lb_->chooseHost(&context));\n}\n\nTEST_P(SubsetLoadBalancerTest, ListAsAnyEnabledMultipleLists) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n  EXPECT_CALL(subset_info_, listAsAny()).WillRepeatedly(Return(true));\n\n  init({});\n  modifyHosts(\n      {makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", std::vector<std::string>{\"1.2.1\", \"1.2\"}}}),\n       makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", std::vector<std::string>{\"1.2.2\", \"1.2\"}}}),\n       makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}})},\n      {}, {}, 0);\n\n  {\n    TestLoadBalancerContext context({{\"version\", \"1.0\"}});\n    EXPECT_TRUE(host_set_.hosts()[2] == lb_->chooseHost(&context));\n    EXPECT_TRUE(host_set_.hosts()[2] == lb_->chooseHost(&context));\n  }\n  {\n    // This should LB between both hosts marked with version 1.2.\n    TestLoadBalancerContext context({{\"version\", \"1.2\"}});\n    EXPECT_TRUE(host_set_.hosts()[0] == lb_->chooseHost(&context));\n    EXPECT_TRUE(host_set_.hosts()[1] == lb_->chooseHost(&context));\n  }\n  {\n    // Choose a host multiple times to ensure that hosts()[0] is the *only*\n    // thing selected 
for this subset.\n    TestLoadBalancerContext context({{\"version\", \"1.2.1\"}});\n    EXPECT_TRUE(host_set_.hosts()[0] == lb_->chooseHost(&context));\n    EXPECT_TRUE(host_set_.hosts()[0] == lb_->chooseHost(&context));\n  }\n\n  TestLoadBalancerContext context({{\"version\", \"1.2.2\"}});\n  EXPECT_TRUE(host_set_.hosts()[1] == lb_->chooseHost(&context));\n}\n\nTEST_P(SubsetLoadBalancerTest, ListAsAnyEnabledMultipleListsForSingleHost) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\", \"hardware\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n  EXPECT_CALL(subset_info_, listAsAny()).WillRepeatedly(Return(true));\n\n  init({});\n  modifyHosts(\n      {makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", std::vector<std::string>{\"1.2.1\", \"1.2\"}},\n                                         {\"hardware\", std::vector<std::string>{\"a\", \"b\"}}}),\n       makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", std::vector<std::string>{\"1.1\", \"1.1.1\"}},\n                                         {\"hardware\", std::vector<std::string>{\"b\", \"c\"}}})},\n      {}, {}, 0);\n\n  {\n    TestLoadBalancerContext context({{\"version\", \"1.2\"}, {\"hardware\", \"a\"}});\n    EXPECT_TRUE(host_set_.hosts()[0] == lb_->chooseHost(&context));\n    EXPECT_TRUE(host_set_.hosts()[0] == lb_->chooseHost(&context));\n  }\n\n  {\n    TestLoadBalancerContext context({{\"version\", \"1.1\"}, {\"hardware\", \"b\"}});\n    EXPECT_TRUE(host_set_.hosts()[1] == lb_->chooseHost(&context));\n    EXPECT_TRUE(host_set_.hosts()[1] == lb_->chooseHost(&context));\n  }\n\n  {\n    TestLoadBalancerContext context({{\"version\", \"1.1\"}, {\"hardware\", \"a\"}});\n    
EXPECT_TRUE(nullptr == lb_->chooseHost(&context));\n  }\n\n  TestLoadBalancerContext context({{\"version\", \"1.2.1\"}, {\"hardware\", \"b\"}});\n  EXPECT_TRUE(host_set_.hosts()[0] == lb_->chooseHost(&context));\n  EXPECT_TRUE(host_set_.hosts()[0] == lb_->chooseHost(&context));\n}\n\nTEST_P(SubsetLoadBalancerTest, ListAsAnyDisable) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({});\n  modifyHosts(\n      {makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", std::vector<std::string>{\"1.2.1\", \"1.2\"}}}),\n       makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}})},\n      {}, {}, 0);\n\n  {\n    TestLoadBalancerContext context({{\"version\", \"1.0\"}});\n    EXPECT_TRUE(host_set_.hosts()[1] == lb_->chooseHost(&context));\n  }\n  TestLoadBalancerContext context({{\"version\", \"1.2\"}});\n  EXPECT_TRUE(nullptr == lb_->chooseHost(&context));\n}\n\n// Test that adding backends to a failover group causes no problems.\nTEST_P(SubsetLoadBalancerTest, UpdateFailover) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  TestLoadBalancerContext context_10({{\"version\", \"1.0\"}});\n\n  // Start with an empty lb. 
Choosing a host should result in failure.\n  init({});\n  EXPECT_TRUE(nullptr == lb_->chooseHost(&context_10).get());\n\n  // Add hosts to the group at priority 1.\n  // These hosts should be selected as there are no healthy hosts with priority 0\n  modifyHosts({makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"1.2\"}}),\n               makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}})},\n              {}, {}, 1);\n  EXPECT_FALSE(nullptr == lb_->chooseHost(&context_10).get());\n\n  // Finally update the priority 0 hosts. The LB should now select hosts.\n  modifyHosts({makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"1.2\"}}),\n               makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}})},\n              {}, {}, 0);\n  EXPECT_FALSE(nullptr == lb_->chooseHost(&context_10).get());\n}\n\nTEST_P(SubsetLoadBalancerTest, OnlyMetadataChanged) {\n  TestLoadBalancerContext context_10({{\"version\", \"1.0\"}});\n  TestLoadBalancerContext context_12({{\"version\", \"1.2\"}});\n  TestLoadBalancerContext context_13({{\"version\", \"1.3\"}});\n  TestLoadBalancerContext context_default({{\"default\", \"true\"}});\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED),\n      makeSelector(\n          {\"default\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"default\", \"true\"}});\n\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n\n  // Add hosts initial hosts.\n  init({{\"tcp://127.0.0.1:8000\", 
{{\"version\", \"1.2\"}}},\n        {\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}, {\"default\", \"true\"}}}});\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_13));\n\n  // Swap the default version.\n  host_set_.hosts_[0]->metadata(buildMetadata(\"1.2\", true));\n  host_set_.hosts_[1]->metadata(buildMetadata(\"1.0\"));\n\n  host_set_.runCallbacks({}, {});\n\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_13));\n\n  // Bump 1.0 to 1.3, one subset should be removed.\n  host_set_.hosts_[1]->metadata(buildMetadata(\"1.3\"));\n\n  // No hosts added nor removed, so we bypass modifyHosts().\n  host_set_.runCallbacks({}, {});\n\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(1U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_13));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10));\n\n  // Rollback from 1.3 to 1.0.\n  host_set_.hosts_[1]->metadata(buildMetadata(\"1.0\"));\n\n  host_set_.runCallbacks({}, {});\n\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  
EXPECT_EQ(5U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(2U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_13));\n\n  // Make 1.0 default again.\n  host_set_.hosts_[1]->metadata(buildMetadata(\"1.0\", true));\n  host_set_.hosts_[0]->metadata(buildMetadata(\"1.2\"));\n\n  host_set_.runCallbacks({}, {});\n\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(5U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(2U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_13));\n}\n\nTEST_P(SubsetLoadBalancerTest, EmptySubsetsPurged) {\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector({\"version\"}),\n                                                     makeSelector({\"version\", \"stage\"})};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  // Simple add and remove.\n  init({{\"tcp://127.0.0.1:8000\", {{\"version\", \"1.2\"}}},\n        {\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}, {\"stage\", \"prod\"}}}});\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_removed_.value());\n\n  host_set_.hosts_[0]->metadata(buildMetadataWithStage(\"1.3\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(1U, stats_.lb_subsets_removed_.value());\n\n  // Move host that was in the version + stage subset into a new 
version only subset.\n  host_set_.hosts_[1]->metadata(buildMetadataWithStage(\"1.4\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(2U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(5U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_removed_.value());\n\n  // Create a new version + stage subset.\n  host_set_.hosts_[1]->metadata(buildMetadataWithStage(\"1.5\", \"devel\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(7U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_removed_.value());\n\n  // Now move it back to its original version + stage subset.\n  host_set_.hosts_[1]->metadata(buildMetadataWithStage(\"1.0\", \"prod\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(9U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(6U, stats_.lb_subsets_removed_.value());\n\n  // Finally, remove the original version + stage subset again.\n  host_set_.hosts_[1]->metadata(buildMetadataWithStage(\"1.6\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(2U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(10U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(8U, stats_.lb_subsets_removed_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, EmptySubsetsPurgedCollapsed) {\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector({\"version\"}),\n                                                     makeSelector({\"version\", \"stage\"})};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  // Init subsets.\n  init({{\"tcp://127.0.0.1:8000\", {{\"version\", \"1.2\"}}},\n        {\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}, {\"stage\", \"prod\"}}}});\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_removed_.value());\n\n  // Get rid of 1.0.\n  
host_set_.hosts_[1]->metadata(buildMetadataWithStage(\"1.2\", \"prod\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(2U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(2U, stats_.lb_subsets_removed_.value());\n\n  // Get rid of stage prod.\n  host_set_.hosts_[1]->metadata(buildMetadataWithStage(\"1.2\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(1U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_removed_.value());\n\n  // Add stage prod back.\n  host_set_.hosts_[1]->metadata(buildMetadataWithStage(\"1.2\", \"prod\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(2U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(5U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_removed_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, EmptySubsetsPurgedVersionChanged) {\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector({\"version\"}),\n                                                     makeSelector({\"version\", \"stage\"})};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  // Init subsets.\n  init({{\"tcp://127.0.0.1:8000\", {{\"version\", \"1.2\"}}},\n        {\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}, {\"stage\", \"prod\"}}}});\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_removed_.value());\n\n  // Get rid of 1.0.\n  host_set_.hosts_[1]->metadata(buildMetadataWithStage(\"1.2\", \"prod\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(2U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(2U, stats_.lb_subsets_removed_.value());\n\n  // Change versions.\n  host_set_.hosts_[0]->metadata(buildMetadataWithStage(\"1.3\"));\n  host_set_.hosts_[1]->metadata(buildMetadataWithStage(\"1.4\", 
\"prod\"));\n  host_set_.runCallbacks({}, {});\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(7U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_removed_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, MetadataChangedHostsAddedRemoved) {\n  TestLoadBalancerContext context_10({{\"version\", \"1.0\"}});\n  TestLoadBalancerContext context_12({{\"version\", \"1.2\"}});\n  TestLoadBalancerContext context_13({{\"version\", \"1.3\"}});\n  TestLoadBalancerContext context_14({{\"version\", \"1.4\"}});\n  TestLoadBalancerContext context_default({{\"default\", \"true\"}});\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"default\", \"true\"}});\n\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED),\n      makeSelector(\n          {\"default\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  // Add hosts initial hosts.\n  init({{\"tcp://127.0.0.1:8000\", {{\"version\", \"1.2\"}}},\n        {\"tcp://127.0.0.1:8001\", {{\"version\", \"1.0\"}, {\"default\", \"true\"}}}});\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(3U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_13));\n\n  // 
Swap the default version.\n  host_set_.hosts_[0]->metadata(buildMetadata(\"1.2\", true));\n  host_set_.hosts_[1]->metadata(buildMetadata(\"1.0\"));\n\n  // Add a new host.\n  modifyHosts({makeHost(\"tcp://127.0.0.1:8002\", {{\"version\", \"1.3\"}})}, {});\n\n  EXPECT_EQ(4U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_13));\n\n  // Swap default again and remove the previous one.\n  host_set_.hosts_[0]->metadata(buildMetadata(\"1.2\"));\n  host_set_.hosts_[1]->metadata(buildMetadata(\"1.0\", true));\n\n  modifyHosts({}, {host_set_.hosts_[2]});\n\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(1U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_13));\n\n  // Swap the default version once more, this time adding a new host and removing\n  // the current default version.\n  host_set_.hosts_[0]->metadata(buildMetadata(\"1.2\", true));\n  host_set_.hosts_[1]->metadata(buildMetadata(\"1.0\"));\n\n  modifyHosts({makeHost(\"tcp://127.0.0.1:8003\", {{\"version\", \"1.4\"}})}, {host_set_.hosts_[1]});\n\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(5U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(2U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10));\n  
EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_13));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_14));\n\n  // Make 1.4 default, without hosts being added/removed.\n  host_set_.hosts_[0]->metadata(buildMetadata(\"1.2\"));\n  host_set_.hosts_[1]->metadata(buildMetadata(\"1.4\", true));\n\n  host_set_.runCallbacks({}, {});\n\n  EXPECT_EQ(3U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(5U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(2U, stats_.lb_subsets_removed_.value());\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_12));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_default));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_13));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_14));\n}\n\nTEST_P(SubsetLoadBalancerTest, UpdateRemovingLastSubsetHost) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"1.1\"}}},\n  });\n\n  HostSharedPtr host_v10 = host_set_.hosts_[0];\n  HostSharedPtr host_v11 = host_set_.hosts_[1];\n\n  TestLoadBalancerContext context({{\"version\", \"1.0\"}});\n  EXPECT_EQ(host_v10, lb_->chooseHost(&context));\n  EXPECT_EQ(1U, stats_.lb_subsets_selected_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_fallback_.value());\n  EXPECT_EQ(2U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(2U, stats_.lb_subsets_created_.value());\n\n  modifyHosts({}, 
{host_v10});\n\n  // fallback to any endpoint\n  EXPECT_EQ(host_v11, lb_->chooseHost(&context));\n  EXPECT_EQ(1U, stats_.lb_subsets_selected_.value());\n  EXPECT_EQ(1U, stats_.lb_subsets_fallback_.value());\n  EXPECT_EQ(1U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(2U, stats_.lb_subsets_created_.value());\n  EXPECT_EQ(1U, stats_.lb_subsets_removed_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, UpdateRemovingUnknownHost) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"stage\", \"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED),\n      makeSelector(\n          {\"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"stage\", \"prod\"}, {\"version\", \"1.0\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"stage\", \"prod\"}, {\"version\", \"1.1\"}}},\n  });\n\n  TestLoadBalancerContext context({{\"stage\", \"prod\"}, {\"version\", \"1.0\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context));\n\n  modifyHosts({}, {makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"1.2\"}}),\n                   makeHost(\"tcp://127.0.0.1:8001\", {{\"stage\", \"prod\"}, {\"version\", \"1.2\"}})});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context));\n}\n\nTEST_F(SubsetLoadBalancerTest, UpdateModifyingOnlyHostHealth) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"version\"},\n          
envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED),\n      makeSelector(\n          {\"hardware\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"version\", \"1.1\"}}},\n      {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}}},\n  });\n\n  TestLoadBalancerContext context_10({{\"version\", \"1.0\"}});\n  TestLoadBalancerContext context_11({{\"version\", \"1.1\"}});\n\n  // All hosts are healthy.\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_11));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_11));\n\n  host_set_.hosts_[0]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  host_set_.hosts_[2]->healthFlagSet(Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  host_set_.healthy_hosts_ = {host_set_.hosts_[1], host_set_.hosts_[3]};\n  host_set_.runCallbacks({}, {});\n\n  // Unhealthy hosts are excluded.\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_11));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_11));\n}\n\nTEST_F(SubsetLoadBalancerTest, BalancesDisjointSubsets) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED),\n      makeSelector(\n   
       {\"hardware\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}, {\"hardware\", \"std\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}, {\"hardware\", \"bigmem\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"version\", \"1.1\"}, {\"hardware\", \"std\"}}},\n      {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}, {\"hardware\", \"bigmem\"}}},\n  });\n\n  TestLoadBalancerContext context_10({{\"version\", \"1.0\"}});\n  TestLoadBalancerContext context_bigmem({{\"hardware\", \"bigmem\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_bigmem));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_bigmem));\n}\n\nTEST_F(SubsetLoadBalancerTest, BalancesOverlappingSubsets) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"stage\", \"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED),\n      makeSelector(\n          {\"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}, {\"stage\", \"prod\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}, {\"stage\", \"prod\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}, {\"stage\", \"off\"}}},\n      {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}, {\"stage\", \"prod\"}}},\n      
{\"tcp://127.0.0.1:84\", {{\"version\", \"999\"}, {\"stage\", \"dev\"}}},\n  });\n\n  TestLoadBalancerContext context_10({{\"version\", \"1.0\"}});\n  TestLoadBalancerContext context_10_prod({{\"version\", \"1.0\"}, {\"stage\", \"prod\"}});\n  TestLoadBalancerContext context_dev({{\"version\", \"999\"}, {\"stage\", \"dev\"}});\n  TestLoadBalancerContext context_unknown({{\"version\", \"2.0\"}, {\"stage\", \"prod\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_10));\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10_prod));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10_prod));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10_prod));\n\n  EXPECT_EQ(host_set_.hosts_[4], lb_->chooseHost(&context_dev));\n  EXPECT_EQ(host_set_.hosts_[4], lb_->chooseHost(&context_dev));\n\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_unknown));\n}\n\nTEST_F(SubsetLoadBalancerTest, BalancesNestedSubsets) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"stage\", \"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED),\n      makeSelector(\n          {\"stage\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}, {\"stage\", \"prod\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}, {\"stage\", \"prod\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}, {\"stage\", \"off\"}}},\n      {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}, 
{\"stage\", \"prod\"}}},\n      {\"tcp://127.0.0.1:84\", {{\"version\", \"999\"}, {\"stage\", \"dev\"}}},\n  });\n\n  TestLoadBalancerContext context_prod({{\"stage\", \"prod\"}});\n  TestLoadBalancerContext context_prod_10({{\"version\", \"1.0\"}, {\"stage\", \"prod\"}});\n  TestLoadBalancerContext context_unknown_stage({{\"stage\", \"larval\"}});\n  TestLoadBalancerContext context_unknown_version({{\"version\", \"2.0\"}, {\"stage\", \"prod\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_prod));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_prod));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_prod));\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_prod_10));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_prod_10));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_prod_10));\n\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_unknown_stage));\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_unknown_version));\n}\n\nTEST_F(SubsetLoadBalancerTest, IgnoresUnselectedMetadata) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}, {\"stage\", \"ignored\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"ignore\", \"value\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}}},\n  });\n\n  TestLoadBalancerContext context_ignore({{\"ignore\", \"value\"}});\n  TestLoadBalancerContext context_version({{\"version\", \"1.0\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_version));\n  EXPECT_EQ(host_set_.hosts_[2], 
lb_->chooseHost(&context_version));\n\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_ignore));\n}\n\nTEST_F(SubsetLoadBalancerTest, IgnoresHostsWithoutMetadata) {\n  EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true));\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  HostVector hosts;\n  hosts.emplace_back(makeTestHost(info_, \"tcp://127.0.0.1:80\"));\n  hosts.emplace_back(makeHost(\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}}));\n\n  host_set_.hosts_ = hosts;\n  host_set_.hosts_per_locality_ = makeHostsPerLocality({hosts});\n\n  host_set_.healthy_hosts_ = host_set_.hosts_;\n  host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_;\n\n  lb_ = std::make_shared<SubsetLoadBalancer>(\n      lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_,\n      ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_);\n\n  TestLoadBalancerContext context_version({{\"version\", \"1.0\"}});\n\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_version));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_version));\n}\n\n// TODO(mattklein123): The following 4 tests verify basic functionality with all sub-LB tests.\n// Optimally these would also be some type of TEST_P, but that is a little bit complicated as\n// modifyHosts() also needs params. 
Clean this up.\nTEST_P(SubsetLoadBalancerTest, LoadBalancerTypesRoundRobin) {\n  doLbTypeTest(LoadBalancerType::RoundRobin);\n}\n\nTEST_P(SubsetLoadBalancerTest, LoadBalancerTypesLeastRequest) {\n  doLbTypeTest(LoadBalancerType::LeastRequest);\n}\n\nTEST_P(SubsetLoadBalancerTest, LoadBalancerTypesRandom) { doLbTypeTest(LoadBalancerType::Random); }\n\nTEST_P(SubsetLoadBalancerTest, LoadBalancerTypesRingHash) {\n  doLbTypeTest(LoadBalancerType::RingHash);\n}\n\nTEST_P(SubsetLoadBalancerTest, LoadBalancerTypesMaglev) { doLbTypeTest(LoadBalancerType::Maglev); }\n\nTEST_F(SubsetLoadBalancerTest, ZoneAwareFallback) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"x\"}, envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  common_config_.mutable_healthy_panic_threshold()->set_value(40);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 40))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillRepeatedly(Return(2));\n\n  zoneAwareInit({{\n                     {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:82\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:83\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:84\", {{\"version\", \"1.1\"}}},\n                 }},\n                
{{\n                     {\"tcp://127.0.0.1:90\", {{\"version\", \"1.0\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:91\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:92\", {{\"version\", \"1.0\"}}},\n                 }});\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(nullptr));\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][0], lb_->chooseHost(nullptr));\n}\n\nTEST_P(SubsetLoadBalancerTest, ZoneAwareFallbackAfterUpdate) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"x\"}, envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillRepeatedly(Return(2));\n\n  zoneAwareInit({{\n                     {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:82\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:83\", {{\"version\", 
\"1.0\"}}},\n                     {\"tcp://127.0.0.1:84\", {{\"version\", \"1.1\"}}},\n                 }},\n                {{\n                     {\"tcp://127.0.0.1:90\", {{\"version\", \"1.0\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:91\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:92\", {{\"version\", \"1.0\"}}},\n                 }});\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(nullptr));\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][0], lb_->chooseHost(nullptr));\n\n  modifyHosts({makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"1.0\"}})}, {host_set_.hosts_[0]},\n              absl::optional<uint32_t>(0));\n\n  modifyLocalHosts({makeHost(\"tcp://127.0.0.1:9000\", {{\"version\", \"1.0\"}})}, {local_hosts_->at(0)},\n                   0);\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(nullptr));\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][1], lb_->chooseHost(nullptr));\n}\n\nTEST_F(SubsetLoadBalancerTest, ZoneAwareFallbackDefaultSubset) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"version\", \"default\"}});\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n\n  std::vector<SubsetSelectorPtr> subset_selectors 
= {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillRepeatedly(Return(2));\n\n  zoneAwareInit({{\n                     {\"tcp://127.0.0.1:80\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:81\", {{\"version\", \"default\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:82\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:83\", {{\"version\", \"default\"}}},\n                     {\"tcp://127.0.0.1:84\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:85\", {{\"version\", \"default\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:86\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:87\", {{\"version\", \"default\"}}},\n                     {\"tcp://127.0.0.1:88\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:89\", {{\"version\", \"default\"}}},\n                 }},\n                {{\n                     {\"tcp://127.0.0.1:90\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:91\", {{\"version\", \"default\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:92\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:93\", {{\"version\", \"default\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:94\", {{\"version\", \"new\"}}},\n                     
{\"tcp://127.0.0.1:95\", {{\"version\", \"default\"}}},\n                 }});\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(nullptr));\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][1], lb_->chooseHost(nullptr));\n}\n\nTEST_P(SubsetLoadBalancerTest, ZoneAwareFallbackDefaultSubsetAfterUpdate) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"version\", \"default\"}});\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillRepeatedly(Return(2));\n\n  zoneAwareInit({{\n                     {\"tcp://127.0.0.1:80\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:81\", {{\"version\", \"default\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:82\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:83\", {{\"version\", \"default\"}}},\n                     {\"tcp://127.0.0.1:84\", {{\"version\", 
\"new\"}}},\n                     {\"tcp://127.0.0.1:85\", {{\"version\", \"default\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:86\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:87\", {{\"version\", \"default\"}}},\n                     {\"tcp://127.0.0.1:88\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:89\", {{\"version\", \"default\"}}},\n                 }},\n                {{\n                     {\"tcp://127.0.0.1:90\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:91\", {{\"version\", \"default\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:92\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:93\", {{\"version\", \"default\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:94\", {{\"version\", \"new\"}}},\n                     {\"tcp://127.0.0.1:95\", {{\"version\", \"default\"}}},\n                 }});\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(nullptr));\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][1], lb_->chooseHost(nullptr));\n\n  modifyHosts({makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"default\"}})}, {host_set_.hosts_[1]},\n              absl::optional<uint32_t>(0));\n\n  modifyLocalHosts({local_hosts_->at(1)},\n                   {makeHost(\"tcp://127.0.0.1:9001\", {{\"version\", \"default\"}})}, 0);\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(nullptr));\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, 
random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][3], lb_->chooseHost(nullptr));\n}\n\nTEST_F(SubsetLoadBalancerTest, ZoneAwareBalancesSubsets) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillRepeatedly(Return(2));\n\n  zoneAwareInit({{\n                     {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:81\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}}},\n                     {\"tcp://127.0.0.1:84\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:85\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:86\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:87\", {{\"version\", \"1.1\"}}},\n                     {\"tcp://127.0.0.1:88\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:89\", {{\"version\", \"1.1\"}}},\n                 }},\n                {{\n                     {\"tcp://127.0.0.1:90\", {{\"version\", 
\"1.0\"}}},\n                     {\"tcp://127.0.0.1:91\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:92\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:93\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:94\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:95\", {{\"version\", \"1.1\"}}},\n                 }});\n\n  TestLoadBalancerContext context({{\"version\", \"1.1\"}});\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(&context));\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][1], lb_->chooseHost(&context));\n}\n\nTEST_P(SubsetLoadBalancerTest, ZoneAwareBalancesSubsetsAfterUpdate) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.healthy_panic_threshold\", 50))\n      .WillRepeatedly(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.zone_routing.enabled\", 100))\n      .WillRepeatedly(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"upstream.zone_routing.min_cluster_size\", 6))\n      .WillRepeatedly(Return(2));\n\n  zoneAwareInit({{\n                     {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:81\", {{\"version\", 
\"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}}},\n                     {\"tcp://127.0.0.1:84\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:85\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:86\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:87\", {{\"version\", \"1.1\"}}},\n                     {\"tcp://127.0.0.1:88\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:89\", {{\"version\", \"1.1\"}}},\n                 }},\n                {{\n                     {\"tcp://127.0.0.1:90\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:91\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:92\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:93\", {{\"version\", \"1.1\"}}},\n                 },\n                 {\n                     {\"tcp://127.0.0.1:94\", {{\"version\", \"1.0\"}}},\n                     {\"tcp://127.0.0.1:95\", {{\"version\", \"1.1\"}}},\n                 }});\n\n  TestLoadBalancerContext context({{\"version\", \"1.1\"}});\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(&context));\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][1], lb_->chooseHost(&context));\n\n  modifyHosts({makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"1.1\"}})}, {host_set_.hosts_[1]},\n              absl::optional<uint32_t>(0));\n\n  modifyLocalHosts({local_hosts_->at(1)}, {makeHost(\"tcp://127.0.0.1:9001\", {{\"version\", 
\"1.1\"}})},\n                   0);\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(100));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(&context));\n\n  // Force request out of small zone.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(9999)).WillOnce(Return(2));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][3], lb_->chooseHost(&context));\n}\n\nTEST_F(SubsetLoadBalancerTest, DescribeMetadata) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n  init();\n\n  ProtobufWkt::Value str_value;\n  str_value.set_string_value(\"abc\");\n\n  ProtobufWkt::Value num_value;\n  num_value.set_number_value(100);\n\n  auto tester = SubsetLoadBalancerDescribeMetadataTester(lb_);\n  tester.test(\"version=\\\"abc\\\"\", {{\"version\", str_value}});\n  tester.test(\"number=100\", {{\"number\", num_value}});\n  tester.test(\"x=\\\"abc\\\", y=100\", {{\"x\", str_value}, {\"y\", num_value}});\n  tester.test(\"y=100, x=\\\"abc\\\"\", {{\"y\", num_value}, {\"x\", str_value}});\n  tester.test(\"<no metadata>\", {});\n}\n\nTEST_F(SubsetLoadBalancerTest, DisabledLocalityWeightAwareness) {\n  EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true));\n\n  // We configure a weighted host set that heavily favors the second locality.\n  configureWeightedHostSet(\n      {\n          {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:81\", {{\"version\", \"1.1\"}}},\n      },\n      {\n          {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}}},\n          {\"tcp://127.0.0.1:84\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:85\", {{\"version\", \"1.1\"}}},\n      },\n      host_set_, {1, 100});\n\n  lb_ = std::make_shared<SubsetLoadBalancer>(\n      lb_type_, priority_set_, nullptr, stats_, 
stats_store_, runtime_, random_, subset_info_,\n      ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_);\n\n  TestLoadBalancerContext context({{\"version\", \"1.1\"}});\n\n  // Since we don't respect locality weights, the first locality is selected.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(&context));\n}\n\n// Verifies that we do *not* invoke health() on hosts when constructing the load balancer. Since\n// health is modified concurrently from multiple threads, it is not safe to call on the worker\n// threads.\nTEST_F(SubsetLoadBalancerTest, DoesNotCheckHostHealth) {\n  EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true));\n\n  auto mock_host = std::make_shared<MockHost>();\n  HostVector hosts{mock_host};\n  host_set_.hosts_ = hosts;\n\n  EXPECT_CALL(*mock_host, weight()).WillRepeatedly(Return(1));\n\n  lb_ = std::make_shared<SubsetLoadBalancer>(\n      lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_,\n      ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_);\n}\n\nTEST_F(SubsetLoadBalancerTest, EnabledLocalityWeightAwareness) {\n  EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true));\n  EXPECT_CALL(subset_info_, localityWeightAware()).WillRepeatedly(Return(true));\n\n  // We configure a weighted host set that heavily favors the second locality.\n  configureWeightedHostSet(\n      {\n          {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:81\", {{\"version\", \"1.1\"}}},\n      },\n      {\n          {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}}},\n          {\"tcp://127.0.0.1:84\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:85\", {{\"version\", \"1.1\"}}},\n      },\n      host_set_, {1, 100});\n\n  lb_ = 
std::make_shared<SubsetLoadBalancer>(\n      lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_,\n      ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_);\n\n  TestLoadBalancerContext context({{\"version\", \"1.1\"}});\n\n  // Since we respect locality weights, the second locality is selected.\n  EXPECT_CALL(random_, random()).WillOnce(Return(0));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][0], lb_->chooseHost(&context));\n}\n\nTEST_F(SubsetLoadBalancerTest, EnabledScaleLocalityWeights) {\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n  EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true));\n  EXPECT_CALL(subset_info_, localityWeightAware()).WillRepeatedly(Return(true));\n  EXPECT_CALL(subset_info_, scaleLocalityWeight()).WillRepeatedly(Return(true));\n\n  // We configure a weighted host set is weighted equally between each locality.\n  configureWeightedHostSet(\n      {\n          {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:81\", {{\"version\", \"1.1\"}}},\n      },\n      {\n          {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:83\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:84\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:85\", {{\"version\", \"1.1\"}}},\n      },\n      host_set_, {50, 50});\n\n  lb_ = std::make_shared<SubsetLoadBalancer>(\n      lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_,\n      ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_);\n  TestLoadBalancerContext context({{\"version\", \"1.1\"}});\n\n  // Since we scale the locality weights by 
number of hosts removed, we expect to see the second\n  // locality to be selected less because we've excluded more hosts in that locality than in the\n  // first.\n  // The localities are split 50/50, but because of the scaling we expect to see 66/33 instead.\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][3], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][3], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][3], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][1], lb_->chooseHost(&context));\n}\n\nTEST_F(SubsetLoadBalancerTest, EnabledScaleLocalityWeightsRounding) {\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n  EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true));\n  EXPECT_CALL(subset_info_, localityWeightAware()).WillRepeatedly(Return(true));\n  EXPECT_CALL(subset_info_, scaleLocalityWeight()).WillRepeatedly(Return(true));\n\n  // We configure a weighted host set where the locality weights are very low to test\n  // that we are rounding computation instead of flooring it.\n  configureWeightedHostSet(\n      {\n          {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:81\", {{\"version\", 
\"1.1\"}}},\n      },\n      {\n          {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:83\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:84\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:85\", {{\"version\", \"1.1\"}}},\n      },\n      host_set_, {2, 2});\n\n  lb_ = std::make_shared<SubsetLoadBalancer>(\n      lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_,\n      ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_);\n  TestLoadBalancerContext context({{\"version\", \"1.0\"}});\n\n  // We expect to see a 33/66 split because 2 * 1 / 2 = 1 and 2 * 3 / 4 = 1.5 -> 2\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][0], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][1], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][2], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][0], lb_->chooseHost(&context));\n  EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[1][1], lb_->chooseHost(&context));\n}\n\n// Regression for bug where missing locality weights crashed scaling and locality aware subset LBs.\nTEST_F(SubsetLoadBalancerTest, ScaleLocalityWeightsWithNoLocalityWeights) {\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n  EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true));\n  EXPECT_CALL(subset_info_, localityWeightAware()).WillRepeatedly(Return(true));\n  EXPECT_CALL(subset_info_, 
scaleLocalityWeight()).WillRepeatedly(Return(true));\n\n  configureHostSet(\n      {\n          {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n          {\"tcp://127.0.0.1:81\", {{\"version\", \"1.1\"}}},\n      },\n      host_set_);\n\n  lb_ = std::make_shared<SubsetLoadBalancer>(\n      lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_,\n      ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_);\n}\n\nTEST_P(SubsetLoadBalancerTest, GaugesUpdatedOnDestroy) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n  });\n\n  EXPECT_EQ(1U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(0U, stats_.lb_subsets_removed_.value());\n\n  lb_ = nullptr;\n\n  EXPECT_EQ(0U, stats_.lb_subsets_active_.value());\n  EXPECT_EQ(1U, stats_.lb_subsets_removed_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, SubsetSelectorNoFallbackPerSelector) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NO_FALLBACK)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"version\", \"1.1\"}}},\n      
{\"tcp://127.0.0.1:83\", {{\"version\", \"1.1\"}}},\n  });\n\n  TestLoadBalancerContext context_10({{\"version\", \"1.0\"}});\n  TestLoadBalancerContext context_11({{\"version\", \"1.1\"}});\n  TestLoadBalancerContext context_12({{\"version\", \"1.2\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_11));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_10));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_11));\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_12));\n  EXPECT_EQ(0U, stats_.lb_subsets_fallback_.value());\n  EXPECT_EQ(4U, stats_.lb_subsets_selected_.value());\n}\n\nTEST_P(SubsetLoadBalancerTest, FallbackNotDefinedForIntermediateSelector) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"stage\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED),\n      makeSelector(\n          {\"stage\", \"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({{\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}, {\"stage\", \"dev\"}}},\n        {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}, {\"stage\", \"canary\"}}}});\n\n  TestLoadBalancerContext context_match_host0({{\"stage\", \"dev\"}});\n  TestLoadBalancerContext context_stage_nx({{\"stage\", \"test\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_match_host0));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_match_host0));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_stage_nx));\n  EXPECT_EQ(host_set_.hosts_[1], 
lb_->chooseHost(&context_stage_nx));\n}\n\nTEST_P(SubsetLoadBalancerTest, SubsetSelectorFallbackOverridesTopLevelOne) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NO_FALLBACK)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init();\n\n  TestLoadBalancerContext context_unknown_key({{\"unknown\", \"unknown\"}});\n  TestLoadBalancerContext context_unknown_value({{\"version\", \"unknown\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_unknown_key));\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_unknown_value));\n}\n\nTEST_P(SubsetLoadBalancerTest, SubsetSelectorNoFallbackMatchesTopLevelOne) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NO_FALLBACK)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init();\n\n  TestLoadBalancerContext context_unknown_key({{\"unknown\", \"unknown\"}});\n  TestLoadBalancerContext context_unknown_value({{\"version\", \"unknown\"}});\n\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_unknown_key));\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_unknown_value));\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_unknown_value));\n}\n\nTEST_P(SubsetLoadBalancerTest, SubsetSelectorDefaultAnyFallbackPerSelector) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> 
subset_selectors = {\n      makeSelector(\n          {\"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::DEFAULT_SUBSET),\n      makeSelector(\n          {\"app\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT),\n      makeSelector(\n          {\"foo\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"bar\", \"default\"}});\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n\n  // Add hosts initial hosts.\n  init({{\"tcp://127.0.0.1:81\", {{\"version\", \"0.0\"}}},\n        {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}}},\n        {\"tcp://127.0.0.1:83\", {{\"app\", \"envoy\"}}},\n        {\"tcp://127.0.0.1:84\", {{\"foo\", \"abc\"}, {\"bar\", \"default\"}}}});\n\n  TestLoadBalancerContext context_ver_10({{\"version\", \"1.0\"}});\n  TestLoadBalancerContext context_ver_nx({{\"version\", \"x\"}});\n  TestLoadBalancerContext context_app({{\"app\", \"envoy\"}});\n  TestLoadBalancerContext context_app_nx({{\"app\", \"ngnix\"}});\n  TestLoadBalancerContext context_foo({{\"foo\", \"abc\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_app_nx));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_app_nx));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_app));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_ver_nx));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_foo));\n}\n\nTEST_P(SubsetLoadBalancerTest, SubsetSelectorDefaultAfterUpdate) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::DEFAULT_SUBSET));\n\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"version\", 
\"default\"}});\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::DEFAULT_SUBSET)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"version\", \"new\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"default\"}}},\n  });\n\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(nullptr));\n\n  HostSharedPtr added_host1 = makeHost(\"tcp://127.0.0.1:8000\", {{\"version\", \"new\"}});\n  HostSharedPtr added_host2 = makeHost(\"tcp://127.0.0.1:8001\", {{\"version\", \"default\"}});\n\n  TestLoadBalancerContext context_ver_nx({{\"version\", \"x\"}});\n\n  modifyHosts({added_host1, added_host2}, {host_set_.hosts_.back()});\n\n  EXPECT_EQ(added_host2, lb_->chooseHost(&context_ver_nx));\n}\n\nTEST_P(SubsetLoadBalancerTest, SubsetSelectorAnyAfterUpdate) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT)};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({\n      {\"tcp://127.0.0.1:81\", {{\"version\", \"1\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"version\", \"2\"}}},\n  });\n\n  TestLoadBalancerContext context_ver_nx({{\"version\", \"x\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_ver_nx));\n\n  HostSharedPtr added_host1 = makeHost(\"tcp://127.0.0.1:83\", {{\"version\", \"3\"}});\n\n  modifyHosts({added_host1}, {host_set_.hosts_.back()});\n\n  EXPECT_EQ(added_host1, 
lb_->chooseHost(&context_ver_nx));\n}\n\nTEST_P(SubsetLoadBalancerTest, FallbackForCompoundSelector) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n  const ProtobufWkt::Struct default_subset = makeDefaultSubset({{\"foo\", \"bar\"}});\n  EXPECT_CALL(subset_info_, defaultSubset()).WillRepeatedly(ReturnRef(default_subset));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED),\n      makeSelector(\n          {\"version\", \"hardware\", \"stage\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NO_FALLBACK),\n      makeSelector(\n          {\"version\", \"hardware\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::DEFAULT_SUBSET),\n      makeSelector(\n          {\"version\", \"stage\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET,\n          {\"version\"})};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  // Add hosts initial hosts.\n  init({{\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}, {\"hardware\", \"c32\"}}},\n        {\"tcp://127.0.0.1:81\", {{\"version\", \"1.0\"}, {\"hardware\", \"c32\"}, {\"foo\", \"bar\"}}},\n        {\"tcp://127.0.0.1:82\", {{\"version\", \"2.0\"}, {\"hardware\", \"c32\"}, {\"stage\", \"dev\"}}},\n        {\"tcp://127.0.0.1:83\", {{\"version\", \"2.0\"}}}});\n\n  TestLoadBalancerContext context_match_host0({{\"version\", \"1.0\"}, {\"hardware\", \"c32\"}});\n  TestLoadBalancerContext context_ver_nx({{\"version\", \"x\"}, {\"hardware\", \"c32\"}});\n  TestLoadBalancerContext context_stage_nx(\n      {{\"version\", \"2.0\"}, {\"hardware\", \"c32\"}, {\"stage\", \"x\"}});\n  TestLoadBalancerContext context_hardware_nx(\n      
{{\"version\", \"2.0\"}, {\"hardware\", \"zzz\"}, {\"stage\", \"dev\"}});\n  TestLoadBalancerContext context_match_host2(\n      {{\"version\", \"2.0\"}, {\"hardware\", \"c32\"}, {\"stage\", \"dev\"}});\n  TestLoadBalancerContext context_ver_20({{\"version\", \"2.0\"}});\n  TestLoadBalancerContext context_ver_stage_match_host2({{\"version\", \"2.0\"}, {\"stage\", \"dev\"}});\n  TestLoadBalancerContext context_ver_stage_nx({{\"version\", \"2.0\"}, {\"stage\", \"canary\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_match_host0));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_ver_nx));\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_hardware_nx));\n  EXPECT_EQ(nullptr, lb_->chooseHost(&context_stage_nx));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_match_host2));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_match_host2));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_ver_20));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_ver_stage_match_host2));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_ver_stage_match_host2));\n  EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context_ver_stage_nx));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&context_ver_stage_nx));\n}\n\nTEST_P(SubsetLoadBalancerTest, KeysSubsetFallbackChained) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector(\n          {\"stage\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NO_FALLBACK),\n      makeSelector(\n          {\"stage\", \"version\"},\n          envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET,\n          {\"stage\"}),\n      makeSelector(\n          {\"stage\", \"version\", \"hardware\"},\n          
envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET,\n          {\"version\", \"stage\"})};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({{\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}, {\"hardware\", \"c32\"}, {\"stage\", \"dev\"}}},\n        {\"tcp://127.0.0.1:81\", {{\"version\", \"2.0\"}, {\"hardware\", \"c64\"}, {\"stage\", \"dev\"}}},\n        {\"tcp://127.0.0.1:82\", {{\"version\", \"1.0\"}, {\"hardware\", \"c32\"}, {\"stage\", \"test\"}}}});\n\n  TestLoadBalancerContext context_match_host0(\n      {{\"version\", \"1.0\"}, {\"hardware\", \"c32\"}, {\"stage\", \"dev\"}});\n  TestLoadBalancerContext context_hw_nx(\n      {{\"version\", \"2.0\"}, {\"hardware\", \"arm\"}, {\"stage\", \"dev\"}});\n  TestLoadBalancerContext context_ver_hw_nx(\n      {{\"version\", \"1.2\"}, {\"hardware\", \"arm\"}, {\"stage\", \"dev\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_match_host0));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_match_host0));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_hw_nx));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_hw_nx));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_ver_hw_nx));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_ver_hw_nx));\n}\n\nTEST_P(SubsetLoadBalancerTest, KeysSubsetFallbackToNotExistingSelector) {\n  EXPECT_CALL(subset_info_, fallbackPolicy())\n      .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n\n  std::vector<SubsetSelectorPtr> subset_selectors = {makeSelector(\n      {\"stage\", \"version\"},\n      envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET,\n      {\"stage\"})};\n\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  init({{\"tcp://127.0.0.1:80\", {{\"version\", \"1.0\"}, {\"stage\", \"dev\"}}}});\n\n  
TestLoadBalancerContext context_nx({{\"version\", \"1.0\"}, {\"stage\", \"test\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_nx));\n  EXPECT_EQ(1U, stats_.lb_subsets_fallback_.value());\n}\n\nINSTANTIATE_TEST_SUITE_P(UpdateOrderings, SubsetLoadBalancerTest,\n                         testing::ValuesIn({UpdateOrder::RemovesFirst, UpdateOrder::Simultaneous}));\n\nclass SubsetLoadBalancerSingleHostPerSubsetTest : public SubsetLoadBalancerTest {\npublic:\n  SubsetLoadBalancerSingleHostPerSubsetTest()\n      : default_subset_selectors_({\n            makeSelector({\"key\"}, true),\n        }) {\n    ON_CALL(subset_info_, subsetSelectors()).WillByDefault(ReturnRef(default_subset_selectors_));\n    ON_CALL(subset_info_, fallbackPolicy())\n        .WillByDefault(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n  }\n\n  using SubsetLoadBalancerTest::init;\n  void init() {\n    init({\n        {\"tcp://127.0.0.1:80\", {}},\n        {\"tcp://127.0.0.1:81\", {{\"key\", \"a\"}}},\n        {\"tcp://127.0.0.1:82\", {{\"key\", \"b\"}}},\n\n    });\n  }\n\n  using SubsetLoadBalancerTest::makeSelector;\n  SubsetSelectorPtr makeSelector(const std::set<std::string>& selector_keys,\n                                 bool single_host_per_subset) {\n    return makeSelector(\n        selector_keys,\n        envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED, {},\n        single_host_per_subset);\n  }\n\n  std::vector<SubsetSelectorPtr> default_subset_selectors_;\n};\n\nTEST_F(SubsetLoadBalancerSingleHostPerSubsetTest, RejectMultipleSelectors) {\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector({\"version\"}, false),\n      makeSelector({\"test\"}, true),\n  };\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  EXPECT_THROW_WITH_MESSAGE(init(), EnvoyException,\n                            \"subset_lb selector: single_host_per_subset cannot 
be set when there \"\n                            \"are multiple subset selectors.\");\n}\n\nTEST_F(SubsetLoadBalancerSingleHostPerSubsetTest, RejectMultipleKeys) {\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector({\"version\", \"test\"}, true),\n  };\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  EXPECT_THROW_WITH_MESSAGE(init(), EnvoyException,\n                            \"subset_lb selector: single_host_per_subset cannot bet set when there \"\n                            \"isn't exactly 1 key or if that key is empty.\");\n}\n\nTEST_F(SubsetLoadBalancerSingleHostPerSubsetTest, RejectEmptyKey) {\n  std::vector<SubsetSelectorPtr> subset_selectors = {\n      makeSelector({\"\"}, true),\n  };\n  EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors));\n\n  EXPECT_THROW_WITH_MESSAGE(init(), EnvoyException,\n                            \"subset_lb selector: single_host_per_subset cannot bet set when there \"\n                            \"isn't exactly 1 key or if that key is empty.\");\n}\n\nTEST_F(SubsetLoadBalancerSingleHostPerSubsetTest, DuplicateMetadataStat) {\n  init({\n      {\"tcp://127.0.0.1:80\", {{\"key\", \"a\"}}},\n      {\"tcp://127.0.0.1:81\", {{\"key\", \"a\"}}},\n      {\"tcp://127.0.0.1:82\", {{\"key\", \"a\"}}},\n      {\"tcp://127.0.0.1:83\", {{\"key\", \"b\"}}},\n  });\n  // The first 'a' is the original, the next 2 instances of 'a' are duplicates (counted\n  // in stat), and 'b' is another non-duplicate.\n  for (auto& gauge : stats_store_.gauges()) {\n    ENVOY_LOG_MISC(error, \"name {} value {}\", gauge->name(), gauge->value());\n  }\n  EXPECT_EQ(2, TestUtility::findGauge(stats_store_,\n                                      \"testprefix.lb_subsets_single_host_per_subset_duplicate\")\n                   ->value());\n}\n\nTEST_F(SubsetLoadBalancerSingleHostPerSubsetTest, Match) {\n  init();\n\n  TestLoadBalancerContext host_1({{\"key\", 
\"a\"}});\n  TestLoadBalancerContext host_2({{\"key\", \"b\"}});\n\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&host_1));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&host_1));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&host_2));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&host_2));\n}\n\nTEST_F(SubsetLoadBalancerSingleHostPerSubsetTest, FallbackOnUnknownMetadata) {\n  init();\n\n  TestLoadBalancerContext context_unknown_key({{\"unknown\", \"unknown\"}});\n  TestLoadBalancerContext context_unknown_value({{\"key\", \"unknown\"}});\n\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context_unknown_key));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_unknown_value));\n}\n\nTEST_P(SubsetLoadBalancerSingleHostPerSubsetTest, Update) {\n  init();\n\n  TestLoadBalancerContext host_a({{\"key\", \"a\"}});\n  TestLoadBalancerContext host_b({{\"key\", \"b\"}});\n  TestLoadBalancerContext host_c({{\"key\", \"c\"}});\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&host_a));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&host_a));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&host_b));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&host_b));\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&host_c)); // fallback\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&host_c)); // fallback\n\n  HostSharedPtr added_host = makeHost(\"tcp://127.0.0.1:8000\", {{\"key\", \"c\"}});\n\n  // Remove b, add c\n  modifyHosts({added_host}, {host_set_.hosts_.back()});\n\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&host_a));\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&host_a));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&host_c));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&host_c));\n  EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(&host_b)); // fallback\n  EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&host_b)); // fallback\n  EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&host_b)); // 
fallback\n}\n\nINSTANTIATE_TEST_SUITE_P(UpdateOrderings, SubsetLoadBalancerSingleHostPerSubsetTest,\n                         testing::ValuesIn({UpdateOrder::RemovesFirst, UpdateOrder::Simultaneous}));\n\n} // namespace SubsetLoadBalancerTest\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/test_cluster_manager.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/config/utility.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/socket_option_impl.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/cluster_factory_impl.h\"\n#include \"common/upstream/cluster_manager_impl.h\"\n#include \"common/upstream/subset_lb.h\"\n\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/integration/clusters/custom_static_cluster.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/secret/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/tcp/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/str_replace.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing 
testing::_;\nusing testing::Invoke;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Upstream {\n\n// The tests in this file are split between testing with real clusters and some with mock clusters.\n// By default we setup to call the real cluster creation function. Individual tests can override\n// the expectations when needed.\nclass TestClusterManagerFactory : public ClusterManagerFactory {\npublic:\n  TestClusterManagerFactory() : api_(Api::createApiForTest(stats_, random_)) {\n    ON_CALL(*this, clusterFromProto_(_, _, _, _))\n        .WillByDefault(Invoke(\n            [&](const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm,\n                Outlier::EventLoggerSharedPtr outlier_event_logger,\n                bool added_via_api) -> std::pair<ClusterSharedPtr, ThreadAwareLoadBalancer*> {\n              auto result = ClusterFactoryImplBase::create(\n                  cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_,\n                  dispatcher_, log_manager_, local_info_, admin_, singleton_manager_,\n                  outlier_event_logger, added_via_api, validation_visitor_, *api_);\n              // Convert from load balancer unique_ptr -> raw pointer -> unique_ptr.\n              return std::make_pair(result.first, result.second.release());\n            }));\n  }\n\n  Http::ConnectionPool::InstancePtr allocateConnPool(\n      Event::Dispatcher&, HostConstSharedPtr host, ResourcePriority, Http::Protocol,\n      const Network::ConnectionSocket::OptionsSharedPtr& options,\n      const Network::TransportSocketOptionsSharedPtr& transport_socket_options) override {\n    return Http::ConnectionPool::InstancePtr{\n        allocateConnPool_(host, options, transport_socket_options)};\n  }\n\n  Tcp::ConnectionPool::InstancePtr\n  allocateTcpConnPool(Event::Dispatcher&, HostConstSharedPtr host, ResourcePriority,\n                      const Network::ConnectionSocket::OptionsSharedPtr&,\n                      
Network::TransportSocketOptionsSharedPtr) override {\n    return Tcp::ConnectionPool::InstancePtr{allocateTcpConnPool_(host)};\n  }\n\n  std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>\n  clusterFromProto(const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm,\n                   Outlier::EventLoggerSharedPtr outlier_event_logger,\n                   bool added_via_api) override {\n    auto result = clusterFromProto_(cluster, cm, outlier_event_logger, added_via_api);\n    return std::make_pair(result.first, ThreadAwareLoadBalancerPtr(result.second));\n  }\n\n  CdsApiPtr createCds(const envoy::config::core::v3::ConfigSource&, ClusterManager&) override {\n    return CdsApiPtr{createCds_()};\n  }\n\n  ClusterManagerPtr\n  clusterManagerFromProto(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) override {\n    return ClusterManagerPtr{clusterManagerFromProto_(bootstrap)};\n  }\n\n  Secret::SecretManager& secretManager() override { return secret_manager_; }\n\n  MOCK_METHOD(ClusterManager*, clusterManagerFromProto_,\n              (const envoy::config::bootstrap::v3::Bootstrap& bootstrap));\n  MOCK_METHOD(Http::ConnectionPool::Instance*, allocateConnPool_,\n              (HostConstSharedPtr host, Network::ConnectionSocket::OptionsSharedPtr,\n               Network::TransportSocketOptionsSharedPtr));\n  MOCK_METHOD(Tcp::ConnectionPool::Instance*, allocateTcpConnPool_, (HostConstSharedPtr host));\n  MOCK_METHOD((std::pair<ClusterSharedPtr, ThreadAwareLoadBalancer*>), clusterFromProto_,\n              (const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm,\n               Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api));\n  MOCK_METHOD(CdsApi*, createCds_, ());\n\n  Stats::TestUtil::TestStore stats_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  std::shared_ptr<NiceMock<Network::MockDnsResolver>> dns_resolver_{\n      new NiceMock<Network::MockDnsResolver>};\n  NiceMock<Runtime::MockLoader> 
runtime_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_{\n      dispatcher_.timeSource()};\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockAdmin> admin_;\n  NiceMock<Secret::MockSecretManager> secret_manager_;\n  NiceMock<AccessLog::MockAccessLogManager> log_manager_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  Api::ApiPtr api_;\n};\n\n// Helper to intercept calls to postThreadLocalClusterUpdate.\nclass MockLocalClusterUpdate {\npublic:\n  MOCK_METHOD(void, post,\n              (uint32_t priority, const HostVector& hosts_added, const HostVector& hosts_removed));\n};\n\nclass MockLocalHostsRemoved {\npublic:\n  MOCK_METHOD(void, post, (const HostVector&));\n};\n\n// A test version of ClusterManagerImpl that provides a way to get a non-const handle to the\n// clusters, which is necessary in order to call updateHosts on the priority set.\nclass TestClusterManagerImpl : public ClusterManagerImpl {\npublic:\n  using ClusterManagerImpl::ClusterManagerImpl;\n\n  TestClusterManagerImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                         ClusterManagerFactory& factory, Stats::Store& stats,\n                         ThreadLocal::Instance& tls, Runtime::Loader& runtime,\n                         const LocalInfo::LocalInfo& local_info,\n                         AccessLog::AccessLogManager& log_manager,\n                         Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin,\n                         ProtobufMessage::ValidationContext& validation_context, Api::Api& api,\n                         Http::Context& http_context, Grpc::Context& grpc_context)\n      : ClusterManagerImpl(bootstrap, factory, stats, tls, runtime, local_info, log_manager,\n                      
     main_thread_dispatcher, admin, validation_context, api, http_context,\n                           grpc_context) {}\n\n  std::map<std::string, std::reference_wrapper<Cluster>> activeClusters() {\n    std::map<std::string, std::reference_wrapper<Cluster>> clusters;\n    for (auto& cluster : active_clusters_) {\n      clusters.emplace(cluster.first, *cluster.second->cluster_);\n    }\n    return clusters;\n  }\n};\n\n// Override postThreadLocalClusterUpdate so we can test that merged updates calls\n// it with the right values at the right times.\nclass MockedUpdatedClusterManagerImpl : public TestClusterManagerImpl {\npublic:\n  MockedUpdatedClusterManagerImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap,\n                                  ClusterManagerFactory& factory, Stats::Store& stats,\n                                  ThreadLocal::Instance& tls, Runtime::Loader& runtime,\n                                  const LocalInfo::LocalInfo& local_info,\n                                  AccessLog::AccessLogManager& log_manager,\n                                  Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin,\n                                  ProtobufMessage::ValidationContext& validation_context,\n                                  Api::Api& api, MockLocalClusterUpdate& local_cluster_update,\n                                  MockLocalHostsRemoved& local_hosts_removed,\n                                  Http::Context& http_context, Grpc::Context& grpc_context)\n      : TestClusterManagerImpl(bootstrap, factory, stats, tls, runtime, local_info, log_manager,\n                               main_thread_dispatcher, admin, validation_context, api, http_context,\n                               grpc_context),\n        local_cluster_update_(local_cluster_update), local_hosts_removed_(local_hosts_removed) {}\n\nprotected:\n  void postThreadLocalClusterUpdate(const Cluster&, uint32_t priority,\n                                    const 
HostVector& hosts_added,\n                                    const HostVector& hosts_removed) override {\n    local_cluster_update_.post(priority, hosts_added, hosts_removed);\n  }\n\n  void postThreadLocalDrainConnections(const Cluster&, const HostVector& hosts_removed) override {\n    local_hosts_removed_.post(hosts_removed);\n  }\n\n  MockLocalClusterUpdate& local_cluster_update_;\n  MockLocalHostsRemoved& local_hosts_removed_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/test_data/sds_response.json",
    "content": "{\n    \"env\": \"production\",\n    \"hosts\": [\n        {\n            \"ip_address\": \"10.0.14.27\",\n            \"last_check_in\": \"2015-12-10 22:05:30.286993+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f6647241\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.14.29\",\n            \"last_check_in\": \"2015-12-10 22:05:17.258545+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f0647247\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.205\",\n            \"last_check_in\": \"2015-12-10 22:05:19.921349+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-95fc6525\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.208\",\n            \"last_check_in\": \"2015-12-10 22:05:16.312951+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": 
false,\n                \"instance_id\": \"i-d6ec7266\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.209\",\n            \"last_check_in\": \"2015-12-10 22:05:22.075746+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-d5ec7265\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.43\",\n            \"last_check_in\": \"2015-12-10 22:05:16.808453+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": true,\n                \"instance_id\": \"i-11e726a1\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\",\n                \"load_balancing_weight\": 40\n            }\n        },\n        {\n            \"ip_address\": \"10.0.26.143\",\n            \"last_check_in\": \"2015-12-10 22:06:08.487769+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-9f7aee2f\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.42.185\",\n            \"last_check_in\": \"2015-12-10 22:05:03.713194+00:00\",\n            \"port\": 80,\n            \"revision\": 
\"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-3edbb280\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.141\",\n            \"last_check_in\": \"2015-12-10 22:05:15.724613+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-dd7b1063\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.142\",\n            \"last_check_in\": \"2015-12-10 22:06:04.331018+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-de7b1060\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.143\",\n            \"last_check_in\": \"2015-12-10 22:05:24.151133+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-df7b1061\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.5.0\",\n            
\"last_check_in\": \"2015-12-10 22:06:09.605907+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-497f7efe\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.5.2\",\n            \"last_check_in\": \"2015-12-10 22:06:09.036531+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-4e7f7ef9\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\",\n                \"load_balancing_weight\": 90\n            }\n        }\n    ],\n    \"service\": \"fare\"\n}\n"
  },
  {
    "path": "test/common/upstream/test_data/sds_response_2.json",
    "content": "{\n    \"env\": \"production\",\n    \"hosts\": [\n        {\n            \"ip_address\": \"10.0.14.27\",\n            \"last_check_in\": \"2015-12-10 22:05:30.286993+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f6647241\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.14.29\",\n            \"last_check_in\": \"2015-12-10 22:05:17.258545+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f0647247\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.205\",\n            \"last_check_in\": \"2015-12-10 22:05:19.921349+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-95fc6525\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.208\",\n            \"last_check_in\": \"2015-12-10 22:05:16.312951+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": 
false,\n                \"instance_id\": \"i-d6ec7266\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.209\",\n            \"last_check_in\": \"2015-12-10 22:05:22.075746+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-d5ec7265\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.43\",\n            \"last_check_in\": \"2015-12-10 22:05:16.808453+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": true,\n                \"instance_id\": \"i-11e726a1\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.26.143\",\n            \"last_check_in\": \"2015-12-10 22:06:08.487769+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-9f7aee2f\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.42.185\",\n            \"last_check_in\": \"2015-12-10 22:05:03.713194+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": 
\"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-3edbb280\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.141\",\n            \"last_check_in\": \"2015-12-10 22:05:15.724613+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-dd7b1063\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.142\",\n            \"last_check_in\": \"2015-12-10 22:06:04.331018+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-de7b1060\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.5.1\",\n            \"last_check_in\": \"2015-12-10 22:06:09.036531+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-4e7f7ef9\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        }\n    ],\n    \"service\": \"fare\"\n}\n"
  },
  {
    "path": "test/common/upstream/test_data/sds_response_3.json",
    "content": "{\n    \"env\": \"production\",\n    \"hosts\": [\n        {\n            \"ip_address\": \"10.0.14.27\",\n            \"last_check_in\": \"2015-12-10 22:05:30.286993+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f6647241\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.14.29\",\n            \"last_check_in\": \"2015-12-10 22:05:17.258545+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f0647247\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.205\",\n            \"last_check_in\": \"2015-12-10 22:05:19.921349+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-95fc6525\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.208\",\n            \"last_check_in\": \"2015-12-10 22:05:16.312951+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": 
false,\n                \"instance_id\": \"i-d6ec7266\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.209\",\n            \"last_check_in\": \"2015-12-10 22:05:22.075746+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-d5ec7265\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.43\",\n            \"last_check_in\": \"2015-12-10 22:05:16.808453+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": true,\n                \"instance_id\": \"i-11e726a1\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.26.143\",\n            \"last_check_in\": \"2015-12-10 22:06:08.487769+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-9f7aee2f\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.42.185\",\n            \"last_check_in\": \"2015-12-10 22:05:03.713194+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": 
\"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-3edbb280\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.141\",\n            \"last_check_in\": \"2015-12-10 22:05:15.724613+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-dd7b1063\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.142\",\n            \"last_check_in\": \"2015-12-10 22:06:04.331018+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-de7b1060\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.143\",\n            \"last_check_in\": \"2015-12-10 22:05:24.151133+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-df7b1061\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.5.1\",\n            \"last_check_in\": \"2015-12-10 22:06:09.036531+00:00\",\n            \"port\": 80,\n 
           \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-4e7f7ef9\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        }\n    ],\n    \"service\": \"fare\"\n}\n"
  },
  {
    "path": "test/common/upstream/test_data/sds_response_weight_change.json",
    "content": "{\n    \"env\": \"production\",\n    \"hosts\": [\n        {\n            \"ip_address\": \"10.0.14.27\",\n            \"last_check_in\": \"2015-12-10 22:05:30.286993+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f6647241\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.14.29\",\n            \"last_check_in\": \"2015-12-10 22:05:17.258545+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-f0647247\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.205\",\n            \"last_check_in\": \"2015-12-10 22:05:19.921349+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-95fc6525\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.208\",\n            \"last_check_in\": \"2015-12-10 22:05:16.312951+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": 
false,\n                \"instance_id\": \"i-d6ec7266\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.209\",\n            \"last_check_in\": \"2015-12-10 22:05:22.075746+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-d5ec7265\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.16.43\",\n            \"last_check_in\": \"2015-12-10 22:05:16.808453+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": true,\n                \"instance_id\": \"i-11e726a1\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\",\n                \"load_balancing_weight\": 50\n            }\n        },\n        {\n            \"ip_address\": \"10.0.26.143\",\n            \"last_check_in\": \"2015-12-10 22:06:08.487769+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1d\",\n                \"canary\": false,\n                \"instance_id\": \"i-9f7aee2f\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.42.185\",\n            \"last_check_in\": \"2015-12-10 22:05:03.713194+00:00\",\n            \"port\": 80,\n            \"revision\": 
\"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-3edbb280\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.141\",\n            \"last_check_in\": \"2015-12-10 22:05:15.724613+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-dd7b1063\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.142\",\n            \"last_check_in\": \"2015-12-10 22:06:04.331018+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-de7b1060\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.43.143\",\n            \"last_check_in\": \"2015-12-10 22:05:24.151133+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1e\",\n                \"canary\": false,\n                \"instance_id\": \"i-df7b1061\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.5.0\",\n            
\"last_check_in\": \"2015-12-10 22:06:09.605907+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-497f7efe\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        },\n        {\n            \"ip_address\": \"10.0.5.1\",\n            \"last_check_in\": \"2015-12-10 22:06:09.036531+00:00\",\n            \"port\": 80,\n            \"revision\": \"700b197acd50e1cbd23bcfd47125c55e9e1fd2b8\",\n            \"service\": \"fare\",\n            \"tags\": {\n                \"az\": \"us-east-1a\",\n                \"canary\": false,\n                \"instance_id\": \"i-4e7f7ef9\",\n                \"onebox_name\": null,\n                \"region\": \"us-east-1\"\n            }\n        }\n    ],\n    \"service\": \"fare\"\n}\n"
  },
  {
    "path": "test/common/upstream/transport_socket_matcher_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/base.pb.validate.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/upstream/transport_socket_match_impl.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass FakeTransportSocketFactory : public Network::TransportSocketFactory {\npublic:\n  MOCK_METHOD(bool, implementsSecureTransport, (), (const));\n  MOCK_METHOD(Network::TransportSocketPtr, createTransportSocket,\n              (Network::TransportSocketOptionsSharedPtr), (const));\n  FakeTransportSocketFactory(std::string id) : id_(std::move(id)) {}\n  std::string id() const { return id_; }\n\nprivate:\n  const std::string id_;\n};\n\nclass FooTransportSocketFactory\n    : public Network::TransportSocketFactory,\n      public Server::Configuration::UpstreamTransportSocketConfigFactory,\n      Logger::Loggable<Logger::Id::upstream> {\npublic:\n  MOCK_METHOD(bool, implementsSecureTransport, (), (const));\n  MOCK_METHOD(Network::TransportSocketPtr, createTransportSocket,\n              (Network::TransportSocketOptionsSharedPtr), (const));\n\n  Network::TransportSocketFactoryPtr\n  createTransportSocketFactory(const Protobuf::Message& proto,\n                               Server::Configuration::TransportSocketFactoryContext&) override {\n    const auto& node = dynamic_cast<const 
envoy::config::core::v3::Node&>(proto);\n    std::string id = \"default-foo\";\n    if (!node.id().empty()) {\n      id = node.id();\n    }\n    return std::make_unique<FakeTransportSocketFactory>(id);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<envoy::config::core::v3::Node>();\n  }\n\n  std::string name() const override { return \"foo\"; }\n};\n\nclass TransportSocketMatcherTest : public testing::Test {\npublic:\n  TransportSocketMatcherTest()\n      : registration_(factory_), mock_default_factory_(new FakeTransportSocketFactory(\"default\")),\n        stats_scope_(stats_store_.createScope(\"transport_socket_match.test\")) {}\n\n  void init(const std::vector<std::string>& match_yaml) {\n    Protobuf::RepeatedPtrField<envoy::config::cluster::v3::Cluster::TransportSocketMatch> matches;\n    for (const auto& yaml : match_yaml) {\n      auto transport_socket_match = matches.Add();\n      TestUtility::loadFromYaml(yaml, *transport_socket_match);\n    }\n    matcher_ = std::make_unique<TransportSocketMatcherImpl>(matches, mock_factory_context_,\n                                                            mock_default_factory_, *stats_scope_);\n  }\n\n  void validate(const envoy::config::core::v3::Metadata& metadata, const std::string& expected) {\n    auto& factory = matcher_->resolve(&metadata).factory_;\n    const auto& config_factory = dynamic_cast<const FakeTransportSocketFactory&>(factory);\n    EXPECT_EQ(expected, config_factory.id());\n  }\n\nprotected:\n  FooTransportSocketFactory factory_;\n  Registry::InjectFactory<Server::Configuration::UpstreamTransportSocketConfigFactory>\n      registration_;\n\n  TransportSocketMatcherPtr matcher_;\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> mock_factory_context_;\n  Network::TransportSocketFactoryPtr mock_default_factory_;\n  Stats::IsolatedStoreImpl stats_store_;\n  Stats::ScopePtr stats_scope_;\n};\n\nTEST_F(TransportSocketMatcherTest, 
ReturnDefaultSocketFactoryWhenNoMatch) {\n  init({R\"EOF(\nname: \"enableFooSocket\"\nmatch:\n  hasSidecar: \"true\"\ntransport_socket:\n  name: \"foo\"\n  config:\n    id: \"abc\"\n )EOF\"});\n\n  envoy::config::core::v3::Metadata metadata;\n  validate(metadata, \"default\");\n}\n\nTEST_F(TransportSocketMatcherTest, BasicMatch) {\n  init({R\"EOF(\nname: \"sidecar_socket\"\nmatch:\n  sidecar: \"true\"\ntransport_socket:\n  name: \"foo\"\n  config:\n    id: \"sidecar\")EOF\",\n        R\"EOF(\nname: \"http_socket\"\nmatch:\n  protocol: \"http\"\ntransport_socket:\n  name: \"foo\"\n  config:\n    id: \"http\"\n )EOF\"});\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(R\"EOF(\nfilter_metadata:\n  envoy.transport_socket_match: { sidecar: \"true\" }\n)EOF\",\n                            metadata);\n\n  validate(metadata, \"sidecar\");\n  TestUtility::loadFromYaml(R\"EOF(\nfilter_metadata:\n  envoy.transport_socket_match: { protocol: \"http\" }\n)EOF\",\n                            metadata);\n  validate(metadata, \"http\");\n}\n\nTEST_F(TransportSocketMatcherTest, MultipleMatchFirstWin) {\n  init({R\"EOF(\nname: \"sidecar_http_socket\"\nmatch:\n  sidecar: \"true\"\n  protocol: \"http\"\ntransport_socket:\n  name: \"foo\"\n  config:\n    id: \"sidecar_http\"\n )EOF\",\n        R\"EOF(\nname: \"sidecar_socket\"\nmatch:\n  sidecar: \"true\"\ntransport_socket:\n  name: \"foo\"\n  config:\n    id: \"sidecar\"\n )EOF\"});\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(R\"EOF(\nfilter_metadata:\n  envoy.transport_socket_match: { sidecar: \"true\", protocol: \"http\" }\n)EOF\",\n                            metadata);\n  validate(metadata, \"sidecar_http\");\n}\n\nTEST_F(TransportSocketMatcherTest, MatchAllEndpointsFactory) {\n  init({R\"EOF(\nname: \"match_all\"\nmatch: {}\ntransport_socket:\n  name: \"foo\"\n  config:\n    id: \"match_all\"\n )EOF\"});\n  envoy::config::core::v3::Metadata metadata;\n  
validate(metadata, \"match_all\");\n  TestUtility::loadFromYaml(R\"EOF(\nfilter_metadata:\n  envoy.transport_socket: { random_label: \"random_value\" }\n)EOF\",\n                            metadata);\n  validate(metadata, \"match_all\");\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/upstream_impl_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <list>\n#include <string>\n#include <tuple>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/network/utility.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/static_cluster.h\"\n#include \"common/upstream/strict_dns_cluster.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/health_checker.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ContainerEq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nclass UpstreamImplTestBase {\nprotected:\n  UpstreamImplTestBase() : api_(Api::createApiForTest(stats_, random_)) {}\n\n  NiceMock<Server::MockAdmin> admin_;\n  Ssl::MockContextManager ssl_context_manager_;\n  NiceMock<MockClusterManager> cm_;\n  
NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  Stats::TestUtil::TestStore stats_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n};\n\nstd::list<std::string> hostListToAddresses(const HostVector& hosts) {\n  std::list<std::string> addresses;\n  for (const HostSharedPtr& host : hosts) {\n    addresses.push_back(host->address()->asString());\n  }\n\n  return addresses;\n}\n\ntemplate <class HostsT = HostVector>\nstd::shared_ptr<const HostsT>\nmakeHostsFromHostsPerLocality(HostsPerLocalityConstSharedPtr hosts_per_locality) {\n  HostVector hosts;\n\n  for (const auto& locality_hosts : hosts_per_locality->get()) {\n    for (const auto& host : locality_hosts) {\n      hosts.emplace_back(host);\n    }\n  }\n\n  return std::make_shared<const HostsT>(hosts);\n}\n\nstruct ResolverData {\n  ResolverData(Network::MockDnsResolver& dns_resolver, Event::MockDispatcher& dispatcher) {\n    timer_ = new Event::MockTimer(&dispatcher);\n    expectResolve(dns_resolver);\n  }\n\n  void expectResolve(Network::MockDnsResolver& dns_resolver) {\n    EXPECT_CALL(dns_resolver, resolve(_, _, _))\n        .WillOnce(Invoke([&](const std::string&, Network::DnsLookupFamily,\n                             Network::DnsResolver::ResolveCb cb) -> Network::ActiveDnsQuery* {\n          dns_callback_ = cb;\n          return &active_dns_query_;\n        }))\n        .RetiresOnSaturation();\n  }\n\n  Event::MockTimer* timer_;\n  Network::DnsResolver::ResolveCb dns_callback_;\n  Network::MockActiveDnsQuery active_dns_query_;\n};\n\nusing StrictDnsConfigTuple =\n    std::tuple<std::string, Network::DnsLookupFamily, std::list<std::string>>;\nstd::vector<StrictDnsConfigTuple> generateStrictDnsParams() 
{\n  std::vector<StrictDnsConfigTuple> dns_config;\n  {\n    std::string family_yaml(\"\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::Auto);\n    std::list<std::string> dns_response{\"127.0.0.1\", \"127.0.0.2\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response));\n  }\n  {\n    std::string family_yaml(R\"EOF(dns_lookup_family: v4_only\n                            )EOF\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::V4Only);\n    std::list<std::string> dns_response{\"127.0.0.1\", \"127.0.0.2\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response));\n  }\n  {\n    std::string family_yaml(R\"EOF(dns_lookup_family: v6_only\n                            )EOF\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::V6Only);\n    std::list<std::string> dns_response{\"::1\", \"::2\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response));\n  }\n  {\n    std::string family_yaml(R\"EOF(dns_lookup_family: auto\n                            )EOF\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::Auto);\n    std::list<std::string> dns_response{\"127.0.0.1\", \"127.0.0.2\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response));\n  }\n  return dns_config;\n}\n\nclass StrictDnsParamTest : public testing::TestWithParam<StrictDnsConfigTuple>,\n                           public UpstreamImplTestBase {};\n\nINSTANTIATE_TEST_SUITE_P(DnsParam, StrictDnsParamTest,\n                         testing::ValuesIn(generateStrictDnsParams()));\n\nTEST_P(StrictDnsParamTest, ImmediateResolve) {\n  auto dns_resolver = std::make_shared<NiceMock<Network::MockDnsResolver>>();\n  ReadyWatcher initialized;\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: strict_dns\n    )EOF\" + std::get<0>(GetParam()) +\n                           R\"EOF(\n    lb_policy: round_robin\n    load_assignment:\n        
endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n  )EOF\";\n  EXPECT_CALL(initialized, ready());\n  EXPECT_CALL(*dns_resolver, resolve(\"foo.bar.com\", std::get<1>(GetParam()), _))\n      .WillOnce(Invoke([&](const std::string&, Network::DnsLookupFamily,\n                           Network::DnsResolver::ResolveCb cb) -> Network::ActiveDnsQuery* {\n        cb(Network::DnsResolver::ResolutionStatus::Success,\n           TestUtility::makeDnsResponse(std::get<2>(GetParam())));\n        return nullptr;\n      }));\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver, factory_context,\n                               std::move(scope), false);\n  cluster.initialize([&]() -> void { initialized.ready(); });\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n}\n\nclass StrictDnsClusterImplTest : public testing::Test, public UpstreamImplTestBase {\nprotected:\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver_ =\n      std::make_shared<Network::MockDnsResolver>();\n};\n\nTEST_F(StrictDnsClusterImplTest, ZeroHostsIsInializedImmediately) {\n  ReadyWatcher initialized;\n\n  const std::string yaml = R\"EOF(\n    name: name\n   
 connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      endpoints:\n      - lb_endpoints:\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context,\n                               std::move(scope), false);\n  EXPECT_CALL(initialized, ready());\n  cluster.initialize([&]() -> void { initialized.ready(); });\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n}\n\n// Resolve zero hosts, while using health checking.\nTEST_F(StrictDnsClusterImplTest, ZeroHostsHealthChecker) {\n  ReadyWatcher initialized;\n\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n  )EOF\";\n\n  ResolverData resolver(*dns_resolver_, dispatcher_);\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context,\n                               std::move(scope), false);\n  std::shared_ptr<MockHealthChecker> health_checker(new MockHealthChecker());\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_));\n  cluster.setHealthChecker(health_checker);\n  cluster.initialize([&]() -> void { initialized.ready(); });\n\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_));\n  EXPECT_CALL(initialized, ready());\n  EXPECT_CALL(*resolver.timer_, enableTimer(_, _));\n  resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Success, {});\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n}\n\nTEST_F(StrictDnsClusterImplTest, Basic) {\n  // gmock matches in LIFO order which is why these are swapped.\n  ResolverData resolver2(*dns_resolver_, dispatcher_);\n  ResolverData resolver1(*dns_resolver_, dispatcher_);\n\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: strict_dns\n    dns_refresh_rate: 4s\n    dns_failure_refresh_rate:\n      base_interval: 7s\n      max_interval: 10s\n    lb_policy: round_robin\n    circuit_breakers:\n      thresholds:\n      - priority: DEFAULT\n        max_connections: 43\n        max_pending_requests: 57\n        max_requests: 50\n        max_retries: 10\n      - priority: HIGH\n        max_connections: 1\n        max_pending_requests: 2\n        max_requests: 3\n        max_retries: 4\n    
max_requests_per_connection: 3\n    protocol_selection: USE_DOWNSTREAM_PROTOCOL\n    http2_protocol_options:\n      hpack_table_size: 0\n    http_protocol_options:\n      header_key_format:\n        proper_case_words: {}\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: localhost1\n                    port_value: 11001\n            - endpoint:\n                address:\n                  socket_address:\n                    address: localhost2\n                    port_value: 11002 \n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context,\n                               std::move(scope), false);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.default.max_connections\", 43));\n  EXPECT_EQ(43U, cluster.info()->resourceManager(ResourcePriority::Default).connections().max());\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"circuit_breakers.name.default.max_pending_requests\", 57));\n  EXPECT_EQ(57U,\n            cluster.info()->resourceManager(ResourcePriority::Default).pendingRequests().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.default.max_requests\", 50));\n  EXPECT_EQ(50U, cluster.info()->resourceManager(ResourcePriority::Default).requests().max());\n  
EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.default.max_retries\", 10));\n  EXPECT_EQ(10U, cluster.info()->resourceManager(ResourcePriority::Default).retries().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.high.max_connections\", 1));\n  EXPECT_EQ(1U, cluster.info()->resourceManager(ResourcePriority::High).connections().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.high.max_pending_requests\", 2));\n  EXPECT_EQ(2U, cluster.info()->resourceManager(ResourcePriority::High).pendingRequests().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.high.max_requests\", 3));\n  EXPECT_EQ(3U, cluster.info()->resourceManager(ResourcePriority::High).requests().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.high.max_retries\", 4));\n  EXPECT_EQ(4U, cluster.info()->resourceManager(ResourcePriority::High).retries().max());\n  EXPECT_EQ(3U, cluster.info()->maxRequestsPerConnection());\n  EXPECT_EQ(0U, cluster.info()->http2Options().hpack_table_size().value());\n  EXPECT_EQ(Http::Http1Settings::HeaderKeyFormat::ProperCase,\n            cluster.info()->http1Settings().header_key_format_);\n\n  cluster.info()->stats().upstream_rq_total_.inc();\n  EXPECT_EQ(1UL, stats_.counter(\"cluster.name.upstream_rq_total\").value());\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.maintenance_mode.name\", 0));\n  EXPECT_FALSE(cluster.info()->maintenanceMode());\n\n  ReadyWatcher membership_updated;\n  cluster.prioritySet().addPriorityUpdateCb(\n      [&](uint32_t, const HostVector&, const HostVector&) -> void { membership_updated.ready(); });\n\n  cluster.initialize([] {});\n\n  resolver1.expectResolve(*dns_resolver_);\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                      
    TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.1:11001\", \"127.0.0.2:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n  EXPECT_EQ(\"localhost1\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname());\n  EXPECT_EQ(\"localhost1\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->hostname());\n\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.2\", \"127.0.0.1\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.1:11001\", \"127.0.0.2:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.2\", \"127.0.0.1\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.1:11001\", \"127.0.0.2:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.3\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.3:11001\"}),\n      
ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  // Make sure we de-dup the same address.\n  EXPECT_CALL(*resolver2.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver2.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"10.0.0.1\", \"10.0.0.1\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.3:11001\", \"10.0.0.1:11002\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(1UL,\n            cluster.prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  for (const HostSharedPtr& host : cluster.prioritySet().hostSetsPerPriority()[0]->hosts()) {\n    EXPECT_EQ(cluster.info().get(), &host->cluster());\n  }\n\n  // Empty response. With successful but empty response the host list deletes the address.\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({}));\n  EXPECT_THAT(\n      std::list<std::string>({\"10.0.0.1:11002\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  // Empty response. 
With failing but empty response the host list does not delete the address.\n  ON_CALL(random_, random()).WillByDefault(Return(8000));\n  resolver2.expectResolve(*dns_resolver_);\n  resolver2.timer_->invokeCallback();\n  EXPECT_CALL(*resolver2.timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  resolver2.dns_callback_(Network::DnsResolver::ResolutionStatus::Failure,\n                          TestUtility::makeDnsResponse({}));\n  EXPECT_THAT(\n      std::list<std::string>({\"10.0.0.1:11002\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  // Make sure we cancel.\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  resolver2.expectResolve(*dns_resolver_);\n  resolver2.timer_->invokeCallback();\n\n  EXPECT_CALL(resolver1.active_dns_query_, cancel());\n  EXPECT_CALL(resolver2.active_dns_query_, cancel());\n}\n\n// Verifies that host removal works correctly when hosts are being health checked\n// but the cluster is configured to always remove hosts\nTEST_F(StrictDnsClusterImplTest, HostRemovalActiveHealthSkipped) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    ignore_health_on_host_removal: true\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443 \n  )EOF\";\n\n  ResolverData resolver(*dns_resolver_, dispatcher_);\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context,\n                               std::move(scope), false);\n  std::shared_ptr<MockHealthChecker> health_checker(new MockHealthChecker());\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_));\n  cluster.setHealthChecker(health_checker);\n  cluster.initialize([&]() -> void {});\n\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_));\n  EXPECT_CALL(*resolver.timer_, enableTimer(_, _)).Times(2);\n  resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                         TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}));\n\n  // Verify that both endpoints are initially marked with FAILED_ACTIVE_HC, then\n  // clear the flag to simulate that these endpoints have been successfully health\n  // checked.\n  {\n    const auto& hosts = cluster.prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(2UL, hosts.size());\n\n    for (const auto& host : hosts) {\n      EXPECT_TRUE(host->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n      host->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n      host->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n    }\n  }\n\n  // Re-resolve the DNS name with only one record\n  resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                         TestUtility::makeDnsResponse({\"127.0.0.1\"}));\n\n  const auto& hosts = cluster.prioritySet().hostSetsPerPriority()[0]->hosts();\n  EXPECT_EQ(1UL, hosts.size());\n}\n\n// Verify that a host is not removed if it is removed 
from DNS but still passing active health\n// checking.\nTEST_F(StrictDnsClusterImplTest, HostRemovalAfterHcFail) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n  )EOF\";\n\n  ResolverData resolver(*dns_resolver_, dispatcher_);\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context,\n                               std::move(scope), false);\n  std::shared_ptr<MockHealthChecker> health_checker(new MockHealthChecker());\n  EXPECT_CALL(*health_checker, start());\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_));\n  cluster.setHealthChecker(health_checker);\n  ReadyWatcher initialized;\n  cluster.initialize([&initialized]() { initialized.ready(); });\n\n  EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_));\n  EXPECT_CALL(*resolver.timer_, enableTimer(_, _)).Times(2);\n  resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                         TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}));\n\n  // Verify that both endpoints are initially marked with FAILED_ACTIVE_HC, then\n  // clear the flag to simulate that these 
endpoints have been successfully health\n  // checked.\n  {\n    const auto& hosts = cluster.prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(2UL, hosts.size());\n\n    for (size_t i = 0; i < 2; ++i) {\n      EXPECT_TRUE(hosts[i]->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC));\n      hosts[i]->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n      hosts[i]->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC);\n      if (i == 1) {\n        EXPECT_CALL(initialized, ready());\n      }\n      health_checker->runCallbacks(hosts[i], HealthTransition::Changed);\n    }\n  }\n\n  // Re-resolve the DNS name with only one record, we should still have 2 hosts.\n  resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                         TestUtility::makeDnsResponse({\"127.0.0.1\"}));\n\n  {\n    const auto& hosts = cluster.prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(2UL, hosts.size());\n    EXPECT_FALSE(hosts[0]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));\n    EXPECT_TRUE(hosts[1]->healthFlagGet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL));\n\n    hosts[1]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n    health_checker->runCallbacks(hosts[1], HealthTransition::Changed);\n  }\n\n  // Unlike EDS we will not remove if HC is failing but will wait until the next polling interval.\n  // This may change in the future.\n  {\n    const auto& hosts = cluster.prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(2UL, hosts.size());\n  }\n}\n\nTEST_F(StrictDnsClusterImplTest, LoadAssignmentBasic) {\n  // gmock matches in LIFO order which is why these are swapped.\n  ResolverData resolver3(*dns_resolver_, dispatcher_);\n  ResolverData resolver2(*dns_resolver_, dispatcher_);\n  ResolverData resolver1(*dns_resolver_, dispatcher_);\n\n  const std::string yaml = R\"EOF(\n    name: name\n    type: STRICT_DNS\n\n    dns_lookup_family: V4_ONLY\n    connect_timeout: 0.25s\n    dns_refresh_rate: 
4s\n    dns_failure_refresh_rate:\n      base_interval: 7s\n      max_interval: 10s\n\n    lb_policy: ROUND_ROBIN\n\n    circuit_breakers:\n      thresholds:\n      - priority: DEFAULT\n        max_connections: 43\n        max_pending_requests: 57\n        max_requests: 50\n        max_retries: 10\n      - priority: HIGH\n        max_connections: 1\n        max_pending_requests: 2\n        max_requests: 3\n        max_retries: 4\n\n    max_requests_per_connection: 3\n\n    http2_protocol_options:\n      hpack_table_size: 0\n\n    load_assignment:\n      policy:\n        overprovisioning_factor: 100\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: localhost1\n                port_value: 11001\n            health_check_config:\n              port_value: 8000\n          health_status: DEGRADED\n        - endpoint:\n            address:\n              socket_address:\n                address: localhost2\n                port_value: 11002\n            health_check_config:\n              port_value: 8000\n        - endpoint:\n            address:\n              socket_address:\n                address: localhost3\n                port_value: 11002\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context,\n                               std::move(scope), false);\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.default.max_connections\", 43));\n  EXPECT_EQ(43U, cluster.info()->resourceManager(ResourcePriority::Default).connections().max());\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"circuit_breakers.name.default.max_pending_requests\", 57));\n  EXPECT_EQ(57U,\n            cluster.info()->resourceManager(ResourcePriority::Default).pendingRequests().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.default.max_requests\", 50));\n  EXPECT_EQ(50U, cluster.info()->resourceManager(ResourcePriority::Default).requests().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.default.max_retries\", 10));\n  EXPECT_EQ(10U, cluster.info()->resourceManager(ResourcePriority::Default).retries().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.high.max_connections\", 1));\n  EXPECT_EQ(1U, cluster.info()->resourceManager(ResourcePriority::High).connections().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.high.max_pending_requests\", 2));\n  EXPECT_EQ(2U, cluster.info()->resourceManager(ResourcePriority::High).pendingRequests().max());\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"circuit_breakers.name.high.max_requests\", 3));\n  EXPECT_EQ(3U, cluster.info()->resourceManager(ResourcePriority::High).requests().max());\n  EXPECT_CALL(runtime_.snapshot_, 
getInteger(\"circuit_breakers.name.high.max_retries\", 4));\n  EXPECT_EQ(4U, cluster.info()->resourceManager(ResourcePriority::High).retries().max());\n  EXPECT_EQ(3U, cluster.info()->maxRequestsPerConnection());\n  EXPECT_EQ(0U, cluster.info()->http2Options().hpack_table_size().value());\n\n  cluster.info()->stats().upstream_rq_total_.inc();\n  EXPECT_EQ(1UL, stats_.counter(\"cluster.name.upstream_rq_total\").value());\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"upstream.maintenance_mode.name\", 0));\n  EXPECT_FALSE(cluster.info()->maintenanceMode());\n\n  ReadyWatcher membership_updated;\n  cluster.prioritySet().addPriorityUpdateCb(\n      [&](uint32_t, const HostVector&, const HostVector&) -> void { membership_updated.ready(); });\n\n  cluster.initialize([] {});\n\n  resolver1.expectResolve(*dns_resolver_);\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.1:11001\", \"127.0.0.2:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n  EXPECT_EQ(\"localhost1\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname());\n  EXPECT_EQ(\"localhost1\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->hostname());\n  EXPECT_EQ(100, cluster.prioritySet().hostSetsPerPriority()[0]->overprovisioningFactor());\n  EXPECT_EQ(Host::Health::Degraded,\n            cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->health());\n  EXPECT_EQ(Host::Health::Degraded,\n            cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->health());\n\n  // This is the first time we received an update for localhost1, we expect to rebuild.\n  EXPECT_EQ(0UL, 
stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.2\", \"127.0.0.1\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.1:11001\", \"127.0.0.2:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n  EXPECT_EQ(100, cluster.prioritySet().hostSetsPerPriority()[0]->overprovisioningFactor());\n\n  // Since no change for localhost1, we expect no rebuild.\n  EXPECT_EQ(1UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.2\", \"127.0.0.1\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.1:11001\", \"127.0.0.2:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n  EXPECT_EQ(100, cluster.prioritySet().hostSetsPerPriority()[0]->overprovisioningFactor());\n\n  // Since no change for localhost1, we expect no rebuild.\n  EXPECT_EQ(2UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  EXPECT_CALL(*resolver2.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver2.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"10.0.0.1\", \"10.0.0.1\"}));\n\n  // We received a new set of hosts for localhost2. 
Should rebuild the cluster.\n  EXPECT_EQ(2UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.2\", \"127.0.0.1\"}));\n\n  // We again received the same set as before for localhost1. No rebuild this time.\n  EXPECT_EQ(3UL, stats_.counter(\"cluster.name.update_no_rebuild\").value());\n\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.3\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.3:11001\", \"10.0.0.1:11002\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  // Make sure we de-dup the same address.\n  EXPECT_CALL(*resolver2.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver2.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"10.0.0.1\", \"10.0.0.1\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.3:11001\", \"10.0.0.1:11002\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->degradedHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(1UL,\n            
cluster.prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  // Make sure that we *don't* de-dup between resolve targets.\n  EXPECT_CALL(*resolver3.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver3.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"10.0.0.1\"}));\n\n  const auto hosts = cluster.prioritySet().hostSetsPerPriority()[0]->hosts();\n  EXPECT_THAT(std::list<std::string>({\"127.0.0.3:11001\", \"10.0.0.1:11002\", \"10.0.0.1:11002\"}),\n              ContainerEq(hostListToAddresses(hosts)));\n\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->degradedHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(1UL,\n            cluster.prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  // Ensure that all host objects in the host list are unique.\n  for (const auto& host : hosts) {\n    EXPECT_EQ(1, std::count(hosts.begin(), hosts.end(), host));\n  }\n\n  for (const HostSharedPtr& host : cluster.prioritySet().hostSetsPerPriority()[0]->hosts()) {\n    EXPECT_EQ(cluster.info().get(), &host->cluster());\n  }\n\n  // Remove the duplicated hosts from both resolve targets and ensure that we don't see the same\n  // host multiple times.\n  absl::node_hash_set<HostSharedPtr> removed_hosts;\n  cluster.prioritySet().addPriorityUpdateCb(\n      [&](uint32_t, const HostVector&, const HostVector& hosts_removed) -> void {\n        for (const auto& host : hosts_removed) {\n          EXPECT_EQ(removed_hosts.end(), removed_hosts.find(host));\n          removed_hosts.insert(host);\n        }\n      });\n\n  EXPECT_CALL(*resolver2.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  
EXPECT_CALL(membership_updated, ready());\n  resolver2.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({}));\n\n  EXPECT_CALL(*resolver3.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver3.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({}));\n\n  // Ensure that we called the update membership callback.\n  EXPECT_EQ(2, removed_hosts.size());\n\n  // Make sure we cancel.\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  resolver2.expectResolve(*dns_resolver_);\n  resolver2.timer_->invokeCallback();\n  resolver3.expectResolve(*dns_resolver_);\n  resolver3.timer_->invokeCallback();\n\n  EXPECT_CALL(resolver1.active_dns_query_, cancel());\n  EXPECT_CALL(resolver2.active_dns_query_, cancel());\n  EXPECT_CALL(resolver3.active_dns_query_, cancel());\n}\n\nTEST_F(StrictDnsClusterImplTest, LoadAssignmentBasicMultiplePriorities) {\n  ResolverData resolver3(*dns_resolver_, dispatcher_);\n  ResolverData resolver2(*dns_resolver_, dispatcher_);\n  ResolverData resolver1(*dns_resolver_, dispatcher_);\n\n  const std::string yaml = R\"EOF(\n    name: name\n    type: STRICT_DNS\n\n    dns_lookup_family: V4_ONLY\n    connect_timeout: 0.25s\n    dns_refresh_rate: 4s\n\n    lb_policy: ROUND_ROBIN\n\n    load_assignment:\n      endpoints:\n      - priority: 0\n        lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: localhost1\n                port_value: 11001\n            health_check_config:\n              port_value: 8000\n        - endpoint:\n            address:\n              socket_address:\n                address: localhost2\n                port_value: 11002\n            health_check_config:\n              port_value: 8000\n\n      - priority: 1\n        lb_endpoints:\n      
  - endpoint:\n            address:\n              socket_address:\n                address: localhost3\n                port_value: 11003\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context,\n                               std::move(scope), false);\n  ReadyWatcher membership_updated;\n  cluster.prioritySet().addPriorityUpdateCb(\n      [&](uint32_t, const HostVector&, const HostVector&) -> void { membership_updated.ready(); });\n\n  cluster.initialize([] {});\n\n  resolver1.expectResolve(*dns_resolver_);\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.1\", \"127.0.0.2\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.1:11001\", \"127.0.0.2:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n  EXPECT_EQ(\"localhost1\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname());\n  EXPECT_EQ(\"localhost1\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->hostname());\n\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  
EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.2\", \"127.0.0.1\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.1:11001\", \"127.0.0.2:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.2\", \"127.0.0.1\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.1:11001\", \"127.0.0.2:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  resolver1.timer_->invokeCallback();\n  EXPECT_CALL(*resolver1.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver1.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"127.0.0.3\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.3:11001\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  // Make sure we de-dup the same address.\n  EXPECT_CALL(*resolver2.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver2.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"10.0.0.1\", \"10.0.0.1\"}));\n  EXPECT_THAT(\n      std::list<std::string>({\"127.0.0.3:11001\", \"10.0.0.1:11002\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n\n  EXPECT_EQ(2UL, 
cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(1UL,\n            cluster.prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n\n  for (const HostSharedPtr& host : cluster.prioritySet().hostSetsPerPriority()[0]->hosts()) {\n    EXPECT_EQ(cluster.info().get(), &host->cluster());\n  }\n\n  EXPECT_CALL(*resolver3.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  EXPECT_CALL(membership_updated, ready());\n  resolver3.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                          TestUtility::makeDnsResponse({\"192.168.1.1\", \"192.168.1.2\"}));\n\n  // Make sure we have multiple priorities.\n  EXPECT_THAT(\n      std::list<std::string>({\"192.168.1.1:11003\", \"192.168.1.2:11003\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[1]->hosts())));\n\n  // Make sure we cancel.\n  resolver1.expectResolve(*dns_resolver_);\n  resolver1.timer_->invokeCallback();\n  resolver2.expectResolve(*dns_resolver_);\n  resolver2.timer_->invokeCallback();\n  resolver3.expectResolve(*dns_resolver_);\n  resolver3.timer_->invokeCallback();\n\n  EXPECT_CALL(resolver1.active_dns_query_, cancel());\n  EXPECT_CALL(resolver2.active_dns_query_, cancel());\n  EXPECT_CALL(resolver3.active_dns_query_, cancel());\n}\n\n// Verifies that specifying a custom resolver when using STRICT_DNS fails\nTEST_F(StrictDnsClusterImplTest, CustomResolverFails) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    ignore_health_on_host_removal: true\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n                    
resolver_name: customresolver \n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope =\n      stats_.createScope(fmt::format(\"cluster.{}.\", cluster_config.name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n\n  EXPECT_THROW_WITH_MESSAGE(\n      std::make_unique<StrictDnsClusterImpl>(cluster_config, runtime_, dns_resolver_,\n                                             factory_context, std::move(scope), false),\n      EnvoyException, \"STRICT_DNS clusters must NOT have a custom resolver name set\");\n}\n\nTEST_F(StrictDnsClusterImplTest, FailureRefreshRateBackoffResetsWhenSuccessHappens) {\n  ResolverData resolver(*dns_resolver_, dispatcher_);\n\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    dns_refresh_rate: 4s\n    dns_failure_refresh_rate:\n      base_interval: 7s\n      max_interval: 10s\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: localhost1\n                    port_value: 11001\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context,\n                               std::move(scope), false);\n  cluster.initialize([] {});\n\n  // Failing response kicks the failure refresh backoff strategy.\n  ON_CALL(random_, random()).WillByDefault(Return(8000));\n  EXPECT_CALL(*resolver.timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Failure,\n                         TestUtility::makeDnsResponse({}));\n\n  // Successful call should reset the failure backoff strategy.\n  EXPECT_CALL(*resolver.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                         TestUtility::makeDnsResponse({}));\n\n  // Therefore, a subsequent failure should get a [0,base * 1] refresh.\n  ON_CALL(random_, random()).WillByDefault(Return(8000));\n  EXPECT_CALL(*resolver.timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Failure,\n                         TestUtility::makeDnsResponse({}));\n}\n\nTEST_F(StrictDnsClusterImplTest, TtlAsDnsRefreshRate) {\n  ResolverData resolver(*dns_resolver_, dispatcher_);\n\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    dns_refresh_rate: 4s\n    respect_dns_ttl: true\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 
localhost1\n                    port_value: 11001\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context,\n                               std::move(scope), false);\n  ReadyWatcher membership_updated;\n  cluster.prioritySet().addPriorityUpdateCb(\n      [&](uint32_t, const HostVector&, const HostVector&) -> void { membership_updated.ready(); });\n\n  cluster.initialize([] {});\n\n  // TTL is recorded when the DNS response is successful and not empty\n  EXPECT_CALL(membership_updated, ready());\n  EXPECT_CALL(*resolver.timer_, enableTimer(std::chrono::milliseconds(5000), _));\n  resolver.dns_callback_(\n      Network::DnsResolver::ResolutionStatus::Success,\n      TestUtility::makeDnsResponse({\"192.168.1.1\", \"192.168.1.2\"}, std::chrono::seconds(5)));\n\n  // If the response is successful but empty, the cluster uses the cluster configured refresh rate.\n  EXPECT_CALL(membership_updated, ready());\n  EXPECT_CALL(*resolver.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Success,\n                         TestUtility::makeDnsResponse({}, std::chrono::seconds(5)));\n\n  // On failure, the cluster uses the cluster configured refresh rate.\n  EXPECT_CALL(*resolver.timer_, enableTimer(std::chrono::milliseconds(4000), _));\n  
resolver.dns_callback_(Network::DnsResolver::ResolutionStatus::Failure,\n                         TestUtility::makeDnsResponse({}, std::chrono::seconds(5)));\n}\n\n// Ensures that HTTP/2 user defined SETTINGS parameter validation is enforced on clusters.\nTEST_F(StrictDnsClusterImplTest, Http2UserDefinedSettingsParametersValidation) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: strict_dns\n    dns_refresh_rate: 4s\n    dns_failure_refresh_rate:\n      base_interval: 7s\n      max_interval: 10s\n    lb_policy: round_robin\n    circuit_breakers:\n      thresholds:\n      - priority: DEFAULT\n        max_connections: 43\n        max_pending_requests: 57\n        max_requests: 50\n        max_retries: 10\n      - priority: HIGH\n        max_connections: 1\n        max_pending_requests: 2\n        max_requests: 3\n        max_retries: 4\n    max_requests_per_connection: 3\n    protocol_selection: USE_DOWNSTREAM_PROTOCOL\n    http2_protocol_options:\n      hpack_table_size: 2048\n      custom_settings_parameters: { identifier: 1, value: 1024 }\n    http_protocol_options:\n      header_key_format:\n        proper_case_words: {}\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: localhost1\n                    port_value: 11001                     \n            - endpoint:\n                address:\n                  socket_address:\n                    address: localhost2\n                    port_value: 11002\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  EXPECT_THROW_WITH_REGEX(\n      StrictDnsClusterImpl(cluster_config, runtime_, dns_resolver_, factory_context,\n                           std::move(scope), false),\n      EnvoyException,\n      R\"(the \\{hpack_table_size\\} HTTP/2 SETTINGS parameter\\(s\\) can not be configured through)\"\n      \" both\");\n}\n\nTEST(HostImplTest, HostCluster) {\n  MockClusterMockPrioritySet cluster;\n  HostSharedPtr host = makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", 1);\n  EXPECT_EQ(cluster.info_.get(), &host->cluster());\n  EXPECT_EQ(\"\", host->hostname());\n  EXPECT_FALSE(host->canary());\n  EXPECT_EQ(\"\", host->locality().zone());\n}\n\nTEST(HostImplTest, Weight) {\n  MockClusterMockPrioritySet cluster;\n\n  EXPECT_EQ(1U, makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", 0)->weight());\n  EXPECT_EQ(128U, makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", 128)->weight());\n  EXPECT_EQ(std::numeric_limits<uint32_t>::max(),\n            makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", std::numeric_limits<uint32_t>::max())\n                ->weight());\n\n  HostSharedPtr host = makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", 50);\n  EXPECT_EQ(50U, host->weight());\n  host->weight(51);\n  EXPECT_EQ(51U, host->weight());\n  host->weight(0);\n  EXPECT_EQ(1U, host->weight());\n  host->weight(std::numeric_limits<uint32_t>::max());\n  EXPECT_EQ(std::numeric_limits<uint32_t>::max(), host->weight());\n}\n\nTEST(HostImplTest, HostnameCanaryAndLocality) {\n  MockClusterMockPrioritySet cluster;\n  envoy::config::core::v3::Metadata metadata;\n  Config::Metadata::mutableMetadataValue(metadata, 
Config::MetadataFilters::get().ENVOY_LB,\n                                         Config::MetadataEnvoyLbKeys::get().CANARY)\n      .set_bool_value(true);\n  envoy::config::core::v3::Locality locality;\n  locality.set_region(\"oceania\");\n  locality.set_zone(\"hello\");\n  locality.set_sub_zone(\"world\");\n  HostImpl host(cluster.info_, \"lyft.com\", Network::Utility::resolveUrl(\"tcp://10.0.0.1:1234\"),\n                std::make_shared<const envoy::config::core::v3::Metadata>(metadata), 1, locality,\n                envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 1,\n                envoy::config::core::v3::UNKNOWN);\n  EXPECT_EQ(cluster.info_.get(), &host.cluster());\n  EXPECT_EQ(\"lyft.com\", host.hostname());\n  EXPECT_TRUE(host.canary());\n  EXPECT_EQ(\"oceania\", host.locality().region());\n  EXPECT_EQ(\"hello\", host.locality().zone());\n  EXPECT_EQ(\"world\", host.locality().sub_zone());\n  EXPECT_EQ(1, host.priority());\n}\n\nTEST(HostImplTest, HealthFlags) {\n  MockClusterMockPrioritySet cluster;\n  HostSharedPtr host = makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", 1);\n\n  // To begin with, no flags are set so we're healthy.\n  EXPECT_EQ(Host::Health::Healthy, host->health());\n\n  // Setting an unhealthy flag make the host unhealthy.\n  host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  EXPECT_EQ(Host::Health::Unhealthy, host->health());\n\n  // Setting a degraded flag on an unhealthy host has no effect.\n  host->healthFlagSet(Host::HealthFlag::DEGRADED_ACTIVE_HC);\n  EXPECT_EQ(Host::Health::Unhealthy, host->health());\n\n  // If the degraded flag is the only thing set, host is degraded.\n  host->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC);\n  EXPECT_EQ(Host::Health::Degraded, host->health());\n\n  // If the EDS and active degraded flag is set, host is degraded.\n  host->healthFlagSet(Host::HealthFlag::DEGRADED_EDS_HEALTH);\n  EXPECT_EQ(Host::Health::Degraded, host->health());\n\n  // If only the 
EDS degraded is set, host is degraded.\n  host->healthFlagClear(Host::HealthFlag::DEGRADED_ACTIVE_HC);\n  EXPECT_EQ(Host::Health::Degraded, host->health());\n\n  // If EDS and failed active hc is set, host is unhealthy.\n  host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  EXPECT_EQ(Host::Health::Unhealthy, host->health());\n}\n\n// Test that it's not possible to do a HostDescriptionImpl with a unix\n// domain socket host and a health check config with non-zero port.\n// This is a regression test for oss-fuzz issue\n// https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11095\nTEST(HostImplTest, HealthPipeAddress) {\n  EXPECT_THROW_WITH_MESSAGE(\n      {\n        std::shared_ptr<MockClusterInfo> info{new NiceMock<MockClusterInfo>()};\n        envoy::config::endpoint::v3::Endpoint::HealthCheckConfig config;\n        config.set_port_value(8000);\n        HostDescriptionImpl descr(info, \"\", Network::Utility::resolveUrl(\"unix://foo\"), nullptr,\n                                  envoy::config::core::v3::Locality().default_instance(), config,\n                                  1);\n      },\n      EnvoyException, \"Invalid host configuration: non-zero port for non-IP address\");\n}\n\n// Test that hostname flag from the health check config propagates.\nTEST(HostImplTest, HealthcheckHostname) {\n  std::shared_ptr<MockClusterInfo> info{new NiceMock<MockClusterInfo>()};\n  envoy::config::endpoint::v3::Endpoint::HealthCheckConfig config;\n  config.set_hostname(\"foo\");\n  HostDescriptionImpl descr(info, \"\", Network::Utility::resolveUrl(\"tcp://1.2.3.4:80\"), nullptr,\n                            envoy::config::core::v3::Locality().default_instance(), config, 1);\n  EXPECT_EQ(\"foo\", descr.hostnameForHealthChecks());\n}\n\nclass StaticClusterImplTest : public testing::Test, public UpstreamImplTestBase {};\n\nTEST_F(StaticClusterImplTest, InitialHosts) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    type: 
STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 443\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(\"\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname());\n  EXPECT_FALSE(cluster.info()->addedViaApi());\n}\n\nTEST_F(StaticClusterImplTest, LoadAssignmentEmptyHostname) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      policy:\n        overprovisioning_factor: 100\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 10.0.0.1\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(\"\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname());\n  EXPECT_EQ(100, cluster.prioritySet().hostSetsPerPriority()[0]->overprovisioningFactor());\n  EXPECT_FALSE(cluster.info()->addedViaApi());\n}\n\nTEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostname) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            hostname: foo\n            address:\n              socket_address:\n                address: 10.0.0.1\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(\"foo\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname());\n  EXPECT_FALSE(cluster.info()->addedViaApi());\n}\n\nTEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostnameWithHealthChecks) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            hostname: foo\n            address:\n              socket_address:\n                address: 10.0.0.1\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n              hostname: \"foo2\"\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(\"foo\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname());\n  EXPECT_EQ(\"foo2\",\n            cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostnameForHealthChecks());\n  EXPECT_FALSE(cluster.info()->addedViaApi());\n}\n\nTEST_F(StaticClusterImplTest, LoadAssignmentMultiplePriorities) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      endpoints:\n      - priority: 0\n        lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 10.0.0.1\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n        - endpoint:\n            address:\n              socket_address:\n                address: 10.0.0.2\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n\n      - priority: 1\n        lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 10.0.0.3\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", 
cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[1]->healthyHosts().size());\n  EXPECT_EQ(\"\", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname());\n  EXPECT_FALSE(cluster.info()->addedViaApi());\n}\n\nTEST_F(StaticClusterImplTest, LoadAssignmentLocality) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      endpoints:\n      - locality:\n          region: oceania\n          zone: hello\n          sub_zone: world\n        lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 10.0.0.1\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n        - endpoint:\n            address:\n              socket_address:\n                address: 10.0.0.2\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n\n  auto& hosts = cluster.prioritySet().hostSetsPerPriority()[0]->hosts();\n  EXPECT_EQ(hosts.size(), 2);\n  for (int i = 0; i < 2; ++i) {\n    const auto& locality = hosts[i]->locality();\n    EXPECT_EQ(\"oceania\", locality.region());\n    EXPECT_EQ(\"hello\", locality.zone());\n    EXPECT_EQ(\"world\", locality.sub_zone());\n  }\n  EXPECT_EQ(nullptr, cluster.prioritySet().hostSetsPerPriority()[0]->localityWeights());\n  EXPECT_FALSE(cluster.info()->addedViaApi());\n}\n\n// Validates that setting an EDS health value through LoadAssignment is honored for static\n// clusters.\nTEST_F(StaticClusterImplTest, LoadAssignmentEdsHealth) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      policy:\n        overprovisioning_factor: 100\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 10.0.0.1\n                port_value: 443\n            health_check_config:\n              port_value: 8000\n          health_status: DEGRADED\n  )EOF\";\n\n  NiceMock<MockClusterManager> cm;\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->degradedHosts().size());\n  EXPECT_EQ(Host::Health::Degraded,\n            cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->health());\n}\n\nTEST_F(StaticClusterImplTest, AltStatName) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    alt_stat_name: staticcluster_stats\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 443\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n  // Increment a stat and verify it is emitted with alt_stat_name\n  cluster.info()->stats().upstream_rq_total_.inc();\n  EXPECT_EQ(1UL, stats_.counter(\"cluster.staticcluster_stats.upstream_rq_total\").value());\n}\n\nTEST_F(StaticClusterImplTest, RingHash) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    type: static\n    lb_policy: ring_hash\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 11001\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), true);\n  cluster.initialize([] {});\n\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(LoadBalancerType::RingHash, cluster.info()->lbType());\n  EXPECT_TRUE(cluster.info()->addedViaApi());\n}\n\nTEST_F(StaticClusterImplTest, OutlierDetector) {\n  const std::string yaml = R\"EOF(\n    name: addressportconfig\n    connect_timeout: 0.25s\n    type: static\n    lb_policy: random\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 11001                  \n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 11002\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n\n  Outlier::MockDetector* detector = new Outlier::MockDetector();\n  EXPECT_CALL(*detector, addChangedStateCb(_));\n  cluster.setOutlierDetector(Outlier::DetectorSharedPtr{detector});\n  cluster.initialize([] {});\n\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(2UL, cluster.info()->stats().membership_healthy_.value());\n\n  // Set a single host as having failed and fire outlier detector callbacks. This should result\n  // in only a single healthy host.\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->outlierDetector().putHttpResponseCode(\n      503);\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthFlagSet(\n      Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  detector->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_NE(cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts()[0],\n            cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n\n  // Bring the host back online.\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthFlagClear(\n      Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  detector->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(2UL, 
cluster.info()->stats().membership_healthy_.value());\n}\n\nTEST_F(StaticClusterImplTest, HealthyStat) {\n  const std::string yaml = R\"EOF(\n    name: addressportconfig\n    connect_timeout: 0.25s\n    type: static\n    lb_policy: random\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 11001                  \n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 11002\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n\n  Outlier::MockDetector* outlier_detector = new NiceMock<Outlier::MockDetector>();\n  cluster.setOutlierDetector(Outlier::DetectorSharedPtr{outlier_detector});\n\n  std::shared_ptr<MockHealthChecker> health_checker(new NiceMock<MockHealthChecker>());\n  cluster.setHealthChecker(health_checker);\n\n  ReadyWatcher initialized;\n  cluster.initialize([&initialized] { initialized.ready(); });\n\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(0UL, 
cluster.info()->stats().membership_degraded_.value());\n\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthFlagClear(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0],\n                               HealthTransition::Changed);\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->healthFlagClear(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  EXPECT_CALL(initialized, ready());\n  health_checker->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1],\n                               HealthTransition::Changed);\n\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthFlagSet(\n      Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  outlier_detector->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_degraded_.value());\n\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthFlagSet(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0],\n                               HealthTransition::Changed);\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_degraded_.value());\n\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthFlagClear(\n      Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  outlier_detector->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, 
cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_degraded_.value());\n\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthFlagClear(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0],\n                               HealthTransition::Changed);\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(2UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_degraded_.value());\n\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthFlagSet(\n      Host::HealthFlag::FAILED_OUTLIER_CHECK);\n  outlier_detector->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]);\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_degraded_.value());\n\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->healthFlagSet(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1],\n                               HealthTransition::Changed);\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_degraded_.value());\n\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->healthFlagSet(\n      Host::HealthFlag::DEGRADED_ACTIVE_HC);\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->healthFlagClear(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1],\n                               
HealthTransition::Changed);\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->degradedHosts().size());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(1UL, cluster.info()->stats().membership_degraded_.value());\n\n  // Mark the endpoint as unhealthy. This should decrement the degraded stat.\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->healthFlagSet(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1],\n                               HealthTransition::Changed);\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->degradedHosts().size());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_degraded_.value());\n\n  // Go back to degraded.\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->healthFlagClear(\n      Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1],\n                               HealthTransition::Changed);\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->degradedHosts().size());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(1UL, cluster.info()->stats().membership_degraded_.value());\n\n  // Then go healthy.\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1]->healthFlagClear(\n      Host::HealthFlag::DEGRADED_ACTIVE_HC);\n  health_checker->runCallbacks(cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[1],\n                               HealthTransition::Changed);\n  EXPECT_EQ(1UL, 
cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->degradedHosts().size());\n  EXPECT_EQ(1UL, cluster.info()->stats().membership_healthy_.value());\n  EXPECT_EQ(0UL, cluster.info()->stats().membership_degraded_.value());\n}\n\nTEST_F(StaticClusterImplTest, UrlConfig) {\n  const std::string yaml = R\"EOF(\n    name: addressportconfig\n    connect_timeout: 0.25s\n    type: static\n    lb_policy: random\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.1\n                    port_value: 11001                  \n            - endpoint:\n                address:\n                  socket_address:\n                    address: 10.0.0.2\n                    port_value: 11002\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n\n  EXPECT_EQ(1024U, cluster.info()->resourceManager(ResourcePriority::Default).connections().max());\n  EXPECT_EQ(1024U,\n            cluster.info()->resourceManager(ResourcePriority::Default).pendingRequests().max());\n  EXPECT_EQ(1024U, cluster.info()->resourceManager(ResourcePriority::Default).requests().max());\n  EXPECT_EQ(3U, cluster.info()->resourceManager(ResourcePriority::Default).retries().max());\n  EXPECT_EQ(1024U, cluster.info()->resourceManager(ResourcePriority::High).connections().max());\n  EXPECT_EQ(1024U, cluster.info()->resourceManager(ResourcePriority::High).pendingRequests().max());\n  EXPECT_EQ(1024U, cluster.info()->resourceManager(ResourcePriority::High).requests().max());\n  EXPECT_EQ(3U, cluster.info()->resourceManager(ResourcePriority::High).retries().max());\n  EXPECT_EQ(0U, cluster.info()->maxRequestsPerConnection());\n  EXPECT_EQ(::Envoy::Http2::Utility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE,\n            cluster.info()->http2Options().hpack_table_size().value());\n  EXPECT_EQ(LoadBalancerType::Random, cluster.info()->lbType());\n  EXPECT_THAT(\n      std::list<std::string>({\"10.0.0.1:11001\", \"10.0.0.2:11002\"}),\n      ContainerEq(hostListToAddresses(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())));\n  EXPECT_EQ(2UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n  EXPECT_EQ(1UL,\n            
cluster.prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n  cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->healthChecker().setUnhealthy();\n}\n\nTEST_F(StaticClusterImplTest, UnsupportedLBType) {\n  const std::string yaml = R\"EOF(\n    name: addressportconfig\n    connect_timeout: 0.25s\n    type: static\n    lb_policy: fakelbtype\n    load_assignment:\n      cluster_name: addressportconfig\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address: { address: 192.168.1.1, port_value: 22 }\n              socket_address: { address: 192.168.1.2, port_value: 44 }\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      {\n        envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n        Envoy::Stats::ScopePtr scope =\n            stats_.createScope(fmt::format(\"cluster.{}.\", cluster_config.alt_stat_name().empty()\n                                                              ? 
cluster_config.name()\n                                                              : cluster_config.alt_stat_name()));\n        Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n            admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n            singleton_manager_, tls_, validation_visitor_, *api_);\n        StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope),\n                                  false);\n      },\n      EnvoyException,\n      \"Protobuf message (type envoy.config.cluster.v3.Cluster reason \"\n      \"INVALID_ARGUMENT:(lb_policy): invalid \"\n      \"value \\\"fakelbtype\\\" for type TYPE_ENUM) has unknown fields\");\n}\n\nTEST_F(StaticClusterImplTest, MalformedHostIP) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STATIC\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  EXPECT_THROW_WITH_MESSAGE(\n      StaticClusterImpl(cluster_config, runtime_, factory_context, std::move(scope), false),\n      EnvoyException,\n      \"malformed IP address: foo.bar.com. 
Consider setting resolver_name or \"\n      \"setting cluster type to 'STRICT_DNS' or 'LOGICAL_DNS'\");\n}\n\n// Test for oss-fuzz issue #11329\n// (https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11329). If no\n// hosts were specified in endpoints but a priority value > 0 there, a\n// crash would happen.\nTEST_F(StaticClusterImplTest, NoHostsTest) {\n  const std::string yaml = R\"EOF(\n    name: staticcluster\n    connect_timeout: 0.25s\n    load_assignment:\n      cluster_name: foo\n      endpoints:\n      - priority: 1\n  )EOF\";\n\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope =\n      stats_.createScope(fmt::format(\"cluster.{}.\", cluster_config.name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n  StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false);\n  cluster.initialize([] {});\n\n  EXPECT_EQ(0UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n}\n\nTEST_F(StaticClusterImplTest, SourceAddressPriority) {\n  envoy::config::cluster::v3::Cluster config;\n  config.set_name(\"staticcluster\");\n  config.mutable_connect_timeout();\n\n  {\n    // If the cluster manager gets a source address from the bootstrap proto, use it.\n    cm_.bind_config_.mutable_source_address()->set_address(\"1.2.3.5\");\n    Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n        \"cluster.{}.\", config.alt_stat_name().empty() ? 
config.name() : config.alt_stat_name()));\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n    StaticClusterImpl cluster(config, runtime_, factory_context, std::move(scope), false);\n    EXPECT_EQ(\"1.2.3.5:0\", cluster.info()->sourceAddress()->asString());\n  }\n\n  const std::string cluster_address = \"5.6.7.8\";\n  config.mutable_upstream_bind_config()->mutable_source_address()->set_address(cluster_address);\n  {\n    // Verify source address from cluster config is used when present.\n    Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n        \"cluster.{}.\", config.alt_stat_name().empty() ? config.name() : config.alt_stat_name()));\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n    StaticClusterImpl cluster(config, runtime_, factory_context, std::move(scope), false);\n    EXPECT_EQ(cluster_address, cluster.info()->sourceAddress()->ip()->addressAsString());\n  }\n\n  {\n    // The source address from cluster config takes precedence over one from the bootstrap proto.\n    cm_.bind_config_.mutable_source_address()->set_address(\"1.2.3.5\");\n    Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n        \"cluster.{}.\", config.alt_stat_name().empty() ? 
config.name() : config.alt_stat_name()));\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n    StaticClusterImpl cluster(config, runtime_, factory_context, std::move(scope), false);\n    EXPECT_EQ(cluster_address, cluster.info()->sourceAddress()->ip()->addressAsString());\n  }\n}\n\nclass ClusterImplTest : public testing::Test, public UpstreamImplTestBase {};\n\n// Test that the correct feature() is set when close_connections_on_host_health_failure is\n// configured.\nTEST_F(ClusterImplTest, CloseConnectionsOnHostHealthFailure) {\n  auto dns_resolver = std::make_shared<Network::MockDnsResolver>();\n  ReadyWatcher initialized;\n\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    close_connections_on_host_health_failure: true\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n  )EOF\";\n  envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml);\n  Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format(\n      \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                            : cluster_config.alt_stat_name()));\n  Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n      admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_,\n      singleton_manager_, tls_, validation_visitor_, *api_);\n\n  StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver, factory_context,\n                               std::move(scope), false);\n  EXPECT_TRUE(cluster.info()->features() &\n              ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE);\n}\n\nclass TestBatchUpdateCb : public PrioritySet::BatchUpdateCb {\npublic:\n  TestBatchUpdateCb(HostVectorSharedPtr hosts, HostsPerLocalitySharedPtr hosts_per_locality)\n      : hosts_(hosts), hosts_per_locality_(hosts_per_locality) {}\n\n  void batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) override {\n    // Add the host from P1 to P0.\n    {\n      HostVector hosts_added{hosts_->front()};\n      HostVector hosts_removed{};\n      host_update_cb.updateHosts(\n          0,\n          updateHostsParams(hosts_, hosts_per_locality_,\n                            std::make_shared<const HealthyHostVector>(*hosts_),\n                            hosts_per_locality_),\n          {}, hosts_added, hosts_removed, absl::nullopt);\n    }\n\n    // Remove the host from P1.\n    {\n      HostVectorSharedPtr empty_hosts = std::make_shared<HostVector>();\n      HostVector hosts_added{};\n      HostVector hosts_removed{hosts_->front()};\n      host_update_cb.updateHosts(\n          1,\n          updateHostsParams(empty_hosts, HostsPerLocalityImpl::empty(),\n                            std::make_shared<const HealthyHostVector>(*empty_hosts),\n                            HostsPerLocalityImpl::empty()),\n          {}, hosts_added, hosts_removed, absl::nullopt);\n    }\n  }\n\n  HostVectorSharedPtr hosts_;\n  HostsPerLocalitySharedPtr hosts_per_locality_;\n};\n\n// 
Test creating and extending a priority set.\nTEST(PrioritySet, Extend) {\n  PrioritySetImpl priority_set;\n  priority_set.getOrCreateHostSet(0);\n\n  uint32_t priority_changes = 0;\n  uint32_t membership_changes = 0;\n  uint32_t last_priority = 0;\n  priority_set.addPriorityUpdateCb(\n      [&](uint32_t priority, const HostVector&, const HostVector&) -> void {\n        last_priority = priority;\n        ++priority_changes;\n      });\n  priority_set.addMemberUpdateCb(\n      [&](const HostVector&, const HostVector&) -> void { ++membership_changes; });\n\n  // The initial priority set starts with priority level 0..\n  EXPECT_EQ(1, priority_set.hostSetsPerPriority().size());\n  EXPECT_EQ(0, priority_set.hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0, priority_set.hostSetsPerPriority()[0]->priority());\n\n  // Add priorities 1 and 2, ensure the callback is called, and that the new\n  // host sets are created with the correct priority.\n  EXPECT_EQ(0, priority_changes);\n  EXPECT_EQ(0, membership_changes);\n  EXPECT_EQ(0, priority_set.getOrCreateHostSet(2).hosts().size());\n  EXPECT_EQ(3, priority_set.hostSetsPerPriority().size());\n  // No-op host set creation does not trigger callbacks.\n  EXPECT_EQ(0, priority_changes);\n  EXPECT_EQ(0, membership_changes);\n  EXPECT_EQ(last_priority, 0);\n  EXPECT_EQ(1, priority_set.hostSetsPerPriority()[1]->priority());\n  EXPECT_EQ(2, priority_set.hostSetsPerPriority()[2]->priority());\n\n  // Now add hosts for priority 1, and ensure they're added and subscribers are notified.\n  std::shared_ptr<MockClusterInfo> info{new NiceMock<MockClusterInfo>()};\n  HostVectorSharedPtr hosts(new HostVector({makeTestHost(info, \"tcp://127.0.0.1:80\")}));\n  HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared<HostsPerLocalityImpl>();\n  {\n    HostVector hosts_added{hosts->front()};\n    HostVector hosts_removed{};\n\n    priority_set.updateHosts(1,\n                             updateHostsParams(hosts, 
hosts_per_locality,\n                                               std::make_shared<const HealthyHostVector>(*hosts),\n                                               hosts_per_locality),\n                             {}, hosts_added, hosts_removed, absl::nullopt);\n  }\n  EXPECT_EQ(1, priority_changes);\n  EXPECT_EQ(1, membership_changes);\n  EXPECT_EQ(last_priority, 1);\n  EXPECT_EQ(1, priority_set.hostSetsPerPriority()[1]->hosts().size());\n\n  // Test iteration.\n  int i = 0;\n  for (auto& host_set : priority_set.hostSetsPerPriority()) {\n    EXPECT_EQ(host_set.get(), priority_set.hostSetsPerPriority()[i++].get());\n  }\n\n  // Test batch host updates. Verify that we can move a host without triggering intermediate host\n  // updates.\n\n  // We're going to do a noop host change, so add a callback to assert that we're not announcing\n  // any host changes.\n  priority_set.addMemberUpdateCb([&](const HostVector& added, const HostVector& removed) -> void {\n    EXPECT_TRUE(added.empty() && removed.empty());\n  });\n\n  TestBatchUpdateCb batch_update(hosts, hosts_per_locality);\n  priority_set.batchHostUpdate(batch_update);\n\n  // We expect to see two priority changes, but only one membership change.\n  EXPECT_EQ(3, priority_changes);\n  EXPECT_EQ(2, membership_changes);\n}\n\nclass ClusterInfoImplTest : public testing::Test {\npublic:\n  ClusterInfoImplTest() : api_(Api::createApiForTest(stats_, random_)) {}\n\n  std::unique_ptr<StrictDnsClusterImpl> makeCluster(const std::string& yaml,\n                                                    bool avoid_boosting = true) {\n    cluster_config_ = parseClusterFromV3Yaml(yaml, avoid_boosting);\n    scope_ = stats_.createScope(fmt::format(\"cluster.{}.\", cluster_config_.alt_stat_name().empty()\n                                                               ? 
cluster_config_.name()\n                                                               : cluster_config_.alt_stat_name()));\n    factory_context_ = std::make_unique<Server::Configuration::TransportSocketFactoryContextImpl>(\n        admin_, ssl_context_manager_, *scope_, cm_, local_info_, dispatcher_, stats_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n\n    return std::make_unique<StrictDnsClusterImpl>(cluster_config_, runtime_, dns_resolver_,\n                                                  *factory_context_, std::move(scope_), false);\n  }\n\n  Stats::TestUtil::TestStore stats_;\n  Ssl::MockContextManager ssl_context_manager_;\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver_{new NiceMock<Network::MockDnsResolver>()};\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<MockClusterManager> cm_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  ReadyWatcher initialized_;\n  envoy::config::cluster::v3::Cluster cluster_config_;\n  Envoy::Stats::ScopePtr scope_;\n  std::unique_ptr<Server::Configuration::TransportSocketFactoryContextImpl> factory_context_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n};\n\nstruct Foo : public Envoy::Config::TypedMetadata::Object {};\n\nstruct Baz : public Envoy::Config::TypedMetadata::Object {\n  Baz(std::string n) : name(n) {}\n  std::string name;\n};\n\nclass BazFactory : public ClusterTypedMetadataFactory {\npublic:\n  std::string name() const override { return \"baz\"; }\n  // Returns nullptr (conversion failure) if d is empty.\n  std::unique_ptr<const Envoy::Config::TypedMetadata::Object>\n  parse(const ProtobufWkt::Struct& d) const override {\n    if (d.fields().find(\"name\") != 
d.fields().end()) {\n      return std::make_unique<Baz>(d.fields().at(\"name\").string_value());\n    }\n    throw EnvoyException(\"Cannot create a Baz when metadata is empty.\");\n  }\n};\n\n// Cluster metadata and common config retrieval.\nTEST_F(ClusterInfoImplTest, Metadata) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: MAGLEV\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n    metadata: { filter_metadata: { com.bar.foo: { baz: test_value },\n                                   baz: {name: meh } } }\n    common_lb_config:\n      healthy_panic_threshold:\n        value: 0.3\n  )EOF\";\n\n  BazFactory baz_factory;\n  Registry::InjectFactory<ClusterTypedMetadataFactory> registered_factory(baz_factory);\n  auto cluster = makeCluster(yaml);\n\n  EXPECT_EQ(\"meh\", cluster->info()->typedMetadata().get<Baz>(baz_factory.name())->name);\n  EXPECT_EQ(nullptr, cluster->info()->typedMetadata().get<Foo>(baz_factory.name()));\n  EXPECT_EQ(\"test_value\",\n            Config::Metadata::metadataValue(&cluster->info()->metadata(), \"com.bar.foo\", \"baz\")\n                .string_value());\n  EXPECT_EQ(0.3, cluster->info()->lbConfig().healthy_panic_threshold().value());\n  EXPECT_EQ(LoadBalancerType::Maglev, cluster->info()->lbType());\n}\n\n// Eds service_name is populated.\nTEST_F(ClusterInfoImplTest, EdsServiceNamePopulation) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: EDS\n    lb_policy: MAGLEV\n    eds_cluster_config:\n      service_name: service_foo\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    
port_value: 443\n    common_lb_config:\n      healthy_panic_threshold:\n        value: 0.3\n  )EOF\";\n  auto cluster = makeCluster(yaml);\n  EXPECT_EQ(cluster->info()->edsServiceName(), \"service_foo\");\n\n  const std::string unexpected_eds_config_yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: MAGLEV\n    eds_cluster_config:\n      service_name: service_foo\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n    common_lb_config:\n      healthy_panic_threshold:\n        value: 0.3\n  )EOF\";\n  EXPECT_THROW_WITH_MESSAGE(makeCluster(unexpected_eds_config_yaml), EnvoyException,\n                            \"eds_cluster_config set in a non-EDS cluster\");\n}\n\n// Typed metadata loading throws exception.\nTEST_F(ClusterInfoImplTest, BrokenTypedMetadata) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: MAGLEV\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n    metadata: { filter_metadata: { com.bar.foo: { baz: test_value },\n                                   baz: {boom: meh} } }\n    common_lb_config:\n      healthy_panic_threshold:\n        value: 0.3\n  )EOF\";\n\n  BazFactory baz_factory;\n  Registry::InjectFactory<ClusterTypedMetadataFactory> registered_factory(baz_factory);\n  EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException,\n                            \"Cannot create a Baz when metadata is empty.\");\n}\n\n// Cluster extension protocol options fails validation when configured for an unregistered filter.\nTEST_F(ClusterInfoImplTest, 
ExtensionProtocolOptionsForUnknownFilter) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n    typed_extension_protocol_options:\n      no_such_filter:\n        \"@type\": type.googleapis.com/google.protobuf.Struct\n        value:\n          option: \"value\"\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException,\n                            \"Didn't find a registered network or http filter implementation for \"\n                            \"name: 'no_such_filter'\");\n}\n\nTEST_F(ClusterInfoImplTest, TypedExtensionProtocolOptionsForUnknownFilter) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n    typed_extension_protocol_options:\n      no_such_filter:\n        \"@type\": type.googleapis.com/google.protobuf.Struct\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException,\n                            \"Didn't find a registered network or http filter implementation for \"\n                            \"name: 'no_such_filter'\");\n}\n\n// This test case can't be converted for V3 API as it is specific for extension_protocol_options\nTEST_F(ClusterInfoImplTest, OneofExtensionProtocolOptionsForUnknownFilter) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    hosts: [{ socket_address: { address: foo.bar.com, 
port_value: 443 }}]\n    extension_protocol_options:\n      no_such_filter: { option: value }\n    typed_extension_protocol_options:\n      no_such_filter:\n        \"@type\": type.googleapis.com/google.protobuf.Struct\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException,\n                            \"Only one of typed_extension_protocol_options or \"\n                            \"extension_protocol_options can be specified\");\n}\n\nTEST_F(ClusterInfoImplTest, TestTrackRequestResponseSizesNotSetInConfig) {\n  const std::string yaml_disabled = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n  )EOF\";\n\n  auto cluster = makeCluster(yaml_disabled);\n  // By default, histograms tracking request/response sizes are not published.\n  EXPECT_FALSE(cluster->info()->requestResponseSizeStats().has_value());\n\n  const std::string yaml_disabled2 = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    track_cluster_stats: { timeout_budgets : true }\n  )EOF\";\n\n  cluster = makeCluster(yaml_disabled2);\n  EXPECT_FALSE(cluster->info()->requestResponseSizeStats().has_value());\n\n  const std::string yaml_disabled3 = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    track_cluster_stats: { request_response_sizes : false }\n  )EOF\";\n\n  cluster = makeCluster(yaml_disabled3);\n  EXPECT_FALSE(cluster->info()->requestResponseSizeStats().has_value());\n}\n\nTEST_F(ClusterInfoImplTest, TestTrackRequestResponseSizes) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    track_cluster_stats: { request_response_sizes : true }\n  )EOF\";\n\n  auto cluster = makeCluster(yaml);\n  // The stats should be created.\n  ASSERT_TRUE(cluster->info()->requestResponseSizeStats().has_value());\n\n  
Upstream::ClusterRequestResponseSizeStats req_resp_stats =\n      cluster->info()->requestResponseSizeStats()->get();\n\n  EXPECT_EQ(Stats::Histogram::Unit::Bytes, req_resp_stats.upstream_rq_headers_size_.unit());\n  EXPECT_EQ(Stats::Histogram::Unit::Bytes, req_resp_stats.upstream_rq_body_size_.unit());\n  EXPECT_EQ(Stats::Histogram::Unit::Bytes, req_resp_stats.upstream_rs_body_size_.unit());\n}\n\nTEST_F(ClusterInfoImplTest, TestTrackRemainingResourcesGauges) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n\n    circuit_breakers:\n      thresholds:\n      - priority: DEFAULT\n        max_connections: 1\n        max_pending_requests: 2\n        max_requests: 3\n        max_retries: 4\n        track_remaining: false\n      - priority: HIGH\n        max_connections: 1\n        max_pending_requests: 2\n        max_requests: 3\n        max_retries: 4\n        track_remaining: true\n  )EOF\";\n\n  auto cluster = makeCluster(yaml);\n\n  // The value of a remaining resource gauge will always be 0 for the default\n  // priority circuit breaker since track_remaining is false\n  Stats::Gauge& default_remaining_retries =\n      stats_.gauge(\"cluster.name.circuit_breakers.default.remaining_retries\",\n                   Stats::Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(0U, default_remaining_retries.value());\n  cluster->info()->resourceManager(ResourcePriority::Default).retries().inc();\n  EXPECT_EQ(0U, default_remaining_retries.value());\n  cluster->info()->resourceManager(ResourcePriority::Default).retries().dec();\n  EXPECT_EQ(0U, default_remaining_retries.value());\n\n  // This gauge will be correctly set since we have opted in to tracking remaining\n  // resource gauges in the high priority circuit breaker.\n  Stats::Gauge& high_remaining_retries = stats_.gauge(\n      \"cluster.name.circuit_breakers.high.remaining_retries\", Stats::Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(4U, 
high_remaining_retries.value());\n  cluster->info()->resourceManager(ResourcePriority::High).retries().inc();\n  EXPECT_EQ(3U, high_remaining_retries.value());\n  cluster->info()->resourceManager(ResourcePriority::High).retries().dec();\n  EXPECT_EQ(4U, high_remaining_retries.value());\n}\n\nTEST_F(ClusterInfoImplTest, Timeouts) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: MAGLEV\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n    metadata: { filter_metadata: { com.bar.foo: { baz: test_value },\n                                   baz: {name: meh } } }\n    common_lb_config:\n      healthy_panic_threshold:\n        value: 0.3\n  )EOF\";\n\n  BazFactory baz_factory;\n  Registry::InjectFactory<ClusterTypedMetadataFactory> registered_factory(baz_factory);\n  auto cluster1 = makeCluster(yaml);\n  ASSERT_TRUE(cluster1->info()->idleTimeout().has_value());\n  EXPECT_EQ(std::chrono::hours(1), cluster1->info()->idleTimeout().value());\n\n  const std::string explicit_timeout = R\"EOF(\n    common_http_protocol_options:\n      idle_timeout: 1s\n  )EOF\";\n\n  auto cluster2 = makeCluster(yaml + explicit_timeout);\n  ASSERT_TRUE(cluster2->info()->idleTimeout().has_value());\n  EXPECT_EQ(std::chrono::seconds(1), cluster2->info()->idleTimeout().value());\n\n  const std::string no_timeout = R\"EOF(\n    common_http_protocol_options:\n      idle_timeout: 0s\n  )EOF\";\n  auto cluster3 = makeCluster(yaml + no_timeout);\n  EXPECT_FALSE(cluster3->info()->idleTimeout().has_value());\n}\n\nTEST_F(ClusterInfoImplTest, TestTrackTimeoutBudgetsNotSetInConfig) {\n  // Check that without the flag specified, the histogram is null.\n  const std::string yaml_disabled = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: 
STRICT_DNS\n    lb_policy: ROUND_ROBIN\n  )EOF\";\n\n  auto cluster = makeCluster(yaml_disabled);\n  // The stats will be null if they have not been explicitly turned on.\n  EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value());\n\n  const std::string yaml_disabled2 = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    track_cluster_stats: { request_response_sizes : true }\n  )EOF\";\n\n  cluster = makeCluster(yaml_disabled2);\n  EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value());\n\n  const std::string yaml_disabled3 = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    track_cluster_stats: { timeout_budgets : false }\n  )EOF\";\n\n  cluster = makeCluster(yaml_disabled3);\n  EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value());\n}\n\nTEST_F(ClusterInfoImplTest, TestTrackTimeoutBudgets) {\n  // Check that with the flag, the histogram is created.\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    track_cluster_stats: { timeout_budgets : true }\n  )EOF\";\n\n  auto cluster = makeCluster(yaml);\n  // The stats should be created.\n  ASSERT_TRUE(cluster->info()->timeoutBudgetStats().has_value());\n\n  Upstream::ClusterTimeoutBudgetStats tb_stats = cluster->info()->timeoutBudgetStats()->get();\n  EXPECT_EQ(Stats::Histogram::Unit::Unspecified,\n            tb_stats.upstream_rq_timeout_budget_percent_used_.unit());\n  EXPECT_EQ(Stats::Histogram::Unit::Unspecified,\n            tb_stats.upstream_rq_timeout_budget_per_try_percent_used_.unit());\n}\n\nTEST_F(ClusterInfoImplTest, DEPRECATED_FEATURE_TEST(TestTrackTimeoutBudgetsOld)) {\n  // Check that without the flag specified, the histogram is null.\n  const std::string yaml_disabled = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n  )EOF\";\n\n  
auto cluster = makeCluster(yaml_disabled);\n  // The stats will be null if they have not been explicitly turned on.\n  EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value());\n\n  // Check that with the flag, the histogram is created.\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    track_timeout_budgets: true\n  )EOF\";\n\n  cluster = makeCluster(yaml);\n  // The stats should be created.\n  ASSERT_TRUE(cluster->info()->timeoutBudgetStats().has_value());\n\n  Upstream::ClusterTimeoutBudgetStats tb_stats = cluster->info()->timeoutBudgetStats()->get();\n  EXPECT_EQ(Stats::Histogram::Unit::Unspecified,\n            tb_stats.upstream_rq_timeout_budget_percent_used_.unit());\n  EXPECT_EQ(Stats::Histogram::Unit::Unspecified,\n            tb_stats.upstream_rq_timeout_budget_per_try_percent_used_.unit());\n}\n\n// Validates HTTP2 SETTINGS config.\nTEST_F(ClusterInfoImplTest, Http2ProtocolOptions) {\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    http2_protocol_options:\n      hpack_table_size: 2048\n      initial_stream_window_size: 65536\n      custom_settings_parameters:\n        - identifier: 0x10\n          value: 10\n        - identifier: 0x12\n          value: 12\n  )EOF\";\n\n  auto cluster = makeCluster(yaml);\n  EXPECT_EQ(cluster->info()->http2Options().hpack_table_size().value(), 2048);\n  EXPECT_EQ(cluster->info()->http2Options().initial_stream_window_size().value(), 65536);\n  EXPECT_EQ(cluster->info()->http2Options().custom_settings_parameters()[0].identifier().value(),\n            0x10);\n  EXPECT_EQ(cluster->info()->http2Options().custom_settings_parameters()[0].value().value(), 10);\n  EXPECT_EQ(cluster->info()->http2Options().custom_settings_parameters()[1].identifier().value(),\n            0x12);\n  
EXPECT_EQ(cluster->info()->http2Options().custom_settings_parameters()[1].value().value(), 12);\n}\n\nclass TestFilterConfigFactoryBase {\npublic:\n  TestFilterConfigFactoryBase(\n      std::function<ProtobufTypes::MessagePtr()> empty_proto,\n      std::function<Upstream::ProtocolOptionsConfigConstSharedPtr(const Protobuf::Message&)> config)\n      : empty_proto_(empty_proto), config_(config) {}\n\n  ProtobufTypes::MessagePtr createEmptyProtocolOptionsProto() { return empty_proto_(); }\n  Upstream::ProtocolOptionsConfigConstSharedPtr\n  createProtocolOptionsConfig(const Protobuf::Message& msg) {\n    return config_(msg);\n  }\n\n  std::function<ProtobufTypes::MessagePtr()> empty_proto_;\n  std::function<Upstream::ProtocolOptionsConfigConstSharedPtr(const Protobuf::Message&)> config_;\n};\n\nclass TestNetworkFilterConfigFactory\n    : public Server::Configuration::NamedNetworkFilterConfigFactory {\npublic:\n  TestNetworkFilterConfigFactory(TestFilterConfigFactoryBase& parent) : parent_(parent) {}\n\n  // NamedNetworkFilterConfigFactory\n  Network::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message&,\n                               Server::Configuration::FactoryContext&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  ProtobufTypes::MessagePtr createEmptyProtocolOptionsProto() override {\n    return parent_.createEmptyProtocolOptionsProto();\n  }\n  Upstream::ProtocolOptionsConfigConstSharedPtr\n  createProtocolOptionsConfig(const Protobuf::Message& msg,\n                              Server::Configuration::ProtocolOptionsFactoryContext&) override {\n    return parent_.createProtocolOptionsConfig(msg);\n  }\n  std::string name() const override { CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.test.filter\"); }\n  std::string configType() override { return \"\"; };\n\n  TestFilterConfigFactoryBase& parent_;\n};\n\nclass 
TestHttpFilterConfigFactory : public Server::Configuration::NamedHttpFilterConfigFactory {\npublic:\n  TestHttpFilterConfigFactory(TestFilterConfigFactoryBase& parent) : parent_(parent) {}\n\n  // NamedNetworkFilterConfigFactory\n  Http::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message&, const std::string&,\n                               Server::Configuration::FactoryContext&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n  ProtobufTypes::MessagePtr createEmptyRouteConfigProto() override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n  Router::RouteSpecificFilterConfigConstSharedPtr\n  createRouteSpecificFilterConfig(const Protobuf::Message&,\n                                  Server::Configuration::ServerFactoryContext&,\n                                  ProtobufMessage::ValidationVisitor&) override {\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  ProtobufTypes::MessagePtr createEmptyProtocolOptionsProto() override {\n    return parent_.createEmptyProtocolOptionsProto();\n  }\n  Upstream::ProtocolOptionsConfigConstSharedPtr\n  createProtocolOptionsConfig(const Protobuf::Message& msg,\n                              Server::Configuration::ProtocolOptionsFactoryContext&) override {\n    return parent_.createProtocolOptionsConfig(msg);\n  }\n  std::string name() const override { CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.test.filter\"); }\n  std::string configType() override { return \"\"; };\n\n  TestFilterConfigFactoryBase& parent_;\n};\nstruct TestFilterProtocolOptionsConfig : public Upstream::ProtocolOptionsConfig {};\n\n// Cluster extension protocol options fails validation when configured for filter that does not\n// support options.\nTEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForFilterWithoutOptions) {\n  TestFilterConfigFactoryBase factoryBase(\n      []() -> ProtobufTypes::MessagePtr { return nullptr; },\n      
[](const Protobuf::Message&) -> Upstream::ProtocolOptionsConfigConstSharedPtr {\n        return nullptr;\n      });\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n    typed_extension_protocol_options:\n      envoy.test.filter:\n        \"@type\": type.googleapis.com/google.protobuf.Struct\n        value:\n          option: \"value\"\n  )EOF\";\n\n  {\n    TestNetworkFilterConfigFactory factory(factoryBase);\n    Registry::InjectFactory<Server::Configuration::NamedNetworkFilterConfigFactory> registry(\n        factory);\n    EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException,\n                              \"filter envoy.test.filter does not support protocol options\");\n  }\n  {\n    TestHttpFilterConfigFactory factory(factoryBase);\n    Registry::InjectFactory<Server::Configuration::NamedHttpFilterConfigFactory> registry(factory);\n    EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException,\n                              \"filter envoy.test.filter does not support protocol options\");\n  }\n}\n\nTEST_F(ClusterInfoImplTest, TypedExtensionProtocolOptionsForFilterWithoutOptions) {\n  TestFilterConfigFactoryBase factoryBase(\n      []() -> ProtobufTypes::MessagePtr { return nullptr; },\n      [](const Protobuf::Message&) -> Upstream::ProtocolOptionsConfigConstSharedPtr {\n        return nullptr;\n      });\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n 
                   port_value: 443\n    typed_extension_protocol_options:\n      envoy.test.filter: { \"@type\": type.googleapis.com/google.protobuf.Struct }\n  )EOF\";\n\n  {\n    TestNetworkFilterConfigFactory factory(factoryBase);\n    Registry::InjectFactory<Server::Configuration::NamedNetworkFilterConfigFactory> registry(\n        factory);\n    EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException,\n                              \"filter envoy.test.filter does not support protocol options\");\n  }\n  {\n    TestHttpFilterConfigFactory factory(factoryBase);\n    Registry::InjectFactory<Server::Configuration::NamedHttpFilterConfigFactory> registry(factory);\n    EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException,\n                              \"filter envoy.test.filter does not support protocol options\");\n  }\n}\n\n// Cluster retrieval of typed extension protocol options.\nTEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForFilterWithOptions) {\n  auto protocol_options = std::make_shared<TestFilterProtocolOptionsConfig>();\n\n  TestFilterConfigFactoryBase factoryBase(\n      []() -> ProtobufTypes::MessagePtr { return std::make_unique<ProtobufWkt::Struct>(); },\n      [&](const Protobuf::Message& msg) -> Upstream::ProtocolOptionsConfigConstSharedPtr {\n        const auto& msg_struct = dynamic_cast<const ProtobufWkt::Struct&>(msg);\n        EXPECT_TRUE(msg_struct.fields().find(\"option\") != msg_struct.fields().end());\n\n        return protocol_options;\n      });\n\n  const std::string yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n    typed_extension_protocol_options:\n      envoy.test.filter:\n        \"@type\": 
type.googleapis.com/google.protobuf.Struct\n        value:\n          option: \"value\"\n  )EOF\";\n\n  const std::string typed_yaml = R\"EOF(\n    name: name\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 443\n    typed_extension_protocol_options:\n      envoy.test.filter:\n        \"@type\": type.googleapis.com/google.protobuf.Struct\n        value:\n          option: \"value\"\n  )EOF\";\n\n  // This vector is used to gather clusters with extension_protocol_options from the different\n  // types of extension factories (network, http).\n  std::vector<std::unique_ptr<StrictDnsClusterImpl>> clusters;\n\n  {\n    // Get the cluster with extension_protocol_options for a network filter factory.\n    TestNetworkFilterConfigFactory factory(factoryBase);\n    Registry::InjectFactory<Server::Configuration::NamedNetworkFilterConfigFactory> registry(\n        factory);\n    clusters.push_back(makeCluster(yaml));\n  }\n  {\n    // Get the cluster with extension_protocol_options for an http filter factory.\n    TestHttpFilterConfigFactory factory(factoryBase);\n    Registry::InjectFactory<Server::Configuration::NamedHttpFilterConfigFactory> registry(factory);\n    clusters.push_back(makeCluster(yaml));\n  }\n  {\n    // Get the cluster with extension_protocol_options for a network filter factory.\n    TestNetworkFilterConfigFactory factory(factoryBase);\n    Registry::InjectFactory<Server::Configuration::NamedNetworkFilterConfigFactory> registry(\n        factory);\n    clusters.push_back(makeCluster(typed_yaml));\n  }\n  {\n    // Get the cluster with extension_protocol_options for an http filter factory.\n    TestHttpFilterConfigFactory factory(factoryBase);\n    
Registry::InjectFactory<Server::Configuration::NamedHttpFilterConfigFactory> registry(factory);\n    clusters.push_back(makeCluster(typed_yaml));\n  }\n\n  // Make sure that the clusters created from both factories are as expected.\n  for (auto&& cluster : clusters) {\n    std::shared_ptr<const TestFilterProtocolOptionsConfig> stored_options =\n        cluster->info()->extensionProtocolOptionsTyped<TestFilterProtocolOptionsConfig>(\n            \"envoy.test.filter\");\n    EXPECT_NE(nullptr, protocol_options);\n    // Same pointer\n    EXPECT_EQ(stored_options.get(), protocol_options.get());\n  }\n}\n\nTEST_F(ClusterInfoImplTest, UseDownstreamHttpProtocol) {\n  const std::string yaml = R\"EOF(\n  name: name\n  connect_timeout: 0.25s\n  type: STRICT_DNS\n  lb_policy: ROUND_ROBIN\n  protocol_selection: USE_DOWNSTREAM_PROTOCOL\n)EOF\";\n\n  auto cluster = makeCluster(yaml);\n\n  EXPECT_EQ(Http::Protocol::Http10,\n            cluster->info()->upstreamHttpProtocol({Http::Protocol::Http10}));\n  EXPECT_EQ(Http::Protocol::Http11,\n            cluster->info()->upstreamHttpProtocol({Http::Protocol::Http11}));\n  EXPECT_EQ(Http::Protocol::Http2, cluster->info()->upstreamHttpProtocol({Http::Protocol::Http2}));\n}\n\nTEST_F(ClusterInfoImplTest, UpstreamHttp2Protocol) {\n  const std::string yaml = R\"EOF(\n  name: name\n  connect_timeout: 0.25s\n  type: STRICT_DNS\n  lb_policy: ROUND_ROBIN\n  http2_protocol_options: {}\n)EOF\";\n\n  auto cluster = makeCluster(yaml);\n\n  EXPECT_EQ(Http::Protocol::Http2, cluster->info()->upstreamHttpProtocol(absl::nullopt));\n  EXPECT_EQ(Http::Protocol::Http2, cluster->info()->upstreamHttpProtocol({Http::Protocol::Http10}));\n  EXPECT_EQ(Http::Protocol::Http2, cluster->info()->upstreamHttpProtocol({Http::Protocol::Http11}));\n  EXPECT_EQ(Http::Protocol::Http2, cluster->info()->upstreamHttpProtocol({Http::Protocol::Http2}));\n}\n\nTEST_F(ClusterInfoImplTest, UpstreamHttp11Protocol) {\n  const std::string yaml = R\"EOF(\n  name: name\n  
connect_timeout: 0.25s\n  type: STRICT_DNS\n  lb_policy: ROUND_ROBIN\n)EOF\";\n\n  auto cluster = makeCluster(yaml);\n\n  EXPECT_EQ(Http::Protocol::Http11, cluster->info()->upstreamHttpProtocol(absl::nullopt));\n  EXPECT_EQ(Http::Protocol::Http11,\n            cluster->info()->upstreamHttpProtocol({Http::Protocol::Http10}));\n  EXPECT_EQ(Http::Protocol::Http11,\n            cluster->info()->upstreamHttpProtocol({Http::Protocol::Http11}));\n  EXPECT_EQ(Http::Protocol::Http11, cluster->info()->upstreamHttpProtocol({Http::Protocol::Http2}));\n}\n\n// Validate empty singleton for HostsPerLocalityImpl.\nTEST(HostsPerLocalityImpl, Empty) {\n  EXPECT_FALSE(HostsPerLocalityImpl::empty()->hasLocalLocality());\n  EXPECT_EQ(0, HostsPerLocalityImpl::empty()->get().size());\n}\n\n// Validate HostsPerLocalityImpl constructors.\nTEST(HostsPerLocalityImpl, Cons) {\n  {\n    const HostsPerLocalityImpl hosts_per_locality;\n    EXPECT_FALSE(hosts_per_locality.hasLocalLocality());\n    EXPECT_EQ(0, hosts_per_locality.get().size());\n  }\n\n  MockClusterMockPrioritySet cluster;\n  HostSharedPtr host_0 = makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", 1);\n  HostSharedPtr host_1 = makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", 1);\n\n  {\n    std::vector<HostVector> locality_hosts = {{host_0}, {host_1}};\n    const auto locality_hosts_copy = locality_hosts;\n    const HostsPerLocalityImpl hosts_per_locality(std::move(locality_hosts), true);\n    EXPECT_TRUE(hosts_per_locality.hasLocalLocality());\n    EXPECT_EQ(locality_hosts_copy, hosts_per_locality.get());\n  }\n\n  {\n    std::vector<HostVector> locality_hosts = {{host_0}, {host_1}};\n    const auto locality_hosts_copy = locality_hosts;\n    const HostsPerLocalityImpl hosts_per_locality(std::move(locality_hosts), false);\n    EXPECT_FALSE(hosts_per_locality.hasLocalLocality());\n    EXPECT_EQ(locality_hosts_copy, hosts_per_locality.get());\n  }\n}\n\nTEST(HostsPerLocalityImpl, Filter) {\n  MockClusterMockPrioritySet 
cluster;\n  HostSharedPtr host_0 = makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", 1);\n  HostSharedPtr host_1 = makeTestHost(cluster.info_, \"tcp://10.0.0.1:1234\", 1);\n\n  {\n    std::vector<HostVector> locality_hosts = {{host_0}, {host_1}};\n    const auto filtered =\n        HostsPerLocalityImpl(std::move(locality_hosts), false).filter({[&host_0](const Host& host) {\n          return &host == host_0.get();\n        }})[0];\n    EXPECT_FALSE(filtered->hasLocalLocality());\n    const std::vector<HostVector> expected_locality_hosts = {{host_0}, {}};\n    EXPECT_EQ(expected_locality_hosts, filtered->get());\n  }\n\n  {\n    std::vector<HostVector> locality_hosts = {{host_0}, {host_1}};\n    auto filtered =\n        HostsPerLocalityImpl(std::move(locality_hosts), true).filter({[&host_1](const Host& host) {\n          return &host == host_1.get();\n        }})[0];\n    EXPECT_TRUE(filtered->hasLocalLocality());\n    const std::vector<HostVector> expected_locality_hosts = {{}, {host_1}};\n    EXPECT_EQ(expected_locality_hosts, filtered->get());\n  }\n}\n\nclass HostSetImplLocalityTest : public testing::Test {\npublic:\n  LocalityWeightsConstSharedPtr locality_weights_;\n  HostSetImpl host_set_{0, kDefaultOverProvisioningFactor};\n  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};\n  HostVector hosts_{\n      makeTestHost(info_, \"tcp://127.0.0.1:80\"), makeTestHost(info_, \"tcp://127.0.0.1:81\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:82\"), makeTestHost(info_, \"tcp://127.0.0.1:83\"),\n      makeTestHost(info_, \"tcp://127.0.0.1:84\"), makeTestHost(info_, \"tcp://127.0.0.1:85\")};\n};\n\n// When no locality weights belong to the host set, there's an empty pick.\nTEST_F(HostSetImplLocalityTest, Empty) {\n  EXPECT_EQ(nullptr, host_set_.localityWeights());\n  EXPECT_FALSE(host_set_.chooseHealthyLocality().has_value());\n}\n\n// When no hosts are healthy we should fail to select a locality\nTEST_F(HostSetImplLocalityTest, AllUnhealthy) 
{\n  HostsPerLocalitySharedPtr hosts_per_locality =\n      makeHostsPerLocality({{hosts_[0]}, {hosts_[1]}, {hosts_[2]}});\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1, 1}};\n  auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality);\n  host_set_.updateHosts(updateHostsParams(hosts, hosts_per_locality), locality_weights, {}, {},\n                        absl::nullopt);\n  EXPECT_FALSE(host_set_.chooseHealthyLocality().has_value());\n}\n\n// When a locality has endpoints that have not yet been warmed, weight calculation should ignore\n// these hosts.\nTEST_F(HostSetImplLocalityTest, NotWarmedHostsLocality) {\n  // We have two localities with 3 hosts in L1, 2 hosts in L2. Two of the hosts in L1 are not\n  // warmed yet, so even though they are unhealthy we should not adjust the locality weight.\n  HostsPerLocalitySharedPtr hosts_per_locality =\n      makeHostsPerLocality({{hosts_[0], hosts_[1], hosts_[2]}, {hosts_[3], hosts_[4]}});\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1}};\n  auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality);\n  HostsPerLocalitySharedPtr healthy_hosts_per_locality =\n      makeHostsPerLocality({{hosts_[0]}, {hosts_[3], hosts_[4]}});\n  HostsPerLocalitySharedPtr excluded_hosts_per_locality =\n      makeHostsPerLocality({{hosts_[1], hosts_[2]}, {}});\n\n  host_set_.updateHosts(\n      HostSetImpl::updateHostsParams(\n          hosts, hosts_per_locality,\n          makeHostsFromHostsPerLocality<HealthyHostVector>(healthy_hosts_per_locality),\n          healthy_hosts_per_locality, std::make_shared<const DegradedHostVector>(),\n          HostsPerLocalityImpl::empty(),\n          makeHostsFromHostsPerLocality<ExcludedHostVector>(excluded_hosts_per_locality),\n          excluded_hosts_per_locality),\n      locality_weights, {}, {}, absl::nullopt);\n  // We should RR between localities with equal weight.\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  
EXPECT_EQ(1, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(1, host_set_.chooseHealthyLocality().value());\n}\n\n// When a locality has zero hosts, it should be treated as if it has zero healthy.\nTEST_F(HostSetImplLocalityTest, EmptyLocality) {\n  HostsPerLocalitySharedPtr hosts_per_locality =\n      makeHostsPerLocality({{hosts_[0], hosts_[1], hosts_[2]}, {}});\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1}};\n  auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality);\n  host_set_.updateHosts(updateHostsParams(hosts, hosts_per_locality,\n                                          std::make_shared<const HealthyHostVector>(*hosts),\n                                          hosts_per_locality),\n                        locality_weights, {}, {}, absl::nullopt);\n  // Verify that we are not RRing between localities.\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n}\n\n// When all locality weights are zero we should fail to select a locality.\nTEST_F(HostSetImplLocalityTest, AllZeroWeights) {\n  HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality({{hosts_[0]}, {hosts_[1]}});\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{0, 0}};\n  auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality);\n  host_set_.updateHosts(updateHostsParams(hosts, hosts_per_locality,\n                                          std::make_shared<const HealthyHostVector>(*hosts),\n                                          hosts_per_locality),\n                        locality_weights, {}, {});\n  EXPECT_FALSE(host_set_.chooseHealthyLocality().has_value());\n}\n\n// When all locality weights are the same we have unweighted RR behavior.\nTEST_F(HostSetImplLocalityTest, Unweighted) {\n  HostsPerLocalitySharedPtr hosts_per_locality =\n      makeHostsPerLocality({{hosts_[0]}, 
{hosts_[1]}, {hosts_[2]}});\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1, 1}};\n  auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality);\n  host_set_.updateHosts(updateHostsParams(hosts, hosts_per_locality,\n                                          std::make_shared<const HealthyHostVector>(*hosts),\n                                          hosts_per_locality),\n                        locality_weights, {}, {}, absl::nullopt);\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(1, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(2, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(1, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(2, host_set_.chooseHealthyLocality().value());\n}\n\n// When locality weights differ, we have weighted RR behavior.\nTEST_F(HostSetImplLocalityTest, Weighted) {\n  HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality({{hosts_[0]}, {hosts_[1]}});\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 2}};\n  auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality);\n  host_set_.updateHosts(updateHostsParams(hosts, hosts_per_locality,\n                                          std::make_shared<const HealthyHostVector>(*hosts),\n                                          hosts_per_locality),\n                        locality_weights, {}, {}, absl::nullopt);\n  EXPECT_EQ(1, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(1, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(1, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(1, host_set_.chooseHealthyLocality().value());\n}\n\n// Localities with no weight assignment are never picked.\nTEST_F(HostSetImplLocalityTest, MissingWeight) {\n  HostsPerLocalitySharedPtr 
hosts_per_locality =\n      makeHostsPerLocality({{hosts_[0]}, {hosts_[1]}, {hosts_[2]}});\n  LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 0, 1}};\n  auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality);\n  host_set_.updateHosts(updateHostsParams(hosts, hosts_per_locality,\n                                          std::make_shared<const HealthyHostVector>(*hosts),\n                                          hosts_per_locality),\n                        locality_weights, {}, {}, absl::nullopt);\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(2, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(2, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(0, host_set_.chooseHealthyLocality().value());\n  EXPECT_EQ(2, host_set_.chooseHealthyLocality().value());\n}\n\n// Gentle failover between localities as health diminishes.\nTEST_F(HostSetImplLocalityTest, UnhealthyFailover) {\n  const auto setHealthyHostCount = [this](uint32_t host_count) {\n    LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 2}};\n    HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality(\n        {{hosts_[0], hosts_[1], hosts_[2], hosts_[3], hosts_[4]}, {hosts_[5]}});\n    HostVector healthy_hosts;\n    for (uint32_t i = 0; i < host_count; ++i) {\n      healthy_hosts.emplace_back(hosts_[i]);\n    }\n    HostsPerLocalitySharedPtr healthy_hosts_per_locality =\n        makeHostsPerLocality({healthy_hosts, {hosts_[5]}});\n\n    auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality);\n    host_set_.updateHosts(updateHostsParams(hosts, hosts_per_locality,\n                                            makeHostsFromHostsPerLocality<HealthyHostVector>(\n                                                healthy_hosts_per_locality),\n                                            healthy_hosts_per_locality),\n                          
locality_weights, {}, {}, absl::nullopt);\n  };\n\n  const auto expectPicks = [this](uint32_t locality_0_picks, uint32_t locality_1_picks) {\n    uint32_t count[2] = {0, 0};\n    for (uint32_t i = 0; i < 100; ++i) {\n      const uint32_t locality_index = host_set_.chooseHealthyLocality().value();\n      ASSERT_LT(locality_index, 2);\n      ++count[locality_index];\n    }\n    ENVOY_LOG_MISC(debug, \"Locality picks {} {}\", count[0], count[1]);\n    EXPECT_EQ(locality_0_picks, count[0]);\n    EXPECT_EQ(locality_1_picks, count[1]);\n  };\n\n  setHealthyHostCount(5);\n  expectPicks(33, 67);\n  setHealthyHostCount(4);\n  expectPicks(33, 67);\n  setHealthyHostCount(3);\n  expectPicks(29, 71);\n  setHealthyHostCount(2);\n  expectPicks(22, 78);\n  setHealthyHostCount(1);\n  expectPicks(12, 88);\n  setHealthyHostCount(0);\n  expectPicks(0, 100);\n}\n\nTEST(OverProvisioningFactorTest, LocalityPickChanges) {\n  auto setUpHostSetWithOPFAndTestPicks = [](const uint32_t overprovisioning_factor,\n                                            const uint32_t pick_0, const uint32_t pick_1) {\n    HostSetImpl host_set(0, overprovisioning_factor);\n    std::shared_ptr<MockClusterInfo> cluster_info{new NiceMock<MockClusterInfo>()};\n    HostVector hosts{makeTestHost(cluster_info, \"tcp://127.0.0.1:80\"),\n                     makeTestHost(cluster_info, \"tcp://127.0.0.1:81\"),\n                     makeTestHost(cluster_info, \"tcp://127.0.0.1:82\")};\n    LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1}};\n    HostsPerLocalitySharedPtr hosts_per_locality =\n        makeHostsPerLocality({{hosts[0], hosts[1]}, {hosts[2]}});\n    // Healthy ratio: (1/2, 1).\n    HostsPerLocalitySharedPtr healthy_hosts_per_locality =\n        makeHostsPerLocality({{hosts[0]}, {hosts[2]}});\n    auto healthy_hosts =\n        makeHostsFromHostsPerLocality<HealthyHostVector>(healthy_hosts_per_locality);\n    host_set.updateHosts(updateHostsParams(std::make_shared<const 
HostVector>(hosts),\n                                           hosts_per_locality, healthy_hosts,\n                                           healthy_hosts_per_locality),\n                         locality_weights, {}, {}, absl::nullopt);\n    uint32_t cnts[] = {0, 0};\n    for (uint32_t i = 0; i < 100; ++i) {\n      absl::optional<uint32_t> locality_index = host_set.chooseHealthyLocality();\n      if (!locality_index.has_value()) {\n        // It's possible locality scheduler is nullptr (when factor is 0).\n        continue;\n      }\n      ASSERT_LT(locality_index.value(), 2);\n      ++cnts[locality_index.value()];\n    }\n    EXPECT_EQ(pick_0, cnts[0]);\n    EXPECT_EQ(pick_1, cnts[1]);\n  };\n\n  // NOTE: effective locality weight: weight * min(1, factor * healthy-ratio).\n\n  // Picks in localities match to weight(1) * healthy-ratio when\n  // overprovisioning factor is 1.\n  setUpHostSetWithOPFAndTestPicks(100, 33, 67);\n  // Picks in localities match to weights as factor * healthy-ratio > 1.\n  setUpHostSetWithOPFAndTestPicks(200, 50, 50);\n};\n\n// Verifies that partitionHosts correctly splits hosts based on their health flags.\nTEST(HostPartitionTest, PartitionHosts) {\n  std::shared_ptr<MockClusterInfo> info{new NiceMock<MockClusterInfo>()};\n  HostVector hosts{\n      makeTestHost(info, \"tcp://127.0.0.1:80\"), makeTestHost(info, \"tcp://127.0.0.1:81\"),\n      makeTestHost(info, \"tcp://127.0.0.1:82\"), makeTestHost(info, \"tcp://127.0.0.1:83\")};\n\n  hosts[0]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n  hosts[1]->healthFlagSet(Host::HealthFlag::DEGRADED_ACTIVE_HC);\n  hosts[2]->healthFlagSet(Host::HealthFlag::PENDING_ACTIVE_HC);\n  hosts[2]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC);\n\n  auto hosts_per_locality = makeHostsPerLocality({{hosts[0], hosts[1]}, {hosts[2], hosts[3]}});\n\n  auto update_hosts_params =\n      HostSetImpl::partitionHosts(std::make_shared<const HostVector>(hosts), hosts_per_locality);\n\n  EXPECT_EQ(4, 
update_hosts_params.hosts->size());\n  EXPECT_EQ(1, update_hosts_params.healthy_hosts->get().size());\n  EXPECT_EQ(hosts[3], update_hosts_params.healthy_hosts->get()[0]);\n  EXPECT_EQ(1, update_hosts_params.degraded_hosts->get().size());\n  EXPECT_EQ(hosts[1], update_hosts_params.degraded_hosts->get()[0]);\n  EXPECT_EQ(1, update_hosts_params.excluded_hosts->get().size());\n  EXPECT_EQ(hosts[2], update_hosts_params.excluded_hosts->get()[0]);\n\n  EXPECT_EQ(2, update_hosts_params.hosts_per_locality->get()[0].size());\n  EXPECT_EQ(2, update_hosts_params.hosts_per_locality->get()[1].size());\n\n  EXPECT_EQ(0, update_hosts_params.healthy_hosts_per_locality->get()[0].size());\n  EXPECT_EQ(1, update_hosts_params.healthy_hosts_per_locality->get()[1].size());\n  EXPECT_EQ(hosts[3], update_hosts_params.healthy_hosts_per_locality->get()[1][0]);\n\n  EXPECT_EQ(1, update_hosts_params.degraded_hosts_per_locality->get()[0].size());\n  EXPECT_EQ(0, update_hosts_params.degraded_hosts_per_locality->get()[1].size());\n  EXPECT_EQ(hosts[1], update_hosts_params.degraded_hosts_per_locality->get()[0][0]);\n\n  EXPECT_EQ(0, update_hosts_params.excluded_hosts_per_locality->get()[0].size());\n  EXPECT_EQ(1, update_hosts_params.excluded_hosts_per_locality->get()[1].size());\n  EXPECT_EQ(hosts[2], update_hosts_params.excluded_hosts_per_locality->get()[1][0]);\n}\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/common/upstream/utility.h",
    "content": "#pragma once\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.validate.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"fmt/printf.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nconstexpr static const char* kDefaultStaticClusterTmpl = R\"EOF(\n  {\n    \"name\": \"%s\",\n    \"connect_timeout\": \"0.250s\",\n    \"type\": \"static\",\n    \"lb_policy\": \"round_robin\",\n    \"load_assignment\": {\n    \"endpoints\": [\n      {\n        \"lb_endpoints\": [\n          {\n            \"endpoint\": {\n              \"address\": {\n            %s,              }\n            }\n          }\n        ]\n      }\n    ]\n  }\n  }\n  )EOF\";\n\ninline std::string defaultStaticClusterJson(const std::string& name) {\n  return fmt::sprintf(kDefaultStaticClusterTmpl, name, R\"EOF(\n\"socket_address\": {\n  \"address\": \"127.0.0.1\",\n  \"port_value\": 11001\n})EOF\");\n}\n\ninline envoy::config::bootstrap::v3::Bootstrap\nparseBootstrapFromV3Json(const std::string& json_string, bool avoid_boosting = true) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  TestUtility::loadFromJson(json_string, bootstrap, true, avoid_boosting);\n  return bootstrap;\n}\n\ninline envoy::config::cluster::v3::Cluster parseClusterFromV3Json(const std::string& json_string,\n                                                                  bool avoid_boosting = true) {\n  envoy::config::cluster::v3::Cluster cluster;\n  TestUtility::loadFromJson(json_string, cluster, true, 
avoid_boosting);\n  return cluster;\n}\n\ninline envoy::config::cluster::v3::Cluster parseClusterFromV3Yaml(const std::string& yaml,\n                                                                  bool avoid_boosting = true) {\n  envoy::config::cluster::v3::Cluster cluster;\n  TestUtility::loadFromYaml(yaml, cluster, true, avoid_boosting);\n  return cluster;\n}\n\ninline envoy::config::cluster::v3::Cluster defaultStaticCluster(const std::string& name) {\n  return parseClusterFromV3Json(defaultStaticClusterJson(name));\n}\n\ninline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& hostname,\n                                  const std::string& url, uint32_t weight = 1) {\n  return HostSharedPtr{\n      new HostImpl(cluster, hostname, Network::Utility::resolveUrl(url), nullptr, weight,\n                   envoy::config::core::v3::Locality(),\n                   envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0,\n                   envoy::config::core::v3::UNKNOWN)};\n}\n\ninline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url,\n                                  uint32_t weight = 1, uint32_t priority = 0) {\n  return HostSharedPtr{\n      new HostImpl(cluster, \"\", Network::Utility::resolveUrl(url), nullptr, weight,\n                   envoy::config::core::v3::Locality(),\n                   envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(),\n                   priority, envoy::config::core::v3::UNKNOWN)};\n}\n\ninline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url,\n                                  const envoy::config::core::v3::Metadata& metadata,\n                                  uint32_t weight = 1) {\n  return HostSharedPtr{\n      new HostImpl(cluster, \"\", Network::Utility::resolveUrl(url),\n                   std::make_shared<const envoy::config::core::v3::Metadata>(metadata), weight,\n        
           envoy::config::core::v3::Locality(),\n                   envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0,\n                   envoy::config::core::v3::UNKNOWN)};\n}\n\ninline HostSharedPtr\nmakeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url,\n             const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig& health_check_config,\n             uint32_t weight = 1) {\n  return HostSharedPtr{new HostImpl(cluster, \"\", Network::Utility::resolveUrl(url), nullptr, weight,\n                                    envoy::config::core::v3::Locality(), health_check_config, 0,\n                                    envoy::config::core::v3::UNKNOWN)};\n}\n\ninline HostDescriptionConstSharedPtr makeTestHostDescription(ClusterInfoConstSharedPtr cluster,\n                                                             const std::string& url) {\n  return HostDescriptionConstSharedPtr{new HostDescriptionImpl(\n      cluster, \"\", Network::Utility::resolveUrl(url), nullptr,\n      envoy::config::core::v3::Locality().default_instance(),\n      envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0)};\n}\n\ninline HostsPerLocalitySharedPtr makeHostsPerLocality(std::vector<HostVector>&& locality_hosts,\n                                                      bool force_no_local_locality = false) {\n  return std::make_shared<HostsPerLocalityImpl>(\n      std::move(locality_hosts), !force_no_local_locality && !locality_hosts.empty());\n}\n\ninline LocalityWeightsSharedPtr\nmakeLocalityWeights(std::initializer_list<uint32_t> locality_weights) {\n  return std::make_shared<LocalityWeights>(locality_weights);\n}\n\ninline envoy::config::core::v3::HealthCheck\nparseHealthCheckFromV3Yaml(const std::string& yaml_string, bool avoid_boosting = true) {\n  envoy::config::core::v3::HealthCheck health_check;\n  TestUtility::loadFromYamlAndValidate(yaml_string, health_check, false, avoid_boosting);\n  return 
health_check;\n}\n\n// For DEPRECATED TEST CASES\ninline envoy::config::core::v3::HealthCheck\nparseHealthCheckFromV2Yaml(const std::string& yaml_string) {\n  envoy::config::core::v3::HealthCheck health_check;\n  TestUtility::loadFromYamlAndValidate(yaml_string, health_check);\n  return health_check;\n}\n\ninline PrioritySet::UpdateHostsParams\nupdateHostsParams(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality,\n                  HealthyHostVectorConstSharedPtr healthy_hosts,\n                  HostsPerLocalityConstSharedPtr healthy_hosts_per_locality) {\n  return HostSetImpl::updateHostsParams(\n      hosts, hosts_per_locality, std::move(healthy_hosts), std::move(healthy_hosts_per_locality),\n      std::make_shared<const DegradedHostVector>(), HostsPerLocalityImpl::empty(),\n      std::make_shared<const ExcludedHostVector>(), HostsPerLocalityImpl::empty());\n}\n\ninline PrioritySet::UpdateHostsParams\nupdateHostsParams(HostVectorConstSharedPtr hosts,\n                  HostsPerLocalityConstSharedPtr hosts_per_locality) {\n  return updateHostsParams(std::move(hosts), std::move(hosts_per_locality),\n                           std::make_shared<const HealthyHostVector>(),\n                           HostsPerLocalityImpl::empty());\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/config/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    srcs = [\n        \"utility.cc\",\n    ],\n    hdrs = [\n        \"utility.h\",\n    ],\n    deps = [\n        \"//source/common/http:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/config/integration/certs:hashes\",\n        \"//test/integration:server_stats_interface\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/config/integration/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nexports_files([\n    \"server.yaml\",\n    \"server_unix_listener.yaml\",\n])\n\nfilegroup(\n    name = \"server_xds_files\",\n    srcs = [\n        \"server_xds.bootstrap.udpa.yaml\",\n        \"server_xds.bootstrap.yaml\",\n        \"server_xds.cds.with_unknown_field.yaml\",\n        \"server_xds.cds.yaml\",\n        \"server_xds.eds.ads_cluster.yaml\",\n        \"server_xds.eds.with_unknown_field.yaml\",\n        \"server_xds.eds.yaml\",\n        \"server_xds.lds.typed_struct.yaml\",\n        \"server_xds.lds.udpa.list_collection.yaml\",\n        \"server_xds.lds.with_unknown_field.typed_struct.yaml\",\n        \"server_xds.lds.with_unknown_field.yaml\",\n        \"server_xds.lds.yaml\",\n        \"server_xds.rds.with_unknown_field.yaml\",\n        \"server_xds.rds.yaml\",\n    ],\n)\n\nfilegroup(\n    name = \"server_config_files\",\n    srcs = [\n        \"server.yaml\",\n        \"server_unix_listener.yaml\",\n    ],\n)\n\nfilegroup(\n    name = \"google_com_proxy_port_0\",\n    srcs = [\"google_com_proxy_port_0.v2.yaml\"],\n)\n"
  },
  {
    "path": "test/config/integration/certs/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nfilegroup(\n    name = \"certs\",\n    srcs = glob([\n        \"*.pem\",\n        \"*.der\",\n    ]),\n)\n\nenvoy_cc_test_library(\n    name = \"hashes\",\n    hdrs = glob([\"*hash.h\"]),\n)\n"
  },
  {
    "path": "test/config/integration/certs/README.md",
    "content": "# What are the identities, certificates and keys\nThere are 6 identities:\n- **CA**: Certificate Authority for **Client** and **Server**. It has the\n  self-signed certificate *cacert.pem*. *cakey.pem* is its private key.\n- **Client**: It has the certificate *clientcert.pem*, signed by the **CA**.\n  *clientkey.pem* is its private key.\n- **Server**: It has the certificate *servercert.pem*, which is signed by the\n  **CA** using the config *servercert.cfg*. *serverkey.pem* is its private key.\n- **Upstream CA**: Certificate Authority for **Upstream**. It has the self-signed\n  certificate *upstreamcacert.pem*. *upstreamcakey.pem* is its private key.\n- **Upstream**: It has the certificate *upstreamcert.pem*, which is signed by\n  the **Upstream CA** using the config *upstreamcert.cfg*. *upstreamkey.pem* is\n  its private key.\n- **Upstream localhost**: It has the certificate *upstreamlocalhostcert.pem*, which is signed by\n  the **Upstream CA** using the config *upstreamlocalhostcert.cfg*. *upstreamlocalhostkey.pem* is\n  its private key. The difference between this certificate and **Upstream** is that this certificate\n  has a SAN for \"localhost\".\n\n# How to update certificates\n**certs.sh** has the commands to generate all files. Running certs.sh directly\nwill cause all files to be regenerated. So if you want to regenerate a\nparticular file, please copy the corresponding commands from certs.sh and\nexecute them in command line.\n"
  },
  {
    "path": "test/config/integration/certs/cacert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test CA\ncommonName_default = Test CA\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:TRUE\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:TRUE\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n"
  },
  {
    "path": "test/config/integration/certs/cacert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIUdCu/mLip3X/We37vh3BA9u/nxakwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODA1MTkxNjAwWhcNMjIw\nODA1MTkxNjAwWjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBALu2Ihi4DmaQG7zySZlWyM9SjxOXCI5840V7Hn0C\nXoiI8sQQmKSC2YCzsaphQoJ0lXCi6Y47o5FkooYyLeNDQTGS0nh+IWm5RCyochtO\nfnaKPv/hYxhpyFQEwkJkbF1Zt1s6j2rq5MzmbWZx090uXZEE82DNZ9QJaMPu6VWt\niwGoGoS5HF5HNlUVxLNUsklNH0ZfDafR7/LC2ty1vO1c6EJ6yCGiyJZZ7Ilbz27Q\nHPAUd8CcDNKCHZDoMWkLSLN3Nj1MvPVZ5HDsHiNHXthP+zV8FQtloAuZ8Srsmlyg\nrJREkc7gF3f6HrH5ShNhsRFFc53NUjDbYZuha1u4hiOE8lcCAwEAAaNjMGEwDwYD\nVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJZL2ixTtL6V\nxpNz4qekny4NchiHMB8GA1UdIwQYMBaAFJZL2ixTtL6VxpNz4qekny4NchiHMA0G\nCSqGSIb3DQEBCwUAA4IBAQAcgG+AaCdrUFEVJDn9UsO7zqzQ3c1VOp+WAtAU8OQK\nOc4vJYVVKpDs8OZFxmukCeqm1gz2zDeH7TfgCs5UnLtkplx1YO1bd9qvserJVHiD\nLAK+Yl24ZEbrHPaq0zI1RLchqYUOGWmi51pcXi1gsfc8DQ3GqIXoai6kYJeV3jFJ\njxpQSR32nx6oNN/6kVKlgmBjlWrOy7JyDXGim6Z97TzmS6Clctewmw/5gZ9g+M8e\ng0ZdFbFkNUjzSNm44hiDX8nR6yJRn+gLaARaJvp1dnT+MlvofZuER17WYKH4OyMs\nie3qKR3an4KC20CtFbpZfv540BVuTTOCtQ5xqZ/LTE78\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/config/integration/certs/cakey.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpgIBAAKCAQEAu7YiGLgOZpAbvPJJmVbIz1KPE5cIjnzjRXsefQJeiIjyxBCY\npILZgLOxqmFCgnSVcKLpjjujkWSihjIt40NBMZLSeH4hablELKhyG05+doo+/+Fj\nGGnIVATCQmRsXVm3WzqPaurkzOZtZnHT3S5dkQTzYM1n1Alow+7pVa2LAagahLkc\nXkc2VRXEs1SySU0fRl8Np9Hv8sLa3LW87VzoQnrIIaLIllnsiVvPbtAc8BR3wJwM\n0oIdkOgxaQtIs3c2PUy89VnkcOweI0de2E/7NXwVC2WgC5nxKuyaXKCslESRzuAX\nd/oesflKE2GxEUVznc1SMNthm6FrW7iGI4TyVwIDAQABAoIBAQCasKW4qTV04B17\nwE9WxmYGNIskIbszcUf54lRlwKYW7oThfqvMJukHXw5y0mP1Dg55HEhMpmlNUBl/\nbarTNoFrUQuRsJ/oeHzuMIKYbj9ZgOQaCquXWtV0J9fOzuNeqqinzcKS4bBcCyjs\n27E0/Riugd3vUFbYLkjf7urraHC9k1mqMrTgTcQct3LO3oUG0cd2ANX6fMrE7Hfw\nZNPf9U+G0/QQIvRCn7xAzWNo+kwwrd/Yz4/S1YKDXeAkRHGlqjoXnrucwRYbBfsV\nzg66mGvtlTrYjKgz8GfKZD8Azz9LVVXbN/2P6WAL/vTeNfMCV4VDGVk7BNs7UU7X\nC1vetPJBAoGBAN8zNzdw/1tE6gmHzN3Hytk76t7uLA5XKlH81f7akRe/JqZ1urqi\nOgJz1coLsJeRbIczSuF6qpsBVufwGUV8pQbF5JQPhS74MESlFffJ8knDr+6AHFbn\now/8ZmDPyBBZIRsYdZZpqjqlqWjqnpYKMwcXmf5Yiq1G7FUdBEtrcObDAoGBANdL\n1ihVvRWZsN8gYC9Wb+PxKNqaKTZZ/1BGPHmuMB28r4F6C8EtLQCReiuBPiLWs7FE\n7hSeFTgLEncBK9nVRYpG3W6UKowBV16mr7tGJpoYVoUNUT7SxiwZWHx+VLkTfWex\niNYv/Ycxl6iYdkXzuHqUinxccEziKqEu6zMzd5TdAoGBAKEjRo/eIlzwCc7LndnX\nrdjbaxt6849+2mzKjmwpu2pbdDnk8ORgzmSK4CO4AMvMD4AkRcE3YAf8FZPpQTVr\nYXDcWcOS2OIqCB7m2E9GGoeqoU8callLbevSms7180fqMP5w0CPBMUaZ5w55o/hK\ncMCEB4cawTOL6n8gLcONU7slAoGBALCT826be3yW1Bj8ncbVdumV5nL8U2bPg3Zc\nVMdb1Pzev3dLGQ70NV+s8W1zD/pU64YtybLBQRf5BMjz/fooUGOr4XsLLKYth3IK\n9kB7tbdW1MdFd+g1yPFsTEW2+1fcI1ODqX46WA6k3wUZHpAa56gp4jdDPZvhNyOB\nrsgMozxFAoGBAJuaqAWXFA2wy+zDw8P6rhRDF52DmNMYeVBvFGtBOBy0BYavsesj\nqRMzdDdhrQMzYqLCnvurEytaLscM+Jltzt1ImWtNwXoTg1O+Cl01wzBWK/NGrMBg\ntiHVT2ojF3hIyFjr9Z7IgcosyvaNTHnBxIKUy2tiAprruYOwhH9kXdHX\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/config/integration/certs/certs.sh",
    "content": "#!/bin/bash\n\nset -e\n\n# $1=<CA name>\ngenerate_ca() {\n  openssl genrsa -out \"${1}key.pem\" 2048\n  openssl req -new -key \"${1}key.pem\" -out \"${1}cert.csr\" -config \"${1}cert.cfg\" -batch -sha256\n  openssl x509 -req -days 730 -in \"${1}cert.csr\" -signkey \"${1}key.pem\" -out \"${1}cert.pem\" \\\n    -extensions v3_ca -extfile \"${1}cert.cfg\"\n}\n\n# $1=<certificate name>\ngenerate_rsa_key() {\n  openssl genrsa -out \"${1}key.pem\" 2048\n}\n\n# $1=<certificate name>\ngenerate_ecdsa_key() {\n  openssl ecparam -name secp256r1 -genkey -out \"${1}key.pem\"\n}\n\n# $1=<certificate name> $2=<CA name>\ngenerate_x509_cert() {\n  openssl req -new -key \"${1}key.pem\" -out \"${1}cert.csr\" -config \"${1}cert.cfg\" -batch -sha256\n  openssl x509 -req -days 730 -in \"${1}cert.csr\" -sha256 -CA \"${2}cert.pem\" -CAkey \\\n    \"${2}key.pem\" -CAcreateserial -out \"${1}cert.pem\" -extensions v3_ca -extfile \"${1}cert.cfg\"\n  echo -e \"// NOLINT(namespace-envoy)\\nconstexpr char TEST_$(echo \"$1\" | tr \"[:lower:]\" \"[:upper:]\")_CERT_HASH[] = \\\"$(openssl x509 -in \"${1}cert.pem\" -noout -fingerprint -sha256 | cut -d\"=\" -f2)\\\";\" > \"${1}cert_hash.h\"\n}\n\n# $1=<certificate name> $2=<CA name>\ngenerate_ocsp_response() {\n  # Generate an OCSP request\n  openssl ocsp -CAfile \"${2}cert.pem\" -issuer \"${2}cert.pem\" \\\n    -cert \"${1}cert.pem\" -reqout \"${1}_ocsp_req.der\"\n\n  # Generate the OCSP response\n  # Note: A database of certs is necessary to generate ocsp\n  # responses with `openssl ocsp`. `generate_x509_cert` does not use one\n  # so we must create an empty one here. 
Since generated certs are not\n  # tracked in this index, all ocsp response will have a cert status\n  # \"unknown\", but are still valid responses and the cert status should\n  # not matter for integration tests\n  touch \"${2}_index.txt\"\n  openssl ocsp -CA \"${2}cert.pem\" \\\n    -rkey \"${2}key.pem\" -rsigner \"${2}cert.pem\" -index \"${2}_index.txt\" \\\n    -reqin \"${1}_ocsp_req.der\" -respout \"${1}_ocsp_resp.der\" -ndays 730\n\n  rm \"${1}_ocsp_req.der\" \"${2}_index.txt\"\n}\n\n# Generate cert for the CA.\ngenerate_ca ca\n# Generate RSA cert for the server.\ngenerate_rsa_key server ca\ngenerate_x509_cert server ca\ngenerate_ocsp_response server ca\n# Generate ECDSA cert for the server.\ncp -f servercert.cfg server_ecdsacert.cfg\ngenerate_ecdsa_key server_ecdsa ca\ngenerate_x509_cert server_ecdsa ca\ngenerate_ocsp_response server_ecdsa ca\nrm -f server_ecdsacert.cfg\n# Generate cert for the client.\ngenerate_rsa_key client ca\ngenerate_x509_cert client ca\n# Generate ECDSA cert for the client.\ncp -f clientcert.cfg client_ecdsacert.cfg\ngenerate_ecdsa_key client_ecdsa ca\ngenerate_x509_cert client_ecdsa ca\nrm -f client_ecdsacert.cfg\n\n# Generate cert for the upstream CA.\ngenerate_ca upstreamca\n# Generate cert for the upstream node.\ngenerate_rsa_key upstream upstreamca\ngenerate_x509_cert upstream upstreamca\ngenerate_rsa_key upstreamlocalhost upstreamca\ngenerate_x509_cert upstreamlocalhost upstreamca\n\nrm ./*.csr\nrm ./*.srl\n"
  },
  {
    "path": "test/config/integration/certs/client_ecdsacert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIDpDCCAoygAwIBAgIUJuVBh0FKfFgIcO++ljWm7D47eYgwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODA1MTkxNjAyWhcNMjIw\nODA1MTkxNjAyWjCBqDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM\nEEx5ZnQgRW5naW5lZXJpbmcxGzAZBgNVBAMMElRlc3QgRnJvbnRlbmQgVGVhbTEl\nMCMGCSqGSIb3DQEJARYWZnJvbnRlbmQtdGVhbUBseWZ0LmNvbTBZMBMGByqGSM49\nAgEGCCqGSM49AwEHA0IABFWdfntWW5wivQyk9j45hLFf7QjInjo4H8up56yUkCcm\nn7ewQ9BEPoJ74r5ro/6nPBiRiTx1aolAjDPhgOfUZiWjgcEwgb4wDAYDVR0TAQH/\nBAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMB\nMEIGA1UdEQQ7MDmGH3NwaWZmZTovL2x5ZnQuY29tL2Zyb250ZW5kLXRlYW2CCGx5\nZnQuY29tggx3d3cubHlmdC5jb20wHQYDVR0OBBYEFAB5r3dICa3pCN0DVINNnm3K\nDhmKMB8GA1UdIwQYMBaAFJZL2ixTtL6VxpNz4qekny4NchiHMA0GCSqGSIb3DQEB\nCwUAA4IBAQBel+R+1BpxpPhUGwkSbZBTY4zqU8w8Zx9eCCbpeAi96Qylg++nl88H\nZInHLsC77wfzSf8vKWpsA9KDMB4R4njN1WLsQOKYWJmOE3K+AUuPmqP6mYyAju5k\ngGSg6BIhb5+HtkmvvF56qyuc4GWH8Ab/BhPMRli7h4cBMuSBoZeFoxNbIJfJDJ/P\nZEkHqe1NsI9VG6pmGZ1adBMf04yE+muv+s43Wl/Ry/7W3Ae28XkxmS/c0wCq5h2u\nDMn6nnjDIzJ7tUqfWfacr8QZCQDYQKdBrE+OPw6bgK2p++jXfQDD277FcnMG6je/\nZbqoW4tPXfdl/INMbS1j2h59RTrXGZaR\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/config/integration/certs/client_ecdsacert_hash.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_CLIENT_ECDSA_CERT_HASH[] = \"9F:F2:B4:5A:72:E4:79:82:F4:5C:B1:49:8A:EF:12:53:9C:\"\n                                               \"A7:AB:0A:61:DF:79:2F:D8:8D:4E:29:89:28:03:07\";\n"
  },
  {
    "path": "test/config/integration/certs/client_ecdsakey.pem",
    "content": "-----BEGIN EC PARAMETERS-----\nBggqhkjOPQMBBw==\n-----END EC PARAMETERS-----\n-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIJjBCnRPxmRg21Jdlt8MAQGqtD6ilFK2bsxx5twZklmzoAoGCCqGSM49\nAwEHoUQDQgAEVZ1+e1ZbnCK9DKT2PjmEsV/tCMieOjgfy6nnrJSQJyaft7BD0EQ+\ngnvivmuj/qc8GJGJPHVqiUCMM+GA59RmJQ==\n-----END EC PRIVATE KEY-----\n"
  },
  {
    "path": "test/config/integration/certs/clientcert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Frontend Team\ncommonName_default = Test Frontend Team\ncommonName_max  = 64\nemailAddress = frontend-team@lyft.com\nemailAddress_default = frontend-team@lyft.com\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nURI.1 = spiffe://lyft.com/frontend-team\nDNS.1 = lyft.com\nDNS.2 = www.lyft.com\n"
  },
  {
    "path": "test/config/integration/certs/clientcert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEbzCCA1egAwIBAgIUJuVBh0FKfFgIcO++ljWm7D47eYcwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODA1MTkxNjAxWhcNMjIw\nODA1MTkxNjAxWjCBqDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM\nEEx5ZnQgRW5naW5lZXJpbmcxGzAZBgNVBAMMElRlc3QgRnJvbnRlbmQgVGVhbTEl\nMCMGCSqGSIb3DQEJARYWZnJvbnRlbmQtdGVhbUBseWZ0LmNvbTCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBANarN0opNpHy8zDYi2ih9rLXqgpjVEHSsUDo\neX506Uil9U5I2hCq5XDDsKYhFrRh54G7s9AXCh8orbCBwALPznLwBqSeVy1rnUUR\ndWlAGdoCGDbVPC1+Eg7Q8AM8KkGN49x6PcIepHClkX+nnwRogrSXnPHydik9Mhgc\n+009IcNf4pPl5U1WcL+JEN+x6iXg+nERoKbVHSp0mkS8CQYdPtx2p+QsnEKRyG3W\neJ7msZPW77th27yxzRYDK4TARQAmHchN3FeF2qs1ak2e2s2chTnYb9qfBb++aanA\nfewjnfd/enuvT7Uihswv0dniNTE5I5tDCigXYg1Pp3AKxs7Cja0CAwEAAaOBwTCB\nvjAMBgNVHRMBAf8EAjAAMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwQgYDVR0RBDswOYYfc3BpZmZlOi8vbHlmdC5jb20vZnJvbnRl\nbmQtdGVhbYIIbHlmdC5jb22CDHd3dy5seWZ0LmNvbTAdBgNVHQ4EFgQUxEyk+snc\npQdwv9y8NLXJeJQRP/QwHwYDVR0jBBgwFoAUlkvaLFO0vpXGk3Pip6SfLg1yGIcw\nDQYJKoZIhvcNAQELBQADggEBABT1893YgY90gOrkbFz7u81VXa/pxGGIvqoNlFEd\nPLP+QSdJVOHL7Ud7qecrUkeJ5Xn57BI4x4lfLVbs74phW8EDspxQ5FRtRQzrQZE9\nR/0VdeAVJIAm+108cfmX83rFriktQC9ffRFnsamCuDjpKWLg/3tMaNSAz7yfxYsJ\nsUCRKnLRb7kRyD4tiztHZjY9F1sEYoVbNuQiyFeDZzwLhe9S1WN9rUYA4n+wmyDh\nMAjmaJYWwyTZx6SwSevYaXAZSdlxqxVyBrTeNELhbXn0vRxK2J5c9gqXNf2vRKrW\njDsrvfG9XRMBXv1USqplMVduvml5OdTlnALD4Gd1WDJfVt8=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/config/integration/certs/clientcert_hash.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_CLIENT_CERT_HASH[] = \"0E:80:B9:1F:11:3F:FB:43:9B:CB:5A:70:2D:04:C5:00:D6:81:68:\"\n                                         \"8C:CA:F8:91:92:21:CA:0F:91:61:A5:FB:01\";\n"
  },
  {
    "path": "test/config/integration/certs/clientkey.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEA1qs3Sik2kfLzMNiLaKH2steqCmNUQdKxQOh5fnTpSKX1Tkja\nEKrlcMOwpiEWtGHngbuz0BcKHyitsIHAAs/OcvAGpJ5XLWudRRF1aUAZ2gIYNtU8\nLX4SDtDwAzwqQY3j3Ho9wh6kcKWRf6efBGiCtJec8fJ2KT0yGBz7TT0hw1/ik+Xl\nTVZwv4kQ37HqJeD6cRGgptUdKnSaRLwJBh0+3Han5CycQpHIbdZ4nuaxk9bvu2Hb\nvLHNFgMrhMBFACYdyE3cV4XaqzVqTZ7azZyFOdhv2p8Fv75pqcB97COd9396e69P\ntSKGzC/R2eI1MTkjm0MKKBdiDU+ncArGzsKNrQIDAQABAoIBABd8T+Y7MA8zp0uW\nxVnDLnxOf/n2+AbjiCTzyib9n3AlR/symTjtmYCGyFLEl/lQJMXaxUdk3eSezLHc\n4CbumUWV4QQtlpgPh/tAd7n2G13wkLmfBqBrhIo+baPM90qIvX8nmI4eUBtK4eo3\nanxO+s3LMI5/2lGUsmBU+2Ft6L25En1JpUg3THo0/Ek8VdySQ9V1PTFpTjKsd6Na\nDxKYpM85GE1mOKowQqAdbvFByqXoCZcd71ZE9t5XoohDLgPPHq7MA4Z0mKOK5qVG\nIR0pBThm4Ij7IFLjzV1b9IGWiBKj9uWPD5VNSad0lrMFoXBcGsp6YblrfpOkZv41\nPALL8rkCgYEA8a9aYsIvOYnE82MCut2xuTWE7lHFpxDV3rJqN51RZipgpWgz76PM\nkuKcqp3Fi813hz5abUOizQAaa2d57AmJ+VI0i6CbbCMcwu5javYuwOC7b47WeGIy\nfclCgUhbns/vZVatb1pJ+kd5sagT80GehlGoHevd0erzSIbcgZ0KMH8CgYEA42I4\nJ9wJ14pC/aGCTQbpne5RjAi07dpmTwlwpsFIHmUuqaJe25eq+R5lnKLqTyGbxDUQ\n6aU/8i+G4631gMYng7kyuashBTLsFoyMixLWApDO/U1Lil2dDOXM+DqPKuLdyq6v\nSYbfuh87J5BEfmtfFwS5sVSIHNcPp7nUJQ8o69MCgYBv9tV/tQgdtsZoHrFQEo5Y\nCAQ6R+WyPOlnju4IL7hbBTzaxAhzd0W5soPzwr2Ww6whGnDX96J/KBIVOc3Q3KZv\nu3aeTNxT33xejgO+tKf6MOKEjv6qrItJnKhTrkrLqvbz0pDsaj6lVOF6vSvo4Lho\n74Fbwz5zFk54hgm3fiIPTwKBgQCNrrHXBGCNkXVUnMKYRGplIg5l9zblzmRZc0Ri\nY6UQa3O795Srt8GtIKeoBkuBqytoArjbHUDPI5YlYEvNRatxhIB6+IrGtogtNL6O\nGdqIFrsjUnpzaQlm8/nX4oU678nLdTV71zKowrUVXeuP6k+CBEvAly+I6Oi0VjI+\nNUgGSQKBgG0qfjZ+k6KaLEvw/Ry6j1TZEzR9PNryIFb3zmcalZakZ92C9xP6+Wyw\nKaZsJt1b7kOZ/VdKrlUzfCEe8+CFy8eBWKuffb5v6QTa5af3+igGpjwygbjLW3Aw\nJq/Ykllf3iQP/XSq6fQxvhJ4zYYjnNvLAY9yV+MLJkkqaQTnoKSq\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/config/integration/certs/server_ecdsacert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIDoTCCAomgAwIBAgIUJuVBh0FKfFgIcO++ljWm7D47eYYwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODA1MTkxNjAxWhcNMjIw\nODA1MTkxNjAxWjCBpjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM\nEEx5ZnQgRW5naW5lZXJpbmcxGjAYBgNVBAMMEVRlc3QgQmFja2VuZCBUZWFtMSQw\nIgYJKoZIhvcNAQkBFhViYWNrZW5kLXRlYW1AbHlmdC5jb20wWTATBgcqhkjOPQIB\nBggqhkjOPQMBBwNCAARbRc4+r8sbKLs89/fS0Y8CSTVNKn8opkCFx/UyLPvELLn6\nlQ86GjcQIqJRqoEqEwP7iyCaDhFigsOKsNQSzE9/o4HAMIG9MAwGA1UdEwEB/wQC\nMAAwCwYDVR0PBAQDAgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBB\nBgNVHREEOjA4hh5zcGlmZmU6Ly9seWZ0LmNvbS9iYWNrZW5kLXRlYW2CCGx5ZnQu\nY29tggx3d3cubHlmdC5jb20wHQYDVR0OBBYEFM6Jqpu+mo5ATAcIizKPrFH1qDLK\nMB8GA1UdIwQYMBaAFJZL2ixTtL6VxpNz4qekny4NchiHMA0GCSqGSIb3DQEBCwUA\nA4IBAQAYGH/EjPHxtE5fQ7kDsRGcafMLqcx6OYhZlqDfMqepbFEWlkL/kuNVrjez\nNoG67cc3DSagyVocjr7yCz/kkDm4HowRapGK6eSK3dWhjrQzp2F6ImAAfht7YkhF\nJKkuRYULKpsRRj+YVpBpYkUjRVaqLNJJpXOItpvZMsyXDZn/ihy1QerDXaetWXgP\nyFRL+g9f+e8OydXgd/lrKByLBCszNPeg4nhGsgZZ1WHj5sl2EY27eAIWRXqFIQbr\nVwbLfpFm2R2nrJT8zJF54OUpG/3hKOEkgZ3gSK9+tnQeSM01lXHd1ZzrmzCdItDQ\nMNl4mwlDTJLwL/vwMld9UFz0qAYi\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/config/integration/certs/server_ecdsacert_hash.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SERVER_ECDSA_CERT_HASH[] = \"B1:A4:B5:39:F8:91:77:00:3E:3E:23:1D:F2:AD:78:34:85:\"\n                                               \"D3:F0:D4:EF:D2:88:EA:B5:92:F7:71:E1:B8:0D:F5\";\n"
  },
  {
    "path": "test/config/integration/certs/server_ecdsakey.pem",
    "content": "-----BEGIN EC PARAMETERS-----\nBggqhkjOPQMBBw==\n-----END EC PARAMETERS-----\n-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIGY3X0at4PZN6X+MHnEC/vwjaKTq/ffwp0KfMcAKCwgToAoGCCqGSM49\nAwEHoUQDQgAEW0XOPq/LGyi7PPf30tGPAkk1TSp/KKZAhcf1Miz7xCy5+pUPOho3\nECKiUaqBKhMD+4sgmg4RYoLDirDUEsxPfw==\n-----END EC PRIVATE KEY-----\n"
  },
  {
    "path": "test/config/integration/certs/servercert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Backend Team\ncommonName_default = Test Backend Team\ncommonName_max  = 64\nemailAddress = backend-team@lyft.com\nemailAddress_default = backend-team@lyft.com\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nURI.1 = spiffe://lyft.com/backend-team\nDNS.1 = lyft.com\nDNS.2 = www.lyft.com\n"
  },
  {
    "path": "test/config/integration/certs/servercert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEbDCCA1SgAwIBAgIUJuVBh0FKfFgIcO++ljWm7D47eYUwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODA1MTkxNjAxWhcNMjIw\nODA1MTkxNjAxWjCBpjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM\nEEx5ZnQgRW5naW5lZXJpbmcxGjAYBgNVBAMMEVRlc3QgQmFja2VuZCBUZWFtMSQw\nIgYJKoZIhvcNAQkBFhViYWNrZW5kLXRlYW1AbHlmdC5jb20wggEiMA0GCSqGSIb3\nDQEBAQUAA4IBDwAwggEKAoIBAQC9JgaI7hxjPM0tsUna/QmivBdKbCrLnLW9Teak\nRH/Ebg68ovyvrRIlybDT6XhKi+iVpzVY9kqxhGHgrFDgGLBakVMiYJ5EjIgHfoo4\nUUAHwIYbunJluYCgANzpprBsvTC/yFYDVMqUrjvwHsoYYVm36io994k9+t813b70\no0l7/PraBsKkz8NcY2V2mrd/yHn/0HAhv3hl6iiJme9yURuDYQrae2ACSrQtsbel\nKwdZ/Re71Z1awz0OQmAjMa2HuCop+Q/1QLnqBekT5+DH1qKUzJ3Jkq6NRkERXOpi\n87j04rtCBteCogrO67qnuBZ2lH3jYEMb+lQdLkyNMLltBSdLAgMBAAGjgcAwgb0w\nDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUHAwIG\nCCsGAQUFBwMBMEEGA1UdEQQ6MDiGHnNwaWZmZTovL2x5ZnQuY29tL2JhY2tlbmQt\ndGVhbYIIbHlmdC5jb22CDHd3dy5seWZ0LmNvbTAdBgNVHQ4EFgQU2XcTZbc0xKZf\ngNVKSvAbMZJCBoYwHwYDVR0jBBgwFoAUlkvaLFO0vpXGk3Pip6SfLg1yGIcwDQYJ\nKoZIhvcNAQELBQADggEBAFW05aca3hSiEz/g593GAV3XP4lI5kYUjGjbPSy/HmLr\nrdv/u3bGfacywAPo7yld+arMzd35tIYEqnhoq0+/OxPeyhwZXVVUatg5Oknut5Zv\n2+8l+mVW+8oFCXRqr2gwc8Xt4ByYN+HaNUYfoucnjDplOPukkfSuRhbxqnkhA14v\nLri2EbISX14sXf2VQ9I0dkm1hXUxiO0LlA1Z7tvJac9zPSoa6Oljke4D1iH2jzwF\nYn7S/gGvVQgkTmWrs3S3TGyBDi4GTDhCF1R+ESvXz8z4UW1MrCSdYUXbRtsT7sbE\nCjlFYuUyxCi1oe3IHCeXVDo/bmzwGQPDuF3WaDNSYWU=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/config/integration/certs/servercert_hash.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SERVER_CERT_HASH[] = \"E6:D4:55:6E:BC:91:E2:2F:1E:42:48:05:3C:3E:B7:97:ED:3E:51:\"\n                                         \"E3:A9:C4:87:1B:FF:61:76:25:6B:F3:95:7C\";\n"
  },
  {
    "path": "test/config/integration/certs/serverkey.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAvSYGiO4cYzzNLbFJ2v0JorwXSmwqy5y1vU3mpER/xG4OvKL8\nr60SJcmw0+l4Sovolac1WPZKsYRh4KxQ4BiwWpFTImCeRIyIB36KOFFAB8CGG7py\nZbmAoADc6aawbL0wv8hWA1TKlK478B7KGGFZt+oqPfeJPfrfNd2+9KNJe/z62gbC\npM/DXGNldpq3f8h5/9BwIb94ZeooiZnvclEbg2EK2ntgAkq0LbG3pSsHWf0Xu9Wd\nWsM9DkJgIzGth7gqKfkP9UC56gXpE+fgx9ailMydyZKujUZBEVzqYvO49OK7QgbX\ngqIKzuu6p7gWdpR942BDG/pUHS5MjTC5bQUnSwIDAQABAoIBADEMwlcSAFSPuNln\nhzJ9udj0k8md4T8p5Usw/2WLyeJDdBjg30wjQniAJBXgDmyueWMNmFz4iYgdP1CG\n/vYOEPV7iCZ7Da/TDZd77hYKo+MevuhD4lSU1VEoyCDjNA8OxKyHJB77BwmlYS+0\nnE3UOPLji47EOVfUTbvnRBSmn3DCSHkQiRIUP1xMivoiZgKJn+D+FxSMwwiq2pQR\n5tdo7nh2A8RxlYUbaD6i4poUB26HVm8vthXahNEkLpXQOz8MWRzs6xOdDHRzi9kT\nItRLa4A/3LIATqviQ2EpwcALHXcULcNUMTHORC1EHPvheWR5nLuRllYzN4ReoeHC\n3+A5KEkCgYEA52rlh/22/rLckCWugjyJic17vkg46feSOGhjuP2LelrIxNlg491y\no28n8lQPSVnEp3/sT7Y3quVvdboq4DC9LTzq52f6/mCYh9UQRpljuSmFqC2MPG46\nZl5KLEVLzhjC8aTWkhVINSpz9vauXderOpFYlPW32lnRTjJWE276kj8CgYEA0T2t\nULnn7TBvRSpmeWzEBA5FFo2QYkYvwrcVe0pfUltV6pf05xUmMXYFjpezSTEmPhh6\n+dZdhwxDk+6j8Oo61rTWucDsIqMj5ZT1hPNph8yQtb5LRlRbLGVrirU9Tp7xTgMq\n3uRA2Eka1d98dDBsEbMIVFSZ2MX3iezSGRL6j/UCgYEAxZQ82HjEDn2DVwb1EXjC\nLQdliTZ8cTXQf5yQ19aRiSuNkpPN536ga+1xe7JNQuEDx8auafg3Ww98tFT4WmUC\nf2ctX9klMJ4kXISK2twHioVq+gW5X7b04YXLajTX3eTCPDHyiNLmzY2raMWAZdrG\n9MA3kyafjCt3Sn4rg3gTM10CgYEAtJ8WRpJEd8aQttcUIItYZdvfnclUMtE9l0su\nGwCnalN3xguol/X0w0uLHn0rgeoQhhfhyFtY3yQiDcg58tRvODphBXZZIMlNSnic\nvEjW9ygKXyjGmA5nqdpezB0JsB2aVep8Dm5g35Ozu52xNCc8ksbGUO265Jp3xbMN\n5iEw9CUCgYBmfoPnJwzA5S1zMIqESUdVH6p3UwHU/+XTY6JHAnEVsE+BuLe3ioi7\n6dU4rFd845MCkunBlASLV8MmMbod9xU0vTVHPtmANaUCPxwUIxXQket09t19Dzg7\nA23sE+5myXtcfz6YrPhbLkijV4Nd7fmecodwDckvpBaWTMrv52/Www==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/config/integration/certs/upstreamcacert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Upstream CA\ncommonName_default = Test Upstream CA\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:TRUE\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:TRUE\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n"
  },
  {
    "path": "test/config/integration/certs/upstreamcacert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID7zCCAtegAwIBAgIUTQZdxxw6y4+Te1kv8hDza/KXTHUwDQYJKoZIhvcNAQEL\nBQAwfzELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxGTAXBgNVBAMMEFRlc3QgVXBzdHJlYW0gQ0EwHhcNMjAwODA1MTkx\nNjAyWhcNMjIwODA1MTkxNjAyWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2Fs\naWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZ\nMBcGA1UECwwQTHlmdCBFbmdpbmVlcmluZzEZMBcGA1UEAwwQVGVzdCBVcHN0cmVh\nbSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOFT8hbqRn+9AKU2\nIFtZKFFYpt7v2x1e8gtzgPm3TT7RJcV2GLeT1cOwubL81ArQmwfyVlwJkt1wK7Uw\n+Z4FvtcCjQc4dR3yxkIdhzZOiq7PbQgAjyRNNGmneYTAvpXwC+l8ZV2M66ihUKgj\n7iGiqQCvYhuYIb7BEnOj20nFuvHlxaDWOst4SQgZmRIkQyA8rrAIRfu7aQiCEla5\n86AXcXV4gmOW3dsKNoXO8Fr+9mtAmJKocLtlUkCeDW+WYqv6RLjMVa915khNQLde\nbL+5hYxBcKYB10wOVzSTCfM6fbqtpqJZEdlGjkKtQ2Szy3mpoAJKPmZYzodVhL6N\nLhoLjZ8CAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw\nHQYDVR0OBBYEFDtmHVOikybtJjVEI4Q7wvUbwgBkMB8GA1UdIwQYMBaAFDtmHVOi\nkybtJjVEI4Q7wvUbwgBkMA0GCSqGSIb3DQEBCwUAA4IBAQAT3kBm2uCpB4cAmdgu\nu6sqxUvYFzYlHFnWrQ3ZFwMrLRSzUdrcp2nSQz+e8VeXI2SkLPCD5Xg+8GGLWA5X\nlH6tvVx41cRqSr611ebxPVWkEeP+ALkHo4xUbcR5WUJD52VxzqYbhavYFjB2FzqA\nOfefKyXIhcKtezKBwaJbVn9FseH49q6UNjYODOY88rW+2mvDoZWBUuti8CxNhIiu\nRHnGimY7H565NpbPliVlo2GhiKhJvyPwK7+cjfj68HaoixlXHmrg506bczO/Gt1a\nUSQmjtB05h8bki0LQDiCQu1fdOPEflJnv3VdFz2SSKNRab2asP+KbRPURUW8f9zN\nGNxR\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/config/integration/certs/upstreamcakey.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA4VPyFupGf70ApTYgW1koUVim3u/bHV7yC3OA+bdNPtElxXYY\nt5PVw7C5svzUCtCbB/JWXAmS3XArtTD5ngW+1wKNBzh1HfLGQh2HNk6Krs9tCACP\nJE00aad5hMC+lfAL6XxlXYzrqKFQqCPuIaKpAK9iG5ghvsESc6PbScW68eXFoNY6\ny3hJCBmZEiRDIDyusAhF+7tpCIISVrnzoBdxdXiCY5bd2wo2hc7wWv72a0CYkqhw\nu2VSQJ4Nb5Ziq/pEuMxVr3XmSE1At15sv7mFjEFwpgHXTA5XNJMJ8zp9uq2molkR\n2UaOQq1DZLPLeamgAko+ZljOh1WEvo0uGguNnwIDAQABAoIBAFiIeThTuHt8MYK4\nb6I0t8iugnJZ38f8hDHHokd7pBgoaSTar/+BUJ5hE7Wl7VKKgD9xEkl7YX8sEaBR\nq+JQ85jbYboSjsHDn+5eV8AYwBjLW1WnkpZ61zskGHT2nmufM677t4A4XGeXam+G\nHoyMssaYIn4hGjEu/yb8nK6xyDA+kHuuDthhM9qnrOKpZGRscXY3yolJhscif2jW\nOx2VLomYtlaKcZ1mrBapaLpY12WoRU/3YXvPpvWKpajkWVcqHZfQ56KATLmlLuCJ\naW4+A8gXRPepva2Enc6fXKKpiFiqGlBZ9kKVORGVGndpZLSMxP20SMTovtAz3wxQ\nc3kCk0ECgYEA/6Cv7NtfqXLz2ovNIarObFYVuLyN49ubW/JntuRcQA9p5KyQcf8Y\n+4LmR3F+te/Cz+QfC7zDgObpxMqvCs3fKLUg1ZLFRtSXtGRiulzhVRrxDk1FU6KO\nm2+s2FGKyxB+czxg1jglASOXMRjKJ/K+/eiditcjaYeVzFfC7z9nyRECgYEA4af2\nAJEcvKdYc6PibIpmRGSNhCY5MPE88Rg19qA/0I7s1aytimHBYEKAUTpzcw+gbmNz\nIWYYMkpbq8SZD0IGnzTRJTXsOKy8ZxrjlBZMbYsm77qYjTarObLps2Qa/8VharCW\n9PAsT8rwA5PfVoTQaVQFHlyXC4266nx3z4gfa68CgYEAsXIkzQFXPXQLbHjBM46y\n7icvuuZAhJxsEv6JGj8Y/mr0sgVL26Yd/HFYUt2o/Lhrfg43stkcyT0Bp1ae/Zv9\nPe/F1BunD80BZfqNQhq5XG9wR+JBrpXX8nQqAptQAjf33xxZiDq/DTRcfntb0TFD\nfVPdEITZEydIR+nf6l4UOFECgYEA1AnNwS6aQDNHjDI8+xz5h96sk7aPGwwz5aCI\nZJykGkeTCB1gXJ4K5XbXuHwiK8ZNTC0q7AFRT0BL75Wm9Y1nR4aL2FlZBNBboM7F\ndkuVuYF+Ltm5q0fpkSgrLaQtMpW4OlaBItvj536cFeCHhnb6l16aCLOcQwEE2H3o\n3xvb2oUCgYBTIevNPEIVrzncUyP56gu61z5YG/kDQ4+UpUgn6Ab2A1auFCa7mtNp\nqa+LW6MpC6C5In6SSqotd5WbqxbLA6I7vH72psCeAgpkP12Sd31K/ikiwqj3EaEt\n11ucC4/nb+WZQEn1sqPOzichZoyJyaGAjmCk40sdHk2ZQJZcvA78Gg==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/config/integration/certs/upstreamcert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Upstream Server\ncommonName_default = Test Upstream Server\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nDNS.1 = *.lyft.com\nIP.1 = 127.0.0.1\nIP.2 = ::1"
  },
  {
    "path": "test/config/integration/certs/upstreamcert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEPjCCAyagAwIBAgIUEuy1WgSCzX6mojPirk7Th6uhNHowDQYJKoZIhvcNAQEL\nBQAwfzELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxGTAXBgNVBAMMEFRlc3QgVXBzdHJlYW0gQ0EwHhcNMjAwODA1MTkx\nNjAyWhcNMjIwODA1MTkxNjAyWjCBgzELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNh\nbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQx\nGTAXBgNVBAsMEEx5ZnQgRW5naW5lZXJpbmcxHTAbBgNVBAMMFFRlc3QgVXBzdHJl\nYW0gU2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtpiYA4/I\nNuflkPe4L/GTslmngNQUCo8TzPXG0gt7uoxr4FeuVy7AaD28S2/hwhbl+bDtHTQY\nmvBUwNMsYzpND2eQ3sSIumdeLzBEKP2mnnZ9gE/Zd2TIuZl686RpDq0B6ZdZSpCu\nbqQmmPFLiRNH8JViJZMN5yqMt7T5oq+DnCYQZllqmpAwd6NnhKALrYmZ87oqc0zh\nkf+5amP7zMYKkwQuRwcx4QPZkEp3+qhszolpAJ52dFGJ+pLuUVDg0Gf0cnxLjFKc\n6vcTlj4tsymR4ci58MHRt4EdGdhShw0oaj67gRRfU4Vj61I2ZAVH07kL0mjO2TZT\nEKrOEJJ7/dtxdwIDAQABo4GsMIGpMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXg\nMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAtBgNVHREEJjAkggoqLmx5\nZnQuY29thwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMB0GA1UdDgQWBBQeoC5wxwX5\nk3ggIN/844/6jKx9czAfBgNVHSMEGDAWgBQ7Zh1TopMm7SY1RCOEO8L1G8IAZDAN\nBgkqhkiG9w0BAQsFAAOCAQEA18wEg8LnPm99cIouFUFMAO+BpiY2KVa9Bu6x07m9\nquNFv7/4mLt87sk/umD3LH/tDjqW0D84vhG9a+0yDq7ZrD/P5eK3R+yBINwhe4/x\nobJlThEsbcZF1FkMnq1rt53izukyQLLQwoVxidQl3HCg3hosWmpH1VBPgwoize6V\naAhKLW0n+JSfIE1d80nvZdYlHuCnS6UhLmAbTBCnwT0aGTfzT0Dd4KlYiY8vGZRu\ntXOw4MzKtJcOL3t7Zpz2mhqN25dyiuyvKEhLXdx48aemwa2t6ISfFKsd0/glnNe/\nPFZMakzKv1G0xLGURjsInCZ0kePAmerfZN6CBZDo4laYEg==\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/config/integration/certs/upstreamcert_hash.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_UPSTREAM_CERT_HASH[] = \"9E:4B:E7:83:5B:DE:82:A6:E5:CE:24:8D:DB:91:C1:0C:20:0F:\"\n                                           \"25:3A:D4:4C:5E:13:AB:CD:53:00:93:85:F3:BE\";\n"
  },
  {
    "path": "test/config/integration/certs/upstreamkey.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAtpiYA4/INuflkPe4L/GTslmngNQUCo8TzPXG0gt7uoxr4Feu\nVy7AaD28S2/hwhbl+bDtHTQYmvBUwNMsYzpND2eQ3sSIumdeLzBEKP2mnnZ9gE/Z\nd2TIuZl686RpDq0B6ZdZSpCubqQmmPFLiRNH8JViJZMN5yqMt7T5oq+DnCYQZllq\nmpAwd6NnhKALrYmZ87oqc0zhkf+5amP7zMYKkwQuRwcx4QPZkEp3+qhszolpAJ52\ndFGJ+pLuUVDg0Gf0cnxLjFKc6vcTlj4tsymR4ci58MHRt4EdGdhShw0oaj67gRRf\nU4Vj61I2ZAVH07kL0mjO2TZTEKrOEJJ7/dtxdwIDAQABAoIBACz6E1+1N/0GTA7U\nZgMxP09MNC1QkAs1yQvQcoPknjqKQjxFfMUu1+gVZN80FOjpGQbTJOTvoyvvDQFe\nQu3CO58SxKWKxZ8cvR9khTWPnU4lI67KfGejZKoK+zUuh049IV53kGAEmWLZfkRo\nE1IVdL/3G/DjcyZA3d6WbnM7RnDcqORPnig4lq5HxN76eBdssbxtrAi3Sjy3ChMy\nBLInnryF4UtaT5xqR26YjgtFmYkunrgXTe1i/ewQgBBkSPXcNr7or69hCCv0SG9e\nvRsv1r+Uug3/iRZDjEhKBmXWNAZJ/IsDF37ywiyeBdUY+klDX+mWz+0BB0us8b4u\nLxoZQTECgYEA2Gu9EVC0RMrQ9FF5AgKKJWmZKkOn346RkPrtbl5lbuUgnVdBXJjr\nwfMZVTD/8E/tMN4EMSGmC9bxCpRRzhrphrm7SHGD6b9O30DH9q0TV0r0A8IG/bMO\nxJLYjrYVxtEE+KckzvyvfIefbDG7wYkI3u+ObmjBg9t6jcErKlI/PdkCgYEA1/1E\nT+cpR16iOPz1mz+f/GU4YmPkdSDj/PrjMv0c1OTDvdPiZPpLkhLUKiICnKSKbYjX\nKo8fdZc3cmakgD1dXtAfR7Tf/hXQIR5+iHD48I5e9DVlkqMNDObfj82ohTFKVe/P\nZSwgDiAPTMFxWr26u/GzY5D3adCQYJyKE2wTh88CgYEAu7vpzGVnmu0ciXNLNvUh\nBQcvODxsGT9BArTI1Z7I+oOD4TjZmAuHJz1L0lypB7stk+BjXoND2K1hdr3moJUz\n0gy3a0YdGd07++nkDBVi26xHNCNRkS2MN/TyKgnFpiuW1mOXSH5lc+7p2h7iMiY/\nLbQ8p4Xzp//xtZnFafbiqTECgYEAwDN5KZ1r5z24H/xCVv+cT46HSU7ZCr3VA9cC\nfOouUOitouu9J9xviTJGKKQRLPFi2awOxKmN9ic1SRE7y35P60JKw5WaSdGBXydy\ns9nMPMyEhM5Lb9y2jUeZo68ACl5dZvG63a4RbGBtHQF67KOvWvXvi2eCM2BMShyi\n5jujeZMCgYAjewq1hVqL1FOD8sIFpmndsH3+Dfc7BJ/erqGOX9bQYGvJO4nCe+7K\n4o8qFQf4jwdxu0iNxYJIMdn+l4/pz2e7GUFHjgMduUclf27Qj1p+8EyYqp6cmkzM\n8mcwRkYo3aM70EmUu0Xxi3d5O5F1bIJ5MkgXaX/zSF2N02B3jXroxQ==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/config/integration/certs/upstreamlocalhostcert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Upstream Server\ncommonName_default = Test Upstream Server\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nDNS.2 = localhost\nIP.1 = 127.0.0.1\nIP.2 = ::1\n"
  },
  {
    "path": "test/config/integration/certs/upstreamlocalhostcert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEPTCCAyWgAwIBAgIUEuy1WgSCzX6mojPirk7Th6uhNHswDQYJKoZIhvcNAQEL\nBQAwfzELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxGTAXBgNVBAMMEFRlc3QgVXBzdHJlYW0gQ0EwHhcNMjAwODA1MTkx\nNjAzWhcNMjIwODA1MTkxNjAzWjCBgzELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNh\nbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQx\nGTAXBgNVBAsMEEx5ZnQgRW5naW5lZXJpbmcxHTAbBgNVBAMMFFRlc3QgVXBzdHJl\nYW0gU2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAut0zAnO+\n0q/IyxWG/LVbjh5gI2C9ZqNpFD+PK/bwPahg5JdtLnOWPwL+3BqjRcJ7/viocncL\neF3bhJ2hgF7gZGqd07juA3O9Qo+UUUgFvSsEGg2Y5c5gmD3rxdatxv/TnCmWn2pf\n5366GmMQXCz99QGFuwUvIAGdApTjh65V32BZlgIwI9ArRy2v4YyW4hEaqiMeMaqB\nQ994UCqlbhL/d3eBz3pEG0UIMkUPndTN42cWK8dGhSSHlJQkdAOk6x+24xY/qvjc\nOZOtoO0GQu+SBwV+S0Wt75CI2k/IKYzMX5G7QFEcE6RgkL8Ag2DSG4bl/6Zf3C3z\nufWZwMlhjzZoHwIDAQABo4GrMIGoMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXg\nMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAsBgNVHREEJTAjgglsb2Nh\nbGhvc3SHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwHQYDVR0OBBYEFKsGHS0dB1v/\nQ0YBMcKXV0BC95TFMB8GA1UdIwQYMBaAFDtmHVOikybtJjVEI4Q7wvUbwgBkMA0G\nCSqGSIb3DQEBCwUAA4IBAQAet+A9/JpMOcTstI8AVxVtJ+DuaPSl9L+kUSTPRwee\nF1YeHz3i7ibhdheZcWKWJUaVYmU2VTzYTT3iPxB36KvpHEcsxRAaCyND9a48JeIn\nawV+2KUD6QhFPWrtcM/TIzDcrl9qT69nnraVv1haXh/t5xRA30+h5aA4t9PnHxWA\nmDv6Hslqi/NLCN1KBIZxmUspLkZFJT3uS7mdcI3d0m5tQx7W7UZr/40sbdxRjK3Y\nXnCJVa51ZAkoJMEiDY7/zPraZbW1QiIaGBRTGVJ/rzJDvwnMo117GIqiRKKw9fo3\nGntHAThLZeCkdfAcDnzKLkOU6Sp9QHXd2pAC7H1z3EjP\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/config/integration/certs/upstreamlocalhostcert_hash.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_UPSTREAMLOCALHOST_CERT_HASH[] =\n    \"8C:49:C0:6C:4C:02:39:29:EB:5B:37:C6:62:02:7A:81:77:22:35:DD:99:1D:62:78:4C:95:5B:38:36:6C:1F:\"\n    \"1E\";\n"
  },
  {
    "path": "test/config/integration/certs/upstreamlocalhostkey.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAut0zAnO+0q/IyxWG/LVbjh5gI2C9ZqNpFD+PK/bwPahg5Jdt\nLnOWPwL+3BqjRcJ7/viocncLeF3bhJ2hgF7gZGqd07juA3O9Qo+UUUgFvSsEGg2Y\n5c5gmD3rxdatxv/TnCmWn2pf5366GmMQXCz99QGFuwUvIAGdApTjh65V32BZlgIw\nI9ArRy2v4YyW4hEaqiMeMaqBQ994UCqlbhL/d3eBz3pEG0UIMkUPndTN42cWK8dG\nhSSHlJQkdAOk6x+24xY/qvjcOZOtoO0GQu+SBwV+S0Wt75CI2k/IKYzMX5G7QFEc\nE6RgkL8Ag2DSG4bl/6Zf3C3zufWZwMlhjzZoHwIDAQABAoIBAFhHQd7psX+1PeX7\nYI8oWn10ijSMck336x9+u3OosGxgjI3Rn+nu/077ak2vY+0D6TJWZLXW2Ztes+Md\n2PtdVyL5X2BzoDYPSp0UWZxgqx1oIgLw44fFjMq/jhAj0GsP1veSii77wR0LOH5Y\nyJTTSJKjynrFAzNar8NVdXxW9wiUnE9pXzXFEKhWY9zntYOxxzpSXdbUtQBWMnCU\n3gVkkKoJHSNqiDjLDHeNkmScPirJw+if8B+N5pgzhcoBzGdMGIEHxwvd7PtjyaxJ\nkw+XPJj/JIsipRBqXwhcjnbuy0gnHMiYgYS2i70iO853VWJvDdHYXNsF/wT2VLB0\n3jfcidkCgYEA4z7Q98cv6JcjMdtCBl4NAecxNgV+TLgy7eekWQUlVq2CXT8Ky72H\nqcNF1PDNEKzruNMtITomEBoOJbhMzMr0HOccP/DEU/nhMludR3a7MCL2TQw56Dhv\niI9Pzd0v8HwTggd1T8WP+ABwHRLHpJrMHnaKxWdYppm180azmRhaMAsCgYEA0oJO\ngpyJ+1G2w54pZ4zk17I0ZHANYb2dJ6bM1w2qGONHCEPfRWeo3oPy7zdjtgs2rDc9\neYtnuRrGCuzEJRI72+Qk/oxEDaqLUlEARjFEgzo17PijcGz1CLRHBZbG2+F9JBDF\nSlTG0lpl4i99xN4JHv3ysYWvgXlWpdVuNW/I0L0CgYA66ITJRpRvygYwnXMPLYBX\ntvP12hS0lKd3Lq5W+VOFlbMOsxH8YORzKJDIs6elI/5zSiMP0wAc+nQiaRVXnWEM\nwQh8ttBeKI+tOzyZUvkRcG7C6GF2hnK7RtNcPXN49uEjuwU5KbC5jHuDveONEyfI\n2df9dl3vyjb1mqViEYMHowKBgH7jrQ9t7H5hMxmXLL4OX6Lk+E/Sez5/XUuZb7/x\nrKZz2U1SHDNp2JDIWJd5e9Ev0TTd12B8d3lMejP7o//0jcBuNR56zkqukmx8Bv5I\nlFPFstu0xE/wXYNxp53m1NeVhClJMqMrlu0VMHS2y8jvTfAwgyoeuzwAOAqeLGBp\nkVLBAoGBAIU50ngoEEkuBa9jZiwRTUEnkcs6w5rxIiU9CHDKPkoki3m6Ke5ACE6X\n+iwv75MpIeMwNCVOsQNascRMgLfSFMJwnHm9rruCqi3iCGEhvvANrSbl80jPVaFq\nwuElkQJ67t/Kqj2Nxq84dtP8SCdYtu582xS0NyIFBBJ9EPausuIx\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/config/integration/google_com_proxy_port_0.v2.yaml",
    "content": "admin:\n  access_log_path: \"{{ null_device_path }}\"\n  address:\n    socket_address:\n      address: \"{{ ip_any_address }}\"\n      port_value: 0\n\nstatic_resources:\n  listeners:\n  - name: listener_0\n    address:\n      socket_address:\n        address: \"{{ ip_any_address }}\"\n        port_value: 0\n    filter_chains:\n    - filters:\n      - name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: ingress_http\n          codec_type: AUTO\n          route_config:\n            name: local_route\n            virtual_hosts:\n            - name: local_service\n              domains: [\"*\"]\n              routes:\n              - match: { prefix: \"/\" }\n                route: { host_rewrite: www.google.com, cluster: service_google }\n  clusters:\n  - name: service_google\n    connect_timeout: 0.25s\n    type: LOGICAL_DNS\n    # Comment out the following line to test on v6 networks\n    dns_lookup_family: {{ dns_lookup_family }}\n    lb_policy: ROUND_ROBIN\n    load_assignment:\n      cluster_name: service_google\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address: { socket_address: { address: google.com, port_value: 443 }}\n"
  },
  {
    "path": "test/config/integration/server.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: {{ ip_loopback_address }}\n        port_value: 0\n    reuse_port: {{ reuse_port }}\n    filter_chains:\n    - filters:\n      - name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          drain_timeout: 5s\n          route_config:\n            virtual_hosts:\n            - require_tls: all\n              routes:\n              - route: { cluster: cluster_1 }\n                match: { prefix: \"/\" }\n              domains:\n              - www.redirect.com\n              name: redirect\n            - routes:\n              - match: { prefix: \"/\" }\n                route:\n                  cluster: cluster_1\n              - match: { prefix: \"/test/long/url\" }\n                route:\n                  rate_limits:\n                  - actions:\n                    - destination_cluster: {}\n                  cluster: cluster_1\n              - match: { prefix: \"/test/\" }\n                route: { cluster: cluster_2 }\n              - match: { prefix: \"/websocket/test\" }\n                route:\n                  prefix_rewrite: \"/websocket\"\n                  cluster: cluster_1\n              domains:\n              - \"*\"\n              name: integration\n          codec_type: http1\n          stat_prefix: router\n          http_filters:\n          - name: health\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck\n              pass_through_mode: false\n          - name: envoy.filters.http.router\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n          access_log:\n          - name: accesslog\n            typed_config:\n              \"@type\": 
type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog\n              path: {{ null_device_path }}\n            filter:\n              or_filter:\n                filters:\n                - status_code_filter:\n                    comparison:\n                      op: GE\n                      value:\n                        default_value: 500\n                        runtime_key: access_log.access_error.status\n                - duration_filter:\n                    comparison:\n                      op: GE\n                      value:\n                        default_value: 1000\n                        runtime_key: access_log.access_error.duration\n  clusters:\n  - name: cluster_1\n    connect_timeout: 5s\n    load_assignment:\n      cluster_name: cluster_1\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: {{ ip_loopback_address }}\n                port_value: {{ upstream_0 }}\n    dns_lookup_family: \"{{ dns_lookup_family }}\"\n  - name: cluster_2\n    type: STRICT_DNS\n    connect_timeout: 5s\n    load_assignment:\n      cluster_name: cluster_2\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: localhost\n                port_value: {{ upstream_1 }}\n    dns_lookup_family: \"{{ dns_lookup_family }}\"\n  - name: cluster_3\n    connect_timeout: 5s\n    per_connection_buffer_limit_bytes: 1024\n    load_assignment:\n      cluster_name: cluster_3\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: {{ ip_loopback_address }}\n                port_value: {{ upstream_0 }}\n    dns_lookup_family: \"{{ dns_lookup_family }}\"\n  - name: statsd\n    type: STRICT_DNS\n    connect_timeout: 5s\n    load_assignment:\n      cluster_name: statsd\n      endpoints:\n      - lb_endpoints:\n      
  - endpoint:\n            address:\n              socket_address:\n                address: localhost\n                port_value: 4\n    dns_lookup_family: \"{{ dns_lookup_family }}\"\n  - name: redis\n    type: STRICT_DNS\n    connect_timeout: 5s\n    lb_policy: RING_HASH\n    load_assignment:\n      cluster_name: redis\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: localhost\n                port_value: 4\n    dns_lookup_family: \"{{ dns_lookup_family }}\"\n    outlier_detection: {}\ndynamic_resources: {}\ncluster_manager: {}\nflags_path: \"/invalid_flags\"\nstats_sinks:\n- name: local_stats\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.metrics.v3.StatsdSink\n    address:\n      socket_address:\n        address: {{ ip_loopback_address }}\n        port_value: 8125\n- name: tcp_stats\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.metrics.v3.StatsdSink\n    tcp_cluster_name: statsd\nwatchdog: {}\nlayered_runtime:\n  layers:\n    - name: root\n      disk_layer:\n        symlink_root: \"{{ test_tmpdir }}/test/common/runtime/test_data/current\"\n        subdirectory: envoy\n    - name: override\n      disk_layer:\n        symlink_root: \"{{ test_tmpdir }}/test/common/runtime/test_data/current\"\n        subdirectory: envoy_override\n        append_service_cluster: true\n    - name: admin\n      admin_layer: {}\nadmin:\n  access_log_path: \"{{ null_device_path }}\"\n  profile_path: \"{{ test_tmpdir }}/envoy.prof\"\n  address:\n    socket_address:\n      address: {{ ip_loopback_address }}\n      port_value: 0\n"
  },
  {
    "path": "test/config/integration/server_unix_listener.yaml",
    "content": "static_resources:\n  listeners:\n  - address:\n      pipe:\n        path: \"{{ socket_dir }}/unix-sockets.listener_0\"\n    filter_chains:\n    - filters:\n      - name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          http_filters:\n          - name: envoy.filters.http.router\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n          codec_type: auto\n          stat_prefix: router\n          drain_timeout: 5s\n          route_config:\n            virtual_hosts:\n            - domains:\n              - \"*\"\n              name: vhost_0\n              routes:\n              - match: { prefix: \"/\" }\n                route: { cluster: cluster_0 }\n  clusters:\n  - name: cluster_0\n    connect_timeout: 5s\n    load_assignment:\n      cluster_name: cluster_0\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: \"{{ ip_loopback_address }}\"\n                port_value: 0\n    dns_lookup_family: V4_ONLY\ncluster_manager: {}\nwatchdog: {}\nadmin:\n  access_log_path: \"{{ null_device_path }}\"\n  address:\n    socket_address:\n      address: \"{{ ip_loopback_address }}\"\n      port_value: 0\n"
  },
  {
    "path": "test/config/integration/server_xds.bootstrap.udpa.yaml",
    "content": "dynamic_resources:\n  lds_resources_locator:\n    scheme: FILE\n    id: {{ lds_json_path }}\n    resource_type: envoy.config.listener.v3.ListenerCollection\n  cds_config:\n    path: {{ cds_json_path }}\nadmin:\n  access_log_path: {{ null_device_path }}\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n"
  },
  {
    "path": "test/config/integration/server_xds.bootstrap.yaml",
    "content": "dynamic_resources:\n  lds_config:\n    path: {{ lds_json_path }}\n  cds_config:\n    path: {{ cds_json_path }}\nadmin:\n  access_log_path: {{ null_device_path }}\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n"
  },
  {
    "path": "test/config/integration/server_xds.cds.with_unknown_field.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n  name: cluster_1\n  connect_timeout: { seconds: 5 }\n  type: EDS\n  eds_cluster_config:\n    eds_config: { path: {{ eds_json_path }} }\n  lb_policy: ROUND_ROBIN\n  http2_protocol_options: {}\n  typed_extension_protocol_options:\n    envoy.test.dynamic_validation:\n      \"@type\": type.googleapis.com/google.protobuf.Struct\n      value:\n        stat_prefix: blah\n        cluster: blah\n        foo: bar\n"
  },
  {
    "path": "test/config/integration/server_xds.cds.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.cluster.v3.Cluster\n  name: cluster_1\n  connect_timeout: { seconds: 5 }\n  type: EDS\n  eds_cluster_config:\n    eds_config: { path: {{ eds_json_path }} }\n  lb_policy: ROUND_ROBIN\n  http2_protocol_options: {}\n"
  },
  {
    "path": "test/config/integration/server_xds.eds.ads_cluster.yaml",
    "content": "version_info: \"123\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\n  cluster_name: ads_cluster\n  endpoints:\n  - lb_endpoints:\n    - endpoint:\n        address:\n          socket_address:\n            address: {{ ntop_ip_loopback_address }}\n            port_value: {{ upstream_0 }}\n"
  },
  {
    "path": "test/config/integration/server_xds.eds.with_unknown_field.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\n  cluster_name: cluster_1\n  foo: bar\n  endpoints:\n  - lb_endpoints:\n    - endpoint:\n        address:\n          socket_address:\n            address: {{ ntop_ip_loopback_address }}\n            port_value: {{ upstream_0 }}\n"
  },
  {
    "path": "test/config/integration/server_xds.eds.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\n  cluster_name: cluster_1\n  endpoints:\n  - lb_endpoints:\n    - endpoint:\n        address:\n          socket_address:\n            address: {{ ntop_ip_loopback_address }}\n            port_value: {{ upstream_0 }}\n"
  },
  {
    "path": "test/config/integration/server_xds.lds.typed_struct.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n  name: listener_0\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n  filter_chains:\n  - filters:\n    - name: http\n      typed_config:\n        \"@type\": type.googleapis.com/udpa.type.v1.TypedStruct\n        type_url: \"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\"\n        value:\n          codec_type: HTTP2\n          drain_timeout: 5s\n          stat_prefix: router\n          rds:\n            route_config_name: route_config_0\n            config_source: { path: {{ rds_json_path }} }\n          http_filters: [{ name: envoy.filters.http.router }]\n"
  },
  {
    "path": "test/config/integration/server_xds.lds.udpa.list_collection.yaml",
    "content": "version: \"0\"\nresource:\n  \"@type\": type.googleapis.com/envoy.config.listener.v3.ListenerCollection\n  entries:\n  - inline_entry:\n      name: listener_0\n      version: \"0\"\n      resource:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: listener_0\n        address:\n          socket_address:\n            address: {{ ntop_ip_loopback_address }}\n            port_value: 0\n        filter_chains:\n        - filters:\n          - name: http\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n              codec_type: HTTP2\n              drain_timeout: 5s\n              stat_prefix: router\n              rds:\n                route_config_name: route_config_0\n                config_source: { path: {{ rds_json_path }} }\n              http_filters: [{ name: envoy.filters.http.router }]\n"
  },
  {
    "path": "test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n  name: listener_0\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n  filter_chains:\n  - filters:\n    - name: http\n      typed_config:\n        \"@type\": type.googleapis.com/udpa.type.v1.TypedStruct\n        type_url: \"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\"\n        value:\n          codec_type: HTTP2\n          drain_timeout: 5s\n          stat_prefix: router\n          rds:\n            route_config_name: route_config_0\n            config_source: { path: {{ rds_json_path }} }\n          http_filters: [{ name: envoy.filters.http.router }]\n          foo: bar\n"
  },
  {
    "path": "test/config/integration/server_xds.lds.with_unknown_field.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n  name: listener_0\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n  filter_chains:\n  - filters:\n    - name: http\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n        codec_type: HTTP2\n        drain_timeout: 5s\n        stat_prefix: router\n        rds:\n          route_config_name: route_config_0\n          config_source: { path: {{ rds_json_path }} }\n        http_filters: [{ name: envoy.filters.http.router }]\n        foo: bar\n"
  },
  {
    "path": "test/config/integration/server_xds.lds.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n  name: listener_0\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n  filter_chains:\n  - filters:\n    - name: http\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n        codec_type: HTTP2\n        drain_timeout: 5s\n        stat_prefix: router\n        rds:\n          route_config_name: route_config_0\n          config_source: { path: {{ rds_json_path }} }\n        http_filters: [{ name: envoy.filters.http.router }]\n"
  },
  {
    "path": "test/config/integration/server_xds.rds.with_unknown_field.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.route.v3.RouteConfiguration\n  name: route_config_0\n  virtual_hosts:\n  - name: integration\n    domains: [ \"*\" ]\n    routes:\n    - match: { prefix: \"/test/long/url\" }\n      route: { cluster: cluster_1 }\n  foo: bar\n"
  },
  {
    "path": "test/config/integration/server_xds.rds.yaml",
    "content": "version_info: \"0\"\nresources:\n- \"@type\": type.googleapis.com/envoy.config.route.v3.RouteConfiguration\n  name: route_config_0\n  virtual_hosts:\n  - name: integration\n    domains: [ \"*\" ]\n    routes:\n    - match: { prefix: \"/test/long/url\" }\n      route: { cluster: cluster_1 }\n"
  },
  {
    "path": "test/config/utility.cc",
    "content": "#include \"test/config/utility.h\"\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/extensions/access_loggers/file/v3/file.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/transport_sockets/tap/v3/tap.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/config/integration/certs/client_ecdsacert_hash.h\"\n#include \"test/config/integration/certs/clientcert_hash.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nstd::string ConfigHelper::baseConfig() {\n  return fmt::format(R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\ndynamic_resources:\n  lds_config:\n    path: {}\nstatic_resources:\n  secrets:\n  - name: \"secret_static_0\"\n    tls_certificate:\n      certificate_chain:\n        inline_string: \"DUMMY_INLINE_BYTES\"\n      private_key:\n        inline_string: \"DUMMY_INLINE_BYTES\"\n      password:\n        inline_string: \"DUMMY_INLINE_BYTES\"\n  clusters:\n    name: cluster_0\n    load_assignment:\n      cluster_name: cluster_0\n      endpoints:\n      - lb_endpoints:\n        - 
endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 0\n  listeners:\n    name: listener_0\n    address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 0\n)EOF\",\n                     Platform::null_device_path, Platform::null_device_path);\n}\n\nstd::string ConfigHelper::baseUdpListenerConfig() {\n  return fmt::format(R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\nstatic_resources:\n  clusters:\n    name: cluster_0\n    load_assignment:\n      cluster_name: cluster_0\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 0\n  listeners:\n    name: listener_0\n    address:\n      socket_address:\n        address: 0.0.0.0\n        port_value: 0\n        protocol: udp\n)EOF\",\n                     Platform::null_device_path);\n}\n\nstd::string ConfigHelper::tcpProxyConfig() {\n  return absl::StrCat(baseConfig(), R\"EOF(\n    filter_chains:\n      filters:\n        name: tcp\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy\n          stat_prefix: tcp_stats\n          cluster: cluster_0\n)EOF\");\n}\n\nstd::string ConfigHelper::tlsInspectorFilter() {\n  return R\"EOF(\nname: \"envoy.filters.listener.tls_inspector\"\ntyped_config:\n)EOF\";\n}\n\nstd::string ConfigHelper::httpProxyConfig() {\n  return absl::StrCat(baseConfig(), fmt::format(R\"EOF(\n    filter_chains:\n      filters:\n        name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: config_test\n          delayed_close_timeout:\n            nanos: 100\n          http_filters:\n            name: envoy.filters.http.router\n         
 codec_type: HTTP1\n          access_log:\n            name: accesslog\n            filter:\n              not_health_check_filter:  {{}}\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n              path: {}\n          route_config:\n            virtual_hosts:\n              name: integration\n              routes:\n                route:\n                  cluster: cluster_0\n                match:\n                  prefix: \"/\"\n              domains: \"*\"\n            name: route_config_0\n)EOF\",\n                                                Platform::null_device_path));\n}\n\n// TODO(danzh): For better compatibility with HTTP integration test framework,\n// it's better to combine with HTTP_PROXY_CONFIG, and use config modifiers to\n// specify quic specific things.\nstd::string ConfigHelper::quicHttpProxyConfig() {\n  return absl::StrCat(baseUdpListenerConfig(), fmt::format(R\"EOF(\n    filter_chains:\n      transport_socket:\n        name: envoy.transport_sockets.quic\n      filters:\n        name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: config_test\n          http_filters:\n            name: envoy.filters.http.router\n          codec_type: HTTP3\n          access_log:\n            name: file_access_log\n            filter:\n              not_health_check_filter:  {{}}\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n              path: {}\n          route_config:\n            virtual_hosts:\n              name: integration\n              routes:\n                route:\n                  cluster: cluster_0\n                match:\n                  prefix: \"/\"\n              domains: \"*\"\n            name: route_config_0\n    udp_listener_config:\n      udp_listener_name: 
\"quiche_quic_listener\"\n)EOF\",\n                                                           Platform::null_device_path));\n}\n\nstd::string ConfigHelper::defaultBufferFilter() {\n  return R\"EOF(\nname: buffer\ntyped_config:\n    \"@type\": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer\n    max_request_bytes : 5242880\n)EOF\";\n}\n\nstd::string ConfigHelper::smallBufferFilter() {\n  return R\"EOF(\nname: buffer\ntyped_config:\n    \"@type\": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer\n    max_request_bytes : 1024\n)EOF\";\n}\n\nstd::string ConfigHelper::defaultHealthCheckFilter() {\n  return R\"EOF(\nname: health_check\ntyped_config:\n    \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n    pass_through_mode: false\n)EOF\";\n}\n\nstd::string ConfigHelper::defaultSquashFilter() {\n  return R\"EOF(\nname: squash\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.squash.v2.Squash\n  cluster: squash\n  attachment_template:\n    spec:\n      attachment:\n        env: \"{{ SQUASH_ENV_TEST }}\"\n      match_request: true\n  attachment_timeout:\n    seconds: 1\n    nanos: 0\n  attachment_poll_period:\n    seconds: 2\n    nanos: 0\n  request_timeout:\n    seconds: 1\n    nanos: 0\n)EOF\";\n}\n\n// TODO(fredlas) set_node_on_first_message_only was true; the delta+SotW unification\n//               work restores it here.\n// TODO(#6327) cleaner approach to testing with static config.\nstd::string ConfigHelper::discoveredClustersBootstrap(const std::string& api_type) {\n  return fmt::format(\n      R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\ndynamic_resources:\n  cds_config:\n    api_config_source:\n      api_type: {}\n      grpc_services:\n        envoy_grpc:\n          cluster_name: my_cds_cluster\n      set_node_on_first_message_only: false\nstatic_resources:\n  clusters:\n  - name: my_cds_cluster\n    
http2_protocol_options: {{}}\n    load_assignment:\n      cluster_name: my_cds_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 0\n  listeners:\n    name: http\n    address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 0\n    filter_chains:\n      filters:\n        name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: config_test\n          http_filters:\n            name: envoy.filters.http.router\n          codec_type: HTTP2\n          route_config:\n            name: route_config_0\n            validate_clusters: false\n            virtual_hosts:\n              name: integration\n              routes:\n              - route:\n                  cluster: cluster_1\n                match:\n                  prefix: \"/cluster1\"\n              - route:\n                  cluster: cluster_2\n                match:\n                  prefix: \"/cluster2\"\n              domains: \"*\"\n)EOF\",\n      Platform::null_device_path, api_type);\n}\n\n// TODO(#6327) cleaner approach to testing with static config.\nstd::string ConfigHelper::adsBootstrap(const std::string& api_type,\n                                       envoy::config::core::v3::ApiVersion api_version) {\n  return fmt::format(R\"EOF(\ndynamic_resources:\n  lds_config:\n    resource_api_version: {1}\n    ads: {{}}\n  cds_config:\n    resource_api_version: {1}\n    ads: {{}}\n  ads_config:\n    transport_api_version: {1}\n    api_type: {0}\nstatic_resources:\n  clusters:\n    name: dummy_cluster\n    connect_timeout:\n      seconds: 5\n    type: STATIC\n    load_assignment:\n      cluster_name: dummy_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n     
           address: 127.0.0.1\n                port_value: 0\n    lb_policy: ROUND_ROBIN\n    http2_protocol_options: {{}}\nadmin:\n  access_log_path: {2}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\n)EOF\",\n                     api_type, api_version == envoy::config::core::v3::ApiVersion::V2 ? \"V2\" : \"V3\",\n                     Platform::null_device_path);\n}\n\n// TODO(samflattery): bundle this up with buildCluster\nenvoy::config::cluster::v3::Cluster\nConfigHelper::buildStaticCluster(const std::string& name, int port, const std::string& address) {\n  return TestUtility::parseYaml<envoy::config::cluster::v3::Cluster>(fmt::format(R\"EOF(\n      name: {}\n      connect_timeout: 5s\n      type: STATIC\n      load_assignment:\n        cluster_name: {}\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: {}\n                  port_value: {}\n      lb_policy: ROUND_ROBIN\n      http2_protocol_options: {{}}\n    )EOF\",\n                                                                                 name, name,\n                                                                                 address, port));\n}\n\nenvoy::config::cluster::v3::Cluster\nConfigHelper::buildCluster(const std::string& name, const std::string& lb_policy,\n                           envoy::config::core::v3::ApiVersion api_version) {\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster;\n  TestUtility::loadFromYaml(fmt::format(R\"EOF(\n      name: {}\n      connect_timeout: 5s\n      type: EDS\n      eds_cluster_config:\n        eds_config:\n          resource_api_version: {}\n          ads: {{}}\n      lb_policy: {}\n      http2_protocol_options: {{}}\n    )EOF\",\n                                        name, apiVersionStr(api_version), lb_policy),\n                            cluster, shouldBoost(api_version));\n  return 
cluster;\n}\n\nenvoy::config::cluster::v3::Cluster\nConfigHelper::buildTlsCluster(const std::string& name, const std::string& lb_policy,\n                              envoy::config::core::v3::ApiVersion api_version) {\n  API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster;\n  TestUtility::loadFromYaml(\n      fmt::format(R\"EOF(\n      name: {}\n      connect_timeout: 5s\n      type: EDS\n      eds_cluster_config:\n        eds_config:\n          resource_api_version: {}\n          ads: {{}}\n      transport_socket:\n        name: envoy.transport_sockets.tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n          common_tls_context:\n            validation_context:\n              trusted_ca:\n                filename: {}\n      lb_policy: {}\n      http2_protocol_options: {{}}\n    )EOF\",\n                  name, apiVersionStr(api_version),\n                  TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"),\n                  lb_policy),\n      cluster, shouldBoost(api_version));\n  return cluster;\n}\n\nenvoy::config::endpoint::v3::ClusterLoadAssignment\nConfigHelper::buildClusterLoadAssignment(const std::string& name, const std::string& address,\n                                         uint32_t port,\n                                         envoy::config::core::v3::ApiVersion api_version) {\n  API_NO_BOOST(envoy::config::endpoint::v3::ClusterLoadAssignment) cluster_load_assignment;\n  TestUtility::loadFromYaml(fmt::format(R\"EOF(\n      cluster_name: {}\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: {}\n                port_value: {}\n    )EOF\",\n                                        name, address, port),\n                            cluster_load_assignment, shouldBoost(api_version));\n  return 
cluster_load_assignment;\n}\n\nenvoy::config::listener::v3::Listener\nConfigHelper::buildBaseListener(const std::string& name, const std::string& address,\n                                const std::string& filter_chains,\n                                envoy::config::core::v3::ApiVersion api_version) {\n  API_NO_BOOST(envoy::config::listener::v3::Listener) listener;\n  TestUtility::loadFromYaml(fmt::format(\n                                R\"EOF(\n      name: {}\n      address:\n        socket_address:\n          address: {}\n          port_value: 0\n      filter_chains:\n      {}\n    )EOF\",\n                                name, address, filter_chains),\n                            listener, shouldBoost(api_version));\n  return listener;\n}\n\nenvoy::config::listener::v3::Listener\nConfigHelper::buildListener(const std::string& name, const std::string& route_config,\n                            const std::string& address, const std::string& stat_prefix,\n                            envoy::config::core::v3::ApiVersion api_version) {\n  std::string hcm = fmt::format(\n      R\"EOF(\n        filters:\n        - name: http\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n            stat_prefix: {}\n            codec_type: HTTP2\n            rds:\n              route_config_name: {}\n              config_source:\n                resource_api_version: {}\n                ads: {{}}\n            http_filters: [{{ name: envoy.filters.http.router }}]\n    )EOF\",\n      stat_prefix, route_config, apiVersionStr(api_version));\n  return buildBaseListener(name, address, hcm, api_version);\n}\n\nenvoy::config::route::v3::RouteConfiguration\nConfigHelper::buildRouteConfig(const std::string& name, const std::string& cluster,\n                               envoy::config::core::v3::ApiVersion api_version) {\n  API_NO_BOOST(envoy::config::route::v3::RouteConfiguration) route;\n  
TestUtility::loadFromYaml(fmt::format(R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/\" }}\n          route: {{ cluster: {} }}\n    )EOF\",\n                                        name, cluster),\n                            route, shouldBoost(api_version));\n  return route;\n}\n\nenvoy::config::endpoint::v3::Endpoint ConfigHelper::buildEndpoint(const std::string& address) {\n  envoy::config::endpoint::v3::Endpoint endpoint;\n  endpoint.mutable_address()->mutable_socket_address()->set_address(address);\n  return endpoint;\n}\n\nConfigHelper::ConfigHelper(const Network::Address::IpVersion version, Api::Api& api,\n                           const std::string& config) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  std::string filename = TestEnvironment::writeStringToFileForTest(\"basic_config.yaml\", config);\n  TestUtility::loadFromFile(filename, bootstrap_, api);\n\n  // Fix up all the socket addresses with the correct version.\n  auto* admin = bootstrap_.mutable_admin();\n  auto* admin_socket_addr = admin->mutable_address()->mutable_socket_address();\n  admin_socket_addr->set_address(Network::Test::getLoopbackAddressString(version));\n\n  auto* static_resources = bootstrap_.mutable_static_resources();\n  for (int i = 0; i < static_resources->listeners_size(); ++i) {\n    auto* listener = static_resources->mutable_listeners(i);\n    auto* listener_socket_addr = listener->mutable_address()->mutable_socket_address();\n    if (listener_socket_addr->address() == \"0.0.0.0\" || listener_socket_addr->address() == \"::\") {\n      listener_socket_addr->set_address(Network::Test::getAnyAddressString(version));\n    } else {\n      listener_socket_addr->set_address(Network::Test::getLoopbackAddressString(version));\n    }\n  }\n\n  for (int i = 0; i < static_resources->clusters_size(); ++i) {\n    auto* cluster = static_resources->mutable_clusters(i);\n    RELEASE_ASSERT(\n      
  cluster->hidden_envoy_deprecated_hosts().empty(),\n        \"Hosts should be specified via load_assignment() in the integration test framework.\");\n    for (int j = 0; j < cluster->load_assignment().endpoints_size(); ++j) {\n      auto* locality_lb = cluster->mutable_load_assignment()->mutable_endpoints(j);\n      for (int k = 0; k < locality_lb->lb_endpoints_size(); ++k) {\n        auto* lb_endpoint = locality_lb->mutable_lb_endpoints(k);\n        if (lb_endpoint->endpoint().address().has_socket_address()) {\n          lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address(\n              Network::Test::getLoopbackAddressString(version));\n        }\n      }\n    }\n  }\n}\n\nvoid ConfigHelper::addClusterFilterMetadata(absl::string_view metadata_yaml,\n                                            absl::string_view cluster_name) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  ProtobufWkt::Struct cluster_metadata;\n  TestUtility::loadFromYaml(std::string(metadata_yaml), cluster_metadata);\n\n  auto* static_resources = bootstrap_.mutable_static_resources();\n  for (int i = 0; i < static_resources->clusters_size(); ++i) {\n    auto* cluster = static_resources->mutable_clusters(i);\n    if (cluster->name() != cluster_name) {\n      continue;\n    }\n    for (const auto& kvp : cluster_metadata.fields()) {\n      ASSERT_TRUE(kvp.second.kind_case() == ProtobufWkt::Value::KindCase::kStructValue);\n      cluster->mutable_metadata()->mutable_filter_metadata()->insert(\n          {kvp.first, kvp.second.struct_value()});\n    }\n    break;\n  }\n}\n\nvoid ConfigHelper::setConnectConfig(\n    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm,\n    bool terminate_connect) {\n  auto* route_config = hcm.mutable_route_config();\n  ASSERT_EQ(1, route_config->virtual_hosts_size());\n  auto* route = route_config->mutable_virtual_hosts(0)->mutable_routes(0);\n  auto* match = route->mutable_match();\n  
match->Clear();\n  match->mutable_connect_matcher();\n\n  if (terminate_connect) {\n    auto* upgrade = route->mutable_route()->add_upgrade_configs();\n    upgrade->set_upgrade_type(\"CONNECT\");\n    upgrade->mutable_connect_config();\n  }\n\n  hcm.add_upgrade_configs()->set_upgrade_type(\"CONNECT\");\n  hcm.mutable_http2_protocol_options()->set_allow_connect(true);\n}\n\nvoid ConfigHelper::applyConfigModifiers() {\n  for (const auto& config_modifier : config_modifiers_) {\n    config_modifier(bootstrap_);\n  }\n  config_modifiers_.clear();\n}\n\nvoid ConfigHelper::addRuntimeOverride(const std::string& key, const std::string& value) {\n  if (bootstrap_.mutable_layered_runtime()->layers_size() == 0) {\n    auto* static_layer = bootstrap_.mutable_layered_runtime()->add_layers();\n    static_layer->set_name(\"static_layer\");\n    static_layer->mutable_static_layer();\n    auto* admin_layer = bootstrap_.mutable_layered_runtime()->add_layers();\n    admin_layer->set_name(\"admin\");\n    admin_layer->mutable_admin_layer();\n  }\n  auto* static_layer =\n      bootstrap_.mutable_layered_runtime()->mutable_layers(0)->mutable_static_layer();\n  (*static_layer->mutable_fields())[std::string(key)] = ValueUtil::stringValue(std::string(value));\n}\n\nvoid ConfigHelper::setNewCodecs() {\n  addRuntimeOverride(\"envoy.reloadable_features.new_codec_behavior\", \"true\");\n}\n\nvoid ConfigHelper::finalize(const std::vector<uint32_t>& ports) {\n  RELEASE_ASSERT(!finalized_, \"\");\n\n  applyConfigModifiers();\n\n  uint32_t port_idx = 0;\n  bool eds_hosts = false;\n  bool custom_cluster = false;\n  bool original_dst_cluster = false;\n  auto* static_resources = bootstrap_.mutable_static_resources();\n  const auto tap_path = TestEnvironment::getOptionalEnvVar(\"TAP_PATH\");\n  if (tap_path) {\n    ENVOY_LOG_MISC(debug, \"Test tap path set to {}\", tap_path.value());\n  } else {\n    ENVOY_LOG_MISC(debug, \"No tap path set for tests\");\n  }\n  for (int i = 0; i < 
bootstrap_.mutable_static_resources()->listeners_size(); ++i) {\n    auto* listener = static_resources->mutable_listeners(i);\n    for (int j = 0; j < listener->filter_chains_size(); ++j) {\n      if (tap_path) {\n        auto* filter_chain = listener->mutable_filter_chains(j);\n        const bool has_tls = filter_chain->has_hidden_envoy_deprecated_tls_context();\n        const Protobuf::Message* tls_config = nullptr;\n        if (has_tls) {\n          tls_config = &filter_chain->hidden_envoy_deprecated_tls_context();\n          filter_chain->clear_hidden_envoy_deprecated_tls_context();\n        }\n        setTapTransportSocket(tap_path.value(), fmt::format(\"listener_{}_{}\", i, j),\n                              *filter_chain->mutable_transport_socket(), tls_config);\n      }\n    }\n  }\n  for (int i = 0; i < bootstrap_.mutable_static_resources()->clusters_size(); ++i) {\n    auto* cluster = static_resources->mutable_clusters(i);\n    if (cluster->type() == envoy::config::cluster::v3::Cluster::EDS) {\n      eds_hosts = true;\n    } else if (cluster->type() == envoy::config::cluster::v3::Cluster::ORIGINAL_DST) {\n      original_dst_cluster = true;\n    } else if (cluster->has_cluster_type()) {\n      custom_cluster = true;\n    } else {\n      // Assign ports to statically defined load_assignment hosts.\n      RELEASE_ASSERT(\n          cluster->hidden_envoy_deprecated_hosts().empty(),\n          \"Hosts should be specified via load_assignment() in the integration test framework.\");\n      for (int j = 0; j < cluster->load_assignment().endpoints_size(); ++j) {\n        auto locality_lb = cluster->mutable_load_assignment()->mutable_endpoints(j);\n        for (int k = 0; k < locality_lb->lb_endpoints_size(); ++k) {\n          auto lb_endpoint = locality_lb->mutable_lb_endpoints(k);\n          if (lb_endpoint->endpoint().address().has_socket_address()) {\n            if (lb_endpoint->endpoint().address().socket_address().port_value() == 0) {\n              
RELEASE_ASSERT(ports.size() > port_idx, \"\");\n              lb_endpoint->mutable_endpoint()\n                  ->mutable_address()\n                  ->mutable_socket_address()\n                  ->set_port_value(ports[port_idx++]);\n            } else {\n              ENVOY_LOG_MISC(debug, \"Not overriding preset port\",\n                             lb_endpoint->endpoint().address().socket_address().port_value());\n            }\n          }\n        }\n      }\n    }\n\n    if (tap_path) {\n      const bool has_tls = cluster->has_hidden_envoy_deprecated_tls_context();\n      const Protobuf::Message* tls_config = nullptr;\n      if (has_tls) {\n        tls_config = &cluster->hidden_envoy_deprecated_tls_context();\n        cluster->clear_hidden_envoy_deprecated_tls_context();\n      }\n      setTapTransportSocket(tap_path.value(), absl::StrCat(\"cluster_\", i),\n                            *cluster->mutable_transport_socket(), tls_config);\n    }\n  }\n  ASSERT(skip_port_usage_validation_ || port_idx == ports.size() || eds_hosts ||\n         original_dst_cluster || custom_cluster || bootstrap_.dynamic_resources().has_cds_config());\n\n  if (!connect_timeout_set_) {\n#ifdef __APPLE__\n    // Set a high default connect timeout. 
Under heavy load (and in particular in CI), macOS\n    // connections can take inordinately long to complete.\n    setConnectTimeout(std::chrono::seconds(30));\n#else\n    // Set a default connect timeout.\n    setConnectTimeout(std::chrono::seconds(5));\n#endif\n  }\n\n  finalized_ = true;\n}\n\nvoid ConfigHelper::setTapTransportSocket(const std::string& tap_path, const std::string& type,\n                                         envoy::config::core::v3::TransportSocket& transport_socket,\n                                         const Protobuf::Message* tls_config) {\n  // Determine inner transport socket.\n  envoy::config::core::v3::TransportSocket inner_transport_socket;\n  if (!transport_socket.name().empty()) {\n    RELEASE_ASSERT(!tls_config, \"\");\n    inner_transport_socket.MergeFrom(transport_socket);\n  } else if (tls_config) {\n    inner_transport_socket.set_name(\"envoy.transport_sockets.tls\");\n    inner_transport_socket.mutable_typed_config()->PackFrom(*tls_config);\n  } else {\n    inner_transport_socket.set_name(\"envoy.transport_sockets.raw_buffer\");\n  }\n  // Configure outer tap transport socket.\n  transport_socket.set_name(\"envoy.transport_sockets.tap\");\n  envoy::extensions::transport_sockets::tap::v3::Tap tap_config;\n  tap_config.mutable_common_config()\n      ->mutable_static_config()\n      ->mutable_match_config()\n      ->set_any_match(true);\n  auto* output_sink = tap_config.mutable_common_config()\n                          ->mutable_static_config()\n                          ->mutable_output_config()\n                          ->mutable_sinks()\n                          ->Add();\n  output_sink->set_format(envoy::config::tap::v3::OutputSink::PROTO_TEXT);\n  const ::testing::TestInfo* const test_info =\n      ::testing::UnitTest::GetInstance()->current_test_info();\n  const std::string test_id =\n      std::string(test_info->name()) + \"_\" + std::string(test_info->test_case_name()) + \"_\" + type;\n  
output_sink->mutable_file_per_tap()->set_path_prefix(tap_path + \"_\" +\n                                                       absl::StrReplaceAll(test_id, {{\"/\", \"_\"}}));\n  tap_config.mutable_transport_socket()->MergeFrom(inner_transport_socket);\n  transport_socket.mutable_typed_config()->PackFrom(tap_config);\n}\n\nvoid ConfigHelper::setSourceAddress(const std::string& address_string) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  bootstrap_.mutable_cluster_manager()\n      ->mutable_upstream_bind_config()\n      ->mutable_source_address()\n      ->set_address(address_string);\n  // We don't have the ability to bind to specific ports yet.\n  bootstrap_.mutable_cluster_manager()\n      ->mutable_upstream_bind_config()\n      ->mutable_source_address()\n      ->set_port_value(0);\n}\n\nvoid ConfigHelper::setDefaultHostAndRoute(const std::string& domains, const std::string& prefix) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      hcm_config;\n  loadHttpConnectionManager(hcm_config);\n\n  auto* virtual_host = hcm_config.mutable_route_config()->mutable_virtual_hosts(0);\n  virtual_host->set_domains(0, domains);\n  virtual_host->mutable_routes(0)->mutable_match()->set_prefix(prefix);\n\n  storeHttpConnectionManager(hcm_config);\n}\n\nvoid ConfigHelper::setBufferLimits(uint32_t upstream_buffer_limit,\n                                   uint32_t downstream_buffer_limit) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  RELEASE_ASSERT(bootstrap_.mutable_static_resources()->listeners_size() == 1, \"\");\n  auto* listener = bootstrap_.mutable_static_resources()->mutable_listeners(0);\n  listener->mutable_per_connection_buffer_limit_bytes()->set_value(downstream_buffer_limit);\n\n  auto* static_resources = bootstrap_.mutable_static_resources();\n  for (int i = 0; i < bootstrap_.mutable_static_resources()->clusters_size(); ++i) {\n    auto* cluster = static_resources->mutable_clusters(i);\n   
 cluster->mutable_per_connection_buffer_limit_bytes()->set_value(upstream_buffer_limit);\n  }\n\n  auto filter = getFilterFromListener(\"http\");\n  if (filter) {\n    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n        hcm_config;\n    loadHttpConnectionManager(hcm_config);\n    if (hcm_config.codec_type() == envoy::extensions::filters::network::http_connection_manager::\n                                       v3::HttpConnectionManager::HTTP2) {\n      const uint32_t size = std::max(downstream_buffer_limit,\n                                     Http2::Utility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE);\n      auto* options = hcm_config.mutable_http2_protocol_options();\n      options->mutable_initial_stream_window_size()->set_value(size);\n      storeHttpConnectionManager(hcm_config);\n    }\n  }\n}\n\nvoid ConfigHelper::setDownstreamHttpIdleTimeout(std::chrono::milliseconds timeout) {\n  addConfigModifier(\n      [timeout](\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) {\n        hcm.mutable_common_http_protocol_options()->mutable_idle_timeout()->MergeFrom(\n            ProtobufUtil::TimeUtil::MillisecondsToDuration(timeout.count()));\n      });\n}\n\nvoid ConfigHelper::setDownstreamMaxConnectionDuration(std::chrono::milliseconds timeout) {\n  addConfigModifier(\n      [timeout](\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) {\n        hcm.mutable_common_http_protocol_options()->mutable_max_connection_duration()->MergeFrom(\n            ProtobufUtil::TimeUtil::MillisecondsToDuration(timeout.count()));\n      });\n}\n\nvoid ConfigHelper::setDownstreamMaxStreamDuration(std::chrono::milliseconds timeout) {\n  addConfigModifier(\n      [timeout](\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) {\n      
  hcm.mutable_common_http_protocol_options()->mutable_max_stream_duration()->MergeFrom(\n            ProtobufUtil::TimeUtil::MillisecondsToDuration(timeout.count()));\n      });\n}\n\nvoid ConfigHelper::setConnectTimeout(std::chrono::milliseconds timeout) {\n  RELEASE_ASSERT(!finalized_, \"\");\n\n  auto* static_resources = bootstrap_.mutable_static_resources();\n  for (int i = 0; i < bootstrap_.mutable_static_resources()->clusters_size(); ++i) {\n    auto* cluster = static_resources->mutable_clusters(i);\n    cluster->mutable_connect_timeout()->MergeFrom(\n        ProtobufUtil::TimeUtil::MillisecondsToDuration(timeout.count()));\n  }\n  connect_timeout_set_ = true;\n}\n\nenvoy::config::route::v3::VirtualHost\nConfigHelper::createVirtualHost(const char* domain, const char* prefix, const char* cluster) {\n  envoy::config::route::v3::VirtualHost virtual_host;\n  virtual_host.set_name(domain);\n  virtual_host.add_domains(domain);\n  virtual_host.add_routes()->mutable_match()->set_prefix(prefix);\n  auto* route = virtual_host.mutable_routes(0)->mutable_route();\n  route->set_cluster(cluster);\n  return virtual_host;\n}\n\nvoid ConfigHelper::addVirtualHost(const envoy::config::route::v3::VirtualHost& vhost) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      hcm_config;\n  loadHttpConnectionManager(hcm_config);\n  auto route_config = hcm_config.mutable_route_config();\n  auto* virtual_host = route_config->add_virtual_hosts();\n  virtual_host->CopyFrom(vhost);\n  storeHttpConnectionManager(hcm_config);\n}\n\nvoid ConfigHelper::addFilter(const std::string& config) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      hcm_config;\n  loadHttpConnectionManager(hcm_config);\n\n  auto* filter_list_back = hcm_config.add_http_filters();\n  TestUtility::loadFromYaml(config, *filter_list_back);\n\n  // Now move it 
to the front.\n  for (int i = hcm_config.http_filters_size() - 1; i > 0; --i) {\n    hcm_config.mutable_http_filters()->SwapElements(i, i - 1);\n  }\n  storeHttpConnectionManager(hcm_config);\n}\n\nvoid ConfigHelper::setClientCodec(envoy::extensions::filters::network::http_connection_manager::v3::\n                                      HttpConnectionManager::CodecType type) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      hcm_config;\n  if (loadHttpConnectionManager(hcm_config)) {\n    hcm_config.set_codec_type(type);\n    storeHttpConnectionManager(hcm_config);\n  }\n}\n\nvoid ConfigHelper::addSslConfig(const ServerSslOptions& options) {\n  RELEASE_ASSERT(!finalized_, \"\");\n\n  auto* filter_chain =\n      bootstrap_.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0);\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  initializeTls(options, *tls_context.mutable_common_tls_context());\n  if (options.ocsp_staple_required_) {\n    tls_context.set_ocsp_staple_policy(\n        envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::MUST_STAPLE);\n  }\n  filter_chain->mutable_transport_socket()->set_name(\"envoy.transport_sockets.tls\");\n  filter_chain->mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context);\n}\n\nbool ConfigHelper::setAccessLog(const std::string& filename, absl::string_view format) {\n  if (getFilterFromListener(\"http\") == nullptr) {\n    return false;\n  }\n  // Replace null device with a real path for the file access log.\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      hcm_config;\n  loadHttpConnectionManager(hcm_config);\n  envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config;\n  if (!format.empty()) {\n    access_log_config.mutable_log_format()->set_text_format(absl::StrCat(format, \"\\n\"));\n 
 }\n  access_log_config.set_path(filename);\n  hcm_config.mutable_access_log(0)->mutable_typed_config()->PackFrom(access_log_config);\n  storeHttpConnectionManager(hcm_config);\n  return true;\n}\n\nbool ConfigHelper::setListenerAccessLog(const std::string& filename, absl::string_view format) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  if (bootstrap_.mutable_static_resources()->listeners_size() == 0) {\n    return false;\n  }\n  envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config;\n  if (!format.empty()) {\n    access_log_config.mutable_log_format()->set_text_format(std::string(format));\n  }\n  access_log_config.set_path(filename);\n  bootstrap_.mutable_static_resources()\n      ->mutable_listeners(0)\n      ->add_access_log()\n      ->mutable_typed_config()\n      ->PackFrom(access_log_config);\n  return true;\n}\n\nvoid ConfigHelper::initializeTls(\n    const ServerSslOptions& options,\n    envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& common_tls_context) {\n  common_tls_context.add_alpn_protocols(Http::Utility::AlpnNames::get().Http2);\n  common_tls_context.add_alpn_protocols(Http::Utility::AlpnNames::get().Http11);\n\n  auto* validation_context = common_tls_context.mutable_validation_context();\n  validation_context->mutable_trusted_ca()->set_filename(\n      TestEnvironment::runfilesPath(\"test/config/integration/certs/cacert.pem\"));\n  validation_context->add_verify_certificate_hash(\n      options.expect_client_ecdsa_cert_ ? TEST_CLIENT_ECDSA_CERT_HASH : TEST_CLIENT_CERT_HASH);\n\n  // We'll negotiate up to TLSv1.3 for the tests that care, but it really\n  // depends on what the client sets.\n  common_tls_context.mutable_tls_params()->set_tls_maximum_protocol_version(\n      options.tlsv1_3_ ? 
envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3\n                       : envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_2);\n  if (options.rsa_cert_) {\n    auto* tls_certificate = common_tls_context.add_tls_certificates();\n    tls_certificate->mutable_certificate_chain()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/servercert.pem\"));\n    tls_certificate->mutable_private_key()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/serverkey.pem\"));\n    if (options.rsa_cert_ocsp_staple_) {\n      tls_certificate->mutable_ocsp_staple()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/server_ocsp_resp.der\"));\n    }\n  }\n  if (options.ecdsa_cert_) {\n    auto* tls_certificate = common_tls_context.add_tls_certificates();\n    tls_certificate->mutable_certificate_chain()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/server_ecdsacert.pem\"));\n    tls_certificate->mutable_private_key()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/server_ecdsakey.pem\"));\n    if (options.ecdsa_cert_ocsp_staple_) {\n      tls_certificate->mutable_ocsp_staple()->set_filename(TestEnvironment::runfilesPath(\n          \"test/config/integration/certs/server_ecdsa_ocsp_resp.der\"));\n    }\n  }\n}\n\nvoid ConfigHelper::renameListener(const std::string& name) {\n  auto* static_resources = bootstrap_.mutable_static_resources();\n  if (static_resources->listeners_size() > 0) {\n    static_resources->mutable_listeners(0)->set_name(name);\n  }\n}\n\nenvoy::config::listener::v3::Filter* ConfigHelper::getFilterFromListener(const std::string& name) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  if (bootstrap_.mutable_static_resources()->listeners_size() == 0) {\n    return nullptr;\n  }\n  auto* listener = 
bootstrap_.mutable_static_resources()->mutable_listeners(0);\n  if (listener->filter_chains_size() == 0) {\n    return nullptr;\n  }\n  auto* filter_chain = listener->mutable_filter_chains(0);\n  for (ssize_t i = 0; i < filter_chain->filters_size(); i++) {\n    if (filter_chain->mutable_filters(i)->name() == name) {\n      return filter_chain->mutable_filters(i);\n    }\n  }\n  return nullptr;\n}\n\nvoid ConfigHelper::addNetworkFilter(const std::string& filter_yaml) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  auto* filter_chain =\n      bootstrap_.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0);\n  auto* filter_list_back = filter_chain->add_filters();\n  TestUtility::loadFromYaml(filter_yaml, *filter_list_back);\n\n  // Now move it to the front.\n  for (int i = filter_chain->filters_size() - 1; i > 0; --i) {\n    filter_chain->mutable_filters()->SwapElements(i, i - 1);\n  }\n}\n\nvoid ConfigHelper::addListenerFilter(const std::string& filter_yaml) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  auto* listener = bootstrap_.mutable_static_resources()->mutable_listeners(0);\n  auto* filter_list_back = listener->add_listener_filters();\n  TestUtility::loadFromYaml(filter_yaml, *filter_list_back);\n\n  // Now move it to the front.\n  for (int i = listener->listener_filters_size() - 1; i > 0; --i) {\n    listener->mutable_listener_filters()->SwapElements(i, i - 1);\n  }\n}\n\nbool ConfigHelper::loadHttpConnectionManager(\n    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  auto* hcm_filter = getFilterFromListener(\"http\");\n  if (hcm_filter) {\n    auto* config = hcm_filter->mutable_typed_config();\n    hcm = MessageUtil::anyConvert<\n        envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>(\n        *config);\n    return true;\n  }\n  return false;\n}\n\nvoid ConfigHelper::storeHttpConnectionManager(\n    const 
envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        hcm) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  auto* hcm_config_any = getFilterFromListener(\"http\")->mutable_typed_config();\n\n  hcm_config_any->PackFrom(hcm);\n}\n\nvoid ConfigHelper::addConfigModifier(ConfigModifierFunction function) {\n  RELEASE_ASSERT(!finalized_, \"\");\n  config_modifiers_.push_back(std::move(function));\n}\n\nvoid ConfigHelper::addConfigModifier(HttpModifierFunction function) {\n  addConfigModifier([function, this](envoy::config::bootstrap::v3::Bootstrap&) -> void {\n    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n        hcm_config;\n    loadHttpConnectionManager(hcm_config);\n    function(hcm_config);\n    storeHttpConnectionManager(hcm_config);\n  });\n}\n\nvoid ConfigHelper::setLds(absl::string_view version_info) {\n  applyConfigModifiers();\n\n  envoy::service::discovery::v3::DiscoveryResponse lds;\n  lds.set_version_info(std::string(version_info));\n  for (auto& listener : bootstrap_.static_resources().listeners()) {\n    ProtobufWkt::Any* resource = lds.add_resources();\n    resource->PackFrom(listener);\n  }\n\n  const std::string lds_filename = bootstrap().dynamic_resources().lds_config().path();\n  std::string file = TestEnvironment::writeStringToFileForTest(\n      \"new_lds_file\", MessageUtil::getJsonStringFromMessage(lds));\n  TestEnvironment::renameFile(file, lds_filename);\n}\n\nvoid ConfigHelper::setOutboundFramesLimits(uint32_t max_all_frames, uint32_t max_control_frames) {\n  auto filter = getFilterFromListener(\"http\");\n  if (filter) {\n    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n        hcm_config;\n    loadHttpConnectionManager(hcm_config);\n    if (hcm_config.codec_type() == envoy::extensions::filters::network::http_connection_manager::\n                                       v3::HttpConnectionManager::HTTP2) {\n      auto* 
options = hcm_config.mutable_http2_protocol_options();\n      options->mutable_max_outbound_frames()->set_value(max_all_frames);\n      options->mutable_max_outbound_control_frames()->set_value(max_control_frames);\n      storeHttpConnectionManager(hcm_config);\n    }\n  }\n}\n\nvoid ConfigHelper::setLocalReply(\n    const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig&\n        config) {\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      hcm_config;\n  loadHttpConnectionManager(hcm_config);\n  hcm_config.mutable_local_reply_config()->MergeFrom(config);\n  storeHttpConnectionManager(hcm_config);\n}\n\nCdsHelper::CdsHelper() : cds_path_(TestEnvironment::writeStringToFileForTest(\"cds.pb_text\", \"\")) {}\n\nvoid CdsHelper::setCds(const std::vector<envoy::config::cluster::v3::Cluster>& clusters) {\n  // Write to file the DiscoveryResponse and trigger inotify watch.\n  envoy::service::discovery::v3::DiscoveryResponse cds_response;\n  cds_response.set_version_info(std::to_string(cds_version_++));\n  cds_response.set_type_url(Config::TypeUrl::get().Cluster);\n  for (const auto& cluster : clusters) {\n    cds_response.add_resources()->PackFrom(cluster);\n  }\n  // Past the initial write, need move semantics to trigger inotify move event that the\n  // FilesystemSubscriptionImpl is subscribed to.\n  std::string path =\n      TestEnvironment::writeStringToFileForTest(\"cds.update.pb_text\", cds_response.DebugString());\n  TestEnvironment::renameFile(path, cds_path_);\n}\n\nEdsHelper::EdsHelper() : eds_path_(TestEnvironment::writeStringToFileForTest(\"eds.pb_text\", \"\")) {\n  // cluster.cluster_0.update_success will be incremented on the initial\n  // load when Envoy comes up.\n  ++update_successes_;\n}\n\nvoid EdsHelper::setEds(const std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment>&\n                           cluster_load_assignments) {\n  // Write to file the 
DiscoveryResponse and trigger inotify watch.\n  envoy::service::discovery::v3::DiscoveryResponse eds_response;\n  eds_response.set_version_info(std::to_string(eds_version_++));\n  eds_response.set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);\n  for (const auto& cluster_load_assignment : cluster_load_assignments) {\n    eds_response.add_resources()->PackFrom(cluster_load_assignment);\n  }\n  // Past the initial write, need move semantics to trigger inotify move event that the\n  // FilesystemSubscriptionImpl is subscribed to.\n  std::string path =\n      TestEnvironment::writeStringToFileForTest(\"eds.update.pb_text\", eds_response.DebugString());\n  TestEnvironment::renameFile(path, eds_path_);\n}\n\nvoid EdsHelper::setEdsAndWait(\n    const std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment>& cluster_load_assignments,\n    IntegrationTestServerStats& server_stats) {\n  // Make sure the last version has been accepted before setting a new one.\n  server_stats.waitForCounterGe(\"cluster.cluster_0.update_success\", update_successes_);\n  setEds(cluster_load_assignments);\n  // Make sure Envoy has consumed the update now that it is running.\n  ++update_successes_;\n  server_stats.waitForCounterGe(\"cluster.cluster_0.update_success\", update_successes_);\n  RELEASE_ASSERT(\n      update_successes_ == server_stats.counter(\"cluster.cluster_0.update_success\")->value(), \"\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/config/utility.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <functional>\n#include <string>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/http/codes.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/integration/server_stats.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\n\nclass ConfigHelper {\npublic:\n  using HttpConnectionManager =\n      envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager;\n  struct ServerSslOptions {\n    ServerSslOptions& setRsaCert(bool rsa_cert) {\n      rsa_cert_ = rsa_cert;\n      return *this;\n    }\n\n    ServerSslOptions& setRsaCertOcspStaple(bool rsa_cert_ocsp_staple) {\n      rsa_cert_ocsp_staple_ = rsa_cert_ocsp_staple;\n      return *this;\n    }\n\n    ServerSslOptions& setEcdsaCert(bool ecdsa_cert) {\n      ecdsa_cert_ = ecdsa_cert;\n      return *this;\n    }\n\n    ServerSslOptions& setEcdsaCertOcspStaple(bool ecdsa_cert_ocsp_staple) {\n      ecdsa_cert_ocsp_staple_ = ecdsa_cert_ocsp_staple;\n      return *this;\n    }\n\n    ServerSslOptions& setOcspStapleRequired(bool ocsp_staple_required) {\n      ocsp_staple_required_ = ocsp_staple_required;\n      return *this;\n    }\n\n    ServerSslOptions& setTlsV13(bool tlsv1_3) {\n      tlsv1_3_ = tlsv1_3;\n      return *this;\n    }\n\n    ServerSslOptions& setExpectClientEcdsaCert(bool expect_client_ecdsa_cert) {\n      
expect_client_ecdsa_cert_ = expect_client_ecdsa_cert;\n      return *this;\n    }\n\n    bool rsa_cert_{true};\n    bool rsa_cert_ocsp_staple_{true};\n    bool ecdsa_cert_{false};\n    bool ecdsa_cert_ocsp_staple_{false};\n    bool ocsp_staple_required_{false};\n    bool tlsv1_3_{false};\n    bool expect_client_ecdsa_cert_{false};\n  };\n\n  // Set up basic config, using the specified IpVersion for all connections: listeners, upstream,\n  // and admin connections.\n  //\n  // By default, this runs with an L7 proxy config, but config can be set to TCP_PROXY_CONFIG\n  // to test L4 proxying.\n  ConfigHelper(const Network::Address::IpVersion version, Api::Api& api,\n               const std::string& config = httpProxyConfig());\n\n  static void\n  initializeTls(const ServerSslOptions& options,\n                envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& common_context);\n\n  using ConfigModifierFunction = std::function<void(envoy::config::bootstrap::v3::Bootstrap&)>;\n  using HttpModifierFunction = std::function<void(HttpConnectionManager&)>;\n\n  // A basic configuration (admin port, cluster_0, one listener) with no network filters.\n  static std::string baseConfig();\n\n  // A basic configuration (admin port, cluster_0, one udp listener) with no network filters.\n  static std::string baseUdpListenerConfig();\n\n  // A string for a tls inspector listener filter which can be used with addListenerFilter()\n  static std::string tlsInspectorFilter();\n\n  // A basic configuration for L4 proxying.\n  static std::string tcpProxyConfig();\n  // A basic configuration for L7 proxying.\n  static std::string httpProxyConfig();\n  // A basic configuration for L7 proxying with QUIC transport.\n  static std::string quicHttpProxyConfig();\n  // A string for a basic buffer filter, which can be used with addFilter()\n  static std::string defaultBufferFilter();\n  // A string for a small buffer filter, which can be used with addFilter()\n  static std::string 
smallBufferFilter();\n  // A string for a health check filter which can be used with addFilter()\n  static std::string defaultHealthCheckFilter();\n  // A string for a squash filter which can be used with addFilter()\n  static std::string defaultSquashFilter();\n\n  // Configuration for L7 proxying, with clusters cluster_1 and cluster_2 meant to be added via CDS.\n  // api_type should be REST, GRPC, or DELTA_GRPC.\n  static std::string discoveredClustersBootstrap(const std::string& api_type);\n  static std::string adsBootstrap(const std::string& api_type,\n                                  envoy::config::core::v3::ApiVersion api_version);\n  // Builds a standard Cluster config fragment, with a single endpoint (at address:port).\n  static envoy::config::cluster::v3::Cluster buildStaticCluster(const std::string& name, int port,\n                                                                const std::string& address);\n\n  // ADS configurations\n  static envoy::config::cluster::v3::Cluster buildCluster(\n      const std::string& name, const std::string& lb_policy = \"ROUND_ROBIN\",\n      envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3);\n\n  static envoy::config::cluster::v3::Cluster buildTlsCluster(\n      const std::string& name, const std::string& lb_policy = \"ROUND_ROBIN\",\n      envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3);\n\n  static envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment(\n      const std::string& name, const std::string& ip_version, uint32_t port,\n      envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3);\n\n  static envoy::config::listener::v3::Listener buildBaseListener(\n      const std::string& name, const std::string& address, const std::string& filter_chains = \"\",\n      envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3);\n\n  static 
envoy::config::listener::v3::Listener buildListener(\n      const std::string& name, const std::string& route_config, const std::string& address,\n      const std::string& stat_prefix,\n      envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3);\n\n  static envoy::config::route::v3::RouteConfiguration buildRouteConfig(\n      const std::string& name, const std::string& cluster,\n      envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3);\n\n  // Builds a standard Endpoint suitable for population by finalize().\n  static envoy::config::endpoint::v3::Endpoint buildEndpoint(const std::string& address);\n\n  // Run the final config modifiers, and then set the upstream ports based on upstream connections.\n  // This is the last operation run on |bootstrap_| before it is handed to Envoy.\n  // Ports are assigned by looping through clusters, hosts, and addresses in the\n  // order they are stored in |bootstrap_|\n  void finalize(const std::vector<uint32_t>& ports);\n\n  // Set source_address in the bootstrap bind config.\n  void setSourceAddress(const std::string& address_string);\n\n  // Overwrite the first host and route for the primary listener.\n  void setDefaultHostAndRoute(const std::string& host, const std::string& route);\n\n  // Sets byte limits on upstream and downstream connections.\n  void setBufferLimits(uint32_t upstream_buffer_limit, uint32_t downstream_buffer_limit);\n\n  // Set the idle timeout on downstream connections through the HttpConnectionManager.\n  void setDownstreamHttpIdleTimeout(std::chrono::milliseconds idle_timeout);\n\n  // Set the max connection duration for downstream connections through the HttpConnectionManager.\n  void setDownstreamMaxConnectionDuration(std::chrono::milliseconds max_connection_duration);\n\n  // Set the max stream duration for downstream connections through the HttpConnectionManager.\n  void 
setDownstreamMaxStreamDuration(std::chrono::milliseconds max_stream_duration);\n\n  // Set the connect timeout on upstream connections.\n  void setConnectTimeout(std::chrono::milliseconds timeout);\n\n  envoy::config::route::v3::VirtualHost createVirtualHost(const char* host, const char* route = \"/\",\n                                                          const char* cluster = \"cluster_0\");\n\n  void addVirtualHost(const envoy::config::route::v3::VirtualHost& vhost);\n\n  // Add an HTTP filter prior to existing filters.\n  void addFilter(const std::string& filter_yaml);\n\n  // Add a network filter prior to existing filters.\n  void addNetworkFilter(const std::string& filter_yaml);\n\n  // Add a listener filter prior to existing filters.\n  void addListenerFilter(const std::string& filter_yaml);\n\n  // Sets the client codec to the specified type.\n  void setClientCodec(envoy::extensions::filters::network::http_connection_manager::v3::\n                          HttpConnectionManager::CodecType type);\n\n  // Add the default SSL configuration.\n  void addSslConfig(const ServerSslOptions& options);\n  void addSslConfig() { addSslConfig({}); }\n\n  // Set the HTTP access log for the first HCM (if present) to a given file. 
The default is\n  // the platform's null device.\n  bool setAccessLog(const std::string& filename, absl::string_view format = \"\");\n\n  // Set the listener access log for the first listener to a given file.\n  bool setListenerAccessLog(const std::string& filename, absl::string_view format = \"\");\n\n  // Renames the first listener to the name specified.\n  void renameListener(const std::string& name);\n\n  // Allows callers to do their own modification to |bootstrap_| which will be\n  // applied just before ports are modified in finalize().\n  void addConfigModifier(ConfigModifierFunction function);\n\n  // Allows callers to easily modify the HttpConnectionManager configuration.\n  // Modifiers will be applied just before ports are modified in finalize\n  void addConfigModifier(HttpModifierFunction function);\n\n  // Apply any outstanding config modifiers, stick all the listeners in a discovery response message\n  // and write it to the lds file.\n  void setLds(absl::string_view version_info);\n\n  // Set limits on pending outbound frames.\n  void setOutboundFramesLimits(uint32_t max_all_frames, uint32_t max_control_frames);\n\n  // Return the bootstrap configuration for hand-off to Envoy.\n  const envoy::config::bootstrap::v3::Bootstrap& bootstrap() { return bootstrap_; }\n\n  // Allow a finalized configuration to be edited for generating xDS responses\n  void applyConfigModifiers();\n\n  // Skip validation that ensures that all upstream ports are referenced by the\n  // configuration generated in ConfigHelper::finalize.\n  void skipPortUsageValidation() { skip_port_usage_validation_ = true; }\n\n  // Add this key value pair to the static runtime.\n  void addRuntimeOverride(const std::string& key, const std::string& value);\n\n  // Add filter_metadata to a cluster with the given name\n  void addClusterFilterMetadata(absl::string_view metadata_yaml,\n                                absl::string_view cluster_name = \"cluster_0\");\n\n  // Given an HCM with the 
default config, set the matcher to be a connect matcher and enable\n  // CONNECT requests.\n  static void setConnectConfig(HttpConnectionManager& hcm, bool terminate_connect);\n\n  void setLocalReply(\n      const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig&\n          config);\n\n  // Set new codecs to use for upstream and downstream codecs.\n  void setNewCodecs();\n\nprivate:\n  static bool shouldBoost(envoy::config::core::v3::ApiVersion api_version) {\n    return api_version == envoy::config::core::v3::ApiVersion::V2;\n  }\n\n  static std::string apiVersionStr(envoy::config::core::v3::ApiVersion api_version) {\n    return api_version == envoy::config::core::v3::ApiVersion::V2 ? \"V2\" : \"V3\";\n  }\n\n  // Load the first HCM struct from the first listener into a parsed proto.\n  bool loadHttpConnectionManager(HttpConnectionManager& hcm);\n  // Take the contents of the provided HCM proto and stuff them into the first HCM\n  // struct of the first listener.\n  void storeHttpConnectionManager(const HttpConnectionManager& hcm);\n\n  // Finds the filter named 'name' from the first filter chain from the first listener.\n  envoy::config::listener::v3::Filter* getFilterFromListener(const std::string& name);\n\n  // Configure a tap transport socket for a cluster/filter chain.\n  void setTapTransportSocket(const std::string& tap_path, const std::string& type,\n                             envoy::config::core::v3::TransportSocket& transport_socket,\n                             const Protobuf::Message* tls_config);\n\n  // The bootstrap proto Envoy will start up with.\n  envoy::config::bootstrap::v3::Bootstrap bootstrap_;\n\n  // The config modifiers added via addConfigModifier() which will be applied in finalize()\n  std::vector<ConfigModifierFunction> config_modifiers_;\n\n  // Track if the connect timeout has been set (to avoid clobbering a custom setting with the\n  // default).\n  bool connect_timeout_set_{false};\n\n  // Option 
to disable port usage validation for cases where the number of\n  // upstream ports created is expected to be larger than the number of\n  // upstreams in the config.\n  bool skip_port_usage_validation_{false};\n\n  // A sanity check guard to make sure config is not modified after handing it to Envoy.\n  bool finalized_{false};\n};\n\nclass CdsHelper {\npublic:\n  CdsHelper();\n\n  // Set CDS contents on filesystem.\n  void setCds(const std::vector<envoy::config::cluster::v3::Cluster>& cluster);\n  const std::string& cds_path() const { return cds_path_; }\n\nprivate:\n  const std::string cds_path_;\n  uint32_t cds_version_{};\n};\n\n// Common code for tests that deliver EDS update via the filesystem.\nclass EdsHelper {\npublic:\n  EdsHelper();\n\n  // Set EDS contents on filesystem and wait for Envoy to pick this up.\n  void setEds(const std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment>&\n                  cluster_load_assignments);\n  void setEdsAndWait(const std::vector<envoy::config::endpoint::v3::ClusterLoadAssignment>&\n                         cluster_load_assignments,\n                     IntegrationTestServerStats& server_stats);\n  const std::string& eds_path() const { return eds_path_; }\n\nprivate:\n  const std::string eds_path_;\n  uint32_t eds_version_{};\n  uint32_t update_successes_{};\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/config_test/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\nload(\"//source/extensions:all_extensions.bzl\", \"envoy_all_extensions\")\nload(\"//bazel:repositories.bzl\", \"PPC_SKIP_TARGETS\", \"WINDOWS_SKIP_TARGETS\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nexports_files([\"example_configs_test_setup.sh\"])\n\nenvoy_cc_test(\n    name = \"example_configs_test\",\n    srcs = [\n        \"example_configs_test.cc\",\n    ],\n    data = [\n        \"example_configs_test_setup.sh\",\n        \"//configs:example_configs\",\n    ],\n    deps = [\n        \":config_test_lib\",\n        \"//source/common/filesystem:filesystem_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"config_test_lib\",\n    srcs = [\"config_test.cc\"],\n    hdrs = [\"config_test.h\"],\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/server:configuration_lib\",\n        \"//source/server/config_validation:server_lib\",\n        \"//test/integration:integration_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:worker_factory_mocks\",\n        \"//test/mocks/server:listener_component_factory_mocks\",\n        \"//test/mocks/server:worker_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ] + select({\n        \"//bazel:windows_x86_64\": envoy_all_extensions(WINDOWS_SKIP_TARGETS),\n        \"//bazel:linux_ppc\": envoy_all_extensions(PPC_SKIP_TARGETS),\n        \"//conditions:default\": envoy_all_extensions(),\n    }),\n)\n\nenvoy_cc_test(\n    name = \"deprecated_configs_test\",\n    srcs = [\n        \"deprecated_configs_test.cc\",\n    ],\n    deps = [\n  
      \":config_test_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/config_test/config_test.cc",
    "content": "#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"server/config_validation/server.h\"\n#include \"server/configuration_impl.h\"\n#include \"server/options_impl.h\"\n\n#include \"test/integration/server.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/listener_component_factory.h\"\n#include \"test/mocks/server/worker.h\"\n#include \"test/mocks/server/worker_factory.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::StrEq;\nusing testing::StrNe;\n\nnamespace Envoy {\nnamespace ConfigTest {\nnamespace {\n\n// asConfigYaml returns a new config that empties the configPath() and populates configYaml()\nOptionsImpl asConfigYaml(const OptionsImpl& src, Api::Api& api) {\n  return Envoy::Server::createTestOptionsImpl(\"\", api.fileSystem().fileReadToEnd(src.configPath()),\n                                              src.localAddressIpVersion());\n}\n\nclass ScopedRuntimeInjector {\npublic:\n  ScopedRuntimeInjector(Runtime::Loader& runtime) {\n    Runtime::LoaderSingleton::initialize(&runtime);\n  }\n\n  ~ScopedRuntimeInjector() { Runtime::LoaderSingleton::clear(); }\n};\n\n} // namespace\n\nclass ConfigTest {\npublic:\n  ConfigTest(const OptionsImpl& options)\n      : api_(Api::createApiForTest(time_system_)), options_(options) {\n    ON_CALL(server_, 
options()).WillByDefault(ReturnRef(options_));\n    ON_CALL(server_, sslContextManager()).WillByDefault(ReturnRef(ssl_context_manager_));\n    ON_CALL(server_.api_, fileSystem()).WillByDefault(ReturnRef(file_system_));\n    ON_CALL(server_.api_, randomGenerator()).WillByDefault(ReturnRef(random_));\n    ON_CALL(file_system_, fileReadToEnd(StrEq(\"/etc/envoy/lightstep_access_token\")))\n        .WillByDefault(Return(\"access_token\"));\n    ON_CALL(file_system_, fileReadToEnd(StrNe(\"/etc/envoy/lightstep_access_token\")))\n        .WillByDefault(Invoke([&](const std::string& file) -> std::string {\n          return api_->fileSystem().fileReadToEnd(file);\n        }));\n    ON_CALL(os_sys_calls_, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0}));\n\n    // Here we setup runtime to mimic the actual deprecated feature list used in the\n    // production code. Note that this test is actually more strict than production because\n    // in production runtime is not setup until after the bootstrap config is loaded. 
This seems\n    // better for configuration tests.\n    ScopedRuntimeInjector scoped_runtime(server_.runtime());\n    ON_CALL(server_.runtime_loader_.snapshot_, deprecatedFeatureEnabled(_, _))\n        .WillByDefault(Invoke([](absl::string_view, bool default_value) { return default_value; }));\n    ON_CALL(server_.runtime_loader_, threadsafeSnapshot()).WillByDefault(Invoke([this]() {\n      return snapshot_;\n    }));\n\n    envoy::config::bootstrap::v3::Bootstrap bootstrap;\n    Server::InstanceUtil::loadBootstrapConfig(\n        bootstrap, options_, server_.messageValidationContext().staticValidationVisitor(), *api_);\n    Server::Configuration::InitialImpl initial_config(bootstrap);\n    Server::Configuration::MainImpl main_config;\n\n    cluster_manager_factory_ = std::make_unique<Upstream::ValidationClusterManagerFactory>(\n        server_.admin(), server_.runtime(), server_.stats(), server_.threadLocal(),\n        server_.dnsResolver(), ssl_context_manager_, server_.dispatcher(), server_.localInfo(),\n        server_.secretManager(), server_.messageValidationContext(), *api_, server_.httpContext(),\n        server_.grpcContext(), server_.accessLogManager(), server_.singletonManager(),\n        time_system_);\n\n    ON_CALL(server_, clusterManager()).WillByDefault(Invoke([&]() -> Upstream::ClusterManager& {\n      return *main_config.clusterManager();\n    }));\n    ON_CALL(server_, listenerManager()).WillByDefault(ReturnRef(listener_manager_));\n    ON_CALL(component_factory_, createNetworkFilterFactoryList(_, _))\n        .WillByDefault(Invoke(\n            [&](const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>& filters,\n                Server::Configuration::FilterChainFactoryContext& context)\n                -> std::vector<Network::FilterFactoryCb> {\n              return Server::ProdListenerComponentFactory::createNetworkFilterFactoryList_(filters,\n                                                                                       
    context);\n            }));\n    ON_CALL(component_factory_, createListenerFilterFactoryList(_, _))\n        .WillByDefault(Invoke(\n            [&](const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>&\n                    filters,\n                Server::Configuration::ListenerFactoryContext& context)\n                -> std::vector<Network::ListenerFilterFactoryCb> {\n              return Server::ProdListenerComponentFactory::createListenerFilterFactoryList_(\n                  filters, context);\n            }));\n    ON_CALL(component_factory_, createUdpListenerFilterFactoryList(_, _))\n        .WillByDefault(Invoke(\n            [&](const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>&\n                    filters,\n                Server::Configuration::ListenerFactoryContext& context)\n                -> std::vector<Network::UdpListenerFilterFactoryCb> {\n              return Server::ProdListenerComponentFactory::createUdpListenerFilterFactoryList_(\n                  filters, context);\n            }));\n    ON_CALL(server_, serverFactoryContext()).WillByDefault(ReturnRef(server_factory_context_));\n\n    try {\n      main_config.initialize(bootstrap, server_, *cluster_manager_factory_);\n    } catch (const EnvoyException& ex) {\n      ADD_FAILURE() << fmt::format(\"'{}' config failed. 
Error: {}\", options_.configPath(),\n                                   ex.what());\n    }\n\n    server_.thread_local_.shutdownThread();\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  Api::ApiPtr api_;\n  NiceMock<Server::MockInstance> server_;\n  Server::ServerFactoryContextImpl server_factory_context_{server_};\n  NiceMock<Ssl::MockContextManager> ssl_context_manager_;\n  OptionsImpl options_;\n  std::unique_ptr<Upstream::ProdClusterManagerFactory> cluster_manager_factory_;\n  NiceMock<Server::MockListenerComponentFactory> component_factory_;\n  NiceMock<Server::MockWorkerFactory> worker_factory_;\n  Server::ListenerManagerImpl listener_manager_{server_, component_factory_, worker_factory_,\n                                                false};\n  Random::RandomGeneratorImpl random_;\n  Runtime::SnapshotConstSharedPtr snapshot_{std::make_shared<NiceMock<Runtime::MockSnapshot>>()};\n  NiceMock<Api::MockOsSysCalls> os_sys_calls_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls{&os_sys_calls_};\n  NiceMock<Filesystem::MockInstance> file_system_;\n};\n\nvoid testMerge() {\n  Api::ApiPtr api = Api::createApiForTest();\n\n  const std::string overlay = \"static_resources: { clusters: [{name: 'foo'}]}\";\n  OptionsImpl options(Server::createTestOptionsImpl(\"google_com_proxy.v2.yaml\", overlay,\n                                                    Network::Address::IpVersion::v6));\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  Server::InstanceUtil::loadBootstrapConfig(bootstrap, options,\n                                            ProtobufMessage::getStrictValidationVisitor(), *api);\n  EXPECT_EQ(2, bootstrap.static_resources().clusters_size());\n}\n\nuint32_t run(const std::string& directory) {\n  uint32_t num_tested = 0;\n  Api::ApiPtr api = Api::createApiForTest();\n  for (const std::string& filename : TestUtility::listFiles(directory, false)) {\n    ENVOY_LOG_MISC(info, \"testing {}.\\n\", filename);\n    OptionsImpl options(\n   
     Envoy::Server::createTestOptionsImpl(filename, \"\", Network::Address::IpVersion::v6));\n    ConfigTest test1(options);\n    envoy::config::bootstrap::v3::Bootstrap bootstrap;\n    Server::InstanceUtil::loadBootstrapConfig(bootstrap, options,\n                                              ProtobufMessage::getStrictValidationVisitor(), *api);\n    ENVOY_LOG_MISC(info, \"testing {} as yaml.\", filename);\n    ConfigTest test2(asConfigYaml(options, *api));\n    num_tested++;\n  }\n  return num_tested;\n}\n\nvoid loadVersionedBootstrapFile(const std::string& filename,\n                                envoy::config::bootstrap::v3::Bootstrap& bootstrap_message,\n                                absl::optional<uint32_t> bootstrap_version) {\n  Api::ApiPtr api = Api::createApiForTest();\n  OptionsImpl options(\n      Envoy::Server::createTestOptionsImpl(filename, \"\", Network::Address::IpVersion::v6));\n  // Avoid contention issues with other tests over the hot restart domain socket.\n  options.setHotRestartDisabled(true);\n  if (bootstrap_version.has_value()) {\n    options.setBootstrapVersion(*bootstrap_version);\n  }\n  Server::InstanceUtil::loadBootstrapConfig(bootstrap_message, options,\n                                            ProtobufMessage::getStrictValidationVisitor(), *api);\n}\n\nvoid loadBootstrapConfigProto(const envoy::config::bootstrap::v3::Bootstrap& in_proto,\n                              envoy::config::bootstrap::v3::Bootstrap& bootstrap_message) {\n  Api::ApiPtr api = Api::createApiForTest();\n  OptionsImpl options(\n      Envoy::Server::createTestOptionsImpl(\"\", \"\", Network::Address::IpVersion::v6));\n  options.setConfigProto(in_proto);\n  // Avoid contention issues with other tests over the hot restart domain socket.\n  options.setHotRestartDisabled(true);\n  Server::InstanceUtil::loadBootstrapConfig(bootstrap_message, options,\n                                            ProtobufMessage::getStrictValidationVisitor(), *api);\n}\n\n} // 
namespace ConfigTest\n} // namespace Envoy\n"
  },
  {
    "path": "test/config_test/config_test.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace ConfigTest {\n\n/**\n * Load all configurations from a tar file and make sure they are valid.\n * @param path supplies the path to recurse through looking for config files.\n * @return uint32_t the number of configs tested.\n */\nuint32_t run(const std::string& path);\n\n/**\n * Test --config-yaml overlay merge.\n */\nvoid testMerge();\n\n/**\n * Loads the given bootstrap file with an optional bootstrap_version into the\n * given bootstrap protobuf message using the server's loadBootstrapConfig.\n */\nvoid loadVersionedBootstrapFile(const std::string& filename,\n                                envoy::config::bootstrap::v3::Bootstrap& bootstrap_message,\n                                absl::optional<uint32_t> bootstrap_version = absl::nullopt);\n\n/**\n * Loads the given bootstrap proto into the given bootstrap protobuf message\n * using the server's loadBootstrapConfig.\n */\nvoid loadBootstrapConfigProto(const envoy::config::bootstrap::v3::Bootstrap& in_proto,\n                              envoy::config::bootstrap::v3::Bootstrap& bootstrap_message);\n\n} // namespace ConfigTest\n} // namespace Envoy\n"
  },
  {
    "path": "test/config_test/deprecated_configs_test.cc",
    "content": "#include \"envoy/config/bootstrap/v2/bootstrap.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/config/api_version.h\"\n\n#include \"test/config_test/config_test.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::HasSubstr;\nusing testing::StartsWith;\n\nnamespace Envoy {\n\n// A deprecated field can be used in previous version text proto and upgraded.\nTEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapTextProtoDeprecatedField)) {\n  API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap)\n  bootstrap = TestUtility::parseYaml<envoy::config::bootstrap::v2::Bootstrap>(R\"EOF(\n    node:\n      build_version: foo\n    )EOF\");\n\n  std::string bootstrap_text;\n  ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text));\n  const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb_text\", bootstrap_text);\n\n  // Loading as previous version should work (after upgrade)\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file;\n  EXPECT_LOG_CONTAINS(\"warning\", \"Using deprecated option 'envoy.api.v2.core.Node.build_version'\",\n                      ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2));\n  EXPECT_EQ(\"foo\", proto_v2_from_file.node().hidden_envoy_deprecated_build_version());\n\n  // Loading as current version should fail\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file;\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException,\n      AllOf(StartsWith(\"Unable to parse file\"),\n            HasSubstr(\"as a text protobuf (type envoy.config.bootstrap.v3.Bootstrap)\")));\n\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap)\n  bootstrap_v3 = 
TestUtility::parseYaml<envoy::config::bootstrap::v3::Bootstrap>(R\"EOF(\n    node:\n      hidden_envoy_deprecated_build_version: foo\n    )EOF\");\n\n  std::string bootstrap_text_v3;\n  ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap_v3, &bootstrap_text_v3));\n  const std::string filename_v3 =\n      TestEnvironment::writeStringToFileForTest(\"proto_v3.pb_text\", bootstrap_text_v3);\n\n  // Loading v3 with hidden-deprecated field as current version should fail\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException,\n      HasSubstr(\"Illegal use of hidden_envoy_deprecated_ V2 field \"\n                \"'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'\"));\n\n  // Loading v3 with hidden-deprecated field with boosting should fail as it\n  // doesn't appear in v2 and only in v3 but marked as hidden_envoy_deprecated\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file), EnvoyException,\n      HasSubstr(\"Illegal use of hidden_envoy_deprecated_ V2 field \"\n                \"'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'\"));\n}\n\n// A deprecated field can be used in previous version binary proto and upgraded.\nTEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapBinaryProtoDeprecatedField)) {\n  API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap)\n  bootstrap = TestUtility::parseYaml<envoy::config::bootstrap::v2::Bootstrap>(R\"EOF(\n    node:\n      build_version: foo\n    )EOF\");\n\n  std::string bootstrap_binary_str;\n  bootstrap_binary_str.reserve(bootstrap.ByteSizeLong());\n  bootstrap.SerializeToString(&bootstrap_binary_str);\n  const std::string filename =\n      TestEnvironment::writeStringToFileForTest(\"proto.pb\", bootstrap_binary_str);\n\n  // Loading as previous version should work (after upgrade)\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) 
proto_v2_from_file;\n  EXPECT_LOG_CONTAINS(\"warning\", \"Using deprecated option 'envoy.api.v2.core.Node.build_version'\",\n                      ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2));\n  EXPECT_EQ(\"foo\", proto_v2_from_file.node().hidden_envoy_deprecated_build_version());\n\n  // Loading as current version should fail\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file;\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException,\n      HasSubstr(\"Illegal use of hidden_envoy_deprecated_ V2 field \"\n                \"'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'\"));\n\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap)\n  bootstrap_v3 = TestUtility::parseYaml<envoy::config::bootstrap::v3::Bootstrap>(R\"EOF(\n    node:\n      hidden_envoy_deprecated_build_version: foo\n    )EOF\");\n\n  std::string bootstrap_binary_str_v3;\n  bootstrap_binary_str_v3.reserve(bootstrap.ByteSizeLong());\n  bootstrap.SerializeToString(&bootstrap_binary_str_v3);\n  const std::string filename_v3 =\n      TestEnvironment::writeStringToFileForTest(\"proto_v3.pb\", bootstrap_binary_str_v3);\n\n  // Loading v3 with hidden-deprecated field as current version should fail\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException,\n      HasSubstr(\"Illegal use of hidden_envoy_deprecated_ V2 field \"\n                \"'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'\"));\n\n  // Loading binary proto v3 with hidden-deprecated field with boosting will\n  // succeed as it cannot differentiate between v2 with the deprecated field and\n  // v3 with hidden_envoy_deprecated field\n  ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file);\n  EXPECT_EQ(\"foo\", proto_v3_from_file.node().hidden_envoy_deprecated_build_version());\n}\n\n// A deprecated 
field can be used in previous version yaml and upgraded.\nTEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapYamlDeprecatedField)) {\n  API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap)\n  bootstrap = TestUtility::parseYaml<envoy::config::bootstrap::v2::Bootstrap>(R\"EOF(\n    node:\n      build_version: foo\n    )EOF\");\n\n  EXPECT_EQ(\"node:\\n  build_version: foo\",\n            MessageUtil::getYamlStringFromMessage(bootstrap, true, false));\n  const std::string filename = TestEnvironment::writeStringToFileForTest(\n      \"proto.yaml\", MessageUtil::getYamlStringFromMessage(bootstrap, false, false));\n\n  // Loading as previous version should work (after upgrade)\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file;\n  EXPECT_LOG_CONTAINS(\"warning\", \"Using deprecated option 'envoy.api.v2.core.Node.build_version'\",\n                      ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2));\n  EXPECT_EQ(\"foo\", proto_v2_from_file.node().hidden_envoy_deprecated_build_version());\n\n  // Loading as current version should fail\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file;\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException,\n      AllOf(HasSubstr(\"type envoy.config.bootstrap.v3.Bootstrap\"),\n            HasSubstr(\"build_version: Cannot find field\")));\n\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap)\n  bootstrap_v3 = TestUtility::parseYaml<envoy::config::bootstrap::v3::Bootstrap>(R\"EOF(\n    node:\n      hidden_envoy_deprecated_build_version: foo\n    )EOF\");\n\n  EXPECT_EQ(\"node:\\n  hidden_envoy_deprecated_build_version: foo\",\n            MessageUtil::getYamlStringFromMessage(bootstrap_v3, true, false));\n  const std::string filename_v3 = TestEnvironment::writeStringToFileForTest(\n      \"proto_v3.yaml\", MessageUtil::getYamlStringFromMessage(bootstrap_v3, false, 
false));\n\n  // Loading v3 with hidden-deprecated field as current version should fail\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException,\n      HasSubstr(\"Illegal use of hidden_envoy_deprecated_ V2 field \"\n                \"'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'\"));\n\n  // Loading v3 with hidden-deprecated field with boosting should fail as the name\n  // doesn't appear in v2 and only in v3 but marked as hidden_envoy_deprecated\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file), EnvoyException,\n      HasSubstr(\"Illegal use of hidden_envoy_deprecated_ V2 field \"\n                \"'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'\"));\n}\n\n// A deprecated field can be used in previous version json and upgraded.\nTEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapJsonDeprecatedField)) {\n  API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap)\n  bootstrap = TestUtility::parseYaml<envoy::config::bootstrap::v2::Bootstrap>(R\"EOF(\n    node:\n      build_version: foo\n    )EOF\");\n\n  EXPECT_EQ(\"{\\\"node\\\":{\\\"build_version\\\":\\\"foo\\\"}}\",\n            MessageUtil::getJsonStringFromMessage(bootstrap, false, false));\n  const std::string filename = TestEnvironment::writeStringToFileForTest(\n      \"proto.json\", MessageUtil::getJsonStringFromMessage(bootstrap, false, false));\n\n  // Loading as previous version should work (after upgrade)\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file;\n  EXPECT_LOG_CONTAINS(\"warning\", \"Using deprecated option 'envoy.api.v2.core.Node.build_version'\",\n                      ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2));\n  EXPECT_EQ(\"foo\", proto_v2_from_file.node().hidden_envoy_deprecated_build_version());\n\n  // Loading as current version should fail\n  
API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file;\n  EXPECT_THROW_WITH_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException,\n      \"Protobuf message (type envoy.config.bootstrap.v3.Bootstrap reason INVALID_ARGUMENT:(node) \"\n      \"build_version: Cannot find field.) has unknown fields\");\n\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap)\n  bootstrap_v3 = TestUtility::parseYaml<envoy::config::bootstrap::v3::Bootstrap>(R\"EOF(\n    node:\n      hidden_envoy_deprecated_build_version: foo\n    )EOF\");\n\n  EXPECT_EQ(\"{\\\"node\\\":{\\\"hidden_envoy_deprecated_build_version\\\":\\\"foo\\\"}}\",\n            MessageUtil::getJsonStringFromMessage(bootstrap_v3, false, false));\n  const std::string filename_v3 = TestEnvironment::writeStringToFileForTest(\n      \"proto_v3.json\", MessageUtil::getYamlStringFromMessage(bootstrap_v3, false, false));\n\n  // Loading v3 with hidden-deprecated field as current version should fail\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException,\n      AllOf(StartsWith(\"Unable to parse JSON as proto\"),\n            HasSubstr(\"hidden_envoy_deprecated_build_version: foo\")));\n\n  // Loading v3 with hidden-deprecated field with boosting should fail as the name\n  // doesn't appear in v2 and only in v3 but marked as hidden_envoy_deprecated\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file), EnvoyException,\n      AllOf(StartsWith(\"Unable to parse JSON as proto\"),\n            HasSubstr(\"hidden_envoy_deprecated_build_version: foo\")));\n}\n\n// Test the config_proto option when loading from bootstrap\nTEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapConfigProtoDeprecatedField)) {\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap)\n  in_bootstrap_v3 = 
TestUtility::parseYaml<envoy::config::bootstrap::v3::Bootstrap>(R\"EOF(\n    node:\n      hidden_envoy_deprecated_build_version: foo\n    )EOF\");\n\n  // Loading v3 with hidden-deprecated field as current version should fail\n  API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file;\n  EXPECT_THAT_THROWS_MESSAGE(\n      ConfigTest::loadBootstrapConfigProto(in_bootstrap_v3, proto_v3_from_file), EnvoyException,\n      HasSubstr(\"Illegal use of hidden_envoy_deprecated_ V2 field \"\n                \"'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'\"));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/config_test/example_configs_test.cc",
    "content": "#include \"common/filesystem/filesystem_impl.h\"\n\n#include \"test/config_test/config_test.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(ExampleConfigsTest, All) {\n  TestEnvironment::exec(\n      {TestEnvironment::runfilesPath(\"test/config_test/example_configs_test_setup.sh\")});\n\n#ifdef WIN32\n  Filesystem::InstanceImplWin32 file_system;\n#else\n  Filesystem::InstanceImplPosix file_system;\n#endif\n\n  const auto config_file_count = std::stoi(\n      file_system.fileReadToEnd(TestEnvironment::temporaryDirectory() + \"/config-file-count.txt\"));\n\n  // Change working directory, otherwise we won't be able to read files using relative paths.\n#ifdef PATH_MAX\n  char cwd[PATH_MAX];\n#else\n  char cwd[1024];\n#endif\n  const std::string& directory = TestEnvironment::temporaryDirectory() + \"/test/config_test\";\n  RELEASE_ASSERT(::getcwd(cwd, sizeof(cwd)) != nullptr, \"\");\n  RELEASE_ASSERT(::chdir(directory.c_str()) == 0, \"\");\n\n  EXPECT_EQ(config_file_count, ConfigTest::run(directory));\n\n  ConfigTest::testMerge();\n\n  // Return to the original working directory, otherwise \"bazel.coverage\" breaks (...but why?).\n  RELEASE_ASSERT(::chdir(cwd) == 0, \"\");\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/config_test/example_configs_test_setup.sh",
    "content": "#!/bin/bash\n\nset -e\n\nDIR=\"$TEST_TMPDIR\"/test/config_test\nmkdir -p \"$DIR\"\ntar -xvf \"$TEST_SRCDIR\"/envoy/configs/example_configs.tar -C \"$DIR\"\n\n# find uses full path to prevent using Windows find on Windows.\n/usr/bin/find \"$DIR\" -type f | grep -c .yaml > \"$TEST_TMPDIR\"/config-file-count.txt\n"
  },
  {
    "path": "test/dependencies/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"curl_test\",\n    srcs = [\"curl_test.cc\"],\n    external_deps = [\n        \"curl\",\n    ],\n)\n"
  },
  {
    "path": "test/dependencies/curl_test.cc",
    "content": "#include \"curl/curl.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Dependencies {\n\nTEST(CurlTest, BuiltWithExpectedFeatures) {\n  // Ensure built with the expected features, flags from\n  // https://curl.haxx.se/libcurl/c/curl_version_info.html.\n  curl_version_info_data* info = curl_version_info(CURLVERSION_NOW);\n\n  // In sequence as declared in curl.h. Overlook any toggle of the\n  // developer or os elections for DEBUG, CURL DEBUG and LARGE FILE\n  EXPECT_NE(0, info->features & CURL_VERSION_IPV6);\n  EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS4);\n  EXPECT_EQ(0, info->features & CURL_VERSION_SSL);\n  EXPECT_NE(0, info->features & CURL_VERSION_LIBZ);\n  EXPECT_EQ(0, info->features & CURL_VERSION_NTLM);\n  EXPECT_EQ(0, info->features & CURL_VERSION_GSSNEGOTIATE);\n  EXPECT_NE(0, info->features & CURL_VERSION_ASYNCHDNS);\n  EXPECT_EQ(0, info->features & CURL_VERSION_SPNEGO);\n  EXPECT_EQ(0, info->features & CURL_VERSION_IDN);\n  EXPECT_EQ(0, info->features & CURL_VERSION_SSPI);\n  EXPECT_EQ(0, info->features & CURL_VERSION_CONV);\n  EXPECT_EQ(0, info->features & CURL_VERSION_TLSAUTH_SRP);\n  EXPECT_EQ(0, info->features & CURL_VERSION_NTLM_WB);\n  EXPECT_NE(0, info->features & CURL_VERSION_HTTP2);\n  EXPECT_EQ(0, info->features & CURL_VERSION_GSSAPI);\n  EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS5);\n#ifndef WIN32\n  EXPECT_NE(0, info->features & CURL_VERSION_UNIX_SOCKETS);\n#else\n  // TODO(wrowe): correct to expected, when curl 7.72 and later is patched\n  // or fixed upstream to include `afunix.h` in place of `sys/un.h` on recent\n  // Windows SDKs (it may be necessary to be more specific because older\n  // SDKs did not provide `afunix.h`)\n  EXPECT_EQ(0, info->features & CURL_VERSION_UNIX_SOCKETS);\n#endif\n  EXPECT_EQ(0, info->features & CURL_VERSION_PSL);\n  EXPECT_EQ(0, info->features & CURL_VERSION_HTTPS_PROXY);\n  EXPECT_EQ(0, info->features & CURL_VERSION_MULTI_SSL);\n  EXPECT_EQ(0, info->features & 
CURL_VERSION_BROTLI);\n  EXPECT_EQ(0, info->features & CURL_VERSION_ALTSVC);\n  EXPECT_EQ(0, info->features & CURL_VERSION_HTTP3);\n  EXPECT_NE(0, info->ares_num);\n}\n\n} // namespace Dependencies\n} // namespace Envoy\n"
  },
  {
    "path": "test/dummy_main.cc",
    "content": "// Dummy main implementation for noop-ing tests.\n// TODO(htuch): remove when we have a solution for\n// https://github.com/bazelbuild/bazel/issues/3510\n\n// NOLINT(namespace-envoy)\nint main(int /*argc*/, char** /*argv*/) { return 0; }\n"
  },
  {
    "path": "test/exe/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n    \"envoy_sh_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_sh_test(\n    name = \"build_id_test\",\n    srcs = [\"build_id_test.sh\"],\n    coverage = False,\n    data = [\n        \"//bazel:raw_build_id.ldscript\",\n        \"//source/exe:envoy-static\",\n    ],\n    # The Windows equivalent of a binaries' \"link stamp\" is a resource file descriptor of the\n    # executable. Our build revision API and output of --version flags are sufficient for now.\n    tags = [\"skip_on_windows\"],\n)\n\nenvoy_sh_test(\n    name = \"envoy_static_test\",\n    srcs = [\"envoy_static_test.sh\"],\n    coverage = False,\n    data = [\"//source/exe:envoy-static\"],\n    # TODO(Windows): expect to test to leverage dumpbin.exe to confirm we avoid msvcrt, see\n    #   https://github.com/envoyproxy/envoy/pull/8280#pullrequestreview-290187328\n    # Sanitizers doesn't like statically linked lib(std)c++ and libgcc, skip this test in that context.\n    tags = [\n        \"no_san\",\n        \"skip_on_windows\",\n    ],\n)\n\nenvoy_sh_test(\n    name = \"pie_test\",\n    srcs = [\"pie_test.sh\"],\n    coverage = False,\n    data = [\"//source/exe:envoy-static\"],\n    # Since VS2015 or even earlier, link.exe defaults to PIE generation\n    tags = [\n        \"nofips\",\n        \"skip_on_windows\",\n    ],\n)\n\nenvoy_sh_test(\n    name = \"version_out_test\",\n    srcs = [\"version_out_test.sh\"],\n    coverage = False,\n    data = [\n        \"//:VERSION\",\n        \"//bazel:raw_build_id.ldscript\",\n        \"//source/exe:envoy-static\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"main_common_test\",\n    srcs = [\"main_common_test.cc\"],\n    data = [\"//test/config/integration:google_com_proxy_port_0\"],\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/exe:main_common_lib\",\n        
\"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:contention_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"terminate_handler_test\",\n    srcs = [\"terminate_handler_test.cc\"],\n    tags = [\"backtrace\"],\n    deps = [\n        \"//source/exe:terminate_handler_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/exe/build_id_test.sh",
    "content": "#!/bin/bash\n\nset -e -o pipefail\n\nENVOY_BIN=${TEST_SRCDIR}/envoy/source/exe/envoy-static\n\nif [[ $(uname) == \"Darwin\" ]]; then\n  BUILDID=$(otool -X -s __TEXT __build_id \"${ENVOY_BIN}\" | grep -v section | cut -f2 | xxd -r -p)\nelse\n  BUILDID=$(file -L \"${ENVOY_BIN}\" | sed -n -E 's/.*BuildID\\[sha1\\]=([0-9a-f]{40}).*/\\1/p')\nfi\n\nEXPECTED=$(cat \"${TEST_SRCDIR}/envoy/bazel/raw_build_id.ldscript\")\n\nif [[ \"${BUILDID}\" != \"${EXPECTED}\" ]]; then\n  echo \"Build ID mismatch, got: ${BUILDID}, expected: ${EXPECTED}\".\n  exit 1\nfi\n"
  },
  {
    "path": "test/exe/envoy_static_test.sh",
    "content": "#!/bin/bash\n\nENVOY_BIN=${TEST_SRCDIR}/envoy/source/exe/envoy-static\n\nif [[ $(uname) == \"Darwin\" ]]; then\n  echo \"macOS doesn't support statically linked binaries, skipping.\"\n  exit 0\nfi\n\n# We can't rely on the exit code alone, since ldd fails for statically linked binaries.\nDYNLIBS=$(ldd \"${ENVOY_BIN}\" 2>&1) || {\n    if [[ ! \"${DYNLIBS}\" =~ 'not a dynamic executable' ]]; then\n\techo \"${DYNLIBS}\"\n\texit 1\n    fi\n}\n\nif [[ \"${DYNLIBS}\" =~ libc\\+\\+ ]]; then\n  echo \"libc++ is dynamically linked:\"\n  echo \"${DYNLIBS}\"\n  exit 1\nelif [[ \"${DYNLIBS}\" =~ libstdc\\+\\+ || \"${DYNLIBS}\" =~ libgcc ]]; then\n  echo \"libstdc++ and/or libgcc are dynamically linked:\"\n  echo \"${DYNLIBS}\"\n  exit 1\nfi\n"
  },
  {
    "path": "test/exe/main_common_test.cc",
    "content": "#include \"envoy/common/platform.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/mutex_tracer_impl.h\"\n#include \"common/common/random_generator.h\"\n#include \"common/common/thread.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"exe/main_common.h\"\n\n#include \"server/options_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/test_common/contention.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\n#ifdef ENVOY_HANDLE_SIGNALS\n#include \"common/signal/signal_action.h\"\n#endif\n\n#include \"absl/synchronization/notification.h\"\n\nusing testing::HasSubstr;\nusing testing::IsEmpty;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\n\nnamespace {\n\n#if !(defined(__clang_analyzer__) ||                                                               \\\n      (defined(__has_feature) &&                                                                   \\\n       (__has_feature(thread_sanitizer) || __has_feature(address_sanitizer) ||                     \\\n        __has_feature(memory_sanitizer))))\nconst std::string& outOfMemoryPattern() {\n#if defined(TCMALLOC)\n  CONSTRUCT_ON_FIRST_USE(std::string, \".*Unable to allocate.*\");\n#else\n  CONSTRUCT_ON_FIRST_USE(std::string, \".*panic: out of memory.*\");\n#endif\n}\n#endif\n\n} // namespace\n\n/**\n * Captures common functions needed for invoking MainCommon.Maintains\n * an argv array that is terminated with nullptr. 
Identifies the config\n * file relative to runfiles directory.\n */\nclass MainCommonTest : public testing::TestWithParam<Network::Address::IpVersion> {\nprotected:\n  MainCommonTest()\n      : config_file_(TestEnvironment::temporaryFileSubstitute(\n            \"test/config/integration/google_com_proxy_port_0.v2.yaml\", TestEnvironment::ParamMap(),\n            TestEnvironment::PortMap(), GetParam())),\n        argv_({\"envoy-static\", \"--use-dynamic-base-id\", \"-c\", config_file_.c_str(), nullptr}) {}\n\n  const char* const* argv() { return &argv_[0]; }\n  int argc() { return argv_.size() - 1; }\n\n  // Adds an argument, assuring that argv remains null-terminated.\n  void addArg(const char* arg) {\n    ASSERT(!argv_.empty());\n    const size_t last = argv_.size() - 1;\n    ASSERT(argv_[last] == nullptr); // invariant established in ctor, maintained below.\n    argv_[last] = arg;              // guaranteed non-empty\n    argv_.push_back(nullptr);\n  }\n\n  // Adds options to make Envoy exit immediately after initialization.\n  void initOnly() {\n    addArg(\"--mode\");\n    addArg(\"init_only\");\n  }\n\n  std::string config_file_;\n  std::vector<const char*> argv_;\n};\nINSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Exercise the codepath to instantiate MainCommon and destruct it, with hot restart.\nTEST_P(MainCommonTest, ConstructDestructHotRestartEnabled) {\n  VERBOSE_EXPECT_NO_THROW(MainCommon main_common(argc(), argv()));\n}\n\n// Exercise the codepath to instantiate MainCommon and destruct it, without hot restart.\nTEST_P(MainCommonTest, ConstructDestructHotRestartDisabled) {\n  addArg(\"--disable-hot-restart\");\n  VERBOSE_EXPECT_NO_THROW(MainCommon main_common(argc(), argv()));\n}\n\n// Exercise init_only explicitly.\nTEST_P(MainCommonTest, ConstructDestructHotRestartDisabledNoInit) {\n  
addArg(\"--disable-hot-restart\");\n  initOnly();\n  MainCommon main_common(argc(), argv());\n  EXPECT_TRUE(main_common.run());\n}\n\n// Exercise base-id-path option.\nTEST_P(MainCommonTest, ConstructWritesBasePathId) {\n#ifdef ENVOY_HOT_RESTART\n  const std::string base_id_path = TestEnvironment::temporaryPath(\"base-id-file\");\n  addArg(\"--base-id-path\");\n  addArg(base_id_path.c_str());\n  VERBOSE_EXPECT_NO_THROW(MainCommon main_common(argc(), argv()));\n\n  EXPECT_NE(\"\", TestEnvironment::readFileToStringForTest(base_id_path));\n#endif\n}\n\n// Test that an in-use base id triggers a retry and that we eventually give up.\nTEST_P(MainCommonTest, RetryDynamicBaseIdFails) {\n#ifdef ENVOY_HOT_RESTART\n  PlatformImpl platform;\n  Event::TestRealTimeSystem real_time_system;\n  DefaultListenerHooks default_listener_hooks;\n  ProdComponentFactory prod_component_factory;\n\n  const std::string base_id_path = TestEnvironment::temporaryPath(\"base-id-file\");\n\n  const auto first_args = std::vector<std::string>({\"envoy-static\", \"--use-dynamic-base-id\", \"-c\",\n                                                    config_file_, \"--base-id-path\", base_id_path});\n  OptionsImpl first_options(first_args, &MainCommon::hotRestartVersion, spdlog::level::info);\n  MainCommonBase first(first_options, real_time_system, default_listener_hooks,\n                       prod_component_factory, std::make_unique<Random::RandomGeneratorImpl>(),\n                       platform.threadFactory(), platform.fileSystem(), nullptr);\n\n  const std::string base_id_str = TestEnvironment::readFileToStringForTest(base_id_path);\n  uint32_t base_id;\n  ASSERT_TRUE(absl::SimpleAtoi(base_id_str, &base_id));\n\n  auto* mock_rng = new NiceMock<Random::MockRandomGenerator>();\n  EXPECT_CALL(*mock_rng, random()).WillRepeatedly(Return(base_id));\n\n  const auto second_args =\n      std::vector<std::string>({\"envoy-static\", \"--use-dynamic-base-id\", \"-c\", config_file_});\n  OptionsImpl 
second_options(second_args, &MainCommon::hotRestartVersion, spdlog::level::info);\n\n  EXPECT_THROW_WITH_MESSAGE(\n      MainCommonBase(second_options, real_time_system, default_listener_hooks,\n                     prod_component_factory, std::unique_ptr<Random::RandomGenerator>{mock_rng},\n                     platform.threadFactory(), platform.fileSystem(), nullptr),\n      EnvoyException, \"unable to select a dynamic base id\");\n#endif\n}\n\n// Test that std::set_new_handler() was called and the callback functions as expected.\n// This test fails under TSAN and ASAN, so don't run it in that build:\n//   [  DEATH   ] ==845==ERROR: ThreadSanitizer: requested allocation size 0x3e800000000\n//   exceeds maximum supported size of 0x10000000000\n//\n//   [  DEATH   ] ==33378==ERROR: AddressSanitizer: requested allocation size 0x3e800000000\n//   (0x3e800001000 after adjustments for alignment, red zones etc.) exceeds maximum supported size\n//   of 0x10000000000 (thread T0)\n\nclass MainCommonDeathTest : public MainCommonTest {};\nINSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonDeathTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(MainCommonDeathTest, OutOfMemoryHandler) {\n#if defined(__clang_analyzer__) || (defined(__has_feature) && (__has_feature(thread_sanitizer) ||  \\\n                                                               __has_feature(address_sanitizer) || \\\n                                                               __has_feature(memory_sanitizer)))\n  ENVOY_LOG_MISC(critical,\n                 \"MainCommonTest::OutOfMemoryHandler not supported by this compiler configuration\");\n#else\n  MainCommon main_common(argc(), argv());\n#if !defined(WIN32)\n  // Resolving symbols for a backtrace takes longer than the timeout in coverage builds,\n  // so disable handling that signal.\n  signal(SIGABRT, SIG_DFL);\n#endif\n  EXPECT_DEATH(\n     
 []() {\n        // Allocating a fixed-size large array that results in OOM on gcc\n        // results in a compile-time error on clang of \"array size too big\",\n        // so dynamically find a size that is too large.\n        const uint64_t initial = 1 << 30;\n        for (uint64_t size = initial;\n             size >= initial; // Disallow wraparound to avoid infinite loops on failure.\n             size *= 1000) {\n          int* p = new int[size];\n          // Use the pointer to prevent clang from optimizing the allocation away in opt mode.\n          ENVOY_LOG_MISC(debug, \"p={}\", reinterpret_cast<intptr_t>(p));\n        }\n      }(),\n      outOfMemoryPattern());\n#endif\n}\n\nclass AdminRequestTest : public MainCommonTest {\nprotected:\n  AdminRequestTest() { addArg(\"--disable-hot-restart\"); }\n\n  // Runs an admin request specified in path, blocking until completion, and\n  // returning the response body.\n  std::string adminRequest(absl::string_view path, absl::string_view method) {\n    absl::Notification done;\n    std::string out;\n    main_common_->adminRequest(\n        path, method,\n        [&done, &out](const Http::HeaderMap& /*response_headers*/, absl::string_view body) {\n          out = std::string(body);\n          done.Notify();\n        });\n    done.WaitForNotification();\n    return out;\n  }\n\n  // Initiates Envoy running in its own thread.\n  void startEnvoy() {\n    envoy_thread_ = Thread::threadFactoryForTest().createThread([this]() {\n      // Note: main_common_ is accessed in the testing thread, but\n      // is race-free, as MainCommon::run() does not return until\n      // triggered with an adminRequest POST to /quitquitquit, which\n      // is done in the testing thread.\n      main_common_ = std::make_unique<MainCommon>(argc(), argv());\n      envoy_started_ = true;\n      started_.Notify();\n      pauseResumeInterlock(pause_before_run_);\n      bool status = main_common_->run();\n      
pauseResumeInterlock(pause_after_run_);\n      main_common_.reset();\n      envoy_finished_ = true;\n      envoy_return_ = status;\n      finished_.Notify();\n    });\n  }\n\n  // Conditionally pauses at a critical point in the Envoy thread, waiting for\n  // the test thread to trigger something at that exact line. The test thread\n  // can then call resume_.Notify() to allow the Envoy thread to resume.\n  void pauseResumeInterlock(bool enable) {\n    if (enable) {\n      pause_point_.Notify();\n      resume_.WaitForNotification();\n    }\n  }\n\n  // Wait until Envoy is inside the main server run loop proper. Before entering, Envoy runs any\n  // pending post callbacks, so it's not reliable to use adminRequest() or post() to do this.\n  // Generally, tests should not depend on this for correctness, but as a result of\n  // https://github.com/libevent/libevent/issues/779 we need to for TSAN. This is because the entry\n  // to event_base_loop() is where the signal base race occurs, but once we're in that loop in\n  // blocking mode, we're safe to take signals.\n  // TODO(htuch): Remove when https://github.com/libevent/libevent/issues/779 is fixed.\n  void waitForEnvoyRun() {\n    absl::Notification done;\n    main_common_->dispatcherForTest().post([this, &done] {\n      struct Sacrifice : Event::DeferredDeletable {\n        Sacrifice(absl::Notification& notify) : notify_(notify) {}\n        ~Sacrifice() override { notify_.Notify(); }\n        absl::Notification& notify_;\n      };\n      auto sacrifice = std::make_unique<Sacrifice>(done);\n      // Wait for a deferred delete cleanup, this only happens in the main server run loop.\n      main_common_->dispatcherForTest().deferredDelete(std::move(sacrifice));\n    });\n    done.WaitForNotification();\n  }\n\n  // Having triggered Envoy to quit (via signal or /quitquitquit), this blocks until Envoy exits.\n  bool waitForEnvoyToExit() {\n    finished_.WaitForNotification();\n    envoy_thread_->join();\n    return 
envoy_return_;\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  std::unique_ptr<Thread::Thread> envoy_thread_;\n  std::unique_ptr<MainCommon> main_common_;\n  absl::Notification started_;\n  absl::Notification finished_;\n  absl::Notification resume_;\n  absl::Notification pause_point_;\n  bool envoy_return_{false};\n  bool envoy_started_{false};\n  bool envoy_finished_{false};\n  bool pause_before_run_{false};\n  bool pause_after_run_{false};\n};\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminRequestTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminRequestTest, AdminRequestGetStatsAndQuit) {\n  startEnvoy();\n  started_.WaitForNotification();\n  EXPECT_THAT(adminRequest(\"/stats\", \"GET\"), HasSubstr(\"filesystem.reopen_failed\"));\n  adminRequest(\"/quitquitquit\", \"POST\");\n  EXPECT_TRUE(waitForEnvoyToExit());\n}\n\n// no signals on Windows -- could probably make this work with GenerateConsoleCtrlEvent\n#ifndef WIN32\n// This test is identical to the above one, except that instead of using an admin /quitquitquit,\n// we send ourselves a SIGTERM, which should have the same effect.\nTEST_P(AdminRequestTest, AdminRequestGetStatsAndKill) {\n  startEnvoy();\n  started_.WaitForNotification();\n  // TODO(htuch): Remove when https://github.com/libevent/libevent/issues/779 is\n  // fixed, started_ will then become our real synchronization point.\n  waitForEnvoyRun();\n  EXPECT_THAT(adminRequest(\"/stats\", \"GET\"), HasSubstr(\"filesystem.reopen_failed\"));\n  kill(getpid(), SIGTERM);\n  EXPECT_TRUE(waitForEnvoyToExit());\n}\n\n// This test is the same as AdminRequestGetStatsAndQuit, except we send ourselves a SIGINT,\n// equivalent to receiving a Ctrl-C from the user.\nTEST_P(AdminRequestTest, AdminRequestGetStatsAndCtrlC) {\n  startEnvoy();\n  started_.WaitForNotification();\n  // TODO(htuch): Remove when 
https://github.com/libevent/libevent/issues/779 is\n  // fixed, started_ will then become our real synchronization point.\n  waitForEnvoyRun();\n  EXPECT_THAT(adminRequest(\"/stats\", \"GET\"), HasSubstr(\"filesystem.reopen_failed\"));\n  kill(getpid(), SIGINT);\n  EXPECT_TRUE(waitForEnvoyToExit());\n}\n\nTEST_P(AdminRequestTest, AdminRequestContentionDisabled) {\n  startEnvoy();\n  started_.WaitForNotification();\n  // TODO(htuch): Remove when https://github.com/libevent/libevent/issues/779 is\n  // fixed, started_ will then become our real synchronization point.\n  waitForEnvoyRun();\n  EXPECT_THAT(adminRequest(\"/contention\", \"GET\"), HasSubstr(\"not enabled\"));\n  kill(getpid(), SIGTERM);\n  EXPECT_TRUE(waitForEnvoyToExit());\n}\n\nTEST_P(AdminRequestTest, AdminRequestContentionEnabled) {\n  addArg(\"--enable-mutex-tracing\");\n  startEnvoy();\n  started_.WaitForNotification();\n  // TODO(htuch): Remove when https://github.com/libevent/libevent/issues/779 is\n  // fixed, started_ will then become our real synchronization point.\n  waitForEnvoyRun();\n\n  // Induce contention to guarantee a non-zero num_contentions count.\n  Thread::TestUtil::ContentionGenerator contention_generator(main_common_->server()->api());\n  contention_generator.generateContention(MutexTracerImpl::getOrCreateTracer());\n\n  std::string response = adminRequest(\"/contention\", \"GET\");\n  EXPECT_THAT(response, Not(HasSubstr(\"not enabled\")));\n  EXPECT_THAT(response, HasSubstr(\"\\\"num_contentions\\\":\"));\n  EXPECT_THAT(response, Not(HasSubstr(\"\\\"num_contentions\\\": \\\"0\\\"\")));\n\n  kill(getpid(), SIGTERM);\n  EXPECT_TRUE(waitForEnvoyToExit());\n}\n#endif\n\nTEST_P(AdminRequestTest, AdminRequestBeforeRun) {\n  // Induce the situation where the Envoy thread is active, and main_common_ is constructed,\n  // but run() hasn't been issued yet. 
AdminRequests will not finish immediately, but will\n  // do so at some point after run() is allowed to start.\n  pause_before_run_ = true;\n  startEnvoy();\n  pause_point_.WaitForNotification();\n\n  bool admin_handler_was_called = false;\n  std::string out;\n  main_common_->adminRequest(\n      \"/stats\", \"GET\",\n      [&admin_handler_was_called, &out](const Http::HeaderMap& /*response_headers*/,\n                                        absl::string_view body) {\n        admin_handler_was_called = true;\n        out = std::string(body);\n      });\n\n  // The admin handler can't be called until after we let run() go.\n  EXPECT_FALSE(admin_handler_was_called);\n  EXPECT_THAT(out, IsEmpty());\n\n  // Now unblock the envoy thread so it can wake up and process outstanding posts.\n  resume_.Notify();\n\n  // We don't get a notification when run(), so it's not safe to check whether the\n  // admin handler is called until after we quit.\n  adminRequest(\"/quitquitquit\", \"POST\");\n  EXPECT_TRUE(waitForEnvoyToExit());\n  EXPECT_TRUE(admin_handler_was_called);\n\n  // This just checks that some stat output was reported. We could pick any stat.\n  EXPECT_THAT(out, HasSubstr(\"filesystem.reopen_failed\"));\n}\n\n// Class to track whether an object has been destroyed, which it does by bumping an atomic.\nclass DestroyCounter {\npublic:\n  // Note: destroy_count is captured by reference, so the variable must last longer than\n  // the DestroyCounter.\n  explicit DestroyCounter(std::atomic<uint64_t>& destroy_count) : destroy_count_(destroy_count) {}\n  ~DestroyCounter() { ++destroy_count_; }\n\nprivate:\n  std::atomic<uint64_t>& destroy_count_;\n};\n\nTEST_P(AdminRequestTest, AdminRequestAfterRun) {\n  startEnvoy();\n  started_.WaitForNotification();\n  // Induce the situation where Envoy is no longer in run(), but hasn't been\n  // destroyed yet. 
AdminRequests will never finish, but they won't crash.\n  pause_after_run_ = true;\n  adminRequest(\"/quitquitquit\", \"POST\");\n  pause_point_.WaitForNotification(); // run() finished, but main_common_ still exists.\n\n  // Admin requests will not work, but will never complete. The lambda itself will be\n  // destroyed on thread exit, which we'll track with an object that counts destructor calls.\n\n  std::atomic<uint64_t> lambda_destroy_count(0);\n  bool admin_handler_was_called = false;\n  {\n    // Ownership of the tracker will be passed to the lambda.\n    auto tracker = std::make_shared<DestroyCounter>(lambda_destroy_count);\n    main_common_->adminRequest(\n        \"/stats\", \"GET\",\n        [&admin_handler_was_called, tracker](const Http::HeaderMap& /*response_headers*/,\n                                             absl::string_view /*body*/) {\n          admin_handler_was_called = true;\n          UNREFERENCED_PARAMETER(tracker);\n        });\n  }\n  EXPECT_EQ(0, lambda_destroy_count); // The lambda won't be destroyed till envoy thread exit.\n\n  // Now unblock the envoy thread so it can destroy the object, along with our unfinished\n  // admin request.\n  resume_.Notify();\n\n  EXPECT_TRUE(waitForEnvoyToExit());\n  EXPECT_FALSE(admin_handler_was_called);\n  EXPECT_EQ(1, lambda_destroy_count);\n}\n\n// Verifies that the Logger::Registry is usable after constructing and\n// destructing MainCommon.\nTEST_P(MainCommonTest, ConstructDestructLogger) {\n  VERBOSE_EXPECT_NO_THROW(MainCommon main_common(argc(), argv()));\n\n  const std::string logger_name = \"logger\";\n  spdlog::details::log_msg log_msg(logger_name, spdlog::level::level_enum::err, \"error\");\n  Logger::Registry::getSink()->log(log_msg);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/exe/pie_test.sh",
    "content": "#!/bin/bash\n\nset -e\n\nENVOY_BIN=\"${TEST_SRCDIR}/envoy/source/exe/envoy-static\"\n\nif [[ $(uname) == \"Darwin\" ]]; then\n  echo \"Skipping on macOS.\"\n  exit 0\nfi\n\nif readelf -hW \"${ENVOY_BIN}\" | grep \"Type\" | grep -o \"DYN (Shared object file)\"; then\n  echo \"${ENVOY_BIN} is a PIE!\"\n  exit 0\nfi\n\necho \"${ENVOY_BIN} is not a PIE!\"\nexit 1\n"
  },
  {
    "path": "test/exe/terminate_handler_test.cc",
    "content": "#include \"exe/terminate_handler.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(TerminateHandlerDeathTest, HandlerInstalledTest) {\n  TerminateHandler handler;\n  EXPECT_DEATH([]() -> void { std::terminate(); }(), \".*std::terminate called!.*\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/exe/version_out_test.sh",
    "content": "#!/bin/bash\n\nset -e -o pipefail\n\nENVOY_BIN=\"${TEST_SRCDIR}/envoy/source/exe/envoy-static\"\n\nCOMMIT=$(${ENVOY_BIN} --version | \\\n  sed -n -E 's/.*version: ([0-9a-f]{40})\\/([0-9]+\\.[0-9]+\\.[0-9]+)(-[a-zA-Z0-9\\-_]+)?\\/(Clean|Modified)\\/(RELEASE|DEBUG)\\/([a-zA-Z-]+)$/\\1/p')\n\nEXPECTED=$(cat \"${TEST_SRCDIR}/envoy/bazel/raw_build_id.ldscript\")\n\nif [[ \"${COMMIT}\" != \"${EXPECTED}\" ]]; then\n  echo \"Commit mismatch, got: ${COMMIT}, expected: ${EXPECTED}\".\n  exit 1\nfi\n\nVERSION=$(${ENVOY_BIN} --version | \\\n  sed -n -E 's/.*version: ([0-9a-f]{40})\\/([0-9]+\\.[0-9]+\\.[0-9]+)(-[a-zA-Z0-9\\-_]+)?\\/(Clean|Modified)\\/(RELEASE|DEBUG)\\/([a-zA-Z-]+)$/\\2\\3/p')\n\nEXPECTED=$(cat \"${TEST_SRCDIR}/envoy/VERSION\")\n\nif [[ \"${VERSION}\" != \"${EXPECTED}\" ]]; then\n  echo \"Version mismatch, got: ${VERSION}, expected: ${EXPECTED}\".\n  exit 1\nfi\n"
  },
  {
    "path": "test/extensions/BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n"
  },
  {
    "path": "test/extensions/access_loggers/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"access_log_base_test\",\n    srcs = [\"access_log_base_test.cc\"],\n    deps = [\n        \"//source/extensions/access_loggers/common:access_log_base\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/access_loggers/common/access_log_base_test.cc",
    "content": "#include \"extensions/access_loggers/common/access_log_base.h\"\n\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace Common {\nnamespace {\n\nusing AccessLog::FilterPtr;\nusing AccessLog::MockFilter;\nusing testing::_;\nusing testing::Return;\n\nclass TestImpl : public ImplBase {\npublic:\n  TestImpl(FilterPtr filter) : ImplBase(std::move(filter)) {}\n\n  int count() { return count_; };\n\nprivate:\n  void emitLog(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&,\n               const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) override {\n    count_++;\n  }\n\n  int count_ = 0;\n};\n\nTEST(AccessLogBaseTest, NoFilter) {\n  StreamInfo::MockStreamInfo stream_info;\n  TestImpl logger(nullptr);\n  EXPECT_EQ(logger.count(), 0);\n  logger.log(nullptr, nullptr, nullptr, stream_info);\n  EXPECT_EQ(logger.count(), 1);\n}\n\nTEST(AccessLogBaseTest, FilterReject) {\n  StreamInfo::MockStreamInfo stream_info;\n\n  std::unique_ptr<MockFilter> filter = std::make_unique<MockFilter>();\n  EXPECT_CALL(*filter, evaluate(_, _, _, _)).WillOnce(Return(false));\n  TestImpl logger(std::move(filter));\n  EXPECT_EQ(logger.count(), 0);\n  logger.log(nullptr, nullptr, nullptr, stream_info);\n  EXPECT_EQ(logger.count(), 0);\n}\n\n} // namespace\n} // namespace Common\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/access_loggers/file/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.access_loggers.file\",\n    deps = [\n        \"//source/extensions/access_loggers/file:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/access_loggers/file/config_test.cc",
    "content": "#include \"envoy/config/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/file/v3/file.pb.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/access_log/access_log_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/access_loggers/file/config.h\"\n#include \"extensions/access_loggers/file/file_access_log_impl.h\"\n#include \"extensions/access_loggers/well_known_names.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace File {\nnamespace {\n\nTEST(FileAccessLogNegativeTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  EXPECT_THROW(FileAccessLogFactory().createAccessLogInstance(\n                   envoy::extensions::access_loggers::file::v3::FileAccessLog(), nullptr, context),\n               ProtoValidationException);\n}\n\nTEST(FileAccessLogNegativeTest, InvalidNameFail) {\n  envoy::config::accesslog::v3::AccessLog config;\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), EnvoyException,\n                            \"Provided name for static registration lookup was empty.\");\n\n  config.set_name(\"INVALID\");\n\n  EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), EnvoyException,\n                            \"Didn't find a registered implementation for name: 'INVALID'\");\n}\n\nclass FileAccessLogTest : public testing::Test {\npublic:\n  FileAccessLogTest() = default;\n\n  void runTest(const std::string& yaml, absl::string_view expected, bool is_json) {\n    envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config;\n    TestUtility::loadFromYaml(yaml, fal_config);\n\n    
envoy::config::accesslog::v3::AccessLog config;\n    config.mutable_typed_config()->PackFrom(fal_config);\n\n    auto file = std::make_shared<AccessLog::MockAccessLogFile>();\n    EXPECT_CALL(context_.access_log_manager_, createAccessLog(fal_config.path()))\n        .WillOnce(Return(file));\n\n    AccessLog::InstanceSharedPtr logger = AccessLog::AccessLogFactory::fromProto(config, context_);\n\n    absl::Time abslStartTime =\n        TestUtility::parseTime(\"Dec 18 01:50:34 2018 GMT\", \"%b %e %H:%M:%S %Y GMT\");\n    stream_info_.start_time_ = absl::ToChronoTime(abslStartTime);\n    EXPECT_CALL(stream_info_, upstreamHost()).WillRepeatedly(Return(nullptr));\n    stream_info_.response_code_ = 200;\n\n    EXPECT_CALL(*file, write(_)).WillOnce(Invoke([expected, is_json](absl::string_view got) {\n      if (is_json) {\n        EXPECT_TRUE(TestUtility::jsonStringEqual(std::string(got), std::string(expected)));\n      } else {\n        EXPECT_EQ(got, expected);\n      }\n    }));\n    logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_);\n  }\n\n  Http::TestRequestHeaderMapImpl request_headers_{{\":method\", \"GET\"}, {\":path\", \"/bar/foo\"}};\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n};\n\nTEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyFormatEmpty)) {\n  runTest(\n      R\"(\n  path: \"/foo\"\n  format: \"\"\n)\",\n      \"[2018-12-18T01:50:34.000Z] \\\"GET /bar/foo -\\\" 200 - 0 0 - - \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\"\\n\",\n      false);\n}\n\nTEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyFormatPlainText)) {\n  runTest(\n      R\"(\n  path: \"/foo\"\n  format: \"plain_text\"\n)\",\n      \"plain_text\", false);\n}\n\nTEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyJsonFormat)) {\n  runTest(\n      R\"(\n  
path: \"/foo\"\n  json_format:\n    text: \"plain text\"\n    path: \"%REQ(:path)%\"\n    code: \"%RESPONSE_CODE%\"\n)\",\n      R\"({\n    \"text\": \"plain text\",\n    \"path\": \"/bar/foo\",\n    \"code\": \"200\"\n})\",\n      true);\n}\n\nTEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyTypedJsonFormat)) {\n  runTest(\n      R\"(\n  path: \"/foo\"\n  typed_json_format:\n    text: \"plain text\"\n    path: \"%REQ(:path)%\"\n    code: \"%RESPONSE_CODE%\"\n)\",\n      R\"({\n    \"text\": \"plain text\",\n    \"path\": \"/bar/foo\",\n    \"code\": 200\n})\",\n      true);\n}\n\nTEST_F(FileAccessLogTest, EmptyFormat) {\n  runTest(\n      R\"(\n  path: \"/foo\"\n)\",\n      \"[2018-12-18T01:50:34.000Z] \\\"GET /bar/foo -\\\" 200 - 0 0 - - \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\" \\\"-\\\"\\n\",\n      false);\n}\n\nTEST_F(FileAccessLogTest, LogFormatText) {\n  runTest(\n      R\"(\n  path: \"/foo\"\n  log_format:\n    text_format: \"plain_text - %REQ(:path)% - %RESPONSE_CODE%\"\n)\",\n      \"plain_text - /bar/foo - 200\", false);\n}\n\nTEST_F(FileAccessLogTest, LogFormatJson) {\n  runTest(\n      R\"(\n  path: \"/foo\"\n  log_format:\n    json_format:\n      text: \"plain text\"\n      path: \"%REQ(:path)%\"\n      code: \"%RESPONSE_CODE%\"\n)\",\n      R\"({\n    \"text\": \"plain text\",\n    \"path\": \"/bar/foo\",\n    \"code\": 200\n})\",\n      true);\n}\n\n} // namespace\n} // namespace File\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/access_loggers/grpc/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"grpc_access_log_impl_test\",\n    srcs = [\"grpc_access_log_impl_test.cc\"],\n    extension_name = \"envoy.access_loggers.http_grpc\",\n    deps = [\n        \"//source/extensions/access_loggers/grpc:http_grpc_access_log_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/accesslog/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"grpc_access_log_utils_test\",\n    srcs = [\"grpc_access_log_utils_test.cc\"],\n    extension_name = \"envoy.access_loggers.http_grpc\",\n    deps = [\n        \"//source/extensions/access_loggers/grpc:grpc_access_log_utils\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"http_grpc_access_log_impl_test\",\n    srcs = [\"http_grpc_access_log_impl_test.cc\"],\n    extension_name = \"envoy.access_loggers.http_grpc\",\n    deps = [\n        \"//source/common/router:string_accessor_lib\",\n        \"//source/extensions/access_loggers/grpc:http_grpc_access_log_lib\",\n        
\"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"http_config_test\",\n    srcs = [\"http_config_test.cc\"],\n    extension_name = \"envoy.access_loggers.http_grpc\",\n    deps = [\n        \"//source/extensions/access_loggers/grpc:http_config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"http_grpc_access_log_integration_test\",\n    srcs = [\"http_grpc_access_log_integration_test.cc\"],\n    extension_name = \"envoy.access_loggers.http_grpc\",\n    deps = [\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/extensions/access_loggers/grpc:http_config\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/accesslog/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"tcp_grpc_access_log_integration_test\",\n    srcs = [\"tcp_grpc_access_log_integration_test.cc\"],\n    extension_name = 
\"envoy.access_loggers.http_grpc\",\n    deps = [\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/extensions/access_loggers/grpc:http_config\",\n        \"//source/extensions/access_loggers/grpc:tcp_config\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/accesslog/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/data/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/service/accesslog/v3/als.pb.h\"\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"extensions/access_loggers/grpc/http_grpc_access_log_impl.h\"\n\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/test_runtime.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\nnamespace {\n\nconstexpr std::chrono::milliseconds FlushInterval(10);\n\nclass GrpcAccessLoggerImplTest : public testing::Test {\npublic:\n  using MockAccessLogStream = Grpc::MockAsyncStream;\n  using AccessLogCallbacks =\n      Grpc::AsyncStreamCallbacks<envoy::service::accesslog::v3::StreamAccessLogsResponse>;\n\n  void initLogger(std::chrono::milliseconds buffer_flush_interval_msec, size_t buffer_size_bytes) {\n    timer_ = new Event::MockTimer(&dispatcher_);\n    EXPECT_CALL(*timer_, enableTimer(buffer_flush_interval_msec, _));\n    logger_ = std::make_unique<GrpcAccessLoggerImpl>(\n        Grpc::RawAsyncClientPtr{async_client_}, log_name_, buffer_flush_interval_msec,\n        buffer_size_bytes, dispatcher_, local_info_, stats_store_,\n        envoy::config::core::v3::ApiVersion::AUTO);\n  }\n\n  void expectStreamStart(MockAccessLogStream& stream, AccessLogCallbacks** callbacks_to_set) {\n    EXPECT_CALL(*async_client_, startRaw(_, _, _, _))\n        .WillOnce(Invoke([&stream, 
callbacks_to_set](absl::string_view, absl::string_view,\n                                                     Grpc::RawAsyncStreamCallbacks& callbacks,\n                                                     const Http::AsyncClient::StreamOptions&) {\n          *callbacks_to_set = dynamic_cast<AccessLogCallbacks*>(&callbacks);\n          return &stream;\n        }));\n  }\n\n  void expectStreamMessage(MockAccessLogStream& stream, const std::string& expected_message_yaml) {\n    envoy::service::accesslog::v3::StreamAccessLogsMessage expected_message;\n    TestUtility::loadFromYaml(expected_message_yaml, expected_message);\n    EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(false));\n    EXPECT_CALL(stream, sendMessageRaw_(_, false))\n        .WillOnce(Invoke([expected_message](Buffer::InstancePtr& request, bool) {\n          envoy::service::accesslog::v3::StreamAccessLogsMessage message;\n          Buffer::ZeroCopyInputStreamImpl request_stream(std::move(request));\n          EXPECT_TRUE(message.ParseFromZeroCopyStream(&request_stream));\n          EXPECT_EQ(message.DebugString(), expected_message.DebugString());\n        }));\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  std::string log_name_ = \"test_log_name\";\n  LocalInfo::MockLocalInfo local_info_;\n  Event::MockTimer* timer_ = nullptr;\n  Event::MockDispatcher dispatcher_;\n  Grpc::MockAsyncClient* async_client_{new Grpc::MockAsyncClient};\n  GrpcAccessLoggerImplPtr logger_;\n};\n\n// Test basic stream logging flow.\nTEST_F(GrpcAccessLoggerImplTest, BasicFlow) {\n  InSequence s;\n  initLogger(FlushInterval, 0);\n\n  // Start a stream for the first log.\n  MockAccessLogStream stream;\n  AccessLogCallbacks* callbacks;\n  expectStreamStart(stream, &callbacks);\n  EXPECT_CALL(local_info_, node());\n  expectStreamMessage(stream, R\"EOF(\nidentifier:\n  node:\n    id: node_name\n    cluster: cluster_name\n    locality:\n      zone: zone_name\n  log_name: test_log_name\nhttp_logs:\n  
log_entry:\n    request:\n      path: /test/path1\n)EOF\");\n  envoy::data::accesslog::v3::HTTPAccessLogEntry entry;\n  entry.mutable_request()->set_path(\"/test/path1\");\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  EXPECT_EQ(\n      1,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_written\")->value());\n\n  expectStreamMessage(stream, R\"EOF(\nhttp_logs:\n  log_entry:\n    request:\n      path: /test/path2\n)EOF\");\n  entry.mutable_request()->set_path(\"/test/path2\");\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  EXPECT_EQ(\n      2,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_written\")->value());\n\n  // Verify that sending an empty response message doesn't do anything bad.\n  callbacks->onReceiveMessage(\n      std::make_unique<envoy::service::accesslog::v3::StreamAccessLogsResponse>());\n\n  // Close the stream and make sure we make a new one.\n  callbacks->onRemoteClose(Grpc::Status::Internal, \"bad\");\n  expectStreamStart(stream, &callbacks);\n  EXPECT_CALL(local_info_, node());\n  expectStreamMessage(stream, R\"EOF(\nidentifier:\n  node:\n    id: node_name\n    cluster: cluster_name\n    locality:\n      zone: zone_name\n  log_name: test_log_name\nhttp_logs:\n  log_entry:\n    request:\n      path: /test/path3\n)EOF\");\n  entry.mutable_request()->set_path(\"/test/path3\");\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  EXPECT_EQ(\n      0,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_dropped\")->value());\n  EXPECT_EQ(\n      3,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_written\")->value());\n}\n\nTEST_F(GrpcAccessLoggerImplTest, WatermarksOverrun) {\n  InSequence s;\n  initLogger(FlushInterval, 1);\n\n  // Start a stream for the first log.\n  MockAccessLogStream stream;\n  AccessLogCallbacks* callbacks;\n  
expectStreamStart(stream, &callbacks);\n  EXPECT_CALL(local_info_, node());\n\n  // Fail to flush, so the log stays buffered up.\n  envoy::data::accesslog::v3::HTTPAccessLogEntry entry;\n  entry.mutable_request()->set_path(\"/test/path1\");\n  EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(true));\n  EXPECT_CALL(stream, sendMessageRaw_(_, false)).Times(0);\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  EXPECT_EQ(\n      1,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_written\")->value());\n  EXPECT_EQ(\n      0,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_dropped\")->value());\n\n  // Now canLogMore will fail, and the next log will be dropped.\n  EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(true));\n  EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(0);\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  EXPECT_EQ(\n      1,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_written\")->value());\n  EXPECT_EQ(\n      1,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_dropped\")->value());\n\n  // Now allow the flush to happen. 
The stored log will get logged, and the next log will succeed.\n  EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(false));\n  EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(1);\n  EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(false));\n  EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(1);\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  EXPECT_EQ(\n      2,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_written\")->value());\n  EXPECT_EQ(\n      1,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_dropped\")->value());\n}\n\n// Test legacy behavior of unbounded access logs.\nTEST_F(GrpcAccessLoggerImplTest, WatermarksLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.disallow_unbounded_access_logs\", \"false\"}});\n\n  InSequence s;\n  initLogger(FlushInterval, 1);\n\n  // Start a stream for the first log.\n  MockAccessLogStream stream;\n  AccessLogCallbacks* callbacks;\n  expectStreamStart(stream, &callbacks);\n  EXPECT_CALL(local_info_, node());\n\n  EXPECT_CALL(stream, isAboveWriteBufferHighWatermark())\n      .Times(AnyNumber())\n      .WillRepeatedly(Return(true));\n\n  // Fail to flush, so the log stays buffered up.\n  envoy::data::accesslog::v3::HTTPAccessLogEntry entry;\n  entry.mutable_request()->set_path(\"/test/path1\");\n  EXPECT_CALL(stream, sendMessageRaw_(_, false)).Times(0);\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  EXPECT_EQ(\n      1,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_written\")->value());\n  EXPECT_EQ(\n      0,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_dropped\")->value());\n\n  // As with the above test, try to log more. 
The log will not be dropped.\n  EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(0);\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  EXPECT_EQ(\n      2,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_written\")->value());\n  EXPECT_EQ(\n      0,\n      TestUtility::findCounter(stats_store_, \"access_logs.grpc_access_log.logs_dropped\")->value());\n}\n// Test that stream failure is handled correctly.\nTEST_F(GrpcAccessLoggerImplTest, StreamFailure) {\n  InSequence s;\n  initLogger(FlushInterval, 0);\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _))\n      .WillOnce(\n          Invoke([](absl::string_view, absl::string_view, Grpc::RawAsyncStreamCallbacks& callbacks,\n                    const Http::AsyncClient::StreamOptions&) {\n            callbacks.onRemoteClose(Grpc::Status::Internal, \"bad\");\n            return nullptr;\n          }));\n  EXPECT_CALL(local_info_, node());\n  envoy::data::accesslog::v3::HTTPAccessLogEntry entry;\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n}\n\n// Test that log entries are batched.\nTEST_F(GrpcAccessLoggerImplTest, Batching) {\n  InSequence s;\n  initLogger(FlushInterval, 100);\n\n  MockAccessLogStream stream;\n  AccessLogCallbacks* callbacks;\n  expectStreamStart(stream, &callbacks);\n  EXPECT_CALL(local_info_, node());\n  const std::string path1(30, '1');\n  const std::string path2(30, '2');\n  const std::string path3(80, '3');\n  expectStreamMessage(stream, fmt::format(R\"EOF(\nidentifier:\n  node:\n    id: node_name\n    cluster: cluster_name\n    locality:\n      zone: zone_name\n  log_name: test_log_name\nhttp_logs:\n  log_entry:\n  - request:\n      path: \"{}\"\n  - request:\n      path: \"{}\"\n  - request:\n      path: \"{}\"\n)EOF\",\n                                          path1, path2, path3));\n  envoy::data::accesslog::v3::HTTPAccessLogEntry entry;\n  entry.mutable_request()->set_path(path1);\n  
logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  entry.mutable_request()->set_path(path2);\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n  entry.mutable_request()->set_path(path3);\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n\n  const std::string path4(120, '4');\n  expectStreamMessage(stream, fmt::format(R\"EOF(\nhttp_logs:\n  log_entry:\n    request:\n      path: \"{}\"\n)EOF\",\n                                          path4));\n  entry.mutable_request()->set_path(path4);\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n}\n\n// Test that log entries are flushed periodically.\nTEST_F(GrpcAccessLoggerImplTest, Flushing) {\n  InSequence s;\n  initLogger(FlushInterval, 100);\n\n  // Nothing to do yet.\n  EXPECT_CALL(*timer_, enableTimer(FlushInterval, _));\n  timer_->invokeCallback();\n\n  envoy::data::accesslog::v3::HTTPAccessLogEntry entry;\n  // Not enough data yet to trigger flush on batch size.\n  entry.mutable_request()->set_path(\"/test/path1\");\n  logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry));\n\n  MockAccessLogStream stream;\n  AccessLogCallbacks* callbacks;\n  expectStreamStart(stream, &callbacks);\n  EXPECT_CALL(local_info_, node());\n  expectStreamMessage(stream, fmt::format(R\"EOF(\n  identifier:\n    node:\n      id: node_name\n      cluster: cluster_name\n      locality:\n        zone: zone_name\n    log_name: test_log_name\n  http_logs:\n    log_entry:\n    - request:\n        path: /test/path1\n  )EOF\"));\n  EXPECT_CALL(*timer_, enableTimer(FlushInterval, _));\n  timer_->invokeCallback();\n\n  // Flush on empty message does nothing.\n  EXPECT_CALL(*timer_, enableTimer(FlushInterval, _));\n  timer_->invokeCallback();\n}\n\nclass GrpcAccessLoggerCacheImplTest : public testing::Test {\npublic:\n  GrpcAccessLoggerCacheImplTest() {\n    logger_cache_ = std::make_unique<GrpcAccessLoggerCacheImpl>(async_client_manager_, scope_, 
tls_,\n                                                                local_info_);\n  }\n\n  void expectClientCreation() {\n    factory_ = new Grpc::MockAsyncClientFactory;\n    async_client_ = new Grpc::MockAsyncClient;\n    EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, false))\n        .WillOnce(Invoke([this](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n          EXPECT_CALL(*factory_, create()).WillOnce(Invoke([this] {\n            return Grpc::RawAsyncClientPtr{async_client_};\n          }));\n          return Grpc::AsyncClientFactoryPtr{factory_};\n        }));\n  }\n\n  LocalInfo::MockLocalInfo local_info_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Grpc::MockAsyncClientManager async_client_manager_;\n  Grpc::MockAsyncClient* async_client_ = nullptr;\n  Grpc::MockAsyncClientFactory* factory_ = nullptr;\n  GrpcAccessLoggerCacheImplPtr logger_cache_;\n  NiceMock<Stats::MockIsolatedStatsStore> scope_;\n};\n\nTEST_F(GrpcAccessLoggerCacheImplTest, Deduplication) {\n  Stats::IsolatedStoreImpl scope;\n  InSequence s;\n\n  envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig config;\n  config.set_log_name(\"log-1\");\n  config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name(\"cluster-1\");\n\n  expectClientCreation();\n  GrpcAccessLoggerSharedPtr logger1 =\n      logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope);\n  EXPECT_EQ(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope));\n\n  // Do not deduplicate different types of logger\n  expectClientCreation();\n  EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::TCP, scope));\n\n  // Changing log name leads to another logger.\n  config.set_log_name(\"log-2\");\n  expectClientCreation();\n  EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope));\n\n  config.set_log_name(\"log-1\");\n  EXPECT_EQ(logger1, 
logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope));\n\n  // Changing cluster name leads to another logger.\n  config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name(\"cluster-2\");\n  expectClientCreation();\n  EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope));\n}\n\n} // namespace\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc",
    "content": "#include \"envoy/data/accesslog/v3/accesslog.pb.h\"\n\n#include \"extensions/access_loggers/grpc/grpc_access_log_utils.h\"\n\n#include \"test/mocks/stream_info/mocks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace GrpcCommon {\nnamespace {\n\nusing testing::_;\nusing testing::Return;\n\nTEST(UtilityResponseFlagsToAccessLogResponseFlagsTest, All) {\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  ON_CALL(stream_info, hasResponseFlag(_)).WillByDefault(Return(true));\n  envoy::data::accesslog::v3::AccessLogCommon common_access_log;\n  Utility::responseFlagsToAccessLogResponseFlags(common_access_log, stream_info);\n\n  envoy::data::accesslog::v3::AccessLogCommon common_access_log_expected;\n  common_access_log_expected.mutable_response_flags()->set_failed_local_healthcheck(true);\n  common_access_log_expected.mutable_response_flags()->set_no_healthy_upstream(true);\n  common_access_log_expected.mutable_response_flags()->set_upstream_request_timeout(true);\n  common_access_log_expected.mutable_response_flags()->set_local_reset(true);\n  common_access_log_expected.mutable_response_flags()->set_upstream_remote_reset(true);\n  common_access_log_expected.mutable_response_flags()->set_upstream_connection_failure(true);\n  common_access_log_expected.mutable_response_flags()->set_upstream_connection_termination(true);\n  common_access_log_expected.mutable_response_flags()->set_upstream_overflow(true);\n  common_access_log_expected.mutable_response_flags()->set_no_route_found(true);\n  common_access_log_expected.mutable_response_flags()->set_delay_injected(true);\n  common_access_log_expected.mutable_response_flags()->set_fault_injected(true);\n  common_access_log_expected.mutable_response_flags()->set_rate_limited(true);\n  common_access_log_expected.mutable_response_flags()->mutable_unauthorized_details()->set_reason(\n      envoy::data::accesslog::v3::ResponseFlags::Unauthorized::EXTERNAL_SERVICE);\n  
common_access_log_expected.mutable_response_flags()->set_rate_limit_service_error(true);\n  common_access_log_expected.mutable_response_flags()->set_downstream_connection_termination(true);\n  common_access_log_expected.mutable_response_flags()->set_upstream_retry_limit_exceeded(true);\n  common_access_log_expected.mutable_response_flags()->set_stream_idle_timeout(true);\n  common_access_log_expected.mutable_response_flags()->set_invalid_envoy_request_headers(true);\n  common_access_log_expected.mutable_response_flags()->set_downstream_protocol_error(true);\n  common_access_log_expected.mutable_response_flags()->set_upstream_max_stream_duration_reached(\n      true);\n  common_access_log_expected.mutable_response_flags()->set_response_from_cache_filter(true);\n  common_access_log_expected.mutable_response_flags()->set_no_filter_config_found(true);\n  common_access_log_expected.mutable_response_flags()->set_duration_timeout(true);\n\n  EXPECT_EQ(common_access_log_expected.DebugString(), common_access_log.DebugString());\n}\n\n} // namespace\n} // namespace GrpcCommon\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/access_loggers/grpc/http_config_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/access_log_config.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"extensions/access_loggers/grpc/http_grpc_access_log_impl.h\"\n#include \"extensions/access_loggers/well_known_names.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace HttpGrpc {\nnamespace {\n\nclass HttpGrpcAccessLogConfigTest : public testing::Test {\npublic:\n  void SetUp() override {\n    factory_ =\n        Registry::FactoryRegistry<Server::Configuration::AccessLogInstanceFactory>::getFactory(\n            AccessLogNames::get().HttpGrpc);\n    ASSERT_NE(nullptr, factory_);\n\n    message_ = factory_->createEmptyConfigProto();\n    ASSERT_NE(nullptr, message_);\n\n    EXPECT_CALL(context_.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _))\n        .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n          return std::make_unique<NiceMock<Grpc::MockAsyncClientFactory>>();\n        }));\n\n    auto* common_config = http_grpc_access_log_.mutable_common_config();\n    common_config->set_log_name(\"foo\");\n    common_config->mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name(\"bar\");\n    TestUtility::jsonConvert(http_grpc_access_log_, *message_);\n  }\n\n  AccessLog::FilterPtr filter_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig http_grpc_access_log_;\n  ProtobufTypes::MessagePtr message_;\n  Server::Configuration::AccessLogInstanceFactory* factory_{};\n};\n\n// Normal OK configuration.\nTEST_F(HttpGrpcAccessLogConfigTest, Ok) {\n  
AccessLog::InstanceSharedPtr instance =\n      factory_->createAccessLogInstance(*message_, std::move(filter_), context_);\n  EXPECT_NE(nullptr, instance);\n  EXPECT_NE(nullptr, dynamic_cast<HttpGrpcAccessLog*>(instance.get()));\n}\n\n} // namespace\n} // namespace HttpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/data/accesslog/v3/accesslog.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/router/string_accessor_impl.h\"\n\n#include \"extensions/access_loggers/grpc/http_grpc_access_log_impl.h\"\n\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n\nusing namespace std::chrono_literals;\nusing testing::_;\nusing testing::An;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace HttpGrpc {\nnamespace {\n\nusing envoy::data::accesslog::v3::HTTPAccessLogEntry;\n\nclass MockGrpcAccessLogger : public GrpcCommon::GrpcAccessLogger {\npublic:\n  // GrpcAccessLogger\n  MOCK_METHOD(void, log, (HTTPAccessLogEntry && entry));\n  MOCK_METHOD(void, log, (envoy::data::accesslog::v3::TCPAccessLogEntry && entry));\n};\n\nclass MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache {\npublic:\n  // GrpcAccessLoggerCache\n  MOCK_METHOD(GrpcCommon::GrpcAccessLoggerSharedPtr, getOrCreateLogger,\n              (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config,\n               GrpcCommon::GrpcAccessLoggerType logger_type, Stats::Scope& scope));\n};\n\nclass HttpGrpcAccessLogTest : public testing::Test {\npublic:\n  void init() {\n    ON_CALL(*filter_, evaluate(_, _, _, _)).WillByDefault(Return(true));\n    config_.mutable_common_config()->set_log_name(\"hello_log\");\n    config_.mutable_common_config()->add_filter_state_objects_to_log(\"string_accessor\");\n    
config_.mutable_common_config()->add_filter_state_objects_to_log(\"serialized\");\n    EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _))\n        .WillOnce(\n            [this](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig&\n                       config,\n                   GrpcCommon::GrpcAccessLoggerType logger_type, Stats::Scope&) {\n              EXPECT_EQ(config.DebugString(), config_.common_config().DebugString());\n              EXPECT_EQ(GrpcCommon::GrpcAccessLoggerType::HTTP, logger_type);\n              return logger_;\n            });\n    access_log_ = std::make_unique<HttpGrpcAccessLog>(AccessLog::FilterPtr{filter_}, config_, tls_,\n                                                      logger_cache_, scope_);\n  }\n\n  void expectLog(const std::string& expected_log_entry_yaml) {\n    if (access_log_ == nullptr) {\n      init();\n    }\n\n    HTTPAccessLogEntry expected_log_entry;\n    TestUtility::loadFromYaml(expected_log_entry_yaml, expected_log_entry);\n    EXPECT_CALL(*logger_, log(An<HTTPAccessLogEntry&&>()))\n        .WillOnce(\n            Invoke([expected_log_entry](envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) {\n              EXPECT_EQ(entry.DebugString(), expected_log_entry.DebugString());\n            }));\n  }\n\n  void expectLogRequestMethod(const std::string& request_method) {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":method\", request_method},\n    };\n\n    expectLog(fmt::format(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: \"127.0.0.2\"\n      port_value: 0\n  start_time:\n  
  seconds: 3600\nrequest:\n  request_method: {}\n  request_headers_bytes: {}\nresponse: {{}}\n    )EOF\",\n                          request_method, request_method.length() + 7));\n    access_log_->log(&request_headers, nullptr, nullptr, stream_info);\n  }\n\n  Stats::IsolatedStoreImpl scope_;\n  AccessLog::MockFilter* filter_{new NiceMock<AccessLog::MockFilter>()};\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config_;\n  std::shared_ptr<MockGrpcAccessLogger> logger_{new MockGrpcAccessLogger()};\n  std::shared_ptr<MockGrpcAccessLoggerCache> logger_cache_{new MockGrpcAccessLoggerCache()};\n  HttpGrpcAccessLogPtr access_log_;\n};\n\nclass TestSerializedFilterState : public StreamInfo::FilterState::Object {\npublic:\n  ProtobufTypes::MessagePtr serializeAsProto() const override {\n    auto any = std::make_unique<ProtobufWkt::Any>();\n    ProtobufWkt::Duration value;\n    value.set_seconds(10);\n    any->PackFrom(value);\n    return any;\n  }\n};\n\n// Test HTTP log marshaling.\nTEST_F(HttpGrpcAccessLogTest, Marshalling) {\n  InSequence s;\n\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n    stream_info.start_time_monotonic_ = MonotonicTime(1h);\n    stream_info.last_downstream_tx_byte_sent_ = 2ms;\n    stream_info.setDownstreamLocalAddress(std::make_shared<Network::Address::PipeInstance>(\"/foo\"));\n    (*stream_info.metadata_.mutable_filter_metadata())[\"foo\"] = ProtobufWkt::Struct();\n    stream_info.filter_state_->setData(\"string_accessor\",\n                                       std::make_unique<Router::StringAccessorImpl>(\"test_value\"),\n                                       StreamInfo::FilterState::StateType::ReadOnly,\n                                       StreamInfo::FilterState::LifeSpan::FilterChain);\n    stream_info.filter_state_->setData(\"serialized\", 
std::make_unique<TestSerializedFilterState>(),\n                                       StreamInfo::FilterState::StateType::ReadOnly,\n                                       StreamInfo::FilterState::LifeSpan::FilterChain);\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    pipe:\n      path: \"/foo\"\n  start_time:\n    seconds: 3600\n  time_to_last_downstream_tx_byte:\n    nanos: 2000000\n  metadata:\n    filter_metadata:\n      foo: {}\n  filter_state_objects:\n    string_accessor:\n      \"@type\": type.googleapis.com/google.protobuf.StringValue\n      value: test_value\n    serialized:\n      \"@type\": type.googleapis.com/google.protobuf.Duration\n      value: 10s\nrequest: {}\nresponse: {}\n)EOF\");\n    access_log_->log(nullptr, nullptr, nullptr, stream_info);\n  }\n\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n    stream_info.last_downstream_tx_byte_sent_ = std::chrono::nanoseconds(2000000);\n\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: \"127.0.0.2\"\n      port_value: 0\n  start_time:\n    seconds: 3600\n  time_to_last_downstream_tx_byte:\n    nanos: 2000000\nrequest: {}\nresponse: {}\n)EOF\");\n    access_log_->log(nullptr, nullptr, nullptr, stream_info);\n  }\n\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.start_time_ = SystemTime(1h);\n\n    stream_info.last_downstream_rx_byte_received_ = 2ms;\n    
stream_info.first_upstream_tx_byte_sent_ = 4ms;\n    stream_info.last_upstream_tx_byte_sent_ = 6ms;\n    stream_info.first_upstream_rx_byte_received_ = 8ms;\n    stream_info.last_upstream_rx_byte_received_ = 10ms;\n    stream_info.first_downstream_tx_byte_sent_ = 12ms;\n    stream_info.last_downstream_tx_byte_sent_ = 14ms;\n\n    stream_info.setUpstreamLocalAddress(\n        std::make_shared<Network::Address::Ipv4Instance>(\"10.0.0.2\"));\n    stream_info.protocol_ = Http::Protocol::Http10;\n    stream_info.addBytesReceived(10);\n    stream_info.addBytesSent(20);\n    stream_info.response_code_ = 200;\n    stream_info.response_code_details_ = \"via_upstream\";\n    absl::string_view route_name_view(\"route-name-test\");\n    stream_info.setRouteName(route_name_view);\n    ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::FaultInjected))\n        .WillByDefault(Return(true));\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":scheme\", \"scheme_value\"},\n        {\":authority\", \"authority_value\"},\n        {\":path\", \"path_value\"},\n        {\":method\", \"POST\"},\n        {\"user-agent\", \"user-agent_value\"},\n        {\"referer\", \"referer_value\"},\n        {\"x-forwarded-for\", \"x-forwarded-for_value\"},\n        {\"x-request-id\", \"x-request-id_value\"},\n        {\"x-envoy-original-path\", \"x-envoy-original-path_value\"},\n    };\n    Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: \"127.0.0.2\"\n      port_value: 0\n  start_time:\n    seconds: 3600\n  time_to_last_rx_byte:\n    nanos: 2000000\n  time_to_first_upstream_tx_byte:\n    nanos: 4000000\n  
time_to_last_upstream_tx_byte:\n    nanos:  6000000\n  time_to_first_upstream_rx_byte:\n    nanos: 8000000\n  time_to_last_upstream_rx_byte:\n    nanos: 10000000\n  time_to_first_downstream_tx_byte:\n    nanos: 12000000\n  time_to_last_downstream_tx_byte:\n    nanos: 14000000\n  upstream_remote_address:\n    socket_address:\n      address: \"10.0.0.1\"\n      port_value: 443\n  upstream_local_address:\n    socket_address:\n      address: \"10.0.0.2\"\n      port_value: 0\n  upstream_cluster: \"fake_cluster\"\n  response_flags:\n    fault_injected: true\n  route_name: \"route-name-test\"\nprotocol_version: HTTP10\nrequest:\n  scheme: \"scheme_value\"\n  authority: \"authority_value\"\n  path: \"path_value\"\n  user_agent: \"user-agent_value\"\n  referer: \"referer_value\"\n  forwarded_for: \"x-forwarded-for_value\"\n  request_id: \"x-request-id_value\"\n  original_path: \"x-envoy-original-path_value\"\n  request_headers_bytes: 230\n  request_body_bytes: 10\n  request_method: \"POST\"\nresponse:\n  response_code:\n    value: 200\n  response_headers_bytes: 10\n  response_body_bytes: 20\n  response_code_details: \"via_upstream\"\n)EOF\");\n    access_log_->log(&request_headers, &response_headers, nullptr, stream_info);\n  }\n\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n    stream_info.upstream_transport_failure_reason_ = \"TLS error\";\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":method\", \"WHACKADOO\"},\n    };\n\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: \"127.0.0.2\"\n      port_value: 0\n  start_time:\n    seconds: 3600\n  upstream_transport_failure_reason: \"TLS 
error\"\nrequest:\n  request_method: \"METHOD_UNSPECIFIED\"\n  request_headers_bytes: 16\nresponse: {}\n)EOF\");\n    access_log_->log(&request_headers, nullptr, nullptr, stream_info);\n  }\n\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n\n    auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n    const std::vector<std::string> peerSans{\"peerSan1\", \"peerSan2\"};\n    ON_CALL(*connection_info, uriSanPeerCertificate()).WillByDefault(Return(peerSans));\n    const std::vector<std::string> localSans{\"localSan1\", \"localSan2\"};\n    ON_CALL(*connection_info, uriSanLocalCertificate()).WillByDefault(Return(localSans));\n    const std::string peerSubject = \"peerSubject\";\n    ON_CALL(*connection_info, subjectPeerCertificate()).WillByDefault(ReturnRef(peerSubject));\n    const std::string localSubject = \"localSubject\";\n    ON_CALL(*connection_info, subjectLocalCertificate()).WillByDefault(ReturnRef(localSubject));\n    const std::string sessionId =\n        \"D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B\";\n    ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(sessionId));\n    const std::string tlsVersion = \"TLSv1.3\";\n    ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion));\n    ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2CC0));\n    stream_info.setDownstreamSslConnection(connection_info);\n    stream_info.requested_server_name_ = \"sni\";\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":method\", \"WHACKADOO\"},\n    };\n\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: 
\"127.0.0.2\"\n      port_value: 0\n  start_time:\n    seconds: 3600\n  tls_properties:\n    tls_version: TLSv1_3\n    tls_cipher_suite: 0x2cc0\n    tls_sni_hostname: sni\n    local_certificate_properties:\n      subject_alt_name:\n      - uri: localSan1\n      - uri: localSan2\n      subject: localSubject\n    peer_certificate_properties:\n      subject_alt_name:\n      - uri: peerSan1\n      - uri: peerSan2\n      subject: peerSubject\n    tls_session_id: D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B\nrequest:\n  request_method: \"METHOD_UNSPECIFIED\"\n  request_headers_bytes: 16\nresponse: {}\n)EOF\");\n    access_log_->log(&request_headers, nullptr, nullptr, stream_info);\n  }\n\n  // TLSv1.2\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n\n    auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n    const std::string empty;\n    ON_CALL(*connection_info, subjectPeerCertificate()).WillByDefault(ReturnRef(empty));\n    ON_CALL(*connection_info, subjectLocalCertificate()).WillByDefault(ReturnRef(empty));\n    ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(empty));\n    const std::string tlsVersion = \"TLSv1.2\";\n    ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion));\n    ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F));\n    stream_info.setDownstreamSslConnection(connection_info);\n    stream_info.requested_server_name_ = \"sni\";\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":method\", \"WHACKADOO\"},\n    };\n\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: 
\"127.0.0.2\"\n      port_value: 0\n  start_time:\n    seconds: 3600\n  tls_properties:\n    tls_version: TLSv1_2\n    tls_cipher_suite: 0x2f\n    tls_sni_hostname: sni\n    local_certificate_properties: {}\n    peer_certificate_properties: {}\nrequest:\n  request_method: \"METHOD_UNSPECIFIED\"\nresponse: {}\n)EOF\");\n    access_log_->log(nullptr, nullptr, nullptr, stream_info);\n  }\n\n  // TLSv1.1\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n\n    auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n    const std::string empty;\n    ON_CALL(*connection_info, subjectPeerCertificate()).WillByDefault(ReturnRef(empty));\n    ON_CALL(*connection_info, subjectLocalCertificate()).WillByDefault(ReturnRef(empty));\n    ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(empty));\n    const std::string tlsVersion = \"TLSv1.1\";\n    ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion));\n    ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F));\n    stream_info.setDownstreamSslConnection(connection_info);\n    stream_info.requested_server_name_ = \"sni\";\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":method\", \"WHACKADOO\"},\n    };\n\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: \"127.0.0.2\"\n      port_value: 0\n  start_time:\n    seconds: 3600\n  tls_properties:\n    tls_version: TLSv1_1\n    tls_cipher_suite: 0x2f\n    tls_sni_hostname: sni\n    local_certificate_properties: {}\n    peer_certificate_properties: {}\nrequest:\n  request_method: \"METHOD_UNSPECIFIED\"\nresponse: {}\n)EOF\");\n    
access_log_->log(nullptr, nullptr, nullptr, stream_info);\n  }\n\n  // TLSv1\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n\n    auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n    const std::string empty;\n    ON_CALL(*connection_info, subjectPeerCertificate()).WillByDefault(ReturnRef(empty));\n    ON_CALL(*connection_info, subjectLocalCertificate()).WillByDefault(ReturnRef(empty));\n    ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(empty));\n    const std::string tlsVersion = \"TLSv1\";\n    ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion));\n    ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F));\n    stream_info.setDownstreamSslConnection(connection_info);\n    stream_info.requested_server_name_ = \"sni\";\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":method\", \"WHACKADOO\"},\n    };\n\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: \"127.0.0.2\"\n      port_value: 0\n  start_time:\n    seconds: 3600\n  tls_properties:\n    tls_version: TLSv1\n    tls_cipher_suite: 0x2f\n    tls_sni_hostname: sni\n    local_certificate_properties: {}\n    peer_certificate_properties: {}\nrequest:\n  request_method: \"METHOD_UNSPECIFIED\"\nresponse: {}\n)EOF\");\n    access_log_->log(nullptr, nullptr, nullptr, stream_info);\n  }\n\n  // Unknown TLS version (TLSv1.4)\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n\n    auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n    const 
std::string empty;\n    ON_CALL(*connection_info, subjectPeerCertificate()).WillByDefault(ReturnRef(empty));\n    ON_CALL(*connection_info, subjectLocalCertificate()).WillByDefault(ReturnRef(empty));\n    ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(empty));\n    const std::string tlsVersion = \"TLSv1.4\";\n    ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion));\n    ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F));\n    stream_info.setDownstreamSslConnection(connection_info);\n    stream_info.requested_server_name_ = \"sni\";\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":method\", \"WHACKADOO\"},\n    };\n\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: \"127.0.0.2\"\n      port_value: 0\n  start_time:\n    seconds: 3600\n  tls_properties:\n    tls_version: VERSION_UNSPECIFIED\n    tls_cipher_suite: 0x2f\n    tls_sni_hostname: sni\n    local_certificate_properties: {}\n    peer_certificate_properties: {}\nrequest:\n  request_method: \"METHOD_UNSPECIFIED\"\nresponse: {}\n)EOF\");\n    access_log_->log(nullptr, nullptr, nullptr, stream_info);\n  }\n}\n\n// Test HTTP log marshaling with additional headers.\nTEST_F(HttpGrpcAccessLogTest, MarshallingAdditionalHeaders) {\n  InSequence s;\n\n  config_.add_additional_request_headers_to_log(\"X-Custom-Request\");\n  config_.add_additional_request_headers_to_log(\"X-Custom-Empty\");\n  config_.add_additional_request_headers_to_log(\"X-Envoy-Max-Retries\");\n  config_.add_additional_request_headers_to_log(\"X-Envoy-Force-Trace\");\n\n  config_.add_additional_response_headers_to_log(\"X-Custom-Response\");\n  
config_.add_additional_response_headers_to_log(\"X-Custom-Empty\");\n  config_.add_additional_response_headers_to_log(\"X-Envoy-Immediate-Health-Check-Fail\");\n  config_.add_additional_response_headers_to_log(\"X-Envoy-Upstream-Service-Time\");\n\n  config_.add_additional_response_trailers_to_log(\"X-Logged-Trailer\");\n  config_.add_additional_response_trailers_to_log(\"X-Missing-Trailer\");\n  config_.add_additional_response_trailers_to_log(\"X-Empty-Trailer\");\n\n  init();\n\n  {\n    NiceMock<StreamInfo::MockStreamInfo> stream_info;\n    stream_info.host_ = nullptr;\n    stream_info.start_time_ = SystemTime(1h);\n\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":scheme\", \"scheme_value\"},\n        {\":authority\", \"authority_value\"},\n        {\":path\", \"path_value\"},\n        {\":method\", \"POST\"},\n        {\"x-envoy-max-retries\", \"3\"}, // test inline header not otherwise logged\n        {\"x-custom-request\", \"custom_value\"},\n        {\"x-custom-empty\", \"\"},\n    };\n    Http::TestResponseHeaderMapImpl response_headers{\n        {\":status\", \"200\"},\n        {\"x-envoy-immediate-health-check-fail\", \"true\"}, // test inline header not otherwise logged\n        {\"x-custom-response\", \"custom_value\"},\n        {\"x-custom-empty\", \"\"},\n    };\n\n    Http::TestResponseTrailerMapImpl response_trailers{\n        {\"x-logged-trailer\", \"value\"},\n        {\"x-empty-trailer\", \"\"},\n        {\"x-unlogged-trailer\", \"2\"},\n    };\n\n    expectLog(R\"EOF(\ncommon_properties:\n  downstream_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_direct_remote_address:\n    socket_address:\n      address: \"127.0.0.1\"\n      port_value: 0\n  downstream_local_address:\n    socket_address:\n      address: \"127.0.0.2\"\n      port_value: 0\n  start_time:\n    seconds: 3600\nrequest:\n  scheme: \"scheme_value\"\n  authority: \"authority_value\"\n  path: \"path_value\"\n  
request_method: \"POST\"\n  request_headers_bytes: 132\n  request_headers:\n    \"x-custom-request\": \"custom_value\"\n    \"x-custom-empty\": \"\"\n    \"x-envoy-max-retries\": \"3\"\nresponse:\n  response_headers_bytes: 92\n  response_headers:\n    \"x-custom-response\": \"custom_value\"\n    \"x-custom-empty\": \"\"\n    \"x-envoy-immediate-health-check-fail\": \"true\"\n  response_trailers:\n    \"x-logged-trailer\": \"value\"\n    \"x-empty-trailer\": \"\"\n)EOF\");\n    access_log_->log(&request_headers, &response_headers, &response_trailers, stream_info);\n  }\n}\n\nTEST_F(HttpGrpcAccessLogTest, LogWithRequestMethod) {\n  InSequence s;\n  expectLogRequestMethod(\"GET\");\n  expectLogRequestMethod(\"HEAD\");\n  expectLogRequestMethod(\"POST\");\n  expectLogRequestMethod(\"PUT\");\n  expectLogRequestMethod(\"DELETE\");\n  expectLogRequestMethod(\"CONNECT\");\n  expectLogRequestMethod(\"OPTIONS\");\n  expectLogRequestMethod(\"TRACE\");\n  expectLogRequestMethod(\"PATCH\");\n}\n\n} // namespace\n} // namespace HttpGrpc\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/service/accesslog/v3/als.pb.h\"\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/common.h\"\n#include \"common/version/version.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::AssertionResult;\n\nnamespace Envoy {\nnamespace {\n\nclass AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest,\n                                 public HttpIntegrationTest {\npublic:\n  AccessLogIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {}\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void initialize() override {\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* accesslog_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      accesslog_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      accesslog_cluster->set_name(\"accesslog\");\n      accesslog_cluster->mutable_http2_protocol_options();\n    });\n\n    config_helper_.addConfigModifier(\n        [this](\n            envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) {\n          auto* access_log = hcm.add_access_log();\n          access_log->set_name(\"grpc_accesslog\");\n\n          envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config;\n          auto* common_config = config.mutable_common_config();\n          
common_config->set_log_name(\"foo\");\n          common_config->set_transport_api_version(apiVersion());\n          setGrpcService(*common_config->mutable_grpc_service(), \"accesslog\",\n                         fake_upstreams_.back()->localAddress());\n          access_log->mutable_typed_config()->PackFrom(config);\n        });\n\n    HttpIntegrationTest::initialize();\n  }\n\n  static ProtobufTypes::MessagePtr scrubHiddenEnvoyDeprecated(const Protobuf::Message& message) {\n    ProtobufTypes::MessagePtr mutable_clone;\n    mutable_clone.reset(message.New());\n    mutable_clone->MergeFrom(message);\n    Config::VersionUtil::scrubHiddenEnvoyDeprecated(*mutable_clone);\n    return mutable_clone;\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult waitForAccessLogConnection() {\n    return fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_access_log_connection_);\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult waitForAccessLogStream() {\n    return fake_access_log_connection_->waitForNewStream(*dispatcher_, access_log_request_);\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult waitForAccessLogRequest(const std::string& expected_request_msg_yaml) {\n    envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg;\n    VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg));\n    EXPECT_EQ(\"POST\", access_log_request_->headers().getMethodValue());\n    EXPECT_EQ(TestUtility::getVersionedMethodPath(\"envoy.service.accesslog.{}.AccessLogService\",\n                                                  \"StreamAccessLogs\", apiVersion()),\n              access_log_request_->headers().getPathValue());\n    EXPECT_EQ(\"application/grpc\", access_log_request_->headers().getContentTypeValue());\n\n    envoy::service::accesslog::v3::StreamAccessLogsMessage expected_request_msg;\n    TestUtility::loadFromYaml(expected_request_msg_yaml, expected_request_msg);\n\n    // Clear fields which are not deterministic.\n    auto* log_entry = 
request_msg.mutable_http_logs()->mutable_log_entry(0);\n    log_entry->mutable_common_properties()->clear_downstream_remote_address();\n    log_entry->mutable_common_properties()->clear_downstream_direct_remote_address();\n    log_entry->mutable_common_properties()->clear_downstream_local_address();\n    log_entry->mutable_common_properties()->clear_start_time();\n    log_entry->mutable_common_properties()->clear_time_to_last_rx_byte();\n    log_entry->mutable_common_properties()->clear_time_to_first_downstream_tx_byte();\n    log_entry->mutable_common_properties()->clear_time_to_last_downstream_tx_byte();\n    log_entry->mutable_request()->clear_request_id();\n    if (request_msg.has_identifier()) {\n      auto* node = request_msg.mutable_identifier()->mutable_node();\n      node->clear_extensions();\n      node->clear_user_agent_build_version();\n    }\n    Config::VersionUtil::scrubHiddenEnvoyDeprecated(request_msg);\n    Config::VersionUtil::scrubHiddenEnvoyDeprecated(expected_request_msg);\n    EXPECT_TRUE(TestUtility::protoEqual(request_msg, expected_request_msg,\n                                        /*ignore_repeated_field_ordering=*/false));\n    return AssertionSuccess();\n  }\n\n  void cleanup() {\n    if (fake_access_log_connection_ != nullptr) {\n      AssertionResult result = fake_access_log_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_access_log_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n  }\n\n  FakeHttpConnectionPtr fake_access_log_connection_;\n  FakeStreamPtr access_log_request_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsCientType, AccessLogIntegrationTest,\n                         VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Test a basic full access logging flow.\nTEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) {\n  testRouterNotFound();\n  ASSERT_TRUE(waitForAccessLogConnection());\n  ASSERT_TRUE(waitForAccessLogStream());\n  
ASSERT_TRUE(waitForAccessLogRequest(fmt::format(R\"EOF(\nidentifier:\n  node:\n    id: node_name\n    cluster: cluster_name\n    locality:\n      zone: zone_name\n    build_version: {}\n    user_agent_name: \"envoy\"\n  log_name: foo\nhttp_logs:\n  log_entry:\n    common_properties:\n      response_flags:\n        no_route_found: true\n    protocol_version: HTTP11\n    request:\n      authority: host\n      path: /notfound\n      request_headers_bytes: 122\n      request_method: GET\n    response:\n      response_code:\n        value: 404\n      response_code_details: \"route_not_found\"\n      response_headers_bytes: 54\n)EOF\",\n                                                  VersionInfo::version())));\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/notfound\", \"\", downstream_protocol_, version_);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"404\", response->headers().getStatusValue());\n  ASSERT_TRUE(waitForAccessLogRequest(R\"EOF(\nhttp_logs:\n  log_entry:\n    common_properties:\n      response_flags:\n        no_route_found: true\n    protocol_version: HTTP11\n    request:\n      authority: host\n      path: /notfound\n      request_headers_bytes: 122\n      request_method: GET\n    response:\n      response_code:\n        value: 404\n      response_code_details: \"route_not_found\"\n      response_headers_bytes: 54\n)EOF\"));\n\n  // Send an empty response and end the stream. 
This should never happen but make sure nothing\n  // breaks and we make a new stream on a follow up request.\n  access_log_request_->startGrpcStream();\n  envoy::service::accesslog::v3::StreamAccessLogsResponse response_msg;\n  access_log_request_->sendGrpcMessage(response_msg);\n  access_log_request_->finishGrpcStream(Grpc::Status::Ok);\n  switch (clientType()) {\n  case Grpc::ClientType::EnvoyGrpc:\n    test_server_->waitForGaugeEq(\"cluster.accesslog.upstream_rq_active\", 0);\n    break;\n  case Grpc::ClientType::GoogleGrpc:\n    test_server_->waitForCounterGe(\"grpc.accesslog.streams_closed_0\", 1);\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  response = IntegrationUtil::makeSingleRequest(lookupPort(\"http\"), \"GET\", \"/notfound\", \"\",\n                                                downstream_protocol_, version_);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"404\", response->headers().getStatusValue());\n  ASSERT_TRUE(waitForAccessLogStream());\n  ASSERT_TRUE(waitForAccessLogRequest(fmt::format(R\"EOF(\nidentifier:\n  node:\n    id: node_name\n    cluster: cluster_name\n    locality:\n      zone: zone_name\n    build_version: {}\n    user_agent_name: \"envoy\"\n  log_name: foo\nhttp_logs:\n  log_entry:\n    common_properties:\n      response_flags:\n        no_route_found: true\n    protocol_version: HTTP11\n    request:\n      authority: host\n      path: /notfound\n      request_headers_bytes: 122\n      request_method: GET\n    response:\n      response_code:\n        value: 404\n      response_code_details: \"route_not_found\"\n      response_headers_bytes: 54\n)EOF\",\n                                                  VersionInfo::version())));\n  cleanup();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/extensions/access_loggers/grpc/v3/als.pb.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n#include \"envoy/service/accesslog/v3/als.pb.h\"\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/common.h\"\n#include \"common/version/version.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::AssertionResult;\n\nnamespace Envoy {\nnamespace {\n\nvoid clearPort(envoy::config::core::v3::Address& address) {\n  address.mutable_socket_address()->clear_port_specifier();\n}\n\nclass TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest,\n                                        public BaseIntegrationTest {\npublic:\n  TcpGrpcAccessLogIntegrationTest()\n      : BaseIntegrationTest(ipVersion(), ConfigHelper::tcpProxyConfig()) {\n    enable_half_close_ = true;\n  }\n\n  void createUpstreams() override {\n    BaseIntegrationTest::createUpstreams();\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void initialize() override {\n    config_helper_.renameListener(\"tcp_proxy\");\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* accesslog_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      accesslog_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      accesslog_cluster->set_name(\"accesslog\");\n      accesslog_cluster->mutable_http2_protocol_options();\n    });\n\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n      auto* access_log = 
listener->add_access_log();\n      access_log->set_name(\"grpc_accesslog\");\n      envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig access_log_config;\n      auto* common_config = access_log_config.mutable_common_config();\n      common_config->set_log_name(\"foo\");\n      common_config->set_transport_api_version(apiVersion());\n      setGrpcService(*common_config->mutable_grpc_service(), \"accesslog\",\n                     fake_upstreams_.back()->localAddress());\n      access_log->mutable_typed_config()->PackFrom(access_log_config);\n    });\n    BaseIntegrationTest::initialize();\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult waitForAccessLogConnection() {\n    return fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_access_log_connection_);\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult waitForAccessLogStream() {\n    return fake_access_log_connection_->waitForNewStream(*dispatcher_, access_log_request_);\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult waitForAccessLogRequest(const std::string& expected_request_msg_yaml) {\n    envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg;\n    VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg));\n    EXPECT_EQ(\"POST\", access_log_request_->headers().getMethodValue());\n    EXPECT_EQ(TestUtility::getVersionedMethodPath(\"envoy.service.accesslog.{}.AccessLogService\",\n                                                  \"StreamAccessLogs\", apiVersion()),\n              access_log_request_->headers().getPathValue());\n    EXPECT_EQ(\"application/grpc\", access_log_request_->headers().getContentTypeValue());\n\n    envoy::service::accesslog::v3::StreamAccessLogsMessage expected_request_msg;\n    TestUtility::loadFromYaml(expected_request_msg_yaml, expected_request_msg);\n\n    // Clear fields which are not deterministic.\n    auto* log_entry = request_msg.mutable_tcp_logs()->mutable_log_entry(0);\n    
clearPort(*log_entry->mutable_common_properties()->mutable_downstream_remote_address());\n    clearPort(*log_entry->mutable_common_properties()->mutable_downstream_direct_remote_address());\n    clearPort(*log_entry->mutable_common_properties()->mutable_downstream_local_address());\n    clearPort(*log_entry->mutable_common_properties()->mutable_upstream_remote_address());\n    clearPort(*log_entry->mutable_common_properties()->mutable_upstream_local_address());\n    log_entry->mutable_common_properties()->clear_start_time();\n    log_entry->mutable_common_properties()->clear_time_to_last_rx_byte();\n    log_entry->mutable_common_properties()->clear_time_to_first_downstream_tx_byte();\n    log_entry->mutable_common_properties()->clear_time_to_last_downstream_tx_byte();\n    if (request_msg.has_identifier()) {\n      auto* node = request_msg.mutable_identifier()->mutable_node();\n      node->clear_extensions();\n      node->clear_user_agent_build_version();\n    }\n    Config::VersionUtil::scrubHiddenEnvoyDeprecated(request_msg);\n    Config::VersionUtil::scrubHiddenEnvoyDeprecated(expected_request_msg);\n    EXPECT_TRUE(TestUtility::protoEqual(request_msg, expected_request_msg,\n                                        /*ignore_repeated_field_ordering=*/false));\n\n    return AssertionSuccess();\n  }\n\n  void cleanup() {\n    if (fake_access_log_connection_ != nullptr) {\n      AssertionResult result = fake_access_log_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_access_log_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n      fake_access_log_connection_ = nullptr;\n    }\n  }\n\n  FakeHttpConnectionPtr fake_access_log_connection_;\n  FakeStreamPtr access_log_request_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsCientType, TcpGrpcAccessLogIntegrationTest,\n                         VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Test a basic full access logging 
flow.\nTEST_P(TcpGrpcAccessLogIntegrationTest, BasicAccessLogFlow) {\n  initialize();\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"hello\"));\n  tcp_client->waitForData(\"hello\");\n  ASSERT_TRUE(tcp_client->write(\"bar\", false));\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"\", true));\n  tcp_client->waitForHalfClose();\n  ASSERT_TRUE(tcp_client->write(\"\", true));\n\n  ASSERT_TRUE(fake_upstream_connection->waitForData(3));\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n\n  ASSERT_TRUE(waitForAccessLogConnection());\n  ASSERT_TRUE(waitForAccessLogStream());\n  ASSERT_TRUE(waitForAccessLogRequest(\n      fmt::format(R\"EOF(\nidentifier:\n  node:\n    id: node_name\n    cluster: cluster_name\n    locality:\n      zone: zone_name\n    build_version: {}\n    user_agent_name: \"envoy\"\n  log_name: foo\ntcp_logs:\n  log_entry:\n    common_properties:\n      downstream_remote_address:\n        socket_address:\n          address: {}\n      downstream_local_address:\n        socket_address:\n          address: {}\n      upstream_remote_address:\n        socket_address:\n          address: {}\n      upstream_local_address:\n        socket_address:\n          address: {}\n      upstream_cluster: cluster_0\n      downstream_direct_remote_address:\n        socket_address:\n          address: {}\n    connection_properties:\n      received_bytes: 3\n      sent_bytes: 5\n)EOF\",\n                  VersionInfo::version(), Network::Test::getLoopbackAddressString(ipVersion()),\n                  Network::Test::getLoopbackAddressString(ipVersion()),\n                  Network::Test::getLoopbackAddressString(ipVersion()),\n                  
Network::Test::getLoopbackAddressString(ipVersion()),\n                  Network::Test::getLoopbackAddressString(ipVersion()))));\n\n  cleanup();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/access_loggers/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//bazel:envoy_select.bzl\",\n    \"envoy_select_wasm\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/access_loggers/wasm/test_data:test_cpp.wasm\",\n    ]),\n    extension_name = \"envoy.access_loggers.wasm\",\n    deps = [\n        \"//source/extensions/access_loggers/wasm:config\",\n        \"//test/extensions/access_loggers/wasm/test_data:test_cpp_plugin\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/access_loggers/wasm/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/access_loggers/wasm/config_test.cc",
    "content": "#include \"envoy/extensions/access_loggers/wasm/v3/wasm.pb.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/access_log/access_log_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/access_loggers/wasm/config.h\"\n#include \"extensions/access_loggers/wasm/wasm_access_log_impl.h\"\n#include \"extensions/access_loggers/well_known_names.h\"\n#include \"extensions/common/wasm/wasm.h\"\n\n#include \"test/mocks/server/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace AccessLoggers {\nnamespace Wasm {\n\nclass TestFactoryContext : public NiceMock<Server::Configuration::MockFactoryContext> {\npublic:\n  TestFactoryContext(Api::Api& api, Stats::Scope& scope) : api_(api), scope_(scope) {}\n  Api::Api& api() override { return api_; }\n  Stats::Scope& scope() override { return scope_; }\n  const envoy::config::core::v3::Metadata& listenerMetadata() const override {\n    return listener_metadata_;\n  }\n\nprivate:\n  Api::Api& api_;\n  Stats::Scope& scope_;\n  envoy::config::core::v3::Metadata listener_metadata_;\n};\n\nclass WasmAccessLogConfigTest : public testing::TestWithParam<std::string> {};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto testing_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    \"v8\",\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    \"wavm\",\n#endif\n    \"null\");\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmAccessLogConfigTest, testing_values);\n\nTEST_P(WasmAccessLogConfigTest, CreateWasmFromEmpty) {\n  auto factory =\n      Registry::FactoryRegistry<Server::Configuration::AccessLogInstanceFactory>::getFactory(\n          AccessLogNames::get().Wasm);\n  ASSERT_NE(factory, nullptr);\n\n  
ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  ASSERT_NE(nullptr, message);\n\n  AccessLog::FilterPtr filter;\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  AccessLog::InstanceSharedPtr instance;\n  EXPECT_THROW_WITH_MESSAGE(\n      instance = factory->createAccessLogInstance(*message, std::move(filter), context),\n      Common::Wasm::WasmException, \"Unable to create Wasm access log \");\n}\n\nTEST_P(WasmAccessLogConfigTest, CreateWasmFromWASM) {\n  auto factory =\n      Registry::FactoryRegistry<Server::Configuration::AccessLogInstanceFactory>::getFactory(\n          AccessLogNames::get().Wasm);\n  ASSERT_NE(factory, nullptr);\n\n  envoy::extensions::access_loggers::wasm::v3::WasmAccessLog config;\n  config.mutable_config()->mutable_vm_config()->set_runtime(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()));\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n        \"{{ test_rundir }}/test/extensions/access_loggers/wasm/test_data/test_cpp.wasm\"));\n  } else {\n    code = \"AccessLoggerTestCpp\";\n  }\n  config.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_inline_bytes(\n      code);\n  // Test Any configuration.\n  ProtobufWkt::Struct some_proto;\n  config.mutable_config()->mutable_vm_config()->mutable_configuration()->PackFrom(some_proto);\n\n  AccessLog::FilterPtr filter;\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  TestFactoryContext context(*api, stats_store);\n\n  AccessLog::InstanceSharedPtr instance =\n      factory->createAccessLogInstance(config, std::move(filter), context);\n  EXPECT_NE(nullptr, instance);\n  EXPECT_NE(nullptr, dynamic_cast<WasmAccessLog*>(instance.get()));\n  Http::TestRequestHeaderMapImpl request_header;\n  Http::TestResponseHeaderMapImpl response_header;\n  Http::TestResponseTrailerMapImpl 
response_trailer;\n  StreamInfo::MockStreamInfo log_stream_info;\n  instance->log(&request_header, &response_header, &response_trailer, log_stream_info);\n\n  filter = std::make_unique<NiceMock<AccessLog::MockFilter>>();\n  AccessLog::InstanceSharedPtr filter_instance =\n      factory->createAccessLogInstance(config, std::move(filter), context);\n  filter_instance->log(&request_header, &response_header, &response_trailer, log_stream_info);\n}\n\n} // namespace Wasm\n} // namespace AccessLoggers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/access_loggers/wasm/test_data/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\nload(\"//bazel/wasm:wasm.bzl\", \"envoy_wasm_cc_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"test_cpp_plugin\",\n    srcs = [\n        \"test_cpp.cc\",\n        \"test_cpp_null_plugin.cc\",\n    ],\n    copts = [\"-DNULL_PLUGIN=1\"],\n    deps = [\n        \"//external:abseil_node_hash_map\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n        \"//source/extensions/common/wasm:wasm_hdr\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/common/wasm:well_known_names\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"test_cpp.wasm\",\n    srcs = [\"test_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_lite\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/access_loggers/wasm/test_data/test_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <string>\n#include <unordered_map>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics.h\"\n#else\n#include \"include/proxy-wasm/null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(AccessLoggerTestCpp)\n\nclass TestRootContext : public RootContext {\npublic:\n  using RootContext::RootContext;\n\n  void onLog() override;\n};\nstatic RegisterContextFactory register_ExampleContext(ROOT_FACTORY(TestRootContext));\n\nvoid TestRootContext::onLog() {\n  auto path = getRequestHeader(\":path\");\n  logWarn(\"onLog \" + std::to_string(id()) + \" \" + std::string(path->view()));\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/access_loggers/wasm/test_data/test_cpp_null_plugin.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace proxy_wasm {\nnamespace null_plugin {\nnamespace AccessLoggerTestCpp {\nNullPluginRegistry* context_registry_;\n} // namespace AccessLoggerTestCpp\n\nRegisterNullVmPluginFactory register_common_wasm_test_cpp_plugin(\"AccessLoggerTestCpp\", []() {\n  return std::make_unique<NullPlugin>(AccessLoggerTestCpp::context_registry_);\n});\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//bazel:envoy_select.bzl\",\n    \"envoy_select_wasm\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n    \"envoy_extension_cc_test_binary\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"wasm_test\",\n    srcs = [\"wasm_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/bootstrap/wasm/test_data:asm2wasm_cpp.wasm\",\n        \"//test/extensions/bootstrap/wasm/test_data:bad_signature_cpp.wasm\",\n        \"//test/extensions/bootstrap/wasm/test_data:emscripten_cpp.wasm\",\n        \"//test/extensions/bootstrap/wasm/test_data:logging_cpp.wasm\",\n        \"//test/extensions/bootstrap/wasm/test_data:logging_rust.wasm\",\n        \"//test/extensions/bootstrap/wasm/test_data:segv_cpp.wasm\",\n        \"//test/extensions/bootstrap/wasm/test_data:stats_cpp.wasm\",\n    ]),\n    extension_name = \"envoy.bootstrap.wasm\",\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/bootstrap/wasm:config\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//test/extensions/bootstrap/wasm/test_data:stats_cpp_plugin\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/bootstrap/wasm/test_data:missing_cpp.wasm\",\n        \"//test/extensions/bootstrap/wasm/test_data:start_cpp.wasm\",\n    ]),\n    extension_name = 
\"envoy.bootstrap.wasm\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/bootstrap/wasm:config\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//test/extensions/bootstrap/wasm/test_data:start_cpp_plugin\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test_binary(\n    name = \"wasm_speed_test\",\n    srcs = [\"wasm_speed_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/bootstrap/wasm/test_data:speed_cpp.wasm\",\n    ]),\n    extension_name = \"envoy.bootstrap.wasm\",\n    external_deps = [\n        \"abseil_optional\",\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/bootstrap/wasm:config\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//test/extensions/bootstrap/wasm/test_data:speed_cpp_plugin\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/config_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/bootstrap/wasm/config.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Wasm {\n\nusing Extensions::Bootstrap::Wasm::WasmServicePtr;\n\nclass WasmFactoryTest : public testing::TestWithParam<std::string> {\nprotected:\n  WasmFactoryTest() {\n    config_.mutable_config()->mutable_vm_config()->set_runtime(\n        absl::StrCat(\"envoy.wasm.runtime.\", GetParam()));\n    if (GetParam() != \"null\") {\n      config_.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename(\n          TestEnvironment::substitute(\n              \"{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/start_cpp.wasm\"));\n    } else {\n      config_.mutable_config()\n          ->mutable_vm_config()\n          ->mutable_code()\n          ->mutable_local()\n          ->set_inline_bytes(\"WasmStartCpp\");\n    }\n    config_.mutable_config()->set_name(\"test\");\n    config_.set_singleton(true);\n  }\n\n  void initializeWithConfig(const envoy::extensions::wasm::v3::WasmService& config) {\n    auto factory =\n        Registry::FactoryRegistry<Server::Configuration::BootstrapExtensionFactory>::getFactory(\n            \"envoy.bootstrap.wasm\");\n    ASSERT_NE(factory, nullptr);\n    api_ = Api::createApiForTest(stats_store_);\n    EXPECT_CALL(context_, api()).WillRepeatedly(testing::ReturnRef(*api_));\n    EXPECT_CALL(context_, initManager()).WillRepeatedly(testing::ReturnRef(init_manager_));\n    EXPECT_CALL(context_, lifecycleNotifier())\n        
.WillRepeatedly(testing::ReturnRef(lifecycle_notifier_));\n    extension_ = factory->createBootstrapExtension(config, context_);\n    static_cast<Bootstrap::Wasm::WasmServiceExtension*>(extension_.get())->wasmService();\n    EXPECT_CALL(init_watcher_, ready());\n    init_manager_.initialize(init_watcher_);\n  }\n\n  envoy::extensions::wasm::v3::WasmService config_;\n  testing::NiceMock<Server::Configuration::MockServerFactoryContext> context_;\n  testing::NiceMock<Server::MockServerLifecycleNotifier> lifecycle_notifier_;\n  Init::ExpectableWatcherImpl init_watcher_;\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr api_;\n  Init::ManagerImpl init_manager_{\"init_manager\"};\n  Server::BootstrapExtensionPtr extension_;\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto testing_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    \"v8\",\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    \"wavm\",\n#endif\n    \"null\");\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmFactoryTest, testing_values);\n\nTEST_P(WasmFactoryTest, CreateWasmFromWasm) {\n  auto factory = std::make_unique<Bootstrap::Wasm::WasmFactory>();\n  auto empty_config = factory->createEmptyConfigProto();\n\n  initializeWithConfig(config_);\n\n  EXPECT_NE(extension_, nullptr);\n}\n\nTEST_P(WasmFactoryTest, CreateWasmFromWasmPerThread) {\n  config_.set_singleton(false);\n  initializeWithConfig(config_);\n\n  EXPECT_NE(extension_, nullptr);\n  extension_.reset();\n  context_.threadLocal().shutdownThread();\n}\n\nTEST_P(WasmFactoryTest, MissingImport) {\n  if (GetParam() == \"null\") {\n    return;\n  }\n  config_.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename(\n      TestEnvironment::substitute(\n          \"{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/missing_cpp.wasm\"));\n  EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), 
Extensions::Common::Wasm::WasmException,\n                            \"Unable to create Wasm service test\");\n}\n\nTEST_P(WasmFactoryTest, UnspecifiedRuntime) {\n  config_.mutable_config()->mutable_vm_config()->set_runtime(\"\");\n\n  EXPECT_THROW_WITH_REGEX(\n      initializeWithConfig(config_), EnvoyException,\n      \"Proto constraint validation failed \\\\(WasmServiceValidationError\\\\.Config\");\n}\n\nTEST_P(WasmFactoryTest, UnknownRuntime) {\n  config_.mutable_config()->mutable_vm_config()->set_runtime(\"envoy.wasm.runtime.invalid\");\n\n  EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), Extensions::Common::Wasm::WasmException,\n                            \"Unable to create Wasm service test\");\n}\n\nTEST_P(WasmFactoryTest, StartFailed) {\n  ProtobufWkt::StringValue plugin_configuration;\n  plugin_configuration.set_value(\"bad\");\n  config_.mutable_config()->mutable_vm_config()->mutable_configuration()->PackFrom(\n      plugin_configuration);\n\n  EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), Extensions::Common::Wasm::WasmException,\n                            \"Unable to create Wasm service test\");\n}\n\nTEST_P(WasmFactoryTest, StartFailedOpen) {\n  ProtobufWkt::StringValue plugin_configuration;\n  plugin_configuration.set_value(\"bad\");\n  config_.mutable_config()->mutable_vm_config()->mutable_configuration()->PackFrom(\n      plugin_configuration);\n  config_.mutable_config()->set_fail_open(true);\n\n  EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), Extensions::Common::Wasm::WasmException,\n                            \"Unable to create Wasm service test\");\n}\n\nTEST_P(WasmFactoryTest, ConfigureFailed) {\n  ProtobufWkt::StringValue plugin_configuration;\n  plugin_configuration.set_value(\"bad\");\n  config_.mutable_config()->mutable_configuration()->PackFrom(plugin_configuration);\n\n  EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), Extensions::Common::Wasm::WasmException,\n                            
\"Unable to create Wasm service test\");\n}\n\n} // namespace Wasm\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\nload(\"//bazel/wasm:wasm.bzl\", \"envoy_wasm_cc_binary\", \"wasm_rust_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nwasm_rust_binary(\n    name = \"logging_rust.wasm\",\n    srcs = [\"logging_rust/src/lib.rs\"],\n    deps = [\n        \"//bazel/external/cargo:log\",\n        \"//bazel/external/cargo:proxy_wasm\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"speed_cpp_plugin\",\n    srcs = [\n        \"speed_cpp.cc\",\n        \"speed_cpp_null_plugin.cc\",\n    ],\n    copts = [\"-DNULL_PLUGIN=1\"],\n    deps = [\n        \"//external:abseil_node_hash_map\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/common/wasm:well_known_names\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"start_cpp_plugin\",\n    srcs = [\n        \"start_cpp.cc\",\n        \"start_cpp_null_plugin.cc\",\n    ],\n    copts = [\"-DNULL_PLUGIN=1\"],\n    deps = [\n        \"//external:abseil_node_hash_map\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n        \"//source/extensions/common/wasm:wasm_hdr\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/common/wasm:well_known_names\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"stats_cpp_plugin\",\n    srcs = [\n        \"stats_cpp.cc\",\n        \"stats_cpp_null_plugin.cc\",\n    ],\n    copts = [\"-DNULL_PLUGIN=1\"],\n    deps = [\n        \"//external:abseil_node_hash_map\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n        \"//source/extensions/common/wasm:wasm_hdr\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        
\"//source/extensions/common/wasm:well_known_names\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"asm2wasm_cpp.wasm\",\n    srcs = [\"asm2wasm_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"bad_signature_cpp.wasm\",\n    srcs = [\"bad_signature_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"emscripten_cpp.wasm\",\n    srcs = [\"emscripten_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"logging_cpp.wasm\",\n    srcs = [\"logging_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"missing_cpp.wasm\",\n    srcs = [\"missing_cpp.cc\"],\n    linkopts = [\n        \"--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js\",\n        \"-s ERROR_ON_UNDEFINED_SYMBOLS=0\",\n    ],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"segv_cpp.wasm\",\n    srcs = [\"segv_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"speed_cpp.wasm\",\n    srcs = [\"speed_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_full\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"start_cpp.wasm\",\n    srcs = [\"start_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"stats_cpp.wasm\",\n    srcs = [\"stats_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/asm2wasm_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <math.h>\n\n#include <string>\n\n#include \"proxy_wasm_intrinsics.h\"\n\n// Required Proxy-Wasm ABI version.\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {}\n\n// Use global variables so the compiler cannot optimize the operations away.\nint32_t i32a = 0;\nint32_t i32b = 1;\ndouble f64a = 0.0;\ndouble f64b = 1.0;\n\n// Emscripten in some modes and versions would use functions from the `asm2wasm` module to implement\n// these operations: int32_t % /, double conversion to int32_t and remainder().\nextern \"C\" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, uint32_t) {\n  logInfo(std::string(\"out \") + std::to_string(i32a / i32b) + \" \" + std::to_string(i32a % i32b) +\n          \" \" + std::to_string((int32_t)remainder(f64a, f64b)));\n  return 1;\n}\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/bad_signature_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <string>\n\n#define PROXY_WASM_KEEPALIVE __attribute__((used)) __attribute__((visibility(\"default\")))\n\n// Required Proxy-Wasm ABI version.\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {}\n\nextern \"C\" uint32_t proxy_log(uint32_t level, const char* logMessage, size_t messageSize);\n\nextern \"C\" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, int bad, char* configuration,\n                                                            int size) {\n  std::string message = \"bad signature\";\n  proxy_log(4 /* error */, message.c_str(), message.size());\n  return 1;\n}\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/emscripten_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <cmath>\n#include <limits>\n#include <string>\n\n#include \"proxy_wasm_intrinsics.h\"\n\n// Required Proxy-Wasm ABI version.\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {}\n\nfloat gNan = std::nan(\"1\");\nfloat gInfinity = INFINITY;\n\nextern \"C\" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, uint32_t) {\n  logInfo(std::string(\"NaN \") + std::to_string(gNan));\n  logWarn(\"inf \" + std::to_string(gInfinity));\n  logWarn(\"inf \" + std::to_string(1.0 / 0.0));\n  logWarn(std::string(\"inf \") + (std::isinf(gInfinity) ? \"inf\" : \"nan\"));\n  return 1;\n}\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/logging_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <stdio.h>\n\n#include <string>\n\n#include \"proxy_wasm_intrinsics.h\"\n\n// Required Proxy-Wasm ABI version.\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {}\n\nextern \"C\" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, uint32_t configuration_size) {\n  fprintf(stdout, \"printf stdout test\");\n  fflush(stdout);\n  fprintf(stderr, \"printf stderr test\");\n  logTrace(\"test trace logging\");\n  logDebug(\"test debug logging\");\n  logError(\"test error logging\");\n  const char* configuration = nullptr;\n  size_t size;\n  proxy_get_buffer_bytes(WasmBufferType::PluginConfiguration, 0, configuration_size, &configuration,\n                         &size);\n  logWarn(std::string(\"warn \" + std::string(configuration, size)));\n  ::free((void*)configuration);\n  return 1;\n}\n\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_on_context_create(uint32_t, uint32_t) {}\n\nextern \"C\" PROXY_WASM_KEEPALIVE uint32_t proxy_on_vm_start(uint32_t, uint32_t) {\n  proxy_set_tick_period_milliseconds(10);\n  return 1;\n}\n\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_on_tick(uint32_t) {\n  const char* root_id = nullptr;\n  size_t size;\n  proxy_get_property(\"plugin_root_id\", sizeof(\"plugin_root_id\") - 1, &root_id, &size);\n  logInfo(\"test tick logging\" + std::string(root_id, size));\n  proxy_done();\n}\n\nextern \"C\" PROXY_WASM_KEEPALIVE uint32_t proxy_on_done(uint32_t) {\n  logInfo(\"onDone logging\");\n  return 0;\n}\n\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_on_delete(uint32_t) { logInfo(\"onDelete logging\"); }\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/logging_rust/Cargo.toml",
    "content": "[package]\ndescription = \"Proxy-Wasm logging test\"\nname = \"logging_rust\"\nversion = \"0.0.1\"\nauthors = [\"Piotr Sikora <piotrsikora@google.com>\"]\nedition = \"2018\"\n\n[dependencies]\nproxy-wasm = \"0.1\"\nlog = \"0.4\"\n\n[lib]\ncrate-type = [\"cdylib\"]\npath = \"src/*.rs\"\n\n[profile.release]\nlto = true\nopt-level = 3\npanic = \"abort\"\n\n[raze]\nworkspace_path = \"//bazel/external/cargo\"\ngenmode = \"Remote\"\n\n[raze.crates.log.'0.4.11']\nadditional_flags = [\"--cfg=atomic_cas\"]\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/logging_rust/src/lib.rs",
    "content": "use log::{debug, error, info, trace, warn};\nuse proxy_wasm::traits::{Context, RootContext};\nuse proxy_wasm::types::LogLevel;\n\n#[no_mangle]\npub fn _start() {\n    proxy_wasm::set_log_level(LogLevel::Trace);\n    proxy_wasm::set_root_context(|_| -> Box<dyn RootContext> { Box::new(TestRoot) });\n}\n\nstruct TestRoot;\n\nimpl RootContext for TestRoot {\n    fn on_vm_start(&mut self, _: usize) -> bool {\n        true\n    }\n\n    fn on_configure(&mut self, _: usize) -> bool {\n        trace!(\"test trace logging\");\n        debug!(\"test debug logging\");\n        error!(\"test error logging\");\n        if let Some(value) = self.get_configuration() {\n            warn!(\"warn {}\", String::from_utf8(value).unwrap());\n        }\n        true\n    }\n\n    fn on_tick(&mut self) {\n        if let Some(value) = self.get_property(vec![\"plugin_root_id\"]) {\n            info!(\"test tick logging{}\", String::from_utf8(value).unwrap());\n        } else {\n            info!(\"test tick logging\");\n        }\n        self.done();\n    }\n}\n\nimpl Context for TestRoot {\n    fn on_done(&mut self) -> bool {\n        info!(\"onDone logging\");\n        false\n    }\n}\n\nimpl Drop for TestRoot {\n    fn drop(&mut self) {\n        info!(\"onDelete logging\");\n    }\n}\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/missing_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"proxy_wasm_intrinsics.h\"\n\n// Required Proxy-Wasm ABI version.\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {}\n\nextern \"C\" void missing();\n\nextern \"C\" PROXY_WASM_KEEPALIVE uint32_t proxy_on_vm_start(uint32_t, uint32_t) {\n  missing();\n  return 1;\n}\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/segv_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <string>\n\n#include \"proxy_wasm_intrinsics.h\"\n\n// Required Proxy-Wasm ABI version.\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {}\n\nstatic int* badptr = nullptr;\n\nextern \"C\" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, uint32_t) {\n  logError(\"before badptr\");\n  *badptr = 1;\n  logError(\"after badptr\");\n  return 1;\n}\n\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_on_log(uint32_t context_id) {\n  logError(\"before div by zero\");\n#pragma clang optimize off\n  int zero = context_id / 1000;\n  logError(\"divide by zero: \" + std::to_string(100 / zero));\n#pragma clang optimize on\n  logError(\"after div by zero\");\n}\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/speed_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <google/protobuf/util/json_util.h>\n\n#include <string>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics_full.h\"\n// Required Proxy-Wasm ABI version.\nextern \"C\" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {}\n#else\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\nusing envoy::config::core::v3::GrpcService;\n#include \"include/proxy-wasm/null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(WasmSpeedCpp)\n\nint xDoNotRemove = 0;\n\ngoogle::protobuf::Arena arena;\n\ngoogle::protobuf::Struct args;\ngoogle::protobuf::Struct* args_arena =\n    google::protobuf::Arena::CreateMessage<google::protobuf::Struct>(&arena);\nstd::string configuration = R\"EOF(\n  {\n    \"NAME\":\"test_pod\",\n    \"NAMESPACE\":\"test_namespace\",\n    \"LABELS\": {\n        \"app\": \"productpage\",\n        \"version\": \"v1\",\n        \"pod-template-hash\": \"84975bc778\"\n    },\n    \"OWNER\":\"test_owner\",\n    \"WORKLOAD_NAME\":\"test_workload\",\n    \"PLATFORM_METADATA\":{\n        \"gcp_project\":\"test_project\",\n        \"gcp_cluster_location\":\"test_location\",\n        \"gcp_cluster_name\":\"test_cluster\"\n    },\n    \"ISTIO_VERSION\":\"istio-1.4\",\n    \"MESH_ID\":\"test-mesh\"\n  }\n  )EOF\";\n\n// google::protobuf::Struct a;\n// google::protobuf::util::JsonStringToMessage(configuration+'hfdjfhkjhdskhjk', a);\n\nconst static char encodeLookup[] =\n    \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\";\nconst static char padCharacter = '=';\n\nstd::string base64Encode(const uint8_t* start, const uint8_t* end) {\n  std::string encodedString;\n  size_t size = end - start;\n  encodedString.reserve(((size / 3) + (size % 3 > 0)) * 4);\n  uint32_t temp;\n  auto cursor = start;\n  for (size_t idx = 0; idx < size / 3; idx++) {\n    temp = (*cursor++) << 16; // Convert to big endian\n    temp += (*cursor++) << 8;\n    temp += (*cursor++);\n    encodedString.append(1, encodeLookup[(temp & 
0x00FC0000) >> 18]);\n    encodedString.append(1, encodeLookup[(temp & 0x0003F000) >> 12]);\n    encodedString.append(1, encodeLookup[(temp & 0x00000FC0) >> 6]);\n    encodedString.append(1, encodeLookup[(temp & 0x0000003F)]);\n  }\n  switch (size % 3) {\n  case 1:\n    temp = (*cursor++) << 16; // Convert to big endian\n    encodedString.append(1, encodeLookup[(temp & 0x00FC0000) >> 18]);\n    encodedString.append(1, encodeLookup[(temp & 0x0003F000) >> 12]);\n    encodedString.append(2, padCharacter);\n    break;\n  case 2:\n    temp = (*cursor++) << 16; // Convert to big endian\n    temp += (*cursor++) << 8;\n    encodedString.append(1, encodeLookup[(temp & 0x00FC0000) >> 18]);\n    encodedString.append(1, encodeLookup[(temp & 0x0003F000) >> 12]);\n    encodedString.append(1, encodeLookup[(temp & 0x00000FC0) >> 6]);\n    encodedString.append(1, padCharacter);\n    break;\n  }\n  return encodedString;\n}\n\nbool base64Decode(const std::basic_string<char>& input, std::vector<uint8_t>* output) {\n  if (input.length() % 4)\n    return false;\n  size_t padding = 0;\n  if (input.length()) {\n    if (input[input.length() - 1] == padCharacter)\n      padding++;\n    if (input[input.length() - 2] == padCharacter)\n      padding++;\n  }\n  // Setup a vector to hold the result\n  std::vector<unsigned char> decodedBytes;\n  decodedBytes.reserve(((input.length() / 4) * 3) - padding);\n  uint32_t temp = 0; // Holds decoded quanta\n  std::basic_string<char>::const_iterator cursor = input.begin();\n  while (cursor < input.end()) {\n    for (size_t quantumPosition = 0; quantumPosition < 4; quantumPosition++) {\n      temp <<= 6;\n      if (*cursor >= 0x41 && *cursor <= 0x5A) // This area will need tweaking if\n        temp |= *cursor - 0x41;               // you are using an alternate alphabet\n      else if (*cursor >= 0x61 && *cursor <= 0x7A)\n        temp |= *cursor - 0x47;\n      else if (*cursor >= 0x30 && *cursor <= 0x39)\n        temp |= *cursor + 0x04;\n      else if 
(*cursor == 0x2B)\n        temp |= 0x3E; // change to 0x2D for URL alphabet\n      else if (*cursor == 0x2F)\n        temp |= 0x3F;                     // change to 0x5F for URL alphabet\n      else if (*cursor == padCharacter) { // pad\n        switch (input.end() - cursor) {\n        case 1: // One pad character\n          decodedBytes.push_back((temp >> 16) & 0x000000FF);\n          decodedBytes.push_back((temp >> 8) & 0x000000FF);\n          goto Ldone;\n        case 2: // Two pad characters\n          decodedBytes.push_back((temp >> 10) & 0x000000FF);\n          goto Ldone;\n        default:\n          return false;\n        }\n      } else\n        return false;\n      cursor++;\n    }\n    decodedBytes.push_back((temp >> 16) & 0x000000FF);\n    decodedBytes.push_back((temp >> 8) & 0x000000FF);\n    decodedBytes.push_back((temp)&0x000000FF);\n  }\nLdone:\n  *output = std::move(decodedBytes);\n  return true;\n}\nstd::string check_compiler;\n\nvoid (*test_fn)() = nullptr;\n\nvoid empty_test() {}\n\nvoid get_current_time_test() {\n  uint64_t t;\n  if (WasmResult::Ok != proxy_get_current_time_nanoseconds(&t)) {\n    logError(\"bad result from getCurrentTimeNanoseconds\");\n  }\n}\n\nvoid small_string_check_compiler_test() {\n  check_compiler = \"foo\";\n  check_compiler += \"bar\";\n  check_compiler = \"\";\n}\n\nvoid small_string_test() {\n  std::string s = \"foo\";\n  s += \"bar\";\n  xDoNotRemove = s.size();\n}\n\nvoid small_string_check_compiler1000_test() {\n  for (int x = 0; x < 1000; x++) {\n    check_compiler = \"foo\";\n    check_compiler += \"bar\";\n  }\n  check_compiler = \"\";\n}\n\nvoid small_string1000_test() {\n  for (int x = 0; x < 1000; x++) {\n    std::string s = \"foo\";\n    s += \"bar\";\n    xDoNotRemove += s.size();\n  }\n}\n\nvoid large_string_test() {\n  std::string s(1024, 'f');\n  std::string d(1024, 'o');\n  s += d;\n  xDoNotRemove += s.size();\n}\n\nvoid large_string1000_test() {\n  for (int x = 0; x < 1000; x++) {\n    std::string 
s(1024, 'f');\n    std::string d(1024, 'o');\n    s += d;\n    xDoNotRemove += s.size();\n  }\n}\n\nvoid get_property_test() {\n  std::string property = \"plugin_root_id\";\n  const char* value_ptr = nullptr;\n  size_t value_size = 0;\n  auto result = proxy_get_property(property.data(), property.size(), &value_ptr, &value_size);\n  if (WasmResult::Ok != result) {\n    logError(\"bad result for getProperty\");\n  }\n  ::free(reinterpret_cast<void*>(const_cast<char*>(value_ptr)));\n}\n\nvoid grpc_service_test() {\n  std::string value = \"foo\";\n  GrpcService grpc_service;\n  grpc_service.mutable_envoy_grpc()->set_cluster_name(value);\n  std::string grpc_service_string;\n  grpc_service.SerializeToString(&grpc_service_string);\n}\n\nvoid grpc_service1000_test() {\n  std::string value = \"foo\";\n  for (int x = 0; x < 1000; x++) {\n    GrpcService grpc_service;\n    grpc_service.mutable_envoy_grpc()->set_cluster_name(value);\n    std::string grpc_service_string;\n    grpc_service.SerializeToString(&grpc_service_string);\n  }\n}\n\nvoid modify_metadata_test() {\n  auto path = getRequestHeader(\":path\");\n  addRequestHeader(\"newheader\", \"newheadervalue\");\n  auto server = getRequestHeader(\"server\");\n  replaceRequestHeader(\"server\", \"envoy-wasm\");\n  replaceRequestHeader(\"envoy-wasm\", \"server\");\n  removeRequestHeader(\"newheader\");\n}\n\nvoid modify_metadata1000_test() {\n  for (int x = 0; x < 1000; x++) {\n    auto path = getRequestHeader(\":path\");\n    addRequestHeader(\"newheader\", \"newheadervalue\");\n    auto server = getRequestHeader(\"server\");\n    replaceRequestHeader(\"server\", \"envoy-wasm\");\n    replaceRequestHeader(\"envoy-wasm\", \"server\");\n    removeRequestHeader(\"newheader\");\n  }\n}\n\nvoid json_serialize_test() { google::protobuf::util::JsonStringToMessage(configuration, &args); }\n\nvoid json_serialize_arena_test() {\n  google::protobuf::util::JsonStringToMessage(configuration, args_arena);\n}\n\nvoid 
json_deserialize_test() {\n  std::string json;\n  google::protobuf::util::MessageToJsonString(args, &json);\n  xDoNotRemove += json.size();\n}\n\nvoid json_deserialize_arena_test() {\n  std::string json;\n  google::protobuf::util::MessageToJsonString(*args_arena, &json);\n}\n\nvoid json_deserialize_empty_test() {\n  std::string json;\n  google::protobuf::Struct empty;\n  google::protobuf::util::MessageToJsonString(empty, &json);\n  xDoNotRemove = json.size();\n}\n\nvoid json_serialize_deserialize_test() {\n  std::string json;\n  google::protobuf::Struct proto;\n  google::protobuf::util::JsonStringToMessage(configuration, &proto);\n  google::protobuf::util::MessageToJsonString(proto, &json);\n  xDoNotRemove = json.size();\n}\n\nvoid convert_to_filter_state_test() {\n  auto start = reinterpret_cast<uint8_t*>(&*configuration.begin());\n  auto end = start + configuration.size();\n  std::string encoded_config = base64Encode(start, end);\n  std::vector<uint8_t> decoded;\n  base64Decode(encoded_config, &decoded);\n  std::string decoded_config(decoded.begin(), decoded.end());\n  google::protobuf::util::JsonStringToMessage(decoded_config, &args);\n  auto bytes = args.SerializeAsString();\n  setFilterStateStringValue(\"wasm_request_set_key\", bytes);\n}\n\nWASM_EXPORT(uint32_t, proxy_on_vm_start, (uint32_t, uint32_t configuration_size)) {\n  const char* configuration_ptr = nullptr;\n  size_t size;\n  proxy_get_buffer_bytes(WasmBufferType::VmConfiguration, 0, configuration_size, &configuration_ptr,\n                         &size);\n  std::string configuration(configuration_ptr, size);\n  if (configuration == \"empty\") {\n    test_fn = &empty_test;\n  } else if (configuration == \"get_current_time\") {\n    test_fn = &get_current_time_test;\n  } else if (configuration == \"small_string\") {\n    test_fn = &small_string_test;\n  } else if (configuration == \"small_string1000\") {\n    test_fn = &small_string1000_test;\n  } else if (configuration == 
\"small_string_check_compiler\") {\n    test_fn = &small_string_check_compiler_test;\n  } else if (configuration == \"small_string_check_compiler1000\") {\n    test_fn = &small_string_check_compiler1000_test;\n  } else if (configuration == \"large_string\") {\n    test_fn = &large_string_test;\n  } else if (configuration == \"large_string1000\") {\n    test_fn = &large_string1000_test;\n  } else if (configuration == \"get_property\") {\n    test_fn = &get_property_test;\n  } else if (configuration == \"grpc_service\") {\n    test_fn = &grpc_service_test;\n  } else if (configuration == \"grpc_service1000\") {\n    test_fn = &grpc_service1000_test;\n  } else if (configuration == \"modify_metadata\") {\n    test_fn = &modify_metadata_test;\n  } else if (configuration == \"modify_metadata1000\") {\n    test_fn = &modify_metadata1000_test;\n  } else if (configuration == \"json_serialize\") {\n    test_fn = &json_serialize_test;\n  } else if (configuration == \"json_serialize_arena\") {\n    test_fn = &json_serialize_arena_test;\n  } else if (configuration == \"json_deserialize\") {\n    test_fn = &json_deserialize_test;\n  } else if (configuration == \"json_deserialize_empty\") {\n    test_fn = &json_deserialize_empty_test;\n  } else if (configuration == \"json_deserialize_arena\") {\n    test_fn = &json_deserialize_arena_test;\n  } else if (configuration == \"json_serialize_deserialize\") {\n    test_fn = &json_serialize_deserialize_test;\n  } else if (configuration == \"convert_to_filter_state\") {\n    test_fn = &convert_to_filter_state_test;\n  } else {\n    std::string message = \"on_start \" + configuration;\n    proxy_log(LogLevel::info, message.c_str(), message.size());\n  }\n  ::free(const_cast<void*>(reinterpret_cast<const void*>(configuration_ptr)));\n  return 1;\n}\n\nWASM_EXPORT(void, proxy_on_tick, (uint32_t)) { (*test_fn)(); }\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/speed_cpp_null_plugin.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace proxy_wasm {\nnamespace null_plugin {\nnamespace WasmSpeedCpp {\nNullPluginRegistry* context_registry_;\n} // namespace WasmSpeedCpp\n\nRegisterNullVmPluginFactory register_wasm_speed_test_plugin(\"WasmSpeedCpp\", []() {\n  return std::make_unique<NullPlugin>(WasmSpeedCpp::context_registry_);\n});\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/start_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <string>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics.h\"\n#else\n#include \"include/proxy-wasm/null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(WasmStartCpp)\n\n// Required Proxy-Wasm ABI version.\nWASM_EXPORT(void, proxy_abi_version_0_1_0, ()) {}\n\nWASM_EXPORT(uint32_t, proxy_on_vm_start, (uint32_t, uint32_t configuration_size)) {\n  logDebug(\"onStart\");\n  return configuration_size ? 0 /* failure */ : 1 /* success */;\n}\n\nWASM_EXPORT(uint32_t, proxy_on_configure, (uint32_t, uint32_t configuration_size)) {\n  // Fail if we are provided a non-empty configuration.\n  return configuration_size ? 0 /* failure */ : 1 /* success */;\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/start_cpp_null_plugin.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace proxy_wasm {\nnamespace null_plugin {\nnamespace WasmStartCpp {\nNullPluginRegistry* context_registry_;\n} // namespace WasmStartCpp\n\nRegisterNullVmPluginFactory register_wasm_speed_test_plugin(\"WasmStartCpp\", []() {\n  return std::make_unique<NullPlugin>(WasmStartCpp::context_registry_);\n});\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/stats_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <string>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics.h\"\n#else\n#include \"include/proxy-wasm/null_plugin.h\"\n#endif\n\ntemplate <typename T> std::unique_ptr<T> wrap_unique(T* ptr) { return std::unique_ptr<T>(ptr); }\n\nSTART_WASM_PLUGIN(WasmStatsCpp)\n\n// Required Proxy-Wasm ABI version.\nWASM_EXPORT(void, proxy_abi_version_0_1_0, ()) {}\n\n// Test the low level interface.\nWASM_EXPORT(uint32_t, proxy_on_configure, (uint32_t, uint32_t)) {\n  uint32_t c, g, h;\n  CHECK_RESULT(defineMetric(MetricType::Counter, \"test_counter\", &c));\n  CHECK_RESULT(defineMetric(MetricType::Gauge, \"test_gauge\", &g));\n  CHECK_RESULT(defineMetric(MetricType::Histogram, \"test_histogram\", &h));\n\n  CHECK_RESULT(incrementMetric(c, 1));\n  CHECK_RESULT(recordMetric(g, 2));\n  CHECK_RESULT(recordMetric(h, 3));\n\n  uint64_t value;\n  CHECK_RESULT(getMetric(c, &value));\n  logTrace(std::string(\"get counter = \") + std::to_string(value));\n  CHECK_RESULT(incrementMetric(c, 1));\n  CHECK_RESULT(getMetric(c, &value));\n  logDebug(std::string(\"get counter = \") + std::to_string(value));\n  CHECK_RESULT(recordMetric(c, 3));\n  CHECK_RESULT(getMetric(c, &value));\n  logInfo(std::string(\"get counter = \") + std::to_string(value));\n  CHECK_RESULT(getMetric(g, &value));\n  logWarn(std::string(\"get gauge = \") + std::to_string(value));\n  // Get on histograms is not supported.\n  if (getMetric(h, &value) != WasmResult::Ok) {\n    logError(std::string(\"get histogram = Unsupported\"));\n  }\n  return 1;\n}\n\n// Test the higher level interface.\nWASM_EXPORT(void, proxy_on_tick, (uint32_t)) {\n  Metric c(MetricType::Counter, \"test_counter\",\n           {MetricTag{\"counter_tag\", MetricTag::TagType::String}});\n  Metric g(MetricType::Gauge, \"test_gauge\", {MetricTag{\"gauge_int_tag\", MetricTag::TagType::Int}});\n  Metric h(MetricType::Histogram, \"test_histogram\",\n           {MetricTag{\"histogram_int_tag\", 
MetricTag::TagType::Int},\n            MetricTag{\"histogram_string_tag\", MetricTag::TagType::String},\n            MetricTag{\"histogram_bool_tag\", MetricTag::TagType::Bool}});\n\n  c.increment(1, \"test_tag\");\n  g.record(2, 9);\n  h.record(3, 7, \"test_tag\", true);\n\n  logTrace(std::string(\"get counter = \") + std::to_string(c.get(\"test_tag\")));\n  c.increment(1, \"test_tag\");\n  logDebug(std::string(\"get counter = \") + std::to_string(c.get(\"test_tag\")));\n  c.record(3, \"test_tag\");\n  logInfo(std::string(\"get counter = \") + std::to_string(c.get(\"test_tag\")));\n  logWarn(std::string(\"get gauge = \") + std::to_string(g.get(9)));\n\n  auto hh = h.partiallyResolve(7);\n  auto h_id = hh.resolve(\"test_tag\", true);\n  logError(std::string(\"resolved histogram name = \") + hh.nameFromIdSlow(h_id));\n}\n\n// Test the high level interface.\nWASM_EXPORT(void, proxy_on_log, (uint32_t /* context_zero */)) {\n  auto c = wrap_unique(\n      Counter<std::string, int, bool>::New(\"test_counter\", \"string_tag\", \"int_tag\", \"bool_tag\"));\n  auto g =\n      wrap_unique(Gauge<std::string, std::string>::New(\"test_gauge\", \"string_tag1\", \"string_tag2\"));\n  auto h = wrap_unique(Histogram<int, std::string, bool>::New(\"test_histogram\", \"int_tag\",\n                                                              \"string_tag\", \"bool_tag\"));\n\n  c->increment(1, \"test_tag\", 7, true);\n  logTrace(std::string(\"get counter = \") + std::to_string(c->get(\"test_tag\", 7, true)));\n  auto simple_c = c->resolve(\"test_tag\", 7, true);\n  simple_c++;\n  logDebug(std::string(\"get counter = \") + std::to_string(c->get(\"test_tag\", 7, true)));\n  c->record(3, \"test_tag\", 7, true);\n  logInfo(std::string(\"get counter = \") + std::to_string(c->get(\"test_tag\", 7, true)));\n\n  g->record(2, \"test_tag1\", \"test_tag2\");\n  logWarn(std::string(\"get gauge = \") + std::to_string(g->get(\"test_tag1\", \"test_tag2\")));\n\n  h->record(3, 7, \"test_tag\", 
true);\n  auto base_h = wrap_unique(Counter<int>::New(\"test_histogram\", \"int_tag\"));\n  auto complete_h =\n      wrap_unique(base_h->extendAndResolve<std::string, bool>(7, \"string_tag\", \"bool_tag\"));\n  auto simple_h = complete_h->resolve(\"test_tag\", true);\n  logError(std::string(\"h_id = \") + complete_h->nameFromIdSlow(simple_h.metric_id));\n\n  Counter<std::string, int, bool> stack_c(\"test_counter\", \"string_tag\", \"int_tag\", \"bool_tag\");\n  stack_c.increment(1, \"test_tag_stack\", 7, true);\n  logError(std::string(\"stack_c = \") + std::to_string(stack_c.get(\"test_tag_stack\", 7, true)));\n\n  Gauge<std::string, std::string> stack_g(\"test_gauge\", \"string_tag1\", \"string_tag2\");\n  stack_g.record(2, \"stack_test_tag1\", \"test_tag2\");\n  logError(std::string(\"stack_g = \") + std::to_string(stack_g.get(\"stack_test_tag1\", \"test_tag2\")));\n\n  std::string_view int_tag = \"int_tag\";\n  Histogram<int, std::string, bool> stack_h(\"test_histogram\", int_tag, \"string_tag\", \"bool_tag\");\n  std::string_view stack_test_tag = \"stack_test_tag\";\n  stack_h.record(3, 7, stack_test_tag, true);\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/test_data/stats_cpp_null_plugin.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace proxy_wasm {\nnamespace null_plugin {\nnamespace WasmStatsCpp {\nNullPluginRegistry* context_registry_;\n} // namespace WasmStatsCpp\n\nRegisterNullVmPluginFactory register_wasm_speed_test_plugin(\"WasmStatsCpp\", []() {\n  return std::make_unique<NullPlugin>(WasmStatsCpp::context_registry_);\n});\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/wasm_speed_test.cc",
    "content": "/**\n * Simple WASM speed test.\n *\n * Run with:\n * `bazel run --config=libc++ -c opt //test/extensions/bootstrap/wasm:wasm_speed_test`\n */\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/types/optional.h\"\n#include \"benchmark/benchmark.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"tools/cpp/runfiles/runfiles.h\"\n\nusing bazel::tools::cpp::runfiles::Runfiles;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Wasm {\n\nclass TestRoot : public Envoy::Extensions::Common::Wasm::Context {\npublic:\n  TestRoot(Extensions::Common::Wasm::Wasm* wasm,\n           const std::shared_ptr<Extensions::Common::Wasm::Plugin>& plugin)\n      : Envoy::Extensions::Common::Wasm::Context(wasm, plugin) {}\n\n  using Envoy::Extensions::Common::Wasm::Context::log;\n  proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override {\n    log_(static_cast<spdlog::level::level_enum>(level), message);\n    return proxy_wasm::WasmResult::Ok;\n  }\n  MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message));\n};\n\nstatic void bmWasmSimpleCallSpeedTest(benchmark::State& state, std::string test,\n                                      std::string runtime) {\n  Envoy::Logger::Registry::getLog(Logger::Id::wasm).set_level(spdlog::level::off);\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = 
\"some_long_root_id\";\n  auto vm_id = \"\";\n  auto vm_configuration = test;\n  auto vm_key = \"\";\n  auto plugin_configuration = \"\";\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, runtime, plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", runtime), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  std::string code;\n  if (runtime == \"null\") {\n    code = \"WasmSpeedCpp\";\n  } else {\n    code = TestEnvironment::readFileToStringForTest(\n        TestEnvironment::runfilesPath(\"test/extensions/bootstrap/wasm/test_data/speed_cpp.wasm\"));\n  }\n  EXPECT_FALSE(code.empty());\n  EXPECT_TRUE(wasm->initialize(code, false));\n  wasm->setCreateContextForTesting(\n      nullptr,\n      [](Extensions::Common::Wasm::Wasm* wasm,\n         const std::shared_ptr<Extensions::Common::Wasm::Plugin>& plugin)\n          -> proxy_wasm::ContextBase* { return new TestRoot(wasm, plugin); });\n\n  auto root_context = wasm->start(plugin);\n  for (__attribute__((unused)) auto _ : state) {\n    root_context->onTick(0);\n  }\n}\n\n#if defined(ENVOY_WASM_WAVM)\n#define B(_t)                                                                                      \\\n  BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, V8SpeedTest_##_t, std::string(#_t),                 \\\n                    std::string(\"v8\"));                                                            \\\n  BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, NullSpeedTest_##_t, std::string(#_t),               \\\n                    std::string(\"null\"));                                                          \\\n  BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, WavmSpeedTest_##_t, std::string(#_t),               \\\n                    std::string(\"wavm\"));\n#else\n#define B(_t) 
                                                                                     \\\n  BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, V8SpeedTest_##_t, std::string(#_t),                 \\\n                    std::string(\"v8\"));                                                            \\\n  BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, NullSpeedTest_##_t, std::string(#_t),               \\\n                    std::string(\"null\"));\n#endif\n\nB(empty)\nB(get_current_time)\nB(small_string)\nB(small_string1000)\nB(small_string_check_compiler)\nB(small_string_check_compiler1000)\nB(large_string)\nB(large_string1000)\nB(get_property)\nB(grpc_service)\nB(grpc_service1000)\nB(modify_metadata)\nB(modify_metadata1000)\nB(json_serialize)\nB(json_serialize_arena)\nB(json_deserialize)\nB(json_deserialize_arena)\nB(json_deserialize_empty)\nB(json_serialize_deserialize)\nB(convert_to_filter_state)\n\n} // namespace Wasm\n} // namespace Extensions\n} // namespace Envoy\n\nint main(int argc, char** argv) {\n  ::benchmark::Initialize(&argc, argv);\n  Envoy::TestEnvironment::initializeOptions(argc, argv);\n  // Create a Runfiles object for runfiles lookup.\n  // https://github.com/bazelbuild/bazel/blob/master/tools/cpp/runfiles/runfiles_src.h#L32\n  std::string error;\n  std::unique_ptr<Runfiles> runfiles(Runfiles::Create(argv[0], &error));\n  RELEASE_ASSERT(Envoy::TestEnvironment::getOptionalEnvVar(\"NORUNFILES\").has_value() ||\n                     runfiles != nullptr,\n                 error);\n  Envoy::TestEnvironment::setRunfiles(runfiles.get());\n  Envoy::TestEnvironment::setEnvVar(\"ENVOY_IP_TEST_VERSIONS\", \"all\", 0);\n  Envoy::Event::Libevent::Global::initialize();\n  if (::benchmark::ReportUnrecognizedArguments(argc, argv)) {\n    return 1;\n  }\n  ::benchmark::RunSpecifiedBenchmarks();\n  return 0;\n}\n"
  },
  {
    "path": "test/extensions/bootstrap/wasm/wasm_test.cc",
    "content": "#include \"common/event/dispatcher_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/types/optional.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest-param-test.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Eq;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Wasm {\n\nclass TestContext : public Extensions::Common::Wasm::Context {\npublic:\n  TestContext(Extensions::Common::Wasm::Wasm* wasm,\n              const std::shared_ptr<Extensions::Common::Wasm::Plugin>& plugin)\n      : Extensions::Common::Wasm::Context(wasm, plugin) {}\n  ~TestContext() override = default;\n  using Extensions::Common::Wasm::Context::log;\n  proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override {\n    std::cerr << std::string(message) << \"\\n\";\n    log_(static_cast<spdlog::level::level_enum>(level), message);\n    return proxy_wasm::WasmResult::Ok;\n  }\n  MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message));\n};\n\nclass WasmTestBase {\npublic:\n  WasmTestBase()\n      : api_(Api::createApiForTest(stats_store_)),\n        dispatcher_(api_->allocateDispatcher(\"wasm_test\")),\n        base_scope_(stats_store_.createScope(\"\")), scope_(base_scope_->createScope(\"\")) {}\n\n  void createWasm(absl::string_view runtime) {\n    plugin_ = std::make_shared<Extensions::Common::Wasm::Plugin>(\n        name_, root_id_, vm_id_, runtime, plugin_configuration_, false,\n        envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info_, nullptr);\n    wasm_ = std::make_shared<Extensions::Common::Wasm::Wasm>(\n        absl::StrCat(\"envoy.wasm.runtime.\", runtime), vm_id_, vm_configuration_, vm_key_, scope_,\n        cluster_manager, *dispatcher_);\n    
EXPECT_NE(wasm_, nullptr);\n    wasm_->setCreateContextForTesting(\n        nullptr,\n        [](Extensions::Common::Wasm::Wasm* wasm,\n           const std::shared_ptr<Extensions::Common::Wasm::Plugin>& plugin)\n            -> proxy_wasm::ContextBase* { return new TestContext(wasm, plugin); });\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr api_;\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher_;\n  Stats::ScopeSharedPtr base_scope_;\n  Stats::ScopeSharedPtr scope_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  std::string name_;\n  std::string root_id_;\n  std::string vm_id_;\n  std::string vm_configuration_;\n  std::string vm_key_;\n  std::string plugin_configuration_;\n  std::shared_ptr<Extensions::Common::Wasm::Plugin> plugin_;\n  std::shared_ptr<Extensions::Common::Wasm::Wasm> wasm_;\n};\n\n#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM)\nclass WasmTest : public WasmTestBase, public testing::TestWithParam<std::string> {\npublic:\n  void createWasm() { WasmTestBase::createWasm(GetParam()); }\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto testing_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    \"v8\"\n#endif\n#if defined(ENVOY_WASM_V8) && defined(ENVOY_WASM_WAVM)\n    ,\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    \"wavm\"\n#endif\n);\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmTest, testing_values);\n#endif\n\nclass WasmNullTest : public WasmTestBase, public testing::TestWithParam<std::string> {\npublic:\n  void createWasm() {\n    WasmTestBase::createWasm(GetParam());\n    const auto code =\n        GetParam() != \"null\"\n            ? 
TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n                  \"{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/stats_cpp.wasm\"))\n            : \"WasmStatsCpp\";\n    EXPECT_FALSE(code.empty());\n    EXPECT_TRUE(wasm_->initialize(code, false));\n  }\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto testing_null_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    \"v8\",\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    \"wavm\",\n#endif\n    \"null\");\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmNullTest, testing_null_values);\n\n#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM)\nclass WasmTestMatrix : public WasmTestBase,\n                       public testing::TestWithParam<std::tuple<std::string, std::string>> {\npublic:\n  void createWasm() { WasmTestBase::createWasm(std::get<0>(GetParam())); }\n\n  void setWasmCode(std::string vm_configuration) {\n    const auto basic_path =\n        absl::StrCat(\"test/extensions/bootstrap/wasm/test_data/\", vm_configuration);\n    code_ = TestEnvironment::readFileToStringForTest(\n        TestEnvironment::runfilesPath(basic_path + \"_\" + std::get<1>(GetParam()) + \".wasm\"));\n\n    EXPECT_FALSE(code_.empty());\n  }\n\nprotected:\n  std::string code_;\n};\n\nINSTANTIATE_TEST_SUITE_P(RuntimesAndLanguages, WasmTestMatrix,\n                         testing::Combine(testing::Values(\n#if defined(ENVOY_WASM_V8)\n                                              \"v8\"\n#endif\n#if defined(ENVOY_WASM_V8) && defined(ENVOY_WASM_WAVM)\n                                              ,\n#endif\n#if defined(ENVOY_WASM_WAVM)\n                                              \"wavm\"\n#endif\n                                              ),\n                                          testing::Values(\"cpp\", \"rust\")));\n\nTEST_P(WasmTestMatrix, Logging) {\n  plugin_configuration_ = \"configure-test\";\n  
createWasm();\n  setWasmCode(\"logging\");\n\n  auto wasm_weak = std::weak_ptr<Extensions::Common::Wasm::Wasm>(wasm_);\n  auto wasm_handler = std::make_unique<Extensions::Common::Wasm::WasmHandle>(std::move(wasm_));\n\n  EXPECT_TRUE(wasm_weak.lock()->initialize(code_, false));\n  auto context = static_cast<TestContext*>(wasm_weak.lock()->start(plugin_));\n\n  if (std::get<1>(GetParam()) == \"cpp\") {\n    EXPECT_CALL(*context, log_(spdlog::level::info, Eq(\"printf stdout test\")));\n    EXPECT_CALL(*context, log_(spdlog::level::err, Eq(\"printf stderr test\")));\n  }\n  EXPECT_CALL(*context, log_(spdlog::level::warn, Eq(\"warn configure-test\")));\n  EXPECT_CALL(*context, log_(spdlog::level::trace, Eq(\"test trace logging\")));\n  EXPECT_CALL(*context, log_(spdlog::level::debug, Eq(\"test debug logging\")));\n  EXPECT_CALL(*context, log_(spdlog::level::err, Eq(\"test error logging\")));\n  EXPECT_CALL(*context, log_(spdlog::level::info, Eq(\"test tick logging\")))\n      .Times(testing::AtLeast(1));\n  EXPECT_CALL(*context, log_(spdlog::level::info, Eq(\"onDone logging\")));\n  EXPECT_CALL(*context, log_(spdlog::level::info, Eq(\"onDelete logging\")));\n\n  EXPECT_TRUE(wasm_weak.lock()->configure(context, plugin_));\n  wasm_handler.reset();\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  // This will `SEGV` on nullptr if wasm has been deleted.\n  context->onTick(0);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  dispatcher_->clearDeferredDeleteList();\n}\n#endif\n\n#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM)\nTEST_P(WasmTest, BadSignature) {\n  createWasm();\n  const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/bad_signature_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  EXPECT_FALSE(wasm_->initialize(code, false));\n  EXPECT_TRUE(wasm_->isFailed());\n}\n\nTEST_P(WasmTest, Segv) {\n  createWasm();\n  const auto code = 
TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/segv_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  EXPECT_TRUE(wasm_->initialize(code, false));\n  auto context = static_cast<TestContext*>(wasm_->start(plugin_));\n  EXPECT_CALL(*context, log_(spdlog::level::err, Eq(\"before badptr\")));\n  EXPECT_FALSE(wasm_->configure(context, plugin_));\n  wasm_->isFailed();\n}\n\nTEST_P(WasmTest, DivByZero) {\n  createWasm();\n  const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/segv_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  EXPECT_TRUE(wasm_->initialize(code, false));\n  auto context = static_cast<TestContext*>(wasm_->start(plugin_));\n  EXPECT_CALL(*context, log_(spdlog::level::err, Eq(\"before div by zero\")));\n  context->onLog();\n  wasm_->isFailed();\n}\n\nTEST_P(WasmTest, EmscriptenVersion) {\n  createWasm();\n  const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/segv_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  EXPECT_TRUE(wasm_->initialize(code, false));\n  uint32_t major = 9, minor = 9, abi_major = 9, abi_minor = 9;\n  EXPECT_TRUE(wasm_->getEmscriptenVersion(&major, &minor, &abi_major, &abi_minor));\n  EXPECT_EQ(major, 0);\n  EXPECT_LE(minor, 3);\n  // Up to (at least) emsdk 1.39.6.\n  EXPECT_EQ(abi_major, 0);\n  EXPECT_LE(abi_minor, 20);\n}\n\nTEST_P(WasmTest, IntrinsicGlobals) {\n  createWasm();\n  const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/emscripten_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  EXPECT_TRUE(wasm_->initialize(code, false));\n  auto context = static_cast<TestContext*>(wasm_->start(plugin_));\n  EXPECT_CALL(*context, 
log_(spdlog::level::info, Eq(\"NaN nan\")));\n  EXPECT_CALL(*context, log_(spdlog::level::warn, Eq(\"inf inf\"))).Times(3);\n  EXPECT_TRUE(wasm_->configure(context, plugin_));\n}\n\n// The `asm2wasm.wasm` file uses operations which would require the `asm2wasm` Emscripten module\n// *if* em++ is invoked with the trap mode \"clamp\". See\n// https://emscripten.org/docs/compiling/WebAssembly.html This test demonstrates that the `asm2wasm`\n// module is not required with the trap mode is set to \"allow\". Note: future Wasm standards will\n// change this behavior by providing non-trapping instructions, but in the mean time we support the\n// default Emscripten behavior.\nTEST_P(WasmTest, Asm2Wasm) {\n  createWasm();\n  const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/asm2wasm_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  EXPECT_TRUE(wasm_->initialize(code, false));\n  auto context = static_cast<TestContext*>(wasm_->start(plugin_));\n  EXPECT_CALL(*context, log_(spdlog::level::info, Eq(\"out 0 0 0\")));\n  EXPECT_TRUE(wasm_->configure(context, plugin_));\n}\n#endif\n\nTEST_P(WasmNullTest, Stats) {\n  createWasm();\n  auto context = static_cast<TestContext*>(wasm_->start(plugin_));\n\n  EXPECT_CALL(*context, log_(spdlog::level::trace, Eq(\"get counter = 1\")));\n  EXPECT_CALL(*context, log_(spdlog::level::debug, Eq(\"get counter = 2\")));\n  // recordMetric on a Counter is the same as increment.\n  EXPECT_CALL(*context, log_(spdlog::level::info, Eq(\"get counter = 5\")));\n  EXPECT_CALL(*context, log_(spdlog::level::warn, Eq(\"get gauge = 2\")));\n  // Get is not supported on histograms.\n  EXPECT_CALL(*context, log_(spdlog::level::err, Eq(\"get histogram = Unsupported\")));\n\n  EXPECT_TRUE(wasm_->configure(context, plugin_));\n  EXPECT_EQ(scope_->counterFromString(\"test_counter\").value(), 5);\n  EXPECT_EQ(scope_->gaugeFromString(\"test_gauge\", 
Stats::Gauge::ImportMode::Accumulate).value(), 2);\n}\n\nTEST_P(WasmNullTest, StatsHigherLevel) {\n  createWasm();\n  auto context = static_cast<TestContext*>(wasm_->start(plugin_));\n\n  EXPECT_CALL(*context, log_(spdlog::level::trace, Eq(\"get counter = 1\")));\n  EXPECT_CALL(*context, log_(spdlog::level::debug, Eq(\"get counter = 2\")));\n  // recordMetric on a Counter is the same as increment.\n  EXPECT_CALL(*context, log_(spdlog::level::info, Eq(\"get counter = 5\")));\n  EXPECT_CALL(*context, log_(spdlog::level::warn, Eq(\"get gauge = 2\")));\n  // Get is not supported on histograms.\n  EXPECT_CALL(*context, log_(spdlog::level::err,\n                             Eq(std::string(\"resolved histogram name = \"\n                                            \"histogram_int_tag.7.histogram_string_tag.test_tag.\"\n                                            \"histogram_bool_tag.true.test_histogram\"))));\n\n  wasm_->setTimerPeriod(1, std::chrono::milliseconds(10));\n  wasm_->tickHandler(1);\n  EXPECT_EQ(scope_->counterFromString(\"counter_tag.test_tag.test_counter\").value(), 5);\n  EXPECT_EQ(\n      scope_->gaugeFromString(\"gauge_int_tag.9.test_gauge\", Stats::Gauge::ImportMode::Accumulate)\n          .value(),\n      2);\n}\n\nTEST_P(WasmNullTest, StatsHighLevel) {\n  createWasm();\n  auto context = static_cast<TestContext*>(wasm_->start(plugin_));\n\n  EXPECT_CALL(*context, log_(spdlog::level::trace, Eq(\"get counter = 1\")));\n  EXPECT_CALL(*context, log_(spdlog::level::debug, Eq(\"get counter = 2\")));\n  // recordMetric on a Counter is the same as increment.\n  EXPECT_CALL(*context, log_(spdlog::level::info, Eq(\"get counter = 5\")));\n  EXPECT_CALL(*context, log_(spdlog::level::warn, Eq(\"get gauge = 2\")));\n  // Get is not supported on histograms.\n  // EXPECT_CALL(*context, log_(spdlog::level::err, Eq(std::string(\"resolved histogram name\n  // = int_tag.7_string_tag.test_tag.bool_tag.true.test_histogram\"))));\n  EXPECT_CALL(*context,\n              
log_(spdlog::level::err,\n                   Eq(\"h_id = int_tag.7.string_tag.test_tag.bool_tag.true.test_histogram\")));\n  EXPECT_CALL(*context, log_(spdlog::level::err, Eq(\"stack_c = 1\")));\n  EXPECT_CALL(*context, log_(spdlog::level::err, Eq(\"stack_g = 2\")));\n  // Get is not supported on histograms.\n  // EXPECT_CALL(*context, log_(spdlog::level::err, Eq(\"stack_h = 3\")));\n  context->onLog();\n  EXPECT_EQ(\n      scope_->counterFromString(\"string_tag.test_tag.int_tag.7.bool_tag.true.test_counter\").value(),\n      5);\n  EXPECT_EQ(scope_\n                ->gaugeFromString(\"string_tag1.test_tag1.string_tag2.test_tag2.test_gauge\",\n                                  Stats::Gauge::ImportMode::Accumulate)\n                .value(),\n            2);\n}\n\n} // namespace Wasm\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/aggregate/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"cluster_test\",\n    srcs = [\"cluster_test.cc\"],\n    extension_name = \"envoy.clusters.aggregate\",\n    deps = [\n        \"//source/extensions/clusters/aggregate:cluster\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:load_balancer_context_mock\",\n        \"//test/mocks/upstream:load_balancer_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/clusters/aggregate/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"cluster_update_test\",\n    srcs = [\"cluster_update_test.cc\"],\n    extension_name = \"envoy.clusters.aggregate\",\n    deps = [\n        \"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:cluster_manager_lib\",\n        \"//source/extensions/clusters/aggregate:cluster\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//test/common/upstream:test_cluster_manager\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:cluster_update_callbacks_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        
\"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/clusters/aggregate/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"cluster_integration_test\",\n    srcs = [\"cluster_integration_test.cc\"],\n    extension_name = \"envoy.clusters.aggregate\",\n    deps = [\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/clusters/aggregate:cluster\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//source/extensions/retry/priority/previous_priorities:config\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/integration:integration_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/clusters/aggregate/cluster_integration_test.cc",
    "content": "#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/protobuf_link_hacks.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/notification.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::AssertionResult;\n\nnamespace Envoy {\nnamespace {\n\nconst char FirstClusterName[] = \"cluster_1\";\nconst char SecondClusterName[] = \"cluster_2\";\n// Index in fake_upstreams_\nconst int FirstUpstreamIndex = 2;\nconst int SecondUpstreamIndex = 3;\n\nconst std::string& config() {\n  CONSTRUCT_ON_FIRST_USE(std::string, fmt::format(R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\ndynamic_resources:\n  cds_config:\n    api_config_source:\n      api_type: GRPC\n      grpc_services:\n        envoy_grpc:\n          cluster_name: my_cds_cluster\n      set_node_on_first_message_only: false\nstatic_resources:\n  clusters:\n  - name: my_cds_cluster\n    http2_protocol_options: {{}}\n    load_assignment:\n      cluster_name: my_cds_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 0\n  - name: aggregate_cluster\n    connect_timeout: 0.25s\n    lb_policy: CLUSTER_PROVIDED\n    protocol_selection: USE_DOWNSTREAM_PROTOCOL # this should be ignored, as cluster_1 and cluster_2 specify HTTP/2.\n    cluster_type:\n      name: envoy.clusters.aggregate\n      typed_config:\n        
\"@type\": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig\n        clusters:\n        - cluster_1\n        - cluster_2\n  listeners:\n  - name: http\n    address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 0\n    filter_chains:\n      filters:\n        name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: config_test\n          http_filters:\n            name: envoy.filters.http.router\n          codec_type: HTTP1\n          route_config:\n            name: route_config_0\n            validate_clusters: false\n            virtual_hosts:\n              name: integration\n              routes:\n              - route:\n                  cluster: cluster_1\n                match:\n                  prefix: \"/cluster1\"\n              - route:\n                  cluster: cluster_2\n                match:\n                  prefix: \"/cluster2\"\n              - route:\n                  cluster: aggregate_cluster\n                  retry_policy:\n                    retry_priority:\n                      name: envoy.retry_priorities.previous_priorities\n                      typed_config:\n                        \"@type\": type.googleapis.com/envoy.config.retry.previous_priorities.PreviousPrioritiesConfig\n                        update_frequency: 1\n                match:\n                  prefix: \"/aggregatecluster\"\n              domains: \"*\"\n)EOF\",\n                                                  Platform::null_device_path));\n}\n\nclass AggregateIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                 public HttpIntegrationTest {\npublic:\n  AggregateIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(), config()) {\n    use_lds_ = false;\n  }\n\n  void TearDown() override { 
cleanUpXdsConnection(); }\n\n  void initialize() override {\n    use_lds_ = false;\n    setUpstreamCount(2);                                  // the CDS cluster\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // CDS uses gRPC uses HTTP2.\n\n    defer_listener_finalization_ = true;\n    HttpIntegrationTest::initialize();\n\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n    cluster1_ = ConfigHelper::buildStaticCluster(\n        FirstClusterName, fake_upstreams_[FirstUpstreamIndex]->localAddress()->ip()->port(),\n        Network::Test::getLoopbackAddressString(GetParam()));\n    cluster2_ = ConfigHelper::buildStaticCluster(\n        SecondClusterName, fake_upstreams_[SecondUpstreamIndex]->localAddress()->ip()->port(),\n        Network::Test::getLoopbackAddressString(GetParam()));\n\n    // Let Envoy establish its connection to the CDS server.\n    acceptXdsConnection();\n\n    // Do the initial compareDiscoveryRequest / sendDiscoveryResponse for cluster_1.\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n    sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                               {cluster1_}, {cluster1_}, {}, \"55\");\n\n    test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 3);\n\n    // Wait for our statically specified listener to become ready, and register its port in the\n    // test framework's downstream listener port map.\n    test_server_->waitUntilListenersReady();\n    registerTestServerPorts({\"http\"});\n  }\n\n  void acceptXdsConnection() {\n    AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection.\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n  
  RELEASE_ASSERT(result, result.message());\n    xds_stream_->startGrpcStream();\n  }\n\n  envoy::config::cluster::v3::Cluster cluster1_;\n  envoy::config::cluster::v3::Cluster cluster2_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AggregateIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));\n\nTEST_P(AggregateIntegrationTest, ClusterUpDownUp) {\n  // Calls our initialize(), which includes establishing a listener, route, and cluster.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, \"/aggregatecluster\");\n\n  // Tell Envoy that cluster_1 is gone.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"55\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster, {}, {},\n                                                             {FirstClusterName}, \"42\");\n  // We can continue the test once we're sure that Envoy's ClusterManager has made use of\n  // the DiscoveryResponse that says cluster_1 is gone.\n  test_server_->waitForCounterGe(\"cluster_manager.cluster_removed\", 1);\n\n  // Now that cluster_1 is gone, the listener (with its routing to cluster_1) should 503.\n  BufferingStreamDecoderPtr response =\n      IntegrationUtil::makeSingleRequest(lookupPort(\"http\"), \"GET\", \"/aggregatecluster\", \"\",\n                                         downstream_protocol_, version_, \"foo.com\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Tell Envoy that cluster_1 is back.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"42\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {cluster1_}, {cluster1_}, {}, \"413\");\n\n  
test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 3);\n  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, \"/aggregatecluster\");\n\n  cleanupUpstreamAndDownstream();\n}\n\n// Tests adding a cluster, adding another, then removing the first.\nTEST_P(AggregateIntegrationTest, TwoClusters) {\n  // Calls our initialize(), which includes establishing a listener, route, and cluster.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, \"/aggregatecluster\");\n\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Tell Envoy that cluster_2 is here.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"55\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {cluster1_, cluster2_}, {cluster2_}, {}, \"42\");\n  // The '4' includes the fake CDS server and aggregate cluster.\n  test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 4);\n\n  // A request for aggregate cluster should be fine.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, \"/aggregatecluster\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Tell Envoy that cluster_1 is gone.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"42\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {cluster2_}, {}, {FirstClusterName}, \"42\");\n  // We can continue the test once we're sure that Envoy's ClusterManager has made use of\n  // the DiscoveryResponse that says cluster_1 is gone.\n  test_server_->waitForCounterGe(\"cluster_manager.cluster_removed\", 1);\n\n  testRouterHeaderOnlyRequestAndResponse(nullptr, SecondUpstreamIndex, \"/aggregatecluster\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Tell Envoy that cluster_1 is 
back.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"42\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {cluster1_, cluster2_}, {cluster1_}, {}, \"413\");\n\n  test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 4);\n  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, \"/aggregatecluster\");\n\n  cleanupUpstreamAndDownstream();\n}\n\n// Test that the PreviousPriorities retry predicate works as expected. It is configured\n// in this test to exclude a priority after a single failure, so the first failure\n// on cluster_1 results in the retry going to cluster_2.\nTEST_P(AggregateIntegrationTest, PreviousPrioritiesRetryPredicate) {\n  initialize();\n\n  // Tell Envoy that cluster_2 is here.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {cluster1_, cluster2_}, {cluster2_}, {}, \"42\");\n  // The '4' includes the fake CDS server and aggregate cluster.\n  test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 4);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/aggregatecluster\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024);\n  waitForNextUpstreamRequest(FirstUpstreamIndex);\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  
fake_upstream_connection_.reset();\n\n  waitForNextUpstreamRequest(SecondUpstreamIndex);\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  cleanupUpstreamAndDownstream();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/aggregate/cluster_test.cc",
    "content": "#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/aggregate/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.h\"\n\n#include \"common/singleton/manager_impl.h\"\n\n#include \"extensions/clusters/aggregate/cluster.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/load_balancer.h\"\n#include \"test/mocks/upstream/load_balancer_context.h\"\n#include \"test/test_common/environment.h\"\n\nusing testing::Eq;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Aggregate {\n\nnamespace {\nconst std::string primary_name(\"primary\");\nconst std::string secondary_name(\"secondary\");\n} // namespace\n\nclass AggregateClusterTest : public testing::Test {\npublic:\n  AggregateClusterTest() : stats_(Upstream::ClusterInfoImpl::generateStats(stats_store_)) {\n    ON_CALL(*primary_info_, name()).WillByDefault(ReturnRef(primary_name));\n    ON_CALL(*secondary_info_, name()).WillByDefault(ReturnRef(secondary_name));\n  }\n\n  Upstream::HostVector setupHostSet(Upstream::ClusterInfoConstSharedPtr cluster, int healthy_hosts,\n                                    int degraded_hosts, int unhealthy_hosts, uint32_t priority) {\n    Upstream::HostVector hosts;\n    for (int i = 0; i < healthy_hosts; ++i) {\n      hosts.emplace_back(Upstream::makeTestHost(cluster, \"tcp://127.0.0.1:80\", 1, priority));\n    }\n\n    for (int i = 0; i < degraded_hosts; ++i) {\n      Upstream::HostSharedPtr host =\n          Upstream::makeTestHost(cluster, \"tcp://127.0.0.2:80\", 1, priority);\n      host->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);\n      hosts.emplace_back(host);\n    }\n\n    for (int i = 0; i < 
unhealthy_hosts; ++i) {\n      Upstream::HostSharedPtr host =\n          Upstream::makeTestHost(cluster, \"tcp://127.0.0.3:80\", 1, priority);\n      host->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);\n      hosts.emplace_back(host);\n    }\n\n    return hosts;\n  }\n\n  void setupPrimary(int priority, int healthy_hosts, int degraded_hosts, int unhealthy_hosts) {\n    auto hosts =\n        setupHostSet(primary_info_, healthy_hosts, degraded_hosts, unhealthy_hosts, priority);\n    primary_ps_.updateHosts(\n        priority,\n        Upstream::HostSetImpl::partitionHosts(std::make_shared<Upstream::HostVector>(hosts),\n                                              Upstream::HostsPerLocalityImpl::empty()),\n        nullptr, hosts, {}, 100);\n    cluster_->refresh();\n  }\n\n  void setupSecondary(int priority, int healthy_hosts, int degraded_hosts, int unhealthy_hosts) {\n    auto hosts =\n        setupHostSet(secondary_info_, healthy_hosts, degraded_hosts, unhealthy_hosts, priority);\n    secondary_ps_.updateHosts(\n        priority,\n        Upstream::HostSetImpl::partitionHosts(std::make_shared<Upstream::HostVector>(hosts),\n                                              Upstream::HostsPerLocalityImpl::empty()),\n        nullptr, hosts, {}, 100);\n    cluster_->refresh();\n  }\n\n  void setupPrioritySet() {\n    setupPrimary(0, 1, 1, 1);\n    setupPrimary(1, 2, 2, 2);\n    setupSecondary(0, 2, 2, 2);\n    setupSecondary(1, 1, 1, 1);\n  }\n\n  void initialize(const std::string& yaml_config) {\n    envoy::config::cluster::v3::Cluster cluster_config =\n        Upstream::parseClusterFromV3Yaml(yaml_config);\n    envoy::extensions::clusters::aggregate::v3::ClusterConfig config;\n    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(),\n                                           ProtobufWkt::Struct::default_instance(),\n                                           ProtobufMessage::getStrictValidationVisitor(), config);\n    
Stats::ScopePtr scope = stats_store_.createScope(\"cluster.name.\");\n    Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_store_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n\n    cluster_ =\n        std::make_shared<Cluster>(cluster_config, config, cm_, runtime_, api_->randomGenerator(),\n                                  factory_context, std::move(scope), tls_, false);\n\n    thread_aware_lb_ = std::make_unique<AggregateThreadAwareLoadBalancer>(*cluster_);\n    lb_factory_ = thread_aware_lb_->factory();\n    lb_ = lb_factory_->create();\n\n    EXPECT_CALL(cm_, get(Eq(\"aggregate_cluster\"))).WillRepeatedly(Return(&aggregate_cluster_));\n    EXPECT_CALL(cm_, get(Eq(\"primary\"))).WillRepeatedly(Return(&primary_));\n    EXPECT_CALL(cm_, get(Eq(\"secondary\"))).WillRepeatedly(Return(&secondary_));\n    EXPECT_CALL(cm_, get(Eq(\"tertiary\"))).WillRepeatedly(Return(nullptr));\n    ON_CALL(primary_, prioritySet()).WillByDefault(ReturnRef(primary_ps_));\n    ON_CALL(secondary_, prioritySet()).WillByDefault(ReturnRef(secondary_ps_));\n    ON_CALL(aggregate_cluster_, loadBalancer()).WillByDefault(ReturnRef(*lb_));\n\n    setupPrioritySet();\n\n    ON_CALL(primary_, loadBalancer()).WillByDefault(ReturnRef(primary_load_balancer_));\n    ON_CALL(secondary_, loadBalancer()).WillByDefault(ReturnRef(secondary_load_balancer_));\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Ssl::MockContextManager ssl_context_manager_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  
NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_{Api::createApiForTest(stats_store_, random_)};\n  std::shared_ptr<Cluster> cluster_;\n  Upstream::ThreadAwareLoadBalancerPtr thread_aware_lb_;\n  Upstream::LoadBalancerFactorySharedPtr lb_factory_;\n  Upstream::LoadBalancerPtr lb_;\n  Upstream::ClusterStats stats_;\n  std::shared_ptr<Upstream::MockClusterInfo> primary_info_{\n      new NiceMock<Upstream::MockClusterInfo>()};\n  std::shared_ptr<Upstream::MockClusterInfo> secondary_info_{\n      new NiceMock<Upstream::MockClusterInfo>()};\n  NiceMock<Upstream::MockThreadLocalCluster> aggregate_cluster_, primary_, secondary_;\n  Upstream::PrioritySetImpl primary_ps_, secondary_ps_;\n  NiceMock<Upstream::MockLoadBalancer> primary_load_balancer_, secondary_load_balancer_;\n\n  const std::string default_yaml_config_ = R\"EOF(\n    name: aggregate_cluster\n    connect_timeout: 0.25s\n    lb_policy: CLUSTER_PROVIDED\n    cluster_type:\n      name: envoy.clusters.aggregate\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig\n        clusters:\n        - primary\n        - secondary\n)EOF\";\n}; // namespace Aggregate\n\nTEST_F(AggregateClusterTest, LoadBalancerTest) {\n  initialize(default_yaml_config_);\n  // Health value:\n  // Cluster 1:\n  //     Priority 0: 33.3%\n  //     Priority 1: 33.3%\n  // Cluster 2:\n  //     Priority 0: 33.3%\n  //     Priority 1: 33.3%\n  Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, \"tcp://127.0.0.1:80\");\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n\n  for (int i = 0; i <= 65; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    EXPECT_TRUE(lb_->peekAnotherHost(nullptr) == nullptr);\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    
EXPECT_EQ(host.get(), target.get());\n  }\n\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n  for (int i = 66; i < 100; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    EXPECT_EQ(host.get(), target.get());\n  }\n\n  // Set up the HostSet with 1 healthy, 1 degraded and 2 unhealthy.\n  setupPrimary(0, 1, 1, 2);\n\n  // Health value:\n  // Cluster 1:\n  //     Priority 0: 25%\n  //     Priority 1: 33.3%\n  // Cluster 2:\n  //     Priority 0: 33.3%\n  //     Priority 1: 33.3%\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n\n  for (int i = 0; i <= 57; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    EXPECT_EQ(host.get(), target.get());\n  }\n\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n  for (int i = 58; i < 100; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    EXPECT_EQ(host.get(), target.get());\n  }\n}\n\nTEST_F(AggregateClusterTest, AllHostAreUnhealthyTest) {\n  initialize(default_yaml_config_);\n  Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, \"tcp://127.0.0.1:80\");\n  // Set up the HostSet with 0 healthy, 0 degraded and 2 unhealthy.\n  setupPrimary(0, 0, 0, 2);\n  setupPrimary(1, 0, 0, 2);\n\n  // Set up the HostSet with 0 healthy, 0 degraded and 2 unhealthy.\n  setupSecondary(0, 0, 0, 2);\n  setupSecondary(1, 0, 0, 2);\n  // Health value:\n  // Cluster 1:\n  //     Priority 0: 0%\n  //     Priority 1: 
0%\n  // Cluster 2:\n  //     Priority 0: 0%\n  //     Priority 1: 0%\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n\n  // Choose the first cluster as the second one is unavailable.\n  for (int i = 0; i < 50; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    EXPECT_EQ(host.get(), target.get());\n  }\n\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n\n  // Choose the second cluster as the first one is unavailable.\n  for (int i = 50; i < 100; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    EXPECT_EQ(host.get(), target.get());\n  }\n}\n\nTEST_F(AggregateClusterTest, ClusterInPanicTest) {\n  initialize(default_yaml_config_);\n  Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, \"tcp://127.0.0.1:80\");\n  setupPrimary(0, 1, 0, 4);\n  setupPrimary(1, 1, 0, 4);\n  setupSecondary(0, 1, 0, 4);\n  setupSecondary(1, 1, 0, 4);\n  // Health value:\n  // Cluster 1:\n  //     Priority 0: 20%\n  //     Priority 1: 20%\n  // Cluster 2:\n  //     Priority 0: 20%\n  //     Priority 1: 20%\n  // All priorities are in panic mode. 
Traffic will be distributed evenly among four priorities.\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n\n  for (int i = 0; i < 50; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    EXPECT_EQ(host.get(), target.get());\n  }\n\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n\n  for (int i = 50; i < 100; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    EXPECT_EQ(host.get(), target.get());\n  }\n\n  setupPrimary(0, 1, 0, 9);\n  setupPrimary(1, 1, 0, 9);\n  setupSecondary(0, 1, 0, 9);\n  setupSecondary(1, 1, 0, 1);\n  // Health value:\n  // Cluster 1:\n  //     Priority 0: 10%\n  //     Priority 1: 10%\n  // Cluster 2:\n  //     Priority 0: 10%\n  //     Priority 1: 50%\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n\n  for (int i = 0; i <= 25; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    EXPECT_EQ(host.get(), target.get());\n  }\n\n  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));\n\n  for (int i = 26; i < 100; ++i) {\n    EXPECT_CALL(random_, random()).WillOnce(Return(i));\n    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);\n    EXPECT_EQ(host.get(), target.get());\n  }\n}\n\nTEST_F(AggregateClusterTest, LBContextTest) {\n  AggregateLoadBalancerContext context(nullptr,\n                       
                Upstream::LoadBalancerBase::HostAvailability::Healthy, 0);\n\n  EXPECT_EQ(context.computeHashKey().has_value(), false);\n  EXPECT_EQ(context.downstreamConnection(), nullptr);\n  EXPECT_EQ(context.metadataMatchCriteria(), nullptr);\n  EXPECT_EQ(context.downstreamHeaders(), nullptr);\n  EXPECT_EQ(context.upstreamSocketOptions(), nullptr);\n  EXPECT_EQ(context.upstreamTransportSocketOptions(), nullptr);\n}\n\nTEST_F(AggregateClusterTest, ContextDeterminePriorityLoad) {\n  Upstream::MockLoadBalancerContext lb_context;\n  initialize(default_yaml_config_);\n  setupPrimary(0, 1, 0, 0);\n  setupPrimary(1, 1, 0, 0);\n  setupSecondary(0, 1, 0, 0);\n  setupSecondary(1, 1, 0, 0);\n\n  const uint32_t invalid_priority = 42;\n  Upstream::HostSharedPtr host =\n      Upstream::makeTestHost(primary_info_, \"tcp://127.0.0.1:80\", 1, invalid_priority);\n\n  // The linearized priorities are [P0, P1, S0, S1].\n  Upstream::HealthyAndDegradedLoad secondary_priority_1{Upstream::HealthyLoad({0, 0, 0, 100}),\n                                                        Upstream::DegradedLoad()};\n\n  // Validate that lb_context->determinePriorityLoad() is called and that the mapping function\n  // passed in works correctly.\n  EXPECT_CALL(lb_context, determinePriorityLoad(_, _, _))\n      .WillOnce(Invoke([&](const Upstream::PrioritySet&, const Upstream::HealthyAndDegradedLoad&,\n                           const Upstream::RetryPriority::PriorityMappingFunc& mapping_func)\n                           -> const Upstream::HealthyAndDegradedLoad& {\n        // This one isn't part of the mapping due to an invalid priority.\n        EXPECT_FALSE(mapping_func(*host).has_value());\n\n        // Helper to get a host from the given set and priority\n        auto host_from_priority = [](Upstream::PrioritySetImpl& ps,\n                                     uint32_t priority) -> const Upstream::HostDescription& {\n          return *(ps.hostSetsPerPriority()[priority]->hosts()[0]);\n        };\n\n 
       EXPECT_EQ(mapping_func(host_from_priority(primary_ps_, 0)), absl::optional<uint32_t>(0));\n        EXPECT_EQ(mapping_func(host_from_priority(primary_ps_, 1)), absl::optional<uint32_t>(1));\n        EXPECT_EQ(mapping_func(host_from_priority(secondary_ps_, 0)), absl::optional<uint32_t>(2));\n        EXPECT_EQ(mapping_func(host_from_priority(secondary_ps_, 1)), absl::optional<uint32_t>(3));\n\n        return secondary_priority_1;\n      }));\n\n  // Validate that the AggregateLoadBalancerContext is initialized with the weights from\n  // lb_context->determinePriorityLoad().\n  EXPECT_CALL(secondary_load_balancer_, chooseHost(_))\n      .WillOnce(Invoke([this, &host](\n                           Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n        const Upstream::HealthyAndDegradedLoad& adjusted_load = context->determinePriorityLoad(\n            secondary_ps_, {Upstream::HealthyLoad({100, 0}), Upstream::DegradedLoad()}, nullptr);\n\n        EXPECT_EQ(adjusted_load.healthy_priority_load_.get().size(), 2);\n        EXPECT_EQ(adjusted_load.healthy_priority_load_.get().at(0), 0);\n        EXPECT_EQ(adjusted_load.healthy_priority_load_.get().at(1), 100);\n\n        return host;\n      }));\n\n  lb_->chooseHost(&lb_context);\n}\n\n} // namespace Aggregate\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/aggregate/cluster_update_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/clusters/aggregate/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.h\"\n\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/cluster_factory_impl.h\"\n#include \"common/upstream/cluster_manager_impl.h\"\n\n#include \"extensions/clusters/aggregate/cluster.h\"\n\n#include \"test/common/upstream/test_cluster_manager.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/cluster_update_callbacks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Aggregate {\n\nenvoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV2Yaml(const std::string& yaml) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  TestUtility::loadFromYaml(yaml, bootstrap);\n  return bootstrap;\n}\n\nclass AggregateClusterUpdateTest : public testing::Test {\npublic:\n  AggregateClusterUpdateTest()\n      : http_context_(stats_store_.symbolTable()), grpc_context_(stats_store_.symbolTable()) {}\n\n  void initialize(const std::string& yaml_config) {\n    auto bootstrap = parseBootstrapFromV2Yaml(yaml_config);\n    cluster_manager_ = std::make_unique<Upstream::TestClusterManagerImpl>(\n        bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_,\n        factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_,\n        *factory_.api_, http_context_, grpc_context_);\n    cluster_manager_->initializeSecondaryClusters(bootstrap);\n    EXPECT_EQ(cluster_manager_->activeClusters().size(), 1);\n    cluster_ = cluster_manager_->get(\"aggregate_cluster\");\n  }\n\n  Stats::IsolatedStoreImpl 
stats_store_;\n  NiceMock<Server::MockAdmin> admin_;\n  NiceMock<Upstream::TestClusterManagerFactory> factory_;\n  Upstream::ThreadLocalCluster* cluster_;\n\n  Event::SimulatedTimeSystem time_system_;\n  NiceMock<ProtobufMessage::MockValidationContext> validation_context_;\n  std::unique_ptr<Upstream::TestClusterManagerImpl> cluster_manager_;\n  AccessLog::MockAccessLogManager log_manager_;\n  Http::ContextImpl http_context_;\n  Grpc::ContextImpl grpc_context_;\n\n  const std::string default_yaml_config_ = R\"EOF(\n static_resources:\n  clusters:\n  - name: aggregate_cluster\n    connect_timeout: 0.25s\n    lb_policy: CLUSTER_PROVIDED\n    cluster_type:\n      name: envoy.clusters.aggregate\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig\n        clusters:\n        - primary\n        - secondary\n  )EOF\";\n};\n\nTEST_F(AggregateClusterUpdateTest, NoHealthyUpstream) {\n  initialize(default_yaml_config_);\n  EXPECT_EQ(nullptr, cluster_->loadBalancer().chooseHost(nullptr));\n}\n\nTEST_F(AggregateClusterUpdateTest, BasicFlow) {\n  initialize(default_yaml_config_);\n\n  std::unique_ptr<Upstream::MockClusterUpdateCallbacks> callbacks(\n      new NiceMock<Upstream::MockClusterUpdateCallbacks>());\n  Upstream::ClusterUpdateCallbacksHandlePtr cb =\n      cluster_manager_->addThreadLocalClusterUpdateCallbacks(*callbacks);\n\n  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster(\"primary\"), \"\"));\n  auto primary = cluster_manager_->get(\"primary\");\n  EXPECT_NE(nullptr, primary);\n  auto host = cluster_->loadBalancer().chooseHost(nullptr);\n  EXPECT_NE(nullptr, host);\n  EXPECT_EQ(\"primary\", host->cluster().name());\n  EXPECT_EQ(\"127.0.0.1:11001\", host->address()->asString());\n\n  EXPECT_TRUE(\n      cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster(\"secondary\"), \"\"));\n  auto secondary = cluster_manager_->get(\"secondary\");\n  EXPECT_NE(nullptr, 
secondary);\n  host = cluster_->loadBalancer().chooseHost(nullptr);\n  EXPECT_NE(nullptr, host);\n  EXPECT_EQ(\"primary\", host->cluster().name());\n  EXPECT_EQ(\"127.0.0.1:11001\", host->address()->asString());\n\n  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster(\"tertiary\"), \"\"));\n  auto tertiary = cluster_manager_->get(\"tertiary\");\n  EXPECT_NE(nullptr, tertiary);\n  host = cluster_->loadBalancer().chooseHost(nullptr);\n  EXPECT_NE(nullptr, host);\n  EXPECT_EQ(\"primary\", host->cluster().name());\n  EXPECT_EQ(\"127.0.0.1:11001\", host->address()->asString());\n\n  EXPECT_TRUE(cluster_manager_->removeCluster(\"primary\"));\n  EXPECT_EQ(nullptr, cluster_manager_->get(\"primary\"));\n  host = cluster_->loadBalancer().chooseHost(nullptr);\n  EXPECT_NE(nullptr, host);\n  EXPECT_EQ(\"secondary\", host->cluster().name());\n  EXPECT_EQ(\"127.0.0.1:11001\", host->address()->asString());\n  EXPECT_EQ(3, cluster_manager_->activeClusters().size());\n\n  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster(\"primary\"), \"\"));\n  primary = cluster_manager_->get(\"primary\");\n  EXPECT_NE(nullptr, primary);\n  host = cluster_->loadBalancer().chooseHost(nullptr);\n  EXPECT_NE(nullptr, host);\n  EXPECT_EQ(\"primary\", host->cluster().name());\n  EXPECT_EQ(\"127.0.0.1:11001\", host->address()->asString());\n}\n\nTEST_F(AggregateClusterUpdateTest, LoadBalancingTest) {\n  initialize(default_yaml_config_);\n  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster(\"primary\"), \"\"));\n  auto primary = cluster_manager_->get(\"primary\");\n  EXPECT_NE(nullptr, primary);\n  EXPECT_TRUE(\n      cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster(\"secondary\"), \"\"));\n  auto secondary = cluster_manager_->get(\"secondary\");\n  EXPECT_NE(nullptr, secondary);\n\n  // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy.\n  Upstream::HostSharedPtr host1 = 
Upstream::makeTestHost(primary->info(), \"tcp://127.0.0.1:80\");\n  host1->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);\n  Upstream::HostSharedPtr host2 = Upstream::makeTestHost(primary->info(), \"tcp://127.0.0.2:80\");\n  host2->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);\n  Upstream::HostSharedPtr host3 = Upstream::makeTestHost(primary->info(), \"tcp://127.0.0.3:80\");\n  Upstream::Cluster& cluster = cluster_manager_->activeClusters().find(\"primary\")->second;\n  cluster.prioritySet().updateHosts(\n      0,\n      Upstream::HostSetImpl::partitionHosts(\n          std::make_shared<Upstream::HostVector>(Upstream::HostVector{host1, host2, host3}),\n          Upstream::HostsPerLocalityImpl::empty()),\n      nullptr, {host1, host2, host3}, {}, 100);\n\n  // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy.\n  Upstream::HostSharedPtr host4 = Upstream::makeTestHost(secondary->info(), \"tcp://127.0.0.4:80\");\n  host4->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);\n  Upstream::HostSharedPtr host5 = Upstream::makeTestHost(secondary->info(), \"tcp://127.0.0.5:80\");\n  host5->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);\n  Upstream::HostSharedPtr host6 = Upstream::makeTestHost(secondary->info(), \"tcp://127.0.0.6:80\");\n  Upstream::Cluster& cluster1 = cluster_manager_->activeClusters().find(\"secondary\")->second;\n  cluster1.prioritySet().updateHosts(\n      0,\n      Upstream::HostSetImpl::partitionHosts(\n          std::make_shared<Upstream::HostVector>(Upstream::HostVector{host4, host5, host6}),\n          Upstream::HostsPerLocalityImpl::empty()),\n      nullptr, {host4, host5, host6}, {}, 100);\n\n  Upstream::HostConstSharedPtr host;\n  for (int i = 0; i < 33; ++i) {\n    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    EXPECT_EQ(host3, cluster_->loadBalancer().chooseHost(nullptr));\n  }\n\n  for (int i = 33; i < 66; ++i) {\n    
EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    EXPECT_EQ(host6, cluster_->loadBalancer().chooseHost(nullptr));\n  }\n\n  for (int i = 66; i < 99; ++i) {\n    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    EXPECT_EQ(host1, cluster_->loadBalancer().chooseHost(nullptr));\n  }\n\n  for (int i = 99; i < 100; ++i) {\n    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    EXPECT_EQ(host4, cluster_->loadBalancer().chooseHost(nullptr));\n  }\n\n  EXPECT_TRUE(cluster_manager_->removeCluster(\"primary\"));\n  EXPECT_EQ(nullptr, cluster_manager_->get(\"primary\"));\n\n  // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy.\n  Upstream::HostSharedPtr host7 = Upstream::makeTestHost(secondary->info(), \"tcp://127.0.0.7:80\");\n  host7->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);\n  Upstream::HostSharedPtr host8 = Upstream::makeTestHost(secondary->info(), \"tcp://127.0.0.8:80\");\n  host8->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);\n  Upstream::HostSharedPtr host9 = Upstream::makeTestHost(secondary->info(), \"tcp://127.0.0.9:80\");\n  cluster1.prioritySet().updateHosts(\n      1,\n      Upstream::HostSetImpl::partitionHosts(\n          std::make_shared<Upstream::HostVector>(Upstream::HostVector{host7, host8, host9}),\n          Upstream::HostsPerLocalityImpl::empty()),\n      nullptr, {host7, host8, host9}, {}, 100);\n\n  // Priority set\n  //   Priority 0: 1/3 healthy, 1/3 degraded\n  //   Priority 1: 1/3 healthy, 1/3 degraded\n  for (int i = 0; i < 33; ++i) {\n    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    host = cluster_->loadBalancer().chooseHost(nullptr);\n    EXPECT_EQ(host6, cluster_->loadBalancer().chooseHost(nullptr));\n  }\n\n  for (int i = 33; i < 66; ++i) {\n    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    host = cluster_->loadBalancer().chooseHost(nullptr);\n    EXPECT_EQ(host9, 
cluster_->loadBalancer().chooseHost(nullptr));\n  }\n\n  for (int i = 66; i < 99; ++i) {\n    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    EXPECT_EQ(host4, cluster_->loadBalancer().chooseHost(nullptr));\n  }\n\n  for (int i = 99; i < 100; ++i) {\n    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    EXPECT_EQ(host7, cluster_->loadBalancer().chooseHost(nullptr));\n  }\n}\n\nTEST_F(AggregateClusterUpdateTest, InitializeAggregateClusterAfterOtherClusters) {\n  const std::string config = R\"EOF(\n static_resources:\n  clusters:\n  - name: primary\n    connect_timeout: 5s\n    type: STATIC\n    load_assignment:\n      cluster_name: primary\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 80\n    lb_policy: ROUND_ROBIN\n  - name: aggregate_cluster\n    connect_timeout: 0.25s\n    lb_policy: CLUSTER_PROVIDED\n    cluster_type:\n      name: envoy.clusters.aggregate\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig\n        clusters:\n        - primary\n        - secondary\n  )EOF\";\n\n  auto bootstrap = parseBootstrapFromV2Yaml(config);\n  cluster_manager_ = std::make_unique<Upstream::TestClusterManagerImpl>(\n      bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.local_info_,\n      log_manager_, factory_.dispatcher_, admin_, validation_context_, *factory_.api_,\n      http_context_, grpc_context_);\n  cluster_manager_->initializeSecondaryClusters(bootstrap);\n  EXPECT_EQ(cluster_manager_->activeClusters().size(), 2);\n  cluster_ = cluster_manager_->get(\"aggregate_cluster\");\n  auto primary = cluster_manager_->get(\"primary\");\n  EXPECT_NE(nullptr, primary);\n  auto host = cluster_->loadBalancer().chooseHost(nullptr);\n  EXPECT_NE(nullptr, host);\n  EXPECT_EQ(\"primary\", 
host->cluster().name());\n  EXPECT_EQ(\"127.0.0.1:80\", host->address()->asString());\n\n  // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy.\n  Upstream::HostSharedPtr host1 = Upstream::makeTestHost(primary->info(), \"tcp://127.0.0.1:80\");\n  host1->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);\n  Upstream::HostSharedPtr host2 = Upstream::makeTestHost(primary->info(), \"tcp://127.0.0.2:80\");\n  host2->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);\n  Upstream::HostSharedPtr host3 = Upstream::makeTestHost(primary->info(), \"tcp://127.0.0.3:80\");\n  Upstream::Cluster& cluster = cluster_manager_->activeClusters().find(\"primary\")->second;\n  cluster.prioritySet().updateHosts(\n      0,\n      Upstream::HostSetImpl::partitionHosts(\n          std::make_shared<Upstream::HostVector>(Upstream::HostVector{host1, host2, host3}),\n          Upstream::HostsPerLocalityImpl::empty()),\n      nullptr, {host1, host2, host3}, {}, 100);\n\n  for (int i = 0; i < 50; ++i) {\n    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    EXPECT_EQ(host3, cluster_->loadBalancer().chooseHost(nullptr));\n  }\n\n  for (int i = 50; i < 100; ++i) {\n    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));\n    EXPECT_EQ(host1, cluster_->loadBalancer().chooseHost(nullptr));\n  }\n}\n\n} // namespace Aggregate\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/dynamic_forward_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"cluster_test\",\n    srcs = [\"cluster_test.cc\"],\n    data = [\"//test/extensions/transport_sockets/tls/test_data:certs\"],\n    extension_name = \"envoy.filters.http.dynamic_forward_proxy\",\n    deps = [\n        \"//source/extensions/clusters/dynamic_forward_proxy:cluster\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/extensions/common/dynamic_forward_proxy:mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:load_balancer_context_mock\",\n        \"//test/mocks/upstream:load_balancer_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc",
    "content": "#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.h\"\n#include \"envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.validate.h\"\n\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/cluster_factory_impl.h\"\n\n#include \"extensions/clusters/dynamic_forward_proxy/cluster.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/extensions/common/dynamic_forward_proxy/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/load_balancer.h\"\n#include \"test/mocks/upstream/load_balancer_context.h\"\n#include \"test/test_common/environment.h\"\n\nusing testing::AtLeast;\nusing testing::DoAll;\nusing testing::InSequence;\nusing testing::Return;\nusing testing::SizeIs;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace DynamicForwardProxy {\n\nclass ClusterTest : public testing::Test,\n                    public Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory {\npublic:\n  void initialize(const std::string& yaml_config, bool uses_tls) {\n    envoy::config::cluster::v3::Cluster cluster_config =\n        Upstream::parseClusterFromV3Yaml(yaml_config);\n    envoy::extensions::clusters::dynamic_forward_proxy::v3::ClusterConfig config;\n    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(),\n                                           ProtobufWkt::Struct::default_instance(),\n                                           ProtobufMessage::getStrictValidationVisitor(), config);\n    Stats::ScopePtr scope = stats_store_.createScope(\"cluster.name.\");\n    Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_store_,\n        
singleton_manager_, tls_, validation_visitor_, *api_);\n    if (uses_tls) {\n      EXPECT_CALL(ssl_context_manager_, createSslClientContext(_, _));\n    }\n    EXPECT_CALL(*dns_cache_manager_, getCache(_));\n    // Below we return a nullptr handle which has no effect on the code under test but isn't\n    // actually correct. It's possible this will have to change in the future.\n    EXPECT_CALL(*dns_cache_manager_->dns_cache_, addUpdateCallbacks_(_))\n        .WillOnce(DoAll(SaveArgAddress(&update_callbacks_), Return(nullptr)));\n    cluster_ = std::make_shared<Cluster>(cluster_config, config, runtime_, *this, local_info_,\n                                         factory_context, std::move(scope), false);\n    thread_aware_lb_ = std::make_unique<Cluster::ThreadAwareLoadBalancer>(*cluster_);\n    lb_factory_ = thread_aware_lb_->factory();\n    refreshLb();\n\n    ON_CALL(lb_context_, downstreamHeaders()).WillByDefault(Return(&downstream_headers_));\n\n    cluster_->prioritySet().addMemberUpdateCb(\n        [this](const Upstream::HostVector& hosts_added,\n               const Upstream::HostVector& hosts_removed) -> void {\n          onMemberUpdateCb(hosts_added, hosts_removed);\n        });\n\n    absl::flat_hash_map<std::string, Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr>\n        existing_hosts;\n    for (const auto& host : host_map_) {\n      existing_hosts.emplace(host.first, host.second);\n    }\n    EXPECT_CALL(*dns_cache_manager_->dns_cache_, hosts()).WillOnce(Return(existing_hosts));\n    if (!existing_hosts.empty()) {\n      EXPECT_CALL(*this, onMemberUpdateCb(SizeIs(existing_hosts.size()), SizeIs(0)));\n    }\n    cluster_->initialize([] {});\n  }\n\n  Extensions::Common::DynamicForwardProxy::DnsCacheManagerSharedPtr get() override {\n    return dns_cache_manager_;\n  }\n\n  void makeTestHost(const std::string& host, const std::string& address) {\n    EXPECT_TRUE(host_map_.find(host) == host_map_.end());\n    host_map_[host] = 
std::make_shared<Extensions::Common::DynamicForwardProxy::MockDnsHostInfo>();\n    host_map_[host]->address_ = Network::Utility::parseInternetAddress(address);\n\n    // Allow touch() to still be strict.\n    EXPECT_CALL(*host_map_[host], address()).Times(AtLeast(0));\n    EXPECT_CALL(*host_map_[host], isIpAddress()).Times(AtLeast(0));\n    EXPECT_CALL(*host_map_[host], resolvedHost()).Times(AtLeast(0));\n  }\n\n  void updateTestHostAddress(const std::string& host, const std::string& address) {\n    EXPECT_FALSE(host_map_.find(host) == host_map_.end());\n    host_map_[host]->address_ = Network::Utility::parseInternetAddress(address);\n  }\n\n  void refreshLb() { lb_ = lb_factory_->create(); }\n\n  Upstream::MockLoadBalancerContext* setHostAndReturnContext(const std::string& host) {\n    downstream_headers_.remove(\":authority\");\n    downstream_headers_.addCopy(\":authority\", host);\n    return &lb_context_;\n  }\n\n  MOCK_METHOD(void, onMemberUpdateCb,\n              (const Upstream::HostVector& hosts_added, const Upstream::HostVector& hosts_removed));\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Ssl::MockContextManager ssl_context_manager_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_{Api::createApiForTest(stats_store_)};\n  std::shared_ptr<Extensions::Common::DynamicForwardProxy::MockDnsCacheManager> dns_cache_manager_{\n      new Extensions::Common::DynamicForwardProxy::MockDnsCacheManager()};\n  std::shared_ptr<Cluster> cluster_;\n  Upstream::ThreadAwareLoadBalancerPtr thread_aware_lb_;\n  Upstream::LoadBalancerFactorySharedPtr lb_factory_;\n  
Upstream::LoadBalancerPtr lb_;\n  NiceMock<Upstream::MockLoadBalancerContext> lb_context_;\n  Http::TestRequestHeaderMapImpl downstream_headers_;\n  Extensions::Common::DynamicForwardProxy::DnsCache::UpdateCallbacks* update_callbacks_{};\n  absl::flat_hash_map<std::string,\n                      std::shared_ptr<Extensions::Common::DynamicForwardProxy::MockDnsHostInfo>>\n      host_map_;\n\n  const std::string default_yaml_config_ = R\"EOF(\nname: name\nconnect_timeout: 0.25s\ncluster_type:\n  name: dynamic_forward_proxy\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig\n    dns_cache_config:\n      name: foo\n      dns_lookup_family: AUTO\n)EOF\";\n};\n\n// Basic flow of the cluster including adding hosts and removing them.\nTEST_F(ClusterTest, BasicFlow) {\n  initialize(default_yaml_config_, false);\n  makeTestHost(\"host1\", \"1.2.3.4\");\n  InSequence s;\n\n  // Verify no host LB cases.\n  EXPECT_EQ(nullptr, lb_->chooseHost(setHostAndReturnContext(\"foo\")));\n\n  // LB will not resolve host1 until it has been updated.\n  EXPECT_CALL(*this, onMemberUpdateCb(SizeIs(1), SizeIs(0)));\n  update_callbacks_->onDnsHostAddOrUpdate(\"host1\", host_map_[\"host1\"]);\n  EXPECT_EQ(nullptr, lb_->chooseHost(setHostAndReturnContext(\"host1\")));\n  EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(\"1.2.3.4:0\",\n            cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->address()->asString());\n  refreshLb();\n  EXPECT_CALL(*host_map_[\"host1\"], touch());\n  EXPECT_EQ(\"1.2.3.4:0\", lb_->chooseHost(setHostAndReturnContext(\"host1\"))->address()->asString());\n\n  // After changing the address, LB will immediately resolve the new address with a refresh.\n  updateTestHostAddress(\"host1\", \"2.3.4.5\");\n  update_callbacks_->onDnsHostAddOrUpdate(\"host1\", host_map_[\"host1\"]);\n  EXPECT_EQ(1UL, 
cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(\"2.3.4.5:0\",\n            cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->address()->asString());\n  EXPECT_CALL(*host_map_[\"host1\"], touch());\n  EXPECT_EQ(\"2.3.4.5:0\", lb_->chooseHost(setHostAndReturnContext(\"host1\"))->address()->asString());\n\n  // Remove the host, LB will still resolve until it is refreshed.\n  EXPECT_CALL(*this, onMemberUpdateCb(SizeIs(0), SizeIs(1)));\n  update_callbacks_->onDnsHostRemove(\"host1\");\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_CALL(*host_map_[\"host1\"], touch());\n  EXPECT_EQ(\"2.3.4.5:0\", lb_->chooseHost(setHostAndReturnContext(\"host1\"))->address()->asString());\n  refreshLb();\n  EXPECT_EQ(nullptr, lb_->chooseHost(setHostAndReturnContext(\"host1\")));\n}\n\n// Various invalid LB context permutations in case the cluster is used outside of HTTP.\nTEST_F(ClusterTest, InvalidLbContext) {\n  initialize(default_yaml_config_, false);\n  ON_CALL(lb_context_, downstreamHeaders()).WillByDefault(Return(nullptr));\n  EXPECT_EQ(nullptr, lb_->chooseHost(&lb_context_));\n  EXPECT_EQ(nullptr, lb_->chooseHost(nullptr));\n}\n\n// Verify cluster attaches to a populated cache.\nTEST_F(ClusterTest, PopulatedCache) {\n  makeTestHost(\"host1\", \"1.2.3.4\");\n  makeTestHost(\"host2\", \"1.2.3.5\");\n  initialize(default_yaml_config_, false);\n  EXPECT_EQ(2UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n}\n\nclass ClusterFactoryTest : public testing::Test {\nprotected:\n  void createCluster(const std::string& yaml_config, bool avoid_boosting = true) {\n    envoy::config::cluster::v3::Cluster cluster_config =\n        Upstream::parseClusterFromV3Yaml(yaml_config, avoid_boosting);\n    Upstream::ClusterFactoryContextImpl cluster_factory_context(\n        cm_, stats_store_, tls_, nullptr, ssl_context_manager_, runtime_, dispatcher_, log_manager_,\n        local_info_, 
admin_, singleton_manager_, nullptr, true, validation_visitor_, *api_);\n    std::unique_ptr<Upstream::ClusterFactory> cluster_factory = std::make_unique<ClusterFactory>();\n\n    std::tie(cluster_, thread_aware_lb_) =\n        cluster_factory->create(cluster_config, cluster_factory_context);\n  }\n\nprivate:\n  Stats::IsolatedStoreImpl stats_store_;\n  NiceMock<Ssl::MockContextManager> ssl_context_manager_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<AccessLog::MockAccessLogManager> log_manager_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_{Api::createApiForTest(stats_store_)};\n  Upstream::ClusterSharedPtr cluster_;\n  Upstream::ThreadAwareLoadBalancerPtr thread_aware_lb_;\n};\n\n// Verify that using 'sni' causes a failure.\nTEST_F(ClusterFactoryTest, DEPRECATED_FEATURE_TEST(InvalidSNI)) {\n  const std::string yaml_config = TestEnvironment::substitute(R\"EOF(\nname: name\nconnect_timeout: 0.25s\ncluster_type:\n  name: dynamic_forward_proxy\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig\n    dns_cache_config:\n      name: foo\ntls_context:\n  sni: api.lyft.com\n  common_tls_context:\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n)EOF\");\n\n  EXPECT_THROW_WITH_MESSAGE(\n      createCluster(yaml_config, false), EnvoyException,\n      \"dynamic_forward_proxy cluster cannot configure 'sni' or 'verify_subject_alt_name'\");\n}\n\n// Verify that using 'verify_subject_alt_name' causes a failure.\nTEST_F(ClusterFactoryTest, 
DEPRECATED_FEATURE_TEST(InvalidVerifySubjectAltName)) {\n  const std::string yaml_config = TestEnvironment::substitute(R\"EOF(\nname: name\nconnect_timeout: 0.25s\ncluster_type:\n  name: dynamic_forward_proxy\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig\n    dns_cache_config:\n      name: foo\ntls_context:\n  common_tls_context:\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      verify_subject_alt_name: [api.lyft.com]\n)EOF\");\n\n  EXPECT_THROW_WITH_MESSAGE(\n      createCluster(yaml_config, false), EnvoyException,\n      \"dynamic_forward_proxy cluster cannot configure 'sni' or 'verify_subject_alt_name'\");\n}\n\nTEST_F(ClusterFactoryTest, InvalidUpstreamHttpProtocolOptions) {\n  const std::string yaml_config = TestEnvironment::substitute(R\"EOF(\nname: name\nconnect_timeout: 0.25s\ncluster_type:\n  name: dynamic_forward_proxy\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig\n    dns_cache_config:\n      name: foo\nupstream_http_protocol_options: {}\n)EOF\");\n\n  EXPECT_THROW_WITH_MESSAGE(\n      createCluster(yaml_config), EnvoyException,\n      \"dynamic_forward_proxy cluster must have auto_sni and auto_san_validation true when \"\n      \"configured with upstream_http_protocol_options\");\n}\n\nTEST_F(ClusterFactoryTest, InsecureUpstreamHttpProtocolOptions) {\n  const std::string yaml_config = TestEnvironment::substitute(R\"EOF(\nname: name\nconnect_timeout: 0.25s\ncluster_type:\n  name: dynamic_forward_proxy\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig\n    allow_insecure_cluster_options: true\n    dns_cache_config:\n      name: foo\nupstream_http_protocol_options: {}\n)EOF\");\n\n  createCluster(yaml_config);\n}\n\n} // namespace 
DynamicForwardProxy\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/redis/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"redis_cluster_test\",\n    srcs = [\"redis_cluster_test.cc\"],\n    extension_name = \"envoy.clusters.redis\",\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/clusters/redis:redis_cluster\",\n        \"//source/extensions/clusters/redis:redis_cluster_lb\",\n        \"//source/extensions/filters/network/redis_proxy:config\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:transport_socket_config_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/extensions/clusters/redis:redis_cluster_mocks\",\n        \"//test/extensions/filters/network/common/redis:redis_mocks\",\n        \"//test/extensions/filters/network/common/redis:test_utils_lib\",\n        \"//test/extensions/filters/network/redis_proxy:redis_mocks\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:health_check_event_logger_mocks\",\n        \"//test/mocks/upstream:health_checker_mocks\",\n        \"//test/test_common:utility_lib\",\n 
       \"@envoy_api//envoy/config/cluster/redis:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"redis_cluster_lb_test\",\n    srcs = [\"redis_cluster_lb_test.cc\"],\n    extension_name = \"envoy.clusters.redis\",\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:thread_aware_lb_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/clusters/redis:redis_cluster\",\n        \"//source/extensions/clusters/redis:redis_cluster_lb\",\n        \"//source/extensions/filters/network/common/redis:client_interface\",\n        \"//source/extensions/filters/network/common/redis:codec_lib\",\n        \"//source/extensions/filters/network/common/redis:supported_commands_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:transport_socket_config_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/extensions/clusters/redis:redis_cluster_mocks\",\n        \"//test/extensions/filters/network/common/redis:redis_mocks\",\n        \"//test/extensions/filters/network/common/redis:test_utils_lib\",\n        \"//test/extensions/filters/network/redis_proxy:redis_mocks\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        
\"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"redis_cluster_integration_test\",\n    size = \"small\",\n    srcs = [\"redis_cluster_integration_test.cc\"],\n    extension_name = \"envoy.clusters.redis\",\n    deps = [\n        \"//source/extensions/clusters/redis:redis_cluster\",\n        \"//source/extensions/clusters/redis:redis_cluster_lb\",\n        \"//source/extensions/filters/network/redis_proxy:config\",\n        \"//test/integration:integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"redis_cluster_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/clusters/redis:redis_cluster\",\n        \"@envoy_api//envoy/config/cluster/redis:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"crc16_test\",\n    srcs = [\"crc16_test.cc\"],\n    deps = [\"//source/extensions/clusters/redis:crc16_lib\"],\n)\n"
  },
  {
    "path": "test/extensions/clusters/redis/crc16_test.cc",
    "content": "#include \"source/extensions/clusters/redis/crc16.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\nTEST(Hash, crc16) {\n  EXPECT_EQ(44950, Crc16::crc16(\"foo\"));\n  EXPECT_EQ(37829, Crc16::crc16(\"bar\"));\n  EXPECT_EQ(3951, Crc16::crc16(\"foo\\nbar\"));\n  EXPECT_EQ(53222, Crc16::crc16(\"lyft\"));\n  EXPECT_EQ(0, Crc16::crc16(\"\"));\n}\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/redis/mocks.cc",
    "content": "#include \"test/extensions/clusters/redis/mocks.h\"\n\n#include \"envoy/config/cluster/redis/redis_cluster.pb.h\"\n#include \"envoy/config/cluster/redis/redis_cluster.pb.validate.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\nMockClusterSlotUpdateCallBack::MockClusterSlotUpdateCallBack() {\n  ON_CALL(*this, onClusterSlotUpdate(_, _)).WillByDefault(Return(true));\n}\n\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/redis/mocks.h",
    "content": "#include \"envoy/config/cluster/redis/redis_cluster.pb.h\"\n#include \"envoy/config/cluster/redis/redis_cluster.pb.validate.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"source/extensions/clusters/redis/redis_cluster.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\nclass MockClusterSlotUpdateCallBack : public ClusterSlotUpdateCallBack {\npublic:\n  MockClusterSlotUpdateCallBack();\n  ~MockClusterSlotUpdateCallBack() override = default;\n\n  MOCK_METHOD(bool, onClusterSlotUpdate, (ClusterSlotsPtr&&, Upstream::HostMap));\n  MOCK_METHOD(void, onHostHealthUpdate, ());\n};\n\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/redis/redis_cluster_integration_test.cc",
    "content": "#include <sstream>\n#include <vector>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/common/macros.h\"\n\n#include \"extensions/filters/network/redis_proxy/command_splitter_impl.h\"\n\n#include \"test/integration/integration.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace {\n\n// This is a basic redis_proxy configuration with a single host\n// in the cluster. The load balancing policy must be set\n// to random for proper test operation.\nconst std::string& listenerConfig() {\n  CONSTRUCT_ON_FIRST_USE(std::string, fmt::format(R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\nstatic_resources:\n  listeners:\n    name: listener_0\n    address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 0\n    filter_chains:\n      filters:\n        name: redis\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy\n          stat_prefix: redis_stats\n          prefix_routes:\n            catch_all_route:\n              cluster: cluster_0\n          settings:\n            op_timeout: 5s\n            enable_redirection: true\n)EOF\",\n                                                  Platform::null_device_path));\n}\n\nconst std::string& clusterConfig() {\n  CONSTRUCT_ON_FIRST_USE(std::string, R\"EOF(\n  clusters:\n    - name: cluster_0\n      lb_policy: CLUSTER_PROVIDED\n      load_assignment:\n        cluster_name: cluster_0\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 0\n      cluster_type:\n        name: envoy.clusters.redis\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            cluster_refresh_rate: 60s\n            cluster_refresh_timeout: 4s\n         
   redirect_refresh_interval: 0s\n            redirect_refresh_threshold: 1\n)EOF\");\n}\n\nconst std::string& testConfig() {\n  CONSTRUCT_ON_FIRST_USE(std::string, listenerConfig() + clusterConfig());\n}\n\nconst std::string& testConfigWithRefresh() {\n  CONSTRUCT_ON_FIRST_USE(std::string, listenerConfig() + R\"EOF(\n  clusters:\n    - name: cluster_0\n      lb_policy: CLUSTER_PROVIDED\n      load_assignment:\n        cluster_name: cluster_0\n        endpoints:\n        - lb_endpoints:\n          - endpoint:\n              address:\n                socket_address:\n                  address: 127.0.0.1\n                  port_value: 0\n      cluster_type:\n        name: envoy.clusters.redis\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            cluster_refresh_rate: 3600s\n            cluster_refresh_timeout: 4s\n            redirect_refresh_interval: 100s\n            redirect_refresh_threshold: 1\n            failure_refresh_threshold: 1\n)EOF\");\n}\n\nconst std::string& testConfigWithReadPolicy() {\n  CONSTRUCT_ON_FIRST_USE(std::string, listenerConfig() + R\"EOF(\n            read_policy: REPLICA\n)EOF\" + clusterConfig());\n}\n\n// This is the basic redis_proxy configuration with an upstream\n// authentication password specified.\n\nconst std::string& testConfigWithAuth() {\n  CONSTRUCT_ON_FIRST_USE(std::string, testConfig() + R\"EOF(\n      typed_extension_protocol_options:\n        envoy.filters.network.redis_proxy:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions\n          auth_password: { inline_string: somepassword }\n)EOF\");\n}\n\n// This function encodes commands as an array of bulkstrings as transmitted by Redis clients to\n// Redis servers, according to the Redis protocol.\nstd::string makeBulkStringArray(std::vector<std::string>&& command_strings) {\n  std::stringstream result;\n\n  result << \"*\" << command_strings.size() << 
\"\\r\\n\";\n  for (auto& command_string : command_strings) {\n    result << \"$\" << command_string.size() << \"\\r\\n\";\n    result << command_string << \"\\r\\n\";\n  }\n\n  return result.str();\n}\n\nclass RedisClusterIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                    public BaseIntegrationTest {\npublic:\n  RedisClusterIntegrationTest(const std::string& config = testConfig(), int num_upstreams = 2)\n      : BaseIntegrationTest(GetParam(), config), num_upstreams_(num_upstreams),\n        version_(GetParam()) {}\n\n  void initialize() override {\n    setUpstreamCount(num_upstreams_);\n    setDeterministic();\n    config_helper_.renameListener(\"redis_proxy\");\n\n    // Change the port for each of the discovery host in cluster_0.\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      uint32_t upstream_idx = 0;\n      auto* cluster_0 = bootstrap.mutable_static_resources()->mutable_clusters(0);\n      for (int j = 0; j < cluster_0->load_assignment().endpoints_size(); ++j) {\n        auto locality_lb = cluster_0->mutable_load_assignment()->mutable_endpoints(j);\n        for (int k = 0; k < locality_lb->lb_endpoints_size(); ++k) {\n          auto lb_endpoint = locality_lb->mutable_lb_endpoints(k);\n          if (lb_endpoint->endpoint().address().has_socket_address()) {\n            auto* host_socket_addr =\n                lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address();\n            RELEASE_ASSERT(fake_upstreams_.size() > upstream_idx, \"\");\n            host_socket_addr->set_address(\n                fake_upstreams_[upstream_idx]->localAddress()->ip()->addressAsString());\n            host_socket_addr->set_port_value(\n                fake_upstreams_[upstream_idx++]->localAddress()->ip()->port());\n          }\n        }\n      }\n    });\n\n    on_server_ready_function_ = [this](Envoy::IntegrationTestServer& 
test_server) {\n      mock_rng_ = dynamic_cast<Random::MockRandomGenerator*>(\n          &(test_server.server().api().randomGenerator()));\n      // Abort now if we cannot downcast the server's random number generator pointer.\n      ASSERT_TRUE(mock_rng_ != nullptr);\n      // Ensure that fake_upstreams_[0] is the load balancer's host of choice by default.\n      ON_CALL(*mock_rng_, random()).WillByDefault(Return(random_index_));\n    };\n\n    BaseIntegrationTest::initialize();\n  }\n\nprotected:\n  /**\n   * A single step of a larger test involving a fake Redis client and a specific Redis server.\n   * @param upstream a handle to the server that will respond to the request.\n   * @param request supplies Redis client data to transmit to the Redis server.\n   * @param response supplies Redis server data to transmit to the client.\n   * @param redis_client a handle to the fake redis client that sends the request.\n   * @param fake_upstream_connection supplies a handle to connection from the proxy to the fake\n   * server.\n   * @param auth_password supplies the fake upstream's server password, if not an empty string.\n   */\n  void roundtripToUpstreamStep(FakeUpstreamPtr& upstream, const std::string& request,\n                               const std::string& response, IntegrationTcpClientPtr& redis_client,\n                               FakeRawConnectionPtr& fake_upstream_connection,\n                               const std::string& auth_username, const std::string& auth_password,\n                               const bool expect_readonly = false) {\n    std::string proxy_to_server;\n    bool expect_auth_command = false;\n    std::string ok = \"+OK\\r\\n\";\n\n    redis_client->clearData();\n    ASSERT_TRUE(redis_client->write(request));\n\n    if (fake_upstream_connection.get() == nullptr) {\n      expect_auth_command = (!auth_password.empty());\n      EXPECT_TRUE(upstream->waitForRawConnection(fake_upstream_connection));\n    }\n\n    if (expect_auth_command) 
{\n      std::string auth_command = (auth_username.empty())\n                                     ? makeBulkStringArray({\"auth\", auth_password})\n                                     : makeBulkStringArray({\"auth\", auth_username, auth_password});\n\n      EXPECT_TRUE(fake_upstream_connection->waitForData(auth_command.size() + request.size(),\n                                                        &proxy_to_server));\n      // The original request should be the same as the data received by the server.\n      EXPECT_EQ(auth_command + request, proxy_to_server);\n      // Send back an OK for the auth command.\n      EXPECT_TRUE(fake_upstream_connection->write(ok));\n\n    } else if (expect_readonly) {\n      std::string readonly_command = makeBulkStringArray({\"readonly\"});\n      EXPECT_TRUE(fake_upstream_connection->waitForData(readonly_command.size() + request.size(),\n                                                        &proxy_to_server));\n      EXPECT_EQ(readonly_command + request, proxy_to_server);\n      // Send back an OK for the readonly command.\n      EXPECT_TRUE(fake_upstream_connection->write(ok));\n    } else {\n      EXPECT_TRUE(fake_upstream_connection->waitForData(request.size(), &proxy_to_server));\n      // The original request should be the same as the data received by the server.\n      EXPECT_EQ(request, proxy_to_server);\n    }\n\n    EXPECT_TRUE(fake_upstream_connection->write(response));\n    redis_client->waitForData(response);\n    // The original response should be received by the fake Redis client.\n    EXPECT_EQ(response, redis_client->data());\n  }\n\n  /**\n   * Simple bi-directional test between a fake Redis client and Redis server.\n   * @param request supplies Redis client data to transmit to the Redis server.\n   * @param response supplies Redis server data to transmit to the client.\n   */\n  void simpleRequestAndResponse(const int stream_index, const std::string& request,\n                                const std::string& 
response, const bool expect_readonly = false) {\n    IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n    FakeRawConnectionPtr fake_upstream_connection;\n\n    roundtripToUpstreamStep(fake_upstreams_[stream_index], request, response, redis_client,\n                            fake_upstream_connection, \"\", \"\", expect_readonly);\n\n    redis_client->close();\n    EXPECT_TRUE(fake_upstream_connection->close());\n  }\n\n  void expectCallClusterSlot(int stream_index, std::string& response,\n                             const std::string& auth_username = \"\",\n                             const std::string& auth_password = \"\") {\n    std::string cluster_slot_request = makeBulkStringArray({\"CLUSTER\", \"SLOTS\"});\n\n    std::string proxied_cluster_slot_request;\n\n    FakeRawConnectionPtr fake_upstream_connection_;\n    EXPECT_TRUE(fake_upstreams_[stream_index]->waitForRawConnection(fake_upstream_connection_));\n    if (auth_password.empty()) {\n      EXPECT_TRUE(fake_upstream_connection_->waitForData(cluster_slot_request.size(),\n                                                         &proxied_cluster_slot_request));\n      EXPECT_EQ(cluster_slot_request, proxied_cluster_slot_request);\n    } else if (auth_username.empty()) {\n      std::string auth_request = makeBulkStringArray({\"auth\", auth_password});\n      std::string ok = \"+OK\\r\\n\";\n\n      EXPECT_TRUE(fake_upstream_connection_->waitForData(\n          auth_request.size() + cluster_slot_request.size(), &proxied_cluster_slot_request));\n      EXPECT_EQ(auth_request + cluster_slot_request, proxied_cluster_slot_request);\n      EXPECT_TRUE(fake_upstream_connection_->write(ok));\n    } else {\n      std::string auth_request = makeBulkStringArray({\"auth\", auth_username, auth_password});\n      std::string ok = \"+OK\\r\\n\";\n\n      EXPECT_TRUE(fake_upstream_connection_->waitForData(\n          auth_request.size() + cluster_slot_request.size(), 
&proxied_cluster_slot_request));\n      EXPECT_EQ(auth_request + cluster_slot_request, proxied_cluster_slot_request);\n      EXPECT_TRUE(fake_upstream_connection_->write(ok));\n    }\n\n    EXPECT_TRUE(fake_upstream_connection_->write(response));\n    EXPECT_TRUE(fake_upstream_connection_->close());\n  }\n\n  /**\n   * Simple response for a single slot redis cluster with a primary and replica.\n   * @param primary the ip of the primary node.\n   * @param replica the ip of the replica node.\n   * @return The cluster slot response.\n   */\n  std::string singleSlotPrimaryReplica(const Network::Address::Ip* primary,\n                                       const Network::Address::Ip* replica) {\n    int64_t start_slot = 0;\n    int64_t end_slot = 16383;\n\n    std::stringstream resp;\n    resp << \"*1\\r\\n\"\n         << \"*4\\r\\n\"\n         << \":\" << start_slot << \"\\r\\n\"\n         << \":\" << end_slot << \"\\r\\n\"\n         << makeIp(primary->addressAsString(), primary->port())\n         << makeIp(replica->addressAsString(), replica->port());\n\n    return resp.str();\n  }\n\n  /**\n   * Simple response for 2 slot redis cluster with 2 nodes.\n   * @param slot1 the ip of the primary node of slot1.\n   * @param slot2 the ip of the primary node of slot2.\n   * @return The cluster slot response.\n   */\n  std::string twoSlots(const Network::Address::Ip* slot1, const Network::Address::Ip* slot2,\n                       int64_t start_slot1 = 0, int64_t end_slot1 = 10000,\n                       int64_t start_slot2 = 10000, int64_t end_slot2 = 16383) {\n    std::stringstream resp;\n    resp << \"*2\\r\\n\"\n         << \"*3\\r\\n\"\n         << \":\" << start_slot1 << \"\\r\\n\"\n         << \":\" << end_slot1 << \"\\r\\n\"\n         << makeIp(slot1->addressAsString(), slot1->port()) << \"*3\\r\\n\"\n         << \":\" << start_slot2 << \"\\r\\n\"\n         << \":\" << end_slot2 << \"\\r\\n\"\n         << makeIp(slot2->addressAsString(), slot2->port());\n    return 
resp.str();\n  }\n\n  std::string makeIp(const std::string& address, uint32_t port) {\n    return fmt::format(\"*2\\r\\n${0}\\r\\n{1}\\r\\n:{2}\\r\\n\", address.size(), address, port);\n  }\n\n  // This method encodes a fake upstream's IP address and TCP port in the\n  // same format as one would expect from a Redis server in\n  // an ask/moved redirection error.\n  std::string redisAddressAndPort(FakeUpstreamPtr& upstream) {\n    std::stringstream result;\n    if (version_ == Network::Address::IpVersion::v4) {\n      result << \"127.0.0.1\"\n             << \":\";\n    } else {\n      result << \"::1\"\n             << \":\";\n    }\n    result << upstream->localAddress()->ip()->port();\n    return result.str();\n  }\n\n  Random::MockRandomGenerator* mock_rng_{};\n  const int num_upstreams_;\n  const Network::Address::IpVersion version_;\n  int random_index_;\n};\n\nclass RedisClusterWithAuthIntegrationTest : public RedisClusterIntegrationTest {\npublic:\n  RedisClusterWithAuthIntegrationTest(const std::string& config = testConfigWithAuth(),\n                                      int num_upstreams = 2)\n      : RedisClusterIntegrationTest(config, num_upstreams) {}\n};\n\nclass RedisClusterWithReadPolicyIntegrationTest : public RedisClusterIntegrationTest {\npublic:\n  RedisClusterWithReadPolicyIntegrationTest(const std::string& config = testConfigWithReadPolicy(),\n                                            int num_upstreams = 3)\n      : RedisClusterIntegrationTest(config, num_upstreams) {}\n};\n\nclass RedisClusterWithRefreshIntegrationTest : public RedisClusterIntegrationTest {\npublic:\n  RedisClusterWithRefreshIntegrationTest(const std::string& config = testConfigWithRefresh(),\n                                         int num_upstreams = 3)\n      : RedisClusterIntegrationTest(config, num_upstreams) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisClusterIntegrationTest,\n                         
testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisClusterWithAuthIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisClusterWithReadPolicyIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisClusterWithRefreshIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// This test sends a simple \"get foo\" command from a fake\n// downstream client through the proxy to a fake upstream\n// Redis cluster with a single slot with primary and replica.\n// The fake server sends a valid response back to the client.\n// The request and response should make it through the envoy\n// proxy server code unchanged.\nTEST_P(RedisClusterIntegrationTest, SingleSlotPrimaryReplica) {\n  random_index_ = 0;\n\n  on_server_init_function_ = [this]() {\n    std::string cluster_slot_response = singleSlotPrimaryReplica(\n        fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip());\n    expectCallClusterSlot(random_index_, cluster_slot_response);\n  };\n\n  initialize();\n\n  // foo hashes to slot 12182 which is in upstream 0\n  simpleRequestAndResponse(0, makeBulkStringArray({\"get\", \"foo\"}), \"$3\\r\\nbar\\r\\n\");\n}\n\n// This test sends a simple \"get foo\" command from a fake\n// downstream client through the proxy to a fake upstream\n// Redis cluster with 2 slots. The fake server sends a valid response\n// back to the client. 
The request and response should\n// make it through the envoy proxy server code unchanged.\nTEST_P(RedisClusterIntegrationTest, TwoSlot) {\n  random_index_ = 0;\n\n  on_server_init_function_ = [this]() {\n    std::string cluster_slot_response = twoSlots(fake_upstreams_[0]->localAddress()->ip(),\n                                                 fake_upstreams_[1]->localAddress()->ip());\n    expectCallClusterSlot(random_index_, cluster_slot_response);\n  };\n\n  initialize();\n\n  // foobar hashes to slot 12325 which is in upstream 1\n  simpleRequestAndResponse(1, makeBulkStringArray({\"get\", \"foobar\"}), \"$3\\r\\nbar\\r\\n\");\n  // bar hashes to slot 5061 which is in upstream 0\n  simpleRequestAndResponse(0, makeBulkStringArray({\"get\", \"bar\"}), \"$3\\r\\nbar\\r\\n\");\n  // foo hashes to slot 12182 which is in upstream 1\n  simpleRequestAndResponse(1, makeBulkStringArray({\"get\", \"foo\"}), \"$3\\r\\nbar\\r\\n\");\n}\n\n// This test shows the test proxy's multi-stage response to a redirection error from an upstream fake\n// redis server. 
The proxy will properly redirect the original \"get foo\" command to the second fake\n// upstream server, and connect to the first fake upstream server to rediscover the cluster's\n// topology using a \"cluster slots\" command.\nTEST_P(RedisClusterIntegrationTest, ClusterSlotRequestAfterRedirection) {\n  random_index_ = 0;\n\n  on_server_init_function_ = [this]() {\n    std::string cluster_slot_response = singleSlotPrimaryReplica(\n        fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip());\n    expectCallClusterSlot(random_index_, cluster_slot_response);\n  };\n\n  initialize();\n\n  // foo hashes to slot 12182 which the proxy believes is at the server reachable via\n  // fake_upstreams_[0], based on the singleSlotPrimaryReplica() response above.\n  std::string request = makeBulkStringArray({\"get\", \"foo\"});\n  // The actual moved redirection error that redirects to the fake_upstreams_[1] server.\n  std::string redirection_response =\n      \"-MOVED 12182 \" + redisAddressAndPort(fake_upstreams_[1]) + \"\\r\\n\";\n  // The \"get foo\" response from fake_upstreams_[1].\n  std::string response = \"$3\\r\\nbar\\r\\n\";\n  std::string cluster_slots_request = makeBulkStringArray({\"CLUSTER\", \"SLOTS\"});\n  std::string proxy_to_server;\n\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  ASSERT_TRUE(redis_client->write(request));\n\n  FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2,\n      fake_upstream_connection_3;\n\n  // Data from the client should always be routed to fake_upstreams_[0] by the load balancer.\n  EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_1));\n  EXPECT_TRUE(fake_upstream_connection_1->waitForData(request.size(), &proxy_to_server));\n  // The data in request should be received by the first server, fake_upstreams_[0].\n  EXPECT_EQ(request, proxy_to_server);\n  proxy_to_server.clear();\n\n  // Send the 
redirection_error response from the first fake Redis server back to the proxy.\n  EXPECT_TRUE(fake_upstream_connection_1->write(redirection_response));\n  // The proxy should initiate a new connection to the fake redis server, fake_upstreams_[1], in\n  // response.\n  EXPECT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_2));\n\n  // The server at fake_upstreams_[1] should receive the original request unchanged.\n  EXPECT_TRUE(fake_upstream_connection_2->waitForData(request.size(), &proxy_to_server));\n  EXPECT_EQ(request, proxy_to_server);\n\n  // Send response from the second fake Redis server at fake_upstreams_[1] to the client.\n  EXPECT_TRUE(fake_upstream_connection_2->write(response));\n  redis_client->waitForData(response);\n  // The client should receive response unchanged.\n  EXPECT_EQ(response, redis_client->data());\n\n  // A new connection should be created to fake_upstreams_[0] for topology discovery.\n  proxy_to_server.clear();\n  EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_3));\n  EXPECT_TRUE(\n      fake_upstream_connection_3->waitForData(cluster_slots_request.size(), &proxy_to_server));\n  EXPECT_EQ(cluster_slots_request, proxy_to_server);\n\n  EXPECT_TRUE(fake_upstream_connection_1->close());\n  EXPECT_TRUE(fake_upstream_connection_2->close());\n  EXPECT_TRUE(fake_upstream_connection_3->close());\n  redis_client->close();\n}\n\n// This test sends simple \"set foo\" and \"get foo\" command from a fake\n// downstream client through the proxy to a fake upstream\n// Redis cluster with a single slot with primary and replica.\n// The envoy proxy is set with read_policy to read from replica, the expected result\n// is that the set command will be sent to the primary and the get command will be sent\n// to the replica\n\nTEST_P(RedisClusterWithReadPolicyIntegrationTest, SingleSlotPrimaryReplicaReadReplica) {\n  random_index_ = 0;\n\n  on_server_init_function_ = [this]() {\n    std::string 
cluster_slot_response = singleSlotPrimaryReplica(\n        fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip());\n    expectCallClusterSlot(random_index_, cluster_slot_response);\n  };\n\n  initialize();\n\n  // foo hashes to slot 12182 which has primary node in upstream 0 and replica in upstream 1\n  simpleRequestAndResponse(0, makeBulkStringArray({\"set\", \"foo\", \"bar\"}), \":1\\r\\n\", true);\n  simpleRequestAndResponse(1, makeBulkStringArray({\"get\", \"foo\"}), \"$3\\r\\nbar\\r\\n\", true);\n}\n\n// This test sends a simple \"get foo\" command from a fake\n// downstream client through the proxy to a fake upstream\n// Redis cluster with a single slot with primary and replica.\n// The fake server sends a valid response back to the client.\n// The request and response should make it through the envoy\n// proxy server code unchanged.\n//\n// In this scenario, the fake server will receive 2 auth commands:\n// one as part of a topology discovery connection (before sending a\n// \"cluster slots\" command), and one to authenticate the connection\n// that carries the \"get foo\" request.\n\nTEST_P(RedisClusterWithAuthIntegrationTest, SingleSlotPrimaryReplica) {\n  random_index_ = 0;\n\n  on_server_init_function_ = [this]() {\n    std::string cluster_slot_response = singleSlotPrimaryReplica(\n        fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip());\n    expectCallClusterSlot(0, cluster_slot_response, \"\", \"somepassword\");\n  };\n\n  initialize();\n\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n\n  roundtripToUpstreamStep(fake_upstreams_[random_index_], makeBulkStringArray({\"get\", \"foo\"}),\n                          \"$3\\r\\nbar\\r\\n\", redis_client, fake_upstream_connection, \"\",\n                          \"somepassword\");\n\n  redis_client->close();\n  
EXPECT_TRUE(fake_upstream_connection->close());\n}\n\n// This test show the test proxy's multi-stage response to an error from an upstream fake\n// redis server. The proxy will connect to the first fake upstream server to rediscover the\n// cluster's topology using a \"cluster slots\" command.\nTEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) {\n  random_index_ = 0;\n\n  on_server_init_function_ = [this]() {\n    std::string cluster_slot_response = singleSlotPrimaryReplica(\n        fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip());\n    expectCallClusterSlot(random_index_, cluster_slot_response);\n  };\n\n  initialize();\n\n  // foo hashes to slot 12182 which the proxy believes is at the server reachable via\n  // fake_upstreams_[0], based on the singleSlotPrimaryReplica() response above.\n  std::string request = makeBulkStringArray({\"get\", \"foo\"});\n  // The actual error response.\n  std::string error_response = \"-CLUSTERDOWN The cluster is down\\r\\n\";\n  std::string upstream_error_response = \"-upstream failure\\r\\n\";\n  std::string cluster_slots_request = makeBulkStringArray({\"CLUSTER\", \"SLOTS\"});\n  std::string proxy_to_server;\n\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  ASSERT_TRUE(redis_client->write(request));\n\n  FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2;\n\n  // Data from the client should always be routed to fake_upstreams_[0] by the load balancer.\n  EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_1));\n  EXPECT_TRUE(fake_upstream_connection_1->waitForData(request.size(), &proxy_to_server));\n  // The data in request should be received by the first server, fake_upstreams_[0].\n  EXPECT_EQ(request, proxy_to_server);\n  proxy_to_server.clear();\n\n  // Send the server down error response from the first fake Redis server back to the proxy.\n  
EXPECT_TRUE(fake_upstream_connection_1->write(error_response));\n  redis_client->waitForData(upstream_error_response);\n  // The client should receive response unchanged.\n  EXPECT_EQ(upstream_error_response, redis_client->data());\n\n  // A new connection should be created to fake_upstreams_[0] for topology discovery.\n  proxy_to_server.clear();\n  EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_2));\n  EXPECT_TRUE(\n      fake_upstream_connection_2->waitForData(cluster_slots_request.size(), &proxy_to_server));\n  EXPECT_EQ(cluster_slots_request, proxy_to_server);\n\n  EXPECT_TRUE(fake_upstream_connection_1->close());\n  EXPECT_TRUE(fake_upstream_connection_2->close());\n  redis_client->close();\n}\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/redis/redis_cluster_lb_test.cc",
    "content": "#include <memory>\n\n#include \"source/extensions/clusters/redis/redis_cluster_lb.h\"\n\n#include \"extensions/filters/network/common/redis/client.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\nclass TestLoadBalancerContext : public RedisLoadBalancerContext,\n                                public Upstream::LoadBalancerContextBase {\npublic:\n  TestLoadBalancerContext(uint64_t hash_key, bool is_read,\n                          NetworkFilters::Common::Redis::Client::ReadPolicy read_policy)\n      : hash_key_(hash_key), is_read_(is_read), read_policy_(read_policy) {}\n\n  TestLoadBalancerContext(absl::optional<uint64_t> hash) : hash_key_(hash) {}\n\n  // Upstream::LoadBalancerContext\n  absl::optional<uint64_t> computeHashKey() override { return hash_key_; }\n\n  bool isReadCommand() const override { return is_read_; };\n  NetworkFilters::Common::Redis::Client::ReadPolicy readPolicy() const override {\n    return read_policy_;\n  };\n\n  absl::optional<uint64_t> hash_key_;\n  bool is_read_;\n  NetworkFilters::Common::Redis::Client::ReadPolicy read_policy_;\n};\n\nclass RedisClusterLoadBalancerTest : public testing::Test {\npublic:\n  RedisClusterLoadBalancerTest() = default;\n\n  void init() {\n    factory_ = std::make_shared<RedisClusterLoadBalancerFactory>(random_);\n    lb_ = std::make_unique<RedisClusterThreadAwareLoadBalancer>(factory_);\n    lb_->initialize();\n    factory_->onHostHealthUpdate();\n  }\n\n  void validateAssignment(Upstream::HostVector& hosts,\n                          const std::vector<std::pair<uint32_t, uint32_t>>& expected_assignments,\n                          bool read_command = false,\n                          NetworkFilters::Common::Redis::Client::ReadPolicy read_policy =\n                              
NetworkFilters::Common::Redis::Client::ReadPolicy::Primary) {\n\n    Upstream::LoadBalancerPtr lb = lb_->factory()->create();\n    for (auto& assignment : expected_assignments) {\n      TestLoadBalancerContext context(assignment.first, read_command, read_policy);\n      auto host = lb->chooseHost(&context);\n      EXPECT_FALSE(host == nullptr);\n      EXPECT_EQ(hosts[assignment.second]->address()->asString(), host->address()->asString());\n    }\n  }\n\n  static std::pair<std::string, Upstream::HostSharedPtr> makePair(Upstream::HostSharedPtr host) {\n    return std::make_pair(host->address()->asString(), std::move(host));\n  }\n\n  Upstream::HostMap generateHostMap(Upstream::HostVector& hosts) {\n    Upstream::HostMap map;\n    std::transform(hosts.begin(), hosts.end(), std::inserter(map, map.end()), makePair);\n    return map;\n  }\n\n  std::shared_ptr<RedisClusterLoadBalancerFactory> factory_;\n  std::unique_ptr<RedisClusterThreadAwareLoadBalancer> lb_;\n  std::shared_ptr<Upstream::MockClusterInfo> info_{new NiceMock<Upstream::MockClusterInfo>()};\n  NiceMock<Random::MockRandomGenerator> random_;\n};\n\nclass RedisLoadBalancerContextImplTest : public testing::Test {\npublic:\n  void makeBulkStringArray(NetworkFilters::Common::Redis::RespValue& value,\n                           const std::vector<std::string>& strings) {\n    std::vector<NetworkFilters::Common::Redis::RespValue> values(strings.size());\n    for (uint64_t i = 0; i < strings.size(); i++) {\n      values[i].type(NetworkFilters::Common::Redis::RespType::BulkString);\n      values[i].asString() = strings[i];\n    }\n\n    value.type(NetworkFilters::Common::Redis::RespType::Array);\n    value.asArray().swap(values);\n  }\n};\n\n// Works correctly without any hosts.\nTEST_F(RedisClusterLoadBalancerTest, NoHost) {\n  init();\n  EXPECT_EQ(nullptr, lb_->factory()->create()->chooseHost(nullptr));\n};\n\n// Works correctly with empty context\nTEST_F(RedisClusterLoadBalancerTest, NoHash) {\n  
Upstream::HostVector hosts{Upstream::makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n                             Upstream::makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n                             Upstream::makeTestHost(info_, \"tcp://127.0.0.1:92\")};\n\n  ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{\n      ClusterSlot(0, 1000, hosts[0]->address()),\n      ClusterSlot(1001, 2000, hosts[1]->address()),\n      ClusterSlot(2001, 16383, hosts[2]->address()),\n  });\n  Upstream::HostMap all_hosts{\n      {hosts[0]->address()->asString(), hosts[0]},\n      {hosts[1]->address()->asString(), hosts[1]},\n      {hosts[2]->address()->asString(), hosts[2]},\n  };\n  init();\n  factory_->onClusterSlotUpdate(std::move(slots), all_hosts);\n  TestLoadBalancerContext context(absl::nullopt);\n  EXPECT_EQ(nullptr, lb_->factory()->create()->chooseHost(&context));\n};\n\nTEST_F(RedisClusterLoadBalancerTest, Basic) {\n  Upstream::HostVector hosts{Upstream::makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n                             Upstream::makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n                             Upstream::makeTestHost(info_, \"tcp://127.0.0.1:92\")};\n\n  ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{\n      ClusterSlot(0, 1000, hosts[0]->address()),\n      ClusterSlot(1001, 2000, hosts[1]->address()),\n      ClusterSlot(2001, 16383, hosts[2]->address()),\n  });\n  Upstream::HostMap all_hosts{\n      {hosts[0]->address()->asString(), hosts[0]},\n      {hosts[1]->address()->asString(), hosts[1]},\n      {hosts[2]->address()->asString(), hosts[2]},\n  };\n  init();\n  factory_->onClusterSlotUpdate(std::move(slots), all_hosts);\n\n  // A list of (hash: host_index) pair\n  const std::vector<std::pair<uint32_t, uint32_t>> expected_assignments = {\n      {0, 0},    {100, 0},   {1000, 0}, {17382, 0}, {1001, 1},  {1100, 1},\n      {2000, 1}, {18382, 1}, {2001, 2}, {2100, 2},  {16383, 
2}, {19382, 2}};\n  validateAssignment(hosts, expected_assignments);\n}\n\nTEST_F(RedisClusterLoadBalancerTest, ReadStrategiesHealthy) {\n  Upstream::HostVector hosts{\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.2:90\"),\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.2:91\"),\n  };\n\n  ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{\n      ClusterSlot(0, 2000, hosts[0]->address()),\n      ClusterSlot(2001, 16383, hosts[1]->address()),\n  });\n  slots->at(0).addReplica(hosts[2]->address());\n  slots->at(1).addReplica(hosts[3]->address());\n  Upstream::HostMap all_hosts;\n  std::transform(hosts.begin(), hosts.end(), std::inserter(all_hosts, all_hosts.end()), makePair);\n  init();\n  factory_->onClusterSlotUpdate(std::move(slots), all_hosts);\n\n  // A list of (hash: host_index) pair\n  const std::vector<std::pair<uint32_t, uint32_t>> replica_assignments = {\n      {0, 2}, {1100, 2}, {2000, 2}, {18382, 2}, {2001, 3}, {2100, 3}, {16383, 3}, {19382, 3}};\n  validateAssignment(hosts, replica_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Replica);\n  validateAssignment(hosts, replica_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica);\n\n  const std::vector<std::pair<uint32_t, uint32_t>> primary_assignments = {\n      {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}};\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary);\n\n  ON_CALL(random_, random()).WillByDefault(Return(0));\n  validateAssignment(hosts, primary_assignments, 
true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Any);\n  ON_CALL(random_, random()).WillByDefault(Return(1));\n  validateAssignment(hosts, replica_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Any);\n}\n\nTEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyPrimary) {\n  Upstream::HostVector hosts{\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.2:90\"),\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.2:91\"),\n  };\n\n  ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{\n      ClusterSlot(0, 2000, hosts[0]->address()),\n      ClusterSlot(2001, 16383, hosts[1]->address()),\n  });\n  slots->at(0).addReplica(hosts[2]->address());\n  slots->at(1).addReplica(hosts[3]->address());\n  Upstream::HostMap all_hosts;\n  std::transform(hosts.begin(), hosts.end(), std::inserter(all_hosts, all_hosts.end()), makePair);\n  init();\n  factory_->onClusterSlotUpdate(std::move(slots), all_hosts);\n\n  hosts[0]->healthFlagSet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC);\n  hosts[1]->healthFlagSet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC);\n\n  factory_->onHostHealthUpdate();\n\n  // A list of (hash: host_index) pair\n  const std::vector<std::pair<uint32_t, uint32_t>> replica_assignments = {\n      {0, 2}, {1100, 2}, {2000, 2}, {18382, 2}, {2001, 3}, {2100, 3}, {16383, 3}, {19382, 3}};\n  const std::vector<std::pair<uint32_t, uint32_t>> primary_assignments = {\n      {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}};\n\n  validateAssignment(hosts, replica_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Replica);\n  validateAssignment(hosts, replica_assignments, true,\n                     
NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica);\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n  validateAssignment(hosts, replica_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary);\n\n  ON_CALL(random_, random()).WillByDefault(Return(0));\n  validateAssignment(hosts, replica_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Any);\n  ON_CALL(random_, random()).WillByDefault(Return(1));\n  validateAssignment(hosts, replica_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Any);\n}\n\nTEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyReplica) {\n  Upstream::HostVector hosts{\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.2:90\"),\n      Upstream::makeTestHost(info_, \"tcp://127.0.0.2:91\"),\n  };\n\n  ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{\n      ClusterSlot(0, 2000, hosts[0]->address()),\n      ClusterSlot(2001, 16383, hosts[1]->address()),\n  });\n  slots->at(0).addReplica(hosts[2]->address());\n  slots->at(1).addReplica(hosts[3]->address());\n  Upstream::HostMap all_hosts;\n  std::transform(hosts.begin(), hosts.end(), std::inserter(all_hosts, all_hosts.end()), makePair);\n  init();\n  factory_->onClusterSlotUpdate(std::move(slots), all_hosts);\n\n  hosts[2]->healthFlagSet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC);\n  hosts[3]->healthFlagSet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC);\n\n  factory_->onHostHealthUpdate();\n\n  // A list of (hash: host_index) pair\n  const std::vector<std::pair<uint32_t, uint32_t>> replica_assignments = {\n      {0, 2}, {1100, 2}, {2000, 2}, {18382, 2}, {2001, 3}, {2100, 3}, {16383, 3}, 
{19382, 3}};\n  const std::vector<std::pair<uint32_t, uint32_t>> primary_assignments = {\n      {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}};\n\n  validateAssignment(hosts, replica_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Replica);\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica);\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary);\n\n  ON_CALL(random_, random()).WillByDefault(Return(0));\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Any);\n  ON_CALL(random_, random()).WillByDefault(Return(1));\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Any);\n}\n\nTEST_F(RedisClusterLoadBalancerTest, ReadStrategiesNoReplica) {\n  Upstream::HostVector hosts{Upstream::makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n                             Upstream::makeTestHost(info_, \"tcp://127.0.0.1:91\")};\n\n  ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{\n      ClusterSlot(0, 2000, hosts[0]->address()),\n      ClusterSlot(2001, 16383, hosts[1]->address()),\n  });\n  Upstream::HostMap all_hosts;\n  std::transform(hosts.begin(), hosts.end(), std::inserter(all_hosts, all_hosts.end()), makePair);\n  init();\n  factory_->onClusterSlotUpdate(std::move(slots), all_hosts);\n\n  // A list of (hash: host_index) pair\n  const std::vector<std::pair<uint32_t, uint32_t>> primary_assignments = {\n      {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, 
{19382, 1}};\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary);\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::Any);\n  validateAssignment(hosts, primary_assignments, true,\n                     NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica);\n\n  Upstream::LoadBalancerPtr lb = lb_->factory()->create();\n  TestLoadBalancerContext context(1100, true,\n                                  NetworkFilters::Common::Redis::Client::ReadPolicy::Replica);\n  auto host = lb->chooseHost(&context);\n  EXPECT_TRUE(host == nullptr);\n}\n\nTEST_F(RedisClusterLoadBalancerTest, ClusterSlotUpdate) {\n  Upstream::HostVector hosts{Upstream::makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n                             Upstream::makeTestHost(info_, \"tcp://127.0.0.1:91\")};\n  ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{\n      ClusterSlot(0, 1000, hosts[0]->address()), ClusterSlot(1001, 16383, hosts[1]->address())});\n  Upstream::HostMap all_hosts{{hosts[0]->address()->asString(), hosts[0]},\n                              {hosts[1]->address()->asString(), hosts[1]}};\n  init();\n  EXPECT_EQ(true, factory_->onClusterSlotUpdate(std::move(slots), all_hosts));\n\n  // A list of initial (hash: host_index) pair\n  const std::vector<std::pair<uint32_t, uint32_t>> original_assignments = {\n      {100, 0}, {1100, 1}, {2100, 1}};\n\n  validateAssignment(hosts, original_assignments);\n\n  // Update the slot allocation should also change the assignment.\n  std::vector<ClusterSlot> updated_slot{\n      ClusterSlot(0, 1000, hosts[0]->address()),\n      ClusterSlot(1001, 2000, hosts[1]->address()),\n      ClusterSlot(2001, 16383, 
hosts[0]->address()),\n  };\n  EXPECT_EQ(true, factory_->onClusterSlotUpdate(\n                      std::make_unique<std::vector<ClusterSlot>>(updated_slot), all_hosts));\n\n  // A list of updated (hash: host_index) pair.\n  const std::vector<std::pair<uint32_t, uint32_t>> updated_assignments = {\n      {100, 0}, {1100, 1}, {2100, 0}};\n  validateAssignment(hosts, updated_assignments);\n}\n\nTEST_F(RedisClusterLoadBalancerTest, ClusterSlotNoUpdate) {\n  Upstream::HostVector hosts{Upstream::makeTestHost(info_, \"tcp://127.0.0.1:90\"),\n                             Upstream::makeTestHost(info_, \"tcp://127.0.0.1:91\"),\n                             Upstream::makeTestHost(info_, \"tcp://127.0.0.1:92\")};\n\n  ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{\n      ClusterSlot(0, 1000, hosts[0]->address()),\n      ClusterSlot(1001, 2000, hosts[1]->address()),\n      ClusterSlot(2001, 16383, hosts[2]->address()),\n  });\n  Upstream::HostMap all_hosts{\n      {hosts[0]->address()->asString(), hosts[0]},\n      {hosts[1]->address()->asString(), hosts[1]},\n      {hosts[2]->address()->asString(), hosts[2]},\n  };\n\n  // A list of (hash: host_index) pair.\n  const std::vector<std::pair<uint32_t, uint32_t>> expected_assignments = {\n      {100, 0}, {1100, 1}, {2100, 2}};\n\n  init();\n  EXPECT_EQ(true, factory_->onClusterSlotUpdate(std::move(slots), all_hosts));\n  validateAssignment(hosts, expected_assignments);\n\n  // Calling cluster slot update without change should not change assignment.\n  std::vector<ClusterSlot> updated_slot{\n      ClusterSlot(0, 1000, hosts[0]->address()),\n      ClusterSlot(1001, 2000, hosts[1]->address()),\n      ClusterSlot(2001, 16383, hosts[2]->address()),\n  };\n  EXPECT_EQ(false, factory_->onClusterSlotUpdate(\n                       std::make_unique<std::vector<ClusterSlot>>(updated_slot), all_hosts));\n  validateAssignment(hosts, 
expected_assignments);\n}\n\nTEST_F(RedisLoadBalancerContextImplTest, Basic) {\n  // Simple read command\n  std::vector<NetworkFilters::Common::Redis::RespValue> get_foo(2);\n  get_foo[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  get_foo[0].asString() = \"get\";\n  get_foo[1].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  get_foo[1].asString() = \"foo\";\n\n  NetworkFilters::Common::Redis::RespValue get_request;\n  get_request.type(NetworkFilters::Common::Redis::RespType::Array);\n  get_request.asArray().swap(get_foo);\n\n  RedisLoadBalancerContextImpl context1(\"foo\", true, true, get_request,\n                                        NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  EXPECT_EQ(absl::optional<uint64_t>(44950), context1.computeHashKey());\n  EXPECT_EQ(true, context1.isReadCommand());\n  EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context1.readPolicy());\n\n  // Simple write command\n  std::vector<NetworkFilters::Common::Redis::RespValue> set_foo(3);\n  set_foo[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  set_foo[0].asString() = \"set\";\n  set_foo[1].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  set_foo[1].asString() = \"foo\";\n  set_foo[2].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  set_foo[2].asString() = \"bar\";\n\n  NetworkFilters::Common::Redis::RespValue set_request;\n  set_request.type(NetworkFilters::Common::Redis::RespType::Array);\n  set_request.asArray().swap(set_foo);\n\n  RedisLoadBalancerContextImpl context2(\"foo\", true, true, set_request,\n                                        NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  EXPECT_EQ(absl::optional<uint64_t>(44950), context2.computeHashKey());\n  EXPECT_EQ(false, context2.isReadCommand());\n  EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy());\n}\n\nTEST_F(RedisLoadBalancerContextImplTest, 
CompositeArray) {\n\n  NetworkFilters::Common::Redis::RespValueSharedPtr base =\n      std::make_shared<NetworkFilters::Common::Redis::RespValue>();\n  makeBulkStringArray(*base, {\"get\", \"foo\", \"bar\"});\n\n  // Composite read command\n  NetworkFilters::Common::Redis::RespValue get_command;\n  get_command.type(NetworkFilters::Common::Redis::RespType::SimpleString);\n  get_command.asString() = \"get\";\n\n  NetworkFilters::Common::Redis::RespValue get_request1{base, get_command, 1, 1};\n  NetworkFilters::Common::Redis::RespValue get_request2{base, get_command, 2, 2};\n\n  RedisLoadBalancerContextImpl context1(\"foo\", true, true, get_request1,\n                                        NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  EXPECT_EQ(absl::optional<uint64_t>(44950), context1.computeHashKey());\n  EXPECT_EQ(true, context1.isReadCommand());\n  EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context1.readPolicy());\n\n  RedisLoadBalancerContextImpl context2(\"bar\", true, true, get_request2,\n                                        NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  EXPECT_EQ(absl::optional<uint64_t>(37829), context2.computeHashKey());\n  EXPECT_EQ(true, context2.isReadCommand());\n  EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy());\n\n  // Composite write command\n  NetworkFilters::Common::Redis::RespValue set_command;\n  set_command.type(NetworkFilters::Common::Redis::RespType::SimpleString);\n  set_command.asString() = \"set\";\n\n  NetworkFilters::Common::Redis::RespValue set_request{base, set_command, 1, 2};\n  RedisLoadBalancerContextImpl context3(\"foo\", true, true, set_request,\n                                        NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  EXPECT_EQ(absl::optional<uint64_t>(44950), context3.computeHashKey());\n  EXPECT_EQ(false, context3.isReadCommand());\n  
EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context3.readPolicy());\n}\n\nTEST_F(RedisLoadBalancerContextImplTest, UpperCaseCommand) {\n  // Simple read command\n  std::vector<NetworkFilters::Common::Redis::RespValue> get_foo(2);\n  get_foo[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  get_foo[0].asString() = \"GET\";\n  get_foo[1].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  get_foo[1].asString() = \"foo\";\n\n  NetworkFilters::Common::Redis::RespValue get_request;\n  get_request.type(NetworkFilters::Common::Redis::RespType::Array);\n  get_request.asArray().swap(get_foo);\n\n  RedisLoadBalancerContextImpl context1(\"foo\", true, true, get_request,\n                                        NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  EXPECT_EQ(absl::optional<uint64_t>(44950), context1.computeHashKey());\n  EXPECT_EQ(true, context1.isReadCommand());\n  EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context1.readPolicy());\n\n  // Simple write command\n  std::vector<NetworkFilters::Common::Redis::RespValue> set_foo(3);\n  set_foo[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  set_foo[0].asString() = \"SET\";\n  set_foo[1].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  set_foo[1].asString() = \"foo\";\n  set_foo[2].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  set_foo[2].asString() = \"bar\";\n\n  NetworkFilters::Common::Redis::RespValue set_request;\n  set_request.type(NetworkFilters::Common::Redis::RespType::Array);\n  set_request.asArray().swap(set_foo);\n\n  RedisLoadBalancerContextImpl context2(\"foo\", true, true, set_request,\n                                        NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  EXPECT_EQ(absl::optional<uint64_t>(44950), context2.computeHashKey());\n  EXPECT_EQ(false, context2.isReadCommand());\n  
EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy());\n}\n\nTEST_F(RedisLoadBalancerContextImplTest, UnsupportedCommand) {\n  std::vector<NetworkFilters::Common::Redis::RespValue> unknown(1);\n  unknown[0].type(NetworkFilters::Common::Redis::RespType::Integer);\n  unknown[0].asInteger() = 1;\n  NetworkFilters::Common::Redis::RespValue unknown_request;\n  unknown_request.type(NetworkFilters::Common::Redis::RespType::Array);\n  unknown_request.asArray().swap(unknown);\n\n  RedisLoadBalancerContextImpl context3(\"foo\", true, true, unknown_request,\n                                        NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  EXPECT_EQ(absl::optional<uint64_t>(44950), context3.computeHashKey());\n  EXPECT_EQ(false, context3.isReadCommand());\n  EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context3.readPolicy());\n}\n\nTEST_F(RedisLoadBalancerContextImplTest, EnforceHashTag) {\n  std::vector<NetworkFilters::Common::Redis::RespValue> set_foo(3);\n  set_foo[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  set_foo[0].asString() = \"set\";\n  set_foo[1].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  set_foo[1].asString() = \"{foo}bar\";\n  set_foo[2].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  set_foo[2].asString() = \"bar\";\n\n  NetworkFilters::Common::Redis::RespValue set_request;\n  set_request.type(NetworkFilters::Common::Redis::RespType::Array);\n  set_request.asArray().swap(set_foo);\n\n  // Enable_hash tagging should be override when is_redis_cluster is true. 
This is treated like\n  // \"foo\"\n  RedisLoadBalancerContextImpl context2(\"{foo}bar\", false, true, set_request,\n                                        NetworkFilters::Common::Redis::Client::ReadPolicy::Primary);\n\n  EXPECT_EQ(absl::optional<uint64_t>(44950), context2.computeHashKey());\n  EXPECT_EQ(false, context2.isReadCommand());\n  EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy());\n}\n\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/clusters/redis/redis_cluster_test.cc",
    "content": "#include <bitset>\n#include <chrono>\n#include <memory>\n#include <vector>\n\n#include \"envoy/config/cluster/redis/redis_cluster.pb.h\"\n#include \"envoy/config/cluster/redis/redis_cluster.pb.validate.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/upstream/logical_dns_cluster.h\"\n\n#include \"source/extensions/clusters/redis/redis_cluster.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/extensions/clusters/redis/mocks.h\"\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/mocks/upstream/health_check_event_logger.h\"\n#include \"test/mocks/upstream/health_checker.h\"\n\nusing testing::_;\nusing testing::ContainerEq;\nusing testing::Eq;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Clusters {\nnamespace Redis {\n\nnamespace {\nconst std::string BasicConfig = R\"EOF(\n  name: name\n  connect_timeout: 0.25s\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 22120\n  cluster_type:\n    name: envoy.clusters.redis\n    typed_config:\n      \"@type\": 
type.googleapis.com/google.protobuf.Struct\n      value:\n        cluster_refresh_rate: 4s\n        cluster_refresh_timeout: 0.25s\n  )EOF\";\n}\n\nstatic const int ResponseFlagSize = 11;\nstatic const int ResponseReplicaFlagSize = 4;\nclass RedisClusterTest : public testing::Test,\n                         public Extensions::NetworkFilters::Common::Redis::Client::ClientFactory {\npublic:\n  // ClientFactory\n  Extensions::NetworkFilters::Common::Redis::Client::ClientPtr\n  create(Upstream::HostConstSharedPtr host, Event::Dispatcher&,\n         const Extensions::NetworkFilters::Common::Redis::Client::Config&,\n         const Extensions::NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr&,\n         Stats::Scope&, const std::string&, const std::string&) override {\n    EXPECT_EQ(22120, host->address()->ip()->port());\n    return Extensions::NetworkFilters::Common::Redis::Client::ClientPtr{\n        create_(host->address()->asString())};\n  }\n\n  MOCK_METHOD(Extensions::NetworkFilters::Common::Redis::Client::Client*, create_, (std::string));\n\nprotected:\n  RedisClusterTest() : api_(Api::createApiForTest(stats_store_, random_)) {}\n\n  std::list<std::string> hostListToAddresses(const Upstream::HostVector& hosts) {\n    std::list<std::string> addresses;\n    for (const Upstream::HostSharedPtr& host : hosts) {\n      addresses.push_back(host->address()->asString());\n    }\n\n    return addresses;\n  }\n\n  void setupFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) {\n    expectRedisSessionCreated();\n    NiceMock<Upstream::MockClusterManager> cm;\n    envoy::config::cluster::v3::Cluster cluster_config =\n        Upstream::parseClusterFromV3Yaml(yaml, avoid_boosting);\n    Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format(\n        \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                              : cluster_config.alt_stat_name()));\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, stats_store_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n\n    envoy::config::cluster::redis::RedisClusterConfig config;\n    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(),\n                                           ProtobufWkt::Struct::default_instance(),\n                                           ProtobufMessage::getStrictValidationVisitor(), config);\n    cluster_callback_ = std::make_shared<NiceMock<MockClusterSlotUpdateCallBack>>();\n    cluster_ = std::make_shared<RedisCluster>(\n        cluster_config,\n        TestUtility::downcastAndValidate<const envoy::config::cluster::redis::RedisClusterConfig&>(\n            config),\n        *this, cm, runtime_, *api_, dns_resolver_, factory_context, std::move(scope), false,\n        cluster_callback_);\n    // This allows us to create expectation on cluster slot response without waiting for\n    // makeRequest.\n    pool_callbacks_ = &cluster_->redis_discovery_session_;\n    cluster_->prioritySet().addPriorityUpdateCb(\n        [&](uint32_t, const Upstream::HostVector&, const Upstream::HostVector&) -> void {\n          membership_updated_.ready();\n        });\n  }\n\n  void setupFactoryFromV3Yaml(const std::string& yaml) {\n    NiceMock<Upstream::MockClusterManager> cm;\n    envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV3Yaml(yaml);\n    Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format(\n        \"cluster.{}.\", cluster_config.alt_stat_name().empty() ? 
cluster_config.name()\n                                                              : cluster_config.alt_stat_name()));\n    Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context(\n        admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, stats_store_,\n        singleton_manager_, tls_, validation_visitor_, *api_);\n\n    envoy::config::cluster::redis::RedisClusterConfig config;\n    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(),\n                                           ProtobufWkt::Struct::default_instance(),\n                                           validation_visitor_, config);\n\n    NiceMock<AccessLog::MockAccessLogManager> log_manager;\n    NiceMock<Upstream::Outlier::EventLoggerSharedPtr> outlier_event_logger;\n    NiceMock<Envoy::Api::MockApi> api;\n    Upstream::ClusterFactoryContextImpl cluster_factory_context(\n        cm, stats_store_, tls_, std::move(dns_resolver_), ssl_context_manager_, runtime_,\n        dispatcher_, log_manager, local_info_, admin_, singleton_manager_,\n        std::move(outlier_event_logger), false, validation_visitor_, api);\n\n    RedisClusterFactory factory = RedisClusterFactory();\n    factory.createClusterWithConfig(cluster_config, config, cluster_factory_context,\n                                    factory_context, std::move(scope));\n  }\n\n  void expectResolveDiscovery(Network::DnsLookupFamily dns_lookup_family,\n                              const std::string& expected_address,\n                              const std::list<std::string>& resolved_addresses,\n                              Network::DnsResolver::ResolutionStatus status =\n                                  Network::DnsResolver::ResolutionStatus::Success) {\n    EXPECT_CALL(*dns_resolver_, resolve(expected_address, dns_lookup_family, _))\n        .WillOnce(Invoke([status, resolved_addresses](\n                             const std::string&, Network::DnsLookupFamily,\n      
                       Network::DnsResolver::ResolveCb cb) -> Network::ActiveDnsQuery* {\n          cb(status, TestUtility::makeDnsResponse(resolved_addresses));\n          return nullptr;\n        }));\n  }\n\n  void expectRedisSessionCreated() {\n    resolve_timer_ = new Event::MockTimer(&dispatcher_);\n    EXPECT_CALL(*resolve_timer_, disableTimer());\n    ON_CALL(random_, random()).WillByDefault(Return(0));\n  }\n\n  void expectRedisResolve(bool create_client = false) {\n    if (create_client) {\n      client_ = new Extensions::NetworkFilters::Common::Redis::Client::MockClient();\n      EXPECT_CALL(*this, create_(_)).WillOnce(Return(client_));\n      EXPECT_CALL(*client_, addConnectionCallbacks(_));\n      EXPECT_CALL(*client_, close());\n    }\n    EXPECT_CALL(*client_, makeRequest_(Ref(RedisCluster::ClusterSlotsRequest::instance_), _))\n        .WillOnce(Return(&pool_request_));\n  }\n\n  void expectClusterSlotResponse(NetworkFilters::Common::Redis::RespValuePtr&& response) {\n    EXPECT_CALL(*resolve_timer_, enableTimer(_, _));\n    pool_callbacks_->onResponse(std::move(response));\n  }\n\n  void expectClusterSlotFailure() {\n    EXPECT_CALL(*resolve_timer_, enableTimer(_, _));\n    pool_callbacks_->onFailure();\n  }\n\n  NetworkFilters::Common::Redis::RespValuePtr singleSlotPrimaryReplica(const std::string& primary,\n                                                                       const std::string& replica,\n                                                                       int64_t port) const {\n    std::vector<NetworkFilters::Common::Redis::RespValue> primary_1(2);\n    primary_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n    primary_1[0].asString() = primary;\n    primary_1[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    primary_1[1].asInteger() = port;\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> replica_1(2);\n    replica_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n    
replica_1[0].asString() = replica;\n    replica_1[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    replica_1[1].asInteger() = port;\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> slot_1(ResponseReplicaFlagSize);\n    slot_1[0].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_1[0].asInteger() = 0;\n    slot_1[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_1[1].asInteger() = 16383;\n    slot_1[2].type(NetworkFilters::Common::Redis::RespType::Array);\n    slot_1[2].asArray().swap(primary_1);\n    slot_1[3].type(NetworkFilters::Common::Redis::RespType::Array);\n    slot_1[3].asArray().swap(replica_1);\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> slots(1);\n    slots[0].type(NetworkFilters::Common::Redis::RespType::Array);\n    slots[0].asArray().swap(slot_1);\n\n    NetworkFilters::Common::Redis::RespValuePtr response(\n        new NetworkFilters::Common::Redis::RespValue());\n    response->type(NetworkFilters::Common::Redis::RespType::Array);\n    response->asArray().swap(slots);\n    return response;\n  }\n\n  NetworkFilters::Common::Redis::RespValuePtr twoSlotsPrimaries() const {\n    std::vector<NetworkFilters::Common::Redis::RespValue> primary_1(2);\n    primary_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n    primary_1[0].asString() = \"127.0.0.1\";\n    primary_1[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    primary_1[1].asInteger() = 22120;\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> primary_2(2);\n    primary_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n    primary_2[0].asString() = \"127.0.0.2\";\n    primary_2[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    primary_2[1].asInteger() = 22120;\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> slot_1(3);\n    slot_1[0].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_1[0].asInteger() = 0;\n    
slot_1[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_1[1].asInteger() = 9999;\n    slot_1[2].type(NetworkFilters::Common::Redis::RespType::Array);\n    slot_1[2].asArray().swap(primary_1);\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> slot_2(3);\n    slot_2[0].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_2[0].asInteger() = 10000;\n    slot_2[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_2[1].asInteger() = 16383;\n    slot_2[2].type(NetworkFilters::Common::Redis::RespType::Array);\n    slot_2[2].asArray().swap(primary_2);\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> slots(2);\n    slots[0].type(NetworkFilters::Common::Redis::RespType::Array);\n    slots[0].asArray().swap(slot_1);\n    slots[1].type(NetworkFilters::Common::Redis::RespType::Array);\n    slots[1].asArray().swap(slot_2);\n\n    NetworkFilters::Common::Redis::RespValuePtr response(\n        new NetworkFilters::Common::Redis::RespValue());\n    response->type(NetworkFilters::Common::Redis::RespType::Array);\n    response->asArray().swap(slots);\n    return response;\n  }\n\n  NetworkFilters::Common::Redis::RespValuePtr twoSlotsPrimariesWithReplica() const {\n    std::vector<NetworkFilters::Common::Redis::RespValue> primary_1(2);\n    primary_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n    primary_1[0].asString() = \"127.0.0.1\";\n    primary_1[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    primary_1[1].asInteger() = 22120;\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> primary_2(2);\n    primary_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n    primary_2[0].asString() = \"127.0.0.2\";\n    primary_2[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    primary_2[1].asInteger() = 22120;\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> replica_1(2);\n    
replica_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n    replica_1[0].asString() = \"127.0.0.3\";\n    replica_1[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    replica_1[1].asInteger() = 22120;\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> replica_2(2);\n    replica_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n    replica_2[0].asString() = \"127.0.0.4\";\n    replica_2[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    replica_2[1].asInteger() = 22120;\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> slot_1(ResponseReplicaFlagSize);\n    slot_1[0].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_1[0].asInteger() = 0;\n    slot_1[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_1[1].asInteger() = 9999;\n    slot_1[2].type(NetworkFilters::Common::Redis::RespType::Array);\n    slot_1[2].asArray().swap(primary_1);\n    slot_1[3].type(NetworkFilters::Common::Redis::RespType::Array);\n    slot_1[3].asArray().swap(replica_1);\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> slot_2(ResponseReplicaFlagSize);\n    slot_2[0].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_2[0].asInteger() = 10000;\n    slot_2[1].type(NetworkFilters::Common::Redis::RespType::Integer);\n    slot_2[1].asInteger() = 16383;\n    slot_2[2].type(NetworkFilters::Common::Redis::RespType::Array);\n    slot_2[2].asArray().swap(primary_2);\n    slot_2[3].type(NetworkFilters::Common::Redis::RespType::Array);\n    slot_2[3].asArray().swap(replica_2);\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> slots(2);\n    slots[0].type(NetworkFilters::Common::Redis::RespType::Array);\n    slots[0].asArray().swap(slot_1);\n    slots[1].type(NetworkFilters::Common::Redis::RespType::Array);\n    slots[1].asArray().swap(slot_2);\n\n    NetworkFilters::Common::Redis::RespValuePtr response(\n        new 
NetworkFilters::Common::Redis::RespValue());\n    response->type(NetworkFilters::Common::Redis::RespType::Array);\n    response->asArray().swap(slots);\n    return response;\n  }\n\n  NetworkFilters::Common::Redis::RespValue\n  createStringField(bool is_correct_type, const std::string& correct_value) const {\n    NetworkFilters::Common::Redis::RespValue respValue;\n    if (is_correct_type) {\n      respValue.type(NetworkFilters::Common::Redis::RespType::BulkString);\n      respValue.asString() = correct_value;\n    } else {\n      respValue.type(NetworkFilters::Common::Redis::RespType::Integer);\n      respValue.asInteger() = ResponseFlagSize;\n    }\n    return respValue;\n  }\n\n  NetworkFilters::Common::Redis::RespValue createIntegerField(bool is_correct_type,\n                                                              int64_t correct_value) const {\n    NetworkFilters::Common::Redis::RespValue respValue;\n    if (is_correct_type) {\n      respValue.type(NetworkFilters::Common::Redis::RespType::Integer);\n      respValue.asInteger() = correct_value;\n    } else {\n      respValue.type(NetworkFilters::Common::Redis::RespType::BulkString);\n      respValue.asString() = \"bad_value\";\n    }\n    return respValue;\n  }\n\n  NetworkFilters::Common::Redis::RespValue\n  createArrayField(bool is_correct_type,\n                   std::vector<NetworkFilters::Common::Redis::RespValue>& correct_value) const {\n    NetworkFilters::Common::Redis::RespValue respValue;\n    if (is_correct_type) {\n      respValue.type(NetworkFilters::Common::Redis::RespType::Array);\n      respValue.asArray().swap(correct_value);\n    } else {\n      respValue.type(NetworkFilters::Common::Redis::RespType::BulkString);\n      respValue.asString() = \"bad value\";\n    }\n    return respValue;\n  }\n\n  // Create a redis cluster slot response. 
If a bit is set in the bitset, then that part of\n  // of the response is correct, otherwise it's incorrect.\n  NetworkFilters::Common::Redis::RespValuePtr\n  createResponse(std::bitset<ResponseFlagSize> flags,\n                 std::bitset<ResponseReplicaFlagSize> replica_flags) const {\n    int64_t idx(0);\n    int64_t slots_type = idx++;\n    int64_t slots_size = idx++;\n    int64_t slot1_type = idx++;\n    int64_t slot1_size = idx++;\n    int64_t slot1_range_start_type = idx++;\n    int64_t slot1_range_end_type = idx++;\n    int64_t primary_type = idx++;\n    int64_t primary_size = idx++;\n    int64_t primary_ip_type = idx++;\n    int64_t primary_ip_value = idx++;\n    int64_t primary_port_type = idx++;\n    idx = 0;\n    int64_t replica_size = idx++;\n    int64_t replica_ip_type = idx++;\n    int64_t replica_ip_value = idx++;\n    int64_t replica_port_type = idx++;\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> primary_1_array;\n    if (flags.test(primary_size)) {\n      // Ip field.\n      if (flags.test(primary_ip_value)) {\n        primary_1_array.push_back(createStringField(flags.test(primary_ip_type), \"127.0.0.1\"));\n      } else {\n        primary_1_array.push_back(createStringField(flags.test(primary_ip_type), \"bad ip foo\"));\n      }\n      // Port field.\n      primary_1_array.push_back(createIntegerField(flags.test(primary_port_type), 22120));\n    }\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> replica_1_array;\n    if (replica_flags.any()) {\n      // Ip field.\n      if (replica_flags.test(replica_ip_value)) {\n        replica_1_array.push_back(\n            createStringField(replica_flags.test(replica_ip_type), \"127.0.0.2\"));\n      } else {\n        replica_1_array.push_back(\n            createStringField(replica_flags.test(replica_ip_type), \"bad ip bar\"));\n      }\n      // Port field.\n      replica_1_array.push_back(createIntegerField(replica_flags.test(replica_port_type), 22120));\n    }\n\n    
std::vector<NetworkFilters::Common::Redis::RespValue> slot_1_array;\n    if (flags.test(slot1_size)) {\n      slot_1_array.push_back(createIntegerField(flags.test(slot1_range_start_type), 0));\n      slot_1_array.push_back(createIntegerField(flags.test(slot1_range_end_type), 16383));\n      slot_1_array.push_back(createArrayField(flags.test(primary_type), primary_1_array));\n      if (replica_flags.any()) {\n        slot_1_array.push_back(createArrayField(replica_flags.test(replica_size), replica_1_array));\n      }\n    }\n\n    std::vector<NetworkFilters::Common::Redis::RespValue> slots_array;\n    if (flags.test(slots_size)) {\n      slots_array.push_back(createArrayField(flags.test(slot1_type), slot_1_array));\n    }\n\n    NetworkFilters::Common::Redis::RespValuePtr response{\n        new NetworkFilters::Common::Redis::RespValue()};\n    if (flags.test(slots_type)) {\n      response->type(NetworkFilters::Common::Redis::RespType::Array);\n      response->asArray().swap(slots_array);\n    } else {\n      response->type(NetworkFilters::Common::Redis::RespType::BulkString);\n      response->asString() = \"Pong\";\n    }\n\n    return response;\n  }\n\n  void\n  expectHealthyHosts(const std::list<std::string, std::allocator<std::string>>& healthy_hosts) {\n    EXPECT_THAT(healthy_hosts, ContainerEq(hostListToAddresses(\n                                   cluster_->prioritySet().hostSetsPerPriority()[0]->hosts())));\n    EXPECT_THAT(healthy_hosts,\n                ContainerEq(hostListToAddresses(\n                    cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts())));\n    EXPECT_EQ(1UL,\n              cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality().get().size());\n    EXPECT_EQ(\n        1UL,\n        cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size());\n  }\n\n  void testBasicSetup(const std::string& config, const std::string& expected_discovery_address) {\n    setupFromV3Yaml(config);\n   
 const std::list<std::string> resolved_addresses{\"127.0.0.1\", \"127.0.0.2\"};\n    expectResolveDiscovery(Network::DnsLookupFamily::V4Only, expected_discovery_address,\n                           resolved_addresses);\n    expectRedisResolve(true);\n\n    EXPECT_CALL(membership_updated_, ready());\n    EXPECT_CALL(initialized_, ready());\n    cluster_->initialize([&]() -> void { initialized_.ready(); });\n\n    EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1);\n    expectClusterSlotResponse(singleSlotPrimaryReplica(\"127.0.0.1\", \"127.0.0.2\", 22120));\n    expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\", \"127.0.0.2:22120\"}));\n\n    // Promote replica to primary\n    expectRedisResolve();\n    EXPECT_CALL(membership_updated_, ready());\n    resolve_timer_->invokeCallback();\n    EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1);\n    expectClusterSlotResponse(twoSlotsPrimaries());\n    expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\", \"127.0.0.2:22120\"}));\n\n    // No change.\n    expectRedisResolve();\n    resolve_timer_->invokeCallback();\n    EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false));\n    expectClusterSlotResponse(twoSlotsPrimaries());\n    expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\", \"127.0.0.2:22120\"}));\n\n    // Add replicas to primaries\n    expectRedisResolve();\n    EXPECT_CALL(membership_updated_, ready());\n    resolve_timer_->invokeCallback();\n    EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1);\n    expectClusterSlotResponse(twoSlotsPrimariesWithReplica());\n    expectHealthyHosts(std::list<std::string>(\n        {\"127.0.0.1:22120\", \"127.0.0.3:22120\", \"127.0.0.2:22120\", \"127.0.0.4:22120\"}));\n\n    // No change.\n    expectRedisResolve();\n    resolve_timer_->invokeCallback();\n    EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false));\n    
expectClusterSlotResponse(twoSlotsPrimariesWithReplica());\n    expectHealthyHosts(std::list<std::string>(\n        {\"127.0.0.1:22120\", \"127.0.0.3:22120\", \"127.0.0.2:22120\", \"127.0.0.4:22120\"}));\n\n    // Remove 2nd shard.\n    expectRedisResolve();\n    EXPECT_CALL(membership_updated_, ready());\n    resolve_timer_->invokeCallback();\n    EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1);\n    expectClusterSlotResponse(singleSlotPrimaryReplica(\"127.0.0.1\", \"127.0.0.2\", 22120));\n    expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\", \"127.0.0.2:22120\"}));\n  }\n\n  void exerciseStubs() {\n    EXPECT_CALL(dispatcher_, createTimer_(_));\n    RedisCluster::RedisDiscoverySession discovery_session(*cluster_, *this);\n    EXPECT_FALSE(discovery_session.enableHashtagging());\n    EXPECT_EQ(discovery_session.bufferFlushTimeoutInMs(), std::chrono::milliseconds(0));\n    EXPECT_EQ(discovery_session.maxUpstreamUnknownConnections(), 0);\n\n    NetworkFilters::Common::Redis::RespValuePtr dummy_value{\n        new NetworkFilters::Common::Redis::RespValue()};\n    dummy_value->type(NetworkFilters::Common::Redis::RespType::Error);\n    dummy_value->asString() = \"dummy text\";\n    EXPECT_TRUE(discovery_session.onRedirection(std::move(dummy_value), \"dummy ip\", false));\n\n    RedisCluster::RedisDiscoveryClient discovery_client(discovery_session);\n    EXPECT_NO_THROW(discovery_client.onAboveWriteBufferHighWatermark());\n    EXPECT_NO_THROW(discovery_client.onBelowWriteBufferLowWatermark());\n  }\n\n  void testDnsResolve(const char* const address, const int port) {\n    RedisCluster::DnsDiscoveryResolveTarget resolver_target(*cluster_, address, port);\n    EXPECT_CALL(*dns_resolver_, resolve(address, Network::DnsLookupFamily::V4Only, _))\n        .WillOnce(Invoke([&](const std::string&, Network::DnsLookupFamily,\n                             Network::DnsResolver::ResolveCb) -> Network::ActiveDnsQuery* {\n          return 
&active_dns_query_;\n        }));\n    ;\n    resolver_target.startResolveDns();\n\n    EXPECT_CALL(active_dns_query_, cancel());\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Ssl::MockContextManager ssl_context_manager_;\n  std::shared_ptr<NiceMock<Network::MockDnsResolver>> dns_resolver_{\n      new NiceMock<Network::MockDnsResolver>};\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Event::MockTimer* resolve_timer_;\n  ReadyWatcher membership_updated_;\n  ReadyWatcher initialized_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockAdmin> admin_;\n  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n  std::shared_ptr<Upstream::MockClusterMockPrioritySet> hosts_;\n  Upstream::MockHealthCheckEventLogger* event_logger_{};\n  Event::MockTimer* interval_timer_{};\n  Extensions::NetworkFilters::Common::Redis::Client::MockClient* client_{};\n  Extensions::NetworkFilters::Common::Redis::Client::MockPoolRequest pool_request_;\n  Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks* pool_callbacks_{};\n  std::shared_ptr<RedisCluster> cluster_;\n  std::shared_ptr<NiceMock<MockClusterSlotUpdateCallBack>> cluster_callback_;\n  Network::MockActiveDnsQuery active_dns_query_;\n};\n\nusing RedisDnsConfigTuple = std::tuple<std::string, Network::DnsLookupFamily,\n                                       std::list<std::string>, std::list<std::string>>;\nstd::vector<RedisDnsConfigTuple> generateRedisDnsParams() {\n  std::vector<RedisDnsConfigTuple> dns_config;\n  {\n    std::string family_yaml(\"\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::Auto);\n    std::list<std::string> dns_response{\"127.0.0.1\", \"127.0.0.2\"};\n    std::list<std::string> 
resolved_host{\"127.0.0.1:22120\", \"127.0.0.2:22120\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response, resolved_host));\n  }\n  {\n    std::string family_yaml(R\"EOF(dns_lookup_family: V4_ONLY)EOF\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::V4Only);\n    std::list<std::string> dns_response{\"127.0.0.1\", \"127.0.0.2\"};\n    std::list<std::string> resolved_host{\"127.0.0.1:22120\", \"127.0.0.2:22120\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response, resolved_host));\n  }\n  {\n    std::string family_yaml(R\"EOF(dns_lookup_family: V6_ONLY)EOF\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::V6Only);\n    std::list<std::string> dns_response{\"::1\", \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"};\n    std::list<std::string> resolved_host{\"[::1]:22120\", \"[2001:db8:85a3::8a2e:370:7334]:22120\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response, resolved_host));\n  }\n  {\n    std::string family_yaml(R\"EOF(dns_lookup_family: AUTO)EOF\");\n    Network::DnsLookupFamily family(Network::DnsLookupFamily::Auto);\n    std::list<std::string> dns_response{\"::1\", \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"};\n    std::list<std::string> resolved_host{\"[::1]:22120\", \"[2001:db8:85a3::8a2e:370:7334]:22120\"};\n    dns_config.push_back(std::make_tuple(family_yaml, family, dns_response, resolved_host));\n  }\n  return dns_config;\n}\n\nclass RedisDnsParamTest : public RedisClusterTest,\n                          public testing::WithParamInterface<RedisDnsConfigTuple> {};\n\nINSTANTIATE_TEST_SUITE_P(DnsParam, RedisDnsParamTest, testing::ValuesIn(generateRedisDnsParams()));\n\n// Validate that if the DNS and CLUSTER SLOT resolve immediately, we have the expected\n// host state and initialization callback invocation.\n\nTEST_P(RedisDnsParamTest, ImmediateResolveDns) {\n  const std::string config = R\"EOF(\n  name: name\n  connect_timeout: 0.25s\n  )EOF\" 
+ std::get<0>(GetParam()) +\n                             R\"EOF(\n  load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 22120\n  cluster_type:\n    name: envoy.clusters.redis\n    typed_config:\n      \"@type\": type.googleapis.com/google.protobuf.Struct\n      value:\n        cluster_refresh_rate: 4s\n        cluster_refresh_timeout: 0.25s\n  )EOF\";\n\n  setupFromV3Yaml(config);\n\n  expectRedisResolve(true);\n  EXPECT_CALL(*dns_resolver_, resolve(\"foo.bar.com\", std::get<1>(GetParam()), _))\n      .WillOnce(Invoke([&](const std::string&, Network::DnsLookupFamily,\n                           Network::DnsResolver::ResolveCb cb) -> Network::ActiveDnsQuery* {\n        std::list<std::string> address_pair = std::get<2>(GetParam());\n        cb(Network::DnsResolver::ResolutionStatus::Success,\n           TestUtility::makeDnsResponse(address_pair));\n        EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1);\n        expectClusterSlotResponse(\n            singleSlotPrimaryReplica(address_pair.front(), address_pair.back(), 22120));\n        return nullptr;\n      }));\n\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(initialized_, ready());\n  cluster_->initialize([&]() -> void { initialized_.ready(); });\n\n  expectHealthyHosts(std::get<3>(GetParam()));\n}\n\nTEST_F(RedisClusterTest, EmptyDnsResponse) {\n  Event::MockTimer* dns_timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  setupFromV3Yaml(BasicConfig);\n  const std::list<std::string> resolved_addresses{};\n  EXPECT_CALL(*dns_timer, enableTimer(_, _));\n  expectResolveDiscovery(Network::DnsLookupFamily::V4Only, \"foo.bar.com\", resolved_addresses);\n\n  EXPECT_CALL(initialized_, ready());\n  cluster_->initialize([&]() -> void { initialized_.ready(); });\n\n  EXPECT_EQ(0UL, 
cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1U, cluster_->info()->stats().update_empty_.value());\n\n  // Does not recreate the timer on subsequent DNS resolve calls.\n  EXPECT_CALL(*dns_timer, enableTimer(_, _));\n  expectResolveDiscovery(Network::DnsLookupFamily::V4Only, \"foo.bar.com\", resolved_addresses);\n  dns_timer->invokeCallback();\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(2U, cluster_->info()->stats().update_empty_.value());\n}\n\nTEST_F(RedisClusterTest, FailedDnsResponse) {\n  Event::MockTimer* dns_timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  setupFromV3Yaml(BasicConfig);\n  const std::list<std::string> resolved_addresses{};\n  EXPECT_CALL(*dns_timer, enableTimer(_, _));\n  expectResolveDiscovery(Network::DnsLookupFamily::V4Only, \"foo.bar.com\", resolved_addresses,\n                         Network::DnsResolver::ResolutionStatus::Failure);\n\n  EXPECT_CALL(initialized_, ready());\n  cluster_->initialize([&]() -> void { initialized_.ready(); });\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(0U, cluster_->info()->stats().update_empty_.value());\n\n  // Does not recreate the timer on subsequent DNS resolve calls.\n  EXPECT_CALL(*dns_timer, enableTimer(_, _));\n  expectResolveDiscovery(Network::DnsLookupFamily::V4Only, \"foo.bar.com\", resolved_addresses);\n  dns_timer->invokeCallback();\n\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_EQ(0UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n  EXPECT_EQ(1U, 
cluster_->info()->stats().update_empty_.value());\n}\n\nTEST_F(RedisClusterTest, Basic) {\n  // Using load assignment.\n  const std::string basic_yaml_load_assignment = R\"EOF(\n  name: name\n  connect_timeout: 0.25s\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n    cluster_name: name\n    endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: foo.bar.com\n                port_value: 22120\n            health_check_config:\n              port_value: 8000\n  cluster_type:\n    name: envoy.clusters.redis\n    typed_config:\n      \"@type\": type.googleapis.com/google.protobuf.Struct\n      value:\n        cluster_refresh_rate: 4s\n        cluster_refresh_timeout: 0.25s\n  )EOF\";\n\n  testBasicSetup(BasicConfig, \"foo.bar.com\");\n  testBasicSetup(basic_yaml_load_assignment, \"foo.bar.com\");\n\n  // Exercise stubbed out interfaces for coverage.\n  exerciseStubs();\n}\n\nTEST_F(RedisClusterTest, RedisResolveFailure) {\n  setupFromV3Yaml(BasicConfig);\n  const std::list<std::string> resolved_addresses{\"127.0.0.1\", \"127.0.0.2\"};\n  expectResolveDiscovery(Network::DnsLookupFamily::V4Only, \"foo.bar.com\", resolved_addresses);\n  expectRedisResolve(true);\n\n  cluster_->initialize([&]() -> void { initialized_.ready(); });\n\n  // Initialization will wait til the redis cluster succeed.\n  expectClusterSlotFailure();\n  EXPECT_EQ(1U, cluster_->info()->stats().update_attempt_.value());\n  EXPECT_EQ(1U, cluster_->info()->stats().update_failure_.value());\n\n  expectRedisResolve(true);\n  resolve_timer_->invokeCallback();\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1);\n  expectClusterSlotResponse(singleSlotPrimaryReplica(\"127.0.0.1\", \"127.0.0.2\", 22120));\n  expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\", \"127.0.0.2:22120\"}));\n\n  // Expect no change if 
resolve failed.\n  expectRedisResolve();\n  resolve_timer_->invokeCallback();\n  expectClusterSlotFailure();\n  expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\", \"127.0.0.2:22120\"}));\n  EXPECT_EQ(3U, cluster_->info()->stats().update_attempt_.value());\n  EXPECT_EQ(2U, cluster_->info()->stats().update_failure_.value());\n}\n\nTEST_F(RedisClusterTest, FactoryInitNotRedisClusterTypeFailure) {\n  const std::string basic_yaml_hosts = R\"EOF(\n  name: name\n  connect_timeout: 0.25s\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 22120\n  cluster_type:\n    name: envoy.clusters.memcached\n    typed_config:\n      \"@type\": type.googleapis.com/google.protobuf.Struct\n      value:\n        cluster_refresh_rate: 4s\n        cluster_refresh_timeout: 0.25s\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(setupFactoryFromV3Yaml(basic_yaml_hosts), EnvoyException,\n                            \"Redis cluster can only created with redis cluster type.\");\n}\n\nTEST_F(RedisClusterTest, FactoryInitRedisClusterTypeSuccess) {\n  setupFactoryFromV3Yaml(BasicConfig);\n}\n\nTEST_F(RedisClusterTest, RedisErrorResponse) {\n  setupFromV3Yaml(BasicConfig);\n  const std::list<std::string> resolved_addresses{\"127.0.0.1\", \"127.0.0.2\"};\n  expectResolveDiscovery(Network::DnsLookupFamily::V4Only, \"foo.bar.com\", resolved_addresses);\n  expectRedisResolve(true);\n\n  cluster_->initialize([&]() -> void { initialized_.ready(); });\n\n  // Initialization will wait til the redis cluster succeed.\n  std::vector<NetworkFilters::Common::Redis::RespValue> hello_world(2);\n  hello_world[0].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  hello_world[0].asString() = \"hello\";\n  hello_world[1].type(NetworkFilters::Common::Redis::RespType::BulkString);\n  
hello_world[1].asString() = \"world\";\n\n  NetworkFilters::Common::Redis::RespValuePtr hello_world_response(\n      new NetworkFilters::Common::Redis::RespValue());\n  hello_world_response->type(NetworkFilters::Common::Redis::RespType::Array);\n  hello_world_response->asArray().swap(hello_world);\n\n  EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(0);\n  expectClusterSlotResponse(std::move(hello_world_response));\n  EXPECT_EQ(1U, cluster_->info()->stats().update_attempt_.value());\n  EXPECT_EQ(1U, cluster_->info()->stats().update_failure_.value());\n\n  expectRedisResolve();\n  resolve_timer_->invokeCallback();\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1);\n  std::bitset<ResponseFlagSize> single_slot_primary(0xfff);\n  std::bitset<ResponseReplicaFlagSize> no_replica(0);\n  expectClusterSlotResponse(createResponse(single_slot_primary, no_replica));\n  expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\"}));\n\n  // Expect no change if resolve failed.\n  uint64_t update_attempt = 2;\n  uint64_t update_failure = 1;\n  // Test every combination the cluster slots response.\n  for (uint64_t i = 0; i < (1 << ResponseFlagSize); i++) {\n    std::bitset<ResponseFlagSize> flags(i);\n    expectRedisResolve();\n    resolve_timer_->invokeCallback();\n    if (flags.all()) {\n      EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false));\n    }\n    expectClusterSlotResponse(createResponse(flags, no_replica));\n    expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\"}));\n    EXPECT_EQ(++update_attempt, cluster_->info()->stats().update_attempt_.value());\n    if (!flags.all()) {\n      EXPECT_EQ(++update_failure, cluster_->info()->stats().update_failure_.value());\n    }\n  }\n}\n\nTEST_F(RedisClusterTest, RedisReplicaErrorResponse) {\n  setupFromV3Yaml(BasicConfig);\n  const std::list<std::string> 
resolved_addresses{\"127.0.0.1\", \"127.0.0.2\"};\n  expectResolveDiscovery(Network::DnsLookupFamily::V4Only, \"foo.bar.com\", resolved_addresses);\n  expectRedisResolve(true);\n\n  cluster_->initialize([&]() -> void { initialized_.ready(); });\n\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(initialized_, ready());\n  EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1);\n  std::bitset<ResponseFlagSize> single_slot_primary(0xfff);\n  std::bitset<ResponseReplicaFlagSize> no_replica(0);\n  expectClusterSlotResponse(createResponse(single_slot_primary, no_replica));\n  expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\"}));\n\n  // Expect no change if resolve failed.\n  uint64_t update_attempt = 1;\n  uint64_t update_failure = 0;\n  // Test every combination the replica error response.\n  for (uint64_t i = 1; i < (1 << ResponseReplicaFlagSize); i++) {\n    std::bitset<ResponseReplicaFlagSize> replica_flags(i);\n    expectRedisResolve();\n    resolve_timer_->invokeCallback();\n    if (replica_flags.all()) {\n      EXPECT_CALL(membership_updated_, ready());\n      EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false));\n    }\n    expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\"}));\n    expectClusterSlotResponse(createResponse(single_slot_primary, replica_flags));\n    EXPECT_EQ(++update_attempt, cluster_->info()->stats().update_attempt_.value());\n    if (!(replica_flags.all() || replica_flags.none())) {\n      EXPECT_EQ(++update_failure, cluster_->info()->stats().update_failure_.value());\n    }\n  }\n}\n\nTEST_F(RedisClusterTest, DnsDiscoveryResolverBasic) {\n  setupFromV3Yaml(BasicConfig);\n  testDnsResolve(\"foo.bar.com\", 22120);\n}\n\nTEST_F(RedisClusterTest, MultipleDnsDiscovery) {\n  const std::string config = R\"EOF(\n  name: name\n  connect_timeout: 0.25s\n  dns_lookup_family: V4_ONLY\n  load_assignment:\n        endpoints:\n          - lb_endpoints:\n            - 
endpoint:\n                address:\n                  socket_address:\n                    address: foo.bar.com\n                    port_value: 22120               \n            - endpoint:\n                address:\n                  socket_address:\n                    address: foo1.bar.com\n                    port_value: 22120\n  cluster_type:\n    name: envoy.clusters.redis\n    typed_config:\n      \"@type\": type.googleapis.com/google.protobuf.Struct\n      value:\n        cluster_refresh_rate: 4s\n        cluster_refresh_timeout: 0.25s\n  )EOF\";\n\n  setupFromV3Yaml(config);\n\n  // Only single in-flight \"cluster slots\" call.\n  expectRedisResolve(true);\n\n  ReadyWatcher dns_resolve_1;\n  ReadyWatcher dns_resolve_2;\n\n  EXPECT_CALL(*dns_resolver_, resolve(\"foo.bar.com\", _, _))\n      .WillOnce(Invoke([&](const std::string&, Network::DnsLookupFamily,\n                           Network::DnsResolver::ResolveCb cb) -> Network::ActiveDnsQuery* {\n        cb(Network::DnsResolver::ResolutionStatus::Success,\n           TestUtility::makeDnsResponse(std::list<std::string>({\"127.0.0.1\", \"127.0.0.2\"})));\n        return nullptr;\n      }));\n\n  EXPECT_CALL(*dns_resolver_, resolve(\"foo1.bar.com\", _, _))\n      .WillOnce(Invoke([&](const std::string&, Network::DnsLookupFamily,\n                           Network::DnsResolver::ResolveCb cb) -> Network::ActiveDnsQuery* {\n        cb(Network::DnsResolver::ResolutionStatus::Success,\n           TestUtility::makeDnsResponse(std::list<std::string>({\"127.0.0.3\", \"127.0.0.4\"})));\n        return nullptr;\n      }));\n\n  cluster_->initialize([&]() -> void { initialized_.ready(); });\n\n  // Pending RedisResolve will call cancel in the destructor.\n  EXPECT_CALL(pool_request_, cancel());\n}\n\nTEST_F(RedisClusterTest, HostRemovalAfterHcFail) {\n  setupFromV3Yaml(BasicConfig);\n  auto health_checker = std::make_shared<Upstream::MockHealthChecker>();\n  EXPECT_CALL(*health_checker, start());\n  
EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2);\n  cluster_->setHealthChecker(health_checker);\n\n  const std::list<std::string> resolved_addresses{\"127.0.0.1\", \"127.0.0.2\"};\n  expectResolveDiscovery(Network::DnsLookupFamily::V4Only, \"foo.bar.com\", resolved_addresses);\n  expectRedisResolve(true);\n\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(initialized_, ready());\n  cluster_->initialize([&]() -> void { initialized_.ready(); });\n\n  EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1);\n  expectClusterSlotResponse(singleSlotPrimaryReplica(\"127.0.0.1\", \"127.0.0.2\", 22120));\n\n  // Verify that both hosts are initially marked with FAILED_ACTIVE_HC, then\n  // clear the flag to simulate that these hosts have been successfully health\n  // checked.\n  {\n    EXPECT_CALL(membership_updated_, ready());\n    const auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n    EXPECT_EQ(2UL, hosts.size());\n\n    for (size_t i = 0; i < 2; ++i) {\n      EXPECT_TRUE(hosts[i]->healthFlagGet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC));\n      hosts[i]->healthFlagClear(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC);\n      hosts[i]->healthFlagClear(Upstream::Host::HealthFlag::PENDING_ACTIVE_HC);\n      health_checker->runCallbacks(hosts[i], Upstream::HealthTransition::Changed);\n    }\n    expectHealthyHosts(std::list<std::string>({\"127.0.0.1:22120\", \"127.0.0.2:22120\"}));\n  }\n\n  // Failed HC\n  EXPECT_CALL(membership_updated_, ready());\n  EXPECT_CALL(*cluster_callback_, onHostHealthUpdate());\n  const auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts();\n  hosts[1]->healthFlagSet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC);\n  health_checker->runCallbacks(hosts[1], Upstream::HealthTransition::Changed);\n\n  EXPECT_THAT(2U, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size());\n  EXPECT_THAT(1U, 
cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size());\n}\n\n} // namespace Redis\n} // namespace Clusters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\n        \"utility_test.cc\",\n    ],\n    deps = [\n        \"//source/extensions/common:utility_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/common/aws/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"aws_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//source/extensions/common/aws:credentials_provider_interface\",\n        \"//source/extensions/common/aws:signer_interface\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"signer_impl_test\",\n    srcs = [\"signer_impl_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/extensions/common/aws:signer_impl_lib\",\n        \"//test/extensions/common/aws:aws_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    deps = [\n        \"//source/extensions/common/aws:utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"region_provider_impl_test\",\n    srcs = [\"region_provider_impl_test.cc\"],\n    deps = [\n        \"//source/extensions/common/aws:region_provider_impl_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"credentials_provider_impl_test\",\n    srcs = [\"credentials_provider_impl_test.cc\"],\n    deps = [\n        \"//source/extensions/common/aws:credentials_provider_impl_lib\",\n        \"//test/extensions/common/aws:aws_mocks\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"credentials_provider_test\",\n    srcs = [\"credentials_provider_test.cc\"],\n    deps = [\n        
\"//source/extensions/common/aws:credentials_provider_interface\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"aws_metadata_fetcher_integration_test\",\n    srcs = [\n        \"aws_metadata_fetcher_integration_test.cc\",\n    ],\n    deps = [\n        \"//source/common/common:fmt_lib\",\n        \"//source/extensions/common/aws:utility_lib\",\n        \"//source/extensions/filters/http/fault:config\",\n        \"//source/extensions/filters/http/fault:fault_filter_lib\",\n        \"//source/extensions/filters/http/router:config\",\n        \"//source/extensions/filters/network/echo:config\",\n        \"//source/extensions/filters/network/http_connection_manager:config\",\n        \"//test/integration:integration_lib\",\n        \"//test/server:utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/common/aws/aws_metadata_fetcher_integration_test.cc",
    "content": "#include \"common/common/fmt.h\"\n\n#include \"extensions/common/aws/utility.h\"\n\n#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/server/utility.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\n\nusing Envoy::Extensions::Common::Aws::Utility;\n\nclass AwsMetadataIntegrationTestBase : public ::testing::Test, public BaseIntegrationTest {\npublic:\n  AwsMetadataIntegrationTestBase(int status_code, int delay_s)\n      : BaseIntegrationTest(Network::Address::IpVersion::v4, renderConfig(status_code, delay_s)) {}\n\n  static std::string renderConfig(int status_code, int delay_s) {\n    return absl::StrCat(ConfigHelper::baseConfig(),\n                        fmt::format(R\"EOF(\n    filter_chains:\n      filters:\n        name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: metadata_test\n          http_filters:\n            - name: fault\n              typed_config:\n                \"@type\": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault\n                delay:\n                  fixed_delay:\n                    seconds: {}\n                    nanos: {}\n                  percentage:\n                    numerator: 100\n                    denominator: HUNDRED\n            - name: envoy.filters.http.router\n          codec_type: HTTP1\n          route_config:\n            virtual_hosts:\n              name: metadata_endpoint\n              routes:\n                - name: redirect_route\n                  redirect:\n                    prefix_rewrite: \"/\"\n                  match:\n                    prefix: \"/redirect\"\n                - name: auth_route\n                  direct_response:\n                    status: {}\n                    body:\n                      inline_string: METADATA_VALUE_WITH_AUTH\n                  
match:\n                    prefix: \"/\"\n                    headers:\n                      - name: Authorization\n                        exact_match: AUTH_TOKEN\n                - name: no_auth_route\n                  direct_response:\n                    status: {}\n                    body:\n                      inline_string: METADATA_VALUE\n                  match:\n                    prefix: \"/\"\n              domains: \"*\"\n            name: route_config_0\n      )EOF\",\n                                    delay_s, delay_s > 0 ? 0 : 1000, status_code, status_code));\n  }\n\n  void SetUp() override { BaseIntegrationTest::initialize(); }\n};\n\nclass AwsMetadataIntegrationTestSuccess : public AwsMetadataIntegrationTestBase {\npublic:\n  AwsMetadataIntegrationTestSuccess() : AwsMetadataIntegrationTestBase(200, 0) {}\n};\n\nTEST_F(AwsMetadataIntegrationTestSuccess, Success) {\n  const auto endpoint = fmt::format(\"{}:{}\", Network::Test::getLoopbackAddressUrlString(version_),\n                                    lookupPort(\"listener_0\"));\n  const auto response = Utility::metadataFetcher(endpoint, \"\", \"\");\n\n  ASSERT_TRUE(response.has_value());\n  EXPECT_EQ(\"METADATA_VALUE\", *response);\n\n  ASSERT_NE(nullptr, test_server_->counter(\"http.metadata_test.downstream_rq_completed\"));\n  EXPECT_EQ(1, test_server_->counter(\"http.metadata_test.downstream_rq_completed\")->value());\n}\n\nTEST_F(AwsMetadataIntegrationTestSuccess, AuthToken) {\n  const auto endpoint = fmt::format(\"{}:{}\", Network::Test::getLoopbackAddressUrlString(version_),\n                                    lookupPort(\"listener_0\"));\n  const auto response = Utility::metadataFetcher(endpoint, \"\", \"AUTH_TOKEN\");\n\n  ASSERT_TRUE(response.has_value());\n  EXPECT_EQ(\"METADATA_VALUE_WITH_AUTH\", *response);\n\n  ASSERT_NE(nullptr, test_server_->counter(\"http.metadata_test.downstream_rq_completed\"));\n  EXPECT_EQ(1, 
test_server_->counter(\"http.metadata_test.downstream_rq_completed\")->value());\n}\n\nTEST_F(AwsMetadataIntegrationTestSuccess, Redirect) {\n  const auto endpoint = fmt::format(\"{}:{}\", Network::Test::getLoopbackAddressUrlString(version_),\n                                    lookupPort(\"listener_0\"));\n  const auto response = Utility::metadataFetcher(endpoint, \"redirect\", \"AUTH_TOKEN\");\n\n  ASSERT_TRUE(response.has_value());\n  EXPECT_EQ(\"METADATA_VALUE_WITH_AUTH\", *response);\n\n  // We should make 2 requests, 1 that results in a redirect, and a final successful one\n  ASSERT_NE(nullptr, test_server_->counter(\"http.metadata_test.downstream_rq_completed\"));\n  EXPECT_EQ(2, test_server_->counter(\"http.metadata_test.downstream_rq_completed\")->value());\n\n  ASSERT_NE(nullptr, test_server_->counter(\"http.metadata_test.downstream_rq_3xx\"));\n  EXPECT_EQ(1, test_server_->counter(\"http.metadata_test.downstream_rq_3xx\")->value());\n}\n\nclass AwsMetadataIntegrationTestFailure : public AwsMetadataIntegrationTestBase {\npublic:\n  AwsMetadataIntegrationTestFailure() : AwsMetadataIntegrationTestBase(503, 0) {}\n};\n\nTEST_F(AwsMetadataIntegrationTestFailure, Failure) {\n  const auto endpoint = fmt::format(\"{}:{}\", Network::Test::getLoopbackAddressUrlString(version_),\n                                    lookupPort(\"listener_0\"));\n\n  const auto start_time = timeSystem().monotonicTime();\n  const auto response = Utility::metadataFetcher(endpoint, \"\", \"\");\n  const auto end_time = timeSystem().monotonicTime();\n\n  EXPECT_FALSE(response.has_value());\n\n  // Verify correct number of retries\n  ASSERT_NE(nullptr, test_server_->counter(\"http.metadata_test.downstream_rq_completed\"));\n  EXPECT_EQ(4, test_server_->counter(\"http.metadata_test.downstream_rq_completed\")->value());\n\n  // Verify correct sleep time between retries: 4 * 1000 = 4000\n  EXPECT_LE(4000,\n            std::chrono::duration_cast<std::chrono::milliseconds>(end_time - 
start_time).count());\n}\n\nclass AwsMetadataIntegrationTestTimeout : public AwsMetadataIntegrationTestBase {\npublic:\n  AwsMetadataIntegrationTestTimeout() : AwsMetadataIntegrationTestBase(200, 10) {}\n};\n\nTEST_F(AwsMetadataIntegrationTestTimeout, Timeout) {\n  const auto endpoint = fmt::format(\"{}:{}\", Network::Test::getLoopbackAddressUrlString(version_),\n                                    lookupPort(\"listener_0\"));\n\n  const auto start_time = timeSystem().monotonicTime();\n  const auto response = Utility::metadataFetcher(endpoint, \"\", \"\");\n  const auto end_time = timeSystem().monotonicTime();\n\n  EXPECT_FALSE(response.has_value());\n\n  // We do now check http.metadata_test.downstream_rq_completed value here because it's\n  // behavior is different between Linux and Mac when Curl disconnects on timeout. On Mac it is\n  // incremented, while on Linux it is not.\n\n  // Verify correct sleep time between retries: 4 * 5000 = 20000\n  EXPECT_LE(20000,\n            std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count());\n  EXPECT_GT(40000,\n            std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/aws/credentials_provider_impl_test.cc",
    "content": "#include \"extensions/common/aws/credentials_provider_impl.h\"\n\n#include \"test/extensions/common/aws/mocks.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nclass EvironmentCredentialsProviderTest : public testing::Test {\npublic:\n  ~EvironmentCredentialsProviderTest() override {\n    TestEnvironment::unsetEnvVar(\"AWS_ACCESS_KEY_ID\");\n    TestEnvironment::unsetEnvVar(\"AWS_SECRET_ACCESS_KEY\");\n    TestEnvironment::unsetEnvVar(\"AWS_SESSION_TOKEN\");\n  }\n\n  EnvironmentCredentialsProvider provider_;\n};\n\nTEST_F(EvironmentCredentialsProviderTest, AllEnvironmentVars) {\n  TestEnvironment::setEnvVar(\"AWS_ACCESS_KEY_ID\", \"akid\", 1);\n  TestEnvironment::setEnvVar(\"AWS_SECRET_ACCESS_KEY\", \"secret\", 1);\n  TestEnvironment::setEnvVar(\"AWS_SESSION_TOKEN\", \"token\", 1);\n  const auto credentials = provider_.getCredentials();\n  EXPECT_EQ(\"akid\", credentials.accessKeyId().value());\n  EXPECT_EQ(\"secret\", credentials.secretAccessKey().value());\n  EXPECT_EQ(\"token\", credentials.sessionToken().value());\n}\n\nTEST_F(EvironmentCredentialsProviderTest, NoEnvironmentVars) {\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(EvironmentCredentialsProviderTest, MissingAccessKeyId) {\n  TestEnvironment::setEnvVar(\"AWS_SECRET_ACCESS_KEY\", \"secret\", 1);\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  
EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(EvironmentCredentialsProviderTest, NoSessionToken) {\n  TestEnvironment::setEnvVar(\"AWS_ACCESS_KEY_ID\", \"akid\", 1);\n  TestEnvironment::setEnvVar(\"AWS_SECRET_ACCESS_KEY\", \"secret\", 1);\n  const auto credentials = provider_.getCredentials();\n  EXPECT_EQ(\"akid\", credentials.accessKeyId().value());\n  EXPECT_EQ(\"secret\", credentials.secretAccessKey().value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nclass InstanceProfileCredentialsProviderTest : public testing::Test {\npublic:\n  InstanceProfileCredentialsProviderTest()\n      : api_(Api::createApiForTest(time_system_)),\n        provider_(*api_,\n                  [this](const std::string& host, const std::string& path,\n                         const std::string& auth_token) -> absl::optional<std::string> {\n                    return this->fetcher_.fetch(host, path, auth_token);\n                  }) {}\n\n  void expectCredentialListing(const absl::optional<std::string>& listing) {\n    EXPECT_CALL(fetcher_,\n                fetch(\"169.254.169.254:80\", \"/latest/meta-data/iam/security-credentials\", _))\n        .WillOnce(Return(listing));\n  }\n\n  void expectDocument(const absl::optional<std::string>& document) {\n    EXPECT_CALL(fetcher_,\n                fetch(\"169.254.169.254:80\", \"/latest/meta-data/iam/security-credentials/doc1\", _))\n        .WillOnce(Return(document));\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  Api::ApiPtr api_;\n  NiceMock<MockMetadataFetcher> fetcher_;\n  InstanceProfileCredentialsProvider provider_;\n};\n\nTEST_F(InstanceProfileCredentialsProviderTest, FailedCredentailListing) {\n  expectCredentialListing(absl::optional<std::string>());\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  
EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(InstanceProfileCredentialsProviderTest, EmptyCredentialListing) {\n  expectCredentialListing(\"\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(InstanceProfileCredentialsProviderTest, MissingDocument) {\n  expectCredentialListing(\"doc1\\ndoc2\\ndoc3\");\n  expectDocument(absl::optional<std::string>());\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(InstanceProfileCredentialsProviderTest, MalformedDocumenet) {\n  expectCredentialListing(\"doc1\");\n  expectDocument(R\"EOF(\nnot json\n)EOF\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(InstanceProfileCredentialsProviderTest, EmptyValues) {\n  expectCredentialListing(\"doc1\");\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"\",\n  \"SecretAccessKey\": \"\",\n  \"Token\": \"\"\n}\n)EOF\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(InstanceProfileCredentialsProviderTest, FullCachedCredentials) {\n  expectCredentialListing(\"doc1\");\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"akid\",\n  \"SecretAccessKey\": \"secret\",\n  \"Token\": \"token\"\n}\n)EOF\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_EQ(\"akid\", 
credentials.accessKeyId().value());\n  EXPECT_EQ(\"secret\", credentials.secretAccessKey().value());\n  EXPECT_EQ(\"token\", credentials.sessionToken().value());\n  const auto cached_credentials = provider_.getCredentials();\n  EXPECT_EQ(\"akid\", cached_credentials.accessKeyId().value());\n  EXPECT_EQ(\"secret\", cached_credentials.secretAccessKey().value());\n  EXPECT_EQ(\"token\", cached_credentials.sessionToken().value());\n}\n\nTEST_F(InstanceProfileCredentialsProviderTest, CredentialExpiration) {\n  InSequence sequence;\n  expectCredentialListing(\"doc1\");\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"akid\",\n  \"SecretAccessKey\": \"secret\",\n  \"Token\": \"token\"\n}\n)EOF\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_EQ(\"akid\", credentials.accessKeyId().value());\n  EXPECT_EQ(\"secret\", credentials.secretAccessKey().value());\n  EXPECT_EQ(\"token\", credentials.sessionToken().value());\n  time_system_.advanceTimeWait(std::chrono::hours(2));\n  expectCredentialListing(\"doc1\");\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"new_akid\",\n  \"SecretAccessKey\": \"new_secret\",\n  \"Token\": \"new_token\"\n}\n)EOF\");\n  const auto new_credentials = provider_.getCredentials();\n  EXPECT_EQ(\"new_akid\", new_credentials.accessKeyId().value());\n  EXPECT_EQ(\"new_secret\", new_credentials.secretAccessKey().value());\n  EXPECT_EQ(\"new_token\", new_credentials.sessionToken().value());\n}\n\nclass TaskRoleCredentialsProviderTest : public testing::Test {\npublic:\n  TaskRoleCredentialsProviderTest()\n      : api_(Api::createApiForTest(time_system_)),\n        provider_(\n            *api_,\n            [this](const std::string& host, const std::string& path,\n                   const absl::optional<std::string>& auth_token) -> absl::optional<std::string> {\n              return this->fetcher_.fetch(host, path, auth_token);\n            },\n            \"169.254.170.2:80/path/to/doc\", \"auth_token\") {\n    // Tue Jan  2 
03:04:05 UTC 2018\n    time_system_.setSystemTime(std::chrono::milliseconds(1514862245000));\n  }\n\n  void expectDocument(const absl::optional<std::string>& document) {\n    EXPECT_CALL(fetcher_, fetch(\"169.254.170.2:80\", \"/path/to/doc\", _)).WillOnce(Return(document));\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  Api::ApiPtr api_;\n  NiceMock<MockMetadataFetcher> fetcher_;\n  TaskRoleCredentialsProvider provider_;\n};\n\nTEST_F(TaskRoleCredentialsProviderTest, FailedFetchingDocument) {\n  expectDocument(absl::optional<std::string>());\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(TaskRoleCredentialsProviderTest, MalformedDocumenet) {\n  expectDocument(R\"EOF(\nnot json\n)EOF\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(TaskRoleCredentialsProviderTest, EmptyValues) {\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"\",\n  \"SecretAccessKey\": \"\",\n  \"Token\": \"\",\n  \"Expiration\": \"\"\n}\n)EOF\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_FALSE(credentials.accessKeyId().has_value());\n  EXPECT_FALSE(credentials.secretAccessKey().has_value());\n  EXPECT_FALSE(credentials.sessionToken().has_value());\n}\n\nTEST_F(TaskRoleCredentialsProviderTest, FullCachedCredentials) {\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"akid\",\n  \"SecretAccessKey\": \"secret\",\n  \"Token\": \"token\",\n  \"Expiration\": \"20180102T030500Z\"\n}\n)EOF\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_EQ(\"akid\", credentials.accessKeyId().value());\n  EXPECT_EQ(\"secret\", credentials.secretAccessKey().value());\n  
EXPECT_EQ(\"token\", credentials.sessionToken().value());\n  const auto cached_credentials = provider_.getCredentials();\n  EXPECT_EQ(\"akid\", cached_credentials.accessKeyId().value());\n  EXPECT_EQ(\"secret\", cached_credentials.secretAccessKey().value());\n  EXPECT_EQ(\"token\", cached_credentials.sessionToken().value());\n}\n\nTEST_F(TaskRoleCredentialsProviderTest, NormalCredentialExpiration) {\n  InSequence sequence;\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"akid\",\n  \"SecretAccessKey\": \"secret\",\n  \"Token\": \"token\",\n  \"Expiration\": \"20190102T030405Z\"\n}\n)EOF\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_EQ(\"akid\", credentials.accessKeyId().value());\n  EXPECT_EQ(\"secret\", credentials.secretAccessKey().value());\n  EXPECT_EQ(\"token\", credentials.sessionToken().value());\n  time_system_.advanceTimeWait(std::chrono::hours(2));\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"new_akid\",\n  \"SecretAccessKey\": \"new_secret\",\n  \"Token\": \"new_token\",\n  \"Expiration\": \"20190102T030405Z\"\n}\n)EOF\");\n  const auto cached_credentials = provider_.getCredentials();\n  EXPECT_EQ(\"new_akid\", cached_credentials.accessKeyId().value());\n  EXPECT_EQ(\"new_secret\", cached_credentials.secretAccessKey().value());\n  EXPECT_EQ(\"new_token\", cached_credentials.sessionToken().value());\n}\n\nTEST_F(TaskRoleCredentialsProviderTest, TimestampCredentialExpiration) {\n  InSequence sequence;\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"akid\",\n  \"SecretAccessKey\": \"secret\",\n  \"Token\": \"token\",\n  \"Expiration\": \"20180102T030405Z\"\n}\n)EOF\");\n  const auto credentials = provider_.getCredentials();\n  EXPECT_EQ(\"akid\", credentials.accessKeyId().value());\n  EXPECT_EQ(\"secret\", credentials.secretAccessKey().value());\n  EXPECT_EQ(\"token\", credentials.sessionToken().value());\n  expectDocument(R\"EOF(\n{\n  \"AccessKeyId\": \"new_akid\",\n  \"SecretAccessKey\": \"new_secret\",\n  \"Token\": 
\"new_token\",\n  \"Expiration\": \"20190102T030405Z\"\n}\n)EOF\");\n  const auto cached_credentials = provider_.getCredentials();\n  EXPECT_EQ(\"new_akid\", cached_credentials.accessKeyId().value());\n  EXPECT_EQ(\"new_secret\", cached_credentials.secretAccessKey().value());\n  EXPECT_EQ(\"new_token\", cached_credentials.sessionToken().value());\n}\n\nclass DefaultCredentialsProviderChainTest : public testing::Test {\npublic:\n  DefaultCredentialsProviderChainTest() : api_(Api::createApiForTest(time_system_)) {\n    EXPECT_CALL(factories_, createEnvironmentCredentialsProvider());\n  }\n\n  ~DefaultCredentialsProviderChainTest() override {\n    TestEnvironment::unsetEnvVar(\"AWS_CONTAINER_CREDENTIALS_RELATIVE_URI\");\n    TestEnvironment::unsetEnvVar(\"AWS_CONTAINER_CREDENTIALS_FULL_URI\");\n    TestEnvironment::unsetEnvVar(\"AWS_CONTAINER_AUTHORIZATION_TOKEN\");\n    TestEnvironment::unsetEnvVar(\"AWS_EC2_METADATA_DISABLED\");\n  }\n\n  class MockCredentialsProviderChainFactories : public CredentialsProviderChainFactories {\n  public:\n    MOCK_METHOD(CredentialsProviderSharedPtr, createEnvironmentCredentialsProvider, (), (const));\n    MOCK_METHOD(CredentialsProviderSharedPtr, createTaskRoleCredentialsProvider,\n                (Api::Api&, const MetadataCredentialsProviderBase::MetadataFetcher&,\n                 absl::string_view, absl::string_view),\n                (const));\n    MOCK_METHOD(CredentialsProviderSharedPtr, createInstanceProfileCredentialsProvider,\n                (Api::Api&, const MetadataCredentialsProviderBase::MetadataFetcher& fetcher),\n                (const));\n  };\n\n  Event::SimulatedTimeSystem time_system_;\n  Api::ApiPtr api_;\n  NiceMock<MockCredentialsProviderChainFactories> factories_;\n};\n\nTEST_F(DefaultCredentialsProviderChainTest, NoEnvironmentVars) {\n  EXPECT_CALL(factories_, createInstanceProfileCredentialsProvider(Ref(*api_), _));\n  DefaultCredentialsProviderChain chain(*api_, DummyMetadataFetcher(), 
factories_);\n}\n\nTEST_F(DefaultCredentialsProviderChainTest, MetadataDisabled) {\n  TestEnvironment::setEnvVar(\"AWS_EC2_METADATA_DISABLED\", \"true\", 1);\n  EXPECT_CALL(factories_, createInstanceProfileCredentialsProvider(Ref(*api_), _)).Times(0);\n  DefaultCredentialsProviderChain chain(*api_, DummyMetadataFetcher(), factories_);\n}\n\nTEST_F(DefaultCredentialsProviderChainTest, MetadataNotDisabled) {\n  TestEnvironment::setEnvVar(\"AWS_EC2_METADATA_DISABLED\", \"false\", 1);\n  EXPECT_CALL(factories_, createInstanceProfileCredentialsProvider(Ref(*api_), _));\n  DefaultCredentialsProviderChain chain(*api_, DummyMetadataFetcher(), factories_);\n}\n\nTEST_F(DefaultCredentialsProviderChainTest, RelativeUri) {\n  TestEnvironment::setEnvVar(\"AWS_CONTAINER_CREDENTIALS_RELATIVE_URI\", \"/path/to/creds\", 1);\n  EXPECT_CALL(factories_, createTaskRoleCredentialsProvider(Ref(*api_), _,\n                                                            \"169.254.170.2:80/path/to/creds\", \"\"));\n  DefaultCredentialsProviderChain chain(*api_, DummyMetadataFetcher(), factories_);\n}\n\nTEST_F(DefaultCredentialsProviderChainTest, FullUriNoAuthorizationToken) {\n  TestEnvironment::setEnvVar(\"AWS_CONTAINER_CREDENTIALS_FULL_URI\", \"http://host/path/to/creds\", 1);\n  EXPECT_CALL(factories_,\n              createTaskRoleCredentialsProvider(Ref(*api_), _, \"http://host/path/to/creds\", \"\"));\n  DefaultCredentialsProviderChain chain(*api_, DummyMetadataFetcher(), factories_);\n}\n\nTEST_F(DefaultCredentialsProviderChainTest, FullUriWithAuthorizationToken) {\n  TestEnvironment::setEnvVar(\"AWS_CONTAINER_CREDENTIALS_FULL_URI\", \"http://host/path/to/creds\", 1);\n  TestEnvironment::setEnvVar(\"AWS_CONTAINER_AUTHORIZATION_TOKEN\", \"auth_token\", 1);\n  EXPECT_CALL(factories_, createTaskRoleCredentialsProvider(\n                              Ref(*api_), _, \"http://host/path/to/creds\", \"auth_token\"));\n  DefaultCredentialsProviderChain chain(*api_, DummyMetadataFetcher(), 
factories_);\n}\n\nTEST(CredentialsProviderChainTest, getCredentials_noCredentials) {\n  auto mock_provider1 = std::make_shared<MockCredentialsProvider>();\n  auto mock_provider2 = std::make_shared<MockCredentialsProvider>();\n\n  EXPECT_CALL(*mock_provider1, getCredentials()).Times(1);\n  EXPECT_CALL(*mock_provider2, getCredentials()).Times(1);\n\n  CredentialsProviderChain chain;\n  chain.add(mock_provider1);\n  chain.add(mock_provider2);\n\n  const Credentials creds = chain.getCredentials();\n  EXPECT_EQ(Credentials(), creds);\n}\n\nTEST(CredentialsProviderChainTest, getCredentials_firstProviderReturns) {\n  auto mock_provider1 = std::make_shared<MockCredentialsProvider>();\n  auto mock_provider2 = std::make_shared<MockCredentialsProvider>();\n\n  const Credentials creds(\"access_key\", \"secret_key\");\n\n  EXPECT_CALL(*mock_provider1, getCredentials()).WillOnce(Return(creds));\n  EXPECT_CALL(*mock_provider2, getCredentials()).Times(0);\n\n  CredentialsProviderChain chain;\n  chain.add(mock_provider1);\n  chain.add(mock_provider2);\n\n  const Credentials ret_creds = chain.getCredentials();\n  EXPECT_EQ(creds, ret_creds);\n}\n\nTEST(CredentialsProviderChainTest, getCredentials_secondProviderReturns) {\n  auto mock_provider1 = std::make_shared<MockCredentialsProvider>();\n  auto mock_provider2 = std::make_shared<MockCredentialsProvider>();\n\n  const Credentials creds(\"access_key\", \"secret_key\");\n\n  EXPECT_CALL(*mock_provider1, getCredentials()).Times(1);\n  EXPECT_CALL(*mock_provider2, getCredentials()).WillOnce(Return(creds));\n\n  CredentialsProviderChain chain;\n  chain.add(mock_provider1);\n  chain.add(mock_provider2);\n\n  const Credentials ret_creds = chain.getCredentials();\n  EXPECT_EQ(creds, ret_creds);\n}\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/aws/credentials_provider_test.cc",
    "content": "#include \"extensions/common/aws/credentials_provider.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nTEST(Credentials, Default) {\n  const auto c = Credentials();\n  EXPECT_FALSE(c.accessKeyId().has_value());\n  EXPECT_FALSE(c.secretAccessKey().has_value());\n  EXPECT_FALSE(c.sessionToken().has_value());\n}\n\nTEST(Credentials, AllNull) {\n  const auto c = Credentials({}, {}, {});\n  EXPECT_FALSE(c.accessKeyId().has_value());\n  EXPECT_FALSE(c.secretAccessKey().has_value());\n  EXPECT_FALSE(c.sessionToken().has_value());\n}\n\nTEST(Credentials, AllEmpty) {\n  const auto c = Credentials(\"\", \"\", \"\");\n  EXPECT_FALSE(c.accessKeyId().has_value());\n  EXPECT_FALSE(c.secretAccessKey().has_value());\n  EXPECT_FALSE(c.sessionToken().has_value());\n}\n\nTEST(Credentials, OnlyAccessKeyId) {\n  const auto c = Credentials(\"access_key\", \"\", \"\");\n  EXPECT_EQ(\"access_key\", c.accessKeyId());\n  EXPECT_FALSE(c.secretAccessKey().has_value());\n  EXPECT_FALSE(c.sessionToken().has_value());\n}\n\nTEST(Credentials, AccessKeyIdAndSecretKey) {\n  const auto c = Credentials(\"access_key\", \"secret_key\", \"\");\n  EXPECT_EQ(\"access_key\", c.accessKeyId());\n  EXPECT_EQ(\"secret_key\", c.secretAccessKey());\n  EXPECT_FALSE(c.sessionToken().has_value());\n}\n\nTEST(Credentials, AllNonEmpty) {\n  const auto c = Credentials(\"access_key\", \"secret_key\", \"session_token\");\n  EXPECT_EQ(\"access_key\", c.accessKeyId());\n  EXPECT_EQ(\"secret_key\", c.secretAccessKey());\n  EXPECT_EQ(\"session_token\", c.sessionToken());\n}\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/aws/mocks.cc",
    "content": "#include \"test/extensions/common/aws/mocks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nMockCredentialsProvider::MockCredentialsProvider() = default;\n\nMockCredentialsProvider::~MockCredentialsProvider() = default;\n\nMockSigner::MockSigner() = default;\n\nMockSigner::~MockSigner() = default;\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/aws/mocks.h",
    "content": "#pragma once\n\n#include \"extensions/common/aws/credentials_provider.h\"\n#include \"extensions/common/aws/signer.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nclass MockCredentialsProvider : public CredentialsProvider {\npublic:\n  MockCredentialsProvider();\n  ~MockCredentialsProvider() override;\n\n  MOCK_METHOD(Credentials, getCredentials, ());\n};\n\nclass MockSigner : public Signer {\npublic:\n  MockSigner();\n  ~MockSigner() override;\n\n  MOCK_METHOD(void, sign, (Http::RequestMessage&, bool));\n  MOCK_METHOD(void, sign, (Http::RequestHeaderMap&));\n  MOCK_METHOD(void, sign, (Http::RequestHeaderMap&, const std::string&));\n};\n\nclass MockMetadataFetcher {\npublic:\n  virtual ~MockMetadataFetcher() = default;\n\n  MOCK_METHOD(absl::optional<std::string>, fetch,\n              (const std::string&, const std::string&, const absl::optional<std::string>&),\n              (const));\n};\n\nclass DummyMetadataFetcher {\npublic:\n  absl::optional<std::string> operator()(const std::string&, const std::string&,\n                                         const absl::optional<std::string>&) {\n    return absl::nullopt;\n  }\n};\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/aws/region_provider_impl_test.cc",
    "content": "#include \"extensions/common/aws/region_provider_impl.h\"\n\n#include \"test/test_common/environment.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\n\nclass EnvironmentRegionProviderTest : public testing::Test {\npublic:\n  ~EnvironmentRegionProviderTest() override { TestEnvironment::unsetEnvVar(\"AWS_REGION\"); }\n\n  EnvironmentRegionProvider provider_;\n};\n\nclass StaticRegionProviderTest : public testing::Test {\npublic:\n  StaticRegionProviderTest() : provider_(\"test-region\") {}\n\n  StaticRegionProvider provider_;\n};\n\nTEST_F(EnvironmentRegionProviderTest, SomeRegion) {\n  TestEnvironment::setEnvVar(\"AWS_REGION\", \"test-region\", 1);\n  EXPECT_EQ(\"test-region\", provider_.getRegion().value());\n}\n\nTEST_F(EnvironmentRegionProviderTest, NoRegion) { EXPECT_FALSE(provider_.getRegion().has_value()); }\n\nTEST_F(StaticRegionProviderTest, SomeRegion) {\n  EXPECT_EQ(\"test-region\", provider_.getRegion().value());\n}\n\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/aws/signer_impl_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/message_impl.h\"\n\n#include \"extensions/common/aws/signer_impl.h\"\n#include \"extensions/common/aws/utility.h\"\n\n#include \"test/extensions/common/aws/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\nnamespace {\n\nclass SignerImplTest : public testing::Test {\npublic:\n  SignerImplTest()\n      : credentials_provider_(new NiceMock<MockCredentialsProvider>()),\n        message_(new Http::RequestMessageImpl()),\n        signer_(\"service\", \"region\", CredentialsProviderSharedPtr{credentials_provider_},\n                time_system_),\n        credentials_(\"akid\", \"secret\"), token_credentials_(\"akid\", \"secret\", \"token\") {\n    // 20180102T030405Z\n    time_system_.setSystemTime(std::chrono::milliseconds(1514862245000));\n  }\n\n  void addMethod(const std::string& method) { message_->headers().setMethod(method); }\n\n  void addPath(const std::string& path) { message_->headers().setPath(path); }\n\n  void addHeader(const std::string& key, const std::string& value) {\n    message_->headers().addCopy(Http::LowerCaseString(key), value);\n  }\n\n  void setBody(const std::string& body) { message_->body().add(body); }\n\n  void expectSignHeaders(absl::string_view service_name, absl::string_view signature,\n                         absl::string_view payload) {\n    auto* credentials_provider = new NiceMock<MockCredentialsProvider>();\n    EXPECT_CALL(*credentials_provider, getCredentials()).WillOnce(Return(credentials_));\n    Http::TestRequestHeaderMapImpl headers{};\n    headers.setMethod(\"GET\");\n    headers.setPath(\"/\");\n    headers.addCopy(Http::LowerCaseString(\"host\"), \"www.example.com\");\n\n    SignerImpl signer(service_name, \"region\", 
CredentialsProviderSharedPtr{credentials_provider},\n                      time_system_);\n    signer.sign(headers);\n\n    EXPECT_EQ(fmt::format(\"AWS4-HMAC-SHA256 Credential=akid/20180102/region/{}/aws4_request, \"\n                          \"SignedHeaders=host;x-amz-content-sha256;x-amz-date, \"\n                          \"Signature={}\",\n                          service_name, signature),\n              headers.get(Http::CustomHeaders::get().Authorization)->value().getStringView());\n    EXPECT_EQ(payload, headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView());\n  }\n\n  NiceMock<MockCredentialsProvider>* credentials_provider_;\n  Event::SimulatedTimeSystem time_system_;\n  Http::RequestMessagePtr message_;\n  SignerImpl signer_;\n  Credentials credentials_;\n  Credentials token_credentials_;\n  absl::optional<std::string> region_;\n};\n\n// No authorization header should be present when the credentials are empty\nTEST_F(SignerImplTest, AnonymousCredentials) {\n  EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(Credentials()));\n  signer_.sign(*message_);\n  EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization));\n}\n\n// HTTP :method header is required\nTEST_F(SignerImplTest, MissingMethodException) {\n  EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_));\n  EXPECT_THROW_WITH_MESSAGE(signer_.sign(*message_), EnvoyException,\n                            \"Message is missing :method header\");\n  EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization));\n}\n\n// HTTP :path header is required\nTEST_F(SignerImplTest, MissingPathException) {\n  EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_));\n  addMethod(\"GET\");\n  EXPECT_THROW_WITH_MESSAGE(signer_.sign(*message_), EnvoyException,\n                            \"Message is missing :path header\");\n  EXPECT_EQ(nullptr, 
message_->headers().get(Http::CustomHeaders::get().Authorization));\n}\n\n// Verify we sign the date header\nTEST_F(SignerImplTest, SignDateHeader) {\n  EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_));\n  addMethod(\"GET\");\n  addPath(\"/\");\n  signer_.sign(*message_);\n  EXPECT_NE(nullptr, message_->headers().get(SignatureHeaders::get().ContentSha256));\n  EXPECT_EQ(\"20180102T030400Z\",\n            message_->headers().get(SignatureHeaders::get().Date)->value().getStringView());\n  EXPECT_EQ(\n      \"AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, \"\n      \"SignedHeaders=x-amz-content-sha256;x-amz-date, \"\n      \"Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42\",\n      message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView());\n}\n\n// Verify we sign the security token header if the token is present in the credentials\nTEST_F(SignerImplTest, SignSecurityTokenHeader) {\n  EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(token_credentials_));\n  addMethod(\"GET\");\n  addPath(\"/\");\n  signer_.sign(*message_);\n  EXPECT_EQ(\n      \"token\",\n      message_->headers().get(SignatureHeaders::get().SecurityToken)->value().getStringView());\n  EXPECT_EQ(\n      \"AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, \"\n      \"SignedHeaders=x-amz-content-sha256;x-amz-date;x-amz-security-token, \"\n      \"Signature=1d42526aabf7d8b6d7d33d9db43b03537300cc7e6bb2817e349749e0a08f5b5e\",\n      message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView());\n}\n\n// Verify we sign the content header as the hashed empty string if the body is empty\nTEST_F(SignerImplTest, SignEmptyContentHeader) {\n  EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_));\n  addMethod(\"GET\");\n  addPath(\"/\");\n  signer_.sign(*message_, true);\n  EXPECT_EQ(\n      
SignatureConstants::get().HashedEmptyString,\n      message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView());\n  EXPECT_EQ(\n      \"AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, \"\n      \"SignedHeaders=x-amz-content-sha256;x-amz-date, \"\n      \"Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42\",\n      message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView());\n}\n\n// Verify we sign the content header correctly when we have a body\nTEST_F(SignerImplTest, SignContentHeader) {\n  EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_));\n  addMethod(\"POST\");\n  addPath(\"/\");\n  setBody(\"test1234\");\n  signer_.sign(*message_, true);\n  EXPECT_EQ(\n      \"937e8d5fbb48bd4949536cd65b8d35c426b80d2f830c5c308e2cdec422ae2244\",\n      message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView());\n  EXPECT_EQ(\n      \"AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, \"\n      \"SignedHeaders=x-amz-content-sha256;x-amz-date, \"\n      \"Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b\",\n      message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView());\n}\n\n// Verify we sign some extra headers\nTEST_F(SignerImplTest, SignExtraHeaders) {\n  EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_));\n  addMethod(\"GET\");\n  addPath(\"/\");\n  addHeader(\"a\", \"a_value\");\n  addHeader(\"b\", \"b_value\");\n  addHeader(\"c\", \"c_value\");\n  signer_.sign(*message_);\n  EXPECT_EQ(\n      \"AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, \"\n      \"SignedHeaders=a;b;c;x-amz-content-sha256;x-amz-date, \"\n      \"Signature=0940025fcecfef5d7ee30e0a26a0957e116560e374878cd86ef4316c53ae9e81\",\n      
message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView());\n}\n\n// Verify signing a host header\nTEST_F(SignerImplTest, SignHostHeader) {\n  EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_));\n  addMethod(\"GET\");\n  addPath(\"/\");\n  addHeader(\"host\", \"www.example.com\");\n  signer_.sign(*message_);\n  EXPECT_EQ(\n      \"AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, \"\n      \"SignedHeaders=host;x-amz-content-sha256;x-amz-date, \"\n      \"Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd\",\n      message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView());\n}\n\n// Verify signing headers for services.\nTEST_F(SignerImplTest, SignHeadersByService) {\n  expectSignHeaders(\"s3\", \"d97cae067345792b78d2bad746f25c729b9eb4701127e13a7c80398f8216a167\",\n                    SignatureConstants::get().UnsignedPayload);\n  expectSignHeaders(\"service\", \"d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd\",\n                    SignatureConstants::get().HashedEmptyString);\n  expectSignHeaders(\"es\", \"0fd9c974bb2ad16c8d8a314dca4f6db151d32cbd04748d9c018afee2a685a02e\",\n                    SignatureConstants::get().UnsignedPayload);\n  expectSignHeaders(\"glacier\", \"8d1f241d77c64cda57b042cd312180f16e98dbd7a96e5545681430f8dbde45a0\",\n                    SignatureConstants::get().UnsignedPayload);\n}\n\n} // namespace\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/aws/utility_test.cc",
    "content": "#include \"extensions/common/aws/utility.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::ElementsAre;\nusing testing::Pair;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Aws {\nnamespace {\n\n// Headers must be in alphabetical order by virtue of std::map\nTEST(UtilityTest, CanonicalizeHeadersInAlphabeticalOrder) {\n  Http::TestRequestHeaderMapImpl headers{\n      {\"d\", \"d_value\"}, {\"f\", \"f_value\"}, {\"b\", \"b_value\"},\n      {\"e\", \"e_value\"}, {\"c\", \"c_value\"}, {\"a\", \"a_value\"},\n  };\n  const auto map = Utility::canonicalizeHeaders(headers);\n  EXPECT_THAT(map, ElementsAre(Pair(\"a\", \"a_value\"), Pair(\"b\", \"b_value\"), Pair(\"c\", \"c_value\"),\n                               Pair(\"d\", \"d_value\"), Pair(\"e\", \"e_value\"), Pair(\"f\", \"f_value\")));\n}\n\n// HTTP pseudo-headers should be ignored\nTEST(UtilityTest, CanonicalizeHeadersSkippingPseudoHeaders) {\n  Http::TestRequestHeaderMapImpl headers{\n      {\":path\", \"path_value\"},\n      {\":method\", \"GET\"},\n      {\"normal\", \"normal_value\"},\n  };\n  const auto map = Utility::canonicalizeHeaders(headers);\n  EXPECT_THAT(map, ElementsAre(Pair(\"normal\", \"normal_value\")));\n}\n\n// Repeated headers are joined with commas\nTEST(UtilityTest, CanonicalizeHeadersJoiningDuplicatesWithCommas) {\n  Http::TestRequestHeaderMapImpl headers{\n      {\"a\", \"a_value1\"},\n      {\"a\", \"a_value2\"},\n      {\"a\", \"a_value3\"},\n  };\n  const auto map = Utility::canonicalizeHeaders(headers);\n  EXPECT_THAT(map, ElementsAre(Pair(\"a\", \"a_value1,a_value2,a_value3\")));\n}\n\n// We canonicalize the :authority header as host\nTEST(UtilityTest, CanonicalizeHeadersAuthorityToHost) {\n  Http::TestRequestHeaderMapImpl headers{\n      {\":authority\", \"authority_value\"},\n  };\n  const auto map = Utility::canonicalizeHeaders(headers);\n  EXPECT_THAT(map, ElementsAre(Pair(\"host\", 
\"authority_value\")));\n}\n\n// Ports 80 and 443 are omitted from the host headers\nTEST(UtilityTest, CanonicalizeHeadersRemovingDefaultPortsFromHost) {\n  Http::TestRequestHeaderMapImpl headers_port80{\n      {\":authority\", \"example.com:80\"},\n  };\n  const auto map_port80 = Utility::canonicalizeHeaders(headers_port80);\n  EXPECT_THAT(map_port80, ElementsAre(Pair(\"host\", \"example.com\")));\n\n  Http::TestRequestHeaderMapImpl headers_port443{\n      {\":authority\", \"example.com:443\"},\n  };\n  const auto map_port443 = Utility::canonicalizeHeaders(headers_port443);\n  EXPECT_THAT(map_port443, ElementsAre(Pair(\"host\", \"example.com\")));\n}\n\n// Whitespace is trimmed from headers\nTEST(UtilityTest, CanonicalizeHeadersTrimmingWhitespace) {\n  Http::TestRequestHeaderMapImpl headers{\n      {\"leading\", \"    leading value\"},\n      {\"trailing\", \"trailing value    \"},\n      {\"internal\", \"internal    value\"},\n      {\"all\", \"    all    value    \"},\n  };\n  const auto map = Utility::canonicalizeHeaders(headers);\n  EXPECT_THAT(map,\n              ElementsAre(Pair(\"all\", \"all value\"), Pair(\"internal\", \"internal value\"),\n                          Pair(\"leading\", \"leading value\"), Pair(\"trailing\", \"trailing value\")));\n}\n\n// Headers that are likely to mutate are not considered canonical\nTEST(UtilityTest, CanonicalizeHeadersDropMutatingHeaders) {\n  Http::TestRequestHeaderMapImpl headers{\n      {\":authority\", \"example.com\"},          {\"x-forwarded-for\", \"1.2.3.4\"},\n      {\"x-forwarded-proto\", \"https\"},         {\"x-amz-date\", \"20130708T220855Z\"},\n      {\"x-amz-content-sha256\", \"e3b0c44...\"},\n  };\n  const auto map = Utility::canonicalizeHeaders(headers);\n  EXPECT_THAT(map,\n              ElementsAre(Pair(\"host\", \"example.com\"), Pair(\"x-amz-content-sha256\", \"e3b0c44...\"),\n                          Pair(\"x-amz-date\", \"20130708T220855Z\")));\n}\n\n// Verify the format of a minimalist canonical 
request\nTEST(UtilityTest, MinimalCanonicalRequest) {\n  std::map<std::string, std::string> headers;\n  const auto request = Utility::createCanonicalRequest(\"GET\", \"\", headers, \"content-hash\");\n  EXPECT_EQ(R\"(GET\n/\n\n\n\ncontent-hash)\",\n            request);\n}\n\nTEST(UtilityTest, CanonicalRequestWithQueryString) {\n  const std::map<std::string, std::string> headers;\n  const auto request = Utility::createCanonicalRequest(\"GET\", \"?query\", headers, \"content-hash\");\n  EXPECT_EQ(R\"(GET\n/\nquery\n\n\ncontent-hash)\",\n            request);\n}\n\nTEST(UtilityTest, CanonicalRequestWithHeaders) {\n  const std::map<std::string, std::string> headers = {\n      {\"header1\", \"value1\"},\n      {\"header2\", \"value2\"},\n      {\"header3\", \"value3\"},\n  };\n  const auto request = Utility::createCanonicalRequest(\"GET\", \"\", headers, \"content-hash\");\n  EXPECT_EQ(R\"(GET\n/\n\nheader1:value1\nheader2:value2\nheader3:value3\n\nheader1;header2;header3\ncontent-hash)\",\n            request);\n}\n\n// Verify headers are joined with \";\"\nTEST(UtilityTest, JoinCanonicalHeaderNames) {\n  std::map<std::string, std::string> headers = {\n      {\"header1\", \"value1\"},\n      {\"header2\", \"value2\"},\n      {\"header3\", \"value3\"},\n  };\n  const auto names = Utility::joinCanonicalHeaderNames(headers);\n  EXPECT_EQ(\"header1;header2;header3\", names);\n}\n\n// Verify we return \"\" when there are no headers\nTEST(UtilityTest, JoinCanonicalHeaderNamesWithEmptyMap) {\n  std::map<std::string, std::string> headers;\n  const auto names = Utility::joinCanonicalHeaderNames(headers);\n  EXPECT_EQ(\"\", names);\n}\n\n} // namespace\n} // namespace Aws\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/dynamic_forward_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"dns_cache_impl_test\",\n    srcs = [\"dns_cache_impl_test.cc\"],\n    deps = [\n        \":mocks\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_impl\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_manager_impl\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"dns_cache_resource_manager_test\",\n    srcs = [\"dns_cache_resource_manager_test.cc\"],\n    deps = [\n        \":mocks\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_impl\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_resource_manager\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_impl\",\n        \"//test/mocks/upstream:basic_resource_limit_mocks\",\n        \"@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc",
    "content": "#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h\"\n\n#include \"common/config/utility.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_impl.h\"\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h\"\n\n#include \"test/extensions/common/dynamic_forward_proxy/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::InSequence;\nusing testing::Return;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\nnamespace {\n\nclass DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedTime {\npublic:\n  void initialize() {\n    config_.set_name(\"foo\");\n    config_.set_dns_lookup_family(envoy::config::cluster::v3::Cluster::V4_ONLY);\n\n    EXPECT_CALL(dispatcher_, createDnsResolver(_, _)).WillOnce(Return(resolver_));\n    dns_cache_ =\n        std::make_unique<DnsCacheImpl>(dispatcher_, tls_, random_, loader_, store_, config_);\n    update_callbacks_handle_ = dns_cache_->addUpdateCallbacks(update_callbacks_);\n  }\n\n  ~DnsCacheImplTest() override {\n    dns_cache_.reset();\n    EXPECT_EQ(0, TestUtility::findGauge(store_, \"dns_cache.foo.num_hosts\")->value());\n  }\n\n  void checkStats(uint64_t query_attempt, uint64_t query_success, uint64_t query_failure,\n                  uint64_t address_changed, uint64_t added, uint64_t removed, uint64_t num_hosts) {\n    const auto counter_value = [this](const std::string& name) {\n      return TestUtility::findCounter(store_, \"dns_cache.foo.\" + name)->value();\n    };\n\n    EXPECT_EQ(query_attempt, counter_value(\"dns_query_attempt\"));\n    EXPECT_EQ(query_success, 
counter_value(\"dns_query_success\"));\n    EXPECT_EQ(query_failure, counter_value(\"dns_query_failure\"));\n    EXPECT_EQ(address_changed, counter_value(\"host_address_changed\"));\n    EXPECT_EQ(added, counter_value(\"host_added\"));\n    EXPECT_EQ(removed, counter_value(\"host_removed\"));\n    EXPECT_EQ(num_hosts, TestUtility::findGauge(store_, \"dns_cache.foo.num_hosts\")->value());\n  }\n\n  envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  std::shared_ptr<Network::MockDnsResolver> resolver_{std::make_shared<Network::MockDnsResolver>()};\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Runtime::MockLoader> loader_;\n  Stats::IsolatedStoreImpl store_;\n  std::unique_ptr<DnsCache> dns_cache_;\n  MockUpdateCallbacks update_callbacks_;\n  DnsCache::AddUpdateCallbacksHandlePtr update_callbacks_handle_;\n};\n\nMATCHER_P3(DnsHostInfoEquals, address, resolved_host, is_ip_address, \"\") {\n  bool equal = address == arg->address()->asString();\n  if (!equal) {\n    *result_listener << fmt::format(\"address '{}' != '{}'\", address, arg->address()->asString());\n    return equal;\n  }\n  equal &= resolved_host == arg->resolvedHost();\n  if (!equal) {\n    *result_listener << fmt::format(\"resolved_host '{}' != '{}'\", resolved_host,\n                                    arg->resolvedHost());\n    return equal;\n  }\n  equal &= is_ip_address == arg->isIpAddress();\n  if (!equal) {\n    *result_listener << fmt::format(\"is_ip_address '{}' != '{}'\", is_ip_address,\n                                    arg->isIpAddress());\n  }\n  return equal;\n}\n\n// Basic successful resolution and then re-resolution.\nTEST_F(DnsCacheImplTest, ResolveSuccess) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  Event::MockTimer* resolve_timer = new 
Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"foo.com\", DnsHostInfoEquals(\"10.0.0.1:80\", \"foo.com\", false)));\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"10.0.0.1\"}));\n\n  checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  // Re-resolve timer.\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  resolve_timer->invokeCallback();\n\n  checkStats(2 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  // Address does not change.\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"10.0.0.1\"}));\n\n  checkStats(2 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  // Re-resolve timer.\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), 
Return(&resolver_->active_query_)));\n  resolve_timer->invokeCallback();\n\n  checkStats(3 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  // Address does change.\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"foo.com\", DnsHostInfoEquals(\"10.0.0.2:80\", \"foo.com\", false)));\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"10.0.0.2\"}));\n\n  checkStats(3 /* attempt */, 3 /* success */, 0 /* failure */, 2 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n}\n\n// Ipv4 address.\nTEST_F(DnsCacheImplTest, Ipv4Address) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"127.0.0.1\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"127.0.0.1\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  EXPECT_CALL(\n      update_callbacks_,\n      onDnsHostAddOrUpdate(\"127.0.0.1\", DnsHostInfoEquals(\"127.0.0.1:80\", \"127.0.0.1\", true)));\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"127.0.0.1\"}));\n}\n\n// Ipv4 address with port.\nTEST_F(DnsCacheImplTest, Ipv4AddressWithPort) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  
Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"127.0.0.1\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"127.0.0.1:10000\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"127.0.0.1:10000\",\n                                   DnsHostInfoEquals(\"127.0.0.1:10000\", \"127.0.0.1\", true)));\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"127.0.0.1\"}));\n}\n\n// Ipv6 address.\nTEST_F(DnsCacheImplTest, Ipv6Address) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"::1\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"[::1]\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"[::1]\", DnsHostInfoEquals(\"[::1]:80\", \"::1\", true)));\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"::1\"}));\n}\n\n// Ipv6 address with port.\nTEST_F(DnsCacheImplTest, Ipv6AddressWithPort) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks 
callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"::1\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"[::1]:10000\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"[::1]:10000\", DnsHostInfoEquals(\"[::1]:10000\", \"::1\", true)));\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"::1\"}));\n}\n\n// TTL purge test.\nTEST_F(DnsCacheImplTest, TTL) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"foo.com\", DnsHostInfoEquals(\"10.0.0.1:80\", \"foo.com\", false)));\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             
TestUtility::makeDnsResponse({\"10.0.0.1\"}, std::chrono::seconds(0)));\n\n  checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  // Re-resolve with ~60s passed. TTL should still be OK at default of 5 minutes.\n  simTime().advanceTimeWait(std::chrono::milliseconds(60001));\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  resolve_timer->invokeCallback();\n  checkStats(2 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"10.0.0.1\"}));\n  checkStats(2 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  // Re-resolve with ~5m passed. 
This is not realistic as we would have re-resolved many times\n  // during this period but it's good enough for the test.\n  simTime().advanceTimeWait(std::chrono::milliseconds(300000));\n  EXPECT_CALL(update_callbacks_, onDnsHostRemove(\"foo.com\"));\n  resolve_timer->invokeCallback();\n  checkStats(2 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */,\n             1 /* added */, 1 /* removed */, 0 /* num hosts */);\n\n  // Make sure we don't get a cache hit the next time the host is requested.\n  resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n  checkStats(3 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */,\n             2 /* added */, 1 /* removed */, 1 /* num hosts */);\n}\n\n// TTL purge test with different refresh/TTL parameters.\nTEST_F(DnsCacheImplTest, TTLWithCustomParameters) {\n  *config_.mutable_dns_refresh_rate() = Protobuf::util::TimeUtil::SecondsToDuration(30);\n  *config_.mutable_host_ttl() = Protobuf::util::TimeUtil::SecondsToDuration(60);\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"foo.com\", DnsHostInfoEquals(\"10.0.0.1:80\", 
\"foo.com\", false)));\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(30000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"10.0.0.1\"}, std::chrono::seconds(0)));\n\n  // Re-resolve with ~30s passed. TTL should still be OK at 60s.\n  simTime().advanceTimeWait(std::chrono::milliseconds(30001));\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  resolve_timer->invokeCallback();\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(30000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"10.0.0.1\"}));\n\n  // Re-resolve with ~30s passed. TTL should expire.\n  simTime().advanceTimeWait(std::chrono::milliseconds(30001));\n  EXPECT_CALL(update_callbacks_, onDnsHostRemove(\"foo.com\"));\n  resolve_timer->invokeCallback();\n}\n\n// Resolve that completes inline without any callback.\nTEST_F(DnsCacheImplTest, InlineResolve) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  auto result = dns_cache_->loadDnsCacheEntry(\"localhost\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"localhost\", _, _))\n      .WillOnce(Invoke([](const std::string&, Network::DnsLookupFamily,\n                          Network::DnsResolver::ResolveCb callback) {\n        callback(Network::DnsResolver::ResolutionStatus::Success,\n                 TestUtility::makeDnsResponse({\"127.0.0.1\"}));\n        return nullptr;\n      }));\n  EXPECT_CALL(\n      
update_callbacks_,\n      onDnsHostAddOrUpdate(\"localhost\", DnsHostInfoEquals(\"127.0.0.1:80\", \"localhost\", false)));\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  post_cb();\n}\n\n// Resolve failure that returns no addresses.\nTEST_F(DnsCacheImplTest, ResolveFailure) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n  checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate(_, _)).Times(0);\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Failure, TestUtility::makeDnsResponse({}));\n  checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::InCache, result.status_);\n  EXPECT_EQ(result.handle_, nullptr);\n\n  // Re-resolve with ~5m passed. 
This is not realistic as we would have re-resolved many times\n  // during this period but it's good enough for the test.\n  simTime().advanceTimeWait(std::chrono::milliseconds(300001));\n  // Because resolution failed for the host, onDnsHostAddOrUpdate was not called.\n  // Therefore, onDnsHostRemove should not be called either.\n  EXPECT_CALL(update_callbacks_, onDnsHostRemove(_)).Times(0);\n  resolve_timer->invokeCallback();\n  // DnsCacheImpl state is updated accordingly: the host is removed.\n  checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 /* address changed */,\n             1 /* added */, 1 /* removed */, 0 /* num hosts */);\n}\n\nTEST_F(DnsCacheImplTest, ResolveFailureWithFailureRefreshRate) {\n  *config_.mutable_dns_failure_refresh_rate()->mutable_base_interval() =\n      Protobuf::util::TimeUtil::SecondsToDuration(7);\n  *config_.mutable_dns_failure_refresh_rate()->mutable_max_interval() =\n      Protobuf::util::TimeUtil::SecondsToDuration(10);\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n  checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate(_, _)).Times(0);\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  ON_CALL(random_, random()).WillByDefault(Return(8000));\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(1000), _));\n  
resolve_cb(Network::DnsResolver::ResolutionStatus::Failure, TestUtility::makeDnsResponse({}));\n  checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::InCache, result.status_);\n  EXPECT_EQ(result.handle_, nullptr);\n\n  // Re-resolve with ~5m passed. This is not realistic as we would have re-resolved many times\n  // during this period but it's good enough for the test.\n  simTime().advanceTimeWait(std::chrono::milliseconds(300001));\n  // Because resolution failed for the host, onDnsHostAddOrUpdate was not called.\n  // Therefore, onDnsHostRemove should not be called either.\n  EXPECT_CALL(update_callbacks_, onDnsHostRemove(_)).Times(0);\n  resolve_timer->invokeCallback();\n  // DnsCacheImpl state is updated accordingly: the host is removed.\n  checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 /* address changed */,\n             1 /* added */, 1 /* removed */, 0 /* num hosts */);\n}\n\nTEST_F(DnsCacheImplTest, ResolveSuccessWithEmptyResult) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n  checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  // A successful empty resolution DOES NOT update the host information.\n  EXPECT_CALL(update_callbacks_, 
onDnsHostAddOrUpdate(_, _)).Times(0);\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({}));\n  checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 0 /* address changed */,\n             1 /* added */, 0 /* removed */, 1 /* num hosts */);\n\n  result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::InCache, result.status_);\n  EXPECT_EQ(result.handle_, nullptr);\n\n  // Re-resolve with ~5m passed. This is not realistic as we would have re-resolved many times\n  // during this period but it's good enough for the test.\n  simTime().advanceTimeWait(std::chrono::milliseconds(300001));\n  // Because resolution failed for the host, onDnsHostAddOrUpdate was not called.\n  // Therefore, onDnsHostRemove should not be called either.\n  EXPECT_CALL(update_callbacks_, onDnsHostRemove(_)).Times(0);\n  resolve_timer->invokeCallback();\n  // DnsCacheImpl state is updated accordingly: the host is removed.\n  checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 0 /* address changed */,\n             1 /* added */, 1 /* removed */, 0 /* num hosts */);\n}\n\n// Cancel a cache load before the resolve completes.\nTEST_F(DnsCacheImplTest, CancelResolve) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  result.handle_.reset();\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"foo.com\", 
DnsHostInfoEquals(\"10.0.0.1:80\", \"foo.com\", false)));\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"10.0.0.1\"}));\n}\n\n// Two cache loads that are trying to resolve the same host. Make sure we only do a single resolve\n// and fire both callbacks on completion.\nTEST_F(DnsCacheImplTest, MultipleResolveSameHost) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks1;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result1 = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks1);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result1.status_);\n  EXPECT_NE(result1.handle_, nullptr);\n\n  MockLoadDnsCacheEntryCallbacks callbacks2;\n  auto result2 = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks2);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result2.status_);\n  EXPECT_NE(result2.handle_, nullptr);\n\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"foo.com\", DnsHostInfoEquals(\"10.0.0.1:80\", \"foo.com\", false)));\n  EXPECT_CALL(callbacks2, onLoadDnsCacheComplete());\n  EXPECT_CALL(callbacks1, onLoadDnsCacheComplete());\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"10.0.0.1\"}));\n}\n\n// Two cache loads that are resolving different hosts.\nTEST_F(DnsCacheImplTest, MultipleResolveDifferentHost) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks1;\n  Network::DnsResolver::ResolveCb resolve_cb1;\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb1), Return(&resolver_->active_query_)));\n  auto result1 = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks1);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, 
result1.status_);\n  EXPECT_NE(result1.handle_, nullptr);\n\n  MockLoadDnsCacheEntryCallbacks callbacks2;\n  Network::DnsResolver::ResolveCb resolve_cb2;\n  EXPECT_CALL(*resolver_, resolve(\"bar.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb2), Return(&resolver_->active_query_)));\n  auto result2 = dns_cache_->loadDnsCacheEntry(\"bar.com\", 443, callbacks2);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result2.status_);\n  EXPECT_NE(result2.handle_, nullptr);\n\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"bar.com\", DnsHostInfoEquals(\"10.0.0.1:443\", \"bar.com\", false)));\n  EXPECT_CALL(callbacks2, onLoadDnsCacheComplete());\n  resolve_cb2(Network::DnsResolver::ResolutionStatus::Success,\n              TestUtility::makeDnsResponse({\"10.0.0.1\"}));\n\n  EXPECT_CALL(update_callbacks_,\n              onDnsHostAddOrUpdate(\"foo.com\", DnsHostInfoEquals(\"10.0.0.2:80\", \"foo.com\", false)));\n  EXPECT_CALL(callbacks1, onLoadDnsCacheComplete());\n  resolve_cb1(Network::DnsResolver::ResolutionStatus::Success,\n              TestUtility::makeDnsResponse({\"10.0.0.2\"}));\n\n  auto hosts = dns_cache_->hosts();\n  EXPECT_EQ(2, hosts.size());\n  EXPECT_THAT(hosts[\"bar.com\"], DnsHostInfoEquals(\"10.0.0.1:443\", \"bar.com\", false));\n  EXPECT_THAT(hosts[\"foo.com\"], DnsHostInfoEquals(\"10.0.0.2:80\", \"foo.com\", false));\n}\n\n// A successful resolve followed by a cache hit.\nTEST_F(DnsCacheImplTest, CacheHit) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  EXPECT_CALL(update_callbacks_,\n              
onDnsHostAddOrUpdate(\"foo.com\", DnsHostInfoEquals(\"10.0.0.1:80\", \"foo.com\", false)));\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"10.0.0.1\"}));\n\n  result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::InCache, result.status_);\n  EXPECT_EQ(result.handle_, nullptr);\n}\n\n// Make sure we destroy active queries if the cache goes away.\nTEST_F(DnsCacheImplTest, CancelActiveQueriesOnDestroy) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(\"foo.com\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  EXPECT_CALL(resolver_->active_query_, cancel());\n  dns_cache_.reset();\n}\n\n// Invalid port\nTEST_F(DnsCacheImplTest, InvalidPort) {\n  initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(\"foo.com:abc\", _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com:abc\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_);\n  EXPECT_NE(result.handle_, nullptr);\n\n  EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate(_, _)).Times(0);\n  EXPECT_CALL(callbacks, onLoadDnsCacheComplete());\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({}));\n}\n\n// Max host overflow.\nTEST_F(DnsCacheImplTest, MaxHostOverflow) {\n  config_.mutable_max_hosts()->set_value(0);\n  
initialize();\n  InSequence s;\n\n  MockLoadDnsCacheEntryCallbacks callbacks;\n  auto result = dns_cache_->loadDnsCacheEntry(\"foo.com\", 80, callbacks);\n  EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Overflow, result.status_);\n  EXPECT_EQ(result.handle_, nullptr);\n  EXPECT_EQ(1, TestUtility::findCounter(store_, \"dns_cache.foo.host_overflow\")->value());\n}\n\nTEST_F(DnsCacheImplTest, CircuitBreakersNotInvoked) {\n  initialize();\n\n  auto raii_ptr = dns_cache_->canCreateDnsRequest(absl::nullopt);\n  EXPECT_NE(raii_ptr.get(), nullptr);\n}\n\nTEST_F(DnsCacheImplTest, DnsCacheCircuitBreakersOverflow) {\n  config_.mutable_dns_cache_circuit_breaker()->mutable_max_pending_requests()->set_value(0);\n  initialize();\n\n  auto raii_ptr = dns_cache_->canCreateDnsRequest(absl::nullopt);\n  EXPECT_EQ(raii_ptr.get(), nullptr);\n  EXPECT_EQ(1, TestUtility::findCounter(store_, \"dns_cache.foo.dns_rq_pending_overflow\")->value());\n}\n\nTEST_F(DnsCacheImplTest, ClustersCircuitBreakersOverflow) {\n  initialize();\n  NiceMock<Upstream::MockBasicResourceLimit> pending_requests_;\n\n  EXPECT_CALL(pending_requests_, canCreate()).WillOnce(Return(false));\n  auto raii_ptr = dns_cache_->canCreateDnsRequest(pending_requests_);\n  EXPECT_EQ(raii_ptr.get(), nullptr);\n  EXPECT_EQ(0, TestUtility::findCounter(store_, \"dns_cache.foo.dns_rq_pending_overflow\")->value());\n}\n\nTEST(DnsCacheImplOptionsTest, UseTcpForDnsLookupsOptionSet) {\n  NiceMock<Event::MockDispatcher> dispatcher;\n  std::shared_ptr<Network::MockDnsResolver> resolver{std::make_shared<Network::MockDnsResolver>()};\n  NiceMock<ThreadLocal::MockInstance> tls;\n  NiceMock<Random::MockRandomGenerator> random;\n  NiceMock<Runtime::MockLoader> loader;\n  Stats::IsolatedStoreImpl store;\n\n  envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config;\n  config.set_use_tcp_for_dns_lookups(true);\n  EXPECT_CALL(dispatcher, createDnsResolver(_, true)).WillOnce(Return(resolver));\n  DnsCacheImpl 
dns_cache_(dispatcher, tls, random, loader, store, config);\n}\n\nTEST(DnsCacheImplOptionsTest, UseTcpForDnsLookupsOptionUnSet) {\n  NiceMock<Event::MockDispatcher> dispatcher;\n  std::shared_ptr<Network::MockDnsResolver> resolver{std::make_shared<Network::MockDnsResolver>()};\n  NiceMock<ThreadLocal::MockInstance> tls;\n  NiceMock<Random::MockRandomGenerator> random;\n  NiceMock<Runtime::MockLoader> loader;\n  Stats::IsolatedStoreImpl store;\n\n  envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config;\n  config.set_use_tcp_for_dns_lookups(false);\n  EXPECT_CALL(dispatcher, createDnsResolver(_, false)).WillOnce(Return(resolver));\n  DnsCacheImpl dns_cache_(dispatcher, tls, random, loader, store, config);\n}\n\n// DNS cache manager config tests.\nTEST(DnsCacheManagerImplTest, LoadViaConfig) {\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<ThreadLocal::MockInstance> tls;\n  NiceMock<Random::MockRandomGenerator> random;\n  NiceMock<Runtime::MockLoader> loader;\n  Stats::IsolatedStoreImpl store;\n  DnsCacheManagerImpl cache_manager(dispatcher, tls, random, loader, store);\n\n  envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1;\n  config1.set_name(\"foo\");\n\n  auto cache1 = cache_manager.getCache(config1);\n  EXPECT_NE(cache1, nullptr);\n\n  envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config2;\n  config2.set_name(\"foo\");\n  EXPECT_EQ(cache1, cache_manager.getCache(config2));\n\n  envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config3;\n  config3.set_name(\"bar\");\n  auto cache2 = cache_manager.getCache(config3);\n  EXPECT_NE(cache2, nullptr);\n  EXPECT_NE(cache1, cache2);\n\n  envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config4;\n  config4.set_name(\"foo\");\n  config4.set_dns_lookup_family(envoy::config::cluster::v3::Cluster::V6_ONLY);\n  EXPECT_THROW_WITH_MESSAGE(cache_manager.getCache(config4), EnvoyException,\n                      
      \"config specified DNS cache 'foo' with different settings\");\n}\n\n// Note: this test is done here, rather than a TYPED_TEST_SUITE in\n// //test/common/config:utility_test, because we did not want to include an extension type in\n// non-extension test suites.\n// TODO(junr03): I ran into problems with templatizing this test and macro expansion.\n// I spent too much time trying to figure this out. So for the moment I have copied this test body\n// here. I will spend some more time fixing this, but wanted to land unblocking functionality first.\nTEST(UtilityTest, PrepareDnsRefreshStrategy) {\n  NiceMock<Random::MockRandomGenerator> random;\n\n  {\n    // dns_failure_refresh_rate not set.\n    envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig dns_cache_config;\n    BackOffStrategyPtr strategy = Config::Utility::prepareDnsRefreshStrategy<\n        envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig>(dns_cache_config,\n                                                                              5000, random);\n    EXPECT_NE(nullptr, dynamic_cast<FixedBackOffStrategy*>(strategy.get()));\n  }\n\n  {\n    // dns_failure_refresh_rate set.\n    envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig dns_cache_config;\n    dns_cache_config.mutable_dns_failure_refresh_rate()->mutable_base_interval()->set_seconds(7);\n    dns_cache_config.mutable_dns_failure_refresh_rate()->mutable_max_interval()->set_seconds(10);\n    BackOffStrategyPtr strategy = Config::Utility::prepareDnsRefreshStrategy<\n        envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig>(dns_cache_config,\n                                                                              5000, random);\n    EXPECT_NE(nullptr, dynamic_cast<JitteredExponentialBackOffStrategy*>(strategy.get()));\n  }\n\n  {\n    // dns_failure_refresh_rate set with invalid max_interval.\n    envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig 
dns_cache_config;\n    dns_cache_config.mutable_dns_failure_refresh_rate()->mutable_base_interval()->set_seconds(7);\n    dns_cache_config.mutable_dns_failure_refresh_rate()->mutable_max_interval()->set_seconds(2);\n    EXPECT_THROW_WITH_REGEX(\n        Config::Utility::prepareDnsRefreshStrategy<\n            envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig>(dns_cache_config,\n                                                                                  5000, random),\n        EnvoyException,\n        \"dns_failure_refresh_rate must have max_interval greater than \"\n        \"or equal to the base_interval\");\n  }\n}\n\n} // namespace\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager_test.cc",
    "content": "#include \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h\"\n\n#include \"common/config/utility.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_impl.h\"\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h\"\n\n#include \"test/extensions/common/dynamic_forward_proxy/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::_;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\nnamespace {\n\nclass DnsCacheResourceManagerTest : public testing::Test {\npublic:\n  DnsCacheResourceManagerTest() { ON_CALL(store_, gauge(_, _)).WillByDefault(ReturnRef(gauge_)); }\n\n  void setupResourceManager(std::string& config_yaml) {\n    envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheCircuitBreakers cb_config;\n    TestUtility::loadFromYaml(config_yaml, cb_config);\n\n    resource_manager_ =\n        std::make_unique<DnsCacheResourceManagerImpl>(store_, loader_, \"dummy\", cb_config);\n  }\n\n  void cleanup() {\n    auto& pending_requests = resource_manager_->pendingRequests();\n    while (pending_requests.count() != 0) {\n      pending_requests.dec();\n    }\n  }\n\n  std::unique_ptr<DnsCacheResourceManager> resource_manager_;\n  NiceMock<Stats::MockStore> store_;\n  NiceMock<Stats::MockGauge> gauge_;\n  NiceMock<Runtime::MockLoader> loader_;\n};\n\nTEST_F(DnsCacheResourceManagerTest, CheckDnsResource) {\n  std::string config_yaml = R\"EOF(\n    max_pending_requests: 3\n  )EOF\";\n  setupResourceManager(config_yaml);\n\n  auto& pending_requests = resource_manager_->pendingRequests();\n  EXPECT_EQ(3, pending_requests.max());\n  EXPECT_EQ(0, pending_requests.count());\n  EXPECT_TRUE(pending_requests.canCreate());\n\n  pending_requests.inc();\n  EXPECT_EQ(1, pending_requests.count());\n  
EXPECT_TRUE(pending_requests.canCreate());\n\n  pending_requests.inc();\n  pending_requests.inc();\n  EXPECT_EQ(3, pending_requests.count());\n  EXPECT_FALSE(pending_requests.canCreate());\n\n  pending_requests.dec();\n  EXPECT_EQ(2, pending_requests.count());\n  EXPECT_TRUE(pending_requests.canCreate());\n\n  cleanup();\n}\n} // namespace\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/common/dynamic_forward_proxy/mocks.cc",
    "content": "#include \"test/extensions/common/dynamic_forward_proxy/mocks.h\"\n\nusing testing::_;\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\n\nMockDnsCacheResourceManager::MockDnsCacheResourceManager() {\n  ON_CALL(*this, pendingRequests()).WillByDefault(ReturnRef(pending_requests_));\n}\nMockDnsCacheResourceManager::~MockDnsCacheResourceManager() = default;\n\nMockDnsCache::MockDnsCache() {\n  ON_CALL(*this, canCreateDnsRequest_(_)).WillByDefault(Return(nullptr));\n}\nMockDnsCache::~MockDnsCache() = default;\n\nMockLoadDnsCacheEntryHandle::MockLoadDnsCacheEntryHandle() = default;\nMockLoadDnsCacheEntryHandle::~MockLoadDnsCacheEntryHandle() { onDestroy(); }\n\nMockDnsCacheManager::MockDnsCacheManager() {\n  ON_CALL(*this, getCache(_)).WillByDefault(Return(dns_cache_));\n}\nMockDnsCacheManager::~MockDnsCacheManager() = default;\n\nMockDnsHostInfo::MockDnsHostInfo() {\n  ON_CALL(*this, address()).WillByDefault(ReturnPointee(&address_));\n  ON_CALL(*this, resolvedHost()).WillByDefault(ReturnRef(resolved_host_));\n}\nMockDnsHostInfo::~MockDnsHostInfo() = default;\n\nMockUpdateCallbacks::MockUpdateCallbacks() = default;\nMockUpdateCallbacks::~MockUpdateCallbacks() = default;\n\nMockLoadDnsCacheEntryCallbacks::MockLoadDnsCacheEntryCallbacks() = default;\nMockLoadDnsCacheEntryCallbacks::~MockLoadDnsCacheEntryCallbacks() = default;\n\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/dynamic_forward_proxy/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_impl.h\"\n\n#include \"test/mocks/upstream/basic_resource_limit.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace DynamicForwardProxy {\n\nclass MockDnsCacheResourceManager : public DnsCacheResourceManager {\npublic:\n  MockDnsCacheResourceManager();\n  ~MockDnsCacheResourceManager() override;\n\n  MOCK_METHOD(ResourceLimit&, pendingRequests, ());\n  MOCK_METHOD(DnsCacheCircuitBreakersStats&, stats, ());\n\n  NiceMock<Upstream::MockBasicResourceLimit> pending_requests_;\n};\n\nclass MockDnsCache : public DnsCache {\npublic:\n  MockDnsCache();\n  ~MockDnsCache() override;\n\n  struct MockLoadDnsCacheEntryResult {\n    LoadDnsCacheEntryStatus status_;\n    LoadDnsCacheEntryHandle* handle_;\n  };\n\n  LoadDnsCacheEntryResult loadDnsCacheEntry(absl::string_view host, uint16_t default_port,\n                                            LoadDnsCacheEntryCallbacks& callbacks) override {\n    MockLoadDnsCacheEntryResult result = loadDnsCacheEntry_(host, default_port, callbacks);\n    return {result.status_, LoadDnsCacheEntryHandlePtr{result.handle_}};\n  }\n  Upstream::ResourceAutoIncDecPtr\n  canCreateDnsRequest(ResourceLimitOptRef pending_requests) override {\n    Upstream::ResourceAutoIncDec* raii_ptr = canCreateDnsRequest_(pending_requests);\n    return std::unique_ptr<Upstream::ResourceAutoIncDec>(raii_ptr);\n  }\n  MOCK_METHOD(MockLoadDnsCacheEntryResult, loadDnsCacheEntry_,\n              (absl::string_view host, uint16_t default_port,\n               LoadDnsCacheEntryCallbacks& callbacks));\n\n  AddUpdateCallbacksHandlePtr addUpdateCallbacks(UpdateCallbacks& callbacks) override {\n    return AddUpdateCallbacksHandlePtr{addUpdateCallbacks_(callbacks)};\n  }\n  MOCK_METHOD(DnsCache::AddUpdateCallbacksHandle*, 
addUpdateCallbacks_,\n              (UpdateCallbacks & callbacks));\n\n  MOCK_METHOD((absl::flat_hash_map<std::string, DnsHostInfoSharedPtr>), hosts, ());\n  MOCK_METHOD(Upstream::ResourceAutoIncDec*, canCreateDnsRequest_, (ResourceLimitOptRef));\n};\n\nclass MockLoadDnsCacheEntryHandle : public DnsCache::LoadDnsCacheEntryHandle {\npublic:\n  MockLoadDnsCacheEntryHandle();\n  ~MockLoadDnsCacheEntryHandle() override;\n\n  MOCK_METHOD(void, onDestroy, ());\n};\n\nclass MockDnsCacheManager : public DnsCacheManager {\npublic:\n  MockDnsCacheManager();\n  ~MockDnsCacheManager() override;\n\n  MOCK_METHOD(DnsCacheSharedPtr, getCache,\n              (const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config));\n\n  std::shared_ptr<NiceMock<MockDnsCache>> dns_cache_{new NiceMock<MockDnsCache>()};\n};\n\nclass MockDnsHostInfo : public DnsHostInfo {\npublic:\n  MockDnsHostInfo();\n  ~MockDnsHostInfo() override;\n\n  MOCK_METHOD(Network::Address::InstanceConstSharedPtr, address, ());\n  MOCK_METHOD(const std::string&, resolvedHost, (), (const));\n  MOCK_METHOD(bool, isIpAddress, (), (const));\n  MOCK_METHOD(void, touch, ());\n\n  Network::Address::InstanceConstSharedPtr address_;\n  std::string resolved_host_;\n};\n\nclass MockUpdateCallbacks : public DnsCache::UpdateCallbacks {\npublic:\n  MockUpdateCallbacks();\n  ~MockUpdateCallbacks() override;\n\n  MOCK_METHOD(void, onDnsHostAddOrUpdate,\n              (const std::string& host, const DnsHostInfoSharedPtr& address));\n  MOCK_METHOD(void, onDnsHostRemove, (const std::string& host));\n};\n\nclass MockLoadDnsCacheEntryCallbacks : public DnsCache::LoadDnsCacheEntryCallbacks {\npublic:\n  MockLoadDnsCacheEntryCallbacks();\n  ~MockLoadDnsCacheEntryCallbacks() override;\n\n  MOCK_METHOD(void, onLoadDnsCacheComplete, ());\n};\n\n} // namespace DynamicForwardProxy\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/matcher/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"matcher_test\",\n    srcs = [\"matcher_test.cc\"],\n    deps = [\n        \"//source/extensions/common/matcher:matcher_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/common/matcher/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/common/matcher/matcher_test.cc",
    "content": "#include \"envoy/config/common/matcher/v3/matcher.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/common/matcher/matcher.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Matcher {\nnamespace {\n\nclass MatcherTestBase {\npublic:\n  std::vector<MatcherPtr> matchers_;\n  Matcher::MatchStatusVector statuses_;\n  envoy::config::common::matcher::v3::MatchPredicate config_;\n\n  enum class Direction { Request, Response };\n};\n\nclass TapMatcherTest : public MatcherTestBase, public testing::Test {\npublic:\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Http::TestRequestTrailerMapImpl request_trailers_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n};\n\nclass TapMatcherGenericBodyConfigTest : public MatcherTestBase, public ::testing::Test {};\n\nclass TapMatcherGenericBodyTest\n    : public MatcherTestBase,\n      public ::testing::TestWithParam<\n          std::tuple<MatcherTestBase::Direction,\n                     std::tuple<std::vector<std::string>, std::list<std::list<uint32_t>>,\n                                std::pair<bool, bool>>>> {\npublic:\n  TapMatcherGenericBodyTest();\n\n  Buffer::OwnedImpl data_;\n  std::vector<std::string> body_parts_;\n};\n\nTEST_F(TapMatcherTest, Any) {\n  const std::string matcher_yaml =\n      R\"EOF(\nany_match: true\n)EOF\";\n\n  TestUtility::loadFromYaml(matcher_yaml, config_);\n  buildMatcher(config_, matchers_);\n  EXPECT_EQ(1, matchers_.size());\n  statuses_.resize(matchers_.size());\n  matchers_[0]->onNewStream(statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpRequestHeaders(request_headers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_));\n  
matchers_[0]->onHttpRequestTrailers(request_trailers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpResponseHeaders(response_headers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpResponseTrailers(response_trailers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_));\n}\n\nTEST_F(TapMatcherTest, Not) {\n  const std::string matcher_yaml =\n      R\"EOF(\nnot_match:\n  any_match: true\n)EOF\";\n\n  TestUtility::loadFromYaml(matcher_yaml, config_);\n  buildMatcher(config_, matchers_);\n  EXPECT_EQ(2, matchers_.size());\n  statuses_.resize(matchers_.size());\n  matchers_[0]->onNewStream(statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpRequestHeaders(request_headers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpRequestTrailers(request_trailers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpResponseHeaders(response_headers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpResponseTrailers(response_trailers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_));\n}\n\nTEST_F(TapMatcherTest, AndMightChangeStatus) {\n  const std::string matcher_yaml =\n      R\"EOF(\nand_match:\n  rules:\n    - http_response_headers_match:\n        headers:\n          - name: bar\n            exact_match: baz\n)EOF\";\n\n  TestUtility::loadFromYaml(matcher_yaml, config_);\n  buildMatcher(config_, matchers_);\n  EXPECT_EQ(2, matchers_.size());\n  statuses_.resize(matchers_.size());\n  matchers_[0]->onNewStream(statuses_);\n  
EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpRequestHeaders(request_headers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpRequestTrailers(request_trailers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpResponseHeaders(response_headers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_));\n  matchers_[0]->onHttpResponseTrailers(response_trailers_, statuses_);\n  EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_));\n}\n\nTapMatcherGenericBodyTest::TapMatcherGenericBodyTest() {\n  std::string hex;\n  body_parts_.push_back(\"This is generic body matcher test for envoy\"); // Index 0\n  body_parts_.push_back(\"proxy used to create and assemble http body\"); // Index 1\n  body_parts_.push_back(\"env\");                                         // Index 2\n  body_parts_.push_back(\"oyp\");                                         // Index 3\n  body_parts_.push_back(\"roxy\");                                        // Index 4\n  body_parts_.push_back(\"roxy layer 7\");                                // Index 5\n  body_parts_.push_back(\"blah\");                                        // Index 6\n  hex = \"xx\";\n  unsigned char buf[] = {0xde, 0xad};\n  memcpy(const_cast<char*>(hex.data()), buf, 2);\n  body_parts_.push_back(hex); // Index 7\n  unsigned char buf1[] = {0xbe, 0xef};\n  memcpy(const_cast<char*>(hex.data()), buf1, 2);\n  body_parts_.push_back(hex); // Index 8\n}\n\n// This test initializes matcher with several patterns. The length of the longest\n// pattern is used to initialize overlap_ buffer.\n// The longest pattern is found first. 
This should result in less buffering\n// required for locating remaining patterns.\nTEST_F(TapMatcherGenericBodyTest, ResizeOverlap) {\n  std::string matcher_yaml = R\"EOF(\nhttp_request_generic_body_match:\n  patterns:\n    - string_match: generic\n    - string_match: lay\n)EOF\";\n  TestUtility::loadFromYaml(matcher_yaml, config_);\n  buildMatcher(config_, matchers_);\n  EXPECT_EQ(1, matchers_.size());\n  statuses_.resize(matchers_.size());\n  matchers_[0]->onNewStream(statuses_);\n\n  const auto& ctx = reinterpret_cast<HttpGenericBodyMatcherCtx*>(statuses_[0].ctx_.get());\n  // 6 is length of \"generic\"\n  ASSERT_THAT(ctx->overlap_.capacity(), 6);\n  // 2 patterns must be located\n  ASSERT_THAT(ctx->patterns_index_.size(), 2);\n\n  // Process body chunk which produces no match.\n  // It should fill the overlap_ buffer to full capacity.\n  data_.add(body_parts_[1].data(), body_parts_[1].length());\n  matchers_[0]->onRequestBody(data_, statuses_);\n  ASSERT_THAT(ctx->overlap_.size(), 6);\n  ASSERT_THAT(ctx->capacity_, 6);\n\n  // Now pass the chunk which matches \"generic\" pattern.\n  data_.drain(data_.length());\n  data_.add(body_parts_[0].data(), body_parts_[0].length());\n  matchers_[0]->onRequestBody(data_, statuses_);\n\n  // Size of patterns_index_ should drop down to one.\n  // Capacity of the overlap_ should drop to to 2, as the longest pattern not found yet is 3 chars\n  // long. 
Also 2 bytes should have been copied to overlap, so its size is 2.\n  ASSERT_THAT(ctx->patterns_index_.size(), 1);\n  ASSERT_THAT(ctx->overlap_.size(), 2);\n  ASSERT_THAT(ctx->capacity_, 2);\n}\n\n// Test the case when hex string is not even number of characters\nTEST_F(TapMatcherGenericBodyTest, WrongConfigTest) {\n  std::string matcher_yaml = R\"EOF(\nhttp_request_generic_body_match:\n  patterns:\n    - binary_match: 4rdHFh%2\n)EOF\";\n  ASSERT_ANY_THROW(TestUtility::loadFromYaml(matcher_yaml, config_));\n}\n\n// Test different configurations against the body.\n// Parameterized test passes various configurations\n// which are appended to the yaml string.\nTEST_P(TapMatcherGenericBodyTest, GenericBodyTest) {\n  Direction dir = std::get<0>(GetParam());\n  std::string matcher_yaml;\n  if (Direction::Request == dir) {\n    matcher_yaml =\n        R\"EOF(http_request_generic_body_match:\n  patterns:)EOF\";\n  } else {\n    matcher_yaml =\n        R\"EOF(http_response_generic_body_match:\n  patterns:)EOF\";\n  }\n\n  auto text_and_result = std::get<1>(GetParam());\n  // Append vector of matchers\n  for (const auto& i : std::get<0>(text_and_result)) {\n    matcher_yaml += '\\n';\n    matcher_yaml += i;\n    matcher_yaml += '\\n';\n  }\n\n  TestUtility::loadFromYaml(matcher_yaml, config_);\n  buildMatcher(config_, matchers_);\n  EXPECT_EQ(1, matchers_.size());\n  statuses_.resize(matchers_.size());\n  matchers_[0]->onNewStream(statuses_);\n\n  // Now create data. The data is passed to matcher in several\n  // steps to simulate that body was not received in one continuous\n  // chunk. 
Data for each step is reassembled from body_parts_.\n  for (const auto& i : std::get<1>(text_and_result)) {\n    data_.drain(data_.length());\n    for (const auto& j : i) {\n      data_.add(body_parts_[j].data(), body_parts_[j].length());\n    }\n\n    if (Direction::Request == dir) {\n      matchers_[0]->onRequestBody(data_, statuses_);\n    } else {\n      matchers_[0]->onResponseBody(data_, statuses_);\n    }\n  }\n  const std::pair<bool, bool>& expected = std::get<2>(text_and_result);\n  EXPECT_EQ((Matcher::MatchStatus{expected.first, expected.second}),\n            matchers_[0]->matchStatus(statuses_));\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    TapMatcherGenericBodyTestSuite, TapMatcherGenericBodyTest,\n    ::testing::Combine(\n        ::testing::Values(MatcherTestBase::Direction::Request,\n                          MatcherTestBase::Direction::Response),\n        ::testing::Values(\n            // SEARCHING FOR SINGLE PATTERN - no limit\n            // Should match - there is a single body chunk and envoy is in the body\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\"},\n                            std::list<std::list<uint32_t>>{{0}}, std::make_pair(true, false)),\n            // Should match - single body and `envoyproxy` is there\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoyproxy\\\"\"},\n                            std::list<std::list<uint32_t>>{{0, 1}}, std::make_pair(true, false)),\n            // Should match - 2 body chunks. First contains 'envoy' at the end and the second\n            // chunk contains 'proxy' at the beginning.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoyproxy\\\"\"},\n                            std::list<std::list<uint32_t>>{{0}, {1}}, std::make_pair(true, false)),\n            // Should not match - 2 body chunks. 
First chunk does not contain 'enwoy' at the end but\n            // should match 'en' and then bail out.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"enwoyproxy\\\"\"},\n                            std::list<std::list<uint32_t>>{{0}, {1}}, std::make_pair(false, true)),\n            // Should match - 3 body chunks containing string `envoyproxy` when reassembled.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoyproxy\\\"\"},\n                            std::list<std::list<uint32_t>>{{2}, {3}, {4}},\n                            std::make_pair(true, false)),\n            // Should match - 3 body chunks containing string ``envoyproxy layer`` when reassembled.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoyproxy\\\"\"},\n                            std::list<std::list<uint32_t>>{{2}, {3}, {5}},\n                            std::make_pair(true, false)),\n            // Should match - 4 body chunks The last 3 contain string ``envoyproxy layer`` when\n            // reassembled.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoyproxy\\\"\"},\n                            std::list<std::list<uint32_t>>{{6}, {2}, {3}, {5}},\n                            std::make_pair(true, false)),\n            // Should match - First few chunks does not match, then 3 reassembled match\n            // `envoyproxy`.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoyproxy\\\"\"},\n                            std::list<std::list<uint32_t>>{{6}, {6}, {6}, {2}, {3}, {5}, {6}},\n                            std::make_pair(true, false)),\n            // Should match - chunk #7 contains hex '0xdead (3q0= in base64 format)'.\n            std::make_tuple(std::vector<std::string>{\"    - binary_match: \\\"3q0=\\\"\"},\n                            std::list<std::list<uint32_t>>{{6}, {6}, {7}, {6}},\n                            
std::make_pair(true, false)),\n            // Should match - chunk #7 contains 0xdead and chunk 8 contains 0xbeef\n            // 0xdeadbeef encoded in base64 format is '3q2+7w=='.\n            std::make_tuple(std::vector<std::string>{\"    - binary_match: \\\"3q2+7w==\\\"\"},\n                            std::list<std::list<uint32_t>>{{6}, {6}, {7}, {8}, {6}},\n                            std::make_pair(true, false)),\n            // Should NOT match - hex 0xdeed (3u0= in base64 format) is not there\n            std::make_tuple(std::vector<std::string>{\"    - binary_match: \\\"3u0=\\\"\"},\n                            std::list<std::list<uint32_t>>{{6}, {6}, {7}, {8}, {6}},\n                            std::make_pair(false, true)),\n\n            // SEARCHING FOR SINGLE PATTERN - with limit\n            // Should match - there is a single body chunk and 'This' is within\n            // search limit.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"This\\\"\",\n                                                     \"  bytes_limit: 10\"},\n                            std::list<std::list<uint32_t>>{{0}}, std::make_pair(true, false)),\n            // Should NOT match - there is a single body chunk and envoy is in the body\n            // but outside of the limit\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"  bytes_limit: 10\"},\n                            std::list<std::list<uint32_t>>{{0}}, std::make_pair(false, false)),\n            // Should NOT match - 2 body chunks. First contains 'envoy' at the end and the second\n            // chunk contains 'proxy' at the beginning. 
Search is limited to the first 10 bytes\n            //  - 'proxy' in the second chunk should not be found as it is outside of the search\n            //  limit.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"proxy\\\"\",\n                                                     \"  bytes_limit: 10\"},\n                            std::list<std::list<uint32_t>>{{0}, {1}}, std::make_pair(false, false)),\n            // Should match - 2 body chunks. First contains 'envoy' at the end and the second\n            // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48\n            // so should be found when search limit is 48.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"proxy\\\"\",\n                                                     \"  bytes_limit: 48\"},\n                            std::list<std::list<uint32_t>>{{0}, {1}}, std::make_pair(true, false)),\n            // Should NOT match - 2 body chunks. First contains 'envoy' at the end and the second\n            // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48.\n            // Search limit is 47 bytes, so the last character of 'proxy' is outside of the search\n            // limit.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"proxy\\\"\",\n                                                     \"  bytes_limit: 47\"},\n                            std::list<std::list<uint32_t>>{{0}, {1}}, std::make_pair(false, false)),\n            // Should match - 2 body chunks. First contains 'envoy' at the end and the second\n            // chunk contains 'proxy' at the beginning. 
'proxy' is located at bytes 44-48.\n            // Search limit is 46 bytes, which is enough to include 'envoypro' in search.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoypro\\\"\",\n                                                     \"  bytes_limit: 46\"},\n                            std::list<std::list<uint32_t>>{{0}, {1}}, std::make_pair(true, false)),\n            // Should NOT match - 2 body chunks. First contains 'envoy' at the end and the second\n            // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48.\n            // Search limit is 45 bytes, so the last character of `envoyproxy` is outside of the\n            // search limit.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoypro\\\"\",\n                                                     \"  bytes_limit: 45\"},\n                            std::list<std::list<uint32_t>>{{0}, {1}}, std::make_pair(false, false)),\n\n            // SEARCHING FOR MULTIPLE PATTERNS - no limit\n            // Should NOT match. None of the patterns is in the body.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"balancer\\\"\",\n                                                     \"    - string_match: \\\"error\\\"\"},\n                            std::list<std::list<uint32_t>>{{0}}, std::make_pair(false, true)),\n            // Should NOT match. One pattern is in the body but the second is not.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - string_match: \\\"error\\\"\"},\n                            std::list<std::list<uint32_t>>{{0}}, std::make_pair(false, true)),\n            // Should match. 
Both patterns are in the body (concatenated frags 0 and 1).\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - string_match: \\\"proxy\\\"\"},\n                            std::list<std::list<uint32_t>>{{0, 1}}, std::make_pair(true, false)),\n            // SPELLCHECKER(off)\n            // Should match. Both patterns should be found. 'envoy' is in the first\n            // chunk and '0xbeef' (`vu8=` in base64 format) is in the chunk 8.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - binary_match: \\\"vu8=\\\"\"},\n                            std::list<std::list<uint32_t>>{{0, 1}, {8}, {6}},\n                            std::make_pair(true, false)),\n            // Should match. Both patterns should be found. '0xdeadbeef' is spread\n            // across two chunks - 7 and 8. The second pattern 'envoy' is in chunk 0.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - binary_match: \\\"3q2+7w==\\\"\"},\n                            std::list<std::list<uint32_t>>{{7}, {8}, {6, 0}},\n                            std::make_pair(true, false)),\n            // Should match. One pattern is substring of the other and they both\n            // are located part in chunk 0 and part in chunk 1.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoyproxy\\\"\",\n                                                     \"    - string_match: \\\"voypro\\\"\"},\n                            std::list<std::list<uint32_t>>{{6}, {0}, {1}, {8}, {6}},\n                            std::make_pair(true, false)),\n            // Should match. 
Duplicated pattern which is found in the body.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoyproxy\\\"\",\n                                                     \"    - string_match: \\\"envoyproxy\\\"\"},\n                            std::list<std::list<uint32_t>>{{6}, {0}, {1}, {8}, {6}},\n                            std::make_pair(true, false)),\n            // Test starting search from some offset for shorter patterns.\n            // Overlap buffer size will be initialized for longest pattern but\n            // search for shorter patterns should start from some index in overlap\n            // buffer. Make sure that the index is enough for the shorter pattern to be found.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"assemble\\\"\",\n                                                     \"    - string_match: \\\"envoyp\\\"\"},\n                            std::list<std::list<uint32_t>>{{0, 1}}, std::make_pair(true, false)),\n            // SEARCHING FOR MULTIPLE PATTERNS - with limit\n            // Should NOT match. None of the patterns is in the body.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"balancer\\\"\",\n                                                     \"    - string_match: \\\"error\\\"\",\n                                                     \"  bytes_limit: 15\"},\n                            std::list<std::list<uint32_t>>{{0}}, std::make_pair(false, false)),\n            // Should NOT match. 
One pattern is in the body but the second is not.\n            // Search limit is large enough to find the first pattern.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - string_match: \\\"error\\\"\",\n                                                     \"  bytes_limit: 35\"},\n                            std::list<std::list<uint32_t>>{{0}}, std::make_pair(false, false)),\n            // Should NOT match. One pattern is in the body but the second is not.\n            // Search limit is small so none of the patterns should be found.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - string_match: \\\"error\\\"\",\n                                                     \"  bytes_limit: 5\"},\n                            std::list<std::list<uint32_t>>{{0}}, std::make_pair(false, false)),\n            // Should NOT match. Both patterns are in the body (concatenated frags 0 and 1).\n            // Limit includes only the first pattern.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - string_match: \\\"proxy\\\"\",\n                                                     \"  bytes_limit: 30\"},\n                            std::list<std::list<uint32_t>>{{0, 1}}, std::make_pair(false, false)),\n            // Should match. Both patterns should be found. 
'envoy' is in the first\n            // chunk and '0xbeef (vu8= in base64 format)' is in the chunk 8 and search limit is\n            // large enough to include 2 patterns\n            std::make_tuple(\n                std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                         \"    - binary_match: \\\"vu8=\\\"\", \"  bytes_limit: 90\"},\n                std::list<std::list<uint32_t>>{{0, 1}, {8}, {6}}, std::make_pair(true, false)),\n            // Should match. Both patterns should be found. '0xdeadbeef  (3q2+7w== in base64)' is\n            // spread across two chunks - 7 and 8. The second pattern 'envoy' is in chunk 0.\n            std::make_tuple(\n                std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                         \"    - binary_match: \\\"3q2+7w==\\\"\", \"  bytes_limit: 85\"},\n                std::list<std::list<uint32_t>>{{7}, {8}, {6, 0}}, std::make_pair(true, false)),\n            // Should match. Search limit ends exactly where '0xdeadbeef (3q2+7w== in base64)' ends.\n            std::make_tuple(\n                std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                         \"    - binary_match: \\\"3q2+7w==\\\"\", \"  bytes_limit: 47\"},\n                std::list<std::list<uint32_t>>{{0}, {7}, {8}, {6, 0}}, std::make_pair(true, false)),\n            // Should NOT match. 
Search limit ends exactly one byte before end of '0xdeadbeef\n            // (3q2+7w== in base64)'.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - binary_match: \\\"3q2+7w==\\\"\",\n                                                     \"  bytes_limit: 46\"},\n                            std::list<std::list<uint32_t>>{{0}, {7}, {8}, {6, 0}},\n                            std::make_pair(false, false)),\n            // Test the situation when end of the search limit overlaps with end of first chunk.\n            // Should NOT match. The second pattern should not be found.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - binary_match: \\\"3q2+7w==\\\"\",\n                                                     \"  bytes_limit: 43\"},\n                            std::list<std::list<uint32_t>>{{0}, {7}, {8}, {6, 0}},\n                            std::make_pair(false, false)),\n\n            // SPELLCHECKER(on)\n            // Now pass enormously large value. It should work just fine.\n            std::make_tuple(std::vector<std::string>{\"    - string_match: \\\"envoy\\\"\",\n                                                     \"    - binary_match: \\\"3q2+7w==\\\"\",\n                                                     \"  bytes_limit: 50000000\"},\n                            std::list<std::list<uint32_t>>{{0}, {7}, {8}, {6, 0}},\n                            std::make_pair(true, false)))));\n\n// Test takes one long pattern existing on the boundary of two body chunks and generates random\n// number of substrings of various lengths. All substrings and original long pattern are added to\n// the matcher's config. Next the two body chunks are passed to the matcher. 
In all cases the\n// matcher should report that match was found.\nTEST_F(TapMatcherGenericBodyTest, RandomLengthOverlappingPatterns) {\n  std::string pattern = \"envoyproxy\";\n\n  // Loop through fairly large number of tests\n  for (size_t i = 0; i < 10 * pattern.length(); i++) {\n    std::string matcher_yaml = R\"EOF(\nhttp_request_generic_body_match:\n  patterns:\n)EOF\";\n    // generate number of substrings which will be derived from pattern\n    uint32_t num = std::rand() % 10;\n    for (size_t j = 0; j < num; j++) {\n      std::string yaml_line = \"  - string_match: \";\n\n      // Generate random start index.\n      const uint32_t start = std::rand() % (pattern.length() - 1);\n      // Generate random length. Minimum 1 character.\n      const uint32_t len = 1 + std::rand() % (pattern.length() - start - 1);\n      yaml_line += \"\\\"\" + pattern.substr(start, len) + \"\\\"\\n\";\n      matcher_yaml += yaml_line;\n    }\n    // Finally add the original pattern, but not in all cases\n    if (0 == (num % 2)) {\n      matcher_yaml += \"  - string_match: \" + pattern + \"\\n\";\n    }\n\n    // Initialize matcher.\n    TestUtility::loadFromYaml(matcher_yaml, config_);\n    buildMatcher(config_, matchers_);\n    EXPECT_EQ(1, matchers_.size());\n    statuses_.resize(matchers_.size());\n    matchers_[0]->onNewStream(statuses_);\n\n    EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_));\n\n    // Use body chunks #0 and #1\n    data_.drain(data_.length());\n    data_.add(body_parts_[0].data(), body_parts_[0].length());\n    matchers_[0]->onRequestBody(data_, statuses_);\n    data_.drain(data_.length());\n    data_.add(body_parts_[1].data(), body_parts_[1].length());\n    matchers_[0]->onRequestBody(data_, statuses_);\n\n    // Check the result. 
All patterns should be found.\n    EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_));\n\n    matchers_.clear();\n  }\n}\n} // namespace\n} // namespace Matcher\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/proxy_protocol/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"proxy_protocol_header_test\",\n    srcs = [\"proxy_protocol_header_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/common/proxy_protocol:proxy_protocol_header_lib\",\n        \"//test/mocks/network:connection_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"proxy_protocol_regression_test\",\n    srcs = [\"proxy_protocol_regression_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:connection_balancer_lib\",\n        \"//source/common/network:listener_lib\",\n        \"//source/extensions/common/proxy_protocol:proxy_protocol_header_lib\",\n        \"//source/extensions/filters/listener/proxy_protocol:proxy_protocol_lib\",\n        \"//source/server:connection_handler_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc",
    "content": "#include \"envoy/network/address.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n\n#include \"test/mocks/network/connection.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace ProxyProtocol {\nnamespace {\n\nTEST(ProxyProtocolHeaderTest, GeneratesV1IPv4Header) {\n  const auto expectedHeaderStr = \"PROXY TCP4 174.2.2.222 172.0.0.1 50000 80\\r\\n\";\n  const Buffer::OwnedImpl expectedBuff(expectedHeaderStr);\n  const auto src_addr = \"174.2.2.222\";\n  const auto dst_addr = \"172.0.0.1\";\n  const auto src_port = 50000;\n  const auto dst_port = 80;\n  const auto version = Network::Address::IpVersion::v4;\n  Buffer::OwnedImpl buff{};\n\n  generateV1Header(src_addr, dst_addr, src_port, dst_port, version, buff);\n\n  EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff));\n\n  // Make sure the wrapper utility generates the same output.\n  testing::NiceMock<Network::MockClientConnection> connection;\n  connection.remote_address_ = Network::Utility::resolveUrl(\"tcp://174.2.2.222:50000\");\n  connection.local_address_ = Network::Utility::resolveUrl(\"tcp://172.0.0.1:80\");\n  Buffer::OwnedImpl util_buf;\n  envoy::config::core::v3::ProxyProtocolConfig config;\n  config.set_version(envoy::config::core::v3::ProxyProtocolConfig::V1);\n  generateProxyProtoHeader(config, connection, util_buf);\n  EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, util_buf));\n}\n\nTEST(ProxyProtocolHeaderTest, GeneratesV1IPv6Header) {\n  const auto expectedHeaderStr = \"PROXY TCP6 1::2:3 a:b:c:d:: 50000 80\\r\\n\";\n  const Buffer::OwnedImpl expectedBuff(expectedHeaderStr);\n  const auto src_addr = \"1::2:3\";\n  const auto dst_addr = \"a:b:c:d::\";\n  const auto src_port = 50000;\n  const auto dst_port = 80;\n  const auto version = Network::Address::IpVersion::v6;\n  
Buffer::OwnedImpl buff{};\n\n  generateV1Header(src_addr, dst_addr, src_port, dst_port, version, buff);\n\n  EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff));\n}\n\nTEST(ProxyProtocolHeaderTest, GeneratesV2IPv4Header) {\n  const uint8_t v2_protocol[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                 0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x02, 0x01};\n  const Buffer::OwnedImpl expectedBuff(v2_protocol, sizeof(v2_protocol));\n  const auto src_addr = \"1.2.3.4\";\n  const auto dst_addr = \"0.1.1.2\";\n  const auto src_port = 773;\n  const auto dst_port = 513;\n  const auto version = Network::Address::IpVersion::v4;\n  Buffer::OwnedImpl buff{};\n\n  generateV2Header(src_addr, dst_addr, src_port, dst_port, version, buff);\n\n  EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff));\n}\n\nTEST(ProxyProtocolHeaderTest, GeneratesV2IPv6Header) {\n  const uint8_t v2_protocol[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54,\n                                 0x0a, 0x21, 0x21, 0x00, 0x24, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03,\n                                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,\n                                 0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,\n                                 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x02};\n  const Buffer::OwnedImpl expectedBuff(v2_protocol, sizeof(v2_protocol));\n  const auto src_addr = \"1:2:3::4\";\n  const auto dst_addr = \"1:100:200:3::\";\n  const auto src_port = 8;\n  const auto dst_port = 2;\n  const auto version = Network::Address::IpVersion::v6;\n  Buffer::OwnedImpl buff{};\n\n  generateV2Header(src_addr, dst_addr, src_port, dst_port, version, buff);\n\n  EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff));\n\n  // Make sure the wrapper utility generates the same output.\n  
testing::NiceMock<Network::MockConnection> connection;\n  connection.remote_address_ = Network::Utility::resolveUrl(\"tcp://[1:2:3::4]:8\");\n  connection.local_address_ = Network::Utility::resolveUrl(\"tcp://[1:100:200:3::]:2\");\n  Buffer::OwnedImpl util_buf;\n  envoy::config::core::v3::ProxyProtocolConfig config;\n  config.set_version(envoy::config::core::v3::ProxyProtocolConfig::V2);\n  generateProxyProtoHeader(config, connection, util_buf);\n  EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, util_buf));\n}\n\nTEST(ProxyProtocolHeaderTest, GeneratesV2LocalHeader) {\n  const uint8_t v2_protocol[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51,\n                                 0x55, 0x49, 0x54, 0x0a, 0x20, 0x00, 0x00, 0x00};\n  const Buffer::OwnedImpl expectedBuff(v2_protocol, sizeof(v2_protocol));\n  Buffer::OwnedImpl buff{};\n\n  generateV2LocalHeader(buff);\n\n  EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff));\n}\n\n} // namespace\n} // namespace ProxyProtocol\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc",
    "content": "#include \"envoy/network/address.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/basic_resource_impl.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/connection_balancer_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n\n#include \"server/connection_handler_impl.h\"\n\n#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n#include \"extensions/filters/listener/proxy_protocol/proxy_protocol.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace ProxyProtocol {\nnamespace {\n\n/**\n * Regression tests for testing that the PROXY protocol listener filter can correctly read\n * what the PROXY protocol util functions generate\n */\nclass ProxyProtocolRegressionTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                    public Network::ListenerConfig,\n                                    public Network::FilterChainManager,\n                                    protected Logger::Loggable<Logger::Id::main> {\npublic:\n  ProxyProtocolRegressionTest()\n      : api_(Api::createApiForTest(stats_store_)),\n        dispatcher_(api_->allocateDispatcher(\"test_thread\")),\n        socket_(std::make_shared<Network::TcpListenSocket>(\n            Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true)),\n        connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, absl::nullopt)),\n        name_(\"proxy\"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()),\n        init_manager_(nullptr) {\n    
EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream));\n    EXPECT_CALL(socket_factory_, localAddress()).WillOnce(ReturnRef(socket_->localAddress()));\n    EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_));\n    connection_handler_->addListener(absl::nullopt, *this);\n    conn_ = dispatcher_->createClientConnection(socket_->localAddress(),\n                                                Network::Address::InstanceConstSharedPtr(),\n                                                Network::Test::createRawBufferSocket(), nullptr);\n    conn_->addConnectionCallbacks(connection_callbacks_);\n  }\n\n  // Network::ListenerConfig\n  Network::FilterChainManager& filterChainManager() override { return *this; }\n  Network::FilterChainFactory& filterChainFactory() override { return factory_; }\n  Network::ListenSocketFactory& listenSocketFactory() override { return socket_factory_; }\n  bool bindToPort() override { return true; }\n  bool handOffRestoredDestinationConnections() const override { return false; }\n  uint32_t perConnectionBufferLimitBytes() const override { return 0; }\n  std::chrono::milliseconds listenerFiltersTimeout() const override { return {}; }\n  bool continueOnListenerFiltersTimeout() const override { return false; }\n  Stats::Scope& listenerScope() override { return stats_store_; }\n  uint64_t listenerTag() const override { return 1; }\n  const std::string& name() const override { return name_; }\n  Network::ActiveUdpListenerFactory* udpListenerFactory() override { return nullptr; }\n  Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { return absl::nullopt; }\n  Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override {\n    return absl::nullopt;\n  }\n  ResourceLimit& openConnections() override { return open_connections_; }\n  envoy::config::core::v3::TrafficDirection direction() const override {\n    return envoy::config::core::v3::UNSPECIFIED;\n  }\n  
Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; }\n  const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() const override {\n    return empty_access_logs_;\n  }\n  uint32_t tcpBacklogSize() const override { return ENVOY_TCP_BACKLOG_SIZE; }\n  Init::Manager& initManager() override { return *init_manager_; }\n\n  // Network::FilterChainManager\n  const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override {\n    return filter_chain_.get();\n  }\n\n  void connect(bool read = true) {\n    int expected_callbacks = 2;\n    auto maybeExitDispatcher = [&]() -> void {\n      expected_callbacks--;\n      if (expected_callbacks == 0) {\n        dispatcher_->exit();\n      }\n    };\n\n    EXPECT_CALL(factory_, createListenerFilterChain(_))\n        .WillOnce(Invoke([&](Network::ListenerFilterManager& filter_manager) -> bool {\n          filter_manager.addAcceptFilter(\n              nullptr,\n              std::make_unique<ListenerFilters::ProxyProtocol::Filter>(\n                  std::make_shared<ListenerFilters::ProxyProtocol::Config>(\n                      listenerScope(),\n                      envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol())));\n          maybeExitDispatcher();\n          return true;\n        }));\n    conn_->connect();\n    if (read) {\n      read_filter_ = std::make_shared<NiceMock<Network::MockReadFilter>>();\n      EXPECT_CALL(factory_, createNetworkFilterChain(_, _))\n          .WillOnce(Invoke([&](Network::Connection& connection,\n                               const std::vector<Network::FilterFactoryCb>&) -> bool {\n            server_connection_ = &connection;\n            connection.addConnectionCallbacks(server_callbacks_);\n            connection.addReadFilter(read_filter_);\n            return true;\n          }));\n    }\n    EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::Connected))\n        
.WillOnce(Invoke([&](Network::ConnectionEvent) -> void { maybeExitDispatcher(); }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  void write(const uint8_t* s, ssize_t l) {\n    Buffer::OwnedImpl buf(s, l);\n    conn_->write(buf, false);\n  }\n\n  void write(const std::string& s) {\n    Buffer::OwnedImpl buf(s);\n    conn_->write(buf, false);\n  }\n\n  void expectData(std::string expected) {\n    EXPECT_CALL(*read_filter_, onNewConnection());\n    EXPECT_CALL(*read_filter_, onData(_, _))\n        .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> Network::FilterStatus {\n          EXPECT_EQ(buffer.toString(), expected);\n          buffer.drain(expected.length());\n          dispatcher_->exit();\n          return Network::FilterStatus::Continue;\n        }));\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  void disconnect() {\n    EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n    EXPECT_CALL(server_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n    conn_->close(Network::ConnectionCloseType::NoFlush);\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  std::shared_ptr<Network::TcpListenSocket> socket_;\n  Network::MockListenSocketFactory socket_factory_;\n  Network::NopConnectionBalancerImpl connection_balancer_;\n  Network::ConnectionHandlerPtr connection_handler_;\n  Network::MockFilterChainFactory factory_;\n  Network::ClientConnectionPtr conn_;\n  NiceMock<Network::MockConnectionCallbacks> connection_callbacks_;\n  BasicResourceLimitImpl open_connections_;\n  Network::Connection* server_connection_;\n  Network::MockConnectionCallbacks server_callbacks_;\n  std::shared_ptr<Network::MockReadFilter> read_filter_;\n  std::string name_;\n  const 
Network::FilterChainSharedPtr filter_chain_;\n  const std::vector<AccessLog::InstanceSharedPtr> empty_access_logs_;\n  std::unique_ptr<Init::Manager> init_manager_;\n};\n\n// Parameterize the listener socket address version.\nINSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtocolRegressionTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ProxyProtocolRegressionTest, V1Basic) {\n  std::string source_addr;\n  Buffer::OwnedImpl buff{};\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    source_addr = \"202.168.0.13\";\n    generateV1Header(source_addr, \"174.2.2.222\", 52000, 80, Network::Address::IpVersion::v4, buff);\n  } else {\n    source_addr = \"1:2:3::4\";\n    generateV1Header(source_addr, \"5:6::7:8\", 62000, 8000, Network::Address::IpVersion::v6, buff);\n  }\n  connect();\n\n  write(buff.toString() + \"more data\");\n\n  expectData(\"more data\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), source_addr);\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolRegressionTest, V2Basic) {\n  std::string source_addr;\n  Buffer::OwnedImpl buff{};\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    source_addr = \"202.168.0.13\";\n    generateV2Header(source_addr, \"174.2.2.222\", 52000, 80, Network::Address::IpVersion::v4, buff);\n  } else {\n    source_addr = \"1:2:3::4\";\n    generateV2Header(source_addr, \"5:6::7:8\", 62000, 8000, Network::Address::IpVersion::v6, buff);\n  }\n  connect();\n\n  write(buff.toString() + \"more data\");\n\n  expectData(\"more data\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), source_addr);\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolRegressionTest, V2LocalConnection) {\n  Buffer::OwnedImpl buff{};\n  
generateV2LocalHeader(buff);\n  connect();\n\n  write(buff.toString() + \"more data\");\n\n  expectData(\"more data\");\n\n  if (GetParam() == Envoy::Network::Address::IpVersion::v4) {\n    EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"127.0.0.1\");\n  } else {\n    EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"::1\");\n  }\n  EXPECT_FALSE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\n} // namespace\n} // namespace ProxyProtocol\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/redis/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_platform_dep\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"mocks_lib\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//source/extensions/common/redis:cluster_refresh_manager_interface\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"cluster_refresh_manager_test\",\n    srcs = [\"cluster_refresh_manager_test.cc\"],\n    extension_name = \"envoy.filters.network.redis_proxy\",\n    flaky = True,\n    deps = [\n        \"//source/common/common:lock_guard_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/extensions/common/redis:cluster_refresh_manager_lib\",\n        \"//test/extensions/filters/network/common/redis:redis_mocks\",\n        \"//test/extensions/filters/network/redis_proxy:redis_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ] + envoy_cc_platform_dep(\"//source/exe:platform_impl_lib\"),\n)\n"
  },
  {
    "path": "test/extensions/common/redis/cluster_refresh_manager_test.cc",
    "content": "#include <atomic>\n#include <chrono>\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n\n#include \"exe/platform_impl.h\"\n\n#include \"extensions/common/redis/cluster_refresh_manager_impl.h\"\n\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/extensions/filters/network/redis_proxy/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Redis {\n\n// TODO: rewrite the tests to fix the flaky test\nclass ClusterRefreshManagerTest : public testing::Test {\npublic:\n  ClusterRefreshManagerTest()\n      : cluster_name_(\"fake_cluster\"), refresh_manager_(std::make_shared<ClusterRefreshManagerImpl>(\n                                           dispatcher_, cm_, time_system_)) {\n    time_system_.setMonotonicTime(std::chrono::seconds(1));\n    map_.emplace(\"fake_cluster\", mock_cluster_);\n    ON_CALL(cm_, clusters()).WillByDefault(Return(map_));\n  }\n  ~ClusterRefreshManagerTest() override = default;\n\n  // Advance simulation time by increment milliseconds, waiting on nthreads other threads at each\n  // point, before continuing. This must be called only by a single thread.\n  void advanceTime(MonotonicTime&& end_time, uint32_t nthreads = 0,\n                   std::chrono::milliseconds&& increment = std::chrono::milliseconds(1000)) {\n    if (nthreads == 0) {\n      // This is a special case. 
Ignore increment and set the time to end_time.\n      time_system_.setMonotonicTime(end_time);\n    } else {\n      MonotonicTime current_time = time_system_.monotonicTime();\n      while (current_time < end_time) {\n        {\n          Thread::LockGuard lg(time_mutex_);\n          // Wait for all waiting threads to arrive. Wait on a separate condition variable that is\n          // signaled by each waiting thread.\n          while (nthreads_waiting_ < nthreads) {\n            setter_wait_cv_.wait(time_mutex_);\n          }\n          current_time += increment;\n          if (current_time > end_time) {\n            // Ensure that end_time is not overshot.\n            current_time = end_time;\n          }\n          time_system_.setMonotonicTime(current_time);\n          wait_cv_.notifyAll();\n        }\n        // Wait for the waiting threads to all reach this \"exit\" gate. This ensures that all threads\n        // properly enter and exit the time-advancing loop without getting ahead or behind.\n        while (nthreads_going_ < nthreads) {\n          std::this_thread::yield();\n        }\n        // Release the gate for waiting threads.\n        nthreads_going_ = 0;\n      }\n    }\n  }\n\n  // Wait until simulation time reaches end_time.\n  void waitForTime(MonotonicTime&& end_time) {\n    while (time_system_.monotonicTime() < end_time) {\n      {\n        Thread::LockGuard lg(time_mutex_);\n        // Only notify the time-advancing thread that we're about to wait with time_mutex_ locked.\n        // This ensures that this thread is properly waiting before the time-advancing thread gets\n        // to notify this thread that time has been advanced. 
Otherwise, this thread might miss\n        // the notification.\n        nthreads_waiting_++;\n        setter_wait_cv_.notifyOne();\n        wait_cv_.wait(time_mutex_);\n        nthreads_waiting_--;\n      }\n      nthreads_going_++;\n      // Wait at this gate until the time setting threads releases it.\n      while (nthreads_going_ > 0) {\n        std::this_thread::yield();\n      }\n    }\n  }\n\n  ClusterRefreshManagerImpl::ClusterInfoSharedPtr clusterInfo(const std::string& cluster_name) {\n    Thread::LockGuard lock(refresh_manager_->map_mutex_);\n    return refresh_manager_->info_map_[cluster_name];\n  }\n\n  const std::string cluster_name_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  Upstream::ClusterManager::ClusterInfoMap map_;\n  Upstream::MockClusterMockPrioritySet mock_cluster_;\n  Event::SimulatedTimeSystem time_system_;\n  std::shared_ptr<ClusterRefreshManagerImpl> refresh_manager_;\n  ClusterRefreshManager::HandlePtr handle_;\n  std::atomic<uint32_t> callback_count_{};\n  std::atomic<uint32_t> nthreads_waiting_{};\n  std::atomic<uint32_t> nthreads_going_{};\n  Thread::CondVar wait_cv_;\n  Thread::CondVar setter_wait_cv_;\n  Thread::MutexBasicLockable time_mutex_;\n  PlatformImpl platform_;\n};\n\n// This test exercises the redirection manager's basic functionality with redirect events being\n// registered via 2 threads. 
The manager is notified of events on valid registered clusters and\n// invalid unregistered cluster names.\nTEST_F(ClusterRefreshManagerTest, Basic) {\n  handle_ = refresh_manager_->registerCluster(cluster_name_, std::chrono::milliseconds(1000), 1, 1,\n                                              1, [&]() { callback_count_++; });\n  ClusterRefreshManagerImpl::ClusterInfoSharedPtr cluster_info = clusterInfo(cluster_name_);\n\n  Thread::ThreadPtr thread_1 = platform_.threadFactory().createThread([&]() {\n    waitForTime(MonotonicTime(std::chrono::seconds(1)));\n    EXPECT_TRUE(refresh_manager_->onRedirection(cluster_name_));\n    // wait for 3 ensures that thread_1's first onRedirection is completed,\n    // as wait for 2 would only ensure onRedirection was started\n    waitForTime(MonotonicTime(std::chrono::seconds(3)));\n    refresh_manager_->onRedirection(cluster_name_);\n  });\n  Thread::ThreadPtr thread_2 = platform_.threadFactory().createThread([&]() {\n    // wait for 3 ensures that thread_1's first onRedirection is completed,\n    // as wait for 2 would only ensure onRedirection was started\n    waitForTime(MonotonicTime(std::chrono::seconds(3)));\n    refresh_manager_->onRedirection(cluster_name_);\n  });\n\n  advanceTime(MonotonicTime(std::chrono::seconds(3)), 2);\n  thread_1->join();\n  thread_2->join();\n\n  EXPECT_GE(callback_count_, 2);\n  EXPECT_EQ(cluster_info->redirects_count_, 0);\n  EXPECT_EQ(cluster_info->last_callback_time_ms_.load(), 3000);\n  EXPECT_EQ(cluster_info->min_time_between_triggering_, std::chrono::milliseconds(1000));\n  EXPECT_EQ(cluster_info->redirects_threshold_, 1);\n  EXPECT_EQ(cluster_info->failure_threshold_, 1);\n  EXPECT_EQ(cluster_info->host_degraded_threshold_, 1);\n\n  callback_count_ = 0;\n  advanceTime(MonotonicTime(std::chrono::seconds(5)));\n  EXPECT_FALSE(refresh_manager_->onRedirection(\"unregistered_cluster_name\"));\n  EXPECT_EQ(callback_count_, 0);\n\n  handle_.reset();\n  
EXPECT_FALSE(refresh_manager_->onRedirection(cluster_name_));\n}\n\n// This test exercises the redirection manager's basic functionality with failure events being\n// registered via 2 threads. The manager is notified of events on valid registered clusters and\n// invalid unregistered cluster names.\nTEST_F(ClusterRefreshManagerTest, BasicFailureEvents) {\n  handle_ = refresh_manager_->registerCluster(cluster_name_, std::chrono::milliseconds(1000), 1, 1,\n                                              1, [&]() { callback_count_++; });\n  ClusterRefreshManagerImpl::ClusterInfoSharedPtr cluster_info = clusterInfo(cluster_name_);\n\n  Thread::ThreadPtr thread_1 = platform_.threadFactory().createThread([&]() {\n    waitForTime(MonotonicTime(std::chrono::seconds(1)));\n    EXPECT_TRUE(refresh_manager_->onFailure(cluster_name_));\n    // wait for 3 ensures that thread_1's first onRedirection is completed,\n    // as wait for 2 would only ensure onRedirection was started\n    waitForTime(MonotonicTime(std::chrono::seconds(3)));\n    refresh_manager_->onFailure(cluster_name_);\n  });\n  Thread::ThreadPtr thread_2 = platform_.threadFactory().createThread([&]() {\n    // wait for 3 ensures that thread_1's first onRedirection is completed,\n    // as wait for 2 would only ensure onRedirection was started\n    waitForTime(MonotonicTime(std::chrono::seconds(3)));\n    refresh_manager_->onFailure(cluster_name_);\n  });\n\n  advanceTime(MonotonicTime(std::chrono::seconds(3)), 2);\n  thread_1->join();\n  thread_2->join();\n\n  EXPECT_GE(callback_count_, 2);\n  EXPECT_EQ(cluster_info->failures_count_, 0);\n  EXPECT_EQ(cluster_info->last_callback_time_ms_.load(), 3000);\n  EXPECT_EQ(cluster_info->min_time_between_triggering_, std::chrono::milliseconds(1000));\n  EXPECT_EQ(cluster_info->redirects_threshold_, 1);\n  EXPECT_EQ(cluster_info->failure_threshold_, 1);\n  EXPECT_EQ(cluster_info->host_degraded_threshold_, 1);\n\n  callback_count_ = 0;\n  
advanceTime(MonotonicTime(std::chrono::seconds(5)));\n  EXPECT_FALSE(refresh_manager_->onFailure(\"unregistered_cluster_name\"));\n  EXPECT_EQ(callback_count_, 0);\n\n  handle_.reset();\n  EXPECT_FALSE(refresh_manager_->onFailure(cluster_name_));\n}\n\n// This test exercises the redirection manager's basic functionality with degraded events being\n// registered via 2 threads. The manager is notified of events on valid registered clusters and\n// invalid unregistered cluster names.\nTEST_F(ClusterRefreshManagerTest, BasicDegradedEvents) {\n  handle_ = refresh_manager_->registerCluster(cluster_name_, std::chrono::milliseconds(1000), 1, 1,\n                                              1, [&]() { callback_count_++; });\n  ClusterRefreshManagerImpl::ClusterInfoSharedPtr cluster_info = clusterInfo(cluster_name_);\n\n  Thread::ThreadPtr thread_1 = platform_.threadFactory().createThread([&]() {\n    waitForTime(MonotonicTime(std::chrono::seconds(1)));\n    EXPECT_TRUE(refresh_manager_->onHostDegraded(cluster_name_));\n    // wait for 3 ensures that thread_1's first onRedirection is completed,\n    // as wait for 2 would only ensure onRedirection was started\n    waitForTime(MonotonicTime(std::chrono::seconds(3)));\n    refresh_manager_->onHostDegraded(cluster_name_);\n  });\n  Thread::ThreadPtr thread_2 = platform_.threadFactory().createThread([&]() {\n    // wait for 3 ensures that thread_1's first onRedirection is completed,\n    // as wait for 2 would only ensure onRedirection was started\n    waitForTime(MonotonicTime(std::chrono::seconds(3)));\n    refresh_manager_->onHostDegraded(cluster_name_);\n  });\n\n  advanceTime(MonotonicTime(std::chrono::seconds(3)), 2);\n  thread_1->join();\n  thread_2->join();\n\n  EXPECT_GE(callback_count_, 2);\n  EXPECT_EQ(cluster_info->host_degraded_count_, 0);\n  EXPECT_EQ(cluster_info->last_callback_time_ms_.load(), 3000);\n  EXPECT_EQ(cluster_info->min_time_between_triggering_, std::chrono::milliseconds(1000));\n  
EXPECT_EQ(cluster_info->redirects_threshold_, 1);\n  EXPECT_EQ(cluster_info->failure_threshold_, 1);\n  EXPECT_EQ(cluster_info->host_degraded_threshold_, 1);\n\n  callback_count_ = 0;\n  advanceTime(MonotonicTime(std::chrono::seconds(5)));\n  EXPECT_FALSE(refresh_manager_->onHostDegraded(\"unregistered_cluster_name\"));\n  EXPECT_EQ(callback_count_, 0);\n\n  handle_.reset();\n  EXPECT_FALSE(refresh_manager_->onHostDegraded(cluster_name_));\n}\n\n// This test records a high number of events for a cluster using 2 threads. Simulated time\n// is advanced without thread synchronization for up to 2 seconds during the threads' activity\n// to simulate possible thread timing issues.\nTEST_F(ClusterRefreshManagerTest, HighVolume) {\n  handle_ = refresh_manager_->registerCluster(cluster_name_, std::chrono::seconds(2), 1000, 1000,\n                                              1000, [&]() { callback_count_++; });\n  ClusterRefreshManagerImpl::ClusterInfoSharedPtr cluster_info = clusterInfo(cluster_name_);\n  uint32_t thread1_callback_count = 0;\n  uint32_t thread2_callback_count = 0;\n\n  Thread::ThreadPtr thread_1 = platform_.threadFactory().createThread([&]() {\n    for (uint32_t i = 1; i < 61; i += 2) {\n      waitForTime(MonotonicTime(std::chrono::seconds(i)));\n      for (uint32_t j = 0; j < 2000; j++) {\n        if (refresh_manager_->onRedirection(cluster_name_) ||\n            refresh_manager_->onFailure(cluster_name_) ||\n            refresh_manager_->onHostDegraded(cluster_name_)) {\n          thread1_callback_count++;\n        }\n      }\n    }\n  });\n  Thread::ThreadPtr thread_2 = platform_.threadFactory().createThread([&]() {\n    for (uint32_t i = 1; i < 61; i += 2) {\n      waitForTime(MonotonicTime(std::chrono::seconds(i)));\n      for (uint32_t j = 0; j < 2000; j++) {\n        if (refresh_manager_->onRedirection(cluster_name_) ||\n            refresh_manager_->onFailure(cluster_name_) ||\n            refresh_manager_->onHostDegraded(cluster_name_)) {\n        
  thread2_callback_count++;\n        }\n      }\n    }\n  });\n\n  // Synchronize all threads every 2 seconds of simulated time.\n  for (uint32_t i = 1; i < 61; i += 2) {\n    advanceTime(MonotonicTime(std::chrono::seconds(i)), 2, std::chrono::seconds(1));\n  }\n  thread_1->join();\n  thread_2->join();\n\n  EXPECT_EQ(callback_count_, thread1_callback_count + thread2_callback_count);\n  EXPECT_EQ(callback_count_, 30);\n}\n\n// This test exercises the redirection manager's basic functionality with redirect/failure/host\n// degraded events are disabled by setting the threshold to 0\nTEST_F(ClusterRefreshManagerTest, FeatureDisabled) {\n  handle_ = refresh_manager_->registerCluster(cluster_name_, std::chrono::milliseconds(1000), 0, 0,\n                                              0, [&]() { callback_count_++; });\n  ClusterRefreshManagerImpl::ClusterInfoSharedPtr cluster_info = clusterInfo(cluster_name_);\n\n  EXPECT_FALSE(refresh_manager_->onRedirection(cluster_name_));\n  EXPECT_FALSE(refresh_manager_->onFailure(cluster_name_));\n  EXPECT_FALSE(refresh_manager_->onHostDegraded(cluster_name_));\n\n  EXPECT_GE(callback_count_, 0);\n  EXPECT_EQ(cluster_info->redirects_count_, 0);\n  EXPECT_EQ(cluster_info->failures_count_, 0);\n  EXPECT_EQ(cluster_info->host_degraded_count_, 0);\n  EXPECT_EQ(cluster_info->last_callback_time_ms_.load(), 0);\n  EXPECT_EQ(cluster_info->min_time_between_triggering_, std::chrono::milliseconds(1000));\n  EXPECT_EQ(cluster_info->redirects_threshold_, 0);\n  EXPECT_EQ(cluster_info->failure_threshold_, 0);\n  EXPECT_EQ(cluster_info->host_degraded_threshold_, 0);\n}\n\n} // namespace Redis\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/redis/mocks.cc",
    "content": "#include \"mocks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Redis {\n\nMockClusterRefreshManager::MockClusterRefreshManager() = default;\nMockClusterRefreshManager::~MockClusterRefreshManager() = default;\n\n} // namespace Redis\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/redis/mocks.h",
    "content": "#pragma once\n\n#include \"extensions/common/redis/cluster_refresh_manager.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Redis {\n\nclass MockClusterRefreshManager : public ClusterRefreshManager {\npublic:\n  MockClusterRefreshManager();\n  ~MockClusterRefreshManager() override;\n\n  MOCK_METHOD(bool, onRedirection, (const std::string& cluster_name));\n  MOCK_METHOD(bool, onFailure, (const std::string& cluster_name));\n  MOCK_METHOD(bool, onHostDegraded, (const std::string& cluster_name));\n  MOCK_METHOD(HandlePtr, registerCluster,\n              (const std::string& cluster_name,\n               std::chrono::milliseconds min_time_between_triggering,\n               const uint32_t redirects_threshold, const uint32_t failure_threshold,\n               const uint32_t host_degraded_threshold, const RefreshCB& cb));\n};\n\n} // namespace Redis\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/sqlutils/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"sqlutils_tests\",\n    srcs = [\n        \"sqlutils_test.cc\",\n    ],\n    external_deps = [\"sqlparser\"],\n    deps = [\n        \"//source/extensions/common/sqlutils:sqlutils_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/common/sqlutils/sqlutils_test.cc",
    "content": "#include \"extensions/common/sqlutils/sqlutils.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace SQLUtils {\n\n// MetadataFromSQLTest class is used for parameterized tests.\n// The values in the tests are:\n// std::string - SQL query\n// bool - whether to expect SQL parsing to be successful\n// std::map<std::string, std::list<std::string>> map of expected tables accessed based on the query.\n// The map is checked only when parsing was successful. Map is indexed by table name and points to\n// list of operations performed on the table. For example table1: \"select\", \"insert\" says that there\n// was SELECT and INSERT operations on table1.\n// DecoderAttributes is a map containing additional attributes which augment creating metadata.\nclass MetadataFromSQLTest\n    : public ::testing::TestWithParam<\n          std::tuple<std::string, bool, std::map<std::string, std::list<std::string>>,\n                     SQLUtils::DecoderAttributes>> {};\n\n// Test takes SQL query as a parameter and checks if the parsing\n// produces the correct metadata.\n// Metadata is 2-level structure. 
First layer is list of resources\n// over which the SQL query operates: in our case is list of tables.\n// Under each table there is secondary list which contains operations performed\n// on the table, like \"select\", \"insert\", etc.\nTEST_P(MetadataFromSQLTest, ParsingAndMetadataTest) {\n  // Get the SQL query\n  const std::string& query = std::get<0>(GetParam());\n  // vector of queries to check.\n  std::vector<std::string> test_queries;\n  test_queries.push_back(query);\n\n  // Create uppercase and lowercase versions of the queries and put\n  // them into vector of queries to check\n  test_queries.push_back(absl::AsciiStrToLower(query));\n  test_queries.push_back(absl::AsciiStrToUpper(query));\n\n  while (!test_queries.empty()) {\n    std::string test_query = test_queries.back();\n    ProtobufWkt::Struct metadata;\n\n    // Check if the parsing result is what expected.\n    ASSERT_EQ(std::get<1>(GetParam()),\n              SQLUtils::setMetadata(test_query, std::get<3>(GetParam()), metadata));\n\n    // If parsing was expected to fail do not check parsing values.\n    if (!std::get<1>(GetParam())) {\n      return;\n    }\n\n    // Access metadata fields, where parsing results are stored.\n    auto& fields = *metadata.mutable_fields();\n\n    // Get the names of resources which SQL query operates on.\n    std::map<std::string, std::list<std::string>> expected_tables = std::get<2>(GetParam());\n    // Check if query results return the same number of resources as expected.\n    ASSERT_EQ(expected_tables.size(), fields.size());\n    for (const auto& i : fields) {\n      // Get from created metadata the list of operations on the resource\n      const auto& operations = i;\n      std::string table_name = operations.first;\n\n      std::transform(table_name.begin(), table_name.end(), table_name.begin(),\n                     [](unsigned char c) { return std::tolower(c); });\n      // Get the list of expected operations on the same resource from test param.\n      
const auto& table_name_it = expected_tables.find(table_name);\n      // Make sure that a resource (table) found in metadata is expected.\n      ASSERT_NE(expected_tables.end(), table_name_it);\n      auto& operations_list = table_name_it->second;\n      // The number of expected operations and created in metadata must be the same.\n      ASSERT_EQ(operations_list.size(), operations.second.list_value().values().size());\n      // Now iterate over the operations list found in metadata and check if the same operation\n      // is listed as expected in test param.\n      for (const auto& j : operations.second.list_value().values()) {\n        // Find that operation in test params.\n        const auto operation_it =\n            std::find(operations_list.begin(), operations_list.end(), j.string_value());\n        ASSERT_NE(operations_list.end(), operation_it);\n        // Erase the operation. At the end of the test this list should be empty what means\n        // that we found all expected operations.\n        operations_list.erase(operation_it);\n      }\n      // Make sure that we went through all expected operations.\n      ASSERT_TRUE(operations_list.empty());\n      // Remove the table from the list. At the end of the test this list must be empty.\n      expected_tables.erase(table_name_it);\n    }\n\n    ASSERT_TRUE(expected_tables.empty());\n    test_queries.pop_back();\n  }\n}\n\n// Note: This parameterized test's queries are converted to all lowercase and all uppercase\n// to validate that parser is case-insensitive. The test routine converts to uppercase and\n// lowercase entire query string, not only SQL keywords. This introduces a problem when comparing\n// tables' names when verifying parsing result. Therefore the test converts table names to lowercase\n// before comparing. It however requires that all table names in the queries below use lowercase\n// only.\n#define TEST_VALUE(...)                                                                            
\\\n  std::tuple<std::string, bool, std::map<std::string, std::list<std::string>>,                     \\\n             SQLUtils::DecoderAttributes> {                                                        \\\n    __VA_ARGS__                                                                                    \\\n  }\nINSTANTIATE_TEST_SUITE_P(\n    SQLUtilsTestSuite, MetadataFromSQLTest,\n    ::testing::Values(\n        TEST_VALUE(\"blahblah;\", false, {}, {}),\n\n        TEST_VALUE(\"CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT);\", true,\n                   {{\"table1\", {\"create\"}}}, {}),\n        TEST_VALUE(\"CREATE TABLE IF NOT EXISTS `table number 1`(Usr VARCHAR(40),Count INT);\", true,\n                   {{\"table number 1.testdb\", {\"create\"}}}, {{\"database\", \"testdb\"}}),\n        TEST_VALUE(\n            \"CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT); SELECT * from table1;\",\n            true, {{\"table1\", {\"select\", \"create\"}}}, {}),\n        TEST_VALUE(\n            \"CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT); SELECT * from table2;\",\n            true, {{\"table1\", {\"create\"}}, {\"table2\", {\"select\"}}}, {{\"user\", \"testusr\"}}),\n\n        TEST_VALUE(\"CREATE TABLE table1(Usr VARCHAR(40),Count INT);\", true,\n                   {{\"table1\", {\"create\"}}}, {}),\n        TEST_VALUE(\"CREATE TABLE;\", false, {}, {}),\n        TEST_VALUE(\"CREATE TEMPORARY table table1(Usr VARCHAR(40),Count INT);\", true,\n                   {{\"table1\", {\"create\"}}}, {}),\n        TEST_VALUE(\"DROP TABLE IF EXISTS table1\", true, {{\"table1\", {\"drop\"}}}, {}),\n        TEST_VALUE(\"ALTER TABLE table1 add column Id varchar (20);\", true, {{\"table1\", {\"alter\"}}},\n                   {}),\n        TEST_VALUE(\"INSERT INTO table1 (Usr, Count) VALUES ('allsp2', 3);\", true,\n                   {{\"table1\", {\"insert\"}}}, {}),\n        TEST_VALUE(\"INSERT LOW_PRIORITY INTO table1 (Usr, Count) 
VALUES ('allsp2', 3);\", true,\n                   {{\"table1\", {\"insert\"}}}, {}),\n        TEST_VALUE(\"INSERT IGNORE INTO table1 (Usr, Count) VALUES ('allsp2', 3);\", true,\n                   {{\"table1\", {\"insert\"}}}, {}),\n        TEST_VALUE(\"INSERT INTO table1 (Usr, Count) VALUES ('allsp2', 3);SELECT * from table1\",\n                   true, {{\"table1\", {\"insert\", \"select\"}}}, {}),\n        TEST_VALUE(\"DELETE FROM table1 WHERE Count > 3;\", true, {{\"table1\", {\"delete\"}}}, {}),\n        TEST_VALUE(\"DELETE LOW_PRIORITY FROM table1 WHERE Count > 3;\", true,\n                   {{\"table1\", {\"delete\"}}}, {}),\n        TEST_VALUE(\"DELETE QUICK FROM table1 WHERE Count > 3;\", true, {{\"table1\", {\"delete\"}}}, {}),\n        TEST_VALUE(\"DELETE IGNORE FROM table1 WHERE Count > 3;\", true, {{\"table1\", {\"delete\"}}},\n                   {}),\n\n        TEST_VALUE(\"SELECT * FROM table1 WHERE Count = 1;\", true, {{\"table1\", {\"select\"}}}, {}),\n        TEST_VALUE(\"SELECT * FROM table1 WHERE Count = 1;\", true, {{\"table1\", {\"select\"}}}, {}),\n        TEST_VALUE(\"SELECT product.category FROM table1 WHERE Count = 1;\", true,\n                   {{\"table1\", {\"select\"}}, {\"product\", {\"unknown\"}}}, {}),\n        TEST_VALUE(\"SELECT DISTINCT Usr FROM table1;\", true, {{\"table1\", {\"select\"}}}, {}),\n        TEST_VALUE(\"SELECT Usr, Count FROM table1 ORDER BY Count DESC;\", true,\n                   {{\"table1.testdb\", {\"select\"}}}, {{\"user\", \"testuser\"}, {\"database\", \"testdb\"}}),\n        TEST_VALUE(\"SELECT 12 AS a, a FROM table1 GROUP BY a;\", true, {{\"table1\", {\"select\"}}}, {}),\n        TEST_VALUE(\"SELECT;\", false, {}, {}), TEST_VALUE(\"SELECT Usr, Count FROM;\", false, {}, {}),\n        TEST_VALUE(\"INSERT INTO table1 SELECT * FROM table2;\", true,\n                   {{\"table1\", {\"insert\"}}, {\"table2\", {\"select\"}}}, {}),\n        TEST_VALUE(\"INSERT INTO table1 SELECT tbl_temp1.fld_order_id FROM 
table2;\", true,\n                   {{\"tbl_temp1\", {\"unknown\"}}, {\"table2\", {\"select\"}}, {\"table1\", {\"insert\"}}},\n                   {}),\n        TEST_VALUE(\"UPDATE table1 SET col1 = col1 + 1\", true, {{\"table1\", {\"update\"}}}, {}),\n        TEST_VALUE(\"UPDATE LOW_PRIORITY table1 SET col1 = col1 + 1\", true, {{\"table1\", {\"update\"}}},\n                   {}),\n        TEST_VALUE(\"UPDATE IGNORE table1 SET col1 = col1 + 1\", true, {{\"table1\", {\"update\"}}}, {}),\n        TEST_VALUE(\"UPDATE table1 SET  column1=(SELECT * columnX from table2);\", true,\n                   {{\"table1\", {\"update\"}}, {\"table2\", {\"select\"}}}, {}),\n\n        // operations on database should not create any metadata\n        TEST_VALUE(\"CREATE DATABASE testdb;\", true, {}, {}),\n        TEST_VALUE(\"CREATE DATABASE IF NOT EXISTS testdb;\", true, {}, {}),\n        TEST_VALUE(\"ALTER DATABASE testdb CHARACTER SET charset_name;\", true, {}, {}),\n        TEST_VALUE(\"ALTER DATABASE testdb default CHARACTER SET charset_name;\", true, {}, {}),\n        TEST_VALUE(\"ALTER DATABASE testdb default CHARACTER SET = charset_name;\", true, {}, {}),\n        TEST_VALUE(\"ALTER SCHEMA testdb default CHARACTER SET = charset_name;\", true, {}, {}),\n\n        // The following DROP DATABASE tests should not produce metadata.\n        TEST_VALUE(\"DROP DATABASE testdb;\", true, {}, {}),\n        TEST_VALUE(\"DROP DATABASE IF EXISTS testdb;\", true, {}, {}),\n\n        // Schema. Should be parsed fine, but should not produce any metadata\n        TEST_VALUE(\"SHOW databases;\", true, {}, {}), TEST_VALUE(\"SHOW tables;\", true, {}, {}),\n        TEST_VALUE(\"SELECT * FROM;\", false, {}, {}),\n        TEST_VALUE(\"SELECT 1 FROM tabletest1;\", true, {{\"tabletest1\", {\"select\"}}}, {})\n\n            ));\n\n} // namespace SQLUtils\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/tap/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"common\",\n    srcs = [\"common.cc\"],\n    hdrs = [\"common.h\"],\n    deps = [\n        \"//source/extensions/common/tap:tap_interface\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/data/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"admin_test\",\n    srcs = [\"admin_test.cc\"],\n    deps = [\n        \"//source/extensions/common/tap:admin\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:admin_stream_mocks\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"tap_config_base_test\",\n    srcs = [\"tap_config_base_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/common/tap:tap_config_base\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/tap/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/common/tap/admin_test.cc",
    "content": "#include \"envoy/config/tap/v3/common.pb.h\"\n\n#include \"extensions/common/tap/admin.h\"\n\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/admin_stream.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\nnamespace {\n\nusing ::testing::_;\nusing ::testing::DoAll;\nusing ::testing::Return;\nusing ::testing::SaveArg;\n\nclass MockExtensionConfig : public ExtensionConfig {\npublic:\n  MOCK_METHOD(const absl::string_view, adminId, ());\n  MOCK_METHOD(void, clearTapConfig, ());\n  MOCK_METHOD(void, newTapConfig,\n              (envoy::config::tap::v3::TapConfig && proto_config, Sink* admin_streamer));\n};\n\nclass AdminHandlerTest : public testing::Test {\npublic:\n  AdminHandlerTest() {\n    EXPECT_CALL(admin_, addHandler(\"/tap\", \"tap filter control\", _, true, true))\n        .WillOnce(DoAll(SaveArg<2>(&cb_), Return(true)));\n    handler_ = std::make_unique<AdminHandler>(admin_, main_thread_dispatcher_);\n  }\n\n  ~AdminHandlerTest() override {\n    EXPECT_CALL(admin_, removeHandler(\"/tap\")).WillOnce(Return(true));\n  }\n\n  Server::MockAdmin admin_;\n  Event::MockDispatcher main_thread_dispatcher_{\"test_main_thread\"};\n  std::unique_ptr<AdminHandler> handler_;\n  Server::Admin::HandlerCb cb_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Buffer::OwnedImpl response_;\n  Server::MockAdminStream admin_stream_;\n\n  const std::string admin_request_yaml_ =\n      R\"EOF(\nconfig_id: test_config_id\ntap_config:\n  match:\n    any_match: true\n  output_config:\n    sinks:\n      - streaming_admin: {}\n)EOF\";\n};\n\n// Request with no config body.\nTEST_F(AdminHandlerTest, NoBody) {\n  EXPECT_CALL(admin_stream_, getRequestBody());\n  EXPECT_EQ(Http::Code::BadRequest, cb_(\"/tap\", response_headers_, response_, admin_stream_));\n  EXPECT_EQ(\"/tap requires a JSON/YAML body\", response_.toString());\n}\n\n// Request with a config body that doesn't 
parse/verify.\nTEST_F(AdminHandlerTest, BadBody) {\n  Buffer::OwnedImpl bad_body(\"hello\");\n  EXPECT_CALL(admin_stream_, getRequestBody()).WillRepeatedly(Return(&bad_body));\n  EXPECT_EQ(Http::Code::BadRequest, cb_(\"/tap\", response_headers_, response_, admin_stream_));\n  EXPECT_EQ(\"Unable to convert YAML as JSON: hello\", response_.toString());\n}\n\n// Request that references an unknown config ID.\nTEST_F(AdminHandlerTest, UnknownConfigId) {\n  Buffer::OwnedImpl body(admin_request_yaml_);\n  EXPECT_CALL(admin_stream_, getRequestBody()).WillRepeatedly(Return(&body));\n  EXPECT_EQ(Http::Code::BadRequest, cb_(\"/tap\", response_headers_, response_, admin_stream_));\n  EXPECT_EQ(\"Unknown config id 'test_config_id'. No extension has registered with this id.\",\n            response_.toString());\n}\n\n// Request while there is already an active tap session.\nTEST_F(AdminHandlerTest, RequestTapWhileAttached) {\n  MockExtensionConfig extension_config;\n  handler_->registerConfig(extension_config, \"test_config_id\");\n\n  Buffer::OwnedImpl body(admin_request_yaml_);\n  EXPECT_CALL(admin_stream_, getRequestBody()).WillRepeatedly(Return(&body));\n  EXPECT_CALL(extension_config, newTapConfig(_, handler_.get()));\n  EXPECT_CALL(admin_stream_, setEndStreamOnComplete(false));\n  EXPECT_CALL(admin_stream_, addOnDestroyCallback(_));\n  EXPECT_EQ(Http::Code::OK, cb_(\"/tap\", response_headers_, response_, admin_stream_));\n\n  EXPECT_EQ(Http::Code::BadRequest, cb_(\"/tap\", response_headers_, response_, admin_stream_));\n  EXPECT_EQ(\"An attached /tap admin stream already exists. Detach it.\", response_.toString());\n}\n\n} // namespace\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/tap/common.cc",
    "content": "#include \"test/extensions/common/tap/common.h\"\n\n#include \"envoy/data/tap/v3/wrapper.pb.h\"\n\nnamespace envoy {\nnamespace data {\nnamespace tap {\nnamespace v3 {\n\nstd::ostream& operator<<(std::ostream& os, const TraceWrapper& trace) {\n  return os << Envoy::MessageUtil::getJsonStringFromMessage(trace, true, false);\n}\n\n} // namespace v3\n} // namespace tap\n} // namespace data\n} // namespace envoy\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\n\nMockPerTapSinkHandleManager::MockPerTapSinkHandleManager() = default;\nMockPerTapSinkHandleManager::~MockPerTapSinkHandleManager() = default;\n\nMockMatcher::~MockMatcher() = default;\n\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/tap/common.h",
    "content": "#include \"envoy/data/tap/v3/wrapper.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/common/tap/tap.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace envoy {\nnamespace data {\nnamespace tap {\nnamespace v3 {\n\n// TODO(mattklein123): AFAICT gtest has built in printing for proto messages but it doesn't seem\n// to work unless this is here.\nstd::ostream& operator<<(std::ostream& os, const TraceWrapper& trace);\n\n} // namespace v3\n} // namespace tap\n} // namespace data\n} // namespace envoy\n\nnamespace Envoy {\nnamespace Extensions {\n\n// TODO(mattklein123): Make this a common matcher called ProtoYamlEq and figure out how to\n// correctly templatize it.\nMATCHER_P(TraceEqual, rhs, \"\") {\n  envoy::data::tap::v3::TraceWrapper expected_trace;\n  TestUtility::loadFromYaml(rhs, expected_trace);\n  return TestUtility::protoEqual(expected_trace, arg);\n}\n\nnamespace Common {\nnamespace Tap {\n\nclass MockPerTapSinkHandleManager : public PerTapSinkHandleManager {\npublic:\n  MockPerTapSinkHandleManager();\n  ~MockPerTapSinkHandleManager() override;\n\n  void submitTrace(TraceWrapperPtr&& trace) override { submitTrace_(*trace); }\n\n  MOCK_METHOD(void, submitTrace_, (const envoy::data::tap::v3::TraceWrapper& trace));\n};\n\nclass MockMatcher : public Matcher {\npublic:\n  using Matcher::Matcher;\n  ~MockMatcher() override;\n\n  MOCK_METHOD(void, onNewStream, (MatchStatusVector & statuses), (const));\n  MOCK_METHOD(void, onHttpRequestHeaders,\n              (const Http::RequestHeaderMap& request_headers, MatchStatusVector& statuses),\n              (const));\n  MOCK_METHOD(void, onHttpRequestTrailers,\n              (const Http::RequestTrailerMap& request_trailers, MatchStatusVector& statuses),\n              (const));\n  MOCK_METHOD(void, onHttpResponseHeaders,\n              (const Http::ResponseHeaderMap& response_headers, MatchStatusVector& statuses),\n              (const));\n  
MOCK_METHOD(void, onHttpResponseTrailers,\n              (const Http::ResponseTrailerMap& response_trailers, MatchStatusVector& statuses),\n              (const));\n  MOCK_METHOD(void, onRequestBody, (const Buffer::Instance& data, MatchStatusVector& statuses));\n  MOCK_METHOD(void, onResponseBody, (const Buffer::Instance& data, MatchStatusVector& statuses),\n              ());\n};\n\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/tap/tap_config_base_test.cc",
    "content": "#include <vector>\n\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/wrapper.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/common/tap/tap_config_base.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Tap {\nnamespace {\n\nTEST(BodyBytesToString, All) {\n  {\n    envoy::data::tap::v3::TraceWrapper trace;\n    trace.mutable_http_streamed_trace_segment()->mutable_request_body_chunk()->set_as_bytes(\n        \"hello\");\n    Utility::bodyBytesToString(trace, envoy::config::tap::v3::OutputSink::JSON_BODY_AS_BYTES);\n    EXPECT_EQ(\"hello\", trace.http_streamed_trace_segment().request_body_chunk().as_bytes());\n  }\n\n  {\n    envoy::data::tap::v3::TraceWrapper trace;\n    trace.mutable_http_streamed_trace_segment()->mutable_request_body_chunk()->set_as_bytes(\n        \"hello\");\n    Utility::bodyBytesToString(trace, envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING);\n    EXPECT_EQ(\"hello\", trace.http_streamed_trace_segment().request_body_chunk().as_string());\n  }\n\n  {\n    envoy::data::tap::v3::TraceWrapper trace;\n    trace.mutable_http_streamed_trace_segment()->mutable_response_body_chunk()->set_as_bytes(\n        \"hello\");\n    Utility::bodyBytesToString(trace, envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING);\n    EXPECT_EQ(\"hello\", trace.http_streamed_trace_segment().response_body_chunk().as_string());\n  }\n\n  {\n    envoy::data::tap::v3::TraceWrapper trace;\n    trace.mutable_socket_streamed_trace_segment()\n        ->mutable_event()\n        ->mutable_read()\n        ->mutable_data()\n        ->set_as_bytes(\"hello\");\n    Utility::bodyBytesToString(trace, envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING);\n    EXPECT_EQ(\"hello\", trace.socket_streamed_trace_segment().event().read().data().as_string());\n  }\n\n  {\n    
envoy::data::tap::v3::TraceWrapper trace;\n    trace.mutable_socket_streamed_trace_segment()\n        ->mutable_event()\n        ->mutable_write()\n        ->mutable_data()\n        ->set_as_bytes(\"hello\");\n    Utility::bodyBytesToString(trace, envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING);\n    EXPECT_EQ(\"hello\", trace.socket_streamed_trace_segment().event().write().data().as_string());\n  }\n}\n\nTEST(AddBufferToProtoBytes, All) {\n  {\n    Buffer::OwnedImpl data(\"hello\");\n    envoy::data::tap::v3::Body body;\n    Utility::addBufferToProtoBytes(body, 5, data, 4, 1);\n    EXPECT_EQ(\"o\", body.as_bytes());\n    EXPECT_FALSE(body.truncated());\n  }\n\n  {\n    Buffer::OwnedImpl data(\"hello\");\n    envoy::data::tap::v3::Body body;\n    Utility::addBufferToProtoBytes(body, 3, data, 0, 5);\n    EXPECT_EQ(\"hel\", body.as_bytes());\n    EXPECT_TRUE(body.truncated());\n  }\n\n  {\n    Buffer::OwnedImpl data(\"hello\");\n    envoy::data::tap::v3::Body body;\n    Utility::addBufferToProtoBytes(body, 100, data, 0, 5);\n    EXPECT_EQ(\"hello\", body.as_bytes());\n    EXPECT_FALSE(body.truncated());\n  }\n}\n\nTEST(TrimSlice, All) {\n  std::string slice_mem = \"static base slice memory that is long enough\";\n  void* test_base = static_cast<void*>(&slice_mem[0]);\n  {\n    std::vector<Buffer::RawSlice> slices;\n    Utility::trimSlices(slices, 0, 100);\n    EXPECT_TRUE(slices.empty());\n  }\n\n  {\n    std::vector<Buffer::RawSlice> slices = {{test_base, 5}};\n    Utility::trimSlices(slices, 0, 100);\n\n    const std::vector<Buffer::RawSlice> expected{{test_base, 5}};\n    EXPECT_EQ(expected, slices);\n  }\n\n  {\n    std::vector<Buffer::RawSlice> slices = {{test_base, 5}};\n    Utility::trimSlices(slices, 3, 3);\n\n    const std::vector<Buffer::RawSlice> expected{{static_cast<void*>(&slice_mem[3]), 2}};\n    EXPECT_EQ(expected, slices);\n  }\n\n  {\n    std::vector<Buffer::RawSlice> slices = {{test_base, 5}, {test_base, 4}};\n    
Utility::trimSlices(slices, 3, 3);\n\n    const std::vector<Buffer::RawSlice> expected{{static_cast<void*>(&slice_mem[3]), 2},\n                                                 {static_cast<void*>(&slice_mem[0]), 1}};\n    EXPECT_EQ(expected, slices);\n  }\n\n  {\n    std::vector<Buffer::RawSlice> slices = {{test_base, 5}, {test_base, 4}};\n    Utility::trimSlices(slices, 6, 3);\n\n    const std::vector<Buffer::RawSlice> expected{{static_cast<void*>(&slice_mem[5]), 0},\n                                                 {static_cast<void*>(&slice_mem[1]), 3}};\n    EXPECT_EQ(expected, slices);\n  }\n\n  {\n    std::vector<Buffer::RawSlice> slices = {{test_base, 5}, {test_base, 4}};\n    Utility::trimSlices(slices, 0, 0);\n\n    const std::vector<Buffer::RawSlice> expected{{static_cast<void*>(&slice_mem[0]), 0},\n                                                 {static_cast<void*>(&slice_mem[0]), 0}};\n    EXPECT_EQ(expected, slices);\n  }\n\n  {\n    std::vector<Buffer::RawSlice> slices = {{test_base, 5}, {test_base, 4}};\n    Utility::trimSlices(slices, 0, 3);\n\n    const std::vector<Buffer::RawSlice> expected{{static_cast<void*>(&slice_mem[0]), 3},\n                                                 {static_cast<void*>(&slice_mem[0]), 0}};\n    EXPECT_EQ(expected, slices);\n  }\n\n  {\n    std::vector<Buffer::RawSlice> slices = {{test_base, 5}, {test_base, 4}};\n    Utility::trimSlices(slices, 1, 3);\n\n    const std::vector<Buffer::RawSlice> expected{{static_cast<void*>(&slice_mem[1]), 3},\n                                                 {static_cast<void*>(&slice_mem[0]), 0}};\n    EXPECT_EQ(expected, slices);\n  }\n}\n\n} // namespace\n} // namespace Tap\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/utility_test.cc",
    "content": "#include \"extensions/common/utility.h\"\n\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Utility {\nnamespace {\n\n// Test that deprecated names indicate warning or block depending on runtime flags.\nTEST(ExtensionNameUtilTest, DEPRECATED_FEATURE_TEST(TestDeprecatedExtensionNameStatus)) {\n  // Validate that no runtime available results in warnings.\n  {\n    EXPECT_EQ(ExtensionNameUtil::Status::Warn,\n              ExtensionNameUtil::deprecatedExtensionNameStatus(nullptr));\n  }\n\n  // If deprecated feature is enabled, warn.\n  {\n    NiceMock<Runtime::MockLoader> runtime;\n\n    EXPECT_CALL(\n        runtime.snapshot_,\n        deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n        .WillRepeatedly(Return(true));\n\n    EXPECT_EQ(ExtensionNameUtil::Status::Warn,\n              ExtensionNameUtil::deprecatedExtensionNameStatus(&runtime));\n  }\n\n  // If deprecated feature is disabled, block.\n  {\n    NiceMock<Runtime::MockLoader> runtime;\n\n    EXPECT_CALL(\n        runtime.snapshot_,\n        deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n        .WillRepeatedly(Return(false));\n\n    EXPECT_EQ(ExtensionNameUtil::Status::Block,\n              ExtensionNameUtil::deprecatedExtensionNameStatus(&runtime));\n  }\n}\n\n// Test that deprecated names trigger an exception.\nTEST(ExtensionNameUtilTest, DEPRECATED_FEATURE_TEST(TestCheckDeprecatedExtensionNameThrows)) {\n  // Validate that no runtime available results in warnings.\n  {\n    auto test = []() {\n      ExtensionNameUtil::checkDeprecatedExtensionName(\"XXX\", \"deprecated\", \"canonical\", nullptr);\n    };\n\n    
EXPECT_NO_THROW(test());\n\n    EXPECT_LOG_CONTAINS(\"warn\", \"Using deprecated XXX extension name 'deprecated' for 'canonical'.\",\n                        test());\n  }\n\n  // If deprecated feature is enabled, warn.\n  {\n    NiceMock<Runtime::MockLoader> runtime;\n\n    EXPECT_CALL(\n        runtime.snapshot_,\n        deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n        .WillRepeatedly(Return(true));\n\n    auto test = [&]() {\n      ExtensionNameUtil::checkDeprecatedExtensionName(\"XXX\", \"deprecated\", \"canonical\", &runtime);\n    };\n    EXPECT_NO_THROW(test());\n\n    EXPECT_LOG_CONTAINS(\"warn\", \"Using deprecated XXX extension name 'deprecated' for 'canonical'.\",\n                        test());\n  }\n\n  // If deprecated feature is disabled, throw.\n  {\n    NiceMock<Runtime::MockLoader> runtime;\n\n    EXPECT_CALL(\n        runtime.snapshot_,\n        deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n        .WillRepeatedly(Return(false));\n\n    EXPECT_THROW_WITH_REGEX(\n        ExtensionNameUtil::checkDeprecatedExtensionName(\"XXX\", \"deprecated\", \"canonical\", &runtime),\n        EnvoyException, \"Using deprecated XXX extension name 'deprecated' for 'canonical'.*\");\n  }\n}\n\n// Test that deprecated names are reported as allowed or not, with logging.\nTEST(ExtensionNameUtilTest, DEPRECATED_FEATURE_TEST(TestAllowDeprecatedExtensionName)) {\n  // Validate that no runtime available results in warnings and allows deprecated names.\n  {\n    auto test = []() {\n      return ExtensionNameUtil::allowDeprecatedExtensionName(\"XXX\", \"deprecated\", \"canonical\",\n                                                             nullptr);\n    };\n    EXPECT_TRUE(test());\n\n    EXPECT_LOG_CONTAINS(\"warn\", \"Using deprecated XXX extension name 'deprecated' for 'canonical'.\",\n                        test());\n  }\n\n  // If deprecated feature is enabled, 
log and return true.\n  {\n    NiceMock<Runtime::MockLoader> runtime;\n\n    EXPECT_CALL(\n        runtime.snapshot_,\n        deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n        .WillRepeatedly(Return(true));\n\n    auto test = [&]() {\n      return ExtensionNameUtil::allowDeprecatedExtensionName(\"XXX\", \"deprecated\", \"canonical\",\n                                                             &runtime);\n    };\n    EXPECT_TRUE(test());\n\n    EXPECT_LOG_CONTAINS(\"warn\", \"Using deprecated XXX extension name 'deprecated' for 'canonical'.\",\n                        test());\n  }\n\n  // If deprecated feature is disabled, log and return false.\n  {\n    NiceMock<Runtime::MockLoader> runtime;\n\n    EXPECT_CALL(\n        runtime.snapshot_,\n        deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n        .WillRepeatedly(Return(false));\n\n    auto test = [&]() {\n      return ExtensionNameUtil::allowDeprecatedExtensionName(\"XXX\", \"deprecated\", \"canonical\",\n                                                             &runtime);\n    };\n    EXPECT_FALSE(test());\n\n    EXPECT_LOG_CONTAINS(\"error\", \"#using-runtime-overrides-for-deprecated-features\", test());\n  }\n}\n\n} // namespace\n} // namespace Utility\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_binary\",\n    \"envoy_package\",\n)\nload(\n    \"//bazel:envoy_select.bzl\",\n    \"envoy_select_wasm\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"wasm_vm_test\",\n    srcs = [\"wasm_vm_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/common/wasm/test_data:test_rust.wasm\",\n    ]),\n    tags = [\n        # wasm (wee v8 etc) will not compile on Windows\n        \"skip_on_windows\",\n    ],\n    deps = [\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"wasm_test\",\n    srcs = [\"wasm_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/common/wasm/test_data:bad_signature_cpp.wasm\",\n        \"//test/extensions/common/wasm/test_data:test_context_cpp.wasm\",\n        \"//test/extensions/common/wasm/test_data:test_cpp.wasm\",\n    ]),\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//source/common/common:hex_lib\",\n        \"//source/common/crypto:utility_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/common/crypto:utility_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//test/extensions/common/wasm/test_data:test_context_cpp_plugin\",\n        \"//test/extensions/common/wasm/test_data:test_cpp_plugin\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:wasm_lib\",\n    ],\n)\n\nenvoy_cc_test_binary(\n    name = \"wasm_speed_test\",\n    srcs = 
[\"wasm_speed_test.cc\"],\n    external_deps = [\n        \"abseil_optional\",\n        \"benchmark\",\n    ],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/common/wasm/test_data/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\nload(\"//bazel/wasm:wasm.bzl\", \"envoy_wasm_cc_binary\", \"wasm_rust_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nwasm_rust_binary(\n    name = \"test_rust.wasm\",\n    srcs = [\"test_rust.rs\"],\n    rustc_flags = [\"-Clink-arg=-zstack-size=32768\"],\n)\n\nenvoy_cc_library(\n    name = \"test_cpp_plugin\",\n    srcs = [\n        \"test_cpp.cc\",\n        \"test_cpp_null_plugin.cc\",\n    ],\n    copts = [\"-DNULL_PLUGIN=1\"],\n    deps = [\n        \"//external:abseil_node_hash_map\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n        \"//source/extensions/common/wasm:wasm_hdr\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/common/wasm:well_known_names\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"test_context_cpp_plugin\",\n    srcs = [\n        \"test_context_cpp.cc\",\n        \"test_context_cpp_null_plugin.cc\",\n    ],\n    copts = [\"-DNULL_PLUGIN=1\"],\n    deps = [\n        \"//external:abseil_node_hash_map\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n        \"//source/extensions/common/wasm:wasm_hdr\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/common/wasm:well_known_names\",\n        \"//source/extensions/common/wasm/ext:envoy_null_plugin\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"test_cpp.wasm\",\n    srcs = [\"test_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"test_context_cpp.wasm\",\n    srcs = [\"test_context_cpp.cc\"],\n    deps = [\n        \"//source/extensions/common/wasm/ext:envoy_proxy_wasm_api_lib\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"bad_signature_cpp.wasm\",\n    srcs = 
[\"bad_signature_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/common/wasm/test_data/Makefile",
    "content": "all: test_rust.wasm\n\ntest_rust.wasm: test_rust.rs\n\trustc -C lto -C opt-level=3 -C panic=abort -C link-arg=-S -C link-arg=-zstack-size=32768 --crate-type cdylib --target wasm32-unknown-unknown test_rust.rs\n\t../../../../../bazel-bin/test/tools/wee8_compile/wee8_compile_tool test_rust.wasm test_rust.wasm\n"
  },
  {
    "path": "test/extensions/common/wasm/test_data/bad_signature_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <string>\n\n#define EMSCRIPTEN_KEEPALIVE __attribute__((used)) __attribute__((visibility(\"default\")))\n\n// Required Proxy-Wasm ABI version.\nextern \"C\" EMSCRIPTEN_KEEPALIVE void proxy_abi_version_0_1_0() {}\n\nextern \"C\" uint32_t proxy_log(uint32_t level, const char* logMessage, size_t messageSize);\n\nextern \"C\" EMSCRIPTEN_KEEPALIVE uint32_t proxy_on_configure(uint32_t, int bad, char* configuration,\n                                                            int size) {\n  std::string message = \"bad signature\";\n  proxy_log(4 /* error */, message.c_str(), message.size());\n  return 1;\n}\n"
  },
  {
    "path": "test/extensions/common/wasm/test_data/test_context_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <climits>\n#include <string>\n#include <unordered_map>\n#include <vector>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics.h\"\n#include \"source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h\"\n#else\n#include \"extensions/common/wasm/ext/envoy_null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(CommonWasmTestContextCpp)\n\nclass TestContext : public EnvoyContext {\npublic:\n  explicit TestContext(uint32_t id, RootContext* root) : EnvoyContext(id, root) {}\n};\n\nclass TestRootContext : public EnvoyRootContext {\npublic:\n  explicit TestRootContext(uint32_t id, std::string_view root_id) : EnvoyRootContext(id, root_id) {}\n\n  bool onStart(size_t vm_configuration_size) override;\n  bool onDone() override;\n  void onTick() override;\n  void onQueueReady(uint32_t) override;\n  void onResolveDns(uint32_t token, uint32_t results_size) override;\n\nprivate:\n  uint32_t dns_token_;\n};\n\nstatic RegisterContextFactory register_TestContext(CONTEXT_FACTORY(TestContext),\n                                                   ROOT_FACTORY(TestRootContext));\nstatic RegisterContextFactory register_EmptyTestContext(CONTEXT_FACTORY(EnvoyContext),\n                                                        ROOT_FACTORY(EnvoyRootContext), \"empty\");\n\nbool TestRootContext::onStart(size_t) {\n  envoy_resolve_dns(\"example.com\", sizeof(\"example.com\") - 1, &dns_token_);\n  return true;\n}\n\nvoid TestRootContext::onResolveDns(uint32_t token, uint32_t result_size) {\n  logWarn(\"TestRootContext::onResolveDns \" + std::to_string(token));\n  auto dns_buffer = getBufferBytes(WasmBufferType::CallData, 0, result_size);\n  auto dns = parseDnsResults(dns_buffer->view());\n  for (auto& e : dns) {\n    logInfo(\"TestRootContext::onResolveDns dns \" + std::to_string(e.ttl_seconds) + \" \" + e.address);\n  }\n}\n\nbool TestRootContext::onDone() {\n  logWarn(\"TestRootContext::onDone \" + std::to_string(id()));\n  return true;\n}\n\n// 
Null VM fails on nullptr.\nvoid TestRootContext::onTick() {\n  if (envoy_resolve_dns(0, 1, &dns_token_) != WasmResult::InvalidMemoryAccess) {\n    logInfo(\"resolve_dns should report invalid memory access\");\n  }\n  if (envoy_resolve_dns(\"example.com\", sizeof(\"example.com\") - 1, nullptr) !=\n      WasmResult::InvalidMemoryAccess) {\n    logInfo(\"resolve_dns should report invalid memory access\");\n  }\n}\n\n// V8 fails on pointer too large.\nvoid TestRootContext::onQueueReady(uint32_t) {\n  if (envoy_resolve_dns(reinterpret_cast<char*>(INT_MAX), 0, &dns_token_) !=\n      WasmResult::InvalidMemoryAccess) {\n    logInfo(\"resolve_dns should report invalid memory access\");\n  }\n  if (envoy_resolve_dns(\"example.com\", sizeof(\"example.com\") - 1,\n                        reinterpret_cast<uint32_t*>(INT_MAX)) != WasmResult::InvalidMemoryAccess) {\n    logInfo(\"resolve_dns should report invalid memory access\");\n  }\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/common/wasm/test_data/test_context_cpp_null_plugin.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace proxy_wasm {\nnamespace null_plugin {\nnamespace CommonWasmTestContextCpp {\nNullPluginRegistry* context_registry_;\n} // namespace CommonWasmTestContextCpp\n\nRegisterNullVmPluginFactory\n    register_common_wasm_test_context_cpp_plugin(\"CommonWasmTestContextCpp\", []() {\n      return std::make_unique<NullPlugin>(CommonWasmTestContextCpp::context_registry_);\n    });\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "test/extensions/common/wasm/test_data/test_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#ifndef WIN32\n#include \"unistd.h\"\n\n#endif\n#include <cerrno>\n#include <cmath>\n#include <cstdio>\n#include <cstdlib>\n#include <limits>\n#include <string>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics.h\"\n#else\n#include \"include/proxy-wasm/null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(CommonWasmTestCpp)\n\nstatic int* badptr = nullptr;\nstatic float gNan = std::nan(\"1\");\nstatic float gInfinity = INFINITY;\nvolatile double zero_unbeknownst_to_the_compiler = 0.0;\n\n#ifndef CHECK_RESULT\n#define CHECK_RESULT(_c)                                                                           \\\n  do {                                                                                             \\\n    if ((_c) != WasmResult::Ok) {                                                                  \\\n      proxy_log(LogLevel::critical, #_c, sizeof(#_c) - 1);                                         \\\n      abort();                                                                                     \\\n    }                                                                                              \\\n  } while (0)\n#endif\n\n#define CHECK_RESULT_NOT_OK(_c)                                                                    \\\n  do {                                                                                             \\\n    if ((_c) == WasmResult::Ok) {                                                                  \\\n      proxy_log(LogLevel::critical, #_c, sizeof(#_c) - 1);                                         \\\n      abort();                                                                                     \\\n    }                                                                                              \\\n  } while (0)\n\n#define FAIL_NOW(_msg)                                                                             \\\n  do {                                                        
                                     \\\n    const std::string __message = _msg;                                                            \\\n    proxy_log(LogLevel::critical, __message.c_str(), __message.size());                            \\\n    abort();                                                                                       \\\n  } while (0)\n\nWASM_EXPORT(void, proxy_abi_version_0_2_1, (void)) {}\n\nWASM_EXPORT(void, proxy_on_context_create, (uint32_t, uint32_t)) {}\n\nWASM_EXPORT(uint32_t, proxy_on_vm_start, (uint32_t context_id, uint32_t configuration_size)) {\n  const char* configuration_ptr = nullptr;\n  size_t size;\n  proxy_get_buffer_bytes(WasmBufferType::VmConfiguration, 0, configuration_size, &configuration_ptr,\n                         &size);\n  std::string configuration(configuration_ptr, size);\n  if (configuration == \"logging\") {\n    std::string trace_message = \"test trace logging\";\n    proxy_log(LogLevel::trace, trace_message.c_str(), trace_message.size());\n    std::string debug_message = \"test debug logging\";\n    proxy_log(LogLevel::debug, debug_message.c_str(), debug_message.size());\n    std::string warn_message = \"test warn logging\";\n    proxy_log(LogLevel::warn, warn_message.c_str(), warn_message.size());\n    std::string error_message = \"test error logging\";\n    proxy_log(LogLevel::error, error_message.c_str(), error_message.size());\n    LogLevel log_level;\n    CHECK_RESULT(proxy_get_log_level(&log_level));\n    std::string level_message = \"log level is \" + std::to_string(static_cast<uint32_t>(log_level));\n    proxy_log(LogLevel::info, level_message.c_str(), level_message.size());\n  } else if (configuration == \"segv\") {\n    std::string message = \"before badptr\";\n    proxy_log(LogLevel::error, message.c_str(), message.size());\n    ::free(const_cast<void*>(reinterpret_cast<const void*>(configuration_ptr)));\n    *badptr = 1;\n    message = \"after badptr\";\n    proxy_log(LogLevel::error, 
message.c_str(), message.size());\n  } else if (configuration == \"divbyzero\") {\n    std::string message = \"before div by zero\";\n    proxy_log(LogLevel::error, message.c_str(), message.size());\n    ::free(const_cast<void*>(reinterpret_cast<const void*>(configuration_ptr)));\n    int zero = context_id & 0x100000;\n    message = \"divide by zero: \" + std::to_string(100 / zero);\n    proxy_log(LogLevel::error, message.c_str(), message.size());\n  } else if (configuration == \"globals\") {\n    std::string message = \"NaN \" + std::to_string(gNan);\n    proxy_log(LogLevel::warn, message.c_str(), message.size());\n    message = \"inf \" + std::to_string(gInfinity);\n    proxy_log(LogLevel::warn, message.c_str(), message.size());\n    message = \"inf \" + std::to_string(1.0 / zero_unbeknownst_to_the_compiler);\n    proxy_log(LogLevel::warn, message.c_str(), message.size());\n    message = std::string(\"inf \") + (std::isinf(gInfinity) ? \"inf\" : \"nan\");\n    proxy_log(LogLevel::warn, message.c_str(), message.size());\n  } else if (configuration == \"stats\") {\n    uint32_t c, g, h;\n\n    std::string name = \"test_counter\";\n    CHECK_RESULT(proxy_define_metric(MetricType::Counter, name.data(), name.size(), &c));\n    name = \"test_gauge\";\n    CHECK_RESULT(proxy_define_metric(MetricType::Gauge, name.data(), name.size(), &g));\n    name = \"test_historam\";\n    CHECK_RESULT(proxy_define_metric(MetricType::Histogram, name.data(), name.size(), &h));\n    // Bad type.\n    CHECK_RESULT_NOT_OK(\n        proxy_define_metric(static_cast<MetricType>(9999), name.data(), name.size(), &c));\n\n    CHECK_RESULT(proxy_increment_metric(c, 1));\n    CHECK_RESULT(proxy_increment_metric(g, 1));\n    CHECK_RESULT_NOT_OK(proxy_increment_metric(h, 1));\n    CHECK_RESULT(proxy_record_metric(g, 2));\n    CHECK_RESULT(proxy_record_metric(h, 3));\n\n    uint64_t value;\n    // Not found\n    CHECK_RESULT_NOT_OK(proxy_get_metric((1 << 10) + 0, &value));\n    
CHECK_RESULT_NOT_OK(proxy_get_metric((1 << 10) + 1, &value));\n    CHECK_RESULT_NOT_OK(proxy_get_metric((1 << 10) + 2, &value));\n    CHECK_RESULT_NOT_OK(proxy_get_metric((1 << 10) + 3, &value));\n    CHECK_RESULT_NOT_OK(proxy_record_metric((1 << 10) + 0, 1));\n    CHECK_RESULT_NOT_OK(proxy_record_metric((1 << 10) + 1, 1));\n    CHECK_RESULT_NOT_OK(proxy_record_metric((1 << 10) + 2, 1));\n    CHECK_RESULT_NOT_OK(proxy_record_metric((1 << 10) + 3, 1));\n    CHECK_RESULT_NOT_OK(proxy_increment_metric((1 << 10) + 0, 1));\n    CHECK_RESULT_NOT_OK(proxy_increment_metric((1 << 10) + 1, 1));\n    CHECK_RESULT_NOT_OK(proxy_increment_metric((1 << 10) + 2, 1));\n    CHECK_RESULT_NOT_OK(proxy_increment_metric((1 << 10) + 3, 1));\n    // Found.\n    std::string message;\n    CHECK_RESULT(proxy_get_metric(c, &value));\n    message = std::string(\"get counter = \") + std::to_string(value);\n    proxy_log(LogLevel::trace, message.c_str(), message.size());\n    CHECK_RESULT(proxy_increment_metric(c, 1));\n    CHECK_RESULT(proxy_get_metric(c, &value));\n    message = std::string(\"get counter = \") + std::to_string(value);\n    proxy_log(LogLevel::debug, message.c_str(), message.size());\n    CHECK_RESULT(proxy_record_metric(c, 3));\n    CHECK_RESULT(proxy_get_metric(c, &value));\n    message = std::string(\"get counter = \") + std::to_string(value);\n    proxy_log(LogLevel::info, message.c_str(), message.size());\n    CHECK_RESULT(proxy_get_metric(g, &value));\n    message = std::string(\"get gauge = \") + std::to_string(value);\n    proxy_log(LogLevel::warn, message.c_str(), message.size());\n    // Get on histograms is not supported.\n    if (proxy_get_metric(h, &value) != WasmResult::Ok) {\n      message = std::string(\"get histogram = Unsupported\");\n      proxy_log(LogLevel::error, message.c_str(), message.size());\n    }\n    // Negative.\n    CHECK_RESULT_NOT_OK(proxy_increment_metric(c, -1));\n    CHECK_RESULT(proxy_increment_metric(g, -1));\n  } else if (configuration == 
\"foreign\") {\n    std::string function = \"compress\";\n    char* compressed = nullptr;\n    size_t compressed_size = 0;\n    std::string argument = std::string(2000, 'a'); // super compressible.\n    std::string message;\n    CHECK_RESULT(proxy_call_foreign_function(function.data(), function.size(), argument.data(),\n                                             argument.size(), &compressed, &compressed_size));\n    message = std::string(\"compress \") + std::to_string(argument.size()) + \" -> \" +\n              std::to_string(compressed_size);\n    proxy_log(LogLevel::trace, message.c_str(), message.size());\n    function = \"uncompress\";\n    char* result = nullptr;\n    size_t result_size = 0;\n    CHECK_RESULT(proxy_call_foreign_function(function.data(), function.size(), compressed,\n                                             compressed_size, &result, &result_size));\n    message = std::string(\"uncompress \") + std::to_string(compressed_size) + \" -> \" +\n              std::to_string(result_size);\n    proxy_log(LogLevel::debug, message.c_str(), message.size());\n    if (argument != std::string(result, result_size)) {\n      message = \"compress mismatch \";\n      proxy_log(LogLevel::error, message.c_str(), message.size());\n    }\n    ::free(result);\n    result = nullptr;\n    memset(compressed, 0, 4); // damage the compressed version.\n    if (proxy_call_foreign_function(function.data(), function.size(), compressed, compressed_size,\n                                    &result, &result_size) != WasmResult::SerializationFailure) {\n      message = \"bad uncompress should be an error\";\n      proxy_log(LogLevel::error, message.c_str(), message.size());\n    }\n    if (compressed) {\n      ::free(compressed);\n    }\n    if (result) {\n      ::free(result);\n    }\n  } else if (configuration == \"configuration\") {\n    std::string message = \"configuration\";\n    proxy_log(LogLevel::error, message.c_str(), message.size());\n  } else if 
(configuration == \"WASI\") {\n    // These checks depend on Emscripten's support for `WASI` and will only\n    // work if invoked on a \"real\" Wasm VM.\n    int err = fprintf(stdout, \"WASI write to stdout\\n\");\n    if (err < 0) {\n      FAIL_NOW(\"stdout write should succeed\");\n    }\n    err = fprintf(stderr, \"WASI write to stderr\\n\");\n    if (err < 0) {\n      FAIL_NOW(\"stderr write should succeed\");\n    }\n    // We explicitly don't support reading from stdin\n    char tmp[16];\n    size_t rc = fread(static_cast<void*>(tmp), 1, 16, stdin);\n    if (rc != 0 || errno != ENOSYS) {\n      FAIL_NOW(\"stdin read should fail. errno = \" + std::to_string(errno));\n    }\n    // No environment variables should be available\n    char* pathenv = getenv(\"PATH\");\n    if (pathenv != nullptr) {\n      FAIL_NOW(\"PATH environment variable should not be available\");\n    }\n#ifndef WIN32\n    // Exercise the `WASI` `fd_fdstat_get` a little bit\n    int tty = isatty(1);\n    if (errno != ENOTTY || tty != 0) {\n      FAIL_NOW(\"stdout is not a tty\");\n    }\n    tty = isatty(2);\n    if (errno != ENOTTY || tty != 0) {\n      FAIL_NOW(\"stderr is not a tty\");\n    }\n    tty = isatty(99);\n    if (errno != EBADF || tty != 0) {\n      FAIL_NOW(\"isatty errors on bad fds. 
errno = \" + std::to_string(errno));\n    }\n#endif\n  } else if (configuration == \"on_foreign\") {\n    std::string message = \"on_foreign start\";\n    proxy_log(LogLevel::debug, message.c_str(), message.size());\n  } else {\n    std::string message = \"on_vm_start \" + configuration;\n    proxy_log(LogLevel::info, message.c_str(), message.size());\n  }\n  ::free(const_cast<void*>(reinterpret_cast<const void*>(configuration_ptr)));\n  return 1;\n}\n\nWASM_EXPORT(uint32_t, proxy_on_configure, (uint32_t, uint32_t configuration_size)) {\n  const char* configuration_ptr = nullptr;\n  size_t size;\n  proxy_get_buffer_bytes(WasmBufferType::PluginConfiguration, 0, configuration_size,\n                         &configuration_ptr, &size);\n  std::string configuration(configuration_ptr, size);\n  if (configuration == \"done\") {\n    proxy_done();\n  } else {\n    std::string message = \"on_configuration \" + configuration;\n    proxy_log(LogLevel::info, message.c_str(), message.size());\n  }\n  ::free(const_cast<void*>(reinterpret_cast<const void*>(configuration_ptr)));\n  return 1;\n}\n\nWASM_EXPORT(void, proxy_on_foreign_function, (uint32_t, uint32_t token, uint32_t data_size)) {\n  std::string message =\n      \"on_foreign_function \" + std::to_string(token) + \" \" + std::to_string(data_size);\n  proxy_log(LogLevel::info, message.c_str(), message.size());\n}\n\nWASM_EXPORT(uint32_t, proxy_on_done, (uint32_t)) {\n  std::string message = \"on_done logging\";\n  proxy_log(LogLevel::info, message.c_str(), message.size());\n  return 0;\n}\n\nWASM_EXPORT(void, proxy_on_delete, (uint32_t)) {\n  std::string message = \"on_delete logging\";\n  proxy_log(LogLevel::info, message.c_str(), message.size());\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/common/wasm/test_data/test_cpp_null_plugin.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace proxy_wasm {\nnamespace null_plugin {\nnamespace CommonWasmTestCpp {\nNullPluginRegistry* context_registry_;\n} // namespace CommonWasmTestCpp\n\nRegisterNullVmPluginFactory register_common_wasm_test_cpp_plugin(\"CommonWasmTestCpp\", []() {\n  return std::make_unique<NullPlugin>(CommonWasmTestCpp::context_registry_);\n});\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "test/extensions/common/wasm/test_data/test_rust.rs",
    "content": "// TODO(PiotrSikora): build test data with Bazel rules.\n// See: https://github.com/envoyproxy/envoy/issues/9733\n//\n// Build using:\n// $ rustc -C lto -C opt-level=3 -C panic=abort -C link-arg=-S -C link-arg=-zstack-size=32768 --crate-type cdylib --target wasm32-unknown-unknown test_rust.rs\n// $ ../../../../../bazel-bin/test/tools/wee8_compile/wee8_compile_tool test_rust.wasm test_rust.wasm\n\n// Import functions exported from the host environment.\nextern \"C\" {\n    fn pong(value: u32);\n    fn random() -> u32;\n}\n\n#[no_mangle]\nextern \"C\" fn ping(value: u32) {\n    unsafe { pong(value) }\n}\n\n#[no_mangle]\nextern \"C\" fn lucky(number: u32) -> bool {\n    unsafe { number == random() }\n}\n\n#[no_mangle]\nextern \"C\" fn sum(a: u32, b: u32, c: u32) -> u32 {\n    a + b + c\n}\n\n#[no_mangle]\nextern \"C\" fn div(a: u32, b: u32) -> u32 {\n    a / b\n}\n\n#[no_mangle]\nextern \"C\" fn abort() {\n    panic!(\"abort\")\n}\n"
  },
  {
    "path": "test/extensions/common/wasm/wasm_speed_test.cc",
    "content": "#include \"common/common/thread.h\"\n#include \"common/common/thread_synchronizer.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/thread_factory_for_test.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"absl/synchronization/notification.h\"\n#include \"benchmark/benchmark.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"tools/cpp/runfiles/runfiles.h\"\n\nusing bazel::tools::cpp::runfiles::Runfiles;\n\nnamespace Envoy {\n\nvoid bmWasmSpeedTest(benchmark::State& state) {\n  Envoy::Thread::MutexBasicLockable lock;\n  Envoy::Logger::Context logging_state(spdlog::level::warn,\n                                       Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false);\n  Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm).set_level(spdlog::level::off);\n  Envoy::Stats::IsolatedStoreImpl stats_store;\n  Envoy::Api::ApiPtr api = Envoy::Api::createApiForTest(stats_store);\n  Envoy::Upstream::MockClusterManager cluster_manager;\n  Envoy::Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Envoy::Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  auto wasm = std::make_unique<Envoy::Extensions::Common::Wasm::Wasm>(\n      \"envoy.wasm.runtime.null\", \"\", \"\", \"\", scope, cluster_manager, *dispatcher);\n\n  auto context = std::make_shared<Envoy::Extensions::Common::Wasm::Context>(wasm.get());\n  Envoy::Thread::ThreadFactory& thread_factory{Envoy::Thread::threadFactoryForTest()};\n  std::pair<std::string, uint32_t> data;\n  int n_threads = 10;\n\n  for (__attribute__((unused)) auto _ : state) {\n    auto thread_fn = [&]() {\n      for (int i = 0; i < 1000000; i++) {\n        context->getSharedData(\"foo\", &data);\n        context->setSharedData(\"foo\", \"bar\", 1);\n      }\n 
     return new uint32_t(42);\n    };\n    std::vector<Envoy::Thread::ThreadPtr> threads;\n    for (int i = 0; i < n_threads; ++i) {\n      std::string name = absl::StrCat(\"thread\", i);\n      threads.emplace_back(thread_factory.createThread(thread_fn, Envoy::Thread::Options{name}));\n    }\n    for (auto& thread : threads) {\n      thread->join();\n    }\n  }\n}\n\nBENCHMARK(bmWasmSpeedTest);\n\n} // namespace Envoy\n\nint main(int argc, char** argv) {\n  ::benchmark::Initialize(&argc, argv);\n  Envoy::TestEnvironment::initializeOptions(argc, argv);\n  // Create a Runfiles object for runfiles lookup.\n  // https://github.com/bazelbuild/bazel/blob/master/tools/cpp/runfiles/runfiles_src.h#L32\n  std::string error;\n  std::unique_ptr<Runfiles> runfiles(Runfiles::Create(argv[0], &error));\n  RELEASE_ASSERT(Envoy::TestEnvironment::getOptionalEnvVar(\"NORUNFILES\").has_value() ||\n                     runfiles != nullptr,\n                 error);\n  Envoy::TestEnvironment::setRunfiles(runfiles.get());\n  Envoy::TestEnvironment::setEnvVar(\"ENVOY_IP_TEST_VERSIONS\", \"all\", 0);\n  Envoy::Event::Libevent::Global::initialize();\n  if (::benchmark::ReportUnrecognizedArguments(argc, argv)) {\n    return 1;\n  }\n  ::benchmark::RunSpecifiedBenchmarks();\n  return 0;\n}\n"
  },
  {
    "path": "test/extensions/common/wasm/wasm_test.cc",
    "content": "#include \"envoy/server/lifecycle_notifier.h\"\n\n#include \"common/common/hex.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n#include \"test/test_common/wasm_base.h\"\n\n#include \"absl/types/optional.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"openssl/bytestring.h\"\n#include \"openssl/hmac.h\"\n#include \"openssl/sha.h\"\n\nusing Envoy::Server::ServerLifecycleNotifier;\nusing StageCallbackWithCompletion =\n    Envoy::Server::ServerLifecycleNotifier::StageCallbackWithCompletion;\nusing testing::Eq;\nusing testing::Return;\n\nnamespace Envoy {\n\nnamespace Server {\nclass MockServerLifecycleNotifier2 : public ServerLifecycleNotifier {\npublic:\n  MockServerLifecycleNotifier2() = default;\n  ~MockServerLifecycleNotifier2() override = default;\n\n  using ServerLifecycleNotifier::registerCallback;\n\n  ServerLifecycleNotifier::HandlePtr\n  registerCallback(Stage stage, StageCallbackWithCompletion callback) override {\n    return registerCallback2(stage, callback);\n  }\n\n  MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, (Stage, StageCallback));\n  MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback2,\n              (Stage stage, StageCallbackWithCompletion callback));\n};\n} // namespace Server\n\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\nREGISTER_WASM_EXTENSION(EnvoyWasm);\n\nstd::string sha256(absl::string_view data) {\n  std::vector<uint8_t> digest(SHA256_DIGEST_LENGTH);\n  EVP_MD_CTX* ctx(EVP_MD_CTX_new());\n  auto rc = EVP_DigestInit(ctx, EVP_sha256());\n  RELEASE_ASSERT(rc == 1, \"Failed to init digest context\");\n  rc = EVP_DigestUpdate(ctx, data.data(), 
data.size());\n  RELEASE_ASSERT(rc == 1, \"Failed to update digest\");\n  rc = EVP_DigestFinal(ctx, digest.data(), nullptr);\n  RELEASE_ASSERT(rc == 1, \"Failed to finalize digest\");\n  EVP_MD_CTX_free(ctx);\n  return std::string(reinterpret_cast<const char*>(&digest[0]), digest.size());\n}\n\nclass TestContext : public ::Envoy::Extensions::Common::Wasm::Context {\npublic:\n  using ::Envoy::Extensions::Common::Wasm::Context::Context;\n  ~TestContext() override = default;\n  using ::Envoy::Extensions::Common::Wasm::Context::log;\n  proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override {\n    std::cerr << std::string(message) << \"\\n\";\n    log_(static_cast<spdlog::level::level_enum>(level), message);\n    Extensions::Common::Wasm::Context::log(static_cast<spdlog::level::level_enum>(level), message);\n    return proxy_wasm::WasmResult::Ok;\n  }\n  MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message));\n};\n\nclass WasmCommonTest : public testing::TestWithParam<std::string> {\npublic:\n  void SetUp() override { // NOLINT(readability-identifier-naming)\n    Logger::Registry::getLog(Logger::Id::wasm).set_level(spdlog::level::debug);\n    clearCodeCacheForTesting();\n  }\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto test_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    \"v8\",\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    \"wavm\",\n#endif\n    \"null\");\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmCommonTest, test_values);\n\nTEST_P(WasmCommonTest, EnvoyWasm) {\n  auto envoy_wasm = std::make_unique<EnvoyWasm>();\n  envoy_wasm->initialize();\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = 
Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      \"\", \"\", \"\", GetParam(), \"\", false, envoy::config::core::v3::TrafficDirection::UNSPECIFIED,\n      local_info, nullptr);\n  auto wasm = std::make_shared<WasmHandle>(\n      std::make_unique<Wasm>(absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), \"\",\n                             \"vm_configuration\", \"\", scope, cluster_manager, *dispatcher));\n  auto wasm_base = std::dynamic_pointer_cast<proxy_wasm::WasmHandleBase>(wasm);\n  wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::UnableToCreateVM);\n  EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::UnableToCreateVM);\n  wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::UnableToCloneVM);\n  EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::UnableToCloneVM);\n  wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::MissingFunction);\n  EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::MissingFunction);\n  wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::UnableToInitializeCode);\n  EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::UnableToInitializeCode);\n  wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::StartFailed);\n  EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::StartFailed);\n  wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::ConfigureFailed);\n  EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::ConfigureFailed);\n  wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::RuntimeError);\n  EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::RuntimeError);\n\n  auto root_context = static_cast<Context*>(wasm->wasm()->createRootContext(plugin));\n  uint32_t grpc_call_token1 = root_context->nextGrpcCallToken();\n  uint32_t grpc_call_token2 = root_context->nextGrpcCallToken();\n  EXPECT_NE(grpc_call_token1, 
grpc_call_token2);\n  root_context->setNextGrpcTokenForTesting(0); // Rollover.\n  EXPECT_EQ(root_context->nextGrpcCallToken(), 1);\n\n  uint32_t grpc_stream_token1 = root_context->nextGrpcStreamToken();\n  uint32_t grpc_stream_token2 = root_context->nextGrpcStreamToken();\n  EXPECT_NE(grpc_stream_token1, grpc_stream_token2);\n  root_context->setNextGrpcTokenForTesting(0xFFFFFFFF); // Rollover.\n  EXPECT_EQ(root_context->nextGrpcStreamToken(), 2);\n\n  uint32_t http_call_token1 = root_context->nextHttpCallToken();\n  uint32_t http_call_token2 = root_context->nextHttpCallToken();\n  EXPECT_NE(http_call_token1, http_call_token2);\n  root_context->setNextHttpCallTokenForTesting(0); // Rollover.\n  EXPECT_EQ(root_context->nextHttpCallToken(), 1);\n\n  EXPECT_EQ(root_context->getBuffer(WasmBufferType::HttpCallResponseBody), nullptr);\n  EXPECT_EQ(root_context->getBuffer(WasmBufferType::PluginConfiguration), nullptr);\n\n  delete root_context;\n\n  WasmStatePrototype wasm_state_prototype(true, WasmType::Bytes, \"\",\n                                          StreamInfo::FilterState::LifeSpan::FilterChain);\n  auto wasm_state = std::make_unique<WasmState>(wasm_state_prototype);\n  Protobuf::Arena arena;\n  EXPECT_EQ(wasm_state->exprValue(&arena, true).MessageOrDie(), nullptr);\n  wasm_state->setValue(\"foo\");\n  auto any = wasm_state->serializeAsProto();\n  EXPECT_TRUE(static_cast<ProtobufWkt::Any*>(any.get())->Is<ProtobufWkt::BytesValue>());\n}\n\nTEST_P(WasmCommonTest, Logging) {\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"logging\";\n  auto plugin_configuration = 
\"configure-test\";\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n        absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code);\n  auto wasm = std::make_shared<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_NE(wasm, nullptr);\n  EXPECT_NE(wasm->buildVersion(), \"\");\n  EXPECT_NE(std::unique_ptr<ContextBase>(wasm->createContext(plugin)), nullptr);\n  wasm->setCreateContextForTesting(\n      [](Wasm*, const std::shared_ptr<Plugin>&) -> ContextBase* { return nullptr; },\n      [](Wasm*, const std::shared_ptr<Plugin>&) -> ContextBase* { return nullptr; });\n  EXPECT_EQ(std::unique_ptr<ContextBase>(wasm->createContext(plugin)), nullptr);\n  auto wasm_weak = std::weak_ptr<Extensions::Common::Wasm::Wasm>(wasm);\n  auto wasm_handle = std::make_shared<Extensions::Common::Wasm::WasmHandle>(std::move(wasm));\n  EXPECT_TRUE(wasm_weak.lock()->initialize(code, false));\n  auto thread_local_wasm = std::make_shared<Wasm>(wasm_handle, *dispatcher);\n  thread_local_wasm.reset();\n\n  auto wasm_lock = wasm_weak.lock();\n  wasm_lock->setCreateContextForTesting(\n      nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n        auto root_context = new TestContext(wasm, plugin);\n        EXPECT_CALL(*root_context,\n                    log_(spdlog::level::info, Eq(\"on_configuration 
configure-test\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq(\"test trace logging\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq(\"test debug logging\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::warn, Eq(\"test warn logging\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq(\"test error logging\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"log level is 1\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_done logging\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_delete logging\")));\n        return root_context;\n      });\n\n  auto root_context = static_cast<TestContext*>(wasm_weak.lock()->start(plugin));\n  EXPECT_EQ(root_context->getConfiguration(), \"logging\");\n  if (GetParam() != \"null\") {\n    EXPECT_TRUE(root_context->validateConfiguration(\"\", plugin));\n  }\n  wasm_weak.lock()->configure(root_context, plugin);\n  EXPECT_EQ(root_context->getStatus().first, 0);\n\n  wasm_handle.reset();\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n  // This will fault on nullptr if wasm has been deleted.\n  plugin->plugin_configuration_ = \"done\";\n  wasm_weak.lock()->configure(root_context, plugin);\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n  dispatcher->clearDeferredDeleteList();\n}\n\nTEST_P(WasmCommonTest, BadSignature) {\n  if (GetParam() != \"v8\") {\n    return;\n  }\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"\";\n  auto plugin_configuration = \"\";\n  const auto code = 
TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/common/wasm/test_data/bad_signature_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_FALSE(wasm->initialize(code, false));\n  EXPECT_TRUE(wasm->isFailed());\n}\n\nTEST_P(WasmCommonTest, Segv) {\n  if (GetParam() != \"v8\") {\n    return;\n  }\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"segv\";\n  auto plugin_configuration = \"\";\n  const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, 
vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_TRUE(wasm->initialize(code, false));\n  TestContext* root_context = nullptr;\n  wasm->setCreateContextForTesting(\n      nullptr, [&root_context](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n        root_context = new TestContext(wasm, plugin);\n        EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq(\"before badptr\")));\n        return root_context;\n      });\n  wasm->start(plugin);\n  EXPECT_TRUE(wasm->isFailed());\n\n  // Subsequent calls should be NOOP(s).\n\n  root_context->onResolveDns(0, Envoy::Network::DnsResolver::ResolutionStatus::Success, {});\n  Envoy::Stats::MockMetricSnapshot stats_snapshot;\n  root_context->onStatsUpdate(stats_snapshot);\n}\n\nTEST_P(WasmCommonTest, DivByZero) {\n  if (GetParam() != \"v8\") {\n    return;\n  }\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"divbyzero\";\n  auto plugin_configuration = \"\";\n  const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, 
scope,\n      cluster_manager, *dispatcher);\n  EXPECT_NE(wasm, nullptr);\n  auto context = std::make_unique<TestContext>(wasm.get());\n  EXPECT_TRUE(wasm->initialize(code, false));\n  wasm->setCreateContextForTesting(\n      nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n        auto root_context = new TestContext(wasm, plugin);\n        EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq(\"before div by zero\")));\n        return root_context;\n      });\n  wasm->start(plugin);\n}\n\nTEST_P(WasmCommonTest, EmscriptenVersion) {\n  if (GetParam() != \"v8\") {\n    return;\n  }\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"\";\n  auto plugin_configuration = \"\";\n  const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_NE(wasm, nullptr);\n  auto context = std::make_unique<TestContext>(wasm.get());\n  EXPECT_TRUE(wasm->initialize(code, false));\n\n  uint32_t major = 9, minor = 9, abi_major = 9, 
abi_minor = 9;\n  EXPECT_TRUE(wasm->getEmscriptenVersion(&major, &minor, &abi_major, &abi_minor));\n  EXPECT_EQ(major, 0);\n  EXPECT_LE(minor, 3);\n  // Up to (at least) emsdk 1.39.6.\n  EXPECT_EQ(abi_major, 0);\n  EXPECT_LE(abi_minor, 20);\n}\n\nTEST_P(WasmCommonTest, IntrinsicGlobals) {\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"globals\";\n  auto plugin_configuration = \"\";\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n        absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_NE(wasm, nullptr);\n  EXPECT_TRUE(wasm->initialize(code, false));\n  wasm->setCreateContextForTesting(\n      nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n        auto root_context = new TestContext(wasm, plugin);\n        EXPECT_CALL(*root_context, log_(spdlog::level::warn, Eq(\"NaN nan\")));\n        
EXPECT_CALL(*root_context, log_(spdlog::level::warn, Eq(\"inf inf\"))).Times(3);\n        return root_context;\n      });\n  wasm->start(plugin);\n}\n\nTEST_P(WasmCommonTest, Utilities) {\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"utilities\";\n  auto plugin_configuration = \"\";\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n        absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_NE(wasm, nullptr);\n  EXPECT_TRUE(wasm->initialize(code, false));\n  wasm->setCreateContextForTesting(\n      nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n        auto root_context = new TestContext(wasm, plugin);\n        EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_vm_start utilities\")));\n        return root_context;\n      });\n  wasm->start(plugin);\n\n  // Context\n  auto context = 
std::make_unique<Context>();\n  context->error(\"error\");\n\n  // Buffer\n  Extensions::Common::Wasm::Buffer buffer;\n  Extensions::Common::Wasm::Buffer const_buffer;\n  Extensions::Common::Wasm::Buffer string_buffer;\n  auto buffer_impl = std::make_unique<Envoy::Buffer::OwnedImpl>(\"contents\");\n  buffer.set(buffer_impl.get());\n  const_buffer.set(static_cast<const ::Envoy::Buffer::Instance*>(buffer_impl.get()));\n  string_buffer.set(\"contents\");\n  std::string data(\"contents\");\n  if (GetParam() != \"null\") {\n    EXPECT_EQ(WasmResult::InvalidMemoryAccess,\n              buffer.copyTo(wasm.get(), 0, 1 << 30 /* length too long */, 0, 0));\n    EXPECT_EQ(WasmResult::InvalidMemoryAccess,\n              buffer.copyTo(wasm.get(), 0, 1, 1 << 30 /* bad pointer location */, 0));\n    EXPECT_EQ(WasmResult::InvalidMemoryAccess,\n              buffer.copyTo(wasm.get(), 0, 1, 0, 1 << 30 /* bad size location */));\n    EXPECT_EQ(WasmResult::BadArgument, buffer.copyFrom(0, 1, data));\n    EXPECT_EQ(WasmResult::BadArgument, buffer.copyFrom(1, 1, data));\n    EXPECT_EQ(WasmResult::BadArgument, const_buffer.copyFrom(1, 1, data));\n    EXPECT_EQ(WasmResult::BadArgument, string_buffer.copyFrom(1, 1, data));\n  }\n}\n\nTEST_P(WasmCommonTest, Stats) {\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"stats\";\n  auto plugin_configuration = \"\";\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n        absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n  } else {\n 
   // The name of the Null VM plugin.\n    code = \"CommonWasmTestCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_NE(wasm, nullptr);\n  EXPECT_TRUE(wasm->initialize(code, false));\n  wasm->setCreateContextForTesting(\n      nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n        auto root_context = new TestContext(wasm, plugin);\n        EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq(\"get counter = 1\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq(\"get counter = 2\")));\n        // recordMetric on a Counter is the same as increment.\n        EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"get counter = 5\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::warn, Eq(\"get gauge = 2\")));\n        // Get is not supported on histograms.\n        EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq(\"get histogram = Unsupported\")));\n        return root_context;\n      });\n  wasm->start(plugin);\n}\n\nTEST_P(WasmCommonTest, Foreign) {\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"foreign\";\n  
auto vm_key = \"\";\n  auto plugin_configuration = \"\";\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_NE(wasm, nullptr);\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n        absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n  EXPECT_TRUE(wasm->initialize(code, false));\n  wasm->setCreateContextForTesting(\n      nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n        auto root_context = new TestContext(wasm, plugin);\n        EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq(\"compress 2000 -> 23\")));\n        EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq(\"uncompress 23 -> 2000\")));\n        return root_context;\n      });\n  wasm->start(plugin);\n}\n\nTEST_P(WasmCommonTest, OnForeign) {\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"on_foreign\";\n  auto vm_key = \"\";\n  auto plugin_configuration = \"\";\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, 
root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_NE(wasm, nullptr);\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n        absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n  EXPECT_TRUE(wasm->initialize(code, false));\n  TestContext* test_context = nullptr;\n  wasm->setCreateContextForTesting(\n      nullptr, [&test_context](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n        auto context = new TestContext(wasm, plugin);\n        EXPECT_CALL(*context, log_(spdlog::level::debug, Eq(\"on_foreign start\")));\n        EXPECT_CALL(*context, log_(spdlog::level::info, Eq(\"on_foreign_function 7 13\")));\n        test_context = context;\n        return context;\n      });\n  wasm->start(plugin);\n  test_context->onForeignFunction(7, 13);\n}\n\nTEST_P(WasmCommonTest, WASI) {\n  if (GetParam() == \"null\") {\n    // This test has no meaning unless it is invoked by actual Wasm code\n    return;\n  }\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  Upstream::MockClusterManager cluster_manager;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"WASI\";\n  auto vm_key = \"\";\n  auto plugin_configuration = 
\"\";\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n  auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(\n      absl::StrCat(\"envoy.wasm.runtime.\", GetParam()), vm_id, vm_configuration, vm_key, scope,\n      cluster_manager, *dispatcher);\n  EXPECT_NE(wasm, nullptr);\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n        absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n  EXPECT_TRUE(wasm->initialize(code, false));\n  wasm->setCreateContextForTesting(\n      nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n        auto root_context = new TestContext(wasm, plugin);\n        EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"WASI write to stdout\"))).Times(1);\n        EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq(\"WASI write to stderr\"))).Times(1);\n        return root_context;\n      });\n  wasm->start(plugin);\n}\n\nTEST_P(WasmCommonTest, VmCache) {\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  NiceMock<Upstream::MockClusterManager> cluster_manager;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Server::MockServerLifecycleNotifier2> lifecycle_notifier;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider;\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = 
\"vm_cache\";\n  auto plugin_configuration = \"init\";\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n\n  ServerLifecycleNotifier::StageCallbackWithCompletion lifecycle_callback;\n  EXPECT_CALL(lifecycle_notifier, registerCallback2(_, _))\n      .WillRepeatedly(\n          Invoke([&](ServerLifecycleNotifier::Stage,\n                     StageCallbackWithCompletion callback) -> ServerLifecycleNotifier::HandlePtr {\n            lifecycle_callback = callback;\n            return nullptr;\n          }));\n\n  VmConfig vm_config;\n  vm_config.set_runtime(absl::StrCat(\"envoy.wasm.runtime.\", GetParam()));\n  ProtobufWkt::StringValue vm_configuration_string;\n  vm_configuration_string.set_value(vm_configuration);\n  vm_config.mutable_configuration()->PackFrom(vm_configuration_string);\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n        absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n  vm_config.mutable_code()->mutable_local()->set_inline_bytes(code);\n  WasmHandleSharedPtr wasm_handle;\n  createWasm(vm_config, plugin, scope, cluster_manager, init_manager, *dispatcher, *api,\n             lifecycle_notifier, remote_data_provider,\n             [&wasm_handle](const WasmHandleSharedPtr& w) { wasm_handle = w; });\n  EXPECT_NE(wasm_handle, nullptr);\n  Event::PostCb post_cb = [] {};\n  lifecycle_callback(post_cb);\n\n  WasmHandleSharedPtr wasm_handle2;\n  createWasm(vm_config, plugin, scope, cluster_manager, init_manager, *dispatcher, *api,\n             lifecycle_notifier, remote_data_provider,\n             [&wasm_handle2](const 
WasmHandleSharedPtr& w) { wasm_handle2 = w; });\n  EXPECT_NE(wasm_handle2, nullptr);\n  EXPECT_EQ(wasm_handle, wasm_handle2);\n\n  auto wasm_handle_local = getOrCreateThreadLocalWasm(\n      wasm_handle, plugin,\n      [&dispatcher](const WasmHandleBaseSharedPtr& base_wasm) -> WasmHandleBaseSharedPtr {\n        auto wasm =\n            std::make_shared<Wasm>(std::static_pointer_cast<WasmHandle>(base_wasm), *dispatcher);\n        wasm->setCreateContextForTesting(\n            nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n              auto root_context = new TestContext(wasm, plugin);\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_vm_start vm_cache\")));\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_configuration init\")));\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_done logging\")));\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_delete logging\")));\n              return root_context;\n            });\n        return std::make_shared<WasmHandle>(wasm);\n      });\n  wasm_handle.reset();\n  wasm_handle2.reset();\n\n  auto wasm = wasm_handle_local->wasm().get();\n  wasm_handle_local.reset();\n\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n\n  plugin->plugin_configuration_ = \"done\";\n  wasm->configure(wasm->getContext(1), plugin);\n  plugin.reset();\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n  dispatcher->clearDeferredDeleteList();\n\n  proxy_wasm::clearWasmCachesForTesting();\n}\n\nTEST_P(WasmCommonTest, RemoteCode) {\n  if (GetParam() == \"null\") {\n    return;\n  }\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  NiceMock<Upstream::MockClusterManager> cluster_manager;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Server::MockServerLifecycleNotifier> lifecycle_notifier;\n  Init::ExpectableWatcherImpl init_watcher;\n  
Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider;\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"vm_cache\";\n  auto plugin_configuration = \"done\";\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n\n  std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n\n  VmConfig vm_config;\n  vm_config.set_runtime(absl::StrCat(\"envoy.wasm.runtime.\", GetParam()));\n  ProtobufWkt::BytesValue vm_configuration_bytes;\n  vm_configuration_bytes.set_value(vm_configuration);\n  vm_config.mutable_configuration()->PackFrom(vm_configuration_bytes);\n  std::string sha256 = Extensions::Common::Wasm::sha256(code);\n  std::string sha256Hex =\n      Hex::encode(reinterpret_cast<const uint8_t*>(&*sha256.begin()), sha256.size());\n  vm_config.mutable_code()->mutable_remote()->set_sha256(sha256Hex);\n  vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->set_uri(\n      \"http://example.com/test.wasm\");\n  vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->set_cluster(\"example_com\");\n  vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->mutable_timeout()->set_seconds(5);\n  WasmHandleSharedPtr wasm_handle;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager, httpAsyncClientForCluster(\"example_com\"))\n      .WillOnce(ReturnRef(cluster_manager.async_client_));\n  
EXPECT_CALL(cluster_manager.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            Http::ResponseMessagePtr response(\n                new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n            response->body().add(code);\n            callbacks.onSuccess(request, std::move(response));\n            return nullptr;\n          }));\n\n  Init::TargetHandlePtr init_target_handle;\n  EXPECT_CALL(init_manager, add(_)).WillOnce(Invoke([&](const Init::Target& target) {\n    init_target_handle = target.createHandle(\"test\");\n  }));\n  createWasm(vm_config, plugin, scope, cluster_manager, init_manager, *dispatcher, *api,\n             lifecycle_notifier, remote_data_provider,\n             [&wasm_handle](const WasmHandleSharedPtr& w) { wasm_handle = w; });\n\n  EXPECT_CALL(init_watcher, ready());\n  init_target_handle->initialize(init_watcher);\n\n  EXPECT_NE(wasm_handle, nullptr);\n\n  auto wasm_handle_local = getOrCreateThreadLocalWasm(\n      wasm_handle, plugin,\n      [&dispatcher](const WasmHandleBaseSharedPtr& base_wasm) -> WasmHandleBaseSharedPtr {\n        auto wasm =\n            std::make_shared<Wasm>(std::static_pointer_cast<WasmHandle>(base_wasm), *dispatcher);\n        wasm->setCreateContextForTesting(\n            nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n              auto root_context = new TestContext(wasm, plugin);\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_vm_start vm_cache\")));\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_done logging\")));\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_delete logging\")));\n              return 
root_context;\n            });\n        return std::make_shared<WasmHandle>(wasm);\n      });\n  wasm_handle.reset();\n\n  auto wasm = wasm_handle_local->wasm().get();\n  wasm_handle_local.reset();\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n  wasm->configure(wasm->getContext(1), plugin);\n  plugin.reset();\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n  dispatcher->clearDeferredDeleteList();\n}\n\nTEST_P(WasmCommonTest, RemoteCodeMultipleRetry) {\n  if (GetParam() == \"null\") {\n    return;\n  }\n  Stats::IsolatedStoreImpl stats_store;\n  Api::ApiPtr api = Api::createApiForTest(stats_store);\n  NiceMock<Upstream::MockClusterManager> cluster_manager;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Server::MockServerLifecycleNotifier> lifecycle_notifier;\n  Init::ExpectableWatcherImpl init_watcher;\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"wasm_test\"));\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider;\n  auto scope = Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  auto name = \"\";\n  auto root_id = \"\";\n  auto vm_id = \"\";\n  auto vm_configuration = \"vm_cache\";\n  auto plugin_configuration = \"done\";\n  auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(\n      name, root_id, vm_id, GetParam(), plugin_configuration, false,\n      envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr);\n\n  std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      absl::StrCat(\"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm\")));\n\n  VmConfig vm_config;\n  vm_config.set_runtime(absl::StrCat(\"envoy.wasm.runtime.\", GetParam()));\n  ProtobufWkt::StringValue vm_configuration_string;\n  vm_configuration_string.set_value(vm_configuration);\n  vm_config.mutable_configuration()->PackFrom(vm_configuration_string);\n  std::string sha256 = 
Extensions::Common::Wasm::sha256(code);\n  std::string sha256Hex =\n      Hex::encode(reinterpret_cast<const uint8_t*>(&*sha256.begin()), sha256.size());\n  int num_retries = 3;\n  vm_config.mutable_code()->mutable_remote()->set_sha256(sha256Hex);\n  vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->set_uri(\n      \"http://example.com/test.wasm\");\n  vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->set_cluster(\"example_com\");\n  vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->mutable_timeout()->set_seconds(5);\n  vm_config.mutable_code()\n      ->mutable_remote()\n      ->mutable_retry_policy()\n      ->mutable_num_retries()\n      ->set_value(num_retries);\n  WasmHandleSharedPtr wasm_handle;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager, httpAsyncClientForCluster(\"example_com\"))\n      .WillRepeatedly(ReturnRef(cluster_manager.async_client_));\n  EXPECT_CALL(cluster_manager.async_client_, send_(_, _, _))\n      .WillRepeatedly(Invoke([&, retry = num_retries](\n                                 Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                                 const Http::AsyncClient::RequestOptions&) mutable\n                             -> Http::AsyncClient::Request* {\n        if (retry-- == 0) {\n          Http::ResponseMessagePtr response(new Http::ResponseMessageImpl(\n              Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}}));\n          callbacks.onSuccess(request, std::move(response));\n          return nullptr;\n        } else {\n          Http::ResponseMessagePtr response(new Http::ResponseMessageImpl(\n              Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n          response->body().add(code);\n          callbacks.onSuccess(request, std::move(response));\n          return nullptr;\n       
 }\n      }));\n\n  Init::TargetHandlePtr init_target_handle;\n  EXPECT_CALL(init_manager, add(_)).WillOnce(Invoke([&](const Init::Target& target) {\n    init_target_handle = target.createHandle(\"test\");\n  }));\n  createWasm(vm_config, plugin, scope, cluster_manager, init_manager, *dispatcher, *api,\n             lifecycle_notifier, remote_data_provider,\n             [&wasm_handle](const WasmHandleSharedPtr& w) { wasm_handle = w; });\n\n  EXPECT_CALL(init_watcher, ready());\n  init_target_handle->initialize(init_watcher);\n\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n  EXPECT_NE(wasm_handle, nullptr);\n\n  auto wasm_handle_local = getOrCreateThreadLocalWasm(\n      wasm_handle, plugin,\n      [&dispatcher](const WasmHandleBaseSharedPtr& base_wasm) -> WasmHandleBaseSharedPtr {\n        auto wasm =\n            std::make_shared<Wasm>(std::static_pointer_cast<WasmHandle>(base_wasm), *dispatcher);\n        wasm->setCreateContextForTesting(\n            nullptr, [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n              auto root_context = new TestContext(wasm, plugin);\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_vm_start vm_cache\")));\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_done logging\")));\n              EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq(\"on_delete logging\")));\n              return root_context;\n            });\n        return std::make_shared<WasmHandle>(wasm);\n      });\n  wasm_handle.reset();\n\n  auto wasm = wasm_handle_local->wasm().get();\n  wasm_handle_local.reset();\n\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n  wasm->configure(wasm->getContext(1), plugin);\n  plugin.reset();\n  dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n  dispatcher->clearDeferredDeleteList();\n}\n\nclass WasmCommonContextTest\n    : public Common::Wasm::WasmTestBase<testing::TestWithParam<std::string>> {\npublic:\n  
WasmCommonContextTest() = default;\n\n  void setup(const std::string& code, std::string vm_configuration, std::string root_id = \"\") {\n    setupBase(\n        GetParam(), code,\n        [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n          return new TestContext(wasm, plugin);\n        },\n        root_id, vm_configuration);\n  }\n  void setupContext() {\n    context_ = std::make_unique<TestContext>(wasm_->wasm().get(), root_context_->id(), plugin_);\n    context_->onCreate();\n  }\n\n  TestContext& rootContext() { return *static_cast<TestContext*>(root_context_); }\n  TestContext& context() { return *context_; }\n\n  std::unique_ptr<TestContext> context_;\n};\n\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmCommonContextTest, test_values);\n\nTEST_P(WasmCommonContextTest, OnDnsResolve) {\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(absl::StrCat(\n        \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_context_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestContextCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n\n  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());\n  EXPECT_CALL(dispatcher_, createDnsResolver(_, _)).WillRepeatedly(Return(dns_resolver));\n  Network::DnsResolver::ResolveCb dns_callback;\n  Network::MockActiveDnsQuery active_dns_query;\n  EXPECT_CALL(*dns_resolver, resolve(_, _, _))\n      .WillRepeatedly(\n          testing::DoAll(testing::SaveArg<2>(&dns_callback), Return(&active_dns_query)));\n\n  setup(code, \"context\");\n  setupContext();\n  EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq(\"TestRootContext::onResolveDns 1\")));\n  EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq(\"TestRootContext::onResolveDns 2\")));\n  EXPECT_CALL(rootContext(), log_(spdlog::level::info,\n                                  
Eq(\"TestRootContext::onResolveDns dns 1001 192.168.1.101:0\")));\n  EXPECT_CALL(rootContext(), log_(spdlog::level::info,\n                                  Eq(\"TestRootContext::onResolveDns dns 1001 192.168.1.102:0\")));\n  EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq(\"TestRootContext::onDone 1\")));\n\n  dns_callback(\n      Network::DnsResolver::ResolutionStatus::Success,\n      TestUtility::makeDnsResponse({\"192.168.1.101\", \"192.168.1.102\"}, std::chrono::seconds(1001)));\n\n  rootContext().onResolveDns(1 /* token */, Envoy::Network::DnsResolver::ResolutionStatus::Failure,\n                             {});\n  if (GetParam() == \"null\") {\n    rootContext().onTick(0);\n  }\n  if (GetParam() == \"v8\") {\n    rootContext().onQueueReady(0);\n  }\n  // Wait till the Wasm is destroyed and then the late callback should do nothing.\n  deferred_runner_.setFunction([dns_callback] {\n    dns_callback(Network::DnsResolver::ResolutionStatus::Success,\n                 TestUtility::makeDnsResponse({\"192.168.1.101\", \"192.168.1.102\"},\n                                              std::chrono::seconds(1001)));\n  });\n}\n\nTEST_P(WasmCommonContextTest, EmptyContext) {\n  std::string code;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(absl::StrCat(\n        \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_context_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestContextCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n\n  setup(code, \"context\", \"empty\");\n  setupContext();\n\n  root_context_->onResolveDns(0, Envoy::Network::DnsResolver::ResolutionStatus::Success, {});\n  NiceMock<Envoy::Stats::MockMetricSnapshot> stats_snapshot;\n  root_context_->onStatsUpdate(stats_snapshot);\n  root_context_->validateConfiguration(\"\", plugin_);\n}\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/common/wasm/wasm_vm_test.cc",
    "content": "#include \"envoy/registry/registry.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/common/wasm/wasm_vm.h\"\n\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"include/proxy-wasm/null_vm_plugin.h\"\n\nusing proxy_wasm::Cloneable;    // NOLINT\nusing proxy_wasm::WasmCallVoid; // NOLINT\nusing proxy_wasm::WasmCallWord; // NOLINT\nusing proxy_wasm::Word;         // NOLINT\nusing testing::HasSubstr;       // NOLINT\nusing testing::Return;          // NOLINT\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\nnamespace {\n\nclass TestNullVmPlugin : public proxy_wasm::NullVmPlugin {\npublic:\n  TestNullVmPlugin() = default;\n  ~TestNullVmPlugin() override = default;\n\n  MOCK_METHOD(void, start, ());\n};\n\nTestNullVmPlugin* test_null_vm_plugin_ = nullptr;\n\nproxy_wasm::RegisterNullVmPluginFactory register_test_null_vm_plugin(\"test_null_vm_plugin\", []() {\n  auto plugin = std::make_unique<TestNullVmPlugin>();\n  test_null_vm_plugin_ = plugin.get();\n  return plugin;\n});\n\nclass BaseVmTest : public testing::Test {\npublic:\n  BaseVmTest() : scope_(Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"))) {}\n\nprotected:\n  Stats::IsolatedStoreImpl stats_store;\n  Stats::ScopeSharedPtr scope_;\n};\n\nTEST_F(BaseVmTest, NoRuntime) { EXPECT_EQ(createWasmVm(\"\", scope_), nullptr); }\n\nTEST_F(BaseVmTest, BadRuntime) {\n  EXPECT_EQ(createWasmVm(\"envoy.wasm.runtime.invalid\", scope_), nullptr);\n}\n\nTEST_F(BaseVmTest, NullVmStartup) {\n  auto wasm_vm = createWasmVm(\"envoy.wasm.runtime.null\", scope_);\n  EXPECT_TRUE(wasm_vm != nullptr);\n  EXPECT_TRUE(wasm_vm->runtime() == \"null\");\n  EXPECT_TRUE(wasm_vm->cloneable() == Cloneable::InstantiatedModule);\n  auto wasm_vm_clone = wasm_vm->clone();\n  EXPECT_TRUE(wasm_vm_clone != nullptr);\n  
EXPECT_TRUE(wasm_vm->getCustomSection(\"user\").empty());\n  EXPECT_EQ(getEnvoyWasmIntegration(*wasm_vm).runtime(), \"envoy.wasm.runtime.null\");\n  std::function<void()> f;\n  EXPECT_FALSE(\n      getEnvoyWasmIntegration(*wasm_vm).getNullVmFunction(\"bad_function\", false, 0, nullptr, &f));\n}\n\nTEST_F(BaseVmTest, NullVmMemory) {\n  auto wasm_vm = createWasmVm(\"envoy.wasm.runtime.null\", scope_);\n  EXPECT_EQ(wasm_vm->getMemorySize(), std::numeric_limits<uint64_t>::max());\n  std::string d = \"data\";\n  auto m = wasm_vm->getMemory(reinterpret_cast<uint64_t>(d.data()), d.size()).value();\n  EXPECT_EQ(m.data(), d.data());\n  EXPECT_EQ(m.size(), d.size());\n  EXPECT_FALSE(wasm_vm->getMemory(0 /* nullptr */, 1 /* size */).has_value());\n\n  char c;\n  char z = 'z';\n  EXPECT_TRUE(wasm_vm->setMemory(reinterpret_cast<uint64_t>(&c), 1, &z));\n  EXPECT_EQ(c, z);\n  EXPECT_TRUE(wasm_vm->setMemory(0 /* nullptr */, 0 /* size */, nullptr));\n  EXPECT_FALSE(wasm_vm->setMemory(0 /* nullptr */, 1 /* size */, nullptr));\n\n  Word w(13);\n  EXPECT_TRUE(\n      wasm_vm->setWord(reinterpret_cast<uint64_t>(&w), std::numeric_limits<uint64_t>::max()));\n  EXPECT_EQ(w.u64_, std::numeric_limits<uint64_t>::max());\n  EXPECT_FALSE(wasm_vm->setWord(0 /* nullptr */, 1));\n\n  Word w2(0);\n  w.u64_ = 7;\n  EXPECT_TRUE(wasm_vm->getWord(reinterpret_cast<uint64_t>(&w), &w2));\n  EXPECT_EQ(w2.u64_, 7);\n  EXPECT_FALSE(wasm_vm->getWord(0 /* nullptr */, &w2));\n}\n\nclass MockHostFunctions {\npublic:\n  MOCK_METHOD(void, pong, (uint32_t), (const));\n  MOCK_METHOD(uint32_t, random, (), (const));\n};\n\n#if defined(ENVOY_WASM_V8)\nMockHostFunctions* g_host_functions;\n\nvoid pong(void*, Word value) { g_host_functions->pong(convertWordToUint32(value)); }\n\nWord random(void*) { return {g_host_functions->random()}; }\n\n// pong() with wrong number of arguments.\nvoid bad_pong1(void*) {}\n\n// pong() with wrong return type.\nWord bad_pong2(void*, Word) { return 2; }\n\n// pong() with wrong argument 
type.\ndouble bad_pong3(void*, double) { return 3; }\n\nclass WasmVmTest : public testing::TestWithParam<bool> {\npublic:\n  WasmVmTest() : scope_(Stats::ScopeSharedPtr(stats_store.createScope(\"wasm.\"))) {}\n\n  void SetUp() override { // NOLINT(readability-identifier-naming)\n    g_host_functions = new MockHostFunctions();\n  }\n  void TearDown() override { delete g_host_functions; }\n\nprotected:\n  Stats::IsolatedStoreImpl stats_store;\n  Stats::ScopeSharedPtr scope_;\n};\n\nINSTANTIATE_TEST_SUITE_P(AllowPrecompiled, WasmVmTest, testing::Values(false, true));\n\nTEST_P(WasmVmTest, V8BadCode) {\n  auto wasm_vm = createWasmVm(\"envoy.wasm.runtime.v8\", scope_);\n  ASSERT_TRUE(wasm_vm != nullptr);\n\n  EXPECT_FALSE(wasm_vm->load(\"bad code\", GetParam()));\n}\n\nTEST_P(WasmVmTest, V8Code) {\n#ifndef NDEBUG\n  // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the\n  // flags do not match. TODO: restore this test when the rust toolchain is integrated.\n  if (GetParam() == 1) {\n    return;\n  }\n#endif\n  auto wasm_vm = createWasmVm(\"envoy.wasm.runtime.v8\", scope_);\n  ASSERT_TRUE(wasm_vm != nullptr);\n  EXPECT_TRUE(wasm_vm->runtime() == \"v8\");\n\n  auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_rust.wasm\"));\n  EXPECT_TRUE(wasm_vm->load(code, GetParam()));\n\n  // Sanity checks for the expected test file.\n  if (!wasm_vm->getPrecompiledSectionName().empty()) {\n    EXPECT_TRUE(!wasm_vm->getCustomSection(wasm_vm->getPrecompiledSectionName()).empty());\n  }\n  EXPECT_THAT(wasm_vm->getCustomSection(\"producers\"), HasSubstr(\"rustc\"));\n  EXPECT_TRUE(wasm_vm->getCustomSection(\"emscripten_metadata\").empty());\n\n  EXPECT_TRUE(wasm_vm->cloneable() == Cloneable::CompiledBytecode);\n  EXPECT_TRUE(wasm_vm->clone() != nullptr);\n}\n\nTEST_P(WasmVmTest, V8BadHostFunctions) {\n#ifndef NDEBUG\n  // Do not execute 
pre-compilation tests in debug mode because V8 will fail to load because the\n  // flags do not match. TODO: restore this test when the rust toolchain is integrated.\n  if (GetParam() == 1) {\n    return;\n  }\n#endif\n  auto wasm_vm = createWasmVm(\"envoy.wasm.runtime.v8\", scope_);\n  ASSERT_TRUE(wasm_vm != nullptr);\n\n  auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_rust.wasm\"));\n  EXPECT_TRUE(wasm_vm->load(code, GetParam()));\n\n  wasm_vm->registerCallback(\"env\", \"random\", &random, CONVERT_FUNCTION_WORD_TO_UINT32(random));\n  EXPECT_FALSE(wasm_vm->link(\"test\"));\n\n  wasm_vm->registerCallback(\"env\", \"pong\", &bad_pong1, CONVERT_FUNCTION_WORD_TO_UINT32(bad_pong1));\n  EXPECT_FALSE(wasm_vm->link(\"test\"));\n\n  wasm_vm->registerCallback(\"env\", \"pong\", &bad_pong2, CONVERT_FUNCTION_WORD_TO_UINT32(bad_pong2));\n  EXPECT_FALSE(wasm_vm->link(\"test\"));\n\n  wasm_vm->registerCallback(\"env\", \"pong\", &bad_pong3, CONVERT_FUNCTION_WORD_TO_UINT32(bad_pong3));\n  EXPECT_FALSE(wasm_vm->link(\"test\"));\n}\n\nTEST_P(WasmVmTest, V8BadModuleFunctions) {\n#ifndef NDEBUG\n  // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the\n  // flags do not match. 
TODO: restore this test when the rust toolchain is integrated.\n  if (GetParam() == 1) {\n    return;\n  }\n#endif\n  auto wasm_vm = createWasmVm(\"envoy.wasm.runtime.v8\", scope_);\n  ASSERT_TRUE(wasm_vm != nullptr);\n\n  auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_rust.wasm\"));\n  EXPECT_TRUE(wasm_vm->load(code, GetParam()));\n\n  wasm_vm->registerCallback(\"env\", \"pong\", &pong, CONVERT_FUNCTION_WORD_TO_UINT32(pong));\n  wasm_vm->registerCallback(\"env\", \"random\", &random, CONVERT_FUNCTION_WORD_TO_UINT32(random));\n  wasm_vm->link(\"test\");\n\n  WasmCallVoid<1> ping;\n  WasmCallWord<3> sum;\n\n  wasm_vm->getFunction(\"nonexistent\", &ping);\n  EXPECT_TRUE(ping == nullptr);\n\n  wasm_vm->getFunction(\"nonexistent\", &sum);\n  EXPECT_TRUE(sum == nullptr);\n\n  wasm_vm->getFunction(\"ping\", &sum);\n  EXPECT_TRUE(wasm_vm->isFailed());\n\n  wasm_vm->getFunction(\"sum\", &ping);\n  EXPECT_TRUE(wasm_vm->isFailed());\n}\n\nTEST_P(WasmVmTest, V8FunctionCalls) {\n#ifndef NDEBUG\n  // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the\n  // flags do not match. 
TODO: restore this test when the rust toolchain is integrated.\n  if (GetParam() == 1) {\n    return;\n  }\n#endif\n  auto wasm_vm = createWasmVm(\"envoy.wasm.runtime.v8\", scope_);\n  ASSERT_TRUE(wasm_vm != nullptr);\n\n  auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_rust.wasm\"));\n  EXPECT_TRUE(wasm_vm->load(code, GetParam()));\n\n  wasm_vm->registerCallback(\"env\", \"pong\", &pong, CONVERT_FUNCTION_WORD_TO_UINT32(pong));\n  wasm_vm->registerCallback(\"env\", \"random\", &random, CONVERT_FUNCTION_WORD_TO_UINT32(random));\n  wasm_vm->link(\"test\");\n\n  WasmCallVoid<1> ping;\n  wasm_vm->getFunction(\"ping\", &ping);\n  EXPECT_CALL(*g_host_functions, pong(42));\n  ping(nullptr /* no context */, 42);\n\n  WasmCallWord<1> lucky;\n  wasm_vm->getFunction(\"lucky\", &lucky);\n  EXPECT_CALL(*g_host_functions, random()).WillRepeatedly(Return(42));\n  EXPECT_EQ(0, lucky(nullptr /* no context */, 1).u64_);\n  EXPECT_EQ(1, lucky(nullptr /* no context */, 42).u64_);\n\n  WasmCallWord<3> sum;\n  wasm_vm->getFunction(\"sum\", &sum);\n  EXPECT_EQ(42, sum(nullptr /* no context */, 13, 14, 15).u64_);\n\n  WasmCallWord<2> div;\n  wasm_vm->getFunction(\"div\", &div);\n  div(nullptr /* no context */, 42, 0);\n  EXPECT_TRUE(wasm_vm->isFailed());\n\n  WasmCallVoid<0> abort;\n  wasm_vm->getFunction(\"abort\", &abort);\n  abort(nullptr /* no context */);\n  EXPECT_TRUE(wasm_vm->isFailed());\n}\n\nTEST_P(WasmVmTest, V8Memory) {\n#ifndef NDEBUG\n  // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the\n  // flags do not match. 
TODO: restore this test when the rust toolchain is integrated.\n  if (GetParam() == 1) {\n    return;\n  }\n#endif\n  auto wasm_vm = createWasmVm(\"envoy.wasm.runtime.v8\", scope_);\n  ASSERT_TRUE(wasm_vm != nullptr);\n\n  auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/common/wasm/test_data/test_rust.wasm\"));\n  EXPECT_TRUE(wasm_vm->load(code, GetParam()));\n\n  wasm_vm->registerCallback(\"env\", \"pong\", &pong, CONVERT_FUNCTION_WORD_TO_UINT32(pong));\n  wasm_vm->registerCallback(\"env\", \"random\", &random, CONVERT_FUNCTION_WORD_TO_UINT32(random));\n  wasm_vm->link(\"test\");\n\n  EXPECT_EQ(wasm_vm->getMemorySize(), 65536 /* stack size requested at the build-time */);\n\n  const uint64_t test_addr = 128;\n\n  std::string set = \"test\";\n  EXPECT_TRUE(wasm_vm->setMemory(test_addr, set.size(), set.data()));\n  auto got = wasm_vm->getMemory(test_addr, set.size()).value();\n  EXPECT_EQ(sizeof(\"test\") - 1, got.size());\n  EXPECT_STREQ(\"test\", got.data());\n\n  EXPECT_FALSE(wasm_vm->setMemory(1024 * 1024 /* out of bound */, 1 /* size */, nullptr));\n  EXPECT_FALSE(wasm_vm->getMemory(1024 * 1024 /* out of bound */, 1 /* size */).has_value());\n\n  Word word(0);\n  EXPECT_TRUE(wasm_vm->setWord(test_addr, std::numeric_limits<uint32_t>::max()));\n  EXPECT_TRUE(wasm_vm->getWord(test_addr, &word));\n  EXPECT_EQ(std::numeric_limits<uint32_t>::max(), word.u64_);\n\n  EXPECT_FALSE(wasm_vm->setWord(1024 * 1024 /* out of bound */, 1));\n  EXPECT_FALSE(wasm_vm->getWord(1024 * 1024 /* out of bound */, &word));\n}\n#endif\n\n} // namespace\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/compression/gzip/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_fuzz_test(\n    name = \"compressor_fuzz_test\",\n    srcs = [\"compressor_fuzz_test.cc\"],\n    corpus = \"compressor_corpus\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/compression/gzip/compressor:compressor_lib\",\n        \"//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/compression/gzip/compressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"compressor_test\",\n    srcs = [\"zlib_compressor_impl_test.cc\"],\n    extension_name = \"envoy.compression.gzip.compressor\",\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/extensions/compression/gzip/compressor:config\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/hex.h\"\n\n#include \"extensions/compression/gzip/compressor/config.h\"\n#include \"extensions/compression/gzip/compressor/zlib_compressor_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Compressor {\nnamespace {\n\n// Test helpers\n\nvoid expectValidFlushedBuffer(const Buffer::OwnedImpl& output_buffer) {\n  Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices();\n  const uint64_t num_comp_slices = compressed_slices.size();\n\n  const std::string header_hex_str = Hex::encode(\n      reinterpret_cast<unsigned char*>(compressed_slices[0].mem_), compressed_slices[0].len_);\n\n  // HEADER 0x1f = 31 (window_bits)\n  EXPECT_EQ(\"1f8b\", header_hex_str.substr(0, 4));\n  // CM 0x8 = deflate (compression method)\n  EXPECT_EQ(\"08\", header_hex_str.substr(4, 2));\n\n  const std::string footer_hex_str =\n      Hex::encode(reinterpret_cast<unsigned char*>(compressed_slices[num_comp_slices - 1].mem_),\n                  compressed_slices[num_comp_slices - 1].len_);\n  // FOOTER four-byte sequence (sync flush)\n  EXPECT_EQ(\"0000ffff\", footer_hex_str.substr(footer_hex_str.size() - 8, 10));\n}\n\nvoid expectEqualInputSize(const std::string& footer_bytes, const uint32_t input_size) {\n  const std::string size_bytes = footer_bytes.substr(footer_bytes.size() - 8, 8);\n  uint64_t size;\n  StringUtil::atoull(size_bytes.c_str(), size, 16);\n  EXPECT_EQ(TestUtility::flipOrder<uint32_t>(size), input_size);\n}\n\nvoid expectValidFinishedBuffer(const Buffer::OwnedImpl& output_buffer, const uint32_t input_size) {\n  Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices();\n  const uint64_t num_comp_slices = compressed_slices.size();\n\n  const std::string header_hex_str = Hex::encode(\n      
reinterpret_cast<unsigned char*>(compressed_slices[0].mem_), compressed_slices[0].len_);\n  // HEADER 0x1f = 31 (window_bits)\n  EXPECT_EQ(\"1f8b\", header_hex_str.substr(0, 4));\n  // CM 0x8 = deflate (compression method)\n  EXPECT_EQ(\"08\", header_hex_str.substr(4, 2));\n\n  const std::string footer_bytes_str =\n      Hex::encode(reinterpret_cast<unsigned char*>(compressed_slices[num_comp_slices - 1].mem_),\n                  compressed_slices[num_comp_slices - 1].len_);\n\n  // A valid finished compressed buffer should have trailer with input size in it.\n  expectEqualInputSize(footer_bytes_str, input_size);\n}\n\nvoid drainBuffer(Buffer::OwnedImpl& buffer) { buffer.drain(buffer.length()); }\n\nclass ZlibCompressorImplTester : public ZlibCompressorImpl {\npublic:\n  ZlibCompressorImplTester() = default;\n  ZlibCompressorImplTester(uint64_t chunk_size) : ZlibCompressorImpl(chunk_size) {}\n  void compressThenFlush(Buffer::OwnedImpl& buffer) {\n    compress(buffer, Envoy::Compression::Compressor::State::Flush);\n  }\n  void finish(Buffer::OwnedImpl& buffer) {\n    compress(buffer, Envoy::Compression::Compressor::State::Finish);\n  }\n};\n\n// Fixtures\n\nclass ZlibCompressorImplTest : public testing::Test {\nprotected:\n  static constexpr int64_t gzip_window_bits{31};\n  static constexpr int64_t memory_level{8};\n  static constexpr uint64_t default_input_size{796};\n};\n\nclass ZlibCompressorImplDeathTest : public ZlibCompressorImplTest {\nprotected:\n  static void compressorBadInitTestHelper(int64_t window_bits, int64_t mem_level) {\n    ZlibCompressorImpl compressor;\n    compressor.init(ZlibCompressorImpl::CompressionLevel::Standard,\n                    ZlibCompressorImpl::CompressionStrategy::Standard, window_bits, mem_level);\n  }\n\n  static void uninitializedCompressorTestHelper() {\n    Buffer::OwnedImpl buffer;\n    ZlibCompressorImplTester compressor;\n    TestUtility::feedBufferWithRandomCharacters(buffer, 100);\n    compressor.finish(buffer);\n  }\n\n 
 static void uninitializedCompressorFlushTestHelper() {\n    Buffer::OwnedImpl buffer;\n    ZlibCompressorImplTester compressor;\n    compressor.compressThenFlush(buffer);\n  }\n\n  static void uninitializedCompressorFinishTestHelper() {\n    Buffer::OwnedImpl buffer;\n    ZlibCompressorImplTester compressor;\n    compressor.finish(buffer);\n  }\n};\n\nclass ZlibCompressorImplFactoryTest\n    : public ::testing::TestWithParam<std::tuple<std::string, std::string>> {};\n\nINSTANTIATE_TEST_SUITE_P(\n    CreateCompressorTests, ZlibCompressorImplFactoryTest,\n    ::testing::Values(std::make_tuple(\"\", \"\"), std::make_tuple(\"FILTERED\", \"BEST_COMPRESSION\"),\n                      std::make_tuple(\"HUFFMAN_ONLY\", \"BEST_COMPRESSION\"),\n                      std::make_tuple(\"RLE\", \"BEST_SPEED\"),\n                      std::make_tuple(\"DEFAULT_STRATEGY\", \"DEFAULT_COMPRESSION\"),\n                      std::make_tuple(\"FIXED\", \"COMPRESSION_LEVEL_1\"),\n                      std::make_tuple(\"FIXED\", \"COMPRESSION_LEVEL_2\"),\n                      std::make_tuple(\"FIXED\", \"COMPRESSION_LEVEL_3\"),\n                      std::make_tuple(\"FIXED\", \"COMPRESSION_LEVEL_4\"),\n                      std::make_tuple(\"FIXED\", \"COMPRESSION_LEVEL_5\"),\n                      std::make_tuple(\"FIXED\", \"COMPRESSION_LEVEL_6\"),\n                      std::make_tuple(\"FIXED\", \"COMPRESSION_LEVEL_7\"),\n                      std::make_tuple(\"FIXED\", \"COMPRESSION_LEVEL_8\"),\n                      std::make_tuple(\"FIXED\", \"COMPRESSION_LEVEL_9\")));\n\nTEST_P(ZlibCompressorImplFactoryTest, CreateCompressorTest) {\n  Buffer::OwnedImpl buffer;\n  envoy::extensions::compression::gzip::compressor::v3::Gzip gzip;\n  std::string json{\"{}\"};\n  absl::string_view strategy = std::get<0>(GetParam());\n  absl::string_view compression_level = std::get<1>(GetParam());\n\n  if (!strategy.empty()) {\n    json = fmt::format(R\"EOF({{\n      \"compression_strategy\": 
\"{}\",\n      \"compression_level\": \"{}\",\n      \"memory_level\": 6,\n      \"window_bits\": 27,\n      \"chunk_size\": 10000\n    }})EOF\",\n                       strategy, compression_level);\n  }\n  TestUtility::loadFromJson(json, gzip);\n  Envoy::Compression::Compressor::CompressorPtr compressor =\n      GzipCompressorFactory(gzip).createCompressor();\n  // Check the created compressor produces valid output.\n  TestUtility::feedBufferWithRandomCharacters(buffer, 4096);\n  compressor->compress(buffer, Envoy::Compression::Compressor::State::Flush);\n  expectValidFlushedBuffer(buffer);\n  drainBuffer(buffer);\n}\n\n// Exercises death by passing bad initialization params or by calling\n// compress before init.\nTEST_F(ZlibCompressorImplDeathTest, CompressorDeathTest) {\n  EXPECT_DEATH(compressorBadInitTestHelper(100, 8), \"assert failure: result >= 0\");\n  EXPECT_DEATH(compressorBadInitTestHelper(31, 10), \"assert failure: result >= 0\");\n  EXPECT_DEATH(uninitializedCompressorTestHelper(), \"assert failure: result == Z_OK\");\n  EXPECT_DEATH(uninitializedCompressorFlushTestHelper(), \"assert failure: result == Z_OK\");\n  EXPECT_DEATH(uninitializedCompressorFinishTestHelper(), \"assert failure: result == Z_STREAM_END\");\n}\n\n// Exercises compressor's checksum by calling it before init or compress.\nTEST_F(ZlibCompressorImplTest, CallingChecksum) {\n  Buffer::OwnedImpl buffer;\n\n  ZlibCompressorImplTester compressor;\n  EXPECT_EQ(0, compressor.checksum());\n\n  compressor.init(ZlibCompressorImpl::CompressionLevel::Standard,\n                  ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits,\n                  memory_level);\n  EXPECT_EQ(0, compressor.checksum());\n\n  TestUtility::feedBufferWithRandomCharacters(buffer, 4096);\n  compressor.compressThenFlush(buffer);\n  expectValidFlushedBuffer(buffer);\n\n  drainBuffer(buffer);\n  EXPECT_TRUE(compressor.checksum() > 0);\n}\n\n// Exercises compressor's checksum by calling it before init 
or compress.\nTEST_F(ZlibCompressorImplTest, CallingFinishOnly) {\n  Buffer::OwnedImpl buffer;\n\n  ZlibCompressorImplTester compressor;\n  compressor.init(ZlibCompressorImpl::CompressionLevel::Standard,\n                  ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits,\n                  memory_level);\n  EXPECT_EQ(0, compressor.checksum());\n\n  TestUtility::feedBufferWithRandomCharacters(buffer, 4096);\n  compressor.finish(buffer);\n  expectValidFinishedBuffer(buffer, 4096);\n}\n\nTEST_F(ZlibCompressorImplTest, CompressWithSmallChunkSize) {\n  Buffer::OwnedImpl buffer;\n  Buffer::OwnedImpl accumulation_buffer;\n\n  ZlibCompressorImplTester compressor(8);\n  compressor.init(ZlibCompressorImpl::CompressionLevel::Standard,\n                  ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits,\n                  memory_level);\n\n  uint64_t input_size = 0;\n  for (uint64_t i = 0; i < 10; i++) {\n    TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i);\n    ASSERT_EQ(default_input_size * i, buffer.length());\n    input_size += buffer.length();\n    compressor.compressThenFlush(buffer);\n    accumulation_buffer.add(buffer);\n    drainBuffer(buffer);\n    ASSERT_EQ(0, buffer.length());\n  }\n  expectValidFlushedBuffer(accumulation_buffer);\n\n  compressor.finish(buffer);\n  accumulation_buffer.add(buffer);\n  expectValidFinishedBuffer(accumulation_buffer, input_size);\n}\n\n// Exercises compression with other supported zlib initialization params.\nTEST_F(ZlibCompressorImplTest, CompressWithNotCommonParams) {\n  Buffer::OwnedImpl buffer;\n  Buffer::OwnedImpl accumulation_buffer;\n\n  ZlibCompressorImplTester compressor;\n  compressor.init(ZlibCompressorImpl::CompressionLevel::Speed,\n                  ZlibCompressorImpl::CompressionStrategy::Rle, gzip_window_bits, 1);\n\n  uint64_t input_size = 0;\n  for (uint64_t i = 0; i < 10; i++) {\n    TestUtility::feedBufferWithRandomCharacters(buffer, 
default_input_size * i, i);\n    ASSERT_EQ(default_input_size * i, buffer.length());\n    input_size += buffer.length();\n    compressor.compressThenFlush(buffer);\n    accumulation_buffer.add(buffer);\n    drainBuffer(buffer);\n    ASSERT_EQ(0, buffer.length());\n  }\n\n  expectValidFlushedBuffer(accumulation_buffer);\n\n  compressor.finish(buffer);\n  accumulation_buffer.add(buffer);\n  expectValidFinishedBuffer(accumulation_buffer, input_size);\n}\n\n} // namespace\n} // namespace Compressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/compression/gzip/compressor_corpus/empty",
    "content": ""
  },
  {
    "path": "test/extensions/compression/gzip/compressor_corpus/simple",
    "content": "aaaaaaaaaaaaaaaabbbbbbbbbcccccccccccccccccccc\n"
  },
  {
    "path": "test/extensions/compression/gzip/compressor_fuzz_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/compression/gzip/compressor/zlib_compressor_impl.h\"\n#include \"extensions/compression/gzip/decompressor/zlib_decompressor_impl.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Compressor {\nnamespace Fuzz {\n\n// Fuzzer for zlib compression. While the zlib project has its own fuzzer, this\n// fuzzer validates that the Envoy wiring around zlib makes sense and the\n// specific ways we configure it are safe. The fuzzer below validates a round\n// trip compress-decompress pair; the decompressor itself is not fuzzed beyond\n// whatever the compressor emits, as it exists only as a test utility today.\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n\n  FuzzedDataProvider provider(buf, len);\n  ZlibCompressorImpl compressor;\n  Stats::IsolatedStoreImpl stats_store;\n  Decompressor::ZlibDecompressorImpl decompressor{stats_store, \"test\"};\n\n  // Select target compression level. We can't use ConsumeEnum() since the range\n  // is non-contiguous.\n  const ZlibCompressorImpl::CompressionLevel compression_levels[] = {\n      ZlibCompressorImpl::CompressionLevel::Best,\n      ZlibCompressorImpl::CompressionLevel::Speed,\n      ZlibCompressorImpl::CompressionLevel::Standard,\n  };\n  const ZlibCompressorImpl::CompressionLevel target_compression_level =\n      provider.PickValueInArray(compression_levels);\n\n  // Select target compression strategy. 
We can't use ConsumeEnum() since the\n  // range does not start with zero.\n  const ZlibCompressorImpl::CompressionStrategy compression_strategies[] = {\n      ZlibCompressorImpl::CompressionStrategy::Filtered,\n      ZlibCompressorImpl::CompressionStrategy::Huffman,\n      ZlibCompressorImpl::CompressionStrategy::Rle,\n      ZlibCompressorImpl::CompressionStrategy::Standard,\n  };\n  const ZlibCompressorImpl::CompressionStrategy target_compression_strategy =\n      provider.PickValueInArray(compression_strategies);\n\n  // Select target window bits. The range comes from the PGV constraints in\n  // api/envoy/config/filter/http/gzip/v2/gzip.proto.\n  const int64_t target_window_bits = provider.ConsumeIntegralInRange(9, 15);\n\n  // Select memory level. The range comes from the restriction in the init()\n  // header comments.\n  const uint64_t target_memory_level = provider.ConsumeIntegralInRange(1, 9);\n\n  compressor.init(target_compression_level, target_compression_strategy, target_window_bits,\n                  target_memory_level);\n  decompressor.init(target_window_bits);\n\n  bool provider_empty = provider.remaining_bytes() == 0;\n  Buffer::OwnedImpl full_input;\n  Buffer::OwnedImpl full_output;\n  while (!provider_empty) {\n    const std::string next_data = provider.ConsumeRandomLengthString(provider.remaining_bytes());\n    ENVOY_LOG_MISC(debug, \"Processing {} bytes\", next_data.size());\n    full_input.add(next_data);\n    Buffer::OwnedImpl buffer{next_data.data(), next_data.size()};\n    provider_empty = provider.remaining_bytes() == 0;\n    compressor.compress(buffer, provider_empty ? 
Envoy::Compression::Compressor::State::Finish\n                                               : Envoy::Compression::Compressor::State::Flush);\n    decompressor.decompress(buffer, full_output);\n  }\n  RELEASE_ASSERT(full_input.toString() == full_output.toString(), \"\");\n  RELEASE_ASSERT(compressor.checksum() == decompressor.checksum(), \"\");\n}\n\n} // namespace Fuzz\n} // namespace Compressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/compression/gzip/decompressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"zlib_decompressor_impl_test\",\n    srcs = [\"zlib_decompressor_impl_test.cc\"],\n    extension_name = \"envoy.compression.gzip.decompressor\",\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/compression/gzip/compressor:compressor_lib\",\n        \"//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/hex.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/compression/gzip/compressor/zlib_compressor_impl.h\"\n#include \"extensions/compression/gzip/decompressor/zlib_decompressor_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Compression {\nnamespace Gzip {\nnamespace Decompressor {\n\nclass ZlibDecompressorImplTest : public testing::Test {\nprotected:\n  void drainBuffer(Buffer::OwnedImpl& buffer) { buffer.drain(buffer.length()); }\n\n  void testcompressDecompressWithUncommonParams(\n      Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel comp_level,\n      Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy\n          comp_strategy,\n      int64_t window_bits, uint64_t memory_level) {\n    Buffer::OwnedImpl buffer;\n    Buffer::OwnedImpl accumulation_buffer;\n\n    Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor;\n    compressor.init(comp_level, comp_strategy, window_bits, memory_level);\n\n    std::string original_text{};\n    for (uint64_t i = 0; i < 30; ++i) {\n      TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i);\n      original_text.append(buffer.toString());\n      compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush);\n      accumulation_buffer.add(buffer);\n      drainBuffer(buffer);\n    }\n    ASSERT_EQ(0, buffer.length());\n\n    compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish);\n    accumulation_buffer.add(buffer);\n\n    drainBuffer(buffer);\n    ASSERT_EQ(0, buffer.length());\n\n    Stats::IsolatedStoreImpl stats_store{};\n    ZlibDecompressorImpl decompressor{stats_store, \"test.\"};\n    decompressor.init(window_bits);\n\n    decompressor.decompress(accumulation_buffer, buffer);\n    
std::string decompressed_text{buffer.toString()};\n\n    ASSERT_EQ(compressor.checksum(), decompressor.checksum());\n    ASSERT_EQ(original_text.length(), decompressed_text.length());\n    EXPECT_EQ(original_text, decompressed_text);\n    ASSERT_EQ(0, decompressor.decompression_error_);\n  }\n\n  static constexpr int64_t gzip_window_bits{31};\n  static constexpr int64_t memory_level{8};\n  static constexpr uint64_t default_input_size{796};\n};\n\nclass ZlibDecompressorImplFailureTest : public ZlibDecompressorImplTest {\nprotected:\n  static void decompressorBadInitTestHelper(int64_t window_bits) {\n    Stats::IsolatedStoreImpl stats_store{};\n    ZlibDecompressorImpl decompressor{stats_store, \"test.\"};\n    decompressor.init(window_bits);\n  }\n\n  static void uninitializedDecompressorTestHelper() {\n    Buffer::OwnedImpl input_buffer;\n    Buffer::OwnedImpl output_buffer;\n    Stats::IsolatedStoreImpl stats_store{};\n    ZlibDecompressorImpl decompressor{stats_store, \"test.\"};\n    TestUtility::feedBufferWithRandomCharacters(input_buffer, 100);\n    decompressor.decompress(input_buffer, output_buffer);\n    ASSERT_TRUE(decompressor.decompression_error_ < 0);\n    ASSERT_EQ(stats_store.counterFromString(\"test.zlib_stream_error\").value(), 1);\n  }\n};\n\n// Test different failures by passing bad initialization params or by calling decompress before\n// init.\nTEST_F(ZlibDecompressorImplFailureTest, DecompressorFailureTest) {\n  EXPECT_DEATH(decompressorBadInitTestHelper(100), \"assert failure: result >= 0\");\n  uninitializedDecompressorTestHelper();\n}\n\n// Exercises decompressor's checksum by calling it before init or decompress.\nTEST_F(ZlibDecompressorImplTest, CallingChecksum) {\n  Buffer::OwnedImpl compressor_buffer;\n  Buffer::OwnedImpl decompressor_output_buffer;\n\n  Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor;\n  ASSERT_EQ(0, compressor.checksum());\n\n  compressor.init(\n      
Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard,\n      Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard,\n      gzip_window_bits, memory_level);\n  ASSERT_EQ(0, compressor.checksum());\n\n  TestUtility::feedBufferWithRandomCharacters(compressor_buffer, 4096);\n  compressor.compress(compressor_buffer, Envoy::Compression::Compressor::State::Flush);\n  ASSERT_TRUE(compressor.checksum() > 0);\n\n  Stats::IsolatedStoreImpl stats_store{};\n  ZlibDecompressorImpl decompressor{stats_store, \"test.\"};\n  decompressor.init(gzip_window_bits);\n  EXPECT_EQ(0, decompressor.checksum());\n\n  decompressor.decompress(compressor_buffer, decompressor_output_buffer);\n\n  drainBuffer(compressor_buffer);\n  drainBuffer(decompressor_output_buffer);\n\n  EXPECT_EQ(compressor.checksum(), decompressor.checksum());\n  ASSERT_EQ(0, decompressor.decompression_error_);\n}\n\n// Exercises compression and decompression by compressing some data, decompressing it and then\n// comparing compressor's input/checksum with decompressor's output/checksum.\nTEST_F(ZlibDecompressorImplTest, CompressAndDecompress) {\n  Buffer::OwnedImpl buffer;\n  Buffer::OwnedImpl accumulation_buffer;\n  Buffer::OwnedImpl empty_buffer;\n\n  Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor;\n  compressor.init(\n      Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard,\n      Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard,\n      gzip_window_bits, memory_level);\n\n  std::string original_text{};\n  for (uint64_t i = 0; i < 20; ++i) {\n    TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i);\n    original_text.append(buffer.toString());\n    compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush);\n    accumulation_buffer.add(buffer);\n    drainBuffer(buffer);\n  }\n\n  ASSERT_EQ(0, 
buffer.length());\n\n  compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish);\n  ASSERT_GE(10, buffer.length());\n\n  accumulation_buffer.add(buffer);\n\n  drainBuffer(buffer);\n  ASSERT_EQ(0, buffer.length());\n\n  Stats::IsolatedStoreImpl stats_store{};\n  ZlibDecompressorImpl decompressor{stats_store, \"test.\"};\n  decompressor.init(gzip_window_bits);\n\n  decompressor.decompress(accumulation_buffer, buffer);\n  std::string decompressed_text{buffer.toString()};\n\n  // Check decompressor's internal state isn't broken.\n  drainBuffer(buffer);\n  ASSERT_EQ(0, buffer.length());\n  decompressor.decompress(empty_buffer, buffer);\n  ASSERT_EQ(0, buffer.length());\n\n  ASSERT_EQ(compressor.checksum(), decompressor.checksum());\n  ASSERT_EQ(original_text.length(), decompressed_text.length());\n  EXPECT_EQ(original_text, decompressed_text);\n  ASSERT_EQ(0, decompressor.decompression_error_);\n}\n\n// Tests decompression_error_ set to True when Decompression Fails\nTEST_F(ZlibDecompressorImplTest, FailedDecompression) {\n  Buffer::OwnedImpl buffer;\n  Buffer::OwnedImpl accumulation_buffer;\n\n  std::string original_text{};\n  for (uint64_t i = 0; i < 20; ++i) {\n    TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i);\n    original_text.append(buffer.toString());\n    accumulation_buffer.add(buffer);\n    drainBuffer(buffer);\n  }\n  Stats::IsolatedStoreImpl stats_store{};\n  ZlibDecompressorImpl decompressor{stats_store, \"test.\"};\n  decompressor.init(gzip_window_bits);\n\n  decompressor.decompress(accumulation_buffer, buffer);\n\n  ASSERT_TRUE(decompressor.decompression_error_ < 0);\n  ASSERT_EQ(stats_store.counterFromString(\"test.zlib_data_error\").value(), 17);\n}\n\n// Exercises decompression with a very small output buffer.\nTEST_F(ZlibDecompressorImplTest, DecompressWithSmallOutputBuffer) {\n  Buffer::OwnedImpl buffer;\n  Buffer::OwnedImpl accumulation_buffer;\n\n  
Envoy::Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor;\n  compressor.init(\n      Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard,\n      Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard,\n      gzip_window_bits, memory_level);\n\n  std::string original_text{};\n  for (uint64_t i = 0; i < 20; ++i) {\n    TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i);\n    original_text.append(buffer.toString());\n    compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush);\n    accumulation_buffer.add(buffer);\n    drainBuffer(buffer);\n  }\n\n  ASSERT_EQ(0, buffer.length());\n\n  compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish);\n  ASSERT_GE(10, buffer.length());\n\n  accumulation_buffer.add(buffer);\n\n  drainBuffer(buffer);\n  ASSERT_EQ(0, buffer.length());\n\n  Stats::IsolatedStoreImpl stats_store{};\n  ZlibDecompressorImpl decompressor{stats_store, \"test.\", 16};\n  decompressor.init(gzip_window_bits);\n\n  decompressor.decompress(accumulation_buffer, buffer);\n  std::string decompressed_text{buffer.toString()};\n\n  ASSERT_EQ(compressor.checksum(), decompressor.checksum());\n  ASSERT_EQ(original_text.length(), decompressed_text.length());\n  EXPECT_EQ(original_text, decompressed_text);\n  ASSERT_EQ(0, decompressor.decompression_error_);\n}\n\n// Exercises decompression with other supported zlib initialization params.\nTEST_F(ZlibDecompressorImplTest, CompressDecompressWithUncommonParams) {\n  // Test with different memory levels.\n  for (uint64_t i = 1; i < 10; ++i) {\n    testcompressDecompressWithUncommonParams(\n        Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best,\n        Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15,\n        i);\n\n    testcompressDecompressWithUncommonParams(\n        
Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best,\n        Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15,\n        i);\n\n    testcompressDecompressWithUncommonParams(\n        Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed,\n        Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman,\n        15, i);\n\n    testcompressDecompressWithUncommonParams(\n        Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed,\n        Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::\n            Filtered,\n        15, i);\n  }\n}\n\nTEST_F(ZlibDecompressorImplTest, CompressDecompressOfMultipleSlices) {\n  Buffer::OwnedImpl buffer;\n  Buffer::OwnedImpl accumulation_buffer;\n\n  const std::string sample{\"slice, slice, slice, slice, slice, \"};\n  std::string original_text;\n  for (uint64_t i = 0; i < 20; ++i) {\n    Buffer::BufferFragmentImpl* frag = new Buffer::BufferFragmentImpl(\n        sample.c_str(), sample.size(),\n        [](const void*, size_t, const Buffer::BufferFragmentImpl* frag) { delete frag; });\n\n    buffer.addBufferFragment(*frag);\n    original_text.append(sample);\n  }\n\n  const uint64_t num_slices = buffer.getRawSlices().size();\n  EXPECT_EQ(num_slices, 20);\n\n  Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor;\n  compressor.init(\n      Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard,\n      Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard,\n      gzip_window_bits, memory_level);\n\n  compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush);\n  accumulation_buffer.add(buffer);\n\n  Stats::IsolatedStoreImpl stats_store{};\n  ZlibDecompressorImpl decompressor{stats_store, \"test.\"};\n  
decompressor.init(gzip_window_bits);\n\n  drainBuffer(buffer);\n  ASSERT_EQ(0, buffer.length());\n\n  decompressor.decompress(accumulation_buffer, buffer);\n  std::string decompressed_text{buffer.toString()};\n\n  ASSERT_EQ(compressor.checksum(), decompressor.checksum());\n  ASSERT_EQ(original_text.length(), decompressed_text.length());\n  EXPECT_EQ(original_text, decompressed_text);\n}\n\nclass ZlibDecompressorStatsTest : public testing::Test {\nprotected:\n  void chargeErrorStats(const int result) { decompressor_.chargeErrorStats(result); }\n\n  Stats::IsolatedStoreImpl stats_store_{};\n  ZlibDecompressorImpl decompressor_{stats_store_, \"test.\"};\n};\n\nTEST_F(ZlibDecompressorStatsTest, ChargeErrorStats) {\n  decompressor_.init(31);\n\n  chargeErrorStats(Z_ERRNO);\n  ASSERT_EQ(stats_store_.counterFromString(\"test.zlib_errno\").value(), 1);\n  chargeErrorStats(Z_STREAM_ERROR);\n  ASSERT_EQ(stats_store_.counterFromString(\"test.zlib_stream_error\").value(), 1);\n  chargeErrorStats(Z_DATA_ERROR);\n  ASSERT_EQ(stats_store_.counterFromString(\"test.zlib_data_error\").value(), 1);\n  chargeErrorStats(Z_MEM_ERROR);\n  ASSERT_EQ(stats_store_.counterFromString(\"test.zlib_mem_error\").value(), 1);\n  chargeErrorStats(Z_BUF_ERROR);\n  ASSERT_EQ(stats_store_.counterFromString(\"test.zlib_buf_error\").value(), 1);\n  chargeErrorStats(Z_VERSION_ERROR);\n  ASSERT_EQ(stats_store_.counterFromString(\"test.zlib_version_error\").value(), 1);\n}\n\n} // namespace Decompressor\n} // namespace Gzip\n} // namespace Compression\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/extensions_build_system.bzl",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_benchmark_test\", \"envoy_cc_benchmark_binary\", \"envoy_cc_mock\", \"envoy_cc_test\", \"envoy_cc_test_binary\", \"envoy_cc_test_library\")\nload(\"@envoy_build_config//:extensions_build_config.bzl\", \"EXTENSIONS\")\n\n# All extension tests should use this version of envoy_cc_test(). It allows compiling out\n# tests for extensions that the user does not wish to include in their build.\n# @param extension_name should match an extension listed in EXTENSIONS.\ndef envoy_extension_cc_test(\n        name,\n        extension_name,\n        **kwargs):\n    if not extension_name in EXTENSIONS:\n        return\n\n    envoy_cc_test(name, **kwargs)\n\ndef envoy_extension_cc_test_library(\n        name,\n        extension_name,\n        **kwargs):\n    if not extension_name in EXTENSIONS:\n        return\n\n    envoy_cc_test_library(name, **kwargs)\n\ndef envoy_extension_cc_mock(\n        name,\n        extension_name,\n        **kwargs):\n    if not extension_name in EXTENSIONS:\n        return\n\n    envoy_cc_mock(name, **kwargs)\n\ndef envoy_extension_cc_test_binary(\n        name,\n        extension_name,\n        **kwargs):\n    if not extension_name in EXTENSIONS:\n        return\n\n    envoy_cc_test_binary(name, **kwargs)\n\ndef envoy_extension_cc_benchmark_binary(\n        name,\n        extension_name,\n        **kwargs):\n    if not extension_name in EXTENSIONS:\n        return\n\n    envoy_cc_benchmark_binary(name, **kwargs)\n\ndef envoy_extension_benchmark_test(\n        name,\n        extension_name,\n        **kwargs):\n    if not extension_name in EXTENSIONS:\n        return\n\n    envoy_benchmark_test(name, **kwargs)\n"
  },
  {
    "path": "test/extensions/filters/common/expr/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"context_test\",\n    srcs = [\"context_test.cc\"],\n    extension_name = \"envoy.filters.http.rbac\",\n    deps = [\n        \"//source/common/router:string_accessor_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/filters/common/expr:context_lib\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"evaluator_fuzz_proto\",\n    srcs = [\"evaluator_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n        \"@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"evaluator_fuzz_test\",\n    srcs = [\"evaluator_fuzz_test.cc\"],\n    corpus = \":evaluator_corpus\",\n    deps = [\n        \":evaluator_fuzz_proto_cc_proto\",\n        \"//source/extensions/filters/common/expr:evaluator_lib\",\n        \"//test/common/stream_info:test_util\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/common/expr/context_test.cc",
    "content": "#include \"common/network/utility.h\"\n#include \"common/router/string_accessor_impl.h\"\n#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"extensions/filters/common/expr/context.h\"\n\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/mocks/upstream/host.h\"\n\n#include \"absl/time/time.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Expr {\nnamespace {\n\nconstexpr absl::string_view Undefined = \"undefined\";\n\nTEST(Context, EmptyHeadersAttributes) {\n  Protobuf::Arena arena;\n  HeadersWrapper<Http::RequestHeaderMap> headers(arena, nullptr);\n  auto header = headers[CelValue::CreateStringView(Referer)];\n  EXPECT_FALSE(header.has_value());\n  EXPECT_EQ(0, headers.size());\n  EXPECT_TRUE(headers.empty());\n}\n\nTEST(Context, InvalidRequest) {\n  Http::TestRequestHeaderMapImpl header_map{{\"referer\", \"dogs.com\"}};\n  Protobuf::Arena arena;\n  HeadersWrapper<Http::RequestHeaderMap> headers(arena, &header_map);\n  auto header = headers[CelValue::CreateStringView(\"dogs.com\\n\")];\n  EXPECT_FALSE(header.has_value());\n}\n\nTEST(Context, RequestAttributes) {\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  NiceMock<StreamInfo::MockStreamInfo> empty_info;\n  Http::TestRequestHeaderMapImpl header_map{\n      {\":method\", \"POST\"},           {\":scheme\", \"http\"},      {\":path\", \"/meow?yes=1\"},\n      {\":authority\", \"kittens.com\"}, {\"referer\", \"dogs.com\"},  {\"user-agent\", \"envoy-mobile\"},\n      {\"content-length\", \"10\"},      {\"x-request-id\", \"blah\"}, {\"double-header\", \"foo\"},\n      {\"double-header\", \"bar\"}};\n  Protobuf::Arena arena;\n  RequestWrapper request(arena, &header_map, info);\n  RequestWrapper empty_request(arena, nullptr, empty_info);\n\n  EXPECT_CALL(info, 
bytesReceived()).WillRepeatedly(Return(10));\n  // \"2018-04-03T23:06:09.123Z\".\n  const SystemTime start_time(std::chrono::milliseconds(1522796769123));\n  EXPECT_CALL(info, startTime()).WillRepeatedly(Return(start_time));\n  absl::optional<std::chrono::nanoseconds> dur = std::chrono::nanoseconds(15000000);\n  EXPECT_CALL(info, requestComplete()).WillRepeatedly(Return(dur));\n  EXPECT_CALL(info, protocol()).WillRepeatedly(Return(Http::Protocol::Http2));\n\n  // stub methods\n  EXPECT_EQ(0, request.size());\n  EXPECT_FALSE(request.empty());\n\n  {\n    auto value = request[CelValue::CreateStringView(Undefined)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = request[CelValue::CreateInt64(13)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(Scheme)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"http\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = empty_request[CelValue::CreateStringView(Scheme)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(Host)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"kittens.com\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(Path)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"/meow?yes=1\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(UrlPath)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"/meow\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(Method)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"POST\", value.value().StringOrDie().value());\n  }\n\n  
{\n    auto value = request[CelValue::CreateStringView(Referer)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"dogs.com\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(UserAgent)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"envoy-mobile\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(ID)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"blah\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(Size)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(10, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(TotalSize)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    // this includes the headers size\n    EXPECT_EQ(170, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = empty_request[CelValue::CreateStringView(TotalSize)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    // this includes the headers size\n    EXPECT_EQ(0, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(Time)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsTimestamp());\n    EXPECT_EQ(\"2018-04-03T23:06:09.123+00:00\", absl::FormatTime(value.value().TimestampOrDie()));\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(Headers)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsMap());\n    auto& map = *value.value().MapOrDie();\n    EXPECT_FALSE(map.empty());\n    EXPECT_EQ(10, map.size());\n\n    auto header = map[CelValue::CreateStringView(Referer)];\n    EXPECT_TRUE(header.has_value());\n    
ASSERT_TRUE(header.value().IsString());\n    EXPECT_EQ(\"dogs.com\", header.value().StringOrDie().value());\n\n    auto header2 = map[CelValue::CreateStringView(\"double-header\")];\n    EXPECT_TRUE(header2.has_value());\n    ASSERT_TRUE(header2.value().IsString());\n    EXPECT_EQ(\"foo,bar\", header2.value().StringOrDie().value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(Duration)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsDuration());\n    EXPECT_EQ(\"15ms\", absl::FormatDuration(value.value().DurationOrDie()));\n  }\n\n  {\n    auto value = empty_request[CelValue::CreateStringView(Duration)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(Protocol)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"HTTP/2\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = empty_request[CelValue::CreateStringView(Protocol)];\n    EXPECT_FALSE(value.has_value());\n  }\n}\n\nTEST(Context, RequestFallbackAttributes) {\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Http::TestRequestHeaderMapImpl header_map{\n      {\":method\", \"POST\"},\n      {\":scheme\", \"http\"},\n      {\":path\", \"/meow\"},\n  };\n  Protobuf::Arena arena;\n  RequestWrapper request(arena, &header_map, info);\n\n  EXPECT_CALL(info, bytesReceived()).WillRepeatedly(Return(10));\n\n  {\n    auto value = request[CelValue::CreateStringView(Size)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(10, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = request[CelValue::CreateStringView(UrlPath)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"/meow\", value.value().StringOrDie().value());\n  }\n}\n\nTEST(Context, ResponseAttributes) {\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  NiceMock<StreamInfo::MockStreamInfo> empty_info;\n  
const std::string header_name = \"test-header\";\n  const std::string trailer_name = \"test-trailer\";\n  const std::string grpc_status = \"grpc-status\";\n  Http::TestResponseHeaderMapImpl header_map{{header_name, \"a\"}};\n  Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, \"b\"}, {grpc_status, \"8\"}};\n  Protobuf::Arena arena;\n  ResponseWrapper response(arena, &header_map, &trailer_map, info);\n  ResponseWrapper empty_response(arena, nullptr, nullptr, empty_info);\n\n  EXPECT_CALL(info, responseCode()).WillRepeatedly(Return(404));\n  EXPECT_CALL(info, bytesSent()).WillRepeatedly(Return(123));\n  EXPECT_CALL(info, responseFlags()).WillRepeatedly(Return(0x1));\n\n  const absl::optional<std::string> code_details = \"unauthorized\";\n  EXPECT_CALL(info, responseCodeDetails()).WillRepeatedly(ReturnRef(code_details));\n\n  {\n    auto value = response[CelValue::CreateStringView(Undefined)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = response[CelValue::CreateInt64(13)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = response[CelValue::CreateStringView(Size)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(123, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = response[CelValue::CreateStringView(TotalSize)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(160, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = empty_response[CelValue::CreateStringView(TotalSize)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(0, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = response[CelValue::CreateStringView(Code)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(404, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = response[CelValue::CreateStringView(CodeDetails)];\n    
EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(code_details.value(), value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = response[CelValue::CreateStringView(Headers)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsMap());\n    auto& map = *value.value().MapOrDie();\n    EXPECT_FALSE(map.empty());\n    EXPECT_EQ(1, map.size());\n\n    auto header = map[CelValue::CreateStringView(header_name)];\n    EXPECT_TRUE(header.has_value());\n    ASSERT_TRUE(header.value().IsString());\n    EXPECT_EQ(\"a\", header.value().StringOrDie().value());\n\n    auto missing = map[CelValue::CreateStringView(Undefined)];\n    EXPECT_FALSE(missing.has_value());\n  }\n\n  {\n    auto value = response[CelValue::CreateStringView(Trailers)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsMap());\n    auto& map = *value.value().MapOrDie();\n    EXPECT_FALSE(map.empty());\n    EXPECT_EQ(2, map.size());\n\n    auto header = map[CelValue::CreateString(&trailer_name)];\n    EXPECT_TRUE(header.has_value());\n    ASSERT_TRUE(header.value().IsString());\n    EXPECT_EQ(\"b\", header.value().StringOrDie().value());\n  }\n\n  {\n    auto value = response[CelValue::CreateStringView(Flags)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(0x1, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = response[CelValue::CreateStringView(GrpcStatus)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(0x8, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = empty_response[CelValue::CreateStringView(GrpcStatus)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = empty_response[CelValue::CreateStringView(Code)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = empty_response[CelValue::CreateStringView(CodeDetails)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    
Http::TestResponseHeaderMapImpl header_map{{header_name, \"a\"}, {grpc_status, \"7\"}};\n    Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, \"b\"}};\n    Protobuf::Arena arena;\n    ResponseWrapper response_header_status(arena, &header_map, &trailer_map, info);\n    auto value = response_header_status[CelValue::CreateStringView(GrpcStatus)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(0x7, value.value().Int64OrDie());\n  }\n  {\n    Http::TestResponseHeaderMapImpl header_map{{header_name, \"a\"}};\n    Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, \"b\"}};\n    Protobuf::Arena arena;\n    ResponseWrapper response_no_status(arena, &header_map, &trailer_map, info);\n    auto value = response_no_status[CelValue::CreateStringView(GrpcStatus)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(0xc, value.value().Int64OrDie()); // http:404 -> grpc:12\n  }\n  {\n    NiceMock<StreamInfo::MockStreamInfo> info_without_code;\n    Http::TestResponseHeaderMapImpl header_map{{header_name, \"a\"}};\n    Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, \"b\"}};\n    Protobuf::Arena arena;\n    ResponseWrapper response_no_status(arena, &header_map, &trailer_map, info_without_code);\n    auto value = response_no_status[CelValue::CreateStringView(GrpcStatus)];\n    EXPECT_FALSE(value.has_value());\n  }\n}\n\nTEST(Context, ConnectionFallbackAttributes) {\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  ConnectionWrapper connection(info);\n  UpstreamWrapper upstream(info);\n  {\n    auto value = connection[CelValue::CreateStringView(Undefined)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(ID)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(Undefined)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = 
upstream[CelValue::CreateInt64(1)];\n    EXPECT_FALSE(value.has_value());\n  }\n}\n\nTEST(Context, ConnectionAttributes) {\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  std::shared_ptr<NiceMock<Envoy::Upstream::MockHostDescription>> upstream_host(\n      new NiceMock<Envoy::Upstream::MockHostDescription>());\n  auto downstream_ssl_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  auto upstream_ssl_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ConnectionWrapper connection(info);\n  UpstreamWrapper upstream(info);\n  PeerWrapper source(info, false);\n  PeerWrapper destination(info, true);\n\n  Network::Address::InstanceConstSharedPtr local =\n      Network::Utility::parseInternetAddress(\"1.2.3.4\", 123, false);\n  Network::Address::InstanceConstSharedPtr remote =\n      Network::Utility::parseInternetAddress(\"10.20.30.40\", 456, false);\n  Network::Address::InstanceConstSharedPtr upstream_address =\n      Network::Utility::parseInternetAddress(\"10.1.2.3\", 679, false);\n  Network::Address::InstanceConstSharedPtr upstream_local_address =\n      Network::Utility::parseInternetAddress(\"10.1.2.3\", 1000, false);\n  const std::string sni_name = \"kittens.com\";\n  EXPECT_CALL(info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(local));\n  EXPECT_CALL(info, downstreamRemoteAddress()).WillRepeatedly(ReturnRef(remote));\n  EXPECT_CALL(info, downstreamSslConnection()).WillRepeatedly(Return(downstream_ssl_info));\n  EXPECT_CALL(info, upstreamSslConnection()).WillRepeatedly(Return(upstream_ssl_info));\n  EXPECT_CALL(info, upstreamHost()).WillRepeatedly(Return(upstream_host));\n  EXPECT_CALL(info, requestedServerName()).WillRepeatedly(ReturnRef(sni_name));\n  EXPECT_CALL(info, upstreamLocalAddress()).WillRepeatedly(ReturnRef(upstream_local_address));\n  const std::string upstream_transport_failure_reason = \"ConnectionTermination\";\n  EXPECT_CALL(info, upstreamTransportFailureReason())\n      
.WillRepeatedly(ReturnRef(upstream_transport_failure_reason));\n  EXPECT_CALL(info, connectionID()).WillRepeatedly(Return(123));\n\n  EXPECT_CALL(*downstream_ssl_info, peerCertificatePresented()).WillRepeatedly(Return(true));\n  EXPECT_CALL(*upstream_host, address()).WillRepeatedly(Return(upstream_address));\n\n  const std::string tls_version = \"TLSv1\";\n  EXPECT_CALL(*downstream_ssl_info, tlsVersion()).WillRepeatedly(ReturnRef(tls_version));\n  EXPECT_CALL(*upstream_ssl_info, tlsVersion()).WillRepeatedly(ReturnRef(tls_version));\n  std::vector<std::string> dns_sans_peer = {\"www.peer.com\"};\n  EXPECT_CALL(*downstream_ssl_info, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans_peer));\n  EXPECT_CALL(*upstream_ssl_info, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans_peer));\n  std::vector<std::string> dns_sans_local = {\"www.local.com\"};\n  EXPECT_CALL(*downstream_ssl_info, dnsSansLocalCertificate())\n      .WillRepeatedly(Return(dns_sans_local));\n  EXPECT_CALL(*upstream_ssl_info, dnsSansLocalCertificate()).WillRepeatedly(Return(dns_sans_local));\n  std::vector<std::string> uri_sans_peer = {\"www.peer.com/uri\"};\n  EXPECT_CALL(*downstream_ssl_info, uriSanPeerCertificate()).WillRepeatedly(Return(uri_sans_peer));\n  EXPECT_CALL(*upstream_ssl_info, uriSanPeerCertificate()).WillRepeatedly(Return(uri_sans_peer));\n  std::vector<std::string> uri_sans_local = {\"www.local.com/uri\"};\n  EXPECT_CALL(*downstream_ssl_info, uriSanLocalCertificate())\n      .WillRepeatedly(Return(uri_sans_local));\n  EXPECT_CALL(*upstream_ssl_info, uriSanLocalCertificate()).WillRepeatedly(Return(uri_sans_local));\n  const std::string subject_local = \"local.com\";\n  EXPECT_CALL(*downstream_ssl_info, subjectLocalCertificate())\n      .WillRepeatedly(ReturnRef(subject_local));\n  EXPECT_CALL(*upstream_ssl_info, subjectLocalCertificate())\n      .WillRepeatedly(ReturnRef(subject_local));\n  const std::string subject_peer = \"peer.com\";\n  EXPECT_CALL(*downstream_ssl_info, 
subjectPeerCertificate())\n      .WillRepeatedly(ReturnRef(subject_peer));\n  EXPECT_CALL(*upstream_ssl_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer));\n\n  {\n    auto value = connection[CelValue::CreateStringView(Undefined)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateInt64(13)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = source[CelValue::CreateStringView(Undefined)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = source[CelValue::CreateInt64(13)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = destination[CelValue::CreateStringView(Address)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"1.2.3.4:123\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = destination[CelValue::CreateStringView(Port)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(123, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = source[CelValue::CreateStringView(Address)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"10.20.30.40:456\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = source[CelValue::CreateStringView(Port)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(456, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(Address)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(\"10.1.2.3:679\", value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(Port)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsInt64());\n    EXPECT_EQ(679, value.value().Int64OrDie());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(MTLS)];\n    
EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsBool());\n    EXPECT_TRUE(value.value().BoolOrDie());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(RequestedServerName)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(sni_name, value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(TLSVersion)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(tls_version, value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(DNSSanLocalCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(dns_sans_local[0], value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(DNSSanPeerCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(dns_sans_peer[0], value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(URISanLocalCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(uri_sans_local[0], value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(URISanPeerCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(uri_sans_peer[0], value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(SubjectLocalCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(subject_local, value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(SubjectPeerCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    
EXPECT_EQ(subject_peer, value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = connection[CelValue::CreateStringView(ID)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsUint64());\n    EXPECT_EQ(123, value.value().Uint64OrDie());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(TLSVersion)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(tls_version, value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(DNSSanLocalCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(dns_sans_local[0], value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(DNSSanPeerCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(dns_sans_peer[0], value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(URISanLocalCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(uri_sans_local[0], value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(URISanPeerCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(uri_sans_peer[0], value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(SubjectLocalCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(subject_local, value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(SubjectPeerCertificate)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(subject_peer, value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = 
upstream[CelValue::CreateStringView(UpstreamLocalAddress)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(upstream_local_address->asStringView(), value.value().StringOrDie().value());\n  }\n\n  {\n    auto value = upstream[CelValue::CreateStringView(UpstreamTransportFailureReason)];\n    EXPECT_TRUE(value.has_value());\n    ASSERT_TRUE(value.value().IsString());\n    EXPECT_EQ(upstream_transport_failure_reason, value.value().StringOrDie().value());\n  }\n}\n\nTEST(Context, FilterStateAttributes) {\n  StreamInfo::FilterStateImpl filter_state(StreamInfo::FilterState::LifeSpan::FilterChain);\n  FilterStateWrapper wrapper(filter_state);\n  ProtobufWkt::Arena arena;\n  wrapper.Produce(&arena);\n\n  const std::string key = \"filter_state_key\";\n  const std::string serialized = \"filter_state_value\";\n  const std::string missing = \"missing_key\";\n\n  auto accessor = std::make_shared<Envoy::Router::StringAccessorImpl>(serialized);\n  filter_state.setData(key, accessor, StreamInfo::FilterState::StateType::ReadOnly);\n\n  {\n    auto value = wrapper[CelValue::CreateStringView(missing)];\n    EXPECT_FALSE(value.has_value());\n  }\n\n  {\n    auto value = wrapper[CelValue::CreateStringView(key)];\n    EXPECT_TRUE(value.has_value());\n    EXPECT_TRUE(value.value().IsBytes());\n    EXPECT_EQ(serialized, value.value().BytesOrDie().value());\n  }\n}\n\n} // namespace\n} // namespace Expr\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/clusterfuzz-testcase-minimized-evaluator_fuzz_test-4803938816884736.fuzz",
    "content": "expression {\n  list_expr {\n    elements {\n      id: 1407374883553280\n      ident_expr {\n        name: \"response\"\n      }\n    }\n    elements {\n      id: 360287970189639680\n      select_expr {\n        operand {\n          id: 69242844270821376\n          select_expr {\n            field: \"trailers\"\n          }\n        }\n        field: \"trailers\"\n      }\n    }\n    elements {\n      id: 69242844270821376\n      ident_expr {\n        name: \"response\"\n      }\n    }\n    elements {\n      id: 69242844270821376\n      select_expr {\n        field: \"trailers\"\n      }\n    }\n    elements {\n      select_expr {\n        operand {\n        }\n        field: \"trailers\"\n      }\n    }\n    elements {\n      struct_expr {\n        entries {\n          value {\n            list_expr {\n              elements {\n                id: 21474836488\n                ident_expr {\n                  name: \"response\"\n                }\n              }\n              elements {\n                id: 1407374883553280\n                select_expr {\n                  operand {\n                    select_expr {\n                      field: \"trailers\"\n                    }\n                  }\n                  field: \"tr0Zzsגƃf$f!Ÿ!\u0001C\u0003P\u0017>huV,b}\r\u0005>@%շSP୔S\u000e\rd1wqnQ@-q)ѫ+8WLJ_@g<ݾ]e[S\u0011Tt\t4;\u0011Pailers\"\n                }\n              }\n              elements {\n                id: 69242844270821376\n                ident_expr {\n                  name: \"response\"\n                }\n              }\n              elements {\n                select_expr {\n                  field: \"trailers\"\n                }\n              }\n              elements {\n                select_expr {\n                  field: \"trailers\"\n                }\n              }\n              elements {\n              }\n              elements {\n              }\n              elements {\n                id: 
7277817135808643072\n              }\n              elements {\n              }\n            }\n          }\n        }\n      }\n    }\n    elements {\n    }\n    elements {\n      id: 360287970189639680\n    }\n    elements {\n    }\n    elements {\n    }\n  }\n}\ntrailers {\n  headers {\n    key: \"trailers\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/clusterfuzz-testcase-minimized-evaluator_fuzz_test-5723735986536448",
    "content": "expression { } stream_info {   address {     pipe {       path: \"            \\0     \"     }   } } "
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/crash-67e48e44650e25b93159729a7a4dd386625bb5c2",
    "content": "expression {\n  id: 17179869184\n  struct_expr {\n    entries {\n      map_key {\n        id: 27424471945274724\n        list_expr {\n        }\n      }\n    }\n    entries {\n      map_key {\n        id: 27424471945274724\n        list_expr {\n          elements {\n            id: 50331648\n          }\n        }\n      }\n    }\n    entries {\n      map_key {\n        id: 27424471945274724\n        list_expr {\n          elements {\n            id: 50331648\n          }\n        }\n      }\n    }\n    entries {\n      map_key {\n        id: 27424471945274724\n        list_expr {\n          elements {\n            id: 50331648\n          }\n        }\n      }\n    }\n    entries {\n      map_key {\n        id: 27424471945274724\n        list_expr {\n          elements {\n            select_expr {\n              operand {\n                ident_expr {\n                  name: \"\\004\\000\\000\\000\"\n                }\n              }\n            }\n          }\n        }\n      }\n    }\n    entries {\n      map_key {\n        id: 27424471945274724\n        list_expr {\n          elements {\n            id: 50331648\n          }\n        }\n      }\n    }\n    entries {\n      map_key {\n        id: 27424471945274724\n        list_expr {\n          elements {\n            id: 50331648\n          }\n        }\n      }\n    }\n    entries {\n      map_key {\n        id: 27424471945274724\n      }\n      value {\n        comprehension_expr {\n          iter_var: \"\\001\\000\\000\\000\\000\\000\\000\\031\"\n        }\n      }\n    }\n  }\n}\nrequest_headers {\n  headers {\n    key: \"\\t\\000\\000\\000\"\n    value: \"&&\"\n  }\n}\nresponse_headers {\n}\ntrailers {\n  headers {\n    key: \"\\000\\000\\000\\001\"\n    value: \"\\000\\000\\000\\001\"\n  }\n  headers {\n    key: \"\\000\\000\\000\\001\"\n    value: \"\\000\\000\\000\\001\"\n  }\n  headers {\n    key: \"\\000\\000\\000\\001\"\n    value: \"\\000\\000\\000\\001\"\n  }\n  headers {\n    
key: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n    value: \"\\000\\000\\000\\0010\\000\"\n  }\n}\nstream_info {\n  dynamic_metadata {\n    filter_metadata {\n      key: \"\"\n      value {\n      }\n    }\n    filter_metadata {\n      key: \"\"\n      value {\n        fields {\n          key: \"(\"\n          value {\n          }\n        }\n      }\n    }\n    filter_metadata {\n      key: \"\"\n      value {\n        fields {\n          key: \"N5Envoy24ProtoValidation\"\n          value {\n          }\n        }\n      }\n    }\n    filter_metadata {\n      key: \"\"\n      value {\n        fields {\n          key: \"\"\n          value {\n            number_value: 1.3262473693533e-315\n          }\n        }\n        fields {\n          key: \"(\"\n          value {\n          }\n        }\n      }\n    }\n    filter_metadata {\n      key: \"\"\n      value {\n        fields {\n          key: \"\"\n          value {\n            number_value: 1.3262473693533e-315\n          }\n        }\n        fields {\n          key: \"(\"\n          value {\n          }\n        }\n      }\n    }\n    filter_metadata {\n      key: \"\"\n      value {\n        fields {\n          key: \"\"\n          value {\n            number_value: 1.3262473693533e-315\n          }\n        }\n        fields {\n          key: \"(\"\n          value {\n          }\n        }\n      }\n    }\n  }\n  response_code {\n    value: 134219776\n  }\n  upstream_metadata {\n    filter_metadata {\n      key: \"\"\n      value {\n        fields {\n          key: \"\"\n    
      value {\n            number_value: 9.56944336513491e-315\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/crash-87e3c780acf4403ddd8b182496e6cad5ac5efd66",
    "content": "trailers {\n}\nstream_info {\n  address {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/crash-d6a9858c9b8e8b60845af9f5adc9eaead58147bd",
    "content": "expression {\n  comprehension_expr {\n    iter_range {\n      ident_expr {\n        name: \"request\"\n      }\n    }\n    result {\n      id: 3530822107858468864\n    }\n  }\n}\ntrailers {\n  headers {\n    key: \"\\r\\000\"\n  }\n}\nstream_info {\n  dynamic_metadata {\n    filter_metadata {\n      key: \"\"\n      value {\n      }\n    }\n  }\n  requested_server_name: \"/\"\n}\n"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/emptystruct",
    "content": "expression {\n  struct_expr {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/errorcondition",
    "content": "expression {\n  call_expr {\n    function: \"_[_]\"\n    args {\n      select_expr {\n        operand {\n\t  ident_expr {\n\t    name: \"request\"\n\t  }\n        }\n\tfield: \"undefined\"\n      }\n      const_expr {\n        string_value: \"foo\"\n      }\n    }\n  }\n}\nrequest_headers {\n  headers{key: \":method\" value : \"GET\"}\n  headers{key: \":path\" value : \"/\"}\n  headers{key: \":scheme\" value : \"http\"}\n  headers{key: \":authority\" value : \"foo.com\"}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n}\nresponse_headers {\n  headers {\n    key: \":status\"\n    value : \"200\"\n  }\n}\ntrailers {\n  headers {}\n}\nstream_info {}"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/example",
    "content": "expression {\n  const_expr {\n    bool_value: false\n  }\n}\nrequest_headers {\n  headers{key: \":method\" value : \"GET\"}\n  headers{key: \":path\" value : \"/\"}\n  headers{key: \":scheme\" value : \"http\"}\n  headers{key: \":authority\" value : \"foo.com\"}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n}\nresponse_headers {\n  headers {\n    key: \":status\"\n    value : \"200\"\n  }\n}\ntrailers {\n  headers {}\n}\nstream_info {}"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/example1",
    "content": "expression {\n  call_expr {\n    function: \"undefined_extent\"\n    args {\n       const_expr {\n         bool_value: false\n       }\n    }\n  }\n}\nrequest_headers {\n  headers{key: \":method\" value : \"GET\"}\n  headers{key: \":path\" value : \"/\"}\n  headers{key: \":scheme\" value : \"http\"}\n  headers{key: \":authority\" value : \"foo.com\"}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n}\nresponse_headers {\n  headers {\n    key: \":status\"\n    value : \"200\"\n  }\n}\ntrailers {\n  headers {}\n}\nstream_info {}"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/headercondition",
    "content": "expression {\n  call_expr {\n    function: \"_==_\"\n    args {\n      call_expr {\n        function: \"_[_]\"\n\targs {\n\t  select_expr {\n\t    operand {\n\t      ident_expr {\n\t        name: \"request\"\n\t      }\n\t    }\n\t    field: \"headers\"\n\t  }\n\t  const_expr {\n\t    string_value: \"foo\"\n\t  }\n        }\n      }\n      const_expr {\n        string_value: \"bar\"\n      }\n    }\n  }\n}\nrequest_headers {\n  headers{key: \":method\" value : \"GET\"}\n  headers{key: \":path\" value : \"/\"}\n  headers{key: \":scheme\" value : \"http\"}\n  headers{key: \":authority\" value : \"foo.com\"}\n  headers {key: \"foo\" value: \"bar\"}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n}\nresponse_headers {\n  headers {\n    key: \":status\"\n    value : \"200\"\n  }\n}\ntrailers {\n  headers {}\n}\nstream_info {}"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/metadatacondition",
    "content": "expression {\n  call_expr {\n    function: \"_==_\"\n    args {\n      call_expr {\n        function: \"_[_]\"\n\targs {\n\t  select_expr {\n\t    operand {\n\t      ident_expr {\n\t        name: \"metadata\"\n\t      }\n\t    }\n\t    field: \"filter_metadata\"\n\t  }\n\t  const_expr {\n\t    string_value: \"other\"\n\t  }\n        }\n\tconst_expr {\n\t  string_value: \"label\"\n\t}\n      }\n      const_expr {\n        string_value: \"prod\"\n      }\n    }\n  }\n}\nrequest_headers {\n  headers{key: \":method\" value : \"GET\"}\n  headers{key: \":path\" value : \"/\"}\n  headers{key: \":scheme\" value : \"http\"}\n  headers{key: \":authority\" value : \"foo.com\"}\n  headers {key: \"foo\" value: \"bar\"}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n}\nresponse_headers {\n  headers {\n    key: \":status\"\n    value : \"200\"\n  }\n}\ntrailers {\n  headers {}\n}\nstream_info {\n  start_time: 1522796769123\n  upstream_metadata {\n    filter_metadata {\n      key: \"other\"\n      value: {\n        fields {\n          key: \"label\"\n          value: { string_value: \"prod\" }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_corpus/mistypedcondition",
    "content": "expression {\n  const_expr {\n    int64_value: 13\n  }\n}\nrequest_headers {\n  headers{key: \":method\" value : \"GET\"}\n  headers{key: \":path\" value : \"/\"}\n  headers{key: \":scheme\" value : \"http\"}\n  headers{key: \":authority\" value : \"foo.com\"}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n  headers {}\n}\nresponse_headers {\n  headers {\n    key: \":status\"\n    value : \"200\"\n  }\n}\ntrailers {\n  headers {}\n}\nstream_info {}"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.extensions.filters.common.expr;\n\nimport \"google/api/expr/v1alpha1/syntax.proto\";\nimport \"test/fuzz/common.proto\";\nimport \"validate/validate.proto\";\n\n// Structured input for fuzz test.\n\nmessage EvaluatorTestCase {\n  google.api.expr.v1alpha1.Expr expression = 1 [(validate.rules).message.required = true];\n  test.fuzz.Headers request_headers = 2;\n  test.fuzz.Headers response_headers = 3;\n  test.fuzz.Headers trailers = 4;\n  test.fuzz.StreamInfo stream_info = 5;\n}"
  },
  {
    "path": "test/extensions/filters/common/expr/evaluator_fuzz_test.cc",
    "content": "#include \"common/network/utility.h\"\n\n#include \"extensions/filters/common/expr/evaluator.h\"\n\n#include \"test/common/stream_info/test_util.h\"\n#include \"test/extensions/filters/common/expr/evaluator_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Expr {\nnamespace {\n\nDEFINE_PROTO_FUZZER(const test::extensions::filters::common::expr::EvaluatorTestCase& input) {\n  // Create builder without constant folding.\n  static Expr::BuilderPtr builder = Expr::createBuilder(nullptr);\n  std::unique_ptr<TestStreamInfo> stream_info;\n\n  try {\n    // Validate that the input has an expression.\n    TestUtility::validate(input);\n    // Create stream_info to test against, this may catch exceptions from invalid addresses.\n    stream_info = Fuzz::fromStreamInfo(input.stream_info());\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException: {}\", e.what());\n    return;\n  }\n\n  auto request_headers = Fuzz::fromHeaders<Http::TestRequestHeaderMapImpl>(input.request_headers());\n  auto response_headers =\n      Fuzz::fromHeaders<Http::TestResponseHeaderMapImpl>(input.response_headers());\n  auto response_trailers = Fuzz::fromHeaders<Http::TestResponseTrailerMapImpl>(input.trailers());\n\n  try {\n    // Create the CEL expression.\n    Expr::ExpressionPtr expr = Expr::createExpression(*builder, input.expression());\n\n    // Evaluate the CEL expression.\n    Protobuf::Arena arena;\n    Expr::evaluate(*expr, arena, *stream_info, &request_headers, &response_headers,\n                   &response_trailers);\n  } catch (const CelException& e) {\n    ENVOY_LOG_MISC(debug, \"CelException: {}\", e.what());\n  }\n}\n\n} // namespace\n} // namespace Expr\n} // namespace Common\n} // 
namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ext_authz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"check_request_utils_test\",\n    srcs = [\"check_request_utils_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/filters/common/ext_authz:check_request_utils_lib\",\n        \"//source/extensions/filters/common/ext_authz:ext_authz_interface\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"ext_authz_grpc_impl_test\",\n    srcs = [\"ext_authz_grpc_impl_test.cc\"],\n    deps = [\n        \"//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib\",\n        \"//test/extensions/filters/common/ext_authz:ext_authz_test_common\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"ext_authz_http_impl_test\",\n    srcs = [\"ext_authz_http_impl_test.cc\"],\n    deps = [\n        \"//source/extensions/filters/common/ext_authz:ext_authz_http_lib\",\n        \"//test/extensions/filters/common/ext_authz:ext_authz_test_common\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        
\"@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"ext_authz_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//source/extensions/filters/common/ext_authz:ext_authz_interface\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"ext_authz_test_common\",\n    srcs = [\"test_common.cc\"],\n    hdrs = [\"test_common.h\"],\n    deps = [\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib\",\n        \"//test/extensions/filters/common/ext_authz:ext_authz_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/common/ext_authz/check_request_utils_test.cc",
    "content": "#include \"envoy/service/auth/v3/external_auth.pb.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/common/ext_authz/check_request_utils.h\"\n#include \"extensions/filters/common/ext_authz/ext_authz.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\nnamespace {\n\nclass CheckRequestUtilsTest : public testing::Test {\npublic:\n  CheckRequestUtilsTest() {\n    addr_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 1111);\n    protocol_ = Envoy::Http::Protocol::Http10;\n    buffer_ = CheckRequestUtilsTest::newTestBuffer(8192);\n    ssl_ = std::make_shared<NiceMock<Envoy::Ssl::MockConnectionInfo>>();\n  };\n\n  void expectBasicHttp() {\n    EXPECT_CALL(callbacks_, connection()).Times(2).WillRepeatedly(Return(&connection_));\n    EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n    EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n    EXPECT_CALL(Const(connection_), ssl()).Times(2).WillRepeatedly(Return(ssl_));\n    EXPECT_CALL(callbacks_, streamId()).Times(1).WillOnce(Return(0));\n    EXPECT_CALL(callbacks_, decodingBuffer()).WillOnce(Return(buffer_.get()));\n    EXPECT_CALL(callbacks_, streamInfo()).Times(1).WillOnce(ReturnRef(req_info_));\n    EXPECT_CALL(req_info_, protocol()).Times(2).WillRepeatedly(ReturnPointee(&protocol_));\n    EXPECT_CALL(req_info_, startTime()).Times(1).WillOnce(Return(SystemTime()));\n  }\n\n  void callHttpCheckAndValidateRequestAttributes(bool include_peer_certificate) {\n    Http::TestRequestHeaderMapImpl 
request_headers{{\"x-envoy-downstream-service-cluster\", \"foo\"},\n                                                   {\":path\", \"/bar\"}};\n    envoy::service::auth::v3::CheckRequest request;\n    Protobuf::Map<std::string, std::string> context_extensions;\n    context_extensions[\"key\"] = \"value\";\n\n    envoy::config::core::v3::Metadata metadata_context;\n    auto metadata_val = MessageUtil::keyValueStruct(\"foo\", \"bar\");\n    (*metadata_context.mutable_filter_metadata())[\"meta.key\"] = metadata_val;\n\n    CheckRequestUtils::createHttpCheck(\n        &callbacks_, request_headers, std::move(context_extensions), std::move(metadata_context),\n        request, /*max_request_bytes=*/0, /*pack_as_bytes=*/false, include_peer_certificate);\n\n    EXPECT_EQ(\"source\", request.attributes().source().principal());\n    EXPECT_EQ(\"destination\", request.attributes().destination().principal());\n    EXPECT_EQ(\"foo\", request.attributes().source().service());\n    EXPECT_EQ(\"value\", request.attributes().context_extensions().at(\"key\"));\n    EXPECT_EQ(\"bar\", request.attributes()\n                         .metadata_context()\n                         .filter_metadata()\n                         .at(\"meta.key\")\n                         .fields()\n                         .at(\"foo\")\n                         .string_value());\n\n    if (include_peer_certificate) {\n      EXPECT_EQ(cert_data_, request.attributes().source().certificate());\n    } else {\n      EXPECT_EQ(0, request.attributes().source().certificate().size());\n    }\n  }\n\n  static Buffer::InstancePtr newTestBuffer(uint64_t size) {\n    auto buffer = std::make_unique<Buffer::OwnedImpl>();\n    while (buffer->length() < size) {\n      auto new_buffer =\n          Buffer::OwnedImpl(\"Lorem ipsum dolor sit amet, consectetuer adipiscing elit.\");\n      buffer->add(new_buffer);\n    }\n    return buffer;\n  }\n\n  Network::Address::InstanceConstSharedPtr addr_;\n  absl::optional<Http::Protocol> 
protocol_;\n  CheckRequestUtils check_request_generator_;\n  NiceMock<Envoy::Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  NiceMock<Envoy::Network::MockReadFilterCallbacks> net_callbacks_;\n  NiceMock<Envoy::Network::MockConnection> connection_;\n  std::shared_ptr<NiceMock<Envoy::Ssl::MockConnectionInfo>> ssl_;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> req_info_;\n  Buffer::InstancePtr buffer_;\n  const std::string cert_data_{\"cert-data\"};\n};\n\n// Verify that createTcpCheck's dependencies are invoked when it's called.\n// Verify that the source certificate is not set by default.\nTEST_F(CheckRequestUtilsTest, BasicTcp) {\n  envoy::service::auth::v3::CheckRequest request;\n  EXPECT_CALL(net_callbacks_, connection()).Times(2).WillRepeatedly(ReturnRef(connection_));\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(Const(connection_), ssl()).Times(2).WillRepeatedly(Return(ssl_));\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n\n  CheckRequestUtils::createTcpCheck(&net_callbacks_, request, false);\n\n  EXPECT_EQ(request.attributes().source().certificate().size(), 0);\n}\n\n// Verify that createTcpCheck's dependencies are invoked when it's called.\n// Verify that createTcpCheck populates the source certificate correctly.\nTEST_F(CheckRequestUtilsTest, TcpPeerCertificate) {\n  envoy::service::auth::v3::CheckRequest request;\n  EXPECT_CALL(net_callbacks_, connection()).Times(2).WillRepeatedly(ReturnRef(connection_));\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(Const(connection_), ssl()).Times(2).WillRepeatedly(Return(ssl_));\n  EXPECT_CALL(*ssl_, 
uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n  EXPECT_CALL(*ssl_, urlEncodedPemEncodedPeerCertificate()).WillOnce(ReturnRef(cert_data_));\n\n  CheckRequestUtils::createTcpCheck(&net_callbacks_, request, true);\n\n  EXPECT_EQ(cert_data_, request.attributes().source().certificate());\n}\n\n// Verify that createHttpCheck's dependencies are invoked when it's called.\n// Verify that check request object has no request data.\n// Verify that a client supplied EnvoyAuthPartialBody will not affect the\n// CheckRequest call.\nTEST_F(CheckRequestUtilsTest, BasicHttp) {\n  const uint64_t size = 0;\n  envoy::service::auth::v3::CheckRequest request_;\n\n  // A client supplied EnvoyAuthPartialBody header should be ignored.\n  Http::TestRequestHeaderMapImpl request_headers{{Headers::get().EnvoyAuthPartialBody.get(), \"1\"}};\n\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n  expectBasicHttp();\n  CheckRequestUtils::createHttpCheck(&callbacks_, request_headers,\n                                     Protobuf::Map<std::string, std::string>(),\n                                     envoy::config::core::v3::Metadata(), request_, size,\n                                     /*pack_as_bytes=*/false, /*include_peer_certificate=*/false);\n  ASSERT_EQ(size, request_.attributes().request().http().body().size());\n  EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body());\n  EXPECT_EQ(request_.attributes().request().http().headers().end(),\n            request_.attributes().request().http().headers().find(\n                Headers::get().EnvoyAuthPartialBody.get()));\n  EXPECT_TRUE(request_.attributes().request().has_time());\n}\n\n// 
Verify that check request object has only a portion of the request data.\nTEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) {\n  const uint64_t size = 4049;\n  Http::TestRequestHeaderMapImpl headers_;\n  envoy::service::auth::v3::CheckRequest request_;\n\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n  expectBasicHttp();\n  CheckRequestUtils::createHttpCheck(&callbacks_, headers_,\n                                     Protobuf::Map<std::string, std::string>(),\n                                     envoy::config::core::v3::Metadata(), request_, size,\n                                     /*pack_as_bytes=*/false, /*include_peer_certificate=*/false);\n  ASSERT_EQ(size, request_.attributes().request().http().body().size());\n  EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body());\n  EXPECT_EQ(\"true\", request_.attributes().request().http().headers().at(\n                        Headers::get().EnvoyAuthPartialBody.get()));\n}\n\n// Verify that check request object has all the request data.\nTEST_F(CheckRequestUtilsTest, BasicHttpWithFullBody) {\n  Http::TestRequestHeaderMapImpl headers_;\n  envoy::service::auth::v3::CheckRequest request_;\n\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n  expectBasicHttp();\n  CheckRequestUtils::createHttpCheck(\n      &callbacks_, headers_, Protobuf::Map<std::string, std::string>(),\n      envoy::config::core::v3::Metadata(), request_, buffer_->length(), /*pack_as_bytes=*/false,\n      /*include_peer_certificate=*/false);\n  ASSERT_EQ(buffer_->length(), request_.attributes().request().http().body().size());\n  
EXPECT_EQ(buffer_->toString().substr(0, buffer_->length()),\n            request_.attributes().request().http().body());\n  EXPECT_EQ(\"false\", request_.attributes().request().http().headers().at(\n                         Headers::get().EnvoyAuthPartialBody.get()));\n}\n\n// Verify that check request object has all the request data and packed as bytes instead of UTF-8\n// string.\nTEST_F(CheckRequestUtilsTest, BasicHttpWithFullBodyPackAsBytes) {\n  Http::TestRequestHeaderMapImpl headers_;\n  envoy::service::auth::v3::CheckRequest request_;\n\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n\n  // Fill the buffer with non UTF-8 data.\n  uint8_t raw[2] = {0xc0, 0xc0};\n  Buffer::OwnedImpl raw_buffer(raw, 2);\n  buffer_->drain(buffer_->length());\n  buffer_->add(raw_buffer);\n\n  expectBasicHttp();\n\n  // Setting pack_as_bytes as false and a string field with invalid UTF-8 data makes\n  // calling request_.SerializeToString() below to print an error message to stderr. 
Interestingly,\n  // request_.SerializeToString() still returns \"true\" when it is failed to serialize the data.\n  CheckRequestUtils::createHttpCheck(\n      &callbacks_, headers_, Protobuf::Map<std::string, std::string>(),\n      envoy::config::core::v3::Metadata(), request_, buffer_->length(), /*pack_as_bytes=*/true,\n      /*include_peer_certificate=*/false);\n\n  // TODO(dio): Find a way to test this without using function from testing::internal namespace.\n  testing::internal::CaptureStderr();\n  std::string out;\n  ASSERT_TRUE(request_.SerializeToString(&out));\n  ASSERT_EQ(\"\", testing::internal::GetCapturedStderr());\n\n  // Non UTF-8 data sets raw_body field, instead of body field.\n  ASSERT_EQ(buffer_->length(), request_.attributes().request().http().raw_body().size());\n  ASSERT_EQ(0, request_.attributes().request().http().body().size());\n\n  EXPECT_EQ(buffer_->toString().substr(0, buffer_->length()),\n            request_.attributes().request().http().raw_body());\n  EXPECT_EQ(\"false\", request_.attributes().request().http().headers().at(\n                         Headers::get().EnvoyAuthPartialBody.get()));\n}\n\n// Verify that createHttpCheck extract the proper attributes from the http request into CheckRequest\n// proto object.\n// Verify that the source certificate is not set by default.\nTEST_F(CheckRequestUtilsTest, CheckAttrContextPeer) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-downstream-service-cluster\", \"foo\"},\n                                                 {\":path\", \"/bar\"}};\n  envoy::service::auth::v3::CheckRequest request;\n  EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_));\n  EXPECT_CALL(connection_, remoteAddress()).WillRepeatedly(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillRepeatedly(ReturnRef(addr_));\n  EXPECT_CALL(Const(connection_), ssl()).WillRepeatedly(Return(ssl_));\n  EXPECT_CALL(callbacks_, streamId()).WillRepeatedly(Return(0));\n  
EXPECT_CALL(callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(callbacks_, decodingBuffer()).Times(1);\n  EXPECT_CALL(req_info_, protocol()).WillRepeatedly(ReturnPointee(&protocol_));\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n\n  callHttpCheckAndValidateRequestAttributes(false);\n}\n\n// Verify that createHttpCheck extract the attributes from the HTTP request into CheckRequest\n// proto object and URI SAN is used as principal if present.\nTEST_F(CheckRequestUtilsTest, CheckAttrContextPeerUriSans) {\n  expectBasicHttp();\n\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n\n  callHttpCheckAndValidateRequestAttributes(false);\n}\n\n// Verify that createHttpCheck extract the attributes from the HTTP request into CheckRequest\n// proto object and DNS SAN is used as principal if URI SAN is absent.\nTEST_F(CheckRequestUtilsTest, CheckAttrContextPeerDnsSans) {\n  expectBasicHttp();\n\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{}));\n  EXPECT_CALL(*ssl_, dnsSansPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate()).WillOnce(Return(std::vector<std::string>{}));\n  EXPECT_CALL(*ssl_, dnsSansLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n\n  Protobuf::Map<std::string, std::string> context_extensions;\n  context_extensions[\"key\"] = \"value\";\n\n  callHttpCheckAndValidateRequestAttributes(false);\n}\n\n// Verify that createHttpCheck extract the attributes from the HTTP request into CheckRequest\n// proto object and Subject is used as principal if 
both URI SAN and DNS SAN are absent.\nTEST_F(CheckRequestUtilsTest, CheckAttrContextSubject) {\n  expectBasicHttp();\n\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{}));\n  EXPECT_CALL(*ssl_, dnsSansPeerCertificate()).WillOnce(Return(std::vector<std::string>{}));\n  std::string subject_peer = \"source\";\n  EXPECT_CALL(*ssl_, subjectPeerCertificate()).WillOnce(ReturnRef(subject_peer));\n\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate()).WillOnce(Return(std::vector<std::string>{}));\n  EXPECT_CALL(*ssl_, dnsSansLocalCertificate()).WillOnce(Return(std::vector<std::string>{}));\n  std::string subject_local = \"destination\";\n  EXPECT_CALL(*ssl_, subjectLocalCertificate()).WillOnce(ReturnRef(subject_local));\n\n  callHttpCheckAndValidateRequestAttributes(false);\n}\n\n// Verify that the source certificate is populated correctly.\nTEST_F(CheckRequestUtilsTest, CheckAttrContextPeerCertificate) {\n  expectBasicHttp();\n\n  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{\"source\"}));\n  EXPECT_CALL(*ssl_, uriSanLocalCertificate())\n      .WillOnce(Return(std::vector<std::string>{\"destination\"}));\n  EXPECT_CALL(*ssl_, urlEncodedPemEncodedPeerCertificate()).WillOnce(ReturnRef(cert_data_));\n\n  callHttpCheckAndValidateRequestAttributes(true);\n}\n\n} // namespace\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc",
    "content": "#include \"envoy/service/auth/v2alpha/external_auth.pb.h\" // for proto link\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/type/v3/http_status.pb.h\"\n\n#include \"common/grpc/common.h\"\n#include \"common/http/headers.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz_grpc_impl.h\"\n\n#include \"test/extensions/filters/common/ext_authz/mocks.h\"\n#include \"test/extensions/filters/common/ext_authz/test_common.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::Ref;\nusing testing::Return;\nusing testing::Values;\nusing testing::WhenDynamicCastTo;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nusing Params = std::tuple<envoy::config::core::v3::ApiVersion, bool>;\n\nclass ExtAuthzGrpcClientTest : public testing::TestWithParam<Params> {\npublic:\n  ExtAuthzGrpcClientTest() : async_client_(new Grpc::MockAsyncClient()), timeout_(10) {}\n\n  void initialize(const Params& param) {\n    api_version_ = std::get<0>(param);\n    use_alpha_ = std::get<1>(param);\n    client_ = std::make_unique<GrpcClientImpl>(Grpc::RawAsyncClientPtr{async_client_}, timeout_,\n                                               api_version_, use_alpha_);\n  }\n\n  void expectCallSend(envoy::service::auth::v3::CheckRequest& request) {\n    EXPECT_CALL(*async_client_,\n                sendRaw(_, _, Grpc::ProtoBufferEq(request), Ref(*(client_.get())), _, _))\n        .WillOnce(\n            Invoke([this](absl::string_view service_full_name, absl::string_view method_name,\n                          Buffer::InstancePtr&&, 
Grpc::RawAsyncRequestCallbacks&, Tracing::Span&,\n                          const Http::AsyncClient::RequestOptions& options) -> Grpc::AsyncRequest* {\n              EXPECT_EQ(TestUtility::getVersionedServiceFullName(\n                            \"envoy.service.auth.{}.Authorization\", api_version_, use_alpha_),\n                        service_full_name);\n              EXPECT_EQ(\"Check\", method_name);\n              if (Runtime::runtimeFeatureEnabled(\n                      \"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\")) {\n                EXPECT_FALSE(options.timeout.has_value());\n              } else {\n                EXPECT_EQ(timeout_->count(), options.timeout->count());\n              }\n              return &async_request_;\n            }));\n  }\n\n  Grpc::MockAsyncClient* async_client_;\n  absl::optional<std::chrono::milliseconds> timeout_;\n  Grpc::MockAsyncRequest async_request_;\n  GrpcClientImplPtr client_;\n  MockRequestCallbacks request_callbacks_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Tracing::MockSpan span_;\n  bool use_alpha_{};\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n  envoy::config::core::v3::ApiVersion api_version_;\n};\n\nINSTANTIATE_TEST_SUITE_P(Parameterized, ExtAuthzGrpcClientTest,\n                         Values(Params(envoy::config::core::v3::ApiVersion::AUTO, false),\n                                Params(envoy::config::core::v3::ApiVersion::V2, false),\n                                Params(envoy::config::core::v3::ApiVersion::V2, true),\n                                Params(envoy::config::core::v3::ApiVersion::V3, false)));\n\n// Test the client when an ok response is received.\nTEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) {\n  initialize(GetParam());\n\n  auto check_response = std::make_unique<envoy::service::auth::v3::CheckResponse>();\n  auto status = check_response->mutable_status();\n\n  ProtobufWkt::Struct expected_dynamic_metadata;\n  auto* metadata_fields = 
expected_dynamic_metadata.mutable_fields();\n  (*metadata_fields)[\"foo\"] = ValueUtil::stringValue(\"ok\");\n  (*metadata_fields)[\"bar\"] = ValueUtil::numberValue(1);\n\n  // The expected dynamic metadata is set to the outer check response, hence regardless the\n  // check_response's http_response value (either OkHttpResponse or DeniedHttpResponse) the dynamic\n  // metadata is set to be equal to the check response's dynamic metadata.\n  check_response->mutable_dynamic_metadata()->MergeFrom(expected_dynamic_metadata);\n\n  status->set_code(Grpc::Status::WellKnownGrpcStatus::Ok);\n\n  // This is the expected authz response.\n  auto authz_response = Response{};\n  authz_response.status = CheckStatus::OK;\n\n  authz_response.dynamic_metadata = expected_dynamic_metadata;\n\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer, enableTimer(timeout_.value(), _));\n  bool timer_destroyed = false;\n  timer->timer_destroyed_ = &timer_destroyed;\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  Http::TestRequestHeaderMapImpl headers;\n  client_->onCreateInitialMetadata(headers);\n\n  EXPECT_CALL(span_, setTag(Eq(\"ext_authz_status\"), Eq(\"ext_authz_ok\")));\n  EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo<ResponsePtr&>(\n                                      AuthzResponseNoAttributes(authz_response))));\n  client_->onSuccess(std::move(check_response), span_);\n  // make sure the internal timeout timer is destroyed\n  EXPECT_EQ(timer_destroyed, true);\n}\n\n// Test the client when an ok response is received.\nTEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithAllAtributes) {\n  initialize(GetParam());\n\n  const std::string empty_body{};\n  const auto expected_headers = TestCommon::makeHeaderValueOption({{\"foo\", \"bar\", false}});\n  auto 
check_response = TestCommon::makeCheckResponse(\n      Grpc::Status::WellKnownGrpcStatus::Ok, envoy::type::v3::OK, empty_body, expected_headers);\n  auto authz_response =\n      TestCommon::makeAuthzResponse(CheckStatus::OK, Http::Code::OK, empty_body, expected_headers);\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  Http::TestRequestHeaderMapImpl headers;\n  client_->onCreateInitialMetadata(headers);\n\n  EXPECT_CALL(span_, setTag(Eq(\"ext_authz_status\"), Eq(\"ext_authz_ok\")));\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));\n  client_->onSuccess(std::move(check_response), span_);\n}\n\n// Test the client when a denied response is received.\nTEST_P(ExtAuthzGrpcClientTest, AuthorizationDenied) {\n  initialize(GetParam());\n\n  auto check_response = std::make_unique<envoy::service::auth::v3::CheckResponse>();\n  auto status = check_response->mutable_status();\n  status->set_code(Grpc::Status::WellKnownGrpcStatus::PermissionDenied);\n  auto authz_response = Response{};\n  authz_response.status = CheckStatus::Denied;\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  Http::TestRequestHeaderMapImpl headers;\n  client_->onCreateInitialMetadata(headers);\n  EXPECT_EQ(nullptr, headers.RequestId());\n  EXPECT_CALL(span_, setTag(Eq(\"ext_authz_status\"), Eq(\"ext_authz_unauthorized\")));\n  EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo<ResponsePtr&>(\n                                      AuthzResponseNoAttributes(authz_response))));\n\n  client_->onSuccess(std::move(check_response), span_);\n}\n\n// Test the client when a gRPC status code unknown is received 
from the authorization server.\nTEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) {\n  initialize(GetParam());\n\n  auto check_response = std::make_unique<envoy::service::auth::v3::CheckResponse>();\n  auto status = check_response->mutable_status();\n  status->set_code(Grpc::Status::WellKnownGrpcStatus::Unknown);\n  auto authz_response = Response{};\n  authz_response.status = CheckStatus::Denied;\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  Http::TestRequestHeaderMapImpl headers;\n  client_->onCreateInitialMetadata(headers);\n  EXPECT_EQ(nullptr, headers.RequestId());\n  EXPECT_CALL(span_, setTag(Eq(\"ext_authz_status\"), Eq(\"ext_authz_unauthorized\")));\n  EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo<ResponsePtr&>(\n                                      AuthzResponseNoAttributes(authz_response))));\n\n  client_->onSuccess(std::move(check_response), span_);\n}\n\n// Test the client when a denied response with additional HTTP attributes is received.\nTEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) {\n  initialize(GetParam());\n\n  const std::string expected_body{\"test\"};\n  const auto expected_headers =\n      TestCommon::makeHeaderValueOption({{\"foo\", \"bar\", false}, {\"foobar\", \"bar\", true}});\n  auto check_response =\n      TestCommon::makeCheckResponse(Grpc::Status::WellKnownGrpcStatus::PermissionDenied,\n                                    envoy::type::v3::Unauthorized, expected_body, expected_headers);\n  auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::Denied, Http::Code::Unauthorized,\n                                                      expected_body, expected_headers);\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n  client_->check(request_callbacks_, dispatcher_, request, 
Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  Http::TestRequestHeaderMapImpl headers;\n  client_->onCreateInitialMetadata(headers);\n  EXPECT_EQ(nullptr, headers.RequestId());\n  EXPECT_CALL(span_, setTag(Eq(\"ext_authz_status\"), Eq(\"ext_authz_unauthorized\")));\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzDeniedResponse(authz_response))));\n\n  client_->onSuccess(std::move(check_response), span_);\n}\n\n// Test the client when an unknown error occurs.\nTEST_P(ExtAuthzGrpcClientTest, UnknownError) {\n  initialize(GetParam());\n\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer, enableTimer(timeout_.value(), _));\n  bool timer_destroyed = false;\n  timer->timer_destroyed_ = &timer_destroyed;\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzErrorResponse(CheckStatus::Error))));\n  client_->onFailure(Grpc::Status::Unknown, \"\", span_);\n\n  // make sure the internal timeout timer is destroyed\n  EXPECT_EQ(timer_destroyed, true);\n}\n\n// Test the client when the request is canceled.\nTEST_P(ExtAuthzGrpcClientTest, CancelledAuthorizationRequest) {\n  initialize(GetParam());\n\n  envoy::service::auth::v3::CheckRequest request;\n  EXPECT_CALL(*async_client_, sendRaw(_, _, _, _, _, _)).WillOnce(Return(&async_request_));\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  EXPECT_CALL(async_request_, cancel());\n  client_->cancel();\n}\n\n// Test the client when the request times out.\nTEST_P(ExtAuthzGrpcClientTest, AuthorizationRequestTimeout) {\n  TestScopedRuntime scoped_runtime;\n  
Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\", \"false\"}});\n  initialize(GetParam());\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzErrorResponse(CheckStatus::Error))));\n  client_->onFailure(Grpc::Status::DeadlineExceeded, \"\", span_);\n}\n\n// Test the client when the request times out on an internal timeout.\nTEST_P(ExtAuthzGrpcClientTest, AuthorizationInternalRequestTimeout) {\n  initialize(GetParam());\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\", \"true\"}});\n\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer, enableTimer(timeout_.value(), _));\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  EXPECT_CALL(async_request_, cancel());\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzTimedoutResponse())));\n  timer->invokeCallback();\n}\n\n// Test when the client is cancelled with internal timeout.\nTEST_P(ExtAuthzGrpcClientTest, AuthorizationInternalRequestTimeoutCancelled) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\", \"true\"}});\n\n  initialize(GetParam());\n\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer, 
enableTimer(timeout_.value(), _));\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  EXPECT_CALL(async_request_, cancel());\n  EXPECT_CALL(request_callbacks_, onComplete_(_)).Times(0);\n  // make sure cancel resets the timer:\n  bool timer_destroyed = false;\n  timer->timer_destroyed_ = &timer_destroyed;\n  client_->cancel();\n  EXPECT_EQ(timer_destroyed, true);\n}\n\n// Test the client when an OK response is received with dynamic metadata in that OK response.\nTEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithDynamicMetadata) {\n  initialize(GetParam());\n\n  auto check_response = std::make_unique<envoy::service::auth::v3::CheckResponse>();\n  auto status = check_response->mutable_status();\n\n  ProtobufWkt::Struct expected_dynamic_metadata;\n  auto* metadata_fields = expected_dynamic_metadata.mutable_fields();\n  (*metadata_fields)[\"original\"] = ValueUtil::stringValue(\"true\");\n  check_response->mutable_dynamic_metadata()->MergeFrom(expected_dynamic_metadata);\n\n  ProtobufWkt::Struct overridden_dynamic_metadata;\n  metadata_fields = overridden_dynamic_metadata.mutable_fields();\n  (*metadata_fields)[\"original\"] = ValueUtil::stringValue(\"false\");\n\n  check_response->mutable_ok_response()->mutable_dynamic_metadata()->MergeFrom(\n      overridden_dynamic_metadata);\n\n  status->set_code(Grpc::Status::WellKnownGrpcStatus::Ok);\n\n  // This is the expected authz response.\n  auto authz_response = Response{};\n  authz_response.status = CheckStatus::OK;\n  authz_response.dynamic_metadata = overridden_dynamic_metadata;\n\n  envoy::service::auth::v3::CheckRequest request;\n  expectCallSend(request);\n  client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(),\n                 stream_info_);\n\n  Http::TestRequestHeaderMapImpl headers;\n  
client_->onCreateInitialMetadata(headers);\n\n  EXPECT_CALL(span_, setTag(Eq(\"ext_authz_status\"), Eq(\"ext_authz_ok\")));\n  EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo<ResponsePtr&>(\n                                      AuthzResponseNoAttributes(authz_response))));\n  client_->onSuccess(std::move(check_response), span_);\n}\n\nclass AsyncClientCacheTest : public testing::Test {\npublic:\n  AsyncClientCacheTest() {\n    client_cache_ = std::make_unique<AsyncClientCache>(async_client_manager_, scope_, tls_);\n  }\n\n  void expectClientCreation() {\n    factory_ = new Grpc::MockAsyncClientFactory;\n    async_client_ = new Grpc::MockAsyncClient;\n    EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, true))\n        .WillOnce(Invoke([this](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n          EXPECT_CALL(*factory_, create()).WillOnce(Invoke([this] {\n            return Grpc::RawAsyncClientPtr{async_client_};\n          }));\n          return Grpc::AsyncClientFactoryPtr{factory_};\n        }));\n  }\n\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Grpc::MockAsyncClientManager async_client_manager_;\n  Grpc::MockAsyncClient* async_client_ = nullptr;\n  Grpc::MockAsyncClientFactory* factory_ = nullptr;\n  std::unique_ptr<AsyncClientCache> client_cache_;\n  NiceMock<Stats::MockIsolatedStatsStore> scope_;\n};\n\nTEST_F(AsyncClientCacheTest, Deduplication) {\n  Stats::IsolatedStoreImpl scope;\n  testing::InSequence s;\n\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthz config;\n  config.mutable_grpc_service()->mutable_google_grpc()->set_target_uri(\"dns://test01\");\n  config.mutable_grpc_service()->mutable_google_grpc()->set_credentials_factory_name(\n      \"test_credential01\");\n\n  expectClientCreation();\n  Grpc::RawAsyncClientSharedPtr test_client_01 = client_cache_->getOrCreateAsyncClient(config);\n  // Fetches the existing client.\n  EXPECT_EQ(test_client_01, 
client_cache_->getOrCreateAsyncClient(config));\n\n  config.mutable_grpc_service()->mutable_google_grpc()->set_credentials_factory_name(\n      \"test_credential02\");\n  expectClientCreation();\n  // Different credentials use different clients.\n  EXPECT_NE(test_client_01, client_cache_->getOrCreateAsyncClient(config));\n  Grpc::RawAsyncClientSharedPtr test_client_02 = client_cache_->getOrCreateAsyncClient(config);\n\n  config.mutable_grpc_service()->mutable_google_grpc()->set_credentials_factory_name(\n      \"test_credential02\");\n  // No creation, fetching the existing one.\n  EXPECT_EQ(test_client_02, client_cache_->getOrCreateAsyncClient(config));\n\n  // Different targets use different clients.\n  config.mutable_grpc_service()->mutable_google_grpc()->set_target_uri(\"dns://test02\");\n  expectClientCreation();\n  EXPECT_NE(test_client_01, client_cache_->getOrCreateAsyncClient(config));\n  EXPECT_NE(test_client_02, client_cache_->getOrCreateAsyncClient(config));\n}\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc",
    "content": "#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz_http_impl.h\"\n\n#include \"test/extensions/filters/common/ext_authz/mocks.h\"\n#include \"test/extensions/filters/common/ext_authz/test_common.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AllOf;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::WhenDynamicCastTo;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\nnamespace {\n\nconstexpr uint32_t REQUEST_TIMEOUT{250};\n\nclass ExtAuthzHttpClientTest : public testing::Test {\npublic:\n  ExtAuthzHttpClientTest() : async_request_{&async_client_} { initialize(EMPTY_STRING); }\n\n  void initialize(const std::string& yaml) {\n    config_ = createConfig(yaml);\n    client_ = std::make_unique<RawHttpClientImpl>(cm_, config_);\n    ON_CALL(cm_, httpAsyncClientForCluster(config_->cluster()))\n        .WillByDefault(ReturnRef(async_client_));\n  }\n\n  ClientConfigSharedPtr createConfig(const std::string& yaml = EMPTY_STRING,\n                                     uint32_t timeout = REQUEST_TIMEOUT,\n                                     const std::string& path_prefix = \"/bar\") {\n    envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config{};\n    if (yaml.empty()) {\n      const std::string default_yaml = R\"EOF(\n        http_service:\n          server_uri:\n        
    uri: \"ext_authz:9000\"\n            cluster: \"ext_authz\"\n            timeout: 0.25s\n\n          authorization_request:\n            allowed_headers:\n              patterns:\n              - exact: Baz\n                ignore_case: true\n              - prefix: \"X-\"\n                ignore_case: true\n              - safe_regex:\n                  google_re2: {}\n                  regex: regex-foo.?\n            headers_to_add:\n            - key: \"x-authz-header1\"\n              value: \"value\"\n            - key: \"x-authz-header2\"\n              value: \"value\"\n\n          authorization_response:\n            allowed_upstream_headers:\n              patterns:\n              - exact: Bar\n                ignore_case: true\n              - prefix: \"X-\"\n                ignore_case: true\n            allowed_upstream_headers_to_append:\n              patterns:\n              - exact: Alice\n                ignore_case: true\n              - prefix: \"Append-\"\n                ignore_case: true\n            allowed_client_headers:\n              patterns:\n              - exact: Foo\n                ignore_case: true\n              - prefix: \"X-\"\n                ignore_case: true\n        )EOF\";\n      TestUtility::loadFromYaml(default_yaml, proto_config);\n    } else {\n      TestUtility::loadFromYaml(yaml, proto_config);\n    }\n    return std::make_shared<ClientConfig>(proto_config, timeout, path_prefix);\n  }\n\n  Http::RequestMessagePtr sendRequest(absl::node_hash_map<std::string, std::string>&& headers) {\n    envoy::service::auth::v3::CheckRequest request{};\n    auto mutable_headers =\n        request.mutable_attributes()->mutable_request()->mutable_http()->mutable_headers();\n    for (const auto& header : headers) {\n      (*mutable_headers)[header.first] = header.second;\n    }\n\n    Http::RequestMessagePtr message_ptr;\n    EXPECT_CALL(async_client_, send_(_, _, _))\n        .WillOnce(Invoke(\n            
[&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks&,\n                const Envoy::Http::AsyncClient::RequestOptions) -> Http::AsyncClient::Request* {\n              message_ptr = std::move(message);\n              return nullptr;\n            }));\n\n    const auto expected_headers = TestCommon::makeHeaderValueOption({{\":status\", \"200\", false}});\n    const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK);\n    auto check_response = TestCommon::makeMessageResponse(expected_headers);\n\n    client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n    EXPECT_CALL(request_callbacks_,\n                onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));\n    client_->onSuccess(async_request_, std::move(check_response));\n\n    return message_ptr;\n  }\n\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<Http::MockAsyncClient> async_client_;\n  NiceMock<Http::MockAsyncClientRequest> async_request_;\n  ClientConfigSharedPtr config_;\n  std::unique_ptr<RawHttpClientImpl> client_;\n  MockRequestCallbacks request_callbacks_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Tracing::MockSpan parent_span_;\n  Tracing::MockSpan child_span_;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n};\n\n// Test HTTP client config default values.\nTEST_F(ExtAuthzHttpClientTest, ClientConfig) {\n  const Http::LowerCaseString foo{\"foo\"};\n  const Http::LowerCaseString baz{\"baz\"};\n  const Http::LowerCaseString bar{\"bar\"};\n  const Http::LowerCaseString alice{\"alice\"};\n\n  // Check allowed request headers.\n  EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Method.get()));\n  EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Host.get()));\n  EXPECT_TRUE(\n      config_->requestHeaderMatchers()->matches(Http::CustomHeaders::get().Authorization.get()));\n  
EXPECT_FALSE(config_->requestHeaderMatchers()->matches(Http::Headers::get().ContentLength.get()));\n  EXPECT_TRUE(config_->requestHeaderMatchers()->matches(baz.get()));\n\n  // Check allowed client headers.\n  EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Status.get()));\n  EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().ContentLength.get()));\n  EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Path.get()));\n  EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Host.get()));\n  EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().WWWAuthenticate.get()));\n  EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::CustomHeaders::get().Origin.get()));\n  EXPECT_TRUE(config_->clientHeaderMatchers()->matches(foo.get()));\n\n  // Check allowed upstream headers.\n  EXPECT_TRUE(config_->upstreamHeaderMatchers()->matches(bar.get()));\n\n  // Check allowed upstream headers to append.\n  EXPECT_TRUE(config_->upstreamHeaderToAppendMatchers()->matches(alice.get()));\n\n  // Check other attributes.\n  EXPECT_EQ(config_->pathPrefix(), \"/bar\");\n  EXPECT_EQ(config_->cluster(), \"ext_authz\");\n  EXPECT_EQ(config_->tracingName(), \"async ext_authz egress\");\n  EXPECT_EQ(config_->timeout(), std::chrono::milliseconds{250});\n}\n\n// Test default allowed headers in the HTTP client.\nTEST_F(ExtAuthzHttpClientTest, TestDefaultAllowedHeaders) {\n  const std::string yaml = R\"EOF(\n  http_service:\n    server_uri:\n      uri: \"ext_authz:9000\"\n      cluster: \"ext_authz\"\n      timeout: 0.25s\n  failure_mode_allow: true\n  )EOF\";\n\n  initialize(yaml);\n\n  // Check allowed request headers.\n  EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Method.get()));\n  EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Host.get()));\n  EXPECT_TRUE(\n      
config_->requestHeaderMatchers()->matches(Http::CustomHeaders::get().Authorization.get()));\n  EXPECT_FALSE(config_->requestHeaderMatchers()->matches(Http::Headers::get().ContentLength.get()));\n\n  // Check allowed client headers.\n  EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().ContentLength.get()));\n  EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Host.get()));\n\n  // Check allowed upstream headers.\n  EXPECT_FALSE(\n      config_->upstreamHeaderMatchers()->matches(Http::Headers::get().ContentLength.get()));\n}\n\n// Verify client response when the authorization server returns a 200 OK and path_prefix is\n// configured.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithPathRewrite) {\n  Http::RequestMessagePtr message_ptr = sendRequest({{\":path\", \"/foo\"}, {\"foo\", \"bar\"}});\n\n  EXPECT_EQ(message_ptr->headers().getPathValue(), \"/bar/foo\");\n}\n\n// Test the client when a request contains Content-Length greater than 0.\nTEST_F(ExtAuthzHttpClientTest, ContentLengthEqualZero) {\n  Http::RequestMessagePtr message_ptr =\n      sendRequest({{Http::Headers::get().ContentLength.get(), std::string{\"47\"}},\n                   {Http::Headers::get().Method.get(), std::string{\"POST\"}}});\n\n  EXPECT_EQ(message_ptr->headers().getContentLengthValue(), \"0\");\n  EXPECT_EQ(message_ptr->headers().getMethodValue(), \"POST\");\n}\n\n// Test the client when a request contains Content-Length greater than 0.\nTEST_F(ExtAuthzHttpClientTest, ContentLengthEqualZeroWithAllowedHeaders) {\n  const std::string yaml = R\"EOF(\n  http_service:\n    server_uri:\n      uri: \"ext_authz:9000\"\n      cluster: \"ext_authz\"\n      timeout: 0.25s\n    authorization_request:\n      allowed_headers:\n        patterns:\n        - exact: content-length\n  failure_mode_allow: true\n  )EOF\";\n\n  initialize(yaml);\n  EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Method.get()));\n  
EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().ContentLength.get()));\n\n  Http::RequestMessagePtr message_ptr =\n      sendRequest({{Http::Headers::get().ContentLength.get(), std::string{\"47\"}},\n                   {Http::Headers::get().Method.get(), std::string{\"POST\"}}});\n\n  EXPECT_EQ(message_ptr->headers().getContentLengthValue(), \"0\");\n  EXPECT_EQ(message_ptr->headers().getMethodValue(), \"POST\");\n}\n\n// Test the client when a request contains headers in the prefix matchers.\nTEST_F(ExtAuthzHttpClientTest, AllowedRequestHeadersPrefix) {\n  const Http::LowerCaseString regexFood{\"regex-food\"};\n  const Http::LowerCaseString regexFool{\"regex-fool\"};\n  Http::RequestMessagePtr message_ptr =\n      sendRequest({{Http::Headers::get().XContentTypeOptions.get(), \"foobar\"},\n                   {Http::Headers::get().XSquashDebug.get(), \"foo\"},\n                   {Http::Headers::get().ContentType.get(), \"bar\"},\n                   {regexFood.get(), \"food\"},\n                   {regexFool.get(), \"fool\"}});\n\n  EXPECT_EQ(message_ptr->headers().get(Http::Headers::get().ContentType), nullptr);\n  const auto* x_squash = message_ptr->headers().get(Http::Headers::get().XSquashDebug);\n  ASSERT_NE(x_squash, nullptr);\n  EXPECT_EQ(x_squash->value().getStringView(), \"foo\");\n\n  const auto* x_content_type = message_ptr->headers().get(Http::Headers::get().XContentTypeOptions);\n  ASSERT_NE(x_content_type, nullptr);\n  EXPECT_EQ(x_content_type->value().getStringView(), \"foobar\");\n\n  const auto* food = message_ptr->headers().get(regexFood);\n  ASSERT_NE(food, nullptr);\n  EXPECT_EQ(food->value().getStringView(), \"food\");\n\n  const auto* fool = message_ptr->headers().get(regexFool);\n  ASSERT_NE(fool, nullptr);\n  EXPECT_EQ(fool->value().getStringView(), \"fool\");\n}\n\n// Verify client response when authorization server returns a 200 OK.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationOk) {\n  NiceMock<Event::MockTimer>* 
timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer, enableTimer(_, _));\n  bool timer_destroyed = false;\n  timer->timer_destroyed_ = &timer_destroyed;\n\n  const auto expected_headers = TestCommon::makeHeaderValueOption({{\":status\", \"200\", false}});\n  const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK);\n  auto check_response = TestCommon::makeMessageResponse(expected_headers);\n  envoy::service::auth::v3::CheckRequest request;\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));\n  client_->onSuccess(async_request_, std::move(check_response));\n  // make sure the internal timeout timer is destroyed\n  EXPECT_EQ(timer_destroyed, true);\n}\n\nusing HeaderValuePair = std::pair<const Http::LowerCaseString, const std::string>;\n\n// Verify client response headers when authorization_headers_to_add is configured.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeaders) {\n  const auto expected_headers = TestCommon::makeHeaderValueOption({{\":status\", \"200\", false}});\n  const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK);\n  auto check_response = TestCommon::makeMessageResponse(expected_headers);\n  envoy::service::auth::v3::CheckRequest request;\n  auto mutable_headers =\n      request.mutable_attributes()->mutable_request()->mutable_http()->mutable_headers();\n  (*mutable_headers)[std::string{\":x-authz-header2\"}] = std::string{\"forged-value\"};\n  // Expect that header1 will be added and header2 correctly overwritten. 
Due to this behavior, the\n  // append property of header value option should always be false.\n  const HeaderValuePair header1{\"x-authz-header1\", \"value\"};\n  const HeaderValuePair header2{\"x-authz-header2\", \"value\"};\n  EXPECT_CALL(async_client_,\n              send_(AllOf(ContainsPairAsHeader(header1), ContainsPairAsHeader(header2)), _, _));\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  // Check for child span tagging when the request is allowed.\n  EXPECT_CALL(child_span_, setTag(Eq(\"ext_authz_http_status\"), Eq(\"OK\")));\n  EXPECT_CALL(child_span_, setTag(Eq(\"ext_authz_status\"), Eq(\"ext_authz_ok\")));\n  client_->onBeforeFinalizeUpstreamSpan(child_span_, &check_response->headers());\n\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));\n  client_->onSuccess(async_request_, std::move(check_response));\n}\n\n// Verify client response headers when authorization_headers_to_add is configured with value from\n// stream info.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeadersFromStreamInfo) {\n  const std::string yaml = R\"EOF(\n  http_service:\n    server_uri:\n      uri: \"ext_authz:9000\"\n      cluster: \"ext_authz\"\n      timeout: 0.25s\n    authorization_request:\n      headers_to_add:\n      - key: \"x-authz-header1\"\n        value: \"%REQ(x-request-id)%\"\n  failure_mode_allow: true\n  )EOF\";\n\n  initialize(yaml);\n\n  const auto expected_headers = TestCommon::makeHeaderValueOption({{\":status\", \"200\", false}});\n  const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK);\n  auto check_response = TestCommon::makeMessageResponse(expected_headers);\n\n  const HeaderValuePair expected_header{\"x-authz-header1\", \"123\"};\n  EXPECT_CALL(async_client_, send_(ContainsPairAsHeader(expected_header), _, _));\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  
request_headers.addCopy(Http::LowerCaseString(std::string(\"x-request-id\")),\n                          expected_header.second);\n\n  StreamInfo::MockStreamInfo stream_info;\n  EXPECT_CALL(stream_info, getRequestHeaders()).WillOnce(Return(&request_headers));\n\n  envoy::service::auth::v3::CheckRequest request;\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info);\n\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));\n  client_->onSuccess(async_request_, std::move(check_response));\n}\n\n// Verify client response headers when allow_upstream_headers is configured.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) {\n  const std::string empty_body{};\n  const auto expected_headers =\n      TestCommon::makeHeaderValueOption({{\"x-baz\", \"foo\", false}, {\"bar\", \"foo\", false}});\n  const auto authz_response =\n      TestCommon::makeAuthzResponse(CheckStatus::OK, Http::Code::OK, empty_body, expected_headers);\n\n  envoy::service::auth::v3::CheckRequest request;\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  const auto check_response_headers =\n      TestCommon::makeHeaderValueOption({{\":status\", \"200\", false},\n                                         {\":path\", \"/bar\", false},\n                                         {\":method\", \"post\", false},\n                                         {\"content-length\", \"post\", false},\n                                         {\"bar\", \"foo\", false},\n                                         {\"x-baz\", \"foo\", false},\n                                         {\"foobar\", \"foo\", false}});\n\n  auto message_response = TestCommon::makeMessageResponse(check_response_headers);\n  client_->onSuccess(async_request_, 
std::move(message_response));\n}\n\n// Verify headers present in x-envoy-auth-headers-to-remove make it into the\n// Response correctly.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithHeadersToRemove) {\n  envoy::service::auth::v3::CheckRequest request;\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  // When we call onSuccess() at the bottom of the test we expect that all the\n  // headers-to-remove in that http response to have been correctly extracted\n  // and inserted into the authz Response just below.\n  Response authz_response;\n  authz_response.status = CheckStatus::OK;\n  authz_response.headers_to_remove.emplace_back(Http::LowerCaseString{\"remove-me\"});\n  authz_response.headers_to_remove.emplace_back(Http::LowerCaseString{\"remove-me-too\"});\n  authz_response.headers_to_remove.emplace_back(Http::LowerCaseString{\"remove-me-also\"});\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));\n\n  const HeaderValueOptionVector http_response_headers = TestCommon::makeHeaderValueOption({\n      {\":status\", \"200\", false},\n      {\"x-envoy-auth-headers-to-remove\", \" ,remove-me,, ,  remove-me-too , \", false},\n      {\"x-envoy-auth-headers-to-remove\", \" remove-me-also \", false},\n  });\n  Http::ResponseMessagePtr http_response = TestCommon::makeMessageResponse(http_response_headers);\n  client_->onSuccess(async_request_, std::move(http_response));\n}\n\n// Test the client when a denied response is received.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) {\n  const auto expected_headers = TestCommon::makeHeaderValueOption({{\":status\", \"403\", false}});\n  const auto authz_response = TestCommon::makeAuthzResponse(\n      CheckStatus::Denied, Http::Code::Forbidden, EMPTY_STRING, expected_headers);\n  auto check_response = TestCommon::makeMessageResponse(expected_headers);\n\n  envoy::service::auth::v3::CheckRequest 
request;\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  // Check for child span tagging when the request is denied.\n  EXPECT_CALL(child_span_, setTag(Eq(\"ext_authz_http_status\"), Eq(\"Forbidden\")));\n  EXPECT_CALL(child_span_, setTag(Eq(\"ext_authz_status\"), Eq(\"ext_authz_unauthorized\")));\n  client_->onBeforeFinalizeUpstreamSpan(child_span_, &check_response->headers());\n\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzDeniedResponse(authz_response))));\n  client_->onSuccess(async_request_, TestCommon::makeMessageResponse(expected_headers));\n}\n\n// Verify client response headers and body when the authorization server denies the request.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithAllAttributes) {\n  const auto expected_body = std::string{\"test\"};\n  const auto expected_headers = TestCommon::makeHeaderValueOption(\n      {{\":status\", \"401\", false}, {\"foo\", \"bar\", false}, {\"x-foobar\", \"bar\", false}});\n  const auto authz_response = TestCommon::makeAuthzResponse(\n      CheckStatus::Denied, Http::Code::Unauthorized, expected_body, expected_headers);\n\n  envoy::service::auth::v3::CheckRequest request;\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzDeniedResponse(authz_response))));\n  client_->onSuccess(async_request_,\n                     TestCommon::makeMessageResponse(expected_headers, expected_body));\n}\n\n// Verify client response headers when the authorization server denies the request and\n// allowed_client_headers is configured.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) {\n  const auto expected_body = std::string{\"test\"};\n  const auto authz_response = TestCommon::makeAuthzResponse(\n      CheckStatus::Denied, Http::Code::Unauthorized, expected_body,\n 
     TestCommon::makeHeaderValueOption(\n          {{\"x-foo\", \"bar\", false}, {\":status\", \"401\", false}, {\"foo\", \"bar\", false}}));\n\n  envoy::service::auth::v3::CheckRequest request;\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzDeniedResponse(authz_response))));\n  const auto check_response_headers = TestCommon::makeHeaderValueOption({{\":method\", \"post\", false},\n                                                                         {\"x-foo\", \"bar\", false},\n                                                                         {\":status\", \"401\", false},\n                                                                         {\"foo\", \"bar\", false}});\n  client_->onSuccess(async_request_,\n                     TestCommon::makeMessageResponse(check_response_headers, expected_body));\n}\n\n// Test the client when an unknown error occurs.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) {\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer, enableTimer(_, _));\n  bool timer_destroyed = false;\n  timer->timer_destroyed_ = &timer_destroyed;\n\n  envoy::service::auth::v3::CheckRequest request;\n\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzErrorResponse(CheckStatus::Error))));\n  client_->onFailure(async_request_, Http::AsyncClient::FailureReason::Reset);\n  // make sure the internal timeout timer is destroyed\n  // EXPECT_EQ(timer_destroyed, true);\n}\n\n// Test the client when a call to authorization server returns a 5xx error status.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationRequest5xxError) {\n  Http::ResponseMessagePtr check_response(new Http::ResponseMessageImpl(\n      
Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}}));\n  envoy::service::auth::v3::CheckRequest request;\n\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzErrorResponse(CheckStatus::Error))));\n  client_->onSuccess(async_request_, std::move(check_response));\n}\n\n// Test the client when the request is canceled.\nTEST_F(ExtAuthzHttpClientTest, CancelledAuthorizationRequest) {\n  envoy::service::auth::v3::CheckRequest request;\n\n  EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_));\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  EXPECT_CALL(async_request_, cancel());\n  client_->cancel();\n}\n\n// Test the client when the request times out on an internal timeout.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationInternalRequestTimeout) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\", \"true\"}});\n\n  initialize(\"\");\n  envoy::service::auth::v3::CheckRequest request;\n\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(REQUEST_TIMEOUT), _));\n\n  EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_));\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  EXPECT_CALL(async_request_, cancel());\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzTimedoutResponse())));\n  timer->invokeCallback();\n}\n\n// Test when the client is cancelled with internal timeout.\nTEST_F(ExtAuthzHttpClientTest, AuthorizationInternalRequestTimeoutCancelled) {\n  TestScopedRuntime scoped_runtime;\n  
Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\", \"true\"}});\n\n  initialize(\"\");\n  envoy::service::auth::v3::CheckRequest request;\n\n  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(REQUEST_TIMEOUT), _));\n\n  EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_));\n  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);\n\n  // make sure cancel resets the timer:\n  EXPECT_CALL(async_request_, cancel());\n  bool timer_destroyed = false;\n  timer->timer_destroyed_ = &timer_destroyed;\n  client_->cancel();\n  EXPECT_EQ(timer_destroyed, true);\n}\n\n// Test the client when the configured cluster is missing/removed.\nTEST_F(ExtAuthzHttpClientTest, NoCluster) {\n  InSequence s;\n\n  EXPECT_CALL(cm_, get(Eq(\"ext_authz\"))).WillOnce(Return(nullptr));\n  EXPECT_CALL(cm_, httpAsyncClientForCluster(\"ext_authz\")).Times(0);\n  EXPECT_CALL(request_callbacks_,\n              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzErrorResponse(CheckStatus::Error))));\n  client_->check(request_callbacks_, dispatcher_, envoy::service::auth::v3::CheckRequest{},\n                 parent_span_, stream_info_);\n}\n\n} // namespace\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ext_authz/mocks.cc",
    "content": "#include \"mocks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nMockClient::MockClient() = default;\nMockClient::~MockClient() = default;\n\nMockRequestCallbacks::MockRequestCallbacks() = default;\nMockRequestCallbacks::~MockRequestCallbacks() = default;\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ext_authz/mocks.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nclass MockClient : public Client {\npublic:\n  MockClient();\n  ~MockClient() override;\n\n  // ExtAuthz::Client\n  MOCK_METHOD(void, cancel, ());\n  MOCK_METHOD(void, check,\n              (RequestCallbacks & callbacks, Event::Dispatcher& dispatcher,\n               const envoy::service::auth::v3::CheckRequest& request, Tracing::Span& parent_span,\n               const StreamInfo::StreamInfo& stream_info));\n};\n\nclass MockRequestCallbacks : public RequestCallbacks {\npublic:\n  MockRequestCallbacks();\n  ~MockRequestCallbacks() override;\n\n  void onComplete(ResponsePtr&& response) override { onComplete_(response); }\n\n  MOCK_METHOD(void, onComplete_, (ResponsePtr & response));\n};\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ext_authz/test_common.cc",
    "content": "#include \"test/extensions/filters/common/ext_authz/test_common.h\"\n\n#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/type/v3/http_status.pb.h\"\n\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nCheckResponsePtr TestCommon::makeCheckResponse(Grpc::Status::GrpcStatus response_status,\n                                               envoy::type::v3::StatusCode http_status_code,\n                                               const std::string& body,\n                                               const HeaderValueOptionVector& headers) {\n  auto response = std::make_unique<envoy::service::auth::v3::CheckResponse>();\n  auto status = response->mutable_status();\n  status->set_code(response_status);\n\n  if (response_status != Grpc::Status::WellKnownGrpcStatus::Ok) {\n    const auto denied_response = response->mutable_denied_response();\n    if (!body.empty()) {\n      denied_response->set_body(body);\n    }\n\n    auto status_code = denied_response->mutable_status();\n    status_code->set_code(http_status_code);\n\n    auto denied_response_headers = denied_response->mutable_headers();\n    if (!headers.empty()) {\n      for (const auto& header : headers) {\n        auto* item = denied_response_headers->Add();\n        item->CopyFrom(header);\n      }\n    }\n  } else {\n    if (!headers.empty()) {\n      const auto ok_response_headers = response->mutable_ok_response()->mutable_headers();\n      for (const auto& header : headers) {\n        auto* item = ok_response_headers->Add();\n        item->CopyFrom(header);\n      }\n    }\n  }\n  return response;\n}\n\nResponse TestCommon::makeAuthzResponse(CheckStatus status, Http::Code status_code,\n                                       const std::string& body,\n                                       const 
HeaderValueOptionVector& headers) {\n  auto authz_response = Response{};\n  authz_response.status = status;\n  authz_response.status_code = status_code;\n  if (!body.empty()) {\n    authz_response.body = body;\n  }\n  if (!headers.empty()) {\n    for (auto& header : headers) {\n      if (header.append().value()) {\n        authz_response.headers_to_append.emplace_back(Http::LowerCaseString(header.header().key()),\n                                                      header.header().value());\n      } else {\n        authz_response.headers_to_set.emplace_back(Http::LowerCaseString(header.header().key()),\n                                                   header.header().value());\n      }\n    }\n  }\n  return authz_response;\n}\n\nHeaderValueOptionVector TestCommon::makeHeaderValueOption(KeyValueOptionVector&& headers) {\n  HeaderValueOptionVector header_option_vector{};\n  for (const auto& header : headers) {\n    envoy::config::core::v3::HeaderValueOption header_value_option;\n    auto* mutable_header = header_value_option.mutable_header();\n    mutable_header->set_key(header.key);\n    mutable_header->set_value(header.value);\n    header_value_option.mutable_append()->set_value(header.append);\n    header_option_vector.push_back(header_value_option);\n  }\n  return header_option_vector;\n}\n\nHttp::ResponseMessagePtr TestCommon::makeMessageResponse(const HeaderValueOptionVector& headers,\n                                                         const std::string& body) {\n  Http::ResponseMessagePtr response(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{}}));\n  for (auto& header : headers) {\n    response->headers().addCopy(Http::LowerCaseString(header.header().key()),\n                                header.header().value());\n  }\n  response->body().add(body);\n  return response;\n};\n\nbool TestCommon::compareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs) {\n  return 
std::set<std::pair<Http::LowerCaseString, std::string>>(lhs.begin(), lhs.end()) ==\n         std::set<std::pair<Http::LowerCaseString, std::string>>(rhs.begin(), rhs.end());\n}\n\nbool TestCommon::compareVectorOfHeaderName(const std::vector<Http::LowerCaseString>& lhs,\n                                           const std::vector<Http::LowerCaseString>& rhs) {\n  return std::set<Http::LowerCaseString>(lhs.begin(), lhs.end()) ==\n         std::set<Http::LowerCaseString>(rhs.begin(), rhs.end());\n}\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ext_authz/test_common.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/type/v3/http_status.pb.h\"\n\n#include \"common/http/headers.h\"\n\n#include \"extensions/filters/common/ext_authz/ext_authz_grpc_impl.h\"\n\n#include \"test/extensions/filters/common/ext_authz/mocks.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace ExtAuthz {\n\nstruct KeyValueOption {\n  std::string key;\n  std::string value;\n  bool append;\n};\n\nusing KeyValueOptionVector = std::vector<KeyValueOption>;\nusing HeaderValueOptionVector = std::vector<envoy::config::core::v3::HeaderValueOption>;\nusing CheckResponsePtr = std::unique_ptr<envoy::service::auth::v3::CheckResponse>;\n\nclass TestCommon {\npublic:\n  static Http::ResponseMessagePtr makeMessageResponse(const HeaderValueOptionVector& headers,\n                                                      const std::string& body = std::string{});\n\n  static CheckResponsePtr makeCheckResponse(\n      Grpc::Status::GrpcStatus response_status = Grpc::Status::WellKnownGrpcStatus::Ok,\n      envoy::type::v3::StatusCode http_status_code = envoy::type::v3::OK,\n      const std::string& body = std::string{},\n      const HeaderValueOptionVector& headers = HeaderValueOptionVector{});\n\n  static Response\n  makeAuthzResponse(CheckStatus status, Http::Code status_code = Http::Code::OK,\n                    const std::string& body = std::string{},\n                    const HeaderValueOptionVector& headers = HeaderValueOptionVector{});\n\n  static HeaderValueOptionVector makeHeaderValueOption(KeyValueOptionVector&& headers);\n\n  static bool compareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs);\n  static bool compareVectorOfHeaderName(const std::vector<Http::LowerCaseString>& lhs,\n                                        const 
std::vector<Http::LowerCaseString>& rhs);\n};\n\nMATCHER_P(AuthzErrorResponse, status, \"\") {\n  // These fields should be always empty when the status is an error.\n  if (!arg->headers_to_add.empty() || !arg->headers_to_append.empty() || !arg->body.empty()) {\n    return false;\n  }\n  // HTTP status code should be always set to Forbidden.\n  if (arg->status_code != Http::Code::Forbidden) {\n    return false;\n  }\n  return arg->status == status;\n}\n\nMATCHER(AuthzTimedoutResponse, \"\") {\n  // These fields should be always empty when the status is a timeout error.\n  if (!arg->headers_to_add.empty() || !arg->headers_to_append.empty() || !arg->body.empty()) {\n    return false;\n  }\n  // HTTP status code should be always set to Forbidden.\n  if (arg->status_code != Http::Code::Forbidden) {\n    return false;\n  }\n  return arg->status == CheckStatus::Error && arg->error_kind == ErrorKind::Timedout;\n}\n\nMATCHER_P(AuthzResponseNoAttributes, response, \"\") {\n  const bool equal_status = arg->status == response.status;\n  const bool equal_metadata =\n      TestUtility::protoEqual(arg->dynamic_metadata, response.dynamic_metadata);\n  if (!equal_metadata) {\n    *result_listener << \"\\n\"\n                     << \"==================Expected response dynamic metadata:==================\\n\"\n                     << response.dynamic_metadata.DebugString()\n                     << \"------------------is not equal to actual dynamic metadata:-------------\\n\"\n                     << arg->dynamic_metadata.DebugString()\n                     << \"=======================================================================\\n\";\n  }\n  return equal_status && equal_metadata;\n}\n\nMATCHER_P(AuthzDeniedResponse, response, \"\") {\n  if (arg->status != response.status) {\n    return false;\n  }\n  if (arg->status_code != response.status_code) {\n    return false;\n  }\n  if (arg->body.compare(response.body)) {\n    return false;\n  }\n  // Compare headers_to_add.\n  return 
TestCommon::compareHeaderVector(response.headers_to_add, arg->headers_to_add);\n}\n\nMATCHER_P(AuthzOkResponse, response, \"\") {\n  if (arg->status != response.status) {\n    return false;\n  }\n  // Compare headers_to_append.\n  if (!TestCommon::compareHeaderVector(response.headers_to_append, arg->headers_to_append)) {\n    return false;\n  }\n\n  // Compare headers_to_add.\n  if (!TestCommon::compareHeaderVector(response.headers_to_add, arg->headers_to_add)) {\n    return false;\n  }\n\n  return TestCommon::compareVectorOfHeaderName(response.headers_to_remove, arg->headers_to_remove);\n}\n\nMATCHER_P(ContainsPairAsHeader, pair, \"\") {\n  return arg->headers().get(pair.first)->value().getStringView() == pair.second;\n}\n\n} // namespace ExtAuthz\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/fault/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"fault_config_test\",\n    srcs = [\"fault_config_test.cc\"],\n    deps = [\n        \"//source/extensions/filters/common/fault:fault_config_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/common/fault/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/common/fault/fault_config_test.cc",
    "content": "#include \"envoy/extensions/filters/common/fault/v3/fault.pb.h\"\n\n#include \"extensions/filters/common/fault/fault_config.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Fault {\nnamespace {\n\nTEST(FaultConfigTest, FaultAbortHeaderConfig) {\n  envoy::extensions::filters::http::fault::v3::FaultAbort proto_config;\n  proto_config.mutable_header_abort();\n  FaultAbortConfig config(proto_config);\n\n  // Header with bad data.\n  Http::TestRequestHeaderMapImpl bad_headers{{\"x-envoy-fault-abort-request\", \"abc\"}};\n  EXPECT_EQ(absl::nullopt, config.httpStatusCode(&bad_headers));\n\n  // Out of range header - value too low.\n  Http::TestRequestHeaderMapImpl too_low_headers{{\"x-envoy-fault-abort-request\", \"199\"}};\n  EXPECT_EQ(absl::nullopt, config.httpStatusCode(&too_low_headers));\n\n  // Out of range header - value too high.\n  Http::TestRequestHeaderMapImpl too_high_headers{{\"x-envoy-fault-abort-request\", \"600\"}};\n  EXPECT_EQ(absl::nullopt, config.httpStatusCode(&too_high_headers));\n\n  // Valid header.\n  Http::TestRequestHeaderMapImpl good_headers{{\"x-envoy-fault-abort-request\", \"401\"}};\n  EXPECT_EQ(Http::Code::Unauthorized, config.httpStatusCode(&good_headers));\n}\n\nTEST(FaultConfigTest, FaultAbortGrpcHeaderConfig) {\n  envoy::extensions::filters::http::fault::v3::FaultAbort proto_config;\n  proto_config.mutable_header_abort();\n  FaultAbortConfig config(proto_config);\n\n  // Header with bad data.\n  Http::TestRequestHeaderMapImpl bad_headers{{\"x-envoy-fault-abort-grpc-request\", \"abc\"}};\n  EXPECT_EQ(absl::nullopt, config.grpcStatusCode(&bad_headers));\n\n  // Out of range header - value too low.\n  Http::TestRequestHeaderMapImpl too_low_headers{{\"x-envoy-fault-abort-grpc-request\", \"-1\"}};\n  EXPECT_EQ(absl::nullopt, config.grpcStatusCode(&too_low_headers));\n\n  // Valid header - with 
well-defined gRPC status code in [0,16] range.\n  Http::TestRequestHeaderMapImpl good_headers{{\"x-envoy-fault-abort-grpc-request\", \"5\"}};\n  EXPECT_EQ(Grpc::Status::NotFound, config.grpcStatusCode(&good_headers));\n\n  // Valid header - with not well-defined gRPC status code (> 16).\n  Http::TestRequestHeaderMapImpl too_high_headers{{\"x-envoy-fault-abort-grpc-request\", \"100\"}};\n  EXPECT_EQ(100, config.grpcStatusCode(&too_high_headers));\n}\n\nTEST(FaultConfigTest, FaultAbortPercentageHeaderConfig) {\n  envoy::extensions::filters::http::fault::v3::FaultAbort proto_config;\n  proto_config.mutable_header_abort();\n  proto_config.mutable_percentage()->set_numerator(80);\n  proto_config.mutable_percentage()->set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n  FaultAbortConfig config(proto_config);\n\n  // Header with bad data - fallback to proto config.\n  Http::TestRequestHeaderMapImpl bad_headers{{\"x-envoy-fault-abort-request-percentage\", \"abc\"}};\n  const auto bad_headers_percentage = config.percentage(&bad_headers);\n  EXPECT_EQ(proto_config.percentage().numerator(), bad_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(), bad_headers_percentage.denominator());\n\n  // Out of range header, value too low - fallback to proto config.\n  Http::TestRequestHeaderMapImpl too_low_headers{{\"x-envoy-fault-abort-request-percentage\", \"-1\"}};\n  const auto too_low_headers_percentage = config.percentage(&too_low_headers);\n  EXPECT_EQ(proto_config.percentage().numerator(), too_low_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(), too_low_headers_percentage.denominator());\n\n  // Valid header with value greater than the value of the numerator of default percentage - use\n  // proto config.\n  Http::TestRequestHeaderMapImpl good_headers{{\"x-envoy-fault-abort-request-percentage\", \"90\"}};\n  const auto good_headers_percentage = config.percentage(&good_headers);\n  
EXPECT_EQ(proto_config.percentage().numerator(), good_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(), good_headers_percentage.denominator());\n\n  // Valid header with value lesser than the value of the numerator of default percentage.\n  Http::TestRequestHeaderMapImpl greater_numerator_headers{\n      {\"x-envoy-fault-abort-request-percentage\", \"60\"}};\n  const auto greater_numerator_headers_percentage = config.percentage(&greater_numerator_headers);\n  EXPECT_EQ(60, greater_numerator_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(),\n            greater_numerator_headers_percentage.denominator());\n}\n\nTEST(FaultConfigTest, FaultDelayHeaderConfig) {\n  envoy::extensions::filters::common::fault::v3::FaultDelay proto_config;\n  proto_config.mutable_header_delay();\n  FaultDelayConfig config(proto_config);\n\n  // Header with bad data.\n  Http::TestRequestHeaderMapImpl bad_headers{{\"x-envoy-fault-delay-request\", \"abc\"}};\n  EXPECT_EQ(absl::nullopt, config.duration(&bad_headers));\n\n  // Valid header.\n  Http::TestRequestHeaderMapImpl good_headers{{\"x-envoy-fault-delay-request\", \"123\"}};\n  EXPECT_EQ(std::chrono::milliseconds(123), config.duration(&good_headers).value());\n}\n\nTEST(FaultConfigTest, FaultDelayPercentageHeaderConfig) {\n  envoy::extensions::filters::common::fault::v3::FaultDelay proto_config;\n  proto_config.mutable_header_delay();\n  proto_config.mutable_percentage()->set_numerator(80);\n  proto_config.mutable_percentage()->set_denominator(\n      envoy::type::v3::FractionalPercent::TEN_THOUSAND);\n  FaultDelayConfig config(proto_config);\n\n  // Header with bad data - fallback to proto config.\n  Http::TestRequestHeaderMapImpl bad_headers{{\"x-envoy-fault-delay-request-percentage\", \"abc\"}};\n  const auto bad_headers_percentage = config.percentage(&bad_headers);\n  EXPECT_EQ(proto_config.percentage().numerator(), bad_headers_percentage.numerator());\n  
EXPECT_EQ(proto_config.percentage().denominator(), bad_headers_percentage.denominator());\n\n  // Out of range header, value too low - fallback to proto config.\n  Http::TestRequestHeaderMapImpl too_low_headers{{\"x-envoy-fault-delay-request-percentage\", \"-1\"}};\n  const auto too_low_headers_percentage = config.percentage(&too_low_headers);\n  EXPECT_EQ(proto_config.percentage().numerator(), too_low_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(), too_low_headers_percentage.denominator());\n\n  // Valid header with value greater than the value of the numerator of default percentage - use\n  // proto config.\n  Http::TestRequestHeaderMapImpl good_headers{{\"x-envoy-fault-delay-request-percentage\", \"90\"}};\n  const auto good_headers_percentage = config.percentage(&good_headers);\n  EXPECT_EQ(proto_config.percentage().numerator(), good_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(), good_headers_percentage.denominator());\n\n  // Valid header with value lesser than the value of the numerator of default percentage.\n  Http::TestRequestHeaderMapImpl greater_numerator_headers{\n      {\"x-envoy-fault-delay-request-percentage\", \"60\"}};\n  const auto greater_numerator_headers_percentage = config.percentage(&greater_numerator_headers);\n  EXPECT_EQ(60, greater_numerator_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(),\n            greater_numerator_headers_percentage.denominator());\n}\n\nTEST(FaultConfigTest, FaultRateLimitHeaderConfig) {\n  envoy::extensions::filters::common::fault::v3::FaultRateLimit proto_config;\n  proto_config.mutable_header_limit();\n  FaultRateLimitConfig config(proto_config);\n\n  // Header with bad data.\n  Http::TestRequestHeaderMapImpl bad_headers{{\"x-envoy-fault-throughput-response\", \"abc\"}};\n  EXPECT_EQ(absl::nullopt, config.rateKbps(&bad_headers));\n\n  // Header with zero.\n  Http::TestRequestHeaderMapImpl 
zero_headers{{\"x-envoy-fault-throughput-response\", \"0\"}};\n  EXPECT_EQ(absl::nullopt, config.rateKbps(&zero_headers));\n\n  // Valid header.\n  Http::TestRequestHeaderMapImpl good_headers{{\"x-envoy-fault-throughput-response\", \"123\"}};\n  EXPECT_EQ(123UL, config.rateKbps(&good_headers).value());\n}\n\nTEST(FaultConfigTest, FaultRateLimitPercentageHeaderConfig) {\n  envoy::extensions::filters::common::fault::v3::FaultRateLimit proto_config;\n  proto_config.mutable_header_limit();\n  proto_config.mutable_percentage()->set_numerator(80);\n  proto_config.mutable_percentage()->set_denominator(envoy::type::v3::FractionalPercent::MILLION);\n  FaultRateLimitConfig config(proto_config);\n\n  // Header with bad data - fallback to proto config.\n  Http::TestRequestHeaderMapImpl bad_headers{\n      {\"x-envoy-fault-throughput-response-percentage\", \"abc\"}};\n  const auto bad_headers_percentage = config.percentage(&bad_headers);\n  EXPECT_EQ(proto_config.percentage().numerator(), bad_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(), bad_headers_percentage.denominator());\n\n  // Out of range header, value too low - fallback to proto config.\n  Http::TestRequestHeaderMapImpl too_low_headers{\n      {\"x-envoy-fault-throughput-response-percentage\", \"-1\"}};\n  const auto too_low_headers_percentage = config.percentage(&too_low_headers);\n  EXPECT_EQ(proto_config.percentage().numerator(), too_low_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(), too_low_headers_percentage.denominator());\n\n  // Valid header with value greater than the value of the numerator of default percentage - use\n  // proto config.\n  Http::TestRequestHeaderMapImpl good_headers{\n      {\"x-envoy-fault-throughput-response-percentage\", \"90\"}};\n  const auto good_headers_percentage = config.percentage(&good_headers);\n  EXPECT_EQ(proto_config.percentage().numerator(), good_headers_percentage.numerator());\n  
EXPECT_EQ(proto_config.percentage().denominator(), good_headers_percentage.denominator());\n\n  // Valid header with value lesser than the value of the numerator of default percentage.\n  Http::TestRequestHeaderMapImpl greater_numerator_headers{\n      {\"x-envoy-fault-throughput-response-percentage\", \"60\"}};\n  const auto greater_numerator_headers_percentage = config.percentage(&greater_numerator_headers);\n  EXPECT_EQ(60, greater_numerator_headers_percentage.numerator());\n  EXPECT_EQ(proto_config.percentage().denominator(),\n            greater_numerator_headers_percentage.denominator());\n}\n\n} // namespace\n} // namespace Fault\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/local_ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"local_ratelimit_test\",\n    srcs = [\"local_ratelimit_test.cc\"],\n    deps = [\n        \"//source/extensions/filters/common/local_ratelimit:local_ratelimit_lib\",\n        \"//test/mocks/event:event_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc",
    "content": "#include \"extensions/filters/common/local_ratelimit/local_ratelimit_impl.h\"\n\n#include \"test/mocks/event/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace LocalRateLimit {\n\nclass LocalRateLimiterImplTest : public testing::Test {\npublic:\n  void initialize(const std::chrono::milliseconds fill_interval, const uint32_t max_tokens,\n                  const uint32_t tokens_per_fill) {\n\n    fill_timer_ = new Event::MockTimer(&dispatcher_);\n    EXPECT_CALL(*fill_timer_, enableTimer(_, nullptr));\n    EXPECT_CALL(*fill_timer_, disableTimer());\n\n    rate_limiter_ = std::make_shared<LocalRateLimiterImpl>(fill_interval, max_tokens,\n                                                           tokens_per_fill, dispatcher_);\n  }\n\n  Thread::ThreadSynchronizer& synchronizer() { return rate_limiter_->synchronizer_; }\n\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Event::MockTimer* fill_timer_{};\n  std::shared_ptr<LocalRateLimiterImpl> rate_limiter_;\n};\n\n// Make sure we fail with a fill rate this is too fast.\nTEST_F(LocalRateLimiterImplTest, TooFastFillRate) {\n  EXPECT_THROW_WITH_MESSAGE(\n      LocalRateLimiterImpl(std::chrono::milliseconds(49), 100, 1, dispatcher_), EnvoyException,\n      \"local rate limit token bucket fill timer must be >= 50ms\");\n}\n\n// Verify various token bucket CAS edge cases.\nTEST_F(LocalRateLimiterImplTest, CasEdgeCases) {\n  // This tests the case in which an allowed check races with the fill timer.\n  {\n    initialize(std::chrono::milliseconds(50), 1, 1);\n\n    synchronizer().enable();\n\n    // Start a thread and start the fill callback. 
This will wait pre-CAS.\n    synchronizer().waitOn(\"on_fill_timer_pre_cas\");\n    std::thread t1([&] {\n      EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr));\n      fill_timer_->invokeCallback();\n    });\n    // Wait until the thread is actually waiting.\n    synchronizer().barrierOn(\"on_fill_timer_pre_cas\");\n\n    // This should succeed.\n    EXPECT_TRUE(rate_limiter_->requestAllowed());\n\n    // Now signal the thread to continue which should cause a CAS failure and the loop to repeat.\n    synchronizer().signal(\"on_fill_timer_pre_cas\");\n    t1.join();\n\n    // 1 -> 0 tokens\n    EXPECT_TRUE(rate_limiter_->requestAllowed());\n    EXPECT_FALSE(rate_limiter_->requestAllowed());\n  }\n\n  // This tests the case in which two allowed checks race.\n  {\n    initialize(std::chrono::milliseconds(200), 1, 1);\n\n    synchronizer().enable();\n\n    // Start a thread and see if we are under limit. This will wait pre-CAS.\n    synchronizer().waitOn(\"allowed_pre_cas\");\n    std::thread t1([&] { EXPECT_FALSE(rate_limiter_->requestAllowed()); });\n    // Wait until the thread is actually waiting.\n    synchronizer().barrierOn(\"allowed_pre_cas\");\n\n    // Consume a token on this thread, which should cause the CAS to fail on the other thread.\n    EXPECT_TRUE(rate_limiter_->requestAllowed());\n    synchronizer().signal(\"allowed_pre_cas\");\n    t1.join();\n  }\n}\n\n// Verify token bucket functionality with a single token.\nTEST_F(LocalRateLimiterImplTest, TokenBucket) {\n  initialize(std::chrono::milliseconds(200), 1, 1);\n\n  // 1 -> 0 tokens\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  EXPECT_FALSE(rate_limiter_->requestAllowed());\n  EXPECT_FALSE(rate_limiter_->requestAllowed());\n\n  // 0 -> 1 tokens\n  EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr));\n  fill_timer_->invokeCallback();\n\n  // 1 -> 0 tokens\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  
EXPECT_FALSE(rate_limiter_->requestAllowed());\n\n  // 0 -> 1 tokens\n  EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr));\n  fill_timer_->invokeCallback();\n\n  // 1 -> 1 tokens\n  EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr));\n  fill_timer_->invokeCallback();\n\n  // 1 -> 0 tokens\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  EXPECT_FALSE(rate_limiter_->requestAllowed());\n}\n\n// Verify token bucket functionality with max tokens and tokens per fill > 1.\nTEST_F(LocalRateLimiterImplTest, TokenBucketMultipleTokensPerFill) {\n  initialize(std::chrono::milliseconds(200), 2, 2);\n\n  // 2 -> 0 tokens\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  EXPECT_FALSE(rate_limiter_->requestAllowed());\n\n  // 0 -> 2 tokens\n  EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr));\n  fill_timer_->invokeCallback();\n\n  // 2 -> 1 tokens\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n\n  // 1 -> 2 tokens\n  EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr));\n  fill_timer_->invokeCallback();\n\n  // 2 -> 0 tokens\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  EXPECT_FALSE(rate_limiter_->requestAllowed());\n}\n\n// Verify token bucket functionality with max tokens > tokens per fill.\nTEST_F(LocalRateLimiterImplTest, TokenBucketMaxTokensGreaterThanTokensPerFill) {\n  initialize(std::chrono::milliseconds(200), 2, 1);\n\n  // 2 -> 0 tokens\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  EXPECT_FALSE(rate_limiter_->requestAllowed());\n\n  // 0 -> 1 tokens\n  EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr));\n  fill_timer_->invokeCallback();\n\n  // 1 -> 0 tokens\n  EXPECT_TRUE(rate_limiter_->requestAllowed());\n  EXPECT_FALSE(rate_limiter_->requestAllowed());\n}\n\n} 
// namespace LocalRateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/lua/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"lua_test\",\n    srcs = [\"lua_test.cc\"],\n    deps = [\n        \"//source/common/thread_local:thread_local_lib\",\n        \"//source/extensions/filters/common/lua:lua_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"wrappers_test\",\n    srcs = [\"wrappers_test.cc\"],\n    deps = [\n        \":lua_wrappers_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/common/lua:wrappers_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"lua_wrappers_lib\",\n    hdrs = [\"lua_wrappers.h\"],\n    deps = [\n        \"//source/extensions/filters/common/lua:lua_lib\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/common/lua/lua_test.cc",
    "content": "#include <memory>\n\n#include \"common/thread_local/thread_local_impl.h\"\n\n#include \"extensions/filters/common/lua/lua.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Lua {\nnamespace {\n\n// Setting large alignment requirement here so it fails the UBSAN tests if Lua allocated memory is\n// not aligned by Envoy. See https://github.com/envoyproxy/envoy/issues/5551 for details.\nclass alignas(32) TestObject : public BaseLuaObject<TestObject> {\npublic:\n  ~TestObject() override { onDestroy(); }\n\n  static ExportedFunctions exportedFunctions() { return {{\"testCall\", static_luaTestCall}}; }\n\n  MOCK_METHOD(int, doTestCall, (lua_State * state));\n  MOCK_METHOD(void, onDestroy, ());\n\nprivate:\n  DECLARE_LUA_FUNCTION(TestObject, luaTestCall);\n};\n\nint TestObject::luaTestCall(lua_State* state) { return doTestCall(state); }\n\nclass LuaTest : public testing::Test {\npublic:\n  LuaTest() : yield_callback_([this]() { on_yield_.ready(); }) {}\n\n  void setup(const std::string& code) {\n    state_ = std::make_unique<ThreadLocalState>(code, tls_);\n    state_->registerType<TestObject>();\n  }\n\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  ThreadLocalStatePtr state_;\n  std::function<void()> yield_callback_;\n  ReadyWatcher on_yield_;\n};\n\n// Basic ref counting between coroutines.\nTEST_F(LuaTest, CoroutineRefCounting) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n  EXPECT_EQ(LUA_REFNIL, state_->getGlobalRef(state_->registerGlobal(\"not here\")));\n  EXPECT_NE(LUA_REFNIL, state_->getGlobalRef(state_->registerGlobal(\"callMe\")));\n\n  // Start a coroutine but do not hold a reference to the 
object we pass.\n  CoroutinePtr cr1(state_->createCoroutine());\n  TestObject* object1 = TestObject::create(cr1->luaState()).first;\n  cr1->start(state_->getGlobalRef(1), 1, yield_callback_);\n  EXPECT_EQ(cr1->state(), Coroutine::State::Finished);\n  EXPECT_CALL(*object1, onDestroy());\n  lua_gc(cr1->luaState(), LUA_GCCOLLECT, 0);\n  cr1.reset();\n\n  // Start a second coroutine but do hold a reference. Do a gc after finish which should not\n  // collect it. Then unref and collect and it should be gone.\n  CoroutinePtr cr2(state_->createCoroutine());\n  LuaRef<TestObject> ref2(TestObject::create(cr2->luaState()), true);\n  cr2->start(state_->getGlobalRef(1), 1, yield_callback_);\n  EXPECT_EQ(cr2->state(), Coroutine::State::Finished);\n  lua_gc(cr2->luaState(), LUA_GCCOLLECT, 0);\n  EXPECT_CALL(*ref2.get(), onDestroy());\n  ref2.reset();\n  lua_gc(cr2->luaState(), LUA_GCCOLLECT, 0);\n}\n\n// Basic yield/resume functionality.\nTEST_F(LuaTest, YieldAndResume) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      coroutine.yield()\n      object:testCall()\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n  EXPECT_NE(LUA_REFNIL, state_->getGlobalRef(state_->registerGlobal(\"callMe\")));\n\n  CoroutinePtr cr(state_->createCoroutine());\n  LuaRef<TestObject> ref(TestObject::create(cr->luaState()), true);\n  EXPECT_CALL(on_yield_, ready());\n  cr->start(state_->getGlobalRef(0), 1, yield_callback_);\n  EXPECT_EQ(cr->state(), Coroutine::State::Yielded);\n\n  EXPECT_CALL(*ref.get(), doTestCall(_));\n  cr->resume(0, yield_callback_);\n  EXPECT_EQ(cr->state(), Coroutine::State::Finished);\n\n  lua_gc(cr->luaState(), LUA_GCCOLLECT, 0);\n  EXPECT_CALL(*ref.get(), onDestroy());\n  ref.reset();\n  lua_gc(cr->luaState(), LUA_GCCOLLECT, 0);\n}\n\n// Mark dead/live and ref counting across coroutines.\nTEST_F(LuaTest, MarkDead) {\n  const std::string SCRIPT{R\"EOF(\n    function callMeFirst(object)\n      global_object = object\n      
global_object:testCall()\n      coroutine.yield()\n      global_object:testCall()\n    end\n\n    function callMeSecond()\n      global_object:testCall()\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n  EXPECT_NE(LUA_REFNIL, state_->getGlobalRef(state_->registerGlobal(\"callMeFirst\")));\n  EXPECT_NE(LUA_REFNIL, state_->getGlobalRef(state_->registerGlobal(\"callMeSecond\")));\n\n  CoroutinePtr cr1(state_->createCoroutine());\n  LuaDeathRef<TestObject> ref(TestObject::create(cr1->luaState()), true);\n  EXPECT_CALL(*ref.get(), doTestCall(_));\n  EXPECT_CALL(on_yield_, ready());\n  cr1->start(state_->getGlobalRef(0), 1, yield_callback_);\n  EXPECT_EQ(cr1->state(), Coroutine::State::Yielded);\n\n  ref.markDead();\n  CoroutinePtr cr2(state_->createCoroutine());\n  EXPECT_THROW_WITH_MESSAGE(cr2->start(state_->getGlobalRef(1), 0, yield_callback_), LuaException,\n                            \"[string \\\"...\\\"]:10: object used outside of proper scope\");\n  EXPECT_EQ(cr2->state(), Coroutine::State::Finished);\n\n  ref.markLive();\n  EXPECT_CALL(*ref.get(), doTestCall(_));\n  cr1->resume(0, yield_callback_);\n  EXPECT_EQ(cr1->state(), Coroutine::State::Finished);\n\n  lua_gc(cr1->luaState(), LUA_GCCOLLECT, 0);\n  EXPECT_CALL(*ref.get(), onDestroy());\n  ref.reset();\n  lua_gc(cr1->luaState(), LUA_GCCOLLECT, 0);\n}\n\nclass ThreadSafeTest : public testing::Test {\npublic:\n  ThreadSafeTest()\n      : api_(Api::createApiForTest()), main_dispatcher_(api_->allocateDispatcher(\"main\")),\n        worker_dispatcher_(api_->allocateDispatcher(\"worker\")) {}\n\n  // Use real dispatchers to verify that callback functions can be executed correctly.\n  Api::ApiPtr api_;\n  Event::DispatcherPtr main_dispatcher_;\n  Event::DispatcherPtr worker_dispatcher_;\n  ThreadLocal::InstanceImpl tls_;\n\n  std::unique_ptr<ThreadLocalState> state_;\n};\n\n// Test whether ThreadLocalState can be safely released.\nTEST_F(ThreadSafeTest, StateDestructedBeforeWorkerRun) {\n  const 
std::string SCRIPT{R\"EOF(\n    function HelloWorld()\n      print(\"Hello World!\")\n    end\n  )EOF\"};\n\n  tls_.registerThread(*main_dispatcher_, true);\n  EXPECT_EQ(main_dispatcher_.get(), &tls_.dispatcher());\n  tls_.registerThread(*worker_dispatcher_, false);\n\n  // Some callback functions waiting to be executed will be added to the dispatcher of the Worker\n  // thread. The callback functions in the main thread will be executed directly.\n  state_ = std::make_unique<ThreadLocalState>(SCRIPT, tls_);\n  state_->registerType<TestObject>();\n\n  main_dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  // Destroy state_.\n  state_.reset(nullptr);\n\n  // Start a new worker thread to execute the callback functions in the worker dispatcher.\n  Thread::ThreadPtr thread = Thread::threadFactoryForTest().createThread([this]() {\n    worker_dispatcher_->run(Event::Dispatcher::RunType::Block);\n    // Verify we have the expected dispatcher for the new worker thread.\n    EXPECT_EQ(worker_dispatcher_.get(), &tls_.dispatcher());\n  });\n  thread->join();\n\n  tls_.shutdownGlobalThreading();\n  tls_.shutdownThread();\n}\n\n} // namespace\n} // namespace Lua\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/lua/lua_wrappers.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"extensions/filters/common/lua/lua.h\"\n\n#include \"test/mocks/thread_local/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Lua {\n\n// A helper to be called inside the registered closure.\nclass Printer {\npublic:\n  MOCK_METHOD(void, testPrint, (const std::string&), (const));\n};\n\nconst Printer& getPrinter() { CONSTRUCT_ON_FIRST_USE(Printer); }\n\ntemplate <class T> class LuaWrappersTestBase : public testing::Test {\npublic:\n  virtual void setup(const std::string& code) {\n    coroutine_.reset();\n    state_ = std::make_unique<ThreadLocalState>(code, tls_);\n    state_->registerType<T>();\n    coroutine_ = state_->createCoroutine();\n    lua_pushcclosure(coroutine_->luaState(), luaTestPrint, 1);\n    lua_setglobal(coroutine_->luaState(), \"testPrint\");\n    testing::Mock::AllowLeak(&printer_);\n  }\n\n  void start(const std::string& method) {\n    coroutine_->start(state_->getGlobalRef(state_->registerGlobal(method)), 1, yield_callback_);\n  }\n\n  static int luaTestPrint(lua_State* state) {\n    const char* message = luaL_checkstring(state, 1);\n    getPrinter().testPrint(message);\n    return 0;\n  }\n\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  ThreadLocalStatePtr state_;\n  std::function<void()> yield_callback_;\n  CoroutinePtr coroutine_;\n  const Printer& printer_{getPrinter()};\n};\n\n} // namespace Lua\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/lua/wrappers_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/common/lua/wrappers.h\"\n\n#include \"test/extensions/filters/common/lua/lua_wrappers.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace Lua {\nnamespace {\n\nclass LuaBufferWrapperTest : public LuaWrappersTestBase<BufferWrapper> {};\n\nclass LuaMetadataMapWrapperTest : public LuaWrappersTestBase<MetadataMapWrapper> {\npublic:\n  void setup(const std::string& script) override {\n    LuaWrappersTestBase<MetadataMapWrapper>::setup(script);\n    state_->registerType<MetadataMapIterator>();\n  }\n\n  envoy::config::core::v3::Metadata parseMetadataFromYaml(const std::string& yaml_string) {\n    envoy::config::core::v3::Metadata metadata;\n    TestUtility::loadFromYaml(yaml_string, metadata);\n    return metadata;\n  }\n};\n\nclass LuaConnectionWrapperTest : public LuaWrappersTestBase<ConnectionWrapper> {\npublic:\n  void setup(const std::string& script) override {\n    LuaWrappersTestBase<ConnectionWrapper>::setup(script);\n    state_->registerType<SslConnectionWrapper>();\n    ssl_ = std::make_shared<NiceMock<Envoy::Ssl::MockConnectionInfo>>();\n  }\n\nprotected:\n  void expectSecureConnection(const bool secure) {\n    const std::string SCRIPT{R\"EOF(\n      function callMe(object)\n        if object:ssl() == nil then\n          testPrint(\"plain\")\n        else\n          testPrint(\"secure\")\n        end\n        testPrint(type(object:ssl()))\n      end\n    )EOF\"};\n    testing::InSequence s;\n    setup(SCRIPT);\n\n    // Setup secure connection if required.\n    EXPECT_CALL(Const(connection_), ssl()).WillOnce(Return(secure ? ssl_ : nullptr));\n\n    ConnectionWrapper::create(coroutine_->luaState(), &connection_);\n    EXPECT_CALL(printer_, testPrint(secure ? 
\"secure\" : \"plain\"));\n    EXPECT_CALL(Const(connection_), ssl()).WillOnce(Return(secure ? ssl_ : nullptr));\n    EXPECT_CALL(printer_, testPrint(secure ? \"userdata\" : \"nil\"));\n    start(\"callMe\");\n  }\n\n  NiceMock<Envoy::Network::MockConnection> connection_;\n  std::shared_ptr<NiceMock<Envoy::Ssl::MockConnectionInfo>> ssl_;\n};\n\n// Basic buffer wrapper methods test.\nTEST_F(LuaBufferWrapperTest, Methods) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      testPrint(object:length())\n      testPrint(object:getBytes(0, 2))\n      testPrint(object:getBytes(6, 5))\n      testPrint(object:setBytes(\"neverland\"))\n      testPrint(object:getBytes(0, 5))\n    end\n  )EOF\"};\n\n  setup(SCRIPT);\n  Buffer::OwnedImpl data(\"hello world\");\n  BufferWrapper::create(coroutine_->luaState(), data);\n  EXPECT_CALL(printer_, testPrint(\"11\"));\n  EXPECT_CALL(printer_, testPrint(\"he\"));\n  EXPECT_CALL(printer_, testPrint(\"world\"));\n  EXPECT_CALL(printer_, testPrint(\"9\"));\n  EXPECT_CALL(printer_, testPrint(\"never\"));\n  start(\"callMe\");\n}\n\n// Invalid params for the buffer wrapper getBytes() call.\nTEST_F(LuaBufferWrapperTest, GetBytesInvalidParams) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      object:getBytes(100, 100)\n    end\n  )EOF\"};\n\n  setup(SCRIPT);\n  Buffer::OwnedImpl data(\"hello world\");\n  BufferWrapper::create(coroutine_->luaState(), data);\n  EXPECT_THROW_WITH_MESSAGE(\n      start(\"callMe\"), LuaException,\n      \"[string \\\"...\\\"]:3: index/length must be >= 0 and (index + length) must be <= buffer size\");\n}\n\n// Basic methods test for the metadata wrapper.\nTEST_F(LuaMetadataMapWrapperTest, Methods) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      recipe = object:get(\"make.delicious.bread\")\n\n      testPrint(recipe[\"name\"])\n      testPrint(recipe[\"origin\"])\n\n      testPrint(tostring(recipe[\"lactose\"]))\n      
testPrint(tostring(recipe[\"nut\"]))\n\n      testPrint(tostring(recipe[\"portion\"]))\n      testPrint(tostring(recipe[\"minutes\"]))\n\n      testPrint(recipe[\"butter\"][\"type\"])\n      testPrint(tostring(recipe[\"butter\"][\"expensive\"]))\n\n      for i, ingredient in ipairs(recipe[\"ingredients\"]) do\n        testPrint(ingredient)\n      end\n\n      testPrint(tostring(object:get(\"make.nothing\")[\"value\"]))\n\n      local function nRetVals(...)\n        return select('#',...)\n      end\n\n      testPrint(tostring(nRetVals(object:get(\"make.coffee\"))))\n    end\n    )EOF\"};\n\n  testing::InSequence s;\n  setup(SCRIPT);\n\n  const std::string yaml = R\"EOF(\n    filter_metadata:\n      envoy.filters.http.lua:\n        make.delicious.bread:\n          name: pulla\n          origin: finland\n          lactose: true\n          nut: false\n          portion: 5\n          minutes: 30.5\n          butter:\n            type: grass_fed\n            expensive: false\n          ingredients:\n            - flour\n            - milk\n        make.delicious.cookie:\n          name: chewy\n        make.nothing:\n          name: nothing\n          value: ~\n        make.nothing1:\n          name: nothing\n          value: ~\n    )EOF\";\n\n  envoy::config::core::v3::Metadata metadata = parseMetadataFromYaml(yaml);\n  const auto filter_metadata = metadata.filter_metadata().at(\"envoy.filters.http.lua\");\n  MetadataMapWrapper::create(coroutine_->luaState(), filter_metadata);\n\n  EXPECT_CALL(printer_, testPrint(\"pulla\"));\n  EXPECT_CALL(printer_, testPrint(\"finland\"));\n\n  EXPECT_CALL(printer_, testPrint(\"true\"));\n  EXPECT_CALL(printer_, testPrint(\"false\"));\n\n  EXPECT_CALL(printer_, testPrint(\"5\"));\n  EXPECT_CALL(printer_, testPrint(\"30.5\"));\n\n  EXPECT_CALL(printer_, testPrint(\"grass_fed\"));\n  EXPECT_CALL(printer_, testPrint(\"false\"));\n\n  EXPECT_CALL(printer_, testPrint(\"flour\"));\n  EXPECT_CALL(printer_, testPrint(\"milk\"));\n\n  
EXPECT_CALL(printer_, testPrint(\"nil\"));\n  EXPECT_CALL(printer_, testPrint(\"0\"));\n\n  start(\"callMe\");\n}\n\n// Iterate over the (unordered) underlying map.\nTEST_F(LuaMetadataMapWrapperTest, Iterators) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      for key, value in pairs(object) do\n        testPrint(string.format(\"'%s' '%s'\", key, value[\"name\"]))\n      end\n    end\n    )EOF\"};\n\n  const std::string yaml = R\"EOF(\n    filter_metadata:\n      envoy.filters.http.lua:\n        make.delicious.bread:\n          name: pulla\n        make.delicious.cookie:\n          name: chewy\n        make.nothing0:\n          name: nothing\n          value: ~\n        make.nothing1:\n          name: nothing\n          value: ~\n        make.nothing2:\n          name: nothing\n          value: ~\n    )EOF\";\n\n  // The underlying map is unordered.\n  setup(SCRIPT);\n\n  envoy::config::core::v3::Metadata metadata = parseMetadataFromYaml(yaml);\n  const auto filter_metadata = metadata.filter_metadata().at(\"envoy.filters.http.lua\");\n  MetadataMapWrapper::create(coroutine_->luaState(), filter_metadata);\n\n  EXPECT_CALL(printer_, testPrint(\"'make.delicious.bread' 'pulla'\"));\n  EXPECT_CALL(printer_, testPrint(\"'make.delicious.cookie' 'chewy'\"));\n  EXPECT_CALL(printer_, testPrint(\"'make.nothing0' 'nothing'\"));\n  EXPECT_CALL(printer_, testPrint(\"'make.nothing1' 'nothing'\"));\n  EXPECT_CALL(printer_, testPrint(\"'make.nothing2' 'nothing'\"));\n\n  start(\"callMe\");\n}\n\n// Don't finish iteration.\nTEST_F(LuaMetadataMapWrapperTest, DontFinishIteration) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      iterator = pairs(object)\n      key, value = iterator()\n      iterator2 = pairs(object)\n    end\n  )EOF\"};\n\n  testing::InSequence s;\n  setup(SCRIPT);\n\n  const std::string yaml = R\"EOF(\n    filter_metadata:\n      envoy.filters.http.lua:\n        make.delicious.bread:\n          name: pulla\n        
make.delicious.cookie:\n          name: chewy\n        make.nothing:\n          name: nothing\n    )EOF\";\n\n  envoy::config::core::v3::Metadata metadata = parseMetadataFromYaml(yaml);\n  const auto filter_metadata = metadata.filter_metadata().at(\"envoy.filters.http.lua\");\n  MetadataMapWrapper::create(coroutine_->luaState(), filter_metadata);\n  EXPECT_THROW_WITH_MESSAGE(\n      start(\"callMe\"), LuaException,\n      \"[string \\\"...\\\"]:5: cannot create a second iterator before completing the first\");\n}\n\nTEST_F(LuaConnectionWrapperTest, Secure) {\n  expectSecureConnection(true);\n  expectSecureConnection(false);\n}\n\n} // namespace\n} // namespace Lua\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/original_src/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"original_src_socket_option_test\",\n    srcs = [\"original_src_socket_option_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/extensions/filters/common/original_src:original_src_socket_option_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/common/original_src/original_src_socket_option_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/address.h\"\n\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/common/original_src/original_src_socket_option.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace OriginalSrc {\nnamespace {\n\nclass OriginalSrcSocketOptionTest : public testing::Test {\npublic:\n  std::unique_ptr<OriginalSrcSocketOption>\n  makeOptionByAddress(const Network::Address::InstanceConstSharedPtr& address) {\n    return std::make_unique<OriginalSrcSocketOption>(address);\n  }\n\nprotected:\n  NiceMock<Network::MockConnectionSocket> socket_;\n  std::vector<uint8_t> key_;\n};\n\nTEST_F(OriginalSrcSocketOptionTest, TestSetOptionPreBindSetsAddress) {\n  const auto address = Network::Utility::parseInternetAddress(\"127.0.0.2\");\n  auto option = makeOptionByAddress(address);\n  EXPECT_CALL(socket_, setLocalAddress(PointeesEq(address)));\n  EXPECT_EQ(option->setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND), true);\n}\n\nTEST_F(OriginalSrcSocketOptionTest, TestSetOptionPreBindSetsAddressSecond) {\n  const auto address = Network::Utility::parseInternetAddress(\"1.2.3.4\");\n  auto option = makeOptionByAddress(address);\n  EXPECT_CALL(socket_, setLocalAddress(PointeesEq(address)));\n  EXPECT_EQ(option->setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND), true);\n}\n\nTEST_F(OriginalSrcSocketOptionTest, TestSetOptionNotPrebindDoesNotSetAddress) {\n  const auto address = Network::Utility::parseInternetAddress(\"1.2.3.4\");\n  auto option = makeOptionByAddress(address);\n  EXPECT_CALL(socket_, setLocalAddress(_)).Times(0);\n  EXPECT_EQ(option->setOption(socket_, 
envoy::config::core::v3::SocketOption::STATE_LISTENING),\n            true);\n}\n\nTEST_F(OriginalSrcSocketOptionTest, TestIpv4HashKey) {\n  const auto address = Network::Utility::parseInternetAddress(\"1.2.3.4\");\n  auto option = makeOptionByAddress(address);\n  option->hashKey(key_);\n\n  // The ip address broken into big-endian octets.\n  std::vector<uint8_t> expected_key = {1, 2, 3, 4};\n  EXPECT_EQ(key_, expected_key);\n}\n\nTEST_F(OriginalSrcSocketOptionTest, TestIpv4HashKeyOther) {\n  const auto address = Network::Utility::parseInternetAddress(\"255.254.253.0\");\n  auto option = makeOptionByAddress(address);\n  option->hashKey(key_);\n\n  // The ip address broken into big-endian octets.\n  std::vector<uint8_t> expected_key = {255, 254, 253, 0};\n  EXPECT_EQ(key_, expected_key);\n}\n\nTEST_F(OriginalSrcSocketOptionTest, TestIpv6HashKey) {\n  const auto address = Network::Utility::parseInternetAddress(\"102:304:506:708:90a:b0c:d0e:f00\");\n  auto option = makeOptionByAddress(address);\n  option->hashKey(key_);\n\n  std::vector<uint8_t> expected_key = {0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8,\n                                       0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x0};\n  EXPECT_EQ(key_, expected_key);\n}\n\nTEST_F(OriginalSrcSocketOptionTest, TestIpv6HashKeyOther) {\n  const auto address = Network::Utility::parseInternetAddress(\"F02:304:519:708:90a:b0e:FFFF:0000\");\n  auto option = makeOptionByAddress(address);\n  option->hashKey(key_);\n\n  std::vector<uint8_t> expected_key = {0xF, 0x2, 0x3, 0x4, 0x5,  0x19, 0x7, 0x8,\n                                       0x9, 0xa, 0xb, 0xe, 0xff, 0xff, 0x0, 0x0};\n  EXPECT_EQ(key_, expected_key);\n}\n\nTEST_F(OriginalSrcSocketOptionTest, TestOptionDetailsNotSupported) {\n  const auto address = Network::Utility::parseInternetAddress(\"255.254.253.0\");\n  auto option = makeOptionByAddress(address);\n\n  auto details =\n      option->getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n\n  
EXPECT_FALSE(details.has_value());\n}\n\n} // namespace\n} // namespace OriginalSrc\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"ratelimit_impl_test\",\n    srcs = [\"ratelimit_impl_test.cc\"],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_lib\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"ratelimit_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/ratelimit:ratelimit_interface\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_client_interface\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"ratelimit_utils\",\n    hdrs = [\"utils.h\"],\n    deps = [\n        \"@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/common/ratelimit/mocks.cc",
    "content": "#include \"mocks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RateLimit {\n\nMockClient::MockClient() = default;\nMockClient::~MockClient() = default;\n\n} // namespace RateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ratelimit/mocks.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/ratelimit/ratelimit.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RateLimit {\n\nclass MockClient : public Client {\npublic:\n  MockClient();\n  ~MockClient() override;\n\n  // RateLimit::Client\n  MOCK_METHOD(void, cancel, ());\n  MOCK_METHOD(void, limit,\n              (RequestCallbacks & callbacks, const std::string& domain,\n               const std::vector<Envoy::RateLimit::Descriptor>& descriptors,\n               Tracing::Span& parent_span));\n};\n\n} // namespace RateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/service/ratelimit/v3/rls.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit_impl.h\"\n\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::Ref;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RateLimit {\nnamespace {\n\nclass MockRequestCallbacks : public RequestCallbacks {\npublic:\n  void complete(LimitStatus status, DescriptorStatusListPtr&& descriptor_statuses,\n                Http::ResponseHeaderMapPtr&& response_headers_to_add,\n                Http::RequestHeaderMapPtr&& request_headers_to_add) override {\n    complete_(status, descriptor_statuses.get(), response_headers_to_add.get(),\n              request_headers_to_add.get());\n  }\n\n  MOCK_METHOD(void, complete_,\n              (LimitStatus status, const DescriptorStatusList* descriptor_statuses,\n               const Http::ResponseHeaderMap* response_headers_to_add,\n               const Http::RequestHeaderMap* request_headers_to_add));\n};\n\nclass RateLimitGrpcClientTest : public testing::Test {\npublic:\n  RateLimitGrpcClientTest()\n      : async_client_(new Grpc::MockAsyncClient()),\n        client_(Grpc::RawAsyncClientPtr{async_client_}, absl::optional<std::chrono::milliseconds>(),\n                envoy::config::core::v3::ApiVersion::AUTO) {}\n\n  Grpc::MockAsyncClient* async_client_;\n  Grpc::MockAsyncRequest async_request_;\n  GrpcClientImpl client_;\n  MockRequestCallbacks 
request_callbacks_;\n  Tracing::MockSpan span_;\n};\n\nTEST_F(RateLimitGrpcClientTest, Basic) {\n  std::unique_ptr<envoy::service::ratelimit::v3::RateLimitResponse> response;\n\n  {\n    envoy::service::ratelimit::v3::RateLimitRequest request;\n    Http::TestRequestHeaderMapImpl headers;\n    GrpcClientImpl::createRequest(request, \"foo\", {{{{\"foo\", \"bar\"}}}});\n    EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), Ref(client_), _, _))\n        .WillOnce(\n            Invoke([this](absl::string_view service_full_name, absl::string_view method_name,\n                          Buffer::InstancePtr&&, Grpc::RawAsyncRequestCallbacks&, Tracing::Span&,\n                          const Http::AsyncClient::RequestOptions&) -> Grpc::AsyncRequest* {\n              std::string service_name = \"envoy.service.ratelimit.v2.RateLimitService\";\n              EXPECT_EQ(service_name, service_full_name);\n              EXPECT_EQ(\"ShouldRateLimit\", method_name);\n              return &async_request_;\n            }));\n\n    client_.limit(request_callbacks_, \"foo\", {{{{\"foo\", \"bar\"}}}}, Tracing::NullSpan::instance());\n\n    client_.onCreateInitialMetadata(headers);\n    EXPECT_EQ(nullptr, headers.RequestId());\n\n    response = std::make_unique<envoy::service::ratelimit::v3::RateLimitResponse>();\n    response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT);\n    EXPECT_CALL(span_, setTag(Eq(\"ratelimit_status\"), Eq(\"over_limit\")));\n    EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OverLimit, _, _, _));\n    client_.onSuccess(std::move(response), span_);\n  }\n\n  {\n    envoy::service::ratelimit::v3::RateLimitRequest request;\n    Http::TestRequestHeaderMapImpl headers;\n    GrpcClientImpl::createRequest(request, \"foo\", {{{{\"foo\", \"bar\"}, {\"bar\", \"baz\"}}}});\n    EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), _, _, _))\n        .WillOnce(Return(&async_request_));\n\n    
client_.limit(request_callbacks_, \"foo\", {{{{\"foo\", \"bar\"}, {\"bar\", \"baz\"}}}},\n                  Tracing::NullSpan::instance());\n\n    client_.onCreateInitialMetadata(headers);\n\n    response = std::make_unique<envoy::service::ratelimit::v3::RateLimitResponse>();\n    response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OK);\n    EXPECT_CALL(span_, setTag(Eq(\"ratelimit_status\"), Eq(\"ok\")));\n    EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _, _));\n    client_.onSuccess(std::move(response), span_);\n  }\n\n  {\n    envoy::service::ratelimit::v3::RateLimitRequest request;\n    GrpcClientImpl::createRequest(\n        request, \"foo\",\n        {{{{\"foo\", \"bar\"}, {\"bar\", \"baz\"}}}, {{{\"foo2\", \"bar2\"}, {\"bar2\", \"baz2\"}}}});\n    EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), _, _, _))\n        .WillOnce(Return(&async_request_));\n\n    client_.limit(request_callbacks_, \"foo\",\n                  {{{{\"foo\", \"bar\"}, {\"bar\", \"baz\"}}}, {{{\"foo2\", \"bar2\"}, {\"bar2\", \"baz2\"}}}},\n                  Tracing::NullSpan::instance());\n\n    response = std::make_unique<envoy::service::ratelimit::v3::RateLimitResponse>();\n    EXPECT_CALL(request_callbacks_, complete_(LimitStatus::Error, _, _, _));\n    client_.onFailure(Grpc::Status::Unknown, \"\", span_);\n  }\n\n  {\n    envoy::service::ratelimit::v3::RateLimitRequest request;\n    Http::TestRequestHeaderMapImpl headers;\n    GrpcClientImpl::createRequest(\n        request, \"foo\",\n        {{{{\"foo\", \"bar\"}, {\"bar\", \"baz\"}}, {{42, envoy::type::v3::RateLimitUnit::MINUTE}}}});\n    EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), _, _, _))\n        .WillOnce(Return(&async_request_));\n\n    client_.limit(\n        request_callbacks_, \"foo\",\n        {{{{\"foo\", \"bar\"}, {\"bar\", \"baz\"}}, {{42, envoy::type::v3::RateLimitUnit::MINUTE}}}},\n        
Tracing::NullSpan::instance());\n\n    client_.onCreateInitialMetadata(headers);\n\n    response = std::make_unique<envoy::service::ratelimit::v3::RateLimitResponse>();\n    response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OK);\n    EXPECT_CALL(span_, setTag(Eq(\"ratelimit_status\"), Eq(\"ok\")));\n    EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _, _));\n    client_.onSuccess(std::move(response), span_);\n  }\n}\n\nTEST_F(RateLimitGrpcClientTest, Cancel) {\n  std::unique_ptr<envoy::service::ratelimit::v3::RateLimitResponse> response;\n\n  EXPECT_CALL(*async_client_, sendRaw(_, _, _, _, _, _)).WillOnce(Return(&async_request_));\n\n  client_.limit(request_callbacks_, \"foo\", {{{{\"foo\", \"bar\"}}}}, Tracing::NullSpan::instance());\n\n  EXPECT_CALL(async_request_, cancel());\n  client_.cancel();\n}\n\n} // namespace\n} // namespace RateLimit\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/ratelimit/utils.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"envoy/service/ratelimit/v3/rls.pb.h\"\n\nnamespace Envoy {\nnamespace RateLimit {\n\ninline envoy::service::ratelimit::v3::RateLimitResponse_DescriptorStatus\nbuildDescriptorStatus(uint32_t requests_per_unit,\n                      envoy::service::ratelimit::v3::RateLimitResponse_RateLimit_Unit unit,\n                      std::string name, uint32_t limit_remaining, uint32_t seconds_until_reset) {\n  envoy::service::ratelimit::v3::RateLimitResponse_DescriptorStatus statusMsg;\n  statusMsg.set_limit_remaining(limit_remaining);\n  statusMsg.mutable_duration_until_reset()->set_seconds(seconds_until_reset);\n  if (requests_per_unit) {\n    envoy::service::ratelimit::v3::RateLimitResponse_RateLimit* limitMsg =\n        statusMsg.mutable_current_limit();\n    limitMsg->set_requests_per_unit(requests_per_unit);\n    limitMsg->set_unit(unit);\n    limitMsg->set_name(name);\n  }\n  return statusMsg;\n}\n\n} // namespace RateLimit\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/rbac/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_mock\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"matchers_test\",\n    srcs = [\"matchers_test.cc\"],\n    extension_name = \"envoy.filters.http.rbac\",\n    deps = [\n        \"//source/extensions/filters/common/expr:evaluator_lib\",\n        \"//source/extensions/filters/common/rbac:matchers_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"engine_impl_test\",\n    srcs = [\"engine_impl_test.cc\"],\n    extension_name = \"envoy.filters.http.rbac\",\n    deps = [\n        \"//source/extensions/filters/common/rbac:engine_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    extension_name = \"envoy.filters.http.rbac\",\n    deps = [\n        \"//source/extensions/filters/common/rbac:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_mock(\n    name = \"engine_mocks\",\n    hdrs = [\"mocks.h\"],\n    extension_name = \"envoy.filters.http.rbac\",\n    deps = [\n        \"//source/extensions/filters/common/rbac:engine_lib\",\n        
\"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/common/rbac/engine_impl_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/rbac/v3/rbac.pb.h\"\n#include \"envoy/config/rbac/v3/rbac.pb.validate.h\"\n\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/common/rbac/engine_impl.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Const;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\nnamespace {\n\nenum class LogResult { Yes, No, Undecided };\n\nvoid checkEngine(\n    RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, LogResult expected_log,\n    StreamInfo::StreamInfo& info,\n    const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(),\n    const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl()) {\n\n  bool engineRes = engine.handleAction(connection, headers, info, nullptr);\n  EXPECT_EQ(expected, engineRes);\n\n  if (expected_log != LogResult::Undecided) {\n    auto filter_meta = info.dynamicMetadata().filter_metadata().at(\n        RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace);\n    EXPECT_EQ(expected_log == LogResult::Yes,\n              filter_meta.fields()\n                  .at(RBAC::DynamicMetadataKeysSingleton::get().AccessLogKey)\n                  .bool_value());\n  } else {\n    EXPECT_EQ(info.dynamicMetadata().filter_metadata().end(),\n              info.dynamicMetadata().filter_metadata().find(\n                  Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace));\n  }\n}\n\nvoid checkEngine(\n    RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, LogResult expected_log,\n    const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(),\n    const 
Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl()) {\n\n  NiceMock<StreamInfo::MockStreamInfo> empty_info;\n  checkEngine(engine, expected, expected_log, empty_info, connection, headers);\n}\n\nvoid onMetadata(NiceMock<StreamInfo::MockStreamInfo>& info) {\n  ON_CALL(info, setDynamicMetadata(\"envoy.common\", _))\n      .WillByDefault(Invoke([&info](const std::string&, const ProtobufWkt::Struct& obj) {\n        (*info.metadata_.mutable_filter_metadata())[\"envoy.common\"] = obj;\n      }));\n}\n\nTEST(RoleBasedAccessControlEngineImpl, Disabled) {\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  RBAC::RoleBasedAccessControlEngineImpl engine_allow(rbac);\n  checkEngine(engine_allow, false, LogResult::Undecided);\n\n  rbac.set_action(envoy::config::rbac::v3::RBAC::DENY);\n  RBAC::RoleBasedAccessControlEngineImpl engine_deny(rbac);\n  checkEngine(engine_deny, true, LogResult::Undecided);\n}\n\n// Test various invalid policies to validate the fix for\n// https://github.com/envoyproxy/envoy/issues/8715.\nTEST(RoleBasedAccessControlEngineImpl, InvalidConfig) {\n  {\n    envoy::config::rbac::v3::RBAC rbac;\n    rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n    envoy::config::rbac::v3::Policy policy;\n    (*rbac.mutable_policies())[\"foo\"] = policy;\n\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(rbac), EnvoyException,\n                            \"RBACValidationError\\\\.Policies.*PolicyValidationError\\\\.Permissions\"\n                            \".*value must contain at least\")\n  }\n\n  {\n    envoy::config::rbac::v3::RBAC rbac;\n    rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n    envoy::config::rbac::v3::Policy policy;\n    policy.add_permissions();\n    (*rbac.mutable_policies())[\"foo\"] = policy;\n\n    EXPECT_THROW_WITH_REGEX(\n        TestUtility::validate(rbac), EnvoyException,\n        
\"RBACValidationError\\\\.Policies.*PolicyValidationError\\\\.Permissions.*rule.*is required\");\n  }\n\n  {\n    envoy::config::rbac::v3::RBAC rbac;\n    rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n    envoy::config::rbac::v3::Policy policy;\n    auto* permission = policy.add_permissions();\n    auto* and_rules = permission->mutable_and_rules();\n    and_rules->add_rules();\n    (*rbac.mutable_policies())[\"foo\"] = policy;\n\n    EXPECT_THROW_WITH_REGEX(\n        TestUtility::validate(rbac), EnvoyException,\n        \"RBACValidationError\\\\.Policies.*PolicyValidationError\\\\.Permissions\"\n        \".*PermissionValidationError\\\\.AndRules.*SetValidationError\\\\.Rules.*rule.*is required\");\n  }\n\n  {\n    envoy::config::rbac::v3::RBAC rbac;\n    rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n    envoy::config::rbac::v3::Policy policy;\n    auto* permission = policy.add_permissions();\n    permission->set_any(true);\n    (*rbac.mutable_policies())[\"foo\"] = policy;\n\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(rbac), EnvoyException,\n                            \"RBACValidationError\\\\.Policies.*PolicyValidationError\\\\.Principals\"\n                            \".*value must contain at least\")\n  }\n\n  {\n    envoy::config::rbac::v3::RBAC rbac;\n    rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n    envoy::config::rbac::v3::Policy policy;\n    auto* permission = policy.add_permissions();\n    permission->set_any(true);\n    policy.add_principals();\n    (*rbac.mutable_policies())[\"foo\"] = policy;\n\n    EXPECT_THROW_WITH_REGEX(TestUtility::validate(rbac), EnvoyException,\n                            \"RBACValidationError\\\\.Policies.*PolicyValidationError\\\\.Principals\"\n                            \".*identifier.*is required\");\n  }\n\n  {\n    envoy::config::rbac::v3::RBAC rbac;\n    rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n    envoy::config::rbac::v3::Policy policy;\n    auto* permission = 
policy.add_permissions();\n    permission->set_any(true);\n    auto* principal = policy.add_principals();\n    auto* and_ids = principal->mutable_and_ids();\n    and_ids->add_ids();\n    (*rbac.mutable_policies())[\"foo\"] = policy;\n\n    EXPECT_THROW_WITH_REGEX(\n        TestUtility::validate(rbac), EnvoyException,\n        \"RBACValidationError\\\\.Policies.*PolicyValidationError\\\\.Principals\"\n        \".*PrincipalValidationError\\\\.AndIds.*SetValidationError\\\\.Ids.*identifier.*is required\");\n  }\n}\n\nTEST(RoleBasedAccessControlEngineImpl, AllowedAllowlist) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_destination_port(123);\n  policy.add_principals()->set_any(true);\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 123, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr));\n  checkEngine(engine, true, LogResult::Undecided, info, conn, headers);\n\n  addr = Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 456, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr));\n  checkEngine(engine, false, LogResult::Undecided, info, conn, headers);\n}\n\nTEST(RoleBasedAccessControlEngineImpl, DeniedDenylist) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_destination_port(123);\n  policy.add_principals()->set_any(true);\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::DENY);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl 
engine(rbac);\n\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 123, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr));\n  checkEngine(engine, false, LogResult::Undecided, info, conn, headers);\n\n  addr = Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 456, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr));\n  checkEngine(engine, true, LogResult::Undecided, info, conn, headers);\n}\n\nTEST(RoleBasedAccessControlEngineImpl, BasicCondition) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_any(true);\n  policy.add_principals()->set_any(true);\n  policy.mutable_condition()->MergeFrom(\n      TestUtility::parseYaml<google::api::expr::v1alpha1::Expr>(R\"EOF(\n    const_expr:\n      bool_value: false\n  )EOF\"));\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n  checkEngine(engine, false, LogResult::Undecided);\n}\n\nTEST(RoleBasedAccessControlEngineImpl, MalformedCondition) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_any(true);\n  policy.add_principals()->set_any(true);\n  policy.mutable_condition()->MergeFrom(\n      TestUtility::parseYaml<google::api::expr::v1alpha1::Expr>(R\"EOF(\n    call_expr:\n      function: undefined_extent\n      args:\n      - const_expr:\n          bool_value: false\n  )EOF\"));\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n\n  EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl engine(rbac), EnvoyException,\n             
             \"failed to create an expression: .*\");\n\n  rbac.set_action(envoy::config::rbac::v3::RBAC::LOG);\n  EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl engine_log(rbac), EnvoyException,\n                          \"failed to create an expression: .*\");\n}\n\nTEST(RoleBasedAccessControlEngineImpl, MistypedCondition) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_any(true);\n  policy.add_principals()->set_any(true);\n  policy.mutable_condition()->MergeFrom(\n      TestUtility::parseYaml<google::api::expr::v1alpha1::Expr>(R\"EOF(\n    const_expr:\n      int64_value: 13\n  )EOF\"));\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n  checkEngine(engine, false, LogResult::Undecided);\n}\n\nTEST(RoleBasedAccessControlEngineImpl, EvaluationFailure) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_any(true);\n  policy.add_principals()->set_any(true);\n  policy.mutable_condition()->MergeFrom(\n      TestUtility::parseYaml<google::api::expr::v1alpha1::Expr>(R\"EOF(\n    select_expr:\n      operand:\n        const_expr:\n          string_value: request\n      field: undefined\n  )EOF\"));\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n  checkEngine(engine, false, LogResult::Undecided);\n}\n\nTEST(RoleBasedAccessControlEngineImpl, ErrorCondition) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_any(true);\n  policy.add_principals()->set_any(true);\n  policy.mutable_condition()->MergeFrom(\n      TestUtility::parseYaml<google::api::expr::v1alpha1::Expr>(R\"EOF(\n    call_expr:\n      function: _[_]\n      args:\n      - select_expr:\n          operand:\n    
        ident_expr:\n              name: request\n          field: undefined\n      - const_expr:\n          string_value: foo\n  )EOF\"));\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n  checkEngine(engine, false, LogResult::Undecided, Envoy::Network::MockConnection());\n}\n\nTEST(RoleBasedAccessControlEngineImpl, HeaderCondition) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_any(true);\n  policy.add_principals()->set_any(true);\n  policy.mutable_condition()->MergeFrom(\n      TestUtility::parseYaml<google::api::expr::v1alpha1::Expr>(R\"EOF(\n    call_expr:\n      function: _==_\n      args:\n      - call_expr:\n          function: _[_]\n          args:\n          - select_expr:\n              operand:\n                ident_expr:\n                  name: request\n              field: headers\n          - const_expr:\n              string_value: foo\n      - const_expr:\n          string_value: bar\n  )EOF\"));\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  Envoy::Http::LowerCaseString key(\"foo\");\n  std::string value = \"bar\";\n  headers.setReference(key, value);\n\n  checkEngine(engine, true, LogResult::Undecided, Envoy::Network::MockConnection(), headers);\n}\n\nTEST(RoleBasedAccessControlEngineImpl, MetadataCondition) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_any(true);\n  policy.add_principals()->set_any(true);\n  policy.mutable_condition()->MergeFrom(\n      TestUtility::parseYaml<google::api::expr::v1alpha1::Expr>(R\"EOF(\n    call_expr:\n      function: _==_\n      args:\n      - call_expr:\n          function: _[_]\n      
    args:\n          - call_expr:\n              function: _[_]\n              args:\n              - select_expr:\n                  operand:\n                    ident_expr:\n                      name: metadata\n                  field: filter_metadata\n              - const_expr:\n                  string_value: other\n          - const_expr:\n              string_value: label\n      - const_expr:\n          string_value: prod\n  )EOF\"));\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n\n  auto label = MessageUtil::keyValueStruct(\"label\", \"prod\");\n  envoy::config::core::v3::Metadata metadata;\n  metadata.mutable_filter_metadata()->insert(\n      Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\"other\", label));\n  EXPECT_CALL(Const(info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n\n  checkEngine(engine, true, LogResult::Undecided, info, Envoy::Network::MockConnection(), headers);\n}\n\nTEST(RoleBasedAccessControlEngineImpl, ConjunctiveCondition) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_destination_port(123);\n  policy.add_principals()->set_any(true);\n  policy.mutable_condition()->MergeFrom(\n      TestUtility::parseYaml<google::api::expr::v1alpha1::Expr>(R\"EOF(\n    const_expr:\n      bool_value: false\n  )EOF\"));\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      
Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 123, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(1).WillRepeatedly(ReturnRef(addr));\n  checkEngine(engine, false, LogResult::Undecided, info, conn, headers);\n}\n\n// Log tests\nTEST(RoleBasedAccessControlEngineImpl, DisabledLog) {\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  onMetadata(info);\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::LOG);\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n  checkEngine(engine, true, RBAC::LogResult::No, info);\n}\n\nTEST(RoleBasedAccessControlEngineImpl, LogIfMatched) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_destination_port(123);\n  policy.add_principals()->set_any(true);\n\n  envoy::config::rbac::v3::RBAC rbac;\n  rbac.set_action(envoy::config::rbac::v3::RBAC::LOG);\n  (*rbac.mutable_policies())[\"foo\"] = policy;\n  RBAC::RoleBasedAccessControlEngineImpl engine(rbac);\n\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  onMetadata(info);\n\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 123, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr));\n  checkEngine(engine, true, RBAC::LogResult::Yes, info, conn, headers);\n\n  addr = Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 456, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr));\n  checkEngine(engine, true, RBAC::LogResult::No, info, conn, headers);\n}\n\n} // namespace\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/rbac/matchers_test.cc",
    "content": "#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/rbac/v3/rbac.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/type/matcher/v3/metadata.pb.h\"\n\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/common/expr/evaluator.h\"\n#include \"extensions/filters/common/rbac/matchers.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Const;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\nnamespace {\n\nvoid checkMatcher(\n    const RBAC::Matcher& matcher, bool expected,\n    const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(),\n    const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl(),\n    const StreamInfo::StreamInfo& info = NiceMock<StreamInfo::MockStreamInfo>()) {\n  EXPECT_EQ(expected, matcher.matches(connection, headers, info));\n}\n\nTEST(AlwaysMatcher, AlwaysMatches) { checkMatcher(RBAC::AlwaysMatcher(), true); }\n\nTEST(AndMatcher, Permission_Set) {\n  envoy::config::rbac::v3::Permission::Set set;\n  envoy::config::rbac::v3::Permission* perm = set.add_rules();\n  perm->set_any(true);\n\n  checkMatcher(RBAC::AndMatcher(set), true);\n\n  perm = set.add_rules();\n  perm->set_destination_port(123);\n\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 123, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr));\n\n  checkMatcher(RBAC::AndMatcher(set), true, conn, headers, info);\n\n  addr = 
Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 8080, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr));\n\n  checkMatcher(RBAC::AndMatcher(set), false, conn, headers, info);\n}\n\nTEST(AndMatcher, Principal_Set) {\n  envoy::config::rbac::v3::Principal::Set set;\n  envoy::config::rbac::v3::Principal* principal = set.add_ids();\n  principal->set_any(true);\n\n  checkMatcher(RBAC::AndMatcher(set), true);\n\n  principal = set.add_ids();\n  auto* cidr = principal->mutable_direct_remote_ip();\n  cidr->set_address_prefix(\"1.2.3.0\");\n  cidr->mutable_prefix_len()->set_value(24);\n\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 123, false);\n  EXPECT_CALL(Const(info), downstreamDirectRemoteAddress()).WillOnce(ReturnRef(addr));\n\n  checkMatcher(RBAC::AndMatcher(set), true, conn, headers, info);\n\n  addr = Envoy::Network::Utility::parseInternetAddress(\"1.2.4.6\", 123, false);\n  EXPECT_CALL(Const(info), downstreamDirectRemoteAddress()).WillOnce(ReturnRef(addr));\n\n  checkMatcher(RBAC::AndMatcher(set), false, conn, headers, info);\n}\n\nTEST(OrMatcher, Permission_Set) {\n  envoy::config::rbac::v3::Permission::Set set;\n  envoy::config::rbac::v3::Permission* perm = set.add_rules();\n  perm->set_destination_port(123);\n\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 456, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr));\n\n  checkMatcher(RBAC::OrMatcher(set), false, conn, headers, info);\n\n  perm = set.add_rules();\n  perm->set_any(true);\n\n  
checkMatcher(RBAC::OrMatcher(set), true, conn, headers, info);\n}\n\nTEST(OrMatcher, Principal_Set) {\n  envoy::config::rbac::v3::Principal::Set set;\n  envoy::config::rbac::v3::Principal* id = set.add_ids();\n  auto* cidr = id->mutable_direct_remote_ip();\n  cidr->set_address_prefix(\"1.2.3.0\");\n  cidr->mutable_prefix_len()->set_value(24);\n\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.4.6\", 456, false);\n  EXPECT_CALL(Const(info), downstreamDirectRemoteAddress())\n      .Times(2)\n      .WillRepeatedly(ReturnRef(addr));\n\n  checkMatcher(RBAC::OrMatcher(set), false, conn, headers, info);\n\n  id = set.add_ids();\n  id->set_any(true);\n\n  checkMatcher(RBAC::OrMatcher(set), true, conn, headers, info);\n}\n\nTEST(NotMatcher, Permission) {\n  envoy::config::rbac::v3::Permission perm;\n  perm.set_any(true);\n\n  checkMatcher(RBAC::NotMatcher(perm), false, Envoy::Network::MockConnection());\n}\n\nTEST(NotMatcher, Principal) {\n  envoy::config::rbac::v3::Principal principal;\n  principal.set_any(true);\n\n  checkMatcher(RBAC::NotMatcher(principal), false, Envoy::Network::MockConnection());\n}\n\nTEST(HeaderMatcher, HeaderMatcher) {\n  envoy::config::route::v3::HeaderMatcher config;\n  config.set_name(\"foo\");\n  config.set_exact_match(\"bar\");\n\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  Envoy::Http::LowerCaseString key(\"foo\");\n  std::string value = \"bar\";\n  headers.setReference(key, value);\n\n  RBAC::HeaderMatcher matcher(config);\n\n  checkMatcher(matcher, true, Envoy::Network::MockConnection(), headers);\n\n  value = \"baz\";\n  headers.setReference(key, value);\n\n  checkMatcher(matcher, false, Envoy::Network::MockConnection(), headers);\n  checkMatcher(matcher, false);\n}\n\nTEST(IPMatcher, IPMatcher) {\n  Envoy::Network::MockConnection 
conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Envoy::Network::Address::InstanceConstSharedPtr connectionRemote =\n      Envoy::Network::Utility::parseInternetAddress(\"12.13.14.15\", 789, false);\n  Envoy::Network::Address::InstanceConstSharedPtr directLocal =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 123, false);\n  Envoy::Network::Address::InstanceConstSharedPtr directRemote =\n      Envoy::Network::Utility::parseInternetAddress(\"4.5.6.7\", 456, false);\n  Envoy::Network::Address::InstanceConstSharedPtr downstreamRemote =\n      Envoy::Network::Utility::parseInternetAddress(\"8.9.10.11\", 456, false);\n  EXPECT_CALL(conn, remoteAddress()).Times(2).WillRepeatedly(ReturnRef(connectionRemote));\n  EXPECT_CALL(Const(info), downstreamLocalAddress())\n      .Times(2)\n      .WillRepeatedly(ReturnRef(directLocal));\n  EXPECT_CALL(Const(info), downstreamDirectRemoteAddress())\n      .Times(2)\n      .WillRepeatedly(ReturnRef(directRemote));\n  EXPECT_CALL(Const(info), downstreamRemoteAddress())\n      .Times(2)\n      .WillRepeatedly(ReturnRef(downstreamRemote));\n\n  envoy::config::core::v3::CidrRange connection_remote_cidr;\n  connection_remote_cidr.set_address_prefix(\"12.13.14.15\");\n  connection_remote_cidr.mutable_prefix_len()->set_value(32);\n\n  envoy::config::core::v3::CidrRange downstream_local_cidr;\n  downstream_local_cidr.set_address_prefix(\"1.2.3.0\");\n  downstream_local_cidr.mutable_prefix_len()->set_value(24);\n\n  envoy::config::core::v3::CidrRange downstream_direct_remote_cidr;\n  downstream_direct_remote_cidr.set_address_prefix(\"4.5.6.7\");\n  downstream_direct_remote_cidr.mutable_prefix_len()->set_value(32);\n\n  envoy::config::core::v3::CidrRange downstream_remote_cidr;\n  downstream_remote_cidr.set_address_prefix(\"8.9.10.11\");\n  downstream_remote_cidr.mutable_prefix_len()->set_value(32);\n\n  checkMatcher(IPMatcher(connection_remote_cidr, 
IPMatcher::Type::ConnectionRemote), true, conn,\n               headers, info);\n  checkMatcher(IPMatcher(downstream_local_cidr, IPMatcher::Type::DownstreamLocal), true, conn,\n               headers, info);\n  checkMatcher(IPMatcher(downstream_direct_remote_cidr, IPMatcher::Type::DownstreamDirectRemote),\n               true, conn, headers, info);\n  checkMatcher(IPMatcher(downstream_remote_cidr, IPMatcher::Type::DownstreamRemote), true, conn,\n               headers, info);\n\n  connection_remote_cidr.set_address_prefix(\"4.5.6.7\");\n  downstream_local_cidr.set_address_prefix(\"1.2.4.8\");\n  downstream_direct_remote_cidr.set_address_prefix(\"4.5.6.0\");\n  downstream_remote_cidr.set_address_prefix(\"4.5.6.7\");\n\n  checkMatcher(IPMatcher(connection_remote_cidr, IPMatcher::Type::ConnectionRemote), false, conn,\n               headers, info);\n  checkMatcher(IPMatcher(downstream_local_cidr, IPMatcher::Type::DownstreamLocal), false, conn,\n               headers, info);\n  checkMatcher(IPMatcher(downstream_direct_remote_cidr, IPMatcher::Type::DownstreamDirectRemote),\n               false, conn, headers, info);\n  checkMatcher(IPMatcher(downstream_remote_cidr, IPMatcher::Type::DownstreamRemote), false, conn,\n               headers, info);\n}\n\nTEST(PortMatcher, PortMatcher) {\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 123, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr));\n\n  checkMatcher(PortMatcher(123), true, conn, headers, info);\n  checkMatcher(PortMatcher(456), false, conn, headers, info);\n}\n\nTEST(AuthenticatedMatcher, uriSanPeerCertificate) {\n  Envoy::Network::MockConnection conn;\n  auto ssl = std::make_shared<Ssl::MockConnectionInfo>();\n\n  const std::vector<std::string> 
uri_sans{\"foo\", \"baz\"};\n  const std::vector<std::string> dns_sans;\n  const std::string subject = \"subject\";\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(uri_sans));\n  EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans));\n  EXPECT_CALL(*ssl, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject));\n\n  EXPECT_CALL(Const(conn), ssl()).WillRepeatedly(Return(ssl));\n\n  // We should check if any URI SAN matches.\n  envoy::config::rbac::v3::Principal::Authenticated auth;\n  auth.mutable_principal_name()->set_exact(\"foo\");\n  checkMatcher(AuthenticatedMatcher(auth), true, conn);\n\n  auth.mutable_principal_name()->set_exact(\"baz\");\n  checkMatcher(AuthenticatedMatcher(auth), true, conn);\n\n  auth.mutable_principal_name()->set_exact(\"bar\");\n  checkMatcher(AuthenticatedMatcher(auth), false, conn);\n}\n\nTEST(AuthenticatedMatcher, dnsSanPeerCertificate) {\n  Envoy::Network::MockConnection conn;\n  auto ssl = std::make_shared<Ssl::MockConnectionInfo>();\n\n  const std::vector<std::string> uri_sans{\"uri_foo\"};\n  const std::vector<std::string> dns_sans{\"foo\", \"baz\"};\n  const std::string subject = \"subject\";\n\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(uri_sans));\n  EXPECT_CALL(Const(conn), ssl()).WillRepeatedly(Return(ssl));\n\n  EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans));\n  EXPECT_CALL(Const(conn), ssl()).WillRepeatedly(Return(ssl));\n\n  EXPECT_CALL(*ssl, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject));\n\n  // We should get check if any DNS SAN matches as URI SAN is not available.\n  envoy::config::rbac::v3::Principal::Authenticated auth;\n  auth.mutable_principal_name()->set_exact(\"foo\");\n  checkMatcher(AuthenticatedMatcher(auth), true, conn);\n\n  auth.mutable_principal_name()->set_exact(\"baz\");\n  checkMatcher(AuthenticatedMatcher(auth), true, conn);\n\n  auth.mutable_principal_name()->set_exact(\"bar\");\n  
checkMatcher(AuthenticatedMatcher(auth), false, conn);\n}\n\nTEST(AuthenticatedMatcher, subjectPeerCertificate) {\n  Envoy::Network::MockConnection conn;\n  auto ssl = std::make_shared<Ssl::MockConnectionInfo>();\n\n  const std::vector<std::string> sans;\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(sans));\n  EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillRepeatedly(Return(sans));\n  std::string peer_subject = \"bar\";\n  EXPECT_CALL(*ssl, subjectPeerCertificate()).WillRepeatedly(ReturnRef(peer_subject));\n  EXPECT_CALL(Const(conn), ssl()).WillRepeatedly(Return(ssl));\n\n  envoy::config::rbac::v3::Principal::Authenticated auth;\n  auth.mutable_principal_name()->set_exact(\"bar\");\n  checkMatcher(AuthenticatedMatcher(auth), true, conn);\n\n  auth.mutable_principal_name()->set_exact(\"foo\");\n  checkMatcher(AuthenticatedMatcher(auth), false, conn);\n}\n\nTEST(AuthenticatedMatcher, AnySSLSubject) {\n  Envoy::Network::MockConnection conn;\n  auto ssl = std::make_shared<Ssl::MockConnectionInfo>();\n  const std::vector<std::string> sans{\"foo\", \"baz\"};\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(sans));\n  EXPECT_CALL(Const(conn), ssl()).WillRepeatedly(Return(ssl));\n\n  envoy::config::rbac::v3::Principal::Authenticated auth;\n  checkMatcher(AuthenticatedMatcher(auth), true, conn);\n\n  auth.mutable_principal_name()->set_hidden_envoy_deprecated_regex(\".*\");\n  checkMatcher(AuthenticatedMatcher(auth), true, conn);\n}\n\nTEST(AuthenticatedMatcher, NoSSL) {\n  Envoy::Network::MockConnection conn;\n  EXPECT_CALL(Const(conn), ssl()).WillOnce(Return(nullptr));\n  checkMatcher(AuthenticatedMatcher({}), false, conn);\n}\n\nTEST(MetadataMatcher, MetadataMatcher) {\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl header;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n\n  auto label = MessageUtil::keyValueStruct(\"label\", \"prod\");\n  envoy::config::core::v3::Metadata metadata;\n  
metadata.mutable_filter_metadata()->insert(\n      Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\"other\", label));\n  metadata.mutable_filter_metadata()->insert(\n      Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\"rbac\", label));\n  EXPECT_CALL(Const(info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));\n\n  envoy::type::matcher::v3::MetadataMatcher matcher;\n  matcher.set_filter(\"rbac\");\n  matcher.add_path()->set_key(\"label\");\n\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"test\");\n  checkMatcher(MetadataMatcher(matcher), false, conn, header, info);\n  matcher.mutable_value()->mutable_string_match()->set_exact(\"prod\");\n  checkMatcher(MetadataMatcher(matcher), true, conn, header, info);\n}\n\nTEST(PolicyMatcher, PolicyMatcher) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_destination_port(123);\n  policy.add_permissions()->set_destination_port(456);\n  policy.add_principals()->mutable_authenticated()->mutable_principal_name()->set_exact(\"foo\");\n  policy.add_principals()->mutable_authenticated()->mutable_principal_name()->set_exact(\"bar\");\n  Expr::BuilderPtr builder = Expr::createBuilder(nullptr);\n\n  RBAC::PolicyMatcher matcher(policy, builder.get());\n\n  Envoy::Network::MockConnection conn;\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  NiceMock<StreamInfo::MockStreamInfo> info;\n  auto ssl = std::make_shared<Ssl::MockConnectionInfo>();\n  Envoy::Network::Address::InstanceConstSharedPtr addr =\n      Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 456, false);\n\n  const std::vector<std::string> uri_sans{\"bar\", \"baz\"};\n  const std::vector<std::string> dns_sans;\n  const std::string subject = \"subject\";\n  EXPECT_CALL(*ssl, uriSanPeerCertificate()).Times(4).WillRepeatedly(Return(uri_sans));\n  EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans));\n  EXPECT_CALL(*ssl, 
subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject));\n\n  EXPECT_CALL(Const(conn), ssl()).Times(2).WillRepeatedly(Return(ssl));\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr));\n\n  checkMatcher(matcher, true, conn, headers, info);\n\n  EXPECT_CALL(Const(conn), ssl()).Times(2).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr));\n\n  checkMatcher(matcher, false, conn, headers, info);\n\n  addr = Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 789, false);\n  EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr));\n\n  checkMatcher(matcher, false, conn, headers, info);\n}\n\nTEST(RequestedServerNameMatcher, ValidRequestedServerName) {\n  Envoy::Network::MockConnection conn;\n  EXPECT_CALL(conn, requestedServerName())\n      .Times(9)\n      .WillRepeatedly(Return(absl::string_view(\"www.cncf.io\")));\n\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createRegexMatcher(\".*cncf.io\")), true,\n               conn);\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createRegexMatcher(\".*cncf.*\")), true, conn);\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createRegexMatcher(\"www.*\")), true, conn);\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createRegexMatcher(\".*io\")), true, conn);\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createRegexMatcher(\".*\")), true, conn);\n\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createExactMatcher(\"\")), false, conn);\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createExactMatcher(\"www.cncf.io\")), true,\n               conn);\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createExactMatcher(\"xyz.cncf.io\")), false,\n               conn);\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createExactMatcher(\"example.com\")), false,\n               
conn);\n}\n\nTEST(RequestedServerNameMatcher, EmptyRequestedServerName) {\n  Envoy::Network::MockConnection conn;\n  EXPECT_CALL(conn, requestedServerName()).Times(3).WillRepeatedly(Return(absl::string_view(\"\")));\n\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createRegexMatcher(\".*\")), true, conn);\n\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createExactMatcher(\"\")), true, conn);\n  checkMatcher(RequestedServerNameMatcher(TestUtility::createExactMatcher(\"example.com\")), false,\n               conn);\n}\n\nTEST(PathMatcher, NoPathInHeader) {\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  envoy::type::matcher::v3::PathMatcher matcher;\n  matcher.mutable_path()->mutable_safe_regex()->mutable_google_re2();\n  matcher.mutable_path()->mutable_safe_regex()->set_regex(\".*\");\n\n  headers.setPath(\"/path\");\n  checkMatcher(PathMatcher(matcher), true, Envoy::Network::MockConnection(), headers);\n  headers.removePath();\n  checkMatcher(PathMatcher(matcher), false, Envoy::Network::MockConnection(), headers);\n}\n\nTEST(PathMatcher, ValidPathInHeader) {\n  Envoy::Http::TestRequestHeaderMapImpl headers;\n  envoy::type::matcher::v3::PathMatcher matcher;\n  matcher.mutable_path()->set_exact(\"/exact\");\n\n  headers.setPath(\"/exact\");\n  checkMatcher(PathMatcher(matcher), true, Envoy::Network::MockConnection(), headers);\n  headers.setPath(\"/exact?param=val\");\n  checkMatcher(PathMatcher(matcher), true, Envoy::Network::MockConnection(), headers);\n  headers.setPath(\"/exact#fragment\");\n  checkMatcher(PathMatcher(matcher), true, Envoy::Network::MockConnection(), headers);\n  headers.setPath(\"/exacz\");\n  checkMatcher(PathMatcher(matcher), false, Envoy::Network::MockConnection(), headers);\n}\n\n} // namespace\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/rbac/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/config/rbac/v3/rbac.pb.h\"\n\n#include \"extensions/filters/common/rbac/engine_impl.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\n\nclass MockEngine : public RoleBasedAccessControlEngineImpl {\npublic:\n  MockEngine(const envoy::config::rbac::v3::RBAC& rules,\n             const EnforcementMode mode = EnforcementMode::Enforced)\n      : RoleBasedAccessControlEngineImpl(rules, mode){};\n\n  MOCK_METHOD(bool, handleAction,\n              (const Envoy::Network::Connection&, const Envoy::Http::RequestHeaderMap&,\n               StreamInfo::StreamInfo&, std::string* effective_policy_id),\n              (const));\n\n  MOCK_METHOD(bool, handleAction,\n              (const Envoy::Network::Connection&, StreamInfo::StreamInfo&,\n               std::string* effective_policy_id),\n              (const));\n};\n\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/common/rbac/utility_test.cc",
    "content": "#include \"extensions/filters/common/rbac/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Filters {\nnamespace Common {\nnamespace RBAC {\nnamespace {\n\nTEST(ResponseDetail, ResponseDetail) {\n  EXPECT_EQ(RBAC::responseDetail(\"abdfxy\"), \"rbac_access_denied_matched_policy[abdfxy]\");\n  EXPECT_EQ(RBAC::responseDetail(\"ab df  xy\"), \"rbac_access_denied_matched_policy[ab_df__xy]\");\n  EXPECT_EQ(RBAC::responseDetail(\"a \\t\\f\\v\\n\\ry\"), \"rbac_access_denied_matched_policy[a______y]\");\n}\n\n} // namespace\n} // namespace RBAC\n} // namespace Common\n} // namespace Filters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/adaptive_concurrency/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"adaptive_concurrency_filter_test\",\n    srcs = [\"adaptive_concurrency_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.adaptive_concurrency\",\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/http/adaptive_concurrency:adaptive_concurrency_filter_lib\",\n        \"//source/extensions/filters/http/adaptive_concurrency/controller:controller_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"adaptive_concurrency_integration_test\",\n    srcs = [\n        \"adaptive_concurrency_filter_integration_test.cc\",\n        \"adaptive_concurrency_filter_integration_test.h\",\n    ],\n    extension_name = \"envoy.filters.http.adaptive_concurrency\",\n    deps = [\n        \"//source/extensions/filters/http/adaptive_concurrency:config\",\n        \"//source/extensions/filters/http/fault:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.cc",
    "content": "#include \"test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h\"\n\n#include \"common/http/header_map_impl.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nvoid AdaptiveConcurrencyIntegrationTest::sendRequests(uint32_t request_count,\n                                                      uint32_t num_forwarded) {\n  ASSERT_LE(num_forwarded, request_count);\n\n  // TODO (tonya11en):\n  // We send header-only requests below because the adaptive concurrency filter will reject requests\n  // when decoding their headers. If we try to send data, there's no way to ensure that the filter\n  // doesn't respond between the client sending headers and data, invalidating the client's encoder\n  // stream. We should change this integration test to allow for the ability to test this scenario.\n\n  if (use_grpc_) {\n    default_request_headers_.setContentType(Http::Headers::get().ContentTypeValues.Grpc);\n  }\n\n  // We expect these requests to reach the upstream.\n  for (uint32_t idx = 0; idx < num_forwarded; ++idx) {\n    auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n    responses_.push_back(std::move(response));\n    upstream_connections_.emplace_back();\n    upstream_requests_.emplace_back();\n\n    ASSERT_TRUE(\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, upstream_connections_.back()));\n    ASSERT_TRUE(\n        upstream_connections_.back()->waitForNewStream(*dispatcher_, upstream_requests_.back()));\n\n    ASSERT_TRUE(upstream_requests_.back()->waitForEndStream(*dispatcher_));\n  }\n\n  // These requests should be blocked by the filter, so they never make it to the upstream.\n  auto blocked_counter = test_server_->counter(REQUEST_BLOCK_COUNTER_NAME)->value();\n  for (uint32_t idx = 0; idx < request_count - 
num_forwarded; ++idx) {\n    auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n    responses_.push_back(std::move(response));\n\n    test_server_->waitForCounterEq(REQUEST_BLOCK_COUNTER_NAME, ++blocked_counter);\n\n    // These will remain nullptr.\n    upstream_connections_.emplace_back();\n    upstream_requests_.emplace_back();\n  }\n\n  ASSERT_EQ(upstream_connections_.size(), upstream_requests_.size());\n  ASSERT_EQ(responses_.size(), upstream_requests_.size());\n}\n\nvoid AdaptiveConcurrencyIntegrationTest::respondToAllRequests(uint32_t forwarded_count,\n                                                              std::chrono::milliseconds latency) {\n  ASSERT_GE(responses_.size(), static_cast<size_t>(forwarded_count));\n\n  timeSystem().advanceTimeWait(latency);\n\n  for (uint32_t idx = 0; idx < forwarded_count; ++idx) {\n    respondToRequest(true);\n  }\n\n  while (!responses_.empty()) {\n    respondToRequest(false);\n  }\n}\n\nvoid AdaptiveConcurrencyIntegrationTest::respondToRequest(bool expect_forwarded) {\n  ASSERT_EQ(upstream_connections_.size(), upstream_requests_.size());\n  ASSERT_EQ(responses_.size(), upstream_requests_.size());\n\n  if (expect_forwarded) {\n    ASSERT_NE(upstream_connections_.front(), nullptr);\n    ASSERT_NE(upstream_requests_.front(), nullptr);\n    ASSERT_TRUE(upstream_requests_.front()->waitForEndStream(*dispatcher_));\n    upstream_requests_.front()->encodeHeaders(default_response_headers_, false);\n    upstream_requests_.front()->encodeData(1, true);\n  }\n\n  responses_.front()->waitForEndStream();\n\n  if (expect_forwarded) {\n    EXPECT_TRUE(upstream_requests_.front()->complete());\n  }\n\n  EXPECT_TRUE(responses_.front()->complete());\n\n  if (expect_forwarded) {\n    verifyResponseForwarded(std::move(responses_.front()));\n    ASSERT_TRUE(upstream_connections_.front()->close());\n    ASSERT_TRUE(upstream_connections_.front()->waitForDisconnect());\n  } else {\n    
verifyResponseBlocked(std::move(responses_.front()));\n  }\n\n  upstream_connections_.pop_front();\n  upstream_requests_.pop_front();\n  responses_.pop_front();\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdaptiveConcurrencyIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));\n\n// Test a single request returns successfully.\nTEST_P(AdaptiveConcurrencyIntegrationTest, TestConcurrency1) {\n  customInit();\n\n  EXPECT_EQ(0, test_server_->counter(REQUEST_BLOCK_COUNTER_NAME)->value());\n  sendRequests(2, 1);\n  respondToAllRequests(1, std::chrono::milliseconds(5));\n  test_server_->waitForCounterEq(REQUEST_BLOCK_COUNTER_NAME, 1);\n}\n\n// Test many requests, where only a single request returns 200 during the minRTT window.\nTEST_P(AdaptiveConcurrencyIntegrationTest, TestManyConcurrency1) {\n  customInit();\n\n  EXPECT_EQ(0, test_server_->counter(REQUEST_BLOCK_COUNTER_NAME)->value());\n  sendRequests(10, 1);\n  respondToAllRequests(1, std::chrono::milliseconds(5));\n  test_server_->waitForCounterEq(REQUEST_BLOCK_COUNTER_NAME, 9);\n}\n\n// Test many grpc requests, where only a single request returns 200 during the minRTT window.\nTEST_P(AdaptiveConcurrencyIntegrationTest, TestManyConcurrencyGrpc) {\n  use_grpc_ = true;\n  customInit();\n\n  EXPECT_EQ(0, test_server_->counter(REQUEST_BLOCK_COUNTER_NAME)->value());\n  sendRequests(10, 1);\n  respondToAllRequests(1, std::chrono::milliseconds(5));\n  test_server_->waitForCounterEq(REQUEST_BLOCK_COUNTER_NAME, 9);\n}\n\n/**\n * TODO: Test the ability to increase/decrease the concurrency limit with request latencies based on\n * the minRTT value.\n *\n * See PR #8405.\n *\n * Previous attempts at this test took a long time when using simulated time, which resulted in\n * intermittent timeouts in CI.\n */\n\n/**\n * TODO: Test the ability to enforce the concurrency limit outside of the minRTT calculation window.\n *\n * See PR #8405.\n *\n * Previous attempts at this test would 
hang during waitForHttpConnection after successfully sending\n * several requests to inflate the minRTT value. Alternative approaches that circumvented the need\n * for manually waiting included:\n *\n *   - Using a fault filter to inject delay into requests after passing the adaptive concurrency\n *   filter. This fails when using simulated time due to the fault filter's delay mechanism not\n *   being governed by the simulated time class. This required usage of real time, which sacrificed\n *   determinism.\n *\n *   - Buffering requests at the fake upstream and releasing them manually. Buffering via simulated\n *   time and releasing by advancing time does not work due to the only_one_thread.h assertions\n *   requiring simulated time to advance on a single thread. Buffering via a request queue and\n *   changes to the fake upstream requires too many changes to the fake upstream to be worth the\n *   investment of time, since it would be more worthwhile to overhaul the integration test\n *   framework to be event-driven rather than waitFor* driven.\n */\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h",
    "content": "#include \"test/integration/http_integration.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nconst std::string ADAPTIVE_CONCURRENCY_CONFIG =\n    R\"EOF(\nname: envoy.filters.http.adaptive_concurrency\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency\n  gradient_controller_config:\n    sample_aggregate_percentile:\n      value: 50\n    concurrency_limit_params:\n      concurrency_update_interval: 0.1s\n    min_rtt_calc_params:\n      interval: 30s\n      request_count: 50\n      min_concurrency: 1\n)EOF\";\n\nconst std::string CONCURRENCY_LIMIT_GAUGE_NAME =\n    \"http.config_test.adaptive_concurrency.gradient_controller.concurrency_limit\";\nconst std::string REQUEST_BLOCK_COUNTER_NAME =\n    \"http.config_test.adaptive_concurrency.gradient_controller.rq_blocked\";\nconst std::string MIN_RTT_GAUGE_NAME =\n    \"http.config_test.adaptive_concurrency.gradient_controller.min_rtt_msecs\";\n\nclass AdaptiveConcurrencyIntegrationTest\n    : public testing::TestWithParam<Network::Address::IpVersion>,\n      public Event::TestUsingSimulatedTime,\n      public HttpIntegrationTest {\npublic:\n  AdaptiveConcurrencyIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {}\n\n  void customInit() {\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n    config_helper_.addFilter(ADAPTIVE_CONCURRENCY_CONFIG);\n    initialize();\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  }\n\n  void TearDown() override {\n    HttpIntegrationTest::cleanupUpstreamAndDownstream();\n    codec_client_->close();\n    codec_client_.reset();\n  }\n\nprotected:\n  // Send some number of requests with 'delay_ms' specifying the amount of time the fault filter\n  // will delay them.\n  void 
sendRequests(uint32_t request_count, uint32_t num_forwarded);\n\n  // Waits for a specified duration and then responds to all queued up requests in a FIFO manner.\n  // Asserts that the expected number of requests are forwarded through the filter. The oldest\n  // requests are the forwarded requests.\n  //\n  // Note: For interleaved forwarded/blocked requests, use respondToRequest() directly.\n  void respondToAllRequests(uint32_t forwarded_count, std::chrono::milliseconds latency);\n\n  // Responds to a single request in a FIFO manner. Asserts the forwarding expectation.\n  void respondToRequest(bool expect_forwarded);\n\n  void verifyResponseForwarded(IntegrationStreamDecoderPtr response) {\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n\n  void verifyResponseBlocked(IntegrationStreamDecoderPtr response) {\n    if (use_grpc_) {\n      EXPECT_EQ(\"200\", response->headers().getStatusValue());\n      EXPECT_EQ(\"reached concurrency limit\", response->headers().getGrpcMessageValue());\n    } else {\n      EXPECT_EQ(\"503\", response->headers().getStatusValue());\n      EXPECT_EQ(\"reached concurrency limit\", response->body());\n    }\n  }\n\n  std::deque<IntegrationStreamDecoderPtr> responses_;\n  std::deque<FakeStreamPtr> upstream_requests_;\n  std::deque<FakeHttpConnectionPtr> upstream_connections_;\n  bool use_grpc_{};\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc",
    "content": "#include <chrono>\n\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h\"\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.validate.h\"\n\n#include \"extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h\"\n#include \"extensions/filters/http/adaptive_concurrency/controller/controller.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdaptiveConcurrency {\nnamespace {\n\nusing Controller::RequestForwardingAction;\n\nclass MockConcurrencyController : public Controller::ConcurrencyController {\npublic:\n  MOCK_METHOD(RequestForwardingAction, forwardingDecision, ());\n  MOCK_METHOD(void, cancelLatencySample, ());\n  MOCK_METHOD(void, recordLatencySample, (MonotonicTime));\n\n  uint32_t concurrencyLimit() const override { return 0; }\n};\n\nclass AdaptiveConcurrencyFilterTest : public testing::Test {\npublic:\n  AdaptiveConcurrencyFilterTest() = default;\n\n  void SetUp() override {\n    const envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency config;\n    auto config_ptr = std::make_shared<AdaptiveConcurrencyFilterConfig>(\n        config, runtime_, \"testprefix.\", stats_, time_system_);\n\n    filter_ = std::make_unique<AdaptiveConcurrencyFilter>(config_ptr, controller_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  void TearDown() override { filter_.reset(); }\n\n  envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency\n  makeConfig(const std::string& yaml_config) {\n    
envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency proto;\n    TestUtility::loadFromYamlAndValidate(yaml_config, proto);\n    return proto;\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  Stats::IsolatedStoreImpl stats_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  std::shared_ptr<MockConcurrencyController> controller_{new MockConcurrencyController()};\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  std::unique_ptr<AdaptiveConcurrencyFilter> filter_;\n};\n\nTEST_F(AdaptiveConcurrencyFilterTest, TestEnableOverriddenFromRuntime) {\n  std::string yaml_config =\n      R\"EOF(\ngradient_controller_config:\n  sample_aggregate_percentile:\n    value: 50\n  concurrency_limit_params:\n    concurrency_update_interval:\n      nanos: 100000000 # 100ms\n  min_rtt_calc_params:\n    interval:\n      seconds: 30\n    request_count: 50\nenabled:\n  default_value: true\n  runtime_key: \"adaptive_concurrency.enabled\"\n)EOF\";\n\n  auto config = makeConfig(yaml_config);\n\n  auto config_ptr = std::make_shared<AdaptiveConcurrencyFilterConfig>(\n      config, runtime_, \"testprefix.\", stats_, time_system_);\n  filter_ = std::make_unique<AdaptiveConcurrencyFilter>(config_ptr, controller_);\n  filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n  filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n\n  // The filter should behave as normal here.\n\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  // The filter will be disabled when the flag is overridden. 
Note there is no expected call to\n  // forwardingDecision() or recordLatencySample().\n\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"adaptive_concurrency.enabled\", true))\n      .WillOnce(Return(false));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl request_body;\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(request_body, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n  filter_->encodeComplete();\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, TestNanosValidationFail) {\n  std::string yaml_config =\n      R\"EOF(\ngradient_controller_config:\n  sample_aggregate_percentile:\n    value: 50\n  concurrency_limit_params:\n    concurrency_update_interval:\n      nanos: 100000000 # 100ms\n  min_rtt_calc_params:\n    interval:\n      nanos: 8\n    request_count: 50\nenabled:\n  default_value: true\n  runtime_key: \"adaptive_concurrency.enabled\"\n)EOF\";\n\n  EXPECT_THROW(auto config = makeConfig(yaml_config), ProtoValidationException);\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, TestNanosValidationPass) {\n  std::string yaml_config =\n      R\"EOF(\ngradient_controller_config:\n  sample_aggregate_percentile:\n    value: 50\n  concurrency_limit_params:\n    concurrency_update_interval:\n      nanos: 100000000 # 100ms\n  min_rtt_calc_params:\n    interval:\n      nanos: 1000000\n    request_count: 50\nenabled:\n  default_value: true\n  runtime_key: \"adaptive_concurrency.enabled\"\n)EOF\";\n\n  auto config = makeConfig(yaml_config);\n\n  auto config_ptr = std::make_shared<AdaptiveConcurrencyFilterConfig>(\n      config, runtime_, \"testprefix.\", stats_, time_system_);\n  filter_ = 
std::make_unique<AdaptiveConcurrencyFilter>(config_ptr, controller_);\n  filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n  filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n\n  // The filter should behave as normal here.\n\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  // The filter will be disabled when the flag is overridden. Note there is no expected call to\n  // forwardingDecision() or recordLatencySample().\n\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"adaptive_concurrency.enabled\", true))\n      .WillOnce(Return(false));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl request_body;\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(request_body, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n  filter_->encodeComplete();\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, TestEnableConfiguredInProto) {\n  std::string yaml_config =\n      R\"EOF(\ngradient_controller_config:\n  sample_aggregate_percentile:\n    value: 50\n  concurrency_limit_params:\n    concurrency_update_interval:\n      nanos: 100000000 # 100ms\n  min_rtt_calc_params:\n    interval:\n      seconds: 30\n    request_count: 50\nenabled:\n  default_value: false\n  runtime_key: \"adaptive_concurrency.enabled\"\n)EOF\";\n\n  auto config = makeConfig(yaml_config);\n\n  auto config_ptr = std::make_shared<AdaptiveConcurrencyFilterConfig>(\n      config, runtime_, \"testprefix.\", stats_, time_system_);\n  filter_ = std::make_unique<AdaptiveConcurrencyFilter>(config_ptr, controller_);\n  filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n  filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n\n  // We expect no calls 
to the concurrency controller.\n\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl request_body;\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(request_body, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n  filter_->encodeComplete();\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, DecodeHeadersTestForwarding) {\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  EXPECT_CALL(*controller_, forwardingDecision())\n      .WillOnce(Return(RequestForwardingAction::Forward));\n  EXPECT_CALL(*controller_, recordLatencySample(_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl request_body;\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(request_body, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, DecodeHeadersTestBlock) {\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  EXPECT_CALL(*controller_, forwardingDecision()).WillOnce(Return(RequestForwardingAction::Block));\n  EXPECT_CALL(decoder_callbacks_, sendLocalReply(Http::Code::ServiceUnavailable, _, _, _, _));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, RecordSampleInDestructor) {\n  // Verify that the request latency is always sampled even if encodeComplete() is never called.\n  EXPECT_CALL(*controller_, forwardingDecision())\n      
.WillOnce(Return(RequestForwardingAction::Forward));\n  Http::TestRequestHeaderMapImpl request_headers;\n  filter_->decodeHeaders(request_headers, true);\n\n  EXPECT_CALL(*controller_, recordLatencySample(_));\n  filter_.reset();\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, RecordSampleOmission) {\n  // Verify that the request latency is not sampled if forwardingDecision blocks the request.\n  EXPECT_CALL(*controller_, forwardingDecision()).WillOnce(Return(RequestForwardingAction::Block));\n  Http::TestRequestHeaderMapImpl request_headers;\n  filter_->decodeHeaders(request_headers, true);\n\n  filter_.reset();\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, OnDestroyCleanupResetTest) {\n  // Get the filter to record the request start time via decode.\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_CALL(*controller_, forwardingDecision())\n      .WillOnce(Return(RequestForwardingAction::Forward));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  EXPECT_CALL(*controller_, cancelLatencySample());\n\n  // Encode step is not performed prior to destruction.\n  filter_->onDestroy();\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, OnDestroyCleanupTest) {\n  // Get the filter to record the request start time via decode.\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_CALL(*controller_, forwardingDecision())\n      .WillOnce(Return(RequestForwardingAction::Forward));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  const auto rq_rcv_time = time_system_.monotonicTime();\n  time_system_.advanceTimeWait(std::chrono::nanoseconds(42));\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n  EXPECT_CALL(*controller_, recordLatencySample(rq_rcv_time));\n  filter_->encodeComplete();\n\n  filter_->onDestroy();\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, 
EncodeHeadersValidTestWithBody) {\n  auto mt = time_system_.monotonicTime();\n  time_system_.setMonotonicTime(mt + std::chrono::nanoseconds(123));\n\n  // Get the filter to record the request start time via decode.\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_CALL(*controller_, forwardingDecision())\n      .WillOnce(Return(RequestForwardingAction::Forward));\n  Buffer::OwnedImpl data;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n\n  const auto rq_rcv_time = time_system_.monotonicTime();\n  mt = time_system_.monotonicTime();\n  time_system_.setMonotonicTime(mt + std::chrono::nanoseconds(42));\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, false));\n  Http::TestResponseTrailerMapImpl response_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers));\n  EXPECT_CALL(*controller_, recordLatencySample(rq_rcv_time));\n  filter_->encodeComplete();\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, EncodeHeadersValidTest) {\n  auto mt = time_system_.monotonicTime();\n  time_system_.setMonotonicTime(mt + std::chrono::nanoseconds(123));\n\n  // Get the filter to record the request start time via decode.\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_CALL(*controller_, forwardingDecision())\n      .WillOnce(Return(RequestForwardingAction::Forward));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  const auto rq_rcv_time = time_system_.monotonicTime();\n  mt = time_system_.monotonicTime();\n  
time_system_.setMonotonicTime(mt + std::chrono::nanoseconds(42));\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n  EXPECT_CALL(*controller_, recordLatencySample(rq_rcv_time));\n  filter_->encodeComplete();\n}\n\nTEST_F(AdaptiveConcurrencyFilterTest, DisregardHealthChecks) {\n  StreamInfo::MockStreamInfo stream_info;\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillOnce(ReturnRef(stream_info));\n  EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(true));\n\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  // We do not expect a call to forwardingDecision() during decode.\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n\n  // We do not expect a call to recordLatencySample() as well.\n\n  filter_->encodeComplete();\n}\n\n} // namespace\n} // namespace AdaptiveConcurrency\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/adaptive_concurrency/controller/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"gradient_controller_test\",\n    srcs = [\"gradient_controller_test.cc\"],\n    extension_name = \"envoy.filters.http.adaptive_concurrency\",\n    deps = [\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/filters/http/adaptive_concurrency:adaptive_concurrency_filter_lib\",\n        \"//source/extensions/filters/http/adaptive_concurrency/controller:controller_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc",
    "content": "#include <chrono>\n#include <iostream>\n\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h\"\n#include \"envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.validate.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h\"\n#include \"extensions/filters/http/adaptive_concurrency/controller/controller.h\"\n#include \"extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdaptiveConcurrency {\nnamespace Controller {\nnamespace {\n\nGradientControllerConfig makeConfig(const std::string& yaml_config,\n                                    NiceMock<Runtime::MockLoader>& runtime) {\n  envoy::extensions::filters::http::adaptive_concurrency::v3::GradientControllerConfig proto;\n  TestUtility::loadFromYamlAndValidate(yaml_config, proto);\n  return GradientControllerConfig{proto, runtime};\n}\n\nclass GradientControllerConfigTest : public testing::Test {\npublic:\n  GradientControllerConfigTest() = default;\n\nprotected:\n  NiceMock<Runtime::MockLoader> runtime_;\n};\n\nclass GradientControllerTest : public testing::Test {\npublic:\n  GradientControllerTest()\n      : api_(Api::createApiForTest(time_system_)),\n        dispatcher_(api_->allocateDispatcher(\"test_thread\")) {}\n\n  GradientControllerSharedPtr makeController(const std::string& yaml_config) {\n    const auto config = 
std::make_shared<GradientController>(makeConfig(yaml_config, runtime_),\n                                                             *dispatcher_, runtime_, \"test_prefix.\",\n                                                             stats_, random_, time_system_);\n\n    // Advance time so that the latency sample calculations don't underflow if monotonic time is 0.\n    time_system_.advanceTimeAndRun(std::chrono::hours(42), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n\n    return config;\n  }\n\nprotected:\n  void sampleLatency(const GradientControllerSharedPtr& controller,\n                     std::chrono::microseconds latency) {\n    controller->recordLatencySample(time_system_.monotonicTime() - latency);\n  }\n\n  // Helper function that will attempt to pull forwarding decisions.\n  void tryForward(const GradientControllerSharedPtr& controller,\n                  const bool expect_forward_response) {\n    const auto expected_resp =\n        expect_forward_response ? 
RequestForwardingAction::Forward : RequestForwardingAction::Block;\n    EXPECT_EQ(expected_resp, controller->forwardingDecision());\n  }\n\n  // Gets the controller past the initial minRTT stage.\n  void advancePastMinRTTStage(const GradientControllerSharedPtr& controller,\n                              const std::string& yaml_config,\n                              std::chrono::milliseconds latency = std::chrono::milliseconds(5)) {\n    const auto config = makeConfig(yaml_config, runtime_);\n    for (uint32_t i = 0; i <= config.minRTTAggregateRequestCount(); ++i) {\n      tryForward(controller, true);\n      sampleLatency(controller, latency);\n    }\n  }\n\n  void verifyMinRTTValue(std::chrono::milliseconds min_rtt) {\n    EXPECT_EQ(\n        min_rtt.count(),\n        stats_.gauge(\"test_prefix.min_rtt_msecs\", Stats::Gauge::ImportMode::NeverImport).value());\n  }\n\n  void verifyMinRTTActive() {\n    EXPECT_EQ(\n        1,\n        stats_.gauge(\"test_prefix.min_rtt_calculation_active\", Stats::Gauge::ImportMode::Accumulate)\n            .value());\n  }\n\n  void verifyMinRTTInactive() {\n    EXPECT_EQ(\n        0,\n        stats_.gauge(\"test_prefix.min_rtt_calculation_active\", Stats::Gauge::ImportMode::Accumulate)\n            .value());\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  Stats::TestUtil::TestStore stats_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  NiceMock<Random::MockRandomGenerator> random_;\n};\n\nTEST_F(GradientControllerConfigTest, BasicTest) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 42.5\nconcurrency_limit_params:\n  max_concurrency_limit: 1337\n  concurrency_update_interval: 0.123s\nmin_rtt_calc_params:\n  jitter:\n    value: 13.2\n  interval: 31s\n  request_count: 52\n  min_concurrency: 8\n)EOF\";\n\n  auto config = makeConfig(yaml, runtime_);\n\n  EXPECT_EQ(config.minRTTCalcInterval(), std::chrono::seconds(31));\n  
EXPECT_EQ(config.sampleRTTCalcInterval(), std::chrono::milliseconds(123));\n  EXPECT_EQ(config.maxConcurrencyLimit(), 1337);\n  EXPECT_EQ(config.minRTTAggregateRequestCount(), 52);\n  EXPECT_EQ(config.sampleAggregatePercentile(), .425);\n  EXPECT_EQ(config.jitterPercent(), .132);\n  EXPECT_EQ(config.minConcurrency(), 8);\n}\n\nTEST_F(GradientControllerConfigTest, Clamping) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 42.5\nconcurrency_limit_params:\n  max_concurrency_limit: 1337\n  concurrency_update_interval:\n    nanos: 123000000\nmin_rtt_calc_params:\n  jitter:\n    value: 13.2\n  interval:\n    seconds: 31\n  request_count: 52\n)EOF\";\n\n  auto config = makeConfig(yaml, runtime_);\n\n  // Should be clamped in the range [0,1].\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(_, 42.5)).WillOnce(Return(150.0));\n  EXPECT_EQ(config.sampleAggregatePercentile(), 1.0);\n  EXPECT_CALL(runtime_.snapshot_, getDouble(_, 42.5)).WillOnce(Return(-50.5));\n  EXPECT_EQ(config.sampleAggregatePercentile(), 0.0);\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(_, 13.2)).WillOnce(Return(150.0));\n  EXPECT_EQ(config.jitterPercent(), 1.0);\n  EXPECT_CALL(runtime_.snapshot_, getDouble(_, 13.2)).WillOnce(Return(-50.5));\n  EXPECT_EQ(config.jitterPercent(), 0.0);\n}\n\nTEST_F(GradientControllerConfigTest, BasicTestOverrides) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 42.5\nconcurrency_limit_params:\n  max_concurrency_limit: 1337\n  concurrency_update_interval:\n    nanos: 123000000\nmin_rtt_calc_params:\n  buffer:\n    value: 33\n  jitter:\n    value: 13.2\n  interval:\n    seconds: 31\n  request_count: 52\n  min_concurrency: 7\n)EOF\";\n\n  auto config = makeConfig(yaml, runtime_);\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, 31000)).WillOnce(Return(60000));\n  EXPECT_EQ(config.minRTTCalcInterval(), std::chrono::seconds(60));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, 123)).WillOnce(Return(456));\n  
EXPECT_EQ(config.sampleRTTCalcInterval(), std::chrono::milliseconds(456));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, 1337)).WillOnce(Return(9000));\n  EXPECT_EQ(config.maxConcurrencyLimit(), 9000);\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, 52)).WillOnce(Return(65));\n  EXPECT_EQ(config.minRTTAggregateRequestCount(), 65);\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(_, 42.5)).WillOnce(Return(66.0));\n  EXPECT_EQ(config.sampleAggregatePercentile(), .66);\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(_, 13.2)).WillOnce(Return(15.5));\n  EXPECT_EQ(config.jitterPercent(), .155);\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, 7)).WillOnce(Return(9));\n  EXPECT_EQ(config.minConcurrency(), 9);\n\n  EXPECT_CALL(runtime_.snapshot_, getDouble(_, 33.0)).WillOnce(Return(77.0));\n  EXPECT_EQ(config.minRTTBufferPercent(), .77);\n}\n\nTEST_F(GradientControllerConfigTest, DefaultValuesTest) {\n  const std::string yaml = R\"EOF(\nconcurrency_limit_params:\n  concurrency_update_interval: 0.123s\nmin_rtt_calc_params:\n  interval: 31s\n)EOF\";\n\n  auto config = makeConfig(yaml, runtime_);\n\n  EXPECT_EQ(config.minRTTCalcInterval(), std::chrono::seconds(31));\n  EXPECT_EQ(config.sampleRTTCalcInterval(), std::chrono::milliseconds(123));\n  EXPECT_EQ(config.maxConcurrencyLimit(), 1000);\n  EXPECT_EQ(config.minRTTAggregateRequestCount(), 50);\n  EXPECT_EQ(config.sampleAggregatePercentile(), .5);\n  EXPECT_EQ(config.jitterPercent(), .15);\n  EXPECT_EQ(config.minConcurrency(), 3);\n  EXPECT_EQ(config.minRTTBufferPercent(), 0.25);\n}\n\n// Verify that requests started in the previous minRTT window are not sampled in the next.\nTEST_F(GradientControllerTest, MinRTTEpoch) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 30s\n  request_count: 25\n  min_concurrency: 2\n  buffer:\n    value: 0.0\n)EOF\";\n\n  const 
int min_concurrency = 2;\n  auto controller = makeController(yaml);\n  const auto min_rtt = std::chrono::milliseconds(1350);\n  time_system_.advanceTimeAndRun(min_rtt, *dispatcher_, Event::Dispatcher::RunType::Block);\n\n  verifyMinRTTActive();\n  EXPECT_EQ(controller->concurrencyLimit(), min_concurrency);\n  advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(1350));\n  verifyMinRTTInactive();\n  verifyMinRTTValue(std::chrono::milliseconds(1350));\n\n  // Advance time to just before the end of the epoch and inflate the concurrency limit.\n  uint32_t last_limit = controller->concurrencyLimit();\n  for (int i = 0; i < 29; ++i) {\n    tryForward(controller, true);\n    time_system_.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    sampleLatency(controller, min_rtt);\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n    EXPECT_GT(controller->concurrencyLimit(), last_limit);\n    last_limit = controller->concurrencyLimit();\n  }\n\n  int active_rq_counter = 0;\n  // Send out requests that we won't attempt to sample until the next minRTT window so the requests\n  // will be disregarded as they were started in the previous minRTT window.\n  for (uint32_t i = 0; i < controller->concurrencyLimit(); ++i) {\n    tryForward(controller, true);\n    ++active_rq_counter;\n  }\n\n  // Move into the next minRTT window while the requests are outstanding.\n  time_system_.advanceTimeAndRun(std::chrono::seconds(5), *dispatcher_,\n                                 Event::Dispatcher::RunType::Block);\n  verifyMinRTTActive();\n  EXPECT_EQ(controller->concurrencyLimit(), min_concurrency);\n\n  // Sample more than enough requests to break out of the minRTT measurement window (>25). 
These are\n  // expected to be disregarded since they would have started in the previous minRTT epoch.\n  // Therefore, we expect the minRTT window to still be active.\n  EXPECT_GT(active_rq_counter, 25);\n  for (int i = 0; i < active_rq_counter; ++i) {\n    // Sample requests that were send \"5 minutes ago,\" which would surely be from an older minRTT\n    // epoch.\n    sampleLatency(controller, std::chrono::minutes(5));\n  }\n  verifyMinRTTActive();\n}\n\nTEST_F(GradientControllerTest, MinRTTLogicTest) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 30s\n  request_count: 50\n  min_concurrency: 7\n)EOF\";\n\n  auto controller = makeController(yaml);\n  const auto min_rtt = std::chrono::milliseconds(13);\n\n  // The controller should be measuring minRTT upon creation, so the concurrency window is 7 (the\n  // min concurrency).\n  verifyMinRTTActive();\n  EXPECT_EQ(controller->concurrencyLimit(), 7);\n  for (int i = 0; i < 7; ++i) {\n    tryForward(controller, true);\n  }\n  tryForward(controller, false);\n  tryForward(controller, false);\n  time_system_.advanceTimeAndRun(min_rtt, *dispatcher_, Event::Dispatcher::RunType::Block);\n  for (int i = 0; i < 7; ++i) {\n    sampleLatency(controller, min_rtt);\n  }\n\n  // 43 more requests should cause the minRTT to be done calculating.\n  for (int i = 0; i < 43; ++i) {\n    EXPECT_EQ(controller->concurrencyLimit(), 7);\n    tryForward(controller, true);\n    sampleLatency(controller, min_rtt);\n  }\n\n  // Verify the minRTT value measured is accurate.\n  verifyMinRTTInactive();\n  verifyMinRTTValue(std::chrono::milliseconds(13));\n}\n\nTEST_F(GradientControllerTest, CancelLatencySample) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  
concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 30s\n  request_count: 5\n)EOF\";\n\n  auto controller = makeController(yaml);\n\n  for (int i = 1; i <= 5; ++i) {\n    tryForward(controller, true);\n    sampleLatency(controller, std::chrono::milliseconds(i));\n  }\n  verifyMinRTTValue(std::chrono::milliseconds(3));\n}\n\nTEST_F(GradientControllerTest, SamplePercentileProcessTest) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 30s\n  request_count: 5\n)EOF\";\n\n  auto controller = makeController(yaml);\n\n  tryForward(controller, true);\n  tryForward(controller, true);\n  tryForward(controller, true);\n  tryForward(controller, false);\n  controller->cancelLatencySample();\n  tryForward(controller, true);\n  tryForward(controller, false);\n}\n\nTEST_F(GradientControllerTest, MinRTTBufferTest) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 30s\n  request_count: 5\n  buffer:\n    value: 50\n)EOF\";\n\n  auto controller = makeController(yaml);\n  EXPECT_EQ(controller->concurrencyLimit(), 3);\n\n  // Force a minRTT of 5ms.\n  advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5));\n  verifyMinRTTValue(std::chrono::milliseconds(5));\n\n  // Ensure that the minRTT doesn't decrease due to the buffer added.\n  for (int recalcs = 0; recalcs < 10; ++recalcs) {\n    const auto last_concurrency = controller->concurrencyLimit();\n    for (int i = 1; i <= 5; ++i) {\n      tryForward(controller, true);\n      // Recording sample that's technically higher than the minRTT, but the 50% buffer should\n      // prevent the concurrency limit from decreasing.\n 
     sampleLatency(controller, std::chrono::milliseconds(6));\n    }\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    EXPECT_GT(controller->concurrencyLimit(), last_concurrency);\n  }\n}\n\nTEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 30s\n  request_count: 5\n  buffer:\n    value: 10\n  min_concurrency: 7\n)EOF\";\n\n  auto controller = makeController(yaml);\n  EXPECT_EQ(controller->concurrencyLimit(), 7);\n\n  // Force a minRTT of 5ms.\n  advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5));\n  verifyMinRTTValue(std::chrono::milliseconds(5));\n\n  // Ensure that the concurrency window increases on its own due to the headroom calculation with\n  // the max gradient.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                 Event::Dispatcher::RunType::Block);\n  EXPECT_GE(controller->concurrencyLimit(), 7);\n  EXPECT_LE(controller->concurrencyLimit() / 7.0, 2.0);\n\n  // Make it seem as if the recorded latencies are consistently lower than the measured minRTT.\n  // Ensure that it grows.\n  for (int recalcs = 0; recalcs < 10; ++recalcs) {\n    const auto last_concurrency = controller->concurrencyLimit();\n    for (int i = 1; i <= 5; ++i) {\n      tryForward(controller, true);\n      sampleLatency(controller, std::chrono::milliseconds(4));\n    }\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    // Verify the minimum gradient.\n    EXPECT_LE(last_concurrency, controller->concurrencyLimit());\n    
EXPECT_GE(static_cast<double>(last_concurrency) / controller->concurrencyLimit(), 0.5);\n  }\n\n  // Verify that the concurrency limit can now shrink as necessary.\n  for (int recalcs = 0; recalcs < 10; ++recalcs) {\n    const auto last_concurrency = controller->concurrencyLimit();\n    for (int i = 1; i <= 5; ++i) {\n      tryForward(controller, true);\n      sampleLatency(controller, std::chrono::milliseconds(6));\n    }\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    EXPECT_LT(controller->concurrencyLimit(), last_concurrency);\n    EXPECT_GE(controller->concurrencyLimit(), 7);\n  }\n}\n\nTEST_F(GradientControllerTest, MinRTTReturnToPreviousLimit) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 30s\n  request_count: 5\n)EOF\";\n\n  auto controller = makeController(yaml);\n  EXPECT_EQ(controller->concurrencyLimit(), 3);\n\n  // Get initial minRTT measurement out of the way and advance time so request samples are not\n  // thought to come from the previous minRTT epoch.\n  advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5));\n  time_system_.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher_,\n                                 Event::Dispatcher::RunType::Block);\n\n  // Force the limit calculation to run a few times from some measurements.\n  for (int sample_iters = 0; sample_iters < 5; ++sample_iters) {\n    const auto last_concurrency = controller->concurrencyLimit();\n    for (int i = 1; i <= 5; ++i) {\n      tryForward(controller, true);\n      sampleLatency(controller, std::chrono::milliseconds(4));\n    }\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                   
Event::Dispatcher::RunType::Block);\n    // Verify the value is growing.\n    EXPECT_GT(controller->concurrencyLimit(), last_concurrency);\n  }\n\n  const auto limit_val = controller->concurrencyLimit();\n\n  // Wait until the minRTT recalculation is triggered again and verify the limit drops.\n  time_system_.advanceTimeAndRun(std::chrono::seconds(31), *dispatcher_,\n                                 Event::Dispatcher::RunType::Block);\n  EXPECT_EQ(controller->concurrencyLimit(), 3);\n\n  // Advance time again for request samples to appear from the current epoch.\n  time_system_.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher_,\n                                 Event::Dispatcher::RunType::Block);\n\n  // 49 more requests should cause the minRTT to be done calculating.\n  for (int i = 0; i < 5; ++i) {\n    EXPECT_EQ(controller->concurrencyLimit(), 3);\n    tryForward(controller, true);\n    sampleLatency(controller, std::chrono::milliseconds(13));\n  }\n\n  // Check that we restored the old concurrency limit value.\n  EXPECT_EQ(limit_val, controller->concurrencyLimit());\n}\n\nTEST_F(GradientControllerTest, MinRTTRescheduleTest) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 30s\n  request_count: 5\n)EOF\";\n\n  auto controller = makeController(yaml);\n  EXPECT_EQ(controller->concurrencyLimit(), 3);\n\n  // Get initial minRTT measurement out of the way and advance time so request samples are not\n  // thought to come from the previous minRTT epoch.\n  advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5));\n  time_system_.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher_,\n                                 Event::Dispatcher::RunType::Block);\n\n  // Force the limit calculation to run a few times from some measurements.\n  for (int sample_iters = 0; 
sample_iters < 5; ++sample_iters) {\n    const auto last_concurrency = controller->concurrencyLimit();\n    for (int i = 1; i <= 5; ++i) {\n      tryForward(controller, true);\n      sampleLatency(controller, std::chrono::milliseconds(4));\n    }\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    // Verify the value is growing.\n    EXPECT_GT(controller->concurrencyLimit(), last_concurrency);\n  }\n\n  // Wait until the minRTT recalculation is triggered again and verify the limit drops.\n  time_system_.advanceTimeAndRun(std::chrono::seconds(31), *dispatcher_,\n                                 Event::Dispatcher::RunType::Block);\n  EXPECT_EQ(controller->concurrencyLimit(), 3);\n\n  // Verify sample recalculation doesn't occur during the minRTT window.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                 Event::Dispatcher::RunType::Block);\n  EXPECT_EQ(controller->concurrencyLimit(), 3);\n}\n\nTEST_F(GradientControllerTest, NoSamplesTest) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 30s\n  request_count: 5\n)EOF\";\n\n  auto controller = makeController(yaml);\n  EXPECT_EQ(controller->concurrencyLimit(), 3);\n\n  // Get minRTT measurement out of the way.\n  advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5));\n\n  // Force the limit calculation to run a few times from some measurements.\n  for (int sample_iters = 0; sample_iters < 5; ++sample_iters) {\n    const auto last_concurrency = controller->concurrencyLimit();\n    for (int i = 1; i <= 5; ++i) {\n      tryForward(controller, true);\n      sampleLatency(controller, std::chrono::milliseconds(4));\n    }\n    
time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    // Verify the value is growing.\n    EXPECT_GT(controller->concurrencyLimit(), last_concurrency);\n  }\n\n  // Now we make sure that the limit value doesn't change in the absence of samples.\n  for (int sample_iters = 0; sample_iters < 5; ++sample_iters) {\n    const auto old_limit = controller->concurrencyLimit();\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    EXPECT_EQ(old_limit, controller->concurrencyLimit());\n  }\n}\n\nTEST_F(GradientControllerTest, TimerAccuracyTest) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.123s\nmin_rtt_calc_params:\n  jitter:\n    value: 10.0\n  interval: 100s\n  request_count: 5\n)EOF\";\n\n  // Verify the configuration affects the timers that are kicked off.\n  NiceMock<Event::MockDispatcher> fake_dispatcher;\n  auto sample_timer = new Event::MockTimer();\n  auto rtt_timer = new Event::MockTimer();\n\n  // Expect the sample timer to trigger start immediately upon controller creation.\n  EXPECT_CALL(fake_dispatcher, createTimer_(_))\n      .Times(2)\n      .WillOnce(Return(rtt_timer))\n      .WillOnce(Return(sample_timer));\n  EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _));\n  auto controller =\n      std::make_shared<GradientController>(makeConfig(yaml, runtime_), fake_dispatcher, runtime_,\n                                           \"test_prefix.\", stats_, random_, time_system_);\n\n  // Set the minRTT- this will trigger the timer for the next minRTT calculation.\n\n  // Let's make sure the jitter value can't exceed the configured percentage as well by returning a\n  // random value > 10% of the interval.\n  
EXPECT_CALL(random_, random()).WillOnce(Return(15000));\n  EXPECT_CALL(*rtt_timer, enableTimer(std::chrono::milliseconds(105000), _));\n  // Verify the sample timer is reset after the minRTT calculation occurs.\n  EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _));\n  for (int i = 0; i < 6; ++i) {\n    tryForward(controller, true);\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(5), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    sampleLatency(controller, std::chrono::milliseconds(5));\n  }\n}\n\nTEST_F(GradientControllerTest, TimerAccuracyTestNoJitter) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.123s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 45s\n  request_count: 5\n)EOF\";\n\n  // Verify the configuration affects the timers that are kicked off.\n  NiceMock<Event::MockDispatcher> fake_dispatcher;\n  auto sample_timer = new Event::MockTimer;\n  auto rtt_timer = new Event::MockTimer;\n\n  // Expect the sample timer to trigger start immediately upon controller creation.\n  EXPECT_CALL(fake_dispatcher, createTimer_(_))\n      .Times(2)\n      .WillOnce(Return(rtt_timer))\n      .WillOnce(Return(sample_timer));\n  EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _));\n  auto controller =\n      std::make_shared<GradientController>(makeConfig(yaml, runtime_), fake_dispatcher, runtime_,\n                                           \"test_prefix.\", stats_, random_, time_system_);\n\n  // Set the minRTT- this will trigger the timer for the next minRTT calculation.\n  EXPECT_CALL(*rtt_timer, enableTimer(std::chrono::milliseconds(45000), _));\n  // Verify the sample timer is reset after the minRTT calculation occurs.\n  EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _));\n  for (int i = 0; i < 6; ++i) {\n    
tryForward(controller, true);\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(5), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    sampleLatency(controller, std::chrono::milliseconds(5));\n  }\n}\n\n// Test that consecutively setting the concurrency limit to the minimum triggers a minRTT\n// recalculation.\nTEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) {\n  const std::string yaml = R\"EOF(\nsample_aggregate_percentile:\n  value: 50\nconcurrency_limit_params:\n  max_concurrency_limit:\n  concurrency_update_interval: 0.1s\nmin_rtt_calc_params:\n  jitter:\n    value: 0.0\n  interval: 3600s\n  request_count: 5\n  buffer:\n    value: 0\n  min_concurrency: 7\n)EOF\";\n\n  auto controller = makeController(yaml);\n  EXPECT_EQ(controller->concurrencyLimit(), 7);\n\n  // Force a minRTT of 5ms.\n  advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5));\n  verifyMinRTTValue(std::chrono::milliseconds(5));\n\n  // Ensure that the concurrency window increases on its own due to the headroom calculation with\n  // the max gradient.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                 Event::Dispatcher::RunType::Block);\n  EXPECT_GE(controller->concurrencyLimit(), 7);\n  EXPECT_LE(controller->concurrencyLimit() / 7.0, 2.0);\n\n  // Make it seem as if the recorded latencies are consistently higher than the measured minRTT to\n  // induce a minRTT recalculation after 5 iterations.\n  const auto elevated_latency = std::chrono::milliseconds(10);\n  for (int recalcs = 0; recalcs < 5; ++recalcs) {\n    for (int i = 1; i <= 5; ++i) {\n      tryForward(controller, true);\n      sampleLatency(controller, elevated_latency);\n    }\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n  }\n\n  // Verify that the concurrency limit starts growing 
with newly measured minRTT.\n  for (int recalcs = 0; recalcs < 10; ++recalcs) {\n    const auto last_concurrency = controller->concurrencyLimit();\n    for (int i = 1; i <= 5; ++i) {\n      tryForward(controller, true);\n      sampleLatency(controller, elevated_latency);\n    }\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,\n                                   Event::Dispatcher::RunType::Block);\n    EXPECT_GE(controller->concurrencyLimit(), last_concurrency);\n  }\n}\n\n} // namespace\n} // namespace Controller\n} // namespace AdaptiveConcurrency\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/admission_control/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"admission_control_filter_test\",\n    srcs = [\"admission_control_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.admission_control\",\n    deps = [\n        \"//source/common/common:enum_to_int\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/http/admission_control:admission_control_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.admission_control\",\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/http/admission_control:admission_control_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"success_criteria_evaluator_test\",\n    srcs = [\"success_criteria_evaluator_test.cc\"],\n    extension_name = \"envoy.filters.http.admission_control\",\n    deps = [\n  
      \"//source/extensions/filters/http/admission_control:admission_control_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"admission_control_integration_test\",\n    srcs = [\"admission_control_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.admission_control\",\n    deps = [\n        \"//source/extensions/filters/http/admission_control:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"admission_controller_test\",\n    srcs = [\"controller_test.cc\"],\n    extension_name = \"envoy.filters.http.admission_control\",\n    deps = [\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/http/admission_control:admission_control_filter_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/admission_control/admission_control_filter_test.cc",
    "content": "#include <chrono>\n\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h\"\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h\"\n#include \"envoy/grpc/status.h\"\n\n#include \"common/common/enum_to_int.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/http/admission_control/admission_control.h\"\n#include \"extensions/filters/http/admission_control/evaluators/response_evaluator.h\"\n#include \"extensions/filters/http/admission_control/thread_local_controller.h\"\n\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\nnamespace {\n\nusing RequestData = ThreadLocalController::RequestData;\n\nclass MockThreadLocalController : public ThreadLocal::ThreadLocalObject,\n                                  public ThreadLocalController {\npublic:\n  MOCK_METHOD(RequestData, requestCounts, ());\n  MOCK_METHOD(void, recordSuccess, ());\n  MOCK_METHOD(void, recordFailure, ());\n};\n\nclass MockResponseEvaluator : public ResponseEvaluator {\npublic:\n  MOCK_METHOD(bool, isHttpSuccess, (uint64_t code), (const));\n  MOCK_METHOD(bool, isGrpcSuccess, (uint32_t status), (const));\n};\n\nclass TestConfig : public AdmissionControlFilterConfig {\npublic:\n  TestConfig(const AdmissionControlProto& proto_config, Runtime::Loader& runtime,\n             Random::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls,\n             MockThreadLocalController& controller, std::shared_ptr<ResponseEvaluator> evaluator)\n      : 
AdmissionControlFilterConfig(proto_config, runtime, random, scope, std::move(tls),\n                                     std::move(evaluator)),\n        controller_(controller) {}\n  ThreadLocalController& getController() const override { return controller_; }\n\nprivate:\n  MockThreadLocalController& controller_;\n};\n\nclass AdmissionControlTest : public testing::Test {\npublic:\n  AdmissionControlTest() = default;\n\n  std::shared_ptr<AdmissionControlFilterConfig> makeConfig(const std::string& yaml) {\n    AdmissionControlProto proto;\n    TestUtility::loadFromYamlAndValidate(yaml, proto);\n    auto tls = context_.threadLocal().allocateSlot();\n    evaluator_ = std::make_shared<MockResponseEvaluator>();\n\n    return std::make_shared<TestConfig>(proto, runtime_, random_, scope_, std::move(tls),\n                                        controller_, evaluator_);\n  }\n\n  void setupFilter(std::shared_ptr<AdmissionControlFilterConfig> config) {\n    filter_ = std::make_shared<AdmissionControlFilter>(config, \"test_prefix.\");\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n  }\n\n  void sampleGrpcRequest(const Grpc::Status::WellKnownGrpcStatus status) {\n    Http::TestResponseHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                            {\"grpc-status\", std::to_string(enumToInt(status))}};\n    filter_->encodeHeaders(headers, true);\n  }\n\n  void sampleGrpcRequestTrailer(const Grpc::Status::WellKnownGrpcStatus status) {\n    Http::TestResponseHeaderMapImpl headers{{\"content-type\", \"application/grpc\"},\n                                            {\":status\", \"200\"}};\n    filter_->encodeHeaders(headers, false);\n    Http::TestResponseTrailerMapImpl trailers{{\"grpc-message\", \"foo\"},\n                                              {\"grpc-status\", std::to_string(enumToInt(status))}};\n    filter_->encodeTrailers(trailers);\n  }\n\n  void sampleHttpRequest(const std::string& http_error_code) {\n 
   Http::TestResponseHeaderMapImpl headers{{\":status\", http_error_code}};\n    filter_->encodeHeaders(headers, true);\n  }\n\n  void verifyProbabilities(int success_rate, double expected_rejection_probability) {\n    // Success rate will be the same as the number of successful requests if the total request count\n    // is 100.\n    constexpr int total_request_count = 100;\n    EXPECT_CALL(controller_, requestCounts())\n        .WillRepeatedly(Return(RequestData(total_request_count, success_rate)));\n    EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true));\n\n    Http::TestRequestHeaderMapImpl request_headers;\n    uint32_t rejection_count = 0;\n    // Assuming 4 significant figures in rejection probability calculation.\n    const auto accuracy = 1e4;\n    for (int i = 0; i < accuracy; ++i) {\n      EXPECT_CALL(random_, random()).WillRepeatedly(Return(i));\n      if (filter_->decodeHeaders(request_headers, true) != Http::FilterHeadersStatus::Continue) {\n        ++rejection_count;\n      }\n    }\n\n    EXPECT_NEAR(static_cast<double>(rejection_count) / accuracy, expected_rejection_probability,\n                0.01);\n  }\n\nprotected:\n  std::string stats_prefix_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  Stats::IsolatedStoreImpl scope_;\n  Event::SimulatedTimeSystem time_system_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  std::shared_ptr<AdmissionControlFilter> filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<MockThreadLocalController> controller_;\n  std::shared_ptr<MockResponseEvaluator> evaluator_;\n  const std::string default_yaml_{R\"EOF(\nenabled:\n  default_value: true\n  runtime_key: \"foo.enabled\"\nsampling_window: 10s\naggression:\n  default_value: 1.0\n  runtime_key: \"foo.aggression\"\nsuccess_criteria:\n  http_criteria:\n  grpc_criteria:\n)EOF\"};\n};\n\n// Ensure the filter can be disabled/enabled via 
runtime.\nTEST_F(AdmissionControlTest, FilterRuntimeOverride) {\n  const std::string yaml = R\"EOF(\nenabled:\n  default_value: true\n  runtime_key: \"foo.enabled\"\nsampling_window: 10s\naggression:\n  default_value: 1.0\n  runtime_key: \"foo.aggression\"\nsuccess_criteria:\n  http_criteria:\n  grpc_criteria:\n)EOF\";\n\n  auto config = makeConfig(yaml);\n  setupFilter(config);\n\n  // \"Disable\" the filter via runtime.\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"foo.enabled\", true)).WillRepeatedly(Return(false));\n\n  // The filter is bypassed via runtime.\n  EXPECT_CALL(controller_, requestCounts()).Times(0);\n\n  // We expect no rejections.\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Ensure the filter disregards healthcheck traffic.\nTEST_F(AdmissionControlTest, DisregardHealthChecks) {\n  auto config = makeConfig(default_yaml_);\n  setupFilter(config);\n\n  StreamInfo::MockStreamInfo stream_info;\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillOnce(testing::ReturnRef(stream_info));\n  EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(true));\n\n  // We do not make admission decisions for health checks, so we expect no lookup of request success\n  // counts.\n  EXPECT_CALL(controller_, requestCounts()).Times(0);\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n}\n\n// Validate simple HTTP failure case.\nTEST_F(AdmissionControlTest, HttpFailureBehavior) {\n  auto config = makeConfig(default_yaml_);\n  setupFilter(config);\n\n  // We expect rejection counter to increment upon failure.\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 0, 
time_system_);\n\n  EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 0)));\n  EXPECT_CALL(*evaluator_, isHttpSuccess(500)).WillRepeatedly(Return(false));\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n  sampleHttpRequest(\"500\");\n\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 1, time_system_);\n}\n\n// Validate simple HTTP success case.\nTEST_F(AdmissionControlTest, HttpSuccessBehavior) {\n  auto config = makeConfig(default_yaml_);\n  setupFilter(config);\n\n  // We expect rejection counter to NOT increment upon success.\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 0, time_system_);\n\n  EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 100)));\n  EXPECT_CALL(*evaluator_, isHttpSuccess(200)).WillRepeatedly(Return(true));\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n  sampleHttpRequest(\"200\");\n\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 0, time_system_);\n}\n\n// Validate simple gRPC failure case.\nTEST_F(AdmissionControlTest, GrpcFailureBehavior) {\n  auto config = makeConfig(default_yaml_);\n  setupFilter(config);\n\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 0, time_system_);\n\n  EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 0)));\n  EXPECT_CALL(*evaluator_, isGrpcSuccess(7)).WillRepeatedly(Return(false));\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n  sampleGrpcRequest(Grpc::Status::WellKnownGrpcStatus::PermissionDenied);\n\n  // We expect rejection counter to increment upon failure.\n  
TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 1, time_system_);\n}\n\n// Validate simple gRPC success case with status in the trailer.\nTEST_F(AdmissionControlTest, GrpcSuccessBehaviorTrailer) {\n  auto config = makeConfig(default_yaml_);\n  setupFilter(config);\n\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 0, time_system_);\n\n  EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 100)));\n  EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true));\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n  sampleGrpcRequestTrailer(Grpc::Status::WellKnownGrpcStatus::Ok);\n\n  // We expect rejection counter to NOT increment upon success.\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 0, time_system_);\n}\n\n// Validate simple gRPC failure case with status in the trailer.\nTEST_F(AdmissionControlTest, GrpcFailureBehaviorTrailer) {\n  auto config = makeConfig(default_yaml_);\n  setupFilter(config);\n\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 0, time_system_);\n\n  EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 0)));\n  EXPECT_CALL(*evaluator_, isGrpcSuccess(7)).WillRepeatedly(Return(false));\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n  sampleGrpcRequestTrailer(Grpc::Status::WellKnownGrpcStatus::PermissionDenied);\n\n  // We expect rejection counter to increment upon failure.\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 1, time_system_);\n}\n\n// Validate simple gRPC success case.\nTEST_F(AdmissionControlTest, GrpcSuccessBehavior) {\n  auto config = makeConfig(default_yaml_);\n  setupFilter(config);\n\n  TestUtility::waitForCounterEq(scope_, 
\"test_prefix.rq_rejected\", 0, time_system_);\n\n  EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 100)));\n  EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true));\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n  sampleGrpcRequest(Grpc::Status::WellKnownGrpcStatus::Ok);\n\n  // We expect rejection counter to NOT increment upon success.\n  TestUtility::waitForCounterEq(scope_, \"test_prefix.rq_rejected\", 0, time_system_);\n}\n\n// Validate rejection probabilities.\nTEST_F(AdmissionControlTest, RejectionProbability) {\n  std::string yaml = R\"EOF(\nenabled:\n  default_value: true\n  runtime_key: \"foo.enabled\"\nsampling_window: 10s\nsr_threshold:\n  default_value:\n    value: 100.0\n  runtime_key: \"foo.threshold\"\naggression:\n  default_value: 1.0\n  runtime_key: \"foo.aggression\"\nsuccess_criteria:\n  http_criteria:\n  grpc_criteria:\n)EOF\";\n\n  auto config = makeConfig(yaml);\n  setupFilter(config);\n\n  verifyProbabilities(100 /* success rate */, 0.0 /* expected rejection probability */);\n  verifyProbabilities(95, 0.05);\n  verifyProbabilities(75, 0.25);\n\n  // Increase aggression and expect higher rejection probabilities for the same values.\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.aggression\", 1.0)).WillRepeatedly(Return(2.0));\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.threshold\", 100.0)).WillRepeatedly(Return(100.0));\n  verifyProbabilities(100, 0.0);\n  verifyProbabilities(95, 0.22);\n  verifyProbabilities(75, 0.5);\n\n  // Lower the success rate threshold and expect the rejections to begin at a lower SR and increase\n  // from there.\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.aggression\", 1.0)).WillRepeatedly(Return(1.0));\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.threshold\", 100.0)).WillRepeatedly(Return(95.0));\n  verifyProbabilities(100, 0.0);\n  
verifyProbabilities(98, 0.0);\n  verifyProbabilities(95, 0.0);\n  verifyProbabilities(90, 0.05);\n  verifyProbabilities(75, 0.20);\n  verifyProbabilities(50, 0.46);\n}\n\n} // namespace\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/admission_control/admission_control_integration_test.cc",
    "content": "#include \"common/grpc/common.h\"\n\n#include \"test/integration/autonomous_upstream.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace {\n\nconst std::string ADMISSION_CONTROL_CONFIG =\n    R\"EOF(\nname: envoy.filters.http.admission_control\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl\n  success_criteria:\n    http_criteria:\n    grpc_criteria:\n  sampling_window: 120s\n  aggression:\n    default_value: 2.0\n    runtime_key: \"foo.aggression\"\n  sr_threshold:\n    default_value:\n      value: 100.0\n    runtime_key: \"foo.sr_threshold\"\n  enabled:\n    default_value: true\n    runtime_key: \"foo.enabled\"\n)EOF\";\n\nclass AdmissionControlIntegrationTest : public Event::TestUsingSimulatedTime,\n                                        public testing::TestWithParam<Network::Address::IpVersion>,\n                                        public HttpIntegrationTest {\npublic:\n  AdmissionControlIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(), realTime()) {}\n\n  void SetUp() override {}\n\n  void initialize() override {\n    config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1());\n    config_helper_.addFilter(ADMISSION_CONTROL_CONFIG);\n    HttpIntegrationTest::initialize();\n  }\n\nprotected:\n  void verifyGrpcSuccess(IntegrationStreamDecoderPtr response) {\n    EXPECT_EQ(\"0\", response->trailers()->GrpcStatus()->value().getStringView());\n  }\n\n  void verifyHttpSuccess(IntegrationStreamDecoderPtr response) {\n    EXPECT_EQ(\"200\", response->headers().Status()->value().getStringView());\n  }\n\n  IntegrationStreamDecoderPtr sendGrpcRequestWithReturnCode(uint64_t code) {\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n    // Set the response headers on the autonomous 
upstream.\n    auto headers = std::make_unique<Http::TestResponseHeaderMapImpl>();\n    headers->setStatus(200);\n    headers->setContentType(\"application/grpc\");\n\n    auto trailers = std::make_unique<Http::TestResponseTrailerMapImpl>();\n    trailers->setGrpcMessage(\"this is a message\");\n    trailers->setGrpcStatus(code);\n\n    auto* au = reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get());\n    au->setResponseHeaders(std::move(headers));\n    au->setResponseTrailers(std::move(trailers));\n\n    auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n    response->waitForEndStream();\n    codec_client_->close();\n    return response;\n  }\n\n  IntegrationStreamDecoderPtr sendRequestWithReturnCode(std::string&& code) {\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n    // Set the response headers on the autonomous upstream.\n    auto* au = reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get());\n    au->setResponseHeaders(std::make_unique<Http::TestResponseHeaderMapImpl>(\n        Http::TestResponseHeaderMapImpl({{\":status\", code}})));\n\n    auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n    response->waitForEndStream();\n    codec_client_->close();\n    return response;\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdmissionControlIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));\n\nTEST_P(AdmissionControlIntegrationTest, HttpTest) {\n  autonomous_upstream_ = true;\n  initialize();\n\n  // Drop the success rate to a very low value.\n  ENVOY_LOG(info, \"dropping success rate\");\n  for (int i = 0; i < 300; ++i) {\n    sendRequestWithReturnCode(\"500\");\n  }\n\n  // Measure throttling rate from the admission control filter.\n  double throttle_count = 0;\n  double request_count = 0;\n  ENVOY_LOG(info, \"validating throttling rate\");\n  for (int i = 0; i < 300; ++i) {\n    auto response = 
sendRequestWithReturnCode(\"500\");\n    auto rc = response->headers().Status()->value().getStringView();\n    if (rc == \"503\") {\n      ++throttle_count;\n    } else {\n      ASSERT_EQ(rc, \"500\");\n    }\n    ++request_count;\n  }\n\n  // Given the current throttling rate formula with an aggression of 1, it should result in a ~98%\n  // throttling rate. Allowing an error of 5%.\n  EXPECT_NEAR(throttle_count / request_count, 0.98, 0.05);\n\n  // We now wait for the history to become stale.\n  timeSystem().advanceTimeWait(std::chrono::seconds(120));\n\n  // We expect a 100% success rate after waiting. No throttling should occur.\n  for (int i = 0; i < 100; ++i) {\n    verifyHttpSuccess(sendRequestWithReturnCode(\"200\"));\n  }\n}\n\nTEST_P(AdmissionControlIntegrationTest, GrpcTest) {\n  autonomous_upstream_ = true;\n  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  initialize();\n\n  // Drop the success rate to a very low value.\n  for (int i = 0; i < 300; ++i) {\n    sendGrpcRequestWithReturnCode(14);\n  }\n\n  // Measure throttling rate from the admission control filter.\n  double throttle_count = 0;\n  double request_count = 0;\n  for (int i = 0; i < 300; ++i) {\n    auto response = sendGrpcRequestWithReturnCode(10);\n\n    // When the filter is throttling, it returns an HTTP code 503 and the GRPC status is unset.\n    // Otherwise, we expect a GRPC status of \"Unknown\" as set above.\n    if (response->headers().Status()->value().getStringView() == \"503\") {\n      ++throttle_count;\n    } else {\n      auto grpc_status = Grpc::Common::getGrpcStatus(*(response->trailers()));\n      ASSERT_EQ(grpc_status, Grpc::Status::WellKnownGrpcStatus::Aborted);\n    }\n    ++request_count;\n  }\n\n  // Given the current throttling rate formula with an aggression of 1, it should result in a ~98%\n  // throttling rate. 
Allowing an error of 5%.\n  EXPECT_NEAR(throttle_count / request_count, 0.98, 0.05);\n\n  // We now wait for the history to become stale.\n  timeSystem().advanceTimeWait(std::chrono::seconds(120));\n\n  // We expect a 100% success rate after waiting. No throttling should occur.\n  for (int i = 0; i < 100; ++i) {\n    verifyGrpcSuccess(sendGrpcRequestWithReturnCode(0));\n  }\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/admission_control/config_test.cc",
    "content": "#include <chrono>\n\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h\"\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/http/admission_control/admission_control.h\"\n#include \"extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h\"\n\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\nnamespace {\n\nclass AdmissionControlConfigTest : public testing::Test {\npublic:\n  AdmissionControlConfigTest() = default;\n\n  std::shared_ptr<AdmissionControlFilterConfig> makeConfig(const std::string& yaml) {\n    AdmissionControlProto proto;\n    TestUtility::loadFromYamlAndValidate(yaml, proto);\n    auto tls = context_.threadLocal().allocateSlot();\n    auto evaluator = std::make_unique<SuccessCriteriaEvaluator>(proto.success_criteria());\n    return std::make_shared<AdmissionControlFilterConfig>(proto, runtime_, random_, scope_,\n                                                          std::move(tls), std::move(evaluator));\n  }\n\nprotected:\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  Stats::IsolatedStoreImpl scope_;\n  NiceMock<Random::MockRandomGenerator> random_;\n};\n\n// Verify the configuration when all fields are set.\nTEST_F(AdmissionControlConfigTest, BasicTestAllConfigured) {\n  const std::string yaml = R\"EOF(\nenabled:\n  default_value: false\n  runtime_key: \"foo.enabled\"\nsampling_window: 
1337s\nsr_threshold:\n  default_value:\n    value: 92\n  runtime_key: \"foo.sr_threshold\"\naggression:\n  default_value: 4.2\n  runtime_key: \"foo.aggression\"\nsuccess_criteria:\n  http_criteria:\n  grpc_criteria:\n)EOF\";\n\n  auto config = makeConfig(yaml);\n\n  EXPECT_FALSE(config->filterEnabled());\n  EXPECT_EQ(4.2, config->aggression());\n  EXPECT_EQ(0.92, config->successRateThreshold());\n}\n\n// Verify the config defaults when not specified.\nTEST_F(AdmissionControlConfigTest, BasicTestMinimumConfigured) {\n  // Empty config. No fields are required.\n  AdmissionControlProto proto;\n\n  const std::string yaml = R\"EOF(\nsuccess_criteria:\n  http_criteria:\n  grpc_criteria:\n)EOF\";\n  auto config = makeConfig(yaml);\n\n  EXPECT_TRUE(config->filterEnabled());\n  EXPECT_EQ(1.0, config->aggression());\n  EXPECT_EQ(0.95, config->successRateThreshold());\n}\n\n// Ensure runtime fields are honored.\nTEST_F(AdmissionControlConfigTest, VerifyRuntime) {\n  const std::string yaml = R\"EOF(\nenabled:\n  default_value: false\n  runtime_key: \"foo.enabled\"\nsampling_window: 1337s\nsr_threshold:\n  default_value:\n    value: 92\n  runtime_key: \"foo.sr_threshold\"\naggression:\n  default_value: 4.2\n  runtime_key: \"foo.aggression\"\nsuccess_criteria:\n  http_criteria:\n  grpc_criteria:\n)EOF\";\n\n  auto config = makeConfig(yaml);\n\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"foo.enabled\", false)).WillOnce(Return(true));\n  EXPECT_TRUE(config->filterEnabled());\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.aggression\", 4.2)).WillOnce(Return(1.3));\n  EXPECT_EQ(1.3, config->aggression());\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.sr_threshold\", 92)).WillOnce(Return(24.0));\n  EXPECT_EQ(0.24, config->successRateThreshold());\n\n  // Verify bogus runtime thresholds revert to the default value.\n  EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.sr_threshold\", 92)).WillOnce(Return(250.0));\n  EXPECT_EQ(0.92, config->successRateThreshold());\n  
EXPECT_CALL(runtime_.snapshot_, getDouble(\"foo.sr_threshold\", 92)).WillOnce(Return(-1.0));\n  EXPECT_EQ(0.92, config->successRateThreshold());\n}\n\n} // namespace\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/admission_control/controller_test.cc",
    "content": "#include <chrono>\n\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h\"\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h\"\n\n#include \"extensions/filters/http/admission_control/thread_local_controller.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\nnamespace {\n\nusing RequestData = ThreadLocalController::RequestData;\n\nclass ThreadLocalControllerTest : public testing::Test {\npublic:\n  ThreadLocalControllerTest() : window_(5), tlc_(time_system_, window_) {}\n\nprotected:\n  // Submit a single request per entry in the historical data (this comes out to a single request\n  // each second). The final sample does not advance time to allow for testing of this transition.\n  void fillHistorySlots(const bool successes = true) {\n    std::function<void()> record;\n    if (successes) {\n      record = [this]() { tlc_.recordSuccess(); };\n    } else {\n      record = [this]() { tlc_.recordFailure(); };\n    }\n    for (int tick = 0; tick < window_.count(); ++tick) {\n      record();\n      time_system_.advanceTimeWait(std::chrono::seconds(1));\n    }\n    // Don't sleep after the final sample to allow for measurements.\n    record();\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  std::chrono::seconds window_;\n  ThreadLocalControllerImpl tlc_;\n};\n\n// Test the basic functionality of the admission controller.\nTEST_F(ThreadLocalControllerTest, BasicRecord) {\n  EXPECT_EQ(RequestData(0, 0), tlc_.requestCounts());\n\n  tlc_.recordFailure();\n  EXPECT_EQ(RequestData(1, 0), tlc_.requestCounts());\n\n  tlc_.recordSuccess();\n  EXPECT_EQ(RequestData(2, 1), tlc_.requestCounts());\n}\n\n// Verify that stale historical samples are removed when they grow 
stale.\nTEST_F(ThreadLocalControllerTest, RemoveStaleSamples) {\n  fillHistorySlots();\n\n  // We expect a single request counted in each second of the window.\n  EXPECT_EQ(RequestData(window_.count(), window_.count()), tlc_.requestCounts());\n\n  time_system_.advanceTimeWait(std::chrono::seconds(1));\n\n  // Continuing to sample requests at 1 per second should maintain the same request counts. We'll\n  // record failures here.\n  fillHistorySlots(false);\n  EXPECT_EQ(RequestData(window_.count(), 0), tlc_.requestCounts());\n\n  // Expect the oldest entry to go stale.\n  time_system_.advanceTimeWait(std::chrono::seconds(1));\n  EXPECT_EQ(RequestData(window_.count() - 1, 0), tlc_.requestCounts());\n}\n\n// Verify that stale historical samples are removed when they grow stale.\nTEST_F(ThreadLocalControllerTest, RemoveStaleSamples2) {\n  fillHistorySlots();\n\n  // We expect a single request counted in each second of the window.\n  EXPECT_EQ(RequestData(window_.count(), window_.count()), tlc_.requestCounts());\n\n  // Let's just sit here for a full day. We expect all samples to become stale.\n  time_system_.advanceTimeWait(std::chrono::hours(24));\n\n  EXPECT_EQ(RequestData(0, 0), tlc_.requestCounts());\n}\n\n// Verify that historical samples are made only when there is data to record.\nTEST_F(ThreadLocalControllerTest, VerifyMemoryUsage) {\n  // Make sure we don't add any null data to the history if there are sparse requests.\n  tlc_.recordSuccess();\n  time_system_.advanceTimeWait(std::chrono::seconds(1));\n  tlc_.recordSuccess();\n  time_system_.advanceTimeWait(std::chrono::seconds(3));\n  tlc_.recordSuccess();\n  EXPECT_EQ(RequestData(3, 3), tlc_.requestCounts());\n}\n\n} // namespace\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc",
    "content": "#include <chrono>\n\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h\"\n#include \"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h\"\n\n#include \"common/common/enum_to_int.h\"\n\n#include \"extensions/filters/http/admission_control/admission_control.h\"\n#include \"extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AdmissionControl {\nnamespace {\n\nclass SuccessCriteriaTest : public testing::Test {\npublic:\n  SuccessCriteriaTest() = default;\n\n  void makeEvaluator(const std::string& yaml) {\n    AdmissionControlProto::SuccessCriteria proto;\n    TestUtility::loadFromYamlAndValidate(yaml, proto);\n\n    evaluator_ = std::make_unique<SuccessCriteriaEvaluator>(proto);\n  }\n\n  void expectHttpSuccess(int code) { EXPECT_TRUE(evaluator_->isHttpSuccess(code)); }\n\n  void expectHttpFail(int code) { EXPECT_FALSE(evaluator_->isHttpSuccess(code)); }\n\n  void expectGrpcSuccess(int code) { EXPECT_TRUE(evaluator_->isGrpcSuccess(code)); }\n\n  void expectGrpcFail(int code) { EXPECT_FALSE(evaluator_->isGrpcSuccess(code)); }\n\n  void verifyGrpcDefaultEval() {\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::AlreadyExists);\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Canceled);\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::FailedPrecondition);\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::InvalidArgument);\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::NotFound);\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Ok);\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::OutOfRange);\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::PermissionDenied);\n    
expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Unauthenticated);\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Unimplemented);\n    expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Unknown);\n\n    expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::Aborted));\n    expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::DataLoss));\n    expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded));\n    expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::Internal));\n    expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted));\n    expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unavailable));\n  }\n\n  void verifyHttpDefaultEval() {\n    for (int code = 200; code < 600; ++code) {\n      if (code < 500) {\n        expectHttpSuccess(code);\n      } else {\n        expectHttpFail(code);\n      }\n    }\n  }\n\nprotected:\n  std::unique_ptr<SuccessCriteriaEvaluator> evaluator_;\n};\n\n// Ensure the HTTP code successful range configurations are honored.\nTEST_F(SuccessCriteriaTest, HttpErrorCodes) {\n  const std::string yaml = R\"EOF(\nhttp_criteria:\n  http_success_status:\n  - start: 200\n    end:   300\n  - start: 400\n    end:   500\n)EOF\";\n\n  makeEvaluator(yaml);\n\n  for (int code = 200; code < 600; ++code) {\n    if ((code < 300 && code >= 200) || (code < 500 && code >= 400)) {\n      expectHttpSuccess(code);\n      continue;\n    }\n\n    expectHttpFail(code);\n  }\n\n  verifyGrpcDefaultEval();\n}\n\n// Verify default success values of the evaluator.\nTEST_F(SuccessCriteriaTest, DefaultBehaviorTest) {\n  const std::string yaml = R\"EOF(\nhttp_criteria:\ngrpc_criteria:\n)EOF\";\n\n  makeEvaluator(yaml);\n  verifyGrpcDefaultEval();\n  verifyHttpDefaultEval();\n}\n\n// Check that GRPC error code configurations are honored.\nTEST_F(SuccessCriteriaTest, GrpcErrorCodes) {\n  const std::string yaml = R\"EOF(\ngrpc_criteria:\n  grpc_success_status:\n  - 7\n  - 
13\n)EOF\";\n\n  makeEvaluator(yaml);\n\n  using GrpcStatus = Grpc::Status::WellKnownGrpcStatus;\n  for (int code = GrpcStatus::Ok; code <= GrpcStatus::MaximumKnown; ++code) {\n    if (code == 7 || code == 13) {\n      expectGrpcSuccess(code);\n    } else {\n      expectGrpcFail(code);\n    }\n  }\n\n  verifyHttpDefaultEval();\n}\n\n// Verify correct gRPC range validation.\nTEST_F(SuccessCriteriaTest, GrpcRangeValidation) {\n  const std::string yaml = R\"EOF(\ngrpc_criteria:\n  grpc_success_status:\n    - 17\n)EOF\";\n  EXPECT_THROW_WITH_REGEX(makeEvaluator(yaml), EnvoyException, \"invalid gRPC code*\");\n}\n\n// Verify correct HTTP range validation.\nTEST_F(SuccessCriteriaTest, HttpRangeValidation) {\n  auto check_ranges = [this](std::string&& yaml) {\n    EXPECT_THROW_WITH_REGEX(makeEvaluator(yaml), EnvoyException, \"invalid HTTP range*\");\n  };\n\n  check_ranges(R\"EOF(\nhttp_criteria:\n  http_success_status:\n    - start: 300\n      end:   200\n)EOF\");\n\n  check_ranges(R\"EOF(\nhttp_criteria:\n  http_success_status:\n    - start: 600\n      end:   600\n)EOF\");\n\n  check_ranges(R\"EOF(\nhttp_criteria:\n  http_success_status:\n    - start: 99\n      end:   99\n)EOF\");\n}\n\n} // namespace\n} // namespace AdmissionControl\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/aws_lambda/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"aws_lambda_filter_test\",\n    srcs = [\"aws_lambda_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.aws_lambda\",\n    deps = [\n        \"//source/extensions/filters/http/aws_lambda:aws_lambda_filter_lib\",\n        \"//test/extensions/common/aws:aws_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"aws_lambda_filter_integration_test\",\n    srcs = [\"aws_lambda_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.aws_lambda\",\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/aws_lambda:aws_lambda_filter_lib\",\n        \"//source/extensions/filters/http/aws_lambda:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"arn_test\",\n    srcs = [\"arn_test.cc\"],\n    extension_name = \"envoy.filters.http.aws_lambda\",\n    deps = [\n        \"//source/extensions/filters/http/aws_lambda:aws_lambda_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.aws_lambda\",\n    deps = [\n        \"//source/extensions/filters/http/aws_lambda:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/aws_lambda/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/aws_lambda/arn_test.cc",
    "content": "#include \"extensions/filters/http/aws_lambda/aws_lambda_filter.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsLambdaFilter {\n\nnamespace {\n\nTEST(AwsArn, ValidArn) {\n  constexpr auto input_arn = \"arn:aws:lambda:us-west-2:1337:function:fun\";\n  const absl::optional<Arn> arn = parseArn(input_arn);\n  ASSERT_TRUE(arn.has_value());\n  EXPECT_STREQ(\"aws\", arn->partition().c_str());\n  EXPECT_STREQ(\"lambda\", arn->service().c_str());\n  EXPECT_STREQ(\"us-west-2\", arn->region().c_str());\n  EXPECT_STREQ(\"1337\", arn->accountId().c_str());\n  EXPECT_STREQ(\"function\", arn->resourceType().c_str());\n  EXPECT_STREQ(\"fun\", arn->functionName().c_str());\n}\n\nTEST(AwsArn, ValidArnWithVersion) {\n  constexpr auto input_arn = \"arn:aws:lambda:us-west-2:1337:function:fun:v2\";\n  const absl::optional<Arn> arn = parseArn(input_arn);\n  ASSERT_TRUE(arn.has_value());\n  EXPECT_STREQ(\"aws\", arn->partition().c_str());\n  EXPECT_STREQ(\"lambda\", arn->service().c_str());\n  EXPECT_STREQ(\"us-west-2\", arn->region().c_str());\n  EXPECT_STREQ(\"1337\", arn->accountId().c_str());\n  EXPECT_STREQ(\"function\", arn->resourceType().c_str());\n  EXPECT_STREQ(\"fun:v2\", arn->functionName().c_str());\n}\n\nTEST(AwsArn, InvalidArn) {\n  constexpr auto input_arn = \"arn:aws:lambda:us-west-2:1337:function\";\n  const absl::optional<Arn> arn = parseArn(input_arn);\n  EXPECT_EQ(absl::nullopt, arn);\n}\n\n} // namespace\n} // namespace AwsLambdaFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc",
    "content": "#include <vector>\n\n#include \"source/extensions/filters/http/aws_lambda/request_response.pb.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing source::extensions::filters::http::aws_lambda::Request;\n\nnamespace Envoy {\nnamespace {\n\nclass AwsLambdaFilterIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                       public HttpIntegrationTest {\npublic:\n  AwsLambdaFilterIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {}\n\n  void SetUp() override {\n    // Set these environment variables to quickly sign credentials instead of attempting to query\n    // instance metadata and timing-out.\n    TestEnvironment::setEnvVar(\"AWS_ACCESS_KEY_ID\", \"aws-user\", 1 /*overwrite*/);\n    TestEnvironment::setEnvVar(\"AWS_SECRET_ACCESS_KEY\", \"secret\", 1 /*overwrite*/);\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP1);\n  }\n\n  void TearDown() override { fake_upstream_connection_.reset(); }\n\n  void setupLambdaFilter(bool passthrough) {\n    const std::string filter =\n        R\"EOF(\n            name: envoy.filters.http.aws_lambda\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.aws_lambda.v3.Config\n              arn: \"arn:aws:lambda:us-west-2:123456789:function:test\"\n              payload_passthrough: {}\n            )EOF\";\n    config_helper_.addFilter(fmt::format(filter, passthrough));\n\n    constexpr auto metadata_yaml = R\"EOF(\n        com.amazonaws.lambda:\n          egress_gateway: true\n        )EOF\";\n    config_helper_.addClusterFilterMetadata(metadata_yaml);\n  }\n\n  template <typename TMap>\n  ABSL_MUST_USE_RESULT testing::AssertionResult compareMaps(const TMap& m1, const TMap& m2) {\n    for (auto&& kvp : m1) {\n      auto it = 
m2.find(kvp.first);\n      if (it == m2.end()) {\n        return AssertionFailure() << \"Failed to find value: \" << kvp.first;\n        ;\n      }\n      if (it->second != kvp.second) {\n        return AssertionFailure() << \"Values of key: \" << kvp.first\n                                  << \" are different. expected: \" << kvp.second\n                                  << \" actual: \" << it->second;\n      }\n    }\n    return AssertionSuccess();\n  }\n\n  void runTest(const Http::RequestHeaderMap& request_headers, const std::string& request_body,\n               const std::string& expected_json_request,\n               const Http::ResponseHeaderMap& lambda_response_headers,\n               const std::string& lambda_response_body,\n               const Http::ResponseHeaderMap& expected_response_headers,\n               const std::vector<std::string>& expected_response_cookies,\n               const std::string& expected_response_body) {\n\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n    IntegrationStreamDecoderPtr response;\n    if (request_body.empty()) {\n      response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    } else {\n      auto encoder_decoder = codec_client_->startRequest(request_headers);\n      request_encoder_ = &encoder_decoder.first;\n      response = std::move(encoder_decoder.second);\n      // Chunk the data to simulate a real request.\n      const size_t chunk_size = 5;\n      size_t i = 0;\n      for (; i < request_body.length() / chunk_size; i++) {\n        Buffer::OwnedImpl buffer(request_body.substr(i * chunk_size, chunk_size));\n        codec_client_->sendData(*request_encoder_, buffer, false);\n      }\n      // Send the last chunk flagged as end_stream.\n      Buffer::OwnedImpl buffer(\n          request_body.substr(i * chunk_size, request_body.length() % chunk_size));\n      codec_client_->sendData(*request_encoder_, buffer, true);\n    }\n\n    
ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n    ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n    ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n    Request transformed_request;\n    Request expected_request;\n    TestUtility::loadFromJson(upstream_request_->body().toString(), transformed_request);\n    TestUtility::loadFromJson(expected_json_request, expected_request);\n\n    EXPECT_EQ(expected_request.raw_path(), transformed_request.raw_path());\n    EXPECT_EQ(expected_request.method(), transformed_request.method());\n    EXPECT_EQ(expected_request.body(), transformed_request.body());\n    EXPECT_EQ(expected_request.is_base64_encoded(), transformed_request.is_base64_encoded());\n    EXPECT_TRUE(compareMaps(expected_request.headers(), transformed_request.headers()));\n    EXPECT_TRUE(compareMaps(expected_request.query_string_parameters(),\n                            transformed_request.query_string_parameters()));\n\n    if (lambda_response_body.empty()) {\n      upstream_request_->encodeHeaders(lambda_response_headers, true /*end_stream*/);\n    } else {\n      upstream_request_->encodeHeaders(lambda_response_headers, false /*end_stream*/);\n      Buffer::OwnedImpl buffer(lambda_response_body);\n      upstream_request_->encodeData(buffer, true);\n    }\n\n    response->waitForEndStream();\n    EXPECT_TRUE(response->complete());\n\n    // verify headers\n    expected_response_headers.iterate(\n        [actual_headers = &response->headers()](const Http::HeaderEntry& expected_entry) {\n          const auto* actual_entry = actual_headers->get(\n              Http::LowerCaseString(std::string(expected_entry.key().getStringView())));\n          EXPECT_EQ(actual_entry->value().getStringView(), expected_entry.value().getStringView());\n          return Http::HeaderMap::Iterate::Continue;\n        });\n\n    // verify cookies if we have any\n    if 
(!expected_response_cookies.empty()) {\n      std::vector<std::string> actual_cookies;\n      response->headers().iterate([&actual_cookies](const Http::HeaderEntry& entry) {\n        if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) {\n          actual_cookies.emplace_back(entry.value().getStringView());\n        }\n        return Http::HeaderMap::Iterate::Continue;\n      });\n\n      EXPECT_EQ(expected_response_cookies, actual_cookies);\n    }\n\n    // verify body\n    EXPECT_STREQ(expected_response_body.c_str(), response->body().c_str());\n\n    // cleanup\n    codec_client_->close();\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AwsLambdaFilterIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AwsLambdaFilterIntegrationTest, JsonWrappedHeaderOnlyRequest) {\n  setupLambdaFilter(false /*passthrough*/);\n  HttpIntegrationTest::initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":scheme\", \"http\"},\n                                                 {\":method\", \"GET\"},\n                                                 {\":path\", \"/resize?type=jpg\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"s3-location\", \"mybucket/images/123.jpg\"}};\n  constexpr auto expected_json_request = R\"EOF(\n  {\n    \"rawPath\": \"/resize?type=jpg\",\n    \"method\": \"GET\",\n    \"headers\":{ \"s3-location\": \"mybucket/images/123.jpg\"},\n    \"queryStringParameters\": {\"type\":\"jpg\"},\n    \"body\": \"\",\n    \"isBase64Encoded\": false\n  }\n  )EOF\";\n\n  const std::string lambda_response_body = R\"EOF(\n  {\n      \"body\": \"my-bucket/123-small.jpg\",\n      \"isBase64Encoded\": false,\n      
\"statusCode\": 200,\n      \"cookies\": [\"user=John\", \"session-id=1337\"],\n      \"headers\": {\"x-amz-custom-header\": \"envoy,proxy\"}\n  }\n  )EOF\";\n\n  Http::TestResponseHeaderMapImpl lambda_response_headers{\n      {\":status\", \"201\"},\n      {\"content-type\", \"application/json\"},\n      {\"content-length\", fmt::format(\"{}\", lambda_response_body.length())}};\n\n  Http::TestResponseHeaderMapImpl expected_response_headers{{\":status\", \"200\"},\n                                                            {\"content-type\", \"application/json\"},\n                                                            {\"x-amz-custom-header\", \"envoy,proxy\"}};\n  std::vector<std::string> expected_response_cookies{\"user=John\", \"session-id=1337\"};\n  constexpr auto expected_response_body = \"my-bucket/123-small.jpg\";\n  runTest(request_headers, \"\" /*request_body*/, expected_json_request, lambda_response_headers,\n          lambda_response_body, expected_response_headers, expected_response_cookies,\n          expected_response_body);\n}\n\nTEST_P(AwsLambdaFilterIntegrationTest, JsonWrappedPlainBody) {\n  setupLambdaFilter(false /*passthrough*/);\n  HttpIntegrationTest::initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":scheme\", \"http\"},\n                                                 {\":method\", \"GET\"},\n                                                 {\":path\", \"/resize?type=jpg\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"content-type\", \"text/plain\"},\n                                                 {\"xray-trace-id\", \"qwerty12345\"}};\n\n  constexpr auto request_body = \"AWS Lambda is a FaaS platform\";\n\n  constexpr auto expected_json_request = R\"EOF(\n  {\n    \"rawPath\": \"/resize?type=jpg\",\n    \"method\": \"GET\",\n    \"headers\":{ \"xray-trace-id\": \"qwerty12345\"},\n    \"queryStringParameters\": 
{\"type\":\"jpg\"},\n    \"body\": \"AWS Lambda is a FaaS platform\",\n    \"isBase64Encoded\": false\n  }\n  )EOF\";\n\n  const std::string lambda_response_body = R\"EOF(\n  {\n      \"body\": \"AWS Lambda is cheap!\",\n      \"isBase64Encoded\": false,\n      \"statusCode\": 200,\n      \"cookies\": [\"user=John\", \"session-id=1337\"],\n      \"headers\": {\"x-amz-custom-header\": \"envoy,proxy\"}\n  }\n  )EOF\";\n\n  Http::TestResponseHeaderMapImpl lambda_response_headers{\n      {\":status\", \"201\"},\n      {\"content-type\", \"application/json\"},\n      {\"content-length\", fmt::format(\"{}\", lambda_response_body.length())}};\n\n  Http::TestResponseHeaderMapImpl expected_response_headers{{\":status\", \"200\"},\n                                                            {\"content-type\", \"application/json\"},\n                                                            {\"x-amz-custom-header\", \"envoy,proxy\"}};\n  std::vector<std::string> expected_response_cookies{\"user=John\", \"session-id=1337\"};\n  constexpr auto expected_response_body = \"AWS Lambda is cheap!\";\n  runTest(request_headers, request_body, expected_json_request, lambda_response_headers,\n          lambda_response_body, expected_response_headers, expected_response_cookies,\n          expected_response_body);\n}\n\nTEST_P(AwsLambdaFilterIntegrationTest, JsonWrappedBinaryBody) {\n  setupLambdaFilter(false /*passthrough*/);\n  HttpIntegrationTest::initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":scheme\", \"http\"},\n                                                 {\":method\", \"GET\"},\n                                                 {\":path\", \"/resize?type=jpg\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"content-type\", \"application/octet-stream\"},\n                                                 {\"xray-trace-id\", \"qwerty12345\"}};\n\n  constexpr auto 
request_body = \"this should get base64 encoded\";\n\n  constexpr auto expected_json_request = R\"EOF(\n  {\n    \"rawPath\": \"/resize?type=jpg\",\n    \"method\": \"GET\",\n    \"headers\":{ \"xray-trace-id\": \"qwerty12345\"},\n    \"queryStringParameters\": {\"type\":\"jpg\"},\n    \"body\": \"dGhpcyBzaG91bGQgZ2V0IGJhc2U2NCBlbmNvZGVk\",\n    \"isBase64Encoded\": true\n  }\n  )EOF\";\n\n  const std::string lambda_response_body = R\"EOF(\n  {\n      \"body\": \"QVdTIExhbWJkYSBpcyBjaGVhcCE=\",\n      \"isBase64Encoded\": true,\n      \"statusCode\": 200,\n      \"cookies\": [\"user=John\", \"session-id=1337\"],\n      \"headers\": {\"x-amz-custom-header\": \"envoy,proxy\"}\n  }\n  )EOF\";\n\n  Http::TestResponseHeaderMapImpl lambda_response_headers{\n      {\":status\", \"201\"},\n      {\"content-type\", \"application/json\"},\n      {\"content-length\", fmt::format(\"{}\", lambda_response_body.length())}};\n\n  Http::TestResponseHeaderMapImpl expected_response_headers{{\":status\", \"200\"},\n                                                            {\"content-type\", \"application/json\"},\n                                                            {\"x-amz-custom-header\", \"envoy,proxy\"}};\n  std::vector<std::string> expected_response_cookies{\"user=John\", \"session-id=1337\"};\n  constexpr auto expected_response_body = \"AWS Lambda is cheap!\";\n  runTest(request_headers, request_body, expected_json_request, lambda_response_headers,\n          lambda_response_body, expected_response_headers, expected_response_cookies,\n          expected_response_body);\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc",
    "content": "#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/http/filter.h\"\n\n#include \"source/extensions/filters/http/aws_lambda/request_response.pb.validate.h\"\n\n#include \"extensions/filters/http/aws_lambda/aws_lambda_filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/extensions/common/aws/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsLambdaFilter {\n\nnamespace {\n\nusing Common::Aws::MockSigner;\nusing ::testing::An;\nusing ::testing::ElementsAre;\nusing ::testing::InSequence;\nusing ::testing::Invoke;\nusing ::testing::Pair;\nusing ::testing::Return;\nusing ::testing::ReturnRef;\nusing ::testing::UnorderedElementsAre;\n\nclass AwsLambdaFilterTest : public ::testing::Test {\npublic:\n  AwsLambdaFilterTest() : arn_(parseArn(\"arn:aws:lambda:us-west-2:1337:function:fun\").value()) {}\n\n  void setupFilter(const FilterSettings& settings) {\n    signer_ = std::make_shared<NiceMock<MockSigner>>();\n    filter_ = std::make_unique<Filter>(settings, stats_, signer_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n    setupClusterMetadata();\n  }\n\n  void setupClusterMetadata() {\n    ProtobufWkt::Struct cluster_metadata;\n    TestUtility::loadFromYaml(metadata_yaml_, cluster_metadata);\n    metadata_.mutable_filter_metadata()->insert({\"com.amazonaws.lambda\", cluster_metadata});\n    ON_CALL(*decoder_callbacks_.cluster_info_, metadata()).WillByDefault(ReturnRef(metadata_));\n  }\n\n  std::unique_ptr<Filter> filter_;\n  std::shared_ptr<NiceMock<MockSigner>> signer_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  envoy::config::core::v3::Metadata metadata_;\n  Arn arn_;\n  
Stats::IsolatedStoreImpl stats_store_;\n  FilterStats stats_ = generateStats(\"test\", stats_store_);\n  const std::string metadata_yaml_ = \"egress_gateway: true\";\n};\n\n/**\n * Requests that are _not_ header only, should result in StopIteration.\n */\nTEST_F(AwsLambdaFilterTest, DecodingHeaderStopIteration) {\n  setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/});\n  Http::TestRequestHeaderMapImpl headers;\n  const auto result = filter_->decodeHeaders(headers, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, result);\n}\n\n/**\n * Header only pass-through requests should be signed and Continue iteration.\n */\nTEST_F(AwsLambdaFilterTest, HeaderOnlyShouldContinue) {\n  setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/});\n  EXPECT_CALL(*signer_, sign(_));\n  Http::TestRequestHeaderMapImpl input_headers;\n  const auto result = filter_->decodeHeaders(input_headers, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, result);\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  const auto encode_result = filter_->encodeHeaders(response_headers, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, encode_result);\n}\n\n/**\n * If there's a per route config and the target cluster has the _wrong_ metadata, then skip the\n * filter.\n */\nTEST_F(AwsLambdaFilterTest, PerRouteConfigWrongClusterMetadata) {\n  const std::string metadata_yaml = R\"EOF(\n  egress_gateway: true\n  )EOF\";\n\n  ProtobufWkt::Struct cluster_metadata;\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(metadata_yaml, cluster_metadata);\n  metadata.mutable_filter_metadata()->insert({\"WrongMetadataKey\", cluster_metadata});\n\n  setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/});\n  FilterSettings route_settings{arn_, InvocationMode::Synchronous, true /*passthrough*/};\n  ON_CALL(decoder_callbacks_.route_->route_entry_,\n          
perFilterConfig(HttpFilterNames::get().AwsLambda))\n      .WillByDefault(Return(&route_settings));\n\n  ON_CALL(*decoder_callbacks_.cluster_info_, metadata()).WillByDefault(ReturnRef(metadata));\n  Http::TestRequestHeaderMapImpl headers;\n\n  const auto decode_header_result = filter_->decodeHeaders(headers, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, decode_header_result);\n\n  Buffer::OwnedImpl buf;\n  const auto decode_data_result = filter_->decodeData(buf, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, decode_data_result);\n  EXPECT_EQ(0, buf.length());\n}\n\n/**\n * If there's a per route config and the target cluster has the _correct_ metadata, then we should\n * process the request (i.e. StopIteration if end_stream is false)\n */\nTEST_F(AwsLambdaFilterTest, PerRouteConfigCorrectClusterMetadata) {\n  setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/});\n  FilterSettings route_settings{arn_, InvocationMode::Synchronous, true /*passthrough*/};\n  ON_CALL(decoder_callbacks_.route_->route_entry_,\n          perFilterConfig(HttpFilterNames::get().AwsLambda))\n      .WillByDefault(Return(&route_settings));\n\n  Http::TestRequestHeaderMapImpl headers;\n  const auto result = filter_->decodeHeaders(headers, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, result);\n}\n\nTEST_F(AwsLambdaFilterTest, DecodeDataRecordsPayloadSize) {\n  FilterSettings settings{arn_, InvocationMode::Synchronous, true /*passthrough*/};\n  NiceMock<Stats::MockStore> store;\n  NiceMock<Stats::MockHistogram> histogram;\n  EXPECT_CALL(store, histogramFromString(_, _)).WillOnce(ReturnRef(histogram));\n\n  setupClusterMetadata();\n\n  FilterStats stats(generateStats(\"test\", store));\n  signer_ = std::make_shared<NiceMock<MockSigner>>();\n  filter_ = std::make_unique<Filter>(settings, stats, signer_);\n  filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n\n  // Payload\n  Buffer::OwnedImpl 
buffer;\n  const std::string data(100, 'Z');\n  buffer.add(data);\n\n  Http::TestRequestHeaderMapImpl headers;\n  const auto header_result = filter_->decodeHeaders(headers, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, header_result);\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer).WillOnce(Return(&buffer));\n  EXPECT_CALL(histogram, recordValue(100));\n\n  filter_->decodeData(buffer, true);\n}\n\nTEST_F(AwsLambdaFilterTest, DecodeDataShouldBuffer) {\n  setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/});\n  Http::TestRequestHeaderMapImpl headers;\n  const auto header_result = filter_->decodeHeaders(headers, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, header_result);\n  Buffer::OwnedImpl buffer;\n  const auto data_result = filter_->decodeData(buffer, false);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, data_result);\n}\n\nTEST_F(AwsLambdaFilterTest, DecodeDataShouldSign) {\n  setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/});\n  Http::TestRequestHeaderMapImpl headers;\n  const auto header_result = filter_->decodeHeaders(headers, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, header_result);\n  Buffer::OwnedImpl buffer;\n\n  InSequence seq;\n  EXPECT_CALL(decoder_callbacks_, addDecodedData(_, false));\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer).WillOnce(Return(&buffer));\n  EXPECT_CALL(*signer_, sign(An<Http::RequestHeaderMap&>(), An<const std::string&>()));\n\n  const auto data_result = filter_->decodeData(buffer, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, data_result);\n}\n\nTEST_F(AwsLambdaFilterTest, DecodeHeadersInvocationModeSetsHeader) {\n  setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/});\n  Http::TestRequestHeaderMapImpl headers;\n  const auto header_result = filter_->decodeHeaders(headers, true /*end_stream*/);\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, header_result);\n\n  std::string invocation_header_value;\n  headers.iterate([&invocation_header_value](const Http::HeaderEntry& entry) {\n    if (entry.key().getStringView() == \"x-amz-invocation-type\") {\n      invocation_header_value.append(std::string(entry.value().getStringView()));\n      return Http::HeaderMap::Iterate::Break;\n    }\n    return Http::HeaderMap::Iterate::Continue;\n  });\n\n  EXPECT_EQ(\"RequestResponse\", invocation_header_value);\n}\n\n/**\n * A header-only request with pass-through turned off should result in:\n * - a request with JSON body.\n * - content-length header set appropriately\n * - content-type header set to application/json\n * - headers with multiple values coalesced with a comma\n */\nTEST_F(AwsLambdaFilterTest, DecodeHeadersOnlyRequestWithJsonOn) {\n  using source::extensions::filters::http::aws_lambda::Request;\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  Buffer::OwnedImpl json_buf;\n  auto on_add_decoded_data = [&json_buf](Buffer::Instance& buf, bool) { json_buf.move(buf); };\n  ON_CALL(decoder_callbacks_, addDecodedData(_, _)).WillByDefault(Invoke(on_add_decoded_data));\n  Http::TestRequestHeaderMapImpl headers;\n  headers.setContentLength(0);\n  headers.setPath(\"/resource?proxy=envoy\");\n  headers.setMethod(\"GET\");\n  headers.addCopy(\"x-custom-header\", \"unit\");\n  headers.addCopy(\"x-custom-header\", \"test\");\n  const auto header_result = filter_->decodeHeaders(headers, true /*end_stream*/);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, header_result);\n\n  // Assert it's not empty\n  ASSERT_GT(json_buf.length(), 0);\n\n  ASSERT_NE(headers.ContentType(), nullptr);\n  EXPECT_EQ(\"application/json\", headers.getContentTypeValue());\n\n  // Assert the true (post-transformation) content-length sent to the Lambda endpoint.\n  ASSERT_NE(headers.ContentLength(), nullptr);\n  EXPECT_EQ(fmt::format(\"{}\", json_buf.length()), 
headers.getContentLengthValue());\n\n  // The best way to verify the generated JSON is to deserialize it and inspect it.\n  Request req;\n  TestUtility::loadFromJson(json_buf.toString(), req);\n\n  // Assert the content-length wrapped in JSON reflects the original request's value.\n  EXPECT_THAT(req.headers(), UnorderedElementsAre(Pair(\"content-length\", \"0\"),\n                                                  Pair(\"x-custom-header\", \"unit,test\")));\n  EXPECT_THAT(req.query_string_parameters(), UnorderedElementsAre(Pair(\"proxy\", \"envoy\")));\n  EXPECT_STREQ(\"/resource?proxy=envoy\", req.raw_path().c_str());\n  EXPECT_FALSE(req.is_base64_encoded());\n  EXPECT_TRUE(req.body().empty());\n  EXPECT_STREQ(\"GET\", req.method().c_str());\n}\n\n/**\n * A request with text payload and pass-through turned off should result in:\n * - a request with JSON body containing the original payload\n * - content-length header set appropriately\n * - content-type header set to application/json\n * - headers with multiple values coalesced with a comma\n */\nTEST_F(AwsLambdaFilterTest, DecodeDataWithTextualBodyWithJsonOn) {\n  using source::extensions::filters::http::aws_lambda::Request;\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n\n  Buffer::OwnedImpl decoded_buf;\n  constexpr absl::string_view expected_plain_text = \"Foo bar bazz\";\n  decoded_buf.add(expected_plain_text);\n\n  auto on_modify_decoding_buffer = [&decoded_buf](std::function<void(Buffer::Instance&)> cb) {\n    cb(decoded_buf);\n  };\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer).WillRepeatedly(Return(&decoded_buf));\n  EXPECT_CALL(decoder_callbacks_, modifyDecodingBuffer)\n      .WillRepeatedly(Invoke(on_modify_decoding_buffer));\n\n  std::array<const char*, 4> textual_mime_types = {\"application/json\", \"application/javascript\",\n                                                   \"application/xml\", \"text/plain\"};\n\n  for (auto mime_type : textual_mime_types) {\n    
Http::TestRequestHeaderMapImpl headers;\n    headers.setContentLength(expected_plain_text.length());\n    headers.setPath(\"/resource?proxy=envoy\");\n    headers.setMethod(\"POST\");\n    headers.setContentType(mime_type);\n    headers.addCopy(\"x-custom-header\", \"unit\");\n    headers.addCopy(\"x-custom-header\", \"test\");\n    const auto header_result = filter_->decodeHeaders(headers, false /*end_stream*/);\n    ASSERT_EQ(Http::FilterHeadersStatus::StopIteration, header_result);\n\n    const auto data_result = filter_->decodeData(decoded_buf, true /*end_stream*/);\n    ASSERT_EQ(Http::FilterDataStatus::Continue, data_result);\n\n    // Assert decoded buffer is not drained\n    ASSERT_GT(decoded_buf.length(), 0);\n\n    ASSERT_NE(headers.ContentType(), nullptr);\n    EXPECT_EQ(\"application/json\", headers.getContentTypeValue());\n\n    // Assert the true (post-transformation) content-length sent to the Lambda endpoint.\n    ASSERT_NE(headers.ContentLength(), nullptr);\n    EXPECT_EQ(fmt::format(\"{}\", decoded_buf.length()), headers.getContentLengthValue());\n\n    // The best way to verify the generated JSON is to deserialize it and inspect it.\n    Request req;\n    TestUtility::loadFromJson(decoded_buf.toString(), req);\n\n    // Assert the content-length wrapped in JSON reflects the original request's value.\n    EXPECT_THAT(req.headers(),\n                UnorderedElementsAre(\n                    Pair(\"content-length\", fmt::format(\"{}\", expected_plain_text.length())),\n                    Pair(\"content-type\", mime_type), Pair(\"x-custom-header\", \"unit,test\")));\n    EXPECT_THAT(req.query_string_parameters(), UnorderedElementsAre(Pair(\"proxy\", \"envoy\")));\n    EXPECT_STREQ(\"/resource?proxy=envoy\", req.raw_path().c_str());\n    EXPECT_STREQ(\"POST\", req.method().c_str());\n    EXPECT_FALSE(req.is_base64_encoded());\n    ASSERT_FALSE(req.body().empty());\n    EXPECT_STREQ(expected_plain_text.data(), req.body().c_str());\n\n    // reset the 
buffer for the next iteration\n    decoded_buf.drain(decoded_buf.length());\n    decoded_buf.add(expected_plain_text);\n  }\n}\n\n/**\n * A request with binary payload and pass-through turned off should result in a JSON payload with\n * isBase64Encoded flag set.\n * binary payload is determined by looking at both transfer-encoding and content-type.\n */\nTEST_F(AwsLambdaFilterTest, DecodeDataWithBinaryBodyWithJsonOn) {\n  using source::extensions::filters::http::aws_lambda::Request;\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n\n  Buffer::OwnedImpl decoded_buf;\n  const absl::string_view fake_binary_data = \"this should get base64 encoded\";\n  decoded_buf.add(fake_binary_data);\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer).WillRepeatedly(Return(&decoded_buf));\n  auto on_modify_decoding_buffer = [&decoded_buf](std::function<void(Buffer::Instance&)> cb) {\n    cb(decoded_buf);\n  };\n  EXPECT_CALL(decoder_callbacks_, modifyDecodingBuffer)\n      .WillRepeatedly(Invoke(on_modify_decoding_buffer));\n  std::array<absl::string_view, 3> binary_mime_types = {\"\", \"application/pdf\", \"gzipped\"};\n  for (auto mime_type : binary_mime_types) {\n    Http::TestRequestHeaderMapImpl headers;\n    headers.setPath(\"/\");\n    headers.setMethod(\"POST\");\n    headers.setContentLength(fake_binary_data.length());\n    if (mime_type == \"gzipped\") {\n      headers.setTransferEncoding(\"gzip\");\n    } else if (!mime_type.empty()) {\n      headers.setContentType(mime_type);\n    }\n    const auto header_result = filter_->decodeHeaders(headers, false /*end_stream*/);\n    ASSERT_EQ(Http::FilterHeadersStatus::StopIteration, header_result);\n\n    const auto data_result = filter_->decodeData(decoded_buf, true /*end_stream*/);\n    ASSERT_EQ(Http::FilterDataStatus::Continue, data_result);\n\n    // The best way to verify the generated JSON is to deserialize it and inspect it.\n    Request req;\n    
TestUtility::loadFromJson(decoded_buf.toString(), req);\n\n    ASSERT_TRUE(req.is_base64_encoded());\n    ASSERT_FALSE(req.body().empty());\n    ASSERT_STREQ(req.body().c_str(), \"dGhpcyBzaG91bGQgZ2V0IGJhc2U2NCBlbmNvZGVk\");\n\n    // reset the buffer for the next iteration\n    decoded_buf.drain(decoded_buf.length());\n    decoded_buf.add(fake_binary_data);\n  }\n}\n\nTEST_F(AwsLambdaFilterTest, EncodeHeadersEndStreamShouldSkip) {\n  setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/});\n  Http::TestResponseHeaderMapImpl headers;\n  auto result = filter_->encodeHeaders(headers, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, result);\n\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  result = filter_->encodeHeaders(headers, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, result);\n}\n\n/**\n * If the Lambda function itself raises an error (syntax, exception, etc.) then we should skip\n * encoding headers and skip the filter.\n */\nTEST_F(AwsLambdaFilterTest, EncodeHeadersWithLambdaErrorShouldSkipAndContinue) {\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  Http::TestResponseHeaderMapImpl headers;\n  headers.setStatus(200);\n  headers.addCopy(Http::LowerCaseString(\"x-Amz-Function-Error\"), \"unhandled\");\n  auto result = filter_->encodeHeaders(headers, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, result);\n}\n\n/**\n * If Lambda returns a 5xx error then we should skip encoding headers and skip the filter.\n */\nTEST_F(AwsLambdaFilterTest, EncodeHeadersWithLambda5xxShouldSkipAndContinue) {\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  Http::TestResponseHeaderMapImpl headers;\n  headers.setStatus(500);\n  auto result = filter_->encodeHeaders(headers, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, result);\n}\n\n/**\n * encodeHeaders() in a happy path 
should stop iteration.\n */\nTEST_F(AwsLambdaFilterTest, EncodeHeadersStopsIteration) {\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  Http::TestResponseHeaderMapImpl headers;\n  headers.setStatus(200);\n  auto result = filter_->encodeHeaders(headers, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, result);\n}\n\n/**\n * encodeData() data in pass-through mode should simply return Continue.\n * This is true whether end_stream is true or false.\n */\nTEST_F(AwsLambdaFilterTest, EncodeDataInPassThroughMode) {\n  setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/});\n  Buffer::OwnedImpl buf;\n  filter_->resolveSettings();\n  auto result = filter_->encodeData(buf, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n\n  result = filter_->encodeData(buf, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n\n  setupFilter({arn_, InvocationMode::Asynchronous, true /*passthrough*/});\n  filter_->resolveSettings();\n  result = filter_->encodeData(buf, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n\n  result = filter_->encodeData(buf, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n}\n\n/**\n * encodeData() data in asynchronous mode should simply return Continue.\n * This is true whether end_stream is true or false.\n */\nTEST_F(AwsLambdaFilterTest, EncodeDataInAsynchrnous) {\n  setupFilter({arn_, InvocationMode::Asynchronous, false /*passthrough*/});\n  Buffer::OwnedImpl buf;\n  filter_->resolveSettings();\n  auto result = filter_->encodeData(buf, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n\n  result = filter_->encodeData(buf, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n}\n\n/**\n * encodeData() data in JSON mode should stop iteration if end_stream is false.\n */\nTEST_F(AwsLambdaFilterTest, 
EncodeDataJsonModeStopIterationAndBuffer) {\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  Buffer::OwnedImpl buf;\n  filter_->resolveSettings();\n  auto result = filter_->encodeData(buf, false /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, result);\n}\n\nTEST_F(AwsLambdaFilterTest, EncodeDataAddsLastChunk) {\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  filter_->resolveSettings();\n  Http::TestResponseHeaderMapImpl headers;\n  headers.setStatus(200);\n  filter_->encodeHeaders(headers, false /*end_stream*/);\n\n  Buffer::OwnedImpl buf(std::string(\"foobar\"));\n  EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false));\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer).WillRepeatedly(Return(&buf));\n  filter_->encodeData(buf, true /*end_stream*/);\n}\n\n/**\n * encodeData() data in JSON mode without a 'body' key should translate the 'headers' key to HTTP\n * headers while ignoring any HTTP/2 pseudo-headers.\n */\nTEST_F(AwsLambdaFilterTest, EncodeDataJsonModeTransformToHttp) {\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  filter_->resolveSettings();\n  Http::TestResponseHeaderMapImpl headers;\n  headers.setStatus(200);\n  filter_->encodeHeaders(headers, false /*end_stream*/);\n\n  constexpr auto json_response = R\"EOF(\n  {\n      \"statusCode\": 201,\n      \"headers\": {\n                    \"x-awesome-header\": \"awesome value\",\n                    \":other\": \"should_never_make_it\"\n                 },\n      \"cookies\": [\"session-id=42; Secure; HttpOnly\", \"user=joe\"]\n  }\n  )EOF\";\n\n  Buffer::OwnedImpl encoded_buf;\n  encoded_buf.add(json_response);\n  auto on_modify_encoding_buffer = [&encoded_buf](std::function<void(Buffer::Instance&)> cb) {\n    cb(encoded_buf);\n  };\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer).WillRepeatedly(Return(&encoded_buf));\n  EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer)\n     
 .WillRepeatedly(Invoke(on_modify_encoding_buffer));\n\n  auto result = filter_->encodeData(encoded_buf, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n\n  ASSERT_NE(nullptr, headers.Status());\n  EXPECT_EQ(\"201\", headers.getStatusValue());\n\n  EXPECT_EQ(nullptr, headers.get(Http::LowerCaseString(\":other\")));\n\n  const auto* custom_header = headers.get(Http::LowerCaseString(\"x-awesome-header\"));\n  EXPECT_NE(custom_header, nullptr);\n  EXPECT_EQ(\"awesome value\", custom_header->value().getStringView());\n\n  std::vector<std::string> cookies;\n  headers.iterate([&cookies](const Http::HeaderEntry& entry) {\n    if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) {\n      cookies.emplace_back(entry.value().getStringView());\n    }\n    return Http::HeaderMap::Iterate::Continue;\n  });\n\n  EXPECT_THAT(cookies, ElementsAre(\"session-id=42; Secure; HttpOnly\", \"user=joe\"));\n}\n\n/**\n * encodeData() in JSON mode with a non-empty body should translate the body to plain text if it was\n * base64-encoded.\n */\nTEST_F(AwsLambdaFilterTest, EncodeDataJsonModeBase64EncodedBody) {\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  filter_->resolveSettings();\n  Http::TestResponseHeaderMapImpl headers;\n  headers.setStatus(200);\n  filter_->encodeHeaders(headers, false /*end_stream*/);\n\n  constexpr auto json_base64_body = R\"EOF(\n  {\n      \"statusCode\": 201,\n      \"body\": \"Q29mZmVl\",\n      \"isBase64Encoded\": true\n  }\n  )EOF\";\n\n  constexpr auto json_plain_text_body = R\"EOF(\n  {\n      \"statusCode\": 201,\n      \"body\": \"Beans\",\n      \"isBase64Encoded\": false\n  }\n  )EOF\";\n\n  Buffer::OwnedImpl encoded_buf;\n  encoded_buf.add(json_base64_body);\n  auto on_modify_encoding_buffer = [&encoded_buf](std::function<void(Buffer::Instance&)> cb) {\n    cb(encoded_buf);\n  };\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer).WillRepeatedly(Return(&encoded_buf));\n  
EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer)\n      .WillRepeatedly(Invoke(on_modify_encoding_buffer));\n\n  auto result = filter_->encodeData(encoded_buf, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n  EXPECT_STREQ(\"Coffee\", encoded_buf.toString().c_str());\n\n  encoded_buf.drain(encoded_buf.length());\n\n  encoded_buf.add(json_plain_text_body);\n  result = filter_->encodeData(encoded_buf, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n  EXPECT_STREQ(\"Beans\", encoded_buf.toString().c_str());\n\n  EXPECT_EQ(0ul, filter_->stats().server_error_.value());\n}\n\n/**\n * Encode data in JSON mode _returning_ invalid JSON payload should result in a 500 error.\n */\nTEST_F(AwsLambdaFilterTest, EncodeDataJsonModeInvalidJson) {\n  setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/});\n  filter_->resolveSettings();\n  Http::TestResponseHeaderMapImpl headers;\n  headers.setStatus(200);\n  filter_->encodeHeaders(headers, false /*end_stream*/);\n\n  constexpr auto json_response = R\"EOF(\n  <response>\n        <body>Does XML work??</body>\n  </response>\n  )EOF\";\n\n  Buffer::OwnedImpl encoded_buf;\n  encoded_buf.add(json_response);\n  auto on_modify_encoding_buffer = [&encoded_buf](std::function<void(Buffer::Instance&)> cb) {\n    cb(encoded_buf);\n  };\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer).WillRepeatedly(Return(&encoded_buf));\n  EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer)\n      .WillRepeatedly(Invoke(on_modify_encoding_buffer));\n\n  auto result = filter_->encodeData(encoded_buf, true /*end_stream*/);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, result);\n  EXPECT_EQ(0, encoded_buf.length());\n\n  ASSERT_NE(nullptr, headers.Status());\n  EXPECT_EQ(\"500\", headers.getStatusValue());\n\n  EXPECT_EQ(1ul, filter_->stats().server_error_.value());\n}\n\n} // namespace\n} // namespace AwsLambdaFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} 
// namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/aws_lambda/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.pb.h\"\n#include \"envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.pb.validate.h\"\n\n#include \"extensions/filters/http/aws_lambda/aws_lambda_filter.h\"\n#include \"extensions/filters/http/aws_lambda/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::Truly;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsLambdaFilter {\nnamespace {\n\nusing LambdaConfig = envoy::extensions::filters::http::aws_lambda::v3::Config;\nusing LambdaPerRouteConfig = envoy::extensions::filters::http::aws_lambda::v3::PerRouteConfig;\n\nTEST(AwsLambdaFilterConfigTest, ValidConfigCreatesFilter) {\n  const std::string yaml = R\"EOF(\narn: \"arn:aws:lambda:region:424242:function:fun\"\npayload_passthrough: true\ninvocation_mode: asynchronous\n  )EOF\";\n\n  LambdaConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  AwsLambdaFilterFactory factory;\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  auto has_expected_settings = [](std::shared_ptr<Envoy::Http::StreamFilter> stream_filter) {\n    auto filter = std::static_pointer_cast<Filter>(stream_filter);\n    const auto settings = filter->settingsForTest();\n    return settings.payloadPassthrough() &&\n           settings.invocationMode() == InvocationMode::Asynchronous;\n  };\n\n  EXPECT_CALL(filter_callbacks, addStreamFilter(Truly(has_expected_settings)));\n  cb(filter_callbacks);\n}\n\n/**\n * The default for passthrough is false.\n * The default for invocation_mode is Synchronous.\n */\nTEST(AwsLambdaFilterConfigTest, 
ValidConfigVerifyDefaults) {\n  const std::string yaml = R\"EOF(\narn: \"arn:aws:lambda:region:424242:function:fun\"\n  )EOF\";\n\n  LambdaConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  AwsLambdaFilterFactory factory;\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  auto has_expected_settings = [](std::shared_ptr<Envoy::Http::StreamFilter> stream_filter) {\n    auto filter = std::static_pointer_cast<Filter>(stream_filter);\n    const auto settings = filter->settingsForTest();\n    return settings.payloadPassthrough() == false &&\n           settings.invocationMode() == InvocationMode::Synchronous;\n  };\n\n  EXPECT_CALL(filter_callbacks, addStreamFilter(Truly(has_expected_settings)));\n  cb(filter_callbacks);\n}\n\nTEST(AwsLambdaFilterConfigTest, ValidPerRouteConfigCreatesFilter) {\n  const std::string yaml = R\"EOF(\n  invoke_config:\n    arn: \"arn:aws:lambda:region:424242:function:fun\"\n    payload_passthrough: true\n  )EOF\";\n\n  LambdaPerRouteConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockServerFactoryContext> context;\n  AwsLambdaFilterFactory factory;\n\n  auto route_specific_config_ptr = factory.createRouteSpecificFilterConfig(\n      proto_config, context, ProtobufMessage::getStrictValidationVisitor());\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  ASSERT_NE(route_specific_config_ptr, nullptr);\n  auto filter_settings_ptr =\n      std::static_pointer_cast<const FilterSettings>(route_specific_config_ptr);\n  EXPECT_TRUE(filter_settings_ptr->payloadPassthrough());\n  EXPECT_EQ(InvocationMode::Synchronous, filter_settings_ptr->invocationMode());\n}\n\nTEST(AwsLambdaFilterConfigTest, InvalidARNThrows) {\n  const std::string yaml = 
R\"EOF(\narn: \"arn:aws:lambda:region:424242:fun\"\n  )EOF\";\n\n  LambdaConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  AwsLambdaFilterFactory factory;\n\n  EXPECT_THROW(factory.createFilterFactoryFromProto(proto_config, \"stats\", context),\n               EnvoyException);\n}\n\nTEST(AwsLambdaFilterConfigTest, PerRouteConfigWithInvalidARNThrows) {\n  const std::string yaml = R\"EOF(\n  invoke_config:\n    arn: \"arn:aws:lambda:region:424242:fun\"\n    payload_passthrough: true\n  )EOF\";\n\n  LambdaPerRouteConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockServerFactoryContext> context;\n  AwsLambdaFilterFactory factory;\n\n  EXPECT_THROW(factory.createRouteSpecificFilterConfig(\n                   proto_config, context, ProtobufMessage::getStrictValidationVisitor()),\n               EnvoyException);\n}\n\nTEST(AwsLambdaFilterConfigTest, AsynchrnousPerRouteConfig) {\n  const std::string yaml = R\"EOF(\n  invoke_config:\n    arn: \"arn:aws:lambda:region:424242:function:fun\"\n    payload_passthrough: false\n    invocation_mode: asynchronous\n  )EOF\";\n\n  LambdaPerRouteConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockServerFactoryContext> context;\n  AwsLambdaFilterFactory factory;\n\n  auto route_specific_config_ptr = factory.createRouteSpecificFilterConfig(\n      proto_config, context, ProtobufMessage::getStrictValidationVisitor());\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  ASSERT_NE(route_specific_config_ptr, nullptr);\n  auto filter_settings_ptr =\n      std::static_pointer_cast<const FilterSettings>(route_specific_config_ptr);\n  EXPECT_FALSE(filter_settings_ptr->payloadPassthrough());\n  EXPECT_EQ(InvocationMode::Asynchronous, 
filter_settings_ptr->invocationMode());\n}\n\n} // namespace\n} // namespace AwsLambdaFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/aws_request_signing/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"aws_request_signing_filter_test\",\n    srcs = [\"aws_request_signing_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.aws_request_signing\",\n    deps = [\n        \"//source/extensions/filters/http/aws_request_signing:aws_request_signing_filter_lib\",\n        \"//test/extensions/common/aws:aws_mocks\",\n        \"//test/mocks/http:http_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.aws_request_signing\",\n    deps = [\n        \"//source/extensions/filters/http/aws_request_signing:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/aws_request_signing/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc",
    "content": "#include \"extensions/common/aws/signer.h\"\n#include \"extensions/filters/http/aws_request_signing/aws_request_signing_filter.h\"\n\n#include \"test/extensions/common/aws/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsRequestSigningFilter {\nnamespace {\n\nclass MockFilterConfig : public FilterConfig {\npublic:\n  MockFilterConfig() { signer_ = std::make_shared<Common::Aws::MockSigner>(); }\n\n  Common::Aws::Signer& signer() override { return *signer_; }\n  FilterStats& stats() override { return stats_; }\n  const std::string& hostRewrite() const override { return host_rewrite_; }\n\n  std::shared_ptr<Common::Aws::MockSigner> signer_;\n  Stats::IsolatedStoreImpl stats_store_;\n  FilterStats stats_{Filter::generateStats(\"test\", stats_store_)};\n  std::string host_rewrite_;\n};\n\nclass AwsRequestSigningFilterTest : public testing::Test {\npublic:\n  void setup() {\n    filter_config_ = std::make_shared<MockFilterConfig>();\n    filter_ = std::make_unique<Filter>(filter_config_);\n  }\n\n  std::shared_ptr<MockFilterConfig> filter_config_;\n  std::unique_ptr<Filter> filter_;\n};\n\n// Verify filter functionality when signing works.\nTEST_F(AwsRequestSigningFilterTest, SignSucceeds) {\n  setup();\n  EXPECT_CALL(*(filter_config_->signer_), sign(_)).Times(1);\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1UL, filter_config_->stats_.signing_added_.value());\n}\n\n// Verify filter functionality when a host rewrite happens.\nTEST_F(AwsRequestSigningFilterTest, SignWithHostRewrite) {\n  setup();\n  filter_config_->host_rewrite_ = \"foo\";\n  EXPECT_CALL(*(filter_config_->signer_), sign(_)).Times(1);\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, 
filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(\"foo\", headers.getHostValue());\n  EXPECT_EQ(1UL, filter_config_->stats_.signing_added_.value());\n}\n\n// Verify filter functionality when signing fails.\nTEST_F(AwsRequestSigningFilterTest, SignFails) {\n  setup();\n  EXPECT_CALL(*(filter_config_->signer_), sign(_)).WillOnce(Invoke([](Http::HeaderMap&) -> void {\n    throw EnvoyException(\"failed\");\n  }));\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1UL, filter_config_->stats_.signing_failed_.value());\n}\n\n// Verify FilterConfigImpl's getters.\nTEST_F(AwsRequestSigningFilterTest, FilterConfigImplGetters) {\n  Stats::IsolatedStoreImpl stats;\n  auto signer = std::make_unique<Common::Aws::MockSigner>();\n  const auto* signer_ptr = signer.get();\n  FilterConfigImpl config(std::move(signer), \"prefix\", stats, \"foo\");\n\n  EXPECT_EQ(signer_ptr, &config.signer());\n  EXPECT_EQ(0UL, config.stats().signing_added_.value());\n  EXPECT_EQ(\"foo\", config.hostRewrite());\n}\n\n} // namespace\n} // namespace AwsRequestSigningFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/aws_request_signing/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.pb.h\"\n#include \"envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.pb.validate.h\"\n\n#include \"extensions/filters/http/aws_request_signing/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace AwsRequestSigningFilter {\n\nusing AwsRequestSigningProtoConfig =\n    envoy::extensions::filters::http::aws_request_signing::v3::AwsRequestSigning;\n\nTEST(AwsRequestSigningFilterConfigTest, SimpleConfig) {\n  const std::string yaml = R\"EOF(\nservice_name: s3\nregion: us-west-2\n  )EOF\";\n\n  AwsRequestSigningProtoConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  AwsRequestSigningFilterFactory factory;\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  EXPECT_CALL(filter_callbacks, addStreamDecoderFilter(_));\n  cb(filter_callbacks);\n}\n\n} // namespace AwsRequestSigningFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/buffer/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"buffer_filter_test\",\n    srcs = [\"buffer_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.buffer\",\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/http/buffer:buffer_filter_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"buffer_filter_integration_test\",\n    srcs = [\"buffer_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.buffer\",\n    deps = [\n        \"//source/extensions/filters/http/buffer:config\",\n        \"//test/config:utility_lib\",\n        \"//test/integration:http_protocol_integration_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.buffer\",\n    deps = [\n        \"//source/extensions/filters/http/buffer:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        
\"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/buffer/buffer_filter_integration_test.cc",
    "content": "#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"test/integration/http_protocol_integration.h\"\n\nnamespace Envoy {\nnamespace {\n\nusing BufferIntegrationTest = HttpProtocolIntegrationTest;\n\nINSTANTIATE_TEST_SUITE_P(Protocols, BufferIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nTEST_P(BufferIntegrationTest, RouterNotFoundBodyBuffer) {\n  config_helper_.addFilter(ConfigHelper::defaultBufferFilter());\n  testRouterNotFoundWithBody();\n}\n\nTEST_P(BufferIntegrationTest, RouterRequestAndResponseWithGiantBodyBuffer) {\n  config_helper_.addFilter(ConfigHelper::defaultBufferFilter());\n  testRouterRequestAndResponseWithBody(4 * 1024 * 1024, 4 * 1024 * 1024, false);\n}\n\nTEST_P(BufferIntegrationTest, RouterHeaderOnlyRequestAndResponseBuffer) {\n  config_helper_.addFilter(ConfigHelper::defaultBufferFilter());\n  testRouterHeaderOnlyRequestAndResponse();\n}\n\nTEST_P(BufferIntegrationTest, RouterRequestAndResponseWithBodyBuffer) {\n  config_helper_.addFilter(ConfigHelper::defaultBufferFilter());\n  testRouterRequestAndResponseWithBody(1024, 512, false);\n}\n\nTEST_P(BufferIntegrationTest, RouterRequestAndResponseWithZeroByteBodyBuffer) {\n  config_helper_.addFilter(ConfigHelper::defaultBufferFilter());\n  testRouterRequestAndResponseWithBody(0, 0, false);\n}\n\nTEST_P(BufferIntegrationTest, RouterRequestPopulateContentLength) {\n  config_helper_.addFilter(ConfigHelper::defaultBufferFilter());\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"POST\"}, {\":scheme\", \"http\"}, {\":path\", 
\"/shelf\"}, {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  IntegrationStreamDecoderPtr response = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, \"123\", false);\n  codec_client_->sendData(*request_encoder_, \"456\", false);\n  codec_client_->sendData(*request_encoder_, \"789\", true);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  auto* content_length = upstream_request_->headers().ContentLength();\n  ASSERT_NE(content_length, nullptr);\n  EXPECT_EQ(content_length->value().getStringView(), \"9\");\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(BufferIntegrationTest, RouterRequestPopulateContentLengthOnTrailers) {\n  config_helper_.addFilter(ConfigHelper::defaultBufferFilter());\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"POST\"}, {\":scheme\", \"http\"}, {\":path\", \"/shelf\"}, {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  IntegrationStreamDecoderPtr response = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, \"0123\", false);\n  codec_client_->sendData(*request_encoder_, \"456\", false);\n  codec_client_->sendData(*request_encoder_, \"789\", false);\n  Http::TestRequestTrailerMapImpl request_trailers{{\"request\", \"trailer\"}};\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  
ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  auto* content_length = upstream_request_->headers().ContentLength();\n  ASSERT_NE(content_length, nullptr);\n  EXPECT_EQ(content_length->value().getStringView(), \"10\");\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(BufferIntegrationTest, RouterRequestBufferLimitExceeded) {\n  // Make sure the connection isn't closed during request upload.\n  // Without a large drain-close it's possible that the local reply will be sent\n  // during request upload, and continued upload will result in TCP reset before\n  // the response is read.\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(2000 * 1000); });\n  config_helper_.addFilter(ConfigHelper::smallBufferFilter());\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/dynamo/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024 * 65);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"413\", response->headers().getStatusValue());\n}\n\nConfigHelper::HttpModifierFunction overrideConfig(const std::string& json_config) {\n  
envoy::extensions::filters::http::buffer::v3::BufferPerRoute buffer_per_route;\n  TestUtility::loadFromJson(json_config, buffer_per_route);\n\n  return\n      [buffer_per_route](\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              cfg) {\n        auto* config = cfg.mutable_route_config()\n                           ->mutable_virtual_hosts()\n                           ->Mutable(0)\n                           ->mutable_typed_per_filter_config();\n\n        (*config)[\"envoy.filters.http.buffer\"].PackFrom(buffer_per_route);\n      };\n}\n\nTEST_P(BufferIntegrationTest, RouteDisabled) {\n  ConfigHelper::HttpModifierFunction mod = overrideConfig(R\"EOF({\"disabled\": true})EOF\");\n  config_helper_.addConfigModifier(mod);\n  config_helper_.addFilter(ConfigHelper::smallBufferFilter());\n  config_helper_.setBufferLimits(1024, 1024);\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"}},\n      1024 * 65);\n\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(BufferIntegrationTest, RouteOverride) {\n  ConfigHelper::HttpModifierFunction mod = overrideConfig(R\"EOF({\"buffer\": {\n    \"max_request_bytes\": 5242880\n  }})EOF\");\n  config_helper_.addConfigModifier(mod);\n  config_helper_.addFilter(ConfigHelper::smallBufferFilter());\n\n  initialize();\n\n  codec_client_ = 
makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"}},\n      1024 * 65);\n\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/buffer/buffer_filter_test.cc",
    "content": "#include <chrono>\n#include <memory>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"extensions/filters/http/buffer/buffer_filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::InSequence;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace BufferFilter {\n\nclass BufferFilterTest : public testing::Test {\npublic:\n  BufferFilterConfigSharedPtr setupConfig() {\n    envoy::extensions::filters::http::buffer::v3::Buffer proto_config;\n    proto_config.mutable_max_request_bytes()->set_value(1024 * 1024);\n    return std::make_shared<BufferFilterConfig>(proto_config);\n  }\n\n  BufferFilterTest() : config_(setupConfig()), filter_(config_) {\n    filter_.setDecoderFilterCallbacks(callbacks_);\n  }\n\n  void routeLocalConfig(const Router::RouteSpecificFilterConfig* route_settings,\n                        const Router::RouteSpecificFilterConfig* vhost_settings) {\n    ON_CALL(callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Buffer))\n        .WillByDefault(Return(route_settings));\n    ON_CALL(callbacks_.route_->route_entry_.virtual_host_,\n            perFilterConfig(HttpFilterNames::get().Buffer))\n        .WillByDefault(Return(vhost_settings));\n  }\n\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  BufferFilterConfigSharedPtr config_;\n  BufferFilter filter_;\n  // Create a runtime loader, so that tests can manually manipulate runtime guarded features.\n  TestScopedRuntime scoped_runtime;\n};\n\nTEST_F(BufferFilterTest, 
HeaderOnlyRequest) {\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers, true));\n}\n\nTEST_F(BufferFilterTest, TestMetadata) {\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map));\n}\n\nTEST_F(BufferFilterTest, RequestWithData) {\n  InSequence s;\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers, false));\n\n  Buffer::OwnedImpl data1(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.decodeData(data1, false));\n\n  Buffer::OwnedImpl data2(\" world\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data2, true));\n}\n\nTEST_F(BufferFilterTest, TxResetAfterEndStream) {\n  InSequence s;\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers, false));\n\n  Buffer::OwnedImpl data1(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.decodeData(data1, false));\n\n  Buffer::OwnedImpl data2(\" world\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data2, true));\n\n  // It's possible that the stream will be reset on the TX side even after RX end stream. 
Mimic\n  // that here.\n  filter_.onDestroy();\n}\n\nTEST_F(BufferFilterTest, ContentLengthPopulation) {\n  InSequence s;\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers, false));\n\n  Buffer::OwnedImpl data1(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.decodeData(data1, false));\n\n  Buffer::OwnedImpl data2(\" world\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data2, true));\n  EXPECT_EQ(headers.getContentLengthValue(), \"11\");\n}\n\nTEST_F(BufferFilterTest, ContentLengthPopulationInTrailers) {\n  InSequence s;\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers, false));\n\n  Buffer::OwnedImpl data1(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.decodeData(data1, false));\n  ASSERT_EQ(headers.ContentLength(), nullptr);\n\n  Http::TestRequestTrailerMapImpl trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers));\n  EXPECT_EQ(headers.getContentLengthValue(), \"5\");\n}\n\nTEST_F(BufferFilterTest, ContentLengthPopulationAlreadyPresent) {\n  InSequence s;\n\n  Http::TestRequestHeaderMapImpl headers{{\"content-length\", \"3\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers, false));\n\n  Buffer::OwnedImpl data(\"foo\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, true));\n  EXPECT_EQ(headers.getContentLengthValue(), \"3\");\n}\n\nTEST_F(BufferFilterTest, RouteConfigOverride) {\n  envoy::extensions::filters::http::buffer::v3::BufferPerRoute route_cfg;\n  auto* buf = route_cfg.mutable_buffer();\n  buf->mutable_max_request_bytes()->set_value(123);\n  envoy::extensions::filters::http::buffer::v3::BufferPerRoute vhost_cfg;\n  vhost_cfg.set_disabled(true);\n  BufferFilterSettings route_settings(route_cfg);\n  
BufferFilterSettings vhost_settings(vhost_cfg);\n  routeLocalConfig(&route_settings, &vhost_settings);\n\n  EXPECT_CALL(callbacks_, setDecoderBufferLimit(123ULL));\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers, false));\n\n  filter_.onDestroy();\n}\n\nTEST_F(BufferFilterTest, VHostConfigOverride) {\n  envoy::extensions::filters::http::buffer::v3::BufferPerRoute vhost_cfg;\n  auto* buf = vhost_cfg.mutable_buffer();\n  buf->mutable_max_request_bytes()->set_value(789);\n  BufferFilterSettings vhost_settings(vhost_cfg);\n  routeLocalConfig(nullptr, &vhost_settings);\n\n  EXPECT_CALL(callbacks_, setDecoderBufferLimit(789ULL));\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers, false));\n  filter_.onDestroy();\n}\n\nTEST_F(BufferFilterTest, RouteDisabledConfigOverride) {\n  envoy::extensions::filters::http::buffer::v3::BufferPerRoute vhost_cfg;\n  vhost_cfg.set_disabled(true);\n  BufferFilterSettings vhost_settings(vhost_cfg);\n  routeLocalConfig(nullptr, &vhost_settings);\n\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers, false));\n  Buffer::OwnedImpl data1(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data1, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data1, true));\n}\n\n} // namespace BufferFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/buffer/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.h\"\n#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.validate.h\"\n\n#include \"extensions/filters/http/buffer/buffer_filter.h\"\n#include \"extensions/filters/http/buffer/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace BufferFilter {\nnamespace {\n\nTEST(BufferFilterFactoryTest, BufferFilterCorrectYaml) {\n  const std::string yaml_string = R\"EOF(\n  max_request_bytes: 1028\n  )EOF\";\n\n  envoy::extensions::filters::http::buffer::v3::Buffer proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  BufferFilterFactory factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_));\n  cb(filter_callback);\n}\n\nTEST(BufferFilterFactoryTest, BufferFilterCorrectProto) {\n  envoy::extensions::filters::http::buffer::v3::Buffer config;\n  config.mutable_max_request_bytes()->set_value(1028);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  BufferFilterFactory factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_));\n  cb(filter_callback);\n}\n\nTEST(BufferFilterFactoryTest, BufferFilterEmptyProto) {\n  BufferFilterFactory factory;\n  auto empty_proto = factory.createEmptyConfigProto();\n  envoy::extensions::filters::http::buffer::v3::Buffer config =\n      
*dynamic_cast<envoy::extensions::filters::http::buffer::v3::Buffer*>(empty_proto.get());\n\n  config.mutable_max_request_bytes()->set_value(1028);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_));\n  cb(filter_callback);\n}\n\nTEST(BufferFilterFactoryTest, BufferFilterNoMaxRequestBytes) {\n  BufferFilterFactory factory;\n  auto empty_proto = factory.createEmptyConfigProto();\n  envoy::extensions::filters::http::buffer::v3::Buffer config =\n      *dynamic_cast<envoy::extensions::filters::http::buffer::v3::Buffer*>(empty_proto.get());\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW_WITH_REGEX(factory.createFilterFactoryFromProto(config, \"stats\", context),\n                          EnvoyException, \"Proto constraint validation failed\");\n}\n\nTEST(BufferFilterFactoryTest, BufferFilterEmptyRouteProto) {\n  BufferFilterFactory factory;\n  EXPECT_NO_THROW({\n    EXPECT_NE(nullptr, dynamic_cast<envoy::extensions::filters::http::buffer::v3::BufferPerRoute*>(\n                           factory.createEmptyRouteConfigProto().get()));\n  });\n}\n\nTEST(BufferFilterFactoryTest, BufferFilterRouteSpecificConfig) {\n  BufferFilterFactory factory;\n  NiceMock<Server::Configuration::MockServerFactoryContext> factory_context;\n\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();\n  EXPECT_TRUE(proto_config.get());\n\n  auto& cfg = dynamic_cast<envoy::extensions::filters::http::buffer::v3::BufferPerRoute&>(\n      *proto_config.get());\n  cfg.set_disabled(true);\n\n  Router::RouteSpecificFilterConfigConstSharedPtr route_config =\n      factory.createRouteSpecificFilterConfig(*proto_config, factory_context,\n                                              
ProtobufMessage::getNullValidationVisitor());\n  EXPECT_TRUE(route_config.get());\n\n  const auto* inflated = dynamic_cast<const BufferFilterSettings*>(route_config.get());\n  EXPECT_TRUE(inflated);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(BufferFilterFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.buffer\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace BufferFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cache/BUILD",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_cc_test_library\", \"envoy_package\")\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"common\",\n    hdrs = [\"common.h\"],\n    deps = [\n        \"//source/extensions/filters/http/cache:cache_headers_utils_lib\",\n        \"//source/extensions/filters/http/cache:http_cache_lib\",\n        \"//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"cache_headers_utils_test\",\n    srcs = [\"cache_headers_utils_test.cc\"],\n    extension_name = \"envoy.filters.http.cache\",\n    deps = [\n        \":common\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/cache:cache_headers_utils_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"http_cache_test\",\n    srcs = [\"http_cache_test.cc\"],\n    extension_name = \"envoy.filters.http.cache\",\n    deps = [\n        \":common\",\n        \"//source/extensions/filters/http/cache:http_cache_lib\",\n        \"//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"cache_filter_test\",\n    srcs = [\"cache_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.cache\",\n    deps = [\n        \":common\",\n        \"//source/extensions/filters/http/cache:cache_filter_lib\",\n        \"//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib\",\n        
\"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"cacheability_utils_test\",\n    srcs = [\"cacheability_utils_test.cc\"],\n    extension_name = \"envoy.filters.http.cache\",\n    deps = [\n        \"//source/extensions/filters/http/cache:cacheability_utils_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.cache\",\n    deps = [\n        \"//source/extensions/filters/http/cache:config\",\n        \"//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"cache_filter_integration_test\",\n    srcs = [\n        \"cache_filter_integration_test.cc\",\n    ],\n    extension_name = \"envoy.filters.http.cache\",\n    deps = [\n        \"//source/extensions/filters/http/cache:config\",\n        \"//source/extensions/filters/http/cache:http_cache_lib\",\n        \"//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib\",\n        \"//test/integration:http_protocol_integration_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/cache/cache_filter_integration_test.cc",
    "content": "#include \"test/integration/http_protocol_integration.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\nnamespace {\n\n// TODO(toddmgreer): Expand integration test to include age header values,\n// expiration, range headers, HEAD requests, trailers, config customizations,\n// cache-control headers, and conditional header fields, as they are\n// implemented.\n\nclass CacheIntegrationTest : public Event::TestUsingSimulatedTime,\n                             public HttpProtocolIntegrationTest {\npublic:\n  void TearDown() override {\n    cleanupUpstreamAndDownstream();\n    HttpProtocolIntegrationTest::TearDown();\n  }\n\n  void initializeFilter(const std::string& config) {\n    config_helper_.addFilter(config);\n    initialize();\n    codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  }\n\n  const std::string default_config{R\"EOF(\n    name: \"envoy.filters.http.cache\"\n    typed_config:\n        \"@type\": \"type.googleapis.com/envoy.extensions.filters.http.cache.v3alpha.CacheConfig\"\n        typed_config:\n           \"@type\": \"type.googleapis.com/envoy.source.extensions.filters.http.cache.SimpleHttpCacheConfig\"\n    )EOF\"};\n  DateFormatter formatter_{\"%a, %d %b %Y %H:%M:%S GMT\"};\n};\n\nINSTANTIATE_TEST_SUITE_P(Protocols, CacheIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nTEST_P(CacheIntegrationTest, MissInsertHit) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  // Set system time to cause Envoy's cached formatted time to match time on this thread.\n  simTime().setSystemTime(std::chrono::hours(1));\n  initializeFilter(default_config);\n\n  // Include test name and params in URL to make each test's requests unique.\n  const 
Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", absl::StrCat(\"/\", protocolTestParamsToString({GetParam(), 0}))},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"MissInsertHit\"}};\n\n  const std::string response_body(42, 'a');\n  Http::TestResponseHeaderMapImpl response_headers = {\n      {\":status\", \"200\"},\n      {\"date\", formatter_.now(simTime())},\n      {\"cache-control\", \"public,max-age=3600\"},\n      {\"content-length\", std::to_string(response_body.size())}};\n\n  // Send first request, and get response from upstream.\n  {\n    IntegrationStreamDecoderPtr response_decoder =\n        codec_client_->makeHeaderOnlyRequest(request_headers);\n    waitForNextUpstreamRequest();\n    upstream_request_->encodeHeaders(response_headers, /*end_stream=*/false);\n    // send 42 'a's\n    upstream_request_->encodeData(response_body, /*end_stream=*/true);\n    // Wait for the response to be read by the codec client.\n    response_decoder->waitForEndStream();\n    EXPECT_TRUE(response_decoder->complete());\n    EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers));\n    EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr);\n    EXPECT_EQ(response_decoder->body(), response_body);\n    EXPECT_THAT(waitForAccessLog(access_log_name_), testing::HasSubstr(\"- via_upstream\"));\n  }\n\n  // Advance time, to verify the original date header is preserved.\n  simTime().advanceTimeWait(Seconds(10));\n\n  // Send second request, and get response from cache.\n  {\n    IntegrationStreamDecoderPtr response_decoder =\n        codec_client_->makeHeaderOnlyRequest(request_headers);\n    response_decoder->waitForEndStream();\n    EXPECT_TRUE(response_decoder->complete());\n    EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers));\n    EXPECT_EQ(response_decoder->body(), response_body);\n    EXPECT_THAT(response_decoder->headers(), 
HeaderHasValueRef(Http::Headers::get().Age, \"10\"));\n    // Advance time to force a log flush.\n    simTime().advanceTimeWait(Seconds(1));\n    EXPECT_THAT(waitForAccessLog(access_log_name_, 1),\n                testing::HasSubstr(\"RFCF cache.response_from_cache_filter\"));\n  }\n}\n\nTEST_P(CacheIntegrationTest, ExpiredValidated) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  // Set system time to cause Envoy's cached formatted time to match time on this thread.\n  simTime().setSystemTime(std::chrono::hours(1));\n  initializeFilter(default_config);\n\n  // Include test name and params in URL to make each test's requests unique.\n  const Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", absl::StrCat(\"/\", protocolTestParamsToString({GetParam(), 0}))},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"ExpiredValidated\"}};\n\n  const std::string response_body(42, 'a');\n  Http::TestResponseHeaderMapImpl response_headers = {\n      {\":status\", \"200\"},\n      {\"date\", formatter_.now(simTime())},\n      {\"cache-control\", \"max-age=10\"}, // expires after 10 s\n      {\"content-length\", std::to_string(response_body.size())},\n      {\"etag\", \"abc123\"}};\n\n  // Send first request, and get response from upstream.\n  {\n    IntegrationStreamDecoderPtr response_decoder =\n        codec_client_->makeHeaderOnlyRequest(request_headers);\n    waitForNextUpstreamRequest();\n    upstream_request_->encodeHeaders(response_headers, /*end_stream=*/false);\n    // send 42 'a's\n    upstream_request_->encodeData(response_body, true);\n    // Wait for the response to be read by the codec client.\n    response_decoder->waitForEndStream();\n    EXPECT_TRUE(response_decoder->complete());\n    EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers));\n    EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr);\n    EXPECT_EQ(response_decoder->body(), 
response_body);\n    EXPECT_THAT(waitForAccessLog(access_log_name_), testing::HasSubstr(\"- via_upstream\"));\n  }\n\n  // Advance time for the cached response to be stale (expired)\n  // Also to make sure response date header gets updated with the 304 date\n  simTime().advanceTimeWait(Seconds(11));\n\n  // Send second request, the cached response should be validate then served\n  {\n    IntegrationStreamDecoderPtr response_decoder =\n        codec_client_->makeHeaderOnlyRequest(request_headers);\n    waitForNextUpstreamRequest();\n\n    // Check for injected precondition headers\n    const Http::TestRequestHeaderMapImpl injected_headers = {{\"if-none-match\", \"abc123\"}};\n    EXPECT_THAT(upstream_request_->headers(), IsSupersetOfHeaders(injected_headers));\n\n    // Create a 304 (not modified) response -> cached response is valid\n    const std::string not_modified_date = formatter_.now(simTime());\n    const Http::TestResponseHeaderMapImpl not_modified_response_headers = {\n        {\":status\", \"304\"}, {\"date\", not_modified_date}};\n    upstream_request_->encodeHeaders(not_modified_response_headers, /*end_stream=*/true);\n\n    // The original response headers should be updated with 304 response headers\n    response_headers.setDate(not_modified_date);\n\n    // Wait for the response to be read by the codec client.\n    response_decoder->waitForEndStream();\n\n    // Check that the served response is the cached response\n    EXPECT_TRUE(response_decoder->complete());\n    EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers));\n    EXPECT_EQ(response_decoder->body(), response_body);\n\n    // A response that has been validated should not contain an Age header as it is equivalent to a\n    // freshly served response from the origin, unless the 304 response has an Age header, which\n    // means it was served by an upstream cache.\n    EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr);\n\n    // Advance 
time to force a log flush.\n    simTime().advanceTimeWait(Seconds(1));\n    EXPECT_THAT(waitForAccessLog(access_log_name_, 1),\n                testing::HasSubstr(\"RFCF cache.response_from_cache_filter\"));\n  }\n}\n\nTEST_P(CacheIntegrationTest, ExpiredFetchedNewResponse) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  // Set system time to cause Envoy's cached formatted time to match time on this thread.\n  simTime().setSystemTime(std::chrono::hours(1));\n  initializeFilter(default_config);\n\n  // Include test name and params in URL to make each test's requests unique.\n  const Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", absl::StrCat(\"/\", protocolTestParamsToString({GetParam(), 0}))},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"ExpiredFetchedNewResponse\"}};\n\n  // Send first request, and get response from upstream.\n  {\n    const std::string response_body(10, 'a');\n    Http::TestResponseHeaderMapImpl response_headers = {\n        {\":status\", \"200\"},\n        {\"date\", formatter_.now(simTime())},\n        {\"cache-control\", \"max-age=10\"}, // expires after 10 s\n        {\"content-length\", std::to_string(response_body.size())},\n        {\"etag\", \"a1\"}};\n\n    IntegrationStreamDecoderPtr response_decoder =\n        codec_client_->makeHeaderOnlyRequest(request_headers);\n    waitForNextUpstreamRequest();\n    upstream_request_->encodeHeaders(response_headers, /*end_stream=*/false);\n    // send 10 'a's\n    upstream_request_->encodeData(response_body, /*end_stream=*/true);\n    // Wait for the response to be read by the codec client.\n    response_decoder->waitForEndStream();\n    EXPECT_TRUE(response_decoder->complete());\n    EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers));\n    EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr);\n    EXPECT_EQ(response_decoder->body(), response_body);\n    
EXPECT_THAT(waitForAccessLog(access_log_name_), testing::HasSubstr(\"- via_upstream\"));\n  }\n\n  // Advance time for the cached response to be stale (expired)\n  // Also to make sure response date header gets updated with the 304 date\n  simTime().advanceTimeWait(Seconds(11));\n\n  // Send second request, validation of the cached response should be attempted but should fail\n  // The new response should be served\n  {\n    const std::string response_body(20, 'a');\n    Http::TestResponseHeaderMapImpl response_headers = {\n        {\":status\", \"200\"},\n        {\"date\", formatter_.now(simTime())},\n        {\"content-length\", std::to_string(response_body.size())},\n        {\"etag\", \"a2\"}};\n\n    IntegrationStreamDecoderPtr response_decoder =\n        codec_client_->makeHeaderOnlyRequest(request_headers);\n    waitForNextUpstreamRequest();\n\n    // Check for injected precondition headers\n    Http::TestRequestHeaderMapImpl injected_headers = {{\"if-none-match\", \"a1\"}};\n    EXPECT_THAT(upstream_request_->headers(), IsSupersetOfHeaders(injected_headers));\n\n    // Reply with the updated response -> cached response is invalid\n    upstream_request_->encodeHeaders(response_headers, /*end_stream=*/false);\n    // send 20 'a's\n    upstream_request_->encodeData(response_body, /*end_stream=*/true);\n\n    // Wait for the response to be read by the codec client.\n    response_decoder->waitForEndStream();\n    // Check that the served response is the updated response\n    EXPECT_TRUE(response_decoder->complete());\n    EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers));\n    EXPECT_EQ(response_decoder->body(), response_body);\n    // Check that age header does not exist as this is not a cached response\n    EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr);\n\n    // Advance time to force a log flush.\n    simTime().advanceTimeWait(Seconds(1));\n    EXPECT_THAT(waitForAccessLog(access_log_name_), 
testing::HasSubstr(\"- via_upstream\"));\n  }\n}\n\n// Send the same GET request with body and trailers twice, then check that the response\n// doesn't have an age header, to confirm that it wasn't served from cache.\nTEST_P(CacheIntegrationTest, GetRequestWithBodyAndTrailers) {\n  // Set system time to cause Envoy's cached formatted time to match time on this thread.\n  simTime().setSystemTime(std::chrono::hours(1));\n  initializeFilter(default_config);\n\n  // Include test name and params in URL to make each test's requests unique.\n  const Http::TestRequestHeaderMapImpl request_headers = {\n      {\":method\", \"GET\"},\n      {\":path\", absl::StrCat(\"/\", protocolTestParamsToString({GetParam(), 0}))},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"MissInsertHit\"}};\n  Http::TestRequestTrailerMapImpl request_trailers{{\"request1\", \"trailer1\"},\n                                                   {\"request2\", \"trailer2\"}};\n  Http::TestResponseHeaderMapImpl response_headers = {{\":status\", \"200\"},\n                                                      {\"date\", formatter_.now(simTime())},\n                                                      {\"cache-control\", \"public,max-age=3600\"},\n                                                      {\"content-length\", \"42\"}};\n\n  for (int i = 0; i < 2; ++i) {\n    auto encoder_decoder = codec_client_->startRequest(request_headers);\n    request_encoder_ = &encoder_decoder.first;\n    auto response = std::move(encoder_decoder.second);\n    codec_client_->sendData(*request_encoder_, 13, false);\n    codec_client_->sendTrailers(*request_encoder_, request_trailers);\n    waitForNextUpstreamRequest();\n    upstream_request_->encodeHeaders(response_headers, /*end_stream=*/false);\n    // send 42 'a's\n    upstream_request_->encodeData(42, true);\n    // Wait for the response to be read by the codec client.\n    response->waitForEndStream();\n    EXPECT_TRUE(response->complete());\n    
EXPECT_THAT(response->headers(), IsSupersetOfHeaders(response_headers));\n    EXPECT_EQ(response->headers().get(Http::Headers::get().Age), nullptr);\n    EXPECT_EQ(response->body(), std::string(42, 'a'));\n  }\n}\n\n} // namespace\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cache/cache_filter_test.cc",
    "content": "#include \"envoy/event/dispatcher.h\"\n\n#include \"common/http/headers.h\"\n\n#include \"extensions/filters/http/cache/cache_filter.h\"\n#include \"extensions/filters/http/cache/simple_http_cache/simple_http_cache.h\"\n\n#include \"test/extensions/filters/http/cache/common.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\nnamespace {\n\nclass CacheFilterTest : public ::testing::Test {\nprotected:\n  // The filter has to be created as a shared_ptr to enable shared_from_this() which is used in the\n  // cache callbacks.\n  CacheFilterSharedPtr makeFilter(HttpCache& cache) {\n    auto filter = std::make_shared<CacheFilter>(config_, /*stats_prefix=*/\"\", context_.scope(),\n                                                context_.timeSource(), cache);\n    filter->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter->setEncoderFilterCallbacks(encoder_callbacks_);\n    return filter;\n  }\n\n  void SetUp() override {\n    ON_CALL(decoder_callbacks_, dispatcher()).WillByDefault(::testing::ReturnRef(*dispatcher_));\n    // Initialize the time source (otherwise it returns the real time)\n    time_source_.setSystemTime(std::chrono::hours(1));\n    // Use the initialized time source to set the response date header\n    response_headers_.setDate(formatter_.now(time_source_));\n  }\n\n  void testDecodeRequestMiss(CacheFilterSharedPtr filter) {\n    // The filter should not encode any headers or data as no cached response exists.\n    EXPECT_CALL(decoder_callbacks_, encodeHeaders_).Times(0);\n    EXPECT_CALL(decoder_callbacks_, encodeData).Times(0);\n\n    // The filter should stop decoding iteration when decodeHeaders is called as a cache lookup is\n    // in progress.\n    EXPECT_EQ(filter->decodeHeaders(request_headers_, true),\n        
      Http::FilterHeadersStatus::StopAllIterationAndWatermark);\n\n    // The filter should continue decoding when the cache lookup result (miss) is ready.\n    EXPECT_CALL(decoder_callbacks_, continueDecoding).Times(1);\n\n    // The cache lookup callback should be posted to the dispatcher.\n    // Run events on the dispatcher so that the callback is invoked.\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_);\n  }\n\n  void testDecodeRequestHitNoBody(CacheFilterSharedPtr filter) {\n    // The filter should encode cached headers.\n    EXPECT_CALL(decoder_callbacks_,\n                encodeHeaders_(testing::AllOf(IsSupersetOfHeaders(response_headers_),\n                                              HeaderHasValueRef(Http::Headers::get().Age, age)),\n                               true));\n\n    // The filter should not encode any data as the response has no body.\n    EXPECT_CALL(decoder_callbacks_, encodeData).Times(0);\n\n    // The filter should stop decoding iteration when decodeHeaders is called as a cache lookup is\n    // in progress.\n    EXPECT_EQ(filter->decodeHeaders(request_headers_, true),\n              Http::FilterHeadersStatus::StopAllIterationAndWatermark);\n\n    // The filter should not continue decoding when the cache lookup result is ready, as the\n    // expected result is a hit.\n    EXPECT_CALL(decoder_callbacks_, continueDecoding).Times(0);\n\n    // The cache lookup callback should be posted to the dispatcher.\n    // Run events on the dispatcher so that the callback is invoked.\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_);\n  }\n\n  void testDecodeRequestHitWithBody(CacheFilterSharedPtr filter, std::string body) {\n    // The filter should encode cached headers.\n    EXPECT_CALL(decoder_callbacks_,\n                
encodeHeaders_(testing::AllOf(IsSupersetOfHeaders(response_headers_),\n                                              HeaderHasValueRef(Http::Headers::get().Age, age)),\n                               false));\n\n    // The filter should encode cached data.\n    EXPECT_CALL(\n        decoder_callbacks_,\n        encodeData(testing::Property(&Buffer::Instance::toString, testing::Eq(body)), true));\n\n    // The filter should stop decoding iteration when decodeHeaders is called as a cache lookup is\n    // in progress.\n    EXPECT_EQ(filter->decodeHeaders(request_headers_, true),\n              Http::FilterHeadersStatus::StopAllIterationAndWatermark);\n\n    // The filter should not continue decoding when the cache lookup result is ready, as the\n    // expected result is a hit.\n    EXPECT_CALL(decoder_callbacks_, continueDecoding).Times(0);\n\n    // The cache lookup callback should be posted to the dispatcher.\n    // Run events on the dispatcher so that the callback is invoked.\n    // The posted lookup callback will cause another callback to be posted (when getBody() is\n    // called) which should also be invoked.\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_);\n  }\n\n  void waitBeforeSecondRequest() { time_source_.advanceTimeWait(delay_); }\n\n  SimpleHttpCache simple_cache_;\n  envoy::extensions::filters::http::cache::v3alpha::CacheConfig config_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  Event::SimulatedTimeSystem time_source_;\n  DateFormatter formatter_{\"%a, %d %b %Y %H:%M:%S GMT\"};\n  Http::TestRequestHeaderMapImpl request_headers_{\n      {\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-forwarded-proto\", \"https\"}};\n  Http::TestResponseHeaderMapImpl response_headers_{{\":status\", \"200\"},\n                                                    {\"cache-control\", \"public,max-age=3600\"}};\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> 
decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  Api::ApiPtr api_ = Api::createApiForTest();\n  Event::DispatcherPtr dispatcher_ = api_->allocateDispatcher(\"test_thread\");\n  const Seconds delay_ = Seconds(10);\n  const std::string age = std::to_string(delay_.count());\n};\n\nTEST_F(CacheFilterTest, UncacheableRequest) {\n  request_headers_.setHost(\"UncacheableRequest\");\n\n  // POST requests are uncacheable\n  request_headers_.setMethod(Http::Headers::get().MethodValues.Post);\n\n  for (int request = 1; request <= 2; request++) {\n    // Create filter for the request\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Decode request headers\n    // The filter should not encode any headers or data as no cached response exists.\n    EXPECT_CALL(decoder_callbacks_, encodeHeaders_).Times(0);\n    EXPECT_CALL(decoder_callbacks_, encodeData).Times(0);\n\n    // Uncacheable requests should bypass the cache filter-> No cache lookups should be initiated.\n    EXPECT_EQ(filter->decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::Continue);\n    ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_);\n\n    // Encode response header\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue);\n    filter->onDestroy();\n  }\n}\n\nTEST_F(CacheFilterTest, UncacheableResponse) {\n  request_headers_.setHost(\"UncacheableResponse\");\n\n  // Responses with \"Cache-Control: no-store\" are uncacheable\n  response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"no-store\");\n\n  for (int request = 1; request <= 2; request++) {\n    // Create filter for the request.\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response headers.\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue);\n    filter->onDestroy();\n  
}\n}\n\nTEST_F(CacheFilterTest, CacheMiss) {\n  for (int request = 1; request <= 2; request++) {\n    // Each iteration a request is sent to a different host, therefore the second one is a miss\n    request_headers_.setHost(\"CacheMiss\" + std::to_string(request));\n\n    // Create filter for request 1\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response header\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue);\n    filter->onDestroy();\n  }\n}\n\nTEST_F(CacheFilterTest, CacheHitNoBody) {\n  request_headers_.setHost(\"CacheHitNoBody\");\n\n  {\n    // Create filter for request 1.\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response headers.\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue);\n    filter->onDestroy();\n  }\n  waitBeforeSecondRequest();\n  {\n    // Create filter for request 2.\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestHitNoBody(filter);\n\n    filter->onDestroy();\n  }\n}\n\nTEST_F(CacheFilterTest, CacheHitWithBody) {\n  request_headers_.setHost(\"CacheHitWithBody\");\n  const std::string body = \"abc\";\n\n  {\n    // Create filter for request 1.\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response.\n    Buffer::OwnedImpl buffer(body);\n    response_headers_.setContentLength(body.size());\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue);\n\n    filter->onDestroy();\n  }\n  waitBeforeSecondRequest();\n  {\n    // Create filter for request 2\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestHitWithBody(filter, body);\n\n    
filter->onDestroy();\n  }\n}\n\nTEST_F(CacheFilterTest, SuccessfulValidation) {\n  request_headers_.setHost(\"SuccessfulValidation\");\n  const std::string body = \"abc\";\n  const std::string etag = \"abc123\";\n  const std::string last_modified_date = formatter_.now(time_source_);\n  {\n    // Create filter for request 1\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response\n    // Add Etag & Last-Modified headers to the response for validation\n    response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag);\n    response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, last_modified_date);\n\n    Buffer::OwnedImpl buffer(body);\n    response_headers_.setContentLength(body.size());\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue);\n    filter->onDestroy();\n  }\n  waitBeforeSecondRequest();\n  {\n    // Create filter for request 2\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Make request require validation\n    request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"no-cache\");\n\n    // Decoding the request should find a cached response that requires validation.\n    // As far as decoding the request is concerned, this is the same as a cache miss with the\n    // exception of injecting validation precondition headers.\n    testDecodeRequestMiss(filter);\n\n    // Make sure validation conditional headers are added\n    const Http::TestRequestHeaderMapImpl injected_headers = {\n        {\"if-none-match\", etag}, {\"if-modified-since\", last_modified_date}};\n    EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers));\n\n    // Encode 304 response\n    // Advance time to make sure the cached date is updated with the 304 date\n    const std::string not_modified_date = 
formatter_.now(time_source_);\n    Http::TestResponseHeaderMapImpl not_modified_response_headers = {{\":status\", \"304\"},\n                                                                     {\"date\", not_modified_date}};\n\n    // The filter should stop encoding iteration when encodeHeaders is called as a cached response\n    // is being fetched and added to the encoding stream. StopIteration does not stop encodeData of\n    // the same filter from being called\n    EXPECT_EQ(filter->encodeHeaders(not_modified_response_headers, true),\n              Http::FilterHeadersStatus::StopIteration);\n\n    // Check for the cached response headers with updated date\n    Http::TestResponseHeaderMapImpl updated_response_headers = response_headers_;\n    updated_response_headers.setDate(not_modified_date);\n    EXPECT_THAT(not_modified_response_headers, IsSupersetOfHeaders(updated_response_headers));\n\n    // A 304 response should not have a body, so encodeData should not be called\n    // However, if a body is present by mistake, encodeData should stop iteration until\n    // encoding the cached response is done\n    Buffer::OwnedImpl not_modified_body;\n    EXPECT_EQ(filter->encodeData(not_modified_body, true),\n              Http::FilterDataStatus::StopIterationAndBuffer);\n\n    // The filter should add the cached response body to encoded data.\n    Buffer::OwnedImpl buffer(body);\n    EXPECT_CALL(\n        encoder_callbacks_,\n        addEncodedData(testing::Property(&Buffer::Instance::toString, testing::Eq(body)), true));\n\n    // The cache getBody callback should be posted to the dispatcher.\n    // Run events on the dispatcher so that the callback is invoked.\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    ::testing::Mock::VerifyAndClearExpectations(&encoder_callbacks_);\n\n    filter->onDestroy();\n  }\n}\n\nTEST_F(CacheFilterTest, UnsuccessfulValidation) {\n  request_headers_.setHost(\"UnsuccessfulValidation\");\n  const std::string body = 
\"abc\";\n  const std::string etag = \"abc123\";\n  const std::string last_modified_date = formatter_.now(time_source_);\n  {\n    // Create filter for request 1\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response\n    // Add Etag & Last-Modified headers to the response for validation.\n    response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag);\n    response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, last_modified_date);\n\n    Buffer::OwnedImpl buffer(body);\n    response_headers_.setContentLength(body.size());\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue);\n    filter->onDestroy();\n  }\n  waitBeforeSecondRequest();\n  {\n    // Create filter for request 2.\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Make request require validation\n    request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"no-cache\");\n\n    // Decoding the request should find a cached response that requires validation.\n    // As far as decoding the request is concerned, this is the same as a cache miss with the\n    // exception of injecting validation precondition headers.\n    testDecodeRequestMiss(filter);\n\n    // Make sure validation conditional headers are added.\n    const Http::TestRequestHeaderMapImpl injected_headers = {\n        {\"if-none-match\", etag}, {\"if-modified-since\", last_modified_date}};\n    EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers));\n\n    // Encode new response.\n    // Change the status code to make sure new headers are served, not the cached ones.\n    response_headers_.setStatus(201);\n\n    // The filter should not stop encoding iteration as this is a new response.\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, false), 
Http::FilterHeadersStatus::Continue);\n    Buffer::OwnedImpl new_body;\n    EXPECT_EQ(filter->encodeData(new_body, true), Http::FilterDataStatus::Continue);\n\n    // The response headers should have the new status.\n    EXPECT_THAT(response_headers_, HeaderHasValueRef(Http::Headers::get().Status, \"201\"));\n\n    // The filter should not encode any data.\n    EXPECT_CALL(encoder_callbacks_, addEncodedData).Times(0);\n\n    // If a cache getBody callback is made, it should be posted to the dispatcher.\n    // Run events on the dispatcher so that any available callbacks are invoked.\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    ::testing::Mock::VerifyAndClearExpectations(&encoder_callbacks_);\n\n    filter->onDestroy();\n  }\n}\n\nTEST_F(CacheFilterTest, SingleSatisfiableRange) {\n  request_headers_.setHost(\"SingleSatisfiableRange\");\n  const std::string body = \"abc\";\n\n  {\n    // Create filter for request 1.\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response.\n    Buffer::OwnedImpl buffer(body);\n    response_headers_.setContentLength(body.size());\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue);\n    filter->onDestroy();\n  }\n  waitBeforeSecondRequest();\n  {\n    // Add range info to headers.\n    request_headers_.addReference(Http::Headers::get().Range, \"bytes=-2\");\n\n    response_headers_.setStatus(static_cast<uint64_t>(Http::Code::PartialContent));\n    response_headers_.addReference(Http::Headers::get().ContentRange, \"bytes 1-2/3\");\n    response_headers_.setContentLength(2);\n\n    // Create filter for request 2\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Decode request 2 header\n    EXPECT_CALL(decoder_callbacks_,\n                
encodeHeaders_(testing::AllOf(IsSupersetOfHeaders(response_headers_),\n                                              HeaderHasValueRef(Http::Headers::get().Age, age)),\n                               false));\n\n    EXPECT_CALL(\n        decoder_callbacks_,\n        encodeData(testing::Property(&Buffer::Instance::toString, testing::Eq(\"bc\")), true));\n    EXPECT_EQ(filter->decodeHeaders(request_headers_, true),\n              Http::FilterHeadersStatus::StopAllIterationAndWatermark);\n\n    // The cache lookup callback should be posted to the dispatcher.\n    // Run events on the dispatcher so that the callback is invoked.\n    // The posted lookup callback will cause another callback to be posted (when getBody() is\n    // called) which should also be invoked.\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_);\n    filter->onDestroy();\n  }\n}\n\nTEST_F(CacheFilterTest, MultipleSatisfiableRanges) {\n  request_headers_.setHost(\"MultipleSatisfiableRanges\");\n  const std::string body = \"abc\";\n\n  {\n    // Create filter for request 1\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response header\n    Buffer::OwnedImpl buffer(body);\n    response_headers_.setContentLength(body.size());\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue);\n    filter->onDestroy();\n  }\n  waitBeforeSecondRequest();\n  {\n    // Add range info to headers\n    // multi-part responses are not supported, 200 expected\n    request_headers_.addReference(Http::Headers::get().Range, \"bytes=0-1,-2\");\n\n    // Create filter for request 2\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Decode request 2 header\n    EXPECT_CALL(decoder_callbacks_,\n                
encodeHeaders_(testing::AllOf(IsSupersetOfHeaders(response_headers_),\n                                              HeaderHasValueRef(Http::Headers::get().Age, age)),\n                               false));\n\n    EXPECT_CALL(\n        decoder_callbacks_,\n        encodeData(testing::Property(&Buffer::Instance::toString, testing::Eq(body)), true));\n    EXPECT_EQ(filter->decodeHeaders(request_headers_, true),\n              Http::FilterHeadersStatus::StopAllIterationAndWatermark);\n\n    // The cache lookup callback should be posted to the dispatcher.\n    // Run events on the dispatcher so that the callback is invoked.\n    // The posted lookup callback will cause another callback to be posted (when getBody() is\n    // called) which should also be invoked.\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_);\n    filter->onDestroy();\n  }\n}\n\nTEST_F(CacheFilterTest, NotSatisfiableRange) {\n  request_headers_.setHost(\"NotSatisfiableRange\");\n  const std::string body = \"abc\";\n\n  {\n    // Create filter for request 1\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response header\n    Buffer::OwnedImpl buffer(body);\n    response_headers_.setContentLength(body.size());\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue);\n    filter->onDestroy();\n  }\n  waitBeforeSecondRequest();\n  {\n    // Add range info to headers\n    request_headers_.addReference(Http::Headers::get().Range, \"bytes=123-\");\n\n    response_headers_.setStatus(static_cast<uint64_t>(Http::Code::RangeNotSatisfiable));\n    response_headers_.addReference(Http::Headers::get().ContentRange, \"bytes */3\");\n    response_headers_.setContentLength(0);\n\n    // Create filter for request 2\n    CacheFilterSharedPtr 
filter = makeFilter(simple_cache_);\n\n    // Decode request 2 header\n    EXPECT_CALL(decoder_callbacks_,\n                encodeHeaders_(testing::AllOf(IsSupersetOfHeaders(response_headers_),\n                                              HeaderHasValueRef(Http::Headers::get().Age, age)),\n                               true));\n\n    // 416 response should not have a body, so we don't expect a call to encodeData\n    EXPECT_CALL(decoder_callbacks_,\n                encodeData(testing::Property(&Buffer::Instance::toString, testing::Eq(body)), true))\n        .Times(0);\n\n    EXPECT_EQ(filter->decodeHeaders(request_headers_, true),\n              Http::FilterHeadersStatus::StopAllIterationAndWatermark);\n\n    // The cache lookup callback should be posted to the dispatcher.\n    // Run events on the dispatcher so that the callback is invoked.\n    // The posted lookup callback will cause another callback to be posted (when getBody() is\n    // called) which should also be invoked.\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_);\n    filter->onDestroy();\n  }\n}\n\n// Send two identical GET requests with bodies. 
The CacheFilter will just pass everything through.\nTEST_F(CacheFilterTest, GetRequestWithBodyAndTrailers) {\n  request_headers_.setHost(\"GetRequestWithBodyAndTrailers\");\n  const std::string body = \"abc\";\n  Buffer::OwnedImpl request_buffer(body);\n  Http::TestRequestTrailerMapImpl request_trailers;\n\n  for (int i = 0; i < 2; ++i) {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    EXPECT_EQ(filter->decodeHeaders(request_headers_, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(filter->decodeData(request_buffer, false), Http::FilterDataStatus::Continue);\n    EXPECT_EQ(filter->decodeTrailers(request_trailers), Http::FilterTrailersStatus::Continue);\n\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue);\n    filter->onDestroy();\n  }\n}\n\n// Checks the case where a cache lookup callback is posted to the dispatcher, then the CacheFilter\n// was deleted (e.g. connection dropped with the client) before the posted callback was executed. In\n// this case the CacheFilter should not be accessed after it was deleted, which is ensured by using\n// a weak_ptr to the CacheFilter in the posted callback.\n// This test may mistakenly pass (false positive) even if the the CacheFilter is accessed after\n// being deleted, as filter_state_ may be accessed and read as \"FilterState::Destroyed\" which will\n// result in a correct behavior. 
However, running the test with ASAN sanitizer enabled should\n// reliably fail if the CacheFilter is accessed after being deleted.\nTEST_F(CacheFilterTest, FilterDeletedBeforePostedCallbackExecuted) {\n  request_headers_.setHost(\"FilterDeletedBeforePostedCallbackExecuted\");\n  {\n    // Create filter for request 1.\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    testDecodeRequestMiss(filter);\n\n    // Encode response headers.\n    EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue);\n    filter->onDestroy();\n  }\n  {\n    // Create filter for request 2.\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Call decode headers to start the cache lookup, which should immediately post the callback to\n    // the dispatcher.\n    EXPECT_EQ(filter->decodeHeaders(request_headers_, true),\n              Http::FilterHeadersStatus::StopAllIterationAndWatermark);\n\n    // Destroy the filter\n    filter->onDestroy();\n    filter.reset();\n\n    // Make sure that onHeaders was not called by making sure no decoder callbacks were made.\n    EXPECT_CALL(decoder_callbacks_, continueDecoding).Times(0);\n    EXPECT_CALL(decoder_callbacks_, encodeHeaders_).Times(0);\n\n    // Run events on the dispatcher so that the callback is invoked after the filter deletion.\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_);\n  }\n}\n\n// A new type alias for a different type of tests that use the exact same class\nusing ValidationHeadersTest = CacheFilterTest;\n\nTEST_F(ValidationHeadersTest, EtagAndLastModified) {\n  request_headers_.setHost(\"EtagAndLastModified\");\n  const std::string etag = \"abc123\";\n\n  // Make request 1 to insert the response into cache\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n    testDecodeRequestMiss(filter);\n\n    // Add validation headers to the response\n    
response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag);\n    response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified,\n                                      formatter_.now(time_source_));\n\n    filter->encodeHeaders(response_headers_, true);\n  }\n  // Make request 2 to test for added conditional headers\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Make sure the request requires validation\n    request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"no-cache\");\n    testDecodeRequestMiss(filter);\n\n    // Make sure validation conditional headers are added\n    const Http::TestRequestHeaderMapImpl injected_headers = {\n        {\"if-none-match\", \"abc123\"}, {\"if-modified-since\", formatter_.now(time_source_)}};\n    EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers));\n  }\n}\n\nTEST_F(ValidationHeadersTest, EtagOnly) {\n  request_headers_.setHost(\"EtagOnly\");\n  const std::string etag = \"abc123\";\n\n  // Make request 1 to insert the response into cache\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n    testDecodeRequestMiss(filter);\n\n    // Add validation headers to the response\n    response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag);\n\n    filter->encodeHeaders(response_headers_, true);\n  }\n  // Make request 2 to test for added conditional headers\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Make sure the request requires validation\n    request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"no-cache\");\n    testDecodeRequestMiss(filter);\n\n    // Make sure validation conditional headers are added\n    // If-Modified-Since falls back to date\n    const Http::TestRequestHeaderMapImpl injected_headers = {\n        {\"if-none-match\", \"abc123\"}, {\"if-modified-since\", formatter_.now(time_source_)}};\n    EXPECT_THAT(request_headers_, 
IsSupersetOfHeaders(injected_headers));\n  }\n}\n\nTEST_F(ValidationHeadersTest, LastModifiedOnly) {\n  request_headers_.setHost(\"LastModifiedOnly\");\n\n  // Make request 1 to insert the response into cache\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n    testDecodeRequestMiss(filter);\n\n    // Add validation headers to the response\n    response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified,\n                                      formatter_.now(time_source_));\n\n    filter->encodeHeaders(response_headers_, true);\n  }\n  // Make request 2 to test for added conditional headers\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Make sure the request requires validation\n    request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"no-cache\");\n    testDecodeRequestMiss(filter);\n\n    // Make sure validation conditional headers are added\n    const Http::TestRequestHeaderMapImpl injected_headers = {\n        {\"if-modified-since\", formatter_.now(time_source_)}};\n    EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers));\n  }\n}\n\nTEST_F(ValidationHeadersTest, NoEtagOrLastModified) {\n  request_headers_.setHost(\"NoEtagOrLastModified\");\n\n  // Make request 1 to insert the response into cache\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n    testDecodeRequestMiss(filter);\n    filter->encodeHeaders(response_headers_, true);\n  }\n  // Make request 2 to test for added conditional headers\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Make sure the request requires validation\n    request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"no-cache\");\n    testDecodeRequestMiss(filter);\n\n    // Make sure validation conditional headers are added\n    // If-Modified-Since falls back to date\n    const Http::TestRequestHeaderMapImpl injected_headers = {\n        {\"if-modified-since\", 
formatter_.now(time_source_)}};\n    EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers));\n  }\n}\n\nTEST_F(ValidationHeadersTest, InvalidLastModified) {\n  request_headers_.setHost(\"InvalidLastModified\");\n\n  // Make request 1 to insert the response into cache\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n    testDecodeRequestMiss(filter);\n\n    // Add validation headers to the response\n    response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, \"invalid-date\");\n    filter->encodeHeaders(response_headers_, true);\n  }\n  // Make request 2 to test for added conditional headers\n  {\n    CacheFilterSharedPtr filter = makeFilter(simple_cache_);\n\n    // Make sure the request requires validation\n    request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"no-cache\");\n    testDecodeRequestMiss(filter);\n\n    // Make sure validation conditional headers are added\n    // If-Modified-Since falls back to date\n    const Http::TestRequestHeaderMapImpl injected_headers = {\n        {\"if-modified-since\", formatter_.now(time_source_)}};\n    EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers));\n  }\n}\n\n} // namespace\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cache/cache_headers_utils_test.cc",
    "content": "#include <chrono>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/header_utility.h\"\n\n#include \"extensions/filters/http/cache/cache_headers_utils.h\"\n\n#include \"test/extensions/filters/http/cache/common.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\nnamespace {\n\nstruct TestRequestCacheControl : public RequestCacheControl {\n  TestRequestCacheControl(bool must_validate, bool no_store, bool no_transform, bool only_if_cached,\n                          OptionalDuration max_age, OptionalDuration min_fresh,\n                          OptionalDuration max_stale) {\n    must_validate_ = must_validate;\n    no_store_ = no_store;\n    no_transform_ = no_transform;\n    only_if_cached_ = only_if_cached;\n    max_age_ = max_age;\n    min_fresh_ = min_fresh;\n    max_stale_ = max_stale;\n  }\n};\n\nstruct RequestCacheControlTestCase {\n  absl::string_view cache_control_header;\n  TestRequestCacheControl request_cache_control;\n};\n\nclass RequestCacheControlTest : public testing::TestWithParam<RequestCacheControlTestCase> {\npublic:\n  static const std::vector<RequestCacheControlTestCase>& getTestCases() {\n    // clang-format off\n    CONSTRUCT_ON_FIRST_USE(std::vector<RequestCacheControlTestCase>,\n        // Empty header\n        {\n          \"\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, false, false, false, absl::nullopt, absl::nullopt, absl::nullopt}\n        },\n        // Valid cache-control headers\n        {\n          \"max-age=3600, min-fresh=10, no-transform, only-if-cached, no-store\",\n          // {must_validate_, 
no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, true, true, true, Seconds(3600), Seconds(10), absl::nullopt}\n        },\n        {\n          \"min-fresh=100, max-stale, no-cache\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {true, false, false, false, absl::nullopt, Seconds(100), SystemTime::duration::max()}\n        },\n        {\n          \"max-age=10, max-stale=50\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, false, false, false, Seconds(10), absl::nullopt, Seconds(50)}\n        },\n        // Quoted arguments are interpreted correctly\n        {\n          \"max-age=\\\"3600\\\", min-fresh=\\\"10\\\", no-transform, only-if-cached, no-store\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, true, true, true, Seconds(3600), Seconds(10), absl::nullopt}\n        },\n        {\n          \"max-age=\\\"10\\\", max-stale=\\\"50\\\", only-if-cached\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, false, false, true, Seconds(10), absl::nullopt, Seconds(50)}\n        },\n        // Unknown directives are ignored\n        {\n          \"max-age=10, max-stale=50, unknown-directive\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, false, false, false, Seconds(10), absl::nullopt, Seconds(50)}\n        },\n        {\n          \"max-age=10, max-stale=50, unknown-directive-with-arg=arg1\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, false, false, false, Seconds(10), absl::nullopt, Seconds(50)}\n        },\n        {\n          \"max-age=10, 
max-stale=50, unknown-directive-with-quoted-arg=\\\"arg1\\\"\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, false, false, false, Seconds(10), absl::nullopt, Seconds(50)}\n        },\n        {\n          \"max-age=10, max-stale=50, unknown-directive, unknown-directive-with-quoted-arg=\\\"arg1\\\"\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, false, false, false, Seconds(10), absl::nullopt, Seconds(50)}\n        },\n        // Invalid durations are ignored\n        {\n          \"max-age=five, min-fresh=30, no-store\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, true, false, false, absl::nullopt, Seconds(30), absl::nullopt}\n        },\n        {\n          \"max-age=five, min-fresh=30s, max-stale=-2\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, false, false, false, absl::nullopt, absl::nullopt, absl::nullopt}\n        },\n        {\n          \"max-age=\\\"\", \n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {false, false, false, false, absl::nullopt, absl::nullopt, absl::nullopt}\n        },\n        // Invalid parts of the header are ignored\n        {\n          \"no-cache, ,,,fjfwioen3298, max-age=20, min-fresh=30=40\",\n          // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {true, false, false, false, Seconds(20), absl::nullopt, absl::nullopt}\n        },\n        // If a directive argument contains a comma by mistake\n        // the part before the comma will be interpreted as the argument\n        // and the part after it will be ignored\n        {\n          \"no-cache, max-age=10,0, no-store\",\n  
        // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_}\n          {true, true, false, false, Seconds(10), absl::nullopt, absl::nullopt}\n        },\n    );\n    // clang-format on\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(RequestCacheControlTest, RequestCacheControlTest,\n                         testing::ValuesIn(RequestCacheControlTest::getTestCases()));\n\nTEST_P(RequestCacheControlTest, RequestCacheControlTest) {\n  const absl::string_view cache_control_header = GetParam().cache_control_header;\n  const RequestCacheControl expected_request_cache_control = GetParam().request_cache_control;\n  EXPECT_EQ(expected_request_cache_control, RequestCacheControl(cache_control_header));\n}\n\nstruct TestResponseCacheControl : public ResponseCacheControl {\n  TestResponseCacheControl(bool must_validate, bool no_store, bool no_transform, bool no_stale,\n                           bool is_public, OptionalDuration max_age) {\n    must_validate_ = must_validate;\n    no_store_ = no_store;\n    no_transform_ = no_transform;\n    no_stale_ = no_stale;\n    is_public_ = is_public;\n    max_age_ = max_age;\n  }\n};\n\nstruct ResponseCacheControlTestCase {\n  absl::string_view cache_control_header;\n  TestResponseCacheControl response_cache_control;\n};\n\nclass ResponseCacheControlTest : public testing::TestWithParam<ResponseCacheControlTestCase> {\npublic:\n  static const std::vector<ResponseCacheControlTestCase>& getTestCases() {\n    // clang-format off\n    CONSTRUCT_ON_FIRST_USE(std::vector<ResponseCacheControlTestCase>,\n        // Empty header\n        {\n          \"\", \n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, false, false, false, false, absl::nullopt}\n        },\n        // Valid cache-control headers\n        {\n          \"s-maxage=1000, max-age=2000, proxy-revalidate, no-store\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, 
is_public_, max_age_}\n          {false, true, false, true, false, Seconds(1000)}\n        },\n        {\n          \"max-age=500, must-revalidate, no-cache, no-transform\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {true, false, true, true, false, Seconds(500)}\n        },\n        {\n          \"s-maxage=10, private=content-length, no-cache=content-encoding\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {true, true, false, false, false, Seconds(10)}\n        },\n        {\n          \"private\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, true, false, false, false, absl::nullopt}\n        },\n        {\n          \"public, max-age=0\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, false, false, false, true, Seconds(0)}\n        },\n        // Quoted arguments are interpreted correctly\n        {\n          \"s-maxage=\\\"20\\\", max-age=\\\"10\\\", public\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, false, false, false, true, Seconds(20)}\n        },\n        {\n          \"max-age=\\\"50\\\", private\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, true, false, false, false, Seconds(50)}\n        },\n        {\n          \"s-maxage=\\\"0\\\"\", \n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, false, false, false, false, Seconds(0)}\n        },\n        // Unknown directives are ignored\n        {\n          \"private, no-cache, max-age=30, unknown-directive\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {true, true, false, false, false, Seconds(30)}\n        },\n        {\n       
   \"private, no-cache, max-age=30, unknown-directive-with-arg=arg\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {true, true, false, false, false, Seconds(30)}\n        },\n        {\n          \"private, no-cache, max-age=30, unknown-directive-with-quoted-arg=\\\"arg\\\"\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {true, true, false, false, false, Seconds(30)}\n        },\n        {\n          \"private, no-cache, max-age=30, unknown-directive, unknown-directive-with-quoted-arg=\\\"arg\\\"\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {true, true, false, false, false, Seconds(30)}\n        },\n        // Invalid durations are ignored\n        {\n          \"max-age=five\", \n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, false, false, false, false, absl::nullopt}\n        },\n        {\n          \"max-age=10s, private\", \n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, true, false, false, false, absl::nullopt}\n        },\n        {\n          \"s-maxage=\\\"50s\\\", max-age=\\\"zero\\\", no-cache\",\n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {true, false, false, false, false, absl::nullopt}\n        },\n        {\n          \"s-maxage=five, max-age=10, no-transform\", \n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, false, true, false, false, Seconds(10)}\n        },\n        {\n          \"max-age=\\\"\", \n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {false, false, false, false, false, absl::nullopt}\n        },\n        // Invalid parts of the header are ignored\n        {\n          \"no-cache, 
,,,fjfwioen3298, max-age=20\", \n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {true, false, false, false, false, Seconds(20)}\n        },\n        // If a directive argument contains a comma by mistake\n        // the part before the comma will be interpreted as the argument\n        // and the part after it will be ignored\n        {\n          \"no-cache, max-age=10,0, no-store\", \n          // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_}\n          {true, true, false, false, false, Seconds(10)}\n        },\n    );\n    // clang-format on\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(ResponseCacheControlTest, ResponseCacheControlTest,\n                         testing::ValuesIn(ResponseCacheControlTest::getTestCases()));\n\nTEST_P(ResponseCacheControlTest, ResponseCacheControlTest) {\n  const absl::string_view cache_control_header = GetParam().cache_control_header;\n  const ResponseCacheControl expected_response_cache_control = GetParam().response_cache_control;\n  EXPECT_EQ(expected_response_cache_control, ResponseCacheControl(cache_control_header));\n}\n\nclass HttpTimeTest : public testing::TestWithParam<std::string> {\npublic:\n  static const std::vector<std::string>& getOkTestCases() {\n    // clang-format off\n    CONSTRUCT_ON_FIRST_USE(std::vector<std::string>,\n        \"Sun, 06 Nov 1994 08:49:37 GMT\",  // IMF-fixdate.\n        \"Sunday, 06-Nov-94 08:49:37 GMT\", // obsolete RFC 850 format.\n        \"Sun Nov  6 08:49:37 1994\"        // ANSI C's asctime() format.\n    );\n    // clang-format on\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(Ok, HttpTimeTest, testing::ValuesIn(HttpTimeTest::getOkTestCases()));\n\nTEST_P(HttpTimeTest, OkFormats) {\n  const Http::TestResponseHeaderMapImpl response_headers{{\"date\", GetParam()}};\n  // Manually confirmed that 784111777 is 11/6/94, 8:46:37.\n  EXPECT_EQ(784111777,\n            
SystemTime::clock::to_time_t(CacheHeadersUtils::httpTime(response_headers.Date())));\n}\n\nTEST(HttpTime, InvalidFormat) {\n  const std::string invalid_format_date = \"Sunday, 06-11-1994 08:49:37\";\n  const Http::TestResponseHeaderMapImpl response_headers{{\"date\", invalid_format_date}};\n  EXPECT_EQ(CacheHeadersUtils::httpTime(response_headers.Date()), SystemTime());\n}\n\nTEST(HttpTime, Null) { EXPECT_EQ(CacheHeadersUtils::httpTime(nullptr), SystemTime()); }\n\nstruct CalculateAgeTestCase {\n  std::string test_name;\n  Http::TestResponseHeaderMapImpl response_headers;\n  SystemTime response_time, now;\n  Seconds expected_age;\n};\n\nclass CalculateAgeTest : public testing::TestWithParam<CalculateAgeTestCase> {\npublic:\n  static std::string durationToString(const SystemTime::duration& duration) {\n    return std::to_string(duration.count());\n  }\n  static std::string formatTime(const SystemTime& time) { return formatter().fromTime(time); }\n  static const DateFormatter& formatter() {\n    CONSTRUCT_ON_FIRST_USE(DateFormatter, {\"%a, %d %b %Y %H:%M:%S GMT\"});\n  }\n  static const SystemTime& currentTime() {\n    CONSTRUCT_ON_FIRST_USE(SystemTime, Event::SimulatedTimeSystem().systemTime());\n  }\n  static const std::vector<CalculateAgeTestCase>& getTestCases() {\n    // clang-format off\n    CONSTRUCT_ON_FIRST_USE(std::vector<CalculateAgeTestCase>,\n        {\n          \"no_initial_age_all_times_equal\",\n          /*response_headers=*/{{\"date\", formatTime(currentTime())}},\n          /*response_time=*/currentTime(),\n          /*now=*/currentTime(),\n          /*expected_age=*/Seconds(0)\n        },\n        {\n          \"initial_age_zero_all_times_equal\",\n          /*response_headers=*/{{\"date\", formatTime(currentTime())}, {\"age\", \"0\"}},\n          /*response_time=*/currentTime(),\n          /*now=*/currentTime(),\n          /*expected_age=*/Seconds(0)\n        },\n        {\n          \"initial_age_non_zero_all_times_equal\",\n          
/*response_headers=*/{{\"date\", formatTime(currentTime())}, {\"age\", \"50\"}},\n          /*response_time=*/currentTime(),\n          /*now=*/currentTime(),\n          /*expected_age=*/Seconds(50)\n        },\n        {\n          \"date_after_response_time_no_initial_age\",\n          /*response_headers=*/{{\"date\", formatTime(currentTime() + Seconds(5))}},\n          /*response_time=*/currentTime(),\n          /*now=*/currentTime() + Seconds(10),\n          /*expected_age=*/Seconds(10)\n        },\n        {\n          \"date_after_response_time_with_initial_age\",\n          /*response_headers=*/{{\"date\", formatTime(currentTime() + Seconds(10))}, {\"age\", \"5\"}},\n          /*response_time=*/currentTime(),\n          /*now=*/currentTime() + Seconds(10),\n          /*expected_age=*/Seconds(15)\n        },\n        {\n          \"apparent_age_equals_initial_age\",\n          /*response_headers=*/{{\"date\", formatTime(currentTime())}, {\"age\", \"1\"}},\n          /*response_time=*/currentTime() + Seconds(1),\n          /*now=*/currentTime() + Seconds(5),\n          /*expected_age=*/Seconds(5)\n        },\n        {\n          \"apparent_age_lower_than_initial_age\",\n          /*response_headers=*/{{\"date\", formatTime(currentTime())}, {\"age\", \"3\"}},\n          /*response_time=*/currentTime() + Seconds(1),\n          /*now=*/currentTime() + Seconds(5),\n          /*expected_age=*/Seconds(7)\n        },\n        {\n          \"apparent_age_higher_than_initial_age\",\n          /*response_headers=*/{{\"date\", formatTime(currentTime())}, {\"age\", \"1\"}},\n          /*response_time=*/currentTime() + Seconds(3),\n          /*now=*/currentTime() + Seconds(5),\n          /*expected_age=*/Seconds(5)\n        },\n    );\n    // clang-format on\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(CalculateAgeTest, CalculateAgeTest,\n                         testing::ValuesIn(CalculateAgeTest::getTestCases()),\n                         [](const auto& info) { return 
info.param.test_name; });\n\nTEST_P(CalculateAgeTest, CalculateAgeTest) {\n  const Seconds calculated_age = CacheHeadersUtils::calculateAge(\n      GetParam().response_headers, GetParam().response_time, GetParam().now);\n  const Seconds expected_age = GetParam().expected_age;\n  EXPECT_EQ(calculated_age, expected_age)\n      << \"Expected age: \" << durationToString(expected_age)\n      << \", Calculated age: \" << durationToString(calculated_age);\n}\n\nvoid testReadAndRemoveLeadingDigits(absl::string_view input, int64_t expected,\n                                    absl::string_view remaining) {\n  absl::string_view test_input(input);\n  auto output = CacheHeadersUtils::readAndRemoveLeadingDigits(test_input);\n  if (output) {\n    EXPECT_EQ(output, static_cast<uint64_t>(expected)) << \"input=\" << input;\n    EXPECT_EQ(test_input, remaining) << \"input=\" << input;\n  } else {\n    EXPECT_LT(expected, 0) << \"input=\" << input;\n    EXPECT_EQ(test_input, remaining) << \"input=\" << input;\n  }\n}\n\nTEST(ReadAndRemoveLeadingDigits, ComprehensiveTest) {\n  testReadAndRemoveLeadingDigits(\"123\", 123, \"\");\n  testReadAndRemoveLeadingDigits(\"a123\", -1, \"a123\");\n  testReadAndRemoveLeadingDigits(\"9_\", 9, \"_\");\n  testReadAndRemoveLeadingDigits(\"11111111111xyz\", 11111111111ll, \"xyz\");\n\n  // Overflow case\n  testReadAndRemoveLeadingDigits(\"1111111111111111111111111111111xyz\", -1,\n                                 \"1111111111111111111111111111111xyz\");\n\n  // 2^64\n  testReadAndRemoveLeadingDigits(\"18446744073709551616xyz\", -1, \"18446744073709551616xyz\");\n  // 2^64-1\n  testReadAndRemoveLeadingDigits(\"18446744073709551615xyz\", 18446744073709551615ull, \"xyz\");\n  // (2^64-1)*10+9\n  testReadAndRemoveLeadingDigits(\"184467440737095516159yz\", -1, \"184467440737095516159yz\");\n}\n\nTEST(GetAllMatchingHeaderNames, EmptyRuleset) {\n  Http::TestRequestHeaderMapImpl headers{{\"accept\", \"image/*\"}};\n  std::vector<Matchers::StringMatcherPtr> 
ruleset;\n  absl::flat_hash_set<absl::string_view> result;\n\n  CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result);\n\n  ASSERT_TRUE(result.empty());\n}\n\nTEST(GetAllMatchingHeaderNames, EmptyHeaderMap) {\n  Http::TestRequestHeaderMapImpl headers;\n  std::vector<Matchers::StringMatcherPtr> ruleset;\n  absl::flat_hash_set<absl::string_view> result;\n\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_exact(\"accept\");\n  ruleset.emplace_back(std::make_unique<Matchers::StringMatcherImpl>(matcher));\n\n  CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result);\n\n  ASSERT_TRUE(result.empty());\n}\n\nTEST(GetAllMatchingHeaderNames, SingleMatchSingleValue) {\n  Http::TestRequestHeaderMapImpl headers{{\"accept\", \"image/*\"}, {\"accept-language\", \"en-US\"}};\n  std::vector<Matchers::StringMatcherPtr> ruleset;\n  absl::flat_hash_set<absl::string_view> result;\n\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_exact(\"accept\");\n  ruleset.emplace_back(std::make_unique<Matchers::StringMatcherImpl>(matcher));\n\n  CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result);\n\n  ASSERT_EQ(result.size(), 1);\n  EXPECT_TRUE(result.contains(\"accept\"));\n}\n\nTEST(GetAllMatchingHeaderNames, SingleMatchMultiValue) {\n  Http::TestRequestHeaderMapImpl headers{{\"accept\", \"image/*\"}, {\"accept\", \"text/html\"}};\n  std::vector<Matchers::StringMatcherPtr> ruleset;\n  absl::flat_hash_set<absl::string_view> result;\n\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_exact(\"accept\");\n  ruleset.emplace_back(std::make_unique<Matchers::StringMatcherImpl>(matcher));\n\n  CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result);\n\n  ASSERT_EQ(result.size(), 1);\n  EXPECT_TRUE(result.contains(\"accept\"));\n}\n\nTEST(GetAllMatchingHeaderNames, MultipleMatches) {\n  Http::TestRequestHeaderMapImpl headers{{\"accept\", \"image/*\"}, {\"accept-language\", \"en-US\"}};\n  
std::vector<Matchers::StringMatcherPtr> ruleset;\n  absl::flat_hash_set<absl::string_view> result;\n\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_exact(\"accept\");\n  ruleset.emplace_back(std::make_unique<Matchers::StringMatcherImpl>(matcher));\n  matcher.set_exact(\"accept-language\");\n  ruleset.emplace_back(std::make_unique<Matchers::StringMatcherImpl>(matcher));\n\n  CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result);\n\n  ASSERT_EQ(result.size(), 2);\n  EXPECT_TRUE(result.contains(\"accept\"));\n  EXPECT_TRUE(result.contains(\"accept-language\"));\n}\n\nTEST(ParseCommaDelimitedList, Null) {\n  Http::TestResponseHeaderMapImpl headers;\n  std::vector<std::string> result =\n      CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary));\n\n  EXPECT_EQ(result.size(), 0);\n}\n\nTEST(ParseCommaDelimitedList, Empty) {\n  Http::TestResponseHeaderMapImpl headers{{\"vary\", \"\"}};\n  std::vector<std::string> result =\n      CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary));\n\n  EXPECT_EQ(result.size(), 1);\n  EXPECT_EQ(result[0], \"\");\n}\n\nTEST(ParseCommaDelimitedList, SingleValue) {\n  Http::TestResponseHeaderMapImpl headers{{\"vary\", \"accept\"}};\n  std::vector<std::string> result =\n      CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary));\n\n  EXPECT_EQ(result.size(), 1);\n  EXPECT_EQ(result[0], \"accept\");\n}\n\nclass ParseCommaDelimitedListMultipleTest : public testing::Test,\n                                            public testing::WithParamInterface<std::string> {\nprotected:\n  Http::TestResponseHeaderMapImpl headers{{\"vary\", GetParam()}};\n};\n\nINSTANTIATE_TEST_SUITE_P(MultipleValuesMixedSpaces, ParseCommaDelimitedListMultipleTest,\n                         testing::Values(\"accept,accept-language\", \" accept,accept-language\",\n                                         \"accept ,accept-language\", \"accept, 
accept-language\",\n                                         \"accept,accept-language \", \" accept, accept-language \",\n                                         \"  accept  ,  accept-language  \"));\n\nTEST_P(ParseCommaDelimitedListMultipleTest, MultipleValuesMixedSpaces) {\n  std::vector<std::string> result =\n      CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary));\n  EXPECT_EQ(result.size(), 2);\n  EXPECT_EQ(result[0], \"accept\");\n  EXPECT_EQ(result[1], \"accept-language\");\n}\n\nTEST(HasVary, Null) {\n  Http::TestResponseHeaderMapImpl headers;\n  ASSERT_FALSE(VaryHeader::hasVary(headers));\n}\n\nTEST(HasVary, Empty) {\n  Http::TestResponseHeaderMapImpl headers{{\"vary\", \"\"}};\n  ASSERT_FALSE(VaryHeader::hasVary(headers));\n}\n\nTEST(HasVary, NotEmpty) {\n  Http::TestResponseHeaderMapImpl headers{{\"vary\", \"accept\"}};\n  ASSERT_TRUE(VaryHeader::hasVary(headers));\n}\n\nTEST(CreateVaryKey, EmptyVaryEntry) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"vary\", \"\"}};\n  Http::TestRequestHeaderMapImpl request_headers{{\"accept\", \"image/*\"}};\n\n  ASSERT_EQ(\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers),\n      \"vary-key\\n\\r\\n\");\n}\n\nTEST(CreateVaryKey, SingleHeaderExists) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"vary\", \"accept\"}};\n  Http::TestRequestHeaderMapImpl request_headers{{\"accept\", \"image/*\"}};\n\n  ASSERT_EQ(\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers),\n      \"vary-key\\naccept\\r\"\n      \"image/*\\n\");\n}\n\nTEST(CreateVaryKey, SingleHeaderMissing) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"vary\", \"accept\"}};\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  ASSERT_EQ(\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers),\n      \"vary-key\\naccept\\r\\n\");\n}\n\nTEST(CreateVaryKey, 
MultipleHeadersAllExist) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"vary\", \"accept, accept-language, width\"}};\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"accept\", \"image/*\"}, {\"accept-language\", \"en-us\"}, {\"width\", \"640\"}};\n\n  ASSERT_EQ(\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers),\n      \"vary-key\\naccept\\r\"\n      \"image/*\\naccept-language\\r\"\n      \"en-us\\nwidth\\r640\\n\");\n}\n\nTEST(CreateVaryKey, MultipleHeadersSomeExist) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"vary\", \"accept, accept-language, width\"}};\n  Http::TestRequestHeaderMapImpl request_headers{{\"accept\", \"image/*\"}, {\"width\", \"640\"}};\n\n  ASSERT_EQ(\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers),\n      \"vary-key\\naccept\\r\"\n      \"image/*\\naccept-language\\r\\nwidth\\r640\\n\");\n}\n\nTEST(CreateVaryKey, ExtraRequestHeaders) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"vary\", \"accept, width\"}};\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"accept\", \"image/*\"}, {\"heigth\", \"1280\"}, {\"width\", \"640\"}};\n\n  ASSERT_EQ(\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers),\n      \"vary-key\\naccept\\r\"\n      \"image/*\\nwidth\\r640\\n\");\n}\n\nTEST(CreateVaryKey, MultipleHeadersNoneExist) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"vary\", \"accept, accept-language, width\"}};\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  ASSERT_EQ(\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers),\n      \"vary-key\\naccept\\r\\naccept-language\\r\\nwidth\\r\\n\");\n}\n\nTEST(CreateVaryKey, DifferentHeadersSameValue) {\n  // Two requests with the same value for different headers must have different vary-keys.\n  Http::TestResponseHeaderMapImpl 
response_headers{{\"vary\", \"accept, accept-language\"}};\n\n  Http::TestRequestHeaderMapImpl request_headers1{{\"accept\", \"foo\"}};\n  std::string vary_key1 =\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers1);\n\n  Http::TestRequestHeaderMapImpl request_headers2{{\"accept-language\", \"foo\"}};\n  std::string vary_key2 =\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers2);\n\n  ASSERT_NE(vary_key1, vary_key2);\n}\n\nTEST(CreateVaryKey, MultiValueSameHeader) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"vary\", \"width\"}};\n  Http::TestRequestHeaderMapImpl request_headers{{\"width\", \"foo\"}, {\"width\", \"bar\"}};\n\n  ASSERT_EQ(\n      VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers),\n      \"vary-key\\nwidth\\r\"\n      \"foo\\r\"\n      \"bar\\n\");\n}\n\nenvoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() {\n  // Allows {accept, accept-language, width} to be varied in the tests.\n  envoy::extensions::filters::http::cache::v3alpha::CacheConfig config;\n\n  const auto& add_accept = config.mutable_allowed_vary_headers()->Add();\n  add_accept->set_exact(\"accept\");\n\n  const auto& add_accept_language = config.mutable_allowed_vary_headers()->Add();\n  add_accept_language->set_exact(\"accept-language\");\n\n  const auto& add_width = config.mutable_allowed_vary_headers()->Add();\n  add_width->set_exact(\"width\");\n\n  return config;\n}\n\nclass VaryHeaderTest : public testing::Test {\nprotected:\n  VaryHeaderTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {}\n\n  VaryHeader vary_allow_list_;\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n};\n\nTEST_F(VaryHeaderTest, IsAllowedNull) {\n  ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_));\n}\n\nTEST_F(VaryHeaderTest, IsAllowedEmpty) {\n  
response_headers_.addCopy(\"vary\", \"\");\n  ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_));\n}\n\nTEST_F(VaryHeaderTest, IsAllowedSingle) {\n  response_headers_.addCopy(\"vary\", \"accept\");\n  ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_));\n}\n\nTEST_F(VaryHeaderTest, IsAllowedMultiple) {\n  response_headers_.addCopy(\"vary\", \"accept\");\n  ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_));\n}\n\nTEST_F(VaryHeaderTest, NotIsAllowedStar) {\n  // Should never be allowed, regardless of the allow_list.\n  response_headers_.addCopy(\"vary\", \"*\");\n  ASSERT_FALSE(vary_allow_list_.isAllowed(response_headers_));\n}\n\nTEST_F(VaryHeaderTest, NotIsAllowedSingle) {\n  response_headers_.addCopy(\"vary\", \"wrong-header\");\n  ASSERT_FALSE(vary_allow_list_.isAllowed(response_headers_));\n}\n\nTEST_F(VaryHeaderTest, NotIsAllowedMixed) {\n  response_headers_.addCopy(\"vary\", \"accept, wrong-header\");\n  ASSERT_FALSE(vary_allow_list_.isAllowed(response_headers_));\n}\n\nTEST_F(VaryHeaderTest, PossibleVariedHeadersEmpty) {\n  Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_);\n\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"accept\")));\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"accept-language\")));\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"width\")));\n}\n\nTEST_F(VaryHeaderTest, PossibleVariedHeadersNoOverlap) {\n  request_headers_.addCopy(\"abc\", \"123\");\n  Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_);\n\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"accept\")));\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"accept-language\")));\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"width\")));\n}\n\nTEST_F(VaryHeaderTest, PossibleVariedHeadersOverlap) {\n  request_headers_.addCopy(\"abc\", \"123\");\n  request_headers_.addCopy(\"accept\", \"image/*\");\n  Http::HeaderMapPtr result = 
vary_allow_list_.possibleVariedHeaders(request_headers_);\n\n  std::vector<absl::string_view> values;\n  Http::HeaderUtility::getAllOfHeader(*result, \"accept\", values);\n  ASSERT_EQ(values.size(), 1);\n  EXPECT_EQ(values[0], \"image/*\");\n\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"accept-language\")));\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"width\")));\n}\n\nTEST_F(VaryHeaderTest, PossibleVariedHeadersMultiValues) {\n  request_headers_.addCopy(\"accept\", \"image/*\");\n  request_headers_.addCopy(\"accept\", \"text/html\");\n  Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_);\n\n  std::vector<absl::string_view> values;\n  Http::HeaderUtility::getAllOfHeader(*result, \"accept\", values);\n  ASSERT_EQ(values.size(), 2);\n  EXPECT_EQ(values[0], \"image/*\");\n  EXPECT_EQ(values[1], \"text/html\");\n\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"accept-language\")));\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"width\")));\n}\n\nTEST_F(VaryHeaderTest, PossibleVariedHeadersMultiHeaders) {\n  request_headers_.addCopy(\"accept\", \"image/*\");\n  request_headers_.addCopy(\"accept-language\", \"en-US\");\n  Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_);\n\n  std::vector<absl::string_view> values;\n  Http::HeaderUtility::getAllOfHeader(*result, \"accept\", values);\n  ASSERT_EQ(values.size(), 1);\n  EXPECT_EQ(values[0], \"image/*\");\n\n  Http::HeaderUtility::getAllOfHeader(*result, \"accept-language\", values);\n  ASSERT_EQ(values.size(), 2);\n  EXPECT_EQ(values[1], \"en-US\");\n\n  EXPECT_FALSE(result->get(Http::LowerCaseString(\"width\")));\n}\n\n} // namespace\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cache/cacheability_utils_test.cc",
    "content": "#include \"envoy/http/header_map.h\"\n\n#include \"extensions/filters/http/cache/cacheability_utils.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\nnamespace {\n\nclass IsCacheableRequestTest : public testing::Test {\nprotected:\n  Http::TestRequestHeaderMapImpl request_headers_ = {{\":path\", \"/\"},\n                                                     {\":method\", \"GET\"},\n                                                     {\"x-forwarded-proto\", \"http\"},\n                                                     {\":authority\", \"test.com\"}};\n};\n\nclass RequestConditionalHeadersTest : public testing::TestWithParam<std::string> {\nprotected:\n  Http::TestRequestHeaderMapImpl request_headers_ = {{\":path\", \"/\"},\n                                                     {\":method\", \"GET\"},\n                                                     {\"x-forwarded-proto\", \"http\"},\n                                                     {\":authority\", \"test.com\"}};\n  std::string conditionalHeader() const { return GetParam(); }\n};\n\nenvoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() {\n  // Allows 'accept' to be varied in the tests.\n  envoy::extensions::filters::http::cache::v3alpha::CacheConfig config;\n  const auto& add_accept = config.mutable_allowed_vary_headers()->Add();\n  add_accept->set_exact(\"accept\");\n  return config;\n}\n\nclass IsCacheableResponseTest : public testing::Test {\npublic:\n  IsCacheableResponseTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {}\n\nprotected:\n  std::string cache_control_ = \"max-age=3600\";\n  Http::TestResponseHeaderMapImpl response_headers_ = {{\":status\", \"200\"},\n                                                       {\"date\", \"Sun, 06 Nov 1994 08:49:37 GMT\"},\n                                                       
{\"cache-control\", cache_control_}};\n  VaryHeader vary_allow_list_;\n};\n\nTEST_F(IsCacheableRequestTest, CacheableRequest) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers_));\n}\n\nTEST_F(IsCacheableRequestTest, PathHeader) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers_));\n  request_headers_.removePath();\n  EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers_));\n}\n\nTEST_F(IsCacheableRequestTest, HostHeader) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers_));\n  request_headers_.removeHost();\n  EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers_));\n}\n\nTEST_F(IsCacheableRequestTest, MethodHeader) {\n  const Http::HeaderValues& header_values = Http::Headers::get();\n  EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers_));\n  request_headers_.setMethod(header_values.MethodValues.Post);\n  EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers_));\n  request_headers_.setMethod(header_values.MethodValues.Put);\n  EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers_));\n  request_headers_.removeMethod();\n  EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers_));\n}\n\nTEST_F(IsCacheableRequestTest, ForwardedProtoHeader) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers_));\n  request_headers_.setForwardedProto(\"ftp\");\n  EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers_));\n  request_headers_.removeForwardedProto();\n  EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers_));\n}\n\nTEST_F(IsCacheableRequestTest, AuthorizationHeader) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers_));\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().Authorization,\n                                   \"basic YWxhZGRpbjpvcGVuc2VzYW1l\");\n  
EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers_));\n}\n\nINSTANTIATE_TEST_SUITE_P(ConditionalHeaders, RequestConditionalHeadersTest,\n                         testing::Values(\"if-match\", \"if-none-match\", \"if-modified-since\",\n                                         \"if-unmodified-since\", \"if-range\"),\n                         [](const auto& info) {\n                           std::string test_name = info.param;\n                           absl::c_replace_if(\n                               test_name, [](char c) { return !std::isalnum(c); }, '_');\n                           return test_name;\n                         });\n\nTEST_P(RequestConditionalHeadersTest, ConditionalHeaders) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers_));\n  request_headers_.setCopy(Http::LowerCaseString{conditionalHeader()}, \"test-value\");\n  EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers_));\n}\n\nTEST_F(IsCacheableResponseTest, CacheableResponse) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n}\n\nTEST_F(IsCacheableResponseTest, UncacheableStatusCode) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  response_headers_.setStatus(\"700\");\n  EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  response_headers_.removeStatus();\n  EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n}\n\nTEST_F(IsCacheableResponseTest, ValidationData) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  // No cache control headers or expires header\n  response_headers_.remove(Http::CustomHeaders::get().CacheControl);\n  EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  // No max-age data or expires header\n  
response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl,\n                                    \"public, no-transform\");\n  EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  // Max-age data available\n  response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"s-maxage=1000\");\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  // No max-age data, but the response requires revalidation anyway\n  response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"no-cache\");\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  // No cache control headers, but there is an expires header\n  response_headers_.remove(Http::CustomHeaders::get().CacheControl);\n  response_headers_.setReferenceKey(Http::Headers::get().Expires, \"Sun, 06 Nov 1994 09:49:37 GMT\");\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n}\n\nTEST_F(IsCacheableResponseTest, ResponseNoStore) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  std::string cache_control_no_store = absl::StrCat(cache_control_, \", no-store\");\n  response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl,\n                                    cache_control_no_store);\n  EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n}\n\nTEST_F(IsCacheableResponseTest, ResponsePrivate) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  std::string cache_control_private = absl::StrCat(cache_control_, \", private\");\n  response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, cache_control_private);\n  EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n}\n\nTEST_F(IsCacheableResponseTest, EmptyVary) {\n  
EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  response_headers_.setCopy(Http::Headers::get().Vary, \"\");\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n}\n\nTEST_F(IsCacheableResponseTest, AllowedVary) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  response_headers_.setCopy(Http::Headers::get().Vary, \"accept\");\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n}\n\nTEST_F(IsCacheableResponseTest, NotAllowedVary) {\n  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n  response_headers_.setCopy(Http::Headers::get().Vary, \"*\");\n  EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));\n}\n\n} // namespace\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cache/common.h",
    "content": "#pragma once\n\n#include \"extensions/filters/http/cache/cache_headers_utils.h\"\n#include \"extensions/filters/http/cache/http_cache.h\"\n#include \"extensions/filters/http/cache/simple_http_cache/simple_http_cache.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\n\nstd::ostream& operator<<(std::ostream& os, const RequestCacheControl& request_cache_control) {\n  std::string s = \"{\";\n  s += request_cache_control.must_validate_ ? \"must_validate, \" : \"\";\n  s += request_cache_control.no_store_ ? \"no_store, \" : \"\";\n  s += request_cache_control.no_transform_ ? \"no_transform, \" : \"\";\n  s += request_cache_control.only_if_cached_ ? \"only_if_cached, \" : \"\";\n\n  s += request_cache_control.max_age_.has_value()\n           ? \"max-age=\" + std::to_string(request_cache_control.max_age_.value().count()) + \", \"\n           : \"\";\n  s += request_cache_control.min_fresh_.has_value()\n           ? \"min-fresh=\" + std::to_string(request_cache_control.min_fresh_.value().count()) + \", \"\n           : \"\";\n  s += request_cache_control.max_stale_.has_value()\n           ? \"max-stale=\" + std::to_string(request_cache_control.max_stale_.value().count()) + \", \"\n           : \"\";\n\n  // Remove any extra \", \" at the end\n  if (s.size() > 1) {\n    s.pop_back();\n    s.pop_back();\n  }\n\n  s += \"}\";\n  return os << s;\n}\n\nstd::ostream& operator<<(std::ostream& os, const ResponseCacheControl& response_cache_control) {\n  std::string s = \"{\";\n  s += response_cache_control.must_validate_ ? \"must_validate, \" : \"\";\n  s += response_cache_control.no_store_ ? \"no_store, \" : \"\";\n  s += response_cache_control.no_transform_ ? \"no_transform, \" : \"\";\n  s += response_cache_control.no_stale_ ? \"no_stale, \" : \"\";\n  s += response_cache_control.is_public_ ? \"public, \" : \"\";\n\n  s += response_cache_control.max_age_.has_value()\n           ? 
\"max-age=\" + std::to_string(response_cache_control.max_age_.value().count()) + \", \"\n           : \"\";\n\n  // Remove any extra \", \" at the end\n  if (s.size() > 1) {\n    s.pop_back();\n    s.pop_back();\n  }\n\n  s += \"}\";\n  return os << s;\n}\n\nstd::ostream& operator<<(std::ostream& os, CacheEntryStatus status) {\n  switch (status) {\n  case CacheEntryStatus::Ok:\n    return os << \"Ok\";\n  case CacheEntryStatus::Unusable:\n    return os << \"Unusable\";\n  case CacheEntryStatus::RequiresValidation:\n    return os << \"RequiresValidation\";\n  case CacheEntryStatus::FoundNotModified:\n    return os << \"FoundNotModified\";\n  case CacheEntryStatus::SatisfiableRange:\n    return os << \"SatisfiableRange\";\n  case CacheEntryStatus::NotSatisfiableRange:\n    return os << \"NotSatisfiableRange\";\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nstd::ostream& operator<<(std::ostream& os, const AdjustedByteRange& range) {\n  return os << \"[\" << range.begin() << \",\" << range.end() << \")\";\n}\n\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/http/cache/config_test.cc",
    "content": "#include \"source/extensions/filters/http/cache/simple_http_cache/config.pb.h\"\n\n#include \"extensions/filters/http/cache/cache_filter.h\"\n#include \"extensions/filters/http/cache/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\nnamespace {\n\nclass CacheFilterFactoryTest : public ::testing::Test {\nprotected:\n  envoy::extensions::filters::http::cache::v3alpha::CacheConfig config_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  CacheFilterFactory factory_;\n  Http::MockFilterChainFactoryCallbacks filter_callback_;\n};\n\nTEST_F(CacheFilterFactoryTest, Basic) {\n  config_.mutable_typed_config()->PackFrom(\n      envoy::source::extensions::filters::http::cache::SimpleHttpCacheConfig());\n  Http::FilterFactoryCb cb = factory_.createFilterFactoryFromProto(config_, \"stats\", context_);\n  Http::StreamFilterSharedPtr filter;\n  EXPECT_CALL(filter_callback_, addStreamFilter(_)).WillOnce(::testing::SaveArg<0>(&filter));\n  cb(filter_callback_);\n  ASSERT(filter);\n  ASSERT(dynamic_cast<CacheFilter*>(filter.get()));\n}\n\nTEST_F(CacheFilterFactoryTest, NoTypedConfig) {\n  EXPECT_THROW(factory_.createFilterFactoryFromProto(config_, \"stats\", context_), EnvoyException);\n}\n\nTEST_F(CacheFilterFactoryTest, UnregisteredTypedConfig) {\n  config_.mutable_typed_config()->PackFrom(\n      envoy::extensions::filters::http::cache::v3alpha::CacheConfig());\n  EXPECT_THROW(factory_.createFilterFactoryFromProto(config_, \"stats\", context_), EnvoyException);\n}\n\n} // namespace\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cache/http_cache_test.cc",
    "content": "#include <chrono>\n#include <string>\n\n#include \"extensions/filters/http/cache/cache_headers_utils.h\"\n#include \"extensions/filters/http/cache/http_cache.h\"\n#include \"extensions/filters/http/cache/inline_headers_handles.h\"\n\n#include \"test/extensions/filters/http/cache/common.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::ContainerEq;\nusing testing::TestWithParam;\nusing testing::ValuesIn;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\nnamespace {\n\nstruct LookupRequestTestCase {\n  std::string test_name, request_cache_control, response_cache_control;\n  SystemTime request_time, response_date;\n  CacheEntryStatus expected_cache_entry_status;\n  std::string expected_age;\n};\n\nusing Seconds = std::chrono::seconds;\n\nenvoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() {\n  // Allows 'accept' to be varied in the tests.\n  envoy::extensions::filters::http::cache::v3alpha::CacheConfig config;\n  const auto& add_accept = config.mutable_allowed_vary_headers()->Add();\n  add_accept->set_exact(\"accept\");\n  return config;\n}\n\nclass LookupRequestTest : public testing::TestWithParam<LookupRequestTestCase> {\npublic:\n  LookupRequestTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {}\n\n  DateFormatter formatter_{\"%a, %d %b %Y %H:%M:%S GMT\"};\n  Http::TestRequestHeaderMapImpl request_headers_{{\":path\", \"/\"},\n                                                  {\":method\", \"GET\"},\n                                                  {\"x-forwarded-proto\", \"https\"},\n                                                  {\":authority\", \"example.com\"}};\n\n  VaryHeader vary_allow_list_;\n\n  static const SystemTime& currentTime() {\n    CONSTRUCT_ON_FIRST_USE(SystemTime, Event::SimulatedTimeSystem().systemTime());\n  }\n\n  
static const std::vector<LookupRequestTestCase>& getTestCases() {\n    CONSTRUCT_ON_FIRST_USE(std::vector<LookupRequestTestCase>,\n                           {\"request_requires_revalidation\",\n                            /*request_cache_control=*/\"no-cache\",\n                            /*response_cache_control=*/\"public, max-age=3600\",\n                            /*request_time=*/currentTime(),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"0\"},\n                           {\"response_requires_revalidation\",\n                            /*request_cache_control=*/\"\",\n                            /*response_cache_control=*/\"no-cache\",\n                            /*request_time=*/currentTime(),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"0\"},\n                           {\"request_max_age_satisfied\",\n                            /*request_cache_control=*/\"max-age=10\",\n                            /*response_cache_control=*/\"public, max-age=3600\",\n                            /*request_time=*/currentTime() + Seconds(9),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::Ok,\n                            /*expected_age=*/\"9\"},\n                           {\"request_max_age_unsatisfied\",\n                            /*request_cache_control=*/\"max-age=10\",\n                            /*response_cache_control=*/\"public, max-age=3600\",\n                            /*request_time=*/currentTime() + Seconds(11),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                 
           /*expected_age=*/\"11\"},\n                           {\"request_min_fresh_satisfied\",\n                            /*request_cache_control=*/\"min-fresh=1000\",\n                            /*response_cache_control=*/\"public, max-age=2000\",\n                            /*request_time=*/currentTime() + Seconds(999),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::Ok,\n                            /*expected_age=*/\"999\"},\n                           {\"request_min_fresh_unsatisfied\",\n                            /*request_cache_control=*/\"min-fresh=1000\",\n                            /*response_cache_control=*/\"public, max-age=2000\",\n                            /*request_time=*/currentTime() + Seconds(1001),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"1001\"},\n                           {\"request_max_age_satisfied_but_min_fresh_unsatisfied\",\n                            /*request_cache_control=*/\"max-age=1500, min-fresh=1000\",\n                            /*response_cache_control=*/\"public, max-age=2000\",\n                            /*request_time=*/currentTime() + Seconds(1001),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"1001\"},\n                           {\"request_max_age_satisfied_but_max_stale_unsatisfied\",\n                            /*request_cache_control=*/\"max-age=1500, max-stale=400\",\n                            /*response_cache_control=*/\"public, max-age=1000\",\n                            /*request_time=*/currentTime() + Seconds(1401),\n                            /*response_date=*/currentTime(),\n                            
/*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"1401\"},\n                           {\"request_max_stale_satisfied_but_min_fresh_unsatisfied\",\n                            /*request_cache_control=*/\"min-fresh=1000, max-stale=500\",\n                            /*response_cache_control=*/\"public, max-age=2000\",\n                            /*request_time=*/currentTime() + Seconds(1001),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"1001\"},\n                           {\"request_max_stale_satisfied_but_max_age_unsatisfied\",\n                            /*request_cache_control=*/\"max-age=1200, max-stale=500\",\n                            /*response_cache_control=*/\"public, max-age=1000\",\n                            /*request_time=*/currentTime() + Seconds(1201),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"1201\"},\n                           {\"request_min_fresh_satisfied_but_max_age_unsatisfied\",\n                            /*request_cache_control=*/\"max-age=500, min-fresh=400\",\n                            /*response_cache_control=*/\"public, max-age=1000\",\n                            /*request_time=*/currentTime() + Seconds(501),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"501\"},\n                           {\"expired\",\n                            /*request_cache_control=*/\"\",\n                            /*response_cache_control=*/\"public, max-age=1000\",\n                            /*request_time=*/currentTime() + Seconds(1001),\n    
                        /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"1001\"},\n                           {\"expired_but_max_stale_satisfied\",\n                            /*request_cache_control=*/\"max-stale=500\",\n                            /*response_cache_control=*/\"public, max-age=1000\",\n                            /*request_time=*/currentTime() + Seconds(1499),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::Ok,\n                            /*expected_age=*/\"1499\"},\n                           {\"expired_max_stale_unsatisfied\",\n                            /*request_cache_control=*/\"max-stale=500\",\n                            /*response_cache_control=*/\"public, max-age=1000\",\n                            /*request_time=*/currentTime() + Seconds(1501),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"1501\"},\n                           {\"expired_max_stale_satisfied_but_response_must_revalidate\",\n                            /*request_cache_control=*/\"max-stale=500\",\n                            /*response_cache_control=*/\"public, max-age=1000, must-revalidate\",\n                            /*request_time=*/currentTime() + Seconds(1499),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::RequiresValidation,\n                            /*expected_age=*/\"1499\"},\n                           {\"fresh_and_response_must_revalidate\",\n                            /*request_cache_control=*/\"\",\n                            /*response_cache_control=*/\"public, max-age=1000, must-revalidate\",\n                            
/*request_time=*/currentTime() + Seconds(999),\n                            /*response_date=*/currentTime(),\n                            /*expected_result=*/CacheEntryStatus::Ok,\n                            /*expected_age=*/\"999\"},\n\n    );\n  }\n};\n\nLookupResult makeLookupResult(const LookupRequest& lookup_request,\n                              const Http::TestResponseHeaderMapImpl& response_headers,\n                              uint64_t content_length = 0) {\n  // For the purpose of the test, set the response_time to the date header value.\n  ResponseMetadata metadata = {CacheHeadersUtils::httpTime(response_headers.Date())};\n  return lookup_request.makeLookupResult(\n      std::make_unique<Http::TestResponseHeaderMapImpl>(response_headers), std::move(metadata),\n      content_length);\n}\n\nINSTANTIATE_TEST_SUITE_P(ResultMatchesExpectation, LookupRequestTest,\n                         testing::ValuesIn(LookupRequestTest::getTestCases()),\n                         [](const auto& info) { return info.param.test_name; });\n\nTEST_P(LookupRequestTest, ResultWithoutBodyMatchesExpectation) {\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl,\n                                   GetParam().request_cache_control);\n  const SystemTime request_time = GetParam().request_time, response_date = GetParam().response_date;\n  const LookupRequest lookup_request(request_headers_, request_time, vary_allow_list_);\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"cache-control\", GetParam().response_cache_control},\n       {\"date\", formatter_.fromTime(response_date)}});\n  const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers);\n\n  EXPECT_EQ(GetParam().expected_cache_entry_status, lookup_response.cache_entry_status_);\n  ASSERT_TRUE(lookup_response.headers_);\n  EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers));\n  EXPECT_THAT(*lookup_response.headers_,\n       
       HeaderHasValueRef(Http::Headers::get().Age, GetParam().expected_age));\n  EXPECT_EQ(lookup_response.content_length_, 0);\n  EXPECT_TRUE(lookup_response.response_ranges_.empty());\n  EXPECT_FALSE(lookup_response.has_trailers_);\n}\n\nTEST_P(LookupRequestTest, ResultWithBodyMatchesExpectation) {\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl,\n                                   GetParam().request_cache_control);\n  const SystemTime request_time = GetParam().request_time, response_date = GetParam().response_date;\n  const LookupRequest lookup_request(request_headers_, request_time, vary_allow_list_);\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"cache-control\", GetParam().response_cache_control},\n       {\"date\", formatter_.fromTime(response_date)}});\n  const uint64_t content_length = 5;\n  const LookupResult lookup_response =\n      makeLookupResult(lookup_request, response_headers, content_length);\n\n  EXPECT_EQ(GetParam().expected_cache_entry_status, lookup_response.cache_entry_status_);\n  ASSERT_TRUE(lookup_response.headers_);\n  EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers));\n  EXPECT_THAT(*lookup_response.headers_,\n              HeaderHasValueRef(Http::Headers::get().Age, GetParam().expected_age));\n  EXPECT_EQ(lookup_response.content_length_, content_length);\n  EXPECT_TRUE(lookup_response.response_ranges_.empty());\n  EXPECT_FALSE(lookup_response.has_trailers_);\n}\n\nTEST_F(LookupRequestTest, ExpiredViaFallbackheader) {\n  const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_);\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"expires\", formatter_.fromTime(currentTime() - Seconds(5))},\n       {\"date\", formatter_.fromTime(currentTime())}});\n  const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers);\n\n  EXPECT_EQ(CacheEntryStatus::RequiresValidation, 
lookup_response.cache_entry_status_);\n}\n\nTEST_F(LookupRequestTest, NotExpiredViaFallbackheader) {\n  const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_);\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"expires\", formatter_.fromTime(currentTime() + Seconds(5))},\n       {\"date\", formatter_.fromTime(currentTime())}});\n  const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers);\n  EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_);\n}\n\n// If request Cache-Control header is missing,\n// \"Pragma:no-cache\" is equivalent to \"Cache-Control:no-cache\".\n// https://httpwg.org/specs/rfc7234.html#header.pragma\nTEST_F(LookupRequestTest, PragmaNoCacheFallback) {\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().Pragma, \"no-cache\");\n  const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_);\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"date\", formatter_.fromTime(currentTime())}, {\"cache-control\", \"public, max-age=3600\"}});\n  const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers);\n  // Response is not expired but the request requires revalidation through Pragma: no-cache.\n  EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_);\n}\n\nTEST_F(LookupRequestTest, PragmaNoCacheFallbackExtraDirectivesIgnored) {\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().Pragma,\n                                   \"no-cache, custom-directive=custom-value\");\n  const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_);\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"date\", formatter_.fromTime(currentTime())}, {\"cache-control\", \"public, max-age=3600\"}});\n  const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers);\n  // Response is not expired 
but the request requires revalidation through Pragma: no-cache.\n  EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_);\n}\n\nTEST_F(LookupRequestTest, PragmaFallbackOtherValuesIgnored) {\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().Pragma, \"max-age=0\");\n  const LookupRequest lookup_request(request_headers_, currentTime() + Seconds(5),\n                                     vary_allow_list_);\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"date\", formatter_.fromTime(currentTime())}, {\"cache-control\", \"public, max-age=3600\"}});\n  const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers);\n  // Response is fresh, Pragma header with values other than \"no-cache\" is ignored.\n  EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_);\n}\n\nTEST_F(LookupRequestTest, PragmaNoFallback) {\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().Pragma, \"no-cache\");\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"max-age=10\");\n  const LookupRequest lookup_request(request_headers_, currentTime() + Seconds(5),\n                                     vary_allow_list_);\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"date\", formatter_.fromTime(currentTime())}, {\"cache-control\", \"public, max-age=3600\"}});\n  const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers);\n  // Pragma header is ignored when Cache-Control header is present.\n  EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_);\n}\n\nTEST_F(LookupRequestTest, SingleSatisfiableRange) {\n  // add range info to headers\n  request_headers_.addReference(Http::Headers::get().Range, \"bytes=1-99\");\n  const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_);\n\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"date\", 
formatter_.fromTime(currentTime())},\n       {\"cache-control\", \"public, max-age=3600\"},\n       {\"content-length\", \"4\"}});\n  const uint64_t content_length = 4;\n  const LookupResult lookup_response =\n      makeLookupResult(lookup_request, response_headers, content_length);\n  ASSERT_EQ(CacheEntryStatus::SatisfiableRange, lookup_response.cache_entry_status_);\n\n  ASSERT_TRUE(lookup_response.headers_);\n  EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers));\n  EXPECT_EQ(lookup_response.content_length_, 4);\n\n  // checks that the ranges have been adjusted to the content's length\n  EXPECT_EQ(lookup_response.response_ranges_.size(), 1);\n\n  EXPECT_EQ(lookup_response.response_ranges_[0].begin(), 1);\n  EXPECT_EQ(lookup_response.response_ranges_[0].end(), 4);\n  EXPECT_EQ(lookup_response.response_ranges_[0].length(), 3);\n\n  EXPECT_FALSE(lookup_response.has_trailers_);\n}\n\nTEST_F(LookupRequestTest, MultipleSatisfiableRanges) {\n  // Because we do not support multi-part responses for now, we are limiting parsing of a single\n  // range. Thus, multiple ranges are ignored, and a usual \"::Ok\" should be expected. 
If multi-part\n  // responses are implemented (and the parsing limit is changed), this test should be adjusted.\n\n  // add range info to headers\n  request_headers_.addCopy(Http::Headers::get().Range.get(), \"bytes=1-99,3-,-3\");\n\n  const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_);\n\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"date\", formatter_.fromTime(currentTime())},\n       {\"cache-control\", \"public, max-age=3600\"},\n       {\"content-length\", \"4\"}});\n  const uint64_t content_length = 4;\n  const LookupResult lookup_response =\n      makeLookupResult(lookup_request, response_headers, content_length);\n\n  ASSERT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_);\n\n  ASSERT_TRUE(lookup_response.headers_);\n  EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers));\n  EXPECT_EQ(lookup_response.content_length_, 4);\n\n  // Check that the ranges have been ignored since we don't support multi-part responses.\n  EXPECT_EQ(lookup_response.response_ranges_.size(), 0);\n  EXPECT_FALSE(lookup_response.has_trailers_);\n}\n\nTEST_F(LookupRequestTest, NotSatisfiableRange) {\n  // add range info to headers\n  request_headers_.addReference(Http::Headers::get().Range, \"bytes=100-\");\n\n  const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_);\n\n  const Http::TestResponseHeaderMapImpl response_headers(\n      {{\"date\", formatter_.fromTime(currentTime())},\n       {\"cache-control\", \"public, max-age=3600\"},\n       {\"content-length\", \"4\"}});\n  const uint64_t content_length = 4;\n  const LookupResult lookup_response =\n      makeLookupResult(lookup_request, response_headers, content_length);\n  ASSERT_EQ(CacheEntryStatus::NotSatisfiableRange, lookup_response.cache_entry_status_);\n\n  ASSERT_TRUE(lookup_response.headers_);\n  EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers));\n  
EXPECT_EQ(lookup_response.content_length_, 4);\n  ASSERT_TRUE(lookup_response.response_ranges_.empty());\n  EXPECT_FALSE(lookup_response.has_trailers_);\n}\n\nTEST(RawByteRangeTest, IsSuffix) {\n  auto r = RawByteRange(UINT64_MAX, 4);\n  ASSERT_TRUE(r.isSuffix());\n}\n\nTEST(RawByteRangeTest, IsNotSuffix) {\n  auto r = RawByteRange(3, 4);\n  ASSERT_FALSE(r.isSuffix());\n}\n\nTEST(RawByteRangeTest, FirstBytePos) {\n  auto r = RawByteRange(3, 4);\n  ASSERT_EQ(3, r.firstBytePos());\n}\n\nTEST(RawByteRangeTest, LastBytePos) {\n  auto r = RawByteRange(3, 4);\n  ASSERT_EQ(4, r.lastBytePos());\n}\n\nTEST(RawByteRangeTest, SuffixLength) {\n  auto r = RawByteRange(UINT64_MAX, 4);\n  ASSERT_EQ(4, r.suffixLength());\n}\n\nTEST(AdjustedByteRangeTest, Length) {\n  auto a = AdjustedByteRange(3, 6);\n  ASSERT_EQ(3, a.length());\n}\n\nTEST(AdjustedByteRangeTest, TrimFront) {\n  auto a = AdjustedByteRange(3, 6);\n  a.trimFront(2);\n  ASSERT_EQ(5, a.begin());\n}\n\nTEST(AdjustedByteRangeTest, MaxLength) {\n  auto a = AdjustedByteRange(0, UINT64_MAX);\n  ASSERT_EQ(UINT64_MAX, a.length());\n}\n\nTEST(AdjustedByteRangeTest, MaxTrim) {\n  auto a = AdjustedByteRange(0, UINT64_MAX);\n  a.trimFront(UINT64_MAX);\n  ASSERT_EQ(0, a.length());\n}\n\nstruct AdjustByteRangeParams {\n  std::vector<RawByteRange> request;\n  std::vector<AdjustedByteRange> result;\n  uint64_t content_length;\n};\n\nAdjustByteRangeParams satisfiable_ranges[] =\n    // request, result, content_length\n    {\n        // Various ways to request the full body. 
Full responses are signaled by empty result\n        // vectors.\n        {{{0, 3}}, {}, 4},                       // byte-range-spec, exact\n        {{{UINT64_MAX, 4}}, {}, 4},              // suffix-byte-range-spec, exact\n        {{{0, 99}}, {}, 4},                      // byte-range-spec, overlong\n        {{{0, UINT64_MAX}}, {}, 4},              // byte-range-spec, overlong\n        {{{UINT64_MAX, 5}}, {}, 4},              // suffix-byte-range-spec, overlong\n        {{{UINT64_MAX, UINT64_MAX - 1}}, {}, 4}, // suffix-byte-range-spec, overlong\n        {{{UINT64_MAX, UINT64_MAX}}, {}, 4},     // suffix-byte-range-spec, overlong\n\n        // Single bytes\n        {{{0, 0}}, {{0, 1}}, 4},\n        {{{1, 1}}, {{1, 2}}, 4},\n        {{{3, 3}}, {{3, 4}}, 4},\n        {{{UINT64_MAX, 1}}, {{3, 4}}, 4},\n\n        // Multiple bytes, starting in the middle\n        {{{1, 2}}, {{1, 3}}, 4},           // fully in the middle\n        {{{1, 3}}, {{1, 4}}, 4},           // to the end\n        {{{2, 21}}, {{2, 4}}, 4},          // overlong\n        {{{1, UINT64_MAX}}, {{1, 4}}, 4}}; // overlong\n// TODO(toddmgreer): Before enabling support for multi-range requests, test it.\n\nclass AdjustByteRangeTest : public TestWithParam<AdjustByteRangeParams> {};\n\nTEST_P(AdjustByteRangeTest, All) {\n  std::vector<AdjustedByteRange> result;\n  ASSERT_TRUE(adjustByteRangeSet(result, GetParam().request, GetParam().content_length));\n  EXPECT_THAT(result, ContainerEq(GetParam().result));\n}\n\nINSTANTIATE_TEST_SUITE_P(AdjustByteRangeTest, AdjustByteRangeTest, ValuesIn(satisfiable_ranges));\n\nclass AdjustByteRangeUnsatisfiableTest : public TestWithParam<std::vector<RawByteRange>> {};\n\nstd::vector<RawByteRange> unsatisfiable_ranges[] = {\n    {{4, 5}},\n    {{4, 9}},\n    {{7, UINT64_MAX}},\n    {{UINT64_MAX, 0}},\n};\n\nTEST_P(AdjustByteRangeUnsatisfiableTest, All) {\n  std::vector<AdjustedByteRange> result;\n  ASSERT_FALSE(adjustByteRangeSet(result, GetParam(), 
3));\n}\n\nINSTANTIATE_TEST_SUITE_P(AdjustByteRangeUnsatisfiableTest, AdjustByteRangeUnsatisfiableTest,\n                         ValuesIn(unsatisfiable_ranges));\n\nTEST(AdjustByteRange, NoRangeRequest) {\n  std::vector<AdjustedByteRange> result;\n  ASSERT_TRUE(adjustByteRangeSet(result, {}, 8));\n  EXPECT_THAT(result, ContainerEq(std::vector<AdjustedByteRange>{}));\n}\n\nnamespace {\nHttp::TestRequestHeaderMapImpl makeTestHeaderMap(std::string range_value) {\n  return Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"}, {\"range\", range_value}};\n}\n} // namespace\n\nTEST(ParseRangesTest, NoRangeHeader) {\n  Http::TestRequestHeaderMapImpl headers = Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"}};\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(headers, 5);\n\n  ASSERT_EQ(0, result_vector.size());\n}\n\nTEST(ParseRangesTest, InvalidUnit) {\n  Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap(\"bits=3-4\");\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(headers, 5);\n\n  ASSERT_EQ(0, result_vector.size());\n}\n\nTEST(ParseRangesTest, SingleRange) {\n  Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap(\"bytes=3-4\");\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(headers, 5);\n\n  ASSERT_EQ(1, result_vector.size());\n\n  ASSERT_EQ(3, result_vector[0].firstBytePos());\n  ASSERT_EQ(4, result_vector[0].lastBytePos());\n}\n\nTEST(ParseRangesTest, MissingFirstBytePos) {\n  Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap(\"bytes=-5\");\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(headers, 5);\n\n  ASSERT_EQ(1, result_vector.size());\n\n  ASSERT_TRUE(result_vector[0].isSuffix());\n  ASSERT_EQ(5, result_vector[0].suffixLength());\n}\n\nTEST(ParseRangesTest, MissingLastBytePos) {\n  Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap(\"bytes=6-\");\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(headers, 
5);\n\n  ASSERT_EQ(1, result_vector.size());\n\n  ASSERT_EQ(6, result_vector[0].firstBytePos());\n  ASSERT_EQ(std::numeric_limits<uint64_t>::max(), result_vector[0].lastBytePos());\n}\n\nTEST(ParseRangesTest, MultipleRanges) {\n  Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap(\"bytes=345-456,-567,6789-\");\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(headers, 5);\n\n  ASSERT_EQ(3, result_vector.size());\n\n  ASSERT_EQ(345, result_vector[0].firstBytePos());\n  ASSERT_EQ(456, result_vector[0].lastBytePos());\n\n  ASSERT_TRUE(result_vector[1].isSuffix());\n  ASSERT_EQ(567, result_vector[1].suffixLength());\n\n  ASSERT_EQ(6789, result_vector[2].firstBytePos());\n  ASSERT_EQ(UINT64_MAX, result_vector[2].lastBytePos());\n}\n\nTEST(ParseRangesTest, LongRangeHeaderValue) {\n  Http::TestRequestHeaderMapImpl headers =\n      makeTestHeaderMap(\"bytes=1000-1000,1001-1001,1002-1002,1003-1003,1004-1004,1005-\"\n                        \"1005,1006-1006,1007-1007,1008-1008,100-\");\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(headers, 10);\n\n  ASSERT_EQ(10, result_vector.size());\n}\n\nTEST(ParseRangesTest, ZeroRangeLimit) {\n  Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap(\"bytes=1000-1000\");\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(headers, 0);\n\n  ASSERT_EQ(0, result_vector.size());\n}\n\nTEST(ParseRangesTest, OverRangeLimit) {\n  Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap(\"bytes=1000-1000,1001-1001\");\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(headers, 1);\n\n  ASSERT_EQ(0, result_vector.size());\n}\n\nclass ParseInvalidRangeHeaderTest : public testing::Test,\n                                    public testing::WithParamInterface<std::string> {\nprotected:\n  Http::TestRequestHeaderMapImpl range() { return makeTestHeaderMap(GetParam()); }\n};\n\n// clang-format off\nINSTANTIATE_TEST_SUITE_P(\n    Default, 
ParseInvalidRangeHeaderTest,\n    testing::Values(\"-\",\n                    \"1-2\",\n                    \"12\",\n                    \"a\",\n                    \"a1\",\n                    \"bytes=\",\n                    \"bytes=-\",\n                    \"bytes1-2\",\n                    \"bytes=12\",\n                    \"bytes=1-2-3\",\n                    \"bytes=1-2-\",\n                    \"bytes=1--3\",\n                    \"bytes=--2\",\n                    \"bytes=2--\",\n                    \"bytes=-2-\",\n                    \"bytes=-1-2\",\n                    \"bytes=a-2\",\n                    \"bytes=2-a\",\n                    \"bytes=-a\",\n                    \"bytes=a-\",\n                    \"bytes=a1-2\",\n                    \"bytes=1-a2\",\n                    \"bytes=1a-2\",\n                    \"bytes=1-2a\",\n                    \"bytes=1-2,3-a\",\n                    \"bytes=1-a,3-4\",\n                    \"bytes=1-2,3a-4\",\n                    \"bytes=1-2,3-4a\",\n                    \"bytes=1-2,3-4-5\",\n                    \"bytes=1-2,bytes=3-4\",\n                    \"bytes=1-2,3-4,a\",\n                    // too many byte ranges (test sets the limit as 5)\n                    \"bytes=0-1,1-2,2-3,3-4,4-5,5-6\",\n                    // UINT64_MAX-UINT64_MAX+1\n                    \"bytes=18446744073709551615-18446744073709551616\",\n                    // UINT64_MAX+1-UINT64_MAX+2\n                    \"bytes=18446744073709551616-18446744073709551617\"));\n// clang-format on\n\nTEST_P(ParseInvalidRangeHeaderTest, InvalidRangeReturnsEmpty) {\n  std::vector<RawByteRange> result_vector = RangeRequests::parseRanges(range(), 5);\n  ASSERT_EQ(0, result_vector.size());\n}\n\nTEST_F(LookupRequestTest, VariedHeaders) {\n  request_headers_.addCopy(\"accept\", \"image/*\");\n  request_headers_.addCopy(\"other-header\", \"abc123\");\n  const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_);\n  const 
Http::RequestHeaderMap& result = lookup_request.getVaryHeaders();\n\n  ASSERT_TRUE(result.get(Http::LowerCaseString(\"accept\")));\n  ASSERT_EQ(result.get(Http::LowerCaseString(\"accept\"))->value().getStringView(), \"image/*\");\n  ASSERT_FALSE(result.get(Http::LowerCaseString(\"other-header\")));\n}\n\n} // namespace\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cache/simple_http_cache/BUILD",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"simple_http_cache_test\",\n    srcs = [\"simple_http_cache_test.cc\"],\n    extension_name = \"envoy.filters.http.cache.simple_http_cache\",\n    deps = [\n        \"//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib\",\n        \"//test/extensions/filters/http/cache:common\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc",
    "content": "#include \"envoy/http/header_map.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/http/cache/cache_headers_utils.h\"\n#include \"extensions/filters/http/cache/simple_http_cache/simple_http_cache.h\"\n\n#include \"test/extensions/filters/http/cache/common.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cache {\nnamespace {\n\nconst std::string EpochDate = \"Thu, 01 Jan 1970 00:00:00 GMT\";\n\nenvoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() {\n  // Allows 'accept' to be varied in the tests.\n  envoy::extensions::filters::http::cache::v3alpha::CacheConfig config;\n  const auto& add_accept = config.mutable_allowed_vary_headers()->Add();\n  add_accept->set_exact(\"accept\");\n  return config;\n}\n\nclass SimpleHttpCacheTest : public testing::Test {\nprotected:\n  SimpleHttpCacheTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {\n    request_headers_.setMethod(\"GET\");\n    request_headers_.setHost(\"example.com\");\n    request_headers_.setForwardedProto(\"https\");\n    request_headers_.setCopy(Http::CustomHeaders::get().CacheControl, \"max-age=3600\");\n  }\n\n  // Performs a cache lookup.\n  LookupContextPtr lookup(absl::string_view request_path) {\n    LookupRequest request = makeLookupRequest(request_path);\n    LookupContextPtr context = cache_.makeLookupContext(std::move(request));\n    context->getHeaders([this](LookupResult&& result) { lookup_result_ = std::move(result); });\n    return context;\n  }\n\n  // Inserts a value into the cache.\n  void insert(LookupContextPtr lookup, const Http::TestResponseHeaderMapImpl& response_headers,\n              const absl::string_view response_body) {\n    InsertContextPtr inserter = cache_.makeInsertContext(move(lookup));\n    const 
ResponseMetadata metadata = {current_time_};\n    inserter->insertHeaders(response_headers, metadata, false);\n    inserter->insertBody(Buffer::OwnedImpl(response_body), nullptr, true);\n  }\n\n  void insert(absl::string_view request_path,\n              const Http::TestResponseHeaderMapImpl& response_headers,\n              const absl::string_view response_body) {\n    insert(lookup(request_path), response_headers, response_body);\n  }\n\n  std::string getBody(LookupContext& context, uint64_t start, uint64_t end) {\n    AdjustedByteRange range(start, end);\n    std::string body;\n    context.getBody(range, [&body](Buffer::InstancePtr&& data) {\n      EXPECT_NE(data, nullptr);\n      if (data) {\n        body = data->toString();\n      }\n    });\n    return body;\n  }\n\n  LookupRequest makeLookupRequest(absl::string_view request_path) {\n    request_headers_.setPath(request_path);\n    return LookupRequest(request_headers_, current_time_, vary_allow_list_);\n  }\n\n  AssertionResult expectLookupSuccessWithBody(LookupContext* lookup_context,\n                                              absl::string_view body) {\n    if (lookup_result_.cache_entry_status_ != CacheEntryStatus::Ok) {\n      return AssertionFailure() << \"Expected: lookup_result_.cache_entry_status == \"\n                                   \"CacheEntryStatus::Ok\\n  Actual: \"\n                                << lookup_result_.cache_entry_status_;\n    }\n    if (!lookup_result_.headers_) {\n      return AssertionFailure() << \"Expected nonnull lookup_result_.headers\";\n    }\n    if (!lookup_context) {\n      return AssertionFailure() << \"Expected nonnull lookup_context\";\n    }\n    const std::string actual_body = getBody(*lookup_context, 0, body.size());\n    if (body != actual_body) {\n      return AssertionFailure() << \"Expected body == \" << body << \"\\n  Actual:  \" << actual_body;\n    }\n    return AssertionSuccess();\n  }\n\n  SimpleHttpCache cache_;\n  LookupResult lookup_result_;\n  
Http::TestRequestHeaderMapImpl request_headers_;\n  Event::SimulatedTimeSystem time_source_;\n  SystemTime current_time_ = time_source_.systemTime();\n  DateFormatter formatter_{\"%a, %d %b %Y %H:%M:%S GMT\"};\n  VaryHeader vary_allow_list_;\n};\n\n// Simple flow of putting in an item, getting it, deleting it.\nTEST_F(SimpleHttpCacheTest, PutGet) {\n  const std::string RequestPath1(\"Name\");\n  LookupContextPtr name_lookup_context = lookup(RequestPath1);\n  EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"date\", formatter_.fromTime(current_time_)},\n                                                   {\"cache-control\", \"public,max-age=3600\"}};\n\n  const std::string Body1(\"Value\");\n  insert(move(name_lookup_context), response_headers, Body1);\n  name_lookup_context = lookup(RequestPath1);\n  EXPECT_TRUE(expectLookupSuccessWithBody(name_lookup_context.get(), Body1));\n\n  const std::string& RequestPath2(\"Another Name\");\n  LookupContextPtr another_name_lookup_context = lookup(RequestPath2);\n  EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_);\n\n  const std::string NewBody1(\"NewValue\");\n  insert(move(name_lookup_context), response_headers, NewBody1);\n  EXPECT_TRUE(expectLookupSuccessWithBody(lookup(RequestPath1).get(), NewBody1));\n}\n\nTEST_F(SimpleHttpCacheTest, PrivateResponse) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"date\", formatter_.fromTime(current_time_)},\n                                                   {\"age\", \"2\"},\n                                                   {\"cache-control\", \"private,max-age=3600\"}};\n  const std::string request_path(\"Name\");\n\n  LookupContextPtr name_lookup_context = lookup(request_path);\n  EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_);\n\n  const std::string Body(\"Value\");\n  // We must make sure at cache insertion time, private responses must not 
be\n  // inserted. However, if the insertion did happen, it would be served at the\n  // time of lookup.\n  insert(move(name_lookup_context), response_headers, Body);\n  EXPECT_TRUE(expectLookupSuccessWithBody(lookup(request_path).get(), Body));\n}\n\nTEST_F(SimpleHttpCacheTest, Miss) {\n  LookupContextPtr name_lookup_context = lookup(\"Name\");\n  EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_);\n}\n\nTEST_F(SimpleHttpCacheTest, Fresh) {\n  const Http::TestResponseHeaderMapImpl response_headers = {\n      {\"date\", formatter_.fromTime(current_time_)}, {\"cache-control\", \"public, max-age=3600\"}};\n  // TODO(toddmgreer): Test with various date headers.\n  insert(\"/\", response_headers, \"\");\n  time_source_.advanceTimeWait(Seconds(3600));\n  lookup(\"/\");\n  EXPECT_EQ(CacheEntryStatus::Ok, lookup_result_.cache_entry_status_);\n}\n\nTEST_F(SimpleHttpCacheTest, Stale) {\n  const Http::TestResponseHeaderMapImpl response_headers = {\n      {\"date\", formatter_.fromTime(current_time_)}, {\"cache-control\", \"public, max-age=3600\"}};\n  // TODO(toddmgreer): Test with various date headers.\n  insert(\"/\", response_headers, \"\");\n  time_source_.advanceTimeWait(Seconds(3601));\n  lookup(\"/\");\n  EXPECT_EQ(CacheEntryStatus::Ok, lookup_result_.cache_entry_status_);\n}\n\nTEST_F(SimpleHttpCacheTest, RequestSmallMinFresh) {\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"min-fresh=1000\");\n  const std::string request_path(\"Name\");\n  LookupContextPtr name_lookup_context = lookup(request_path);\n  EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"date\", formatter_.fromTime(current_time_)},\n                                                   {\"age\", \"6000\"},\n                                                   {\"cache-control\", \"public, max-age=9000\"}};\n  const std::string Body(\"Value\");\n  
insert(move(name_lookup_context), response_headers, Body);\n  EXPECT_TRUE(expectLookupSuccessWithBody(lookup(request_path).get(), Body));\n}\n\nTEST_F(SimpleHttpCacheTest, ResponseStaleWithRequestLargeMaxStale) {\n  request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, \"max-stale=9000\");\n\n  const std::string request_path(\"Name\");\n  LookupContextPtr name_lookup_context = lookup(request_path);\n  EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"date\", formatter_.fromTime(current_time_)},\n                                                   {\"age\", \"7200\"},\n                                                   {\"cache-control\", \"public, max-age=3600\"}};\n\n  const std::string Body(\"Value\");\n  insert(move(name_lookup_context), response_headers, Body);\n  EXPECT_TRUE(expectLookupSuccessWithBody(lookup(request_path).get(), Body));\n}\n\nTEST_F(SimpleHttpCacheTest, StreamingPut) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"date\", formatter_.fromTime(current_time_)},\n                                                   {\"age\", \"2\"},\n                                                   {\"cache-control\", \"public, max-age=3600\"}};\n  InsertContextPtr inserter = cache_.makeInsertContext(lookup(\"request_path\"));\n  const ResponseMetadata metadata = {current_time_};\n  inserter->insertHeaders(response_headers, metadata, false);\n  inserter->insertBody(\n      Buffer::OwnedImpl(\"Hello, \"), [](bool ready) { EXPECT_TRUE(ready); }, false);\n  inserter->insertBody(Buffer::OwnedImpl(\"World!\"), nullptr, true);\n  LookupContextPtr name_lookup_context = lookup(\"request_path\");\n  EXPECT_EQ(CacheEntryStatus::Ok, lookup_result_.cache_entry_status_);\n  EXPECT_NE(nullptr, lookup_result_.headers_);\n  ASSERT_EQ(13, lookup_result_.content_length_);\n  EXPECT_EQ(\"Hello, World!\", getBody(*name_lookup_context, 0, 13));\n}\n\nTEST(Registration, 
GetFactory) {\n  HttpCacheFactory* factory = Registry::FactoryRegistry<HttpCacheFactory>::getFactoryByType(\n      \"envoy.source.extensions.filters.http.cache.SimpleHttpCacheConfig\");\n  ASSERT_NE(factory, nullptr);\n  envoy::extensions::filters::http::cache::v3alpha::CacheConfig config;\n  config.mutable_typed_config()->PackFrom(*factory->createEmptyConfigProto());\n  EXPECT_EQ(factory->getCache(config).cacheInfo().name_, \"envoy.extensions.http.cache.simple\");\n}\n\nTEST_F(SimpleHttpCacheTest, VaryResponses) {\n  // Responses will vary on accept.\n  const std::string RequestPath(\"some-resource\");\n  Http::TestResponseHeaderMapImpl response_headers{{\"date\", formatter_.fromTime(current_time_)},\n                                                   {\"cache-control\", \"public,max-age=3600\"},\n                                                   {\"vary\", \"accept\"}};\n\n  // First request.\n  request_headers_.setCopy(Http::LowerCaseString(\"accept\"), \"image/*\");\n  LookupContextPtr first_value_vary = lookup(RequestPath);\n  EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_);\n  const std::string Body1(\"accept is image/*\");\n  insert(move(first_value_vary), response_headers, Body1);\n  first_value_vary = lookup(RequestPath);\n  EXPECT_TRUE(expectLookupSuccessWithBody(first_value_vary.get(), Body1));\n\n  // Second request with a different value for the varied header.\n  request_headers_.setCopy(Http::LowerCaseString(\"accept\"), \"text/html\");\n  LookupContextPtr second_value_vary = lookup(RequestPath);\n  // Should miss because we don't have this version of the response saved yet.\n  EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_);\n  // Add second version and make sure we receive the correct one..\n  const std::string Body2(\"accept is text/html\");\n  insert(move(second_value_vary), response_headers, Body2);\n  EXPECT_TRUE(expectLookupSuccessWithBody(lookup(RequestPath).get(), Body2));\n\n  // Looks up 
first version again to be sure it wasn't replaced with the second one.\n  EXPECT_TRUE(expectLookupSuccessWithBody(first_value_vary.get(), Body1));\n}\n\n} // namespace\n} // namespace Cache\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.cdn_loop\",\n    deps = [\n        \"//source/extensions/filters/http/cdn_loop:config\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"filter_integration_test\",\n    srcs = [\"filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.cdn_loop\",\n    deps = [\n        \"//source/extensions/filters/http/cdn_loop:config\",\n        \"//test/integration:http_protocol_integration_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"filter_test\",\n    srcs = [\"filter_test.cc\"],\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//source/extensions/filters/http/cdn_loop:filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"parser_test\",\n    srcs = [\"parser_test.cc\"],\n    deps = [\n        \"//source/extensions/filters/http/cdn_loop:parser_lib\",\n        \"//test/test_common:status_utility_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"parser_fuzz_test\",\n    srcs = [\"parser_fuzz_test.cc\"],\n    corpus = \"parser_corpus\",\n    deps = [\n        \"//source/common/common:statusor_lib\",\n 
       \"//source/extensions/filters/http/cdn_loop:parser_lib\",\n        \"//test/fuzz:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utils_test\",\n    srcs = [\"utils_test.cc\"],\n    deps = [\n        \"//source/extensions/filters/http/cdn_loop:utils_lib\",\n        \"//test/test_common:status_utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/config_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h\"\n\n#include \"extensions/filters/http/cdn_loop/config.h\"\n#include \"extensions/filters/http/cdn_loop/filter.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\n\nusing testing::HasSubstr;\n\nTEST(CdnLoopFilterFactoryTest, ValidValuesWork) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  Http::StreamDecoderFilterSharedPtr filter;\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  EXPECT_CALL(filter_callbacks, addStreamDecoderFilter).WillOnce(::testing::SaveArg<0>(&filter));\n\n  envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config;\n  config.set_cdn_id(\"cdn\");\n  CdnLoopFilterFactory factory;\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, \"stats\", context);\n  cb(filter_callbacks);\n  EXPECT_NE(filter.get(), nullptr);\n  EXPECT_NE(dynamic_cast<CdnLoopFilter*>(filter.get()), nullptr);\n}\n\nTEST(CdnLoopFilterFactoryTest, BlankCdnIdThrows) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config;\n  CdnLoopFilterFactory factory;\n\n  EXPECT_THAT_THROWS_MESSAGE(factory.createFilterFactoryFromProto(config, \"stats\", context),\n                             ProtoValidationException, HasSubstr(\"value length must be at least\"));\n}\n\nTEST(CdnLoopFilterFactoryTest, InvalidCdnId) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config;\n  config.set_cdn_id(\"[not-token-or-ip\");\n  CdnLoopFilterFactory factory;\n\n  EXPECT_THAT_THROWS_MESSAGE(factory.createFilterFactoryFromProto(config, \"stats\", context),\n                             
EnvoyException, HasSubstr(\"is not a valid CDN identifier\"));\n}\n\nTEST(CdnLoopFilterFactoryTest, InvalidCdnIdNonHeaderWhitespace) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config;\n  config.set_cdn_id(\"\\r\\n\");\n  CdnLoopFilterFactory factory;\n\n  EXPECT_THAT_THROWS_MESSAGE(factory.createFilterFactoryFromProto(config, \"stats\", context),\n                             EnvoyException, HasSubstr(\"is not a valid CDN identifier\"));\n}\n\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/filter_integration_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h\"\n\n#include \"test/integration/http_protocol_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\nnamespace {\n\nconst std::string MaxDefaultConfig = R\"EOF(\nname: envoy.filters.http.cdn_loop\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig\n  cdn_id: cdn\n)EOF\";\n\nconst std::string MaxOf2Config = R\"EOF(\nname: envoy.filters.http.cdn_loop\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig\n  cdn_id: cdn\n  max_allowed_occurrences: 2\n)EOF\";\n\nclass CdnLoopFilterIntegrationTest : public HttpProtocolIntegrationTest {};\n\nTEST_P(CdnLoopFilterIntegrationTest, NoCdnLoopHeader) {\n  config_helper_.addFilter(MaxDefaultConfig);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  const auto* payload_entry = upstream_request_->headers().get(Http::LowerCaseString(\"CDN-Loop\"));\n  ASSERT_NE(payload_entry, nullptr);\n  EXPECT_EQ(payload_entry->value().getStringView(), \"cdn\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(CdnLoopFilterIntegrationTest, CdnLoopHeaderWithOtherCdns) {\n  config_helper_.addFilter(MaxDefaultConfig);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                               
  {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"CDN-Loop\", \"cdn1,cdn2\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  const auto* payload_entry = upstream_request_->headers().get(Http::LowerCaseString(\"CDN-Loop\"));\n  ASSERT_NE(payload_entry, nullptr);\n  EXPECT_EQ(payload_entry->value().getStringView(), \"cdn1,cdn2,cdn\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(CdnLoopFilterIntegrationTest, MultipleCdnLoopHeaders) {\n  config_helper_.addFilter(MaxDefaultConfig);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},   {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},  {\":authority\", \"host\"},\n                                                 {\"CDN-Loop\", \"cdn1\"}, {\"CDN-Loop\", \"cdn2\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  const auto* payload_entry = upstream_request_->headers().get(Http::LowerCaseString(\"CDN-Loop\"));\n  ASSERT_NE(payload_entry, nullptr);\n  EXPECT_EQ(payload_entry->value().getStringView(), \"cdn1,cdn2,cdn\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(CdnLoopFilterIntegrationTest, CdnLoop0Allowed1Seen) {\n  config_helper_.addFilter(MaxDefaultConfig);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                             
                    {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"CDN-Loop\", \"cdn\"}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"502\", response->headers().getStatusValue());\n}\n\nTEST_P(CdnLoopFilterIntegrationTest, UnparseableHeader) {\n  config_helper_.addFilter(MaxDefaultConfig);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"CDN-Loop\", \"[bad-header\"}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"400\", response->headers().getStatusValue());\n}\n\nTEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed1Seen) {\n  config_helper_.addFilter(MaxOf2Config);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"CDN-Loop\", \"cdn\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  const auto* payload_entry = 
upstream_request_->headers().get(Http::LowerCaseString(\"CDN-Loop\"));\n  ASSERT_NE(payload_entry, nullptr);\n  EXPECT_EQ(payload_entry->value().getStringView(), \"cdn,cdn\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed2Seen) {\n  config_helper_.addFilter(MaxOf2Config);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"CDN-Loop\", \"cdn, cdn\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  const auto* payload_entry = upstream_request_->headers().get(Http::LowerCaseString(\"CDN-Loop\"));\n  ASSERT_NE(payload_entry, nullptr);\n  EXPECT_EQ(payload_entry->value().getStringView(), \"cdn, cdn,cdn\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed3Seen) {\n  config_helper_.addFilter(MaxOf2Config);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"CDN-Loop\", \"cdn, cdn, cdn\"}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n  
ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"502\", response->headers().getStatusValue());\n}\n\nINSTANTIATE_TEST_SUITE_P(Protocols, CdnLoopFilterIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams(\n                             {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2},\n                             // Upstream doesn't matter, so by testing only 1,\n                             // the test is twice as fast.\n                             {FakeHttpConnection::Type::HTTP1})),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n} // namespace\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/filter_test.cc",
    "content": "#include \"envoy/http/codes.h\"\n#include \"envoy/http/filter.h\"\n\n#include \"extensions/filters/http/cdn_loop/filter.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\nnamespace {\n\nTEST(CdnLoopFilterTest, TestNoHeader) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  CdnLoopFilter filter(\"cdn\", 0);\n  filter.setDecoderFilterCallbacks(decoder_callbacks);\n\n  Http::TestRequestHeaderMapImpl request_headers{};\n\n  EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue);\n  EXPECT_EQ(request_headers.get(Http::LowerCaseString(\"CDN-Loop\"))->value().getStringView(), \"cdn\");\n}\n\nTEST(CdnLoopFilterTest, OtherCdnsInHeader) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  CdnLoopFilter filter(\"cdn\", 0);\n  filter.setDecoderFilterCallbacks(decoder_callbacks);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"CDN-Loop\", \"cdn1,cdn2\"}};\n\n  EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue);\n  EXPECT_EQ(request_headers.get(Http::LowerCaseString(\"CDN-Loop\"))->value().getStringView(),\n            \"cdn1,cdn2,cdn\");\n}\n\nTEST(CdnLoopFilterTest, LoopDetected) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  EXPECT_CALL(decoder_callbacks, sendLocalReply(Http::Code::BadGateway, _, _, _, _)).Times(1);\n  CdnLoopFilter filter(\"cdn\", 0);\n  filter.setDecoderFilterCallbacks(decoder_callbacks);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"CDN-Loop\", \"cdn\"}};\n\n  EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::StopIteration);\n}\n\nTEST(CdnLoopFilterTest, MultipleTransitsAllowed) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  
EXPECT_CALL(decoder_callbacks, sendLocalReply(Http::Code::BadGateway, _, _, _, _)).Times(1);\n  CdnLoopFilter filter(\"cdn\", 3);\n  filter.setDecoderFilterCallbacks(decoder_callbacks);\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{};\n    EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(request_headers.get(Http::LowerCaseString(\"CDN-Loop\"))->value().getStringView(),\n              \"cdn\");\n  }\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"CDN-Loop\", \"cdn\"}};\n    EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(request_headers.get(Http::LowerCaseString(\"CDN-Loop\"))->value().getStringView(),\n              \"cdn,cdn\");\n  }\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"CDN-Loop\", \"cdn,cdn\"}};\n    EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(request_headers.get(Http::LowerCaseString(\"CDN-Loop\"))->value().getStringView(),\n              \"cdn,cdn,cdn\");\n  }\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"CDN-Loop\", \"cdn,cdn,cdn\"}};\n    EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue);\n    EXPECT_EQ(request_headers.get(Http::LowerCaseString(\"CDN-Loop\"))->value().getStringView(),\n              \"cdn,cdn,cdn,cdn\");\n  }\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"CDN-Loop\", \"cdn,cdn,cdn,cdn\"}};\n    EXPECT_EQ(filter.decodeHeaders(request_headers, false),\n              Http::FilterHeadersStatus::StopIteration);\n  }\n}\n\nTEST(CdnLoopFilterTest, MultipleHeadersAllowed) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  CdnLoopFilter filter(\"cdn\", 0);\n  filter.setDecoderFilterCallbacks(decoder_callbacks);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"CDN-Loop\", \"cdn1\"}, {\"CDN-Loop\", \"cdn2\"}};\n\n  
EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue);\n  EXPECT_EQ(request_headers.get(Http::LowerCaseString(\"CDN-Loop\"))->value().getStringView(),\n            \"cdn1,cdn2,cdn\");\n}\n\nTEST(CdnLoopFilterTest, UnparseableHeader) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  EXPECT_CALL(decoder_callbacks, sendLocalReply(Http::Code::BadRequest, _, _, _, _)).Times(1);\n  CdnLoopFilter filter(\"cdn\", 0);\n  filter.setDecoderFilterCallbacks(decoder_callbacks);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"CDN-Loop\", \";\"}};\n\n  EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::StopIteration);\n}\n\n} // namespace\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-InvalidParameter.txt",
    "content": "name ; a= \n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-MissingParameter.txt",
    "content": "name ; \n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-MultipleParametersWithWhitespace.txt",
    "content": "name ; a=b ; c=\"d\" ; e=\";\" \n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-SingleParameter.txt",
    "content": "name;a=b\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-SingleParameterExtraWhitespace.txt",
    "content": "name ; a=b  \n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-ExtraWhiteSpace.txt",
    "content": " \t cdn1 \t , cdn2  \t  ,  \t cdn3   \n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-InvalidCdnId.txt",
    "content": "[bad\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-InvalidParseNoComma.txt",
    "content": "cdn1 cdn2\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-1.txt",
    "content": "foo,bar\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-2.txt",
    "content": "foo ,bar,\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-3.txt",
    "content": "foo , ,bar,charlie   \n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-4-empty.txt",
    "content": ""
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-5.txt",
    "content": ",\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-6.txt",
    "content": ",   ,\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Simple.txt",
    "content": "cdn1, cdn2, cdn3\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_corpus/rfc8586-example.txt",
    "content": "foo123.foocdn.example, barcdn.example; trace=\"abcdef\",AnotherCDN; abc=123; def=\"456\"\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_fuzz_test.cc",
    "content": "#include \"common/common/statusor.h\"\n\n#include \"extensions/filters/http/cdn_loop/parser.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\nusing Envoy::Extensions::HttpFilters::CdnLoop::Parser::parseCdnInfoList;\nusing Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParseContext;\nusing Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParsedCdnInfoList;\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  absl::string_view input(reinterpret_cast<const char*>(buf), len);\n  StatusOr<ParsedCdnInfoList> list = parseCdnInfoList(ParseContext(input));\n  if (list) {\n    // If we successfully parse input, we should make sure that cdn_ids we find appear in the input\n    // string in order.\n    size_t start = 0;\n    for (const absl::string_view& cdn_id : list->cdnIds()) {\n      size_t pos = input.find(cdn_id, start);\n      FUZZ_ASSERT(pos != absl::string_view::npos);\n      FUZZ_ASSERT(pos >= start);\n      start = pos + cdn_id.length();\n    }\n  }\n}\n\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/parser_test.cc",
    "content": "#include <sstream>\n\n#include \"extensions/filters/http/cdn_loop/parser.h\"\n\n#include \"test/test_common/status_utility.h\"\n\n#include \"absl/status/status.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\nnamespace Parser {\nnamespace {\n\nusing ::Envoy::StatusHelpers::IsOkAndHolds;\nusing ::Envoy::StatusHelpers::StatusIs;\n\nTEST(ParseContextOstreamTest, Works) {\n  std::ostringstream out;\n  ParseContext context(\"foo\", 3);\n  out << context;\n  EXPECT_EQ(out.str(), \"ParseContext{next=3}\");\n}\n\nTEST(ParsedCdnIdOstreamTest, Works) {\n  std::ostringstream out;\n  ParsedCdnId cdnId(ParseContext(\"foo\", 3), \"foo\");\n  out << cdnId;\n  EXPECT_EQ(out.str(), \"ParsedCdnId{context=ParseContext{next=3}, cdn_id=foo}\");\n}\n\nTEST(ParsedCdnInfoOstreamTest, Works) {\n  std::ostringstream out;\n  ParsedCdnInfo cdnId(ParseContext(\"foo\", 3), \"foo\");\n  out << cdnId;\n  EXPECT_EQ(out.str(), \"ParsedCdnInfo{context=ParseContext{next=3}, cdn_id=foo}\");\n}\n\nTEST(ParsedCdnInfoListOstreamTest, Works) {\n  std::ostringstream out;\n  ParsedCdnInfoList cdnId(ParseContext(\"foo\", 3), {\"foo\"});\n  out << cdnId;\n  EXPECT_EQ(out.str(), \"ParsedCdnInfoList{context=ParseContext{next=3}, cdn_ids=[foo]}\");\n}\n\nTEST(SkipOptionalWhitespaceTest, TestEmpty) {\n  const std::string value = \"\";\n  ParseContext input(value);\n  EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 0)));\n}\n\nTEST(SkipOptionalWhitespaceTest, TestSpace) {\n  const std::string value = \" \";\n  ParseContext input(value);\n  EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 1)));\n}\n\nTEST(SkipOptionalWhitespaceTest, TestTab) {\n  const std::string value = \"\\t\";\n  ParseContext input(value);\n  EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 1)));\n}\n\nTEST(SkipOptionalWhitespaceTest, TestLots) {\n  const std::string value = \"   \\t \\t 
\";\n  ParseContext input(value);\n  EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 7)));\n}\n\nTEST(SkipOptionalWhitespaceTest, NoWhitespace) {\n  const std::string value = \"c\";\n  ParseContext input(value);\n  EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 0)));\n}\n\nTEST(SkipOptionalWhitespaceTest, StopsAtNonWhitespace) {\n  const std::string value = \"  c\";\n  ParseContext input(value);\n  EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 2)));\n}\n\nTEST(ParseQuotedPairTest, Simple) {\n  const std::string value = R\"(\\a)\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedPair(input), IsOkAndHolds(ParseContext(value, 2)));\n}\n\nTEST(ParseQuotedPairTest, EndOfInput) {\n  const std::string value = \"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedPair(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseQuotedPairTest, MissingQuotable) {\n  const std::string value = R\"(\\)\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedPair(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseQuotedPairTest, BadQuotable) {\n  const std::string value = \"\\\\\\x1f\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedPair(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseQuotedPairTest, MissingBackslash) {\n  const std::string value = R\"(a)\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedPair(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseQuotedStringTest, Simple) {\n  const std::string value = \"\\\"abcd\\\"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedString(input), IsOkAndHolds(ParseContext(value, 6)));\n}\n\nTEST(ParseQuotedStringTest, QdStringEdgeCases) {\n  const std::string value = \"\\\"\\t \\x21\\x23\\x5b\\x5d\\x7e\\x80\\xff\\\"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedString(input), IsOkAndHolds(ParseContext(value, 11)));\n}\n\nTEST(ParseQuotedStringTest, 
QuotedPair) {\n  const std::string value = \"\\\"\\\\\\\"\\\"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedString(input), IsOkAndHolds(ParseContext(value, 4)));\n}\n\nTEST(ParseQuotedStringTest, NoStartQuote) {\n  const std::string value = \"foo\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseQuotedStringTest, NoEndQuote) {\n  const std::string value = \"\\\"missing-final-dquote\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseQuotedStringTest, EmptyInput) {\n  const std::string value = \"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseQuotedStringTest, NonVisualChar) {\n  const std::string value = \"\\\"\\x1f\\\"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseQuotedStringTest, QuotedPairEdgeCases) {\n  const std::string value = \"\\\"\\\\\";\n  ParseContext input(value);\n  EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseTokenTest, AllValues) {\n  const std::string value = \"!#$%&'*+-.^_`|~09azAZ\";\n  ParseContext input(value);\n  EXPECT_THAT(parseToken(input), IsOkAndHolds(ParseContext(value, 21)));\n}\n\nTEST(ParseTokenTest, TwoTokens) {\n  const std::string value = \"token1 token2\";\n  {\n    ParseContext input(value);\n    EXPECT_THAT(parseToken(input), IsOkAndHolds(ParseContext(value, 6)));\n  }\n  {\n    ParseContext input(value, 6);\n    EXPECT_THAT(parseToken(input), StatusIs(absl::StatusCode::kInvalidArgument));\n  }\n  {\n    ParseContext input(value, 7);\n    EXPECT_THAT(parseToken(input), IsOkAndHolds(ParseContext(value, 13)));\n  }\n}\n\nTEST(ParseTokenTest, ParseEmpty) {\n  const std::string value = \"\";\n  ParseContext 
input(value);\n  EXPECT_THAT(parseToken(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParsePlausibleIpV6, Example) {\n  const std::string value = \"[2001:DB8::1]\";\n  ParseContext input(value);\n  EXPECT_THAT(parsePlausibleIpV6(input), IsOkAndHolds(ParseContext(value, 13)));\n}\n\nTEST(ParsePlausibleIpV6, ExampleLowerCase) {\n  const std::string value = \"[2001:db8::1]\";\n  ParseContext input(value);\n  EXPECT_THAT(parsePlausibleIpV6(input), IsOkAndHolds(ParseContext(value, 13)));\n}\n\nTEST(ParsePlausibleIpV6, ExampleIpV4) {\n  const std::string value = \"[2001:db8::192.0.2.0]\";\n  ParseContext input(value);\n  EXPECT_THAT(parsePlausibleIpV6(input), IsOkAndHolds(ParseContext(value, 21)));\n}\n\nTEST(ParsePlausibleIpV6, AllHexValues) {\n  const std::string value = \"[1234:5678:90aA:bBcC:dDeE:fF00]\";\n  ParseContext input(value);\n  EXPECT_THAT(parsePlausibleIpV6(input), IsOkAndHolds(ParseContext(value, 31)));\n}\n\nTEST(ParsePlausibleIpV6, EmptyInput) {\n  const std::string value = \"\";\n  ParseContext input(value);\n  EXPECT_THAT(parsePlausibleIpV6(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParsePlausibleIpV6, BadStartDelimiter) {\n  const std::string value = \"{2001:DB8::1}\";\n  ParseContext input(value);\n  EXPECT_THAT(parsePlausibleIpV6(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParsePlausibleIpV6, BadCharacter) {\n  const std::string value = \"[hello]\";\n  ParseContext input(value);\n  EXPECT_THAT(parsePlausibleIpV6(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParsePlausibleIpV6, BadEndDelimiter) {\n  const std::string value = \"[2001:DB8::1}\";\n  ParseContext input(value);\n  EXPECT_THAT(parsePlausibleIpV6(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParsePlausibleIpV6, EndBeforeDelimiter) {\n  const std::string value = \"[2001:DB8::1\";\n  ParseContext input(value);\n  EXPECT_THAT(parsePlausibleIpV6(input), 
StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnIdTest, Simple) {\n  const std::string value = \"name\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input), IsOkAndHolds(ParsedCdnId(ParseContext(value, 4), \"name\")));\n}\n\nTEST(ParseCdnIdTest, SecondInSeries) {\n  // Make sure that absl::string_view::substr is called with (start, end) not\n  // (start, len)\n  const std::string value = \"cdn1, cdn2, cdn3\";\n  ParseContext input(value, 6);\n  EXPECT_THAT(parseCdnId(input), IsOkAndHolds(ParsedCdnId(ParseContext(value, 10), \"cdn2\")));\n}\n\nTEST(ParseCdnIdTest, Empty) {\n  const std::string value = \"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnIdTest, NotValidTokenOrUri) {\n  const std::string value = \",\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnIdTest, InvalidIpV6) {\n  const std::string value = \"[2001::\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnIdTest, InvalidPortNumberStopsParse) {\n  const std::string value = \"host:13z\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input), IsOkAndHolds(ParsedCdnId(ParseContext(value, 7), \"host:13\")));\n}\n\nTEST(ParseCdnIdTest, UriHostName) {\n  const std::string value = \"www.example.com\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input),\n              IsOkAndHolds(ParsedCdnId(ParseContext(value, 15), \"www.example.com\")));\n}\n\nTEST(ParseCdnIdTest, UriHostPercentEncoded) {\n  const std::string value = \"%ba%ba.example.com\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input),\n              IsOkAndHolds(ParsedCdnId(ParseContext(value, 18), \"%ba%ba.example.com\")));\n}\n\nTEST(ParseCdnIdTest, UriHostNamePort) {\n  const std::string value = \"www.example.com:443\";\n  ParseContext 
input(value);\n  EXPECT_THAT(parseCdnId(input),\n              IsOkAndHolds(ParsedCdnId(ParseContext(value, 19), \"www.example.com:443\")));\n}\n\nTEST(ParseCdnIdTest, UriHostNameBlankPort) {\n  const std::string value = \"www.example.com:\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input),\n              IsOkAndHolds(ParsedCdnId(ParseContext(value, 16), \"www.example.com:\")));\n}\n\nTEST(ParseCdnIdTest, UriHostIpV4) {\n  const std::string value = \"192.0.2.0\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input), IsOkAndHolds(ParsedCdnId(ParseContext(value, 9), \"192.0.2.0\")));\n}\n\nTEST(ParseCdnIdTest, UriHostIpV4Port) {\n  const std::string value = \"192.0.2.0:443\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input),\n              IsOkAndHolds(ParsedCdnId(ParseContext(value, 13), \"192.0.2.0:443\")));\n}\n\nTEST(ParseCdnIdTest, UriHostIpV4BlankPort) {\n  const std::string value = \"192.0.2.0:\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input), IsOkAndHolds(ParsedCdnId(ParseContext(value, 10), \"192.0.2.0:\")));\n}\n\nTEST(ParseCdnIdTest, UriHostIpV6) {\n  const std::string value = \"[2001:DB8::1]\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input),\n              IsOkAndHolds(ParsedCdnId(ParseContext(value, 13), \"[2001:DB8::1]\")));\n}\n\nTEST(ParseCdnIdTest, UriHostIpV6Port) {\n  const std::string value = \"[2001:DB8::1]:443\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input),\n              IsOkAndHolds(ParsedCdnId(ParseContext(value, 17), \"[2001:DB8::1]:443\")));\n}\n\nTEST(ParseCdnIdTest, UriHostIpV6BlankPort) {\n  const std::string value = \"[2001:DB8::1]:\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnId(input),\n              IsOkAndHolds(ParsedCdnId(ParseContext(value, 14), \"[2001:DB8::1]:\")));\n}\n\nTEST(ParseParameterTest, SimpleTokenValue) {\n  const std::string value = \"a=b\";\n  ParseContext input(value);\n  EXPECT_THAT(parseParameter(input), 
IsOkAndHolds(ParseContext(value, 3)));\n}\n\nTEST(ParseParameterTest, SimpleQuotedValue) {\n  const std::string value = \"a=\\\"b\\\"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseParameter(input), IsOkAndHolds(ParseContext(value, 5)));\n}\n\nTEST(ParseParameterTest, EndOfInputBeforeEquals) {\n  const std::string value = \"a\";\n  ParseContext input(value);\n  EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseParameterTest, EndOfInputAfterEquals) {\n  const std::string value = \"a=\";\n  ParseContext input(value);\n  EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseParameterTest, MissingEquals) {\n  const std::string value = \"a,\";\n  ParseContext input(value);\n  EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseParameterTest, ValueNotToken) {\n  const std::string value = \"a=,\";\n  ParseContext input(value);\n  EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseParameterTest, ValueNotQuotedString) {\n  const std::string value = \"a=\\\"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnInfoTest, Simple) {\n  const std::string value = \"name\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfo(input), IsOkAndHolds(ParsedCdnInfo(ParseContext(value, 4), \"name\")));\n}\n\nTEST(ParseCdnInfoTest, Empty) {\n  const std::string value = \"\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfo(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnInfoTest, NotValidTokenOrUri) {\n  const std::string value = \",\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfo(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnInfoTest, SingleParameter) {\n  const std::string value = \"name;a=b\";\n  ParseContext input(value);\n  
EXPECT_THAT(parseCdnInfo(input), IsOkAndHolds(ParsedCdnInfo(ParseContext(value, 8), \"name\")));\n}\n\nTEST(ParseCdnInfoTest, SingleParameterExtraWhitespace) {\n  const std::string value = \"name ; a=b  \";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfo(input), IsOkAndHolds(ParsedCdnInfo(ParseContext(value, 12), \"name\")));\n}\n\nTEST(ParseCdnInfoTest, MultipleParametersWithWhitespace) {\n  const std::string value = \"name ; a=b ; c=\\\"d\\\" ; e=\\\";\\\" \";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfo(input), IsOkAndHolds(ParsedCdnInfo(ParseContext(value, 27), \"name\")));\n}\n\nTEST(ParseCdnInfoTest, MissingParameter) {\n  const std::string value = \"name ; \";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfo(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnInfoTest, InvalidParameter) {\n  const std::string value = \"name ; a= \";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfo(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnInfoListTest, Simple) {\n  const std::string value = \"cdn1, cdn2, cdn3\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfoList(input),\n              IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 16), {\"cdn1\", \"cdn2\", \"cdn3\"})));\n}\n\nTEST(ParseCdnInfoListTest, ExtraWhitespace) {\n  const std::string value = \" \\t cdn1 \\t , cdn2  \\t  ,  \\t cdn3   \";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfoList(input),\n              IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 33), {\"cdn1\", \"cdn2\", \"cdn3\"})));\n}\n\nTEST(ParseCdnInfoListTest, InvalidParseNoComma) {\n  const std::string value = \"cdn1 cdn2\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfoList(input), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnInfoListTest, InvalidCdnId) {\n  const std::string value = \"[bad\";\n  ParseContext input(value);\n  EXPECT_THAT(parseCdnInfoList(input), 
StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(ParseCdnInfoListTest, Rfc7230Section7Tests) {\n  // These are the examples from https://tools.ietf.org/html/rfc7230#section-7\n  {\n    const std::string value = \"foo,bar\";\n    ParseContext input(value);\n    EXPECT_THAT(parseCdnInfoList(input),\n                IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 7), {\"foo\", \"bar\"})));\n  }\n  {\n    const std::string value = \"foo ,bar,\";\n    ParseContext input(value);\n    EXPECT_THAT(parseCdnInfoList(input),\n                IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 9), {\"foo\", \"bar\"})));\n  }\n  {\n    const std::string value = \"foo , ,bar,charlie   \";\n    ParseContext input(value);\n    EXPECT_THAT(parseCdnInfoList(input), IsOkAndHolds(ParsedCdnInfoList(\n                                             ParseContext(value, 21), {\"foo\", \"bar\", \"charlie\"})));\n  }\n  // The following tests are allowed in the #cdn-info rule because it doesn't\n  // require a single element.\n  {\n    const std::string value = \"\";\n    ParseContext input(value);\n\n    EXPECT_THAT(parseCdnInfoList(input),\n                IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 0), {})));\n  }\n  {\n    const std::string value = \",\";\n    ParseContext input(value);\n    EXPECT_THAT(parseCdnInfoList(input),\n                IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 1), {})));\n  }\n  {\n    const std::string value = \",   ,\";\n    ParseContext input(value);\n    EXPECT_THAT(parseCdnInfoList(input),\n                IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 5), {})));\n  }\n}\n\n} // namespace\n} // namespace Parser\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cdn_loop/utils_test.cc",
    "content": "#include \"extensions/filters/http/cdn_loop/utils.h\"\n\n#include \"test/test_common/status_utility.h\"\n\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace CdnLoop {\nnamespace {\n\nusing ::Envoy::StatusHelpers::IsOkAndHolds;\nusing ::Envoy::StatusHelpers::StatusIs;\n\nTEST(CountCdnLoopOccurrencesTest, EmptyHeader) {\n  EXPECT_THAT(countCdnLoopOccurrences(\"\", \"cdn\"), IsOkAndHolds(0));\n}\n\nTEST(CountCdnLoopOccurrencesTest, NoParameterTests) {\n  // A pseudonym\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn, CDN\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn, CDN\", \"CDN\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn, CDN\", \"foo\"), IsOkAndHolds(0));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn, cdn, cdn\", \"cdn\"), IsOkAndHolds(3));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn, cdn, cdn\", \"foo\"), IsOkAndHolds(0));\n\n  // A DNS name\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn.example.com\", \"cdn.example.com\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn.example.com, CDN\", \"cdn.example.com\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn.example.com, CDN\", \"CDN\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn.example.com, CDN\", \"foo\"), IsOkAndHolds(0));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn.example.com, cdn.example.com, cdn.example.com\",\n                                      \"cdn.example.com\"),\n              IsOkAndHolds(3));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn.example.com, cdn.example.com, cdn.example.com\", \"foo\"),\n              IsOkAndHolds(0));\n\n  // IPv4 Addresses\n  EXPECT_THAT(countCdnLoopOccurrences(\"192.0.2.1\", \"192.0.2.1\"), IsOkAndHolds(1));\n  
EXPECT_THAT(countCdnLoopOccurrences(\"192.0.2.1, CDN\", \"192.0.2.1\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"192.0.2.1, CDN\", \"CDN\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"192.0.2.1, CDN\", \"foo\"), IsOkAndHolds(0));\n  EXPECT_THAT(countCdnLoopOccurrences(\"192.0.2.1, 192.0.2.1, 192.0.2.1\", \"192.0.2.1\"),\n              IsOkAndHolds(3));\n  EXPECT_THAT(countCdnLoopOccurrences(\"192.0.2.1, 192.0.2.1, 192.0.2.1\", \"foo\"), IsOkAndHolds(0));\n\n  // IpV6 Addresses\n  EXPECT_THAT(countCdnLoopOccurrences(\"[2001:DB8::3]\", \"[2001:DB8::3]\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"[2001:DB8::3], CDN\", \"[2001:DB8::3]\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"[2001:DB8::3], CDN\", \"CDN\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"[2001:DB8::3], CDN\", \"foo\"), IsOkAndHolds(0));\n  EXPECT_THAT(\n      countCdnLoopOccurrences(\"[2001:DB8::3], [2001:DB8::3], [2001:DB8::3]\", \"[2001:DB8::3]\"),\n      IsOkAndHolds(3));\n  EXPECT_THAT(countCdnLoopOccurrences(\"[2001:DB8::3], [2001:DB8::3], [2001:DB8::3]\", \"foo\"),\n              IsOkAndHolds(0));\n}\n\nTEST(CountCdnLoopOccurrencesTest, SimpleParameterTests) {\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; foo=bar\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; foo=bar, CDN\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; foo=bar; baz=quux, CDN\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn, cdn; foo=bar, cdn; baz=quux\", \"cdn\"), IsOkAndHolds(3));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn, cdn; foo=bar; baz=quux, cdn\", \"foo\"), IsOkAndHolds(0));\n}\n\nTEST(CountCdnLoopOccurrencesTest, ExcessWhitespace) {\n  EXPECT_THAT(countCdnLoopOccurrences(\"  cdn\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn  \", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\" cdn \", \"cdn\"), 
IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"\\tcdn\\t\", \"cdn\"), IsOkAndHolds(1));\n}\n\nTEST(CountCdnLoopOccurrencesTest, NoWhitespace) {\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn,cdn\", \"cdn\"), IsOkAndHolds(2));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn;foo=bar;baz=quuz,cdn\", \"cdn\"), IsOkAndHolds(2));\n}\n\nTEST(CountCdnLoopOccurrencesTest, CdnIdInParameterTests) {\n  // In these tests, the parameter contains a string matching the cdn_id in\n  // either the key or the value of the parameters.\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; cdn=bar\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; foo=cdn\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; cdn=cdn\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; cdn=\\\"cdn\\\"\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; cdn=\\\"cdn,cdn\\\"\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; cdn=\\\"cdn, cdn\\\"\", \"cdn\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn, cdn; cdn=\\\"cdn\\\", cdn ; cdn=\\\"cdn,cdn\\\"\", \"cdn\"),\n              IsOkAndHolds(3));\n}\n\nTEST(CountCdnLoopOccurrencesTest, Rfc8586Tests) {\n  // Examples from RFC 8586, Section 2.\n  const std::string example1 = \"foo123.foocdn.example, barcdn.example; trace=\\\"abcdef\\\"\";\n  EXPECT_THAT(countCdnLoopOccurrences(example1, \"foo123.foocdn.example\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(example1, \"barcdn.example\"), IsOkAndHolds(1));\n  EXPECT_THAT(countCdnLoopOccurrences(example1, \"trace=\\\"abcdef\\\"\"), IsOkAndHolds(0));\n  const std::string example2 = \"AnotherCDN; abc=123; def=\\\"456\\\"\";\n  EXPECT_THAT(countCdnLoopOccurrences(example2, \"AnotherCDN\"), IsOkAndHolds(1));\n\n  // The concatenation of the two done correctly as per RFC 7230 rules\n  {\n    
const std::string combined = absl::StrCat(example1, \",\", example2);\n    EXPECT_THAT(countCdnLoopOccurrences(combined, \"foo123.foocdn.example\"), IsOkAndHolds(1));\n    EXPECT_THAT(countCdnLoopOccurrences(combined, \"barcdn.example\"), IsOkAndHolds(1));\n    EXPECT_THAT(countCdnLoopOccurrences(combined, \"AnotherCDN\"), IsOkAndHolds(1));\n  }\n\n  // The concatenation of two done poorly (with extra commas)\n  {\n    const std::string combined = absl::StrCat(example1, \",,,\", example2);\n    EXPECT_THAT(countCdnLoopOccurrences(combined, \"foo123.foocdn.example\"), IsOkAndHolds(1));\n    EXPECT_THAT(countCdnLoopOccurrences(combined, \"barcdn.example\"), IsOkAndHolds(1));\n    EXPECT_THAT(countCdnLoopOccurrences(combined, \"AnotherCDN\"), IsOkAndHolds(1));\n  }\n}\n\nTEST(CountCdnLoopOccurrencesTest, ValidHeaderInsideParameter) {\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn; header=\\\"cdn; cdn=cdn; cdn\\\"\", \"cdn\"), IsOkAndHolds(1));\n}\n\nTEST(CountCdnLoopOccurrencesTest, BadCdnId) {\n  EXPECT_THAT(countCdnLoopOccurrences(\"cdn\", \"\"), StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\nTEST(CountCdnLoopOccurrencesTest, BadHeader) {\n  EXPECT_THAT(countCdnLoopOccurrences(\"[bad-id\", \"cdn\"),\n              StatusIs(absl::StatusCode::kInvalidArgument));\n}\n\n} // namespace\n} // namespace CdnLoop\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"mock_lib\",\n    srcs = [\n        \"mock.cc\",\n    ],\n    hdrs = [\n        \"mock.h\",\n    ],\n    deps = [\n        \"//source/extensions/filters/http/common:jwks_fetcher_lib\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"empty_http_filter_config_lib\",\n    hdrs = [\"empty_http_filter_config.h\"],\n    deps = [\n        \"//include/envoy/server:filter_config_interface\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"jwks_fetcher_test\",\n    srcs = [\n        \"jwks_fetcher_test.cc\",\n    ],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \"//source/extensions/filters/http/common:jwks_fetcher_lib\",\n        \"//test/extensions/filters/http/common:mock_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\n        \"utility_test.cc\",\n    ],\n    deps = [\n        \"//source/extensions/filters/http/common:utility_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/common/compressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"compressor_filter_test\",\n    srcs = [\"compressor_filter_test.cc\"],\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/compression/gzip/compressor:config\",\n        \"//source/extensions/filters/http/common/compressor:compressor_lib\",\n        \"//test/mocks/compression/compressor:compressor_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"compressor_filter_speed_test\",\n    srcs = [\"compressor_filter_speed_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n        \"googletest\",\n    ],\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/compression/gzip/compressor:compressor_lib\",\n        \"//source/extensions/filters/http/common/compressor:compressor_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"compressor_filter_speed_test_benchmark_test\",\n    benchmark_binary = \"compressor_filter_speed_test\",\n)\n"
  },
  {
    "path": "test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc",
    "content": "#include \"envoy/extensions/filters/http/compressor/v3/compressor.pb.h\"\n\n#include \"extensions/compression/gzip/compressor/zlib_compressor_impl.h\"\n#include \"extensions/filters/http/common/compressor/compressor.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n\n#include \"benchmark/benchmark.h\"\n#include \"gmock/gmock.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\nnamespace Compressors {\n\nclass MockCompressorFilterConfig : public CompressorFilterConfig {\npublic:\n  MockCompressorFilterConfig(\n      const envoy::extensions::filters::http::compressor::v3::Compressor& compressor,\n      const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime,\n      const std::string& compressor_name,\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel level,\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy strategy,\n      int64_t window_bits, uint64_t memory_level)\n      : CompressorFilterConfig(compressor, stats_prefix + compressor_name + \".\", scope, runtime,\n                               compressor_name),\n        level_(level), strategy_(strategy), window_bits_(window_bits), memory_level_(memory_level) {\n  }\n\n  Envoy::Compression::Compressor::CompressorPtr makeCompressor() override {\n    auto compressor = std::make_unique<Compression::Gzip::Compressor::ZlibCompressorImpl>();\n    compressor->init(level_, strategy_, window_bits_, memory_level_);\n    return compressor;\n  }\n\n  const Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel level_;\n  const Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy strategy_;\n  const int64_t window_bits_;\n  const uint64_t memory_level_;\n};\n\nusing CompressionParams =\n    
std::tuple<Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel,\n               Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy, int64_t,\n               uint64_t>;\n\nstatic constexpr uint64_t TestDataSize = 122880;\n\nBuffer::OwnedImpl generateTestData() {\n  Buffer::OwnedImpl data;\n  TestUtility::feedBufferWithRandomCharacters(data, TestDataSize);\n  return data;\n}\n\nconst Buffer::OwnedImpl& testData() {\n  CONSTRUCT_ON_FIRST_USE(Buffer::OwnedImpl, generateTestData());\n}\n\nstatic std::vector<Buffer::OwnedImpl> generateChunks(const uint64_t chunk_count,\n                                                     const uint64_t chunk_size) {\n  std::vector<Buffer::OwnedImpl> vec;\n  vec.reserve(chunk_count);\n\n  const auto& test_data = testData();\n  uint64_t added = 0;\n\n  for (uint64_t i = 0; i < chunk_count; ++i) {\n    Buffer::OwnedImpl chunk;\n    std::unique_ptr<char[]> data(new char[chunk_size]);\n\n    test_data.copyOut(added, chunk_size, data.get());\n    chunk.add(absl::string_view(data.get(), chunk_size));\n    vec.push_back(std::move(chunk));\n\n    added += chunk_size;\n  }\n\n  return vec;\n}\n\nstruct Result {\n  uint64_t total_uncompressed_bytes = 0;\n  uint64_t total_compressed_bytes = 0;\n};\n\nstatic Result compressWith(std::vector<Buffer::OwnedImpl>&& chunks, CompressionParams params,\n                           NiceMock<Http::MockStreamDecoderFilterCallbacks>& decoder_callbacks,\n                           benchmark::State& state) {\n  auto start = std::chrono::high_resolution_clock::now();\n  Stats::IsolatedStoreImpl stats;\n  testing::NiceMock<Runtime::MockLoader> runtime;\n  envoy::extensions::filters::http::compressor::v3::Compressor compressor;\n\n  const auto level = std::get<0>(params);\n  const auto strategy = std::get<1>(params);\n  const auto window_bits = std::get<2>(params);\n  const auto memory_level = std::get<3>(params);\n  CompressorFilterConfigSharedPtr config = 
std::make_shared<MockCompressorFilterConfig>(\n      compressor, \"test.\", stats, runtime, \"gzip\", level, strategy, window_bits, memory_level);\n\n  ON_CALL(runtime.snapshot_, featureEnabled(\"test.filter_enabled\", 100))\n      .WillByDefault(Return(true));\n\n  auto filter = std::make_unique<CompressorFilter>(config);\n  filter->setDecoderFilterCallbacks(decoder_callbacks);\n\n  Http::TestRequestHeaderMapImpl headers = {{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}};\n  filter->decodeHeaders(headers, false);\n\n  Http::TestResponseHeaderMapImpl response_headers = {\n      {\":method\", \"get\"},\n      {\"content-length\", \"122880\"},\n      {\"content-type\", \"application/json;charset=utf-8\"}};\n  filter->encodeHeaders(response_headers, false);\n\n  uint64_t idx = 0;\n  Result res;\n  for (auto& data : chunks) {\n    res.total_uncompressed_bytes += data.length();\n\n    if (idx == (chunks.size() - 1)) {\n      filter->encodeData(data, true);\n    } else {\n      filter->encodeData(data, false);\n    }\n\n    res.total_compressed_bytes += data.length();\n    ++idx;\n  }\n\n  EXPECT_EQ(res.total_uncompressed_bytes,\n            stats.counterFromString(\"test.gzip.total_uncompressed_bytes\").value());\n  EXPECT_EQ(res.total_compressed_bytes,\n            stats.counterFromString(\"test.gzip.total_compressed_bytes\").value());\n\n  EXPECT_EQ(1U, stats.counterFromString(\"test.gzip.compressed\").value());\n  auto end = std::chrono::high_resolution_clock::now();\n  const auto elapsed = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);\n  state.SetIterationTime(elapsed.count());\n\n  return res;\n}\n\n// SPELLCHECKER(off)\n/*\nRunning ./bazel-bin/test/extensions/filters/http/common/compressor/compressor_filter_speed_test\nRun on (8 X 2300 MHz CPU s)\nCPU Caches:\nL1 Data 32K (x4)\nL1 Instruction 32K (x4)\nL2 Unified 262K (x4)\nL3 Unified 6291K (x1)\nLoad Average: 1.82, 1.72, 1.74\n***WARNING*** Library was built as DEBUG. 
Timings may be affected.\n------------------------------------------------------------\nBenchmark                  Time             CPU   Iterations\n------------------------------------------------------------\n....\ncompressFull/0/manual_time              14.1 ms         14.3 ms           48\ncompressFull/1/manual_time              7.06 ms         7.22 ms          104\ncompressFull/2/manual_time              5.17 ms         5.33 ms          123\ncompressFull/3/manual_time              15.4 ms         15.5 ms           45\ncompressFull/4/manual_time              10.1 ms         10.3 ms           69\ncompressFull/5/manual_time              15.8 ms         16.0 ms           40\ncompressFull/6/manual_time              15.3 ms         15.5 ms           42\ncompressFull/7/manual_time              9.91 ms         10.1 ms           71\ncompressFull/8/manual_time              15.8 ms         16.0 ms           45\ncompressChunks16384/0/manual_time       13.4 ms         13.5 ms           52\ncompressChunks16384/1/manual_time       6.33 ms         6.48 ms          111\ncompressChunks16384/2/manual_time       5.09 ms         5.27 ms          147\ncompressChunks16384/3/manual_time       15.1 ms         15.3 ms           46\ncompressChunks16384/4/manual_time       9.61 ms         9.78 ms           71\ncompressChunks16384/5/manual_time       14.5 ms         14.6 ms           47\ncompressChunks16384/6/manual_time       14.0 ms         14.1 ms           48\ncompressChunks16384/7/manual_time       9.20 ms         9.36 ms           76\ncompressChunks16384/8/manual_time       14.5 ms         14.6 ms           48\ncompressChunks8192/0/manual_time        14.3 ms         14.5 ms           50\ncompressChunks8192/1/manual_time        6.80 ms         6.96 ms          100\ncompressChunks8192/2/manual_time        5.21 ms         5.36 ms          135\ncompressChunks8192/3/manual_time        14.9 ms         15.0 ms           47\ncompressChunks8192/4/manual_time        9.71 ms         9.87 ms   
        68\ncompressChunks8192/5/manual_time        15.9 ms         16.1 ms           45\n....\n*/\n// SPELLCHECKER(on)\n\nstatic std::vector<CompressionParams> compression_params = {\n    // Speed + Standard + Small Window + Low mem level\n    {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed,\n     Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1},\n\n    // Speed + Standard + Med window + Med mem level\n    {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed,\n     Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5},\n\n    // Speed + Standard + Big window + High mem level\n    {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed,\n     Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9},\n\n    // Standard + Standard + Small window + Low mem level\n    {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard,\n     Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1},\n\n    // Standard + Standard + Med window + Med mem level\n    {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard,\n     Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5},\n\n    // Standard + Standard + High window + High mem level\n    {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard,\n     Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9},\n\n    // Best + Standard + Small window + Low mem level\n    {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best,\n     Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1},\n\n    // Best + Standard + Med window + Med mem level\n    {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best,\n     
Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5},\n\n    // Best + Standard + High window + High mem level\n    {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best,\n     Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}};\n\nstatic void compressFull(benchmark::State& state) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  const auto idx = state.range(0);\n  const auto& params = compression_params[idx];\n\n  for (auto _ : state) {\n    std::vector<Buffer::OwnedImpl> chunks = generateChunks(1, 122880);\n    compressWith(std::move(chunks), params, decoder_callbacks, state);\n  }\n}\nBENCHMARK(compressFull)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond);\n\nstatic void compressChunks16384(benchmark::State& state) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  const auto idx = state.range(0);\n  const auto& params = compression_params[idx];\n\n  for (auto _ : state) {\n    std::vector<Buffer::OwnedImpl> chunks = generateChunks(7, 16384);\n    compressWith(std::move(chunks), params, decoder_callbacks, state);\n  }\n}\nBENCHMARK(compressChunks16384)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond);\n\nstatic void compressChunks8192(benchmark::State& state) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  const auto idx = state.range(0);\n  const auto& params = compression_params[idx];\n\n  for (auto _ : state) {\n    std::vector<Buffer::OwnedImpl> chunks = generateChunks(15, 8192);\n    compressWith(std::move(chunks), params, decoder_callbacks, state);\n  }\n}\nBENCHMARK(compressChunks8192)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond);\n\nstatic void compressChunks4096(benchmark::State& state) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  const auto idx = state.range(0);\n  const auto& params = 
compression_params[idx];\n\n  for (auto _ : state) {\n    std::vector<Buffer::OwnedImpl> chunks = generateChunks(30, 4096);\n    compressWith(std::move(chunks), params, decoder_callbacks, state);\n  }\n}\nBENCHMARK(compressChunks4096)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond);\n\nstatic void compressChunks1024(benchmark::State& state) {\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  const auto idx = state.range(0);\n  const auto& params = compression_params[idx];\n\n  for (auto _ : state) {\n    std::vector<Buffer::OwnedImpl> chunks = generateChunks(120, 1024);\n    compressWith(std::move(chunks), params, decoder_callbacks, state);\n  }\n}\nBENCHMARK(compressChunks1024)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond);\n\n} // namespace Compressors\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/compressor/compressor_filter_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/extensions/filters/http/compressor/v3/compressor.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/common/compressor/compressor.h\"\n\n#include \"test/mocks/compression/compressor/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\nnamespace Compressors {\n\nusing testing::_;\nusing testing::Return;\n\nclass TestCompressorFilterConfig : public CompressorFilterConfig {\npublic:\n  TestCompressorFilterConfig(\n      const envoy::extensions::filters::http::compressor::v3::Compressor& compressor,\n      const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime,\n      const std::string& compressor_name)\n      : CompressorFilterConfig(compressor, stats_prefix + compressor_name + \".\", scope, runtime,\n                               compressor_name) {}\n\n  Envoy::Compression::Compressor::CompressorPtr makeCompressor() override {\n    auto compressor = std::make_unique<Compression::Compressor::MockCompressor>();\n    EXPECT_CALL(*compressor, compress(_, _)).Times(expected_compress_calls_);\n    return compressor;\n  }\n\n  void setExpectedCompressCalls(uint32_t calls) { expected_compress_calls_ = calls; }\n\nprivate:\n  uint32_t expected_compress_calls_{1};\n};\n\nclass CompressorFilterTest : public testing::Test {\npublic:\n  CompressorFilterTest() {\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"test.filter_enabled\", 100))\n        .WillByDefault(Return(true));\n  }\n\n  void SetUp() override {\n    setUpFilter(R\"EOF(\n{\n  \"compressor_library\": {\n     \"name\": \"test\",\n     \"typed_config\": {\n       \"@type\": 
\"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n     }\n  }\n}\n)EOF\");\n  }\n\n  // CompressorFilterTest Helpers\n  void setUpFilter(std::string&& json) {\n    envoy::extensions::filters::http::compressor::v3::Compressor compressor;\n    TestUtility::loadFromJson(json, compressor);\n    config_ =\n        std::make_shared<TestCompressorFilterConfig>(compressor, \"test.\", stats_, runtime_, \"test\");\n    filter_ = std::make_unique<CompressorFilter>(config_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  void verifyCompressedData() {\n    EXPECT_EQ(expected_str_.length(), stats_.counter(\"test.test.total_uncompressed_bytes\").value());\n    EXPECT_EQ(data_.length(), stats_.counter(\"test.test.total_compressed_bytes\").value());\n  }\n\n  void feedBuffer(uint64_t size) {\n    TestUtility::feedBufferWithRandomCharacters(data_, size);\n    expected_str_ += data_.toString();\n  }\n\n  void doRequest(Http::TestRequestHeaderMapImpl&& headers) {\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n    Buffer::OwnedImpl data(\"hello\");\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n    Http::TestRequestTrailerMapImpl trailers;\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n  }\n\n  void doResponseCompression(Http::TestResponseHeaderMapImpl& headers, bool with_trailers) {\n    doResponse(headers, true, with_trailers);\n  }\n\n  void doResponseNoCompression(Http::TestResponseHeaderMapImpl& headers) {\n    doResponse(headers, false, true);\n  }\n\n  void doResponse(Http::TestResponseHeaderMapImpl& headers, bool with_compression,\n                  bool with_trailers) {\n    uint64_t buffer_content_size;\n    if (!absl::SimpleAtoi(headers.get_(\"content-length\"), &buffer_content_size)) {\n      ASSERT_TRUE(\n          
StringUtil::CaseInsensitiveCompare()(headers.get_(\"transfer-encoding\"), \"chunked\"));\n      // In case of chunked stream just feed the buffer with 1000 bytes.\n      buffer_content_size = 1000;\n    }\n    feedBuffer(buffer_content_size);\n    Http::TestResponseHeaderMapImpl continue_headers;\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n              filter_->encode100ContinueHeaders(continue_headers));\n    Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n    EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map));\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n\n    if (with_compression) {\n      EXPECT_EQ(\"\", headers.get_(\"content-length\"));\n      EXPECT_EQ(\"test\", headers.get_(\"content-encoding\"));\n      EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, !with_trailers));\n      if (with_trailers) {\n        EXPECT_CALL(encoder_callbacks_, addEncodedData(_, true))\n            .WillOnce(Invoke([&](Buffer::Instance& data, bool) { data_.move(data); }));\n        Http::TestResponseTrailerMapImpl trailers;\n        EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(trailers));\n      }\n      verifyCompressedData();\n      EXPECT_EQ(1, stats_.counter(\"test.test.compressed\").value());\n    } else {\n      EXPECT_EQ(\"\", headers.get_(\"content-encoding\"));\n      EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, false));\n      Http::TestResponseTrailerMapImpl trailers;\n      EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(trailers));\n      EXPECT_EQ(1, stats_.counter(\"test.test.not_compressed\").value());\n    }\n  }\n\n  std::shared_ptr<TestCompressorFilterConfig> config_;\n  std::unique_ptr<CompressorFilter> filter_;\n  Buffer::OwnedImpl data_;\n  std::string expected_str_;\n  Stats::TestUtil::TestStore stats_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  
NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n};\n\n// Test if Runtime Feature is Disabled\nTEST_F(CompressorFilterTest, DecodeHeadersWithRuntimeDisabled) {\n  setUpFilter(R\"EOF(\n{\n  \"runtime_enabled\": {\n    \"default_value\": true,\n    \"runtime_key\": \"foo_key\"\n  },\n  \"compressor_library\": {\n     \"name\": \"test\",\n     \"typed_config\": {\n       \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n     }\n  }\n}\n)EOF\");\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"foo_key\", true))\n      .Times(2)\n      .WillRepeatedly(Return(false));\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"deflate, test\"}});\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  doResponseNoCompression(headers);\n  EXPECT_FALSE(headers.has(\"vary\"));\n}\n\n// Default config values.\nTEST_F(CompressorFilterTest, DefaultConfigValues) {\n  EXPECT_EQ(30, config_->minimumLength());\n  EXPECT_EQ(false, config_->disableOnEtagHeader());\n  EXPECT_EQ(false, config_->removeAcceptEncodingHeader());\n  EXPECT_EQ(18, config_->contentTypeValues().size());\n}\n\n// Acceptance Testing with default configuration.\nTEST_F(CompressorFilterTest, AcceptanceTestEncoding) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"deflate, test\"}});\n\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  doResponseCompression(headers, false);\n}\n\nTEST_F(CompressorFilterTest, AcceptanceTestEncodingWithTrailers) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"deflate, test\"}});\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  config_->setExpectedCompressCalls(2);\n  doResponseCompression(headers, true);\n}\n\nTEST_F(CompressorFilterTest, NoAcceptEncodingHeader) {\n  
doRequest({{\":method\", \"get\"}, {}});\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  doResponseNoCompression(headers);\n  EXPECT_EQ(1, stats_.counter(\"test.test.no_accept_header\").value());\n  EXPECT_EQ(\"Accept-Encoding\", headers.get_(\"vary\"));\n}\n\nTEST_F(CompressorFilterTest, CacheIdentityDecision) {\n  // check if identity stat is increased twice (the second time via the cached path).\n  config_->setExpectedCompressCalls(0);\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"identity\"}});\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_EQ(1, stats_.counter(\"test.test.header_identity\").value());\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_EQ(2, stats_.counter(\"test.test.header_identity\").value());\n}\n\nTEST_F(CompressorFilterTest, CacheHeaderNotValidDecision) {\n  // check if not_valid stat is increased twice (the second time via the cached path).\n  config_->setExpectedCompressCalls(0);\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"test;q=invalid\"}});\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_EQ(1, stats_.counter(\"test.test.header_not_valid\").value());\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_EQ(2, stats_.counter(\"test.test.header_not_valid\").value());\n}\n\n// Content-Encoding: upstream response is already encoded.\nTEST_F(CompressorFilterTest, ContentEncodingAlreadyEncoded) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"test\"}});\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":method\", \"get\"}, 
{\"content-length\", \"256\"}, {\"content-encoding\", \"deflate, gzip\"}};\n  feedBuffer(256);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n  EXPECT_TRUE(response_headers.has(\"content-length\"));\n  EXPECT_FALSE(response_headers.has(\"transfer-encoding\"));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, false));\n  EXPECT_EQ(1U, stats_.counter(\"test.test.not_compressed\").value());\n}\n\n// No compression when upstream response is empty.\nTEST_F(CompressorFilterTest, EmptyResponse) {\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\":status\", \"204\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, true));\n  EXPECT_EQ(\"\", headers.get_(\"content-length\"));\n  EXPECT_EQ(\"\", headers.get_(\"content-encoding\"));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, true));\n}\n\n// Verify removeAcceptEncoding header.\nTEST_F(CompressorFilterTest, RemoveAcceptEncodingHeader) {\n  {\n    Http::TestRequestHeaderMapImpl headers = {{\"accept-encoding\", \"deflate, test, gzip, br\"}};\n    setUpFilter(R\"EOF(\n{\n  \"remove_accept_encoding_header\": true,\n  \"compressor_library\": {\n     \"typed_config\": {\n       \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n     }\n  }\n}\n)EOF\");\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true));\n    EXPECT_FALSE(headers.has(\"accept-encoding\"));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = {{\"accept-encoding\", \"deflate, test, gzip, br\"}};\n    setUpFilter(R\"EOF(\n{\n  \"compressor_library\": {\n     \"typed_config\": {\n       \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n     }\n  }\n}\n)EOF\");\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true));\n    
EXPECT_TRUE(headers.has(\"accept-encoding\"));\n    EXPECT_EQ(\"deflate, test, gzip, br\", headers.get_(\"accept-encoding\"));\n  }\n}\n\nclass IsAcceptEncodingAllowedTest\n    : public CompressorFilterTest,\n      public testing::WithParamInterface<std::tuple<std::string, bool, int, int, int, int>> {};\n\nINSTANTIATE_TEST_SUITE_P(\n    IsAcceptEncodingAllowedTestSuite, IsAcceptEncodingAllowedTest,\n    testing::Values(std::make_tuple(\"deflate, test, br\", true, 1, 0, 0, 0),\n                    std::make_tuple(\"deflate, test;q=1.0, *;q=0.5\", true, 1, 0, 0, 0),\n                    std::make_tuple(\"\\tdeflate\\t, test\\t ; q\\t =\\t 1.0,\\t * ;q=0.5\", true, 1, 0, 0,\n                                    0),\n                    std::make_tuple(\"deflate,test;q=1.0,*;q=0\", true, 1, 0, 0, 0),\n                    std::make_tuple(\"deflate, test;q=0.2, br;q=1\", true, 1, 0, 0, 0),\n                    std::make_tuple(\"*\", true, 0, 1, 0, 0),\n                    std::make_tuple(\"*;q=1\", true, 0, 1, 0, 0),\n                    std::make_tuple(\"xyz;q=1, br;q=0.2, *\", true, 0, 1, 0, 0),\n                    std::make_tuple(\"deflate, test;Q=.5, br\", true, 1, 0, 0, 0),\n                    std::make_tuple(\"test;q=0,*;q=1\", false, 0, 0, 1, 0),\n                    std::make_tuple(\"identity, *;q=0\", false, 0, 0, 0, 1),\n                    std::make_tuple(\"identity\", false, 0, 0, 0, 1),\n                    std::make_tuple(\"identity, *;q=0\", false, 0, 0, 0, 1),\n                    std::make_tuple(\"identity;q=1\", false, 0, 0, 0, 1),\n                    std::make_tuple(\"identity;q=0\", false, 0, 0, 1, 0),\n                    std::make_tuple(\"identity;Q=0\", false, 0, 0, 1, 0),\n                    std::make_tuple(\"identity;q=0.5, *;q=0\", false, 0, 0, 0, 1),\n                    std::make_tuple(\"identity;q=0, *;q=0\", false, 0, 0, 1, 0),\n                    std::make_tuple(\"xyz;q=1, br;q=0.2, *;q=0\", false, 0, 0, 1, 0),\n                    
std::make_tuple(\"xyz;q=1, br;q=0.2\", false, 0, 0, 1, 0),\n                    std::make_tuple(\"\", false, 0, 0, 1, 0),\n                    std::make_tuple(\"test;q=invalid\", false, 0, 0, 1, 0)));\n\nTEST_P(IsAcceptEncodingAllowedTest, Validate) {\n  const std::string& accept_encoding = std::get<0>(GetParam());\n  const bool is_compression_expected = std::get<1>(GetParam());\n  const int compressor_used = std::get<2>(GetParam());\n  const int wildcard = std::get<3>(GetParam());\n  const int not_valid = std::get<4>(GetParam());\n  const int identity = std::get<5>(GetParam());\n\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", accept_encoding}});\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  doResponse(headers, is_compression_expected, false);\n  EXPECT_EQ(compressor_used, stats_.counter(\"test.test.header_compressor_used\").value());\n  EXPECT_EQ(wildcard, stats_.counter(\"test.test.header_wildcard\").value());\n  EXPECT_EQ(not_valid, stats_.counter(\"test.test.header_not_valid\").value());\n  EXPECT_EQ(identity, stats_.counter(\"test.test.header_identity\").value());\n  // Even if compression is disallowed by a client we must let her know the resource is\n  // compressible.\n  EXPECT_EQ(\"Accept-Encoding\", headers.get_(\"vary\"));\n}\n\nclass IsContentTypeAllowedTest\n    : public CompressorFilterTest,\n      public testing::WithParamInterface<std::tuple<std::string, bool, bool>> {};\n\nINSTANTIATE_TEST_SUITE_P(\n    IsContentTypeAllowedTestSuite, IsContentTypeAllowedTest,\n    testing::Values(\n        std::make_tuple(\"text/html\", true, false), std::make_tuple(\"text/xml\", true, false),\n        std::make_tuple(\"text/plain\", true, false), std::make_tuple(\"text/css\", true, false),\n        std::make_tuple(\"application/javascript\", true, false),\n        std::make_tuple(\"application/x-javascript\", true, false),\n        std::make_tuple(\"text/javascript\", true, false),\n        
std::make_tuple(\"text/x-javascript\", true, false),\n        std::make_tuple(\"text/ecmascript\", true, false), std::make_tuple(\"text/js\", true, false),\n        std::make_tuple(\"text/jscript\", true, false), std::make_tuple(\"text/x-js\", true, false),\n        std::make_tuple(\"application/ecmascript\", true, false),\n        std::make_tuple(\"application/x-json\", true, false),\n        std::make_tuple(\"application/xml\", true, false),\n        std::make_tuple(\"application/json\", true, false),\n        std::make_tuple(\"image/svg+xml\", true, false),\n        std::make_tuple(\"application/xhtml+xml\", true, false),\n        std::make_tuple(\"application/json;charset=utf-8\", true, false),\n        std::make_tuple(\"Application/XHTML+XML\", true, false),\n        std::make_tuple(\"\\ttext/html\\t\", true, false), std::make_tuple(\"image/jpeg\", false, false),\n        std::make_tuple(\"xyz/svg+xml\", true, true), std::make_tuple(\"xyz/false\", false, true),\n        std::make_tuple(\"image/jpeg\", false, true),\n        std::make_tuple(\"test/insensitive\", true, true)));\n\nTEST_P(IsContentTypeAllowedTest, Validate) {\n  const std::string& content_type = std::get<0>(GetParam());\n  const bool should_compress = std::get<1>(GetParam());\n  const bool is_custom_config = std::get<2>(GetParam());\n\n  if (is_custom_config) {\n    setUpFilter(R\"EOF(\n      {\n        \"content_type\": [\n          \"text/html\",\n          \"xyz/svg+xml\",\n          \"Test/INSENSITIVE\"\n        ],\n    \"compressor_library\": {\n       \"name\": \"test\",\n       \"typed_config\": {\n         \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n       }\n    }\n      }\n    )EOF\");\n  }\n\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"test, deflate\"}});\n  Http::TestResponseHeaderMapImpl headers{\n      {\":method\", \"get\"}, {\"content-length\", \"256\"}, {\"content-type\", content_type}};\n  doResponse(headers, 
should_compress, false);\n  EXPECT_EQ(should_compress ? 0 : 1, stats_.counter(\"test.test.header_not_valid\").value());\n  EXPECT_EQ(should_compress, headers.has(\"vary\"));\n}\n\nclass CompressWithEtagTest\n    : public CompressorFilterTest,\n      public testing::WithParamInterface<std::tuple<std::string, std::string, bool>> {};\n\nINSTANTIATE_TEST_SUITE_P(\n    CompressWithEtagSuite, CompressWithEtagTest,\n    testing::Values(std::make_tuple(\"etag\", R\"EOF(W/\"686897696a7c876b7e\")EOF\", true),\n                    std::make_tuple(\"etag\", R\"EOF(w/\"686897696a7c876b7e\")EOF\", true),\n                    std::make_tuple(\"etag\", \"686897696a7c876b7e\", false),\n                    std::make_tuple(\"x-garbage\", \"garbagevalue\", false)));\n\nTEST_P(CompressWithEtagTest, CompressionIsEnabledOnEtag) {\n  const std::string& header_name = std::get<0>(GetParam());\n  const std::string& header_value = std::get<1>(GetParam());\n  const bool is_weak_etag = std::get<2>(GetParam());\n\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"test, deflate\"}});\n  Http::TestResponseHeaderMapImpl headers{\n      {\":method\", \"get\"}, {\"content-length\", \"256\"}, {header_name, header_value}};\n  doResponseCompression(headers, false);\n  EXPECT_EQ(0, stats_.counter(\"test.test.not_compressed_etag\").value());\n  EXPECT_EQ(\"test\", headers.get_(\"content-encoding\"));\n  if (is_weak_etag) {\n    EXPECT_EQ(header_value, headers.get_(\"etag\"));\n  } else {\n    EXPECT_FALSE(headers.has(\"etag\"));\n  }\n}\n\nTEST_P(CompressWithEtagTest, CompressionIsDisabledOnEtag) {\n  const std::string& header_name = std::get<0>(GetParam());\n  const std::string& header_value = std::get<1>(GetParam());\n\n  setUpFilter(R\"EOF(\n{\n  \"disable_on_etag_header\": true,\n  \"compressor_library\": {\n     \"name\": \"test\",\n     \"typed_config\": {\n       \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n     }\n  }\n}\n)EOF\");\n\n  
doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"test, deflate\"}});\n  Http::TestResponseHeaderMapImpl headers{\n      {\":method\", \"get\"}, {\"content-length\", \"256\"}, {header_name, header_value}};\n  if (StringUtil::CaseInsensitiveCompare()(\"etag\", header_name)) {\n    doResponseNoCompression(headers);\n    EXPECT_EQ(1, stats_.counter(\"test.test.not_compressed_etag\").value());\n    EXPECT_FALSE(headers.has(\"vary\"));\n    EXPECT_TRUE(headers.has(\"etag\"));\n  } else {\n    doResponseCompression(headers, false);\n    EXPECT_EQ(0, stats_.counter(\"test.test.not_compressed_etag\").value());\n    EXPECT_EQ(\"test\", headers.get_(\"content-encoding\"));\n    EXPECT_TRUE(headers.has(\"vary\"));\n    EXPECT_FALSE(headers.has(\"etag\"));\n  }\n}\n\nclass HasCacheControlNoTransformTest\n    : public CompressorFilterTest,\n      public testing::WithParamInterface<std::tuple<std::string, bool>> {};\n\nINSTANTIATE_TEST_SUITE_P(HasCacheControlNoTransformTestSuite, HasCacheControlNoTransformTest,\n                         testing::Values(std::make_tuple(\"no-cache\", true),\n                                         std::make_tuple(\"no-transform\", false),\n                                         std::make_tuple(\"No-Transform\", false)));\n\nTEST_P(HasCacheControlNoTransformTest, Validate) {\n  const std::string& cache_control = std::get<0>(GetParam());\n  const bool is_compression_expected = std::get<1>(GetParam());\n\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"test, deflate\"}});\n  Http::TestResponseHeaderMapImpl headers{\n      {\":method\", \"get\"}, {\"content-length\", \"256\"}, {\"cache-control\", cache_control}};\n  doResponse(headers, is_compression_expected, false);\n  EXPECT_EQ(is_compression_expected, headers.has(\"vary\"));\n}\n\nclass IsMinimumContentLengthTest\n    : public CompressorFilterTest,\n      public testing::WithParamInterface<std::tuple<std::string, std::string, std::string, bool>> 
{\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    IsMinimumContentLengthTestSuite, IsMinimumContentLengthTest,\n    testing::Values(std::make_tuple(\"content-length\", \"31\", \"\", true),\n                    std::make_tuple(\"content-length\", \"29\", \"\", false),\n                    std::make_tuple(\"transfer-encoding\", \"chunked\", \"\", true),\n                    std::make_tuple(\"transfer-encoding\", \"Chunked\", \"\", true),\n                    std::make_tuple(\"transfer-encoding\", \"chunked\", \"\\\"content_length\\\": 500,\",\n                                    true),\n                    std::make_tuple(\"content-length\", \"501\", \"\\\"content_length\\\": 500,\", true),\n                    std::make_tuple(\"content-length\", \"499\", \"\\\"content_length\\\": 500,\", false)));\n\nTEST_P(IsMinimumContentLengthTest, Validate) {\n  const std::string& header_name = std::get<0>(GetParam());\n  const std::string& header_value = std::get<1>(GetParam());\n  const std::string& content_length_config = std::get<2>(GetParam());\n  const bool is_compression_expected = std::get<3>(GetParam());\n\n  setUpFilter(fmt::format(R\"EOF(\n{{\n  {}\n  \"compressor_library\": {{\n     \"name\": \"test\",\n     \"typed_config\": {{\n       \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n     }}\n  }}\n}}\n)EOF\",\n                          content_length_config));\n\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"test, deflate\"}});\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {header_name, header_value}};\n  doResponse(headers, is_compression_expected, false);\n  EXPECT_EQ(is_compression_expected, headers.has(\"vary\"));\n}\n\nclass IsTransferEncodingAllowedTest\n    : public CompressorFilterTest,\n      public testing::WithParamInterface<std::tuple<std::string, std::string, bool>> {};\n\nINSTANTIATE_TEST_SUITE_P(\n    IsTransferEncodingAllowedSuite, IsTransferEncodingAllowedTest,\n    
testing::Values(std::make_tuple(\"transfer-encoding\", \"chunked\", true),\n                    std::make_tuple(\"transfer-encoding\", \"Chunked\", true),\n                    std::make_tuple(\"transfer-encoding\", \"deflate\", false),\n                    std::make_tuple(\"transfer-encoding\", \"Deflate\", false),\n                    std::make_tuple(\"transfer-encoding\", \"test\", false),\n                    std::make_tuple(\"transfer-encoding\", \"chunked, test\", false),\n                    std::make_tuple(\"transfer-encoding\", \"test, chunked\", false),\n                    std::make_tuple(\"transfer-encoding\", \"test\\t, chunked\\t\", false),\n                    std::make_tuple(\"x-garbage\", \"no_value\", true)));\n\nTEST_P(IsTransferEncodingAllowedTest, Validate) {\n  const std::string& header_name = std::get<0>(GetParam());\n  const std::string& header_value = std::get<1>(GetParam());\n  const bool is_compression_expected = std::get<2>(GetParam());\n\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"test\"}});\n  Http::TestResponseHeaderMapImpl headers{\n      {\":method\", \"get\"}, {\"content-length\", \"256\"}, {header_name, header_value}};\n  doResponse(headers, is_compression_expected, false);\n  EXPECT_EQ(\"Accept-Encoding\", headers.get_(\"vary\"));\n}\n\nclass InsertVaryHeaderTest\n    : public CompressorFilterTest,\n      public testing::WithParamInterface<std::tuple<std::string, std::string, std::string>> {};\n\nINSTANTIATE_TEST_SUITE_P(\n    InsertVaryHeaderTestSuite, InsertVaryHeaderTest,\n    testing::Values(std::make_tuple(\"x-garbage\", \"Cookie\", \"Accept-Encoding\"),\n                    std::make_tuple(\"vary\", \"Cookie\", \"Cookie, Accept-Encoding\"),\n                    std::make_tuple(\"vary\", \"accept-encoding\", \"accept-encoding, Accept-Encoding\"),\n                    std::make_tuple(\"vary\", \"Accept-Encoding, Cookie\", \"Accept-Encoding, Cookie\"),\n                    std::make_tuple(\"vary\", 
\"User-Agent, Cookie\",\n                                    \"User-Agent, Cookie, Accept-Encoding\"),\n                    std::make_tuple(\"vary\", \"Accept-Encoding\", \"Accept-Encoding\")));\n\nTEST_P(InsertVaryHeaderTest, Validate) {\n  const std::string& header_name = std::get<0>(GetParam());\n  const std::string& header_value = std::get<1>(GetParam());\n  const std::string& expected = std::get<2>(GetParam());\n\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"test\"}});\n  Http::TestResponseHeaderMapImpl headers{\n      {\":method\", \"get\"}, {\"content-length\", \"256\"}, {header_name, header_value}};\n  doResponseCompression(headers, false);\n  EXPECT_EQ(expected, headers.get_(\"vary\"));\n}\n\nclass MultipleFiltersTest : public testing::Test {\nprotected:\n  void SetUp() override {\n    envoy::extensions::filters::http::compressor::v3::Compressor compressor;\n    TestUtility::loadFromJson(R\"EOF(\n{\n  \"compressor_library\": {\n     \"name\": \"test1\",\n     \"typed_config\": {\n       \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n     }\n  }\n}\n)EOF\",\n                              compressor);\n    auto config1 = std::make_shared<TestCompressorFilterConfig>(compressor, \"test1.\", stats1_,\n                                                                runtime_, \"test1\");\n    config1->setExpectedCompressCalls(0);\n    filter1_ = std::make_unique<CompressorFilter>(config1);\n\n    TestUtility::loadFromJson(R\"EOF(\n{\n  \"compressor_library\": {\n     \"name\": \"test2\",\n     \"typed_config\": {\n       \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n     }\n  }\n}\n)EOF\",\n                              compressor);\n    auto config2 = std::make_shared<TestCompressorFilterConfig>(compressor, \"test2.\", stats2_,\n                                                                runtime_, \"test2\");\n    config2->setExpectedCompressCalls(0);\n    
filter2_ = std::make_unique<CompressorFilter>(config2);\n  }\n\n  NiceMock<Runtime::MockLoader> runtime_;\n  Stats::TestUtil::TestStore stats1_;\n  Stats::TestUtil::TestStore stats2_;\n  std::unique_ptr<CompressorFilter> filter1_;\n  std::unique_ptr<CompressorFilter> filter2_;\n};\n\nTEST_F(MultipleFiltersTest, IndependentFilters) {\n  // The compressor \"test1\" from an independent filter chain should not overshadow \"test2\".\n  // The independence is simulated with different instances of DecoderFilterCallbacks set for\n  // \"test1\" and \"test2\".\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks1;\n  filter1_->setDecoderFilterCallbacks(decoder_callbacks1);\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks2;\n  filter2_->setDecoderFilterCallbacks(decoder_callbacks2);\n\n  Http::TestRequestHeaderMapImpl req_headers{{\":method\", \"get\"},\n                                             {\"accept-encoding\", \"test1;Q=.5,test2;q=0.75\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter1_->decodeHeaders(req_headers, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter2_->decodeHeaders(req_headers, false));\n  Http::TestResponseHeaderMapImpl headers1{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  Http::TestResponseHeaderMapImpl headers2{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter1_->encodeHeaders(headers1, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter2_->encodeHeaders(headers2, false));\n  EXPECT_EQ(0, stats1_.counter(\"test1.test1.header_compressor_overshadowed\").value());\n  EXPECT_EQ(0, stats2_.counter(\"test2.test2.header_compressor_overshadowed\").value());\n  EXPECT_EQ(1, stats1_.counter(\"test1.test1.compressed\").value());\n  EXPECT_EQ(1, stats1_.counter(\"test1.test1.header_compressor_used\").value());\n  EXPECT_EQ(1, stats2_.counter(\"test2.test2.compressed\").value());\n  EXPECT_EQ(1, 
stats2_.counter(\"test2.test2.header_compressor_used\").value());\n}\n\nTEST_F(MultipleFiltersTest, CacheEncodingDecision) {\n  // Test that encoding decision is cached when used by multiple filters.\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  filter1_->setDecoderFilterCallbacks(decoder_callbacks);\n  filter2_->setDecoderFilterCallbacks(decoder_callbacks);\n\n  Http::TestRequestHeaderMapImpl req_headers{{\":method\", \"get\"},\n                                             {\"accept-encoding\", \"test1;Q=.5,test2;q=0.75\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter1_->decodeHeaders(req_headers, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter2_->decodeHeaders(req_headers, false));\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter1_->encodeHeaders(headers, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter2_->encodeHeaders(headers, false));\n  EXPECT_EQ(1, stats1_.counter(\"test1.test1.header_compressor_overshadowed\").value());\n  EXPECT_EQ(1, stats2_.counter(\"test2.test2.header_compressor_used\").value());\n  // Reset headers as content-length got removed by filter2.\n  headers = {{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter1_->encodeHeaders(headers, false));\n  EXPECT_EQ(2, stats1_.counter(\"test1.test1.header_compressor_overshadowed\").value());\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter2_->encodeHeaders(headers, false));\n  EXPECT_EQ(2, stats2_.counter(\"test2.test2.header_compressor_used\").value());\n}\n\nTEST_F(MultipleFiltersTest, UseFirstRegisteredFilterWhenWildcard) {\n  // Test that first registered filter is used when handling wildcard.\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  filter1_->setDecoderFilterCallbacks(decoder_callbacks);\n  
filter2_->setDecoderFilterCallbacks(decoder_callbacks);\n\n  Http::TestRequestHeaderMapImpl req_headers{{\":method\", \"get\"}, {\"accept-encoding\", \"*\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter1_->decodeHeaders(req_headers, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter2_->decodeHeaders(req_headers, false));\n  Http::TestResponseHeaderMapImpl headers1{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  Http::TestResponseHeaderMapImpl headers2{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter1_->encodeHeaders(headers1, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter2_->encodeHeaders(headers2, false));\n  EXPECT_EQ(1, stats1_.counter(\"test1.test1.compressed\").value());\n  EXPECT_EQ(0, stats2_.counter(\"test2.test2.compressed\").value());\n  EXPECT_EQ(1, stats1_.counter(\"test1.test1.header_wildcard\").value());\n  EXPECT_EQ(1, stats2_.counter(\"test2.test2.header_wildcard\").value());\n}\n\nTEST(LegacyTest, GzipStats) {\n  // check if the legacy \"header_gzip\" counter is incremented for gzip compression filter\n  Stats::TestUtil::TestStore stats;\n  NiceMock<Runtime::MockLoader> runtime;\n  envoy::extensions::filters::http::compressor::v3::Compressor compressor;\n  TestUtility::loadFromJson(R\"EOF(\n{\n  \"compressor_library\": {\n     \"typed_config\": {\n       \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\"\n     }\n  }\n}\n)EOF\",\n                            compressor);\n  auto config =\n      std::make_shared<TestCompressorFilterConfig>(compressor, \"test.\", stats, runtime, \"gzip\");\n  config->setExpectedCompressCalls(0);\n  auto gzip_filter = std::make_unique<CompressorFilter>(config);\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  gzip_filter->setDecoderFilterCallbacks(decoder_callbacks);\n  Http::TestRequestHeaderMapImpl req_headers{{\":method\", \"get\"},\n  
                                           {\"accept-encoding\", \"gzip;q=0.75\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, gzip_filter->decodeHeaders(req_headers, false));\n  Http::TestResponseHeaderMapImpl headers{{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, gzip_filter->encodeHeaders(headers, false));\n  EXPECT_EQ(1, stats.counter(\"test.gzip.header_gzip\").value());\n  EXPECT_EQ(1, stats.counter(\"test.gzip.compressed\").value());\n  // Reset headers as content-length got removed by gzip_filter.\n  headers = {{\":method\", \"get\"}, {\"content-length\", \"256\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, gzip_filter->encodeHeaders(headers, false));\n  EXPECT_EQ(2, stats.counter(\"test.gzip.header_gzip\").value());\n  EXPECT_EQ(2, stats.counter(\"test.gzip.compressed\").value());\n}\n\n} // namespace Compressors\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/empty_http_filter_config.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\n\n/**\n * Config registration for http filters that have empty configuration blocks.\n * The boiler plate instantiation functions (createFilterFactory, createFilterFactoryFromProto,\n * and createEmptyConfigProto) are implemented here. Users of this class have to implement\n * the createFilter function that instantiates the actual filter.\n */\nclass EmptyHttpFilterConfig : public Server::Configuration::NamedHttpFilterConfigFactory {\npublic:\n  virtual Http::FilterFactoryCb createFilter(const std::string& stat_prefix,\n                                             Server::Configuration::FactoryContext& context) PURE;\n\n  Http::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message&, const std::string& stat_prefix,\n                               Server::Configuration::FactoryContext& context) override {\n    return createFilter(stat_prefix, context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom filter config proto. This is only allowed in tests.\n    return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};\n  }\n\n  std::string configType() override {\n    // Prevent registration of filters by type. This is only allowed in tests.\n    return \"\";\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  EmptyHttpFilterConfig(const std::string& name) : name_(name) {}\n\nprivate:\n  const std::string name_;\n};\n\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//source/extensions:all_extensions.bzl\",\n    \"envoy_all_http_filters\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_proto_library(\n    name = \"filter_fuzz_proto\",\n    srcs = [\"filter_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"http_filter_fuzzer_lib\",\n    hdrs = [\"http_filter_fuzzer.h\"],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//source/common/http:utility_lib\",\n        \"//test/fuzz:common_proto_cc_proto\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"uber_filter_lib\",\n    srcs = [\n        \"uber_filter.cc\",\n        \"uber_per_filter.cc\",\n    ],\n    hdrs = [\"uber_filter.h\"],\n    deps = [\n        \":filter_fuzz_proto_cc_proto\",\n        \":http_filter_fuzzer_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/tracing:http_tracer_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/common:utility_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/proto:bookstore_proto_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto\",\n     
   \"@envoy_api//envoy/extensions/filters/http/tap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"filter_fuzz_test\",\n    srcs = [\"filter_fuzz_test.cc\"],\n    corpus = \"filter_corpus\",\n    # All Envoy extensions must be linked to the test in order for the fuzzer to pick\n    # these up via the NamedHttpFilterConfigFactory.\n    deps = [\n        \":uber_filter_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/upstreams/http/generic:config\",\n        \"//test/config:utility_lib\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto\",\n    ] + envoy_all_http_filters(),\n)\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/adminnullptr",
    "content": "config {\n  name: \"envoy.filters.http.tap\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\"\n    value: \"\\n\\036\\022\\034\\n\\022\\022\\020\\n\\n\\022\\010\\n\\002B\\000\\n\\002B\\000\\n\\002:\\000\\022\\006\\n\\004\\010\\001\\022\\000\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/buffer1",
    "content": "config {\n       name: \"envoy.buffer\"\ntyped_config {\n  [type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer] {\n    max_request_bytes {\n      value: 1048576\n    }\n  }\n}\n}\ndata {\n  headers {\n    headers {\n    key:\n      \"a\" value : \"b\"\n    }\n  }\n  http_body {\n    data: \"hello\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5082368313655296",
    "content": "config {\n  name: \"envoy.filters.http.header_to_metadata\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config\"\n    value: \"\\n\\033\\n\\002;;\\032\\023\\022\\001;\\032\\014stanotcci_fi \\t \\001\\n+\\n\\001;\\022\\021\\022\\001;\\032\\014static_confi\\032\\023\\022\\001;\\032\\014static_confi \\t\\022\\031\\n\\002m;\\032\\023\\022\\001;\\032\\014stanotcci_fi \\t\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5728684315770880",
    "content": "config {\n  name: \"envoy.filters.http.adaptive_concurrency\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.adaptive_concurrency.v3.AdaptiveConcurrency\"\n    value: \"\\n\\024\\022\\010\\022\\002\\010\\010\\032\\002\\020\\010\\032\\010\\n\\002\\020\\010\\022\\002\\010\\001\"\n  }\n}\ndata {\n}\nupstream_data {\n  trailers {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5107908850483200.fuzz",
    "content": "config {\n  name: \"envoy.filters.http.header_to_metadata\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config\"\n    value: \"\\n\\036\\n\\001=\\012\\014\\n\\002Ae\\022\\001]\\032\\001{(\\001\\032\\t\\n\\001]\\022\\001]\\032\\001{ \\001\\n&\\n\\001=\\032\\037\\n\\027envoy.filters.http.rbac\\022\\001]\\032\\001{ \\001\\n\\020\\n\\001=\\032\\t\\n\\001]\\022\\001]\\032\\001{ \\001\"\n  }\n}\ndata {\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5115447232692224",
    "content": "config {\n  name: \"envoy.filters.http.grpc_http1_reverse_bridge\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfig\"\n    value: \"\\n\\020application/grpc\"\n  }\n}\ndata {\n  headers {\n    headers {\n      key: \"content-type\"\n      value: \"application/grpc\"\n    }\n  }\n  http_body {\n    data: \"\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\nupstream_data {\n  headers {\n    headers {\n      key: \"content-type\"\n      value: \"application/grpc\"\n    }\n  }\n  http_body {\n    data: \"\\000\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5143098977157120.fuzz",
    "content": "config {\n  name: \"envoy.squash\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.squash.v3.Squash\"\n    value: \"\\n\\002Ae\\022\\356\\n\\n\\342\\n\\n\\001\\017\\022\\334\\n2\\331\\n\\n\\305\\n2\\302\\n\\n\\0022\\000\\n\\267\\n*\\264\\n\\n\\261\\n\\n\\004o\\177\\177\\177\\022\\250\\n2\\245\\n\\n\\216\\n2\\213\\n\\n\\0022\\000\\n\\200\\n*\\375\\t\\n\\372\\t\\n\\001\\017\\022\\364\\t2\\361\\t\\nE2C\\n\\0022\\000\\n9*7\\n5\\n\\004o\\177\\177\\177\\022-2+\\n\\0252\\023\\n\\0022\\000\\n\\t*\\007\\n\\005\\n\\001@\\022\\000\\n\\002*\\000\\n\\003\\032\\001#\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\n\\230\\t2\\225\\t\\n\\375\\0102\\372\\010\\n\\357\\0102\\354\\010\\n\\365\\0072\\362\\007\\n\\0022\\000\\n\\347\\007*\\344\\007\\n\\341\\007\\n\\004o\\177\\177\\177\\022\\330\\0072\\325\\007\\n\\276\\0072\\273\\007\\n\\0022\\000\\n\\260\\007*\\255\\007\\n\\252\\007\\n\\001\\017\\022\\244\\0072\\241\\007\\nE2C\\n\\0022\\000\\n9*7\\n5\\n\\004o\\177\\177\\177\\022-2+\\n\\0252\\023\\n\\0022\\000\\n\\t*\\007\\n\\005\\n\\001@\\022\\000\\n\\002*\\000\\n\\003\\032\\001#\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\n\\310\\0062\\305\\006\\n\\255\\0062\\252\\006\\n\\237\\0062\\234\\006\\n\\0142\\n\\n\\000\\n\\0022\\000\\n\\002*\\000\\n\\374\\0052\\371\\005\\n\\366\\0052\\363\\005\\n\\337\\0052\\334\\005\\n\\0022\\000\\n\\321\\005*\\316\\005\\n\\313\\005\\n\\004o\\177\\177\\177\\022\\302\\0052\\277\\005\\n\\0022\\000\\n\\0302\\026\\n\\0022\\000\\n\\014*\\n\\n\\010\\n\\004o\\177\\177\\177\\022\\000\\n\\002*\\000\\n\\003\\032\\001#\\n\\231\\005*\\226\\005\\n\\223\\005\\n\\004o\\177\\177\\177\\022\\212\\0052\\207\\005\\n\\360\\0042\\355\\004\\n\\0022\\000\\n\\342\\004*\\337\\004\\n\\334\\004\\n\\001\\017\\022\\326\\0042\\323\\004\\nE2C\\n\\0022\\000\\n9*7\\n5\\n\\004o\\177\\177\\177\\022-2+\\n\\0252\\023\\n\\0022\\000\\n\\t*\\007\\n\\005\\n\\001@\\022\\000\\n\\002*\\000\\n\
\003\\032\\001#\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\n\\372\\0032\\367\\003\\n\\337\\0032\\334\\003\\n\\321\\0032\\316\\003\\n\\327\\0022\\324\\002\\n\\0022\\000\\n\\311\\002*\\306\\002\\n\\303\\002\\n\\004o\\177\\177\\177\\021\\272\\0022\\267\\002\\n\\240\\0022\\235\\002\\n\\0022\\000\\n\\222\\002*\\217\\002\\n\\214\\002\\n\\001\\017\\022\\206\\0022\\203\\002\\nE2C\\n\\0022\\000\\n9*7\\n5\\n\\004o\\177\\177\\177\\022-2+\\n\\0252\\023\\n\\0022\\000\\n\\t*\\007\\n\\005\\n\\001@\\022\\000\\n\\002*\\000\\n\\003\\032\\001#\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\n\\252\\0012\\247\\001\\n\\217\\0012\\214\\001\\n\\201\\0012\\177\\n\\0142\\n\\n\\000\\n\\0022\\000\\n\\002*\\000\\n`2^\\n\\\\2Z\\nG2E\\n\\0022\\000\\n;*9\\n7\\n\\004o\\177\\177\\177\\022/2-\\n\\0022\\000\\n\\0302\\026\\n\\0022\\000\\n\\014*\\n\\n\\010\\n\\004o\\177\\177\\177\\022\\000\\n\\002*\\000\\n\\003\\032\\001#\\n\\010*\\006\\n\\004\\n\\000\\022\\000\\n\\002*\\000\\n\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\0022\\000\\n\\002*\\000\\n\\0042\\002\\n\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\n\\003\\032\\001#\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\nc2a\\n_2]\\nJ2H\\n\\0022\\000\\n>*<\\n:\\n\\004o\\177\\177\\177\\022220\\n\\0022\\000\\n!2\\037\\n\\0022\\000\\n\\014*\\n\\n\\010\\n\\004o\\177\\177\\177\\022\\000\\n\\013*\\t\\n\\007\\n\\001\\001\\022\\002\\010\\000\\n\\003\\032\\001#\\n\\002*\\000\\n\\002*\\000\\n\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\0022\\000\\n\\002*\\000\\n\\0042\\002\\n\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*
\\000\\n\\003\\032\\001#\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\n\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\0022\\000\\n\\002*\\000\\n\\0042\\002\\n\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\n\\003\\032\\001#\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\nc2a\\n_2]\\nJ2H\\n\\0022\\000\\n>*<\\n:\\n\\004o\\177\\177\\177\\022220\\n\\0022\\000\\n!2\\037\\n\\0022\\000\\n\\014*\\n\\n\\010\\n\\004o\\177\\177\\177\\022\\000\\n\\013*\\t\\n\\007\\n\\001\\001\\022\\002\\010\\000\\n\\003\\032\\001#\\n\\002*\\000\\n\\002*\\000\\n\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\0022\\000\\n\\002*\\000\\n\\0042\\002\\n\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\n\\003\\032\\001#\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\002*\\000\\n\\000\\n\\t\\021\\010\\000\\000\\000\\000\\000\\0002\\n\\002*\\000\\n\\007\\n\\001\\001\\022\\002\\010\\000*\\007\\010 \\020\\261\\300\\334\\001\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5144919410999296",
    "content": "config {\n  name: \"envoy.ext_authz\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\"\n    value: \"\\020\\001\\032\\356\\001\\n\\317\\001\\n\\177\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\035\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\022Gtype.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\\032\\003\\020\\200`\\022\\032envoy.ext_aeny.ext_aututhz\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5167332043522048",
    "content": "config {\n  name: \"envoy.gzip\"\n}\ndata {\n  headers {\n    headers {\n      value: \"\\002\\000\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5635252589690880",
    "content": "config {\n  name: \"envoy.filters.http.tap\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\"\n    value: \"\\nZ\\022X\\n\\010\\032\\006\\032\\004\\032\\002 \\001\\022L\\nH\\\"F\\n)envoy.service.health.v3.HealthCheckReques\\022\\031\\022\\027\\n\\010BB\\017\\000\\000\\000\\000\\000\\\"\\001R*\\010P\\000\\000\\000\\000\\000\\000\\000 \\001\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5661692476522496",
    "content": "config {\n  name: \"envoy.ext_authz\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\"\n    value: \" \\001\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5710239968264192",
    "content": "config {\n  name: \"envoy.rate_limit\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit\"\n    value: \"\\n\\020envoy.rate_limit\\032>type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC:\\007\\022\\005\\n\\003\\n\\0012\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5713820013297664",
    "content": "config {\n  name: \"envoy.filters.http.adaptive_concurrency\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.adaptive_concurrency.v3.AdaptiveConcurrency\"\n    value: \"\\n\\016\\022\\005\\032\\003\\010\\200\\001\\032\\005\\n\\003\\010\\200\\001\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5714246842449920.fuzz",
    "content": "config {\n  name: \"envoy.router\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\"\n    value: \"*\\023x-envoy-max-retries\"\n  }\n}\ndata {\n  headers {\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"?\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"fff\\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: 
\"fff\\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n      value: \"fff\\002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmtmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n  }\n  trailers {\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"?\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"&&&&&&&&&&&\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"x-envoy-max-retries\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: 
\"fff\\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"fff\\002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmtmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"?\"\n    }\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5726031248621568",
    "content": "config {\n  name: \"envoy.filters.http.tap\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\"\n    value: \"\\n\\002\\022\\000\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5728217898680320",
    "content": "config {\n  name: \"envoy.filters.http.tap\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\"\n    value: \"\\no\\022m\\nb\\n`\\nD\\032B\\n@\\n\\030\\032\\026\\n\\024\\n\\n\\032\\010\\032\\006J\\004\\022\\002\\n\\000\\n\\006\\032\\004\\032\\002*\\000\\n$\\n\\\"\\n\\002 \\001\\n\\034\\032\\032\\032\\030\\n\\026\\n\\002 \\001\\n\\020\\032\\016\\032\\014\\n\\n\\n\\002 \\001\\n\\004\\032\\002B\\000\\n\\030\\n\\026\\n\\002 \\001\\n\\020\\032\\016\\032\\014\\n\\n\\n\\002 \\001\\n\\004\\032\\002B\\000\\022\\007\\n\\005\\032\\003\\n\\001(\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5762605081952256",
    "content": "config {\n  name: \"envoy.filters.http.tap\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\"\n    value: \"\\n,\\n*\\n(envoy.filters.http.dynamic_forward_proxy\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5969746626609152",
    "content": "config {\n  name: \"envoy.filters.http.dynamic_forward_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig\"\n    value: \"\\n\\t\\n\\002Ae\\032\\003\\020\\200N\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144",
    "content": "config {\n  name: \"envoy.filters.http.admission_control\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl\"\n    value: \"\\022\\000\"\n  }\n}\nupstream_data {\n  trailers {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6246534715539456",
    "content": "config {\n  name: \"envoy.filters.http.rbac\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC\"\n    value: \"\\022\\002\\010\\004\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6506457133219840",
    "content": "config {\n  name: \"envoy.filters.http.tap\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\"\n    value: \"\\n \\022\\036\\n\\002 \\001\\022\\020\\n\\007\\010\\001\\032\\003\\n\\001(\\032\\003\\010\\200` \\001\\\"\\006R\\004\\022\\002\\n\\000\"\n  }\n}\ndata {\n  headers {\n    headers {\n      key: \"\\036\"\n      value: \"\\036\"\n    }\n  }\n  trailers {\n    headers {\n      key: \"\\036\"\n      value: \"\\036\"\n    }\n  }\n  proto_body {\n    message {\n      type_url: \"type.googleapis.com/bookstore.CreateShelfRequest\"\n    }\n    chunk_size: 2\n  }\n}\nupstream_data {\n  proto_body {\n    message {\n      type_url: \"type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC\"\n      value: \"\\022\\277\\001\\0229\\n\\000\\0225\\022\\013B\\t*\\007\\022\\005\\010\\200\\200\\200\\020\\032&\\020\\377\\377\\377\\377\\277\\240\\224\\244U:\\032\\n\\t\\020\\242\\200\\200\\200\\203\\206\\2277\\n\\r:\\013\\n\\t\\020\\242\\200\\200\\200\\203\\206\\2277\\022\\022\\n\\0108Y;\\003\\000\\000\\000\\000\\022\\006\\022\\002\\n\\000\\\"\\000\\022[\\nW///////////////////////////////////////////////////////////////////////////////////////\\022\\000\\022\\021\\n\\004size\\022\\t\\n\\000\\022\\005Z\\003\\n\\001m\"\n    }\n    chunk_size: 2\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/crash-3014465358f0947e73ac12ccb40b299d5b0646b3",
    "content": "config {\n  name: \"envoy.ext_authz\"\n}\ndata {\n  headers {\n    headers {\n      value: \"\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\360\\240\\240\\240\\314\\255\"\n    }\n  }\n  http_body {\n    data: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n    data: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n  trailers {\n    headers {\n      key: \"6\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/crash-7137be4f227ac0faa82d76aa9b4c32a68e4c15f9",
    "content": "config {\n  name: \"envoy.router\"\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/crash-803e5cd629426a361101632f37b4651ef595c92e",
    "content": "config {\n  name: \"envoy.router\"\n  typed_config {\n    type_url: \"envoy.extensions.filters.http.router.v3.Router\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/crash-a45927a3f6e2efcbdb8ba12a1816895b219a09d2",
    "content": "config {\n  name: \"envoy.filters.http.dynamic_forward_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig\"\n    value: \"\\nq\\no\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b",
    "content": "config {\n  name: \"envoy.grpc_json_transcoder\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder\"\n    value: \"\\n\\001%8\\001\"\n  }\n}\ndata {\n  http_body {\n    data: \"\\001\\000\\000\\t\"\n  }\n  trailers {\n    headers {\n      key: \"0\"\n      value: \"||||||||||||||||||||||||||||||||||||||||\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/crash-da39a3ee5e6b4b0d3255bfef95601890afd80709",
    "content": ""
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/crash-ee8851a25304e8515905d09019afe8798b2376ac",
    "content": "config {\n  name: \"envoy.filters.http.header_to_metadata\"\n  typed_config {\n    type_url: \"\\000u\"\n  }\n}\ndata {\n  trailers {\n    headers {\n      key: \"\\177\\177\\177z\"\n    }\n    headers {\n      key: \"M\"\n      value: \"\\000u\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats",
    "content": "config {\n  name: \"envoy.filters.http.grpc_stats\"\n  typed_config: {}\n}\ndata {\n  headers {\n    headers {\n      key: \":method\"\n      value: \"POST\"\n    }\n    headers {\n      key: \":path\"\n      value: \"/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod\"\n    }\n    headers {\n      key: \"content-type\"\n      value: \"application/grpc\"\n    }\n  }\n}\nupstream_data {\n  headers {\n    headers {\n      key: \":status\"\n      value: \"200\"\n    }\n    headers {\n      key: \"content-type\"\n      value: \"application/grpc\"\n    }\n  }\n  proto_body {\n    message {\n      [type.googleapis.com/bookstore.Book] {\n        id: 16\n        title: \"Hardy Boys\"\n      }\n    }\n    chunk_size: 4\n  }\n  trailers {\n    headers {\n      key: \"grpc-status\"\n      value: \"0\"\n    }\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode",
    "content": "config {\n  name: \"envoy.filters.http.grpc_json_transcoder\"\n  typed_config: {}\n}\ndata {\n  headers {\n    headers {\n      key: \"content-type\"\n      value: \"application/json\"\n    }\n    headers {\n      key: \":method\"\n      value: \"POST\"\n    }\n    headers {\n      key: \":path\"\n      value: \"/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod\"\n    }\n  }\n  http_body {\n    data: \"{\\\"theme\\\": \\\"Children\\\"}\"\n  }\n}\nupstream_data {\n  headers {\n    headers {\n      key: \":status\"\n      value: \"200\"\n    }\n    headers {\n      key: \"content-type\"\n      value: \"application/grpc\"\n    }\n  }\n  proto_body {\n    message {\n      [type.googleapis.com/bookstore.Book] {\n        id: 16\n        title: \"Hardy Boys\"\n      }\n    }\n    chunk_size: 100\n  }\n  trailers {\n    headers {\n      key: \"grpc-status\"\n      value: \"0\"\n    }\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data",
    "content": "config {\n  name: \"envoy.filters.http.grpc_json_transcoder\"\n  typed_config: {}\n}\n\ndata {\n  headers {\n    headers {\n      key: \"content-type\"\n      value: \"application/json\"\n    }\n    headers {\n      key: \":method\"\n      value: \"POST\"\n    }\n    headers {\n      key: \":path\"\n      value: \"/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod\"\n    }\n  }\n  http_body {\n    data: \"{\\\"theme\\\": \\\"Children\\\"}\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data",
    "content": "config {\n  name: \"envoy.filters.http.grpc_json_transcoder\"\n  typed_config: {}\n}\n\ndata {\n  headers {\n    headers {\n      key: \":method\"\n      value: \"POST\"\n    }\n    headers {\n      key: \":path\"\n      value: \"/bookstore.Bookstore/CreateShelf\"\n    }\n    headers {\n      key: \"content-type\"\n      value: \"application/grpc\"\n    }\n  }\n  proto_body {\n    message {\n      [type.googleapis.com/bookstore.CreateShelfRequest] {\n        shelf: {\n          id: 32\n          theme: \"Children\"\n        }\n      }\n    }\n    chunk_size: 3\n  }\n  trailers {\n    headers {\n      key: \"grpc-status\"\n      value: \"0\"\n    }\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/jwt_connect",
    "content": "config {\n  name: \"envoy.filters.http.jwt_authn\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication\"\n    value: \"\\022\\004\\n\\002b\\000\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/metadata_not_reached",
    "content": "config {\n  name: \"envoy.router\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\"\n    value: \"\\020\\001\\032\\200\\005\\n\\0012\\022\\372\\004:\\367\\004\\022\\207\\004:\\204\\004\\022\\255\\001:\\252\\001\\022\\004\\n\\002\\n\\000\\022[2Y\\n\\006\\n\\004\\n\\002\\010\\001\\nK:I\\022927\\n\\002R\\000\\n12/\\n):\\'\\022\\tB\\007\\n\\005\\n\\001)@\\001\\022\\0222\\020\\n\\006\\n\\004\\n\\002\\010\\002\\n\\002\\032\\000\\n\\002b\\000\\022\\006\\n\\004\\n\\002\\010\\001\\n\\002\\032\\000\\022\\006\\n\\004\\n\\002\\010\\001\\022\\004\\n\\002\\n\\000\\n\\002\\032\\000\\022\\006\\n\\004\\n\\002\\010\\001\\022725\\n\\004\\n\\002\\n\\000\\n\\002\\032\\000\\n)2\\'\\n!:\\037\\022\\tB\\007\\n\\005\\n\\001)@\\001\\022\\n2\\010\\n\\002\\032\\000\\n\\002\\\"\\000\\022\\006\\n\\004\\n\\002\\010\\001\\n\\002\\032\\000\\022\\004\\n\\002\\n\\000\\022\\004\\n\\002\\n\\000\\022\\263\\0022\\260\\002\\n\\002R\\000\\n\\245\\0022\\242\\002\\n\\206\\0022\\203\\002\\nw2u\\nj:h\\022\\004\\n\\002\\n\\000\\022@2>\\n8:6\\022&2$\\n\\002R\\000\\n\\0362\\034\\n\\026:\\024\\022\\n2\\010\\n\\002\\032\\000\\n\\002\\\"\\000\\022\\006\\n\\004\\n\\002\\010\\001\\n\\002\\032\\000\\022\\006\\n\\004\\n\\002\\010\\001\\022\\004\\n\\002\\n\\000\\n\\002\\032\\000\\022\\006\\n\\004\\n\\002\\010\\001\\022\\0202\\016\\n\\004\\n\\002\\n\\000\\n\\002\\032\\000\\n\\002\\\"\\000\\022\\004\\n\\002\\n\\000\\n\\007R\\005\\n\\001\\002\\020\\001\\n.:,\\022\\0342\\032\\n\\005R\\003\\n\\001\\004\\n\\r2\\013\\n\\005R\\003\\n\\001\\002\\n\\002\\032\\000\\n\\002\\032\\000\\022\\006\\n\\004\\n\\002\\010\\001\\022\\004\\n\\002\\n\\000\\nR:P\\022<2:\\n\\006\\n\\004\\n\\002\\010\\001\\n,:*\\022\\0342\\032\\n\\005R\\003\\n\\001\\004\\n\\r2\\013\\n\\005R\\003\\n\\001\\002\\n\\002\\032\\000\\n\\002\\032\\000\\022\\004\\n\\002\\n\\000\\022\\004\\n\\002\\n\\000\\n\\002\\032\\000\\022\\006\\n\\004\\n\\002\\010\\001\\022\\002\\\"\\000\\022\\004\\n\\002\\n\\000\\n\
\004R\\002\\020\\001\\n\\027:\\025\\022\\r2\\013\\n\\005R\\003\\n\\001\\004\\n\\002\\032\\000\\022\\004\\n\\002\\n\\000\\n\\002\\032\\000\\022\\006\\n\\004\\n\\002\\010\\001\\022\\006\\n\\004\\n\\002\\010\\001\\022\\006\\n\\004\\n\\002\\010\\002\\022\\002\\032\\000\\022J*H\\nD\\n\\000\\032@\\022>2<\\n:28\\n6:4\\022220\\n.2,\\n*2(\\n&:$\\022\\\"2 \\n\\000\\n\\034:\\032\\022\\0302\\026\\n\\n2\\010\\n\\006:\\004\\022\\0022\\000\\n\\010:\\006\\022\\0042\\002\\n\\000\\030\\001\\022\\033:\\031\\022\\002J\\000\\022\\013\\n\\t\\n\\007\\010\\001\\022\\003\\032\\001/\\022\\006\\n\\004\\n\\002\\010\\002\\032(\\n\\016\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\022\\0262\\024\\n\\002R\\000\\n\\n2\\010\\n\\002R\\000\\n\\002\\032\\000\\n\\002\\032\\0000\\001\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/not_implemented_tap",
    "content": "config {\n  name: \"envoy.filters.http.tap\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\"\n    value: \"\\ns\\n\\000\\032o\\nf\\nd\\nb\\032`\\022^\\n$\\022\\\"\\n\\034\\022\\032\\n\\n\\032\\010\\032\\006\\n\\004\\n\\002\\032\\000\\n\\006\\022\\004\\n\\002 \\001\\n\\004\\032\\002 \\001\\n\\002*\\000\\n2\\n0\\n.\\032,\\022*\\n\\\"\\022 \\n\\032\\022\\030\\n\\n\\032\\010\\032\\006\\n\\004\\n\\002\\032\\000\\n\\004\\022\\002\\n\\000\\n\\004\\032\\002 \\001\\n\\002 \\001\\n\\000\\n\\002 \\001\\n\\002 \\001\\022\\005\\n\\000\\022s\\006\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/oom-da39a3ee5e6b4b0d3255bfef95601890afd80709",
    "content": ""
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/router_buffering",
    "content": "config {\n  name: \"envoy.router\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\"\n    value: \"*\\023x-envoy-max-retries\"\n  }\n}\ndata {\n  headers {\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"?\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"&&&&&&&&&&&\"\n    }\n    headers {\n      key: \"fff\\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n      value: \"&&&&&&&&&&&\"\n    }\n    headers {\n      key: \"=\"\n      value: 
\"ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n  }\n  trailers {\n    headers {\n      key: \"x-envoy-max!-retries\"\n      value: \"&\"\n    }\n    headers {\n      key: \"type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC\"\n      value: \"&&&&&&&&&&&\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n    }\n    headers {\n      key: \"x-env-max-retries\"\n      value: \"fff\\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: 
\"fff\\002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmtmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmfffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffff\"\n    }\n    headers {\n      key: \"x-envoy-m_x-retries\"\n      value: \"x-envoy-max-retries\"\n    }\n    headers {\n      key: \"x-envoy-max-retries\"\n      value: \"?\"\n    }\n  }\n  proto_body {\n    message {\n      type_url: \"type.googleapis.com/google.protobuf.Empty\"\n    }\n    chunk_size: 32\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_corpus/valid_jwt",
    "content": "config {\n  name: \"envoy.filters.http.jwt_authn\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication\"\n    value: \"\\n#\\n\\000\\022\\037\\n\\001h\\032\\000B\\025\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000J\\001h\\022\\205\\001\\n\\177\\022}TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\022\\002\\022\\000\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.extensions.filters.http;\n\nimport \"test/fuzz/common.proto\";\nimport \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto\";\n\nmessage FilterFuzzTestCase {\n  envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter config = 1;\n\n  // Downstream data (named for backwards compatibility).\n  test.fuzz.HttpData data = 2;\n\n  // Upstream data.\n  test.fuzz.HttpData upstream_data = 3;\n}\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc",
    "content": "#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/extensions/filters/http/common/fuzz/filter_fuzz.pb.validate.h\"\n#include \"test/extensions/filters/http/common/fuzz/uber_filter.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\n\nDEFINE_PROTO_FUZZER(const test::extensions::filters::http::FilterFuzzTestCase& input) {\n  ABSL_ATTRIBUTE_UNUSED static PostProcessorRegistration reg = {\n      [](test::extensions::filters::http::FilterFuzzTestCase* input, unsigned int seed) {\n        // This ensures that the mutated configs all have valid filter names and type_urls. The list\n        // of names and type_urls is pulled from the NamedHttpFilterConfigFactory. All Envoy\n        // extensions are built with this test (see BUILD file). This post-processor mutation is\n        // applied only when libprotobuf-mutator calls mutate on an input, and *not* during fuzz\n        // target execution. 
Replaying a corpus through the fuzzer will not be affected by the\n        // post-processor mutation.\n        static const std::vector<absl::string_view> filter_names = Registry::FactoryRegistry<\n            Server::Configuration::NamedHttpFilterConfigFactory>::registeredNames();\n        static const auto factories = Registry::FactoryRegistry<\n            Server::Configuration::NamedHttpFilterConfigFactory>::factories();\n        // Choose a valid filter name.\n        if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) ==\n            std::end(filter_names)) {\n          absl::string_view filter_name = filter_names[seed % filter_names.size()];\n          input->mutable_config()->set_name(std::string(filter_name));\n        }\n        // Set the corresponding type_url for Any.\n        auto& factory = factories.at(input->config().name());\n        input->mutable_config()->mutable_typed_config()->set_type_url(\n            absl::StrCat(\"type.googleapis.com/\",\n                         factory->createEmptyConfigProto()->GetDescriptor()->full_name()));\n\n        // For fuzzing proto data, guide the mutator to useful 'Any' types half\n        // the time. The other half the time, let the fuzzing engine choose\n        // any message to serialize.\n        if (seed % 2 == 0 && input->data().has_proto_body()) {\n          UberFilterFuzzer::guideAnyProtoType(input->mutable_data(), seed / 2);\n        }\n      }};\n\n  try {\n    // Catch invalid header characters.\n    TestUtility::validate(input);\n    // Fuzz filter.\n    static UberFilterFuzzer fuzzer;\n    fuzzer.fuzz(input.config(), input.data(), input.upstream_data());\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n  }\n}\n\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/http_filter_fuzzer.h",
    "content": "#pragma once\n\n#include \"envoy/http/filter.h\"\n\n#include \"common/http/utility.h\"\n\n#include \"test/fuzz/common.pb.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\n\n// Generic library to fuzz HTTP filters.\n// Usage:\n//   1. Create filter and set callbacks.\n//          ExampleFilter filter;\n//          filter.setDecoderFilterCallbacks(decoder_callbacks);\n//\n//   2. Create HttpFilterFuzzer class and run decode methods. Optionally add access logging. Reset\n//   fuzzer to reset state. This class can be static. All state is reset in the reset method.\n//          Envoy::Extensions::HttpFilters::HttpFilterFuzzer fuzzer;\n//          fuzzer.runData(static_cast<Envoy::Http::StreamDecoderFilter*>(&filter),\n//                         input.downstream_request());\n//          fuzzer.accessLog(static_cast<Envoy::AccessLog::Instance*>(&filter),\n//                            stream_info);\n//          fuzzer.reset();\n\nclass HttpFilterFuzzer {\npublic:\n  // Instantiate HttpFilterFuzzer\n  HttpFilterFuzzer() = default;\n\n  // This executes the filter decode or encode methods with the fuzzed data.\n  template <class FilterType> void runData(FilterType* filter, const test::fuzz::HttpData& data);\n\n  // This executes the access logger with the fuzzed headers/trailers.\n  void accessLog(AccessLog::Instance* access_logger, const StreamInfo::StreamInfo& stream_info) {\n    ENVOY_LOG_MISC(debug, \"Access logging\");\n    access_logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info);\n  }\n\n  // Fuzzed headers and trailers are needed for access logging, reset the data and destroy filters.\n  void reset() {\n    enabled_ = true;\n    request_headers_.clear();\n    response_headers_.clear();\n    request_trailers_.clear();\n    response_trailers_.clear();\n    encoded_trailers_.clear();\n  }\n\nprotected:\n  // Templated 
functions to validate and send headers/data/trailers for decoders/encoders.\n  // General functions are deleted, but templated specializations for encoders/decoders are defined\n  // in the cc file.\n  template <class FilterType>\n  Http::FilterHeadersStatus sendHeaders(FilterType* filter, const test::fuzz::HttpData& data,\n                                        bool end_stream) = delete;\n\n  template <class FilterType>\n  Http::FilterDataStatus sendData(FilterType* filter, Buffer::Instance& buffer,\n                                  bool end_stream) = delete;\n\n  template <class FilterType>\n  void sendTrailers(FilterType* filter, const test::fuzz::HttpData& data) = delete;\n\n  // This keeps track of when a filter will stop decoding due to direct responses.\n  // If your filter needs to stop decoding because of a direct response, make sure you override\n  // sendLocalReply to set enabled_ to false.\n  bool enabled_ = true;\n\n  // Headers/trailers need to be saved for the lifetime of the filter,\n  // so save them as member variables.\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestRequestTrailerMapImpl request_trailers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n  Http::TestResponseTrailerMapImpl encoded_trailers_;\n};\n\ntemplate <class FilterType>\nvoid HttpFilterFuzzer::runData(FilterType* filter, const test::fuzz::HttpData& data) {\n  bool end_stream = false;\n  enabled_ = true;\n  if (data.body_case() == test::fuzz::HttpData::BODY_NOT_SET && !data.has_trailers()) {\n    end_stream = true;\n  }\n  const auto& headersStatus = sendHeaders(filter, data, end_stream);\n  ENVOY_LOG_MISC(debug, \"Finished with FilterHeadersStatus: {}\", headersStatus);\n  if ((headersStatus != Http::FilterHeadersStatus::Continue &&\n       headersStatus != Http::FilterHeadersStatus::StopIteration) ||\n      !enabled_) {\n    return;\n  }\n\n  const std::vector<std::string> data_chunks = 
Fuzz::parseHttpData(data);\n  for (size_t i = 0; i < data_chunks.size(); i++) {\n    if (!data.has_trailers() && i == data_chunks.size() - 1) {\n      end_stream = true;\n    }\n    Buffer::OwnedImpl buffer(data_chunks[i]);\n    const auto& dataStatus = sendData(filter, buffer, end_stream);\n    ENVOY_LOG_MISC(debug, \"Finished with FilterDataStatus: {}\", dataStatus);\n    if (dataStatus != Http::FilterDataStatus::Continue || !enabled_) {\n      return;\n    }\n  }\n\n  if (data.has_trailers() && enabled_) {\n    sendTrailers(filter, data);\n  }\n}\n\ntemplate <>\ninline Http::FilterHeadersStatus HttpFilterFuzzer::sendHeaders(Http::StreamDecoderFilter* filter,\n                                                               const test::fuzz::HttpData& data,\n                                                               bool end_stream) {\n  request_headers_ = Fuzz::fromHeaders<Http::TestRequestHeaderMapImpl>(data.headers());\n  if (request_headers_.Path() == nullptr) {\n    request_headers_.setPath(\"/foo\");\n  }\n  if (request_headers_.Method() == nullptr) {\n    request_headers_.setMethod(\"GET\");\n  }\n  if (request_headers_.Host() == nullptr) {\n    request_headers_.setHost(\"foo.com\");\n  }\n\n  ENVOY_LOG_MISC(debug, \"Decoding headers (end_stream={}):\\n{} \", end_stream, request_headers_);\n  Http::FilterHeadersStatus status = filter->decodeHeaders(request_headers_, end_stream);\n  if (end_stream) {\n    filter->decodeComplete();\n  }\n  return status;\n}\n\ntemplate <>\ninline Http::FilterHeadersStatus HttpFilterFuzzer::sendHeaders(Http::StreamEncoderFilter* filter,\n                                                               const test::fuzz::HttpData& data,\n                                                               bool end_stream) {\n  response_headers_ = Fuzz::fromHeaders<Http::TestResponseHeaderMapImpl>(data.headers());\n\n  // Status must be a valid unsigned long. 
If not set, the utility function below will throw\n  // an exception on the data path of some filters. This should never happen in production, so catch\n  // the exception and set to a default value.\n  try {\n    (void)Http::Utility::getResponseStatus(response_headers_);\n  } catch (const Http::CodecClientException& e) {\n    response_headers_.setStatus(200);\n  }\n\n  ENVOY_LOG_MISC(debug, \"Encoding headers (end_stream={}):\\n{} \", end_stream, response_headers_);\n  Http::FilterHeadersStatus status = filter->encodeHeaders(response_headers_, end_stream);\n  if (end_stream) {\n    filter->encodeComplete();\n  }\n  return status;\n}\n\ntemplate <>\ninline Http::FilterDataStatus HttpFilterFuzzer::sendData(Http::StreamDecoderFilter* filter,\n                                                         Buffer::Instance& buffer,\n                                                         bool end_stream) {\n  ENVOY_LOG_MISC(debug, \"Decoding data (end_stream={}): {} \", end_stream, buffer.toString());\n  Http::FilterDataStatus status = filter->decodeData(buffer, end_stream);\n  if (end_stream) {\n    filter->decodeComplete();\n  }\n  return status;\n}\n\ntemplate <>\ninline Http::FilterDataStatus HttpFilterFuzzer::sendData(Http::StreamEncoderFilter* filter,\n                                                         Buffer::Instance& buffer,\n                                                         bool end_stream) {\n  ENVOY_LOG_MISC(debug, \"Encoding data (end_stream={}): {} \", end_stream, buffer.toString());\n  Http::FilterDataStatus status = filter->encodeData(buffer, end_stream);\n  if (end_stream) {\n    filter->encodeComplete();\n  }\n  return status;\n}\n\ntemplate <>\ninline void HttpFilterFuzzer::sendTrailers(Http::StreamDecoderFilter* filter,\n                                           const test::fuzz::HttpData& data) {\n  request_trailers_ = Fuzz::fromHeaders<Http::TestRequestTrailerMapImpl>(data.trailers());\n  ENVOY_LOG_MISC(debug, \"Decoding trailers:\\n{} 
\", request_trailers_);\n  filter->decodeTrailers(request_trailers_);\n  filter->decodeComplete();\n}\n\ntemplate <>\ninline void HttpFilterFuzzer::sendTrailers(Http::StreamEncoderFilter* filter,\n                                           const test::fuzz::HttpData& data) {\n  response_trailers_ = Fuzz::fromHeaders<Http::TestResponseTrailerMapImpl>(data.trailers());\n  ENVOY_LOG_MISC(debug, \"Encoding trailers:\\n{} \", response_trailers_);\n  filter->encodeTrailers(response_trailers_);\n  filter->encodeComplete();\n}\n\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/uber_filter.cc",
    "content": "#include \"test/extensions/filters/http/common/fuzz/uber_filter.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\n\nUberFilterFuzzer::UberFilterFuzzer() : async_request_{&cluster_manager_.async_client_} {\n  // This is a decoder filter.\n  ON_CALL(filter_callback_, addStreamDecoderFilter(_))\n      .WillByDefault(Invoke([&](Http::StreamDecoderFilterSharedPtr filter) -> void {\n        decoder_filter_ = filter;\n        decoder_filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n      }));\n  // This is an encoded filter.\n  ON_CALL(filter_callback_, addStreamEncoderFilter(_))\n      .WillByDefault(Invoke([&](Http::StreamEncoderFilterSharedPtr filter) -> void {\n        encoder_filter_ = filter;\n        encoder_filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n      }));\n  // This is a decoder and encoder filter.\n  ON_CALL(filter_callback_, addStreamFilter(_))\n      .WillByDefault(Invoke([&](Http::StreamFilterSharedPtr filter) -> void {\n        decoder_filter_ = filter;\n        decoder_filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n        encoder_filter_ = filter;\n        encoder_filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n      }));\n  // This filter supports access logging.\n  ON_CALL(filter_callback_, addAccessLogHandler(_))\n      .WillByDefault(\n          Invoke([&](AccessLog::InstanceSharedPtr handler) -> void { access_logger_ = handler; }));\n  // This handles stopping execution after a direct response is sent.\n  ON_CALL(decoder_callbacks_, sendLocalReply(_, _, _, _, _))\n      .WillByDefault(\n          Invoke([this](Http::Code code, absl::string_view body,\n                        
std::function<void(Http::ResponseHeaderMap & headers)> modify_headers,\n                        const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                        absl::string_view details) {\n            enabled_ = false;\n            decoder_callbacks_.sendLocalReply_(code, body, modify_headers, grpc_status, details);\n          }));\n  ON_CALL(encoder_callbacks_, addEncodedTrailers())\n      .WillByDefault(testing::ReturnRef(encoded_trailers_));\n  // Set expectations for particular filters that may get fuzzed.\n  perFilterSetup();\n}\n\nvoid UberFilterFuzzer::fuzz(\n    const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter&\n        proto_config,\n    const test::fuzz::HttpData& downstream_data, const test::fuzz::HttpData& upstream_data) {\n  try {\n    // Try to create the filter. Exit early if the config is invalid or violates PGV constraints.\n    ENVOY_LOG_MISC(info, \"filter name {}\", proto_config.name());\n    auto& factory = Config::Utility::getAndCheckFactoryByName<\n        Server::Configuration::NamedHttpFilterConfigFactory>(proto_config.name());\n    ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(\n        proto_config, factory_context_.messageValidationVisitor(), factory);\n    // Clean-up config with filter-specific logic before it runs through validations.\n    cleanFuzzedConfig(proto_config.name(), message.get());\n    cb_ = factory.createFilterFactoryFromProto(*message, \"stats\", factory_context_);\n    cb_(filter_callback_);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"Controlled exception {}\", e.what());\n    return;\n  }\n\n  // Data path should not throw exceptions.\n  if (decoder_filter_ != nullptr) {\n    HttpFilterFuzzer::runData(decoder_filter_.get(), downstream_data);\n  }\n  if (encoder_filter_ != nullptr) {\n    HttpFilterFuzzer::runData(encoder_filter_.get(), upstream_data);\n  }\n  if (access_logger_ != nullptr) {\n    
HttpFilterFuzzer::accessLog(access_logger_.get(), stream_info_);\n  }\n\n  reset();\n}\n\nvoid UberFilterFuzzer::reset() {\n  if (decoder_filter_ != nullptr) {\n    decoder_filter_->onDestroy();\n  }\n  decoder_filter_.reset();\n\n  if (encoder_filter_ != nullptr) {\n    encoder_filter_->onDestroy();\n  }\n  encoder_filter_.reset();\n\n  access_logger_.reset();\n  HttpFilterFuzzer::reset();\n}\n\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/uber_filter.h",
    "content": "#include \"test/extensions/filters/http/common/fuzz/http_filter_fuzzer.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\n\n// Generic filter fuzzer that can fuzz any HttpFilter.\nclass UberFilterFuzzer : public HttpFilterFuzzer {\npublic:\n  UberFilterFuzzer();\n\n  // This creates the filter config and runs the fuzzed data against the filter.\n  void fuzz(const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter&\n                proto_config,\n            const test::fuzz::HttpData& downstream_data, const test::fuzz::HttpData& upstream_data);\n\n  // For fuzzing proto data, guide the mutator to useful 'Any' types.\n  static void guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice);\n\n  void reset();\n\nprotected:\n  // Set-up filter specific mock expectations in constructor.\n  void perFilterSetup();\n  // Filter specific input cleanup.\n  void cleanFuzzedConfig(absl::string_view filter_name, Protobuf::Message* message);\n\nprivate:\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  NiceMock<Http::MockFilterChainFactoryCallbacks> filter_callback_;\n  std::shared_ptr<Network::MockDnsResolver> resolver_{std::make_shared<Network::MockDnsResolver>()};\n  Http::FilterFactoryCb cb_;\n  NiceMock<Envoy::Network::MockConnection> connection_;\n  Network::Address::InstanceConstSharedPtr addr_;\n  NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  NiceMock<Http::MockAsyncClientRequest> async_request_;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info_;\n\n  // Filter constructed from the config.\n  Http::StreamDecoderFilterSharedPtr decoder_filter_;\n  Http::StreamEncoderFilterSharedPtr encoder_filter_;\n  AccessLog::InstanceSharedPtr 
access_logger_;\n\n  // Mocked callbacks.\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n};\n\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/fuzz/uber_per_filter.cc",
    "content": "#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/squash/v3/squash.pb.h\"\n#include \"envoy/extensions/filters/http/tap/v3/tap.pb.h\"\n\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/filters/http/common/utility.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/extensions/filters/http/common/fuzz/uber_filter.h\"\n#include \"test/proto/bookstore.pb.h\"\n\n// This file contains any filter-specific setup and input clean-up needed in the generic filter fuzz\n// target.\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace {\n\nvoid addFileDescriptorsRecursively(const Protobuf::FileDescriptor& descriptor,\n                                   Protobuf::FileDescriptorSet& set,\n                                   absl::flat_hash_set<absl::string_view>& added_descriptors) {\n  if (!added_descriptors.insert(descriptor.name()).second) {\n    // Already added.\n    return;\n  }\n  for (int i = 0; i < descriptor.dependency_count(); i++) {\n    addFileDescriptorsRecursively(*descriptor.dependency(i), set, added_descriptors);\n  }\n  descriptor.CopyTo(set.add_file());\n}\n\nvoid addBookstoreProtoDescriptor(Protobuf::Message* message) {\n  envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder& config =\n      dynamic_cast<envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder&>(\n          *message);\n  config.clear_services();\n  config.add_services(\"bookstore.Bookstore\");\n\n  Protobuf::FileDescriptorSet descriptor_set;\n  const auto* file_descriptor =\n      Protobuf::DescriptorPool::generated_pool()->FindFileByName(\"test/proto/bookstore.proto\");\n  ASSERT(file_descriptor != nullptr);\n  // Create a set to keep track of descriptors as they are added.\n  
absl::flat_hash_set<absl::string_view> added_descriptors;\n  addFileDescriptorsRecursively(*file_descriptor, descriptor_set, added_descriptors);\n  descriptor_set.SerializeToString(config.mutable_proto_descriptor_bin());\n}\n} // namespace\n\nvoid UberFilterFuzzer::guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice) {\n  // These types are request/response from the test Bookstore service\n  // for the gRPC Transcoding filter.\n  static const std::vector<std::string> expected_types = {\n      \"type.googleapis.com/bookstore.ListShelvesResponse\",\n      \"type.googleapis.com/bookstore.CreateShelfRequest\",\n      \"type.googleapis.com/bookstore.GetShelfRequest\",\n      \"type.googleapis.com/bookstore.DeleteShelfRequest\",\n      \"type.googleapis.com/bookstore.ListBooksRequest\",\n      \"type.googleapis.com/bookstore.CreateBookRequest\",\n      \"type.googleapis.com/bookstore.GetBookRequest\",\n      \"type.googleapis.com/bookstore.UpdateBookRequest\",\n      \"type.googleapis.com/bookstore.DeleteBookRequest\",\n      \"type.googleapis.com/bookstore.GetAuthorRequest\",\n      \"type.googleapis.com/bookstore.EchoBodyRequest\",\n      \"type.googleapis.com/bookstore.EchoStructReqResp\",\n      \"type.googleapis.com/bookstore.Shelf\",\n      \"type.googleapis.com/bookstore.Book\",\n      \"type.googleapis.com/google.protobuf.Empty\",\n      \"type.googleapis.com/google.api.HttpBody\",\n  };\n  ProtobufWkt::Any* mutable_any = mutable_data->mutable_proto_body()->mutable_message();\n  const std::string& type_url = expected_types[choice % expected_types.size()];\n  mutable_any->set_type_url(type_url);\n}\n\nvoid cleanAttachmentTemplate(Protobuf::Message* message) {\n  envoy::extensions::filters::http::squash::v3::Squash& config =\n      dynamic_cast<envoy::extensions::filters::http::squash::v3::Squash&>(*message);\n  std::string json;\n  Protobuf::util::JsonPrintOptions json_options;\n  if (!Protobuf::util::MessageToJsonString(config.attachment_template(), 
&json, json_options)\n           .ok()) {\n    config.clear_attachment_template();\n  }\n}\n\nvoid cleanTapConfig(Protobuf::Message* message) {\n  envoy::extensions::filters::http::tap::v3::Tap& config =\n      dynamic_cast<envoy::extensions::filters::http::tap::v3::Tap&>(*message);\n  if (config.common_config().config_type_case() ==\n      envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::kTapdsConfig) {\n    config.mutable_common_config()->mutable_static_config()->mutable_match_config()->set_any_match(\n        true);\n  }\n  // TODO(samflattery): remove once StreamingGrpcSink is implemented\n  // a static config filter is required to have one sink, but since validation isn't performed on\n  // the filter until after this function runs, we have to manually check that there are sinks\n  // before checking that they are not StreamingGrpc\n  else if (config.common_config().config_type_case() ==\n               envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::\n                   kStaticConfig &&\n           !config.common_config().static_config().output_config().sinks().empty() &&\n           config.common_config()\n                   .static_config()\n                   .output_config()\n                   .sinks(0)\n                   .output_sink_type_case() ==\n               envoy::config::tap::v3::OutputSink::OutputSinkTypeCase::kStreamingGrpc) {\n    // will be caught in UberFilterFuzzer::fuzz\n    throw EnvoyException(\"received input with not implemented output_sink_type StreamingGrpcSink\");\n  }\n}\n\nvoid UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name,\n                                         Protobuf::Message* message) {\n  const std::string name = Extensions::HttpFilters::Common::FilterNameUtil::canonicalFilterName(\n      std::string(filter_name));\n  // Map filter name to clean-up function.\n  if (filter_name == HttpFilterNames::get().GrpcJsonTranscoder) {\n    // Add a valid 
service proto descriptor.\n    addBookstoreProtoDescriptor(message);\n  } else if (name == HttpFilterNames::get().Squash) {\n    cleanAttachmentTemplate(message);\n  } else if (name == HttpFilterNames::get().Tap) {\n    // TapDS oneof field and OutputSinkType StreamingGrpc not implemented\n    cleanTapConfig(message);\n  }\n}\n\nvoid UberFilterFuzzer::perFilterSetup() {\n  // Prepare expectations for the ext_authz filter.\n  addr_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 1111);\n  ON_CALL(connection_, remoteAddress()).WillByDefault(testing::ReturnRef(addr_));\n  ON_CALL(connection_, localAddress()).WillByDefault(testing::ReturnRef(addr_));\n  ON_CALL(factory_context_, clusterManager()).WillByDefault(testing::ReturnRef(cluster_manager_));\n  ON_CALL(cluster_manager_.async_client_, send_(_, _, _)).WillByDefault(Return(&async_request_));\n\n  ON_CALL(decoder_callbacks_, connection()).WillByDefault(testing::Return(&connection_));\n  ON_CALL(decoder_callbacks_, activeSpan())\n      .WillByDefault(testing::ReturnRef(Tracing::NullSpan::instance()));\n  decoder_callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2;\n\n  ON_CALL(encoder_callbacks_, connection()).WillByDefault(testing::Return(&connection_));\n  ON_CALL(encoder_callbacks_, activeSpan())\n      .WillByDefault(testing::ReturnRef(Tracing::NullSpan::instance()));\n  encoder_callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2;\n\n  // Prepare expectations for dynamic forward proxy.\n  ON_CALL(factory_context_.dispatcher_, createDnsResolver(_, _))\n      .WillByDefault(testing::Return(resolver_));\n\n  // Prepare expectations for TAP config.\n  ON_CALL(factory_context_, admin()).WillByDefault(testing::ReturnRef(factory_context_.admin_));\n  ON_CALL(factory_context_.admin_, addHandler(_, _, _, _, _)).WillByDefault(testing::Return(true));\n  ON_CALL(factory_context_.admin_, removeHandler(_)).WillByDefault(testing::Return(true));\n}\n\n} // namespace HttpFilters\n} // 
namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/jwks_fetcher_test.cc",
    "content": "#include <chrono>\n#include <thread>\n\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n\n#include \"common/http/message_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/common/jwks_fetcher.h\"\n\n#include \"test/extensions/filters/http/common/mock.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\nusing envoy::config::core::v3::HttpUri;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\nnamespace {\n\nconst char publicKey[] = R\"(\n{\n  \"keys\": [\n    {\n      \"kty\": \"RSA\",\n      \"alg\": \"RS256\",\n      \"use\": \"sig\",\n      \"kid\": \"62a93512c9ee4c7f8067b5a216dade2763d32a47\",\n      \"n\": \"up97uqrF9MWOPaPkwSaBeuAPLOr9FKcaWGdVEGzQ4f3Zq5WKVZowx9TCBxmImNJ1qmUi13pB8otwM_l5lfY1AFBMxVbQCUXntLovhDaiSvYp4wGDjFzQiYA-pUq8h6MUZBnhleYrkU7XlCBwNVyN8qNMkpLA7KFZYz-486GnV2NIJJx_4BGa3HdKwQGxi2tjuQsQvao5W4xmSVaaEWopBwMy2QmlhSFQuPUpTaywTqUcUq_6SfAHhZ4IDa_FxEd2c2z8gFGtfst9cY3lRYf-c_ZdboY3mqN9Su3-j3z5r2SHWlhB_LNAjyWlBGsvbGPlTqDziYQwZN4aGsqVKQb9Vw\",\n      \"e\": \"AQAB\"\n    },\n    {\n      \"kty\": \"RSA\",\n      \"alg\": \"RS256\",\n      \"use\": \"sig\",\n      \"kid\": \"b3319a147514df7ee5e4bcdee51350cc890cc89e\",\n      \"n\": \"up97uqrF9MWOPaPkwSaBeuAPLOr9FKcaWGdVEGzQ4f3Zq5WKVZowx9TCBxmImNJ1qmUi13pB8otwM_l5lfY1AFBMxVbQCUXntLovhDaiSvYp4wGDjFzQiYA-pUq8h6MUZBnhleYrkU7XlCBwNVyN8qNMkpLA7KFZYz-486GnV2NIJJx_4BGa3HdKwQGxi2tjuQsQvao5W4xmSVaaEWopBwMy2QmlhSFQuPUpTaywTqUcUq_6SfAHhZ4IDa_FxEd2c2z8gFGtfst9cY3lRYf-c_ZdboY3mqN9Su3-j3z5r2SHWlhB_LNAjyWlBGsvbGPlTqDziYQwZN4aGsqVKQb9Vw\",\n      \"e\": \"AQAB\"\n    }\n  ]\n}\n)\";\n\nconst std::string JwksUri = R\"(\nuri: https://pubkey_server/pubkey_path\ncluster: pubkey_cluster\ntimeout:\n  seconds: 5\n)\";\n\nclass JwksFetcherTest : public testing::Test {\npublic:\n  void SetUp() override { TestUtility::loadFromYaml(JwksUri, uri_); }\n  HttpUri 
uri_;\n  testing::NiceMock<Server::Configuration::MockFactoryContext> mock_factory_ctx_;\n  NiceMock<Tracing::MockSpan> parent_span_;\n};\n\n// Test findByIssuer\nTEST_F(JwksFetcherTest, TestGetSuccess) {\n  // Setup\n  MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, \"200\", publicKey);\n  MockJwksReceiver receiver;\n  std::unique_ptr<JwksFetcher> fetcher(JwksFetcher::create(mock_factory_ctx_.cluster_manager_));\n  EXPECT_TRUE(fetcher != nullptr);\n  EXPECT_CALL(receiver, onJwksSuccessImpl(testing::_)).Times(1);\n  EXPECT_CALL(receiver, onJwksError(testing::_)).Times(0);\n\n  // Act\n  fetcher->fetch(uri_, parent_span_, receiver);\n}\n\nTEST_F(JwksFetcherTest, TestGet400) {\n  // Setup\n  MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, \"400\", \"invalid\");\n  MockJwksReceiver receiver;\n  std::unique_ptr<JwksFetcher> fetcher(JwksFetcher::create(mock_factory_ctx_.cluster_manager_));\n  EXPECT_TRUE(fetcher != nullptr);\n  EXPECT_CALL(receiver, onJwksSuccessImpl(testing::_)).Times(0);\n  EXPECT_CALL(receiver, onJwksError(JwksFetcher::JwksReceiver::Failure::Network)).Times(1);\n\n  // Act\n  fetcher->fetch(uri_, parent_span_, receiver);\n}\n\nTEST_F(JwksFetcherTest, TestGetNoBody) {\n  // Setup\n  MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, \"200\", \"\");\n  MockJwksReceiver receiver;\n  std::unique_ptr<JwksFetcher> fetcher(JwksFetcher::create(mock_factory_ctx_.cluster_manager_));\n  EXPECT_TRUE(fetcher != nullptr);\n  EXPECT_CALL(receiver, onJwksSuccessImpl(testing::_)).Times(0);\n  EXPECT_CALL(receiver, onJwksError(JwksFetcher::JwksReceiver::Failure::Network)).Times(1);\n\n  // Act\n  fetcher->fetch(uri_, parent_span_, receiver);\n}\n\nTEST_F(JwksFetcherTest, TestGetInvalidJwks) {\n  // Setup\n  MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, \"200\", \"invalid\");\n  MockJwksReceiver receiver;\n  std::unique_ptr<JwksFetcher> fetcher(JwksFetcher::create(mock_factory_ctx_.cluster_manager_));\n  
EXPECT_TRUE(fetcher != nullptr);\n  EXPECT_CALL(receiver, onJwksSuccessImpl(testing::_)).Times(0);\n  EXPECT_CALL(receiver, onJwksError(JwksFetcher::JwksReceiver::Failure::InvalidJwks)).Times(1);\n\n  // Act\n  fetcher->fetch(uri_, parent_span_, receiver);\n}\n\nTEST_F(JwksFetcherTest, TestHttpFailure) {\n  // Setup\n  MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_,\n                           Http::AsyncClient::FailureReason::Reset);\n  MockJwksReceiver receiver;\n  std::unique_ptr<JwksFetcher> fetcher(JwksFetcher::create(mock_factory_ctx_.cluster_manager_));\n  EXPECT_TRUE(fetcher != nullptr);\n  EXPECT_CALL(receiver, onJwksSuccessImpl(testing::_)).Times(0);\n  EXPECT_CALL(receiver, onJwksError(JwksFetcher::JwksReceiver::Failure::Network)).Times(1);\n\n  // Act\n  fetcher->fetch(uri_, parent_span_, receiver);\n}\n\nTEST_F(JwksFetcherTest, TestCancel) {\n  // Setup\n  Http::MockAsyncClientRequest request(&(mock_factory_ctx_.cluster_manager_.async_client_));\n  MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, &request);\n  MockJwksReceiver receiver;\n  std::unique_ptr<JwksFetcher> fetcher(JwksFetcher::create(mock_factory_ctx_.cluster_manager_));\n  EXPECT_TRUE(fetcher != nullptr);\n  EXPECT_CALL(request, cancel()).Times(1);\n  EXPECT_CALL(receiver, onJwksSuccessImpl(testing::_)).Times(0);\n  EXPECT_CALL(receiver, onJwksError(testing::_)).Times(0);\n\n  // Act\n  fetcher->fetch(uri_, parent_span_, receiver);\n  // Proper cancel\n  fetcher->cancel();\n  // Re-entrant cancel\n  fetcher->cancel();\n}\n\nTEST_F(JwksFetcherTest, TestSpanPassedDown) {\n  // Setup\n  MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, \"200\", publicKey);\n  NiceMock<MockJwksReceiver> receiver;\n  std::unique_ptr<JwksFetcher> fetcher(JwksFetcher::create(mock_factory_ctx_.cluster_manager_));\n\n  // Expectations for span\n  EXPECT_CALL(mock_factory_ctx_.cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(Invoke(\n          
[this](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks&,\n                 const Http::AsyncClient::RequestOptions& options) -> Http::AsyncClient::Request* {\n            EXPECT_TRUE(options.parent_span_ == &this->parent_span_);\n            EXPECT_TRUE(options.child_span_name_ == \"JWT Remote PubKey Fetch\");\n            return nullptr;\n          }));\n\n  // Act\n  fetcher->fetch(uri_, parent_span_, receiver);\n}\n\n} // namespace\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/mock.cc",
    "content": "#include \"test/extensions/filters/http/common/mock.h\"\n\n#include <memory>\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\nMockUpstream::MockUpstream(Upstream::MockClusterManager& mock_cm, const std::string& status,\n                           const std::string& response_body)\n    : request_(&mock_cm.async_client_), status_(status), response_body_(response_body) {\n  ON_CALL(mock_cm.async_client_, send_(testing::_, testing::_, testing::_))\n      .WillByDefault(testing::Invoke(\n          [this](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                 const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            Http::ResponseMessagePtr response_message(\n                new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", status_}}}));\n            if (response_body_.length()) {\n              response_message->body().add(response_body_);\n            } else {\n              response_message->body().drain(response_message->body().length());\n            }\n            cb.onSuccess(request_, std::move(response_message));\n            return &request_;\n          }));\n}\n\nMockUpstream::MockUpstream(Upstream::MockClusterManager& mock_cm,\n                           Http::AsyncClient::FailureReason reason)\n    : request_(&mock_cm.async_client_) {\n  ON_CALL(mock_cm.async_client_, send_(testing::_, testing::_, testing::_))\n      .WillByDefault(testing::Invoke(\n          [this, reason](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                         const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            cb.onFailure(request_, reason);\n            return &request_;\n          }));\n}\n\nMockUpstream::MockUpstream(Upstream::MockClusterManager& mock_cm,\n                           Http::MockAsyncClientRequest* request)\n    : 
request_(&mock_cm.async_client_) {\n  ON_CALL(mock_cm.async_client_, send_(testing::_, testing::_, testing::_))\n      .WillByDefault(testing::Return(request));\n}\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/mock.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/http_uri.pb.h\"\n\n#include \"common/http/message_impl.h\"\n\n#include \"extensions/filters/http/common/jwks_fetcher.h\"\n\n#include \"test/mocks/upstream/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\n\nclass MockJwksFetcher : public JwksFetcher {\npublic:\n  MOCK_METHOD(void, cancel, ());\n  MOCK_METHOD(void, fetch,\n              (const envoy::config::core::v3::HttpUri& uri, Tracing::Span& parent_span,\n               JwksReceiver& receiver));\n};\n\n// A mock HTTP upstream.\nclass MockUpstream {\npublic:\n  /**\n   * Mock upstream which returns a given response body.\n   */\n  MockUpstream(Upstream::MockClusterManager& mock_cm, const std::string& status,\n               const std::string& response_body);\n  /**\n   * Mock upstream which returns a given failure.\n   */\n  MockUpstream(Upstream::MockClusterManager& mock_cm, Http::AsyncClient::FailureReason reason);\n  /**\n   * Mock upstream which returns the given request.\n   */\n  MockUpstream(Upstream::MockClusterManager& mock_cm, Http::MockAsyncClientRequest* request);\n\nprivate:\n  Http::MockAsyncClientRequest request_;\n  std::string status_;\n  std::string response_body_;\n};\n\nclass MockJwksReceiver : public JwksFetcher::JwksReceiver {\npublic:\n  /* GoogleMock does handle r-value references hence the below construction.\n   * Expectations and assertions should be made on onJwksSuccessImpl in place\n   * of onJwksSuccess.\n   */\n  void onJwksSuccess(google::jwt_verify::JwksPtr&& jwks) override {\n    ASSERT(jwks);\n    onJwksSuccessImpl(*jwks.get());\n  }\n  MOCK_METHOD(void, onJwksSuccessImpl, (const google::jwt_verify::Jwks& jwks));\n  MOCK_METHOD(void, onJwksError, (JwksFetcher::JwksReceiver::Failure reason));\n};\n\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/common/utility_test.cc",
    "content": "#include \"extensions/filters/http/common/utility.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Common {\nnamespace {\n\n// Test that canonical (or unknown) names are returned unmodified.\nTEST(FilterNameUtilTest, TestIgnoreCanonicalName) {\n  NiceMock<Runtime::MockLoader> runtime;\n\n  EXPECT_EQ(HttpFilterNames::get().Buffer,\n            FilterNameUtil::canonicalFilterName(HttpFilterNames::get().Buffer, &runtime));\n  EXPECT_EQ(\"canonical.name\", FilterNameUtil::canonicalFilterName(\"canonical.name\", &runtime));\n}\n\n// Test that deprecated names are canonicalized.\nTEST(FilterNameUtilTest, DEPRECATED_FEATURE_TEST(TestDeprecatedName)) {\n  NiceMock<Runtime::MockLoader> runtime;\n\n  EXPECT_CALL(\n      runtime.snapshot_,\n      deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n      .WillRepeatedly(Return(true));\n\n  EXPECT_EQ(HttpFilterNames::get().Buffer,\n            FilterNameUtil::canonicalFilterName(\"envoy.buffer\", &runtime));\n  EXPECT_EQ(HttpFilterNames::get().Squash,\n            FilterNameUtil::canonicalFilterName(\"envoy.squash\", &runtime));\n}\n\n// Test that deprecated names trigger an exception if the deprecated name feature is disabled.\nTEST(FilterNameUtilTest, TestDeprecatedNameThrows) {\n  NiceMock<Runtime::MockLoader> runtime;\n\n  EXPECT_CALL(\n      runtime.snapshot_,\n      deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n      .WillRepeatedly(Return(false));\n\n  EXPECT_THROW_WITH_REGEX(FilterNameUtil::canonicalFilterName(\"envoy.buffer\", &runtime),\n                          EnvoyException,\n                          \"Using 
deprecated http filter extension name 'envoy.buffer' .*\");\n  EXPECT_THROW_WITH_REGEX(FilterNameUtil::canonicalFilterName(\"envoy.squash\", &runtime),\n                          EnvoyException,\n                          \"Using deprecated http filter extension name 'envoy.squash' .*\");\n}\n\n} // namespace\n} // namespace Common\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/compressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"compressor_filter_test\",\n    srcs = [\n        \"compressor_filter_test.cc\",\n    ],\n    extension_name = \"envoy.filters.http.compressor\",\n    deps = [\n        \"//source/extensions/filters/http/compressor:compressor_filter_lib\",\n        \"//test/mocks/compression/compressor:compressor_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"compressor_filter_integration_test\",\n    srcs = [\n        \"compressor_filter_integration_test.cc\",\n    ],\n    extension_name = \"envoy.filters.http.compressor\",\n    deps = [\n        \"//source/extensions/compression/gzip/compressor:config\",\n        \"//source/extensions/compression/gzip/decompressor:config\",\n        \"//source/extensions/filters/http/compressor:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"mock_config\",\n    srcs = [\"mock_compressor_library.proto\"],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\n        \"config_test.cc\",\n    ],\n    extension_name = \"envoy.filters.http.compressor\",\n    deps = [\n        \":mock_config_cc_proto\",\n        \"//source/extensions/filters/http/compressor:config\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/compressor/compressor_filter_integration_test.cc",
    "content": "#include \"envoy/event/timer.h\"\n\n#include \"extensions/compression/gzip/decompressor/zlib_decompressor_impl.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass CompressorIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                  public Event::SimulatedTimeSystem,\n                                  public HttpIntegrationTest {\npublic:\n  CompressorIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void SetUp() override { decompressor_.init(window_bits); }\n  void TearDown() override { cleanupUpstreamAndDownstream(); }\n\n  void initializeFilter(const std::string& config) {\n    config_helper_.addFilter(config);\n    initialize();\n    codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  }\n\n  void doRequestAndCompression(Http::TestRequestHeaderMapImpl&& request_headers,\n                               Http::TestResponseHeaderMapImpl&& response_headers) {\n    uint64_t content_length;\n    ASSERT_TRUE(absl::SimpleAtoi(response_headers.get_(\"content-length\"), &content_length));\n    const Buffer::OwnedImpl expected_response{std::string(content_length, 'a')};\n    auto response =\n        sendRequestAndWaitForResponse(request_headers, 0, response_headers, content_length);\n    EXPECT_TRUE(upstream_request_->complete());\n    EXPECT_EQ(0U, upstream_request_->bodyLength());\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n    EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip,\n              response->headers()\n                  .get(Http::CustomHeaders::get().ContentEncoding)\n                  ->value()\n                  .getStringView());\n    
EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked,\n              response->headers().getTransferEncodingValue());\n\n    Buffer::OwnedImpl decompressed_response{};\n    const Buffer::OwnedImpl compressed_response{response->body()};\n    decompressor_.decompress(compressed_response, decompressed_response);\n    ASSERT_EQ(content_length, decompressed_response.length());\n    EXPECT_TRUE(TestUtility::buffersEqual(expected_response, decompressed_response));\n  }\n\n  void doRequestAndNoCompression(Http::TestRequestHeaderMapImpl&& request_headers,\n                                 Http::TestResponseHeaderMapImpl&& response_headers) {\n    uint64_t content_length;\n    ASSERT_TRUE(absl::SimpleAtoi(response_headers.get_(\"content-length\"), &content_length));\n    auto response =\n        sendRequestAndWaitForResponse(request_headers, 0, response_headers, content_length);\n    EXPECT_TRUE(upstream_request_->complete());\n    EXPECT_EQ(0U, upstream_request_->bodyLength());\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n    ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr);\n    ASSERT_EQ(content_length, response->body().size());\n    EXPECT_EQ(response->body(), std::string(content_length, 'a'));\n  }\n\n  const std::string full_config{R\"EOF(\n      name: compressor\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor\n        disable_on_etag_header: true\n        content_length: 100\n        content_type:\n          - text/html\n          - application/json\n        compressor_library:\n          name: testlib\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\n            memory_level: 3\n            window_bits: 10\n            compression_level: best_compression\n            compression_strategy: rle\n    )EOF\"};\n\n  const 
std::string default_config{R\"EOF(\n      name: envoy.filters.http.compressor\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor\n        compressor_library:\n          name: testlib\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip\n    )EOF\"};\n\n  const uint64_t window_bits{15 | 16};\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Extensions::Compression::Gzip::Decompressor::ZlibDecompressorImpl decompressor_{stats_store_,\n                                                                                  \"test\"};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, CompressorIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n/**\n * Exercises gzip compression with default configuration.\n */\nTEST_P(CompressorIntegrationTest, AcceptanceDefaultConfigTest) {\n  initializeFilter(default_config);\n  doRequestAndCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                         {\":path\", \"/test/long/url\"},\n                                                         {\":scheme\", \"http\"},\n                                                         {\":authority\", \"host\"},\n                                                         {\"accept-encoding\", \"deflate, gzip\"}},\n                          Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                          {\"content-length\", \"4400\"},\n                                                          {\"content-type\", \"text/xml\"}});\n}\n\n/**\n * Exercises gzip compression with full configuration.\n */\nTEST_P(CompressorIntegrationTest, AcceptanceFullConfigTest) {\n  initializeFilter(full_config);\n  
doRequestAndCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                         {\":path\", \"/test/long/url\"},\n                                                         {\":scheme\", \"http\"},\n                                                         {\":authority\", \"host\"},\n                                                         {\"accept-encoding\", \"deflate, gzip\"}},\n                          Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                          {\"content-length\", \"4400\"},\n                                                          {\"content-type\", \"application/json\"}});\n}\n\n/**\n * Exercises filter when client request contains 'identity' type.\n */\nTEST_P(CompressorIntegrationTest, IdentityAcceptEncoding) {\n  initializeFilter(default_config);\n  doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                           {\":path\", \"/test/long/url\"},\n                                                           {\":scheme\", \"http\"},\n                                                           {\":authority\", \"host\"},\n                                                           {\"accept-encoding\", \"identity\"}},\n                            Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                            {\"content-length\", \"128\"},\n                                                            {\"content-type\", \"text/plain\"}});\n}\n\n/**\n * Exercises filter when client request contains unsupported 'accept-encoding' type.\n */\nTEST_P(CompressorIntegrationTest, NotSupportedAcceptEncoding) {\n  initializeFilter(default_config);\n  doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                           {\":path\", 
\"/test/long/url\"},\n                                                           {\":scheme\", \"http\"},\n                                                           {\":authority\", \"host\"},\n                                                           {\"accept-encoding\", \"deflate, br\"}},\n                            Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                            {\"content-length\", \"128\"},\n                                                            {\"content-type\", \"text/plain\"}});\n}\n\n/**\n * Exercises filter when upstream response is already encoded.\n */\nTEST_P(CompressorIntegrationTest, UpstreamResponseAlreadyEncoded) {\n  initializeFilter(default_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-encoding\", \"br\"},\n                                                   {\"content-length\", \"128\"},\n                                                   {\"content-type\", \"application/json\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 128);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  ASSERT_EQ(\n      \"br\",\n      response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView());\n  EXPECT_EQ(128U, response->body().size());\n}\n\n/**\n * Exercises filter 
when upstream responds with content length below the default threshold.\n */\nTEST_P(CompressorIntegrationTest, NotEnoughContentLength) {\n  initializeFilter(default_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"}, {\"content-length\", \"10\"}, {\"content-type\", \"application/json\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 10);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr);\n  EXPECT_EQ(10U, response->body().size());\n}\n\n/**\n * Exercises filter when response from upstream service is empty.\n */\nTEST_P(CompressorIntegrationTest, EmptyResponse) {\n  initializeFilter(default_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"204\"}, {\"content-length\", \"0\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 0);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  
EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"204\", response->headers().getStatusValue());\n  ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr);\n  EXPECT_EQ(0U, response->body().size());\n}\n\n/**\n * Exercises filter when upstream responds with restricted content-type value.\n */\nTEST_P(CompressorIntegrationTest, SkipOnContentType) {\n  initializeFilter(full_config);\n  doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                           {\":path\", \"/test/long/url\"},\n                                                           {\":scheme\", \"http\"},\n                                                           {\":authority\", \"host\"},\n                                                           {\"accept-encoding\", \"deflate, gzip\"}},\n                            Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                            {\"content-length\", \"128\"},\n                                                            {\"content-type\", \"application/xml\"}});\n}\n\n/**\n * Exercises filter when upstream responds with restricted cache-control value.\n */\nTEST_P(CompressorIntegrationTest, SkipOnCacheControl) {\n  initializeFilter(full_config);\n  doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                           {\":path\", \"/test/long/url\"},\n                                                           {\":scheme\", \"http\"},\n                                                           {\":authority\", \"host\"},\n                                                           {\"accept-encoding\", \"deflate, gzip\"}},\n                            Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                            
{\"content-length\", \"128\"},\n                                                            {\"cache-control\", \"no-transform\"},\n                                                            {\"content-type\", \"application/json\"}});\n}\n\n/**\n * Exercises gzip compression when upstream returns a chunked response.\n */\nTEST_P(CompressorIntegrationTest, AcceptanceFullConfigChunkedResponse) {\n  initializeFilter(full_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-type\", \"application/json\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 1024);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  ASSERT_EQ(\n      \"gzip\",\n      response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView());\n  ASSERT_EQ(\"chunked\", response->headers().getTransferEncodingValue());\n}\n\n/**\n * Verify Vary header values are preserved.\n */\nTEST_P(CompressorIntegrationTest, AcceptanceFullConfigVaryHeader) {\n  initializeFilter(default_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n            
                                     {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"}, {\"content-type\", \"application/json\"}, {\"vary\", \"Cookie\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 1024);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  ASSERT_EQ(\n      \"gzip\",\n      response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView());\n  ASSERT_EQ(\"Cookie, Accept-Encoding\",\n            response->headers().get(Http::CustomHeaders::get().Vary)->value().getStringView());\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/compressor/compressor_filter_test.cc",
    "content": "#include \"extensions/filters/http/compressor/compressor_filter.h\"\n\n#include \"test/mocks/compression/compressor/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Compressor {\nnamespace {\n\nusing testing::NiceMock;\n\nTEST(CompressorFilterConfigTests, MakeCompressorTest) {\n  const envoy::extensions::filters::http::compressor::v3::Compressor compressor_cfg;\n  NiceMock<Runtime::MockLoader> runtime;\n  Stats::TestUtil::TestStore stats;\n  auto compressor_factory(std::make_unique<Compression::Compressor::MockCompressorFactory>());\n  EXPECT_CALL(*compressor_factory, createCompressor()).Times(1);\n  EXPECT_CALL(*compressor_factory, statsPrefix()).Times(1);\n  EXPECT_CALL(*compressor_factory, contentEncoding()).Times(1);\n  CompressorFilterConfig config(compressor_cfg, \"test.compressor.\", stats, runtime,\n                                std::move(compressor_factory));\n  Envoy::Compression::Compressor::CompressorPtr compressor = config.makeCompressor();\n}\n\n} // namespace\n} // namespace Compressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/compressor/config_test.cc",
    "content": "#include \"extensions/filters/http/compressor/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Compressor {\nnamespace {\n\nusing testing::NiceMock;\n\nTEST(CompressorFilterFactoryTests, MissingCompressorLibraryConfig) {\n  const envoy::extensions::filters::http::compressor::v3::Compressor proto_config;\n  CompressorFilterFactory factory;\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, \"stats\", context),\n                            EnvoyException,\n                            \"Compressor filter doesn't have compressor_library defined\");\n}\n\nTEST(CompressorFilterFactoryTests, UnregisteredCompressorLibraryConfig) {\n  const std::string yaml_string = R\"EOF(\n  compressor_library:\n    name: fake_compressor\n    typed_config:\n      \"@type\": type.googleapis.com/test.mock_compressor_library.Unregistered\n  )EOF\";\n\n  envoy::extensions::filters::http::compressor::v3::Compressor proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  CompressorFilterFactory factory;\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, \"stats\", context),\n                            EnvoyException,\n                            \"Didn't find a registered implementation for type: \"\n                            \"'test.mock_compressor_library.Unregistered'\");\n}\n\n} // namespace\n} // namespace Compressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/compressor/mock_compressor_library.proto",
    "content": "syntax = \"proto3\";\n\npackage test.mock_compressor_library;\n\nmessage Unregistered {\n}"
  },
  {
    "path": "test/extensions/filters/http/cors/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"cors_filter_test\",\n    srcs = [\"cors_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.cors\",\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/cors:config\",\n        \"//source/extensions/filters/http/cors:cors_filter_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"cors_filter_integration_test\",\n    srcs = [\"cors_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.cors\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/cors:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/cors/cors_filter_integration_test.cc",
    "content": "#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass CorsFilterIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                  public HttpIntegrationTest {\npublic:\n  CorsFilterIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void initialize() override {\n    config_helper_.addFilter(\"name: envoy.filters.http.cors\");\n    config_helper_.addConfigModifier(\n        [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) -> void {\n          auto* route_config = hcm.mutable_route_config();\n          auto* virtual_host = route_config->mutable_virtual_hosts(0);\n          {\n            auto* cors = virtual_host->mutable_cors();\n            cors->add_hidden_envoy_deprecated_allow_origin(\"*\");\n            cors->set_allow_headers(\"content-type,x-grpc-web\");\n            cors->set_allow_methods(\"GET,POST\");\n          }\n\n          {\n            auto* route = virtual_host->mutable_routes(0);\n            route->mutable_match()->set_prefix(\"/cors-vhost-config\");\n          }\n\n          {\n            auto* route = virtual_host->add_routes();\n            route->mutable_match()->set_prefix(\"/no-cors\");\n            route->mutable_route()->set_cluster(\"cluster_0\");\n            route->mutable_route()\n                ->mutable_cors()\n                ->mutable_filter_enabled()\n                ->mutable_default_value()\n                ->set_numerator(0);\n          }\n\n          {\n            auto* route = virtual_host->add_routes();\n            route->mutable_match()->set_prefix(\"/cors-route-config\");\n            
route->mutable_route()->set_cluster(\"cluster_0\");\n            auto* cors = route->mutable_route()->mutable_cors();\n            cors->add_hidden_envoy_deprecated_allow_origin(\"test-origin-1\");\n            cors->add_hidden_envoy_deprecated_allow_origin(\"test-host-2\");\n            cors->set_allow_headers(\"content-type\");\n            cors->set_allow_methods(\"POST\");\n            cors->set_max_age(\"100\");\n          }\n\n          {\n            // TODO(mattklein123): When deprecated config is removed, remove DEPRECATED_FEATURE_TEST\n            // from all tests below.\n            auto* route = virtual_host->add_routes();\n            route->mutable_match()->set_prefix(\"/cors-credentials-allowed\");\n            route->mutable_route()->set_cluster(\"cluster_0\");\n            auto* cors = route->mutable_route()->mutable_cors();\n            cors->add_hidden_envoy_deprecated_allow_origin(\"test-origin-1\");\n            cors->mutable_allow_credentials()->set_value(true);\n          }\n\n          {\n            auto* route = virtual_host->add_routes();\n            route->mutable_match()->set_prefix(\"/cors-allow-origin-regex\");\n            route->mutable_route()->set_cluster(\"cluster_0\");\n            auto* cors = route->mutable_route()->mutable_cors();\n            auto* safe_regex =\n                cors->mutable_allow_origin_string_match()->Add()->mutable_safe_regex();\n            safe_regex->mutable_google_re2();\n            safe_regex->set_regex(\".*\\\\.envoyproxy\\\\.io\");\n          }\n\n          {\n            auto* route = virtual_host->add_routes();\n            route->mutable_match()->set_prefix(\"/cors-expose-headers\");\n            route->mutable_route()->set_cluster(\"cluster_0\");\n            auto* cors = route->mutable_route()->mutable_cors();\n            cors->add_hidden_envoy_deprecated_allow_origin(\"test-origin-1\");\n            cors->set_expose_headers(\"custom-header-1,custom-header-2\");\n          }\n        });\n 
   config_helper_.addRuntimeOverride(\"envoy.deprecated_features:envoy.config.route.v3.CorsPolicy.\"\n                                      \"hidden_envoy_deprecated_allow_origin\",\n                                      \"true\");\n    HttpIntegrationTest::initialize();\n  }\n\nprotected:\n  void testPreflight(Http::TestRequestHeaderMapImpl&& request_headers,\n                     Http::TestResponseHeaderMapImpl&& expected_response_headers) {\n    initialize();\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n    auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    response->waitForEndStream();\n    EXPECT_TRUE(response->complete());\n    compareHeaders(response->headers(), expected_response_headers);\n  }\n\n  void testNormalRequest(Http::TestRequestHeaderMapImpl&& request_headers,\n                         Http::TestResponseHeaderMapImpl&& expected_response_headers) {\n    initialize();\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n    auto response = sendRequestAndWaitForResponse(request_headers, 0, expected_response_headers, 0);\n\n    EXPECT_TRUE(response->complete());\n    compareHeaders(response->headers(), expected_response_headers);\n  }\n\n  void compareHeaders(Http::TestResponseHeaderMapImpl&& response_headers,\n                      Http::TestResponseHeaderMapImpl& expected_response_headers) {\n    response_headers.remove(Envoy::Http::LowerCaseString{\"date\"});\n    response_headers.remove(Envoy::Http::LowerCaseString{\"x-envoy-upstream-service-time\"});\n    EXPECT_EQ(expected_response_headers, response_headers);\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, CorsFilterIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestVHostConfigSuccess)) {\n  
config_helper_.addRuntimeOverride(\"envoy.deprecated_features:envoy.config.route.v3.\"\n                                    \"CorsPolicy.hidden_envoy_deprecated_enabled\",\n                                    \"true\");\n  testPreflight(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"OPTIONS\"},\n          {\":path\", \"/cors-vhost-config/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"test-host\"},\n          {\"access-control-request-method\", \"GET\"},\n          {\"origin\", \"test-origin\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"access-control-allow-origin\", \"test-origin\"},\n          {\"access-control-allow-methods\", \"GET,POST\"},\n          {\"access-control-allow-headers\", \"content-type,x-grpc-web\"},\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n      });\n}\n\nTEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestRouteConfigSuccess)) {\n  testPreflight(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"OPTIONS\"},\n          {\":path\", \"/cors-route-config/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"test-host\"},\n          {\"access-control-request-method\", \"GET\"},\n          {\"origin\", \"test-origin-1\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"access-control-allow-origin\", \"test-origin-1\"},\n          {\"access-control-allow-methods\", \"POST\"},\n          {\"access-control-allow-headers\", \"content-type\"},\n          {\"access-control-max-age\", \"100\"},\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n      });\n}\n\nTEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestRouteConfigBadOrigin)) {\n  config_helper_.addRuntimeOverride(\"envoy.deprecated_features:envoy.config.route.v3.\"\n                                    
\"CorsPolicy.hidden_envoy_deprecated_enabled\",\n                                    \"true\");\n  testNormalRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"OPTIONS\"},\n          {\":path\", \"/cors-route-config/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"test-host\"},\n          {\"access-control-request-method\", \"GET\"},\n          {\"origin\", \"test-origin\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n      });\n}\n\nTEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestCorsDisabled)) {\n  testNormalRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"OPTIONS\"},\n          {\":path\", \"/no-cors/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"test-host\"},\n          {\"access-control-request-method\", \"GET\"},\n          {\"origin\", \"test-origin\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n      });\n}\n\nTEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestLegacyCorsDisabled)) {\n  config_helper_.addRuntimeOverride(\"envoy.deprecated_features:envoy.config.route.v3.\"\n                                    \"CorsPolicy.hidden_envoy_deprecated_enabled\",\n                                    \"true\");\n\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* route_config = hcm.mutable_route_config();\n        auto* virtual_host = route_config->mutable_virtual_hosts(0);\n        auto* route = virtual_host->add_routes();\n        route->mutable_match()->set_prefix(\"/legacy-no-cors\");\n        route->mutable_route()->set_cluster(\"cluster_0\");\n        
route->mutable_route()\n            ->mutable_cors()\n            ->mutable_hidden_envoy_deprecated_enabled()\n            ->set_value(false);\n      });\n  testNormalRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"OPTIONS\"},\n          {\":path\", \"/legacy-no-cors/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"test-host\"},\n          {\"access-control-request-method\", \"GET\"},\n          {\"origin\", \"test-origin\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n      });\n}\n\nTEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeaders)) {\n  testNormalRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/cors-vhost-config/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"test-host\"},\n          {\"origin\", \"test-origin\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"access-control-allow-origin\", \"test-origin\"},\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n      });\n}\n\nTEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeadersCredentialsAllowed)) {\n  testNormalRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/cors-credentials-allowed/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"test-host\"},\n          {\"origin\", \"test-origin\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"access-control-allow-origin\", \"test-origin\"},\n          {\"access-control-allow-credentials\", \"true\"},\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n      });\n}\n\nTEST_P(CorsFilterIntegrationTest, 
DEPRECATED_FEATURE_TEST(TestAllowedOriginRegex)) {\n  testNormalRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/cors-allow-origin-regex/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"test-host\"},\n          {\"origin\", \"www.envoyproxy.io\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"access-control-allow-origin\", \"www.envoyproxy.io\"},\n          {\"access-control-allow-credentials\", \"true\"},\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n      });\n}\n\nTEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestExposeHeaders)) {\n  testNormalRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/cors-expose-headers/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"test-host\"},\n          {\"origin\", \"test-origin-1\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"access-control-allow-origin\", \"test-origin-1\"},\n          {\"access-control-expose-headers\", \"custom-header-1,custom-header-2\"},\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n      });\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/cors/cors_filter_test.cc",
    "content": "#include \"envoy/type/matcher/v3/string.pb.h\"\n\n#include \"common/common/matchers.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"extensions/filters/http/cors/cors_filter.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Cors {\nnamespace {\n\nMatchers::StringMatcherPtr makeExactStringMatcher(const std::string& exact_match) {\n  envoy::type::matcher::v3::StringMatcher config;\n  config.set_exact(exact_match);\n  return std::make_unique<Matchers::StringMatcherImpl>(config);\n}\n\nMatchers::StringMatcherPtr makeStdRegexStringMatcher(const std::string& regex) {\n  envoy::type::matcher::v3::StringMatcher config;\n  config.set_hidden_envoy_deprecated_regex(regex);\n  return std::make_unique<Matchers::StringMatcherImpl>(config);\n}\n\n} // namespace\n\nclass CorsFilterTest : public testing::Test {\npublic:\n  CorsFilterTest() : config_(new CorsFilterConfig(\"test.\", stats_)), filter_(config_) {\n    cors_policy_ = std::make_unique<Router::TestCorsPolicy>();\n    cors_policy_->enabled_ = true;\n    cors_policy_->shadow_enabled_ = false;\n    cors_policy_->allow_origins_.emplace_back(makeExactStringMatcher(\"*\"));\n    cors_policy_->allow_methods_ = \"GET\";\n    cors_policy_->allow_headers_ = \"content-type\";\n    cors_policy_->expose_headers_ = \"content-type\";\n    cors_policy_->allow_credentials_ = false;\n    cors_policy_->max_age_ = \"0\";\n\n    ON_CALL(decoder_callbacks_.route_->route_entry_, corsPolicy())\n        .WillByDefault(Return(cors_policy_.get()));\n\n    ON_CALL(decoder_callbacks_.route_->route_entry_.virtual_host_, corsPolicy())\n        .WillByDefault(Return(cors_policy_.get()));\n\n    
filter_.setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_.setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  bool IsCorsRequest() { return filter_.is_cors_request_; }\n\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  Stats::TestUtil::TestStore stats_;\n  CorsFilterConfigSharedPtr config_;\n  CorsFilter filter_;\n  Buffer::OwnedImpl data_;\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Http::TestRequestTrailerMapImpl request_trailers_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n  std::unique_ptr<Router::TestCorsPolicy> cors_policy_;\n  Router::MockDirectResponseEntry direct_response_entry_;\n};\n\nTEST_F(CorsFilterTest, RequestWithoutOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"get\"}};\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(false, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, RequestWithOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", 
\"get\"}, {\"origin\", \"localhost\"}};\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestWithoutOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"OPTIONS\"}};\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(false, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestWithOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}};\n\n  EXPECT_CALL(decoder_callbacks_, 
encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestWithOriginCorsDisabled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}};\n\n  cors_policy_->enabled_ = false;\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_valid\").value());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestWithOriginCorsDisabledShadowDisabled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}};\n\n  cors_policy_->enabled_ = false;\n\n  
EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_valid\").value());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestWithOriginCorsDisabledShadowEnabled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}};\n\n  cors_policy_->enabled_ = false;\n  cors_policy_->shadow_enabled_ = true;\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestWithOriginCorsEnabled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}};\n\n  
EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestWithoutAccessRequestMethod) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}};\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestMatchingOriginByWildcard) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"test-host\"}, 
{\"access-control-request-method\", \"GET\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      {\"access-control-allow-origin\", \"test-host\"},\n      {\"access-control-allow-methods\", \"GET\"},\n      {\"access-control-allow-headers\", \"content-type\"},\n      {\"access-control-max-age\", \"0\"},\n  };\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n  ASSERT_TRUE(decoder_callbacks_.stream_info_.responseCodeDetails().has_value());\n  EXPECT_EQ(decoder_callbacks_.stream_info_.responseCodeDetails().value(), \"cors_response\");\n}\n\nTEST_F(CorsFilterTest, OptionsRequestWithOriginCorsEnabledShadowDisabled) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"test-host\"}, {\"access-control-request-method\", \"GET\"}};\n\n  cors_policy_->enabled_ = true;\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      {\"access-control-allow-origin\", \"test-host\"},\n      {\"access-control-allow-methods\", \"GET\"},\n      {\"access-control-allow-headers\", \"content-type\"},\n      {\"access-control-max-age\", \"0\"},\n  };\n  
EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestWithOriginCorsEnabledShadowEnabled) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"test-host\"}, {\"access-control-request-method\", \"GET\"}};\n\n  cors_policy_->shadow_enabled_ = true;\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      {\"access-control-allow-origin\", \"test-host\"},\n      {\"access-control-allow-methods\", \"GET\"},\n      {\"access-control-allow-headers\", \"content-type\"},\n      {\"access-control-max-age\", \"0\"},\n  };\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestNotMatchingOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"test-host\"}, {\"access-control-request-method\", \"GET\"}};\n\n  cors_policy_->allow_origins_.clear();\n  cors_policy_->allow_origins_.emplace_back(makeExactStringMatcher(\"localhost\"));\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(false, IsCorsRequest());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestEmptyOriginList) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"test-host\"}, {\"access-control-request-method\", \"GET\"}};\n\n  cors_policy_->allow_origins_.clear();\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(false, IsCorsRequest());\n  EXPECT_EQ(1, 
stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, ValidOptionsRequestWithAllowCredentialsTrue) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}, {\"access-control-request-method\", \"GET\"}};\n\n  cors_policy_->allow_credentials_ = true;\n  cors_policy_->allow_origins_.clear();\n  cors_policy_->allow_origins_.emplace_back(makeExactStringMatcher(\"localhost\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      {\"access-control-allow-origin\", \"localhost\"},\n      {\"access-control-allow-credentials\", \"true\"},\n      {\"access-control-allow-methods\", \"GET\"},\n      {\"access-control-allow-headers\", \"content-type\"},\n      {\"access-control-max-age\", \"0\"},\n  };\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, 
filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, ValidOptionsRequestWithAllowCredentialsFalse) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}, {\"access-control-request-method\", \"GET\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      {\"access-control-allow-origin\", \"localhost\"},\n      {\"access-control-allow-methods\", \"GET\"},\n      {\"access-control-allow-headers\", \"content-type\"},\n      {\"access-control-max-age\", \"0\"},\n  };\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, EncodeWithCorsDisabled) {\n  cors_policy_->enabled_ = false;\n  cors_policy_->shadow_enabled_ = false;\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-credentials\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, EncodeNonCorsRequest) {\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-credentials\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, EncodeWithAllowCredentialsTrue) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"origin\", \"localhost\"}};\n  cors_policy_->allow_credentials_ = true;\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            
filter_.encode100ContinueHeaders(continue_headers));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"localhost\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"true\", response_headers.get_(\"access-control-allow-credentials\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, EncodeWithExposeHeaders) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"origin\", \"localhost\"}};\n  cors_policy_->expose_headers_ = \"custom-header-1\";\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_.encode100ContinueHeaders(continue_headers));\n\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.encodeMetadata(metadata_map));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"localhost\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"custom-header-1\", response_headers.get_(\"access-control-expose-headers\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, EncodeWithAllowCredentialsFalse) {\n  Http::TestRequestHeaderMapImpl 
request_headers{{\"origin\", \"localhost\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"localhost\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-credentials\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, EncodeWithNonMatchingOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"origin\", \"test-host\"}};\n\n  cors_policy_->allow_origins_.clear();\n  cors_policy_->allow_origins_.emplace_back(makeExactStringMatcher(\"localhost\"));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-credentials\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, RedirectRoute) {\n  ON_CALL(*decoder_callbacks_.route_, directResponseEntry())\n      
.WillByDefault(Return(&direct_response_entry_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  EXPECT_EQ(false, IsCorsRequest());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-credentials\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, EmptyRoute) {\n  ON_CALL(decoder_callbacks_, route()).WillByDefault(Return(nullptr));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-credentials\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, EmptyRouteEntry) {\n  ON_CALL(*decoder_callbacks_.route_, routeEntry()).WillByDefault(Return(nullptr));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  
EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-credentials\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, NoCorsEntry) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}, {\"access-control-request-method\", \"GET\"}};\n\n  ON_CALL(decoder_callbacks_.route_->route_entry_, corsPolicy()).WillByDefault(Return(nullptr));\n\n  ON_CALL(decoder_callbacks_.route_->route_entry_.virtual_host_, corsPolicy())\n      .WillByDefault(Return(nullptr));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(false, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-origin\"));\n  EXPECT_EQ(\"\", response_headers.get_(\"access-control-allow-credentials\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, NoRouteCorsEntry) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}, {\"access-control-request-method\", \"GET\"}};\n\n  ON_CALL(decoder_callbacks_.route_->route_entry_, corsPolicy()).WillByDefault(Return(nullptr));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      {\"access-control-allow-origin\", \"localhost\"},\n      {\"access-control-allow-methods\", \"GET\"},\n      {\"access-control-allow-headers\", \"content-type\"},\n      {\"access-control-max-age\", \"0\"},\n  };\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, NoVHostCorsEntry) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"OPTIONS\"}, {\"origin\", \"localhost\"}, {\"access-control-request-method\", \"GET\"}};\n\n  cors_policy_->allow_methods_ = \"\";\n\n  ON_CALL(decoder_callbacks_.route_->route_entry_.virtual_host_, corsPolicy())\n      .WillByDefault(Return(nullptr));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      
{\"access-control-allow-origin\", \"localhost\"},\n      {\"access-control-allow-headers\", \"content-type\"},\n      {\"access-control-max-age\", \"0\"},\n  };\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestMatchingOriginByRegex) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"OPTIONS\"},\n                                                 {\"origin\", \"www.envoyproxy.io\"},\n                                                 {\"access-control-request-method\", \"GET\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      {\"access-control-allow-origin\", \"www.envoyproxy.io\"},\n      {\"access-control-allow-methods\", \"GET\"},\n      {\"access-control-allow-headers\", \"content-type\"},\n      {\"access-control-max-age\", \"0\"},\n  };\n\n  cors_policy_->allow_origins_.clear();\n  cors_policy_->allow_origins_.emplace_back(makeStdRegexStringMatcher(\".*\"));\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  
EXPECT_EQ(true, IsCorsRequest());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(CorsFilterTest, OptionsRequestNotMatchingOriginByRegex) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"OPTIONS\"},\n                                                 {\"origin\", \"www.envoyproxy.com\"},\n                                                 {\"access-control-request-method\", \"GET\"}};\n\n  cors_policy_->allow_origins_.clear();\n  cors_policy_->allow_origins_.emplace_back(makeStdRegexStringMatcher(\".*.envoyproxy.io\"));\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(false, IsCorsRequest());\n  EXPECT_EQ(1, stats_.counter(\"test.cors.origin_invalid\").value());\n  EXPECT_EQ(0, stats_.counter(\"test.cors.origin_valid\").value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\n// Test that the deprecated extension name still 
functions.\nTEST(CorsFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.cors\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace Cors\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/csrf/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"csrf_filter_test\",\n    srcs = [\"csrf_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.csrf\",\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/csrf:config\",\n        \"//source/extensions/filters/http/csrf:csrf_filter_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/csrf/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"csrf_filter_integration_test\",\n    srcs = [\"csrf_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.csrf\",\n    deps = [\n        \"//source/extensions/filters/http/csrf:config\",\n        \"//test/config:utility_lib\",\n        \"//test/integration:http_protocol_integration_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/csrf/csrf_filter_integration_test.cc",
    "content": "#include \"test/integration/http_protocol_integration.h\"\n\nnamespace Envoy {\nnamespace {\nconst std::string CSRF_ENABLED_CONFIG = R\"EOF(\nname: csrf\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.csrf.v2.CsrfPolicy\n  filter_enabled:\n    default_value:\n      numerator: 100\n      denominator: HUNDRED\n  shadow_enabled:\n    default_value:\n      numerator: 100\n      denominator: HUNDRED\n)EOF\";\n\nconst std::string CSRF_FILTER_ENABLED_CONFIG = R\"EOF(\nname: csrf\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.csrf.v2.CsrfPolicy\n  filter_enabled:\n    default_value:\n      numerator: 100\n      denominator: HUNDRED\n)EOF\";\n\nconst std::string CSRF_SHADOW_ENABLED_CONFIG = R\"EOF(\nname: csrf\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.csrf.v2.CsrfPolicy\n  filter_enabled:\n    default_value:\n      numerator: 0\n      denominator: HUNDRED\n  shadow_enabled:\n    default_value:\n      numerator: 100\n      denominator: HUNDRED\n)EOF\";\n\nconst std::string CSRF_DISABLED_CONFIG = R\"EOF(\nname: csrf\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.csrf.v2.CsrfPolicy\n  filter_enabled:\n    default_value:\n      numerator: 0\n      denominator: HUNDRED\n)EOF\";\n\nclass CsrfFilterIntegrationTest : public HttpProtocolIntegrationTest {\nprotected:\n  IntegrationStreamDecoderPtr\n  sendRequestAndWaitForResponse(Http::RequestHeaderMap& request_headers) {\n    initialize();\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n    auto response = codec_client_->makeRequestWithBody(request_headers, 1024);\n    waitForNextUpstreamRequest();\n    upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n    response->waitForEndStream();\n\n    return response;\n  }\n\n  IntegrationStreamDecoderPtr sendRequest(Http::TestRequestHeaderMapImpl& request_headers) {\n    initialize();\n    
codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n    auto response = codec_client_->makeRequestWithBody(request_headers, 1024);\n    response->waitForEndStream();\n\n    return response;\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(Protocols, CsrfFilterIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nTEST_P(CsrfFilterIntegrationTest, TestCsrfSuccess) {\n  config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\n      {\":method\", \"PUT\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\"origin\", \"http://localhost\"},\n      {\"host\", \"localhost\"},\n  }};\n  const auto& response = sendRequestAndWaitForResponse(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), \"200\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestCsrfDisabled) {\n  config_helper_.addFilter(CSRF_DISABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\n      {\":method\", \"PUT\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\"origin\", \"http://cross-origin\"},\n      {\"host\", \"test-origin\"},\n  }};\n  const auto& response = sendRequestAndWaitForResponse(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), \"200\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestNonMutationMethod) {\n  config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\"origin\", \"http://cross-origin\"},\n      {\"host\", \"test-origin\"},\n  }};\n  const auto& response = sendRequestAndWaitForResponse(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), 
\"200\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestOriginMismatch) {\n  config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\n      {\":method\", \"PUT\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\"origin\", \"http://cross-origin\"},\n      {\"host\", \"test-origin\"},\n  }};\n  const auto& response = sendRequest(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), \"403\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestEnforcesPost) {\n  config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\n      {\":method\", \"POST\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\"origin\", \"http://cross-origin\"},\n      {\"host\", \"test-origin\"},\n  }};\n  const auto& response = sendRequest(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), \"403\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestEnforcesDelete) {\n  config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\n      {\":method\", \"DELETE\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\"origin\", \"http://cross-origin\"},\n      {\"host\", \"test-origin\"},\n  }};\n  const auto& response = sendRequest(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), \"403\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestEnforcesPatch) {\n  config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\n      {\":method\", \"PATCH\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\"origin\", \"http://cross-origin\"},\n      {\"host\", \"test-origin\"},\n  }};\n  const auto& response = sendRequest(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), 
\"403\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestRefererFallback) {\n  config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\":method\", \"DELETE\"},\n                                            {\":path\", \"/\"},\n                                            {\":scheme\", \"http\"},\n                                            {\"referer\", \"http://test-origin\"},\n                                            {\"host\", \"test-origin\"}};\n  const auto& response = sendRequestAndWaitForResponse(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), \"200\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestMissingOrigin) {\n  config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {\n      {{\":method\", \"DELETE\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\"host\", \"test-origin\"}}};\n  const auto& response = sendRequest(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), \"403\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestShadowOnlyMode) {\n  config_helper_.addFilter(CSRF_SHADOW_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\n      {\":method\", \"PUT\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\"origin\", \"http://cross-origin\"},\n      {\"host\", \"localhost\"},\n  }};\n  const auto& response = sendRequestAndWaitForResponse(headers);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), \"200\");\n}\n\nTEST_P(CsrfFilterIntegrationTest, TestFilterAndShadowEnabled) {\n  config_helper_.addFilter(CSRF_ENABLED_CONFIG);\n  Http::TestRequestHeaderMapImpl headers = {{\n      {\":method\", \"PUT\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\"origin\", \"http://cross-origin\"},\n      {\"host\", \"localhost\"},\n  }};\n  const auto& response = sendRequest(headers);\n  
EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->headers().getStatusValue(), \"403\");\n}\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/csrf/csrf_filter_test.cc",
    "content": "#include \"envoy/extensions/filters/http/csrf/v3/csrf.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/http/header_map_impl.h\"\n\n#include \"extensions/filters/http/csrf/csrf_filter.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Csrf {\n\nclass CsrfFilterTest : public testing::Test {\npublic:\n  CsrfFilterConfigSharedPtr setupConfig() {\n    envoy::extensions::filters::http::csrf::v3::CsrfPolicy policy;\n    const auto& filter_enabled = policy.mutable_filter_enabled();\n    filter_enabled->mutable_default_value()->set_numerator(100);\n    filter_enabled->mutable_default_value()->set_denominator(\n        envoy::type::v3::FractionalPercent::HUNDRED);\n    filter_enabled->set_runtime_key(\"csrf.enabled\");\n\n    const auto& shadow_enabled = policy.mutable_shadow_enabled();\n    shadow_enabled->mutable_default_value()->set_numerator(0);\n    shadow_enabled->mutable_default_value()->set_denominator(\n        envoy::type::v3::FractionalPercent::HUNDRED);\n    shadow_enabled->set_runtime_key(\"csrf.shadow_enabled\");\n\n    const auto& add_exact_origin = policy.mutable_additional_origins()->Add();\n    add_exact_origin->set_exact(\"additionalhost\");\n\n    const auto& add_regex_origin = policy.mutable_additional_origins()->Add();\n    add_regex_origin->set_hidden_envoy_deprecated_regex(R\"(www\\-[0-9]\\.allow\\.com)\");\n\n    return std::make_shared<CsrfFilterConfig>(policy, \"test\", stats_, runtime_);\n  }\n\n  CsrfFilterTest() : config_(setupConfig()), filter_(config_) {}\n\n  void SetUp() override {\n    setRoutePolicy(config_->policy());\n    setVirtualHostPolicy(config_->policy());\n\n    
setFilterEnabled(true);\n    setShadowEnabled(false);\n\n    filter_.setDecoderFilterCallbacks(decoder_callbacks_);\n  }\n\n  void setRoutePolicy(const CsrfPolicy* policy) {\n    ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(filter_name_))\n        .WillByDefault(Return(policy));\n  }\n\n  void setVirtualHostPolicy(const CsrfPolicy* policy) {\n    ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(filter_name_))\n        .WillByDefault(Return(policy));\n  }\n\n  void setFilterEnabled(bool enabled) {\n    ON_CALL(runtime_.snapshot_,\n            featureEnabled(\"csrf.enabled\",\n                           testing::Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n        .WillByDefault(Return(enabled));\n  }\n\n  void setShadowEnabled(bool enabled) {\n    ON_CALL(runtime_.snapshot_,\n            featureEnabled(\"csrf.shadow_enabled\",\n                           testing::Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n        .WillByDefault(Return(enabled));\n  }\n\n  const std::string filter_name_ = \"envoy.filters.http.csrf\";\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  Buffer::OwnedImpl data_;\n  Router::MockDirectResponseEntry direct_response_entry_;\n  Stats::IsolatedStoreImpl stats_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  CsrfFilterConfigSharedPtr config_;\n\n  CsrfFilter filter_;\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Http::TestRequestTrailerMapImpl request_trailers_;\n};\n\nTEST_F(CsrfFilterTest, RequestWithNonMutableMethod) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map));\n  
EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestWithoutOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(1U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestWithoutDestination) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://localhost\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(1U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestWithInvalidOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"PUT\"}, {\"origin\", \"http://cross-origin\"}, {\":authority\", \"localhost\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"403\"},\n      {\"content-length\", \"14\"},\n      {\"content-type\", \"text/plain\"},\n  };\n  
EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(1U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n  EXPECT_EQ(\"csrf_origin_mismatch\", decoder_callbacks_.details());\n}\n\nTEST_F(CsrfFilterTest, RequestWithInvalidOriginDifferentNonStandardPorts) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://localhost:90\"},\n                                                 {\":authority\", \"localhost:91\"},\n                                                 {\":scheme\", \"http\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"403\"},\n      {\"content-length\", \"14\"},\n      {\"content-type\", \"text/plain\"},\n  };\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(1U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n  EXPECT_EQ(\"csrf_origin_mismatch\", decoder_callbacks_.details());\n}\n\nTEST_F(CsrfFilterTest, RequestWithValidOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", 
\"PUT\"},\n                                                 {\"origin\", \"http://localhost\"},\n                                                 {\"host\", \"localhost\"},\n                                                 {\":scheme\", \"http\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(1U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestWithValidOriginNonStandardPort) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://localhost:88\"},\n                                                 {\"host\", \"localhost:88\"},\n                                                 {\":scheme\", \"http\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(1U, config_->stats().request_valid_.value());\n}\n\n// This works because gURL drops the port for hostAndPort() when they are standard\n// ports (e.g.: 80 & 443).\nTEST_F(CsrfFilterTest, RequestWithValidOriginHttpVsHttps) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"https://localhost\"},\n                                                 {\"host\", \"localhost\"},\n             
                                    {\":scheme\", \"http\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(1U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowDisabled) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"PUT\"}, {\"origin\", \"http://cross-origin\"}, {\"host\", \"localhost\"}};\n\n  setFilterEnabled(false);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowEnabled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://cross-origin\"},\n                                                 {\"host\", \"localhost\"},\n                                                 {\":scheme\", \"http\"}};\n\n  setFilterEnabled(false);\n  setShadowEnabled(true);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  
EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(1U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestWithValidOriginCsrfDisabledShadowEnabled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://localhost\"},\n                                                 {\"host\", \"localhost\"},\n                                                 {\":scheme\", \"http\"}};\n\n  setFilterEnabled(false);\n  setShadowEnabled(true);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(1U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfEnabledShadowEnabled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://cross-origin\"},\n                                                 {\"host\", \"localhost\"},\n                                                 {\":scheme\", \"http\"}};\n\n  setShadowEnabled(true);\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"403\"},\n      {\"content-length\", \"14\"},\n      {\"content-type\", \"text/plain\"},\n  };\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            
filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(1U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestWithValidOriginCsrfEnabledShadowEnabled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://localhost\"},\n                                                 {\"host\", \"localhost\"},\n                                                 {\":scheme\", \"http\"}};\n\n  setShadowEnabled(true);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(1U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RedirectRoute) {\n  ON_CALL(*decoder_callbacks_.route_, directResponseEntry())\n      .WillByDefault(Return(&direct_response_entry_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, EmptyRoute) {\n  
ON_CALL(decoder_callbacks_, route()).WillByDefault(Return(nullptr));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, EmptyRouteEntry) {\n  ON_CALL(*decoder_callbacks_.route_, routeEntry()).WillByDefault(Return(nullptr));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, NoCsrfEntry) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://cross-origin\"},\n                                                 {\"host\", \"localhost\"},\n                                                 {\":scheme\", \"http\"}};\n\n  setRoutePolicy(nullptr);\n  setVirtualHostPolicy(nullptr);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(1U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, 
config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, NoRouteCsrfEntry) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\"origin\", \"http://localhost\"}};\n\n  setRoutePolicy(nullptr);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(1U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, NoVHostCsrfEntry) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"DELETE\"},\n                                                 {\"origin\", \"http://localhost\"}};\n\n  setVirtualHostPolicy(nullptr);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(1U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestFromAdditionalExactOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://additionalhost\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(1U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestFromAdditionalRegexOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://www-1.allow.com\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(0U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(1U, config_->stats().request_valid_.value());\n}\n\nTEST_F(CsrfFilterTest, RequestFromInvalidAdditionalRegexOrigin) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"PUT\"},\n                                                 {\"origin\", \"http://www.allow.com\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0U, config_->stats().missing_source_origin_.value());\n  EXPECT_EQ(1U, config_->stats().request_invalid_.value());\n  EXPECT_EQ(0U, config_->stats().request_valid_.value());\n}\n\n// Test that the deprecated extension name still functions.\nTEST(CsrfFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.csrf\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n         
 deprecated_name));\n}\n\n} // namespace Csrf\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/decompressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"decompressor_filter_test\",\n    srcs = [\"decompressor_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.decompressor\",\n    deps = [\n        \"//source/common/http:headers_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/compression/gzip/decompressor:config\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/decompressor:config\",\n        \"//test/mocks/compression/decompressor:decompressor_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/decompressor/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"decompressor_filter_integration_test\",\n    srcs = [\n        \"decompressor_filter_integration_test.cc\",\n    ],\n    extension_name = \"envoy.filters.http.decompressor\",\n    deps = [\n        \"//source/extensions/compression/gzip/compressor:config\",\n        \"//source/extensions/compression/gzip/decompressor:config\",\n        \"//source/extensions/filters/http/decompressor:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc",
    "content": "#include \"envoy/event/timer.h\"\n\n#include \"extensions/compression/gzip/compressor/config.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass DecompressorIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                    public HttpIntegrationTest {\npublic:\n  DecompressorIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {\n    Extensions::Compression::Gzip::Compressor::GzipCompressorLibraryFactory\n        compressor_library_factory;\n    envoy::extensions::compression::gzip::compressor::v3::Gzip factory_config;\n    testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n\n    auto compressor_factory =\n        compressor_library_factory.createCompressorFactoryFromProto(factory_config, context);\n    request_compressor_ = compressor_factory->createCompressor();\n    response_compressor_ = compressor_factory->createCompressor();\n  }\n\n  void TearDown() override { cleanupUpstreamAndDownstream(); }\n\n  void initializeFilter(const std::string& config) {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n    config_helper_.addFilter(config);\n    HttpIntegrationTest::initialize();\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  }\n\n  const std::string default_config{R\"EOF(\n      name: default_decompressor\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor\n        decompressor_library:\n          name: testlib\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\n    )EOF\"};\n\n  Envoy::Compression::Compressor::CompressorPtr request_compressor_{};\n  
Envoy::Compression::Compressor::CompressorPtr response_compressor_{};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, DecompressorIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n/**\n * Exercises gzip decompression bidirectionally with default configuration.\n */\nTEST_P(DecompressorIntegrationTest, BidirectionalDecompression) {\n  // Use gzip for decompression.\n  initializeFilter(default_config);\n\n  // Enable request decompression by setting the Content-Encoding header to gzip.\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"content-encoding\", \"gzip\"}});\n  auto request_encoder = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  // Send first data chunk upstream.\n  Buffer::OwnedImpl request_data1;\n  TestUtility::feedBufferWithRandomCharacters(request_data1, 8192);\n  auto uncompressed_request_length = request_data1.length();\n  request_compressor_->compress(request_data1, Envoy::Compression::Compressor::State::Flush);\n  auto compressed_request_length = request_data1.length();\n  codec_client_->sendData(*request_encoder, request_data1, false);\n\n  // Send second data chunk upstream and finish the request stream.\n  Buffer::OwnedImpl request_data2;\n  TestUtility::feedBufferWithRandomCharacters(request_data2, 16384);\n  uncompressed_request_length += request_data2.length();\n  request_compressor_->compress(request_data2, Envoy::Compression::Compressor::State::Finish);\n  compressed_request_length 
+= request_data2.length();\n  codec_client_->sendData(*request_encoder, request_data2, true);\n\n  // Wait for frames to arrive upstream.\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Assert that the total bytes received upstream equal the sum of the uncompressed byte buffers\n  // sent.\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(\"gzip\", upstream_request_->headers()\n                        .get(Http::LowerCaseString(\"accept-encoding\"))\n                        ->value()\n                        .getStringView());\n  EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::LowerCaseString(\"content-encoding\")));\n  EXPECT_EQ(uncompressed_request_length, upstream_request_->bodyLength());\n  EXPECT_EQ(std::to_string(compressed_request_length),\n            upstream_request_->trailers()\n                ->get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-compressed-bytes\"))\n                ->value()\n                .getStringView());\n  EXPECT_EQ(std::to_string(uncompressed_request_length),\n            upstream_request_->trailers()\n                ->get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-uncompressed-bytes\"))\n                ->value()\n                .getStringView());\n\n  // Verify stats\n  test_server_->waitForCounterEq(\"http.config_test.decompressor.testlib.gzip.request.decompressed\",\n                                 1);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.request.not_decompressed\", 0);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.request.total_compressed_bytes\",\n      compressed_request_length);\n  test_server_->waitForCounterEq(\n      
\"http.config_test.decompressor.testlib.gzip.request.total_uncompressed_bytes\",\n      uncompressed_request_length);\n\n  // Enable response decompression by setting the Content-Encoding header to gzip.\n  upstream_request_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-encoding\", \"gzip\"}}, false);\n\n  // Send first data chunk downstream.\n  Buffer::OwnedImpl response_data1;\n  TestUtility::feedBufferWithRandomCharacters(response_data1, 4096);\n  auto uncompressed_response_length = response_data1.length();\n  response_compressor_->compress(response_data1, Envoy::Compression::Compressor::State::Flush);\n  auto compressed_response_length = response_data1.length();\n  upstream_request_->encodeData(response_data1, false);\n\n  // Send second data chunk downstream and finish the response stream.\n  Buffer::OwnedImpl response_data2;\n  TestUtility::feedBufferWithRandomCharacters(response_data2, 8192);\n  uncompressed_response_length += response_data2.length();\n  response_compressor_->compress(response_data2, Envoy::Compression::Compressor::State::Flush);\n  compressed_response_length += response_data2.length();\n  upstream_request_->encodeData(response_data2, true);\n\n  // Wait for frames to arrive downstream.\n  response->waitForEndStream();\n\n  // Assert that the total bytes received downstream equal the sum of the uncompressed byte buffers\n  // sent.\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().Status()->value().getStringView());\n  EXPECT_EQ(uncompressed_response_length, response->body().length());\n  EXPECT_EQ(std::to_string(compressed_response_length),\n            response->trailers()\n                ->get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-compressed-bytes\"))\n                ->value()\n                .getStringView());\n  EXPECT_EQ(std::to_string(uncompressed_response_length),\n            response->trailers()\n                
->get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-uncompressed-bytes\"))\n                ->value()\n                .getStringView());\n\n  // Verify stats\n  test_server_->waitForCounterEq(\"http.config_test.decompressor.testlib.gzip.response.decompressed\",\n                                 1);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.response.not_decompressed\", 0);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.response.total_compressed_bytes\",\n      compressed_response_length);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.response.total_uncompressed_bytes\",\n      uncompressed_response_length);\n}\n\n/**\n * Exercises gzip decompression bidirectionally with configuration using incompatible window bits\n * resulting in an error.\n */\nTEST_P(DecompressorIntegrationTest, BidirectionalDecompressionError) {\n  const std::string bad_config{R\"EOF(\n      name: default_decompressor\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor\n        decompressor_library:\n          name: testlib\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\n            window_bits: 10\n    )EOF\"};\n  // Use gzip for decompression.\n  initializeFilter(bad_config);\n\n  // Enable request decompression by setting the Content-Encoding header to gzip.\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 
{\"content-encoding\", \"gzip\"}});\n  auto request_encoder = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  // Send first data chunk upstream.\n  Buffer::OwnedImpl request_data1;\n  TestUtility::feedBufferWithRandomCharacters(request_data1, 8192);\n  request_compressor_->compress(request_data1, Envoy::Compression::Compressor::State::Flush);\n  auto compressed_request_length = request_data1.length();\n  codec_client_->sendData(*request_encoder, request_data1, false);\n\n  // Send second data chunk upstream and finish the request stream.\n  Buffer::OwnedImpl request_data2;\n  TestUtility::feedBufferWithRandomCharacters(request_data2, 16384);\n  request_compressor_->compress(request_data2, Envoy::Compression::Compressor::State::Finish);\n  compressed_request_length += request_data2.length();\n  codec_client_->sendData(*request_encoder, request_data2, true);\n\n  // Wait for frames to arrive upstream.\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(\"gzip\", upstream_request_->headers()\n                        .get(Http::LowerCaseString(\"accept-encoding\"))\n                        ->value()\n                        .getStringView());\n  EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::LowerCaseString(\"content-encoding\")));\n  EXPECT_EQ(std::to_string(compressed_request_length),\n            upstream_request_->trailers()\n                ->get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-compressed-bytes\"))\n                ->value()\n                .getStringView());\n\n  // Verify stats. 
While the stream was decompressed, there should be a decompression failure.\n  test_server_->waitForCounterEq(\"http.config_test.decompressor.testlib.gzip.request.decompressed\",\n                                 1);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.request.not_decompressed\", 0);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.request.total_compressed_bytes\",\n      compressed_request_length);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.decompressor_library.zlib_data_error\", 2);\n\n  // Enable response decompression by setting the Content-Encoding header to gzip.\n  upstream_request_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-encoding\", \"gzip\"}}, false);\n\n  // Send first data chunk downstream.\n  Buffer::OwnedImpl response_data1;\n  TestUtility::feedBufferWithRandomCharacters(response_data1, 4096);\n  response_compressor_->compress(response_data1, Envoy::Compression::Compressor::State::Flush);\n  auto compressed_response_length = response_data1.length();\n  upstream_request_->encodeData(response_data1, false);\n\n  // Send second data chunk downstream and finish the response stream.\n  Buffer::OwnedImpl response_data2;\n  TestUtility::feedBufferWithRandomCharacters(response_data2, 8192);\n  response_compressor_->compress(response_data2, Envoy::Compression::Compressor::State::Flush);\n  compressed_response_length += response_data2.length();\n  upstream_request_->encodeData(response_data2, true);\n\n  // Wait for frames to arrive downstream.\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().Status()->value().getStringView());\n  EXPECT_EQ(std::to_string(compressed_response_length),\n            response->trailers()\n                ->get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-compressed-bytes\"))\n     
           ->value()\n                .getStringView());\n\n  // Verify stats. While the stream was decompressed, there should be a decompression failure.\n  test_server_->waitForCounterEq(\"http.config_test.decompressor.testlib.gzip.response.decompressed\",\n                                 1);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.response.not_decompressed\", 0);\n  test_server_->waitForCounterEq(\n      \"http.config_test.decompressor.testlib.gzip.response.total_compressed_bytes\",\n      compressed_response_length);\n  test_server_->waitForCounterGe(\n      \"http.config_test.decompressor.testlib.gzip.decompressor_library.zlib_data_error\", 3);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/decompressor/decompressor_filter_test.cc",
    "content": "#include \"envoy/extensions/filters/http/decompressor/v3/decompressor.pb.h\"\n\n#include \"common/http/headers.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/decompressor/decompressor_filter.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/compression/decompressor/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::ByMove;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Decompressor {\nnamespace {\n\nclass DecompressorFilterTest : public testing::TestWithParam<bool> {\npublic:\n  void SetUp() override {\n    setUpFilter(R\"EOF(\ndecompressor_library:\n  name: testlib\n  typed_config:\n    \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\"\n)EOF\");\n  }\n\n  void setUpFilter(std::string&& yaml) {\n    envoy::extensions::filters::http::decompressor::v3::Decompressor decompressor;\n    TestUtility::loadFromYaml(yaml, decompressor);\n    auto decompressor_factory =\n        std::make_unique<NiceMock<Compression::Decompressor::MockDecompressorFactory>>();\n    decompressor_factory_ = decompressor_factory.get();\n    config_ = std::make_shared<DecompressorFilterConfig>(decompressor, \"test.\", stats_, runtime_,\n                                                         std::move(decompressor_factory));\n    filter_ = std::make_unique<DecompressorFilter>(config_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  bool isRequestDirection() { return GetParam(); }\n\n  std::unique_ptr<Http::RequestOrResponseHeaderMap> doHeaders(const Http::HeaderMap& headers,\n                                          
                    const bool end_stream) {\n    if (isRequestDirection()) {\n      auto request_headers = std::make_unique<Http::TestRequestHeaderMapImpl>(headers);\n      EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n                filter_->decodeHeaders(*request_headers, end_stream));\n      return request_headers;\n    } else {\n      auto response_headers = std::make_unique<Http::TestResponseHeaderMapImpl>(headers);\n      EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n                filter_->encodeHeaders(*response_headers, end_stream));\n      return response_headers;\n    }\n  }\n\n  void doData(Buffer::Instance& buffer, const bool end_stream, const bool expect_decompression) {\n    if (isRequestDirection()) {\n      Http::TestRequestTrailerMapImpl trailers;\n      if (end_stream && expect_decompression) {\n        EXPECT_CALL(decoder_callbacks_, addDecodedTrailers()).WillOnce(ReturnRef(trailers));\n      }\n\n      EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, end_stream));\n\n      if (end_stream && expect_decompression) {\n        EXPECT_EQ(\n            \"30\",\n            trailers.get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-compressed-bytes\"))\n                ->value()\n                .getStringView());\n        EXPECT_EQ(\n            \"60\",\n            trailers.get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-uncompressed-bytes\"))\n                ->value()\n                .getStringView());\n      }\n    } else {\n      Http::TestResponseTrailerMapImpl trailers;\n      if (end_stream && expect_decompression) {\n        EXPECT_CALL(encoder_callbacks_, addEncodedTrailers()).WillOnce(ReturnRef(trailers));\n      }\n\n      EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, end_stream));\n\n      if (end_stream && expect_decompression) {\n        EXPECT_EQ(\n            \"30\",\n            
trailers.get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-compressed-bytes\"))\n                ->value()\n                .getStringView());\n        EXPECT_EQ(\n            \"60\",\n            trailers.get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-uncompressed-bytes\"))\n                ->value()\n                .getStringView());\n      }\n    }\n  }\n\n  void doTrailers() {\n    if (isRequestDirection()) {\n      Http::TestRequestTrailerMapImpl request_trailers;\n      EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n      EXPECT_EQ(\"30\",\n                request_trailers\n                    .get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-compressed-bytes\"))\n                    ->value()\n                    .getStringView());\n      EXPECT_EQ(\"60\",\n                request_trailers\n                    .get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-uncompressed-bytes\"))\n                    ->value()\n                    .getStringView());\n    } else {\n      Http::TestResponseTrailerMapImpl response_trailers;\n      EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers));\n      EXPECT_EQ(\"30\",\n                response_trailers\n                    .get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-compressed-bytes\"))\n                    ->value()\n                    .getStringView());\n      EXPECT_EQ(\"60\",\n                response_trailers\n                    .get(Http::LowerCaseString(\"x-envoy-decompressor-testlib-uncompressed-bytes\"))\n                    ->value()\n                    .getStringView());\n    }\n  }\n\n  void expectDecompression(Compression::Decompressor::MockDecompressor* decompressor_ptr,\n                           bool end_with_data) {\n    EXPECT_CALL(*decompressor_ptr, decompress(_, _))\n        .Times(2)\n        .WillRepeatedly(\n            Invoke([&](const Buffer::Instance& 
input_buffer, Buffer::Instance& output_buffer) {\n              TestUtility::feedBufferWithRandomCharacters(output_buffer, 2 * input_buffer.length());\n            }));\n    Buffer::OwnedImpl buffer;\n    TestUtility::feedBufferWithRandomCharacters(buffer, 10);\n    EXPECT_EQ(10, buffer.length());\n    doData(buffer, false /* end_stream */, true /* expect_decompression */);\n    EXPECT_EQ(20, buffer.length());\n    doData(buffer, end_with_data /* end_stream */, true /* expect_decompression */);\n    EXPECT_EQ(40, buffer.length());\n    if (!end_with_data) {\n      doTrailers();\n    }\n  }\n\n  void expectNoDecompression() {\n    Buffer::OwnedImpl buffer;\n    TestUtility::feedBufferWithRandomCharacters(buffer, 10);\n    EXPECT_EQ(10, buffer.length());\n    doData(buffer, true /* end_stream */, false /* expect_decompression */);\n    EXPECT_EQ(10, buffer.length());\n  }\n\n  void decompressionActive(const Http::HeaderMap& headers_before_filter, bool end_with_data,\n                           const absl::optional<std::string> expected_content_encoding,\n                           const absl::optional<std::string> expected_accept_encoding = \"mock\") {\n    // Keep the decompressor to set expectations about it\n    auto decompressor = std::make_unique<Compression::Decompressor::MockDecompressor>();\n    auto* decompressor_ptr = decompressor.get();\n    EXPECT_CALL(*decompressor_factory_, createDecompressor(_))\n        .WillOnce(Return(ByMove(std::move(decompressor))));\n\n    std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n        doHeaders(headers_before_filter, false /* end_stream */);\n\n    // The filter removes Content-Length\n    EXPECT_EQ(nullptr, headers_after_filter->ContentLength());\n\n    // The filter removes the decompressor's content encoding from the Content-Encoding header.\n    if (expected_content_encoding.has_value()) {\n      EXPECT_EQ(expected_content_encoding.value(),\n                
headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding)\n                    ->value()\n                    .getStringView());\n    } else {\n      EXPECT_EQ(nullptr, headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding));\n    }\n\n    // The filter adds the decompressor's content encoding to the Accept-Encoding header on the\n    // request direction.\n    const auto* accept_encoding =\n        headers_after_filter->get(Http::LowerCaseString{\"accept-encoding\"});\n    if (isRequestDirection() && expected_accept_encoding.has_value()) {\n      EXPECT_EQ(expected_accept_encoding.value(), accept_encoding->value().getStringView());\n    } else {\n      EXPECT_EQ(nullptr, accept_encoding);\n    }\n\n    expectDecompression(decompressor_ptr, end_with_data);\n  }\n\n  Compression::Decompressor::MockDecompressorFactory* decompressor_factory_{};\n  DecompressorFilterConfigSharedPtr config_;\n  std::unique_ptr<DecompressorFilter> filter_;\n  Stats::TestUtil::TestStore stats_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IsRequestDirection, DecompressorFilterTest,\n                         ::testing::Values(true, false));\n\nTEST_P(DecompressorFilterTest, DecompressionActive) {\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"mock\"},\n                                                       {\"content-length\", \"256\"}};\n  decompressionActive(headers_before_filter, true /* end_with_data */,\n                      absl::nullopt /* expected_content_encoding */);\n}\n\nTEST_P(DecompressorFilterTest, DecompressionActiveEndWithTrailers) {\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"mock\"},\n                                                       {\"content-length\", \"256\"}};\n  
decompressionActive(headers_before_filter, false /* end_with_data */,\n                      absl::nullopt /* expected_content_encoding */);\n}\n\nTEST_P(DecompressorFilterTest, DecompressionActiveContentEncodingSpacing) {\n  // Additional spacing should still match.\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \" mock \"},\n                                                       {\"content-length\", \"256\"}};\n  decompressionActive(headers_before_filter, true /* end_with_data */,\n                      absl::nullopt /* expected_content_encoding */);\n}\n\nTEST_P(DecompressorFilterTest, DecompressionActiveContentEncodingCasing) {\n  // Different casing should still match.\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"MOCK\"},\n                                                       {\"content-length\", \"256\"}};\n  decompressionActive(headers_before_filter, true /* end_with_data */,\n                      absl::nullopt /* expected_content_encoding */);\n}\n\nTEST_P(DecompressorFilterTest, DecompressionActiveMultipleEncodings) {\n  // If the first encoding in the Content-Encoding header is the configured value, the filter should\n  // still be active.\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"mock, br\"},\n                                                       {\"content-length\", \"256\"}};\n  decompressionActive(headers_before_filter, true /* end_with_data */, \"br\");\n}\n\nTEST_P(DecompressorFilterTest, DecompressionActiveMultipleEncodings2) {\n  // If the first encoding in the Content-Encoding header is the configured value, the filter should\n  // still be active.\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"mock, br , gzip \"},\n                                                       {\"content-length\", \"256\"}};\n  decompressionActive(headers_before_filter, true /* end_with_data */, \"br , 
gzip\");\n}\n\nTEST_P(DecompressorFilterTest, DisableAdvertiseAcceptEncoding) {\n  setUpFilter(R\"EOF(\ndecompressor_library:\n  typed_config:\n    \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\"\nrequest_direction_config:\n  advertise_accept_encoding: false\n)EOF\");\n\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"mock\"},\n                                                       {\"content-length\", \"256\"}};\n  decompressionActive(headers_before_filter, true /* end_with_data */,\n                      absl::nullopt /* expected_content_encoding*/,\n                      absl::nullopt /* expected_accept_encoding */);\n}\n\nTEST_P(DecompressorFilterTest, ExplicitlyEnableAdvertiseAcceptEncoding) {\n  setUpFilter(R\"EOF(\ndecompressor_library:\n  typed_config:\n    \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\"\nrequest_direction_config:\n  advertise_accept_encoding: true\n)EOF\");\n\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"mock\"},\n                                                       {\"content-length\", \"256\"}};\n  if (isRequestDirection()) {\n    // Also test that the filter appends to an already existing header.\n    headers_before_filter.addCopy(\"accept-encoding\", \"br\");\n  }\n  decompressionActive(headers_before_filter, true /* end_with_data */,\n                      absl::nullopt /* expected_content_encoding*/,\n                      \"br,mock\" /* expected_accept_encoding */);\n}\n\nTEST_P(DecompressorFilterTest, DecompressionDisabled) {\n  setUpFilter(R\"EOF(\ndecompressor_library:\n  typed_config:\n    \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\"\nrequest_direction_config:\n  common_config:\n    enabled:\n      default_value: false\n      runtime_key: does_not_exist\nresponse_direction_config:\n  common_config:\n    enabled:\n      default_value: 
false\n      runtime_key: does_not_exist\n)EOF\");\n\n  EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"mock\"},\n                                                       {\"content-length\", \"256\"}};\n  std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n      doHeaders(headers_before_filter, false /* end_stream */);\n  EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));\n\n  expectNoDecompression();\n}\n\nTEST_P(DecompressorFilterTest, RequestDecompressionDisabled) {\n  setUpFilter(R\"EOF(\ndecompressor_library:\n  typed_config:\n    \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\"\nrequest_direction_config:\n  common_config:\n    enabled:\n      default_value: false\n      runtime_key: does_not_exist\n)EOF\");\n\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"mock\"},\n                                                       {\"content-length\", \"256\"}};\n\n  if (isRequestDirection()) {\n    EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);\n    std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n        doHeaders(headers_before_filter, false /* end_stream */);\n\n    // The request direction adds Accept-Encoding by default. 
Other than this header, the rest of\n    // the headers should be the same before and after the filter.\n    headers_after_filter->remove(Http::LowerCaseString(\"accept-encoding\"));\n    EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));\n\n    expectNoDecompression();\n  } else {\n    decompressionActive(headers_before_filter, true /* end_with_data */,\n                        absl::nullopt /* expected_content_encoding*/,\n                        \"mock\" /* expected_accept_encoding */);\n  }\n}\n\nTEST_P(DecompressorFilterTest, ResponseDecompressionDisabled) {\n  setUpFilter(R\"EOF(\ndecompressor_library:\n  typed_config:\n    \"@type\": \"type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip\"\nresponse_direction_config:\n  common_config:\n    enabled:\n      default_value: false\n      runtime_key: does_not_exist\n)EOF\");\n\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"mock\"},\n                                                       {\"content-length\", \"256\"}};\n\n  if (isRequestDirection()) {\n    // Accept-Encoding is not advertised in the request headers when response decompression is\n    // disabled.\n    decompressionActive(headers_before_filter, true /* end_with_data */,\n                        absl::nullopt /* expected_content_encoding*/,\n                        absl::nullopt /* expected_accept_encoding */);\n  } else {\n    EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);\n    std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n        doHeaders(headers_before_filter, false /* end_stream */);\n\n    EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));\n\n    expectNoDecompression();\n  }\n}\n\nTEST_P(DecompressorFilterTest, NoDecompressionHeadersOnly) {\n  EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);\n  Http::TestRequestHeaderMapImpl headers_before_filter;\n  
std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n      doHeaders(headers_before_filter, true /* end_stream */);\n\n  if (isRequestDirection()) {\n    ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString(\"accept-encoding\"))\n                  ->value()\n                  .getStringView(),\n              \"mock\");\n    // The request direction adds Accept-Encoding by default, even for header-only requests.\n    // Other than this header, the rest of the headers should be the same before and after the\n    // filter.\n    headers_after_filter->remove(Http::LowerCaseString(\"accept-encoding\"));\n  }\n  EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));\n}\n\nTEST_P(DecompressorFilterTest, NoDecompressionContentEncodingAbsent) {\n  EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-length\", \"256\"}};\n  std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n      doHeaders(headers_before_filter, false /* end_stream */);\n\n  if (isRequestDirection()) {\n    ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString(\"accept-encoding\"))\n                  ->value()\n                  .getStringView(),\n              \"mock\");\n    // The request direction adds Accept-Encoding by default. 
Other than this header, the rest of\n    // the headers should be the same before and after the filter.\n    headers_after_filter->remove(Http::LowerCaseString(\"accept-encoding\"));\n  }\n  EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));\n\n  expectNoDecompression();\n}\n\nTEST_P(DecompressorFilterTest, NoDecompressionContentEncodingDoesNotMatch) {\n  EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"not-matching\"},\n                                                       {\"content-length\", \"256\"}};\n  std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n      doHeaders(headers_before_filter, false /* end_stream */);\n\n  expectNoDecompression();\n}\n\nTEST_P(DecompressorFilterTest, NoDecompressionContentEncodingNotCurrent) {\n  EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);\n  // The decompressor's content scheme is not the first value in the comma-delimited list in the\n  // Content-Encoding header. Therefore, compression will not occur.\n  Http::TestRequestHeaderMapImpl headers_before_filter{{\"content-encoding\", \"gzip,mock\"},\n                                                       {\"content-length\", \"256\"}};\n  std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n      doHeaders(headers_before_filter, false /* end_stream */);\n\n  if (isRequestDirection()) {\n    ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString(\"accept-encoding\"))\n                  ->value()\n                  .getStringView(),\n              \"mock\");\n    // The request direction adds Accept-Encoding by default. 
Other than this header, the rest of\n    // the headers should be the same before and after the filter.\n    headers_after_filter->remove(Http::LowerCaseString(\"accept-encoding\"));\n  }\n  EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));\n\n  expectNoDecompression();\n}\n\nTEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresent) {\n  EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);\n  Http::TestRequestHeaderMapImpl headers_before_filter{\n      {\"cache-control\", Http::CustomHeaders::get().CacheControlValues.NoTransform},\n      {\"content-encoding\", \"mock\"},\n      {\"content-length\", \"256\"}};\n  std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n      doHeaders(headers_before_filter, false /* end_stream */);\n\n  if (isRequestDirection()) {\n    ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString(\"accept-encoding\"))\n                  ->value()\n                  .getStringView(),\n              \"mock\");\n    // The request direction adds Accept-Encoding by default. 
Other than this header, the rest of\n    // the headers should be the same before and after the filter.\n    headers_after_filter->remove(Http::LowerCaseString(\"accept-encoding\"));\n  }\n  EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));\n\n  expectNoDecompression();\n}\n\nTEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresentInList) {\n  EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0);\n  Http::TestRequestHeaderMapImpl headers_before_filter{\n      {\"cache-control\", fmt::format(\"{}, {}\", Http::CustomHeaders::get().CacheControlValues.NoCache,\n                                    Http::CustomHeaders::get().CacheControlValues.NoTransform)},\n      {\"content-encoding\", \"mock\"},\n      {\"content-length\", \"256\"}};\n  std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =\n      doHeaders(headers_before_filter, false /* end_stream */);\n\n  if (isRequestDirection()) {\n    ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString(\"accept-encoding\"))\n                  ->value()\n                  .getStringView(),\n              \"mock\");\n    // The request direction adds Accept-Encoding by default. 
Other than this header, the rest of\n    // the headers should be the same before and after the filter.\n    headers_after_filter->remove(Http::LowerCaseString(\"accept-encoding\"));\n  }\n  EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter));\n\n  expectNoDecompression();\n}\n\nTEST_P(DecompressorFilterTest, DecompressionLibraryNotRegistered) {\n  EXPECT_THROW_WITH_MESSAGE(\n      setUpFilter(R\"EOF(\ndecompressor_library:\n  typed_config:\n    \"@type\": \"type.googleapis.com/envoy.extensions.compression.does_not_exist\"\n)EOF\"),\n      EnvoyException,\n      \"Unable to parse JSON as proto (INVALID_ARGUMENT:(decompressor_library.typed_config): \"\n      \"invalid value Invalid type URL, unknown type: envoy.extensions.compression.does_not_exist \"\n      \"for type Any): \"\n      \"{\\\"decompressor_library\\\":{\\\"typed_config\\\":{\\\"@type\\\":\\\"type.googleapis.com/\"\n      \"envoy.extensions.compression.does_not_exist\\\"}}}\");\n}\n\n} // namespace\n} // namespace Decompressor\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/dynamic_forward_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.dynamic_forward_proxy\",\n    deps = [\n        \"//source/extensions/filters/http/dynamic_forward_proxy:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"proxy_filter_test\",\n    srcs = [\"proxy_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.dynamic_forward_proxy\",\n    deps = [\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/clusters:well_known_names\",\n        \"//source/extensions/common/dynamic_forward_proxy:dns_cache_impl\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/dynamic_forward_proxy:config\",\n        \"//test/extensions/common/dynamic_forward_proxy:mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/upstream:basic_resource_limit_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:transport_socket_match_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"proxy_filter_integration_test\",\n    srcs = [\"proxy_filter_integration_test.cc\"],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    extension_name = 
\"envoy.filters.http.dynamic_forward_proxy\",\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \"//source/extensions/clusters/dynamic_forward_proxy:cluster\",\n        \"//source/extensions/filters/http/dynamic_forward_proxy:config\",\n        \"//test/integration:http_integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/dynamic_forward_proxy/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h\"\n#include \"envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.validate.h\"\n\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_impl.h\"\n#include \"extensions/filters/http/dynamic_forward_proxy/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace DynamicForwardProxy {\nnamespace {\n\nTEST(DynamicForwardProxyFilterFactoryTest, RouteSpecificConfig) {\n  DynamicForwardProxyFilterFactory factory;\n  NiceMock<Server::Configuration::MockServerFactoryContext> context;\n\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();\n  EXPECT_TRUE(proto_config.get());\n\n  Router::RouteSpecificFilterConfigConstSharedPtr route_config =\n      factory.createRouteSpecificFilterConfig(*proto_config, context,\n                                              ProtobufMessage::getNullValidationVisitor());\n  EXPECT_TRUE(route_config.get());\n}\n\n} // namespace\n} // namespace DynamicForwardProxy\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/ssl_utility.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass ProxyFilterIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                   public Event::TestUsingSimulatedTime,\n                                   public HttpIntegrationTest {\npublic:\n  ProxyFilterIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void setup(uint64_t max_hosts = 1024, uint32_t max_pending_requests = 1024) {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP1);\n\n    const std::string filter = fmt::format(R\"EOF(\nname: dynamic_forward_proxy\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig\n  dns_cache_config:\n    name: foo\n    dns_lookup_family: {}\n    max_hosts: {}\n    dns_cache_circuit_breaker:\n      max_pending_requests: {}\n)EOF\",\n                                           Network::Test::ipVersionToDnsFamily(GetParam()),\n                                           max_hosts, max_pending_requests);\n    config_helper_.addFilter(filter);\n\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // Switch predefined cluster_0 to CDS filesystem sourcing.\n      bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path());\n      bootstrap.mutable_static_resources()->clear_clusters();\n    });\n\n    // Set validate_clusters to false to allow us 
to reference a CDS cluster.\n    config_helper_.addConfigModifier(\n        [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n               hcm) { hcm.mutable_route_config()->mutable_validate_clusters()->set_value(false); });\n\n    // Setup the initial CDS cluster.\n    cluster_.mutable_connect_timeout()->CopyFrom(\n        Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n    cluster_.set_name(\"cluster_0\");\n    cluster_.set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED);\n\n    if (upstream_tls_) {\n      envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n      auto* validation_context =\n          tls_context.mutable_common_tls_context()->mutable_validation_context();\n      validation_context->mutable_trusted_ca()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n      cluster_.mutable_transport_socket()->set_name(\"envoy.transport_sockets.tls\");\n      cluster_.mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context);\n    }\n\n    const std::string cluster_type_config = fmt::format(\n        R\"EOF(\nname: envoy.clusters.dynamic_forward_proxy\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig\n  dns_cache_config:\n    name: foo\n    dns_lookup_family: {}\n    max_hosts: {}\n    dns_cache_circuit_breaker:\n      max_pending_requests: {}\n)EOF\",\n        Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests);\n\n    TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type());\n    cluster_.mutable_circuit_breakers()\n        ->add_thresholds()\n        ->mutable_max_pending_requests()\n        ->set_value(max_pending_requests);\n\n    // Load the CDS cluster and wait for it to initialize.\n    cds_helper_.setCds({cluster_});\n    HttpIntegrationTest::initialize();\n    
test_server_->waitForCounterEq(\"cluster_manager.cluster_added\", 1);\n    test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 0);\n  }\n\n  void createUpstreams() override {\n    if (upstream_tls_) {\n      addFakeUpstream(Ssl::createFakeUpstreamSslContext(upstream_cert_name_, context_manager_,\n                                                        factory_context_),\n                      FakeHttpConnection::Type::HTTP1);\n    } else {\n      HttpIntegrationTest::createUpstreams();\n    }\n  }\n\n  void disableDnsCacheCircuitBreakers() {\n    config_helper_.addRuntimeOverride(\"envoy.reloadable_features.enable_dns_cache_circuit_breakers\",\n                                      \"false\");\n  }\n\n  bool upstream_tls_{};\n  std::string upstream_cert_name_{\"upstreamlocalhost\"};\n  CdsHelper cds_helper_;\n  envoy::config::cluster::v3::Cluster cluster_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ProxyFilterIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// A basic test where we pause a request to lookup localhost, and then do another request which\n// should hit the TLS cache.\nTEST_P(ProxyFilterIntegrationTest, RequestWithBody) {\n  setup();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response =\n      sendRequestAndWaitForResponse(request_headers, 1024, default_response_headers_, 1024);\n  checkSimpleRequestSuccess(1024, 1024, response.get());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.dns_query_attempt\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.host_added\")->value());\n\n  // Now send another 
request. This should hit the DNS cache.\n  response = sendRequestAndWaitForResponse(request_headers, 512, default_response_headers_, 512);\n  checkSimpleRequestSuccess(512, 512, response.get());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.dns_query_attempt\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.host_added\")->value());\n}\n\nTEST_P(ProxyFilterIntegrationTest, RequestWithBodyWithClusterCircuitBreaker) {\n  disableDnsCacheCircuitBreakers();\n  setup();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response =\n      sendRequestAndWaitForResponse(request_headers, 1024, default_response_headers_, 1024);\n  checkSimpleRequestSuccess(1024, 1024, response.get());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.dns_query_attempt\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.host_added\")->value());\n\n  // Now send another request. 
This should hit the DNS cache.\n  response = sendRequestAndWaitForResponse(request_headers, 512, default_response_headers_, 512);\n  checkSimpleRequestSuccess(512, 512, response.get());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.dns_query_attempt\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.host_added\")->value());\n}\n\n// Verify that after we populate the cache and reload the cluster we reattach to the cache with\n// its existing hosts.\nTEST_P(ProxyFilterIntegrationTest, ReloadClusterAndAttachToCache) {\n  setup();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response =\n      sendRequestAndWaitForResponse(request_headers, 1024, default_response_headers_, 1024);\n  checkSimpleRequestSuccess(1024, 1024, response.get());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.dns_query_attempt\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.host_added\")->value());\n\n  // Cause a cluster reload via CDS.\n  cluster_.mutable_circuit_breakers()->add_thresholds()->mutable_max_connections()->set_value(100);\n  cds_helper_.setCds({cluster_});\n  test_server_->waitForCounterEq(\"cluster_manager.cluster_modified\", 1);\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 0);\n\n  // We need to wait until the workers have gotten the new cluster update. The only way we can\n  // know this currently is when the connection pools drain and terminate.\n  AssertionResult result = fake_upstream_connection_->waitForDisconnect();\n  RELEASE_ASSERT(result, result.message());\n  fake_upstream_connection_.reset();\n\n  // Now send another request. 
This should hit the DNS cache.\n  response = sendRequestAndWaitForResponse(request_headers, 512, default_response_headers_, 512);\n  checkSimpleRequestSuccess(512, 512, response.get());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.dns_query_attempt\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.host_added\")->value());\n}\n\n// Verify that we expire hosts.\nTEST_P(ProxyFilterIntegrationTest, RemoveHostViaTTL) {\n  setup();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response =\n      sendRequestAndWaitForResponse(request_headers, 1024, default_response_headers_, 1024);\n  checkSimpleRequestSuccess(1024, 1024, response.get());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.dns_query_attempt\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.host_added\")->value());\n  EXPECT_EQ(1, test_server_->gauge(\"dns_cache.foo.num_hosts\")->value());\n  cleanupUpstreamAndDownstream();\n\n  // > 5m\n  simTime().advanceTimeWait(std::chrono::milliseconds(300001));\n  test_server_->waitForGaugeEq(\"dns_cache.foo.num_hosts\", 0);\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.host_removed\")->value());\n}\n\n// Test DNS cache host overflow.\nTEST_P(ProxyFilterIntegrationTest, DNSCacheHostOverflow) {\n  setup(1);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response =\n      sendRequestAndWaitForResponse(request_headers, 1024, 
default_response_headers_, 1024);\n  checkSimpleRequestSuccess(1024, 1024, response.get());\n\n  // Send another request, this should lead to a response directly from the filter.\n  const Http::TestRequestHeaderMapImpl request_headers2{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", fmt::format(\"localhost2\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n  response = codec_client_->makeHeaderOnlyRequest(request_headers2);\n  response->waitForEndStream();\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.host_overflow\")->value());\n}\n\n// Verify that upstream TLS works with auto verification for SAN as well as auto setting SNI.\nTEST_P(ProxyFilterIntegrationTest, UpstreamTls) {\n  upstream_tls_ = true;\n  setup();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  waitForNextUpstreamRequest();\n\n  const Extensions::TransportSockets::Tls::SslHandshakerImpl* ssl_socket =\n      dynamic_cast<const Extensions::TransportSockets::Tls::SslHandshakerImpl*>(\n          fake_upstream_connection_->connection().ssl().get());\n  EXPECT_STREQ(\"localhost\", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name));\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  checkSimpleRequestSuccess(0, 0, response.get());\n}\n\nTEST_P(ProxyFilterIntegrationTest, UpstreamTlsWithIpHost) {\n  upstream_tls_ = true;\n  setup();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const 
Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", fmt::format(\"{}:{}\", Network::Test::getLoopbackAddressUrlString(GetParam()),\n                                 fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  waitForNextUpstreamRequest();\n\n  // No SNI for IP hosts.\n  const Extensions::TransportSockets::Tls::SslHandshakerImpl* ssl_socket =\n      dynamic_cast<const Extensions::TransportSockets::Tls::SslHandshakerImpl*>(\n          fake_upstream_connection_->connection().ssl().get());\n  EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name));\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  checkSimpleRequestSuccess(0, 0, response.get());\n}\n\n// Verify that auto-SAN verification fails with an incorrect certificate.\nTEST_P(ProxyFilterIntegrationTest, UpstreamTlsInvalidSAN) {\n  upstream_tls_ = true;\n  upstream_cert_name_ = \"upstream\";\n  setup();\n  fake_upstreams_[0]->setReadDisableOnNewConnection(false);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ssl.fail_verify_san\")->value());\n}\n\nTEST_P(ProxyFilterIntegrationTest, DnsCacheCircuitBreakersInvoked) {\n  setup(1024, 0);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const 
Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response = codec_client_->makeRequestWithBody(request_headers, 1024);\n  response->waitForEndStream();\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.dns_rq_pending_overflow\")->value());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().Status()->value().getStringView());\n}\n\nTEST_P(ProxyFilterIntegrationTest, ClusterCircuitBreakersInvoked) {\n  disableDnsCacheCircuitBreakers();\n  setup(1024, 0);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response = codec_client_->makeRequestWithBody(request_headers, 1024);\n  response->waitForEndStream();\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.upstream_rq_pending_overflow\")->value());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().Status()->value().getStringView());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc",
    "content": "#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h\"\n\n#include \"extensions/clusters/well_known_names.h\"\n#include \"extensions/common/dynamic_forward_proxy/dns_cache_impl.h\"\n#include \"extensions/filters/http/dynamic_forward_proxy/proxy_filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/extensions/common/dynamic_forward_proxy/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/upstream/basic_resource_limit.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/transport_socket_match.h\"\n#include \"test/test_common/test_runtime.h\"\n\nusing testing::AtLeast;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace DynamicForwardProxy {\nnamespace {\n\nusing CustomClusterType = envoy::config::cluster::v3::Cluster::CustomClusterType;\n\nusing LoadDnsCacheEntryStatus = Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryStatus;\nusing MockLoadDnsCacheEntryResult =\n    Common::DynamicForwardProxy::MockDnsCache::MockLoadDnsCacheEntryResult;\n\nclass ProxyFilterTest : public testing::Test,\n                        public Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory {\npublic:\n  ProxyFilterTest() {\n    transport_socket_match_ = new NiceMock<Upstream::MockTransportSocketMatcher>(\n        Network::TransportSocketFactoryPtr(transport_socket_factory_));\n    cm_.thread_local_cluster_.cluster_.info_->transport_socket_matcher_.reset(\n        transport_socket_match_);\n\n    envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig proto_config;\n    EXPECT_CALL(*dns_cache_manager_, getCache(_));\n    filter_config_ = std::make_shared<ProxyFilterConfig>(proto_config, *this, cm_);\n    filter_ = std::make_unique<ProxyFilter>(filter_config_);\n 
   filter_->setDecoderFilterCallbacks(callbacks_);\n\n    // Allow for an otherwise strict mock.\n    EXPECT_CALL(callbacks_, connection()).Times(AtLeast(0));\n    EXPECT_CALL(callbacks_, streamId()).Times(AtLeast(0));\n\n    // Configure upstream cluster to be a Dynamic Forward Proxy since that's the\n    // kind we need to do DNS entries for.\n    CustomClusterType cluster_type;\n    cluster_type.set_name(Envoy::Extensions::Clusters::ClusterTypes::get().DynamicForwardProxy);\n    cm_.thread_local_cluster_.cluster_.info_->cluster_type_ = cluster_type;\n\n    // Configure max pending to 1 so we can test circuit breaking.\n    cm_.thread_local_cluster_.cluster_.info_->resetResourceManager(0, 1, 0, 0, 0);\n  }\n\n  ~ProxyFilterTest() override {\n    EXPECT_TRUE(\n        cm_.thread_local_cluster_.cluster_.info_->resource_manager_->pendingRequests().canCreate());\n  }\n\n  Extensions::Common::DynamicForwardProxy::DnsCacheManagerSharedPtr get() override {\n    return dns_cache_manager_;\n  }\n\n  std::shared_ptr<Extensions::Common::DynamicForwardProxy::MockDnsCacheManager> dns_cache_manager_{\n      new Extensions::Common::DynamicForwardProxy::MockDnsCacheManager()};\n  Network::MockTransportSocketFactory* transport_socket_factory_{\n      new Network::MockTransportSocketFactory()};\n  NiceMock<Upstream::MockTransportSocketMatcher>* transport_socket_match_;\n  Upstream::MockClusterManager cm_;\n  ProxyFilterConfigSharedPtr filter_config_;\n  std::unique_ptr<ProxyFilter> filter_;\n  Http::MockStreamDecoderFilterCallbacks callbacks_;\n  Http::TestRequestHeaderMapImpl request_headers_{{\":authority\", \"foo\"}};\n  NiceMock<Upstream::MockBasicResourceLimit> pending_requests_;\n};\n\n// Default port 80 if upstream TLS not configured.\nTEST_F(ProxyFilterTest, HttpDefaultPort) {\n  Upstream::ResourceAutoIncDec* circuit_breakers_(\n      new Upstream::ResourceAutoIncDec(pending_requests_));\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n 
 EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n      .WillOnce(Return(circuit_breakers_));\n  EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false));\n  Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle =\n      new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle();\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"foo\"), 80, _))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle}));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  EXPECT_CALL(*handle, onDestroy());\n  filter_->onDestroy();\n}\n\n// Default port 443 if upstream TLS is configured.\nTEST_F(ProxyFilterTest, HttpsDefaultPort) {\n  Upstream::ResourceAutoIncDec* circuit_breakers_(\n      new Upstream::ResourceAutoIncDec(pending_requests_));\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n      .WillOnce(Return(circuit_breakers_));\n  EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true));\n  Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle =\n      new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle();\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"foo\"), 443, _))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle}));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  EXPECT_CALL(*handle, onDestroy());\n  filter_->onDestroy();\n}\n\n// Cache overflow.\nTEST_F(ProxyFilterTest, CacheOverflow) {\n  Upstream::ResourceAutoIncDec* circuit_breakers_(\n      new 
Upstream::ResourceAutoIncDec(pending_requests_));\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n      .WillOnce(Return(circuit_breakers_));\n  EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"foo\"), 443, _))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Overflow, nullptr}));\n  EXPECT_CALL(callbacks_, sendLocalReply(Http::Code::ServiceUnavailable, Eq(\"DNS cache overflow\"),\n                                         _, _, Eq(\"DNS cache overflow\")));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  filter_->onDestroy();\n}\n\n// Circuit breaker overflow\nTEST_F(ProxyFilterTest, CircuitBreakerOverflow) {\n  // Disable dns cache circuit breakers because which we expect to be used cluster circuit breakers.\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.enable_dns_cache_circuit_breakers\", \"false\"}});\n  Upstream::ResourceAutoIncDec* circuit_breakers_(\n      new Upstream::ResourceAutoIncDec(pending_requests_));\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n      .WillOnce(Return(circuit_breakers_));\n  EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true));\n  Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle =\n      new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle();\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"foo\"), 443, 
_))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle}));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // Create a second filter for a 2nd request.\n  auto filter2 = std::make_unique<ProxyFilter>(filter_config_);\n  filter2->setDecoderFilterCallbacks(callbacks_);\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_));\n  EXPECT_CALL(callbacks_, sendLocalReply(Http::Code::ServiceUnavailable,\n                                         Eq(\"Dynamic forward proxy pending request overflow\"), _, _,\n                                         Eq(\"Dynamic forward proxy pending request overflow\")));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter2->decodeHeaders(request_headers_, false));\n\n  EXPECT_EQ(1,\n            cm_.thread_local_cluster_.cluster_.info_->stats_.upstream_rq_pending_overflow_.value());\n  filter2->onDestroy();\n  EXPECT_CALL(*handle, onDestroy());\n  filter_->onDestroy();\n}\n\n// Circuit breaker overflow with DNS Cache resource manager\nTEST_F(ProxyFilterTest, CircuitBreakerOverflowWithDnsCacheResourceManager) {\n  Upstream::ResourceAutoIncDec* circuit_breakers_(\n      new Upstream::ResourceAutoIncDec(pending_requests_));\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n      .WillOnce(Return(circuit_breakers_));\n  EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true));\n  Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle =\n      new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle();\n  
EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"foo\"), 443, _))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle}));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // Create a second filter for a 2nd request.\n  auto filter2 = std::make_unique<ProxyFilter>(filter_config_);\n  filter2->setDecoderFilterCallbacks(callbacks_);\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_));\n  EXPECT_CALL(callbacks_, sendLocalReply(Http::Code::ServiceUnavailable,\n                                         Eq(\"Dynamic forward proxy pending request overflow\"), _, _,\n                                         Eq(\"Dynamic forward proxy pending request overflow\")));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter2->decodeHeaders(request_headers_, false));\n\n  // Cluster circuit breaker overflow counter won't be incremented.\n  EXPECT_EQ(0,\n            cm_.thread_local_cluster_.cluster_.info_->stats_.upstream_rq_pending_overflow_.value());\n  filter2->onDestroy();\n  EXPECT_CALL(*handle, onDestroy());\n  filter_->onDestroy();\n}\n\n// No route handling.\nTEST_F(ProxyFilterTest, NoRoute) {\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, route()).WillOnce(Return(nullptr));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\n// No cluster handling.\nTEST_F(ProxyFilterTest, NoCluster) {\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_)).WillOnce(Return(nullptr));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\n// No cluster type leads to skipping DNS 
lookups.\nTEST_F(ProxyFilterTest, NoClusterType) {\n  cm_.thread_local_cluster_.cluster_.info_->cluster_type_ = absl::nullopt;\n\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\n// Cluster that isn't a dynamic forward proxy cluster\nTEST_F(ProxyFilterTest, NonDynamicForwardProxy) {\n  CustomClusterType cluster_type;\n  cluster_type.set_name(Envoy::Extensions::Clusters::ClusterTypes::get().Static);\n  cm_.thread_local_cluster_.cluster_.info_->cluster_type_ = cluster_type;\n\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\nTEST_F(ProxyFilterTest, HostRewrite) {\n  Upstream::ResourceAutoIncDec* circuit_breakers_(\n      new Upstream::ResourceAutoIncDec(pending_requests_));\n  InSequence s;\n\n  envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig proto_config;\n  proto_config.set_host_rewrite_literal(\"bar\");\n  ProxyPerRouteConfig config(proto_config);\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n      .WillOnce(Return(circuit_breakers_));\n  EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false));\n  Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle =\n      new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle();\n  EXPECT_CALL(callbacks_.route_->route_entry_,\n              perFilterConfig(HttpFilterNames::get().DynamicForwardProxy))\n      .WillOnce(Return(&config));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"bar\"), 80, _))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle}));\n  
EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  EXPECT_CALL(*handle, onDestroy());\n  filter_->onDestroy();\n}\n\nTEST_F(ProxyFilterTest, HostRewriteViaHeader) {\n  Upstream::ResourceAutoIncDec* circuit_breakers_(\n      new Upstream::ResourceAutoIncDec(pending_requests_));\n  InSequence s;\n\n  envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig proto_config;\n  proto_config.set_host_rewrite_header(\"x-set-header\");\n  ProxyPerRouteConfig config(proto_config);\n\n  EXPECT_CALL(callbacks_, route());\n  EXPECT_CALL(cm_, get(_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n      .WillOnce(Return(circuit_breakers_));\n  EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false));\n  Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle =\n      new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle();\n  EXPECT_CALL(callbacks_.route_->route_entry_,\n              perFilterConfig(HttpFilterNames::get().DynamicForwardProxy))\n      .WillOnce(Return(&config));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"bar:82\"), 80, _))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle}));\n\n  Http::TestRequestHeaderMapImpl headers{{\":authority\", \"foo\"}, {\"x-set-header\", \"bar:82\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(headers, false));\n\n  EXPECT_CALL(*handle, onDestroy());\n  filter_->onDestroy();\n}\n\n} // namespace\n} // namespace DynamicForwardProxy\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/dynamo/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"dynamo_filter_test\",\n    srcs = [\"dynamo_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.dynamo\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/dynamo:dynamo_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"dynamo_request_parser_test\",\n    srcs = [\"dynamo_request_parser_test.cc\"],\n    extension_name = \"envoy.filters.http.dynamo\",\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/extensions/filters/http/dynamo:dynamo_request_parser_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"dynamo_stats_test\",\n    srcs = [\"dynamo_stats_test.cc\"],\n    extension_name = \"envoy.filters.http.dynamo\",\n    deps = [\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/http/dynamo:dynamo_stats_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.dynamo\",\n    deps = [\n        \"//source/extensions/filters/http/dynamo:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/dynamo/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/dynamo/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/dynamo/v3/dynamo.pb.h\"\n#include \"envoy/extensions/filters/http/dynamo/v3/dynamo.pb.validate.h\"\n\n#include \"extensions/filters/http/dynamo/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\nnamespace {\n\nTEST(DynamoFilterConfigTest, DynamoFilter) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  DynamoFilterConfig factory;\n  envoy::extensions::filters::http::dynamo::v3::Dynamo proto_config;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(DynamoFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.http_dynamo_filter\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/dynamo/dynamo_filter_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/header_map_impl.h\"\n\n#include \"extensions/filters/http/dynamo/dynamo_filter.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Property;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\nnamespace {\n\nclass DynamoFilterTest : public testing::Test {\npublic:\n  void setup(bool enabled) {\n    ON_CALL(loader_.snapshot_, featureEnabled(\"dynamodb.filter_enabled\", 100))\n        .WillByDefault(Return(enabled));\n    EXPECT_CALL(loader_.snapshot_, featureEnabled(\"dynamodb.filter_enabled\", 100));\n\n    auto stats = std::make_shared<DynamoStats>(stats_, \"prefix.\");\n    filter_ = std::make_unique<DynamoFilter>(loader_, stats,\n                                             decoder_callbacks_.dispatcher().timeSource());\n\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  ~DynamoFilterTest() override { filter_->onDestroy(); }\n\n  NiceMock<Stats::MockStore> stats_;\n  std::unique_ptr<DynamoFilter> filter_;\n  NiceMock<Runtime::MockLoader> loader_;\n  std::string stat_prefix_{\"prefix.\"};\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  Http::TestRequestTrailerMapImpl request_trailers_;\n};\n\nTEST_F(DynamoFilterTest, OperatorPresent) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.Get\"},\n                                                 {\"random\", \"random\"}};\n\n  
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(continue_headers));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation_missing\")).Times(0);\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table_missing\"));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.Get.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.Get.upstream_rq_total_200\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.Get.upstream_rq_total\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.Get.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.Get.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.Get.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"prefix.dynamodb.operation.Get.upstream_rq_time_2xx\"), _));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"prefix.dynamodb.operation.Get.upstream_rq_time_200\"), _));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, 
\"prefix.dynamodb.operation.Get.upstream_rq_time\"), _));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n}\n\nTEST_F(DynamoFilterTest, JsonBodyNotWellFormed) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.GetItem\"},\n                                                 {\"random\", \"random\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(\"test\", 4);\n  buffer.add(\"test2\", 5);\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.invalid_req_body\"));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, true));\n}\n\nTEST_F(DynamoFilterTest, BothOperationAndTableIncorrect) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version\"}, {\"random\", \"random\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation_missing\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table_missing\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n}\n\nTEST_F(DynamoFilterTest, HandleErrorTypeTableMissing) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version\"}, {\"random\", \"random\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation_missing\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table_missing\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"400\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            
filter_->encodeHeaders(response_headers, false));\n\n  Buffer::InstancePtr error_data(new Buffer::OwnedImpl());\n  std::string internal_error =\n      \"{\\\"__type\\\":\\\"com.amazonaws.dynamodb.v20120810#ValidationException\\\"}\";\n  error_data->add(internal_error);\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.error.no_table.ValidationException\"));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(*error_data, true));\n\n  error_data->add(\"}\", 1);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_->encodeData(*error_data, false));\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer()).WillRepeatedly(Return(error_data.get()));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.invalid_resp_body\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation_missing\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table_missing\"));\n  Http::TestResponseTrailerMapImpl response_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers));\n}\n\nTEST_F(DynamoFilterTest, HandleErrorTypeTablePresent) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.GetItem\"},\n                                                 {\"random\", \"random\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl buffer;\n  std::string buffer_content = \"{\\\"TableName\\\":\\\"locations\\\"}\";\n  buffer.add(buffer_content);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, true));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"400\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl error_data;\n  std::string internal_error =\n      \"{\\\"__type\\\":\\\"com.amazonaws.dynamodb.v20120810#ValidationException\\\"}\";\n  
error_data.add(internal_error);\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.error.locations.ValidationException\"));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.GetItem.upstream_rq_total\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.GetItem.upstream_rq_total_4xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.GetItem.upstream_rq_total_400\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.GetItem.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.GetItem.upstream_rq_time_4xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.GetItem.upstream_rq_time_400\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.GetItem.upstream_rq_time_4xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.GetItem.upstream_rq_time_400\"),\n                          _));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"prefix.dynamodb.operation.GetItem.upstream_rq_time\"), _));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total_4xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total_400\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time_4xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, 
histogram(\"prefix.dynamodb.table.locations.upstream_rq_time_400\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.table.locations.upstream_rq_time_4xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.table.locations.upstream_rq_time_400\"),\n                          _));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"prefix.dynamodb.table.locations.upstream_rq_time\"), _));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(error_data, true));\n}\n\nTEST_F(DynamoFilterTest, BatchMultipleTables) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.BatchGetItem\"},\n                                                 {\"random\", \"random\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  std::string buffer_content = R\"EOF(\n{\n  \"RequestItems\": {\n    \"table_1\": { \"test1\" : \"something\" },\n    \"table_2\": { \"test2\" : \"something\" }\n  }\n}\n)EOF\";\n  buffer->add(buffer_content);\n\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(*buffer, false));\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(buffer.get()));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl 
response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.multiple_tables\"));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\"),\n                          _));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n}\n\nTEST_F(DynamoFilterTest, BatchMultipleTablesUnprocessedKeys) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.BatchGetItem\"},\n                                                 {\"random\", \"random\"}};\n  
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  std::string buffer_content = R\"EOF(\n{\n  \"RequestItems\": {\n    \"table_1\": { \"test1\" : \"something\" },\n    \"table_2\": { \"test2\" : \"something\" }\n  }\n}\n)EOF\";\n  buffer->add(buffer_content);\n\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(*buffer, false));\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(buffer.get()));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.multiple_tables\"));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                          
         \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\"),\n                          _));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl empty_data;\n  Buffer::InstancePtr response_data(new Buffer::OwnedImpl());\n  std::string response_content = R\"EOF(\n{\n  \"UnprocessedKeys\": {\n    \"table_1\": { \"test1\" : \"something\" },\n    \"table_2\": { \"test2\" : \"something\" }\n  }\n}\n)EOF\";\n  response_data->add(response_content);\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.error.table_1.BatchFailureUnprocessedKeys\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.error.table_2.BatchFailureUnprocessedKeys\"));\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer()).WillRepeatedly(Return(response_data.get()));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(empty_data, true));\n}\n\nTEST_F(DynamoFilterTest, BatchMultipleTablesNoUnprocessedKeys) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.BatchGetItem\"},\n                                                 {\"random\", \"random\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  std::string buffer_content = R\"EOF(\n{\n  \"RequestItems\": {\n    \"table_1\": { \"test1\" : \"something\" },\n    \"table_2\": { \"test2\" : \"something\" }\n  }\n}\n)EOF\";\n  buffer->add(buffer_content);\n\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(*buffer, false));\n  EXPECT_CALL(decoder_callbacks_, 
decodingBuffer()).WillRepeatedly(Return(buffer.get()));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.multiple_tables\"));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\"),\n                          _));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl empty_data;\n  
Buffer::InstancePtr response_data(new Buffer::OwnedImpl());\n  std::string response_content = R\"EOF(\n{\n  \"UnprocessedKeys\": {\n  }\n}\n)EOF\";\n  response_data->add(response_content);\n\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer()).WillOnce(Return(response_data.get()));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(empty_data, true));\n}\n\nTEST_F(DynamoFilterTest, BatchMultipleTablesInvalidResponseBody) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.BatchGetItem\"},\n                                                 {\"random\", \"random\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  std::string buffer_content = R\"EOF(\n{\n  \"RequestItems\": {\n    \"table_1\": { \"test1\" : \"something\" },\n    \"table_2\": { \"test2\" : \"something\" }\n  }\n}\n)EOF\";\n  buffer->add(buffer_content);\n\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(*buffer, false));\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(buffer.get()));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.multiple_tables\"));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, 
histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\"),\n                          _));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl empty_data;\n  Buffer::InstancePtr response_data(new Buffer::OwnedImpl());\n  std::string response_content = R\"EOF(\n{\n  \"UnprocessedKeys\": {\n    \"table_1\": { \"test1\" : \"something\" },\n    \"table_2\": { \"test2\" : \"something\" }\n  }\n}\n)EOF\";\n  response_data->add(response_content);\n  response_data->add(\"}\", 1);\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.invalid_resp_body\"));\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer()).WillOnce(Return(response_data.get()));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(empty_data, true));\n}\n\nTEST_F(DynamoFilterTest, BothOperationAndTableCorrect) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.GetItem\"}};\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  
std::string buffer_content = \"{\\\"TableName\\\":\\\"locations\\\"\";\n  buffer->add(buffer_content);\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(buffer.get()));\n  Buffer::OwnedImpl data;\n  data.add(\"}\", 1);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.GetItem.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.GetItem.upstream_rq_total_200\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.GetItem.upstream_rq_total\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.GetItem.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.GetItem.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.GetItem.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"prefix.dynamodb.operation.GetItem.upstream_rq_time\"), _));\n\n  EXPECT_CALL(stats_, 
counter(\"prefix.dynamodb.table.locations.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total_200\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.table.locations.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.table.locations.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"prefix.dynamodb.table.locations.upstream_rq_time\"), _));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n}\n\nTEST_F(DynamoFilterTest, OperatorPresentRuntimeDisabled) {\n  setup(false);\n\n  EXPECT_CALL(stats_, counter(_)).Times(0);\n  EXPECT_CALL(stats_, deliverHistogramToSinks(_, _)).Times(0);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.operator\"},\n                                                 {\"random\", \"random\"}};\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n  Http::TestResponseTrailerMapImpl response_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers));\n}\n\nTEST_F(DynamoFilterTest, PartitionIdStats) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.GetItem\"}};\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  std::string buffer_content = \"{\\\"TableName\\\":\\\"locations\\\"\";\n  buffer->add(buffer_content);\n  ON_CALL(decoder_callbacks_, decodingBuffer()).WillByDefault(Return(buffer.get()));\n  Buffer::OwnedImpl data;\n  data.add(\"}\", 1);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.GetItem.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.GetItem.upstream_rq_total_200\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.GetItem.upstream_rq_total\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.GetItem.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.GetItem.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                 
                  \"prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.GetItem.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"prefix.dynamodb.operation.GetItem.upstream_rq_time\"), _));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total_200\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.table.locations.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.table.locations.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"prefix.dynamodb.table.locations.upstream_rq_time\"), _));\n\n  EXPECT_CALL(stats_,\n              
counter(\"prefix.dynamodb.table.locations.capacity.GetItem.__partition_id=ition_1\"))\n      .Times(1);\n  EXPECT_CALL(stats_,\n              counter(\"prefix.dynamodb.table.locations.capacity.GetItem.__partition_id=ition_2\"))\n      .Times(1);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl empty_data;\n  Buffer::InstancePtr response_data(new Buffer::OwnedImpl());\n  std::string response_content = R\"EOF(\n    {\n      \"ConsumedCapacity\": {\n        \"Partitions\": {\n          \"partition_1\" : 0.5,\n          \"partition_2\" : 3.0\n        }\n      }\n    }\n    )EOF\";\n\n  response_data->add(response_content);\n\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer()).WillOnce(Return(response_data.get()));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(empty_data, true));\n}\n\nTEST_F(DynamoFilterTest, NoPartitionIdStatsForMultipleTables) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.BatchGetItem\"}};\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  std::string buffer_content = R\"EOF(\n{\n  \"RequestItems\": {\n    \"table_1\": { \"test1\" : \"something\" },\n    \"table_2\": { \"test2\" : \"something\" }\n  }\n}\n)EOF\";\n  buffer->add(buffer_content);\n  ON_CALL(decoder_callbacks_, decodingBuffer()).WillByDefault(Return(buffer.get()));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(*buffer, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.multiple_tables\"));\n\n  EXPECT_CALL(stats_, 
counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\"),\n                          _));\n\n  EXPECT_CALL(\n      stats_,\n      counter(\"prefix.dynamodb.table.locations.capacity.BatchGetItem.__partition_id=ition_1\"))\n      .Times(0);\n  EXPECT_CALL(\n      stats_,\n      counter(\"prefix.dynamodb.table.locations.capacity.BatchGetItem.__partition_id=ition_2\"))\n      .Times(0);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n\n  
Buffer::OwnedImpl empty_data;\n  Buffer::InstancePtr response_data(new Buffer::OwnedImpl());\n  std::string response_content = R\"EOF(\n    {\n      \"ConsumedCapacity\": {\n        \"Partitions\": {\n          \"partition_1\" : 0.5,\n          \"partition_2\" : 3.0\n        }\n      }\n    }\n    )EOF\";\n\n  response_data->add(response_content);\n\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer()).WillOnce(Return(response_data.get()));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(empty_data, true));\n}\n\nTEST_F(DynamoFilterTest, PartitionIdStatsForSingleTableBatchOperation) {\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-amz-target\", \"version.BatchGetItem\"}};\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n  std::string buffer_content = R\"EOF(\n{\n  \"RequestItems\": {\n    \"locations\": { \"test1\" : \"something\" }\n  }\n}\n)EOF\";\n  buffer->add(buffer_content);\n  ON_CALL(decoder_callbacks_, decodingBuffer()).WillByDefault(Return(buffer.get()));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(*buffer, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.multiple_tables\")).Times(0);\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\",\n          
                      Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.operation.BatchGetItem.upstream_rq_time\"),\n                          _));\n\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total_2xx\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total_200\"));\n  EXPECT_CALL(stats_, counter(\"prefix.dynamodb.table.locations.upstream_rq_total\"));\n\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time_2xx\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time_200\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, histogram(\"prefix.dynamodb.table.locations.upstream_rq_time\",\n                                Stats::Histogram::Unit::Milliseconds));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.table.locations.upstream_rq_time_2xx\"),\n                          _));\n  EXPECT_CALL(stats_, deliverHistogramToSinks(\n     
                     Property(&Stats::Metric::name,\n                                   \"prefix.dynamodb.table.locations.upstream_rq_time_200\"),\n                          _));\n  EXPECT_CALL(\n      stats_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"prefix.dynamodb.table.locations.upstream_rq_time\"), _));\n\n  EXPECT_CALL(\n      stats_,\n      counter(\"prefix.dynamodb.table.locations.capacity.BatchGetItem.__partition_id=ition_1\"))\n      .Times(1);\n  EXPECT_CALL(\n      stats_,\n      counter(\"prefix.dynamodb.table.locations.capacity.BatchGetItem.__partition_id=ition_2\"))\n      .Times(1);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl empty_data;\n  Buffer::InstancePtr response_data(new Buffer::OwnedImpl());\n  std::string response_content = R\"EOF(\n    {\n      \"ConsumedCapacity\": {\n        \"Partitions\": {\n          \"partition_1\" : 0.5,\n          \"partition_2\" : 3.0\n        }\n      }\n    }\n    )EOF\";\n\n  response_data->add(response_content);\n\n  EXPECT_CALL(encoder_callbacks_, encodingBuffer()).WillOnce(Return(response_data.get()));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(empty_data, true));\n}\n\n} // namespace\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/dynamo/dynamo_request_parser_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/json/json_loader.h\"\n\n#include \"extensions/filters/http/dynamo/dynamo_request_parser.h\"\n\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\nnamespace {\n\nTEST(DynamoRequestParser, parseOperation) {\n  // Well formed x-amz-target header, in a format, Version.Operation\n  {\n    Http::TestRequestHeaderMapImpl headers{{\"X\", \"X\"}, {\"x-amz-target\", \"X.Operation\"}};\n    EXPECT_EQ(\"Operation\", RequestParser::parseOperation(headers));\n  }\n\n  // Not well formed x-amz-target header.\n  {\n    Http::TestRequestHeaderMapImpl headers{{\"X\", \"X\"}, {\"x-amz-target\", \"X,Operation\"}};\n    EXPECT_EQ(\"\", RequestParser::parseOperation(headers));\n  }\n\n  // Too many entries in the Version.Operation.\n  {\n    Http::TestRequestHeaderMapImpl headers{{\"X\", \"X\"},\n                                           {\"x-amz-target\", \"NOT_VALID.NOT_VALID.NOT_VALID\"}};\n    EXPECT_EQ(\"\", RequestParser::parseOperation(headers));\n  }\n\n  // Required header is not present in the headers\n  {\n    Http::TestRequestHeaderMapImpl headers{{\"Z\", \"Z\"}};\n    EXPECT_EQ(\"\", RequestParser::parseOperation(headers));\n  }\n}\n\nTEST(DynamoRequestParser, parseTableNameSingleOperation) {\n  std::vector<std::string> supported_single_operations{\"GetItem\", \"Query\",      \"Scan\",\n                                                       \"PutItem\", \"UpdateItem\", \"DeleteItem\"};\n\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"TableName\": \"Pets\",\n      \"Key\": {\n        \"AnimalType\": {\"S\": \"Dog\"},\n        \"Name\": {\"S\": \"Fido\"}\n      }\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(json_string);\n\n    // Supported operation\n   
 for (const std::string& operation : supported_single_operations) {\n      EXPECT_EQ(\"Pets\", RequestParser::parseTable(operation, *json_data).table_name);\n    }\n\n    // Not supported operation\n    EXPECT_EQ(\"\", RequestParser::parseTable(\"NotSupportedOperation\", *json_data).table_name);\n  }\n\n  {\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(R\"({\"TableName\":\"Pets\"})\");\n    EXPECT_EQ(\"Pets\", RequestParser::parseTable(\"GetItem\", *json_data).table_name);\n  }\n}\n\nTEST(DynamoRequestParser, parseTableNameTransactOperation) {\n  std::vector<std::string> supported_transact_operations{\"TransactGetItems\", \"TransactWriteItems\"};\n  // testing single table operation\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"TransactItems\": [\n        { \"Update\": { \"TableName\": \"Pets\", \"Key\": { \"Name\": {\"S\": \"Maxine\"} }, \"AnimalType\": {\"S\": \"Dog\"} } },\n        { \"Put\": { \"TableName\": \"Pets\", \"Key\": { \"Name\": {\"S\": \"Max\"} }, \"AnimalType\": {\"S\": \"Puppy\"} } },\n        { \"Put\": { \"TableName\": \"Pets\", \"Key\": { \"Name\": {\"S\": \"Oscar\"} }, \"AnimalType\": {\"S\": \"Puppy\"} } },\n        { \"Put\": { \"TableName\": \"Pets\", \"Key\": { \"Name\": {\"S\": \"Chloe\"} }, \"AnimalType\": {\"S\": \"Puppy\"} } }\n      ]\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(json_string);\n\n    for (const std::string& operation : supported_transact_operations) {\n      RequestParser::TableDescriptor table = RequestParser::parseTable(operation, *json_data);\n      EXPECT_EQ(\"Pets\", table.table_name);\n      EXPECT_TRUE(table.is_single_table);\n    }\n  }\n\n  // testing multi-table operation\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"TransactItems\": [\n        { \"Put\": { \"TableName\": \"Pets\", \"Key\": { \"AnimalType\": {\"S\": \"Dog\"}, \"Name\": {\"S\": \"Fido\"} } } },\n        { \"Delete\": { \"TableName\": \"Strays\", 
\"Key\": { \"AnimalType\": {\"S\": \"Dog\"}, \"Name\": {\"S\": \"Fido\"} } } },\n        { \"Put\": { \"TableName\": \"Pets\", \"Key\": { \"AnimalType\": {\"S\": \"Cat\"}, \"Name\": {\"S\": \"Max\"} } } },\n        { \"Delete\": { \"TableName\": \"Strays\", \"Key\": { \"AnimalType\": {\"S\": \"Cat\"}, \"Name\": {\"S\": \"Max\"} } } }\n      ]\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(json_string);\n\n    for (const std::string& operation : supported_transact_operations) {\n      RequestParser::TableDescriptor table = RequestParser::parseTable(operation, *json_data);\n      EXPECT_EQ(\"\", table.table_name);\n      EXPECT_FALSE(table.is_single_table);\n    }\n  }\n\n  // testing missing table\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"TransactItems\": [\n        { \"Put\": { \"TableName\": \"\" } },\n        { \"Delete\": { \"TableName\": \"Strays\", \"Key\": { \"AnimalType\": {\"S\": \"Dog\"}, \"Name\": {\"S\": \"Fido\"} } } },\n        { \"Put\": { \"TableName\": \"Pets\", \"Key\": { \"AnimalType\": {\"S\": \"Cat\"}, \"Name\": {\"S\": \"Max\"} } } },\n        { \"Delete\": { \"TableName\": \"Strays\", \"Key\": { \"AnimalType\": {\"S\": \"Cat\"}, \"Name\": {\"S\": \"Max\"} } } }\n      ]\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(json_string);\n\n    for (const std::string& operation : supported_transact_operations) {\n      RequestParser::TableDescriptor table = RequestParser::parseTable(operation, *json_data);\n      EXPECT_EQ(\"\", table.table_name);\n      EXPECT_TRUE(table.is_single_table);\n    }\n  }\n}\n\nTEST(DynamoRequestParser, parseErrorType) {\n  {\n    EXPECT_EQ(\"ResourceNotFoundException\",\n              RequestParser::parseErrorType(*Json::Factory::loadFromString(\n                  \"{\\\"__type\\\":\\\"com.amazonaws.dynamodb.v20120810#ResourceNotFoundException\\\"}\")));\n  }\n\n  {\n    EXPECT_EQ(\"ResourceNotFoundException\",\n         
     RequestParser::parseErrorType(*Json::Factory::loadFromString(\n                  \"{\\\"__type\\\":\\\"com.amazonaws.dynamodb.v20120810#ResourceNotFoundException\\\",\"\n                  \"\\\"message\\\":\\\"Requested resource not found: Table: tablename not found\\\"}\")));\n  }\n\n  {\n    EXPECT_EQ(\"\", RequestParser::parseErrorType(\n                      *Json::Factory::loadFromString(\"{\\\"__type\\\":\\\"UnKnownError\\\"}\")));\n  }\n}\n\nTEST(DynamoRequestParser, parseTableNameBatchOperation) {\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"RequestItems\": {\n        \"table_1\": { \"test1\" : \"something\" },\n        \"table_2\": { \"test2\" : \"something\" }\n      }\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(json_string);\n\n    RequestParser::TableDescriptor table = RequestParser::parseTable(\"BatchGetItem\", *json_data);\n    EXPECT_EQ(\"\", table.table_name);\n    EXPECT_FALSE(table.is_single_table);\n  }\n\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"RequestItems\": {\n        \"table_2\": { \"test1\" : \"something\" },\n        \"table_2\": { \"test2\" : \"something\" }\n      }\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(json_string);\n\n    RequestParser::TableDescriptor table = RequestParser::parseTable(\"BatchGetItem\", *json_data);\n    EXPECT_EQ(\"table_2\", table.table_name);\n    EXPECT_TRUE(table.is_single_table);\n  }\n\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"RequestItems\": {\n        \"table_2\": { \"test1\" : \"something\" },\n        \"table_2\": { \"test2\" : \"something\" },\n        \"table_3\": { \"test3\" : \"something\" }\n      }\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(json_string);\n\n    RequestParser::TableDescriptor table = RequestParser::parseTable(\"BatchGetItem\", *json_data);\n    EXPECT_EQ(\"\", table.table_name);\n    
EXPECT_FALSE(table.is_single_table);\n  }\n\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"RequestItems\": {\n        \"table_2\": { \"test1\" : \"something\" },\n        \"table_2\": { \"test2\" : \"something\" }\n      }\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(json_string);\n\n    RequestParser::TableDescriptor table = RequestParser::parseTable(\"BatchWriteItem\", *json_data);\n    EXPECT_EQ(\"table_2\", table.table_name);\n    EXPECT_TRUE(table.is_single_table);\n  }\n\n  {\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(\"{}\");\n    RequestParser::TableDescriptor table =\n        RequestParser::parseTable(\"BatchWriteItem\", *Json::Factory::loadFromString(\"{}\"));\n    EXPECT_EQ(\"\", table.table_name);\n    EXPECT_TRUE(table.is_single_table);\n  }\n\n  {\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(\"{\\\"RequestItems\\\":{}}\");\n    RequestParser::TableDescriptor table = RequestParser::parseTable(\"BatchWriteItem\", *json_data);\n    EXPECT_EQ(\"\", table.table_name);\n    EXPECT_TRUE(table.is_single_table);\n  }\n\n  {\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(\"{}\");\n    RequestParser::TableDescriptor table = RequestParser::parseTable(\"BatchGetItem\", *json_data);\n    EXPECT_EQ(\"\", table.table_name);\n    EXPECT_TRUE(table.is_single_table);\n  }\n}\nTEST(DynamoRequestParser, parseBatchUnProcessedKeys) {\n  {\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(\"{}\");\n    std::vector<std::string> unprocessed_tables =\n        RequestParser::parseBatchUnProcessedKeys(*json_data);\n    EXPECT_EQ(0u, unprocessed_tables.size());\n  }\n  {\n    std::vector<std::string> unprocessed_tables = RequestParser::parseBatchUnProcessedKeys(\n        *Json::Factory::loadFromString(\"{\\\"UnprocessedKeys\\\":{}}\"));\n    EXPECT_EQ(0u, unprocessed_tables.size());\n  }\n\n  {\n    std::vector<std::string> 
unprocessed_tables = RequestParser::parseBatchUnProcessedKeys(\n        *Json::Factory::loadFromString(R\"({\"UnprocessedKeys\":{\"table_1\" :{}}})\"));\n    EXPECT_EQ(\"table_1\", unprocessed_tables[0]);\n    EXPECT_EQ(1u, unprocessed_tables.size());\n  }\n\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"UnprocessedKeys\": {\n        \"table_1\": { \"test1\" : \"something\" },\n        \"table_2\": { \"test2\" : \"something\" }\n      }\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = Json::Factory::loadFromString(json_string);\n\n    std::vector<std::string> unprocessed_tables =\n        RequestParser::parseBatchUnProcessedKeys(*json_data);\n    EXPECT_TRUE(find(unprocessed_tables.begin(), unprocessed_tables.end(), \"table_1\") !=\n                unprocessed_tables.end());\n    EXPECT_TRUE(find(unprocessed_tables.begin(), unprocessed_tables.end(), \"table_2\") !=\n                unprocessed_tables.end());\n    EXPECT_EQ(2u, unprocessed_tables.size());\n  }\n}\n\nTEST(DynamoRequestParser, parsePartitionIds) {\n  {\n    std::vector<RequestParser::PartitionDescriptor> partitions =\n        RequestParser::parsePartitions(*Json::Factory::loadFromString(\"{}\"));\n    EXPECT_EQ(0u, partitions.size());\n  }\n  {\n    std::vector<RequestParser::PartitionDescriptor> partitions =\n        RequestParser::parsePartitions(*Json::Factory::loadFromString(\"{\\\"ConsumedCapacity\\\":{}}\"));\n    EXPECT_EQ(0u, partitions.size());\n  }\n  {\n    std::vector<RequestParser::PartitionDescriptor> partitions = RequestParser::parsePartitions(\n        *Json::Factory::loadFromString(R\"({\"ConsumedCapacity\":{ \"Partitions\":{}}})\"));\n    EXPECT_EQ(0u, partitions.size());\n  }\n  {\n    std::string json_string = R\"EOF(\n    {\n      \"ConsumedCapacity\": {\n        \"Partitions\": {\n          \"partition_1\" : 0.5,\n          \"partition_2\" : 3.0\n        }\n      }\n    }\n    )EOF\";\n    Json::ObjectSharedPtr json_data = 
Json::Factory::loadFromString(json_string);\n\n    std::vector<RequestParser::PartitionDescriptor> partitions =\n        RequestParser::parsePartitions(*json_data);\n    for (const RequestParser::PartitionDescriptor& partition : partitions) {\n      if (partition.partition_id_ == \"partition_1\") {\n        EXPECT_EQ(1u, partition.capacity_);\n      } else {\n        EXPECT_EQ(3u, partition.capacity_);\n      }\n    }\n    EXPECT_EQ(2u, partitions.size());\n  }\n}\n\n} // namespace\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/dynamo/dynamo_stats_test.cc",
    "content": "#include <string>\n\n#include \"extensions/filters/http/dynamo/dynamo_stats.h\"\n\n#include \"test/mocks/stats/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Dynamo {\nnamespace {\n\nTEST(DynamoStats, PartitionIdStatString) {\n  Stats::IsolatedStoreImpl store;\n  auto build_partition_string =\n      [&store](const std::string& stat_prefix, const std::string& table_name,\n               const std::string& operation, const std::string& partition_id) -> std::string {\n    DynamoStats stats(store, stat_prefix);\n    Stats::Counter& counter = stats.buildPartitionStatCounter(table_name, operation, partition_id);\n    return counter.name();\n  };\n\n  {\n    std::string stats_prefix = \"prefix.\";\n    std::string table_name = \"locations\";\n    std::string operation = \"GetItem\";\n    std::string partition_id = \"6235c781-1d0d-47a3-a4ea-eec04c5883ca\";\n    std::string partition_stat_string =\n        build_partition_string(stats_prefix, table_name, operation, partition_id);\n    std::string expected_stat_string =\n        \"prefix.dynamodb.table.locations.capacity.GetItem.__partition_id=c5883ca\";\n    EXPECT_EQ(expected_stat_string, partition_stat_string);\n  }\n\n  {\n    std::string stats_prefix = \"http.egress_dynamodb_iad.\";\n    std::string table_name = \"locations-sandbox-partition-test-iad-mytest-really-long-name\";\n    std::string operation = \"GetItem\";\n    std::string partition_id = \"6235c781-1d0d-47a3-a4ea-eec04c5883ca\";\n\n    std::string partition_stat_string =\n        build_partition_string(stats_prefix, table_name, operation, partition_id);\n    std::string expected_stat_string =\n        \"http.egress_dynamodb_iad.dynamodb.table.locations-sandbox-partition-test-iad-mytest-\"\n        \"really-long-name.capacity.GetItem.__partition_id=c5883ca\";\n    EXPECT_EQ(expected_stat_string, partition_stat_string);\n  }\n  {\n    
std::string stats_prefix = \"http.egress_dynamodb_iad.\";\n    std::string table_name = \"locations-sandbox-partition-test-iad-mytest-rea\";\n    std::string operation = \"GetItem\";\n    std::string partition_id = \"6235c781-1d0d-47a3-a4ea-eec04c5883ca\";\n\n    std::string partition_stat_string =\n        build_partition_string(stats_prefix, table_name, operation, partition_id);\n    std::string expected_stat_string = \"http.egress_dynamodb_iad.dynamodb.table.locations-sandbox-\"\n                                       \"partition-test-iad-mytest-rea.capacity.GetItem.__partition_\"\n                                       \"id=c5883ca\";\n\n    EXPECT_EQ(expected_stat_string, partition_stat_string);\n  }\n}\n\n} // namespace\n} // namespace Dynamo\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/ext_authz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"ext_authz_test\",\n    srcs = [\"ext_authz_test.cc\"],\n    extension_name = \"envoy.filters.http.ext_authz\",\n    deps = [\n        \"//include/envoy/http:codes_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib\",\n        \"//source/extensions/filters/http/ext_authz\",\n        \"//test/extensions/filters/common/ext_authz:ext_authz_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.ext_authz\",\n    deps = [\n        \"//source/extensions/filters/http/ext_authz:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        
\"@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"ext_authz_integration_test\",\n    srcs = [\"ext_authz_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.ext_authz\",\n    deps = [\n        \"//source/extensions/filters/http/ext_authz:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/auth/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/ext_authz/config_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.validate.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"extensions/filters/http/ext_authz/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace ExtAuthz {\nnamespace {\n\nvoid expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion api_version) {\n  std::string yaml = R\"EOF(\n  grpc_service:\n    google_grpc:\n      target_uri: ext_authz_server\n      stat_prefix: google\n  failure_mode_allow: false\n  transport_api_version: {}\n  )EOF\";\n\n  ExtAuthzFilterConfig factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto();\n  TestUtility::loadFromYaml(\n      fmt::format(yaml, TestUtility::getVersionStringFromApiVersion(api_version)), *proto_config);\n\n  testing::StrictMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_CALL(context, singletonManager()).Times(1);\n  EXPECT_CALL(context, threadLocal()).Times(1);\n  EXPECT_CALL(context, messageValidationVisitor()).Times(1);\n  EXPECT_CALL(context, clusterManager()).Times(1);\n  EXPECT_CALL(context, runtime()).Times(1);\n  EXPECT_CALL(context, scope()).Times(2);\n  EXPECT_CALL(context.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n        return std::make_unique<NiceMock<Grpc::MockAsyncClientFactory>>();\n      }));\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, 
addStreamDecoderFilter(_));\n  cb(filter_callback);\n}\n\n} // namespace\n\nTEST(HttpExtAuthzConfigTest, CorrectProtoGrpc) {\n  expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::AUTO);\n  expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::V2);\n  expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::V3);\n}\n\nTEST(HttpExtAuthzConfigTest, CorrectProtoHttp) {\n  std::string yaml = R\"EOF(\n  stat_prefix: \"wall\"\n  http_service:\n    server_uri:\n      uri: \"ext_authz:9000\"\n      cluster: \"ext_authz\"\n      timeout: 0.25s\n\n    authorization_request:\n      allowed_headers:\n        patterns:\n        - exact: baz\n        - prefix: x-\n      headers_to_add:\n      - key: foo\n        value: bar\n      - key: bar\n        value: foo\n\n    authorization_response:\n      allowed_upstream_headers:\n        patterns:\n        - exact: baz\n        - prefix: x-success\n      allowed_client_headers:\n        patterns:\n        - exact: baz\n        - prefix: x-fail\n      allowed_upstream_headers_to_append:\n        patterns:\n        - exact: baz-append\n        - prefix: x-append\n\n    path_prefix: /extauth\n\n  failure_mode_allow: true\n  with_request_body:\n    max_request_bytes: 100\n    pack_as_bytes: true\n  )EOF\";\n\n  ExtAuthzFilterConfig factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto();\n  TestUtility::loadFromYaml(yaml, *proto_config);\n  testing::StrictMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_CALL(context, messageValidationVisitor()).Times(1);\n  EXPECT_CALL(context, clusterManager()).Times(1);\n  EXPECT_CALL(context, runtime()).Times(1);\n  EXPECT_CALL(context, scope()).Times(1);\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, \"stats\", context);\n  testing::StrictMock<Http::MockFilterChainFactoryCallbacks> filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_));\n  cb(filter_callback);\n}\n\n// Test that the 
deprecated extension name still functions.\nTEST(HttpExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.ext_authz\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace ExtAuthz\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n\n#include \"common/common/macros.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_format.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::AssertionResult;\nusing testing::Not;\nusing testing::TestWithParam;\nusing testing::ValuesIn;\n\nnamespace Envoy {\n\nusing Headers = std::vector<std::pair<const std::string, const std::string>>;\n\nvoid setMeasureTimeoutOnCheckCreated(ConfigHelper& config_helper, bool timeout_on_check) {\n  if (timeout_on_check) {\n    config_helper.addRuntimeOverride(\n        \"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\", \"true\");\n  } else {\n    config_helper.addRuntimeOverride(\n        \"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created\", \"false\");\n  }\n}\n\nclass ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest,\n                                    public HttpIntegrationTest {\npublic:\n  ExtAuthzGrpcIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {}\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void initializeConfig(bool with_timeout = false, bool disable_with_metadata = false) {\n    config_helper_.addConfigModifier([this, with_timeout, disable_with_metadata](\n                                         envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* ext_authz_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      
ext_authz_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      ext_authz_cluster->set_name(\"ext_authz\");\n      ext_authz_cluster->mutable_http2_protocol_options();\n\n      TestUtility::loadFromYaml(base_filter_config_, proto_config_);\n      setGrpcService(*proto_config_.mutable_grpc_service(), \"ext_authz\",\n                     fake_upstreams_.back()->localAddress());\n\n      if (with_timeout) {\n        proto_config_.mutable_grpc_service()->mutable_timeout()->CopyFrom(\n            Protobuf::util::TimeUtil::MillisecondsToDuration(1));\n      }\n\n      proto_config_.mutable_filter_enabled()->set_runtime_key(\"envoy.ext_authz.enable\");\n      proto_config_.mutable_filter_enabled()->mutable_default_value()->set_numerator(100);\n      if (disable_with_metadata) {\n        // Disable the ext_authz filter with metadata matcher that never matches.\n        auto* metadata = proto_config_.mutable_filter_enabled_metadata();\n        metadata->set_filter(\"xyz.abc\");\n        metadata->add_path()->set_key(\"k1\");\n        metadata->mutable_value()->mutable_string_match()->set_exact(\"never_matched\");\n      }\n      proto_config_.mutable_deny_at_disable()->set_runtime_key(\"envoy.ext_authz.deny_at_disable\");\n      proto_config_.mutable_deny_at_disable()->mutable_default_value()->set_value(false);\n      proto_config_.set_transport_api_version(apiVersion());\n\n      envoy::config::listener::v3::Filter ext_authz_filter;\n      ext_authz_filter.set_name(Extensions::HttpFilters::HttpFilterNames::get().ExtAuthorization);\n      ext_authz_filter.mutable_typed_config()->PackFrom(proto_config_);\n      config_helper_.addFilter(MessageUtil::getJsonStringFromMessage(ext_authz_filter));\n    });\n  }\n\n  void setDenyAtDisableRuntimeConfig(bool deny_at_disable, bool disable_with_metadata) {\n    if (!disable_with_metadata) {\n      config_helper_.addRuntimeOverride(\"envoy.ext_authz.enable\", \"numerator: 0\");\n    }\n    if (deny_at_disable) {\n      
config_helper_.addRuntimeOverride(\"envoy.ext_authz.deny_at_disable\", \"true\");\n    } else {\n      config_helper_.addRuntimeOverride(\"envoy.ext_authz.deny_at_disable\", \"false\");\n    }\n  }\n\n  void initiateClientConnection(uint64_t request_body_length,\n                                const Headers& headers_to_add = Headers{},\n                                const Headers& headers_to_append = Headers{},\n                                const Headers& headers_to_remove = Headers{}) {\n    auto conn = makeClientConnection(lookupPort(\"http\"));\n    codec_client_ = makeHttpConnection(std::move(conn));\n    Http::TestRequestHeaderMapImpl headers{\n        {\":method\", \"POST\"}, {\":path\", \"/test\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n\n    // Initialize headers to append. If the authorization server returns any matching keys with one\n    // of value in headers_to_add, the header entry from authorization server replaces the one in\n    // headers_to_add.\n    for (const auto& header_to_add : headers_to_add) {\n      headers.addCopy(header_to_add.first, header_to_add.second);\n    }\n\n    // Initialize headers to append. If the authorization server returns any matching keys with one\n    // of value in headers_to_append, it will be appended.\n    for (const auto& headers_to_append : headers_to_append) {\n      headers.addCopy(headers_to_append.first, headers_to_append.second);\n    }\n\n    // Initialize headers to be removed. 
If the authorization server returns any of\n    // these as a header to remove, it will be removed.\n    for (const auto& header_to_remove : headers_to_remove) {\n      headers.addCopy(header_to_remove.first, header_to_remove.second);\n    }\n\n    TestUtility::feedBufferWithRandomCharacters(request_body_, request_body_length);\n    response_ = codec_client_->makeRequestWithBody(headers, request_body_.toString());\n  }\n\n  void waitForExtAuthzRequest(const std::string& expected_check_request_yaml) {\n    AssertionResult result =\n        fake_upstreams_.back()->waitForHttpConnection(*dispatcher_, fake_ext_authz_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = fake_ext_authz_connection_->waitForNewStream(*dispatcher_, ext_authz_request_);\n    RELEASE_ASSERT(result, result.message());\n\n    // Check for the validity of the received CheckRequest.\n    envoy::service::auth::v3::CheckRequest check_request;\n    result = ext_authz_request_->waitForGrpcMessage(*dispatcher_, check_request);\n    RELEASE_ASSERT(result, result.message());\n\n    EXPECT_EQ(\"POST\", ext_authz_request_->headers().getMethodValue());\n    EXPECT_EQ(TestUtility::getVersionedMethodPath(\"envoy.service.auth.{}.Authorization\", \"Check\",\n                                                  apiVersion()),\n              ext_authz_request_->headers().getPathValue());\n    EXPECT_EQ(\"application/grpc\", ext_authz_request_->headers().getContentTypeValue());\n\n    envoy::service::auth::v3::CheckRequest expected_check_request;\n    TestUtility::loadFromYaml(expected_check_request_yaml, expected_check_request);\n\n    auto* attributes = check_request.mutable_attributes();\n    auto* http_request = attributes->mutable_request()->mutable_http();\n\n    EXPECT_TRUE(attributes->request().has_time());\n\n    // Clear fields which are not relevant.\n    attributes->clear_source();\n    attributes->clear_destination();\n    attributes->clear_metadata_context();\n    
attributes->mutable_request()->clear_time();\n    http_request->clear_id();\n    http_request->clear_headers();\n    http_request->clear_scheme();\n\n    EXPECT_EQ(check_request.DebugString(), expected_check_request.DebugString());\n\n    result = ext_authz_request_->waitForEndStream(*dispatcher_);\n    RELEASE_ASSERT(result, result.message());\n  }\n\n  void waitForSuccessfulUpstreamResponse(\n      const std::string& expected_response_code, const Headers& headers_to_add = Headers{},\n      const Headers& headers_to_append = Headers{}, const Headers& headers_to_remove = Headers{},\n      const Http::TestRequestHeaderMapImpl& new_headers_from_upstream =\n          Http::TestRequestHeaderMapImpl{},\n      const Http::TestRequestHeaderMapImpl& headers_to_append_multiple =\n          Http::TestRequestHeaderMapImpl{}) {\n    AssertionResult result =\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_);\n    RELEASE_ASSERT(result, result.message());\n    result = upstream_request_->waitForEndStream(*dispatcher_);\n    RELEASE_ASSERT(result, result.message());\n\n    upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n    upstream_request_->encodeData(response_size_, true);\n\n    for (const auto& header_to_add : headers_to_add) {\n      EXPECT_THAT(upstream_request_->headers(),\n                  Http::HeaderValueOf(header_to_add.first, header_to_add.second));\n      // For headers_to_add (with append = false), the original request headers have no \"-replaced\"\n      // suffix, but the ones from the authorization server have it.\n      EXPECT_TRUE(absl::EndsWith(header_to_add.second, \"-replaced\"));\n    }\n\n    for (const auto& header_to_append : headers_to_append) {\n      // The current behavior of appending is using the \"appendCopy\", which 
ALWAYS combines entries\n      // with the same key into one key, and the values are separated by \",\" (regardless it is an\n      // inline-header or not). In addition to that, it only applies to the existing headers (the\n      // header is existed in the original request headers).\n      EXPECT_THAT(\n          upstream_request_->headers(),\n          Http::HeaderValueOf(\n              header_to_append.first,\n              // In this test, the keys and values of the original request headers have the same\n              // string value. Hence for \"header2\" key, the value is \"header2,header2-appended\".\n              absl::StrCat(header_to_append.first, \",\", header_to_append.second)));\n      const auto value = upstream_request_->headers()\n                             .get(Http::LowerCaseString(header_to_append.first))\n                             ->value()\n                             .getStringView();\n      EXPECT_TRUE(absl::EndsWith(value, \"-appended\"));\n      const auto values = StringUtil::splitToken(value, \",\");\n      EXPECT_EQ(2, values.size());\n    }\n\n    if (!new_headers_from_upstream.empty()) {\n      // new_headers_from_upstream has append = true. The current implementation ignores to set\n      // multiple headers that are not present in the original request headers. 
In order to add\n      // headers with the same key multiple times, setting response headers with append = false and\n      // append = true is required.\n      EXPECT_THAT(new_headers_from_upstream,\n                  Not(Http::IsSubsetOfHeaders(upstream_request_->headers())));\n    }\n\n    if (!headers_to_append_multiple.empty()) {\n      // headers_to_append_multiple has append = false for the first entry of multiple entries, and\n      // append = true for the rest entries.\n      EXPECT_THAT(upstream_request_->headers(),\n                  Http::HeaderValueOf(\"multiple\", \"multiple-first,multiple-second\"));\n    }\n\n    for (const auto& header_to_remove : headers_to_remove) {\n      // The headers that were originally present in the request have now been removed.\n      EXPECT_EQ(upstream_request_->headers().get(Http::LowerCaseString{header_to_remove.first}),\n                nullptr);\n    }\n\n    response_->waitForEndStream();\n\n    EXPECT_TRUE(upstream_request_->complete());\n    EXPECT_EQ(request_body_.length(), upstream_request_->bodyLength());\n\n    EXPECT_TRUE(response_->complete());\n    EXPECT_EQ(expected_response_code, response_->headers().getStatusValue());\n    EXPECT_EQ(response_size_, response_->body().size());\n  }\n\n  void sendExtAuthzResponse(const Headers& headers_to_add, const Headers& headers_to_append,\n                            const Headers& headers_to_remove,\n                            const Http::TestRequestHeaderMapImpl& new_headers_from_upstream,\n                            const Http::TestRequestHeaderMapImpl& headers_to_append_multiple) {\n    ext_authz_request_->startGrpcStream();\n    envoy::service::auth::v3::CheckResponse check_response;\n    check_response.mutable_status()->set_code(Grpc::Status::WellKnownGrpcStatus::Ok);\n\n    for (const auto& header_to_add : headers_to_add) {\n      auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add();\n      entry->mutable_append()->set_value(false);\n 
     entry->mutable_header()->set_key(header_to_add.first);\n      entry->mutable_header()->set_value(header_to_add.second);\n    }\n\n    for (const auto& header_to_append : headers_to_append) {\n      auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add();\n      entry->mutable_append()->set_value(true);\n      entry->mutable_header()->set_key(header_to_append.first);\n      entry->mutable_header()->set_value(header_to_append.second);\n    }\n\n    for (const auto& header_to_remove : headers_to_remove) {\n      auto* entry = check_response.mutable_ok_response()->mutable_headers_to_remove();\n      entry->Add(std::string(header_to_remove.first));\n    }\n\n    // Entries in this headers are not present in the original request headers.\n    new_headers_from_upstream.iterate(\n        [&check_response](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate {\n          auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add();\n          // Try to append to a non-existent field.\n          entry->mutable_append()->set_value(true);\n          entry->mutable_header()->set_key(std::string(h.key().getStringView()));\n          entry->mutable_header()->set_value(std::string(h.value().getStringView()));\n          return Http::HeaderMap::Iterate::Continue;\n        });\n\n    // Entries in this headers are not present in the original request headers. 
But we set append =\n    // true and append = false.\n    headers_to_append_multiple.iterate(\n        [&check_response](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate {\n          auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add();\n          const auto key = std::string(h.key().getStringView());\n          const auto value = std::string(h.value().getStringView());\n\n          // This scenario makes sure we have set the headers to be appended later.\n          entry->mutable_append()->set_value(!absl::EndsWith(value, \"-first\"));\n          entry->mutable_header()->set_key(key);\n          entry->mutable_header()->set_value(value);\n          return Http::HeaderMap::Iterate::Continue;\n        });\n\n    ext_authz_request_->sendGrpcMessage(check_response);\n    ext_authz_request_->finishGrpcStream(Grpc::Status::Ok);\n  }\n\n  const std::string expectedRequestBody() {\n    const std::string request_body_string = request_body_.toString();\n    const uint64_t request_body_length = request_body_.length();\n    return request_body_length > max_request_bytes_\n               ? request_body_string.substr(0, max_request_bytes_)\n               : request_body_string;\n  }\n\n  void cleanup() {\n    if (fake_ext_authz_connection_ != nullptr) {\n      AssertionResult result = fake_ext_authz_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n    }\n    cleanupUpstreamAndDownstream();\n  }\n\n  const std::string expectedCheckRequest(Http::CodecClient::Type downstream_protocol) {\n    const std::string expected_downstream_protocol =\n        downstream_protocol == Http::CodecClient::Type::HTTP1 ? 
\"HTTP/1.1\" : \"HTTP/2\";\n    constexpr absl::string_view expected_format = R\"EOF(\nattributes:\n  request:\n    http:\n      method: POST\n      path: /test\n      host: host\n      size: \"%d\"\n      body: \"%s\"\n      protocol: %s\n)EOF\";\n\n    return absl::StrFormat(expected_format, request_body_.length(), expectedRequestBody(),\n                           expected_downstream_protocol);\n  }\n\n  void expectCheckRequestWithBody(Http::CodecClient::Type downstream_protocol,\n                                  uint64_t request_size) {\n    expectCheckRequestWithBodyWithHeaders(downstream_protocol, request_size, Headers{}, Headers{},\n                                          Headers{}, Http::TestRequestHeaderMapImpl{},\n                                          Http::TestRequestHeaderMapImpl{});\n  }\n\n  void expectCheckRequestWithBodyWithHeaders(\n      Http::CodecClient::Type downstream_protocol, uint64_t request_size,\n      const Headers& headers_to_add, const Headers& headers_to_append,\n      const Headers& headers_to_remove,\n      const Http::TestRequestHeaderMapImpl& new_headers_from_upstream,\n      const Http::TestRequestHeaderMapImpl& headers_to_append_multiple) {\n    initializeConfig();\n    setDownstreamProtocol(downstream_protocol);\n    HttpIntegrationTest::initialize();\n    initiateClientConnection(request_size, headers_to_add, headers_to_append, headers_to_remove);\n    waitForExtAuthzRequest(expectedCheckRequest(downstream_protocol));\n\n    Headers updated_headers_to_add;\n    for (auto& header_to_add : headers_to_add) {\n      updated_headers_to_add.push_back(\n          std::make_pair(header_to_add.first, header_to_add.second + \"-replaced\"));\n    }\n    Headers updated_headers_to_append;\n    for (const auto& header_to_append : headers_to_append) {\n      updated_headers_to_append.push_back(\n          std::make_pair(header_to_append.first, header_to_append.second + \"-appended\"));\n    }\n    
sendExtAuthzResponse(updated_headers_to_add, updated_headers_to_append, headers_to_remove,\n                         new_headers_from_upstream, headers_to_append_multiple);\n\n    waitForSuccessfulUpstreamResponse(\"200\", updated_headers_to_add, updated_headers_to_append,\n                                      headers_to_remove, new_headers_from_upstream,\n                                      headers_to_append_multiple);\n    cleanup();\n  }\n\n  void initiateAndWait() {\n    initiateClientConnection(4);\n    response_->waitForEndStream();\n  }\n\n  void expectCheckRequestTimedout(bool timeout_on_check) {\n    setMeasureTimeoutOnCheckCreated(this->config_helper_, timeout_on_check);\n    initializeConfig(true);\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n    HttpIntegrationTest::initialize();\n    initiateAndWait();\n    if (timeout_on_check) {\n      uint32_t timeouts = test_server_->counter(\"http.config_test.ext_authz.timeout\")->value();\n      EXPECT_EQ(1U, timeouts);\n    }\n\n    EXPECT_TRUE(response_->complete());\n    EXPECT_EQ(\"403\", response_->headers().getStatusValue());\n\n    cleanup();\n  }\n\n  void expectFilterDisableCheck(bool deny_at_disable, bool disable_with_metadata,\n                                const std::string& expected_status) {\n    initializeConfig(false, disable_with_metadata);\n    setDenyAtDisableRuntimeConfig(deny_at_disable, disable_with_metadata);\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n    HttpIntegrationTest::initialize();\n    initiateClientConnection(4);\n    if (!deny_at_disable) {\n      waitForSuccessfulUpstreamResponse(expected_status);\n    }\n    cleanup();\n  }\n\n  FakeHttpConnectionPtr fake_ext_authz_connection_;\n  FakeStreamPtr ext_authz_request_;\n  IntegrationStreamDecoderPtr response_;\n\n  Buffer::OwnedImpl request_body_;\n  const uint64_t response_size_ = 512;\n  const uint64_t max_request_bytes_ = 1024;\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthz 
proto_config_{};\n  const std::string base_filter_config_ = R\"EOF(\n    with_request_body:\n      max_request_bytes: 1024\n      allow_partial_message: true\n  )EOF\";\n};\n\nclass ExtAuthzHttpIntegrationTest : public HttpIntegrationTest,\n                                    public TestWithParam<Network::Address::IpVersion> {\npublic:\n  ExtAuthzHttpIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    addFakeUpstream(FakeHttpConnection::Type::HTTP1);\n  }\n\n  // By default, HTTP Service uses case sensitive string matcher.\n  void disableCaseSensitiveStringMatcher() {\n    config_helper_.addRuntimeOverride(\n        \"envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher\",\n        \"false\");\n  }\n\n  void initiateClientConnection() {\n    auto conn = makeClientConnection(lookupPort(\"http\"));\n    codec_client_ = makeHttpConnection(std::move(conn));\n    response_ = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n        {\":method\", \"GET\"},\n        {\":path\", \"/\"},\n        {\":scheme\", \"http\"},\n        {\":authority\", \"host\"},\n        {\"x-case-sensitive-header\", case_sensitive_header_value_},\n        {\"baz\", \"foo\"},\n        {\"bat\", \"foo\"},\n        {\"remove-me\", \"upstream-should-not-see-me\"},\n    });\n  }\n\n  void waitForExtAuthzRequest() {\n    AssertionResult result =\n        fake_upstreams_.back()->waitForHttpConnection(*dispatcher_, fake_ext_authz_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = fake_ext_authz_connection_->waitForNewStream(*dispatcher_, ext_authz_request_);\n    RELEASE_ASSERT(result, result.message());\n    result = ext_authz_request_->waitForEndStream(*dispatcher_);\n    RELEASE_ASSERT(result, result.message());\n\n    // Send back authorization response with \"baz\" and \"bat\" headers.\n    // Also 
add multiple values \"append-foo\" and \"append-bar\" for key \"x-append-bat\".\n    // Also tell Envoy to remove \"remove-me\" header before sending to upstream.\n    Http::TestResponseHeaderMapImpl response_headers{\n        {\":status\", \"200\"},\n        {\"baz\", \"baz\"},\n        {\"bat\", \"bar\"},\n        {\"x-append-bat\", \"append-foo\"},\n        {\"x-append-bat\", \"append-bar\"},\n        {\"x-envoy-auth-headers-to-remove\", \"remove-me\"},\n    };\n    ext_authz_request_->encodeHeaders(response_headers, true);\n  }\n\n  void cleanup() {\n    if (fake_ext_authz_connection_ != nullptr) {\n      AssertionResult result = fake_ext_authz_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_ext_authz_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n    cleanupUpstreamAndDownstream();\n  }\n  void initializeConfig(bool with_timeout = false) {\n    config_helper_.addConfigModifier([this, with_timeout](\n                                         envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* ext_authz_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      ext_authz_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      ext_authz_cluster->set_name(\"ext_authz\");\n\n      TestUtility::loadFromYaml(default_config_, proto_config_);\n      if (with_timeout) {\n        proto_config_.mutable_http_service()->mutable_server_uri()->mutable_timeout()->CopyFrom(\n            Protobuf::util::TimeUtil::MillisecondsToDuration(1));\n        proto_config_.clear_failure_mode_allow();\n      }\n      envoy::config::listener::v3::Filter ext_authz_filter;\n      ext_authz_filter.set_name(Extensions::HttpFilters::HttpFilterNames::get().ExtAuthorization);\n      ext_authz_filter.mutable_typed_config()->PackFrom(proto_config_);\n\n      config_helper_.addFilter(MessageUtil::getJsonStringFromMessage(ext_authz_filter));\n    });\n  }\n\n  void 
setupWithDisabledCaseSensitiveStringMatcher(bool disable_case_sensitive_matcher) {\n    initializeConfig();\n\n    if (disable_case_sensitive_matcher) {\n      disableCaseSensitiveStringMatcher();\n    }\n\n    HttpIntegrationTest::initialize();\n\n    initiateClientConnection();\n    waitForExtAuthzRequest();\n\n    AssertionResult result =\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_);\n    RELEASE_ASSERT(result, result.message());\n    result = upstream_request_->waitForEndStream(*dispatcher_);\n    RELEASE_ASSERT(result, result.message());\n\n    // The original client request header value of \"baz\" is \"foo\". Since we configure to \"override\"\n    // the value of \"baz\", we expect the request headers to be sent to upstream contain only one\n    // \"baz\" with value \"baz\" (set by the authorization server).\n    EXPECT_THAT(upstream_request_->headers(), Http::HeaderValueOf(\"baz\", \"baz\"));\n\n    // The original client request header value of \"bat\" is \"foo\". Since we configure to \"append\"\n    // the value of \"bat\", we expect the request headers to be sent to upstream contain two \"bat\"s,\n    // with values: \"foo\" and \"bar\" (the \"bat: bar\" header is appended by the authorization server).\n    const auto& request_existed_headers =\n        Http::TestRequestHeaderMapImpl{{\"bat\", \"foo\"}, {\"bat\", \"bar\"}};\n    EXPECT_THAT(request_existed_headers, Http::IsSubsetOfHeaders(upstream_request_->headers()));\n\n    // The original client request header does not contain x-append-bat. 
Since we configure to\n    // \"append\" the value of \"x-append-bat\", we expect the headers to be sent to upstream contain\n    // two \"x-append-bat\"s, instead of replacing the first with the last one, with values:\n    // \"append-foo\" and \"append-bar\"\n    const auto& request_nonexisted_headers = Http::TestRequestHeaderMapImpl{\n        {\"x-append-bat\", \"append-foo\"}, {\"x-append-bat\", \"append-bar\"}};\n    EXPECT_THAT(request_nonexisted_headers, Http::IsSubsetOfHeaders(upstream_request_->headers()));\n\n    // The \"remove-me\" header that was present in the downstream request has\n    // been removed by envoy as a result of being present in\n    // \"x-envoy-auth-headers-to-remove\".\n    EXPECT_EQ(upstream_request_->headers().get(Http::LowerCaseString{\"remove-me\"}), nullptr);\n    // \"x-envoy-auth-headers-to-remove\" itself has also been removed because\n    // it's only used for communication between the authorization server and\n    // envoy itself.\n    EXPECT_EQ(\n        upstream_request_->headers().get(Http::LowerCaseString{\"x-envoy-auth-headers-to-remove\"}),\n        nullptr);\n\n    upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n    response_->waitForEndStream();\n    EXPECT_TRUE(response_->complete());\n    EXPECT_EQ(\"200\", response_->headers().getStatusValue());\n\n    cleanup();\n  }\n\n  void initiateAndWait() {\n    initiateClientConnection();\n    response_->waitForEndStream();\n  }\n\n  void expectCheckRequestTimedout(bool timeout_on_check) {\n    setMeasureTimeoutOnCheckCreated(this->config_helper_, timeout_on_check);\n    initializeConfig(true);\n    HttpIntegrationTest::initialize();\n\n    initiateAndWait();\n    if (timeout_on_check) {\n      uint32_t timeouts = test_server_->counter(\"http.config_test.ext_authz.timeout\")->value();\n      EXPECT_EQ(1U, timeouts);\n    }\n\n    EXPECT_TRUE(response_->complete());\n    EXPECT_EQ(\"403\", 
response_->headers().getStatusValue());\n    cleanup();\n  }\n\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config_{};\n  FakeHttpConnectionPtr fake_ext_authz_connection_;\n  FakeStreamPtr ext_authz_request_;\n  IntegrationStreamDecoderPtr response_;\n  const Http::LowerCaseString case_sensitive_header_name_{\"x-case-sensitive-header\"};\n  const std::string case_sensitive_header_value_{\"Case-Sensitive\"};\n  const std::string default_config_ = R\"EOF(\n  http_service:\n    server_uri:\n      uri: \"ext_authz:9000\"\n      cluster: \"ext_authz\"\n      timeout: 300s\n\n    authorization_request:\n      allowed_headers:\n        patterns:\n        - exact: X-Case-Sensitive-Header\n\n    authorization_response:\n      allowed_upstream_headers:\n        patterns:\n        - exact: baz\n        - prefix: x-success\n\n      allowed_upstream_headers_to_append:\n        patterns:\n        - exact: bat\n        - prefix: x-append\n\n  failure_mode_allow: true\n  )EOF\";\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsCientType, ExtAuthzGrpcIntegrationTest,\n                         VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Verifies that the request body is included in the CheckRequest when the downstream protocol is\n// HTTP/1.1.\nTEST_P(ExtAuthzGrpcIntegrationTest, HTTP1DownstreamRequestWithBody) {\n  expectCheckRequestWithBody(Http::CodecClient::Type::HTTP1, 4);\n}\n\n// Verifies that the request body is included in the CheckRequest when the downstream protocol is\n// HTTP/1.1 and the size of the request body is larger than max_request_bytes.\nTEST_P(ExtAuthzGrpcIntegrationTest, HTTP1DownstreamRequestWithLargeBody) {\n  expectCheckRequestWithBody(Http::CodecClient::Type::HTTP1, 2048);\n}\n\n// Verifies that the request body is included in the CheckRequest when the downstream protocol is\n// HTTP/2.\nTEST_P(ExtAuthzGrpcIntegrationTest, HTTP2DownstreamRequestWithBody) {\n  expectCheckRequestWithBody(Http::CodecClient::Type::HTTP2, 4);\n}\n\n// 
Verifies that the request body is included in the CheckRequest when the downstream protocol is\n// HTTP/2 and the size of the request body is larger than max_request_bytes.\nTEST_P(ExtAuthzGrpcIntegrationTest, HTTP2DownstreamRequestWithLargeBody) {\n  expectCheckRequestWithBody(Http::CodecClient::Type::HTTP2, 2048);\n}\n\n// Verifies that the original request headers will be added and appended when the authorization\n// server returns headers_to_add and headers_to_append in OkResponse message.\nTEST_P(ExtAuthzGrpcIntegrationTest, SendHeadersToAddAndToAppendToUpstream) {\n  expectCheckRequestWithBodyWithHeaders(\n      Http::CodecClient::Type::HTTP1, 4,\n      /*headers_to_add=*/Headers{{\"header1\", \"header1\"}},\n      /*headers_to_append=*/Headers{{\"header2\", \"header2\"}},\n      /*headers_to_remove=*/Headers{{\"remove-me\", \"upstream-should-not-see-me\"}},\n      /*new_headers_from_upstream=*/Http::TestRequestHeaderMapImpl{{\"new1\", \"new1\"}},\n      /*headers_to_append_multiple=*/\n      Http::TestRequestHeaderMapImpl{{\"multiple\", \"multiple-first\"},\n                                     {\"multiple\", \"multiple-second\"}});\n}\n\nTEST_P(ExtAuthzGrpcIntegrationTest, CheckTimesOutLegacy) { expectCheckRequestTimedout(false); }\n\nTEST_P(ExtAuthzGrpcIntegrationTest, CheckTimesOutFromCheckCreation) {\n  expectCheckRequestTimedout(true);\n}\n\nTEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisable) {\n  expectFilterDisableCheck(/*deny_at_disable=*/false, /*disable_with_metadata=*/false, \"200\");\n}\n\nTEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisableWithMetadata) {\n  expectFilterDisableCheck(/*deny_at_disable=*/false, /*disable_with_metadata=*/true, \"200\");\n}\n\nTEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisable) {\n  expectFilterDisableCheck(/*deny_at_disable=*/true, /*disable_with_metadata=*/false, \"403\");\n}\n\nTEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisableWithMetadata) {\n  expectFilterDisableCheck(/*deny_at_disable=*/true, 
/*disable_with_metadata=*/true, \"403\");\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ExtAuthzHttpIntegrationTest,\n                         ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verifies that by default HTTP service uses the case-sensitive string matcher.\nTEST_P(ExtAuthzHttpIntegrationTest, DefaultCaseSensitiveStringMatcher) {\n  setupWithDisabledCaseSensitiveStringMatcher(false);\n  const auto* header_entry = ext_authz_request_->headers().get(case_sensitive_header_name_);\n  ASSERT_EQ(header_entry, nullptr);\n}\n\n// Verifies that by setting \"false\" to\n// envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher, the string\n// matcher used by HTTP service will be case-insensitive.\nTEST_P(ExtAuthzHttpIntegrationTest, DisableCaseSensitiveStringMatcher) {\n  setupWithDisabledCaseSensitiveStringMatcher(true);\n  const auto* header_entry = ext_authz_request_->headers().get(case_sensitive_header_name_);\n  ASSERT_NE(header_entry, nullptr);\n  EXPECT_EQ(case_sensitive_header_value_, header_entry->value().getStringView());\n}\n\nTEST_P(ExtAuthzHttpIntegrationTest, CheckTimesOutLegacy) { expectCheckRequestTimedout(false); }\n\nTEST_P(ExtAuthzHttpIntegrationTest, CheckTimesOutFromCheckCreation) {\n  expectCheckRequestTimedout(true);\n}\n\nclass ExtAuthzLocalReplyIntegrationTest : public HttpIntegrationTest,\n                                          public TestWithParam<Network::Address::IpVersion> {\npublic:\n  ExtAuthzLocalReplyIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    addFakeUpstream(FakeHttpConnection::Type::HTTP1);\n  }\n\n  void cleanup() {\n    if (fake_ext_authz_connection_ != nullptr) {\n      AssertionResult result = fake_ext_authz_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      
result = fake_ext_authz_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n    cleanupUpstreamAndDownstream();\n  }\n\n  FakeHttpConnectionPtr fake_ext_authz_connection_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ExtAuthzLocalReplyIntegrationTest,\n                         ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// This integration test uses ext_authz combined with `local_reply_config`.\n// * If ext_authz response status is 401; its response headers and body are sent to the client.\n// * But if `local_reply_config` is specified, the response body and its content-length and type\n//   are controlled by the `local_reply_config`.\n// This integration test verifies that content-type and content-length generated\n// from `local_reply_config` are not overridden by ext_authz response.\nTEST_P(ExtAuthzLocalReplyIntegrationTest, DeniedHeaderTest) {\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* ext_authz_cluster = bootstrap.mutable_static_resources()->add_clusters();\n    ext_authz_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n    ext_authz_cluster->set_name(\"ext_authz\");\n\n    envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config;\n    const std::string ext_authz_config = R\"EOF(\n  http_service:\n    server_uri:\n      uri: \"ext_authz:9000\"\n      cluster: \"ext_authz\"\n      timeout: 300s\n  )EOF\";\n    TestUtility::loadFromYaml(ext_authz_config, proto_config);\n\n    envoy::config::listener::v3::Filter ext_authz_filter;\n    ext_authz_filter.set_name(Extensions::HttpFilters::HttpFilterNames::get().ExtAuthorization);\n    ext_authz_filter.mutable_typed_config()->PackFrom(proto_config);\n    config_helper_.addFilter(MessageUtil::getJsonStringFromMessage(ext_authz_filter));\n  });\n\n  const std::string local_reply_yaml = R\"EOF(\nbody_format:\n  
json_format:\n    code: \"%RESPONSE_CODE%\"\n    message: \"%LOCAL_REPLY_BODY%\"\n  )EOF\";\n  envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig\n      local_reply_config;\n  TestUtility::loadFromYaml(local_reply_yaml, local_reply_config);\n  config_helper_.setLocalReply(local_reply_config);\n\n  HttpIntegrationTest::initialize();\n\n  auto conn = makeClientConnection(lookupPort(\"http\"));\n  codec_client_ = makeHttpConnection(std::move(conn));\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n  });\n\n  AssertionResult result =\n      fake_upstreams_.back()->waitForHttpConnection(*dispatcher_, fake_ext_authz_connection_);\n  RELEASE_ASSERT(result, result.message());\n  FakeStreamPtr ext_authz_request;\n  result = fake_ext_authz_connection_->waitForNewStream(*dispatcher_, ext_authz_request);\n  RELEASE_ASSERT(result, result.message());\n  result = ext_authz_request->waitForEndStream(*dispatcher_);\n  RELEASE_ASSERT(result, result.message());\n\n  Http::TestResponseHeaderMapImpl ext_authz_response_headers{\n      {\":status\", \"401\"},\n      {\"content-type\", \"fake-type\"},\n  };\n  ext_authz_request->encodeHeaders(ext_authz_response_headers, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n\n  EXPECT_EQ(\"401\", response->headers().Status()->value().getStringView());\n  // Without fixing the bug, \"content-type\" and \"content-length\" are overridden by the ext_authz\n  // responses as its \"content-type: fake-type\" and \"content-length: 0\".\n  EXPECT_EQ(\"application/json\", response->headers().ContentType()->value().getStringView());\n  EXPECT_EQ(\"26\", response->headers().ContentLength()->value().getStringView());\n\n  const std::string expected_body = R\"({\n      \"code\": 401,\n      \"message\": \"\"\n})\";\n  
EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), expected_body));\n\n  cleanup();\n}\n\nTEST_P(ExtAuthzGrpcIntegrationTest, GoogleAsyncClientCreation) {\n  initializeConfig();\n  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n  HttpIntegrationTest::initialize();\n  initiateClientConnection(4, Headers{}, Headers{});\n\n  waitForExtAuthzRequest(expectedCheckRequest(Http::CodecClient::Type::HTTP2));\n  if (clientType() == Grpc::ClientType::GoogleGrpc) {\n    // Make sure one Google grpc client is created.\n    EXPECT_EQ(1, test_server_->counter(\"grpc.ext_authz.google_grpc_client_creation\")->value());\n  }\n  sendExtAuthzResponse(Headers{}, Headers{}, Headers{}, Http::TestRequestHeaderMapImpl{},\n                       Http::TestRequestHeaderMapImpl{});\n\n  waitForSuccessfulUpstreamResponse(\"200\");\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\":method\", \"POST\"}, {\":path\", \"/test\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  TestUtility::feedBufferWithRandomCharacters(request_body_, 4);\n  response_ = codec_client_->makeRequestWithBody(headers, request_body_.toString());\n\n  auto result = fake_ext_authz_connection_->waitForNewStream(*dispatcher_, ext_authz_request_);\n  RELEASE_ASSERT(result, result.message());\n\n  envoy::service::auth::v3::CheckRequest check_request;\n  result = ext_authz_request_->waitForGrpcMessage(*dispatcher_, check_request);\n  RELEASE_ASSERT(result, result.message());\n\n  EXPECT_EQ(\"POST\", ext_authz_request_->headers().getMethodValue());\n  EXPECT_EQ(TestUtility::getVersionedMethodPath(\"envoy.service.auth.{}.Authorization\", \"Check\",\n                                                apiVersion()),\n            ext_authz_request_->headers().getPathValue());\n  EXPECT_EQ(\"application/grpc\", ext_authz_request_->headers().getContentTypeValue());\n  result = ext_authz_request_->waitForEndStream(*dispatcher_);\n  RELEASE_ASSERT(result, result.message());\n\n  if (clientType() == 
Grpc::ClientType::GoogleGrpc) {\n    // Make sure one Google grpc client is created.\n    EXPECT_EQ(1, test_server_->counter(\"grpc.ext_authz.google_grpc_client_creation\")->value());\n  }\n  sendExtAuthzResponse(Headers{}, Headers{}, Headers{}, Http::TestRequestHeaderMapImpl{},\n                       Http::TestRequestHeaderMapImpl{});\n\n  result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_);\n  RELEASE_ASSERT(result, result.message());\n  result = upstream_request_->waitForEndStream(*dispatcher_);\n  RELEASE_ASSERT(result, result.message());\n\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request_->encodeData(response_size_, true);\n\n  response_->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(request_body_.length(), upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response_->complete());\n  EXPECT_EQ(\"200\", response_->headers().getStatusValue());\n  EXPECT_EQ(response_size_, response_->body().size());\n\n  cleanup();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/ext_authz/ext_authz_test.cc",
    "content": "#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.validate.h\"\n#include \"envoy/http/codes.h\"\n#include \"envoy/service/auth/v3/external_auth.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/ext_authz/ext_authz.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/extensions/filters/common/ext_authz/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::UnorderedElementsAre;\nusing testing::Values;\nusing testing::WithArgs;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace ExtAuthz {\nnamespace {\n\ntemplate <class T> class HttpFilterTestBase : public T {\npublic:\n  HttpFilterTestBase() : http_context_(stats_store_.symbolTable()) {}\n\n  void initialize(std::string&& yaml) {\n    envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config{};\n    if (!yaml.empty()) {\n      TestUtility::loadFromYaml(yaml, proto_config);\n    }\n    config_.reset(\n        new 
FilterConfig(proto_config, stats_store_, runtime_, http_context_, \"ext_authz_prefix\"));\n    client_ = new Filters::Common::ExtAuthz::MockClient();\n    filter_ = std::make_unique<Filter>(config_, Filters::Common::ExtAuthz::ClientPtr{client_});\n    filter_->setDecoderFilterCallbacks(filter_callbacks_);\n    addr_ = std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 1111);\n  }\n\n  void prepareCheck() {\n    ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n    EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n    EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  }\n\n  NiceMock<Stats::MockIsolatedStatsStore> stats_store_;\n  FilterConfigSharedPtr config_;\n  Filters::Common::ExtAuthz::MockClient* client_;\n  std::unique_ptr<Filter> filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> filter_callbacks_;\n  Filters::Common::ExtAuthz::RequestCallbacks* request_callbacks_;\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Http::TestRequestTrailerMapImpl request_trailers_;\n  Buffer::OwnedImpl data_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  Network::Address::InstanceConstSharedPtr addr_;\n  NiceMock<Envoy::Network::MockConnection> connection_;\n  Http::ContextImpl http_context_;\n};\n\nclass HttpFilterTest : public HttpFilterTestBase<testing::Test> {\npublic:\n  HttpFilterTest() = default;\n};\n\nusing CreateFilterConfigFunc = envoy::extensions::filters::http::ext_authz::v3::ExtAuthz();\n\nclass HttpFilterTestParam\n    : public HttpFilterTestBase<testing::TestWithParam<CreateFilterConfigFunc*>> {\npublic:\n  void SetUp() override { initialize(\"\"); }\n};\n\ntemplate <bool failure_mode_allow_value, bool http_client>\nenvoy::extensions::filters::http::ext_authz::v3::ExtAuthz GetFilterConfig() {\n  const std::string http_config = R\"EOF(\n  http_service:\n    server_uri:\n      uri: \"ext_authz:9000\"\n      cluster: 
\"ext_authz\"\n      timeout: 0.25s\n  )EOF\";\n\n  const std::string grpc_config = R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  )EOF\";\n\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config{};\n  TestUtility::loadFromYaml(http_client ? http_config : grpc_config, proto_config);\n  proto_config.set_failure_mode_allow(failure_mode_allow_value);\n  return proto_config;\n}\n\nINSTANTIATE_TEST_SUITE_P(ParameterizedFilterConfig, HttpFilterTestParam,\n                         Values(&GetFilterConfig<true, true>, &GetFilterConfig<false, false>,\n                                &GetFilterConfig<true, false>, &GetFilterConfig<false, true>));\n\n// Test that the per route config is properly merged: more specific keys override previous keys.\nTEST_F(HttpFilterTest, MergeConfig) {\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings;\n  auto&& extensions = settings.mutable_check_settings()->mutable_context_extensions();\n\n  // First config base config with one base value, and one value to be overridden.\n  (*extensions)[\"base_key\"] = \"base_value\";\n  (*extensions)[\"merged_key\"] = \"base_value\";\n  FilterConfigPerRoute base_config(settings);\n\n  // Construct a config to merge, that provides one value and overrides one value.\n  settings.Clear();\n  auto&& specific_extensions = settings.mutable_check_settings()->mutable_context_extensions();\n  (*specific_extensions)[\"merged_key\"] = \"value\";\n  (*specific_extensions)[\"key\"] = \"value\";\n  FilterConfigPerRoute specific_config(settings);\n\n  // Perform the merge:\n  base_config.merge(specific_config);\n\n  settings.Clear();\n  settings.set_disabled(true);\n  FilterConfigPerRoute disabled_config(settings);\n\n  // Perform a merge with disabled config:\n  base_config.merge(disabled_config);\n\n  // Make sure all values were merged:\n  auto&& merged_extensions = base_config.contextExtensions();\n  EXPECT_EQ(\"base_value\", 
merged_extensions.at(\"base_key\"));\n  EXPECT_EQ(\"value\", merged_extensions.at(\"merged_key\"));\n  EXPECT_EQ(\"value\", merged_extensions.at(\"key\"));\n}\n\n// Test that defining stat_prefix appends an additional prefix to the emitted statistics names.\nTEST_F(HttpFilterTest, StatsWithPrefix) {\n  const std::string stat_prefix = \"with_stat_prefix\";\n  const std::string error_counter_name_with_prefix =\n      absl::StrCat(\"ext_authz.\", stat_prefix, \".error\");\n  const std::string error_counter_name_without_prefix = \"ext_authz.error\";\n\n  InSequence s;\n\n  initialize(fmt::format(R\"EOF(\n  stat_prefix: \"{}\"\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  )EOF\",\n                         stat_prefix));\n\n  EXPECT_EQ(0U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromString(error_counter_name_with_prefix)\n                    .value());\n  EXPECT_EQ(0U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromString(error_counter_name_without_prefix)\n                    .value());\n\n  prepareCheck();\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, _)).Times(1);\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Error;\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(1U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    
.counterFromString(error_counter_name_with_prefix)\n                    .value());\n  // The one without an additional prefix is not incremented, since it is not \"defined\".\n  EXPECT_EQ(0U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromString(error_counter_name_without_prefix)\n                    .value());\n}\n\n// Test when failure_mode_allow is NOT set and the response from the authorization service is Error\n// that the request is not allowed to continue.\nTEST_F(HttpFilterTest, ErrorFailClose) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: false\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&](const Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.getStatusValue(), std::to_string(enumToInt(Http::Code::Forbidden)));\n      }));\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Error;\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.error\").value());\n  EXPECT_EQ(\n      
0U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.timeout\").value());\n  EXPECT_EQ(1U, config_->stats().error_.value());\n  EXPECT_EQ(0U, config_->stats().timeout_.value());\n}\n\n// Test when when a timeout error occurs, the correct stat is incremented.\nTEST_F(HttpFilterTest, TimeoutError) {\n  InSequence s;\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: false\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&](const Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.getStatusValue(), std::to_string(enumToInt(Http::Code::Forbidden)));\n      }));\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Error;\n  response.error_kind = Filters::Common::ExtAuthz::ErrorKind::Timedout;\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.error\").value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.timeout\").value());\n  EXPECT_EQ(1U, config_->stats().error_.value());\n  
EXPECT_EQ(1U, config_->stats().timeout_.value());\n}\n\n// Verifies that the filter responds with a configurable HTTP status when an network error occurs.\nTEST_F(HttpFilterTest, ErrorCustomStatusCode) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: false\n  status_on_error:\n    code: 503\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, true))\n      .WillOnce(Invoke([&](const Http::ResponseHeaderMap& headers, bool) -> void {\n        EXPECT_EQ(headers.getStatusValue(),\n                  std::to_string(enumToInt(Http::Code::ServiceUnavailable)));\n      }));\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Error;\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.error\").value());\n  EXPECT_EQ(1U, config_->stats().error_.value());\n  EXPECT_EQ(\"ext_authz_error\", filter_callbacks_.details());\n}\n\n// Test when failure_mode_allow is set and the response from the authorization service is Error that\n// the request is allowed to continue.\nTEST_F(HttpFilterTest, ErrorOpen) {\n  InSequence s;\n\n  
initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: true\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Error;\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.error\").value());\n  EXPECT_EQ(1U, config_->stats().error_.value());\n}\n\n// Test when failure_mode_allow is set and the response from the authorization service is an\n// immediate Error that the request is allowed to continue.\nTEST_F(HttpFilterTest, ImmediateErrorOpen) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: true\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Error;\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          
WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            callbacks.onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.error\").value());\n  EXPECT_EQ(1U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromString(\"ext_authz.failure_mode_allowed\")\n                    .value());\n  EXPECT_EQ(1U, config_->stats().error_.value());\n  EXPECT_EQ(1U, config_->stats().failure_mode_allowed_.value());\n}\n\n// Check a bad configuration results in validation exception.\nTEST_F(HttpFilterTest, BadConfig) {\n  const std::string filter_config = R\"EOF(\n  grpc_service:\n    envoy_grpc: {}\n  failure_mode_allow: true\n  )EOF\";\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config{};\n  TestUtility::loadFromYaml(filter_config, proto_config);\n  EXPECT_THROW(TestUtility::downcastAndValidate<\n                   const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz&>(proto_config),\n               ProtoValidationException);\n}\n\n// Checks that filter does not initiate the authorization request when the buffer reaches the max\n// request bytes.\nTEST_F(HttpFilterTest, RequestDataIsTooLarge) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: false\n  with_request_body:\n    max_request_bytes: 10\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, 
connection()).WillByDefault(Return(&connection_));\n  EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(1);\n  EXPECT_CALL(connection_, remoteAddress()).Times(0);\n  EXPECT_CALL(connection_, localAddress()).Times(0);\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  Buffer::OwnedImpl buffer1(\"foo\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(buffer1, false));\n\n  Buffer::OwnedImpl buffer2(\"foobarbaz\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(buffer2, false));\n}\n\n// Checks that the filter initiates an authorization request when the buffer reaches max\n// request bytes and allow_partial_message is set to true.\nTEST_F(HttpFilterTest, RequestDataWithPartialMessage) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: false\n  with_request_body:\n    max_request_bytes: 10\n    allow_partial_message: true\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n  ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_));\n  EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0);\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  data_.add(\"foo\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false));\n\n  data_.add(\"bar\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false));\n\n  data_.add(\"barfoo\");\n  
EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false));\n\n  data_.add(\"more data after watermark is set is possible\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true));\n\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_));\n}\n\n// Checks that the filter initiates the authorization process only when the filter decode trailers\n// is called.\nTEST_F(HttpFilterTest, RequestDataWithSmallBuffer) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: false\n  with_request_body:\n    max_request_bytes: 10\n    allow_partial_message: true\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n  ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_));\n  EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0);\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  data_.add(\"foo\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_));\n}\n\n// Checks that the filter buffers the data and initiates the authorization request.\nTEST_F(HttpFilterTest, AuthWithRequestData) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  with_request_body:\n    max_request_bytes: 10\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_));\n  prepareCheck();\n\n  
envoy::service::auth::v3::CheckRequest check_request;\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(WithArgs<0, 2>(\n          Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks,\n                     const envoy::service::auth::v3::CheckRequest& check_param) -> void {\n            request_callbacks_ = &callbacks;\n            check_request = check_param;\n          })));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  data_.add(\"foo\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false));\n  data_.add(\"bar\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true));\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(data_.length(), check_request.attributes().request().http().body().size());\n  EXPECT_EQ(0, check_request.attributes().request().http().raw_body().size());\n}\n\n// Checks that the filter buffers the data and initiates the authorization request.\nTEST_F(HttpFilterTest, AuthWithNonUtf8RequestData) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  with_request_body:\n    max_request_bytes: 10\n    pack_as_bytes: true\n  )EOF\");\n\n  ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_));\n  prepareCheck();\n\n  envoy::service::auth::v3::CheckRequest check_request;\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(WithArgs<0, 2>(\n          Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks,\n                     const envoy::service::auth::v3::CheckRequest& check_param) -> void {\n            request_callbacks_ = &callbacks;\n            check_request = check_param;\n          })));\n  
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // Use non UTF-8 data to fill up the decoding buffer.\n  uint8_t raw[1] = {0xc0};\n  Buffer::OwnedImpl raw_buffer(raw, 1);\n\n  data_.add(raw_buffer);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false));\n  data_.add(raw_buffer);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true));\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0, check_request.attributes().request().http().body().size());\n  EXPECT_EQ(data_.length(), check_request.attributes().request().http().raw_body().size());\n}\n\n// Checks that filter does not buffer data on header-only request.\nTEST_F(HttpFilterTest, HeaderOnlyRequest) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  with_request_body:\n    max_request_bytes: 10\n  )EOF\");\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, true));\n  // decodeData() and decodeTrailers() will not be called since request is header only.\n}\n\n// Checks that filter does not buffer data on upgrade WebSocket request.\nTEST_F(HttpFilterTest, UpgradeWebsocketRequest) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  with_request_body:\n    max_request_bytes: 10\n  )EOF\");\n\n  prepareCheck();\n\n  request_headers_.addCopy(Http::Headers::get().Connection,\n                           
Http::Headers::get().ConnectionValues.Upgrade);\n  request_headers_.addCopy(Http::Headers::get().Upgrade,\n                           Http::Headers::get().UpgradeValues.WebSocket);\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  // decodeData() and decodeTrailers() will not be called until continueDecoding() is called.\n}\n\n// Checks that filter does not buffer data on upgrade H2 WebSocket request.\nTEST_F(HttpFilterTest, H2UpgradeRequest) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  with_request_body:\n    max_request_bytes: 10\n  )EOF\");\n\n  prepareCheck();\n\n  request_headers_.addCopy(Http::Headers::get().Method, Http::Headers::get().MethodValues.Connect);\n  request_headers_.addCopy(Http::Headers::get().Protocol,\n                           Http::Headers::get().ProtocolStrings.Http2String);\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  // decodeData() and decodeTrailers() will not be called until continueDecoding() is called.\n}\n\n// Checks that filter does not buffer data when is not the end of the stream, but header-only\n// request has been received.\nTEST_F(HttpFilterTest, HeaderOnlyRequestWithStream) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: 
\"ext_authz_server\"\n  with_request_body:\n    max_request_bytes: 10\n  )EOF\");\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_));\n}\n\n// Checks that the filter removes the specified headers from the request, but\n// that pseudo headers and Host are not removed.\nTEST_F(HttpFilterTest, HeadersToRemoveRemovesHeadersExceptSpecialHeaders) {\n  InSequence s;\n\n  // Set up all the typical headers plus an additional user defined header.\n  request_headers_.addCopy(Http::Headers::get().Host, \"example.com\");\n  request_headers_.addCopy(Http::Headers::get().Method, \"GET\");\n  request_headers_.addCopy(Http::Headers::get().Path, \"/users\");\n  request_headers_.addCopy(Http::Headers::get().Protocol, \"websocket\");\n  request_headers_.addCopy(Http::Headers::get().Scheme, \"https\");\n  request_headers_.addCopy(\"remove-me\", \"upstream-should-not-see-me\");\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  clear_route_cache: true\n  )EOF\");\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  
EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))\n      .Times(0);\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n  // Let's try to remove all the headers in the request.\n  response.headers_to_remove = std::vector<Http::LowerCaseString>{\n      Http::Headers::get().Host,          Http::Headers::get().HostLegacy,\n      Http::Headers::get().Method,        Http::Headers::get().Path,\n      Http::Headers::get().Protocol,      Http::Headers::get().Scheme,\n      Http::LowerCaseString{\"remove-me\"},\n  };\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n\n  // All :-prefixed headers (and Host) should still be there - only the user\n  // defined header should have been removed.\n  EXPECT_EQ(\"example.com\", request_headers_.get_(Http::Headers::get().Host));\n  EXPECT_EQ(\"example.com\", request_headers_.get_(Http::Headers::get().HostLegacy));\n  EXPECT_EQ(\"GET\", request_headers_.get_(Http::Headers::get().Method));\n  EXPECT_EQ(\"/users\", request_headers_.get_(Http::Headers::get().Path));\n  EXPECT_EQ(\"websocket\", request_headers_.get_(Http::Headers::get().Protocol));\n  EXPECT_EQ(\"https\", request_headers_.get_(Http::Headers::get().Scheme));\n  EXPECT_EQ(nullptr, request_headers_.get(Http::LowerCaseString{\"remove-me\"}));\n}\n\n// Verifies that the filter clears the route cache when an authorization response:\n// 1. is an OK response.\n// 2. has headers to append.\n// 3. has headers to add.\n// 4. 
has headers to remove.\nTEST_F(HttpFilterTest, ClearCache) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  clear_route_cache: true\n  )EOF\");\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(1);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))\n      .Times(0);\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n  response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"}};\n  response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{\"bar\"}, \"foo\"}};\n  response.headers_to_remove =\n      std::vector<Http::LowerCaseString>{Http::LowerCaseString{\"remove-me\"}};\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.ok\").value());\n  EXPECT_EQ(1U, config_->stats().ok_.value());\n}\n\n// Verifies that the filter clears the route cache when an authorization response:\n// 1. is an OK response.\n// 2. has headers to append.\n// 3. has NO headers to add.\n// 4. 
has NO headers to remove.\nTEST_F(HttpFilterTest, ClearCacheRouteHeadersToAppendOnly) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  clear_route_cache: true\n  )EOF\");\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(1);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))\n      .Times(0);\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n  response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"}};\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.ok\").value());\n  EXPECT_EQ(1U, config_->stats().ok_.value());\n}\n\n// Verifies that the filter clears the route cache when an authorization response:\n// 1. is an OK response.\n// 2. has NO headers to append.\n// 3. has headers to add.\n// 4. 
has NO headers to remove.\nTEST_F(HttpFilterTest, ClearCacheRouteHeadersToAddOnly) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  clear_route_cache: true\n  )EOF\");\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(1);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))\n      .Times(0);\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n  response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"}};\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.ok\").value());\n  EXPECT_EQ(1U, config_->stats().ok_.value());\n}\n\n// Verifies that the filter clears the route cache when an authorization response:\n// 1. is an OK response.\n// 2. has NO headers to append.\n// 3. has NO headers to add.\n// 4. 
has headers to remove.\nTEST_F(HttpFilterTest, ClearCacheRouteHeadersToRemoveOnly) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  clear_route_cache: true\n  )EOF\");\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(1);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))\n      .Times(0);\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n  response.headers_to_remove =\n      std::vector<Http::LowerCaseString>{Http::LowerCaseString{\"remove-me\"}};\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.ok\").value());\n  EXPECT_EQ(1U, config_->stats().ok_.value());\n}\n\n// Verifies that the filter DOES NOT clear the route cache when an authorization response:\n// 1. is an OK response.\n// 2. has NO headers to append.\n// 3. has NO headers to add.\n// 4. 
has NO headers to remove.\nTEST_F(HttpFilterTest, NoClearCacheRoute) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  clear_route_cache: true\n  )EOF\");\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))\n      .Times(0);\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.ok\").value());\n  EXPECT_EQ(1U, config_->stats().ok_.value());\n}\n\n// Verifies that the filter DOES NOT clear the route cache when clear_route_cache is set to false.\nTEST_F(HttpFilterTest, NoClearCacheRouteConfig) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  )EOF\");\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            
request_callbacks_ = &callbacks;\n          })));\n  EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))\n      .Times(0);\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n  response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"}};\n  response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{\"bar\"}, \"foo\"}};\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.ok\").value());\n  EXPECT_EQ(1U, config_->stats().ok_.value());\n}\n\n// Verifies that the filter DOES NOT clear the route cache when authorization response is NOT OK.\nTEST_F(HttpFilterTest, NoClearCacheRouteDeniedResponse) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  clear_route_cache: true\n  )EOF\");\n\n  prepareCheck();\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Denied;\n  response.status_code = Http::Code::Unauthorized;\n  response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"}};\n  auto response_ptr = std::make_unique<Filters::Common::ExtAuthz::Response>(response);\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n  
        WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            callbacks.onComplete(std::move(response_ptr));\n          })));\n  EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(0);\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(1U, config_->stats().denied_.value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.denied\").value());\n  EXPECT_EQ(\"ext_authz_denied\", filter_callbacks_.details());\n}\n\n// Verifies that specified metadata is passed along in the check request\nTEST_F(HttpFilterTest, MetadataContext) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  metadata_context_namespaces:\n  - jazz.sax\n  - rock.guitar\n  - hiphop.drums\n  )EOF\");\n\n  const std::string yaml = R\"EOF(\n  filter_metadata:\n    jazz.sax:\n      coltrane: john\n      parker: charlie\n    jazz.piano:\n      monk: thelonious\n      hancock: herbie\n    rock.guitar:\n      hendrix: jimi\n      richards: keith\n  )EOF\";\n\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(yaml, metadata);\n  ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata));\n\n  prepareCheck();\n\n  envoy::service::auth::v3::CheckRequest check_request;\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(WithArgs<2>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param)\n                                       -> void { check_request = check_param; })));\n\n  filter_->decodeHeaders(request_headers_, false);\n  
Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n\n  EXPECT_EQ(\"john\", check_request.attributes()\n                        .metadata_context()\n                        .filter_metadata()\n                        .at(\"jazz.sax\")\n                        .fields()\n                        .at(\"coltrane\")\n                        .string_value());\n\n  EXPECT_EQ(\"jimi\", check_request.attributes()\n                        .metadata_context()\n                        .filter_metadata()\n                        .at(\"rock.guitar\")\n                        .fields()\n                        .at(\"hendrix\")\n                        .string_value());\n\n  EXPECT_EQ(0, check_request.attributes().metadata_context().filter_metadata().count(\"jazz.piano\"));\n\n  EXPECT_EQ(0,\n            check_request.attributes().metadata_context().filter_metadata().count(\"hiphop.drums\"));\n}\n\n// Test that filter can be disabled via the filter_enabled field.\nTEST_F(HttpFilterTest, FilterDisabled) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  filter_enabled:\n    runtime_key: \"http.ext_authz.enabled\"\n    default_value:\n      numerator: 0\n      denominator: HUNDRED\n  )EOF\");\n\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"http.ext_authz.enabled\",\n                         testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(0))))\n      .WillByDefault(Return(false));\n\n  // Make sure check is not called.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\n// Test that filter can be enabled via the filter_enabled field.\nTEST_F(HttpFilterTest, FilterEnabled) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: 
\"ext_authz_server\"\n  filter_enabled:\n    runtime_key: \"http.ext_authz.enabled\"\n    default_value:\n      numerator: 100\n      denominator: HUNDRED\n  )EOF\");\n\n  prepareCheck();\n\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"http.ext_authz.enabled\",\n                         testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillByDefault(Return(true));\n\n  // Make sure check is called once.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n}\n\n// Test that filter can be disabled via the filter_enabled_metadata field.\nTEST_F(HttpFilterTest, MetadataDisabled) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  filter_enabled_metadata:\n    filter: \"abc.xyz\"\n    path:\n    - key: \"k1\"\n    value:\n      string_match:\n        exact: \"check\"\n  )EOF\");\n\n  // Disable in filter_enabled.\n  const std::string yaml = R\"EOF(\n  filter_metadata:\n    abc.xyz:\n      k1: skip\n  )EOF\";\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(yaml, metadata);\n  ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata));\n\n  // Make sure check is not called.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\n// Test that filter can be enabled via the filter_enabled_metadata field.\nTEST_F(HttpFilterTest, MetadataEnabled) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  filter_enabled_metadata:\n    filter: \"abc.xyz\"\n    path:\n    - key: \"k1\"\n    value:\n      string_match:\n        exact: \"check\"\n  )EOF\");\n\n  // Enable in 
filter_enabled.\n  const std::string yaml = R\"EOF(\n  filter_metadata:\n    abc.xyz:\n      k1: check\n  )EOF\";\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(yaml, metadata);\n  ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata));\n\n  prepareCheck();\n\n  // Make sure check is called once.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n}\n\n// Test that the filter is disabled if one of the filter_enabled and filter_enabled_metadata field\n// is disabled.\nTEST_F(HttpFilterTest, FilterEnabledButMetadataDisabled) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  filter_enabled:\n    runtime_key: \"http.ext_authz.enabled\"\n    default_value:\n      numerator: 100\n      denominator: HUNDRED\n  filter_enabled_metadata:\n    filter: \"abc.xyz\"\n    path:\n    - key: \"k1\"\n    value:\n      string_match:\n        exact: \"check\"\n  )EOF\");\n\n  // Enable in filter_enabled.\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"http.ext_authz.enabled\",\n                         testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillByDefault(Return(true));\n\n  // Disable in filter_enabled_metadata.\n  const std::string yaml = R\"EOF(\n  filter_metadata:\n    abc.xyz:\n      k1: skip\n  )EOF\";\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(yaml, metadata);\n  ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata));\n\n  // Make sure check is not called.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\n// Test that the filter is 
disabled if one of the filter_enabled and filter_enabled_metadata field\n// is disabled.\nTEST_F(HttpFilterTest, FilterDisabledButMetadataEnabled) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  filter_enabled:\n    runtime_key: \"http.ext_authz.enabled\"\n    default_value:\n      numerator: 0\n      denominator: HUNDRED\n  filter_enabled_metadata:\n    filter: \"abc.xyz\"\n    path:\n    - key: \"k1\"\n    value:\n      string_match:\n        exact: \"check\"\n  )EOF\");\n\n  // Disable in filter_enabled.\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"http.ext_authz.enabled\",\n                         testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(0))))\n      .WillByDefault(Return(false));\n\n  // Enable in filter_enabled_metadata.\n  const std::string yaml = R\"EOF(\n  filter_metadata:\n    abc.xyz:\n      k1: check\n  )EOF\";\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(yaml, metadata);\n  ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata));\n\n  // Make sure check is not called.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\n// Test that the filter is enabled if both the filter_enabled and filter_enabled_metadata field\n// is enabled.\nTEST_F(HttpFilterTest, FilterEnabledAndMetadataEnabled) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  filter_enabled:\n    runtime_key: \"http.ext_authz.enabled\"\n    default_value:\n      numerator: 100\n      denominator: HUNDRED\n  filter_enabled_metadata:\n    filter: \"abc.xyz\"\n    path:\n    - key: \"k1\"\n    value:\n      string_match:\n        exact: \"check\"\n  )EOF\");\n\n  // Enable in filter_enabled.\n  ON_CALL(runtime_.snapshot_,\n          
featureEnabled(\"http.ext_authz.enabled\",\n                         testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillByDefault(Return(true));\n\n  // Enable in filter_enabled_metadata.\n  const std::string yaml = R\"EOF(\n  filter_metadata:\n    abc.xyz:\n      k1: check\n  )EOF\";\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(yaml, metadata);\n  ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata));\n\n  prepareCheck();\n\n  // Make sure check is called once.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n}\n\n// Test that filter can deny for protected path when filter is disabled via filter_enabled field.\nTEST_F(HttpFilterTest, FilterDenyAtDisable) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  filter_enabled:\n    runtime_key: \"http.ext_authz.enabled\"\n    default_value:\n      numerator: 0\n      denominator: HUNDRED\n  deny_at_disable:\n    runtime_key: \"http.ext_authz.deny_at_disable\"\n    default_value:\n      value: true\n  )EOF\");\n\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"http.ext_authz.enabled\",\n                         testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(0))))\n      .WillByDefault(Return(false));\n\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"http.ext_authz.enabled\", false))\n      .WillByDefault(Return(true));\n\n  // Make sure check is not called.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n}\n\n// Test that filter allows for protected path when filter is disabled via filter_enabled 
field.\nTEST_F(HttpFilterTest, FilterAllowAtDisable) {\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  filter_enabled:\n    runtime_key: \"http.ext_authz.enabled\"\n    default_value:\n      numerator: 0\n      denominator: HUNDRED\n  deny_at_disable:\n    runtime_key: \"http.ext_authz.deny_at_disable\"\n    default_value:\n      value: false\n  )EOF\");\n\n  ON_CALL(runtime_.snapshot_,\n          featureEnabled(\"http.ext_authz.enabled\",\n                         testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(0))))\n      .WillByDefault(Return(false));\n\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"http.ext_authz.enabled\", false))\n      .WillByDefault(Return(false));\n\n  // Make sure check is not called.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\n// -------------------\n// Parameterized Tests\n// -------------------\n\n// Test that context extensions make it into the check request.\nTEST_P(HttpFilterTestParam, ContextExtensions) {\n  // Place something in the context extensions on the virtualhost.\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settingsvhost;\n  (*settingsvhost.mutable_check_settings()->mutable_context_extensions())[\"key_vhost\"] =\n      \"value_vhost\";\n  // add a default route value to see it overridden\n  (*settingsvhost.mutable_check_settings()->mutable_context_extensions())[\"key_route\"] =\n      \"default_route_value\";\n  // Initialize the virtual host's per filter config.\n  FilterConfigPerRoute auth_per_vhost(settingsvhost);\n  ON_CALL(filter_callbacks_.route_->route_entry_.virtual_host_,\n          perFilterConfig(HttpFilterNames::get().ExtAuthorization))\n      .WillByDefault(Return(&auth_per_vhost));\n\n  // Place something in the context extensions on the route.\n  
envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settingsroute;\n  (*settingsroute.mutable_check_settings()->mutable_context_extensions())[\"key_route\"] =\n      \"value_route\";\n  // Initialize the route's per filter config.\n  FilterConfigPerRoute auth_per_route(settingsroute);\n  ON_CALL(*filter_callbacks_.route_, perFilterConfig(HttpFilterNames::get().ExtAuthorization))\n      .WillByDefault(Return(&auth_per_route));\n\n  prepareCheck();\n\n  // Save the check request from the check call.\n  envoy::service::auth::v3::CheckRequest check_request;\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(WithArgs<2>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param)\n                                       -> void { check_request = check_param; })));\n\n  // Engage the filter so that check is called.\n  filter_->decodeHeaders(request_headers_, false);\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n\n  // Make sure that the extensions appear in the check request issued by the filter.\n  EXPECT_EQ(\"value_vhost\", check_request.attributes().context_extensions().at(\"key_vhost\"));\n  EXPECT_EQ(\"value_route\", check_request.attributes().context_extensions().at(\"key_route\"));\n}\n\n// Test that filter can be disabled with route config.\nTEST_P(HttpFilterTestParam, DisabledOnRoute) {\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings;\n  FilterConfigPerRoute auth_per_route(settings);\n\n  prepareCheck();\n\n  ON_CALL(*filter_callbacks_.route_, perFilterConfig(HttpFilterNames::get().ExtAuthorization))\n      .WillByDefault(Return(&auth_per_route));\n\n  auto test_disable = [&](bool disabled) {\n    initialize(\"\");\n    // Set disabled\n    settings.set_disabled(disabled);\n    // Initialize the route's per filter config.\n    auth_per_route = FilterConfigPerRoute(settings);\n  };\n\n  // 
baseline: make sure that when not disabled, check is called\n  test_disable(false);\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _)).Times(1);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // test that disabling works\n  test_disable(true);\n  // Make sure check is not called.\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  // Engage the filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n}\n\n// Test that filter can be disabled with route config.\nTEST_P(HttpFilterTestParam, DisabledOnRouteWithRequestBody) {\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings;\n  FilterConfigPerRoute auth_per_route(settings);\n\n  ON_CALL(*filter_callbacks_.route_, perFilterConfig(HttpFilterNames::get().ExtAuthorization))\n      .WillByDefault(Return(&auth_per_route));\n\n  auto test_disable = [&](bool disabled) {\n    initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: false\n  with_request_body:\n    max_request_bytes: 1\n    allow_partial_message: false\n  )EOF\");\n\n    // Set the filter disabled setting.\n    settings.set_disabled(disabled);\n    // Initialize the route's per filter config.\n    auth_per_route = FilterConfigPerRoute(settings);\n  };\n\n  test_disable(false);\n  ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n  // When filter is not disabled, setDecoderBufferLimit is called.\n  EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(1);\n  EXPECT_CALL(connection_, remoteAddress()).Times(0);\n  EXPECT_CALL(connection_, localAddress()).Times(0);\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  
EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false));\n\n  // To test that disabling the filter works.\n  test_disable(true);\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  // Make sure that setDecoderBufferLimit is skipped.\n  EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n}\n\n// Test that the request continues when the filter_callbacks has no route.\nTEST_P(HttpFilterTestParam, NoRoute) {\n  EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n}\n\n// Test that the request is stopped till there is an OK response back after which it continues on.\nTEST_P(HttpFilterTestParam, OkResponse) {\n  InSequence s;\n\n  prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))\n      .Times(0);\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = 
Filters::Common::ExtAuthz::CheckStatus::OK;\n  // Send an OK response Without setting the dynamic metadata field.\n  EXPECT_CALL(filter_callbacks_.stream_info_, setDynamicMetadata(_, _)).Times(0);\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.ok\").value());\n  EXPECT_EQ(1U, config_->stats().ok_.value());\n  // decodeData() and decodeTrailers() are called after continueDecoding().\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n}\n\n// Test that an synchronous OK response from the authorization service, on the call stack, results\n// in request continuing on.\nTEST_P(HttpFilterTestParam, ImmediateOkResponse) {\n  InSequence s;\n\n  prepareCheck();\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            callbacks.onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n          })));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.ok\").value());\n  EXPECT_EQ(1U, config_->stats().ok_.value());\n}\n\n// Test that an synchronous denied response from the authorization service passing additional HTTP\n// attributes to the 
downstream.\nTEST_P(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) {\n  InSequence s;\n\n  prepareCheck();\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Denied;\n  response.status_code = Http::Code::Unauthorized;\n  response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"}};\n  response.body = std::string{\"baz\"};\n\n  auto response_ptr = std::make_unique<Filters::Common::ExtAuthz::Response>(response);\n\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            callbacks.onComplete(std::move(response_ptr));\n          })));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.denied\").value());\n  EXPECT_EQ(1U, config_->stats().denied_.value());\n  // When request is denied, no call to continueDecoding(). 
As a result, decodeData() and\n  // decodeTrailer() will not be called.\n}\n\n// Test that an synchronous ok response from the authorization service passing additional HTTP\n// attributes to the upstream.\nTEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) {\n  InSequence s;\n\n  // `bar` will be appended to this header.\n  const Http::LowerCaseString request_header_key{\"baz\"};\n  request_headers_.addCopy(request_header_key, \"foo\");\n\n  // `foo` will be added to this key.\n  const Http::LowerCaseString key_to_add{\"bar\"};\n\n  // `foo` will be override with `bar`.\n  const Http::LowerCaseString key_to_override{\"foobar\"};\n  request_headers_.addCopy(\"foobar\", \"foo\");\n\n  // `remove-me` will be removed\n  const Http::LowerCaseString key_to_remove(\"remove-me\");\n  request_headers_.addCopy(key_to_remove, \"upstream-should-not-see-me\");\n\n  prepareCheck();\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n  response.headers_to_append = Http::HeaderVector{{request_header_key, \"bar\"}};\n  response.headers_to_set = Http::HeaderVector{{key_to_add, \"foo\"}, {key_to_override, \"bar\"}};\n  response.headers_to_remove = std::vector<Http::LowerCaseString>{key_to_remove};\n\n  auto response_ptr = std::make_unique<Filters::Common::ExtAuthz::Response>(response);\n\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            callbacks.onComplete(std::move(response_ptr));\n          })));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  
EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(request_headers_.get_(request_header_key), \"foo,bar\");\n  EXPECT_EQ(request_headers_.get_(key_to_add), \"foo\");\n  EXPECT_EQ(request_headers_.get_(key_to_override), \"bar\");\n  EXPECT_EQ(request_headers_.has(key_to_remove), false);\n}\n\n// Test that an synchronous denied response from the authorization service, on the call stack,\n// results in request not continuing.\nTEST_P(HttpFilterTestParam, ImmediateDeniedResponse) {\n  InSequence s;\n\n  prepareCheck();\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Denied;\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            callbacks.onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n          })));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.denied\").value());\n  EXPECT_EQ(1U, config_->stats().denied_.value());\n  // When request is denied, no call to continueDecoding(). 
As a result, decodeData() and\n  // decodeTrailer() will not be called.\n}\n\n// Test that a denied response results in the connection closing with a 401 response to the client.\nTEST_P(HttpFilterTestParam, DeniedResponseWith401) {\n  InSequence s;\n\n  prepareCheck();\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"401\"}};\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService));\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Denied;\n  response.status_code = Http::Code::Unauthorized;\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.denied\").value());\n  EXPECT_EQ(1U, config_->stats().denied_.value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"upstream_rq_4xx\").value());\n}\n\n// Test that a denied response results in the connection closing with a 403 response to the client.\nTEST_P(HttpFilterTestParam, DeniedResponseWith403) {\n  InSequence s;\n\n  prepareCheck();\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = 
&callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"403\"}};\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService));\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Denied;\n  response.status_code = Http::Code::Forbidden;\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.denied\").value());\n  EXPECT_EQ(1U, config_->stats().denied_.value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"upstream_rq_4xx\").value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"upstream_rq_403\").value());\n}\n\n// Verify that authz response memory is not used after free.\nTEST_P(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) {\n  InSequence s;\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Denied;\n  response.status_code = Http::Code::Forbidden;\n  response.body = std::string{\"foo\"};\n  response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"},\n                                               {Http::LowerCaseString{\"bar\"}, \"foo\"}};\n  Filters::Common::ExtAuthz::ResponsePtr response_ptr =\n      std::make_unique<Filters::Common::ExtAuthz::Response>(response);\n\n  prepareCheck();\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      
.WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"403\"},\n                                                   {\"content-length\", \"3\"},\n                                                   {\"content-type\", \"text/plain\"},\n                                                   {\"foo\", \"bar\"},\n                                                   {\"bar\", \"foo\"}};\n  Http::HeaderMap* saved_headers;\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false))\n      .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) { saved_headers = &headers; }));\n  EXPECT_CALL(filter_callbacks_, encodeData(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) {\n        response_ptr.reset();\n        Http::TestRequestHeaderMapImpl test_headers{*saved_headers};\n        EXPECT_EQ(test_headers.get_(\"foo\"), \"bar\");\n        EXPECT_EQ(test_headers.get_(\"bar\"), \"foo\");\n        EXPECT_EQ(data.toString(), \"foo\");\n      }));\n\n  request_callbacks_->onComplete(std::move(response_ptr));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.denied\").value());\n  EXPECT_EQ(1U, config_->stats().denied_.value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"upstream_rq_4xx\").value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"upstream_rq_403\").value());\n}\n\n// Verify that authz denied response headers overrides the existing encoding headers,\n// and that it adds repeated header names using the standard method of comma concatenation of 
values\n// for predefined inline headers while repeating other headers\nTEST_P(HttpFilterTestParam, OverrideEncodingHeaders) {\n  InSequence s;\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::Denied;\n  response.status_code = Http::Code::Forbidden;\n  response.body = std::string{\"foo\"};\n  response.headers_to_set =\n      Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"},\n                         {Http::LowerCaseString{\"bar\"}, \"foo\"},\n                         {Http::LowerCaseString{\"set-cookie\"}, \"cookie1=value\"},\n                         {Http::LowerCaseString{\"set-cookie\"}, \"cookie2=value\"},\n                         {Http::LowerCaseString{\"accept-encoding\"}, \"gzip,deflate\"}};\n  Filters::Common::ExtAuthz::ResponsePtr response_ptr =\n      std::make_unique<Filters::Common::ExtAuthz::Response>(response);\n\n  prepareCheck();\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"403\"},\n                                                   {\"content-length\", \"3\"},\n                                                   {\"content-type\", \"text/plain\"},\n                                                   {\"foo\", \"bar\"},\n                                                   {\"bar\", \"foo\"},\n                                                   {\"set-cookie\", \"cookie1=value\"},\n                                                   {\"set-cookie\", \"cookie2=value\"},\n                                                   {\"accept-encoding\", \"gzip,deflate\"}};\n  Http::HeaderMap* saved_headers;\n  
EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false))\n      .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) {\n        headers.addCopy(Http::LowerCaseString{\"foo\"}, std::string{\"OVERRIDE_WITH_bar\"});\n        headers.addCopy(Http::LowerCaseString{\"foobar\"}, std::string{\"DO_NOT_OVERRIDE\"});\n        saved_headers = &headers;\n      }));\n  EXPECT_CALL(filter_callbacks_, encodeData(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) {\n        response_ptr.reset();\n        Http::TestRequestHeaderMapImpl test_headers{*saved_headers};\n        EXPECT_EQ(test_headers.get_(\"foo\"), \"bar\");\n        EXPECT_EQ(test_headers.get_(\"bar\"), \"foo\");\n        EXPECT_EQ(test_headers.get_(\"foobar\"), \"DO_NOT_OVERRIDE\");\n        EXPECT_EQ(test_headers.get_(\"accept-encoding\"), \"gzip,deflate\");\n        EXPECT_EQ(data.toString(), \"foo\");\n\n        std::vector<absl::string_view> setCookieHeaderValues;\n        Http::HeaderUtility::getAllOfHeader(test_headers, \"set-cookie\", setCookieHeaderValues);\n        EXPECT_THAT(setCookieHeaderValues, UnorderedElementsAre(\"cookie1=value\", \"cookie2=value\"));\n      }));\n\n  request_callbacks_->onComplete(std::move(response_ptr));\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.denied\").value());\n  EXPECT_EQ(1U, config_->stats().denied_.value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"upstream_rq_4xx\").value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"upstream_rq_403\").value());\n}\n\n// Verify that when returning an OK response with dynamic_metadata field set, the filter emits\n// dynamic metadata.\nTEST_F(HttpFilterTest, EmitDynamicMetadata) {\n  InSequence s;\n\n  initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  )EOF\");\n\n  
prepareCheck();\n\n  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  Filters::Common::ExtAuthz::Response response{};\n  response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n  response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"}};\n\n  auto* fields = response.dynamic_metadata.mutable_fields();\n  (*fields)[\"foo\"] = ValueUtil::stringValue(\"ok\");\n  (*fields)[\"bar\"] = ValueUtil::numberValue(1);\n\n  EXPECT_CALL(filter_callbacks_.stream_info_, setDynamicMetadata(_, _))\n      .WillOnce(Invoke([&response](const std::string& ns,\n                                   const ProtobufWkt::Struct& returned_dynamic_metadata) {\n        EXPECT_EQ(ns, HttpFilterNames::get().ExtAuthorization);\n        EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, response.dynamic_metadata));\n      }));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))\n      .Times(0);\n  request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString(\"ext_authz.ok\").value());\n  EXPECT_EQ(1U, config_->stats().ok_.value());\n}\n\n// Test that when a connection awaiting a authorization response is canceled then the\n// authorization call is 
closed.\nTEST_P(HttpFilterTestParam, ResetDuringCall) {\n  InSequence s;\n\n  prepareCheck();\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_CALL(*client_, cancel());\n  filter_->onDestroy();\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/pull/8436.\n// Test that ext_authz filter is not in noop mode when cluster is not specified per route\n// (this could be the case when route is configured with redirect or direct response action).\nTEST_P(HttpFilterTestParam, NoCluster) {\n\n  ON_CALL(filter_callbacks_, clusterInfo()).WillByDefault(Return(nullptr));\n\n  // Place something in the context extensions on the route.\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settingsroute;\n  (*settingsroute.mutable_check_settings()->mutable_context_extensions())[\"key_route\"] =\n      \"value_route\";\n  // Initialize the route's per filter config.\n  FilterConfigPerRoute auth_per_route(settingsroute);\n  ON_CALL(*filter_callbacks_.route_, perFilterConfig(HttpFilterNames::get().ExtAuthorization))\n      .WillByDefault(Return(&auth_per_route));\n\n  prepareCheck();\n\n  // Save the check request from the check call.\n  envoy::service::auth::v3::CheckRequest check_request;\n\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(WithArgs<2>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param)\n                                       -> void { check_request = check_param; })));\n  // Make sure that filter chain is not continued and the call has been invoked.\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // 
Engage the filter so that check is called.\n  filter_->decodeHeaders(request_headers_, false);\n}\n\n// Verify that request body buffering can be skipped per route.\nTEST_P(HttpFilterTestParam, DisableRequestBodyBufferingOnRoute) {\n  envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings;\n  FilterConfigPerRoute auth_per_route(settings);\n\n  ON_CALL(*filter_callbacks_.route_, perFilterConfig(HttpFilterNames::get().ExtAuthorization))\n      .WillByDefault(Return(&auth_per_route));\n\n  auto test_disable_request_body_buffering = [&](bool bypass) {\n    initialize(R\"EOF(\n  grpc_service:\n    envoy_grpc:\n      cluster_name: \"ext_authz_server\"\n  failure_mode_allow: false\n  with_request_body:\n    max_request_bytes: 1\n    allow_partial_message: false\n  )EOF\");\n\n    // Set bypass request body buffering for this route.\n    settings.mutable_check_settings()->set_disable_request_body_buffering(bypass);\n    // Initialize the route's per filter config.\n    auth_per_route = FilterConfigPerRoute(settings);\n  };\n\n  test_disable_request_body_buffering(false);\n  ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_));\n  // When request body buffering is not skipped, setDecoderBufferLimit is called.\n  EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(1);\n  EXPECT_CALL(connection_, remoteAddress()).Times(0);\n  EXPECT_CALL(connection_, localAddress()).Times(0);\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false));\n\n  test_disable_request_body_buffering(true);\n  // When request body buffering is skipped, setDecoderBufferLimit is not called.\n  EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0);\n  EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  
EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1);\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n}\n\n} // namespace\n} // namespace ExtAuthz\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/fault/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"fault_filter_test\",\n    srcs = [\"fault_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.fault\",\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/http/fault:fault_filter_lib\",\n        \"//test/common/http:common_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.fault\",\n    deps = [\n        \":utility_lib\",\n        \"//source/extensions/filters/http/fault:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"fault_filter_integration_test\",\n    srcs = [\"fault_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.fault\",\n    deps = [\n        
\"//source/extensions/filters/http/fault:config\",\n        \"//test/integration:http_protocol_integration_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    deps = [\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/fault/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/fault/v3/fault.pb.h\"\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.validate.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"extensions/filters/http/fault/config.h\"\n\n#include \"test/extensions/filters/http/fault/utility.h\"\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Fault {\nnamespace {\n\nTEST(FaultFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n  fault.mutable_abort();\n  EXPECT_THROW(FaultFilterFactory().createFilterFactoryFromProto(fault, \"stats\", context),\n               ProtoValidationException);\n}\n\nTEST(FaultFilterConfigTest, FaultFilterCorrectJson) {\n  const std::string yaml_string = R\"EOF(\n  delay:\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    fixed_delay: 5s\n  )EOF\";\n\n  const auto proto_config = convertYamlStrToProtoConfig(yaml_string);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  FaultFilterFactory factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\nTEST(FaultFilterConfigTest, FaultFilterCorrectProto) {\n  envoy::extensions::filters::http::fault::v3::HTTPFault config;\n  config.mutable_delay()->mutable_percentage()->set_numerator(100);\n  config.mutable_delay()->mutable_percentage()->set_denominator(\n      envoy::type::v3::FractionalPercent::HUNDRED);\n  config.mutable_delay()->mutable_fixed_delay()->set_seconds(5);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  FaultFilterFactory factory;\n  Http::FilterFactoryCb 
cb = factory.createFilterFactoryFromProto(config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\nTEST(FaultFilterConfigTest, FaultFilterEmptyProto) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  FaultFilterFactory factory;\n  Http::FilterFactoryCb cb =\n      factory.createFilterFactoryFromProto(*factory.createEmptyConfigProto(), \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(FaultFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.fault\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace Fault\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/fault/fault_filter_integration_test.cc",
    "content": "#include \"test/integration/http_protocol_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Fault {\nnamespace {\n\nclass FaultIntegrationTest : public Event::TestUsingSimulatedTime,\n                             public HttpProtocolIntegrationTest {\npublic:\n  void initializeFilter(const std::string& filter_config) {\n    config_helper_.addFilter(filter_config);\n    initialize();\n  }\n\n  const std::string upstream_rate_limit_config_ =\n      R\"EOF(\nname: fault\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault\n  response_rate_limit:\n    fixed_limit:\n      limit_kbps: 1\n    percentage:\n      numerator: 100\n)EOF\";\n\n  const std::string header_fault_config_ =\n      R\"EOF(\nname: fault\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault\n  abort:\n    header_abort: {}\n    percentage:\n      numerator: 100\n  delay:\n    header_delay: {}\n    percentage:\n      numerator: 100\n  response_rate_limit:\n    header_limit: {}\n    percentage:\n      numerator: 100\n)EOF\";\n\n  const std::string abort_grpc_fault_config_ =\n      R\"EOF(\nname: fault\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault\n  abort:\n    grpc_status: 5\n    percentage:\n      numerator: 100\n)EOF\";\n};\n\n// Fault integration tests that should run with all protocols, useful for testing various\n// end_stream permutations when rate limiting.\nclass FaultIntegrationTestAllProtocols : public FaultIntegrationTest {};\nINSTANTIATE_TEST_SUITE_P(Protocols, FaultIntegrationTestAllProtocols,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n// No fault injected.\nTEST_P(FaultIntegrationTestAllProtocols, NoFault) {\n  const std::string 
filter_config =\n      R\"EOF(\nname: fault\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault\n)EOF\";\n\n  initializeFilter(filter_config);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto response =\n      sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 1024);\n\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Response rate limited with no trailers.\nTEST_P(FaultIntegrationTestAllProtocols, ResponseRateLimitNoTrailers) {\n  initializeFilter(upstream_rate_limit_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  IntegrationStreamDecoderPtr decoder =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n\n  // Active faults gauge is incremented.\n  EXPECT_EQ(1UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  Buffer::OwnedImpl data(std::string(127, 'a'));\n  upstream_request_->encodeData(data, true);\n\n  // Wait for a tick worth of data.\n  decoder->waitForBodyData(64);\n\n  // Wait for a tick worth of data and end stream.\n  simTime().advanceTimeWait(std::chrono::milliseconds(63));\n  decoder->waitForBodyData(127);\n  decoder->waitForEndStream();\n\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(1UL, 
test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Request delay and response rate limited via header configuration.\nTEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfig) {\n  initializeFilter(header_fault_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-envoy-fault-delay-request\", \"200\"},\n                                                 {\"x-envoy-fault-throughput-response\", \"1\"}};\n  IntegrationStreamDecoderPtr decoder = codec_client_->makeHeaderOnlyRequest(request_headers);\n  test_server_->waitForCounterEq(\"http.config_test.fault.delays_injected\", 1,\n                                 TestUtility::DefaultTimeout, dispatcher_.get());\n  simTime().advanceTimeWait(std::chrono::milliseconds(200));\n  waitForNextUpstreamRequest();\n\n  // Verify response body throttling.\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  Buffer::OwnedImpl data(std::string(128, 'a'));\n  upstream_request_->encodeData(data, true);\n\n  // Wait for a tick worth of data.\n  decoder->waitForBodyData(64);\n\n  // Wait for a tick worth of data and end stream.\n  simTime().advanceTimeWait(std::chrono::milliseconds(63));\n  decoder->waitForBodyData(128);\n  decoder->waitForEndStream();\n\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(1UL, 
test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Request abort controlled via header configuration.\nTEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortConfig) {\n  initializeFilter(header_fault_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-envoy-fault-abort-request\", \"429\"}});\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), Envoy::Http::HttpStatusIs(\"429\"));\n\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Request faults controlled via header configuration.\nTEST_P(FaultIntegrationTestAllProtocols, HeaderFaultsConfig0PercentageHeaders) {\n  initializeFilter(header_fault_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     
{\"x-envoy-fault-abort-request\", \"429\"},\n                                     {\"x-envoy-fault-abort-request-percentage\", \"0\"},\n                                     {\"x-envoy-fault-delay-request\", \"100\"},\n                                     {\"x-envoy-fault-delay-request-percentage\", \"0\"},\n                                     {\"x-envoy-fault-throughput-response\", \"100\"},\n                                     {\"x-envoy-fault-throughput-response-percentage\", \"0\"}});\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Request faults controlled via header configuration.\nTEST_P(FaultIntegrationTestAllProtocols, HeaderFaultsConfig100PercentageHeaders) {\n  initializeFilter(header_fault_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-envoy-fault-delay-request\", \"100\"},\n                                     {\"x-envoy-fault-delay-request-percentage\", \"100\"},\n                                     {\"x-envoy-fault-throughput-response\", \"100\"},\n                                     {\"x-envoy-fault-throughput-response-percentage\", \"100\"}});\n  
test_server_->waitForCounterEq(\"http.config_test.fault.delays_injected\", 1,\n                                 TestUtility::DefaultTimeout, dispatcher_.get());\n  simTime().advanceTimeWait(std::chrono::milliseconds(100));\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Header configuration with no headers, so no fault injection.\nTEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfigNoHeaders) {\n  initializeFilter(header_fault_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto response =\n      sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 1024);\n\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Request abort with grpc status, controlled via header configuration.\nTEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortGrpcConfig) {\n  initializeFilter(header_fault_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                   
                  {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-envoy-fault-abort-grpc-request\", \"5\"},\n                                     {\"content-type\", \"application/grpc\"}});\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), Envoy::Http::HttpStatusIs(\"200\"));\n  EXPECT_THAT(response->headers(),\n              HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(response->headers(), HeaderValueOf(Http::Headers::get().GrpcStatus, \"5\"));\n  EXPECT_THAT(response->headers(),\n              HeaderValueOf(Http::Headers::get().GrpcMessage, \"fault filter abort\"));\n  EXPECT_EQ(nullptr, response->trailers());\n\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Request abort with grpc status, controlled via header configuration.\nTEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortGrpcConfig0PercentageHeader) {\n  initializeFilter(header_fault_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-envoy-fault-abort-grpc-request\", \"5\"},\n                                     {\"x-envoy-fault-abort-request-percentage\", \"0\"},\n                   
                  {\"content-type\", \"application/grpc\"}});\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Request abort with grpc status, controlled via configuration.\nTEST_P(FaultIntegrationTestAllProtocols, FaultAbortGrpcConfig) {\n  initializeFilter(abort_grpc_fault_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"application/grpc\"}});\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), Envoy::Http::HttpStatusIs(\"200\"));\n  EXPECT_THAT(response->headers(),\n              HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(response->headers(), HeaderValueOf(Http::Headers::get().GrpcStatus, \"5\"));\n  EXPECT_THAT(response->headers(),\n              HeaderValueOf(Http::Headers::get().GrpcMessage, \"fault filter abort\"));\n  EXPECT_EQ(nullptr, response->trailers());\n\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(0UL, 
test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Fault integration tests that run with HTTP/2 only, used for fully testing trailers.\nclass FaultIntegrationTestHttp2 : public FaultIntegrationTest {};\nINSTANTIATE_TEST_SUITE_P(Protocols, FaultIntegrationTestHttp2,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams(\n                             {Http::CodecClient::Type::HTTP2}, {FakeHttpConnection::Type::HTTP2})),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n// Rate limiting with trailers received after the body has been flushed.\nTEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) {\n  initializeFilter(upstream_rate_limit_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  IntegrationStreamDecoderPtr decoder =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n\n  // Active fault gauge is incremented.\n  EXPECT_EQ(1UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  Buffer::OwnedImpl data(std::string(127, 'a'));\n  upstream_request_->encodeData(data, false);\n\n  // Wait for a tick worth of data.\n  decoder->waitForBodyData(64);\n\n  // Advance time and wait for a tick worth of data.\n  simTime().advanceTimeWait(std::chrono::milliseconds(63));\n  decoder->waitForBodyData(127);\n\n  // Send trailers and wait for end stream.\n  Http::TestResponseTrailerMapImpl trailers{{\"hello\", \"world\"}};\n  upstream_request_->encodeTrailers(trailers);\n  decoder->waitForEndStream();\n  EXPECT_NE(nullptr, decoder->trailers());\n\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, 
test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n// Rate limiting with trailers received before the body has been flushed.\nTEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyNotFlushed) {\n  initializeFilter(upstream_rate_limit_config_);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  IntegrationStreamDecoderPtr decoder =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  Buffer::OwnedImpl data(std::string(128, 'a'));\n  upstream_request_->encodeData(data, false);\n  Http::TestResponseTrailerMapImpl trailers{{\"hello\", \"world\"}};\n  upstream_request_->encodeTrailers(trailers);\n\n  // Wait for a tick worth of data.\n  decoder->waitForBodyData(64);\n\n  // Advance time and wait for a tick worth of data, trailers, and end stream.\n  simTime().advanceTimeWait(std::chrono::milliseconds(63));\n  decoder->waitForBodyData(128);\n  decoder->waitForEndStream();\n  EXPECT_NE(nullptr, decoder->trailers());\n\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.aborts_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->counter(\"http.config_test.fault.delays_injected\")->value());\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.fault.response_rl_injected\")->value());\n  EXPECT_EQ(0UL, test_server_->gauge(\"http.config_test.fault.active_faults\")->value());\n}\n\n} // namespace\n} // namespace Fault\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/fault/fault_filter_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <limits>\n#include <memory>\n#include <string>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.h\"\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.validate.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n\n#include \"extensions/filters/http/fault/fault_filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/extensions/filters/http/fault/utility.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::Matcher;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Fault {\nnamespace {\n\nclass FaultFilterTest : public testing::Test {\npublic:\n  const std::string fixed_delay_and_abort_nodes_yaml = R\"EOF(\n  delay:\n    type: fixed\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    fixed_delay: 5s\n  abort:\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    http_status: 503\n  downstream_nodes:\n  - canary\n  )EOF\";\n\n  const std::string fixed_delay_only_yaml = R\"EOF(\n  delay:\n    type: fixed\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    fixed_delay: 5s\n  )EOF\";\n\n  const std::string abort_only_yaml = R\"EOF(\n  abort:\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    http_status: 429\n  
)EOF\";\n\n  const std::string fixed_delay_and_abort_yaml = R\"EOF(\n  delay:\n    type: fixed\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    fixed_delay: 5s\n  abort:\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    http_status: 503\n  )EOF\";\n\n  const std::string header_abort_only_yaml = R\"EOF(\n  abort:\n    header_abort: {}\n    percentage:\n      numerator: 100\n  )EOF\";\n\n  const std::string fixed_delay_and_abort_match_headers_yaml = R\"EOF(\n  delay:\n    type: fixed\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    fixed_delay: 5s\n  abort:\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    http_status: 503\n  headers:\n  - name: X-Foo1\n    exact_match: Bar\n  - name: X-Foo2\n  )EOF\";\n\n  const std::string delay_with_upstream_cluster_yaml = R\"EOF(\n  delay:\n    type: fixed\n    percentage:\n      numerator: 100\n      denominator: HUNDRED\n    fixed_delay: 5s\n  upstream_cluster: www1\n  )EOF\";\n\n  const std::string v2_empty_fault_config_yaml = \"{}\";\n\n  void SetUpTest(const envoy::extensions::filters::http::fault::v3::HTTPFault fault) {\n    config_ = std::make_shared<FaultFilterConfig>(fault, runtime_, \"prefix.\", stats_, time_system_);\n    filter_ = std::make_unique<FaultFilter>(config_);\n    filter_->setDecoderFilterCallbacks(decoder_filter_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_filter_callbacks_);\n    EXPECT_CALL(decoder_filter_callbacks_.dispatcher_, setTrackedObject(_)).Times(AnyNumber());\n  }\n\n  void SetUpTest(const std::string& yaml) { SetUpTest(convertYamlStrToProtoConfig(yaml)); }\n\n  void expectDelayTimer(uint64_t duration_ms) {\n    timer_ = new Event::MockTimer(&decoder_filter_callbacks_.dispatcher_);\n    EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(duration_ms), _));\n    EXPECT_CALL(*timer_, disableTimer());\n  }\n\n  void TestPerFilterConfigFault(const Router::RouteSpecificFilterConfig* 
route_fault,\n                                const Router::RouteSpecificFilterConfig* vhost_fault);\n\n  NiceMock<Stats::MockIsolatedStatsStore> stats_;\n  FaultFilterConfigSharedPtr config_;\n  std::unique_ptr<FaultFilter> filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_filter_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_filter_callbacks_;\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Http::TestRequestTrailerMapImpl request_trailers_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n  Buffer::OwnedImpl data_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  Event::MockTimer* timer_{};\n  Event::SimulatedTimeSystem time_system_;\n};\n\nvoid faultFilterBadConfigHelper(const std::string& yaml) {\n  envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n  EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, fault), EnvoyException);\n}\n\nTEST(FaultFilterBadConfigTest, EmptyDownstreamNodes) {\n  const std::string yaml = R\"EOF(\n  abort:\n    abort_percent:\n      numerator: 80\n      denominator: HUNDRED\n    http_status: 503\n  downstream_nodes: []\n\n  )EOF\";\n\n  faultFilterBadConfigHelper(yaml);\n}\n\nTEST(FaultFilterBadConfigTest, MissingHTTPStatus) {\n  const std::string yaml = R\"EOF(\n  abort:\n    abort_percent:\n      numerator: 100\n      denominator: HUNDRED\n  )EOF\";\n\n  faultFilterBadConfigHelper(yaml);\n}\n\nTEST(FaultFilterBadConfigTest, BadDelayType) {\n  const std::string yaml = R\"EOF(\n  delay:\n    type: foo\n    percentage:\n      numerator: 50\n      denominator: HUNDRED\n    fixed_delay: 5s\n  )EOF\";\n\n  faultFilterBadConfigHelper(yaml);\n}\n\nTEST(FaultFilterBadConfigTest, BadDelayDuration) {\n  const std::string yaml = R\"EOF(\n  delay:\n    type: fixed\n    percentage:\n      numerator: 50\n      denominator: HUNDRED\n    fixed_delay: 0s\n   )EOF\";\n\n  
faultFilterBadConfigHelper(yaml);\n}\n\nTEST(FaultFilterBadConfigTest, MissingDelayDuration) {\n  const std::string yaml = R\"EOF(\n  delay:\n    type: fixed\n    percentage:\n      numerator: 50\n      denominator: HUNDRED\n   )EOF\";\n\n  faultFilterBadConfigHelper(yaml);\n}\n\nTEST_F(FaultFilterTest, AbortWithHttpStatus) {\n  envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n  fault.mutable_abort()->mutable_percentage()->set_numerator(100);\n  fault.mutable_abort()->mutable_percentage()->set_denominator(\n      envoy::type::v3::FractionalPercent::HUNDRED);\n  fault.mutable_abort()->set_http_status(429);\n  SetUpTest(fault);\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected))\n      .Times(0);\n\n  // Abort related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", 429))\n      .WillOnce(Return(429));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"429\"}, {\"content-length\", \"18\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(decoder_filter_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(decoder_filter_callbacks_, encodeData(_, true));\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            
filter_->decodeHeaders(request_headers_, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  EXPECT_EQ(1UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  filter_->onDestroy();\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().aborts_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(\"fault_filter_abort\", decoder_filter_callbacks_.details());\n}\n\nTEST_F(FaultFilterTest, HeaderAbortWithHttpStatus) {\n  SetUpTest(header_abort_only_yaml);\n\n  request_headers_.addCopy(\"x-envoy-fault-abort-request\", \"429\");\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected))\n      .Times(0);\n\n  // Abort related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", 429))\n      .WillOnce(Return(429));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"429\"}, {\"content-length\", \"18\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(decoder_filter_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  
EXPECT_CALL(decoder_filter_callbacks_, encodeData(_, true));\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  EXPECT_EQ(1UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  filter_->onDestroy();\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().aborts_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(\"fault_filter_abort\", decoder_filter_callbacks_.details());\n}\n\nTEST_F(FaultFilterTest, AbortWithGrpcStatus) {\n  decoder_filter_callbacks_.is_grpc_request_ = true;\n\n  envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n  fault.mutable_abort()->mutable_percentage()->set_numerator(100);\n  fault.mutable_abort()->mutable_percentage()->set_denominator(\n      envoy::type::v3::FractionalPercent::HUNDRED);\n  fault.mutable_abort()->set_grpc_status(5);\n  SetUpTest(fault);\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected))\n      .Times(0);\n\n  // Abort related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             
Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.grpc_status\", 5))\n      .WillOnce(Return(5));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-type\", \"application/grpc\"},\n                                                   {\"grpc-status\", \"5\"},\n                                                   {\"grpc-message\", \"fault filter abort\"}};\n  EXPECT_CALL(decoder_filter_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  EXPECT_EQ(1UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  filter_->onDestroy();\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().aborts_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(\"fault_filter_abort\", decoder_filter_callbacks_.details());\n}\n\nTEST_F(FaultFilterTest, HeaderAbortWithGrpcStatus) {\n  decoder_filter_callbacks_.is_grpc_request_ = true;\n  SetUpTest(header_abort_only_yaml);\n\n  request_headers_.addCopy(\"x-envoy-fault-abort-grpc-request\", \"5\");\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      
.WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected))\n      .Times(0);\n\n  // Abort related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.grpc_status\", 5))\n      .WillOnce(Return(5));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-type\", \"application/grpc\"},\n                                                   {\"grpc-status\", \"5\"},\n                                                   {\"grpc-message\", \"fault filter abort\"}};\n\n  EXPECT_CALL(decoder_filter_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  EXPECT_EQ(1UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  filter_->onDestroy();\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().aborts_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n  
EXPECT_EQ(\"fault_filter_abort\", decoder_filter_callbacks_.details());\n}\n\nTEST_F(FaultFilterTest, HeaderAbortWithHttpAndGrpcStatus) {\n  SetUpTest(header_abort_only_yaml);\n\n  request_headers_.addCopy(\"x-envoy-fault-abort-request\", \"429\");\n  request_headers_.addCopy(\"x-envoy-fault-abort-grpc-request\", \"5\");\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected))\n      .Times(0);\n\n  // Abort related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", 429))\n      .WillOnce(Return(429));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.grpc_status\", 5)).Times(0);\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"429\"}, {\"content-length\", \"18\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(decoder_filter_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(decoder_filter_callbacks_, encodeData(_, true));\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  EXPECT_EQ(1UL, 
config_->stats().active_faults_.value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  filter_->onDestroy();\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().aborts_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(\"fault_filter_abort\", decoder_filter_callbacks_.details());\n}\n\nTEST_F(FaultFilterTest, FixedDelayZeroDuration) {\n  SetUpTest(fixed_delay_only_yaml);\n\n  // Delay related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  // Return 0ms delay\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(0));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_, setResponseFlag(_)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n\n  // Expect filter to continue execution when delay is 0ms\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().aborts_injected_.value());\n}\n\nTEST_F(FaultFilterTest, Overflow) {\n  SetUpTest(fixed_delay_only_yaml);\n\n  // Delay related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              
featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  // Return 1ms delay\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(1));\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(0));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, true));\n\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(1UL, config_->stats().faults_overflow_.value());\n}\n\n// Verifies that we don't increment the active_faults gauge when not applying a fault.\nTEST_F(FaultFilterTest, Passthrough) {\n  envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n  SetUpTest(fault);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, true));\n\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n}\n\nTEST_F(FaultFilterTest, FixedDelayDeprecatedPercentAndNonZeroDuration) {\n  envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n  fault.mutable_delay()->mutable_percentage()->set_numerator(50);\n  fault.mutable_delay()->mutable_percentage()->set_denominator(\n      envoy::type::v3::FractionalPercent::HUNDRED);\n  fault.mutable_delay()->mutable_fixed_delay()->set_seconds(5);\n  SetUpTest(fault);\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  // Delay related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(50))))\n      .WillOnce(Return(true));\n\n  
EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(5000UL));\n\n  SCOPED_TRACE(\"FixedDelayDeprecatedPercentAndNonZeroDuration\");\n  expectDelayTimer(5000UL);\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // Delay only case\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected))\n      .Times(0);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false));\n  EXPECT_EQ(1UL, config_->stats().active_faults_.value());\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_));\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding());\n  timer_->invokeCallback();\n  filter_->onDestroy();\n\n  EXPECT_EQ(1UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().aborts_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n}\n\nTEST_F(FaultFilterTest, DelayForDownstreamCluster) {\n  SetUpTest(fixed_delay_only_yaml);\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  request_headers_.addCopy(\"x-envoy-downstream-service-cluster\", \"cluster\");\n\n  // Delay related calls.\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.cluster.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n    
  .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(125UL));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.cluster.delay.fixed_duration_ms\", 125UL))\n      .WillOnce(Return(500UL));\n  expectDelayTimer(500UL);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // Delay only case, no aborts.\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.cluster.abort.http_status\", _)).Times(0);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected))\n      .Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding());\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false));\n\n  EXPECT_CALL(decoder_filter_callbacks_.dispatcher_, setTrackedObject(_)).Times(2);\n  timer_->invokeCallback();\n\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(1UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().aborts_injected_.value());\n  EXPECT_EQ(1UL, stats_.counter(\"prefix.fault.cluster.delays_injected\").value());\n  EXPECT_EQ(0UL, stats_.counter(\"prefix.fault.cluster.aborts_injected\").value());\n}\n\nTEST_F(FaultFilterTest, FixedDelayAndAbortDownstream) {\n  SetUpTest(fixed_delay_and_abort_yaml);\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n 
 request_headers_.addCopy(\"x-envoy-downstream-service-cluster\", \"cluster\");\n\n  // Delay related calls.\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.cluster.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(125UL));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.cluster.delay.fixed_duration_ms\", 125UL))\n      .WillOnce(Return(500UL));\n  expectDelayTimer(500UL);\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  EXPECT_EQ(1UL, config_->stats().active_faults_.value());\n\n  // Abort related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.cluster.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", 503))\n      .WillOnce(Return(503));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.cluster.abort.http_status\", 503))\n      .WillOnce(Return(500));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"500\"}, {\"content-length\", \"18\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(decoder_filter_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(decoder_filter_callbacks_, encodeData(_, true));\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected));\n\n  EXPECT_CALL(decoder_filter_callbacks_, 
continueDecoding()).Times(0);\n  timer_->invokeCallback();\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(1UL, config_->stats().active_faults_.value());\n  filter_->onDestroy();\n\n  EXPECT_EQ(1UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().aborts_injected_.value());\n  EXPECT_EQ(1UL, stats_.counter(\"prefix.fault.cluster.delays_injected\").value());\n  EXPECT_EQ(1UL, stats_.counter(\"prefix.fault.cluster.aborts_injected\").value());\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n}\n\nTEST_F(FaultFilterTest, FixedDelayAndAbort) {\n  SetUpTest(fixed_delay_and_abort_yaml);\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  // Delay related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(5000UL));\n\n  SCOPED_TRACE(\"FixedDelayAndAbort\");\n  expectDelayTimer(5000UL);\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // Abort related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, 
getInteger(\"fault.http.abort.http_status\", 503))\n      .WillOnce(Return(503));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"503\"}, {\"content-length\", \"18\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(decoder_filter_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(decoder_filter_callbacks_, encodeData(_, true));\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected));\n\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n\n  timer_->invokeCallback();\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(1UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().aborts_injected_.value());\n}\n\nTEST_F(FaultFilterTest, FixedDelayAndAbortDownstreamNodes) {\n  SetUpTest(fixed_delay_and_abort_nodes_yaml);\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  // Delay related calls.\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(5000UL));\n\n  expectDelayTimer(5000UL);\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected));\n\n  request_headers_.addCopy(\"x-envoy-downstream-service-node\", \"canary\");\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            
filter_->decodeHeaders(request_headers_, false));\n\n  // Abort related calls.\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", 503))\n      .WillOnce(Return(503));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"503\"}, {\"content-length\", \"18\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(decoder_filter_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(decoder_filter_callbacks_, encodeData(_, true));\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected));\n\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n\n  timer_->invokeCallback();\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(1UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().aborts_injected_.value());\n}\n\nTEST_F(FaultFilterTest, NoDownstreamMatch) {\n  SetUpTest(fixed_delay_and_abort_nodes_yaml);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, true));\n}\n\nTEST_F(FaultFilterTest, FixedDelayAndAbortHeaderMatchSuccess) {\n  SetUpTest(fixed_delay_and_abort_match_headers_yaml);\n  request_headers_.addCopy(\"x-foo1\", \"Bar\");\n  request_headers_.addCopy(\"x-foo2\", \"RandomValue\");\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  // Delay related calls\n  
EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(5000UL));\n\n  SCOPED_TRACE(\"FixedDelayAndAbortHeaderMatchSuccess\");\n  expectDelayTimer(5000UL);\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // Abort related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", 503))\n      .WillOnce(Return(503));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"503\"}, {\"content-length\", \"18\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_CALL(decoder_filter_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(decoder_filter_callbacks_, encodeData(_, true));\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected));\n\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n\n  timer_->invokeCallback();\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(1UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().aborts_injected_.value());\n}\n\nTEST_F(FaultFilterTest, 
FixedDelayAndAbortHeaderMatchFail) {\n  SetUpTest(fixed_delay_and_abort_match_headers_yaml);\n  request_headers_.addCopy(\"x-foo1\", \"Bar\");\n  request_headers_.addCopy(\"x-foo3\", \"Baz\");\n\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .Times(0);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", _)).Times(0);\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .Times(0);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_, setResponseFlag(_)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n\n  // Expect filter to continue execution when headers don't match\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().aborts_injected_.value());\n}\n\nTEST_F(FaultFilterTest, TimerResetAfterStreamReset) {\n  SetUpTest(fixed_delay_only_yaml);\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  // Prep up with a 5s delay\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const 
envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(5000UL));\n\n  SCOPED_TRACE(\"FixedDelayWithStreamReset\");\n  timer_ = new Event::MockTimer(&decoder_filter_callbacks_.dispatcher_);\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(5000UL), _));\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected));\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(1UL, config_->stats().delays_injected_.value());\n\n  // delay timer should have been fired by now. If caller resets the stream while we are waiting\n  // on the delay timer, check if timers are cancelled\n  EXPECT_CALL(*timer_, disableTimer());\n\n  // The timer callback should never be called.\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .Times(0);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected))\n      .Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(0UL, config_->stats().aborts_injected_.value());\n\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true));\n\n  filter_->onDestroy();\n}\n\nTEST_F(FaultFilterTest, FaultWithTargetClusterMatchSuccess) {\n  SetUpTest(delay_with_upstream_cluster_yaml);\n  const std::string upstream_cluster(\"www1\");\n\n  
EXPECT_CALL(decoder_filter_callbacks_.route_->route_entry_, clusterName())\n      .WillOnce(ReturnRef(upstream_cluster));\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  // Delay related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(5000UL));\n\n  SCOPED_TRACE(\"FaultWithTargetClusterMatchSuccess\");\n  expectDelayTimer(5000UL);\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  // Delay only case\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FaultInjected))\n      .Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding());\n  timer_->invokeCallback();\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(1UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().aborts_injected_.value());\n}\n\nTEST_F(FaultFilterTest, FaultWithTargetClusterMatchFail) {\n  SetUpTest(delay_with_upstream_cluster_yaml);\n  const std::string upstream_cluster(\"mismatch\");\n\n  
EXPECT_CALL(decoder_filter_callbacks_.route_->route_entry_, clusterName())\n      .WillOnce(ReturnRef(upstream_cluster));\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .Times(0);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", _)).Times(0);\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .Times(0);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_, setResponseFlag(_)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().aborts_injected_.value());\n}\n\nTEST_F(FaultFilterTest, FaultWithTargetClusterNullRoute) {\n  SetUpTest(delay_with_upstream_cluster_yaml);\n  const std::string upstream_cluster(\"www1\");\n\n  EXPECT_CALL(*decoder_filter_callbacks_.route_, routeEntry()).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .Times(0);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", _)).Times(0);\n  EXPECT_CALL(runtime_.snapshot_,\n              
featureEnabled(\"fault.http.abort.abort_percent\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(_)))\n      .Times(0);\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.abort.http_status\", _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_, setResponseFlag(_)).Times(0);\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(0UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().aborts_injected_.value());\n}\n\nvoid FaultFilterTest::TestPerFilterConfigFault(\n    const Router::RouteSpecificFilterConfig* route_fault,\n    const Router::RouteSpecificFilterConfig* vhost_fault) {\n\n  ON_CALL(decoder_filter_callbacks_.route_->route_entry_,\n          perFilterConfig(Extensions::HttpFilters::HttpFilterNames::get().Fault))\n      .WillByDefault(Return(route_fault));\n  ON_CALL(decoder_filter_callbacks_.route_->route_entry_.virtual_host_,\n          perFilterConfig(Extensions::HttpFilters::HttpFilterNames::get().Fault))\n      .WillByDefault(Return(vhost_fault));\n\n  const std::string upstream_cluster(\"www1\");\n\n  EXPECT_CALL(decoder_filter_callbacks_.route_->route_entry_, clusterName())\n      .WillOnce(ReturnRef(upstream_cluster));\n\n  EXPECT_CALL(runtime_.snapshot_,\n              getInteger(\"fault.http.max_active_faults\", std::numeric_limits<uint64_t>::max()))\n      .WillOnce(Return(std::numeric_limits<uint64_t>::max()));\n\n  // Delay related calls\n  EXPECT_CALL(runtime_.snapshot_,\n              featureEnabled(\"fault.http.delay.fixed_delay_percent\",\n                             
Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"fault.http.delay.fixed_duration_ms\", 5000))\n      .WillOnce(Return(5000UL));\n\n  SCOPED_TRACE(\"PerFilterConfigFault\");\n  expectDelayTimer(5000UL);\n\n  EXPECT_CALL(decoder_filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::DelayInjected));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  EXPECT_CALL(decoder_filter_callbacks_, continueDecoding());\n  timer_->invokeCallback();\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(1UL, config_->stats().delays_injected_.value());\n  EXPECT_EQ(0UL, config_->stats().aborts_injected_.value());\n}\n\nTEST_F(FaultFilterTest, RouteFaultOverridesListenerFault) {\n\n  Fault::FaultSettings abort_fault(convertYamlStrToProtoConfig(abort_only_yaml));\n  Fault::FaultSettings delay_fault(convertYamlStrToProtoConfig(delay_with_upstream_cluster_yaml));\n\n  // route-level fault overrides listener-level fault\n  {\n    SetUpTest(v2_empty_fault_config_yaml); // This is a valid listener level fault\n    TestPerFilterConfigFault(&delay_fault, nullptr);\n  }\n\n  // virtual-host-level fault overrides listener-level fault\n  {\n    config_->stats().aborts_injected_.reset();\n    config_->stats().delays_injected_.reset();\n    SetUpTest(v2_empty_fault_config_yaml);\n    TestPerFilterConfigFault(nullptr, &delay_fault);\n  }\n\n  // route-level fault overrides virtual-host-level fault\n  {\n    config_->stats().aborts_injected_.reset();\n    config_->stats().delays_injected_.reset();\n    SetUpTest(v2_empty_fault_config_yaml);\n    TestPerFilterConfigFault(&delay_fault, &abort_fault);\n  }\n}\n\nclass FaultFilterRateLimitTest : public 
FaultFilterTest {\npublic:\n  void setupRateLimitTest(bool enable_runtime) {\n    envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n    fault.mutable_response_rate_limit()->mutable_fixed_limit()->set_limit_kbps(1);\n    fault.mutable_response_rate_limit()->mutable_percentage()->set_numerator(100);\n    SetUpTest(fault);\n\n    EXPECT_CALL(runtime_.snapshot_,\n                featureEnabled(\"fault.http.rate_limit.response_percent\",\n                               Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n        .WillOnce(Return(enable_runtime));\n  }\n};\n\nTEST_F(FaultFilterRateLimitTest, ResponseRateLimitDisabled) {\n  setupRateLimitTest(false);\n  Buffer::OwnedImpl data;\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n}\n\n// Make sure we destroy the rate limiter if we are reset.\nTEST_F(FaultFilterRateLimitTest, DestroyWithResponseRateLimitEnabled) {\n  setupRateLimitTest(true);\n\n  ON_CALL(encoder_filter_callbacks_, encoderBufferLimit()).WillByDefault(Return(1100));\n  // The timer is consumed but not used by this test.\n  new NiceMock<Event::MockTimer>(&decoder_filter_callbacks_.dispatcher_);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, true));\n\n  EXPECT_EQ(1UL, config_->stats().response_rl_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().active_faults_.value());\n\n  filter_->onDestroy();\n\n  EXPECT_EQ(0UL, 
config_->stats().active_faults_.value());\n}\n\nTEST_F(FaultFilterRateLimitTest, ResponseRateLimitEnabled) {\n  setupRateLimitTest(true);\n\n  ON_CALL(encoder_filter_callbacks_, encoderBufferLimit()).WillByDefault(Return(1100));\n  Event::MockTimer* token_timer =\n      new NiceMock<Event::MockTimer>(&decoder_filter_callbacks_.dispatcher_);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, true));\n\n  EXPECT_EQ(1UL, config_->stats().response_rl_injected_.value());\n  EXPECT_EQ(1UL, config_->stats().active_faults_.value());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  Http::MetadataMap metadata_map;\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n\n  // Send a small amount of data which should be within limit.\n  Buffer::OwnedImpl data1(\"hello\");\n  EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data1, false));\n  EXPECT_CALL(encoder_filter_callbacks_,\n              injectEncodedDataToFilterChain(BufferStringEqual(\"hello\"), false));\n  token_timer->invokeCallback();\n\n  // Advance time by 1s which should refill all tokens.\n  time_system_.advanceTimeWait(std::chrono::seconds(1));\n\n  // Send 1152 bytes of data which is 1s + 2 refill cycles of data.\n  EXPECT_CALL(encoder_filter_callbacks_, onEncoderFilterAboveWriteBufferHighWatermark());\n  EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _));\n  Buffer::OwnedImpl data2(std::string(1152, 'a'));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data2, false));\n\n  EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(63), _));\n  EXPECT_CALL(encoder_filter_callbacks_, 
onEncoderFilterBelowWriteBufferLowWatermark());\n  EXPECT_CALL(encoder_filter_callbacks_,\n              injectEncodedDataToFilterChain(BufferStringEqual(std::string(1024, 'a')), false));\n  token_timer->invokeCallback();\n\n  // Fire timer, also advance time.\n  time_system_.advanceTimeWait(std::chrono::milliseconds(63));\n  EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(63), _));\n  EXPECT_CALL(encoder_filter_callbacks_,\n              injectEncodedDataToFilterChain(BufferStringEqual(std::string(64, 'a')), false));\n  token_timer->invokeCallback();\n\n  // Get new data with current data buffered, not end_stream.\n  Buffer::OwnedImpl data3(std::string(64, 'b'));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data3, false));\n\n  // Fire timer, also advance time.\n  time_system_.advanceTimeWait(std::chrono::milliseconds(63));\n  EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(63), _));\n  EXPECT_CALL(encoder_filter_callbacks_,\n              injectEncodedDataToFilterChain(BufferStringEqual(std::string(64, 'a')), false));\n  token_timer->invokeCallback();\n\n  // Fire timer, also advance time. 
No time enable because there is nothing buffered.\n  time_system_.advanceTimeWait(std::chrono::milliseconds(63));\n  EXPECT_CALL(encoder_filter_callbacks_,\n              injectEncodedDataToFilterChain(BufferStringEqual(std::string(64, 'b')), false));\n  token_timer->invokeCallback();\n\n  // Advance time by 1s for a full refill.\n  time_system_.advanceTimeWait(std::chrono::seconds(1));\n\n  // Now send 1024 in one shot with end_stream true which should go through and end the stream.\n  EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _));\n  Buffer::OwnedImpl data4(std::string(1024, 'c'));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data4, true));\n  EXPECT_CALL(encoder_filter_callbacks_,\n              injectEncodedDataToFilterChain(BufferStringEqual(std::string(1024, 'c')), true));\n  token_timer->invokeCallback();\n\n  filter_->onDestroy();\n  EXPECT_EQ(0UL, config_->stats().active_faults_.value());\n}\n\nclass FaultFilterSettingsTest : public FaultFilterTest {};\n\nTEST_F(FaultFilterSettingsTest, CheckDefaultRuntimeKeys) {\n  envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n\n  Fault::FaultSettings settings(fault);\n\n  EXPECT_EQ(\"fault.http.delay.fixed_delay_percent\", settings.delayPercentRuntime());\n  EXPECT_EQ(\"fault.http.abort.abort_percent\", settings.abortPercentRuntime());\n  EXPECT_EQ(\"fault.http.delay.fixed_duration_ms\", settings.delayDurationRuntime());\n  EXPECT_EQ(\"fault.http.abort.http_status\", settings.abortHttpStatusRuntime());\n  EXPECT_EQ(\"fault.http.abort.grpc_status\", settings.abortGrpcStatusRuntime());\n  EXPECT_EQ(\"fault.http.max_active_faults\", settings.maxActiveFaultsRuntime());\n  EXPECT_EQ(\"fault.http.rate_limit.response_percent\", settings.responseRateLimitPercentRuntime());\n}\n\nTEST_F(FaultFilterSettingsTest, CheckOverrideRuntimeKeys) {\n  envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n  
fault.set_abort_percent_runtime(std::string(\"fault.abort_percent_runtime\"));\n  fault.set_delay_percent_runtime(std::string(\"fault.delay_percent_runtime\"));\n  fault.set_abort_http_status_runtime(std::string(\"fault.abort_http_status_runtime\"));\n  fault.set_abort_grpc_status_runtime(std::string(\"fault.abort_grpc_status_runtime\"));\n  fault.set_delay_duration_runtime(std::string(\"fault.delay_duration_runtime\"));\n  fault.set_max_active_faults_runtime(std::string(\"fault.max_active_faults_runtime\"));\n  fault.set_response_rate_limit_percent_runtime(\n      std::string(\"fault.response_rate_limit_percent_runtime\"));\n\n  Fault::FaultSettings settings(fault);\n\n  EXPECT_EQ(\"fault.delay_percent_runtime\", settings.delayPercentRuntime());\n  EXPECT_EQ(\"fault.abort_percent_runtime\", settings.abortPercentRuntime());\n  EXPECT_EQ(\"fault.delay_duration_runtime\", settings.delayDurationRuntime());\n  EXPECT_EQ(\"fault.abort_http_status_runtime\", settings.abortHttpStatusRuntime());\n  EXPECT_EQ(\"fault.abort_grpc_status_runtime\", settings.abortGrpcStatusRuntime());\n  EXPECT_EQ(\"fault.max_active_faults_runtime\", settings.maxActiveFaultsRuntime());\n  EXPECT_EQ(\"fault.response_rate_limit_percent_runtime\",\n            settings.responseRateLimitPercentRuntime());\n}\n\n} // namespace\n} // namespace Fault\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/fault/utility.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/extensions/filters/http/fault/v3/fault.pb.h\"\n\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Fault {\n\ninline envoy::extensions::filters::http::fault::v3::HTTPFault\nconvertYamlStrToProtoConfig(const std::string& yaml) {\n  envoy::extensions::filters::http::fault::v3::HTTPFault fault;\n  TestUtility::loadFromYaml(yaml, fault);\n  return fault;\n}\n\n} // namespace Fault\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_http1_bridge/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"http1_bridge_filter_test\",\n    srcs = [\"http1_bridge_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_http1_bridge\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/grpc_http1_bridge:http1_bridge_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:global_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_http1_bridge\",\n    deps = [\n        \"//source/extensions/filters/http/grpc_http1_bridge:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_http1_bridge/config_test.cc",
    "content": "#include \"extensions/filters/http/grpc_http1_bridge/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1Bridge {\nnamespace {\n\nTEST(GrpcHttp1BridgeFilterConfigTest, GrpcHttp1BridgeFilter) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  GrpcHttp1BridgeFilterConfig factory;\n  envoy::extensions::filters::http::grpc_http1_bridge::v3::Config config;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(GrpcHttp1BridgeFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.grpc_http1_bridge\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace GrpcHttp1Bridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/global.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnPointee;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1Bridge {\nnamespace {\n\nclass GrpcHttp1BridgeFilterTest : public testing::Test {\npublic:\n  GrpcHttp1BridgeFilterTest() : context_(*symbol_table_), filter_(context_) {\n    filter_.setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_.setEncoderFilterCallbacks(encoder_callbacks_);\n    ON_CALL(decoder_callbacks_.stream_info_, protocol()).WillByDefault(ReturnPointee(&protocol_));\n  }\n\n  ~GrpcHttp1BridgeFilterTest() override { filter_.onDestroy(); }\n\n  Stats::TestSymbolTable symbol_table_;\n  Grpc::ContextImpl context_;\n  Http1BridgeFilter filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  absl::optional<Http::Protocol> protocol_{Http::Protocol::Http11};\n};\n\nTEST_F(GrpcHttp1BridgeFilterTest, NoRoute) {\n  protocol_ = Http::Protocol::Http2;\n  ON_CALL(decoder_callbacks_, route()).WillByDefault(Return(nullptr));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, true));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, 
filter_.decodeMetadata(metadata_map));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"404\"}};\n}\n\nTEST_F(GrpcHttp1BridgeFilterTest, NoCluster) {\n  protocol_ = Http::Protocol::Http2;\n  ON_CALL(decoder_callbacks_, clusterInfo()).WillByDefault(Return(nullptr));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, true));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"404\"}};\n}\n\nTEST_F(GrpcHttp1BridgeFilterTest, StatsHttp2HeaderOnlyResponse) {\n  protocol_ = Http::Protocol::Http2;\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, true));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_.encode100ContinueHeaders(continue_headers));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.encodeMetadata(metadata_map));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}, {\"grpc-status\", \"1\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, true));\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.failure\")\n                     .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.total\")\n                     
.value());\n}\n\nTEST_F(GrpcHttp1BridgeFilterTest, StatsHttp2NormalResponse) {\n  protocol_ = Http::Protocol::Http2;\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"0\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.success\")\n                     .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.total\")\n                     .value());\n}\n\nTEST_F(GrpcHttp1BridgeFilterTest, StatsHttp2ContentTypeGrpcPlusProto) {\n  protocol_ = Http::Protocol::Http2;\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc+proto\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"0\"}};\n  
EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.success\")\n                     .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.total\")\n                     .value());\n}\n\nTEST_F(GrpcHttp1BridgeFilterTest, NotHandlingHttp2) {\n  protocol_ = Http::Protocol::Http2;\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/foo\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  Http::TestRequestTrailerMapImpl request_trailers{{\"hello\", \"world\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"hello\", \"world\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(\"200\", response_headers.get_(\":status\"));\n}\n\nTEST_F(GrpcHttp1BridgeFilterTest, NotHandlingHttp1) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/foo\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  
Http::TestRequestTrailerMapImpl request_trailers{{\"hello\", \"world\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"hello\", \"world\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(\"200\", response_headers.get_(\":status\"));\n}\n\nTEST_F(GrpcHttp1BridgeFilterTest, HandlingNormalResponse) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  Http::TestRequestTrailerMapImpl request_trailers{{\"hello\", \"world\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n\n  Buffer::InstancePtr buffer(new Buffer::OwnedImpl(\"hello\"));\n  ON_CALL(encoder_callbacks_, encodingBuffer()).WillByDefault(Return(buffer.get()));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.encodeData(data, false));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"0\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(\"200\", response_headers.get_(\":status\"));\n  EXPECT_EQ(\"5\", 
response_headers.get_(\"content-length\"));\n  EXPECT_EQ(\"0\", response_headers.get_(\"grpc-status\"));\n}\n\nTEST_F(GrpcHttp1BridgeFilterTest, HandlingBadGrpcStatus) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  Http::TestRequestTrailerMapImpl request_trailers{{\"hello\", \"world\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.encodeData(data, false));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"1\"}, {\"grpc-message\", \"foo\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(\"503\", response_headers.get_(\":status\"));\n  EXPECT_EQ(\"0\", response_headers.get_(\"content-length\"));\n  EXPECT_EQ(\"1\", response_headers.get_(\"grpc-status\"));\n  EXPECT_EQ(\"foo\", response_headers.get_(\"grpc-message\"));\n}\n\n} // namespace\n} // namespace GrpcHttp1Bridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"reverse_bridge_test\",\n    srcs = [\"reverse_bridge_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_http1_reverse_bridge\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/grpc_http1_reverse_bridge:filter_lib\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"reverse_bridge_integration_test\",\n    srcs = [\"reverse_bridge_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_http1_reverse_bridge\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/grpc_http1_reverse_bridge:config\",\n        \"//source/extensions/filters/http/grpc_http1_reverse_bridge:filter_lib\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_http1_reverse_bridge\",\n    deps = [\n        \"//source/extensions/filters/http/grpc_http1_reverse_bridge:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:utility_lib\",\n        
\"@envoy_api//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_http1_reverse_bridge/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h\"\n\n#include \"extensions/filters/http/grpc_http1_reverse_bridge/config.h\"\n#include \"extensions/filters/http/grpc_http1_reverse_bridge/filter.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1ReverseBridge {\nnamespace {\n\nTEST(ReversBridgeFilterFactoryTest, ReverseBridgeFilter) {\n  const std::string yaml_string = R\"EOF(\ncontent_type: application/grpc+proto\nwithhold_grpc_frames: true\n  )EOF\";\n\n  envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfig proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  Config config_factory;\n  Http::FilterFactoryCb cb =\n      config_factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\nTEST(ReverseBridgeFilterFactoryTest, ReverseBridgeFilterRouteSpecificConfig) {\n  Config config_factory;\n  NiceMock<Server::Configuration::MockServerFactoryContext> factory_context;\n\n  ProtobufTypes::MessagePtr proto_config = config_factory.createEmptyRouteConfigProto();\n  EXPECT_TRUE(proto_config.get());\n\n  auto& cfg = dynamic_cast<\n      envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute&>(\n      *proto_config.get());\n  cfg.set_disabled(true);\n\n  Router::RouteSpecificFilterConfigConstSharedPtr route_config =\n      config_factory.createRouteSpecificFilterConfig(*proto_config, factory_context,\n                                                     ProtobufMessage::getNullValidationVisitor());\n 
 EXPECT_TRUE(route_config.get());\n\n  const auto* inflated = dynamic_cast<const FilterConfigPerRoute*>(route_config.get());\n  EXPECT_TRUE(inflated);\n}\n\n} // namespace\n} // namespace GrpcHttp1ReverseBridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h\"\n\n#include \"common/http/message_impl.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"gtest/gtest.h\"\n\nusing Envoy::Http::HeaderValueOf;\n\n// for ::operator\"\"s (which Windows compiler does not support):\nusing namespace std::string_literals;\n\nnamespace Envoy {\nnamespace {\n\n// Tests a downstream HTTP2 client sending gRPC requests that are converted into HTTP/1.1 for a\n// HTTP1 upstream.\nclass ReverseBridgeIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                     public HttpIntegrationTest {\npublic:\n  ReverseBridgeIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {}\n\n  void initialize() override {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n\n    const std::string filter =\n        R\"EOF(\nname: grpc_http1_reverse_bridge\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfig\n  content_type: application/x-protobuf\n  withhold_grpc_frames: true\n            )EOF\";\n    config_helper_.addFilter(filter);\n\n    auto vhost = config_helper_.createVirtualHost(\"disabled\");\n    envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute\n        route_config;\n    route_config.set_disabled(true);\n    (*vhost.mutable_routes(0)\n          ->mutable_typed_per_filter_config())[\"envoy.filters.http.grpc_http1_reverse_bridge\"]\n        .PackFrom(route_config);\n    config_helper_.addVirtualHost(vhost);\n\n    HttpIntegrationTest::initialize();\n  }\n\n  void TearDown() override { fake_upstream_connection_.reset(); }\n\nprotected:\n  
FakeHttpConnection::Type upstream_protocol_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ReverseBridgeIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verifies that we don't do anything with the request when it's hitting a route that\n// doesn't enable the bridge.\n// Regression test of https://github.com/envoyproxy/envoy/issues/9922\nTEST_P(ReverseBridgeIntegrationTest, DisabledRoute) {\n  upstream_protocol_ = FakeHttpConnection::Type::HTTP2;\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl request_headers({{\":scheme\", \"http\"},\n                                                  {\":method\", \"POST\"},\n                                                  {\":authority\", \"disabled\"},\n                                                  {\":path\", \"/testing.ExampleService/Print\"},\n                                                  {\"content-type\", \"application/grpc\"}});\n  auto response = codec_client_->makeRequestWithBody(request_headers, \"abcdef\");\n\n  // Wait for upstream to finish the request.\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Ensure that we don't do anything\n  EXPECT_EQ(\"abcdef\", upstream_request_->body().toString());\n  EXPECT_THAT(upstream_request_->headers(),\n              HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n\n  // Respond to the request.\n  Http::TestResponseHeaderMapImpl response_headers;\n  response_headers.setStatus(200);\n  response_headers.setContentType(\"application/grpc\");\n  upstream_request_->encodeHeaders(response_headers, false);\n\n  Buffer::OwnedImpl 
response_data{\"defgh\"};\n  upstream_request_->encodeData(response_data, false);\n\n  Http::TestResponseTrailerMapImpl response_trailers;\n  response_trailers.setGrpcStatus(std::string(\"0\"));\n  upstream_request_->encodeTrailers(response_trailers);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n\n  EXPECT_EQ(response->body(), response_data.toString());\n  EXPECT_THAT(response->headers(),\n              HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(*response->trailers(), HeaderValueOf(Http::Headers::get().GrpcStatus, \"0\"));\n\n  codec_client_->close();\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n}\n\nTEST_P(ReverseBridgeIntegrationTest, EnabledRoute) {\n  upstream_protocol_ = FakeHttpConnection::Type::HTTP1;\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl request_headers({{\":scheme\", \"http\"},\n                                                  {\":method\", \"POST\"},\n                                                  {\":authority\", \"foo\"},\n                                                  {\":path\", \"/testing.ExampleService/Print\"},\n                                                  {\"content-type\", \"application/grpc\"}});\n\n  auto response = codec_client_->makeRequestWithBody(request_headers, \"abcdef\");\n\n  // Wait for upstream to finish the request.\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Ensure that we stripped the length prefix and set the appropriate headers.\n  EXPECT_EQ(\"f\", upstream_request_->body().toString());\n\n  EXPECT_THAT(upstream_request_->headers(),\n              
HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n  EXPECT_THAT(upstream_request_->headers(),\n              HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n\n  // Respond to the request.\n  Http::TestResponseHeaderMapImpl response_headers;\n  response_headers.setStatus(200);\n  response_headers.setContentType(\"application/x-protobuf\");\n  upstream_request_->encodeHeaders(response_headers, false);\n\n  Buffer::OwnedImpl response_data{\"defgh\"};\n  upstream_request_->encodeData(response_data, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n\n  // Ensure that we restored the content-type and that we added the length prefix.\n  EXPECT_EQ(response_data.length() + 5, response->body().size());\n  EXPECT_TRUE(absl::EndsWith(response->body(), response_data.toString()));\n\n  // Comparing strings embedded zero literals is hard. Use string literal and std::equal to avoid\n  // truncating the string when it's converted to const char *.\n  const auto expected_prefix = \"\\0\\0\\0\\0\\4\"s;\n  EXPECT_TRUE(\n      std::equal(response->body().begin(), response->body().begin() + 4, expected_prefix.begin()));\n  EXPECT_THAT(response->headers(),\n              HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(*response->trailers(), HeaderValueOf(Http::Headers::get().GrpcStatus, \"0\"));\n\n  codec_client_->close();\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n}\n\nTEST_P(ReverseBridgeIntegrationTest, EnabledRouteBadContentType) {\n  upstream_protocol_ = FakeHttpConnection::Type::HTTP1;\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl request_headers({{\":scheme\", \"http\"},\n                                                  {\":method\", \"POST\"},\n                                                  
{\":authority\", \"foo\"},\n                                                  {\":path\", \"/testing.ExampleService/Print\"},\n                                                  {\"content-type\", \"application/grpc\"}});\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  response_headers.setStatus(200);\n  response_headers.setContentType(\"application/x-not-protobuf\");\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 5, response_headers, 5);\n\n  EXPECT_TRUE(response->complete());\n\n  // The response should indicate an error.\n  EXPECT_THAT(response->headers(),\n              HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(response->headers(), HeaderValueOf(Http::Headers::get().GrpcStatus, \"2\"));\n\n  codec_client_->close();\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n}\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/utility.h\"\n\n#include \"extensions/filters/http/grpc_http1_reverse_bridge/filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing Envoy::Http::HeaderValueOf;\nusing testing::_;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcHttp1ReverseBridge {\nnamespace {\n\nclass ReverseBridgeTest : public testing::Test {\nprotected:\n  void initialize(bool withhold_grpc_headers = true) {\n    filter_ = std::make_unique<Filter>(\"application/x-protobuf\", withhold_grpc_headers);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  FilterPtr filter_;\n  std::shared_ptr<Router::MockRoute> route_ = std::make_shared<Router::MockRoute>();\n  Router::RouteSpecificFilterConfig filter_config_;\n  Http::MockStreamDecoderFilterCallbacks decoder_callbacks_;\n  Http::MockStreamEncoderFilterCallbacks encoder_callbacks_;\n};\n\n// Verifies that an incoming request with too small a request body will immediately fail.\nTEST_F(ReverseBridgeTest, InvalidGrpcRequest) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr));\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers({{\"content-type\", \"application/grpc\"},\n                               
             {\"content-length\", \"25\"},\n                                            {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"20\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n  }\n\n  {\n    // We should remove the first five bytes.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abc\", 3);\n    EXPECT_CALL(decoder_callbacks_, sendLocalReply(_, _, _, _, _));\n    EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, _)).WillOnce(Invoke([](auto& headers, auto) {\n      EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Status, \"200\"));\n      EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().GrpcStatus, \"2\"));\n      EXPECT_THAT(headers,\n                  HeaderValueOf(Http::Headers::get().GrpcMessage,\n                                Http::Utility::PercentEncoding::encode(\"invalid request body\")));\n    }));\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(buffer, false));\n    EXPECT_EQ(decoder_callbacks_.details(), \"grpc_bridge_data_too_small\");\n  }\n}\n\n// Verifies that we do nothing to a header only request even if it looks like a gRPC request.\nTEST_F(ReverseBridgeTest, HeaderOnlyGrpcRequest) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  {\n    Http::TestRequestHeaderMapImpl headers({{\"content-type\", \"application/grpc\"},\n                                            {\"content-length\", \"25\"},\n                                            {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true));\n\n    // Verify that 
headers are unmodified.\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"25\"));\n  }\n\n  // Verify no modification on encoding path as well.\n  Http::TestResponseHeaderMapImpl headers(\n      {{\"content-type\", \"application/grpc\"}, {\"content-length\", \"20\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, true));\n  // Ensure we didn't mutate content type or length.\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"20\"));\n\n  // We should not drain the buffer, nor stop iteration.\n  Envoy::Buffer::OwnedImpl buffer;\n  buffer.add(\"abc\", 3);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, true));\n  EXPECT_EQ(3, buffer.length());\n}\n\n// Tests that the filter passes a non-GRPC request through without modification.\nTEST_F(ReverseBridgeTest, NoGrpcRequest) {\n  initialize();\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr));\n    Http::TestRequestHeaderMapImpl headers(\n        {{\"content-type\", \"application/json\"}, {\"content-length\", \"10\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n    // Ensure we didn't mutate content type or length.\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/json\"));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"10\"));\n  }\n\n  {\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"test\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(4, buffer.length());\n  }\n\n  Http::TestRequestTrailerMapImpl trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_->decodeTrailers(trailers));\n\n  {\n    Http::TestResponseHeaderMapImpl headers(\n        {{\"content-type\", \"application/json\"}, {\"content-length\", \"20\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n    // Ensure we didn't mutate content type or length.\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/json\"));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"20\"));\n  }\n\n  Envoy::Buffer::OwnedImpl buffer;\n  buffer.add(\"test\", 4);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, true));\n  EXPECT_EQ(4, buffer.length());\n\n  // Verify no modification on encoding path as well.\n  Http::TestResponseHeaderMapImpl headers(\n      {{\"content-type\", \"application/grpc\"}, {\"content-length\", \"20\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, true));\n  // Ensure we didn't mutate content type or length.\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"20\"));\n}\n\n// Verifies that if we receive a gRPC request but have configured the filter to not handle the gRPC\n// frames, then the data should not be modified.\nTEST_F(ReverseBridgeTest, GrpcRequestNoManageFrameHeader) {\n  initialize(false);\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr));\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers({{\"content-type\", \"application/grpc\"},\n                                            {\"content-length\", \"25\"},\n                                            {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n\n    
EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"25\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n  }\n\n  {\n    // We should not mutate the request data.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"abcdefgh\", buffer.toString());\n  }\n\n  {\n    Http::TestRequestTrailerMapImpl trailers;\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n  }\n\n  // We should not modify the content-length.\n  Http::TestResponseHeaderMapImpl headers(\n      {{\":status\", \"200\"}, {\"content-length\", \"30\"}, {\"content-type\", \"application/x-protobuf\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"30\"));\n\n  {\n    // We should not drain the buffer, nor stop iteration.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abc\", 3);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, false));\n    EXPECT_EQ(3, buffer.length());\n  }\n\n  {\n    // Last call should also not modify the buffer.\n    Http::TestResponseTrailerMapImpl trailers;\n    EXPECT_CALL(encoder_callbacks_, addEncodedTrailers()).WillOnce(ReturnRef(trailers));\n\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"ghj\", 3);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, true));\n    EXPECT_EQ(3, buffer.length());\n    EXPECT_THAT(trailers, HeaderValueOf(Http::Headers::get().GrpcStatus, \"0\"));\n  }\n}\n\n// Tests that a gRPC is downgraded to 
application/x-protobuf and upgraded back\n// to gRPC.\nTEST_F(ReverseBridgeTest, GrpcRequest) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr));\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers({{\"content-type\", \"application/grpc\"},\n                                            {\"content-length\", \"25\"},\n                                            {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"20\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n  }\n\n  {\n    // We should remove the first five bytes.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"fgh\", buffer.toString());\n  }\n\n  {\n    // Subsequent calls to decodeData should do nothing.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"abcdefgh\", buffer.toString());\n  }\n\n  {\n    Http::TestRequestTrailerMapImpl trailers;\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n  }\n\n  Http::TestResponseHeaderMapImpl headers(\n      {{\":status\", \"200\"}, {\"content-length\", \"30\"}, {\"content-type\", \"application/x-protobuf\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, 
\"application/grpc\"));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"35\"));\n\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abc\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"def\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n  {\n    // Last call should prefix the buffer with the size and insert the gRPC status into trailers.\n    Http::TestResponseTrailerMapImpl trailers;\n    EXPECT_CALL(encoder_callbacks_, addEncodedTrailers()).WillOnce(ReturnRef(trailers));\n\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"ghj\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, true));\n    EXPECT_EQ(17, buffer.length());\n    EXPECT_THAT(trailers, HeaderValueOf(Http::Headers::get().GrpcStatus, \"0\"));\n\n    Grpc::Decoder decoder;\n    std::vector<Grpc::Frame> frames;\n    decoder.decode(buffer, frames);\n\n    EXPECT_EQ(1, frames.size());\n    EXPECT_EQ(12, frames[0].length_);\n  }\n}\n\n// Tests that a gRPC is downgraded to application/x-protobuf and upgraded back\n// to gRPC and that content length headers are not required.\n// Same as ReverseBridgeTest.GrpcRequest except no content-length header is passed.\nTEST_F(ReverseBridgeTest, GrpcRequestNoContentLength) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr));\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers(\n        {{\"content-type\", \"application/grpc\"}, {\":path\", \"/testing.ExampleService/SendData\"}});\n    
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n    // Ensure that we don't insert a content-length header.\n    EXPECT_EQ(nullptr, headers.ContentLength());\n  }\n\n  {\n    // We should remove the first five bytes.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"fgh\", buffer.toString());\n  }\n\n  {\n    // Subsequent calls to decodeData should do nothing.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"abcdefgh\", buffer.toString());\n  }\n\n  {\n    Http::TestRequestTrailerMapImpl trailers;\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n  }\n\n  Http::TestResponseHeaderMapImpl headers(\n      {{\":status\", \"200\"}, {\"content-type\", \"application/x-protobuf\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  // Ensure that we don't insert a content-length header.\n  EXPECT_EQ(nullptr, headers.ContentLength());\n\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abc\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"def\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, 
filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n  {\n    // Last call should prefix the buffer with the size and insert the gRPC status into trailers.\n    Http::TestResponseTrailerMapImpl trailers;\n    EXPECT_CALL(encoder_callbacks_, addEncodedTrailers()).WillOnce(ReturnRef(trailers));\n\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"ghj\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, true));\n    EXPECT_EQ(17, buffer.length());\n    EXPECT_THAT(trailers, HeaderValueOf(Http::Headers::get().GrpcStatus, \"0\"));\n\n    Grpc::Decoder decoder;\n    std::vector<Grpc::Frame> frames;\n    decoder.decode(buffer, frames);\n\n    EXPECT_EQ(1, frames.size());\n    EXPECT_EQ(12, frames[0].length_);\n  }\n}\n\n// Regression tests that header-only responses do not get the content-length\n// adjusted (https://github.com/envoyproxy/envoy/issues/11099)\nTEST_F(ReverseBridgeTest, GrpcRequestHeaderOnlyResponse) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr));\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers({{\"content-type\", \"application/grpc\"},\n                                            {\"content-length\", \"25\"},\n                                            {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"20\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n  }\n\n  {\n    // We should remove the first five bytes.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 
8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"fgh\", buffer.toString());\n  }\n\n  {\n    // Subsequent calls to decodeData should do nothing.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"abcdefgh\", buffer.toString());\n  }\n\n  {\n    Http::TestRequestTrailerMapImpl trailers;\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n  }\n\n  Http::TestResponseHeaderMapImpl headers(\n      {{\":status\", \"200\"}, {\"content-length\", \"0\"}, {\"content-type\", \"application/x-protobuf\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, true));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"0\"));\n}\n\n// Tests that a gRPC is downgraded to application/x-protobuf and upgraded back\n// to gRPC, and that the upstream 400 is converted into an internal (13)\n// grpc-status.\nTEST_F(ReverseBridgeTest, GrpcRequestInternalError) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr));\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers(\n        {{\"content-type\", \"application/grpc\"}, {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n  }\n\n  {\n    // We should remove the first five bytes.\n    Envoy::Buffer::OwnedImpl 
buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"fgh\", buffer.toString());\n  }\n\n  {\n    // Subsequent calls to decodeData should do nothing.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"abcdefgh\", buffer.toString());\n  }\n\n  {\n    Http::TestRequestTrailerMapImpl trailers;\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n  }\n\n  Http::TestResponseHeaderMapImpl headers(\n      {{\":status\", \"400\"}, {\"content-type\", \"application/x-protobuf\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abc\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"def\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n  {\n    // Last call should prefix the buffer with the size and insert the appropriate gRPC status.\n    Http::TestResponseTrailerMapImpl trailers;\n    EXPECT_CALL(encoder_callbacks_, addEncodedTrailers()).WillOnce(ReturnRef(trailers));\n\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"ghj\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, true));\n    EXPECT_THAT(trailers, HeaderValueOf(Http::Headers::get().GrpcStatus, \"13\"));\n\n    Grpc::Decoder decoder;\n    std::vector<Grpc::Frame> frames;\n    
decoder.decode(buffer, frames);\n\n    EXPECT_EQ(1, frames.size());\n    EXPECT_EQ(12, frames[0].length_);\n  }\n}\n\n// Tests that a gRPC is downgraded to application/x-protobuf and that if the response\n// has a missing content type we respond with a useful error message.\nTEST_F(ReverseBridgeTest, GrpcRequestBadResponseNoContentType) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr));\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers(\n        {{\"content-type\", \"application/grpc\"}, {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n  }\n\n  {\n    // We should remove the first five bytes.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"fgh\", buffer.toString());\n  }\n\n  {\n    // Subsequent calls to decodeData should do nothing.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"abcdefgh\", buffer.toString());\n  }\n\n  Http::TestRequestTrailerMapImpl trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n\n  Http::TestResponseHeaderMapImpl headers({{\":status\", \"400\"}});\n  EXPECT_CALL(\n      decoder_callbacks_,\n      sendLocalReply(\n          Http::Code::OK,\n          \"envoy reverse bridge: upstream responded with no content-type header, status code 400\",\n          _, 
absl::make_optional(static_cast<Grpc::Status::GrpcStatus>(Grpc::Status::Unknown)), _));\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, _));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->encodeHeaders(headers, false));\n}\n\n// Tests that a gRPC is downgraded to application/x-protobuf and that if the response\n// has an invalid content type we respond with a useful error message.\nTEST_F(ReverseBridgeTest, GrpcRequestBadResponse) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr));\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers(\n        {{\"content-type\", \"application/grpc\"}, {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n  }\n\n  {\n    // We should remove the first five bytes.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"fgh\", buffer.toString());\n  }\n\n  {\n    // Subsequent calls to decodeData should do nothing.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"abcdefgh\", buffer.toString());\n  }\n\n  Http::TestRequestTrailerMapImpl trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n\n  Http::TestResponseHeaderMapImpl headers(\n      {{\":status\", \"400\"}, {\"content-type\", \"application/json\"}});\n  EXPECT_CALL(\n      decoder_callbacks_,\n      
sendLocalReply(\n          Http::Code::OK,\n          \"envoy reverse bridge: upstream responded with unsupported \"\n          \"content-type application/json, status code 400\",\n          _, absl::make_optional(static_cast<Grpc::Status::GrpcStatus>(Grpc::Status::Unknown)), _));\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, _));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->encodeHeaders(headers, false));\n}\n\n// Tests that the filter passes a GRPC request through without modification because it is disabled\n// per route.\nTEST_F(ReverseBridgeTest, FilterConfigPerRouteDisabled) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute\n      filter_config_per_route;\n  filter_config_per_route.set_disabled(true);\n  FilterConfigPerRoute filterConfigPerRoute(filter_config_per_route);\n\n  ON_CALL(*decoder_callbacks_.route_,\n          perFilterConfig(HttpFilterNames::get().GrpcHttp1ReverseBridge))\n      .WillByDefault(testing::Return(&filterConfigPerRoute));\n\n  EXPECT_CALL(decoder_callbacks_, route()).Times(2);\n\n  Http::TestRequestHeaderMapImpl headers({{\"content-type\", \"application/grpc\"},\n                                          {\"content-length\", \"25\"},\n                                          {\":path\", \"/testing.ExampleService/SendData\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n\n  // Verify that headers are unmodified.\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"25\"));\n  EXPECT_THAT(headers,\n              HeaderValueOf(Http::Headers::get().Path, \"/testing.ExampleService/SendData\"));\n}\n\n// Tests that a gRPC is downgraded to application/x-protobuf and upgraded back\n// to gRPC when the filter is enabled per 
route.\nTEST_F(ReverseBridgeTest, FilterConfigPerRouteEnabled) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute\n      filter_config_per_route;\n  filter_config_per_route.set_disabled(false);\n  FilterConfigPerRoute filterConfigPerRoute(filter_config_per_route);\n\n  ON_CALL(*decoder_callbacks_.route_,\n          perFilterConfig(HttpFilterNames::get().GrpcHttp1ReverseBridge))\n      .WillByDefault(testing::Return(&filterConfigPerRoute));\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).Times(2);\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers({{\"content-type\", \"application/grpc\"},\n                                            {\"content-length\", \"25\"},\n                                            {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"20\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n  }\n\n  {\n    // We should remove the first five bytes.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"fgh\", buffer.toString());\n  }\n\n  {\n    // Subsequent calls to decodeData should do nothing.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"abcdefgh\", buffer.toString());\n  }\n\n  {\n    Http::TestRequestTrailerMapImpl trailers;\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_->decodeTrailers(trailers));\n  }\n\n  Http::TestResponseHeaderMapImpl headers(\n      {{\":status\", \"200\"}, {\"content-length\", \"30\"}, {\"content-type\", \"application/x-protobuf\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"35\"));\n\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abc\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"def\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n  {\n    // Last call should prefix the buffer with the size and insert the gRPC status into trailers.\n    Http::TestResponseTrailerMapImpl trailers;\n    EXPECT_CALL(encoder_callbacks_, addEncodedTrailers()).WillOnce(ReturnRef(trailers));\n\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"ghj\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, true));\n    EXPECT_EQ(17, buffer.length());\n    EXPECT_THAT(trailers, HeaderValueOf(Http::Headers::get().GrpcStatus, \"0\"));\n\n    Grpc::Decoder decoder;\n    std::vector<Grpc::Frame> frames;\n    decoder.decode(buffer, frames);\n\n    EXPECT_EQ(1, frames.size());\n    EXPECT_EQ(12, frames[0].length_);\n  }\n}\n\nTEST_F(ReverseBridgeTest, RouteWithTrailers) {\n  initialize();\n  decoder_callbacks_.is_grpc_request_ = true;\n\n  envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute\n      filter_config_per_route;\n  filter_config_per_route.set_disabled(false);\n  
FilterConfigPerRoute filterConfigPerRoute(filter_config_per_route);\n\n  ON_CALL(*decoder_callbacks_.route_,\n          perFilterConfig(HttpFilterNames::get().GrpcHttp1ReverseBridge))\n      .WillByDefault(testing::Return(&filterConfigPerRoute));\n\n  {\n    EXPECT_CALL(decoder_callbacks_, route()).Times(2);\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl headers({{\"content-type\", \"application/grpc\"},\n                                            {\"content-length\", \"25\"},\n                                            {\":path\", \"/testing.ExampleService/SendData\"}});\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/x-protobuf\"));\n    EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"20\"));\n    EXPECT_THAT(headers,\n                HeaderValueOf(Http::CustomHeaders::get().Accept, \"application/x-protobuf\"));\n  }\n\n  {\n    // We should remove the first five bytes.\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abcdefgh\", 8);\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n    EXPECT_EQ(\"fgh\", buffer.toString());\n  }\n\n  {\n    Http::TestRequestTrailerMapImpl trailers;\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n  }\n\n  Http::TestResponseHeaderMapImpl headers(\n      {{\":status\", \"200\"}, {\"content-length\", \"30\"}, {\"content-type\", \"application/x-protobuf\"}});\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, \"application/grpc\"));\n  EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, \"35\"));\n\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"abc\", 4);\n  
  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n  {\n    // First few calls should drain the buffer\n    Envoy::Buffer::OwnedImpl buffer;\n    buffer.add(\"def\", 4);\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(buffer, false));\n    EXPECT_EQ(0, buffer.length());\n  }\n\n  {\n    // Last call should prefix the buffer with the size and insert the gRPC status into trailers.\n    Envoy::Buffer::OwnedImpl buffer;\n    EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false))\n        .WillOnce(Invoke([&](Envoy::Buffer::Instance& buf, bool) -> void { buffer.move(buf); }));\n    Http::TestResponseTrailerMapImpl trailers({{\"foo\", \"bar\"}, {\"one\", \"two\"}, {\"three\", \"four\"}});\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(trailers));\n    EXPECT_THAT(trailers, HeaderValueOf(Http::Headers::get().GrpcStatus, \"0\"));\n\n    Grpc::Decoder decoder;\n    std::vector<Grpc::Frame> frames;\n    decoder.decode(buffer, frames);\n\n    EXPECT_EQ(4, trailers.size());\n    EXPECT_EQ(1, frames.size());\n    EXPECT_EQ(8, frames[0].length_);\n  }\n}\n\n} // namespace\n} // namespace GrpcHttp1ReverseBridge\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_json_transcoder/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"json_transcoder_filter_test\",\n    srcs = [\"json_transcoder_filter_test.cc\"],\n    data = [\n        \"//test/proto:bookstore.proto\",\n        \"//test/proto:bookstore_proto_descriptor\",\n    ],\n    extension_name = \"envoy.filters.http.grpc_json_transcoder\",\n    deps = [\n        \"//source/extensions/filters/http/grpc_json_transcoder:json_transcoder_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/proto:bookstore_proto_cc_proto\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"http_body_utils_test\",\n    srcs = [\"http_body_utils_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_json_transcoder\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/extensions/filters/http/grpc_json_transcoder:http_body_utils_lib\",\n        \"//test/proto:bookstore_proto_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"transcoder_input_stream_test\",\n    srcs = [\"transcoder_input_stream_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_json_transcoder\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/http/grpc_json_transcoder:transcoder_input_stream_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"grpc_json_transcoder_integration_test\",\n    srcs = [\n        \"grpc_json_transcoder_integration_test.cc\",\n    ],\n    data = [\n        
\"//test/proto:bookstore_proto_descriptor\",\n    ],\n    extension_name = \"envoy.filters.http.grpc_json_transcoder\",\n    deps = [\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/grpc_json_transcoder:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/proto:bookstore_proto_cc_proto\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_json_transcoder\",\n    deps = [\n        \"//source/extensions/filters/http/grpc_json_transcoder:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_json_transcoder/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.validate.h\"\n\n#include \"extensions/filters/http/grpc_json_transcoder/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\nnamespace {\n\nTEST(GrpcJsonTranscoderFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(GrpcJsonTranscoderFilterConfig().createFilterFactoryFromProto(\n                   envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder(),\n                   \"stats\", context),\n               ProtoValidationException);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(GrpcJsonTranscoderFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.grpc_json_transcoder\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc",
    "content": "#include \"common/grpc/codec.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/proto/bookstore.pb.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"gtest/gtest.h\"\n\nusing Envoy::Protobuf::TextFormat;\nusing Envoy::ProtobufUtil::Status;\nusing Envoy::ProtobufUtil::error::Code;\nusing Envoy::ProtobufWkt::Empty;\n\nnamespace Envoy {\nnamespace {\n\n// A magic header value which marks header as not expected.\nconstexpr char UnexpectedHeaderValue[] = \"Unexpected header value\";\n\nclass GrpcJsonTranscoderIntegrationTest\n    : public testing::TestWithParam<Network::Address::IpVersion>,\n      public HttpIntegrationTest {\npublic:\n  GrpcJsonTranscoderIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void SetUp() override {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n    const std::string filter =\n        R\"EOF(\n            name: grpc_json_transcoder\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder\n              proto_descriptor : \"{}\"\n              services : \"bookstore.Bookstore\"\n            )EOF\";\n    config_helper_.addFilter(\n        fmt::format(filter, TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\")));\n  }\n\nprotected:\n  template <class RequestType, class ResponseType>\n  void testTranscoding(Http::RequestHeaderMap&& request_headers, const std::string& request_body,\n                       const std::vector<std::string>& grpc_request_messages,\n                       const std::vector<std::string>& grpc_response_messages,\n                       const Status& grpc_status, Http::HeaderMap&& response_headers,\n                       const std::string& 
response_body, bool full_response = true,\n                       bool always_send_trailers = false) {\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n    IntegrationStreamDecoderPtr response;\n    if (!request_body.empty()) {\n      auto encoder_decoder = codec_client_->startRequest(request_headers);\n      request_encoder_ = &encoder_decoder.first;\n      response = std::move(encoder_decoder.second);\n      Buffer::OwnedImpl body(request_body);\n      codec_client_->sendData(*request_encoder_, body, true);\n    } else {\n      response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    }\n\n    ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n    if (!grpc_request_messages.empty()) {\n      ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n      ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n      std::string dump;\n      for (char ch : upstream_request_->body().toString()) {\n        dump += std::to_string(int(ch));\n        dump += \" \";\n      }\n\n      Grpc::Decoder grpc_decoder;\n      std::vector<Grpc::Frame> frames;\n      EXPECT_TRUE(grpc_decoder.decode(upstream_request_->body(), frames)) << dump;\n      EXPECT_EQ(grpc_request_messages.size(), frames.size());\n\n      for (size_t i = 0; i < grpc_request_messages.size(); ++i) {\n        RequestType actual_message;\n        if (frames[i].length_ > 0) {\n          EXPECT_TRUE(actual_message.ParseFromString(frames[i].data_->toString()));\n        }\n        RequestType expected_message;\n        EXPECT_TRUE(TextFormat::ParseFromString(grpc_request_messages[i], &expected_message));\n        EXPECT_THAT(actual_message, ProtoEq(expected_message));\n      }\n\n      Http::TestResponseHeaderMapImpl response_headers;\n      response_headers.setStatus(200);\n      response_headers.setContentType(\"application/grpc\");\n      if (grpc_response_messages.empty() && 
!always_send_trailers) {\n        response_headers.setGrpcStatus(static_cast<uint64_t>(grpc_status.error_code()));\n        response_headers.setGrpcMessage(absl::string_view(grpc_status.error_message().data(),\n                                                          grpc_status.error_message().size()));\n        upstream_request_->encodeHeaders(response_headers, true);\n      } else {\n        response_headers.addCopy(Http::LowerCaseString(\"trailer\"), \"Grpc-Status\");\n        response_headers.addCopy(Http::LowerCaseString(\"trailer\"), \"Grpc-Message\");\n        upstream_request_->encodeHeaders(response_headers, false);\n        for (const auto& response_message_str : grpc_response_messages) {\n          ResponseType response_message;\n          EXPECT_TRUE(TextFormat::ParseFromString(response_message_str, &response_message));\n          auto buffer = Grpc::Common::serializeToGrpcFrame(response_message);\n          upstream_request_->encodeData(*buffer, false);\n        }\n        Http::TestResponseTrailerMapImpl response_trailers;\n        response_trailers.setGrpcStatus(static_cast<uint64_t>(grpc_status.error_code()));\n        response_trailers.setGrpcMessage(absl::string_view(grpc_status.error_message().data(),\n                                                           grpc_status.error_message().size()));\n        upstream_request_->encodeTrailers(response_trailers);\n      }\n      EXPECT_TRUE(upstream_request_->complete());\n    }\n\n    response->waitForEndStream();\n    EXPECT_TRUE(response->complete());\n\n    if (response->headers().get(Http::LowerCaseString(\"transfer-encoding\")) == nullptr ||\n        !absl::StartsWith(response->headers()\n                              .get(Http::LowerCaseString(\"transfer-encoding\"))\n                              ->value()\n                              .getStringView(),\n                          \"chunked\")) {\n      EXPECT_EQ(response->headers().get(Http::LowerCaseString(\"trailer\")), nullptr);\n    
}\n\n    response_headers.iterate(\n        [response = response.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate {\n          Http::LowerCaseString lower_key{std::string(entry.key().getStringView())};\n          if (entry.value() == UnexpectedHeaderValue) {\n            EXPECT_FALSE(response->headers().get(lower_key));\n          } else {\n            EXPECT_EQ(entry.value().getStringView(),\n                      response->headers().get(lower_key)->value().getStringView());\n          }\n          return Http::HeaderMap::Iterate::Continue;\n        });\n    if (!response_body.empty()) {\n      if (full_response) {\n        EXPECT_EQ(response_body, response->body());\n      } else {\n        EXPECT_TRUE(absl::StartsWith(response->body(), response_body));\n      }\n    }\n\n    codec_client_->close();\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, GrpcJsonTranscoderIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryPost) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/shelf\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"application/json\"}},\n      R\"({\"theme\": \"Children\"})\", {R\"(shelf { theme: \"Children\" })\"},\n      {R\"(id: 20 theme: \"Children\" )\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"content-length\", \"30\"},\n                             
         {\"grpc-status\", \"0\"}},\n      R\"({\"id\":\"20\",\"theme\":\"Children\"})\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, QueryParams) {\n  HttpIntegrationTest::initialize();\n  // 1. Binding theme='Children' in CreateShelfRequest\n  // Using the following HTTP template:\n  //   POST /shelves\n  //   body: shelf\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/shelf?shelf.theme=Children\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"application/json\"}},\n      \"\", {R\"(shelf { theme: \"Children\" })\"}, {R\"(id: 20 theme: \"Children\" )\"}, Status(),\n      Http::TestResponseHeaderMapImpl{\n          {\":status\", \"200\"},\n          {\"content-type\", \"application/json\"},\n      },\n      R\"({\"id\":\"20\",\"theme\":\"Children\"})\");\n\n  // 2. Binding theme='Children' and id='999' in CreateShelfRequest\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/shelf?shelf.id=999&shelf.theme=Children\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"application/json\"}},\n      \"\", {R\"(shelf { id: 999 theme: \"Children\" })\"}, {R\"(id: 999 theme: \"Children\" )\"}, Status(),\n      Http::TestResponseHeaderMapImpl{\n          {\":status\", \"200\"},\n          {\"content-type\", \"application/json\"},\n      },\n      R\"({\"id\":\"999\",\"theme\":\"Children\"})\");\n\n  // 3. 
Binding shelf=1, book=<post body> and book.title='War and Peace' in CreateBookRequest\n  //    Using the following HTTP template:\n  //      POST /shelves/{shelf}/books\n  //      body: book\n  testTranscoding<bookstore::CreateBookRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"PUT\"},\n                                     {\":path\", \"/shelves/1/books?book.title=War%20and%20Peace\"},\n                                     {\":authority\", \"host\"}},\n      R\"({\"author\" : \"Leo Tolstoy\"})\",\n      {R\"(shelf: 1 book { author: \"Leo Tolstoy\" title: \"War and Peace\" })\"},\n      {R\"(id: 3 author: \"Leo Tolstoy\" title: \"War and Peace\")\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"application/json\"}},\n      R\"({\"id\":\"3\",\"author\":\"Leo Tolstoy\",\"title\":\"War and Peace\"})\");\n\n  // 4. Binding shelf=1, book.author='Leo Tolstoy' and book.title='War and Peace' in\n  // CreateBookRequest\n  //    Using the following HTTP template:\n  //      POST /shelves/{shelf}/books\n  //      body: book\n  testTranscoding<bookstore::CreateBookRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"PUT\"},\n          {\":path\", \"/shelves/1/books?book.author=Leo%20Tolstoy&book.title=War%20and%20Peace\"},\n          {\":authority\", \"host\"}},\n      \"\", {R\"(shelf: 1 book { author: \"Leo Tolstoy\" title: \"War and Peace\" })\"},\n      {R\"(id: 3 author: \"Leo Tolstoy\" title: \"War and Peace\")\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"application/json\"}},\n      R\"({\"id\":\"3\",\"author\":\"Leo Tolstoy\",\"title\":\"War and Peace\"})\");\n\n  // 5. 
Test URL decoding.\n  testTranscoding<bookstore::CreateBookRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"PUT\"},\n                                     {\":path\", \"/shelves/1/books?book.title=War%20%26%20Peace\"},\n                                     {\":authority\", \"host\"}},\n      R\"({\"author\" : \"Leo Tolstoy\"})\",\n      {R\"(shelf: 1 book { author: \"Leo Tolstoy\" title: \"War & Peace\" })\"},\n      {R\"(id: 3 author: \"Leo Tolstoy\" title: \"War & Peace\")\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"application/json\"}},\n      R\"({\"id\":\"3\",\"author\":\"Leo Tolstoy\",\"title\":\"War & Peace\"})\");\n\n  // 6. Binding all book fields through query params.\n  testTranscoding<bookstore::CreateBookRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"PUT\"},\n          {\":path\",\n           \"/shelves/1/books?book.id=999&book.author=Leo%20Tolstoy&book.title=War%20and%20Peace\"},\n          {\":authority\", \"host\"}},\n      \"\", {R\"(shelf: 1 book { id : 999  author: \"Leo Tolstoy\" title: \"War and Peace\" })\"},\n      {R\"(id: 999 author: \"Leo Tolstoy\" title: \"War and Peace\")\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"application/json\"}},\n      R\"({\"id\":\"999\",\"author\":\"Leo Tolstoy\",\"title\":\"War and Peace\"})\");\n\n  // 7. Binding shelf=3, book=<post body> and the repeated field book.quote with\n  //     two values (\"Winter is coming\" and \"Hold the door\") in CreateBookRequest.\n  //     These values should be added to the repeated field in addition to what is\n  //     translated in the body.\n  //     Using the following HTTP template:\n  //       POST /shelves/{shelf}/books\n  //       body: book\n  std::string reqBody =\n      R\"({\"id\":\"999\",\"author\":\"George R.R. 
Martin\",\"title\":\"A Game of Thrones\",)\"\n      R\"(\"quotes\":[\"A girl has no name\",\"A very small man can cast a very large shadow\"]})\";\n  std::string grpcResp = R\"(id : 999  author: \"George R.R. Martin\" title: \"A Game of Thrones\"\n      quotes: \"A girl has no name\" quotes : \"A very small man can cast a very large shadow\"\n      quotes: \"Winter is coming\" quotes : \"Hold the door\")\";\n  std::string expectGrpcRequest = absl::StrCat(\"shelf: 1 book {\", grpcResp, \"}\");\n  std::string respBody =\n      R\"({\"id\":\"999\",\"author\":\"George R.R. Martin\",\"title\":\"A Game of Thrones\",\"quotes\":[\"A girl has no name\")\"\n      R\"(,\"A very small man can cast a very large shadow\",\"Winter is coming\",\"Hold the door\"]})\";\n\n  testTranscoding<bookstore::CreateBookRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"PUT\"},\n          {\":path\",\n           \"/shelves/1/books?book.quotes=Winter%20is%20coming&book.quotes=Hold%20the%20door\"},\n          {\":authority\", \"host\"}},\n      reqBody, {expectGrpcRequest}, {grpcResp}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"application/json\"}},\n      respBody);\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryGet) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<Empty, bookstore::ListShelvesResponse>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/shelves\"}, {\":authority\", \"host\"}},\n      \"\", {\"\"}, {R\"(shelves { id: 20 theme: \"Children\" }\n          shelves { id: 1 theme: \"Foo\" } )\"},\n      Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"content-length\", \"69\"},\n                                      {\"grpc-status\", \"0\"}},\n      
R\"({\"shelves\":[{\"id\":\"20\",\"theme\":\"Children\"},{\"id\":\"1\",\"theme\":\"Foo\"}]})\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryGetHttpBody) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<Empty, google::api::HttpBody>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/index\"}, {\":authority\", \"host\"}},\n      \"\", {\"\"}, {R\"(content_type: \"text/html\" data: \"<h1>Hello!</h1>\" )\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"text/html\"},\n                                      {\"content-length\", \"15\"},\n                                      {\"grpc-status\", \"0\"}},\n      R\"(<h1>Hello!</h1>)\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBody) {\n  HttpIntegrationTest::initialize();\n\n  // 1. Normal streaming get\n  testTranscoding<Empty, google::api::HttpBody>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/indexStream\"}, {\":authority\", \"host\"}},\n      \"\", {\"\"},\n      {R\"(content_type: \"text/html\" data: \"<h1>Hello!</h1>\")\",\n       R\"(content_type: \"text/plain\" data: \"Hello!\")\"},\n      Status(), Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"text/html\"}},\n      R\"(<h1>Hello!</h1>)\"\n      R\"(Hello!)\");\n\n  // 2. 
Empty response (trailers only) from streaming backend, with a gRPC error.\n  testTranscoding<Empty, google::api::HttpBody>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/indexStream\"}, {\":authority\", \"host\"}},\n      \"\", {\"\"}, {}, Status(Code::NOT_FOUND, \"Not Found\"),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}, {\"content-type\", \"application/json\"}},\n      \"\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBodyMultipleFramesInData) {\n  HttpIntegrationTest::initialize();\n\n  // testTranscoding() does not provide grpc multiframe support.\n  // Since this is one-off it does not make sense to even more\n  // complicate this function.\n  //\n  // Make request to gRPC upstream\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/indexStream\"},\n      {\":authority\", \"host\"},\n  });\n  waitForNextUpstreamRequest();\n\n  // Send multi-framed gRPC response\n  // Headers\n  Http::TestResponseHeaderMapImpl response_headers;\n  response_headers.setStatus(200);\n  response_headers.setContentType(\"application/grpc\");\n  upstream_request_->encodeHeaders(response_headers, false);\n  // Payload\n  google::api::HttpBody grpcMsg;\n  EXPECT_TRUE(TextFormat::ParseFromString(R\"(content_type: \"text/plain\" data: \"Hello\")\", &grpcMsg));\n  Buffer::OwnedImpl response_buffer;\n  for (size_t i = 0; i < 3; i++) {\n    auto frame = Grpc::Common::serializeToGrpcFrame(grpcMsg);\n    response_buffer.add(*frame);\n  }\n  upstream_request_->encodeData(response_buffer, false);\n  // Trailers\n  Http::TestResponseTrailerMapImpl response_trailers;\n  auto grpc_status = Status();\n  response_trailers.setGrpcStatus(static_cast<uint64_t>(grpc_status.error_code()));\n  response_trailers.setGrpcMessage(\n      
absl::string_view(grpc_status.error_message().data(), grpc_status.error_message().size()));\n  upstream_request_->encodeTrailers(response_trailers);\n  EXPECT_TRUE(upstream_request_->complete());\n\n  // Wait for complete / check body to have 3 frames joined\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->body(), \"HelloHelloHello\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBodyFragmented) {\n  HttpIntegrationTest::initialize();\n\n  // Make request to gRPC upstream\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/indexStream\"},\n      {\":authority\", \"host\"},\n  });\n  waitForNextUpstreamRequest();\n\n  // Send fragmented gRPC response\n  // Headers\n  Http::TestResponseHeaderMapImpl response_headers;\n  response_headers.setStatus(200);\n  response_headers.setContentType(\"application/grpc\");\n  upstream_request_->encodeHeaders(response_headers, false);\n  // Fragmented payload\n  google::api::HttpBody http_body;\n  http_body.set_content_type(\"text/plain\");\n  http_body.set_data(std::string(1024, 'a'));\n  // Fragment gRPC frame into 2 buffers equally divided\n  Buffer::OwnedImpl fragment1;\n  auto fragment2 = Grpc::Common::serializeToGrpcFrame(http_body);\n  fragment1.move(*fragment2, fragment2->length() / 2);\n  upstream_request_->encodeData(fragment1, false);\n  upstream_request_->encodeData(*fragment2, false);\n  // Trailers\n  Http::TestResponseTrailerMapImpl response_trailers;\n  auto grpc_status = Status();\n  response_trailers.setGrpcStatus(static_cast<uint64_t>(grpc_status.error_code()));\n  response_trailers.setGrpcMessage(\n      absl::string_view(grpc_status.error_message().data(), grpc_status.error_message().size()));\n  upstream_request_->encodeTrailers(response_trailers);\n  EXPECT_TRUE(upstream_request_->complete());\n\n  // 
Wait for complete\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  // Ensure that body was actually replaced\n  EXPECT_EQ(response->body(), http_body.data());\n  // As well as content-type header\n  auto content_type = response->headers().get(Http::LowerCaseString(\"content-type\"));\n  EXPECT_EQ(\"text/plain\", content_type->value().getStringView());\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryEchoHttpBody) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::EchoBodyRequest, google::api::HttpBody>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/echoBody?arg=oops\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"text/plain\"}},\n      \"Hello!\", {R\"(arg: \"oops\" nested { content { content_type: \"text/plain\" data: \"Hello!\" } })\"},\n      {R\"(content_type: \"text/html\" data: \"<h1>Hello!</h1>\" )\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"text/html\"},\n                                      {\"content-length\", \"15\"},\n                                      {\"grpc-status\", \"0\"}},\n      R\"(<h1>Hello!</h1>)\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryGetError) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::GetShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/shelves/100?\"}, {\":authority\", \"host\"}},\n      \"\", {\"shelf: 100\"}, {}, Status(Code::NOT_FOUND, \"Shelf 100 Not Found\"),\n      Http::TestResponseHeaderMapImpl{\n          {\":status\", \"404\"}, {\"grpc-status\", \"5\"}, {\"grpc-message\", \"Shelf 100 Not Found\"}},\n      \"\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryGetError1) {\n  const std::string filter =\n      R\"EOF(\n    
        name: grpc_json_transcoder\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder\n              proto_descriptor : \"{}\"\n              services : \"bookstore.Bookstore\"\n              ignore_unknown_query_parameters : true\n            )EOF\";\n  config_helper_.addFilter(\n      fmt::format(filter, TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\")));\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::GetShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/shelves/100?unknown=1&shelf=9999\"},\n                                     {\":authority\", \"host\"}},\n      \"\", {\"shelf: 9999\"}, {}, Status(Code::NOT_FOUND, \"Shelf 9999 Not Found\"),\n      Http::TestResponseHeaderMapImpl{\n          {\":status\", \"404\"}, {\"grpc-status\", \"5\"}, {\"grpc-message\", \"Shelf 9999 Not Found\"}},\n      \"\");\n}\n\n// Test an upstream that returns an error in a trailer-only response.\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryErrorConvertedToJson) {\n  const std::string filter =\n      R\"EOF(\n            name: grpc_json_transcoder\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder\n              proto_descriptor: \"{}\"\n              services: \"bookstore.Bookstore\"\n              convert_grpc_status: true\n            )EOF\";\n  config_helper_.addFilter(\n      fmt::format(filter, TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\")));\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::GetShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/shelves/100\"}, {\":authority\", \"host\"}},\n      \"\", {\"shelf: 100\"}, {}, Status(Code::NOT_FOUND, \"Shelf 100 Not Found\"),\n     
 Http::TestResponseHeaderMapImpl{{\":status\", \"404\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"grpc-status\", UnexpectedHeaderValue},\n                                      {\"grpc-message\", UnexpectedHeaderValue}},\n      R\"({\"code\":5,\"message\":\"Shelf 100 Not Found\"})\");\n}\n\n// Upstream sends headers (e.g. sends metadata), and then sends trailer with an error.\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryErrorInTrailerConvertedToJson) {\n  const std::string filter =\n      R\"EOF(\n            name: grpc_json_transcoder\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder\n              proto_descriptor: \"{}\"\n              services: \"bookstore.Bookstore\"\n              convert_grpc_status: true\n            )EOF\";\n  config_helper_.addFilter(\n      fmt::format(filter, TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\")));\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::GetShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/shelves/100\"}, {\":authority\", \"host\"}},\n      \"\", {\"shelf: 100\"}, {}, Status(Code::NOT_FOUND, \"Shelf 100 Not Found\"),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"404\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"grpc-status\", UnexpectedHeaderValue},\n                                      {\"grpc-message\", UnexpectedHeaderValue}},\n      R\"({\"code\":5,\"message\":\"Shelf 100 Not Found\"})\", true, true);\n}\n\n// Streaming backend returns an error in a trailer-only response.\nTEST_P(GrpcJsonTranscoderIntegrationTest, StreamingErrorConvertedToJson) {\n  const std::string filter =\n      R\"EOF(\n            name: grpc_json_transcoder\n            
typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder\n              proto_descriptor: \"{}\"\n              services: \"bookstore.Bookstore\"\n              convert_grpc_status: true\n            )EOF\";\n  config_helper_.addFilter(\n      fmt::format(filter, TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\")));\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::ListBooksRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/shelves/37/books\"}, {\":authority\", \"host\"}},\n      \"\", {\"shelf: 37\"}, {}, Status(Code::NOT_FOUND, \"Shelf 37 Not Found\"),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"404\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"grpc-status\", UnexpectedHeaderValue},\n                                      {\"grpc-message\", UnexpectedHeaderValue}},\n      R\"({\"code\":5,\"message\":\"Shelf 37 Not Found\"})\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryDelete) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::DeleteBookRequest, Empty>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"DELETE\"}, {\":path\", \"/shelves/456/books/123\"}, {\":authority\", \"host\"}},\n      \"\", {\"shelf: 456 book: 123\"}, {\"\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"content-length\", \"2\"},\n                                      {\"grpc-status\", \"0\"}},\n      \"{}\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryPatch) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::UpdateBookRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"PATCH\"}, 
{\":path\", \"/shelves/456/books/123\"}, {\":authority\", \"host\"}},\n      R\"({\"author\" : \"Leo Tolstoy\", \"title\" : \"War and Peace\"})\",\n      {R\"(shelf: 456 book { id: 123 author: \"Leo Tolstoy\" title: \"War and Peace\" })\"},\n      {R\"(id: 123 author: \"Leo Tolstoy\" title: \"War and Peace\")\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"content-length\", \"59\"},\n                                      {\"grpc-status\", \"0\"}},\n      R\"({\"id\":\"123\",\"author\":\"Leo Tolstoy\",\"title\":\"War and Peace\"})\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnaryCustom) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::GetShelfRequest, Empty>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"OPTIONS\"}, {\":path\", \"/shelves/456\"}, {\":authority\", \"host\"}},\n      \"\", {\"shelf: 456\"}, {\"\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"content-length\", \"2\"},\n                                      {\"grpc-status\", \"0\"}},\n      \"{}\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, BindingAndBody) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::CreateBookRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"PUT\"}, {\":path\", \"/shelves/1/books\"}, {\":authority\", \"host\"}},\n      R\"({\"author\" : \"Leo Tolstoy\", \"title\" : \"War and Peace\"})\",\n      {R\"(shelf: 1 book { author: \"Leo Tolstoy\" title: \"War and Peace\" })\"},\n      {R\"(id: 3 author: \"Leo Tolstoy\" title: \"War and Peace\")\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"application/json\"}},\n      
R\"({\"id\":\"3\",\"author\":\"Leo Tolstoy\",\"title\":\"War and Peace\"})\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, ServerStreamingGet) {\n  HttpIntegrationTest::initialize();\n\n  // 1: Normal streaming get\n  testTranscoding<bookstore::ListBooksRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/shelves/1/books\"}, {\":authority\", \"host\"}},\n      \"\", {\"shelf: 1\"},\n      {R\"(id: 1 author: \"Neal Stephenson\" title: \"Readme\")\",\n       R\"(id: 2 author: \"George R.R. Martin\" title: \"A Game of Thrones\")\"},\n      Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"application/json\"}},\n      R\"([{\"id\":\"1\",\"author\":\"Neal Stephenson\",\"title\":\"Readme\"})\"\n      R\"(,{\"id\":\"2\",\"author\":\"George R.R. Martin\",\"title\":\"A Game of Thrones\"}])\");\n\n  // 2: Empty response (trailers only) from streaming backend.\n  // Response type is a valid JSON, so content type should be application/json.\n  // Regression test for github.com/envoyproxy/envoy#5011\n  testTranscoding<bookstore::ListBooksRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/shelves/2/books\"}, {\":authority\", \"host\"}},\n      \"\", {\"shelf: 2\"}, {}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"application/json\"}},\n      \"[]\");\n\n  // 3: Empty response (trailers only) from streaming backend, with a gRPC error.\n  testTranscoding<bookstore::ListBooksRequest, bookstore::Book>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/shelves/37/books\"}, {\":authority\", \"host\"}},\n      \"\", {\"shelf: 37\"}, {}, Status(Code::NOT_FOUND, \"Shelf 37 not found\"),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}, {\"content-type\", \"application/json\"}},\n      
\"[]\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, StreamingPost) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"}, {\":path\", \"/bulk/shelves\"}, {\":authority\", \"host\"}},\n      R\"([\n        { \"theme\" : \"Classics\" },\n        { \"theme\" : \"Satire\" },\n        { \"theme\" : \"Russian\" },\n        { \"theme\" : \"Children\" },\n        { \"theme\" : \"Documentary\" },\n        { \"theme\" : \"Mystery\" },\n      ])\",\n      {R\"(shelf { theme: \"Classics\" })\", R\"(shelf { theme: \"Satire\" })\",\n       R\"(shelf { theme: \"Russian\" })\", R\"(shelf { theme: \"Children\" })\",\n       R\"(shelf { theme: \"Documentary\" })\", R\"(shelf { theme: \"Mystery\" })\"},\n      {R\"(id: 3 theme: \"Classics\")\", R\"(id: 4 theme: \"Satire\")\", R\"(id: 5 theme: \"Russian\")\",\n       R\"(id: 6 theme: \"Children\")\", R\"(id: 7 theme: \"Documentary\")\", R\"(id: 8 theme: \"Mystery\")\"},\n      Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"transfer-encoding\", \"chunked\"}},\n      R\"([{\"id\":\"3\",\"theme\":\"Classics\"})\"\n      R\"(,{\"id\":\"4\",\"theme\":\"Satire\"})\"\n      R\"(,{\"id\":\"5\",\"theme\":\"Russian\"})\"\n      R\"(,{\"id\":\"6\",\"theme\":\"Children\"})\"\n      R\"(,{\"id\":\"7\",\"theme\":\"Documentary\"})\"\n      R\"(,{\"id\":\"8\",\"theme\":\"Mystery\"}])\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, InvalidJson) {\n  HttpIntegrationTest::initialize();\n  // Usually the response would be\n  // \"Unexpected token.\\n\"\n  //    \"INVALID_JSON\\n\"\n  //    \"^\"\n  // If Envoy does a short read of the upstream connection, it may only read part of the\n  // string \"INVALID_JSON\". 
Envoy will note \"Unexpected token [whatever substring is read]\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"}, {\":path\", \"/shelf\"}, {\":authority\", \"host\"}},\n      R\"(INVALID_JSON)\", {}, {}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"400\"}, {\"content-type\", \"text/plain\"}},\n      \"Unexpected token.\\nI\", false);\n\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"}, {\":path\", \"/shelf\"}, {\":authority\", \"host\"}},\n      R\"({ \"theme\" : \"Children\")\", {}, {}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"400\"}, {\"content-type\", \"text/plain\"}},\n      \"Unexpected end of string. Expected , or } after key:value pair.\\n\"\n      \"\\n\"\n      \"^\");\n\n  // Usually the response would be\n  //    \"Expected : between key:value pair.\\n\"\n  //    \"{ \\\"theme\\\"  \\\"Children\\\" }\\n\"\n  //    \"           ^\");\n  // But as with INVALID_JSON Envoy may not read the full string from the upstream connection so may\n  // generate its error based on a partial upstream response.\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"}, {\":path\", \"/shelf\"}, {\":authority\", \"host\"}},\n      R\"({ \"theme\"  \"Children\" })\", {}, {}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"400\"}, {\"content-type\", \"text/plain\"}},\n      \"Expected : between key:value pair.\\n\", false);\n\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"}, {\":path\", \"/shelf\"}, {\":authority\", \"host\"}},\n      R\"({ \"theme\" : \"Children\" }EXTRA)\", {}, {}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"400\"}, 
{\"content-type\", \"text/plain\"}},\n      \"Parsing terminated before end of input.\\n\", false);\n}\n\nstd::string createDeepJson(int level, bool valid) {\n  std::string begin = R\"({\"k\":)\";\n  std::string deep_val = R\"(\"v\")\";\n  std::string end = R\"(})\";\n  std::string json;\n\n  for (int i = 0; i < level; ++i) {\n    absl::StrAppend(&json, begin);\n  }\n  if (valid) {\n    absl::StrAppend(&json, deep_val);\n  }\n  for (int i = 0; i < level; ++i) {\n    absl::StrAppend(&json, end);\n  }\n  return json;\n}\n\nstd::string jsonStrToPbStrucStr(std::string json) {\n  Envoy::ProtobufWkt::Struct message;\n  std::string structStr;\n  TestUtility::loadFromJson(json, message);\n  TextFormat::PrintToString(message, &structStr);\n  return structStr;\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, DeepStruct) {\n  // Lower the timeout for the 408 response.\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* virtual_host = hcm.mutable_route_config()->mutable_virtual_hosts(0);\n        virtual_host->mutable_routes(0)->mutable_route()->mutable_idle_timeout()->set_seconds(5);\n      });\n\n  HttpIntegrationTest::initialize();\n  // Due to the limit of protobuf util, we can only compare to level 32.\n  std::string deepJson = createDeepJson(32, true);\n  std::string deepProto = \"content {\" + jsonStrToPbStrucStr(deepJson) + \"}\";\n  testTranscoding<bookstore::EchoStructReqResp, bookstore::EchoStructReqResp>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"}, {\":path\", \"/echoStruct\"}, {\":authority\", \"host\"}},\n      deepJson, {deepProto}, {deepProto}, Status(),\n      Http::TestResponseHeaderMapImpl{\n          {\":status\", \"200\"}, {\"content-type\", \"application/json\"}, {\"grpc-status\", \"0\"}},\n      R\"({\"content\":)\" + deepJson + R\"(})\");\n\n  // The valid deep struct is parsed successfully.\n  
// Since we didn't set a response, it will time out.\n  // Response body is empty (not a valid JSON), so the error response is plaintext.\n  testTranscoding<bookstore::EchoStructReqResp, bookstore::EchoStructReqResp>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"}, {\":path\", \"/echoStruct\"}, {\":authority\", \"host\"}},\n      createDeepJson(100, true), {}, {}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"408\"}, {\"content-type\", \"text/plain\"}}, \"\");\n\n  // The invalid deep struct is detected.\n  testTranscoding<bookstore::EchoStructReqResp, bookstore::EchoStructReqResp>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"}, {\":path\", \"/echoStruct\"}, {\":authority\", \"host\"}},\n      createDeepJson(100, false), {}, {}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"400\"}, {\"content-type\", \"text/plain\"}},\n      \"Unexpected token.\\n\", false);\n}\n\nstd::string createLargeJson(int level) {\n  std::shared_ptr<ProtobufWkt::Value> cur = std::make_shared<ProtobufWkt::Value>();\n  for (int i = 0; i < level - 1; ++i) {\n    std::shared_ptr<ProtobufWkt::Value> next = std::make_shared<ProtobufWkt::Value>();\n    ProtobufWkt::Value val = ProtobufWkt::Value();\n    ProtobufWkt::Value left = ProtobufWkt::Value(*cur);\n    ProtobufWkt::Value right = ProtobufWkt::Value(*cur);\n    val.mutable_list_value()->add_values()->Swap(&left);\n    val.mutable_list_value()->add_values()->Swap(&right);\n    (*next->mutable_struct_value()->mutable_fields())[\"k\"] = val;\n    cur = next;\n  }\n  return MessageUtil::getJsonStringFromMessage(*cur, false, false);\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, LargeStruct) {\n  HttpIntegrationTest::initialize();\n  // Create a 40kB json payload.\n\n  std::string largeJson = createLargeJson(12);\n  std::string largeProto = \"content {\" + jsonStrToPbStrucStr(largeJson) + \"}\";\n  testTranscoding<bookstore::EchoStructReqResp, 
bookstore::EchoStructReqResp>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"}, {\":path\", \"/echoStruct\"}, {\":authority\", \"host\"}},\n      largeJson, {largeProto}, {largeProto}, Status(),\n      Http::TestResponseHeaderMapImpl{\n          {\":status\", \"200\"}, {\"content-type\", \"application/json\"}, {\"grpc-status\", \"0\"}},\n      R\"({\"content\":)\" + largeJson + R\"(})\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnknownFieldInRequest) {\n  // Request JSON has many fields that are unknown to the request proto message.\n  // They are discarded.\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/shelf\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"application/json\"}},\n      R\"({\"theme\": \"Children\", \"unknown1\": \"a\", \"unknown2\" : {\"a\" : \"b\"}, \"unknown3\" : [\"a\", \"b\", \"c\"]})\",\n      {R\"(shelf { theme: \"Children\" })\"}, {R\"(id: 20 theme: \"Children\" )\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"content-length\", \"30\"},\n                                      {\"grpc-status\", \"0\"}},\n      R\"({\"id\":\"20\",\"theme\":\"Children\"})\");\n}\n\n// Test proto to json transcoding with an unknown field in the response message.\n// gRPC server may use a updated proto with a new field, but Envoy transcoding\n// filter could use an old proto descriptor without that field. That fields is unknown\n// to the Envoy transcoder filter. 
Expected result: the unknown field is discarded,\n// other fields should be transcoded properly.\nTEST_P(GrpcJsonTranscoderIntegrationTest, UnknownResponse) {\n  // The mocked upstream proto response message is bookstore::BigBook which has\n  // all 3 fields. But the proto descriptor used by the Envoy transcoder filter is using\n  // bookstore::OldBigBook which is missing the `field1` field.\n  HttpIntegrationTest::initialize();\n  // The bug is ZeroCopyInputStreamImpl::Skip() which is not implemented.\n  // In order to trigger a call to that function, the response message has to be big enough\n  // so it is stored in multiple slices.\n  const std::string field1_value = std::string(32 * 1024, 'O');\n  const std::string response_body =\n      fmt::format(R\"(field1: \"{}\" field2: \"field2_value\" field3: \"field3_value\" )\", field1_value);\n  testTranscoding<Empty, bookstore::BigBook>(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/bigbook\"}, {\":authority\", \"host\"}},\n      \"\", {\"\"}, {response_body}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"content-type\", \"application/json\"},\n                                      {\"content-length\", \"49\"},\n                                      {\"grpc-status\", \"0\"}},\n      R\"({\"field2\":\"field2_value\",\"field3\":\"field3_value\"})\");\n}\n\nTEST_P(GrpcJsonTranscoderIntegrationTest, UTF8) {\n  HttpIntegrationTest::initialize();\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/shelf\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"application/json\"}},\n      \"{\\\"id\\\":\\\"20\\\",\\\"theme\\\":\\\"\\xC2\\xAE\\\"}\", {\"shelf {id : 20 theme: \\\"®\\\" }\"},\n      {\"id: 20 
theme: \\\"\\xC2\\xAE\\\"\"}, Status(),\n      Http::TestResponseHeaderMapImpl{\n          {\":status\", \"200\"}, {\"content-type\", \"application/json\"}, {\"grpc-status\", \"0\"}},\n      R\"({\"id\":\"20\",\"theme\":\"®\"})\");\n\n  testTranscoding<bookstore::CreateShelfRequest, bookstore::Shelf>(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/shelf\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"application/json\"}},\n      \"{\\\"id\\\":\\\"20\\\",\\\"theme\\\":\\\"\\xC3\\x28\\\"}\", {}, {\"\"}, Status(),\n      Http::TestResponseHeaderMapImpl{{\":status\", \"400\"}}, R\"(Encountered non UTF-8 code points)\",\n      false);\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n\n#include \"extensions/filters/http/grpc_json_transcoder/http_body_utils.h\"\n\n#include \"test/proto/bookstore.pb.h\"\n\n#include \"google/api/httpbody.pb.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\nnamespace {\n\nclass HttpBodyUtilsTest : public testing::Test {\npublic:\n  HttpBodyUtilsTest() = default;\n\n  void setBodyFieldPath(const std::vector<int>& body_field_path) {\n    for (int field_number : body_field_path) {\n      Protobuf::Field field;\n      field.set_number(field_number);\n      raw_body_field_path_.emplace_back(std::move(field));\n    }\n    for (auto& field : raw_body_field_path_) {\n      body_field_path_.push_back(&field);\n    }\n  }\n\n  template <typename Message>\n  void basicTest(const std::string& content, const std::string& content_type,\n                 const std::vector<int>& body_field_path,\n                 std::function<google::api::HttpBody(Message message)> get_http_body) {\n    setBodyFieldPath(body_field_path);\n\n    // Parse using concrete message type.\n    {\n      Buffer::InstancePtr message_buffer = std::make_unique<Buffer::OwnedImpl>();\n      HttpBodyUtils::appendHttpBodyEnvelope(*message_buffer, body_field_path_, content_type,\n                                            content.length());\n      message_buffer->add(content);\n\n      Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer));\n\n      Message message;\n      message.ParseFromZeroCopyStream(&stream);\n\n      google::api::HttpBody http_body = get_http_body(std::move(message));\n      EXPECT_EQ(http_body.content_type(), content_type);\n      EXPECT_EQ(http_body.data(), content);\n    }\n\n    // Parse message dynamically by field path.\n    {\n      Buffer::InstancePtr message_buffer = std::make_unique<Buffer::OwnedImpl>();\n      
HttpBodyUtils::appendHttpBodyEnvelope(*message_buffer, body_field_path_, content_type,\n                                            content.length());\n      message_buffer->add(content);\n\n      google::api::HttpBody http_body;\n      Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer));\n      EXPECT_TRUE(HttpBodyUtils::parseMessageByFieldPath(&stream, body_field_path_, &http_body));\n      EXPECT_EQ(http_body.content_type(), content_type);\n      EXPECT_EQ(http_body.data(), content);\n    }\n  }\n\n  void testInvalidMessage(const std::string& content, const std::vector<int>& body_field_path) {\n    setBodyFieldPath(body_field_path);\n    Buffer::InstancePtr message_buffer = std::make_unique<Buffer::OwnedImpl>();\n    message_buffer->add(content);\n    google::api::HttpBody http_body;\n    Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer));\n    EXPECT_FALSE(HttpBodyUtils::parseMessageByFieldPath(&stream, body_field_path_, &http_body));\n  }\n\n  std::vector<Protobuf::Field> raw_body_field_path_;\n  std::vector<const Protobuf::Field*> body_field_path_;\n};\n\nTEST_F(HttpBodyUtilsTest, EmptyFieldsList) {\n  basicTest<google::api::HttpBody>(\"abcd\", \"text/plain\", {},\n                                   [](google::api::HttpBody http_body) { return http_body; });\n}\n\nTEST_F(HttpBodyUtilsTest, LargeMessage) {\n  // Check some content with more than single byte in varint encoding of the size.\n  std::string content;\n  content.assign(20000, 'a');\n  basicTest<google::api::HttpBody>(content, \"text/binary\", {},\n                                   [](google::api::HttpBody http_body) { return http_body; });\n}\n\nTEST_F(HttpBodyUtilsTest, LargeContentType) {\n  // Check some content type with more than single byte in varint encoding of the size.\n  std::string content_type;\n  content_type.assign(20000, 'a');\n  basicTest<google::api::HttpBody>(\"abcd\", content_type, {},\n                                   [](google::api::HttpBody 
http_body) { return http_body; });\n}\n\nTEST_F(HttpBodyUtilsTest, NestedFieldsList) {\n  basicTest<bookstore::DeepNestedBody>(\n      \"abcd\", \"text/nested\", {1, 1000000, 100000000, 500000000},\n      [](bookstore::DeepNestedBody message) { return message.nested().nested().nested().body(); });\n}\n\nTEST_F(HttpBodyUtilsTest, SkipUnknownFields) {\n  bookstore::DeepNestedBody message;\n  auto* body = message.mutable_nested()->mutable_nested()->mutable_nested()->mutable_body();\n  body->set_content_type(\"text/nested\");\n  body->set_data(\"abcd\");\n  message.mutable_extra()->set_field(\"test\");\n  message.mutable_nested()->mutable_extra()->set_field(123);\n\n  Buffer::InstancePtr message_buffer = std::make_unique<Buffer::OwnedImpl>();\n  std::string serialized_message;\n  EXPECT_TRUE(message.SerializeToString(&serialized_message));\n  message_buffer->add(serialized_message);\n  setBodyFieldPath({1, 1000000, 100000000, 500000000});\n\n  google::api::HttpBody http_body;\n  Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer));\n  EXPECT_TRUE(HttpBodyUtils::parseMessageByFieldPath(&stream, body_field_path_, &http_body));\n  EXPECT_EQ(http_body.content_type(), \"text/nested\");\n  EXPECT_EQ(http_body.data(), \"abcd\");\n}\n\nTEST_F(HttpBodyUtilsTest, FailInvalidLength) {\n  std::string message;\n  // First field tag.\n  message += static_cast<char>((1 << 3) | 2);\n  // Invalid length.\n  message += '\\x02';\n  // Second field tag.\n  message += static_cast<char>((2 << 3) | 2);\n  // Invalid length.\n  message += '\\x80';\n  testInvalidMessage(message, {1, 2});\n}\n\nTEST_F(HttpBodyUtilsTest, FailSkipField) {\n  std::string message;\n  // Field tag.\n  message += static_cast<char>((2 << 3) | 2);\n  // Invalid length.\n  message += '\\x80';\n  testInvalidMessage(message, {1});\n}\n\nTEST_F(HttpBodyUtilsTest, FailShortMessage) {\n  std::string message;\n  // Field tag.\n  message += static_cast<char>((1 << 3) | 2);\n  // Length less then remaining message 
size.\n  message += '\\x02';\n  // Invalid tag.\n  message += '\\x00';\n  testInvalidMessage(message, {1, 2});\n}\n\n} // namespace\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc",
    "content": "#include <fstream>\n#include <functional>\n#include <memory>\n\n#include \"envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/proto/bookstore.pb.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::NiceMock;\n\nusing Envoy::Protobuf::FileDescriptorProto;\nusing Envoy::Protobuf::FileDescriptorSet;\nusing Envoy::Protobuf::util::MessageDifferencer;\nusing Envoy::ProtobufUtil::error::Code;\nusing google::api::HttpRule;\nusing google::grpc::transcoding::Transcoder;\nusing TranscoderPtr = std::unique_ptr<Transcoder>;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\nnamespace {\n\nclass GrpcJsonTranscoderFilterTestBase {\nprotected:\n  GrpcJsonTranscoderFilterTestBase() : api_(Api::createApiForTest()) {}\n  ~GrpcJsonTranscoderFilterTestBase() {\n    TestEnvironment::removePath(TestEnvironment::temporaryPath(\"envoy_test/proto.descriptor\"));\n  }\n\n  Api::ApiPtr api_;\n};\n\nclass GrpcJsonTranscoderConfigTest : public testing::Test, public GrpcJsonTranscoderFilterTestBase {\nprotected:\n  const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder\n  getProtoConfig(const std::string& descriptor_path, const std::string& service_name,\n                 bool match_incoming_request_route = false,\n                 const std::vector<std::string>& ignored_query_parameters = {}) {\n    const std::string json_string = 
\"{\\\"proto_descriptor\\\": \\\"\" + descriptor_path +\n                                    \"\\\",\\\"services\\\": [\\\"\" + service_name + \"\\\"]}\";\n    envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder proto_config;\n    TestUtility::loadFromJson(json_string, proto_config);\n    proto_config.set_match_incoming_request_route(match_incoming_request_route);\n    for (const auto& query_param : ignored_query_parameters) {\n      proto_config.add_ignored_query_parameters(query_param);\n    }\n\n    return proto_config;\n  }\n\n  std::string makeProtoDescriptor(std::function<void(FileDescriptorSet&)> process) {\n    FileDescriptorSet descriptor_set;\n    descriptor_set.ParseFromString(api_->fileSystem().fileReadToEnd(\n        TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\")));\n\n    process(descriptor_set);\n\n    TestEnvironment::createPath(TestEnvironment::temporaryPath(\"envoy_test\"));\n    std::string path = TestEnvironment::temporaryPath(\"envoy_test/proto.descriptor\");\n    std::ofstream file(path, std::ios::binary);\n    descriptor_set.SerializeToOstream(&file);\n\n    return path;\n  }\n\n  void setGetBookHttpRule(FileDescriptorSet& descriptor_set, const HttpRule& http_rule) {\n    for (auto& file : *descriptor_set.mutable_file()) {\n      for (auto& service : *file.mutable_service()) {\n        for (auto& method : *service.mutable_method()) {\n          if (method.name() == \"GetBook\") {\n            method.mutable_options()->MutableExtension(google::api::http)->MergeFrom(http_rule);\n            return;\n          }\n        }\n      }\n    }\n  }\n\n  void stripImports(FileDescriptorSet& descriptor_set, const std::string& file_name) {\n    FileDescriptorProto file_descriptor;\n    // filter down descriptor_set to only contain one proto specified as file_name but none of its\n    // dependencies\n    auto file_itr =\n        std::find_if(descriptor_set.file().begin(), descriptor_set.file().end(),\n      
               [&file_name](const FileDescriptorProto& file) {\n                       // return whether file.name() ends with file_name\n                       return file.name().length() >= file_name.length() &&\n                              0 == file.name().compare(file.name().length() - file_name.length(),\n                                                       std::string::npos, file_name);\n                     });\n    RELEASE_ASSERT(file_itr != descriptor_set.file().end(), \"\");\n    file_descriptor = *file_itr;\n\n    descriptor_set.clear_file();\n    descriptor_set.add_file()->Swap(&file_descriptor);\n  }\n};\n\nTEST_F(GrpcJsonTranscoderConfigTest, ParseConfig) {\n  EXPECT_NO_THROW(JsonTranscoderConfig config(\n      getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"),\n                     \"bookstore.Bookstore\"),\n      *api_));\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, ParseConfigSkipRecalculating) {\n  EXPECT_NO_THROW(JsonTranscoderConfig config(\n      getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"),\n                     \"bookstore.Bookstore\", true),\n      *api_));\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, ParseBinaryConfig) {\n  envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder proto_config;\n  proto_config.set_proto_descriptor_bin(api_->fileSystem().fileReadToEnd(\n      TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\")));\n  proto_config.add_services(\"bookstore.Bookstore\");\n  EXPECT_NO_THROW(JsonTranscoderConfig config(proto_config, *api_));\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, UnknownService) {\n  EXPECT_THROW_WITH_MESSAGE(\n      JsonTranscoderConfig config(\n          getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"),\n                         \"grpc.service.UnknownService\"),\n          *api_),\n      EnvoyException,\n      \"transcoding_filter: Could not find 
'grpc.service.UnknownService' in the proto descriptor\");\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, IncompleteProto) {\n  EXPECT_THROW_WITH_MESSAGE(\n      JsonTranscoderConfig config(getProtoConfig(makeProtoDescriptor([&](FileDescriptorSet& pb) {\n                                                   stripImports(pb, \"test/proto/bookstore.proto\");\n                                                 }),\n                                                 \"bookstore.Bookstore\"),\n                                  *api_),\n      EnvoyException, \"transcoding_filter: Unable to build proto descriptor pool\");\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, NonProto) {\n  EXPECT_THROW_WITH_MESSAGE(\n      JsonTranscoderConfig config(\n          getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.proto\"),\n                         \"grpc.service.UnknownService\"),\n          *api_),\n      EnvoyException, \"transcoding_filter: Unable to parse proto descriptor\");\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, JsonResponseBody) {\n  EXPECT_THROW_WITH_REGEX(\n      JsonTranscoderConfig config(\n          getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"),\n                         \"bookstore.ServiceWithResponseBody\"),\n          *api_),\n      EnvoyException, \"Setting \\\"response_body\\\" is not supported yet for non-HttpBody fields\");\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, InvalidRequestBodyPath) {\n  EXPECT_THROW_WITH_REGEX(\n      JsonTranscoderConfig config(\n          getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"),\n                         \"bookstore.ServiceWithInvalidRequestBodyPath\"),\n          *api_),\n      EnvoyException, \"Could not find field\");\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, InvalidResponseBodyPath) {\n  EXPECT_THROW_WITH_REGEX(\n      JsonTranscoderConfig config(\n          getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"),\n       
                  \"bookstore.ServiceWithInvalidResponseBodyPath\"),\n          *api_),\n      EnvoyException, \"Could not find field\");\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, NonBinaryProto) {\n  envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder proto_config;\n  proto_config.set_proto_descriptor_bin(\"This is invalid proto\");\n  proto_config.add_services(\"bookstore.Bookstore\");\n  EXPECT_THROW_WITH_MESSAGE(JsonTranscoderConfig config(proto_config, *api_), EnvoyException,\n                            \"transcoding_filter: Unable to parse proto descriptor\");\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, InvalidHttpTemplate) {\n  HttpRule http_rule;\n  http_rule.set_get(\"/book/{\");\n  EXPECT_THROW_WITH_MESSAGE(\n      JsonTranscoderConfig config(getProtoConfig(makeProtoDescriptor([&](FileDescriptorSet& pb) {\n                                                   setGetBookHttpRule(pb, http_rule);\n                                                 }),\n                                                 \"bookstore.Bookstore\"),\n                                  *api_),\n      EnvoyException,\n      \"transcoding_filter: Cannot register 'bookstore.Bookstore.GetBook' to path matcher\");\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, CreateTranscoder) {\n  JsonTranscoderConfig config(\n      getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"),\n                     \"bookstore.Bookstore\"),\n      *api_);\n\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/shelves\"}};\n\n  TranscoderInputStreamImpl request_in, response_in;\n  TranscoderPtr transcoder;\n  MethodInfoSharedPtr method_info;\n  const auto status =\n      config.createTranscoder(headers, request_in, response_in, transcoder, method_info);\n\n  EXPECT_TRUE(status.ok());\n  EXPECT_TRUE(transcoder);\n  EXPECT_EQ(\"bookstore.Bookstore.ListShelves\", 
method_info->descriptor_->full_name());\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, CreateTranscoderAutoMap) {\n  auto proto_config = getProtoConfig(\n      TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"), \"bookstore.Bookstore\");\n  proto_config.set_auto_mapping(true);\n\n  JsonTranscoderConfig config(proto_config, *api_);\n\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"POST\"},\n                                         {\":path\", \"/bookstore.Bookstore/DeleteShelf\"}};\n\n  TranscoderInputStreamImpl request_in, response_in;\n  TranscoderPtr transcoder;\n  MethodInfoSharedPtr method_info;\n  const auto status =\n      config.createTranscoder(headers, request_in, response_in, transcoder, method_info);\n\n  EXPECT_TRUE(status.ok());\n  EXPECT_TRUE(transcoder);\n  EXPECT_EQ(\"bookstore.Bookstore.DeleteShelf\", method_info->descriptor_->full_name());\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, InvalidQueryParameter) {\n  JsonTranscoderConfig config(\n      getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"),\n                     \"bookstore.Bookstore\"),\n      *api_);\n\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/shelves?foo=bar\"}};\n\n  TranscoderInputStreamImpl request_in, response_in;\n  TranscoderPtr transcoder;\n  MethodInfoSharedPtr method_info;\n  const auto status =\n      config.createTranscoder(headers, request_in, response_in, transcoder, method_info);\n\n  EXPECT_EQ(Code::INVALID_ARGUMENT, status.error_code());\n  EXPECT_EQ(\"Could not find field \\\"foo\\\" in the type \\\"google.protobuf.Empty\\\".\",\n            status.error_message());\n  EXPECT_FALSE(transcoder);\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, UnknownQueryParameterIsIgnored) {\n  auto proto_config = getProtoConfig(\n      TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"), \"bookstore.Bookstore\");\n  proto_config.set_ignore_unknown_query_parameters(true);\n  
JsonTranscoderConfig config(proto_config, *api_);\n\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/shelves?foo=bar\"}};\n\n  TranscoderInputStreamImpl request_in, response_in;\n  TranscoderPtr transcoder;\n  MethodInfoSharedPtr method_info;\n  const auto status =\n      config.createTranscoder(headers, request_in, response_in, transcoder, method_info);\n\n  EXPECT_TRUE(status.ok());\n  EXPECT_TRUE(transcoder);\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, IgnoredQueryParameter) {\n  std::vector<std::string> ignored_query_parameters = {\"key\"};\n  JsonTranscoderConfig config(\n      getProtoConfig(TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\"),\n                     \"bookstore.Bookstore\", false, ignored_query_parameters),\n      *api_);\n\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/shelves?key=API_KEY\"}};\n\n  TranscoderInputStreamImpl request_in, response_in;\n  TranscoderPtr transcoder;\n  MethodInfoSharedPtr method_info;\n  const auto status =\n      config.createTranscoder(headers, request_in, response_in, transcoder, method_info);\n\n  EXPECT_TRUE(status.ok());\n  EXPECT_TRUE(transcoder);\n  EXPECT_EQ(\"bookstore.Bookstore.ListShelves\", method_info->descriptor_->full_name());\n}\n\nTEST_F(GrpcJsonTranscoderConfigTest, InvalidVariableBinding) {\n  HttpRule http_rule;\n  http_rule.set_get(\"/book/{b}\");\n  JsonTranscoderConfig config(getProtoConfig(makeProtoDescriptor([&](FileDescriptorSet& pb) {\n                                               setGetBookHttpRule(pb, http_rule);\n                                             }),\n                                             \"bookstore.Bookstore\"),\n                              *api_);\n\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"}, {\":path\", \"/book/1\"}};\n\n  TranscoderInputStreamImpl request_in, response_in;\n  TranscoderPtr transcoder;\n  MethodInfoSharedPtr method_info;\n  const auto status 
=\n      config.createTranscoder(headers, request_in, response_in, transcoder, method_info);\n\n  EXPECT_EQ(Code::INVALID_ARGUMENT, status.error_code());\n  EXPECT_EQ(\"Could not find field \\\"b\\\" in the type \\\"bookstore.GetBookRequest\\\".\",\n            status.error_message());\n  EXPECT_FALSE(transcoder);\n}\n\nclass GrpcJsonTranscoderFilterTest : public testing::Test, public GrpcJsonTranscoderFilterTestBase {\nprotected:\n  GrpcJsonTranscoderFilterTest(\n      envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder proto_config =\n          bookstoreProtoConfig())\n      : config_(proto_config, *api_), filter_(config_) {\n    filter_.setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_.setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  static const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder\n  bookstoreProtoConfig() {\n    const std::string json_string = \"{\\\"proto_descriptor\\\": \\\"\" + bookstoreDescriptorPath() +\n                                    \"\\\",\\\"services\\\": [\\\"bookstore.Bookstore\\\"]}\";\n    envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder proto_config;\n    TestUtility::loadFromJson(json_string, proto_config);\n    return proto_config;\n  }\n\n  static const std::string bookstoreDescriptorPath() {\n    return TestEnvironment::runfilesPath(\"test/proto/bookstore.descriptor\");\n  }\n\n  // TODO(lizan): Add a mock of JsonTranscoderConfig and test more error cases.\n  JsonTranscoderConfig config_;\n  JsonTranscoderFilter filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n};\n\nTEST_F(GrpcJsonTranscoderFilterTest, NoTranscoding) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/grpc\"},\n                                                 {\":method\", \"POST\"},\n                                    
             {\":path\", \"/grpc.service/UnknownGrpcMethod\"}};\n\n  Http::TestRequestHeaderMapImpl expected_request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":method\", \"POST\"},\n      {\":path\", \"/grpc.service/UnknownGrpcMethod\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache()).Times(0);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(expected_request_headers, request_headers);\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map));\n\n  Buffer::OwnedImpl request_data{\"{}\"};\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_data, false));\n  EXPECT_EQ(2, request_data.length());\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n\n  Http::TestResponseHeaderMapImpl expected_response_headers{{\"content-type\", \"application/grpc\"},\n                                                            {\":status\", \"200\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(expected_response_headers, response_headers);\n\n  Buffer::OwnedImpl response_data{\"{}\"};\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(response_data, false));\n  EXPECT_EQ(2, response_data.length());\n\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"0\"}};\n  Http::TestResponseTrailerMapImpl expected_response_trailers{{\"grpc-status\", \"0\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(expected_response_trailers, 
response_trailers);\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryPost) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/json\"}, {\":method\", \"POST\"}, {\":path\", \"/shelf\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/shelf\", request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"POST\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/CreateShelf\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Buffer::OwnedImpl request_data{\"{\\\"theme\\\": \\\"Children\\\"}\"};\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_data, true));\n\n  Grpc::Decoder decoder;\n  std::vector<Grpc::Frame> frames;\n  decoder.decode(request_data, frames);\n\n  EXPECT_EQ(1, frames.size());\n\n  bookstore::CreateShelfRequest expected_request;\n  expected_request.mutable_shelf()->set_theme(\"Children\");\n\n  bookstore::CreateShelfRequest request;\n  request.ParseFromString(frames[0].data_->toString());\n\n  EXPECT_EQ(expected_request.ByteSize(), frames[0].length_);\n  EXPECT_TRUE(MessageDifferencer::Equals(expected_request, request));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"000\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_.encode100ContinueHeaders(continue_headers));\n\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.encodeMetadata(metadata_map));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n\n  
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n\n  bookstore::Shelf response;\n  response.set_id(20);\n  response.set_theme(\"Children\");\n\n  auto response_data = Grpc::Common::serializeToGrpcFrame(response);\n\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_.encodeData(*response_data, false));\n\n  std::string response_json = response_data->toString();\n\n  EXPECT_EQ(\"{\\\"id\\\":\\\"20\\\",\\\"theme\\\":\\\"Children\\\"}\", response_json);\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryPostWithPackageServiceMethodPath) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/json\"},\n      {\":method\", \"POST\"},\n      {\":path\", \"/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod\",\n            request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"POST\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod\",\n            request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Buffer::OwnedImpl request_data{\"{\\\"theme\\\": \\\"Children\\\"}\"};\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_data, true));\n\n  Grpc::Decoder decoder;\n  std::vector<Grpc::Frame> frames;\n  decoder.decode(request_data, frames);\n\n  
EXPECT_EQ(1, frames.size());\n\n  bookstore::CreateShelfRequest expected_request;\n  expected_request.mutable_shelf()->set_theme(\"Children\");\n\n  bookstore::CreateShelfRequest request;\n  request.ParseFromString(frames[0].data_->toString());\n\n  EXPECT_EQ(expected_request.ByteSize(), frames[0].length_);\n  EXPECT_TRUE(MessageDifferencer::Equals(expected_request, request));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"000\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_.encode100ContinueHeaders(continue_headers));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n\n  bookstore::Shelf response;\n  response.set_id(20);\n  response.set_theme(\"Children\");\n\n  auto response_data = Grpc::Common::serializeToGrpcFrame(response);\n\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_.encodeData(*response_data, false));\n\n  std::string response_json = response_data->toString();\n\n  EXPECT_EQ(\"{\\\"id\\\":\\\"20\\\",\\\"theme\\\":\\\"Children\\\"}\", response_json);\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, ForwardUnaryPostGrpc) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":method\", \"POST\"},\n      {\":path\", \"/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  
EXPECT_EQ(\"/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod\",\n            request_headers.get_(\":path\"));\n\n  bookstore::CreateShelfRequest request;\n  request.mutable_shelf()->set_theme(\"Children\");\n\n  Buffer::InstancePtr request_data = Grpc::Common::serializeToGrpcFrame(request);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(*request_data, true));\n\n  Grpc::Decoder decoder;\n  std::vector<Grpc::Frame> frames;\n  decoder.decode(*request_data, frames);\n\n  EXPECT_EQ(1, frames.size());\n\n  bookstore::CreateShelfRequest expected_request;\n  expected_request.mutable_shelf()->set_theme(\"Children\");\n\n  bookstore::CreateShelfRequest forwarded_request;\n  forwarded_request.ParseFromString(frames[0].data_->toString());\n\n  EXPECT_EQ(expected_request.ByteSize(), frames[0].length_);\n  EXPECT_TRUE(MessageDifferencer::Equals(expected_request, forwarded_request));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"000\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_.encode100ContinueHeaders(continue_headers));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"application/grpc\", response_headers.get_(\"content-type\"));\n\n  bookstore::Shelf expected_response;\n  expected_response.set_id(20);\n  expected_response.set_theme(\"Children\");\n\n  bookstore::Shelf response;\n  response.set_id(20);\n  response.set_theme(\"Children\");\n\n  Buffer::InstancePtr response_data = Grpc::Common::serializeToGrpcFrame(response);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(*response_data, true));\n\n  frames.clear();\n  decoder.decode(*response_data, frames);\n\n  EXPECT_EQ(1, frames.size());\n\n  bookstore::Shelf forwarded_response;\n  
forwarded_response.ParseFromString(frames[0].data_->toString());\n\n  EXPECT_EQ(expected_response.ByteSize(), frames[0].length_);\n  EXPECT_TRUE(MessageDifferencer::Equals(expected_response, forwarded_response));\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n}\n\nclass GrpcJsonTranscoderFilterSkipRecalculatingTest : public GrpcJsonTranscoderFilterTest {\npublic:\n  GrpcJsonTranscoderFilterSkipRecalculatingTest()\n      : GrpcJsonTranscoderFilterTest(makeProtoConfig()) {}\n\nprivate:\n  const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder\n  makeProtoConfig() {\n    auto proto_config = bookstoreProtoConfig();\n    proto_config.set_match_incoming_request_route(true);\n    return proto_config;\n  }\n};\n\nTEST_F(GrpcJsonTranscoderFilterSkipRecalculatingTest, TranscodingUnaryPostSkipRecalculate) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/json\"}, {\":method\", \"POST\"}, {\":path\", \"/shelf\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache()).Times(0);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/shelf\", request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"POST\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/CreateShelf\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Buffer::OwnedImpl request_data{\"{\\\"theme\\\": \\\"Children\\\"}\"};\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_data, true));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryError) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/json\"}, {\":method\", \"POST\"}, {\":path\", 
\"/shelf\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/CreateShelf\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Buffer::OwnedImpl request_data{\"{\\\"theme\\\": \\\"Children\\\"\"};\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false))\n      .WillOnce(Invoke([](Http::ResponseHeaderMap& headers, bool end_stream) {\n        EXPECT_EQ(\"400\", headers.getStatusValue());\n        EXPECT_FALSE(end_stream);\n      }));\n  EXPECT_CALL(decoder_callbacks_, encodeData(_, true));\n\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(request_data, true));\n  EXPECT_EQ(0, request_data.length());\n  EXPECT_EQ(decoder_callbacks_.details(), \"grpc_json_transcode_failure{INVALID_ARGUMENT}\");\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryTimeout) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/json\"}, {\":method\", \"POST\"}, {\":path\", \"/shelf\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/CreateShelf\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Buffer::OwnedImpl request_data{\"{\\\"theme\\\": \\\"Children\\\"}\"};\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_data, true));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"504\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(request_data, 
true));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryNotGrpcResponse) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/json\"}, {\":method\", \"POST\"}, {\":path\", \"/shelf\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/CreateShelf\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Buffer::OwnedImpl request_data{\"{\\\"theme\\\": \\\"Children\\\"}\"};\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_data, true));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"}, {\"content-length\", \"24\"}, {\"content-type\", \"text/plain\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(request_data, true));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutput) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}, {\":path\", \"/index\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/index\", request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"GET\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/GetIndex\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n  
          filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n\n  google::api::HttpBody response;\n  response.set_content_type(\"text/html\");\n  response.set_data(\"<h1>Hello, world!</h1>\");\n\n  auto response_data = Grpc::Common::serializeToGrpcFrame(response);\n\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_.encodeData(*response_data, false));\n\n  EXPECT_EQ(response.content_type(), response_headers.get_(\"content-type\"));\n  EXPECT_EQ(response.data(), response_data->toString());\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithInvalidHttpBodyAsOutput) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/echoResponseBodyPath\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/echoResponseBodyPath\", request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"GET\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/EchoResponseBodyPath\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n\n  google::api::HttpBody response;\n  
response.set_content_type(\"text/html\");\n  response.set_data(\"<h1>Hello, world!</h1>\");\n\n  Buffer::OwnedImpl response_data;\n  // Some invalid message.\n  response_data.add(\"\\x10\\x80\");\n  Grpc::Common::prependGrpcFrameHeader(response_data);\n\n  EXPECT_CALL(encoder_callbacks_, resetStream());\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_.encodeData(response_data, false));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutputAndSplitTwoEncodeData) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}, {\":path\", \"/index\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/index\", request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"GET\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/GetIndex\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n\n  google::api::HttpBody response;\n  response.set_content_type(\"text/html\");\n  response.set_data(\"<h1>Hello, world!</h1>\");\n\n  auto response_data = Grpc::Common::serializeToGrpcFrame(response);\n\n  // Firstly, the response data buffer is split into two parts.\n  Buffer::OwnedImpl response_data_first_part;\n  response_data_first_part.move(*response_data, response_data->length() / 2);\n\n  // Secondly, we send the first part of response data to the data encoding 
step.\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_.encodeData(response_data_first_part, false));\n\n  // Finally, since half of the response data buffer is moved already, here we can send the rest\n  // of it to the next data encoding step.\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_.encodeData(*response_data, false));\n\n  EXPECT_EQ(response.content_type(), response_headers.get_(\"content-type\"));\n  EXPECT_EQ(response.data(), response_data->toString());\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryPostWithHttpBody) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"}, {\":path\", \"/postBody?arg=hi\"}, {\"content-type\", \"text/plain\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/postBody?arg=hi\", request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"POST\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/PostBody\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Grpc::Decoder decoder;\n  std::vector<Grpc::Frame> frames;\n\n  EXPECT_CALL(decoder_callbacks_, addDecodedData(_, true))\n      .Times(testing::AtLeast(1))\n      .WillRepeatedly(testing::Invoke([&decoder, &frames](Buffer::Instance& data, bool end_stream) {\n        EXPECT_TRUE(end_stream);\n        decoder.decode(data, frames);\n      }));\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.decodeData(buffer, false));\n  EXPECT_EQ(buffer.length(), 0);\n  
EXPECT_EQ(frames.size(), 0);\n  buffer.add(\" \");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.decodeData(buffer, false));\n  EXPECT_EQ(buffer.length(), 0);\n  EXPECT_EQ(frames.size(), 0);\n  buffer.add(\"world!\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(buffer, true));\n  EXPECT_EQ(buffer.length(), 0);\n  ASSERT_EQ(frames.size(), 1);\n\n  bookstore::EchoBodyRequest expected_request;\n  expected_request.set_arg(\"hi\");\n  expected_request.mutable_nested()->mutable_content()->set_content_type(\"text/plain\");\n  expected_request.mutable_nested()->mutable_content()->set_data(\"hello world!\");\n\n  bookstore::EchoBodyRequest request;\n  request.ParseFromString(frames[0].data_->toString());\n\n  EXPECT_THAT(request, ProtoEq(expected_request));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamPostWithHttpBody) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"}, {\":path\", \"/streamBody?arg=hi\"}, {\"content-type\", \"text/plain\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/streamBody?arg=hi\", request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"POST\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/StreamBody\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Grpc::Decoder decoder;\n  std::vector<Grpc::Frame> frames;\n\n  EXPECT_CALL(decoder_callbacks_, addDecodedData(_, true))\n      .Times(testing::AtLeast(2))\n      .WillRepeatedly(testing::Invoke([&decoder, &frames](Buffer::Instance& data, bool end_stream) {\n        EXPECT_TRUE(end_stream);\n        decoder.decode(data, frames);\n      }));\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(\"hello\");\n  
EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(buffer, false));\n  EXPECT_EQ(buffer.length(), 0);\n  EXPECT_EQ(frames.size(), 1);\n  buffer.add(\" \");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(buffer, false));\n  EXPECT_EQ(buffer.length(), 0);\n  EXPECT_EQ(frames.size(), 2);\n  buffer.add(\"world!\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(buffer, true));\n  EXPECT_EQ(buffer.length(), 0);\n  ASSERT_EQ(frames.size(), 3);\n\n  bookstore::EchoBodyRequest expected_request;\n  bookstore::EchoBodyRequest request;\n\n  expected_request.set_arg(\"hi\");\n  expected_request.mutable_nested()->mutable_content()->set_content_type(\"text/plain\");\n  expected_request.mutable_nested()->mutable_content()->set_data(\"hello\");\n  request.ParseFromString(frames[0].data_->toString());\n  EXPECT_THAT(request, ProtoEq(expected_request));\n\n  expected_request.Clear();\n  expected_request.mutable_nested()->mutable_content()->set_data(\" \");\n  request.ParseFromString(frames[1].data_->toString());\n  EXPECT_THAT(request, ProtoEq(expected_request));\n\n  expected_request.Clear();\n  expected_request.mutable_nested()->mutable_content()->set_data(\"world!\");\n  request.ParseFromString(frames[2].data_->toString());\n  EXPECT_THAT(request, ProtoEq(expected_request));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithHttpBodyAsOutput) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}, {\":path\", \"/indexStream\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/indexStream\", request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"GET\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/GetIndexStream\", request_headers.get_(\":path\"));\n  
EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n\n  // \"Send\" 1st gRPC message\n  google::api::HttpBody response;\n  response.set_content_type(\"text/html\");\n  response.set_data(\"<h1>Message 1!</h1>\");\n  auto response_data = Grpc::Common::serializeToGrpcFrame(response);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(*response_data, false));\n  // Content type set to HttpBody.content_type / no content-length\n  EXPECT_EQ(\"text/html\", response_headers.get_(\"content-type\"));\n  EXPECT_EQ(nullptr, response_headers.ContentLength());\n  EXPECT_EQ(response.data(), response_data->toString());\n\n  // \"Send\" 2nd message with different context type\n  response.set_content_type(\"text/plain\");\n  response.set_data(\"Message2\");\n  response_data = Grpc::Common::serializeToGrpcFrame(response);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(*response_data, false));\n  // Content type unchanged\n  EXPECT_EQ(\"text/html\", response_headers.get_(\"content-type\"));\n  EXPECT_EQ(nullptr, response_headers.ContentLength());\n  EXPECT_EQ(response.data(), response_data->toString());\n\n  // \"Send\" 3rd multiframe message (\"msgmsgmsg\")\n  Buffer::OwnedImpl multiframe_data;\n  response.set_data(\"msg\");\n  for (size_t i = 0; i < 3; i++) {\n    auto frame = Grpc::Common::serializeToGrpcFrame(response);\n    multiframe_data.add(*frame);\n  }\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(multiframe_data, false));\n  // 3 grpc frames joined\n  EXPECT_EQ(\"msgmsgmsg\", multiframe_data.toString());\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_.decodeTrailers(request_trailers));\n}\n\nTEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithFragmentedHttpBody) {\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"}, {\":path\", \"/indexStream\"}};\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"application/grpc\", request_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"/indexStream\", request_headers.get_(\"x-envoy-original-path\"));\n  EXPECT_EQ(\"GET\", request_headers.get_(\"x-envoy-original-method\"));\n  EXPECT_EQ(\"/bookstore.Bookstore/GetIndexStream\", request_headers.get_(\":path\"));\n  EXPECT_EQ(\"trailers\", request_headers.get_(\"te\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n\n  // \"Send\" one fragmented gRPC frame\n  google::api::HttpBody http_body;\n  http_body.set_content_type(\"text/html\");\n  http_body.set_data(\"<h1>Fragmented Message!</h1>\");\n  auto fragment2 = Grpc::Common::serializeToGrpcFrame(http_body);\n  Buffer::OwnedImpl fragment1;\n  fragment1.move(*fragment2, fragment2->length() / 2);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.encodeData(fragment1, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(*fragment2, false));\n\n  // Ensure that content-type is correct (taken from httpBody)\n  EXPECT_EQ(\"text/html\", response_headers.get_(\"content-type\"));\n\n  // Fragment1 is buffered by transcoder\n  EXPECT_EQ(0, fragment1.length());\n  // Second fragment contains entire body\n  EXPECT_EQ(http_body.data(), fragment2->toString());\n}\n\nclass GrpcJsonTranscoderFilterGrpcStatusTest : public GrpcJsonTranscoderFilterTest {\npublic:\n 
 GrpcJsonTranscoderFilterGrpcStatusTest(\n      const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder&\n          proto_config)\n      : GrpcJsonTranscoderFilterTest(proto_config) {}\n  GrpcJsonTranscoderFilterGrpcStatusTest() : GrpcJsonTranscoderFilterTest(makeProtoConfig()) {}\n\n  void SetUp() override {\n    EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"content-type\", \"application/json\"}, {\":method\", \"POST\"}, {\":path\", \"/shelf\"}};\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n\n    Buffer::OwnedImpl request_data{R\"({\"theme\": \"Children\"})\"};\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_data, true));\n\n    Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"000\"}};\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n              filter_.encode100ContinueHeaders(continue_headers));\n  }\n\nprivate:\n  const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder\n  makeProtoConfig() {\n    auto proto_config = bookstoreProtoConfig();\n    return proto_config;\n  }\n};\n\nclass GrpcJsonTranscoderFilterConvertGrpcStatusTest\n    : public GrpcJsonTranscoderFilterGrpcStatusTest {\npublic:\n  GrpcJsonTranscoderFilterConvertGrpcStatusTest()\n      : GrpcJsonTranscoderFilterGrpcStatusTest(makeProtoConfig()) {}\n\nprivate:\n  const envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder\n  makeProtoConfig() {\n    auto proto_config = bookstoreProtoConfig();\n    proto_config.set_convert_grpc_status(true);\n    return proto_config;\n  }\n};\n\n// Single headers frame with end_stream flag (trailer), no grpc-status-details-bin header.\nTEST_F(GrpcJsonTranscoderFilterConvertGrpcStatusTest, TranscodingTextHeadersInTrailerOnlyResponse) {\n  const std::string expected_response(R\"({\"code\":5,\"message\":\"Resource 
not found\"})\");\n  EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false))\n      .WillOnce(Invoke([&expected_response](Buffer::Instance& data, bool) {\n        EXPECT_EQ(expected_response, data.toString());\n      }));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-type\", \"application/grpc\"},\n                                                   {\"grpc-status\", \"5\"},\n                                                   {\"grpc-message\", \"Resource not found\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, true));\n  EXPECT_EQ(\"404\", response_headers.get_(\":status\"));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n  EXPECT_FALSE(response_headers.has(\"grpc-status\"));\n  EXPECT_FALSE(response_headers.has(\"grpc-message\"));\n}\n\n// Trailer-only response with grpc-status-details-bin header.\nTEST_F(GrpcJsonTranscoderFilterConvertGrpcStatusTest,\n       TranscodingBinaryHeaderInTrailerOnlyResponse) {\n  const std::string expected_response(R\"({\"code\":5,\"message\":\"Resource not found\"})\");\n  EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false))\n      .WillOnce(Invoke([&expected_response](Buffer::Instance& data, bool) {\n        EXPECT_EQ(expected_response, data.toString());\n      }));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      {\"content-type\", \"application/grpc\"},\n      {\"grpc-status\", \"5\"},\n      {\"grpc-message\", \"unused\"},\n      {\"grpc-status-details-bin\", \"CAUSElJlc291cmNlIG5vdCBmb3VuZA\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, true));\n  EXPECT_EQ(\"404\", response_headers.get_(\":status\"));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n  EXPECT_FALSE(response_headers.has(\"grpc-status\"));\n  
EXPECT_FALSE(response_headers.has(\"grpc-message\"));\n  EXPECT_FALSE(response_headers.has(\"grpc-status-details-bin\"));\n}\n\n// Trailer-only response with grpc-status-details-bin header with details.\n// Also tests that a user-defined type from a proto descriptor in config can be used in details.\nTEST_F(GrpcJsonTranscoderFilterConvertGrpcStatusTest,\n       TranscodingBinaryHeaderWithDetailsInTrailerOnlyResponse) {\n  const std::string expected_response(\n      \"{\\\"code\\\":5,\\\"message\\\":\\\"Error\\\",\\\"details\\\":\"\n      \"[{\\\"@type\\\":\\\"type.googleapis.com/helloworld.HelloReply\\\",\\\"message\\\":\\\"details\\\"}]}\");\n  EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false))\n      .WillOnce(Invoke([&expected_response](Buffer::Instance& data, bool) {\n        EXPECT_EQ(expected_response, data.toString());\n      }));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"},\n      {\"content-type\", \"application/grpc\"},\n      {\"grpc-status\", \"5\"},\n      {\"grpc-message\", \"unused\"},\n      {\"grpc-status-details-bin\",\n       \"CAUSBUVycm9yGjYKKXR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5IZWxsb1JlcGx5EgkKB2RldGFpbHM\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, true));\n}\n\n// Response with a header frame and a trailer frame.\n// (E.g. 
a gRPC server sends metadata and then it sends an error.)\nTEST_F(GrpcJsonTranscoderFilterConvertGrpcStatusTest, TranscodingStatusFromTrailer) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n  std::string expected_response(R\"({\"code\":5,\"message\":\"Resource not found\"})\");\n  EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false))\n      .WillOnce(Invoke([&expected_response](Buffer::Instance& data, bool) {\n        EXPECT_EQ(expected_response, data.toString());\n      }));\n  Http::TestResponseTrailerMapImpl response_trailers{\n      {\"grpc-status\", \"5\"},\n      {\"grpc-message\", \"unused\"},\n      {\"grpc-status-details-bin\", \"CAUSElJlc291cmNlIG5vdCBmb3VuZA\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(\"404\", response_headers.get_(\":status\"));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n  EXPECT_FALSE(response_headers.has(\"grpc-status\"));\n  EXPECT_FALSE(response_headers.has(\"grpc-message\"));\n  EXPECT_FALSE(response_headers.has(\"grpc-status-details-bin\"));\n}\n\nTEST_F(GrpcJsonTranscoderFilterGrpcStatusTest, TranscodingInvalidGrpcStatusFromTrailer) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"1024\"},\n                                 
                    {\"grpc-message\", \"message\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(\"500\", response_headers.get_(\":status\"));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n  EXPECT_EQ(\"1024\", response_headers.get_(\"grpc-status\"));\n  EXPECT_TRUE(response_headers.has(\"grpc-message\"));\n}\n\n// Server sends a response body, don't replace it.\nTEST_F(GrpcJsonTranscoderFilterConvertGrpcStatusTest, SkipTranscodingStatusIfBodyIsPresent) {\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc\"},\n                                                   {\":status\", \"200\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"application/json\", response_headers.get_(\"content-type\"));\n\n  bookstore::Shelf response;\n  response.set_id(20);\n  response.set_theme(\"Children\");\n\n  auto response_data = Grpc::Common::serializeToGrpcFrame(response);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_.encodeData(*response_data, false));\n\n  const std::string response_json = response_data->toString();\n  EXPECT_EQ(R\"({\"id\":\"20\",\"theme\":\"Children\"})\", response_json);\n\n  EXPECT_CALL(encoder_callbacks_, addEncodedData(_, _)).Times(0);\n\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers));\n}\n\nstruct GrpcJsonTranscoderFilterPrintTestParam {\n  std::string config_json_;\n  std::string expected_response_;\n};\n\nclass GrpcJsonTranscoderFilterPrintTest\n    : public testing::TestWithParam<GrpcJsonTranscoderFilterPrintTestParam>,\n      public GrpcJsonTranscoderFilterTestBase {\nprotected:\n  GrpcJsonTranscoderFilterPrintTest() {\n    envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder 
proto_config;\n    TestUtility::loadFromJson(TestEnvironment::substitute(GetParam().config_json_), proto_config);\n    config_ = new JsonTranscoderConfig(proto_config, *api_);\n    filter_ = new JsonTranscoderFilter(*config_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  ~GrpcJsonTranscoderFilterPrintTest() override {\n    delete filter_;\n    delete config_;\n  }\n\n  JsonTranscoderConfig* config_;\n  JsonTranscoderFilter* filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n};\n\nTEST_P(GrpcJsonTranscoderFilterPrintTest, PrintOptions) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/json\"}, {\":method\", \"GET\"}, {\":path\", \"/authors/101\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  bookstore::Author author;\n  author.set_id(101);\n  author.set_gender(bookstore::Author_Gender_MALE);\n  author.set_last_name(\"Shakespeare\");\n\n  const auto response_data = Grpc::Common::serializeToGrpcFrame(author);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_->encodeData(*response_data, false));\n\n  std::string response_json = response_data->toString();\n  EXPECT_EQ(GetParam().expected_response_, response_json);\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    GrpcJsonTranscoderFilterPrintOptions, GrpcJsonTranscoderFilterPrintTest,\n    ::testing::Values(\n        GrpcJsonTranscoderFilterPrintTestParam{\n            R\"({\n     \"proto_descriptor\": \"{{ test_rundir }}/test/proto/bookstore.descriptor\",\n     \"services\": [\"bookstore.Bookstore\"]\n    })\",\n            R\"({\"id\":\"101\",\"gender\":\"MALE\",\"lname\":\"Shakespeare\"})\"},\n        GrpcJsonTranscoderFilterPrintTestParam{R\"({\n     \"proto_descriptor\": \"{{ test_rundir 
}}/test/proto/bookstore.descriptor\",\n     \"services\": [\"bookstore.Bookstore\"],\n     \"print_options\":{\"add_whitespace\": true}\n    })\",\n                                               R\"({\n \"id\": \"101\",\n \"gender\": \"MALE\",\n \"lname\": \"Shakespeare\"\n}\n)\"},\n        GrpcJsonTranscoderFilterPrintTestParam{\n            R\"({\n     \"proto_descriptor\": \"{{ test_rundir }}/test/proto/bookstore.descriptor\",\n     \"services\": [\"bookstore.Bookstore\"],\n     \"print_options\":{\"always_print_primitive_fields\": true}\n    })\",\n            R\"({\"id\":\"101\",\"gender\":\"MALE\",\"firstName\":\"\",\"lname\":\"Shakespeare\"})\"},\n        GrpcJsonTranscoderFilterPrintTestParam{R\"({\n     \"proto_descriptor\": \"{{ test_rundir }}/test/proto/bookstore.descriptor\",\n     \"services\": [\"bookstore.Bookstore\"],\n     \"print_options\":{\"always_print_enums_as_ints\": true}\n    })\",\n                                               R\"({\"id\":\"101\",\"gender\":1,\"lname\":\"Shakespeare\"})\"},\n        GrpcJsonTranscoderFilterPrintTestParam{\n            R\"({\n     \"proto_descriptor\": \"{{ test_rundir }}/test/proto/bookstore.descriptor\",\n     \"services\": [\"bookstore.Bookstore\"],\n     \"print_options\":{\"preserve_proto_field_names\": true}\n    })\",\n            R\"({\"id\":\"101\",\"gender\":\"MALE\",\"last_name\":\"Shakespeare\"})\"}));\n\n} // namespace\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_json_transcoder/transcoder_input_stream_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/http/grpc_json_transcoder/transcoder_input_stream_impl.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcJsonTranscoder {\nnamespace {\n\nclass TranscoderInputStreamTest : public testing::Test {\npublic:\n  TranscoderInputStreamTest() {\n    Buffer::OwnedImpl buffer{\"abcd\"};\n    stream_.move(buffer);\n  }\n\n  std::string slice_data_{\"abcd\"};\n  TranscoderInputStreamImpl stream_;\n\n  const void* data_;\n  int size_;\n};\n\nTEST_F(TranscoderInputStreamTest, BytesAvailable) {\n  Buffer::OwnedImpl buffer{\"abcd\"};\n\n  stream_.move(buffer);\n  EXPECT_EQ(8, stream_.BytesAvailable());\n}\n\nTEST_F(TranscoderInputStreamTest, TwoSlices) {\n  Buffer::OwnedImpl buffer(\"efghi\");\n\n  stream_.move(buffer);\n  EXPECT_EQ(9, stream_.BytesAvailable());\n}\n\nTEST_F(TranscoderInputStreamTest, BackUp) {\n  EXPECT_TRUE(stream_.Next(&data_, &size_));\n  EXPECT_EQ(4, size_);\n\n  stream_.BackUp(3);\n  EXPECT_EQ(3, stream_.BytesAvailable());\n}\n\n} // namespace\n} // namespace GrpcJsonTranscoder\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_stats/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_stats\",\n    deps = [\n        \"//source/common/grpc:common_lib\",\n        \"//source/extensions/filters/http/grpc_stats:config\",\n        \"//test/common/buffer:utility_lib\",\n        \"//test/common/stream_info:test_util\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/grpc_stats/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_stats/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/grpc_stats/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/grpc_stats/v3/config.pb.validate.h\"\n\n#include \"common/grpc/common.h\"\n\n#include \"extensions/filters/http/grpc_stats/grpc_stats_filter.h\"\n\n#include \"test/common/buffer/utility.h\"\n#include \"test/common/stream_info/test_util.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/logging.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Property;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcStats {\nnamespace {\n\nclass GrpcStatsFilterConfigTest : public testing::Test {\nprotected:\n  void initialize() {\n    GrpcStatsFilterConfigFactory factory;\n    Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config_, \"stats\", context_);\n    Http::MockFilterChainFactoryCallbacks filter_callback;\n\n    ON_CALL(filter_callback, addStreamFilter(_)).WillByDefault(testing::SaveArg<0>(&filter_));\n    EXPECT_CALL(filter_callback, addStreamFilter(_));\n    cb(filter_callback);\n\n    ON_CALL(decoder_callbacks_, streamInfo()).WillByDefault(testing::ReturnRef(stream_info_));\n\n    ON_CALL(*decoder_callbacks_.cluster_info_, statsScope())\n        .WillByDefault(testing::ReturnRef(stats_store_));\n\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n  }\n\n  void addAllowlistEntry() {\n    auto* allowlist = config_.mutable_individual_method_stats_allowlist();\n    auto* services = allowlist->mutable_services();\n    auto* service = services->Add();\n    service->set_name(\"BadCompanions\");\n    *service->mutable_method_names()->Add() = \"GetBadCompanions\";\n    *service->mutable_method_names()->Add() = \"AnotherMethod\";\n  }\n\n  void doRequestResponse(Http::TestRequestHeaderMapImpl& request_headers) {\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, 
filter_->decodeHeaders(request_headers, false));\n\n    Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n    Buffer::OwnedImpl data(\"hello\");\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, false));\n    Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"0\"}};\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers));\n  }\n\n  envoy::extensions::filters::http::grpc_stats::v3::FilterConfig config_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  Http::StreamFilterSharedPtr filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n  NiceMock<Stats::MockIsolatedStatsStore> stats_store_;\n};\n\nTEST_F(GrpcStatsFilterConfigTest, StatsHttp2HeaderOnlyResponse) {\n  config_.mutable_stats_for_all_methods()->set_value(true);\n  initialize();\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(continue_headers));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}, {\"grpc-status\", \"1\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     
.counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.failure\")\n                     .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.total\")\n                     .value());\n  EXPECT_FALSE(stream_info_.filterState()->hasDataWithName(HttpFilterNames::get().GrpcStats));\n}\n\nTEST_F(GrpcStatsFilterConfigTest, StatsHttp2NormalResponse) {\n  config_.mutable_stats_for_all_methods()->set_value(true);\n  initialize();\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  doRequestResponse(request_headers);\n\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.success\")\n                     .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.total\")\n                     .value());\n  EXPECT_FALSE(stream_info_.filterState()->hasDataWithName(HttpFilterNames::get().GrpcStats));\n}\n\nTEST_F(GrpcStatsFilterConfigTest, StatsHttp2ContentTypeGrpcPlusProto) {\n  config_.mutable_stats_for_all_methods()->set_value(true);\n  initialize();\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc+proto\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  doRequestResponse(request_headers);\n\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.success\")\n                     .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     
->statsScope()\n                     .counterFromString(\"grpc.lyft.users.BadCompanions.GetBadCompanions.total\")\n                     .value());\n  EXPECT_FALSE(stream_info_.filterState()->hasDataWithName(HttpFilterNames::get().GrpcStats));\n}\n\n// Test that an allowlist match results in method-named stats.\nTEST_F(GrpcStatsFilterConfigTest, StatsAllowlistMatch) {\n  addAllowlistEntry();\n  initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/grpc\"},\n                                                 {\":path\", \"/BadCompanions/GetBadCompanions\"}};\n\n  doRequestResponse(request_headers);\n\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.success\")\n                     .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.total\")\n                     .value());\n}\n\n// Test that an allowlist method mismatch results in going to the generic stat.\nTEST_F(GrpcStatsFilterConfigTest, StatsAllowlistMismatchMethod) {\n  addAllowlistEntry();\n  initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/grpc\"},\n                                                 {\":path\", \"/BadCompanions/GetGoodCompanions\"}};\n\n  doRequestResponse(request_headers);\n\n  EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetGoodCompanions.success\")\n                     .value());\n  EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetGoodCompanions.total\")\n                     .value());\n  EXPECT_EQ(\n      1UL,\n      
decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.success\").value());\n  EXPECT_EQ(1UL,\n            decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.total\").value());\n}\n\n// Test that an allowlist service mismatch results in going to the generic stat.\nTEST_F(GrpcStatsFilterConfigTest, StatsAllowlistMismatchService) {\n  addAllowlistEntry();\n  initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/grpc\"},\n                                                 {\":path\", \"/GoodCompanions/GetBadCompanions\"}};\n\n  doRequestResponse(request_headers);\n\n  EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.GoodCompanions.GetBadCompanions.success\")\n                     .value());\n  EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.GoodCompanions.GetBadCompanions.total\")\n                     .value());\n  EXPECT_EQ(\n      1UL,\n      decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.success\").value());\n  EXPECT_EQ(1UL,\n            decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.total\").value());\n}\n\n// Test that any method results in going to the generic stat, when stats_for_all_methods == false.\nTEST_F(GrpcStatsFilterConfigTest, DisableStatsForAllMethods) {\n  config_.mutable_stats_for_all_methods()->set_value(false);\n  initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/grpc\"},\n                                                 {\":path\", \"/BadCompanions/GetBadCompanions\"}};\n\n  doRequestResponse(request_headers);\n\n  EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.success\")\n               
      .value());\n  EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.total\")\n                     .value());\n  EXPECT_EQ(\n      1UL,\n      decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.success\").value());\n  EXPECT_EQ(1UL,\n            decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.total\").value());\n}\n\n// Test that any method results in a specific stat, when stats_for_all_methods isn't set\n// at all.\n//\n// This is deprecated behavior and will be changed during the deprecation window.\nTEST_F(GrpcStatsFilterConfigTest, StatsForAllMethodsDefaultSetting) {\n  EXPECT_CALL(\n      context_.runtime_loader_.snapshot_,\n      deprecatedFeatureEnabled(\n          \"envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default\", _))\n      .WillOnce(Invoke([](absl::string_view, bool default_value) { return default_value; }));\n  EXPECT_LOG_CONTAINS(\"warn\",\n                      \"Using deprecated default value for \"\n                      \"'envoy.extensions.filters.http.grpc_stats.v3.FilterConfig.stats_for_all_\"\n                      \"methods'\",\n                      initialize());\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/grpc\"},\n                                                 {\":path\", \"/BadCompanions/GetBadCompanions\"}};\n\n  doRequestResponse(request_headers);\n\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.success\")\n                     .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.total\")\n                     .value());\n  EXPECT_EQ(\n      0UL,\n      
decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.success\").value());\n  EXPECT_EQ(0UL,\n            decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.total\").value());\n}\n\n// Test that any method results in a specific stat, when stats_for_all_methods isn't set\n// at all.\n//\n// This is deprecated behavior and will be changed during the deprecation window.\nTEST_F(GrpcStatsFilterConfigTest, StatsForAllMethodsDefaultSettingRuntimeOverrideTrue) {\n  EXPECT_CALL(\n      context_.runtime_loader_.snapshot_,\n      deprecatedFeatureEnabled(\n          \"envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default\", _))\n      .WillOnce(Return(true));\n  EXPECT_LOG_CONTAINS(\"warn\",\n                      \"Using deprecated default value for \"\n                      \"'envoy.extensions.filters.http.grpc_stats.v3.FilterConfig.stats_for_all_\"\n                      \"methods'\",\n                      initialize());\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/grpc\"},\n                                                 {\":path\", \"/BadCompanions/GetBadCompanions\"}};\n\n  doRequestResponse(request_headers);\n\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.success\")\n                     .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.total\")\n                     .value());\n  EXPECT_EQ(\n      0UL,\n      decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.success\").value());\n  EXPECT_EQ(0UL,\n            decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.total\").value());\n}\n\n// Test that the runtime override for the deprecated previous default behavior 
works.\nTEST_F(GrpcStatsFilterConfigTest, StatsForAllMethodsDefaultSettingRuntimeOverrideFalse) {\n  EXPECT_CALL(\n      context_.runtime_loader_.snapshot_,\n      deprecatedFeatureEnabled(\n          \"envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default\", _))\n      .WillOnce(Return(false));\n  initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{{\"content-type\", \"application/grpc\"},\n                                                 {\":path\", \"/BadCompanions/GetBadCompanions\"}};\n\n  doRequestResponse(request_headers);\n\n  EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.success\")\n                     .value());\n  EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc.BadCompanions.GetBadCompanions.total\")\n                     .value());\n  EXPECT_EQ(\n      1UL,\n      decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.success\").value());\n  EXPECT_EQ(1UL,\n            decoder_callbacks_.clusterInfo()->statsScope().counterFromString(\"grpc.total\").value());\n}\n\nTEST_F(GrpcStatsFilterConfigTest, MessageCounts) {\n  config_.mutable_stats_for_all_methods()->set_value(true);\n  config_.set_emit_filter_state(true);\n  initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc+proto\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  ProtobufWkt::Value v1;\n  v1.set_string_value(\"v1\");\n  auto b1 = Grpc::Common::serializeToGrpcFrame(v1);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(*b1, false));\n  ProtobufWkt::Value v2;\n  v2.set_string_value(\"v2\");\n  auto b2 = 
Grpc::Common::serializeToGrpcFrame(v2);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(*b2, true));\n\n  EXPECT_EQ(2U, decoder_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromString(\n                        \"grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count\")\n                    .value());\n\n  // Check that there is no response_message_count stat yet. We use\n  // stats_store_.findCounterByString rather than looking on\n  // clusterInfo()->statsScope() because findCounterByString is not an API on\n  // Stats::Store, and there is no prefix so the names will match. We verify\n  // that by double-checking we can find the request_message_count using the\n  // same API.\n  EXPECT_FALSE(stats_store_.findCounterByString(\n      \"grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count\"));\n  EXPECT_TRUE(stats_store_.findCounterByString(\n      \"grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count\"));\n\n  const auto& data = stream_info_.filterState()->getDataReadOnly<GrpcStatsObject>(\n      HttpFilterNames::get().GrpcStats);\n  EXPECT_EQ(2U, data.request_message_count);\n  EXPECT_EQ(0U, data.response_message_count);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\"content-type\", \"application/grpc+proto\"},\n                                                   {\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(*b1);\n  buffer.add(*b2);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, false));\n  EXPECT_EQ(2U, decoder_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromString(\n                        \"grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count\")\n                    .value());\n  EXPECT_EQ(2U, decoder_callbacks_.clusterInfo()\n         
           ->statsScope()\n                    .counterFromString(\n                        \"grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count\")\n                    .value());\n  EXPECT_EQ(2U, data.request_message_count);\n  EXPECT_EQ(2U, data.response_message_count);\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(*b1, true));\n  EXPECT_EQ(2U, decoder_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromString(\n                        \"grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count\")\n                    .value());\n  EXPECT_EQ(3U, decoder_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromString(\n                        \"grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count\")\n                    .value());\n  EXPECT_EQ(2U, data.request_message_count);\n  EXPECT_EQ(3U, data.response_message_count);\n\n  auto filter_object =\n      *dynamic_cast<envoy::extensions::filters::http::grpc_stats::v3::FilterObject*>(\n          data.serializeAsProto().get());\n  EXPECT_EQ(2U, filter_object.request_message_count());\n  EXPECT_EQ(3U, filter_object.response_message_count());\n  EXPECT_EQ(\"2,3\", data.serializeAsString().value());\n}\n\nTEST_F(GrpcStatsFilterConfigTest, UpstreamStats) {\n  config_.mutable_stats_for_all_methods()->set_value(true);\n  config_.set_emit_filter_state(true);\n  config_.set_enable_upstream_stats(true);\n  initialize();\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc+proto\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  ON_CALL(stream_info_, lastUpstreamRxByteReceived())\n      .WillByDefault(testing::Return(\n          absl::optional<std::chrono::nanoseconds>(std::chrono::nanoseconds(30000000))));\n  ON_CALL(stream_info_, lastUpstreamTxByteSent())\n      .WillByDefault(testing::Return(\n          
absl::optional<std::chrono::nanoseconds>(std::chrono::nanoseconds(20000000))));\n\n  EXPECT_CALL(stats_store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name,\n                           \"grpc.lyft.users.BadCompanions.GetBadCompanions.upstream_rq_time\"),\n                  10ul));\n\n  doRequestResponse(request_headers);\n}\n\nTEST_F(GrpcStatsFilterConfigTest, UpstreamStatsWithTrailersOnly) {\n  config_.mutable_stats_for_all_methods()->set_value(true);\n  config_.set_emit_filter_state(true);\n  config_.set_enable_upstream_stats(true);\n  initialize();\n\n  ON_CALL(stream_info_, lastUpstreamRxByteReceived())\n      .WillByDefault(testing::Return(\n          absl::optional<std::chrono::nanoseconds>(std::chrono::nanoseconds(30000000))));\n  ON_CALL(stream_info_, lastUpstreamTxByteSent())\n      .WillByDefault(testing::Return(\n          absl::optional<std::chrono::nanoseconds>(std::chrono::nanoseconds(20000000))));\n\n  EXPECT_CALL(stats_store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name,\n                           \"grpc.lyft.users.BadCompanions.GetBadCompanions.upstream_rq_time\"),\n                  10ul));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", \"application/grpc+proto\"},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"500\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n}\n\n} // namespace\n} // namespace GrpcStats\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_web/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"grpc_web_filter_test\",\n    srcs = [\"grpc_web_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_web\",\n    deps = [\n        \"//source/extensions/filters/http/grpc_web:grpc_web_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:global_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_web\",\n    deps = [\n        \"//source/extensions/filters/http/grpc_web:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"grpc_web_integration_test\",\n    srcs = [\"grpc_web_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.grpc_web\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/grpc_web:config\",\n        \"//source/extensions/filters/http/grpc_web:grpc_web_filter_lib\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_web/config_test.cc",
    "content": "#include \"extensions/filters/http/grpc_web/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcWeb {\nnamespace {\n\nTEST(GrpcWebFilterConfigTest, GrpcWebFilter) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  GrpcWebFilterConfig factory;\n  envoy::extensions::filters::http::grpc_web::v3::GrpcWeb config;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(GrpcWebFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.grpc_web\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace GrpcWeb\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc",
    "content": "#include <memory>\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/integration/http_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nusing SkipEncodingEmptyTrailers = bool;\nusing TestParams =\n    std::tuple<Network::Address::IpVersion, Http::CodecClient::Type, SkipEncodingEmptyTrailers>;\n\nclass GrpcWebFilterIntegrationTest : public testing::TestWithParam<TestParams>,\n                                     public HttpIntegrationTest {\npublic:\n  GrpcWebFilterIntegrationTest()\n      : HttpIntegrationTest(std::get<1>(GetParam()), std::get<0>(GetParam())) {}\n\n  void SetUp() override {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n    config_helper_.addFilter(\"name: envoy.filters.http.grpc_web\");\n  }\n\n  void skipEncodingEmptyTrailers(SkipEncodingEmptyTrailers http2_skip_encoding_empty_trailers) {\n    config_helper_.addRuntimeOverride(\n        \"envoy.reloadable_features.http2_skip_encoding_empty_trailers\",\n        http2_skip_encoding_empty_trailers ? \"true\" : \"false\");\n  }\n\n  static std::string testParamsToString(const testing::TestParamInfo<TestParams> params) {\n    return fmt::format(\n        \"{}_{}_{}\",\n        TestUtility::ipTestParamsToString(testing::TestParamInfo<Network::Address::IpVersion>(\n            std::get<0>(params.param), params.index)),\n        std::get<1>(params.param) == Http::CodecClient::Type::HTTP2 ? \"Http2\" : \"Http\",\n        std::get<2>(params.param) ? 
\"SkipEncodingEmptyTrailers\" : \"SubmitEncodingEmptyTrailers\");\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    Params, GrpcWebFilterIntegrationTest,\n    testing::Combine(\n        testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n        testing::Values(Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2),\n        testing::Values(SkipEncodingEmptyTrailers{true}, SkipEncodingEmptyTrailers{false})),\n    GrpcWebFilterIntegrationTest::testParamsToString);\n\nTEST_P(GrpcWebFilterIntegrationTest, GrpcWebTrailersNotDuplicated) {\n  const auto downstream_protocol = std::get<1>(GetParam());\n  const bool http2_skip_encoding_empty_trailers = std::get<2>(GetParam());\n\n  if (downstream_protocol == Http::CodecClient::Type::HTTP1) {\n    config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1());\n  } else {\n    skipEncodingEmptyTrailers(http2_skip_encoding_empty_trailers);\n  }\n\n  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"request1\", \"trailer1\"},\n                                                   {\"request2\", \"trailer2\"}};\n  Http::TestResponseTrailerMapImpl response_trailers{{\"response1\", \"trailer1\"},\n                                                     {\"response2\", \"trailer2\"}};\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"content-type\", \"application/grpc-web\"},\n                                     {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, 1, false);\n  
codec_client_->sendTrailers(*request_encoder_, request_trailers);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(1, false);\n  upstream_request_->encodeTrailers(response_trailers);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1, upstream_request_->bodyLength());\n  EXPECT_THAT(*upstream_request_->trailers(), HeaderMapEqualRef(&request_trailers));\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_TRUE(absl::StrContains(response->body(), \"response1:trailer1\"));\n  EXPECT_TRUE(absl::StrContains(response->body(), \"response2:trailer2\"));\n\n  if (downstream_protocol == Http::CodecClient::Type::HTTP1) {\n    // When the downstream protocol is HTTP/1.1 we expect the trailers to be in the response-body.\n    EXPECT_EQ(nullptr, response->trailers());\n  }\n\n  if (downstream_protocol == Http::CodecClient::Type::HTTP2) {\n    if (http2_skip_encoding_empty_trailers) {\n      // When the downstream protocol is HTTP/2 and the feature-flag to skip encoding empty trailers\n      // is turned on, expect that the trailers are included in the response-body.\n      EXPECT_EQ(nullptr, response->trailers());\n    } else {\n      // Otherwise, we send empty trailers.\n      ASSERT_NE(nullptr, response->trailers());\n      EXPECT_TRUE(response->trailers()->empty());\n    }\n  }\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc",
    "content": "#include \"envoy/http/filter.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/base64.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/codes.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/filters/http/grpc_web/grpc_web_filter.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/global.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Combine;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::Values;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace GrpcWeb {\nnamespace {\n\nconst char MESSAGE[] = \"\\x00\\x00\\x00\\x00\\x11grpc-web-bin-data\";\nconst size_t MESSAGE_SIZE = sizeof(MESSAGE) - 1;\nconst char TEXT_MESSAGE[] = \"\\x00\\x00\\x00\\x00\\x12grpc-web-text-data\";\nconst size_t TEXT_MESSAGE_SIZE = sizeof(TEXT_MESSAGE) - 1;\nconst char B64_MESSAGE[] = \"AAAAABJncnBjLXdlYi10ZXh0LWRhdGE=\";\nconst size_t B64_MESSAGE_SIZE = sizeof(B64_MESSAGE) - 1;\nconst char B64_MESSAGE_NO_PADDING[] = \"AAAAABJncnBjLXdlYi10ZXh0LWRhdGE\";\nconst size_t B64_MESSAGE_NO_PADDING_SIZE = sizeof(B64_MESSAGE_NO_PADDING) - 1;\nconst char INVALID_B64_MESSAGE[] = \"****\";\nconst size_t INVALID_B64_MESSAGE_SIZE = sizeof(INVALID_B64_MESSAGE) - 1;\nconst char TRAILERS[] = \"\\x80\\x00\\x00\\x00\\x20grpc-status:0\\r\\ngrpc-message:ok\\r\\n\";\nconst size_t TRAILERS_SIZE = sizeof(TRAILERS) - 1;\n\n} // namespace\n\nclass GrpcWebFilterTest : public testing::TestWithParam<std::tuple<std::string, std::string>> {\npublic:\n  GrpcWebFilterTest() : grpc_context_(*symbol_table_), filter_(grpc_context_) {\n    filter_.setDecoderFilterCallbacks(decoder_callbacks_);\n    
filter_.setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  ~GrpcWebFilterTest() override { filter_.onDestroy(); }\n\n  const std::string& request_content_type() const { return std::get<0>(GetParam()); }\n\n  const std::string& request_accept() const { return std::get<1>(GetParam()); }\n\n  bool isTextRequest() const {\n    return request_content_type() == Http::Headers::get().ContentTypeValues.GrpcWebText ||\n           request_content_type() == Http::Headers::get().ContentTypeValues.GrpcWebTextProto;\n  }\n\n  bool isBinaryRequest() const {\n    return request_content_type() == Http::Headers::get().ContentTypeValues.GrpcWeb ||\n           request_content_type() == Http::Headers::get().ContentTypeValues.GrpcWebProto;\n  }\n\n  bool accept_text_response() const {\n    return request_accept() == Http::Headers::get().ContentTypeValues.GrpcWebText ||\n           request_accept() == Http::Headers::get().ContentTypeValues.GrpcWebTextProto;\n  }\n\n  bool accept_binary_response() const {\n    return request_accept() == Http::Headers::get().ContentTypeValues.GrpcWeb ||\n           request_accept() == Http::Headers::get().ContentTypeValues.GrpcWebProto;\n  }\n\n  bool doStatTracking() const { return filter_.doStatTracking(); }\n\n  void expectErrorResponse(const Http::Code& expected_code, const std::string& expected_message) {\n    EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, _))\n        .WillOnce(Invoke([=](Http::ResponseHeaderMap& headers, bool) {\n          uint64_t code;\n          ASSERT_TRUE(absl::SimpleAtoi(headers.getStatusValue(), &code));\n          EXPECT_EQ(static_cast<uint64_t>(expected_code), code);\n        }));\n    EXPECT_CALL(decoder_callbacks_, encodeData(_, _))\n        .WillOnce(Invoke(\n            [=](Buffer::Instance& data, bool) { EXPECT_EQ(expected_message, data.toString()); }));\n  }\n\n  void expectRequiredGrpcUpstreamHeaders(const Http::TestRequestHeaderMapImpl& request_headers) {\n    
EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, request_headers.getContentTypeValue());\n    // Ensure we never send content-length upstream\n    EXPECT_EQ(nullptr, request_headers.ContentLength());\n    EXPECT_EQ(Http::Headers::get().TEValues.Trailers, request_headers.getTEValue());\n    EXPECT_EQ(Http::CustomHeaders::get().GrpcAcceptEncodingValues.Default,\n              request_headers.get_(Http::CustomHeaders::get().GrpcAcceptEncoding));\n  }\n\n  Stats::TestSymbolTable symbol_table_;\n  Grpc::ContextImpl grpc_context_;\n  GrpcWebFilter filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  Http::TestRequestTrailerMapImpl request_trailers_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n  Http::TestRequestHeaderMapImpl request_headers_{{\":path\", \"/\"}};\n};\n\nTEST_F(GrpcWebFilterTest, SupportedContentTypes) {\n  const std::string supported_content_types[] = {\n      Http::Headers::get().ContentTypeValues.GrpcWeb,\n      Http::Headers::get().ContentTypeValues.GrpcWebProto,\n      Http::Headers::get().ContentTypeValues.GrpcWebText,\n      Http::Headers::get().ContentTypeValues.GrpcWebTextProto};\n  for (auto& content_type : supported_content_types) {\n    Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n    request_headers.addCopy(Http::Headers::get().ContentType, content_type);\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n    Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n    EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map));\n    EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, request_headers.getContentTypeValue());\n  }\n}\n\nTEST_F(GrpcWebFilterTest, UnsupportedContentType) {\n  Buffer::OwnedImpl data;\n  
request_headers_.addCopy(Http::Headers::get().ContentType, \"unsupported\");\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(GrpcWebFilterTest, NoContentType) {\n  Buffer::OwnedImpl data;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(GrpcWebFilterTest, NoPath) {\n  Http::TestRequestHeaderMapImpl request_headers{};\n  Buffer::OwnedImpl data;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_));\n}\n\nTEST_F(GrpcWebFilterTest, InvalidBase64) {\n  
request_headers_.addCopy(Http::Headers::get().ContentType,\n                           Http::Headers::get().ContentTypeValues.GrpcWebText);\n  expectErrorResponse(Http::Code::BadRequest, \"Bad gRPC-web request, invalid base64 data.\");\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  expectRequiredGrpcUpstreamHeaders(request_headers_);\n\n  Buffer::OwnedImpl request_buffer;\n  Buffer::OwnedImpl decoded_buffer;\n  request_buffer.add(&INVALID_B64_MESSAGE, INVALID_B64_MESSAGE_SIZE);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer,\n            filter_.decodeData(request_buffer, true));\n  EXPECT_EQ(decoder_callbacks_.details(), \"grpc_base_64_decode_failed\");\n}\n\nTEST_F(GrpcWebFilterTest, Base64NoPadding) {\n  request_headers_.addCopy(Http::Headers::get().ContentType,\n                           Http::Headers::get().ContentTypeValues.GrpcWebText);\n  expectErrorResponse(Http::Code::BadRequest, \"Bad gRPC-web request, invalid base64 data.\");\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  expectRequiredGrpcUpstreamHeaders(request_headers_);\n\n  Buffer::OwnedImpl request_buffer;\n  Buffer::OwnedImpl decoded_buffer;\n  request_buffer.add(&B64_MESSAGE_NO_PADDING, B64_MESSAGE_NO_PADDING_SIZE);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer,\n            filter_.decodeData(request_buffer, true));\n  EXPECT_EQ(decoder_callbacks_.details(), \"grpc_base_64_decode_failed_bad_size\");\n}\n\nTEST_P(GrpcWebFilterTest, StatsNoCluster) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", request_content_type()},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n  EXPECT_CALL(decoder_callbacks_, clusterInfo()).WillOnce(Return(nullptr));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  EXPECT_FALSE(doStatTracking());\n}\n\nTEST_P(GrpcWebFilterTest, 
StatsNormalResponse) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", request_content_type()},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_.encode100ContinueHeaders(continue_headers));\n\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.encodeMetadata(metadata_map));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"0\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(1UL,\n            decoder_callbacks_.clusterInfo()\n                ->statsScope()\n                .counterFromString(\"grpc-web.lyft.users.BadCompanions.GetBadCompanions.success\")\n                .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc-web.lyft.users.BadCompanions.GetBadCompanions.total\")\n                     .value());\n}\n\nTEST_P(GrpcWebFilterTest, StatsErrorResponse) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"content-type\", request_content_type()},\n      {\":path\", \"/lyft.users.BadCompanions/GetBadCompanions\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"grpc-status\", \"1\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  EXPECT_EQ(1UL,\n            decoder_callbacks_.clusterInfo()\n                ->statsScope()\n                .counterFromString(\"grpc-web.lyft.users.BadCompanions.GetBadCompanions.failure\")\n                .value());\n  EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo()\n                     ->statsScope()\n                     .counterFromString(\"grpc-web.lyft.users.BadCompanions.GetBadCompanions.total\")\n                     .value());\n}\n\nTEST_P(GrpcWebFilterTest, Unary) {\n  // Tests request headers.\n  request_headers_.addCopy(Http::Headers::get().ContentType, request_content_type());\n  request_headers_.addCopy(Http::CustomHeaders::get().Accept, request_accept());\n  request_headers_.addCopy(Http::Headers::get().ContentLength, uint64_t(8));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers_, false));\n  expectRequiredGrpcUpstreamHeaders(request_headers_);\n\n  // Tests request data.\n  if (isBinaryRequest()) {\n    Buffer::OwnedImpl request_buffer;\n    Buffer::OwnedImpl decoded_buffer;\n    for (size_t i = 0; i < MESSAGE_SIZE; i++) {\n      request_buffer.add(&MESSAGE[i], 1);\n      EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_buffer, true));\n      decoded_buffer.move(request_buffer);\n    }\n    EXPECT_EQ(std::string(MESSAGE, MESSAGE_SIZE), decoded_buffer.toString());\n  } else if (isTextRequest()) {\n    Buffer::OwnedImpl request_buffer;\n    Buffer::OwnedImpl decoded_buffer;\n    for (size_t i = 0; i < B64_MESSAGE_SIZE; i++) {\n      request_buffer.add(&B64_MESSAGE[i], 1);\n      if (i == 
B64_MESSAGE_SIZE - 1) {\n        EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_buffer, true));\n        decoded_buffer.move(request_buffer);\n        break;\n      }\n      if (i % 4 == 3) {\n        EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_buffer, false));\n      } else {\n        EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer,\n                  filter_.decodeData(request_buffer, false));\n      }\n      decoded_buffer.move(request_buffer);\n    }\n    EXPECT_EQ(std::string(TEXT_MESSAGE, TEXT_MESSAGE_SIZE), decoded_buffer.toString());\n  } else {\n    FAIL() << \"Unsupported gRPC-Web request content-type: \" << request_content_type();\n  }\n\n  // Tests request trailers, they are passed through.\n  Http::TestRequestTrailerMapImpl request_trailers;\n  request_trailers.addCopy(Http::Headers::get().GrpcStatus, \"0\");\n  request_trailers.addCopy(Http::Headers::get().GrpcMessage, \"ok\");\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_));\n  EXPECT_EQ(\"0\", request_trailers.get_(\"grpc-status\"));\n  EXPECT_EQ(\"ok\", request_trailers.get_(\"grpc-message\"));\n\n  // Tests response headers.\n  Http::TestResponseHeaderMapImpl response_headers;\n  response_headers.addCopy(Http::Headers::get().Status, \"200\");\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false));\n  EXPECT_EQ(\"200\", response_headers.get_(Http::Headers::get().Status.get()));\n  if (accept_binary_response()) {\n    EXPECT_EQ(Http::Headers::get().ContentTypeValues.GrpcWebProto,\n              response_headers.getContentTypeValue());\n  } else if (accept_text_response()) {\n    EXPECT_EQ(Http::Headers::get().ContentTypeValues.GrpcWebTextProto,\n              response_headers.getContentTypeValue());\n  } else {\n    FAIL() << \"Unsupported gRPC-Web request accept: \" << request_accept();\n  }\n\n  // Tests response data.\n  if (accept_binary_response()) 
{\n    Buffer::OwnedImpl response_buffer;\n    Buffer::OwnedImpl encoded_buffer;\n    for (size_t i = 0; i < MESSAGE_SIZE; i++) {\n      response_buffer.add(&MESSAGE[i], 1);\n      EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(response_buffer, false));\n      encoded_buffer.move(response_buffer);\n    }\n    EXPECT_EQ(std::string(MESSAGE, MESSAGE_SIZE), encoded_buffer.toString());\n  } else if (accept_text_response()) {\n    Buffer::OwnedImpl response_buffer;\n    Buffer::OwnedImpl encoded_buffer;\n    for (size_t i = 0; i < TEXT_MESSAGE_SIZE; i++) {\n      response_buffer.add(&TEXT_MESSAGE[i], 1);\n      if (i < TEXT_MESSAGE_SIZE - 1) {\n        EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer,\n                  filter_.encodeData(response_buffer, false));\n      } else {\n        EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(response_buffer, false));\n      }\n      encoded_buffer.move(response_buffer);\n    }\n    EXPECT_EQ(std::string(B64_MESSAGE, B64_MESSAGE_SIZE), encoded_buffer.toString());\n  } else {\n    FAIL() << \"Unsupported gRPC-Web response content-type: \"\n           << response_headers.getContentTypeValue();\n  }\n\n  // Tests response trailers.\n  Buffer::OwnedImpl trailers_buffer;\n  EXPECT_CALL(encoder_callbacks_, addEncodedData(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& data, bool) { trailers_buffer.move(data); }));\n  Http::TestResponseTrailerMapImpl response_trailers;\n  response_trailers.addCopy(Http::Headers::get().GrpcStatus, \"0\");\n  response_trailers.addCopy(Http::Headers::get().GrpcMessage, \"ok\");\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers));\n  if (accept_binary_response()) {\n    EXPECT_EQ(std::string(TRAILERS, TRAILERS_SIZE), trailers_buffer.toString());\n  } else if (accept_text_response()) {\n    EXPECT_EQ(std::string(TRAILERS, TRAILERS_SIZE), Base64::decode(trailers_buffer.toString()));\n  } else {\n    FAIL() << 
\"Unsupported gRPC-Web response content-type: \"\n           << response_headers.getContentTypeValue();\n  }\n  EXPECT_EQ(0, response_trailers.size());\n}\n\nINSTANTIATE_TEST_SUITE_P(Unary, GrpcWebFilterTest,\n                         Combine(Values(Http::Headers::get().ContentTypeValues.GrpcWeb,\n                                        Http::Headers::get().ContentTypeValues.GrpcWebProto,\n                                        Http::Headers::get().ContentTypeValues.GrpcWebText,\n                                        Http::Headers::get().ContentTypeValues.GrpcWebTextProto),\n                                 Values(Http::Headers::get().ContentTypeValues.GrpcWeb,\n                                        Http::Headers::get().ContentTypeValues.GrpcWebProto,\n                                        Http::Headers::get().ContentTypeValues.GrpcWebText,\n                                        Http::Headers::get().ContentTypeValues.GrpcWebTextProto)));\n\n} // namespace GrpcWeb\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/gzip/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"gzip_filter_test\",\n    srcs = [\"gzip_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.gzip\",\n    deps = [\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/compression/gzip/compressor:compressor_lib\",\n        \"//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib\",\n        \"//source/extensions/filters/http/gzip:config\",\n        \"//source/extensions/filters/http/gzip:gzip_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/gzip/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"gzip_filter_integration_test\",\n    srcs = [\n        \"gzip_filter_integration_test.cc\",\n    ],\n    extension_name = \"envoy.filters.http.gzip\",\n    deps = [\n        \"//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib\",\n        \"//source/extensions/filters/http/gzip:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/gzip/gzip_filter_integration_test.cc",
    "content": "#include \"envoy/event/timer.h\"\n\n#include \"extensions/compression/gzip/decompressor/zlib_decompressor_impl.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass GzipIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                            public Event::SimulatedTimeSystem,\n                            public HttpIntegrationTest {\npublic:\n  GzipIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void SetUp() override { decompressor_.init(window_bits); }\n  void TearDown() override { cleanupUpstreamAndDownstream(); }\n\n  void initializeFilter(const std::string& config) {\n    config_helper_.addFilter(config);\n    initialize();\n    codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  }\n\n  void doRequestAndCompression(Http::TestRequestHeaderMapImpl&& request_headers,\n                               Http::TestResponseHeaderMapImpl&& response_headers) {\n    uint64_t content_length;\n    ASSERT_TRUE(absl::SimpleAtoi(response_headers.get_(\"content-length\"), &content_length));\n    const Buffer::OwnedImpl expected_response{std::string(content_length, 'a')};\n    auto response =\n        sendRequestAndWaitForResponse(request_headers, 0, response_headers, content_length);\n    EXPECT_TRUE(upstream_request_->complete());\n    EXPECT_EQ(0U, upstream_request_->bodyLength());\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n    ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) != nullptr);\n    EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip,\n              response->headers()\n                  .get(Http::CustomHeaders::get().ContentEncoding)\n                  ->value()\n                  
.getStringView());\n    ASSERT_TRUE(response->headers().TransferEncoding() != nullptr);\n    EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked,\n              response->headers().getTransferEncodingValue());\n\n    Buffer::OwnedImpl decompressed_response{};\n    const Buffer::OwnedImpl compressed_response{response->body()};\n    decompressor_.decompress(compressed_response, decompressed_response);\n    ASSERT_EQ(content_length, decompressed_response.length());\n    EXPECT_TRUE(TestUtility::buffersEqual(expected_response, decompressed_response));\n  }\n\n  void doRequestAndNoCompression(Http::TestRequestHeaderMapImpl&& request_headers,\n                                 Http::TestResponseHeaderMapImpl&& response_headers) {\n    uint64_t content_length;\n    ASSERT_TRUE(absl::SimpleAtoi(response_headers.get_(\"content-length\"), &content_length));\n    auto response =\n        sendRequestAndWaitForResponse(request_headers, 0, response_headers, content_length);\n    EXPECT_TRUE(upstream_request_->complete());\n    EXPECT_EQ(0U, upstream_request_->bodyLength());\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n    ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr);\n    ASSERT_EQ(content_length, response->body().size());\n    EXPECT_EQ(response->body(), std::string(content_length, 'a'));\n  }\n\n  const std::string deprecated_full_config{R\"EOF(\n      name: gzip\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.filter.http.gzip.v2.Gzip\n        memory_level: 3\n        window_bits: 10\n        compression_level: best\n        compression_strategy: rle\n        disable_on_etag_header: true\n        content_length: 100\n        content_type:\n          - text/html\n          - application/json\n    )EOF\"};\n\n  const std::string full_config{R\"EOF(\n      name: gzip\n      typed_config:\n        \"@type\": 
type.googleapis.com/envoy.config.filter.http.gzip.v2.Gzip\n        memory_level: 3\n        window_bits: 10\n        compression_level: best\n        compression_strategy: rle\n        compressor:\n          disable_on_etag_header: true\n          content_length: 100\n          content_type:\n            - text/html\n            - application/json\n    )EOF\"};\n\n  const std::string default_config{\"name: envoy.filters.http.gzip\"};\n\n  const uint64_t window_bits{15 | 16};\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Extensions::Compression::Gzip::Decompressor::ZlibDecompressorImpl decompressor_{stats_store_,\n                                                                                  \"test\"};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, GzipIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n/**\n * Exercises gzip compression with default configuration.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceDefaultConfigTest)) {\n  initializeFilter(default_config);\n  doRequestAndCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                         {\":path\", \"/test/long/url\"},\n                                                         {\":scheme\", \"http\"},\n                                                         {\":authority\", \"host\"},\n                                                         {\"accept-encoding\", \"deflate, gzip\"}},\n                          Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                          {\"content-length\", \"4400\"},\n                                                          {\"content-type\", \"text/xml\"}});\n}\n\n/**\n * Exercises gzip compression with deprecated full configuration.\n */\nTEST_P(GzipIntegrationTest, 
DEPRECATED_FEATURE_TEST(AcceptanceDeprecatedFullConfigTest)) {\n  initializeFilter(deprecated_full_config);\n  doRequestAndCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                         {\":path\", \"/test/long/url\"},\n                                                         {\":scheme\", \"http\"},\n                                                         {\":authority\", \"host\"},\n                                                         {\"accept-encoding\", \"deflate, gzip\"}},\n                          Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                          {\"content-length\", \"4400\"},\n                                                          {\"content-type\", \"application/json\"}});\n}\n\n/**\n * Exercises gzip compression with full configuration.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigTest)) {\n  initializeFilter(full_config);\n  doRequestAndCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                         {\":path\", \"/test/long/url\"},\n                                                         {\":scheme\", \"http\"},\n                                                         {\":authority\", \"host\"},\n                                                         {\"accept-encoding\", \"deflate, gzip\"}},\n                          Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                          {\"content-length\", \"4400\"},\n                                                          {\"content-type\", \"application/json\"}});\n}\n\n/**\n * Exercises filter when client request contains 'identity' type.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(IdentityAcceptEncoding)) {\n  initializeFilter(default_config);\n  
doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                           {\":path\", \"/test/long/url\"},\n                                                           {\":scheme\", \"http\"},\n                                                           {\":authority\", \"host\"},\n                                                           {\"accept-encoding\", \"identity\"}},\n                            Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                            {\"content-length\", \"128\"},\n                                                            {\"content-type\", \"text/plain\"}});\n}\n\n/**\n * Exercises filter when client request contains unsupported 'accept-encoding' type.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(NotSupportedAcceptEncoding)) {\n  initializeFilter(default_config);\n  doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                           {\":path\", \"/test/long/url\"},\n                                                           {\":scheme\", \"http\"},\n                                                           {\":authority\", \"host\"},\n                                                           {\"accept-encoding\", \"deflate, br\"}},\n                            Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                            {\"content-length\", \"128\"},\n                                                            {\"content-type\", \"text/plain\"}});\n}\n\n/**\n * Exercises filter when upstream response is already encoded.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(UpstreamResponseAlreadyEncoded)) {\n  initializeFilter(default_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 
{\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-encoding\", \"br\"},\n                                                   {\"content-length\", \"128\"},\n                                                   {\"content-type\", \"application/json\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 128);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  ASSERT_EQ(\n      \"br\",\n      response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView());\n  EXPECT_EQ(128U, response->body().size());\n}\n\n/**\n * Exercises filter when upstream responds with content length below the default threshold.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(NotEnoughContentLength)) {\n  initializeFilter(default_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"}, {\"content-length\", \"10\"}, {\"content-type\", \"application/json\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 10);\n\n  
EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr);\n  EXPECT_EQ(10U, response->body().size());\n}\n\n/**\n * Exercises filter when response from upstream service is empty.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(EmptyResponse)) {\n  initializeFilter(default_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"204\"}, {\"content-length\", \"0\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 0);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"204\", response->headers().getStatusValue());\n  ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr);\n  EXPECT_EQ(0U, response->body().size());\n}\n\n/**\n * Exercises filter when upstream responds with restricted content-type value.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(SkipOnContentType)) {\n  initializeFilter(full_config);\n  doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                           {\":path\", \"/test/long/url\"},\n                                                           {\":scheme\", \"http\"},\n                                                           
{\":authority\", \"host\"},\n                                                           {\"accept-encoding\", \"deflate, gzip\"}},\n                            Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                            {\"content-length\", \"128\"},\n                                                            {\"content-type\", \"application/xml\"}});\n}\n\n/**\n * Exercises filter when upstream responds with restricted cache-control value.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(SkipOnCacheControl)) {\n  initializeFilter(full_config);\n  doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                           {\":path\", \"/test/long/url\"},\n                                                           {\":scheme\", \"http\"},\n                                                           {\":authority\", \"host\"},\n                                                           {\"accept-encoding\", \"deflate, gzip\"}},\n                            Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                                            {\"content-length\", \"128\"},\n                                                            {\"cache-control\", \"no-transform\"},\n                                                            {\"content-type\", \"application/json\"}});\n}\n\n/**\n * Exercises gzip compression when upstream returns a chunked response.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigChunkedResponse)) {\n  initializeFilter(full_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                     
                            {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"content-type\", \"application/json\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 1024);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  ASSERT_EQ(\n      \"gzip\",\n      response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView());\n  ASSERT_EQ(\"chunked\", response->headers().getTransferEncodingValue());\n}\n\n/**\n * Verify Vary header values are preserved.\n */\nTEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigVaryHeader)) {\n  initializeFilter(default_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"accept-encoding\", \"deflate, gzip\"}};\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"}, {\"content-type\", \"application/json\"}, {\"vary\", \"Cookie\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 1024);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  ASSERT_EQ(\n      \"gzip\",\n      response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView());\n  ASSERT_EQ(\"Cookie, Accept-Encoding\",\n            
response->headers().get(Http::CustomHeaders::get().Vary)->value().getStringView());\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/gzip/gzip_filter_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/extensions/filters/http/gzip/v3/gzip.pb.h\"\n\n#include \"common/common/hex.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/compression/gzip/compressor/zlib_compressor_impl.h\"\n#include \"extensions/compression/gzip/decompressor/zlib_decompressor_impl.h\"\n#include \"extensions/filters/http/gzip/config.h\"\n#include \"extensions/filters/http/gzip/gzip_filter.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Gzip {\n\nclass GzipFilterTest : public testing::Test {\nprotected:\n  GzipFilterTest() {\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"gzip.filter_enabled\", 100))\n        .WillByDefault(Return(true));\n  }\n\n  void SetUp() override {\n    setUpFilter(\"{}\");\n    decompressor_.init(31);\n  }\n\n  // GzipFilterTest Helpers\n  void setUpFilter(std::string&& json) {\n    Json::ObjectSharedPtr config = Json::Factory::loadFromString(json);\n    envoy::extensions::filters::http::gzip::v3::Gzip gzip;\n    TestUtility::loadFromJson(json, gzip);\n    config_ = std::make_shared<GzipFilterConfig>(gzip, \"test.\", stats_, runtime_);\n    filter_ = std::make_unique<Common::Compressors::CompressorFilter>(config_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n  }\n\n  void verifyCompressedData(const uint32_t content_length) {\n    // This makes sure we have a finished buffer before sending it to the client.\n    expectValidFinishedBuffer(content_length);\n    decompressor_.decompress(data_, 
decompressed_data_);\n    const std::string uncompressed_str{decompressed_data_.toString()};\n    ASSERT_EQ(expected_str_.length(), uncompressed_str.length());\n    EXPECT_EQ(expected_str_, uncompressed_str);\n    EXPECT_EQ(expected_str_.length(), stats_.counter(\"test.gzip.total_uncompressed_bytes\").value());\n    EXPECT_EQ(data_.length(), stats_.counter(\"test.gzip.total_compressed_bytes\").value());\n  }\n\n  void feedBuffer(uint64_t size) {\n    TestUtility::feedBufferWithRandomCharacters(data_, size);\n    expected_str_ += data_.toString();\n  }\n\n  void drainBuffer() {\n    const uint64_t data_len = data_.length();\n    data_.drain(data_len);\n  }\n\n  void doRequest(Http::TestRequestHeaderMapImpl&& headers, bool end_stream) {\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, end_stream));\n  }\n\n  void doResponseCompression(Http::TestResponseHeaderMapImpl&& headers, bool with_trailers) {\n    uint64_t content_length;\n    ASSERT_TRUE(absl::SimpleAtoi(headers.get_(\"content-length\"), &content_length));\n    feedBuffer(content_length);\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n    EXPECT_EQ(\"\", headers.get_(\"content-length\"));\n    EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip,\n              headers.get_(\"content-encoding\"));\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, !with_trailers));\n    if (with_trailers) {\n      Buffer::OwnedImpl trailers_buffer;\n      EXPECT_CALL(encoder_callbacks_, addEncodedData(_, true))\n          .WillOnce(Invoke([&](Buffer::Instance& data, bool) { data_.move(data); }));\n      Http::TestResponseTrailerMapImpl trailers;\n      EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(trailers));\n    }\n    verifyCompressedData(content_length);\n    drainBuffer();\n    EXPECT_EQ(1U, stats_.counter(\"test.gzip.compressed\").value());\n  }\n\n  void 
expectValidFinishedBuffer(const uint32_t content_length) {\n    Buffer::RawSliceVector compressed_slices = data_.getRawSlices();\n    const uint64_t num_comp_slices = compressed_slices.size();\n\n    const std::string header_hex_str = Hex::encode(\n        reinterpret_cast<unsigned char*>(compressed_slices[0].mem_), compressed_slices[0].len_);\n    // HEADER 0x1f = 31 (window_bits)\n    EXPECT_EQ(\"1f8b\", header_hex_str.substr(0, 4));\n    // CM 0x8 = deflate (compression method)\n    EXPECT_EQ(\"08\", header_hex_str.substr(4, 2));\n\n    const std::string footer_bytes_str =\n        Hex::encode(reinterpret_cast<unsigned char*>(compressed_slices[num_comp_slices - 1].mem_),\n                    compressed_slices[num_comp_slices - 1].len_);\n\n    // A valid finished compressed buffer should have trailer with input size in it, i.e. equals to\n    // the value of content_length.\n    expectEqualInputSize(footer_bytes_str, content_length);\n  }\n\n  void expectEqualInputSize(const std::string& footer_bytes, const uint32_t input_size) {\n    const std::string size_bytes = footer_bytes.substr(footer_bytes.size() - 8, 8);\n    uint64_t size;\n    StringUtil::atoull(size_bytes.c_str(), size, 16);\n    EXPECT_EQ(TestUtility::flipOrder<uint32_t>(size), input_size);\n  }\n\n  void expectValidCompressionStrategyAndLevel(\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy strategy,\n      absl::string_view strategy_name,\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel level,\n      absl::string_view level_name) {\n    setUpFilter(fmt::format(R\"EOF({{\"compression_strategy\": \"{}\", \"compression_level\": \"{}\"}})EOF\",\n                            strategy_name, level_name));\n    EXPECT_EQ(strategy, config_->compressionStrategy());\n    EXPECT_EQ(level, config_->compressionLevel());\n    EXPECT_EQ(5, config_->memoryLevel());\n    EXPECT_EQ(30, config_->minimumLength());\n    EXPECT_EQ(28, config_->windowBits());\n    
EXPECT_EQ(false, config_->disableOnEtagHeader());\n    EXPECT_EQ(false, config_->removeAcceptEncodingHeader());\n    EXPECT_EQ(18, config_->contentTypeValues().size());\n  }\n\n  void doResponseNoCompression(Http::TestResponseHeaderMapImpl&& headers) {\n    uint64_t content_length;\n    ASSERT_TRUE(absl::SimpleAtoi(headers.get_(\"content-length\"), &content_length));\n    feedBuffer(content_length);\n    Http::TestResponseHeaderMapImpl continue_headers;\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n              filter_->encode100ContinueHeaders(continue_headers));\n    Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n    EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map));\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n    EXPECT_EQ(\"\", headers.get_(\"content-encoding\"));\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, false));\n    Http::TestResponseTrailerMapImpl trailers;\n    EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(trailers));\n    EXPECT_EQ(1, stats_.counter(\"test.gzip.not_compressed\").value());\n  }\n\n  std::shared_ptr<GzipFilterConfig> config_;\n  std::unique_ptr<Common::Compressors::CompressorFilter> filter_;\n  Buffer::OwnedImpl data_;\n  Stats::IsolatedStoreImpl stats_store_;\n  Compression::Gzip::Decompressor::ZlibDecompressorImpl decompressor_{stats_store_, \"test\"};\n  Buffer::OwnedImpl decompressed_data_;\n  std::string expected_str_;\n  Stats::TestUtil::TestStore stats_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n};\n\n// Test if Runtime Feature is Disabled\nTEST_F(GzipFilterTest, RuntimeDisabled) {\n  setUpFilter(R\"EOF(\n{\n  \"compressor\": {\n    \"runtime_enabled\": {\n      \"default_value\": true,\n      \"runtime_key\": 
\"foo_key\"\n    }\n  }\n}\n)EOF\");\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"foo_key\", true))\n      .Times(2)\n      .WillRepeatedly(Return(false));\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"deflate, gzip\"}}, false);\n  doResponseNoCompression({{\":method\", \"get\"}, {\"content-length\", \"256\"}});\n}\n\n// Default config values.\nTEST_F(GzipFilterTest, DefaultConfigValues) {\n  EXPECT_EQ(5, config_->memoryLevel());\n  EXPECT_EQ(30, config_->minimumLength());\n  EXPECT_EQ(28, config_->windowBits());\n  EXPECT_EQ(false, config_->disableOnEtagHeader());\n  EXPECT_EQ(false, config_->removeAcceptEncodingHeader());\n  EXPECT_EQ(Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard,\n            config_->compressionStrategy());\n  EXPECT_EQ(Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard,\n            config_->compressionLevel());\n  EXPECT_EQ(18, config_->contentTypeValues().size());\n}\n\nTEST_F(GzipFilterTest, AvailableCombinationCompressionStrategyAndLevelConfig) {\n  expectValidCompressionStrategyAndLevel(\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered, \"FILTERED\",\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, \"BEST\");\n  expectValidCompressionStrategyAndLevel(\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman, \"HUFFMAN\",\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, \"BEST\");\n  expectValidCompressionStrategyAndLevel(\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, \"RLE\",\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, \"SPEED\");\n  expectValidCompressionStrategyAndLevel(\n      Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, \"DEFAULT\",\n      
Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, \"DEFAULT\");\n}\n\n// Acceptance Testing with default configuration.\nTEST_F(GzipFilterTest, AcceptanceGzipEncoding) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"deflate, gzip\"}}, false);\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  Http::TestRequestTrailerMapImpl trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n  doResponseCompression({{\":method\", \"get\"}, {\"content-length\", \"256\"}}, false);\n}\n\nTEST_F(GzipFilterTest, AcceptanceGzipEncodingWithTrailers) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"deflate, gzip\"}}, false);\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  Http::TestRequestTrailerMapImpl trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n  doResponseCompression({{\":method\", \"get\"}, {\"content-length\", \"256\"}}, true);\n}\n\n// Verifies that compression is skipped when cache-control header has no-transform value.\nTEST_F(GzipFilterTest, HasCacheControlNoTransformNoCompression) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip;q=0, deflate\"}}, true);\n  doResponseNoCompression(\n      {{\":method\", \"get\"}, {\"content-length\", \"256\"}, {\"cache-control\", \"no-transform\"}});\n}\n\n// Verifies that compression is NOT skipped when cache-control header does NOT have no-transform\n// value.\nTEST_F(GzipFilterTest, HasCacheControlNoTransformCompression) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip, deflate\"}}, true);\n  doResponseCompression(\n      {{\":method\", \"get\"}, 
{\"content-length\", \"256\"}, {\"cache-control\", \"no-cache\"}}, false);\n}\n\n// Verifies that compression is skipped when accept-encoding header is not allowed.\nTEST_F(GzipFilterTest, AcceptEncodingNoCompression) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip;q=0, deflate\"}}, true);\n  doResponseNoCompression({{\":method\", \"get\"}, {\"content-length\", \"256\"}});\n}\n\n// Verifies that compression is NOT skipped when accept-encoding header is allowed.\nTEST_F(GzipFilterTest, AcceptEncodingCompression) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip, deflate\"}}, true);\n  doResponseCompression({{\":method\", \"get\"}, {\"content-length\", \"256\"}}, false);\n}\n\n// Verifies that compression is skipped when content-length header is NOT allowed.\nTEST_F(GzipFilterTest, ContentLengthNoCompression) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  doResponseNoCompression({{\":method\", \"get\"}, {\"content-length\", \"10\"}});\n}\n\n// Verifies that compression is NOT skipped when content-length header is allowed.\nTEST_F(GzipFilterTest, ContentLengthCompression) {\n  setUpFilter(R\"EOF({\"content_length\": 500})EOF\");\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  doResponseCompression({{\":method\", \"get\"}, {\"content-length\", \"1000\"}}, false);\n}\n\n// Verifies that compression is skipped when content-type header is NOT allowed.\nTEST_F(GzipFilterTest, ContentTypeNoCompression) {\n  setUpFilter(R\"EOF(\n    {\n      \"content_type\": [\n        \"text/html\",\n        \"text/css\",\n        \"text/plain\",\n        \"application/javascript\",\n        \"application/json\",\n        \"font/eot\",\n        \"image/svg+xml\"\n      ]\n    }\n  )EOF\");\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  doResponseNoCompression(\n      {{\":method\", \"get\"}, {\"content-length\", \"256\"}, {\"content-type\", 
\"image/jpeg\"}});\n}\n\n// Verifies that compression is NOT skipped when content-type header is allowed.\nTEST_F(GzipFilterTest, ContentTypeCompression) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  doResponseCompression({{\":method\", \"get\"},\n                         {\"content-length\", \"256\"},\n                         {\"content-type\", \"application/json;charset=utf-8\"}},\n                        false);\n}\n\n// Verifies that compression is skipped when etag header is NOT allowed.\nTEST_F(GzipFilterTest, EtagNoCompression) {\n  setUpFilter(R\"EOF({ \"disable_on_etag_header\": true })EOF\");\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  doResponseNoCompression(\n      {{\":method\", \"get\"}, {\"content-length\", \"256\"}, {\"etag\", R\"EOF(W/\"686897696a7c876b7e\")EOF\"}});\n  EXPECT_EQ(1, stats_.counter(\"test.gzip.not_compressed_etag\").value());\n}\n\n// Verifies that compression is applied and the etag header is removed when\n// disable_on_etag_header is not set.\nTEST_F(GzipFilterTest, EtagCompression) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  Http::TestResponseHeaderMapImpl headers{\n      {\":status\", \"200\"}, {\"content-length\", \"256\"}, {\"etag\", \"686897696a7c876b7e\"}};\n  feedBuffer(256);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_FALSE(headers.has(\"etag\"));\n  EXPECT_EQ(\"gzip\", headers.get_(\"content-encoding\"));\n}\n\n// Tests compression when Transfer-Encoding header exists.\nTEST_F(GzipFilterTest, TransferEncodingChunked) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  doResponseCompression(\n      {{\":method\", \"get\"}, {\"content-length\", \"256\"}, {\"transfer-encoding\", \"chunked\"}}, false);\n}\n\n// Verifies that compression is skipped when the Transfer-Encoding header contains an\n// encoding other than chunked.\nTEST_F(GzipFilterTest, AcceptanceTransferEncodingGzip) {\n\n  doRequest({{\":method\", 
\"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  doResponseNoCompression(\n      {{\":method\", \"get\"}, {\"content-length\", \"256\"}, {\"transfer-encoding\", \"chunked, deflate\"}});\n}\n\n// Content-Encoding: upstream response is already encoded.\nTEST_F(GzipFilterTest, ContentEncodingAlreadyEncoded) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":method\", \"get\"}, {\"content-length\", \"256\"}, {\"content-encoding\", \"deflate, gzip\"}};\n  feedBuffer(256);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n  EXPECT_TRUE(response_headers.has(\"content-length\"));\n  EXPECT_FALSE(response_headers.has(\"transfer-encoding\"));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, false));\n}\n\n// No compression when upstream response is empty.\nTEST_F(GzipFilterTest, EmptyResponse) {\n\n  Http::TestResponseHeaderMapImpl headers{{\":status\", \"204\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, true));\n  EXPECT_EQ(\"\", headers.get_(\"content-length\"));\n  EXPECT_EQ(\"\", headers.get_(\"content-encoding\"));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, true));\n}\n\n// Filter should set Vary header value with `accept-encoding`.\nTEST_F(GzipFilterTest, NoVaryHeader) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  Http::TestResponseHeaderMapImpl headers{{\":status\", \"200\"}, {\"content-length\", \"256\"}};\n  feedBuffer(256);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_TRUE(headers.has(\"vary\"));\n  EXPECT_EQ(\"Accept-Encoding\", headers.get_(\"vary\"));\n}\n\n// Filter should set Vary header value with `accept-encoding` and preserve other values.\nTEST_F(GzipFilterTest, VaryOtherValues) {\n  doRequest({{\":method\", \"get\"}, 
{\"accept-encoding\", \"gzip\"}}, true);\n  Http::TestResponseHeaderMapImpl headers{\n      {\":status\", \"200\"}, {\"content-length\", \"256\"}, {\"vary\", \"User-Agent, Cookie\"}};\n  feedBuffer(256);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_TRUE(headers.has(\"vary\"));\n  EXPECT_EQ(\"User-Agent, Cookie, Accept-Encoding\", headers.get_(\"vary\"));\n}\n\n// Vary header should have only one `accept-encoding` value.\nTEST_F(GzipFilterTest, VaryAlreadyHasAcceptEncoding) {\n  doRequest({{\":method\", \"get\"}, {\"accept-encoding\", \"gzip\"}}, true);\n  Http::TestResponseHeaderMapImpl headers{\n      {\":status\", \"200\"}, {\"content-length\", \"256\"}, {\"vary\", \"accept-encoding\"}};\n  feedBuffer(256);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false));\n  EXPECT_TRUE(headers.has(\"vary\"));\n  EXPECT_EQ(\"accept-encoding, Accept-Encoding\", headers.get_(\"vary\"));\n}\n\n// Verify removeAcceptEncoding header.\nTEST_F(GzipFilterTest, RemoveAcceptEncodingHeader) {\n  {\n    Http::TestRequestHeaderMapImpl headers = {{\"accept-encoding\", \"deflate, gzip, br\"}};\n    setUpFilter(R\"EOF({\"remove_accept_encoding_header\": true})EOF\");\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true));\n    EXPECT_FALSE(headers.has(\"accept-encoding\"));\n  }\n  {\n    Http::TestRequestHeaderMapImpl headers = {{\"accept-encoding\", \"deflate, gzip, br\"}};\n    setUpFilter(\"{}\");\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true));\n    EXPECT_TRUE(headers.has(\"accept-encoding\"));\n    EXPECT_EQ(\"deflate, gzip, br\", headers.get_(\"accept-encoding\"));\n  }\n}\n\n// Test setting zlib's chunk size.\nTEST_F(GzipFilterTest, ChunkSize) {\n  // Default\n  setUpFilter(\"{}\");\n  EXPECT_EQ(config_->chunkSize(), 4096);\n\n  // Override\n  setUpFilter(R\"EOF(\n{\n  \"chunk_size\": 8192\n}\n)EOF\");\n  
EXPECT_EQ(config_->chunkSize(), 8192);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(GzipFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.gzip\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n// Test that the deprecated extension triggers an exception.\nTEST(GzipFilterFactoryTest, DEPRECATED_FEATURE_TEST(TestCheckDeprecatedExtensionThrows)) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  GzipFilterFactory factory;\n  envoy::extensions::filters::http::gzip::v3::Gzip config;\n\n  EXPECT_CALL(\n      context.runtime_loader_.snapshot_,\n      deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_gzip_http_filter\", _))\n      .WillRepeatedly(Return(false));\n\n  EXPECT_THROW_WITH_REGEX(factory.createFilterFactoryFromProto(config, \"stats.\", context),\n                          EnvoyException,\n                          \"Using deprecated extension 'envoy.extensions.filters.http.gzip'.*\");\n}\n\n// Test that the deprecated extension gives a deprecation warning.\nTEST(GzipFilterFactoryTest, DEPRECATED_FEATURE_TEST(TestCheckDeprecatedExtensionWarns)) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  GzipFilterFactory factory;\n  envoy::extensions::filters::http::gzip::v3::Gzip config;\n\n  EXPECT_CALL(\n      context.runtime_loader_.snapshot_,\n      deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_gzip_http_filter\", _))\n      .WillRepeatedly(Return(true));\n\n  EXPECT_NO_THROW(factory.createFilterFactoryFromProto(config, \"stats.\", context));\n\n  EXPECT_LOG_CONTAINS(\"warn\", \"Using deprecated extension 'envoy.extensions.filters.http.gzip'.\",\n                      factory.createFilterFactoryFromProto(config, \"stats.\", context));\n}\n\n} // namespace Gzip\n} // 
namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/header_to_metadata/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"header_to_metadata_filter_test\",\n    srcs = [\"header_to_metadata_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.header_to_metadata\",\n    deps = [\n        \"//source/common/common:base64_lib\",\n        \"//source/extensions/filters/http/header_to_metadata:header_to_metadata_filter_lib\",\n        \"//test/mocks/server:server_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.header_to_metadata\",\n    deps = [\n        \"//source/extensions/filters/http/header_to_metadata:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/header_to_metadata/config_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.h\"\n#include \"envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.validate.h\"\n\n#include \"extensions/filters/http/header_to_metadata/config.h\"\n#include \"extensions/filters/http/header_to_metadata/header_to_metadata_filter.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HeaderToMetadataFilter {\n\nusing HeaderToMetadataProtoConfig =\n    envoy::extensions::filters::http::header_to_metadata::v3::Config;\n\nvoid testForbiddenConfig(const std::string& yaml) {\n  HeaderToMetadataProtoConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  HeaderToMetadataConfig factory;\n\n  EXPECT_THROW(factory.createFilterFactoryFromProto(proto_config, \"stats\", context),\n               EnvoyException);\n}\n\n// Tests that empty (metadata) keys are rejected.\nTEST(HeaderToMetadataFilterConfigTest, InvalidEmptyKey) {\n  const std::string yaml = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: \"\"\n      type: STRING\n  )EOF\";\n\n  HeaderToMetadataProtoConfig proto_config;\n  EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException);\n}\n\n// Tests that empty (metadata) keys are rejected in case of cookie.\nTEST(HeaderToMetadataFilterConfigTest, InvalidEmptyCookieKey) {\n  const std::string yaml = R\"EOF(\nrequest_rules:\n  - cookie: x-cookie\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: \"\"\n      type: STRING\n  )EOF\";\n\n  HeaderToMetadataProtoConfig proto_config;\n  
EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException);\n}\n\n// Tests that a valid config with header is properly consumed.\nTEST(HeaderToMetadataFilterConfigTest, SimpleConfig) {\n  const std::string yaml = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: version\n      type: STRING\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: default\n      value: 'true'\n      type: STRING\n  )EOF\";\n\n  HeaderToMetadataProtoConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  HeaderToMetadataConfig factory;\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  EXPECT_CALL(filter_callbacks, addStreamFilter(_));\n  cb(filter_callbacks);\n}\n\n// Tests that a valid config with cookie is properly consumed.\nTEST(HeaderToMetadataFilterConfigTest, SimpleCookieConfig) {\n  const std::string yaml = R\"EOF(\nrequest_rules:\n  - cookie: x-cookie\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: version1\n      type: STRING\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: default\n      value: 'true'\n      type: STRING\n  )EOF\";\n\n  HeaderToMetadataProtoConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  HeaderToMetadataConfig factory;\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  EXPECT_CALL(filter_callbacks, addStreamFilter(_));\n  cb(filter_callbacks);\n}\n\n// Tests that per route config properly overrides the global 
config.\nTEST(HeaderToMetadataFilterConfigTest, PerRouteConfig) {\n  const std::string yaml = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: version\n      type: STRING\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: default\n      value: 'true'\n      type: STRING\n  )EOF\";\n\n  HeaderToMetadataProtoConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockServerFactoryContext> context;\n  HeaderToMetadataConfig factory;\n\n  const auto route_config = factory.createRouteSpecificFilterConfig(\n      proto_config, context, ProtobufMessage::getNullValidationVisitor());\n  const auto* config = dynamic_cast<const Config*>(route_config.get());\n  EXPECT_TRUE(config->doRequest());\n  EXPECT_FALSE(config->doResponse());\n}\n\n// Tests that configuration does not allow value and regex_value_rewrite in the same rule.\nTEST(HeaderToMetadataFilterConfigTest, ValueAndRegex) {\n  const std::string yaml = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: cluster\n      value: foo\n      regex_value_rewrite:\n        pattern:\n          google_re2: {}\n          regex: \"^/(cluster[\\\\d\\\\w-]+)/?.*$\"\n        substitution: \"\\\\1\"\n  )EOF\";\n\n  testForbiddenConfig(yaml);\n}\n\n// Tests that cookie configuration does not allow value and regex_value_rewrite in the same rule.\nTEST(HeaderToMetadataFilterConfigTest, CookieValueAndRegex) {\n  const std::string yaml = R\"EOF(\nrequest_rules:\n  - cookie: x-cookie\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: cluster\n      value: foo\n      regex_value_rewrite:\n        pattern:\n          google_re2: {}\n          regex: \"^/(cluster[\\\\d\\\\w-]+)/?.*$\"\n        substitution: \"\\\\1\"\n  )EOF\";\n\n  testForbiddenConfig(yaml);\n}\n\n// Tests that on_header_missing 
rules don't allow an empty value.\nTEST(HeaderToMetadataFilterConfigTest, OnHeaderMissingEmptyValue) {\n  const std::string yaml = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: \"foo\"\n      type: STRING\n  )EOF\";\n\n  testForbiddenConfig(yaml);\n}\n\n// Tests that on_header_missing rules don't allow an empty cookie value.\nTEST(HeaderToMetadataFilterConfigTest, CookieOnHeaderMissingEmptyValue) {\n  const std::string yaml = R\"EOF(\nrequest_rules:\n  - cookie: x-cookie\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: \"foo\"\n      type: STRING\n  )EOF\";\n\n  testForbiddenConfig(yaml);\n}\n\n} // namespace HeaderToMetadataFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/http/header_to_metadata/header_to_metadata_filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HeaderToMetadataFilter {\nnamespace {\n\nMATCHER_P(MapEq, rhs, \"\") {\n  const ProtobufWkt::Struct& obj = arg;\n  EXPECT_TRUE(!rhs.empty());\n  for (auto const& entry : rhs) {\n    EXPECT_EQ(obj.fields().at(entry.first).string_value(), entry.second);\n  }\n  return true;\n}\n\nMATCHER_P(MapEqNum, rhs, \"\") {\n  const ProtobufWkt::Struct& obj = arg;\n  EXPECT_TRUE(!rhs.empty());\n  for (auto const& entry : rhs) {\n    EXPECT_EQ(obj.fields().at(entry.first).number_value(), entry.second);\n  }\n  return true;\n}\n\nMATCHER_P(MapEqValue, rhs, \"\") {\n  const ProtobufWkt::Struct& obj = arg;\n  EXPECT_TRUE(!rhs.empty());\n  for (auto const& entry : rhs) {\n    EXPECT_TRUE(TestUtility::protoEqual(obj.fields().at(entry.first), entry.second));\n  }\n  return true;\n}\n\n} // namespace\n\nclass HeaderToMetadataTest : public testing::Test {\npublic:\n  const std::string request_config_yaml = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: version\n      type: STRING\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: default\n      value: 'true'\n      type: STRING\n)EOF\";\n\n  void initializeFilter(const std::string& yaml) {\n    
envoy::extensions::filters::http::header_to_metadata::v3::Config config;\n    TestUtility::loadFromYaml(yaml, config);\n    config_ = std::make_shared<Config>(config);\n    filter_ = std::make_shared<HeaderToMetadataFilter>(config_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  const Config* getConfig() { return filter_->getConfig(); }\n\n  ConfigSharedPtr config_;\n  std::shared_ptr<HeaderToMetadataFilter> filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> req_info_;\n};\n\n/**\n * Basic use-case.\n */\nTEST_F(HeaderToMetadataTest, BasicRequestTest) {\n  initializeFilter(request_config_yaml);\n  Http::TestRequestHeaderMapImpl incoming_headers{{\"X-VERSION\", \"0xdeadbeef\"}};\n  std::map<std::string, std::string> expected = {{\"version\", \"0xdeadbeef\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  Buffer::OwnedImpl data(\"data\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  Http::TestRequestTrailerMapImpl incoming_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(incoming_trailers));\n  filter_->onDestroy();\n}\n\n// Verify concatenation works.\nTEST_F(HeaderToMetadataTest, BasicRequestDoubleHeadersTest) {\n  initializeFilter(request_config_yaml);\n  Http::TestRequestHeaderMapImpl incoming_headers{{\"X-VERSION\", \"foo\"}, {\"X-VERSION\", \"bar\"}};\n  std::map<std::string, 
std::string> expected = {{\"version\", \"foo,bar\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  Buffer::OwnedImpl data(\"data\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  Http::TestRequestTrailerMapImpl incoming_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(incoming_trailers));\n  filter_->onDestroy();\n}\n\nTEST_F(HeaderToMetadataTest, PerRouteOverride) {\n  // Global config is empty.\n  initializeFilter(\"{}\");\n  Http::TestRequestHeaderMapImpl incoming_headers{{\"X-VERSION\", \"0xdeadbeef\"}};\n  std::map<std::string, std::string> expected = {{\"version\", \"0xdeadbeef\"}};\n\n  // Setup per route config.\n  envoy::extensions::filters::http::header_to_metadata::v3::Config config_proto;\n  TestUtility::loadFromYaml(request_config_yaml, config_proto);\n  Config per_route_config(config_proto, true);\n  EXPECT_CALL(decoder_callbacks_.route_->route_entry_.virtual_host_,\n              perFilterConfig(HttpFilterNames::get().HeaderToMetadata))\n      .WillOnce(Return(&per_route_config));\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  Buffer::OwnedImpl data(\"data\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, 
false));\n  Http::TestRequestTrailerMapImpl incoming_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(incoming_trailers));\n  filter_->onDestroy();\n}\n\nTEST_F(HeaderToMetadataTest, ConfigIsCached) {\n  // Global config is empty.\n  initializeFilter(\"{}\");\n  Http::TestRequestHeaderMapImpl incoming_headers{{\"X-VERSION\", \"0xdeadbeef\"}};\n  std::map<std::string, std::string> expected = {{\"version\", \"0xdeadbeef\"}};\n\n  // Setup per route config.\n  envoy::extensions::filters::http::header_to_metadata::v3::Config config_proto;\n  TestUtility::loadFromYaml(request_config_yaml, config_proto);\n  Config per_route_config(config_proto, true);\n  EXPECT_CALL(decoder_callbacks_.route_->route_entry_.virtual_host_,\n              perFilterConfig(HttpFilterNames::get().HeaderToMetadata))\n      .WillOnce(Return(&per_route_config));\n\n  EXPECT_TRUE(getConfig()->doRequest());\n  EXPECT_TRUE(getConfig()->doRequest());\n}\n\n/**\n * X-version not set, the on missing value should be set.\n */\nTEST_F(HeaderToMetadataTest, DefaultEndpointsTest) {\n  initializeFilter(request_config_yaml);\n  Http::TestRequestHeaderMapImpl incoming_headers{{\"X-FOO\", \"bar\"}};\n  std::map<std::string, std::string> expected = {{\"default\", \"true\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false));\n}\n\n/**\n * Test that private headers get removed.\n */\nTEST_F(HeaderToMetadataTest, HeaderRemovedTest) {\n  const std::string response_config_yaml = R\"EOF(\nresponse_rules:\n  - header: x-authenticated\n    on_header_present:\n      key: auth\n      type: STRING\n    remove: true\n)EOF\";\n  initializeFilter(response_config_yaml);\n  Http::TestResponseHeaderMapImpl incoming_headers{{\"x-authenticated\", \"1\"}};\n  std::map<std::string, 
std::string> expected = {{\"auth\", \"1\"}};\n  Http::TestResponseHeaderMapImpl empty_headers;\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_,\n              setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEq(expected)));\n  Http::TestResponseHeaderMapImpl continue_response{{\":status\", \"100\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(continue_response));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false));\n  EXPECT_EQ(empty_headers, incoming_headers);\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map));\n  Buffer::OwnedImpl data(\"data\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, false));\n  Http::TestResponseTrailerMapImpl incoming_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(incoming_trailers));\n}\n\n/**\n * Test the value gets written as a number.\n */\nTEST_F(HeaderToMetadataTest, NumberTypeTest) {\n  const std::string response_config_yaml = R\"EOF(\nresponse_rules:\n  - header: x-authenticated\n    on_header_present:\n      key: auth\n      type: NUMBER\n)EOF\";\n  initializeFilter(response_config_yaml);\n  Http::TestResponseHeaderMapImpl incoming_headers{{\"x-authenticated\", \"1\"}};\n  std::map<std::string, int> expected = {{\"auth\", 1}};\n  Http::TestResponseHeaderMapImpl empty_headers;\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_,\n              setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEqNum(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false));\n}\n\n/**\n * Test the Base64 encoded value gets written as a string.\n */\nTEST_F(HeaderToMetadataTest, 
StringTypeInBase64UrlTest) {\n  const std::string response_config_yaml = R\"EOF(\nresponse_rules:\n  - header: x-authenticated\n    on_header_present:\n      key: auth\n      type: STRING\n      encode: BASE64\n)EOF\";\n  initializeFilter(response_config_yaml);\n  std::string data = \"Non-ascii-characters\";\n  const auto encoded = Base64::encode(data.c_str(), data.size());\n  Http::TestResponseHeaderMapImpl incoming_headers{{\"x-authenticated\", encoded}};\n  std::map<std::string, std::string> expected = {{\"auth\", data}};\n  Http::TestResponseHeaderMapImpl empty_headers;\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_,\n              setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false));\n}\n\n/**\n * Test the Base64 encoded protobuf value gets written as a protobuf value.\n */\nTEST_F(HeaderToMetadataTest, ProtobufValueTypeInBase64UrlTest) {\n  const std::string response_config_yaml = R\"EOF(\nresponse_rules:\n  - header: x-authenticated\n    on_header_present:\n      key: auth\n      type: PROTOBUF_VALUE\n      encode: BASE64\n)EOF\";\n  initializeFilter(response_config_yaml);\n\n  ProtobufWkt::Value value;\n  auto* s = value.mutable_struct_value();\n\n  ProtobufWkt::Value v;\n  v.set_string_value(\"blafoo\");\n  (*s->mutable_fields())[\"k1\"] = v;\n  v.set_number_value(2019.07);\n  (*s->mutable_fields())[\"k2\"] = v;\n  v.set_bool_value(true);\n  (*s->mutable_fields())[\"k3\"] = v;\n\n  std::string data;\n  ASSERT_TRUE(value.SerializeToString(&data));\n  const auto encoded = Base64::encode(data.c_str(), data.size());\n  Http::TestResponseHeaderMapImpl incoming_headers{{\"x-authenticated\", encoded}};\n  std::map<std::string, ProtobufWkt::Value> expected = {{\"auth\", value}};\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  
EXPECT_CALL(req_info_,\n              setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEqValue(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false));\n}\n\n/**\n * Test bad Base64 encoding is not written.\n */\nTEST_F(HeaderToMetadataTest, ProtobufValueTypeInBadBase64UrlTest) {\n  const std::string response_config_yaml = R\"EOF(\nresponse_rules:\n  - header: x-authenticated\n    on_header_present:\n      key: auth\n      type: PROTOBUF_VALUE\n      encode: BASE64\n)EOF\";\n  initializeFilter(response_config_yaml);\n  Http::TestResponseHeaderMapImpl incoming_headers{{\"x-authenticated\", \"invalid\"}};\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false));\n}\n\n/**\n * Test the bad protobuf value is not written.\n */\nTEST_F(HeaderToMetadataTest, BadProtobufValueTypeInBase64UrlTest) {\n  const std::string response_config_yaml = R\"EOF(\nresponse_rules:\n  - header: x-authenticated\n    on_header_present:\n      key: auth\n      type: PROTOBUF_VALUE\n      encode: BASE64\n)EOF\";\n  initializeFilter(response_config_yaml);\n  std::string data = \"invalid\";\n  const auto encoded = Base64::encode(data.c_str(), data.size());\n  Http::TestResponseHeaderMapImpl incoming_headers{{\"x-authenticated\", encoded}};\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false));\n}\n\n/**\n * Headers not present.\n */\nTEST_F(HeaderToMetadataTest, HeaderNotPresent) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: version\n      
type: STRING\n)EOF\";\n  initializeFilter(config);\n  Http::TestRequestHeaderMapImpl incoming_headers;\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false));\n}\n\n/**\n * Two headers match.\n */\nTEST_F(HeaderToMetadataTest, MultipleHeadersMatch) {\n  const std::string python_yaml = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_present:\n      key: version\n      metadata_namespace: envoy.lb\n      type: STRING\n  - header: x-python-version\n    on_header_present:\n      key: python_version\n      metadata_namespace: envoy.lb\n      type: STRING\n)EOF\";\n  initializeFilter(python_yaml);\n  Http::TestRequestHeaderMapImpl incoming_headers{\n      {\"X-VERSION\", \"v4.0\"},\n      {\"X-PYTHON-VERSION\", \"3.7\"},\n      {\"X-IGNORE\", \"nothing\"},\n  };\n  std::map<std::string, std::string> expected = {{\"version\", \"v4.0\"}, {\"python_version\", \"3.7\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false));\n}\n\n/**\n * No header value.\n */\nTEST_F(HeaderToMetadataTest, EmptyHeaderValue) {\n  initializeFilter(request_config_yaml);\n  Http::TestRequestHeaderMapImpl incoming_headers{{\"X-VERSION\", \"\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false));\n}\n\n/**\n * Header value too long.\n */\nTEST_F(HeaderToMetadataTest, HeaderValueTooLong) {\n  initializeFilter(request_config_yaml);\n  auto length = 
Envoy::Extensions::HttpFilters::HeaderToMetadataFilter::MAX_HEADER_VALUE_LEN + 1;\n  Http::TestRequestHeaderMapImpl incoming_headers{{\"X-VERSION\", std::string(length, 'x')}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false));\n}\n\n/**\n * Ignore the header's value, use a constant value.\n */\nTEST_F(HeaderToMetadataTest, IgnoreHeaderValueUseConstant) {\n  const std::string response_config_yaml = R\"EOF(\nresponse_rules:\n  - header: x-something\n    on_header_present:\n      key: something\n      value: else\n      type: STRING\n    remove: true\n)EOF\";\n  initializeFilter(response_config_yaml);\n  Http::TestResponseHeaderMapImpl incoming_headers{{\"x-something\", \"thing\"}};\n  std::map<std::string, std::string> expected = {{\"something\", \"else\"}};\n  Http::TestResponseHeaderMapImpl empty_headers;\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_,\n              setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false));\n  EXPECT_EQ(empty_headers, incoming_headers);\n}\n\n/**\n * Rules with no on_header{present,missing} fields should be rejected.\n */\nTEST_F(HeaderToMetadataTest, RejectInvalidRule) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - header: x-something\n)EOF\";\n  auto expected = \"header to metadata filter: rule for header 'x-something' has neither \"\n                  \"`on_header_present` nor `on_header_missing` set\";\n  EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected);\n}\n\nTEST_F(HeaderToMetadataTest, PerRouteEmtpyRules) {\n  envoy::extensions::filters::http::header_to_metadata::v3::Config config_proto;\n  
EXPECT_THROW(std::make_shared<Config>(config_proto, true), EnvoyException);\n}\n\n/**\n * Invalid empty header or cookie should be rejected.\n */\nTEST_F(HeaderToMetadataTest, RejectEmptyHeader) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - header: \"\"\n\n)EOF\";\n  auto expected = \"One of Cookie or Header option needs to be specified\";\n  EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected);\n}\n\n/**\n * Rules with both header and cookie fields should be rejected.\n */\nTEST_F(HeaderToMetadataTest, RejectBothCookieHeader) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - header: x-something\n    cookie: something-else\n    on_header_present:\n      key: something\n      value: else\n      type: STRING\n    remove: false\n\n)EOF\";\n  auto expected = \"Cannot specify both header and cookie\";\n  EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected);\n}\n\n/**\n * Rules with remove field should be rejected in case of a cookie.\n */\nTEST_F(HeaderToMetadataTest, RejectRemoveForCookie) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - cookie: cookie\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: version\n      type: STRING\n    remove: true\n)EOF\";\n  auto expected = \"Cannot specify remove for cookie\";\n  EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected);\n}\n\n/**\n * Empty values not added to metadata.\n */\nTEST_F(HeaderToMetadataTest, NoEmptyValues) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: version\n      type: STRING\n)EOF\";\n  initializeFilter(config);\n  Http::TestRequestHeaderMapImpl headers{{\"x-version\", \"\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n}\n\n/**\n * Regex substitution on header value.\n */\nTEST_F(HeaderToMetadataTest, RegexSubstitution) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - header: :path\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: cluster\n      regex_value_rewrite:\n        pattern:\n          google_re2: {}\n          regex: \"^/(cluster[\\\\d\\\\w-]+)/?.*$\"\n        substitution: \"\\\\1\"\n)EOF\";\n  initializeFilter(config);\n\n  // Match with additional path elements.\n  {\n    Http::TestRequestHeaderMapImpl headers{{\":path\", \"/cluster-prod-001/x/y\"}};\n    std::map<std::string, std::string> expected = {{\"cluster\", \"cluster-prod-001\"}};\n\n    EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n    EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  }\n\n  // Match with no additional path elements.\n  {\n    Http::TestRequestHeaderMapImpl headers{{\":path\", \"/cluster-prod-001\"}};\n    std::map<std::string, std::string> expected = {{\"cluster\", \"cluster-prod-001\"}};\n\n    EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n    EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  }\n\n  // No match.\n  {\n    Http::TestRequestHeaderMapImpl headers{{\":path\", \"/foo\"}};\n    std::map<std::string, std::string> expected = {{\"cluster\", \"/foo\"}};\n\n    EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n    EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  }\n\n  // No match with 
additional path elements.\n  {\n    Http::TestRequestHeaderMapImpl headers{{\":path\", \"/foo/bar?x=2\"}};\n    std::map<std::string, std::string> expected = {{\"cluster\", \"/foo/bar?x=2\"}};\n\n    EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n    EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  }\n}\n\n/**\n * Missing case is not executed when header is present.\n */\nTEST_F(HeaderToMetadataTest, NoMissingWhenHeaderIsPresent) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: version\n      value: some_value\n      type: STRING\n)EOF\";\n  initializeFilter(config);\n  Http::TestRequestHeaderMapImpl headers{{\"x-version\", \"19\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n}\n\n/**\n * on header missing case with no header data\n */\n\nTEST_F(HeaderToMetadataTest, OnMissingWhenHeaderIsPresent) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: version\n      value: some_value\n      type: STRING\n)EOF\";\n  initializeFilter(config);\n  Http::TestRequestHeaderMapImpl headers{{\"x-version\", \"\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n}\n\n/**\n * on header present case, when the regex replacement turns the header into an empty string\n */\nTEST_F(HeaderToMetadataTest, HeaderIsPresentButRegexEmptiesIt) {\n  
const std::string config = R\"EOF(\nrequest_rules:\n  - header: x-version\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: cluster\n      regex_value_rewrite:\n        pattern:\n          google_re2: {}\n          regex: \"^foo\"\n        substitution: \"\"\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: version\n      value: some_value\n      type: STRING\n)EOF\";\n  initializeFilter(config);\n  Http::TestRequestHeaderMapImpl headers{{\"x-version\", \"foo\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n}\n\n/**\n * cookie value extracted and stored\n */\nTEST_F(HeaderToMetadataTest, CookieValueUsed) {\n  const std::string response_config_yaml = R\"EOF(\nresponse_rules:\n  - cookie: bar\n    on_header_present:\n      key: bar\n      type: STRING\n    remove: false\n)EOF\";\n  initializeFilter(response_config_yaml);\n  Http::TestResponseHeaderMapImpl incoming_headers{{\"cookie\", \"bar=foo\"}};\n  std::map<std::string, std::string> expected = {{\"bar\", \"foo\"}};\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_,\n              setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false));\n}\n\n/**\n * Ignore the cookie's value, use a given constant value.\n */\nTEST_F(HeaderToMetadataTest, IgnoreCookieValueUseConstant) {\n  const std::string response_config_yaml = R\"EOF(\nresponse_rules:\n  - cookie: meh\n    on_header_present:\n      key: meh\n      value: some_value\n      type: STRING\n    remove: false\n)EOF\";\n  initializeFilter(response_config_yaml);\n  Http::TestResponseHeaderMapImpl incoming_headers{{\"cookie\", \"meh=foo\"}};\n  
std::map<std::string, std::string> expected = {{\"meh\", \"some_value\"}};\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_,\n              setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false));\n}\n\n/**\n * No cookie value, no metadata\n */\nTEST_F(HeaderToMetadataTest, NoCookieValue) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - cookie: foo\n    on_header_missing:\n      metadata_namespace: envoy.lb\n      key: foo\n      value: some_value\n      type: STRING\n)EOF\";\n  initializeFilter(config);\n  Http::TestRequestHeaderMapImpl headers{{\"cookie\", \"\"}};\n  std::map<std::string, std::string> expected = {{\"foo\", \"some_value\"}};\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n  EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n}\n\n/**\n * Regex substitution on cookie value.\n */\nTEST_F(HeaderToMetadataTest, CookieRegexSubstitution) {\n  const std::string config = R\"EOF(\nrequest_rules:\n  - cookie: foo\n    on_header_present:\n      metadata_namespace: envoy.lb\n      key: cluster\n      regex_value_rewrite:\n        pattern:\n          google_re2: {}\n          regex: \"^(cluster[\\\\d\\\\w-]+)$\"\n        substitution: \"\\\\1 matched\"\n)EOF\";\n  initializeFilter(config);\n\n  // match.\n  {\n    Http::TestRequestHeaderMapImpl headers{{\"cookie\", \"foo=cluster-prod-001\"}};\n    std::map<std::string, std::string> expected = {{\"cluster\", \"cluster-prod-001 matched\"}};\n\n    EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n    EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, 
filter_->decodeHeaders(headers, false));\n  }\n\n  // No match.\n  {\n    Http::TestRequestHeaderMapImpl headers{{\"cookie\", \"foo=cluster\"}};\n    std::map<std::string, std::string> expected = {{\"cluster\", \"cluster\"}};\n\n    EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n    EXPECT_CALL(req_info_, setDynamicMetadata(\"envoy.lb\", MapEq(expected)));\n    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  }\n}\n\n} // namespace HeaderToMetadataFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/health_check/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"health_check_test\",\n    srcs = [\"health_check_test.cc\"],\n    extension_name = \"envoy.filters.http.health_check\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/extensions/filters/http/health_check:health_check_lib\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.health_check\",\n    deps = [\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/health_check/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/health_check/config_test.cc",
    "content": "#include <string>\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/http/health_check/v3/health_check.pb.h\"\n#include \"envoy/extensions/filters/http/health_check/v3/health_check.pb.validate.h\"\n\n#include \"extensions/filters/http/health_check/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HealthCheck {\nnamespace {\n\nTEST(HealthCheckFilterConfig, HealthCheckFilter) {\n  const std::string yaml_string = R\"EOF(\n  pass_through_mode: true\n  headers:\n    - name: \":path\"\n      exact_match: \"/hc\"\n  )EOF\";\n\n  envoy::extensions::filters::http::health_check::v3::HealthCheck proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  HealthCheckFilterConfig factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\nTEST(HealthCheckFilterConfig, BadHealthCheckFilterConfig) {\n  const std::string yaml_string = R\"EOF(\n  pass_through_mode: true\n  headers:\n    - name: \":path\"\n      exact_match: \"/hc\"\n  status: 500\n  )EOF\";\n\n  envoy::extensions::filters::http::health_check::v3::HealthCheck proto_config;\n  EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config), EnvoyException,\n                          \"status: Cannot find field\");\n}\n\nTEST(HealthCheckFilterConfig, FailsWhenNotPassThroughButTimeoutSetYaml) {\n  const std::string yaml_string = R\"EOF(\n  pass_through_mode: false\n  cache_time: 0.234s\n  headers:\n    - name: \":path\"\n      exact_match: 
\"/foo\"\n  )EOF\";\n\n  envoy::extensions::filters::http::health_check::v3::HealthCheck proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n\n  HealthCheckFilterConfig factory;\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  EXPECT_THROW(factory.createFilterFactoryFromProto(proto_config, \"dummy_stats_prefix\", context),\n               EnvoyException);\n}\n\nTEST(HealthCheckFilterConfig, NotFailingWhenNotPassThroughAndTimeoutNotSetYaml) {\n  const std::string yaml_string = R\"EOF(\n  pass_through_mode: true\n  cache_time: 0.234s\n  headers:\n    - name: \":path\"\n      exact_match: \"/foo\"\n  )EOF\";\n\n  envoy::extensions::filters::http::health_check::v3::HealthCheck proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n\n  HealthCheckFilterConfig factory;\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  EXPECT_NO_THROW(\n      factory.createFilterFactoryFromProto(proto_config, \"dummy_stats_prefix\", context));\n}\n\nTEST(HealthCheckFilterConfig, FailsWhenNotPassThroughButTimeoutSetProto) {\n  HealthCheckFilterConfig healthCheckFilterConfig;\n  envoy::extensions::filters::http::health_check::v3::HealthCheck config{};\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  config.mutable_pass_through_mode()->set_value(false);\n  config.mutable_cache_time()->set_seconds(10);\n  envoy::config::route::v3::HeaderMatcher& header = *config.add_headers();\n  header.set_name(\":path\");\n  header.set_exact_match(\"foo\");\n\n  EXPECT_THROW(\n      healthCheckFilterConfig.createFilterFactoryFromProto(config, \"dummy_stats_prefix\", context),\n      EnvoyException);\n}\n\nTEST(HealthCheckFilterConfig, NotFailingWhenNotPassThroughAndTimeoutNotSetProto) {\n  HealthCheckFilterConfig healthCheckFilterConfig;\n  envoy::extensions::filters::http::health_check::v3::HealthCheck config{};\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  
config.mutable_pass_through_mode()->set_value(false);\n  envoy::config::route::v3::HeaderMatcher& header = *config.add_headers();\n  header.set_name(\":path\");\n  header.set_exact_match(\"foo\");\n  healthCheckFilterConfig.createFilterFactoryFromProto(config, \"dummy_stats_prefix\", context);\n}\n\nTEST(HealthCheckFilterConfig, HealthCheckFilterWithEmptyProto) {\n  HealthCheckFilterConfig healthCheckFilterConfig;\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  envoy::extensions::filters::http::health_check::v3::HealthCheck config =\n      *dynamic_cast<envoy::extensions::filters::http::health_check::v3::HealthCheck*>(\n          healthCheckFilterConfig.createEmptyConfigProto().get());\n\n  config.mutable_pass_through_mode()->set_value(false);\n  envoy::config::route::v3::HeaderMatcher& header = *config.add_headers();\n  header.set_name(\":path\");\n  header.set_exact_match(\"foo\");\n  healthCheckFilterConfig.createFilterFactoryFromProto(config, \"dummy_stats_prefix\", context);\n}\n\nvoid testHealthCheckHeaderMatch(\n    const envoy::extensions::filters::http::health_check::v3::HealthCheck& input_config,\n    Http::TestRequestHeaderMapImpl& input_headers, bool expect_health_check_response) {\n  HealthCheckFilterConfig healthCheckFilterConfig;\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  ProtobufTypes::MessagePtr config_msg = healthCheckFilterConfig.createEmptyConfigProto();\n  auto config = dynamic_cast<envoy::extensions::filters::http::health_check::v3::HealthCheck*>(\n      config_msg.get());\n  ASSERT_NE(config, nullptr);\n\n  *config = input_config;\n\n  Http::FilterFactoryCb cb =\n      healthCheckFilterConfig.createFilterFactoryFromProto(*config, \"dummy_stats_prefix\", context);\n\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  Http::StreamFilterSharedPtr health_check_filter;\n  EXPECT_CALL(filter_callbacks, addStreamFilter(_))\n      
.WillRepeatedly(Invoke([&health_check_filter](Http::StreamFilterSharedPtr filter) {\n        health_check_filter = filter;\n      }));\n\n  cb(filter_callbacks);\n  ASSERT_NE(health_check_filter, nullptr);\n\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;\n  health_check_filter->setDecoderFilterCallbacks(decoder_callbacks);\n\n  if (expect_health_check_response) {\n    // Expect that the filter intercepts this request because all headers match.\n    Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"200\"}};\n    EXPECT_CALL(decoder_callbacks, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true));\n    EXPECT_EQ(health_check_filter->decodeHeaders(input_headers, true),\n              Http::FilterHeadersStatus::StopIteration);\n  } else {\n    EXPECT_EQ(health_check_filter->decodeHeaders(input_headers, true),\n              Http::FilterHeadersStatus::Continue);\n  }\n}\n\n// Basic header match with two conditions should match if both conditions are satisfied.\nTEST(HealthCheckFilterConfig, HealthCheckFilterHeaderMatch) {\n  envoy::extensions::filters::http::health_check::v3::HealthCheck config;\n\n  config.mutable_pass_through_mode()->set_value(false);\n\n  envoy::config::route::v3::HeaderMatcher& xheader = *config.add_headers();\n  xheader.set_name(\"x-healthcheck\");\n\n  envoy::config::route::v3::HeaderMatcher& yheader = *config.add_headers();\n  yheader.set_name(\"y-healthcheck\");\n  yheader.set_exact_match(\"foo\");\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-healthcheck\", \"arbitrary_value\"},\n                                         {\"y-healthcheck\", \"foo\"}};\n\n  testHealthCheckHeaderMatch(config, headers, true);\n}\n\n// The match should fail if a single header value fails to match.\nTEST(HealthCheckFilterConfig, HealthCheckFilterHeaderMatchWrongValue) {\n  envoy::extensions::filters::http::health_check::v3::HealthCheck config;\n\n  config.mutable_pass_through_mode()->set_value(false);\n\n  
envoy::config::route::v3::HeaderMatcher& xheader = *config.add_headers();\n  xheader.set_name(\"x-healthcheck\");\n\n  envoy::config::route::v3::HeaderMatcher& yheader = *config.add_headers();\n  yheader.set_name(\"y-healthcheck\");\n  yheader.set_exact_match(\"foo\");\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-healthcheck\", \"arbitrary_value\"},\n                                         {\"y-healthcheck\", \"bar\"}};\n\n  testHealthCheckHeaderMatch(config, headers, false);\n}\n\n// If either of the specified headers is completely missing the match should fail.\nTEST(HealthCheckFilterConfig, HealthCheckFilterHeaderMatchMissingHeader) {\n  envoy::extensions::filters::http::health_check::v3::HealthCheck config;\n\n  config.mutable_pass_through_mode()->set_value(false);\n\n  envoy::config::route::v3::HeaderMatcher& xheader = *config.add_headers();\n  xheader.set_name(\"x-healthcheck\");\n\n  envoy::config::route::v3::HeaderMatcher& yheader = *config.add_headers();\n  yheader.set_name(\"y-healthcheck\");\n  yheader.set_exact_match(\"foo\");\n\n  Http::TestRequestHeaderMapImpl headers{{\"y-healthcheck\", \"foo\"}};\n\n  testHealthCheckHeaderMatch(config, headers, false);\n}\n\n// Conditions for the same header should match if they are both satisfied.\nTEST(HealthCheckFilterConfig, HealthCheckFilterDuplicateMatch) {\n  envoy::extensions::filters::http::health_check::v3::HealthCheck config;\n\n  config.mutable_pass_through_mode()->set_value(false);\n\n  envoy::config::route::v3::HeaderMatcher& header = *config.add_headers();\n  header.set_name(\"x-healthcheck\");\n  header.set_exact_match(\"foo\");\n\n  envoy::config::route::v3::HeaderMatcher& dup_header = *config.add_headers();\n  dup_header.set_name(\"x-healthcheck\");\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-healthcheck\", \"foo\"}};\n\n  testHealthCheckHeaderMatch(config, headers, true);\n}\n\n// Conditions on the same header should not match if one or more is not 
satisfied.\nTEST(HealthCheckFilterConfig, HealthCheckFilterDuplicateNoMatch) {\n  envoy::extensions::filters::http::health_check::v3::HealthCheck config;\n\n  config.mutable_pass_through_mode()->set_value(false);\n\n  envoy::config::route::v3::HeaderMatcher& header = *config.add_headers();\n  header.set_name(\"x-healthcheck\");\n  header.set_exact_match(\"foo\");\n\n  envoy::config::route::v3::HeaderMatcher& dup_header = *config.add_headers();\n  dup_header.set_name(\"x-healthcheck\");\n  dup_header.set_exact_match(\"bar\");\n\n  Http::TestRequestHeaderMapImpl headers{{\"x-healthcheck\", \"foo\"}};\n\n  testHealthCheckHeaderMatch(config, headers, false);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(HealthCheckFilterConfig, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.health_check\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace HealthCheck\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/health_check/health_check_test.cc",
    "content": "#include <chrono>\n#include <memory>\n\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/header_utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/filters/http/health_check/health_check.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace HealthCheck {\nnamespace {\n\nclass HealthCheckFilterTest : public testing::Test {\npublic:\n  HealthCheckFilterTest(bool pass_through, bool caching)\n      : request_headers_{{\":path\", \"/healthcheck\"}}, request_headers_no_hc_{{\":path\", \"/foo\"}} {\n\n    if (caching) {\n      cache_timer_ = new Event::MockTimer(&dispatcher_);\n      EXPECT_CALL(*cache_timer_, enableTimer(_, _));\n      cache_manager_ =\n          std::make_shared<HealthCheckCacheManager>(dispatcher_, std::chrono::milliseconds(1));\n    }\n\n    prepareFilter(pass_through);\n  }\n\n  void prepareFilter(\n      bool pass_through,\n      ClusterMinHealthyPercentagesConstSharedPtr cluster_min_healthy_percentages = nullptr) {\n    header_data_ = std::make_shared<std::vector<Http::HeaderUtility::HeaderDataPtr>>();\n    envoy::config::route::v3::HeaderMatcher matcher;\n    matcher.set_name(\":path\");\n    matcher.set_exact_match(\"/healthcheck\");\n    header_data_->emplace_back(std::make_unique<Http::HeaderUtility::HeaderData>(matcher));\n    filter_ = std::make_unique<HealthCheckFilter>(context_, pass_through, cache_manager_,\n                                                  header_data_, cluster_min_healthy_percentages);\n    
filter_->setDecoderFilterCallbacks(callbacks_);\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  Event::MockTimer* cache_timer_{};\n  Event::MockDispatcher dispatcher_;\n  HealthCheckCacheManagerSharedPtr cache_manager_;\n  std::unique_ptr<HealthCheckFilter> filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Http::TestRequestHeaderMapImpl request_headers_no_hc_;\n  HeaderDataVectorSharedPtr header_data_;\n\n  class MockHealthCheckCluster : public NiceMock<Upstream::MockThreadLocalCluster> {\n  public:\n    MockHealthCheckCluster(uint64_t membership_total, uint64_t membership_healthy,\n                           uint64_t membership_degraded = 0) {\n      info()->stats().membership_total_.set(membership_total);\n      info()->stats().membership_healthy_.set(membership_healthy);\n      info()->stats().membership_degraded_.set(membership_degraded);\n    }\n  };\n};\n\nclass HealthCheckFilterNoPassThroughTest : public HealthCheckFilterTest {\npublic:\n  HealthCheckFilterNoPassThroughTest() : HealthCheckFilterTest(false, false) {}\n};\n\nclass HealthCheckFilterPassThroughTest : public HealthCheckFilterTest {\npublic:\n  HealthCheckFilterPassThroughTest() : HealthCheckFilterTest(true, false) {}\n};\n\nclass HealthCheckFilterCachingTest : public HealthCheckFilterTest {\npublic:\n  HealthCheckFilterCachingTest() : HealthCheckFilterTest(true, true) {}\n};\n\nTEST_F(HealthCheckFilterNoPassThroughTest, OkOrFailed) {\n  EXPECT_CALL(context_, healthCheckFailed()).Times(0);\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(true));\n  EXPECT_CALL(callbacks_.active_span_, setSampled(false));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, 
filter_->decodeMetadata(metadata_map));\n}\n\nTEST_F(HealthCheckFilterNoPassThroughTest, NotHcRequest) {\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(_)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->decodeHeaders(request_headers_no_hc_, true));\n\n  Http::TestResponseHeaderMapImpl service_response{{\":status\", \"200\"}};\n  EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(true));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_response, false));\n  Buffer::OwnedImpl body;\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(body, false));\n  Http::TestResponseTrailerMapImpl response_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers));\n  EXPECT_EQ(\"true\", service_response.getEnvoyImmediateHealthCheckFailValue());\n}\n\nTEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) {\n  // Test non-pass-through health checks without upstream cluster minimum health specified.\n  prepareFilter(false);\n  {\n    Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"200\"}};\n    EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(false));\n    EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true));\n    EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n              filter_->decodeHeaders(request_headers_, true));\n    EXPECT_EQ(\"health_check_ok\", callbacks_.details());\n  }\n  {\n    Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"503\"}};\n    EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(true));\n    EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true));\n    EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n              filter_->decodeHeaders(request_headers_, true));\n    EXPECT_EQ(\"health_check_failed\", callbacks_.details());\n  }\n\n  // Test non-pass-through health checks 
with upstream cluster minimum health specified.\n  prepareFilter(false, ClusterMinHealthyPercentagesConstSharedPtr(\n                           new ClusterMinHealthyPercentages{{\"www1\", 50.0}, {\"www2\", 75.0}}));\n  {\n    // This should pass, because each upstream cluster has at least the\n    // minimum percentage of healthy servers.\n    Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"200\"}};\n    MockHealthCheckCluster cluster_www1(100, 50);\n    MockHealthCheckCluster cluster_www2(1000, 800);\n    EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(false));\n    EXPECT_CALL(context_, clusterManager());\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www1\"))).WillRepeatedly(Return(&cluster_www1));\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www2\"))).WillRepeatedly(Return(&cluster_www2));\n    EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true));\n    EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n              filter_->decodeHeaders(request_headers_, true));\n    EXPECT_EQ(\"health_check_ok_cluster_healthy\", callbacks_.details());\n  }\n  {\n    // This should fail, because one upstream cluster has too few healthy servers.\n    Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"503\"}};\n    MockHealthCheckCluster cluster_www1(100, 49);\n    MockHealthCheckCluster cluster_www2(1000, 800);\n    EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(false));\n    EXPECT_CALL(context_, clusterManager());\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www1\"))).WillRepeatedly(Return(&cluster_www1));\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www2\"))).WillRepeatedly(Return(&cluster_www2));\n    EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true));\n    EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n              filter_->decodeHeaders(request_headers_, true));\n    
EXPECT_EQ(\"health_check_failed_cluster_unhealthy\", callbacks_.details());\n  }\n  {\n    // This should fail, because one upstream cluster has no servers at all.\n    Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"503\"}};\n    MockHealthCheckCluster cluster_www1(0, 0);\n    MockHealthCheckCluster cluster_www2(1000, 800);\n    EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(false));\n    EXPECT_CALL(context_, clusterManager());\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www1\"))).WillRepeatedly(Return(&cluster_www1));\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www2\"))).WillRepeatedly(Return(&cluster_www2));\n    EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true));\n    EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n              filter_->decodeHeaders(request_headers_, true));\n    EXPECT_EQ(\"health_check_failed_cluster_empty\", callbacks_.details());\n  }\n  // Test the cases where an upstream cluster is empty, or has no healthy servers, but\n  // the minimum required percent healthy is zero. 
The health check should return a 200.\n  prepareFilter(false, ClusterMinHealthyPercentagesConstSharedPtr(\n                           new ClusterMinHealthyPercentages{{\"www1\", 0.0}, {\"www2\", 0.0}}));\n  {\n    Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"200\"}};\n    MockHealthCheckCluster cluster_www1(0, 0);\n    MockHealthCheckCluster cluster_www2(1000, 0);\n    EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(false));\n    EXPECT_CALL(context_, clusterManager());\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www1\"))).WillRepeatedly(Return(&cluster_www1));\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www2\"))).WillRepeatedly(Return(&cluster_www2));\n    EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true));\n    EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n              filter_->decodeHeaders(request_headers_, true));\n    EXPECT_EQ(\"health_check_ok_cluster_healthy\", callbacks_.details());\n  }\n  {\n    // This should succeed, because each cluster has degraded + healthy hosts greater than the\n    // threshold.\n    Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"200\"}};\n    MockHealthCheckCluster cluster_www1(100, 40, 20);\n    MockHealthCheckCluster cluster_www2(1000, 0, 800);\n    EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(false));\n    EXPECT_CALL(context_, clusterManager());\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www1\"))).WillRepeatedly(Return(&cluster_www1));\n    EXPECT_CALL(context_.cluster_manager_, get(Eq(\"www2\"))).WillRepeatedly(Return(&cluster_www2));\n    EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true));\n    EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n              filter_->decodeHeaders(request_headers_, true));\n    EXPECT_EQ(\"health_check_ok_cluster_healthy\", callbacks_.details());\n  }\n}\n\nTEST_F(HealthCheckFilterNoPassThroughTest, 
HealthCheckFailedCallbackCalled) {\n  EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(true));\n  EXPECT_CALL(callbacks_.active_span_, setSampled(false));\n  Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"503\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true))\n      .Times(1)\n      .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) {\n        filter_->encodeHeaders(headers, end_stream);\n        EXPECT_EQ(\"cluster_name\", headers.getEnvoyUpstreamHealthCheckedClusterValue());\n        EXPECT_EQ(nullptr, headers.EnvoyImmediateHealthCheckFail());\n      }));\n\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers));\n}\n\nTEST_F(HealthCheckFilterPassThroughTest, Ok) {\n  EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(false));\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(true));\n  EXPECT_CALL(callbacks_.active_span_, setSampled(false));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n\n  Http::TestResponseHeaderMapImpl service_hc_response{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_response, true));\n  EXPECT_EQ(\"cluster_name\", service_hc_response.getEnvoyUpstreamHealthCheckedClusterValue());\n}\n\nTEST_F(HealthCheckFilterPassThroughTest, 
OkWithContinue) {\n  EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(false));\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(true));\n  EXPECT_CALL(callbacks_.active_span_, setSampled(false));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n\n  // Goodness only knows why there would be a 100-Continue response in health\n  // checks but we can still verify Envoy handles it.\n  Http::TestResponseHeaderMapImpl continue_response{{\":status\", \"100\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(continue_response));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map));\n  Http::TestResponseHeaderMapImpl service_hc_response{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_response, true));\n  EXPECT_EQ(\"cluster_name\", service_hc_response.getEnvoyUpstreamHealthCheckedClusterValue());\n}\n\nTEST_F(HealthCheckFilterPassThroughTest, Failed) {\n  EXPECT_CALL(context_, healthCheckFailed()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(true));\n  EXPECT_CALL(callbacks_.active_span_, setSampled(false));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n}\n\nTEST_F(HealthCheckFilterPassThroughTest, NotHcRequest) {\n  EXPECT_CALL(context_, healthCheckFailed()).Times(0);\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(_)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->decodeHeaders(request_headers_no_hc_, true));\n}\n\nTEST_F(HealthCheckFilterCachingTest, CachedServiceUnavailableCallbackCalled) {\n  EXPECT_CALL(context_, healthCheckFailed()).WillRepeatedly(Return(false));\n  
EXPECT_CALL(callbacks_.stream_info_, healthCheck(true));\n  EXPECT_CALL(callbacks_.active_span_, setSampled(false));\n  cache_manager_->setCachedResponse(Http::Code::ServiceUnavailable, false);\n\n  Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"503\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true))\n      .Times(1)\n      .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) {\n        filter_->encodeHeaders(headers, end_stream);\n        EXPECT_EQ(\"cluster_name\", headers.getEnvoyUpstreamHealthCheckedClusterValue());\n      }));\n\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, true));\n}\n\nTEST_F(HealthCheckFilterCachingTest, CachedOkCallbackNotCalled) {\n  EXPECT_CALL(context_, healthCheckFailed()).WillRepeatedly(Return(false));\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(true));\n  EXPECT_CALL(callbacks_.active_span_, setSampled(false));\n  cache_manager_->setCachedResponse(Http::Code::OK, false);\n\n  Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"200\"}};\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true))\n      .Times(1)\n      .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) {\n        filter_->encodeHeaders(headers, end_stream);\n        EXPECT_EQ(\"cluster_name\", headers.getEnvoyUpstreamHealthCheckedClusterValue());\n      }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, true));\n  EXPECT_EQ(\"health_check_cached\", callbacks_.details());\n}\n\nTEST_F(HealthCheckFilterCachingTest, All) {\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(true)).Times(3);\n  EXPECT_CALL(callbacks_.active_span_, 
setSampled(false)).Times(3);\n\n  // Verify that the first request goes through.\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, true));\n  Http::TestResponseHeaderMapImpl service_response_headers{{\":status\", \"503\"}};\n  Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"503\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encodeHeaders(service_response_headers, true));\n\n  // Verify that the next request uses the cached value without setting the degraded header.\n  prepareFilter(true);\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck));\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true))\n      .Times(1)\n      .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) {\n        filter_->encodeHeaders(headers, end_stream);\n        EXPECT_EQ(\"cluster_name\", headers.getEnvoyUpstreamHealthCheckedClusterValue());\n      }));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, true));\n\n  // Fire the timer, this should result in the next request going through.\n  EXPECT_CALL(*cache_timer_, enableTimer(_, _));\n  cache_timer_->invokeCallback();\n  prepareFilter(true);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, true));\n}\n\nTEST_F(HealthCheckFilterCachingTest, DegradedHeader) {\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(true)).Times(3);\n  EXPECT_CALL(callbacks_.active_span_, setSampled(false)).Times(3);\n\n  // Verify that the first request goes through.\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, true));\n  Http::TestResponseHeaderMapImpl service_response_headers{{\":status\", \"503\"},\n                                                           {\"x-envoy-degraded\", \"true\"}};\n  
Http::TestResponseHeaderMapImpl health_check_response{{\":status\", \"503\"},\n                                                        {\"x-envoy-degraded\", \"\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encodeHeaders(service_response_headers, true));\n\n  // Verify that the next request uses the cached value and that the x-envoy-degraded header is set.\n  prepareFilter(true);\n  EXPECT_CALL(callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck));\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true))\n      .Times(1)\n      .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) {\n        filter_->encodeHeaders(headers, end_stream);\n        EXPECT_EQ(\"cluster_name\", headers.getEnvoyUpstreamHealthCheckedClusterValue());\n      }));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, true));\n\n  // Fire the timer, this should result in the next request going through.\n  EXPECT_CALL(*cache_timer_, enableTimer(_, _));\n  cache_timer_->invokeCallback();\n  prepareFilter(true);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, true));\n}\n\nTEST_F(HealthCheckFilterCachingTest, NotHcRequest) {\n  EXPECT_CALL(context_, healthCheckFailed()).Times(0);\n  EXPECT_CALL(callbacks_.stream_info_, healthCheck(_)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->decodeHeaders(request_headers_no_hc_, true));\n}\n\n} // namespace\n} // namespace HealthCheck\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/ip_tagging/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"ip_tagging_filter_test\",\n    srcs = [\"ip_tagging_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.ip_tagging\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/filters/http/ip_tagging:config\",\n        \"//source/extensions/filters/http/ip_tagging:ip_tagging_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/ip_tagging/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/http/ip_tagging/ip_tagging_filter.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace IpTagging {\nnamespace {\n\nclass IpTaggingFilterTest : public testing::Test {\npublic:\n  IpTaggingFilterTest() {\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"ip_tagging.http_filter_enabled\", 100))\n        .WillByDefault(Return(true));\n  }\n\n  const std::string internal_request_yaml = R\"EOF(\nrequest_type: internal\nip_tags:\n  - ip_tag_name: internal_request\n    ip_list:\n      - {address_prefix: 1.2.3.5, prefix_len: 32}\n\n)EOF\";\n\n  void initializeFilter(const std::string& yaml) {\n    envoy::extensions::filters::http::ip_tagging::v3::IPTagging config;\n    TestUtility::loadFromYaml(yaml, config);\n    config_ = std::make_shared<IpTaggingFilterConfig>(config, \"prefix.\", stats_, runtime_);\n    filter_ = std::make_unique<IpTaggingFilter>(config_);\n    filter_->setDecoderFilterCallbacks(filter_callbacks_);\n  }\n\n  ~IpTaggingFilterTest() override { filter_->onDestroy(); }\n\n  NiceMock<Stats::MockStore> stats_;\n  IpTaggingFilterConfigSharedPtr config_;\n  std::unique_ptr<IpTaggingFilter> filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> filter_callbacks_;\n  Buffer::OwnedImpl data_;\n  NiceMock<Runtime::MockLoader> runtime_;\n};\n\nTEST_F(IpTaggingFilterTest, InternalRequest) {\n  initializeFilter(internal_request_yaml);\n  EXPECT_EQ(FilterRequestType::INTERNAL, 
config_->requestType());\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"true\"}};\n\n  Network::Address::InstanceConstSharedPtr remote_address =\n      Network::Utility::parseInternetAddress(\"1.2.3.5\");\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress())\n      .WillOnce(ReturnRef(remote_address));\n\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.internal_request.hit\")).Times(1);\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.total\")).Times(1);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"internal_request\", request_headers.get_(Http::Headers::get().EnvoyIpTags));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n\n  // Check external requests don't get a tag.\n  request_headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags));\n}\n\nTEST_F(IpTaggingFilterTest, ExternalRequest) {\n  const std::string external_request_yaml = R\"EOF(\nrequest_type: external\nip_tags:\n  - ip_tag_name: external_request\n    ip_list:\n      - {address_prefix: 1.2.3.4, prefix_len: 32}\n)EOF\";\n  initializeFilter(external_request_yaml);\n  EXPECT_EQ(FilterRequestType::EXTERNAL, config_->requestType());\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.total\")).Times(1);\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.external_request.hit\")).Times(1);\n\n  Network::Address::InstanceConstSharedPtr remote_address =\n      Network::Utility::parseInternetAddress(\"1.2.3.4\");\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress())\n      
.WillOnce(ReturnRef(remote_address));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"external_request\", request_headers.get_(Http::Headers::get().EnvoyIpTags));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n\n  // Check internal requests don't get a tag.\n  request_headers = {{\"x-envoy-internal\", \"true\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags));\n}\n\nTEST_F(IpTaggingFilterTest, BothRequest) {\n  const std::string both_request_yaml = R\"EOF(\nrequest_type: both\nip_tags:\n  - ip_tag_name: external_request\n    ip_list:\n      - {address_prefix: 1.2.3.4, prefix_len: 32}\n  - ip_tag_name: internal_request\n    ip_list:\n      - {address_prefix: 1.2.3.5, prefix_len: 32}\n)EOF\";\n\n  initializeFilter(both_request_yaml);\n  EXPECT_EQ(FilterRequestType::BOTH, config_->requestType());\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"true\"}};\n\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.total\")).Times(2);\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.internal_request.hit\")).Times(1);\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.external_request.hit\")).Times(1);\n\n  Network::Address::InstanceConstSharedPtr remote_address =\n      Network::Utility::parseInternetAddress(\"1.2.3.5\");\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress())\n      .WillOnce(ReturnRef(remote_address));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"internal_request\", request_headers.get_(Http::Headers::get().EnvoyIpTags));\n\n  request_headers = 
Http::TestRequestHeaderMapImpl{};\n  remote_address = Network::Utility::parseInternetAddress(\"1.2.3.4\");\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress())\n      .WillOnce(ReturnRef(remote_address));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"external_request\", request_headers.get_(Http::Headers::get().EnvoyIpTags));\n}\n\nTEST_F(IpTaggingFilterTest, NoHits) {\n  initializeFilter(internal_request_yaml);\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"true\"}};\n\n  Network::Address::InstanceConstSharedPtr remote_address =\n      Network::Utility::parseInternetAddress(\"10.2.3.5\");\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress())\n      .WillOnce(ReturnRef(remote_address));\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.no_hit\")).Times(1);\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.total\")).Times(1);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\nTEST_F(IpTaggingFilterTest, AppendEntry) {\n  initializeFilter(internal_request_yaml);\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"true\"},\n                                                 {\"x-envoy-ip-tags\", \"test\"}};\n\n  Network::Address::InstanceConstSharedPtr remote_address =\n      Network::Utility::parseInternetAddress(\"1.2.3.5\");\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress())\n      .WillOnce(ReturnRef(remote_address));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  
EXPECT_EQ(\"test,internal_request\", request_headers.get_(Http::Headers::get().EnvoyIpTags));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\nTEST_F(IpTaggingFilterTest, NestedPrefixes) {\n  const std::string duplicate_request_yaml = R\"EOF(\nrequest_type: both\nip_tags:\n  - ip_tag_name: duplicate_request\n    ip_list:\n      - {address_prefix: 1.2.3.4, prefix_len: 32}\n  - ip_tag_name: internal_request\n    ip_list:\n      - {address_prefix: 1.2.3.4, prefix_len: 32}\n)EOF\";\n\n  initializeFilter(duplicate_request_yaml);\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"true\"},\n                                                 {\"x-envoy-ip-tags\", \"test\"}};\n\n  Network::Address::InstanceConstSharedPtr remote_address =\n      Network::Utility::parseInternetAddress(\"1.2.3.4\");\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress())\n      .WillOnce(ReturnRef(remote_address));\n\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.total\")).Times(1);\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.internal_request.hit\")).Times(1);\n  EXPECT_CALL(stats_, counter(\"prefix.ip_tagging.duplicate_request.hit\")).Times(1);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  // There is no guarantee for the order tags are returned by the LC-Trie.\n  const std::string header_tag_data = request_headers.get_(Http::Headers::get().EnvoyIpTags.get());\n  EXPECT_NE(std::string::npos, header_tag_data.find(\"test\"));\n  EXPECT_NE(std::string::npos, header_tag_data.find(\"internal_request\"));\n  EXPECT_NE(std::string::npos, header_tag_data.find(\"duplicate_request\"));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  Http::TestRequestTrailerMapImpl 
request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\nTEST_F(IpTaggingFilterTest, Ipv6Address) {\n  const std::string ipv6_addresses_yaml = R\"EOF(\nip_tags:\n  - ip_tag_name: ipv6_request\n    ip_list:\n      - {address_prefix: 2001:abcd:ef01:2345:6789:abcd:ef01:234, prefix_len: 64}\n)EOF\";\n  initializeFilter(ipv6_addresses_yaml);\n  Http::TestRequestHeaderMapImpl request_headers;\n\n  Network::Address::InstanceConstSharedPtr remote_address =\n      Network::Utility::parseInternetAddress(\"2001:abcd:ef01:2345::1\");\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress())\n      .WillOnce(ReturnRef(remote_address));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"ipv6_request\", request_headers.get_(Http::Headers::get().EnvoyIpTags));\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\nTEST_F(IpTaggingFilterTest, RuntimeDisabled) {\n  initializeFilter(internal_request_yaml);\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"true\"}};\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"ip_tagging.http_filter_enabled\", 100))\n      .WillOnce(Return(false));\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\nTEST_F(IpTaggingFilterTest, ClearRouteCache) {\n  
initializeFilter(internal_request_yaml);\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"true\"}};\n\n  Network::Address::InstanceConstSharedPtr remote_address =\n      Network::Utility::parseInternetAddress(\"1.2.3.5\");\n  EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress())\n      .WillOnce(ReturnRef(remote_address));\n\n  EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(1);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(\"internal_request\", request_headers.get_(Http::Headers::get().EnvoyIpTags));\n\n  // no tags, no call\n  EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(0);\n  request_headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags));\n}\n\n// Test that the deprecated extension name still functions.\nTEST(IpTaggingFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.ip_tagging\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace IpTagging\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"test_common_lib\",\n    hdrs = [\"test_common.h\"],\n)\n\nenvoy_cc_mock(\n    name = \"mock_lib\",\n    hdrs = [\"mock.h\"],\n    deps = [\n        \"//source/common/http:message_lib\",\n        \"//source/extensions/filters/http/jwt_authn:authenticator_lib\",\n        \"//source/extensions/filters/http/jwt_authn:verifier_lib\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"extractor_test\",\n    srcs = [\"extractor_test.cc\"],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \"//source/extensions/filters/http/jwt_authn:extractor_lib\",\n        \"//test/extensions/filters/http/jwt_authn:test_common_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"filter_test\",\n    srcs = [\"filter_test.cc\"],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \":mock_lib\",\n        \"//source/extensions/filters/http/jwt_authn:filter_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"filter_config_test\",\n    srcs = [\"filter_config_test.cc\"],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \"//source/common/router:string_accessor_lib\",\n        \"//source/common/stream_info:filter_state_lib\",\n        \"//source/extensions/filters/http/jwt_authn:config\",\n        \"//test/extensions/filters/http/jwt_authn:test_common_lib\",\n  
      \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"filter_factory_test\",\n    srcs = [\"filter_factory_test.cc\"],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \"//source/extensions/filters/http/jwt_authn:config\",\n        \"//test/extensions/filters/http/jwt_authn:test_common_lib\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"jwks_cache_test\",\n    srcs = [\"jwks_cache_test.cc\"],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/filters/http/common:jwks_fetcher_lib\",\n        \"//source/extensions/filters/http/jwt_authn:jwks_cache_lib\",\n        \"//test/extensions/filters/http/jwt_authn:test_common_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"authenticator_test\",\n    srcs = [\"authenticator_test.cc\"],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \":mock_lib\",\n        \"//source/extensions/filters/http/common:jwks_fetcher_lib\",\n        \"//source/extensions/filters/http/jwt_authn:authenticator_lib\",\n        \"//source/extensions/filters/http/jwt_authn:filter_config_interface\",\n        \"//source/extensions/filters/http/jwt_authn:matchers_lib\",\n        \"//test/extensions/filters/http/common:mock_lib\",\n        \"//test/extensions/filters/http/jwt_authn:test_common_lib\",\n        \"//test/mocks/server:factory_context_mocks\",\n        
\"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"filter_integration_test\",\n    srcs = [\"filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \"//source/common/router:string_accessor_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//source/extensions/filters/http/jwt_authn:config\",\n        \"//test/config:utility_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n        \"//test/extensions/filters/http/jwt_authn:test_common_lib\",\n        \"//test/integration:http_protocol_integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"matcher_test\",\n    srcs = [\"matcher_test.cc\"],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \":mock_lib\",\n        \":test_common_lib\",\n        \"//source/extensions/filters/http/jwt_authn:matchers_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"group_verifier_test\",\n    srcs = [\"group_verifier_test.cc\"],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \":mock_lib\",\n        \":test_common_lib\",\n        \"//source/extensions/filters/http/jwt_authn:verifier_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = 
\"verifier_test\",\n    srcs = [\n        \"all_verifier_test.cc\",\n        \"provider_verifier_test.cc\",\n    ],\n    extension_name = \"envoy.filters.http.jwt_authn\",\n    deps = [\n        \":mock_lib\",\n        \":test_common_lib\",\n        \"//source/extensions/filters/http/jwt_authn:filter_config_interface\",\n        \"//source/extensions/filters/http/jwt_authn:matchers_lib\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/all_verifier_test.cc",
    "content": "#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"extensions/filters/http/jwt_authn/filter_config.h\"\n#include \"extensions/filters/http/jwt_authn/verifier.h\"\n\n#include \"test/extensions/filters/http/jwt_authn/mock.h\"\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"gmock/gmock.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing ::google::jwt_verify::Status;\nusing ::testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nconstexpr char kConfigTemplate[] = R\"(\nproviders:\n  example_provider:\n    issuer: https://example.com\n    from_headers:\n    - name: \"x-example\"\n      value_prefix: \"\"\n    forward_payload_header: \"x-example-payload\"\n    local_jwks:\n      inline_string: \"\"\n  other_provider:\n    issuer: https://other.com\n    from_headers:\n    - name: \"x-other\"\n      value_prefix: \"\"\n    forward_payload_header: \"x-other-payload\"\n    local_jwks:\n      inline_string: \"\"\nrules:\n- match:\n    path: \"/\"\n)\";\n\nconstexpr char kExampleHeader[] = \"x-example\";\nconstexpr char kOtherHeader[] = \"x-other\";\n\n// Returns true if the jwt_header is empty, and the jwt_header payload exists.\n// Based on the JWT provider setup for this test, this matcher is equivalent to JWT verification\n// was success.\nMATCHER_P(JwtOutputSuccess, jwt_header, \"\") {\n  auto payload_header = absl::StrCat(jwt_header, \"-payload\");\n  return !arg.has(std::string(jwt_header)) && arg.has(payload_header);\n}\n\n// Returns true if the jwt_header exists, and the jwt_header payload is empty.\n// Based on the JWT provider setup for this test, this matcher is equivalent to JWT verification\n// was failed.\nMATCHER_P(JwtOutputFailedOrIgnore, 
jwt_header, \"\") {\n  auto payload_header = absl::StrCat(jwt_header, \"-payload\");\n  return arg.has(std::string(jwt_header)) && !arg.has(payload_header);\n}\n\nclass AllVerifierTest : public testing::Test {\npublic:\n  void SetUp() override {\n    TestUtility::loadFromYaml(kConfigTemplate, proto_config_);\n    for (auto& it : *(proto_config_.mutable_providers())) {\n      it.second.mutable_local_jwks()->set_inline_string(PublicKey);\n    }\n  }\n\n  void createVerifier() {\n    filter_config_ = FilterConfigImpl::create(proto_config_, \"\", mock_factory_ctx_);\n    verifier_ = Verifier::create(proto_config_.rules(0).requires(), proto_config_.providers(),\n                                 *filter_config_);\n  }\n\n  void modifyRequirement(const std::string& yaml) {\n    TestUtility::loadFromYaml(yaml, *proto_config_.mutable_rules(0)->mutable_requires());\n  }\n\n  JwtAuthentication proto_config_;\n  std::shared_ptr<FilterConfigImpl> filter_config_;\n  VerifierConstPtr verifier_;\n  NiceMock<Server::Configuration::MockFactoryContext> mock_factory_ctx_;\n  ContextSharedPtr context_;\n  MockVerifierCallbacks mock_cb_;\n  NiceMock<Tracing::MockSpan> parent_span_;\n};\n\n// tests rule that is just match no requires.\nTEST_F(AllVerifierTest, TestAllAllow) {\n  createVerifier();\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(2);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, \"a\"}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\n// tests requires allow missing or failed. 
The `allow_missing_or_failed` is defined in a single\n// requirement by itself.\nclass AllowFailedInSingleRequirementTest : public AllVerifierTest {\nprotected:\n  void SetUp() override {\n    AllVerifierTest::SetUp();\n    proto_config_.mutable_rules(0)->mutable_requires()->mutable_allow_missing_or_failed();\n    createVerifier();\n  }\n};\n\nTEST_F(AllowFailedInSingleRequirementTest, NoJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\nTEST_F(AllowFailedInSingleRequirementTest, BadJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ExpiredToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n}\n\nTEST_F(AllowFailedInSingleRequirementTest, MissingIssToken) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ES256WithoutIssToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n}\n\nTEST_F(AllowFailedInSingleRequirementTest, OneGoodJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  // As requirement has nothing except allow_missing_or_failed, it will\n  // not try to check any token.\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n}\n\nTEST_F(AllowFailedInSingleRequirementTest, TwoGoodJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers =\n      
Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\nTEST_F(AllowFailedInSingleRequirementTest, GoodAndBadJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers =\n      Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, ExpiredToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\n// The `allow_missing_or_failed` is defined in an OR-list of requirements.\nclass AllowFailedInOrListTest : public AllVerifierTest {\nprotected:\n  void SetUp() override {\n    AllVerifierTest::SetUp();\n    const char allow_failed_yaml[] = R\"(\nrequires_any:\n  requirements:\n  - provider_name: \"example_provider\"\n  - allow_missing_or_failed: {}\n)\";\n    modifyRequirement(allow_failed_yaml);\n    createVerifier();\n  }\n};\n\nTEST_F(AllowFailedInOrListTest, NoJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\nTEST_F(AllowFailedInOrListTest, BadJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ExpiredToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n}\n\nTEST_F(AllowFailedInOrListTest, GoodAndBadJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers =\n   
   Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, NonExistKidToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\nTEST_F(AllowFailedInOrListTest, TwoGoodJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers =\n      Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n  // Note: the first (provider) requirement is satisfied, so the allow_missing_or_failed has not\n  // kicked in yet.\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\nTEST_F(AllowFailedInOrListTest, BadAndGoodJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ExpiredToken},\n                                                {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n  // Token in x-other is not required, so it will be ignore.\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\n// The `allow_missing_or_failed` is defined in an AND-list of requirements.\nclass AllowFailedInAndListTest : public AllVerifierTest {\nprotected:\n  void SetUp() override {\n    AllVerifierTest::SetUp();\n    const char allow_failed_yaml[] = R\"(\nrequires_all:\n  requirements:\n  - provider_name: \"example_provider\"\n  - allow_missing_or_failed: {}\n)\";\n    modifyRequirement(allow_failed_yaml);\n    createVerifier();\n  }\n};\n\nTEST_F(AllowFailedInAndListTest, NoJwt) {\n  EXPECT_CALL(mock_cb_, 
onComplete(Status::JwtMissed)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\nTEST_F(AllowFailedInAndListTest, BadJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtExpired)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ExpiredToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n}\n\nTEST_F(AllowFailedInAndListTest, OneGoodJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {kExampleHeader, GoodToken},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n}\n\nTEST_F(AllowFailedInAndListTest, GoodAndBadJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers =\n      Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, NonExistKidToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n  // The bad, non-required token won't affect the verification status though.\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\nTEST_F(AllowFailedInAndListTest, TwoGoodJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers =\n      Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n  // The JWT in x-other is ignored.\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\nclass 
AllowFailedInAndOfOrListTest : public AllVerifierTest {\nprotected:\n  void SetUp() override {\n    AllVerifierTest::SetUp();\n    const char allow_failed_yaml[] = R\"(\nrequires_all:\n  requirements:\n  - requires_any:\n      requirements:\n      - provider_name: \"example_provider\"\n      - allow_missing_or_failed: {}\n  - requires_any:\n      requirements:\n      - provider_name: \"other_provider\"\n      - allow_missing_or_failed: {}\n)\";\n    modifyRequirement(allow_failed_yaml);\n    createVerifier();\n  }\n};\n\nTEST_F(AllowFailedInAndOfOrListTest, NoJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\nTEST_F(AllowFailedInAndOfOrListTest, BadJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ExpiredToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n}\n\nTEST_F(AllowFailedInAndOfOrListTest, OneGoodJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n}\n\nTEST_F(AllowFailedInAndOfOrListTest, OtherGoodJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kOtherHeader));\n}\n\nTEST_F(AllowFailedInAndOfOrListTest, BadAndGoodJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  
auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ExpiredToken},\n                                                {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n  EXPECT_THAT(headers, JwtOutputSuccess(kOtherHeader));\n}\n\nTEST_F(AllowFailedInAndOfOrListTest, TwoGoodJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers =\n      Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n  EXPECT_THAT(headers, JwtOutputSuccess(kOtherHeader));\n}\n\n// The `allow_missing` is defined in an OR-list of requirements.\nclass AllowMissingInOrListTest : public AllVerifierTest {\nprotected:\n  void SetUp() override {\n    AllVerifierTest::SetUp();\n    const char allow_failed_yaml[] = R\"(\nrequires_any:\n  requirements:\n  - provider_name: \"example_provider\"\n  - allow_missing: {}\n)\";\n    modifyRequirement(allow_failed_yaml);\n    createVerifier();\n  }\n};\n\nTEST_F(AllowMissingInOrListTest, NoJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\nTEST_F(AllowMissingInOrListTest, BadJwt) {\n  // Bad JWT should fail.\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtVerificationFail)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, NonExistKidToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n}\n\nTEST_F(AllowMissingInOrListTest, OtherGoodJwt) {\n  
EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  // x-other JWT should be ignored.\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\nTEST_F(AllowMissingInOrListTest, BadAndGoodJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtVerificationFail)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, NonExistKidToken},\n                                                {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n  // x-other JWT should be ignored.\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\nclass AllowMissingInAndListTest : public AllVerifierTest {\nprotected:\n  void SetUp() override {\n    AllVerifierTest::SetUp();\n    const char allow_failed_yaml[] = R\"(\nrequires_all:\n  requirements:\n  - provider_name: \"example_provider\"\n  - allow_missing: {}\n)\";\n    modifyRequirement(allow_failed_yaml);\n    createVerifier();\n  }\n};\n\nTEST_F(AllowMissingInAndListTest, NoJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtMissed)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\nTEST_F(AllowMissingInAndListTest, BadJwt) {\n  // Bad JWT should fail.\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtVerificationFail)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, NonExistKidToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n}\n\nTEST_F(AllowMissingInAndListTest, GoodJwt) 
{\n  // Bad JWT should fail.\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n}\n\nTEST_F(AllowMissingInAndListTest, TwoGoodJwts) {\n  // Bad JWT should fail.\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers =\n      Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\nclass AllowMissingInAndOfOrListTest : public AllVerifierTest {\nprotected:\n  void SetUp() override {\n    AllVerifierTest::SetUp();\n    const char allow_failed_yaml[] = R\"(\nrequires_all:\n  requirements:\n  - requires_any:\n      requirements:\n      - provider_name: \"example_provider\"\n      - allow_missing: {}\n  - requires_any:\n      requirements:\n      - provider_name: \"other_provider\"\n      - allow_missing: {}\n)\";\n    modifyRequirement(allow_failed_yaml);\n    createVerifier();\n  }\n};\n\nTEST_F(AllowMissingInAndOfOrListTest, NoJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\nTEST_F(AllowMissingInAndOfOrListTest, BadJwt) {\n  // Bad JWT should fail.\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtVerificationFail)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, NonExistKidToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, 
JwtOutputFailedOrIgnore(kExampleHeader));\n}\n\nTEST_F(AllowMissingInAndOfOrListTest, OneGoodJwt) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n}\n\nTEST_F(AllowMissingInAndOfOrListTest, TwoGoodJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers =\n      Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n  EXPECT_THAT(headers, JwtOutputSuccess(kOtherHeader));\n}\n\nTEST_F(AllowMissingInAndOfOrListTest, GoodAndBadJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtUnknownIssuer)).Times(1);\n  // Use the token with example.com issuer for x-other.\n  auto headers =\n      Http::TestRequestHeaderMapImpl{{kExampleHeader, GoodToken}, {kOtherHeader, GoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputSuccess(kExampleHeader));\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\nTEST_F(AllowMissingInAndOfOrListTest, BadAndGoodJwts) {\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtExpired)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ExpiredToken},\n                                                {kOtherHeader, OtherGoodToken}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));\n  // Short-circuit AND, the x-other JWT should be ignored.\n  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kOtherHeader));\n}\n\n} // 
namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/authenticator_test.cc",
    "content": "#include \"envoy/config/core/v3/http_uri.pb.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"common/http/message_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/common/jwks_fetcher.h\"\n#include \"extensions/filters/http/jwt_authn/authenticator.h\"\n#include \"extensions/filters/http/jwt_authn/filter_config.h\"\n\n#include \"test/extensions/filters/http/common/mock.h\"\n#include \"test/extensions/filters/http/jwt_authn/mock.h\"\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing Envoy::Extensions::HttpFilters::Common::JwksFetcher;\nusing Envoy::Extensions::HttpFilters::Common::JwksFetcherPtr;\nusing Envoy::Extensions::HttpFilters::Common::MockJwksFetcher;\nusing ::google::jwt_verify::Jwks;\nusing ::google::jwt_verify::Status;\nusing ::testing::_;\nusing ::testing::Invoke;\nusing ::testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nclass AuthenticatorTest : public testing::Test {\npublic:\n  void SetUp() override {\n    TestUtility::loadFromYaml(ExampleConfig, proto_config_);\n    createAuthenticator();\n  }\n\n  void createAuthenticator(\n      ::google::jwt_verify::CheckAudience* check_audience = nullptr,\n      const absl::optional<std::string>& provider = absl::make_optional<std::string>(ProviderName),\n      bool allow_failed = false, bool allow_missing = false) {\n    filter_config_ = FilterConfigImpl::create(proto_config_, \"\", mock_factory_ctx_);\n    raw_fetcher_ = new MockJwksFetcher;\n    fetcher_.reset(raw_fetcher_);\n    auth_ = Authenticator::create(\n        check_audience, provider, allow_failed, allow_missing,\n        filter_config_->getCache().getJwksCache(), 
filter_config_->cm(),\n        [this](Upstream::ClusterManager&) { return std::move(fetcher_); },\n        filter_config_->timeSource());\n    jwks_ = Jwks::createFrom(PublicKey, Jwks::JWKS);\n    EXPECT_TRUE(jwks_->getStatus() == Status::Ok);\n  }\n\n  void expectVerifyStatus(Status expected_status, Http::RequestHeaderMap& headers) {\n    std::function<void(const Status&)> on_complete_cb = [&expected_status](const Status& status) {\n      ASSERT_EQ(status, expected_status);\n    };\n    auto set_payload_cb = [this](const std::string& name, const ProtobufWkt::Struct& payload) {\n      out_name_ = name;\n      out_payload_ = payload;\n    };\n    initTokenExtractor();\n    auto tokens = extractor_->extract(headers);\n    auth_->verify(headers, parent_span_, std::move(tokens), std::move(set_payload_cb),\n                  std::move(on_complete_cb));\n  }\n\n  void initTokenExtractor() {\n    JwtProviderList providers;\n    for (const auto& it : proto_config_.providers()) {\n      providers.emplace_back(&it.second);\n    }\n    extractor_ = Extractor::create(providers);\n  }\n\n  JwtAuthentication proto_config_;\n  ExtractorConstPtr extractor_;\n  std::shared_ptr<FilterConfigImpl> filter_config_;\n  MockJwksFetcher* raw_fetcher_;\n  JwksFetcherPtr fetcher_;\n  AuthenticatorPtr auth_;\n  ::google::jwt_verify::JwksPtr jwks_;\n  NiceMock<Server::Configuration::MockFactoryContext> mock_factory_ctx_;\n  std::string out_name_;\n  ProtobufWkt::Struct out_payload_;\n  NiceMock<Tracing::MockSpan> parent_span_;\n};\n\n// This test validates a good JWT authentication with a remote Jwks.\n// It also verifies Jwks cache with 10 JWT authentications, but only one Jwks fetch.\nTEST_F(AuthenticatorTest, TestOkJWTandCache) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                              JwksFetcher::JwksReceiver& receiver) {\n        receiver.onJwksSuccess(std::move(jwks_));\n      
}));\n\n  // Test OK pubkey and its cache\n  for (int i = 0; i < 10; i++) {\n    Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n\n    expectVerifyStatus(Status::Ok, headers);\n\n    EXPECT_EQ(headers.get_(\"sec-istio-auth-userinfo\"), ExpectedPayloadValue);\n    // Verify the token is removed.\n    EXPECT_FALSE(headers.has(Http::CustomHeaders::get().Authorization));\n  }\n}\n\n// This test verifies the Jwt is forwarded if \"forward\" flag is set.\nTEST_F(AuthenticatorTest, TestForwardJwt) {\n  // Config forward_jwt flag\n  (*proto_config_.mutable_providers())[std::string(ProviderName)].set_forward(true);\n  createAuthenticator();\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                              JwksFetcher::JwksReceiver& receiver) {\n        receiver.onJwksSuccess(std::move(jwks_));\n      }));\n\n  // Test OK pubkey and its cache\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n\n  expectVerifyStatus(Status::Ok, headers);\n\n  // Verify the token is NOT removed.\n  EXPECT_TRUE(headers.has(Http::CustomHeaders::get().Authorization));\n\n  // Payload not set by default\n  EXPECT_EQ(out_name_, \"\");\n}\n\n// This test verifies the Jwt payload is set.\nTEST_F(AuthenticatorTest, TestSetPayload) {\n  // Config payload_in_metadata flag\n  (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata(\n      \"my_payload\");\n  createAuthenticator();\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                              JwksFetcher::JwksReceiver& receiver) {\n        receiver.onJwksSuccess(std::move(jwks_));\n      }));\n\n  // Test OK pubkey and its cache\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + 
std::string(GoodToken)}};\n\n  expectVerifyStatus(Status::Ok, headers);\n\n  // Payload is set\n  EXPECT_EQ(out_name_, \"my_payload\");\n\n  ProtobufWkt::Struct expected_payload;\n  TestUtility::loadFromJson(ExpectedPayloadJSON, expected_payload);\n  EXPECT_TRUE(TestUtility::protoEqual(out_payload_, expected_payload));\n}\n\n// This test verifies the Jwt with non existing kid\nTEST_F(AuthenticatorTest, TestJwtWithNonExistKid) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                              JwksFetcher::JwksReceiver& receiver) {\n        receiver.onJwksSuccess(std::move(jwks_));\n      }));\n\n  // Test OK pubkey and its cache\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(NonExistKidToken)}};\n\n  expectVerifyStatus(Status::JwtVerificationFail, headers);\n}\n\n// This test verifies the Jwt without \"iss\" work\nTEST_F(AuthenticatorTest, TestJwtWithoutIss) {\n  jwks_ = Jwks::createFrom(ES256PublicKey, Jwks::JWKS);\n  EXPECT_TRUE(jwks_->getStatus() == Status::Ok);\n\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                              JwksFetcher::JwksReceiver& receiver) {\n        receiver.onJwksSuccess(std::move(jwks_));\n      }));\n\n  // Test OK pubkey and its cache\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(ES256WithoutIssToken)}};\n\n  expectVerifyStatus(Status::Ok, headers);\n}\n\n// This test verifies if Jwt is missing, proper status is called.\nTEST_F(AuthenticatorTest, TestMissedJWT) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0);\n\n  // Empty headers.\n  Http::TestRequestHeaderMapImpl headers{};\n\n  expectVerifyStatus(Status::JwtMissed, headers);\n}\n\n// Test multiple tokens; the one from query parameter is bad, verification should 
fail.\nTEST_F(AuthenticatorTest, TestMultipleJWTOneBadFromQuery) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1);\n\n  // headers with multiple tokens: one good, one bad\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n      {\":path\", \"/foo?access_token=\" + std::string(NonExistKidToken)},\n  };\n\n  expectVerifyStatus(Status::JwtVerificationFail, headers);\n}\n\n// Test multiple tokens; the one from header is bad, verification should fail.\nTEST_F(AuthenticatorTest, TestMultipleJWTOneBadFromHeader) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1);\n\n  // headers with multiple tokens: one good, one bad\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(NonExistKidToken)},\n      {\":path\", \"/foo?access_token=\" + std::string(GoodToken)},\n  };\n\n  expectVerifyStatus(Status::JwtVerificationFail, headers);\n}\n\n// Test multiple tokens; all are good, verification is ok.\nTEST_F(AuthenticatorTest, TestMultipleJWTAllGood) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1);\n\n  // headers with multiple tokens: all are good\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n      {\":path\", \"/foo?access_token=\" + std::string(GoodToken)},\n  };\n\n  expectVerifyStatus(Status::Ok, headers);\n}\n\n// Test multiple tokens; one of them is bad and allow_failed, verification is ok.\nTEST_F(AuthenticatorTest, TestMultipleJWTOneBadAllowFails) {\n  createAuthenticator(nullptr, absl::make_optional<std::string>(ProviderName),\n                      /*allow_failed=*/true, /*all_missing=*/false);\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1);\n\n  // headers with multiple tokens: one good, one bad\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n      {\":path\", \"/foo?access_token=\" + std::string(NonExistKidToken)},\n 
 };\n\n  expectVerifyStatus(Status::Ok, headers);\n}\n\n// Test empty header and allow_missing, verification is ok.\nTEST_F(AuthenticatorTest, TestAllowMissingWithEmptyHeader) {\n  createAuthenticator(nullptr, absl::make_optional<std::string>(ProviderName),\n                      /*allow_failed=*/false, /*all_missing=*/true);\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0);\n\n  // Empty headers\n  Http::TestRequestHeaderMapImpl headers{};\n\n  expectVerifyStatus(Status::Ok, headers);\n}\n\n// This test verifies if Jwt is invalid, JwtBadFormat status is returned.\nTEST_F(AuthenticatorTest, TestInvalidJWT) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0);\n\n  std::string token = \"invalidToken\";\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + token}};\n  expectVerifyStatus(Status::JwtBadFormat, headers);\n}\n\n// This test verifies if Authorization header has invalid prefix, JwtMissed status is returned\nTEST_F(AuthenticatorTest, TestInvalidPrefix) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer-invalid\"}};\n  expectVerifyStatus(Status::JwtMissed, headers);\n}\n\n// This test verifies when a JWT is non-expiring without audience specified, JwtAudienceNotAllowed\n// is returned.\nTEST_F(AuthenticatorTest, TestNonExpiringJWT) {\n  EXPECT_CALL(mock_factory_ctx_.cluster_manager_, httpAsyncClientForCluster(_)).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(NonExpiringToken)}};\n  expectVerifyStatus(Status::JwtAudienceNotAllowed, headers);\n}\n\n// This test verifies when a JWT is expired, JwtExpired status is returned.\nTEST_F(AuthenticatorTest, TestExpiredJWT) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + std::string(ExpiredToken)}};\n  expectVerifyStatus(Status::JwtExpired, headers);\n}\n\n// 
This test verifies when a JWT is not yet valid, JwtNotYetValid status is returned.\nTEST_F(AuthenticatorTest, TestNotYetValidJWT) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(NotYetValidToken)}};\n  expectVerifyStatus(Status::JwtNotYetValid, headers);\n}\n\n// This test verifies when an inline JWKS is misconfigured, JwksNoValidKeys is returns\nTEST_F(AuthenticatorTest, TestInvalidLocalJwks) {\n  auto& provider = (*proto_config_.mutable_providers())[std::string(ProviderName)];\n  provider.clear_remote_jwks();\n  provider.mutable_local_jwks()->set_inline_string(\"invalid\");\n  createAuthenticator();\n\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n  expectVerifyStatus(Status::JwksNoValidKeys, headers);\n}\n\n// This test verifies when a JWT is with invalid audience, JwtAudienceNotAllowed is returned.\nTEST_F(AuthenticatorTest, TestNonMatchAudJWT) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(InvalidAudToken)}};\n  expectVerifyStatus(Status::JwtAudienceNotAllowed, headers);\n}\n\n// This test verifies when Jwt issuer is not configured, JwtUnknownIssuer is returned.\nTEST_F(AuthenticatorTest, TestIssuerNotFound) {\n  // Create a config with an other issuer.\n  (*proto_config_.mutable_providers())[std::string(ProviderName)].set_issuer(\"other_issuer\");\n  createAuthenticator();\n\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n  expectVerifyStatus(Status::JwtUnknownIssuer, headers);\n}\n\n// This test verifies that when Jwks fetching fails, JwksFetchFail status is returned.\nTEST_F(AuthenticatorTest, TestPubkeyFetchFail) {\n  
EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                          JwksFetcher::JwksReceiver& receiver) {\n        receiver.onJwksError(JwksFetcher::JwksReceiver::Failure::InvalidJwks);\n      }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n  expectVerifyStatus(Status::JwksFetchFail, headers);\n\n  Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"401\"}}}));\n}\n\n// This test verifies when a Jwks fetching is not completed yet, but onDestroy() is called,\n// onComplete() callback should not be called, but internal request->cancel() should be called.\n// Most importantly, no crash.\nTEST_F(AuthenticatorTest, TestOnDestroy) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1);\n\n  // Cancel is called once.\n  EXPECT_CALL(*raw_fetcher_, cancel()).Times(1);\n\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n  initTokenExtractor();\n  auto tokens = extractor_->extract(headers);\n  // callback should not be called.\n  std::function<void(const Status&)> on_complete_cb = [](const Status&) { FAIL(); };\n  auth_->verify(headers, parent_span_, std::move(tokens), nullptr, std::move(on_complete_cb));\n\n  // Destroy the authenticating process.\n  auth_->onDestroy();\n}\n\n// This test verifies if \"forward_payload_header\" is empty, payload is not forwarded.\nTEST_F(AuthenticatorTest, TestNoForwardPayloadHeader) {\n  // In this config, there is no forward_payload_header\n  auto& provider0 = (*proto_config_.mutable_providers())[std::string(ProviderName)];\n  provider0.clear_forward_payload_header();\n  createAuthenticator();\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n          
                    JwksFetcher::JwksReceiver& receiver) {\n        receiver.onJwksSuccess(std::move(jwks_));\n      }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n  expectVerifyStatus(Status::Ok, headers);\n\n  // Test when forward_payload_header is not set, the output should NOT\n  // contain the sec-istio-auth-userinfo header.\n  EXPECT_FALSE(headers.has(\"sec-istio-auth-userinfo\"));\n}\n\n// This test verifies that allow failed authenticator will verify all tokens.\nTEST_F(AuthenticatorTest, TestAllowFailedMultipleTokens) {\n  auto& provider = (*proto_config_.mutable_providers())[std::string(ProviderName)];\n  std::vector<std::string> names = {\"a\", \"b\", \"c\"};\n  for (const auto& it : names) {\n    auto header = provider.add_from_headers();\n    header->set_name(it);\n    header->set_value_prefix(\"Bearer \");\n  }\n\n  createAuthenticator(nullptr, absl::nullopt, /*allow_failed=*/true);\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                              JwksFetcher::JwksReceiver& receiver) {\n        receiver.onJwksSuccess(std::move(jwks_));\n      }));\n\n  Http::TestRequestHeaderMapImpl headers1{\n      {\"a\", \"Bearer \" + std::string(ExpiredToken)},\n      {\"b\", \"Bearer \" + std::string(GoodToken)},\n      {\"c\", \"Bearer \" + std::string(InvalidAudToken)},\n      {\":path\", \"/\"},\n  };\n  expectVerifyStatus(Status::Ok, headers1);\n\n  EXPECT_TRUE(headers1.has(\"a\"));\n  EXPECT_FALSE(headers1.has(\"b\"));\n  EXPECT_TRUE(headers1.has(\"c\"));\n\n  Http::TestRequestHeaderMapImpl headers2{\n      {\"a\", \"Bearer \" + std::string(GoodToken)},\n      {\"b\", \"Bearer \" + std::string(GoodToken)},\n      {\"c\", \"Bearer \" + std::string(GoodToken)},\n      {\":path\", \"/\"},\n  };\n  expectVerifyStatus(Status::Ok, headers2);\n\n  EXPECT_FALSE(headers2.has(\"a\"));\n  
EXPECT_FALSE(headers2.has(\"b\"));\n  EXPECT_FALSE(headers2.has(\"c\"));\n}\n\n// This test verifies that allow failed authenticator will verify all tokens.\nTEST_F(AuthenticatorTest, TestAllowFailedMultipleIssuers) {\n  auto& provider = (*proto_config_.mutable_providers())[\"other_provider\"];\n  provider.set_issuer(\"https://other.com\");\n  provider.add_audiences(\"other_service\");\n  auto& uri = *provider.mutable_remote_jwks()->mutable_http_uri();\n  uri.set_uri(\"https://pubkey_server/pubkey_path\");\n  uri.set_cluster(\"pubkey_cluster\");\n  auto header = provider.add_from_headers();\n  header->set_name(\"expired-auth\");\n  header->set_value_prefix(\"Bearer \");\n  header = provider.add_from_headers();\n  header->set_name(\"other-auth\");\n  header->set_value_prefix(\"Bearer \");\n\n  createAuthenticator(nullptr, absl::nullopt, /*allow_failed=*/true);\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .Times(2)\n      .WillRepeatedly(Invoke([](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                                JwksFetcher::JwksReceiver& receiver) {\n        ::google::jwt_verify::JwksPtr jwks = Jwks::createFrom(PublicKey, Jwks::JWKS);\n        EXPECT_TRUE(jwks->getStatus() == Status::Ok);\n        receiver.onJwksSuccess(std::move(jwks));\n      }));\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n      {\"expired-auth\", \"Bearer \" + std::string(ExpiredToken)},\n      {\"other-auth\", \"Bearer \" + std::string(OtherGoodToken)},\n      {\":path\", \"/\"},\n  };\n  expectVerifyStatus(Status::Ok, headers);\n\n  EXPECT_FALSE(headers.has(\"Authorization\"));\n  EXPECT_TRUE(headers.has(\"expired-auth\"));\n  EXPECT_FALSE(headers.has(\"other-auth\"));\n}\n\n// Test checks that supplying a CheckAudience to auth will override the one in JwksCache.\nTEST_F(AuthenticatorTest, TestCustomCheckAudience) {\n  auto check_audience = 
std::make_unique<::google::jwt_verify::CheckAudience>(\n      std::vector<std::string>{\"invalid_service\"});\n  createAuthenticator(check_audience.get());\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                              JwksFetcher::JwksReceiver& receiver) {\n        receiver.onJwksSuccess(std::move(jwks_));\n      }));\n\n  Http::TestRequestHeaderMapImpl headers1{\n      {\"Authorization\", \"Bearer \" + std::string(InvalidAudToken)}};\n  expectVerifyStatus(Status::Ok, headers1);\n\n  Http::TestRequestHeaderMapImpl headers2{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n  expectVerifyStatus(Status::JwtAudienceNotAllowed, headers2);\n}\n\n// This test verifies that when invalid JWKS is fetched, an JWKS error status is returned.\nTEST_F(AuthenticatorTest, TestInvalidPubkeyKey) {\n  EXPECT_CALL(*raw_fetcher_, fetch(_, _, _))\n      .WillOnce(Invoke([](const envoy::config::core::v3::HttpUri&, Tracing::Span&,\n                          JwksFetcher::JwksReceiver& receiver) {\n        auto jwks = Jwks::createFrom(PublicKey, Jwks::PEM);\n        receiver.onJwksSuccess(std::move(jwks));\n      }));\n\n  Http::TestRequestHeaderMapImpl headers{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n  expectVerifyStatus(Status::JwksPemBadBase64, headers);\n}\n\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/extractor_test.cc",
    "content": "#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/jwt_authn/extractor.h\"\n\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/test_common/utility.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtProvider;\nusing Envoy::Http::TestRequestHeaderMapImpl;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nconst char ExampleConfig[] = R\"(\nproviders:\n  provider1:\n    issuer: issuer1\n  provider2:\n    issuer: issuer2\n    from_headers:\n      - name: token-header\n  provider3:\n    issuer: issuer3\n    from_params:\n      - token_param\n  provider4:\n    issuer: issuer4\n    from_headers:\n      - name: token-header\n    from_params:\n      - token_param\n  provider5:\n    issuer: issuer5\n    from_headers:\n      - name: prefix-header\n        value_prefix: AAA\n  provider6:\n    issuer: issuer6\n    from_headers:\n      - name: prefix-header\n        value_prefix: AAABBB\n  provider7:\n    issuer: issuer7\n    from_headers:\n      - name: prefix-header\n        value_prefix: CCCDDD\n  provider8:\n    issuer: issuer8\n    from_headers:\n      - name: prefix-header\n        value_prefix: '\"CCCDDD\"'\n)\";\n\nclass ExtractorTest : public testing::Test {\npublic:\n  void SetUp() override {\n    TestUtility::loadFromYaml(ExampleConfig, config_);\n    JwtProviderList providers;\n    for (const auto& it : config_.providers()) {\n      providers.emplace_back(&it.second);\n    }\n    extractor_ = Extractor::create(providers);\n  }\n\n  JwtAuthentication config_;\n  ExtractorConstPtr extractor_;\n};\n\n// Test not token in the request headers\nTEST_F(ExtractorTest, TestNoToken) {\n  auto headers = TestRequestHeaderMapImpl{};\n  auto tokens = extractor_->extract(headers);\n  
EXPECT_EQ(tokens.size(), 0);\n}\n\n// Test the token in the wrong header.\nTEST_F(ExtractorTest, TestWrongHeaderToken) {\n  auto headers = TestRequestHeaderMapImpl{{\"wrong-token-header\", \"jwt_token\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 0);\n}\n\n// Test the token in the wrong query parameter.\nTEST_F(ExtractorTest, TestWrongParamToken) {\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/path?wrong_token=jwt_token\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 0);\n}\n\n// Test extracting token from the default header location: \"Authorization\"\nTEST_F(ExtractorTest, TestDefaultHeaderLocation) {\n  auto headers = TestRequestHeaderMapImpl{{\"Authorization\", \"Bearer jwt_token\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 1);\n\n  // Only the issue1 is using default header location.\n  EXPECT_EQ(tokens[0]->token(), \"jwt_token\");\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer1\"));\n\n  // Other issuers are using custom locations\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer2\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer3\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer4\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer5\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"unknown_issuer\"));\n\n  // Test token remove\n  tokens[0]->removeJwt(headers);\n  EXPECT_FALSE(headers.has(Http::CustomHeaders::get().Authorization));\n}\n\n// Test extracting JWT as Bearer token from the default header location: \"Authorization\" -\n// using an actual (correctly-formatted) JWT:\nTEST_F(ExtractorTest, TestDefaultHeaderLocationWithValidJWT) {\n  auto headers =\n      TestRequestHeaderMapImpl{{absl::StrCat(\"Authorization\"), absl::StrCat(\"Bearer \", GoodToken)}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 1);\n\n  // Only the issue1 is using default header location.\n  
EXPECT_EQ(tokens[0]->token(), GoodToken);\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer1\"));\n}\n\n// Test extracting token from the default query parameter: \"access_token\"\nTEST_F(ExtractorTest, TestDefaultParamLocation) {\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/path?access_token=jwt_token\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 1);\n\n  // Only the issue1 is using default header location.\n  EXPECT_EQ(tokens[0]->token(), \"jwt_token\");\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer1\"));\n\n  // Other issuers are using custom locations\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer2\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer3\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer4\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer5\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"unknown_issuer\"));\n\n  tokens[0]->removeJwt(headers);\n}\n\n// Test extracting token from the custom header: \"token-header\"\nTEST_F(ExtractorTest, TestCustomHeaderToken) {\n  auto headers = TestRequestHeaderMapImpl{{\"token-header\", \"jwt_token\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 1);\n\n  // Only issuer2 and issuer4 are using \"token-header\" location\n  EXPECT_EQ(tokens[0]->token(), \"jwt_token\");\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer2\"));\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer4\"));\n\n  // Other issuers are not allowed from \"token-header\"\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer1\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer3\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer5\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"unknown_issuer\"));\n\n  // Test token remove\n  tokens[0]->removeJwt(headers);\n  EXPECT_FALSE(headers.get(Http::LowerCaseString(\"token-header\")));\n}\n\n// Make sure a double custom header concatenates the 
token\nTEST_F(ExtractorTest, TestDoubleCustomHeaderToken) {\n  auto headers = TestRequestHeaderMapImpl{{\"token-header\", \"jwt_token\"}, {\"token-header\", \"foo\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 1);\n  EXPECT_EQ(tokens[0]->token(), \"jwt_token,foo\");\n}\n\n// Test extracting token from the custom header: \"prefix-header\"\n// value prefix doesn't match. It has to be either \"AAA\" or \"AAABBB\".\nTEST_F(ExtractorTest, TestPrefixHeaderNotMatch) {\n  auto headers = TestRequestHeaderMapImpl{{\"prefix-header\", \"jwt_token\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 0);\n}\n\n// Test extracting token from the custom header: \"prefix-header\"\n// The value matches both prefix values: \"AAA\" or \"AAABBB\".\nTEST_F(ExtractorTest, TestPrefixHeaderMatch) {\n  auto headers = TestRequestHeaderMapImpl{{\"prefix-header\", \"AAABBBjwt_token\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 2);\n\n  // Match issuer 5 with map key as: prefix-header + AAA\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer5\"));\n  EXPECT_EQ(tokens[0]->token(), \"BBBjwt_token\");\n\n  // Match issuer 6 with map key as: prefix-header + AAABBB which is after AAA\n  EXPECT_TRUE(tokens[1]->isIssuerSpecified(\"issuer6\"));\n  EXPECT_EQ(tokens[1]->token(), \"jwt_token\");\n\n  // Test token remove\n  tokens[0]->removeJwt(headers);\n  EXPECT_FALSE(headers.get(Http::LowerCaseString(\"prefix-header\")));\n}\n\n// Test extracting token from the custom header: \"prefix-header\"\n// The value is found after the \"CCCDDD\", then between the '=' and the ','.\nTEST_F(ExtractorTest, TestPrefixHeaderFlexibleMatch1) {\n  auto headers =\n      TestRequestHeaderMapImpl{{\"prefix-header\", \"preamble CCCDDD=jwt_token,extra=more\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 1);\n\n  // Match issuer 7 with map key as: prefix-header + 'CCCDDD'\n  
EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer7\"));\n  EXPECT_EQ(tokens[0]->token(), \"jwt_token\");\n}\n\nTEST_F(ExtractorTest, TestPrefixHeaderFlexibleMatch2) {\n  auto headers =\n      TestRequestHeaderMapImpl{{\"prefix-header\", \"CCCDDD=\\\"and0X3Rva2Vu\\\",comment=\\\"fish tag\\\"\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 1);\n\n  // Match issuer 7 with map key as: prefix-header + AAA\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer7\"));\n  EXPECT_EQ(tokens[0]->token(), \"and0X3Rva2Vu\");\n}\n\nTEST_F(ExtractorTest, TestPrefixHeaderFlexibleMatch3) {\n  auto headers = TestRequestHeaderMapImpl{\n      {\"prefix-header\", \"creds={\\\"authLevel\\\": \\\"20\\\", \\\"CCCDDD\\\": \\\"and0X3Rva2Vu\\\"}\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 2);\n\n  // Match issuer 8 with map key as: prefix-header + '\"CCCDDD\"'\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer8\"));\n  EXPECT_EQ(tokens[0]->token(), \"and0X3Rva2Vu\");\n\n  // Match issuer 7 with map key as: prefix-header + 'CCCDDD'\n  EXPECT_TRUE(tokens[1]->isIssuerSpecified(\"issuer7\"));\n  EXPECT_EQ(tokens[1]->token(), \"and0X3Rva2Vu\");\n}\n\n// Test extracting token from the custom query parameter: \"token_param\"\nTEST_F(ExtractorTest, TestCustomParamToken) {\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/path?token_param=jwt_token\"}};\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 1);\n\n  // Both issuer3 and issuer4 have specified this custom query location.\n  EXPECT_EQ(tokens[0]->token(), \"jwt_token\");\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer3\"));\n  EXPECT_TRUE(tokens[0]->isIssuerSpecified(\"issuer4\"));\n\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer1\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer2\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"issuer5\"));\n  EXPECT_FALSE(tokens[0]->isIssuerSpecified(\"unknown_issuer\"));\n\n  
tokens[0]->removeJwt(headers);\n}\n\n// Test extracting multiple tokens.\nTEST_F(ExtractorTest, TestMultipleTokens) {\n  auto headers = TestRequestHeaderMapImpl{\n      {\":path\", \"/path?token_param=token3&access_token=token4\"},\n      {\"token-header\", \"token2\"},\n      {\"authorization\", \"Bearer token1\"},\n      {\"prefix-header\", \"AAAtoken5\"},\n  };\n  auto tokens = extractor_->extract(headers);\n  EXPECT_EQ(tokens.size(), 5);\n\n  EXPECT_EQ(tokens[0]->token(), \"token1\"); // from authorization\n  EXPECT_EQ(tokens[1]->token(), \"token5\"); // from prefix-header\n  EXPECT_EQ(tokens[2]->token(), \"token2\"); // from token-header\n  EXPECT_EQ(tokens[3]->token(), \"token4\"); // from access_token param\n  EXPECT_EQ(tokens[4]->token(), \"token3\"); // from token_param param\n}\n\n// Test selected extraction of multiple tokens.\nTEST_F(ExtractorTest, TestExtractParam) {\n  auto headers = TestRequestHeaderMapImpl{\n      {\":path\", \"/path?token_param=token3&access_token=token4\"},\n      {\"token-header\", \"token2\"},\n      {\"authorization\", \"Bearer token1\"},\n      {\"prefix-header\", \"AAAtoken5\"},\n  };\n  JwtProvider provider;\n  provider.set_issuer(\"foo\");\n  auto extractor = Extractor::create(provider);\n  auto tokens = extractor->extract(headers);\n  EXPECT_EQ(tokens.size(), 2);\n  EXPECT_EQ(tokens[0]->token(), \"token1\");\n  EXPECT_EQ(tokens[1]->token(), \"token4\");\n  auto header = provider.add_from_headers();\n  header->set_name(\"prefix-header\");\n  header->set_value_prefix(\"AAA\");\n  provider.add_from_params(\"token_param\");\n  extractor = Extractor::create(provider);\n  tokens = extractor->extract(headers);\n  EXPECT_EQ(tokens.size(), 2);\n  EXPECT_EQ(tokens[0]->token(), \"token5\");\n  EXPECT_EQ(tokens[1]->token(), \"token3\");\n}\n\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/filter_config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"common/router/string_accessor_impl.h\"\n#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"extensions/filters/http/jwt_authn/filter_config.h\"\n\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nTEST(HttpJwtAuthnFilterConfigTest, FindByMatch) {\n  const char config[] = R\"(\nproviders:\n  provider1:\n    issuer: issuer1\n    local_jwks:\n      inline_string: jwks\nrules:\n- match:\n    path: /path1\n  requires:\n    provider_name: provider1\n)\";\n\n  JwtAuthentication proto_config;\n  TestUtility::loadFromYaml(config, proto_config);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  auto filter_conf = FilterConfigImpl::create(proto_config, \"\", context);\n\n  StreamInfo::FilterStateImpl filter_state(StreamInfo::FilterState::LifeSpan::FilterChain);\n  EXPECT_TRUE(filter_conf->findVerifier(\n                  Http::TestRequestHeaderMapImpl{\n                      {\":method\", \"GET\"},\n                      {\":path\", \"/path1\"},\n                  },\n                  filter_state) != nullptr);\n\n  EXPECT_TRUE(filter_conf->findVerifier(\n                  Http::TestRequestHeaderMapImpl{\n                      {\":method\", \"GET\"},\n                      {\":path\", \"/path2\"},\n                  },\n                  filter_state) == nullptr);\n}\n\nTEST(HttpJwtAuthnFilterConfigTest, VerifyTLSLifetime) {\n  const char config[] = R\"(\nproviders:\n  provider1:\n    issuer: issuer1\n    local_jwks:\n      inline_string: jwks\nrules:\n- match:\n    path: 
/path1\n  requires:\n    provider_name: provider1\n)\";\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server_context;\n  // Make sure that the thread callbacks are not invoked inline.\n  server_context.thread_local_.defer_data = true;\n  {\n    // Scope in all the things that the filter depends on, so they are destroyed as we leave the\n    // scope.\n    NiceMock<Server::Configuration::MockFactoryContext> context;\n    // The threadLocal, dispatcher and api that are used by the filter config, actually belong to\n    // the server factory context that who's lifetime is longer. We simulate that by returning\n    // their instances from outside the scope.\n    ON_CALL(context, dispatcher()).WillByDefault(ReturnRef(server_context.dispatcher()));\n    ON_CALL(context, api()).WillByDefault(ReturnRef(server_context.api()));\n    ON_CALL(context, threadLocal()).WillByDefault(ReturnRef(server_context.threadLocal()));\n\n    JwtAuthentication proto_config;\n    TestUtility::loadFromYaml(config, proto_config);\n    auto filter_conf = FilterConfigImpl::create(proto_config, \"\", context);\n  }\n\n  // Even though filter_conf is now de-allocated, using a reference to it might still work, as its\n  // memory was not cleared. This leads to a false positive in this test when run normally. The\n  // test should fail under asan if the code uses invalid reference.\n\n  // Make sure the filter scheduled a callback\n  EXPECT_EQ(1, server_context.thread_local_.deferred_data_.size());\n\n  // Simulate a situation where the callback is called after the filter config is destroyed.\n  // call the tls callback. 
we want to make sure that it doesn't depend on objects\n  // that are out of scope.\n  EXPECT_NO_THROW(server_context.thread_local_.call());\n}\n\nTEST(HttpJwtAuthnFilterConfigTest, FindByFilterState) {\n  const char config[] = R\"(\nproviders:\n  provider1:\n    issuer: issuer1\n    local_jwks:\n      inline_string: jwks\n  provider2:\n    issuer: issuer2\n    local_jwks:\n      inline_string: jwks\nfilter_state_rules:\n  name: jwt_selector\n  requires:\n    selector1:\n      provider_name: provider1\n    selector2:\n      provider_name: provider2\n)\";\n\n  JwtAuthentication proto_config;\n  TestUtility::loadFromYaml(config, proto_config);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  auto filter_conf = FilterConfigImpl::create(proto_config, \"\", context);\n\n  // Empty filter_state\n  StreamInfo::FilterStateImpl filter_state1(StreamInfo::FilterState::LifeSpan::FilterChain);\n  EXPECT_TRUE(filter_conf->findVerifier(Http::TestRequestHeaderMapImpl(), filter_state1) ==\n              nullptr);\n\n  // Wrong selector\n  StreamInfo::FilterStateImpl filter_state2(StreamInfo::FilterState::LifeSpan::FilterChain);\n  filter_state2.setData(\n      \"jwt_selector\", std::make_unique<Router::StringAccessorImpl>(\"wrong_selector\"),\n      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain);\n  EXPECT_TRUE(filter_conf->findVerifier(Http::TestRequestHeaderMapImpl(), filter_state2) ==\n              nullptr);\n\n  // correct selector\n  StreamInfo::FilterStateImpl filter_state3(StreamInfo::FilterState::LifeSpan::FilterChain);\n  filter_state3.setData(\"jwt_selector\", std::make_unique<Router::StringAccessorImpl>(\"selector1\"),\n                        StreamInfo::FilterState::StateType::ReadOnly,\n                        StreamInfo::FilterState::LifeSpan::FilterChain);\n  EXPECT_TRUE(filter_conf->findVerifier(Http::TestRequestHeaderMapImpl(), filter_state3) !=\n              nullptr);\n\n  // correct selector\n  
StreamInfo::FilterStateImpl filter_state4(StreamInfo::FilterState::LifeSpan::FilterChain);\n  filter_state4.setData(\"jwt_selector\", std::make_unique<Router::StringAccessorImpl>(\"selector2\"),\n                        StreamInfo::FilterState::StateType::ReadOnly,\n                        StreamInfo::FilterState::LifeSpan::FilterChain);\n  EXPECT_TRUE(filter_conf->findVerifier(Http::TestRequestHeaderMapImpl(), filter_state4) !=\n              nullptr);\n}\n\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/filter_factory_test.cc",
    "content": "#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.validate.h\"\n\n#include \"extensions/filters/http/jwt_authn/filter_factory.h\"\n\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nTEST(HttpJwtAuthnFilterFactoryTest, GoodRemoteJwks) {\n  FilterFactory factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto();\n  TestUtility::loadFromYaml(ExampleConfig, *proto_config);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_));\n  cb(filter_callback);\n}\n\nTEST(HttpJwtAuthnFilterFactoryTest, GoodLocalJwks) {\n  JwtAuthentication proto_config;\n  auto& provider = (*proto_config.mutable_providers())[\"provider\"];\n  provider.set_issuer(\"issuer\");\n  provider.mutable_local_jwks()->set_inline_string(PublicKey);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  FilterFactory factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_));\n  cb(filter_callback);\n}\n\nTEST(HttpJwtAuthnFilterFactoryTest, BadLocalJwks) {\n  JwtAuthentication proto_config;\n  auto& provider = (*proto_config.mutable_providers())[\"provider\"];\n  provider.set_issuer(\"issuer\");\n  provider.mutable_local_jwks()->set_inline_string(\"A 
bad jwks\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  FilterFactory factory;\n  EXPECT_THROW(factory.createFilterFactoryFromProto(proto_config, \"stats\", context),\n               EnvoyException);\n}\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/filter_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/router/string_accessor_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/integration/http_protocol_integration.h\"\n#include \"test/test_common/registry.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nconst char HeaderToFilterStateFilterName[] = \"envoy.filters.http.header_to_filter_state_for_test\";\n\n// This filter extracts a string header from \"header\" and\n// save it into FilterState as name \"state\" as read-only Router::StringAccessor.\nclass HeaderToFilterStateFilter : public Http::PassThroughDecoderFilter {\npublic:\n  HeaderToFilterStateFilter(const std::string& header, const std::string& state)\n      : header_(header), state_(state) {}\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override {\n    const Http::HeaderEntry* entry = headers.get(header_);\n    if (entry) {\n      decoder_callbacks_->streamInfo().filterState()->setData(\n          state_, std::make_unique<Router::StringAccessorImpl>(entry->value().getStringView()),\n          StreamInfo::FilterState::StateType::ReadOnly,\n          StreamInfo::FilterState::LifeSpan::FilterChain);\n    }\n    return Http::FilterHeadersStatus::Continue;\n  }\n\nprivate:\n  Http::LowerCaseString header_;\n  std::string state_;\n};\n\nclass 
HeaderToFilterStateFilterConfig : public Common::EmptyHttpFilterConfig {\npublic:\n  HeaderToFilterStateFilterConfig()\n      : Common::EmptyHttpFilterConfig(HeaderToFilterStateFilterName) {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamDecoderFilter(\n          std::make_shared<HeaderToFilterStateFilter>(\"jwt_selector\", \"jwt_selector\"));\n    };\n  }\n};\n\nstd::string getAuthFilterConfig(const std::string& config_str, bool use_local_jwks) {\n  JwtAuthentication proto_config;\n  TestUtility::loadFromYaml(config_str, proto_config);\n\n  if (use_local_jwks) {\n    auto& provider0 = (*proto_config.mutable_providers())[std::string(ProviderName)];\n    provider0.clear_remote_jwks();\n    auto local_jwks = provider0.mutable_local_jwks();\n    local_jwks->set_inline_string(PublicKey);\n  }\n\n  HttpFilter filter;\n  filter.set_name(HttpFilterNames::get().JwtAuthn);\n  filter.mutable_typed_config()->PackFrom(proto_config);\n  return MessageUtil::getJsonStringFromMessage(filter);\n}\n\nstd::string getFilterConfig(bool use_local_jwks) {\n  return getAuthFilterConfig(ExampleConfig, use_local_jwks);\n}\n\nclass LocalJwksIntegrationTest : public HttpProtocolIntegrationTest {\npublic:\n  LocalJwksIntegrationTest() : registration_(factory_) {}\n\n  HeaderToFilterStateFilterConfig factory_;\n  Registry::InjectFactory<Server::Configuration::NamedHttpFilterConfigFactory> registration_;\n};\n\nINSTANTIATE_TEST_SUITE_P(Protocols, LocalJwksIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n// With local Jwks, this test verifies a request is passed with a good Jwt token.\nTEST_P(LocalJwksIntegrationTest, WithGoodToken) {\n  
config_helper_.addFilter(getFilterConfig(true));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n  });\n\n  waitForNextUpstreamRequest();\n  const auto* payload_entry =\n      upstream_request_->headers().get(Http::LowerCaseString(\"sec-istio-auth-userinfo\"));\n  EXPECT_TRUE(payload_entry != nullptr);\n  EXPECT_EQ(payload_entry->value().getStringView(), ExpectedPayloadValue);\n  // Verify the token is removed.\n  EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::CustomHeaders::get().Authorization));\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// With local Jwks, this test verifies a request is rejected with an expired Jwt token.\nTEST_P(LocalJwksIntegrationTest, ExpiredToken) {\n  config_helper_.addFilter(getFilterConfig(true));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n      {\"Authorization\", \"Bearer \" + std::string(ExpiredToken)},\n  });\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"401\", response->headers().getStatusValue());\n}\n\nTEST_P(LocalJwksIntegrationTest, MissingToken) {\n  config_helper_.addFilter(getFilterConfig(true));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = 
codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n  });\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"401\", response->headers().getStatusValue());\n}\n\nTEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) {\n  config_helper_.addFilter(getFilterConfig(true));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"HEAD\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n      {\"Authorization\", \"Bearer \" + std::string(ExpiredToken)},\n  });\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"401\", response->headers().getStatusValue());\n  EXPECT_NE(\"0\", response->headers().getContentLengthValue());\n  EXPECT_THAT(response->body(), ::testing::IsEmpty());\n}\n\n// This test verifies a request is passed with a path that don't match any requirements.\nTEST_P(LocalJwksIntegrationTest, NoRequiresPath) {\n  config_helper_.addFilter(getFilterConfig(true));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/foo\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n  });\n\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// This test verifies a CORS preflight request without JWT token is allowed.\nTEST_P(LocalJwksIntegrationTest, CorsPreflight) {\n  
config_helper_.addFilter(getFilterConfig(true));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"OPTIONS\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n      {\"access-control-request-method\", \"GET\"},\n      {\"origin\", \"test-origin\"},\n  });\n\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// This test verifies JwtRequirement specified from filer state rules\nTEST_P(LocalJwksIntegrationTest, FilterStateRequirement) {\n  // A config with metadata rules.\n  const std::string auth_filter_conf = R\"(\n  providers:\n    example_provider:\n      issuer: https://example.com\n      audiences:\n      - example_service\n  filter_state_rules:\n    name: jwt_selector\n    requires:\n      example_provider:\n        provider_name: example_provider\n)\";\n\n  config_helper_.addFilter(getAuthFilterConfig(auth_filter_conf, true));\n  config_helper_.addFilter(absl::StrCat(\"name: \", HeaderToFilterStateFilterName));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  struct TestCase {\n    std::vector<std::pair<std::string, std::string>> extra_headers;\n    std::string expected_status;\n  };\n\n  const TestCase test_cases[] = {\n      // Case1: not set metadata, so Jwt is not required, expect 200\n      {\n          // Empty extra headers\n          {},\n          \"200\",\n      },\n\n      // Case2: requirement is set in the metadata, but missing token, expect 401\n      {\n          // selector header, but not token header\n          {\n              {\"jwt_selector\", \"example_provider\"},\n          },\n          \"401\",\n 
     },\n\n      // Case 3: requirement is set in the metadata, token is good, expect 200\n      {\n          // selector header, and token header\n          {\n              {\"jwt_selector\", \"example_provider\"},\n              {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n          },\n          \"200\",\n      },\n  };\n\n  for (const auto& test : test_cases) {\n    Http::TestRequestHeaderMapImpl headers{\n        {\":method\", \"GET\"},\n        {\":path\", \"/foo\"},\n        {\":scheme\", \"http\"},\n        {\":authority\", \"host\"},\n    };\n    for (const auto& h : test.extra_headers) {\n      headers.addCopy(h.first, h.second);\n    }\n    auto response = codec_client_->makeHeaderOnlyRequest(headers);\n\n    if (test.expected_status == \"200\") {\n      waitForNextUpstreamRequest();\n      upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n    }\n\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(test.expected_status, response->headers().getStatusValue());\n  }\n}\n\n// The test case with a fake upstream for remote Jwks server.\nclass RemoteJwksIntegrationTest : public HttpProtocolIntegrationTest {\npublic:\n  void createUpstreams() override {\n    HttpProtocolIntegrationTest::createUpstreams();\n    // for Jwks upstream.\n    addFakeUpstream(GetParam().upstream_protocol);\n  }\n\n  void initializeFilter(bool add_cluster) {\n    config_helper_.addFilter(getFilterConfig(false));\n\n    if (add_cluster) {\n      config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n        auto* jwks_cluster = bootstrap.mutable_static_resources()->add_clusters();\n        jwks_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n        jwks_cluster->set_name(\"pubkey_cluster\");\n      });\n    } else {\n      config_helper_.skipPortUsageValidation();\n    }\n\n    initialize();\n  }\n\n  void waitForJwksResponse(const 
std::string& status, const std::string& jwks_body) {\n    AssertionResult result =\n        fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_jwks_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = fake_jwks_connection_->waitForNewStream(*dispatcher_, jwks_request_);\n    RELEASE_ASSERT(result, result.message());\n    result = jwks_request_->waitForEndStream(*dispatcher_);\n    RELEASE_ASSERT(result, result.message());\n\n    Http::TestResponseHeaderMapImpl response_headers{{\":status\", status}};\n    jwks_request_->encodeHeaders(response_headers, false);\n    Buffer::OwnedImpl response_data1(jwks_body);\n    jwks_request_->encodeData(response_data1, true);\n  }\n\n  void cleanup() {\n    codec_client_->close();\n    if (fake_jwks_connection_ != nullptr) {\n      AssertionResult result = fake_jwks_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_jwks_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n    if (fake_upstream_connection_ != nullptr) {\n      AssertionResult result = fake_upstream_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_upstream_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n  }\n\n  FakeHttpConnectionPtr fake_jwks_connection_{};\n  FakeStreamPtr jwks_request_{};\n};\n\nINSTANTIATE_TEST_SUITE_P(Protocols, RemoteJwksIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n// With remote Jwks, this test verifies a request is passed with a good Jwt token\n// and a good public key fetched from a remote server.\nTEST_P(RemoteJwksIntegrationTest, WithGoodToken) {\n  initializeFilter(/*add_cluster=*/true);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = 
codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n  });\n\n  waitForJwksResponse(\"200\", PublicKey);\n\n  waitForNextUpstreamRequest();\n\n  const auto* payload_entry =\n      upstream_request_->headers().get(Http::LowerCaseString(\"sec-istio-auth-userinfo\"));\n  EXPECT_TRUE(payload_entry != nullptr);\n  EXPECT_EQ(payload_entry->value().getStringView(), ExpectedPayloadValue);\n  // Verify the token is removed.\n  EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::CustomHeaders::get().Authorization));\n\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n  cleanup();\n}\n\n// With remote Jwks, this test verifies a request is rejected even with a good Jwt token\n// when the remote jwks server replied with 500.\nTEST_P(RemoteJwksIntegrationTest, FetchFailedJwks) {\n  initializeFilter(/*add_cluster=*/true);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n  });\n\n  // Fails the jwks fetching.\n  waitForJwksResponse(\"500\", \"\");\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"401\", response->headers().getStatusValue());\n\n  cleanup();\n}\n\nTEST_P(RemoteJwksIntegrationTest, FetchFailedMissingCluster) {\n  initializeFilter(/*add_cluster=*/false);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = 
codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n  });\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"401\", response->headers().getStatusValue());\n\n  cleanup();\n}\n\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/filter_test.cc",
    "content": "#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"extensions/filters/http/jwt_authn/filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/extensions/filters/http/jwt_authn/mock.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::google::jwt_verify::Status;\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nclass MockMatcher : public Matcher {\npublic:\n  MOCK_METHOD(bool, matches, (const Http::RequestHeaderMap& headers), (const));\n};\n\nJwtAuthnFilterStats generateMockStats(Stats::Scope& scope) {\n  return {ALL_JWT_AUTHN_FILTER_STATS(POOL_COUNTER_PREFIX(scope, \"\"))};\n}\n\nclass MockFilterConfig : public FilterConfig {\npublic:\n  MockFilterConfig() : stats_(generateMockStats(stats_store_)) {\n    ON_CALL(*this, bypassCorsPreflightRequest()).WillByDefault(Return(true));\n    ON_CALL(*this, findVerifier(_, _)).WillByDefault(Return(nullptr));\n    ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_));\n  }\n\n  MOCK_METHOD(const Verifier*, findVerifier,\n              (const Http::RequestHeaderMap& headers, const StreamInfo::FilterState& filter_state),\n              (const));\n  MOCK_METHOD(bool, bypassCorsPreflightRequest, (), (const));\n  MOCK_METHOD(JwtAuthnFilterStats&, stats, ());\n\n  NiceMock<Stats::MockIsolatedStatsStore> stats_store_;\n  JwtAuthnFilterStats stats_;\n};\n\nclass FilterTest : public testing::Test {\npublic:\n  void SetUp() override {\n    mock_config_ = ::std::make_shared<NiceMock<MockFilterConfig>>();\n\n    mock_verifier_ = std::make_unique<MockVerifier>();\n    filter_ = std::make_unique<Filter>(mock_config_);\n    filter_->setDecoderFilterCallbacks(filter_callbacks_);\n  }\n\n  void setupMockConfig() {\n    EXPECT_CALL(*mock_config_.get(), 
findVerifier(_, _)).WillOnce(Return(mock_verifier_.get()));\n  }\n\n  std::shared_ptr<NiceMock<MockFilterConfig>> mock_config_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> filter_callbacks_;\n  std::unique_ptr<Filter> filter_;\n  std::unique_ptr<MockVerifier> mock_verifier_;\n  NiceMock<MockVerifierCallbacks> verifier_callback_;\n  Http::TestRequestTrailerMapImpl trailers_;\n};\n\n// This test verifies Verifier::Callback is called inline with OK status.\n// All functions should return Continue.\nTEST_F(FilterTest, InlineOK) {\n  setupMockConfig();\n  // A successful authentication completed inline: callback is called inside verify().\n  EXPECT_CALL(*mock_verifier_, verify(_)).WillOnce(Invoke([](ContextSharedPtr context) {\n    context->callback()->onComplete(Status::Ok);\n  }));\n\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  EXPECT_EQ(1U, mock_config_->stats().allowed_.value());\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_));\n}\n\n// This test verifies Verifier::Callback is not called for CORS preflight request.\nTEST_F(FilterTest, CorsPreflight) {\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\":method\", \"OPTIONS\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n      {\"access-control-request-method\", \"GET\"},\n      {\"origin\", \"test-origin\"},\n  };\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, 
filter_->decodeMetadata(metadata_map));\n  EXPECT_EQ(1U, mock_config_->stats().allowed_.value());\n  EXPECT_EQ(1U, mock_config_->stats().cors_preflight_bypassed_.value());\n  EXPECT_EQ(0U, mock_config_->stats().denied_.value());\n}\n\nTEST_F(FilterTest, CorsPreflightMssingOrigin) {\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\":method\", \"OPTIONS\"},\n      {\":path\", \"/\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n      {\"access-control-request-method\", \"GET\"},\n  };\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1U, mock_config_->stats().allowed_.value());\n  // Should not be bypassed by cors_preflight since missing origin.\n  EXPECT_EQ(0U, mock_config_->stats().cors_preflight_bypassed_.value());\n  EXPECT_EQ(0U, mock_config_->stats().denied_.value());\n}\n\nTEST_F(FilterTest, CorsPreflightMssingAccessControlRequestMethod) {\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\":method\", \"OPTIONS\"},    {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"},\n      {\"origin\", \"test-origin\"},\n  };\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1U, mock_config_->stats().allowed_.value());\n  // Should not be bypassed by cors_preflight since missing access-control-request-method.\n  EXPECT_EQ(0U, mock_config_->stats().cors_preflight_bypassed_.value());\n  EXPECT_EQ(0U, mock_config_->stats().denied_.value());\n}\n\n// This test verifies the setPayload call is handled correctly\nTEST_F(FilterTest, TestSetPayloadCall) {\n  setupMockConfig();\n  ProtobufWkt::Struct payload;\n  // A successful authentication completed inline: callback is called inside verify().\n  EXPECT_CALL(*mock_verifier_, verify(_)).WillOnce(Invoke([&payload](ContextSharedPtr context) {\n    context->callback()->setPayload(payload);\n    context->callback()->onComplete(Status::Ok);\n  }));\n\n  
EXPECT_CALL(filter_callbacks_.stream_info_, setDynamicMetadata(_, _))\n      .WillOnce(Invoke([&payload](const std::string& ns, const ProtobufWkt::Struct& out_payload) {\n        EXPECT_EQ(ns, HttpFilterNames::get().JwtAuthn);\n        EXPECT_TRUE(TestUtility::protoEqual(out_payload, payload));\n      }));\n\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1U, mock_config_->stats().allowed_.value());\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_));\n}\n\n// This test verifies Verifier::Callback is called inline with a failure(401 Unauthorized) status.\n// All functions should return Continue except decodeHeaders(), it returns StopIteration.\nTEST_F(FilterTest, InlineUnauthorizedFailure) {\n  setupMockConfig();\n  // A failed authentication completed inline: callback is called inside verify().\n\n  EXPECT_CALL(filter_callbacks_, sendLocalReply(Http::Code::Unauthorized, _, _, _, _));\n  EXPECT_CALL(*mock_verifier_, verify(_)).WillOnce(Invoke([](ContextSharedPtr context) {\n    context->callback()->onComplete(Status::JwtBadFormat);\n  }));\n\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1U, mock_config_->stats().denied_.value());\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_));\n  EXPECT_EQ(\"jwt_authn_access_denied\", filter_callbacks_.details());\n}\n\n// This test verifies Verifier::Callback is called inline with a failure(403 Forbidden) status.\n// All functions should return Continue except decodeHeaders(), it returns 
StopIteration.\nTEST_F(FilterTest, InlineForbiddenFailure) {\n  setupMockConfig();\n  // A failed authentication completed inline: callback is called inside verify().\n\n  EXPECT_CALL(filter_callbacks_, sendLocalReply(Http::Code::Forbidden, _, _, _, _));\n  EXPECT_CALL(*mock_verifier_, verify(_)).WillOnce(Invoke([](ContextSharedPtr context) {\n    context->callback()->onComplete(Status::JwtAudienceNotAllowed);\n  }));\n\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1U, mock_config_->stats().denied_.value());\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_));\n  EXPECT_EQ(\"jwt_authn_access_denied\", filter_callbacks_.details());\n}\n\n// This test verifies Verifier::Callback is called with OK status after verify().\nTEST_F(FilterTest, OutBoundOK) {\n  setupMockConfig();\n  Verifier::Callbacks* m_cb;\n  // callback is saved, not called right\n  EXPECT_CALL(*mock_verifier_, verify(_)).WillOnce(Invoke([&m_cb](ContextSharedPtr context) {\n    m_cb = context->callback();\n  }));\n\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(headers, false));\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(trailers_));\n\n  // Callback is called now with OK status.\n  m_cb->onComplete(Status::Ok);\n\n  EXPECT_EQ(1U, mock_config_->stats().allowed_.value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_));\n}\n\n// This test verifies Verifier::Callback is 
called with a failure(401 Unauthorized) after verify()\n// returns any NonOK status except JwtAudienceNotAllowed.\nTEST_F(FilterTest, OutBoundUnauthorizedFailure) {\n  setupMockConfig();\n  Verifier::Callbacks* m_cb;\n  // callback is saved, not called right\n  EXPECT_CALL(*mock_verifier_, verify(_)).WillOnce(Invoke([&m_cb](ContextSharedPtr context) {\n    m_cb = context->callback();\n  }));\n\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(headers, false));\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(trailers_));\n\n  // Callback is called now with a failure status.\n  EXPECT_CALL(filter_callbacks_, sendLocalReply(Http::Code::Unauthorized, _, _, _, _));\n  m_cb->onComplete(Status::JwtBadFormat);\n\n  EXPECT_EQ(1U, mock_config_->stats().denied_.value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_));\n\n  // Should be OK to call the onComplete() again.\n  m_cb->onComplete(Status::JwtBadFormat);\n}\n\n// This test verifies Verifier::Callback is called with a failure(403 Forbidden) after verify()\n// returns JwtAudienceNotAllowed.\nTEST_F(FilterTest, OutBoundForbiddenFailure) {\n  setupMockConfig();\n  Verifier::Callbacks* m_cb;\n  // callback is saved, not called right\n  EXPECT_CALL(*mock_verifier_, verify(_)).WillOnce(Invoke([&m_cb](ContextSharedPtr context) {\n    m_cb = context->callback();\n  }));\n\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(headers, false));\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data, false));\n  
EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(trailers_));\n\n  // Callback is called now with a failure status.\n  EXPECT_CALL(filter_callbacks_, sendLocalReply(Http::Code::Forbidden, _, _, _, _));\n  m_cb->onComplete(Status::JwtAudienceNotAllowed);\n\n  EXPECT_EQ(1U, mock_config_->stats().denied_.value());\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_));\n\n  // Should be OK to call the onComplete() again.\n  m_cb->onComplete(Status::JwtAudienceNotAllowed);\n}\n\n// Test verifies that if no route matched requirement, then request is allowed.\nTEST_F(FilterTest, TestNoRouteMatched) {\n  EXPECT_CALL(*mock_config_.get(), findVerifier(_, _)).WillOnce(Return(nullptr));\n\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1U, mock_config_->stats().allowed_.value());\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_));\n}\n\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/group_verifier_test.cc",
    "content": "#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"extensions/filters/http/jwt_authn/verifier.h\"\n\n#include \"test/extensions/filters/http/jwt_authn/mock.h\"\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing ::google::jwt_verify::Status;\nusing ::testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nconst char AllWithAny[] = R\"(\nproviders:\n  provider_1:\n    issuer: iss_1\n  provider_2:\n    issuer: iss_2\n  provider_3:\n    issuer: iss_3\nrules:\n- match: { path: \"/\" }\n  requires:\n    requires_all:\n      requirements:\n      - requires_any:\n          requirements:\n            - provider_name: \"provider_1\"\n            - provider_name: \"provider_2\"\n      - provider_name: \"provider_3\"\n)\";\n\nconst char AnyWithAll[] = R\"(\nproviders:\n  provider_1:\n    issuer: iss_1\n  provider_2:\n    issuer: iss_2\n  provider_3:\n    issuer: iss_3\n  provider_4:\n    issuer: iss_4\nrules:\n- match: { path: \"/\" }\n  requires:\n    requires_any:\n      requirements:\n      - requires_all:\n          requirements:\n            - provider_name: \"provider_1\"\n            - provider_name: \"provider_2\"\n      - requires_all:\n          requirements:\n            - provider_name: \"provider_3\"\n            - provider_name: \"provider_4\"\n)\";\n\nusing StatusMap = absl::node_hash_map<std::string, const Status>;\n\nconstexpr auto allowfailed = \"_allow_failed_\";\n\nclass GroupVerifierTest : public testing::Test {\npublic:\n  void createVerifier() {\n    ON_CALL(mock_factory_, create(_, _, _, _))\n        .WillByDefault(Invoke([&](const ::google::jwt_verify::CheckAudience*,\n                                  const absl::optional<std::string>& provider, bool, bool) {\n        
  return std::move(mock_auths_[provider ? provider.value() : allowfailed]);\n        }));\n    verifier_ = Verifier::create(proto_config_.rules(0).requires(), proto_config_.providers(),\n                                 mock_factory_);\n  }\n  void createSyncMockAuthsAndVerifier(const StatusMap& statuses) {\n    for (const auto& it : statuses) {\n      auto mock_auth = std::make_unique<MockAuthenticator>();\n      EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _))\n          .WillOnce(Invoke([issuer = it.first, status = it.second](\n                               Http::HeaderMap&, Tracing::Span&, std::vector<JwtLocationConstPtr>*,\n                               SetPayloadCallback set_payload_cb, AuthenticatorCallback callback) {\n            if (status == Status::Ok) {\n              ProtobufWkt::Struct empty_struct;\n              set_payload_cb(issuer, empty_struct);\n            }\n            callback(status);\n          }));\n      EXPECT_CALL(*mock_auth, onDestroy()).Times(1);\n      mock_auths_[it.first] = std::move(mock_auth);\n    }\n    createVerifier();\n  }\n\n  // This expected payload is only for createSyncMockAuthsAndVerifier() function\n  // which set an empty payload struct for each issuer.\n  static ProtobufWkt::Struct getExpectedPayload(const std::vector<std::string>& issuers) {\n    ProtobufWkt::Struct struct_obj;\n    auto* fields = struct_obj.mutable_fields();\n    for (const auto& issuer : issuers) {\n      ProtobufWkt::Struct empty_struct;\n      *(*fields)[issuer].mutable_struct_value() = empty_struct;\n    }\n    return struct_obj;\n  }\n\n  absl::node_hash_map<std::string, AuthenticatorCallback>\n  createAsyncMockAuthsAndVerifier(const std::vector<std::string>& providers) {\n    absl::node_hash_map<std::string, AuthenticatorCallback> callbacks;\n    for (const auto& provider : providers) {\n      auto mock_auth = std::make_unique<MockAuthenticator>();\n      EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _))\n          
.WillOnce(Invoke([&callbacks, iss = provider](\n                               Http::HeaderMap&, Tracing::Span&, std::vector<JwtLocationConstPtr>*,\n                               SetPayloadCallback, AuthenticatorCallback callback) {\n            callbacks[iss] = std::move(callback);\n          }));\n      EXPECT_CALL(*mock_auth, onDestroy()).Times(1);\n      mock_auths_[provider] = std::move(mock_auth);\n    }\n    createVerifier();\n    return callbacks;\n  }\n\n  JwtAuthentication proto_config_;\n  VerifierConstPtr verifier_;\n  MockVerifierCallbacks mock_cb_;\n  absl::node_hash_map<std::string, std::unique_ptr<MockAuthenticator>> mock_auths_;\n  NiceMock<MockAuthFactory> mock_factory_;\n  ContextSharedPtr context_;\n  NiceMock<Tracing::MockSpan> parent_span_;\n};\n\n// Deeply nested anys that ends in provider name\nTEST_F(GroupVerifierTest, DeeplyNestedAnys) {\n  const char config[] = R\"(\nproviders:\n  example_provider:\n    issuer: https://example.com\n    audiences:\n    - example_service\n    - http://example_service1\n    - https://example_service2/\n    remote_jwks:\n      http_uri:\n        uri: https://pubkey_server/pubkey_path\n        cluster: pubkey_cluster\n    forward_payload_header: sec-istio-auth-userinfo\n    from_params:\n    - jwta\n    - jwtb\n    - jwtc\nrules:\n- match: { path: \"/match\" }\n  requires:\n    requires_any:\n      requirements:\n      - requires_any:\n          requirements:\n          - requires_any:\n              requirements:\n              - provider_name: \"example_provider\"\n)\";\n  TestUtility::loadFromYaml(config, proto_config_);\n  createSyncMockAuthsAndVerifier(StatusMap{{\"example_provider\", Status::Ok}});\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) {\n    EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({\"example_provider\"})));\n  }));\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = 
Http::TestRequestHeaderMapImpl{\n      {\"sec-istio-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_FALSE(headers.has(\"sec-istio-auth-userinfo\"));\n}\n\n// require alls that just ends\nTEST_F(GroupVerifierTest, CanHandleUnexpectedEnd) {\n  const char config[] = R\"(\nproviders:\n  example_provider:\n    issuer: https://example.com\n    audiences:\n    - example_service\n    - http://example_service1\n    - https://example_service2/\n    remote_jwks:\n      http_uri:\n        uri: https://pubkey_server/pubkey_path\n        cluster: pubkey_cluster\n    forward_payload_header: sec-istio-auth-userinfo\nrules:\n- match: { path: \"/match\" }\n  requires:\n    requires_all:\n      requirements:\n      - requires_all:\n)\";\n  TestUtility::loadFromYaml(config, proto_config_);\n  auto mock_auth = std::make_unique<MockAuthenticator>();\n  EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _)).Times(0);\n  mock_auths_[\"example_provider\"] = std::move(mock_auth);\n  createVerifier();\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\n// test requires all with both auth returning OK\nTEST_F(GroupVerifierTest, TestRequiresAll) {\n  TestUtility::loadFromYaml(RequiresAllConfig, proto_config_);\n  createSyncMockAuthsAndVerifier(\n      StatusMap{{\"example_provider\", Status::Ok}, {\"other_provider\", Status::Ok}});\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) {\n    EXPECT_TRUE(TestUtility::protoEqual(\n        payload, getExpectedPayload({\"example_provider\", \"other_provider\"})));\n  }));\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"example-auth-userinfo\", \"\"},\n      
{\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_FALSE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_FALSE(headers.has(\"other-auth-userinfo\"));\n}\n\n// test requires all with first auth returning bad format\nTEST_F(GroupVerifierTest, TestRequiresAllBadFormat) {\n  TestUtility::loadFromYaml(RequiresAllConfig, proto_config_);\n  auto callbacks = createAsyncMockAuthsAndVerifier(\n      std::vector<std::string>{\"example_provider\", \"other_provider\"});\n\n  // onComplete with failure status, not payload\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtBadFormat)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"example-auth-userinfo\", \"\"},\n      {\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  callbacks[\"example_provider\"](Status::JwtBadFormat);\n  // can keep invoking callback\n  callbacks[\"other_provider\"](Status::Ok);\n  callbacks[\"example_provider\"](Status::Ok);\n  callbacks[\"other_provider\"](Status::Ok);\n  EXPECT_FALSE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_FALSE(headers.has(\"other-auth-userinfo\"));\n}\n\n// test requires all with second auth returning missing jwt\nTEST_F(GroupVerifierTest, TestRequiresAllMissing) {\n  TestUtility::loadFromYaml(RequiresAllConfig, proto_config_);\n  auto callbacks = createAsyncMockAuthsAndVerifier(\n      std::vector<std::string>{\"example_provider\", \"other_provider\"});\n\n  // onComplete with failure status, not payload\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtMissed)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"example-auth-userinfo\", \"\"},\n      {\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, 
parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  callbacks[\"example_provider\"](Status::Ok);\n  callbacks[\"other_provider\"](Status::JwtMissed);\n  // can keep invoking callback\n  callbacks[\"example_provider\"](Status::Ok);\n  callbacks[\"other_provider\"](Status::Ok);\n  EXPECT_FALSE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_FALSE(headers.has(\"other-auth-userinfo\"));\n}\n\n// Test requires all and mock auths simulate cache misses and async return of failure statuses.\nTEST_F(GroupVerifierTest, TestRequiresAllBothFailed) {\n  TestUtility::loadFromYaml(RequiresAllConfig, proto_config_);\n  auto callbacks = createAsyncMockAuthsAndVerifier(\n      std::vector<std::string>{\"example_provider\", \"other_provider\"});\n\n  // onComplete with failure status, not payload\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtUnknownIssuer)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"example-auth-userinfo\", \"\"},\n      {\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_FALSE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_FALSE(headers.has(\"other-auth-userinfo\"));\n  callbacks[\"example_provider\"](Status::JwtUnknownIssuer);\n  callbacks[\"other_provider\"](Status::JwtUnknownIssuer);\n}\n\n// Test requires any with first auth returning OK.\nTEST_F(GroupVerifierTest, TestRequiresAnyFirstAuthOK) {\n  TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_);\n  createSyncMockAuthsAndVerifier(StatusMap{{\"example_provider\", Status::Ok}});\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) {\n    EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({\"example_provider\"})));\n  }));\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      
{\"example-auth-userinfo\", \"\"},\n      {\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_FALSE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_TRUE(headers.has(\"other-auth-userinfo\"));\n}\n\n// Test requires any with last auth returning OK.\nTEST_F(GroupVerifierTest, TestRequiresAnyLastAuthOk) {\n  TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_);\n  createSyncMockAuthsAndVerifier(\n      StatusMap{{\"example_provider\", Status::JwtUnknownIssuer}, {\"other_provider\", Status::Ok}});\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) {\n    EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({\"other_provider\"})));\n  }));\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"example-auth-userinfo\", \"\"},\n      {\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_FALSE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_FALSE(headers.has(\"other-auth-userinfo\"));\n}\n\n// Test requires any with both auth returning error. 
Requires any returns the error last received\n// back to the caller.\nTEST_F(GroupVerifierTest, TestRequiresAnyAllAuthFailed) {\n  TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_);\n  auto mock_auth = std::make_unique<MockAuthenticator>();\n  createSyncMockAuthsAndVerifier(StatusMap{{\"example_provider\", Status::JwtMissed},\n                                           {\"other_provider\", Status::JwtHeaderBadKid}});\n\n  // onComplete with failure status, not payload\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"example-auth-userinfo\", \"\"},\n      {\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_FALSE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_FALSE(headers.has(\"other-auth-userinfo\"));\n}\n\n// Test requires any with both auth returning errors, last error is JwtMissed.\n// Usually the final error is from the last one.\n// But if a token is not for a provider, that provider auth will either return\n// JwtMissed or JwtUnknownIssuer, such error should not be used for the final\n// error in Any case\nTEST_F(GroupVerifierTest, TestRequiresAnyLastIsJwtMissed) {\n  TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_);\n  auto mock_auth = std::make_unique<MockAuthenticator>();\n  createSyncMockAuthsAndVerifier(StatusMap{{\"example_provider\", Status::JwtHeaderBadKid},\n                                           {\"other_provider\", Status::JwtMissed}});\n\n  // onComplete with failure status, not payload\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"example-auth-userinfo\", \"\"},\n      {\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, 
parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_FALSE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_FALSE(headers.has(\"other-auth-userinfo\"));\n}\n\n// Test requires any with both auth returning errors: last error is\n// JwtUnknownIssuer\nTEST_F(GroupVerifierTest, TestRequiresAnyLastIsJwtUnknownIssuer) {\n  TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_);\n  auto mock_auth = std::make_unique<MockAuthenticator>();\n  createSyncMockAuthsAndVerifier(StatusMap{{\"example_provider\", Status::JwtHeaderBadKid},\n                                           {\"other_provider\", Status::JwtUnknownIssuer}});\n\n  // onComplete with failure status, not payload\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"example-auth-userinfo\", \"\"},\n      {\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_FALSE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_FALSE(headers.has(\"other-auth-userinfo\"));\n}\n\n// Test contains a 2 provider_name in a require any along with another provider_name in require all.\n// Test simulates first require any is OK and provider_name is OK.\nTEST_F(GroupVerifierTest, TestAnyInAllFirstAnyIsOk) {\n  TestUtility::loadFromYaml(AllWithAny, proto_config_);\n  createSyncMockAuthsAndVerifier(StatusMap{{\"provider_1\", Status::Ok}, {\"provider_3\", Status::Ok}});\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) {\n    EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({\"provider_1\", \"provider_3\"})));\n  }));\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  
verifier_->verify(context_);\n}\n\n// Test contains a 2 provider_name in a require any along with another provider_name in require all.\n// Test simulates first require any is OK and provider_name is OK.\nTEST_F(GroupVerifierTest, TestAnyInAllLastAnyIsOk) {\n  TestUtility::loadFromYaml(AllWithAny, proto_config_);\n  createSyncMockAuthsAndVerifier(StatusMap{{\"provider_1\", Status::JwtUnknownIssuer},\n                                           {\"provider_2\", Status::Ok},\n                                           {\"provider_3\", Status::Ok}});\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) {\n    EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({\"provider_2\", \"provider_3\"})));\n  }));\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\n// Test contains a 2 provider_name in a require any along with another provider_name in require all.\n// Test simulates all require any OK and provider_name is OK.\nTEST_F(GroupVerifierTest, TestAnyInAllBothInRequireAnyIsOk) {\n  TestUtility::loadFromYaml(AllWithAny, proto_config_);\n  auto callbacks = createAsyncMockAuthsAndVerifier(\n      std::vector<std::string>{\"provider_1\", \"provider_2\", \"provider_3\"});\n\n  // AsyncMockVerifier doesn't set payload\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  callbacks[\"provider_1\"](Status::Ok);\n  callbacks[\"provider_2\"](Status::Ok);\n  callbacks[\"provider_3\"](Status::Ok);\n}\n\n// Test contains a 2 provider_name in a require any along with another provider_name in require all.\n// Test simulates all require any failed and 
provider_name is OK.\nTEST_F(GroupVerifierTest, TestAnyInAllBothInRequireAnyFailed) {\n  TestUtility::loadFromYaml(AllWithAny, proto_config_);\n  auto callbacks = createAsyncMockAuthsAndVerifier(\n      std::vector<std::string>{\"provider_1\", \"provider_2\", \"provider_3\"});\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwksFetchFail)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  callbacks[\"provider_1\"](Status::JwksFetchFail);\n  callbacks[\"provider_2\"](Status::JwksFetchFail);\n  callbacks[\"provider_3\"](Status::Ok);\n}\n\n// Test contains a requires any which in turn has 2 requires all. Mock auths simulate JWKs cache\n// hits and inline return of errors. Requires any returns the error last received back to the\n// caller.\nTEST_F(GroupVerifierTest, TestAllInAnyBothRequireAllFailed) {\n  TestUtility::loadFromYaml(AnyWithAll, proto_config_);\n  createSyncMockAuthsAndVerifier(\n      StatusMap{{\"provider_1\", Status::JwksFetchFail}, {\"provider_3\", Status::JwtExpired}});\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtExpired)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\n// Test contains a requires any which in turn has 2 requires all. The first inner requires all is\n// completed with OKs. 
Mock auths simulate JWKs cache misses and async return of OKs.\nTEST_F(GroupVerifierTest, TestAllInAnyFirstAllIsOk) {\n  TestUtility::loadFromYaml(AnyWithAll, proto_config_);\n  auto callbacks = createAsyncMockAuthsAndVerifier(\n      std::vector<std::string>{\"provider_1\", \"provider_2\", \"provider_3\", \"provider_4\"});\n\n  // AsyncMockVerifier doesn't set payload\n  EXPECT_CALL(mock_cb_, setPayload(_)).Times(0);\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  callbacks[\"provider_2\"](Status::Ok);\n  callbacks[\"provider_3\"](Status::JwtMissed);\n  callbacks[\"provider_1\"](Status::Ok);\n}\n\n// Test contains a requires any which in turn has 2 requires all. The last inner requires all is\n// completed with OKs. Mock auths simulate JWKs cache misses and async return of OKs.\nTEST_F(GroupVerifierTest, TestAllInAnyLastAllIsOk) {\n  TestUtility::loadFromYaml(AnyWithAll, proto_config_);\n  auto callbacks = createAsyncMockAuthsAndVerifier(\n      std::vector<std::string>{\"provider_1\", \"provider_2\", \"provider_3\", \"provider_4\"});\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  callbacks[\"provider_3\"](Status::Ok);\n  callbacks[\"provider_4\"](Status::Ok);\n  callbacks[\"provider_2\"](Status::JwtExpired);\n}\n\n// Test contains a requires any which in turn has 2 requires all. The both inner requires all are\n// completed with OKs. 
Mock auths simulate JWKs cache misses and async return of OKs.\nTEST_F(GroupVerifierTest, TestAllInAnyBothRequireAllAreOk) {\n  TestUtility::loadFromYaml(AnyWithAll, proto_config_);\n  auto callbacks = createAsyncMockAuthsAndVerifier(\n      std::vector<std::string>{\"provider_1\", \"provider_2\", \"provider_3\", \"provider_4\"});\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  callbacks[\"provider_1\"](Status::Ok);\n  callbacks[\"provider_2\"](Status::Ok);\n  callbacks[\"provider_3\"](Status::Ok);\n  callbacks[\"provider_4\"](Status::Ok);\n}\n\n// Test require any with additional allow all\nTEST_F(GroupVerifierTest, TestRequiresAnyWithAllowAll) {\n  TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_);\n  proto_config_.mutable_rules(0)\n      ->mutable_requires()\n      ->mutable_requires_any()\n      ->add_requirements()\n      ->mutable_allow_missing_or_failed();\n\n  auto callbacks = createAsyncMockAuthsAndVerifier(\n      std::vector<std::string>{\"example_provider\", \"other_provider\"});\n  auto mock_auth = std::make_unique<MockAuthenticator>();\n  EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _))\n      .WillOnce(Invoke([&](Http::HeaderMap&, Tracing::Span&, std::vector<JwtLocationConstPtr>*,\n                           SetPayloadCallback, AuthenticatorCallback callback) {\n        callbacks[allowfailed] = std::move(callback);\n      }));\n  EXPECT_CALL(*mock_auth, onDestroy()).Times(1);\n  mock_auths_[allowfailed] = std::move(mock_auth);\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n\n  auto headers = Http::TestRequestHeaderMapImpl{};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  callbacks[allowfailed](Status::Ok);\n  // with requires any, if any inner verifier returns OK the whole any verifier should 
return OK.\n}\n\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/jwks_cache_test.cc",
    "content": "#include <chrono>\n#include <thread>\n\n#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/http/jwt_authn/jwks_cache.h\"\n\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing ::google::jwt_verify::Status;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nclass JwksCacheTest : public testing::Test {\nprotected:\n  JwksCacheTest() : api_(Api::createApiForTest()) {}\n  void SetUp() override {\n    TestUtility::loadFromYaml(ExampleConfig, config_);\n    cache_ = JwksCache::create(config_, time_system_, *api_);\n    jwks_ = google::jwt_verify::Jwks::createFrom(PublicKey, google::jwt_verify::Jwks::JWKS);\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  JwtAuthentication config_;\n  JwksCachePtr cache_;\n  google::jwt_verify::JwksPtr jwks_;\n  Api::ApiPtr api_;\n};\n\n// Test findByIssuer\nTEST_F(JwksCacheTest, TestFindByIssuer) {\n  EXPECT_TRUE(cache_->findByIssuer(\"https://example.com\") != nullptr);\n  EXPECT_TRUE(cache_->findByIssuer(\"other-issuer\") == nullptr);\n}\n\n// Test setRemoteJwks and its expiration\nTEST_F(JwksCacheTest, TestSetRemoteJwks) {\n  auto& provider0 = (*config_.mutable_providers())[std::string(ProviderName)];\n  // Set cache_duration to 1 second to test expiration\n  provider0.mutable_remote_jwks()->mutable_cache_duration()->set_seconds(1);\n  cache_ = JwksCache::create(config_, time_system_, *api_);\n\n  auto jwks = cache_->findByIssuer(\"https://example.com\");\n  EXPECT_TRUE(jwks->getJwksObj() == nullptr);\n\n  EXPECT_EQ(jwks->setRemoteJwks(std::move(jwks_))->getStatus(), Status::Ok);\n  EXPECT_FALSE(jwks->getJwksObj() == nullptr);\n  
EXPECT_FALSE(jwks->isExpired());\n\n  // cache duration is 1 second, sleep two seconds to expire it\n  time_system_.advanceTimeWait(std::chrono::seconds(2));\n  EXPECT_TRUE(jwks->isExpired());\n}\n\n// Test setRemoteJwks and use default cache duration.\nTEST_F(JwksCacheTest, TestSetRemoteJwksWithDefaultCacheDuration) {\n  auto& provider0 = (*config_.mutable_providers())[std::string(ProviderName)];\n  // Clear cache_duration to use default one.\n  provider0.mutable_remote_jwks()->clear_cache_duration();\n  cache_ = JwksCache::create(config_, time_system_, *api_);\n\n  auto jwks = cache_->findByIssuer(\"https://example.com\");\n  EXPECT_TRUE(jwks->getJwksObj() == nullptr);\n\n  EXPECT_EQ(jwks->setRemoteJwks(std::move(jwks_))->getStatus(), Status::Ok);\n  EXPECT_FALSE(jwks->getJwksObj() == nullptr);\n  EXPECT_FALSE(jwks->isExpired());\n}\n\n// Test a good local jwks\nTEST_F(JwksCacheTest, TestGoodInlineJwks) {\n  auto& provider0 = (*config_.mutable_providers())[std::string(ProviderName)];\n  provider0.clear_remote_jwks();\n  auto local_jwks = provider0.mutable_local_jwks();\n  local_jwks->set_inline_string(PublicKey);\n\n  cache_ = JwksCache::create(config_, time_system_, *api_);\n\n  auto jwks = cache_->findByIssuer(\"https://example.com\");\n  EXPECT_FALSE(jwks->getJwksObj() == nullptr);\n  EXPECT_FALSE(jwks->isExpired());\n}\n\n// Test a bad local jwks\nTEST_F(JwksCacheTest, TestBadInlineJwks) {\n  auto& provider0 = (*config_.mutable_providers())[std::string(ProviderName)];\n  provider0.clear_remote_jwks();\n  auto local_jwks = provider0.mutable_local_jwks();\n  local_jwks->set_inline_string(\"BAD-JWKS\");\n\n  cache_ = JwksCache::create(config_, time_system_, *api_);\n\n  auto jwks = cache_->findByIssuer(\"https://example.com\");\n  EXPECT_TRUE(jwks->getJwksObj() == nullptr);\n}\n\n// Test audiences with different formats\nTEST_F(JwksCacheTest, TestAudiences) {\n  auto jwks = cache_->findByIssuer(\"https://example.com\");\n\n  /**\n   * when comparing audiences, 
protocol scheme and trailing slash\n   * should be sanitized.\n   * In this test, jwks config has following:\n   *\n   * audiences:\n   * - example_service\n   * - http://example_service1\n   * - https://example_service2/\n   *\n   */\n\n  // incoming has http://, config doesn't\n  EXPECT_TRUE(jwks->areAudiencesAllowed({\"http://example_service/\"}));\n\n  // incoming has https://, config is http://\n  // incoming has tailing slash, config has not tailing slash\n  EXPECT_TRUE(jwks->areAudiencesAllowed({\"https://example_service1/\"}));\n\n  // incoming without tailing slash, config has tailing slash\n  // incoming has http://, config is https://\n  EXPECT_TRUE(jwks->areAudiencesAllowed({\"http://example_service2\"}));\n\n  // Multiple audiences: a good one and a wrong one\n  EXPECT_TRUE(jwks->areAudiencesAllowed({\"example_service\", \"wrong-audience\"}));\n\n  // Wrong multiple audiences\n  EXPECT_FALSE(jwks->areAudiencesAllowed({\"wrong-audience1\", \"wrong-audience2\"}));\n}\n\n// Test findByProvider\nTEST_F(JwksCacheTest, TestFindByProvider) {\n  EXPECT_TRUE(cache_->findByProvider(ProviderName) != nullptr);\n  EXPECT_TRUE(cache_->findByProvider(\"other-provider\") == nullptr);\n}\n\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/matcher_test.cc",
    "content": "#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/jwt_authn/matcher.h\"\n\n#include \"test/extensions/filters/http/jwt_authn/mock.h\"\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/test_common/utility.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::RequirementRule;\nusing Envoy::Http::TestRequestHeaderMapImpl;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nclass MatcherTest : public testing::Test {\npublic:\n};\n\nTEST_F(MatcherTest, TestMatchPrefix) {\n  const char config[] = R\"(match:\n  prefix: \"/match\")\";\n  RequirementRule rule;\n  TestUtility::loadFromYaml(config, rule);\n  MatcherConstPtr matcher = Matcher::create(rule);\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/match/this\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/MATCH\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/matching\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/matc\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/no\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n}\n\nTEST_F(MatcherTest, TestMatchRegex) {\n  const char config[] = R\"(match:\n  regex: \"/[^c][au]t\")\";\n  RequirementRule rule;\n  TestUtility::loadFromYaml(config, rule);\n  MatcherConstPtr matcher = Matcher::create(rule);\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/but\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/mat?ok=bye\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/maut\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = 
TestRequestHeaderMapImpl{{\":path\", \"/cut\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/mut/\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n}\n\nTEST_F(MatcherTest, TestMatchSafeRegex) {\n  const char config[] = R\"(\nmatch:\n  safe_regex:\n    google_re2: {}\n    regex: \"/[^c][au]t\")\";\n\n  RequirementRule rule;\n  TestUtility::loadFromYaml(config, rule);\n  MatcherConstPtr matcher = Matcher::create(rule);\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/but\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/mat?ok=bye\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/maut\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/cut\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/mut/\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n}\n\nTEST_F(MatcherTest, TestMatchPath) {\n  const char config[] = R\"(match:\n  path: \"/match\"\n  case_sensitive: false)\";\n  RequirementRule rule;\n  TestUtility::loadFromYaml(config, rule);\n  MatcherConstPtr matcher = Matcher::create(rule);\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/match\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/MATCH\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/match?ok=bye\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/matc\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/match/\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/matching\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n}\n\nTEST_F(MatcherTest, TestMatchQuery) {\n  const char config[] = 
R\"(match:\n  prefix: \"/\"\n  query_parameters:\n  - name: foo\n    value: bar)\";\n  RequirementRule rule;\n  TestUtility::loadFromYaml(config, rule);\n  MatcherConstPtr matcher = Matcher::create(rule);\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/boo?foo=bar\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/boo?ok=bye\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/foo?bar=bar\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/boo?foo\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/boo?bar=foo\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n}\n\nTEST_F(MatcherTest, TestMatchHeader) {\n  const char config[] = R\"(match:\n  prefix: \"/\"\n  headers:\n  - name: a)\";\n  RequirementRule rule;\n  TestUtility::loadFromYaml(config, rule);\n  MatcherConstPtr matcher = Matcher::create(rule);\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\"a\", \"\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\"a\", \"some\"}, {\"b\", \"\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\"aa\", \"\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/\"}, {\"\", \"\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n}\n\nTEST_F(MatcherTest, TestMatchPathAndHeader) {\n  const char config[] = R\"(match:\n  path: \"/boo\"\n  query_parameters:\n  - name: foo\n    value: bar)\";\n  RequirementRule rule;\n  TestUtility::loadFromYaml(config, rule);\n  MatcherConstPtr matcher = Matcher::create(rule);\n  auto headers = TestRequestHeaderMapImpl{{\":path\", \"/boo?foo=bar\"}};\n  
EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/boo?ok=bye\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/foo?bar=bar\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/boo?foo\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":path\", \"/boo?bar=foo\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n}\n\nTEST_F(MatcherTest, TestMatchConnect) {\n  const char config[] = R\"(match:\n  connect_matcher: {})\";\n  RequirementRule rule;\n  TestUtility::loadFromYaml(config, rule);\n  MatcherConstPtr matcher = Matcher::create(rule);\n  auto headers = TestRequestHeaderMapImpl{{\":method\", \"CONNECT\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":method\", \"GET\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n}\n\nTEST_F(MatcherTest, TestMatchConnectQuery) {\n  const char config[] = R\"(match:\n  connect_matcher: {}\n  query_parameters:\n  - name: foo\n    string_match:\n      exact: \"bar\")\";\n  RequirementRule rule;\n  TestUtility::loadFromYaml(config, rule);\n  MatcherConstPtr matcher = Matcher::create(rule);\n  auto headers = TestRequestHeaderMapImpl{{\":method\", \"CONNECT\"}, {\":path\", \"/boo?foo=bar\"}};\n  EXPECT_TRUE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":method\", \"GET\"}, {\":path\", \"/boo?foo=bar\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n  headers = TestRequestHeaderMapImpl{{\":method\", \"CONNECT\"}, {\":path\", \"/boo?ok=bye\"}};\n  EXPECT_FALSE(matcher->matches(headers));\n}\n\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/mock.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"common/http/message_impl.h\"\n\n#include \"extensions/filters/http/jwt_authn/authenticator.h\"\n#include \"extensions/filters/http/jwt_authn/verifier.h\"\n\n#include \"test/mocks/upstream/cluster_manager.h\"\n\n#include \"gmock/gmock.h\"\n\nusing ::google::jwt_verify::Status;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\nclass MockAuthFactory : public AuthFactory {\npublic:\n  MOCK_METHOD(AuthenticatorPtr, create,\n              (const ::google::jwt_verify::CheckAudience*, const absl::optional<std::string>&, bool,\n               bool),\n              (const));\n};\n\nclass MockAuthenticator : public Authenticator {\npublic:\n  MOCK_METHOD(void, doVerify,\n              (Http::HeaderMap & headers, Tracing::Span& parent_span,\n               std::vector<JwtLocationConstPtr>* tokens, SetPayloadCallback set_payload_cb,\n               AuthenticatorCallback callback));\n\n  void verify(Http::HeaderMap& headers, Tracing::Span& parent_span,\n              std::vector<JwtLocationConstPtr>&& tokens, SetPayloadCallback set_payload_cb,\n              AuthenticatorCallback callback) override {\n    doVerify(headers, parent_span, &tokens, std::move(set_payload_cb), std::move(callback));\n  }\n\n  MOCK_METHOD(void, onDestroy, ());\n};\n\nclass MockVerifierCallbacks : public Verifier::Callbacks {\npublic:\n  MOCK_METHOD(void, setPayload, (const ProtobufWkt::Struct& payload));\n  MOCK_METHOD(void, onComplete, (const Status& status));\n};\n\nclass MockVerifier : public Verifier {\npublic:\n  MOCK_METHOD(void, verify, (ContextSharedPtr context), (const));\n};\n\nclass MockExtractor : public Extractor {\npublic:\n  MOCK_METHOD(std::vector<JwtLocationConstPtr>, extract, (const Http::RequestHeaderMap& headers),\n              (const));\n  MOCK_METHOD(void, sanitizePayloadHeaders, (Http::HeaderMap & headers), (const));\n};\n\n// A mock HTTP upstream with response 
body.\nclass MockUpstream {\npublic:\n  MockUpstream(Upstream::MockClusterManager& mock_cm, const std::string& response_body)\n      : request_(&mock_cm.async_client_), response_body_(response_body) {\n    ON_CALL(mock_cm.async_client_, send_(_, _, _))\n        .WillByDefault(\n            Invoke([this](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                          const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n              Http::ResponseMessagePtr response_message(\n                  new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n              response_message->body().add(response_body_);\n              cb.onSuccess(request_, std::move(response_message));\n              called_count_++;\n              return &request_;\n            }));\n  }\n\n  int called_count() const { return called_count_; }\n\nprivate:\n  Http::MockAsyncClientRequest request_;\n  std::string response_body_;\n  int called_count_{};\n};\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/provider_verifier_test.cc",
    "content": "#include \"envoy/extensions/filters/http/jwt_authn/v3/config.pb.h\"\n\n#include \"extensions/filters/http/jwt_authn/filter_config.h\"\n#include \"extensions/filters/http/jwt_authn/verifier.h\"\n\n#include \"test/extensions/filters/http/jwt_authn/mock.h\"\n#include \"test/extensions/filters/http/jwt_authn/test_common.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nusing envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication;\nusing ::google::jwt_verify::Status;\nusing ::testing::Eq;\nusing ::testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\nnamespace {\n\nProtobufWkt::Struct getExpectedPayload(const std::string& name) {\n  ProtobufWkt::Struct expected_payload;\n  TestUtility::loadFromJson(ExpectedPayloadJSON, expected_payload);\n\n  ProtobufWkt::Struct struct_obj;\n  *(*struct_obj.mutable_fields())[name].mutable_struct_value() = expected_payload;\n  return struct_obj;\n}\n\nclass ProviderVerifierTest : public testing::Test {\npublic:\n  void createVerifier() {\n    filter_config_ = FilterConfigImpl::create(proto_config_, \"\", mock_factory_ctx_);\n    verifier_ = Verifier::create(proto_config_.rules(0).requires(), proto_config_.providers(),\n                                 *filter_config_);\n  }\n\n  JwtAuthentication proto_config_;\n  std::shared_ptr<FilterConfigImpl> filter_config_;\n  VerifierConstPtr verifier_;\n  NiceMock<Server::Configuration::MockFactoryContext> mock_factory_ctx_;\n  ContextSharedPtr context_;\n  MockVerifierCallbacks mock_cb_;\n  NiceMock<Tracing::MockSpan> parent_span_;\n};\n\nTEST_F(ProviderVerifierTest, TestOkJWT) {\n  TestUtility::loadFromYaml(ExampleConfig, proto_config_);\n  (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata(\n      \"my_payload\");\n  createVerifier();\n  MockUpstream 
mock_pubkey(mock_factory_ctx_.cluster_manager_, PublicKey);\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) {\n    EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload(\"my_payload\")));\n  }));\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n      {\"sec-istio-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_EQ(ExpectedPayloadValue, headers.get_(\"sec-istio-auth-userinfo\"));\n}\n\nTEST_F(ProviderVerifierTest, TestSpanPassedDown) {\n  TestUtility::loadFromYaml(ExampleConfig, proto_config_);\n  (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata(\n      \"my_payload\");\n  createVerifier();\n  MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, PublicKey);\n\n  EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) {\n    EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload(\"my_payload\")));\n  }));\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::Ok)).Times(1);\n\n  auto options = Http::AsyncClient::RequestOptions()\n                     .setTimeout(std::chrono::milliseconds(5 * 1000))\n                     .setParentSpan(parent_span_)\n                     .setChildSpanName(\"JWT Remote PubKey Fetch\");\n  EXPECT_CALL(mock_factory_ctx_.cluster_manager_.async_client_, send_(_, _, Eq(options))).Times(1);\n\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n      {\"sec-istio-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n}\n\nTEST_F(ProviderVerifierTest, TestMissedJWT) {\n  TestUtility::loadFromYaml(ExampleConfig, proto_config_);\n  
createVerifier();\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtMissed)).Times(1);\n\n  auto headers = Http::TestRequestHeaderMapImpl{{\"sec-istio-auth-userinfo\", \"\"}};\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_FALSE(headers.has(\"sec-istio-auth-userinfo\"));\n}\n\n// This test verifies that JWT must be issued by the provider specified in the requirement.\nTEST_F(ProviderVerifierTest, TestTokenRequirementProviderMismatch) {\n  const char config[] = R\"(\nproviders:\n  example_provider:\n    issuer: https://example.com\n    audiences:\n    - example_service\n    - http://example_service1\n    - https://example_service2/\n    remote_jwks:\n      http_uri:\n        uri: https://pubkey_server/pubkey_path\n        cluster: pubkey_cluster\n    forward_payload_header: example-auth-userinfo\n  other_provider:\n    issuer: other_issuer\n    forward_payload_header: other-auth-userinfo\nrules:\n- match:\n    path: \"/\"\n  requires:\n    provider_name: \"other_provider\"\n)\";\n  TestUtility::loadFromYaml(config, proto_config_);\n  createVerifier();\n\n  EXPECT_CALL(mock_cb_, onComplete(Status::JwtUnknownIssuer)).Times(1);\n\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\"Authorization\", \"Bearer \" + std::string(GoodToken)},\n      {\"example-auth-userinfo\", \"\"},\n      {\"other-auth-userinfo\", \"\"},\n  };\n  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);\n  verifier_->verify(context_);\n  EXPECT_TRUE(headers.has(\"example-auth-userinfo\"));\n  EXPECT_FALSE(headers.has(\"other-auth-userinfo\"));\n}\n\n// This test verifies that JWT requirement can override audiences\nTEST_F(ProviderVerifierTest, TestRequiresProviderWithAudiences) {\n  TestUtility::loadFromYaml(ExampleConfig, proto_config_);\n  auto* requires =\n      proto_config_.mutable_rules(0)->mutable_requires()->mutable_provider_and_audiences();\n  requires->set_provider_name(\"example_provider\");\n  
requires->add_audiences(\"invalid_service\");\n  createVerifier();\n  MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, PublicKey);\n\n  EXPECT_CALL(mock_cb_, onComplete(_))\n      .WillOnce(\n          Invoke([](const Status& status) { ASSERT_EQ(status, Status::JwtAudienceNotAllowed); }))\n      .WillOnce(Invoke([](const Status& status) { ASSERT_EQ(status, Status::Ok); }));\n\n  auto headers =\n      Http::TestRequestHeaderMapImpl{{\"Authorization\", \"Bearer \" + std::string(GoodToken)}};\n  verifier_->verify(Verifier::createContext(headers, parent_span_, &mock_cb_));\n  headers =\n      Http::TestRequestHeaderMapImpl{{\"Authorization\", \"Bearer \" + std::string(InvalidAudToken)}};\n  verifier_->verify(Verifier::createContext(headers, parent_span_, &mock_cb_));\n}\n\n// This test verifies that requirement referencing nonexistent provider will throw exception\nTEST_F(ProviderVerifierTest, TestRequiresNonexistentProvider) {\n  TestUtility::loadFromYaml(ExampleConfig, proto_config_);\n  proto_config_.mutable_rules(0)->mutable_requires()->set_provider_name(\"nosuchprovider\");\n\n  EXPECT_THROW(FilterConfigImpl::create(proto_config_, \"\", mock_factory_ctx_), EnvoyException);\n}\n\n} // namespace\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/jwt_authn/test_common.h",
    "content": "#pragma once\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace JwtAuthn {\n\n// RS256 private key\n//-----BEGIN PRIVATE KEY-----\n//    MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC6n3u6qsX0xY49\n//    o+TBJoF64A8s6v0UpxpYZ1UQbNDh/dmrlYpVmjDH1MIHGYiY0nWqZSLXekHyi3Az\n//    +XmV9jUAUEzFVtAJRee0ui+ENqJK9injAYOMXNCJgD6lSryHoxRkGeGV5iuRTteU\n//    IHA1XI3yo0ySksDsoVljP7jzoadXY0gknH/gEZrcd0rBAbGLa2O5CxC9qjlbjGZJ\n//    VpoRaikHAzLZCaWFIVC49SlNrLBOpRxSr/pJ8AeFnggNr8XER3ZzbPyAUa1+y31x\n//    jeVFh/5z9l1uhjeao31K7f6PfPmvZIdaWEH8s0CPJaUEay9sY+VOoPOJhDBk3hoa\n//    ypUpBv1XAgMBAAECggEAc5HaJJIm/trsqD17pyV6X6arnyxyx7xn80Eii4ZnoNv8\n//    VWbJARP4i3e1JIJqdgE3PutctUYP2u0A8h7XbcfHsMcJk9ecA3IX+HKohF71CCkD\n//    bYH9fgnoVo5lvSTYNcMHGKpyacrdRiImHKQt+M21VgJMpCRfdurAmVbX6YA9Sj6w\n//    SBFrZbWkBHiHg7w++xKr+VeTHW/8fXI5bvSPAm/XB6dDKAcSXYiJJJhIoaVR9cHn\n//    1ePRDLpEwfDpBHeepd/S3qR37mIbHmo8SVytDY2xTUaIoaRfXRWGMYSyxl0y4RsZ\n//    Vo6Tp9Tj2fyohvB/S+lE34zhxnsHToK2JZvPeoyHCQKBgQDyEcjaUZiPdx7K63CT\n//    d57QNYC6DTjtKWnfO2q/vAVyAPwS30NcVuXj3/1yc0L+eExpctn8tcLfvDi1xZPY\n//    dW2L3SZKgRJXL+JHTCEkP8To/qNLhBqitcKYwp0gtpoZbUjZdZwn18QJx7Mw/nFC\n//    lJhSYRl+FjVolY3qBaS6eD7imwKBgQDFXNmeAV5FFF0FqGRsLYl0hhXTR6Hi/hKQ\n//    OyRALBW9LUKbsazwWEFGRlqbEWd1OcOF5SSV4d3u7wLQRTDeNELXUFvivok12GR3\n//    gNl9nDJ5KKYGFmqxM0pzfbT5m3Lsrr2FTIq8gM9GBpQAOmzQIkEu62yELtt2rRf0\n//    1pTh+UbN9QKBgF88kAEUySjofLzpFElwbpML+bE5MoRcHsMs5Tq6BopryMDEBgR2\n//    S8vzfAtjPaBQQ//Yp9q8yAauTsF1Ek2/JXI5d68oSMb0l9nlIcTZMedZB3XWa4RI\n//    bl8bciZEsSv/ywGDPASQ5xfR8bX85SKEw8jlWto4cprK/CJuRfj3BgaxAoGAAmQf\n//    ltR5aejXP6xMmyrqEWlWdlrV0UQ2wVyWEdj24nXb6rr6V2caU1mi22IYmMj8X3Dp\n//    Qo+b+rsWk6Ni9i436RfmJRcd3nMitHfxKp5r1h/x8vzuifsPGdsaCDQj7k4nqafF\n//    vobo+/Y0cNREYTkpBQKBLBDNQ+DQ+3xmDV7RxskCgYBCo6u2b/DZWFLoq3VpAm8u\n//    1ZgL8qxY/bbyA02IKF84QPFczDM5wiLjDGbGnOcIYYMvTHf1LJU4FozzYkB0GicX\n//    Y0tBQIHaaLWbPk1RZdPfR9kAp16iwk8H+V4UVjLfsTP7ocEfNCzZztmds83h8mTL\n//    
DSwE5aY76Cs8XLcF/GNJRQ==\n//-----END PRIVATE KEY-----\n\n// A good public key\nconst char PublicKey[] = R\"(\n{\n  \"keys\": [\n    {\n      \"kty\": \"RSA\",\n      \"alg\": \"RS256\",\n      \"use\": \"sig\",\n      \"kid\": \"62a93512c9ee4c7f8067b5a216dade2763d32a47\",\n      \"n\": \"up97uqrF9MWOPaPkwSaBeuAPLOr9FKcaWGdVEGzQ4f3Zq5WKVZowx9TCBxmImNJ1qmUi13pB8otwM_l5lfY1AFBMxVbQCUXntLovhDaiSvYp4wGDjFzQiYA-pUq8h6MUZBnhleYrkU7XlCBwNVyN8qNMkpLA7KFZYz-486GnV2NIJJx_4BGa3HdKwQGxi2tjuQsQvao5W4xmSVaaEWopBwMy2QmlhSFQuPUpTaywTqUcUq_6SfAHhZ4IDa_FxEd2c2z8gFGtfst9cY3lRYf-c_ZdboY3mqN9Su3-j3z5r2SHWlhB_LNAjyWlBGsvbGPlTqDziYQwZN4aGsqVKQb9Vw\",\n      \"e\": \"AQAB\"\n    },\n    {\n      \"kty\": \"RSA\",\n      \"alg\": \"RS256\",\n      \"use\": \"sig\",\n      \"kid\": \"b3319a147514df7ee5e4bcdee51350cc890cc89e\",\n      \"n\": \"up97uqrF9MWOPaPkwSaBeuAPLOr9FKcaWGdVEGzQ4f3Zq5WKVZowx9TCBxmImNJ1qmUi13pB8otwM_l5lfY1AFBMxVbQCUXntLovhDaiSvYp4wGDjFzQiYA-pUq8h6MUZBnhleYrkU7XlCBwNVyN8qNMkpLA7KFZYz-486GnV2NIJJx_4BGa3HdKwQGxi2tjuQsQvao5W4xmSVaaEWopBwMy2QmlhSFQuPUpTaywTqUcUq_6SfAHhZ4IDa_FxEd2c2z8gFGtfst9cY3lRYf-c_ZdboY3mqN9Su3-j3z5r2SHWlhB_LNAjyWlBGsvbGPlTqDziYQwZN4aGsqVKQb9Vw\",\n      \"e\": \"AQAB\"\n    }\n  ]\n}\n)\";\n\n// A good config.\nconst char ExampleConfig[] = R\"(\nproviders:\n  example_provider:\n    issuer: https://example.com\n    audiences:\n    - example_service\n    - http://example_service1\n    - https://example_service2/\n    remote_jwks:\n      http_uri:\n        uri: https://pubkey_server/pubkey_path\n        cluster: pubkey_cluster\n        timeout:\n          seconds: 5\n      cache_duration:\n        seconds: 600\n    forward_payload_header: sec-istio-auth-userinfo\nrules:\n- match:\n    path: \"/\"\n  requires:\n    provider_name: \"example_provider\"\nbypass_cors_preflight: true\n)\";\n\n// The name of provider for above config.\nconst char ProviderName[] = \"example_provider\";\n\n// Payload:\n// 
{\"iss\":\"https://example.com\",\"sub\":\"test@example.com\",\"aud\":\"example_service\",\"exp\":2001001001}\nconst char GoodToken[] = \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJodHRwczovL2V4YW1wbGUu\"\n                         \"Y29tIiwic3ViIjoidGVzdEBleGFtcGxlLmNvbSIsImV4cCI6MjAwMTAwMTAwMSwiY\"\n                         \"XVkIjoiZXhhbXBsZV9zZXJ2aWNlIn0.cuui_Syud76B0tqvjESE8IZbX7vzG6xA-M\"\n                         \"Daof1qEFNIoCFT_YQPkseLSUSR2Od3TJcNKk-dKjvUEL1JW3kGnyC1dBx4f3-Xxro\"\n                         \"yL23UbR2eS8TuxO9ZcNCGkjfvH5O4mDb6cVkFHRDEolGhA7XwNiuVgkGJ5Wkrvshi\"\n                         \"h6nqKXcPNaRx9lOaRWg2PkE6ySNoyju7rNfunXYtVxPuUIkl0KMq3WXWRb_cb8a_Z\"\n                         \"EprqSZUzi_ZzzYzqBNVhIJujcNWij7JRra2sXXiSAfKjtxHQoxrX8n4V1ySWJ3_1T\"\n                         \"H_cJcdfS_RKP7YgXRWC0L16PNF5K7iqRqmjKALNe83ZFnFIw\";\n\n// Payload:\n// {\"iss\":\"https://example.com\",\"sub\":\"test@example.com\",\"exp\":null}\nconst char NonExpiringToken[] =\n    \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJodHRwczovL2V4YW1wbGUu\"\n    \"Y29tIiwic3ViIjoidGVzdEBleGFtcGxlLmNvbSIsImlhdCI6MTUzMzE3NTk0Mn0.OSh-\"\n    \"AcY9dCUibXiIZzPlTdEsYH8xP3QkCJDesO3LVu4ndgTrxDnNuR3I4_oV4tjtirmLZD3sx\"\n    \"96wmLiIhOyqj3nipIdf_aQWcmET0XoRqGixOKse5FlHyU_VC1Jj9AlMvSz9zyCvKxMyP0\"\n    \"CeA-bhI_Qs-I9vBPK8pd-EUOespUqWMQwNdtrOdXLcvF8EA5BV5G2qRGzCU0QJaW0Dpyj\"\n    \"YF7ZCswRGorc2oMt5duXSp3-L1b9dDrnLwroxUrmQIZz9qvfwdDR-guyYSjKVQu5NJAyy\"\n    \"sd8XKNzmHqJ2fYhRjc5s7l5nIWTDyBXSdPKQ8cBnfFKoxaRhmMBjdEn9RB7r6A\";\n\n// An expired token\n// {\"iss\":\"https://example.com\",\"sub\":\"test@example.com\",\"aud\":\"example_service\",\"exp\":1205005587}\nconst char ExpiredToken[] = \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJodHRwczovL2V4YW1wbGUu\"\n                            \"Y29tIiwic3ViIjoidGVzdEBleGFtcGxlLmNvbSIsImV4cCI6MTIwNTAwNTU4NywiY\"\n                            \"XVkIjoiZXhhbXBsZV9zZXJ2aWNlIn0.izDa6aHNgbsbeRzucE0baXIP7SXOrgopYQ\"\n                     
       \"ALLFAsKq_N0GvOyqpAZA9nwCAhqCkeKWcL-9gbQe3XJa0KN3FPa2NbW4ChenIjmf2\"\n                            \"QYXOuOQaDu9QRTdHEY2Y4mRy6DiTZAsBHWGA71_cLX-rzTSO_8aC8eIqdHo898oJw\"\n                            \"3E8ISKdryYjayb9X3wtF6KLgNomoD9_nqtOkliuLElD8grO0qHKI1xQurGZNaoeyi\"\n                            \"V1AdwgX_5n3SmQTacVN0WcSgk6YJRZG6VE8PjxZP9bEameBmbSB0810giKRpdTU1-\"\n                            \"RJtjq6aCSTD4CYXtW38T5uko4V-S4zifK3BXeituUTebkgoA\";\n\n// An NotYetValid token\n// {\"iss\":\"https://example.com\",\"sub\":\"test@example.com\",\"aud\":\"example_service\",\"nbf\":9223372036854775807}\nconst char NotYetValidToken[] =\n    \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.\"\n    \"eyJpc3MiOiJodHRwczovL2V4YW1wbGUuY29tIiwic3ViIjoidGVzdEBleGFtcGxlLmNvbSIsImF1\"\n    \"ZCI6ImV4YW1wbGVfc2VydmljZSIsIm5iZiI6OTIyMzM3MjAzNjg1NDc3NTgwN30K\"\n    \".izDa6aHNgbsbeRzucE0baXIP7SXOrgopYQ\"\n    \"ALLFAsKq_N0GvOyqpAZA9nwCAhqCkeKWcL-9gbQe3XJa0KN3FPa2NbW4ChenIjmf2\"\n    \"QYXOuOQaDu9QRTdHEY2Y4mRy6DiTZAsBHWGA71_cLX-rzTSO_8aC8eIqdHo898oJw\"\n    \"3E8ISKdryYjayb9X3wtF6KLgNomoD9_nqtOkliuLElD8grO0qHKI1xQurGZNaoeyi\"\n    \"V1AdwgX_5n3SmQTacVN0WcSgk6YJRZG6VE8PjxZP9bEameBmbSB0810giKRpdTU1-\"\n    \"RJtjq6aCSTD4CYXtW38T5uko4V-S4zifK3BXeituUTebkgoA\";\n\n// A token with \"aud\" as invalid_service\n// Payload:\n// {\"iss\":\"https://example.com\",\"sub\":\"test@example.com\",\"aud\":\"invalid_service\",\"exp\":2001001001}\nconst char InvalidAudToken[] =\n    \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJodHRwczovL2V4YW1wbGUu\"\n    \"Y29tIiwic3ViIjoidGVzdEBleGFtcGxlLmNvbSIsImV4cCI6MjAwMTAwMTAwMSwiY\"\n    \"XVkIjoiaW52YWxpZF9zZXJ2aWNlIn0.B9HuVXpRDVYIvApfNQmE_l5fEMPEiPdi-s\"\n    \"dKbTione8I_UsnYHccKZVegaF6f2uyWhAvaTPgaMosyDlJD6skadEcmZD0V4TzsYK\"\n    \"v7eP5FQga26hZ1Kra7n9hAq4oFfH0J8aZLOvDV3tAgCNRXlh9h7QiBPeDNQlwztqE\"\n    \"csyp1lHI3jdUhsn3InIn-vathdx4PWQWLVb-74vwsP-END-MGlOfu_TY5OZUeY-GB\"\n    \"E4Wr06aOSU2XQjuNr6y2WJGMYFsKKWfF01kHSuyc9hjnq5UI19WrOM8s7LFP4w2iK\"\n    
\"WFIPUGmPy3aM0TiF2oFOuuMxdPR3HNdSG7EWWRwoXv7n__jA\";\n\n// A token with non exist kid\n// Payload:\n// {\"iss\":\"https://example.com\",\"sub\":\"test@example.com\",\"aud\":\"example_service\",\"exp\":2001001001}\nconst char NonExistKidToken[] =\n    \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.\"\n    \"eyJpc3MiOiJodHRwczovL2V4YW1wbGUuY29tIiwic3ViIjoidGVzdEBleGFtcGxlLmNvbSIs\"\n    \"ImF1ZCI6ImV4YW1wbGVfc2VydmljZSIsImV4cCI6MjAwMTAwMTAwMX0.\"\n    \"n45uWZfIBZwCIPiL0K8Ca3tmm-ZlsDrC79_\"\n    \"vXCspPwk5oxdSn983tuC9GfVWKXWUMHe11DsB02b19Ow-\"\n    \"fmoEzooTFn65Ml7G34nW07amyM6lETiMhNzyiunctplOr6xKKJHmzTUhfTirvDeG-q9n24-\"\n    \"8lH7GP8GgHvDlgSM9OY7TGp81bRcnZBmxim_UzHoYO3_\"\n    \"c8OP4ZX3xG5PfihVk5G0g6wcHrO70w0_64JgkKRCrLHMJSrhIgp9NHel_\"\n    \"CNOnL0AjQKe9IGblJrMuouqYYS0zEWwmOVUWUSxQkoLpldQUVefcfjQeGjz8IlvktRa77FYe\"\n    \"xfP590ACPyXrivtsxg\";\n\n// {\"iss\":\"https://other.com\",\"sub\":\"test@other.com\",\"aud\":\"other_service\",\"exp\":2001001001}\nconst char OtherGoodToken[] =\n    \"eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.\"\n    \"eyJpc3MiOiJodHRwczovL290aGVyLmNvbSIsInN1YiI6InRlc3RAb3RoZXIuY29tIiwiYXVkIjoib3RoZXJfc2VydmljZS\"\n    \"IsImV4cCI6MjAwMTAwMTAwMX0.R0GR2rnRTg_gWzDvuO-BXVMmw3-vyBspV_kUQ4zvIdO-_\"\n    \"1icaWzbioPTPEyoViWuErNYxaZ5YFBoD6Zk_hIe1YWoSJr9QRwxWA4CWcasJdBXPq2mMETt8VjAiXE_\"\n    \"aIrJOLIlP786GLjVgTsnvhaDUJyU7xUdoi9HRjEBYcdjNPvxJutoby8MypAkwdGxjl4H4Z01gomgWyUDRRy47OKI_\"\n    \"buwXk5M6d-\"\n    \"drRvLcvlT5gB4adOIOlmhm8xtXgYpvqrXfmMJCHbP9no7JATFaTEAkmA3OOxDsaOju4BFgMtRZtDM8p12QQG0rFl_FE-\"\n    \"2FqYX9qA4q41HJ4vxTSxgObeLGA\";\n\n// Expected base64 payload value.\nconst char ExpectedPayloadValue[] = \"eyJpc3MiOiJodHRwczovL2V4YW1wbGUuY29tIiwic3ViIjoidGVzdEBleGFtcG\"\n                                    \"xlLmNvbSIsImV4cCI6MjAwMTAwMTAwMSwiYXVkIjoiZXhhbXBsZV9zZXJ2\"\n                                    \"aWNlIn0\";\n\n// Base64 decoded Payload JSON\nconst char ExpectedPayloadJSON[] = R\"(\n{\n  \"iss\":\"https://example.com\",\n  
\"sub\":\"test@example.com\",\n  \"exp\":2001001001,\n  \"aud\":\"example_service\"\n}\n)\";\n\n// Token copied from https://github.com/google/jwt_verify_lib/blob/master/src/verify_jwk_ec_test.cc\n// Use jwt.io to modify payload as:\n// {\n//  \"iss11\": \"628645741881-noabiu23f5a8m8ovd8ucv698lj78vv0l@developer.gserviceaccount.com\",\n//  \"sub\": \"628645741881-noabiu23f5a8m8ovd8ucv698lj78vv0l@developer.gserviceaccount.com\",\n//  \"aud\": \"example_service\"\n// }\nconst char ES256WithoutIssToken[] =\n    \"eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6ImFiYyJ9.\"\n    \"eyJpc3MxMSI6IjYyODY0NTc0MTg4MS1ub2FiaXUyM2Y1YThtOG92ZDh1Y3Y2OThsajc4dnYwbEBkZXZlbG9wZXIuZ3Nlcn\"\n    \"ZpY2VhY2NvdW50LmNvbSIsInN1YiI6IjYyODY0NTc0MTg4MS1ub2FiaXUyM2Y1YThtOG92ZDh1Y3Y2OThsajc4dnYwbEBk\"\n    \"ZXZlbG9wZXIuZ3NlcnZpY2VhY2NvdW50LmNvbSIsImF1ZCI6ImV4YW1wbGVfc2VydmljZSJ9.f-_\"\n    \"NAdznQK9o93AZTqNawu1yal1igNLgYvuj0JzW9SVmJJQuBT_12wJi9XQzZLFwSFn6D3f7bPWSZQlScTFSMg\";\n\n// Public key for above ES256 tokens.\nconst char ES256PublicKey[] = R\"(\n{\n  \"keys\": [\n    {\n      \"kty\": \"EC\",\n      \"crv\": \"P-256\",\n      \"alg\": \"ES256\",\n      \"kid\": \"abc\",\n      \"x\": \"EB54wykhS7YJFD6RYJNnwbWEz3cI7CF5bCDTXlrwI5k\",\n      \"y\": \"92bCBTvMFQ8lKbS2MbgjT3YfmYo6HnPEE2tsAqWUJw8\"\n    }\n  ]\n}\n)\";\n\n// Config with requires_all requirement\nconst char RequiresAllConfig[] = R\"(\nproviders:\n  example_provider:\n    issuer: https://example.com\n    audiences:\n    - example_service\n    - http://example_service1\n    - https://example_service2/\n    remote_jwks:\n      http_uri:\n        uri: https://pubkey_server/pubkey_path\n        cluster: pubkey_cluster\n    from_params: [\"jwt_a\"]\n    forward_payload_header: example-auth-userinfo\n  other_provider:\n    issuer: https://other.com\n    audiences:\n    - other_service\n    remote_jwks:\n      http_uri:\n        uri: https://pubkey_server/pubkey_path\n        cluster: pubkey_cluster\n    from_params: [\"jwt_b\"]\n    
forward_payload_header: other-auth-userinfo\nrules:\n- match:\n    path: \"/requires-all\"\n  requires:\n    requires_all:\n      requirements:\n      - provider_name: \"example_provider\"\n      - provider_name: \"other_provider\"\n)\";\n// Config with requires_any requirement\nconst char RequiresAnyConfig[] = R\"(\nproviders:\n  example_provider:\n    issuer: https://example.com\n    audiences:\n    - example_service\n    - http://example_service1\n    - https://example_service2/\n    remote_jwks:\n      http_uri:\n        uri: https://pubkey_server/pubkey_path\n        cluster: pubkey_cluster\n    from_headers:\n    - name: a\n      value_prefix: \"Bearer \"\n    - name: b\n      value_prefix: \"Bearer \"\n    forward_payload_header: example-auth-userinfo\n  other_provider:\n    issuer: https://other.com\n    audiences:\n    - other_service\n    remote_jwks:\n      http_uri:\n        uri: https://pubkey_server/pubkey_path\n        cluster: pubkey_cluster\n    from_headers:\n    - name: a\n      value_prefix: \"Bearer \"\n    - name: b\n      value_prefix: \"Bearer \"\n    forward_payload_header: other-auth-userinfo\nrules:\n- match:\n    path: \"/requires-any\"\n  requires:\n    requires_any:\n      requirements:\n      - provider_name: \"example_provider\"\n      - provider_name: \"other_provider\"\n)\";\n\n} // namespace JwtAuthn\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/local_ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"filter_test\",\n    srcs = [\"filter_test.cc\"],\n    extension_name = \"envoy.filters.http.local_ratelimit\",\n    deps = [\n        \"//source/extensions/filters/http/local_ratelimit:local_ratelimit_lib\",\n        \"//test/common/stream_info:test_util\",\n        \"//test/mocks/http:http_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/local_ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.local_ratelimit\",\n    deps = [\n        \"//source/extensions/filters/http/local_ratelimit:config\",\n        \"//test/mocks/server:server_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/local_ratelimit/config_test.cc",
    "content": "#include \"extensions/filters/http/local_ratelimit/config.h\"\n#include \"extensions/filters/http/local_ratelimit/local_ratelimit.h\"\n\n#include \"test/mocks/server/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace LocalRateLimitFilter {\n\nTEST(Factory, GlobalEmptyConfig) {\n  const std::string yaml = R\"(\nstat_prefix: test\n  )\";\n\n  LocalRateLimitFilterConfig factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();\n  TestUtility::loadFromYaml(yaml, *proto_config);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  EXPECT_CALL(context.dispatcher_, createTimer_(_)).Times(0);\n  auto callback = factory.createFilterFactoryFromProto(*proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  callback(filter_callback);\n}\n\nTEST(Factory, RouteSpecificFilterConfig) {\n  const std::string config_yaml = R\"(\nstat_prefix: test\ntoken_bucket:\n  max_tokens: 1\n  tokens_per_fill: 1\n  fill_interval: 1000s\nfilter_enabled:\n  runtime_key: test_enabled\n  default_value:\n    numerator: 100\n    denominator: HUNDRED\nfilter_enforced:\n  runtime_key: test_enforced\n  default_value:\n    numerator: 100\n    denominator: HUNDRED\nresponse_headers_to_add:\n  - append: false\n    header:\n      key: x-test-rate-limit\n      value: 'true'\n  )\";\n\n  LocalRateLimitFilterConfig factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();\n  TestUtility::loadFromYaml(config_yaml, *proto_config);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> context;\n\n  EXPECT_CALL(context.dispatcher_, createTimer_(_)).Times(1);\n  const auto route_config = factory.createRouteSpecificFilterConfig(\n      *proto_config, context, ProtobufMessage::getNullValidationVisitor());\n  const auto* 
config = dynamic_cast<const FilterConfig*>(route_config.get());\n  EXPECT_TRUE(config->requestAllowed());\n}\n\nTEST(Factory, EnabledEnforcedDisabledByDefault) {\n  const std::string config_yaml = R\"(\nstat_prefix: test\ntoken_bucket:\n  max_tokens: 1\n  tokens_per_fill: 1\n  fill_interval: 1000s\n  )\";\n\n  LocalRateLimitFilterConfig factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();\n  TestUtility::loadFromYaml(config_yaml, *proto_config);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> context;\n\n  EXPECT_CALL(context.dispatcher_, createTimer_(_)).Times(1);\n  const auto route_config = factory.createRouteSpecificFilterConfig(\n      *proto_config, context, ProtobufMessage::getNullValidationVisitor());\n  const auto* config = dynamic_cast<const FilterConfig*>(route_config.get());\n  EXPECT_FALSE(config->enabled());\n  EXPECT_FALSE(config->enforced());\n}\n\nTEST(Factory, PerRouteConfigNoTokenBucket) {\n  const std::string config_yaml = R\"(\nstat_prefix: test\n  )\";\n\n  LocalRateLimitFilterConfig factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();\n  TestUtility::loadFromYaml(config_yaml, *proto_config);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> context;\n  EXPECT_THROW(factory.createRouteSpecificFilterConfig(*proto_config, context,\n                                                       ProtobufMessage::getNullValidationVisitor()),\n               EnvoyException);\n}\n\nTEST(Factory, FillTimerTooLow) {\n  const std::string config_yaml = R\"(\nstat_prefix: test\ntoken_bucket:\n  max_tokens: 1\n  tokens_per_fill: 1\n  fill_interval: 0.040s\n  )\";\n\n  LocalRateLimitFilterConfig factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();\n  TestUtility::loadFromYaml(config_yaml, *proto_config);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> context;\n\n  EXPECT_CALL(context.dispatcher_, 
createTimer_(_)).Times(1);\n  EXPECT_THROW(factory.createRouteSpecificFilterConfig(*proto_config, context,\n                                                       ProtobufMessage::getNullValidationVisitor()),\n               EnvoyException);\n}\n\n} // namespace LocalRateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/local_ratelimit/filter_test.cc",
    "content": "#include \"envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.pb.h\"\n\n#include \"extensions/filters/http/local_ratelimit/local_ratelimit.h\"\n\n#include \"test/mocks/http/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace LocalRateLimitFilter {\n\nstatic const std::string config_yaml = R\"(\nstat_prefix: test\ntoken_bucket:\n  max_tokens: {}\n  tokens_per_fill: 1\n  fill_interval: 1000s\nfilter_enabled:\n  runtime_key: test_enabled\n  default_value:\n    numerator: 100\n    denominator: HUNDRED\nfilter_enforced:\n  runtime_key: test_enforced\n  default_value:\n    numerator: 100\n    denominator: HUNDRED\nresponse_headers_to_add:\n  - append: false\n    header:\n      key: x-test-rate-limit\n      value: 'true'\n  )\";\n\nclass FilterTest : public testing::Test {\npublic:\n  FilterTest() = default;\n\n  void setup(const std::string& yaml, const bool enabled = true, const bool enforced = true) {\n    EXPECT_CALL(\n        runtime_.snapshot_,\n        featureEnabled(absl::string_view(\"test_enabled\"),\n                       testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n        .WillRepeatedly(testing::Return(enabled));\n    EXPECT_CALL(\n        runtime_.snapshot_,\n        featureEnabled(absl::string_view(\"test_enforced\"),\n                       testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))\n        .WillRepeatedly(testing::Return(enforced));\n\n    envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit config;\n    TestUtility::loadFromYaml(yaml, config);\n    config_ = std::make_shared<FilterConfig>(config, dispatcher_, stats_, runtime_);\n    filter_ = std::make_shared<Filter>(config_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n  }\n\n  uint64_t findCounter(const std::string& name) {\n    const auto counter = 
TestUtility::findCounter(stats_, name);\n    return counter != nullptr ? counter->value() : 0;\n  }\n\n  Http::Code toErrorCode(const uint64_t code) { return config_->toErrorCode(code); }\n\n  Stats::IsolatedStoreImpl stats_;\n  testing::NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  std::shared_ptr<FilterConfig> config_;\n  std::shared_ptr<Filter> filter_;\n};\n\nTEST_F(FilterTest, Runtime) {\n  setup(fmt::format(config_yaml, \"1\"), false, false);\n  EXPECT_EQ(&runtime_, &(config_->runtime()));\n}\n\nTEST_F(FilterTest, ToErrorCode) {\n  setup(fmt::format(config_yaml, \"1\"), false, false);\n  EXPECT_EQ(Http::Code::BadRequest, toErrorCode(400));\n}\n\nTEST_F(FilterTest, Disabled) {\n  setup(fmt::format(config_yaml, \"1\"), false, false);\n  auto headers = Http::TestRequestHeaderMapImpl();\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(0U, findCounter(\"test.http_local_rate_limit.enabled\"));\n  EXPECT_EQ(0U, findCounter(\"test.http_local_rate_limit.enforced\"));\n}\n\nTEST_F(FilterTest, RequestOk) {\n  setup(fmt::format(config_yaml, \"1\"));\n  auto headers = Http::TestRequestHeaderMapImpl();\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1U, findCounter(\"test.http_local_rate_limit.enabled\"));\n  EXPECT_EQ(0U, findCounter(\"test.http_local_rate_limit.enforced\"));\n  EXPECT_EQ(1U, findCounter(\"test.http_local_rate_limit.ok\"));\n}\n\nTEST_F(FilterTest, RequestRateLimited) {\n  setup(fmt::format(config_yaml, \"0\"));\n\n  EXPECT_CALL(decoder_callbacks_, sendLocalReply(Http::Code::TooManyRequests, _, _, _, _))\n      .WillOnce(Invoke([](Http::Code code, absl::string_view body,\n                          std::function<void(Http::ResponseHeaderMap & headers)> modify_headers,\n                          const 
absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                          absl::string_view details) {\n        EXPECT_EQ(Http::Code::TooManyRequests, code);\n        EXPECT_EQ(\"local_rate_limited\", body);\n\n        Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n        modify_headers(response_headers);\n        EXPECT_EQ(\"true\", response_headers.get(Http::LowerCaseString(\"x-test-rate-limit\"))\n                              ->value()\n                              .getStringView());\n\n        EXPECT_EQ(grpc_status, absl::nullopt);\n        EXPECT_EQ(details, \"local_rate_limited\");\n      }));\n\n  auto headers = Http::TestRequestHeaderMapImpl();\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1U, findCounter(\"test.http_local_rate_limit.enabled\"));\n  EXPECT_EQ(1U, findCounter(\"test.http_local_rate_limit.enforced\"));\n  EXPECT_EQ(1U, findCounter(\"test.http_local_rate_limit.rate_limited\"));\n}\n\nTEST_F(FilterTest, RequestRateLimitedButNotEnforced) {\n  setup(fmt::format(config_yaml, \"0\"), true, false);\n\n  EXPECT_CALL(decoder_callbacks_, sendLocalReply(Http::Code::TooManyRequests, _, _, _, _)).Times(0);\n\n  auto headers = Http::TestRequestHeaderMapImpl();\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(1U, findCounter(\"test.http_local_rate_limit.enabled\"));\n  EXPECT_EQ(0U, findCounter(\"test.http_local_rate_limit.enforced\"));\n  EXPECT_EQ(1U, findCounter(\"test.http_local_rate_limit.rate_limited\"));\n}\n\n} // namespace LocalRateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/lua/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"lua_filter_test\",\n    srcs = [\"lua_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.lua\",\n    deps = [\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/filters/http/lua:lua_filter_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"wrappers_test\",\n    srcs = [\"wrappers_test.cc\"],\n    extension_name = \"envoy.filters.http.lua\",\n    deps = [\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/filters/http/lua:wrappers_lib\",\n        \"//test/extensions/filters/common/lua:lua_wrappers_lib\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"lua_integration_test\",\n    srcs = [\"lua_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.lua\",\n    deps = [\n        \"//source/extensions/filters/http/lua:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        
\"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.lua\",\n    deps = [\n        \"//source/extensions/filters/http/lua:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/lua/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/lua/config_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/http/lua/v3/lua.pb.h\"\n#include \"envoy/extensions/filters/http/lua/v3/lua.pb.validate.h\"\n\n#include \"extensions/filters/http/lua/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Lua {\nnamespace {\n\nTEST(LuaFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(LuaFilterConfig().createFilterFactoryFromProto(\n                   envoy::extensions::filters::http::lua::v3::Lua(), \"stats\", context),\n               ProtoValidationException);\n}\n\nTEST(LuaFilterConfigTest, LuaFilterInJson) {\n  const std::string yaml_string = R\"EOF(\n  inline_code : \"print(5)\"\n  )EOF\";\n\n  envoy::extensions::filters::http::lua::v3::Lua proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  LuaFilterConfig factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(LuaFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.lua\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace Lua\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/lua/lua_filter_test.cc",
    "content": "#include <cstdint>\n#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"extensions/filters/http/lua/lua_filter.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::_;\nusing testing::AtLeast;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::StrEq;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Lua {\nnamespace {\n\nclass TestFilter : public Filter {\npublic:\n  using Filter::Filter;\n\n  MOCK_METHOD(void, scriptLog, (spdlog::level::level_enum level, const char* message));\n};\n\nclass LuaHttpFilterTest : public testing::Test {\npublic:\n  LuaHttpFilterTest() {\n    // Avoid strict mock failures for the following calls. 
We want strict for other calls.\n    EXPECT_CALL(decoder_callbacks_, addDecodedData(_, _))\n        .Times(AtLeast(0))\n        .WillRepeatedly(Invoke([this](Buffer::Instance& data, bool) {\n          if (decoder_callbacks_.buffer_ == nullptr) {\n            decoder_callbacks_.buffer_ = std::make_unique<Buffer::OwnedImpl>();\n          }\n          decoder_callbacks_.buffer_->move(data);\n        }));\n\n    EXPECT_CALL(decoder_callbacks_, activeSpan()).Times(AtLeast(0));\n    EXPECT_CALL(decoder_callbacks_, decodingBuffer()).Times(AtLeast(0));\n    EXPECT_CALL(decoder_callbacks_, route()).Times(AtLeast(0));\n\n    EXPECT_CALL(encoder_callbacks_, addEncodedData(_, _))\n        .Times(AtLeast(0))\n        .WillRepeatedly(Invoke([this](Buffer::Instance& data, bool) {\n          if (encoder_callbacks_.buffer_ == nullptr) {\n            encoder_callbacks_.buffer_ = std::make_unique<Buffer::OwnedImpl>();\n          }\n          encoder_callbacks_.buffer_->move(data);\n        }));\n    EXPECT_CALL(encoder_callbacks_, activeSpan()).Times(AtLeast(0));\n    EXPECT_CALL(encoder_callbacks_, encodingBuffer()).Times(AtLeast(0));\n    EXPECT_CALL(decoder_callbacks_, streamInfo()).Times(testing::AnyNumber());\n  }\n\n  ~LuaHttpFilterTest() override { filter_->onDestroy(); }\n\n  // Quickly set up a global configuration. 
In order to avoid extensive modification of existing\n  // test cases, the existing configuration methods must be compatible.\n  void setup(const std::string& lua_code) {\n    envoy::extensions::filters::http::lua::v3::Lua proto_config;\n    proto_config.set_inline_code(lua_code);\n    envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config;\n    setupConfig(proto_config, per_route_proto_config);\n    setupFilter();\n  }\n\n  void setupConfig(envoy::extensions::filters::http::lua::v3::Lua& proto_config,\n                   envoy::extensions::filters::http::lua::v3::LuaPerRoute& per_route_proto_config) {\n    // Setup filter config for Lua filter.\n    config_ = std::make_shared<FilterConfig>(proto_config, tls_, cluster_manager_, api_);\n    // Setup per route config for Lua filter.\n    per_route_config_ =\n        std::make_shared<FilterConfigPerRoute>(per_route_proto_config, server_factory_context_);\n  }\n\n  void setupFilter() {\n    filter_ = std::make_unique<TestFilter>(config_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    filter_->setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  void setupSecureConnection(const bool secure) {\n    ssl_ = std::make_shared<NiceMock<Envoy::Ssl::MockConnectionInfo>>();\n    EXPECT_CALL(decoder_callbacks_, connection()).WillOnce(Return(&connection_));\n    EXPECT_CALL(Const(connection_), ssl()).Times(1).WillOnce(Return(secure ? 
ssl_ : nullptr));\n  }\n\n  void setupMetadata(const std::string& yaml) {\n    TestUtility::loadFromYaml(yaml, metadata_);\n    EXPECT_CALL(decoder_callbacks_.route_->route_entry_, metadata())\n        .WillOnce(testing::ReturnRef(metadata_));\n  }\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server_factory_context_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Api::MockApi> api_;\n  Upstream::MockClusterManager cluster_manager_;\n  std::shared_ptr<FilterConfig> config_;\n  std::shared_ptr<FilterConfigPerRoute> per_route_config_;\n  std::unique_ptr<TestFilter> filter_;\n  Http::MockStreamDecoderFilterCallbacks decoder_callbacks_;\n  Http::MockStreamEncoderFilterCallbacks encoder_callbacks_;\n  envoy::config::core::v3::Metadata metadata_;\n  std::shared_ptr<NiceMock<Envoy::Ssl::MockConnectionInfo>> ssl_;\n  NiceMock<Envoy::Network::MockConnection> connection_;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info_;\n  Tracing::MockSpan child_span_;\n\n  const std::string HEADER_ONLY_SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:headers():get(\":path\"))\n    end\n  )EOF\"};\n\n  const std::string BODY_CHUNK_SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:headers():get(\":path\"))\n\n      for chunk in request_handle:bodyChunks() do\n        request_handle:logTrace(chunk:length())\n      end\n\n      request_handle:logTrace(\"done\")\n    end\n  )EOF\"};\n\n  const std::string TRAILERS_SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:headers():get(\":path\"))\n\n      for chunk in request_handle:bodyChunks() do\n        request_handle:logTrace(chunk:length())\n      end\n\n      local trailers = request_handle:trailers()\n      if trailers ~= nil then\n        request_handle:logTrace(trailers:get(\"foo\"))\n      else\n        request_handle:logTrace(\"no 
trailers\")\n      end\n    end\n  )EOF\"};\n\n  const std::string TRAILERS_NO_BODY_SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:headers():get(\":path\"))\n\n      if request_handle:trailers() ~= nil then\n        request_handle:logTrace(request_handle:trailers():get(\"foo\"))\n      else\n        request_handle:logTrace(\"no trailers\")\n      end\n    end\n  )EOF\"};\n\n  const std::string BODY_SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:headers():get(\":path\"))\n\n      if request_handle:body() ~= nil then\n        request_handle:logTrace(request_handle:body():length())\n      else\n        request_handle:logTrace(\"no body\")\n      end\n    end\n  )EOF\"};\n\n  const std::string BODY_TRAILERS_SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:headers():get(\":path\"))\n\n      if request_handle:body() ~= nil then\n        request_handle:logTrace(request_handle:body():length())\n      else\n        request_handle:logTrace(\"no body\")\n      end\n\n      if request_handle:trailers() ~= nil then\n        request_handle:logTrace(request_handle:trailers():get(\"foo\"))\n      else\n        request_handle:logTrace(\"no trailers\")\n      end\n    end\n  )EOF\"};\n\n  const std::string ADD_HEADERS_SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:headers():add(\"hello\", \"world\")\n    end\n  )EOF\"};\n};\n\n// Bad code in initial config.\nTEST(LuaHttpFilterConfigTest, BadCode) {\n  const std::string SCRIPT{R\"EOF(\n    bad\n  )EOF\"};\n\n  NiceMock<ThreadLocal::MockInstance> tls;\n  NiceMock<Upstream::MockClusterManager> cluster_manager;\n  NiceMock<Api::MockApi> api;\n\n  envoy::extensions::filters::http::lua::v3::Lua proto_config;\n  proto_config.set_inline_code(SCRIPT);\n\n  EXPECT_THROW_WITH_MESSAGE(FilterConfig(proto_config, tls, 
cluster_manager, api),\n                            Filters::Common::Lua::LuaException,\n                            \"script load error: [string \\\"...\\\"]:3: '=' expected near '<eof>'\");\n}\n\n// Script touching headers only, request that is headers only.\nTEST_F(LuaHttpFilterTest, ScriptHeadersOnlyRequestHeadersOnly) {\n  InSequence s;\n  setup(HEADER_ONLY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Script touching headers only, request that has body.\nTEST_F(LuaHttpFilterTest, ScriptHeadersOnlyRequestBody) {\n  InSequence s;\n  setup(HEADER_ONLY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n}\n\n// Script touching headers only, request that has body and trailers.\nTEST_F(LuaHttpFilterTest, ScriptHeadersOnlyRequestBodyTrailers) {\n  InSequence s;\n  setup(HEADER_ONLY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Script asking for body chunks, request that is headers only.\nTEST_F(LuaHttpFilterTest, 
ScriptBodyChunksRequestHeadersOnly) {\n  InSequence s;\n  setup(BODY_CHUNK_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"done\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Script asking for body chunks, request that has body.\nTEST_F(LuaHttpFilterTest, ScriptBodyChunksRequestBody) {\n  InSequence s;\n  setup(BODY_CHUNK_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"5\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"done\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n}\n\n// Script asking for body chunks, request that has body and trailers.\nTEST_F(LuaHttpFilterTest, ScriptBodyChunksRequestBodyTrailers) {\n  InSequence s;\n  setup(BODY_CHUNK_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"5\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_CALL(*filter_, 
scriptLog(spdlog::level::trace, StrEq(\"done\")));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Script asking for trailers, request is headers only.\nTEST_F(LuaHttpFilterTest, ScriptTrailersRequestHeadersOnly) {\n  InSequence s;\n  setup(TRAILERS_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no trailers\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Script asking for trailers, request that has a body.\nTEST_F(LuaHttpFilterTest, ScriptTrailersRequestBody) {\n  InSequence s;\n  setup(TRAILERS_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"5\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no trailers\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n}\n\n// Script asking for trailers, request that has body and trailers.\nTEST_F(LuaHttpFilterTest, ScriptTrailersRequestBodyTrailers) {\n  InSequence s;\n  setup(TRAILERS_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"5\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl 
request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"bar\")));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Script asking for trailers without body, request is headers only.\nTEST_F(LuaHttpFilterTest, ScriptTrailersNoBodyRequestHeadersOnly) {\n  InSequence s;\n  setup(TRAILERS_NO_BODY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no trailers\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Script asking for trailers without body, request that has a body.\nTEST_F(LuaHttpFilterTest, ScriptTrailersNoBodyRequestBody) {\n  InSequence s;\n  setup(TRAILERS_NO_BODY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no trailers\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n}\n\n// Script asking for trailers without body, request that has a body and trailers.\nTEST_F(LuaHttpFilterTest, ScriptTrailersNoBodyRequestBodyTrailers) {\n  InSequence s;\n  setup(TRAILERS_NO_BODY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl 
request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"bar\")));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Script asking for synchronous body, request that is headers only.\nTEST_F(LuaHttpFilterTest, ScriptBodyRequestHeadersOnly) {\n  InSequence s;\n  setup(BODY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no body\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Script asking for synchronous body, request that has a body.\nTEST_F(LuaHttpFilterTest, ScriptBodyRequestBody) {\n  InSequence s;\n  setup(BODY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"5\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n}\n\n// Script asking for synchronous body, request that has a body in multiple frames.\nTEST_F(LuaHttpFilterTest, ScriptBodyRequestBodyTwoFrames) {\n  InSequence s;\n  setup(BODY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data, false));\n  decoder_callbacks_.addDecodedData(data, false);\n\n  Buffer::OwnedImpl 
data2(\"world\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"10\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data2, true));\n}\n\n// Scripting asking for synchronous body, request that has a body in multiple frames follows by\n// trailers.\nTEST_F(LuaHttpFilterTest, ScriptBodyRequestBodyTwoFramesTrailers) {\n  InSequence s;\n  setup(BODY_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data, false));\n  decoder_callbacks_.addDecodedData(data, false);\n\n  Buffer::OwnedImpl data2(\"world\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data2, false));\n  decoder_callbacks_.addDecodedData(data2, false);\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"10\")));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Script asking for synchronous body and trailers, request that is headers only.\nTEST_F(LuaHttpFilterTest, ScriptBodyTrailersRequestHeadersOnly) {\n  InSequence s;\n  setup(BODY_TRAILERS_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no body\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no trailers\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Script asking for synchronous body and trailers, request that has a 
body.\nTEST_F(LuaHttpFilterTest, ScriptBodyTrailersRequestBody) {\n  InSequence s;\n  setup(BODY_TRAILERS_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"5\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no trailers\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n}\n\n// Script asking for synchronous body and trailers, request that has a body and trailers.\nTEST_F(LuaHttpFilterTest, ScriptBodyTrailersRequestBodyTrailers) {\n  InSequence s;\n  setup(BODY_TRAILERS_SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data, false));\n  decoder_callbacks_.addDecodedData(data, false);\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"5\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"bar\")));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Store a body chunk and reference it outside the loop.\nTEST_F(LuaHttpFilterTest, BodyChunkOutsideOfLoop) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      for chunk in request_handle:bodyChunks() do\n        if previous_chunk == nil then\n          previous_chunk = chunk\n        else\n          
previous_chunk:length()\n        end\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data1(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data1, false));\n\n  Buffer::OwnedImpl data2(\"world\");\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::err,\n                        StrEq(\"[string \\\"...\\\"]:7: object used outside of proper scope\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data2, false));\n}\n\n// Script that should not be run.\nTEST_F(LuaHttpFilterTest, ScriptRandomRequestBodyTrailers) {\n  const std::string SCRIPT{R\"EOF(\n    function some_random_function()\n      print(\"don't run me\")\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Script that has an error during headers processing.\nTEST_F(LuaHttpFilterTest, ScriptErrorHeadersRequestBodyTrailers) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local foo = nil\n      foo[\"bar\"] = \"baz\"\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::err,\n                        StrEq(\"[string \\\"...\\\"]:4: attempt to index local 'foo' (a nil value)\")));\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Script that tries to store a local variable to a global and then use it.\nTEST_F(LuaHttpFilterTest, ThreadEnvironments) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      if global_request_handle == nil then\n        global_request_handle = request_handle\n      else\n        global_request_handle:logTrace(\"should not work\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  TestFilter filter2(config_);\n  EXPECT_CALL(filter2, scriptLog(spdlog::level::err,\n                                 StrEq(\"[string \\\"...\\\"]:6: object used outside of proper scope\")));\n  filter2.decodeHeaders(request_headers, true);\n}\n\n// Script that yields on its own.\nTEST_F(LuaHttpFilterTest, UnexpectedYield) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      coroutine.yield()\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::err, StrEq(\"script performed an unexpected yield\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Script that has an error during a callback from C into Lua.\nTEST_F(LuaHttpFilterTest, ErrorDuringCallback) {\n  const std::string SCRIPT(R\"EOF(\n    function envoy_on_request(request_handle)\n 
     for key, value in pairs(request_handle:headers()) do\n        local foo = nil\n        foo[\"bar\"] = \"baz\"\n      end\n    end\n  )EOF\");\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::err,\n                        StrEq(\"[string \\\"...\\\"]:5: attempt to index local 'foo' (a nil value)\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Use of header iterator across yield.\nTEST_F(LuaHttpFilterTest, HeadersIteratorAcrossYield) {\n  const std::string SCRIPT(R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers_it = pairs(request_handle:headers())\n      request_handle:body()\n      headers_it()\n    end\n  )EOF\");\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::err,\n                        StrEq(\"[string \\\"...\\\"]:5: object used outside of proper scope\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n}\n\n// Combo request and response script.\nTEST_F(LuaHttpFilterTest, RequestAndResponse) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:headers():get(\":path\"))\n      request_handle:headers():add(\"foo\", \"bar\")\n\n      for chunk in request_handle:bodyChunks() do\n        request_handle:logTrace(chunk:length())\n      end\n\n      request_handle:logTrace(request_handle:trailers():get(\"foo\"))\n    end\n\n    function envoy_on_response(response_handle)\n      response_handle:logTrace(response_handle:headers():get(\":status\"))\n      
response_handle:headers():add(\"foo\", \"bar\")\n\n      for chunk in response_handle:bodyChunks() do\n        response_handle:logTrace(chunk:length())\n      end\n\n      response_handle:logTrace(response_handle:trailers():get(\"hello\"))\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"/\")));\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"5\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"bar\")));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl continue_headers{{\":status\", \"100\"}};\n  // No lua hooks for 100-continue\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"100\"))).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(continue_headers));\n\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"200\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl data2(\"helloworld\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"10\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data2, false));\n\n  
Http::TestResponseTrailerMapImpl response_trailers{{\"hello\", \"world\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"world\")));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers));\n}\n\n// Response synchronous body.\nTEST_F(LuaHttpFilterTest, ResponseSynchronousBody) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_response(response_handle)\n      response_handle:logTrace(response_handle:headers():get(\":status\"))\n      response_handle:logTrace(response_handle:body():length())\n      if response_handle:trailers() == nil then\n        response_handle:logTrace(\"no trailers\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"200\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl data2(\"helloworld\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"10\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no trailers\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data2, true));\n}\n\n// Basic HTTP request flow.\nTEST_F(LuaHttpFilterTest, HttpCall) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"POST\",\n          [\":path\"] = \"/\",\n          [\":authority\"] = \"foo\",\n          [\"set-cookie\"] = { \"flavor=chocolate; Path=/\", \"variant=chewy; Path=/\" }\n        },\n        \"hello world\",\n        5000)\n      for key, value in pairs(headers) 
do\n        request_handle:logTrace(key .. \" \" .. value)\n      end\n      request_handle:logTrace(string.len(body))\n      request_handle:logTrace(body)\n      request_handle:logTrace(string.byte(body, 5))\n      request_handle:logTrace(string.sub(body, 6, 8))\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{{\":path\", \"/\"},\n                                                      {\":method\", \"POST\"},\n                                                      {\":authority\", \"foo\"},\n                                                      {\"set-cookie\", \"flavor=chocolate; Path=/\"},\n                                                      {\"set-cookie\", \"variant=chewy; Path=/\"},\n                                                      {\"content-length\", \"11\"}}),\n                      message->headers());\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers));\n\n  
Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n  const char response[8] = {'r', 'e', 's', 'p', '\\0', 'n', 's', 'e'};\n  response_message->body().add(response, 8);\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\":status 200\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"8\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"resp\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"0\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"nse\")));\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n  callbacks->onBeforeFinalizeUpstreamSpan(child_span_, &response_message->headers());\n  callbacks->onSuccess(request, std::move(response_message));\n}\n\n// Basic HTTP request flow. Asynchronous flag set to false.\nTEST_F(LuaHttpFilterTest, HttpCallAsyncFalse) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"POST\",\n          [\":path\"] = \"/\",\n          [\":authority\"] = \"foo\",\n          [\"set-cookie\"] = { \"flavor=chocolate; Path=/\", \"variant=chewy; Path=/\" }\n        },\n        \"hello world\",\n        5000,\n        false)\n      for key, value in pairs(headers) do\n        request_handle:logTrace(key .. \" \" .. 
value)\n      end\n      request_handle:logTrace(body)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{{\":path\", \"/\"},\n                                                      {\":method\", \"POST\"},\n                                                      {\":authority\", \"foo\"},\n                                                      {\"set-cookie\", \"flavor=chocolate; Path=/\"},\n                                                      {\"set-cookie\", \"variant=chewy; Path=/\"},\n                                                      {\"content-length\", \"11\"}}),\n                      message->headers());\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers));\n\n  Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n  
response_message->body().add(\"response\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\":status 200\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"response\")));\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n  callbacks->onSuccess(request, std::move(response_message));\n}\n\n// Basic asynchronous, fire-and-forget HTTP request flow.\nTEST_F(LuaHttpFilterTest, HttpCallAsynchronous) {\n  const std::string SCRIPT{R\"EOF(\n        function envoy_on_request(request_handle)\n          local headers, body = request_handle:httpCall(\n            \"cluster\",\n            {\n              [\":method\"] = \"POST\",\n              [\":path\"] = \"/\",\n              [\":authority\"] = \"foo\",\n              [\"set-cookie\"] = { \"flavor=chocolate; Path=/\", \"variant=chewy; Path=/\" }\n            },\n            \"hello world\",\n            5000,\n            true)\n        end\n      )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{{\":path\", \"/\"},\n                                                      {\":method\", \"POST\"},\n                                                      {\":authority\", \"foo\"},\n                                                      {\"set-cookie\", \"flavor=chocolate; Path=/\"},\n                                                      {\"set-cookie\", 
\"variant=chewy; Path=/\"},\n                                                      {\"content-length\", \"11\"}}),\n                      message->headers());\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Double HTTP call. Responses before request body.\nTEST_F(LuaHttpFilterTest, DoubleHttpCall) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"POST\",\n          [\":path\"] = \"/\",\n          [\":authority\"] = \"foo\"\n        },\n        \"hello world\",\n        5000)\n      for key, value in pairs(headers) do\n        request_handle:logTrace(key .. \" \" .. value)\n      end\n      request_handle:logTrace(body)\n\n      headers, body = request_handle:httpCall(\n        \"cluster2\",\n        {\n          [\":method\"] = \"GET\",\n          [\":path\"] = \"/bar\",\n          [\":authority\"] = \"foo\"\n        },\n        nil,\n        0)\n      for key, value in pairs(headers) do\n        request_handle:logTrace(key .. \" \" .. 
value)\n      end\n      if body == nil then\n        request_handle:logTrace(\"no body\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{{\":path\", \"/\"},\n                                                      {\":method\", \"POST\"},\n                                                      {\":authority\", \"foo\"},\n                                                      {\"content-length\", \"11\"}}),\n                      message->headers());\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n  response_message->body().add(\"response\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\":status 200\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"response\")));\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster2\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster2\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& 
cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{\n                          {\":path\", \"/bar\"}, {\":method\", \"GET\"}, {\":authority\", \"foo\"}}),\n                      message->headers());\n            callbacks = &cb;\n            return &request;\n          }));\n  callbacks->onSuccess(request, std::move(response_message));\n\n  response_message = std::make_unique<Http::ResponseMessageImpl>(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"403\"}}});\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\":status 403\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no body\")));\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n  callbacks->onBeforeFinalizeUpstreamSpan(child_span_, &response_message->headers());\n  callbacks->onSuccess(request, std::move(response_message));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// Basic HTTP request flow with no body.\nTEST_F(LuaHttpFilterTest, HttpCallNoBody) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"GET\",\n          [\":path\"] = \"/\",\n          [\":authority\"] = \"foo\"\n        },\n        nil,\n        5000)\n      for key, value in pairs(headers) do\n        request_handle:logTrace(key .. \" \" .. 
value)\n      end\n      if body == nil then\n        request_handle:logTrace(\"no body\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{\n                          {\":path\", \"/\"}, {\":method\", \"GET\"}, {\":authority\", \"foo\"}}),\n                      message->headers());\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers));\n\n  Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\":status 200\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"no body\")));\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n  callbacks->onSuccess(request, std::move(response_message));\n}\n\n// HTTP call followed by immediate response.\nTEST_F(LuaHttpFilterTest, 
HttpCallImmediateResponse) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"GET\",\n          [\":path\"] = \"/\",\n          [\":authority\"] = \"foo\"\n        },\n        nil,\n        5000)\n      request_handle:respond(\n        {\n          [\":status\"] = \"403\",\n          [\"set-cookie\"] = { \"flavor=chocolate; Path=/\", \"variant=chewy; Path=/\" }\n        },\n        nil)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{\n                          {\":path\", \"/\"}, {\":method\", \"GET\"}, {\":authority\", \"foo\"}}),\n                      message->headers());\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n  Http::TestResponseHeaderMapImpl expected_headers{{\":status\", \"403\"},\n                                                   {\"set-cookie\", \"flavor=chocolate; Path=/\"},\n                                                   
{\"set-cookie\", \"variant=chewy; Path=/\"}};\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true));\n  callbacks->onSuccess(request, std::move(response_message));\n}\n\n// HTTP call with script error after resume.\nTEST_F(LuaHttpFilterTest, HttpCallErrorAfterResumeSuccess) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"GET\",\n          [\":path\"] = \"/\",\n          [\":authority\"] = \"foo\"\n        },\n        nil,\n        5000)\n\n        local foo = nil\n        foo[\"bar\"] = \"baz\"\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n\n  Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::err,\n                        StrEq(\"[string \\\"...\\\"]:14: attempt to index local 'foo' (a nil value)\")));\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n  callbacks->onSuccess(request, 
std::move(response_message));\n}\n\n// HTTP call failure.\nTEST_F(LuaHttpFilterTest, HttpCallFailure) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"GET\",\n          [\":path\"] = \"/\",\n          [\":authority\"] = \"foo\"\n        },\n        nil,\n        5000)\n\n        for key, value in pairs(headers) do\n          request_handle:logTrace(key .. \" \" .. value)\n        end\n        request_handle:logTrace(body)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\":status 503\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"upstream failure\")));\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n  callbacks->onFailure(request, Http::AsyncClient::FailureReason::Reset);\n}\n\n// HTTP call reset.\nTEST_F(LuaHttpFilterTest, HttpCallReset) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"GET\",\n          
[\":path\"] = \"/\",\n          [\":authority\"] = \"foo\"\n        },\n        nil,\n        5000)\n\n        request_handle:logTrace(\"not run\")\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, true));\n\n  EXPECT_CALL(request, cancel());\n  filter_->onDestroy();\n}\n\n// HTTP call immediate failure.\nTEST_F(LuaHttpFilterTest, HttpCallImmediateFailure) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"GET\",\n          [\":path\"] = \"/\",\n          [\":authority\"] = \"foo\"\n        },\n        nil,\n        5000)\n\n        for key, value in pairs(headers) do\n          request_handle:logTrace(key .. \" \" .. 
value)\n        end\n        request_handle:logTrace(body)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            cb.onFailure(request, Http::AsyncClient::FailureReason::Reset);\n            // Intentionally return nullptr (instead of request handle) to trigger a particular\n            // code path.\n            return nullptr;\n          }));\n\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\":status 503\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"upstream failure\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Invalid HTTP call timeout.\nTEST_F(LuaHttpFilterTest, HttpCallInvalidTimeout) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {},\n        nil,\n        -1)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::err,\n                                  StrEq(\"[string \\\"...\\\"]:3: http call timeout must be >= 0\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n}\n\n// Invalid HTTP call cluster.\nTEST_F(LuaHttpFilterTest, HttpCallInvalidCluster) {\n  const std::string SCRIPT{R\"EOF(\n    function 
envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {},\n        nil,\n        5000)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\"))).WillOnce(Return(nullptr));\n  EXPECT_CALL(\n      *filter_,\n      scriptLog(spdlog::level::err,\n                StrEq(\"[string \\\"...\\\"]:3: http call cluster invalid. Must be configured\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n}\n\n// Invalid HTTP call headers.\nTEST_F(LuaHttpFilterTest, HttpCallInvalidHeaders) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n        \"cluster\",\n        {},\n        nil,\n        5000)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::err,\n                                  StrEq(\"[string \\\"...\\\"]:3: http call headers must include \"\n                                        \"':path', ':method', and ':authority'\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n}\n\n// Invalid HTTP call asynchronous flag value.\nTEST_F(LuaHttpFilterTest, HttpCallAsyncInvalidAsynchronousFlag) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:httpCall(\n        \"cluster\",\n        {\n          [\":method\"] = \"POST\",\n          [\":path\"] = \"/\",\n          [\":authority\"] = \"foo\",\n          [\"set-cookie\"] = { \"flavor=chocolate; Path=/\", \"variant=chewy; Path=/\" }\n        },\n        \"hello world\",\n        5000,\n        potato)\n    end\n  
)EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::err, StrEq(\"[string \\\"...\\\"]:3: http call asynchronous flag \"\n                                                  \"must be 'true', 'false', or empty\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n}\n\n// Respond right away.\n// This is also a regression test for https://github.com/envoyproxy/envoy/issues/3570 which runs\n// the request flow 2000 times and does a GC at the end to make sure we don't leak memory.\nTEST_F(LuaHttpFilterTest, ImmediateResponse) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:respond(\n        {[\":status\"] = \"503\"},\n        \"nope\")\n\n      -- Should not run\n      local foo = nil\n      foo[\"bar\"] = \"baz\"\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  // Perform a GC and snap bytes currently used by the runtime.\n  auto script_config = config_->perLuaCodeSetup(GLOBAL_SCRIPT_NAME);\n  script_config->runtimeGC();\n  const uint64_t mem_use_at_start = script_config->runtimeBytesUsed();\n\n  uint64_t num_loops = 2000;\n#if defined(__has_feature) && (__has_feature(thread_sanitizer))\n  // per https://github.com/envoyproxy/envoy/issues/7374 this test is causing\n  // problems on tsan\n  num_loops = 200;\n#endif\n\n  for (uint64_t i = 0; i < num_loops; i++) {\n    Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n    Http::TestResponseHeaderMapImpl expected_headers{{\":status\", \"503\"}, {\"content-length\", \"4\"}};\n    EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), false));\n    EXPECT_CALL(decoder_callbacks_, encodeData(_, true));\n    EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n              filter_->decodeHeaders(request_headers, false));\n    
filter_->onDestroy();\n    setupFilter();\n  }\n\n  // Perform GC and compare bytes currently used by the runtime to the original value.\n  // NOTE: This value is not the same as the original value for reasons that I do not fully\n  //       understand. Depending on the number of requests tested, it increases incrementally, but\n  //       then goes down again at a certain point. There must be some type of interpreter caching\n  //       going on because I'm pretty certain this is not another leak. Because of this, we need\n  //       to do a soft comparison here. In my own testing, without a fix for #3570, the memory\n  //       usage after is at least 20x higher after 2000 iterations so we just check to see if it's\n  //       within 2x.\n  script_config->runtimeGC();\n  EXPECT_TRUE(script_config->runtimeBytesUsed() < mem_use_at_start * 2);\n}\n\n// Respond with bad status.\nTEST_F(LuaHttpFilterTest, ImmediateResponseBadStatus) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:respond(\n        {[\":status\"] = \"100\"},\n        \"nope\")\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::err,\n                                  StrEq(\"[string \\\"...\\\"]:3: :status must be between 200-599\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n}\n\n// Respond after headers have been continued.\nTEST_F(LuaHttpFilterTest, RespondAfterHeadersContinued) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      for chunk in request_handle:bodyChunks() do\n        request_handle:respond(\n          {[\":status\"] = \"100\"},\n          \"nope\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  EXPECT_CALL(\n      *filter_,\n      scriptLog(\n          spdlog::level::err,\n          StrEq(\"[string \\\"...\\\"]:4: respond() cannot be called if headers have been continued\")));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n}\n\n// Respond in response path.\nTEST_F(LuaHttpFilterTest, RespondInResponsePath) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_response(response_handle)\n      response_handle:respond(\n        {[\":status\"] = \"200\"},\n        \"nope\")\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(\n      *filter_,\n      scriptLog(spdlog::level::err,\n                StrEq(\"[string \\\"...\\\"]:3: respond not currently supported in the response path\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true));\n}\n\n// bodyChunks() after body continued.\nTEST_F(LuaHttpFilterTest, BodyChunksAfterBodyContinued) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:body()\n      request_handle:bodyChunks()\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(\n      *filter_,\n      scriptLog(\n          spdlog::level::err,\n          StrEq(\"[string \\\"...\\\"]:4: cannot call bodyChunks after body processing has begun\")));\n  
EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, true));\n}\n\n// body() after only waiting for trailers.\nTEST_F(LuaHttpFilterTest, BodyAfterTrailers) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:trailers()\n      request_handle:body()\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n\n  Http::TestRequestTrailerMapImpl request_trailers{{\"foo\", \"bar\"}};\n  EXPECT_CALL(\n      *filter_,\n      scriptLog(spdlog::level::err,\n                StrEq(\"[string \\\"...\\\"]:4: cannot call body() after body has been streamed\")));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n}\n\n// body() after streaming has started.\nTEST_F(LuaHttpFilterTest, BodyAfterStreamingHasStarted) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      for chunk in request_handle:bodyChunks() do\n        request_handle:body()\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(\n      *filter_,\n      scriptLog(spdlog::level::err,\n                StrEq(\"[string \\\"...\\\"]:4: cannot call body() after body streaming has started\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n}\n\n// script touch metadata():get(\"key\")\nTEST_F(LuaHttpFilterTest, GetMetadataFromHandle) {\n  const std::string SCRIPT{R\"EOF(\n    function 
envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:metadata():get(\"foo.bar\")[\"name\"])\n      request_handle:logTrace(request_handle:metadata():get(\"foo.bar\")[\"prop\"])\n      request_handle:logTrace(request_handle:metadata():get(\"baz.bat\")[\"name\"])\n      request_handle:logTrace(request_handle:metadata():get(\"baz.bat\")[\"prop\"])\n    end\n  )EOF\"};\n\n  const std::string METADATA{R\"EOF(\n    filter_metadata:\n      envoy.filters.http.lua:\n        foo.bar:\n          name: foo\n          prop: bar\n        baz.bat:\n          name: baz\n          prop: bat\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n  setupMetadata(METADATA);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"foo\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"bar\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"baz\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"bat\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Test that the deprecated filter name works for metadata.\nTEST_F(LuaHttpFilterTest, DEPRECATED_FEATURE_TEST(GetMetadataFromHandleUsingDeprecatedName)) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:metadata():get(\"foo.bar\")[\"name\"])\n      request_handle:logTrace(request_handle:metadata():get(\"foo.bar\")[\"prop\"])\n    end\n  )EOF\"};\n\n  const std::string METADATA{R\"EOF(\n    filter_metadata:\n      envoy.lua:\n        foo.bar:\n          name: foo\n          prop: bar\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n  setupMetadata(METADATA);\n\n  // Logs deprecation warning the first time.\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, 
StrEq(\"foo\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"bar\")));\n  EXPECT_LOG_CONTAINS(\n      \"warn\",\n      \"Using deprecated http filter extension name 'envoy.lua' for 'envoy.filters.http.lua'\",\n      filter_->decodeHeaders(request_headers, true));\n\n  // Doesn't log deprecation warning the second time.\n  setupMetadata(METADATA);\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"foo\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"bar\")));\n  EXPECT_LOG_NOT_CONTAINS(\n      \"warn\",\n      \"Using deprecated http filter extension name 'envoy.lua' for 'envoy.filters.http.lua'\",\n      filter_->decodeHeaders(request_headers, true));\n}\n\n// No available metadata on route.\nTEST_F(LuaHttpFilterTest, GetMetadataFromHandleNoRoute) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      if request_handle:metadata():get(\"foo.bar\") == nil then\n        request_handle:logTrace(\"ok\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  ON_CALL(decoder_callbacks_, route()).WillByDefault(Return(nullptr));\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"ok\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// No available Lua metadata on route.\nTEST_F(LuaHttpFilterTest, GetMetadataFromHandleNoLuaMetadata) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      if request_handle:metadata():get(\"foo.bar\") == nil then\n        request_handle:logTrace(\"ok\")\n      end\n    end\n  )EOF\"};\n\n  const std::string METADATA{R\"EOF(\n    filter_metadata:\n      envoy.some_filter:\n        foo.bar:\n          name: foo\n          prop: bar\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n  setupMetadata(METADATA);\n\n  Http::TestRequestHeaderMapImpl 
request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"ok\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Get the current protocol.\nTEST_F(LuaHttpFilterTest, GetCurrentProtocol) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(request_handle:streamInfo():protocol())\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillOnce(ReturnRef(stream_info_));\n  EXPECT_CALL(stream_info_, protocol()).WillOnce(Return(Http::Protocol::Http11));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"HTTP/1.1\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Set and get stream info dynamic metadata.\nTEST_F(LuaHttpFilterTest, SetGetDynamicMetadata) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:streamInfo():dynamicMetadata():set(\"envoy.lb\", \"foo\", \"bar\")\n      request_handle:streamInfo():dynamicMetadata():set(\"envoy.lb\", \"complex\", {x=\"abcd\", y=1234})\n      request_handle:logTrace(request_handle:streamInfo():dynamicMetadata():get(\"envoy.lb\")[\"foo\"])\n      request_handle:logTrace(request_handle:streamInfo():dynamicMetadata():get(\"envoy.lb\")[\"complex\"].x)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Event::SimulatedTimeSystem test_time;\n  StreamInfo::StreamInfoImpl stream_info(Http::Protocol::Http2, test_time.timeSystem());\n  EXPECT_EQ(0, stream_info.dynamicMetadata().filter_metadata_size());\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillOnce(ReturnRef(stream_info));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, 
StrEq(\"bar\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"abcd\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n  EXPECT_EQ(1, stream_info.dynamicMetadata().filter_metadata_size());\n  EXPECT_EQ(\"bar\", stream_info.dynamicMetadata()\n                       .filter_metadata()\n                       .at(\"envoy.lb\")\n                       .fields()\n                       .at(\"foo\")\n                       .string_value());\n\n  const ProtobufWkt::Struct& meta_complex = stream_info.dynamicMetadata()\n                                                .filter_metadata()\n                                                .at(\"envoy.lb\")\n                                                .fields()\n                                                .at(\"complex\")\n                                                .struct_value();\n  EXPECT_EQ(\"abcd\", meta_complex.fields().at(\"x\").string_value());\n  EXPECT_EQ(1234.0, meta_complex.fields().at(\"y\").number_value());\n}\n\n// Check the connection.\nTEST_F(LuaHttpFilterTest, CheckConnection) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      if request_handle:connection():ssl() == nil then\n        request_handle:logTrace(\"plain\")\n      else\n        request_handle:logTrace(\"secure\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n\n  setupSecureConnection(false);\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"plain\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  setupSecureConnection(true);\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"secure\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Inspect stream info downstream SSL 
connection.\nTEST_F(LuaHttpFilterTest, InspectStreamInfoDowstreamSslConnection) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      if request_handle:streamInfo():downstreamSslConnection() == nil then\n      else\n        if request_handle:streamInfo():downstreamSslConnection():peerCertificatePresented() then\n          request_handle:logTrace(\"peerCertificatePresented\")\n        end\n\n        if request_handle:streamInfo():downstreamSslConnection():peerCertificateValidated() then\n          request_handle:logTrace(\"peerCertificateValidated\")\n        end\n\n        request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():uriSanPeerCertificate(), \",\"))\n        request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():uriSanLocalCertificate(), \",\"))\n        request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():dnsSansPeerCertificate(), \",\"))\n        request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():dnsSansLocalCertificate(), \",\"))\n\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():ciphersuiteId())\n\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():validFromPeerCertificate())\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():expirationPeerCertificate())\n\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():subjectLocalCertificate())\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():sha256PeerCertificateDigest())\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():serialNumberPeerCertificate())\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():issuerPeerCertificate())\n        
request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():subjectPeerCertificate())\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():ciphersuiteString())\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():tlsVersion())\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():urlEncodedPemEncodedPeerCertificate())\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():urlEncodedPemEncodedPeerCertificateChain())\n\n        request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():sessionId())\n      end\n    end\n  )EOF\"};\n\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n\n  auto connection_info = std::make_shared<Ssl::MockConnectionInfo>();\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_));\n  EXPECT_CALL(stream_info_, downstreamSslConnection()).WillRepeatedly(Return(connection_info));\n\n  EXPECT_CALL(*connection_info, peerCertificatePresented()).WillOnce(Return(true));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"peerCertificatePresented\")));\n\n  EXPECT_CALL(*connection_info, peerCertificateValidated()).WillOnce(Return(true));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"peerCertificateValidated\")));\n\n  const std::vector<std::string> peer_uri_sans{\"peer-uri-sans-1\", \"peer-uri-sans-2\"};\n  EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillOnce(Return(peer_uri_sans));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"peer-uri-sans-1,peer-uri-sans-2\")));\n\n  const std::vector<std::string> local_uri_sans{\"local-uri-sans-1\", \"local-uri-sans-2\"};\n  EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillOnce(Return(local_uri_sans));\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::trace, 
StrEq(\"local-uri-sans-1,local-uri-sans-2\")));\n\n  const std::vector<std::string> peer_dns_sans{\"peer-dns-sans-1\", \"peer-dns-sans-2\"};\n  EXPECT_CALL(*connection_info, dnsSansPeerCertificate()).WillOnce(Return(peer_dns_sans));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"peer-dns-sans-1,peer-dns-sans-2\")));\n\n  const std::vector<std::string> local_dns_sans{\"local-dns-sans-1\", \"local-dns-sans-2\"};\n  EXPECT_CALL(*connection_info, dnsSansLocalCertificate()).WillOnce(Return(local_dns_sans));\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::trace, StrEq(\"local-dns-sans-1,local-dns-sans-2\")));\n\n  const std::string subject_local = \"subject-local\";\n  EXPECT_CALL(*connection_info, subjectLocalCertificate()).WillOnce(ReturnRef(subject_local));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(subject_local)));\n\n  const uint64_t cipher_suite_id = 0x0707;\n  EXPECT_CALL(*connection_info, ciphersuiteId()).WillRepeatedly(Return(cipher_suite_id));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"0x0707\")));\n\n  const SystemTime validity(std::chrono::seconds(1522796777));\n  EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(validity));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"1522796777\")));\n\n  const SystemTime expiry(std::chrono::seconds(1522796776));\n  EXPECT_CALL(*connection_info, expirationPeerCertificate()).WillRepeatedly(Return(expiry));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"1522796776\")));\n\n  const std::string peer_cert_digest = \"peer-cert-digest\";\n  EXPECT_CALL(*connection_info, sha256PeerCertificateDigest())\n      .WillOnce(ReturnRef(peer_cert_digest));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_digest)));\n\n  const std::string peer_cert_serial_number = \"peer-cert-serial-number\";\n  EXPECT_CALL(*connection_info, serialNumberPeerCertificate())\n      
.WillOnce(ReturnRef(peer_cert_serial_number));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_serial_number)));\n\n  const std::string peer_cert_issuer = \"peer-cert-issuer\";\n  EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillOnce(ReturnRef(peer_cert_issuer));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_issuer)));\n\n  const std::string peer_cert_subject = \"peer-cert-subject\";\n  EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillOnce(ReturnRef(peer_cert_subject));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_subject)));\n\n  const std::string cipher_suite = \"cipher-suite\";\n  EXPECT_CALL(*connection_info, ciphersuiteString()).WillOnce(Return(cipher_suite));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(cipher_suite)));\n\n  const std::string tls_version = \"tls-version\";\n  EXPECT_CALL(*connection_info, tlsVersion()).WillOnce(ReturnRef(tls_version));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(tls_version)));\n\n  const std::string peer_cert = \"peer-cert\";\n  EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate())\n      .WillOnce(ReturnRef(peer_cert));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert)));\n\n  const std::string peer_cert_chain = \"peer-cert-chain\";\n  EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificateChain())\n      .WillOnce(ReturnRef(peer_cert_chain));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_chain)));\n\n  const std::string id = \"12345\";\n  EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(id));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(id)));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Inspect stream info downstream SSL connection in a plain connection.\nTEST_F(LuaHttpFilterTest, 
InspectStreamInfoDowstreamSslConnectionOnPlainConnection) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      if request_handle:streamInfo():downstreamSslConnection() == nil then\n        request_handle:logTrace(\"downstreamSslConnection is nil\")\n      end\n    end\n  )EOF\"};\n\n  setup(SCRIPT);\n\n  EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_));\n  EXPECT_CALL(stream_info_, downstreamSslConnection()).WillRepeatedly(Return(nullptr));\n\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"downstreamSslConnection is nil\")));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\nTEST_F(LuaHttpFilterTest, ImportPublicKey) {\n  const std::string SCRIPT{R\"EOF(\n    function string.fromhex(str)\n      return (str:gsub('..', function (cc)\n        return string.char(tonumber(cc, 16))\n      end))\n    end\n    function envoy_on_request(request_handle)\n      key = \"30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a62cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001\"\n      raw = key:fromhex()\n      key = request_handle:importPublicKey(raw, string.len(raw)):get()\n\n      if key == nil then\n        request_handle:logTrace(\"failed to import public key\")\n      else\n        request_handle:logTrace(\"succeeded to import public key\")\n      end\n    end\n  )EOF\"};\n\n  
InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"succeeded to import public key\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\nTEST_F(LuaHttpFilterTest, InvalidPublicKey) {\n  const std::string SCRIPT{R\"EOF(\n    function string.fromhex(str)\n      return (str:gsub('..', function (cc)\n        return string.char(tonumber(cc, 16))\n      end))\n    end\n    function envoy_on_request(request_handle)\n      key = \"0000\"\n      raw = key:fromhex()\n      key = request_handle:importPublicKey(raw, string.len(raw)):get()\n\n      if key == nil then\n        request_handle:logTrace(\"failed to import public key\")\n      else\n        request_handle:logTrace(\"succeeded to import public key\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"failed to import public key\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\nTEST_F(LuaHttpFilterTest, SignatureVerify) {\n  const std::string SCRIPT{R\"EOF(\n    function string.fromhex(str)\n      return (str:gsub('..', function (cc)\n        return string.char(tonumber(cc, 16))\n      end))\n    end\n    function envoy_on_request(request_handle)\n      key = 
\"30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a62cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001\"\n      hashFunc = \"sha256\"\n      signature = \"345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a29c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf570a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332\"\n      data = \"hello\"\n\n      rawkey = key:fromhex()\n      pubkey = request_handle:importPublicKey(rawkey, string.len(rawkey)):get()\n\n      if pubkey == nil then\n        request_handle:logTrace(\"failed to import public key\")\n        return\n      end\n\n      rawsig = signature:fromhex()\n\n      ok, error = request_handle:verifySignature(hashFunc, pubkey, rawsig, string.len(rawsig), data, string.len(data))\n      if ok then\n        request_handle:logTrace(\"signature is valid\")\n      else\n        request_handle:logTrace(error)\n      end\n\n      ok, error = request_handle:verifySignature(\"unknown\", pubkey, rawsig, string.len(rawsig), data, string.len(data))\n      if ok then\n        request_handle:logTrace(\"signature is valid\")\n      else\n        request_handle:logTrace(error)\n      end\n\n      ok, error 
= request_handle:verifySignature(hashFunc, pubkey, \"0000\", 4, data, string.len(data))\n      if ok then\n        request_handle:logTrace(\"signature is valid\")\n      else\n        request_handle:logTrace(error)\n      end\n\n      ok, error = request_handle:verifySignature(hashFunc, pubkey, rawsig, string.len(rawsig), \"xxxx\", 4)\n      if ok then\n        request_handle:logTrace(\"signature is valid\")\n      else\n        request_handle:logTrace(error)\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"signature is valid\")));\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"unknown is not supported.\")));\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::trace, StrEq(\"Failed to verify digest. Error code: 0\")));\n  EXPECT_CALL(*filter_,\n              scriptLog(spdlog::level::trace, StrEq(\"Failed to verify digest. 
Error code: 0\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n}\n\n// Test whether the route configuration can properly disable the Lua filter.\nTEST_F(LuaHttpFilterTest, LuaFilterDisabled) {\n  envoy::extensions::filters::http::lua::v3::Lua proto_config;\n  proto_config.set_inline_code(ADD_HEADERS_SCRIPT);\n  envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config;\n  per_route_proto_config.set_disabled(true);\n\n  setupConfig(proto_config, per_route_proto_config);\n  setupFilter();\n\n  EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n\n  ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua))\n      .WillByDefault(Return(nullptr));\n\n  Http::TestRequestHeaderMapImpl request_headers_1{{\":path\", \"/\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_1, true));\n  EXPECT_EQ(\"world\", request_headers_1.get_(\"hello\"));\n\n  ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua))\n      .WillByDefault(Return(per_route_config_.get()));\n\n  Http::TestRequestHeaderMapImpl request_headers_2{{\":path\", \"/\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_2, true));\n  EXPECT_EQ(nullptr, request_headers_2.get(Http::LowerCaseString(\"hello\")));\n}\n\n// Test whether the route can directly reuse the Lua code in the global configuration.\nTEST_F(LuaHttpFilterTest, LuaFilterRefSourceCodes) {\n  const std::string SCRIPT_FOR_ROUTE_ONE{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:headers():add(\"route_info\", \"This request is routed by ROUTE_ONE\");\n    end\n  )EOF\"};\n  const std::string SCRIPT_FOR_ROUTE_TWO{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:headers():add(\"route_info\", \"This request is routed by ROUTE_TWO\");\n    end\n  )EOF\"};\n  
EXPECT_CALL(decoder_callbacks_, clearRouteCache());\n  envoy::extensions::filters::http::lua::v3::Lua proto_config;\n  proto_config.set_inline_code(ADD_HEADERS_SCRIPT);\n  envoy::config::core::v3::DataSource source1, source2;\n  source1.set_inline_string(SCRIPT_FOR_ROUTE_ONE);\n  source2.set_inline_string(SCRIPT_FOR_ROUTE_TWO);\n  proto_config.mutable_source_codes()->insert({\"route_one.lua\", source1});\n  proto_config.mutable_source_codes()->insert({\"route_two.lua\", source2});\n\n  envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config;\n  per_route_proto_config.set_name(\"route_two.lua\");\n\n  setupConfig(proto_config, per_route_proto_config);\n  setupFilter();\n\n  ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua))\n      .WillByDefault(Return(per_route_config_.get()));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n  EXPECT_EQ(\"This request is routed by ROUTE_TWO\", request_headers.get_(\"route_info\"));\n}\n\n// Lua filter do nothing when the referenced name does not exist.\nTEST_F(LuaHttpFilterTest, LuaFilterRefSourceCodeNotExist) {\n  const std::string SCRIPT_FOR_ROUTE_ONE{R\"EOF(\n    function envoy_on_request(request_handle)\n      request_handle:headers():add(\"route_info\", \"This request is routed by ROUTE_ONE\");\n    end\n  )EOF\"};\n\n  envoy::extensions::filters::http::lua::v3::Lua proto_config;\n  proto_config.set_inline_code(ADD_HEADERS_SCRIPT);\n  envoy::config::core::v3::DataSource source1;\n  source1.set_inline_string(SCRIPT_FOR_ROUTE_ONE);\n  proto_config.mutable_source_codes()->insert({\"route_one.lua\", source1});\n\n  envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config;\n  // The global source codes do not contain a script named 'route_two.lua'.\n  per_route_proto_config.set_name(\"route_two.lua\");\n\n  
setupConfig(proto_config, per_route_proto_config);\n  setupFilter();\n\n  ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua))\n      .WillByDefault(Return(per_route_config_.get()));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n  EXPECT_EQ(nullptr, request_headers.get(Http::LowerCaseString(\"hello\")));\n}\n\nTEST_F(LuaHttpFilterTest, LuaFilterBase64Escape) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_request(request_handle)\n      local base64Encoded = request_handle:base64Escape(\"foobar\")\n      request_handle:logTrace(base64Encoded)\n    end\n\n    function envoy_on_response(response_handle)\n      local base64Encoded = response_handle:base64Escape(\"barfoo\")\n      response_handle:logTrace(base64Encoded)\n\n      local resp_body_buf = response_handle:body()\n      local resp_body = resp_body_buf:getBytes(0, resp_body_buf:length())\n      local b64_resp_body = response_handle:base64Escape(resp_body)\n      response_handle:logTrace(b64_resp_body)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"Zm9vYmFy\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"YmFyZm9v\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n\n  // Base64 encoding should also work for binary data.\n  uint8_t buffer[34] = {31, 139, 8,  0, 0, 0, 0, 0,   0,   255, 202, 72,  205, 201, 201, 47, 207,\n                        47, 202, 73, 1, 4, 0, 0, 255, 255, 173, 32,  235, 249, 10,  0,   0,  0};\n  
Buffer::OwnedImpl response_body(buffer, 34);\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace,\n                                  StrEq(\"H4sIAAAAAAAA/8pIzcnJL88vykkBBAAA//+tIOv5CgAAAA==\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true));\n}\n\nTEST_F(LuaHttpFilterTest, LuaFilterSetResponseBuffer) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_response(response_handle)\n      local content_length = response_handle:body():setBytes(\"1234\")\n      response_handle:logTrace(content_length)\n\n      -- It is possible to replace an entry in headers after overridding encoding buffer.\n      response_handle:headers():replace(\"content-length\", content_length)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->encodeHeaders(response_headers, false));\n  Buffer::OwnedImpl response_body(\"1234567890\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"4\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true));\n  EXPECT_EQ(4, encoder_callbacks_.buffer_->length());\n}\n\nTEST_F(LuaHttpFilterTest, LuaFilterSetResponseBufferChunked) {\n  const std::string SCRIPT{R\"EOF(\n    function envoy_on_response(response_handle)\n      local last\n      for chunk in response_handle:bodyChunks() do\n        chunk:setBytes(\"\")\n        last = chunk\n      end\n      response_handle:logTrace(last:setBytes(\"1234\"))\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true));\n\n 
 Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl response_body(\"1234567890\");\n  EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(\"4\")));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true));\n}\n\n} // namespace\n} // namespace Lua\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/lua/lua_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing Envoy::Http::HeaderValueOf;\n\nnamespace Envoy {\nnamespace {\n\nclass LuaIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                           public HttpIntegrationTest {\npublic:\n  LuaIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    addFakeUpstream(FakeHttpConnection::Type::HTTP1);\n    addFakeUpstream(FakeHttpConnection::Type::HTTP1);\n    // Create the xDS upstream.\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void initializeFilter(const std::string& filter_config, const std::string& domain = \"*\") {\n    config_helper_.addFilter(filter_config);\n\n    // Create static clusters.\n    createClusters();\n\n    config_helper_.addConfigModifier(\n        [domain](\n            envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) {\n          hcm.mutable_route_config()\n              ->mutable_virtual_hosts(0)\n              ->mutable_routes(0)\n              ->mutable_match()\n              ->set_prefix(\"/test/long/url\");\n\n          hcm.mutable_route_config()->mutable_virtual_hosts(0)->set_domains(0, domain);\n          auto* new_route = hcm.mutable_route_config()->mutable_virtual_hosts(0)->add_routes();\n          new_route->mutable_match()->set_prefix(\"/alt/route\");\n          new_route->mutable_route()->set_cluster(\"alt_cluster\");\n\n          const std::string key = Extensions::HttpFilters::HttpFilterNames::get().Lua;\n          const std::string yaml =\n   
           R\"EOF(\n            foo.bar:\n              foo: bar\n              baz: bat\n            keyset:\n              foo: MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp0cSZtAdFgMI1zQJwG8ujTXFMcRY0+SA6fMZGEfQYuxcz/e8UelJ1fLDVAwYmk7KHoYzpizy0JIxAcJ+OAE+cd6a6RpwSEm/9/vizlv0vWZv2XMRAqUxk/5amlpQZE/4sRg/qJdkZZjKrSKjf5VEUQg2NytExYyYWG+3FEYpzYyUeVktmW0y/205XAuEQuxaoe+AUVKeoON1iDzvxywE42C0749XYGUFicqBSRj2eO7jm4hNWvgTapYwpswM3hV9yOAPOVQGKNXzNbLDbFTHyLw3OKayGs/4FUBa+ijlGD9VDawZq88RRaf5ztmH22gOSiKcrHXe40fsnrzh/D27uwIDAQAB\n          )EOF\";\n\n          ProtobufWkt::Struct value;\n          TestUtility::loadFromYaml(yaml, value);\n\n          // Sets the route's metadata.\n          hcm.mutable_route_config()\n              ->mutable_virtual_hosts(0)\n              ->mutable_routes(0)\n              ->mutable_metadata()\n              ->mutable_filter_metadata()\n              ->insert(Protobuf::MapPair<std::string, ProtobufWkt::Struct>(key, value));\n        });\n\n    initialize();\n  }\n\n  void initializeWithYaml(const std::string& filter_config, const std::string& route_config) {\n    config_helper_.addFilter(filter_config);\n\n    createClusters();\n    config_helper_.addConfigModifier(\n        [route_config](\n            envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) {\n          TestUtility::loadFromYaml(route_config, *hcm.mutable_route_config(), true);\n        });\n    initialize();\n  }\n\n  void createClusters() {\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* lua_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      lua_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      lua_cluster->set_name(\"lua_cluster\");\n\n      auto* alt_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      alt_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      
alt_cluster->set_name(\"alt_cluster\");\n\n      auto* xds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      xds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      xds_cluster->set_name(\"xds_cluster\");\n      xds_cluster->mutable_http2_protocol_options();\n    });\n  }\n\n  void initializeWithRds(const std::string& filter_config, const std::string& route_config_name,\n                         const std::string& initial_route_config) {\n    config_helper_.addFilter(filter_config);\n\n    // Create static clusters.\n    createClusters();\n\n    // Set RDS config source.\n    config_helper_.addConfigModifier(\n        [route_config_name](\n            envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) {\n          hcm.mutable_rds()->set_route_config_name(route_config_name);\n          envoy::config::core::v3::ApiConfigSource* rds_api_config_source =\n              hcm.mutable_rds()->mutable_config_source()->mutable_api_config_source();\n          rds_api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n          envoy::config::core::v3::GrpcService* grpc_service =\n              rds_api_config_source->add_grpc_services();\n          grpc_service->mutable_envoy_grpc()->set_cluster_name(\"xds_cluster\");\n        });\n\n    on_server_init_function_ = [&]() {\n      AssertionResult result =\n          fake_upstreams_[3]->waitForHttpConnection(*dispatcher_, xds_connection_);\n      RELEASE_ASSERT(result, result.message());\n      result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n      RELEASE_ASSERT(result, result.message());\n      xds_stream_->startGrpcStream();\n\n      EXPECT_TRUE(compareSotwDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                              {route_config_name}, true));\n      sendSotwDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n       
   Config::TypeUrl::get().RouteConfiguration,\n          {TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(\n              initial_route_config)},\n          \"1\");\n    };\n    initialize();\n    registerTestServerPorts({\"http\"});\n  }\n\n  void testRewriteResponse(const std::string& code) {\n    initializeFilter(code);\n    codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n    Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                   {\":path\", \"/test/long/url\"},\n                                                   {\":scheme\", \"http\"},\n                                                   {\":authority\", \"host\"},\n                                                   {\"x-forwarded-for\", \"10.0.0.1\"}};\n\n    auto encoder_decoder = codec_client_->startRequest(request_headers);\n    Http::StreamEncoder& encoder = encoder_decoder.first;\n    auto response = std::move(encoder_decoder.second);\n    Buffer::OwnedImpl request_data(\"done\");\n    encoder.encodeData(request_data, true);\n\n    waitForNextUpstreamRequest();\n\n    Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}, {\"foo\", \"bar\"}};\n    upstream_request_->encodeHeaders(response_headers, false);\n    Buffer::OwnedImpl response_data1(\"good\");\n    upstream_request_->encodeData(response_data1, false);\n    Buffer::OwnedImpl response_data2(\"bye\");\n    upstream_request_->encodeData(response_data2, true);\n\n    response->waitForEndStream();\n\n    EXPECT_EQ(\n        \"2\",\n        response->headers().get(Http::LowerCaseString(\"content-length\"))->value().getStringView());\n    EXPECT_EQ(\"ok\", response->body());\n    cleanup();\n  }\n\n  void cleanup() {\n    codec_client_->close();\n    if (fake_lua_connection_ != nullptr) {\n      AssertionResult result = fake_lua_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = 
fake_lua_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n    if (fake_upstream_connection_ != nullptr) {\n      AssertionResult result = fake_upstream_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_upstream_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n    if (xds_connection_ != nullptr) {\n      AssertionResult result = xds_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = xds_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n      xds_connection_ = nullptr;\n    }\n  }\n\n  FakeHttpConnectionPtr fake_lua_connection_;\n  FakeStreamPtr lua_request_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, LuaIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Regression test for pulling route info during early local replies using the Lua filter\n// metadata() API. 
Covers both the upgrade required and no authority cases.\nTEST_P(LuaIntegrationTest, CallMetadataDuringLocalReply) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function envoy_on_response(response_handle)\n      local metadata = response_handle:metadata():get(\"foo.bar\")\n      if metadata == nil then\n      end\n    end\n)EOF\";\n\n  initializeFilter(FILTER_AND_CODE, \"foo\");\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.0\\r\\n\\r\\n\", &response, true);\n  EXPECT_TRUE(response.find(\"HTTP/1.1 426 Upgrade Required\\r\\n\") == 0);\n\n  response = \"\";\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.1\\r\\n\\r\\n\", &response, true);\n  EXPECT_TRUE(response.find(\"HTTP/1.1 400 Bad Request\\r\\n\") == 0);\n}\n\n// Basic request and response.\nTEST_P(LuaIntegrationTest, RequestAndResponse) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function envoy_on_request(request_handle)\n      request_handle:logTrace(\"log test\")\n      request_handle:logDebug(\"log test\")\n      request_handle:logInfo(\"log test\")\n      request_handle:logWarn(\"log test\")\n      request_handle:logErr(\"log test\")\n      request_handle:logCritical(\"log test\")\n\n      local metadata = request_handle:metadata():get(\"foo.bar\")\n      local body_length = request_handle:body():length()\n\n      request_handle:streamInfo():dynamicMetadata():set(\"envoy.lb\", \"foo\", \"bar\")\n      local dynamic_metadata_value = request_handle:streamInfo():dynamicMetadata():get(\"envoy.lb\")[\"foo\"]\n\n      request_handle:headers():add(\"request_body_size\", body_length)\n      request_handle:headers():add(\"request_metadata_foo\", metadata[\"foo\"])\n      
request_handle:headers():add(\"request_metadata_baz\", metadata[\"baz\"])\n      if request_handle:connection():ssl() == nil then\n        request_handle:headers():add(\"request_secure\", \"false\")\n      else\n        request_handle:headers():add(\"request_secure\", \"true\")\n      end\n      request_handle:headers():add(\"request_protocol\", request_handle:streamInfo():protocol())\n      request_handle:headers():add(\"request_dynamic_metadata_value\", dynamic_metadata_value)\n    end\n\n    function envoy_on_response(response_handle)\n      local metadata = response_handle:metadata():get(\"foo.bar\")\n      local body_length = response_handle:body():length()\n      response_handle:headers():add(\"response_metadata_foo\", metadata[\"foo\"])\n      response_handle:headers():add(\"response_metadata_baz\", metadata[\"baz\"])\n      response_handle:headers():add(\"response_body_size\", body_length)\n      response_handle:headers():add(\"request_protocol\", response_handle:streamInfo():protocol())\n      response_handle:headers():remove(\"foo\")\n    end\n)EOF\";\n\n  initializeFilter(FILTER_AND_CODE);\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-forwarded-for\", \"10.0.0.1\"}};\n\n  auto encoder_decoder = codec_client_->startRequest(request_headers);\n  Http::StreamEncoder& encoder = encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  Buffer::OwnedImpl request_data1(\"hello\");\n  encoder.encodeData(request_data1, false);\n  Buffer::OwnedImpl request_data2(\"world\");\n  encoder.encodeData(request_data2, true);\n\n  
waitForNextUpstreamRequest();\n  EXPECT_EQ(\"10\", upstream_request_->headers()\n                      .get(Http::LowerCaseString(\"request_body_size\"))\n                      ->value()\n                      .getStringView());\n\n  EXPECT_EQ(\"bar\", upstream_request_->headers()\n                       .get(Http::LowerCaseString(\"request_metadata_foo\"))\n                       ->value()\n                       .getStringView());\n\n  EXPECT_EQ(\"bat\", upstream_request_->headers()\n                       .get(Http::LowerCaseString(\"request_metadata_baz\"))\n                       ->value()\n                       .getStringView());\n  EXPECT_EQ(\"false\", upstream_request_->headers()\n                         .get(Http::LowerCaseString(\"request_secure\"))\n                         ->value()\n                         .getStringView());\n\n  EXPECT_EQ(\"HTTP/1.1\", upstream_request_->headers()\n                            .get(Http::LowerCaseString(\"request_protocol\"))\n                            ->value()\n                            .getStringView());\n\n  EXPECT_EQ(\"bar\", upstream_request_->headers()\n                       .get(Http::LowerCaseString(\"request_dynamic_metadata_value\"))\n                       ->value()\n                       .getStringView());\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}, {\"foo\", \"bar\"}};\n  upstream_request_->encodeHeaders(response_headers, false);\n  Buffer::OwnedImpl response_data1(\"good\");\n  upstream_request_->encodeData(response_data1, false);\n  Buffer::OwnedImpl response_data2(\"bye\");\n  upstream_request_->encodeData(response_data2, true);\n\n  response->waitForEndStream();\n\n  EXPECT_EQ(\"7\", response->headers()\n                     .get(Http::LowerCaseString(\"response_body_size\"))\n                     ->value()\n                     .getStringView());\n  EXPECT_EQ(\"bar\", response->headers()\n                       
.get(Http::LowerCaseString(\"response_metadata_foo\"))\n                       ->value()\n                       .getStringView());\n  EXPECT_EQ(\"bat\", response->headers()\n                       .get(Http::LowerCaseString(\"response_metadata_baz\"))\n                       ->value()\n                       .getStringView());\n  EXPECT_EQ(\n      \"HTTP/1.1\",\n      response->headers().get(Http::LowerCaseString(\"request_protocol\"))->value().getStringView());\n  EXPECT_EQ(nullptr, response->headers().get(Http::LowerCaseString(\"foo\")));\n\n  cleanup();\n}\n\n// Upstream call followed by continuation.\nTEST_P(LuaIntegrationTest, UpstreamHttpCall) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n      \"lua_cluster\",\n      {\n        [\":method\"] = \"POST\",\n        [\":path\"] = \"/\",\n        [\":authority\"] = \"lua_cluster\"\n      },\n      \"hello world\",\n      5000)\n\n      request_handle:headers():add(\"upstream_foo\", headers[\"foo\"])\n      request_handle:headers():add(\"upstream_body_size\", #body)\n    end\n)EOF\";\n\n  initializeFilter(FILTER_AND_CODE);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-forwarded-for\", \"10.0.0.1\"}};\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n\n  ASSERT_TRUE(fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_lua_connection_));\n  
ASSERT_TRUE(fake_lua_connection_->waitForNewStream(*dispatcher_, lua_request_));\n  ASSERT_TRUE(lua_request_->waitForEndStream(*dispatcher_));\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}, {\"foo\", \"bar\"}};\n  lua_request_->encodeHeaders(response_headers, false);\n  Buffer::OwnedImpl response_data1(\"good\");\n  lua_request_->encodeData(response_data1, true);\n\n  waitForNextUpstreamRequest();\n  EXPECT_EQ(\"bar\", upstream_request_->headers()\n                       .get(Http::LowerCaseString(\"upstream_foo\"))\n                       ->value()\n                       .getStringView());\n  EXPECT_EQ(\"4\", upstream_request_->headers()\n                     .get(Http::LowerCaseString(\"upstream_body_size\"))\n                     ->value()\n                     .getStringView());\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  cleanup();\n}\n\n// Upstream call followed by immediate response.\nTEST_P(LuaIntegrationTest, UpstreamCallAndRespond) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n      \"lua_cluster\",\n      {\n        [\":method\"] = \"POST\",\n        [\":path\"] = \"/\",\n        [\":authority\"] = \"lua_cluster\"\n      },\n      \"hello world\",\n      5000)\n\n      request_handle:respond(\n        {[\":status\"] = \"403\",\n         [\"upstream_foo\"] = headers[\"foo\"]},\n        \"nope\")\n    end\n)EOF\";\n\n  initializeFilter(FILTER_AND_CODE);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 
{\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-forwarded-for\", \"10.0.0.1\"}};\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n\n  ASSERT_TRUE(fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_lua_connection_));\n  ASSERT_TRUE(fake_lua_connection_->waitForNewStream(*dispatcher_, lua_request_));\n  ASSERT_TRUE(lua_request_->waitForEndStream(*dispatcher_));\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}, {\"foo\", \"bar\"}};\n  lua_request_->encodeHeaders(response_headers, true);\n\n  response->waitForEndStream();\n  cleanup();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"403\", response->headers().getStatusValue());\n  EXPECT_EQ(\"nope\", response->body());\n}\n\n// Upstream fire and forget asynchronous call.\nTEST_P(LuaIntegrationTest, UpstreamAsyncHttpCall) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: envoy.filters.http.lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function envoy_on_request(request_handle)\n      local headers, body = request_handle:httpCall(\n      \"lua_cluster\",\n      {\n        [\":method\"] = \"POST\",\n        [\":path\"] = \"/\",\n        [\":authority\"] = \"lua_cluster\"\n      },\n      \"hello world\",\n      5000,\n      true)\n    end\n)EOF\";\n\n  initializeFilter(FILTER_AND_CODE);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-forwarded-for\", \"10.0.0.1\"}};\n  auto 
response = codec_client_->makeHeaderOnlyRequest(request_headers);\n\n  ASSERT_TRUE(fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_lua_connection_));\n  ASSERT_TRUE(fake_lua_connection_->waitForNewStream(*dispatcher_, lua_request_));\n  ASSERT_TRUE(lua_request_->waitForEndStream(*dispatcher_));\n  // Sanity checking that we sent the expected data.\n  EXPECT_THAT(lua_request_->headers(), HeaderValueOf(Http::Headers::get().Method, \"POST\"));\n  EXPECT_THAT(lua_request_->headers(), HeaderValueOf(Http::Headers::get().Path, \"/\"));\n\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  cleanup();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// Filter alters headers and changes route.\nTEST_P(LuaIntegrationTest, ChangeRoute) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function envoy_on_request(request_handle)\n      request_handle:headers():remove(\":path\")\n      request_handle:headers():add(\":path\", \"/alt/route\")\n    end\n)EOF\";\n\n  initializeFilter(FILTER_AND_CODE);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-forwarded-for\", \"10.0.0.1\"}};\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n\n  waitForNextUpstreamRequest(2);\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  cleanup();\n\n  
EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// Should survive from 30 calls when calling streamInfo():dynamicMetadata(). This is a regression\n// test for #4305.\nTEST_P(LuaIntegrationTest, SurviveMultipleCalls) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function envoy_on_request(request_handle)\n      request_handle:streamInfo():dynamicMetadata()\n    end\n)EOF\";\n\n  initializeFilter(FILTER_AND_CODE);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-forwarded-for\", \"10.0.0.1\"}};\n\n  for (uint32_t i = 0; i < 30; ++i) {\n    auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n\n    waitForNextUpstreamRequest();\n    upstream_request_->encodeHeaders(default_response_headers_, true);\n    response->waitForEndStream();\n\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n\n  cleanup();\n}\n\n// Basic test for verifying signature.\nTEST_P(LuaIntegrationTest, SignatureVerification) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function string.fromhex(str)\n      return (str:gsub('..', function (cc)\n        return string.char(tonumber(cc, 16))\n      end))\n    end\n\n    -- decoding\n    function dec(data)\n      local 
b='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\n      data = string.gsub(data, '[^'..b..'=]', '')\n      return (data:gsub('.', function(x)\n        if (x == '=') then return '' end\n        local r,f='',(b:find(x)-1)\n        for i=6,1,-1 do r=r..(f%2^i-f%2^(i-1)>0 and '1' or '0') end\n        return r;\n      end):gsub('%d%d%d?%d?%d?%d?%d?%d?', function(x)\n        if (#x ~= 8) then return '' end\n        local c=0\n        for i=1,8 do c=c+(x:sub(i,i)=='1' and 2^(8-i) or 0) end\n        return string.char(c)\n      end))\n    end\n\n    function envoy_on_request(request_handle)\n      local metadata = request_handle:metadata():get(\"keyset\")\n      local keyder = metadata[request_handle:headers():get(\"keyid\")]\n\n      local rawkeyder = dec(keyder)\n      local pubkey = request_handle:importPublicKey(rawkeyder, string.len(rawkeyder)):get()\n\n      if pubkey == nil then\n        request_handle:logErr(\"log test\")\n        request_handle:headers():add(\"signature_verification\", \"rejected\")\n        return\n      end\n\n      local hash = request_handle:headers():get(\"hash\")\n      local sig = request_handle:headers():get(\"signature\")\n      local rawsig = sig:fromhex()\n      local data = request_handle:headers():get(\"message\")\n      local ok, error = request_handle:verifySignature(hash, pubkey, rawsig, string.len(rawsig), data, string.len(data))\n\n      if ok then\n        request_handle:headers():add(\"signature_verification\", \"approved\")\n      else\n        request_handle:logErr(error)\n        request_handle:headers():add(\"signature_verification\", \"rejected\")\n      end\n\n      request_handle:headers():add(\"verification\", \"done\")\n    end\n)EOF\";\n\n  initializeFilter(FILTER_AND_CODE);\n\n  auto signature =\n      \"345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a2\"\n      
\"9c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5\"\n      \"ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf5\"\n      \"70a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace\"\n      \"1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6\"\n      \"295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332\";\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},    {\":path\", \"/test/long/url\"},     {\":scheme\", \"https\"},\n      {\":authority\", \"host\"}, {\"x-forwarded-for\", \"10.0.0.1\"}, {\"message\", \"hello\"},\n      {\"keyid\", \"foo\"},       {\"signature\", signature},        {\"hash\", \"sha256\"}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  waitForNextUpstreamRequest();\n\n  EXPECT_EQ(\"approved\", upstream_request_->headers()\n                            .get(Http::LowerCaseString(\"signature_verification\"))\n                            ->value()\n                            .getStringView());\n\n  EXPECT_EQ(\"done\", upstream_request_->headers()\n                        .get(Http::LowerCaseString(\"verification\"))\n                        ->value()\n                        .getStringView());\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n  cleanup();\n}\n\nconst std::string FILTER_AND_CODE =\n    R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua\n  inline_code: |\n    function envoy_on_request(request_handle)\n      request_handle:headers():add(\"code\", \"code_from_global\")\n    end\n  source_codes:\n    hello.lua:\n  
    inline_string: |\n        function envoy_on_request(request_handle)\n          request_handle:headers():add(\"code\", \"code_from_hello\")\n        end\n    byebye.lua:\n      inline_string: |\n        function envoy_on_request(request_handle)\n          request_handle:headers():add(\"code\", \"code_from_byebye\")\n        end\n)EOF\";\n\nconst std::string INITIAL_ROUTE_CONFIG =\n    R\"EOF(\nname: basic_lua_routes\nvirtual_hosts:\n- name: rds_vhost_1\n  domains: [\"lua.per.route\"]\n  routes:\n  - match:\n      prefix: \"/lua/per/route/default\"\n    route:\n      cluster: lua_cluster\n  - match:\n      prefix: \"/lua/per/route/disabled\"\n    route:\n      cluster: lua_cluster\n    typed_per_filter_config:\n      envoy.filters.http.lua:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute\n        disabled: true\n  - match:\n      prefix: \"/lua/per/route/hello\"\n    route:\n      cluster: lua_cluster\n    typed_per_filter_config:\n      envoy.filters.http.lua:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute\n        name: hello.lua\n  - match:\n      prefix: \"/lua/per/route/byebye\"\n    route:\n      cluster: lua_cluster\n    typed_per_filter_config:\n      envoy.filters.http.lua:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute\n        name: byebye.lua\n  - match:\n      prefix: \"/lua/per/route/inline\"\n    route:\n      cluster: lua_cluster\n    typed_per_filter_config:\n      envoy.filters.http.lua:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute\n        source_code:\n          inline_string: |\n            function envoy_on_request(request_handle)\n              request_handle:headers():add(\"code\", \"inline_code_from_inline\")\n            end\n  - match:\n      prefix: \"/lua/per/route/nocode\"\n    route:\n      cluster: lua_cluster\n    typed_per_filter_config:\n      
envoy.filters.http.lua:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute\n        name: nocode.lua\n)EOF\";\n\nconst std::string UPDATE_ROUTE_CONFIG =\n    R\"EOF(\nname: basic_lua_routes\nvirtual_hosts:\n- name: rds_vhost_1\n  domains: [\"lua.per.route\"]\n  routes:\n  - match:\n      prefix: \"/lua/per/route/hello\"\n    route:\n      cluster: lua_cluster\n    typed_per_filter_config:\n      envoy.filters.http.lua:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute\n        source_code:\n          inline_string: |\n            function envoy_on_request(request_handle)\n              request_handle:headers():add(\"code\", \"inline_code_from_hello\")\n            end\n  - match:\n      prefix: \"/lua/per/route/inline\"\n    route:\n      cluster: lua_cluster\n    typed_per_filter_config:\n      envoy.filters.http.lua:\n        \"@type\": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute\n        source_code:\n          inline_string: |\n            function envoy_on_request(request_handle)\n              request_handle:headers():add(\"code\", \"new_inline_code_from_inline\")\n            end\n)EOF\";\n\n// Test whether LuaPerRoute works properly. 
Since this test is mainly for configuration, the Lua\n// script can be very simple.\nTEST_P(LuaIntegrationTest, BasicTestOfLuaPerRoute) {\n  initializeWithYaml(FILTER_AND_CODE, INITIAL_ROUTE_CONFIG);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto check_request = [this](Http::TestRequestHeaderMapImpl request_headers,\n                              std::string expected_value) {\n    auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    waitForNextUpstreamRequest(1);\n\n    auto* entry = upstream_request_->headers().get(Http::LowerCaseString(\"code\"));\n    if (!expected_value.empty()) {\n      EXPECT_EQ(expected_value, entry->value().getStringView());\n    } else {\n      EXPECT_EQ(nullptr, entry);\n    }\n\n    upstream_request_->encodeHeaders(default_response_headers_, true);\n    response->waitForEndStream();\n\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  };\n\n  // Lua code defined in 'inline_code' will be executed by default.\n  Http::TestRequestHeaderMapImpl default_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/lua/per/route/default\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"lua.per.route\"},\n                                                 {\"x-forwarded-for\", \"10.0.0.1\"}};\n  check_request(default_headers, \"code_from_global\");\n\n  // Test whether LuaPerRoute can disable the Lua filter.\n  Http::TestRequestHeaderMapImpl disabled_headers{{\":method\", \"GET\"},\n                                                  {\":path\", \"/lua/per/route/disabled\"},\n                                                  {\":scheme\", \"http\"},\n                                                  {\":authority\", \"lua.per.route\"},\n                                                  {\"x-forwarded-for\", 
\"10.0.0.1\"}};\n  check_request(disabled_headers, \"\");\n\n  // Test whether LuaPerRoute can correctly reference Lua code defined in filter config.\n  Http::TestRequestHeaderMapImpl hello_headers{{\":method\", \"GET\"},\n                                               {\":path\", \"/lua/per/route/hello\"},\n                                               {\":scheme\", \"http\"},\n                                               {\":authority\", \"lua.per.route\"},\n                                               {\"x-forwarded-for\", \"10.0.0.1\"}};\n  check_request(hello_headers, \"code_from_hello\");\n\n  Http::TestRequestHeaderMapImpl byebye_headers{{\":method\", \"GET\"},\n                                                {\":path\", \"/lua/per/route/byebye\"},\n                                                {\":scheme\", \"http\"},\n                                                {\":authority\", \"lua.per.route\"},\n                                                {\"x-forwarded-for\", \"10.0.0.1\"}};\n  check_request(byebye_headers, \"code_from_byebye\");\n\n  // Test whether LuaPerRoute can directly provide inline Lua code.\n  Http::TestRequestHeaderMapImpl inline_headers{{\":method\", \"GET\"},\n                                                {\":path\", \"/lua/per/route/inline\"},\n                                                {\":scheme\", \"http\"},\n                                                {\":authority\", \"lua.per.route\"},\n                                                {\"x-forwarded-for\", \"10.0.0.1\"}};\n  check_request(inline_headers, \"inline_code_from_inline\");\n\n  // When the name referenced by LuaPerRoute does not exist, Lua filter does nothing.\n  Http::TestRequestHeaderMapImpl nocode_headers{{\":method\", \"GET\"},\n                                                {\":path\", \"/lua/per/route/nocode\"},\n                                                {\":scheme\", \"http\"},\n                                                
{\":authority\", \"lua.per.route\"},\n                                                {\"x-forwarded-for\", \"10.0.0.1\"}};\n\n  check_request(nocode_headers, \"\");\n  cleanup();\n}\n\n// Test whether Rds can correctly deliver LuaPerRoute configuration.\nTEST_P(LuaIntegrationTest, RdsTestOfLuaPerRoute) {\n// When the route configuration is updated dynamically via RDS and the configuration contains an\n// inline Lua code, Envoy may call lua_open in multiple threads to create new lua_State objects.\n// During lua_State creation, 'LuaJIT' uses some static local variables shared by multiple threads\n// to aid memory allocation. Although 'LuaJIT' itself guarantees that there is no thread safety\n// issue here, the use of these static local variables by multiple threads will cause a TSAN alarm.\n#if defined(__has_feature) && __has_feature(thread_sanitizer)\n  ENVOY_LOG_MISC(critical, \"LuaIntegrationTest::RdsTestOfLuaPerRoute not supported by this \"\n                           \"compiler configuration\");\n#else\n  initializeWithRds(FILTER_AND_CODE, \"basic_lua_routes\", INITIAL_ROUTE_CONFIG);\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto check_request = [this](Http::TestRequestHeaderMapImpl request_headers,\n                              std::string expected_value) {\n    auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    waitForNextUpstreamRequest(1);\n\n    auto* entry = upstream_request_->headers().get(Http::LowerCaseString(\"code\"));\n    if (!expected_value.empty()) {\n      EXPECT_EQ(expected_value, entry->value().getStringView());\n    } else {\n      EXPECT_EQ(nullptr, entry);\n    }\n\n    upstream_request_->encodeHeaders(default_response_headers_, true);\n    response->waitForEndStream();\n\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  };\n\n  Http::TestRequestHeaderMapImpl hello_headers{{\":method\", \"GET\"},\n                                    
           {\":path\", \"/lua/per/route/hello\"},\n                                               {\":scheme\", \"http\"},\n                                               {\":authority\", \"lua.per.route\"},\n                                               {\"x-forwarded-for\", \"10.0.0.1\"}};\n  check_request(hello_headers, \"code_from_hello\");\n\n  Http::TestRequestHeaderMapImpl inline_headers{{\":method\", \"GET\"},\n                                                {\":path\", \"/lua/per/route/inline\"},\n                                                {\":scheme\", \"http\"},\n                                                {\":authority\", \"lua.per.route\"},\n                                                {\"x-forwarded-for\", \"10.0.0.1\"}};\n  check_request(inline_headers, \"inline_code_from_inline\");\n\n  // Update route config by RDS. Test whether RDS can work normally.\n  sendSotwDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration,\n      {TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(UPDATE_ROUTE_CONFIG)},\n      \"2\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.basic_lua_routes.update_success\", 2);\n\n  check_request(hello_headers, \"inline_code_from_hello\");\n  check_request(inline_headers, \"new_inline_code_from_inline\");\n\n  cleanup();\n#endif\n}\n\n// Rewrite response buffer.\nTEST_P(LuaIntegrationTest, RewriteResponseBuffer) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function envoy_on_response(response_handle)\n      local content_length = response_handle:body():setBytes(\"ok\")\n      response_handle:logTrace(content_length)\n\n      response_handle:headers():replace(\"content-length\", content_length)\n    end\n)EOF\";\n\n  testRewriteResponse(FILTER_AND_CODE);\n}\n\n// Rewrite chunked response 
body.\nTEST_P(LuaIntegrationTest, RewriteChunkedBody) {\n  const std::string FILTER_AND_CODE =\n      R\"EOF(\nname: lua\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua\n  inline_code: |\n    function envoy_on_response(response_handle)\n      response_handle:headers():replace(\"content-length\", 2)\n      local last\n      for chunk in response_handle:bodyChunks() do\n        chunk:setBytes(\"\")\n        last = chunk\n      end\n      last:setBytes(\"ok\")\n    end\n)EOF\";\n\n  testRewriteResponse(FILTER_AND_CODE);\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/lua/wrappers_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/http/utility.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"extensions/filters/http/lua/wrappers.h\"\n\n#include \"test/extensions/filters/common/lua/lua_wrappers.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::InSequence;\nusing testing::ReturnPointee;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Lua {\nnamespace {\n\nclass LuaHeaderMapWrapperTest : public Filters::Common::Lua::LuaWrappersTestBase<HeaderMapWrapper> {\npublic:\n  void setup(const std::string& script) override {\n    Filters::Common::Lua::LuaWrappersTestBase<HeaderMapWrapper>::setup(script);\n    state_->registerType<HeaderMapIterator>();\n  }\n};\n\n// Basic methods test for the header wrapper.\nTEST_F(LuaHeaderMapWrapperTest, Methods) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      object:add(\"HELLO\", \"WORLD\")\n      testPrint(object:get(\"hELLo\"))\n\n      object:add(\"header1\", \"\")\n      object:add(\"header2\", \"foo\")\n\n      for key, value in pairs(object) do\n        testPrint(string.format(\"'%s' '%s'\", key, value))\n      end\n\n      object:remove(\"header1\")\n      for key, value in pairs(object) do\n        testPrint(string.format(\"'%s' '%s'\", key, value))\n      end\n\n      object:add(\"header3\", \"foo\")\n      object:add(\"header3\", \"bar\")\n      testPrint(object:get(\"header3\"))\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl headers;\n  HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; });\n  EXPECT_CALL(printer_, testPrint(\"WORLD\"));\n  EXPECT_CALL(printer_, testPrint(\"'hello' 'WORLD'\"));\n  EXPECT_CALL(printer_, testPrint(\"'header1' ''\"));\n  EXPECT_CALL(printer_, testPrint(\"'header2' 'foo'\"));\n  EXPECT_CALL(printer_, testPrint(\"'hello' 'WORLD'\"));\n  
EXPECT_CALL(printer_, testPrint(\"'header2' 'foo'\"));\n  EXPECT_CALL(printer_, testPrint(\"foo,bar\"));\n  start(\"callMe\");\n}\n\n// Test modifiable methods.\nTEST_F(LuaHeaderMapWrapperTest, ModifiableMethods) {\n  const std::string SCRIPT{R\"EOF(\n    function shouldBeOk(object)\n      object:get(\"hELLo\")\n      for key, value in pairs(object) do\n      end\n    end\n\n    function shouldFailRemove(object)\n      object:remove(\"foo\")\n    end\n\n    function shouldFailAdd(object)\n      object:add(\"foo\")\n    end\n\n    function shouldFailReplace(object)\n      object:replace(\"foo\")\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl headers;\n  HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return false; });\n  start(\"shouldBeOk\");\n\n  setup(SCRIPT);\n  HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return false; });\n  EXPECT_THROW_WITH_MESSAGE(start(\"shouldFailRemove\"), Filters::Common::Lua::LuaException,\n                            \"[string \\\"...\\\"]:9: header map can no longer be modified\");\n\n  setup(SCRIPT);\n  HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return false; });\n  EXPECT_THROW_WITH_MESSAGE(start(\"shouldFailAdd\"), Filters::Common::Lua::LuaException,\n                            \"[string \\\"...\\\"]:13: header map can no longer be modified\");\n\n  setup(SCRIPT);\n  HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return false; });\n  EXPECT_THROW_WITH_MESSAGE(start(\"shouldFailReplace\"), Filters::Common::Lua::LuaException,\n                            \"[string \\\"...\\\"]:17: header map can no longer be modified\");\n}\n\n// Verify that replace works correctly with both inline and normal headers.\nTEST_F(LuaHeaderMapWrapperTest, Replace) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      object:replace(\":path\", \"/new_path\")\n      object:replace(\"other_header\", 
\"other_header_value\")\n      object:replace(\"new_header\", \"new_header_value\")\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl headers{{\":path\", \"/\"}, {\"other_header\", \"hello\"}};\n  HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; });\n  start(\"callMe\");\n\n  EXPECT_EQ((Http::TestRequestHeaderMapImpl{{\":path\", \"/new_path\"},\n                                            {\"other_header\", \"other_header_value\"},\n                                            {\"new_header\", \"new_header_value\"}}),\n            headers);\n}\n\n// Modify during iteration.\nTEST_F(LuaHeaderMapWrapperTest, ModifyDuringIteration) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      for key, value in pairs(object) do\n        object:add(\"hello\", \"world\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl headers{{\"foo\", \"bar\"}};\n  HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; });\n  EXPECT_THROW_WITH_MESSAGE(start(\"callMe\"), Filters::Common::Lua::LuaException,\n                            \"[string \\\"...\\\"]:4: header map cannot be modified while iterating\");\n}\n\n// Modify after iteration.\nTEST_F(LuaHeaderMapWrapperTest, ModifyAfterIteration) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      for key, value in pairs(object) do\n        testPrint(string.format(\"'%s' '%s'\", key, value))\n      end\n\n      object:add(\"hello\", \"world\")\n\n      for key, value in pairs(object) do\n        testPrint(string.format(\"'%s' '%s'\", key, value))\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl headers{{\"foo\", \"bar\"}};\n  HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; });\n  EXPECT_CALL(printer_, testPrint(\"'foo' 'bar'\"));\n  EXPECT_CALL(printer_, testPrint(\"'foo' 
'bar'\"));\n  EXPECT_CALL(printer_, testPrint(\"'hello' 'world'\"));\n  start(\"callMe\");\n}\n\n// Don't finish iteration.\nTEST_F(LuaHeaderMapWrapperTest, DontFinishIteration) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      iterator = pairs(object)\n      key, value = iterator()\n      iterator2 = pairs(object)\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl headers{{\"foo\", \"bar\"}, {\"hello\", \"world\"}};\n  HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; });\n  EXPECT_THROW_WITH_MESSAGE(\n      start(\"callMe\"), Filters::Common::Lua::LuaException,\n      \"[string \\\"...\\\"]:5: cannot create a second iterator before completing the first\");\n}\n\n// Use iterator across yield.\nTEST_F(LuaHeaderMapWrapperTest, IteratorAcrossYield) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      iterator = pairs(object)\n      coroutine.yield()\n      iterator()\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  Http::TestRequestHeaderMapImpl headers{{\"foo\", \"bar\"}, {\"hello\", \"world\"}};\n  Filters::Common::Lua::LuaDeathRef<HeaderMapWrapper> wrapper(\n      HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }), true);\n  yield_callback_ = [] {};\n  start(\"callMe\");\n  wrapper.reset();\n  EXPECT_THROW_WITH_MESSAGE(coroutine_->resume(0, [] {}), Filters::Common::Lua::LuaException,\n                            \"[string \\\"...\\\"]:5: object used outside of proper scope\");\n}\n\nclass LuaStreamInfoWrapperTest\n    : public Filters::Common::Lua::LuaWrappersTestBase<StreamInfoWrapper> {\npublic:\n  void setup(const std::string& script) override {\n    Filters::Common::Lua::LuaWrappersTestBase<StreamInfoWrapper>::setup(script);\n    state_->registerType<DynamicMetadataMapWrapper>();\n    state_->registerType<DynamicMetadataMapIterator>();\n  }\n\nprotected:\n  void expectToPrintCurrentProtocol(const 
absl::optional<Envoy::Http::Protocol>& protocol) {\n    const std::string SCRIPT{R\"EOF(\n      function callMe(object)\n        testPrint(string.format(\"'%s'\", object:protocol()))\n      end\n    )EOF\"};\n\n    InSequence s;\n    setup(SCRIPT);\n\n    NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n    ON_CALL(stream_info, protocol()).WillByDefault(ReturnPointee(&protocol));\n    Filters::Common::Lua::LuaDeathRef<StreamInfoWrapper> wrapper(\n        StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true);\n    EXPECT_CALL(printer_,\n                testPrint(fmt::format(\"'{}'\", Http::Utility::getProtocolString(protocol.value()))));\n    start(\"callMe\");\n    wrapper.reset();\n  }\n\n  envoy::config::core::v3::Metadata parseMetadataFromYaml(const std::string& yaml_string) {\n    envoy::config::core::v3::Metadata metadata;\n    TestUtility::loadFromYaml(yaml_string, metadata);\n    return metadata;\n  }\n\n  Event::SimulatedTimeSystem test_time_;\n};\n\n// Return the current request protocol.\nTEST_F(LuaStreamInfoWrapperTest, ReturnCurrentProtocol) {\n  expectToPrintCurrentProtocol(Http::Protocol::Http10);\n  expectToPrintCurrentProtocol(Http::Protocol::Http11);\n  expectToPrintCurrentProtocol(Http::Protocol::Http2);\n}\n\n// Set, get and iterate stream info dynamic metadata.\nTEST_F(LuaStreamInfoWrapperTest, SetGetAndIterateDynamicMetadata) {\n  const std::string SCRIPT{R\"EOF(\n      function callMe(object)\n        testPrint(type(object:dynamicMetadata()))\n        object:dynamicMetadata():set(\"envoy.lb\", \"foo\", \"bar\")\n        object:dynamicMetadata():set(\"envoy.lb\", \"so\", \"cool\")\n\n        testPrint(object:dynamicMetadata():get(\"envoy.lb\")[\"foo\"])\n        testPrint(object:dynamicMetadata():get(\"envoy.lb\")[\"so\"])\n\n        for filter, entry in pairs(object:dynamicMetadata()) do\n          for key, value in pairs(entry) do\n            testPrint(string.format(\"'%s' '%s'\", key, value))\n          end\n        
end\n\n        local function nRetVals(...)\n          return select('#',...)\n        end\n        testPrint(tostring(nRetVals(object:dynamicMetadata():get(\"envoy.ngx\"))))\n      end\n    )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  StreamInfo::StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n  EXPECT_EQ(0, stream_info.dynamicMetadata().filter_metadata_size());\n  Filters::Common::Lua::LuaDeathRef<StreamInfoWrapper> wrapper(\n      StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true);\n  EXPECT_CALL(printer_, testPrint(\"userdata\"));\n  EXPECT_CALL(printer_, testPrint(\"bar\"));\n  EXPECT_CALL(printer_, testPrint(\"cool\"));\n  EXPECT_CALL(printer_, testPrint(\"'foo' 'bar'\"));\n  EXPECT_CALL(printer_, testPrint(\"'so' 'cool'\"));\n  EXPECT_CALL(printer_, testPrint(\"0\"));\n  start(\"callMe\");\n\n  EXPECT_EQ(1, stream_info.dynamicMetadata().filter_metadata_size());\n  EXPECT_EQ(\"bar\", stream_info.dynamicMetadata()\n                       .filter_metadata()\n                       .at(\"envoy.lb\")\n                       .fields()\n                       .at(\"foo\")\n                       .string_value());\n  wrapper.reset();\n}\n\n// Set, get complex key/values in stream info dynamic metadata.\nTEST_F(LuaStreamInfoWrapperTest, SetGetComplexDynamicMetadata) {\n  const std::string SCRIPT{R\"EOF(\n      function callMe(object)\n        object:dynamicMetadata():set(\"envoy.lb\", \"foo\", {x=1234, y=\"baz\", z=true})\n        object:dynamicMetadata():set(\"envoy.lb\", \"so\", {\"cool\", \"and\", \"dynamic\", true})\n\n        testPrint(tostring(object:dynamicMetadata():get(\"envoy.lb\")[\"foo\"].x))\n        testPrint(object:dynamicMetadata():get(\"envoy.lb\")[\"foo\"].y)\n        testPrint(tostring(object:dynamicMetadata():get(\"envoy.lb\")[\"foo\"].z))\n        testPrint(object:dynamicMetadata():get(\"envoy.lb\")[\"so\"][1])\n        testPrint(object:dynamicMetadata():get(\"envoy.lb\")[\"so\"][2])\n        
testPrint(object:dynamicMetadata():get(\"envoy.lb\")[\"so\"][3])\n        testPrint(tostring(object:dynamicMetadata():get(\"envoy.lb\")[\"so\"][4]))\n      end\n    )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  StreamInfo::StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n  EXPECT_EQ(0, stream_info.dynamicMetadata().filter_metadata_size());\n  Filters::Common::Lua::LuaDeathRef<StreamInfoWrapper> wrapper(\n      StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true);\n  EXPECT_CALL(printer_, testPrint(\"1234\"));\n  EXPECT_CALL(printer_, testPrint(\"baz\"));\n  EXPECT_CALL(printer_, testPrint(\"true\"));\n  EXPECT_CALL(printer_, testPrint(\"cool\"));\n  EXPECT_CALL(printer_, testPrint(\"and\"));\n  EXPECT_CALL(printer_, testPrint(\"dynamic\"));\n  EXPECT_CALL(printer_, testPrint(\"true\"));\n  start(\"callMe\");\n\n  EXPECT_EQ(1, stream_info.dynamicMetadata().filter_metadata_size());\n  const ProtobufWkt::Struct& meta_foo = stream_info.dynamicMetadata()\n                                            .filter_metadata()\n                                            .at(\"envoy.lb\")\n                                            .fields()\n                                            .at(\"foo\")\n                                            .struct_value();\n\n  EXPECT_EQ(1234.0, meta_foo.fields().at(\"x\").number_value());\n  EXPECT_EQ(\"baz\", meta_foo.fields().at(\"y\").string_value());\n  EXPECT_EQ(true, meta_foo.fields().at(\"z\").bool_value());\n\n  const ProtobufWkt::ListValue& meta_so =\n      stream_info.dynamicMetadata().filter_metadata().at(\"envoy.lb\").fields().at(\"so\").list_value();\n\n  EXPECT_EQ(4, meta_so.values_size());\n  EXPECT_EQ(\"cool\", meta_so.values(0).string_value());\n  EXPECT_EQ(\"and\", meta_so.values(1).string_value());\n  EXPECT_EQ(\"dynamic\", meta_so.values(2).string_value());\n  EXPECT_EQ(true, meta_so.values(3).bool_value());\n\n  wrapper.reset();\n}\n\n// Bad types in 
table\nTEST_F(LuaStreamInfoWrapperTest, BadTypesInTableForDynamicMetadata) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      object:dynamicMetadata():set(\"envoy.lb\", \"hello\", {x=\"world\", y=function(a, b) end})\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  StreamInfo::StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n  Filters::Common::Lua::LuaDeathRef<StreamInfoWrapper> wrapper(\n      StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true);\n  EXPECT_THROW_WITH_MESSAGE(start(\"callMe\"), Filters::Common::Lua::LuaException,\n                            \"[string \\\"...\\\"]:3: unexpected type 'function' in dynamicMetadata\");\n}\n\n// Modify during iteration.\nTEST_F(LuaStreamInfoWrapperTest, ModifyDuringIterationForDynamicMetadata) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      object:dynamicMetadata():set(\"envoy.lb\", \"hello\", \"world\")\n      for key, value in pairs(object:dynamicMetadata()) do\n        object:dynamicMetadata():set(\"envoy.lb\", \"hello\", \"envoy\")\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  StreamInfo::StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n  Filters::Common::Lua::LuaDeathRef<StreamInfoWrapper> wrapper(\n      StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true);\n  EXPECT_THROW_WITH_MESSAGE(\n      start(\"callMe\"), Filters::Common::Lua::LuaException,\n      \"[string \\\"...\\\"]:5: dynamic metadata map cannot be modified while iterating\");\n}\n\n// Modify after iteration.\nTEST_F(LuaStreamInfoWrapperTest, ModifyAfterIterationForDynamicMetadata) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      object:dynamicMetadata():set(\"envoy.lb\", \"hello\", \"world\")\n      for filter, entry in pairs(object:dynamicMetadata()) do\n        testPrint(filter)\n        for key, value in pairs(entry) do\n          
testPrint(string.format(\"'%s' '%s'\", key, value))\n        end\n      end\n\n      object:dynamicMetadata():set(\"envoy.lb\", \"hello\", \"envoy\")\n      object:dynamicMetadata():set(\"envoy.proxy\", \"proto\", \"grpc\")\n      for filter, entry in pairs(object:dynamicMetadata()) do\n        testPrint(filter)\n        for key, value in pairs(entry) do\n          testPrint(string.format(\"'%s' '%s'\", key, value))\n        end\n      end\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  StreamInfo::StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n  EXPECT_EQ(0, stream_info.dynamicMetadata().filter_metadata_size());\n  Filters::Common::Lua::LuaDeathRef<StreamInfoWrapper> wrapper(\n      StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true);\n  EXPECT_CALL(printer_, testPrint(\"envoy.lb\"));\n  EXPECT_CALL(printer_, testPrint(\"'hello' 'world'\"));\n  EXPECT_CALL(printer_, testPrint(\"envoy.proxy\"));\n  EXPECT_CALL(printer_, testPrint(\"'proto' 'grpc'\"));\n  EXPECT_CALL(printer_, testPrint(\"envoy.lb\"));\n  EXPECT_CALL(printer_, testPrint(\"'hello' 'envoy'\"));\n  start(\"callMe\");\n}\n\n// Don't finish iteration.\nTEST_F(LuaStreamInfoWrapperTest, DontFinishIterationForDynamicMetadata) {\n  const std::string SCRIPT{R\"EOF(\n    function callMe(object)\n      object:dynamicMetadata():set(\"envoy.lb\", \"foo\", \"bar\")\n      iterator = pairs(object:dynamicMetadata())\n      key, value = iterator()\n      iterator2 = pairs(object:dynamicMetadata())\n    end\n  )EOF\"};\n\n  InSequence s;\n  setup(SCRIPT);\n\n  StreamInfo::StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem());\n  Filters::Common::Lua::LuaDeathRef<StreamInfoWrapper> wrapper(\n      StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true);\n  EXPECT_THROW_WITH_MESSAGE(\n      start(\"callMe\"), Filters::Common::Lua::LuaException,\n      \"[string \\\"...\\\"]:6: cannot create a second iterator before 
completing the first\");\n}\n\n} // namespace\n} // namespace Lua\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/oauth2/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.oauth2\",\n    deps = [\n        \"//source/extensions/filters/http/oauth2:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"oauth_integration_test\",\n    srcs = [\"oauth_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.oauth2\",\n    deps = [\n        \"//source/extensions/filters/http/oauth2:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/integration:integration_lib\",\n        \"//test/mocks/server:factory_context_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"filter_test\",\n    srcs = [\"filter_test.cc\"],\n    extension_name = \"envoy.filters.http.oauth2\",\n    deps = [\n        \"//source/extensions/filters/http/oauth2:config\",\n        \"//source/extensions/filters/http/oauth2:oauth_callback_interface\",\n        \"//source/extensions/filters/http/oauth2:oauth_client\",\n        \"//source/extensions/filters/http/oauth2:oauth_lib\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"oauth_client_test\",\n    srcs = [\"oauth_test.cc\"],\n    extension_name = \"envoy.filters.http.oauth2\",\n    deps = [\n        \"//source/extensions/filters/http/oauth2:oauth_client\",\n        \"//test/integration:http_integration_lib\",\n        
\"//test/mocks/server:server_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/oauth2/config_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h\"\n\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/secret/secret_provider_impl.h\"\n\n#include \"extensions/filters/http/oauth2/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\nusing testing::NiceMock;\nusing testing::Return;\n\nTEST(ConfigTest, CreateFilter) {\n  const std::string yaml = R\"EOF(\nconfig:\n    token_endpoint:\n      cluster: foo\n      uri: oauth.com/token\n      timeout: 3s\n    authorization_endpoint: https://oauth.com/oauth/authorize/\n    redirect_uri: \"%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback\"\n    signout_path: \n      path:\n        exact: /signout\n    )EOF\";\n\n  envoy::extensions::filters::http::oauth2::v3alpha::OAuth2 proto_config;\n  MessageUtil::loadFromYaml(yaml, proto_config, ProtobufMessage::getStrictValidationVisitor());\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  auto& secret_manager = factory_context.cluster_manager_.cluster_manager_factory_.secretManager();\n  ON_CALL(secret_manager, findStaticGenericSecretProvider(_))\n      .WillByDefault(Return(std::make_shared<Secret::GenericSecretConfigProviderImpl>(\n          envoy::extensions::transport_sockets::tls::v3::GenericSecret())));\n\n  OAuth2Config config;\n  auto cb = config.createFilterFactoryFromProtoTyped(proto_config, \"whatever\", factory_context);\n\n  NiceMock<Http::MockFilterChainFactoryCallbacks> filter_callbacks;\n  cb(filter_callbacks);\n}\n\nTEST(ConfigTest, CreateFilterMissingConfig) {\n  OAuth2Config config;\n\n  envoy::extensions::filters::http::oauth2::v3alpha::OAuth2 proto_config;\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  
EXPECT_THROW_WITH_MESSAGE(\n      config.createFilterFactoryFromProtoTyped(proto_config, \"whatever\", factory_context),\n      EnvoyException, \"config must be present for global config\");\n}\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/http/oauth2/filter_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h\"\n#include \"envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h\"\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/message.h\"\n\n#include \"common/common/macros.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/oauth2/filter.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nstatic const std::string TEST_CALLBACK = \"/_oauth\";\nstatic const std::string TEST_CLIENT_ID = \"1\";\nstatic const std::string TEST_CLIENT_SECRET_ID = \"MyClientSecretKnoxID\";\nstatic const std::string TEST_TOKEN_SECRET_ID = \"MyTokenSecretKnoxID\";\n\nnamespace {\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    authorization_handle(Http::CustomHeaders::get().Authorization);\n}\n\nclass MockSecretReader : public SecretReader {\npublic:\n  const std::string& clientSecret() const override {\n    CONSTRUCT_ON_FIRST_USE(std::string, \"asdf_client_secret_fdsa\");\n  }\n  const std::string& tokenSecret() const override {\n    CONSTRUCT_ON_FIRST_USE(std::string, \"asdf_token_secret_fdsa\");\n  }\n};\n\nclass MockOAuth2CookieValidator : public CookieValidator {\npublic:\n  MOCK_METHOD(std::string&, username, (), (const));\n  MOCK_METHOD(std::string&, token, (), (const));\n  MOCK_METHOD(bool, isValid, (), (const));\n  MOCK_METHOD(void, setParams, (const Http::RequestHeaderMap& headers, const std::string& 
secret));\n};\n\nclass MockOAuth2Client : public OAuth2Client {\npublic:\n  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override {}\n  void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override {}\n  void setCallbacks(FilterCallbacks&) override {}\n  void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&,\n                                    const Http::ResponseHeaderMap*) override {}\n\n  MOCK_METHOD(void, asyncGetAccessToken,\n              (const std::string&, const std::string&, const std::string&, const std::string&));\n};\n\nclass OAuth2Test : public testing::Test {\npublic:\n  OAuth2Test() : request_(&cm_.async_client_) { init(); }\n\n  void init() {\n    // Set up the OAuth client\n    oauth_client_ = new MockOAuth2Client();\n    std::unique_ptr<OAuth2Client> oauth_client_ptr{oauth_client_};\n\n    // Set up proto fields\n    envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config p;\n    auto* endpoint = p.mutable_token_endpoint();\n    endpoint->set_cluster(\"auth.example.com\");\n    endpoint->set_uri(\"auth.example.com/_oauth\");\n    endpoint->mutable_timeout()->set_seconds(1);\n    p.set_redirect_uri(\"%REQ(x-forwarded-proto)%://%REQ(:authority)%\" + TEST_CALLBACK);\n    p.mutable_redirect_path_matcher()->mutable_path()->set_exact(TEST_CALLBACK);\n    p.set_authorization_endpoint(\"https://auth.example.com/oauth/authorize/\");\n    p.mutable_signout_path()->mutable_path()->set_exact(\"/_signout\");\n    p.set_forward_bearer_token(true);\n    auto* matcher = p.add_pass_through_matcher();\n    matcher->set_name(\":method\");\n    matcher->set_exact_match(\"OPTIONS\");\n\n    auto credentials = p.mutable_credentials();\n    credentials->set_client_id(TEST_CLIENT_ID);\n    credentials->mutable_token_secret()->set_name(\"secret\");\n    credentials->mutable_hmac_secret()->set_name(\"hmac\");\n\n    MessageUtil::validate(p, ProtobufMessage::getStrictValidationVisitor());\n\n    // 
Create the OAuth config.\n    auto secret_reader = std::make_shared<MockSecretReader>();\n    config_ = std::make_shared<FilterConfig>(p, factory_context_.cluster_manager_, secret_reader,\n                                             scope_, \"test.\");\n\n    filter_ = std::make_shared<OAuth2Filter>(config_, std::move(oauth_client_ptr), test_time_);\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n    validator_ = std::make_shared<MockOAuth2CookieValidator>();\n    filter_->validator_ = validator_;\n  }\n\n  Http::AsyncClient::Callbacks* popPendingCallback() {\n    if (callbacks_.empty()) {\n      // Can't use ASSERT_* as this is not a test function\n      throw std::underflow_error(\"empty deque\");\n    }\n\n    auto callbacks = callbacks_.front();\n    callbacks_.pop_front();\n    return callbacks;\n  }\n\n  NiceMock<Event::MockTimer>* attachmentTimeout_timer_{};\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  std::shared_ptr<MockOAuth2CookieValidator> validator_;\n  std::shared_ptr<OAuth2Filter> filter_;\n  MockOAuth2Client* oauth_client_;\n  FilterConfigSharedPtr config_;\n  Http::MockAsyncClientRequest request_;\n  std::deque<Http::AsyncClient::Callbacks*> callbacks_;\n  Stats::IsolatedStoreImpl scope_;\n  Event::SimulatedTimeSystem test_time_;\n};\n\n// Verifies that we fail constructing the filter if the configured cluster doesn't exist.\nTEST_F(OAuth2Test, InvalidCluster) {\n  ON_CALL(factory_context_.cluster_manager_, get(_)).WillByDefault(Return(nullptr));\n\n  EXPECT_THROW_WITH_MESSAGE(init(), EnvoyException,\n                            \"OAuth2 filter: unknown cluster 'auth.example.com' in config. 
Please \"\n                            \"specify which cluster to direct OAuth requests to.\");\n}\n\n/**\n * Scenario: The OAuth filter receives a sign out request.\n *\n * Expected behavior: the filter should redirect to the server name with cleared OAuth cookies.\n */\nTEST_F(OAuth2Test, RequestSignout) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Path.get(), \"/_signout\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().ForwardedProto.get(), \"https\"},\n  };\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {Http::Headers::get().Status.get(), \"302\"},\n      {Http::Headers::get().SetCookie.get(),\n       \"OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\"},\n      {Http::Headers::get().SetCookie.get(),\n       \"BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\"},\n      {Http::Headers::get().Location.get(), \"https://traffic.example.com/\"},\n  };\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer,\n            filter_->decodeHeaders(request_headers, false));\n}\n\n/**\n * Scenario: The OAuth filter receives a request to an arbitrary path with valid OAuth cookies\n * (cookie values and validation are mocked out)\n * In a real flow, the injected OAuth headers should be sanitized and replaced with legitimate\n * values.\n *\n * Expected behavior: the filter should let the request proceed, and sanitize the injected headers.\n */\nTEST_F(OAuth2Test, OAuthOkPass) {\n  Http::TestRequestHeaderMapImpl mock_request_headers{\n      {Http::Headers::get().Path.get(), \"/anypath\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      
{Http::Headers::get().ForwardedProto.get(), \"https\"},\n      {Http::CustomHeaders::get().Authorization.get(), \"Bearer injected_malice!\"},\n  };\n\n  Http::TestRequestHeaderMapImpl expected_headers{\n      {Http::Headers::get().Path.get(), \"/anypath\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().ForwardedProto.get(), \"https\"},\n      {Http::CustomHeaders::get().Authorization.get(), \"Bearer legit_token\"},\n  };\n\n  // cookie-validation mocking\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(true));\n\n  // Sanitized return reference mocking\n  std::string legit_token{\"legit_token\"};\n  EXPECT_CALL(*validator_, token()).WillOnce(ReturnRef(legit_token));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->decodeHeaders(mock_request_headers, false));\n\n  // Ensure that existing OAuth forwarded headers got sanitized.\n  EXPECT_EQ(mock_request_headers, expected_headers);\n\n  EXPECT_EQ(scope_.counterFromString(\"test.oauth_failure\").value(), 0);\n  EXPECT_EQ(scope_.counterFromString(\"test.oauth_success\").value(), 1);\n}\n\n/**\n * Scenario: The OAuth filter receives a request without valid OAuth cookies to a non-callback URL\n * (indicating that the user needs to re-validate cookies or get 401'd).\n * This also tests both a forwarded http protocol from upstream and a plaintext connection.\n *\n * Expected behavior: the filter should redirect the user to the OAuth server with the credentials\n * in the query parameters.\n */\nTEST_F(OAuth2Test, OAuthErrorNonOAuthHttpCallback) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Path.get(), \"/not/_oauth\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      
{Http::Headers::get().Scheme.get(), \"http\"},\n      {Http::Headers::get().ForwardedProto.get(), \"http\"},\n  };\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {Http::Headers::get().Status.get(), \"302\"},\n      {Http::Headers::get().Location.get(),\n       \"https://auth.example.com/oauth/\"\n       \"authorize/?client_id=\" +\n           TEST_CLIENT_ID +\n           \"&scope=user&response_type=code&\"\n           \"redirect_uri=http%3A%2F%2Ftraffic.example.com%2F\"\n           \"_oauth&state=http%3A%2F%2Ftraffic.example.com%2Fnot%2F_oauth\"},\n  };\n\n  // explicitly tell the validator to fail the validation\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false));\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer,\n            filter_->decodeHeaders(request_headers, false));\n}\n\n/**\n * Scenario: The OAuth filter receives a callback request with an error code\n */\nTEST_F(OAuth2Test, OAuthErrorQueryString) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Path.get(), \"/_oauth?error=someerrorcode\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n  };\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {Http::Headers::get().Status.get(), \"401\"},\n      {Http::Headers::get().ContentLength.get(), \"18\"}, // unauthorizedBodyMessage()\n      {Http::Headers::get().ContentType.get(), \"text/plain\"},\n  };\n\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false));\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(decoder_callbacks_, encodeData(_, true));\n\n  
EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer,\n            filter_->decodeHeaders(request_headers, false));\n\n  EXPECT_EQ(scope_.counterFromString(\"test.oauth_failure\").value(), 1);\n  EXPECT_EQ(scope_.counterFromString(\"test.oauth_success\").value(), 0);\n}\n\n/**\n * Scenario: The OAuth filter requests credentials from auth.example.com which returns a\n * response without expires_in (JSON response is mocked out)\n *\n * Expected behavior: the filter should return a 401 directly to the user.\n */\nTEST_F(OAuth2Test, OAuthCallbackStartsAuthentication) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Path.get(), \"/_oauth?code=123&state=https://asdf&method=GET\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().ForwardedProto.get(), \"https\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n  };\n\n  // Deliberately fail the HMAC Validation check.\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false));\n\n  EXPECT_CALL(*oauth_client_, asyncGetAccessToken(\"123\", TEST_CLIENT_ID, \"asdf_client_secret_fdsa\",\n                                                  \"https://traffic.example.com\" + TEST_CALLBACK));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer,\n            filter_->decodeHeaders(request_headers, false));\n}\n\n/**\n * Scenario: Protoc in opted-in to allow OPTIONS requests to pass-through. This is important as\n * POST requests initiate an OPTIONS request first in order to ensure POST is supported. 
During a\n * preflight request where the client Javascript initiates a remote call to a different endpoint,\n * we don't want to fail the call immediately due to browser restrictions, and use existing\n * cookies instead (OPTIONS requests do not send OAuth cookies.)\n */\nTEST_F(OAuth2Test, OAuthOptionsRequestAndContinue) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Path.get(), \"/anypath\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Options},\n  };\n\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n}\n\n// Validates the behavior of the cookie validator.\nTEST_F(OAuth2Test, CookieValidator) {\n  // Set SystemTime to a fixed point so we get consistent HMAC encodings between test runs.\n  test_time_.setSystemTime(SystemTime(std::chrono::seconds(0)));\n\n  const auto expires_at_s =\n      std::chrono::duration_cast<std::chrono::seconds>(\n          test_time_.timeSystem().systemTime().time_since_epoch() + std::chrono::seconds(10))\n          .count();\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Path.get(), \"/anypath\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().Cookie.get(),\n       fmt::format(\"OauthExpires={};version=test\", expires_at_s)},\n      {Http::Headers::get().Cookie.get(), \"BearerToken=xyztoken;version=test\"},\n      {Http::Headers::get().Cookie.get(),\n       \"OauthHMAC=\"\n       \"NGQ3MzVjZGExNGM5NTFiZGJjODBkMjBmYjAyYjNiOTFjMmNjYjIxMTUzNmNiNWU0NjQzMmMxMWUzZmE2ZWJjYg==\"\n       \";version=test\"},\n  };\n\n  auto cookie_validator = 
std::make_shared<OAuth2CookieValidator>(test_time_);\n  cookie_validator->setParams(request_headers, \"mock-secret\");\n\n  EXPECT_TRUE(cookie_validator->hmacIsValid());\n  EXPECT_TRUE(cookie_validator->timestampIsValid());\n  EXPECT_TRUE(cookie_validator->isValid());\n\n  // If we advance time beyond 10s the timestamp should no longer be valid.\n  test_time_.advanceTimeWait(std::chrono::seconds(11));\n\n  EXPECT_FALSE(cookie_validator->timestampIsValid());\n  EXPECT_FALSE(cookie_validator->isValid());\n}\n\n// Validates the behavior of the cookie validator when the expires_at value is not a valid integer.\nTEST_F(OAuth2Test, CookieValidatorInvalidExpiresAt) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Path.get(), \"/anypath\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().Cookie.get(), \"OauthExpires=notanumber;version=test\"},\n      {Http::Headers::get().Cookie.get(), \"BearerToken=xyztoken;version=test\"},\n      {Http::Headers::get().Cookie.get(),\n       \"OauthHMAC=\"\n       \"M2NjZmIxYWE0NzQzOGZlZTJjMjQwMzBiZTU5OTdkN2Y0NDRhZjE5MjZiOWNhY2YzNjM0MWRmMTNkMDVmZWFlOQ==\"\n       \";version=test\"},\n  };\n\n  auto cookie_validator = std::make_shared<OAuth2CookieValidator>(test_time_);\n  cookie_validator->setParams(request_headers, \"mock-secret\");\n\n  EXPECT_TRUE(cookie_validator->hmacIsValid());\n  EXPECT_FALSE(cookie_validator->timestampIsValid());\n  EXPECT_FALSE(cookie_validator->isValid());\n}\n\n// Verify that we 401 the request if the state query param doesn't contain a valid URL.\nTEST_F(OAuth2Test, OAuthTestInvalidUrlInStateQueryParam) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().Path.get(), 
\"/_oauth?code=abcdefxyz123&scope=user&\"\n                                        \"state=blah\"},\n      {Http::Headers::get().Cookie.get(), \"OauthExpires=123;version=test\"},\n      {Http::Headers::get().Cookie.get(), \"BearerToken=legit_token;version=test\"},\n      {Http::Headers::get().Cookie.get(),\n       \"OauthHMAC=\"\n       \"ZTRlMzU5N2Q4ZDIwZWE5ZTU5NTg3YTU3YTcxZTU0NDFkMzY1ZTc1NjMyODYyMj\"\n       \"RlNjMxZTJmNTZkYzRmZTM0ZQ====;version=test\"},\n  };\n\n  Http::TestRequestHeaderMapImpl expected_headers{\n      {Http::Headers::get().Status.get(), \"401\"},\n      {Http::Headers::get().ContentLength.get(), \"18\"},\n      {Http::Headers::get().ContentType.get(), \"text/plain\"},\n      // Invalid URL: we inject a few : in the middle of the URL.\n  };\n\n  // Succeed the HMAC validation.\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(true));\n\n  std::string legit_token{\"legit_token\"};\n  EXPECT_CALL(*validator_, token()).WillOnce(ReturnRef(legit_token));\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), false));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer,\n            filter_->decodeHeaders(request_headers, false));\n}\n\n// Verify that we 401 the request if the state query param contains the callback URL.\nTEST_F(OAuth2Test, OAuthTestCallbackUrlInStateQueryParam) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().Path.get(), \"/_oauth?code=abcdefxyz123&scope=user&\"\n                                        \"state=https%3A%2F%2Ftraffic.example.com%2F_oauth\"},\n      {Http::Headers::get().Cookie.get(), \"OauthExpires=123;version=test\"},\n      {Http::Headers::get().Cookie.get(), \"BearerToken=legit_token;version=test\"},\n      
{Http::Headers::get().Cookie.get(),\n       \"OauthHMAC=\"\n       \"ZTRlMzU5N2Q4ZDIwZWE5ZTU5NTg3YTU3YTcxZTU0NDFkMzY1ZTc1NjMyODYyMj\"\n       \"RlNjMxZTJmNTZkYzRmZTM0ZQ====;version=test\"},\n  };\n\n  Http::TestRequestHeaderMapImpl expected_headers{\n      {Http::Headers::get().Status.get(), \"401\"},\n      {Http::Headers::get().ContentLength.get(), \"18\"},\n      {Http::Headers::get().ContentType.get(), \"text/plain\"},\n  };\n\n  // Succeed the HMAC validation.\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(true));\n\n  std::string legit_token{\"legit_token\"};\n  EXPECT_CALL(*validator_, token()).WillOnce(ReturnRef(legit_token));\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), false));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer,\n            filter_->decodeHeaders(request_headers, false));\n}\n\n/**\n * Testing the Path header replacement after an OAuth success.\n *\n * Expected behavior: the passed in HeaderMap should pass the OAuth flow, but since it's during\n * a callback from the authentication server, we should first parse out the state query string\n * parameter and set it to be the new path.\n */\nTEST_F(OAuth2Test, OAuthTestUpdatePathAfterSuccess) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().Path.get(), \"/_oauth?code=abcdefxyz123&scope=user&\"\n                                        \"state=https%3A%2F%2Ftraffic.example.com%2Foriginal_path\"},\n      {Http::Headers::get().Cookie.get(), \"OauthExpires=123;version=test\"},\n      {Http::Headers::get().Cookie.get(), \"BearerToken=legit_token;version=test\"},\n      {Http::Headers::get().Cookie.get(),\n       \"OauthHMAC=\"\n       
\"ZTRlMzU5N2Q4ZDIwZWE5ZTU5NTg3YTU3YTcxZTU0NDFkMzY1ZTc1NjMyODYyMj\"\n       \"RlNjMxZTJmNTZkYzRmZTM0ZQ====;version=test\"},\n  };\n\n  Http::TestRequestHeaderMapImpl expected_headers{\n      {Http::Headers::get().Status.get(), \"302\"},\n      {Http::Headers::get().Location.get(), \"https://traffic.example.com/original_path\"},\n  };\n\n  // Succeed the HMAC validation.\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(true));\n\n  std::string legit_token{\"legit_token\"};\n  EXPECT_CALL(*validator_, token()).WillOnce(ReturnRef(legit_token));\n\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n}\n\n/**\n * Testing oauth state with query string parameters.\n *\n * Expected behavior: HTTP Utility should not strip the parameters of the original request.\n */\nTEST_F(OAuth2Test, OAuthTestFullFlowPostWithParameters) {\n  // First construct the initial request to the oauth filter with URI parameters.\n  Http::TestRequestHeaderMapImpl first_request_headers{\n      {Http::Headers::get().Path.get(), \"/test?name=admin&level=trace\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Post},\n      {Http::Headers::get().ForwardedProto.get(), \"https\"},\n  };\n\n  // This is the immediate response - a redirect to the auth cluster.\n  Http::TestResponseHeaderMapImpl first_response_headers{\n      {Http::Headers::get().Status.get(), \"302\"},\n      {Http::Headers::get().Location.get(),\n       \"https://auth.example.com/oauth/\"\n       \"authorize/?client_id=\" +\n           TEST_CLIENT_ID +\n           \"&scope=user&response_type=code&\"\n           \"redirect_uri=https%3A%2F%2Ftraffic.example.com%2F\"\n           \"_oauth&state=https%3A%2F%2Ftraffic.example.com%2Ftest%\"\n         
  \"3Fname%3Dadmin%26level%3Dtrace\"},\n  };\n\n  // Fail the validation to trigger the OAuth flow.\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false));\n\n  // Check that the redirect includes the escaped parameter characters, '?', '&' and '='.\n  EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&first_response_headers), true));\n\n  // This represents the beginning of the OAuth filter.\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer,\n            filter_->decodeHeaders(first_request_headers, false));\n\n  // This represents the callback request from the authorization server.\n  Http::TestRequestHeaderMapImpl second_request_headers{\n      {Http::Headers::get().Path.get(), \"/_oauth?code=123&state=https%3A%2F%2Ftraffic.example.com%\"\n                                        \"2Ftest%3Fname%3Dadmin%26level%3Dtrace\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().ForwardedProto.get(), \"https\"},\n  };\n\n  // Deliberately fail the HMAC validation check.\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false));\n\n  EXPECT_CALL(*oauth_client_, asyncGetAccessToken(\"123\", TEST_CLIENT_ID, \"asdf_client_secret_fdsa\",\n                                                  \"https://traffic.example.com\" + TEST_CALLBACK));\n\n  // Invoke the callback logic. 
As a side effect, state_ will be populated.\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer,\n            filter_->decodeHeaders(second_request_headers, false));\n\n  EXPECT_EQ(1, config_->stats().oauth_unauthorized_rq_.value());\n\n  // Expected response after the callback & validation is complete - verifying we kept the\n  // state and method of the original request, including the query string parameters.\n  Http::TestRequestHeaderMapImpl second_response_headers{\n      {Http::Headers::get().Status.get(), \"302\"},\n      {Http::Headers::get().SetCookie.get(),\n       \"OauthHMAC=\"\n       \"NWUzNzE5MWQwYTg0ZjA2NjIyMjVjMzk3MzY3MzMyZmE0NjZmMWI2MjI1NWFhNDhkYjQ4NDFlZmRiMTVmMTk0MQ==;\"\n       \"version=1;path=/;Max-Age=;secure;HttpOnly\"},\n      {Http::Headers::get().SetCookie.get(),\n       \"OauthExpires=;version=1;path=/;Max-Age=;secure;HttpOnly\"},\n      {Http::Headers::get().SetCookie.get(), \"BearerToken=;version=1;path=/;Max-Age=;secure\"},\n      {Http::Headers::get().Location.get(),\n       \"https://traffic.example.com/test?name=admin&level=trace\"},\n  };\n\n  EXPECT_CALL(decoder_callbacks_,\n              encodeHeaders_(HeaderMapEqualRef(&second_response_headers), true));\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n\n  filter_->finishFlow();\n}\n\nTEST_F(OAuth2Test, OAuthBearerTokenFlowFromHeader) {\n  Http::TestRequestHeaderMapImpl request_headers_before{\n      {Http::Headers::get().Path.get(), \"/test?role=bearer\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().ForwardedProto.get(), \"https\"},\n      {Http::CustomHeaders::get().Authorization.get(), \"Bearer xyz-header-token\"},\n  };\n  // Expected decoded headers after the callback & validation of the bearer token is complete.\n  Http::TestRequestHeaderMapImpl request_headers_after{\n      {Http::Headers::get().Path.get(), 
\"/test?role=bearer\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().ForwardedProto.get(), \"https\"},\n      {Http::CustomHeaders::get().Authorization.get(), \"Bearer xyz-header-token\"},\n  };\n\n  // Fail the validation to trigger the OAuth flow.\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->decodeHeaders(request_headers_before, false));\n\n  // Finally, expect that the header map had OAuth information appended to it.\n  EXPECT_EQ(request_headers_before, request_headers_after);\n}\n\nTEST_F(OAuth2Test, OAuthBearerTokenFlowFromQueryParameters) {\n  Http::TestRequestHeaderMapImpl request_headers_before{\n      {Http::Headers::get().Path.get(), \"/test?role=bearer&token=xyz-queryparam-token\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().ForwardedProto.get(), \"https\"},\n  };\n  Http::TestRequestHeaderMapImpl request_headers_after{\n      {Http::Headers::get().Path.get(), \"/test?role=bearer&token=xyz-queryparam-token\"},\n      {Http::Headers::get().Host.get(), \"traffic.example.com\"},\n      {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get},\n      {Http::Headers::get().ForwardedProto.get(), \"https\"},\n      {Http::CustomHeaders::get().Authorization.get(), \"Bearer xyz-queryparam-token\"},\n  };\n\n  // Fail the validation to trigger the OAuth flow.\n  EXPECT_CALL(*validator_, setParams(_, _)).Times(1);\n  EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->decodeHeaders(request_headers_before, false));\n\n  // Expected decoded headers after the 
callback & validation of the bearer token is complete.\n  EXPECT_EQ(request_headers_before, request_headers_after);\n}\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/oauth2/oauth_integration_test.cc",
    "content": "#include \"common/protobuf/utility.h\"\n\n#include \"source/extensions/filters/http/oauth2/oauth_response.pb.h\"\n\n#include \"test/integration/http_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth {\nnamespace {\n\nclass OauthIntegrationTest : public testing::Test, public HttpIntegrationTest {\npublic:\n  OauthIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, Network::Address::IpVersion::v4) {\n    enable_half_close_ = true;\n  }\n\n  envoy::service::discovery::v3::DiscoveryResponse genericSecretResponse(absl::string_view name,\n                                                                         absl::string_view value) {\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(std::string(name));\n    secret.mutable_generic_secret()->mutable_secret()->set_inline_string(std::string(value));\n\n    envoy::service::discovery::v3::DiscoveryResponse response_pb;\n    response_pb.add_resources()->PackFrom(secret);\n    response_pb.set_type_url(\n        envoy::extensions::transport_sockets::tls::v3::Secret::descriptor()->name());\n    return response_pb;\n  }\n\n  void initialize() override {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n\n    config_helper_.addFilter(R\"EOF(\nname: oauth\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2\n  config:\n    token_endpoint:\n      cluster: oauth\n      uri: oauth.com/token\n      timeout: 3s\n    authorization_endpoint: https://oauth.com/oauth/authorize/\n    redirect_uri: \"%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback\"\n    redirect_path_matcher: \n      path:\n        exact: /callback\n    signout_path: \n      path:\n        exact: /signout\n    credentials:\n      client_id: foo\n      token_secret:\n        name: token\n      hmac_secret:\n        name: hmac\n)EOF\");\n\n    // Add 
the OAuth cluster.\n    config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      *bootstrap.mutable_static_resources()->add_clusters() =\n          config_helper_.buildStaticCluster(\"oauth\", 0, \"127.0.0.1\");\n\n      auto* token_secret = bootstrap.mutable_static_resources()->add_secrets();\n      token_secret->set_name(\"token\");\n      token_secret->mutable_generic_secret()->mutable_secret()->set_inline_bytes(\"token_secret\");\n\n      auto* hmac_secret = bootstrap.mutable_static_resources()->add_secrets();\n      hmac_secret->set_name(\"hmac\");\n      hmac_secret->mutable_generic_secret()->mutable_secret()->set_inline_bytes(\"hmac_secret\");\n    });\n\n    setUpstreamCount(2);\n\n    HttpIntegrationTest::initialize();\n  }\n};\n\n// Regular request gets redirected to the login page.\nTEST_F(OauthIntegrationTest, UnauthenticatedFlow) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"},\n                                         {\":path\", \"/lua/per/route/default\"},\n                                         {\":scheme\", \"http\"},\n                                         {\":authority\", \"authority\"}};\n  auto encoder_decoder = codec_client_->startRequest(headers);\n\n  Buffer::OwnedImpl buffer;\n  encoder_decoder.first.encodeData(buffer, true);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  // We should get an immediate redirect back.\n  response->waitForHeaders();\n  EXPECT_EQ(\"302\", response->headers().getStatusValue());\n}\n\nTEST_F(OauthIntegrationTest, AuthenticationFlow) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\":method\", \"GET\"},\n      {\":path\", \"/callback?code=foo&state=http%3A%2F%2Ftraffic.example.com%2Fnot%2F_oauth\"},\n      {\":scheme\", 
\"http\"},\n      {\"x-forwarded-proto\", \"http\"},\n      {\":authority\", \"authority\"},\n      {\"authorization\", \"Bearer token\"}};\n  auto encoder_decoder = codec_client_->startRequest(headers);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  waitForNextUpstreamRequest(1);\n\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n\n  upstream_request_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-type\", \"application/json\"}},\n      false);\n\n  envoy::extensions::http_filters::oauth2::OAuthResponse oauth_response;\n  oauth_response.mutable_access_token()->set_value(\"bar\");\n  oauth_response.mutable_expires_in()->set_value(\n      std::chrono::duration_cast<std::chrono::seconds>(\n          api_->timeSource().systemTime().time_since_epoch() + std::chrono::seconds(10))\n          .count());\n\n  Buffer::OwnedImpl buffer(MessageUtil::getJsonStringFromMessage(oauth_response));\n  upstream_request_->encodeData(buffer, true);\n\n  // We should get an immediate redirect back.\n  response->waitForHeaders();\n  EXPECT_EQ(\"302\", response->headers().getStatusValue());\n}\n\n} // namespace\n} // namespace Oauth\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/http/oauth2/oauth_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"common/http/message_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/oauth2/oauth.h\"\n#include \"extensions/filters/http/oauth2/oauth_client.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/types/optional.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Oauth2 {\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::NiceMock;\n\nclass MockCallbacks : public FilterCallbacks {\npublic:\n  MOCK_METHOD(void, sendUnauthorizedResponse, ());\n  MOCK_METHOD(void, onGetAccessTokenSuccess, (const std::string&, std::chrono::seconds));\n};\n\nclass OAuth2ClientTest : public testing::Test {\npublic:\n  OAuth2ClientTest()\n      : mock_callbacks_(std::make_shared<MockCallbacks>()), request_(&cm_.async_client_) {\n    envoy::config::core::v3::HttpUri uri;\n    uri.set_cluster(\"auth\");\n    uri.set_uri(\"auth.com/oauth/token\");\n    uri.mutable_timeout()->set_seconds(1);\n    client_ = std::make_shared<OAuth2ClientImpl>(cm_, uri);\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult popPendingCallback(std::function<void(Http::AsyncClient::Callbacks*)> func) {\n    if (callbacks_.empty()) {\n      return AssertionFailure() << \"tried to pop callback from empty deque\";\n    }\n\n    func(callbacks_.front());\n    callbacks_.pop_front();\n    return AssertionSuccess();\n  }\n\n  NiceMock<Upstream::MockClusterManager> cm_;\n  std::shared_ptr<OAuth2Client> client_;\n  std::shared_ptr<MockCallbacks> mock_callbacks_;\n  Http::MockAsyncClientRequest request_;\n  std::deque<Http::AsyncClient::Callbacks*> callbacks_;\n};\n\nTEST_F(OAuth2ClientTest, RequestAccessTokenSuccess) {\n  std::string json = R\"EOF(\n    {\n      \"access_token\": \"golden 
ticket\",\n      \"expires_in\": 1000\n    }\n    )EOF\";\n  Http::ResponseHeaderMapPtr mock_response_headers{new Http::TestResponseHeaderMapImpl{\n      {Http::Headers::get().Status.get(), \"200\"},\n      {Http::Headers::get().ContentType.get(), \"application/json\"},\n  }};\n  Http::ResponseMessagePtr mock_response(\n      new Http::ResponseMessageImpl(std::move(mock_response_headers)));\n  mock_response->body().add(json);\n\n  EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n      .WillRepeatedly(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callbacks_.push_back(&cb);\n            return &request_;\n          }));\n\n  client_->setCallbacks(*mock_callbacks_);\n  client_->asyncGetAccessToken(\"a\", \"b\", \"c\", \"d\");\n  EXPECT_EQ(1, callbacks_.size());\n  EXPECT_CALL(*mock_callbacks_, onGetAccessTokenSuccess(_, _));\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  ASSERT_TRUE(popPendingCallback(\n      [&](auto* callback) { callback->onSuccess(request, std::move(mock_response)); }));\n}\n\nTEST_F(OAuth2ClientTest, RequestAccessTokenIncompleteResponse) {\n  std::string json = R\"EOF(\n    {\n      \"expires_in\": 1000\n    }\n    )EOF\";\n  Http::ResponseHeaderMapPtr mock_response_headers{new Http::TestResponseHeaderMapImpl{\n      {Http::Headers::get().Status.get(), \"200\"},\n      {Http::Headers::get().ContentType.get(), \"application/json\"},\n  }};\n  Http::ResponseMessagePtr mock_response(\n      new Http::ResponseMessageImpl(std::move(mock_response_headers)));\n  mock_response->body().add(json);\n\n  EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n      .WillRepeatedly(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callbacks_.push_back(&cb);\n          
  return &request_;\n          }));\n\n  client_->setCallbacks(*mock_callbacks_);\n  client_->asyncGetAccessToken(\"a\", \"b\", \"c\", \"d\");\n  EXPECT_EQ(1, callbacks_.size());\n  EXPECT_CALL(*mock_callbacks_, sendUnauthorizedResponse());\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  ASSERT_TRUE(popPendingCallback(\n      [&](auto* callback) { callback->onSuccess(request, std::move(mock_response)); }));\n}\n\nTEST_F(OAuth2ClientTest, RequestAccessTokenErrorResponse) {\n  Http::ResponseHeaderMapPtr mock_response_headers{new Http::TestResponseHeaderMapImpl{\n      {Http::Headers::get().Status.get(), \"500\"},\n      {Http::Headers::get().ContentType.get(), \"application/json\"},\n  }};\n  Http::ResponseMessagePtr mock_response(\n      new Http::ResponseMessageImpl(std::move(mock_response_headers)));\n\n  EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n      .WillRepeatedly(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callbacks_.push_back(&cb);\n            return &request_;\n          }));\n\n  client_->setCallbacks(*mock_callbacks_);\n  client_->asyncGetAccessToken(\"a\", \"b\", \"c\", \"d\");\n  EXPECT_EQ(1, callbacks_.size());\n  EXPECT_CALL(*mock_callbacks_, sendUnauthorizedResponse());\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  ASSERT_TRUE(popPendingCallback(\n      [&](auto* callback) { callback->onSuccess(request, std::move(mock_response)); }));\n}\n\nTEST_F(OAuth2ClientTest, RequestAccessTokenInvalidResponse) {\n  std::string json = R\"EOF(\n    {\n      \"expires_in\": \"some_string\"\n    }\n    )EOF\";\n  Http::ResponseHeaderMapPtr mock_response_headers{new Http::TestResponseHeaderMapImpl{\n      {Http::Headers::get().Status.get(), \"200\"},\n      {Http::Headers::get().ContentType.get(), \"application/json\"},\n  }};\n  Http::ResponseMessagePtr mock_response(\n      new 
Http::ResponseMessageImpl(std::move(mock_response_headers)));\n  mock_response->body().add(json);\n\n  EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n      .WillRepeatedly(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callbacks_.push_back(&cb);\n            return &request_;\n          }));\n\n  client_->setCallbacks(*mock_callbacks_);\n  client_->asyncGetAccessToken(\"a\", \"b\", \"c\", \"d\");\n  EXPECT_EQ(1, callbacks_.size());\n  EXPECT_CALL(*mock_callbacks_, sendUnauthorizedResponse());\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  ASSERT_TRUE(popPendingCallback(\n      [&](auto* callback) { callback->onSuccess(request, std::move(mock_response)); }));\n}\n\nTEST_F(OAuth2ClientTest, NetworkError) {\n  EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n      .WillRepeatedly(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callbacks_.push_back(&cb);\n            return &request_;\n          }));\n\n  client_->setCallbacks(*mock_callbacks_);\n  client_->asyncGetAccessToken(\"a\", \"b\", \"c\", \"d\");\n  EXPECT_EQ(1, callbacks_.size());\n\n  EXPECT_CALL(*mock_callbacks_, sendUnauthorizedResponse());\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  ASSERT_TRUE(popPendingCallback([&](auto* callback) {\n    callback->onFailure(request, Http::AsyncClient::FailureReason::Reset);\n  }));\n}\n\n} // namespace Oauth2\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/on_demand/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"on_demand_filter_test\",\n    srcs = [\"on_demand_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.on_demand\",\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/http/on_demand:on_demand_update_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/on_demand/on_demand_filter_test.cc",
    "content": "#include <memory>\n\n#include \"common/http/header_map_impl.h\"\n\n#include \"extensions/filters/http/on_demand/on_demand_update.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OnDemand {\n\nclass OnDemandFilterTest : public testing::Test {\npublic:\n  void SetUp() override {\n    filter_ = std::make_unique<OnDemandRouteUpdate>();\n    filter_->setDecoderFilterCallbacks(decoder_callbacks_);\n  }\n\n  std::unique_ptr<OnDemandRouteUpdate> filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n};\n\n// tests decodeHeaders() when no cached route is available and vhds is configured\nTEST_F(OnDemandFilterTest, TestDecodeHeaders) {\n  Http::TestRequestHeaderMapImpl headers;\n  std::shared_ptr<Router::MockConfig> route_config_ptr{new NiceMock<Router::MockConfig>()};\n  EXPECT_CALL(decoder_callbacks_, route()).WillOnce(Return(nullptr));\n  EXPECT_CALL(decoder_callbacks_, requestRouteConfigUpdate(_));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(headers, true));\n}\n\n// tests decodeHeaders() when no cached route is available\nTEST_F(OnDemandFilterTest, TestDecodeHeadersWhenRouteAvailable) {\n  Http::TestRequestHeaderMapImpl headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true));\n}\n\n// tests decodeHeaders() when no route configuration is available\nTEST_F(OnDemandFilterTest, TestDecodeHeadersWhenRouteConfigIsNotAvailable) {\n  Http::TestRequestHeaderMapImpl headers;\n  std::shared_ptr<Router::MockConfig> route_config_ptr{new NiceMock<Router::MockConfig>()};\n  EXPECT_CALL(decoder_callbacks_, route()).WillOnce(Return(nullptr));\n  EXPECT_CALL(decoder_callbacks_, requestRouteConfigUpdate(_));\n  
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(headers, true));\n}\n\nTEST_F(OnDemandFilterTest, TestDecodeTrailers) {\n  Http::TestRequestTrailerMapImpl headers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(headers));\n}\n\n// tests decodeData() when filter state is Http::FilterHeadersStatus::Continue\nTEST_F(OnDemandFilterTest, TestDecodeDataReturnsContinue) {\n  Buffer::OwnedImpl buffer;\n  filter_->setFilterIterationState(Http::FilterHeadersStatus::Continue);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n}\n\n// tests decodeData() when filter state is Http::FilterHeadersStatus::StopIteration\nTEST_F(OnDemandFilterTest, TestDecodeDataReturnsStopIteration) {\n  Buffer::OwnedImpl buffer;\n  filter_->setFilterIterationState(Http::FilterHeadersStatus::StopIteration);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(buffer, false));\n}\n\n// tests onRouteConfigUpdateCompletion() route hasn't been resolved\nTEST_F(OnDemandFilterTest,\n       TestOnRouteConfigUpdateCompletionContinuesDecodingWhenRouteDoesNotExist) {\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n  filter_->onRouteConfigUpdateCompletion(false);\n}\n\n// tests onRouteConfigUpdateCompletion() when redirect contains a body\nTEST_F(OnDemandFilterTest, TestOnRouteConfigUpdateCompletionContinuesDecodingWithRedirectWithBody) {\n  Buffer::OwnedImpl buffer;\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillOnce(Return(&buffer));\n  filter_->onRouteConfigUpdateCompletion(true);\n}\n\n// tests onRouteConfigUpdateCompletion() when ActiveStream recreation fails\nTEST_F(OnDemandFilterTest, OnRouteConfigUpdateCompletionContinuesDecodingIfRedirectFails) {\n  EXPECT_CALL(decoder_callbacks_, continueDecoding());\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillOnce(Return(nullptr));\n  
EXPECT_CALL(decoder_callbacks_, recreateStream()).WillOnce(Return(false));\n  filter_->onRouteConfigUpdateCompletion(true);\n}\n\n// tests onRouteConfigUpdateCompletion() when route was resolved\nTEST_F(OnDemandFilterTest, OnRouteConfigUpdateCompletionRestartsActiveStream) {\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillOnce(Return(nullptr));\n  EXPECT_CALL(decoder_callbacks_, recreateStream()).WillOnce(Return(true));\n  filter_->onRouteConfigUpdateCompletion(true);\n}\n\n} // namespace OnDemand\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/original_src/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.original_src\",\n    deps = [\n        \"//source/extensions/filters/http/original_src:config_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/original_src/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"original_src_config_factory_test\",\n    srcs = [\"original_src_config_factory_test.cc\"],\n    extension_name = \"envoy.filters.http.original_src\",\n    deps = [\n        \"//source/extensions/filters/http/original_src:config\",\n        \"//source/extensions/filters/http/original_src:config_lib\",\n        \"//source/extensions/filters/http/original_src:original_src_lib\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/http/original_src/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"original_src_test\",\n    srcs = [\"original_src_test.cc\"],\n    extension_name = \"envoy.filters.http.original_src\",\n    deps = [\n        \"//source/common/network:socket_option_lib\",\n        \"//source/extensions/filters/http/original_src:original_src_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/original_src/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/original_src/config_test.cc",
    "content": "#include <numeric>\n\n#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.h\"\n\n#include \"extensions/filters/http/original_src/config.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OriginalSrc {\nnamespace {\n\n// In keeping with the class under test, it would have made sense to call this ConfigTest. However,\n// when running coverage tests, that conflicts with tests elsewhere in the codebase.\nclass OriginalSrcHttpConfigTest : public testing::Test {\npublic:\n  Config makeConfigFromProto(\n      const envoy::extensions::filters::http::original_src::v3::OriginalSrc& proto_config) {\n    return Config(proto_config);\n  }\n};\n\nTEST_F(OriginalSrcHttpConfigTest, TestUseMark0) {\n  envoy::extensions::filters::http::original_src::v3::OriginalSrc config_proto;\n  config_proto.set_mark(0);\n  auto config = makeConfigFromProto(config_proto);\n\n  EXPECT_EQ(config.mark(), 0);\n}\n\nTEST_F(OriginalSrcHttpConfigTest, TestUseMark1234) {\n  envoy::extensions::filters::http::original_src::v3::OriginalSrc config_proto;\n  config_proto.set_mark(1234);\n  auto config = makeConfigFromProto(config_proto);\n\n  EXPECT_EQ(config.mark(), 1234);\n}\n\nTEST_F(OriginalSrcHttpConfigTest, TestUseMarkMax) {\n  envoy::extensions::filters::http::original_src::v3::OriginalSrc config_proto;\n  config_proto.set_mark(std::numeric_limits<uint32_t>::max());\n  auto config = makeConfigFromProto(config_proto);\n\n  EXPECT_EQ(config.mark(), std::numeric_limits<uint32_t>::max());\n}\n\n} // namespace\n} // namespace OriginalSrc\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/original_src/original_src_config_factory_test.cc",
    "content": "#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.h\"\n#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.validate.h\"\n\n#include \"extensions/filters/http/original_src/config.h\"\n#include \"extensions/filters/http/original_src/original_src.h\"\n#include \"extensions/filters/http/original_src/original_src_config_factory.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Invoke;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OriginalSrc {\nnamespace {\n\nTEST(OriginalSrcHttpConfigFactoryTest, TestCreateFactory) {\n  const std::string yaml = R\"EOF(\n    mark: 5\n)EOF\";\n\n  OriginalSrcConfigFactory factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto();\n  TestUtility::loadFromYaml(yaml, *proto_config);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, \"\", context);\n\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  Http::StreamDecoderFilterSharedPtr added_filter;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_))\n      .WillOnce(Invoke([&added_filter](Http::StreamDecoderFilterSharedPtr filter) {\n        added_filter = std::move(filter);\n      }));\n\n  cb(filter_callback);\n\n  // Make sure we actually create the correct type!\n  EXPECT_NE(dynamic_cast<OriginalSrcFilter*>(added_filter.get()), nullptr);\n}\n\n} // namespace\n} // namespace OriginalSrc\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/original_src/original_src_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/http/original_src/v3/original_src.pb.h\"\n\n#include \"common/network/socket_option_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/http/original_src/original_src.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::SaveArg;\nusing testing::StrictMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace OriginalSrc {\nnamespace {\n\nclass OriginalSrcHttpTest : public testing::Test {\npublic:\n  std::unique_ptr<OriginalSrcFilter> makeDefaultFilter() {\n    return makeFilterWithCallbacks(callbacks_);\n  }\n\n  std::unique_ptr<OriginalSrcFilter>\n  makeFilterWithCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) {\n    const Config default_config;\n    auto filter = std::make_unique<OriginalSrcFilter>(default_config);\n    filter->setDecoderFilterCallbacks(callbacks);\n    return filter;\n  }\n\n  std::unique_ptr<OriginalSrcFilter> makeMarkingFilter(uint32_t mark) {\n    envoy::extensions::filters::http::original_src::v3::OriginalSrc proto_config;\n    proto_config.set_mark(mark);\n\n    const Config config(proto_config);\n    auto filter = std::make_unique<OriginalSrcFilter>(config);\n    filter->setDecoderFilterCallbacks(callbacks_);\n    return filter;\n  }\n\n  void setAddressToReturn(const std::string& address) {\n    callbacks_.stream_info_.downstream_remote_address_ = Network::Utility::resolveUrl(address);\n  }\n\nprotected:\n  StrictMock<MockBuffer> buffer_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  NiceMock<Network::MockConnectionSocket> socket_;\n  Http::TestRequestHeaderMapImpl headers_;\n  
Http::TestRequestTrailerMapImpl trailers_;\n\n  absl::optional<Network::Socket::Option::Details>\n  findOptionDetails(const Network::Socket::Options& options, Network::SocketOptionName name,\n                    envoy::config::core::v3::SocketOption::SocketState state) {\n    for (const auto& option : options) {\n      const auto details = option->getOptionDetails(socket_, state);\n      if (details.has_value() && details->name_ == name) {\n        return details;\n      }\n    }\n\n    return absl::nullopt;\n  }\n};\n\nTEST_F(OriginalSrcHttpTest, OnNonIpAddressDecodeSkips) {\n  auto filter = makeDefaultFilter();\n  setAddressToReturn(\"unix://domain.socket\");\n  EXPECT_CALL(callbacks_, addUpstreamSocketOptions(_)).Times(0);\n  EXPECT_EQ(filter->decodeHeaders(headers_, false), Http::FilterHeadersStatus::Continue);\n}\n\nTEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressAddsOption) {\n  auto filter = makeDefaultFilter();\n\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:0\");\n  EXPECT_CALL(callbacks_, addUpstreamSocketOptions(_)).WillOnce(SaveArg<0>(&options));\n\n  EXPECT_EQ(filter->decodeHeaders(headers_, false), Http::FilterHeadersStatus::Continue);\n\n  NiceMock<Network::MockConnectionSocket> socket;\n  EXPECT_CALL(socket,\n              setLocalAddress(PointeesEq(callbacks_.stream_info_.downstream_remote_address_)));\n  for (const auto& option : *options) {\n    option->setOption(socket, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  }\n}\n\nTEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressUsesCorrectAddress) {\n  auto filter = makeDefaultFilter();\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:0\");\n  EXPECT_CALL(callbacks_, addUpstreamSocketOptions(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->decodeHeaders(headers_, false);\n  std::vector<uint8_t> key;\n  for (const auto& option : *options) {\n    option->hashKey(key);\n  }\n\n  std::vector<uint8_t> expected_key = 
{1, 2, 3, 4};\n\n  EXPECT_EQ(key, expected_key);\n}\n\nTEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressBleachesPort) {\n  auto filter = makeDefaultFilter();\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:80\");\n  EXPECT_CALL(callbacks_, addUpstreamSocketOptions(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->decodeHeaders(headers_, false);\n\n  NiceMock<Network::MockConnectionSocket> socket;\n  const auto expected_address = Network::Utility::parseInternetAddress(\"1.2.3.4\");\n\n  EXPECT_CALL(socket, setLocalAddress(PointeesEq(expected_address)));\n  for (const auto& option : *options) {\n    option->setOption(socket, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  }\n}\n\nTEST_F(OriginalSrcHttpTest, FilterAddsTransparentOption) {\n  if (!ENVOY_SOCKET_IP_TRANSPARENT.hasValue()) {\n    // The option isn't supported on this platform. Just skip the test.\n    return;\n  }\n\n  auto filter = makeDefaultFilter();\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:80\");\n  EXPECT_CALL(callbacks_, addUpstreamSocketOptions(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->decodeHeaders(headers_, false);\n\n  const auto transparent_option = findOptionDetails(\n      *options, ENVOY_SOCKET_IP_TRANSPARENT, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n\n  EXPECT_TRUE(transparent_option.has_value());\n}\n\nTEST_F(OriginalSrcHttpTest, FilterAddsMarkOption) {\n  if (!ENVOY_SOCKET_SO_MARK.hasValue()) {\n    // The option isn't supported on this platform. 
Just skip the test.\n    return;\n  }\n\n  auto filter = makeMarkingFilter(1234);\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:80\");\n  EXPECT_CALL(callbacks_, addUpstreamSocketOptions(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->decodeHeaders(headers_, false);\n\n  const auto mark_option = findOptionDetails(*options, ENVOY_SOCKET_SO_MARK,\n                                             envoy::config::core::v3::SocketOption::STATE_PREBIND);\n\n  ASSERT_TRUE(mark_option.has_value());\n  uint32_t value = 1234;\n  absl::string_view value_as_bstr(reinterpret_cast<const char*>(&value), sizeof(value));\n  EXPECT_EQ(value_as_bstr, mark_option->value_);\n}\n\nTEST_F(OriginalSrcHttpTest, Mark0NotAdded) {\n  if (!ENVOY_SOCKET_SO_MARK.hasValue()) {\n    // The option isn't supported on this platform. Just skip the test.\n    return;\n  }\n\n  auto filter = makeMarkingFilter(0);\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:80\");\n  EXPECT_CALL(callbacks_, addUpstreamSocketOptions(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->decodeHeaders(headers_, false);\n\n  const auto mark_option = findOptionDetails(*options, ENVOY_SOCKET_SO_MARK,\n                                             envoy::config::core::v3::SocketOption::STATE_PREBIND);\n\n  ASSERT_FALSE(mark_option.has_value());\n}\n\nTEST_F(OriginalSrcHttpTest, TrailersAndDataEndStreamDoNothing) {\n  // Use a strict mock to show that decodeData and decodeTrailers do nothing to the callback.\n  StrictMock<Http::MockStreamDecoderFilterCallbacks> callbacks;\n  auto filter = makeFilterWithCallbacks(callbacks);\n\n  // This will be invoked in decodeHeaders.\n  EXPECT_CALL(callbacks, addUpstreamSocketOptions(_));\n  EXPECT_CALL(callbacks, streamInfo());\n  callbacks.stream_info_.downstream_remote_address_ =\n      Network::Utility::parseInternetAddress(\"1.2.3.4\");\n  filter->decodeHeaders(headers_, true);\n\n  // No new expectations => no side 
effects from calling these.\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter->decodeData(buffer_, true));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter->decodeTrailers(trailers_));\n}\n\nTEST_F(OriginalSrcHttpTest, TrailersAndDataNotEndStreamDoNothing) {\n  // Use a strict mock to show that decodeData and decodeTrailers do nothing to the callback.\n  StrictMock<Http::MockStreamDecoderFilterCallbacks> callbacks;\n  auto filter = makeFilterWithCallbacks(callbacks);\n\n  // This will be invoked in decodeHeaders.\n  EXPECT_CALL(callbacks, addUpstreamSocketOptions(_));\n  EXPECT_CALL(callbacks, streamInfo());\n  callbacks.stream_info_.downstream_remote_address_ =\n      Network::Utility::parseInternetAddress(\"1.2.3.4\");\n  filter->decodeHeaders(headers_, false);\n\n  // No new expectations => no side effects from calling these.\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter->decodeData(buffer_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter->decodeTrailers(trailers_));\n}\n} // namespace\n} // namespace OriginalSrc\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"ratelimit_test\",\n    srcs = [\"ratelimit_test.cc\"],\n    extension_name = \"envoy.filters.http.ratelimit\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_lib\",\n        \"//source/extensions/filters/http/ratelimit:ratelimit_lib\",\n        \"//test/extensions/filters/common/ratelimit:ratelimit_mocks\",\n        \"//test/extensions/filters/common/ratelimit:ratelimit_utils\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/ratelimit:ratelimit_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.ratelimit\",\n    deps = [\n        \"//source/extensions/filters/http/ratelimit:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"ratelimit_integration_test\",\n    srcs = [\"ratelimit_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.ratelimit\",\n    tags = [\"flaky_on_windows\"],\n    deps = [\n      
  \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/extensions/filters/http/ratelimit:config\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/extensions/filters/common/ratelimit:ratelimit_utils\",\n        \"//test/integration:http_integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"ratelimit_headers_test\",\n    srcs = [\"ratelimit_headers_test.cc\"],\n    extension_name = \"envoy.filters.http.ratelimit\",\n    deps = [\n        \"//source/extensions/filters/http/ratelimit:ratelimit_headers_lib\",\n        \"//test/extensions/filters/common/ratelimit:ratelimit_utils\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/ratelimit/config_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.validate.h\"\n\n#include \"extensions/filters/http/ratelimit/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RateLimitFilter {\nnamespace {\n\nTEST(RateLimitFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(RateLimitFilterConfig().createFilterFactoryFromProto(\n                   envoy::extensions::filters::http::ratelimit::v3::RateLimit(), \"stats\", context),\n               ProtoValidationException);\n}\n\nTEST(RateLimitFilterConfigTest, RatelimitCorrectProto) {\n  const std::string yaml = R\"EOF(\n  domain: test\n  timeout: 2s\n  rate_limit_service:\n    grpc_service:\n      envoy_grpc:\n        cluster_name: ratelimit_cluster\n  )EOF\";\n\n  envoy::extensions::filters::http::ratelimit::v3::RateLimit proto_config{};\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  EXPECT_CALL(context.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n        return std::make_unique<NiceMock<Grpc::MockAsyncClientFactory>>();\n      }));\n\n  RateLimitFilterConfig factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  cb(filter_callback);\n}\n\nTEST(RateLimitFilterConfigTest, RateLimitFilterEmptyProto) {\n  
NiceMock<Server::Configuration::MockFactoryContext> context;\n  NiceMock<Server::MockInstance> instance;\n\n  RateLimitFilterConfig factory;\n\n  envoy::extensions::filters::http::ratelimit::v3::RateLimit empty_proto_config =\n      *dynamic_cast<envoy::extensions::filters::http::ratelimit::v3::RateLimit*>(\n          factory.createEmptyConfigProto().get());\n\n  EXPECT_THROW(factory.createFilterFactoryFromProto(empty_proto_config, \"stats\", context),\n               EnvoyException);\n}\n\nTEST(RateLimitFilterConfigTest, BadRateLimitFilterConfig) {\n  const std::string yaml = R\"EOF(\n  domain: foo\n  route_key: my_route\n  )EOF\";\n\n  envoy::extensions::filters::http::ratelimit::v3::RateLimit proto_config{};\n  EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYamlAndValidate(yaml, proto_config), EnvoyException,\n                          \"route_key: Cannot find field\");\n}\n\n// Test that the deprecated extension name still functions.\nTEST(RateLimitFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.rate_limit\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace RateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/ratelimit/ratelimit_headers_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"extensions/filters/http/ratelimit/ratelimit_headers.h\"\n\n#include \"test/extensions/filters/common/ratelimit/utils.h\"\n#include \"test/mocks/http/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RateLimitFilter {\nnamespace {\n\nusing Envoy::RateLimit::buildDescriptorStatus;\nusing Filters::Common::RateLimit::DescriptorStatusList;\n\nstruct RateLimitHeadersTestCase {\n  Http::TestResponseHeaderMapImpl expected_headers;\n  DescriptorStatusList descriptor_statuses;\n};\n\nclass RateLimitHeadersTest : public testing::TestWithParam<RateLimitHeadersTestCase> {\npublic:\n  static const std::vector<RateLimitHeadersTestCase>& getTestCases() {\n    CONSTRUCT_ON_FIRST_USE(\n        std::vector<RateLimitHeadersTestCase>,\n        // Empty descriptor statuses\n        {{}, {}},\n        // Status with no current limit is ignored\n        {{{\"x-ratelimit-limit\", \"4, 4;w=3600;name=\\\"second\\\"\"},\n          {\"x-ratelimit-remaining\", \"5\"},\n          {\"x-ratelimit-reset\", \"6\"}},\n         {// passing 0 will cause it not to set a current limit\n          buildDescriptorStatus(0,\n                                envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE,\n                                \"first\", 2, 3),\n          buildDescriptorStatus(4,\n                                envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR,\n                                \"second\", 5, 6)}},\n        // Empty name is not appended\n        {{{\"x-ratelimit-limit\", \"1, 1;w=60\"},\n          {\"x-ratelimit-remaining\", \"2\"},\n          {\"x-ratelimit-reset\", \"3\"}},\n         {\n             // passing 0 will cause it not to set a current limit\n             buildDescriptorStatus(\n                 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, 
\"\", 2, 3),\n         }},\n        // Unknown unit is ignored in window, but not overall\n        {{{\"x-ratelimit-limit\", \"1, 4;w=3600;name=\\\"second\\\"\"},\n          {\"x-ratelimit-remaining\", \"2\"},\n          {\"x-ratelimit-reset\", \"3\"}},\n         {// passing 0 will cause it not to set a current limit\n          buildDescriptorStatus(\n              1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::UNKNOWN, \"first\", 2,\n              3),\n          buildDescriptorStatus(4,\n                                envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR,\n                                \"second\", 5, 6)}},\n        // Normal case, multiple arguments\n        {{{\"x-ratelimit-limit\", \"1, 1;w=60;name=\\\"first\\\", 4;w=3600;name=\\\"second\\\"\"},\n          {\"x-ratelimit-remaining\", \"2\"},\n          {\"x-ratelimit-reset\", \"3\"}},\n         {buildDescriptorStatus(1,\n                                envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE,\n                                \"first\", 2, 3),\n          buildDescriptorStatus(4,\n                                envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR,\n                                \"second\", 5, 6)}}, );\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(RateLimitHeadersTest, RateLimitHeadersTest,\n                         testing::ValuesIn(RateLimitHeadersTest::getTestCases()));\n\nTEST_P(RateLimitHeadersTest, RateLimitHeadersTest) {\n  Http::ResponseHeaderMapPtr result = XRateLimitHeaderUtils::create(\n      std::make_unique<DescriptorStatusList>(GetParam().descriptor_statuses));\n  EXPECT_THAT(result, HeaderMapEqual(&GetParam().expected_headers));\n}\n\n} // namespace\n} // namespace RateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.validate.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/service/ratelimit/v3/rls.pb.h\"\n\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/common.h\"\n\n#include \"extensions/filters/http/ratelimit/config.h\"\n#include \"extensions/filters/http/ratelimit/ratelimit_headers.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/extensions/filters/common/ratelimit/utils.h\"\n#include \"test/integration/http_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\n// Tests Ratelimit functionality with config in filter.\nclass RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest,\n                                 public HttpIntegrationTest {\npublic:\n  RatelimitIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {}\n\n  void SetUp() override { initialize(); }\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void initialize() override {\n\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* ratelimit_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      ratelimit_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      ratelimit_cluster->set_name(\"ratelimit\");\n      ratelimit_cluster->mutable_http2_protocol_options();\n\n      // enhance rate limit filter config based on the configuration of test.\n      TestUtility::loadFromYaml(base_filter_config_, 
proto_config_);\n      proto_config_.set_failure_mode_deny(failure_mode_deny_);\n      proto_config_.set_enable_x_ratelimit_headers(enable_x_ratelimit_headers_);\n      setGrpcService(*proto_config_.mutable_rate_limit_service()->mutable_grpc_service(),\n                     \"ratelimit\", fake_upstreams_.back()->localAddress());\n      proto_config_.mutable_rate_limit_service()->set_transport_api_version(apiVersion());\n\n      envoy::config::listener::v3::Filter ratelimit_filter;\n      ratelimit_filter.set_name(\"envoy.filters.http.ratelimit\");\n      ratelimit_filter.mutable_typed_config()->PackFrom(proto_config_);\n      config_helper_.addFilter(MessageUtil::getJsonStringFromMessage(ratelimit_filter));\n    });\n    config_helper_.addConfigModifier(\n        [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n               hcm) {\n          auto* rate_limit = hcm.mutable_route_config()\n                                 ->mutable_virtual_hosts(0)\n                                 ->mutable_routes(0)\n                                 ->mutable_route()\n                                 ->add_rate_limits();\n          rate_limit->add_actions()->mutable_destination_cluster();\n        });\n    HttpIntegrationTest::initialize();\n  }\n\n  void initiateClientConnection() {\n    auto conn = makeClientConnection(lookupPort(\"http\"));\n    codec_client_ = makeHttpConnection(std::move(conn));\n    Http::TestRequestHeaderMapImpl headers{\n        {\":method\", \"POST\"},    {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"},\n        {\":authority\", \"host\"}, {\"x-lyft-user-id\", \"123\"},   {\"x-forwarded-for\", \"10.0.0.1\"}};\n    response_ = codec_client_->makeRequestWithBody(headers, request_size_);\n  }\n\n  void waitForRatelimitRequest() {\n    AssertionResult result =\n        fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_ratelimit_connection_);\n    RELEASE_ASSERT(result, result.message());\n    
result = fake_ratelimit_connection_->waitForNewStream(*dispatcher_, ratelimit_request_);\n    RELEASE_ASSERT(result, result.message());\n    envoy::service::ratelimit::v3::RateLimitRequest request_msg;\n    result = ratelimit_request_->waitForGrpcMessage(*dispatcher_, request_msg);\n    RELEASE_ASSERT(result, result.message());\n    result = ratelimit_request_->waitForEndStream(*dispatcher_);\n    RELEASE_ASSERT(result, result.message());\n    EXPECT_EQ(\"POST\", ratelimit_request_->headers().getMethodValue());\n    EXPECT_EQ(TestUtility::getVersionedMethodPath(\"envoy.service.ratelimit.{}.RateLimitService\",\n                                                  \"ShouldRateLimit\", apiVersion()),\n              ratelimit_request_->headers().getPathValue());\n    EXPECT_EQ(\"application/grpc\", ratelimit_request_->headers().getContentTypeValue());\n\n    envoy::service::ratelimit::v3::RateLimitRequest expected_request_msg;\n    expected_request_msg.set_domain(\"some_domain\");\n    auto* entry = expected_request_msg.add_descriptors()->add_entries();\n    entry->set_key(\"destination_cluster\");\n    entry->set_value(\"cluster_0\");\n    EXPECT_EQ(expected_request_msg.DebugString(), request_msg.DebugString());\n  }\n\n  void waitForSuccessfulUpstreamResponse() {\n    AssertionResult result =\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_);\n    RELEASE_ASSERT(result, result.message());\n    result = upstream_request_->waitForEndStream(*dispatcher_);\n    RELEASE_ASSERT(result, result.message());\n\n    upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n    upstream_request_->encodeData(response_size_, true);\n    response_->waitForEndStream();\n\n    EXPECT_TRUE(upstream_request_->complete());\n    EXPECT_EQ(request_size_, 
upstream_request_->bodyLength());\n\n    EXPECT_TRUE(response_->complete());\n    EXPECT_EQ(\"200\", response_->headers().getStatusValue());\n    EXPECT_EQ(response_size_, response_->body().size());\n  }\n\n  void waitForFailedUpstreamResponse(uint32_t response_code) {\n    response_->waitForEndStream();\n    EXPECT_TRUE(response_->complete());\n    EXPECT_EQ(std::to_string(response_code), response_->headers().getStatusValue());\n  }\n\n  void sendRateLimitResponse(\n      envoy::service::ratelimit::v3::RateLimitResponse::Code code,\n      const Extensions::Filters::Common::RateLimit::DescriptorStatusList& descriptor_statuses,\n      const Http::ResponseHeaderMap& response_headers_to_add,\n      const Http::RequestHeaderMap& request_headers_to_add) {\n    ratelimit_request_->startGrpcStream();\n    envoy::service::ratelimit::v3::RateLimitResponse response_msg;\n    response_msg.set_overall_code(code);\n    *response_msg.mutable_statuses() = {descriptor_statuses.begin(), descriptor_statuses.end()};\n\n    response_headers_to_add.iterate(\n        [&response_msg](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate {\n          auto header = response_msg.mutable_response_headers_to_add()->Add();\n          header->set_key(std::string(h.key().getStringView()));\n          header->set_value(std::string(h.value().getStringView()));\n          return Http::HeaderMap::Iterate::Continue;\n        });\n    request_headers_to_add.iterate(\n        [&response_msg](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate {\n          auto header = response_msg.mutable_request_headers_to_add()->Add();\n          header->set_key(std::string(h.key().getStringView()));\n          header->set_value(std::string(h.value().getStringView()));\n          return Http::HeaderMap::Iterate::Continue;\n        });\n    ratelimit_request_->sendGrpcMessage(response_msg);\n    ratelimit_request_->finishGrpcStream(Grpc::Status::Ok);\n  }\n\n  void cleanup() {\n    if 
(fake_ratelimit_connection_ != nullptr) {\n      if (clientType() != Grpc::ClientType::GoogleGrpc) {\n        // TODO(htuch) we should document the underlying cause of this difference and/or fix it.\n        AssertionResult result = fake_ratelimit_connection_->close();\n        RELEASE_ASSERT(result, result.message());\n      }\n      AssertionResult result = fake_ratelimit_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n    cleanupUpstreamAndDownstream();\n  }\n\n  void basicFlow() {\n    initiateClientConnection();\n    waitForRatelimitRequest();\n    sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, {},\n                          Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{});\n    waitForSuccessfulUpstreamResponse();\n    cleanup();\n\n    EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ratelimit.ok\")->value());\n    EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.over_limit\"));\n    EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.error\"));\n  }\n\n  FakeHttpConnectionPtr fake_ratelimit_connection_;\n  FakeStreamPtr ratelimit_request_;\n  IntegrationStreamDecoderPtr response_;\n\n  const uint64_t request_size_ = 1024;\n  const uint64_t response_size_ = 512;\n  bool failure_mode_deny_ = false;\n  envoy::extensions::filters::http::ratelimit::v3::RateLimit::XRateLimitHeadersRFCVersion\n      enable_x_ratelimit_headers_ = envoy::extensions::filters::http::ratelimit::v3::RateLimit::OFF;\n  envoy::extensions::filters::http::ratelimit::v3::RateLimit proto_config_{};\n  const std::string base_filter_config_ = R\"EOF(\n    domain: some_domain\n    timeout: 0.5s\n  )EOF\";\n};\n\n// Test that verifies failure mode cases.\nclass RatelimitFailureModeIntegrationTest : public RatelimitIntegrationTest {\npublic:\n  RatelimitFailureModeIntegrationTest() { failure_mode_deny_ = true; }\n};\n\n// Test verifies that response headers 
provided by filter work.\nclass RatelimitFilterHeadersEnabledIntegrationTest : public RatelimitIntegrationTest {\npublic:\n  RatelimitFilterHeadersEnabledIntegrationTest() {\n    enable_x_ratelimit_headers_ =\n        envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_03;\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitIntegrationTest,\n                         VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFailureModeIntegrationTest,\n                         VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFilterHeadersEnabledIntegrationTest,\n                         VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);\n\nTEST_P(RatelimitIntegrationTest, Ok) { basicFlow(); }\n\nTEST_P(RatelimitIntegrationTest, OkWithHeaders) {\n  initiateClientConnection();\n  waitForRatelimitRequest();\n  Http::TestResponseHeaderMapImpl ratelimit_response_headers{{\"x-ratelimit-limit\", \"1000\"},\n                                                             {\"x-ratelimit-remaining\", \"500\"}};\n  Http::TestRequestHeaderMapImpl request_headers_to_add{{\"x-ratelimit-done\", \"true\"}};\n\n  sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, {},\n                        ratelimit_response_headers, request_headers_to_add);\n  waitForSuccessfulUpstreamResponse();\n\n  ratelimit_response_headers.iterate(\n      [response = response_.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate {\n        Http::LowerCaseString lower_key{std::string(entry.key().getStringView())};\n        EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView());\n        return Http::HeaderMap::Iterate::Continue;\n      });\n\n  request_headers_to_add.iterate([upstream = upstream_request_.get()](\n                                     const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate {\n    
Http::LowerCaseString lower_key{std::string(entry.key().getStringView())};\n    EXPECT_EQ(entry.value(), upstream->headers().get(lower_key)->value().getStringView());\n    return Http::HeaderMap::Iterate::Continue;\n  });\n\n  cleanup();\n\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ratelimit.ok\")->value());\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.over_limit\"));\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.error\"));\n}\n\nTEST_P(RatelimitIntegrationTest, OverLimit) {\n  initiateClientConnection();\n  waitForRatelimitRequest();\n  sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {},\n                        Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{});\n  waitForFailedUpstreamResponse(429);\n  cleanup();\n\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.ok\"));\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ratelimit.over_limit\")->value());\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.error\"));\n}\n\nTEST_P(RatelimitIntegrationTest, OverLimitWithHeaders) {\n  initiateClientConnection();\n  waitForRatelimitRequest();\n  Http::TestResponseHeaderMapImpl ratelimit_response_headers{\n      {\"x-ratelimit-limit\", \"1000\"}, {\"x-ratelimit-remaining\", \"0\"}, {\"retry-after\", \"33\"}};\n  sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {},\n                        ratelimit_response_headers, Http::TestRequestHeaderMapImpl{});\n  waitForFailedUpstreamResponse(429);\n\n  ratelimit_response_headers.iterate(\n      [response = response_.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate {\n        Http::LowerCaseString lower_key{std::string(entry.key().getStringView())};\n        EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView());\n        return Http::HeaderMap::Iterate::Continue;\n   
   });\n\n  cleanup();\n\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.ok\"));\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ratelimit.over_limit\")->value());\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.error\"));\n}\n\nTEST_P(RatelimitIntegrationTest, Error) {\n  initiateClientConnection();\n  waitForRatelimitRequest();\n  ratelimit_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}, true);\n  // Rate limiter fails open\n  waitForSuccessfulUpstreamResponse();\n  cleanup();\n\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.ok\"));\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.over_limit\"));\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ratelimit.error\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ratelimit.failure_mode_allowed\")->value());\n}\n\nTEST_P(RatelimitIntegrationTest, Timeout) {\n  initiateClientConnection();\n  waitForRatelimitRequest();\n  switch (clientType()) {\n  case Grpc::ClientType::EnvoyGrpc:\n    test_server_->waitForCounterGe(\"cluster.ratelimit.upstream_rq_timeout\", 1);\n    test_server_->waitForCounterGe(\"cluster.ratelimit.upstream_rq_504\", 1);\n    EXPECT_EQ(1, test_server_->counter(\"cluster.ratelimit.upstream_rq_timeout\")->value());\n    EXPECT_EQ(1, test_server_->counter(\"cluster.ratelimit.upstream_rq_504\")->value());\n    break;\n  case Grpc::ClientType::GoogleGrpc:\n    test_server_->waitForCounterGe(\"grpc.ratelimit.streams_closed_4\", 1);\n    EXPECT_EQ(1, test_server_->counter(\"grpc.ratelimit.streams_total\")->value());\n    EXPECT_EQ(1, test_server_->counter(\"grpc.ratelimit.streams_closed_4\")->value());\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  // Rate limiter fails open\n  waitForSuccessfulUpstreamResponse();\n  cleanup();\n}\n\nTEST_P(RatelimitIntegrationTest, ConnectImmediateDisconnect) {\n  
initiateClientConnection();\n  ASSERT_TRUE(fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_ratelimit_connection_));\n  ASSERT_TRUE(fake_ratelimit_connection_->close());\n  ASSERT_TRUE(fake_ratelimit_connection_->waitForDisconnect());\n  fake_ratelimit_connection_ = nullptr;\n  // Rate limiter fails open\n  waitForSuccessfulUpstreamResponse();\n  cleanup();\n}\n\nTEST_P(RatelimitIntegrationTest, FailedConnect) {\n  // Do not reset the fake upstream for the ratelimiter, but have it stop listening.\n  // If we reset, the Envoy will continue to send H2 to the original rate limiter port, which may\n  // be used by another test, and data sent to that port \"unexpectedly\" will cause problems for\n  // that test.\n  fake_upstreams_[1]->cleanUp();\n  initiateClientConnection();\n  // Rate limiter fails open\n  waitForSuccessfulUpstreamResponse();\n  cleanup();\n}\n\nTEST_P(RatelimitFailureModeIntegrationTest, ErrorWithFailureModeOff) {\n  initiateClientConnection();\n  waitForRatelimitRequest();\n  ratelimit_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, true);\n  // Rate limiter fail closed\n  waitForFailedUpstreamResponse(500);\n  cleanup();\n\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.ok\"));\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.over_limit\"));\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ratelimit.error\")->value());\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.failure_mode_allowed\"));\n}\n\nTEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OkWithFilterHeaders) {\n  initiateClientConnection();\n  waitForRatelimitRequest();\n\n  Extensions::Filters::Common::RateLimit::DescriptorStatusList descriptor_statuses{\n      Envoy::RateLimit::buildDescriptorStatus(\n          1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, \"first\", 2, 3),\n      Envoy::RateLimit::buildDescriptorStatus(\n         
 4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, \"second\", 5, 6)};\n  sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, descriptor_statuses,\n                        Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{});\n  waitForSuccessfulUpstreamResponse();\n\n  EXPECT_THAT(\n      response_.get()->headers(),\n      Http::HeaderValueOf(\n          Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitLimit,\n          \"1, 1;w=60;name=\\\"first\\\", 4;w=3600;name=\\\"second\\\"\"));\n  EXPECT_THAT(\n      response_.get()->headers(),\n      Http::HeaderValueOf(\n          Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitRemaining,\n          \"2\"));\n  EXPECT_THAT(\n      response_.get()->headers(),\n      Http::HeaderValueOf(\n          Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitReset, \"3\"));\n\n  cleanup();\n\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ratelimit.ok\")->value());\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.over_limit\"));\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.error\"));\n}\n\nTEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OverLimitWithFilterHeaders) {\n  initiateClientConnection();\n  waitForRatelimitRequest();\n\n  Extensions::Filters::Common::RateLimit::DescriptorStatusList descriptor_statuses{\n      Envoy::RateLimit::buildDescriptorStatus(\n          1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, \"first\", 2, 3),\n      Envoy::RateLimit::buildDescriptorStatus(\n          4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, \"second\", 5, 6)};\n  sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT,\n                        descriptor_statuses, Http::TestResponseHeaderMapImpl{},\n                        
Http::TestRequestHeaderMapImpl{});\n  waitForFailedUpstreamResponse(429);\n\n  EXPECT_THAT(\n      response_.get()->headers(),\n      Http::HeaderValueOf(\n          Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitLimit,\n          \"1, 1;w=60;name=\\\"first\\\", 4;w=3600;name=\\\"second\\\"\"));\n  EXPECT_THAT(\n      response_.get()->headers(),\n      Http::HeaderValueOf(\n          Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitRemaining,\n          \"2\"));\n  EXPECT_THAT(\n      response_.get()->headers(),\n      Http::HeaderValueOf(\n          Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitReset, \"3\"));\n\n  cleanup();\n\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.ok\"));\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.ratelimit.over_limit\")->value());\n  EXPECT_EQ(nullptr, test_server_->counter(\"cluster.cluster_0.ratelimit.error\"));\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/ratelimit/ratelimit_test.cc",
    "content": "#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/http/ratelimit/v3/rate_limit.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/http/headers.h\"\n\n#include \"extensions/filters/http/ratelimit/ratelimit.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/extensions/filters/common/ratelimit/mocks.h\"\n#include \"test/extensions/filters/common/ratelimit/utils.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/ratelimit/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Not;\nusing testing::Return;\nusing testing::SetArgReferee;\nusing testing::WithArgs;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RateLimitFilter {\nnamespace {\n\nclass HttpRateLimitFilterTest : public testing::Test {\npublic:\n  HttpRateLimitFilterTest() : http_context_(stats_store_.symbolTable()) {\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.http_filter_enabled\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.http_filter_enforcing\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.test_key.http_filter_enabled\", 100))\n        .WillByDefault(Return(true));\n  }\n\n  void SetUpTest(const std::string& yaml) {\n    envoy::extensions::filters::http::ratelimit::v3::RateLimit proto_config{};\n    TestUtility::loadFromYaml(yaml, proto_config);\n\n    config_ = 
std::make_shared<FilterConfig>(proto_config, local_info_, stats_store_, runtime_,\n                                             http_context_);\n\n    client_ = new Filters::Common::RateLimit::MockClient();\n    filter_ = std::make_unique<Filter>(config_, Filters::Common::RateLimit::ClientPtr{client_});\n    filter_->setDecoderFilterCallbacks(filter_callbacks_);\n    filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear();\n    filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.emplace_back(\n        route_rate_limit_);\n    filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_.rate_limit_policy_entry_\n        .clear();\n    filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_.rate_limit_policy_entry_\n        .emplace_back(vh_rate_limit_);\n  }\n\n  const std::string fail_close_config_ = R\"EOF(\n  domain: foo\n  failure_mode_deny: true\n  )EOF\";\n\n  const std::string enable_x_ratelimit_headers_config_ = R\"EOF(\n  domain: foo\n  enable_x_ratelimit_headers: DRAFT_VERSION_03\n  )EOF\";\n\n  const std::string filter_config_ = R\"EOF(\n  domain: foo\n  )EOF\";\n\n  Filters::Common::RateLimit::MockClient* client_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> filter_callbacks_;\n  Stats::StatNamePool pool_{filter_callbacks_.clusterInfo()->statsScope().symbolTable()};\n  Stats::StatName ratelimit_ok_{pool_.add(\"ratelimit.ok\")};\n  Stats::StatName ratelimit_error_{pool_.add(\"ratelimit.error\")};\n  Stats::StatName ratelimit_failure_mode_allowed_{pool_.add(\"ratelimit.failure_mode_allowed\")};\n  Stats::StatName ratelimit_over_limit_{pool_.add(\"ratelimit.over_limit\")};\n  Stats::StatName upstream_rq_4xx_{pool_.add(\"upstream_rq_4xx\")};\n  Stats::StatName upstream_rq_429_{pool_.add(\"upstream_rq_429\")};\n  Filters::Common::RateLimit::RequestCallbacks* request_callbacks_{};\n  Http::TestRequestHeaderMapImpl request_headers_;\n  
Http::TestRequestTrailerMapImpl request_trailers_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n  Buffer::OwnedImpl data_;\n  Buffer::OwnedImpl response_data_;\n  NiceMock<Stats::MockIsolatedStatsStore> stats_store_;\n  FilterConfigSharedPtr config_;\n  std::unique_ptr<Filter> filter_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Router::MockRateLimitPolicyEntry> route_rate_limit_;\n  NiceMock<Router::MockRateLimitPolicyEntry> vh_rate_limit_;\n  std::vector<RateLimit::Descriptor> descriptor_{{{{\"descriptor_key\", \"descriptor_value\"}}}};\n  std::vector<RateLimit::Descriptor> descriptor_two_{{{{\"key\", \"value\"}}}};\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  Http::ContextImpl http_context_;\n};\n\nTEST_F(HttpRateLimitFilterTest, NoRoute) {\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n}\n\nTEST_F(HttpRateLimitFilterTest, NoCluster) {\n  SetUpTest(filter_config_);\n\n  ON_CALL(filter_callbacks_, clusterInfo()).WillByDefault(Return(nullptr));\n\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n}\n\nTEST_F(HttpRateLimitFilterTest, NoApplicableRateLimit) {\n  SetUpTest(filter_config_);\n\n  filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear();\n  EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n}\n\nTEST_F(HttpRateLimitFilterTest, NoDescriptor) {\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(1);\n  EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(1);\n  EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, 
filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n}\n\nTEST_F(HttpRateLimitFilterTest, RuntimeDisabled) {\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.http_filter_enabled\", 100))\n      .WillOnce(Return(false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n}\n\nTEST_F(HttpRateLimitFilterTest, OkResponse) {\n  SetUpTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              
getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  request_headers_.addCopy(Http::Headers::get().RequestId, \"requestid\");\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited))\n      .Times(0);\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) {\n  SetUpTest(filter_config_);\n  InSequence 
s;\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  request_headers_.addCopy(Http::Headers::get().RequestId, \"requestid\");\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited))\n      .Times(0);\n\n  Http::HeaderMapPtr request_headers_to_add{\n      new Http::TestRequestHeaderMapImpl{{\"x-rls-rate-limited\", \"true\"}}};\n  Http::HeaderMapPtr rl_headers{new 
Http::TestResponseHeaderMapImpl{\n      {\"x-ratelimit-limit\", \"1000\"}, {\"x-ratelimit-remaining\", \"500\"}}};\n\n  request_callbacks_->complete(\n      Filters::Common::RateLimit::LimitStatus::OK, nullptr,\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl(*rl_headers)},\n      Http::RequestHeaderMapPtr{new Http::TestRequestHeaderMapImpl(*request_headers_to_add)});\n  Http::TestResponseHeaderMapImpl expected_headers(*rl_headers);\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n  EXPECT_EQ(true, (expected_headers == response_headers));\n\n  EXPECT_THAT(*request_headers_to_add, IsSubsetOfHeaders(request_headers_));\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, OkResponseWithFilterHeaders) {\n  SetUpTest(enable_x_ratelimit_headers_config_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  request_headers_.addCopy(Http::Headers::get().RequestId, \"requestid\");\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            
filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited))\n      .Times(0);\n\n  auto descriptor_statuses = {\n      Envoy::RateLimit::buildDescriptorStatus(\n          1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, \"first\", 2, 3),\n      Envoy::RateLimit::buildDescriptorStatus(\n          4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, \"second\", 5, 6)};\n  auto descriptor_statuses_ptr =\n      std::make_unique<Filters::Common::RateLimit::DescriptorStatusList>(descriptor_statuses);\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK,\n                               std::move(descriptor_statuses_ptr), nullptr, nullptr);\n\n  Http::TestResponseHeaderMapImpl expected_headers{\n      {\"x-ratelimit-limit\", \"1, 1;w=60;name=\\\"first\\\", 4;w=3600;name=\\\"second\\\"\"},\n      {\"x-ratelimit-remaining\", \"2\"},\n      {\"x-ratelimit-reset\", \"3\"}};\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n  EXPECT_THAT(response_headers, HeaderMapEqualRef(&expected_headers));\n  EXPECT_EQ(\n      1U, 
filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, ImmediateOkResponse) {\n  SetUpTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, ImmediateErrorResponse) {\n  SetUpTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n     
                         testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_error_).value());\n  EXPECT_EQ(1U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromStatName(ratelimit_failure_mode_allowed_)\n                    .value());\n}\n\nTEST_F(HttpRateLimitFilterTest, ErrorResponse) {\n  SetUpTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          
})));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited))\n      .Times(0);\n\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_error_).value());\n  EXPECT_EQ(1U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromStatName(ratelimit_failure_mode_allowed_)\n                    .value());\n}\n\nTEST_F(HttpRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) {\n  SetUpTest(fail_close_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError))\n      .Times(0);\n\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_error_).value());\n  EXPECT_EQ(0U, 
filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromStatName(ratelimit_failure_mode_allowed_)\n                    .value());\n  EXPECT_EQ(\"rate_limiter_error\", filter_callbacks_.details());\n}\n\nTEST_F(HttpRateLimitFilterTest, LimitResponse) {\n  SetUpTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n\n  Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl()};\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"429\"},\n      {\"x-envoy-ratelimited\", Http::Headers::get().EnvoyRateLimitedValues.True}};\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited));\n\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr,\n                               std::move(h), nullptr);\n\n  EXPECT_EQ(1U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromStatName(ratelimit_over_limit_)\n                    .value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value());\n  EXPECT_EQ(\"request_rate_limited\", 
filter_callbacks_.details());\n}\n\nTEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) {\n  SetUpTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  Http::HeaderMapPtr rl_headers{new Http::TestResponseHeaderMapImpl{\n      {\"x-ratelimit-limit\", \"1000\"}, {\"x-ratelimit-remaining\", \"0\"}, {\"retry-after\", \"33\"}}};\n  Http::TestResponseHeaderMapImpl expected_headers(*rl_headers);\n  expected_headers.addCopy(\":status\", \"429\");\n  expected_headers.addCopy(\"x-envoy-ratelimited\", Http::Headers::get().EnvoyRateLimitedValues.True);\n\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited));\n\n  Http::HeaderMapPtr request_headers_to_add{\n      new Http::TestRequestHeaderMapImpl{{\"x-rls-rate-limited\", \"true\"}}};\n\n  Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl(*rl_headers)};\n  Http::RequestHeaderMapPtr uh{new 
Http::TestRequestHeaderMapImpl(*request_headers_to_add)};\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr,\n                               std::move(h), std::move(uh));\n\n  EXPECT_THAT(*request_headers_to_add, Not(IsSubsetOfHeaders(request_headers_)));\n  EXPECT_EQ(1U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromStatName(ratelimit_over_limit_)\n                    .value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, LimitResponseWithFilterHeaders) {\n  SetUpTest(enable_x_ratelimit_headers_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  Http::TestResponseHeaderMapImpl expected_headers{\n      {\":status\", \"429\"},\n      {\"x-envoy-ratelimited\", Http::Headers::get().EnvoyRateLimitedValues.True},\n      {\"x-ratelimit-limit\", \"1, 1;w=60;name=\\\"first\\\", 
4;w=3600;name=\\\"second\\\"\"},\n      {\"x-ratelimit-remaining\", \"2\"},\n      {\"x-ratelimit-reset\", \"3\"}};\n  EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited));\n\n  auto descriptor_statuses = {\n      Envoy::RateLimit::buildDescriptorStatus(\n          1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, \"first\", 2, 3),\n      Envoy::RateLimit::buildDescriptorStatus(\n          4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, \"second\", 5, 6)};\n  auto descriptor_statuses_ptr =\n      std::make_unique<Filters::Common::RateLimit::DescriptorStatusList>(descriptor_statuses);\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit,\n                               std::move(descriptor_statuses_ptr), nullptr, nullptr);\n  EXPECT_EQ(1U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromStatName(ratelimit_over_limit_)\n                    .value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) {\n  SetUpTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            
filter_->decodeHeaders(request_headers_, false));\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.http_filter_enforcing\", 100))\n      .WillOnce(Return(false));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl()};\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr,\n                               std::move(h), nullptr);\n\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(1U, filter_callbacks_.clusterInfo()\n                    ->statsScope()\n                    .counterFromStatName(ratelimit_over_limit_)\n                    .value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value());\n  EXPECT_EQ(\n      1U,\n      filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, ResetDuringCall) {\n  SetUpTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n         
   filter_->decodeHeaders(request_headers_, false));\n\n  EXPECT_CALL(*client_, cancel());\n  filter_->onDestroy();\n}\n\nTEST_F(HttpRateLimitFilterTest, RouteRateLimitDisabledForRouteKey) {\n  route_rate_limit_.disable_key_ = \"test_key\";\n  SetUpTest(filter_config_);\n\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.test_key.http_filter_enabled\", 100))\n      .WillByDefault(Return(false));\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0);\n  EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n}\n\nTEST_F(HttpRateLimitFilterTest, VirtualHostRateLimitDisabledForRouteKey) {\n  vh_rate_limit_.disable_key_ = \"test_vh_key\";\n  SetUpTest(filter_config_);\n\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.test_vh_key.http_filter_enabled\", 100))\n      .WillByDefault(Return(false));\n\n  EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0);\n  EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0);\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n}\n\nTEST_F(HttpRateLimitFilterTest, IncorrectRequestType) {\n  std::string internal_filter_config = R\"EOF(\n  {\n    \"domain\": \"foo\",\n    \"request_type\" : \"internal\"\n  }\n  )EOF\";\n  SetUpTest(internal_filter_config);\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0);\n  EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0);\n  EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  std::string external_filter_config = R\"EOF(\n  {\n    \"domain\": \"foo\",\n    \"request_type\" : \"external\"\n  }\n  )EOF\";\n  SetUpTest(external_filter_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"true\"}};\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0);\n  EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0);\n  EXPECT_CALL(*client_, limit(_, 
_, _, _)).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n}\n\nTEST_F(HttpRateLimitFilterTest, InternalRequestType) {\n  std::string internal_filter_config = R\"EOF(\n  {\n    \"domain\": \"foo\",\n    \"request_type\" : \"internal\"\n  }\n  )EOF\";\n  SetUpTest(internal_filter_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"true\"}};\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, 
continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, ExternalRequestType) {\n\n  std::string external_filter_config = R\"EOF(\n  {\n    \"domain\": \"foo\",\n    \"request_type\" : \"external\"\n  }\n  )EOF\";\n  SetUpTest(external_filter_config);\n  Http::TestRequestHeaderMapImpl request_headers{{\"x-envoy-internal\", \"false\"}};\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            
callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, DEPRECATED_FEATURE_TEST(ExcludeVirtualHost)) {\n  std::string external_filter_config = R\"EOF(\n  {\n    \"domain\": \"foo\"\n  }\n  )EOF\";\n  SetUpTest(external_filter_config);\n  envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute vh_settings;\n  vh_settings.clear_vh_rate_limits();\n  FilterConfigPerRoute per_route_config_(vh_settings);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0));\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits())\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_,\n              perFilterConfig(HttpFilterNames::get().RateLimit))\n      .WillOnce(Return(&per_route_config_));\n\n  
EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, empty())\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(0);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\n// Tests that the route rate limit is used when VhRateLimitsOptions::OVERRIDE and route rate limit\n// is set\nTEST_F(HttpRateLimitFilterTest, OverrideVHRateLimitOptionWithRouteRateLimitSet) {\n  SetUpTest(filter_config_);\n  envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings;\n  
settings.set_vh_rate_limits(\n      envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::OVERRIDE);\n  FilterConfigPerRoute per_route_config_(settings);\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0));\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits())\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_,\n              perFilterConfig(HttpFilterNames::get().RateLimit))\n      .WillOnce(Return(&per_route_config_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, empty())\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(0);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\n// Tests that the virtual host rate limit is used when VhRateLimitsOptions::OVERRIDE is set and\n// route rate limit is empty\nTEST_F(HttpRateLimitFilterTest, OverrideVHRateLimitOptionWithoutRouteRateLimit) {\n  SetUpTest(filter_config_);\n  envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings;\n  settings.set_vh_rate_limits(\n      envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::OVERRIDE);\n  FilterConfigPerRoute per_route_config_(settings);\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits())\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_,\n              perFilterConfig(HttpFilterNames::get().RateLimit))\n      .WillOnce(Return(&per_route_config_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, empty())\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          
WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\n// Tests that the virtual host rate limit is used when VhRateLimitsOptions::INCLUDE is set and route\n// rate limit is empty\nTEST_F(HttpRateLimitFilterTest, IncludeVHRateLimitOptionWithOnlyVHRateLimitSet) {\n  SetUpTest(filter_config_);\n  envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings;\n  settings.set_vh_rate_limits(\n      envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::INCLUDE);\n  FilterConfigPerRoute per_route_config_(settings);\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits())\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_,\n              perFilterConfig(HttpFilterNames::get().RateLimit))\n      
.WillOnce(Return(&per_route_config_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_two_));\n\n  EXPECT_CALL(*client_,\n              limit(_, \"foo\",\n                    testing::ContainerEq(std::vector<RateLimit::Descriptor>{{{{\"key\", \"value\"}}}}),\n                    _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\n// Tests that the virtual host rate limit is used when VhRateLimitsOptions::INCLUDE and route rate\n// limit is set\nTEST_F(HttpRateLimitFilterTest, IncludeVHRateLimitOptionWithRouteAndVHRateLimitSet) {\n  SetUpTest(filter_config_);\n  envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings;\n  
settings.set_vh_rate_limits(\n      envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::INCLUDE);\n  FilterConfigPerRoute per_route_config_(settings);\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0));\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits())\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_,\n              perFilterConfig(HttpFilterNames::get().RateLimit))\n      .WillOnce(Return(&per_route_config_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_two_));\n\n  EXPECT_CALL(*client_,\n              limit(_, \"foo\",\n                    testing::ContainerEq(std::vector<RateLimit::Descriptor>{{{{\"key\", \"value\"}}}}),\n                    _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, 
false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\n// Tests that the route rate limit is used when VhRateLimitsOptions::IGNORE and route rate limit is\n// set\nTEST_F(HttpRateLimitFilterTest, IgnoreVHRateLimitOptionWithRouteRateLimitSet) {\n  SetUpTest(filter_config_);\n  envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings;\n  settings.set_vh_rate_limits(\n      envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::IGNORE);\n  FilterConfigPerRoute per_route_config_(settings);\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0));\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits())\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_,\n              perFilterConfig(HttpFilterNames::get().RateLimit))\n      .WillOnce(Return(&per_route_config_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(0);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          
})));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\n// Tests that no rate limit is used when VhRateLimitsOptions::IGNORE is set and route rate limit\n// empty\nTEST_F(HttpRateLimitFilterTest, IgnoreVHRateLimitOptionWithOutRouteRateLimit) {\n  SetUpTest(filter_config_);\n  envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings;\n  settings.set_vh_rate_limits(\n      envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::IGNORE);\n  FilterConfigPerRoute per_route_config_(settings);\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits())\n      .WillOnce(Return(false));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_,\n              perFilterConfig(HttpFilterNames::get().RateLimit))\n      .WillOnce(Return(&per_route_config_));\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_,\n              getApplicableRateLimit(0))\n      .Times(0);\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_));\n\n  EXPECT_EQ(\n      0, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value());\n}\n\nTEST_F(HttpRateLimitFilterTest, ConfigValueTest) {\n  std::string stage_filter_config = R\"EOF(\n  {\n    \"domain\": \"foo\",\n    \"stage\": 5,\n    \"request_type\" : \"internal\"\n  }\n  )EOF\";\n\n  SetUpTest(stage_filter_config);\n\n  EXPECT_EQ(5UL, config_->stage());\n  EXPECT_EQ(\"foo\", config_->domain());\n  EXPECT_EQ(FilterRequestType::Internal, config_->requestType());\n}\n\nTEST_F(HttpRateLimitFilterTest, DefaultConfigValueTest) {\n  std::string stage_filter_config = R\"EOF(\n  {\n    \"domain\": \"foo\"\n  }\n  )EOF\";\n\n  SetUpTest(stage_filter_config);\n\n  EXPECT_EQ(0UL, config_->stage());\n  EXPECT_EQ(\"foo\", config_->domain());\n  EXPECT_EQ(FilterRequestType::Both, config_->requestType());\n}\n\n} // namespace\n} // namespace RateLimitFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/rbac/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_mock\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.rbac\",\n    deps = [\n        \"//source/extensions/filters/http/rbac:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"rbac_filter_test\",\n    srcs = [\"rbac_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.rbac\",\n    deps = [\n        \"//source/extensions/filters/common/rbac:utility_lib\",\n        \"//source/extensions/filters/http/rbac:rbac_filter_lib\",\n        \"//test/extensions/filters/common/rbac:engine_mocks\",\n        \"//test/extensions/filters/http/rbac:route_config_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"rbac_filter_integration_test\",\n    srcs = [\"rbac_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.rbac\",\n    deps = [\n        \"//source/extensions/filters/http/rbac:config\",\n        \"//test/config:utility_lib\",\n        \"//test/integration:http_protocol_integration_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_mock(\n    name 
= \"route_config_mocks\",\n    hdrs = [\"mocks.h\"],\n    extension_name = \"envoy.filters.http.rbac\",\n    deps = [\n        \"//source/extensions/filters/common/rbac:utility_lib\",\n        \"//source/extensions/filters/http/rbac:rbac_filter_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/rbac/config_test.cc",
    "content": "#include \"envoy/config/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.h\"\n\n#include \"extensions/filters/common/rbac/engine.h\"\n#include \"extensions/filters/http/rbac/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RBACFilter {\nnamespace {\n\nTEST(RoleBasedAccessControlFilterConfigFactoryTest, ValidProto) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_any(true);\n  policy.add_principals()->set_any(true);\n  envoy::extensions::filters::http::rbac::v3::RBAC config;\n  (*config.mutable_rules()->mutable_policies())[\"foo\"] = policy;\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RoleBasedAccessControlFilterConfigFactory factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callbacks;\n  EXPECT_CALL(filter_callbacks, addStreamDecoderFilter(_));\n  cb(filter_callbacks);\n}\n\nTEST(RoleBasedAccessControlFilterConfigFactoryTest, EmptyProto) {\n  RoleBasedAccessControlFilterConfigFactory factory;\n  EXPECT_NE(nullptr, dynamic_cast<envoy::extensions::filters::http::rbac::v3::RBAC*>(\n                         factory.createEmptyConfigProto().get()));\n}\n\nTEST(RoleBasedAccessControlFilterConfigFactoryTest, EmptyRouteProto) {\n  RoleBasedAccessControlFilterConfigFactory factory;\n  EXPECT_NE(nullptr, dynamic_cast<envoy::extensions::filters::http::rbac::v3::RBACPerRoute*>(\n                         factory.createEmptyRouteConfigProto().get()));\n}\n\nTEST(RoleBasedAccessControlFilterConfigFactoryTest, RouteSpecificConfig) {\n  
RoleBasedAccessControlFilterConfigFactory factory;\n  NiceMock<Server::Configuration::MockServerFactoryContext> context;\n\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();\n  EXPECT_TRUE(proto_config.get());\n\n  Router::RouteSpecificFilterConfigConstSharedPtr route_config =\n      factory.createRouteSpecificFilterConfig(*proto_config, context,\n                                              ProtobufMessage::getNullValidationVisitor());\n  EXPECT_TRUE(route_config.get());\n}\n\n} // namespace\n} // namespace RBACFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/rbac/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.h\"\n\n#include \"extensions/filters/common/rbac/utility.h\"\n#include \"extensions/filters/http/rbac/rbac_filter.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RBACFilter {\nnamespace {\n\nclass MockRoleBasedAccessControlRouteSpecificFilterConfig\n    : public RoleBasedAccessControlRouteSpecificFilterConfig {\npublic:\n  MockRoleBasedAccessControlRouteSpecificFilterConfig(\n      const envoy::extensions::filters::http::rbac::v3::RBACPerRoute& r)\n      : RoleBasedAccessControlRouteSpecificFilterConfig(r){};\n\n  MOCK_METHOD(Filters::Common::RBAC::RoleBasedAccessControlEngineImpl&, engine, (), (const));\n};\n\n} // namespace\n} // namespace RBACFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/rbac/rbac_filter_integration_test.cc",
    "content": "#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/integration/http_protocol_integration.h\"\n\nnamespace Envoy {\nnamespace {\n\nconst std::string RBAC_CONFIG = R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.rbac.v2.RBAC\n  rules:\n    policies:\n      foo:\n        permissions:\n          - header: { name: \":method\", exact_match: \"GET\" }\n        principals:\n          - any: true\n)EOF\";\n\nconst std::string RBAC_CONFIG_WITH_DENY_ACTION = R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.rbac.v2.RBAC\n  rules:\n    action: DENY\n    policies:\n      \"deny policy\":\n        permissions:\n          - header: { name: \":method\", exact_match: \"GET\" }\n        principals:\n          - any: true\n)EOF\";\n\nconst std::string RBAC_CONFIG_WITH_PREFIX_MATCH = R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.rbac.v2.RBAC\n  rules:\n    policies:\n      foo:\n        permissions:\n          - header: { name: \":path\", prefix_match: \"/foo\" }\n        principals:\n          - any: true\n)EOF\";\n\nconst std::string RBAC_CONFIG_WITH_PATH_EXACT_MATCH = R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.rbac.v2.RBAC\n  rules:\n    policies:\n      foo:\n        permissions:\n          - url_path:\n              path: { exact: \"/allow\" }\n        principals:\n          - any: true\n)EOF\";\n\nconst std::string RBAC_CONFIG_WITH_PATH_IGNORE_CASE_MATCH = R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.rbac.v2.RBAC\n  rules:\n    policies:\n      foo:\n        permissions:\n          - url_path:\n       
       path: { exact: \"/ignore_case\", ignore_case: true }\n        principals:\n          - any: true\n)EOF\";\n\nconst std::string RBAC_CONFIG_WITH_LOG_ACTION = R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC\n  rules:\n    action: LOG\n    policies:\n      foo:\n        permissions:\n          - header: { name: \":method\", exact_match: \"GET\" }\n        principals:\n          - any: true\n)EOF\";\n\nconst std::string RBAC_CONFIG_HEADER_MATCH_CONDITION = R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC\n  rules:\n    policies:\n      foo:\n        permissions:\n          - any: true\n        principals:\n          - any: true\n        condition:\n          call_expr:\n            function: _==_\n            args:\n            - select_expr:\n                operand:\n                  select_expr:\n                    operand:\n                      ident_expr:\n                        name: request\n                    field: headers\n                field: xxx\n            - const_expr:\n               string_value: {}\n)EOF\";\n\nusing RBACIntegrationTest = HttpProtocolIntegrationTest;\n\nINSTANTIATE_TEST_SUITE_P(Protocols, RBACIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nTEST_P(RBACIntegrationTest, Allowed) {\n  useAccessLog(\"%RESPONSE_CODE_DETAILS%\");\n  config_helper_.addFilter(RBAC_CONFIG);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"x-forwarded-for\", \"10.0.0.1\"},\n      },\n      
1024);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_THAT(waitForAccessLog(access_log_name_), testing::HasSubstr(\"via_upstream\"));\n}\n\nTEST_P(RBACIntegrationTest, Denied) {\n  useAccessLog(\"%RESPONSE_CODE_DETAILS%\");\n  config_helper_.addFilter(RBAC_CONFIG);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"},\n          {\":path\", \"/\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"x-forwarded-for\", \"10.0.0.1\"},\n      },\n      1024);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"403\", response->headers().getStatusValue());\n  EXPECT_THAT(waitForAccessLog(access_log_name_),\n              testing::HasSubstr(\"rbac_access_denied_matched_policy[none]\"));\n}\n\nTEST_P(RBACIntegrationTest, DeniedWithDenyAction) {\n  useAccessLog(\"%RESPONSE_CODE_DETAILS%\");\n  config_helper_.addFilter(RBAC_CONFIG_WITH_DENY_ACTION);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"x-forwarded-for\", \"10.0.0.1\"},\n      },\n      1024);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"403\", response->headers().getStatusValue());\n  // Note the whitespace in the policy id is replaced by '_'.\n  EXPECT_THAT(waitForAccessLog(access_log_name_),\n              
testing::HasSubstr(\"rbac_access_denied_matched_policy[deny_policy]\"));\n}\n\nTEST_P(RBACIntegrationTest, DeniedWithPrefixRule) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             cfg) { cfg.mutable_normalize_path()->set_value(false); });\n  config_helper_.addFilter(RBAC_CONFIG_WITH_PREFIX_MATCH);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"},\n          {\":path\", \"/foo/../bar\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"x-forwarded-for\", \"10.0.0.1\"},\n      },\n      1024);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             cfg) { cfg.mutable_normalize_path()->set_value(true); });\n  config_helper_.addFilter(RBAC_CONFIG_WITH_PREFIX_MATCH);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"},\n          {\":path\", \"/foo/../bar\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"x-forwarded-for\", \"10.0.0.1\"},\n      },\n      1024);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"403\", response->headers().getStatusValue());\n}\n\nTEST_P(RBACIntegrationTest, DeniedHeadReply) {\n  
config_helper_.addFilter(RBAC_CONFIG);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"HEAD\"},\n          {\":path\", \"/\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"x-forwarded-for\", \"10.0.0.1\"},\n      },\n      1024);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"403\", response->headers().getStatusValue());\n  ASSERT_TRUE(response->headers().ContentLength());\n  EXPECT_NE(\"0\", response->headers().getContentLengthValue());\n  EXPECT_THAT(response->body(), ::testing::IsEmpty());\n}\n\nTEST_P(RBACIntegrationTest, RouteOverride) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             cfg) {\n        envoy::extensions::filters::http::rbac::v3::RBACPerRoute per_route_config;\n        TestUtility::loadFromJson(\"{}\", per_route_config);\n\n        auto* config = cfg.mutable_route_config()\n                           ->mutable_virtual_hosts()\n                           ->Mutable(0)\n                           ->mutable_typed_per_filter_config();\n\n        (*config)[Extensions::HttpFilters::HttpFilterNames::get().Rbac].PackFrom(per_route_config);\n      });\n  config_helper_.addFilter(RBAC_CONFIG);\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"},\n          {\":path\", \"/\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"x-forwarded-for\", \"10.0.0.1\"},\n      },\n      1024);\n\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  
response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(RBACIntegrationTest, PathWithQueryAndFragment) {\n  config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_EXACT_MATCH);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  const std::vector<std::string> paths{\"/allow\", \"/allow?p1=v1&p2=v2\", \"/allow?p1=v1#seg\"};\n\n  for (const auto& path : paths) {\n    auto response = codec_client_->makeRequestWithBody(\n        Http::TestRequestHeaderMapImpl{\n            {\":method\", \"POST\"},\n            {\":path\", path},\n            {\":scheme\", \"http\"},\n            {\":authority\", \"host\"},\n            {\"x-forwarded-for\", \"10.0.0.1\"},\n        },\n        1024);\n    waitForNextUpstreamRequest();\n    upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n}\n\nTEST_P(RBACIntegrationTest, PathIgnoreCase) {\n  config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_IGNORE_CASE_MATCH);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  const std::vector<std::string> paths{\"/ignore_case\", \"/IGNORE_CASE\", \"/ignore_CASE\"};\n\n  for (const auto& path : paths) {\n    auto response = codec_client_->makeRequestWithBody(\n        Http::TestRequestHeaderMapImpl{\n            {\":method\", \"POST\"},\n            {\":path\", path},\n            {\":scheme\", \"http\"},\n            {\":authority\", \"host\"},\n            {\"x-forwarded-for\", \"10.0.0.1\"},\n        },\n        1024);\n    waitForNextUpstreamRequest();\n    upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", 
response->headers().getStatusValue());\n  }\n}\n\nTEST_P(RBACIntegrationTest, LogConnectionAllow) {\n  config_helper_.addFilter(RBAC_CONFIG_WITH_LOG_ACTION);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"},\n          {\":path\", \"/\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"x-forwarded-for\", \"10.0.0.1\"},\n      },\n      1024);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// Basic CEL match on a header value.\nTEST_P(RBACIntegrationTest, HeaderMatchCondition) {\n  config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, \"yyy\"));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"},\n          {\":path\", \"/path\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"xxx\", \"yyy\"},\n      },\n      1024);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// CEL match on a header value in which the header is a duplicate. 
Verifies we handle string\n// copying correctly inside the CEL expression.\nTEST_P(RBACIntegrationTest, HeaderMatchConditionDuplicateHeaderNoMatch) {\n  config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, \"yyy\"));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"},\n          {\":path\", \"/path\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"xxx\", \"yyy\"},\n          {\"xxx\", \"zzz\"},\n      },\n      1024);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"403\", response->headers().getStatusValue());\n}\n\n// CEL match on a header value in which the header is a duplicate. Verifies we handle string\n// copying correctly inside the CEL expression.\nTEST_P(RBACIntegrationTest, HeaderMatchConditionDuplicateHeaderMatch) {\n  config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, \"yyy,zzz\"));\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"POST\"},\n          {\":path\", \"/path\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"host\"},\n          {\"xxx\", \"yyy\"},\n          {\"xxx\", \"zzz\"},\n      },\n      1024);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/rbac/rbac_filter_test.cc",
    "content": "#include \"envoy/config/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/http/rbac/v3/rbac.pb.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/common/rbac/utility.h\"\n#include \"extensions/filters/http/rbac/rbac_filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#include \"test/extensions/filters/common/rbac/mocks.h\"\n#include \"test/extensions/filters/http/rbac/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RBACFilter {\nnamespace {\n\nenum class LogResult { Yes, No, Undecided };\n\nclass RoleBasedAccessControlFilterTest : public testing::Test {\npublic:\n  RoleBasedAccessControlFilterConfigSharedPtr\n  setupConfig(envoy::config::rbac::v3::RBAC::Action action) {\n    envoy::extensions::filters::http::rbac::v3::RBAC config;\n\n    envoy::config::rbac::v3::Policy policy;\n    auto policy_rules = policy.add_permissions()->mutable_or_rules();\n    policy_rules->add_rules()->mutable_requested_server_name()->set_hidden_envoy_deprecated_regex(\n        \".*cncf.io\");\n    policy_rules->add_rules()->set_destination_port(123);\n    policy_rules->add_rules()->mutable_url_path()->mutable_path()->set_suffix(\"suffix\");\n    policy.add_principals()->set_any(true);\n    config.mutable_rules()->set_action(action);\n    (*config.mutable_rules()->mutable_policies())[\"foo\"] = policy;\n\n    envoy::config::rbac::v3::Policy shadow_policy;\n    auto shadow_policy_rules = shadow_policy.add_permissions()->mutable_or_rules();\n    shadow_policy_rules->add_rules()->mutable_requested_server_name()->set_exact(\"xyz.cncf.io\");\n    shadow_policy_rules->add_rules()->set_destination_port(456);\n    shadow_policy.add_principals()->set_any(true);\n    
config.mutable_shadow_rules()->set_action(action);\n    (*config.mutable_shadow_rules()->mutable_policies())[\"bar\"] = shadow_policy;\n\n    return std::make_shared<RoleBasedAccessControlFilterConfig>(config, \"test\", store_);\n  }\n\n  RoleBasedAccessControlFilterTest()\n      : config_(setupConfig(envoy::config::rbac::v3::RBAC::ALLOW)), filter_(config_) {}\n\n  void SetUp() override {\n    EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_));\n    EXPECT_CALL(callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_));\n    filter_.setDecoderFilterCallbacks(callbacks_);\n  }\n\n  void setDestinationPort(uint16_t port) {\n    address_ = Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", port, false);\n    ON_CALL(req_info_, downstreamLocalAddress()).WillByDefault(ReturnRef(address_));\n  }\n\n  void setRequestedServerName(std::string server_name) {\n    requested_server_name_ = server_name;\n    ON_CALL(connection_, requestedServerName()).WillByDefault(Return(requested_server_name_));\n  }\n\n  void checkAccessLogMetadata(LogResult expected) {\n    if (expected != LogResult::Undecided) {\n      auto filter_meta = req_info_.dynamicMetadata().filter_metadata().at(\n          Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace);\n      EXPECT_EQ(expected == LogResult::Yes,\n                filter_meta.fields()\n                    .at(Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().AccessLogKey)\n                    .bool_value());\n    } else {\n      EXPECT_EQ(req_info_.dynamicMetadata().filter_metadata().end(),\n                req_info_.dynamicMetadata().filter_metadata().find(\n                    Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace));\n    }\n  }\n\n  void setMetadata() {\n    ON_CALL(req_info_, setDynamicMetadata(HttpFilterNames::get().Rbac, _))\n        .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) {\n          
req_info_.metadata_.mutable_filter_metadata()->insert(\n              Protobuf::MapPair<std::string, ProtobufWkt::Struct>(HttpFilterNames::get().Rbac,\n                                                                  obj));\n        }));\n\n    ON_CALL(req_info_,\n            setDynamicMetadata(\n                Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, _))\n        .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) {\n          req_info_.metadata_.mutable_filter_metadata()->insert(\n              Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\n                  Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, obj));\n        }));\n  }\n\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  NiceMock<Network::MockConnection> connection_{};\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> req_info_;\n  Stats::IsolatedStoreImpl store_;\n  RoleBasedAccessControlFilterConfigSharedPtr config_;\n  RoleBasedAccessControlFilter filter_;\n\n  Network::Address::InstanceConstSharedPtr address_;\n  std::string requested_server_name_;\n  Http::TestRequestHeaderMapImpl headers_;\n  Http::TestRequestTrailerMapImpl trailers_;\n};\n\nTEST_F(RoleBasedAccessControlFilterTest, Allowed) {\n  setDestinationPort(123);\n  setMetadata();\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false));\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map));\n  EXPECT_EQ(1U, config_->stats().allowed_.value());\n  EXPECT_EQ(1U, config_->stats().shadow_denied_.value());\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_));\n\n  
checkAccessLogMetadata(LogResult::Undecided);\n}\n\nTEST_F(RoleBasedAccessControlFilterTest, RequestedServerName) {\n  setDestinationPort(999);\n  setRequestedServerName(\"www.cncf.io\");\n  setMetadata();\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false));\n  EXPECT_EQ(1U, config_->stats().allowed_.value());\n  EXPECT_EQ(0U, config_->stats().denied_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_allowed_.value());\n  EXPECT_EQ(1U, config_->stats().shadow_denied_.value());\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_));\n\n  checkAccessLogMetadata(LogResult::Undecided);\n}\n\nTEST_F(RoleBasedAccessControlFilterTest, Path) {\n  setDestinationPort(999);\n  setMetadata();\n\n  auto headers = Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"},\n      {\":path\", \"/suffix#seg?param=value\"},\n      {\":scheme\", \"http\"},\n      {\":authority\", \"host\"},\n  };\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers, false));\n  checkAccessLogMetadata(LogResult::Undecided);\n}\n\nTEST_F(RoleBasedAccessControlFilterTest, Denied) {\n  setDestinationPort(456);\n  setMetadata();\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"403\"},\n      {\"content-length\", \"19\"},\n      {\"content-type\", \"text/plain\"},\n  };\n  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));\n  EXPECT_CALL(callbacks_, encodeData(_, true));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers_, true));\n  EXPECT_EQ(1U, config_->stats().denied_.value());\n  EXPECT_EQ(1U, config_->stats().shadow_allowed_.value());\n\n  auto filter_meta = req_info_.dynamicMetadata().filter_metadata().at(HttpFilterNames::get().Rbac);\n  EXPECT_EQ(\"allowed\", 
filter_meta.fields().at(\"shadow_engine_result\").string_value());\n  EXPECT_EQ(\"bar\", filter_meta.fields().at(\"shadow_effective_policy_id\").string_value());\n  EXPECT_EQ(\"rbac_access_denied_matched_policy[none]\", callbacks_.details());\n  checkAccessLogMetadata(LogResult::Undecided);\n}\n\nTEST_F(RoleBasedAccessControlFilterTest, RouteLocalOverride) {\n  setDestinationPort(456);\n  setMetadata();\n\n  envoy::extensions::filters::http::rbac::v3::RBACPerRoute route_config;\n  route_config.mutable_rbac()->mutable_rules()->set_action(envoy::config::rbac::v3::RBAC::DENY);\n  NiceMock<Filters::Common::RBAC::MockEngine> engine{route_config.rbac().rules()};\n  NiceMock<MockRoleBasedAccessControlRouteSpecificFilterConfig> per_route_config_{route_config};\n\n  EXPECT_CALL(engine, handleAction(_, _, _, _)).WillRepeatedly(Return(true));\n  EXPECT_CALL(per_route_config_, engine()).WillRepeatedly(ReturnRef(engine));\n\n  EXPECT_CALL(callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Rbac))\n      .WillRepeatedly(Return(&per_route_config_));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, true));\n  checkAccessLogMetadata(LogResult::Undecided);\n}\n\n// Log Tests\nTEST_F(RoleBasedAccessControlFilterTest, ShouldLog) {\n  config_ = setupConfig(envoy::config::rbac::v3::RBAC::LOG);\n  filter_ = RoleBasedAccessControlFilter(config_);\n  filter_.setDecoderFilterCallbacks(callbacks_);\n\n  setDestinationPort(123);\n  setMetadata();\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false));\n  EXPECT_EQ(1U, config_->stats().allowed_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_denied_.value());\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_));\n\n  checkAccessLogMetadata(LogResult::Yes);\n}\n\nTEST_F(RoleBasedAccessControlFilterTest, 
ShouldNotLog) {\n  config_ = setupConfig(envoy::config::rbac::v3::RBAC::LOG);\n  filter_ = RoleBasedAccessControlFilter(config_);\n  filter_.setDecoderFilterCallbacks(callbacks_);\n\n  setDestinationPort(456);\n  setMetadata();\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false));\n  EXPECT_EQ(1U, config_->stats().allowed_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_denied_.value());\n\n  Buffer::OwnedImpl data(\"\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_));\n\n  checkAccessLogMetadata(LogResult::No);\n}\n\n} // namespace\n} // namespace RBACFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/router/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.router\",\n    deps = [\n        \"//source/extensions/filters/http/router:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"auto_sni_integration_test\",\n    srcs = [\"auto_sni_integration_test.cc\"],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    extension_name = \"envoy.filters.http.router\",\n    deps = [\n        \"//source/extensions/filters/http/router:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/mocks/server:server_mocks\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/router/auto_sni_integration_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/integration/http_integration.h\"\n\nnamespace Envoy {\nnamespace {\nclass AutoSniIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                               public Event::TestUsingSimulatedTime,\n                               public HttpIntegrationTest {\npublic:\n  AutoSniIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void setup() {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP1);\n\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto& cluster_config = bootstrap.mutable_static_resources()->mutable_clusters()->at(0);\n      cluster_config.mutable_upstream_http_protocol_options()->set_auto_sni(true);\n\n      envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n      auto* validation_context =\n          tls_context.mutable_common_tls_context()->mutable_validation_context();\n      validation_context->mutable_trusted_ca()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n      cluster_config.mutable_transport_socket()->set_name(\"envoy.transport_sockets.tls\");\n      cluster_config.mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context);\n    });\n\n    HttpIntegrationTest::initialize();\n  }\n\n  void createUpstreams() override {\n    addFakeUpstream(createUpstreamSslContext(), FakeHttpConnection::Type::HTTP1);\n  }\n\n  Network::TransportSocketFactoryPtr createUpstreamSslContext() {\n    
envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n    auto* common_tls_context = tls_context.mutable_common_tls_context();\n    auto* tls_cert = common_tls_context->add_tls_certificates();\n    tls_cert->mutable_certificate_chain()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcert.pem\"));\n    tls_cert->mutable_private_key()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamkey.pem\"));\n\n    auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n        tls_context, factory_context_);\n\n    static Stats::Scope* upstream_stats_store = new Stats::IsolatedStoreImpl();\n    return std::make_unique<Extensions::TransportSockets::Tls::ServerSslSocketFactory>(\n        std::move(cfg), context_manager_, *upstream_stats_store, std::vector<std::string>{});\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AutoSniIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AutoSniIntegrationTest, BasicAutoSniTest) {\n  setup();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const auto response_ = sendRequestAndWaitForResponse(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"localhost\"}},\n      0, default_response_headers_, 0);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response_->complete());\n\n  const Extensions::TransportSockets::Tls::SslHandshakerImpl* ssl_socket =\n      dynamic_cast<const Extensions::TransportSockets::Tls::SslHandshakerImpl*>(\n          fake_upstream_connection_->connection().ssl().get());\n  EXPECT_STREQ(\"localhost\", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name));\n}\n\nTEST_P(AutoSniIntegrationTest, PassingNotDNS) {\n  setup();\n  
codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const auto response_ = sendRequestAndWaitForResponse(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"127.0.0.1\"}},\n      0, default_response_headers_, 0);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response_->complete());\n\n  const Extensions::TransportSockets::Tls::SslHandshakerImpl* ssl_socket =\n      dynamic_cast<const Extensions::TransportSockets::Tls::SslHandshakerImpl*>(\n          fake_upstream_connection_->connection().ssl().get());\n  EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name));\n}\n\nTEST_P(AutoSniIntegrationTest, PassingHostWithoutPort) {\n  setup();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  const auto response_ = sendRequestAndWaitForResponse(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"example.com:8080\"}},\n      0, default_response_headers_, 0);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response_->complete());\n\n  const Extensions::TransportSockets::Tls::SslHandshakerImpl* ssl_socket =\n      dynamic_cast<const Extensions::TransportSockets::Tls::SslHandshakerImpl*>(\n          fake_upstream_connection_->connection().ssl().get());\n  EXPECT_STREQ(\"example.com\", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name));\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/router/config_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/http/router/v3/router.pb.h\"\n#include \"envoy/extensions/filters/http/router/v3/router.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/router/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace RouterFilter {\nnamespace {\n\nTEST(RouterFilterConfigTest, SimpleRouterFilterConfig) {\n  const std::string yaml_string = R\"EOF(\n  dynamic_stats: true\n  start_child_span: true\n  )EOF\";\n\n  envoy::extensions::filters::http::router::v3::Router proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config, false, true);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RouterFilterConfig factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats.\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_));\n  cb(filter_callback);\n}\n\nTEST(RouterFilterConfigTest, BadRouterFilterConfig) {\n  const std::string yaml_string = R\"EOF(\n  dynamic_stats: true\n  route: {}\n  )EOF\";\n\n  envoy::extensions::filters::http::router::v3::Router proto_config;\n  EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config, false, true),\n                          EnvoyException, \"route: Cannot find field\");\n}\n\nTEST(RouterFilterConfigTest, RouterFilterWithUnsupportedStrictHeaderCheck) {\n  const std::string yaml = R\"EOF(\n  strict_check_headers:\n  - unsupportedHeader\n  )EOF\";\n\n  envoy::extensions::filters::http::router::v3::Router router_config;\n  TestUtility::loadFromYaml(yaml, router_config, false, true);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RouterFilterConfig 
factory;\n  EXPECT_THROW_WITH_MESSAGE(\n      factory.createFilterFactoryFromProto(router_config, \"stats.\", context),\n      ProtoValidationException,\n      \"Proto constraint validation failed (RouterValidationError.StrictCheckHeaders[i]: \"\n      \"[\\\"value must be in list \\\" [\"\n      \"\\\"x-envoy-upstream-rq-timeout-ms\\\" \"\n      \"\\\"x-envoy-upstream-rq-per-try-timeout-ms\\\" \"\n      \"\\\"x-envoy-max-retries\\\" \"\n      \"\\\"x-envoy-retry-grpc-on\\\" \"\n      \"\\\"x-envoy-retry-on\\\"\"\n      \"]]): strict_check_headers: \\\"unsupportedHeader\\\"\\n\");\n}\n\nTEST(RouterFilterConfigTest, RouterV2Filter) {\n  envoy::extensions::filters::http::router::v3::Router router_config;\n  router_config.mutable_dynamic_stats()->set_value(true);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RouterFilterConfig factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(router_config, \"stats.\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_)).Times(1);\n  cb(filter_callback);\n}\n\nTEST(RouterFilterConfigTest, RouterFilterWithEmptyProtoConfig) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RouterFilterConfig factory;\n  Http::FilterFactoryCb cb =\n      factory.createFilterFactoryFromProto(*factory.createEmptyConfigProto(), \"stats.\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_)).Times(1);\n  cb(filter_callback);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(RouterFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.router\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace RouterFilter\n} // 
namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/squash/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"squash_filter_test\",\n    srcs = [\"squash_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.squash\",\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/http/squash:squash_filter_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"squash_filter_integration_test\",\n    srcs = [\"squash_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.squash\",\n    deps = [\n        \"//source/extensions/filters/http/squash:config\",\n        \"//test/integration:http_integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.http.squash\",\n    deps = [\n        \"//source/extensions/filters/http/squash:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/squash/config_test.cc",
    "content": "#include \"envoy/extensions/filters/http/squash/v3/squash.pb.h\"\n#include \"envoy/extensions/filters/http/squash/v3/squash.pb.validate.h\"\n\n#include \"extensions/filters/http/squash/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Squash {\nnamespace {\n\nTEST(SquashFilterConfigFactoryTest, SquashFilterCorrectYaml) {\n  const std::string yaml_string = R\"EOF(\n  cluster: fake_cluster\n  attachment_template:\n    a: b\n  request_timeout: 1.001s\n  attachment_poll_period: 2.002s\n  attachment_timeout: 3.003s\n  )EOF\";\n\n  envoy::extensions::filters::http::squash::v3::Squash proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SquashFilterConfigFactory factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamDecoderFilter(_));\n  cb(filter_callback);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(SquashFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.squash\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace\n} // namespace Squash\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/squash/squash_filter_integration_test.cc",
    "content": "#include <cstdlib>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/integration/autonomous_upstream.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/environment.h\"\n\n#define ENV_VAR_VALUE \"somerandomevalue\"\n\nusing Envoy::Protobuf::util::MessageDifferencer;\n\nnamespace Envoy {\n\nclass SquashFilterIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                    public HttpIntegrationTest {\npublic:\n  SquashFilterIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  ~SquashFilterIntegrationTest() override {\n    if (fake_squash_connection_) {\n      AssertionResult result = fake_squash_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_squash_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n  }\n\n  FakeStreamPtr sendSquash(const std::string& status, const std::string& body) {\n\n    if (!fake_squash_connection_) {\n      AssertionResult result =\n          fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_squash_connection_);\n      RELEASE_ASSERT(result, result.message());\n    }\n\n    FakeStreamPtr request_stream;\n    AssertionResult result =\n        fake_squash_connection_->waitForNewStream(*dispatcher_, request_stream);\n    RELEASE_ASSERT(result, result.message());\n    result = request_stream->waitForEndStream(*dispatcher_);\n    RELEASE_ASSERT(result, result.message());\n    if (body.empty()) {\n      request_stream->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", status}}, true);\n    } else {\n      request_stream->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", status}}, false);\n      Buffer::OwnedImpl responseBuffer(body);\n      
request_stream->encodeData(responseBuffer, true);\n    }\n\n    return request_stream;\n  }\n\n  FakeStreamPtr sendSquashCreate(const std::string& body = SQUASH_CREATE_DEFAULT) {\n    return sendSquash(\"201\", body);\n  }\n\n  FakeStreamPtr sendSquashOk(const std::string& body) { return sendSquash(\"200\", body); }\n\n  IntegrationStreamDecoderPtr sendDebugRequest(IntegrationCodecClientPtr& codec_client) {\n    Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"},\n                                           {\":authority\", \"www.solo.io\"},\n                                           {\"x-squash-debug\", \"true\"},\n                                           {\":path\", \"/getsomething\"}};\n    return codec_client->makeHeaderOnlyRequest(headers);\n  }\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  /**\n   * Initializer for an individual integration test.\n   */\n  void initialize() override {\n    TestEnvironment::setEnvVar(\"SQUASH_ENV_TEST\", ENV_VAR_VALUE, 1);\n\n    autonomous_upstream_ = true;\n\n    config_helper_.addFilter(ConfigHelper::defaultSquashFilter());\n\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* squash_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      squash_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      squash_cluster->set_name(\"squash\");\n      squash_cluster->mutable_http2_protocol_options();\n    });\n\n    HttpIntegrationTest::initialize();\n    codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  }\n\n  /**\n   * Initialize before every test.\n   */\n  void SetUp() override { initialize(); }\n\n  FakeHttpConnectionPtr fake_squash_connection_;\n  static const std::string SQUASH_CREATE_DEFAULT;\n  static std::string squashGetAttachmentBodyWithState(const std::string& state) {\n    return 
\"{\\\"metadata\\\":{\\\"name\\\":\\\"oF8iVdiJs5\\\"},\\\"spec\\\":{\"\n           \"\\\"attachment\\\":{\\\"a\\\":\\\"b\\\"},\\\"image\\\":\\\"debug\\\",\\\"node\\\":\"\n           \"\\\"debug-node\\\"},\\\"status\\\":{\\\"state\\\":\\\"\" +\n           state + \"\\\"}}\";\n  }\n};\n\nconst std::string SquashFilterIntegrationTest::SQUASH_CREATE_DEFAULT =\n    \"{\\\"metadata\\\":{\\\"name\\\":\\\"oF8iVdiJs5\\\"},\"\n    \"\\\"spec\\\":{\\\"attachment\\\":{\\\"a\\\":\\\"b\\\"},\"\n    \"\\\"image\\\":\\\"debug\\\",\\\"node\\\":\\\"debug-node\\\"},\"\n    \"\\\"status\\\":{\\\"state\\\":\\\"none\\\"}}\";\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SquashFilterIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(SquashFilterIntegrationTest, TestHappyPath) {\n  auto response = sendDebugRequest(codec_client_);\n\n  // Respond to create request\n  FakeStreamPtr create_stream = sendSquashCreate();\n\n  // Respond to read attachment request\n  FakeStreamPtr get_stream = sendSquashOk(squashGetAttachmentBodyWithState(\"attached\"));\n\n  response->waitForEndStream();\n\n  EXPECT_EQ(\"POST\", create_stream->headers().getMethodValue());\n  EXPECT_EQ(\"/api/v2/debugattachment/\", create_stream->headers().getPathValue());\n  // Make sure the env var was replaced\n  ProtobufWkt::Struct actualbody;\n  TestUtility::loadFromJson(create_stream->body().toString(), actualbody);\n\n  ProtobufWkt::Struct expectedbody;\n  TestUtility::loadFromJson(\"{\\\"spec\\\": { \\\"attachment\\\" : { \\\"env\\\": \\\"\" ENV_VAR_VALUE\n                            \"\\\" } , \\\"match_request\\\":true} }\",\n                            expectedbody);\n\n  EXPECT_TRUE(MessageDifferencer::Equals(expectedbody, actualbody));\n  // The second request should be for the created object\n  EXPECT_EQ(\"GET\", get_stream->headers().getMethodValue());\n  
EXPECT_EQ(\"/api/v2/debugattachment/oF8iVdiJs5\", get_stream->headers().getPathValue());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(SquashFilterIntegrationTest, ErrorAttaching) {\n  auto response = sendDebugRequest(codec_client_);\n\n  // Respond to create request\n  FakeStreamPtr create_stream = sendSquashCreate();\n  // Respond to read attachment request with error!\n  FakeStreamPtr get_stream = sendSquashOk(squashGetAttachmentBodyWithState(\"error\"));\n\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(SquashFilterIntegrationTest, TimeoutAttaching) {\n  auto response = sendDebugRequest(codec_client_);\n\n  // Respond to create request\n  FakeStreamPtr create_stream = sendSquashCreate();\n  // Respond to read attachment. since attachment_timeout is smaller than attachment_poll_period\n  // config, just one response is enough, as the filter will timeout (and continue the iteration)\n  // before issuing another get attachment request.\n  FakeStreamPtr get_stream = sendSquashOk(squashGetAttachmentBodyWithState(\"attaching\"));\n\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(SquashFilterIntegrationTest, ErrorNoSquashServer) {\n  auto response = sendDebugRequest(codec_client_);\n\n  // Don't respond to anything. 
squash filter should timeout within\n  // squash_request_timeout and continue the request.\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(SquashFilterIntegrationTest, BadCreateResponse) {\n  auto response = sendDebugRequest(codec_client_);\n\n  // Respond to create request\n  FakeStreamPtr create_stream = sendSquashCreate(\"not json...\");\n\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(SquashFilterIntegrationTest, BadGetResponse) {\n  auto response = sendDebugRequest(codec_client_);\n\n  // Respond to create request\n  FakeStreamPtr create_stream = sendSquashCreate();\n  // Respond to read attachment request with error!\n  FakeStreamPtr get_stream = sendSquashOk(\"not json...\");\n\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/squash/squash_filter_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/http/squash/v3/squash.pb.h\"\n\n#include \"common/http/message_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/http/squash/squash_filter.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"fmt/format.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\nusing Envoy::Protobuf::util::MessageDifferencer;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Squash {\nnamespace {\n\nSquashFilterConfig constructSquashFilterConfigFromYaml(\n    const std::string& yaml, NiceMock<Envoy::Server::Configuration::MockFactoryContext>& context) {\n  envoy::extensions::filters::http::squash::v3::Squash proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  return SquashFilterConfig(proto_config, context.cluster_manager_);\n}\n\nvoid EXPECT_JSON_EQ(const std::string& expected, const std::string& actual) {\n  ProtobufWkt::Struct actualjson;\n  TestUtility::loadFromJson(actual, actualjson);\n\n  ProtobufWkt::Struct expectedjson;\n  TestUtility::loadFromJson(expected, expectedjson);\n\n  EXPECT_TRUE(MessageDifferencer::Equals(expectedjson, actualjson));\n}\n\n} // namespace\n\nTEST(SoloFilterConfigTest, V2ApiConversion) {\n  const std::string yaml = R\"EOF(\n  cluster: fake_cluster\n  attachment_template:\n    a: b\n  request_timeout: 1.001s\n  attachment_poll_period: 2.002s\n  attachment_timeout: 3.003s\n  )EOF\";\n\n  NiceMock<Envoy::Server::Configuration::MockFactoryContext> factory_context;\n  EXPECT_CALL(factory_context.cluster_manager_, get(Eq(\"fake_cluster\"))).Times(1);\n\n  const auto config = 
constructSquashFilterConfigFromYaml(yaml, factory_context);\n  EXPECT_EQ(\"fake_cluster\", config.clusterName());\n  EXPECT_JSON_EQ(\"{\\\"a\\\":\\\"b\\\"}\", config.attachmentJson());\n  EXPECT_EQ(std::chrono::milliseconds(1001), config.requestTimeout());\n  EXPECT_EQ(std::chrono::milliseconds(2002), config.attachmentPollPeriod());\n  EXPECT_EQ(std::chrono::milliseconds(3003), config.attachmentTimeout());\n}\n\nTEST(SoloFilterConfigTest, NoCluster) {\n  const std::string yaml = R\"EOF(\n  cluster: fake_cluster\n  attachment_template: {}\n  )EOF\";\n\n  NiceMock<Envoy::Server::Configuration::MockFactoryContext> factory_context;\n\n  EXPECT_CALL(factory_context.cluster_manager_, get(Eq(\"fake_cluster\"))).WillOnce(Return(nullptr));\n\n  EXPECT_THROW_WITH_MESSAGE(constructSquashFilterConfigFromYaml(yaml, factory_context),\n                            Envoy::EnvoyException,\n                            \"squash filter: unknown cluster 'fake_cluster' in squash config\");\n}\n\nTEST(SoloFilterConfigTest, ParsesEnvironment) {\n  const std::string yaml = R\"EOF(\n  cluster: squash\n  attachment_template:\n    a: \"{{ MISSING_ENV }}\"\n\n  )EOF\";\n  const std::string expected_json = \"{\\\"a\\\":\\\"\\\"}\";\n\n  NiceMock<Envoy::Server::Configuration::MockFactoryContext> factory_context;\n  EXPECT_CALL(factory_context.cluster_manager_, get(Eq(\"squash\"))).Times(1);\n\n  const auto config = constructSquashFilterConfigFromYaml(yaml, factory_context);\n  EXPECT_JSON_EQ(expected_json, config.attachmentJson());\n}\n\nTEST(SoloFilterConfigTest, ParsesAndEscapesEnvironment) {\n  TestEnvironment::setEnvVar(\"ESCAPE_ENV\", \"\\\"\", 1);\n\n  const std::string yaml = R\"EOF(\n  cluster: squash\n  attachment_template:\n    a: \"{{ ESCAPE_ENV }}\"\n  )EOF\";\n\n  const std::string expected_json = \"{\\\"a\\\":\\\"\\\\\\\"\\\"}\";\n\n  NiceMock<Envoy::Server::Configuration::MockFactoryContext> factory_context;\n  EXPECT_CALL(factory_context.cluster_manager_, 
get(Eq(\"squash\"))).Times(1);\n  const auto config = constructSquashFilterConfigFromYaml(yaml, factory_context);\n  EXPECT_JSON_EQ(expected_json, config.attachmentJson());\n}\nTEST(SoloFilterConfigTest, TwoEnvironmentVariables) {\n  TestEnvironment::setEnvVar(\"ENV1\", \"1\", 1);\n  TestEnvironment::setEnvVar(\"ENV2\", \"2\", 1);\n\n  const std::string yaml = R\"EOF(\n  cluster: squash\n  attachment_template:\n    a: \"{{ ENV1 }}-{{ ENV2 }}\"\n  )EOF\";\n\n  const std::string expected_json = \"{\\\"a\\\":\\\"1-2\\\"}\";\n\n  NiceMock<Envoy::Server::Configuration::MockFactoryContext> factory_context;\n  auto config = constructSquashFilterConfigFromYaml(yaml, factory_context);\n  EXPECT_JSON_EQ(expected_json, config.attachmentJson());\n}\n\nTEST(SoloFilterConfigTest, ParsesEnvironmentInComplexTemplate) {\n  TestEnvironment::setEnvVar(\"CONF_ENV\", \"some-config-value\", 1);\n\n  const std::string yaml = R\"EOF(\n  cluster: squash\n  attachment_template:\n    a:\n    - e: \"{{ CONF_ENV }}\"\n    - c: d\n  )EOF\";\n\n  const std::string expected_json = R\"EOF({\"a\":[{\"e\": \"some-config-value\"},{\"c\":\"d\"}]})EOF\";\n\n  NiceMock<Envoy::Server::Configuration::MockFactoryContext> factory_context;\n  EXPECT_CALL(factory_context.cluster_manager_, get(Eq(\"squash\"))).Times(1);\n  const auto config = constructSquashFilterConfigFromYaml(yaml, factory_context);\n  EXPECT_JSON_EQ(expected_json, config.attachmentJson());\n}\n\nclass SquashFilterTest : public testing::Test {\npublic:\n  SquashFilterTest() : request_(&cm_.async_client_) {}\n\nprotected:\n  void SetUp() override {}\n\n  void initFilter() {\n    envoy::extensions::filters::http::squash::v3::Squash p;\n    p.set_cluster(\"squash\");\n    config_ = std::make_shared<SquashFilterConfig>(p, factory_context_.cluster_manager_);\n\n    filter_ = std::make_shared<SquashFilter>(config_, cm_);\n    filter_->setDecoderFilterCallbacks(filter_callbacks_);\n  }\n\n  // start a downstream request marked with the squash 
header.\n  // note that a side effect of this is that\n  // a call to the squash server will be made.\n  // use popPendingCallback() to reply to that call.\n  void startDownstreamRequest() {\n    initFilter();\n\n    attachmentTimeout_timer_ =\n        new NiceMock<Envoy::Event::MockTimer>(&filter_callbacks_.dispatcher_);\n\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"squash\"))\n        .WillRepeatedly(ReturnRef(cm_.async_client_));\n\n    expectAsyncClientSend();\n\n    EXPECT_CALL(*attachmentTimeout_timer_, enableTimer(config_->attachmentTimeout(), _));\n\n    Envoy::Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"},\n                                                  {\":authority\", \"www.solo.io\"},\n                                                  {\"x-squash-debug\", \"true\"},\n                                                  {\":path\", \"/getsomething\"}};\n    EXPECT_EQ(Envoy::Http::FilterHeadersStatus::StopIteration,\n              filter_->decodeHeaders(headers, false));\n  }\n\n  void doDownstreamRequest() {\n    startDownstreamRequest();\n\n    Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n    EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map));\n    Http::TestRequestTrailerMapImpl trailers;\n    // Complete a full request cycle\n    Envoy::Buffer::OwnedImpl buffer(\"nothing here\");\n    EXPECT_EQ(Envoy::Http::FilterDataStatus::StopIterationAndBuffer,\n              filter_->decodeData(buffer, false));\n    EXPECT_EQ(Envoy::Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(trailers));\n  }\n\n  void expectAsyncClientSend() {\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n        .WillOnce(Invoke(\n            [&](Envoy::Http::RequestMessagePtr&, Envoy::Http::AsyncClient::Callbacks& cb,\n                const Http::AsyncClient::RequestOptions&) -> Envoy::Http::AsyncClient::Request* {\n              callbacks_.push_back(&cb);\n              return 
&request_;\n            }));\n  }\n\n  void completeRequest(const std::string& status, const std::string& body) {\n    Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", status}}}));\n    msg->body().add(body);\n    popPendingCallback()->onSuccess(request_, std::move(msg));\n  }\n\n  void completeCreateRequest() {\n    // return the create request\n    completeRequest(\"201\", R\"EOF({\"metadata\":{\"name\":\"a\"}})EOF\");\n  }\n\n  void completeGetStatusRequest(const std::string& status) {\n    completeRequest(\"200\", fmt::format(R\"EOF({{\"status\":{{\"state\":\"{}\"}}}})EOF\", status));\n  }\n\n  Envoy::Http::AsyncClient::Callbacks* popPendingCallback() {\n    if (callbacks_.empty()) {\n      // Can't use ASSERT_* as this is not a test function\n      throw std::underflow_error(\"empty deque\");\n    }\n\n    auto callbacks = callbacks_.front();\n    callbacks_.pop_front();\n    return callbacks;\n  }\n\n  NiceMock<Envoy::Http::MockStreamDecoderFilterCallbacks> filter_callbacks_;\n  NiceMock<Envoy::Server::Configuration::MockFactoryContext> factory_context_;\n  NiceMock<Envoy::Event::MockTimer>* attachmentTimeout_timer_{};\n  NiceMock<Envoy::Upstream::MockClusterManager> cm_;\n  Envoy::Http::MockAsyncClientRequest request_;\n  SquashFilterConfigSharedPtr config_;\n  std::shared_ptr<SquashFilter> filter_;\n  std::deque<Envoy::Http::AsyncClient::Callbacks*> callbacks_;\n};\n\nTEST_F(SquashFilterTest, DecodeHeaderContinuesOnClientFail) {\n  initFilter();\n\n  EXPECT_CALL(cm_, httpAsyncClientForCluster(\"squash\")).WillOnce(ReturnRef(cm_.async_client_));\n\n  EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n      .WillOnce(Invoke(\n          [&](Envoy::Http::RequestMessagePtr&, Envoy::Http::AsyncClient::Callbacks& callbacks,\n              const Http::AsyncClient::RequestOptions&) -> Envoy::Http::AsyncClient::Request* {\n            callbacks.onFailure(request_, 
Envoy::Http::AsyncClient::FailureReason::Reset);\n            // Intentionally return nullptr (instead of request handle) to trigger a particular\n            // code path.\n            return nullptr;\n          }));\n\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"},\n                                         {\":authority\", \"www.solo.io\"},\n                                         {\"x-squash-debug\", \"true\"},\n                                         {\":path\", \"/getsomething\"}};\n\n  Envoy::Buffer::OwnedImpl data(\"nothing here\");\n  EXPECT_EQ(Envoy::Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(Envoy::Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  Http::TestRequestTrailerMapImpl trailers;\n  EXPECT_EQ(Envoy::Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n}\n\nTEST_F(SquashFilterTest, DecodeContinuesOnCreateAttachmentFail) {\n  startDownstreamRequest();\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(*attachmentTimeout_timer_, disableTimer());\n  popPendingCallback()->onFailure(request_, Envoy::Http::AsyncClient::FailureReason::Reset);\n\n  Envoy::Buffer::OwnedImpl data(\"nothing here\");\n  EXPECT_EQ(Envoy::Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  Http::TestRequestTrailerMapImpl trailers;\n  EXPECT_EQ(Envoy::Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n}\n\nTEST_F(SquashFilterTest, DoesNothingWithNoHeader) {\n  initFilter();\n  EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0);\n\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"},\n                                         {\":authority\", \"www.solo.io\"},\n                                         {\"x-not-squash-debug\", \"true\"},\n                                         {\":path\", \"/getsomething\"}};\n\n  Envoy::Buffer::OwnedImpl data(\"nothing here\");\n  
EXPECT_EQ(Envoy::Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n  EXPECT_EQ(Envoy::Http::FilterDataStatus::Continue, filter_->decodeData(data, false));\n  Http::TestRequestTrailerMapImpl trailers;\n  EXPECT_EQ(Envoy::Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers));\n}\n\nTEST_F(SquashFilterTest, Timeout) {\n  startDownstreamRequest();\n\n  // invoke timeout\n  Envoy::Buffer::OwnedImpl buffer(\"nothing here\");\n\n  EXPECT_EQ(Envoy::Http::FilterDataStatus::StopIterationAndBuffer,\n            filter_->decodeData(buffer, false));\n\n  EXPECT_CALL(request_, cancel());\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n\n  EXPECT_CALL(filter_callbacks_.dispatcher_, setTrackedObject(_)).Times(2);\n  attachmentTimeout_timer_->invokeCallback();\n\n  EXPECT_EQ(Envoy::Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false));\n}\n\nTEST_F(SquashFilterTest, HappyPathWithTrailers) {\n  doDownstreamRequest();\n  // Expect the get attachment request\n  expectAsyncClientSend();\n  completeCreateRequest();\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  completeGetStatusRequest(\"attached\");\n}\n\nTEST_F(SquashFilterTest, CheckRetryPollingAttachment) {\n  doDownstreamRequest();\n  // Expect the get attachment request\n  expectAsyncClientSend();\n  completeCreateRequest();\n\n  auto retry_timer = new NiceMock<Envoy::Event::MockTimer>(&filter_callbacks_.dispatcher_);\n\n  EXPECT_CALL(*retry_timer, enableTimer(config_->attachmentPollPeriod(), _));\n  completeGetStatusRequest(\"attaching\");\n\n  // Expect the second get attachment request\n  expectAsyncClientSend();\n  EXPECT_CALL(filter_callbacks_.dispatcher_, setTrackedObject(_)).Times(2);\n  retry_timer->invokeCallback();\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  completeGetStatusRequest(\"attached\");\n}\n\nTEST_F(SquashFilterTest, CheckRetryPollingAttachmentOnFailure) {\n  doDownstreamRequest();\n  // Expect the first get 
attachment request\n  expectAsyncClientSend();\n  completeCreateRequest();\n\n  auto retry_timer = new NiceMock<Envoy::Event::MockTimer>(&filter_callbacks_.dispatcher_);\n  EXPECT_CALL(*retry_timer, enableTimer(config_->attachmentPollPeriod(), _));\n  popPendingCallback()->onFailure(request_, Envoy::Http::AsyncClient::FailureReason::Reset);\n\n  // Expect the second get attachment request\n  expectAsyncClientSend();\n\n  EXPECT_CALL(filter_callbacks_.dispatcher_, setTrackedObject(_)).Times(2);\n  retry_timer->invokeCallback();\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  completeGetStatusRequest(\"attached\");\n}\n\nTEST_F(SquashFilterTest, DestroyedInTheMiddle) {\n  doDownstreamRequest();\n  // Expect the get attachment request\n  expectAsyncClientSend();\n  completeCreateRequest();\n\n  auto retry_timer = new NiceMock<Envoy::Event::MockTimer>(&filter_callbacks_.dispatcher_);\n  EXPECT_CALL(*retry_timer, enableTimer(config_->attachmentPollPeriod(), _));\n  completeGetStatusRequest(\"attaching\");\n\n  EXPECT_CALL(*attachmentTimeout_timer_, disableTimer());\n  EXPECT_CALL(*retry_timer, disableTimer());\n\n  filter_->onDestroy();\n}\n\nTEST_F(SquashFilterTest, InvalidJsonForCreateAttachment) {\n  doDownstreamRequest();\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  completeRequest(\"201\", \"This is not a JSON object\");\n}\n\nTEST_F(SquashFilterTest, InvalidJsonForGetAttachment) {\n  doDownstreamRequest();\n  // Expect the get attachment request\n  expectAsyncClientSend();\n  completeCreateRequest();\n\n  auto retry_timer = new NiceMock<Envoy::Event::MockTimer>(&filter_callbacks_.dispatcher_);\n  EXPECT_CALL(*retry_timer, enableTimer(config_->attachmentPollPeriod(), _));\n  completeRequest(\"200\", \"This is not a JSON object\");\n}\n\nTEST_F(SquashFilterTest, InvalidResponseWithNoBody) {\n  doDownstreamRequest();\n  // Expect the get attachment request\n  expectAsyncClientSend();\n  completeCreateRequest();\n\n  auto retry_timer = new 
NiceMock<Envoy::Event::MockTimer>(&filter_callbacks_.dispatcher_);\n  EXPECT_CALL(*retry_timer, enableTimer(config_->attachmentPollPeriod(), _));\n  Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-length\", \"0\"}}}));\n  popPendingCallback()->onSuccess(request_, std::move(msg));\n}\n\nTEST_F(SquashFilterTest, DestroyedInFlight) {\n  doDownstreamRequest();\n\n  EXPECT_CALL(request_, cancel());\n  EXPECT_CALL(*attachmentTimeout_timer_, disableTimer());\n\n  filter_->onDestroy();\n}\n\nTEST_F(SquashFilterTest, TimerExpiresInline) {\n  initFilter();\n\n  attachmentTimeout_timer_ = new NiceMock<Envoy::Event::MockTimer>(&filter_callbacks_.dispatcher_);\n  EXPECT_CALL(*attachmentTimeout_timer_, enableTimer(config_->attachmentTimeout(), _))\n      .WillOnce(Invoke([&](const std::chrono::milliseconds&, const ScopeTrackedObject* scope) {\n        attachmentTimeout_timer_->scope_ = scope;\n        attachmentTimeout_timer_->enabled_ = true;\n        // timer expires inline\n        EXPECT_CALL(filter_callbacks_.dispatcher_, setTrackedObject(_)).Times(2);\n        attachmentTimeout_timer_->invokeCallback();\n      }));\n\n  EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n      .WillOnce(Invoke([&](Envoy::Http::RequestMessagePtr&, Envoy::Http::AsyncClient::Callbacks&,\n                           const Http::AsyncClient::RequestOptions&)\n                           -> Envoy::Http::AsyncClient::Request* { return &request_; }));\n\n  EXPECT_CALL(request_, cancel());\n  Http::TestRequestHeaderMapImpl headers{{\":method\", \"GET\"},\n                                         {\":authority\", \"www.solo.io\"},\n                                         {\"x-squash-debug\", \"true\"},\n                                         {\":path\", \"/getsomething\"}};\n  EXPECT_EQ(Envoy::Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));\n}\n\n} // 
namespace Squash\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/tap/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"common\",\n    hdrs = [\"common.h\"],\n    deps = [\n        \"//source/extensions/filters/http/tap:tap_config_interface\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"tap_filter_test\",\n    srcs = [\"tap_filter_test.cc\"],\n    extension_name = \"envoy.filters.http.tap\",\n    deps = [\n        \":common\",\n        \"//source/extensions/filters/http/tap:config\",\n        \"//source/extensions/filters/http/tap:tap_config_interface\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"tap_config_impl_test\",\n    srcs = [\"tap_config_impl_test.cc\"],\n    extension_name = \"envoy.filters.http.tap\",\n    deps = [\n        \":common\",\n        \"//source/extensions/filters/http/tap:tap_config_impl\",\n        \"//test/extensions/common/tap:common\",\n        \"//test/mocks:common_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"tap_filter_integration_test\",\n    srcs = [\"tap_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.http.tap\",\n    deps = [\n        \"//source/extensions/filters/http/tap:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/tap/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/tap/common.h",
    "content": "#include \"extensions/filters/http/tap/tap_config.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\n\nclass MockHttpTapConfig : public HttpTapConfig {\npublic:\n  HttpPerRequestTapperPtr createPerRequestTapper(uint64_t stream_id) override {\n    return HttpPerRequestTapperPtr{createPerRequestTapper_(stream_id)};\n  }\n\n  Extensions::Common::Tap::PerTapSinkHandleManagerPtr\n  createPerTapSinkHandleManager(uint64_t trace_id) override {\n    return Extensions::Common::Tap::PerTapSinkHandleManagerPtr{\n        createPerTapSinkHandleManager_(trace_id)};\n  }\n\n  MOCK_METHOD(HttpPerRequestTapper*, createPerRequestTapper_, (uint64_t stream_id));\n  MOCK_METHOD(Extensions::Common::Tap::PerTapSinkHandleManager*, createPerTapSinkHandleManager_,\n              (uint64_t trace_id));\n  MOCK_METHOD(uint32_t, maxBufferedRxBytes, (), (const));\n  MOCK_METHOD(uint32_t, maxBufferedTxBytes, (), (const));\n  MOCK_METHOD(Extensions::Common::Tap::Matcher::MatchStatusVector, createMatchStatusVector, (),\n              (const));\n  MOCK_METHOD(const Extensions::Common::Tap::Matcher&, rootMatcher, (), (const));\n  MOCK_METHOD(bool, streaming, (), (const));\n};\n\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/tap/tap_config_impl_test.cc",
    "content": "#include \"extensions/filters/http/tap/tap_config_impl.h\"\n\n#include \"test/extensions/common/tap/common.h\"\n#include \"test/extensions/filters/http/tap/common.h\"\n#include \"test/mocks/common.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::_;\nusing testing::Assign;\nusing testing::ByMove;\nusing testing::InSequence;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\nnamespace {\n\nnamespace TapCommon = Extensions::Common::Tap;\n\nclass HttpPerRequestTapperImplTest : public testing::Test {\npublic:\n  HttpPerRequestTapperImplTest() {\n    EXPECT_CALL(*config_, createPerTapSinkHandleManager_(1)).WillOnce(Return(sink_manager_));\n    EXPECT_CALL(*config_, createMatchStatusVector())\n        .WillOnce(Return(ByMove(TapCommon::Matcher::MatchStatusVector(1))));\n    EXPECT_CALL(*config_, rootMatcher()).WillRepeatedly(ReturnRef(matcher_));\n    EXPECT_CALL(matcher_, onNewStream(_)).WillOnce(SaveArgAddress(&statuses_));\n    tapper_ = std::make_unique<HttpPerRequestTapperImpl>(config_, 1);\n  }\n\n  std::shared_ptr<MockHttpTapConfig> config_{std::make_shared<MockHttpTapConfig>()};\n  // Raw pointer, returned via mock to unique_ptr.\n  TapCommon::MockPerTapSinkHandleManager* sink_manager_ =\n      new TapCommon::MockPerTapSinkHandleManager;\n  std::unique_ptr<HttpPerRequestTapperImpl> tapper_;\n  std::vector<TapCommon::MatcherPtr> matchers_{1};\n  TapCommon::MockMatcher matcher_{matchers_};\n  TapCommon::Matcher::MatchStatusVector* statuses_;\n  const Http::TestRequestHeaderMapImpl request_headers_{{\"a\", \"b\"}};\n  const Http::TestRequestTrailerMapImpl request_trailers_{{\"c\", \"d\"}};\n  const Http::TestResponseHeaderMapImpl response_headers_{{\"e\", \"f\"}};\n  const Http::TestResponseTrailerMapImpl response_trailers_{{\"g\", \"h\"}};\n};\n\n// Buffered tap with no match.\nTEST_F(HttpPerRequestTapperImplTest, BufferedFlowNoTap) {\n  
EXPECT_CALL(*config_, streaming()).WillRepeatedly(Return(false));\n  EXPECT_CALL(*config_, maxBufferedRxBytes()).WillRepeatedly(Return(1024));\n  EXPECT_CALL(*config_, maxBufferedTxBytes()).WillRepeatedly(Return(1024));\n\n  InSequence s;\n  EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _));\n  tapper_->onRequestHeaders(request_headers_);\n  EXPECT_CALL(matcher_, onRequestBody(_, _));\n  tapper_->onRequestBody(Buffer::OwnedImpl(\"hello\"));\n  EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _));\n  tapper_->onRequestTrailers(request_trailers_);\n  EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _));\n  tapper_->onResponseHeaders(response_headers_);\n  EXPECT_CALL(matcher_, onResponseBody(_, _));\n  tapper_->onResponseBody(Buffer::OwnedImpl(\"world\"));\n  EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _));\n  tapper_->onResponseTrailers(response_trailers_);\n  EXPECT_FALSE(tapper_->onDestroyLog());\n}\n\n// Buffered tap with a match.\nTEST_F(HttpPerRequestTapperImplTest, BufferedFlowTap) {\n  EXPECT_CALL(*config_, streaming()).WillRepeatedly(Return(false));\n  EXPECT_CALL(*config_, maxBufferedRxBytes()).WillRepeatedly(Return(1024));\n  EXPECT_CALL(*config_, maxBufferedTxBytes()).WillRepeatedly(Return(1024));\n\n  InSequence s;\n  EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _));\n  tapper_->onRequestHeaders(request_headers_);\n  EXPECT_CALL(matcher_, onRequestBody(_, _));\n  tapper_->onRequestBody(Buffer::OwnedImpl(\"hello\"));\n  EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _));\n  tapper_->onRequestTrailers(request_trailers_);\n  EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _));\n  tapper_->onResponseHeaders(response_headers_);\n  EXPECT_CALL(matcher_, onResponseBody(_, _));\n  tapper_->onResponseBody(Buffer::OwnedImpl(\"world\"));\n  EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _));\n  tapper_->onResponseTrailers(response_trailers_);\n  (*statuses_)[0].matches_ = true;\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                               
   R\"EOF(\nhttp_buffered_trace:\n  request:\n    headers:\n      - key: a\n        value: b\n    body:\n      as_bytes: aGVsbG8=\n    trailers:\n      - key: c\n        value: d\n  response:\n    headers:\n      - key: e\n        value: f\n    body:\n      as_bytes: d29ybGQ=\n    trailers:\n      - key: g\n        value: h\n)EOF\")));\n  EXPECT_TRUE(tapper_->onDestroyLog());\n}\n\n// Streamed tap where we match on request trailers and have to flush request headers/body.\nTEST_F(HttpPerRequestTapperImplTest, StreamedMatchRequestTrailers) {\n  EXPECT_CALL(*config_, streaming()).WillRepeatedly(Return(true));\n  EXPECT_CALL(*config_, maxBufferedRxBytes()).WillRepeatedly(Return(1024));\n  EXPECT_CALL(*config_, maxBufferedTxBytes()).WillRepeatedly(Return(1024));\n\n  InSequence s;\n  EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _));\n  tapper_->onRequestHeaders(request_headers_);\n  EXPECT_CALL(matcher_, onRequestBody(_, _));\n  tapper_->onRequestBody(Buffer::OwnedImpl(\"hello\"));\n  EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _))\n      .WillOnce(Assign(&(*statuses_)[0].matches_, true));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  request_headers:\n    headers:\n      - key: a\n        value: b\n)EOF\")));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  request_body_chunk:\n    as_bytes: aGVsbG8=\n)EOF\")));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  request_trailers:\n    headers:\n      - key: c\n        value: d\n)EOF\")));\n  tapper_->onRequestTrailers(request_trailers_);\n  EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  
trace_id: 1\n  response_headers:\n    headers:\n      - key: e\n        value: f\n)EOF\")));\n  tapper_->onResponseHeaders(response_headers_);\n  EXPECT_CALL(matcher_, onResponseBody(_, _));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  response_body_chunk:\n    as_bytes: d29ybGQ=\n)EOF\")));\n  tapper_->onResponseBody(Buffer::OwnedImpl(\"world\"));\n  EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  response_trailers:\n    headers:\n      - key: g\n        value: h\n)EOF\")));\n  tapper_->onResponseTrailers(response_trailers_);\n  EXPECT_TRUE(tapper_->onDestroyLog());\n}\n\n// Streamed tap where we match on response trailers and have to flush everything.\nTEST_F(HttpPerRequestTapperImplTest, StreamedMatchResponseTrailers) {\n  EXPECT_CALL(*config_, streaming()).WillRepeatedly(Return(true));\n  EXPECT_CALL(*config_, maxBufferedRxBytes()).WillRepeatedly(Return(1024));\n  EXPECT_CALL(*config_, maxBufferedTxBytes()).WillRepeatedly(Return(1024));\n\n  InSequence s;\n  EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _));\n  tapper_->onRequestHeaders(request_headers_);\n  EXPECT_CALL(matcher_, onRequestBody(_, _));\n  tapper_->onRequestBody(Buffer::OwnedImpl(\"hello\"));\n  EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _));\n  tapper_->onRequestTrailers(request_trailers_);\n  EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _));\n  tapper_->onResponseHeaders(response_headers_);\n  EXPECT_CALL(matcher_, onResponseBody(_, _));\n  tapper_->onResponseBody(Buffer::OwnedImpl(\"world\"));\n  EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _))\n      .WillOnce(Assign(&(*statuses_)[0].matches_, true));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  
trace_id: 1\n  request_headers:\n    headers:\n      - key: a\n        value: b\n)EOF\")));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  request_body_chunk:\n    as_bytes: aGVsbG8=\n)EOF\")));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  request_trailers:\n    headers:\n      - key: c\n        value: d\n)EOF\")));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  response_headers:\n    headers:\n      - key: e\n        value: f\n)EOF\")));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  response_body_chunk:\n    as_bytes: d29ybGQ=\n)EOF\")));\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nhttp_streamed_trace_segment:\n  trace_id: 1\n  response_trailers:\n    headers:\n      - key: g\n        value: h\n)EOF\")));\n  tapper_->onResponseTrailers(response_trailers_);\n  EXPECT_TRUE(tapper_->onDestroyLog());\n}\n\n} // namespace\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/tap/tap_filter_integration_test.cc",
    "content": "#include <fstream>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/data/tap/v3/wrapper.pb.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass TapIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                           public HttpIntegrationTest {\npublic:\n  TapIntegrationTest()\n      // Note: This test must use HTTP/2 because of the lack of early close detection for\n      // HTTP/1 on OSX. In this test we close the admin /tap stream when we don't want any\n      // more data, and without immediate close detection we can't have a flake free test.\n      // Thus, we use HTTP/2 for everything here.\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {\n\n    // Also use HTTP/2 for upstream so that we can fully test trailers.\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void initializeFilter(const std::string& filter_config) {\n    config_helper_.addFilter(filter_config);\n    initialize();\n  }\n\n  const envoy::config::core::v3::HeaderValue*\n  findHeader(const std::string& key,\n             const Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValue>& headers) {\n    for (const auto& header : headers) {\n      if (header.key() == key) {\n        return &header;\n      }\n    }\n\n    return nullptr;\n  }\n\n  void makeRequest(const Http::TestRequestHeaderMapImpl& request_headers,\n                   const std::vector<std::string>& request_body_chunks,\n                   const Http::TestRequestTrailerMapImpl* request_trailers,\n                   const Http::TestResponseHeaderMapImpl& response_headers,\n                   const std::vector<std::string>& response_body_chunks,\n                   const Http::TestResponseTrailerMapImpl* response_trailers) {\n    IntegrationStreamDecoderPtr 
decoder;\n    if (request_trailers == nullptr && request_body_chunks.empty()) {\n      decoder = codec_client_->makeHeaderOnlyRequest(request_headers);\n    } else {\n      auto result = codec_client_->startRequest(request_headers);\n      decoder = std::move(result.second);\n\n      for (uint64_t index = 0; index < request_body_chunks.size(); index++) {\n        Buffer::OwnedImpl data(request_body_chunks[index]);\n        result.first.encodeData(data, index == request_body_chunks.size() - 1 &&\n                                          request_trailers == nullptr);\n      }\n      if (request_trailers != nullptr) {\n        result.first.encodeTrailers(*request_trailers);\n      }\n    }\n\n    waitForNextUpstreamRequest();\n\n    upstream_request_->encodeHeaders(response_headers,\n                                     response_trailers == nullptr && response_body_chunks.empty());\n    for (uint64_t index = 0; index < response_body_chunks.size(); index++) {\n      Buffer::OwnedImpl data(response_body_chunks[index]);\n      upstream_request_->encodeData(data, index == response_body_chunks.size() - 1 &&\n                                              response_trailers == nullptr);\n    }\n    if (response_trailers != nullptr) {\n      upstream_request_->encodeTrailers(*response_trailers);\n    }\n\n    decoder->waitForEndStream();\n  }\n\n  void startAdminRequest(const std::string& admin_request_yaml) {\n    admin_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"admin\")));\n    const Http::TestRequestHeaderMapImpl admin_request_headers{\n        {\":method\", \"POST\"}, {\":path\", \"/tap\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n    admin_response_ = admin_client_->makeRequestWithBody(admin_request_headers, admin_request_yaml);\n    admin_response_->waitForHeaders();\n    EXPECT_EQ(\"200\", admin_response_->headers().getStatusValue());\n    EXPECT_FALSE(admin_response_->complete());\n  }\n\n  std::string getTempPathPrefix() {\n    const 
std::string path_prefix = TestEnvironment::temporaryDirectory() + \"/tap_integration_\" +\n                                    testing::UnitTest::GetInstance()->current_test_info()->name();\n    TestEnvironment::createPath(path_prefix);\n    return path_prefix + \"/\";\n  }\n\n  std::vector<envoy::data::tap::v3::TraceWrapper>\n  readTracesFromFile(const std::string& path_prefix) {\n    // Find the written .pb file and verify it.\n    auto files = TestUtility::listFiles(path_prefix, false);\n    auto pb_file_name = std::find_if(files.begin(), files.end(), [](const std::string& s) {\n      return absl::EndsWith(s, MessageUtil::FileExtensions::get().ProtoBinaryLengthDelimited);\n    });\n    EXPECT_NE(pb_file_name, files.end());\n\n    std::vector<envoy::data::tap::v3::TraceWrapper> traces;\n    std::ifstream pb_file(*pb_file_name, std::ios_base::binary);\n    Protobuf::io::IstreamInputStream stream(&pb_file);\n    Protobuf::io::CodedInputStream coded_stream(&stream);\n    while (true) {\n      uint32_t message_size;\n      if (!coded_stream.ReadVarint32(&message_size)) {\n        break;\n      }\n\n      traces.emplace_back();\n\n      auto limit = coded_stream.PushLimit(message_size);\n      EXPECT_TRUE(traces.back().ParseFromCodedStream(&coded_stream));\n      coded_stream.PopLimit(limit);\n    }\n\n    return traces;\n  }\n\n  void verifyStaticFilePerTap(const std::string& filter_config) {\n    const std::string path_prefix = getTempPathPrefix();\n    initializeFilter(fmt::format(filter_config, path_prefix));\n\n    // Initial request/response with tap.\n    codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n    makeRequest(request_headers_tap_, {}, nullptr, response_headers_no_tap_, {}, nullptr);\n    codec_client_->close();\n    test_server_->waitForCounterGe(\"http.config_test.downstream_cx_destroy\", 1);\n\n    // Find the written .pb file and verify it.\n    auto files = TestUtility::listFiles(path_prefix, false);\n    auto 
pb_file = std::find_if(files.begin(), files.end(),\n                                [](const std::string& s) { return absl::EndsWith(s, \".pb\"); });\n    ASSERT_NE(pb_file, files.end());\n\n    envoy::data::tap::v3::TraceWrapper trace;\n    TestUtility::loadFromFile(*pb_file, trace, *api_);\n    EXPECT_TRUE(trace.has_http_buffered_trace());\n  }\n\n  const Http::TestRequestHeaderMapImpl request_headers_tap_{{\":method\", \"GET\"},\n                                                            {\":path\", \"/\"},\n                                                            {\":scheme\", \"http\"},\n                                                            {\":authority\", \"host\"},\n                                                            {\"foo\", \"bar\"}};\n\n  const Http::TestRequestHeaderMapImpl request_headers_no_tap_{\n      {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n\n  const Http::TestRequestTrailerMapImpl request_trailers_{{\"foo_trailer\", \"bar\"}};\n\n  const Http::TestResponseHeaderMapImpl response_headers_tap_{{\":status\", \"200\"}, {\"bar\", \"baz\"}};\n\n  const Http::TestResponseHeaderMapImpl response_headers_no_tap_{{\":status\", \"200\"}};\n\n  const Http::TestResponseTrailerMapImpl response_trailers_{{\"bar_trailer\", \"baz\"}};\n\n  const std::string admin_filter_config_ =\n      R\"EOF(\nname: tap\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap\n  common_config:\n    admin_config:\n      config_id: test_config_id\n)EOF\";\n\n  IntegrationCodecClientPtr admin_client_;\n  IntegrationStreamDecoderPtr admin_response_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, TapIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verify a static configuration with an any matcher, writing to a file per tap sink.\nTEST_P(TapIntegrationTest, 
StaticFilePerTap) {\n  const std::string filter_config =\n      R\"EOF(\nname: tap\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\n  common_config:\n    static_config:\n      match:\n        any_match: true\n      output_config:\n        sinks:\n          - format: PROTO_BINARY\n            file_per_tap:\n              path_prefix: {}\n)EOF\";\n\n  verifyStaticFilePerTap(filter_config);\n}\n\n// Verify the match field takes precedence over the deprecated match_config field.\nTEST_P(TapIntegrationTest, DEPRECATED_FEATURE_TEST(StaticFilePerTapWithMatchConfigAndMatch)) {\n  const std::string filter_config =\n      R\"EOF(\nname: tap\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\n  common_config:\n    static_config:\n      # match_config should be ignored by the match field.\n      match_config:\n        not_match:\n          any_match: true\n      match:\n        any_match: true\n      output_config:\n        sinks:\n          - format: PROTO_BINARY\n            file_per_tap:\n              path_prefix: {}\n)EOF\";\n\n  verifyStaticFilePerTap(filter_config);\n}\n\n// Verify the deprecated match_config field.\nTEST_P(TapIntegrationTest, DEPRECATED_FEATURE_TEST(StaticFilePerTapWithMatchConfig)) {\n  const std::string filter_config =\n      R\"EOF(\nname: tap\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap\n  common_config:\n    static_config:\n      match_config:\n        any_match: true\n      output_config:\n        sinks:\n          - format: PROTO_BINARY\n            file_per_tap:\n              path_prefix: {}\n)EOF\";\n\n  verifyStaticFilePerTap(filter_config);\n}\n\n// Verify a basic tap flow using the admin handler.\nTEST_P(TapIntegrationTest, AdminBasicFlow) {\n  initializeFilter(admin_filter_config_);\n\n  // Initial request/response with no tap.\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  
makeRequest(request_headers_tap_, {}, nullptr, response_headers_no_tap_, {}, nullptr);\n\n  const std::string admin_request_yaml =\n      R\"EOF(\nconfig_id: test_config_id\ntap_config:\n  match:\n    or_match:\n      rules:\n        - http_request_headers_match:\n            headers:\n              - name: foo\n                exact_match: bar\n        - http_response_headers_match:\n            headers:\n              - name: bar\n                exact_match: baz\n  output_config:\n    sinks:\n      - streaming_admin: {}\n)EOF\";\n\n  // Setup a tap and disconnect it without any request/response.\n  startAdminRequest(admin_request_yaml);\n  admin_client_->close();\n  test_server_->waitForGaugeEq(\"http.admin.downstream_rq_active\", 0);\n\n  // Second request/response with no tap.\n  makeRequest(request_headers_tap_, {}, nullptr, response_headers_no_tap_, {}, nullptr);\n\n  // Setup the tap again and leave it open.\n  startAdminRequest(admin_request_yaml);\n\n  // Do a request which should tap, matching on request headers.\n  makeRequest(request_headers_tap_, {}, nullptr, response_headers_no_tap_, {}, nullptr);\n\n  // Wait for the tap message.\n  admin_response_->waitForBodyData(1);\n  envoy::data::tap::v3::TraceWrapper trace;\n  TestUtility::loadFromYaml(admin_response_->body(), trace);\n  EXPECT_EQ(trace.http_buffered_trace().request().headers().size(), 8);\n  EXPECT_EQ(trace.http_buffered_trace().response().headers().size(), 4);\n  admin_response_->clearBody();\n\n  // Do a request which should not tap.\n  makeRequest(request_headers_no_tap_, {}, nullptr, response_headers_no_tap_, {}, nullptr);\n\n  // Do a request which should tap, matching on response headers.\n  makeRequest(request_headers_no_tap_, {}, nullptr, response_headers_tap_, {}, nullptr);\n\n  // Wait for the tap message.\n  admin_response_->waitForBodyData(1);\n  TestUtility::loadFromYaml(admin_response_->body(), trace);\n  EXPECT_EQ(trace.http_buffered_trace().request().headers().size(), 7);\n  
EXPECT_EQ(\n      \"http\",\n      findHeader(\"x-forwarded-proto\", trace.http_buffered_trace().request().headers())->value());\n  EXPECT_EQ(trace.http_buffered_trace().response().headers().size(), 5);\n  EXPECT_NE(nullptr, findHeader(\"date\", trace.http_buffered_trace().response().headers()));\n  EXPECT_EQ(\"baz\", findHeader(\"bar\", trace.http_buffered_trace().response().headers())->value());\n\n  admin_client_->close();\n  test_server_->waitForGaugeEq(\"http.admin.downstream_rq_active\", 0);\n\n  // Now setup a tap that matches on logical AND.\n  const std::string admin_request_yaml2 =\n      R\"EOF(\nconfig_id: test_config_id\ntap_config:\n  match:\n    and_match:\n      rules:\n        - http_request_headers_match:\n            headers:\n              - name: foo\n                exact_match: bar\n        - http_response_headers_match:\n            headers:\n              - name: bar\n                exact_match: baz\n  output_config:\n    sinks:\n      - streaming_admin: {}\n)EOF\";\n\n  startAdminRequest(admin_request_yaml2);\n\n  // Do a request that matches, but the response does not match. No tap.\n  makeRequest(request_headers_tap_, {}, nullptr, response_headers_no_tap_, {}, nullptr);\n\n  // Do a request that doesn't match, but the response does match. No tap.\n  makeRequest(request_headers_no_tap_, {}, nullptr, response_headers_tap_, {}, nullptr);\n\n  // Do a request that matches and a response that matches. 
Should tap.\n  makeRequest(request_headers_tap_, {}, nullptr, response_headers_tap_, {}, nullptr);\n\n  // Wait for the tap message.\n  admin_response_->waitForBodyData(1);\n  TestUtility::loadFromYaml(admin_response_->body(), trace);\n\n  admin_client_->close();\n  EXPECT_EQ(3UL, test_server_->counter(\"http.config_test.tap.rq_tapped\")->value());\n  test_server_->waitForGaugeEq(\"http.admin.downstream_rq_active\", 0);\n}\n\n// Verify both request and response trailer matching works.\nTEST_P(TapIntegrationTest, AdminTrailers) {\n  initializeFilter(admin_filter_config_);\n\n  const std::string admin_request_yaml =\n      R\"EOF(\nconfig_id: test_config_id\ntap_config:\n  match:\n    and_match:\n      rules:\n        - http_request_trailers_match:\n            headers:\n              - name: foo_trailer\n                exact_match: bar\n        - http_response_trailers_match:\n            headers:\n              - name: bar_trailer\n                exact_match: baz\n  output_config:\n    sinks:\n      - streaming_admin: {}\n)EOF\";\n\n  startAdminRequest(admin_request_yaml);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  makeRequest(request_headers_no_tap_, {}, &request_trailers_, response_headers_no_tap_, {},\n              &response_trailers_);\n\n  envoy::data::tap::v3::TraceWrapper trace;\n  admin_response_->waitForBodyData(1);\n  TestUtility::loadFromYaml(admin_response_->body(), trace);\n  EXPECT_EQ(\"bar\",\n            findHeader(\"foo_trailer\", trace.http_buffered_trace().request().trailers())->value());\n  EXPECT_EQ(\"baz\",\n            findHeader(\"bar_trailer\", trace.http_buffered_trace().response().trailers())->value());\n\n  admin_client_->close();\n}\n\n// Verify admin tapping with request/response body as bytes.\nTEST_P(TapIntegrationTest, AdminBodyAsBytes) {\n  initializeFilter(admin_filter_config_);\n\n  const std::string admin_request_yaml =\n      R\"EOF(\nconfig_id: test_config_id\ntap_config:\n  
match:\n    any_match: true\n  output_config:\n    sinks:\n      - streaming_admin: {}\n)EOF\";\n\n  startAdminRequest(admin_request_yaml);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  makeRequest(request_headers_no_tap_, {{\"hello\"}}, nullptr, response_headers_no_tap_, {{\"world\"}},\n              nullptr);\n  envoy::data::tap::v3::TraceWrapper trace;\n  admin_response_->waitForBodyData(1);\n  TestUtility::loadFromYaml(admin_response_->body(), trace);\n  EXPECT_EQ(\"hello\", trace.http_buffered_trace().request().body().as_bytes());\n  EXPECT_FALSE(trace.http_buffered_trace().request().body().truncated());\n  EXPECT_EQ(\"world\", trace.http_buffered_trace().response().body().as_bytes());\n  EXPECT_FALSE(trace.http_buffered_trace().response().body().truncated());\n\n  admin_client_->close();\n}\n\n// Verify admin tapping with request/response body as strings.\nTEST_P(TapIntegrationTest, AdminBodyAsString) {\n  initializeFilter(admin_filter_config_);\n\n  const std::string admin_request_yaml =\n      R\"EOF(\nconfig_id: test_config_id\ntap_config:\n  match:\n    any_match: true\n  output_config:\n    sinks:\n      - format: JSON_BODY_AS_STRING\n        streaming_admin: {}\n)EOF\";\n\n  startAdminRequest(admin_request_yaml);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  makeRequest(request_headers_no_tap_, {{\"hello\"}}, nullptr, response_headers_no_tap_, {{\"world\"}},\n              nullptr);\n  envoy::data::tap::v3::TraceWrapper trace;\n  admin_response_->waitForBodyData(1);\n  TestUtility::loadFromYaml(admin_response_->body(), trace);\n  EXPECT_EQ(\"hello\", trace.http_buffered_trace().request().body().as_string());\n  EXPECT_FALSE(trace.http_buffered_trace().request().body().truncated());\n  EXPECT_EQ(\"world\", trace.http_buffered_trace().response().body().as_string());\n  EXPECT_FALSE(trace.http_buffered_trace().response().body().truncated());\n\n  
admin_client_->close();\n}\n\n// Verify admin tapping with truncated request/response body.\nTEST_P(TapIntegrationTest, AdminBodyAsBytesTruncated) {\n  initializeFilter(admin_filter_config_);\n\n  const std::string admin_request_yaml =\n      R\"EOF(\nconfig_id: test_config_id\ntap_config:\n  match:\n    any_match: true\n  output_config:\n    max_buffered_rx_bytes: 3\n    max_buffered_tx_bytes: 4\n    sinks:\n      - streaming_admin: {}\n)EOF\";\n\n  startAdminRequest(admin_request_yaml);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  makeRequest(request_headers_no_tap_, {{\"hello\"}}, nullptr, response_headers_no_tap_, {{\"world\"}},\n              nullptr);\n  envoy::data::tap::v3::TraceWrapper trace;\n  admin_response_->waitForBodyData(1);\n  TestUtility::loadFromYaml(admin_response_->body(), trace);\n  EXPECT_EQ(\"hel\", trace.http_buffered_trace().request().body().as_bytes());\n  EXPECT_TRUE(trace.http_buffered_trace().request().body().truncated());\n  EXPECT_EQ(\"worl\", trace.http_buffered_trace().response().body().as_bytes());\n  EXPECT_TRUE(trace.http_buffered_trace().response().body().truncated());\n\n  admin_client_->close();\n}\n\n// Verify a static configuration with a request header matcher, writing to a streamed file per tap\n// sink.\nTEST_P(TapIntegrationTest, StaticFilePerTapStreaming) {\n  const std::string filter_config =\n      R\"EOF(\nname: tap\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap\n  common_config:\n    static_config:\n      match_config:\n        http_request_headers_match:\n          headers:\n            - name: foo\n              exact_match: bar\n      output_config:\n        streaming: true\n        sinks:\n          - format: PROTO_BINARY_LENGTH_DELIMITED\n            file_per_tap:\n              path_prefix: {}\n)EOF\";\n\n  const std::string path_prefix = getTempPathPrefix();\n  initializeFilter(fmt::format(filter_config, path_prefix));\n\n  
// Initial request/response with tap.\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  makeRequest(request_headers_tap_, {\"hello\"}, &request_trailers_, response_headers_no_tap_,\n              {\"world\"}, &response_trailers_);\n  codec_client_->close();\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_destroy\", 1);\n\n  std::vector<envoy::data::tap::v3::TraceWrapper> traces = readTracesFromFile(path_prefix);\n  ASSERT_EQ(6, traces.size());\n  EXPECT_TRUE(traces[0].http_streamed_trace_segment().has_request_headers());\n  EXPECT_EQ(\"hello\", traces[1].http_streamed_trace_segment().request_body_chunk().as_bytes());\n  EXPECT_TRUE(traces[2].http_streamed_trace_segment().has_request_trailers());\n  EXPECT_TRUE(traces[3].http_streamed_trace_segment().has_response_headers());\n  EXPECT_EQ(\"world\", traces[4].http_streamed_trace_segment().response_body_chunk().as_bytes());\n  EXPECT_TRUE(traces[5].http_streamed_trace_segment().has_response_trailers());\n\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.tap.rq_tapped\")->value());\n}\n\n// Verify a static configuration with a response header matcher, writing to a streamed file per tap\n// sink. 
This verifies request buffering.\nTEST_P(TapIntegrationTest, StaticFilePerTapStreamingWithRequestBuffering) {\n  const std::string filter_config =\n      R\"EOF(\nname: tap\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap\n  common_config:\n    static_config:\n      match_config:\n        http_response_headers_match:\n          headers:\n            - name: bar\n              exact_match: baz\n      output_config:\n        streaming: true\n        sinks:\n          - format: PROTO_BINARY_LENGTH_DELIMITED\n            file_per_tap:\n              path_prefix: {}\n)EOF\";\n\n  const std::string path_prefix = getTempPathPrefix();\n  initializeFilter(fmt::format(filter_config, path_prefix));\n\n  // Initial request/response with tap.\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  makeRequest(request_headers_no_tap_, {\"hello\"}, &request_trailers_, response_headers_tap_,\n              {\"world\"}, &response_trailers_);\n  codec_client_->close();\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_destroy\", 1);\n\n  std::vector<envoy::data::tap::v3::TraceWrapper> traces = readTracesFromFile(path_prefix);\n  ASSERT_EQ(6, traces.size());\n  EXPECT_TRUE(traces[0].http_streamed_trace_segment().has_request_headers());\n  EXPECT_EQ(\"hello\", traces[1].http_streamed_trace_segment().request_body_chunk().as_bytes());\n  EXPECT_TRUE(traces[2].http_streamed_trace_segment().has_request_trailers());\n  EXPECT_TRUE(traces[3].http_streamed_trace_segment().has_response_headers());\n  EXPECT_EQ(\"world\", traces[4].http_streamed_trace_segment().response_body_chunk().as_bytes());\n  EXPECT_TRUE(traces[5].http_streamed_trace_segment().has_response_trailers());\n\n  EXPECT_EQ(1UL, test_server_->counter(\"http.config_test.tap.rq_tapped\")->value());\n}\n\n// Verify that body matching works.\nTEST_P(TapIntegrationTest, AdminBodyMatching) {\n  initializeFilter(admin_filter_config_);\n\n  const 
std::string admin_request_yaml =\n      R\"EOF(\nconfig_id: test_config_id\ntap_config:\n  match:\n    and_match:\n      rules:\n        - http_request_generic_body_match:\n            patterns:\n              - string_match: request\n        - http_response_generic_body_match:\n            patterns:\n              - string_match: response\n  output_config:\n    sinks:\n      - format: JSON_BODY_AS_STRING\n        streaming_admin: {}\n)EOF\";\n\n  startAdminRequest(admin_request_yaml);\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  // Should not tap, request and response body do not match.\n  makeRequest(request_headers_no_tap_, {{\"This is test payload\"}}, nullptr,\n              response_headers_no_tap_, {{\"This is test payload\"}}, nullptr);\n  // Should not tap, request matches but response body does not match.\n  makeRequest(request_headers_no_tap_, {{\"This is request payload\"}}, nullptr,\n              response_headers_no_tap_, {{\"This is test payload\"}}, nullptr);\n  // Should tap, request and response body match.\n  makeRequest(request_headers_no_tap_, {{\"This is request payload\"}}, nullptr,\n              response_headers_no_tap_, {{\"This is resp\"}, {\"onse payload\"}}, nullptr);\n\n  envoy::data::tap::v3::TraceWrapper trace;\n  admin_response_->waitForBodyData(1);\n  TestUtility::loadFromYaml(admin_response_->body(), trace);\n  EXPECT_NE(std::string::npos,\n            trace.http_buffered_trace().request().body().as_string().find(\"request\"));\n  EXPECT_NE(std::string::npos,\n            trace.http_buffered_trace().response().body().as_string().find(\"response\"));\n\n  admin_client_->close();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/tap/tap_filter_test.cc",
    "content": "#include \"extensions/filters/http/tap/config.h\"\n#include \"extensions/filters/http/tap/tap_filter.h\"\n\n#include \"test/extensions/filters/http/tap/common.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::InSequence;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace TapFilter {\nnamespace {\n\nclass MockFilterConfig : public FilterConfig {\npublic:\n  MOCK_METHOD(HttpTapConfigSharedPtr, currentConfig, ());\n  FilterStats& stats() override { return stats_; }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  FilterStats stats_{Filter::generateStats(\"foo\", stats_store_)};\n};\n\nclass MockHttpPerRequestTapper : public HttpPerRequestTapper {\npublic:\n  MOCK_METHOD(void, onRequestHeaders, (const Http::RequestHeaderMap& headers));\n  MOCK_METHOD(void, onRequestBody, (const Buffer::Instance& data));\n  MOCK_METHOD(void, onRequestTrailers, (const Http::RequestTrailerMap& headers));\n  MOCK_METHOD(void, onResponseHeaders, (const Http::ResponseHeaderMap& headers));\n  MOCK_METHOD(void, onResponseBody, (const Buffer::Instance& data));\n  MOCK_METHOD(void, onResponseTrailers, (const Http::ResponseTrailerMap& headers));\n  MOCK_METHOD(bool, onDestroyLog, ());\n};\n\nclass TapFilterTest : public testing::Test {\npublic:\n  void setup(bool has_config) {\n    if (has_config) {\n      http_tap_config_ = std::make_shared<MockHttpTapConfig>();\n    }\n\n    EXPECT_CALL(*filter_config_, currentConfig()).WillRepeatedly(Return(http_tap_config_));\n    filter_ = std::make_unique<Filter>(filter_config_);\n\n    if (has_config) {\n      EXPECT_CALL(callbacks_, streamId());\n      http_per_request_tapper_ = new MockHttpPerRequestTapper();\n      EXPECT_CALL(*http_tap_config_, createPerRequestTapper_(_))\n          
.WillOnce(Return(http_per_request_tapper_));\n    }\n\n    filter_->setDecoderFilterCallbacks(callbacks_);\n  }\n\n  std::shared_ptr<MockFilterConfig> filter_config_{new MockFilterConfig()};\n  std::shared_ptr<MockHttpTapConfig> http_tap_config_;\n  MockHttpPerRequestTapper* http_per_request_tapper_;\n  std::unique_ptr<Filter> filter_;\n  StreamInfo::MockStreamInfo stream_info_;\n  Http::MockStreamDecoderFilterCallbacks callbacks_;\n};\n\n// Verify filter functionality when there is no tap config.\nTEST_F(TapFilterTest, NoConfig) {\n  InSequence s;\n  setup(false);\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl request_body;\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(request_body, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n  Buffer::OwnedImpl response_body;\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, false));\n  Http::TestResponseTrailerMapImpl response_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers));\n  Http::MetadataMap metadata;\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata));\n\n  filter_->log(&request_headers, &response_headers, &response_trailers, stream_info_);\n}\n\n// Verify filter functionality when there is a tap config.\nTEST_F(TapFilterTest, Config) {\n  InSequence s;\n  setup(true);\n\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_CALL(*http_per_request_tapper_, 
onRequestHeaders(_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl request_body(\"hello\");\n  EXPECT_CALL(*http_per_request_tapper_, onRequestBody(_));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(request_body, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_CALL(*http_per_request_tapper_, onRequestTrailers(_));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers));\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue,\n            filter_->encode100ContinueHeaders(response_headers));\n  EXPECT_CALL(*http_per_request_tapper_, onResponseHeaders(_));\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false));\n  Buffer::OwnedImpl response_body(\"hello\");\n  EXPECT_CALL(*http_per_request_tapper_, onResponseBody(_));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, false));\n  Http::TestResponseTrailerMapImpl response_trailers;\n  EXPECT_CALL(*http_per_request_tapper_, onResponseTrailers(_));\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers));\n\n  EXPECT_CALL(*http_per_request_tapper_, onDestroyLog()).WillOnce(Return(true));\n  filter_->log(&request_headers, &response_headers, &response_trailers, stream_info_);\n  EXPECT_EQ(1UL, filter_config_->stats_.rq_tapped_.value());\n\n  // Workaround InSequence/shared_ptr mock leak.\n  EXPECT_TRUE(testing::Mock::VerifyAndClearExpectations(http_tap_config_.get()));\n}\n\nTEST(TapFilterConfigTest, InvalidProto) {\n  const std::string filter_config =\n      R\"EOF(\n  common_config:\n    static_config:\n      match:\n        any_match: true\n      output_config:\n        sinks:\n          - format: JSON_BODY_AS_STRING\n            streaming_admin: {}\n)EOF\";\n\n  
envoy::extensions::filters::http::tap::v3::Tap config;\n  TestUtility::loadFromYaml(filter_config, config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  TapFilterFactory factory;\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(config, \"stats\", context),\n                            EnvoyException,\n                            \"Error: Specifying admin streaming output without configuring admin.\");\n}\n\nTEST(TapFilterConfigTest, NeitherMatchNorMatchConfig) {\n  const std::string filter_config =\n      R\"EOF(\n  common_config:\n    static_config:\n      output_config:\n        sinks:\n          - format: PROTO_BINARY\n            file_per_tap:\n              path_prefix: abc\n)EOF\";\n\n  envoy::extensions::filters::http::tap::v3::Tap config;\n  TestUtility::loadFromYaml(filter_config, config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  TapFilterFactory factory;\n\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(config, \"stats\", context),\n                            EnvoyException,\n                            fmt::format(\"Neither match nor match_config is set in TapConfig: {}\",\n                                        config.common_config().static_config().DebugString()));\n}\n\n} // namespace\n} // namespace TapFilter\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//bazel:envoy_select.bzl\",\n    \"envoy_select_wasm\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"wasm_filter_test\",\n    size = \"enormous\",  # For WAVM without precompilation. TODO: add precompilation.\n    srcs = [\"wasm_filter_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/filters/http/wasm/test_data:async_call_rust.wasm\",\n        \"//test/extensions/filters/http/wasm/test_data:body_rust.wasm\",\n        \"//test/extensions/filters/http/wasm/test_data:headers_rust.wasm\",\n        \"//test/extensions/filters/http/wasm/test_data:metadata_rust.wasm\",\n        \"//test/extensions/filters/http/wasm/test_data:shared_data_rust.wasm\",\n        \"//test/extensions/filters/http/wasm/test_data:shared_queue_rust.wasm\",\n        \"//test/extensions/filters/http/wasm/test_data:test_cpp.wasm\",\n    ]),\n    extension_name = \"envoy.filters.http.wasm\",\n    deps = [\n        \"//source/common/http:message_lib\",\n        \"//source/extensions/filters/http/wasm:wasm_filter_lib\",\n        \"//test/extensions/filters/http/wasm/test_data:test_cpp_plugin\",\n        \"//test/mocks/network:connection_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/test_common:wasm_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    size = \"enormous\",  # For WAVM without precompilation. 
TODO: add precompilation.\n    srcs = [\"config_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/filters/http/wasm/test_data:test_cpp.wasm\",\n    ]),\n    extension_name = \"envoy.filters.http.wasm\",\n    deps = [\n        \"//source/common/common:base64_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/crypto:utility_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/extensions/common/crypto:utility_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/filters/http/wasm:config\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/extensions/filters/http/wasm/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/config_test.cc",
    "content": "#include <chrono>\n\n#include \"envoy/extensions/filters/http/wasm/v3/wasm.pb.validate.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/common/hex.h\"\n#include \"common/crypto/utility.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/filters/http/wasm/config.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\n\nusing Common::Wasm::WasmException;\n\nnamespace HttpFilters {\nnamespace Wasm {\n\n#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM)\nclass WasmFilterConfigTest : public Event::TestUsingSimulatedTime,\n                             public testing::TestWithParam<std::string> {\nprotected:\n  WasmFilterConfigTest() : api_(Api::createApiForTest(stats_store_)) {\n    ON_CALL(context_, api()).WillByDefault(ReturnRef(*api_));\n    ON_CALL(context_, scope()).WillByDefault(ReturnRef(stats_store_));\n    ON_CALL(context_, listenerMetadata()).WillByDefault(ReturnRef(listener_metadata_));\n    EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager_));\n    ON_CALL(context_, clusterManager()).WillByDefault(ReturnRef(cluster_manager_));\n    ON_CALL(context_, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n  }\n\n  void SetUp() override { Envoy::Extensions::Common::Wasm::clearCodeCacheForTesting(); }\n\n  void initializeForRemote() {\n    retry_timer_ = new Event::MockTimer();\n\n    EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n      retry_timer_cb_ = timer_cb;\n      return retry_timer_;\n    }));\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr 
api_;\n  envoy::config::core::v3::Metadata listener_metadata_;\n  Init::ManagerImpl init_manager_{\"init_manager\"};\n  NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  Init::ExpectableWatcherImpl init_watcher_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Event::MockTimer* retry_timer_;\n  Event::TimerCb retry_timer_cb_;\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto testing_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    \"v8\"\n#endif\n#if defined(ENVOY_WASM_V8) && defined(ENVOY_WASM_WAVM)\n    ,\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    \"wavm\"\n#endif\n);\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmFilterConfigTest, testing_values);\n\nTEST_P(WasmFilterConfigTest, JsonLoadFromFileWasm) {\n  const std::string json = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  {\n  \"config\" : {\n  \"vm_config\": {\n    \"runtime\": \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\",\n    \"configuration\": {\n       \"@type\": \"type.googleapis.com/google.protobuf.StringValue\",\n       \"value\": \"some configuration\"\n    },\n    \"code\": {\n      \"local\": {\n        \"filename\": \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"\n      }\n    },\n  }}}\n  )EOF\"));\n\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromJson(json, proto_config);\n  WasmFilterConfig factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  EXPECT_CALL(filter_callback, 
addAccessLogHandler(_));\n  cb(filter_callback);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromFileWasm) {\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      configuration:\n         \"@type\": \"type.googleapis.com/google.protobuf.StringValue\"\n         value: \"some configuration\"\n      code:\n        local:\n          filename: \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"\n  )EOF\"));\n\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  EXPECT_CALL(filter_callback, addAccessLogHandler(_));\n  cb(filter_callback);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromFileWasmFailOpenOk) {\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    fail_open: true\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      configuration:\n         \"@type\": \"type.googleapis.com/google.protobuf.StringValue\"\n         value: \"some configuration\"\n      code:\n        local:\n          filename: \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"\n  )EOF\"));\n\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  
Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  EXPECT_CALL(filter_callback, addAccessLogHandler(_));\n  cb(filter_callback);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadInlineWasm) {\n  const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"));\n  EXPECT_FALSE(code.empty());\n  const std::string yaml = absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                        GetParam(), R\"EOF(\"\n      code: \n        local: { inline_bytes: \")EOF\",\n                                        Base64::encode(code.data(), code.size()), R\"EOF(\" }\n                                        )EOF\");\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  EXPECT_CALL(filter_callback, addAccessLogHandler(_));\n  cb(filter_callback);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadInlineBadCode) {\n  const std::string yaml = absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                        GetParam(), R\"EOF(\"\n      code:\n 
       local:\n          inline_string: \"bad code\"\n  )EOF\");\n\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, \"stats\", context_),\n                            WasmException, \"Unable to create Wasm HTTP filter \");\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromRemoteWasm) {\n  const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"));\n  const std::string sha256 = Hex::encode(\n      Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code)));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          sha256: )EOF\",\n                                                                    sha256));\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster_1\"))\n      .WillOnce(ReturnRef(cluster_manager_.async_client_));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            Http::ResponseMessagePtr response(\n             
   new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n            response->body().add(code);\n            callbacks.onSuccess(request, std::move(response));\n            return &request;\n          }));\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  EXPECT_CALL(filter_callback, addAccessLogHandler(_));\n  cb(filter_callback);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromRemoteWasmFailOnUncachedThenSucceed) {\n  const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"));\n  const std::string sha256 = Hex::encode(\n      Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code)));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      nack_on_code_cache_miss: true\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          sha256: )EOF\",\n                                                                    sha256));\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager_, 
httpAsyncClientForCluster(\"cluster_1\"))\n      .WillOnce(ReturnRef(cluster_manager_.async_client_));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            Http::ResponseMessagePtr response(\n                new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n            response->body().add(code);\n            callbacks.onSuccess(request, std::move(response));\n            return &request;\n          }));\n\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, \"stats\", context_),\n                            WasmException, \"Unable to create Wasm HTTP filter \");\n\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n\n  Init::ManagerImpl init_manager2{\"init_manager2\"};\n  Init::ExpectableWatcherImpl init_watcher2;\n\n  EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager2));\n\n  auto cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n\n  EXPECT_CALL(init_watcher2, ready());\n  init_manager2.initialize(init_watcher2);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  EXPECT_CALL(filter_callback, addAccessLogHandler(_));\n\n  cb(filter_callback);\n  dispatcher_.clearDeferredDeleteList();\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromRemoteWasmFailCachedThenSucceed) {\n  const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir 
}}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"));\n  const std::string sha256 = Hex::encode(\n      Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code)));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      nack_on_code_cache_miss: true\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          retry_policy:\n            num_retries: 0\n          sha256: )EOF\",\n                                                                    sha256));\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster_1\"))\n      .WillRepeatedly(ReturnRef(cluster_manager_.async_client_));\n\n  Http::AsyncClient::Callbacks* async_callbacks = nullptr;\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillRepeatedly(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            // Store the callback the first time through for delayed call.\n            if (!async_callbacks) {\n              async_callbacks = &callbacks;\n            } else {\n              // Subsequent send()s happen inline.\n              callbacks.onSuccess(\n                  request,\n                  Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                      new Http::TestResponseHeaderMapImpl{{\":status\", 
\"503\"}}})});\n            }\n            return &request;\n          }));\n\n  // Case 1: fail and fetch in the background, got 503, cache failure.\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, \"stats\", context_),\n                            WasmException, \"Unable to create Wasm HTTP filter \");\n  // Fail a second time because we are in-progress.\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, \"stats\", context_),\n                            WasmException, \"Unable to create Wasm HTTP filter \");\n  async_callbacks->onSuccess(\n      request, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                   new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}})});\n\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n\n  // Case 2: fail immediately with negatively cached result.\n  Init::ManagerImpl init_manager2{\"init_manager2\"};\n  Init::ExpectableWatcherImpl init_watcher2;\n\n  EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager2));\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, \"stats\", context_),\n                            WasmException, \"Unable to create Wasm HTTP filter \");\n\n  EXPECT_CALL(init_watcher2, ready());\n  init_manager2.initialize(init_watcher2);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n\n  // Wait for negative cache to timeout.\n  ::Envoy::Extensions::Common::Wasm::setTimeOffsetForCodeCacheForTesting(std::chrono::seconds(10));\n\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillRepeatedly(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n        
    Http::ResponseMessagePtr response(\n                new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n            response->body().add(code);\n            callbacks.onSuccess(request, std::move(response));\n            return &request;\n          }));\n\n  // Case 3: fail and fetch in the background, got 200, cache success.\n  Init::ManagerImpl init_manager3{\"init_manager3\"};\n  Init::ExpectableWatcherImpl init_watcher3;\n\n  EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager3));\n\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, \"stats\", context_),\n                            WasmException, \"Unable to create Wasm HTTP filter \");\n\n  EXPECT_CALL(init_watcher3, ready());\n  init_manager3.initialize(init_watcher3);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n\n  // Case 4: success from cache.\n  Init::ManagerImpl init_manager4{\"init_manager4\"};\n  Init::ExpectableWatcherImpl init_watcher4;\n\n  EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager4));\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n\n  EXPECT_CALL(init_watcher4, ready());\n  init_manager4.initialize(init_watcher4);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addStreamFilter(_));\n  EXPECT_CALL(filter_callback, addAccessLogHandler(_));\n\n  cb(filter_callback);\n\n  // Wait for cache to timeout.\n  ::Envoy::Extensions::Common::Wasm::setTimeOffsetForCodeCacheForTesting(\n      std::chrono::seconds(10 + 24 * 3600));\n\n  // Case 5: flush the stale cache.\n  const std::string sha256_2 = sha256 + \"new\";\n  const std::string yaml2 = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    
vm_config:\n      nack_on_code_cache_miss: true\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                     GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          retry_policy:\n            num_retries: 0\n          sha256: )EOF\",\n                                                                     sha256_2));\n\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config2;\n  TestUtility::loadFromYaml(yaml2, proto_config2);\n\n  Init::ManagerImpl init_manager5{\"init_manager4\"};\n  Init::ExpectableWatcherImpl init_watcher5;\n\n  EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager5));\n\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config2, \"stats\", context_),\n                            WasmException, \"Unable to create Wasm HTTP filter \");\n\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n\n  // Case 6: fail and fetch in the background, got 200, cache success.\n  Init::ManagerImpl init_manager6{\"init_manager6\"};\n  Init::ExpectableWatcherImpl init_watcher6;\n\n  EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager6));\n\n  factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n\n  EXPECT_CALL(init_watcher6, ready());\n  init_manager6.initialize(init_watcher6);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n\n  // Case 7: success from cache.\n  Init::ManagerImpl init_manager7{\"init_manager7\"};\n  Init::ExpectableWatcherImpl init_watcher7;\n\n  EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager7));\n\n  Http::FilterFactoryCb cb2 = factory.createFilterFactoryFromProto(proto_config, \"stats\", 
context_);\n\n  EXPECT_CALL(init_watcher7, ready());\n  init_manager7.initialize(init_watcher7);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n\n  Http::MockFilterChainFactoryCallbacks filter_callback2;\n  EXPECT_CALL(filter_callback2, addStreamFilter(_));\n  EXPECT_CALL(filter_callback2, addAccessLogHandler(_));\n\n  cb2(filter_callback2);\n\n  dispatcher_.clearDeferredDeleteList();\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromRemoteConnectionReset) {\n  const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"));\n  const std::string sha256 = Hex::encode(\n      Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code)));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          retry_policy:\n            num_retries: 0\n          sha256: )EOF\",\n                                                                    sha256));\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster_1\"))\n      .WillOnce(ReturnRef(cluster_manager_.async_client_));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> 
Http::AsyncClient::Request* {\n            callbacks.onFailure(request, Envoy::Http::AsyncClient::FailureReason::Reset);\n            return &request;\n          }));\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessWith503) {\n  const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"));\n  const std::string sha256 = Hex::encode(\n      Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code)));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          retry_policy:\n            num_retries: 0\n          sha256: )EOF\",\n                                                                    sha256));\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster_1\"))\n      .WillOnce(ReturnRef(cluster_manager_.async_client_));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            
callbacks.onSuccess(\n                request,\n                Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}})});\n            return &request;\n          }));\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessIncorrectSha256) {\n  const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"));\n  const std::string sha256 = Hex::encode(\n      Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code)));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          retry_policy:\n            num_retries: 0\n          sha256: xxxx )EOF\"));\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster_1\"))\n      .WillOnce(ReturnRef(cluster_manager_.async_client_));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> 
Http::AsyncClient::Request* {\n            Http::ResponseMessagePtr response(\n                new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n            response->body().add(code);\n            callbacks.onSuccess(request, std::move(response));\n            return &request;\n          }));\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromRemoteMultipleRetries) {\n  initializeForRemote();\n  const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"));\n  const std::string sha256 = Hex::encode(\n      Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code)));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          retry_policy:\n            num_retries: 3\n          sha256: )EOF\",\n                                                                    sha256));\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n  int num_retries = 3;\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster_1\"))\n      .WillRepeatedly(ReturnRef(cluster_manager_.async_client_));\n  
EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .Times(num_retries)\n      .WillRepeatedly(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            Http::ResponseMessagePtr response(\n                new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}}));\n            response->body().add(code);\n            callbacks.onSuccess(request, std::move(response));\n            return &request;\n          }));\n\n  EXPECT_CALL(*retry_timer_, enableTimer(_, _))\n      .WillRepeatedly(Invoke([&](const std::chrono::milliseconds&, const ScopeTrackedObject*) {\n        if (--num_retries == 0) {\n          EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n              .WillOnce(Invoke(\n                  [&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                      const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n                    Http::ResponseMessagePtr response(\n                        new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                            new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n                    response->body().add(code);\n                    callbacks.onSuccess(request, std::move(response));\n                    return &request;\n                  }));\n        }\n\n        retry_timer_cb_();\n      }));\n  EXPECT_CALL(*retry_timer_, disableTimer());\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  
EXPECT_CALL(filter_callback, addStreamFilter(_));\n  EXPECT_CALL(filter_callback, addAccessLogHandler(_));\n  cb(filter_callback);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessBadcode) {\n  const std::string code = \"foo\";\n  const std::string sha256 = Hex::encode(\n      Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code)));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          sha256: )EOF\",\n                                                                    sha256));\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster_1\"))\n      .WillOnce(ReturnRef(cluster_manager_.async_client_));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            Http::ResponseMessagePtr response(\n                new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n            response->body().add(code);\n            callbacks.onSuccess(request, std::move(response));\n            return nullptr;\n          }));\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  
EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n\n  // Fail closed.\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  Extensions::Common::Wasm::ContextSharedPtr context;\n  EXPECT_CALL(filter_callback, addStreamFilter(_))\n      .WillOnce(Invoke([&context](Http::StreamFilterSharedPtr filter) {\n        context = std::static_pointer_cast<Extensions::Common::Wasm::Context>(filter);\n      }));\n  EXPECT_CALL(filter_callback, addAccessLogHandler(_));\n  cb(filter_callback);\n  EXPECT_EQ(context->wasm(), nullptr);\n  EXPECT_TRUE(context->isFailed());\n\n  Http::MockStreamDecoderFilterCallbacks decoder_callbacks;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info;\n\n  context->setDecoderFilterCallbacks(decoder_callbacks);\n  EXPECT_CALL(decoder_callbacks, streamInfo()).WillRepeatedly(ReturnRef(stream_info));\n  EXPECT_CALL(stream_info, setResponseCodeDetails(\"wasm_fail_stream\"));\n  EXPECT_CALL(decoder_callbacks, resetStream());\n\n  EXPECT_EQ(context->onRequestHeaders(10, false), proxy_wasm::FilterHeadersStatus::StopIteration);\n}\n\nTEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessBadcodeFailOpen) {\n  const std::string code = \"foo\";\n  const std::string sha256 = Hex::encode(\n      Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code)));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    fail_open: true\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        remote:\n          http_uri:\n            uri: https://example.com/data\n            cluster: cluster_1\n            timeout: 5s\n          sha256: )EOF\",\n                                                                    sha256));\n  envoy::extensions::filters::http::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  
WasmFilterConfig factory;\n  NiceMock<Http::MockAsyncClient> client;\n  NiceMock<Http::MockAsyncClientRequest> request(&client);\n\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster_1\"))\n      .WillOnce(ReturnRef(cluster_manager_.async_client_));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            Http::ResponseMessagePtr response(\n                new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n            response->body().add(code);\n            callbacks.onSuccess(request, std::move(response));\n            return nullptr;\n          }));\n\n  Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  Http::MockFilterChainFactoryCallbacks filter_callback;\n  // The filter is not registered.\n  cb(filter_callback);\n}\n#endif\n\n} // namespace Wasm\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\nload(\"//bazel/wasm:wasm.bzl\", \"envoy_wasm_cc_binary\", \"wasm_rust_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nwasm_rust_binary(\n    name = \"async_call_rust.wasm\",\n    srcs = [\"async_call_rust/src/lib.rs\"],\n    deps = [\n        \"//bazel/external/cargo:log\",\n        \"//bazel/external/cargo:proxy_wasm\",\n    ],\n)\n\nwasm_rust_binary(\n    name = \"body_rust.wasm\",\n    srcs = [\"body_rust/src/lib.rs\"],\n    deps = [\n        \"//bazel/external/cargo:log\",\n        \"//bazel/external/cargo:proxy_wasm\",\n    ],\n)\n\nwasm_rust_binary(\n    name = \"headers_rust.wasm\",\n    srcs = [\"headers_rust/src/lib.rs\"],\n    deps = [\n        \"//bazel/external/cargo:log\",\n        \"//bazel/external/cargo:proxy_wasm\",\n    ],\n)\n\nwasm_rust_binary(\n    name = \"metadata_rust.wasm\",\n    srcs = [\"metadata_rust/src/lib.rs\"],\n    deps = [\n        \"//bazel/external/cargo:log\",\n        \"//bazel/external/cargo:proxy_wasm\",\n    ],\n)\n\nwasm_rust_binary(\n    name = \"shared_data_rust.wasm\",\n    srcs = [\"shared_data_rust/src/lib.rs\"],\n    deps = [\n        \"//bazel/external/cargo:log\",\n        \"//bazel/external/cargo:proxy_wasm\",\n    ],\n)\n\nwasm_rust_binary(\n    name = \"shared_queue_rust.wasm\",\n    srcs = [\"shared_queue_rust/src/lib.rs\"],\n    deps = [\n        \"//bazel/external/cargo:log\",\n        \"//bazel/external/cargo:proxy_wasm\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"test_cpp_plugin\",\n    srcs = [\n        \"test_async_call_cpp.cc\",\n        \"test_body_cpp.cc\",\n        \"test_cpp.cc\",\n        \"test_cpp_null_plugin.cc\",\n        \"test_grpc_call_cpp.cc\",\n        \"test_grpc_stream_cpp.cc\",\n        \"test_shared_data_cpp.cc\",\n        \"test_shared_queue_cpp.cc\",\n    ],\n    copts = [\"-DNULL_PLUGIN=1\"],\n    deps = [\n        \":test_cc_proto\",\n        
\"//external:abseil_node_hash_map\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n        \"//source/extensions/common/wasm:wasm_hdr\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/common/wasm:well_known_names\",\n        \"//source/extensions/common/wasm/ext:envoy_null_plugin\",\n        \"@proxy_wasm_cpp_sdk//contrib:contrib_lib\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"test_cpp.wasm\",\n    srcs = [\n        \"test_async_call_cpp.cc\",\n        \"test_body_cpp.cc\",\n        \"test_cpp.cc\",\n        \"test_grpc_call_cpp.cc\",\n        \"test_grpc_stream_cpp.cc\",\n        \"test_shared_data_cpp.cc\",\n        \"test_shared_queue_cpp.cc\",\n    ],\n    deps = [\n        \":test_cc_proto\",\n        \"//source/extensions/common/wasm/ext:declare_property_cc_proto\",\n        \"//source/extensions/common/wasm/ext:envoy_proxy_wasm_api_lib\",\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_lite\",\n        \"@proxy_wasm_cpp_sdk//contrib:contrib_lib\",\n    ],\n)\n\n# NB: this target is compiled both to native code and to Wasm. Hence the generic rule.\nproto_library(\n    name = \"test_proto\",\n    srcs = [\"test.proto\"],\n    deps = [\n        \"@com_google_protobuf//:any_proto\",\n        \"@com_google_protobuf//:timestamp_proto\",\n    ],\n)\n\n# NB: this target is compiled both to native code and to Wasm. 
Hence the generic rule.\ncc_proto_library(\n    name = \"test_cc_proto\",\n    deps = [\":test_proto\"],\n)\n\n# TODO: FIXME\n#\n#filegroup(\n#    name = \"wavm_binary\",\n#    srcs = [\"//bazel/foreign_cc:wavm\"],\n#    output_group = \"wavm\",\n#)\n#\n#genrule(\n#    name = \"test_cpp_wavm_compile\",\n#    srcs = [\":test_cpp.wasm\"],\n#    outs = [\"test_cpp.wavm_compiled.wasm\"],\n#    cmd = \"./$(location wavm_binary) compile $(location test_cpp.wasm) $(location test_cpp.wavm_compiled.wasm)\",\n#    tools = [\n#        \":test_cpp.wasm\",\n#        \":wavm_binary\",\n#    ],\n#)\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/async_call_rust/Cargo.toml",
    "content": "[package]\ndescription = \"Proxy-Wasm async call test\"\nname = \"async_call_rust\"\nversion = \"0.0.1\"\nauthors = [\"Piotr Sikora <piotrsikora@google.com>\"]\nedition = \"2018\"\n\n[dependencies]\nproxy-wasm = \"0.1\"\nlog = \"0.4\"\n\n[lib]\ncrate-type = [\"cdylib\"]\npath = \"src/*.rs\"\n\n[profile.release]\nlto = true\nopt-level = 3\npanic = \"abort\"\n\n[raze]\nworkspace_path = \"//bazel/external/cargo\"\ngenmode = \"Remote\"\n\n[raze.crates.log.'0.4.11']\nadditional_flags = [\"--cfg=atomic_cas\"]\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/async_call_rust/src/lib.rs",
    "content": "use log::{debug, info, warn};\nuse proxy_wasm::traits::{Context, HttpContext};\nuse proxy_wasm::types::*;\nuse std::time::Duration;\n\n#[no_mangle]\npub fn _start() {\n    proxy_wasm::set_log_level(LogLevel::Trace);\n    proxy_wasm::set_http_context(|_, _| -> Box<dyn HttpContext> { Box::new(TestStream) });\n}\n\nstruct TestStream;\n\nimpl HttpContext for TestStream {\n    fn on_http_request_headers(&mut self, _: usize) -> Action {\n        self.dispatch_http_call(\n            \"cluster\",\n            vec![(\":method\", \"POST\"), (\":path\", \"/\"), (\":authority\", \"foo\")],\n            Some(b\"hello world\"),\n            vec![(\"trail\", \"cow\")],\n            Duration::from_secs(5),\n        )\n        .unwrap();\n        info!(\"onRequestHeaders\");\n        Action::Pause\n    }\n}\n\nimpl Context for TestStream {\n    fn on_http_call_response(&mut self, _: u32, _: usize, body_size: usize, _: usize) {\n        if body_size == 0 {\n            info!(\"async_call failed\");\n            return;\n        }\n        for (name, value) in &self.get_http_call_response_headers() {\n            info!(\"{} -> {}\", name, value);\n        }\n        if let Some(body) = self.get_http_call_response_body(0, body_size) {\n            debug!(\"{}\", String::from_utf8(body).unwrap());\n        }\n        for (name, value) in &self.get_http_call_response_trailers() {\n            warn!(\"{} -> {}\", name, value);\n        }\n    }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/body_rust/Cargo.toml",
    "content": "[package]\ndescription = \"Proxy-Wasm HTTP body test\"\nname = \"body_rust\"\nversion = \"0.0.1\"\nauthors = [\"Piotr Sikora <piotrsikora@google.com>\"]\nedition = \"2018\"\n\n[dependencies]\nproxy-wasm = \"0.1\"\nlog = \"0.4\"\n\n[lib]\ncrate-type = [\"cdylib\"]\npath = \"src/*.rs\"\n\n[profile.release]\nlto = true\nopt-level = 3\npanic = \"abort\"\n\n[raze]\nworkspace_path = \"//bazel/external/cargo\"\ngenmode = \"Remote\"\n\n[raze.crates.log.'0.4.11']\nadditional_flags = [\"--cfg=atomic_cas\"]\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/body_rust/src/lib.rs",
    "content": "use log::error;\nuse proxy_wasm::traits::{Context, HttpContext};\nuse proxy_wasm::types::*;\n\n#[no_mangle]\npub fn _start() {\n    proxy_wasm::set_log_level(LogLevel::Trace);\n    proxy_wasm::set_http_context(|_, _| -> Box<dyn HttpContext> {\n        Box::new(TestStream {\n            test: None,\n            body_chunks: 0,\n        })\n    });\n}\n\nstruct TestStream {\n    test: Option<String>,\n    body_chunks: usize,\n}\n\nimpl HttpContext for TestStream {\n    fn on_http_request_headers(&mut self, _: usize) -> Action {\n        self.test = self.get_http_request_header(\"x-test-operation\");\n        self.body_chunks = 0;\n        Action::Continue\n    }\n\n    fn on_http_request_body(&mut self, body_size: usize, end_of_stream: bool) -> Action {\n        match self.test.as_deref() {\n            Some(\"ReadBody\") => {\n                let body = self.get_http_request_body(0, body_size).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                Action::Continue\n            }\n            Some(\"PrependAndAppendToBody\") => {\n                self.set_http_request_body(0, 0, b\"prepend.\");\n                self.set_http_request_body(0xffffffff, 0, b\".append\");\n                let body = self.get_http_request_body(0, 0xffffffff).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                Action::Continue\n            }\n            Some(\"ReplaceBody\") => {\n                self.set_http_request_body(0, 0xffffffff, b\"replace\");\n                let body = self.get_http_request_body(0, 0xffffffff).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                Action::Continue\n            }\n            Some(\"RemoveBody\") => {\n                self.set_http_request_body(0, 0xffffffff, b\"\");\n                if let Some(body) = self.get_http_request_body(0, 0xffffffff) {\n                    error!(\"onBody {}\", 
String::from_utf8(body).unwrap());\n                } else {\n                    error!(\"onBody \");\n                }\n                Action::Continue\n            }\n            Some(\"BufferBody\") => {\n                let body = self.get_http_request_body(0, body_size).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                if end_of_stream {\n                    Action::Continue\n                } else {\n                    Action::Pause\n                }\n            }\n            Some(\"PrependAndAppendToBufferedBody\") => {\n                self.set_http_request_body(0, 0, b\"prepend.\");\n                self.set_http_request_body(0xffffffff, 0, b\".append\");\n                let body = self.get_http_request_body(0, 0xffffffff).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                if end_of_stream {\n                    Action::Continue\n                } else {\n                    Action::Pause\n                }\n            }\n            Some(\"ReplaceBufferedBody\") => {\n                self.set_http_request_body(0, 0xffffffff, b\"replace\");\n                let body = self.get_http_request_body(0, 0xffffffff).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                if end_of_stream {\n                    Action::Continue\n                } else {\n                    Action::Pause\n                }\n            }\n            Some(\"RemoveBufferedBody\") => {\n                self.set_http_request_body(0, 0xffffffff, b\"\");\n                if let Some(body) = self.get_http_request_body(0, 0xffffffff) {\n                    error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                } else {\n                    error!(\"onBody \");\n                }\n                if end_of_stream {\n                    Action::Continue\n                } else {\n                    Action::Pause\n     
           }\n            }\n            Some(\"BufferTwoBodies\") => {\n                if let Some(body) = self.get_http_request_body(0, body_size) {\n                    error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                }\n                self.body_chunks += 1;\n                if end_of_stream || self.body_chunks > 2 {\n                    Action::Continue\n                } else {\n                    Action::Pause\n                }\n            }\n            _ => Action::Continue,\n        }\n    }\n\n    fn on_http_response_headers(&mut self, _: usize) -> Action {\n        self.test = self.get_http_response_header(\"x-test-operation\");\n        Action::Continue\n    }\n\n    fn on_http_response_body(&mut self, body_size: usize, end_of_stream: bool) -> Action {\n        match self.test.as_deref() {\n            Some(\"ReadBody\") => {\n                let body = self.get_http_response_body(0, body_size).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                Action::Continue\n            }\n            Some(\"PrependAndAppendToBody\") => {\n                self.set_http_response_body(0, 0, b\"prepend.\");\n                self.set_http_response_body(0xffffffff, 0, b\".append\");\n                let body = self.get_http_response_body(0, 0xffffffff).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                Action::Continue\n            }\n            Some(\"ReplaceBody\") => {\n                self.set_http_response_body(0, 0xffffffff, b\"replace\");\n                let body = self.get_http_response_body(0, 0xffffffff).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                Action::Continue\n            }\n            Some(\"RemoveBody\") => {\n                self.set_http_response_body(0, 0xffffffff, b\"\");\n                if let Some(body) = self.get_http_response_body(0, 0xffffffff) {\n           
         error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                } else {\n                    error!(\"onBody \");\n                }\n                Action::Continue\n            }\n            Some(\"BufferBody\") => {\n                let body = self.get_http_response_body(0, body_size).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                if end_of_stream {\n                    Action::Continue\n                } else {\n                    Action::Pause\n                }\n            }\n            Some(\"PrependAndAppendToBufferedBody\") => {\n                self.set_http_response_body(0, 0, b\"prepend.\");\n                self.set_http_response_body(0xffffffff, 0, b\".append\");\n                let body = self.get_http_response_body(0, 0xffffffff).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                if end_of_stream {\n                    Action::Continue\n                } else {\n                    Action::Pause\n                }\n            }\n            Some(\"ReplaceBufferedBody\") => {\n                self.set_http_response_body(0, 0xffffffff, b\"replace\");\n                let body = self.get_http_response_body(0, 0xffffffff).unwrap();\n                error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                if end_of_stream {\n                    Action::Continue\n                } else {\n                    Action::Pause\n                }\n            }\n            Some(\"RemoveBufferedBody\") => {\n                self.set_http_response_body(0, 0xffffffff, b\"\");\n                if let Some(body) = self.get_http_response_body(0, 0xffffffff) {\n                    error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                } else {\n                    error!(\"onBody \");\n                }\n                if end_of_stream {\n                    Action::Continue\n                } else {\n 
                   Action::Pause\n                }\n            }\n            Some(\"BufferTwoBodies\") => {\n                if let Some(body) = self.get_http_response_body(0, body_size) {\n                    error!(\"onBody {}\", String::from_utf8(body).unwrap());\n                }\n                self.body_chunks += 1;\n                if end_of_stream || self.body_chunks > 2 {\n                    Action::Continue\n                } else {\n                    Action::Pause\n                }\n            }\n            _ => Action::Continue,\n        }\n    }\n}\n\nimpl Context for TestStream {}\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/headers_rust/Cargo.toml",
    "content": "[package]\ndescription = \"Proxy-Wasm HTTP headers test\"\nname = \"headers_rust\"\nversion = \"0.0.1\"\nauthors = [\"Piotr Sikora <piotrsikora@google.com>\"]\nedition = \"2018\"\n\n[dependencies]\nproxy-wasm = \"0.1\"\nlog = \"0.4\"\n\n[lib]\ncrate-type = [\"cdylib\"]\npath = \"src/*.rs\"\n\n[profile.release]\nlto = true\nopt-level = 3\npanic = \"abort\"\n\n[raze]\nworkspace_path = \"//bazel/external/cargo\"\ngenmode = \"Remote\"\n\n[raze.crates.log.'0.4.11']\nadditional_flags = [\"--cfg=atomic_cas\"]\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/headers_rust/src/lib.rs",
    "content": "use log::{debug, error, info, warn};\nuse proxy_wasm::traits::{Context, HttpContext};\nuse proxy_wasm::types::*;\n\n#[no_mangle]\npub fn _start() {\n    proxy_wasm::set_log_level(LogLevel::Trace);\n    proxy_wasm::set_http_context(|context_id, _| -> Box<dyn HttpContext> {\n        Box::new(TestStream { context_id })\n    });\n}\n\nstruct TestStream {\n    context_id: u32,\n}\n\nimpl HttpContext for TestStream {\n    fn on_http_request_headers(&mut self, _: usize) -> Action {\n        debug!(\"onRequestHeaders {} headers\", self.context_id);\n        if let Some(path) = self.get_http_request_header(\":path\") {\n            info!(\"header path {}\", path);\n        }\n        let action = match self.get_http_request_header(\"server\").as_deref() {\n            Some(\"envoy-wasm-pause\") => Action::Pause,\n            _ => Action::Continue,\n        };\n        self.set_http_request_header(\"newheader\", Some(\"newheadervalue\"));\n        self.set_http_request_header(\"server\", Some(\"envoy-wasm\"));\n        action\n    }\n\n    fn on_http_request_body(&mut self, body_size: usize, _: bool) -> Action {\n        if let Some(body) = self.get_http_request_body(0, body_size) {\n            error!(\"onBody {}\", String::from_utf8(body).unwrap());\n        }\n        Action::Continue\n    }\n\n    fn on_http_response_trailers(&mut self, _: usize) -> Action {\n        Action::Pause\n    }\n\n    fn on_log(&mut self) {\n        if let Some(path) = self.get_http_request_header(\":path\") {\n            warn!(\"onLog {} {}\", self.context_id, path);\n        }\n    }\n}\n\nimpl Context for TestStream {\n    fn on_done(&mut self) -> bool {\n        warn!(\"onDone {}\", self.context_id);\n        true\n    }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/metadata_rust/Cargo.toml",
    "content": "[package]\ndescription = \"Proxy-Wasm metadata test\"\nname = \"metadata_rust\"\nversion = \"0.0.1\"\nauthors = [\"Piotr Sikora <piotrsikora@google.com>\"]\nedition = \"2018\"\n\n[dependencies]\nproxy-wasm = \"0.1\"\nlog = \"0.4\"\n\n[lib]\ncrate-type = [\"cdylib\"]\npath = \"src/*.rs\"\n\n[profile.release]\nlto = true\nopt-level = 3\npanic = \"abort\"\n\n[raze]\nworkspace_path = \"//bazel/external/cargo\"\ngenmode = \"Remote\"\n\n[raze.crates.log.'0.4.11']\nadditional_flags = [\"--cfg=atomic_cas\"]\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/metadata_rust/src/lib.rs",
    "content": "use log::{debug, error, info, trace};\nuse proxy_wasm::traits::{Context, HttpContext, RootContext};\nuse proxy_wasm::types::*;\nuse std::convert::TryFrom;\n\n#[no_mangle]\npub fn _start() {\n    proxy_wasm::set_log_level(LogLevel::Trace);\n    proxy_wasm::set_root_context(|_| -> Box<dyn RootContext> { Box::new(TestRoot) });\n    proxy_wasm::set_http_context(|_, _| -> Box<dyn HttpContext> { Box::new(TestStream) });\n}\n\nstruct TestRoot;\n\nimpl Context for TestRoot {}\n\nimpl RootContext for TestRoot {\n    fn on_tick(&mut self) {\n        if let Some(value) = self.get_property(vec![\"node\", \"metadata\", \"wasm_node_get_key\"]) {\n            debug!(\"onTick {}\", String::from_utf8(value).unwrap());\n        } else {\n            debug!(\"missing node metadata\");\n        }\n    }\n}\n\nstruct TestStream;\n\nimpl Context for TestStream {}\n\nimpl HttpContext for TestStream {\n    fn on_http_request_headers(&mut self, _: usize) -> Action {\n        if self\n            .get_property(vec![\"node\", \"metadata\", \"wasm_node_get_key\"])\n            .is_none()\n        {\n            debug!(\"missing node metadata\");\n        }\n\n        self.set_property(\n            vec![\"wasm_request_set_key\"],\n            Some(b\"wasm_request_set_value\"),\n        );\n\n        if let Some(path) = self.get_http_request_header(\":path\") {\n            info!(\"header path {}\", path);\n        }\n        self.set_http_request_header(\"newheader\", Some(\"newheadervalue\"));\n        self.set_http_request_header(\"server\", Some(\"envoy-wasm\"));\n\n        if let Some(value) = self.get_property(vec![\"request\", \"duration\"]) {\n            info!(\n                \"duration is {}\",\n                u64::from_le_bytes(<[u8; 8]>::try_from(&value[0..8]).unwrap())\n            );\n        } else {\n            error!(\"failed to get request duration\");\n        }\n        Action::Continue\n    }\n\n    fn on_http_request_body(&mut self, _: usize, _: bool) 
-> Action {\n        if let Some(value) = self.get_property(vec![\"node\", \"metadata\", \"wasm_node_get_key\"]) {\n            error!(\"onBody {}\", String::from_utf8(value).unwrap());\n        } else {\n            debug!(\"missing node metadata\");\n        }\n        let key1 = self.get_property(vec![\n            \"metadata\",\n            \"filter_metadata\",\n            \"envoy.filters.http.wasm\",\n            \"wasm_request_get_key\",\n        ]);\n        if key1.is_none() {\n            debug!(\"missing request metadata\");\n        }\n        let key2 = self.get_property(vec![\n            \"metadata\",\n            \"filter_metadata\",\n            \"envoy.filters.http.wasm\",\n            \"wasm_request_get_key\",\n        ]);\n        if key2.is_none() {\n            debug!(\"missing request metadata\");\n        }\n        trace!(\n            \"Struct {} {}\",\n            String::from_utf8(key1.unwrap()).unwrap(),\n            String::from_utf8(key2.unwrap()).unwrap()\n        );\n        Action::Continue\n    }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/shared_data_rust/Cargo.toml",
    "content": "[package]\ndescription = \"Proxy-Wasm shared key-value store test\"\nname = \"shared_data_rust\"\nversion = \"0.0.1\"\nauthors = [\"Piotr Sikora <piotrsikora@google.com>\"]\nedition = \"2018\"\n\n[dependencies]\nproxy-wasm = \"0.1\"\nlog = \"0.4\"\n\n[lib]\ncrate-type = [\"cdylib\"]\npath = \"src/*.rs\"\n\n[profile.release]\nlto = true\nopt-level = 3\npanic = \"abort\"\n\n[raze]\nworkspace_path = \"//bazel/external/cargo\"\ngenmode = \"Remote\"\n\n[raze.crates.log.'0.4.11']\nadditional_flags = [\"--cfg=atomic_cas\"]\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/shared_data_rust/src/lib.rs",
    "content": "use log::{debug, info, warn};\nuse proxy_wasm::traits::{Context, RootContext};\nuse proxy_wasm::types::*;\n\n#[no_mangle]\npub fn _start() {\n    proxy_wasm::set_log_level(LogLevel::Trace);\n    proxy_wasm::set_root_context(|_| -> Box<dyn RootContext> { Box::new(TestRoot) });\n}\n\nstruct TestRoot;\n\nimpl Context for TestRoot {}\n\nimpl RootContext for TestRoot {\n    fn on_tick(&mut self) {\n        if self.get_shared_data(\"shared_data_key_bad\") == (None, None) {\n            debug!(\"get of bad key not found\");\n        }\n        self.set_shared_data(\"shared_data_key1\", Some(b\"shared_data_value0\"), None)\n            .unwrap();\n        self.set_shared_data(\"shared_data_key1\", Some(b\"shared_data_value1\"), None)\n            .unwrap();\n        self.set_shared_data(\"shared_data_key2\", Some(b\"shared_data_value2\"), None)\n            .unwrap();\n        if let (_, Some(cas)) = self.get_shared_data(\"shared_data_key2\") {\n            match self.set_shared_data(\n                \"shared_data_key2\",\n                Some(b\"shared_data_value3\"),\n                Some(cas + 1),\n            ) {\n                Err(Status::CasMismatch) => info!(\"set CasMismatch\"),\n                _ => panic!(),\n            };\n        }\n    }\n\n    fn on_queue_ready(&mut self, _: u32) {\n        if self.get_shared_data(\"shared_data_key_bad\") == (None, None) {\n            debug!(\"second get of bad key not found\");\n        }\n        if let (Some(value), _) = self.get_shared_data(\"shared_data_key1\") {\n            debug!(\"get 1 {}\", String::from_utf8(value).unwrap());\n        }\n        if let (Some(value), _) = self.get_shared_data(\"shared_data_key2\") {\n            warn!(\"get 2 {}\", String::from_utf8(value).unwrap());\n        }\n    }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/shared_queue_rust/Cargo.toml",
    "content": "[package]\ndescription = \"Proxy-Wasm shared queue test\"\nname = \"shared_queue_rust\"\nversion = \"0.0.1\"\nauthors = [\"Piotr Sikora <piotrsikora@google.com>\"]\nedition = \"2018\"\n\n[dependencies]\nproxy-wasm = \"0.1\"\nlog = \"0.4\"\n\n[lib]\ncrate-type = [\"cdylib\"]\npath = \"src/*.rs\"\n\n[profile.release]\nlto = true\nopt-level = 3\npanic = \"abort\"\n\n[raze]\nworkspace_path = \"//bazel/external/cargo\"\ngenmode = \"Remote\"\n\n[raze.crates.log.'0.4.11']\nadditional_flags = [\"--cfg=atomic_cas\"]\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/shared_queue_rust/src/lib.rs",
    "content": "use log::{debug, info, warn};\nuse proxy_wasm::traits::{Context, HttpContext, RootContext};\nuse proxy_wasm::types::*;\n\n#[no_mangle]\npub fn _start() {\n    proxy_wasm::set_log_level(LogLevel::Trace);\n    proxy_wasm::set_root_context(|_| -> Box<dyn RootContext> {\n        Box::new(TestRoot { queue_id: None })\n    });\n    proxy_wasm::set_http_context(|_, _| -> Box<dyn HttpContext> { Box::new(TestStream) });\n}\n\nstruct TestRoot {\n    queue_id: Option<u32>,\n}\n\nimpl Context for TestRoot {}\n\nimpl RootContext for TestRoot {\n    fn on_vm_start(&mut self, _: usize) -> bool {\n        self.queue_id = Some(self.register_shared_queue(\"my_shared_queue\"));\n        true\n    }\n\n    fn on_queue_ready(&mut self, queue_id: u32) {\n        if Some(queue_id) == self.queue_id {\n            info!(\"onQueueReady\");\n            match self.dequeue_shared_queue(9999999 /* bad queue_id */) {\n                Err(Status::NotFound) => warn!(\"onQueueReady bad token not found\"),\n                _ => (),\n            }\n            if let Some(value) = self.dequeue_shared_queue(queue_id).unwrap() {\n                debug!(\"data {} Ok\", String::from_utf8(value).unwrap());\n            }\n            if self.dequeue_shared_queue(queue_id).unwrap().is_none() {\n                warn!(\"onQueueReady extra data not found\");\n            }\n        }\n    }\n}\n\nstruct TestStream;\n\nimpl Context for TestStream {}\n\nimpl HttpContext for TestStream {\n    fn on_http_request_headers(&mut self, _: usize) -> Action {\n        if self\n            .resolve_shared_queue(\"vm_id\", \"bad_shared_queue\")\n            .is_none()\n        {\n            warn!(\"onRequestHeaders not found bad_shared_queue\");\n        }\n        if let Some(queue_id) = self.resolve_shared_queue(\"vm_id\", \"my_shared_queue\") {\n            self.enqueue_shared_queue(queue_id, Some(b\"data1\")).unwrap();\n            warn!(\"onRequestHeaders enqueue Ok\");\n        }\n        
Action::Continue\n    }\n}\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/test.proto",
    "content": "syntax = \"proto3\";\n\npackage wasmtest;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\nmessage TestProto {\n  uint64 i = 1;\n  double j = 2;\n  bool k = 3;\n  string s = 4;\n  google.protobuf.Timestamp t = 5;\n  google.protobuf.Any a = 6;\n  TestProto b = 7;\n  repeated string l = 8;\n  map<string, string> m = 9;\n};\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/test_async_call_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <memory>\n#include <string>\n#include <unordered_map>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics_lite.h\"\n#else\n#include \"extensions/common/wasm/ext/envoy_null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(HttpWasmTestCpp)\n\nclass AsyncCallContext : public Context {\npublic:\n  explicit AsyncCallContext(uint32_t id, RootContext* root) : Context(id, root) {}\n\n  FilterHeadersStatus onRequestHeaders(uint32_t, bool) override;\n};\n\nclass AsyncCallRootContext : public RootContext {\npublic:\n  explicit AsyncCallRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {}\n};\n\nstatic RegisterContextFactory register_AsyncCallContext(CONTEXT_FACTORY(AsyncCallContext),\n                                                        ROOT_FACTORY(AsyncCallRootContext),\n                                                        \"async_call\");\n\nFilterHeadersStatus AsyncCallContext::onRequestHeaders(uint32_t, bool end_of_stream) {\n  auto context_id = id();\n  auto callback = [context_id](uint32_t, size_t body_size, uint32_t) {\n    if (body_size == 0) {\n      logInfo(\"async_call failed\");\n      return;\n    }\n    auto response_headers = getHeaderMapPairs(WasmHeaderMapType::HttpCallResponseHeaders);\n    // Switch context after getting headers, but before getting body to exercise both code paths.\n    getContext(context_id)->setEffectiveContext();\n    auto body = getBufferBytes(WasmBufferType::HttpCallResponseBody, 0, body_size);\n    auto response_trailers = getHeaderMapPairs(WasmHeaderMapType::HttpCallResponseTrailers);\n    for (auto& p : response_headers->pairs()) {\n      logInfo(std::string(p.first) + std::string(\" -> \") + std::string(p.second));\n    }\n    logDebug(std::string(body->view()));\n    for (auto& p : response_trailers->pairs()) {\n      logWarn(std::string(p.first) + std::string(\" -> \") + std::string(p.second));\n    }\n  };\n  if (end_of_stream) {\n    if 
(root()->httpCall(\"cluster\", {{\":method\", \"POST\"}, {\":path\", \"/\"}, {\":authority\", \"foo\"}},\n                         \"hello world\", {{\"trail\", \"cow\"}}, 1000, callback) == WasmResult::Ok) {\n      logError(\"expected failure did not\");\n    }\n    return FilterHeadersStatus::Continue;\n  }\n  if (root()->httpCall(\"bogus cluster\",\n                       {{\":method\", \"POST\"}, {\":path\", \"/\"}, {\":authority\", \"foo\"}}, \"hello world\",\n                       {{\"trail\", \"cow\"}}, 1000, callback) == WasmResult::Ok) {\n    logError(\"bogus cluster found error\");\n  }\n  if (root()->httpCall(\"cluster\", {{\":method\", \"POST\"}, {\":path\", \"/\"}, {\":authority\", \"foo\"}},\n                       \"hello world\", {{\"trail\", \"cow\"}}, 0xFFFFFFFF, callback) == WasmResult::Ok) {\n    logError(\"bogus timeout accepted error\");\n  }\n  if (root()->httpCall(\"cluster\", {{\":method\", \"POST\"}, {\":authority\", \"foo\"}}, \"hello world\",\n                       {{\"trail\", \"cow\"}}, 1000, callback) == WasmResult::Ok) {\n    logError(\"emissing path accepted error\");\n  }\n  root()->httpCall(\"cluster\", {{\":method\", \"POST\"}, {\":path\", \"/\"}, {\":authority\", \"foo\"}},\n                   \"hello world\", {{\"trail\", \"cow\"}}, 1000, callback);\n  logInfo(\"onRequestHeaders\");\n  return FilterHeadersStatus::StopIteration;\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/test_body_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <memory>\n#include <string>\n#include <unordered_map>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics_lite.h\"\n#else\n#include \"extensions/common/wasm/ext/envoy_null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(HttpWasmTestCpp)\n\nclass BodyRootContext : public RootContext {\npublic:\n  explicit BodyRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {}\n};\n\nclass BodyContext : public Context {\npublic:\n  explicit BodyContext(uint32_t id, RootContext* root) : Context(id, root) {}\n\n  FilterHeadersStatus onRequestHeaders(uint32_t, bool) override;\n  FilterDataStatus onRequestBody(size_t body_buffer_length, bool end_of_stream) override;\n  FilterHeadersStatus onResponseHeaders(uint32_t, bool) override;\n  FilterDataStatus onResponseBody(size_t body_buffer_length, bool end_of_stream) override;\n\nprivate:\n  BodyRootContext* root() { return static_cast<BodyRootContext*>(Context::root()); }\n  static void logBody(WasmBufferType type);\n  FilterDataStatus onBody(WasmBufferType type, size_t buffer_length, bool end);\n  std::string body_op_;\n  int num_chunks_ = 0;\n};\n\nstatic RegisterContextFactory register_BodyContext(CONTEXT_FACTORY(BodyContext),\n                                                   ROOT_FACTORY(BodyRootContext), \"body\");\n\nvoid BodyContext::logBody(WasmBufferType type) {\n  size_t buffered_size;\n  uint32_t flags;\n  getBufferStatus(type, &buffered_size, &flags);\n  auto body = getBufferBytes(type, 0, buffered_size);\n  logError(std::string(\"onBody \") + std::string(body->view()));\n}\n\nFilterDataStatus BodyContext::onBody(WasmBufferType type, size_t buffer_length,\n                                     bool end_of_stream) {\n  size_t size;\n  uint32_t flags;\n  if (body_op_ == \"ReadBody\") {\n    auto body = getBufferBytes(type, 0, buffer_length);\n    logError(\"onBody \" + std::string(body->view()));\n\n  } else if (body_op_ == \"PrependAndAppendToBody\") 
{\n    setBuffer(WasmBufferType::HttpRequestBody, 0, 0, \"prepend.\");\n    getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags);\n    setBuffer(WasmBufferType::HttpRequestBody, size, 0, \".append\");\n    getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags);\n    auto updated = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size);\n    logError(\"onBody \" + std::string(updated->view()));\n    return FilterDataStatus::StopIterationNoBuffer;\n  } else if (body_op_ == \"ReplaceBody\") {\n    setBuffer(WasmBufferType::HttpRequestBody, 0, buffer_length, \"replace\");\n    getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags);\n    auto replaced = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size);\n    logError(\"onBody \" + std::string(replaced->view()));\n    return FilterDataStatus::StopIterationAndWatermark;\n  } else if (body_op_ == \"RemoveBody\") {\n    setBuffer(WasmBufferType::HttpRequestBody, 0, buffer_length, \"\");\n    getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags);\n    auto erased = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size);\n    logError(\"onBody \" + std::string(erased->view()));\n\n  } else if (body_op_ == \"BufferBody\") {\n    logBody(type);\n    return end_of_stream ? FilterDataStatus::Continue : FilterDataStatus::StopIterationAndBuffer;\n\n  } else if (body_op_ == \"PrependAndAppendToBufferedBody\") {\n    setBuffer(WasmBufferType::HttpRequestBody, 0, 0, \"prepend.\");\n    getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags);\n    setBuffer(WasmBufferType::HttpRequestBody, size, 0, \".append\");\n    logBody(type);\n    return end_of_stream ? 
FilterDataStatus::Continue : FilterDataStatus::StopIterationAndBuffer;\n\n  } else if (body_op_ == \"ReplaceBufferedBody\") {\n    setBuffer(WasmBufferType::HttpRequestBody, 0, buffer_length, \"replace\");\n    getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags);\n    auto replaced = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size);\n    logBody(type);\n    return end_of_stream ? FilterDataStatus::Continue : FilterDataStatus::StopIterationAndBuffer;\n\n  } else if (body_op_ == \"RemoveBufferedBody\") {\n    setBuffer(WasmBufferType::HttpRequestBody, 0, buffer_length, \"\");\n    getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags);\n    auto erased = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size);\n    logBody(type);\n    return end_of_stream ? FilterDataStatus::Continue : FilterDataStatus::StopIterationAndBuffer;\n\n  } else if (body_op_ == \"BufferTwoBodies\") {\n    logBody(type);\n    num_chunks_++;\n    if (end_of_stream || num_chunks_ > 2) {\n      return FilterDataStatus::Continue;\n    }\n    return FilterDataStatus::StopIterationAndBuffer;\n\n  } else {\n    // This is a test and the test was configured incorrectly.\n    logError(\"Invalid body test op \" + body_op_);\n    abort();\n  }\n  return FilterDataStatus::Continue;\n}\n\nFilterHeadersStatus BodyContext::onRequestHeaders(uint32_t, bool) {\n  body_op_ = getRequestHeader(\"x-test-operation\")->toString();\n  setRequestHeaderPairs({{\"a\", \"a\"}, {\"b\", \"b\"}});\n  return FilterHeadersStatus::Continue;\n}\n\nFilterHeadersStatus BodyContext::onResponseHeaders(uint32_t, bool) {\n  body_op_ = getResponseHeader(\"x-test-operation\")->toString();\n  CHECK_RESULT(replaceResponseHeader(\"x-test-operation\", body_op_));\n  return FilterHeadersStatus::Continue;\n}\n\nFilterDataStatus BodyContext::onRequestBody(size_t body_buffer_length, bool end_of_stream) {\n  return onBody(WasmBufferType::HttpRequestBody, body_buffer_length, end_of_stream);\n}\n\nFilterDataStatus 
BodyContext::onResponseBody(size_t body_buffer_length, bool end_of_stream) {\n  return onBody(WasmBufferType::HttpResponseBody, body_buffer_length, end_of_stream);\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/test_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <memory>\n#include <string>\n#include <unordered_map>\n#include \"test/extensions/filters/http/wasm/test_data/test.pb.h\"\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics_lite.h\"\n#include \"source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h\"\n#include \"source/extensions/common/wasm/ext/declare_property.pb.h\"\n#else\n#include \"extensions/common/wasm/ext/envoy_null_plugin.h\"\n#include \"absl/base/casts.h\"\n#endif\n\nSTART_WASM_PLUGIN(HttpWasmTestCpp)\n\n#include \"contrib/proxy_expr.h\"\n\nclass TestRootContext : public RootContext {\npublic:\n  explicit TestRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {}\n\n  bool onStart(size_t configuration_size) override;\n  void onTick() override;\n  bool onConfigure(size_t) override;\n\n  std::string test_;\n  uint32_t stream_context_id_;\n};\n\nclass TestContext : public Context {\npublic:\n  explicit TestContext(uint32_t id, RootContext* root) : Context(id, root) {}\n\n  FilterHeadersStatus onRequestHeaders(uint32_t, bool) override;\n  FilterTrailersStatus onRequestTrailers(uint32_t) override;\n  FilterTrailersStatus onResponseTrailers(uint32_t) override;\n  FilterDataStatus onRequestBody(size_t body_buffer_length, bool end_of_stream) override;\n  void onLog() override;\n  void onDone() override;\n\nprivate:\n  TestRootContext* root() { return static_cast<TestRootContext*>(Context::root()); }\n};\n\nstatic RegisterContextFactory register_TestContext(CONTEXT_FACTORY(TestContext),\n                                                   ROOT_FACTORY(TestRootContext));\n\nbool TestRootContext::onStart(size_t configuration_size) {\n  test_ = getBufferBytes(WasmBufferType::VmConfiguration, 0, configuration_size)->toString();\n  return true;\n}\n\nbool TestRootContext::onConfigure(size_t) {\n  if (test_ == \"property\") {\n    {\n      // Many properties are not available in the root context.\n      const std::vector<std::string> 
properties = {\n          \"string_state\",     \"metadata\",   \"request\",        \"response\",    \"connection\",\n          \"connection_id\",    \"upstream\",   \"source\",         \"destination\", \"cluster_name\",\n          \"cluster_metadata\", \"route_name\", \"route_metadata\",\n      };\n      for (const auto& property : properties) {\n        if (getProperty({property}).has_value()) {\n          logWarn(\"getProperty should not return a value in the root context\");\n        }\n      }\n    }\n    {\n      // Some properties are defined in the root context.\n      std::vector<std::pair<std::vector<std::string>, std::string>> properties = {\n          {{\"plugin_name\"}, \"plugin_name\"},\n          {{\"plugin_vm_id\"}, \"vm_id\"},\n          {{\"listener_direction\"}, std::string(\"\\x1\\0\\0\\0\\0\\0\\0\\0\\0\", 8)}, // INBOUND\n          {{\"listener_metadata\"}, \"\"},\n      };\n      for (const auto& property : properties) {\n        std::string value;\n        if (!getValue(property.first, &value)) {\n          logWarn(\"getValue should provide a value in the root context: \" + property.second);\n        }\n        if (value != property.second) {\n          logWarn(\"getValue returned \" + value + \", expect \" + property.second);\n        }\n      }\n    }\n  }\n  return true;\n}\n\nFilterHeadersStatus TestContext::onRequestHeaders(uint32_t, bool) {\n  root()->stream_context_id_ = id();\n  auto test = root()->test_;\n  if (test == \"headers\") {\n    logDebug(std::string(\"onRequestHeaders \") + std::to_string(id()) + std::string(\" \") + test);\n    auto path = getRequestHeader(\":path\");\n    logInfo(std::string(\"header path \") + std::string(path->view()));\n    std::string protocol;\n    addRequestHeader(\"newheader\", \"newheadervalue\");\n    auto server = getRequestHeader(\"server\");\n    replaceRequestHeader(\"server\", \"envoy-wasm\");\n    auto r = addResponseHeader(\"bad\", \"bad\");\n    if (r != WasmResult::BadArgument) {\n      
logWarn(\"unexpected success of addResponseHeader\");\n    }\n    if (addResponseTrailer(\"bad\", \"bad\") != WasmResult::BadArgument) {\n      logWarn(\"unexpected success of addResponseTrailer\");\n    }\n    if (removeResponseTrailer(\"bad\") != WasmResult::BadArgument) {\n      logWarn(\"unexpected success of remoteResponseTrailer\");\n    }\n    size_t size;\n    if (getRequestHeaderSize(&size) != WasmResult::Ok) {\n      logWarn(\"unexpected failure of getRequestHeaderMapSize\");\n    }\n    if (getResponseHeaderSize(&size) != WasmResult::BadArgument) {\n      logWarn(\"unexpected success of getResponseHeaderMapSize\");\n    }\n    if (server->view() == \"envoy-wasm-pause\") {\n      return FilterHeadersStatus::StopIteration;\n    } else if (server->view() == \"envoy-wasm-end-stream\") {\n      return FilterHeadersStatus::ContinueAndEndStream;\n    } else if (server->view() == \"envoy-wasm-stop-buffer\") {\n      return FilterHeadersStatus::StopAllIterationAndBuffer;\n    } else if (server->view() == \"envoy-wasm-stop-watermark\") {\n      return FilterHeadersStatus::StopAllIterationAndWatermark;\n    } else {\n      return FilterHeadersStatus::Continue;\n    }\n  } else if (test == \"metadata\") {\n    std::string value;\n    if (!getValue({\"node\", \"metadata\", \"wasm_node_get_key\"}, &value)) {\n      logDebug(\"missing node metadata\");\n    }\n    auto r = setFilterStateStringValue(\"wasm_request_set_key\", \"wasm_request_set_value\");\n    if (r != WasmResult::Ok) {\n      logDebug(toString(r));\n    }\n    auto path = getRequestHeader(\":path\");\n    logInfo(std::string(\"header path \") + path->toString());\n    addRequestHeader(\"newheader\", \"newheadervalue\");\n    replaceRequestHeader(\"server\", \"envoy-wasm\");\n\n    {\n      const std::string expr = R\"(\"server is \" + request.headers[\"server\"])\";\n      uint32_t token = 0;\n      if (WasmResult::Ok != createExpression(expr, &token)) {\n        logError(\"expr_create error\");\n      } 
else {\n        std::string eval_result;\n        if (!evaluateExpression(token, &eval_result)) {\n          logError(\"expr_eval error\");\n        } else {\n          logInfo(eval_result);\n        }\n        if (WasmResult::Ok != exprDelete(token)) {\n          logError(\"failed to delete an expression\");\n        }\n      }\n    }\n\n    {\n      // Validate a valid CEL expression\n      const std::string expr = R\"(\n  envoy.api.v2.core.GrpcService{\n    envoy_grpc: envoy.api.v2.core.GrpcService.EnvoyGrpc {\n      cluster_name: \"test\"\n    }\n  })\";\n      uint32_t token = 0;\n      if (WasmResult::Ok != createExpression(expr, &token)) {\n        logError(\"expr_create error\");\n      } else {\n        GrpcService eval_result;\n        if (!evaluateMessage(token, &eval_result)) {\n          logError(\"expr_eval error\");\n        } else {\n          logInfo(\"grpc service: \" + eval_result.envoy_grpc().cluster_name());\n        }\n        if (WasmResult::Ok != exprDelete(token)) {\n          logError(\"failed to delete an expression\");\n        }\n      }\n    }\n\n    {\n      // Create a syntactically wrong CEL expression\n      uint32_t token = 0;\n      if (createExpression(\"/ /\", &token) != WasmResult::BadArgument) {\n        logError(\"expect an error on a syntactically wrong expressions\");\n      }\n    }\n\n    {\n      // Create an invalid CEL expression\n      uint32_t token = 0;\n      if (createExpression(\"_&&_(a, b, c)\", &token) != WasmResult::BadArgument) {\n        logError(\"expect an error on invalid expressions\");\n      }\n    }\n\n    {\n      // Evaluate a bad token\n      std::string result;\n      uint64_t token = 0;\n      if (evaluateExpression(token, &result)) {\n        logError(\"expect an error on invalid token in evaluate\");\n      }\n    }\n\n    {\n      // Evaluate a missing token\n      std::string result;\n      uint32_t token = 0xFFFFFFFF;\n      if (evaluateExpression(token, &result)) {\n        
logError(\"expect an error on unknown token in evaluate\");\n      }\n      // Delete a missing token\n      if (exprDelete(token) != WasmResult::Ok) {\n        logError(\"expect no error on unknown token in delete expression\");\n      }\n    }\n\n    {\n      // Evaluate two expressions to an error\n      uint32_t token1 = 0;\n      if (createExpression(\"1/0\", &token1) != WasmResult::Ok) {\n        logError(\"unexpected error on division by zero expression\");\n      }\n      uint32_t token2 = 0;\n      if (createExpression(\"request.duration.size\", &token2) != WasmResult::Ok) {\n        logError(\"unexpected error on integer field access expression\");\n      }\n      std::string result;\n      if (evaluateExpression(token1, &result)) {\n        logError(\"expect an error on division by zero\");\n      }\n      if (evaluateExpression(token2, &result)) {\n        logError(\"expect an error on integer field access expression\");\n      }\n      if (exprDelete(token1) != WasmResult::Ok) {\n        logError(\"failed to delete an expression\");\n      }\n      if (exprDelete(token2) != WasmResult::Ok) {\n        logError(\"failed to delete an expression\");\n      }\n    }\n\n    {\n      int64_t dur;\n      if (getValue({\"request\", \"duration\"}, &dur)) {\n        logInfo(\"duration is \" + std::to_string(dur));\n      } else {\n        logError(\"failed to get request duration\");\n      }\n    }\n\n    return FilterHeadersStatus::Continue;\n  }\n  return FilterHeadersStatus::Continue;\n}\n\nFilterTrailersStatus TestContext::onRequestTrailers(uint32_t) {\n  auto request_trailer = getRequestTrailer(\"bogus-trailer\");\n  if (request_trailer && request_trailer->view() != \"\") {\n    logWarn(\"request bogus-trailer found\");\n  }\n  CHECK_RESULT(replaceRequestTrailer(\"new-trailer\", \"value\"));\n  CHECK_RESULT(removeRequestTrailer(\"x\"));\n  // Not available yet.\n  replaceResponseTrailer(\"new-trailer\", \"value\");\n  auto response_trailer = 
getResponseTrailer(\"bogus-trailer\");\n  if (response_trailer && response_trailer->view() != \"\") {\n    logWarn(\"request bogus-trailer found\");\n  }\n  return FilterTrailersStatus::Continue;\n}\n\nFilterTrailersStatus TestContext::onResponseTrailers(uint32_t) {\n  auto value = getResponseTrailer(\"bogus-trailer\");\n  if (value && value->view() != \"\") {\n    logWarn(\"response bogus-trailer found\");\n  }\n  CHECK_RESULT(replaceResponseTrailer(\"new-trailer\", \"value\"));\n  return FilterTrailersStatus::StopIteration;\n}\n\nFilterDataStatus TestContext::onRequestBody(size_t body_buffer_length, bool) {\n  auto test = root()->test_;\n  if (test == \"headers\") {\n    auto body = getBufferBytes(WasmBufferType::HttpRequestBody, 0, body_buffer_length);\n    logError(std::string(\"onBody \") + std::string(body->view()));\n  } else if (test == \"metadata\") {\n    std::string value;\n    if (!getValue({\"node\", \"metadata\", \"wasm_node_get_key\"}, &value)) {\n      logDebug(\"missing node metadata\");\n    }\n    logError(std::string(\"onBody \") + value);\n    std::string request_string;\n    std::string request_string2;\n    if (!getValue(\n            {\"metadata\", \"filter_metadata\", \"envoy.filters.http.wasm\", \"wasm_request_get_key\"},\n            &request_string)) {\n      logDebug(\"missing request metadata\");\n    }\n    if (!getValue(\n            {\"metadata\", \"filter_metadata\", \"envoy.filters.http.wasm\", \"wasm_request_get_key\"},\n            &request_string2)) {\n      logDebug(\"missing request metadata\");\n    }\n    logTrace(std::string(\"Struct \") + request_string + \" \" + request_string2);\n    return FilterDataStatus::Continue;\n  }\n  return FilterDataStatus::Continue;\n}\n\nvoid TestContext::onLog() {\n  auto test = root()->test_;\n  if (test == \"headers\") {\n    auto path = getRequestHeader(\":path\");\n    logWarn(\"onLog \" + std::to_string(id()) + \" \" + std::string(path->view()));\n    auto response_header = 
getResponseHeader(\"bogus-header\");\n    if (response_header && response_header->view() != \"\") {\n      logWarn(\"response bogus-header found\");\n    }\n    auto response_trailer = getResponseTrailer(\"bogus-trailer\");\n    if (response_trailer && response_trailer->view() != \"\") {\n      logWarn(\"response bogus-trailer found\");\n    }\n  } else if (test == \"property\") {\n    setFilterState(\"wasm_state\", \"wasm_value\");\n    auto path = getRequestHeader(\":path\");\n    if (path->view() == \"/test_context\") {\n      logWarn(\"request.path: \" + getProperty({\"request\", \"path\"}).value()->toString());\n      logWarn(\"node.metadata: \" +\n              getProperty({\"node\", \"metadata\", \"istio.io/metadata\"}).value()->toString());\n      logWarn(\"metadata: \" + getProperty({\"metadata\", \"filter_metadata\", \"envoy.filters.http.wasm\",\n                                          \"wasm_request_get_key\"})\n                                 .value()\n                                 ->toString());\n      int64_t responseCode;\n      if (getValue({\"response\", \"code\"}, &responseCode)) {\n        logWarn(\"response.code: \" + std::to_string(responseCode));\n      }\n      logWarn(\"state: \" + getProperty({\"wasm_state\"}).value()->toString());\n    } else {\n      logWarn(\"onLog \" + std::to_string(id()) + \" \" + std::string(path->view()));\n    }\n\n    // Wasm state property set and read validation for {i: 1337}\n    // Generated using the following input.json:\n    // {\n    //   \"i\": 1337\n    // }\n    // flatc -b schema.fbs input.json\n    {\n      static const char data[24] = {0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00,\n                                    0x0c, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,\n                                    0x39, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};\n      if (WasmResult::Ok != setFilterState(\"structured_state\", std::string_view(data, 24))) {\n        logWarn(\"setProperty(structured_state) 
failed\");\n      }\n      int64_t value = 0;\n      if (!getValue({\"structured_state\", \"i\"}, &value)) {\n        logWarn(\"getProperty(structured_state) failed\");\n      }\n      if (value != 1337) {\n        logWarn(\"getProperty(structured_state) returned \" + std::to_string(value));\n      }\n      std::string buffer;\n      if (!getValue({\"structured_state\"}, &buffer)) {\n        logWarn(\"getValue for structured_state should not fail\");\n      }\n      if (buffer.size() != 24) {\n        logWarn(\"getValue for structured_state should return the buffer\");\n      }\n    }\n    {\n      if (setFilterState(\"string_state\", \"unicorns\") != WasmResult::Ok) {\n        logWarn(\"setProperty(string_state) failed\");\n      }\n      std::string value;\n      if (!getValue({\"string_state\"}, &value)) {\n        logWarn(\"getProperty(string_state) failed\");\n      }\n      if (value != \"unicorns\") {\n        logWarn(\"getProperty(string_state) returned \" + value);\n      }\n    }\n    {\n      // access via \"filter_state\" property\n      std::string value;\n      if (!getValue({\"filter_state\", \"wasm.string_state\"}, &value)) {\n        logWarn(\"accessing via filter_state failed\");\n      }\n      if (value != \"unicorns\") {\n        logWarn(\"unexpected value: \" + value);\n      }\n    }\n    {\n      // attempt to write twice for a read only wasm state\n      if (setFilterState(\"string_state\", \"ponies\") == WasmResult::Ok) {\n        logWarn(\"expected second setProperty(string_state) to fail\");\n      }\n      std::string value;\n      if (!getValue({\"string_state\"}, &value)) {\n        logWarn(\"getProperty(string_state) failed\");\n      }\n      if (value != \"unicorns\") {\n        logWarn(\"getProperty(string_state) returned \" + value);\n      }\n    }\n    {\n      if (setFilterState(\"bytes_state\", \"ponies\") != WasmResult::Ok) {\n        logWarn(\"setProperty(bytes_state) failed\");\n      }\n      std::string value;\n      if 
(!getValue({\"bytes_state\"}, &value)) {\n        logWarn(\"getProperty(bytes_state) failed\");\n      }\n      if (value != \"ponies\") {\n        logWarn(\"getProperty(bytes_state) returned \" + value);\n      }\n    }\n    {\n      wasmtest::TestProto test_proto;\n      uint32_t i = 53;\n      test_proto.set_i(i);\n      double j = 13.0;\n      test_proto.set_j(j);\n      bool k = true;\n      test_proto.set_k(k);\n      std::string s = \"centaur\";\n      test_proto.set_s(s);\n      test_proto.mutable_t()->set_seconds(2);\n      test_proto.mutable_t()->set_nanos(3);\n      test_proto.add_l(\"abc\");\n      test_proto.add_l(\"xyz\");\n      (*test_proto.mutable_m())[\"a\"] = \"b\";\n\n      // validate setting a filter state\n      std::string in;\n      test_proto.SerializeToString(&in);\n      if (setFilterState(\"protobuf_state\", in) != WasmResult::Ok) {\n        logWarn(\"setProperty(protobuf_state) failed\");\n      }\n      // validate uint field\n      uint64_t i2;\n      if (!getValue({\"protobuf_state\", \"i\"}, &i2) || i2 != i) {\n        logWarn(\"uint field returned \" + std::to_string(i2));\n      }\n\n      // validate double field\n      double j2;\n      if (!getValue({\"protobuf_state\", \"j\"}, &j2) || j2 != j) {\n        logWarn(\"double field returned \" + std::to_string(j2));\n      }\n\n      // validate bool field\n      bool k2;\n      if (!getValue({\"protobuf_state\", \"k\"}, &k2) || k2 != k) {\n        logWarn(\"bool field returned \" + std::to_string(k2));\n      }\n\n      // validate string field\n      std::string s2;\n      if (!getValue({\"protobuf_state\", \"s\"}, &s2) || s2 != s) {\n        logWarn(\"string field returned \" + s2);\n      }\n\n      // validate timestamp field\n      int64_t t;\n      if (!getValue({\"protobuf_state\", \"t\"}, &t) || t != 2000000003ull) {\n        logWarn(\"timestamp field returned \" + std::to_string(t));\n      }\n\n      // validate malformed field\n      std::string a;\n      if 
(getValue({\"protobuf_state\", \"a\"}, &a)) {\n        logWarn(\"expect serialization error for malformed type_url string, got \" + a);\n      }\n\n      // validate null field\n      std::string b;\n      if (!getValue({\"protobuf_state\", \"b\"}, &b) || b != \"\") {\n        logWarn(\"null field returned \" + b);\n      }\n\n      // validate list field\n      auto l = getProperty({\"protobuf_state\", \"l\"});\n      if (l.has_value()) {\n        auto pairs = l.value()->pairs();\n        if (pairs.size() != 2 || pairs[0].first != \"abc\" || pairs[1].first != \"xyz\") {\n          logWarn(\"list field did not return the expected value\");\n        }\n      } else {\n        logWarn(\"list field returned none\");\n      }\n\n      // validate map field\n      auto m = getProperty({\"protobuf_state\", \"m\"});\n      if (m.has_value()) {\n        auto pairs = m.value()->pairs();\n        if (pairs.size() != 1 || pairs[0].first != \"a\" || pairs[0].second != \"b\") {\n          logWarn(\"map field did not return the expected value: \" + std::to_string(pairs.size()));\n        }\n      } else {\n        logWarn(\"map field returned none\");\n      }\n\n      // validate entire message\n      std::string buffer;\n      if (!getValue({\"protobuf_state\"}, &buffer)) {\n        logWarn(\"getValue for protobuf_state should not fail\");\n      }\n      if (buffer.size() != in.size()) {\n        logWarn(\"getValue for protobuf_state should return the buffer\");\n      }\n    }\n    {\n      // Some properties are not available in the stream context.\n      const std::vector<std::string> properties = {\"xxx\", \"request\", \"route_name\", \"node\"};\n      for (const auto& property : properties) {\n        if (getProperty({property, \"xxx\"}).has_value()) {\n          logWarn(\"getProperty should not return a value in the root context\");\n        }\n      }\n    }\n    {\n      // Some properties are defined in the stream context.\n      
std::vector<std::pair<std::vector<std::string>, std::string>> properties = {\n          {{\"plugin_name\"}, \"plugin_name\"},\n          {{\"plugin_vm_id\"}, \"vm_id\"},\n          {{\"listener_direction\"}, std::string(\"\\x1\\0\\0\\0\\0\\0\\0\\0\\0\", 8)}, // INBOUND\n          {{\"listener_metadata\"}, \"\"},\n          {{\"route_name\"}, \"route12\"},\n          {{\"cluster_name\"}, \"fake_cluster\"},\n          {{\"connection_id\"}, std::string(\"\\x4\\0\\0\\0\\0\\0\\0\\0\\0\", 8)},\n          {{\"connection\", \"requested_server_name\"}, \"w3.org\"},\n          {{\"source\", \"address\"}, \"127.0.0.1:0\"},\n          {{\"destination\", \"address\"}, \"127.0.0.2:0\"},\n          {{\"upstream\", \"address\"}, \"10.0.0.1:443\"},\n          {{\"cluster_metadata\"}, \"\"},\n          {{\"route_metadata\"}, \"\"},\n      };\n      for (const auto& property : properties) {\n        std::string value;\n        if (!getValue(property.first, &value)) {\n          logWarn(\"getValue should provide a value in the root context: \" + property.second);\n        }\n        if (value != property.second) {\n          logWarn(\"getValue returned \" + value + \", expect \" + property.second);\n        }\n      }\n    }\n  }\n}\n\nvoid TestContext::onDone() {\n  auto test = root()->test_;\n  if (test == \"headers\") {\n    logWarn(\"onDone \" + std::to_string(id()));\n  }\n}\n\nvoid TestRootContext::onTick() {\n  if (test_ == \"headers\") {\n    getContext(stream_context_id_)->setEffectiveContext();\n    replaceRequestHeader(\"server\", \"envoy-wasm-continue\");\n    continueRequest();\n    if (getBufferBytes(WasmBufferType::PluginConfiguration, 0, 1)->view() != \"\") {\n      logDebug(\"unexpectd success of getBufferBytes PluginConfiguration\");\n    }\n  } else if (test_ == \"metadata\") {\n    std::string value;\n    if (!getValue({\"node\", \"metadata\", \"wasm_node_get_key\"}, &value)) {\n      logDebug(\"missing node metadata\");\n    }\n    logDebug(std::string(\"onTick 
\") + value);\n  } else if (test_ == \"property\") {\n    uint64_t t;\n    if (WasmResult::Ok != proxy_get_current_time_nanoseconds(&t)) {\n      logError(std::string(\"bad proxy_get_current_time_nanoseconds result\"));\n    }\n    std::string function = \"declare_property\";\n    {\n      envoy::source::extensions::common::wasm::DeclarePropertyArguments args;\n      args.set_name(\"structured_state\");\n      args.set_type(envoy::source::extensions::common::wasm::WasmType::FlatBuffers);\n      args.set_span(envoy::source::extensions::common::wasm::LifeSpan::DownstreamConnection);\n      // Reflection flatbuffer for a simple table {i : int64}.\n      // Generated using the following schema.fbs:\n      //\n      // namespace Wasm.Common;\n      // table T {\n      //   i: int64;\n      // }\n      // root_type T;\n      //\n      // flatc --cpp --bfbs-gen-embed schema.fbs\n      static const char bfbsData[192] = {\n          0x18, 0x00, 0x00, 0x00, 0x42, 0x46, 0x42, 0x53, 0x10, 0x00, 0x1C, 0x00, 0x04, 0x00, 0x08,\n          0x00, 0x0C, 0x00, 0x10, 0x00, 0x14, 0x00, 0x18, 0x00, 0x10, 0x00, 0x00, 0x00, 0x30, 0x00,\n          0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x34,\n          0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n          0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n          0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x04, 0x00,\n          0x08, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x08,\n          0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00,\n          0x0D, 0x00, 0x00, 0x00, 0x57, 0x61, 0x73, 0x6D, 0x2E, 0x43, 0x6F, 0x6D, 0x6D, 0x6F, 0x6E,\n          0x2E, 0x54, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x12, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00,\n          0x06, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x04, 0x00, 0x18, 0x00, 0x00, 0x00, 0x0C,\n          0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00,\n          0x00, 0x00, 0x00, 0x09, 0x01, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00};\n      args.set_schema(bfbsData, 192);\n      std::string in;\n      args.SerializeToString(&in);\n      char* out = nullptr;\n      size_t out_size = 0;\n      if (WasmResult::Ok != proxy_call_foreign_function(function.data(), function.size(), in.data(),\n                                                        in.size(), &out, &out_size)) {\n        logError(\"declare_property failed for flatbuffers\");\n      }\n      ::free(out);\n    }\n    {\n      envoy::source::extensions::common::wasm::DeclarePropertyArguments args;\n      args.set_name(\"string_state\");\n      args.set_type(envoy::source::extensions::common::wasm::WasmType::String);\n      args.set_span(envoy::source::extensions::common::wasm::LifeSpan::FilterChain);\n      args.set_readonly(true);\n      std::string in;\n      args.SerializeToString(&in);\n      char* out = nullptr;\n      size_t out_size = 0;\n      if (WasmResult::Ok != proxy_call_foreign_function(function.data(), function.size(), in.data(),\n                                                        in.size(), &out, &out_size)) {\n        logError(\"declare_property failed for strings\");\n      }\n      ::free(out);\n    }\n    {\n      envoy::source::extensions::common::wasm::DeclarePropertyArguments args;\n      args.set_name(\"bytes_state\");\n      args.set_type(envoy::source::extensions::common::wasm::WasmType::Bytes);\n      args.set_span(envoy::source::extensions::common::wasm::LifeSpan::DownstreamRequest);\n      std::string in;\n      args.SerializeToString(&in);\n      char* out = nullptr;\n      size_t out_size = 0;\n      if (WasmResult::Ok != proxy_call_foreign_function(function.data(), function.size(), in.data(),\n                                                        in.size(), &out, &out_size)) 
{\n        logError(\"declare_property failed for bytes\");\n      }\n      ::free(out);\n    }\n    {\n      // double declaration of \"bytes_state\" should return BAD_ARGUMENT\n      envoy::source::extensions::common::wasm::DeclarePropertyArguments args;\n      args.set_name(\"bytes_state\");\n      std::string in;\n      args.SerializeToString(&in);\n      char* out = nullptr;\n      size_t out_size = 0;\n      if (WasmResult::BadArgument != proxy_call_foreign_function(function.data(), function.size(),\n                                                                 in.data(), in.size(), &out,\n                                                                 &out_size)) {\n        logError(\"declare_property must fail for double declaration\");\n      }\n      ::free(out);\n    }\n    {\n      envoy::source::extensions::common::wasm::DeclarePropertyArguments args;\n      args.set_name(\"protobuf_state\");\n      args.set_type(envoy::source::extensions::common::wasm::WasmType::Protobuf);\n      args.set_span(envoy::source::extensions::common::wasm::LifeSpan::DownstreamRequest);\n      args.set_schema(\"type.googleapis.com/wasmtest.TestProto\");\n      std::string in;\n      args.SerializeToString(&in);\n      char* out = nullptr;\n      size_t out_size = 0;\n      if (WasmResult::Ok != proxy_call_foreign_function(function.data(), function.size(), in.data(),\n                                                        in.size(), &out, &out_size)) {\n        logError(\"declare_property failed for protobuf\");\n      }\n      ::free(out);\n    }\n    {\n      char* out = nullptr;\n      size_t out_size = 0;\n      if (WasmResult::Ok == proxy_call_foreign_function(function.data(), function.size(),\n                                                        function.data(), function.size(), &out,\n                                                        &out_size)) {\n        logError(\"expected declare_property to fail\");\n      }\n      ::free(out);\n    }\n    {\n      
// setting a filter state in root context returns NOT_FOUND\n      if (setFilterState(\"string_state\", \"unicorns\") != WasmResult::NotFound) {\n        logWarn(\"setProperty(string_state) should fail in root context\");\n      }\n    }\n  }\n}\n\nclass Context1 : public Context {\npublic:\n  Context1(uint32_t id, RootContext* root) : Context(id, root) {}\n  FilterHeadersStatus onRequestHeaders(uint32_t, bool) override;\n};\n\nclass Context2 : public Context {\npublic:\n  Context2(uint32_t id, RootContext* root) : Context(id, root) {}\n  FilterHeadersStatus onRequestHeaders(uint32_t, bool) override;\n};\n\nstatic RegisterContextFactory register_Context1(CONTEXT_FACTORY(Context1), \"context1\");\nstatic RegisterContextFactory register_Contxt2(CONTEXT_FACTORY(Context2), \"context2\");\n\nFilterHeadersStatus Context1::onRequestHeaders(uint32_t, bool) {\n  logDebug(std::string(\"onRequestHeaders1 \") + std::to_string(id()));\n  return FilterHeadersStatus::Continue;\n}\n\nFilterHeadersStatus Context2::onRequestHeaders(uint32_t, bool) {\n  logDebug(std::string(\"onRequestHeaders2 \") + std::to_string(id()));\n  CHECK_RESULT(sendLocalResponse(200, \"ok\", \"body\", {{\"foo\", \"bar\"}}));\n  return FilterHeadersStatus::Continue;\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/test_cpp_null_plugin.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace proxy_wasm {\nnamespace null_plugin {\nnamespace HttpWasmTestCpp {\nNullPluginRegistry* context_registry_;\n} // namespace HttpWasmTestCpp\n\nRegisterNullVmPluginFactory register_common_wasm_test_cpp_plugin(\"HttpWasmTestCpp\", []() {\n  return std::make_unique<NullPlugin>(HttpWasmTestCpp::context_registry_);\n});\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/test_grpc_call_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <memory>\n#include <string>\n#include <unordered_map>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics_lite.h\"\n#else\n#include \"extensions/common/wasm/ext/envoy_null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(HttpWasmTestCpp)\n\nclass MyGrpcCallHandler : public GrpcCallHandler<google::protobuf::Value> {\npublic:\n  MyGrpcCallHandler() : GrpcCallHandler<google::protobuf::Value>() {}\n  void onSuccess(size_t body_size) override {\n    auto response = getBufferBytes(WasmBufferType::GrpcReceiveBuffer, 0, body_size);\n    logDebug(response->proto<google::protobuf::Value>().string_value());\n    cancel();\n  }\n  void onFailure(GrpcStatus) override {\n    auto p = getStatus();\n    logDebug(std::string(\"failure \") + std::string(p.second->view()));\n  }\n};\n\nclass GrpcCallRootContext : public RootContext {\npublic:\n  explicit GrpcCallRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {}\n\n  void onQueueReady(uint32_t op) override {\n    if (op == 0) {\n      handler_->cancel();\n    } else {\n      grpcClose(handler_->token());\n    }\n  }\n\n  MyGrpcCallHandler* handler_ = nullptr;\n};\n\nclass GrpcCallContext : public Context {\npublic:\n  explicit GrpcCallContext(uint32_t id, RootContext* root) : Context(id, root) {}\n\n  FilterHeadersStatus onRequestHeaders(uint32_t, bool) override;\n\n  GrpcCallRootContext* root() { return static_cast<GrpcCallRootContext*>(Context::root()); }\n};\n\nstatic RegisterContextFactory register_GrpcCallContext(CONTEXT_FACTORY(GrpcCallContext),\n                                                       ROOT_FACTORY(GrpcCallRootContext),\n                                                       \"grpc_call\");\n\nFilterHeadersStatus GrpcCallContext::onRequestHeaders(uint32_t, bool end_of_stream) {\n  GrpcService grpc_service;\n  grpc_service.mutable_envoy_grpc()->set_cluster_name(\"cluster\");\n  std::string grpc_service_string;\n  
grpc_service.SerializeToString(&grpc_service_string);\n  google::protobuf::Value value;\n  value.set_string_value(\"request\");\n  HeaderStringPairs initial_metadata;\n  root()->handler_ = new MyGrpcCallHandler();\n  if (end_of_stream) {\n    if (root()->grpcCallHandler(grpc_service_string, \"service\", \"method\", initial_metadata, value,\n                                1000, std::unique_ptr<GrpcCallHandlerBase>(root()->handler_)) ==\n        WasmResult::Ok) {\n      logError(\"expected failure did not occur\");\n    }\n    return FilterHeadersStatus::Continue;\n  }\n  root()->grpcCallHandler(grpc_service_string, \"service\", \"method\", initial_metadata, value, 1000,\n                          std::unique_ptr<GrpcCallHandlerBase>(root()->handler_));\n  if (root()->grpcCallHandler(\n          \"bogus grpc_service\", \"service\", \"method\", initial_metadata, value, 1000,\n          std::unique_ptr<GrpcCallHandlerBase>(new MyGrpcCallHandler())) == WasmResult::Ok) {\n    logError(\"bogus grpc_service accepted error\");\n  }\n  return FilterHeadersStatus::StopIteration;\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/test_grpc_stream_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <memory>\n#include <string>\n#include <unordered_map>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics_lite.h\"\n#else\n#include \"extensions/common/wasm/ext/envoy_null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(HttpWasmTestCpp)\n\nclass GrpcStreamContext : public Context {\npublic:\n  explicit GrpcStreamContext(uint32_t id, RootContext* root) : Context(id, root) {}\n\n  FilterHeadersStatus onRequestHeaders(uint32_t, bool) override;\n};\n\nclass GrpcStreamRootContext : public RootContext {\npublic:\n  explicit GrpcStreamRootContext(uint32_t id, std::string_view root_id)\n      : RootContext(id, root_id) {}\n};\n\nstatic RegisterContextFactory register_GrpcStreamContext(CONTEXT_FACTORY(GrpcStreamContext),\n                                                         ROOT_FACTORY(GrpcStreamRootContext),\n                                                         \"grpc_stream\");\nclass MyGrpcStreamHandler\n    : public GrpcStreamHandler<google::protobuf::Value, google::protobuf::Value> {\npublic:\n  MyGrpcStreamHandler() : GrpcStreamHandler<google::protobuf::Value, google::protobuf::Value>() {}\n  void onReceiveInitialMetadata(uint32_t) override {\n    auto h = getHeaderMapValue(WasmHeaderMapType::GrpcReceiveInitialMetadata, \"test\");\n    if (h->view() == \"reset\") {\n      reset();\n      return;\n    }\n    // Not Found.\n    h = getHeaderMapValue(WasmHeaderMapType::HttpCallResponseHeaders, \"foo\");\n    h = getHeaderMapValue(WasmHeaderMapType::HttpCallResponseTrailers, \"foo\");\n    addHeaderMapValue(WasmHeaderMapType::GrpcReceiveInitialMetadata, \"foo\", \"bar\");\n  }\n  void onReceive(size_t body_size) override {\n    auto response = getBufferBytes(WasmBufferType::GrpcReceiveBuffer, 0, body_size);\n    auto response_string = response->proto<google::protobuf::Value>().string_value();\n    google::protobuf::Value message;\n    if (response_string == \"close\") {\n      close();\n    } else {\n      
send(message, false);\n    }\n    logDebug(std::string(\"response \") + response_string);\n  }\n  void onReceiveTrailingMetadata(uint32_t) override {\n    auto h = getHeaderMapValue(WasmHeaderMapType::GrpcReceiveTrailingMetadata, \"foo\");\n    addHeaderMapValue(WasmHeaderMapType::GrpcReceiveTrailingMetadata, \"foo\", \"bar\");\n  }\n  void onRemoteClose(GrpcStatus) override {\n    auto p = getStatus();\n    logDebug(std::string(\"close \") + std::string(p.second->view()));\n    if (p.second->view() == \"close\") {\n      close();\n    } else if (p.second->view() == \"ok\") {\n      return;\n    } else {\n      reset();\n    }\n  }\n};\n\nFilterHeadersStatus GrpcStreamContext::onRequestHeaders(uint32_t, bool) {\n  GrpcService grpc_service;\n  grpc_service.mutable_envoy_grpc()->set_cluster_name(\"cluster\");\n  std::string grpc_service_string;\n  grpc_service.SerializeToString(&grpc_service_string);\n  HeaderStringPairs initial_metadata;\n  if (root()->grpcStreamHandler(\"bogus service string\", \"service\", \"method\", initial_metadata,\n                                std::unique_ptr<GrpcStreamHandlerBase>(\n                                    new MyGrpcStreamHandler())) != WasmResult::ParseFailure) {\n    logError(\"unexpected bogus service string OK\");\n  }\n  if (root()->grpcStreamHandler(grpc_service_string, \"service\", \"bad method\", initial_metadata,\n                                std::unique_ptr<GrpcStreamHandlerBase>(\n                                    new MyGrpcStreamHandler())) != WasmResult::InternalFailure) {\n    logError(\"unexpected bogus method OK\");\n  }\n  root()->grpcStreamHandler(grpc_service_string, \"service\", \"method\", initial_metadata,\n                            std::unique_ptr<GrpcStreamHandlerBase>(new MyGrpcStreamHandler()));\n  return FilterHeadersStatus::StopIteration;\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/test_shared_data_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <memory>\n#include <string>\n#include <unordered_map>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics_lite.h\"\n#else\n#include \"extensions/common/wasm/ext/envoy_null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(HttpWasmTestCpp)\n\nclass SharedDataRootContext : public RootContext {\npublic:\n  explicit SharedDataRootContext(uint32_t id, std::string_view root_id)\n      : RootContext(id, root_id) {}\n\n  void onTick() override;\n  void onQueueReady(uint32_t) override;\n};\n\nstatic RegisterContextFactory register_SharedDataRootContext(ROOT_FACTORY(SharedDataRootContext),\n                                                             \"shared_data\");\n\nvoid SharedDataRootContext::onTick() {\n  setHeaderMapPairs(WasmHeaderMapType::GrpcReceiveInitialMetadata, {});\n  setRequestHeaderPairs({{\"foo\", \"bar\"}});\n  WasmDataPtr value0;\n  if (getSharedData(\"shared_data_key_bad\", &value0) == WasmResult::NotFound) {\n    logDebug(\"get of bad key not found\");\n  }\n  CHECK_RESULT(setSharedData(\"shared_data_key1\", \"shared_data_value0\"));\n  CHECK_RESULT(setSharedData(\"shared_data_key1\", \"shared_data_value1\"));\n  CHECK_RESULT(setSharedData(\"shared_data_key2\", \"shared_data_value2\"));\n  uint32_t cas = 0;\n  auto value2 = getSharedDataValue(\"shared_data_key2\", &cas);\n  if (WasmResult::CasMismatch ==\n      setSharedData(\"shared_data_key2\", \"shared_data_value3\", cas + 1)) { // Bad cas.\n    logInfo(\"set CasMismatch\");\n  }\n}\n\nvoid SharedDataRootContext::onQueueReady(uint32_t) {\n  WasmDataPtr value0;\n  if (getSharedData(\"shared_data_key_bad\", &value0) == WasmResult::NotFound) {\n    logDebug(\"second get of bad key not found\");\n  }\n  auto value1 = getSharedDataValue(\"shared_data_key1\");\n  logDebug(\"get 1 \" + value1->toString());\n  auto value2 = getSharedDataValue(\"shared_data_key2\");\n  logCritical(\"get 2 \" + value2->toString());\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/test_data/test_shared_queue_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <memory>\n#include <string>\n#include <unordered_map>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics_lite.h\"\n#else\n#include \"extensions/common/wasm/ext/envoy_null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(HttpWasmTestCpp)\n\nclass SharedQueueContext : public Context {\npublic:\n  explicit SharedQueueContext(uint32_t id, RootContext* root) : Context(id, root) {}\n\n  FilterHeadersStatus onRequestHeaders(uint32_t, bool) override;\n};\n\nclass SharedQueueRootContext : public RootContext {\npublic:\n  explicit SharedQueueRootContext(uint32_t id, std::string_view root_id)\n      : RootContext(id, root_id) {}\n\n  bool onStart(size_t) override;\n  void onQueueReady(uint32_t) override;\n\n  uint32_t shared_queue_token_;\n};\n\nstatic RegisterContextFactory register_SharedQueueContext(CONTEXT_FACTORY(SharedQueueContext),\n                                                          ROOT_FACTORY(SharedQueueRootContext),\n                                                          \"shared_queue\");\n\nbool SharedQueueRootContext::onStart(size_t) {\n  CHECK_RESULT(registerSharedQueue(\"my_shared_queue\", &shared_queue_token_));\n  return true;\n}\n\nFilterHeadersStatus SharedQueueContext::onRequestHeaders(uint32_t, bool) {\n  uint32_t token;\n  if (resolveSharedQueue(\"vm_id\", \"bad_shared_queue\", &token) == WasmResult::NotFound) {\n    logWarn(\"onRequestHeaders not found bad_shared_queue\");\n  }\n  CHECK_RESULT(resolveSharedQueue(\"vm_id\", \"my_shared_queue\", &token));\n  if (enqueueSharedQueue(token, \"data1\") == WasmResult::Ok) {\n    logWarn(\"onRequestHeaders enqueue Ok\");\n  }\n  return FilterHeadersStatus::Continue;\n}\n\nvoid SharedQueueRootContext::onQueueReady(uint32_t token) {\n  if (token == shared_queue_token_) {\n    logInfo(\"onQueueReady\");\n  }\n  std::unique_ptr<WasmData> data;\n  if (dequeueSharedQueue(9999999 /* bad token */, &data) == WasmResult::NotFound) {\n    logWarn(\"onQueueReady 
bad token not found\");\n  }\n  if (dequeueSharedQueue(token, &data) == WasmResult::Ok) {\n    logDebug(\"data \" + data->toString() + \" Ok\");\n  }\n  if (dequeueSharedQueue(token, &data) == WasmResult::Empty) {\n    logWarn(\"onQueueReady extra data not found\");\n  }\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/filters/http/wasm/wasm_filter_test.cc",
    "content": "#include \"common/http/message_impl.h\"\n\n#include \"extensions/filters/http/wasm/wasm_filter.h\"\n\n#include \"test/mocks/network/connection.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/test_common/wasm_base.h\"\n\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nMATCHER_P(MapEq, rhs, \"\") {\n  const Envoy::ProtobufWkt::Struct& obj = arg;\n  EXPECT_TRUE(rhs.size() > 0);\n  for (auto const& entry : rhs) {\n    EXPECT_EQ(obj.fields().at(entry.first).string_value(), entry.second);\n  }\n  return true;\n}\n\nusing BufferFunction = std::function<void(::Envoy::Buffer::Instance&)>;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HttpFilters {\nnamespace Wasm {\n\nusing Envoy::Extensions::Common::Wasm::CreateContextFn;\nusing Envoy::Extensions::Common::Wasm::Plugin;\nusing Envoy::Extensions::Common::Wasm::PluginSharedPtr;\nusing Envoy::Extensions::Common::Wasm::Wasm;\nusing Envoy::Extensions::Common::Wasm::WasmHandleSharedPtr;\nusing proxy_wasm::ContextBase;\nusing GrpcService = envoy::config::core::v3::GrpcService;\nusing WasmFilterConfig = envoy::extensions::filters::http::wasm::v3::Wasm;\n\nclass TestFilter : public Envoy::Extensions::Common::Wasm::Context {\npublic:\n  TestFilter(Wasm* wasm, uint32_t root_context_id,\n             Envoy::Extensions::Common::Wasm::PluginSharedPtr plugin)\n      : Envoy::Extensions::Common::Wasm::Context(wasm, root_context_id, plugin) {}\n  MOCK_CONTEXT_LOG_;\n};\n\nclass TestRoot : public Envoy::Extensions::Common::Wasm::Context {\npublic:\n  TestRoot(Wasm* wasm, const std::shared_ptr<Plugin>& plugin) : Context(wasm, plugin) {}\n  MOCK_CONTEXT_LOG_;\n};\n\nclass WasmHttpFilterTest : public Common::Wasm::WasmHttpFilterTestBase<\n                               testing::TestWithParam<std::tuple<std::string, std::string>>> {\npublic:\n  WasmHttpFilterTest() = default;\n  ~WasmHttpFilterTest() override = default;\n\n  CreateContextFn createContextFn() 
{\n    return [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n      return new TestRoot(wasm, plugin);\n    };\n  }\n\n  void setup(const std::string& code, std::string root_id = \"\", std::string vm_configuration = \"\") {\n    setupBase(std::get<0>(GetParam()), code, createContextFn(), root_id, vm_configuration);\n  }\n  void setupTest(std::string root_id = \"\", std::string vm_configuration = \"\") {\n    std::string code;\n    if (std::get<0>(GetParam()) == \"null\") {\n      code = \"HttpWasmTestCpp\";\n    } else {\n      if (std::get<1>(GetParam()) == \"cpp\") {\n        code = TestEnvironment::readFileToStringForTest(TestEnvironment::runfilesPath(\n            \"test/extensions/filters/http/wasm/test_data/test_cpp.wasm\"));\n      } else {\n        auto filename = !root_id.empty() ? root_id : vm_configuration;\n        const auto basic_path = TestEnvironment::runfilesPath(\n            absl::StrCat(\"test/extensions/filters/http/wasm/test_data/\", filename));\n        code = TestEnvironment::readFileToStringForTest(basic_path + \"_rust.wasm\");\n      }\n    }\n    setupBase(std::get<0>(GetParam()), code, createContextFn(), root_id, vm_configuration);\n  }\n  void setupFilter(const std::string root_id = \"\") { setupFilterBase<TestFilter>(root_id); }\n\n  void setupGrpcStreamTest(Grpc::RawAsyncStreamCallbacks*& callbacks);\n\n  TestRoot& rootContext() { return *static_cast<TestRoot*>(root_context_); }\n  TestFilter& filter() { return *static_cast<TestFilter*>(context_.get()); }\n\nprotected:\n  NiceMock<Grpc::MockAsyncStream> async_stream_;\n  Grpc::MockAsyncClientManager async_client_manager_;\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto testing_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    std::make_tuple(\"v8\", \"cpp\"), std::make_tuple(\"v8\", \"rust\"),\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    
std::make_tuple(\"wavm\", \"cpp\"), std::make_tuple(\"wavm\", \"rust\"),\n#endif\n    std::make_tuple(\"null\", \"cpp\"));\nINSTANTIATE_TEST_SUITE_P(RuntimesAndLanguages, WasmHttpFilterTest, testing_values);\n\n// Bad code in initial config.\nTEST_P(WasmHttpFilterTest, BadCode) {\n  setup(\"bad code\");\n  EXPECT_EQ(wasm_, nullptr);\n}\n\n// Script touching headers only, request that is headers only.\nTEST_P(WasmHttpFilterTest, HeadersOnlyRequestHeadersOnly) {\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"onRequestHeaders 2 headers\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"header path /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onDone 2\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}, {\"server\", \"envoy\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true));\n  EXPECT_THAT(request_headers.get_(\"newheader\"), Eq(\"newheadervalue\"));\n  EXPECT_THAT(request_headers.get_(\"server\"), Eq(\"envoy-wasm\"));\n  // Test some errors.\n  EXPECT_EQ(filter().continueStream(static_cast<proxy_wasm::WasmStreamType>(9999)),\n            proxy_wasm::WasmResult::BadArgument);\n  EXPECT_EQ(filter().closeStream(static_cast<proxy_wasm::WasmStreamType>(9999)),\n            proxy_wasm::WasmResult::BadArgument);\n  Http::TestResponseHeaderMapImpl response_headers;\n  EXPECT_EQ(filter().encode100ContinueHeaders(response_headers),\n            Http::FilterHeadersStatus::Continue);\n  filter().onDestroy();\n}\n\nTEST_P(WasmHttpFilterTest, AllHeadersAndTrailers) {\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));\n  EXPECT_CALL(filter(),\n              
log_(spdlog::level::debug, Eq(absl::string_view(\"onRequestHeaders 2 headers\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"header path /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onDone 2\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}, {\"server\", \"envoy\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  EXPECT_THAT(request_headers.get_(\"newheader\"), Eq(\"newheadervalue\"));\n  EXPECT_THAT(request_headers.get_(\"server\"), Eq(\"envoy-wasm\"));\n  Http::TestRequestTrailerMapImpl request_trailers{};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter().decodeTrailers(request_trailers));\n  Http::MetadataMap request_metadata{};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter().decodeMetadata(request_metadata));\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false));\n  Http::TestResponseTrailerMapImpl response_trailers{};\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter().encodeTrailers(response_trailers));\n  Http::MetadataMap response_metadata{};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter().encodeMetadata(response_metadata));\n  filter().onDestroy();\n}\n\nTEST_P(WasmHttpFilterTest, AllHeadersAndTrailersNotStarted) {\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));\n  Http::TestRequestTrailerMapImpl request_trailers{};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter().decodeTrailers(request_trailers));\n  Http::MetadataMap request_metadata{};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter().decodeMetadata(request_metadata));\n  Http::TestResponseHeaderMapImpl response_headers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, 
filter().encodeHeaders(response_headers, false));\n  Http::TestResponseTrailerMapImpl response_trailers{};\n  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter().encodeTrailers(response_trailers));\n  Http::MetadataMap response_metadata{};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter().encodeMetadata(response_metadata));\n  Buffer::OwnedImpl data(\"data\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data, false));\n  filter().onDestroy();\n}\n\n// Script touching headers only, request that is headers only.\nTEST_P(WasmHttpFilterTest, HeadersOnlyRequestHeadersAndBody) {\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"onRequestHeaders 2 headers\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"header path /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onDone 2\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  EXPECT_FALSE(filter().endOfStream(proxy_wasm::WasmStreamType::Request));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true));\n  filter().onDestroy();\n}\n\nTEST_P(WasmHttpFilterTest, HeadersStopAndContinue) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): This hand off is not currently possible in the Rust SDK.\n    return;\n  }\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::debug, 
Eq(absl::string_view(\"onRequestHeaders 2 headers\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"header path /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onDone 2\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}, {\"server\", \"envoy-wasm-pause\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, true));\n  root_context_->onTick(0);\n  filter().clearRouteCache();\n  EXPECT_THAT(request_headers.get_(\"newheader\"), Eq(\"newheadervalue\"));\n  EXPECT_THAT(request_headers.get_(\"server\"), Eq(\"envoy-wasm-continue\"));\n  filter().onDestroy();\n}\n\n#if 0\nTEST_P(WasmHttpFilterTest, HeadersStopAndEndStream) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): This hand off is not currently possible in the Rust SDK.\n    return;\n  }\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"onRequestHeaders 2 headers\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"header path /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onDone 2\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"},\n                                                 {\"server\", \"envoy-wasm-end-stream\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::ContinueAndEndStream,\n            filter().decodeHeaders(request_headers, true));\n  root_context_->onTick(0);\n  EXPECT_THAT(request_headers.get_(\"newheader\"), Eq(\"newheadervalue\"));\n  EXPECT_THAT(request_headers.get_(\"server\"), Eq(\"envoy-wasm-continue\"));\n  filter().onDestroy();\n}\n#endif\n\nTEST_P(WasmHttpFilterTest, HeadersStopAndBuffer) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // 
TODO(PiotrSikora): This hand off is not currently possible in the Rust SDK.\n    return;\n  }\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"onRequestHeaders 2 headers\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"header path /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onDone 2\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"},\n                                                 {\"server\", \"envoy-wasm-stop-buffer\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer,\n            filter().decodeHeaders(request_headers, true));\n  root_context_->onTick(0);\n  EXPECT_THAT(request_headers.get_(\"newheader\"), Eq(\"newheadervalue\"));\n  EXPECT_THAT(request_headers.get_(\"server\"), Eq(\"envoy-wasm-continue\"));\n  filter().onDestroy();\n}\n\nTEST_P(WasmHttpFilterTest, HeadersStopAndWatermark) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): This hand off is not currently possible in the Rust SDK.\n    return;\n  }\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"onRequestHeaders 2 headers\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"header path /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onDone 2\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"},\n                                                 {\"server\", \"envoy-wasm-stop-watermark\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,\n            
filter().decodeHeaders(request_headers, true));\n  root_context_->onTick(0);\n  EXPECT_THAT(request_headers.get_(\"newheader\"), Eq(\"newheadervalue\"));\n  EXPECT_THAT(request_headers.get_(\"server\"), Eq(\"envoy-wasm-continue\"));\n  filter().onDestroy();\n}\n\n// Script that reads the body.\nTEST_P(WasmHttpFilterTest, BodyRequestReadBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}, {\"x-test-operation\", \"ReadBody\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true));\n  filter().onDestroy();\n}\n\n// Script that prepends and appends to the body.\nTEST_P(WasmHttpFilterTest, BodyRequestPrependAndAppendToBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::err, Eq(absl::string_view(\"onBody prepend.hello.append\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::err,\n                             Eq(absl::string_view(\"onBody prepend.prepend.hello.append.append\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"},\n                                                 {\"x-test-operation\", \"PrependAndAppendToBody\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  if (std::get<1>(GetParam()) == \"rust\") {\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true));\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data, true));\n  } else {\n    // This status is not available in the rust SDK.\n    // TODO: update all SDKs to the new revision of the spec and update the tests accordingly.\n    
EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter().decodeData(data, true));\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter().encodeData(data, true));\n  }\n  filter().onDestroy();\n}\n\n// Script that replaces the body.\nTEST_P(WasmHttpFilterTest, BodyRequestReplaceBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody replace\")))).Times(2);\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"},\n                                                 {\"x-test-operation\", \"ReplaceBody\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  if (std::get<1>(GetParam()) == \"rust\") {\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true));\n    EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data, true));\n  } else {\n    // This status is not available in the rust SDK.\n    // TODO: update all SDKs to the new revision of the spec and update the tests accordingly.\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter().decodeData(data, true));\n    EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter().encodeData(data, true));\n  }\n  filter().onDestroy();\n}\n\n// Script that removes the body.\nTEST_P(WasmHttpFilterTest, BodyRequestRemoveBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody \"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"},\n                                                 {\"x-test-operation\", \"RemoveBody\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true));\n  
filter().onDestroy();\n}\n\n// Script that buffers the body.\nTEST_P(WasmHttpFilterTest, BodyRequestBufferBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"},\n                                                 {\"x-test-operation\", \"BufferBody\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl bufferedBody;\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(&bufferedBody));\n  EXPECT_CALL(decoder_callbacks_, modifyDecodingBuffer(_))\n      .WillRepeatedly(Invoke([&bufferedBody](BufferFunction f) { f(bufferedBody); }));\n\n  Buffer::OwnedImpl data1(\"hello\");\n  bufferedBody.add(data1);\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello\")))).Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().decodeData(data1, false));\n\n  Buffer::OwnedImpl data2(\" again \");\n  bufferedBody.add(data2);\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello again \"))))\n      .Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().decodeData(data2, false));\n\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello again hello\"))))\n      .Times(1);\n  Buffer::OwnedImpl data3(\"hello\");\n  bufferedBody.add(data3);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data3, true));\n\n  // Verify that the response still works even though we buffered the request.\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"x-test-operation\", \"ReadBody\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false));\n  // Should not buffer this time\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody 
hello\")))).Times(2);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data1, false));\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data1, true));\n\n  filter().onDestroy();\n}\n\n// Script that prepends and appends to the buffered body.\nTEST_P(WasmHttpFilterTest, BodyRequestPrependAndAppendToBufferedBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::err, Eq(absl::string_view(\"onBody prepend.hello.append\"))));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":path\", \"/\"}, {\"x-test-operation\", \"PrependAndAppendToBufferedBody\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true));\n  filter().onDestroy();\n}\n\n// Script that replaces the buffered body.\nTEST_P(WasmHttpFilterTest, BodyRequestReplaceBufferedBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody replace\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"},\n                                                 {\"x-test-operation\", \"ReplaceBufferedBody\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true));\n  filter().onDestroy();\n}\n\n// Script that removes the buffered body.\nTEST_P(WasmHttpFilterTest, BodyRequestRemoveBufferedBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody \"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"},\n                                                 {\"x-test-operation\", \"RemoveBufferedBody\"}};\n  
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true));\n  filter().onDestroy();\n}\n\n// Script that buffers the first part of the body and streams the rest\nTEST_P(WasmHttpFilterTest, BodyRequestBufferThenStreamBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl bufferedBody;\n  EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(&bufferedBody));\n  EXPECT_CALL(decoder_callbacks_, modifyDecodingBuffer(_))\n      .WillRepeatedly(Invoke([&bufferedBody](BufferFunction f) { f(bufferedBody); }));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"x-test-operation\", \"BufferTwoBodies\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl data1(\"hello\");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello\")))).Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().decodeData(data1, false));\n  bufferedBody.add(data1);\n\n  Buffer::OwnedImpl data2(\", there, \");\n  bufferedBody.add(data2);\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello, there, \"))))\n      .Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().decodeData(data2, false));\n\n  // Previous callbacks returned \"Buffer\" so we have buffered so far\n  Buffer::OwnedImpl data3(\"world!\");\n  bufferedBody.add(data3);\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello, there, world!\"))))\n      .Times(1);\n  
EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data3, false));\n\n  // Last callback returned \"continue\" so we just see individual chunks.\n  Buffer::OwnedImpl data4(\"So it's \");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody So it's \"))))\n      .Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data4, false));\n\n  Buffer::OwnedImpl data5(\"goodbye, then!\");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody goodbye, then!\"))))\n      .Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data5, true));\n\n  filter().onDestroy();\n}\n\n// Script that buffers the first part of the body and streams the rest\nTEST_P(WasmHttpFilterTest, BodyResponseBufferThenStreamBody) {\n  setupTest(\"body\");\n  setupFilter(\"body\");\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n\n  Buffer::OwnedImpl bufferedBody;\n  EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer(_))\n      .WillRepeatedly(Invoke([&bufferedBody](BufferFunction f) { f(bufferedBody); }));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"x-test-operation\", \"BufferTwoBodies\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false));\n\n  Buffer::OwnedImpl data1(\"hello\");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello\")))).Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().encodeData(data1, false));\n  bufferedBody.add(data1);\n\n  Buffer::OwnedImpl data2(\", there, \");\n  bufferedBody.add(data2);\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello, there, \"))))\n      .Times(1);\n  
EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().encodeData(data2, false));\n\n  // Previous callbacks returned \"Buffer\" so we have buffered so far\n  Buffer::OwnedImpl data3(\"world!\");\n  bufferedBody.add(data3);\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::err, Eq(absl::string_view(\"onBody hello, there, world!\"))))\n      .Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data3, false));\n\n  // Last callback returned \"continue\" so we just see individual chunks.\n  Buffer::OwnedImpl data4(\"So it's \");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody So it's \"))))\n      .Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data4, false));\n\n  Buffer::OwnedImpl data5(\"goodbye, then!\");\n  EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view(\"onBody goodbye, then!\"))))\n      .Times(1);\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data5, true));\n\n  filter().onDestroy();\n}\n\n// Script testing AccessLog::Instance::log.\nTEST_P(WasmHttpFilterTest, AccessLog) {\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"onRequestHeaders 2 headers\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"header path /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onLog 2 /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onDone 2\"))));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_headers{};\n  Http::TestResponseTrailerMapImpl response_trailers{};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  filter().continueStream(proxy_wasm::WasmStreamType::Response);\n  
filter().closeStream(proxy_wasm::WasmStreamType::Response);\n  StreamInfo::MockStreamInfo log_stream_info;\n  filter().log(&request_headers, &response_headers, &response_trailers, log_stream_info);\n  filter().onDestroy();\n}\n\nTEST_P(WasmHttpFilterTest, AccessLogCreate) {\n  setupTest(\"\", \"headers\");\n  setupFilter();\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onLog 2 /\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"onDone 2\"))));\n\n  StreamInfo::MockStreamInfo log_stream_info;\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::TestResponseHeaderMapImpl response_headers{};\n  Http::TestResponseTrailerMapImpl response_trailers{};\n  filter().log(&request_headers, &response_headers, &response_trailers, log_stream_info);\n  filter().onDestroy();\n}\n\nTEST_P(WasmHttpFilterTest, AsyncCall) {\n  setupTest(\"async_call\");\n  setupFilter(\"async_call\");\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks = nullptr;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\"))).Times(testing::AtLeast(1));\n  EXPECT_CALL(cluster_manager_, get(Eq(\"bogus cluster\"))).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                      {\":path\", \"/\"},\n                                                      {\":authority\", \"foo\"},\n                                                      {\"content-length\", 
\"11\"}}),\n                      message->headers());\n            EXPECT_EQ((Http::TestRequestTrailerMapImpl{{\"trail\", \"cow\"}}), *message->trailers());\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_CALL(filter(), log_(spdlog::level::debug, Eq(\"response\")));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(\":status -> 200\")));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(\"onRequestHeaders\")))\n      .WillOnce(Invoke([&](uint32_t, absl::string_view) -> proxy_wasm::WasmResult {\n        Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl(\n            Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n        response_message->body().add(\"response\");\n        NiceMock<Tracing::MockSpan> span;\n        Http::TestResponseHeaderMapImpl response_header{{\":status\", \"200\"}};\n        callbacks->onBeforeFinalizeUpstreamSpan(span, &response_header);\n        callbacks->onSuccess(request, std::move(response_message));\n        return proxy_wasm::WasmResult::Ok;\n      }));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  EXPECT_NE(callbacks, nullptr);\n}\n\nTEST_P(WasmHttpFilterTest, AsyncCallBadCall) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): The Rust SDK does not support end_of_stream in on_http_request_headers.\n    return;\n  }\n  setupTest(\"async_call\");\n  setupFilter(\"async_call\");\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\"))).Times(testing::AtLeast(1));\n  EXPECT_CALL(cluster_manager_, get(Eq(\"bogus cluster\"))).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  // Just fail the send.\n  
EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks&,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            return nullptr;\n          }));\n\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true));\n}\n\nTEST_P(WasmHttpFilterTest, AsyncCallFailure) {\n  setupTest(\"async_call\");\n  setupFilter(\"async_call\");\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks = nullptr;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\"))).Times(testing::AtLeast(1));\n  EXPECT_CALL(cluster_manager_, get(Eq(\"bogus cluster\"))).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                      {\":path\", \"/\"},\n                                                      {\":authority\", \"foo\"},\n                                                      {\"content-length\", \"11\"}}),\n                      message->headers());\n            EXPECT_EQ((Http::TestRequestTrailerMapImpl{{\"trail\", \"cow\"}}), *message->trailers());\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(\"onRequestHeaders\")))\n      .WillOnce(Invoke([&](uint32_t, absl::string_view) -> proxy_wasm::WasmResult {\n        callbacks->onFailure(request, 
Http::AsyncClient::FailureReason::Reset);\n        return proxy_wasm::WasmResult::Ok;\n      }));\n  // TODO(PiotrSikora): RootContext handling is incomplete in the Rust SDK.\n  if (std::get<1>(GetParam()) == \"rust\") {\n    EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(\"async_call failed\")));\n  } else {\n    EXPECT_CALL(rootContext(), log_(spdlog::level::info, Eq(\"async_call failed\")));\n  }\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  EXPECT_NE(callbacks, nullptr);\n}\n\nTEST_P(WasmHttpFilterTest, AsyncCallAfterDestroyed) {\n  setupTest(\"async_call\");\n  setupFilter(\"async_call\");\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  Http::MockAsyncClientRequest request(&cluster_manager_.async_client_);\n  Http::AsyncClient::Callbacks* callbacks = nullptr;\n  EXPECT_CALL(cluster_manager_, get(Eq(\"cluster\"))).Times(testing::AtLeast(1));\n  EXPECT_CALL(cluster_manager_, get(Eq(\"bogus cluster\"))).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster(\"cluster\"));\n  EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            EXPECT_EQ((Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                      {\":path\", \"/\"},\n                                                      {\":authority\", \"foo\"},\n                                                      {\"content-length\", \"11\"}}),\n                      message->headers());\n            EXPECT_EQ((Http::TestRequestTrailerMapImpl{{\"trail\", \"cow\"}}), *message->trailers());\n            callbacks = &cb;\n            return &request;\n          }));\n\n  EXPECT_CALL(filter(), 
log_(spdlog::level::info, Eq(\"onRequestHeaders\")));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  EXPECT_CALL(request, cancel()).WillOnce([&]() { callbacks = nullptr; });\n\n  // Destroy the Context, Plugin and VM.\n  context_.reset();\n  plugin_.reset();\n  wasm_.reset();\n\n  Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n  response_message->body().add(\"response\");\n\n  // (Don't) Make the callback on the destroyed VM.\n  EXPECT_EQ(callbacks, nullptr);\n  if (callbacks) {\n    callbacks->onSuccess(request, std::move(response_message));\n  }\n}\n\nTEST_P(WasmHttpFilterTest, GrpcCall) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  setupTest(\"grpc_call\");\n  setupFilter(\"grpc_call\");\n  NiceMock<Grpc::MockAsyncRequest> request;\n  Grpc::RawAsyncRequestCallbacks* callbacks = nullptr;\n  Grpc::MockAsyncClientManager client_manager;\n  auto client_factory = std::make_unique<Grpc::MockAsyncClientFactory>();\n  auto async_client = std::make_unique<Grpc::MockAsyncClient>();\n  Tracing::Span* parent_span{};\n  EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _))\n      .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name,\n                           Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb,\n                           Tracing::Span& span, const Http::AsyncClient::RequestOptions& options)\n                           -> Grpc::AsyncRequest* {\n        EXPECT_EQ(service_full_name, \"service\");\n        EXPECT_EQ(method_name, \"method\");\n        ProtobufWkt::Value value;\n        EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length()));\n        EXPECT_EQ(value.string_value(), 
\"request\");\n        callbacks = &cb;\n        parent_span = &span;\n        EXPECT_EQ(options.timeout->count(), 1000);\n        return &request;\n      }));\n  EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr {\n    return std::move(async_client);\n  }));\n  EXPECT_CALL(cluster_manager_, grpcAsyncClientManager())\n      .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; }));\n  EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr {\n        return std::move(client_factory);\n      }));\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(\"response\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  ProtobufWkt::Value value;\n  value.set_string_value(\"response\");\n  std::string response_string;\n  EXPECT_TRUE(value.SerializeToString(&response_string));\n  auto response = std::make_unique<Buffer::OwnedImpl>(response_string);\n  EXPECT_NE(callbacks, nullptr);\n  NiceMock<Tracing::MockSpan> span;\n  if (callbacks) {\n    callbacks->onCreateInitialMetadata(request_headers);\n    callbacks->onSuccessRaw(std::move(response), span);\n  }\n}\n\nTEST_P(WasmHttpFilterTest, GrpcCallBadCall) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  setupTest(\"grpc_call\");\n  setupFilter(\"grpc_call\");\n  Grpc::MockAsyncClientManager client_manager;\n  auto client_factory = std::make_unique<Grpc::MockAsyncClientFactory>();\n  auto async_client = std::make_unique<Grpc::MockAsyncClient>();\n  EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _))\n      .WillOnce(Invoke([&](absl::string_view, absl::string_view, Buffer::InstancePtr&&,\n                           
Grpc::RawAsyncRequestCallbacks&, Tracing::Span&,\n                           const Http::AsyncClient::RequestOptions&) -> Grpc::AsyncRequest* {\n        return nullptr;\n      }));\n  EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr {\n    return std::move(async_client);\n  }));\n  EXPECT_CALL(cluster_manager_, grpcAsyncClientManager())\n      .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; }));\n  EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr {\n        return std::move(client_factory);\n      }));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true));\n}\n\nTEST_P(WasmHttpFilterTest, GrpcCallFailure) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  setupTest(\"grpc_call\");\n  setupFilter(\"grpc_call\");\n  NiceMock<Grpc::MockAsyncRequest> request;\n  Grpc::RawAsyncRequestCallbacks* callbacks = nullptr;\n  Grpc::MockAsyncClientManager client_manager;\n  auto client_factory = std::make_unique<Grpc::MockAsyncClientFactory>();\n  auto async_client = std::make_unique<Grpc::MockAsyncClient>();\n  Tracing::Span* parent_span{};\n  EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _))\n      .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name,\n                           Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb,\n                           Tracing::Span& span, const Http::AsyncClient::RequestOptions& options)\n                           -> Grpc::AsyncRequest* {\n        EXPECT_EQ(service_full_name, \"service\");\n        EXPECT_EQ(method_name, \"method\");\n        ProtobufWkt::Value value;\n        
EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length()));\n        EXPECT_EQ(value.string_value(), \"request\");\n        callbacks = &cb;\n        parent_span = &span;\n        EXPECT_EQ(options.timeout->count(), 1000);\n        return &request;\n      }));\n  EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr {\n    return std::move(async_client);\n  }));\n  EXPECT_CALL(cluster_manager_, grpcAsyncClientManager())\n      .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; }));\n  EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr {\n        return std::move(client_factory);\n      }));\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(\"failure bad\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  // Test some additional error paths.\n  EXPECT_EQ(filter().grpcSend(99999, \"\", false), proxy_wasm::WasmResult::BadArgument);\n  EXPECT_EQ(filter().grpcSend(10000, \"\", false), proxy_wasm::WasmResult::NotFound);\n  EXPECT_EQ(filter().grpcCancel(9999), proxy_wasm::WasmResult::NotFound);\n  EXPECT_EQ(filter().grpcCancel(10000), proxy_wasm::WasmResult::NotFound);\n  EXPECT_EQ(filter().grpcClose(9999), proxy_wasm::WasmResult::NotFound);\n  EXPECT_EQ(filter().grpcClose(10000), proxy_wasm::WasmResult::NotFound);\n\n  ProtobufWkt::Value value;\n  value.set_string_value(\"response\");\n  std::string response_string;\n  EXPECT_TRUE(value.SerializeToString(&response_string));\n  auto response = std::make_unique<Buffer::OwnedImpl>(response_string);\n  EXPECT_NE(callbacks, nullptr);\n  NiceMock<Tracing::MockSpan> span;\n  if (callbacks) {\n    callbacks->onFailure(Grpc::Status::WellKnownGrpcStatus::Canceled, \"bad\", 
span);\n  }\n}\n\nTEST_P(WasmHttpFilterTest, GrpcCallCancel) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  setupTest(\"grpc_call\");\n  setupFilter(\"grpc_call\");\n  NiceMock<Grpc::MockAsyncRequest> request;\n  Grpc::RawAsyncRequestCallbacks* callbacks = nullptr;\n  Grpc::MockAsyncClientManager client_manager;\n  auto client_factory = std::make_unique<Grpc::MockAsyncClientFactory>();\n  auto async_client = std::make_unique<Grpc::MockAsyncClient>();\n  Tracing::Span* parent_span{};\n  EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _))\n      .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name,\n                           Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb,\n                           Tracing::Span& span, const Http::AsyncClient::RequestOptions& options)\n                           -> Grpc::AsyncRequest* {\n        EXPECT_EQ(service_full_name, \"service\");\n        EXPECT_EQ(method_name, \"method\");\n        ProtobufWkt::Value value;\n        EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length()));\n        EXPECT_EQ(value.string_value(), \"request\");\n        callbacks = &cb;\n        parent_span = &span;\n        EXPECT_EQ(options.timeout->count(), 1000);\n        return &request;\n      }));\n  EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr {\n    return std::move(async_client);\n  }));\n  EXPECT_CALL(cluster_manager_, grpcAsyncClientManager())\n      .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; }));\n  EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr {\n        return std::move(client_factory);\n      }));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  rootContext().onQueueReady(0);\n}\n\nTEST_P(WasmHttpFilterTest, GrpcCallClose) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  setupTest(\"grpc_call\");\n  setupFilter(\"grpc_call\");\n  NiceMock<Grpc::MockAsyncRequest> request;\n  Grpc::RawAsyncRequestCallbacks* callbacks = nullptr;\n  Grpc::MockAsyncClientManager client_manager;\n  auto client_factory = std::make_unique<Grpc::MockAsyncClientFactory>();\n  auto async_client = std::make_unique<Grpc::MockAsyncClient>();\n  Tracing::Span* parent_span{};\n  EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _))\n      .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name,\n                           Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb,\n                           Tracing::Span& span, const Http::AsyncClient::RequestOptions& options)\n                           -> Grpc::AsyncRequest* {\n        EXPECT_EQ(service_full_name, \"service\");\n        EXPECT_EQ(method_name, \"method\");\n        ProtobufWkt::Value value;\n        EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length()));\n        EXPECT_EQ(value.string_value(), \"request\");\n        callbacks = &cb;\n        parent_span = &span;\n        EXPECT_EQ(options.timeout->count(), 1000);\n        return &request;\n      }));\n  EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr {\n    return std::move(async_client);\n  }));\n  EXPECT_CALL(cluster_manager_, grpcAsyncClientManager())\n      .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; }));\n  EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> 
Grpc::AsyncClientFactoryPtr {\n        return std::move(client_factory);\n      }));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  rootContext().onQueueReady(1);\n}\n\nTEST_P(WasmHttpFilterTest, GrpcCallAfterDestroyed) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  setupTest(\"grpc_call\");\n  setupFilter(\"grpc_call\");\n  Grpc::MockAsyncRequest request;\n  Grpc::RawAsyncRequestCallbacks* callbacks = nullptr;\n  Grpc::MockAsyncClientManager client_manager;\n  auto client_factory = std::make_unique<Grpc::MockAsyncClientFactory>();\n  auto async_client = std::make_unique<Grpc::MockAsyncClient>();\n  Tracing::Span* parent_span{};\n  EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _))\n      .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name,\n                           Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb,\n                           Tracing::Span& span, const Http::AsyncClient::RequestOptions& options)\n                           -> Grpc::AsyncRequest* {\n        EXPECT_EQ(service_full_name, \"service\");\n        EXPECT_EQ(method_name, \"method\");\n        ProtobufWkt::Value value;\n        EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length()));\n        EXPECT_EQ(value.string_value(), \"request\");\n        callbacks = &cb;\n        parent_span = &span;\n        EXPECT_EQ(options.timeout->count(), 1000);\n        return &request;\n      }));\n  EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr {\n    return std::move(async_client);\n  }));\n  EXPECT_CALL(cluster_manager_, grpcAsyncClientManager())\n      .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; 
}));\n  EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr {\n        return std::move(client_factory);\n      }));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  EXPECT_CALL(request, cancel()).WillOnce([&]() { callbacks = nullptr; });\n\n  // Destroy the Context, Plugin and VM.\n  context_.reset();\n  plugin_.reset();\n  wasm_.reset();\n\n  ProtobufWkt::Value value;\n  value.set_string_value(\"response\");\n  std::string response_string;\n  EXPECT_TRUE(value.SerializeToString(&response_string));\n  auto response = std::make_unique<Buffer::OwnedImpl>(response_string);\n  EXPECT_EQ(callbacks, nullptr);\n  NiceMock<Tracing::MockSpan> span;\n  if (callbacks) {\n    callbacks->onSuccessRaw(std::move(response), span);\n  }\n}\n\nvoid WasmHttpFilterTest::setupGrpcStreamTest(Grpc::RawAsyncStreamCallbacks*& callbacks) {\n  setupTest(\"grpc_stream\");\n  setupFilter(\"grpc_stream\");\n\n  EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, _))\n      .WillRepeatedly(\n          Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr {\n            auto client_factory = std::make_unique<Grpc::MockAsyncClientFactory>();\n            EXPECT_CALL(*client_factory, create)\n                .WillRepeatedly(Invoke([&]() -> Grpc::RawAsyncClientPtr {\n                  auto async_client = std::make_unique<Grpc::MockAsyncClient>();\n                  EXPECT_CALL(*async_client, startRaw(_, _, _, _))\n                      .WillRepeatedly(Invoke(\n                          [&](absl::string_view service_full_name, absl::string_view method_name,\n                              Grpc::RawAsyncStreamCallbacks& cb,\n                              const Http::AsyncClient::StreamOptions&) -> 
Grpc::RawAsyncStream* {\n                            EXPECT_EQ(service_full_name, \"service\");\n                            if (method_name != \"method\") {\n                              return nullptr;\n                            }\n                            callbacks = &cb;\n                            return &async_stream_;\n                          }));\n                  return async_client;\n                }));\n            return client_factory;\n          }));\n  EXPECT_CALL(cluster_manager_, grpcAsyncClientManager())\n      .WillRepeatedly(Invoke([&]() -> Grpc::AsyncClientManager& { return async_client_manager_; }));\n}\n\nTEST_P(WasmHttpFilterTest, GrpcStream) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  Grpc::RawAsyncStreamCallbacks* callbacks = nullptr;\n  setupGrpcStreamTest(callbacks);\n\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(\"response response\")));\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(\"close done\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  ProtobufWkt::Value value;\n  value.set_string_value(\"response\");\n  std::string response_string;\n  EXPECT_TRUE(value.SerializeToString(&response_string));\n  auto response = std::make_unique<Buffer::OwnedImpl>(response_string);\n  EXPECT_NE(callbacks, nullptr);\n  if (callbacks) {\n    Http::TestRequestHeaderMapImpl create_initial_metadata{{\"test\", \"create_initial_metadata\"}};\n    callbacks->onCreateInitialMetadata(create_initial_metadata);\n    callbacks->onReceiveInitialMetadata(std::make_unique<Http::TestResponseHeaderMapImpl>());\n    callbacks->onReceiveMessageRaw(std::move(response));\n    callbacks->onReceiveTrailingMetadata(std::make_unique<Http::TestResponseTrailerMapImpl>());\n    
callbacks->onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Ok, \"done\");\n  }\n}\n\n// Local close followed by remote close.\nTEST_P(WasmHttpFilterTest, GrpcStreamCloseLocal) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  Grpc::RawAsyncStreamCallbacks* callbacks = nullptr;\n  setupGrpcStreamTest(callbacks);\n\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(\"response close\")));\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(\"close ok\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  ProtobufWkt::Value value;\n  value.set_string_value(\"close\");\n  std::string response_string;\n  EXPECT_TRUE(value.SerializeToString(&response_string));\n  auto response = std::make_unique<Buffer::OwnedImpl>(response_string);\n  EXPECT_NE(callbacks, nullptr);\n  if (callbacks) {\n    Http::TestRequestHeaderMapImpl create_initial_metadata{{\"test\", \"create_initial_metadata\"}};\n    callbacks->onCreateInitialMetadata(create_initial_metadata);\n    callbacks->onReceiveInitialMetadata(std::make_unique<Http::TestResponseHeaderMapImpl>());\n    callbacks->onReceiveMessageRaw(std::move(response));\n    callbacks->onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Ok, \"ok\");\n  }\n}\n\n// Remote close followed by local close.\nTEST_P(WasmHttpFilterTest, GrpcStreamCloseRemote) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  Grpc::RawAsyncStreamCallbacks* callbacks = nullptr;\n  setupGrpcStreamTest(callbacks);\n\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(\"response response\")));\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(\"close close\")));\n  Http::TestRequestHeaderMapImpl 
request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  ProtobufWkt::Value value;\n  value.set_string_value(\"response\");\n  std::string response_string;\n  EXPECT_TRUE(value.SerializeToString(&response_string));\n  auto response = std::make_unique<Buffer::OwnedImpl>(response_string);\n  EXPECT_NE(callbacks, nullptr);\n  if (callbacks) {\n    Http::TestRequestHeaderMapImpl create_initial_metadata{{\"test\", \"create_initial_metadata\"}};\n    callbacks->onCreateInitialMetadata(create_initial_metadata);\n    callbacks->onReceiveInitialMetadata(std::make_unique<Http::TestResponseHeaderMapImpl>());\n    callbacks->onReceiveMessageRaw(std::move(response));\n    callbacks->onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Ok, \"close\");\n  }\n}\n\nTEST_P(WasmHttpFilterTest, GrpcStreamCancel) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  Grpc::RawAsyncStreamCallbacks* callbacks = nullptr;\n  setupGrpcStreamTest(callbacks);\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  ProtobufWkt::Value value;\n  value.set_string_value(\"response\");\n  std::string response_string;\n  EXPECT_TRUE(value.SerializeToString(&response_string));\n  auto response = std::make_unique<Buffer::OwnedImpl>(response_string);\n  EXPECT_NE(callbacks, nullptr);\n  NiceMock<Tracing::MockSpan> span;\n  if (callbacks) {\n    Http::TestRequestHeaderMapImpl create_initial_metadata{{\"test\", \"create_initial_metadata\"}};\n    callbacks->onCreateInitialMetadata(create_initial_metadata);\n    callbacks->onReceiveInitialMetadata(std::make_unique<Http::TestResponseHeaderMapImpl>(\n        Http::TestResponseHeaderMapImpl{{\"test\", \"reset\"}}));\n  
}\n}\n\nTEST_P(WasmHttpFilterTest, GrpcStreamOpenAtShutdown) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK.\n    return;\n  }\n  Grpc::RawAsyncStreamCallbacks* callbacks = nullptr;\n  setupGrpcStreamTest(callbacks);\n\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(\"response response\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter().decodeHeaders(request_headers, false));\n\n  ProtobufWkt::Value value;\n  value.set_string_value(\"response\");\n  std::string response_string;\n  EXPECT_TRUE(value.SerializeToString(&response_string));\n  auto response = std::make_unique<Buffer::OwnedImpl>(response_string);\n  EXPECT_NE(callbacks, nullptr);\n  NiceMock<Tracing::MockSpan> span;\n  if (callbacks) {\n    Http::TestRequestHeaderMapImpl create_initial_metadata{{\"test\", \"create_initial_metadata\"}};\n    callbacks->onCreateInitialMetadata(create_initial_metadata);\n    callbacks->onReceiveInitialMetadata(std::make_unique<Http::TestResponseHeaderMapImpl>());\n    callbacks->onReceiveMessageRaw(std::move(response));\n    callbacks->onReceiveTrailingMetadata(std::make_unique<Http::TestResponseTrailerMapImpl>());\n  }\n\n  // Destroy the Context, Plugin and VM.\n  context_.reset();\n  plugin_.reset();\n  wasm_.reset();\n}\n\n// Test metadata access including CEL expressions.\n// TODO: re-enable this on Windows if and when the CEL `Antlr` parser compiles on Windows.\n#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM)\nTEST_P(WasmHttpFilterTest, Metadata) {\n  setupTest(\"\", \"metadata\");\n  setupFilter();\n  envoy::config::core::v3::Node node_data;\n  ProtobufWkt::Value node_val;\n  node_val.set_string_value(\"wasm_node_get_value\");\n  (*node_data.mutable_metadata()->mutable_fields())[\"wasm_node_get_key\"] = node_val;\n  EXPECT_CALL(local_info_, 
node()).WillRepeatedly(ReturnRef(node_data));\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"onTick wasm_node_get_value\"))));\n\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::err, Eq(absl::string_view(\"onBody wasm_node_get_value\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"header path /\"))));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::trace,\n                   Eq(absl::string_view(\"Struct wasm_request_get_value wasm_request_get_value\"))));\n  if (std::get<1>(GetParam()) != \"rust\") {\n    // TODO(PiotrSikora): not yet supported in the Rust SDK.\n    EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"server is envoy-wasm\"))));\n  }\n\n  request_stream_info_.metadata_.mutable_filter_metadata()->insert(\n      Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\n          HttpFilters::HttpFilterNames::get().Wasm,\n          MessageUtil::keyValueStruct(\"wasm_request_get_key\", \"wasm_request_get_value\")));\n\n  rootContext().onTick(0);\n\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));\n  absl::optional<std::chrono::nanoseconds> dur = std::chrono::nanoseconds(15000000);\n  EXPECT_CALL(request_stream_info_, requestComplete()).WillRepeatedly(Return(dur));\n  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"duration is 15000000\"))));\n  if (std::get<1>(GetParam()) != \"rust\") {\n    // TODO(PiotrSikora): not yet supported in the Rust SDK.\n    EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view(\"grpc service: test\"))));\n  }\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}, {\"biz\", \"baz\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true));\n\n  
StreamInfo::MockStreamInfo log_stream_info;\n  filter().log(&request_headers, nullptr, nullptr, log_stream_info);\n\n  const auto& result = request_stream_info_.filterState()->getDataReadOnly<Common::Wasm::WasmState>(\n      \"wasm.wasm_request_set_key\");\n  EXPECT_EQ(\"wasm_request_set_value\", result.value());\n\n  filter().onDestroy();\n  filter().onDestroy(); // Does nothing.\n}\n#endif\n\nTEST_P(WasmHttpFilterTest, Property) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): test not yet implemented using Rust SDK.\n    return;\n  }\n  setupTest(\"\", \"property\");\n  setupFilter();\n  envoy::config::core::v3::Node node_data;\n  ProtobufWkt::Value node_val;\n  node_val.set_string_value(\"sample_data\");\n  (*node_data.mutable_metadata()->mutable_fields())[\"istio.io/metadata\"] = node_val;\n  EXPECT_CALL(local_info_, node()).WillRepeatedly(ReturnRef(node_data));\n\n  request_stream_info_.metadata_.mutable_filter_metadata()->insert(\n      Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\n          HttpFilters::HttpFilterNames::get().Wasm,\n          MessageUtil::keyValueStruct(\"wasm_request_get_key\", \"wasm_request_get_value\")));\n  EXPECT_CALL(request_stream_info_, responseCode()).WillRepeatedly(Return(403));\n  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));\n\n  // test outputs should match inputs\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::warn, Eq(absl::string_view(\"request.path: /test_context\"))));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::warn, Eq(absl::string_view(\"node.metadata: sample_data\"))));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::warn, Eq(absl::string_view(\"metadata: wasm_request_get_value\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"response.code: 403\"))));\n  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view(\"state: wasm_value\"))));\n\n  
root_context_->onTick(0);\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/test_context\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true));\n  StreamInfo::MockStreamInfo log_stream_info;\n  request_stream_info_.route_name_ = \"route12\";\n  request_stream_info_.requested_server_name_ = \"w3.org\";\n  NiceMock<Network::MockConnection> connection;\n  EXPECT_CALL(connection, id()).WillRepeatedly(Return(4));\n  EXPECT_CALL(encoder_callbacks_, connection()).WillRepeatedly(Return(&connection));\n  NiceMock<Router::MockRouteEntry> route_entry;\n  EXPECT_CALL(request_stream_info_, routeEntry()).WillRepeatedly(Return(&route_entry));\n  filter().log(&request_headers, nullptr, nullptr, log_stream_info);\n}\n\nTEST_P(WasmHttpFilterTest, SharedData) {\n  setupTest(\"shared_data\");\n  EXPECT_CALL(rootContext(), log_(spdlog::level::info, Eq(absl::string_view(\"set CasMismatch\"))));\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"get 1 shared_data_value1\"))));\n  if (std::get<1>(GetParam()) == \"rust\") {\n    EXPECT_CALL(rootContext(),\n                log_(spdlog::level::warn, Eq(absl::string_view(\"get 2 shared_data_value2\"))));\n  } else {\n    EXPECT_CALL(rootContext(),\n                log_(spdlog::level::critical, Eq(absl::string_view(\"get 2 shared_data_value2\"))));\n  }\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"get of bad key not found\"))));\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::debug, Eq(absl::string_view(\"second get of bad key not found\"))));\n  rootContext().onTick(0);\n  rootContext().onQueueReady(0);\n}\n\nTEST_P(WasmHttpFilterTest, SharedQueue) {\n  setupTest(\"shared_queue\");\n  setupFilter(\"shared_queue\");\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::warn, Eq(absl::string_view(\"onRequestHeaders enqueue Ok\"))));\n  EXPECT_CALL(filter(), 
log_(spdlog::level::warn,\n                             Eq(absl::string_view(\"onRequestHeaders not found bad_shared_queue\"))));\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::warn, Eq(absl::string_view(\"onQueueReady bad token not found\"))))\n      .Times(2);\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::warn, Eq(absl::string_view(\"onQueueReady extra data not found\"))))\n      .Times(2);\n  EXPECT_CALL(rootContext(), log_(spdlog::level::info, Eq(absl::string_view(\"onQueueReady\"))))\n      .Times(2);\n  EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(absl::string_view(\"data data1 Ok\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":path\", \"/\"}};\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true));\n  auto token = proxy_wasm::resolveQueueForTest(\"vm_id\", \"my_shared_queue\");\n  root_context_->onQueueReady(token);\n}\n\n// Script using a root_id which is not registered.\nTEST_P(WasmHttpFilterTest, RootIdNotRegistered) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): proxy_get_property(\"root_id\") is not yet supported in the Rust SDK.\n    return;\n  }\n  setupTest();\n  setupFilter();\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true));\n}\n\n// Script using an explicit root_id which is registered.\nTEST_P(WasmHttpFilterTest, RootId1) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): proxy_get_property(\"root_id\") is not yet supported in the Rust SDK.\n    return;\n  }\n  setupTest(\"context1\");\n  setupFilter(\"context1\");\n  EXPECT_CALL(filter(), log_(spdlog::level::debug, Eq(absl::string_view(\"onRequestHeaders1 2\"))));\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true));\n}\n\n// Script using an explicit 
root_id which is registered.\nTEST_P(WasmHttpFilterTest, RootId2) {\n  if (std::get<1>(GetParam()) == \"rust\") {\n    // TODO(PiotrSikora): proxy_get_property(\"root_id\") is not yet supported in the Rust SDK.\n    return;\n  }\n  setupTest(\"context2\");\n  setupFilter(\"context2\");\n  EXPECT_CALL(filter(), log_(spdlog::level::debug, Eq(absl::string_view(\"onRequestHeaders2 2\"))));\n  Http::TestRequestHeaderMapImpl request_headers;\n  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true));\n}\n\n} // namespace Wasm\n} // namespace HttpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/common/fuzz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_proto_library(\n    name = \"listener_filter_fuzzer_proto\",\n    srcs = [\"listener_filter_fuzzer.proto\"],\n)\n\nenvoy_cc_test_library(\n    name = \"listener_filter_fuzzer_lib\",\n    srcs = [\"listener_filter_fuzzer.cc\"],\n    hdrs = [\"listener_filter_fuzzer.h\"],\n    deps = [\n        \":listener_filter_fakes\",\n        \":listener_filter_fuzzer_proto_cc_proto\",\n        \"//include/envoy/network:filter_interface\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"listener_filter_fakes\",\n    srcs = [\"listener_filter_fakes.cc\"],\n    hdrs = [\"listener_filter_fakes.h\"],\n    deps = [\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//test/mocks/network:network_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"fuzzed_input_test\",\n    srcs = [\"fuzzed_input_test.cc\"],\n    deps = [\n        \":listener_filter_fuzzer_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/listener/common/fuzz/fuzzed_input_test.cc",
    "content": "#include <memory>\n\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\n\nTEST(FuzzedInputStream, Empty) {\n  std::vector<uint8_t> buffer;\n  std::vector<size_t> indices;\n  FuzzedInputStream data(buffer, indices);\n  EXPECT_TRUE(data.empty());\n  EXPECT_TRUE(data.done());\n}\n\nTEST(FuzzedInputStream, OneRead) {\n  std::vector<uint8_t> buffer{'h', 'e', 'l', 'l', 'o'};\n  std::vector<size_t> indices{4};\n  FuzzedInputStream data(buffer, indices);\n  EXPECT_FALSE(data.empty());\n  EXPECT_EQ(data.size(), 5);\n  EXPECT_TRUE(data.done());\n\n  std::array<uint8_t, 5> read_data;\n\n  // Test peeking\n  EXPECT_EQ(data.read(read_data.data(), 5, true).rc_, 5);\n  EXPECT_EQ(data.size(), 5);\n\n  // Test length > data.size()\n  EXPECT_EQ(data.read(read_data.data(), 10, true).rc_, 5);\n  EXPECT_EQ(data.size(), 5);\n\n  // Test non-peeking\n  EXPECT_EQ(data.read(read_data.data(), 3, false).rc_, 3);\n  EXPECT_EQ(data.size(), 2);\n\n  // Test reaching end-of-stream\n  EXPECT_EQ(data.read(read_data.data(), 5, false).rc_, 2);\n  EXPECT_EQ(data.size(), 0);\n}\n\nTEST(FuzzedInputStream, MultipleReads) {\n  std::vector<uint8_t> buffer{'h', 'e', 'l', 'l', 'o'};\n  std::vector<size_t> indices{1, 3, 4};\n  FuzzedInputStream data(buffer, indices);\n  EXPECT_FALSE(data.empty());\n  EXPECT_EQ(data.size(), 2);\n  EXPECT_FALSE(data.done());\n\n  std::array<uint8_t, 5> read_data;\n\n  // Test peeking (first read)\n  EXPECT_EQ(data.read(read_data.data(), 5, true).rc_, 2);\n  EXPECT_EQ(data.size(), 2);\n\n  data.next();\n  EXPECT_FALSE(data.done());\n  EXPECT_EQ(data.size(), 4);\n\n  // Test non-peeking (second read)\n  EXPECT_EQ(data.read(read_data.data(), 3, false).rc_, 3);\n  EXPECT_EQ(data.size(), 1);\n\n  data.next();\n  EXPECT_TRUE(data.done());\n  EXPECT_EQ(data.size(), 2);\n\n  // Test non-peeking (third read) and reaching 
end-of-stream\n  EXPECT_EQ(data.read(read_data.data(), 5, false).rc_, 2);\n  EXPECT_EQ(data.size(), 0);\n}\n\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc",
    "content": "#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\n\nNetwork::IoHandle& FakeConnectionSocket::ioHandle() { return *io_handle_; }\n\nconst Network::IoHandle& FakeConnectionSocket::ioHandle() const { return *io_handle_; }\n\nvoid FakeConnectionSocket::setLocalAddress(\n    const Network::Address::InstanceConstSharedPtr& local_address) {\n  local_address_ = local_address;\n  if (local_address_ != nullptr) {\n    addr_type_ = local_address_->type();\n  }\n}\n\nvoid FakeConnectionSocket::setRemoteAddress(\n    const Network::Address::InstanceConstSharedPtr& remote_address) {\n  remote_address_ = remote_address;\n}\n\nconst Network::Address::InstanceConstSharedPtr& FakeConnectionSocket::localAddress() const {\n  return local_address_;\n}\n\nconst Network::Address::InstanceConstSharedPtr& FakeConnectionSocket::remoteAddress() const {\n  return remote_address_;\n}\n\nNetwork::Address::Type FakeConnectionSocket::addressType() const { return addr_type_; }\n\nabsl::optional<Network::Address::IpVersion> FakeConnectionSocket::ipVersion() const {\n  if (local_address_ == nullptr || addr_type_ != Network::Address::Type::Ip) {\n    return absl::nullopt;\n  }\n\n  return local_address_->ip()->version();\n}\n\nvoid FakeConnectionSocket::setDetectedTransportProtocol(absl::string_view protocol) {\n  transport_protocol_ = std::string(protocol);\n}\n\nabsl::string_view FakeConnectionSocket::detectedTransportProtocol() const {\n  return transport_protocol_;\n}\n\nvoid FakeConnectionSocket::setRequestedApplicationProtocols(\n    const std::vector<absl::string_view>& protocols) {\n  application_protocols_.clear();\n  for (const auto& protocol : protocols) {\n    application_protocols_.emplace_back(protocol);\n  }\n}\n\nconst std::vector<std::string>& FakeConnectionSocket::requestedApplicationProtocols() const {\n  return application_protocols_;\n}\n\nvoid 
FakeConnectionSocket::setRequestedServerName(absl::string_view server_name) {\n  server_name_ = std::string(server_name);\n}\n\nabsl::string_view FakeConnectionSocket::requestedServerName() const { return server_name_; }\n\nApi::SysCallIntResult FakeConnectionSocket::getSocketOption(int level, int, void* optval,\n                                                            socklen_t*) const {\n#ifdef SOL_IP\n  switch (level) {\n  case SOL_IPV6:\n    static_cast<sockaddr_storage*>(optval)->ss_family = AF_INET6;\n    break;\n  case SOL_IP:\n    static_cast<sockaddr_storage*>(optval)->ss_family = AF_INET;\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  return Api::SysCallIntResult{0, 0};\n#else\n  // TODO: Waiting to determine if connection redirection possible, see\n  // Network::Utility::getOriginalDst()\n  return Api::SysCallIntResult{-1, 0};\n#endif\n}\n\nabsl::optional<std::chrono::milliseconds> FakeConnectionSocket::lastRoundTripTime() { return {}; }\n\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h",
    "content": "#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"test/mocks/network/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\n\nstatic constexpr int kFakeSocketFd = 42;\n\nclass FakeConnectionSocket : public Network::MockConnectionSocket {\npublic:\n  FakeConnectionSocket()\n      : io_handle_(std::make_unique<Network::IoSocketHandleImpl>(kFakeSocketFd)),\n        local_address_(nullptr), remote_address_(nullptr) {}\n\n  ~FakeConnectionSocket() override { io_handle_->close(); }\n\n  Network::IoHandle& ioHandle() override;\n\n  const Network::IoHandle& ioHandle() const override;\n\n  void setLocalAddress(const Network::Address::InstanceConstSharedPtr& local_address) override;\n\n  void setRemoteAddress(const Network::Address::InstanceConstSharedPtr& remote_address) override;\n\n  const Network::Address::InstanceConstSharedPtr& localAddress() const override;\n\n  const Network::Address::InstanceConstSharedPtr& remoteAddress() const override;\n\n  Network::Address::Type addressType() const override;\n\n  absl::optional<Network::Address::IpVersion> ipVersion() const override;\n\n  void setRequestedApplicationProtocols(const std::vector<absl::string_view>& protocols) override;\n\n  const std::vector<std::string>& requestedApplicationProtocols() const override;\n\n  void setDetectedTransportProtocol(absl::string_view protocol) override;\n\n  absl::string_view detectedTransportProtocol() const override;\n\n  void setRequestedServerName(absl::string_view server_name) override;\n\n  absl::string_view requestedServerName() const override;\n\n  Api::SysCallIntResult getSocketOption(int level, int, void* optval, socklen_t*) const override;\n\n  absl::optional<std::chrono::milliseconds> lastRoundTripTime() override;\n\nprivate:\n  const Network::IoHandlePtr io_handle_;\n  Network::Address::InstanceConstSharedPtr local_address_;\n  
Network::Address::InstanceConstSharedPtr remote_address_;\n  Network::Address::Type addr_type_;\n  std::vector<std::string> application_protocols_;\n  std::string transport_protocol_;\n  std::string server_name_;\n};\n\n// TODO: Move over to Fake (name is confusing)\nclass FakeOsSysCalls : public Api::OsSysCallsImpl {\npublic:\n  MOCK_METHOD(Api::SysCallSizeResult, recv, (os_fd_t, void*, size_t, int));\n  MOCK_METHOD(Api::SysCallIntResult, ioctl, (os_fd_t, unsigned long int, void*));\n};\n\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc",
    "content": "#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\n\nvoid ListenerFilterFuzzer::fuzz(\n    Network::ListenerFilter& filter,\n    const test::extensions::filters::listener::FilterFuzzTestCase& input) {\n  try {\n    socket_.setLocalAddress(Network::Utility::resolveUrl(input.sock().local_address()));\n  } catch (const EnvoyException& e) {\n    socket_.setLocalAddress(Network::Utility::resolveUrl(\"tcp://0.0.0.0:0\"));\n  }\n  try {\n    socket_.setRemoteAddress(Network::Utility::resolveUrl(input.sock().remote_address()));\n  } catch (const EnvoyException& e) {\n    socket_.setRemoteAddress(Network::Utility::resolveUrl(\"tcp://0.0.0.0:0\"));\n  }\n\n  FuzzedInputStream data(input);\n\n  if (!data.empty()) {\n    ON_CALL(os_sys_calls_, recv(kFakeSocketFd, _, _, _))\n        .WillByDefault(testing::Return(Api::SysCallSizeResult{static_cast<ssize_t>(0), 0}));\n\n    ON_CALL(dispatcher_, createFileEvent_(_, _, _, _))\n        .WillByDefault(testing::DoAll(testing::SaveArg<1>(&file_event_callback_),\n                                      testing::SaveArg<3>(&events_),\n                                      testing::ReturnNew<NiceMock<Event::MockFileEvent>>()));\n  }\n\n  filter.onAccept(cb_);\n\n  if (file_event_callback_ == nullptr) {\n    // If filter does not call createFileEvent (i.e. 
original_dst and original_src)\n    return;\n  }\n\n  if (!data.empty()) {\n    ON_CALL(os_sys_calls_, ioctl(kFakeSocketFd, FIONREAD, _))\n        .WillByDefault(\n            Invoke([&data](os_fd_t, unsigned long int, void* argp) -> Api::SysCallIntResult {\n              int bytes_avail = static_cast<int>(data.size());\n              memcpy(argp, &bytes_avail, sizeof(int));\n              return Api::SysCallIntResult{bytes_avail, 0};\n            }));\n    {\n      testing::InSequence s;\n\n      EXPECT_CALL(os_sys_calls_, recv(kFakeSocketFd, _, _, _))\n          .Times(testing::AnyNumber())\n          .WillRepeatedly(Invoke(\n              [&data](os_fd_t, void* buffer, size_t length, int flags) -> Api::SysCallSizeResult {\n                return data.read(buffer, length, flags == MSG_PEEK);\n              }));\n    }\n\n    bool got_continue = false;\n\n    ON_CALL(cb_, continueFilterChain(true))\n        .WillByDefault(testing::InvokeWithoutArgs([&got_continue]() { got_continue = true; }));\n\n    while (!got_continue) {\n      if (data.done()) { // End of stream reached but not done\n        if (events_ & Event::FileReadyType::Closed) {\n          file_event_callback_(Event::FileReadyType::Closed);\n        }\n        return;\n      } else {\n        file_event_callback_(Event::FileReadyType::Read);\n      }\n\n      data.next();\n    }\n  }\n}\n\nFuzzedInputStream::FuzzedInputStream(\n    const test::extensions::filters::listener::FilterFuzzTestCase& input)\n    : nreads_(input.data_size()) {\n  size_t len = 0;\n  for (int i = 0; i < nreads_; i++) {\n    len += input.data(i).size();\n  }\n\n  data_.reserve(len);\n\n  for (int i = 0; i < nreads_; i++) {\n    data_.insert(data_.end(), input.data(i).begin(), input.data(i).end());\n    indices_.push_back(data_.size() - 1);\n  }\n}\n\nFuzzedInputStream::FuzzedInputStream(std::vector<uint8_t> buffer, std::vector<size_t> indices)\n    : nreads_(indices.size()), data_(std::move(buffer)), indices_(std::move(indices)) 
{}\n\nvoid FuzzedInputStream::next() {\n  if (!done()) {\n    nread_++;\n  }\n}\n\nApi::SysCallSizeResult FuzzedInputStream::read(void* buffer, size_t length, bool peek) {\n  const size_t len = std::min(size(), length); // Number of bytes to write\n  memcpy(buffer, data_.data() + index_, len);\n\n  if (!peek) {\n    // If not peeking, written bytes will be marked as read\n    index_ += len;\n  }\n\n  return Api::SysCallSizeResult{static_cast<ssize_t>(len), 0};\n}\n\nsize_t FuzzedInputStream::size() const { return indices_[nread_] - index_ + 1; }\n\nbool FuzzedInputStream::done() { return nread_ >= nreads_ - 1; }\n\nbool FuzzedInputStream::empty() { return nreads_ == 0 || data_.empty(); }\n\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h",
    "content": "#include \"envoy/network/filter.h\"\n\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h\"\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\n\nclass ListenerFilterFuzzer {\npublic:\n  ListenerFilterFuzzer() {\n    ON_CALL(cb_, socket()).WillByDefault(testing::ReturnRef(socket_));\n    ON_CALL(cb_, dispatcher()).WillByDefault(testing::ReturnRef(dispatcher_));\n    ON_CALL(cb_, dynamicMetadata()).WillByDefault(testing::ReturnRef(metadata_));\n    ON_CALL(Const(cb_), dynamicMetadata()).WillByDefault(testing::ReturnRef(metadata_));\n  }\n\n  void fuzz(Network::ListenerFilter& filter,\n            const test::extensions::filters::listener::FilterFuzzTestCase& input);\n\nprivate:\n  FakeOsSysCalls os_sys_calls_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls_{&os_sys_calls_};\n  NiceMock<Network::MockListenerFilterCallbacks> cb_;\n  FakeConnectionSocket socket_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Event::FileReadyCb file_event_callback_;\n  uint32_t events_;\n  envoy::config::core::v3::Metadata metadata_;\n};\n\nclass FuzzedInputStream {\npublic:\n  FuzzedInputStream(const test::extensions::filters::listener::FilterFuzzTestCase& input);\n\n  FuzzedInputStream(std::vector<uint8_t> buffer, std::vector<size_t> indices);\n\n  // Makes data from the next read available to read()\n  void next();\n\n  // Copies data into buffer and returns the number of bytes written\n  Api::SysCallSizeResult read(void* buffer, size_t length, bool peek);\n\n  // Returns the number of bytes currently available to read()\n  size_t size() const;\n\n  // Returns true if end of stream reached (no more reads)\n  bool done();\n\n  // 
Returns true if data field in proto is empty\n  bool empty();\n\nprivate:\n  const int nreads_; // Number of reads\n  int nread_ = 0;    // Counter of current read\n  size_t index_ = 0; // Index of first unread byte\n  std::vector<uint8_t> data_;\n  std::vector<size_t> indices_; // Ending indices for each read\n};\n\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto",
    "content": "syntax = \"proto3\";\n\npackage test.extensions.filters.listener;\n\nmessage Socket {\n  string local_address = 1;\n  string remote_address = 2;\n}\n\nmessage FilterFuzzTestCase {\n  Socket sock = 1;\n  repeated bytes data = 2;\n}"
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"http_inspector_test\",\n    srcs = [\"http_inspector_test.cc\"],\n    extension_name = \"envoy.filters.listener.http_inspector\",\n    #TODO(davinci26): The test passes on Windows *but* http inspector\n    # *used* to rely on Event::FileTriggerType::Edge and we got away with it\n    # because we mock the dispatcher. Need to verify that the scenario is\n    # actually working.\n    deps = [\n        \"//source/common/common:hex_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/filters/listener/http_inspector:http_inspector_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"http_inspector_config_test\",\n    srcs = [\"http_inspector_config_test.cc\"],\n    extension_name = \"envoy.filters.listener.http_inspector\",\n    deps = [\n        \"//source/extensions/filters/listener:well_known_names\",\n        \"//source/extensions/filters/listener/http_inspector:config\",\n        \"//source/extensions/filters/listener/http_inspector:http_inspector_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:listener_factory_context_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"http_inspector_fuzz_test\",\n    srcs = [\"http_inspector_fuzz_test.cc\"],\n    corpus = \"http_inspector_corpus\",\n    deps = [\n        
\"//source/extensions/filters/listener/http_inspector:http_inspector_lib\",\n        \"//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc",
    "content": "#include \"extensions/filters/listener/http_inspector/http_inspector.h\"\n#include \"extensions/filters/listener/well_known_names.h\"\n\n#include \"test/mocks/server/listener_factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace HttpInspector {\nnamespace {\n\nTEST(HttpInspectorConfigFactoryTest, TestCreateFactory) {\n  Server::Configuration::NamedListenerFilterConfigFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::NamedListenerFilterConfigFactory>::\n          getFactory(ListenerFilters::ListenerFilterNames::get().HttpInspector);\n\n  EXPECT_EQ(factory->name(), ListenerFilters::ListenerFilterNames::get().HttpInspector);\n\n  const std::string yaml = R\"EOF(\n      {}\n)EOF\";\n\n  ProtobufTypes::MessagePtr proto_config = factory->createEmptyConfigProto();\n  TestUtility::loadFromYaml(yaml, *proto_config);\n\n  Server::Configuration::MockListenerFactoryContext context;\n  EXPECT_CALL(context, scope()).Times(1);\n  Network::ListenerFilterFactoryCb cb =\n      factory->createListenerFilterFactoryFromProto(*proto_config, nullptr, context);\n\n  Network::MockListenerFilterManager manager;\n  Network::ListenerFilterPtr added_filter;\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&added_filter](const Network::ListenerFilterMatcherSharedPtr&,\n                                       Network::ListenerFilterPtr& filter) {\n        added_filter = std::move(filter);\n      }));\n  cb(manager);\n\n  // Make sure we actually create the correct type!\n  EXPECT_NE(dynamic_cast<HttpInspector::Filter*>(added_filter.get()), nullptr);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(HttpInspectorConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.listener.http_inspector\";\n\n  ASSERT_NE(\n   
   nullptr,\n      Registry::FactoryRegistry<\n          Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name));\n}\n\n} // namespace\n} // namespace HttpInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_corpus/bad_header",
    "content": "data: \"X\""
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_corpus/incomplete_header",
    "content": "data: \"GE\""
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_method",
    "content": "data: \"BAD /anything HTTP/1.1\""
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_request",
    "content": "data: \"BAD /anything HTTP/1.1\\r\\n\""
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_http10",
    "content": "data: \"GET /anyt\"\ndata: \"hing HT\"\ndata: \"TP/1.0\\r\""
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_incomplete",
    "content": "data: \"G\"\ndata: \"E\""
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http10",
    "content": "data: \"GET /anything HTTP/1.0\\r\\nhost: google.com\\r\\nuser-agent: curl/7.64.0\\r\\naccept: */*\\r\\nx-forwarded-proto: http\\r\\nx-request-id: a52df4a0-ed00-4a19-86a7-80e5049c6c84\\r\\nx-envoy-expected-rq-timeout-ms: 15000\\r\\ncontent-length: 0\\r\\n\\r\\n\""
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http11",
    "content": "data: \"GET /anything HTTP/1.1\\r\\nhost: google.com\\r\\nuser-agent: curl/7.64.0\\r\\naccept: */*\\r\\nx-forwarded-proto: http\\r\\nx-request-id: a52df4a0-ed00-4a19-86a7-80e5049c6c84\\r\\nx-envoy-expected-rq-timeout-ms: 15000\\r\\ncontent-length: 3\\r\\n\\r\\nfoo\""
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http2",
    "content": "data: \"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n\\x00\\x00\\x0c\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x10\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x08\\x00\\x00\\x00\\x00\\x00\\x0f\\xff\\x00\\x01\\x00\\x00}\\x01\\x05\\x00\\x00\\x00\\x01A\\x8a\\xa0\\xe4\\x1d\\x13\\x9d\\t\\xb8\\xf0\\x00\\x0f\\x04\\x88`uzL\\xe6\\xaaf\\x05\\x82\\x86z\\x88%\\xb6P\\xc3\\xab\\xb8\\xd2\\xe0S\\x03*/*@\\x8d\\xf2\\xb4\\xa7\\xb3\\xc0\\xec\\x90\\xb2-]\\x87I\\xff\\x83\\x9d)\\xaf@\\x89\\xf2\\xb5\\x85\\xediP\\x95\\x8d\\'\\x9a\\x18\\x9e\\x03\\xf1\\xcaU\\x82&_Y\\xa7[\\n\\xc3\\x11\\x19Y\\xc7\\xe4\\x90\\x04\\x90\\x8d\\xb6\\xe8?@\\x96\\xf2\\xb1j\\xee\\x7fK\\x17\\xcde\\\"K\\\"\\xd6vY&\\xa4\\xa7\\xb5+R\\x8f\\x84\\x0b`\\x00?\""
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc",
    "content": "#include \"extensions/filters/listener/http_inspector/http_inspector.h\"\n\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h\"\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace HttpInspector {\n\nDEFINE_PROTO_FUZZER(const test::extensions::filters::listener::FilterFuzzTestCase& input) {\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  }\n\n  Stats::IsolatedStoreImpl store;\n  ConfigSharedPtr cfg = std::make_shared<Config>(store);\n  auto filter = std::make_unique<Filter>(cfg);\n\n  ListenerFilterFuzzer fuzzer;\n  fuzzer.fuzz(*filter, input);\n}\n\n} // namespace HttpInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/http_inspector/http_inspector_test.cc",
    "content": "#include \"common/common/hex.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"extensions/filters/listener/http_inspector/http_inspector.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnNew;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace HttpInspector {\nnamespace {\n\nclass HttpInspectorTest : public testing::Test {\npublic:\n  HttpInspectorTest()\n      : cfg_(std::make_shared<Config>(store_)),\n        io_handle_(std::make_unique<Network::IoSocketHandleImpl>(42)) {}\n  ~HttpInspectorTest() override { io_handle_->close(); }\n\n  void init(bool include_inline_recv = true) {\n    filter_ = std::make_unique<Filter>(cfg_);\n\n    EXPECT_CALL(cb_, socket()).WillRepeatedly(ReturnRef(socket_));\n    EXPECT_CALL(socket_, detectedTransportProtocol()).WillRepeatedly(Return(\"raw_buffer\"));\n    EXPECT_CALL(cb_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_));\n    EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(ReturnRef(*io_handle_));\n    EXPECT_CALL(socket_, ioHandle()).WillRepeatedly(ReturnRef(*io_handle_));\n\n    if (include_inline_recv) {\n      EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n          .WillOnce(Return(Api::SysCallSizeResult{static_cast<ssize_t>(0), 0}));\n\n      EXPECT_CALL(dispatcher_,\n                  createFileEvent_(_, _, Event::PlatformDefaultTriggerType,\n                                   Event::FileReadyType::Read | Event::FileReadyType::Closed))\n          .WillOnce(DoAll(SaveArg<1>(&file_event_callback_),\n  
                        ReturnNew<NiceMock<Event::MockFileEvent>>()));\n\n      filter_->onAccept(cb_);\n    }\n  }\n\n  NiceMock<Api::MockOsSysCalls> os_sys_calls_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls_{&os_sys_calls_};\n  Stats::IsolatedStoreImpl store_;\n  ConfigSharedPtr cfg_;\n  std::unique_ptr<Filter> filter_;\n  Network::MockListenerFilterCallbacks cb_;\n  Network::MockConnectionSocket socket_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Event::FileReadyCb file_event_callback_;\n  Network::IoHandlePtr io_handle_;\n};\n\nTEST_F(HttpInspectorTest, SkipHttpInspectForTLS) {\n  filter_ = std::make_unique<Filter>(cfg_);\n\n  EXPECT_CALL(cb_, socket()).WillRepeatedly(ReturnRef(socket_));\n  EXPECT_CALL(socket_, ioHandle()).WillRepeatedly(ReturnRef(*io_handle_));\n  EXPECT_CALL(socket_, detectedTransportProtocol()).WillRepeatedly(Return(\"TLS\"));\n  EXPECT_EQ(filter_->onAccept(cb_), Network::FilterStatus::Continue);\n}\n\nTEST_F(HttpInspectorTest, InlineReadIoError) {\n  init(/*include_inline_recv=*/false);\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(Invoke([](os_fd_t, void*, size_t, int) -> Api::SysCallSizeResult {\n        return Api::SysCallSizeResult{ssize_t(-1), 0};\n      }));\n  EXPECT_CALL(dispatcher_, createFileEvent_(_, _, _, _)).Times(0);\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(socket_, close()).Times(1);\n  auto accepted = filter_->onAccept(cb_);\n  EXPECT_EQ(accepted, Network::FilterStatus::StopIteration);\n  // It's arguable if io error should bump the not_found counter\n  EXPECT_EQ(0, cfg_->stats().http_not_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InlineReadInspectHttp10) {\n  init(/*include_inline_recv=*/false);\n  const absl::string_view header =\n      \"GET /anything HTTP/1.0\\r\\nhost: google.com\\r\\nuser-agent: curl/7.64.0\\r\\naccept: \"\n      \"*/*\\r\\nx-forwarded-proto: http\\r\\nx-request-id: \"\n      
\"a52df4a0-ed00-4a19-86a7-80e5049c6c84\\r\\nx-envoy-expected-rq-timeout-ms: \"\n      \"15000\\r\\ncontent-length: 0\\r\\n\\r\\n\";\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http10};\n\n  EXPECT_CALL(dispatcher_, createFileEvent_(_, _, _, _)).Times(0);\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  auto accepted = filter_->onAccept(cb_);\n  EXPECT_EQ(accepted, Network::FilterStatus::Continue);\n  EXPECT_EQ(1, cfg_->stats().http10_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InlineReadParseError) {\n  init(/*include_inline_recv=*/false);\n  const absl::string_view header =\n      \"NOT_A_LEGAL_PREFIX /anything HTTP/1.0\\r\\nhost: google.com\\r\\nuser-agent: \"\n      \"curl/7.64.0\\r\\naccept: \"\n      \"*/*\\r\\nx-forwarded-proto: http\\r\\nx-request-id: \"\n      \"a52df4a0-ed00-4a19-86a7-80e5049c6c84\\r\\nx-envoy-expected-rq-timeout-ms: \"\n      \"15000\\r\\ncontent-length: 0\\r\\n\\r\\n\";\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n  EXPECT_CALL(dispatcher_, createFileEvent_(_, _, _, _)).Times(0);\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  auto accepted = filter_->onAccept(cb_);\n  EXPECT_EQ(accepted, Network::FilterStatus::Continue);\n  EXPECT_EQ(1, 
cfg_->stats().http_not_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InspectHttp10) {\n  init(true);\n  const absl::string_view header =\n      \"GET /anything HTTP/1.0\\r\\nhost: google.com\\r\\nuser-agent: curl/7.64.0\\r\\naccept: \"\n      \"*/*\\r\\nx-forwarded-proto: http\\r\\nx-request-id: \"\n      \"a52df4a0-ed00-4a19-86a7-80e5049c6c84\\r\\nx-envoy-expected-rq-timeout-ms: \"\n      \"15000\\r\\ncontent-length: 0\\r\\n\\r\\n\";\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http10};\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().http10_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InspectHttp11) {\n  init();\n  const absl::string_view header =\n      \"GET /anything HTTP/1.1\\r\\nhost: google.com\\r\\nuser-agent: curl/7.64.0\\r\\naccept: \"\n      \"*/*\\r\\nx-forwarded-proto: http\\r\\nx-request-id: \"\n      \"a52df4a0-ed00-4a19-86a7-80e5049c6c84\\r\\nx-envoy-expected-rq-timeout-ms: \"\n      \"15000\\r\\ncontent-length: 0\\r\\n\\r\\n\";\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http11};\n\n  
EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().http11_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InspectHttp11WithNonEmptyRequestBody) {\n  init();\n  const absl::string_view header =\n      \"GET /anything HTTP/1.1\\r\\nhost: google.com\\r\\nuser-agent: curl/7.64.0\\r\\naccept: \"\n      \"*/*\\r\\nx-forwarded-proto: http\\r\\nx-request-id: \"\n      \"a52df4a0-ed00-4a19-86a7-80e5049c6c84\\r\\nx-envoy-expected-rq-timeout-ms: \"\n      \"15000\\r\\ncontent-length: 3\\r\\n\\r\\nfoo\";\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http11};\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().http11_found_.value());\n}\n\nTEST_F(HttpInspectorTest, ExtraSpaceInRequestLine) {\n  init();\n  const absl::string_view header = \"GET  /anything  HTTP/1.1\\r\\n\\r\\n\";\n  //                                   ^^         ^^\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n\n  const std::vector<absl::string_view> 
alpn_protos{Http::Utility::AlpnNames::get().Http11};\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().http11_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InvalidHttpMethod) {\n  init();\n  const absl::string_view header = \"BAD /anything HTTP/1.1\";\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(0, cfg_->stats().http11_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InvalidHttpRequestLine) {\n  init();\n  const absl::string_view header = \"BAD /anything HTTP/1.1\\r\\n\";\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(cb_, continueFilterChain(_));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().http_not_found_.value());\n}\n\nTEST_F(HttpInspectorTest, OldHttpProtocol) {\n  init();\n  const absl::string_view header = \"GET /anything HTTP/0.9\\r\\n\";\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, 
size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http10};\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().http10_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InvalidRequestLine) {\n  init();\n  const absl::string_view header = \"GET /anything HTTP/1.1 BadRequestLine\\r\\n\";\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= header.size());\n            memcpy(buffer, header.data(), header.size());\n            return Api::SysCallSizeResult{ssize_t(header.size()), 0};\n          }));\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().http_not_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InspectHttp2) {\n  init();\n\n  const std::string header =\n      \"505249202a20485454502f322e300d0a0d0a534d0d0a0d0a00000c04000000000000041000000000020000000000\"\n      \"00040800000000000fff000100007d010500000001418aa0e41d139d09b8f0000f048860757a4ce6aa660582867a\"\n      \"8825b650c3abb8d2e053032a2f2a408df2b4a7b3c0ec90b22d5d8749ff839d29af4089f2b585ed6950958d279a18\"\n      \"9e03f1ca5582265f59a75b0ac3111959c7e49004908db6e83f4096f2b16aee7f4b17cd65224b22d6765926a4a7b5\"\n      \"2b528f840b60003f\";\n  std::vector<uint8_t> data = Hex::decode(header);\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          
Invoke([&data](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= data.size());\n            memcpy(buffer, data.data(), data.size());\n            return Api::SysCallSizeResult{ssize_t(data.size()), 0};\n          }));\n\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http2c};\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().http2_found_.value());\n}\n\nTEST_F(HttpInspectorTest, InvalidConnectionPreface) {\n  init();\n\n  const std::string header = \"505249202a20485454502f322e300d0a\";\n  const std::vector<uint8_t> data = Hex::decode(header);\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&data](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= data.size());\n            memcpy(buffer, data.data(), data.size());\n            return Api::SysCallSizeResult{ssize_t(data.size()), 0};\n          }));\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(cb_, continueFilterChain(true)).Times(0);\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(0, cfg_->stats().http_not_found_.value());\n}\n\nTEST_F(HttpInspectorTest, ReadError) {\n  init();\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n    return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_NOT_SUP};\n  }));\n  EXPECT_CALL(cb_, continueFilterChain(false));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().read_error_.value());\n}\n\nTEST_F(HttpInspectorTest, MultipleReadsHttp2) {\n  init();\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http2c};\n\n  const std::string header =\n      
\"505249202a20485454502f322e300d0a0d0a534d0d0a0d0a00000c04000000000000041000000000020000000000\"\n      \"00040800000000000fff000100007d010500000001418aa0e41d139d09b8f0000f048860757a4ce6aa660582867a\"\n      \"8825b650c3abb8d2e053032a2f2a408df2b4a7b3c0ec90b22d5d8749ff839d29af4089f2b585ed6950958d279a18\"\n      \"9e03f1ca5582265f59a75b0ac3111959c7e49004908db6e83f4096f2b16aee7f4b17cd65224b22d6765926a4a7b5\"\n      \"2b528f840b60003f\";\n  const std::vector<uint8_t> data = Hex::decode(header);\n  {\n    InSequence s;\n\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n      return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN};\n    }));\n\n    for (size_t i = 1; i <= 24; i++) {\n      EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n          .WillOnce(Invoke(\n              [&data, i](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n                ASSERT(length >= i);\n                memcpy(buffer, data.data(), i);\n                return Api::SysCallSizeResult{ssize_t(i), 0};\n              }));\n    }\n  }\n\n  bool got_continue = false;\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() {\n    got_continue = true;\n  }));\n  while (!got_continue) {\n    file_event_callback_(Event::FileReadyType::Read);\n  }\n  EXPECT_EQ(1, cfg_->stats().http2_found_.value());\n}\n\nTEST_F(HttpInspectorTest, MultipleReadsHttp2BadPreface) {\n  init();\n  const std::string header = \"505249202a20485454502f322e300d0a0d0c\";\n  const std::vector<uint8_t> data = Hex::decode(header);\n  {\n    InSequence s;\n\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n      return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN};\n    }));\n\n    for (size_t i = 1; i <= data.size(); i++) {\n      EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n       
   .WillOnce(Invoke(\n              [&data, i](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n                ASSERT(length >= i);\n                memcpy(buffer, data.data(), i);\n                return Api::SysCallSizeResult{ssize_t(i), 0};\n              }));\n    }\n  }\n\n  bool got_continue = false;\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() {\n    got_continue = true;\n  }));\n  while (!got_continue) {\n    file_event_callback_(Event::FileReadyType::Read);\n  }\n  EXPECT_EQ(1, cfg_->stats().http_not_found_.value());\n}\n\nTEST_F(HttpInspectorTest, MultipleReadsHttp1) {\n  init();\n  const absl::string_view data = \"GET /anything HTTP/1.0\\r\";\n  {\n    InSequence s;\n\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n      return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN};\n    }));\n\n    for (size_t i = 1; i <= data.size(); i++) {\n      EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n          .WillOnce(Invoke(\n              [&data, i](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n                ASSERT(length >= i);\n                memcpy(buffer, data.data(), i);\n                return Api::SysCallSizeResult{ssize_t(i), 0};\n              }));\n    }\n  }\n\n  bool got_continue = false;\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http10};\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() {\n    got_continue = true;\n  }));\n  while (!got_continue) {\n    file_event_callback_(Event::FileReadyType::Read);\n  }\n  EXPECT_EQ(1, cfg_->stats().http10_found_.value());\n}\n\nTEST_F(HttpInspectorTest, MultipleReadsHttp1IncompleteHeader) {\n  init();\n  const absl::string_view 
data = \"GE\";\n  bool end_stream = false;\n  {\n    InSequence s;\n\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n      return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN};\n    }));\n\n    for (size_t i = 1; i <= data.size(); i++) {\n      EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n          .WillOnce(Invoke([&data, &end_stream, i](os_fd_t, void* buffer, size_t length,\n                                                   int) -> Api::SysCallSizeResult {\n            ASSERT(length >= i);\n            memcpy(buffer, data.data(), i);\n            if (i == data.size()) {\n              end_stream = true;\n            }\n\n            return Api::SysCallSizeResult{ssize_t(i), 0};\n          }));\n    }\n  }\n\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_EQ(0, cfg_->stats().http_not_found_.value());\n  while (!end_stream) {\n    file_event_callback_(Event::FileReadyType::Read);\n  }\n}\n\nTEST_F(HttpInspectorTest, MultipleReadsHttp1IncompleteBadHeader) {\n  init();\n  const absl::string_view data = \"X\";\n  {\n    InSequence s;\n\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n      return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN};\n    }));\n\n    for (size_t i = 1; i <= data.size(); i++) {\n      EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n          .WillOnce(Invoke(\n              [&data, i](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n                ASSERT(length >= i);\n                memcpy(buffer, data.data(), i);\n                return Api::SysCallSizeResult{ssize_t(i), 0};\n              }));\n    }\n  }\n\n  bool got_continue = false;\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() {\n    got_continue = true;\n  }));\n  while (!got_continue) {\n  
  file_event_callback_(Event::FileReadyType::Read);\n  }\n  EXPECT_EQ(1, cfg_->stats().http_not_found_.value());\n}\n\nTEST_F(HttpInspectorTest, MultipleReadsHttp1BadProtocol) {\n  init();\n  const std::string valid_header = \"GET /index HTTP/1.1\\r\";\n  //  offset:                       0         10\n  const std::string truncate_header = valid_header.substr(0, 14).append(\"\\r\");\n  {\n    InSequence s;\n\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n      return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN};\n    }));\n\n    for (size_t i = 1; i <= truncate_header.size(); i++) {\n      EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n          .WillOnce(Invoke([&truncate_header, i](os_fd_t, void* buffer, size_t length,\n                                                 int) -> Api::SysCallSizeResult {\n            ASSERT(length >= truncate_header.size());\n            memcpy(buffer, truncate_header.data(), truncate_header.size());\n            return Api::SysCallSizeResult{ssize_t(i), 0};\n          }));\n    }\n  }\n\n  bool got_continue = false;\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() {\n    got_continue = true;\n  }));\n  while (!got_continue) {\n    file_event_callback_(Event::FileReadyType::Read);\n  }\n  EXPECT_EQ(1, cfg_->stats().http_not_found_.value());\n}\n\nTEST_F(HttpInspectorTest, Http1WithLargeRequestLine) {\n  // Verify that the http inspector can detect http requests\n  // with large request line even when they are split over\n  // multiple recv calls.\n  init();\n  absl::string_view method = \"GET\", http = \"/index HTTP/1.0\\r\";\n  std::string spaces(Config::MAX_INSPECT_SIZE - method.size() - http.size(), ' ');\n  const std::string data = absl::StrCat(method, spaces, http);\n  {\n    InSequence s;\n\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, 
MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n      return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN};\n    }));\n\n    uint64_t num_loops = Config::MAX_INSPECT_SIZE;\n#if defined(__has_feature) &&                                                                      \\\n    ((__has_feature(thread_sanitizer)) || (__has_feature(address_sanitizer)))\n    num_loops = 2;\n#endif\n\n    auto ctr = std::make_shared<size_t>(1);\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n        .Times(num_loops)\n        .WillRepeatedly(Invoke([&data, ctr, num_loops](os_fd_t, void* buffer, size_t length,\n                                                       int) -> Api::SysCallSizeResult {\n          size_t len = (*ctr);\n          if (num_loops == 2) {\n            ASSERT(*ctr != 3);\n            len = size_t(Config::MAX_INSPECT_SIZE / (3 - (*ctr)));\n          }\n          ASSERT(length >= len);\n          memcpy(buffer, data.data(), len);\n          *ctr += 1;\n          return Api::SysCallSizeResult{ssize_t(len), 0};\n        }));\n  }\n\n  bool got_continue = false;\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http10};\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() {\n    got_continue = true;\n  }));\n  while (!got_continue) {\n    file_event_callback_(Event::FileReadyType::Read);\n  }\n  EXPECT_EQ(1, cfg_->stats().http10_found_.value());\n}\n\nTEST_F(HttpInspectorTest, Http1WithLargeHeader) {\n  init();\n  absl::string_view request = \"GET /index HTTP/1.0\\rfield: \";\n  //                           0                  20\n  std::string value(Config::MAX_INSPECT_SIZE - request.size(), 'a');\n  const std::string data = absl::StrCat(request, value);\n  {\n    InSequence s;\n\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n      return 
Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN};\n    }));\n\n    for (size_t i = 1; i <= 20; i++) {\n      EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n          .WillOnce(Invoke(\n              [&data, i](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n                ASSERT(length >= data.size());\n                memcpy(buffer, data.data(), i);\n                return Api::SysCallSizeResult{ssize_t(i), 0};\n              }));\n    }\n  }\n\n  bool got_continue = false;\n  const std::vector<absl::string_view> alpn_protos{Http::Utility::AlpnNames::get().Http10};\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() {\n    got_continue = true;\n  }));\n  while (!got_continue) {\n    file_event_callback_(Event::FileReadyType::Read);\n  }\n  EXPECT_EQ(1, cfg_->stats().http10_found_.value());\n}\n\n} // namespace\n} // namespace HttpInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/original_dst/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.listener.original_dst\",\n    deps = [\n        \"//source/extensions/filters/listener/original_dst:config\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"original_dst_fuzz_test\",\n    srcs = [\"original_dst_fuzz_test.cc\"],\n    corpus = \"original_dst_corpus\",\n    deps = [\n        \"//source/extensions/filters/listener/original_dst:original_dst_lib\",\n        \"//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/listener/original_dst/config_test.cc",
    "content": "#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalDst {\nnamespace {\n\n// Test that the deprecated extension name still functions.\nTEST(OriginalDstConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.listener.original_dst\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<\n          Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name));\n}\n\n} // namespace\n} // namespace OriginalDst\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_scheme",
    "content": "sock {\n  local_address: \"hello world\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_unix",
    "content": "sock {\n  local_address: \"unix://tmp/server\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv4",
    "content": "sock {\n  local_address: \"tcp://0.0.0.0:0\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv6",
    "content": "sock {\n  local_address: \"tcp://[a:b:c:d::]:0\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc",
    "content": "#include \"extensions/filters/listener/original_dst/original_dst.h\"\n\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h\"\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalDst {\n\nDEFINE_PROTO_FUZZER(const test::extensions::filters::listener::FilterFuzzTestCase& input) {\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  }\n\n  auto filter = std::make_unique<OriginalDstFilter>();\n  ListenerFilterFuzzer fuzzer;\n  fuzzer.fuzz(*filter, input);\n}\n\n} // namespace OriginalDst\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/original_src/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.listener.original_src\",\n    deps = [\n        \"//source/extensions/filters/listener/original_src:config_lib\",\n        \"@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"original_src_config_factory_test\",\n    srcs = [\"original_src_config_factory_test.cc\"],\n    extension_name = \"envoy.filters.listener.original_src\",\n    deps = [\n        \"//source/extensions/filters/listener/original_src:config\",\n        \"//source/extensions/filters/listener/original_src:config_lib\",\n        \"//source/extensions/filters/listener/original_src:original_src_lib\",\n        \"//test/mocks/server:listener_factory_context_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"original_src_test\",\n    srcs = [\"original_src_test.cc\"],\n    extension_name = \"envoy.filters.listener.original_src\",\n    deps = [\n        \"//source/common/network:socket_option_lib\",\n        \"//source/extensions/filters/listener/original_src:original_src_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"original_src_fuzz_test_proto\",\n    srcs = [\"original_src_fuzz_test.proto\"],\n    deps = [\n        
\"//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_proto\",\n        \"@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"original_src_fuzz_test\",\n    srcs = [\"original_src_fuzz_test.cc\"],\n    corpus = \"original_src_corpus\",\n    deps = [\n        \":original_src_fuzz_test_proto_cc_proto\",\n        \"//source/extensions/filters/listener/original_src:original_src_lib\",\n        \"//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/listener/original_src/config_test.cc",
    "content": "#include <numeric>\n\n#include \"envoy/extensions/filters/listener/original_src/v3/original_src.pb.h\"\n\n#include \"extensions/filters/listener/original_src/config.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\nnamespace {\n\n// In keeping with the class under test, it would have made sense to call this ConfigTest. However,\n// when running coverage tests, that conflicts with tests elsewhere in the codebase.\nclass OriginalSrcConfigTest : public testing::Test {\npublic:\n  Config makeConfigFromProto(\n      const envoy::extensions::filters::listener::original_src::v3::OriginalSrc& proto_config) {\n    return {proto_config};\n  }\n};\n\nTEST_F(OriginalSrcConfigTest, TestUsePortTrue) {\n  envoy::extensions::filters::listener::original_src::v3::OriginalSrc config_proto;\n  config_proto.set_bind_port(true);\n  auto config = makeConfigFromProto(config_proto);\n\n  EXPECT_TRUE(config.usePort());\n}\n\nTEST_F(OriginalSrcConfigTest, TestUsePortFalse) {\n  envoy::extensions::filters::listener::original_src::v3::OriginalSrc config_proto;\n  config_proto.set_bind_port(false);\n  auto config = makeConfigFromProto(config_proto);\n\n  EXPECT_FALSE(config.usePort());\n}\n\nTEST_F(OriginalSrcConfigTest, TestUseMark0) {\n  envoy::extensions::filters::listener::original_src::v3::OriginalSrc config_proto;\n  config_proto.set_mark(0);\n  auto config = makeConfigFromProto(config_proto);\n\n  EXPECT_EQ(config.mark(), 0);\n}\n\nTEST_F(OriginalSrcConfigTest, TestUseMark1234) {\n  envoy::extensions::filters::listener::original_src::v3::OriginalSrc config_proto;\n  config_proto.set_mark(1234);\n  auto config = makeConfigFromProto(config_proto);\n\n  EXPECT_EQ(config.mark(), 1234);\n}\n\nTEST_F(OriginalSrcConfigTest, TestUseMarkMax) {\n  envoy::extensions::filters::listener::original_src::v3::OriginalSrc config_proto;\n  config_proto.set_mark(std::numeric_limits<uint32_t>::max());\n  auto 
config = makeConfigFromProto(config_proto);\n\n  EXPECT_EQ(config.mark(), std::numeric_limits<uint32_t>::max());\n}\n\n} // namespace\n} // namespace OriginalSrc\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/original_src/original_src_config_factory_test.cc",
    "content": "#include \"extensions/filters/listener/original_src/config.h\"\n#include \"extensions/filters/listener/original_src/original_src.h\"\n#include \"extensions/filters/listener/original_src/original_src_config_factory.h\"\n\n#include \"test/mocks/server/listener_factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Invoke;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\nnamespace {\n\nTEST(OriginalSrcConfigFactoryTest, TestCreateFactory) {\n  std::string yaml = R\"EOF(\n    mark: 5\n    bind_port: true\n)EOF\";\n\n  OriginalSrcConfigFactory factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto();\n  TestUtility::loadFromYaml(yaml, *proto_config);\n\n  NiceMock<Server::Configuration::MockListenerFactoryContext> context;\n\n  Network::ListenerFilterFactoryCb cb =\n      factory.createListenerFilterFactoryFromProto(*proto_config, nullptr, context);\n  Network::MockListenerFilterManager manager;\n  Network::ListenerFilterPtr added_filter;\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&added_filter](const Network::ListenerFilterMatcherSharedPtr&,\n                                       Network::ListenerFilterPtr& filter) {\n        added_filter = std::move(filter);\n      }));\n  cb(manager);\n\n  // Make sure we actually create the correct type!\n  EXPECT_NE(dynamic_cast<OriginalSrcFilter*>(added_filter.get()), nullptr);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(OriginalSrcConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.listener.original_src\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<\n          Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name));\n}\n\n} // namespace\n} // namespace OriginalSrc\n} // namespace 
ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/original_src/original_src_corpus/valid_ipv4",
    "content": "config {\n  bind_port: false\n  mark: 0\n}\n\nfuzzed {\n  sock {\n    remote_address: \"tcp://1.2.3.4:0\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/listener/original_src/original_src_corpus/valid_unix",
    "content": "config {\n  bind_port: true\n  mark: 15\n}\n\nfuzzed {\n  sock {\n    remote_address: \"unix://domain.socket\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/listener/original_src/original_src_fuzz_test.cc",
    "content": "#include \"extensions/filters/listener/original_src/original_src.h\"\n\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h\"\n#include \"test/extensions/filters/listener/original_src/original_src_fuzz_test.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\n\nDEFINE_PROTO_FUZZER(\n    const test::extensions::filters::listener::original_src::OriginalSrcTestCase& input) {\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  }\n\n  Config config(input.config());\n  auto filter = std::make_unique<OriginalSrcFilter>(config);\n  ListenerFilterFuzzer fuzzer;\n  fuzzer.fuzz(*filter, input.fuzzed());\n}\n\n} // namespace OriginalSrc\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/original_src/original_src_fuzz_test.proto",
    "content": "syntax = \"proto3\";\n\npackage test.extensions.filters.listener.original_src;\n\nimport \"envoy/extensions/filters/listener/original_src/v3/original_src.proto\";\nimport \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto\";\nimport \"validate/validate.proto\";\n\nmessage OriginalSrcTestCase {\n  envoy.extensions.filters.listener.original_src.v3.OriginalSrc config = 1\n      [(validate.rules).message.required = true];\n  test.extensions.filters.listener.FilterFuzzTestCase fuzzed = 2\n      [(validate.rules).message.required = true];\n}"
  },
  {
    "path": "test/extensions/filters/listener/original_src/original_src_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/listener/original_src/v3/original_src.pb.h\"\n\n#include \"common/network/socket_option_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/listener/original_src/original_src.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace OriginalSrc {\nnamespace {\n\nclass OriginalSrcTest : public testing::Test {\npublic:\n  std::unique_ptr<OriginalSrcFilter> makeDefaultFilter() {\n    Config default_config;\n    return std::make_unique<OriginalSrcFilter>(default_config);\n  }\n\n  std::unique_ptr<OriginalSrcFilter> makeMarkingFilter(uint32_t mark) {\n    envoy::extensions::filters::listener::original_src::v3::OriginalSrc proto_config;\n    proto_config.set_mark(mark);\n\n    Config config(proto_config);\n    return std::make_unique<OriginalSrcFilter>(config);\n  }\n\n  void setAddressToReturn(const std::string& address) {\n    callbacks_.socket_.remote_address_ = Network::Utility::resolveUrl(address);\n  }\n\nprotected:\n  MockBuffer buffer_;\n  NiceMock<Network::MockListenerFilterCallbacks> callbacks_;\n\n  absl::optional<Network::Socket::Option::Details>\n  findOptionDetails(const Network::Socket::Options& options, Network::SocketOptionName name,\n                    envoy::config::core::v3::SocketOption::SocketState state) {\n    for (const auto& option : options) {\n      auto details = option->getOptionDetails(callbacks_.socket_, state);\n      if (details.has_value() && details->name_ == name) {\n        return details;\n      }\n    }\n\n    return absl::nullopt;\n  }\n};\n\nTEST_F(OriginalSrcTest, OnNewConnectionUnixSocketSkips) {\n  auto filter = 
makeDefaultFilter();\n  setAddressToReturn(\"unix://domain.socket\");\n  EXPECT_CALL(callbacks_.socket_, addOption_(_)).Times(0);\n  EXPECT_EQ(filter->onAccept(callbacks_), Network::FilterStatus::Continue);\n}\n\nTEST_F(OriginalSrcTest, OnNewConnectionIpv4AddressAddsOption) {\n  auto filter = makeDefaultFilter();\n\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:0\");\n  EXPECT_CALL(callbacks_.socket_, addOptions_(_)).WillOnce(SaveArg<0>(&options));\n\n  EXPECT_EQ(filter->onAccept(callbacks_), Network::FilterStatus::Continue);\n\n  // not ideal -- we're assuming that the original_src option is first, but it's a fair assumption\n  // for now.\n  ASSERT_NE(options->at(0), nullptr);\n\n  NiceMock<Network::MockConnectionSocket> socket;\n  EXPECT_CALL(socket, setLocalAddress(PointeesEq(callbacks_.socket_.remote_address_)));\n  options->at(0)->setOption(socket, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n}\n\nTEST_F(OriginalSrcTest, OnNewConnectionIpv4AddressUsesCorrectAddress) {\n  auto filter = makeDefaultFilter();\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:0\");\n  EXPECT_CALL(callbacks_.socket_, addOptions_(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->onAccept(callbacks_);\n  std::vector<uint8_t> key;\n  // not ideal -- we're assuming that the original_src option is first, but it's a fair assumption\n  // for now.\n  options->at(0)->hashKey(key);\n  std::vector<uint8_t> expected_key = {1, 2, 3, 4};\n\n  EXPECT_EQ(key, expected_key);\n}\n\nTEST_F(OriginalSrcTest, OnNewConnectionIpv4AddressBleachesPort) {\n  auto filter = makeDefaultFilter();\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:80\");\n  EXPECT_CALL(callbacks_.socket_, addOptions_(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->onAccept(callbacks_);\n\n  NiceMock<Network::MockConnectionSocket> socket;\n  const auto expected_address = 
Network::Utility::parseInternetAddress(\"1.2.3.4\");\n  EXPECT_CALL(socket, setLocalAddress(PointeesEq(expected_address)));\n\n  // not ideal -- we're assuming that the original_src option is first, but it's a fair assumption\n  // for now.\n  options->at(0)->setOption(socket, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n}\n\nTEST_F(OriginalSrcTest, FilterAddsTransparentOption) {\n  if (!ENVOY_SOCKET_IP_TRANSPARENT.hasValue()) {\n    // The option isn't supported on this platform. Just skip the test.\n    return;\n  }\n\n  auto filter = makeDefaultFilter();\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:80\");\n  EXPECT_CALL(callbacks_.socket_, addOptions_(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->onAccept(callbacks_);\n\n  auto transparent_option = findOptionDetails(*options, ENVOY_SOCKET_IP_TRANSPARENT,\n                                              envoy::config::core::v3::SocketOption::STATE_PREBIND);\n\n  EXPECT_TRUE(transparent_option.has_value());\n}\n\nTEST_F(OriginalSrcTest, FilterAddsMarkOption) {\n  if (!ENVOY_SOCKET_SO_MARK.hasValue()) {\n    // The option isn't supported on this platform. Just skip the test.\n    return;\n  }\n\n  auto filter = makeMarkingFilter(1234);\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:80\");\n  EXPECT_CALL(callbacks_.socket_, addOptions_(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->onAccept(callbacks_);\n\n  auto mark_option = findOptionDetails(*options, ENVOY_SOCKET_SO_MARK,\n                                       envoy::config::core::v3::SocketOption::STATE_PREBIND);\n\n  ASSERT_TRUE(mark_option.has_value());\n  uint32_t value = 1234;\n  absl::string_view value_as_bstr(reinterpret_cast<const char*>(&value), sizeof(value));\n  EXPECT_EQ(value_as_bstr, mark_option->value_);\n}\n\nTEST_F(OriginalSrcTest, Mark0NotAdded) {\n  if (!ENVOY_SOCKET_SO_MARK.hasValue()) {\n    // The option isn't supported on this platform. 
Just skip the test.\n    return;\n  }\n\n  auto filter = makeMarkingFilter(0);\n  Network::Socket::OptionsSharedPtr options;\n  setAddressToReturn(\"tcp://1.2.3.4:80\");\n  EXPECT_CALL(callbacks_.socket_, addOptions_(_)).WillOnce(SaveArg<0>(&options));\n\n  filter->onAccept(callbacks_);\n\n  auto mark_option = findOptionDetails(*options, ENVOY_SOCKET_SO_MARK,\n                                       envoy::config::core::v3::SocketOption::STATE_PREBIND);\n\n  ASSERT_FALSE(mark_option.has_value());\n}\n\n} // namespace\n} // namespace OriginalSrc\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"proxy_protocol_test\",\n    srcs = [\"proxy_protocol_test.cc\"],\n    extension_name = \"envoy.filters.listener.proxy_protocol\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:connection_balancer_lib\",\n        \"//source/common/network:listener_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/listener/proxy_protocol:config\",\n        \"//source/extensions/filters/listener/proxy_protocol:proxy_protocol_lib\",\n        \"//source/server:connection_handler_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:listener_factory_context_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"proxy_protocol_fuzz_test_proto\",\n    srcs = [\"proxy_protocol_fuzz_test.proto\"],\n    deps = [\n        \"//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_proto\",\n        \"@envoy_api//envoy/extensions/filters/listener/proxy_protocol/v3:pkg\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"proxy_protocol_fuzz_test\",\n    srcs = [\"proxy_protocol_fuzz_test.cc\"],\n    corpus = 
\"proxy_protocol_corpus\",\n    deps = [\n        \":proxy_protocol_fuzz_test_proto_cc_proto\",\n        \"//source/extensions/filters/listener/proxy_protocol:proxy_protocol_lib\",\n        \"//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_corpus/v1_basic",
    "content": "config {\n\n}\n\nfuzzed {\n  data: \"PROXY TCP4 1.2.3.4 253.253.253.253 65535 1234\\r\\n\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_corpus/v1_basic_v6",
    "content": "config {\n\n}\n\nfuzzed {\n  data: \"PROXY TCP6 1:2:3::4 5:6::7:8 65535 1234\\r\\n\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_corpus/v1_minimal",
    "content": "config {\n\n}\n\nfuzzed {\n  data: \"PROXY UNKNOWN\\r\\n\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_corpus/v2_basic",
    "content": "config {\n\n}\n\nfuzzed {\n  data: \"\\x0d\\x0a\\x0d\\x0a\\x00\\x0d\\x0a\\x51\\x55\\x49\\x54\\x0a\\x21\\x11\\x00\\x0c\\x01\\x02\\x03\\x04\\x00\\x01\\x01\\x02\\x03\\x05\\x00\\x02\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_corpus/v2_basic_v6",
    "content": "config {\n\n}\n\nfuzzed {\n  data: \"\\x0d\\x0a\\x0d\\x0a\\x00\\x0d\\x0a\\x51\\x55\\x49\\x54\\x0a\\x21\\x22\\x00\\x24\\x00\\x01\\x00\\x02\\x00\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x01\\x01\\x00\\x02\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x08\\x00\\x02\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_corpus/v2_extensions",
    "content": "config {\n\n}\n\nfuzzed {\n  data: \"\\x0d\\x0a\\x0d\\x0a\\x00\\x0d\\x0a\\x51\\x55\\x49\\x54\\x0a\\x21\\x11\\x00\\x14\\x01\\x02\\x03\\x04\\x00\\x01\\x01\\x02\\x03\\x05\\x00\\x02\\x0\\x0\\x1\\xffdata\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_corpus/v2_tlv",
    "content": "config {\n  rules {\n    tlv_type: 2\n    on_tlv_present {\n      key: \"PP2 type authority\"\n    }\n  }\n}\n\nfuzzed {\n  data: \"\\x0d\\x0a\\x0d\\x0a\\x00\\x0d\\x0a\\x51\\x55\\x49\\x54\\x0a\\x21\\x11\\x00\\x1a\\x01\\x02\\x03\\x04\\x00\\x01\\x01\\x02\\x03\\x05\\x00\\x02\\x0\\x0\\x1\\xff\\x02\\x00\\x07\\x66\\x6f\\x6f\\x2e\\x63\\x6f\\x6d\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_corpus/v2_tlv_multiple",
    "content": "config {\n  rules {\n    tlv_type: 2\n    on_tlv_present {\n      key: \"PP2 type authority\"\n    }\n  }\n  rules {\n    tlv_type: 234\n    on_tlv_present {\n      key: \"PP2 vpc id\"\n    }\n  }\n}\n\nfuzzed {\n  data: \"\\x0d\\x0a\\x0d\\x0a\\x00\\x0d\\x0a\\x51\\x55\\x49\\x54\\x0a\\x21\\x11\\x00\\x39\\x01\\x02\\x03\\x04\\x00\\x01\\x01\\x02\\x03\\x05\\x00\\x02\\x00\\x00\\x01\\xff\\x02\\x00\\x07\\x66\\x6f\\x6f\\x2e\\x63\\x6f\\x6d\\x0f\\x00\\x03\\xf0\\x00\\x0f\\xea\\x00\\x16\\x01\\x76\\x70\\x63\\x2d\\x30\\x32\\x35\\x74\\x65\\x73\\x74\\x32\\x66\\x61\\x36\\x63\\x36\\x33\\x68\\x61\\x37\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_fuzz_test.cc",
    "content": "#include \"extensions/filters/listener/proxy_protocol/proxy_protocol.h\"\n\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h\"\n#include \"test/extensions/filters/listener/proxy_protocol/proxy_protocol_fuzz_test.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace ProxyProtocol {\n\nDEFINE_PROTO_FUZZER(\n    const test::extensions::filters::listener::proxy_protocol::ProxyProtocolTestCase& input) {\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  }\n\n  Stats::IsolatedStoreImpl store;\n  ConfigSharedPtr cfg = std::make_shared<Config>(store, input.config());\n  auto filter = std::make_unique<Filter>(std::move(cfg));\n\n  ListenerFilterFuzzer fuzzer;\n  fuzzer.fuzz(*filter, input.fuzzed());\n}\n\n} // namespace ProxyProtocol\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_fuzz_test.proto",
    "content": "syntax = \"proto3\";\n\npackage test.extensions.filters.listener.proxy_protocol;\n\nimport \"envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto\";\nimport \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto\";\nimport \"validate/validate.proto\";\n\nmessage ProxyProtocolTestCase {\n  envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol config = 1\n      [(validate.rules).message.required = true];\n  test.extensions.filters.listener.FilterFuzzTestCase fuzzed = 2\n      [(validate.rules).message.required = true];\n}"
  },
  {
    "path": "test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc",
    "content": "#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/connection_balancer_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/raw_buffer_socket.h\"\n#include \"common/network/tcp_listener_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"server/connection_handler_impl.h\"\n\n#include \"extensions/filters/listener/proxy_protocol/proxy_protocol.h\"\n#include \"extensions/filters/listener/well_known_names.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/listener_factory_context.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::AtLeast;\nusing testing::ElementsAre;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace ProxyProtocol {\nnamespace {\n\n// Build again on the basis of the connection_handler_test.cc\n\nclass ProxyProtocolTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                          public Network::ListenerConfig,\n                          public Network::FilterChainManager,\n                          protected Logger::Loggable<Logger::Id::main> {\npublic:\n  ProxyProtocolTest()\n      : api_(Api::createApiForTest(stats_store_)),\n        
dispatcher_(api_->allocateDispatcher(\"test_thread\")),\n        socket_(std::make_shared<Network::TcpListenSocket>(\n            Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true)),\n        connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, absl::nullopt)),\n        name_(\"proxy\"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()),\n        init_manager_(nullptr) {\n    EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream));\n    EXPECT_CALL(socket_factory_, localAddress()).WillOnce(ReturnRef(socket_->localAddress()));\n    EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_));\n    connection_handler_->addListener(absl::nullopt, *this);\n    conn_ = dispatcher_->createClientConnection(socket_->localAddress(),\n                                                Network::Address::InstanceConstSharedPtr(),\n                                                Network::Test::createRawBufferSocket(), nullptr);\n    conn_->addConnectionCallbacks(connection_callbacks_);\n  }\n\n  // Network::ListenerConfig\n  Network::FilterChainManager& filterChainManager() override { return *this; }\n  Network::FilterChainFactory& filterChainFactory() override { return factory_; }\n  Network::ListenSocketFactory& listenSocketFactory() override { return socket_factory_; }\n  bool bindToPort() override { return true; }\n  bool handOffRestoredDestinationConnections() const override { return false; }\n  uint32_t perConnectionBufferLimitBytes() const override { return 0; }\n  std::chrono::milliseconds listenerFiltersTimeout() const override { return {}; }\n  bool continueOnListenerFiltersTimeout() const override { return false; }\n  Stats::Scope& listenerScope() override { return stats_store_; }\n  uint64_t listenerTag() const override { return 1; }\n  ResourceLimit& openConnections() override { return open_connections_; }\n  const std::string& name() const override { return 
name_; }\n  Network::ActiveUdpListenerFactory* udpListenerFactory() override { return nullptr; }\n  Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { return absl::nullopt; }\n  Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override {\n    return absl::nullopt;\n  }\n  envoy::config::core::v3::TrafficDirection direction() const override {\n    return envoy::config::core::v3::UNSPECIFIED;\n  }\n  Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; }\n  const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() const override {\n    return empty_access_logs_;\n  }\n  uint32_t tcpBacklogSize() const override { return ENVOY_TCP_BACKLOG_SIZE; }\n  Init::Manager& initManager() override { return *init_manager_; }\n\n  // Network::FilterChainManager\n  const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override {\n    return filter_chain_.get();\n  }\n\n  void connect(bool read = true,\n               const envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol*\n                   proto_config = nullptr) {\n    int expected_callbacks = 2;\n    auto maybeExitDispatcher = [&]() -> void {\n      expected_callbacks--;\n      if (expected_callbacks == 0) {\n        dispatcher_->exit();\n      }\n    };\n\n    EXPECT_CALL(factory_, createListenerFilterChain(_))\n        .WillOnce(Invoke([&](Network::ListenerFilterManager& filter_manager) -> bool {\n          filter_manager.addAcceptFilter(\n              nullptr, std::make_unique<Filter>(std::make_shared<Config>(\n                           listenerScope(), (nullptr != proto_config)\n                                                ? 
*proto_config\n                                                : envoy::extensions::filters::listener::\n                                                      proxy_protocol::v3::ProxyProtocol())));\n          maybeExitDispatcher();\n          return true;\n        }));\n    conn_->connect();\n    if (read) {\n      read_filter_ = std::make_shared<NiceMock<Network::MockReadFilter>>();\n      EXPECT_CALL(factory_, createNetworkFilterChain(_, _))\n          .WillOnce(Invoke([&](Network::Connection& connection,\n                               const std::vector<Network::FilterFactoryCb>&) -> bool {\n            server_connection_ = &connection;\n            connection.addConnectionCallbacks(server_callbacks_);\n            connection.addReadFilter(read_filter_);\n            return true;\n          }));\n    }\n    EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { maybeExitDispatcher(); }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  void write(const uint8_t* s, ssize_t l) {\n    Buffer::OwnedImpl buf(s, l);\n    conn_->write(buf, false);\n  }\n\n  void write(const std::string& s) {\n    Buffer::OwnedImpl buf(s);\n    conn_->write(buf, false);\n  }\n\n  void expectData(std::string expected) {\n    EXPECT_CALL(*read_filter_, onNewConnection());\n    EXPECT_CALL(*read_filter_, onData(_, _))\n        .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> Network::FilterStatus {\n          EXPECT_EQ(buffer.toString(), expected);\n          buffer.drain(expected.length());\n          dispatcher_->exit();\n          return Network::FilterStatus::Continue;\n        }));\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  void disconnect() {\n    EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n    EXPECT_CALL(server_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose))\n        
.WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n    conn_->close(Network::ConnectionCloseType::NoFlush);\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  void expectProxyProtoError() {\n    EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    EXPECT_EQ(stats_store_.counter(\"downstream_cx_proxy_proto_error\").value(), 1);\n  }\n\n  Stats::TestUtil::TestStore stats_store_;\n  Api::ApiPtr api_;\n  BasicResourceLimitImpl open_connections_;\n  Event::DispatcherPtr dispatcher_;\n  std::shared_ptr<Network::TcpListenSocket> socket_;\n  Network::MockListenSocketFactory socket_factory_;\n  Network::NopConnectionBalancerImpl connection_balancer_;\n  Network::ConnectionHandlerPtr connection_handler_;\n  Network::MockFilterChainFactory factory_;\n  Network::ClientConnectionPtr conn_;\n  NiceMock<Network::MockConnectionCallbacks> connection_callbacks_;\n  Network::Connection* server_connection_;\n  Network::MockConnectionCallbacks server_callbacks_;\n  std::shared_ptr<Network::MockReadFilter> read_filter_;\n  std::string name_;\n  Api::OsSysCallsImpl os_sys_calls_actual_;\n  const Network::FilterChainSharedPtr filter_chain_;\n  const std::vector<AccessLog::InstanceSharedPtr> empty_access_logs_;\n  std::unique_ptr<Init::Manager> init_manager_;\n};\n\n// Parameterize the listener socket address version.\nINSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtocolTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ProxyProtocolTest, V1Basic) {\n  connect();\n  write(\"PROXY TCP4 1.2.3.4 253.253.253.253 65535 1234\\r\\nmore data\");\n\n  expectData(\"more data\");\n\n  
EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"1.2.3.4\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V1Minimal) {\n  connect();\n  write(\"PROXY UNKNOWN\\r\\nmore data\");\n\n  expectData(\"more data\");\n\n  if (GetParam() == Envoy::Network::Address::IpVersion::v4) {\n    EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"127.0.0.1\");\n  } else {\n    EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"::1\");\n  }\n  EXPECT_FALSE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2Basic) {\n  // A well-formed ipv4/tcp message, no extensions\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect();\n  write(buffer, sizeof(buffer));\n\n  expectData(\"more data\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"1.2.3.4\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, BasicV6) {\n  connect();\n  write(\"PROXY TCP6 1:2:3::4 5:6::7:8 65535 1234\\r\\nmore data\");\n\n  expectData(\"more data\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"1:2:3::4\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2BasicV6) {\n  // A well-formed ipv6/tcp message, no extensions\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54,\n                                0x0a, 0x21, 0x22, 0x00, 0x24, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03,\n                                
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,\n                                0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,\n                                0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 'm',  'o',  'r',\n                                'e',  ' ',  'd',  'a',  't',  'a'};\n  connect();\n  write(buffer, sizeof(buffer));\n\n  expectData(\"more data\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"1:2:3::4\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2UnsupportedAF) {\n  // A well-formed message with an unsupported address family\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x41, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect(false);\n  write(buffer, sizeof(buffer));\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, ErrorRecv_2) {\n  // A well formed v4/tcp message, no extensions, but introduce an error on recv (e.g. 
socket close)\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n\n  // TODO(davinci26): Mocking should not be used to provide real system calls.\n  EXPECT_CALL(os_sys_calls, connect(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) {\n        return os_sys_calls_actual_.connect(sockfd, addr, addrlen);\n      }));\n  EXPECT_CALL(os_sys_calls, recv(_, _, _, _))\n      .Times(AnyNumber())\n      .WillOnce(Return(Api::SysCallSizeResult{-1, 0}));\n  EXPECT_CALL(os_sys_calls, ioctl(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, unsigned long int request, void* argp) {\n        return os_sys_calls_actual_.ioctl(fd, request, argp);\n      }));\n  EXPECT_CALL(os_sys_calls, writev(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.writev(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, readv(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.readv(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, getsockopt_(_, _, _, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int {\n            return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_;\n          }));\n  EXPECT_CALL(os_sys_calls, 
getsockname(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* name, socklen_t* namelen) -> Api::SysCallIntResult {\n            return os_sys_calls_actual_.getsockname(sockfd, name, namelen);\n          }));\n  EXPECT_CALL(os_sys_calls, shutdown(_, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int how) { return os_sys_calls_actual_.shutdown(sockfd, how); }));\n  EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) {\n    return os_sys_calls_actual_.close(fd);\n  }));\n  EXPECT_CALL(os_sys_calls, accept(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult {\n            return os_sys_calls_actual_.accept(sockfd, addr, addrlen);\n          }));\n  connect(false);\n  write(buffer, sizeof(buffer));\n\n  errno = 0;\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, ErrorRecv_1) {\n  // A well formed v4/tcp message, no extensions, but introduce an error on recv()\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n\n  // TODO(davinci26): Mocking should not be used to provide real system calls.\n  EXPECT_CALL(os_sys_calls, recv(_, _, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Return(Api::SysCallSizeResult{-1, 0}));\n  EXPECT_CALL(os_sys_calls, connect(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t sockfd, const sockaddr* addr, socklen_t 
addrlen) {\n        return os_sys_calls_actual_.connect(sockfd, addr, addrlen);\n      }));\n  EXPECT_CALL(os_sys_calls, writev(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.writev(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, readv(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.readv(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, getsockopt_(_, _, _, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int {\n            return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_;\n          }));\n  EXPECT_CALL(os_sys_calls, getsockname(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* name, socklen_t* namelen) -> Api::SysCallIntResult {\n            return os_sys_calls_actual_.getsockname(sockfd, name, namelen);\n          }));\n  EXPECT_CALL(os_sys_calls, shutdown(_, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int how) { return os_sys_calls_actual_.shutdown(sockfd, how); }));\n  EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) {\n    return os_sys_calls_actual_.close(fd);\n  }));\n  EXPECT_CALL(os_sys_calls, accept(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult {\n            return os_sys_calls_actual_.accept(sockfd, addr, addrlen);\n          }));\n  connect(false);\n  write(buffer, sizeof(buffer));\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2NotLocalOrOnBehalf) {\n  // An illegal command type: neither 
'local' nor 'proxy' command\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x23, 0x1f, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect(false);\n  write(buffer, sizeof(buffer));\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2LocalConnection) {\n  // A 'local' connection, e.g. health-checking, no address, no extensions\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55,\n                                0x49, 0x54, 0x0a, 0x20, 0x00, 0x00, 0x00, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect();\n  write(buffer, sizeof(buffer));\n  expectData(\"more data\");\n  if (server_connection_->remoteAddress()->ip()->version() ==\n      Envoy::Network::Address::IpVersion::v6) {\n    EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"::1\");\n  } else if (server_connection_->remoteAddress()->ip()->version() ==\n             Envoy::Network::Address::IpVersion::v4) {\n    EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"127.0.0.1\");\n  }\n  EXPECT_FALSE(server_connection_->localAddressRestored());\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2LocalConnectionExtension) {\n  // A 'local' connection, e.g. 
health-checking, no address, 1 TLV (0x00,0x00,0x01,0xff) is present.\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x20, 0x00, 0x00, 0x04, 0x00, 0x00, 0x01, 0xff,\n                                'm',  'o',  'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect();\n  write(buffer, sizeof(buffer));\n  expectData(\"more data\");\n  if (server_connection_->remoteAddress()->ip()->version() ==\n      Envoy::Network::Address::IpVersion::v6) {\n    EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"::1\");\n  } else if (server_connection_->remoteAddress()->ip()->version() ==\n             Envoy::Network::Address::IpVersion::v4) {\n    EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"127.0.0.1\");\n  }\n  EXPECT_FALSE(server_connection_->localAddressRestored());\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2ShortV4) {\n  // An ipv4/tcp connection that has incorrect addr-len encoded\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x21, 0x00, 0x04, 0x00, 0x08, 0x00, 0x02,\n                                'm',  'o',  'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect(false);\n\n  write(buffer, sizeof(buffer));\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2ShortAddrV4) {\n  // An ipv4/tcp connection that has insufficient header-length encoded\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x0b, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect(false);\n\n  write(buffer, sizeof(buffer));\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2ShortV6) {\n  // An 
ipv6/tcp connection that has incorrect addr-len encoded\n  constexpr uint8_t buffer[] = {\n      0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, 0x0a, 0x21, 0x22, 0x00,\n      0x14, 0x00, 0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n      0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 'm',  'o',  'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect(false);\n\n  write(buffer, sizeof(buffer));\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2ShortAddrV6) {\n  // An ipv6/tcp connection that has insufficient header-length encoded\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54,\n                                0x0a, 0x21, 0x22, 0x00, 0x23, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03,\n                                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,\n                                0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,\n                                0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 'm',  'o',  'r',\n                                'e',  ' ',  'd',  'a',  't',  'a'};\n  connect(false);\n\n  write(buffer, sizeof(buffer));\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2AF_UNIX) {\n  // A well-formed AF_UNIX (0x32 in b14) connection is rejected\n  constexpr uint8_t buffer[] = {\n      0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, 0x0a, 0x21, 0x32, 0x00,\n      0x14, 0x00, 0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n      0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 'm',  'o',  'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect(false);\n  write(buffer, sizeof(buffer));\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2BadCommand) {\n  // A non local/proxy command (0x29 in b13) is rejected\n  constexpr uint8_t buffer[] = {\n      0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, 0x0a, 0x29, 0x32, 0x00,\n      0x14, 
0x00, 0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n      0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 'm',  'o',  'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect(false);\n  write(buffer, sizeof(buffer));\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2WrongVersion) {\n  // A non '2' version is rejected (0x93 in b13)\n  constexpr uint8_t buffer[] = {\n      0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, 0x0a, 0x21, 0x93, 0x00,\n      0x14, 0x00, 0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n      0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 'm',  'o',  'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect(false);\n  write(buffer, sizeof(buffer));\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V1TooLong) {\n  constexpr uint8_t buffer[] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '};\n  connect(false);\n  write(\"PROXY TCP4 1.2.3.4 2.3.4.5 100 100\");\n  for (size_t i = 0; i < 256; i += sizeof(buffer)) {\n    write(buffer, sizeof(buffer));\n  }\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2ParseExtensions) {\n  // A well-formed ipv4/tcp with a pair of TLV extensions is accepted\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x14, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02};\n  constexpr uint8_t tlv[] = {0x0, 0x0, 0x1, 0xff};\n\n  constexpr uint8_t data[] = {'D', 'A', 'T', 'A'};\n\n  connect();\n  write(buffer, sizeof(buffer));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  for (int i = 0; i < 2; i++) {\n    write(tlv, sizeof(tlv));\n  }\n  write(data, sizeof(data));\n  expectData(\"DATA\");\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2ParseExtensionsRecvError) {\n  // A well-formed ipv4/tcp with a TLV extension. 
An error is returned on tlv recv()\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x10, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02};\n  constexpr uint8_t tlv[] = {0x0, 0x0, 0x1, 0xff};\n\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n\n  // TODO(davinci26): Mocking should not be used to provide real system calls.\n  EXPECT_CALL(os_sys_calls, recv(_, _, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, void* buf, size_t n, int flags) {\n        const Api::SysCallSizeResult x = os_sys_calls_actual_.recv(fd, buf, n, flags);\n        if (x.rc_ == sizeof(tlv)) {\n          return Api::SysCallSizeResult{-1, 0};\n        } else {\n          return x;\n        }\n      }));\n  EXPECT_CALL(os_sys_calls, connect(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) {\n        return os_sys_calls_actual_.connect(sockfd, addr, addrlen);\n      }));\n  EXPECT_CALL(os_sys_calls, writev(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.writev(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, readv(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.readv(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, getsockopt_(_, _, _, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int {\n            return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_;\n          }));\n  
EXPECT_CALL(os_sys_calls, getsockname(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* name, socklen_t* namelen) -> Api::SysCallIntResult {\n            return os_sys_calls_actual_.getsockname(sockfd, name, namelen);\n          }));\n  EXPECT_CALL(os_sys_calls, shutdown(_, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int how) { return os_sys_calls_actual_.shutdown(sockfd, how); }));\n  EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) {\n    return os_sys_calls_actual_.close(fd);\n  }));\n  EXPECT_CALL(os_sys_calls, accept(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult {\n            return os_sys_calls_actual_.accept(sockfd, addr, addrlen);\n          }));\n  connect(false);\n  write(buffer, sizeof(buffer));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  write(tlv, sizeof(tlv));\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2ParseExtensionsFrag) {\n  // A well-formed ipv4/tcp header with 2 TLV/extensions, these are fragmented on delivery\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x14, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02};\n  constexpr uint8_t tlv[] = {0x0, 0x0, 0x1, 0xff};\n\n  constexpr uint8_t data[] = {'D', 'A', 'T', 'A'};\n\n  connect();\n  write(buffer, sizeof(buffer));\n  for (int i = 0; i < 2; i++) {\n    write(tlv, sizeof(tlv));\n  }\n  write(data, sizeof(data));\n  expectData(\"DATA\");\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, Fragmented) {\n  connect();\n  write(\"PROXY TCP4\");\n  write(\" 254.254.2\");\n  write(\"54.254 1.2\");\n  write(\".3.4 65535\");\n  
write(\" 1234\\r\\n...\");\n\n  // If there is no data after the PROXY line, the read filter does not receive even the\n  // onNewConnection() callback. We need this in order to run the dispatcher in blocking\n  // mode to make sure that proxy protocol processing is completed before we start testing\n  // the results. Since we must have data we might as well check that we get it.\n  expectData(\"...\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"254.254.254.254\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2Fragmented1) {\n  // A well-formed ipv4/tcp header, delivering part of the signature, then part of\n  // the address, then the remainder\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n  connect();\n  write(buffer, 10);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  write(buffer + 10, 10);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  write(buffer + 20, 17);\n\n  expectData(\"more data\");\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"1.2.3.4\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2Fragmented2) {\n  // A well-formed ipv4/tcp header, delivering all of the signature + 1, then the remainder\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  
't',  'a'};\n  connect();\n  write(buffer, 17);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  write(buffer + 17, 10);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  write(buffer + 27, 10);\n\n  expectData(\"more data\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"1.2.3.4\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2Fragmented3Error) {\n  // A well-formed ipv4/tcp header, delivering all of the signature +1, w/ an error\n  // simulated in recv() on the +1\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n\n  // TODO(davinci26): Mocking should not be used to provide real system calls.\n  EXPECT_CALL(os_sys_calls, recv(_, _, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, void* buf, size_t len, int flags) {\n        return os_sys_calls_actual_.recv(fd, buf, len, flags);\n      }));\n  EXPECT_CALL(os_sys_calls, recv(_, _, 1, _))\n      .Times(AnyNumber())\n      .WillOnce(Return(Api::SysCallSizeResult{-1, 0}));\n  EXPECT_CALL(os_sys_calls, connect(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) {\n        return os_sys_calls_actual_.connect(sockfd, addr, addrlen);\n      }));\n  EXPECT_CALL(os_sys_calls, ioctl(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, unsigned long int request, void* argp) {\n        return os_sys_calls_actual_.ioctl(fd, request, argp);\n     
 }));\n  EXPECT_CALL(os_sys_calls, writev(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.writev(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, readv(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.readv(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, getsockopt_(_, _, _, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int {\n            return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_;\n          }));\n  EXPECT_CALL(os_sys_calls, getsockname(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* name, socklen_t* namelen) -> Api::SysCallIntResult {\n            return os_sys_calls_actual_.getsockname(sockfd, name, namelen);\n          }));\n  EXPECT_CALL(os_sys_calls, shutdown(_, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int how) { return os_sys_calls_actual_.shutdown(sockfd, how); }));\n  EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) {\n    return os_sys_calls_actual_.close(fd);\n  }));\n  EXPECT_CALL(os_sys_calls, accept(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult {\n            return os_sys_calls_actual_.accept(sockfd, addr, addrlen);\n          }));\n  connect(false);\n  write(buffer, 17);\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2Fragmented4Error) {\n  // A well-formed ipv4/tcp header, part of the signature with an error introduced\n  // in recv() on the remainder\n  constexpr uint8_t 
buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm',  'o',\n                                'r',  'e',  ' ',  'd',  'a',  't',  'a'};\n\n  Api::MockOsSysCalls os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n\n  // TODO(davinci26): Mocking should not be used to provide real system calls.\n  EXPECT_CALL(os_sys_calls, recv(_, _, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, void* buf, size_t len, int flags) {\n        return os_sys_calls_actual_.recv(fd, buf, len, flags);\n      }));\n  EXPECT_CALL(os_sys_calls, recv(_, _, 4, _))\n      .Times(AnyNumber())\n      .WillOnce(Return(Api::SysCallSizeResult{-1, 0}));\n  EXPECT_CALL(os_sys_calls, connect(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) {\n        return os_sys_calls_actual_.connect(sockfd, addr, addrlen);\n      }));\n  EXPECT_CALL(os_sys_calls, ioctl(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, unsigned long int request, void* argp) {\n        return os_sys_calls_actual_.ioctl(fd, request, argp);\n      }));\n  EXPECT_CALL(os_sys_calls, writev(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.writev(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, readv(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke([this](os_fd_t fd, const iovec* iov, int iovcnt) {\n        return os_sys_calls_actual_.readv(fd, iov, iovcnt);\n      }));\n  EXPECT_CALL(os_sys_calls, getsockopt_(_, _, _, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int 
level, int optname, void* optval, socklen_t* optlen) -> int {\n            return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_;\n          }));\n  EXPECT_CALL(os_sys_calls, getsockname(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* name, socklen_t* namelen) -> Api::SysCallIntResult {\n            return os_sys_calls_actual_.getsockname(sockfd, name, namelen);\n          }));\n  EXPECT_CALL(os_sys_calls, shutdown(_, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, int how) { return os_sys_calls_actual_.shutdown(sockfd, how); }));\n  EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) {\n    return os_sys_calls_actual_.close(fd);\n  }));\n  EXPECT_CALL(os_sys_calls, accept(_, _, _))\n      .Times(AnyNumber())\n      .WillRepeatedly(Invoke(\n          [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult {\n            return os_sys_calls_actual_.accept(sockfd, addr, addrlen);\n          }));\n  connect(false);\n  write(buffer, 10);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  write(buffer + 10, 10);\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, PartialRead) {\n  connect();\n\n  write(\"PROXY TCP4\");\n  write(\" 254.254.2\");\n\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  write(\"54.254 1.2\");\n  write(\".3.4 65535\");\n  write(\" 1234\\r\\n...\");\n\n  expectData(\"...\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"254.254.254.254\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2PartialRead) {\n  // A well-formed ipv4/tcp header, delivered with part of the signature,\n  // part of the header, rest of header + body\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 
0x55,\n                                0x49, 0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02,\n                                0x03, 0x04, 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00,\n                                0x02, 'm',  'o',  'r',  'e',  'd',  'a',  't',  'a'};\n  connect();\n\n  for (size_t i = 0; i < sizeof(buffer); i += 9) {\n    write(&buffer[i], 9);\n    if (i == 0) {\n      dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n    }\n  }\n\n  expectData(\"moredata\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), \"1.2.3.4\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2ExtractTlvOfInterest) {\n  // A well-formed ipv4/tcp with a pair of TLV extensions is accepted\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x1a, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02};\n  constexpr uint8_t tlv1[] = {0x0, 0x0, 0x1, 0xff};\n  constexpr uint8_t tlv_type_authority[] = {0x02, 0x00, 0x07, 0x66, 0x6f,\n                                            0x6f, 0x2e, 0x63, 0x6f, 0x6d};\n  constexpr uint8_t data[] = {'D', 'A', 'T', 'A'};\n\n  envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config;\n  auto rule = proto_config.add_rules();\n  rule->set_tlv_type(0x02);\n  rule->mutable_on_tlv_present()->set_key(\"PP2 type authority\");\n\n  connect(true, &proto_config);\n  write(buffer, sizeof(buffer));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  write(tlv1, sizeof(tlv1));\n  write(tlv_type_authority, sizeof(tlv_type_authority));\n  write(data, sizeof(data));\n  expectData(\"DATA\");\n\n  EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size());\n\n  auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata();\n  
EXPECT_EQ(1, metadata.size());\n  EXPECT_EQ(1, metadata.count(ListenerFilters::ListenerFilterNames::get().ProxyProtocol));\n\n  auto fields = metadata.at(ListenerFilters::ListenerFilterNames::get().ProxyProtocol).fields();\n  EXPECT_EQ(1, fields.size());\n  EXPECT_EQ(1, fields.count(\"PP2 type authority\"));\n\n  auto value_s = fields.at(\"PP2 type authority\").string_value();\n  ASSERT_THAT(value_s, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d));\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2ExtractTlvOfInterestAndEmitWithSpecifiedMetadataNamespace) {\n  // A well-formed ipv4/tcp with a pair of TLV extensions is accepted\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x1a, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02};\n  constexpr uint8_t tlv1[] = {0x0, 0x0, 0x1, 0xff};\n  constexpr uint8_t tlv_type_authority[] = {0x02, 0x00, 0x07, 0x66, 0x6f,\n                                            0x6f, 0x2e, 0x63, 0x6f, 0x6d};\n  constexpr uint8_t data[] = {'D', 'A', 'T', 'A'};\n\n  envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config;\n  auto rule = proto_config.add_rules();\n  rule->set_tlv_type(0x02);\n  rule->mutable_on_tlv_present()->set_key(\"PP2 type authority\");\n  rule->mutable_on_tlv_present()->set_metadata_namespace(\"We need a different metadata namespace\");\n\n  connect(true, &proto_config);\n  write(buffer, sizeof(buffer));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  write(tlv1, sizeof(tlv1));\n  write(tlv_type_authority, sizeof(tlv_type_authority));\n  write(data, sizeof(data));\n  expectData(\"DATA\");\n\n  EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size());\n\n  auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata();\n  EXPECT_EQ(1, metadata.size());\n  
EXPECT_EQ(1, metadata.count(\"We need a different metadata namespace\"));\n\n  auto fields = metadata.at(\"We need a different metadata namespace\").fields();\n  EXPECT_EQ(1, fields.size());\n  EXPECT_EQ(1, fields.count(\"PP2 type authority\"));\n\n  auto value_s = fields.at(\"PP2 type authority\").string_value();\n  ASSERT_THAT(value_s, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d));\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2ExtractMultipleTlvsOfInterest) {\n  // A well-formed ipv4/tcp with a pair of TLV extensions is accepted\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x39, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02};\n  // a TLV of type 0x00 with size of 4 (1 byte is value)\n  constexpr uint8_t tlv1[] = {0x00, 0x00, 0x01, 0xff};\n  // a TLV of type 0x02 with size of 10 bytes (7 bytes are value)\n  constexpr uint8_t tlv_type_authority[] = {0x02, 0x00, 0x07, 0x66, 0x6f,\n                                            0x6f, 0x2e, 0x63, 0x6f, 0x6d};\n  // a TLV of type 0x0f with size of 6 bytes (3 bytes are value)\n  constexpr uint8_t tlv3[] = {0x0f, 0x00, 0x03, 0xf0, 0x00, 0x0f};\n  // a TLV of type 0xea with size of 25 bytes (22 bytes are value)\n  constexpr uint8_t tlv_vpc_id[] = {0xea, 0x00, 0x16, 0x01, 0x76, 0x70, 0x63, 0x2d, 0x30,\n                                    0x32, 0x35, 0x74, 0x65, 0x73, 0x74, 0x32, 0x66, 0x61,\n                                    0x36, 0x63, 0x36, 0x33, 0x68, 0x61, 0x37};\n  constexpr uint8_t data[] = {'D', 'A', 'T', 'A'};\n\n  envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config;\n  auto rule_type_authority = proto_config.add_rules();\n  rule_type_authority->set_tlv_type(0x02);\n  rule_type_authority->mutable_on_tlv_present()->set_key(\"PP2 type authority\");\n\n  auto rule_vpc_id = proto_config.add_rules();\n  
rule_vpc_id->set_tlv_type(0xea);\n  rule_vpc_id->mutable_on_tlv_present()->set_key(\"PP2 vpc id\");\n\n  connect(true, &proto_config);\n  write(buffer, sizeof(buffer));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  write(tlv1, sizeof(tlv1));\n  write(tlv_type_authority, sizeof(tlv_type_authority));\n  write(tlv3, sizeof(tlv3));\n  write(tlv_vpc_id, sizeof(tlv_vpc_id));\n  write(data, sizeof(data));\n  expectData(\"DATA\");\n\n  EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size());\n\n  auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata();\n  EXPECT_EQ(1, metadata.size());\n  EXPECT_EQ(1, metadata.count(ListenerFilters::ListenerFilterNames::get().ProxyProtocol));\n\n  auto fields = metadata.at(ListenerFilters::ListenerFilterNames::get().ProxyProtocol).fields();\n  EXPECT_EQ(2, fields.size());\n  EXPECT_EQ(1, fields.count(\"PP2 type authority\"));\n  EXPECT_EQ(1, fields.count(\"PP2 vpc id\"));\n\n  auto value_type_authority = fields.at(\"PP2 type authority\").string_value();\n  ASSERT_THAT(value_type_authority, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d));\n\n  auto value_vpc_id = fields.at(\"PP2 vpc id\").string_value();\n  ASSERT_THAT(value_vpc_id,\n              ElementsAre(0x01, 0x76, 0x70, 0x63, 0x2d, 0x30, 0x32, 0x35, 0x74, 0x65, 0x73, 0x74,\n                          0x32, 0x66, 0x61, 0x36, 0x63, 0x36, 0x33, 0x68, 0x61, 0x37));\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, V2WillNotOverwriteTLV) {\n  // A well-formed ipv4/tcp with a pair of TLV extensions is accepted\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x2a, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02};\n  // a TLV of type 0x00 with size of 4 (1 byte is value)\n  constexpr uint8_t tlv1[] = {0x00, 0x00, 0x01, 0xff};\n  // a TLV of type 
0x02 with size of 10 bytes (7 bytes are value)\n  constexpr uint8_t tlv_type_authority1[] = {0x02, 0x00, 0x07, 0x66, 0x6f,\n                                             0x6f, 0x2e, 0x63, 0x6f, 0x6d};\n  // a TLV of type 0x0f with size of 6 bytes (3 bytes are value)\n  constexpr uint8_t tlv3[] = {0x0f, 0x00, 0x03, 0xf0, 0x00, 0x0f};\n  // a TLV of type 0x02 (again) with size of 10 bytes (7 bytes are value) and different values\n  constexpr uint8_t tlv_type_authority2[] = {0x02, 0x00, 0x07, 0x62, 0x61,\n                                             0x72, 0x2e, 0x6e, 0x65, 0x74};\n  constexpr uint8_t data[] = {'D', 'A', 'T', 'A'};\n\n  envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config;\n  auto rule_type_authority = proto_config.add_rules();\n  rule_type_authority->set_tlv_type(0x02);\n  rule_type_authority->mutable_on_tlv_present()->set_key(\"PP2 type authority\");\n\n  connect(true, &proto_config);\n  write(buffer, sizeof(buffer));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  write(tlv1, sizeof(tlv1));\n  write(tlv_type_authority1, sizeof(tlv_type_authority1));\n  write(tlv3, sizeof(tlv3));\n  write(tlv_type_authority2, sizeof(tlv_type_authority2));\n  write(data, sizeof(data));\n  expectData(\"DATA\");\n\n  EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size());\n\n  auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata();\n  EXPECT_EQ(1, metadata.size());\n  EXPECT_EQ(1, metadata.count(ListenerFilters::ListenerFilterNames::get().ProxyProtocol));\n\n  auto fields = metadata.at(ListenerFilters::ListenerFilterNames::get().ProxyProtocol).fields();\n  EXPECT_EQ(1, fields.size());\n  EXPECT_EQ(1, fields.count(\"PP2 type authority\"));\n\n  auto value_type_authority = fields.at(\"PP2 type authority\").string_value();\n  ASSERT_THAT(value_type_authority, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d));\n\n  disconnect();\n}\n\nTEST_P(ProxyProtocolTest, 
V2WrongTLVLength) {\n  // A well-formed ipv4/tcp with buffer[14]15] being 0x00 and 0x10. It says we should have 16 bytes\n  // following.\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x10, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02};\n\n  // tlv[2] should be 0x1 since there's only one byte for tlv value.\n  constexpr uint8_t tlv[] = {0x0, 0x0, 0x2, 0xff};\n\n  envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config;\n  auto rule_00 = proto_config.add_rules();\n  rule_00->set_tlv_type(0x00);\n  rule_00->mutable_on_tlv_present()->set_key(\"00\");\n\n  connect(false, &proto_config);\n  write(buffer, sizeof(buffer));\n  write(tlv, sizeof(tlv));\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, V2IncompleteTLV) {\n  // A ipv4/tcp with buffer[14]15] being 0x00 and 0x11. It says we should have 17 bytes following,\n  // however we have 20.\n  constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                0x54, 0x0a, 0x21, 0x11, 0x00, 0x11, 0x01, 0x02, 0x03, 0x04,\n                                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02};\n\n  // a TLV of type 0x00 with size of 4 (1 byte is value)\n  constexpr uint8_t tlv1[] = {0x0, 0x0, 0x1, 0xff};\n  // a TLV of type 0x01 with size of 4 (1 byte is value)\n  constexpr uint8_t tlv2[] = {0x1, 0x0, 0x1, 0xff};\n\n  envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config;\n  auto rule_00 = proto_config.add_rules();\n  rule_00->set_tlv_type(0x00);\n  rule_00->mutable_on_tlv_present()->set_key(\"00\");\n\n  auto rule_01 = proto_config.add_rules();\n  rule_01->set_tlv_type(0x01);\n  rule_01->mutable_on_tlv_present()->set_key(\"01\");\n\n  connect(false, &proto_config);\n  write(buffer, sizeof(buffer));\n  write(tlv1, 
sizeof(tlv1));\n  write(tlv2, sizeof(tlv2));\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, MalformedProxyLine) {\n  connect(false);\n\n  write(\"BOGUS\\r\");\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  write(\"\\n\");\n\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, ProxyLineTooLarge) {\n  connect(false);\n  write(\"012345678901234567890123456789012345678901234567890123456789\"\n        \"012345678901234567890123456789012345678901234567890123456789\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, NotEnoughFields) {\n  connect(false);\n  write(\"PROXY TCP6 1:2:3::4 5:6::7:8 1234\\r\\nmore data\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, UnsupportedProto) {\n  connect(false);\n  write(\"PROXY UDP6 1:2:3::4 5:6::7:8 1234 5678\\r\\nmore data\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, InvalidSrcAddress) {\n  connect(false);\n  write(\"PROXY TCP4 230.0.0.1 10.1.1.3 1234 5678\\r\\nmore data\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, InvalidDstAddress) {\n  connect(false);\n  write(\"PROXY TCP4 10.1.1.2 0.0.0.0 1234 5678\\r\\nmore data\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, BadPort) {\n  connect(false);\n  write(\"PROXY TCP6 1:2:3::4 5:6::7:8 1234 abc\\r\\nmore data\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, NegativePort) {\n  connect(false);\n  write(\"PROXY TCP6 1:2:3::4 5:6::7:8 -1 1234\\r\\nmore data\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, PortOutOfRange) {\n  connect(false);\n  write(\"PROXY TCP6 1:2:3::4 5:6::7:8 66776 1234\\r\\nmore data\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, BadAddress) {\n  connect(false);\n  write(\"PROXY TCP6 1::2:3::4 5:6::7:8 1234 5678\\r\\nmore data\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, AddressVersionsNotMatch) {\n  connect(false);\n  write(\"PROXY TCP4 [1:2:3::4] 1.2.3.4 1234 5678\\r\\nmore data\");\n 
 expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, AddressVersionsNotMatch2) {\n  connect(false);\n  write(\"PROXY TCP4 1.2.3.4 [1:2:3: 1234 4]:5678\\r\\nmore data\");\n  expectProxyProtoError();\n}\n\nTEST_P(ProxyProtocolTest, Truncated) {\n  connect(false);\n  write(\"PROXY TCP4 1.2.3.4 5.6.7.8 1234 5678\");\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n  conn_->close(Network::ConnectionCloseType::NoFlush);\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_P(ProxyProtocolTest, Closed) {\n  connect(false);\n  write(\"PROXY TCP4 1.2.3\");\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n  conn_->close(Network::ConnectionCloseType::NoFlush);\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_P(ProxyProtocolTest, ClosedEmpty) {\n  // We may or may not get these, depending on the operating system timing.\n  EXPECT_CALL(factory_, createListenerFilterChain(_)).Times(AtLeast(0));\n  EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).Times(AtLeast(0));\n  conn_->connect();\n  conn_->close(Network::ConnectionCloseType::NoFlush);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n}\n\nclass WildcardProxyProtocolTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                  public Network::ListenerConfig,\n                                  public Network::FilterChainManager,\n                                  protected Logger::Loggable<Logger::Id::main> {\npublic:\n  WildcardProxyProtocolTest()\n      : api_(Api::createApiForTest(stats_store_)),\n        
dispatcher_(api_->allocateDispatcher(\"test_thread\")),\n        socket_(std::make_shared<Network::TcpListenSocket>(Network::Test::getAnyAddress(GetParam()),\n                                                           nullptr, true)),\n        local_dst_address_(Network::Utility::getAddressWithPort(\n            *Network::Test::getCanonicalLoopbackAddress(GetParam()),\n            socket_->localAddress()->ip()->port())),\n        connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, absl::nullopt)),\n        name_(\"proxy\"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()),\n        init_manager_(nullptr) {\n    EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream));\n    EXPECT_CALL(socket_factory_, localAddress()).WillOnce(ReturnRef(socket_->localAddress()));\n    EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_));\n    connection_handler_->addListener(absl::nullopt, *this);\n    conn_ = dispatcher_->createClientConnection(local_dst_address_,\n                                                Network::Address::InstanceConstSharedPtr(),\n                                                Network::Test::createRawBufferSocket(), nullptr);\n    conn_->addConnectionCallbacks(connection_callbacks_);\n\n    EXPECT_CALL(factory_, createListenerFilterChain(_))\n        .WillOnce(Invoke([&](Network::ListenerFilterManager& filter_manager) -> bool {\n          filter_manager.addAcceptFilter(\n              nullptr,\n              std::make_unique<Filter>(std::make_shared<Config>(\n                  listenerScope(),\n                  envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol())));\n          return true;\n        }));\n  }\n\n  // Network::ListenerConfig\n  Network::FilterChainManager& filterChainManager() override { return *this; }\n  Network::FilterChainFactory& filterChainFactory() override { return factory_; }\n  Network::ListenSocketFactory& 
listenSocketFactory() override { return socket_factory_; }\n  bool bindToPort() override { return true; }\n  bool handOffRestoredDestinationConnections() const override { return false; }\n  uint32_t perConnectionBufferLimitBytes() const override { return 0; }\n  std::chrono::milliseconds listenerFiltersTimeout() const override { return {}; }\n  ResourceLimit& openConnections() override { return open_connections_; }\n  bool continueOnListenerFiltersTimeout() const override { return false; }\n  Stats::Scope& listenerScope() override { return stats_store_; }\n  uint64_t listenerTag() const override { return 1; }\n  const std::string& name() const override { return name_; }\n  Network::ActiveUdpListenerFactory* udpListenerFactory() override { return nullptr; }\n  Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { return absl::nullopt; }\n  Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override {\n    return absl::nullopt;\n  }\n  envoy::config::core::v3::TrafficDirection direction() const override {\n    return envoy::config::core::v3::UNSPECIFIED;\n  }\n  Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; }\n  const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() const override {\n    return empty_access_logs_;\n  }\n  uint32_t tcpBacklogSize() const override { return ENVOY_TCP_BACKLOG_SIZE; }\n  Init::Manager& initManager() override { return *init_manager_; }\n\n  // Network::FilterChainManager\n  const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override {\n    return filter_chain_.get();\n  }\n\n  void connect() {\n    conn_->connect();\n    read_filter_ = std::make_shared<NiceMock<Network::MockReadFilter>>();\n    EXPECT_CALL(factory_, createNetworkFilterChain(_, _))\n        .WillOnce(Invoke([&](Network::Connection& connection,\n                             const std::vector<Network::FilterFactoryCb>&) -> bool {\n          server_connection_ 
= &connection;\n          connection.addConnectionCallbacks(server_callbacks_);\n          connection.addReadFilter(read_filter_);\n          return true;\n        }));\n    EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  void write(const std::string& s) {\n    Buffer::OwnedImpl buf(s);\n    conn_->write(buf, false);\n  }\n\n  void expectData(std::string expected) {\n    EXPECT_CALL(*read_filter_, onNewConnection());\n    EXPECT_CALL(*read_filter_, onData(_, _))\n        .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> Network::FilterStatus {\n          EXPECT_EQ(buffer.toString(), expected);\n          buffer.drain(expected.length());\n          dispatcher_->exit();\n          return Network::FilterStatus::Continue;\n        }));\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  void disconnect() {\n    EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n    conn_->close(Network::ConnectionCloseType::NoFlush);\n    EXPECT_CALL(server_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  BasicResourceLimitImpl open_connections_;\n  Network::MockListenSocketFactory socket_factory_;\n  std::shared_ptr<Network::TcpListenSocket> socket_;\n  Network::Address::InstanceConstSharedPtr local_dst_address_;\n  Network::NopConnectionBalancerImpl connection_balancer_;\n  Network::ConnectionHandlerPtr connection_handler_;\n  Network::MockFilterChainFactory factory_;\n  Network::ClientConnectionPtr conn_;\n  NiceMock<Network::MockConnectionCallbacks> 
connection_callbacks_;\n  Network::Connection* server_connection_;\n  Network::MockConnectionCallbacks server_callbacks_;\n  std::shared_ptr<Network::MockReadFilter> read_filter_;\n  std::string name_;\n  const Network::FilterChainSharedPtr filter_chain_;\n  const std::vector<AccessLog::InstanceSharedPtr> empty_access_logs_;\n  std::unique_ptr<Init::Manager> init_manager_;\n};\n\n// Parameterize the listener socket address version.\nINSTANTIATE_TEST_SUITE_P(IpVersions, WildcardProxyProtocolTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(WildcardProxyProtocolTest, Basic) {\n  connect();\n  write(\"PROXY TCP4 1.2.3.4 254.254.254.254 65535 1234\\r\\nmore data\");\n\n  expectData(\"more data\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->asString(), \"1.2.3.4:65535\");\n  EXPECT_EQ(server_connection_->localAddress()->asString(), \"254.254.254.254:1234\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST_P(WildcardProxyProtocolTest, BasicV6) {\n  connect();\n  write(\"PROXY TCP6 1:2:3::4 5:6::7:8 65535 1234\\r\\nmore data\");\n\n  expectData(\"more data\");\n\n  EXPECT_EQ(server_connection_->remoteAddress()->asString(), \"[1:2:3::4]:65535\");\n  EXPECT_EQ(server_connection_->localAddress()->asString(), \"[5:6::7:8]:1234\");\n  EXPECT_TRUE(server_connection_->localAddressRestored());\n\n  disconnect();\n}\n\nTEST(ProxyProtocolConfigFactoryTest, TestCreateFactory) {\n  Server::Configuration::NamedListenerFilterConfigFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::NamedListenerFilterConfigFactory>::\n          getFactory(ListenerFilters::ListenerFilterNames::get().ProxyProtocol);\n\n  EXPECT_EQ(factory->name(), ListenerFilters::ListenerFilterNames::get().ProxyProtocol);\n\n  const std::string yaml = R\"EOF(\n      rules:\n        - tlv_type: 0x01\n          on_tlv_present:\n          
  key: \"PP2_TYPE_ALPN\"\n        - tlv_type: 0x1a\n          on_tlv_present:\n            key: \"PP2_TYPE_CUSTOMER_A\"      \n)EOF\";\n\n  ProtobufTypes::MessagePtr proto_config = factory->createEmptyConfigProto();\n  TestUtility::loadFromYaml(yaml, *proto_config);\n\n  Server::Configuration::MockListenerFactoryContext context;\n  EXPECT_CALL(context, scope()).Times(1);\n  EXPECT_CALL(context, messageValidationVisitor()).Times(1);\n  Network::ListenerFilterFactoryCb cb =\n      factory->createListenerFilterFactoryFromProto(*proto_config, nullptr, context);\n\n  Network::MockListenerFilterManager manager;\n  Network::ListenerFilterPtr added_filter;\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&added_filter](const Network::ListenerFilterMatcherSharedPtr&,\n                                       Network::ListenerFilterPtr& filter) {\n        added_filter = std::move(filter);\n      }));\n  cb(manager);\n\n  // Make sure we actually create the correct type!\n  EXPECT_NE(dynamic_cast<ProxyProtocol::Filter*>(added_filter.get()), nullptr);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(ProxyProtocolConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.listener.proxy_protocol\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<\n          Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name));\n}\n\n} // namespace\n} // namespace ProxyProtocol\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_library\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_benchmark_test\",\n    \"envoy_extension_cc_benchmark_binary\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"tls_inspector_test\",\n    srcs = [\"tls_inspector_test.cc\"],\n    deps = [\n        \":tls_utility_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/filters/listener/tls_inspector:config\",\n        \"//source/extensions/filters/listener/tls_inspector:tls_inspector_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"tls_inspector_fuzz_test_proto\",\n    srcs = [\"tls_inspector_fuzz_test.proto\"],\n    deps = [\n        \"//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"tls_inspector_fuzz_test\",\n    srcs = [\"tls_inspector_fuzz_test.cc\"],\n    corpus = \"tls_inspector_corpus\",\n    deps = [\n        \":tls_inspector_fuzz_test_proto_cc_proto\",\n        \"//source/extensions/filters/listener/tls_inspector:tls_inspector_lib\",\n        \"//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib\",\n    ],\n)\n\nenvoy_extension_cc_benchmark_binary(\n    name = \"tls_inspector_benchmark\",\n    srcs = [\"tls_inspector_benchmark.cc\"],\n    extension_name = \"envoy.filters.listener.tls_inspector\",\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \":tls_utility_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/network:listen_socket_lib\",\n     
   \"//source/extensions/filters/listener/tls_inspector:tls_inspector_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_extension_benchmark_test(\n    name = \"tls_inspector_benchmark_test\",\n    benchmark_binary = \"tls_inspector_benchmark\",\n    extension_name = \"envoy.filters.listener.tls_inspector\",\n)\n\nenvoy_cc_library(\n    name = \"tls_utility_lib\",\n    srcs = [\"tls_utility.cc\"],\n    hdrs = [\"tls_utility.h\"],\n    external_deps = [\"ssl\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc",
    "content": "#include <vector>\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n\n#include \"extensions/filters/listener/tls_inspector/tls_inspector.h\"\n\n#include \"test/extensions/filters/listener/tls_inspector/tls_utility.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"benchmark/benchmark.h\"\n#include \"gtest/gtest.h\"\n#include \"openssl/ssl.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace TlsInspector {\n\nclass FastMockListenerFilterCallbacks : public Network::MockListenerFilterCallbacks {\npublic:\n  FastMockListenerFilterCallbacks(Network::ConnectionSocket& socket, Event::Dispatcher& dispatcher)\n      : socket_(socket), dispatcher_(dispatcher) {}\n  Network::ConnectionSocket& socket() override { return socket_; }\n  Event::Dispatcher& dispatcher() override { return dispatcher_; }\n  void continueFilterChain(bool success) override { RELEASE_ASSERT(success, \"\"); }\n\n  Network::ConnectionSocket& socket_;\n  Event::Dispatcher& dispatcher_;\n};\n\n// Don't inherit from the mock implementation at all, because this is instantiated\n// in the hot loop.\nclass FastMockFileEvent : public Event::FileEvent {\n  void activate(uint32_t) override {}\n  void setEnabled(uint32_t) override {}\n};\n\nclass FastMockDispatcher : public Event::MockDispatcher {\npublic:\n  Event::FileEventPtr createFileEvent(os_fd_t, Event::FileReadyCb cb, Event::FileTriggerType,\n                                      uint32_t) override {\n    file_event_callback_ = cb;\n    return std::make_unique<FastMockFileEvent>();\n  }\n\n  Event::FileReadyCb file_event_callback_;\n};\n\nclass FastMockOsSysCalls : public Api::MockOsSysCalls 
{\npublic:\n  FastMockOsSysCalls(const std::vector<uint8_t>& client_hello) : client_hello_(client_hello) {}\n\n  Api::SysCallSizeResult recv(os_fd_t, void* buffer, size_t length, int) override {\n    RELEASE_ASSERT(length >= client_hello_.size(), \"\");\n    memcpy(buffer, client_hello_.data(), client_hello_.size());\n    return Api::SysCallSizeResult{ssize_t(client_hello_.size()), 0};\n  }\n\n  const std::vector<uint8_t> client_hello_;\n};\n\nstatic void BM_TlsInspector(benchmark::State& state) {\n  NiceMock<FastMockOsSysCalls> os_sys_calls(Tls::Test::generateClientHello(\n      Config::TLS_MIN_SUPPORTED_VERSION, Config::TLS_MAX_SUPPORTED_VERSION, \"example.com\",\n      \"\\x02h2\\x08http/1.1\"));\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls{&os_sys_calls};\n  NiceMock<Stats::MockStore> store;\n  ConfigSharedPtr cfg(std::make_shared<Config>(store));\n  Network::IoHandlePtr io_handle = std::make_unique<Network::IoSocketHandleImpl>();\n  Network::ConnectionSocketImpl socket(std::move(io_handle), nullptr, nullptr);\n  NiceMock<FastMockDispatcher> dispatcher;\n  FastMockListenerFilterCallbacks cb(socket, dispatcher);\n\n  for (auto _ : state) {\n    Filter filter(cfg);\n    filter.onAccept(cb);\n    RELEASE_ASSERT(dispatcher.file_event_callback_ == nullptr, \"\");\n    RELEASE_ASSERT(socket.detectedTransportProtocol() == \"tls\", \"\");\n    RELEASE_ASSERT(socket.requestedServerName() == \"example.com\", \"\");\n    RELEASE_ASSERT(socket.requestedApplicationProtocols().size() == 2 &&\n                       socket.requestedApplicationProtocols().front() ==\n                           Http::Utility::AlpnNames::get().Http2,\n                   \"\");\n    socket.setDetectedTransportProtocol(\"\");\n    socket.setRequestedServerName(\"\");\n    socket.setRequestedApplicationProtocols({});\n  }\n}\n\nBENCHMARK(BM_TlsInspector)->Unit(benchmark::kMicrosecond);\n\n} // namespace TlsInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} 
// namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_corpus/multiple_reads",
    "content": "fuzzed {\n  data: \"\\x16\\x03\\x01\\x00\\xae\\x01\\x00\\x00\\xaa\\x03\\x03V\\xca\\x92\\x12\\xa1\\x07II\\xc2e\\'\\x10\\x1ajm;Nz\\x87\\xd6\\x00\\x17X&\\x81\\xc4\\x95\\xb9_5\\xc5w A\\\"\\xc3\\x1a\\xf1\\xc6\\xaa=\\x9a\\x83\\x9f\\x11w\\x1eW\\xdf\\x960\\x04\\xd0|\\xc5\\xb4\\x88\\xa5\\xc0\\x9e.*\\x8e\\xcf\\xf5\\x00\\x06\\x13\\x01\"\n  data: \"\\x13\\x02\\x13\\x03\\x01\\x00\\x00[\\x00\\n\\x00\\x08\\x00\\x06\\x00\\x1d\\x00\\x17\\x00\\x18\\x00\\r\\x00\\x14\\x00\\x12\\x04\\x03\\x08\\x04\\x04\\x01\\x05\\x03\\x08\\x05\\x05\\x01\\x08\\x06\\x06\\x01\\x02\\x01\\x003\\x00&\\x00$\\x00\\x1d\\x00 F8{\\xd6X\\xda\\xa4\\x15\\xe7g\\xf2\\\\p\\x92\\xc5\\xc2\\xa8L\\xfe\\x9eU\\x1dac\\xde6\\x9dm_\\x04zy\\x00-\\x00\\x02\\x01\\x01\\x00+\\x00\\x03\\x02\\x03\\x04\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_corpus/no_extensions",
    "content": "fuzzed {\n  data: \"\\x16\\x03\\x01\\x00\\xae\\x01\\x00\\x00\\xaa\\x03\\x03V\\xca\\x92\\x12\\xa1\\x07II\\xc2e\\'\\x10\\x1ajm;Nz\\x87\\xd6\\x00\\x17X&\\x81\\xc4\\x95\\xb9_5\\xc5w A\\\"\\xc3\\x1a\\xf1\\xc6\\xaa=\\x9a\\x83\\x9f\\x11w\\x1eW\\xdf\\x960\\x04\\xd0|\\xc5\\xb4\\x88\\xa5\\xc0\\x9e.*\\x8e\\xcf\\xf5\\x00\\x06\\x13\\x01\\x13\\x02\\x13\\x03\\x01\\x00\\x00[\\x00\\n\\x00\\x08\\x00\\x06\\x00\\x1d\\x00\\x17\\x00\\x18\\x00\\r\\x00\\x14\\x00\\x12\\x04\\x03\\x08\\x04\\x04\\x01\\x05\\x03\\x08\\x05\\x05\\x01\\x08\\x06\\x06\\x01\\x02\\x01\\x003\\x00&\\x00$\\x00\\x1d\\x00 F8{\\xd6X\\xda\\xa4\\x15\\xe7g\\xf2\\\\p\\x92\\xc5\\xc2\\xa8L\\xfe\\x9eU\\x1dac\\xde6\\x9dm_\\x04zy\\x00-\\x00\\x02\\x01\\x01\\x00+\\x00\\x03\\x02\\x03\\x04\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_corpus/not_ssl",
    "content": "fuzzed {\n  data: \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_corpus/too_large",
    "content": "max_size: 5\nfuzzed {\n  data: \"\\x16\\x03\\x01\\x00\\xc2\\x01\\x00\\x00\\xbe\\x03\\x03<\\xd0Be\\x8d\\xc5_\\x06\\x0e\\x13\\xad<h\\xb8\\xf1#maQk\\xaaz2\\x8a\\xcb\\xb7\\xb3\\x81\\xf3\\x94\\x9cD \\xdc\\xca\\x1f\\xeeN\\xce\\xbc\\xf2\\xc9\\xcdp=j\\x93\\x04g\\xe8-\\\"CdJ`-\\xa4Ld\\xfb,Yd\\xcc\\x00\\x06\\x13\\x01\\x13\\x02\\x13\\x03\\x01\"\n  data: \"\\x00\\x00o\\x00\\x00\\x00\\x10\\x00\\x0e\\x00\\x00\\x0bexample.com\\x00\\n\\x00\\x08\\x00\\x06\\x00\\x1d\\x00\\x17\\x00\\x18\\x00\\r\\x00\\x14\\x00\\x12\\x04\\x03\\x08\\x04\\x04\\x01\\x05\\x03\\x08\\x05\\x05\\x01\\x08\\x06\\x06\\x01\\x02\\x01\\x003\\x00&\\x00$\\x00\\x1d\\x00 \\xa4:`\\xd4\\x8d/.\\xd9\\xda\\x1f\\x1a\\x9b\\xb0edA\\x87J\\x86eB\\xd8\\x95/\\x9c\\xf7K\\xb2\\x1a\\xd6\\xb2\\x1a\\x00-\\x00\\x02\\x01\\x01\\x00+\\x00\\x03\\x02\\x03\\x04\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_corpus/valid_alpn",
    "content": "fuzzed {\n  data: \"\\x16\\x03\\x01\\x00\\xc0\\x01\\x00\\x00\\xbc\\x03\\x03w9\\x05\\xa3j\\x93)^\\xa73\\x83\\x1e\\xdfY\\t \\xef\\x06\\x99\\xf61tl\\xc3)\\xfe-\\x16c\\xea{p \\xe5\\x02g,;[\\xbd\\x1f\\xc0\\xa1R\\x82\\x95\\xd1\\x81\\x89\\x82\\xd4\\\"f\\xf97\\xa1\\x08\\x82<\\\"\\xb9\\xa4kR`\\x00\\x06\\x13\\x01\\x13\\x02\\x13\\x03\\x01\\x00\\x00m\\x00\\n\\x00\\x08\\x00\\x06\\x00\\x1d\\x00\\x17\\x00\\x18\\x00\\x10\\x00\\x0e\\x00\\x0c\\x02h2\\x08http/1.1\\x00\\r\\x00\\x14\\x00\\x12\\x04\\x03\\x08\\x04\\x04\\x01\\x05\\x03\\x08\\x05\\x05\\x01\\x08\\x06\\x06\\x01\\x02\\x01\\x003\\x00&\\x00$\\x00\\x1d\\x00 7\\xdf*3\\t\\xfan\\xe1/},\\xa2\\xd9\\xac\\xfa Z\\x82=L\\x08\\xc9\\xda\\xbb/\\x0c\\x86\\xadY;\\xfc4\\x00-\\x00\\x02\\x01\\x01\\x00+\\x00\\x03\\x02\\x03\\x04\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_corpus/valid_sni",
    "content": "fuzzed {\n  data: \"\\x16\\x03\\x01\\x00\\xc2\\x01\\x00\\x00\\xbe\\x03\\x03<\\xd0Be\\x8d\\xc5_\\x06\\x0e\\x13\\xad<h\\xb8\\xf1#maQk\\xaaz2\\x8a\\xcb\\xb7\\xb3\\x81\\xf3\\x94\\x9cD \\xdc\\xca\\x1f\\xeeN\\xce\\xbc\\xf2\\xc9\\xcdp=j\\x93\\x04g\\xe8-\\\"CdJ`-\\xa4Ld\\xfb,Yd\\xcc\\x00\\x06\\x13\\x01\\x13\\x02\\x13\\x03\\x01\\x00\\x00o\\x00\\x00\\x00\\x10\\x00\\x0e\\x00\\x00\\x0bexample.com\\x00\\n\\x00\\x08\\x00\\x06\\x00\\x1d\\x00\\x17\\x00\\x18\\x00\\r\\x00\\x14\\x00\\x12\\x04\\x03\\x08\\x04\\x04\\x01\\x05\\x03\\x08\\x05\\x05\\x01\\x08\\x06\\x06\\x01\\x02\\x01\\x003\\x00&\\x00$\\x00\\x1d\\x00 \\xa4:`\\xd4\\x8d/.\\xd9\\xda\\x1f\\x1a\\x9b\\xb0edA\\x87J\\x86eB\\xd8\\x95/\\x9c\\xf7K\\xb2\\x1a\\xd6\\xb2\\x1a\\x00-\\x00\\x02\\x01\\x01\\x00+\\x00\\x03\\x02\\x03\\x04\"\n}"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_fuzz_test.cc",
    "content": "#include \"extensions/filters/listener/tls_inspector/tls_inspector.h\"\n\n#include \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h\"\n#include \"test/extensions/filters/listener/tls_inspector/tls_inspector_fuzz_test.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace TlsInspector {\n\nDEFINE_PROTO_FUZZER(\n    const test::extensions::filters::listener::tls_inspector::TlsInspectorTestCase& input) {\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  }\n\n  Stats::IsolatedStoreImpl store;\n  ConfigSharedPtr cfg;\n\n  if (input.max_size() == 0) {\n    // If max_size not set, use default constructor\n    cfg = std::make_shared<Config>(store);\n  } else {\n    cfg = std::make_shared<Config>(store, input.max_size());\n  }\n\n  auto filter = std::make_unique<Filter>(std::move(cfg));\n\n  ListenerFilterFuzzer fuzzer;\n  fuzzer.fuzz(*filter, input.fuzzed());\n}\n\n} // namespace TlsInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_fuzz_test.proto",
    "content": "syntax = \"proto3\";\n\npackage test.extensions.filters.listener.tls_inspector;\n\nimport \"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto\";\nimport \"validate/validate.proto\";\n\nmessage TlsInspectorTestCase {\n  uint32 max_size = 1 [(validate.rules).uint32.lte = 65536];\n  test.extensions.filters.listener.FilterFuzzTestCase fuzzed = 2\n      [(validate.rules).message.required = true];\n}"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc",
    "content": "#include \"common/http/utility.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"extensions/filters/listener/tls_inspector/tls_inspector.h\"\n\n#include \"test/extensions/filters/listener/tls_inspector/tls_utility.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gtest/gtest.h\"\n#include \"openssl/ssl.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::ReturnNew;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ListenerFilters {\nnamespace TlsInspector {\nnamespace {\n\nclass TlsInspectorTest : public testing::TestWithParam<std::tuple<uint16_t, uint16_t>> {\npublic:\n  TlsInspectorTest()\n      : cfg_(std::make_shared<Config>(store_)),\n        io_handle_(std::make_unique<Network::IoSocketHandleImpl>(42)) {}\n  ~TlsInspectorTest() override { io_handle_->close(); }\n\n  void init() {\n    filter_ = std::make_unique<Filter>(cfg_);\n\n    EXPECT_CALL(cb_, socket()).WillRepeatedly(ReturnRef(socket_));\n    EXPECT_CALL(cb_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_));\n    EXPECT_CALL(socket_, ioHandle()).WillRepeatedly(ReturnRef(*io_handle_));\n\n    // Prepare the first recv attempt during\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n        .WillOnce(\n            Invoke([](os_fd_t fd, void* buffer, size_t length, int flag) -> Api::SysCallSizeResult {\n              ENVOY_LOG_MISC(error, \"In mock syscall recv {} {} {} {}\", fd, buffer, length, flag);\n              return Api::SysCallSizeResult{static_cast<ssize_t>(0), 0};\n            }));\n    EXPECT_CALL(dispatcher_,\n                createFileEvent_(_, _, Event::PlatformDefaultTriggerType,\n                                 
Event::FileReadyType::Read | Event::FileReadyType::Closed))\n        .WillOnce(\n            DoAll(SaveArg<1>(&file_event_callback_), ReturnNew<NiceMock<Event::MockFileEvent>>()));\n    filter_->onAccept(cb_);\n  }\n\n  NiceMock<Api::MockOsSysCalls> os_sys_calls_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls_{&os_sys_calls_};\n  Stats::IsolatedStoreImpl store_;\n  ConfigSharedPtr cfg_;\n  std::unique_ptr<Filter> filter_;\n  Network::MockListenerFilterCallbacks cb_;\n  Network::MockConnectionSocket socket_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Event::FileReadyCb file_event_callback_;\n  Network::IoHandlePtr io_handle_;\n};\n\nINSTANTIATE_TEST_SUITE_P(TlsProtocolVersions, TlsInspectorTest,\n                         testing::Values(std::make_tuple(Config::TLS_MIN_SUPPORTED_VERSION,\n                                                         Config::TLS_MAX_SUPPORTED_VERSION),\n                                         std::make_tuple(TLS1_VERSION, TLS1_VERSION),\n                                         std::make_tuple(TLS1_1_VERSION, TLS1_1_VERSION),\n                                         std::make_tuple(TLS1_2_VERSION, TLS1_2_VERSION),\n                                         std::make_tuple(TLS1_3_VERSION, TLS1_3_VERSION)));\n\n// Test that an exception is thrown for an invalid value for max_client_hello_size\nTEST_P(TlsInspectorTest, MaxClientHelloSize) {\n  EXPECT_THROW_WITH_MESSAGE(Config(store_, Config::TLS_MAX_CLIENT_HELLO + 1), EnvoyException,\n                            \"max_client_hello_size of 65537 is greater than maximum of 65536.\");\n}\n\n// Test that the filter detects Closed events and terminates.\nTEST_P(TlsInspectorTest, ConnectionClosed) {\n  init();\n  EXPECT_CALL(cb_, continueFilterChain(false));\n  file_event_callback_(Event::FileReadyType::Closed);\n  EXPECT_EQ(1, cfg_->stats().connection_closed_.value());\n}\n\n// Test that the filter detects detects read errors.\nTEST_P(TlsInspectorTest, ReadError) {\n  
init();\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() {\n    return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_NOT_SUP};\n  }));\n  EXPECT_CALL(cb_, continueFilterChain(false));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().read_error_.value());\n}\n\n// Test that a ClientHello with an SNI value causes the correct name notification.\nTEST_P(TlsInspectorTest, SniRegistered) {\n  init();\n  const std::string servername(\"example.com\");\n  std::vector<uint8_t> client_hello = Tls::Test::generateClientHello(\n      std::get<0>(GetParam()), std::get<1>(GetParam()), servername, \"\");\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(Invoke(\n          [&client_hello](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= client_hello.size());\n            memcpy(buffer, client_hello.data(), client_hello.size());\n            return Api::SysCallSizeResult{ssize_t(client_hello.size()), 0};\n          }));\n  EXPECT_CALL(socket_, setRequestedServerName(Eq(servername)));\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(socket_, setDetectedTransportProtocol(absl::string_view(\"tls\")));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().tls_found_.value());\n  EXPECT_EQ(1, cfg_->stats().sni_found_.value());\n  EXPECT_EQ(1, cfg_->stats().alpn_not_found_.value());\n}\n\n// Test that a ClientHello with an ALPN value causes the correct name notification.\nTEST_P(TlsInspectorTest, AlpnRegistered) {\n  init();\n  const auto alpn_protos = std::vector<absl::string_view>{Http::Utility::AlpnNames::get().Http2,\n                                                          Http::Utility::AlpnNames::get().Http11};\n  std::vector<uint8_t> client_hello = Tls::Test::generateClientHello(\n      std::get<0>(GetParam()), 
std::get<1>(GetParam()), \"\", \"\\x02h2\\x08http/1.1\");\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(Invoke(\n          [&client_hello](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= client_hello.size());\n            memcpy(buffer, client_hello.data(), client_hello.size());\n            return Api::SysCallSizeResult{ssize_t(client_hello.size()), 0};\n          }));\n  EXPECT_CALL(socket_, setRequestedServerName(_)).Times(0);\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(socket_, setDetectedTransportProtocol(absl::string_view(\"tls\")));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().tls_found_.value());\n  EXPECT_EQ(1, cfg_->stats().sni_not_found_.value());\n  EXPECT_EQ(1, cfg_->stats().alpn_found_.value());\n}\n\n// Test with the ClientHello spread over multiple socket reads.\nTEST_P(TlsInspectorTest, MultipleReads) {\n  init();\n  const auto alpn_protos = std::vector<absl::string_view>{Http::Utility::AlpnNames::get().Http2};\n  const std::string servername(\"example.com\");\n  std::vector<uint8_t> client_hello = Tls::Test::generateClientHello(\n      std::get<0>(GetParam()), std::get<1>(GetParam()), servername, \"\\x02h2\");\n  {\n    InSequence s;\n    EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n        .WillOnce(InvokeWithoutArgs([]() -> Api::SysCallSizeResult {\n          return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN};\n        }));\n    for (size_t i = 1; i <= client_hello.size(); i++) {\n      EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n          .WillOnce(Invoke([&client_hello, i](os_fd_t, void* buffer, size_t length,\n                                              int) -> Api::SysCallSizeResult {\n            ASSERT(length >= client_hello.size());\n            memcpy(buffer, client_hello.data(), 
client_hello.size());\n            return Api::SysCallSizeResult{ssize_t(i), 0};\n          }));\n    }\n  }\n\n  bool got_continue = false;\n  EXPECT_CALL(socket_, setRequestedServerName(Eq(servername)));\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(socket_, setDetectedTransportProtocol(absl::string_view(\"tls\")));\n  EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() {\n    got_continue = true;\n  }));\n  while (!got_continue) {\n    file_event_callback_(Event::FileReadyType::Read);\n  }\n  EXPECT_EQ(1, cfg_->stats().tls_found_.value());\n  EXPECT_EQ(1, cfg_->stats().sni_found_.value());\n  EXPECT_EQ(1, cfg_->stats().alpn_found_.value());\n}\n\n// Test that the filter correctly handles a ClientHello with no extensions present.\nTEST_P(TlsInspectorTest, NoExtensions) {\n  init();\n  std::vector<uint8_t> client_hello =\n      Tls::Test::generateClientHello(std::get<0>(GetParam()), std::get<1>(GetParam()), \"\", \"\");\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(Invoke(\n          [&client_hello](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= client_hello.size());\n            memcpy(buffer, client_hello.data(), client_hello.size());\n            return Api::SysCallSizeResult{ssize_t(client_hello.size()), 0};\n          }));\n  EXPECT_CALL(socket_, setRequestedServerName(_)).Times(0);\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0);\n  EXPECT_CALL(socket_, setDetectedTransportProtocol(absl::string_view(\"tls\")));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().tls_found_.value());\n  EXPECT_EQ(1, cfg_->stats().sni_not_found_.value());\n  EXPECT_EQ(1, cfg_->stats().alpn_not_found_.value());\n}\n\n// Test that the filter fails if the ClientHello is larger than the\n// maximum allowed 
size.\nTEST_P(TlsInspectorTest, ClientHelloTooBig) {\n  const size_t max_size = 50;\n  cfg_ = std::make_shared<Config>(store_, static_cast<uint32_t>(max_size));\n  std::vector<uint8_t> client_hello = Tls::Test::generateClientHello(\n      std::get<0>(GetParam()), std::get<1>(GetParam()), \"example.com\", \"\");\n  ASSERT(client_hello.size() > max_size);\n  init();\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(Invoke(\n          [=, &client_hello](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length == max_size);\n            memcpy(buffer, client_hello.data(), length);\n            return Api::SysCallSizeResult{ssize_t(length), 0};\n          }));\n  EXPECT_CALL(cb_, continueFilterChain(false));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().client_hello_too_large_.value());\n}\n\n// Test that the filter fails on non-SSL data\nTEST_P(TlsInspectorTest, NotSsl) {\n  init();\n  std::vector<uint8_t> data;\n\n  // Use 100 bytes of zeroes. 
This is not valid as a ClientHello.\n  data.resize(100);\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(\n          Invoke([&data](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult {\n            ASSERT(length >= data.size());\n            memcpy(buffer, data.data(), data.size());\n            return Api::SysCallSizeResult{ssize_t(data.size()), 0};\n          }));\n  EXPECT_CALL(cb_, continueFilterChain(true));\n  file_event_callback_(Event::FileReadyType::Read);\n  EXPECT_EQ(1, cfg_->stats().tls_not_found_.value());\n}\n\nTEST_P(TlsInspectorTest, InlineReadSucceed) {\n  filter_ = std::make_unique<Filter>(cfg_);\n\n  EXPECT_CALL(cb_, socket()).WillRepeatedly(ReturnRef(socket_));\n  EXPECT_CALL(cb_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_));\n  EXPECT_CALL(socket_, ioHandle()).WillRepeatedly(ReturnRef(*io_handle_));\n  const auto alpn_protos = std::vector<absl::string_view>{Http::Utility::AlpnNames::get().Http2};\n  const std::string servername(\"example.com\");\n  std::vector<uint8_t> client_hello = Tls::Test::generateClientHello(\n      std::get<0>(GetParam()), std::get<1>(GetParam()), servername, \"\\x02h2\");\n\n  EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK))\n      .WillOnce(Invoke([&client_hello](os_fd_t fd, void* buffer, size_t length,\n                                       int flag) -> Api::SysCallSizeResult {\n        ENVOY_LOG_MISC(trace, \"In mock syscall recv {} {} {} {}\", fd, buffer, length, flag);\n        ASSERT(length >= client_hello.size());\n        memcpy(buffer, client_hello.data(), client_hello.size());\n        return Api::SysCallSizeResult{ssize_t(client_hello.size()), 0};\n      }));\n\n  // No event is created if the inline recv parse the hello.\n  EXPECT_CALL(dispatcher_,\n              createFileEvent_(_, _, Event::PlatformDefaultTriggerType,\n                               Event::FileReadyType::Read | Event::FileReadyType::Closed))\n      .Times(0);\n\n  
EXPECT_CALL(socket_, setRequestedServerName(Eq(servername)));\n  EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos));\n  EXPECT_CALL(socket_, setDetectedTransportProtocol(absl::string_view(\"tls\")));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onAccept(cb_));\n}\n\n// Test that the deprecated extension name still functions.\nTEST(TlsInspectorConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.listener.tls_inspector\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<\n          Server::Configuration::NamedListenerFilterConfigFactory>::getFactory(deprecated_name));\n}\n\n} // namespace\n} // namespace TlsInspector\n} // namespace ListenerFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_utility.cc",
    "content": "#include \"test/extensions/filters/listener/tls_inspector/tls_utility.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Tls {\nnamespace Test {\n\nstd::vector<uint8_t> generateClientHello(uint16_t tls_min_version, uint16_t tls_max_version,\n                                         const std::string& sni_name, const std::string& alpn) {\n  bssl::UniquePtr<SSL_CTX> ctx(SSL_CTX_new(TLS_with_buffers_method()));\n\n  SSL_CTX_set_min_proto_version(ctx.get(), tls_min_version);\n  SSL_CTX_set_max_proto_version(ctx.get(), tls_max_version);\n\n  bssl::UniquePtr<SSL> ssl(SSL_new(ctx.get()));\n\n  // Ownership of these is passed to *ssl\n  BIO* in = BIO_new(BIO_s_mem());\n  BIO* out = BIO_new(BIO_s_mem());\n  SSL_set_bio(ssl.get(), in, out);\n\n  SSL_set_connect_state(ssl.get());\n  const char* const PREFERRED_CIPHERS = \"HIGH:!aNULL:!kRSA:!PSK:!SRP:!MD5:!RC4\";\n  SSL_set_cipher_list(ssl.get(), PREFERRED_CIPHERS);\n  if (!sni_name.empty()) {\n    SSL_set_tlsext_host_name(ssl.get(), sni_name.c_str());\n  }\n  if (!alpn.empty()) {\n    SSL_set_alpn_protos(ssl.get(), reinterpret_cast<const uint8_t*>(alpn.data()), alpn.size());\n  }\n  SSL_do_handshake(ssl.get());\n  const uint8_t* data = nullptr;\n  size_t data_len = 0;\n  BIO_mem_contents(out, &data, &data_len);\n  ASSERT(data_len > 0);\n  std::vector<uint8_t> buf(data, data + data_len);\n  return buf;\n}\n\n} // namespace Test\n} // namespace Tls\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/listener/tls_inspector/tls_utility.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\nnamespace Envoy {\nnamespace Tls {\nnamespace Test {\n\n/**\n * Generate a TLS ClientHello in wire-format.\n * @param tls_min_version Minimum supported TLS version to advertise.\n * @param tls_max_version Maximum supported TLS version to advertise.\n * @param sni_name The name to include as a Server Name Indication.\n *                 No SNI extension is added if sni_name is empty.\n * @param alpn Protocol(s) list in the wire-format (i.e. 8-bit length-prefixed string) to advertise\n *             in Application-Layer Protocol Negotiation. No ALPN is advertised if alpn is empty.\n */\nstd::vector<uint8_t> generateClientHello(uint16_t tls_min_version, uint16_t tls_max_version,\n                                         const std::string& sni_name, const std::string& alpn);\n\n} // namespace Test\n} // namespace Tls\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/client_ssl_auth/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"client_ssl_auth_test\",\n    srcs = [\"client_ssl_auth_test.cc\"],\n    data = glob([\"test_data/**\"]),\n    extension_name = \"envoy.filters.network.client_ssl_auth\",\n    deps = [\n        \"//source/extensions/filters/network/client_ssl_auth\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/client_ssl_auth/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.client_ssl_auth\",\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/network/client_ssl_auth:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/client_ssl_auth/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.h\"\n\n#include \"common/http/message_impl.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"extensions/filters/network/client_ssl_auth/client_ssl_auth.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ClientSslAuth {\n\nTEST(ClientSslAuthAllowedPrincipalsTest, EmptyString) {\n  AllowedPrincipals principals;\n  principals.add(\"\");\n  EXPECT_EQ(0UL, principals.size());\n}\n\nTEST(ClientSslAuthConfigTest, BadClientSslAuthConfig) {\n  std::string yaml = R\"EOF(\nstat_prefix: my_stat_prefix\nauth_api_cluster: fake_cluster\nip_white_list:\n- address_prefix: 192.168.3.0\n  prefix_len: 24\ntest: a\n  )EOF\";\n\n  envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth proto_config{};\n  EXPECT_THROW(TestUtility::loadFromYaml(yaml, proto_config), EnvoyException);\n}\n\nclass ClientSslAuthFilterTest : public testing::Test {\nprotected:\n  ClientSslAuthFilterTest()\n      : request_(&cm_.async_client_), interval_timer_(new Event::MockTimer(&dispatcher_)),\n        api_(Api::createApiForTest(stats_store_)),\n        ssl_(std::make_shared<Ssl::MockConnectionInfo>()) {}\n  ~ClientSslAuthFilterTest() override { tls_.shutdownThread(); }\n\n  void setup() {\n    std::string yaml = R\"EOF(\nauth_api_cluster: 
vpn\nstat_prefix: vpn\nip_white_list:\n- address_prefix: 1.2.3.4\n  prefix_len: 32\n- address_prefix: '2001:abcd::'\n  prefix_len: 64\n    )EOF\";\n\n    envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth proto_config{};\n    TestUtility::loadFromYaml(yaml, proto_config);\n    EXPECT_CALL(cm_, get(Eq(\"vpn\")));\n    setupRequest();\n    config_ =\n        ClientSslAuthConfig::create(proto_config, tls_, cm_, dispatcher_, stats_store_, random_);\n\n    createAuthFilter();\n  }\n\n  void createAuthFilter() {\n    filter_callbacks_.connection_.callbacks_.clear();\n    instance_ = std::make_unique<ClientSslAuthFilter>(config_);\n    instance_->initializeReadFilterCallbacks(filter_callbacks_);\n\n    // NOP currently.\n    instance_->onAboveWriteBufferHighWatermark();\n    instance_->onBelowWriteBufferLowWatermark();\n  }\n\n  void setupRequest() {\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"vpn\")).WillOnce(ReturnRef(cm_.async_client_));\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n        .WillOnce(\n            Invoke([this](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                          const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n              callbacks_ = &callbacks;\n              return &request_;\n            }));\n  }\n\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Upstream::MockClusterManager cm_;\n  Event::MockDispatcher dispatcher_;\n  Http::MockAsyncClientRequest request_;\n  ClientSslAuthConfigSharedPtr config_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  std::unique_ptr<ClientSslAuthFilter> instance_;\n  Event::MockTimer* interval_timer_;\n  Http::AsyncClient::Callbacks* callbacks_;\n  Stats::TestUtil::TestStore stats_store_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  Api::ApiPtr api_;\n  std::shared_ptr<Ssl::MockConnectionInfo> ssl_;\n};\n\nTEST_F(ClientSslAuthFilterTest, NoCluster) {\n  std::string yaml = 
R\"EOF(\nauth_api_cluster: bad_cluster\nstat_prefix: bad_cluster\n  )EOF\";\n\n  envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth proto_config{};\n  TestUtility::loadFromYaml(yaml, proto_config);\n  EXPECT_CALL(cm_, get(Eq(\"bad_cluster\"))).WillOnce(Return(nullptr));\n  EXPECT_THROW(\n      ClientSslAuthConfig::create(proto_config, tls_, cm_, dispatcher_, stats_store_, random_),\n      EnvoyException);\n}\n\nTEST_F(ClientSslAuthFilterTest, NoSsl) {\n  setup();\n  Buffer::OwnedImpl dummy(\"hello\");\n\n  // Check no SSL case, multiple iterations.\n  EXPECT_CALL(filter_callbacks_.connection_, ssl()).WillOnce(Return(nullptr));\n  EXPECT_EQ(Network::FilterStatus::Continue, instance_->onNewConnection());\n  EXPECT_EQ(Network::FilterStatus::Continue, instance_->onData(dummy, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, instance_->onData(dummy, false));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(1U, stats_store_.counter(\"auth.clientssl.vpn.auth_no_ssl\").value());\n\n  EXPECT_CALL(request_, cancel());\n}\n\nTEST_F(ClientSslAuthFilterTest, Ssl) {\n  InSequence s;\n\n  setup();\n  Buffer::OwnedImpl dummy(\"hello\");\n\n  // Create a new filter for an SSL connection, with no backing auth data yet.\n  createAuthFilter();\n  ON_CALL(filter_callbacks_.connection_, ssl()).WillByDefault(Return(ssl_));\n  filter_callbacks_.connection_.remote_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"192.168.1.1\");\n  std::string expected_sha_1(\"digest\");\n  EXPECT_CALL(*ssl_, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha_1));\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, instance_->onNewConnection());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::Connected);\n  
filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  // Respond.\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  Http::ResponseMessagePtr message(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n  message->body().add(api_->fileSystem().fileReadToEnd(TestEnvironment::runfilesPath(\n      \"test/extensions/filters/network/client_ssl_auth/test_data/vpn_response_1.json\")));\n  callbacks_->onSuccess(request_, std::move(message));\n  EXPECT_EQ(1U,\n            stats_store_\n                .gauge(\"auth.clientssl.vpn.total_principals\", Stats::Gauge::ImportMode::NeverImport)\n                .value());\n\n  // Create a new filter for an SSL connection with an authorized cert.\n  createAuthFilter();\n  filter_callbacks_.connection_.remote_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"192.168.1.1\");\n  std::string expected_sha_2(\"1b7d42ef0025ad89c1c911d6c10d7e86a4cb7c5863b2980abcbad1895f8b5314\");\n  EXPECT_CALL(*ssl_, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha_2));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, instance_->onNewConnection());\n  EXPECT_CALL(filter_callbacks_, continueReading());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::Connected);\n  EXPECT_EQ(Network::FilterStatus::Continue, instance_->onData(dummy, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, instance_->onData(dummy, false));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  // White list case.\n  createAuthFilter();\n  filter_callbacks_.connection_.remote_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, instance_->onNewConnection());\n  EXPECT_CALL(filter_callbacks_, continueReading());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::Connected);\n  
EXPECT_EQ(Network::FilterStatus::Continue, instance_->onData(dummy, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, instance_->onData(dummy, false));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  // IPv6 White list case.\n  createAuthFilter();\n  filter_callbacks_.connection_.remote_address_ =\n      std::make_shared<Network::Address::Ipv6Instance>(\"2001:abcd::1\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, instance_->onNewConnection());\n  EXPECT_CALL(filter_callbacks_, continueReading());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::Connected);\n  EXPECT_EQ(Network::FilterStatus::Continue, instance_->onData(dummy, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, instance_->onData(dummy, false));\n\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_EQ(1U, stats_store_.counter(\"auth.clientssl.vpn.update_success\").value());\n  EXPECT_EQ(2U, stats_store_.counter(\"auth.clientssl.vpn.auth_ip_allowlist\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"auth.clientssl.vpn.auth_digest_match\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"auth.clientssl.vpn.auth_digest_no_match\").value());\n\n  // Interval timer fires.\n  setupRequest();\n  interval_timer_->invokeCallback();\n\n  // Error response.\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  message = std::make_unique<Http::ResponseMessageImpl>(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}});\n  callbacks_->onSuccess(request_, std::move(message));\n\n  // Interval timer fires.\n  setupRequest();\n  interval_timer_->invokeCallback();\n\n  // Parsing error\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  message = std::make_unique<Http::ResponseMessageImpl>(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}});\n  message->body().add(\"bad_json\");\n  
callbacks_->onSuccess(request_, std::move(message));\n\n  // Interval timer fires.\n  setupRequest();\n  interval_timer_->invokeCallback();\n\n  // No response failure.\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  callbacks_->onFailure(request_, Http::AsyncClient::FailureReason::Reset);\n\n  // Interval timer fires, cannot obtain async client.\n  EXPECT_CALL(cm_, httpAsyncClientForCluster(\"vpn\")).WillOnce(ReturnRef(cm_.async_client_));\n  EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callbacks.onSuccess(\n                request_,\n                Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n                    new Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}})});\n            // Intentionally return nullptr (instead of request handle) to trigger a particular\n            // code path.\n            return nullptr;\n          }));\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  interval_timer_->invokeCallback();\n\n  EXPECT_EQ(4U, stats_store_.counter(\"auth.clientssl.vpn.update_failure\").value());\n}\n\n} // namespace ClientSslAuth\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/client_ssl_auth/config_test.cc",
    "content": "#include \"envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.h\"\n#include \"envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/client_ssl_auth/config.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ClientSslAuth {\n\nclass IpWhiteListConfigTest : public testing::TestWithParam<std::string> {};\n\nconst std::string ipv4_cidr_yaml = R\"EOF(\n- address_prefix: \"192.168.3.0\"\n  prefix_len: 24\n)EOF\";\n\nconst std::string ipv6_cidr_yaml = R\"EOF(\n- address_prefix: \"2001:abcd::\"\n  prefix_len: 64\n)EOF\";\n\nINSTANTIATE_TEST_SUITE_P(IpList, IpWhiteListConfigTest,\n                         ::testing::Values(ipv4_cidr_yaml, ipv6_cidr_yaml));\n\nTEST_P(IpWhiteListConfigTest, ClientSslAuthCorrectJson) {\n  const std::string yaml = R\"EOF(\nstat_prefix: my_stat_prefix\nauth_api_cluster: fake_cluster\nip_white_list:\n)EOF\" + GetParam();\n\n  envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  ClientSslAuthConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST_P(IpWhiteListConfigTest, ClientSslAuthCorrectProto) {\n  const std::string yaml = R\"EOF(\nstat_prefix: my_stat_prefix\nauth_api_cluster: fake_cluster\nip_white_list:\n)EOF\" + GetParam();\n\n  envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth 
proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  ClientSslAuthConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST_P(IpWhiteListConfigTest, ClientSslAuthEmptyProto) {\n  const std::string yaml = R\"EOF(\nstat_prefix: my_stat_prefix\nauth_api_cluster: fake_cluster\nip_white_list:\n)EOF\" + GetParam();\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  ClientSslAuthConfigFactory factory;\n  envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth proto_config =\n      *dynamic_cast<envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth*>(\n          factory.createEmptyConfigProto().get());\n\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST(ClientSslAuthConfigFactoryTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(\n      ClientSslAuthConfigFactory().createFilterFactoryFromProto(\n          envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth(), context),\n      ProtoValidationException);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(ClientSslAuthConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.client_ssl_auth\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedNetworkFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace ClientSslAuth\n} // namespace NetworkFilters\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/client_ssl_auth/test_data/vpn_response_1.json",
    "content": "{\n  \"certificates\": [\n    {\n      \"device\": \"chrome-os\", \n      \"email\": \"vho@lyft.com\", \n      \"fingerprint_sha1\": \"4c5beecd0b516d9ab53029ae646c4628bc7a1980\", \n      \"fingerprint_sha256\": \"1b7d42ef0025ad89c1c911d6c10d7e86a4cb7c5863b2980abcbad1895f8b5314\", \n      \"first_name\": \"Vivian\", \n      \"group\": null, \n      \"pem_encoded\": \"-----BEGIN CERTIFICATE-----\\nMIIFHDCCBASgAwIBAgIRAKOl7/eNakU3mgSOijyzmv0wDQYJKoZIhvcNAQELBQAw\\nggFBMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwN\\nU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDFHMEUGA1UECww+REFOR0VSOiBU\\nSElTIFBLSSBIQVMgQSBDVVNUT00gVFJVU1QgUFJPVE9DT0wuIFRIRVJFIElTIE5P\\nIENSTC4xRjBEBgNVBAsMPURBTkdFUjogUkVMWUlORyBQQVJUSUVTIE1VU1QgTk9U\\nIFRSVVNUIENFUlQgVkFMSURBVElPTiBBTE9ORS4xPjA8BgNVBAMMNUx5ZnQgVlBO\\nIEludGVybmFsIENlcnRpZmljYXRlIEF1dGhvcml0eSAoREVWRUxPUE1FTlQpMSUw\\nIwYJKoZIhvcNAQkBFhZzZWN1cml0eS10ZWFtQGx5ZnQuY29tMB4XDTE2MDQwNDE3\\nNDUwMloXDTE3MDQwNTE3NDUwMlowggEUMRUwEwYDVQQDDAx2aG9AbHlmdC5jb20x\\nGzAZBgkqhkiG9w0BCQEWDHZob0BseWZ0LmNvbTELMAkGA1UEBAwCSG8xDzANBgNV\\nBCoMBlZpdmlhbjENMAsGA1UECgwETHlmdDELMAkGA1UEBhMCVVMxEzARBgNVBAgM\\nCkNhbGlmb3JuaWExRzBFBgNVBAsMPkRBTkdFUjogVEhJUyBQS0kgSEFTIEEgQ1VT\\nVE9NIFRSVVNUIFBST1RPQ09MLiBUSEVSRSBJUyBOTyBDUkwuMUYwRAYDVQQLDD1E\\nQU5HRVI6IFJFTFlJTkcgUEFSVElFUyBNVVNUIE5PVCBUUlVTVCBDRVJUIFZBTElE\\nQVRJT04gQUxPTkUuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuSDo\\nncWmuzSzSSqQLHOkY0DmThagTzep/KTduHVQyePLMLtNRDDak+/aQaCbXTlQSrYo\\n4IOsC3fwGp1ds70l7odFYry9r0yTt8P7sTRnXD59mxLs6XTmzdwqr42R6K5LpHJQ\\nfTBFocHJ9Gzk/Ejn11BtzvQe6iX+X3ZPf5IF36sJIdktaHCWFG6Sts2IE5UWBX7K\\nOUzAKJ/PlVkiMa4d0nhEBBE2qmZpHBl3g2HGBFHlGoPhmlHqzBeB2IWzn18FN3sL\\nYJs3+Ucmr31ZSo6F3r5PzPzOiUxDTXPLDG1DruMEK6Tq5VSGVWkKB/lnObi6REYA\\n8YFJYEJuZUoOZNjxywIDAQABozgwNjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQE\\nAwIFoDAWBgNVHSUBAf8EDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAQEA\\nOrD/CxKdqN+fCI3mQGVTFz9xgKqx+ka30nMlxBYx0ACD189MmXW1EdjCW6RUIcvP\\nxDnt+zV10XnqlzMMh6VIlPp/y9m4758TxkDk4Z8Mni3e
CHh4NPYo0V54rxYZL9hJ\\nsX8P32Cu6bKcOXb4WIuXcXuKrt/6PVKFACbaYdTYrlvalJz4m0O426o5jcPx8FtI\\naqDTXO42ZUwU04C5c1rWh6kh3oqTPVMXPsAKUklwvVQC4jS/QTfAaZE2hdAFIw9l\\nu0uMferJP4zHP9gzCmHcv4+ZBzJ0wKz2IVeR45Iyq1QvrMN4pz/bZhQO91VMwHHC\\nSPB1iqKP4Ly3rOpmRXWgTg==\\n-----END CERTIFICATE-----\\n\", \n      \"status\": \"approved\", \n      \"surname\": \"Ho\"\n    }\n  ]\n}"
  },
  {
    "path": "test/extensions/filters/network/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\n        \"utility_test.cc\",\n    ],\n    deps = [\n        \"//source/extensions/filters/network/common:utility_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//source/extensions:all_extensions.bzl\",\n    \"envoy_all_network_filters\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_proto_library(\n    name = \"network_readfilter_fuzz_proto\",\n    srcs = [\"network_readfilter_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"network_writefilter_fuzz_proto\",\n    srcs = [\"network_writefilter_fuzz.proto\"],\n    deps = [\n        \"//test/fuzz:common_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"uber_readfilter_lib\",\n    srcs = [\n        \"uber_per_readfilter.cc\",\n        \"uber_readfilter.cc\",\n    ],\n    hdrs = [\"uber_readfilter.h\"],\n    deps = [\n        \":network_readfilter_fuzz_proto_cc_proto\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/extensions/filters/common/ratelimit:ratelimit_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:utility_lib\",\n        \"//test/extensions/filters/common/ext_authz:ext_authz_test_common\",\n        \"//test/extensions/filters/network/common/fuzz/utils:network_filter_fuzzer_fakes_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/direct_response/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"network_readfilter_fuzz_test\",\n    
srcs = [\"network_readfilter_fuzz_test.cc\"],\n    corpus = \"network_readfilter_corpus\",\n    dictionaries = [\"network_readfilter_fuzz_test.dict\"],\n    # All Envoy network filters must be linked to the test in order for the fuzzer to pick\n    # these up via the NamedNetworkFilterConfigFactory.\n    deps = [\n        \":uber_readfilter_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//test/config:utility_lib\",\n    ] + envoy_all_network_filters(),\n)\n\nenvoy_cc_test_library(\n    name = \"uber_writefilter_lib\",\n    srcs = [\n        \"uber_per_writefilter.cc\",\n        \"uber_writefilter.cc\",\n    ],\n    hdrs = [\"uber_writefilter.h\"],\n    deps = [\n        \":network_writefilter_fuzz_proto_cc_proto\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/common:utility_lib\",\n        \"//test/extensions/filters/network/common/fuzz/utils:network_filter_fuzzer_fakes_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/mocks/network:network_mocks\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"network_writefilter_fuzz_test\",\n    srcs = [\"network_writefilter_fuzz_test.cc\"],\n    corpus = \"network_writefilter_corpus\",\n    # All Envoy network filters must be linked to the test in order for the fuzzer to pick\n    # these up via the NamedNetworkFilterConfigFactory.\n    deps = [\n        \":uber_writefilter_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_broker_config_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:config\",\n        \"//source/extensions/filters/network/mysql_proxy:config\",\n        \"//source/extensions/filters/network/zookeeper_proxy:config\",\n        \"//test/config:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/README.md",
    "content": "Network filters need to be fuzzed. Filters come in two flavors, each with their own fuzzer. Read filters should be added into the [Generic ReadFilter Fuzzer](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc). Write Filters should added into the [Generic WriteFilter Fuzzer](https://github.com/envoyproxy/envoy/blob/master/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc). Some filters are both raed and write filters: They should be added into both fuzzers.\nTo add a new filter into generic network level filter fuzzers, see the [doc](https://github.com/envoyproxy/envoy/blob/master/source/docs/network_filter_fuzzing.md)."
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_sslL_auth_2",
    "content": "config {\n  name: \"envoy.filters.network.client_ssl_auth\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth\"\n    value: \"\\n\\010\\177\\177_p\\000O\\002@\\022\\007x-clien\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 524288\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 524288\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 524288\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"ppu\"\n    end_stream: true\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 524288\n  }\n}\nactions {\n  on_data {\n    data: \"type.googleapis.com/envoy.extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_ssl_authz_1",
    "content": "config {\n  name: \"envoy.filters.network.client_ssl_auth\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth\"\n    value: \"\\n%envoy.filters.network.client_ssl_auth\\022\\0011\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 4\n  }\n}\nactions {\n  on_data {\n    data: \"u\\360\"\n  }\n}\nactions {\n  on_data {\n    data: \"u\\360\"\n  }\n}\nactions {\n  on_data {\n    data: \"u\\360\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 4\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_1",
    "content": "config {\n  name: \"envoy.filters.network.direct_response\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.direct_response.v3.Config\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n  }\n}\nactions {\n  on_data {\n    data: \"y\"\n  }\n}\nactions {\n  on_data {\n  }\n}\nactions {\n  on_data {\n  }\n}\nactions {\n  on_data {\n    data: \"\\006\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_open_file",
    "content": "config {\n  name: \"envoy.filters.network.direct_response\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.direct_response.v3.Config\"\n    value: \"\\n\\032\\n\\030*\\014\\n\\002\\020\\001\\\"\\006\\020\\001\\\"\\002\\030\\0012\\003\\032\\001\\':\\003\\032\\001\\'\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions{\n  on_data{\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.dubbo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\"\n    value: \"\\n!envoy.filters.network.dubbo_proxy\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 268435\n  }\n}\nactions {\n  on_data {\n    data: \"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\013\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\013\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\013\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\013\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_ondata_msg_split",
    "content": "config {\n  name: \"envoy.filters.network.dubbo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\xda\\xbb\\xc2\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xf\\x0\\x0\\x0\\x16\\x5\\x32\\x2e\\x30\\x2e\\x32\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x4\\x74\\x65\\x73\\x74\\x5\\x30\\x2e\\x30\\x2e\\x30\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_ondata_twoway",
    "content": "config {\n  name: \"envoy.filters.network.dubbo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\xda\\xbb\\xc2\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xf\\x0\\x0\\x0\\x16\\x5\\x32\\x2e\\x30\\x2e\\x32\\x4\\x74\\x65\\x73\\x74\\x5\\x30\\x2e\\x30\\x2e\\x30\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_protocol_routing",
    "content": "config {\n  name: \"envoy.filters.network.dubbo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x22\\x5a\\xa\\x5\\x74\\x65\\x73\\x74\\x31\\x12\\x21\\x6f\\x72\\x67\\x2e\\x61\\x70\\x61\\x63\\x68\\x65\\x2e\\x64\\x75\\x62\\x62\\x6f\\x2e\\x64\\x65\\x6d\\x6f\\x2e\\x44\\x65\\x6d\\x6f\\x53\\x65\\x72\\x76\\x69\\x63\\x65\\x2a\\x2e\\xa\\xf\\xa\\xd\\xa\\xb\\x2a\\x9\\xa\\x0\\x12\\x5\\x28\\x2e\\x2a\\x3f\\x29\\x12\\x1b\\xa\\x19\\x75\\x73\\x65\\x72\\x5f\\x73\\x65\\x72\\x76\\x69\\x63\\x65\\x5f\\x64\\x75\\x62\\x62\\x6f\\x5f\\x73\\x65\\x72\\x76\\x65\\x72\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\xda\\xbb\\xc2\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x64\\x0\\x0\\x0\\x16\\x5\\x32\\x2e\\x30\\x2e\\x32\\x4\\x74\\x65\\x73\\x74\\x5\\x30\\x2e\\x30\\x2e\\x30\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\"\n    end_stream: true\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_protocol_routing_failure",
    "content": "config {\n  name: \"envoy.filters.network.dubbo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\xda\\xbb\\xc2\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xf\\x0\\x0\\x0\\x16\\x5\\x32\\x2e\\x30\\x2e\\x32\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\"\n    end_stream: true\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/echo_1",
    "content": "config {\n  name: \"envoy.filters.network.echo\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.echo.v3.Echo\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 2097152\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 4194304\n  }\n}\nactions {\n  on_data {\n    data: \"y\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 2097152\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/empty",
    "content": "config {\n  name: \"envoy.filters.network.local_ratelimit\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\"\n    value:\"\\001\\n\\311\\001type.googleapis.com/envoy.extensions.filters.netwe\\360\\231\\201\\270\\362\\251\\212\\211\\361\\263\\275\\271\\363\\206\\215\\263\\361\\255\\230\\252\\362\\265\\266\\243\\364\\203\\217\\266\\362\\211\\226\\227\\362\\232\\255\\221\\362\\227\\227\\210\\362\\255\\274\\232\\363\\220\\256\\256\\364\\206\\217\\231\\363\\246\\273\\262\\363\\214\\207\\237\\360\\255\\215\\236\\364\\206\\232\\207\\361\\273\\210\\256\\362\\234\\204\\234\\361\\256\\236\\207\\361\\225\\240\\253\\363\\255\\231\\272\\363\\254\\256\\273\\360\\276\\201\\214\\361\\231\\215\\216\\363\\233\\202\\226\\361\\252\\222\\256\\362\\217\\241\\265\\363\\200\\257\\245voy.api.v2.route.RouteActlRateLimit\\022\\017\\010\\200\\312\\002\\022\\004\\010\\200\\312\\002\\032\\003\\010\\200^\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\nVtype.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\\022\\002\\010 \\032d\\n\\002\\010\\001\\022^\\n2\\n%envoy.filters.network.local_ratelimit\\022\\000\\032\\007\\n\\002\\010\\001\\022\\001+\\022\\000\\032&\\n\\000\\022\\\"\\000\\000\\000\\000\\000voy.filters.network.lo\\000\\000\\000\\000\\000\\000+\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_1",
    "content": "config {\n  name: \"envoy.filters.network.ext_authz\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz\"\n  }\n}\nactions {\n  on_data {\n    data: \"y\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 655360\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_2",
    "content": "config {\n  name: \"envoy.filters.network.ext_authz\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz\"\n    value: \"\\n\\037envoy.filters.network.ext_authz\\030\\001(\\001\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \":\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_1",
    "content": "config {\n  name: \"envoy.filters.network.http_connection_manager\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\"\n    value: \"\\022\\002B\\001\\\"\\000J\\004(\\001J\\000z\\002\\010\\001\\220\\001\\001\"\n  }\n}\nactions {\n  on_data {\n    data: \"y\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 655360\n  }\n}"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_2",
    "content": "config {\n  name: \"envoy.filters.network.http_connection_manager\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\"\n    value: \"\\010\\002\\022\\001-\\\"5\\n\\001\\000\\032\\001~\\032\\'envoy.type.matcher.v3.ListStringMatcherB\\001-B\\001~:\\013\\\"\\t\\t\\000\\000\\000\\004\\000\\000\\000\\000B\\002(\\001\\312\\001\\000\\362\\001\\002\\010\\001\"\n  }\n}\nactions {\n  on_data {\n    data: \"y\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 655360\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_1",
    "content": "config {\n  name: \"envoy.filters.network.kafka_broker\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker\"\n    value: \"\\n\\\"envoy.filters.network.kafka_broker\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 10000\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_process_msg",
    "content": "config {\n  name: \"envoy.filters.network.kafka_broker\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker\"\n    value: \"\\n\\\"envoy.filters.network.kafka_broker\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data:\"\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x2\\x69\\x64\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x1\\x0\\x2\\x69\\x64\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\x2\\x0\\x0\\x0\\x2\\x0\\x2\\x69\\x64\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x3\\x0\\x0\\x0\\x3\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x4\\x0\\x0\\x0\\x4\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x5\\x0\\x0\\x0\\x5\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x6\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x36\\x0\\x0\\x0
\\x7\\x0\\x0\\x0\\x7\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x8\\x0\\x0\\x0\\x8\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x38\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x9\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x38\\x0\\x1\\x0\\x1\\x0\\x0\\x0\\xa\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x38\\x0\\x1\\x0\\x2\\x0\\x0\\x0\\xb\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x3c\\x0\\x1\\x0\\x3\\x0\\x0\\x0\\xc\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x3d\\x0\\x1\\x0\\x4\\x0\\x0\\x0\\xd\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x45\\x0\\x1\\x0\\x5\\x0\\x0\\x0\\xe\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x
0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x45\\x0\\x1\\x0\\x6\\x0\\x0\\x0\\xf\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x61\\x0\\x1\\x0\\x7\\x0\\x0\\x0\\x10\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x61\\x0\\x1\\x0\\x8\\x0\\x0\\x0\\x11\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x65\\x0\\x1\\x0\\x9\\x0\\x0\\x0\\x12\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x65\\x0\\x1\\x0\\xa\\x0\\x0\\x0\\x13\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\
x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x6d\\x0\\x1\\x0\\xb\\x0\\x0\\x0\\x14\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x30\\x0\\x2\\x0\\x0\\x0\\x0\\x0\\x15\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x2c\\x0\\x2\\x0\\x1\\x0\\x0\\x0\\x16\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x2d\\x0\\x2\\x0\\x2\\x0\\x0\\x0\\x17\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x2d\\x0\\x2\\x0\\x3\\x0\\x0\\x0\\x18\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x31\\x0\\x2\\x0\\x4\\x0\\x0\\x0\\x19\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x31\\x0\\x2\\x0\\x5\\x0\\x0\\x0\\x1a\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x18\\x0\\x3\\x0\\x0\\x0\\x0\\x0\\x1b\\x0\\x
2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x18\\x0\\x3\\x0\\x1\\x0\\x0\\x0\\x1c\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x18\\x0\\x3\\x0\\x2\\x0\\x0\\x0\\x1d\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x18\\x0\\x3\\x0\\x3\\x0\\x0\\x0\\x1e\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x19\\x0\\x3\\x0\\x4\\x0\\x0\\x0\\x1f\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x19\\x0\\x3\\x0\\x5\\x0\\x0\\x0\\x20\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x19\\x0\\x3\\x0\\x6\\x0\\x0\\x0\\x21\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x19\\x0\\x3\\x0\\x7\\x0\\x0\\x0\\x22\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x1b\\x0\\x3\\x0\\x8\\x0\\x0\\x0\\x23\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x2e\\x0\\x3\\x0\\x9\\x0\\x0\\x0\\x24\\x0\\x2\\x69\\x64\\x0\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x58\\x0\\x4\\x0\\x0\\x0\\x0\\x0\\x25\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x59\\x0\\x4\\x0\\x1\\x0\\x0\\x0\\x26\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\
\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x65\\x0\\x4\\x0\\x2\\x0\\x0\\x0\\x27\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x75\\x0\\x4\\x0\\x3\\x0\\x0\\x0\\x28\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x8b\\x0\\x4\\x0\\x4\\x0\\x0\\x0\\x29\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x0\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x25\\x0\\x5\\x0\\x0\\x0\\x0\\x0\\x2a\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x31\\x0\\x5\\x0\\x1\\x0\\x0\\x0\\x2b\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\
x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x41\\x0\\x5\\x0\\x2\\x0\\x0\\x0\\x2c\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x58\\x0\\x6\\x0\\x0\\x0\\x0\\x0\\x2d\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x5e\\x0\\x6\\x0\\x1\\x0\\x0\\x0\\x2e\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x66\\x0\\x6\\x0\\x2\\x0\\x0\\x0\\x2f\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x6e\\x0\\x6\\x0\\x3\\x0\\x0\\x0\\x30\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x
74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x76\\x0\\x6\\x0\\x4\\x0\\x0\\x0\\x31\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x82\\x0\\x6\\x0\\x5\\x0\\x0\\x0\\x32\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\xa1\\x0\\x6\\x0\\x6\\x0\\x0\\x0\\x33\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x10\\x0\\x7\\x0\\x0\\x0\\x0\\x0\\x34\\x0\\x2\\x69\\x64\\x0\\x0\\x
0\\x20\\x0\\x0\\x0\\x10\\x0\\x7\\x0\\x1\\x0\\x0\\x0\\x35\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x18\\x0\\x7\\x0\\x2\\x0\\x0\\x0\\x36\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x24\\x0\\x7\\x0\\x3\\x0\\x0\\x0\\x37\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x38\\x0\\x8\\x0\\x0\\x0\\x0\\x0\\x38\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4c\\x0\\x8\\x0\\x1\\x0\\x0\\x0\\x39\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4c\\x0\\x8\\x0\\x2\\x0\\x0\\x0\\x3a\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4c\\x0\\x8\\x0\\x3\\x0\\x0\\x0\\x3b\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4c\\x0\\x8\\x0\\x4\\x0\\x0\\x0\\x3c\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x
0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x44\\x0\\x8\\x0\\x5\\x0\\x0\\x0\\x3d\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x48\\x0\\x8\\x0\\x6\\x0\\x0\\x0\\x3e\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x50\\x0\\x8\\x0\\x7\\x0\\x0\\x0\\x3f\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x67\\x0\\x8\\x0\\x8\\x0\\x0\\x0\\x40\\x0\\x2\\x69\\x64\\x0\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x28\\x0\\x9\\x0\\x0\\x0\\x0\\x0\\x41\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x28\\x0\\x9\\x0\\x1\\x0\\x0\\x0\\x42\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x28\\x0\\x9\\x0\\x2\\x0\\x0\\x0\\x43\\x0\\x2\\x69\\x64
\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x28\\x0\\x9\\x0\\x3\\x0\\x0\\x0\\x44\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x28\\x0\\x9\\x0\\x4\\x0\\x0\\x0\\x45\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x28\\x0\\x9\\x0\\x5\\x0\\x0\\x0\\x46\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x37\\x0\\x9\\x0\\x6\\x0\\x0\\x0\\x47\\x0\\x2\\x69\\x64\\x0\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x14\\x0\\xa\\x0\\x0\\x0\\x0\\x0\\x48\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x15\\x0\\xa\\x0\\x1\\x0\\x0\\x0\\x49\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x0\\x0\\x15\\x0\\xa\\x0\\x2\\x0\\x0\\x0\\x4a\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x0\\x0\\x20\\x0\\xa\\x0\\x3\\x0\\x0\\x0\\x4b\\x0\\x2\\x69\\x64\\x0\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x3c\\x0\\xb\\x0\\x0\\x0\\x0\\x0\\x4c\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x40\\x0\\xb\\x0\\x1\\x0\\x0\\x0\\x4d\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\
\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x40\\x0\\xb\\x0\\x2\\x0\\x0\\x0\\x4e\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x40\\x0\\xb\\x0\\x3\\x0\\x0\\x0\\x4f\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x40\\x0\\xb\\x0\\x4\\x0\\x0\\x0\\x50\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x48\\x0\\xb\\x0\\x5\\x0\\x0\\x0\\x51\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x54\\x0\\xb\\x0\\x6\\x0\\x0\\x0\\x52\\x0\\x2\\x69\\x64\\x0\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x5\\x0\\x1\\x2\\x3\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x20\\x0\\xc\\x0\\x0\\x0\\x0\\x0\\x53\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\xc\\x0\\x1\\x0\\x0\\x0\\x54\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x
20\\x0\\xc\\x0\\x2\\x0\\x0\\x0\\x55\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x28\\x0\\xc\\x0\\x3\\x0\\x0\\x0\\x56\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x31\\x0\\xc\\x0\\x4\\x0\\x0\\x0\\x57\\x0\\x2\\x69\\x64\\x0\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x1c\\x0\\xd\\x0\\x0\\x0\\x0\\x0\\x58\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1c\\x0\\xd\\x0\\x1\\x0\\x0\\x0\\x59\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1c\\x0\\xd\\x0\\x2\\x0\\x0\\x0\\x5a\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x28\\x0\\xd\\x0\\x3\\x0\\x0\\x0\\x5b\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x39\\x0\\xd\\x0\\x4\\x0\\x0\\x0\\x5c\\x0\\x2\\x69\\x64\\x0\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x34\\x0\\xe\\x0\\x0\\x0\\x0\\x0\\x5d\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x34\\x0\\xe\\x0\\x1\\x0\\x0\\x0\\x5e\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x34\\x0\\xe\\
x0\\x2\\x0\\x0\\x0\\x5f\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x3c\\x0\\xe\\x0\\x3\\x0\\x0\\x0\\x60\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x49\\x0\\xe\\x0\\x4\\x0\\x0\\x0\\x61\\x0\\x2\\x69\\x64\\x0\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x5\\x0\\x1\\x2\\x3\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x18\\x0\\xf\\x0\\x0\\x0\\x0\\x0\\x62\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x18\\x0\\xf\\x0\\x1\\x0\\x0\\x0\\x63\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x18\\x0\\xf\\x0\\x2\\x0\\x0\\x0\\x64\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x19\\x0\\xf\\x0\\x3\\x0\\x0\\x0\\x65\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x19\\x0\\xf\\x0\\x4\\x0\\x0\\x0\\x66\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x21\\x0\\xf\\x0\\x5\\x0\\x0\\x0\\x67\\x0\\x2\\x69\\x64\\x0\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\xc\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x68\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\xc\\x0\\x10\\x0\\x1\\x0\\x0\\x0\\x69\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\xc\\x0\\x10\\x0\\x2\\x0\\x0\\x0\\x6a\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x18\\x0\\x10\\x0\\x3\\x0\\x0\\x0\\x6b\\x0\\x2\\x69\\x64\\x0\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x14\\x0\\x11\\x0\\x0\\x0\\x0\\x0\\x6c\\x0\\
x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x14\\x0\\x11\\x0\\x1\\x0\\x0\\x0\\x6d\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\xc\\x0\\x12\\x0\\x0\\x0\\x0\\x0\\x6e\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\xc\\x0\\x12\\x0\\x1\\x0\\x0\\x0\\x6f\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\xc\\x0\\x12\\x0\\x2\\x0\\x0\\x0\\x70\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x26\\x0\\x12\\x0\\x3\\x0\\x0\\x0\\x71\\x0\\x2\\x69\\x64\\x0\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x46\\x0\\x13\\x0\\x0\\x0\\x0\\x0\\x72\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x47\\x0\\x13\\x0\\x1\\x0\\x0\\x0\\x73\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x47\\x0\\x13\\x0\\x2\\x0\\x0\\x0\\x74\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x47\\x0\\x13\\x0\\x3\\x0\\x0\\x0\\x75\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x47\\x0\\x13\\x0\\x4\\x0\\x0\\x0\\x76\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\
\x10\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x65\\x0\\x13\\x0\\x5\\x0\\x0\\x0\\x77\\x0\\x2\\x69\\x64\\x0\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x20\\x0\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x1c\\x0\\x14\\x0\\x0\\x0\\x0\\x0\\x78\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1c\\x0\\x14\\x0\\x1\\x0\\x0\\x0\\x79\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1c\\x0\\x14\\x0\\x2\\x0\\x0\\x0\\x7a\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1c\\x0\\x14\\x0\\x3\\x0\\x0\\x0\\x7b\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x24\\x0\\x14\\x0\\x4\\x0\\x0\\x0\\x7c\\x0\\x2\\x69\\x64\\x0\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x2c\\x0\\x15\\x0\\x0\\x0\\x0\\x0\\x7d\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x2c\\x0\\x15\\x0\\x1\\x0\\x0\\x0\\x7e\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x18\\x0\\x16\\x0\\x0\\x0\\x0\\x0\\x7f\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x18\\x0\\x16\\x0\\x1\\x0\\x0\\x0\\x80\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x6
9\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x23\\x0\\x16\\x0\\x2\\x0\\x0\\x0\\x81\\x0\\x2\\x69\\x64\\x0\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x24\\x0\\x17\\x0\\x0\\x0\\x0\\x0\\x82\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x24\\x0\\x17\\x0\\x1\\x0\\x0\\x0\\x83\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x28\\x0\\x17\\x0\\x2\\x0\\x0\\x0\\x84\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x2c\\x0\\x17\\x0\\x3\\x0\\x0\\x0\\x85\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x32\\x0\\x18\\x0\\x0\\x0\\x0\\x0\\x86\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x32\\x0\\x18\\x0\\x1\\x0\\x0\\x0\\x87\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x26\\x0\\x19\\x0\\x0\\x0\\x0\\x0\\x88\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x26\\x0\\x19\\x0\\x1\\x0\\x0\\x0\\x89\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1f\\x0\\x1a\\x0\\x0\\x0\\x0\\x0\\x8a\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x0\\x1f\\x0\\x1a\\x0\\x1\\x0\\x0\\x0\\x8b\\x0\\x2\\
x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x0\\x33\\x0\\x1b\\x0\\x0\\x0\\x0\\x0\\x8c\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4a\\x0\\x1c\\x0\\x0\\x0\\x0\\x0\\x8d\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4a\\x0\\x1c\\x0\\x1\\x0\\x0\\x0\\x8e\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4e\\x0\\x1c\\x0\\x2\\x0\\x0\\x0\\x8f\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x27\\x0\\x1d\\x0\\x0\\x0\\x0\\x0\\x90\\x0\\x2\\x69\\x64\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x28\\x0\\x1d\\x0\\x1\\x0\\x0\\x0\\x91\\x0\\x2\\x69\\x64\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x2b\\x0\\x1e\\x0\\x0\\x0\\x0\\x0\\x92\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x2
c\\x0\\x1e\\x0\\x1\\x0\\x0\\x0\\x93\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x2b\\x0\\x1f\\x0\\x0\\x0\\x0\\x0\\x94\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x2c\\x0\\x1f\\x0\\x1\\x0\\x0\\x0\\x95\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x25\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x96\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x26\\x0\\x20\\x0\\x1\\x0\\x0\\x0\\x97\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x26\\x0\\x20\\x0\\x2\\x0\\x0\\x0\\x98\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x2e\\x0\\x21\\x0\\x0\\x0\\x0\\x0\\x99\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x2e\\x0\\x21\\x0\\x1\\x0\\x0\\x0\\x9a\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x2c\\x0\\x22\\x0\\x0\\x0\\x0\\x0\\x9b\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x2c\\x0\\x22\\x0\\x1\\x0\\x0\\x0\\x9c\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\
\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x23\\x0\\x0\\x0\\x0\\x0\\x9d\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x23\\x0\\x1\\x0\\x0\\x0\\x9e\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x14\\x0\\x24\\x0\\x0\\x0\\x0\\x0\\x9f\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x14\\x0\\x24\\x0\\x1\\x0\\x0\\x0\\xa0\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x2d\\x0\\x25\\x0\\x0\\x0\\x0\\x0\\xa1\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x2d\\x0\\x25\\x0\\x1\\x0\\x0\\x0\\xa2\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x28\\x0\\x26\\x0\\x0\\x0\\x0\\x0\\xa3\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x28\\x0\\x26\\x0\\x1\\x0\\x0\\x0\\xa4\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x3a\\x0\\x26\\x0\\x2\\x0\\x0\\x0\\xa5\\x0\\x2\\x69\\x64\\x0\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x1c\\x0\\x27\\x0\\x0\\x0\\x0\\x0\\xa6\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1c\\x0\\x27\\x0\\x1\\x0\\x0\\x0\\xa7\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1c\\x0\\x28\\x0\\x0\\x0\\x0\\x0\\xa8\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x
3\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1c\\x0\\x28\\x0\\x1\\x0\\x0\\x0\\xa9\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x29\\x0\\x0\\x0\\x0\\x0\\xaa\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x29\\x0\\x1\\x0\\x0\\x0\\xab\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x18\\x0\\x2a\\x0\\x0\\x0\\x0\\x0\\xac\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x18\\x0\\x2a\\x0\\x1\\x0\\x0\\x0\\xad\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x2a\\x0\\x2\\x0\\x0\\x0\\xae\\x0\\x2\\x69\\x64\\x0\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x24\\x0\\x2b\\x0\\x0\\x0\\x0\\x0\\xaf\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x25\\x0\\x2b\\x0\\x1\\x0\\x0\\x0\\xb0\\x0\\x2\\x69\\x64\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x35\\x0\\x2b\\x0\\x2\\x0\\x0\\x0\\xb1\\x0\\x2\\x69\\x64\\x0\\x8\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x2f\\x0\\x2c\\x0\\x0\\x0\\x0\\x0\\xb2\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x48\\x0\\x2c\\x0\\x1\\x0\\x0\\x0\\xb3\\x0\\x2\\x69\\x64\\x0\\x2\\x8\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x
2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x44\\x0\\x2d\\x0\\x0\\x0\\x0\\x0\\xb4\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x0\\x20\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x34\\x0\\x2e\\x0\\x0\\x0\\x0\\x0\\xb5\\x0\\x2\\x69\\x64\\x0\\x0\\x0\\x0\\x20\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x28\\x0\\x2f\\x0\\x0\\x0\\x0\\x0\\xb6\\x0\\x2\\x69\\x64\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 10000\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_request1",
    "content": "config {\n  name: \"envoy.filters.network.kafka_broker\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker\"\n    value: \"\\n\\\"envoy.filters.network.kafka_broker\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data:\"\\x7f\\xff\\xff\\xff\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x80\\x0\"\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_unknown_request",
    "content": "config {\n  name: \"envoy.filters.network.kafka_broker\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker\"\n    value: \"\\n\\\"envoy.filters.network.kafka_broker\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data:\"\\x0\\x0\\x0\\x1d\\x7f\\xff\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x9\\x63\\x6c\\x69\\x65\\x6e\\x74\\x2d\\x69\\x64\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 10000\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_1",
    "content": "config {\n  name: \"envoy.filters.network.local_ratelimit\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\"\n    value: \"\\nVtype.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\\022\\013\\010\\001\\032\\007\\010\\200^\\020\\200\\306\\001\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\000\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\000\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 12035000\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\000\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_time_overflow",
    "content": "config {\n  name: \"envoy.filters.network.local_ratelimit\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\"\n    value: \"\\nVtype.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\\022\\017\\010\\001\\032\\013\\010\\200\\336\\200\\200\\240\\007\\020\\200\\306!\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 12035000\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\013\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\000\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 12035000\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 53\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/mongodb_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.mongo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy\"\n    value: \"\\n\\001\\\\\\032\\007\\\"\\003\\010\\200t*\\000 \\001\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\120\\0\\0\\0\\1\\0\\0\\0\\1\\0\\0\\0\\324\\7\\0\\0\\4\\0\\0\\0\\164\\145\\163\\164\\56\\164\\145\\163\\164\\0\\24\\0\\0\\0\\377\\377\\377\\377\\52\\0\\0\\0\\2\\163\\164\\162\\151\\156\\147\\137\\156\\145\\145\\144\\137\\145\\163\\143\\0\\20\\0\\0\\0\\173\\42\\146\\157\\157\\42\\72\\40\\42\\142\\141\\162\\12\\42\\175\\0\\0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\56\\0\\0\\0\\2\\0\\0\\0\\2\\0\\0\\0\\1\\0\\0\\0\\10\\0\\0\\0\\40\\116\\0\\0\\0\\0\\0\\0\\24\\0\\0\\0\\2\\0\\0\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\"\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\45\\0\\0\\0\\3\\0\\0\\0\\3\\0\\0\\0\\325\\7\\0\\0\\0\\0\\0\\0\\164\\145\\163\\164\\0\\24\\0\\0\\0\\40\\116\\0\\0\\0\\0\\0\\0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\43\\0\\0\\0\\4\\0\\0\\0\\4\\0\\0\\0\\322\\7\\0\\0\\10\\0\\0\\0\\164\\145\\163\\164\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\"\n  }\n}\n\n\n\n\nactions {\n  on_data {\n    data: \"\\50\\0\\0\\0\\5\\0\\0\\0\\5\\0\\0\\0\\327\\7\\0\\0\\0\\0\\0\\0\\2\\0\\0\\0\\40\\116\\0\\0\\0\\0\\0\\0\\100\\234\\0\\0\\0\\0\\0\\0\"\n  }\n}\n\n\n\nactions {\n  on_data {\n    data: \"\\120\\0\\0\\0\\17\\0\\0\\0\\31\\0\\0\\0\\332\\7\\0\\0\\124\\145\\163\\164\\40\\144\\141\\164\\141\\142\\141\\163\\145\\0\\124\\145\\163\\164\\40\\143\\157\\155\\155\\141\\156\\144\\40\\156\\141\\155\\145\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\\26\\0\\0\\0\\2\\167\\157\\162\\154\\144\\0\\6\\0\\0\\0\\150\\145\\154\\154\\157\\0\\0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\60\\0\\0\\0\\20\\0\\0\\0\\32\\0\\0\\0\\333\\7\\0\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\\26\\0\\0\\0\\2\\167\\157\\162\\154\\144\\0\\6\\0\\0\\0\\150\\145\\154\\154\\157\\0\\0\"\n  }\n}\n\nactions {\n  advance_time {\n   
 milliseconds: 14848\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\\001\\000\\000\\000\\000\\000\\000\\001\"\n    end_stream: true\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_data {\n    data: \"<\"\n    end_stream: true\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_data {\n    data: \"type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n  }\n}\nactions {\n  on_data {\n    data: \"pH\\037\\000 `\\000\\000\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_data {\n    data: \"=\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/mongodb_proxy_2",
    "content": "config {\n  name: \"envoy.filters.network.mongo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy\"\n    value: \"\\n\\001\\\\\\032\\t\\032\\002\\020\\010\\\"\\003\\010\\200t \\001\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\120\\0\\0\\0\\1\\0\\0\\0\\1\\0\\0\\0\\324\\7\\0\\0\\4\\0\\0\\0\\164\\145\\163\\164\\56\\164\\145\\163\\164\\0\\24\\0\\0\\0\\377\\377\\377\\377\\52\\0\\0\\0\\2\\163\\164\\162\\151\\156\\147\\137\\156\\145\\145\\144\\137\\145\\163\\143\\0\\20\\0\\0\\0\\173\\42\\146\\157\\157\\42\\72\\40\\42\\142\\141\\162\\12\\42\\175\\0\\0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\56\\0\\0\\0\\2\\0\\0\\0\\2\\0\\0\\0\\1\\0\\0\\0\\10\\0\\0\\0\\40\\116\\0\\0\\0\\0\\0\\0\\24\\0\\0\\0\\2\\0\\0\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\"\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\45\\0\\0\\0\\3\\0\\0\\0\\3\\0\\0\\0\\325\\7\\0\\0\\0\\0\\0\\0\\164\\145\\163\\164\\0\\24\\0\\0\\0\\40\\116\\0\\0\\0\\0\\0\\0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\43\\0\\0\\0\\4\\0\\0\\0\\4\\0\\0\\0\\322\\7\\0\\0\\10\\0\\0\\0\\164\\145\\163\\164\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\"\n  }\n}\n\n\n\n\nactions {\n  on_data {\n    data: \"\\50\\0\\0\\0\\5\\0\\0\\0\\5\\0\\0\\0\\327\\7\\0\\0\\0\\0\\0\\0\\2\\0\\0\\0\\40\\116\\0\\0\\0\\0\\0\\0\\100\\234\\0\\0\\0\\0\\0\\0\"\n  }\n}\n\n\n\nactions {\n  on_data {\n    data: \"\\120\\0\\0\\0\\17\\0\\0\\0\\31\\0\\0\\0\\332\\7\\0\\0\\124\\145\\163\\164\\40\\144\\141\\164\\141\\142\\141\\163\\145\\0\\124\\145\\163\\164\\40\\143\\157\\155\\155\\141\\156\\144\\40\\156\\141\\155\\145\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\\26\\0\\0\\0\\2\\167\\157\\162\\154\\144\\0\\6\\0\\0\\0\\150\\145\\154\\154\\157\\0\\0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\60\\0\\0\\0\\20\\0\\0\\0\\32\\0\\0\\0\\333\\7\\0\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\\26\\0\\0\\0\\2\\167\\157\\162\\154\\144\\0\\6\\0\\0\\0\\150\\145\\154\\154\\157\\0\\0\"\n  }\n}\n\nactions {\n  
advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\\001\\000\\000\\000\\000\\000\\000\\001\"\n    end_stream: true\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_data {\n    data: \"<\"\n    end_stream: true\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_data {\n    data: \"type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n  }\n}\nactions {\n  on_data {\n    data: \"pH\\037\\000 `\\000\\000\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_data {\n    data: \"=\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/mysql_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.mysql_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy\"\n    value: \"\\n\\006#\\336\\215\\302\\246\\001\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\34\\0\\0\\0\\12\\65\\56\\60\\56\\65\\64\\0\\136\\0\\0\\0\\41\\100\\163\\141\\154\\164\\43\\44\\0\\1\\1\\41\\0\\2\\0\\2\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\57\\0\\0\\1\\0\\0\\0\\3\\1\\0\\0\\0\\41\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\165\\163\\145\\162\\61\\0\\160\\64\\44\\44\\167\\60\\162\\66\\0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\7\\0\\0\\2\\376\\1\\0\\0\\0\\1\\0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\14\\0\\0\\3\\155\\171\\163\\161\\154\\137\\157\\160\\141\\161\\165\\145\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\7\\0\\0\\4\\377\\1\\0\\0\\0\\1\\0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\30\\0\\0\\0\\3\\103\\122\\105\\101\\124\\105\\40\\104\\101\\124\\101\\102\\101\\123\\105\\40\\155\\171\\163\\161\\154\\144\\142\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\34\\0\\0\\0\\12\\65\\56\\60\\56\\65\\64\\0\\136\\0\\0\\0\\41\\100\\163\\141\\154\\164\\43\\44\\0\\1\\1\\41\\0\\2\\0\\2\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_data {\n    data: \"\\7\\0\\0\\2\\377\\1\\0\\0\\0\\1\\0\"\n    end_stream: true\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\57\\0\\0\\1\\0\\2\\0\\3\\1\\0\\0\\0\\41\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\165\\163\\145\\162\\61\\0\\160\\64\\44\\44\\167\\60\\162\\66\\0\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\7\\0\\0\\2\\0\\1\\0\\0\\0\\1\\0\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\7\\0\\0\\2\\376\\1\\0\\0\\0\\1\\0\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\7\\0\\0\\4\\377\\1\\0\\0\\0\\1\\0\"\n  }\n}\nactions {\n  
on_data {\n    data: \"\\57\\0\\0\\1\\0\\0\\0\\3\\1\\0\\0\\0\\41\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\165\\163\\145\\162\\61\\0\\160\\64\\44\\44\\167\\60\\162\\66\\0\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"\\30\\0\\0\\0\\3\\103\\122\\105\\101\\124\\105\\40\\104\\101\\124\\101\\102\\101\\123\\105\\40\\155\\171\\163\\161\\154\\144\\142\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\30\\0\\0\\5\\3\\103\\122\\105\\101\\124\\105\\40\\104\\101\\124\\101\\102\\101\\123\\105\\40\\155\\171\\163\\161\\154\\144\\142\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\1\\0\\0\\0\\4\" \n  }\n}\nactions {\n  on_data {\n    data: \"\\7\\0\\0\\4\\1\\1\\0\\0\\0\\1\\0\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\7\\0\\0\\4\\1\\1\\0\\0\\0\\1\\0\"\n  }\n}\nactions {\n  on_data {\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"3\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/oss_redis_proxy_stackoverflow_with_long_route_prefix",
    "content": "config {\n  name: \"envoy.filters.network.redis_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy\"\n    value: \"\\n\\001\\026\\032\\030\\n\\002\\020\\004\\030\\001 \\200\\372\\003*\\0002\\006\\010\\200\\200\\200\\340\\0038\\004@\\001 \\001*\\204\\367\\013\\n\\200\\003\\n\\303\\001oooooooooooooooooooooooooooooooo\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000ooooooooooo/ooooooooooooooooooooooo\\032\\010\\177\\177\\177\\177\\177\\177\\333\\246\\\"m\\ni\\363\\270\\221\\233\\364\\210\\267\\225\\362\\251\\233\\253\\360\\240\\247\\240\\361\\262\\222\\254\\363\\254\\261\\226\\361\\252\\262\\275\\362\\211\\220\\242\\361\\247\\243\\256\\363\\263\\247\\233\\360\\225\\255\\250\\361\\231\\220\\275\\363\\237\\230\\266\\363\\261\\260\\206\\362\\201\\247\\270\\362\\272\\271\\251\\363\\250\\233\\222\\360\\251\\242\\244\\363\\250\\274\\263\\363\\247\\230\\236\\361\\222\\260\\237\\363\\233\\255\\222\\363\\264\\271\\267\\363\\265\\276\\271\\361\\234\\202\\215\\360\\227\\216\\255/\\030\\001\\\"\\n\\n\\010/dev/fd/\\\"\\014\\n\\002\\000\\003\\022\\006\\n\\004\\010.\\020\\002\\\"\\014\\n\\010envoy.fi\\030\\001\\\"\\027\\n\\013\\177\\177\\177envoy.fi\\022\\010\\n\\006\\010\\262\\354\\300\\341\\005\\n\\361\\001\\n\\002\\000\\003\\020\\001\\032\\010envoy.fi\\\"\\034\\n\\030envoy.type.v3.Fractional\\030\\001\\\">\\n<\\000\
\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\\"\\024\\n\\010envoy.fi\\022\\010\\n\\002\\010.\\022\\002\\000\\003\\\"k\\ni\\363\\270\\221\\233\\364\\210\\267\\225\\362\\251\\233\\253\\360\\240\\247\\240\\361\\262\\222\\254\\363\\254\\261\\226\\361\\252\\262\\275\\362\\211\\220\\242\\361\\247\\243\\256\\363\\263\\247\\233\\360\\225\\255\\250\\307\\233\\360\\225\\255\\250\\361\\231\\220\\275\\363\\237\\230\\266\\363\\261\\260\\206\\362\\201\\247\\270\\322\\272\\360\\251\\242\\244\\363\\250\\274\\263\\363\\247\\230\\236\\361\\222\\260\\237\\363\\233\\255\\222\\363\\264\\271\\267\\363\\265\\276\\271\\361\\234\\202\\215\\360\\227\\216\\255/\\n\\243\\361\\013\\n\\326\\357\\013\\177\\177\\177envoconfig {\\n  name: \\\"envoy.filters.network.dubbo_proxy\\\"\\n  typed_config {\\n    type_url: \\\"type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\\\"\\n    value: 
\\\"\\\\n\\\\001B\\\\\\\"\\\\277\\\\373\\\\002\\\\n\\\\001]\\\\\\\"\\\\264\\\\373\\\\002?\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000,\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\020\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\177\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\024\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\001\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\177\\\\177\\\\177\\\\177\\\\177\\\\177\\\\177\\\\035\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\002\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\004\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\y.fi000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\001\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\002\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000#\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\335\\\\221\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000*\\\\000*\\\\000*\\\\005\\\\022\\\\003\\\\n\\\\001\\\\013\\\"\\n  }\\n}\\n\\032\\010\\177\\177\\177\\177\\177\\177\\333\\246\\\"\\022\\n\\010\\001\\000\\000\\000\\000\\000\\000\\005\\022\\004\\n\\002\\020\\001\\030\\001\\\"\\004\\n\\002\\000\\003\\\"\\005\\n\\001&\\030\\001\\\"!\\n\\030envoy.type.v3.Frcational\\022\\005\\n\\003\\010\\256\\020\\\"\\014\\n\\010envoy.fi\\030\\001\\\"k\\ni\\363\\270\\221\\233\\364\\210\\267\\225\\362\\251\\233\\253\\360\\240\\247\\240\\361\\262\\222\\254\\363\\254\\261\\226\\361\\252\\262\\275\\362\\211\\220\\242\\361\\247\\243\\256\\363\\263\\247\\233\\360\\225\\255\\250\\307\\233\\360\\225\\255\\250\\361\\231\\220\\275\\363\\237\\230\\266\\363\\261\\260\\206\\362\\201\\247\\270\\322\\272\\360\\251\\242\\244\\363\\250\\274\\263\\363\\247\\230\\236\\361\\222\\260\\237\\363\\233\\255\\222\\363\\264\\271\\267\\363\\265\\276\\271\\361\\234\\202\\215\\360\\227\\216\\255/\\nb\\n\\010\\177\\177\\177\\177\\177\\177\\333\\246\\020\\001\\032!envoy.filters.netwo\\322\\225\\341\\233\\203bbo_proxy\\\"\\026\\n\\010envoy.fi\\022\\010\\n\\002\\010.\\022\\002\\000\\003\\030\\001\\\"\\031\\n\\013\\177\\177\\177envoy.fi\\022\\010\\n\\006\\010\\262\\354\\300\\341\\005\\030\\001\\020\\0012\\332\\357\\013\\032\\326\\357\\013\\177\\177\\177envoconfig {\\n  name: \\\"envoy.filters.network.dubbo_proxy\\\"\\n  typed_config {\\n    type_url: \\\"type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy\\\"\\n    value: 
\\\"\\\\n\\\\001B\\\\\\\"\\\\277\\\\373\\\\002\\\\n\\\\001]\\\\\\\"\\\\264\\\\373\\\\002?\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000,\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\020\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\177\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\024\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\001\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\177\\\\177\\\\177\\\\177\\\\177\\\\177\\\\177\\\\035\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\002\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\004\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\y.fi000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\001\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\002\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000#\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\
000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\
\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000
\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\335\\\\221\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\0
00\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\
\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\
\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\00
0\\\\000\\\\000\\\\000*\\\\000*\\\\000*\\\\005\\\\022\\\\003\\\\n\\\\001\\\\013\\\"\\n  }\\n}\\n:$\\032\\\"mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ratelimit_1",
    "content": "config {\n  name: \"envoy.filters.network.ratelimit\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.ratelimit.v3.RateLimit\"\n    value: \"\\nP\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\022Y\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\032W\\nU\\n\\001[\\022P\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\\"\\005\\020\\200\\200\\214\\001(\\0012e\\022c\\022Y\\n\\010\\001\\000\\000\\000\\000\\000\\000\\002\\\"M\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\032\\006\\010\\200\\200\\204\
\360\\002\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\000\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 7299840\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rbac_1",
    "content": "config {\n  name: \"envoy.filters.network.rbac\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC\"\n    value: \"\\032\\010\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n  }\n}\nactions {\n  on_data {\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.redis_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy\"\n    value: \"\\n\\001N\\032\\032\\n\\005\\020\\200\\200\\200\\030\\030\\001 \\377\\377\\377\\337\\017*\\005\\020\\200\\200\\200\\0302\\000@\\001*\\010\\n\\006\\032\\004\\001\\000\\000\\010\"\n  }\n}\nactions {\n  on_new_connection {\n\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"0\"\n    end_stream: true\n  }\n}\nactions {\n  on_data {\n    data: \"0\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1_auth_no_pwd_set",
    "content": "config {\n  name: \"envoy.filters.network.redis_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy\"\n    value: \"\\n\\001N\\032\\032\\n\\005\\020\\200\\200\\200\\030\\030\\001 \\377\\377\\377\\337\\017*\\005\\020\\200\\200\\200\\0302\\000@\\001*\\010\\n\\006\\032\\004\\001\\000\\000\\010\"\n  }\n}\nactions {\n  on_new_connection {\n\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x2a\\x32\\xd\\xa\\x24\\x34\\xd\\xa\\x61\\x75\\x74\\x68\\xd\\xa\\x24\\x31\\x32\\xd\\xa\\x73\\x6f\\x6d\\x65\\x70\\x61\\x73\\x73\\x77\\x6f\\x72\\x64\\xd\\xa\"\n  }\n}\nactions {\n  on_data {\n    data: \"0\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1_auth_pwd_set",
    "content": "config {\n  name: \"envoy.filters.network.redis_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy\"\n    value: \"\\xa\\x61\\x64\\x6d\\x69\\x6e\\x3a\\xa\\x20\\x20\\x61\\x63\\x63\\x65\\x73\\x73\\x5f\\x6c\\x6f\\x67\\x5f\\x70\\x61\\x74\\x68\\x3a\\x20\\x2f\\x64\\x65\\x76\\x2f\\x6e\\x75\\x6c\\x6c\\xa\\x20\\x20\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x73\\x6f\\x63\\x6b\\x65\\x74\\x5f\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\x20\\x31\\x32\\x37\\x2e\\x30\\x2e\\x30\\x2e\\x31\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x70\\x6f\\x72\\x74\\x5f\\x76\\x61\\x6c\\x75\\x65\\x3a\\x20\\x30\\xa\\x73\\x74\\x61\\x74\\x69\\x63\\x5f\\x72\\x65\\x73\\x6f\\x75\\x72\\x63\\x65\\x73\\x3a\\xa\\x20\\x20\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x2d\\x20\\x6e\\x61\\x6d\\x65\\x3a\\x20\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\\x5f\\x30\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x74\\x79\\x70\\x65\\x3a\\x20\\x53\\x54\\x41\\x54\\x49\\x43\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x6c\\x62\\x5f\\x70\\x6f\\x6c\\x69\\x63\\x79\\x3a\\x20\\x52\\x41\\x4e\\x44\\x4f\\x4d\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x6c\\x6f\\x61\\x64\\x5f\\x61\\x73\\x73\\x69\\x67\\x6e\\x6d\\x65\\x6e\\x74\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\\x5f\\x6e\\x61\\x6d\\x65\\x3a\\x20\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\\x5f\\x30\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x65\\x6e\\x64\\x70\\x6f\\x69\\x6e\\x74\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x2d\\x20\\x6c\\x62\\x5f\\x65\\x6e\\x64\\x70\\x6f\\x69\\x6e\\x74\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x2d\\x20\\x65\\x6e\\x64\\x70\\x6f\\x69\\x6e\\x74\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\
\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x73\\x6f\\x63\\x6b\\x65\\x74\\x5f\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\x20\\x31\\x32\\x37\\x2e\\x30\\x2e\\x30\\x2e\\x31\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x70\\x6f\\x72\\x74\\x5f\\x76\\x61\\x6c\\x75\\x65\\x3a\\x20\\x30\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x2d\\x20\\x65\\x6e\\x64\\x70\\x6f\\x69\\x6e\\x74\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x73\\x6f\\x63\\x6b\\x65\\x74\\x5f\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\x20\\x31\\x32\\x37\\x2e\\x30\\x2e\\x30\\x2e\\x31\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x70\\x6f\\x72\\x74\\x5f\\x76\\x61\\x6c\\x75\\x65\\x3a\\x20\\x30\\xa\\x20\\x20\\x6c\\x69\\x73\\x74\\x65\\x6e\\x65\\x72\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x6e\\x61\\x6d\\x65\\x3a\\x20\\x6c\\x69\\x73\\x74\\x65\\x6e\\x65\\x72\\x5f\\x30\\xa\\x20\\x20\\x20\\x20\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x73\\x6f\\x63\\x6b\\x65\\x74\\x5f\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x61\\x64\\x64\\x72\\x65\\x73\\x73\\x3a\\x20\\x31\\x32\\x37\\x2e\\x30\\x2e\\x30\\x2e\\x31\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x70\\x6f\\x72\\x74\\x5f\\x76\\x61\\x6c\\x75\\x65\\x3a\\x20\\x30\\xa\\x20\\x20\\x20\\x20\\x66\\x69\\x6c\\x74\\x65\\x72\\x5f\\x63\\x68\\x61\\x69\\x6e\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x66\\x69\\x6c\
\x74\\x65\\x72\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x6e\\x61\\x6d\\x65\\x3a\\x20\\x72\\x65\\x64\\x69\\x73\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x74\\x79\\x70\\x65\\x64\\x5f\\x63\\x6f\\x6e\\x66\\x69\\x67\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x40\\x74\\x79\\x70\\x65\\x22\\x3a\\x20\\x74\\x79\\x70\\x65\\x2e\\x67\\x6f\\x6f\\x67\\x6c\\x65\\x61\\x70\\x69\\x73\\x2e\\x63\\x6f\\x6d\\x2f\\x65\\x6e\\x76\\x6f\\x79\\x2e\\x63\\x6f\\x6e\\x66\\x69\\x67\\x2e\\x66\\x69\\x6c\\x74\\x65\\x72\\x2e\\x6e\\x65\\x74\\x77\\x6f\\x72\\x6b\\x2e\\x72\\x65\\x64\\x69\\x73\\x5f\\x70\\x72\\x6f\\x78\\x79\\x2e\\x76\\x32\\x2e\\x52\\x65\\x64\\x69\\x73\\x50\\x72\\x6f\\x78\\x79\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x73\\x74\\x61\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\\x3a\\x20\\x72\\x65\\x64\\x69\\x73\\x5f\\x73\\x74\\x61\\x74\\x73\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x70\\x72\\x65\\x66\\x69\\x78\\x5f\\x72\\x6f\\x75\\x74\\x65\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x63\\x61\\x74\\x63\\x68\\x5f\\x61\\x6c\\x6c\\x5f\\x72\\x6f\\x75\\x74\\x65\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\\x3a\\x20\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\\x5f\\x30\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x73\\x65\\x74\\x74\\x69\\x6e\\x67\\x73\\x3a\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x6f\\x70\\x5f\\x74\\x69\\x6d\\x65\\x6f\\x75\\x74\\x3a\\x20\\x35\\x73\\xa\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x64\\x6f\\x77\\x6e\\x73\\x74\\x72\\x65\\x61\\x6d\\x5f\\x61\\x75\\x74\\x68\\x5f\\x70\\x61\\x73\\x73\\x77\\x6f\\x72\\x64\\x3a\\x20\\x7b\\x20\\x69\\x6e\\x6c\\x69\\x6e\\x65\\x5f\\x73\\x74\\x72\\x69\\x6e\\x67\\x3a\\x20\\x73\\x6f\\x6d\\x65\\x70\\x61\\x73\\x73\\x77\\x6f\\x72\\x64\\x20\\x7d\\xa\"\n  }\n}\nactions {\n  on_new_connection {\n\n  }\n}\n\nactions {\n  on_data {\n    data: 
\"\\x2a\\x32\\xd\\xa\\x24\\x34\\xd\\xa\\x61\\x75\\x74\\x68\\xd\\xa\\x24\\x31\\x32\\xd\\xa\\x73\\x6f\\x6d\\x65\\x70\\x61\\x73\\x73\\x77\\x6f\\x72\\x64\\xd\\xa\"\n  }\n}\nactions {\n  on_data {\n    data: \"0\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1_bulk_string",
    "content": "config {\n  name: \"envoy.filters.network.redis_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy\"\n    value: \"\\n\\001N\\032\\032\\n\\005\\020\\200\\200\\200\\030\\030\\001 \\377\\377\\377\\337\\017*\\005\\020\\200\\200\\200\\0302\\000@\\001*\\010\\n\\006\\032\\004\\001\\000\\000\\010\"\n  }\n}\nactions {\n  on_new_connection {\n\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x24\\x31\\x31\\xd\\xa\\x62\\x75\\x6c\\x6b\\x20\\x73\\x74\\x72\\x69\\x6e\\x67\\xd\\xa\"\n  }\n}\nactions {\n  on_data {\n    data: \"0\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1_negative_large_integer",
    "content": "config {\n  name: \"envoy.filters.network.redis_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy\"\n    value: \"\\n\\001N\\032\\032\\n\\005\\020\\200\\200\\200\\030\\030\\001 \\377\\377\\377\\337\\017*\\005\\020\\200\\200\\200\\0302\\000@\\001*\\010\\n\\006\\032\\004\\001\\000\\000\\010\"\n  }\n}\nactions {\n  on_new_connection {\n\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x2a\\x32\\xd\\xa\\x2a\\x33\\xd\\xa\\x24\\x35\\xd\\xa\\x68\\x65\\x6c\\x6c\\x6f\\xd\\xa\\x3a\\x30\\xd\\xa\\x24\\x2d\\x31\\xd\\xa\\x24\\x35\\xd\\xa\\x77\\x6f\\x72\\x6c\\x64\\xd\\xa\"\n  }\n}\nactions {\n  on_data {\n    data: \"0\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1_nested_array",
    "content": "config {\n  name: \"envoy.filters.network.redis_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy\"\n    value: \"\\n\\001N\\032\\032\\n\\005\\020\\200\\200\\200\\030\\030\\001 \\377\\377\\377\\337\\017*\\005\\020\\200\\200\\200\\0302\\000@\\001*\\010\\n\\006\\032\\004\\001\\000\\000\\010\"\n  }\n}\nactions {\n  on_new_connection {\n\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x2a\\x32\\xd\\xa\\x2a\\x33\\xd\\xa\\x24\\x35\\xd\\xa\\x68\\x65\\x6c\\x6c\\x6f\\xd\\xa\\x3a\\x30\\xd\\xa\\x24\\x2d\\x31\\xd\\xa\\x24\\x35\\xd\\xa\\x77\\x6f\\x72\\x6c\\x64\\xd\\xa\"\n  }\n}\nactions {\n  on_data {\n    data: \"0\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1_null",
    "content": "config {\n  name: \"envoy.filters.network.redis_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy\"\n    value: \"\\n\\001N\\032\\032\\n\\005\\020\\200\\200\\200\\030\\030\\001 \\377\\377\\377\\337\\017*\\005\\020\\200\\200\\200\\0302\\000@\\001*\\010\\n\\006\\032\\004\\001\\000\\000\\010\"\n  }\n}\nactions {\n  on_new_connection {\n\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x24\\x2d\\x31\\xd\\xa\"\n  }\n}\nactions {\n  on_data {\n    data: \"0\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_crash",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\n \\022\\034\\n\\032__________________________ \\001 \\001\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n    end_stream: false\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_end_stream",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x12\\x31\\xa\\xd\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x5f\\x72\\x6f\\x75\\x74\\x65\\x12\\x20\\xa\\xe\\xa\\xc\\xa\\xa\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x12\\xe\\xa\\xc\\x66\\x61\\x6b\\x65\\x5f\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\xa5\\x0\\x0\\x0\\xa1\\x7b\\x22\\x66\\x6c\\x61\\x67\\x22\\x3a\\x30\\x2c\\x22\\x63\\x6f\\x64\\x65\\x22\\x3a\\x33\\x35\\x2c\\x22\\x65\\x78\\x74\\x46\\x69\\x65\\x6c\\x64\\x73\\x22\\x3a\\x7b\\x22\\x63\\x6f\\x6e\\x73\\x75\\x6d\\x65\\x72\\x47\\x72\\x6f\\x75\\x70\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x63\\x67\\x22\\x2c\\x22\\x63\\x6c\\x69\\x65\\x6e\\x74\\x49\\x44\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x63\\x6c\\x69\\x65\\x6e\\x74\\x5f\\x69\\x64\\x22\\x7d\\x2c\\x22\\x6f\\x70\\x61\\x71\\x75\\x65\\x22\\x3a\\x37\\x2c\\x22\\x73\\x65\\x72\\x69\\x61\\x6c\\x69\\x7a\\x65\\x54\\x79\\x70\\x65\\x43\\x75\\x72\\x72\\x65\\x6e\\x74\\x52\\x50\\x43\\x22\\x3a\\x22\\x4a\\x53\\x4f\\x4e\\x22\\x2c\\x22\\x76\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x30\\x2c\\x22\\x6c\\x61\\x6e\\x67\\x75\\x61\\x67\\x65\\x22\\x3a\\x22\\x43\\x50\\x50\\x22\\x7d\"\n    end_stream: yes\n  }\n}\n\nactions {\n  on_data {\n    data: \"\"\n    end_stream: yes\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_invalid_header",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x12\\x31\\xa\\xd\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x5f\\x72\\x6f\\x75\\x74\\x65\\x12\\x20\\xa\\xe\\xa\\xc\\xa\\xa\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x12\\xe\\xa\\xc\\x66\\x61\\x6b\\x65\\x5f\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1c\\x1\\x0\\x0\\x14\\x72\\x61\\x6e\\x64\\x6f\\x6d\\x20\\x74\\x65\\x78\\x74\\x20\\x73\\x75\\x66\\x66\\x69\\x63\\x65\\x73\"\n    end_stream: false\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_on_ack_msg",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x12\\x31\\xa\\xd\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x5f\\x72\\x6f\\x75\\x74\\x65\\x12\\x20\\xa\\xe\\xa\\xc\\xa\\xa\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x12\\xe\\xa\\xc\\x66\\x61\\x6b\\x65\\x5f\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\xcf\\x0\\x0\\x0\\xcb\\x7b\\x22\\x6c\\x61\\x6e\\x67\\x75\\x61\\x67\\x65\\x22\\x3a\\x22\\x43\\x50\\x50\\x22\\x2c\\x22\\x66\\x6c\\x61\\x67\\x22\\x3a\\x30\\x2c\\x22\\x63\\x6f\\x64\\x65\\x22\\x3a\\x35\\x31\\x2c\\x22\\x65\\x78\\x74\\x46\\x69\\x65\\x6c\\x64\\x73\\x22\\x3a\\x7b\\x22\\x71\\x75\\x65\\x75\\x65\\x49\\x64\\x22\\x3a\\x31\\x2c\\x22\\x74\\x6f\\x70\\x69\\x63\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x22\\x2c\\x22\\x6f\\x66\\x66\\x73\\x65\\x74\\x22\\x3a\\x31\\x2c\\x22\\x63\\x6f\\x6e\\x73\\x75\\x6d\\x65\\x72\\x47\\x72\\x6f\\x75\\x70\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x63\\x67\\x22\\x2c\\x22\\x65\\x78\\x74\\x72\\x61\\x49\\x6e\\x66\\x6f\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x65\\x78\\x74\\x72\\x61\\x22\\x7d\\x2c\\x22\\x6f\\x70\\x61\\x71\\x75\\x65\\x22\\x3a\\x31\\x38\\x2c\\x22\\x73\\x65\\x72\\x69\\x61\\x6c\\x69\\x7a\\x65\\x54\\x79\\x70\\x65\\x43\\x75\\x72\\x72\\x65\\x6e\\x74\\x52\\x50\\x43\\x22\\x3a\\x22\\x4a\\x53\\x4f\\x4e\\x22\\x2c\\x22\\x76\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x30\\x7d\"\n    end_stream: false\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_on_get_topic_route",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x12\\x31\\xa\\xd\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x5f\\x72\\x6f\\x75\\x74\\x65\\x12\\x20\\xa\\xe\\xa\\xc\\xa\\xa\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x12\\xe\\xa\\xc\\x66\\x61\\x6b\\x65\\x5f\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x86\\x0\\x0\\x0\\x82\\x7b\\x22\\x65\\x78\\x74\\x46\\x69\\x65\\x6c\\x64\\x73\\x22\\x3a\\x7b\\x22\\x74\\x6f\\x70\\x69\\x63\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x22\\x7d\\x2c\\x22\\x6f\\x70\\x61\\x71\\x75\\x65\\x22\\x3a\\x31\\x33\\x2c\\x22\\x73\\x65\\x72\\x69\\x61\\x6c\\x69\\x7a\\x65\\x54\\x79\\x70\\x65\\x43\\x75\\x72\\x72\\x65\\x6e\\x74\\x52\\x50\\x43\\x22\\x3a\\x22\\x4a\\x53\\x4f\\x4e\\x22\\x2c\\x22\\x76\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x30\\x2c\\x22\\x6c\\x61\\x6e\\x67\\x75\\x61\\x67\\x65\\x22\\x3a\\x22\\x43\\x50\\x50\\x22\\x2c\\x22\\x66\\x6c\\x61\\x67\\x22\\x3a\\x30\\x2c\\x22\\x63\\x6f\\x64\\x65\\x22\\x3a\\x31\\x30\\x35\\x7d\"\n    end_stream: false\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_on_heartbeat",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x12\\x31\\xa\\xd\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x5f\\x72\\x6f\\x75\\x74\\x65\\x12\\x20\\xa\\xe\\xa\\xc\\xa\\xa\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x12\\xe\\xa\\xc\\x66\\x61\\x6b\\x65\\x5f\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x4\\x7a\\x0\\x0\\x0\\x5d\\x7b\\x22\\x6f\\x70\\x61\\x71\\x75\\x65\\x22\\x3a\\x31\\x2c\\x22\\x73\\x65\\x72\\x69\\x61\\x6c\\x69\\x7a\\x65\\x54\\x79\\x70\\x65\\x43\\x75\\x72\\x72\\x65\\x6e\\x74\\x52\\x50\\x43\\x22\\x3a\\x22\\x4a\\x53\\x4f\\x4e\\x22\\x2c\\x22\\x76\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x30\\x2c\\x22\\x6c\\x61\\x6e\\x67\\x75\\x61\\x67\\x65\\x22\\x3a\\x22\\x43\\x50\\x50\\x22\\x2c\\x22\\x66\\x6c\\x61\\x67\\x22\\x3a\\x30\\x2c\\x22\\x63\\x6f\\x64\\x65\\x22\\x3a\\x33\\x34\\x7d\\xa\\x20\\x20\\x20\\x20\\x7b\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x63\\x6c\\x69\\x65\\x6e\\x74\\x49\\x44\\x22\\x3a\\x20\\x22\\x31\\x32\\x37\\x2e\\x30\\x2e\\x30\\x2e\\x31\\x40\\x39\\x30\\x33\\x33\\x30\\x22\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x63\\x6f\\x6e\\x73\\x75\\x6d\\x65\\x72\\x44\\x61\\x74\\x61\\x53\\x65\\x74\\x22\\x3a\\x20\\x5b\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x7b\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x63\\x6f\\x6e\\x73\\x75\\x6d\\x65\\x46\\x72\\x6f\\x6d\\x57\\x68\\x65\\x72\\x65\\x22\\x3a\\x20\\x22\\x43\\x4f\\x4e\\x53\\x55\\x4d\\x45\\x5f\\x46\\x52\\x4f\\x4d\\x5f\\x46\\x49\\x52\\x53\\x54\\x5f\\x4f\\x46\\x46\\x53\\x45\\x54\\x22\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x63\\x6f\\x6e\\x73\\x75\\x6d\\x65\\x54\\x79\\x70\\x65\\x22\\x3a\\x20\\x22\\x43\\x4f\\x4e\\x53\\x55\\x4d\\x45\\x5f\\x50\\x41\\x53\\x53\\x49\\x56\\x45\\x4c\\x59\\x22\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20
\\x20\\x22\\x67\\x72\\x6f\\x75\\x70\\x4e\\x61\\x6d\\x65\\x22\\x3a\\x20\\x22\\x74\\x65\\x73\\x74\\x5f\\x63\\x67\\x22\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x6d\\x65\\x73\\x73\\x61\\x67\\x65\\x4d\\x6f\\x64\\x65\\x6c\\x22\\x3a\\x20\\x22\\x43\\x4c\\x55\\x53\\x54\\x45\\x52\\x49\\x4e\\x47\\x22\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x73\\x75\\x62\\x73\\x63\\x72\\x69\\x70\\x74\\x69\\x6f\\x6e\\x44\\x61\\x74\\x61\\x53\\x65\\x74\\x22\\x3a\\x20\\x5b\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x7b\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x63\\x6c\\x61\\x73\\x73\\x46\\x69\\x6c\\x74\\x65\\x72\\x4d\\x6f\\x64\\x65\\x22\\x3a\\x20\\x66\\x61\\x6c\\x73\\x65\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x63\\x6f\\x64\\x65\\x53\\x65\\x74\\x22\\x3a\\x20\\x5b\\x5d\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x65\\x78\\x70\\x72\\x65\\x73\\x73\\x69\\x6f\\x6e\\x54\\x79\\x70\\x65\\x22\\x3a\\x20\\x22\\x54\\x41\\x47\\x22\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x73\\x75\\x62\\x53\\x74\\x72\\x69\\x6e\\x67\\x22\\x3a\\x20\\x22\\x2a\\x22\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x73\\x75\\x62\\x56\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x20\\x31\\x35\\x37\\x35\\x36\\x33\\x30\\x35\\x38\\x37\\x39\\x32\\x35\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x74\\x61\\x67\\x73\\x53\\x65\\x74\\x22\\x3a\\x20\\x5b\\x5d\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x74\\x6f\\x70\\x69\\x63\\x22\\x3a\\x20\\x22\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x22\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x7d\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x7b\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x
20\\x20\\x20\\x22\\x63\\x6c\\x61\\x73\\x73\\x46\\x69\\x6c\\x74\\x65\\x72\\x4d\\x6f\\x64\\x65\\x22\\x3a\\x20\\x66\\x61\\x6c\\x73\\x65\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x63\\x6f\\x64\\x65\\x53\\x65\\x74\\x22\\x3a\\x20\\x5b\\x5d\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x65\\x78\\x70\\x72\\x65\\x73\\x73\\x69\\x6f\\x6e\\x54\\x79\\x70\\x65\\x22\\x3a\\x20\\x22\\x54\\x41\\x47\\x22\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x73\\x75\\x62\\x53\\x74\\x72\\x69\\x6e\\x67\\x22\\x3a\\x20\\x22\\x2a\\x22\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x73\\x75\\x62\\x56\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x20\\x31\\x35\\x37\\x35\\x36\\x33\\x30\\x35\\x38\\x37\\x39\\x34\\x35\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x74\\x61\\x67\\x73\\x53\\x65\\x74\\x22\\x3a\\x20\\x5b\\x5d\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x74\\x6f\\x70\\x69\\x63\\x22\\x3a\\x20\\x22\\x25\\x52\\x45\\x54\\x52\\x59\\x25\\x70\\x6c\\x65\\x61\\x73\\x65\\x5f\\x72\\x65\\x6e\\x61\\x6d\\x65\\x5f\\x75\\x6e\\x69\\x71\\x75\\x65\\x5f\\x67\\x72\\x6f\\x75\\x70\\x5f\\x6e\\x61\\x6d\\x65\\x5f\\x34\\x22\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x7d\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x5d\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x75\\x6e\\x69\\x74\\x4d\\x6f\\x64\\x65\\x22\\x3a\\x20\\x66\\x61\\x6c\\x73\\x65\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x7d\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x5d\\x2c\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x70\\x72\\x6f\\x64\\x75\\x63\\x65\\x72\\x44\\x61\\x74\\x61\\x53\\x65\\x74\\x22\\x3a\\x20\\x5b\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x7b\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x22\\x67\\x72\\x6f\\x75\\x70\\x4e\\x61\\x6d\\x65\\x22\\x3a\\x20\\x22\\x43\\x4c\\
x49\\x45\\x4e\\x54\\x5f\\x49\\x4e\\x4e\\x45\\x52\\x5f\\x50\\x52\\x4f\\x44\\x55\\x43\\x45\\x52\\x22\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x7d\\xa\\x20\\x20\\x20\\x20\\x20\\x20\\x5d\\xa\\x20\\x20\\x20\\x20\\x7d\\xa\\x20\\x20\\x20\\x20\"\n    end_stream: false\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_on_pop_msg",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x12\\x31\\xa\\xd\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x5f\\x72\\x6f\\x75\\x74\\x65\\x12\\x20\\xa\\xe\\xa\\xc\\xa\\xa\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x12\\xe\\xa\\xc\\x66\\x61\\x6b\\x65\\x5f\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\xfd\\x0\\x0\\x0\\xf9\\x7b\\x22\\x6f\\x70\\x61\\x71\\x75\\x65\\x22\\x3a\\x31\\x37\\x2c\\x22\\x73\\x65\\x72\\x69\\x61\\x6c\\x69\\x7a\\x65\\x54\\x79\\x70\\x65\\x43\\x75\\x72\\x72\\x65\\x6e\\x74\\x52\\x50\\x43\\x22\\x3a\\x22\\x4a\\x53\\x4f\\x4e\\x22\\x2c\\x22\\x76\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x30\\x2c\\x22\\x6c\\x61\\x6e\\x67\\x75\\x61\\x67\\x65\\x22\\x3a\\x22\\x43\\x50\\x50\\x22\\x2c\\x22\\x66\\x6c\\x61\\x67\\x22\\x3a\\x30\\x2c\\x22\\x63\\x6f\\x64\\x65\\x22\\x3a\\x35\\x30\\x2c\\x22\\x65\\x78\\x74\\x46\\x69\\x65\\x6c\\x64\\x73\\x22\\x3a\\x7b\\x22\\x62\\x6f\\x72\\x6e\\x54\\x69\\x6d\\x65\\x22\\x3a\\x31\\x30\\x30\\x30\\x2c\\x22\\x71\\x75\\x65\\x75\\x65\\x49\\x64\\x22\\x3a\\x31\\x2c\\x22\\x74\\x6f\\x70\\x69\\x63\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x22\\x2c\\x22\\x69\\x6e\\x76\\x69\\x73\\x69\\x62\\x6c\\x65\\x54\\x69\\x6d\\x65\\x22\\x3a\\x36\\x30\\x30\\x30\\x2c\\x22\\x63\\x6f\\x6e\\x73\\x75\\x6d\\x65\\x72\\x47\\x72\\x6f\\x75\\x70\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x63\\x67\\x22\\x2c\\x22\\x70\\x6f\\x6c\\x6c\\x54\\x69\\x6d\\x65\\x22\\x3a\\x33\\x30\\x30\\x30\\x2c\\x22\\x6d\\x61\\x78\\x4d\\x73\\x67\\x4e\\x75\\x6d\\x73\\x22\\x3a\\x33\\x32\\x2c\\x22\\x69\\x6e\\x69\\x74\\x4d\\x6f\\x64\\x65\\x22\\x3a\\x34\\x7d\\x7d\"\n    end_stream: false\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_sendmsg",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x12\\x31\\xa\\xd\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x5f\\x72\\x6f\\x75\\x74\\x65\\x12\\x20\\xa\\xe\\xa\\xc\\xa\\xa\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x12\\xe\\xa\\xc\\x66\\x61\\x6b\\x65\\x5f\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x1\\x9\\x0\\x0\\x0\\xf4\\x7b\\x22\\x63\\x6f\\x64\\x65\\x22\\x3a\\x31\\x30\\x2c\\x22\\x65\\x78\\x74\\x46\\x69\\x65\\x6c\\x64\\x73\\x22\\x3a\\x7b\\x22\\x73\\x79\\x73\\x46\\x6c\\x61\\x67\\x22\\x3a\\x30\\x2c\\x22\\x62\\x6f\\x72\\x6e\\x54\\x69\\x6d\\x65\\x73\\x74\\x61\\x6d\\x70\\x22\\x3a\\x30\\x2c\\x22\\x66\\x6c\\x61\\x67\\x22\\x3a\\x30\\x2c\\x22\\x71\\x75\\x65\\x75\\x65\\x49\\x64\\x22\\x3a\\x2d\\x31\\x2c\\x22\\x74\\x6f\\x70\\x69\\x63\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x22\\x2c\\x22\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x54\\x6f\\x70\\x69\\x63\\x22\\x3a\\x22\\x22\\x2c\\x22\\x70\\x72\\x6f\\x64\\x75\\x63\\x65\\x72\\x47\\x72\\x6f\\x75\\x70\\x22\\x3a\\x22\\x22\\x2c\\x22\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x54\\x6f\\x70\\x69\\x63\\x51\\x75\\x65\\x75\\x65\\x4e\\x75\\x6d\\x73\\x22\\x3a\\x30\\x7d\\x2c\\x22\\x6f\\x70\\x61\\x71\\x75\\x65\\x22\\x3a\\x32\\x30\\x2c\\x22\\x73\\x65\\x72\\x69\\x61\\x6c\\x69\\x7a\\x65\\x54\\x79\\x70\\x65\\x43\\x75\\x72\\x72\\x65\\x6e\\x74\\x52\\x50\\x43\\x22\\x3a\\x22\\x4a\\x53\\x4f\\x4e\\x22\\x2c\\x22\\x76\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x30\\x2c\\x22\\x6c\\x61\\x6e\\x67\\x75\\x61\\x67\\x65\\x22\\x3a\\x22\\x43\\x50\\x50\\x22\\x2c\\x22\\x66\\x6c\\x61\\x67\\x22\\x3a\\x30\\x7d\\x5f\\x41\\x70\\x61\\x63\\x68\\x65\\x5f\\x52\\x6f\\x63\\x6b\\x65\\x74\\x4d\\x51\\x5f\"\n    end_stream: false\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_sendmsg2",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x12\\x31\\xa\\xd\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x5f\\x72\\x6f\\x75\\x74\\x65\\x12\\x20\\xa\\xe\\xa\\xc\\xa\\xa\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x12\\xe\\xa\\xc\\x66\\x61\\x6b\\x65\\x5f\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\xc7\\x0\\x0\\x0\\xb2\\x7b\\x22\\x65\\x78\\x74\\x46\\x69\\x65\\x6c\\x64\\x73\\x22\\x3a\\x7b\\x22\\x61\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x70\\x67\\x22\\x2c\\x22\\x62\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x22\\x2c\\x22\\x63\\x22\\x3a\\x22\\x22\\x2c\\x22\\x64\\x22\\x3a\\x30\\x2c\\x22\\x65\\x22\\x3a\\x2d\\x31\\x2c\\x22\\x66\\x22\\x3a\\x30\\x2c\\x22\\x67\\x22\\x3a\\x30\\x2c\\x22\\x68\\x22\\x3a\\x30\\x7d\\x2c\\x22\\x6f\\x70\\x61\\x71\\x75\\x65\\x22\\x3a\\x32\\x31\\x2c\\x22\\x73\\x65\\x72\\x69\\x61\\x6c\\x69\\x7a\\x65\\x54\\x79\\x70\\x65\\x43\\x75\\x72\\x72\\x65\\x6e\\x74\\x52\\x50\\x43\\x22\\x3a\\x22\\x4a\\x53\\x4f\\x4e\\x22\\x2c\\x22\\x76\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x30\\x2c\\x22\\x6c\\x61\\x6e\\x67\\x75\\x61\\x67\\x65\\x22\\x3a\\x22\\x43\\x50\\x50\\x22\\x2c\\x22\\x66\\x6c\\x61\\x67\\x22\\x3a\\x30\\x2c\\x22\\x63\\x6f\\x64\\x65\\x22\\x3a\\x33\\x31\\x30\\x7d\\x5f\\x41\\x70\\x61\\x63\\x68\\x65\\x5f\\x52\\x6f\\x63\\x6b\\x65\\x74\\x4d\\x51\\x5f\"\n    end_stream: false\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_unregistered_client",
    "content": "config {\n  name: \"envoy.filters.network.rocketmq_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x12\\x31\\xa\\xd\\x64\\x65\\x66\\x61\\x75\\x6c\\x74\\x5f\\x72\\x6f\\x75\\x74\\x65\\x12\\x20\\xa\\xe\\xa\\xc\\xa\\xa\\x74\\x65\\x73\\x74\\x5f\\x74\\x6f\\x70\\x69\\x63\\x12\\xe\\xa\\xc\\x66\\x61\\x6b\\x65\\x5f\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\xa5\\x0\\x0\\x0\\xa1\\x7b\\x22\\x65\\x78\\x74\\x46\\x69\\x65\\x6c\\x64\\x73\\x22\\x3a\\x7b\\x22\\x63\\x6c\\x69\\x65\\x6e\\x74\\x49\\x44\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x63\\x6c\\x69\\x65\\x6e\\x74\\x5f\\x69\\x64\\x22\\x2c\\x22\\x63\\x6f\\x6e\\x73\\x75\\x6d\\x65\\x72\\x47\\x72\\x6f\\x75\\x70\\x22\\x3a\\x22\\x74\\x65\\x73\\x74\\x5f\\x63\\x67\\x22\\x7d\\x2c\\x22\\x6f\\x70\\x61\\x71\\x75\\x65\\x22\\x3a\\x37\\x2c\\x22\\x73\\x65\\x72\\x69\\x61\\x6c\\x69\\x7a\\x65\\x54\\x79\\x70\\x65\\x43\\x75\\x72\\x72\\x65\\x6e\\x74\\x52\\x50\\x43\\x22\\x3a\\x22\\x4a\\x53\\x4f\\x4e\\x22\\x2c\\x22\\x76\\x65\\x72\\x73\\x69\\x6f\\x6e\\x22\\x3a\\x30\\x2c\\x22\\x6c\\x61\\x6e\\x67\\x75\\x61\\x67\\x65\\x22\\x3a\\x22\\x43\\x50\\x50\\x22\\x2c\\x22\\x66\\x6c\\x61\\x67\\x22\\x3a\\x30\\x2c\\x22\\x63\\x6f\\x64\\x65\\x22\\x3a\\x33\\x35\\x7d\"\n    end_stream: false\n  }\n}\n\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_1",
    "content": "config {\n  name: \"envoy.filters.network.sni_cluster\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 268435\n  }\n}\nactions {\n  on_data {\n    data: \"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee\"\n  }\n}\nactions {\n  on_data {\n    data: \"IIIIIIIIIIIIIIIIIIII\\000\\000\\000\\000\\000\\000\\000;IIIIIIIIIIIIIIIIIIIIIIIIIIIIII\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 16384\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 13\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_2",
    "content": "config {\n  name: \"envoy.filters.network.sni_cluster\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 268435\n  }\n}\nactions {\n  on_data {\n    data: \"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 1677721\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.sni_dynamic_forward_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig\"\n    value: \"\\nP\\nFenvoy.network.sni_dynamic_fo.filters.network.sni_dynamic_forward_proxy*\\006\\010\\200\\200\\200\\260\\002\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 30976\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 262144\n  }\n}\nactions {\n  on_data {\n    data: \"\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030c.googlers.com\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030c.googlers.com\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030c.googlers.com\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\"\n  
}\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\nYtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.vLLLLLLLLL3.ThriftProxy\\020\\003\\030\\003\\\"\\231\\002\\022\\226\\002\\n\\003\\n\\001A\\022\\216\\002\\032\\201\\002\\n\\361\\001\\n\\010@\\000\\000\\000\\000\\000\\000\\000\\022\\344\\001\\nc\\n_*]\\032[\\nPtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\\022\\007\\020\\002\\\"\\003\\n\\001A\\022\\000\\n}\\nyenvoy.filters.network.thrift_prox\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177y\\022\\000\\n\\013\\n\\000\\022\\007\\n\\005\\n\\001#\\022\\0002\\010A\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\0\\0\\0\\144\\17\\377\\0\\0\\0\\0\\0\\1\\0\\1\\0\\2\\1\\2\\0\\0\"\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_3",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\nz\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 10\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n  }\n}\nactions {\n  on_data {\n    data: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.Thrif~tProxy\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_app_exception",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x64\\xf\\xff\\x0\\x0\\x0\\x0\\x0\\x1\\x0\\x1\\x0\\x2\\x1\\x2\\x0\\x0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_assert_failure",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\nYtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.vLLLLLLLLL3.ThriftProxy\\020\\003\\030\\003\\\"\\231\\002\\022\\226\\002\\n\\003\\n\\001A\\022\\216\\002\\032\\201\\002\\n\\361\\001\\n\\010@\\000\\000\\000\\000\\000\\000\\000\\022\\344\\001\\nc\\n_*]\\032[\\nPtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\\022\\007\\020\\002\\\"\\003\\n\\001A\\022\\000\\n}\\nyenvoy.filters.network.thrift_prox\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177y\\022\\000\\n\\013\\n\\000\\022\\007\\n\\005\\n\\001#\\022\\0002\\010A\\000\\000\\000\\000\\000\\000\\000\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_garbage_request",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_invalid_msg_type",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1f\\x80\\x1\\x0\\x1\\x0\\x0\\x0\\x4\\x6e\\x61\\x6d\\x65\\x0\\x0\\x0\\x1\\x8\\xff\\xff\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_on_data_handles_oneway",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1d\\x80\\x1\\x0\\x4\\x0\\x0\\x0\\x4\\x6e\\x61\\x6d\\x65\\x0\\x0\\x0\\xf\\xb\\x0\\x0\\x0\\x0\\x0\\x5\\x66\\x69\\x65\\x6c\\x64\\x0\"\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_on_data_handles_thriftcall",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1d\\x80\\x1\\x0\\x1\\x0\\x0\\x0\\x4\\x6e\\x61\\x6d\\x65\\x0\\x0\\x0\\xf\\xb\\x0\\x0\\x0\\x0\\x0\\x5\\x66\\x69\\x65\\x6c\\x64\\x0\"\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_pipelined_request1",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1d\\x80\\x1\\x0\\x1\\x0\\x0\\x0\\x4\\x6e\\x61\\x6d\\x65\\x0\\x0\\x0\\x1\\xb\\x0\\x0\\x0\\x0\\x0\\x5\\x66\\x69\\x65\\x6c\\x64\\x0\\x0\\x0\\x0\\x1d\\x80\\x1\\x0\\x1\\x0\\x0\\x0\\x4\\x6e\\x61\\x6d\\x65\\x0\\x0\\x0\\x2\\xb\\x0\\x0\\x0\\x0\\x0\\x5\\x66\\x69\\x65\\x6c\\x64\\x0\"\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_protocol_error",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1d\\x80\\x1\\x0\\xff\\x0\\x0\\x0\\x4\\x6e\\x61\\x6d\\x65\\x0\\x0\\x0\\x1\\x0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_router_test",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x10\\x1\\x18\\x1\\x22\\x1d\\xa\\x6\\x72\\x6f\\x75\\x74\\x65\\x73\\x12\\x13\\xa\\x6\\xa\\x4\\x6e\\x61\\x6d\\x65\\x12\\x9\\xa\\x7\\x63\\x6c\\x75\\x73\\x74\\x65\\x72\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1d\\x80\\x1\\x0\\x4\\x0\\x0\\x0\\x4\\x6e\\x61\\x6d\\x65\\x0\\x0\\x0\\xf\\xb\\x0\\x0\\x0\\x0\\x0\\x5\\x66\\x69\\x65\\x6c\\x64\\x0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_stop_and_resume",
    "content": "config {\n  name: \"envoy.filters.network.thrift_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\"\n  }\n}\n\nactions {\n  on_new_connection {\n  }\n}\n\n\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1d\\x80\\x1\\x0\\x4\\x0\\x0\\x0\\x4\\x6e\\x61\\x6d\\x65\\x0\\x0\\x0\\xf\\xb\\x0\\x0\\x0\\x0\\x0\\x5\\x66\\x69\\x65\\x6c\\x64\\x0\"\n  }\n}\n\nactions {\n  on_data {\n    data: \"\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\nVtype.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\\032\\000\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 8257536\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 8257536\n  }\n}\nactions {\n  on_data {\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 83886080\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_auth",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x28\\xff\\xff\\xff\\xfc\\x0\\x0\\x0\\x64\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x6\\x64\\x69\\x67\\x65\\x73\\x74\\x0\\x0\\x0\\x6\\x70\\x40\\x73\\x73\\x77\\x64\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_connect",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1c\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x64\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xc8\\x0\\x0\\x0\\x0\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_multirequest",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x7c\\x0\\x0\\x3\\xe8\\x0\\x0\\x0\\xe\\x0\\x0\\x0\\x1\\x0\\xff\\xff\\xff\\xff\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x1\\x31\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x1\\x0\\xff\\xff\\xff\\xff\\x0\\x0\\x0\\x4\\x2f\\x62\\x61\\x72\\x0\\x0\\x0\\x1\\x31\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xd\\x0\\xff\\xff\\xff\\xff\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x64\\x0\\x0\\x0\\x5\\x0\\xff\\xff\\xff\\xff\\x0\\x0\\x0\\x4\\x2f\\x62\\x61\\x72\\x0\\x0\\x0\\x1\\x32\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x1\\xff\\xff\\xff\\xff\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_request_container",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1f\\x0\\x0\\x3\\xe8\\x0\\x0\\x0\\x13\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x3\\x62\\x61\\x72\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x4\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_request_ephemeral",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1f\\x0\\x0\\x3\\xe8\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x3\\x62\\x61\\x72\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x1\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_request_persistent",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1f\\x0\\x0\\x3\\xe8\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x3\\x62\\x61\\x72\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_request_persistent_ephemeral_sequential",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1f\\x0\\x0\\x3\\xe8\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x3\\x62\\x61\\x72\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x3\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_request_persistent_sequential",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1f\\x0\\x0\\x3\\xe8\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x3\\x62\\x61\\x72\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x1\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_request_ttl",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1f\\x0\\x0\\x3\\xe8\\x0\\x0\\x0\\x15\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x3\\x62\\x61\\x72\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x5\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_request_ttl_sequential",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x1f\\x0\\x0\\x3\\xe8\\x0\\x0\\x0\\x15\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x3\\x62\\x61\\x72\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x6\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_watch_request",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\x0\\x0\\x0\\x48\\xff\\xff\\xff\\xf8\\x0\\x0\\x0\\x65\\x0\\x0\\x0\\x2\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\\x0\\x0\\x0\\x4\\x2f\\x62\\x61\\x72\\x0\\x0\\x0\\x2\\x0\\x0\\x0\\x5\\x2f\\x66\\x6f\\x6f\\x31\\x0\\x0\\x0\\x5\\x2f\\x62\\x61\\x72\\x31\\x0\\x0\\x0\\x2\\x0\\x0\\x0\\x5\\x2f\\x66\\x6f\\x6f\\x32\\x0\\x0\\x0\\x5\\x2f\\x62\\x61\\x72\\x32\"\n  }\n}\nactions {\n  on_data {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.extensions.filters.network;\nimport \"google/protobuf/empty.proto\";\nimport \"validate/validate.proto\";\nimport \"envoy/config/listener/v3/listener_components.proto\";\n\nmessage OnData {\n  bytes data = 1;\n  bool end_stream = 2;\n}\n\nmessage AdvanceTime {\n  // Advance the system time by (0,24] hours.\n  uint32 milliseconds = 1 [(validate.rules).uint32 = {gt: 0 lt: 86400000}];\n}\n\nmessage Action {\n  oneof action_selector {\n    option (validate.required) = true;\n    // Call onNewConnection()\n    google.protobuf.Empty on_new_connection = 1;\n    // Call onData()\n    OnData on_data = 2;\n    // Advance time_source_\n    AdvanceTime advance_time = 3;\n  }\n}\n\nmessage FilterFuzzTestCase {\n  // This is actually a protobuf type for the config of network filters.\n  envoy.config.listener.v3.Filter config = 1;\n  repeated Action actions = 2;\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc",
    "content": "#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.pb.validate.h\"\n#include \"test/extensions/filters/network/common/fuzz/uber_readfilter.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nDEFINE_PROTO_FUZZER(const test::extensions::filters::network::FilterFuzzTestCase& input) {\n  ABSL_ATTRIBUTE_UNUSED static PostProcessorRegistration reg = {\n      [](test::extensions::filters::network::FilterFuzzTestCase* input, unsigned int seed) {\n        // This post-processor mutation is applied only when libprotobuf-mutator\n        // calls mutate on an input, and *not* during fuzz target execution.\n        // Replaying a corpus through the fuzzer will not be affected by the\n        // post-processor mutation.\n\n        // TODO(jianwendong): After extending to cover all the filters, we can use\n        // `Registry::FactoryRegistry<\n        // Server::Configuration::NamedNetworkFilterConfigFactory>::registeredNames()`\n        // to get all the filter names instead of calling `UberFilterFuzzer::filter_names()`.\n        static const auto filter_names = UberFilterFuzzer::filterNames();\n        static const auto factories = Registry::FactoryRegistry<\n            Server::Configuration::NamedNetworkFilterConfigFactory>::factories();\n        // Choose a valid filter name.\n        if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) ==\n            std::end(filter_names)) {\n          absl::string_view filter_name = filter_names[seed % filter_names.size()];\n          input->mutable_config()->set_name(std::string(filter_name));\n        }\n        // Set the corresponding type_url for Any.\n        auto& factory = factories.at(input->config().name());\n        
input->mutable_config()->mutable_typed_config()->set_type_url(\n            absl::StrCat(\"type.googleapis.com/\",\n                         factory->createEmptyConfigProto()->GetDescriptor()->full_name()));\n      }};\n\n  try {\n    TestUtility::validate(input);\n    // Check the filter's name in case some filters are not supported yet.\n    static const auto filter_names = UberFilterFuzzer::filterNames();\n    // TODO(jianwendong): remove this if block after covering all the filters.\n    if (std::find(filter_names.begin(), filter_names.end(), input.config().name()) ==\n        std::end(filter_names)) {\n      ENVOY_LOG_MISC(debug, \"Test case with unsupported filter type: {}\", input.config().name());\n      return;\n    }\n    static UberFilterFuzzer fuzzer;\n    fuzzer.fuzz(input.config(), input.actions());\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n  }\n}\n\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.dict",
    "content": "# The names of supported thrift_filters in ThriftProxy \n\"envoy.filters.thrift.router\"\n\"envoy.filters.thrift.rate_limit\"\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_1",
    "content": "config {\n  name: \"envoy.filters.network.kafka_broker\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker\"\n    value: \"\\n}\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n    end_stream: true\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 268435\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\315\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\\312\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\n\\002\\315\\265\"\n  }\n}\nactions {\n  on_write {\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"\\020\\000\\000\\000\"\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n    end_stream: true\n  }\n}\nactions {\n  
on_write {\n    data: \"-\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"p\"\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n  }\n}\nactions {\n  on_write {\n    data: \"-\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_process_response",
    "content": "config {\n  name: \"envoy.filters.network.kafka_broker\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker\"\n    value: \"\\n}\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\x0\\x0\\x0\\x22\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x26\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\x2\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\x3\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\x4\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x5\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x
0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x7\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4e\\x0\\x0\\x0\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x2a\\x0\\x0\\x0\\x9\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\xb\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\xc\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x4a\\x0\\x0\\x0\\xd\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0
\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x52\\x0\\x0\\x0\\xe\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x52\\x0\\x0\\x0\\xf\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x58\\x0\\x0\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x58\\x0\\x0\\x0\\x11\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x58\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x58\\x0\
\x0\\x0\\x13\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x5c\\x0\\x0\\x0\\x14\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x26\\x0\\x0\\x0\\x15\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x2a\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\x17\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x2e\\x0\\x0\\x0\\x18\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x32\\x0\\x0\\x0\\x19\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x32\\x0\\x0\\x0\\x1a\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\
x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x44\\x0\\x0\\x0\\x1b\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x51\\x0\\x0\\x0\\x1c\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x59\\x0\\x0\\x0\\x1d\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x5d\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x5d\\x0\\x0\\x0\\x1f\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x65\\x0\\x0\\x0\\x20\\x0
\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x65\\x0\\x0\\x0\\x21\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x69\\x0\\x0\\x0\\x22\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x71\\x0\\x0\\x0\\x23\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x88\\x0\\x0\\x0\\x24\\x0\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x7\\x73\\x74\
\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x2\\x0\\x10\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x2\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x18\\x0\\x0\\x0\\x25\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x18\\x0\\x0\\x0\\x26\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x18\\x0\\x0\\x0\\x27\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x18\\x0\\x0\\x0\\x28\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x2b\\x0\\x0\\x0\\x29\\x0\\x0\\x10\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x18\\x0\\x0\\x0\\x2a\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x18\\x0\\x0\\x0\\x2b\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x2b\\x0\\x0\\x0\\x2c\\x0\\x0\\x10\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x2d\\x0\\x10\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x2e\\x0\\x10\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x2f\\x0\\x10\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x30\\x0\\x10\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x31\\x0\\x10\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x32\\x0\\x10\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\x33\\x0\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x34\\x0\\x10\\x0\\x0\\x
0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x35\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x36\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x29\\x0\\x0\\x0\\x37\\x0\\x0\\x10\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x1a\\x0\\x0\\x0\\x38\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1a\\x0\\x0\\x0\\x39\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1a\\x0\\x0\\x0\\x3a\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x3b\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x3c\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x3d\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x3e\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x3f\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x39\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x20\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x2a\\x0\\x0\\x0\\x41\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0
\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x2a\\x0\\x0\\x0\\x42\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x2c\\x0\\x0\\x0\\x43\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x10\\x0\\x0\\x0\\x30\\x0\\x0\\x0\\x44\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x10\\x0\\x0\\x0\\x30\\x0\\x0\\x0\\x45\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x10\\x0\\x0\\x0\\x34\\x0\\x0\\x0\\x46\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x10\\x0\\x0\\x0\\x4e\\x0\\x0\\x0\\x47\\x0\\x0\\x0\\x0\\x20\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x48\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x22\\x0\\x0\\x0\\x49\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x22\\x0\\x0\\x0\\x4a\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x2c\\x0
\\x0\\x0\\x4b\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x4c\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x4d\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x3a\\x0\\x0\\x0\\x4e\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x3a\\x0\\x0\\x0\\x4f\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x3a\\x0\\x0\\x0\\x50\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x42\\x0\\x0\\x0\\x51\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x4e\\x0\\x0\\x0\\x52\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e
\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x5\\x0\\x1\\x2\\x3\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x53\\x0\\x10\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x54\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x55\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x56\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x57\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x6\\x0\\x0\\x0\\x58\\x0\\x10\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x59\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x5a\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x5b\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x32\\x0\\x0\\x0\\x5c\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\xe\\x0\\x0\\x0\\x5d\\x0\\x10\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\x5e\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\x5f\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\x60\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x1b\\x0\\x0\\x0\\x61\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x5\\x0\\x1\\x2\\x3\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x56\\x0\\x0\\x0\\x62\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x
0\\x5a\\x0\\x0\\x0\\x63\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x5a\\x0\\x0\\x0\\x64\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x5e\\x0\\x0\\x0\\x65\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x66\\x0\\x0\\x0\\x66\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x74\\x0\\x0\\x0\\x67\\x0\\x0\\x0\\x0\\x20\\x2\\x0\\x10\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73
\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x5\\x0\\x1\\x2\\x3\\x5\\x0\\x1\\x2\\x3\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x1a\\x0\\x0\\x0\\x68\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x69\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x6a\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x30\\x0\\x0\\x0\\x6b\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\x6c\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\x6d\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x10\\x0\\x0\\x0\\x6e\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x10\\x0\\x10\\x0\\x0\\x0\\x14\\x0\\x0\\x0\\x6f\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x10\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x14\\x0\\x0\\x0\\x70\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x10\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x27\\x0\\x0\\x0\\x71\\x0\\x10\\x2\\x0\\x10\\x0\\x10\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\x72\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x1a\\x0\\x0\\x0\\x73\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x74\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x6
7\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x75\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x76\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x53\\x0\\x0\\x0\\x77\\x0\\x0\\x0\\x0\\x20\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x10\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x8\\x0\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\x78\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x79\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x7a\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x7b\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x29\\x0\\x0\\x0\\x7c\\x0\\x0\\x0\\x0\\x20\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x26\\x0\\x0\\x0\\x7d\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x26\\x0\\x0\\x0\\x7e\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x14\\x0\\x0\\x0\\x7f\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x14\\x0\\x0\\x0\\x80\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x81\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x10\\x2\\xa\\x3\
\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x22\\x0\\x0\\x0\\x82\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x26\\x0\\x0\\x0\\x83\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x2a\\x0\\x0\\x0\\x84\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x2a\\x0\\x0\\x0\\x85\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x86\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x87\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x88\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x89\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x8a\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x8b\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x26\\x0\\x0\\x0\\x8c\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x8d\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x8e\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x8f\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x35\\x0\\x0\\x0\\x90\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x6\\x73\\x74\\x
72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x91\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x92\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x93\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x3f\\x0\\x0\\x0\\x94\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x95\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x8\\x0\\x0\\x0\\x36\\x0\\x0\\x0\\x96\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x4b\\x0\\x0\\x0\\x97\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x8\\x0\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x0\\x0\\x4b\\x0\\x0\\x0\\x98\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\
x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x8\\x0\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x0\\x0\\x1f\\x0\\x0\\x0\\x99\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1f\\x0\\x0\\x0\\x9a\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x9b\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\x9c\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x3b\\x0\\x0\\x0\\x9d\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x3b\\x0\\x0\\x0\\x9e\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\x9f\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\xa0\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\xa1\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1e\\x0\\x0\\x0\\xa2\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x42\\x0\\x0\\x0\\xa3\\x0\\x10\
\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x42\\x0\\x0\\x0\\xa4\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x48\\x0\\x0\\x0\\xa5\\x0\\x0\\x10\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x5\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\xa6\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\xa7\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\xa8\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x12\\x0\\x0\\x0\\xa9\\x0\\x10\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x5a\\x0\\x0\\x0\\xaa\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x5a\\x0\\x0\\x0\\xab\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x40\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x4\\x0\\x1\\x2\\x3\\x0\\x0\\x0\\x
1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\xac\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x16\\x0\\x0\\x0\\xad\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x0\\x0\\x0\\x29\\x0\\x0\\x0\\xae\\x0\\x0\\x0\\x0\\x20\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x10\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x26\\x0\\x0\\x0\\xaf\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x28\\x0\\x0\\x0\\xb0\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x42\\x0\\x0\\x0\\xb1\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x0\\x10\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x1f\\x0\\x0\\x0\\xb2\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x10\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x31\\x0\\x0\\x0\\xb3\\x0\\x0\\x0\\x0\\x20\\x2\\x0\\x10\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x8\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x49\\x0\\x0\\x0\\xb4\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x0\\x10\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x4f\\x0\\x0\\x0\\xb5\\x0\\x0\\x0\\x0\\x20\\x0\\x10\\x7\\x73\\x74\\x72\\
x69\\x6e\\x67\\x2\\x7\\x73\\x74\\x72\\x69\\x6e\\x67\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\x0\\x0\\x0\\x20\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x2\\xa\\x3\\x1\\x2\\x3\\x14\\x3\\x4\\x5\\x6\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\xb6\\x0\\x10\\x0\\x0\\x0\\x20\\x0\\x0\\x0\\x1\\x0\\x6\\x73\\x74\\x72\\x69\\x6e\\x67\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x20\\x0\\x10\"\n  }\n}\nactions {\n  on_write {\n    data: \"\"\n    end_stream: true\n  }\n}"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_response1",
    "content": "config {\n  name: \"envoy.filters.network.kafka_broker\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker\"\n    value: \"\\n}\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\x7f\\xff\\xff\\xff\\x0\\x0\\x0\\x2a\\x80\\x0\\x0\\x0\"\n  }\n}\nactions {\n  on_write {\n    data: \"\"\n    end_stream: true\n  }\n}"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_unknown_response",
    "content": "config {\n  name: \"envoy.filters.network.kafka_broker\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker\"\n    value: \"\\n}\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\x0\\x0\\x0\\x8\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x0\"\n  }\n}\nactions {\n  on_write {\n    data: \"\"\n    end_stream: true\n  }\n}"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mongodb_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.mongo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy\"\n    value: \"\\n\\001\\\\\\032\\007\\\"\\003\\010\\200t*\\000 \\001\"\n  }\n}\nactions {\n  on_write {\n    data: \"]\\000\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_write {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_write {\n    data: \"\\004\\000\\001\\000\\000\\000\\000\\000\\000\\001\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"<\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"\\004\\000\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\004\\000\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_write {\n    data: \"type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"\\004\\000\"\n  }\n}\nactions {\n  on_write {\n    data: \"pH\\037\\000 `\\000\\000\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_write {\n    data: \"=\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"\\004\\000\"\n    end_stream: true\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mongodb_proxy_response",
    "content": "config {\n  name: \"envoy.filters.network.mongo_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy\"\n    value: \"\\xa\\x4\\x74\\x65\\x73\\x74\\x1a\\x4\\x1a\\x2\\x8\\x1\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\120\\0\\0\\0\\1\\0\\0\\0\\1\\0\\0\\0\\324\\7\\0\\0\\4\\0\\0\\0\\164\\145\\163\\164\\56\\164\\145\\163\\164\\0\\24\\0\\0\\0\\377\\377\\377\\377\\52\\0\\0\\0\\2\\163\\164\\162\\151\\156\\147\\137\\156\\145\\145\\144\\137\\145\\163\\143\\0\\20\\0\\0\\0\\173\\42\\146\\157\\157\\42\\72\\40\\42\\142\\141\\162\\12\\42\\175\\0\\0\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\56\\0\\0\\0\\2\\0\\0\\0\\2\\0\\0\\0\\1\\0\\0\\0\\10\\0\\0\\0\\40\\116\\0\\0\\0\\0\\0\\0\\24\\0\\0\\0\\2\\0\\0\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\"\n  }\n}\n\n\nactions {\n  on_write {\n    data: \"\\45\\0\\0\\0\\3\\0\\0\\0\\3\\0\\0\\0\\325\\7\\0\\0\\0\\0\\0\\0\\164\\145\\163\\164\\0\\24\\0\\0\\0\\40\\116\\0\\0\\0\\0\\0\\0\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\43\\0\\0\\0\\4\\0\\0\\0\\4\\0\\0\\0\\322\\7\\0\\0\\10\\0\\0\\0\\164\\145\\163\\164\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\"\n  }\n}\n\n\n\n\nactions {\n  on_write {\n    data: \"\\50\\0\\0\\0\\5\\0\\0\\0\\5\\0\\0\\0\\327\\7\\0\\0\\0\\0\\0\\0\\2\\0\\0\\0\\40\\116\\0\\0\\0\\0\\0\\0\\100\\234\\0\\0\\0\\0\\0\\0\"\n  }\n}\n\n\n\nactions {\n  on_write {\n    data: \"\\120\\0\\0\\0\\17\\0\\0\\0\\31\\0\\0\\0\\332\\7\\0\\0\\124\\145\\163\\164\\40\\144\\141\\164\\141\\142\\141\\163\\145\\0\\124\\145\\163\\164\\40\\143\\157\\155\\155\\141\\156\\144\\40\\156\\141\\155\\145\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\\26\\0\\0\\0\\2\\167\\157\\162\\154\\144\\0\\6\\0\\0\\0\\150\\145\\154\\154\\157\\0\\0\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\60\\0\\0\\0\\20\\0\\0\\0\\32\\0\\0\\0\\333\\7\\0\\0\\5\\0\\0\\0\\0\\5\\0\\0\\0\\0\\26\\0\\0\\0\\2\\167\\157\\162\\154\\144\\0\\6\\0\\0\\0\\150\\145\\154\\154\\157\\0\\0\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mysql_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.mysql_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy\"\n    value: \"\\n\\006#\\336\\215\\302\\246\\001\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\031\\031\\031\\031\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\031\\031\\031\\031\\031\\031\\031\\031\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"3\"\n  }\n}\nactions {\n  on_write {\n    data: \"#\"\n  }\n}\nactions {\n  on_write {\n    data: \"#\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"3\"\n  }\n}\nactions {\n  on_write {\n    data: \"#\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"#\"\n  }\n}\nactions {\n  on_write {\n    data: \"#\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\031\\031\\031\\031\\031\\031\\031\\031\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"3\"\n  }\n}\nactions {\n  on_write {\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"3\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mysql_proxy_msg_split",
    "content": "config {\n  name: \"envoy.filters.network.mysql_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy\"\n    value: \"\\n\\006#\\336\\215\\302\\246\\001\"\n  }\n}\n {\n  on_write {\n    data: \"\\34\\0\\0\\0\\12\\65\\56\\60\\56\\65\\64\\0\\136\\0\\0\\0\\41\\100\\163\\141\\154\\164\\43\\44\\0\\1\\1\\41\\0\\2\\0\\2\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\57\\0\\0\\1\\0\\0\\0\\3\\1\\0\\0\\0\\41\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\165\\163\\145\\162\\61\\0\\160\\64\\44\\44\\167\\60\\162\\66\\0\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\7\\0\\0\\2\\376\\1\\0\\0\\0\\1\\0\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\14\\0\\0\\3\\155\\171\\163\\161\\154\\137\\157\\160\\141\\161\\165\\145\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\7\\0\\0\\4\\377\\1\\0\\0\\0\\1\\0\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\30\\0\\0\\0\\3\\103\\122\\105\\101\\124\\105\\40\\104\\101\\124\\101\\102\\101\\123\\105\\40\\155\\171\\163\\161\\154\\144\\142\"\n  }\n}\n\nactions {\n  on_write {\n    data: \"\\34\\0\\0\\0\\12\\65\\56\\60\\56\\65\\64\\0\\136\\0\\0\\0\\41\\100\\163\\141\\154\\164\\43\\44\\0\\1\\1\\41\\0\\2\\0\\2\"\n  }\n}\nactions {\n  advance_time {\n    milliseconds: 14848\n  }\n}\nactions {\n  on_write {\n    data: \"\\7\\0\\0\\2\\377\\1\\0\\0\\0\\1\\0\"\n    end_stream: true\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_write {\n    data: \"\\57\\0\\0\\1\\0\\2\\0\\3\\1\\0\\0\\0\\41\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\165\\163\\145\\162\\61\\0\\160\\64\\44\\44\\167\\60\\162\\66\\0\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\7\\0\\0\\2\\0\\1\\0\\0\\0\\1\\0\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\7\\0\\0\\2\\376\\1\\0\\0\\0\\1\\0\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"\\7\\0\\0\\4\\377\\1\\0\\0\\0\\1\\0\"\n  }\n}\nactions 
{\n  on_write {\n    data: \"\\57\\0\\0\\1\\0\\0\\0\\3\\1\\0\\0\\0\\41\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\165\\163\\145\\162\\61\\0\\160\\64\\44\\44\\167\\60\\162\\66\\0\"\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"\\30\\0\\0\\0\\3\\103\\122\\105\\101\\124\\105\\40\\104\\101\\124\\101\\102\\101\\123\\105\\40\\155\\171\\163\\161\\154\\144\\142\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\30\\0\\0\\5\\3\\103\\122\\105\\101\\124\\105\\40\\104\\101\\124\\101\\102\\101\\123\\105\\40\\155\\171\\163\\161\\154\\144\\142\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\1\\0\\0\\0\\4\" \n  }\n}\nactions {\n  on_write {\n    data: \"\\7\\0\\0\\4\\1\\1\\0\\0\\0\\1\\0\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\7\\0\\0\\4\\1\\1\\0\\0\\0\\1\\0\"\n  }\n}\nactions {\n  on_write {\n    end_stream: true\n  }\n}\nactions {\n  on_write {\n    data: \"3\"\n  }\n} "
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_1",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\nVtype.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\\032\\000\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030c.googlers.com\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\\030\"\n  }\n}\nactions {\n  on_write {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_assert_failure_onwrite",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\nVtype.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\\022\\001!\\032\\006\\010\\377\\376\\377\\317\\017\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\030\\030\\030\\030\\030\\030\\030\\030\"\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_auth",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\x0\\x0\\x0\\x14\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xc8\\x0\\x0\\x0\\x0\"\n  }\n}\nactions {\n  on_write {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_connect",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\x0\\x0\\x0\\x14\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xc8\\x0\\x0\\x0\\x0\"\n  }\n}\nactions {\n  on_write {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_ping",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\x0\\x0\\x0\\x14\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xa\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\xc8\\x0\\x0\\x0\\x0\"\n  }\n}\nactions {\n  on_write {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_watch_control",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\x0\\x0\\x0\\x10\\xff\\xff\\xff\\xf8\\x0\\x0\\x0\\x0\\x0\\x0\\x7\\xd0\\x0\\x0\\x0\\x0\"\n  }\n}\nactions {\n  on_write {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_watch_event",
    "content": "config {\n  name: \"envoy.filters.network.zookeeper_proxy\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\"\n    value: \"\\xa\\xb\\x74\\x65\\x73\\x74\\x5f\\x70\\x72\\x65\\x66\\x69\\x78\"\n  }\n}\nactions {\n  on_write {\n    data: \"\\x0\\x0\\x0\\x20\\xff\\xff\\xff\\xff\\x0\\x0\\x0\\x0\\x0\\x0\\x3\\xe8\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x1\\x0\\x0\\x0\\x0\\x0\\x0\\x0\\x4\\x2f\\x66\\x6f\\x6f\"\n  }\n}\nactions {\n  on_write {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.extensions.filters.network;\nimport \"validate/validate.proto\";\nimport \"envoy/config/listener/v3/listener_components.proto\";\n\nmessage OnWrite {\n  bytes data = 1;\n  bool end_stream = 2;\n}\n\nmessage AdvanceTime {\n  // Advance the system time by (0,24] hours.\n  uint32 milliseconds = 1 [(validate.rules).uint32 = {gt: 0 lt: 86400000}];\n}\n\nmessage WriteAction {\n  oneof action_selector {\n    option (validate.required) = true;\n    // Call onWrite()\n    OnWrite on_write = 2;\n    // Advance time_source_\n    AdvanceTime advance_time = 3;\n  }\n}\n\nmessage FilterFuzzTestCase {\n  // This is actually a protobuf type for the config of network filters.\n  envoy.config.listener.v3.Filter config = 1;\n  repeated WriteAction actions = 2;\n}\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc",
    "content": "#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.pb.validate.h\"\n#include \"test/extensions/filters/network/common/fuzz/uber_writefilter.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nDEFINE_PROTO_FUZZER(const test::extensions::filters::network::FilterFuzzTestCase& input) {\n  ABSL_ATTRIBUTE_UNUSED static PostProcessorRegistration reg = {\n      [](test::extensions::filters::network::FilterFuzzTestCase* input, unsigned int seed) {\n        // This post-processor mutation is applied only when libprotobuf-mutator\n        // calls mutate on an input, and *not* during fuzz target execution.\n        // Replaying a corpus through the fuzzer will not be affected by the\n        // post-processor mutation.\n\n        // TODO(jianwendong): consider using a factory to store the names of all\n        // writeFilters.\n        static const auto filter_names = UberWriteFilterFuzzer::filterNames();\n        static const auto factories = Registry::FactoryRegistry<\n            Server::Configuration::NamedNetworkFilterConfigFactory>::factories();\n        // Choose a valid filter name.\n        if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) ==\n            std::end(filter_names)) {\n          absl::string_view filter_name = filter_names[seed % filter_names.size()];\n          input->mutable_config()->set_name(std::string(filter_name));\n        }\n        // Set the corresponding type_url for Any.\n        auto& factory = factories.at(input->config().name());\n        input->mutable_config()->mutable_typed_config()->set_type_url(\n            absl::StrCat(\"type.googleapis.com/\",\n                         
factory->createEmptyConfigProto()->GetDescriptor()->full_name()));\n      }};\n  try {\n    TestUtility::validate(input);\n    // Check the filter's name in case some filters are not supported yet.\n    // TODO(jianwendong): remove this if block when we have a factory for writeFilters.\n    static const auto filter_names = UberWriteFilterFuzzer::filterNames();\n    if (std::find(filter_names.begin(), filter_names.end(), input.config().name()) ==\n        std::end(filter_names)) {\n      ENVOY_LOG_MISC(debug, \"Test case with unsupported filter type: {}\", input.config().name());\n      return;\n    }\n    static UberWriteFilterFuzzer fuzzer;\n    fuzzer.fuzz(input.config(), input.actions());\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n  }\n}\n\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc",
    "content": "#include \"envoy/extensions/filters/network/direct_response/v3/config.pb.h\"\n#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h\"\n\n#include \"extensions/filters/common/ratelimit/ratelimit_impl.h\"\n#include \"extensions/filters/network/common/utility.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/extensions/filters/common/ext_authz/test_common.h\"\n#include \"test/extensions/filters/network/common/fuzz/uber_readfilter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace {\n// Limit the fill_interval in the config of local_ratelimit filter prevent overflow in\n// std::chrono::time_point.\nstatic const int SecondsPerDay = 86400;\n} // namespace\nstd::vector<absl::string_view> UberFilterFuzzer::filterNames() {\n  // These filters have already been covered by this fuzzer.\n  // Will extend to cover other network filters one by one.\n  static std::vector<absl::string_view> filter_names;\n  if (filter_names.empty()) {\n    const auto factories = Registry::FactoryRegistry<\n        Server::Configuration::NamedNetworkFilterConfigFactory>::factories();\n    const std::vector<absl::string_view> supported_filter_names = {\n        NetworkFilterNames::get().ExtAuthorization, NetworkFilterNames::get().LocalRateLimit,\n        NetworkFilterNames::get().RedisProxy, NetworkFilterNames::get().ClientSslAuth,\n        NetworkFilterNames::get().Echo, NetworkFilterNames::get().DirectResponse,\n        NetworkFilterNames::get().DubboProxy, NetworkFilterNames::get().SniCluster,\n        // A dedicated http_connection_manager fuzzer can be found in\n        // test/common/http/conn_manager_impl_fuzz_test.cc\n        NetworkFilterNames::get().HttpConnectionManager, NetworkFilterNames::get().ThriftProxy,\n        NetworkFilterNames::get().ZooKeeperProxy, 
NetworkFilterNames::get().SniDynamicForwardProxy,\n        NetworkFilterNames::get().KafkaBroker, NetworkFilterNames::get().RocketmqProxy,\n        NetworkFilterNames::get().RateLimit, NetworkFilterNames::get().Rbac,\n        NetworkFilterNames::get().MongoProxy, NetworkFilterNames::get().MySQLProxy\n        // TODO(jianwendong): add \"NetworkFilterNames::get().Postgres\" after it supports untrusted\n        // data.\n        // TODO(jianwendong): add fuzz test for \"NetworkFilterNames::get().TcpProxy\".\n    };\n    // Check whether each filter is loaded into Envoy.\n    // Some customers build Envoy without some filters. When they run fuzzing, the use of a filter\n    // that does not exist will cause fatal errors.\n    for (auto& filter_name : supported_filter_names) {\n      if (factories.contains(filter_name)) {\n        filter_names.push_back(filter_name);\n      } else {\n        ENVOY_LOG_MISC(debug, \"Filter name not found in the factory: {}\", filter_name);\n      }\n    }\n  }\n  return filter_names;\n}\n\nvoid UberFilterFuzzer::perFilterSetup(const std::string& filter_name) {\n  // Set up response for ext_authz filter\n  if (filter_name == NetworkFilterNames::get().ExtAuthorization) {\n\n    async_client_factory_ = std::make_unique<Grpc::MockAsyncClientFactory>();\n    async_client_ = std::make_unique<Grpc::MockAsyncClient>();\n    // TODO(jianwendong): consider testing on different kinds of responses.\n    ON_CALL(*async_client_, sendRaw(_, _, _, _, _, _))\n        .WillByDefault(testing::WithArgs<3>(Invoke([&](Grpc::RawAsyncRequestCallbacks& callbacks) {\n          Filters::Common::ExtAuthz::GrpcClientImpl* grpc_client_impl =\n              dynamic_cast<Filters::Common::ExtAuthz::GrpcClientImpl*>(&callbacks);\n          const std::string empty_body{};\n          const auto expected_headers =\n              Filters::Common::ExtAuthz::TestCommon::makeHeaderValueOption({});\n          auto check_response = 
Filters::Common::ExtAuthz::TestCommon::makeCheckResponse(\n              Grpc::Status::WellKnownGrpcStatus::Ok, envoy::type::v3::OK, empty_body,\n              expected_headers);\n          // Give response to the grpc_client by calling onSuccess().\n          grpc_client_impl->onSuccess(std::move(check_response), span_);\n          return async_request_.get();\n        })));\n\n    EXPECT_CALL(*async_client_factory_, create()).WillOnce(Invoke([&] {\n      return std::move(async_client_);\n    }));\n\n    EXPECT_CALL(factory_context_.cluster_manager_.async_client_manager_,\n                factoryForGrpcService(_, _, _))\n        .WillOnce(Invoke([&](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n          return std::move(async_client_factory_);\n        }));\n    read_filter_callbacks_->connection_.local_address_ = pipe_addr_;\n    read_filter_callbacks_->connection_.remote_address_ = pipe_addr_;\n  } else if (filter_name == NetworkFilterNames::get().HttpConnectionManager) {\n    read_filter_callbacks_->connection_.local_address_ = pipe_addr_;\n    read_filter_callbacks_->connection_.remote_address_ = pipe_addr_;\n  } else if (filter_name == NetworkFilterNames::get().RateLimit) {\n    async_client_factory_ = std::make_unique<Grpc::MockAsyncClientFactory>();\n    async_client_ = std::make_unique<Grpc::MockAsyncClient>();\n    // TODO(jianwendong): consider testing on different kinds of responses.\n    ON_CALL(*async_client_, sendRaw(_, _, _, _, _, _))\n        .WillByDefault(testing::WithArgs<3>(Invoke([&](Grpc::RawAsyncRequestCallbacks& callbacks) {\n          Filters::Common::RateLimit::GrpcClientImpl* grpc_client_impl =\n              dynamic_cast<Filters::Common::RateLimit::GrpcClientImpl*>(&callbacks);\n          // Response OK\n          auto response = std::make_unique<envoy::service::ratelimit::v3::RateLimitResponse>();\n          // Give response to the grpc_client by calling onSuccess().\n          
grpc_client_impl->onSuccess(std::move(response), span_);\n          return async_request_.get();\n        })));\n\n    EXPECT_CALL(*async_client_factory_, create()).WillOnce(Invoke([&] {\n      return std::move(async_client_);\n    }));\n\n    EXPECT_CALL(factory_context_.cluster_manager_.async_client_manager_,\n                factoryForGrpcService(_, _, _))\n        .WillOnce(Invoke([&](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n          return std::move(async_client_factory_);\n        }));\n    read_filter_callbacks_->connection_.local_address_ = pipe_addr_;\n    read_filter_callbacks_->connection_.remote_address_ = pipe_addr_;\n  }\n}\n\nvoid UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name,\n                                                  Protobuf::Message* config_message) {\n  // System calls such as reading files are prohibited in this fuzzer. Some inputs that crash the\n  // mock/fake objects are also prohibited. We could also avoid fuzzing some unfinished features by\n  // checking them here. For now there are only three filters {DirectResponse, LocalRateLimit,\n  // HttpConnectionManager} on which we have constraints.\n  const std::string name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName(\n      std::string(filter_name));\n  if (filter_name == NetworkFilterNames::get().DirectResponse) {\n    envoy::extensions::filters::network::direct_response::v3::Config& config =\n        dynamic_cast<envoy::extensions::filters::network::direct_response::v3::Config&>(\n            *config_message);\n    if (config.response().specifier_case() ==\n        envoy::config::core::v3::DataSource::SpecifierCase::kFilename) {\n      throw EnvoyException(\n          absl::StrCat(\"direct_response trying to open a file. 
Config:\\n{}\", config.DebugString()));\n    }\n  } else if (filter_name == NetworkFilterNames::get().LocalRateLimit) {\n    envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& config =\n        dynamic_cast<envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit&>(\n            *config_message);\n    if (config.token_bucket().fill_interval().seconds() > SecondsPerDay) {\n      // Too large fill_interval may cause \"c++/v1/chrono\" overflow when simulated_time_system_ is\n      // converting it to a smaller unit. Constraining fill_interval to no greater than one day is\n      // reasonable.\n      throw EnvoyException(\n          absl::StrCat(\"local_ratelimit trying to set a large fill_interval. Config:\\n{}\",\n                       config.DebugString()));\n    }\n  } else if (filter_name == NetworkFilterNames::get().HttpConnectionManager) {\n    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n        config = dynamic_cast<envoy::extensions::filters::network::http_connection_manager::v3::\n                                  HttpConnectionManager&>(*config_message);\n    if (config.codec_type() == envoy::extensions::filters::network::http_connection_manager::v3::\n                                   HttpConnectionManager::HTTP3) {\n      // Quiche is still in progress and http_conn_manager has a dedicated fuzzer.\n      // So we won't fuzz it here with complex mocks.\n      throw EnvoyException(absl::StrCat(\n          \"http_conn_manager trying to use Quiche which we won't fuzz here. Config:\\n{}\",\n          config.DebugString()));\n    }\n  }\n}\n\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/uber_per_writefilter.cc",
    "content": "#include \"extensions/filters/network/common/utility.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/extensions/filters/network/common/fuzz/uber_writefilter.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nstd::vector<absl::string_view> UberWriteFilterFuzzer::filterNames() {\n  // These filters have already been covered by this fuzzer.\n  // Will extend to cover other network filters one by one.\n  static std::vector<absl::string_view> filter_names;\n  if (filter_names.empty()) {\n    const auto factories = Registry::FactoryRegistry<\n        Server::Configuration::NamedNetworkFilterConfigFactory>::factories();\n    const std::vector<absl::string_view> supported_filter_names = {\n        NetworkFilterNames::get().ZooKeeperProxy, NetworkFilterNames::get().KafkaBroker,\n        NetworkFilterNames::get().MongoProxy, NetworkFilterNames::get().MySQLProxy\n        // TODO(jianwendong) Add \"NetworkFilterNames::get().Postgres\" after it supports untrusted\n        // data.\n    };\n    for (auto& filter_name : supported_filter_names) {\n      if (factories.contains(filter_name)) {\n        filter_names.push_back(filter_name);\n      } else {\n        ENVOY_LOG_MISC(debug, \"Filter name not found in the factory: {}\", filter_name);\n      }\n    }\n  }\n  return filter_names;\n}\n\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/uber_readfilter.cc",
    "content": "#include \"test/extensions/filters/network/common/fuzz/uber_readfilter.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/network/address_impl.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nvoid UberFilterFuzzer::reset() {\n  // Reset some changes made by current filter on some mock objects.\n\n  // Close the connection to make sure the filter's callback is set to nullptr.\n  read_filter_callbacks_->connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n  // Clear the filter's raw pointer stored inside the connection_ and reset the connection_'s state.\n  read_filter_callbacks_->connection_.callbacks_.clear();\n  read_filter_callbacks_->connection_.bytes_sent_callbacks_.clear();\n  read_filter_callbacks_->connection_.state_ = Network::Connection::State::Open;\n  // Clear the pointers inside the mock_dispatcher\n  Event::MockDispatcher& mock_dispatcher =\n      dynamic_cast<Event::MockDispatcher&>(read_filter_callbacks_->connection_.dispatcher_);\n  mock_dispatcher.clearDeferredDeleteList();\n  factory_context_.admin_.config_tracker_.config_tracker_callbacks_.clear();\n  read_filter_.reset();\n}\n\nvoid UberFilterFuzzer::fuzzerSetup() {\n  // Setup process when this fuzzer object is constructed.\n  // For a static fuzzer, this will only be executed once.\n\n  // Get the pointer of read_filter when the read_filter is being added to connection_.\n  read_filter_callbacks_ = std::make_shared<NiceMock<Network::MockReadFilterCallbacks>>();\n  ON_CALL(read_filter_callbacks_->connection_, addReadFilter(_))\n      .WillByDefault(Invoke([&](Network::ReadFilterSharedPtr read_filter) -> void {\n        read_filter_ = read_filter;\n        read_filter_->initializeReadFilterCallbacks(*read_filter_callbacks_);\n      }));\n  ON_CALL(read_filter_callbacks_->connection_, addFilter(_))\n      .WillByDefault(Invoke([&](Network::FilterSharedPtr 
read_filter) -> void {\n        read_filter_ = read_filter;\n        read_filter_->initializeReadFilterCallbacks(*read_filter_callbacks_);\n      }));\n\n  // Prepare sni for sni_cluster filter and sni_dynamic_forward_proxy filter.\n  ON_CALL(read_filter_callbacks_->connection_, requestedServerName())\n      .WillByDefault(Return(\"fake_cluster\"));\n\n  // Prepare time source for filters such as local_ratelimit filter.\n  factory_context_.prepareSimulatedSystemTime();\n\n  // Prepare address for filters such as ext_authz filter.\n  pipe_addr_ = std::make_shared<Network::Address::PipeInstance>(\"/test/test.sock\");\n  async_request_ = std::make_unique<Grpc::MockAsyncRequest>();\n\n  // Set featureEnabled for mongo_proxy\n  ON_CALL(factory_context_.runtime_loader_.snapshot_, featureEnabled(\"mongo.proxy_enabled\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(factory_context_.runtime_loader_.snapshot_,\n          featureEnabled(\"mongo.connection_logging_enabled\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(factory_context_.runtime_loader_.snapshot_, featureEnabled(\"mongo.logging_enabled\", 100))\n      .WillByDefault(Return(true));\n\n  // Set featureEnabled for thrift_proxy\n  ON_CALL(factory_context_.runtime_loader_.snapshot_,\n          featureEnabled(\"ratelimit.thrift_filter_enabled\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(factory_context_.runtime_loader_.snapshot_,\n          featureEnabled(\"ratelimit.thrift_filter_enforcing\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(factory_context_.runtime_loader_.snapshot_,\n          featureEnabled(\"ratelimit.test_key.thrift_filter_enabled\", 100))\n      .WillByDefault(Return(true));\n}\n\nUberFilterFuzzer::UberFilterFuzzer() : time_source_(factory_context_.simulatedTimeSystem()) {\n  fuzzerSetup();\n}\n\nvoid UberFilterFuzzer::fuzz(\n    const envoy::config::listener::v3::Filter& proto_config,\n    const 
Protobuf::RepeatedPtrField<::test::extensions::filters::network::Action>& actions) {\n  try {\n    // Try to create the filter callback(cb_). Exit early if the config is invalid or violates PGV\n    // constraints.\n    const std::string& filter_name = proto_config.name();\n    ENVOY_LOG_MISC(info, \"filter name {}\", filter_name);\n    auto& factory = Config::Utility::getAndCheckFactoryByName<\n        Server::Configuration::NamedNetworkFilterConfigFactory>(filter_name);\n    ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(\n        proto_config, factory_context_.messageValidationVisitor(), factory);\n    // Make sure no invalid system calls are executed in fuzzer.\n    checkInvalidInputForFuzzer(filter_name, message.get());\n    ENVOY_LOG_MISC(info, \"Config content after decoded: {}\", message->DebugString());\n    cb_ = factory.createFilterFactoryFromProto(*message, factory_context_);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"Controlled exception in filter setup {}\", e.what());\n    return;\n  }\n  perFilterSetup(proto_config.name());\n  // Add filter to connection_.\n  cb_(read_filter_callbacks_->connection_);\n  for (const auto& action : actions) {\n    ENVOY_LOG_MISC(trace, \"action {}\", action.DebugString());\n    switch (action.action_selector_case()) {\n    case test::extensions::filters::network::Action::kOnData: {\n      ASSERT(read_filter_ != nullptr);\n      Buffer::OwnedImpl buffer(action.on_data().data());\n      read_filter_->onData(buffer, action.on_data().end_stream());\n\n      break;\n    }\n    case test::extensions::filters::network::Action::kOnNewConnection: {\n      ASSERT(read_filter_ != nullptr);\n      read_filter_->onNewConnection();\n\n      break;\n    }\n    case test::extensions::filters::network::Action::kAdvanceTime: {\n      time_source_.advanceTimeAndRun(\n          std::chrono::milliseconds(action.advance_time().milliseconds()),\n          factory_context_.dispatcher(), 
Event::Dispatcher::RunType::NonBlock);\n      break;\n    }\n    default: {\n      // Unhandled actions.\n      ENVOY_LOG_MISC(debug, \"Action support is missing for:\\n{}\", action.DebugString());\n      PANIC(\"A case is missing for an action\");\n    }\n    }\n  }\n\n  reset();\n}\n\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/uber_readfilter.h",
    "content": "#include \"envoy/network/filter.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.pb.validate.h\"\n#include \"test/extensions/filters/network/common/fuzz/utils/fakes.h\"\n#include \"test/mocks/network/mocks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\n\nclass UberFilterFuzzer {\npublic:\n  UberFilterFuzzer();\n  // This creates the filter config and runs the fuzzed data against the filter.\n  void\n  fuzz(const envoy::config::listener::v3::Filter& proto_config,\n       const Protobuf::RepeatedPtrField<::test::extensions::filters::network::Action>& actions);\n  // Get the name of filters which has been covered by this fuzzer.\n  static std::vector<absl::string_view> filterNames();\n  // Check whether the filter's config is invalid for fuzzer(e.g. system call).\n  void checkInvalidInputForFuzzer(const std::string& filter_name,\n                                  Protobuf::Message* config_message);\n\nprotected:\n  // Set-up filter specific mock expectations in constructor.\n  void fuzzerSetup();\n  // Reset the states of the mock objects.\n  void reset();\n  // Mock behaviors for specific filters.\n  void perFilterSetup(const std::string& filter_name);\n\nprivate:\n  Server::Configuration::FakeFactoryContext factory_context_;\n  Network::ReadFilterSharedPtr read_filter_;\n  Network::FilterFactoryCb cb_;\n  Network::Address::InstanceConstSharedPtr pipe_addr_;\n  Event::SimulatedTimeSystem& time_source_;\n  std::shared_ptr<NiceMock<Network::MockReadFilterCallbacks>> read_filter_callbacks_;\n  std::unique_ptr<Grpc::MockAsyncRequest> async_request_;\n  std::unique_ptr<Grpc::MockAsyncClient> async_client_;\n  std::unique_ptr<Grpc::MockAsyncClientFactory> async_client_factory_;\n  Tracing::MockSpan span_;\n};\n\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/uber_writefilter.cc",
    "content": "#include \"test/extensions/filters/network/common/fuzz/uber_writefilter.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/config/version_converter.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nvoid UberWriteFilterFuzzer::reset() {\n  // Reset the state of dependencies so that a new fuzz input starts in a clean state.\n\n  // Close the connection to make sure the filter's callback is set to nullptr.\n  write_filter_callbacks_->connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n  // Clear the filter's raw pointer stored inside the connection_ and reset the connection_'s state.\n  write_filter_callbacks_->connection_.callbacks_.clear();\n  write_filter_callbacks_->connection_.bytes_sent_callbacks_.clear();\n  write_filter_callbacks_->connection_.state_ = Network::Connection::State::Open;\n  // Clear the pointers inside the mock_dispatcher\n  Event::MockDispatcher& mock_dispatcher =\n      dynamic_cast<Event::MockDispatcher&>(write_filter_callbacks_->connection_.dispatcher_);\n  mock_dispatcher.clearDeferredDeleteList();\n  write_filter_.reset();\n}\n\nvoid UberWriteFilterFuzzer::fuzzerSetup() {\n  // Setup process when this fuzzer object is constructed.\n  // For a static fuzzer, this will only be executed once.\n\n  // Get the pointer of write_filter when the write_filter is being added to connection_.\n  write_filter_callbacks_ = std::make_shared<NiceMock<Network::MockWriteFilterCallbacks>>();\n  read_filter_callbacks_ = std::make_shared<NiceMock<Network::MockReadFilterCallbacks>>();\n  ON_CALL(write_filter_callbacks_->connection_, addWriteFilter(_))\n      .WillByDefault(Invoke([&](Network::WriteFilterSharedPtr write_filter) -> void {\n        write_filter->initializeWriteFilterCallbacks(*write_filter_callbacks_);\n        write_filter_ = write_filter;\n      }));\n  ON_CALL(write_filter_callbacks_->connection_, addFilter(_))\n      
.WillByDefault(Invoke([&](Network::FilterSharedPtr filter) -> void {\n        filter->initializeReadFilterCallbacks(*read_filter_callbacks_);\n        filter->initializeWriteFilterCallbacks(*write_filter_callbacks_);\n        write_filter_ = filter;\n      }));\n  factory_context_.prepareSimulatedSystemTime();\n\n  // Set featureEnabled for mongo_proxy\n  ON_CALL(factory_context_.runtime_loader_.snapshot_, featureEnabled(\"mongo.proxy_enabled\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(factory_context_.runtime_loader_.snapshot_,\n          featureEnabled(\"mongo.connection_logging_enabled\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(factory_context_.runtime_loader_.snapshot_, featureEnabled(\"mongo.logging_enabled\", 100))\n      .WillByDefault(Return(true));\n\n  // Set featureEnabled for thrift_proxy\n  ON_CALL(factory_context_.runtime_loader_.snapshot_,\n          featureEnabled(\"ratelimit.thrift_filter_enabled\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(factory_context_.runtime_loader_.snapshot_,\n          featureEnabled(\"ratelimit.thrift_filter_enforcing\", 100))\n      .WillByDefault(Return(true));\n  ON_CALL(factory_context_.runtime_loader_.snapshot_,\n          featureEnabled(\"ratelimit.test_key.thrift_filter_enabled\", 100))\n      .WillByDefault(Return(true));\n}\n\nUberWriteFilterFuzzer::UberWriteFilterFuzzer()\n    : time_source_(factory_context_.simulatedTimeSystem()) {\n  fuzzerSetup();\n}\n\nvoid UberWriteFilterFuzzer::fuzz(\n    const envoy::config::listener::v3::Filter& proto_config,\n    const Protobuf::RepeatedPtrField<::test::extensions::filters::network::WriteAction>& actions) {\n  try {\n    // Try to create the filter callback(cb_). 
Exit early if the config is invalid or violates PGV\n    // constraints.\n    const std::string& filter_name = proto_config.name();\n    ENVOY_LOG_MISC(debug, \"filter name {}\", filter_name);\n    auto& factory = Config::Utility::getAndCheckFactoryByName<\n        Server::Configuration::NamedNetworkFilterConfigFactory>(filter_name);\n    ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(\n        proto_config, factory_context_.messageValidationVisitor(), factory);\n    ENVOY_LOG_MISC(debug, \"Config content after decoded: {}\", message->DebugString());\n    cb_ = factory.createFilterFactoryFromProto(*message, factory_context_);\n    // Add filter to connection_.\n    cb_(write_filter_callbacks_->connection_);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"Controlled exception in filter setup {}\", e.what());\n    return;\n  }\n  for (const auto& action : actions) {\n    ENVOY_LOG_MISC(debug, \"action {}\", action.DebugString());\n    switch (action.action_selector_case()) {\n    case test::extensions::filters::network::WriteAction::kOnWrite: {\n      ASSERT(write_filter_ != nullptr);\n      Buffer::OwnedImpl buffer(action.on_write().data());\n      write_filter_->onWrite(buffer, action.on_write().end_stream());\n\n      break;\n    }\n    case test::extensions::filters::network::WriteAction::kAdvanceTime: {\n      time_source_.advanceTimeAndRun(\n          std::chrono::milliseconds(action.advance_time().milliseconds()),\n          factory_context_.dispatcher(), Event::Dispatcher::RunType::NonBlock);\n      break;\n    }\n    default: {\n      // Unhandled actions.\n      ENVOY_LOG_MISC(debug, \"Action support is missing for:\\n{}\", action.DebugString());\n      PANIC(\"A case is missing for an action\");\n    }\n    }\n  }\n\n  reset();\n}\n\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/uber_writefilter.h",
    "content": "#include \"envoy/network/filter.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.pb.validate.h\"\n#include \"test/extensions/filters/network/common/fuzz/utils/fakes.h\"\n#include \"test/mocks/network/mocks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\n\nclass UberWriteFilterFuzzer {\npublic:\n  UberWriteFilterFuzzer();\n  // This creates the filter config and runs the fuzzed data against the filter.\n  void fuzz(\n      const envoy::config::listener::v3::Filter& proto_config,\n      const Protobuf::RepeatedPtrField<::test::extensions::filters::network::WriteAction>& actions);\n  // Get the name of filters which has been covered by this fuzzer.\n  static std::vector<absl::string_view> filterNames();\n\nprotected:\n  // Set-up filter specific mock expectations in constructor.\n  void fuzzerSetup();\n  // Reset the states of the mock objects.\n  void reset();\n\nprivate:\n  Server::Configuration::FakeFactoryContext factory_context_;\n  Event::SimulatedTimeSystem& time_source_;\n  Network::WriteFilterSharedPtr write_filter_;\n  Network::FilterFactoryCb cb_;\n  std::shared_ptr<NiceMock<Network::MockWriteFilterCallbacks>> write_filter_callbacks_;\n  std::shared_ptr<NiceMock<Network::MockReadFilterCallbacks>> read_filter_callbacks_;\n};\n\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/utils/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"network_filter_fuzzer_fakes_lib\",\n    hdrs = [\"fakes.h\"],\n    deps = [\n        \"//test/mocks/server:factory_context_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/common/fuzz/utils/fakes.h",
    "content": "#include \"test/mocks/server/factory_context.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass FakeFactoryContext : public MockFactoryContext {\npublic:\n  void prepareSimulatedSystemTime() {\n    api_ = Api::createApiForTest(time_system_);\n    dispatcher_ = api_->allocateDispatcher(\"test_thread\");\n  }\n  AccessLog::AccessLogManager& accessLogManager() override { return access_log_manager_; }\n  Upstream::ClusterManager& clusterManager() override { return cluster_manager_; }\n  Event::Dispatcher& dispatcher() override { return *dispatcher_; }\n  const Network::DrainDecision& drainDecision() override { return drain_manager_; }\n  Init::Manager& initManager() override { return init_manager_; }\n  ServerLifecycleNotifier& lifecycleNotifier() override { return lifecycle_notifier_; }\n  const LocalInfo::LocalInfo& localInfo() const override { return local_info_; }\n  Envoy::Runtime::Loader& runtime() override { return runtime_loader_; }\n  Stats::Scope& scope() override { return scope_; }\n  Singleton::Manager& singletonManager() override { return *singleton_manager_; }\n  ThreadLocal::Instance& threadLocal() override { return thread_local_; }\n  Server::Admin& admin() override { return admin_; }\n  Stats::Scope& listenerScope() override { return listener_scope_; }\n  Api::Api& api() override { return *api_; }\n  TimeSource& timeSource() override { return time_system_; }\n  OverloadManager& overloadManager() override { return overload_manager_; }\n  ProtobufMessage::ValidationContext& messageValidationContext() override {\n    return validation_context_;\n  }\n  ProtobufMessage::ValidationVisitor& messageValidationVisitor() override {\n    return ProtobufMessage::getStrictValidationVisitor();\n  }\n  Event::SimulatedTimeSystem& simulatedTimeSystem() {\n    return dynamic_cast<Event::SimulatedTimeSystem&>(time_system_);\n  }\n  Event::TestTimeSystem& timeSystem() { return time_system_; }\n  Grpc::Context& grpcContext() 
override { return grpc_context_; }\n  Http::Context& httpContext() override { return http_context_; }\n\n  Event::DispatcherPtr dispatcher_;\n  Event::SimulatedTimeSystem time_system_;\n  Api::ApiPtr api_;\n};\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/redis/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"redis_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/filters/network/common/redis:client_lib\",\n        \"//source/extensions/filters/network/common/redis:codec_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_utils_lib\",\n    hdrs = [\"test_utils.h\"],\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"codec_impl_test\",\n    srcs = [\"codec_impl_test.cc\"],\n    deps = [\n        \":redis_mocks\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/filters/network/common/redis:codec_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"client_impl_test\",\n    srcs = [\"client_impl_test.cc\"],\n    deps = [\n        \":redis_mocks\",\n        \":test_utils_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/filters/network/common/redis:client_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"fault_test\",\n    srcs = [\"fault_test.cc\"],\n    
deps = [\n        \":redis_mocks\",\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/filters/network/common/redis:fault_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/common/redis/client_impl_test.cc",
    "content": "#include <vector>\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/filters/network/common/redis/client_impl.h\"\n#include \"extensions/filters/network/common/redis/utility.h\"\n\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/extensions/filters/network/common/redis/test_utils.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::Property;\nusing testing::Ref;\nusing testing::Return;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\nnamespace Client {\n\nclass RedisClientImplTest : public testing::Test,\n                            public Event::TestUsingSimulatedTime,\n                            public Common::Redis::DecoderFactory {\npublic:\n  // Common::Redis::DecoderFactory\n  Common::Redis::DecoderPtr create(Common::Redis::DecoderCallbacks& callbacks) override {\n    callbacks_ = &callbacks;\n    return Common::Redis::DecoderPtr{decoder_};\n  }\n\n  ~RedisClientImplTest() override {\n    client_.reset();\n\n    EXPECT_TRUE(TestUtility::gaugesZeroed(host_->cluster_.stats_store_.gauges()));\n    EXPECT_TRUE(TestUtility::gaugesZeroed(host_->stats_.gauges()));\n  }\n\n  void setup() {\n    config_ = std::make_unique<ConfigImpl>(createConnPoolSettings());\n    finishSetup();\n  }\n\n  void setup(std::unique_ptr<Config>&& config) {\n    config_ = std::move(config);\n    finishSetup();\n  }\n\n  void finishSetup() {\n    upstream_connection_ = new NiceMock<Network::MockClientConnection>();\n    
Upstream::MockHost::MockCreateConnectionData conn_info;\n    conn_info.connection_ = upstream_connection_;\n\n    // Create timers in order they are created in client_impl.cc\n    connect_or_op_timer_ = new Event::MockTimer(&dispatcher_);\n    flush_timer_ = new Event::MockTimer(&dispatcher_);\n\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(*host_, createConnection_(_, _)).WillOnce(Return(conn_info));\n    EXPECT_CALL(*upstream_connection_, addReadFilter(_))\n        .WillOnce(SaveArg<0>(&upstream_read_filter_));\n    EXPECT_CALL(*upstream_connection_, connect());\n    EXPECT_CALL(*upstream_connection_, noDelay(true));\n\n    redis_command_stats_ =\n        Common::Redis::RedisCommandStats::createRedisCommandStats(stats_.symbolTable());\n\n    client_ = ClientImpl::create(host_, dispatcher_, Common::Redis::EncoderPtr{encoder_}, *this,\n                                 *config_, redis_command_stats_, stats_);\n    EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_total_.value());\n    EXPECT_EQ(1UL, host_->stats_.cx_total_.value());\n    EXPECT_EQ(false, client_->active());\n\n    // NOP currently.\n    upstream_connection_->runHighWatermarkCallbacks();\n    upstream_connection_->runLowWatermarkCallbacks();\n  }\n\n  void onConnected() {\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    upstream_connection_->raiseEvent(Network::ConnectionEvent::Connected);\n  }\n\n  void respond() {\n    Common::Redis::RespValuePtr response1{new Common::Redis::RespValue()};\n    response1->type(Common::Redis::RespType::SimpleString);\n    response1->asString() = \"OK\";\n    EXPECT_EQ(true, client_->active());\n    ClientImpl* client_impl = dynamic_cast<ClientImpl*>(client_.get());\n    EXPECT_NE(client_impl, nullptr);\n    client_impl->onRespValue(std::move(response1));\n  }\n\n  void testInitializeReadPolicy(\n      envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ReadPolicy\n          read_policy) 
{\n    InSequence s;\n\n    setup(std::make_unique<ConfigImpl>(createConnPoolSettings(20, true, true, 100, read_policy)));\n\n    Common::Redis::RespValue readonly_request = Utility::ReadOnlyRequest::instance();\n    EXPECT_CALL(*encoder_, encode(Eq(readonly_request), _));\n    EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n    client_->initialize(auth_username_, auth_password_);\n\n    EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value());\n    EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value());\n    EXPECT_EQ(1UL, host_->stats_.rq_total_.value());\n    EXPECT_EQ(1UL, host_->stats_.rq_active_.value());\n\n    EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    client_->close();\n  }\n\n  const std::string cluster_name_{\"foo\"};\n  std::shared_ptr<Upstream::MockHost> host_{new NiceMock<Upstream::MockHost>()};\n  Event::MockDispatcher dispatcher_;\n  Event::MockTimer* flush_timer_{};\n  Event::MockTimer* connect_or_op_timer_{};\n  MockEncoder* encoder_{new MockEncoder()};\n  MockDecoder* decoder_{new MockDecoder()};\n  Common::Redis::DecoderCallbacks* callbacks_{};\n  NiceMock<Network::MockClientConnection>* upstream_connection_{};\n  Network::ReadFilterSharedPtr upstream_read_filter_;\n  std::unique_ptr<Config> config_;\n  ClientPtr client_;\n  NiceMock<Stats::MockIsolatedStatsStore> stats_;\n  Stats::ScopePtr stats_scope_;\n  Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_;\n  std::string auth_username_;\n  std::string auth_password_;\n};\n\nTEST_F(RedisClientImplTest, BatchWithZeroBufferAndTimeout) {\n  // Basic test with a single request, default buffer size (0) and timeout (0).\n  // This means we do not batch requests, and thus the flush timer is never enabled.\n  InSequence s;\n\n  setup();\n\n  // Make the dummy request\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  
EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  // Process the dummy request\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n  }));\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nclass ConfigBufferSizeGTSingleRequest : public Config {\n  bool disableOutlierEvents() const override { return false; }\n  std::chrono::milliseconds opTimeout() const override { return std::chrono::milliseconds(25); }\n  bool enableHashtagging() const override { return false; }\n  bool enableRedirection() const override { return false; }\n  unsigned int maxBufferSizeBeforeFlush() const override { return 8; }\n  std::chrono::milliseconds bufferFlushTimeoutInMs() const override {\n    return std::chrono::milliseconds(1);\n  }\n  uint32_t maxUpstreamUnknownConnections() const override { return 0; }\n  bool enableCommandStats() const override { return false; }\n  ReadPolicy readPolicy() const override { return ReadPolicy::Primary; }\n};\n\nTEST_F(RedisClientImplTest, BatchWithTimerFiring) {\n  // With a flush buffer > single request length, the flush timer comes into play.\n  // In this test, we make a single request that doesn't fill the buffer, so we\n  // have to wait for the flush timer to fire.\n  InSequence s;\n\n  
setup(std::make_unique<ConfigBufferSizeGTSingleRequest>());\n\n  // Make the dummy request\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enableTimer(_, _));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  // Pretend the flush timer fires.\n  // The timer callback is the general-purpose flush function, also used when\n  // the buffer is filled. If the buffer fills before the timer fires, we need\n  // to check if the timer is active and cancel it. However, if the timer fires\n  // the callback, this internal check returns false as the timer is finished.\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  flush_timer_->invokeCallback();\n\n  // Process the dummy request\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n  }));\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, BatchWithTimerCancelledByBufferFlush) {\n  // Expanding on the previous test, let's the flush buffer is filled by two requests.\n  // In this test, we make a single request that doesn't fill the buffer, and the timer\n  // starts. 
However, a second request comes in, which should cancel the timer, such\n  // that it is never invoked.\n  InSequence s;\n\n  setup(std::make_unique<ConfigBufferSizeGTSingleRequest>());\n\n  // Make the dummy request (doesn't fill buffer, starts timer)\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enableTimer(_, _));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  // Make a second dummy request (fills buffer, cancels timer)\n  Common::Redis::RespValue request2;\n  MockClientCallbacks callbacks2;\n  EXPECT_CALL(*encoder_, encode(Ref(request2), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(true));\n  ;\n  EXPECT_CALL(*flush_timer_, disableTimer());\n  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);\n  EXPECT_NE(nullptr, handle2);\n\n  // Process the dummy requests\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n\n    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks2, onResponse_(Ref(response2)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response2));\n  }));\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*upstream_connection_, 
close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, Basic) {\n  InSequence s;\n\n  setup();\n\n  client_->initialize(auth_username_, auth_password_);\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Common::Redis::RespValue request2;\n  MockClientCallbacks callbacks2;\n  EXPECT_CALL(*encoder_, encode(Ref(request2), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);\n  EXPECT_NE(nullptr, handle2);\n\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_active_.value());\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n\n    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks2, onResponse_(Ref(response2)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    
callbacks_->onRespValue(std::move(response2));\n  }));\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nclass ConfigEnableCommandStats : public Config {\n  bool disableOutlierEvents() const override { return false; }\n  std::chrono::milliseconds opTimeout() const override { return std::chrono::milliseconds(25); }\n  bool enableHashtagging() const override { return false; }\n  bool enableRedirection() const override { return false; }\n  unsigned int maxBufferSizeBeforeFlush() const override { return 0; }\n  std::chrono::milliseconds bufferFlushTimeoutInMs() const override {\n    return std::chrono::milliseconds(0);\n  }\n  ReadPolicy readPolicy() const override { return ReadPolicy::Primary; }\n  uint32_t maxUpstreamUnknownConnections() const override { return 0; }\n  bool enableCommandStats() const override { return true; }\n};\n\nvoid initializeRedisSimpleCommand(Common::Redis::RespValue* request, std::string command_name,\n                                  std::string key) {\n  std::vector<Common::Redis::RespValue> command(2);\n  command[0].type(Common::Redis::RespType::BulkString);\n  command[0].asString() = command_name;\n  command[1].type(Common::Redis::RespType::BulkString);\n  command[1].asString() = key;\n\n  request->type(Common::Redis::RespType::Array);\n  request->asArray().swap(command);\n}\n\nTEST_F(RedisClientImplTest, CommandStatsDisabledSingleRequest) {\n  // Single successful GET request. 
The upstream command timer works even with stats disabled;\n  // however the per command timers and counts will not be recorded.\n  InSequence s;\n\n  setup();\n\n  client_->initialize(auth_username_, auth_password_);\n\n  std::string get_command = \"get\";\n\n  Common::Redis::RespValue request1;\n  initializeRedisSimpleCommand(&request1, get_command, \"foo\");\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  // Regular Envoy stats function as normal\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(1UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(1UL, host_->stats_.rq_active_.value());\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n\n    simTime().setMonotonicTime(std::chrono::microseconds(10));\n\n    EXPECT_CALL(stats_,\n                deliverHistogramToSinks(\n                    Property(&Stats::Metric::name, \"upstream_commands.upstream_rq_time\"), 10));\n\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n\n    callbacks_->onRespValue(std::move(response1));\n  }));\n\n  upstream_read_filter_->onData(fake_data, false);\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n\n  // The redis command stats should not show any requests\n  EXPECT_EQ(0UL, 
stats_.counter(\"upstream_commands.get.success\").value());\n  EXPECT_EQ(0UL, stats_.counter(\"upstream_commands.get.failure\").value());\n  EXPECT_EQ(0UL, stats_.counter(\"upstream_commands.get.total\").value());\n}\n\nTEST_F(RedisClientImplTest, CommandStatsEnabledTwoRequests) {\n  // Make two GET requests (one success, one failure) and verify command stats are recorded\n  InSequence s;\n\n  setup(std::make_unique<ConfigEnableCommandStats>());\n\n  client_->initialize(auth_username_, auth_password_);\n\n  std::string get_command = \"get\";\n\n  Common::Redis::RespValue request1;\n  initializeRedisSimpleCommand(&request1, get_command, \"foo\");\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Common::Redis::RespValue request2;\n  initializeRedisSimpleCommand(&request2, get_command, \"bar\");\n  MockClientCallbacks callbacks2;\n  EXPECT_CALL(*encoder_, encode(Ref(request2), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);\n  EXPECT_NE(nullptr, handle2);\n\n  // Regular Envoy stats function as normal\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_active_.value());\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n\n    simTime().setMonotonicTime(std::chrono::microseconds(10));\n\n    EXPECT_CALL(stats_, deliverHistogramToSinks(\n                            Property(&Stats::Metric::name, \"upstream_commands.get.latency\"), 10));\n    EXPECT_CALL(stats_,\n                
deliverHistogramToSinks(\n                    Property(&Stats::Metric::name, \"upstream_commands.upstream_rq_time\"), 10));\n\n    // First request is successful\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n\n    EXPECT_CALL(stats_, deliverHistogramToSinks(\n                            Property(&Stats::Metric::name, \"upstream_commands.get.latency\"), 10));\n    EXPECT_CALL(stats_,\n                deliverHistogramToSinks(\n                    Property(&Stats::Metric::name, \"upstream_commands.upstream_rq_time\"), 10));\n\n    // Second request errors out\n    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n    response2->type(Common::Redis::RespType::Error);\n    EXPECT_CALL(callbacks2, onResponse_(Ref(response2)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response2));\n\n    // Redis command stats reflect one successful and one failed request\n    EXPECT_EQ(1UL, stats_.counter(\"upstream_commands.get.success\").value());\n    EXPECT_EQ(1UL, stats_.counter(\"upstream_commands.get.failure\").value());\n    EXPECT_EQ(2UL, stats_.counter(\"upstream_commands.get.total\").value());\n  }));\n\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, InitializedWithAuthPassword) {\n  InSequence s;\n\n  setup();\n\n  auth_password_ = \"testing password\";\n  
Utility::AuthRequest auth_request(auth_password_);\n  EXPECT_CALL(*encoder_, encode(Eq(auth_request), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  client_->initialize(auth_username_, auth_password_);\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(1UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(1UL, host_->stats_.rq_active_.value());\n\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, InitializedWithAuthAcl) {\n  InSequence s;\n\n  setup();\n\n  auth_username_ = \"testing username\";\n  auth_password_ = \"testing password\";\n  Utility::AuthRequest auth_request(auth_username_, auth_password_);\n  EXPECT_CALL(*encoder_, encode(Eq(auth_request), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  client_->initialize(auth_username_, auth_password_);\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(1UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(1UL, host_->stats_.rq_active_.value());\n\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, InitializedWithPreferPrimaryReadPolicy) {\n  testInitializeReadPolicy(envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::\n                               ConnPoolSettings::PREFER_MASTER);\n}\n\nTEST_F(RedisClientImplTest, InitializedWithReplicaReadPolicy) {\n  testInitializeReadPolicy(\n      envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::REPLICA);\n}\n\nTEST_F(RedisClientImplTest, InitializedWithPreferReplicaReadPolicy) {\n  
testInitializeReadPolicy(envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::\n                               ConnPoolSettings::PREFER_REPLICA);\n}\n\nTEST_F(RedisClientImplTest, InitializedWithAnyReadPolicy) {\n  testInitializeReadPolicy(\n      envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ANY);\n}\n\nTEST_F(RedisClientImplTest, Cancel) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Common::Redis::RespValue request2;\n  MockClientCallbacks callbacks2;\n  EXPECT_CALL(*encoder_, encode(Ref(request2), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);\n  EXPECT_NE(nullptr, handle2);\n\n  handle1->cancel();\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks1, onResponse_(_)).Times(0);\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n\n    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n    EXPECT_CALL(callbacks2, onResponse_(Ref(response2)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response2));\n  }));\n  
upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_cancelled_.value());\n}\n\nTEST_F(RedisClientImplTest, FailAll) {\n  InSequence s;\n\n  setup();\n\n  NiceMock<Network::MockConnectionCallbacks> connection_callbacks;\n  client_->addConnectionCallbacks(connection_callbacks);\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  EXPECT_CALL(host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  EXPECT_CALL(callbacks1, onFailure());\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose));\n  upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_with_active_rq_.value());\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_remote_with_active_rq_.value());\n}\n\nTEST_F(RedisClientImplTest, FailAllWithCancel) {\n  InSequence s;\n\n  setup();\n\n  NiceMock<Network::MockConnectionCallbacks> connection_callbacks;\n  client_->addConnectionCallbacks(connection_callbacks);\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n  handle1->cancel();\n\n  EXPECT_CALL(callbacks1, onFailure()).Times(0);\n  
EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n  upstream_connection_->raiseEvent(Network::ConnectionEvent::LocalClose);\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_with_active_rq_.value());\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_local_with_active_rq_.value());\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_cancelled_.value());\n}\n\nTEST_F(RedisClientImplTest, ProtocolError) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    throw Common::Redis::ProtocolError(\"error\");\n  }));\n  EXPECT_CALL(host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::ExtOriginRequestFailed, _));\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(callbacks1, onFailure());\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_protocol_error_.value());\n  EXPECT_EQ(1UL, host_->stats_.rq_error_.value());\n}\n\nTEST_F(RedisClientImplTest, ConnectFail) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  EXPECT_CALL(host_->outlier_detector_,\n              
putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  EXPECT_CALL(callbacks1, onFailure());\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_fail_.value());\n  EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value());\n}\n\nclass ConfigOutlierDisabled : public Config {\n  bool disableOutlierEvents() const override { return true; }\n  std::chrono::milliseconds opTimeout() const override { return std::chrono::milliseconds(25); }\n  bool enableHashtagging() const override { return false; }\n  bool enableRedirection() const override { return false; }\n  unsigned int maxBufferSizeBeforeFlush() const override { return 0; }\n  std::chrono::milliseconds bufferFlushTimeoutInMs() const override {\n    return std::chrono::milliseconds(0);\n  }\n  ReadPolicy readPolicy() const override { return ReadPolicy::Primary; }\n  uint32_t maxUpstreamUnknownConnections() const override { return 0; }\n  bool enableCommandStats() const override { return false; }\n};\n\nTEST_F(RedisClientImplTest, OutlierDisabled) {\n  InSequence s;\n\n  setup(std::make_unique<ConfigOutlierDisabled>());\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  EXPECT_CALL(host_->outlier_detector_, putResult(_, _)).Times(0);\n  EXPECT_CALL(callbacks1, onFailure());\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_fail_.value());\n  EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value());\n}\n\nTEST_F(RedisClientImplTest, ConnectTimeout) {\n  InSequence s;\n\n  
setup();\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  EXPECT_CALL(host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(callbacks1, onFailure());\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  connect_or_op_timer_->invokeCallback();\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_timeout_.value());\n  EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value());\n}\n\nTEST_F(RedisClientImplTest, OpTimeout) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value());\n\n  EXPECT_CALL(callbacks1, onResponse_(_));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  EXPECT_CALL(host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n  respond();\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_rq_active_.value());\n\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n  handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  
EXPECT_CALL(host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(callbacks1, onFailure());\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  connect_or_op_timer_->invokeCallback();\n\n  EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_timeout_.value());\n  EXPECT_EQ(1UL, host_->stats_.rq_timeout_.value());\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_rq_active_.value());\n}\n\nTEST_F(RedisClientImplTest, AskRedirection) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Common::Redis::RespValue request2;\n  MockClientCallbacks callbacks2;\n  EXPECT_CALL(*encoder_, encode(Ref(request2), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);\n  EXPECT_NE(nullptr, handle2);\n\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_active_.value());\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    response1->type(Common::Redis::RespType::Error);\n    // The exact values of the hash slot and IP info are not important.\n    response1->asString() = \"ASK 1111 10.1.2.3:4321\";\n    // Simulate redirection 
failure.\n    EXPECT_CALL(callbacks1, onRedirection_(Ref(response1), \"10.1.2.3:4321\", true))\n        .WillOnce(Return(false));\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n\n    EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());\n\n    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n    response2->type(Common::Redis::RespType::Error);\n    // The exact values of the hash slot and IP info are not important.\n    response2->asString() = \"ASK 2222 10.1.2.4:4321\";\n    EXPECT_CALL(callbacks2, onRedirection_(Ref(response2), \"10.1.2.4:4321\", true))\n        .WillOnce(Return(true));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response2));\n\n    EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());\n  }));\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, MovedRedirection) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Common::Redis::RespValue request2;\n  MockClientCallbacks callbacks2;\n  EXPECT_CALL(*encoder_, encode(Ref(request2), _));\n  EXPECT_CALL(*flush_timer_, 
enabled()).WillOnce(Return(false));\n  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);\n  EXPECT_NE(nullptr, handle2);\n\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_active_.value());\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    response1->type(Common::Redis::RespType::Error);\n    // The exact values of the hash slot and IP info are not important.\n    response1->asString() = \"MOVED 1111 10.1.2.3:4321\";\n    // Simulate redirection failure.\n    EXPECT_CALL(callbacks1, onRedirection_(Ref(response1), \"10.1.2.3:4321\", false))\n        .WillOnce(Return(false));\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n\n    EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());\n\n    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n    response2->type(Common::Redis::RespType::Error);\n    // The exact values of the hash slot and IP info are not important.\n    response2->asString() = \"MOVED 2222 10.1.2.4:4321\";\n    EXPECT_CALL(callbacks2, onRedirection_(Ref(response2), \"10.1.2.4:4321\", false))\n        .WillOnce(Return(true));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response2));\n\n    EXPECT_EQ(1UL, 
host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());\n  }));\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, RedirectionFailure) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Common::Redis::RespValue request2;\n  MockClientCallbacks callbacks2;\n  EXPECT_CALL(*encoder_, encode(Ref(request2), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);\n  EXPECT_NE(nullptr, handle2);\n\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_active_.value());\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n\n    // Test an error that looks like it might be a MOVED or ASK redirection error except for the\n    // first non-whitespace substring.\n    Common::Redis::RespValuePtr response1{new Common::Redis::RespValue()};\n    response1->type(Common::Redis::RespType::Error);\n    response1->asString() = \"NOTMOVEDORASK 1111 1.1.1.1:1\";\n\n    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    
callbacks_->onRespValue(std::move(response1));\n\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());\n\n    // Test a truncated MOVED error response that cannot be parsed properly.\n    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n    response2->type(Common::Redis::RespType::Error);\n    response2->asString() = \"MOVED 1111\";\n    EXPECT_CALL(callbacks2, onResponse_(Ref(response2)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response2));\n\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());\n  }));\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, AskRedirectionNotEnabled) {\n  InSequence s;\n\n  setup(std::make_unique<ConfigImpl>(createConnPoolSettings(20, true, false)));\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Common::Redis::RespValue request2;\n  MockClientCallbacks callbacks2;\n  EXPECT_CALL(*encoder_, encode(Ref(request2), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);\n  EXPECT_NE(nullptr, handle2);\n\n  EXPECT_EQ(2UL, 
host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_active_.value());\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    response1->type(Common::Redis::RespType::Error);\n    // The exact values of the hash slot and IP info are not important.\n    response1->asString() = \"ASK 1111 10.1.2.3:4321\";\n    // Simulate redirection failure.\n    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());\n\n    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n    response2->type(Common::Redis::RespType::Error);\n    // The exact values of the hash slot and IP info are not important.\n    response2->asString() = \"ASK 2222 10.1.2.4:4321\";\n    EXPECT_CALL(callbacks2, onResponse_(Ref(response2)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response2));\n\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());\n  }));\n  upstream_read_filter_->onData(fake_data, false);\n\n  
EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, MovedRedirectionNotEnabled) {\n  InSequence s;\n\n  setup(std::make_unique<ConfigImpl>(createConnPoolSettings(20, true, false)));\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Common::Redis::RespValue request2;\n  MockClientCallbacks callbacks2;\n  EXPECT_CALL(*encoder_, encode(Ref(request2), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);\n  EXPECT_NE(nullptr, handle2);\n\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value());\n  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_total_.value());\n  EXPECT_EQ(2UL, host_->stats_.rq_active_.value());\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InSequence s;\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    response1->type(Common::Redis::RespType::Error);\n    // The exact values of the hash slot and IP info are not important.\n    response1->asString() = \"MOVED 1111 10.1.2.3:4321\";\n    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));\n    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response1));\n\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());\n    
EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());\n\n    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n    response2->type(Common::Redis::RespType::Error);\n    // The exact values of the hash slot and IP info are not important.\n    response2->asString() = \"MOVED 2222 10.1.2.4:4321\";\n    EXPECT_CALL(callbacks2, onResponse_(Ref(response2)));\n    EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n    EXPECT_CALL(host_->outlier_detector_,\n                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n    callbacks_->onRespValue(std::move(response2));\n\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());\n    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());\n  }));\n  upstream_read_filter_->onData(fake_data, false);\n\n  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  client_->close();\n}\n\nTEST_F(RedisClientImplTest, RemoveFailedHealthCheck) {\n  // This test simulates a health check response signaling traffic should be drained from the host.\n  // As a result, the health checker will close the client in the call back.\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n  // Each call should result in either onResponse or onFailure, never both.\n  EXPECT_CALL(callbacks1, onFailure()).Times(0);\n  EXPECT_CALL(callbacks1, onResponse_(Ref(response1)))\n      .WillOnce(Invoke([&](Common::Redis::RespValuePtr&) {\n        // The health 
checker might fail the active health check based on the response content, and\n        // result in removing the host and closing the client.\n        client_->close();\n      }));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer()).Times(2);\n  EXPECT_CALL(host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));\n  callbacks_->onRespValue(std::move(response1));\n}\n\nTEST_F(RedisClientImplTest, RemoveFailedHost) {\n  // This test simulates a health check request failed due to remote host closing the connection.\n  // As a result the health checker will close the client in the call back.\n  InSequence s;\n\n  setup();\n\n  NiceMock<Network::MockConnectionCallbacks> connection_callbacks;\n  client_->addConnectionCallbacks(connection_callbacks);\n\n  Common::Redis::RespValue request1;\n  MockClientCallbacks callbacks1;\n  EXPECT_CALL(*encoder_, encode(Ref(request1), _));\n  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));\n  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);\n  EXPECT_NE(nullptr, handle1);\n\n  onConnected();\n\n  EXPECT_CALL(host_->outlier_detector_,\n              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));\n  EXPECT_CALL(callbacks1, onFailure()).WillOnce(Invoke([&]() { client_->close(); }));\n  EXPECT_CALL(*connect_or_op_timer_, disableTimer());\n  EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose));\n  upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST(RedisClientFactoryImplTest, Basic) {\n  ClientFactoryImpl factory;\n  Upstream::MockHost::MockCreateConnectionData conn_info;\n  conn_info.connection_ = new NiceMock<Network::MockClientConnection>();\n  std::shared_ptr<Upstream::MockHost> host(new NiceMock<Upstream::MockHost>());\n  EXPECT_CALL(*host, createConnection_(_, _)).WillOnce(Return(conn_info));\n  NiceMock<Event::MockDispatcher> dispatcher;\n  ConfigImpl 
config(createConnPoolSettings());\n  Stats::IsolatedStoreImpl stats_;\n  auto redis_command_stats =\n      Common::Redis::RedisCommandStats::createRedisCommandStats(stats_.symbolTable());\n  const std::string auth_username;\n  const std::string auth_password;\n  ClientPtr client = factory.create(host, dispatcher, config, redis_command_stats, stats_,\n                                    auth_username, auth_password);\n  client->close();\n}\n} // namespace Client\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/redis/codec_impl_test.cc",
    "content": "#include <vector>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n\n#include \"extensions/filters/network/common/redis/codec_impl.h\"\n\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::ContainerEq;\nusing testing::InSequence;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\nclass RedisRespValueTest : public testing::Test {\npublic:\n  void makeBulkStringArray(RespValue& value, const std::vector<std::string>& strings) {\n    std::vector<RespValue> values(strings.size());\n    for (uint64_t i = 0; i < strings.size(); i++) {\n      values[i].type(RespType::BulkString);\n      values[i].asString() = strings[i];\n    }\n\n    value.type(RespType::Array);\n    value.asArray().swap(values);\n  }\n\n  void makeArray(RespValue& value, const std::vector<RespValue> items) {\n    value.type(RespType::Array);\n    value.asArray().insert(value.asArray().end(), items.begin(), items.end());\n  }\n\n  void verifyMoves(RespValue& value) {\n    RespValue copy = value;\n    RespValue move(std::move(copy));\n    EXPECT_TRUE(value == move);\n\n    RespValue move_assign;\n    move_assign = std::move(move);\n    EXPECT_TRUE(value == move_assign);\n  }\n\n  void validateIterator(RespValue& value, const std::vector<std::string>& strings) {\n    EXPECT_EQ(RespType::CompositeArray, value.type());\n    EXPECT_EQ(value.asCompositeArray().size(), strings.size());\n    std::vector<std::string> values;\n    for (const RespValue& part : value.asCompositeArray()) {\n      values.emplace_back(part.asString());\n    }\n    EXPECT_THAT(values, ContainerEq(strings));\n  }\n};\n\nTEST_F(RedisRespValueTest, EqualityTestingAndCopyingTest) {\n  InSequence s;\n\n  RespValue value1, value2, value3;\n\n  makeBulkStringArray(value1, {\"get\", \"foo\", 
\"bar\", \"now\"});\n  makeBulkStringArray(value2, {\"get\", \"foo\", \"bar\", \"now\"});\n  makeBulkStringArray(value3, {\"get\", \"foo\", \"bar\", \"later\"});\n\n  EXPECT_TRUE(value1 == value2);\n  EXPECT_FALSE(value1 == value3);\n\n  RespValue value4, value5;\n  value4.type(RespType::Array);\n  value4.asArray() = {value1, value2};\n  value5.type(RespType::Array);\n  value5.asArray() = {value1, value3};\n\n  EXPECT_FALSE(value4 == value5);\n  EXPECT_TRUE(value4 == value4);\n  EXPECT_TRUE(value5 == value5);\n\n  RespValue bulkstring_value, simplestring_value, error_value, integer_value, null_value;\n  bulkstring_value.type(RespType::BulkString);\n  simplestring_value.type(RespType::SimpleString);\n  error_value.type(RespType::Error);\n  integer_value.type(RespType::Integer);\n  integer_value.asInteger() = 123;\n\n  EXPECT_NE(bulkstring_value, simplestring_value);\n  EXPECT_NE(bulkstring_value, error_value);\n  EXPECT_NE(bulkstring_value, integer_value);\n  EXPECT_NE(bulkstring_value, null_value);\n\n  RespValue value6, value7, value8;\n  makeArray(value6,\n            {bulkstring_value, simplestring_value, error_value, integer_value, null_value, value1});\n  makeArray(value7,\n            {bulkstring_value, simplestring_value, error_value, integer_value, null_value, value2});\n  makeArray(value8,\n            {bulkstring_value, simplestring_value, error_value, integer_value, null_value, value3});\n\n  // This may look weird, but it is a way to actually do self-assignment without generating compiler\n  // warnings. 
Self-assignment should succeed without changing the RespValue, and therefore no\n  // expectations should change.\n  RespValue* value6_ptr = &value6;\n  value6 = *value6_ptr;\n  EXPECT_EQ(value6, value7);\n  EXPECT_NE(value6, value8);\n  EXPECT_NE(value7, value8);\n  EXPECT_EQ(value6.asArray()[5].asArray()[3].asString(), \"now\");\n  EXPECT_EQ(value7.asArray()[5].asArray()[3].asString(), \"now\");\n  EXPECT_EQ(value8.asArray()[5].asArray()[3].asString(), \"later\");\n\n  value8 = value1;\n  EXPECT_EQ(value8.type(), RespType::Array);\n  EXPECT_EQ(value8.asArray().size(), value1.asArray().size());\n  EXPECT_EQ(value8.asArray().size(), 4);\n  for (unsigned int i = 0; i < value8.asArray().size(); i++) {\n    EXPECT_EQ(value8.asArray()[i].type(), RespType::BulkString);\n    EXPECT_EQ(value8.asArray()[i].asString(), value1.asArray()[i].asString());\n  }\n  value7 = value1;\n  EXPECT_EQ(value7, value8);\n  value7 = value3;\n  EXPECT_NE(value7, value8);\n\n  value8 = bulkstring_value;\n  EXPECT_EQ(value8.type(), RespType::BulkString);\n  value8 = simplestring_value;\n  EXPECT_EQ(value8.type(), RespType::SimpleString);\n  value8 = error_value;\n  EXPECT_EQ(value8.type(), RespType::Error);\n  value8 = integer_value;\n  EXPECT_EQ(value8.type(), RespType::Integer);\n  value8 = null_value;\n  EXPECT_EQ(value8.type(), RespType::Null);\n}\n\nTEST_F(RedisRespValueTest, MoveOperationsTest) {\n  InSequence s;\n\n  RespValue array_value, bulkstring_value, simplestring_value, error_value, integer_value,\n      null_value, composite_array_empty;\n  makeBulkStringArray(array_value, {\"get\", \"foo\", \"bar\", \"now\"});\n  bulkstring_value.type(RespType::BulkString);\n  bulkstring_value.asString() = \"foo\";\n  simplestring_value.type(RespType::SimpleString);\n  simplestring_value.asString() = \"bar\";\n  error_value.type(RespType::Error);\n  error_value.asString() = \"error\";\n  integer_value.type(RespType::Integer);\n  integer_value.asInteger() = 123;\n  
composite_array_empty.type(RespType::CompositeArray);\n\n  verifyMoves(array_value);\n  verifyMoves(bulkstring_value);\n  verifyMoves(simplestring_value);\n  verifyMoves(error_value);\n  verifyMoves(integer_value);\n  verifyMoves(null_value);\n  verifyMoves(composite_array_empty);\n}\n\nTEST_F(RedisRespValueTest, SwapTest) {\n  InSequence s;\n\n  RespValue value1, value2, value3;\n\n  makeBulkStringArray(value1, {\"get\", \"foo\", \"bar\", \"now\"});\n  makeBulkStringArray(value2, {\"get\", \"foo\", \"bar\", \"now\"});\n  makeBulkStringArray(value3, {\"get\", \"foo\", \"bar\", \"later\"});\n\n  std::swap(value2, value3);\n  EXPECT_TRUE(value1 == value3);\n\n  std::swap(value3, value3);\n  EXPECT_TRUE(value1 == value3);\n}\n\nTEST_F(RedisRespValueTest, CompositeArrayTest) {\n  InSequence s;\n\n  RespValueSharedPtr base = std::make_shared<RespValue>();\n  makeBulkStringArray(*base, {\"get\", \"foo\", \"bar\", \"now\"});\n\n  RespValue command;\n  command.type(RespType::SimpleString);\n  command.asString() = \"get\";\n\n  RespValue value1{base, command, 1, 1};\n  RespValue value2{base, command, 2, 2};\n  RespValue value3{base, command, 3, 3};\n\n  validateIterator(value1, {\"get\", \"foo\"});\n  validateIterator(value2, {\"get\", \"bar\"});\n  validateIterator(value3, {\"get\", \"now\"});\n\n  EXPECT_EQ(value1.asCompositeArray().command(), &command);\n  EXPECT_EQ(value1.asCompositeArray().baseArray(), base);\n\n  RespValue value4{base, command, 1, 1};\n  EXPECT_TRUE(value1 == value1);\n  EXPECT_FALSE(value1 == value2);\n  EXPECT_FALSE(value1 == value3);\n  EXPECT_TRUE(value1 == value4);\n\n  RespValue value5;\n  value5 = value1;\n  EXPECT_TRUE(value1 == value5);\n\n  RespValue empty;\n  empty.type(RespType::CompositeArray);\n  validateIterator(empty, {});\n}\n\nclass RedisEncoderDecoderImplTest : public testing::Test, public DecoderCallbacks {\npublic:\n  RedisEncoderDecoderImplTest() : decoder_(*this) {}\n\n  // RedisProxy::DecoderCallbacks\n  void 
onRespValue(RespValuePtr&& value) override {\n    decoded_values_.emplace_back(std::move(value));\n  }\n\n  EncoderImpl encoder_;\n  DecoderImpl decoder_;\n  Buffer::OwnedImpl buffer_;\n  std::vector<RespValuePtr> decoded_values_;\n};\n\nTEST_F(RedisEncoderDecoderImplTest, Null) {\n  RespValue value;\n  EXPECT_EQ(\"null\", value.toString());\n  encoder_.encode(value, buffer_);\n  EXPECT_EQ(\"$-1\\r\\n\", buffer_.toString());\n  decoder_.decode(buffer_);\n  EXPECT_EQ(value, *decoded_values_[0]);\n  EXPECT_EQ(0UL, buffer_.length());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, Error) {\n  RespValue value;\n  value.type(RespType::Error);\n  value.asString() = \"error\";\n  EXPECT_EQ(\"\\\"error\\\"\", value.toString());\n  encoder_.encode(value, buffer_);\n  EXPECT_EQ(\"-error\\r\\n\", buffer_.toString());\n  decoder_.decode(buffer_);\n  EXPECT_EQ(value, *decoded_values_[0]);\n  EXPECT_EQ(0UL, buffer_.length());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, SimpleString) {\n  RespValue value;\n  value.type(RespType::SimpleString);\n  value.asString() = \"simple string\";\n  EXPECT_EQ(\"\\\"simple string\\\"\", value.toString());\n  encoder_.encode(value, buffer_);\n  EXPECT_EQ(\"+simple string\\r\\n\", buffer_.toString());\n  decoder_.decode(buffer_);\n  EXPECT_EQ(value, *decoded_values_[0]);\n  EXPECT_EQ(0UL, buffer_.length());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, BulkString) {\n  RespValue value;\n  value.type(RespType::BulkString);\n  value.asString() = \"bulk string\";\n  EXPECT_EQ(\"\\\"bulk string\\\"\", value.toString());\n  encoder_.encode(value, buffer_);\n  EXPECT_EQ(\"$11\\r\\nbulk string\\r\\n\", buffer_.toString());\n  decoder_.decode(buffer_);\n  EXPECT_EQ(value, *decoded_values_[0]);\n  EXPECT_EQ(0UL, buffer_.length());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, Integer) {\n  RespValue value;\n  value.type(RespType::Integer);\n  value.asInteger() = std::numeric_limits<int64_t>::max();\n  EXPECT_EQ(\"9223372036854775807\", value.toString());\n  
encoder_.encode(value, buffer_);\n  EXPECT_EQ(\":9223372036854775807\\r\\n\", buffer_.toString());\n  decoder_.decode(buffer_);\n  EXPECT_EQ(value, *decoded_values_[0]);\n  EXPECT_EQ(0UL, buffer_.length());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, NegativeIntegerSmall) {\n  RespValue value;\n  value.type(RespType::Integer);\n  value.asInteger() = -1;\n  encoder_.encode(value, buffer_);\n  EXPECT_EQ(\":-1\\r\\n\", buffer_.toString());\n  decoder_.decode(buffer_);\n  EXPECT_EQ(value, *decoded_values_[0]);\n  EXPECT_EQ(0UL, buffer_.length());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, NegativeIntegerLarge) {\n  RespValue value;\n  value.type(RespType::Integer);\n  value.asInteger() = std::numeric_limits<int64_t>::min();\n  encoder_.encode(value, buffer_);\n  EXPECT_EQ(\":-9223372036854775808\\r\\n\", buffer_.toString());\n  decoder_.decode(buffer_);\n  EXPECT_EQ(value, *decoded_values_[0]);\n  EXPECT_EQ(0UL, buffer_.length());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, EmptyArray) {\n  RespValue value;\n  value.type(RespType::Array);\n  EXPECT_EQ(\"[]\", value.toString());\n  encoder_.encode(value, buffer_);\n  EXPECT_EQ(\"*0\\r\\n\", buffer_.toString());\n  decoder_.decode(buffer_);\n  EXPECT_EQ(value, *decoded_values_[0]);\n  EXPECT_EQ(0UL, buffer_.length());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, Array) {\n  std::vector<RespValue> values(2);\n  values[0].type(RespType::BulkString);\n  values[0].asString() = \"hello\";\n  values[1].type(RespType::Integer);\n  values[1].asInteger() = -5;\n\n  RespValue value;\n  value.type(RespType::Array);\n  value.asArray().swap(values);\n  EXPECT_EQ(\"[\\\"hello\\\", -5]\", value.toString());\n  encoder_.encode(value, buffer_);\n  EXPECT_EQ(\"*2\\r\\n$5\\r\\nhello\\r\\n:-5\\r\\n\", buffer_.toString());\n  decoder_.decode(buffer_);\n  EXPECT_EQ(value, *decoded_values_[0]);\n  EXPECT_EQ(0UL, buffer_.length());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, CompositeArray) {\n  std::vector<RespValue> values(2);\n  
values[0].type(RespType::BulkString);\n  values[0].asString() = \"bar\";\n  values[1].type(RespType::BulkString);\n  values[1].asString() = \"foo\";\n\n  auto base = std::make_shared<RespValue>();\n  base->type(RespType::Array);\n  base->asArray().swap(values);\n\n  RespValue command;\n  command.type(RespType::SimpleString);\n  command.asString() = \"get\";\n\n  RespValue value1{base, command, 0, 0};\n  RespValue value2{base, command, 1, 1};\n\n  EXPECT_EQ(\"[\\\"get\\\", \\\"bar\\\"]\", value1.toString());\n  encoder_.encode(value1, buffer_);\n  EXPECT_EQ(\"*2\\r\\n+get\\r\\n$3\\r\\nbar\\r\\n\", buffer_.toString());\n\n  EXPECT_EQ(\"[\\\"get\\\", \\\"foo\\\"]\", value2.toString());\n  encoder_.encode(value2, buffer_);\n  EXPECT_EQ(\"*2\\r\\n+get\\r\\n$3\\r\\nbar\\r\\n*2\\r\\n+get\\r\\n$3\\r\\nfoo\\r\\n\", buffer_.toString());\n\n  // There is no decoder for composite array\n}\n\nTEST_F(RedisEncoderDecoderImplTest, NestedArray) {\n  std::vector<RespValue> nested_values(3);\n  nested_values[0].type(RespType::BulkString);\n  nested_values[0].asString() = \"hello\";\n  nested_values[1].type(RespType::Integer);\n  nested_values[1].asInteger() = 0;\n\n  std::vector<RespValue> values(2);\n  values[0].type(RespType::Array);\n  values[0].asArray().swap(nested_values);\n  values[1].type(RespType::BulkString);\n  values[1].asString() = \"world\";\n\n  RespValue value;\n  value.type(RespType::Array);\n  value.asArray().swap(values);\n  encoder_.encode(value, buffer_);\n  EXPECT_EQ(\"*2\\r\\n*3\\r\\n$5\\r\\nhello\\r\\n:0\\r\\n$-1\\r\\n$5\\r\\nworld\\r\\n\", buffer_.toString());\n\n  // To test partial decode we will feed the buffer in 1 char at a time.\n  for (char c : buffer_.toString()) {\n    Buffer::OwnedImpl temp_buffer(&c, 1);\n    decoder_.decode(temp_buffer);\n    EXPECT_EQ(0UL, temp_buffer.length());\n  }\n\n  EXPECT_EQ(value, *decoded_values_[0]);\n}\n\nTEST_F(RedisEncoderDecoderImplTest, NullArray) {\n  buffer_.add(\"*-1\\r\\n\");\n  decoder_.decode(buffer_);\n  
EXPECT_EQ(RespType::Null, decoded_values_[0]->type());\n}\n\nTEST_F(RedisEncoderDecoderImplTest, InvalidType) {\n  buffer_.add(\"^\");\n  EXPECT_THROW(decoder_.decode(buffer_), ProtocolError);\n}\n\nTEST_F(RedisEncoderDecoderImplTest, InvalidInteger) {\n  buffer_.add(\":-a\");\n  EXPECT_THROW(decoder_.decode(buffer_), ProtocolError);\n}\n\nTEST_F(RedisEncoderDecoderImplTest, InvalidIntegerExpectLF) {\n  buffer_.add(\":-123\\ra\");\n  EXPECT_THROW(decoder_.decode(buffer_), ProtocolError);\n}\n\nTEST_F(RedisEncoderDecoderImplTest, InvalidBulkStringExpectCR) {\n  buffer_.add(\"$1\\r\\nab\");\n  EXPECT_THROW(decoder_.decode(buffer_), ProtocolError);\n}\n\nTEST_F(RedisEncoderDecoderImplTest, InvalidBulkStringExpectLF) {\n  buffer_.add(\"$1\\r\\na\\ra\");\n  EXPECT_THROW(decoder_.decode(buffer_), ProtocolError);\n}\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/redis/fault_test.cc",
    "content": "#include \"envoy/common/random_generator.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"extensions/filters/network/common/redis/fault_impl.h\"\n\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\nusing RedisProxy = envoy::extensions::filters::network::redis_proxy::v3::RedisProxy;\nusing FractionalPercent = envoy::type::v3::FractionalPercent;\nclass FaultTest : public testing::Test {\npublic:\n  const std::string RUNTIME_KEY = \"runtime_key\";\n\n  void\n  createCommandFault(RedisProxy::RedisFault* fault, std::string command_str, int delay_seconds,\n                     absl::optional<int> fault_percentage,\n                     absl::optional<envoy::type::v3::FractionalPercent_DenominatorType> denominator,\n                     absl::optional<std::string> runtime_key) {\n    // We don't set fault type as it isn't used in the test\n\n    auto* commands = fault->mutable_commands();\n    auto* command = commands->Add();\n    command->assign(command_str);\n\n    fault->set_fault_type(envoy::extensions::filters::network::redis_proxy::v3::\n                              RedisProxy_RedisFault_RedisFaultType_ERROR);\n\n    addFaultPercentage(fault, fault_percentage, denominator, runtime_key);\n    addDelay(fault, delay_seconds);\n  }\n\n  void\n  createAllKeyFault(RedisProxy::RedisFault* fault, int delay_seconds,\n                    absl::optional<int> fault_percentage,\n                    absl::optional<envoy::type::v3::FractionalPercent_DenominatorType> denominator,\n                    absl::optional<std::string> runtime_key) {\n    addFaultPercentage(fault, fault_percentage, denominator, 
runtime_key);\n    addDelay(fault, delay_seconds);\n  }\n\n  void\n  addFaultPercentage(RedisProxy::RedisFault* fault, absl::optional<int> fault_percentage,\n                     absl::optional<envoy::type::v3::FractionalPercent_DenominatorType> denominator,\n                     absl::optional<std::string> runtime_key) {\n    envoy::config::core::v3::RuntimeFractionalPercent* fault_enabled =\n        fault->mutable_fault_enabled();\n\n    if (runtime_key.has_value()) {\n      fault_enabled->set_runtime_key(runtime_key.value());\n    }\n    auto* percentage = fault_enabled->mutable_default_value();\n    if (fault_percentage.has_value()) {\n      percentage->set_numerator(fault_percentage.value());\n    }\n    if (denominator.has_value()) {\n      percentage->set_denominator(denominator.value());\n    }\n  }\n\n  void addDelay(RedisProxy::RedisFault* fault, int delay_seconds) {\n    std::chrono::seconds duration = std::chrono::seconds(delay_seconds);\n    fault->mutable_delay()->set_seconds(duration.count());\n  }\n\n  testing::NiceMock<Random::MockRandomGenerator> random_;\n  testing::NiceMock<Runtime::MockLoader> runtime_;\n};\n\nTEST_F(FaultTest, MakeFaultForTestHelper) {\n  Common::Redis::FaultSharedPtr fault_ptr =\n      FaultManagerImpl::makeFaultForTest(FaultType::Error, std::chrono::milliseconds(10));\n\n  ASSERT_TRUE(fault_ptr->faultType() == FaultType::Error);\n  ASSERT_TRUE(fault_ptr->delayMs() == std::chrono::milliseconds(10));\n}\n\nTEST_F(FaultTest, NoFaults) {\n  RedisProxy redis_config;\n  auto* faults = redis_config.mutable_faults();\n\n  TestScopedRuntime scoped_runtime;\n  FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults);\n\n  const Fault* fault_ptr = fault_manager.getFaultForCommand(\"get\");\n  ASSERT_TRUE(fault_ptr == nullptr);\n}\n\nTEST_F(FaultTest, SingleCommandFaultNotEnabled) {\n  RedisProxy redis_config;\n  auto* faults = redis_config.mutable_faults();\n  createCommandFault(faults->Add(), \"get\", 0, 0, 
FractionalPercent::HUNDRED, RUNTIME_KEY);\n\n  TestScopedRuntime scoped_runtime;\n  FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults);\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(0));\n  EXPECT_CALL(runtime_, snapshot());\n  const Fault* fault_ptr = fault_manager.getFaultForCommand(\"get\");\n  ASSERT_TRUE(fault_ptr == nullptr);\n}\n\nTEST_F(FaultTest, SingleCommandFault) {\n  // Inject a single fault. Notably we use a different denominator to test that code path; normally\n  // we use FractionalPercent::HUNDRED.\n  RedisProxy redis_config;\n  auto* faults = redis_config.mutable_faults();\n  createCommandFault(faults->Add(), \"ttl\", 0, 5000, FractionalPercent::TEN_THOUSAND, RUNTIME_KEY);\n\n  TestScopedRuntime scoped_runtime;\n  FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults);\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(1));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(RUNTIME_KEY, 50)).WillOnce(Return(10));\n\n  const Fault* fault_ptr = fault_manager.getFaultForCommand(\"ttl\");\n  ASSERT_TRUE(fault_ptr != nullptr);\n}\n\nTEST_F(FaultTest, SingleCommandFaultWithNoDefaultValueOrRuntimeValue) {\n  // Inject a single fault with no default value or runtime value.\n  RedisProxy redis_config;\n  auto* faults = redis_config.mutable_faults();\n  createCommandFault(faults->Add(), \"ttl\", 0, absl::nullopt, absl::nullopt, absl::nullopt);\n\n  TestScopedRuntime scoped_runtime;\n  FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults);\n\n  EXPECT_CALL(random_, random()).WillOnce(Return(1));\n  const Fault* fault_ptr = fault_manager.getFaultForCommand(\"ttl\");\n  ASSERT_TRUE(fault_ptr == nullptr);\n}\n\nTEST_F(FaultTest, MultipleFaults) {\n  // This creates 2 faults, but the map will have 3 entries, as each command points to\n  // command specific faults AND the general fault. 
The second fault has no runtime key,\n  // forcing the runtime key check to be false in application code and falling back to the\n  // default value.\n  RedisProxy redis_config;\n  auto* faults = redis_config.mutable_faults();\n  createCommandFault(faults->Add(), \"get\", 0, 25, FractionalPercent::HUNDRED, RUNTIME_KEY);\n  createAllKeyFault(faults->Add(), 2, 25, FractionalPercent::HUNDRED, absl::nullopt);\n\n  TestScopedRuntime scoped_runtime;\n  FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults);\n  const Fault* fault_ptr;\n\n  // Get command - should have a fault 50% of time\n  // For the first call we mock the random percentage to be 10%, which will give us the first fault\n  // with 0s delay.\n  EXPECT_CALL(random_, random()).WillOnce(Return(1));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, 25)).WillOnce(Return(10));\n  fault_ptr = fault_manager.getFaultForCommand(\"get\");\n  ASSERT_TRUE(fault_ptr != nullptr);\n  ASSERT_EQ(fault_ptr->delayMs(), std::chrono::milliseconds(0));\n\n  // Another Get; we mock the random percentage to be 25%, giving us the ALL_KEY fault\n  EXPECT_CALL(random_, random()).WillOnce(Return(25));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, _))\n      .Times(2)\n      .WillOnce(Return(10))\n      .WillOnce(Return(50));\n  fault_ptr = fault_manager.getFaultForCommand(\"get\");\n  ASSERT_TRUE(fault_ptr != nullptr);\n  ASSERT_EQ(fault_ptr->delayMs(), std::chrono::milliseconds(2000));\n\n  // No fault for Get command with mocked random percentage >= 50%.\n  EXPECT_CALL(random_, random()).WillOnce(Return(50));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)).Times(2);\n  fault_ptr = fault_manager.getFaultForCommand(\"get\");\n  ASSERT_TRUE(fault_ptr == nullptr);\n\n  // Any other command; we mock the random percentage to be 1%, giving us the ALL_KEY fault\n  EXPECT_CALL(random_, random()).WillOnce(Return(1));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)).WillOnce(Return(10));\n\n  fault_ptr = 
fault_manager.getFaultForCommand(\"ttl\");\n  ASSERT_TRUE(fault_ptr != nullptr);\n  ASSERT_EQ(fault_ptr->delayMs(), std::chrono::milliseconds(2000));\n\n  // No fault for any other command with mocked random percentage >= 25%.\n  EXPECT_CALL(random_, random()).WillOnce(Return(25));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(_, _));\n  fault_ptr = fault_manager.getFaultForCommand(\"ttl\");\n  ASSERT_TRUE(fault_ptr == nullptr);\n}\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/redis/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include <cstdint>\n\n#include \"common/common/assert.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\nvoid PrintTo(const RespValue& value, std::ostream* os) { *os << value.toString(); }\n\nvoid PrintTo(const RespValuePtr& value, std::ostream* os) { *os << value->toString(); }\n\nMockEncoder::MockEncoder() {\n  ON_CALL(*this, encode(_, _))\n      .WillByDefault(\n          Invoke([this](const Common::Redis::RespValue& value, Buffer::Instance& out) -> void {\n            real_encoder_.encode(value, out);\n          }));\n}\n\nMockEncoder::~MockEncoder() = default;\n\nMockDecoder::MockDecoder() = default;\nMockDecoder::~MockDecoder() = default;\n\nnamespace Client {\n\nMockClient::MockClient() {\n  ON_CALL(*this, addConnectionCallbacks(_))\n      .WillByDefault(Invoke([this](Network::ConnectionCallbacks& callbacks) -> void {\n        callbacks_.push_back(&callbacks);\n      }));\n  ON_CALL(*this, close()).WillByDefault(Invoke([this]() -> void {\n    raiseEvent(Network::ConnectionEvent::LocalClose);\n  }));\n}\n\nMockClient::~MockClient() = default;\n\nMockPoolRequest::MockPoolRequest() = default;\nMockPoolRequest::~MockPoolRequest() = default;\n\nMockClientCallbacks::MockClientCallbacks() = default;\nMockClientCallbacks::~MockClientCallbacks() = default;\n\n} // namespace Client\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/redis/mocks.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"extensions/filters/network/common/redis/client_impl.h\"\n#include \"extensions/filters/network/common/redis/codec_impl.h\"\n\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\n\n/**\n * Pretty print const RespValue& value\n */\n\nvoid PrintTo(const RespValue& value, std::ostream* os);\nvoid PrintTo(const RespValuePtr& value, std::ostream* os);\n\nclass MockEncoder : public Common::Redis::Encoder {\npublic:\n  MockEncoder();\n  ~MockEncoder() override;\n\n  MOCK_METHOD(void, encode, (const Common::Redis::RespValue& value, Buffer::Instance& out));\n\nprivate:\n  Common::Redis::EncoderImpl real_encoder_;\n};\n\nclass MockDecoder : public Common::Redis::Decoder {\npublic:\n  MockDecoder();\n  ~MockDecoder() override;\n\n  MOCK_METHOD(void, decode, (Buffer::Instance & data));\n};\n\nnamespace Client {\n\nclass MockPoolRequest : public PoolRequest {\npublic:\n  MockPoolRequest();\n  ~MockPoolRequest() override;\n\n  MOCK_METHOD(void, cancel, ());\n};\n\nclass MockClient : public Client {\npublic:\n  MockClient();\n  ~MockClient() override;\n\n  void raiseEvent(Network::ConnectionEvent event) {\n    for (Network::ConnectionCallbacks* callbacks : callbacks_) {\n      callbacks->onEvent(event);\n    }\n  }\n\n  void runHighWatermarkCallbacks() {\n    for (auto* callback : callbacks_) {\n      callback->onAboveWriteBufferHighWatermark();\n    }\n  }\n\n  void runLowWatermarkCallbacks() {\n    for (auto* callback : callbacks_) {\n      callback->onBelowWriteBufferLowWatermark();\n    }\n  }\n\n  PoolRequest* makeRequest(const Common::Redis::RespValue& request,\n                           ClientCallbacks& callbacks) override {\n    client_callbacks_.push_back(&callbacks);\n    return makeRequest_(request, callbacks);\n  }\n\n  MOCK_METHOD(void, 
addConnectionCallbacks, (Network::ConnectionCallbacks & callbacks));\n  MOCK_METHOD(bool, active, ());\n  MOCK_METHOD(void, close, ());\n  MOCK_METHOD(PoolRequest*, makeRequest_,\n              (const Common::Redis::RespValue& request, ClientCallbacks& callbacks));\n  MOCK_METHOD(void, initialize, (const std::string& username, const std::string& password));\n\n  std::list<Network::ConnectionCallbacks*> callbacks_;\n  std::list<ClientCallbacks*> client_callbacks_;\n};\n\nclass MockClientCallbacks : public ClientCallbacks {\npublic:\n  MockClientCallbacks();\n  ~MockClientCallbacks() override;\n\n  void onResponse(Common::Redis::RespValuePtr&& value) override { onResponse_(value); }\n  bool onRedirection(Common::Redis::RespValuePtr&& value, const std::string& host_address,\n                     bool ask_redirection) override {\n    return onRedirection_(value, host_address, ask_redirection);\n  }\n\n  MOCK_METHOD(void, onResponse_, (Common::Redis::RespValuePtr & value));\n  MOCK_METHOD(void, onFailure, ());\n  MOCK_METHOD(bool, onRedirection_,\n              (Common::Redis::RespValuePtr & value, const std::string& host_address,\n               bool ask_redirection));\n};\n\n} // namespace Client\n\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/redis/test_utils.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace Redis {\nnamespace Client {\n\ninline envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings\ncreateConnPoolSettings(\n    int64_t millis = 20, bool hashtagging = true, bool redirection_support = true,\n    uint32_t max_unknown_conns = 100,\n    envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ReadPolicy\n        read_policy = envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::\n            ConnPoolSettings::MASTER) {\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings setting{};\n  setting.mutable_op_timeout()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(millis));\n  setting.set_enable_hashtagging(hashtagging);\n  setting.set_enable_redirection(redirection_support);\n  setting.mutable_max_upstream_unknown_connections()->set_value(max_unknown_conns);\n  setting.set_read_policy(read_policy);\n  return setting;\n}\n\n} // namespace Client\n} // namespace Redis\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/common/utility_test.cc",
    "content": "#include \"extensions/filters/network/common/utility.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Common {\nnamespace {\n\n// Test that canonical (or unknown) names are returned unmodified.\nTEST(FilterNameUtilTest, TestIgnoreCanonicalName) {\n  NiceMock<Runtime::MockLoader> runtime;\n\n  EXPECT_EQ(NetworkFilterNames::get().RedisProxy,\n            FilterNameUtil::canonicalFilterName(NetworkFilterNames::get().RedisProxy, &runtime));\n  EXPECT_EQ(\"canonical.name\", FilterNameUtil::canonicalFilterName(\"canonical.name\", &runtime));\n}\n\n// Test that deprecated names are canonicalized.\nTEST(FilterNameUtilTest, DEPRECATED_FEATURE_TEST(TestDeprecatedName)) {\n  NiceMock<Runtime::MockLoader> runtime;\n\n  EXPECT_CALL(\n      runtime.snapshot_,\n      deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n      .WillRepeatedly(Return(true));\n\n  EXPECT_EQ(NetworkFilterNames::get().RedisProxy,\n            FilterNameUtil::canonicalFilterName(\"envoy.redis_proxy\", &runtime));\n}\n\n// Test that deprecated names trigger an exception if the deprecated name feature is disabled.\nTEST(FilterNameUtilTest, TestDeprecatedNameThrows) {\n  NiceMock<Runtime::MockLoader> runtime;\n\n  EXPECT_CALL(\n      runtime.snapshot_,\n      deprecatedFeatureEnabled(\"envoy.deprecated_features.allow_deprecated_extension_names\", _))\n      .WillRepeatedly(Return(false));\n\n  EXPECT_THROW_WITH_REGEX(FilterNameUtil::canonicalFilterName(\"envoy.redis_proxy\", &runtime),\n                          EnvoyException,\n                          \"Using deprecated network filter extension name 'envoy.redis_proxy' .*\");\n}\n\n} // 
namespace\n} // namespace Common\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/direct_response/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"direct_response_integration_test\",\n    srcs = [\n        \"direct_response_integration_test.cc\",\n    ],\n    extension_name = \"envoy.filters.network.direct_response\",\n    deps = [\n        \"//source/extensions/filters/network/direct_response:config\",\n        \"//test/integration:integration_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"direct_response_test\",\n    srcs = [\"direct_response_test.cc\"],\n    extension_name = \"envoy.filters.network.direct_response\",\n    deps = [\n        \"//source/extensions/filters/network/direct_response:filter\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/direct_response/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/direct_response/direct_response_integration_test.cc",
    "content": "#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\n\nclass DirectResponseIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                      public BaseIntegrationTest {\npublic:\n  DirectResponseIntegrationTest() : BaseIntegrationTest(GetParam(), directResponseConfig()) {}\n\n  static std::string directResponseConfig() {\n    return absl::StrCat(ConfigHelper::baseConfig(), R\"EOF(\n    filter_chains:\n      filters:\n      - name: direct_response\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.direct_response.v3.Config\n          response:\n            inline_string: \"hello, world!\\n\"\n      )EOF\");\n  }\n\n  void SetUp() override {\n    useListenerAccessLog(\"%RESPONSE_CODE_DETAILS%\");\n    BaseIntegrationTest::initialize();\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, DirectResponseIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(DirectResponseIntegrationTest, DirectResponseOnConnection) {\n  std::string response;\n  // This test becomes flaky (especially on Windows) if the connection is closed by the server\n  // before the client finishes transmitting the data it writes (resulting in a connection aborted\n  // error when the client reads). 
Instead, we just initiate the connection and do not send from\n  // the client to avoid this.\n  auto connection = createConnectionDriver(\n      lookupPort(\"listener_0\"), \"\",\n      [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n        conn.close(Network::ConnectionCloseType::FlushWrite);\n      });\n  connection->run();\n  EXPECT_EQ(\"hello, world!\\n\", response);\n  EXPECT_THAT(waitForAccessLog(listener_access_log_name_),\n              testing::HasSubstr(StreamInfo::ResponseCodeDetails::get().DirectResponse));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/direct_response/direct_response_test.cc",
    "content": "#include \"envoy/extensions/filters/network/direct_response/v3/config.pb.validate.h\"\n\n#include \"extensions/filters/network/direct_response/filter.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DirectResponse {\n\nclass DirectResponseFilterTest : public testing::Test {\npublic:\n  void initialize(const std::string& response) {\n    EXPECT_CALL(read_filter_callbacks_.connection_, enableHalfClose(true));\n    filter_ = std::make_shared<DirectResponseFilter>(response);\n    filter_->initializeReadFilterCallbacks(read_filter_callbacks_);\n  }\n  std::shared_ptr<DirectResponseFilter> filter_;\n  NiceMock<Network::MockReadFilterCallbacks> read_filter_callbacks_;\n};\n\n// Test the filter's onNewConnection() with a non-empty response\nTEST_F(DirectResponseFilterTest, OnNewConnection) {\n  initialize(\"hello\");\n  Buffer::OwnedImpl response(\"hello\");\n  EXPECT_CALL(read_filter_callbacks_.connection_, write(BufferEqual(&response), true));\n  EXPECT_CALL(read_filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n  EXPECT_CALL(read_filter_callbacks_.connection_.stream_info_,\n              setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().DirectResponse));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n}\n\n// Test the filter's onNewConnection() with an empty response\nTEST_F(DirectResponseFilterTest, OnNewConnectionEmptyResponse) {\n  initialize(\"\");\n  EXPECT_CALL(read_filter_callbacks_.connection_, write(_, _)).Times(0);\n  EXPECT_CALL(read_filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n  EXPECT_CALL(read_filter_callbacks_.connection_.stream_info_,\n              
setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().DirectResponse));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n}\n\n// Test the filter's onData()\nTEST_F(DirectResponseFilterTest, OnData) {\n  initialize(\"hello\");\n  Buffer::OwnedImpl data(\"data\");\n  EXPECT_CALL(read_filter_callbacks_.connection_, write(_, _)).Times(0);\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n}\n\n} // namespace DirectResponse\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"mocks_lib\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:decoder_events_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:decoder_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:protocol_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy:serializer_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:factory_base_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:filter_interface\",\n        \"//source/extensions/filters/network/dubbo_proxy/router:router_interface\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:printers_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:byte_order_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"hessian_utils_test\",\n    srcs = [\"hessian_utils_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:byte_order_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:hessian_utils_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"dubbo_protocol_impl_test\",\n    srcs 
= [\"dubbo_protocol_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \":mocks_lib\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:dubbo_protocol_impl_lib\",\n        \"//test/mocks/server:server_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"dubbo_hessian2_serializer_impl_test\",\n    srcs = [\"dubbo_hessian2_serializer_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \":mocks_lib\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:dubbo_hessian2_serializer_impl_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:hessian_utils_lib\",\n        \"//test/mocks/server:server_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \":mocks_lib\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:config\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:filter_config_interface\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"metadata_test\",\n    srcs = [\"metadata_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/dubbo_proxy:metadata_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:serializer_interface\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"route_matcher_test\",\n    srcs = [\"route_matcher_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/dubbo_proxy:metadata_lib\",\n        
\"//source/extensions/filters/network/dubbo_proxy/router:route_matcher\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"router_test\",\n    srcs = [\"router_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \":mocks_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:app_exception_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:dubbo_hessian2_serializer_impl_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:dubbo_protocol_impl_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:metadata_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy/router:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:registry_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"app_exception_test\",\n    srcs = [\"app_exception_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \":mocks_lib\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:app_exception_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:dubbo_hessian2_serializer_impl_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:dubbo_protocol_impl_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:hessian_utils_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:metadata_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"router_filter_config_test\",\n    srcs = [\"router_filter_config_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \":mocks_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy/filters:well_known_names\",\n        \"//source/extensions/filters/network/dubbo_proxy/router:config\",\n        
\"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"decoder_test\",\n    srcs = [\"decoder_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \":mocks_lib\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:decoder_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:metadata_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"conn_manager_test\",\n    srcs = [\"conn_manager_test.cc\"],\n    extension_name = \"envoy.filters.network.dubbo_proxy\",\n    deps = [\n        \":mocks_lib\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:config\",\n        \"//source/extensions/filters/network/dubbo_proxy:conn_manager_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:dubbo_hessian2_serializer_impl_lib\",\n        \"//source/extensions/filters/network/dubbo_proxy:dubbo_protocol_impl_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/app_exception_test.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/app_exception.h\"\n#include \"extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/filter.h\"\n#include \"extensions/filters/network/dubbo_proxy/hessian_utils.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer_impl.h\"\n\n#include \"test/extensions/filters/network/dubbo_proxy/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nclass AppExceptionTest : public testing::Test {\npublic:\n  AppExceptionTest() : metadata_(std::make_shared<MessageMetadata>()) {\n    protocol_.initSerializer(SerializationType::Hessian2);\n  }\n\n  DubboProtocolImpl protocol_;\n  MessageMetadataSharedPtr metadata_;\n};\n\nTEST_F(AppExceptionTest, Encode) {\n  std::string mock_message(\"invalid method name 'Sub'\");\n  AppException app_exception(ResponseStatus::ServiceNotFound, mock_message);\n\n  Buffer::OwnedImpl buffer;\n  size_t expect_body_size =\n      HessianUtils::writeString(buffer, mock_message) +\n      HessianUtils::writeInt(buffer, static_cast<uint8_t>(app_exception.response_type_));\n  buffer.drain(buffer.length());\n\n  metadata_->setSerializationType(SerializationType::Hessian2);\n  metadata_->setRequestId(0);\n\n  EXPECT_EQ(app_exception.encode(*(metadata_.get()), protocol_, buffer),\n            DubboFilters::DirectResponse::ResponseType::Exception);\n  MessageMetadataSharedPtr metadata = std::make_shared<MessageMetadata>();\n  auto result = protocol_.decodeHeader(buffer, metadata);\n  EXPECT_TRUE(result.second);\n\n  const ContextImpl* context = static_cast<const ContextImpl*>(result.first.get());\n  
EXPECT_EQ(expect_body_size, context->bodySize());\n  EXPECT_EQ(metadata->messageType(), MessageType::Response);\n  buffer.drain(context->headerSize());\n\n  // Verify the response type and content.\n  size_t hessian_int_size;\n  int type_value = HessianUtils::peekInt(buffer, &hessian_int_size);\n  EXPECT_EQ(static_cast<uint8_t>(app_exception.response_type_), static_cast<uint8_t>(type_value));\n\n  size_t hessian_string_size;\n  std::string message = HessianUtils::peekString(buffer, &hessian_string_size, sizeof(uint8_t));\n  EXPECT_EQ(mock_message, message);\n\n  EXPECT_EQ(buffer.length(), hessian_int_size + hessian_string_size);\n\n  auto rpc_result = protocol_.serializer()->deserializeRpcResult(buffer, result.first);\n  EXPECT_TRUE(rpc_result.second);\n  EXPECT_TRUE(rpc_result.first->hasException());\n  buffer.drain(buffer.length());\n\n  AppException new_app_exception(app_exception);\n  EXPECT_EQ(new_app_exception.status_, ResponseStatus::ServiceNotFound);\n\n  MockProtocol mock_protocol;\n  EXPECT_CALL(mock_protocol, encode(_, _, _, _)).WillOnce(Return(false));\n  EXPECT_THROW(app_exception.encode(*(metadata_.get()), mock_protocol, buffer), EnvoyException);\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/config_test.cc",
    "content": "#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/config.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/filter_config.h\"\n\n#include \"test/extensions/filters/network/dubbo_proxy/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nusing DubboProxyProto = envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy;\n\nnamespace {\n\nDubboProxyProto parseDubboProxyFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) {\n  DubboProxyProto dubbo_proxy;\n  TestUtility::loadFromYaml(yaml, dubbo_proxy, false, avoid_boosting);\n  return dubbo_proxy;\n}\n\n} // namespace\n\nclass DubboFilterConfigTestBase {\npublic:\n  void testConfig(DubboProxyProto& config) {\n    Network::FilterFactoryCb cb;\n    EXPECT_NO_THROW({ cb = factory_.createFilterFactoryFromProto(config, context_); });\n\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, addReadFilter(_));\n    cb(connection);\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  DubboProxyFilterConfigFactory factory_;\n};\n\nclass DubboFilterConfigTest : public DubboFilterConfigTestBase, public testing::Test {};\n\nTEST_F(DubboFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(DubboProxyFilterConfigFactory().createFilterFactoryFromProto(\n                   envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy(), context),\n               ProtoValidationException);\n}\n\nTEST_F(DubboFilterConfigTest, ValidProtoConfiguration) {\n  
envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy config{};\n\n  config.set_stat_prefix(\"my_stat_prefix\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  DubboProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context);\n  EXPECT_TRUE(factory.isTerminalFilter());\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST_F(DubboFilterConfigTest, DubboProxyWithEmptyProto) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  DubboProxyFilterConfigFactory factory;\n  envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy config =\n      *dynamic_cast<envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy*>(\n          factory.createEmptyConfigProto().get());\n  config.set_stat_prefix(\"my_stat_prefix\");\n\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\n// Test config with an explicitly defined router filter.\nTEST_F(DubboFilterConfigTest, DubboProxyWithExplicitRouterConfig) {\n  const std::string yaml = R\"EOF(\n    stat_prefix: dubbo\n    route_config:\n      name: local_route\n    dubbo_filters:\n      - name: envoy.filters.dubbo.router\n    )EOF\";\n\n  DubboProxyProto config = parseDubboProxyFromV3Yaml(yaml);\n  testConfig(config);\n}\n\n// Test config with an unknown filter.\nTEST_F(DubboFilterConfigTest, DubboProxyWithUnknownFilter) {\n  const std::string yaml = R\"EOF(\n    stat_prefix: dubbo\n    route_config:\n      name: local_route\n    dubbo_filters:\n      - name: no_such_filter\n      - name: envoy.filters.dubbo.router\n    )EOF\";\n\n  DubboProxyProto config = parseDubboProxyFromV3Yaml(yaml);\n\n  EXPECT_THROW_WITH_REGEX(factory_.createFilterFactoryFromProto(config, context_), EnvoyException,\n                        
  \"no_such_filter\");\n}\n\n// Test config with multiple filters.\nTEST_F(DubboFilterConfigTest, DubboProxyWithMultipleFilters) {\n  const std::string yaml = R\"EOF(\n    stat_prefix: ingress\n    route_config:\n      name: local_route\n    dubbo_filters:\n      - name: envoy.filters.dubbo.mock_filter\n        config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            name: test_service\n      - name: envoy.filters.dubbo.router\n    )EOF\";\n\n  DubboFilters::MockFilterConfigFactory factory;\n  Registry::InjectFactory<DubboFilters::NamedDubboFilterConfigFactory> registry(factory);\n\n  DubboProxyProto config = parseDubboProxyFromV3Yaml(yaml);\n  testConfig(config);\n\n  EXPECT_EQ(1, factory.config_struct_.fields_size());\n  EXPECT_EQ(\"test_service\", factory.config_struct_.fields().at(\"name\").string_value());\n  EXPECT_EQ(\"dubbo.ingress.\", factory.config_stat_prefix_);\n}\n\nTEST_F(DubboFilterConfigTest, CreateFilterChain) {\n  const std::string yaml = R\"EOF(\n    stat_prefix: ingress\n    route_config:\n      name: local_route\n    dubbo_filters:\n      - name: envoy.filters.dubbo.mock_filter\n        config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            name: test_service\n      - name: envoy.filters.dubbo.router\n    )EOF\";\n\n  DubboFilters::MockFilterConfigFactory factory;\n  Registry::InjectFactory<DubboFilters::NamedDubboFilterConfigFactory> registry(factory);\n\n  DubboProxyProto dubbo_config = parseDubboProxyFromV3Yaml(yaml);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  DubboFilters::MockFilterChainFactoryCallbacks callbacks;\n  ConfigImpl config(dubbo_config, context);\n  EXPECT_CALL(callbacks, addDecoderFilter(_)).Times(2);\n  config.createFilterChain(callbacks);\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc",
    "content": "#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.validate.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/app_exception.h\"\n#include \"extensions/filters/network/dubbo_proxy/config.h\"\n#include \"extensions/filters/network/dubbo_proxy/conn_manager.h\"\n#include \"extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/message_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/extensions/filters/network/dubbo_proxy/mocks.h\"\n#include \"test/extensions/filters/network/dubbo_proxy/utility.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nusing ConfigDubboProxy = envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy;\n\nclass ConnectionManagerTest;\nclass TestConfigImpl : public ConfigImpl {\npublic:\n  TestConfigImpl(ConfigDubboProxy proto_config, Server::Configuration::MockFactoryContext& context,\n                 DubboFilterStats& stats)\n      : ConfigImpl(proto_config, context), stats_(stats) {}\n\n  // ConfigImpl\n  DubboFilterStats& stats() override { return stats_; }\n  void createFilterChain(DubboFilters::FilterChainFactoryCallbacks& callbacks) override {\n    if (setupChain) {\n      for (auto& decoder : decoder_filters_) {\n        callbacks.addDecoderFilter(decoder);\n      }\n      for (auto& encoder : encoder_filters_) {\n        
callbacks.addEncoderFilter(encoder);\n      }\n      return;\n    }\n\n    if (codec_filter_) {\n      callbacks.addFilter(codec_filter_);\n    }\n  }\n\n  void setupFilterChain(int num_decoder_filters, int num_encoder_filters) {\n    for (int i = 0; i < num_decoder_filters; i++) {\n      decoder_filters_.push_back(std::make_shared<NiceMock<DubboFilters::MockDecoderFilter>>());\n    }\n    for (int i = 0; i < num_encoder_filters; i++) {\n      encoder_filters_.push_back(std::make_shared<NiceMock<DubboFilters::MockEncoderFilter>>());\n    }\n    setupChain = true;\n  }\n\n  void expectFilterCallbacks() {\n    for (auto& decoder : decoder_filters_) {\n      EXPECT_CALL(*decoder, setDecoderFilterCallbacks(_));\n    }\n    for (auto& encoder : encoder_filters_) {\n      EXPECT_CALL(*encoder, setEncoderFilterCallbacks(_));\n    }\n  }\n\n  void expectOnDestroy() {\n    for (auto& decoder : decoder_filters_) {\n      EXPECT_CALL(*decoder, onDestroy());\n    }\n\n    for (auto& encoder : encoder_filters_) {\n      EXPECT_CALL(*encoder, onDestroy());\n    }\n  }\n\n  ProtocolPtr createProtocol() override {\n    if (protocol_) {\n      return ProtocolPtr{protocol_};\n    }\n    return ConfigImpl::createProtocol();\n  }\n\n  Router::RouteConstSharedPtr route(const MessageMetadata& metadata,\n                                    uint64_t random_value) const override {\n    if (route_) {\n      return route_;\n    }\n    return ConfigImpl::route(metadata, random_value);\n  }\n\n  DubboFilters::CodecFilterSharedPtr codec_filter_;\n  DubboFilterStats& stats_;\n  MockSerializer* serializer_{};\n  MockProtocol* protocol_{};\n  std::shared_ptr<Router::MockRoute> route_;\n\n  NiceMock<DubboFilters::MockFilterChainFactory> filter_factory_;\n  std::vector<std::shared_ptr<DubboFilters::MockDecoderFilter>> decoder_filters_;\n  std::vector<std::shared_ptr<DubboFilters::MockEncoderFilter>> encoder_filters_;\n  bool setupChain = false;\n};\n\nclass ConnectionManagerTest : public 
testing::Test {\npublic:\n  ConnectionManagerTest() : stats_(DubboFilterStats::generateStats(\"test.\", store_)) {}\n  ~ConnectionManagerTest() override {\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  }\n\n  TimeSource& timeSystem() { return factory_context_.dispatcher().timeSource(); }\n\n  void initializeFilter() { initializeFilter(\"\"); }\n\n  void initializeFilter(const std::string& yaml) {\n    for (const auto& counter : store_.counters()) {\n      counter->reset();\n    }\n\n    if (!yaml.empty()) {\n      TestUtility::loadFromYaml(yaml, proto_config_);\n      TestUtility::validate(proto_config_);\n    }\n\n    proto_config_.set_stat_prefix(\"test\");\n    config_ = std::make_unique<TestConfigImpl>(proto_config_, factory_context_, stats_);\n    if (custom_serializer_) {\n      config_->serializer_ = custom_serializer_;\n    }\n    if (custom_protocol_) {\n      config_->protocol_ = custom_protocol_;\n    }\n\n    ON_CALL(random_, random()).WillByDefault(Return(42));\n    conn_manager_ = std::make_unique<ConnectionManager>(\n        *config_, random_, filter_callbacks_.connection_.dispatcher_.timeSource());\n    conn_manager_->initializeReadFilterCallbacks(filter_callbacks_);\n    conn_manager_->onNewConnection();\n\n    // NOP currently.\n    conn_manager_->onAboveWriteBufferHighWatermark();\n    conn_manager_->onBelowWriteBufferLowWatermark();\n  }\n\n  void writeHessianErrorResponseMessage(Buffer::Instance& buffer, bool is_event,\n                                        int64_t request_id) {\n    uint8_t msg_type = 0x42; // request message, two_way, not event\n\n    if (is_event) {\n      msg_type = msg_type | 0x20;\n    }\n\n    buffer.add(std::string{'\\xda', '\\xbb'});\n    buffer.add(static_cast<void*>(&msg_type), 1);\n    buffer.add(std::string{0x46});                     // Response status\n    addInt64(buffer, request_id);                      // Request Id\n    buffer.add(std::string{0x00, 0x00, 0x00, 0x06,     // 
Body Length\n                           '\\x91',                     // return type, exception\n                           0x05, 't', 'e', 's', 't'}); // return body\n  }\n\n  void writeHessianExceptionResponseMessage(Buffer::Instance& buffer, bool is_event,\n                                            int64_t request_id) {\n    uint8_t msg_type = 0x42; // request message, two_way, not event\n\n    if (is_event) {\n      msg_type = msg_type | 0x20;\n    }\n\n    buffer.add(std::string{'\\xda', '\\xbb'});\n    buffer.add(static_cast<void*>(&msg_type), 1);\n    buffer.add(std::string{0x14});\n    addInt64(buffer, request_id);                      // Request Id\n    buffer.add(std::string{0x00, 0x00, 0x00, 0x06,     // Body Length\n                           '\\x90',                     // return type, exception\n                           0x05, 't', 'e', 's', 't'}); // return body\n  }\n\n  void writeInvalidResponseMessage(Buffer::Instance& buffer) {\n    buffer.add(std::string{\n        '\\xda', '\\xbb', 0x43, 0x14, // Response Message Header, illegal serialization id\n        0x00,   0x00,   0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // Request Id\n        0x00,   0x00,   0x00, 0x06,                         // Body Length\n        '\\x94',                                             // return type\n        0x05,   't',    'e',  's',  't',                    // return body\n    });\n  }\n\n  void writeInvalidRequestMessage(Buffer::Instance& buffer) {\n    buffer.add(std::string{\n        '\\xda', '\\xbb', '\\xc3', 0x00, // Response Message Header, illegal serialization id\n        0x00,   0x00,   0x00,   0x00, 0x00, 0x00, 0x00, 0x01, // Request Id\n        0x00,   0x00,   0x00,   0x16,                         // Body Length\n        0x05,   '2',    '.',    '0',  '.',  '2',              // Dubbo version\n        0x04,   't',    'e',    's',  't',                    // Service name\n        0x05,   '0',    '.',    '0',  '.',  '0',              // Service version\n        
0x04,   't',    'e',    's',  't',                    // method name\n    });\n  }\n\n  void writePartialHessianResponseMessage(Buffer::Instance& buffer, bool is_event,\n                                          int64_t request_id, bool start) {\n\n    uint8_t msg_type = 0x42; // request message, two_way, not event\n\n    if (is_event) {\n      msg_type = msg_type | 0x20;\n    }\n\n    if (start) {\n      buffer.add(std::string{'\\xda', '\\xbb'});\n      buffer.add(static_cast<void*>(&msg_type), 1);\n      buffer.add(std::string{0x14});\n      addInt64(buffer, request_id);                  // Request Id\n      buffer.add(std::string{0x00, 0x00, 0x00, 0x06, // Body Length\n                             '\\x94'});               // return type, exception\n    } else {\n      buffer.add(std::string{0x05, 't', 'e', 's', 't'}); // return body\n    }\n  }\n\n  void writeHessianResponseMessage(Buffer::Instance& buffer, bool is_event, int64_t request_id) {\n    uint8_t msg_type = 0x42; // request message, two_way, not event\n\n    if (is_event) {\n      msg_type = msg_type | 0x20;\n    }\n\n    buffer.add(std::string{'\\xda', '\\xbb'});\n    buffer.add(static_cast<void*>(&msg_type), 1);\n    buffer.add(std::string{0x14});\n    addInt64(buffer, request_id);                              // Request Id\n    buffer.add(std::string{0x00, 0x00, 0x00, 0x06,             // Body Length\n                           '\\x94', 0x05, 't', 'e', 's', 't'}); // return type, exception\n  }\n\n  void writePartialHessianRequestMessage(Buffer::Instance& buffer, bool is_one_way, bool is_event,\n                                         int64_t request_id, bool start) {\n    uint8_t msg_type = 0xc2; // request message, two_way, not event\n    if (is_one_way) {\n      msg_type = msg_type & 0xbf;\n    }\n\n    if (is_event) {\n      msg_type = msg_type | 0x20;\n    }\n\n    if (start) {\n      buffer.add(std::string{'\\xda', '\\xbb'});\n      buffer.add(static_cast<void*>(&msg_type), 1);\n      
buffer.add(std::string{0x00});\n      addInt64(buffer, request_id);                           // Request Id\n      buffer.add(std::string{0x00, 0x00, 0x00, 0x16,          // Body Length\n                             0x05, '2', '.', '0', '.', '2'}); // Dubbo version\n    } else {\n      buffer.add(std::string{\n          0x04, 't', 'e', 's', 't',      // Service name\n          0x05, '0', '.', '0', '.', '0', // Service version\n          0x04, 't', 'e', 's', 't',      // method name\n      });\n    }\n  }\n\n  void writeHessianRequestMessage(Buffer::Instance& buffer, bool is_one_way, bool is_event,\n                                  int64_t request_id) {\n    uint8_t msg_type = 0xc2; // request message, two_way, not event\n    if (is_one_way) {\n      msg_type = msg_type & 0xbf;\n    }\n\n    if (is_event) {\n      msg_type = msg_type | 0x20;\n    }\n\n    buffer.add(std::string{'\\xda', '\\xbb'});\n    buffer.add(static_cast<void*>(&msg_type), 1);\n    buffer.add(std::string{0x00});\n    addInt64(buffer, request_id);                            // Request Id\n    buffer.add(std::string{0x00, 0x00, 0x00, 0x16,           // Body Length\n                           0x05, '2',  '.',  '0',  '.', '2', // Dubbo version\n                           0x04, 't',  'e',  's',  't',      // Service name\n                           0x05, '0',  '.',  '0',  '.', '0', // Service version\n                           0x04, 't',  'e',  's',  't'});    // method name\n  }\n\n  void writeHessianHeartbeatRequestMessage(Buffer::Instance& buffer, int64_t request_id) {\n    uint8_t msg_type = 0xc2; // request message, two_way, not event\n    msg_type = msg_type | 0x20;\n\n    buffer.add(std::string{'\\xda', '\\xbb'});\n    buffer.add(static_cast<void*>(&msg_type), 1);\n    buffer.add(std::string{0x14});\n    addInt64(buffer, request_id);                    // Request Id\n    buffer.add(std::string{0x00, 0x00, 0x00, 0x01}); // Body Length\n    buffer.add(std::string{0x01});                   // 
Body\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  Stats::TestUtil::TestStore store_;\n  DubboFilterStats stats_;\n  ConfigDubboProxy proto_config_;\n\n  std::unique_ptr<TestConfigImpl> config_;\n\n  Buffer::OwnedImpl buffer_;\n  Buffer::OwnedImpl write_buffer_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  std::unique_ptr<ConnectionManager> conn_manager_;\n  MockSerializer* custom_serializer_{};\n  MockProtocol* custom_protocol_{};\n};\n\nTEST_F(ConnectionManagerTest, OnDataHandlesRequestTwoWay) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 0x0F);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_twoway\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_oneway\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_event\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n}\n\nTEST_F(ConnectionManagerTest, OnDataHandlesRequestOneWay) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, true, false, 0x0F);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_twoway\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_oneway\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_event\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n  EXPECT_EQ(0U, 
store_.counter(\"test.response\").value());\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, OnDataHandlesHeartbeatEvent) {\n  initializeFilter();\n  writeHessianHeartbeatRequestMessage(buffer_, 0x0F);\n\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        ProtocolPtr protocol = conn_manager_->config().createProtocol();\n        MessageMetadataSharedPtr metadata(std::make_shared<MessageMetadata>());\n        auto result = protocol->decodeHeader(buffer, metadata);\n        EXPECT_TRUE(result.second);\n        const DubboProxy::ContextImpl& ctx = *static_cast<const ContextImpl*>(result.first.get());\n        EXPECT_TRUE(ctx.isHeartbeat());\n        EXPECT_TRUE(metadata->hasResponseStatus());\n        EXPECT_FALSE(metadata->isTwoWay());\n        EXPECT_EQ(ProtocolType::Dubbo, metadata->protocolType());\n        EXPECT_EQ(metadata->responseStatus(), ResponseStatus::Ok);\n        EXPECT_EQ(metadata->messageType(), MessageType::HeartbeatResponse);\n        buffer.drain(ctx.headerSize());\n      }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0U, buffer_.length());\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_event\").value());\n}\n\nTEST_F(ConnectionManagerTest, HandlesHeartbeatWithException) {\n  custom_protocol_ = new NiceMock<MockProtocol>();\n  initializeFilter();\n\n  EXPECT_CALL(*custom_protocol_, encode(_, _, _, _)).WillOnce(Return(false));\n\n  MessageMetadataSharedPtr meta = std::make_shared<MessageMetadata>();\n  EXPECT_THROW_WITH_MESSAGE(conn_manager_->onHeartbeat(meta), EnvoyException,\n                            
\"failed to encode heartbeat message\");\n}\n\nTEST_F(ConnectionManagerTest, OnDataHandlesMessageSplitAcrossBuffers) {\n  initializeFilter();\n  writePartialHessianRequestMessage(buffer_, false, false, 0x0F, true);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0, buffer_.length());\n\n  // Complete the buffer\n  writePartialHessianRequestMessage(buffer_, false, false, 0x0F, false);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  EXPECT_EQ(1U, store_.counter(\"test.request_twoway\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_decoding_error\").value());\n}\n\nTEST_F(ConnectionManagerTest, OnDataHandlesProtocolError) {\n  initializeFilter();\n  writeInvalidRequestMessage(buffer_);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(0, buffer_.length());\n\n  // Sniffing is now disabled.\n  bool one_way = true;\n  writeHessianRequestMessage(buffer_, one_way, false, 0x0F);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n}\n\nTEST_F(ConnectionManagerTest, OnDataHandlesProtocolErrorOnWrite) {\n  initializeFilter();\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto decoder_filter = config_->decoder_filters_[0];\n\n  // Start the read buffer\n  writePartialHessianRequestMessage(buffer_, false, false, 0x0F, true);\n  uint64_t len = buffer_.length();\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  len -= buffer_.length();\n\n  // Disable 
sniffing\n  writeInvalidRequestMessage(write_buffer_);\n\n  callbacks->startUpstreamResponse();\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_NE(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n  EXPECT_EQ(1U, store_.counter(\"test.response_decoding_error\").value());\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n}\n\nTEST_F(ConnectionManagerTest, OnDataStopsSniffingWithTooManyPendingCalls) {\n  initializeFilter();\n  config_->setupFilterChain(1, 0);\n  // config_->expectOnDestroy();\n  auto decoder_filter = config_->decoder_filters_[0];\n\n  int request_count = 64;\n  for (int i = 0; i < request_count; i++) {\n    writeHessianRequestMessage(buffer_, false, false, i);\n  }\n\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_)).Times(request_count);\n  EXPECT_CALL(*decoder_filter, onDestroy()).Times(request_count);\n  EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _)).Times(request_count);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(64U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  // Sniffing is now disabled.\n  writeInvalidRequestMessage(buffer_);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, OnWriteHandlesResponse) {\n  uint64_t request_id = 100;\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, request_id);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto decoder_filter = config_->decoder_filters_[0];\n\n  
DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  writeHessianResponseMessage(write_buffer_, false, request_id);\n\n  callbacks->startUpstreamResponse();\n\n  EXPECT_EQ(callbacks->requestId(), request_id);\n  EXPECT_EQ(callbacks->connection(), &(filter_callbacks_.connection_));\n  EXPECT_GE(callbacks->streamId(), 0);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_decoding_error\").value());\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, HandlesResponseContainExceptionInfo) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 1);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  
EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_decoding_success\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  writeHessianExceptionResponseMessage(write_buffer_, false, 1);\n\n  callbacks->startUpstreamResponse();\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_decoding_success\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_business_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_decoding_error\").value());\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, HandlesResponseError) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 1);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  
writeHessianErrorResponseMessage(write_buffer_, false, 1);\n\n  callbacks->startUpstreamResponse();\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_error\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_decoding_error\").value());\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, OnWriteHandlesResponseException) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 1);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n\n  writeInvalidRequestMessage(write_buffer_);\n\n  callbacks->startUpstreamResponse();\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Reset, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(0U, 
store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.local_response_business_exception\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_decoding_error\").value());\n}\n\n// Tests stop iteration/resume with multiple filters.\nTEST_F(ConnectionManagerTest, OnDataResumesWithNextFilter) {\n  initializeFilter();\n\n  config_->setupFilterChain(2, 0);\n  config_->expectOnDestroy();\n  auto first_filter = config_->decoder_filters_[0];\n  auto second_filter = config_->decoder_filters_[1];\n\n  writeHessianRequestMessage(buffer_, false, false, 0x0F);\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*first_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*second_filter, setDecoderFilterCallbacks(_));\n\n  // First filter stops iteration.\n  {\n    EXPECT_CALL(*first_filter, onMessageDecoded(_, _))\n        .WillOnce(Return(FilterStatus::StopIteration));\n    EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n    EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n    EXPECT_EQ(1U,\n              store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n  }\n\n  // Resume processing.\n  {\n    InSequence s;\n    EXPECT_CALL(*first_filter, onMessageDecoded(_, _)).WillOnce(Return(FilterStatus::Continue));\n    EXPECT_CALL(*second_filter, onMessageDecoded(_, _)).WillOnce(Return(FilterStatus::Continue));\n    callbacks->continueDecoding();\n  }\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\n// Tests multiple filters are invoked in the correct order.\nTEST_F(ConnectionManagerTest, OnDataHandlesDubboCallWithMultipleFilters) {\n  initializeFilter();\n\n  config_->setupFilterChain(2, 0);\n  
config_->expectOnDestroy();\n  auto first_filter = config_->decoder_filters_[0];\n  auto second_filter = config_->decoder_filters_[1];\n\n  writeHessianRequestMessage(buffer_, false, false, 0x0F);\n\n  InSequence s;\n  EXPECT_CALL(*first_filter, onMessageDecoded(_, _)).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*second_filter, onMessageDecoded(_, _)).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, PipelinedRequestAndResponse) {\n  initializeFilter();\n\n  config_->setupFilterChain(1, 0);\n  auto decoder_filter = config_->decoder_filters_[0];\n\n  writeHessianRequestMessage(buffer_, false, false, 1);\n  writeHessianRequestMessage(buffer_, false, false, 2);\n\n  std::list<DubboFilters::DecoderFilterCallbacks*> callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillRepeatedly(Invoke(\n          [&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks.push_back(&cb); }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(2U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n  EXPECT_EQ(2U, store_.counter(\"test.request\").value());\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(2);\n  EXPECT_CALL(*decoder_filter, onDestroy()).Times(2);\n\n  writeHessianResponseMessage(write_buffer_, false, 0x01);\n  callbacks.front()->startUpstreamResponse();\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete,\n            callbacks.front()->upstreamData(write_buffer_));\n  callbacks.pop_front();\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, 
store_.counter(\"test.response_success\").value());\n\n  writeHessianResponseMessage(write_buffer_, false, 0x02);\n  callbacks.front()->startUpstreamResponse();\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete,\n            callbacks.front()->upstreamData(write_buffer_));\n  callbacks.pop_front();\n  EXPECT_EQ(2U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(2U, store_.counter(\"test.response_success\").value());\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, ResetDownstreamConnection) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 0x0F);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_));\n  callbacks->resetDownstreamConnection();\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, OnEvent) {\n  // No active calls\n  {\n    initializeFilter();\n    conn_manager_->onEvent(Network::ConnectionEvent::RemoteClose);\n    conn_manager_->onEvent(Network::ConnectionEvent::LocalClose);\n    
EXPECT_EQ(0U, store_.counter(\"test.cx_destroy_local_with_active_rq\").value());\n    EXPECT_EQ(0U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n  }\n\n  // Remote close mid-request\n  {\n    initializeFilter();\n\n    writePartialHessianRequestMessage(buffer_, false, false, 1, true);\n    EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n    conn_manager_->onEvent(Network::ConnectionEvent::RemoteClose);\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n    EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n  }\n\n  // Local close mid-request\n  {\n    initializeFilter();\n    writePartialHessianRequestMessage(buffer_, false, false, 1, true);\n    EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n    conn_manager_->onEvent(Network::ConnectionEvent::LocalClose);\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n    EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_local_with_active_rq\").value());\n\n    buffer_.drain(buffer_.length());\n  }\n\n  // Remote close before response\n  {\n    initializeFilter();\n    writeHessianRequestMessage(buffer_, false, false, 1);\n    EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n    conn_manager_->onEvent(Network::ConnectionEvent::RemoteClose);\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n    EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n\n    buffer_.drain(buffer_.length());\n  }\n\n  // Local close before response\n  {\n    initializeFilter();\n    
writeHessianRequestMessage(buffer_, false, false, 1);\n    EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n    conn_manager_->onEvent(Network::ConnectionEvent::LocalClose);\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n    EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_local_with_active_rq\").value());\n\n    buffer_.drain(buffer_.length());\n  }\n}\nTEST_F(ConnectionManagerTest, ResponseWithUnknownSequenceID) {\n  initializeFilter();\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  writeHessianRequestMessage(buffer_, false, false, 1);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  writeHessianResponseMessage(write_buffer_, false, 10);\n\n  callbacks->startUpstreamResponse();\n\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Reset, callbacks->upstreamData(write_buffer_));\n  EXPECT_EQ(1U, store_.counter(\"test.response_decoding_error\").value());\n}\n\nTEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalReply) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 1);\n\n  config_->setupFilterChain(2, 0);\n  config_->expectOnDestroy();\n  auto& first_filter = config_->decoder_filters_[0];\n  auto& second_filter = config_->decoder_filters_[1];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*first_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*second_filter, setDecoderFilterCallbacks(_));\n\n  const 
std::string fake_response(\"mock dubbo response\");\n  NiceMock<DubboFilters::MockDirectResponse> direct_response;\n  EXPECT_CALL(direct_response, encode(_, _, _))\n      .WillOnce(Invoke([&](MessageMetadata&, Protocol&,\n                           Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType {\n        buffer.add(fake_response);\n        return DubboFilters::DirectResponse::ResponseType::SuccessReply;\n      }));\n\n  // First filter sends local reply.\n  EXPECT_CALL(*first_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr) -> FilterStatus {\n        callbacks->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound);\n        callbacks->sendLocalReply(direct_response, false);\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        EXPECT_EQ(fake_response, buffer.toString());\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(SerializationType::Hessian2, callbacks->serializationType());\n  EXPECT_EQ(ProtocolType::Dubbo, callbacks->protocolType());\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.local_response_success\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalErrorReply) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 1);\n\n  config_->setupFilterChain(2, 0);\n  config_->expectOnDestroy();\n  auto& first_filter = config_->decoder_filters_[0];\n  auto& second_filter = 
config_->decoder_filters_[1];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*first_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*second_filter, setDecoderFilterCallbacks(_));\n\n  const std::string fake_response(\"mock dubbo response\");\n  NiceMock<DubboFilters::MockDirectResponse> direct_response;\n  EXPECT_CALL(direct_response, encode(_, _, _))\n      .WillOnce(Invoke([&](MessageMetadata&, Protocol&,\n                           Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType {\n        buffer.add(fake_response);\n        return DubboFilters::DirectResponse::ResponseType::ErrorReply;\n      }));\n\n  // First filter sends local reply.\n  EXPECT_CALL(*first_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr) -> FilterStatus {\n        callbacks->sendLocalReply(direct_response, false);\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        EXPECT_EQ(fake_response, buffer.toString());\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.local_response_error\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, TwoWayRequestWithEndStream) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 0x0F);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = 
config_->decoder_filters_[0];\n\n  EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr) -> FilterStatus {\n        return FilterStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite))\n      .Times(1);\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(conn_manager_->onData(buffer_, true), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n}\n\nTEST_F(ConnectionManagerTest, OneWayRequestWithEndStream) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, true, false, 0x0F);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr) -> FilterStatus {\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite))\n      .Times(1);\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(conn_manager_->onData(buffer_, true), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n}\n\nTEST_F(ConnectionManagerTest, EmptyRequestData) {\n  initializeFilter();\n  buffer_.drain(buffer_.length());\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(0);\n  EXPECT_EQ(conn_manager_->onData(buffer_, true), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, StopHandleRequest) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 
0x0F);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  ON_CALL(*decoder_filter, onMessageDecoded(_, _))\n      .WillByDefault(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr) -> FilterStatus {\n        return FilterStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite))\n      .Times(0);\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(0);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n}\n\nTEST_F(ConnectionManagerTest, HandlesHeartbeatEventWithConnectionClose) {\n  initializeFilter();\n  writeHessianHeartbeatRequestMessage(buffer_, 0x0F);\n\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, false)).Times(0);\n\n  filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_event\").value());\n}\n\nTEST_F(ConnectionManagerTest, SendsLocalReplyWithCloseConnection) {\n  initializeFilter();\n\n  const std::string fake_response(\"mock dubbo response\");\n  NiceMock<DubboFilters::MockDirectResponse> direct_response;\n  EXPECT_CALL(direct_response, encode(_, _, _))\n      .WillOnce(Invoke([&](MessageMetadata&, Protocol&,\n                           Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType {\n        buffer.add(fake_response);\n        return DubboFilters::DirectResponse::ResponseType::ErrorReply;\n      }));\n  
EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite))\n      .Times(1);\n\n  MessageMetadata metadata;\n  conn_manager_->sendLocalReply(metadata, direct_response, true);\n  EXPECT_EQ(1U, store_.counter(\"test.local_response_error\").value());\n\n  // The connection closed.\n  EXPECT_CALL(direct_response, encode(_, _, _)).Times(0);\n  conn_manager_->sendLocalReply(metadata, direct_response, true);\n}\n\nTEST_F(ConnectionManagerTest, ContinueDecodingWithHalfClose) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, true, false, 0x0F);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr) -> FilterStatus {\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite))\n      .Times(1);\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(conn_manager_->onData(buffer_, true), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n\n  conn_manager_->continueDecoding();\n}\n\nTEST_F(ConnectionManagerTest, RoutingSuccess) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 0x0F);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  config_->route_ = std::make_shared<Router::MockRoute>();\n  
EXPECT_EQ(config_->route_, callbacks->route());\n\n  // Use the cache.\n  EXPECT_NE(nullptr, callbacks->route());\n}\n\nTEST_F(ConnectionManagerTest, RoutingFailure) {\n  initializeFilter();\n  writePartialHessianRequestMessage(buffer_, false, false, 0x0F, true);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _)).Times(0);\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  // The metadata is nullptr.\n  config_->route_ = std::make_shared<Router::MockRoute>();\n  EXPECT_EQ(nullptr, callbacks->route());\n}\n\nTEST_F(ConnectionManagerTest, ResetStream) {\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, 0x0F);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  callbacks->resetStream();\n}\n\nTEST_F(ConnectionManagerTest, NeedMoreDataForHandleResponse) {\n  uint64_t request_id = 100;\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, request_id);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, 
setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  writePartialHessianRequestMessage(write_buffer_, false, false, 0x0F, true);\n\n  callbacks->startUpstreamResponse();\n\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::MoreData, callbacks->upstreamData(write_buffer_));\n}\n\nTEST_F(ConnectionManagerTest, PendingMessageEnd) {\n  uint64_t request_id = 100;\n  initializeFilter();\n  writeHessianRequestMessage(buffer_, false, false, request_id);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr) -> FilterStatus {\n        return FilterStatus::StopIteration;\n      }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n}\n\nTEST_F(ConnectionManagerTest, Routing) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test\nprotocol_type: Dubbo\nserialization_type: Hessian2\nroute_config:\n  - name: test1\n    interface: org.apache.dubbo.demo.DemoService\n    routes:\n      - match:\n          method:\n            name:\n              safe_regex:\n                google_re2: {}\n                regex: \"(.*?)\"\n        
route:\n            cluster: user_service_dubbo_server\n)EOF\";\n\n  initializeFilter(yaml);\n  writeHessianRequestMessage(buffer_, false, false, 100);\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus {\n        auto invo = static_cast<const RpcInvocationBase*>(&metadata->invocationInfo());\n        auto data = const_cast<RpcInvocationBase*>(invo);\n        data->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n        data->setMethodName(\"test\");\n        return FilterStatus::StopIteration;\n      }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  Router::RouteConstSharedPtr route = callbacks->route();\n  EXPECT_NE(nullptr, route);\n  EXPECT_NE(nullptr, route->routeEntry());\n  EXPECT_EQ(\"user_service_dubbo_server\", route->routeEntry()->clusterName());\n}\n\nTEST_F(ConnectionManagerTest, TransportEndWithConnectionClose) {\n  initializeFilter();\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  writeHessianRequestMessage(buffer_, false, false, 1);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), 
Network::FilterStatus::StopIteration);\n\n  writeHessianResponseMessage(write_buffer_, false, 1);\n\n  callbacks->startUpstreamResponse();\n\n  filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite);\n\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Reset, callbacks->upstreamData(write_buffer_));\n  EXPECT_EQ(1U, store_.counter(\"test.response_error_caused_connection_close\").value());\n}\n\nTEST_F(ConnectionManagerTest, MessageDecodedReturnStopIteration) {\n  initializeFilter();\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  // The sendLocalReply is not called and the message type is not oneway,\n  // the ActiveMessage object is not destroyed.\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(0);\n\n  writeHessianRequestMessage(buffer_, false, false, 1);\n\n  size_t buf_size = buffer_.length();\n  EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr ctx) -> FilterStatus {\n        EXPECT_EQ(ctx->messageSize(), buf_size);\n        return FilterStatus::StopIteration;\n      }));\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  // Buffer data should be consumed.\n  EXPECT_EQ(0, buffer_.length());\n\n  // The finalizeRequest should not be called.\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n}\n\nTEST_F(ConnectionManagerTest, SendLocalReplyInMessageDecoded) {\n  initializeFilter();\n\n  config_->setupFilterChain(1, 0);\n  config_->expectOnDestroy();\n  auto& decoder_filter = config_->decoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  
EXPECT_CALL(*decoder_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  const std::string fake_response(\"mock dubbo response\");\n  NiceMock<DubboFilters::MockDirectResponse> direct_response;\n  EXPECT_CALL(direct_response, encode(_, _, _))\n      .WillOnce(Invoke([&](MessageMetadata&, Protocol&,\n                           Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType {\n        buffer.add(fake_response);\n        return DubboFilters::DirectResponse::ResponseType::ErrorReply;\n      }));\n  EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr) -> FilterStatus {\n        EXPECT_EQ(1, conn_manager_->getActiveMessagesForTest().size());\n        EXPECT_NE(nullptr, conn_manager_->getActiveMessagesForTest().front()->metadata());\n        callbacks->sendLocalReply(direct_response, false);\n        return FilterStatus::StopIteration;\n      }));\n\n  // The sendLocalReply is called, the ActiveMessage object should be destroyed.\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n\n  writeHessianRequestMessage(buffer_, false, false, 1);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  // Buffer data should be consumed.\n  EXPECT_EQ(0, buffer_.length());\n\n  // The finalizeRequest should be called.\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n}\n\nTEST_F(ConnectionManagerTest, HandleResponseWithEncoderFilter) {\n  uint64_t request_id = 100;\n  initializeFilter();\n\n  writeHessianRequestMessage(buffer_, false, false, request_id);\n\n  config_->setupFilterChain(1, 1);\n  auto& decoder_filter = config_->decoder_filters_[0];\n  auto& encoder_filter = config_->encoder_filters_[0];\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter, 
setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_CALL(*encoder_filter, setEncoderFilterCallbacks(_)).Times(1);\n\n  EXPECT_CALL(*decoder_filter, onDestroy()).Times(1);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  writeHessianResponseMessage(write_buffer_, false, request_id);\n\n  callbacks->startUpstreamResponse();\n\n  EXPECT_EQ(callbacks->requestId(), request_id);\n  EXPECT_EQ(callbacks->connection(), &(filter_callbacks_.connection_));\n  EXPECT_GE(callbacks->streamId(), 0);\n\n  size_t expect_response_length = write_buffer_.length();\n  EXPECT_CALL(*encoder_filter, onMessageEncoded(_, _))\n      .WillOnce(\n          Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) -> FilterStatus {\n            EXPECT_EQ(metadata->requestId(), request_id);\n            EXPECT_EQ(ctx->messageSize(), expect_response_length);\n            return FilterStatus::Continue;\n          }));\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n  EXPECT_CALL(*encoder_filter, onDestroy()).Times(1);\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_success\").value());\n}\n\nTEST_F(ConnectionManagerTest, HandleResponseWithCodecFilter) {\n  uint64_t request_id = 100;\n  initializeFilter();\n  config_->codec_filter_ = std::make_unique<DubboFilters::MockCodecFilter>();\n  auto mock_codec_filter =\n      static_cast<DubboFilters::MockCodecFilter*>(config_->codec_filter_.get());\n\n  
writeHessianRequestMessage(buffer_, false, false, request_id);\n\n  DubboFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*mock_codec_filter, setDecoderFilterCallbacks(_))\n      .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*mock_codec_filter, onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus {\n        EXPECT_EQ(metadata->requestId(), request_id);\n        return FilterStatus::Continue;\n      }));\n\n  EXPECT_CALL(*mock_codec_filter, setEncoderFilterCallbacks(_)).Times(1);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.gauge(\"test.request_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  writeHessianResponseMessage(write_buffer_, false, request_id);\n\n  callbacks->startUpstreamResponse();\n\n  EXPECT_EQ(callbacks->requestId(), request_id);\n  EXPECT_EQ(callbacks->connection(), &(filter_callbacks_.connection_));\n  EXPECT_GE(callbacks->streamId(), 0);\n\n  size_t expect_response_length = write_buffer_.length();\n  EXPECT_CALL(*mock_codec_filter, onMessageEncoded(_, _))\n      .WillOnce(\n          Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) -> FilterStatus {\n            EXPECT_EQ(metadata->requestId(), request_id);\n            EXPECT_EQ(ctx->messageSize(), expect_response_length);\n            return FilterStatus::Continue;\n          }));\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n  EXPECT_CALL(*mock_codec_filter, onDestroy()).Times(1);\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, 
store_.counter(\"test.response_success\").value());\n}\n\nTEST_F(ConnectionManagerTest, AddDataWithStopAndContinue) {\n  InSequence s;\n  initializeFilter();\n  config_->setupFilterChain(3, 3);\n\n  uint64_t request_id = 100;\n\n  EXPECT_CALL(*config_->decoder_filters_[0], onMessageDecoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus {\n        EXPECT_EQ(metadata->requestId(), request_id);\n        return FilterStatus::Continue;\n      }));\n  EXPECT_CALL(*config_->decoder_filters_[1], onMessageDecoded(_, _))\n      .WillOnce(Return(FilterStatus::StopIteration))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*config_->decoder_filters_[2], onMessageDecoded(_, _))\n      .WillOnce(Return(FilterStatus::Continue));\n  writeHessianRequestMessage(buffer_, false, false, request_id);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  config_->decoder_filters_[1]->callbacks_->continueDecoding();\n\n  // For encode direction\n  EXPECT_CALL(*config_->encoder_filters_[0], onMessageEncoded(_, _))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus {\n        EXPECT_EQ(metadata->requestId(), request_id);\n        return FilterStatus::Continue;\n      }));\n  EXPECT_CALL(*config_->encoder_filters_[1], onMessageEncoded(_, _))\n      .WillOnce(Return(FilterStatus::StopIteration))\n      .WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*config_->encoder_filters_[2], onMessageEncoded(_, _))\n      .WillOnce(Return(FilterStatus::Continue));\n\n  writeHessianResponseMessage(write_buffer_, false, request_id);\n  config_->decoder_filters_[0]->callbacks_->startUpstreamResponse();\n  EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete,\n            config_->decoder_filters_[0]->callbacks_->upstreamData(write_buffer_));\n\n  config_->encoder_filters_[1]->callbacks_->continueEncoding();\n  
config_->expectOnDestroy();\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/decoder_test.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/decoder.h\"\n#include \"extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/message_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n\n#include \"test/extensions/filters/network/dubbo_proxy/mocks.h\"\n#include \"test/extensions/filters/network/dubbo_proxy/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nclass DecoderStateMachineTestBase {\npublic:\n  DecoderStateMachineTestBase() = default;\n  virtual ~DecoderStateMachineTestBase() { active_stream_.reset(); }\n\n  void initHandler() {\n    ON_CALL(delegate_, newStream(_, _))\n        .WillByDefault(Invoke([this](MessageMetadataSharedPtr data,\n                                     ContextSharedPtr ctx) -> ActiveStream* {\n          this->active_stream_ = std::make_shared<NiceMock<MockActiveStream>>(handler_, data, ctx);\n          return active_stream_.get();\n        }));\n  }\n\n  void initProtocolDecoder(MessageType type, int32_t body_size) {\n    ON_CALL(protocol_, decodeHeader(_, _))\n        .WillByDefault(\n            Invoke([=](Buffer::Instance&,\n                       MessageMetadataSharedPtr metadata) -> std::pair<ContextSharedPtr, bool> {\n              auto context = std::make_shared<ContextImpl>();\n              context->setHeaderSize(16);\n              context->setBodySize(body_size);\n              metadata->setMessageType(type);\n\n              return std::pair<ContextSharedPtr, bool>(context, true);\n            }));\n  }\n\n  NiceMock<MockProtocol> protocol_;\n  NiceMock<MockDecoderStateMachineDelegate> delegate_;\n  std::shared_ptr<NiceMock<MockActiveStream>> active_stream_;\n  NiceMock<MockStreamHandler> handler_;\n};\n\nclass 
DubboDecoderStateMachineTest : public DecoderStateMachineTestBase, public testing::Test {};\n\nclass DubboDecoderTest : public testing::Test {\npublic:\n  DubboDecoderTest() = default;\n  ~DubboDecoderTest() override = default;\n\n  NiceMock<MockProtocol> protocol_;\n  NiceMock<MockStreamHandler> handler_;\n  NiceMock<MockRequestDecoderCallbacks> request_callbacks_;\n  NiceMock<MockResponseDecoderCallbacks> response_callbacks_;\n};\n\nTEST_F(DubboDecoderStateMachineTest, EmptyData) {\n  EXPECT_CALL(protocol_, decodeHeader(_, _)).Times(1);\n  EXPECT_CALL(delegate_, newStream(_, _)).Times(0);\n  EXPECT_CALL(delegate_, onHeartbeat(_)).Times(0);\n\n  DecoderStateMachine dsm(protocol_, delegate_);\n  Buffer::OwnedImpl buffer;\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n}\n\nTEST_F(DubboDecoderStateMachineTest, OnlyHaveHeaderData) {\n  initHandler();\n  initProtocolDecoder(MessageType::Request, 1);\n\n  EXPECT_CALL(delegate_, onHeartbeat(_)).Times(0);\n  EXPECT_CALL(protocol_, decodeData(_, _, _)).WillOnce(Return(false));\n\n  Buffer::OwnedImpl buffer;\n  DecoderStateMachine dsm(protocol_, delegate_);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n}\n\nTEST_F(DubboDecoderStateMachineTest, RequestMessageCallbacks) {\n  initHandler();\n  initProtocolDecoder(MessageType::Request, 0);\n\n  EXPECT_CALL(delegate_, onHeartbeat(_)).Times(0);\n  EXPECT_CALL(protocol_, decodeData(_, _, _)).WillOnce(Return(true));\n  EXPECT_CALL(handler_, onStreamDecoded(_, _)).Times(1);\n\n  DecoderStateMachine dsm(protocol_, delegate_);\n  Buffer::OwnedImpl buffer;\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::Done);\n\n  EXPECT_EQ(active_stream_->metadata_->messageType(), MessageType::Request);\n}\n\nTEST_F(DubboDecoderStateMachineTest, ResponseMessageCallbacks) {\n  initHandler();\n  initProtocolDecoder(MessageType::Response, 0);\n\n  EXPECT_CALL(delegate_, onHeartbeat(_)).Times(0);\n  EXPECT_CALL(protocol_, decodeData(_, _, _)).WillOnce(Return(true));\n  
EXPECT_CALL(handler_, onStreamDecoded(_, _)).Times(1);\n\n  DecoderStateMachine dsm(protocol_, delegate_);\n  Buffer::OwnedImpl buffer;\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::Done);\n\n  EXPECT_EQ(active_stream_->metadata_->messageType(), MessageType::Response);\n}\n\nTEST_F(DubboDecoderStateMachineTest, SerializeRpcInvocationException) {\n  initHandler();\n  initProtocolDecoder(MessageType::Request, 0);\n\n  EXPECT_CALL(delegate_, newStream(_, _)).Times(1);\n  EXPECT_CALL(delegate_, onHeartbeat(_)).Times(0);\n  EXPECT_CALL(handler_, onStreamDecoded(_, _)).Times(0);\n\n  EXPECT_CALL(protocol_, decodeData(_, _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, ContextSharedPtr, MessageMetadataSharedPtr) -> bool {\n        throw EnvoyException(fmt::format(\"mock serialize exception\"));\n      }));\n\n  DecoderStateMachine dsm(protocol_, delegate_);\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_THROW_WITH_MESSAGE(dsm.run(buffer), EnvoyException, \"mock serialize exception\");\n  EXPECT_EQ(dsm.currentState(), ProtocolState::OnDecodeStreamData);\n}\n\nTEST_F(DubboDecoderStateMachineTest, SerializeRpcResultException) {\n  initHandler();\n  initProtocolDecoder(MessageType::Response, 0);\n\n  EXPECT_CALL(delegate_, newStream(_, _)).Times(1);\n  EXPECT_CALL(delegate_, onHeartbeat(_)).Times(0);\n  EXPECT_CALL(handler_, onStreamDecoded(_, _)).Times(0);\n\n  EXPECT_CALL(protocol_, decodeData(_, _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, ContextSharedPtr, MessageMetadataSharedPtr) -> bool {\n        throw EnvoyException(fmt::format(\"mock serialize exception\"));\n      }));\n\n  DecoderStateMachine dsm(protocol_, delegate_);\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_THROW_WITH_MESSAGE(dsm.run(buffer), EnvoyException, \"mock serialize exception\");\n  EXPECT_EQ(dsm.currentState(), ProtocolState::OnDecodeStreamData);\n}\n\nTEST_F(DubboDecoderStateMachineTest, ProtocolDecodeException) {\n  EXPECT_CALL(delegate_, newStream(_, _)).Times(0);\n  EXPECT_CALL(protocol_, 
decodeHeader(_, _))\n      .WillOnce(Invoke(\n          [](Buffer::Instance&, MessageMetadataSharedPtr) -> std::pair<ContextSharedPtr, bool> {\n            throw EnvoyException(fmt::format(\"mock protocol decode exception\"));\n          }));\n\n  DecoderStateMachine dsm(protocol_, delegate_);\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_THROW_WITH_MESSAGE(dsm.run(buffer), EnvoyException, \"mock protocol decode exception\");\n  EXPECT_EQ(dsm.currentState(), ProtocolState::OnDecodeStreamHeader);\n}\n\nTEST_F(DubboDecoderTest, NeedMoreDataForProtocolHeader) {\n  EXPECT_CALL(request_callbacks_, newStream()).Times(0);\n  EXPECT_CALL(protocol_, decodeHeader(_, _))\n      .WillOnce(Invoke(\n          [](Buffer::Instance&, MessageMetadataSharedPtr) -> std::pair<ContextSharedPtr, bool> {\n            return std::pair<ContextSharedPtr, bool>(nullptr, false);\n          }));\n\n  RequestDecoder decoder(protocol_, request_callbacks_);\n\n  Buffer::OwnedImpl buffer;\n  bool buffer_underflow;\n  EXPECT_EQ(decoder.onData(buffer, buffer_underflow), FilterStatus::Continue);\n  EXPECT_EQ(buffer_underflow, true);\n}\n\nTEST_F(DubboDecoderTest, NeedMoreDataForProtocolBody) {\n  EXPECT_CALL(protocol_, decodeHeader(_, _))\n      .WillOnce(Invoke([](Buffer::Instance&,\n                          MessageMetadataSharedPtr metadate) -> std::pair<ContextSharedPtr, bool> {\n        metadate->setMessageType(MessageType::Response);\n        auto context = std::make_shared<ContextImpl>();\n        context->setHeaderSize(16);\n        context->setBodySize(10);\n        return std::pair<ContextSharedPtr, bool>(context, true);\n      }));\n  EXPECT_CALL(protocol_, decodeData(_, _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, ContextSharedPtr, MessageMetadataSharedPtr) -> bool {\n        return false;\n      }));\n\n  std::shared_ptr<NiceMock<MockActiveStream>> active_stream;\n\n  EXPECT_CALL(response_callbacks_, newStream()).WillOnce(Invoke([this]() -> StreamHandler& {\n    return handler_;\n  
}));\n  EXPECT_CALL(response_callbacks_, onHeartbeat(_)).Times(0);\n  EXPECT_CALL(handler_, onStreamDecoded(_, _)).Times(0);\n\n  ResponseDecoder decoder(protocol_, response_callbacks_);\n\n  Buffer::OwnedImpl buffer;\n  bool buffer_underflow;\n  EXPECT_EQ(decoder.onData(buffer, buffer_underflow), FilterStatus::Continue);\n  EXPECT_EQ(buffer_underflow, true);\n}\n\nTEST_F(DubboDecoderTest, DecodeResponseMessage) {\n  Buffer::OwnedImpl buffer;\n  buffer.add(std::string({'\\xda', '\\xbb', '\\xc2', 0x00}));\n\n  EXPECT_CALL(protocol_, decodeHeader(_, _))\n      .WillOnce(Invoke([](Buffer::Instance&,\n                          MessageMetadataSharedPtr metadate) -> std::pair<ContextSharedPtr, bool> {\n        metadate->setMessageType(MessageType::Response);\n        auto context = std::make_shared<ContextImpl>();\n        context->setHeaderSize(16);\n        context->setBodySize(10);\n        return std::pair<ContextSharedPtr, bool>(context, true);\n      }));\n  EXPECT_CALL(protocol_, decodeData(_, _, _)).WillOnce(Return(true));\n  EXPECT_CALL(response_callbacks_, newStream()).WillOnce(ReturnRef(handler_));\n  EXPECT_CALL(response_callbacks_, onHeartbeat(_)).Times(0);\n  EXPECT_CALL(handler_, onStreamDecoded(_, _)).Times(1);\n\n  ResponseDecoder decoder(protocol_, response_callbacks_);\n\n  bool buffer_underflow;\n  EXPECT_EQ(decoder.onData(buffer, buffer_underflow), FilterStatus::Continue);\n  EXPECT_EQ(buffer_underflow, true);\n\n  decoder.reset();\n\n  EXPECT_EQ(ProtocolType::Dubbo, decoder.protocol().type());\n  EXPECT_CALL(protocol_, decodeHeader(_, _))\n      .WillOnce(Invoke([](Buffer::Instance&,\n                          MessageMetadataSharedPtr metadate) -> std::pair<ContextSharedPtr, bool> {\n        metadate->setMessageType(MessageType::Response);\n        auto context = std::make_shared<ContextImpl>();\n        context->setHeaderSize(16);\n        context->setBodySize(10);\n        return std::pair<ContextSharedPtr, bool>(context, true);\n      }));\n  
EXPECT_CALL(protocol_, decodeData(_, _, _)).WillOnce(Return(true));\n  EXPECT_CALL(response_callbacks_, newStream()).WillOnce(ReturnRef(handler_));\n  EXPECT_CALL(response_callbacks_, onHeartbeat(_)).Times(0);\n  EXPECT_CALL(handler_, onStreamDecoded(_, _)).Times(1);\n\n  buffer_underflow = false;\n  EXPECT_EQ(decoder.onData(buffer, buffer_underflow), FilterStatus::Continue);\n  EXPECT_EQ(buffer_underflow, true);\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl_test.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/hessian_utils.h\"\n#include \"extensions/filters/network/dubbo_proxy/message_impl.h\"\n\n#include \"test/extensions/filters/network/dubbo_proxy/mocks.h\"\n#include \"test/extensions/filters/network/dubbo_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nTEST(HessianProtocolTest, Name) {\n  DubboHessian2SerializerImpl serializer;\n  EXPECT_EQ(serializer.name(), \"dubbo.hessian2\");\n}\n\nTEST(HessianProtocolTest, deserializeRpcInvocation) {\n  DubboHessian2SerializerImpl serializer;\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({\n        0x05, '2', '.', '0', '.', '2', // Dubbo version\n        0x04, 't', 'e', 's', 't',      // Service name\n        0x05, '0', '.', '0', '.', '0', // Service version\n        0x04, 't', 'e', 's', 't',      // method name\n    }));\n    std::shared_ptr<ContextImpl> context = std::make_shared<ContextImpl>();\n    context->setBodySize(buffer.length());\n    auto result = serializer.deserializeRpcInvocation(buffer, context);\n    EXPECT_TRUE(result.second);\n\n    auto invo = result.first;\n    EXPECT_STREQ(\"test\", invo->methodName().c_str());\n    EXPECT_STREQ(\"test\", invo->serviceName().c_str());\n    EXPECT_STREQ(\"0.0.0\", invo->serviceVersion().value().c_str());\n  }\n\n  // incorrect body size\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({\n        0x05, '2', '.', '0', '.', '2', // Dubbo version\n        0x04, 't', 'e', 's', 't',      // Service name\n        0x05, '0', '.', '0', '.', '0', // Service version\n        0x04, 't', 'e', 's', 't',      // method name\n    }));\n    std::string exception_string = fmt::format(\"RpcInvocation 
size({}) large than body size({})\",\n                                               buffer.length(), buffer.length() - 1);\n    std::shared_ptr<ContextImpl> context = std::make_shared<ContextImpl>();\n    context->setBodySize(buffer.length() - 1);\n    EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcInvocation(buffer, context), EnvoyException,\n                              exception_string);\n  }\n}\n\nTEST(HessianProtocolTest, deserializeRpcResult) {\n  DubboHessian2SerializerImpl serializer;\n  std::shared_ptr<ContextImpl> context = std::make_shared<ContextImpl>();\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({\n        '\\x94',                   // return type\n        0x04, 't', 'e', 's', 't', // return body\n    }));\n    context->setBodySize(4);\n    auto result = serializer.deserializeRpcResult(buffer, context);\n    EXPECT_TRUE(result.second);\n    EXPECT_FALSE(result.first->hasException());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({\n        '\\x93',                   // return type\n        0x04, 't', 'e', 's', 't', // return body\n    }));\n    context->setBodySize(4);\n    auto result = serializer.deserializeRpcResult(buffer, context);\n    EXPECT_TRUE(result.second);\n    EXPECT_TRUE(result.first->hasException());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({\n        '\\x90',                   // return type\n        0x04, 't', 'e', 's', 't', // return body\n    }));\n    context->setBodySize(4);\n    auto result = serializer.deserializeRpcResult(buffer, context);\n    EXPECT_TRUE(result.second);\n    EXPECT_TRUE(result.first->hasException());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({\n        '\\x91',                   // return type\n        0x04, 't', 'e', 's', 't', // return body\n    }));\n    context->setBodySize(4);\n    auto result = serializer.deserializeRpcResult(buffer, context);\n    EXPECT_TRUE(result.second);\n    
EXPECT_TRUE(result.first->hasException());\n  }\n\n  // incorrect body size\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({\n        '\\x94',                   // return type\n        0x05, 't', 'e', 's', 't', // return body\n    }));\n    context->setBodySize(0);\n    EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcResult(buffer, context), EnvoyException,\n                              \"RpcResult size(1) large than body size(0)\");\n  }\n\n  // incorrect return type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({\n        '\\x96',                   // incorrect return type\n        0x05, 't', 'e', 's', 't', // return body\n    }));\n    context->setBodySize(buffer.length());\n    EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcResult(buffer, context), EnvoyException,\n                              \"not supported return type 6\");\n  }\n\n  // incorrect value size\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({\n        '\\x92',                   // without the value of the return type\n        0x05, 't', 'e', 's', 't', // return body\n    }));\n    std::string exception_string =\n        fmt::format(\"RpcResult is no value, but the rest of the body size({}) not equal 0\",\n                    buffer.length() - 1);\n    context->setBodySize(buffer.length());\n    EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcResult(buffer, context), EnvoyException,\n                              exception_string);\n  }\n}\n\nTEST(HessianProtocolTest, HessianDeserializerConfigFactory) {\n  auto serializer =\n      NamedSerializerConfigFactory::getFactory(ProtocolType::Dubbo, SerializationType::Hessian2)\n          .createSerializer();\n  EXPECT_EQ(serializer->name(), \"dubbo.hessian2\");\n  EXPECT_EQ(serializer->type(), SerializationType::Hessian2);\n}\n\nTEST(HessianProtocolTest, serializeRpcResult) {\n  Buffer::OwnedImpl buffer;\n  std::string mock_response(\"invalid method name 'Add'\");\n  RpcResponseType 
mock_response_type = RpcResponseType::ResponseWithException;\n  DubboHessian2SerializerImpl serializer;\n\n  EXPECT_NE(serializer.serializeRpcResult(buffer, mock_response, mock_response_type), 0);\n\n  size_t hessian_int_size;\n  int type_value = HessianUtils::peekInt(buffer, &hessian_int_size);\n  EXPECT_EQ(static_cast<uint8_t>(mock_response_type), static_cast<uint8_t>(type_value));\n\n  size_t hessian_string_size;\n  std::string content = HessianUtils::peekString(buffer, &hessian_string_size, sizeof(uint8_t));\n  EXPECT_EQ(mock_response, content);\n\n  EXPECT_EQ(buffer.length(), hessian_int_size + hessian_string_size);\n\n  size_t body_size = mock_response.size() + sizeof(mock_response_type);\n  std::shared_ptr<ContextImpl> context = std::make_shared<ContextImpl>();\n  context->setBodySize(body_size);\n  auto result = serializer.deserializeRpcResult(buffer, context);\n  EXPECT_TRUE(result.first->hasException());\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/protocol.h\"\n\n#include \"test/extensions/filters/network/dubbo_proxy/mocks.h\"\n#include \"test/extensions/filters/network/dubbo_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nTEST(DubboProtocolImplTest, NotEnoughData) {\n  Buffer::OwnedImpl buffer;\n  DubboProtocolImpl dubbo_protocol;\n  MessageMetadataSharedPtr metadata = std::make_shared<MessageMetadata>();\n  auto result = dubbo_protocol.decodeHeader(buffer, metadata);\n  EXPECT_FALSE(result.second);\n\n  buffer.add(std::string(15, 0x00));\n  result = dubbo_protocol.decodeHeader(buffer, metadata);\n  EXPECT_FALSE(result.second);\n}\n\nTEST(DubboProtocolImplTest, Name) {\n  DubboProtocolImpl dubbo_protocol;\n  EXPECT_EQ(dubbo_protocol.name(), \"dubbo\");\n}\n\nTEST(DubboProtocolImplTest, Normal) {\n  DubboProtocolImpl dubbo_protocol;\n  // Normal dubbo request message\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadataSharedPtr metadata = std::make_shared<MessageMetadata>();\n    buffer.add(std::string({'\\xda', '\\xbb', '\\xc2', 0x00}));\n    addInt64(buffer, 1);\n    addInt32(buffer, 1);\n\n    auto result = dubbo_protocol.decodeHeader(buffer, metadata);\n    auto context = result.first;\n    EXPECT_TRUE(result.second);\n    EXPECT_EQ(1, metadata->requestId());\n    EXPECT_EQ(1, context->bodySize());\n    EXPECT_EQ(MessageType::Request, metadata->messageType());\n  }\n\n  // Normal dubbo response message\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadataSharedPtr metadata = std::make_shared<MessageMetadata>();\n    buffer.add(std::string({'\\xda', '\\xbb', 0x42, 20}));\n    addInt64(buffer, 1);\n    addInt32(buffer, 1);\n    auto result = 
dubbo_protocol.decodeHeader(buffer, metadata);\n    auto context = result.first;\n    EXPECT_TRUE(result.second);\n    EXPECT_EQ(1, metadata->requestId());\n    EXPECT_EQ(1, context->bodySize());\n    EXPECT_EQ(MessageType::Response, metadata->messageType());\n  }\n}\n\nTEST(DubboProtocolImplTest, InvalidProtocol) {\n  DubboProtocolImpl dubbo_protocol;\n  MessageMetadataSharedPtr metadata = std::make_shared<MessageMetadata>();\n\n  // Invalid dubbo magic number\n  {\n    Buffer::OwnedImpl buffer;\n    addInt64(buffer, 0);\n    addInt64(buffer, 0);\n    EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decodeHeader(buffer, metadata), EnvoyException,\n                              \"invalid dubbo message magic number 0\");\n  }\n\n  // Invalid message size\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xda', '\\xbb', '\\xc2', 0x00}));\n    addInt64(buffer, 1);\n    addInt32(buffer, DubboProtocolImpl::MaxBodySize + 1);\n    std::string exception_string =\n        fmt::format(\"invalid dubbo message size {}\", DubboProtocolImpl::MaxBodySize + 1);\n    EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decodeHeader(buffer, metadata), EnvoyException,\n                              exception_string);\n  }\n\n  // Invalid serialization type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xda', '\\xbb', '\\xc3', 0x00}));\n    addInt64(buffer, 1);\n    addInt32(buffer, 0xff);\n    EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decodeHeader(buffer, metadata), EnvoyException,\n                              \"invalid dubbo message serialization type 3\");\n  }\n\n  // Invalid response status\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xda', '\\xbb', 0x42, 0x00}));\n    addInt64(buffer, 1);\n    addInt32(buffer, 0xff);\n    EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decodeHeader(buffer, metadata), EnvoyException,\n                              \"invalid dubbo message response status 0\");\n  }\n}\n\nTEST(DubboProtocolImplTest, 
DubboProtocolConfigFactory) {\n  auto protocol = NamedProtocolConfigFactory::getFactory(ProtocolType::Dubbo)\n                      .createProtocol(SerializationType::Hessian2);\n  EXPECT_EQ(protocol->name(), \"dubbo\");\n  EXPECT_EQ(protocol->type(), ProtocolType::Dubbo);\n  EXPECT_EQ(protocol->serializer()->type(), SerializationType::Hessian2);\n}\n\nTEST(DubboProtocolImplTest, encode) {\n  MessageMetadata metadata;\n  metadata.setMessageType(MessageType::Response);\n  metadata.setResponseStatus(ResponseStatus::ServiceNotFound);\n  metadata.setSerializationType(SerializationType::Hessian2);\n  metadata.setRequestId(100);\n\n  Buffer::OwnedImpl buffer;\n  DubboProtocolImpl dubbo_protocol;\n  dubbo_protocol.initSerializer(SerializationType::Hessian2);\n  std::string content(\"this is test data\");\n  EXPECT_TRUE(dubbo_protocol.encode(buffer, metadata, content, RpcResponseType::ResponseWithValue));\n\n  MessageMetadataSharedPtr output_metadata = std::make_shared<MessageMetadata>();\n  auto result = dubbo_protocol.decodeHeader(buffer, output_metadata);\n  EXPECT_TRUE(result.second);\n\n  EXPECT_EQ(metadata.messageType(), output_metadata->messageType());\n  EXPECT_EQ(metadata.responseStatus(), output_metadata->responseStatus());\n  EXPECT_EQ(metadata.serializationType(), output_metadata->serializationType());\n  EXPECT_EQ(metadata.requestId(), output_metadata->requestId());\n\n  Buffer::OwnedImpl body_buffer;\n  size_t serialized_body_size = dubbo_protocol.serializer()->serializeRpcResult(\n      body_buffer, content, RpcResponseType::ResponseWithValue);\n  auto context = result.first;\n  EXPECT_EQ(context->bodySize(), serialized_body_size);\n  EXPECT_EQ(false, context->hasAttachments());\n  EXPECT_EQ(0, context->attachments().size());\n\n  buffer.drain(context->headerSize());\n  EXPECT_TRUE(dubbo_protocol.decodeData(buffer, context, output_metadata));\n}\n\nTEST(DubboProtocolImplTest, decode) {\n  Buffer::OwnedImpl buffer;\n  MessageMetadataSharedPtr metadata;\n  
DubboProtocolImpl dubbo_protocol;\n\n  // metadata is nullptr\n  EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decodeHeader(buffer, metadata), EnvoyException,\n                            \"invalid metadata parameter\");\n\n  metadata = std::make_shared<MessageMetadata>();\n\n  // Invalid message header size\n  EXPECT_FALSE(dubbo_protocol.decodeHeader(buffer, metadata).second);\n\n  // Invalid dubbo magic number\n  {\n    addInt64(buffer, 0);\n    addInt64(buffer, 0);\n    EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decodeHeader(buffer, metadata), EnvoyException,\n                              \"invalid dubbo message magic number 0\");\n    buffer.drain(buffer.length());\n  }\n\n  // Invalid message body size\n  {\n    buffer.add(std::string({'\\xda', '\\xbb', '\\xc2', 0x00}));\n    addInt64(buffer, 1);\n    addInt32(buffer, DubboProtocolImpl::MaxBodySize + 1);\n    std::string exception_string =\n        fmt::format(\"invalid dubbo message size {}\", DubboProtocolImpl::MaxBodySize + 1);\n    EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decodeHeader(buffer, metadata), EnvoyException,\n                              exception_string);\n    buffer.drain(buffer.length());\n  }\n\n  // Invalid serialization type\n  {\n    buffer.add(std::string({'\\xda', '\\xbb', '\\xc3', 0x00}));\n    addInt64(buffer, 1);\n    addInt32(buffer, 0xff);\n    EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decodeHeader(buffer, metadata), EnvoyException,\n                              \"invalid dubbo message serialization type 3\");\n    buffer.drain(buffer.length());\n  }\n\n  // Invalid response status\n  {\n    buffer.add(std::string({'\\xda', '\\xbb', 0x42, 0x00}));\n    addInt64(buffer, 1);\n    addInt32(buffer, 0xff);\n    EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decodeHeader(buffer, metadata), EnvoyException,\n                              \"invalid dubbo message response status 0\");\n    buffer.drain(buffer.length());\n  }\n\n  // The dubbo request message\n  {\n    buffer.add(std::string({'\\xda', 
'\\xbb', '\\xc2', 0x00}));\n    addInt64(buffer, 1);\n    addInt32(buffer, 1);\n    auto result = dubbo_protocol.decodeHeader(buffer, metadata);\n    EXPECT_TRUE(result.second);\n    auto context = result.first;\n    EXPECT_EQ(1, context->bodySize());\n    EXPECT_EQ(MessageType::Request, metadata->messageType());\n    EXPECT_EQ(1, metadata->requestId());\n    EXPECT_EQ(SerializationType::Hessian2, metadata->serializationType());\n    buffer.drain(buffer.length());\n  }\n\n  // The One-way dubbo request message\n  {\n    buffer.add(std::string({'\\xda', '\\xbb', '\\x82', 0x00}));\n    addInt64(buffer, 1);\n    addInt32(buffer, 1);\n    auto result = dubbo_protocol.decodeHeader(buffer, metadata);\n    EXPECT_TRUE(result.second);\n    auto context = result.first;\n    EXPECT_EQ(1, context->bodySize());\n    EXPECT_EQ(MessageType::Oneway, metadata->messageType());\n    EXPECT_EQ(1, metadata->requestId());\n    EXPECT_EQ(SerializationType::Hessian2, metadata->serializationType());\n  }\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/hessian_utils_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/hessian_utils.h\"\n\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nTEST(HessianUtilsTest, peekString) {\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x02, 't'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekString(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x30}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekString(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x30, 't'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekString(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x53, 't'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekString(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x53, 't', 'e'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekString(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x52, 't'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekString(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  // Incorrect type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x20, 't'}));\n    size_t size;\n    
EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekString(buffer, &size), EnvoyException,\n                              \"hessian type is not string 32\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x01, 't'}));\n    size_t size;\n    EXPECT_STREQ(\"t\", HessianUtils::peekString(buffer, &size).c_str());\n    EXPECT_EQ(2, size);\n  }\n\n  // empty string\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x00}));\n    size_t size;\n    EXPECT_STREQ(\"\", HessianUtils::peekString(buffer, &size).c_str());\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x01, 0x00}));\n    size_t size;\n    EXPECT_STREQ(\"\", HessianUtils::peekString(buffer, &size, 1).c_str());\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x53, 0x00, 0x05, 'h', 'e', 'l', 'l', 'o'}));\n    size_t size;\n    EXPECT_STREQ(\"hello\", HessianUtils::peekString(buffer, &size).c_str());\n    EXPECT_EQ(8, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string(\n        {0x52, 0x00, 0x07, 'h', 'e', 'l', 'l', 'o', ',', ' ', 0x05, 'w', 'o', 'r', 'l', 'd'}));\n    size_t size;\n    EXPECT_STREQ(\"hello, world\", HessianUtils::peekString(buffer, &size).c_str());\n    EXPECT_EQ(16, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x31, 0x01}) + std::string(256 + 0x01, 't'));\n    size_t size;\n    EXPECT_STREQ(std::string(256 + 0x01, 't').c_str(),\n                 HessianUtils::peekString(buffer, &size).c_str());\n    EXPECT_EQ(256 + 0x01 + 2, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x31, 0x01}) + std::string(256 + 0x01, 't'));\n    EXPECT_STREQ(std::string(256 + 0x01, 't').c_str(), HessianUtils::readString(buffer).c_str());\n    EXPECT_EQ(0, buffer.length());\n  }\n}\n\nTEST(HessianUtilsTest, peekLong) {\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    
buffer.add(std::string({'\\xf0'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekLong(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x38, '1'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekLong(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x59, '1'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekLong(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4c, '1'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekLong(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  // Incorrect type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x40}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekLong(buffer, &size), EnvoyException,\n                              \"hessian type is not long 64\");\n  }\n\n  // Single octet longs\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xef'}));\n    size_t size;\n    EXPECT_EQ(15, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xe0'}));\n    size_t size;\n    EXPECT_EQ(0, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xd8'}));\n    size_t size;\n    EXPECT_EQ(-8, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  // Two octet longs\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xf8', 0x00}));\n    size_t size;\n    EXPECT_EQ(0, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(2, size);\n  
}\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xf0', 0x00}));\n    size_t size;\n    EXPECT_EQ(-2048, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xf7', 0x00}));\n    size_t size;\n    EXPECT_EQ(-256, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xff', '\\xff'}));\n    size_t size;\n    EXPECT_EQ(2047, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  // Three octet longs\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x3c, 0x00, 0x00}));\n    size_t size;\n    EXPECT_EQ(0, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(3, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x38, 0x00, 0x00}));\n    size_t size;\n    EXPECT_EQ(-262144, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(3, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\x3f', '\\xff', '\\xff'}));\n    size_t size;\n    EXPECT_EQ(262143, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(3, size);\n  }\n\n  // four octet longs\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x59, 0x00, 0x00, 0x00, 0x00}));\n    size_t size;\n    EXPECT_EQ(0, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(5, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x01, 0x59, 0x00, 0x00, 0x01, 0x2c}));\n    size_t size;\n    EXPECT_EQ(300, HessianUtils::peekLong(buffer, &size, 1));\n    EXPECT_EQ(5, size);\n  }\n\n  // eight octet longs\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x2c}));\n    size_t size;\n    EXPECT_EQ(300, HessianUtils::peekLong(buffer, &size));\n    EXPECT_EQ(9, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4c, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x2c}));\n    EXPECT_EQ(300, HessianUtils::readLong(buffer));\n    EXPECT_EQ(0, buffer.length());\n  }\n}\n\nTEST(HessianUtilsTest, peekBool) {\n  // Incorrect type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x01}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekBool(buffer, &size), EnvoyException,\n                              \"hessian type is not bool 1\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'T'}));\n    size_t size;\n    EXPECT_TRUE(HessianUtils::peekBool(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'F'}));\n    size_t size;\n    EXPECT_FALSE(HessianUtils::peekBool(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'F'}));\n    EXPECT_FALSE(HessianUtils::readBool(buffer));\n    EXPECT_EQ(0, buffer.length());\n  }\n}\n\nTEST(HessianUtilsTest, peekInt) {\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xc1'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekInt(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xd0'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekInt(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x49}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekInt(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  // Incorrect type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x01}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekInt(buffer, &size), EnvoyException,\n                              \"hessian 
type is not int 1\");\n  }\n\n  // Single octet integers\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\x90'}));\n    size_t size;\n    EXPECT_EQ(0, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\x80'}));\n    size_t size;\n    EXPECT_EQ(-16, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xbf'}));\n    size_t size;\n    EXPECT_EQ(47, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  // Two octet integers\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xc8', 0x00}));\n    size_t size;\n    EXPECT_EQ(0, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xc0', 0x00}));\n    size_t size;\n    EXPECT_EQ(-2048, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xc7', 0x00}));\n    size_t size;\n    EXPECT_EQ(-256, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xcf', '\\xff'}));\n    size_t size;\n    EXPECT_EQ(2047, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  // Three octet integers\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xd4', 0x00, 0x00}));\n    size_t size;\n    EXPECT_EQ(0, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(3, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xd0', 0x00, 0x00}));\n    size_t size;\n    EXPECT_EQ(-262144, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(3, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({'\\xd7', '\\xff', '\\xff'}));\n    size_t size;\n    EXPECT_EQ(262143, 
HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(3, size);\n  }\n\n  // Four octet integers\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x49, 0x00, 0x00, 0x00, 0x00}));\n    size_t size;\n    EXPECT_EQ(0, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(5, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x49, 0x00, 0x00, 0x01, 0x2c}));\n    size_t size;\n    EXPECT_EQ(300, HessianUtils::peekInt(buffer, &size));\n    EXPECT_EQ(5, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x49, 0x00, 0x00, 0x01, 0x2c}));\n    EXPECT_EQ(300, HessianUtils::readInt(buffer));\n    EXPECT_EQ(0, buffer.length());\n  }\n}\n\nTEST(HessianUtilsTest, peekDouble) {\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5d}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekDouble(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5e}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekDouble(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5f}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekDouble(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x44}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekDouble(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  // Incorrect type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x01}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekDouble(buffer, &size), EnvoyException,\n                              \"hessian type is not double 1\");\n 
 }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5b}));\n    size_t size;\n    EXPECT_DOUBLE_EQ(0.0, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5c}));\n    size_t size;\n    EXPECT_DOUBLE_EQ(1.0, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5d, 0x00}));\n    size_t size;\n    EXPECT_DOUBLE_EQ(0.0, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5d, '\\x80'}));\n    size_t size;\n    EXPECT_DOUBLE_EQ(-128.0, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5d, '\\x7f'}));\n    size_t size;\n    EXPECT_DOUBLE_EQ(127.0, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(2, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5e, 0x00, 0x00}));\n    size_t size;\n    EXPECT_DOUBLE_EQ(0.0, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(3, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5e, '\\x80', 0x00}));\n    size_t size;\n    EXPECT_DOUBLE_EQ(-32768.0, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(3, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5e, '\\x7f', '\\xff'}));\n    size_t size;\n    EXPECT_DOUBLE_EQ(32767.0, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(3, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5f, 0x00, 0x00, 0x00, 0x00}));\n    size_t size;\n    EXPECT_DOUBLE_EQ(0.0, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(5, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x44, 0x40, 0x28, '\\x80', 0x00, 0x00, 0x00, 0x00, 0x00}));\n    size_t size;\n    
EXPECT_DOUBLE_EQ(12.25, HessianUtils::peekDouble(buffer, &size));\n    EXPECT_EQ(9, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x5f, 0x00, 0x00, 0x00, 0x00}));\n    EXPECT_DOUBLE_EQ(0.0, HessianUtils::readDouble(buffer));\n    EXPECT_EQ(0, buffer.length());\n  }\n}\n\nTEST(HessianUtilsTest, peekNull) {\n  // Incorrect type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x01}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekNull(buffer, &size), EnvoyException,\n                              \"hessian type is not null 1\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4e}));\n    size_t size = 0;\n    HessianUtils::peekNull(buffer, &size);\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4e}));\n    HessianUtils::readNull(buffer);\n    EXPECT_EQ(0, buffer.length());\n  }\n}\n\nTEST(HessianUtilsTest, peekDate) {\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4a}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekDate(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4b}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekDate(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  // Incorrect type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x01}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekDate(buffer, &size), EnvoyException,\n                              \"hessian type is not date 1\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4b, 0x00, 0x00, 0x00, 0x00}));\n    size_t size;\n    auto t = HessianUtils::peekDate(buffer, &size);\n    EXPECT_EQ(5, size);\n    EXPECT_EQ(0, t.count());\n  }\n\n  {\n    
Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4b, 0x00, '\\xe3', '\\x83', '\\x8f'}));\n    size_t size;\n    auto t = HessianUtils::peekDate(buffer, &size);\n    EXPECT_EQ(5, size);\n    EXPECT_EQ(894621060000, t.count());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4a, 0x00, 0x00, 0x00, '\\xd0', 0x4b, '\\x92', '\\x84', '\\xb8'}));\n    size_t size = 0;\n    auto t = HessianUtils::peekDate(buffer, &size);\n    EXPECT_EQ(9, size);\n    // Time zone was UTC\n    EXPECT_EQ(894621091000, t.count());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x4a, 0x00, 0x00, 0x00, '\\xd0', 0x4b, '\\x92', '\\x84', '\\xb8'}));\n    auto t = HessianUtils::readDate(buffer);\n    // Time zone was UTC\n    EXPECT_EQ(894621091000, t.count());\n    EXPECT_EQ(0, buffer.length());\n  }\n}\n\nTEST(HessianUtilsTest, peekByte) {\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x23}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekByte(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x42}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekByte(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x42, 't', 'e'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekByte(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x41, 't'}));\n    size_t size;\n    EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekByte(buffer, &size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  // Incorrect type\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x01}));\n    size_t size;\n    
EXPECT_THROW_WITH_MESSAGE(HessianUtils::peekByte(buffer, &size), EnvoyException,\n                              \"hessian type is not byte 1\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x20}));\n    size_t size = 0;\n    EXPECT_STREQ(\"\", HessianUtils::peekByte(buffer, &size).c_str());\n    EXPECT_EQ(1, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x23, 0x01, 0x02, 0x03}));\n    size_t size = 0;\n    EXPECT_STREQ(\"\\x1\\x2\\x3\", HessianUtils::peekByte(buffer, &size).c_str());\n    EXPECT_EQ(4, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x42, 0x10, 0x00}) + std::string(0x10 * 256, 't'));\n    size_t size = 0;\n    EXPECT_STREQ(std::string(0x10 * 256, 't').c_str(),\n                 HessianUtils::peekByte(buffer, &size).c_str());\n    EXPECT_EQ(3 + 0x10 * 256, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x41, 0x04, 0x00}) + std::string(0x04 * 256, 't') +\n               std::string({0x23, 0x01, 0x02, 0x03}));\n    size_t size = 0;\n    std::string expect_string = std::string(0x04 * 256, 't') + \"\\x1\\x2\\x3\";\n    EXPECT_STREQ(expect_string.c_str(), HessianUtils::peekByte(buffer, &size).c_str());\n    EXPECT_EQ(3 + 0x04 * 256 + 4, size);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string({0x23, 0x01, 0x02, 0x03}));\n    EXPECT_STREQ(\"\\x1\\x2\\x3\", HessianUtils::readByte(buffer).c_str());\n    EXPECT_EQ(0, buffer.length());\n  }\n}\n\nTEST(HessianUtilsTest, writeString) {\n  const size_t max = 65535;\n  const size_t segment_mark_length = 3;\n\n  {\n    const std::string append_content(\"b\");\n    const size_t append_content_hessian_length = 1;\n    std::string message(max, 'a');\n    message.append(append_content);\n    size_t expect_serialized_size =\n        max + segment_mark_length + append_content_hessian_length + append_content.size();\n\n    Buffer::OwnedImpl buffer;\n    size_t size = 
HessianUtils::writeString(buffer, message);\n    EXPECT_EQ(size, expect_serialized_size);\n  }\n\n  {\n    const std::string append_content(33, 'b');\n    const size_t append_content_hessian_length = 2;\n    std::string message(max, 'a');\n    message.append(append_content);\n    size_t expect_serialized_size =\n        max + segment_mark_length + append_content_hessian_length + append_content.size();\n\n    Buffer::OwnedImpl buffer;\n    size_t size = HessianUtils::writeString(buffer, message);\n    EXPECT_EQ(size, expect_serialized_size);\n  }\n\n  {\n    const std::string append_content(1025, 'b');\n    const size_t append_content_hessian_length = 3;\n    std::string message(max, 'a');\n    message.append(append_content);\n    size_t expect_serialized_size =\n        max + segment_mark_length + append_content_hessian_length + append_content.size();\n\n    Buffer::OwnedImpl buffer;\n    size_t size = HessianUtils::writeString(buffer, message);\n    EXPECT_EQ(size, expect_serialized_size);\n  }\n\n  {\n    const std::string append_content(1025, 'b');\n    const size_t append_content_hessian_length = 3;\n    const size_t max_size = 2 * max;\n    std::string message(max_size, 'a');\n    message.append(append_content);\n    size_t expect_serialized_size =\n        max * 2 + segment_mark_length * 2 + append_content_hessian_length + append_content.size();\n\n    Buffer::OwnedImpl buffer;\n    size_t size = HessianUtils::writeString(buffer, message);\n    EXPECT_EQ(size, expect_serialized_size);\n  }\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/metadata_test.cc",
    "content": "#include \"extensions/filters/network/dubbo_proxy/message_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/metadata.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer_impl.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nTEST(MessageMetadataTest, Fields) {\n  MessageMetadata metadata;\n  auto invo = std::make_shared<RpcInvocationImpl>();\n\n  EXPECT_FALSE(metadata.hasInvocationInfo());\n  metadata.setInvocationInfo(invo);\n  EXPECT_TRUE(metadata.hasInvocationInfo());\n\n  EXPECT_THROW(metadata.timeout().value(), absl::bad_optional_access);\n  metadata.setTimeout(3);\n  EXPECT_TRUE(metadata.timeout().has_value());\n\n  invo->setMethodName(\"method\");\n  EXPECT_EQ(\"method\", invo->methodName());\n\n  EXPECT_FALSE(invo->serviceVersion().has_value());\n  EXPECT_THROW(invo->serviceVersion().value(), absl::bad_optional_access);\n  invo->setServiceVersion(\"1.0.0\");\n  EXPECT_TRUE(invo->serviceVersion().has_value());\n  EXPECT_EQ(\"1.0.0\", invo->serviceVersion().value());\n\n  EXPECT_FALSE(invo->serviceGroup().has_value());\n  EXPECT_THROW(invo->serviceGroup().value(), absl::bad_optional_access);\n  invo->setServiceGroup(\"group\");\n  EXPECT_TRUE(invo->serviceGroup().has_value());\n  EXPECT_EQ(\"group\", invo->serviceGroup().value());\n}\n\nTEST(MessageMetadataTest, Headers) {\n  MessageMetadata metadata;\n  auto invo = std::make_shared<RpcInvocationImpl>();\n\n  EXPECT_FALSE(invo->hasHeaders());\n  invo->addHeader(\"k\", \"v\");\n  EXPECT_EQ(invo->headers().size(), 1);\n}\n\nTEST(MessageMetadataTest, Parameters) {\n  MessageMetadata metadata;\n  auto invo = std::make_shared<RpcInvocationImpl>();\n\n  EXPECT_FALSE(invo->hasParameters());\n  invo->addParameterValue(0, \"test\");\n  EXPECT_TRUE(invo->hasParameters());\n  EXPECT_EQ(invo->parameters().size(), 1);\n  EXPECT_EQ(invo->getParameterValue(0), \"test\");\n  
EXPECT_EQ(invo->getParameterValue(1), \"\");\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/mocks.cc",
    "content": "#include \"test/extensions/filters/network/dubbo_proxy/mocks.h\"\n\n#include <memory>\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nMockStreamDecoder::MockStreamDecoder() {\n  ON_CALL(*this, onMessageDecoded(_, _)).WillByDefault(Return(FilterStatus::Continue));\n}\n\nMockStreamEncoder::MockStreamEncoder() {\n  ON_CALL(*this, onMessageEncoded(_, _)).WillByDefault(Return(FilterStatus::Continue));\n}\n\nMockRequestDecoderCallbacks::MockRequestDecoderCallbacks() {\n  ON_CALL(*this, newStream()).WillByDefault(ReturnRef(handler_));\n}\n\nMockResponseDecoderCallbacks::MockResponseDecoderCallbacks() {\n  ON_CALL(*this, newStream()).WillByDefault(ReturnRef(handler_));\n}\n\nMockProtocol::MockProtocol() {\n  ON_CALL(*this, name()).WillByDefault(ReturnRef(name_));\n  ON_CALL(*this, type()).WillByDefault(Return(type_));\n  ON_CALL(*this, serializer()).WillByDefault(Return(&serializer_));\n}\nMockProtocol::~MockProtocol() = default;\n\nMockSerializer::MockSerializer() {\n  ON_CALL(*this, name()).WillByDefault(ReturnRef(name_));\n  ON_CALL(*this, type()).WillByDefault(Return(type_));\n}\nMockSerializer::~MockSerializer() = default;\n\nnamespace DubboFilters {\n\nMockFilterChainFactory::MockFilterChainFactory() = default;\nMockFilterChainFactory::~MockFilterChainFactory() = default;\n\nMockFilterChainFactoryCallbacks::MockFilterChainFactoryCallbacks() = default;\nMockFilterChainFactoryCallbacks::~MockFilterChainFactoryCallbacks() = default;\n\nMockDecoderFilter::MockDecoderFilter() {\n  ON_CALL(*this, setDecoderFilterCallbacks(_))\n      .WillByDefault(\n          Invoke([this](DecoderFilterCallbacks& callbacks) -> void { callbacks_ = &callbacks; }));\n}\nMockDecoderFilter::~MockDecoderFilter() = 
default;\n\nMockDecoderFilterCallbacks::MockDecoderFilterCallbacks() {\n  route_ = std::make_shared<NiceMock<Router::MockRoute>>();\n\n  ON_CALL(*this, streamId()).WillByDefault(Return(stream_id_));\n  ON_CALL(*this, connection()).WillByDefault(Return(&connection_));\n  ON_CALL(*this, route()).WillByDefault(Return(route_));\n  ON_CALL(*this, streamInfo()).WillByDefault(ReturnRef(stream_info_));\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n}\nMockDecoderFilterCallbacks::~MockDecoderFilterCallbacks() = default;\n\nMockEncoderFilter::MockEncoderFilter() {\n  ON_CALL(*this, setEncoderFilterCallbacks(_))\n      .WillByDefault(\n          Invoke([this](EncoderFilterCallbacks& callbacks) -> void { callbacks_ = &callbacks; }));\n}\nMockEncoderFilter::~MockEncoderFilter() = default;\n\nMockEncoderFilterCallbacks::MockEncoderFilterCallbacks() {\n  route_ = std::make_shared<NiceMock<Router::MockRoute>>();\n\n  ON_CALL(*this, streamId()).WillByDefault(Return(stream_id_));\n  ON_CALL(*this, connection()).WillByDefault(Return(&connection_));\n  ON_CALL(*this, route()).WillByDefault(Return(route_));\n  ON_CALL(*this, streamInfo()).WillByDefault(ReturnRef(stream_info_));\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n}\nMockEncoderFilterCallbacks::~MockEncoderFilterCallbacks() = default;\n\nMockCodecFilter::MockCodecFilter() {\n  ON_CALL(*this, setDecoderFilterCallbacks(_))\n      .WillByDefault(Invoke(\n          [this](DecoderFilterCallbacks& callbacks) -> void { decoder_callbacks_ = &callbacks; }));\n  ON_CALL(*this, setEncoderFilterCallbacks(_))\n      .WillByDefault(Invoke(\n          [this](EncoderFilterCallbacks& callbacks) -> void { encoder_callbacks_ = &callbacks; }));\n}\nMockCodecFilter::~MockCodecFilter() = default;\n\nMockFilterConfigFactory::MockFilterConfigFactory()\n    : MockFactoryBase(\"envoy.filters.dubbo.mock_filter\"),\n      mock_filter_(std::make_shared<NiceMock<MockDecoderFilter>>()) 
{}\n\nMockFilterConfigFactory::~MockFilterConfigFactory() = default;\n\nFilterFactoryCb\nMockFilterConfigFactory::createFilterFactoryFromProtoTyped(const ProtobufWkt::Struct& proto_config,\n                                                           const std::string& stat_prefix,\n                                                           Server::Configuration::FactoryContext&) {\n  config_struct_ = proto_config;\n  config_stat_prefix_ = stat_prefix;\n\n  return [this](DubboFilters::FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addDecoderFilter(mock_filter_);\n  };\n}\n\n} // namespace DubboFilters\n\nnamespace Router {\n\nMockRouteEntry::MockRouteEntry() {\n  ON_CALL(*this, clusterName()).WillByDefault(ReturnRef(cluster_name_));\n}\nMockRouteEntry::~MockRouteEntry() = default;\n\nMockRoute::MockRoute() { ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); }\nMockRoute::~MockRoute() = default;\n\n} // namespace Router\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/mocks.h",
    "content": "#pragma once\n\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/decoder.h\"\n#include \"extensions/filters/network/dubbo_proxy/decoder_event_handler.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/factory_base.h\"\n#include \"extensions/filters/network/dubbo_proxy/filters/filter.h\"\n#include \"extensions/filters/network/dubbo_proxy/protocol.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/router.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\nclass MockStreamDecoder : public StreamDecoder {\npublic:\n  MockStreamDecoder();\n\n  MOCK_METHOD(FilterStatus, onMessageDecoded, (MessageMetadataSharedPtr, ContextSharedPtr));\n};\n\nclass MockStreamEncoder : public StreamEncoder {\npublic:\n  MockStreamEncoder();\n\n  MOCK_METHOD(FilterStatus, onMessageEncoded, (MessageMetadataSharedPtr, ContextSharedPtr));\n};\n\nclass MockStreamHandler : public StreamHandler {\npublic:\n  MockStreamHandler() = default;\n\n  MOCK_METHOD(void, onStreamDecoded, (MessageMetadataSharedPtr, ContextSharedPtr));\n};\n\nclass MockRequestDecoderCallbacks : public RequestDecoderCallbacks {\npublic:\n  MockRequestDecoderCallbacks();\n  ~MockRequestDecoderCallbacks() override = default;\n\n  MOCK_METHOD(StreamHandler&, newStream, ());\n  MOCK_METHOD(void, onHeartbeat, (MessageMetadataSharedPtr));\n\n  MockStreamHandler handler_;\n};\nclass MockResponseDecoderCallbacks : public ResponseDecoderCallbacks {\npublic:\n  MockResponseDecoderCallbacks();\n  ~MockResponseDecoderCallbacks() override = default;\n\n  MOCK_METHOD(StreamHandler&, newStream, ());\n  MOCK_METHOD(void, onHeartbeat, (MessageMetadataSharedPtr));\n\n  MockStreamHandler handler_;\n};\n\nclass 
MockActiveStream : public ActiveStream {\npublic:\n  MockActiveStream(StreamHandler& handler, MessageMetadataSharedPtr metadata,\n                   ContextSharedPtr context)\n      : ActiveStream(handler, metadata, context) {}\n  ~MockActiveStream() = default;\n\n  MOCK_METHOD(ActiveStream*, newStream, (MessageMetadataSharedPtr, ContextSharedPtr));\n  MOCK_METHOD(void, onHeartbeat, (MessageMetadataSharedPtr));\n};\n\nclass MockDecoderStateMachineDelegate : public DecoderStateMachine::Delegate {\npublic:\n  MockDecoderStateMachineDelegate() = default;\n  ~MockDecoderStateMachineDelegate() override = default;\n\n  MOCK_METHOD(ActiveStream*, newStream, (MessageMetadataSharedPtr, ContextSharedPtr));\n  MOCK_METHOD(void, onHeartbeat, (MessageMetadataSharedPtr));\n};\n\nclass MockSerializer : public Serializer {\npublic:\n  MockSerializer();\n  ~MockSerializer() override;\n\n  // DubboProxy::Serializer\n  MOCK_METHOD(const std::string&, name, (), (const));\n  MOCK_METHOD(SerializationType, type, (), (const));\n  MOCK_METHOD((std::pair<RpcInvocationSharedPtr, bool>), deserializeRpcInvocation,\n              (Buffer::Instance&, ContextSharedPtr));\n  MOCK_METHOD((std::pair<RpcResultSharedPtr, bool>), deserializeRpcResult,\n              (Buffer::Instance&, ContextSharedPtr));\n  MOCK_METHOD(size_t, serializeRpcResult, (Buffer::Instance&, const std::string&, RpcResponseType));\n\n  std::string name_{\"mockDeserializer\"};\n  SerializationType type_{SerializationType::Hessian2};\n};\n\nclass MockProtocol : public Protocol {\npublic:\n  MockProtocol();\n  ~MockProtocol() override;\n\n  MOCK_METHOD(const std::string&, name, (), (const));\n  MOCK_METHOD(ProtocolType, type, (), (const));\n  MOCK_METHOD(Serializer*, serializer, (), (const));\n  MOCK_METHOD((std::pair<ContextSharedPtr, bool>), decodeHeader,\n              (Buffer::Instance&, MessageMetadataSharedPtr));\n  MOCK_METHOD(bool, decodeData, (Buffer::Instance&, ContextSharedPtr, MessageMetadataSharedPtr));\n  
MOCK_METHOD(bool, encode,\n              (Buffer::Instance&, const MessageMetadata&, const std::string&, RpcResponseType));\n\n  std::string name_{\"MockProtocol\"};\n  ProtocolType type_{ProtocolType::Dubbo};\n  NiceMock<MockSerializer> serializer_;\n};\n\nclass MockNamedSerializerConfigFactory : public NamedSerializerConfigFactory {\npublic:\n  MockNamedSerializerConfigFactory(std::function<MockSerializer*()> f) : f_(f) {}\n\n  SerializerPtr createSerializer() override { return SerializerPtr{f_()}; }\n  std::string name() const override {\n    return SerializerNames::get().fromType(SerializationType::Hessian2);\n  }\n\n  std::function<MockSerializer*()> f_;\n};\n\nclass MockNamedProtocolConfigFactory : public NamedProtocolConfigFactory {\npublic:\n  MockNamedProtocolConfigFactory(std::function<MockProtocol*()> f) : f_(f) {}\n\n  ProtocolPtr createProtocol(SerializationType serialization_type) override {\n    auto protocol = ProtocolPtr{f_()};\n    protocol->initSerializer(serialization_type);\n    return protocol;\n  }\n  std::string name() const override { return ProtocolNames::get().fromType(ProtocolType::Dubbo); }\n\n  std::function<MockProtocol*()> f_;\n};\n\nnamespace Router {\nclass MockRoute;\n} // namespace Router\n\nnamespace DubboFilters {\n\nclass MockFilterChainFactory : public FilterChainFactory {\npublic:\n  MockFilterChainFactory();\n  ~MockFilterChainFactory() override;\n\n  MOCK_METHOD(void, createFilterChain, (DubboFilters::FilterChainFactoryCallbacks & callbacks));\n};\n\nclass MockFilterChainFactoryCallbacks : public FilterChainFactoryCallbacks {\npublic:\n  MockFilterChainFactoryCallbacks();\n  ~MockFilterChainFactoryCallbacks() override;\n\n  MOCK_METHOD(void, addDecoderFilter, (DecoderFilterSharedPtr));\n  MOCK_METHOD(void, addEncoderFilter, (EncoderFilterSharedPtr));\n  MOCK_METHOD(void, addFilter, (CodecFilterSharedPtr));\n};\n\nclass MockDecoderFilter : public DecoderFilter {\npublic:\n  MockDecoderFilter();\n  ~MockDecoderFilter() 
override;\n\n  // DubboProxy::DubboFilters::DecoderFilter\n  MOCK_METHOD(void, onDestroy, ());\n  MOCK_METHOD(void, setDecoderFilterCallbacks, (DecoderFilterCallbacks & callbacks));\n  MOCK_METHOD(FilterStatus, onMessageDecoded, (MessageMetadataSharedPtr, ContextSharedPtr));\n\n  DecoderFilterCallbacks* callbacks_{};\n};\n\nclass MockDecoderFilterCallbacks : public DecoderFilterCallbacks {\npublic:\n  MockDecoderFilterCallbacks();\n  ~MockDecoderFilterCallbacks() override;\n\n  // DubboProxy::DubboFilters::DecoderFilterCallbacks\n  MOCK_METHOD(uint64_t, requestId, (), (const));\n  MOCK_METHOD(uint64_t, streamId, (), (const));\n  MOCK_METHOD(const Network::Connection*, connection, (), (const));\n  MOCK_METHOD(void, continueDecoding, ());\n  MOCK_METHOD(Router::RouteConstSharedPtr, route, ());\n  MOCK_METHOD(SerializationType, serializationType, (), (const));\n  MOCK_METHOD(ProtocolType, protocolType, (), (const));\n  MOCK_METHOD(void, sendLocalReply, (const DirectResponse&, bool));\n  MOCK_METHOD(void, startUpstreamResponse, ());\n  MOCK_METHOD(UpstreamResponseStatus, upstreamData, (Buffer::Instance&));\n  MOCK_METHOD(void, resetDownstreamConnection, ());\n  MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ());\n  MOCK_METHOD(void, resetStream, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n\n  uint64_t stream_id_{1};\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n  std::shared_ptr<Router::MockRoute> route_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n};\n\nclass MockEncoderFilter : public EncoderFilter {\npublic:\n  MockEncoderFilter();\n  ~MockEncoderFilter() override;\n\n  // DubboProxy::DubboFilters::EncoderFilter\n  MOCK_METHOD(void, onDestroy, ());\n  MOCK_METHOD(void, setEncoderFilterCallbacks, (EncoderFilterCallbacks & callbacks));\n  MOCK_METHOD(FilterStatus, onMessageEncoded, (MessageMetadataSharedPtr, ContextSharedPtr));\n\n  EncoderFilterCallbacks* callbacks_{};\n};\n\nclass 
MockEncoderFilterCallbacks : public EncoderFilterCallbacks {\npublic:\n  MockEncoderFilterCallbacks();\n  ~MockEncoderFilterCallbacks() override;\n\n  // DubboProxy::DubboFilters::MockEncoderFilterCallbacks\n  MOCK_METHOD(uint64_t, requestId, (), (const));\n  MOCK_METHOD(uint64_t, streamId, (), (const));\n  MOCK_METHOD(const Network::Connection*, connection, (), (const));\n  MOCK_METHOD(Router::RouteConstSharedPtr, route, ());\n  MOCK_METHOD(SerializationType, serializationType, (), (const));\n  MOCK_METHOD(ProtocolType, protocolType, (), (const));\n  MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ());\n  MOCK_METHOD(void, resetStream, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(void, continueEncoding, ());\n  MOCK_METHOD(void, continueDecoding, ());\n\n  uint64_t stream_id_{1};\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n  std::shared_ptr<Router::MockRoute> route_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n};\n\nclass MockCodecFilter : public CodecFilter {\npublic:\n  MockCodecFilter();\n  ~MockCodecFilter() override;\n\n  MOCK_METHOD(void, onDestroy, ());\n  MOCK_METHOD(void, setEncoderFilterCallbacks, (EncoderFilterCallbacks & callbacks));\n  MOCK_METHOD(FilterStatus, onMessageEncoded, (MessageMetadataSharedPtr, ContextSharedPtr));\n  MOCK_METHOD(void, setDecoderFilterCallbacks, (DecoderFilterCallbacks & callbacks));\n  MOCK_METHOD(FilterStatus, onMessageDecoded, (MessageMetadataSharedPtr, ContextSharedPtr));\n\n  DecoderFilterCallbacks* decoder_callbacks_{};\n  EncoderFilterCallbacks* encoder_callbacks_{};\n};\n\nclass MockDirectResponse : public DirectResponse {\npublic:\n  MockDirectResponse() = default;\n  ~MockDirectResponse() override = default;\n\n  MOCK_METHOD(DirectResponse::ResponseType, encode,\n              (MessageMetadata&, Protocol&, Buffer::Instance&), (const));\n};\n\ntemplate <class ConfigProto> class MockFactoryBase : public 
NamedDubboFilterConfigFactory {\npublic:\n  FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& proto_config,\n                               const std::string& stats_prefix,\n                               Server::Configuration::FactoryContext& context) override {\n    const auto& typed_config = dynamic_cast<const ConfigProto&>(proto_config);\n    return createFilterFactoryFromProtoTyped(typed_config, stats_prefix, context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ConfigProto>();\n  }\n\n  std::string name() const override { return name_; }\n\nprotected:\n  MockFactoryBase(const std::string& name) : name_(name) {}\n\nprivate:\n  virtual FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const ConfigProto& proto_config,\n                                    const std::string& stats_prefix,\n                                    Server::Configuration::FactoryContext& context) PURE;\n\n  const std::string name_;\n};\n\nclass MockFilterConfigFactory : public MockFactoryBase<ProtobufWkt::Struct> {\npublic:\n  MockFilterConfigFactory();\n  ~MockFilterConfigFactory() override;\n\n  DubboFilters::FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const ProtobufWkt::Struct& proto_config,\n                                    const std::string& stat_prefix,\n                                    Server::Configuration::FactoryContext& context) override;\n\n  std::shared_ptr<MockDecoderFilter> mock_filter_;\n  ProtobufWkt::Struct config_struct_;\n  std::string config_stat_prefix_;\n};\n\n} // namespace DubboFilters\n\nnamespace Router {\n\nclass MockRouteEntry : public RouteEntry {\npublic:\n  MockRouteEntry();\n  ~MockRouteEntry() override;\n\n  // DubboProxy::Router::RouteEntry\n  MOCK_METHOD(const std::string&, clusterName, (), (const));\n  MOCK_METHOD(const Envoy::Router::MetadataMatchCriteria*, metadataMatchCriteria, (), (const));\n\n  std::string cluster_name_{\"fake_cluster\"};\n};\n\nclass 
MockRoute : public Route {\npublic:\n  MockRoute();\n  ~MockRoute() override;\n\n  // DubboProxy::Router::Route\n  MOCK_METHOD(const RouteEntry*, routeEntry, (), (const));\n\n  NiceMock<MockRouteEntry> route_entry_;\n};\n\n} // namespace Router\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc",
    "content": "#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.pb.validate.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/route.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/v3/route.pb.validate.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/router/route_matcher.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer_impl.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\nnamespace {\n\nenvoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration\nparseRouteConfigurationFromV2Yaml(const std::string& yaml) {\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration route_config;\n  TestUtility::loadFromYaml(yaml, route_config);\n  TestUtility::validate(route_config);\n  return route_config;\n}\n\nenvoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy\nparseDubboProxyFromV2Yaml(const std::string& yaml) {\n  envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy config;\n  TestUtility::loadFromYaml(yaml, config);\n  TestUtility::validate(config);\n  return config;\n}\n\n} // namespace\n\nTEST(DubboRouteMatcherTest, RouteByServiceNameWithAnyMethod) {\n  {\n    const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          safe_regex:\n            google_re2: {}\n            regex: \"(.*?)\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n    envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n        parseRouteConfigurationFromV2Yaml(yaml);\n\n    NiceMock<Server::Configuration::MockFactoryContext> 
context;\n    SingleRouteMatcherImpl matcher(config, context);\n    auto invo = std::make_shared<RpcInvocationImpl>();\n    MessageMetadata metadata;\n    metadata.setInvocationInfo(invo);\n    invo->setMethodName(\"test\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceName(\"unknown\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceGroup(\"test\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceVersion(\"1.0.0\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n    EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n\n    // Ignore version matches if there is no version field in the configuration information.\n    invo->setServiceVersion(\"1.0.1\");\n    EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n\n    invo->setServiceGroup(\"test_one\");\n    EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n  }\n\n  // Service name with optional(version and group) matches.\n  {\n    const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nversion: 1.0.0\ngroup: test\nroutes:\n  - match:\n      method:\n        name:\n          safe_regex:\n            google_re2: {}\n            regex: \"(.*?)\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n    envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n        parseRouteConfigurationFromV2Yaml(yaml);\n\n    NiceMock<Server::Configuration::MockFactoryContext> context;\n    SingleRouteMatcherImpl matcher(config, context);\n    auto invo = std::make_shared<RpcInvocationImpl>();\n    MessageMetadata metadata;\n    metadata.setInvocationInfo(invo);\n    invo->setMethodName(\"test\");\n    
invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceGroup(\"test\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceVersion(\"1.0.0\");\n    EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n  }\n\n  // Service name with version matches.\n  {\n    const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nversion: 1.0.0\nroutes:\n  - match:\n      method:\n        name:\n          safe_regex:\n            google_re2: {}\n            regex: \"(.*?)\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n    envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n        parseRouteConfigurationFromV2Yaml(yaml);\n\n    NiceMock<Server::Configuration::MockFactoryContext> context;\n    SingleRouteMatcherImpl matcher(config, context);\n    auto invo = std::make_shared<RpcInvocationImpl>();\n    MessageMetadata metadata;\n    metadata.setInvocationInfo(invo);\n    invo->setMethodName(\"test\");\n    invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceGroup(\"test\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceVersion(\"1.0.0\");\n    EXPECT_NE(nullptr, matcher.route(metadata, 0));\n    EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n\n    // Ignore group matches if there is no group field in the configuration information.\n    invo->setServiceGroup(\"test_1\");\n    EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n  }\n\n  // Service name with group matches.\n  {\n    const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\ngroup: HSF\nroutes:\n  - match:\n      method:\n   
     name:\n          safe_regex:\n            google_re2: {}\n            regex: \"(.*?)\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n    envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n        parseRouteConfigurationFromV2Yaml(yaml);\n\n    NiceMock<Server::Configuration::MockFactoryContext> context;\n    SingleRouteMatcherImpl matcher(config, context);\n    auto invo = std::make_shared<RpcInvocationImpl>();\n    MessageMetadata metadata;\n    metadata.setInvocationInfo(invo);\n    invo->setMethodName(\"test\");\n    invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceGroup(\"test\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceVersion(\"1.0.0\");\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n    invo->setServiceGroup(\"HSF\");\n    EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n  }\n}\n\nTEST(DubboRouteMatcherTest, RouteByMethodWithExactMatch) {\n  const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          exact: \"add\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SingleRouteMatcherImpl matcher(config, context);\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  invo->setMethodName(\"sub\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  invo->setMethodName(\"add\");\n  EXPECT_EQ(\"user_service_dubbo_server\", 
matcher.route(metadata, 0)->routeEntry()->clusterName());\n}\n\nTEST(DubboRouteMatcherTest, RouteByMethodWithSuffixMatch) {\n  const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          suffix: \"test\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SingleRouteMatcherImpl matcher(config, context);\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  invo->setMethodName(\"sub\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  invo->setMethodName(\"add123test\");\n  EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n}\n\nTEST(DubboRouteMatcherTest, RouteByMethodWithPrefixMatch) {\n  const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          prefix: \"test\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SingleRouteMatcherImpl matcher(config, context);\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  invo->setMethodName(\"ab12test\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  
invo->setMethodName(\"test12d2test\");\n  EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n\n  invo->setMethodName(\"testme\");\n  EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n}\n\nTEST(DubboRouteMatcherTest, RouteByMethodWithRegexMatch) {\n  const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          safe_regex:\n            google_re2: {}\n            regex: \"\\\\d{3}test\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SingleRouteMatcherImpl matcher(config, context);\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  invo->setMethodName(\"12test\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  invo->setMethodName(\"456test\");\n  EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n\n  invo->setMethodName(\"4567test\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n}\n\nTEST(DubboRouteMatcherTest, RouteByParameterWithRangeMatch) {\n  const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          exact: \"add\"\n        params_match:\n          0:\n            range_match:\n              start: 100\n              end: 200\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      
parseRouteConfigurationFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n  invo->setMethodName(\"add\");\n  invo->addParameterValue(0, \"150\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SingleRouteMatcherImpl matcher(config, context);\n  EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n}\n\nTEST(DubboRouteMatcherTest, RouteByParameterWithExactMatch) {\n  const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          exact: \"add\"\n        params_match:\n          1:\n            exact_match: \"user_id:94562\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n  invo->setMethodName(\"add\");\n  invo->addParameterValue(1, \"user_id:94562\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SingleRouteMatcherImpl matcher(config, context);\n  EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n}\n\nTEST(DubboRouteMatcherTest, RouteWithHeaders) {\n  const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          exact: \"add\"\n      headers:\n      - name: custom\n        exact_match: \"123\"\n      - name: custom1\n        exact_match: \"123\"\n        invert_match: true\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n  
envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n  invo->setMethodName(\"add\");\n  invo->addHeader(\"custom\", \"123\");\n  std::string test_value(\"123\");\n\n  Envoy::Http::LowerCaseString test_key(\"custom1\");\n  invo->addHeaderReference(test_key, test_value);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SingleRouteMatcherImpl matcher(config, context);\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  test_value = \"456\";\n  EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0)->routeEntry()->metadataMatchCriteria());\n\n  test_value = \"123\";\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n}\n\nTEST(MultiDubboRouteMatcherTest, Route) {\n  const std::string yaml = R\"EOF(\nstat_prefix: dubbo_incomming_stats\nprotocol_type: Dubbo\nserialization_type: Hessian2\nroute_config:\n  - name: test1\n    interface: org.apache.dubbo.demo.DemoService\n    routes:\n      - match:\n          method:\n            name:\n              exact: \"add\"\n            params_match:\n              1:\n                exact_match: \"user_id\"\n        route:\n            cluster: user_service_dubbo_server\n  - name: test2\n    interface: org.apache.dubbo.demo.FormatService\n    routes:\n      - match:\n          method:\n            name:\n              exact: \"format\"\n        route:\n            cluster: format_service\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy config =\n      parseDubboProxyFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  
invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n  invo->setMethodName(\"add\");\n  invo->addParameterValue(1, \"user_id\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  MultiRouteMatcher matcher(config.route_config(), context);\n  EXPECT_EQ(\"user_service_dubbo_server\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n\n  {\n    envoy::extensions::filters::network::dubbo_proxy::v3::DubboProxy invalid_config;\n    MultiRouteMatcher matcher(invalid_config.route_config(), context);\n    EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n  }\n}\n\nTEST(DubboRouteMatcherTest, RouteByInvalidParameter) {\n  const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          exact: \"add\"\n        params_match:\n          1:\n            exact_match: \"user_id:94562\"\n    route:\n        cluster: user_service_dubbo_server\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n  invo->setMethodName(\"add\");\n\n  // There is no parameter information in metadata.\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SingleRouteMatcherImpl matcher(config, context);\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  // The parameter is empty.\n  invo->addParameterValue(1, \"\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  {\n    auto invo = std::make_shared<RpcInvocationImpl>();\n    MessageMetadata metadata;\n    metadata.setInvocationInfo(invo);\n    invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n    invo->setMethodName(\"add\");\n    invo->addParameterValue(1, \"user_id:562\");\n    EXPECT_EQ(nullptr, 
matcher.route(metadata, 0));\n  }\n}\n\nTEST(DubboRouteMatcherTest, WeightedClusters) {\n  const std::string yaml = R\"EOF(\nname: local_route\ninterface: org.apache.dubbo.demo.DemoService\nroutes:\n  - match:\n      method:\n        name:\n          exact: \"method1\"\n    route:\n      weighted_clusters:\n        clusters:\n          - name: cluster1\n            weight: 30\n          - name: cluster2\n            weight: 30\n          - name: cluster3\n            weight: 40\n  - match:\n      method:\n        name:\n          exact: \"method2\"\n    route:\n      weighted_clusters:\n        clusters:\n          - name: cluster1\n            weight: 2000\n          - name: cluster2\n            weight: 3000\n          - name: cluster3\n            weight: 5000\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV2Yaml(yaml);\n  auto invo = std::make_shared<RpcInvocationImpl>();\n  MessageMetadata metadata;\n  metadata.setInvocationInfo(invo);\n  invo->setServiceName(\"org.apache.dubbo.demo.DemoService\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SingleRouteMatcherImpl matcher(config, context);\n\n  {\n    invo->setMethodName(\"method1\");\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 29)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", matcher.route(metadata, 30)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", matcher.route(metadata, 59)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", matcher.route(metadata, 60)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", matcher.route(metadata, 99)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 100)->routeEntry()->clusterName());\n    EXPECT_EQ(nullptr, matcher.route(metadata, 100)->routeEntry()->metadataMatchCriteria());\n  }\n\n  {\n    
invo->setMethodName(\"method2\");\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 1999)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", matcher.route(metadata, 2000)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", matcher.route(metadata, 4999)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", matcher.route(metadata, 5000)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", matcher.route(metadata, 9999)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 10000)->routeEntry()->clusterName());\n    EXPECT_EQ(nullptr, matcher.route(metadata, 10000)->routeEntry()->metadataMatchCriteria());\n  }\n}\n\nTEST(DubboRouteMatcherTest, WeightedClusterMissingWeight) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method:\n        name:\n          exact: \"method1\"\n    route:\n      weighted_clusters:\n        clusters:\n          - name: cluster1\n            weight: 20000\n          - name: cluster2\n          - name: cluster3\n            weight: 5000\n)EOF\";\n\n  envoy::extensions::filters::network::dubbo_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV2Yaml(yaml);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(SingleRouteMatcherImpl m(config, context), EnvoyException);\n}\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/router_filter_config_test.cc",
    "content": "#include \"envoy/extensions/filters/network/dubbo_proxy/router/v3/router.pb.h\"\n#include \"envoy/extensions/filters/network/dubbo_proxy/router/v3/router.pb.validate.h\"\n\n#include \"extensions/filters/network/dubbo_proxy/filters/well_known_names.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/config.h\"\n\n#include \"test/extensions/filters/network/dubbo_proxy/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\nTEST(DubboProxyRouterFilterConfigTest, RouterV2Alpha1Filter) {\n  envoy::extensions::filters::network::dubbo_proxy::router::v3::Router router_config;\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RouterFilterConfig factory;\n  DubboFilters::FilterFactoryCb cb =\n      factory.createFilterFactoryFromProto(router_config, \"stats\", context);\n  DubboFilters::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addDecoderFilter(_));\n  cb(filter_callback);\n}\n\nTEST(DubboProxyRouterFilterConfigTest, RouterFilterWithEmptyProtoConfig) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RouterFilterConfig factory;\n  DubboFilters::FilterFactoryCb cb =\n      factory.createFilterFactoryFromProto(*factory.createEmptyConfigProto(), \"stats\", context);\n  DubboFilters::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addDecoderFilter(_));\n  cb(filter_callback);\n}\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/router_test.cc",
    "content": "#include <memory>\n\n#include \"extensions/filters/network/dubbo_proxy/app_exception.h\"\n#include \"extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/message_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/protocol.h\"\n#include \"extensions/filters/network/dubbo_proxy/router/router_impl.h\"\n#include \"extensions/filters/network/dubbo_proxy/serializer_impl.h\"\n\n#include \"test/extensions/filters/network/dubbo_proxy/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ContainsRegex;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\nnamespace Router {\n\nnamespace {\n\nclass TestNamedSerializerConfigFactory : public NamedSerializerConfigFactory {\npublic:\n  TestNamedSerializerConfigFactory(std::function<MockSerializer*()> f) : f_(f) {}\n\n  SerializerPtr createSerializer() override { return SerializerPtr{f_()}; }\n  std::string name() const override {\n    return SerializerNames::get().fromType(SerializationType::Hessian2);\n  }\n\n  std::function<MockSerializer*()> f_;\n};\n\nclass TestNamedProtocolConfigFactory : public NamedProtocolConfigFactory {\npublic:\n  TestNamedProtocolConfigFactory(std::function<MockProtocol*()> f) : f_(f) {}\n\n  ProtocolPtr createProtocol(SerializationType serialization_type) override {\n    auto protocol = ProtocolPtr{f_()};\n    protocol->initSerializer(serialization_type);\n    return protocol;\n  }\n  std::string name() const override { return ProtocolNames::get().fromType(ProtocolType::Dubbo); }\n\n  std::function<MockProtocol*()> f_;\n};\n\n} 
// namespace\n\nclass DubboRouterTestBase {\npublic:\n  DubboRouterTestBase()\n      : serializer_factory_([&]() -> MockSerializer* {\n          ASSERT(serializer_ == nullptr);\n          serializer_ = new NiceMock<MockSerializer>();\n          if (mock_serializer_cb_) {\n            mock_serializer_cb_(serializer_);\n          }\n          return serializer_;\n        }),\n        protocol_factory_([&]() -> MockProtocol* {\n          ASSERT(protocol_ == nullptr);\n          protocol_ = new NiceMock<MockProtocol>();\n          if (mock_protocol_cb_) {\n            mock_protocol_cb_(protocol_);\n          }\n          return protocol_;\n        }),\n        serializer_register_(serializer_factory_), protocol_register_(protocol_factory_) {}\n\n  void initializeRouter() {\n    route_ = new NiceMock<MockRoute>();\n    route_ptr_.reset(route_);\n\n    router_ = std::make_unique<Router>(context_.clusterManager());\n\n    EXPECT_EQ(nullptr, router_->downstreamConnection());\n\n    router_->setDecoderFilterCallbacks(callbacks_);\n  }\n\n  void initializeMetadata(MessageType msg_type) {\n    msg_type_ = msg_type;\n\n    metadata_ = std::make_shared<MessageMetadata>();\n    metadata_->setMessageType(msg_type_);\n    metadata_->setRequestId(1);\n\n    auto invo = std::make_shared<RpcInvocationImpl>();\n    metadata_->setInvocationInfo(invo);\n    invo->setMethodName(\"test\");\n\n    message_context_ = std::make_shared<ContextImpl>();\n  }\n\n  void startRequest(MessageType msg_type) {\n    initializeMetadata(msg_type);\n\n    EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_));\n    EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n    EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n\n    EXPECT_CALL(callbacks_, serializationType()).WillOnce(Return(SerializationType::Hessian2));\n    EXPECT_CALL(callbacks_, protocolType()).WillOnce(Return(ProtocolType::Dubbo));\n\n    EXPECT_EQ(FilterStatus::StopIteration, 
router_->onMessageDecoded(metadata_, message_context_));\n\n    EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_));\n    EXPECT_EQ(&connection_, router_->downstreamConnection());\n\n    // Not yet implemented:\n    EXPECT_EQ(absl::optional<uint64_t>(), router_->computeHashKey());\n    EXPECT_EQ(nullptr, router_->metadataMatchCriteria());\n    EXPECT_EQ(nullptr, router_->downstreamHeaders());\n  }\n\n  void connectUpstream() {\n    EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_))\n        .WillOnce(Invoke([&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void {\n          upstream_callbacks_ = &cb;\n        }));\n\n    conn_state_.reset();\n    EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, connectionState())\n        .WillRepeatedly(\n            Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); }));\n\n    EXPECT_CALL(callbacks_, continueDecoding());\n    context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_);\n\n    EXPECT_NE(nullptr, upstream_callbacks_);\n  }\n\n  void startRequestWithExistingConnection(MessageType msg_type) {\n    EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_));\n    EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n    EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n\n    initializeMetadata(msg_type);\n\n    EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_))\n        .WillOnce(Invoke([&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void {\n          upstream_callbacks_ = &cb;\n        }));\n\n    EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_));\n    EXPECT_EQ(&connection_, router_->downstreamConnection());\n\n    // Not yet implemented:\n    EXPECT_EQ(absl::optional<uint64_t>(), router_->computeHashKey());\n    EXPECT_EQ(nullptr, 
router_->metadataMatchCriteria());\n    EXPECT_EQ(nullptr, router_->downstreamHeaders());\n\n    EXPECT_CALL(callbacks_, serializationType()).WillOnce(Return(SerializationType::Hessian2));\n    EXPECT_CALL(callbacks_, protocolType()).WillOnce(Return(ProtocolType::Dubbo));\n\n    EXPECT_CALL(callbacks_, continueDecoding()).Times(0);\n    EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_))\n        .WillOnce(\n            Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* {\n              context_.cluster_manager_.tcp_conn_pool_.newConnectionImpl(cb);\n              context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_);\n              return nullptr;\n            }));\n  }\n\n  void returnResponse() {\n    Buffer::OwnedImpl buffer;\n\n    EXPECT_CALL(callbacks_, startUpstreamResponse());\n\n    EXPECT_CALL(callbacks_, upstreamData(Ref(buffer)))\n        .WillOnce(Return(DubboFilters::UpstreamResponseStatus::MoreData));\n    upstream_callbacks_->onUpstreamData(buffer, false);\n\n    // Nothing to do.\n    upstream_callbacks_->onAboveWriteBufferHighWatermark();\n    upstream_callbacks_->onBelowWriteBufferLowWatermark();\n\n    EXPECT_CALL(callbacks_, upstreamData(Ref(buffer)))\n        .WillOnce(Return(DubboFilters::UpstreamResponseStatus::Complete));\n    EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, released(Ref(upstream_connection_)));\n    upstream_callbacks_->onUpstreamData(buffer, false);\n  }\n\n  void destroyRouter() {\n    router_->onDestroy();\n    router_.reset();\n  }\n\n  TestNamedSerializerConfigFactory serializer_factory_;\n  TestNamedProtocolConfigFactory protocol_factory_;\n  Registry::InjectFactory<NamedSerializerConfigFactory> serializer_register_;\n  Registry::InjectFactory<NamedProtocolConfigFactory> protocol_register_;\n\n  std::function<void(MockSerializer*)> mock_serializer_cb_{};\n  std::function<void(MockProtocol*)> mock_protocol_cb_{};\n\n  
NiceMock<Server::Configuration::MockFactoryContext> context_;\n  NiceMock<Network::MockClientConnection> connection_;\n  NiceMock<DubboFilters::MockDecoderFilterCallbacks> callbacks_;\n  NiceMock<MockSerializer>* serializer_{};\n  NiceMock<MockProtocol>* protocol_{};\n  NiceMock<MockRoute>* route_{};\n  NiceMock<MockRouteEntry> route_entry_;\n  NiceMock<Upstream::MockHostDescription>* host_{};\n  Tcp::ConnectionPool::ConnectionStatePtr conn_state_;\n\n  RouteConstSharedPtr route_ptr_;\n  std::unique_ptr<Router> router_;\n\n  std::string cluster_name_{\"cluster\"};\n\n  MessageType msg_type_{MessageType::Request};\n  MessageMetadataSharedPtr metadata_;\n  ContextSharedPtr message_context_;\n\n  Tcp::ConnectionPool::UpstreamCallbacks* upstream_callbacks_{};\n  NiceMock<Network::MockClientConnection> upstream_connection_;\n};\n\nclass DubboRouterTest : public DubboRouterTestBase, public testing::Test {};\n\nTEST_F(DubboRouterTest, PoolRemoteConnectionFailure) {\n  initializeRouter();\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DubboFilters::DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(ResponseStatus::ServerError, app_ex.status_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*connection failure.*\"));\n        EXPECT_FALSE(end_stream);\n      }));\n  startRequest(MessageType::Request);\n\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(\n      ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n}\n\nTEST_F(DubboRouterTest, PoolTimeout) {\n  initializeRouter();\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DubboFilters::DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(ResponseStatus::ServerError, app_ex.status_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*connection 
failure.*\"));\n        EXPECT_FALSE(end_stream);\n      }));\n  startRequest(MessageType::Request);\n\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(ConnectionPool::PoolFailureReason::Timeout);\n}\n\nTEST_F(DubboRouterTest, PoolOverflowFailure) {\n  initializeRouter();\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DubboFilters::DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(ResponseStatus::ServerError, app_ex.status_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*too many connections.*\"));\n        EXPECT_FALSE(end_stream);\n      }));\n  startRequest(MessageType::Request);\n\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(ConnectionPool::PoolFailureReason::Overflow);\n}\n\nTEST_F(DubboRouterTest, ClusterMaintenanceMode) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n\n  EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_));\n  EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n  EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n  EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.cluster_.info_, maintenanceMode())\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DubboFilters::DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(ResponseStatus::ServerError, app_ex.status_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*maintenance mode.*\"));\n        EXPECT_FALSE(end_stream);\n      }));\n  EXPECT_EQ(FilterStatus::StopIteration, router_->onMessageDecoded(metadata_, message_context_));\n}\n\nTEST_F(DubboRouterTest, NoHealthyHosts) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n\n  EXPECT_CALL(callbacks_, 
route()).WillOnce(Return(route_ptr_));\n  EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n  EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n  EXPECT_CALL(context_.cluster_manager_, tcpConnPoolForCluster(cluster_name_, _, _))\n      .WillOnce(Return(nullptr));\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DubboFilters::DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(ResponseStatus::ServerError, app_ex.status_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*no healthy upstream.*\"));\n        EXPECT_FALSE(end_stream);\n      }));\n\n  EXPECT_EQ(FilterStatus::StopIteration, router_->onMessageDecoded(metadata_, message_context_));\n}\n\nTEST_F(DubboRouterTest, PoolConnectionFailureWithOnewayMessage) {\n  initializeRouter();\n  initializeMetadata(MessageType::Oneway);\n\n  EXPECT_CALL(callbacks_, protocolType()).WillOnce(Return(ProtocolType::Dubbo));\n  EXPECT_CALL(callbacks_, serializationType()).WillOnce(Return(SerializationType::Hessian2));\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _)).Times(0);\n  EXPECT_CALL(callbacks_, resetStream()).Times(1);\n  EXPECT_EQ(FilterStatus::StopIteration, router_->onMessageDecoded(metadata_, message_context_));\n\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(\n      ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, NoRoute) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n\n  EXPECT_CALL(callbacks_, route()).WillOnce(Return(nullptr));\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DubboFilters::DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(ResponseStatus::ServiceNotFound, app_ex.status_);\n        
EXPECT_THAT(app_ex.what(), ContainsRegex(\".*no route.*\"));\n        EXPECT_FALSE(end_stream);\n      }));\n  EXPECT_EQ(FilterStatus::StopIteration, router_->onMessageDecoded(metadata_, message_context_));\n}\n\nTEST_F(DubboRouterTest, NoCluster) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n\n  EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_));\n  EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n  EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n  EXPECT_CALL(context_.cluster_manager_, get(Eq(cluster_name_))).WillOnce(Return(nullptr));\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DubboFilters::DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(ResponseStatus::ServerError, app_ex.status_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*unknown cluster.*\"));\n        EXPECT_FALSE(end_stream);\n      }));\n  EXPECT_EQ(FilterStatus::StopIteration, router_->onMessageDecoded(metadata_, message_context_));\n}\n\nTEST_F(DubboRouterTest, UnexpectedRouterDestroy) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n  EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(std::string({'\\xda', '\\xbb', 0x42, 20})); // Header\n  buffer.add(\"test\");                                  // Body\n\n  auto ctx = static_cast<ContextImpl*>(message_context_.get());\n  ctx->messageOriginData().move(buffer, buffer.length());\n  startRequest(MessageType::Request);\n  connectUpstream();\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, UpstreamRemoteCloseMidResponse) {\n  initializeRouter();\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DubboFilters::DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = 
dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(ResponseStatus::ServerError, app_ex.status_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*connection failure.*\"));\n        EXPECT_FALSE(end_stream);\n      }));\n  startRequest(MessageType::Request);\n  connectUpstream();\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::RemoteClose);\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, UpstreamLocalCloseMidResponse) {\n  initializeRouter();\n  startRequest(MessageType::Request);\n  connectUpstream();\n\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::LocalClose);\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, OneWay) {\n  initializeRouter();\n  initializeMetadata(MessageType::Oneway);\n\n  EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, released(Ref(upstream_connection_)));\n\n  startRequest(MessageType::Oneway);\n  connectUpstream();\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, Call) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n\n  EXPECT_CALL(upstream_connection_, write(_, false));\n\n  startRequest(MessageType::Request);\n  connectUpstream();\n  returnResponse();\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, DecoderFilterCallbacks) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n\n  EXPECT_CALL(upstream_connection_, write(_, false));\n  EXPECT_CALL(callbacks_, startUpstreamResponse()).Times(1);\n  EXPECT_CALL(callbacks_, upstreamData(_)).Times(1);\n\n  startRequest(MessageType::Request);\n  connectUpstream();\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(std::string(\"This is the test data\"));\n  router_->onUpstreamData(buffer, true);\n\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, UpstreamDataReset) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n\n  EXPECT_CALL(callbacks_, startUpstreamResponse()).Times(1);\n  EXPECT_CALL(callbacks_, upstreamData(_))\n      .WillOnce(Return(DubboFilters::UpstreamResponseStatus::Reset));\n  
EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n\n  startRequest(MessageType::Request);\n  connectUpstream();\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(std::string(\"This is the test data\"));\n  router_->onUpstreamData(buffer, false);\n\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, StartRequestWithExistingConnection) {\n  initializeRouter();\n  startRequestWithExistingConnection(MessageType::Request);\n\n  EXPECT_EQ(FilterStatus::Continue, router_->onMessageDecoded(metadata_, message_context_));\n\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, DestroyWhileConnecting) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n\n  NiceMock<Envoy::ConnectionPool::MockCancellable> conn_pool_handle;\n  EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_))\n      .WillOnce(Invoke([&](Tcp::ConnectionPool::Callbacks&) -> Tcp::ConnectionPool::Cancellable* {\n        return &conn_pool_handle;\n      }));\n\n  EXPECT_CALL(conn_pool_handle, cancel(Tcp::ConnectionPool::CancelPolicy::Default));\n\n  startRequest(MessageType::Request);\n  router_->onDestroy();\n\n  destroyRouter();\n}\n\nTEST_F(DubboRouterTest, LocalClosedWhileResponseComplete) {\n  initializeRouter();\n  initializeMetadata(MessageType::Request);\n\n  EXPECT_CALL(callbacks_, startUpstreamResponse()).Times(1);\n  EXPECT_CALL(callbacks_, upstreamData(_))\n      .WillOnce(Return(DubboFilters::UpstreamResponseStatus::Complete));\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _)).Times(0);\n\n  startRequest(MessageType::Request);\n  connectUpstream();\n\n  Buffer::OwnedImpl buffer;\n  buffer.add(std::string(\"This is the test data\"));\n  router_->onUpstreamData(buffer, false);\n\n  upstream_connection_.close(Network::ConnectionCloseType::NoFlush);\n\n  destroyRouter();\n}\n\n} // namespace Router\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/dubbo_proxy/utility.h",
    "content": "#pragma once\n\n#include <initializer_list>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/byte_order.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace DubboProxy {\n\ninline void addInt32(Buffer::Instance& buffer, uint32_t value) {\n  value = htobe32(value);\n  buffer.add(&value, 4);\n}\n\ninline void addInt64(Buffer::Instance& buffer, uint64_t value) {\n  value = htobe64(value);\n  buffer.add(&value, 8);\n}\n\n} // namespace DubboProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/ext_authz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"ext_authz_test\",\n    srcs = [\"ext_authz_test.cc\"],\n    extension_name = \"envoy.filters.network.ext_authz\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/ext_authz\",\n        \"//test/extensions/filters/common/ext_authz:ext_authz_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.ext_authz\",\n    deps = [\n        \"//source/extensions/filters/network/ext_authz:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"ext_authz_fuzz_proto\",\n    srcs = [\"ext_authz_fuzz.proto\"],\n    deps = [\n        \"@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"ext_authz_fuzz_test\",\n    srcs = [\"ext_authz_fuzz_test.cc\"],\n    corpus = 
\"ext_authz_corpus\",\n    deps = [\n        \":ext_authz_fuzz_proto_cc_proto\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/network/ext_authz\",\n        \"//test/extensions/filters/common/ext_authz:ext_authz_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/ext_authz/config_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.validate.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"extensions/filters/network/ext_authz/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ExtAuthz {\n\nnamespace {\nvoid expectCorrectProto(envoy::config::core::v3::ApiVersion api_version) {\n  std::string yaml = R\"EOF(\n  grpc_service:\n    google_grpc:\n      target_uri: ext_authz_server\n      stat_prefix: google\n  failure_mode_allow: false\n  stat_prefix: name\n  transport_api_version: {}\n)EOF\";\n\n  ExtAuthzConfigFactory factory;\n  ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto();\n  TestUtility::loadFromYaml(\n      fmt::format(yaml, TestUtility::getVersionStringFromApiVersion(api_version)), *proto_config);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  EXPECT_CALL(context.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n        return std::make_unique<NiceMock<Grpc::MockAsyncClientFactory>>();\n      }));\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n} // namespace\n\nTEST(ExtAuthzFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(ExtAuthzConfigFactory().createFilterFactoryFromProto(\n                   envoy::extensions::filters::network::ext_authz::v3::ExtAuthz(), 
context),\n               ProtoValidationException);\n}\n\nTEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) {\n  expectCorrectProto(envoy::config::core::v3::ApiVersion::AUTO);\n  expectCorrectProto(envoy::config::core::v3::ApiVersion::V2);\n  expectCorrectProto(envoy::config::core::v3::ApiVersion::V3);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(ExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.ext_authz\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedNetworkFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace ExtAuthz\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-309531f09ce8c0c71f272c7145da9d5528c3e8fc",
    "content": "config {\n  stat_prefix: \"\\361\\261\\261\\261\\361\\261\\261\\261\\361\\261\\261\\261\\361\\261\\261\\261\\361\\261\\261\\261\\361\\261\\261\\261\\321\\261\"\n  failure_mode_allow: true\n  include_peer_certificate: true\n}\nactions {\n  on_data {\n    result {\n      check_status_ok {\n      }\n    }\n    data: \"123\"\n  }\n}\nactions {\n  remote_close {\n  }\n}\nactions {\n  local_close {\n  }\n}\n\n"
  },
  {
    "path": "test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-72c994c40b30ff66b72f401055681e9851fea7a2",
    "content": "config {\n  stat_prefix: \"envoy.extensions.filters.network.e\"\n  failure_mode_allow: true\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\"\n    end_stream: true\n    result {\n      check_status_denied {\n      }\n    }\n  }\n}\nactions {\n  on_data {\n    data: \"CCCCCCCCCCCC\"\n    end_stream: true\n    result {\n      check_status_denied {\n      }\n    }\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\000\\000\\000\"\n    end_stream: true\n    result {\n      check_status_error {\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto",
    "content": "syntax = \"proto3\";\npackage envoy.extensions.filters.network.ext_authz;\n\nimport \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"validate/validate.proto\";\n\nmessage Result {\n  oneof result_selector {\n    option (validate.required) = true;\n    // Authorization check status\n    google.protobuf.Empty check_status_error = 1;\n    google.protobuf.Empty check_status_denied = 2;\n    google.protobuf.Empty check_status_ok = 3;\n  }\n}\n\nmessage OnData {\n  bytes data = 1;\n  bool end_stream = 2;\n  // optional: to set the default authorization check result for this and the following onData()\n  Result result = 3;\n}\n\nmessage Action {\n  oneof action_selector {\n    option (validate.required) = true;\n    // Call onNewConnection().\n    google.protobuf.Empty on_new_connection = 1;\n    // Call onData().\n    OnData on_data = 2;\n    // Connection close\n    google.protobuf.Empty remote_close = 3;\n    google.protobuf.Empty local_close = 4;\n  }\n}\nmessage ExtAuthzTestCase {\n  envoy.extensions.filters.network.ext_authz.v3.ExtAuthz config = 1\n      [(validate.rules).message = {required: true}];\n  repeated Action actions = 2;\n}\n"
  },
  {
    "path": "test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc",
    "content": "#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"extensions/filters/network/ext_authz/ext_authz.h\"\n\n#include \"test/extensions/filters/common/ext_authz/mocks.h\"\n#include \"test/extensions/filters/network/ext_authz/ext_authz_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::ReturnRef;\nusing testing::WithArgs;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ExtAuthz {\n\nFilters::Common::ExtAuthz::ResponsePtr\nmakeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus status) {\n  Filters::Common::ExtAuthz::ResponsePtr response =\n      std::make_unique<Filters::Common::ExtAuthz::Response>();\n  response->status = status;\n  return response;\n}\n\nFilters::Common::ExtAuthz::CheckStatus resultCaseToCheckStatus(\n    envoy::extensions::filters::network::ext_authz::Result::ResultSelectorCase result_case) {\n  Filters::Common::ExtAuthz::CheckStatus check_status;\n  switch (result_case) {\n  case envoy::extensions::filters::network::ext_authz::Result::kCheckStatusOk: {\n    check_status = Filters::Common::ExtAuthz::CheckStatus::OK;\n    break;\n  }\n  case envoy::extensions::filters::network::ext_authz::Result::kCheckStatusError: {\n    check_status = Filters::Common::ExtAuthz::CheckStatus::Error;\n    break;\n  }\n  case envoy::extensions::filters::network::ext_authz::Result::kCheckStatusDenied: {\n    check_status = Filters::Common::ExtAuthz::CheckStatus::Denied;\n    break;\n  }\n  default: {\n    // Unhandled status\n    PANIC(\"A check status handle is missing\");\n  }\n  }\n  return check_status;\n}\n\nDEFINE_PROTO_FUZZER(const envoy::extensions::filters::network::ext_authz::ExtAuthzTestCase& input) {\n  try {\n    
TestUtility::validate(input);\n  } catch (const EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException during validation: {}\", e.what());\n    return;\n  }\n\n  Stats::TestUtil::TestStore stats_store;\n  Filters::Common::ExtAuthz::MockClient* client = new Filters::Common::ExtAuthz::MockClient();\n  envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config = input.config();\n\n  ConfigSharedPtr config = std::make_shared<Config>(proto_config, stats_store);\n  std::unique_ptr<Filter> filter =\n      std::make_unique<Filter>(config, Filters::Common::ExtAuthz::ClientPtr{client});\n\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks;\n  filter->initializeReadFilterCallbacks(filter_callbacks);\n  static Network::Address::InstanceConstSharedPtr addr =\n      std::make_shared<Network::Address::PipeInstance>(\"/test/test.sock\");\n\n  ON_CALL(filter_callbacks.connection_, remoteAddress()).WillByDefault(ReturnRef(addr));\n  ON_CALL(filter_callbacks.connection_, localAddress()).WillByDefault(ReturnRef(addr));\n\n  for (const auto& action : input.actions()) {\n    switch (action.action_selector_case()) {\n    case envoy::extensions::filters::network::ext_authz::Action::kOnData: {\n      // Optional input field to set default authorization check result for the following \"onData()\"\n      if (action.on_data().has_result()) {\n        ON_CALL(*client, check(_, _, _, _, _))\n            .WillByDefault(WithArgs<0>(\n                Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n                  callbacks.onComplete(makeAuthzResponse(\n                      resultCaseToCheckStatus(action.on_data().result().result_selector_case())));\n                })));\n      }\n      Buffer::OwnedImpl buffer(action.on_data().data());\n      filter->onData(buffer, action.on_data().end_stream());\n      break;\n    }\n    case envoy::extensions::filters::network::ext_authz::Action::kOnNewConnection: {\n      
filter->onNewConnection();\n      break;\n    }\n    case envoy::extensions::filters::network::ext_authz::Action::kRemoteClose: {\n      filter_callbacks.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n      break;\n    }\n    case envoy::extensions::filters::network::ext_authz::Action::kLocalClose: {\n      filter_callbacks.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n      break;\n    }\n    default: {\n      // Unhandled actions\n      PANIC(\"A case is missing for an action\");\n    }\n    }\n  }\n}\n\n} // namespace ExtAuthz\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/network/ext_authz/ext_authz_test.cc",
    "content": "#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.h\"\n#include \"envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.validate.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/ext_authz/ext_authz.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/extensions/filters/common/ext_authz/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::ReturnRef;\nusing testing::WithArgs;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ExtAuthz {\n\nclass ExtAuthzFilterTest : public testing::Test {\npublic:\n  void initialize(std::string yaml) {\n    envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config{};\n    TestUtility::loadFromYaml(yaml, proto_config);\n    config_ = std::make_shared<Config>(proto_config, stats_store_);\n    client_ = new Filters::Common::ExtAuthz::MockClient();\n    filter_ = std::make_unique<Filter>(config_, Filters::Common::ExtAuthz::ClientPtr{client_});\n    filter_->initializeReadFilterCallbacks(filter_callbacks_);\n    addr_ = std::make_shared<Network::Address::PipeInstance>(\"/test/test.sock\");\n\n    // NOP currently.\n    filter_->onAboveWriteBufferHighWatermark();\n    filter_->onBelowWriteBufferLowWatermark();\n  }\n\n  Filters::Common::ExtAuthz::ResponsePtr\n  makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus status) {\n    Filters::Common::ExtAuthz::ResponsePtr response 
=\n        std::make_unique<Filters::Common::ExtAuthz::Response>();\n    response->status = status;\n    return response;\n  }\n\n  ~ExtAuthzFilterTest() override {\n    for (const Stats::GaugeSharedPtr& gauge : stats_store_.gauges()) {\n      EXPECT_EQ(0U, gauge->value());\n    }\n  }\n\n  void expectOKWithOnData() {\n    EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n    EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n    EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _))\n        .WillOnce(\n            WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n              request_callbacks_ = &callbacks;\n            })));\n\n    EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n    // Confirm that the invocation of onNewConnection did NOT increment the active or total count!\n    EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.total\").value());\n    EXPECT_EQ(\n        0U,\n        stats_store_.gauge(\"ext_authz.name.active\", Stats::Gauge::ImportMode::Accumulate).value());\n    Buffer::OwnedImpl data(\"hello\");\n    EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n    // Confirm that the invocation of onData does increment the active and total count!\n    EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n    EXPECT_EQ(\n        1U,\n        stats_store_.gauge(\"ext_authz.name.active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n    Filters::Common::ExtAuthz::Response response{};\n    response.status = Filters::Common::ExtAuthz::CheckStatus::OK;\n    response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{\"foo\"}, \"bar\"}};\n\n    auto* fields = response.dynamic_metadata.mutable_fields();\n    (*fields)[\"foo\"] = ValueUtil::stringValue(\"ok\");\n    (*fields)[\"bar\"] = ValueUtil::numberValue(1);\n\n    
EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _))\n        .WillOnce(Invoke([&response](const std::string& ns,\n                                     const ProtobufWkt::Struct& returned_dynamic_metadata) {\n          EXPECT_EQ(ns, NetworkFilterNames::get().ExtAuthorization);\n          EXPECT_TRUE(\n              TestUtility::protoEqual(returned_dynamic_metadata, response.dynamic_metadata));\n        }));\n\n    EXPECT_CALL(filter_callbacks_, continueReading());\n    request_callbacks_->onComplete(std::make_unique<Filters::Common::ExtAuthz::Response>(response));\n\n    EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n    EXPECT_CALL(*client_, cancel()).Times(0);\n    filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n\n    EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n    EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n    EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.error\").value());\n    EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n    EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n    EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.denied\").value());\n    EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.ok\").value());\n    EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n  }\n\n  Stats::TestUtil::TestStore stats_store_;\n  ConfigSharedPtr config_;\n  Filters::Common::ExtAuthz::MockClient* client_;\n  std::unique_ptr<Filter> filter_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  Network::Address::InstanceConstSharedPtr addr_;\n  Filters::Common::ExtAuthz::RequestCallbacks* request_callbacks_{};\n  const std::string default_yaml_string_ = R\"EOF(\ngrpc_service:\n  envoy_grpc:\n    cluster_name: ext_authz_server\n\nfailure_mode_allow: true\nstat_prefix: name\n  )EOF\";\n  const std::string 
metadata_yaml_string_ = R\"EOF(\ngrpc_service:\n  envoy_grpc:\n    cluster_name: ext_authz_server\nfailure_mode_allow: true\nstat_prefix: name\nfilter_enabled_metadata:\n  filter: \"abc.xyz\"\n  path:\n  - key: \"k1\"\n  value:\n    string_match:\n      exact: \"check\"\n  )EOF\";\n};\n\nTEST_F(ExtAuthzFilterTest, BadExtAuthzConfig) {\n  std::string yaml_string = R\"EOF(\ngrpc_service: {}\nstat_prefix: name\n  )EOF\";\n\n  envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config{};\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n\n  EXPECT_THROW(\n      TestUtility::downcastAndValidate<\n          const envoy::extensions::filters::network::ext_authz::v3::ExtAuthz&>(proto_config),\n      ProtoValidationException);\n}\n\nTEST_F(ExtAuthzFilterTest, OKWithOnData) {\n  initialize(default_yaml_string_);\n  expectOKWithOnData();\n}\n\nTEST_F(ExtAuthzFilterTest, DeniedWithOnData) {\n  initialize(default_yaml_string_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  // Confirm that the invocation of onNewConnection did NOT increment the active or total count!\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(\n      0U,\n      stats_store_.gauge(\"ext_authz.name.active\", Stats::Gauge::ImportMode::Accumulate).value());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n  // Confirm that the invocation of onData does increment the active and total count!\n  EXPECT_EQ(1U, 
stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(\n      1U,\n      stats_store_.gauge(\"ext_authz.name.active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  request_callbacks_->onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Denied));\n\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.error\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\nTEST_F(ExtAuthzFilterTest, FailOpen) {\n  initialize(default_yaml_string_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(_)).Times(0);\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  EXPECT_CALL(filter_callbacks_, 
continueReading());\n  request_callbacks_->onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.error\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\nTEST_F(ExtAuthzFilterTest, FailClose) {\n  initialize(default_yaml_string_);\n  InSequence s;\n  // Explicitly set the failure_mode_allow to false.\n  config_->setFailModeAllow(false);\n\n  EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(_)).Times(1);\n  EXPECT_CALL(filter_callbacks_, continueReading()).Times(0);\n  request_callbacks_->onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error));\n\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(1U, 
stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.error\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\n// Test to verify that when callback from the authorization service has completed the filter\n// does not invoke Cancel on RemoteClose event.\nTEST_F(ExtAuthzFilterTest, DoNotCallCancelonRemoteClose) {\n  initialize(default_yaml_string_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_CALL(filter_callbacks_, continueReading());\n  request_callbacks_->onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.error\").value());\n  
EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\n// Test to verify that Cancel is invoked when a RemoteClose event occurs while the call\n// to the authorization service was in progress.\nTEST_F(ExtAuthzFilterTest, VerifyCancelOnRemoteClose) {\n  initialize(default_yaml_string_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.error\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\n// 
Test to verify that on stack response from the authorization service does NOT\n// result in calling cancel.\nTEST_F(ExtAuthzFilterTest, ImmediateOK) {\n  initialize(default_yaml_string_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_, continueReading()).Times(0);\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::OK));\n          })));\n  EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)).Times(0);\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.error\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\n// Test to verify that on stack denied response from the authorization service does\n// result in stoppage of the filter 
chain.\nTEST_F(ExtAuthzFilterTest, ImmediateNOK) {\n  initialize(default_yaml_string_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_, continueReading()).Times(0);\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Denied));\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.error\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\n// Test to verify that on stack Error response when failure_mode_allow is configured\n// result in request being allowed.\nTEST_F(ExtAuthzFilterTest, ImmediateErrorFailOpen) {\n  initialize(default_yaml_string_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_, continueReading()).Times(0);\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n    
      WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error));\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.error\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\n// Test to verify that timeout the proper stat is incremented.\nTEST_F(ExtAuthzFilterTest, TimeoutError) {\n  initialize(default_yaml_string_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_));\n  EXPECT_CALL(filter_callbacks_, continueReading()).Times(0);\n  EXPECT_CALL(*client_, check(_, _, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void {\n            auto resp = makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error);\n            resp->error_kind = Filters::Common::ExtAuthz::ErrorKind::Timedout;\n            
callbacks.onComplete(std::move(resp));\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.error\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\n// Test to verify the filter is disabled with metadata.\nTEST_F(ExtAuthzFilterTest, DisabledWithMetadata) {\n  initialize(metadata_yaml_string_);\n\n  // Disable in filter_enabled_metadata.\n  const std::string yaml = R\"EOF(\n  filter_metadata:\n    abc.xyz:\n      k1: skip\n  )EOF\";\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(yaml, metadata);\n  ON_CALL(filter_callbacks_.connection_.stream_info_, dynamicMetadata())\n      .WillByDefault(ReturnRef(metadata));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);\n  EXPECT_CALL(filter_callbacks_.connection_, close(_)).Times(0);\n  EXPECT_CALL(*client_, cancel()).Times(0);\n\n  EXPECT_EQ(1U, 
stats_store_.counter(\"ext_authz.name.disabled\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.total\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.error\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.timeout\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.failure_mode_allowed\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.denied\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.ok\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ext_authz.name.cx_closed\").value());\n}\n\n// Test to verify the filter is enabled with metadata.\nTEST_F(ExtAuthzFilterTest, EnabledWithMetadata) {\n  initialize(metadata_yaml_string_);\n\n  // Enable in filter_enabled_metadata.\n  const std::string yaml = R\"EOF(\n  filter_metadata:\n    abc.xyz:\n      k1: check\n  )EOF\";\n  envoy::config::core::v3::Metadata metadata;\n  TestUtility::loadFromYaml(yaml, metadata);\n  ON_CALL(filter_callbacks_.connection_.stream_info_, dynamicMetadata())\n      .WillByDefault(ReturnRef(metadata));\n\n  expectOKWithOnData();\n}\n\n} // namespace ExtAuthz\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/http_connection_manager/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_proto_library(\n    name = \"config\",\n    srcs = [\"config.proto\"],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.http_connection_manager\",\n    deps = [\n        \":config_cc_proto\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/filter/http:filter_config_discovery_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/extensions/access_loggers/file:config\",\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//source/extensions/filters/http/router:config\",\n        \"//source/extensions/filters/network/http_connection_manager:config\",\n        \"//test/integration/filters:encoder_decoder_buffer_filter_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/http_connection_manager/config.proto",
    "content": "syntax = \"proto3\";\n\npackage test.http_connection_manager;\n\nmessage CustomRequestIDExtension {\n  string test_field = 1;\n}\n\nmessage UnknownRequestIDExtension {\n}"
  },
  {
    "path": "test/extensions/filters/network/http_connection_manager/config_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/config/trace/v3/opencensus.pb.h\"\n#include \"envoy/config/trace/v3/zipkin.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h\"\n#include \"envoy/server/request_id_extension_config.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/filter/http/filter_config_discovery_impl.h\"\n#include \"common/http/date_provider_impl.h\"\n#include \"common/http/request_id_extension_uuid_impl.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"extensions/filters/network/http_connection_manager/config.h\"\n\n#include \"test/extensions/filters/network/http_connection_manager/config.pb.h\"\n#include \"test/extensions/filters/network/http_connection_manager/config.pb.validate.h\"\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::An;\nusing testing::Eq;\nusing testing::NotNull;\nusing testing::Pointee;\nusing testing::Return;\nusing testing::WhenDynamicCastTo;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace HttpConnectionManager {\n\nenvoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\nparseHttpConnectionManagerFromYaml(const std::string& yaml, bool avoid_boosting = true) {\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      http_connection_manager;\n  
TestUtility::loadFromYamlAndValidate(yaml, http_connection_manager, false, avoid_boosting);\n  return http_connection_manager;\n}\n\nclass HttpConnectionManagerConfigTest : public testing::Test {\npublic:\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  Http::SlowDateProviderImpl date_provider_{context_.dispatcher().timeSource()};\n  NiceMock<Router::MockRouteConfigProviderManager> route_config_provider_manager_;\n  NiceMock<Config::MockConfigProviderManager> scoped_routes_config_provider_manager_;\n  NiceMock<Tracing::MockHttpTracerManager> http_tracer_manager_;\n  Filter::Http::FilterConfigProviderManagerImpl filter_config_provider_manager_;\n  std::shared_ptr<NiceMock<Tracing::MockHttpTracer>> http_tracer_{\n      std::make_shared<NiceMock<Tracing::MockHttpTracer>>()};\n  void createHttpConnectionManagerConfig(const std::string& yaml) {\n    HttpConnectionManagerConfig(parseHttpConnectionManagerFromYaml(yaml), context_, date_provider_,\n                                route_config_provider_manager_,\n                                scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                filter_config_provider_manager_);\n  }\n};\n\nTEST_F(HttpConnectionManagerConfigTest, ValidateFail) {\n  EXPECT_THROW(\n      HttpConnectionManagerFilterConfigFactory().createFilterFactoryFromProto(\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager(),\n          context_),\n      ProtoValidationException);\n}\n\nTEST_F(HttpConnectionManagerConfigTest, InvalidFilterName) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: foo\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n                        
    \"Didn't find a registered implementation for name: 'foo'\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, InvalidServerName) {\n  const std::string yaml_string = R\"EOF(\nserver_name: >\n  foo\nroute_config:\n  name: local_route\nstat_prefix: router\n  )EOF\";\n\n  EXPECT_THROW(createHttpConnectionManagerConfig(yaml_string), ProtoValidationException);\n}\n\nTEST_F(HttpConnectionManagerConfigTest, RouterInverted) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: envoy.filters.http.router\n- name: health_check\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n    pass_through_mode: false\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      \"Error: terminal filter named envoy.filters.http.router of type envoy.filters.http.router \"\n      \"must be the last filter in a http filter chain.\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, NonTerminalFilter) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: health_check\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n    pass_through_mode: false\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n                            \"Error: non-terminal filter named health_check of type \"\n                            \"envoy.filters.http.health_check is the last filter in a http filter \"\n               
             \"chain.\");\n}\n\n// When deprecating v2, remove the old style \"operation_name: egress\" config\n// but retain the rest of the test.\nTEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(MiscConfig)) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\ntracing:\n  operation_name: egress\n  max_path_tag_length: 128\nhttp_filters:\n- name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false),\n                                     context_, date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  EXPECT_EQ(128, config.tracingConfig()->max_path_tag_length_);\n  EXPECT_EQ(*context_.local_info_.address_, config.localAddress());\n  EXPECT_EQ(\"foo\", config.serverName());\n  EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE,\n            config.serverHeaderTransformation());\n  EXPECT_EQ(5 * 60 * 1000, config.streamIdleTimeout().count());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, TracingNotEnabledAndNoTracingConfigInBootstrap) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: envoy.filters.http.router\n  )EOF\";\n\n  // When tracing is not enabled on a given \"envoy.filters.network.http_connection_manager\" filter,\n  // there is no reason to obtain an actual HttpTracer.\n  EXPECT_CALL(http_tracer_manager_, 
getOrCreateHttpTracer(_)).Times(0);\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  // By default, tracer must be a null object (Tracing::HttpNullTracer) rather than nullptr.\n  EXPECT_THAT(config.tracer().get(), WhenDynamicCastTo<Tracing::HttpNullTracer*>(NotNull()));\n}\n\nTEST_F(HttpConnectionManagerConfigTest, TracingNotEnabledWhileThereIsTracingConfigInBootstrap) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: envoy.filters.http.router\n  )EOF\";\n\n  // Simulate tracer provider configuration in the bootstrap config.\n  envoy::config::trace::v3::Tracing tracing_config;\n  tracing_config.mutable_http()->set_name(\"zipkin\");\n  tracing_config.mutable_http()->mutable_typed_config()->PackFrom(\n      envoy::config::trace::v3::ZipkinConfig{});\n  context_.http_context_.setDefaultTracingConfig(tracing_config);\n\n  // When tracing is not enabled on a given \"envoy.filters.network.http_connection_manager\" filter,\n  // there is no reason to obtain an actual HttpTracer.\n  EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(_)).Times(0);\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  // Even though 
tracer provider is configured in the bootstrap config, a given filter instance\n  // should not have a tracer associated with it.\n\n  // By default, tracer must be a null object (Tracing::HttpNullTracer) rather than nullptr.\n  EXPECT_THAT(config.tracer().get(), WhenDynamicCastTo<Tracing::HttpNullTracer*>(NotNull()));\n}\n\nTEST_F(HttpConnectionManagerConfigTest, TracingIsEnabledWhileThereIsNoTracingConfigInBootstrap) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\ntracing: {} # notice that tracing is enabled\nhttp_filters:\n- name: envoy.filters.http.router\n  )EOF\";\n\n  // When tracing is enabled on a given \"envoy.filters.network.http_connection_manager\" filter,\n  // an actual HttpTracer must be obtained from the HttpTracerManager.\n  EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(nullptr)).WillOnce(Return(http_tracer_));\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  // Actual HttpTracer must be obtained from the HttpTracerManager.\n  EXPECT_THAT(config.tracer(), Eq(http_tracer_));\n}\n\nTEST_F(HttpConnectionManagerConfigTest, TracingIsEnabledAndThereIsTracingConfigInBootstrap) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\ntracing: {} # notice that tracing is enabled\nhttp_filters:\n- name: 
envoy.filters.http.router\n  )EOF\";\n\n  // Simulate tracer provider configuration in the bootstrap config.\n  envoy::config::trace::v3::Tracing tracing_config;\n  tracing_config.mutable_http()->set_name(\"zipkin\");\n  tracing_config.mutable_http()->mutable_typed_config()->PackFrom(\n      envoy::config::trace::v3::ZipkinConfig{});\n  context_.http_context_.setDefaultTracingConfig(tracing_config);\n\n  // When tracing is enabled on a given \"envoy.filters.network.http_connection_manager\" filter,\n  // an actual HttpTracer must be obtained from the HttpTracerManager.\n  EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(Pointee(ProtoEq(tracing_config.http()))))\n      .WillOnce(Return(http_tracer_));\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  // Actual HttpTracer must be obtained from the HttpTracerManager.\n  EXPECT_THAT(config.tracer(), Eq(http_tracer_));\n}\n\nTEST_F(HttpConnectionManagerConfigTest, TracingIsEnabledAndThereIsInlinedTracerProvider) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\ntracing:\n  operation_name: ingress\n  max_path_tag_length: 128\n  provider:                # notice inlined tracing provider configuration\n    name: zipkin\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig\n      collector_cluster: zipkin\n      collector_endpoint: \"/api/v1/spans\"\n      collector_endpoint_version: HTTP_JSON\nhttp_filters:\n- name: envoy.filters.http.router\n  
)EOF\";\n\n  // Simulate tracer provider configuration in the bootstrap config.\n  envoy::config::trace::v3::Tracing bootstrap_tracing_config;\n  bootstrap_tracing_config.mutable_http()->set_name(\"opencensus\");\n  bootstrap_tracing_config.mutable_http()->mutable_typed_config()->PackFrom(\n      envoy::config::trace::v3::OpenCensusConfig{});\n  context_.http_context_.setDefaultTracingConfig(bootstrap_tracing_config);\n\n  // Set up expected tracer provider configuration.\n  envoy::config::trace::v3::Tracing_Http inlined_tracing_config;\n  inlined_tracing_config.set_name(\"zipkin\");\n  envoy::config::trace::v3::ZipkinConfig zipkin_config;\n  zipkin_config.set_collector_cluster(\"zipkin\");\n  zipkin_config.set_collector_endpoint(\"/api/v1/spans\");\n  zipkin_config.set_collector_endpoint_version(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON);\n  inlined_tracing_config.mutable_typed_config()->PackFrom(zipkin_config);\n\n  // When tracing is enabled on a given \"envoy.filters.network.http_connection_manager\" filter,\n  // an actual HttpTracer must be obtained from the HttpTracerManager.\n  // Expect inlined tracer provider configuration to take precedence over bootstrap configuration.\n  EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(Pointee(ProtoEq(inlined_tracing_config))))\n      .WillOnce(Return(http_tracer_));\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false),\n                                     context_, date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  // Actual HttpTracer must be obtained from the HttpTracerManager.\n  EXPECT_THAT(config.tracer(), Eq(http_tracer_));\n}\n\nTEST_F(HttpConnectionManagerConfigTest, TracingCustomTagsConfig) {\n  const std::string yaml_string = R\"EOF(\nstat_prefix: router\nroute_config:\n  name: 
local_route\ntracing:\n  custom_tags:\n  - tag: ltag\n    literal:\n      value: lvalue\n  - tag: etag\n    environment:\n      name: E_TAG\n  - tag: rtag\n    request_header:\n      name: X-Tag\n  - tag: mtag\n    metadata:\n      kind: { request: {} }\n      metadata_key:\n        key: com.bar.foo\n        path: [ { key: xx }, { key: yy } ]\n  )EOF\";\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  std::vector<std::string> custom_tags{\"ltag\", \"etag\", \"rtag\", \"mtag\"};\n  const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_;\n  for (const std::string& custom_tag : custom_tags) {\n    EXPECT_NE(custom_tag_map.find(custom_tag), custom_tag_map.end());\n  }\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(RequestHeaderForTagsConfig)) {\n  const std::string yaml_string = R\"EOF(\nstat_prefix: router\nroute_config:\n  name: local_route\ntracing:\n  request_headers_for_tags:\n  - foo\n  )EOF\";\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false),\n                                     context_, date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_;\n  const Tracing::RequestHeaderCustomTag* foo = dynamic_cast<const Tracing::RequestHeaderCustomTag*>(\n      custom_tag_map.find(\"foo\")->second.get());\n  EXPECT_NE(foo, nullptr);\n  EXPECT_EQ(foo->tag(), \"foo\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest,\n       
DEPRECATED_FEATURE_TEST(ListenerDirectionOutboundOverride)) {\n  const std::string yaml_string = R\"EOF(\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\ntracing:\n  operation_name: ingress\nhttp_filters:\n- name: envoy.filters.http.router\n  )EOF\";\n\n  ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::OUTBOUND));\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false),\n                                     context_, date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(Tracing::OperationName::Egress, config.tracingConfig()->operation_name_);\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(ListenerDirectionInboundOverride)) {\n  const std::string yaml_string = R\"EOF(\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\ntracing:\n  operation_name: egress\nhttp_filters:\n- name: envoy.filters.http.router\n  )EOF\";\n\n  ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::INBOUND));\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false),\n                                     context_, date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(Tracing::OperationName::Ingress, config.tracingConfig()->operation_name_);\n}\n\nTEST_F(HttpConnectionManagerConfigTest, SamplingDefault) {\n  const 
std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  internal_address_config:\n    unix_sockets: true\n  route_config:\n    name: local_route\n  tracing:\n    operation_name: ingress\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false),\n                                     context_, date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  EXPECT_EQ(100, config.tracingConfig()->client_sampling_.numerator());\n  EXPECT_EQ(Tracing::DefaultMaxPathTagLength, config.tracingConfig()->max_path_tag_length_);\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED,\n            config.tracingConfig()->client_sampling_.denominator());\n  EXPECT_EQ(10000, config.tracingConfig()->random_sampling_.numerator());\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::TEN_THOUSAND,\n            config.tracingConfig()->random_sampling_.denominator());\n  EXPECT_EQ(100, config.tracingConfig()->overall_sampling_.numerator());\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED,\n            config.tracingConfig()->overall_sampling_.denominator());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, SamplingConfigured) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  internal_address_config:\n    unix_sockets: true\n  route_config:\n    name: local_route\n  tracing:\n    operation_name: ingress\n    client_sampling:\n      value: 1\n    random_sampling:\n      value: 2\n    overall_sampling:\n      value: 3\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false),\n                                     context_, date_provider_, route_config_provider_manager_,\n                     
                scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  EXPECT_EQ(1, config.tracingConfig()->client_sampling_.numerator());\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED,\n            config.tracingConfig()->client_sampling_.denominator());\n  EXPECT_EQ(200, config.tracingConfig()->random_sampling_.numerator());\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::TEN_THOUSAND,\n            config.tracingConfig()->random_sampling_.denominator());\n  EXPECT_EQ(3, config.tracingConfig()->overall_sampling_.numerator());\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED,\n            config.tracingConfig()->overall_sampling_.denominator());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, FractionalSamplingConfigured) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  internal_address_config:\n    unix_sockets: true\n  route_config:\n    name: local_route\n  tracing:\n    operation_name: ingress\n    client_sampling:\n      value: 0.1\n    random_sampling:\n      value: 0.2\n    overall_sampling:\n      value: 0.3\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false),\n                                     context_, date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  EXPECT_EQ(0, config.tracingConfig()->client_sampling_.numerator());\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED,\n            config.tracingConfig()->client_sampling_.denominator());\n  EXPECT_EQ(20, config.tracingConfig()->random_sampling_.numerator());\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::TEN_THOUSAND,\n            config.tracingConfig()->random_sampling_.denominator());\n  
EXPECT_EQ(0, config.tracingConfig()->overall_sampling_.numerator());\n  EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED,\n            config.tracingConfig()->overall_sampling_.denominator());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, UnixSocketInternalAddress) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  internal_address_config:\n    unix_sockets: true\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  Network::Address::PipeInstance unixAddress{\"/foo\"};\n  Network::Address::Ipv4Instance internalIpAddress{\"127.0.0.1\", 0, nullptr};\n  Network::Address::Ipv4Instance externalIpAddress{\"12.0.0.1\", 0, nullptr};\n  EXPECT_TRUE(config.internalAddressConfig().isInternalAddress(unixAddress));\n  EXPECT_TRUE(config.internalAddressConfig().isInternalAddress(internalIpAddress));\n  EXPECT_FALSE(config.internalAddressConfig().isInternalAddress(externalIpAddress));\n}\n\nTEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbDefault) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(60, 
config.maxRequestHeadersKb());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbConfigured) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  max_request_headers_kb: 16\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(16, config.maxRequestHeadersKb());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbMaxConfigurable) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  max_request_headers_kb: 96\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(96, config.maxRequestHeadersKb());\n}\n\n// Validated that an explicit zero stream idle timeout disables.\nTEST_F(HttpConnectionManagerConfigTest, DisabledStreamIdleTimeout) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  stream_idle_timeout: 0s\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     
scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(0, config.streamIdleTimeout().count());\n}\n\n// Validate that deprecated idle_timeout is still ingested.\nTEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(IdleTimeout)) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  idle_timeout: 1s\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false),\n                                     context_, date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(1000, config.idleTimeout().value().count());\n}\n\n// Validate that idle_timeout set in common_http_protocol_options is used.\nTEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeout) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  common_http_protocol_options:\n    idle_timeout: 1s\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(1000, config.idleTimeout().value().count());\n}\n\n// Validate that idle_timeout defaults to 1h\nTEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeoutDefault) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: 
local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(std::chrono::hours(1), config.idleTimeout().value());\n}\n\n// Validate that idle_timeouts can be turned off\nTEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeoutOff) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  common_http_protocol_options:\n    idle_timeout: 0s\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_FALSE(config.idleTimeout().has_value());\n}\n\n// Check that the default max request header count is 100.\nTEST_F(HttpConnectionManagerConfigTest, DefaultMaxRequestHeaderCount) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(100, 
config.maxRequestHeadersCount());\n}\n\n// Check that max request header count is configured.\nTEST_F(HttpConnectionManagerConfigTest, MaxRequestHeaderCountConfigurable) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  common_http_protocol_options:\n    max_headers_count: 200\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(200, config.maxRequestHeadersCount());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, ServerOverwrite) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  server_header_transformation: OVERWRITE\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An<uint64_t>()))\n      .WillOnce(Invoke(&context_.runtime_loader_.snapshot_,\n                       &Runtime::MockSnapshot::featureEnabledDefault));\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE,\n            config.serverHeaderTransformation());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, ServerAppendIfAbsent) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  server_header_transformation: 
APPEND_IF_ABSENT\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An<uint64_t>()))\n      .WillOnce(Invoke(&context_.runtime_loader_.snapshot_,\n                       &Runtime::MockSnapshot::featureEnabledDefault));\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::APPEND_IF_ABSENT,\n            config.serverHeaderTransformation());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, ServerPassThrough) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  server_header_transformation: PASS_THROUGH\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An<uint64_t>()))\n      .WillOnce(Invoke(&context_.runtime_loader_.snapshot_,\n                       &Runtime::MockSnapshot::featureEnabledDefault));\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::PASS_THROUGH,\n            config.serverHeaderTransformation());\n}\n\n// Validated that by default we don't normalize paths\n// unless set build flag 
path_normalization_by_default=true\nTEST_F(HttpConnectionManagerConfigTest, NormalizePathDefault) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An<uint64_t>()))\n      .WillOnce(Invoke(&context_.runtime_loader_.snapshot_,\n                       &Runtime::MockSnapshot::featureEnabledDefault));\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n#ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT\n  EXPECT_TRUE(config.shouldNormalizePath());\n#else\n  EXPECT_FALSE(config.shouldNormalizePath());\n#endif\n}\n\n// Validated that we normalize paths with runtime override when not specified.\nTEST_F(HttpConnectionManagerConfigTest, NormalizePathRuntime) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_CALL(context_.runtime_loader_.snapshot_,\n              featureEnabled(\"http_connection_manager.normalize_path\", An<uint64_t>()))\n      .WillOnce(Return(true));\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_TRUE(config.shouldNormalizePath());\n}\n\n// Validated that when configured, we normalize paths, ignoring 
runtime.\nTEST_F(HttpConnectionManagerConfigTest, NormalizePathTrue) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  normalize_path: true\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_CALL(context_.runtime_loader_.snapshot_,\n              featureEnabled(\"http_connection_manager.normalize_path\", An<uint64_t>()))\n      .Times(0);\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_TRUE(config.shouldNormalizePath());\n}\n\n// Validated that when explicitly set false, we don't normalize, ignoring runtime.\nTEST_F(HttpConnectionManagerConfigTest, NormalizePathFalse) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  normalize_path: false\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_CALL(context_.runtime_loader_.snapshot_,\n              featureEnabled(\"http_connection_manager.normalize_path\", An<uint64_t>()))\n      .Times(0);\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_FALSE(config.shouldNormalizePath());\n}\n\n// Validated that by default we don't merge slashes.\nTEST_F(HttpConnectionManagerConfigTest, MergeSlashesDefault) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  
http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_FALSE(config.shouldMergeSlashes());\n}\n\n// Validated that when configured, we merge slashes.\nTEST_F(HttpConnectionManagerConfigTest, MergeSlashesTrue) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  merge_slashes: true\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_TRUE(config.shouldMergeSlashes());\n}\n\n// Validated that when explicitly set false, we don't merge slashes.\nTEST_F(HttpConnectionManagerConfigTest, MergeSlashesFalse) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  merge_slashes: false\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_FALSE(config.shouldMergeSlashes());\n}\n\n// Validated that by default we don't remove 
port.\nTEST_F(HttpConnectionManagerConfigTest, RemovePortDefault) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_FALSE(config.shouldStripMatchingPort());\n}\n\n// Validated that when configured, we remove port.\nTEST_F(HttpConnectionManagerConfigTest, RemovePortTrue) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  strip_matching_host_port: true\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_TRUE(config.shouldStripMatchingPort());\n}\n\n// Validated that when explicitly set false, we don't remove port.\nTEST_F(HttpConnectionManagerConfigTest, RemovePortFalse) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  strip_matching_host_port: false\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, 
http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_FALSE(config.shouldStripMatchingPort());\n}\n\n// Validated that by default we allow requests with header names containing underscores.\nTEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresAllowedByDefault) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::ALLOW,\n            config.headersWithUnderscoresAction());\n}\n\n// Validated that when configured, we drop headers with underscores.\nTEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresDroppedByConfig) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  common_http_protocol_options:\n    headers_with_underscores_action: DROP_HEADER\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER,\n            config.headersWithUnderscoresAction());\n}\n\n// Validated that when configured, we reject requests with header names containing 
underscores.\nTEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresRequestRejectedByConfig) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  common_http_protocol_options:\n    headers_with_underscores_action: REJECT_REQUEST\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST,\n            config.headersWithUnderscoresAction());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, ConfiguredRequestTimeout) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  request_timeout: 53s\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(53 * 1000, config.requestTimeout().count());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DisabledRequestTimeout) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  request_timeout: 0s\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n 
                                    scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(0, config.requestTimeout().count());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, UnconfiguredRequestTimeout) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_EQ(0, config.requestTimeout().count());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, SingleDateProvider) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: encoder-decoder-buffer-filter\n  typed_config:\n    \"@type\": type.googleapis.com/google.protobuf.Empty\n- name: envoy.filters.http.router\n  )EOF\";\n\n  auto proto_config = parseHttpConnectionManagerFromYaml(yaml_string);\n  HttpConnectionManagerFilterConfigFactory factory;\n  // We expect a single slot allocation vs. 
multiple.\n  EXPECT_CALL(context_.thread_local_, allocateSlot());\n  Network::FilterFactoryCb cb1 = factory.createFilterFactoryFromProto(proto_config, context_);\n  Network::FilterFactoryCb cb2 = factory.createFilterFactoryFromProto(proto_config, context_);\n  EXPECT_TRUE(factory.isTerminalFilter());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, BadHttpConnectionMangerConfig) {\n  std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nfilter:\n- {}\n  )EOF\";\n\n  EXPECT_THROW(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException);\n}\n\nTEST_F(HttpConnectionManagerConfigTest, BadAccessLogConfig) {\n  std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: encoder-decoder-buffer-filter\n  config: {}\naccess_log:\n- name: accesslog\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n    path: \"/dev/null\"\n  filter: []\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException,\n                          \"filter: Proto field is not repeating, cannot start list.\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, BadAccessLogType) {\n  std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: encoder-decoder-buffer-filter\n  typed_config: {}\naccess_log:\n- name: accesslog\n  typed_config:\n    \"@type\": 
type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n    path: \"/dev/null\"\n  filter:\n    bad_type: {}\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException,\n                          \"bad_type: Cannot find field\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, BadAccessLogNestedTypes) {\n  std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: encoder-decoder-buffer-filter\n  typed_config: {}\naccess_log:\n- name: accesslog\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n    path: \"/dev/null\"\n  filter:\n    and_filter:\n      filters:\n      - or_filter:\n          filters:\n          - duration_filter:\n              op: \">=\"\n              value: 10000\n          - bad_type: {}\n      - not_health_check_filter: {}\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException,\n                          \"bad_type: Cannot find field\");\n}\n\n// Validates that HttpConnectionManagerConfig construction succeeds when there are no collisions\n// between named and user defined parameters, and server push is not modified.\nTEST_F(HttpConnectionManagerConfigTest, UserDefinedSettingsNoCollision) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http2\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: envoy.filters.http.router\n  typed_config: {}\nhttp2_protocol_options:\n  hpack_table_size: 1024\n  custom_settings_parameters: { identifier: 3, value: 2048 }\n  )EOF\";\n  // This will throw when Http2ProtocolOptions 
validation fails.\n  createHttpConnectionManagerConfig(yaml_string);\n}\n\n// Validates that named and user defined parameter collisions will trigger a config validation\n// failure.\nTEST_F(HttpConnectionManagerConfigTest, UserDefinedSettingsNamedParameterCollision) {\n  // Override both hpack_table_size (id = 0x1) and max_concurrent_streams (id = 0x3) with custom\n  // parameters of the same and different values (respectively).\n  const std::string yaml_string = R\"EOF(\ncodec_type: http2\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: encoder-decoder-buffer-filter\n  typed_config: {}\nhttp2_protocol_options:\n  hpack_table_size: 2048\n  max_concurrent_streams: 4096\n  custom_settings_parameters:\n    - { identifier: 1, value: 2048 }\n    - { identifier: 3, value: 1024 }\n  )EOF\";\n  EXPECT_THROW_WITH_REGEX(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      R\"(the \\{hpack_table_size,max_concurrent_streams\\} HTTP/2 SETTINGS parameter\\(s\\) can not be)\"\n      \" configured\");\n}\n\n// Validates that `allow_connect` can only be configured through the named field. 
All other\n// SETTINGS parameters can be set via the named _or_ custom parameters fields, but `allow_connect`\n// required an exception due to the use of a primitive type which does not support presence\n// checks.\nTEST_F(HttpConnectionManagerConfigTest, UserDefinedSettingsAllowConnectOnlyViaNamedField) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http2\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: envoy.filters.http.router\n  typed_config: {}\nhttp2_protocol_options:\n  custom_settings_parameters:\n    - { identifier: 8, value: 0 }\n  )EOF\";\n  EXPECT_THROW_WITH_REGEX(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      \"the \\\"allow_connect\\\" SETTINGS parameter must only be configured through the named field\");\n\n  const std::string yaml_string2 = R\"EOF(\ncodec_type: http2\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: envoy.filters.http.router\n  typed_config: {}\nhttp2_protocol_options:\n  allow_connect: true\n  )EOF\";\n  createHttpConnectionManagerConfig(yaml_string2);\n}\n\n// Validates that setting the server push parameter via user defined parameters is disallowed.\nTEST_F(HttpConnectionManagerConfigTest, UserDefinedSettingsDisallowServerPush) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http2\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: encoder-decoder-buffer-filter\n  typed_config: {}\nhttp2_protocol_options:\n  custom_settings_parameters: { identifier: 2, 
value: 1 }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      \"server push is not supported by Envoy and can not be enabled via a SETTINGS parameter.\");\n\n  // Specify both the server push parameter and colliding named and user defined parameters.\n  const std::string yaml_string2 = R\"EOF(\ncodec_type: http2\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: encoder-decoder-buffer-filter\n  typed_config: {}\nhttp2_protocol_options:\n  hpack_table_size: 2048\n  max_concurrent_streams: 4096\n  custom_settings_parameters:\n    - { identifier: 1, value: 2048 }\n    - { identifier: 2, value: 1 }\n    - { identifier: 3, value: 1024 }\n  )EOF\";\n\n  // The server push exception is thrown first.\n  EXPECT_THROW_WITH_REGEX(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      \"server push is not supported by Envoy and can not be enabled via a SETTINGS parameter.\");\n}\n\n// Validates that inconsistent custom parameters are rejected.\nTEST_F(HttpConnectionManagerConfigTest, UserDefinedSettingsRejectInconsistentCustomParameters) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http2\nstat_prefix: my_stat_prefix\nroute_config:\n  virtual_hosts:\n  - name: default\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: fake_cluster\nhttp_filters:\n- name: envoy.filters.http.router\n  typed_config: {}\nhttp2_protocol_options:\n  custom_settings_parameters:\n    - { identifier: 10, value: 0 }\n    - { identifier: 10, value: 1 }\n    - { identifier: 12, value: 10 }\n    - { identifier: 14, value: 1 }\n    - { identifier: 12, value: 10 }\n  )EOF\";\n  EXPECT_THROW_WITH_REGEX(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n    
  R\"(inconsistent HTTP/2 custom SETTINGS parameter\\(s\\) detected; identifiers = \\{0x0a\\})\");\n}\n\n// Test that the deprecated extension name still functions.\nTEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.http_connection_manager\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedNetworkFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\nTEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseDefault) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_FALSE(config.alwaysSetRequestIdInResponse());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseConfigured) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  always_set_request_id_in_response: true\n  route_config:\n    name: local_route\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  EXPECT_TRUE(config.alwaysSetRequestIdInResponse());\n}\n\nnamespace {\n\nclass TestRequestIDExtension : public Http::RequestIDExtension {\npublic:\n  
TestRequestIDExtension(const test::http_connection_manager::CustomRequestIDExtension& config)\n      : config_(config) {}\n\n  void set(Http::RequestHeaderMap&, bool) override {}\n  void setInResponse(Http::ResponseHeaderMap&, const Http::RequestHeaderMap&) override {}\n  bool modBy(const Http::RequestHeaderMap&, uint64_t&, uint64_t) override { return false; }\n  Http::TraceStatus getTraceStatus(const Http::RequestHeaderMap&) override {\n    return Http::TraceStatus::Sampled;\n  }\n  void setTraceStatus(Http::RequestHeaderMap&, Http::TraceStatus) override {}\n\n  std::string testField() { return config_.test_field(); }\n\nprivate:\n  test::http_connection_manager::CustomRequestIDExtension config_;\n};\n\nclass TestRequestIDExtensionFactory : public Server::Configuration::RequestIDExtensionFactory {\npublic:\n  std::string name() const override {\n    return \"test.http_connection_manager.CustomRequestIDExtension\";\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<test::http_connection_manager::CustomRequestIDExtension>();\n  }\n\n  Http::RequestIDExtensionSharedPtr\n  createExtensionInstance(const Protobuf::Message& config,\n                          Server::Configuration::FactoryContext& context) override {\n    const auto& custom_config = MessageUtil::downcastAndValidate<\n        const test::http_connection_manager::CustomRequestIDExtension&>(\n        config, context.messageValidationVisitor());\n    return std::make_shared<TestRequestIDExtension>(custom_config);\n  }\n};\n\n} // namespace\n\nTEST_F(HttpConnectionManagerConfigTest, CustomRequestIDExtension) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  request_id_extension:\n    typed_config:\n      \"@type\": type.googleapis.com/test.http_connection_manager.CustomRequestIDExtension\n      test_field: example\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  
TestRequestIDExtensionFactory factory;\n  Registry::InjectFactory<Server::Configuration::RequestIDExtensionFactory> registration(factory);\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  auto request_id_extension =\n      dynamic_cast<TestRequestIDExtension*>(config.requestIDExtension().get());\n  ASSERT_NE(nullptr, request_id_extension);\n  EXPECT_EQ(\"example\", request_id_extension->testField());\n}\n\nTEST_F(HttpConnectionManagerConfigTest, UnknownRequestIDExtension) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  request_id_extension:\n    typed_config:\n      \"@type\": type.googleapis.com/test.http_connection_manager.UnknownRequestIDExtension\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n                          \"Didn't find a registered implementation for type\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DefaultRequestIDExtension) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: ingress_http\n  route_config:\n    name: local_route\n  request_id_extension: {}\n  http_filters:\n  - name: envoy.filters.http.router\n  )EOF\";\n\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n  auto request_id_extension =\n      
dynamic_cast<Http::UUIDRequestIDExtension*>(config.requestIDExtension().get());\n  ASSERT_NE(nullptr, request_id_extension);\n}\n\nTEST_F(HttpConnectionManagerConfigTest, LegacyH1Codecs) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: envoy.filters.http.router\n  )EOF\";\n\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks;\n  EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false));\n  auto http_connection_manager_factory =\n      HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto(\n          proto_config, context_, filter_callbacks);\n  http_connection_manager_factory();\n}\n\nTEST_F(HttpConnectionManagerConfigTest, LegacyH2Codecs) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http2\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: envoy.filters.http.router\n  )EOF\";\n\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager\n      proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks;\n  EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false));\n  auto http_connection_manager_factory =\n      HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto(\n          proto_config, context_, filter_callbacks);\n  
http_connection_manager_factory();\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DynamicFilterWarmingNoDefault) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: foo\n  config_discovery:\n    config_source: { ads: {} }\n    apply_default_config_without_warming: true\n    type_urls:\n    - type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      \"Error: filter config foo applied without warming but has no default config.\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DynamicFilterBadDefault) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: foo\n  config_discovery:\n    config_source: { ads: {} }\n    default_config:\n      \"@type\": type.googleapis.com/google.protobuf.Value\n    type_urls:\n    - type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      \"Error: cannot find filter factory foo for default filter configuration with type URL \"\n      \"type.googleapis.com/google.protobuf.Value.\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultNotTerminal) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: foo\n  
config_discovery:\n    config_source: { ads: {} }\n    default_config:\n      \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n    type_urls:\n    - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      \"Error: non-terminal filter named foo of type envoy.filters.http.health_check is the last \"\n      \"filter in a http filter chain.\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultTerminal) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: foo\n  config_discovery:\n    config_source: { ads: {} }\n    default_config:\n      \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n    type_urls:\n    - type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n- name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n                            \"Error: terminal filter named foo of type envoy.filters.http.router \"\n                            \"must be the last filter in a http filter chain.\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultRequireTypeUrl) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: foo\n  config_discovery:\n    config_source: { ads: {} }\n    default_config:\n      \"@type\": type.googleapis.com/udpa.type.v1.TypedStruct\n      type_url: 
type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n    type_urls:\n    - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n- name: envoy.filters.http.router\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      \"Error: filter config has type URL envoy.extensions.filters.http.router.v3.Router but \"\n      \"expect envoy.config.filter.http.health_check.v2.HealthCheck.\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DynamicFilterRequireTypeUrlMissingFactory) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: foo\n  config_discovery:\n    config_source: { ads: {} }\n    type_urls:\n    - type.googleapis.com/google.protobuf.Value\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(\n      createHttpConnectionManagerConfig(yaml_string), EnvoyException,\n      \"Error: no factory found for a required type URL google.protobuf.Value.\");\n}\n\nTEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultValid) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: foo\n  config_discovery:\n    config_source: { ads: {} }\n    default_config:\n      \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n      pass_through_mode: false\n    type_urls:\n    - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n    apply_default_config_without_warming: true\n- name: envoy.filters.http.router\n  )EOF\";\n\n  createHttpConnectionManagerConfig(yaml_string);\n}\n\nclass FilterChainTest : 
public HttpConnectionManagerConfigTest {\npublic:\n  const std::string basic_config_ = R\"EOF(\ncodec_type: http1\nserver_name: foo\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: encoder-decoder-buffer-filter\n- name: envoy.filters.http.router\n\n  )EOF\";\n};\n\nTEST_F(FilterChainTest, CreateFilterChain) {\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(basic_config_), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  Http::MockFilterChainFactoryCallbacks callbacks;\n  EXPECT_CALL(callbacks, addStreamFilter(_));        // Buffer\n  EXPECT_CALL(callbacks, addStreamDecoderFilter(_)); // Router\n  config.createFilterChain(callbacks);\n}\n\nTEST_F(FilterChainTest, CreateDynamicFilterChain) {\n  const std::string yaml_string = R\"EOF(\ncodec_type: http1\nstat_prefix: router\nroute_config:\n  virtual_hosts:\n  - name: service\n    domains:\n    - \"*\"\n    routes:\n    - match:\n        prefix: \"/\"\n      route:\n        cluster: cluster\nhttp_filters:\n- name: foo\n  config_discovery:\n    config_source: { ads: {} }\n    type_urls:\n    - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n- name: bar\n  config_discovery:\n    config_source: { ads: {} }\n    type_urls:\n    - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n- name: envoy.filters.http.router\n  )EOF\";\n  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,\n                                     date_provider_, route_config_provider_manager_,\n                                     
scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  Http::MockFilterChainFactoryCallbacks callbacks;\n  Http::StreamDecoderFilterSharedPtr missing_config_filter;\n  EXPECT_CALL(callbacks, addStreamDecoderFilter(_))\n      .Times(2)\n      .WillOnce(testing::SaveArg<0>(&missing_config_filter))\n      .WillOnce(Return()); // MissingConfigFilter (only once) and router\n  config.createFilterChain(callbacks);\n\n  Http::MockStreamDecoderFilterCallbacks decoder_callbacks;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  EXPECT_CALL(decoder_callbacks, streamInfo()).WillRepeatedly(ReturnRef(stream_info));\n  EXPECT_CALL(decoder_callbacks, sendLocalReply(Http::Code::InternalServerError, _, _, _, _))\n      .WillRepeatedly(Return());\n  Http::TestRequestHeaderMapImpl headers;\n  missing_config_filter->setDecoderFilterCallbacks(decoder_callbacks);\n  missing_config_filter->decodeHeaders(headers, false);\n  EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound));\n}\n\n// Tests where upgrades are configured on via the HCM.\nTEST_F(FilterChainTest, CreateUpgradeFilterChain) {\n  auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_);\n  hcm_config.add_upgrade_configs()->set_upgrade_type(\"websocket\");\n\n  HttpConnectionManagerConfig config(hcm_config, context_, date_provider_,\n                                     route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  NiceMock<Http::MockFilterChainFactoryCallbacks> callbacks;\n  // Check the case where WebSockets are configured in the HCM, and no router\n  // config is present. 
We should create an upgrade filter chain for\n  // WebSockets.\n  {\n    EXPECT_CALL(callbacks, addStreamFilter(_));        // Buffer\n    EXPECT_CALL(callbacks, addStreamDecoderFilter(_)); // Router\n    EXPECT_TRUE(config.createUpgradeFilterChain(\"WEBSOCKET\", nullptr, callbacks));\n  }\n\n  // Check the case where WebSockets are configured in the HCM, and no router\n  // config is present. We should not create an upgrade filter chain for Foo\n  {\n    EXPECT_CALL(callbacks, addStreamFilter(_)).Times(0);\n    EXPECT_CALL(callbacks, addStreamDecoderFilter(_)).Times(0);\n    EXPECT_FALSE(config.createUpgradeFilterChain(\"foo\", nullptr, callbacks));\n  }\n\n  // Now override the HCM with a route-specific disabling of WebSocket to\n  // verify route-specific disabling works.\n  {\n    std::map<std::string, bool> upgrade_map;\n    upgrade_map.emplace(std::make_pair(\"WebSocket\", false));\n    EXPECT_FALSE(config.createUpgradeFilterChain(\"WEBSOCKET\", &upgrade_map, callbacks));\n  }\n\n  // For paranoia's sake make sure route-specific enabling doesn't break\n  // anything.\n  {\n    EXPECT_CALL(callbacks, addStreamFilter(_));        // Buffer\n    EXPECT_CALL(callbacks, addStreamDecoderFilter(_)); // Router\n    std::map<std::string, bool> upgrade_map;\n    upgrade_map.emplace(std::make_pair(\"WebSocket\", true));\n    EXPECT_TRUE(config.createUpgradeFilterChain(\"WEBSOCKET\", &upgrade_map, callbacks));\n  }\n}\n\n// Tests where upgrades are configured off via the HCM.\nTEST_F(FilterChainTest, CreateUpgradeFilterChainHCMDisabled) {\n  auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_);\n  hcm_config.add_upgrade_configs()->set_upgrade_type(\"websocket\");\n  hcm_config.mutable_upgrade_configs(0)->mutable_enabled()->set_value(false);\n\n  HttpConnectionManagerConfig config(hcm_config, context_, date_provider_,\n                                     route_config_provider_manager_,\n                                     
scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  NiceMock<Http::MockFilterChainFactoryCallbacks> callbacks;\n  // Check the case where WebSockets are off in the HCM, and no router config is present.\n  { EXPECT_FALSE(config.createUpgradeFilterChain(\"WEBSOCKET\", nullptr, callbacks)); }\n\n  // Check the case where WebSockets are off in the HCM and in router config.\n  {\n    std::map<std::string, bool> upgrade_map;\n    upgrade_map.emplace(std::make_pair(\"WebSocket\", false));\n    EXPECT_FALSE(config.createUpgradeFilterChain(\"WEBSOCKET\", &upgrade_map, callbacks));\n  }\n\n  // With a route-specific enabling for WebSocket, WebSocket should work.\n  {\n    std::map<std::string, bool> upgrade_map;\n    upgrade_map.emplace(std::make_pair(\"WebSocket\", true));\n    EXPECT_TRUE(config.createUpgradeFilterChain(\"WEBSOCKET\", &upgrade_map, callbacks));\n  }\n\n  // With only a route-config we should do what the route config says.\n  {\n    std::map<std::string, bool> upgrade_map;\n    upgrade_map.emplace(std::make_pair(\"foo\", true));\n    upgrade_map.emplace(std::make_pair(\"bar\", false));\n    EXPECT_TRUE(config.createUpgradeFilterChain(\"foo\", &upgrade_map, callbacks));\n    EXPECT_FALSE(config.createUpgradeFilterChain(\"bar\", &upgrade_map, callbacks));\n    EXPECT_FALSE(config.createUpgradeFilterChain(\"eep\", &upgrade_map, callbacks));\n  }\n}\n\nTEST_F(FilterChainTest, CreateCustomUpgradeFilterChain) {\n  auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_);\n  auto websocket_config = hcm_config.add_upgrade_configs();\n  websocket_config->set_upgrade_type(\"websocket\");\n\n  ASSERT_TRUE(websocket_config->add_filters()->ParseFromString(\"\\n\"\n                                                               \"\\x19\"\n                                                               \"envoy.filters.http.router\"));\n\n  auto foo_config = 
hcm_config.add_upgrade_configs();\n  foo_config->set_upgrade_type(\"foo\");\n  foo_config->add_filters()->ParseFromString(\"\\n\"\n                                             \"\\x1D\"\n                                             \"encoder-decoder-buffer-filter\");\n  foo_config->add_filters()->ParseFromString(\"\\n\"\n                                             \"\\x1D\"\n                                             \"encoder-decoder-buffer-filter\");\n  foo_config->add_filters()->ParseFromString(\"\\n\"\n                                             \"\\x19\"\n                                             \"envoy.filters.http.router\");\n\n  HttpConnectionManagerConfig config(hcm_config, context_, date_provider_,\n                                     route_config_provider_manager_,\n                                     scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                     filter_config_provider_manager_);\n\n  {\n    Http::MockFilterChainFactoryCallbacks callbacks;\n    EXPECT_CALL(callbacks, addStreamFilter(_));        // Buffer\n    EXPECT_CALL(callbacks, addStreamDecoderFilter(_)); // Router\n    config.createFilterChain(callbacks);\n  }\n\n  {\n    Http::MockFilterChainFactoryCallbacks callbacks;\n    EXPECT_CALL(callbacks, addStreamDecoderFilter(_)).Times(1);\n    EXPECT_TRUE(config.createUpgradeFilterChain(\"websocket\", nullptr, callbacks));\n  }\n\n  {\n    Http::MockFilterChainFactoryCallbacks callbacks;\n    EXPECT_CALL(callbacks, addStreamDecoderFilter(_)).Times(1);\n    EXPECT_CALL(callbacks, addStreamFilter(_)).Times(2); // Buffer\n    EXPECT_TRUE(config.createUpgradeFilterChain(\"Foo\", nullptr, callbacks));\n  }\n}\n\nTEST_F(FilterChainTest, CreateCustomUpgradeFilterChainWithRouterNotLast) {\n  auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_);\n  auto websocket_config = hcm_config.add_upgrade_configs();\n  websocket_config->set_upgrade_type(\"websocket\");\n\n  
ASSERT_TRUE(websocket_config->add_filters()->ParseFromString(\"\\n\"\n                                                               \"\\x19\"\n                                                               \"envoy.filters.http.router\"));\n\n  auto foo_config = hcm_config.add_upgrade_configs();\n  foo_config->set_upgrade_type(\"foo\");\n  foo_config->add_filters()->ParseFromString(\"\\n\"\n                                             \"\\x19\"\n                                             \"envoy.filters.http.router\");\n  foo_config->add_filters()->ParseFromString(\"\\n\"\n                                             \"\\x1D\"\n                                             \"encoder-decoder-buffer-filter\");\n\n  EXPECT_THROW_WITH_MESSAGE(\n      HttpConnectionManagerConfig(hcm_config, context_, date_provider_,\n                                  route_config_provider_manager_,\n                                  scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                  filter_config_provider_manager_),\n      EnvoyException,\n      \"Error: terminal filter named envoy.filters.http.router of type envoy.filters.http.router \"\n      \"must be the last filter in a http upgrade filter chain.\");\n}\n\nTEST_F(FilterChainTest, InvalidConfig) {\n  auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_);\n  hcm_config.add_upgrade_configs()->set_upgrade_type(\"WEBSOCKET\");\n  hcm_config.add_upgrade_configs()->set_upgrade_type(\"websocket\");\n\n  EXPECT_THROW_WITH_MESSAGE(\n      HttpConnectionManagerConfig(hcm_config, context_, date_provider_,\n                                  route_config_provider_manager_,\n                                  scoped_routes_config_provider_manager_, http_tracer_manager_,\n                                  filter_config_provider_manager_),\n      EnvoyException, \"Error: multiple upgrade configs with the same name: 'websocket'\");\n}\n\nclass HcmUtilityTest : public testing::Test 
{\npublic:\n  HcmUtilityTest() {\n    // Although different Listeners will have separate FactoryContexts,\n    // those contexts must share the same SingletonManager.\n    ON_CALL(context_two_, singletonManager()).WillByDefault([&]() -> Singleton::Manager& {\n      return *context_one_.singleton_manager_;\n    });\n  }\n  NiceMock<Server::Configuration::MockFactoryContext> context_one_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_two_;\n};\n\nTEST_F(HcmUtilityTest, EnsureCreateSingletonsActuallyReturnsTheSameInstances) {\n  // Simulate `HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoTyped()`\n  // call for filter instance \"one\".\n  auto singletons_one = Utility::createSingletons(context_one_);\n\n  EXPECT_THAT(singletons_one.date_provider_.get(), NotNull());\n  EXPECT_THAT(singletons_one.route_config_provider_manager_.get(), NotNull());\n  EXPECT_THAT(singletons_one.scoped_routes_config_provider_manager_.get(), NotNull());\n  EXPECT_THAT(singletons_one.http_tracer_manager_.get(), NotNull());\n\n  // Simulate `HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoTyped()`\n  // call for filter instance \"two\".\n  auto singletons_two = Utility::createSingletons(context_two_);\n\n  // Ensure that returned values are still the same, even though the context has changed.\n  EXPECT_EQ(singletons_two.date_provider_, singletons_one.date_provider_);\n  EXPECT_EQ(singletons_two.route_config_provider_manager_,\n            singletons_one.route_config_provider_manager_);\n  EXPECT_EQ(singletons_two.scoped_routes_config_provider_manager_,\n            singletons_one.scoped_routes_config_provider_manager_);\n  EXPECT_EQ(singletons_two.http_tracer_manager_, singletons_one.http_tracer_manager_);\n}\n\n} // namespace HttpConnectionManager\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\nload(\"@rules_python//python:defs.bzl\", \"py_binary\")\nload(\"@kafka_pip3//:requirements.bzl\", \"requirement\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"buffer_based_test_lib\",\n    srcs = [],\n    hdrs = [\"buffer_based_test.h\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/network/kafka:serialization_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"serialization_utilities_lib\",\n    srcs = [\"serialization_utilities.cc\"],\n    hdrs = [\"serialization_utilities.h\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/network/kafka:serialization_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"serialization_test\",\n    srcs = [\"serialization_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":serialization_utilities_lib\",\n        \"//source/extensions/filters/network/kafka:serialization_lib\",\n        \"//source/extensions/filters/network/kafka:tagged_fields_lib\",\n        \"//test/mocks/server:server_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"serialization_composite_test\",\n    srcs = [\"external/serialization_composite_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":serialization_utilities_lib\",\n        \"//source/extensions/filters/network/kafka:serialization_lib\",\n        \"//test/mocks/server:server_mocks\",\n    ],\n)\n\ngenrule(\n    name = \"serialization_composite_generated_tests\",\n    srcs = [],\n    outs = [\"external/serialization_composite_test.cc\"],\n    cmd = \"\"\"\n        ./$(location 
:serialization_composite_test_generator_bin) \\\n        $(location external/serialization_composite_test.cc)\n    \"\"\",\n    tools = [\n        \":serialization_composite_test_generator_bin\",\n    ],\n)\n\npy_binary(\n    name = \"serialization_composite_test_generator_bin\",\n    srcs = [\"serialization/launcher.py\"],\n    data = glob([\"serialization/*.j2\"]),\n    main = \"serialization/launcher.py\",\n    deps = [\n        \"//source/extensions/filters/network/kafka:serialization_composite_generator_lib\",\n        requirement(\"Jinja2\"),\n        requirement(\"MarkupSafe\"),\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"message_utilities\",\n    srcs = [\n        \"external/request_utilities.cc\",\n        \"external/response_utilities.cc\",\n    ],\n    hdrs = [\"message_utilities.h\"],\n    deps = [\n        \"//source/extensions/filters/network/kafka:kafka_request_parser_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_response_parser_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"kafka_request_parser_test\",\n    srcs = [\"kafka_request_parser_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":buffer_based_test_lib\",\n        \":serialization_utilities_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_request_parser_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"request_codec_unit_test\",\n    srcs = [\"request_codec_unit_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":buffer_based_test_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_request_codec_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"request_codec_integration_test\",\n    srcs = [\"request_codec_integration_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":buffer_based_test_lib\",\n        \":serialization_utilities_lib\",\n        
\"//source/extensions/filters/network/kafka:kafka_request_codec_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"request_codec_request_test\",\n    srcs = [\"external/request_codec_request_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":buffer_based_test_lib\",\n        \":serialization_utilities_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_request_codec_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"requests_test\",\n    srcs = [\"external/requests_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":buffer_based_test_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_request_codec_lib\",\n    ],\n)\n\ngenrule(\n    name = \"request_generated_tests\",\n    srcs = [\n        \"@kafka_source//:request_protocol_files\",\n    ],\n    outs = [\n        \"external/requests_test.cc\",\n        \"external/request_codec_request_test.cc\",\n        \"external/request_utilities.cc\",\n    ],\n    cmd = \"\"\"\n        ./$(location :kafka_protocol_test_generator_bin) request \\\n        $(location external/requests_test.cc) \\\n        $(location external/request_codec_request_test.cc) \\\n        $(location external/request_utilities.cc) \\\n        $(SRCS)\n    \"\"\",\n    tools = [\n        \":kafka_protocol_test_generator_bin\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"kafka_response_parser_test\",\n    srcs = [\"kafka_response_parser_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":buffer_based_test_lib\",\n        \":serialization_utilities_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_response_parser_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"response_codec_unit_test\",\n    srcs = [\"response_codec_unit_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        
\":buffer_based_test_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_response_codec_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"response_codec_integration_test\",\n    srcs = [\"response_codec_integration_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":buffer_based_test_lib\",\n        \":serialization_utilities_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_response_codec_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"response_codec_response_test\",\n    srcs = [\"external/response_codec_response_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":buffer_based_test_lib\",\n        \":serialization_utilities_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_response_codec_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"responses_test\",\n    srcs = [\"external/responses_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":buffer_based_test_lib\",\n        \"//source/extensions/filters/network/kafka:kafka_response_codec_lib\",\n    ],\n)\n\ngenrule(\n    name = \"response_generated_tests\",\n    srcs = [\n        \"@kafka_source//:response_protocol_files\",\n    ],\n    outs = [\n        \"external/responses_test.cc\",\n        \"external/response_codec_response_test.cc\",\n        \"external/response_utilities.cc\",\n    ],\n    cmd = \"\"\"\n        ./$(location :kafka_protocol_test_generator_bin) response \\\n        $(location external/responses_test.cc) \\\n        $(location external/response_codec_response_test.cc) \\\n        $(location external/response_utilities.cc) \\\n        $(SRCS)\n    \"\"\",\n    tools = [\n        \":kafka_protocol_test_generator_bin\",\n    ],\n)\n\npy_binary(\n    name = \"kafka_protocol_test_generator_bin\",\n    srcs = [\"protocol/launcher.py\"],\n    data = glob([\"protocol/*.j2\"]),\n    
main = \"protocol/launcher.py\",\n    deps = [\n        \"//source/extensions/filters/network/kafka:kafka_protocol_generator_lib\",\n        requirement(\"Jinja2\"),\n        requirement(\"MarkupSafe\"),\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"metrics_integration_test\",\n    srcs = [\"metrics_integration_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \":message_utilities\",\n        \"//source/extensions/filters/network/kafka:kafka_broker_filter_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_unit_test\",\n    srcs = [\"config_unit_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \"//source/extensions/filters/network/kafka:kafka_broker_config_lib\",\n        \"//test/mocks/server:factory_context_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"filter_unit_test\",\n    srcs = [\"filter_unit_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \"//include/envoy/event:timer_interface\",\n        \"//source/extensions/filters/network/kafka:kafka_broker_filter_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"filter_protocol_test\",\n    srcs = [\"filter_protocol_test.cc\"],\n    extension_name = \"envoy.filters.network.kafka_broker\",\n    deps = [\n        \"//source/extensions/filters/network/kafka:kafka_broker_filter_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/extensions/filters/network/kafka:buffer_based_test_lib\",\n        \"//test/extensions/filters/network/kafka:message_utilities\",\n        \"//test/test_common:test_time_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/config_unit_test.cc",
    "content": "#include \"extensions/filters/network/kafka/broker/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace Broker {\n\nTEST(KafkaConfigFactoryUnitTest, shouldCreateFilter) {\n  // given\n  const std::string yaml = R\"EOF(\nstat_prefix: test_prefix\n  )EOF\";\n\n  KafkaBrokerProtoConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  KafkaConfigFactory factory;\n\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addFilter(_));\n\n  // when\n  cb(connection);\n\n  // then - connection had `addFilter` invoked\n}\n\nTEST(KafkaConfigFactoryUnitTest, shouldThrowOnInvalidStatPrefix) {\n  // given\n  const std::string yaml = R\"EOF(\nstat_prefix: \"\"\n  )EOF\";\n\n  KafkaBrokerProtoConfig proto_config;\n\n  // when\n  // then - exception gets thrown\n  EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException);\n}\n\n} // namespace Broker\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/filter_protocol_test.cc",
    "content": "/**\n * Tests in this file verify whether Kafka broker filter instance is capable of processing protocol\n * messages properly.\n */\n\n#include \"common/common/utility.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/network/kafka/broker/filter.h\"\n#include \"extensions/filters/network/kafka/external/requests.h\"\n#include \"extensions/filters/network/kafka/external/responses.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n#include \"test/extensions/filters/network/kafka/message_utilities.h\"\n#include \"test/test_common/test_time.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace Broker {\n\nusing RequestB = MessageBasedTest<RequestEncoder>;\nusing ResponseB = MessageBasedTest<ResponseEncoder>;\n\n// Message size for all kind of broken messages (we are not going to process all the bytes).\nconstexpr static int32_t BROKEN_MESSAGE_SIZE = std::numeric_limits<int32_t>::max();\n\nclass KafkaBrokerFilterProtocolTest : public testing::Test,\n                                      protected RequestB,\n                                      protected ResponseB {\nprotected:\n  Stats::TestUtil::TestStore scope_;\n  Event::TestRealTimeSystem time_source_;\n  KafkaBrokerFilter testee_{scope_, time_source_, \"prefix\"};\n\n  Network::FilterStatus consumeRequestFromBuffer() {\n    return testee_.onData(RequestB::buffer_, false);\n  }\n\n  Network::FilterStatus consumeResponseFromBuffer() {\n    return testee_.onWrite(ResponseB::buffer_, false);\n  }\n};\n\nTEST_F(KafkaBrokerFilterProtocolTest, ShouldHandleUnknownRequestAndResponseWithoutBreaking) {\n  // given\n  const int16_t unknown_api_key = std::numeric_limits<int16_t>::max();\n\n  const RequestHeader request_header = {unknown_api_key, 0, 0, \"client-id\"};\n  const ProduceRequest request_data = {0, 0, 
{}};\n  const Request<ProduceRequest> produce_request = {request_header, request_data};\n  RequestB::putMessageIntoBuffer(produce_request);\n\n  const ResponseMetadata response_metadata = {unknown_api_key, 0, 0};\n  const ProduceResponse response_data = {{}};\n  const Response<ProduceResponse> produce_response = {response_metadata, response_data};\n  ResponseB::putMessageIntoBuffer(produce_response);\n\n  // when\n  const Network::FilterStatus result1 = consumeRequestFromBuffer();\n  const Network::FilterStatus result2 = consumeResponseFromBuffer();\n\n  // then\n  ASSERT_EQ(result1, Network::FilterStatus::Continue);\n  ASSERT_EQ(result2, Network::FilterStatus::Continue);\n  ASSERT_EQ(scope_.counter(\"kafka.prefix.request.unknown\").value(), 1);\n  ASSERT_EQ(scope_.counter(\"kafka.prefix.response.unknown\").value(), 1);\n}\n\nTEST_F(KafkaBrokerFilterProtocolTest, ShouldHandleBrokenRequestPayload) {\n  // given\n\n  // Encode broken request into buffer.\n  // We will put invalid length of nullable string passed as client-id (length < -1).\n  RequestB::putIntoBuffer(BROKEN_MESSAGE_SIZE);\n  RequestB::putIntoBuffer(static_cast<int16_t>(0)); // Api key.\n  RequestB::putIntoBuffer(static_cast<int16_t>(0)); // Api version.\n  RequestB::putIntoBuffer(static_cast<int32_t>(0)); // Correlation-id.\n  RequestB::putIntoBuffer(static_cast<int16_t>(std::numeric_limits<int16_t>::min())); // Client-id.\n\n  // when\n  const Network::FilterStatus result = consumeRequestFromBuffer();\n\n  // then\n  ASSERT_EQ(result, Network::FilterStatus::StopIteration);\n  ASSERT_EQ(testee_.getRequestDecoderForTest()->getCurrentParserForTest(), nullptr);\n}\n\nTEST_F(KafkaBrokerFilterProtocolTest, ShouldHandleBrokenResponsePayload) {\n  // given\n\n  const int32_t correlation_id = 42;\n  // Encode broken response into buffer.\n  // Produce response v0 is a nullable array of TopicProduceResponses.\n  // Encoding invalid length (< -1) of this nullable array is going to break the parser.\n  
ResponseB::putIntoBuffer(BROKEN_MESSAGE_SIZE);\n  ResponseB::putIntoBuffer(correlation_id); // Correlation-id.\n  ResponseB::putIntoBuffer(static_cast<int32_t>(std::numeric_limits<int32_t>::min())); // Array.\n\n  testee_.getResponseDecoderForTest()->expectResponse(correlation_id, 0, 0);\n\n  // when\n  const Network::FilterStatus result = consumeResponseFromBuffer();\n\n  // then\n  ASSERT_EQ(result, Network::FilterStatus::StopIteration);\n  ASSERT_EQ(testee_.getResponseDecoderForTest()->getCurrentParserForTest(), nullptr);\n}\n\nTEST_F(KafkaBrokerFilterProtocolTest, ShouldAbortOnUnregisteredResponse) {\n  // given\n  const ResponseMetadata response_metadata = {0, 0, 0};\n  const ProduceResponse response_data = {{}};\n  const Response<ProduceResponse> produce_response = {response_metadata, response_data};\n  ResponseB::putMessageIntoBuffer(produce_response);\n\n  // when\n  const Network::FilterStatus result = consumeResponseFromBuffer();\n\n  // then\n  ASSERT_EQ(result, Network::FilterStatus::StopIteration);\n}\n\nTEST_F(KafkaBrokerFilterProtocolTest, ShouldProcessMessages) {\n  // given\n  // For every request/response type & version, put a corresponding request into the buffer.\n  for (const AbstractRequestSharedPtr& message : MessageUtilities::makeAllRequests()) {\n    RequestB::putMessageIntoBuffer(*message);\n  }\n  for (const AbstractResponseSharedPtr& message : MessageUtilities::makeAllResponses()) {\n    ResponseB::putMessageIntoBuffer(*message);\n  }\n\n  // when\n  const Network::FilterStatus result1 = consumeRequestFromBuffer();\n  const Network::FilterStatus result2 = consumeResponseFromBuffer();\n\n  // then\n  ASSERT_EQ(result1, Network::FilterStatus::Continue);\n  ASSERT_EQ(result2, Network::FilterStatus::Continue);\n\n  // Also, assert that every message type has been processed properly.\n  for (int16_t i = 0; i < MessageUtilities::apiKeys(); ++i) {\n    // We should have received one request per api version.\n    const Stats::Counter& 
request_counter = scope_.counter(MessageUtilities::requestMetric(i));\n    ASSERT_EQ(request_counter.value(), MessageUtilities::requestApiVersions(i));\n    // We should have received one response per api version.\n    const Stats::Counter& response_counter = scope_.counter(MessageUtilities::responseMetric(i));\n    ASSERT_EQ(response_counter.value(), MessageUtilities::responseApiVersions(i));\n  }\n}\n\n} // namespace Broker\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/filter_unit_test.cc",
    "content": "#include \"envoy/event/timer.h\"\n\n#include \"extensions/filters/network/kafka/broker/filter.h\"\n#include \"extensions/filters/network/kafka/external/requests.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Return;\nusing testing::Throw;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace Broker {\n\n// Mocks.\n\nclass MockKafkaMetricsFacade : public KafkaMetricsFacade {\npublic:\n  MOCK_METHOD(void, onMessage, (AbstractRequestSharedPtr));\n  MOCK_METHOD(void, onMessage, (AbstractResponseSharedPtr));\n  MOCK_METHOD(void, onFailedParse, (RequestParseFailureSharedPtr));\n  MOCK_METHOD(void, onFailedParse, (ResponseMetadataSharedPtr));\n  MOCK_METHOD(void, onRequestException, ());\n  MOCK_METHOD(void, onResponseException, ());\n};\n\nusing MockKafkaMetricsFacadeSharedPtr = std::shared_ptr<MockKafkaMetricsFacade>;\n\nclass MockResponseDecoder : public ResponseDecoder {\npublic:\n  MockResponseDecoder() : ResponseDecoder{{}} {};\n  MOCK_METHOD(void, onData, (Buffer::Instance&));\n  MOCK_METHOD(void, expectResponse, (const int32_t, const int16_t, const int16_t));\n  MOCK_METHOD(void, reset, ());\n};\n\nusing MockResponseDecoderSharedPtr = std::shared_ptr<MockResponseDecoder>;\n\nclass MockRequestDecoder : public RequestDecoder {\npublic:\n  MockRequestDecoder() : RequestDecoder{{}} {};\n  MOCK_METHOD(void, onData, (Buffer::Instance&));\n  MOCK_METHOD(void, reset, ());\n};\n\nusing MockRequestDecoderSharedPtr = std::shared_ptr<MockRequestDecoder>;\n\nclass MockTimeSource : public TimeSource {\npublic:\n  MOCK_METHOD(SystemTime, systemTime, ());\n  MOCK_METHOD(MonotonicTime, monotonicTime, ());\n};\n\nclass MockRichRequestMetrics : public RichRequestMetrics {\npublic:\n  MOCK_METHOD(void, onRequest, (const int16_t));\n  MOCK_METHOD(void, onUnknownRequest, ());\n  
MOCK_METHOD(void, onBrokenRequest, ());\n};\n\nclass MockRichResponseMetrics : public RichResponseMetrics {\npublic:\n  MOCK_METHOD(void, onResponse, (const int16_t, const long long duration));\n  MOCK_METHOD(void, onUnknownResponse, ());\n  MOCK_METHOD(void, onBrokenResponse, ());\n};\n\nclass MockRequest : public AbstractRequest {\npublic:\n  MockRequest(const int16_t api_key, const int16_t api_version, const int32_t correlation_id)\n      : AbstractRequest{{api_key, api_version, correlation_id, \"\"}} {};\n  uint32_t computeSize() const override { return 0; };\n  uint32_t encode(Buffer::Instance&) const override { return 0; };\n};\n\nclass MockResponse : public AbstractResponse {\npublic:\n  MockResponse(const int16_t api_key, const int32_t correlation_id)\n      : AbstractResponse{{api_key, 0, correlation_id}} {};\n  uint32_t computeSize() const override { return 0; };\n  uint32_t encode(Buffer::Instance&) const override { return 0; };\n};\n\n// Tests.\n\nclass KafkaBrokerFilterUnitTest : public testing::Test {\nprotected:\n  MockKafkaMetricsFacadeSharedPtr metrics_{std::make_shared<MockKafkaMetricsFacade>()};\n  MockResponseDecoderSharedPtr response_decoder_{std::make_shared<MockResponseDecoder>()};\n  MockRequestDecoderSharedPtr request_decoder_{std::make_shared<MockRequestDecoder>()};\n\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n\n  KafkaBrokerFilter testee_{metrics_, response_decoder_, request_decoder_};\n\n  void initialize() {\n    testee_.initializeReadFilterCallbacks(filter_callbacks_);\n    testee_.onNewConnection();\n  }\n};\n\nTEST_F(KafkaBrokerFilterUnitTest, ShouldAcceptDataSentByKafkaClient) {\n  // given\n  Buffer::OwnedImpl data;\n  EXPECT_CALL(*request_decoder_, onData(_));\n\n  // when\n  initialize();\n  const auto result = testee_.onData(data, false);\n\n  // then\n  ASSERT_EQ(result, Network::FilterStatus::Continue);\n  // Also, request_decoder got invoked.\n}\n\nTEST_F(KafkaBrokerFilterUnitTest, 
ShouldStopIterationIfProcessingDataFromKafkaClientFails) {\n  // given\n  Buffer::OwnedImpl data;\n  EXPECT_CALL(*request_decoder_, onData(_)).WillOnce(Throw(EnvoyException(\"boom\")));\n  EXPECT_CALL(*request_decoder_, reset());\n  EXPECT_CALL(*metrics_, onRequestException());\n\n  // when\n  initialize();\n  const auto result = testee_.onData(data, false);\n\n  // then\n  ASSERT_EQ(result, Network::FilterStatus::StopIteration);\n}\n\nTEST_F(KafkaBrokerFilterUnitTest, ShouldAcceptDataSentByKafkaBroker) {\n  // given\n  Buffer::OwnedImpl data;\n  EXPECT_CALL(*response_decoder_, onData(_));\n\n  // when\n  initialize();\n  const auto result = testee_.onWrite(data, false);\n\n  // then\n  ASSERT_EQ(result, Network::FilterStatus::Continue);\n  // Also, response_decoder got invoked.\n}\n\nTEST_F(KafkaBrokerFilterUnitTest, ShouldStopIterationIfProcessingDataFromKafkaBrokerFails) {\n  // given\n  Buffer::OwnedImpl data;\n  EXPECT_CALL(*response_decoder_, onData(_)).WillOnce(Throw(EnvoyException(\"boom\")));\n  EXPECT_CALL(*response_decoder_, reset());\n  EXPECT_CALL(*metrics_, onResponseException());\n\n  // when\n  initialize();\n  const auto result = testee_.onWrite(data, false);\n\n  // then\n  ASSERT_EQ(result, Network::FilterStatus::StopIteration);\n}\n\nclass ForwarderUnitTest : public testing::Test {\nprotected:\n  MockResponseDecoderSharedPtr response_decoder_{std::make_shared<MockResponseDecoder>()};\n  Forwarder testee_{*response_decoder_};\n};\n\nTEST_F(ForwarderUnitTest, ShouldUpdateResponseDecoderState) {\n  // given\n  const int16_t api_key = 42;\n  const int16_t api_version = 13;\n  const int32_t correlation_id = 1234;\n  AbstractRequestSharedPtr request =\n      std::make_shared<MockRequest>(api_key, api_version, correlation_id);\n\n  EXPECT_CALL(*response_decoder_, expectResponse(correlation_id, api_key, api_version));\n\n  // when\n  testee_.onMessage(request);\n\n  // then - response_decoder_ had a new expected response 
registered.\n}\n\nTEST_F(ForwarderUnitTest, ShouldUpdateResponseDecoderStateOnFailedParse) {\n  // given\n  const int16_t api_key = 42;\n  const int16_t api_version = 13;\n  const int32_t correlation_id = 1234;\n  RequestHeader header = {api_key, api_version, correlation_id, \"\"};\n  RequestParseFailureSharedPtr parse_failure = std::make_shared<RequestParseFailure>(header);\n\n  EXPECT_CALL(*response_decoder_, expectResponse(correlation_id, api_key, api_version));\n\n  // when\n  testee_.onFailedParse(parse_failure);\n\n  // then - response_decoder_ had a new expected response registered.\n}\n\nclass KafkaMetricsFacadeImplUnitTest : public testing::Test {\nprotected:\n  MockTimeSource time_source_;\n  std::shared_ptr<MockRichRequestMetrics> request_metrics_ =\n      std::make_shared<MockRichRequestMetrics>();\n  std::shared_ptr<MockRichResponseMetrics> response_metrics_ =\n      std::make_shared<MockRichResponseMetrics>();\n  KafkaMetricsFacadeImpl testee_{time_source_, request_metrics_, response_metrics_};\n};\n\nTEST_F(KafkaMetricsFacadeImplUnitTest, ShouldRegisterRequest) {\n  // given\n  const int16_t api_key = 42;\n  const int32_t correlation_id = 1234;\n  AbstractRequestSharedPtr request = std::make_shared<MockRequest>(api_key, 0, correlation_id);\n\n  EXPECT_CALL(*request_metrics_, onRequest(api_key));\n\n  MonotonicTime time_point{Event::TimeSystem::Milliseconds(1234)};\n  EXPECT_CALL(time_source_, monotonicTime()).WillOnce(Return(time_point));\n\n  // when\n  testee_.onMessage(request);\n\n  // then\n  const auto& request_arrivals = testee_.getRequestArrivalsForTest();\n  ASSERT_EQ(request_arrivals.at(correlation_id), time_point);\n}\n\nTEST_F(KafkaMetricsFacadeImplUnitTest, ShouldRegisterUnknownRequest) {\n  // given\n  RequestHeader header = {0, 0, 0, \"\"};\n  RequestParseFailureSharedPtr unknown_request = std::make_shared<RequestParseFailure>(header);\n\n  EXPECT_CALL(*request_metrics_, onUnknownRequest());\n\n  // when\n  
testee_.onFailedParse(unknown_request);\n\n  // then - request_metrics_ is updated.\n}\n\nTEST_F(KafkaMetricsFacadeImplUnitTest, ShouldRegisterResponse) {\n  // given\n  const int16_t api_key = 42;\n  const int32_t correlation_id = 1234;\n  AbstractResponseSharedPtr response = std::make_shared<MockResponse>(api_key, correlation_id);\n\n  MonotonicTime request_time_point{Event::TimeSystem::Milliseconds(1234)};\n  testee_.getRequestArrivalsForTest()[correlation_id] = request_time_point;\n\n  MonotonicTime response_time_point{Event::TimeSystem::Milliseconds(2345)};\n\n  EXPECT_CALL(*response_metrics_, onResponse(api_key, 1111));\n  EXPECT_CALL(time_source_, monotonicTime()).WillOnce(Return(response_time_point));\n\n  // when\n  testee_.onMessage(response);\n\n  // then\n  const auto& request_arrivals = testee_.getRequestArrivalsForTest();\n  ASSERT_EQ(request_arrivals.find(correlation_id), request_arrivals.end());\n}\n\nTEST_F(KafkaMetricsFacadeImplUnitTest, ShouldRegisterUnknownResponse) {\n  // given\n  ResponseMetadataSharedPtr unknown_response = std::make_shared<ResponseMetadata>(0, 0, 0);\n\n  EXPECT_CALL(*response_metrics_, onUnknownResponse());\n\n  // when\n  testee_.onFailedParse(unknown_response);\n\n  // then - response_metrics_ is updated.\n}\n\n} // namespace Broker\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/integration_test/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\"@rules_python//python:defs.bzl\", \"py_test\")\nload(\"@kafka_pip3//:requirements.bzl\", \"requirement\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\npy_test(\n    name = \"kafka_broker_integration_test\",\n    srcs = [\n        \"kafka_broker_integration_test.py\",\n        \"@kafka_python_client//:all\",\n    ],\n    data = [\n        \"//source/exe:envoy-static\",\n        \"//bazel:remote_jdk11\",\n        \"@kafka_server_binary//:all\",\n    ] + glob([\"*.j2\"]),\n    flaky = True,\n    python_version = \"PY3\",\n    srcs_version = \"PY3\",\n    tags = [\"manual\"],\n    deps = [\n        requirement(\"Jinja2\"),\n        requirement(\"MarkupSafe\"),\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/integration_test/README.md",
    "content": "Kafka broker integration test\n=============================\n\nThe code in this directory provides `kafka_broker_integration_test.py`\nwhich is used to launch a full integration test for the Kafka broker.\n\nThe Python script starts Envoy, Zookeeper, and Kafka as separate\nprocesses, all of them listening on randomly-allocated ports.\nAfterwards, the Python Kafka consumers and producers are initialized and\nrun traffic through Envoy to Kafka.\n\nThe tests verify that:\n- Kafka operations behave properly (get expected results, no exceptions),\n- Kafka metrics in Envoy show proper increases.\n\n**Right now this test is not executed as a part of normal build, and needs to be invoked manually.**\n\n**Please re-run this test if you are making any changes to Kafka-related code:**\n\n```\nbazel test \\\n\t//test/extensions/filters/network/kafka/broker/integration_test:kafka_broker_integration_test \\\n\t--runs_per_test 1000\n```\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/integration_test/envoy_config_yaml.j2",
    "content": "static_resources:\n  listeners:\n  - address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: {{ data['kafka_envoy_port'] }}\n    filter_chains:\n    - filters:\n      - name: kafka\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker\n          stat_prefix: testfilter\n      - name: tcp\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy\n          stat_prefix: ingress_tcp\n          cluster: localinstallation\n  clusters:\n  - name: localinstallation\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    hosts:\n    - socket_address:\n        address: 127.0.0.1\n        port_value: {{ data['kafka_real_port'] }}\nadmin:\n  access_log_path: /dev/null\n  profile_path: /dev/null\n  address:\n    socket_address: { address: 127.0.0.1, port_value: {{ data['envoy_monitoring_port'] }} }\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/integration_test/kafka_broker_integration_test.py",
    "content": "#!/usr/bin/python\n\nimport random\nimport os\nimport shutil\nimport socket\nimport subprocess\nimport tempfile\nfrom threading import Thread, Semaphore\nimport time\nimport unittest\n\nfrom kafka import KafkaAdminClient, KafkaConsumer, KafkaProducer, TopicPartition\nfrom kafka.admin import ConfigResource, ConfigResourceType, NewPartitions, NewTopic\nimport urllib.request\n\n\nclass KafkaBrokerIntegrationTest(unittest.TestCase):\n  \"\"\"\n  All tests in this class depend on Envoy/Zookeeper/Kafka running.\n  For each of these tests we are going to create Kafka consumers/producers/admins and point them\n  to Envoy (that proxies Kafka).\n  We expect every operation to succeed (as they should reach Kafka) and the corresponding metrics\n  to increase on Envoy side (to show that messages were received and forwarded successfully).\n  \"\"\"\n\n  services = None\n\n  @classmethod\n  def setUpClass(cls):\n    KafkaBrokerIntegrationTest.services = ServicesHolder()\n    KafkaBrokerIntegrationTest.services.start()\n\n  @classmethod\n  def tearDownClass(cls):\n    KafkaBrokerIntegrationTest.services.shut_down()\n\n  def setUp(self):\n    # We want to check if our services are okay before running any kind of test.\n    KafkaBrokerIntegrationTest.services.check_state()\n    self.metrics = MetricsHolder(self)\n\n  def tearDown(self):\n    # We want to check if our services are okay after running any test.\n    KafkaBrokerIntegrationTest.services.check_state()\n\n  @classmethod\n  def kafka_address(cls):\n    return '127.0.0.1:%s' % KafkaBrokerIntegrationTest.services.kafka_envoy_port\n\n  @classmethod\n  def envoy_stats_address(cls):\n    return 'http://127.0.0.1:%s/stats' % KafkaBrokerIntegrationTest.services.envoy_monitoring_port\n\n  def test_kafka_consumer_with_no_messages_received(self):\n    \"\"\"\n    This test verifies that consumer sends fetches correctly, and receives nothing.\n    \"\"\"\n\n    consumer = 
KafkaConsumer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address(),\n                             fetch_max_wait_ms=500)\n    consumer.assign([TopicPartition('test_kafka_consumer_with_no_messages_received', 0)])\n    for _ in range(10):\n      records = consumer.poll(timeout_ms=1000)\n      self.assertEqual(len(records), 0)\n\n    self.metrics.collect_final_metrics()\n    # 'consumer.poll()' can translate into 0 or more fetch requests.\n    # We have set API timeout to 1000ms, while fetch_max_wait is 500ms.\n    # This means that consumer will send roughly 2 (1000/500) requests per API call (so 20 total).\n    # So increase of 10 (half of that value) should be safe enough to test.\n    self.metrics.assert_metric_increase('fetch', 10)\n    # Metadata is used by consumer to figure out current partition leader.\n    self.metrics.assert_metric_increase('metadata', 1)\n\n  def test_kafka_producer_and_consumer(self):\n    \"\"\"\n    This test verifies that producer can send messages, and consumer can receive them.\n    \"\"\"\n\n    messages_to_send = 100\n    partition = TopicPartition('test_kafka_producer_and_consumer', 0)\n\n    producer = KafkaProducer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address())\n    for _ in range(messages_to_send):\n      future = producer.send(value=b'some_message_bytes',\n                             topic=partition.topic,\n                             partition=partition.partition)\n      send_status = future.get()\n      self.assertTrue(send_status.offset >= 0)\n\n    consumer = KafkaConsumer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address(),\n                             auto_offset_reset='earliest',\n                             fetch_max_bytes=100)\n    consumer.assign([partition])\n    received_messages = []\n    while (len(received_messages) < messages_to_send):\n      poll_result = consumer.poll(timeout_ms=1000)\n      received_messages += poll_result[partition]\n\n    
self.metrics.collect_final_metrics()\n    self.metrics.assert_metric_increase('metadata', 2)\n    self.metrics.assert_metric_increase('produce', 100)\n    # 'fetch_max_bytes' was set to a very low value, so client will need to send a FetchRequest\n    # multiple times to broker to get all 100 messages (otherwise all 100 records could have been\n    # received in one go).\n    self.metrics.assert_metric_increase('fetch', 20)\n    # Both producer & consumer had to fetch cluster metadata.\n    self.metrics.assert_metric_increase('metadata', 2)\n\n  def test_consumer_with_consumer_groups(self):\n    \"\"\"\n    This test verifies that multiple consumers can form a Kafka consumer group.\n    \"\"\"\n\n    consumer_count = 10\n    consumers = []\n    for id in range(consumer_count):\n      consumer = KafkaConsumer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address(),\n                               group_id='test',\n                               client_id='test-%s' % id)\n      consumer.subscribe(['test_consumer_with_consumer_groups'])\n      consumers.append(consumer)\n\n    worker_threads = []\n    for consumer in consumers:\n      thread = Thread(target=KafkaBrokerIntegrationTest.worker, args=(consumer,))\n      thread.start()\n      worker_threads.append(thread)\n\n    for thread in worker_threads:\n      thread.join()\n\n    for consumer in consumers:\n      consumer.close()\n\n    self.metrics.collect_final_metrics()\n    self.metrics.assert_metric_increase('api_versions', consumer_count)\n    self.metrics.assert_metric_increase('metadata', consumer_count)\n    self.metrics.assert_metric_increase('join_group', consumer_count)\n    self.metrics.assert_metric_increase('find_coordinator', consumer_count)\n    self.metrics.assert_metric_increase('leave_group', consumer_count)\n\n  @staticmethod\n  def worker(consumer):\n    \"\"\"\n    Worker thread for Kafka consumer.\n    Multiple poll-s are done here, so that the group can safely form.\n    \"\"\"\n\n    
poll_operations = 10\n    for i in range(poll_operations):\n      consumer.poll(timeout_ms=1000)\n\n  def test_admin_client(self):\n    \"\"\"\n    This test verifies that Kafka Admin Client can still be used to manage Kafka.\n    \"\"\"\n\n    admin_client = KafkaAdminClient(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address())\n\n    # Create a topic with 3 partitions.\n    new_topic_spec = NewTopic(name='test_admin_client', num_partitions=3, replication_factor=1)\n    create_response = admin_client.create_topics([new_topic_spec])\n    error_data = create_response.topic_errors\n    self.assertEqual(len(error_data), 1)\n    self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))\n\n    # Alter topic (change some Kafka-level property).\n    config_resource = ConfigResource(ConfigResourceType.TOPIC, new_topic_spec.name,\n                                     {'flush.messages': 42})\n    alter_response = admin_client.alter_configs([config_resource])\n    error_data = alter_response.resources\n    self.assertEqual(len(error_data), 1)\n    self.assertEqual(error_data[0][0], 0)\n\n    # Add 2 more partitions to topic.\n    new_partitions_spec = {new_topic_spec.name: NewPartitions(5)}\n    new_partitions_response = admin_client.create_partitions(new_partitions_spec)\n    error_data = new_partitions_response.topic_errors\n    self.assertEqual(len(error_data), 1)\n    self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))\n\n    # Delete a topic.\n    delete_response = admin_client.delete_topics([new_topic_spec.name])\n    error_data = delete_response.topic_error_codes\n    self.assertEqual(len(error_data), 1)\n    self.assertEqual(error_data[0], (new_topic_spec.name, 0))\n\n    self.metrics.collect_final_metrics()\n    self.metrics.assert_metric_increase('create_topics', 1)\n    self.metrics.assert_metric_increase('alter_configs', 1)\n    self.metrics.assert_metric_increase('create_partitions', 1)\n    self.metrics.assert_metric_increase('delete_topics', 
1)\n\n\nclass MetricsHolder:\n  \"\"\"\n  Utility for storing Envoy metrics.\n  Expected to be created before the test (to get initial metrics), and then to collect them at the\n  end of test, so the expected increases can be verified.\n  \"\"\"\n\n  def __init__(self, owner):\n    self.owner = owner\n    self.initial_requests, self.initial_responses = MetricsHolder.get_envoy_stats()\n    self.final_requests = None\n    self.final_responses = None\n\n  def collect_final_metrics(self):\n    self.final_requests, self.final_responses = MetricsHolder.get_envoy_stats()\n\n  def assert_metric_increase(self, message_type, count):\n    request_type = message_type + '_request'\n    response_type = message_type + '_response'\n\n    initial_request_value = self.initial_requests.get(request_type, 0)\n    final_request_value = self.final_requests.get(request_type, 0)\n    self.owner.assertGreaterEqual(final_request_value, initial_request_value + count)\n\n    initial_response_value = self.initial_responses.get(response_type, 0)\n    final_response_value = self.final_responses.get(response_type, 0)\n    self.owner.assertGreaterEqual(final_response_value, initial_response_value + count)\n\n  @staticmethod\n  def get_envoy_stats():\n    \"\"\"\n    Grab request/response metrics from envoy's stats interface.\n    \"\"\"\n\n    stats_url = KafkaBrokerIntegrationTest.envoy_stats_address()\n    requests = {}\n    responses = {}\n    with urllib.request.urlopen(stats_url) as remote_metrics_url:\n      payload = remote_metrics_url.read().decode()\n      lines = payload.splitlines()\n      for line in lines:\n        request_prefix = 'kafka.testfilter.request.'\n        response_prefix = 'kafka.testfilter.response.'\n        if line.startswith(request_prefix):\n          data = line[len(request_prefix):].split(': ')\n          requests[data[0]] = int(data[1])\n          pass\n        if line.startswith(response_prefix) and '_response:' in line:\n          data = 
line[len(response_prefix):].split(': ')\n          responses[data[0]] = int(data[1])\n    return [requests, responses]\n\n\nclass ServicesHolder:\n  \"\"\"\n  Utility class for setting up our external dependencies: Envoy, Zookeeper & Kafka.\n  \"\"\"\n\n  def __init__(self):\n    self.kafka_tmp_dir = None\n\n    self.envoy_worker = None\n    self.zk_worker = None\n    self.kafka_worker = None\n\n  @staticmethod\n  def get_random_listener_port():\n    \"\"\"\n    Here we count on OS to give us some random socket.\n    Obviously this method will need to be invoked in a try loop anyways, as in degenerate scenario\n    someone else might have bound to it after we had closed the socket and before the service\n    that's supposed to use it binds to it.\n    \"\"\"\n\n    import socket\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:\n      server_socket.bind(('0.0.0.0', 0))\n      socket_port = server_socket.getsockname()[1]\n      print('returning %s' % socket_port)\n      return socket_port\n\n  def start(self):\n    \"\"\"\n    Starts all the services we need for integration tests.\n    \"\"\"\n\n    # Find java installation that we are going to use to start Zookeeper & Kafka.\n    java_directory = ServicesHolder.find_java()\n\n    launcher_environment = os.environ.copy()\n    # Make `java` visible to build script:\n    # https://github.com/apache/kafka/blob/2.2.0/bin/kafka-run-class.sh#L226\n    new_path = os.path.abspath(java_directory) + os.pathsep + launcher_environment['PATH']\n    launcher_environment['PATH'] = new_path\n    # Both ZK & Kafka use Kafka launcher script.\n    # By default it sets up JMX options:\n    # https://github.com/apache/kafka/blob/2.2.0/bin/kafka-run-class.sh#L167\n    # But that forces the JVM to load file that is not present due to:\n    # https://docs.oracle.com/javase/9/management/monitoring-and-management-using-jmx-technology.htm\n    # Let's make it simple and just disable JMX.\n    
launcher_environment['KAFKA_JMX_OPTS'] = ' '\n\n    # Setup a temporary directory, which will be used by Kafka & Zookeeper servers.\n    self.kafka_tmp_dir = tempfile.mkdtemp()\n    print('Temporary directory used for tests: ' + self.kafka_tmp_dir)\n\n    # This directory will store the configuration files fed to services.\n    config_dir = self.kafka_tmp_dir + '/config'\n    os.mkdir(config_dir)\n    # This directory will store Zookeeper's data (== Kafka server metadata).\n    zookeeper_store_dir = self.kafka_tmp_dir + '/zookeeper_data'\n    os.mkdir(zookeeper_store_dir)\n    # This directory will store Kafka's data (== partitions).\n    kafka_store_dir = self.kafka_tmp_dir + '/kafka_data'\n    os.mkdir(kafka_store_dir)\n\n    # Find the Kafka server 'bin' directory.\n    kafka_bin_dir = os.path.join('.', 'external', 'kafka_server_binary', 'bin')\n\n    # Main initialization block:\n    # - generate random ports,\n    # - render configuration with these ports,\n    # - start services and check if they are running okay,\n    # - if anything is having problems, kill everything and start again.\n    while True:\n\n      # Generate random ports.\n      zk_port = ServicesHolder.get_random_listener_port()\n      kafka_real_port = ServicesHolder.get_random_listener_port()\n      kafka_envoy_port = ServicesHolder.get_random_listener_port()\n      envoy_monitoring_port = ServicesHolder.get_random_listener_port()\n\n      # These ports need to be exposed to tests.\n      self.kafka_envoy_port = kafka_envoy_port\n      self.envoy_monitoring_port = envoy_monitoring_port\n\n      # Render config file for Envoy.\n      template = RenderingHelper.get_template('envoy_config_yaml.j2')\n      contents = template.render(\n          data={\n              'kafka_real_port': kafka_real_port,\n              'kafka_envoy_port': kafka_envoy_port,\n              'envoy_monitoring_port': envoy_monitoring_port\n          })\n      envoy_config_file = os.path.join(config_dir, 
'envoy_config.yaml')\n      with open(envoy_config_file, 'w') as fd:\n        fd.write(contents)\n        print('Envoy config file rendered at: ' + envoy_config_file)\n\n      # Render config file for Zookeeper.\n      template = RenderingHelper.get_template('zookeeper_properties.j2')\n      contents = template.render(data={'data_dir': zookeeper_store_dir, 'zk_port': zk_port})\n      zookeeper_config_file = os.path.join(config_dir, 'zookeeper.properties')\n      with open(zookeeper_config_file, 'w') as fd:\n        fd.write(contents)\n        print('Zookeeper config file rendered at: ' + zookeeper_config_file)\n\n      # Render config file for Kafka.\n      template = RenderingHelper.get_template('kafka_server_properties.j2')\n      contents = template.render(\n          data={\n              'data_dir': kafka_store_dir,\n              'zk_port': zk_port,\n              'kafka_real_port': kafka_real_port,\n              'kafka_envoy_port': kafka_envoy_port\n          })\n      kafka_config_file = os.path.join(config_dir, 'kafka_server.properties')\n      with open(kafka_config_file, 'w') as fd:\n        fd.write(contents)\n        print('Kafka config file rendered at: ' + kafka_config_file)\n\n      # Start the services now.\n      try:\n\n        # Start Envoy in the background, pointing to rendered config file.\n        envoy_binary = ServicesHolder.find_envoy()\n        # --base-id is added to allow multiple Envoy instances to run at the same time.\n        envoy_args = [\n            os.path.abspath(envoy_binary), '-c', envoy_config_file, '--base-id',\n            str(random.randint(1, 999999))\n        ]\n        envoy_handle = subprocess.Popen(envoy_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        self.envoy_worker = ProcessWorker(envoy_handle, 'Envoy', 'starting main dispatch loop')\n        self.envoy_worker.await_startup()\n\n        # Start Zookeeper in background, pointing to rendered config file.\n        zk_binary = 
os.path.join(kafka_bin_dir, 'zookeeper-server-start.sh')\n        zk_args = [os.path.abspath(zk_binary), zookeeper_config_file]\n        zk_handle = subprocess.Popen(zk_args,\n                                     env=launcher_environment,\n                                     stdout=subprocess.PIPE,\n                                     stderr=subprocess.PIPE)\n        self.zk_worker = ProcessWorker(zk_handle, 'Zookeeper', 'binding to port')\n        self.zk_worker.await_startup()\n\n        # Start Kafka in background, pointing to rendered config file.\n        kafka_binary = os.path.join(kafka_bin_dir, 'kafka-server-start.sh')\n        kafka_args = [os.path.abspath(kafka_binary), kafka_config_file]\n        kafka_handle = subprocess.Popen(kafka_args,\n                                        env=launcher_environment,\n                                        stdout=subprocess.PIPE,\n                                        stderr=subprocess.PIPE)\n        self.kafka_worker = ProcessWorker(kafka_handle, 'Kafka', '[KafkaServer id=0] started')\n        self.kafka_worker.await_startup()\n\n        # All services have started without problems - now we can finally finish.\n        break\n\n      except Exception as e:\n        print('Could not start services, will try again', e)\n\n        if self.kafka_worker:\n          self.kafka_worker.kill()\n          self.kafka_worker = None\n        if self.zk_worker:\n          self.zk_worker.kill()\n          self.zk_worker = None\n        if self.envoy_worker:\n          self.envoy_worker.kill()\n          self.envoy_worker = None\n\n  @staticmethod\n  def find_java():\n    \"\"\"\n    This method just locates the Java installation in current directory.\n    We cannot hardcode the name, as the dirname changes as per:\n    https://github.com/bazelbuild/bazel/blob/master/tools/jdk/BUILD#L491\n    \"\"\"\n\n    external_dir = os.path.join('.', 'external')\n    for directory in os.listdir(external_dir):\n      if 'remotejdk11' in 
directory:\n        result = os.path.join(external_dir, directory, 'bin')\n        print('Using Java: ' + result)\n        return result\n    raise Exception('Could not find Java in: ' + external_dir)\n\n  @staticmethod\n  def find_envoy():\n    \"\"\"\n    This method locates envoy binary.\n    It's present at ./source/exe/envoy-static (at least for mac/bazel-asan/bazel-tsan),\n    or at ./external/envoy/source/exe/envoy-static (for bazel-compile_time_options).\n    \"\"\"\n\n    candidate = os.path.join('.', 'source', 'exe', 'envoy-static')\n    if os.path.isfile(candidate):\n      return candidate\n    candidate = os.path.join('.', 'external', 'envoy', 'source', 'exe', 'envoy-static')\n    if os.path.isfile(candidate):\n      return candidate\n    raise Exception(\"Could not find Envoy\")\n\n  def shut_down(self):\n    # Teardown - kill Kafka, Zookeeper, and Envoy. Then delete their data directory.\n    print('Cleaning up')\n\n    if self.kafka_worker:\n      self.kafka_worker.kill()\n\n    if self.zk_worker:\n      self.zk_worker.kill()\n\n    if self.envoy_worker:\n      self.envoy_worker.kill()\n\n    if self.kafka_tmp_dir:\n      print('Removing temporary directory: ' + self.kafka_tmp_dir)\n      shutil.rmtree(self.kafka_tmp_dir)\n\n  def check_state(self):\n    self.envoy_worker.check_state()\n    self.zk_worker.check_state()\n    self.kafka_worker.check_state()\n\n\nclass ProcessWorker:\n  \"\"\"\n  Helper class that wraps the external service process.\n  Provides ability to wait until service is ready to use (this is done by tracing logs) and\n  printing service's output to stdout.\n  \"\"\"\n\n  # Service is considered to be properly initialized after it has logged its startup message\n  # and has been alive for INITIALIZATION_WAIT_SECONDS after that message has been seen.\n  # This (clunky) design is needed because Zookeeper happens to log \"binding to port\" and then\n  # might fail to bind.\n  INITIALIZATION_WAIT_SECONDS = 3\n\n  def __init__(self, 
process_handle, name, startup_message):\n    # Handle to process and pretty name.\n    self.process_handle = process_handle\n    self.name = name\n\n    self.startup_message = startup_message\n    self.startup_message_ts = None\n\n    # Semaphore raised when startup has finished and information regarding startup's success.\n    self.initialization_semaphore = Semaphore(value=0)\n    self.initialization_ok = False\n\n    self.state_worker = Thread(target=ProcessWorker.initialization_worker, args=(self,))\n    self.state_worker.start()\n    self.out_worker = Thread(target=ProcessWorker.pipe_handler,\n                             args=(self, self.process_handle.stdout, 'out'))\n    self.out_worker.start()\n    self.err_worker = Thread(target=ProcessWorker.pipe_handler,\n                             args=(self, self.process_handle.stderr, 'err'))\n    self.err_worker.start()\n\n  @staticmethod\n  def initialization_worker(owner):\n    \"\"\"\n    Worker thread.\n    Responsible for detecting if service died during initialization steps and ensuring if enough\n    time has passed since the startup message has been seen.\n    When either of these happens, we just raise the initialization semaphore.\n    \"\"\"\n\n    while True:\n      status = owner.process_handle.poll()\n      if status:\n        # Service died.\n        print('%s did not initialize properly - finished with: %s' % (owner.name, status))\n        owner.initialization_ok = False\n        owner.initialization_semaphore.release()\n        break\n      else:\n        # Service is still running.\n        startup_message_ts = owner.startup_message_ts\n        if startup_message_ts:\n          # The log message has been registered (by pipe_handler thread), let's just ensure that\n          # some time has passed and mark the service as running.\n          current_time = int(round(time.time()))\n          if current_time - startup_message_ts >= ProcessWorker.INITIALIZATION_WAIT_SECONDS:\n            
print('Startup message seen %s seconds ago, and service is still running' %\n                  (ProcessWorker.INITIALIZATION_WAIT_SECONDS),\n                  flush=True)\n            owner.initialization_ok = True\n            owner.initialization_semaphore.release()\n            break\n      time.sleep(1)\n    print('Initialization worker for %s has finished' % (owner.name))\n\n  @staticmethod\n  def pipe_handler(owner, pipe, pipe_name):\n    \"\"\"\n    Worker thread.\n    If a service startup message is seen, then it just registers the timestamp of its appearance.\n    Also prints every received message.\n    \"\"\"\n\n    try:\n      for raw_line in pipe:\n        line = raw_line.decode().rstrip()\n        print('%s(%s):' % (owner.name, pipe_name), line, flush=True)\n        if owner.startup_message in line:\n          print('%s initialization message [%s] has been logged' %\n                (owner.name, owner.startup_message))\n          owner.startup_message_ts = int(round(time.time()))\n    finally:\n      pipe.close()\n    print('Pipe handler for %s(%s) has finished' % (owner.name, pipe_name))\n\n  def await_startup(self):\n    \"\"\"\n    Awaits on initialization semaphore, and then verifies the initialization state.\n    If everything is okay, we just continue (we can use the service), otherwise throw.\n    \"\"\"\n\n    print('Waiting for %s to start...' % (self.name))\n    self.initialization_semaphore.acquire()\n    try:\n      if self.initialization_ok:\n        print('Service %s started successfully' % (self.name))\n      else:\n        raise Exception('%s could not start' % (self.name))\n    finally:\n      self.initialization_semaphore.release()\n\n  def check_state(self):\n    \"\"\"\n    Verifies if the service is still running. 
Throws if it is not.\n    \"\"\"\n\n    status = self.process_handle.poll()\n    if status:\n      raise Exception('%s died with: %s' % (self.name, str(status)))\n\n  def kill(self):\n    \"\"\"\n    Utility method to kill the main service thread and all related workers.\n    \"\"\"\n\n    print('Stopping service %s' % self.name)\n\n    # Kill the real process.\n    self.process_handle.kill()\n    self.process_handle.wait()\n\n    # The sub-workers are going to finish on their own, as they will detect main thread dying\n    # (through pipes closing, or .poll() returning a non-null value).\n    self.state_worker.join()\n    self.out_worker.join()\n    self.err_worker.join()\n\n    print('Service %s has been stopped' % self.name)\n\n\nclass RenderingHelper:\n  \"\"\"\n  Helper for jinja templates.\n  \"\"\"\n\n  @staticmethod\n  def get_template(template):\n    import jinja2\n    import os\n    import sys\n    # Templates are resolved relatively to main start script, due to main & test templates being\n    # stored in different directories.\n    env = jinja2.Environment(loader=jinja2.FileSystemLoader(\n        searchpath=os.path.dirname(os.path.abspath(__file__))))\n    return env.get_template(template)\n\n\nif __name__ == '__main__':\n  unittest.main()\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/integration_test/kafka_server_properties.j2",
    "content": "broker.id=0\nlisteners=PLAINTEXT://127.0.0.1:{{ data['kafka_real_port'] }}\nadvertised.listeners=PLAINTEXT://127.0.0.1:{{ data['kafka_envoy_port'] }}\n\nnum.network.threads=3\nnum.io.threads=8\nsocket.send.buffer.bytes=102400\nsocket.receive.buffer.bytes=102400\nsocket.request.max.bytes=104857600\n\nlog.dirs={{ data['data_dir'] }}\nnum.partitions=1\nnum.recovery.threads.per.data.dir=1\n\noffsets.topic.replication.factor=1\ntransaction.state.log.replication.factor=1\ntransaction.state.log.min.isr=1\n\nlog.retention.hours=168\nlog.segment.bytes=1073741824\nlog.retention.check.interval.ms=300000\n\nzookeeper.connect=127.0.0.1:{{ data['zk_port'] }}\nzookeeper.connection.timeout.ms=6000\n\ngroup.initial.rebalance.delay.ms=0\n\n# The number of __consumer_offsets partitions is reduced to make logs a bit more readable.\noffsets.topic.num.partitions=5\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/broker/integration_test/zookeeper_properties.j2",
    "content": "clientPort={{ data['zk_port'] }}\ndataDir={{ data['data_dir'] }}\nmaxClientCnxns=0\n# ZK 3.5 tries to bind 8080 for introspection capability - we do not need that.\nadmin.enableServer=false\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/buffer_based_test.h",
    "content": "#pragma once\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/kafka/serialization.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n// Common utilities for various Kafka buffer-related tests.\n\n/**\n * Utility superclass that keeps a buffer that can be played with during the test.\n */\nclass BufferBasedTest {\nprotected:\n  const char* getBytes() {\n    Buffer::RawSliceVector slices = buffer_.getRawSlices(1);\n    ASSERT(slices.size() == 1);\n    return reinterpret_cast<const char*>((slices[0]).mem_);\n  }\n\n  template <typename T> uint32_t putIntoBuffer(const T& arg) {\n    EncodingContext encoder_{-1}; // Context's api_version is not used when serializing primitives.\n    return encoder_.encode(arg, buffer_);\n  }\n\n  absl::string_view putGarbageIntoBuffer(uint32_t size = 1024) {\n    putIntoBuffer(Bytes(size));\n    return {getBytes(), size};\n  }\n\n  Buffer::OwnedImpl buffer_;\n};\n\n/**\n * Utility superclass that keeps a buffer and can put messages into buffer.\n * @param Encoder class used for encoding messages into buffer\n */\ntemplate <class Encoder> class MessageBasedTest : public BufferBasedTest {\nprotected:\n  template <typename T> void putMessageIntoBuffer(const T& arg) {\n    Encoder encoder{buffer_};\n    encoder.encode(arg);\n  }\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/kafka_request_parser_test.cc",
    "content": "#include \"extensions/filters/network/kafka/kafka_request_parser.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n#include \"test/extensions/filters/network/kafka/serialization_utilities.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace KafkaRequestParserTest {\n\nconst int32_t FAILED_DESERIALIZER_STEP = 13;\n\nclass KafkaRequestParserTest : public testing::Test, public BufferBasedTest {};\n\nclass MockRequestParserResolver : public RequestParserResolver {\npublic:\n  MockRequestParserResolver() = default;\n  MOCK_METHOD(RequestParserSharedPtr, createParser, (int16_t, int16_t, RequestContextSharedPtr),\n              (const));\n};\n\nTEST_F(KafkaRequestParserTest, RequestStartParserTestShouldReturnRequestHeaderParser) {\n  // given\n  MockRequestParserResolver resolver{};\n  RequestStartParser testee{resolver};\n\n  int32_t request_len = 1234;\n  putIntoBuffer(request_len);\n\n  const absl::string_view orig_data = {getBytes(), 1024};\n  absl::string_view data = orig_data;\n\n  // when\n  const RequestParseResponse result = testee.parse(data);\n\n  // then\n  ASSERT_EQ(result.hasData(), true);\n  ASSERT_NE(std::dynamic_pointer_cast<RequestHeaderParser>(result.next_parser_), nullptr);\n  ASSERT_EQ(result.message_, nullptr);\n  ASSERT_EQ(result.failure_data_, nullptr);\n  ASSERT_EQ(testee.contextForTest()->remaining_request_size_, request_len);\n  assertStringViewIncrement(data, orig_data, sizeof(int32_t));\n}\n\nclass MockParser : public RequestParser {\npublic:\n  RequestParseResponse parse(absl::string_view&) override {\n    throw EnvoyException(\"should not be invoked\");\n  }\n};\n\nTEST_F(KafkaRequestParserTest, RequestHeaderParserShouldExtractHeaderAndResolveNextParser) {\n  // given\n  const MockRequestParserResolver parser_resolver;\n  const RequestParserSharedPtr parser{new 
MockParser{}};\n  EXPECT_CALL(parser_resolver, createParser(_, _, _)).WillOnce(Return(parser));\n\n  const int32_t request_len = 1000;\n  RequestContextSharedPtr context{new RequestContext()};\n  context->remaining_request_size_ = request_len;\n  RequestHeaderParser testee{parser_resolver, context};\n\n  const int16_t api_key{1};\n  const int16_t api_version{2};\n  const int32_t correlation_id{10};\n  const NullableString client_id{\"aaa\"};\n  uint32_t header_len = 0;\n  header_len += putIntoBuffer(api_key);\n  header_len += putIntoBuffer(api_version);\n  header_len += putIntoBuffer(correlation_id);\n  header_len += putIntoBuffer(client_id);\n\n  const absl::string_view orig_data = putGarbageIntoBuffer();\n  absl::string_view data = orig_data;\n\n  // when\n  const RequestParseResponse result = testee.parse(data);\n\n  // then\n  ASSERT_EQ(result.hasData(), true);\n  ASSERT_EQ(result.next_parser_, parser);\n  ASSERT_EQ(result.message_, nullptr);\n  ASSERT_EQ(result.failure_data_, nullptr);\n\n  const RequestHeader expected_header{api_key, api_version, correlation_id, client_id};\n  ASSERT_EQ(testee.contextForTest()->request_header_, expected_header);\n  ASSERT_EQ(testee.contextForTest()->remaining_request_size_, request_len - header_len);\n\n  assertStringViewIncrement(data, orig_data, header_len);\n}\n\nTEST_F(KafkaRequestParserTest, RequestDataParserShouldHandleDeserializerExceptionsDuringFeeding) {\n  // given\n\n  // This deserializer throws during feeding.\n  class ThrowingDeserializer : public Deserializer<int32_t> {\n  public:\n    uint32_t feed(absl::string_view&) override {\n      // Move some pointers to simulate data consumption.\n      throw EnvoyException(\"feed\");\n    };\n\n    bool ready() const override { throw std::runtime_error(\"should not be invoked at all\"); };\n\n    int32_t get() const override { throw std::runtime_error(\"should not be invoked at all\"); };\n  };\n\n  RequestContextSharedPtr request_context{new RequestContext{1024, {0, 
0, 0, absl::nullopt}}};\n  RequestDataParser<int32_t, ThrowingDeserializer> testee{request_context};\n\n  absl::string_view data = putGarbageIntoBuffer();\n\n  // when\n  bool caught = false;\n  try {\n    testee.parse(data);\n  } catch (EnvoyException& e) {\n    caught = true;\n  }\n\n  // then\n  ASSERT_EQ(caught, true);\n}\n\n// This deserializer consumes FAILED_DESERIALIZER_STEP bytes and returns 0\nclass SomeBytesDeserializer : public Deserializer<int32_t> {\npublic:\n  uint32_t feed(absl::string_view& data) override {\n    data = {data.data() + FAILED_DESERIALIZER_STEP, data.size() - FAILED_DESERIALIZER_STEP};\n    return FAILED_DESERIALIZER_STEP;\n  };\n\n  bool ready() const override { return true; };\n\n  int32_t get() const override { return 0; };\n};\n\nTEST_F(KafkaRequestParserTest,\n       RequestDataParserShouldHandleDeserializerReturningReadyButLeavingData) {\n  // given\n  const int32_t request_size = 1024; // There are still 1024 bytes to read to complete the request.\n  RequestContextSharedPtr request_context{\n      new RequestContext{request_size, {0, 0, 0, absl::nullopt}}};\n\n  RequestDataParser<int32_t, SomeBytesDeserializer> testee{request_context};\n\n  const absl::string_view orig_data = putGarbageIntoBuffer();\n  absl::string_view data = orig_data;\n\n  // when\n  const RequestParseResponse result = testee.parse(data);\n\n  // then\n  ASSERT_EQ(result.hasData(), true);\n  ASSERT_NE(std::dynamic_pointer_cast<SentinelParser>(result.next_parser_), nullptr);\n  ASSERT_EQ(result.message_, nullptr);\n  ASSERT_EQ(result.failure_data_, nullptr);\n\n  ASSERT_EQ(testee.contextForTest()->remaining_request_size_,\n            request_size - FAILED_DESERIALIZER_STEP);\n\n  assertStringViewIncrement(data, orig_data, FAILED_DESERIALIZER_STEP);\n}\n\nTEST_F(KafkaRequestParserTest, SentinelParserShouldConsumeDataUntilEndOfRequest) {\n  // given\n  const int32_t request_len = 1000;\n  RequestContextSharedPtr context{new RequestContext()};\n  
context->remaining_request_size_ = request_len;\n  SentinelParser testee{context};\n\n  const absl::string_view orig_data = putGarbageIntoBuffer(request_len * 2);\n  absl::string_view data = orig_data;\n\n  // when\n  const RequestParseResponse result = testee.parse(data);\n\n  // then\n  ASSERT_EQ(result.hasData(), true);\n  ASSERT_EQ(result.next_parser_, nullptr);\n  ASSERT_EQ(result.message_, nullptr);\n  ASSERT_NE(std::dynamic_pointer_cast<RequestParseFailure>(result.failure_data_), nullptr);\n\n  ASSERT_EQ(testee.contextForTest()->remaining_request_size_, 0);\n\n  assertStringViewIncrement(data, orig_data, request_len);\n}\n\n} // namespace KafkaRequestParserTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/kafka_response_parser_test.cc",
    "content": "#include \"extensions/filters/network/kafka/kafka_response_parser.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n#include \"test/extensions/filters/network/kafka/serialization_utilities.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace KafkaResponseParserTest {\n\nconst int32_t FAILED_DESERIALIZER_STEP = 13;\n\nclass KafkaResponseParserTest : public testing::Test, public BufferBasedTest {};\n\nclass MockResponseParserResolver : public ResponseParserResolver {\npublic:\n  MockResponseParserResolver() = default;\n  MOCK_METHOD(ResponseParserSharedPtr, createParser, (ResponseContextSharedPtr), (const));\n};\n\nclass MockParser : public ResponseParser {\npublic:\n  ResponseParseResponse parse(absl::string_view&) override {\n    throw EnvoyException(\"should not be invoked\");\n  }\n};\n\nTEST_F(KafkaResponseParserTest, ResponseHeaderParserShouldExtractHeaderAndResolveNextParser) {\n  // given\n  const int32_t payload_length = 100;\n  const int32_t correlation_id = 1234;\n  uint32_t header_len = 0;\n  header_len += putIntoBuffer(payload_length);\n  header_len += putIntoBuffer(correlation_id); // Insert correlation id.\n\n  const absl::string_view orig_data = putGarbageIntoBuffer();\n  absl::string_view data = orig_data;\n\n  const int16_t api_key = 42;\n  const int16_t api_version = 123;\n\n  ExpectedResponsesSharedPtr expected_responses = std::make_shared<ExpectedResponses>();\n  (*expected_responses)[correlation_id] = {api_key, api_version};\n\n  const MockResponseParserResolver parser_resolver;\n  const ResponseParserSharedPtr parser{new MockParser{}};\n  EXPECT_CALL(parser_resolver, createParser(_)).WillOnce(Return(parser));\n\n  ResponseHeaderParser testee{expected_responses, parser_resolver};\n\n  // when\n  const ResponseParseResponse result = testee.parse(data);\n\n  // then\n  
ASSERT_EQ(result.hasData(), true);\n  ASSERT_EQ(result.next_parser_, parser);\n  ASSERT_EQ(result.message_, nullptr);\n  ASSERT_EQ(result.failure_data_, nullptr);\n\n  const auto context = testee.contextForTest();\n  ASSERT_EQ(context->remaining_response_size_, payload_length - sizeof(correlation_id));\n  ASSERT_EQ(context->correlation_id_, correlation_id);\n  ASSERT_EQ(context->api_key_, api_key);\n  ASSERT_EQ(context->api_version_, api_version);\n\n  ASSERT_EQ(expected_responses->size(), 0);\n\n  assertStringViewIncrement(data, orig_data, header_len);\n}\n\nTEST_F(KafkaResponseParserTest, ResponseHeaderParserShouldThrowIfThereIsUnexpectedResponse) {\n  // given\n  const int32_t payload_length = 100;\n  const int32_t correlation_id = 1234;\n  uint32_t header_len = 0;\n  header_len += putIntoBuffer(payload_length);\n  header_len += putIntoBuffer(correlation_id); // Insert correlation id.\n\n  absl::string_view data = putGarbageIntoBuffer();\n\n  ExpectedResponsesSharedPtr expected_responses = std::make_shared<ExpectedResponses>();\n  const MockResponseParserResolver parser_resolver;\n  const ResponseParserSharedPtr parser{new MockParser{}};\n  EXPECT_CALL(parser_resolver, createParser(_)).Times(0);\n\n  ResponseHeaderParser testee{expected_responses, parser_resolver};\n\n  // when\n  // then - exception gets thrown.\n  EXPECT_THROW(testee.parse(data), EnvoyException);\n}\n\nTEST_F(KafkaResponseParserTest, ResponseDataParserShoulRethrowDeserializerExceptionsDuringFeeding) {\n  // given\n\n  // This deserializer throws during feeding.\n  class ThrowingDeserializer : public Deserializer<int32_t> {\n  public:\n    uint32_t feed(absl::string_view&) override {\n      // Move some pointers to simulate data consumption.\n      throw EnvoyException(\"feed\");\n    };\n\n    bool ready() const override { throw std::runtime_error(\"should not be invoked at all\"); };\n\n    int32_t get() const override { throw std::runtime_error(\"should not be invoked at all\"); };\n  };\n\n 
 ResponseContextSharedPtr context = std::make_shared<ResponseContext>();\n  ResponseDataParser<int32_t, ThrowingDeserializer> testee{context};\n\n  absl::string_view data = putGarbageIntoBuffer();\n\n  // when\n  bool caught = false;\n  try {\n    testee.parse(data);\n  } catch (EnvoyException& e) {\n    caught = true;\n  }\n\n  // then\n  ASSERT_EQ(caught, true);\n}\n\n// This deserializer consumes FAILED_DESERIALIZER_STEP bytes and returns 0\nclass SomeBytesDeserializer : public Deserializer<int32_t> {\npublic:\n  uint32_t feed(absl::string_view& data) override {\n    data = {data.data() + FAILED_DESERIALIZER_STEP, data.size() - FAILED_DESERIALIZER_STEP};\n    return FAILED_DESERIALIZER_STEP;\n  };\n\n  bool ready() const override { return true; };\n\n  int32_t get() const override { return 0; };\n};\n\nTEST_F(KafkaResponseParserTest,\n       ResponseDataParserShouldHandleDeserializerReturningReadyButLeavingData) {\n  // given\n  const int32_t message_size = 1024; // There are still 1024 bytes to read to complete the message.\n  ResponseContextSharedPtr context = std::make_shared<ResponseContext>();\n  context->remaining_response_size_ = message_size;\n\n  ResponseDataParser<int32_t, SomeBytesDeserializer> testee{context};\n\n  const absl::string_view orig_data = putGarbageIntoBuffer();\n  absl::string_view data = orig_data;\n\n  // when\n  const ResponseParseResponse result = testee.parse(data);\n\n  // then\n  ASSERT_EQ(result.hasData(), true);\n  ASSERT_NE(std::dynamic_pointer_cast<SentinelResponseParser>(result.next_parser_), nullptr);\n  ASSERT_EQ(result.message_, nullptr);\n  ASSERT_EQ(result.failure_data_, nullptr);\n\n  ASSERT_EQ(testee.contextForTest()->remaining_response_size_,\n            message_size - FAILED_DESERIALIZER_STEP);\n\n  assertStringViewIncrement(data, orig_data, FAILED_DESERIALIZER_STEP);\n}\n\nTEST_F(KafkaResponseParserTest, SentinelResponseParserShouldConsumeDataUntilEndOfMessage) {\n  // given\n  const int32_t response_len = 1000;\n  
ResponseContextSharedPtr context = std::make_shared<ResponseContext>();\n  context->remaining_response_size_ = response_len;\n  SentinelResponseParser testee{context};\n\n  const absl::string_view orig_data = putGarbageIntoBuffer(response_len * 2);\n  absl::string_view data = orig_data;\n\n  // when\n  const ResponseParseResponse result = testee.parse(data);\n\n  // then\n  ASSERT_EQ(result.hasData(), true);\n  ASSERT_EQ(result.next_parser_, nullptr);\n  ASSERT_EQ(result.message_, nullptr);\n  ASSERT_NE(std::dynamic_pointer_cast<ResponseMetadata>(result.failure_data_), nullptr);\n\n  ASSERT_EQ(testee.contextForTest()->remaining_response_size_, 0);\n\n  assertStringViewIncrement(data, orig_data, response_len);\n}\n\n} // namespace KafkaResponseParserTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/message_utilities.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"extensions/filters/network/kafka/kafka_request.h\"\n#include \"extensions/filters/network/kafka/kafka_response.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Payload-related test utilities.\n * This class is intended to be an entry point for all generated methods.\n *\n * The methods declared here are implemented in generated files:\n * - request_utilities.cc (from request_utilities_cc.j2) - for requests,\n * - response_utilities.cc (from response_utilities_cc.j2) - for responses,\n * as they are derived from Kafka protocol specification.\n */\nclass MessageUtilities {\nprivate:\n  MessageUtilities() = default;\n\npublic:\n  /**\n   * How many request/response types are supported.\n   * Proper values are 0..apiKeys() - 1.\n   */\n  static int16_t apiKeys();\n\n  /**\n   * How many request types are supported for given api key.\n   */\n  static int16_t requestApiVersions(const int16_t api_key);\n\n  /**\n   * Make example requests with given api_key.\n   * One message per api version in given api key.\n   * The message correlation id-s start at value provided.\n   */\n  static std::vector<AbstractRequestSharedPtr> makeRequests(const int16_t api_key,\n                                                            int32_t& correlation_id);\n\n  /**\n   * Make example requests, one message per given api key + api version pair.\n   * The message correlation id-s start at 0.\n   */\n  static std::vector<AbstractRequestSharedPtr> makeAllRequests();\n\n  /**\n   * Get the name of request counter metric for given request type.\n   */\n  static std::string requestMetric(const int16_t api_key);\n\n  /**\n   * How many response types are supported for given api key.\n   */\n  static int16_t responseApiVersions(const int16_t api_key);\n\n  /**\n   * Make example requests with given api_key.\n   * One message per api version in given api key.\n   * The message 
correlation id-s start at value provided.\n   */\n  static std::vector<AbstractResponseSharedPtr> makeResponses(const int16_t api_key,\n                                                              int32_t& correlation_id);\n\n  /**\n   * Make example responses, one message per given api key + api version pair.\n   * The message correlation id-s start at 0.\n   */\n  static std::vector<AbstractResponseSharedPtr> makeAllResponses();\n\n  /**\n   * Get the name of response counter metric for given request type.\n   */\n  static std::string responseMetric(const int16_t api_key);\n};\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/metrics_integration_test.cc",
    "content": "#include \"extensions/filters/network/kafka/external/request_metrics.h\"\n#include \"extensions/filters/network/kafka/external/response_metrics.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/extensions/filters/network/kafka/message_utilities.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace MetricsIntegrationTest {\n\nclass MetricsIntegrationTest : public testing::Test {\nprotected:\n  Stats::TestUtil::TestStore scope_;\n  RichRequestMetricsImpl request_metrics_{scope_, \"prefix\"};\n  RichResponseMetricsImpl response_metrics_{scope_, \"prefix\"};\n};\n\nconstexpr static int32_t UPDATE_COUNT = 42;\n\nTEST_F(MetricsIntegrationTest, ShouldUpdateRequestMetrics) {\n  for (int16_t api_key = 0; api_key < MessageUtilities::apiKeys(); ++api_key) {\n    // given\n    // when\n    for (int i = 0; i < UPDATE_COUNT; ++i) {\n      request_metrics_.onRequest(api_key);\n    }\n\n    // then\n    Stats::Counter& counter = scope_.counter(MessageUtilities::requestMetric(api_key));\n    ASSERT_EQ(counter.value(), UPDATE_COUNT);\n  };\n}\n\nTEST_F(MetricsIntegrationTest, ShouldHandleUnparseableRequest) {\n  // given\n  // when\n  for (int i = 0; i < UPDATE_COUNT; ++i) {\n    request_metrics_.onUnknownRequest();\n  }\n\n  // then\n  ASSERT_EQ(scope_.counter(\"kafka.prefix.request.unknown\").value(), UPDATE_COUNT);\n}\n\nTEST_F(MetricsIntegrationTest, ShouldUpdateResponseMetrics) {\n  for (int16_t api_key = 0; api_key < MessageUtilities::apiKeys(); ++api_key) {\n    // given\n    // when\n    for (int i = 0; i < UPDATE_COUNT; ++i) {\n      response_metrics_.onResponse(api_key, 0);\n    }\n\n    // then\n    Stats::Counter& counter = scope_.counter(MessageUtilities::responseMetric(api_key));\n    ASSERT_EQ(counter.value(), UPDATE_COUNT);\n  };\n}\n\nTEST_F(MetricsIntegrationTest, ShouldHandleUnparseableResponse) {\n  // given\n  // when\n  for (int i = 0; i < 
UPDATE_COUNT; ++i) {\n    response_metrics_.onUnknownResponse();\n  }\n\n  // then\n  ASSERT_EQ(scope_.counter(\"kafka.prefix.response.unknown\").value(), UPDATE_COUNT);\n}\n\n} // namespace MetricsIntegrationTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/protocol/launcher.py",
    "content": "#!/usr/bin/python\n\n# Launcher for generating Kafka protocol tests.\n\nimport source.extensions.filters.network.kafka.protocol.generator as generator\nimport sys\nimport os\n\n\ndef main():\n  \"\"\"\n  Kafka test generator script\n  ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n  Generates tests from Kafka protocol specification.\n\n  Usage:\n    launcher.py MESSAGE_TYPE OUTPUT_FILES INPUT_FILES\n  where:\n  MESSAGE_TYPE : 'request' or 'response'\n  OUTPUT_FILES : location of 'requests_test.cc'/'responses_test.cc',\n                 'request_codec_request_test.cc' / 'response_codec_response_test.cc',\n                 'request_utilities.cc' / 'response_utilities.cc'.\n  INPUT_FILES: Kafka protocol json files to be processed.\n\n  Kafka spec files are provided in Kafka clients jar file.\n\n  Files created are:\n    - ${MESSAGE_TYPE}s_test.cc - serialization/deserialization tests for kafka structures,\n    - ${MESSAGE_TYPE}_codec_${MESSAGE_TYPE}_test.cc - integration tests involving codec for all\n      request/response operations,\n    - ${MESSAGE_TYPE}_utilities.cc - utilities for creating sample messages of given type.\n\n  Templates used are:\n  - to create '${MESSAGE_TYPE}s_test.cc': ${MESSAGE_TYPE}s_test_cc.j2,\n  - to create '${MESSAGE_TYPE}_codec_${MESSAGE_TYPE}_test.cc' -\n      ${MESSAGE_TYPE}_codec_${MESSAGE_TYPE}_test_cc.j2,\n  - to create '${MESSAGE_TYPE}_utilities.cc' - ${MESSAGE_TYPE}_utilities_cc.j2.\n  \"\"\"\n  type = sys.argv[1]\n  header_test_cc_file = os.path.abspath(sys.argv[2])\n  codec_test_cc_file = os.path.abspath(sys.argv[3])\n  utilities_cc_file = os.path.abspath(sys.argv[4])\n  input_files = sys.argv[5:]\n  generator.generate_test_code(type, header_test_cc_file, codec_test_cc_file, utilities_cc_file,\n                               input_files)\n\n\nif __name__ == \"__main__\":\n  main()\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2",
    "content": "{#\n  Template for 'request_codec_request_test.cc'.\n\n  Provides integration tests using Kafka codec.\n  The tests do the following:\n  - create the message,\n  - serialize the message into buffer,\n  - pass the buffer to the codec,\n  - capture messages received in callback,\n  - verify that captured messages are identical to the ones sent.\n#}\n#include \"extensions/filters/network/kafka/external/requests.h\"\n#include \"extensions/filters/network/kafka/request_codec.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n#include \"test/extensions/filters/network/kafka/serialization_utilities.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace RequestCodecRequestTest {\n\nclass RequestCodecRequestTest : public testing::Test, public MessageBasedTest<RequestEncoder> {};\n\nusing RequestCapturingCallback = CapturingCallback<RequestCallback, AbstractRequestSharedPtr,\n  RequestParseFailureSharedPtr>;\n\n{% for message_type in message_types %}\n\n// Integration test for {{ message_type.name }} messages.\n\nTEST_F(RequestCodecRequestTest, ShouldHandle{{ message_type.name }}Messages) {\n  // given\n  using RequestUnderTest = Request<{{ message_type.name }}>;\n\n  std::vector<RequestUnderTest> sent;\n  int32_t correlation = 0;\n\n  {% for field_list in message_type.compute_field_lists() %}\n  for (int i = 0; i < 100; ++i ) {\n    {# Request header cannot contain tagged fields if request does not support them. 
#}\n    const TaggedFields tagged_fields = requestUsesTaggedFieldsInHeader(\n      {{ message_type.get_extra('api_key') }}, {{ field_list.version }}) ?\n        TaggedFields{ { TaggedField{ 10, Bytes{1, 2, 3, 4} } } }:\n        TaggedFields({});\n    const RequestHeader header = {\n      {{ message_type.get_extra('api_key') }},\n      {{ field_list.version }},\n      correlation++,\n      \"id\",\n      tagged_fields\n    };\n    const {{ message_type.name }} data = { {{ field_list.example_value() }} };\n    const RequestUnderTest request = {header, data};\n    putMessageIntoBuffer(request);\n    sent.push_back(request);\n  }\n  {% endfor %}\n\n  const InitialParserFactory& initial_parser_factory = InitialParserFactory::getDefaultInstance();\n  const RequestParserResolver& request_parser_resolver =\n    RequestParserResolver::getDefaultInstance();\n  const auto callback = std::make_shared<RequestCapturingCallback>();\n\n  RequestDecoder testee{initial_parser_factory, request_parser_resolver, {callback}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  const std::vector<AbstractRequestSharedPtr>& received = callback->getCapturedMessages();\n  ASSERT_EQ(received.size(), sent.size());\n  ASSERT_EQ(received.size(), correlation);\n\n  for (size_t i = 0; i < received.size(); ++i) {\n    const std::shared_ptr<RequestUnderTest> request =\n      std::dynamic_pointer_cast<RequestUnderTest>(received[i]);\n    ASSERT_NE(request, nullptr);\n    ASSERT_EQ(*request, sent[i]);\n  }\n}\n{% endfor %}\n\n} // namespace RequestCodecRequestTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/protocol/request_utilities_cc.j2",
    "content": "{#\n  Template for 'request_utilities.cc'.\n  This file contains implementation of request-related methods contained in 'message_utilities.h'.\n#}\n\n#include \"test/extensions/filters/network/kafka/message_utilities.h\"\n\n#include \"extensions/filters/network/kafka/external/requests.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nint16_t MessageUtilities::apiKeys() {\n  return {{ message_types | length }};\n}\n\nint16_t MessageUtilities::requestApiVersions(const int16_t api_key) {\n  switch (api_key) {\n  {% for message_type in message_types %}\n  case {{ message_type.get_extra('api_key') }} :\n    return {{ message_type.compute_field_lists() | length }} ;\n  {% endfor %}\n  default:\n    throw EnvoyException(\"unsupported api key used in test code\");\n  }\n}\n\nstd::vector<AbstractRequestSharedPtr> MessageUtilities::makeRequests(\n  const int16_t api_key, int32_t& correlation_id) {\n\n  if ((api_key < 0) || (api_key >= {{ message_types | length }})) {\n    throw EnvoyException(\"unsupported api key used in test code\");\n  }\n\n  std::vector<AbstractRequestSharedPtr> result;\n  {% for message_type in message_types %}\n  if ({{ message_type.get_extra('api_key') }} == api_key) {\n    {% for field_list in message_type.compute_field_lists() %}\n    {\n      const RequestHeader header = {\n          {{ message_type.get_extra('api_key') }}, {{ field_list.version }}, correlation_id++,\n          \"id\" };\n      const {{ message_type.name }} data = { {{ field_list.example_value() }} };\n      const AbstractRequestSharedPtr request = std::make_shared<Request<{{ message_type.name }}>>(\n          header, data);\n      result.push_back(request);\n    }\n    {% endfor %}\n  }\n  {% endfor %}\n  return result;\n}\n\nstd::vector<AbstractRequestSharedPtr> MessageUtilities::makeAllRequests() {\n  std::vector<AbstractRequestSharedPtr> result;\n  int32_t correlation_id = 0;\n  for (int16_t i = 0; i < 
MessageUtilities::apiKeys(); ++i) {\n    const std::vector<AbstractRequestSharedPtr> tmp =\n        MessageUtilities::makeRequests(i, correlation_id);\n    result.insert(result.end(), tmp.begin(), tmp.end());\n  }\n  return result;\n}\n\nstd::string MessageUtilities::requestMetric(const int16_t api_key) {\n  switch (api_key) {\n  {% for message_type in message_types %}\n  case {{ message_type.get_extra('api_key') }} :\n    return \"kafka.prefix.request.{{ message_type.name_in_c_case() }}\" ;\n  {% endfor %}\n  default:\n    throw EnvoyException(\"unsupported api key used in test code\");\n  }\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/protocol/requests_test_cc.j2",
    "content": "{#\n  Template for request serialization/deserialization tests.\n  For every request, we want to check if it can be serialized and deserialized properly.\n#}\n\n#include \"extensions/filters/network/kafka/external/requests.h\"\n#include \"extensions/filters/network/kafka/request_codec.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace RequestTest {\n\nclass RequestTest : public testing::Test, public MessageBasedTest<RequestEncoder> {\nprotected:\n  template <typename T> std::shared_ptr<T> serializeAndDeserialize(T message);\n};\n\nclass MockMessageListener : public RequestCallback {\npublic:\n  MOCK_METHOD(void, onMessage, (AbstractRequestSharedPtr));\n  MOCK_METHOD(void, onFailedParse, (RequestParseFailureSharedPtr));\n};\n\n/**\n * Helper method.\n * Takes an instance of a request, serializes it, then deserializes it.\n * This method gets executed for every request * version pair.\n */\ntemplate <typename T> std::shared_ptr<T> RequestTest::serializeAndDeserialize(T message) {\n  putMessageIntoBuffer(message);\n\n  std::shared_ptr<MockMessageListener> mock_listener = std::make_shared<MockMessageListener>();\n  RequestDecoder testee{ {mock_listener} };\n\n  AbstractRequestSharedPtr received_message;\n  EXPECT_CALL(*mock_listener, onMessage(testing::_))\n    .WillOnce(testing::SaveArg<0>(&received_message));\n\n  testee.onData(buffer_);\n\n  return std::dynamic_pointer_cast<T>(received_message);\n};\n\n{#\n  Concrete tests for each message_type and version (field_list).\n  Each request is naively constructed using some default values\n  (put \"string\" as std::string, 32 as uint32_t, etc.).\n#}\n{% for message_type in message_types %}{% for field_list in message_type.compute_field_lists() %}\nTEST_F(RequestTest, ShouldParse{{ message_type.name }}V{{ 
field_list.version }}) {\n  // given\n  {{ message_type.name }} data = { {{ field_list.example_value() }} };\n  Request<{{ message_type.name }}> message = { {\n    {{ message_type.get_extra('api_key') }}, {{ field_list.version }}, 0, absl::nullopt }, data };\n\n  // when\n  auto received = serializeAndDeserialize(message);\n\n  // then\n  ASSERT_NE(received, nullptr);\n  ASSERT_EQ(*received, message);\n}\n{% endfor %}{% endfor %}\n\n} // namespace RequestTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2",
    "content": "{#\n  Template for 'response_codec_response_test.cc'.\n\n  Provides integration tests using Kafka codec.\n  The tests do the following:\n  - create the message,\n  - serialize the message into buffer,\n  - pass the buffer to the codec,\n  - capture messages received in callback,\n  - verify that captured messages are identical to the ones sent.\n#}\n#include \"extensions/filters/network/kafka/external/responses.h\"\n#include \"extensions/filters/network/kafka/response_codec.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n#include \"test/extensions/filters/network/kafka/serialization_utilities.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace ResponseCodecResponseTest {\n\nclass ResponseCodecResponseTest : public testing::Test, public MessageBasedTest<ResponseEncoder> {};\n\nusing ResponseCapturingCallback = CapturingCallback<ResponseCallback, AbstractResponseSharedPtr,\n  ResponseMetadataSharedPtr>;\n\n{% for message_type in message_types %}\n\n// Integration test for {{ message_type.name }} messages.\n\nTEST_F(ResponseCodecResponseTest, ShouldHandle{{ message_type.name }}Messages) {\n  // given\n  const auto callback = std::make_shared<ResponseCapturingCallback>();\n  ResponseDecoder testee{ {callback} };\n\n  using ResponseUnderTest = Response<{{ message_type.name }}>;\n\n  std::vector<ResponseUnderTest> sent;\n  int32_t correlation_id = 0;\n\n  {% for field_list in message_type.compute_field_lists() %}\n  for (int i = 0; i < 100; ++i ) {\n    {# Response header cannot contain tagged fields if response does not support them. 
#}\n    const TaggedFields tagged_fields = responseUsesTaggedFieldsInHeader(\n      {{ message_type.get_extra('api_key') }}, {{ field_list.version }}) ?\n        TaggedFields{ { TaggedField{ 10, Bytes{1, 2, 3, 4} } } }:\n        TaggedFields({});\n    const ResponseMetadata metadata = {\n      {{ message_type.get_extra('api_key') }},\n      {{ field_list.version }},\n      ++correlation_id,\n      tagged_fields,\n    };\n    const {{ message_type.name }} data = { {{ field_list.example_value() }} };\n    const ResponseUnderTest response = {metadata, data};\n    putMessageIntoBuffer(response);\n    testee.expectResponse(\n      correlation_id, {{ message_type.get_extra('api_key') }}, {{ field_list.version }});\n    sent.push_back(response);\n  }\n  {% endfor %}\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  const std::vector<AbstractResponseSharedPtr>& received = callback->getCapturedMessages();\n  ASSERT_EQ(received.size(), sent.size());\n  ASSERT_EQ(received.size(), correlation_id);\n\n  for (size_t i = 0; i < received.size(); ++i) {\n    const std::shared_ptr<ResponseUnderTest> response =\n      std::dynamic_pointer_cast<ResponseUnderTest>(received[i]);\n    ASSERT_NE(response, nullptr);\n    ASSERT_EQ(*response, sent[i]);\n  }\n}\n{% endfor %}\n\n} // namespace ResponseCodecResponseTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/protocol/response_utilities_cc.j2",
    "content": "{#\n  Template for 'response_utilities.cc'.\n  This file contains implementation of response-related methods contained in 'message_utilities.h'.\n#}\n\n#include \"test/extensions/filters/network/kafka/message_utilities.h\"\n\n#include \"extensions/filters/network/kafka/external/responses.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nint16_t MessageUtilities::responseApiVersions(const int16_t api_key) {\n  switch (api_key) {\n  {% for message_type in message_types %}\n  case {{ message_type.get_extra('api_key') }} :\n    return {{ message_type.compute_field_lists() | length }} ;\n  {% endfor %}\n  default:\n    throw EnvoyException(\"unsupported api key used in test code\");\n  }\n}\n\nstd::vector<AbstractResponseSharedPtr> MessageUtilities::makeResponses(\n  const int16_t api_key, int32_t& correlation_id) {\n\n  if ((api_key < 0) || (api_key >= {{ message_types | length }})) {\n    throw EnvoyException(\"unsupported api key used in test code\");\n  }\n\n  std::vector<AbstractResponseSharedPtr> result;\n  {% for message_type in message_types %}\n  if ({{ message_type.get_extra('api_key') }} == api_key) {\n    {% for field_list in message_type.compute_field_lists() %}\n    {\n      const ResponseMetadata metadata = {\n          {{ message_type.get_extra('api_key') }}, {{ field_list.version }}, correlation_id++ };\n      const {{ message_type.name }} data = { {{ field_list.example_value() }} };\n      const AbstractResponseSharedPtr response =\n          std::make_shared<Response<{{ message_type.name }}>>(metadata, data);\n      result.push_back(response);\n    }\n    {% endfor %}\n  }\n  {% endfor %}\n  return result;\n}\n\nstd::vector<AbstractResponseSharedPtr> MessageUtilities::makeAllResponses() {\n  std::vector<AbstractResponseSharedPtr> result;\n  int32_t correlation_id = 0;\n  for (int16_t i = 0; i < MessageUtilities::apiKeys(); ++i) {\n    const std::vector<AbstractResponseSharedPtr> tmp =\n  
      MessageUtilities::makeResponses(i, correlation_id);\n    result.insert(result.end(), tmp.begin(), tmp.end());\n  }\n  return result;\n}\n\nstd::string MessageUtilities::responseMetric(const int16_t api_key) {\n  switch (api_key) {\n  {% for message_type in message_types %}\n  case {{ message_type.get_extra('api_key') }} :\n    return \"kafka.prefix.response.{{ message_type.name_in_c_case() }}\" ;\n  {% endfor %}\n  default:\n    throw EnvoyException(\"unsupported api key used in test code\");\n  }\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/protocol/responses_test_cc.j2",
    "content": "{#\n  Template for response serialization/deserialization tests.\n  For every response, we want to check if it can be serialized and deserialized properly.\n#}\n\n#include \"extensions/filters/network/kafka/external/responses.h\"\n#include \"extensions/filters/network/kafka/response_codec.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace ResponseTest {\n\nclass ResponseTest : public testing::Test, public MessageBasedTest<ResponseEncoder> {\nprotected:\n  template <typename T> std::shared_ptr<T> serializeAndDeserialize(T message);\n};\n\nclass MockMessageListener : public ResponseCallback {\npublic:\n  MOCK_METHOD(void, onMessage, (AbstractResponseSharedPtr));\n  MOCK_METHOD(void, onFailedParse, (ResponseMetadataSharedPtr));\n};\n\n/**\n * Helper method.\n * Takes an instance of a response, serializes it, then deserializes it.\n * This method gets executed for every response * version pair.\n */\ntemplate <typename T> std::shared_ptr<T> ResponseTest::serializeAndDeserialize(T message) {\n  putMessageIntoBuffer(message);\n\n  std::shared_ptr<MockMessageListener> mock_listener = std::make_shared<MockMessageListener>();\n  ResponseDecoder testee{ {mock_listener} };\n  const ResponseMetadata& metadata = message.metadata_;\n  testee.expectResponse(metadata.correlation_id_, metadata.api_key_, metadata.api_version_);\n\n  AbstractResponseSharedPtr received_message;\n  EXPECT_CALL(*mock_listener, onMessage(testing::_))\n    .WillOnce(testing::SaveArg<0>(&received_message));\n\n  testee.onData(buffer_);\n\n  return std::dynamic_pointer_cast<T>(received_message);\n};\n\n{#\n  Concrete tests for each message_type and version (field_list).\n  Each response is naively constructed using some default values\n  (put \"string\" as std::string, 32 as uint32_t, etc.).\n#}\n{% for 
message_type in message_types %}{% for field_list in message_type.compute_field_lists() %}\nTEST_F(ResponseTest, ShouldParse{{ message_type.name }}V{{ field_list.version }}) {\n  // given\n  {{ message_type.name }} data = { {{ field_list.example_value() }} };\n  Response<{{ message_type.name }}> message = { {\n    {{ message_type.get_extra('api_key') }}, {{ field_list.version }}, 0 }, data };\n\n  // when\n  auto received = serializeAndDeserialize(message);\n\n  // then\n  ASSERT_NE(received, nullptr);\n  ASSERT_EQ(*received, message);\n}\n{% endfor %}{% endfor %}\n\n} // namespace ResponseTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/request_codec_integration_test.cc",
    "content": "#include \"extensions/filters/network/kafka/request_codec.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n#include \"test/extensions/filters/network/kafka/serialization_utilities.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace RequestCodecIntegrationTest {\n\nclass RequestCodecIntegrationTest : public testing::Test,\n                                    public MessageBasedTest<RequestEncoder> {};\n\nusing RequestCapturingCallback =\n    CapturingCallback<RequestCallback, AbstractRequestSharedPtr, RequestParseFailureSharedPtr>;\n\n// Other request types are tested in (generated) 'request_codec_request_test.cc'.\nTEST_F(RequestCodecIntegrationTest, ShouldProduceAbortedMessageOnUnknownData) {\n  // given\n  // As real api keys have values below 100, the messages generated in this loop should not be\n  // recognized by the codec.\n  const int16_t base_api_key = 100;\n  std::vector<RequestHeader> sent_headers;\n  for (int16_t i = 0; i < 1000; ++i) {\n    const int16_t api_key = static_cast<int16_t>(base_api_key + i);\n    const RequestHeader header = {api_key, 0, 0, \"client-id\"};\n    const std::vector<unsigned char> data = std::vector<unsigned char>(1024);\n    const auto message = Request<std::vector<unsigned char>>{header, data};\n    putMessageIntoBuffer(message);\n    sent_headers.push_back(header);\n  }\n\n  const InitialParserFactory& initial_parser_factory = InitialParserFactory::getDefaultInstance();\n  const RequestParserResolver& request_parser_resolver =\n      RequestParserResolver::getDefaultInstance();\n  const auto request_callback = std::make_shared<RequestCapturingCallback>();\n\n  RequestDecoder testee{initial_parser_factory, request_parser_resolver, {request_callback}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(request_callback->getCapturedMessages().size(), 0);\n\n  const 
std::vector<RequestParseFailureSharedPtr>& parse_failures =\n      request_callback->getParseFailures();\n  ASSERT_EQ(parse_failures.size(), sent_headers.size());\n\n  for (size_t i = 0; i < parse_failures.size(); ++i) {\n    const std::shared_ptr<RequestParseFailure> failure_data =\n        std::dynamic_pointer_cast<RequestParseFailure>(parse_failures[i]);\n    ASSERT_NE(failure_data, nullptr);\n    ASSERT_EQ(failure_data->request_header_, sent_headers[i]);\n  }\n}\n\n} // namespace RequestCodecIntegrationTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/request_codec_unit_test.cc",
    "content": "#include \"extensions/filters/network/kafka/request_codec.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::Invoke;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace RequestCodecUnitTest {\n\nclass MockParserFactory : public InitialParserFactory {\npublic:\n  MOCK_METHOD(RequestParserSharedPtr, create, (const RequestParserResolver&), (const));\n};\n\nclass MockParser : public RequestParser {\npublic:\n  MOCK_METHOD(RequestParseResponse, parse, (absl::string_view&));\n};\n\nusing MockParserSharedPtr = std::shared_ptr<MockParser>;\n\nclass MockRequestParserResolver : public RequestParserResolver {\npublic:\n  MockRequestParserResolver() : RequestParserResolver({}){};\n  MOCK_METHOD(RequestParserSharedPtr, createParser, (int16_t, int16_t, RequestContextSharedPtr),\n              (const));\n};\n\nclass MockRequestCallback : public RequestCallback {\npublic:\n  MOCK_METHOD(void, onMessage, (AbstractRequestSharedPtr));\n  MOCK_METHOD(void, onFailedParse, (RequestParseFailureSharedPtr));\n};\n\nusing MockRequestCallbackSharedPtr = std::shared_ptr<MockRequestCallback>;\n\nclass RequestCodecUnitTest : public testing::Test, public BufferBasedTest {\nprotected:\n  MockParserFactory initial_parser_factory_{};\n  MockRequestParserResolver parser_resolver_{};\n  MockRequestCallbackSharedPtr callback_{std::make_shared<MockRequestCallback>()};\n};\n\nRequestParseResponse consumeOneByte(absl::string_view& data) {\n  data = {data.data() + 1, data.size() - 1};\n  return RequestParseResponse::stillWaiting();\n}\n\nTEST_F(RequestCodecUnitTest, ShouldDoNothingIfParserReturnsWaiting) {\n  // given\n  putGarbageIntoBuffer();\n\n  MockParserSharedPtr parser = std::make_shared<MockParser>();\n  EXPECT_CALL(*parser, 
parse(_)).Times(AnyNumber()).WillRepeatedly(Invoke(consumeOneByte));\n\n  EXPECT_CALL(initial_parser_factory_, create(_)).WillOnce(Return(parser));\n\n  EXPECT_CALL(*callback_, onMessage(_)).Times(0);\n  EXPECT_CALL(*callback_, onFailedParse(_)).Times(0);\n\n  RequestDecoder testee{initial_parser_factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  // There were no interactions with `callback_`.\n}\n\nTEST_F(RequestCodecUnitTest, ShouldUseNewParserAsResponse) {\n  // given\n  putGarbageIntoBuffer();\n\n  MockParserSharedPtr parser1 = std::make_shared<MockParser>();\n  MockParserSharedPtr parser2 = std::make_shared<MockParser>();\n  MockParserSharedPtr parser3 = std::make_shared<MockParser>();\n  EXPECT_CALL(*parser1, parse(_)).WillOnce(Return(RequestParseResponse::nextParser(parser2)));\n  EXPECT_CALL(*parser2, parse(_)).WillOnce(Return(RequestParseResponse::nextParser(parser3)));\n  EXPECT_CALL(*parser3, parse(_)).Times(AnyNumber()).WillRepeatedly(Invoke(consumeOneByte));\n\n  EXPECT_CALL(initial_parser_factory_, create(_)).WillOnce(Return(parser1));\n  EXPECT_CALL(parser_resolver_, createParser(_, _, _)).Times(0);\n\n  EXPECT_CALL(*callback_, onMessage(_)).Times(0);\n  EXPECT_CALL(*callback_, onFailedParse(_)).Times(0);\n\n  RequestDecoder testee{initial_parser_factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(testee.getCurrentParserForTest(), parser3);\n  // Also, there were no interactions with `callback_`.\n}\n\nTEST_F(RequestCodecUnitTest, ShouldPassParsedMessageToCallback) {\n  // given\n  putGarbageIntoBuffer();\n\n  const AbstractRequestSharedPtr parsed_message =\n      std::make_shared<Request<int32_t>>(RequestHeader{0, 0, 0, \"\"}, 0);\n\n  MockParserSharedPtr all_consuming_parser = std::make_shared<MockParser>();\n  auto consume_and_return = [&parsed_message](absl::string_view& data) -> RequestParseResponse {\n    data = {data.data() + data.size(), 
0};\n    return RequestParseResponse::parsedMessage(parsed_message);\n  };\n  EXPECT_CALL(*all_consuming_parser, parse(_)).WillOnce(Invoke(consume_and_return));\n\n  EXPECT_CALL(initial_parser_factory_, create(_)).WillOnce(Return(all_consuming_parser));\n  EXPECT_CALL(parser_resolver_, createParser(_, _, _)).Times(0);\n\n  EXPECT_CALL(*callback_, onMessage(parsed_message));\n  EXPECT_CALL(*callback_, onFailedParse(_)).Times(0);\n\n  RequestDecoder testee{initial_parser_factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(testee.getCurrentParserForTest(), nullptr);\n  // Also, `callback_` had `onMessage` invoked once with matching argument.\n}\n\nTEST_F(RequestCodecUnitTest, ShouldPassParsedMessageToCallbackAndInitializeNextParser) {\n  // given\n  putGarbageIntoBuffer();\n\n  const AbstractRequestSharedPtr parsed_message =\n      std::make_shared<Request<int32_t>>(RequestHeader{0, 0, 0, absl::nullopt}, 0);\n\n  MockParserSharedPtr parser1 = std::make_shared<MockParser>();\n  EXPECT_CALL(*parser1, parse(_))\n      .WillOnce(Return(RequestParseResponse::parsedMessage(parsed_message)));\n\n  MockParserSharedPtr parser2 = std::make_shared<MockParser>();\n  EXPECT_CALL(*parser2, parse(_)).Times(AnyNumber()).WillRepeatedly(Invoke(consumeOneByte));\n\n  EXPECT_CALL(initial_parser_factory_, create(_))\n      .WillOnce(Return(parser1))\n      .WillOnce(Return(parser2));\n\n  EXPECT_CALL(*callback_, onMessage(parsed_message));\n  EXPECT_CALL(*callback_, onFailedParse(_)).Times(0);\n\n  RequestDecoder testee{initial_parser_factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(testee.getCurrentParserForTest(), parser2);\n  // Also, `callback_` had `onMessage` invoked once with matching argument.\n}\n\nTEST_F(RequestCodecUnitTest, ShouldPassParseFailureDataToCallback) {\n  // given\n  putGarbageIntoBuffer();\n\n  const RequestParseFailureSharedPtr failure_data =\n      
std::make_shared<RequestParseFailure>(RequestHeader{0, 0, 0, absl::nullopt});\n\n  MockParserSharedPtr parser = std::make_shared<MockParser>();\n  auto consume_and_return = [&failure_data](absl::string_view& data) -> RequestParseResponse {\n    data = {data.data() + data.size(), 0};\n    return RequestParseResponse::parseFailure(failure_data);\n  };\n  EXPECT_CALL(*parser, parse(_)).WillOnce(Invoke(consume_and_return));\n\n  EXPECT_CALL(initial_parser_factory_, create(_)).WillOnce(Return(parser));\n  EXPECT_CALL(parser_resolver_, createParser(_, _, _)).Times(0);\n\n  EXPECT_CALL(*callback_, onMessage(_)).Times(0);\n  EXPECT_CALL(*callback_, onFailedParse(failure_data));\n\n  RequestDecoder testee{initial_parser_factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(testee.getCurrentParserForTest(), nullptr);\n  // Also, `callback_` had `onFailedParse` invoked once with matching argument.\n}\n\n} // namespace RequestCodecUnitTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/response_codec_integration_test.cc",
    "content": "#include \"extensions/filters/network/kafka/response_codec.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n#include \"test/extensions/filters/network/kafka/serialization_utilities.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace ResponseCodecIntegrationTest {\n\nclass ResponseCodecIntegrationTest : public testing::Test,\n                                     public MessageBasedTest<ResponseEncoder> {};\n\nusing ResponseCapturingCallback =\n    CapturingCallback<ResponseCallback, AbstractResponseSharedPtr, ResponseMetadataSharedPtr>;\n\n// Other response types are tested in (generated) 'response_codec_response_test.cc'.\nTEST_F(ResponseCodecIntegrationTest, ShouldProduceAbortedMessageOnUnknownData) {\n  // given\n  const auto callback = std::make_shared<ResponseCapturingCallback>();\n  ResponseDecoder testee{{callback}};\n\n  // As real api keys have values below 100, the messages generated in this loop should not be\n  // recognized by the codec.\n  const int16_t base_api_key = 100;\n  const int32_t base_correlation_id = 0;\n  std::vector<ResponseMetadata> sent;\n\n  for (int16_t i = 0; i < 1000; ++i) {\n    const int16_t api_key = static_cast<int16_t>(base_api_key + i);\n    const int16_t api_version = 0;\n    const int32_t correlation_id = base_correlation_id + i;\n\n    const ResponseMetadata metadata = {api_key, api_version, correlation_id};\n    const std::vector<unsigned char> data = std::vector<unsigned char>(1024);\n    const auto message = Response<std::vector<unsigned char>>{metadata, data};\n    putMessageIntoBuffer(message);\n    sent.push_back(metadata);\n    // We need to register the response, so the parser knows what to expect.\n    testee.expectResponse(correlation_id, api_key, api_version);\n  }\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(callback->getCapturedMessages().size(), 0);\n\n  const 
std::vector<ResponseMetadataSharedPtr>& parse_failures = callback->getParseFailures();\n  ASSERT_EQ(parse_failures.size(), sent.size());\n  for (size_t i = 0; i < parse_failures.size(); ++i) {\n    ASSERT_EQ(*(parse_failures[i]), sent[i]);\n  }\n}\n\nTEST_F(ResponseCodecIntegrationTest, ShouldThrowIfAttemptingToParseResponseButNothingIsExpected) {\n  // given\n  const auto callback = std::make_shared<ResponseCapturingCallback>();\n  ResponseDecoder testee{{callback}};\n\n  putGarbageIntoBuffer();\n\n  // when\n  bool caught = false;\n  try {\n    testee.onData(buffer_);\n  } catch (EnvoyException& e) {\n    caught = true;\n  }\n\n  // then\n  ASSERT_EQ(caught, true);\n}\n\n} // namespace ResponseCodecIntegrationTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/response_codec_unit_test.cc",
    "content": "#include \"extensions/filters/network/kafka/response_codec.h\"\n\n#include \"test/extensions/filters/network/kafka/buffer_based_test.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::Invoke;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace ResponseCodecUnitTest {\n\nclass MockResponseInitialParserFactory : public ResponseInitialParserFactory {\npublic:\n  MOCK_METHOD(ResponseParserSharedPtr, create,\n              (ExpectedResponsesSharedPtr, const ResponseParserResolver&), (const));\n};\n\nclass MockParser : public ResponseParser {\npublic:\n  MOCK_METHOD(ResponseParseResponse, parse, (absl::string_view&));\n};\n\nusing MockParserSharedPtr = std::shared_ptr<MockParser>;\n\nclass MockResponseParserResolver : public ResponseParserResolver {\npublic:\n  MockResponseParserResolver() : ResponseParserResolver({}){};\n  MOCK_METHOD(ResponseParserSharedPtr, createParser, (ResponseContextSharedPtr), (const));\n};\n\nclass MockResponseCallback : public ResponseCallback {\npublic:\n  MOCK_METHOD(void, onMessage, (AbstractResponseSharedPtr));\n  MOCK_METHOD(void, onFailedParse, (ResponseMetadataSharedPtr));\n};\n\nusing MockResponseCallbackSharedPtr = std::shared_ptr<MockResponseCallback>;\n\nclass ResponseCodecUnitTest : public testing::Test, public BufferBasedTest {\nprotected:\n  MockResponseInitialParserFactory factory_{};\n  MockResponseParserResolver parser_resolver_{};\n  MockResponseCallbackSharedPtr callback_{std::make_shared<MockResponseCallback>()};\n};\n\nResponseParseResponse consumeOneByte(absl::string_view& data) {\n  data = {data.data() + 1, data.size() - 1};\n  return ResponseParseResponse::stillWaiting();\n}\n\nTEST_F(ResponseCodecUnitTest, ShouldDoNothingIfParserReturnsWaiting) {\n  // given\n  putGarbageIntoBuffer();\n\n  MockParserSharedPtr parser = std::make_shared<MockParser>();\n  
EXPECT_CALL(*parser, parse(_)).Times(AnyNumber()).WillRepeatedly(Invoke(consumeOneByte));\n\n  EXPECT_CALL(factory_, create(_, _)).WillOnce(Return(parser));\n  EXPECT_CALL(parser_resolver_, createParser(_)).Times(0);\n\n  EXPECT_CALL(*callback_, onMessage(_)).Times(0);\n  EXPECT_CALL(*callback_, onFailedParse(_)).Times(0);\n\n  ResponseDecoder testee{factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  // There were no interactions with `callback_`.\n}\n\nTEST_F(ResponseCodecUnitTest, ShouldUseNewParserAsResponse) {\n  // given\n  putGarbageIntoBuffer();\n\n  MockParserSharedPtr parser1 = std::make_shared<MockParser>();\n  MockParserSharedPtr parser2 = std::make_shared<MockParser>();\n  MockParserSharedPtr parser3 = std::make_shared<MockParser>();\n  EXPECT_CALL(*parser1, parse(_)).WillOnce(Return(ResponseParseResponse::nextParser(parser2)));\n  EXPECT_CALL(*parser2, parse(_)).WillOnce(Return(ResponseParseResponse::nextParser(parser3)));\n  EXPECT_CALL(*parser3, parse(_)).Times(AnyNumber()).WillRepeatedly(Invoke(consumeOneByte));\n\n  EXPECT_CALL(factory_, create(_, _)).WillOnce(Return(parser1));\n  EXPECT_CALL(parser_resolver_, createParser(_)).Times(0);\n\n  EXPECT_CALL(*callback_, onMessage(_)).Times(0);\n  EXPECT_CALL(*callback_, onFailedParse(_)).Times(0);\n\n  ResponseDecoder testee{factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(testee.getCurrentParserForTest(), parser3);\n  // Also, there were no interactions with `callback_`.\n}\n\nTEST_F(ResponseCodecUnitTest, ShouldPassParsedMessageToCallback) {\n  // given\n  putGarbageIntoBuffer();\n\n  const AbstractResponseSharedPtr parsed_message =\n      std::make_shared<Response<int32_t>>(ResponseMetadata{0, 0, 0}, 0);\n\n  MockParserSharedPtr all_consuming_parser = std::make_shared<MockParser>();\n  auto consume_and_return = [&parsed_message](absl::string_view& data) -> ResponseParseResponse {\n    data = 
{data.data() + data.size(), 0};\n    return ResponseParseResponse::parsedMessage(parsed_message);\n  };\n  EXPECT_CALL(*all_consuming_parser, parse(_)).WillOnce(Invoke(consume_and_return));\n\n  EXPECT_CALL(factory_, create(_, _)).WillOnce(Return(all_consuming_parser));\n  EXPECT_CALL(parser_resolver_, createParser(_)).Times(0);\n\n  EXPECT_CALL(*callback_, onMessage(parsed_message));\n  EXPECT_CALL(*callback_, onFailedParse(_)).Times(0);\n\n  ResponseDecoder testee{factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(testee.getCurrentParserForTest(), nullptr);\n  // Also, `callback_` had `onMessage` invoked once with matching argument.\n}\n\nTEST_F(ResponseCodecUnitTest, ShouldPassParsedMessageToCallbackAndInitializeNextParser) {\n  // given\n  putGarbageIntoBuffer();\n\n  const AbstractResponseSharedPtr parsed_message =\n      std::make_shared<Response<int32_t>>(ResponseMetadata{0, 0, 0}, 0);\n\n  MockParserSharedPtr parser1 = std::make_shared<MockParser>();\n  EXPECT_CALL(*parser1, parse(_))\n      .WillOnce(Return(ResponseParseResponse::parsedMessage(parsed_message)));\n\n  MockParserSharedPtr parser2 = std::make_shared<MockParser>();\n  EXPECT_CALL(*parser2, parse(_)).Times(AnyNumber()).WillRepeatedly(Invoke(consumeOneByte));\n\n  EXPECT_CALL(factory_, create(_, _)).WillOnce(Return(parser1)).WillOnce(Return(parser2));\n\n  EXPECT_CALL(*callback_, onMessage(parsed_message));\n  EXPECT_CALL(*callback_, onFailedParse(_)).Times(0);\n\n  ResponseDecoder testee{factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(testee.getCurrentParserForTest(), parser2);\n  // Also, `callback_` had `onMessage` invoked once with matching argument.\n}\n\nTEST_F(ResponseCodecUnitTest, ShouldPassParseFailureDataToCallback) {\n  // given\n  putGarbageIntoBuffer();\n\n  const ResponseMetadataSharedPtr failure_data = std::make_shared<ResponseMetadata>(0, 0, 0);\n\n  
MockParserSharedPtr parser = std::make_shared<MockParser>();\n  auto consume_and_return = [&failure_data](absl::string_view& data) -> ResponseParseResponse {\n    data = {data.data() + data.size(), 0};\n    return ResponseParseResponse::parseFailure(failure_data);\n  };\n  EXPECT_CALL(*parser, parse(_)).WillOnce(Invoke(consume_and_return));\n\n  EXPECT_CALL(factory_, create(_, _)).WillOnce(Return(parser));\n  EXPECT_CALL(parser_resolver_, createParser(_)).Times(0);\n\n  EXPECT_CALL(*callback_, onMessage(_)).Times(0);\n  EXPECT_CALL(*callback_, onFailedParse(failure_data));\n\n  ResponseDecoder testee{factory_, parser_resolver_, {callback_}};\n\n  // when\n  testee.onData(buffer_);\n\n  // then\n  ASSERT_EQ(testee.getCurrentParserForTest(), nullptr);\n  // Also, `callback_` had `onFailedParse` invoked once with matching argument.\n}\n\n} // namespace ResponseCodecUnitTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/serialization/launcher.py",
    "content": "#!/usr/bin/python\n\n# Launcher for generating composite serializer tests.\n\nimport source.extensions.filters.network.kafka.serialization.generator as generator\nimport sys\nimport os\n\n\ndef main():\n  \"\"\"\n  Serialization composite test generator\n  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n  Generates test source files for composite deserializers.\n  The files are generated, as they are extremely repetitive (tests for composite deserializer\n  for 0..9 sub-deserializers).\n\n  Usage:\n    launcher.py LOCATION_OF_OUTPUT_FILE\n  where:\n  LOCATION_OF_OUTPUT_FILE : location of 'serialization_composite_test.cc'.\n\n  Creates 'serialization_composite_test.cc' - tests composite deserializers.\n\n  Template used is 'serialization_composite_test_cc.j2'.\n  \"\"\"\n  serialization_composite_test_cc_file = os.path.abspath(sys.argv[1])\n  generator.generate_test_code(serialization_composite_test_cc_file)\n\n\nif __name__ == \"__main__\":\n  main()\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/serialization/serialization_composite_test_cc.j2",
    "content": "{#\n  Creates 'serialization_composite_test.cc'.\n\n  Template for composite serializer tests (the CompositeDeserializerWith_N_Delegates classes).\n  Covers the corner case of 0 delegates, and then uses templating to create tests for 1..N cases.\n#}\n\n#include \"extensions/filters/network/kafka/external/serialization_composite.h\"\n\n#include \"test/extensions/filters/network/kafka/serialization_utilities.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Tests in this class are supposed to check whether serialization operations on composite\n * deserializers are correct.\n */\n\n// Tests for composite deserializer with 0 fields (corner case).\n\nstruct CompositeResultWith0Fields {\n  uint32_t encode(Buffer::Instance&, EncodingContext&) const { return 0; }\n  bool operator==(const CompositeResultWith0Fields&) const { return true; }\n};\n\nusing TestCompositeDeserializer0 = CompositeDeserializerWith0Delegates<CompositeResultWith0Fields>;\n\n// Composite with 0 delegates is special case: it's always ready.\nTEST(CompositeDeserializerWith0Delegates, EmptyBufferShouldBeReady) {\n  // given\n  const TestCompositeDeserializer0 testee{};\n  // when, then\n  ASSERT_EQ(testee.ready(), true);\n}\n\nTEST(CompositeDeserializerWith0Delegates, ShouldDeserialize) {\n  const CompositeResultWith0Fields expected{};\n  serializeThenDeserializeAndCheckEquality<TestCompositeDeserializer0>(expected);\n}\n\n// Tests for composite deserializer with N+ fields.\n\n{% for field_count in counts %}\nstruct CompositeResultWith{{ field_count }}Fields {\n  {% for field in range(1, field_count + 1) %}\n  const std::string field{{ field }}_;\n  {% endfor %}\n\n  uint32_t encode(Buffer::Instance& dst, EncodingContext& encoder) const {\n    uint32_t written{0};\n    {% for field in range(1, field_count + 1) %}\n    written += encoder.encode(field{{ field }}_, dst);\n    {% endfor %}\n    return written;\n  }\n\n  bool 
operator==(const CompositeResultWith{{ field_count }}Fields& rhs) const {\n    return true\n    {% for field in range(1, field_count + 1) %} && field{{ field }}_ == rhs.field{{ field }}_\n    {% endfor %};\n  }\n};\n\nusing TestCompositeDeserializer{{ field_count }} =\n  CompositeDeserializerWith{{ field_count }}Delegates<\n  CompositeResultWith{{ field_count }}Fields\n  {% for field in range(1, field_count + 1) %}, StringDeserializer{% endfor %}>;\n\nTEST(CompositeDeserializerWith{{ field_count }}Delegates, EmptyBufferShouldNotBeReady) {\n  // given\n  const TestCompositeDeserializer{{ field_count }} testee{};\n  // when, then\n  ASSERT_EQ(testee.ready(), false);\n}\n\nTEST(CompositeDeserializerWith{{ field_count }}Delegates, ShouldDeserialize) {\n  const CompositeResultWith{{ field_count }}Fields expected{\n    {% for field in range(1, field_count + 1) %}\"s{{ field }}\", {% endfor %}\n  };\n  serializeThenDeserializeAndCheckEquality<TestCompositeDeserializer{{ field_count }}>(expected);\n}\n{% endfor %}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/serialization_test.cc",
    "content": "#include \"extensions/filters/network/kafka/tagged_fields.h\"\n\n#include \"test/extensions/filters/network/kafka/serialization_utilities.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\nnamespace SerializationTest {\n\n/**\n * Tests in this file are supposed to check whether serialization operations\n * on Kafka-primitive types (ints, strings, arrays) are behaving correctly.\n */\n\n// Freshly created deserializers should not be ready.\n#define TEST_EmptyDeserializerShouldNotBeReady(DeserializerClass)                                  \\\n  TEST(DeserializerClass, EmptyBufferShouldNotBeReady) {                                           \\\n    const DeserializerClass testee{};                                                              \\\n    ASSERT_EQ(testee.ready(), false);                                                              \\\n  }\n\nTEST_EmptyDeserializerShouldNotBeReady(Int8Deserializer);\nTEST_EmptyDeserializerShouldNotBeReady(Int16Deserializer);\nTEST_EmptyDeserializerShouldNotBeReady(Int32Deserializer);\nTEST_EmptyDeserializerShouldNotBeReady(UInt32Deserializer);\nTEST_EmptyDeserializerShouldNotBeReady(Int64Deserializer);\nTEST_EmptyDeserializerShouldNotBeReady(BooleanDeserializer);\nTEST_EmptyDeserializerShouldNotBeReady(VarUInt32Deserializer);\n\nTEST_EmptyDeserializerShouldNotBeReady(StringDeserializer);\nTEST_EmptyDeserializerShouldNotBeReady(CompactStringDeserializer);\nTEST_EmptyDeserializerShouldNotBeReady(NullableStringDeserializer);\nTEST_EmptyDeserializerShouldNotBeReady(NullableCompactStringDeserializer);\nTEST_EmptyDeserializerShouldNotBeReady(BytesDeserializer);\nTEST_EmptyDeserializerShouldNotBeReady(CompactBytesDeserializer);\nTEST_EmptyDeserializerShouldNotBeReady(NullableBytesDeserializer);\n\nTEST(ArrayDeserializer, EmptyBufferShouldNotBeReady) {\n  // given\n  const ArrayDeserializer<Int8Deserializer> testee{};\n  // when, then\n  ASSERT_EQ(testee.ready(), 
false);\n}\n\nTEST(CompactArrayDeserializer, EmptyBufferShouldNotBeReady) {\n  // given\n  const CompactArrayDeserializer<Int32Deserializer> testee{};\n  // when, then\n  ASSERT_EQ(testee.ready(), false);\n}\n\nTEST(NullableArrayDeserializer, EmptyBufferShouldNotBeReady) {\n  // given\n  const NullableArrayDeserializer<Int8Deserializer> testee{};\n  // when, then\n  ASSERT_EQ(testee.ready(), false);\n}\n\nTEST(NullableCompactArrayDeserializer, EmptyBufferShouldNotBeReady) {\n  // given\n  const NullableCompactArrayDeserializer<Int32Deserializer> testee{};\n  // when, then\n  ASSERT_EQ(testee.ready(), false);\n}\n\n// Extracted test for numeric buffers.\n#define TEST_DeserializerShouldDeserialize(BufferClass, DataClass, Value)                          \\\n  TEST(DataClass, ShouldConsumeCorrectAmountOfData) {                                              \\\n    /* given */                                                                                    \\\n    const DataClass value = Value;                                                                 \\\n    serializeThenDeserializeAndCheckEquality<BufferClass>(value);                                  \\\n  }\n\nTEST_DeserializerShouldDeserialize(Int8Deserializer, int8_t, 42);\nTEST_DeserializerShouldDeserialize(Int16Deserializer, int16_t, 42);\nTEST_DeserializerShouldDeserialize(Int32Deserializer, int32_t, 42);\nTEST_DeserializerShouldDeserialize(UInt32Deserializer, uint32_t, 42);\nTEST_DeserializerShouldDeserialize(Int64Deserializer, int64_t, 42);\nTEST_DeserializerShouldDeserialize(BooleanDeserializer, bool, true);\n\nEncodingContext encoder{-1}; // Provided api_version does not matter for primitive types.\n\n// Variable-length uint32_t tests.\n\nTEST(VarUInt32Deserializer, ShouldDeserialize) {\n  const uint32_t value = 0;\n  serializeCompactThenDeserializeAndCheckEquality<VarUInt32Deserializer>(value);\n}\n\nTEST(VarUInt32Deserializer, ShouldDeserializeMaxUint32) {\n  const uint32_t value = 
std::numeric_limits<uint32_t>::max();\n  serializeCompactThenDeserializeAndCheckEquality<VarUInt32Deserializer>(value);\n}\n\nTEST(VarUInt32Deserializer, ShouldDeserializeEdgeValues) {\n  // Each of these values should fit in 1, 2, 3, 4 bytes.\n  std::vector<uint32_t> values = {0x7f, 0x3fff, 0x1fffff, 0xfffffff};\n  for (auto i = 0; i < static_cast<int>(values.size()); ++i) {\n    // given\n    Buffer::OwnedImpl buffer;\n\n    // when\n    const uint32_t written = encoder.encodeCompact(values[i], buffer);\n\n    // then\n    ASSERT_EQ(written, i + 1);\n    absl::string_view data = {getRawData(buffer), 1024};\n    // All bits in lower bytes need to be set.\n    for (auto j = 0; j + 1 < i; ++j) {\n      ASSERT_EQ(static_cast<uint8_t>(data[j]), 0xFF);\n    }\n    // Highest bit in last byte needs to be clear (end marker).\n    ASSERT_EQ(static_cast<uint8_t>(data[i]), 0x7F);\n  }\n}\n\nTEST(VarUInt32Deserializer, ShouldSerializeMaxUint32Properly) {\n  // given\n  Buffer::OwnedImpl buffer;\n\n  // when\n  const uint32_t value = std::numeric_limits<uint32_t>::max();\n  const uint32_t result = encoder.encodeCompact(value, buffer);\n\n  // then\n  ASSERT_EQ(result, 5);\n  absl::string_view data = {getRawData(buffer), 1024};\n  ASSERT_EQ(static_cast<uint8_t>(data[0]), 0xFF); // Bits 1-7 (starting at 1).\n  ASSERT_EQ(static_cast<uint8_t>(data[1]), 0xFF); // Bits 8-14.\n  ASSERT_EQ(static_cast<uint8_t>(data[2]), 0xFF); // Bits 15-21.\n  ASSERT_EQ(static_cast<uint8_t>(data[3]), 0xFF); // Bits 22-28.\n  ASSERT_EQ(static_cast<uint8_t>(data[4]), 0x0F); // Bits 29-32.\n}\n\nTEST(VarUInt32Deserializer, ShouldThrowIfNoEndWith5Bytes) {\n  // given\n  VarUInt32Deserializer testee;\n  Buffer::OwnedImpl buffer;\n\n  // The buffer makes no sense, it's 5 times 0xFF, while varint encoding ensures that in the worst\n  // case 5th byte has the highest bit clear.\n  for (int i = 0; i < 5; ++i) {\n    const uint8_t all_bits_set = 0xFF;\n    buffer.add(&all_bits_set, sizeof(all_bits_set));\n  
}\n\n  absl::string_view data = {getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// String tests.\n\nTEST(StringDeserializer, ShouldDeserialize) {\n  const std::string value = \"sometext\";\n  serializeThenDeserializeAndCheckEquality<StringDeserializer>(value);\n}\n\nTEST(StringDeserializer, ShouldDeserializeEmptyString) {\n  const std::string value = \"\";\n  serializeThenDeserializeAndCheckEquality<StringDeserializer>(value);\n}\n\nTEST(StringDeserializer, ShouldThrowOnInvalidLength) {\n  // given\n  StringDeserializer testee;\n  Buffer::OwnedImpl buffer;\n\n  int16_t len = -1; // STRING accepts length >= 0.\n  encoder.encode(len, buffer);\n\n  absl::string_view data = {getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// Compact string tests.\n\nTEST(CompactStringDeserializer, ShouldDeserialize) {\n  const std::string value = \"sometext\";\n  serializeCompactThenDeserializeAndCheckEquality<CompactStringDeserializer>(value);\n}\n\nTEST(CompactStringDeserializer, ShouldDeserializeEmptyString) {\n  const std::string value = \"\";\n  serializeCompactThenDeserializeAndCheckEquality<CompactStringDeserializer>(value);\n}\n\nTEST(CompactStringDeserializer, ShouldThrowOnInvalidLength) {\n  // given\n  CompactStringDeserializer testee;\n  Buffer::OwnedImpl buffer;\n\n  const uint32_t len = 0; // COMPACT_STRING requires length >= 1.\n  encoder.encodeCompact(len, buffer);\n\n  absl::string_view data = {getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// Nullable string tests.\n\nTEST(NullableStringDeserializer, ShouldDeserializeString) {\n  // given\n  const NullableString value{\"sometext\"};\n  serializeThenDeserializeAndCheckEquality<NullableStringDeserializer>(value);\n}\n\nTEST(NullableStringDeserializer, ShouldDeserializeEmptyString) {\n  // given\n  const NullableString value{\"\"};\n  
serializeThenDeserializeAndCheckEquality<NullableStringDeserializer>(value);\n}\n\nTEST(NullableStringDeserializer, ShouldDeserializeAbsentString) {\n  // given\n  const NullableString value = absl::nullopt;\n  serializeThenDeserializeAndCheckEquality<NullableStringDeserializer>(value);\n}\n\nTEST(NullableStringDeserializer, ShouldThrowOnInvalidLength) {\n  // given\n  NullableStringDeserializer testee;\n  Buffer::OwnedImpl buffer;\n\n  int16_t len = -2; // -1 is OK for NULLABLE_STRING.\n  encoder.encode(len, buffer);\n\n  absl::string_view data = {getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// Nullable compact string tests.\n\nTEST(NullableCompactStringDeserializer, ShouldDeserializeString) {\n  // given\n  const NullableString value{\"sometext\"};\n  serializeCompactThenDeserializeAndCheckEquality<NullableCompactStringDeserializer>(value);\n}\n\nTEST(NullableCompactStringDeserializer, ShouldDeserializeEmptyString) {\n  // given\n  const NullableString value{\"\"};\n  serializeCompactThenDeserializeAndCheckEquality<NullableCompactStringDeserializer>(value);\n}\n\nTEST(NullableCompactStringDeserializer, ShouldDeserializeAbsentString) {\n  // given\n  const NullableString value = absl::nullopt;\n  serializeCompactThenDeserializeAndCheckEquality<NullableCompactStringDeserializer>(value);\n}\n\n// Byte array tests.\n\nTEST(BytesDeserializer, ShouldDeserialize) {\n  const Bytes value{'a', 'b', 'c', 'd'};\n  serializeThenDeserializeAndCheckEquality<BytesDeserializer>(value);\n}\n\nTEST(BytesDeserializer, ShouldDeserializeEmptyBytes) {\n  const Bytes value{};\n  serializeThenDeserializeAndCheckEquality<BytesDeserializer>(value);\n}\n\nTEST(BytesDeserializer, ShouldThrowOnInvalidLength) {\n  // given\n  BytesDeserializer testee;\n  Buffer::OwnedImpl buffer;\n\n  const int32_t bytes_length = -1; // BYTES accepts length >= 0.\n  encoder.encode(bytes_length, buffer);\n\n  absl::string_view data = 
{getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// Compact byte array tests.\n\nTEST(CompactBytesDeserializer, ShouldDeserialize) {\n  const Bytes value{'a', 'b', 'c', 'd'};\n  serializeCompactThenDeserializeAndCheckEquality<CompactBytesDeserializer>(value);\n}\n\nTEST(CompactBytesDeserializer, ShouldDeserializeEmptyBytes) {\n  const Bytes value{};\n  serializeCompactThenDeserializeAndCheckEquality<CompactBytesDeserializer>(value);\n}\n\nTEST(CompactBytesDeserializer, ShouldThrowOnInvalidLength) {\n  // given\n  CompactBytesDeserializer testee;\n  Buffer::OwnedImpl buffer;\n\n  const uint32_t bytes_length = 0; // COMPACT_BYTES requires length >= 1.\n  encoder.encodeCompact(bytes_length, buffer);\n\n  absl::string_view data = {getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// Nullable byte array tests.\n\nTEST(NullableBytesDeserializer, ShouldDeserialize) {\n  const NullableBytes value{{'a', 'b', 'c', 'd'}};\n  serializeThenDeserializeAndCheckEquality<NullableBytesDeserializer>(value);\n}\n\nTEST(NullableBytesDeserializer, ShouldDeserializeEmptyBytes) {\n  // gcc refuses to initialize optional with empty vector with value{{}}\n  const NullableBytes value = {{}};\n  serializeThenDeserializeAndCheckEquality<NullableBytesDeserializer>(value);\n}\n\nTEST(NullableBytesDeserializer, ShouldDeserializeNullBytes) {\n  const NullableBytes value = absl::nullopt;\n  serializeThenDeserializeAndCheckEquality<NullableBytesDeserializer>(value);\n}\n\nTEST(NullableBytesDeserializer, ShouldThrowOnInvalidLength) {\n  // given\n  NullableBytesDeserializer testee;\n  Buffer::OwnedImpl buffer;\n\n  const int32_t bytes_length = -2; // -1 is OK for NULLABLE_BYTES.\n  encoder.encode(bytes_length, buffer);\n\n  absl::string_view data = {getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// Generic array 
tests.\n\nTEST(ArrayDeserializer, ShouldConsumeCorrectAmountOfData) {\n  const std::vector<std::string> value{{\"aaa\", \"bbbbb\", \"cc\", \"d\", \"e\", \"ffffffff\"}};\n  serializeThenDeserializeAndCheckEquality<ArrayDeserializer<StringDeserializer>>(value);\n}\n\nTEST(ArrayDeserializer, ShouldThrowOnInvalidLength) {\n  // given\n  ArrayDeserializer<StringDeserializer> testee;\n  Buffer::OwnedImpl buffer;\n\n  const int32_t len = -1; // ARRAY accepts length >= 0.\n  encoder.encode(len, buffer);\n\n  absl::string_view data = {getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// Compact generic array tests.\n\nTEST(CompactArrayDeserializer, ShouldConsumeCorrectAmountOfData) {\n  const std::vector<int32_t> value{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}};\n  serializeCompactThenDeserializeAndCheckEquality<CompactArrayDeserializer<Int32Deserializer>>(\n      value);\n}\n\nTEST(CompactArrayDeserializer, ShouldThrowOnInvalidLength) {\n  // given\n  CompactArrayDeserializer<Int8Deserializer> testee;\n  Buffer::OwnedImpl buffer;\n\n  const uint32_t len = 0; // COMPACT_ARRAY accepts length >= 1.\n  encoder.encodeCompact(len, buffer);\n\n  absl::string_view data = {getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// Generic nullable array tests.\n\nTEST(NullableArrayDeserializer, ShouldConsumeCorrectAmountOfData) {\n  const NullableArray<std::string> value{{\"aaa\", \"bbbbb\", \"cc\", \"d\", \"e\", \"ffffffff\"}};\n  serializeThenDeserializeAndCheckEquality<NullableArrayDeserializer<StringDeserializer>>(value);\n}\n\nTEST(NullableArrayDeserializer, ShouldConsumeNullArray) {\n  const NullableArray<std::string> value = absl::nullopt;\n  serializeThenDeserializeAndCheckEquality<NullableArrayDeserializer<StringDeserializer>>(value);\n}\n\nTEST(NullableArrayDeserializer, ShouldThrowOnInvalidLength) {\n  // given\n  NullableArrayDeserializer<StringDeserializer> 
testee;\n  Buffer::OwnedImpl buffer;\n\n  const int32_t len = -2; // -1 is OK for NULLABLE_ARRAY.\n  encoder.encode(len, buffer);\n\n  absl::string_view data = {getRawData(buffer), 1024};\n\n  // when\n  // then\n  EXPECT_THROW(testee.feed(data), EnvoyException);\n}\n\n// Compact nullable generic array tests.\n\nTEST(NullableCompactArrayDeserializer, ShouldConsumeCorrectAmountOfData) {\n  const NullableArray<int32_t> value{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}};\n  serializeCompactThenDeserializeAndCheckEquality<\n      NullableCompactArrayDeserializer<Int32Deserializer>>(value);\n}\n\nTEST(NullableCompactArrayDeserializer, ShouldConsumeNullArray) {\n  const NullableArray<int32_t> value = absl::nullopt;\n  serializeCompactThenDeserializeAndCheckEquality<\n      NullableCompactArrayDeserializer<Int32Deserializer>>(value);\n}\n\n// Tagged fields.\n\nTEST(TaggedFieldDeserializer, ShouldConsumeCorrectAmountOfData) {\n  const TaggedField value{200, Bytes{1, 2, 3, 4, 5, 6}};\n  serializeCompactThenDeserializeAndCheckEquality<TaggedFieldDeserializer>(value);\n}\n\nTEST(TaggedFieldsDeserializer, ShouldConsumeCorrectAmountOfData) {\n  std::vector<TaggedField> fields;\n  for (uint32_t i = 0; i < 200; ++i) {\n    const TaggedField tagged_field = {i, Bytes{1, 2, 3, 4}};\n    fields.push_back(tagged_field);\n  }\n  const TaggedFields value{fields};\n  serializeCompactThenDeserializeAndCheckEquality<TaggedFieldsDeserializer>(value);\n}\n\n} // namespace SerializationTest\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/serialization_utilities.cc",
    "content": "#include \"test/extensions/filters/network/kafka/serialization_utilities.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\nvoid assertStringViewIncrement(const absl::string_view incremented,\n                               const absl::string_view original, const size_t difference) {\n\n  ASSERT_EQ(incremented.data(), original.data() + difference);\n  ASSERT_EQ(incremented.size(), original.size() - difference);\n}\n\nconst char* getRawData(const Buffer::OwnedImpl& buffer) {\n  Buffer::RawSliceVector slices = buffer.getRawSlices(1);\n  ASSERT(slices.size() == 1);\n  return reinterpret_cast<const char*>((slices[0]).mem_);\n}\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/kafka/serialization_utilities.h",
    "content": "#pragma once\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/kafka/serialization.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/string_view.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Kafka {\n\n/**\n * Verifies that 'incremented' string view is actually 'original' string view, that has incremented\n * by 'difference' bytes.\n */\nvoid assertStringViewIncrement(absl::string_view incremented, absl::string_view original,\n                               size_t difference);\n\n// Helper function converting buffer to raw bytes.\nconst char* getRawData(const Buffer::OwnedImpl& buffer);\n\n// Exactly what is says on the tin:\n// 1. serialize expected using Encoder,\n// 2. deserialize byte array using testee deserializer,\n// 3. verify that testee is ready, and its result is equal to expected,\n// 4. verify that data pointer moved correct amount,\n// 5. feed testee more data,\n// 6. 
verify that nothing more was consumed (because the testee has been ready since step 3).\ntemplate <typename BT, typename AT>\nvoid serializeThenDeserializeAndCheckEqualityInOneGo(AT expected) {\n  // given\n  BT testee{};\n\n  Buffer::OwnedImpl buffer;\n  EncodingContext encoder{-1};\n  const uint32_t written = encoder.encode(expected, buffer);\n  // Insert garbage after serialized payload.\n  const uint32_t garbage_size = encoder.encode(Bytes(10000), buffer);\n\n  // Tell parser that there is more data, it should never consume more than written.\n  const absl::string_view orig_data = {getRawData(buffer), written + garbage_size};\n  absl::string_view data = orig_data;\n\n  // when\n  const uint32_t consumed = testee.feed(data);\n\n  // then\n  ASSERT_EQ(consumed, written);\n  ASSERT_EQ(testee.ready(), true);\n  ASSERT_EQ(testee.get(), expected);\n  assertStringViewIncrement(data, orig_data, consumed);\n\n  // when - 2\n  const uint32_t consumed2 = testee.feed(data);\n\n  // then - 2 (nothing changes)\n  ASSERT_EQ(consumed2, 0);\n  assertStringViewIncrement(data, orig_data, consumed);\n}\n\n// Does the same thing as the above test, but instead of providing whole data at one, it provides\n// it in N one-byte chunks.\n// This verifies if deserializer keeps state properly (no overwrites etc.).\ntemplate <typename BT, typename AT>\nvoid serializeThenDeserializeAndCheckEqualityWithChunks(AT expected) {\n  // given\n  BT testee{};\n\n  Buffer::OwnedImpl buffer;\n  EncodingContext encoder{-1};\n  const uint32_t written = encoder.encode(expected, buffer);\n  // Insert garbage after serialized payload.\n  const uint32_t garbage_size = encoder.encode(Bytes(10000), buffer);\n\n  const absl::string_view orig_data = {getRawData(buffer), written + garbage_size};\n\n  // when\n  absl::string_view data = orig_data;\n  uint32_t consumed = 0;\n  for (uint32_t i = 0; i < written; ++i) {\n    data = {data.data(), 1}; // Consume data byte-by-byte.\n    uint32_t step = 
testee.feed(data);\n    consumed += step;\n    ASSERT_EQ(step, 1);\n    ASSERT_EQ(data.size(), 0);\n  }\n\n  // then\n  ASSERT_EQ(consumed, written);\n  ASSERT_EQ(testee.ready(), true);\n  ASSERT_EQ(testee.get(), expected);\n\n  ASSERT_EQ(data.data(), orig_data.data() + consumed);\n\n  // when - 2\n  absl::string_view more_data = {data.data(), garbage_size};\n  const uint32_t consumed2 = testee.feed(more_data);\n\n  // then - 2 (nothing changes)\n  ASSERT_EQ(consumed2, 0);\n  ASSERT_EQ(more_data.data(), data.data());\n  ASSERT_EQ(more_data.size(), garbage_size);\n}\n\n// Same thing as 'serializeThenDeserializeAndCheckEqualityInOneGo', just uses compact encoding.\ntemplate <typename BT, typename AT>\nvoid serializeCompactThenDeserializeAndCheckEqualityInOneGo(AT expected) {\n  // given\n  BT testee{};\n\n  Buffer::OwnedImpl buffer;\n  EncodingContext encoder{-1};\n  const uint32_t written = encoder.encodeCompact(expected, buffer);\n  // Insert garbage after serialized payload.\n  const uint32_t garbage_size = encoder.encode(Bytes(10000), buffer);\n\n  // Tell parser that there is more data, it should never consume more than written.\n  const absl::string_view orig_data = {getRawData(buffer), written + garbage_size};\n  absl::string_view data = orig_data;\n\n  // when\n  const uint32_t consumed = testee.feed(data);\n\n  // then\n  ASSERT_EQ(consumed, written);\n  ASSERT_EQ(testee.ready(), true);\n  ASSERT_EQ(testee.get(), expected);\n  assertStringViewIncrement(data, orig_data, consumed);\n\n  // when - 2\n  const uint32_t consumed2 = testee.feed(data);\n\n  // then - 2 (nothing changes)\n  ASSERT_EQ(consumed2, 0);\n  assertStringViewIncrement(data, orig_data, consumed);\n}\n\n// Same thing as 'serializeThenDeserializeAndCheckEqualityWithChunks', just uses compact encoding.\ntemplate <typename BT, typename AT>\nvoid serializeCompactThenDeserializeAndCheckEqualityWithChunks(AT expected) {\n  // given\n  BT testee{};\n\n  Buffer::OwnedImpl buffer;\n  EncodingContext 
encoder{-1};\n  const uint32_t written = encoder.encodeCompact(expected, buffer);\n  // Insert garbage after serialized payload.\n  const uint32_t garbage_size = encoder.encode(Bytes(10000), buffer);\n\n  const absl::string_view orig_data = {getRawData(buffer), written + garbage_size};\n\n  // when\n  absl::string_view data = orig_data;\n  uint32_t consumed = 0;\n  for (uint32_t i = 0; i < written; ++i) {\n    data = {data.data(), 1}; // Consume data byte-by-byte.\n    uint32_t step = testee.feed(data);\n    consumed += step;\n    ASSERT_EQ(step, 1);\n    ASSERT_EQ(data.size(), 0);\n  }\n\n  // then\n  ASSERT_EQ(consumed, written);\n  ASSERT_EQ(testee.ready(), true);\n  ASSERT_EQ(testee.get(), expected);\n\n  ASSERT_EQ(data.data(), orig_data.data() + consumed);\n\n  // when - 2\n  absl::string_view more_data = {data.data(), garbage_size};\n  const uint32_t consumed2 = testee.feed(more_data);\n\n  // then - 2 (nothing changes)\n  ASSERT_EQ(consumed2, 0);\n  ASSERT_EQ(more_data.data(), data.data());\n  ASSERT_EQ(more_data.size(), garbage_size);\n}\n\n// Wrapper to run both tests for normal serialization.\ntemplate <typename BT, typename AT> void serializeThenDeserializeAndCheckEquality(AT expected) {\n  serializeThenDeserializeAndCheckEqualityInOneGo<BT>(expected);\n  serializeThenDeserializeAndCheckEqualityWithChunks<BT>(expected);\n}\n\n// Wrapper to run both tests for compact serialization.\ntemplate <typename BT, typename AT>\nvoid serializeCompactThenDeserializeAndCheckEquality(AT expected) {\n  serializeCompactThenDeserializeAndCheckEqualityInOneGo<BT>(expected);\n  serializeCompactThenDeserializeAndCheckEqualityWithChunks<BT>(expected);\n}\n\n/**\n * Message callback that captures the messages.\n */\ntemplate <typename Base, typename Message, typename Failure> class CapturingCallback : public Base {\npublic:\n  /**\n   * Stores the message.\n   */\n  void onMessage(Message message) override { captured_messages_.push_back(message); }\n\n  /**\n   * Returns the 
stored messages.\n   */\n  const std::vector<Message>& getCapturedMessages() const { return captured_messages_; }\n\n  void onFailedParse(Failure failure_data) override { parse_failures_.push_back(failure_data); }\n\n  const std::vector<Failure>& getParseFailures() const { return parse_failures_; }\n\nprivate:\n  std::vector<Message> captured_messages_;\n  std::vector<Failure> parse_failures_;\n};\n\ntemplate <typename Base, typename Message, typename Failure>\nusing CapturingCallbackSharedPtr = std::shared_ptr<CapturingCallback<Base, Message, Failure>>;\n\n} // namespace Kafka\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/local_ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"local_ratelimit_test\",\n    srcs = [\"local_ratelimit_test.cc\"],\n    extension_name = \"envoy.filters.network.local_ratelimit\",\n    deps = [\n        \"//source/extensions/filters/network/local_ratelimit:local_ratelimit_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"local_ratelimit_integration_test\",\n    srcs = [\"local_ratelimit_integration_test.cc\"],\n    extension_name = \"envoy.filters.network.local_ratelimit\",\n    deps = [\n        \"//source/extensions/filters/network/local_ratelimit:config\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/integration:integration_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"local_ratelimit_fuzz_proto\",\n    srcs = [\"local_ratelimit_fuzz.proto\"],\n    deps = [\n        \"@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"local_ratelimit_fuzz_test\",\n    srcs = [\"local_ratelimit_fuzz_test.cc\"],\n    corpus = \"local_ratelimit_corpus\",\n    deps = [\n        \":local_ratelimit_fuzz_proto_cc_proto\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/network/local_ratelimit:local_ratelimit_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n     
   \"@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/local_ratelimit/local_ratelimit_corpus/basic_test_case",
    "content": "config{\n    stat_prefix: \"local_rate_limit_stats\"\n    token_bucket:{\n        max_tokens: 1\n        fill_interval{ \n          seconds: 1\n          }\n    }\n    runtime_enabled:{\n      default_value: {\n        value: true\n      }\n      runtime_key: \"foo_key\"\n    }\n    \n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\000\"\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\000\"\n  }\n}\nactions {\n  on_new_connection {\n  }\n}\nactions {\n  on_data {\n    data: \"\\000\\000\"\n  }\n}"
  },
  {
    "path": "test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.proto",
    "content": "syntax = \"proto3\";\npackage envoy.extensions.filters.network.local_ratelimit;\n\nimport \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"validate/validate.proto\";\n\nmessage OnData {\n  bytes data = 1;\n  bool end_stream = 2;\n}\n\nmessage Action {\n  oneof action_selector {\n    option (validate.required) = true;\n    // Call onNewConnection().\n    google.protobuf.Empty on_new_connection = 1;\n    // Call onData().\n    OnData on_data = 2;\n    // Timer ends and refill the bucket.\n    google.protobuf.Empty refill = 3;\n  }\n}\nmessage LocalRateLimitTestCase {\n  envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit config = 1\n      [(validate.rules).message = {required: true}];\n  repeated Action actions = 2;\n}\n"
  },
  {
    "path": "test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/network/local_ratelimit/local_ratelimit.h\"\n\n#include \"test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.pb.validate.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace LocalRateLimitFilter {\nstruct ActiveFilter {\n  ActiveFilter(const ConfigSharedPtr& config) : filter_(config) {\n    filter_.initializeReadFilterCallbacks(read_filter_callbacks_);\n  }\n\n  NiceMock<Network::MockReadFilterCallbacks> read_filter_callbacks_;\n  Filter filter_;\n};\n\nDEFINE_PROTO_FUZZER(\n    const envoy::extensions::filters::network::local_ratelimit::LocalRateLimitTestCase& input) {\n\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  } catch (const ProtobufMessage::DeprecatedProtoFieldException& e) {\n    ENVOY_LOG_MISC(debug, \"DeprecatedProtoFieldException: {}\", e.what());\n    return;\n  }\n  if (input.config().token_bucket().fill_interval().nanos() < 0) {\n    // TODO:\n    // protoc-gen-validate has an issue on type \"Duration\" which may generate interval with seconds\n    // > 0 while \"nanos\" < 0. 
And negative \"nanos\" will cause validation inside the filter to fail.\n    // see https://github.com/envoyproxy/protoc-gen-validate/issues/348 for detail.\n    ENVOY_LOG_MISC(debug, \"In fill_interval, nanos should not be negative!\");\n    return;\n  }\n  static NiceMock<Event::MockDispatcher> dispatcher;\n  Stats::IsolatedStoreImpl stats_store;\n  static NiceMock<Runtime::MockLoader> runtime;\n  Event::MockTimer* fill_timer = new Event::MockTimer(&dispatcher);\n  envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit proto_config =\n      input.config();\n  ConfigSharedPtr config = nullptr;\n  try {\n    config = std::make_shared<Config>(proto_config, dispatcher, stats_store, runtime);\n  } catch (EnvoyException& e) {\n    ENVOY_LOG_MISC(debug, \"EnvoyException in config's constructor: {}\", e.what());\n    return;\n  }\n\n  ActiveFilter active_filter(config);\n  std::chrono::milliseconds fill_interval(\n      PROTOBUF_GET_MS_REQUIRED(proto_config.token_bucket(), fill_interval));\n\n  for (const auto& action : input.actions()) {\n    ENVOY_LOG_MISC(trace, \"action {}\", action.DebugString());\n\n    switch (action.action_selector_case()) {\n    case envoy::extensions::filters::network::local_ratelimit::Action::kOnData: {\n      Buffer::OwnedImpl buffer(action.on_data().data());\n      active_filter.filter_.onData(buffer, action.on_data().end_stream());\n      break;\n    }\n    case envoy::extensions::filters::network::local_ratelimit::Action::kOnNewConnection: {\n      active_filter.filter_.onNewConnection();\n      break;\n    }\n    case envoy::extensions::filters::network::local_ratelimit::Action::kRefill: {\n      EXPECT_CALL(*fill_timer, enableTimer(fill_interval, nullptr));\n      fill_timer->invokeCallback();\n      break;\n    }\n    default:\n      // Unhandled actions\n      PANIC(\"A case is missing for an action\");\n    }\n  }\n} // NOLINT(clang-analyzer-cplusplus.NewDeleteLeaks)\n  // Silence clang-tidy here because it thinks 
there is a memory leak for \"fill_timer\"\n  // However, ownership of each MockTimer instance is transferred to the (caller of) dispatcher's\n  // createTimer_(), so to avoid destructing it twice, the MockTimer must have been dynamically\n  // allocated and must not be deleted by its creator. See test/mocks/event/mocks.cc for detail.\n} // namespace LocalRateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc",
    "content": "#include \"test/integration/integration.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass LocalRateLimitIntegrationTest : public Event::TestUsingSimulatedTime,\n                                      public testing::TestWithParam<Network::Address::IpVersion>,\n                                      public BaseIntegrationTest {\npublic:\n  LocalRateLimitIntegrationTest()\n      : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) {}\n\n  void setup(const std::string& filter_yaml) {\n    config_helper_.addNetworkFilter(filter_yaml);\n    BaseIntegrationTest::initialize();\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, LocalRateLimitIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Make sure the filter works in the basic case.\nTEST_P(LocalRateLimitIntegrationTest, NoRateLimiting) {\n  setup(R\"EOF(\nname: ratelimit\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.network.local_rate_limit.v2alpha.LocalRateLimit\n  stat_prefix: local_rate_limit_stats\n  token_bucket:\n    max_tokens: 1\n    fill_interval: 0.2s\n)EOF\");\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(5));\n  ASSERT_TRUE(fake_upstream_connection->write(\"world\"));\n  tcp_client->waitForData(\"world\");\n  tcp_client->close();\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n\n  EXPECT_EQ(0,\n            test_server_->counter(\"local_rate_limit.local_rate_limit_stats.rate_limited\")->value());\n}\n\n// TODO(mattklein123): Create an integration test that tests rate limiting. 
Right now this is\n// not easily possible using simulated time due to the fact that simulated time runs alarms on\n// their correct threads when woken up, but does not have any barrier for when the alarms have\n// actually fired. This makes a deterministic test impossible without resorting to hacks like\n// storing the number of tokens in a stat, etc.\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/local_ratelimit/local_ratelimit_test.cc",
    "content": "#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.validate.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/network/local_ratelimit/local_ratelimit.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace LocalRateLimitFilter {\n\nclass LocalRateLimitTestBase : public testing::Test {\npublic:\n  void initialize(const std::string& filter_yaml, bool expect_timer_create = true) {\n    envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit proto_config;\n    TestUtility::loadFromYamlAndValidate(filter_yaml, proto_config);\n    fill_timer_ = new Event::MockTimer(&dispatcher_);\n    if (expect_timer_create) {\n      EXPECT_CALL(*fill_timer_, enableTimer(_, nullptr));\n      EXPECT_CALL(*fill_timer_, disableTimer());\n    }\n    config_ = std::make_shared<Config>(proto_config, dispatcher_, stats_store_, runtime_);\n  }\n\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Stats::IsolatedStoreImpl stats_store_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  Event::MockTimer* fill_timer_{};\n  ConfigSharedPtr config_;\n};\n\nclass LocalRateLimitFilterTest : public LocalRateLimitTestBase {\npublic:\n  struct ActiveFilter {\n    ActiveFilter(const ConfigSharedPtr& config) : filter_(config) {\n      filter_.initializeReadFilterCallbacks(read_filter_callbacks_);\n    }\n\n    NiceMock<Network::MockReadFilterCallbacks> read_filter_callbacks_;\n    Filter filter_;\n  };\n};\n\n// Basic no rate limit case.\nTEST_F(LocalRateLimitFilterTest, NoRateLimit) {\n  
initialize(R\"EOF(\nstat_prefix: local_rate_limit_stats\ntoken_bucket:\n  max_tokens: 1\n  fill_interval: 0.2s\n)EOF\");\n\n  InSequence s;\n  ActiveFilter active_filter(config_);\n  EXPECT_EQ(Network::FilterStatus::Continue, active_filter.filter_.onNewConnection());\n  EXPECT_EQ(0, TestUtility::findCounter(stats_store_,\n                                        \"local_rate_limit.local_rate_limit_stats.rate_limited\")\n                   ->value());\n}\n\n// Basic rate limit case.\nTEST_F(LocalRateLimitFilterTest, RateLimit) {\n  initialize(R\"EOF(\nstat_prefix: local_rate_limit_stats\ntoken_bucket:\n  max_tokens: 1\n  fill_interval: 0.2s\n)EOF\");\n\n  // First connection is OK.\n  InSequence s;\n  ActiveFilter active_filter1(config_);\n  EXPECT_EQ(Network::FilterStatus::Continue, active_filter1.filter_.onNewConnection());\n\n  // Second connection should be rate limited.\n  ActiveFilter active_filter2(config_);\n  EXPECT_CALL(active_filter2.read_filter_callbacks_.connection_, close(_));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, active_filter2.filter_.onNewConnection());\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_,\n                                        \"local_rate_limit.local_rate_limit_stats.rate_limited\")\n                   ->value());\n\n  // Refill the bucket.\n  EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr));\n  fill_timer_->invokeCallback();\n\n  // Third connection is OK.\n  ActiveFilter active_filter3(config_);\n  EXPECT_EQ(Network::FilterStatus::Continue, active_filter3.filter_.onNewConnection());\n}\n\n// Verify the runtime disable functionality.\nTEST_F(LocalRateLimitFilterTest, RuntimeDisabled) {\n  initialize(R\"EOF(\nstat_prefix: local_rate_limit_stats\ntoken_bucket:\n  max_tokens: 1\n  fill_interval: 0.2s\nruntime_enabled:\n  default_value: true\n  runtime_key: foo_key\n)EOF\");\n\n  // First connection is OK.\n  InSequence s;\n  ActiveFilter active_filter1(config_);\n  
EXPECT_CALL(runtime_.snapshot_, getBoolean(\"foo_key\", true)).WillOnce(Return(true));\n  EXPECT_EQ(Network::FilterStatus::Continue, active_filter1.filter_.onNewConnection());\n\n  // Second connection should be rate limited but won't be due to filter disable.\n  ActiveFilter active_filter2(config_);\n  EXPECT_CALL(runtime_.snapshot_, getBoolean(\"foo_key\", true)).WillOnce(Return(false));\n  EXPECT_EQ(Network::FilterStatus::Continue, active_filter2.filter_.onNewConnection());\n  EXPECT_EQ(0, TestUtility::findCounter(stats_store_,\n                                        \"local_rate_limit.local_rate_limit_stats.rate_limited\")\n                   ->value());\n}\n\n} // namespace LocalRateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mongo_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"bson_impl_test\",\n    srcs = [\"bson_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.mongo_proxy\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:bson_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"codec_impl_test\",\n    srcs = [\"codec_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.mongo_proxy\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:bson_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:codec_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"proxy_test\",\n    srcs = [\"proxy_test.cc\"],\n    extension_name = \"envoy.filters.network.mongo_proxy\",\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:bson_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:codec_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:proxy_lib\",\n        \"//test/common/stream_info:test_util\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"@envoy_api//envoy/extensions/filters/common/fault/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    
extension_name = \"envoy.filters.network.mongo_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/mongo_proxy:bson_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:codec_lib\",\n        \"//source/extensions/filters/network/mongo_proxy:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.mongo_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/mongo_proxy:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/mongo_proxy/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/mongo_proxy/bson_impl_test.cc",
    "content": "#include <string>\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/mongo_proxy/bson_impl.h\"\n\n#include \"test/test_common/printers.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\nnamespace Bson {\n\nTEST(BsonImplTest, BadCast) {\n  DocumentSharedPtr doc = DocumentImpl::create()->addString(\"hello\", \"world\");\n  EXPECT_THROW(doc->values().front()->asDouble(), EnvoyException);\n}\n\nTEST(BsonImplTest, Equal) {\n  DocumentSharedPtr doc1 = DocumentImpl::create();\n  DocumentSharedPtr doc2 = DocumentImpl::create()->addString(\"hello\", \"world\");\n  EXPECT_FALSE(*doc1 == *doc2);\n\n  doc1->addDouble(\"hello\", 2.0);\n  EXPECT_FALSE(*doc1 == *doc2);\n}\n\nTEST(BsonImplTest, InvalidMessageLength) {\n  Buffer::OwnedImpl buffer;\n  BufferHelper::writeInt32(buffer, 100);\n  EXPECT_THROW(DocumentImpl::create(buffer), EnvoyException);\n}\n\nTEST(BsonImplTest, InvalidElementType) {\n  Buffer::OwnedImpl buffer;\n  std::string key_name(\"hello\");\n  BufferHelper::writeInt32(buffer, 4 + 1 + key_name.size() + 1);\n  uint8_t invalid_element_type = 0x20;\n  buffer.add(&invalid_element_type, sizeof(invalid_element_type));\n  BufferHelper::writeCString(buffer, key_name);\n  EXPECT_THROW(DocumentImpl::create(buffer), EnvoyException);\n}\n\nTEST(BsonImplTest, InvalodDocumentTermination) {\n  Buffer::OwnedImpl buffer;\n  BufferHelper::writeInt32(buffer, 5);\n  uint8_t invalid_document_end = 0x1;\n  buffer.add(&invalid_document_end, sizeof(invalid_document_end));\n  EXPECT_THROW(DocumentImpl::create(buffer), EnvoyException);\n}\n\nTEST(BufferHelperTest, InvalidSize) {\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW(BufferHelper::peekInt32(buffer), EnvoyException);\n    EXPECT_THROW(BufferHelper::removeByte(buffer), EnvoyException);\n    EXPECT_THROW(BufferHelper::removeBytes(buffer, nullptr, 1), EnvoyException);\n    
EXPECT_THROW(BufferHelper::removeCString(buffer), EnvoyException);\n    EXPECT_THROW(BufferHelper::removeDouble(buffer), EnvoyException);\n    EXPECT_THROW(BufferHelper::removeInt64(buffer), EnvoyException);\n    EXPECT_THROW(BufferHelper::removeString(buffer), EnvoyException);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeInt32(buffer, 4);\n    EXPECT_THROW(BufferHelper::removeString(buffer), EnvoyException);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeInt32(buffer, 4);\n    uint8_t dummy = 0;\n    buffer.add(&dummy, sizeof(dummy));\n    EXPECT_THROW(BufferHelper::removeBinary(buffer), EnvoyException);\n  }\n}\n\n} // namespace Bson\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mongo_proxy/codec_impl_test.cc",
    "content": "#include <string>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/json/json_loader.h\"\n\n#include \"extensions/filters/network/mongo_proxy/bson_impl.h\"\n#include \"extensions/filters/network/mongo_proxy/codec_impl.h\"\n\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Eq;\nusing testing::NiceMock;\nusing testing::Pointee;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nclass TestDecoderCallbacks : public DecoderCallbacks {\npublic:\n  void decodeGetMore(GetMoreMessagePtr&& message) override { decodeGetMore_(message); }\n  void decodeInsert(InsertMessagePtr&& message) override { decodeInsert_(message); }\n  void decodeKillCursors(KillCursorsMessagePtr&& message) override { decodeKillCursors_(message); }\n  void decodeQuery(QueryMessagePtr&& message) override { decodeQuery_(message); }\n  void decodeReply(ReplyMessagePtr&& message) override { decodeReply_(message); }\n  void decodeCommand(CommandMessagePtr&& message) override { decodeCommand_(message); }\n  void decodeCommandReply(CommandReplyMessagePtr&& message) override {\n    decodeCommandReply_(message);\n  }\n\n  MOCK_METHOD(void, decodeGetMore_, (GetMoreMessagePtr & message));\n  MOCK_METHOD(void, decodeInsert_, (InsertMessagePtr & message));\n  MOCK_METHOD(void, decodeKillCursors_, (KillCursorsMessagePtr & message));\n  MOCK_METHOD(void, decodeQuery_, (QueryMessagePtr & message));\n  MOCK_METHOD(void, decodeReply_, (ReplyMessagePtr & message));\n  MOCK_METHOD(void, decodeCommand_, (CommandMessagePtr & message));\n  MOCK_METHOD(void, decodeCommandReply_, (CommandReplyMessagePtr & message));\n};\n\nclass MongoCodecImplTest : public testing::Test {\npublic:\n  Buffer::OwnedImpl output_;\n  EncoderImpl encoder_{output_};\n  NiceMock<TestDecoderCallbacks> callbacks_;\n  DecoderImpl decoder_{callbacks_};\n};\n\nTEST_F(MongoCodecImplTest, QueryEqual) {\n  {\n    
QueryMessageImpl q1(0, 0);\n    QueryMessageImpl q2(1, 1);\n    EXPECT_FALSE(q1 == q2);\n  }\n\n  {\n    QueryMessageImpl q1(0, 0);\n    q1.fullCollectionName(\"hello\");\n    QueryMessageImpl q2(0, 0);\n    q2.fullCollectionName(\"world\");\n    EXPECT_FALSE(q1 == q2);\n  }\n\n  {\n    QueryMessageImpl q1(0, 0);\n    q1.query(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    QueryMessageImpl q2(0, 0);\n    q2.query(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(q1 == q2);\n  }\n\n  {\n    QueryMessageImpl q1(0, 0);\n    q1.returnFieldsSelector(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    QueryMessageImpl q2(0, 0);\n    q2.returnFieldsSelector(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(q1 == q2);\n  }\n}\n\nTEST_F(MongoCodecImplTest, Query) {\n  QueryMessageImpl query(1, 1);\n  query.flags(0x4);\n  query.fullCollectionName(\"test\");\n  query.numberToSkip(20);\n  query.numberToReturn(-1);\n  query.query(\n      Bson::DocumentImpl::create()\n          ->addString(\"string\", \"string\")\n          ->addSymbol(\"symbol\", \"symbol\")\n          ->addDouble(\"double\", 2.1)\n          ->addDocument(\"document\", Bson::DocumentImpl::create()->addString(\"hello\", \"world\"))\n          ->addArray(\"array\", Bson::DocumentImpl::create()->addString(\"0\", \"foo\"))\n          ->addBinary(\"binary\", \"binary_value\")\n          ->addObjectId(\"object_id\", Bson::Field::ObjectId())\n          ->addBoolean(\"true\", true)\n          ->addBoolean(\"false\", false)\n          ->addDatetime(\"datetime\", 1)\n          ->addNull(\"null\")\n          ->addRegex(\"regex\", {\"hello\", \"\"})\n          ->addInt32(\"int32\", 1)\n          ->addTimestamp(\"timestamp\", 1000)\n          ->addInt64(\"int64\", 2));\n\n  QueryMessageImpl query2(2, 2);\n  query2.fullCollectionName(\"test2\");\n  query2.query(Bson::DocumentImpl::create()->addString(\"string2\", 
\"string2_value\"));\n  query2.returnFieldsSelector(Bson::DocumentImpl::create()->addDouble(\"double2\", -2.3));\n\n  Json::Factory::loadFromString(query.toString(true));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(query.toString(true)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(query.toString(false)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(query2.toString(true)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(query2.toString(false)));\n\n  encoder_.encodeQuery(query);\n  encoder_.encodeQuery(query2);\n  EXPECT_CALL(callbacks_, decodeQuery_(Pointee(Eq(query))));\n  EXPECT_CALL(callbacks_, decodeQuery_(Pointee(Eq(query2))));\n  decoder_.onData(output_);\n}\n\nTEST_F(MongoCodecImplTest, ReplyEqual) {\n  {\n    ReplyMessageImpl r1(0, 0);\n    ReplyMessageImpl r2(1, 1);\n    EXPECT_FALSE(r1 == r2);\n  }\n\n  {\n    ReplyMessageImpl r1(0, 0);\n    r1.cursorId(1);\n    ReplyMessageImpl r2(0, 0);\n    r2.cursorId(2);\n    EXPECT_FALSE(r1 == r2);\n  }\n\n  {\n    ReplyMessageImpl r1(0, 0);\n    r1.numberReturned(1);\n    r1.documents().push_back(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    ReplyMessageImpl r2(0, 0);\n    r2.numberReturned(1);\n    r2.documents().push_back(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(r1 == r2);\n  }\n}\n\nTEST_F(MongoCodecImplTest, Reply) {\n  ReplyMessageImpl reply(2, 2);\n  reply.flags(0x8);\n  reply.cursorId(20000);\n  reply.startingFrom(20);\n  reply.numberReturned(2);\n  reply.documents().push_back(Bson::DocumentImpl::create());\n  reply.documents().push_back(Bson::DocumentImpl::create());\n\n  EXPECT_NO_THROW(Json::Factory::loadFromString(reply.toString(true)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(reply.toString(false)));\n\n  encoder_.encodeReply(reply);\n  EXPECT_CALL(callbacks_, decodeReply_(Pointee(Eq(reply))));\n  decoder_.onData(output_);\n}\n\nTEST_F(MongoCodecImplTest, GetMoreEqual) {\n  {\n    GetMoreMessageImpl g1(0, 0);\n    
GetMoreMessageImpl g2(1, 1);\n    EXPECT_FALSE(g1 == g2);\n  }\n\n  {\n    GetMoreMessageImpl g1(0, 0);\n    g1.cursorId(1);\n    GetMoreMessageImpl g2(0, 0);\n    g1.cursorId(2);\n    EXPECT_FALSE(g1 == g2);\n  }\n}\n\nTEST_F(MongoCodecImplTest, GetMore) {\n  GetMoreMessageImpl get_more(3, 3);\n  get_more.fullCollectionName(\"test\");\n  get_more.numberToReturn(20);\n  get_more.cursorId(20000);\n\n  EXPECT_NO_THROW(Json::Factory::loadFromString(get_more.toString(true)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(get_more.toString(false)));\n\n  encoder_.encodeGetMore(get_more);\n  EXPECT_CALL(callbacks_, decodeGetMore_(Pointee(Eq(get_more))));\n  decoder_.onData(output_);\n}\n\nTEST_F(MongoCodecImplTest, InsertEqual) {\n  {\n    InsertMessageImpl i1(0, 0);\n    InsertMessageImpl i2(1, 1);\n    EXPECT_FALSE(i1 == i2);\n  }\n\n  {\n    InsertMessageImpl i1(0, 0);\n    i1.fullCollectionName(\"hello\");\n    InsertMessageImpl i2(0, 0);\n    i2.fullCollectionName(\"world\");\n    EXPECT_FALSE(i1 == i2);\n  }\n\n  {\n    InsertMessageImpl i1(0, 0);\n    i1.fullCollectionName(\"hello\");\n    i1.documents().push_back(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    InsertMessageImpl i2(0, 0);\n    i2.fullCollectionName(\"hello\");\n    i2.documents().push_back(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(i1 == i2);\n  }\n}\n\nTEST_F(MongoCodecImplTest, Insert) {\n  InsertMessageImpl insert(4, 4);\n  insert.flags(0x8);\n  insert.fullCollectionName(\"test\");\n  insert.documents().push_back(Bson::DocumentImpl::create());\n  insert.documents().push_back(Bson::DocumentImpl::create());\n\n  EXPECT_NO_THROW(Json::Factory::loadFromString(insert.toString(true)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(insert.toString(false)));\n\n  encoder_.encodeInsert(insert);\n  EXPECT_CALL(callbacks_, decodeInsert_(Pointee(Eq(insert))));\n  decoder_.onData(output_);\n}\n\nTEST_F(MongoCodecImplTest, KillCursorsEqual) 
{\n  {\n    KillCursorsMessageImpl k1(0, 0);\n    KillCursorsMessageImpl k2(1, 1);\n    EXPECT_FALSE(k1 == k2);\n  }\n\n  {\n    KillCursorsMessageImpl k1(0, 0);\n    k1.numberOfCursorIds(1);\n    KillCursorsMessageImpl k2(0, 0);\n    k2.numberOfCursorIds(2);\n    EXPECT_FALSE(k1 == k2);\n  }\n\n  {\n    KillCursorsMessageImpl k1(0, 0);\n    k1.numberOfCursorIds(1);\n    k1.cursorIds({1});\n    KillCursorsMessageImpl k2(0, 0);\n    k2.numberOfCursorIds(1);\n    k2.cursorIds({2});\n    EXPECT_FALSE(k1 == k2);\n  }\n}\n\nTEST_F(MongoCodecImplTest, KillCursors) {\n  KillCursorsMessageImpl kill(5, 5);\n  kill.numberOfCursorIds(2);\n  kill.cursorIds({20000, 40000});\n\n  EXPECT_NO_THROW(Json::Factory::loadFromString(kill.toString(true)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(kill.toString(false)));\n\n  encoder_.encodeKillCursors(kill);\n  EXPECT_CALL(callbacks_, decodeKillCursors_(Pointee(Eq(kill))));\n  decoder_.onData(output_);\n}\n\nTEST_F(MongoCodecImplTest, EncodeExceptions) {\n  QueryMessageImpl q(0, 0);\n  EXPECT_THROW(encoder_.encodeQuery(q), EnvoyException);\n  q.fullCollectionName(\"hello\");\n  EXPECT_THROW(encoder_.encodeQuery(q), EnvoyException);\n  q.query(Bson::DocumentImpl::create());\n  encoder_.encodeQuery(q);\n\n  KillCursorsMessageImpl k(0, 0);\n  EXPECT_THROW(encoder_.encodeKillCursors(k), EnvoyException);\n  k.numberOfCursorIds(1);\n  EXPECT_THROW(encoder_.encodeKillCursors(k), EnvoyException);\n  k.cursorIds({1});\n  encoder_.encodeKillCursors(k);\n\n  InsertMessageImpl i(0, 0);\n  EXPECT_THROW(encoder_.encodeInsert(i), EnvoyException);\n  i.fullCollectionName(\"hello\");\n  EXPECT_THROW(encoder_.encodeInsert(i), EnvoyException);\n  i.documents().push_back(Bson::DocumentImpl::create());\n  encoder_.encodeInsert(i);\n\n  GetMoreMessageImpl g(0, 0);\n  EXPECT_THROW(encoder_.encodeGetMore(g), EnvoyException);\n  g.fullCollectionName(\"hello\");\n  EXPECT_THROW(encoder_.encodeGetMore(g), EnvoyException);\n  g.cursorId(1);\n  
encoder_.encodeGetMore(g);\n}\n\nTEST_F(MongoCodecImplTest, PartialMessages) {\n  output_.add(\"2\");\n  decoder_.onData(output_);\n  output_.drain(output_.length());\n\n  Bson::BufferHelper::writeInt32(output_, 100);\n  decoder_.onData(output_);\n  EXPECT_EQ(4U, output_.length());\n}\n\nTEST_F(MongoCodecImplTest, InvalidMessage) {\n  Bson::BufferHelper::writeInt32(output_, 16); // Size\n  Bson::BufferHelper::writeInt32(output_, 0);  // Request ID\n  Bson::BufferHelper::writeInt32(output_, 1);  // Response to\n  Bson::BufferHelper::writeInt32(output_, 2);  // Invalid op\n  EXPECT_THROW(decoder_.onData(output_), EnvoyException);\n}\n\nTEST_F(MongoCodecImplTest, QueryToStringWithEscape) {\n  QueryMessageImpl query(1, 1);\n  query.flags(0x4);\n  query.fullCollectionName(\"test\");\n  query.numberToSkip(20);\n  query.numberToReturn(-1);\n  query.query(Bson::DocumentImpl::create()->addString(\"string_need_esc\", \"{\\\"foo\\\": \\\"bar\\n\\\"}\"));\n\n  const std::string expectedQuery =\n      R\"EOF({\"opcode\": \"OP_QUERY\", \"id\": 1, \"response_to\": 1, \"flags\": \"0x4\", )EOF\"\n      R\"EOF(\"collection\": \"test\", \"skip\": 20, \"return\": -1, \"query\": )EOF\"\n      R\"EOF({\"string_need_esc\": \"{\\\"foo\\\": \\\"bar\\n\\\"}\"}, \"fields\": {}})EOF\";\n\n  EXPECT_EQ(query.toString(true), expectedQuery);\n\n  EXPECT_NO_THROW(Json::Factory::loadFromString(query.toString(true)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(query.toString(false)));\n}\n\nTEST_F(MongoCodecImplTest, CommandEqual) {\n  {\n    CommandMessageImpl m1(0, 0);\n    CommandMessageImpl m2(1, 1);\n    EXPECT_FALSE(m1 == m2);\n  }\n\n  // Trigger fail on comparing metadata.\n  {\n    CommandMessageImpl m1(0, 0);\n    m1.metadata(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    CommandMessageImpl m2(1, 1);\n    m2.metadata(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(m1 == m2);\n  }\n\n  // Trigger fail on comparing commandArgs.\n  
{\n    CommandMessageImpl m1(0, 0);\n    m1.commandArgs(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    CommandMessageImpl m2(1, 1);\n    m2.commandArgs(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(m1 == m2);\n  }\n\n  // Trigger fail on comparing inputDocs.\n  {\n    CommandMessageImpl m1(0, 0);\n    m1.inputDocs().push_back(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    CommandMessageImpl m2(1, 1);\n    m2.inputDocs().push_back(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(m1 == m2);\n  }\n}\n\nTEST_F(MongoCodecImplTest, Command) {\n  CommandMessageImpl command(15, 25);\n\n  command.database(std::string(\"Test database\"));\n  command.commandName(std::string(\"Test command name\"));\n  command.metadata(Bson::DocumentImpl::create());\n  command.commandArgs(Bson::DocumentImpl::create());\n  command.inputDocs().push_back(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n\n  EXPECT_NO_THROW(Json::Factory::loadFromString(command.toString(true)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(command.toString(false)));\n\n  encoder_.encodeCommand(command);\n  EXPECT_CALL(callbacks_, decodeCommand_(Pointee(Eq(command))));\n  decoder_.onData(output_);\n}\n\nTEST_F(MongoCodecImplTest, CommandReplyEqual) {\n  {\n    CommandReplyMessageImpl m1(0, 0);\n    CommandReplyMessageImpl m2(1, 1);\n    EXPECT_FALSE(m1 == m2);\n  }\n\n  // Trigger fail on comparing metadata.\n  {\n    CommandReplyMessageImpl m1(0, 0);\n    m1.metadata(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    CommandReplyMessageImpl m2(1, 1);\n    m2.metadata(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(m1 == m2);\n  }\n\n  // Trigger fail on comparing commandReply.\n  {\n    CommandReplyMessageImpl m1(0, 0);\n    m1.commandReply(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    CommandReplyMessageImpl 
m2(1, 1);\n    m2.commandReply(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(m1 == m2);\n  }\n\n  // Trigger fail on comparing outputDocs.\n  {\n    CommandReplyMessageImpl m1(0, 0);\n    m1.outputDocs().push_back(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    CommandReplyMessageImpl m2(1, 1);\n    m2.outputDocs().push_back(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n    EXPECT_FALSE(m1 == m2);\n  }\n}\nTEST_F(MongoCodecImplTest, CommandReply) {\n  CommandReplyMessageImpl commandReply(16, 26);\n\n  commandReply.metadata(Bson::DocumentImpl::create());\n  commandReply.commandReply(Bson::DocumentImpl::create());\n  commandReply.outputDocs().push_back(Bson::DocumentImpl::create()->addString(\"world\", \"hello\"));\n\n  EXPECT_NO_THROW(Json::Factory::loadFromString(commandReply.toString(true)));\n  EXPECT_NO_THROW(Json::Factory::loadFromString(commandReply.toString(false)));\n\n  encoder_.encodeCommandReply(commandReply);\n  EXPECT_CALL(callbacks_, decodeCommandReply_(Pointee(Eq(commandReply))));\n  decoder_.onData(output_);\n}\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mongo_proxy/config_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.validate.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"extensions/filters/network/mongo_proxy/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nTEST(MongoFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(MongoProxyFilterConfigFactory().createFilterFactoryFromProto(\n                   envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy(), context),\n               ProtoValidationException);\n}\n\nTEST(MongoFilterConfigTest, CorrectConfigurationNoFaults) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: my_stat_prefix\n  access_log: path/to/access/log\n  )EOF\";\n\n  envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  MongoProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addFilter(_));\n  cb(connection);\n}\n\nTEST(MongoFilterConfigTest, ValidProtoConfigurationNoFaults) {\n  envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy config;\n\n  config.set_access_log(\"path/to/access/log\");\n  config.set_stat_prefix(\"my_stat_prefix\");\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  MongoProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context);\n  Network::MockConnection 
connection;\n  EXPECT_CALL(connection, addFilter(_));\n  cb(connection);\n}\n\nTEST(MongoFilterConfigTest, MongoFilterWithEmptyProto) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  MongoProxyFilterConfigFactory factory;\n  envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy config =\n      *dynamic_cast<envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy*>(\n          factory.createEmptyConfigProto().get());\n  config.set_access_log(\"path/to/access/log\");\n  config.set_stat_prefix(\"my_stat_prefix\");\n\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addFilter(_));\n  cb(connection);\n}\n\nvoid handleInvalidConfiguration(const std::string& yaml_string, const std::string& error_regex) {\n  envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy config;\n  EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYamlAndValidate(yaml_string, config), EnvoyException,\n                          error_regex);\n}\n\nTEST(MongoFilterConfigTest, InvalidExtraProperty) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: my_stat_prefix\n  access_log: path/to/access/log\n  test: a\n  )EOF\";\n\n  handleInvalidConfiguration(yaml_string, \"test: Cannot find field\");\n}\n\nTEST(MongoFilterConfigTest, EmptyConfig) {\n  handleInvalidConfiguration(\n      \"{}\", R\"(StatPrefix: \\[\"value length must be at least \" '\\\\x01' \" runes\"\\])\");\n}\n\nTEST(MongoFilterConfigTest, InvalidFaultsEmptyConfig) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: my_stat_prefix\n  delay: {}\n  )EOF\";\n\n  handleInvalidConfiguration(yaml_string,\n                             R\"(caused by field: \"fault_delay_secifier\", reason: is required)\");\n}\n\nTEST(MongoFilterConfigTest, InvalidFaultsMissingFixedDelayTime) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: my_stat_prefix\n  delay:\n    percentage:\n      
numerator: 1\n      denominator: HUNDRED\n  )EOF\";\n\n  handleInvalidConfiguration(yaml_string,\n                             R\"(caused by field: \"fault_delay_secifier\", reason: is required)\");\n}\n\nTEST(MongoFilterConfigTest, InvalidFaultsNegativeMs) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: my_stat_prefix\n  delay:\n    percentage:\n      numerator: 1\n      denominator: HUNDRED\n    fixed_delay: -1s\n  )EOF\";\n\n  handleInvalidConfiguration(yaml_string, R\"(FixedDelay: \\[\"value must be greater than \" \"0s\"\\])\");\n}\n\nTEST(MongoFilterConfigTest, InvalidFaultsDelayPercent) {\n  {\n    const std::string yaml_string = R\"EOF(\n    stat_prefix: my_stat_prefix\n    delay:\n      percentage:\n        numerator: -1\n        denominator: HUNDRED\n      fixed_delay: 1s\n    )EOF\";\n\n    handleInvalidConfiguration(yaml_string, R\"(invalid value -1 for type TYPE_UINT32)\");\n  }\n}\n\nTEST(MongoFilterConfigTest, InvalidFaultsType) {\n  {\n    const std::string yaml_string = R\"EOF(\n    stat_prefix: my_stat_prefix\n    delay:\n      percentage:\n        numerator: df\n        denominator: HUNDRED\n      fixed_delay: 1s\n    )EOF\";\n\n    handleInvalidConfiguration(yaml_string, R\"(invalid value \"df\" for type TYPE_UINT32)\");\n  }\n\n  {\n    const std::string yaml_string = R\"EOF(\n    stat_prefix: my_stat_prefix\n    delay:\n      percentage:\n        numerator: 1\n        denominator: HUNDRED\n      fixed_delay: ab\n    )EOF\";\n\n    handleInvalidConfiguration(yaml_string, \"Illegal duration format; duration must end with 's'\");\n  }\n\n  {\n    const std::string yaml_string = R\"EOF(\n    stat_prefix: my_stat_prefix\n    delay:\n      percentage:\n        numerator: 3\n        denominator: HUNDRED\n      fixed_delay: 0s\n    )EOF\";\n\n    handleInvalidConfiguration(yaml_string,\n                               R\"(FixedDelay: \\[\"value must be greater than \" \"0s\"\\])\");\n  }\n}\n\nTEST(MongoFilterConfigTest, 
CorrectFaultConfiguration) {\n  const std::string yaml_string = R\"EOF(\n  stat_prefix: my_stat_prefix\n  delay:\n    percentage:\n      numerator: 1\n      denominator: HUNDRED\n    fixed_delay: 0.001s\n  )EOF\";\n\n  envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy proto_config;\n  TestUtility::loadFromYaml(yaml_string, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  MongoProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addFilter(_));\n  cb(connection);\n}\n\nTEST(MongoFilterConfigTest, CorrectFaultConfigurationInProto) {\n  envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy config{};\n  config.set_stat_prefix(\"my_stat_prefix\");\n  config.mutable_delay()->mutable_percentage()->set_numerator(50);\n  config.mutable_delay()->mutable_percentage()->set_denominator(\n      envoy::type::v3::FractionalPercent::HUNDRED);\n  config.mutable_delay()->mutable_fixed_delay()->set_seconds(500);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  MongoProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addFilter(_));\n  cb(connection);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(MongoFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.mongo_proxy\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedNetworkFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mongo_proxy/proxy_test.cc",
    "content": "#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/common/fault/v3/fault.pb.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"extensions/filters/network/mongo_proxy/bson_impl.h\"\n#include \"extensions/filters/network/mongo_proxy/codec_impl.h\"\n#include \"extensions/filters/network/mongo_proxy/mongo_stats.h\"\n#include \"extensions/filters/network/mongo_proxy/proxy.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/common/stream_info/test_util.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AtLeast;\nusing testing::Invoke;\nusing testing::Matcher;\nusing testing::NiceMock;\nusing testing::Property;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nclass MockDecoder : public Decoder {\npublic:\n  MOCK_METHOD(void, onData, (Buffer::Instance & data));\n};\n\nclass TestProxyFilter : public ProxyFilter {\npublic:\n  using ProxyFilter::ProxyFilter;\n\n  // ProxyFilter\n  DecoderPtr createDecoder(DecoderCallbacks& callbacks) override {\n    callbacks_ = &callbacks;\n    return DecoderPtr{decoder_};\n  }\n\n  MockDecoder* decoder_{new MockDecoder()};\n  DecoderCallbacks* callbacks_{};\n};\n\nclass MongoProxyFilterTest : public testing::Test {\npublic:\n  MongoProxyFilterTest() : mongo_stats_(std::make_shared<MongoStats>(store_, \"test\")) { setup(); }\n\n  void setup() {\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"mongo.proxy_enabled\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_, 
featureEnabled(\"mongo.connection_logging_enabled\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"mongo.logging_enabled\", 100))\n        .WillByDefault(Return(true));\n\n    EXPECT_CALL(read_filter_callbacks_, connection())\n        .WillRepeatedly(ReturnRef(read_filter_callbacks_.connection_));\n    EXPECT_CALL(read_filter_callbacks_.connection_, streamInfo())\n        .WillRepeatedly(ReturnRef(stream_info_));\n\n    EXPECT_CALL(log_manager_, createAccessLog(_)).WillOnce(Return(file_));\n    access_log_ = std::make_shared<AccessLog>(\"test\", log_manager_, dispatcher_.timeSource());\n  }\n\n  void initializeFilter(bool emit_dynamic_metadata = false) {\n    filter_ = std::make_unique<TestProxyFilter>(\n        \"test.\", store_, runtime_, access_log_, fault_config_, drain_decision_,\n        dispatcher_.timeSource(), emit_dynamic_metadata, mongo_stats_);\n    filter_->initializeReadFilterCallbacks(read_filter_callbacks_);\n    filter_->onNewConnection();\n\n    // NOP currently.\n    filter_->onAboveWriteBufferHighWatermark();\n    filter_->onBelowWriteBufferLowWatermark();\n  }\n\n  void setupDelayFault(bool enable_fault) {\n    envoy::extensions::filters::common::fault::v3::FaultDelay fault;\n    fault.mutable_percentage()->set_numerator(50);\n    fault.mutable_percentage()->set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n    fault.mutable_fixed_delay()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(10));\n\n    fault_config_ = std::make_shared<Filters::Common::Fault::FaultDelayConfig>(fault);\n\n    EXPECT_CALL(runtime_.snapshot_,\n                featureEnabled(\"mongo.fault.fixed_delay.percent\",\n                               Matcher<const envoy::type::v3::FractionalPercent&>(Percent(50))))\n        .WillOnce(Return(enable_fault));\n\n    if (enable_fault) {\n      EXPECT_CALL(runtime_.snapshot_, getInteger(\"mongo.fault.fixed_delay.duration_ms\", 10))\n          
.WillOnce(Return(10));\n    }\n  }\n\n  Buffer::OwnedImpl fake_data_;\n  NiceMock<Stats::MockIsolatedStatsStore> store_;\n  MongoStatsSharedPtr mongo_stats_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  std::shared_ptr<Envoy::AccessLog::MockAccessLogFile> file_{\n      new NiceMock<Envoy::AccessLog::MockAccessLogFile>()};\n  AccessLogSharedPtr access_log_;\n  Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config_;\n  std::unique_ptr<TestProxyFilter> filter_;\n  NiceMock<Network::MockReadFilterCallbacks> read_filter_callbacks_;\n  Envoy::AccessLog::MockAccessLogManager log_manager_;\n  NiceMock<Network::MockDrainDecision> drain_decision_;\n  TestStreamInfo stream_info_;\n};\n\nTEST_F(MongoProxyFilterTest, DelayFaults) {\n  setupDelayFault(true);\n  initializeFilter();\n\n  Event::MockTimer* delay_timer =\n      new Event::MockTimer(&read_filter_callbacks_.connection_.dispatcher_);\n  EXPECT_CALL(*delay_timer, enableTimer(std::chrono::milliseconds(10), _));\n  EXPECT_CALL(*file_, write(_)).Times(AtLeast(1));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(fake_data_, false));\n  EXPECT_EQ(1U, store_.counter(\"test.op_query\").value());\n\n  // Requests during active delay.\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n\n  
EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(fake_data_, false));\n  EXPECT_EQ(2U, store_.counter(\"test.op_query\").value());\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    GetMoreMessagePtr message(new GetMoreMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->cursorId(1);\n    filter_->callbacks_->decodeGetMore(std::move(message));\n  }));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(fake_data_, false));\n  EXPECT_EQ(1U, store_.counter(\"test.op_get_more\").value());\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    KillCursorsMessagePtr message(new KillCursorsMessageImpl(0, 0));\n    message->numberOfCursorIds(1);\n    message->cursorIds({1});\n    filter_->callbacks_->decodeKillCursors(std::move(message));\n  }));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(fake_data_, false));\n  EXPECT_EQ(1U, store_.counter(\"test.op_kill_cursors\").value());\n\n  EXPECT_CALL(read_filter_callbacks_, continueReading());\n  delay_timer->invokeCallback();\n  EXPECT_EQ(1U, store_.counter(\"test.delays_injected\").value());\n}\n\nTEST_F(MongoProxyFilterTest, DelayFaultsRuntimeDisabled) {\n  setupDelayFault(false);\n  initializeFilter();\n\n  EXPECT_CALL(dispatcher_, createTimer_(_)).Times(0);\n  EXPECT_CALL(*file_, write(_)).Times(AtLeast(1));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data_, false));\n  EXPECT_EQ(0U, store_.counter(\"test.delays_injected\").value());\n}\n\nTEST_F(MongoProxyFilterTest, 
DynamicMetadata) {\n  initializeFilter(true);\n\n  EXPECT_CALL(*file_, write(_)).Times(AtLeast(1));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  auto& metadata =\n      stream_info_.dynamicMetadata().filter_metadata().at(NetworkFilterNames::get().MongoProxy);\n  EXPECT_TRUE(metadata.fields().find(\"db.test\") != metadata.fields().end());\n  EXPECT_EQ(\"query\", metadata.fields().at(\"db.test\").list_value().values(0).string_value());\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InsertMessagePtr message(new InsertMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->documents().push_back(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeInsert(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_TRUE(metadata.fields().find(\"db.test\") != metadata.fields().end());\n  EXPECT_EQ(\"insert\", metadata.fields().at(\"db.test\").list_value().values(0).string_value());\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message1(new QueryMessageImpl(0, 0));\n    message1->fullCollectionName(\"db1.test1\");\n    message1->flags(0b1110010);\n    message1->query(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeQuery(std::move(message1));\n\n    InsertMessagePtr message2(new InsertMessageImpl(0, 0));\n    message2->fullCollectionName(\"db2.test2\");\n    message2->documents().push_back(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeInsert(std::move(message2));\n  }));\n  filter_->onData(fake_data_, false);\n\n  
EXPECT_TRUE(metadata.fields().find(\"db1.test1\") != metadata.fields().end());\n  EXPECT_EQ(\"query\", metadata.fields().at(\"db1.test1\").list_value().values(0).string_value());\n  EXPECT_TRUE(metadata.fields().find(\"db2.test2\") != metadata.fields().end());\n  EXPECT_EQ(\"insert\", metadata.fields().at(\"db2.test2\").list_value().values(0).string_value());\n}\n\nTEST_F(MongoProxyFilterTest, DynamicMetadataDisabled) {\n  initializeFilter(false);\n\n  EXPECT_CALL(*file_, write(_)).Times(AtLeast(1));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_EQ(0, stream_info_.dynamicMetadata().filter_metadata().count(\n                   NetworkFilterNames::get().MongoProxy));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InsertMessagePtr message(new InsertMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->documents().push_back(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeInsert(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_EQ(0, stream_info_.dynamicMetadata().filter_metadata().count(\n                   NetworkFilterNames::get().MongoProxy));\n}\n\nTEST_F(MongoProxyFilterTest, Stats) {\n  initializeFilter();\n\n  EXPECT_CALL(*file_, write(_)).Times(AtLeast(1));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create());\n    
filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_CALL(store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"test.collection.test.query.reply_num_docs\"), 1));\n  EXPECT_CALL(store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"test.collection.test.query.reply_size\"), 22));\n  EXPECT_CALL(store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"test.collection.test.query.reply_time_ms\"), _));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    ReplyMessagePtr message(new ReplyMessageImpl(0, 0));\n    message->flags(0b11);\n    message->cursorId(1);\n    message->documents().push_back(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    filter_->callbacks_->decodeReply(std::move(message));\n  }));\n  filter_->onWrite(fake_data_, false);\n\n  EXPECT_EQ(1U, store_.counter(\"test.op_query\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_query_tailable_cursor\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_query_no_cursor_timeout\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_query_await_data\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_query_exhaust\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_query_no_max_time\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_query_scatter_get\").value());\n\n  EXPECT_EQ(1U, store_.counter(\"test.collection.test.query.total\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.collection.test.query.scatter_get\").value());\n\n  EXPECT_EQ(1U, store_.counter(\"test.op_reply\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_reply_cursor_not_found\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_reply_query_failure\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_reply_valid_cursor\").value());\n\n  
EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    GetMoreMessagePtr message(new GetMoreMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->cursorId(1);\n    filter_->callbacks_->decodeGetMore(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    InsertMessagePtr message(new InsertMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->documents().push_back(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeInsert(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    KillCursorsMessagePtr message(new KillCursorsMessageImpl(0, 0));\n    message->numberOfCursorIds(1);\n    message->cursorIds({1});\n    filter_->callbacks_->decodeKillCursors(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    CommandMessagePtr message(new CommandMessageImpl(0, 0));\n    message->database(std::string(\"Test database\"));\n    message->commandName(std::string(\"Test command name\"));\n    message->metadata(Bson::DocumentImpl::create());\n    message->commandArgs(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeCommand(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    CommandReplyMessagePtr message(new CommandReplyMessageImpl(0, 0));\n    message->metadata(Bson::DocumentImpl::create());\n    message->commandReply(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeCommandReply(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_EQ(1U, 
store_.counter(\"test.op_get_more\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_insert\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_kill_cursors\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.delays_injected\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_command\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.op_command_reply\").value());\n}\n\nTEST_F(MongoProxyFilterTest, CommandStats) {\n  initializeFilter();\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.$cmd\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create()->addString(\"insert\", \"bar\"));\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name, \"test.cmd.insert.reply_num_docs\"), 1));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name, \"test.cmd.insert.reply_size\"), 22));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name, \"test.cmd.insert.reply_time_ms\"), _));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    ReplyMessagePtr message(new ReplyMessageImpl(0, 0));\n    message->flags(0b11);\n    message->cursorId(1);\n    message->documents().push_back(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    filter_->callbacks_->decodeReply(std::move(message));\n  }));\n  filter_->onWrite(fake_data_, false);\n\n  EXPECT_EQ(1U, store_.counter(\"test.cmd.insert.total\").value());\n}\n\nTEST_F(MongoProxyFilterTest, CallingFunctionStats) {\n  initializeFilter();\n\n  std::string json = R\"EOF(\n    {\n      \"hostname\":\"api-production-iad-canary\",\n      
\"httpUniqueId\":\"VqqX7H8AAQEAAE@8EUkAAAAR\",\n      \"callingFunction\":\"getByMongoId\"\n    }\n  )EOF\";\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create()->addString(\"$comment\", std::move(json)));\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_EQ(1U, store_.counter(\"test.collection.test.query.total\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.collection.test.query.scatter_get\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.collection.test.callsite.getByMongoId.query.total\").value());\n  EXPECT_EQ(1U,\n            store_.counter(\"test.collection.test.callsite.getByMongoId.query.scatter_get\").value());\n\n  EXPECT_CALL(store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"test.collection.test.query.reply_num_docs\"), 1));\n  EXPECT_CALL(store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"test.collection.test.query.reply_size\"), 22));\n  EXPECT_CALL(store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name, \"test.collection.test.query.reply_time_ms\"), _));\n  EXPECT_CALL(store_,\n              deliverHistogramToSinks(\n                  Property(&Stats::Metric::name,\n                           \"test.collection.test.callsite.getByMongoId.query.reply_num_docs\"),\n                  1));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   \"test.collection.test.callsite.getByMongoId.query.reply_size\"),\n                          22));\n  EXPECT_CALL(store_,\n              deliverHistogramToSinks(\n                  
Property(&Stats::Metric::name,\n                           \"test.collection.test.callsite.getByMongoId.query.reply_time_ms\"),\n                  _));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    ReplyMessagePtr message(new ReplyMessageImpl(0, 0));\n    message->flags(0b11);\n    message->cursorId(1);\n    message->documents().push_back(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    filter_->callbacks_->decodeReply(std::move(message));\n  }));\n  filter_->onWrite(fake_data_, false);\n}\n\nTEST_F(MongoProxyFilterTest, MultiGet) {\n  initializeFilter();\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create()->addDocument(\n        \"_id\", Bson::DocumentImpl::create()->addArray(\"$in\", Bson::DocumentImpl::create())));\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_EQ(1U, store_.counter(\"test.op_query_multi_get\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.collection.test.query.multi_get\").value());\n}\n\nTEST_F(MongoProxyFilterTest, MaxTime) {\n  initializeFilter();\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create()->addInt32(\"$maxTimeMS\", 100));\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_EQ(0U, store_.counter(\"test.op_query_no_max_time\").value());\n}\n\nTEST_F(MongoProxyFilterTest, MaxTimeCursor) {\n  initializeFilter();\n\n  EXPECT_CALL(*filter_->decoder_, 
onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create()->addInt32(\"maxTimeMS\", 500));\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_EQ(0U, store_.counter(\"test.op_query_no_max_time\").value());\n}\n\nTEST_F(MongoProxyFilterTest, DecodeError) {\n  initializeFilter();\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    throw EnvoyException(\"bad decode\");\n  }));\n  filter_->onData(fake_data_, false);\n\n  // Should not call decode again.\n  filter_->onData(fake_data_, false);\n\n  EXPECT_EQ(1U, store_.counter(\"test.decoding_error\").value());\n}\n\nTEST_F(MongoProxyFilterTest, ConcurrentQueryWithDrainClose) {\n  initializeFilter();\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(1, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeQuery(std::move(message));\n\n    message = std::make_unique<QueryMessageImpl>(2, 0);\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create());\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n  EXPECT_EQ(2U, store_.gauge(\"test.op_query_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  Event::MockTimer* drain_timer = nullptr;\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    ReplyMessagePtr message(new ReplyMessageImpl(0, 1));\n    message->flags(0b11);\n    message->cursorId(1);\n    
message->documents().push_back(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    filter_->callbacks_->decodeReply(std::move(message));\n\n    message = std::make_unique<ReplyMessageImpl>(0, 2);\n    message->flags(0b11);\n    message->cursorId(1);\n    message->documents().push_back(Bson::DocumentImpl::create()->addString(\"hello\", \"world\"));\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"mongo.drain_close_enabled\", 100))\n        .WillByDefault(Return(true));\n    EXPECT_CALL(drain_decision_, drainClose()).WillOnce(Return(true));\n    drain_timer = new Event::MockTimer(&read_filter_callbacks_.connection_.dispatcher_);\n    EXPECT_CALL(*drain_timer, enableTimer(std::chrono::milliseconds(0), _));\n    filter_->callbacks_->decodeReply(std::move(message));\n  }));\n  filter_->onWrite(fake_data_, false);\n\n  EXPECT_CALL(read_filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n  EXPECT_CALL(*drain_timer, disableTimer());\n  drain_timer->invokeCallback();\n\n  EXPECT_EQ(0U, store_.gauge(\"test.op_query_active\", Stats::Gauge::ImportMode::Accumulate).value());\n  EXPECT_EQ(1U, store_.counter(\"test.cx_drain_close\").value());\n}\n\nTEST_F(MongoProxyFilterTest, EmptyActiveQueryList) {\n  initializeFilter();\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.$cmd\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create()->addString(\"query\", \"bar\"));\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    ReplyMessagePtr message(new ReplyMessageImpl(0, 0));\n    message->flags(0b11);\n    message->cursorId(1);\n    message->documents().push_back(Bson::DocumentImpl::create()->addString(\"hello\", 
\"world\"));\n    filter_->callbacks_->decodeReply(std::move(message));\n  }));\n  filter_->onWrite(fake_data_, false);\n  read_filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_EQ(0U, store_.counter(\"test.cx_destroy_local_with_active_rq\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n}\n\nTEST_F(MongoProxyFilterTest, ConnectionDestroyLocal) {\n  setupDelayFault(true);\n  initializeFilter();\n\n  Event::MockTimer* delay_timer =\n      new Event::MockTimer(&read_filter_callbacks_.connection_.dispatcher_);\n  EXPECT_CALL(*delay_timer, enableTimer(std::chrono::milliseconds(10), _));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create()->addDocument(\n        \"_id\", Bson::DocumentImpl::create()->addArray(\"$in\", Bson::DocumentImpl::create())));\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_CALL(*delay_timer, disableTimer());\n  read_filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n  EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_local_with_active_rq\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n}\n\nTEST_F(MongoProxyFilterTest, ConnectionDestroyRemote) {\n  setupDelayFault(true);\n  initializeFilter();\n\n  Event::MockTimer* delay_timer =\n      new Event::MockTimer(&read_filter_callbacks_.connection_.dispatcher_);\n  EXPECT_CALL(*delay_timer, enableTimer(std::chrono::milliseconds(10), _));\n\n  EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    QueryMessagePtr message(new QueryMessageImpl(0, 0));\n    message->fullCollectionName(\"db.test\");\n    
message->flags(0b1110010);\n    message->query(Bson::DocumentImpl::create()->addDocument(\n        \"_id\", Bson::DocumentImpl::create()->addArray(\"$in\", Bson::DocumentImpl::create())));\n    filter_->callbacks_->decodeQuery(std::move(message));\n  }));\n  filter_->onData(fake_data_, false);\n\n  EXPECT_CALL(*delay_timer, disableTimer());\n  read_filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n  EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.cx_destroy_local_with_active_rq\").value());\n}\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mongo_proxy/utility_test.cc",
    "content": "#include <string>\n\n#include \"extensions/filters/network/mongo_proxy/bson_impl.h\"\n#include \"extensions/filters/network/mongo_proxy/codec_impl.h\"\n#include \"extensions/filters/network/mongo_proxy/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MongoProxy {\n\nTEST(QueryMessageInfoTest, FindCommand) {\n  std::string json = R\"EOF(\n    {\"hostname\":\"api-production-iad-canary\",\"httpUniqueId\":\"VqqX7H8AAQEAAE@8EUkAAAAR\",\"callingFunction\":\"getByMongoId\"}\n  )EOF\";\n\n  QueryMessageImpl q(0, 0);\n  q.fullCollectionName(\"db.$cmd\");\n  q.query(Bson::DocumentImpl::create()\n              ->addString(\"find\", \"foo_collection\")\n              ->addString(\"comment\", std::move(json))\n              ->addDocument(\"filter\", Bson::DocumentImpl::create()->addString(\"_id\", \"foo\")));\n  QueryMessageInfo info(q);\n  EXPECT_EQ(\"\", info.command());\n  EXPECT_EQ(\"foo_collection\", info.collection());\n  EXPECT_EQ(\"getByMongoId\", info.callsite());\n  EXPECT_EQ(QueryMessageInfo::QueryType::PrimaryKey, info.type());\n}\n\nTEST(QueryMessageInfoTest, Type) {\n  {\n    QueryMessageImpl q(1, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create());\n    QueryMessageInfo info(q);\n    EXPECT_EQ(QueryMessageInfo::QueryType::ScatterGet, info.type());\n    EXPECT_EQ(1, info.requestId());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addInt32(\"_id\", 2));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(QueryMessageInfo::QueryType::PrimaryKey, info.type());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addDocument(\n        \"_id\", Bson::DocumentImpl::create()->addArray(\"$in\", Bson::DocumentImpl::create())));\n    QueryMessageInfo info(q);\n    
EXPECT_EQ(QueryMessageInfo::QueryType::MultiGet, info.type());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addDocument(\"$query\", Bson::DocumentImpl::create()));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(QueryMessageInfo::QueryType::ScatterGet, info.type());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addDocument(\n        \"$query\", Bson::DocumentImpl::create()->addInt32(\"_id\", 2)));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(QueryMessageInfo::QueryType::PrimaryKey, info.type());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addDocument(\n        \"$query\",\n        Bson::DocumentImpl::create()->addDocument(\n            \"_id\", Bson::DocumentImpl::create()->addArray(\"$in\", Bson::DocumentImpl::create()))));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(QueryMessageInfo::QueryType::MultiGet, info.type());\n  }\n}\n\nTEST(QueryMessageInfoTest, CollectionFromFullCollectionName) {\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create());\n    QueryMessageInfo info(q);\n    EXPECT_EQ(\"foo\", info.collection());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"foo\");\n    EXPECT_THROW((QueryMessageInfo(q)), EnvoyException);\n  }\n}\n\nTEST(QueryMessageInfoTest, Callsite) {\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create());\n    QueryMessageInfo info(q);\n    EXPECT_EQ(\"\", info.callsite());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addString(\"$comment\", \"bad json\"));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(\"\", info.callsite());\n  }\n\n  {\n    std::string json = R\"EOF(\n   
   {\"hostname\":\"api-production-iad-canary\",\"httpUniqueId\":\"VqqX7H8AAQEAAE@8EUkAAAAR\",\"callingFunction\":\"getByMongoId\"}\n    )EOF\";\n\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addString(\"$comment\", std::move(json)));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(\"getByMongoId\", info.callsite());\n  }\n}\n\nTEST(QueryMessageInfoTest, MaxTime) {\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create());\n    QueryMessageInfo info(q);\n    EXPECT_EQ(0, info.maxTime());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addInt32(\"$maxTimeMS\", 1212));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(1212, info.maxTime());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addInt64(\"$maxTimeMS\", 1212));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(1212, info.maxTime());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addInt64(\"maxTimeMS\", 2400));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(2400, info.maxTime());\n  }\n}\n\nTEST(QueryMessageInfoTest, Command) {\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.$cmd\");\n    q.query(Bson::DocumentImpl::create()->addString(\"foo\", \"bar\"));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(\"foo\", info.command());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.foo\");\n    q.query(Bson::DocumentImpl::create()->addString(\"foo\", \"bar\"));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(\"\", info.command());\n  }\n\n  {\n    QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.$cmd\");\n    q.query(Bson::DocumentImpl::create());\n    EXPECT_THROW((QueryMessageInfo(q)), EnvoyException);\n  }\n\n  {\n    
QueryMessageImpl q(0, 0);\n    q.fullCollectionName(\"db.$cmd\");\n    q.query(Bson::DocumentImpl::create()->addDocument(\n        \"$query\", Bson::DocumentImpl::create()->addInt32(\"ismaster\", 1)));\n    QueryMessageInfo info(q);\n    EXPECT_EQ(\"ismaster\", info.command());\n  }\n}\n\n} // namespace MongoProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mysql_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n    \"envoy_extension_cc_test_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test_library(\n    name = \"mysql_test_utils_lib\",\n    srcs = [\"mysql_test_utils.cc\"],\n    hdrs = [\"mysql_test_utils.h\"],\n    extension_name = \"envoy.filters.network.mysql_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/mysql_proxy:proxy_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"mysql_codec_tests\",\n    srcs = [\n        \"mysql_codec_test.cc\",\n    ],\n    extension_name = \"envoy.filters.network.mysql_proxy\",\n    deps = [\n        \":mysql_test_utils_lib\",\n        \"//source/extensions/filters/network/mysql_proxy:proxy_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"mysql_filter_tests\",\n    srcs = [\n        \"mysql_filter_test.cc\",\n    ],\n    extension_name = \"envoy.filters.network.mysql_proxy\",\n    deps = [\n        \":mysql_test_utils_lib\",\n        \"//source/extensions/filters/network/mysql_proxy:config\",\n        \"//test/mocks/network:network_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"mysql_integration_test\",\n    srcs = [\n        \"mysql_integration_test.cc\",\n    ],\n    data = [\n        \"mysql_test_config.yaml\",\n    ],\n    extension_name = \"envoy.filters.network.mysql_proxy\",\n    deps = [\n        \":mysql_test_utils_lib\",\n        \"//source/common/tcp_proxy\",\n        \"//source/extensions/filters/network/mysql_proxy:config\",\n        \"//source/extensions/filters/network/mysql_proxy:proxy_lib\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/integration:integration_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"mysql_command_tests\",\n    srcs = [\n        \"mysql_command_test.cc\",\n    
],\n    data = [\"mysql_test_config.yaml\"],\n    extension_name = \"envoy.filters.network.mysql_proxy\",\n    external_deps = [\"sqlparser\"],\n    deps = [\n        \":mysql_test_utils_lib\",\n        \"//source/common/tcp_proxy\",\n        \"//source/extensions/filters/network/mysql_proxy:config\",\n        \"//source/extensions/filters/network/mysql_proxy:proxy_lib\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/integration:integration_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/mysql_proxy/mysql_codec_test.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_command.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_greeting.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mysql_test_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nconstexpr int MYSQL_UT_RESP_OK = 0;\nconstexpr int MYSQL_UT_LAST_ID = 0;\nconstexpr int MYSQL_UT_SERVER_OK = 0;\nconstexpr int MYSQL_UT_SERVER_WARNINGS = 0x0001;\n\nclass MySQLCodecTest : public testing::Test {};\n\nTEST_F(MySQLCodecTest, MySQLServerChallengeV9EncDec) {\n  ServerGreeting mysql_greet_encode{};\n  mysql_greet_encode.setProtocol(MYSQL_PROTOCOL_9);\n  std::string ver(MySQLTestUtils::getVersion());\n  mysql_greet_encode.setVersion(ver);\n  mysql_greet_encode.setThreadId(MYSQL_THREAD_ID);\n  std::string salt(MySQLTestUtils::getSalt());\n  mysql_greet_encode.setSalt(salt);\n  std::string data = mysql_greet_encode.encode();\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getSalt(), mysql_greet_encode.getSalt());\n  EXPECT_EQ(mysql_greet_decode.getVersion(), mysql_greet_encode.getVersion());\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), mysql_greet_encode.getProtocol());\n  EXPECT_EQ(mysql_greet_decode.getThreadId(), mysql_greet_encode.getThreadId());\n  EXPECT_EQ(mysql_greet_decode.getServerLanguage(), 0);\n  EXPECT_EQ(mysql_greet_decode.getServerStatus(), 0);\n  EXPECT_EQ(mysql_greet_decode.getExtServerCap(), 0);\n  
EXPECT_EQ(mysql_greet_decode.getServerCap(), 0);\n}\n\n/*\n * Test the MYSQL Greeting message V10 parser:\n * - message is encoded using the ServerGreeting class\n * - message is decoded using the ServerGreeting class\n */\nTEST_F(MySQLCodecTest, MySQLServerChallengeV10EncDec) {\n  ServerGreeting mysql_greet_encode{};\n  mysql_greet_encode.setProtocol(MYSQL_PROTOCOL_10);\n  std::string ver(MySQLTestUtils::getVersion());\n  mysql_greet_encode.setVersion(ver);\n  mysql_greet_encode.setThreadId(MYSQL_THREAD_ID);\n  std::string salt(MySQLTestUtils::getSalt());\n  mysql_greet_encode.setSalt(salt);\n  mysql_greet_encode.setServerCap(MYSQL_SERVER_CAPAB);\n  mysql_greet_encode.setServerLanguage(MYSQL_SERVER_LANGUAGE);\n  mysql_greet_encode.setServerStatus(MYSQL_SERVER_STATUS);\n  mysql_greet_encode.setExtServerCap(MYSQL_SERVER_EXT_CAPAB);\n  std::string data = mysql_greet_encode.encode();\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getSalt(), mysql_greet_encode.getSalt());\n  EXPECT_EQ(mysql_greet_decode.getVersion(), mysql_greet_encode.getVersion());\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), mysql_greet_encode.getProtocol());\n  EXPECT_EQ(mysql_greet_decode.getThreadId(), mysql_greet_encode.getThreadId());\n  EXPECT_EQ(mysql_greet_decode.getServerLanguage(), mysql_greet_encode.getServerLanguage());\n  EXPECT_EQ(mysql_greet_decode.getServerStatus(), mysql_greet_encode.getServerStatus());\n  EXPECT_EQ(mysql_greet_decode.getExtServerCap(), mysql_greet_encode.getExtServerCap());\n  EXPECT_EQ(mysql_greet_decode.getServerCap(), mysql_greet_encode.getServerCap());\n}\n\n/*\n * Negative Testing: Server Greetings Incomplete\n * - incomplete protocol\n */\nTEST_F(MySQLCodecTest, MySQLServerChallengeIncompleteProtocol) {\n  ServerGreeting mysql_greet_encode{};\n  std::string data = \"\";\n\n  
Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), 0);\n}\n\n/*\n * Negative Testing: Server Greetings Incomplete\n * - incomplete version\n */\nTEST_F(MySQLCodecTest, MySQLServerChallengeIncompleteVersion) {\n  ServerGreeting mysql_greet_encode{};\n  mysql_greet_encode.setProtocol(MYSQL_PROTOCOL_9);\n  std::string data = mysql_greet_encode.encode();\n  int incomplete_size = sizeof(MYSQL_PROTOCOL_9);\n  data = data.substr(0, incomplete_size);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getVersion(), \"\");\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), mysql_greet_encode.getProtocol());\n}\n\n/*\n * Negative Testing: Server Greetings Incomplete\n * - incomplete thread_id\n */\nTEST_F(MySQLCodecTest, MySQLServerChallengeIncompleteThreadId) {\n  ServerGreeting mysql_greet_encode{};\n  mysql_greet_encode.setProtocol(MYSQL_PROTOCOL_9);\n  std::string ver(MySQLTestUtils::getVersion());\n  mysql_greet_encode.setVersion(ver);\n  std::string data = mysql_greet_encode.encode();\n  int incomplete_size = sizeof(MYSQL_PROTOCOL_9) + ver.length() + 1;\n  data = data.substr(0, incomplete_size);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getVersion(), mysql_greet_encode.getVersion());\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), mysql_greet_encode.getProtocol());\n  EXPECT_EQ(mysql_greet_decode.getThreadId(), 0);\n}\n\n/*\n * Negative Testing: Server Greetings Incomplete\n * - incomplete salt\n */\nTEST_F(MySQLCodecTest, 
MySQLServerChallengeIncompleteSalt) {\n  ServerGreeting mysql_greet_encode{};\n  mysql_greet_encode.setProtocol(MYSQL_PROTOCOL_9);\n  std::string ver(MySQLTestUtils::getVersion());\n  mysql_greet_encode.setVersion(ver);\n  mysql_greet_encode.setThreadId(MYSQL_THREAD_ID);\n  std::string data = mysql_greet_encode.encode();\n  int incomplete_size = sizeof(MYSQL_PROTOCOL_9) + ver.length() + 1 + sizeof(MYSQL_THREAD_ID);\n  data = data.substr(0, incomplete_size);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getSalt(), \"\");\n  EXPECT_EQ(mysql_greet_decode.getVersion(), mysql_greet_encode.getVersion());\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), mysql_greet_encode.getProtocol());\n  EXPECT_EQ(mysql_greet_decode.getThreadId(), mysql_greet_encode.getThreadId());\n}\n\n/*\n * Negative Testing: Server Greetings Incomplete\n * - incomplete Server Capabilities\n */\nTEST_F(MySQLCodecTest, MySQLServerChallengeIncompleteServerCap) {\n  ServerGreeting mysql_greet_encode{};\n  mysql_greet_encode.setProtocol(MYSQL_PROTOCOL_10);\n  std::string ver(MySQLTestUtils::getVersion());\n  mysql_greet_encode.setVersion(ver);\n  mysql_greet_encode.setThreadId(MYSQL_THREAD_ID);\n  std::string salt(MySQLTestUtils::getSalt());\n  mysql_greet_encode.setSalt(salt);\n  std::string data = mysql_greet_encode.encode();\n  int incomplete_size =\n      sizeof(MYSQL_PROTOCOL_9) + ver.length() + 1 + sizeof(MYSQL_THREAD_ID) + salt.length() + 1;\n  data = data.substr(0, incomplete_size);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getSalt(), mysql_greet_encode.getSalt());\n  EXPECT_EQ(mysql_greet_decode.getVersion(), 
mysql_greet_encode.getVersion());\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), mysql_greet_encode.getProtocol());\n  EXPECT_EQ(mysql_greet_decode.getThreadId(), mysql_greet_encode.getThreadId());\n  EXPECT_EQ(mysql_greet_decode.getExtServerCap(), 0);\n}\n\n/*\n * Negative Testing: Server Greetings Incomplete\n * - incomplete Server Status\n */\nTEST_F(MySQLCodecTest, MySQLServerChallengeIncompleteServerStatus) {\n  ServerGreeting mysql_greet_encode{};\n  mysql_greet_encode.setProtocol(MYSQL_PROTOCOL_10);\n  std::string ver(MySQLTestUtils::getVersion());\n  mysql_greet_encode.setVersion(ver);\n  mysql_greet_encode.setThreadId(MYSQL_THREAD_ID);\n  std::string salt(MySQLTestUtils::getSalt());\n  mysql_greet_encode.setSalt(salt);\n  mysql_greet_encode.setServerCap(MYSQL_SERVER_CAPAB);\n  mysql_greet_encode.setServerLanguage(MYSQL_SERVER_LANGUAGE);\n  std::string data = mysql_greet_encode.encode();\n  int incomplete_size = sizeof(MYSQL_PROTOCOL_9) + ver.length() + 1 + sizeof(MYSQL_THREAD_ID) +\n                        salt.length() + 1 + sizeof(MYSQL_SERVER_CAPAB) +\n                        sizeof(MYSQL_SERVER_LANGUAGE);\n  data = data.substr(0, incomplete_size);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getSalt(), mysql_greet_encode.getSalt());\n  EXPECT_EQ(mysql_greet_decode.getVersion(), mysql_greet_encode.getVersion());\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), mysql_greet_encode.getProtocol());\n  EXPECT_EQ(mysql_greet_decode.getThreadId(), mysql_greet_encode.getThreadId());\n  EXPECT_EQ(mysql_greet_decode.getServerLanguage(), mysql_greet_encode.getServerLanguage());\n  EXPECT_EQ(mysql_greet_decode.getServerStatus(), 0);\n  EXPECT_EQ(mysql_greet_decode.getServerCap(), mysql_greet_encode.getServerCap());\n}\n\n/*\n * Negative Testing: Server Greetings Incomplete\n * - incomplete 
extended Server Capabilities\n */\nTEST_F(MySQLCodecTest, MySQLServerChallengeIncompleteExtServerCap) {\n  ServerGreeting mysql_greet_encode{};\n  mysql_greet_encode.setProtocol(MYSQL_PROTOCOL_10);\n  std::string ver(MySQLTestUtils::getVersion());\n  mysql_greet_encode.setVersion(ver);\n  mysql_greet_encode.setThreadId(MYSQL_THREAD_ID);\n  std::string salt(MySQLTestUtils::getSalt());\n  mysql_greet_encode.setSalt(salt);\n  mysql_greet_encode.setServerCap(MYSQL_SERVER_CAPAB);\n  mysql_greet_encode.setServerLanguage(MYSQL_SERVER_LANGUAGE);\n  mysql_greet_encode.setServerStatus(MYSQL_SERVER_STATUS);\n  std::string data = mysql_greet_encode.encode();\n  int incomplete_size = sizeof(MYSQL_PROTOCOL_9) + ver.length() + 1 + sizeof(MYSQL_THREAD_ID) +\n                        salt.length() + 1 + sizeof(MYSQL_SERVER_CAPAB) +\n                        sizeof(MYSQL_SERVER_LANGUAGE) + sizeof(MYSQL_SERVER_STATUS);\n  data = data.substr(0, incomplete_size);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getSalt(), mysql_greet_encode.getSalt());\n  EXPECT_EQ(mysql_greet_decode.getVersion(), mysql_greet_encode.getVersion());\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), mysql_greet_encode.getProtocol());\n  EXPECT_EQ(mysql_greet_decode.getThreadId(), mysql_greet_encode.getThreadId());\n  EXPECT_EQ(mysql_greet_decode.getServerLanguage(), mysql_greet_encode.getServerLanguage());\n  EXPECT_EQ(mysql_greet_decode.getServerStatus(), mysql_greet_encode.getServerStatus());\n  EXPECT_EQ(mysql_greet_decode.getExtServerCap(), 0);\n  EXPECT_EQ(mysql_greet_decode.getServerCap(), mysql_greet_encode.getServerCap());\n}\n\n/*\n * Testing: Server Greetings Protocol 10 Server Capabilities only\n */\nTEST_F(MySQLCodecTest, MySQLServerChallengeP10ServerCapOnly) {\n  ServerGreeting mysql_greet_encode{};\n  
mysql_greet_encode.setProtocol(MYSQL_PROTOCOL_10);\n  std::string ver(MySQLTestUtils::getVersion());\n  mysql_greet_encode.setVersion(ver);\n  mysql_greet_encode.setThreadId(MYSQL_THREAD_ID);\n  std::string salt(MySQLTestUtils::getSalt());\n  mysql_greet_encode.setSalt(salt);\n  mysql_greet_encode.setServerCap(MYSQL_SERVER_CAPAB);\n  std::string data = mysql_greet_encode.encode();\n  int incomplete_size = sizeof(MYSQL_PROTOCOL_9) + ver.length() + 1 + sizeof(MYSQL_THREAD_ID) +\n                        salt.length() + 1 + sizeof(MYSQL_SERVER_CAPAB);\n  data = data.substr(0, incomplete_size);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ServerGreeting mysql_greet_decode{};\n  mysql_greet_decode.decode(*decode_data, GREETING_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_greet_decode.getSalt(), mysql_greet_encode.getSalt());\n  EXPECT_EQ(mysql_greet_decode.getVersion(), mysql_greet_encode.getVersion());\n  EXPECT_EQ(mysql_greet_decode.getProtocol(), mysql_greet_encode.getProtocol());\n  EXPECT_EQ(mysql_greet_decode.getThreadId(), mysql_greet_encode.getThreadId());\n  EXPECT_EQ(mysql_greet_decode.getExtServerCap(), mysql_greet_encode.getExtServerCap());\n}\n\n/*\n * Test the MYSQL Client Login 41 message parser:\n * - message is encoded using the ClientLogin class\n *   - CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA set\n * - message is decoded using the ClientLogin class\n */\nTEST_F(MySQLCodecTest, MySQLClLoginV41PluginAuthEncDec) {\n  ClientLogin mysql_clogin_encode{};\n  uint16_t client_capab = 0;\n  client_capab |= (MYSQL_CLIENT_CONNECT_WITH_DB | MYSQL_CLIENT_CAPAB_41VS320);\n  mysql_clogin_encode.setClientCap(client_capab);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  
mysql_clogin_encode.setAuthResp(passwd);\n  std::string db = \"mysql_db\";\n  mysql_clogin_encode.setDb(db);\n  std::string data = mysql_clogin_encode.encode();\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.isResponse41(), true);\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), mysql_clogin_encode.getAuthResp());\n  EXPECT_EQ(mysql_clogin_decode.getDb(), mysql_clogin_encode.getDb());\n}\n\n/*\n * Test the MYSQL Client Login 41 message parser:\n * - message is encoded using the ClientLogin class\n *   - CLIENT_SECURE_CONNECTION set\n * - message is decoded using the ClientLogin class\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin41SecureConnEncDec) {\n  ClientLogin mysql_clogin_encode{};\n  uint16_t client_capab = 0;\n  client_capab |= (MYSQL_CLIENT_CONNECT_WITH_DB | MYSQL_CLIENT_CAPAB_41VS320);\n  mysql_clogin_encode.setClientCap(client_capab);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_SECURE_CONNECTION);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n  std::string db = \"mysql_db\";\n  mysql_clogin_encode.setDb(db);\n  std::string data = mysql_clogin_encode.encode();\n\n  Buffer::InstancePtr decode_data(new 
Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.isResponse41(), true);\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), mysql_clogin_encode.getAuthResp());\n  EXPECT_EQ(mysql_clogin_decode.getDb(), mysql_clogin_encode.getDb());\n}\n\n/*\n * Test the MYSQL Client Login 41 message parser:\n * - message is encoded using the ClientLogin class\n * - message is decoded using the ClientLogin class\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin41EncDec) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(MYSQL_CLIENT_CAPAB_41VS320);\n  mysql_clogin_encode.setExtendedClientCap(0);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n  std::string data = mysql_clogin_encode.encode();\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.isResponse41(), true);\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), 
mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), mysql_clogin_encode.getAuthResp());\n}\n\n/*\n * Test the MYSQL Client Login 320 message parser:\n * - message is encoded using the ClientLogin class\n * - message is decoded using the ClientLogin class\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin320EncDec) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n\n  std::string data = mysql_clogin_encode.encode();\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.isResponse320(), true);\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), mysql_clogin_encode.getAuthResp());\n}\n\nTEST_F(MySQLCodecTest, MySQLParseLengthEncodedInteger) {\n  {\n    // encode 2 byte value\n    Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n    uint64_t input_val = 5;\n    uint64_t output_val = 0;\n    
BufferHelper::addUint8(*buffer, LENENCODINT_2BYTES);\n    BufferHelper::addUint16(*buffer, input_val);\n    EXPECT_EQ(BufferHelper::readLengthEncodedInteger(*buffer, output_val), MYSQL_SUCCESS);\n    EXPECT_EQ(input_val, output_val);\n  }\n\n  {\n    // encode 3 byte value\n    Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n    uint64_t input_val = 5;\n    uint64_t output_val = 0;\n    BufferHelper::addUint8(*buffer, LENENCODINT_3BYTES);\n    BufferHelper::addUint16(*buffer, input_val);\n    BufferHelper::addUint8(*buffer, 0);\n    EXPECT_EQ(BufferHelper::readLengthEncodedInteger(*buffer, output_val), MYSQL_SUCCESS);\n    EXPECT_EQ(input_val, output_val);\n  }\n\n  {\n    // encode 8 byte value\n    Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n    uint64_t input_val = 5;\n    uint64_t output_val = 0;\n    BufferHelper::addUint8(*buffer, LENENCODINT_8BYTES);\n    BufferHelper::addUint32(*buffer, input_val);\n    BufferHelper::addUint32(*buffer, 0);\n    EXPECT_EQ(BufferHelper::readLengthEncodedInteger(*buffer, output_val), MYSQL_SUCCESS);\n    EXPECT_EQ(input_val, output_val);\n  }\n\n  {\n    // encode invalid length header\n    Buffer::InstancePtr buffer(new Buffer::OwnedImpl());\n    uint64_t input_val = 5;\n    uint64_t output_val = 0;\n    BufferHelper::addUint8(*buffer, 0xff);\n    BufferHelper::addUint32(*buffer, input_val);\n    EXPECT_EQ(BufferHelper::readLengthEncodedInteger(*buffer, output_val), MYSQL_FAILURE);\n  }\n}\n\n/*\n * Negative Test the MYSQL Client Login 320 message parser:\n * Incomplete header at Client Capability\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin320IncompleteClientCap) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  std::string data = mysql_clogin_encode.encode();\n  int client_cap_len = sizeof(uint8_t);\n  data = data.substr(0, client_cap_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  
mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), 0);\n}\n\n/*\n * Negative Test the MYSQL Client Login 320 message parser:\n * Incomplete header at Extended Client Capability\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin320IncompleteExtClientCap) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t);\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), 0);\n}\n\n/*\n * Negative Test the MYSQL Client Login 320 message parser:\n * Incomplete header at Max Packet\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin320IncompleteMaxPacket) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t) + sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), 0);\n}\n\n/*\n * Negative Test the MYSQL Client Login 320 message parser:\n * Incomplete header at Charset\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin320IncompleteCharset) {\n 
 ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len =\n      sizeof(uint16_t) + sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA) + sizeof(MYSQL_MAX_PACKET);\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), 0);\n}\n\n/*\n * Negative Test the MYSQL Client Login 320 message parser:\n * Incomplete header at Unset bytes\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin320IncompleteUnsetBytes) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t) + sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA) +\n                       sizeof(MYSQL_MAX_PACKET) + sizeof(MYSQL_CHARSET);\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  
EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n}\n\n/*\n * Negative Test the MYSQL Client Login 320 message parser:\n * Incomplete header at username\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin320IncompleteUser) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t) + sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA) +\n                       sizeof(MYSQL_MAX_PACKET) + sizeof(MYSQL_CHARSET) + UNSET_BYTES;\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), \"\");\n}\n\n/*\n * Negative Test the MYSQL Client Login 320 message parser:\n * Incomplete header at authlen\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin320IncompleteAuthLen) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  
mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t) + sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA) +\n                       sizeof(MYSQL_MAX_PACKET) + sizeof(MYSQL_CHARSET) + UNSET_BYTES +\n                       user.length() + 1;\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), \"\");\n}\n\n/*\n * Negative Test the MYSQL Client Login 320 message parser:\n * Incomplete header at \"authpasswd\"\n */\nTEST_F(MySQLCodecTest, MySQLClientLogin320IncompleteAuthPasswd) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t) + 
sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA) +\n                       sizeof(MYSQL_MAX_PACKET) + sizeof(MYSQL_CHARSET) + UNSET_BYTES +\n                       user.length() + 3;\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), \"\");\n}\n\n/*\n * Negative Test the MYSQL Client SSL login message parser:\n * Incomplete header at authlen\n */\nTEST_F(MySQLCodecTest, MySQLClientSSLLoginIncompleteAuthLen) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_SECURE_CONNECTION);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t) + sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA) +\n                       sizeof(MYSQL_MAX_PACKET) + sizeof(MYSQL_CHARSET) + UNSET_BYTES +\n                       user.length() + 1;\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, 
decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), \"\");\n}\n\n/*\n * Negative Test the MYSQL Client SSL login message parser:\n * Incomplete header at username\n */\nTEST_F(MySQLCodecTest, MySQLClientSSLLoginIncompleteAuthPasswd) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_SECURE_CONNECTION);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t) + sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA) +\n                       sizeof(MYSQL_MAX_PACKET) + sizeof(MYSQL_CHARSET) + UNSET_BYTES +\n                       user.length() + 3;\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  
EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), \"\");\n}\n\n/*\n * Negative Test the MYSQL Client login message parser:\n * Incomplete auth len\n */\nTEST_F(MySQLCodecTest, MySQLClientLoginIncompleteAuthPasswd) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(0);\n  mysql_clogin_encode.setExtendedClientCap(0);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t) + sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA) +\n                       sizeof(MYSQL_MAX_PACKET) + sizeof(MYSQL_CHARSET) + UNSET_BYTES +\n                       user.length() + 3;\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), \"\");\n}\n\n/*\n * Negative Test the MYSQL Client login message parser:\n * Incomplete auth len\n */\nTEST_F(MySQLCodecTest, MySQLClientLoginIncompleteConnectDb) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(MYSQL_CLIENT_CONNECT_WITH_DB);\n  
mysql_clogin_encode.setExtendedClientCap(0);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n  std::string data = mysql_clogin_encode.encode();\n  int incomplete_len = sizeof(uint16_t) + sizeof(MYSQL_EXT_CL_PLG_AUTH_CL_DATA) +\n                       sizeof(MYSQL_MAX_PACKET) + sizeof(MYSQL_CHARSET) + UNSET_BYTES +\n                       user.length() + 3 + user.length() + 2;\n  data = data.substr(0, incomplete_len);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n  EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset());\n  EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername());\n  EXPECT_EQ(mysql_clogin_decode.getAuthResp(), mysql_clogin_encode.getAuthResp());\n}\n\n/*\n * Test the MYSQL Client Login SSL message parser:\n * - message is encoded using the ClientLogin class\n * - message is decoded using the ClientLogin class\n */\nTEST_F(MySQLCodecTest, MySQLClientLoginSSLEncDec) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(MYSQL_CLIENT_CAPAB_SSL | MYSQL_CLIENT_CAPAB_41VS320);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CL_PLG_AUTH_CL_DATA);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  std::string user(\"user1\");\n  mysql_clogin_encode.setUsername(user);\n  std::string passwd = 
MySQLTestUtils::getAuthResp();\n  mysql_clogin_encode.setAuthResp(passwd);\n  std::string data = mysql_clogin_encode.encode();\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLogin mysql_clogin_decode{};\n  mysql_clogin_decode.decode(*decode_data, CHALLENGE_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_clogin_decode.isSSLRequest(), true);\n  EXPECT_EQ(mysql_clogin_decode.getClientCap(), mysql_clogin_encode.getClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getExtendedClientCap(), mysql_clogin_encode.getExtendedClientCap());\n  EXPECT_EQ(mysql_clogin_decode.getMaxPacket(), mysql_clogin_encode.getMaxPacket());\n}\n\n/*\n * Test the MYSQL Server Login OK message parser:\n * - message is encoded using the ClientLoginResponse class\n * - message is decoded using the ClientLoginResponse class\n */\nTEST_F(MySQLCodecTest, MySQLLoginOkEncDec) {\n  ClientLoginResponse mysql_loginok_encode{};\n  mysql_loginok_encode.setRespCode(MYSQL_UT_RESP_OK);\n  mysql_loginok_encode.setAffectedRows(1);\n  mysql_loginok_encode.setLastInsertId(MYSQL_UT_LAST_ID);\n  mysql_loginok_encode.setServerStatus(MYSQL_UT_SERVER_OK);\n  mysql_loginok_encode.setWarnings(MYSQL_UT_SERVER_WARNINGS);\n  std::string data = mysql_loginok_encode.encode();\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLoginResponse mysql_loginok_decode{};\n  mysql_loginok_decode.decode(*decode_data, CHALLENGE_RESP_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_loginok_decode.getRespCode(), mysql_loginok_encode.getRespCode());\n  EXPECT_EQ(mysql_loginok_decode.getAffectedRows(), mysql_loginok_encode.getAffectedRows());\n  EXPECT_EQ(mysql_loginok_decode.getLastInsertId(), mysql_loginok_encode.getLastInsertId());\n  EXPECT_EQ(mysql_loginok_decode.getServerStatus(), mysql_loginok_encode.getServerStatus());\n  EXPECT_EQ(mysql_loginok_decode.getWarnings(), mysql_loginok_encode.getWarnings());\n}\n\n/*\n * Test the MYSQL Server Login Old Auth Switch message parser:\n 
* - message is encoded using the ClientLoginResponse class\n * - message is decoded using the ClientLoginResponse class\n */\nTEST_F(MySQLCodecTest, MySQLLoginOldAuthSwitch) {\n  ClientLoginResponse mysql_loginok_encode{};\n  mysql_loginok_encode.setRespCode(MYSQL_RESP_AUTH_SWITCH);\n  std::string data = mysql_loginok_encode.encode();\n  data = data.substr(0, 1);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLoginResponse mysql_loginok_decode{};\n  mysql_loginok_decode.decode(*decode_data, CHALLENGE_RESP_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_loginok_decode.getRespCode(), mysql_loginok_encode.getRespCode());\n}\n\n/*\n * Negative Test the MYSQL Server Login OK message parser:\n * - incomplete Client Login OK response\n */\nTEST_F(MySQLCodecTest, MySQLLoginOkIncompleteRespCode) {\n  ClientLoginResponse mysql_loginok_encode{};\n  mysql_loginok_encode.setRespCode(MYSQL_UT_RESP_OK);\n  std::string data;\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLoginResponse mysql_loginok_decode{};\n  mysql_loginok_decode.decode(*decode_data, CHALLENGE_RESP_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_loginok_decode.getRespCode(), 0);\n}\n\n/*\n * Negative Test the MYSQL Server Login OK message parser:\n * - incomplete Client Login OK affected rows\n */\nTEST_F(MySQLCodecTest, MySQLLoginOkIncompleteAffectedRows) {\n  ClientLoginResponse mysql_loginok_encode{};\n  mysql_loginok_encode.setRespCode(MYSQL_UT_RESP_OK);\n  mysql_loginok_encode.setAffectedRows(1);\n  std::string data = mysql_loginok_encode.encode();\n  data = data.substr(0, 1);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLoginResponse mysql_loginok_decode{};\n  mysql_loginok_decode.decode(*decode_data, CHALLENGE_RESP_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_loginok_decode.getRespCode(), mysql_loginok_encode.getRespCode());\n}\n\n/*\n * Negative Test the MYSQL Server Login OK message parser:\n * - 
incomplete Client Login OK last insert id\n */\nTEST_F(MySQLCodecTest, MySQLLoginOkIncompleteLastInsertId) {\n  ClientLoginResponse mysql_loginok_encode{};\n  mysql_loginok_encode.setRespCode(MYSQL_UT_RESP_OK);\n  mysql_loginok_encode.setAffectedRows(1);\n  mysql_loginok_encode.setLastInsertId(MYSQL_UT_LAST_ID);\n  std::string data = mysql_loginok_encode.encode();\n  data = data.substr(0, 2);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLoginResponse mysql_loginok_decode{};\n  mysql_loginok_decode.decode(*decode_data, CHALLENGE_RESP_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_loginok_decode.getRespCode(), mysql_loginok_encode.getRespCode());\n  EXPECT_EQ(mysql_loginok_decode.getAffectedRows(), mysql_loginok_encode.getAffectedRows());\n}\n\n/*\n * Negative Test the MYSQL Server Login OK message parser:\n * - incomplete Client Login OK server status\n */\nTEST_F(MySQLCodecTest, MySQLLoginOkIncompleteServerStatus) {\n  ClientLoginResponse mysql_loginok_encode{};\n  mysql_loginok_encode.setRespCode(MYSQL_UT_RESP_OK);\n  mysql_loginok_encode.setAffectedRows(1);\n  mysql_loginok_encode.setLastInsertId(MYSQL_UT_LAST_ID);\n  mysql_loginok_encode.setServerStatus(MYSQL_UT_SERVER_OK);\n  std::string data = mysql_loginok_encode.encode();\n  data = data.substr(0, 3);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLoginResponse mysql_loginok_decode{};\n  mysql_loginok_decode.decode(*decode_data, CHALLENGE_RESP_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_loginok_decode.getRespCode(), mysql_loginok_encode.getRespCode());\n  EXPECT_EQ(mysql_loginok_decode.getAffectedRows(), mysql_loginok_encode.getAffectedRows());\n  EXPECT_EQ(mysql_loginok_decode.getLastInsertId(), mysql_loginok_encode.getLastInsertId());\n  EXPECT_EQ(mysql_loginok_decode.getServerStatus(), 0);\n}\n\n/*\n * Negative Test the MYSQL Server Login OK message parser:\n * - incomplete Client Login OK warnings\n */\nTEST_F(MySQLCodecTest, 
MySQLLoginOkIncompleteWarnings) {\n  ClientLoginResponse mysql_loginok_encode{};\n  mysql_loginok_encode.setRespCode(MYSQL_UT_RESP_OK);\n  mysql_loginok_encode.setAffectedRows(1);\n  mysql_loginok_encode.setLastInsertId(MYSQL_UT_LAST_ID);\n  mysql_loginok_encode.setServerStatus(MYSQL_UT_SERVER_OK);\n  mysql_loginok_encode.setWarnings(MYSQL_UT_SERVER_WARNINGS);\n  std::string data = mysql_loginok_encode.encode();\n  data = data.substr(0, 5);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  ClientLoginResponse mysql_loginok_decode{};\n  mysql_loginok_decode.decode(*decode_data, CHALLENGE_RESP_SEQ_NUM, decode_data->length());\n  EXPECT_EQ(mysql_loginok_decode.getRespCode(), mysql_loginok_encode.getRespCode());\n  EXPECT_EQ(mysql_loginok_decode.getAffectedRows(), mysql_loginok_encode.getAffectedRows());\n  EXPECT_EQ(mysql_loginok_decode.getLastInsertId(), mysql_loginok_encode.getLastInsertId());\n  EXPECT_EQ(mysql_loginok_decode.getServerStatus(), mysql_loginok_encode.getServerStatus());\n  EXPECT_EQ(mysql_loginok_decode.getWarnings(), 0);\n}\n\nTEST_F(MySQLCodecTest, MySQLCommandError) {\n  Command mysql_cmd_encode{};\n  std::string data = mysql_cmd_encode.encode();\n  data = \"\";\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(data));\n  Command mysql_cmd_decode{};\n  decode_data->drain(4);\n  mysql_cmd_decode.decode(*decode_data, 0, 0);\n  EXPECT_EQ(mysql_cmd_decode.getCmd(), Command::Cmd::Null);\n}\n\nTEST_F(MySQLCodecTest, MySQLCommandInitDb) {\n  Command mysql_cmd_encode{};\n  mysql_cmd_encode.setCmd(Command::Cmd::InitDb);\n  std::string db = \"mysqlDB\";\n  mysql_cmd_encode.setData(db);\n  std::string data = mysql_cmd_encode.encode();\n\n  std::string mysql_msg = BufferHelper::encodeHdr(data, 0);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(mysql_msg));\n  Command mysql_cmd_decode{};\n  decode_data->drain(4);\n  mysql_cmd_decode.decode(*decode_data, 0, db.length() + 1);\n  EXPECT_EQ(mysql_cmd_decode.getDb(), 
db);\n}\n\nTEST_F(MySQLCodecTest, MySQLCommandCreateDb) {\n  Command mysql_cmd_encode{};\n  mysql_cmd_encode.setCmd(Command::Cmd::CreateDb);\n  std::string db = \"mysqlDB\";\n  mysql_cmd_encode.setData(db);\n  std::string data = mysql_cmd_encode.encode();\n\n  std::string mysql_msg = BufferHelper::encodeHdr(data, 0);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(mysql_msg));\n  Command mysql_cmd_decode{};\n  decode_data->drain(4);\n  mysql_cmd_decode.decode(*decode_data, 0, db.length() + 1);\n  EXPECT_EQ(mysql_cmd_decode.getDb(), db);\n}\n\nTEST_F(MySQLCodecTest, MySQLCommandDropDb) {\n  Command mysql_cmd_encode{};\n  mysql_cmd_encode.setCmd(Command::Cmd::DropDb);\n  std::string db = \"mysqlDB\";\n  mysql_cmd_encode.setData(db);\n  std::string data = mysql_cmd_encode.encode();\n\n  std::string mysql_msg = BufferHelper::encodeHdr(data, 0);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(mysql_msg));\n  Command mysql_cmd_decode{};\n  decode_data->drain(4);\n  mysql_cmd_decode.decode(*decode_data, 0, db.length() + 1);\n  EXPECT_EQ(mysql_cmd_decode.getDb(), db);\n}\n\nTEST_F(MySQLCodecTest, MySQLCommandOther) {\n  Command mysql_cmd_encode{};\n  mysql_cmd_encode.setCmd(Command::Cmd::FieldList);\n  std::string data = mysql_cmd_encode.encode();\n\n  std::string mysql_msg = BufferHelper::encodeHdr(data, 0);\n\n  Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(mysql_msg));\n  Command mysql_cmd_decode{};\n  decode_data->drain(4);\n  mysql_cmd_decode.decode(*decode_data, 0, 0);\n  EXPECT_EQ(mysql_cmd_decode.getCmd(), Command::Cmd::FieldList);\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mysql_proxy/mysql_command_test.cc",
    "content": "#include <cstdint>\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_command.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_greeting.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"include/sqlparser/SQLParser.h\"\n#include \"mysql_test_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nclass MySQLCommandTest : public testing::Test, public MySQLTestUtils {\npublic:\n  int encodeQuery(std::string query, hsql::SQLParserResult& result) {\n    Command mysql_cmd_encode{};\n    Command mysql_cmd_decode{};\n    uint8_t seq = 0u;\n    uint32_t len = 0u;\n    mysql_cmd_encode.setCmd(Command::Cmd::Query);\n    mysql_cmd_encode.setData(query);\n    std::string data = mysql_cmd_encode.encode();\n    std::string mysql_msg = BufferHelper::encodeHdr(data, 0);\n\n    Buffer::InstancePtr decode_data(new Buffer::OwnedImpl(mysql_msg));\n    if (BufferHelper::peekHdr(*decode_data, len, seq) != MYSQL_SUCCESS) {\n      return MYSQL_FAILURE;\n    }\n    BufferHelper::consumeHdr(*decode_data);\n    if (mysql_cmd_decode.decode(*decode_data, seq, len) != MYSQL_SUCCESS) {\n      return MYSQL_FAILURE;\n    }\n    hsql::SQLParser::parse(mysql_cmd_decode.getData(), &result);\n    return MYSQL_SUCCESS;\n  }\n\n  enum TestResource {\n    TABLE,\n    DB,\n    SCHEMA,\n    EVENT,\n    INDEX,\n  };\n\n  const std::string SPACE = \" \";\n  const std::string FROM = \"FROM \";\n  const std::string INTO = \"INTO \";\n  const std::string IF_EXISTS = \"IF EXISTS \";\n  const std::string IF_NOT_EXISTS = \"IF NOT EXISTS \";\n\n  std::string buildShow(std::string resource) {\n    std::string 
command(\"SHOW \");\n    command.append(resource);\n    return command;\n  }\n\n  std::string buildUse(std::string db) {\n    std::string command(\"USE \");\n    command.append(db);\n    return command;\n  }\n\n  // CREATE table\n  std::string buildCreate(enum TestResource res, std::string option, bool if_not_exists,\n                          std::string res_name, std::string value) {\n    std::string command(\"CREATE \");\n    if (!option.empty()) {\n      command.append(option);\n      command.append(SPACE);\n    }\n    switch (res) {\n    case TABLE:\n      command.append(\"TABLE \");\n      break;\n    case DB:\n      command.append(\"DATABASE \");\n      break;\n    case EVENT:\n      command.append(\"EVENT \");\n      break;\n    case INDEX:\n      command.append(\"INDEX \");\n      break;\n    default:\n      return command;\n    }\n    if (if_not_exists) {\n      command.append(IF_NOT_EXISTS);\n    }\n    command.append(res_name);\n    command.append(SPACE);\n    command.append(value);\n    return command;\n  }\n\n  // ALTER a resource\n  std::string buildAlter(enum TestResource res, std::string res_name, std::string values) {\n    std::string command(\"ALTER \");\n    switch (res) {\n    case TABLE:\n      command.append(\"TABLE \");\n      break;\n    case DB:\n      command.append(\"DATABASE \");\n      break;\n    case SCHEMA:\n      command.append(\"SCHEMA \");\n      break;\n    default:\n      return command;\n    }\n    command.append(res_name);\n    command.append(SPACE);\n    command.append(values);\n    return command;\n  }\n\n  // UPDATE\n  std::string buildUpdate(std::string table, std::string option, std::string set_value) {\n    std::string command(\"UPDATE \");\n    command.append(option);\n    command.append(SPACE);\n    command.append(table);\n    command.append(SPACE);\n    command.append(set_value);\n    return command;\n  }\n\n  // DROP Resource\n  std::string buildDrop(enum TestResource res, bool if_exists, std::string res_name) {\n   
 std::string command(\"DROP \");\n    switch (res) {\n    case TABLE:\n      command.append(\"TABLE \");\n      break;\n    case DB:\n      command.append(\"DATABASE \");\n      break;\n    case EVENT:\n      command.append(\"SCHEMA \");\n      break;\n    default:\n      return command;\n    }\n    if (if_exists) {\n      command.append(IF_EXISTS);\n    }\n    command.append(res_name);\n    return command;\n  }\n\n  //\"INSERT INTO <table> ...\n  std::string buildInsert(std::string option, bool into, std::string table, std::string values) {\n    std::string command(\"INSERT \");\n    if (!option.empty()) {\n      command.append(option);\n      command.append(SPACE);\n    }\n    if (into) {\n      command.append(INTO);\n    }\n    command.append(table);\n    command.append(SPACE);\n    command.append(values);\n    return command;\n  }\n\n  // DELETE FROM <table> ...\n  std::string buildDelete(std::string option, std::string table, std::string values) {\n    std::string command(\"DELETE \");\n    command.append(option);\n    command.append(SPACE);\n    command.append(FROM);\n    command.append(table);\n    command.append(SPACE);\n    command.append(values);\n    return command;\n  }\n\n  // SELECT FROM <table> ...\n  std::string buildSelect(std::string select_fields, std::string table, std::string where_clause) {\n    std::string command(\"SELECT \");\n    command.append(select_fields);\n    command.append(SPACE);\n    command.append(FROM);\n    command.append(table);\n    command.append(SPACE);\n    command.append(where_clause);\n    return command;\n  }\n\n  void expectStatementTypeAndTableAccessMap(const hsql::SQLParserResult& result,\n                                            hsql::StatementType statement_type,\n                                            const hsql::TableAccessMap& expected_table_access_map) {\n    EXPECT_TRUE(result.isValid());\n    EXPECT_EQ(1UL, result.size());\n    EXPECT_EQ(statement_type, result.getStatement(0)->type());\n    
hsql::TableAccessMap table_access_map;\n    if (expected_table_access_map.empty() && (statement_type == hsql::StatementType::kStmtShow)) {\n      return;\n    }\n    result.getStatement(0)->tablesAccessed(table_access_map);\n    EXPECT_EQ(table_access_map, expected_table_access_map);\n  }\n};\n\n/*\n * Tests query: \"show databases\"\n */\nTEST_F(MySQLCommandTest, MySQLTest1) {\n  std::string command = buildShow(\"databases\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtShow, {});\n}\n\n/*\n * Tests query: \"show tables\"\n */\nTEST_F(MySQLCommandTest, MySQLTest2) {\n  std::string command = buildShow(\"tables\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtShow, {});\n}\n\n/*\n * \"CREATE table IF NOT EXISTS <table>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest3) {\n  std::string table = \"table1\";\n  std::string value = \"(Usr VARCHAR(40),Count INT);\";\n  std::string command = buildCreate(TestResource::TABLE, \"\", true, table, value);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate,\n                                       {{table, {\"create\"}}});\n}\n\n/*\n * Tests query with optional cmd and quotes:\n * \"CREATE table IF NOT EXISTS <table>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest4) {\n  std::string table = \"\\\"table1\\\"\";\n  std::string value = \"(Usr VARCHAR(40),Count INT);\";\n  hsql::SQLParserResult result;\n  std::string command = buildCreate(TestResource::TABLE, \"\", true, table, value);\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate,\n                                       {{\"table1\", 
{\"create\"}}});\n}\n\n/*\n * Tests query with optional cmd and backticks:\n * \"CREATE table IF NOT EXISTS <table>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest5) {\n  std::string table = \"`table1`\";\n  std::string value = \"(Usr VARCHAR(40),Count INT);\";\n  std::string command = buildCreate(TestResource::TABLE, \"\", true, table, value);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate,\n                                       {{\"table1\", {\"create\"}}});\n}\n\n/*\n * Tests query with optional cmd:\n * \"CREATE table IF NOT EXISTS <table_name_with_spaces>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest6) {\n  std::string table = \"\\\"table 1\\\"\";\n  std::string value = \"(Usr VARCHAR(40),Count INT);\";\n  hsql::SQLParserResult result;\n  std::string command = buildCreate(TestResource::TABLE, \"\", true, table, value);\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate,\n                                       {{\"table 1\", {\"create\"}}});\n}\n\n/*\n * Tests query with optional cmd:\n * \"CREATE table IF NOT EXISTS <table_name_with_2_spaces>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest7) {\n  std::string table = \"`table number 1`\";\n  std::string value = \"(Usr VARCHAR(40),Count INT);\";\n  std::string command = buildCreate(TestResource::TABLE, \"\", true, table, value);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate,\n                                       {{\"table number 1\", {\"create\"}}});\n}\n\n/*\n * Test query with optional cmd:\n * \"CREATE table IF NOT EXISTS <table_name_with_multi_spaces>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest8) {\n  std::string table = \"`my sql table number 1`\";\n  std::string value = \"(Usr 
VARCHAR(40),Count INT);\";\n  std::string command = buildCreate(TestResource::TABLE, \"\", true, table, value);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate,\n                                       {{\"my sql table number 1\", {\"create\"}}});\n}\n\n/*\n * Test query with optional cmd and backticks name delimiters\n * \"CREATE table IF NOT EXISTS <table_name_with_multi_spaces_backticks>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest9) {\n  std::string table = \"`my sql table number 1`\";\n  std::string value = \"(Usr VARCHAR(40),Count INT);\";\n  std::string command = buildCreate(TestResource::TABLE, \"\", true, table, value);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate,\n                                       {{\"my sql table number 1\", {\"create\"}}});\n}\n\n/*\n * Test query: \"CREATE table <table>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest10) {\n  std::string table = \"table1\";\n  std::string value = \"(Usr VARCHAR(40),Count INT);\";\n  std::string command = buildCreate(TestResource::TABLE, \"\", false, table, value);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate,\n                                       {{table, {\"create\"}}});\n}\n\n/*\n * Negative Test query: \"CREATE <table>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest11) {\n  std::string table = \"table1\";\n  std::string command = \"CREATE \";\n  command.append(table);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  EXPECT_EQ(false, result.isValid());\n}\n\n/*\n * Test query with optional cmd:\n * \"CREATE TEMPORARY table <table>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest12) {\n  
std::string table = \"table1\";\n  std::string value = \"(Usr VARCHAR(40),Count INT);\";\n  std::string command = buildCreate(TestResource::TABLE, \"TEMPORARY\", false, table, value);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate,\n                                       {{\"table1\", {\"create\"}}});\n}\n\n/*\n * Test query: \"CREATE DATABASE <DB>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest13) {\n  std::string db = \"mysqldb\";\n  std::string command = buildCreate(TestResource::DB, \"\", false, db, \"\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate, {});\n}\n\n/*\n * Test query with optional cmd:\n * \"CREATE DATABASE IF NOT EXISTS <DB>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest14) {\n  std::string db = \"mysqldb\";\n  std::string command = buildCreate(TestResource::DB, \"\", true, db, \"\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtCreate, {});\n}\n\n/*\n * Test query: \"CREATE EVENT <event>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest15) {\n  std::string event = \"event1\";\n  std::string command = buildCreate(TestResource::EVENT, \"\", false, event, \"\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  EXPECT_EQ(false, result.isValid());\n}\n\n/*\n * Test query: \"ALTER DATABASE <DB> CHARACTER SET charset_name\"\n */\nTEST_F(MySQLCommandTest, MySQLTest16) {\n  std::string db = \"mysqldb\";\n  std::string command = buildAlter(TestResource::DB, db, \"CHARACTER SET charset_name\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, 
hsql::StatementType::kStmtAlter, {});\n}\n\n/*\n * Test query: \"ALTER DATABASE <DB> default CHARACTER SET charset_name\"\n */\nTEST_F(MySQLCommandTest, MySQLTest17) {\n  std::string db = \"mysqldb\";\n  std::string command = buildAlter(TestResource::DB, db, \"default CHARACTER SET charset_name\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtAlter, {});\n}\n\n/*\n * Test query: \"ALTER DATABASE <DB> default CHARACTER SET = charset_name\"\n */\nTEST_F(MySQLCommandTest, MySQLTest18) {\n  std::string db = \"mysqldb\";\n  std::string command = buildAlter(TestResource::DB, db, \"default CHARACTER SET = charset_name\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtAlter, {});\n}\n\n/*\n * Test query: \"ALTER SCHEMA <DB> default CHARACTER SET = charset_name\"\n */\nTEST_F(MySQLCommandTest, MySQLTest19) {\n  std::string db = \"mysqldb\";\n  std::string command =\n      buildAlter(TestResource::SCHEMA, db, \"default CHARACTER SET = charset_name\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtAlter, {});\n}\n\n/*\n * Test query: \"ALTER TABLE <table> add column Id varchar (20)\"\n */\nTEST_F(MySQLCommandTest, MySQLTest20) {\n  std::string table = \"table1\";\n  std::string command = buildAlter(TestResource::TABLE, table, \"add column Id varchar (20)\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtAlter,\n                                       {{table, {\"alter\"}}});\n}\n\n/*\n * Test query: \"DROP DATABASE <DB>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest21) {\n  std::string db = \"mysqldb\";\n  
std::string command = buildDrop(TestResource::DB, false, db);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtDrop, {});\n}\n\n/*\n * Test query with optional cmd:\n * \"DROP DATABASE IF EXISTS <DB>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest22) {\n  std::string db = \"mysqldb\";\n  std::string command = buildDrop(TestResource::DB, true, db);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtDrop, {});\n}\n\n/*\n * Test query with optional cmd:\n * \"DROP TABLE IF EXISTS <Table>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest23) {\n  std::string table = \"table1\";\n  std::string command = buildDrop(TestResource::TABLE, true, table);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtDrop, {{table, {\"drop\"}}});\n}\n\n/*\n * Test query INSERT:\n * \"INSERT INTO <table> (Usr, Count) VALUES ('allsp2', 3)\"\n */\nTEST_F(MySQLCommandTest, MySQLTest24) {\n  std::string table = \"table1\";\n  std::string command = buildInsert(\"\", true, table, \" (Usr, Count) VALUES ('allsp2', 3)\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtInsert,\n                                       {{table, {\"insert\"}}});\n}\n\n/*\n * Test query INSERT with optional parameters:\n * \"INSERT LOW_PRIORITY INTO <table> (Usr, Count) VALUES ('allsp2', 3)\"\n */\nTEST_F(MySQLCommandTest, MySQLTest25) {\n  std::string table = \"table1\";\n  std::string command =\n      buildInsert(\"LOW_PRIORITY\", true, table, \" (Usr, Count) VALUES ('allsp2', 3)\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, 
result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtInsert,\n                                       {{table, {\"insert\"}}});\n}\n\n/*\n * Test query INSERT with optional parameters:\n * \"INSERT IGNORE INTO <table> (Usr, Count) VALUES ('allsp2', 3)\"\n */\nTEST_F(MySQLCommandTest, MySQLTest26) {\n  std::string table = \"table1\";\n  std::string command = buildInsert(\"IGNORE\", true, table, \" (Usr, Count) VALUES ('allsp2', 3)\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtInsert,\n                                       {{table, {\"insert\"}}});\n}\n\n/*\n * Test query DELETE:\n * \"DELETE FROM <table> (Usr, Count) VALUES ('allsp2', 3)\"\n */\nTEST_F(MySQLCommandTest, MySQLTest27) {\n  std::string table = \"table1\";\n  std::string command = buildDelete(\"\", table, \"WHERE Count > 3\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtDelete,\n                                       {{table, {\"delete\"}}});\n}\n\n/*\n * Test query DELETE with optional parameters:\n * \"DELETE LOW_PRIORITY FROM <table> (Usr, Count) VALUES ('allsp2', 3)\"\n */\nTEST_F(MySQLCommandTest, MySQLTest28) {\n  std::string table = \"table1\";\n  std::string command = buildDelete(\"LOW_PRIORITY\", table, \"WHERE Count > 3\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtDelete,\n                                       {{table, {\"delete\"}}});\n}\n\n/*\n * Test query DELETE with optional parameters:\n * \"DELETE QUICK FROM <table> (Usr, Count) VALUES ('allsp2', 3)\"\n */\nTEST_F(MySQLCommandTest, MySQLTest29) {\n  std::string table = \"table1\";\n  std::string command = buildDelete(\"QUICK\", table, 
\"WHERE Count > 3\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtDelete,\n                                       {{table, {\"delete\"}}});\n}\n\n/*\n * Test query DELETE with optional parameters:\n * \"DELETE QUICK FROM <table> (Usr, Count) VALUES ('allsp2', 3)\"\n */\nTEST_F(MySQLCommandTest, MySQLTest30) {\n  std::string table = \"table1\";\n  std::string command = buildDelete(\"IGNORE\", table, \"WHERE Count > 3\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtDelete,\n                                       {{table, {\"delete\"}}});\n}\n\n/*\n * Test query SELECT:\n * \"SELECT * FROM <table> ProductDetails WHERE Count = 1\"\n */\nTEST_F(MySQLCommandTest, MySQLTest31) {\n  std::string table = \"table1\";\n  std::string command = buildSelect(\"*\", table, \"WHERE Count = 1\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtSelect,\n                                       {{table, {\"select\"}}});\n}\n\n/*\n * Test query SELECT:\n * \"SELECT FROM <table> ProductDetails WHERE Count = 1\"\n */\nTEST_F(MySQLCommandTest, MySQLTest32) {\n  std::string table = \"table1\";\n  std::string command = buildSelect(\"Product.category\", table, \"WHERE Count = 1\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtSelect,\n                                       {{table, {\"select\"}}, {\"Product\", {\"unknown\"}}});\n}\n\n/*\n * Test query SELECT:\n * \"SELECT DISTINCT Usr FROM <table>\"\n */\nTEST_F(MySQLCommandTest, MySQLTest33) {\n  std::string table = \"table1\";\n  std::string command = 
buildSelect(\"DISTINCT Usr\", table, \"\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtSelect,\n                                       {{table, {\"select\"}}});\n}\n\n/*\n * Test query SELECT:\n * \"SELECT Usr,Count FROM <table> ORDER BY Count DESC\"\n */\nTEST_F(MySQLCommandTest, MySQLTest34) {\n  std::string table = \"table1\";\n  std::string command = buildSelect(\"Usr,Count\", table, \"ORDER BY Count DESC\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtSelect,\n                                       {{table, {\"select\"}}});\n}\n\n/*\n * Test query SELECT:\n * \"SELECT Usr,Count FROM <table> ORDER BY Count DESC\"\n */\nTEST_F(MySQLCommandTest, MySQLTest35) {\n  std::string table = \"table1\";\n  std::string command = buildSelect(\"Usr,Count\", table, \"ORDER BY Count DESC\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtSelect,\n                                       {{table, {\"select\"}}});\n}\n\n/*\n * Negative Test query: SELECT\n */\nTEST_F(MySQLCommandTest, MySQLTest36) {\n  std::string command = buildSelect(\"\", \"\", \"\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  EXPECT_EQ(false, result.isValid());\n}\n\n/*\n * Test query: SELECT no FROM\n */\nTEST_F(MySQLCommandTest, MySQLTest37) {\n  std::string command = buildSelect(\"USr,Count\", \"\", \"\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  EXPECT_EQ(false, result.isValid());\n}\n\n/*\n * Test correlated queries: INSERT, SELECT\n */\nTEST_F(MySQLCommandTest, MySQLTest38) {\n  // SPELLCHECKER(off)\n  std::string table1 = \"table1\";\n  
std::string table2 = \"table2\";\n  std::string ins_command = buildInsert(\"\", true, table1, \"\");\n  std::string sel_command = buildSelect(\"*\", table2, \"\" /*\"WHERE tbl_temp1.fld_order_id > 100\"*/);\n  ins_command.append(sel_command);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(ins_command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtInsert,\n                                       {{table1, {\"insert\"}}, {table2, {\"select\"}}});\n  // SPELLCHECKER(on)\n}\n\n/*\n * Test not correlated queries: INSERT, SELECT\n */\nTEST_F(MySQLCommandTest, MySQLTest39) {\n  std::string table1 = \"table1\";\n  std::string table2 = \"table2\";\n  std::string ins_command = buildInsert(\"\", true, table1, \"\");\n  std::string sel_command = buildSelect(\"tbl_temp1.fld_order_id\", table2, \"\");\n  ins_command.append(sel_command);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(ins_command, result));\n  expectStatementTypeAndTableAccessMap(\n      result, hsql::StatementType::kStmtInsert,\n      {{\"tbl_temp1\", {\"unknown\"}}, {\"table2\", {\"select\"}}, {\"table1\", {\"insert\"}}});\n}\n\n/*\n * Negative Test query: INSERT, Wrong SELECT\n */\nTEST_F(MySQLCommandTest, MySQLTest40) {\n  std::string table1 = \"table1\";\n  std::string table2 = \"table2\";\n  std::string ins_command = \"INSERT INTO \";\n  std::string ins_command2 = \" (fld_id) \";\n  std::string sel_command =\n      buildSelect(\"tbl_temp1.fld_order_id\", table1, \"WHERE tbl_temp1.fld_order_id > 100;\");\n  ins_command.append(table1);\n  ins_command.append(ins_command2);\n  ins_command.append(sel_command);\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(ins_command, result));\n  EXPECT_EQ(true, result.isValid());\n  EXPECT_EQ(1UL, result.size());\n}\n\n/*\n * Test query: UPDATE\n */\nTEST_F(MySQLCommandTest, MySQLTest41) {\n  std::string table = \"table1\";\n  std::string command = 
buildUpdate(table, \"\", \"SET col1 = col1 + 1\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtUpdate,\n                                       {{table, {\"update\"}}});\n}\n\n/*\n * Test query: UPDATE\n */\nTEST_F(MySQLCommandTest, MySQLTest42) {\n  std::string table = \"table1\";\n  std::string command = buildUpdate(table, \"LOW_PRIORITY\", \"SET col1 = col1 + 1\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtUpdate,\n                                       {{table, {\"update\"}}});\n}\n\n/*\n * Test query: UPDATE\n */\nTEST_F(MySQLCommandTest, MySQLTest43) {\n  std::string table = \"table1\";\n  std::string command = buildUpdate(table, \"IGNORE\", \"SET col1 = col1 + 1\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtUpdate,\n                                       {{table, {\"update\"}}});\n}\n\n/*\n * Test query: UPDATE\n */\nTEST_F(MySQLCommandTest, MySQLTest44) {\n  std::string table = \"table1\";\n  std::string command = buildUpdate(table, \"LOW_PRIORITY IGNORE\", \"SET col1 = col1 + 1\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtUpdate,\n                                       {{table, {\"update\"}}});\n}\n\n/*\n * Test correlated queries: UPDATE, SELECT\n */\nTEST_F(MySQLCommandTest, MySQLTest45) {\n  std::string table1 = \"table1\";\n  std::string table2 = \"table2\";\n  std::string command = buildUpdate(table1, \"\", \"set column1=\");\n  std::string command2 = buildSelect(\"columnX\", table2, \")\");\n  command.append(\"(\");\n  command.append(command2);\n  
hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtUpdate,\n                                       {{table1, {\"update\"}}, {table2, {\"select\"}}});\n}\n\n/*\n * Test query: SELECT\n */\nTEST_F(MySQLCommandTest, MySQLTest46) {\n  std::string table = \"table1\";\n  std::string command = buildSelect(\"12 AS a, a \", table, \"GROUP BY a;\");\n  hsql::SQLParserResult result;\n  EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result));\n  expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtSelect,\n                                       {{table, {\"select\"}}});\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mysql_proxy/mysql_filter_test.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_filter.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\n#include \"test/mocks/network/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mysql_test_utils.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nconstexpr int SESSIONS = 5;\n\nclass MySQLFilterTest : public testing::Test, public MySQLTestUtils {\npublic:\n  MySQLFilterTest() { ENVOY_LOG_MISC(info, \"test\"); }\n\n  void initialize() {\n    config_ = std::make_shared<MySQLFilterConfig>(stat_prefix_, scope_);\n    filter_ = std::make_unique<MySQLFilter>(config_);\n    filter_->initializeReadFilterCallbacks(filter_callbacks_);\n  }\n\n  MySQLFilterConfigSharedPtr config_;\n  std::unique_ptr<MySQLFilter> filter_;\n  Stats::IsolatedStoreImpl scope_;\n  std::string stat_prefix_{\"test.\"};\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n};\n\n// Test New Session counter increment\nTEST_F(MySQLFilterTest, NewSessionStatsTest) {\n  initialize();\n\n  for (int idx = 0; idx < SESSIONS; idx++) {\n    EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  }\n  EXPECT_EQ(SESSIONS, config_->stats().sessions_.value());\n}\n\n// Test that the filter falls back to tcp proxy if it cant decode\nTEST_F(MySQLFilterTest, MySqlFallbackToTcpProxy) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(\" \"));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(1UL, config_->stats().decoder_errors_.value());\n\n  Buffer::InstancePtr more_data(new Buffer::OwnedImpl(\"scooby doo - part 2!\"));\n  
EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*more_data, false));\n}\n\n/**\n * Test MySQL Handshake with protocol version 41\n * SM: greeting(p=10) -> challenge-req(v41) -> serv-resp-ok\n */\nTEST_F(MySQLFilterTest, MySqlHandshake41OkTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp41, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n}\n\n/**\n * Test MySQL Handshake with partial messages.\n * SM: greeting(p=10) -> challenge-req(v41) -> serv-resp-ok\n */\nTEST_F(MySQLFilterTest, MySqlHandshake41PartialMessagesTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n\n  Buffer::InstancePtr greet_data_part_1(\n      new 
Buffer::OwnedImpl(greeting_data.substr(0, greeting_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(*greet_data_part_1, false));\n  EXPECT_EQ(MySQLSession::State::Init, filter_->getSession().getState());\n\n  Buffer::InstancePtr greet_data_part_2(\n      new Buffer::OwnedImpl(greeting_data.substr(greeting_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(*greet_data_part_2, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n\n  Buffer::InstancePtr client_login_data_part_1(\n      new Buffer::OwnedImpl(clogin_data.substr(0, clogin_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue,\n            filter_->onData(*client_login_data_part_1, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  Buffer::InstancePtr client_login_data_part_2(\n      new Buffer::OwnedImpl(clogin_data.substr(clogin_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue,\n            filter_->onData(*client_login_data_part_2, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeResp41, filter_->getSession().getState());\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK);\n\n  Buffer::InstancePtr server_resp_data_part_1(\n      new Buffer::OwnedImpl(srv_resp_data.substr(0, srv_resp_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue,\n            filter_->onWrite(*server_resp_data_part_1, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeResp41, filter_->getSession().getState());\n\n  Buffer::InstancePtr server_resp_data_part_2(\n      new Buffer::OwnedImpl(srv_resp_data.substr(srv_resp_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue,\n            
filter_->onWrite(*server_resp_data_part_2, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n}\n\n/**\n * Test that the filter falls back to tcp proxy if it cant decode partial messages.\n */\nTEST_F(MySQLFilterTest, MySqlFallbackPartialMessagesTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n\n  Buffer::InstancePtr greet_data_part_1(\n      new Buffer::OwnedImpl(greeting_data.substr(0, greeting_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(*greet_data_part_1, false));\n  EXPECT_EQ(MySQLSession::State::Init, filter_->getSession().getState());\n\n  Buffer::InstancePtr corrupt_data(new Buffer::OwnedImpl(\" \"));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*corrupt_data, false));\n  EXPECT_EQ(1UL, config_->stats().decoder_errors_.value());\n\n  Buffer::InstancePtr greet_data_part_2(\n      new Buffer::OwnedImpl(greeting_data.substr(greeting_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(*greet_data_part_2, false));\n  EXPECT_EQ(MySQLSession::State::Init, filter_->getSession().getState());\n\n  std::string clogin_data =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n\n  Buffer::InstancePtr client_login_data_part_1(\n      new Buffer::OwnedImpl(clogin_data.substr(0, clogin_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue,\n            filter_->onData(*client_login_data_part_1, false));\n  EXPECT_EQ(MySQLSession::State::Init, filter_->getSession().getState());\n\n  Buffer::InstancePtr client_login_data_part_2(\n      new Buffer::OwnedImpl(clogin_data.substr(clogin_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue,\n            
filter_->onData(*client_login_data_part_2, false));\n  EXPECT_EQ(MySQLSession::State::Init, filter_->getSession().getState());\n  EXPECT_EQ(0UL, config_->stats().login_attempts_.value());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK);\n\n  Buffer::InstancePtr server_resp_data_part_1(\n      new Buffer::OwnedImpl(srv_resp_data.substr(0, srv_resp_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue,\n            filter_->onWrite(*server_resp_data_part_1, false));\n  EXPECT_EQ(MySQLSession::State::Init, filter_->getSession().getState());\n\n  Buffer::InstancePtr server_resp_data_part_2(\n      new Buffer::OwnedImpl(srv_resp_data.substr(srv_resp_data.length() / 2)));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue,\n            filter_->onWrite(*server_resp_data_part_2, false));\n  EXPECT_EQ(MySQLSession::State::Init, filter_->getSession().getState());\n}\n\n/**\n * Test MySQL Handshake with protocol version 41\n * Server responds with Error\n * SM: greeting(p=10) -> challenge-req(v41) -> serv-resp-err\n */\nTEST_F(MySQLFilterTest, MySqlHandshake41ErrTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  
EXPECT_EQ(MySQLSession::State::ChallengeResp41, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_ERR);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_failures_.value());\n  EXPECT_EQ(MySQLSession::State::Error, filter_->getSession().getState());\n}\n\n/**\n * Test MySQL Handshake with protocol version 320\n * SM: greeting(p=10) -> challenge-req(v320) -> serv-resp-ok\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320OkTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n}\n\n/**\n * Test MySQL Handshake with protocol version 320\n * Server responds with Error\n * SM: greeting(p=10) -> challenge-req(v320) -> 
serv-resp-err\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320ErrTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_ERR);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_failures_.value());\n  EXPECT_EQ(MySQLSession::State::Error, filter_->getSession().getState());\n}\n\n/**\n * Test MySQL Handshake with SSL Request\n * State-machine moves to SSL-Pass-Through\n * SM: greeting(p=10) -> challenge-req(v320) -> SSL_PT\n */\nTEST_F(MySQLFilterTest, MySqlHandshakeSSLTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, 
filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(MYSQL_CLIENT_CAPAB_SSL | MYSQL_CLIENT_CAPAB_41VS320,\n                                              \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(1UL, config_->stats().upgraded_to_ssl_.value());\n  EXPECT_EQ(MySQLSession::State::SslPt, filter_->getSession().getState());\n\n  std::string encr_data = \"!@#$encr$#@!\";\n  std::string mysql_ssl_msg = BufferHelper::encodeHdr(encr_data, 2);\n  Buffer::InstancePtr query_create_index(new Buffer::OwnedImpl(mysql_ssl_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*query_create_index, false));\n  EXPECT_EQ(MySQLSession::State::SslPt, filter_->getSession().getState());\n}\n\n/**\n * Test MySQL Handshake with protocol version 320\n * Server responds with Auth Switch\n * SM: greeting(p=10) -> challenge-req(v320) -> serv-resp-auth-switch ->\n * -> auth_switch_resp -> serv-resp-ok\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320AuthSwitchTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, 
config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_AUTH_SWITCH);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n\n  std::string auth_switch_resp = encodeAuthSwitchResp();\n  Buffer::InstancePtr client_switch_resp(new Buffer::OwnedImpl(auth_switch_resp));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_switch_resp, false));\n  EXPECT_EQ(MySQLSession::State::AuthSwitchMore, filter_->getSession().getState());\n\n  std::string srv_resp_ok_data = encodeClientLoginResp(MYSQL_RESP_OK, 1);\n  Buffer::InstancePtr server_resp_ok_data(new Buffer::OwnedImpl(srv_resp_ok_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_ok_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n}\n\n/**\n * Test MySQL Handshake with protocol version 320\n * Server responds with Auth Switch and error\n * SM: greeting(p=10) -> challenge-req(v320) -> serv-resp-auth-switch ->\n * -> auth_switch_resp -> serv-resp-err\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320AuthSwitchErrTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  
EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_AUTH_SWITCH);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n\n  std::string auth_switch_resp = encodeAuthSwitchResp();\n  Buffer::InstancePtr client_switch_resp(new Buffer::OwnedImpl(auth_switch_resp));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_switch_resp, false));\n  EXPECT_EQ(MySQLSession::State::AuthSwitchMore, filter_->getSession().getState());\n\n  std::string srv_resp_ok_data = encodeClientLoginResp(MYSQL_RESP_ERR, 1);\n  Buffer::InstancePtr server_resp_ok_data(new Buffer::OwnedImpl(srv_resp_ok_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_ok_data, false));\n  EXPECT_EQ(MySQLSession::State::Resync, filter_->getSession().getState());\n\n  Command mysql_cmd_encode{};\n  mysql_cmd_encode.setCmd(Command::Cmd::Query);\n  std::string query = \"CREATE DATABASE mysqldb\";\n  mysql_cmd_encode.setData(query);\n  std::string query_data = mysql_cmd_encode.encode();\n  std::string mysql_msg = BufferHelper::encodeHdr(query_data, 0);\n  Buffer::InstancePtr client_query_data(new Buffer::OwnedImpl(mysql_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_query_data, false));\n  EXPECT_EQ(MySQLSession::State::ReqResp, filter_->getSession().getState());\n  EXPECT_EQ(1UL, config_->stats().queries_parsed_.value());\n}\n\n/**\n * Resync Test failure MySQL Handshake with protocol version 320\n * Server responds with Auth Switch and error\n * SM: greeting(p=10) -> challenge-req(v320) -> serv-resp-auth-switch ->\n * -> auth_switch_resp -> 
serv-resp-err -> Resync fails\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320AuthSwitchErrFailResync) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_AUTH_SWITCH);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n\n  std::string auth_switch_resp = encodeAuthSwitchResp();\n  Buffer::InstancePtr client_switch_resp(new Buffer::OwnedImpl(auth_switch_resp));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_switch_resp, false));\n  EXPECT_EQ(MySQLSession::State::AuthSwitchMore, filter_->getSession().getState());\n\n  std::string srv_resp_ok_data = encodeClientLoginResp(MYSQL_RESP_ERR, 1);\n  Buffer::InstancePtr server_resp_ok_data(new Buffer::OwnedImpl(srv_resp_ok_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_ok_data, false));\n  EXPECT_EQ(MySQLSession::State::Resync, filter_->getSession().getState());\n\n  Command mysql_cmd_encode{};\n  mysql_cmd_encode.setCmd(Command::Cmd::Query);\n  
std::string query = \"CREATE DATABASE mysqldb\";\n  mysql_cmd_encode.setData(query);\n  std::string query_data = mysql_cmd_encode.encode();\n  std::string mysql_msg = BufferHelper::encodeHdr(query_data, 5);\n  Buffer::InstancePtr client_query_data(new Buffer::OwnedImpl(mysql_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_query_data, false));\n  EXPECT_EQ(MySQLSession::State::Resync, filter_->getSession().getState());\n}\n\n/**\n * Negative Testing MySQL Handshake with protocol version 320\n * Server responds with Auth Switch More\n * SM: greeting(p=10) -> challenge-req(v320) -> serv-resp-auth-switch ->\n * -> auth_switch_resp -> serv-resp-auth-switch-more\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320AuthSwitchMoreandMore) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_AUTH_SWITCH);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n\n  std::string auth_switch_resp = encodeAuthSwitchResp();\n  Buffer::InstancePtr 
client_switch_resp(new Buffer::OwnedImpl(auth_switch_resp));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_switch_resp, false));\n  EXPECT_EQ(MySQLSession::State::AuthSwitchMore, filter_->getSession().getState());\n\n  std::string srv_resp_ok_data = encodeClientLoginResp(MYSQL_RESP_MORE, 1);\n  Buffer::InstancePtr server_resp_ok_data(new Buffer::OwnedImpl(srv_resp_ok_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_ok_data, false));\n  EXPECT_EQ(MySQLSession::State::AuthSwitchResp, filter_->getSession().getState());\n}\n\n/**\n * Negative Testing MySQL Handshake with protocol version 320\n * Server responds with unhandled code\n * SM: greeting(p=10) -> challenge-req(v320) -> serv-resp-auth-switch ->\n * -> auth_switch_resp -> serv-resp-unhandled\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320AuthSwitchMoreandUnhandled) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_AUTH_SWITCH);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  
EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n\n  std::string auth_switch_resp = encodeAuthSwitchResp();\n  Buffer::InstancePtr client_switch_resp(new Buffer::OwnedImpl(auth_switch_resp));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_switch_resp, false));\n  EXPECT_EQ(MySQLSession::State::AuthSwitchMore, filter_->getSession().getState());\n\n  std::string srv_resp_ok_data = encodeClientLoginResp(0x32, 1);\n  Buffer::InstancePtr server_resp_ok_data(new Buffer::OwnedImpl(srv_resp_ok_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_ok_data, false));\n  EXPECT_EQ(MySQLSession::State::NotHandled, filter_->getSession().getState());\n}\n\n/**\n * Negative sequence\n * Test MySQL Handshake with protocol version 41\n * - send 2 back-to-back Greeting message (duplicated message)\n * -> expect filter to ignore the second.\n */\nTEST_F(MySQLFilterTest, MySqlHandshake41Ok2GreetTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string greeting_data2 = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data2(new Buffer::OwnedImpl(greeting_data2));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data2, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n  EXPECT_EQ(1UL, config_->stats().protocol_errors_.value());\n\n  std::string clogin_data =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n  
Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(2UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp41, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n}\n\n/**\n * Negative sequence\n * Test MySQL Handshake with protocol version 41\n * - send 2 back-to-back Challenge messages.\n * -> expect the filter to ignore the second\n */\nTEST_F(MySQLFilterTest, MySqlHandshake41Ok2CloginTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp41, filter_->getSession().getState());\n\n  std::string clogin_data2 =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data2(new Buffer::OwnedImpl(clogin_data2));\n  
EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data2, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp41, filter_->getSession().getState());\n  EXPECT_EQ(1UL, config_->stats().protocol_errors_.value());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n}\n\n/**\n * Negative sequence\n * Test MySQL Handshake with protocol version 41\n * - send out of order challenge and greeting messages.\n * -> expect the filter to ignore the challenge,\n *    since greeting was not seen\n */\nTEST_F(MySQLFilterTest, MySqlHandshake41OkOOOLoginTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string clogin_data =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(MySQLSession::State::Init, filter_->getSession().getState());\n  EXPECT_EQ(1UL, config_->stats().protocol_errors_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n}\n\n/**\n * Negative sequence\n * Test MySQL Handshake with protocol version 41\n * - send out of order challenge and greeting messages\n *   followed by login ok\n * -> expect 
the filter to ignore initial challenge as well as\n *    serverOK because out of order\n */\nTEST_F(MySQLFilterTest, MySqlHandshake41OkOOOFullLoginTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string clogin_data =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(MySQLSession::State::Init, filter_->getSession().getState());\n  EXPECT_EQ(1UL, config_->stats().protocol_errors_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n  EXPECT_EQ(2UL, config_->stats().protocol_errors_.value());\n}\n\n/**\n * Negative sequence\n * Test MySQL Handshake with protocol version 41\n * - send greeting messages followed by login ok\n * -> expect filter to ignore serverOK, because it has not\n *    processed Challenge message\n */\nTEST_F(MySQLFilterTest, MySqlHandshake41OkGreetingLoginOKTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  
Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n  EXPECT_EQ(1UL, config_->stats().protocol_errors_.value());\n}\n\n/**\n * Negative Testing\n * Test MySQL Handshake with protocol version 320\n * and wrong Client Login Sequence number\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320WrongCloginSeqTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", 2);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n}\n\n/**\n * Negative Testing\n * Test MySQL Handshake with protocol version 320\n * Server responds with Auth Switch wrong sequence\n * -> expect filter to ignore auth-switch message\n *    because of wrong seq.\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320AuthSwitchWromgSeqTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, 
filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string auth_switch_resp = encodeAuthSwitchResp();\n  Buffer::InstancePtr client_switch_resp(new Buffer::OwnedImpl(auth_switch_resp));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_switch_resp, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_AUTH_SWITCH);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::AuthSwitchResp, filter_->getSession().getState());\n\n  std::string srv_resp_ok_data = encodeClientLoginResp(MYSQL_RESP_OK, 1);\n  Buffer::InstancePtr server_resp_ok_data(new Buffer::OwnedImpl(srv_resp_ok_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_ok_data, false));\n  EXPECT_EQ(MySQLSession::State::AuthSwitchResp, filter_->getSession().getState());\n}\n\n/**\n * Negative Testing\n * Test MySQL Handshake with protocol version 320\n * Server responds with unexpected code\n * -> expect filter to set state to not 
handled\n */\nTEST_F(MySQLFilterTest, MySqlHandshake320WrongServerRespCode) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_ok_data = encodeClientLoginResp(0x53, 0);\n  Buffer::InstancePtr server_resp_ok_data(new Buffer::OwnedImpl(srv_resp_ok_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_ok_data, false));\n  EXPECT_EQ(MySQLSession::State::NotHandled, filter_->getSession().getState());\n\n  std::string msg_data;\n  std::string mysql_msg = BufferHelper::encodeHdr(msg_data, 3);\n  Buffer::InstancePtr client_query_data(new Buffer::OwnedImpl(mysql_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_query_data, false));\n  EXPECT_EQ(MySQLSession::State::NotHandled, filter_->getSession().getState());\n}\n\n/**\n * Negative Testing\n * Invalid Mysql Pkt Hdr\n * -> expect filter to set state to not handled\n */\nTEST_F(MySQLFilterTest, MySqlWrongHdrPkt) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = 
encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data = encodeClientLogin(0, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp320, filter_->getSession().getState());\n\n  std::string srv_resp_ok_data = encodeClientLoginResp(0x53, 0);\n  Buffer::InstancePtr server_resp_ok_data(new Buffer::OwnedImpl(srv_resp_ok_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_ok_data, false));\n  EXPECT_EQ(MySQLSession::State::NotHandled, filter_->getSession().getState());\n\n  Command mysql_cmd_encode{};\n  std::string query_data = mysql_cmd_encode.encode();\n  std::string mysql_msg = \"123\";\n  Buffer::InstancePtr client_query_data(new Buffer::OwnedImpl(mysql_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_query_data, false));\n  EXPECT_EQ(MySQLSession::State::NotHandled, filter_->getSession().getState());\n}\n\n/*\n * Test Mysql query handler, after handshake completes\n * SM: greeting(p=10) -> challenge-req(v41) -> serv-resp-ok ->\n * -> Query-request -> Query-response\n * validate counters and state-machine\n */\nTEST_F(MySQLFilterTest, MySqlLoginAndQueryTest) {\n  initialize();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(1UL, config_->stats().sessions_.value());\n\n  std::string greeting_data = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  Buffer::InstancePtr greet_data(new Buffer::OwnedImpl(greeting_data));\n\n  
EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*greet_data, false));\n  EXPECT_EQ(MySQLSession::State::ChallengeReq, filter_->getSession().getState());\n\n  std::string clogin_data =\n      encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, \"user1\", CHALLENGE_SEQ_NUM);\n  Buffer::InstancePtr client_login_data(new Buffer::OwnedImpl(clogin_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_login_data, false));\n  EXPECT_EQ(1UL, config_->stats().login_attempts_.value());\n  EXPECT_EQ(MySQLSession::State::ChallengeResp41, filter_->getSession().getState());\n\n  std::string srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK);\n  Buffer::InstancePtr server_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*server_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n\n  Command mysql_cmd_encode{};\n  mysql_cmd_encode.setCmd(Command::Cmd::Query);\n  std::string query = \"CREATE DATABASE mysqldb\";\n  mysql_cmd_encode.setData(query);\n  std::string query_data = mysql_cmd_encode.encode();\n  std::string mysql_msg = BufferHelper::encodeHdr(query_data, 0);\n  Buffer::InstancePtr client_query_data(new Buffer::OwnedImpl(mysql_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*client_query_data, false));\n  EXPECT_EQ(MySQLSession::State::ReqResp, filter_->getSession().getState());\n  EXPECT_EQ(1UL, config_->stats().queries_parsed_.value());\n\n  srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK, 0, 1);\n  Buffer::InstancePtr request_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*request_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n\n  mysql_cmd_encode.setCmd(Command::Cmd::Query);\n  query = \"show databases\";\n  mysql_cmd_encode.setData(query);\n  query_data = 
mysql_cmd_encode.encode();\n  mysql_msg = BufferHelper::encodeHdr(query_data, 0);\n  Buffer::InstancePtr query_show(new Buffer::OwnedImpl(mysql_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*query_show, false));\n  EXPECT_EQ(MySQLSession::State::ReqResp, filter_->getSession().getState());\n  EXPECT_EQ(2UL, config_->stats().queries_parsed_.value());\n\n  srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK, 0, 1);\n  Buffer::InstancePtr show_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*show_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n\n  mysql_cmd_encode.setCmd(Command::Cmd::Query);\n  query = \"CREATE TABLE students (name TEXT, student_number INTEGER, city TEXT)\";\n  mysql_cmd_encode.setData(query);\n  query_data = mysql_cmd_encode.encode();\n  mysql_msg = BufferHelper::encodeHdr(query_data, 0);\n  Buffer::InstancePtr query_create(new Buffer::OwnedImpl(mysql_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*query_create, false));\n  EXPECT_EQ(MySQLSession::State::ReqResp, filter_->getSession().getState());\n  EXPECT_EQ(3UL, config_->stats().queries_parsed_.value());\n\n  srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK, 0, 1);\n  Buffer::InstancePtr create_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*create_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n\n  mysql_cmd_encode.setCmd(Command::Cmd::Query);\n  query = \"CREATE index index1\";\n  mysql_cmd_encode.setData(query);\n  query_data = mysql_cmd_encode.encode();\n  mysql_msg = BufferHelper::encodeHdr(query_data, 0);\n  Buffer::InstancePtr query_create_index(new Buffer::OwnedImpl(mysql_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*query_create_index, false));\n  
EXPECT_EQ(MySQLSession::State::ReqResp, filter_->getSession().getState());\n  EXPECT_EQ(3UL, config_->stats().queries_parsed_.value());\n\n  srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK, 0, 1);\n  Buffer::InstancePtr create_index_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue,\n            filter_->onData(*create_index_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n\n  mysql_cmd_encode.setCmd(Command::Cmd::FieldList);\n  query = \"\";\n  mysql_cmd_encode.setData(query);\n  query_data = mysql_cmd_encode.encode();\n  mysql_msg = BufferHelper::encodeHdr(query_data, 0);\n  Buffer::InstancePtr cmd_field_list(new Buffer::OwnedImpl(mysql_msg));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*cmd_field_list, false));\n  EXPECT_EQ(MySQLSession::State::ReqResp, filter_->getSession().getState());\n  EXPECT_EQ(3UL, config_->stats().queries_parsed_.value());\n\n  srv_resp_data = encodeClientLoginResp(MYSQL_RESP_OK, 0, 1);\n  Buffer::InstancePtr field_list_resp_data(new Buffer::OwnedImpl(srv_resp_data));\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(*field_list_resp_data, false));\n  EXPECT_EQ(MySQLSession::State::Req, filter_->getSession().getState());\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc",
    "content": "#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_greeting.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h\"\n\n#include \"test/integration/fake_upstream.h\"\n#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mysql_test_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nconstexpr int SESSIONS = 5;\n\nclass MySQLIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                             public MySQLTestUtils,\n                             public BaseIntegrationTest {\n  std::string mysqlConfig() {\n    return fmt::format(TestEnvironment::readFileToStringForTest(TestEnvironment::runfilesPath(\n                           \"test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml\")),\n                       Platform::null_device_path,\n                       Network::Test::getLoopbackAddressString(GetParam()),\n                       Network::Test::getLoopbackAddressString(GetParam()),\n                       Network::Test::getAnyAddressString(GetParam()));\n  }\n\npublic:\n  MySQLIntegrationTest() : BaseIntegrationTest(GetParam(), mysqlConfig()){};\n\n  void SetUp() override { BaseIntegrationTest::initialize(); }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, MySQLIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));\n\n/**\n * NewSession Test:\n * Attempt a New Session and verify it is received by mysql onNewConnection.\n */\nTEST_P(MySQLIntegrationTest, 
MySQLStatsNewSessionTest) {\n  for (int idx = 0; idx < SESSIONS; idx++) {\n    IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n    FakeRawConnectionPtr fake_upstream_connection;\n    ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n    tcp_client->close();\n    ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  }\n\n  test_server_->waitForCounterGe(\"mysql.mysql_stats.sessions\", SESSIONS);\n}\n\n/**\n * Login Test:\n * Attempt a mysql login and verify it is processed by the filter:\n * Verify counters:\n * - correct number of attempts\n * - no failures\n */\nTEST_P(MySQLIntegrationTest, MySQLLoginTest) {\n  std::string str;\n  std::string rcvd_data;\n  std::string user = \"user1\";\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n  // greeting\n  std::string greeting = encodeServerGreeting(MYSQL_PROTOCOL_10);\n  ASSERT_TRUE(fake_upstream_connection->write(greeting));\n\n  str.append(greeting);\n  tcp_client->waitForData(str, true);\n\n  // Client username/password and capabilities\n  std::string login = encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, user, CHALLENGE_SEQ_NUM);\n  ASSERT_TRUE(tcp_client->write(login));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(login.length(), &rcvd_data));\n  EXPECT_EQ(login, rcvd_data);\n\n  // Server response OK to username/password\n  std::string loginok = encodeClientLoginResp(MYSQL_RESP_OK);\n  ASSERT_TRUE(fake_upstream_connection->write(loginok));\n\n  str.append(loginok);\n  tcp_client->waitForData(str, true);\n\n  tcp_client->close();\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n\n  test_server_->waitForCounterGe(\"mysql.mysql_stats.login_attempts\", 1);\n  EXPECT_EQ(test_server_->counter(\"mysql.mysql_stats.login_failures\")->value(), 
0);\n}\n\n/**\n * Multiple Connections Login Test:\n * Attempt a mysql login and verify it is processed by the filter:\n * Verify counters:\n * - correct number of attempts\n * - no failures\n */\nTEST_P(MySQLIntegrationTest, MySQLUnitTestMultiClientsLoop) {\n  int idx;\n  std::string rcvd_data;\n\n  for (idx = 0; idx < CLIENT_NUM; idx++) {\n    std::string str;\n    std::string user(\"user\");\n    user.append(std::to_string(idx));\n\n    IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n    FakeRawConnectionPtr fake_upstream_connection;\n    ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n    // greeting\n    std::string greeting = encodeServerGreeting(MYSQL_PROTOCOL_10);\n    ASSERT_TRUE(fake_upstream_connection->write(greeting));\n\n    str.append(greeting);\n    tcp_client->waitForData(str, true);\n\n    // Client username/password and capabilities\n    std::string login = encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, user, CHALLENGE_SEQ_NUM);\n    ASSERT_TRUE(tcp_client->write(login));\n    ASSERT_TRUE(fake_upstream_connection->waitForData(login.length(), &rcvd_data));\n    EXPECT_EQ(login, rcvd_data);\n\n    // Server response OK to username/password\n    std::string loginok = encodeClientLoginResp(MYSQL_RESP_OK);\n    ASSERT_TRUE(fake_upstream_connection->write(loginok));\n\n    str.append(loginok);\n    tcp_client->waitForData(str, true);\n\n    tcp_client->close();\n    ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  }\n\n  // Verify counters: CLIENT_NUM login attempts, no failures\n  test_server_->waitForCounterGe(\"mysql.mysql_stats.login_attempts\", CLIENT_NUM);\n  EXPECT_EQ(test_server_->counter(\"mysql.mysql_stats.login_attempts\")->value(), CLIENT_NUM);\n  EXPECT_EQ(test_server_->counter(\"mysql.mysql_stats.login_failures\")->value(), 0);\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml",
    "content": "admin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: \"{}\"\n      port_value: 0\nstatic_resources:\n  clusters:\n    name: cluster_0\n    connect_timeout: 2s\n    load_assignment:\n      cluster_name: cluster_0\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: \"{}\"\n                port_value: 0\n  listeners:\n    name: listener_0\n    address:\n      socket_address:\n        address: \"{}\"\n        port_value: 0\n    filter_chains:\n      - filters:\n          - name: mysql\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy\n              stat_prefix: mysql_stats\n          - name: tcp\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy\n              stat_prefix: tcp_stats\n              cluster: cluster_0\n"
  },
  {
    "path": "test/extensions/filters/network/mysql_proxy/mysql_test_utils.cc",
    "content": "#include \"mysql_test_utils.h\"\n\n#include \"extensions/filters/network/mysql_proxy/mysql_codec.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_greeting.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h\"\n#include \"extensions/filters/network/mysql_proxy/mysql_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nstd::string MySQLTestUtils::encodeServerGreeting(int protocol) {\n  ServerGreeting mysql_greet_encode{};\n  mysql_greet_encode.setProtocol(protocol);\n  std::string ver(MySQLTestUtils::getVersion());\n  mysql_greet_encode.setVersion(ver);\n  mysql_greet_encode.setThreadId(MYSQL_THREAD_ID);\n  std::string salt(getSalt());\n  mysql_greet_encode.setSalt(salt);\n  mysql_greet_encode.setServerCap(MYSQL_SERVER_CAPAB);\n  mysql_greet_encode.setServerLanguage(MYSQL_SERVER_LANGUAGE);\n  mysql_greet_encode.setServerStatus(MYSQL_SERVER_STATUS);\n  mysql_greet_encode.setExtServerCap(MYSQL_SERVER_EXT_CAPAB);\n  std::string data = mysql_greet_encode.encode();\n  std::string mysql_msg = BufferHelper::encodeHdr(data, GREETING_SEQ_NUM);\n  return mysql_msg;\n}\n\nstd::string MySQLTestUtils::encodeClientLogin(uint16_t client_cap, std::string user, uint8_t seq) {\n  ClientLogin mysql_clogin_encode{};\n  mysql_clogin_encode.setClientCap(client_cap);\n  mysql_clogin_encode.setExtendedClientCap(MYSQL_EXT_CLIENT_CAPAB);\n  mysql_clogin_encode.setMaxPacket(MYSQL_MAX_PACKET);\n  mysql_clogin_encode.setCharset(MYSQL_CHARSET);\n  mysql_clogin_encode.setUsername(user);\n  std::string auth_resp(getAuthResp());\n  mysql_clogin_encode.setAuthResp(auth_resp);\n  std::string data = mysql_clogin_encode.encode();\n  std::string mysql_msg = BufferHelper::encodeHdr(data, seq);\n  return mysql_msg;\n}\n\nstd::string 
MySQLTestUtils::encodeClientLoginResp(uint8_t srv_resp, uint8_t it, uint8_t seq_force) {\n  ClientLoginResponse mysql_loginok_encode{};\n  mysql_loginok_encode.setRespCode(srv_resp);\n  mysql_loginok_encode.setAffectedRows(MYSQL_SM_AFFECTED_ROWS);\n  mysql_loginok_encode.setLastInsertId(MYSQL_SM_LAST_ID);\n  mysql_loginok_encode.setServerStatus(MYSQL_SM_SERVER_OK);\n  mysql_loginok_encode.setWarnings(MYSQL_SM_SERVER_WARNINGS);\n  std::string data = mysql_loginok_encode.encode();\n  uint8_t seq = CHALLENGE_RESP_SEQ_NUM + 2 * it;\n  if (seq_force > 0) {\n    seq = seq_force;\n  }\n  std::string mysql_msg = BufferHelper::encodeHdr(data, seq);\n  return mysql_msg;\n}\n\nstd::string MySQLTestUtils::encodeAuthSwitchResp() {\n  ClientSwitchResponse mysql_switch_resp_encode{};\n  std::string resp_opaque_data(\"mysql_opaque\");\n  mysql_switch_resp_encode.setAuthPluginResp(resp_opaque_data);\n  std::string data = mysql_switch_resp_encode.encode();\n  std::string mysql_msg = BufferHelper::encodeHdr(data, AUTH_SWITH_RESP_SEQ);\n  return mysql_msg;\n}\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/mysql_proxy/mysql_test_utils.h",
    "content": "#pragma once\n#include \"fmt/format.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace MySQLProxy {\n\nconstexpr int MYSQL_VER_MAJOR = 5;\nconstexpr int MYSQL_VER_MINOR = 0;\nconstexpr int MYSQL_VER_VAR = 54;\nconstexpr int MYSQL_SM_LAST_ID = 0;\nconstexpr int MYSQL_SM_SERVER_OK = 0;\nconstexpr int MYSQL_SM_SERVER_WARNINGS = 0x0001;\nconstexpr int MYSQL_SM_AFFECTED_ROWS = 1;\nconstexpr int CLIENT_NUM = 10;\nconstexpr int PARALLEL_SESSIONS = 4;\n\nclass MySQLTestUtils {\n\npublic:\n  static std::string getSalt() { return \"!@salt#$\"; }\n  static std::string getAuthResp() { return \"p4$$w0r6\"; }\n  static std::string getVersion() {\n    return fmt::format(\"{0}.{1}.{2}\", MYSQL_VER_MAJOR, MYSQL_VER_MINOR, MYSQL_VER_VAR);\n  }\n\n  std::string encodeServerGreeting(int protocol);\n  std::string encodeClientLogin(uint16_t client_cap, std::string user, uint8_t seq);\n  std::string encodeClientLoginResp(uint8_t srv_resp, uint8_t it = 0, uint8_t seq_force = 0);\n  std::string encodeAuthSwitchResp();\n};\n\n} // namespace MySQLProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/postgres_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n    \"envoy_extension_cc_test_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test_library(\n    name = \"postgres_test_utils_lib\",\n    srcs = [\"postgres_test_utils.cc\"],\n    hdrs = [\"postgres_test_utils.h\"],\n    extension_name = \"envoy.filters.network.postgres_proxy\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"postgres_decoder_tests\",\n    srcs = [\n        \"postgres_decoder_test.cc\",\n    ],\n    extension_name = \"envoy.filters.network.postgres_proxy\",\n    deps = [\n        \":postgres_test_utils_lib\",\n        \"//source/extensions/filters/network/postgres_proxy:filter\",\n        \"//test/mocks/network:network_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"postgres_message_tests\",\n    srcs = [\n        \"postgres_message_test.cc\",\n    ],\n    extension_name = \"envoy.filters.network.postgres_proxy\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/extensions/filters/network/postgres_proxy:filter\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"postgres_filter_tests\",\n    srcs = [\n        \"postgres_filter_test.cc\",\n    ],\n    extension_name = \"envoy.filters.network.postgres_proxy\",\n    deps = [\n        \":postgres_test_utils_lib\",\n        \"//source/extensions/filters/network/postgres_proxy:filter\",\n        \"//test/mocks/network:network_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"postgres_integration_test\",\n    srcs = [\n        \"postgres_integration_test.cc\",\n    ],\n    data = [\n        \"postgres_test_config.yaml\",\n    ],\n    extension_name = \"envoy.filters.network.postgres_proxy\",\n    deps = [\n        \"//source/common/tcp_proxy\",\n     
   \"//source/extensions/filters/network/postgres_proxy:config\",\n        \"//source/extensions/filters/network/postgres_proxy:filter\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/integration:integration_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc",
    "content": "#include <gmock/gmock.h>\n#include <gtest/gtest.h>\n\n#include \"extensions/filters/network/postgres_proxy/postgres_decoder.h\"\n\n#include \"test/extensions/filters/network/postgres_proxy/postgres_test_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\nclass DecoderCallbacksMock : public DecoderCallbacks {\npublic:\n  MOCK_METHOD(void, incMessagesBackend, (), (override));\n  MOCK_METHOD(void, incMessagesFrontend, (), (override));\n  MOCK_METHOD(void, incMessagesUnknown, (), (override));\n  MOCK_METHOD(void, incSessionsEncrypted, (), (override));\n  MOCK_METHOD(void, incSessionsUnencrypted, (), (override));\n  MOCK_METHOD(void, incStatements, (StatementType), (override));\n  MOCK_METHOD(void, incTransactions, (), (override));\n  MOCK_METHOD(void, incTransactionsCommit, (), (override));\n  MOCK_METHOD(void, incTransactionsRollback, (), (override));\n  MOCK_METHOD(void, incNotices, (NoticeType), (override));\n  MOCK_METHOD(void, incErrors, (ErrorType), (override));\n  MOCK_METHOD(void, processQuery, (const std::string&), (override));\n};\n\n// Define fixture class with decoder and mock callbacks.\nclass PostgresProxyDecoderTestBase {\npublic:\n  PostgresProxyDecoderTestBase() {\n    decoder_ = std::make_unique<DecoderImpl>(&callbacks_);\n    decoder_->initialize();\n    decoder_->setStartup(false);\n  }\n\nprotected:\n  ::testing::NiceMock<DecoderCallbacksMock> callbacks_;\n  std::unique_ptr<DecoderImpl> decoder_;\n\n  // fields often used\n  Buffer::OwnedImpl data_;\n  char buf_[256]{};\n  std::string payload_;\n};\n\nclass PostgresProxyDecoderTest : public PostgresProxyDecoderTestBase, public ::testing::Test {};\n\n// Class is used for parameterized tests for frontend messages.\nclass PostgresProxyFrontendDecoderTest : public PostgresProxyDecoderTestBase,\n                                         public ::testing::TestWithParam<std::string> {};\n\n// Class is used for parameterized tests 
for encrypted messages.\nclass PostgresProxyFrontendEncrDecoderTest : public PostgresProxyDecoderTestBase,\n                                             public ::testing::TestWithParam<uint32_t> {};\n\n// Class is used for parameterized tests for backend messages.\nclass PostgresProxyBackendDecoderTest : public PostgresProxyDecoderTestBase,\n                                        public ::testing::TestWithParam<std::string> {};\n\nclass PostgresProxyErrorTest\n    : public PostgresProxyDecoderTestBase,\n      public ::testing::TestWithParam<std::tuple<std::string, DecoderCallbacks::ErrorType>> {};\n\nclass PostgresProxyNoticeTest\n    : public PostgresProxyDecoderTestBase,\n      public ::testing::TestWithParam<std::tuple<std::string, DecoderCallbacks::NoticeType>> {};\n\n// Test processing the startup message from a client.\n// For historical reasons, the first message does not include\n// command (first byte). It starts with length. The startup\n// message contains the protocol version. 
After processing the\n// startup message the server should start using message format\n// with command as 1st byte.\nTEST_F(PostgresProxyDecoderTest, StartupMessage) {\n  decoder_->setStartup(true);\n\n  buf_[0] = '\\0';\n  // Startup message has the following structure:\n  // Length (4 bytes) - payload and length field\n  // version (4 bytes)\n  // Attributes: key/value pairs separated by '\\0'\n  data_.writeBEInt<uint32_t>(53);\n  // Add version code\n  data_.writeBEInt<uint32_t>(0x00030000);\n  // user-postgres key-pair\n  data_.add(\"user\"); // 4 bytes\n  data_.add(buf_, 1);\n  data_.add(\"postgres\"); // 8 bytes\n  data_.add(buf_, 1);\n  // database-test-db key-pair\n  data_.add(\"database\"); // 8 bytes\n  data_.add(buf_, 1);\n  data_.add(\"testdb\"); // 6 bytes\n  data_.add(buf_, 1);\n  // Some other attribute\n  data_.add(\"attribute\"); // 9 bytes\n  data_.add(buf_, 1);\n  data_.add(\"blah\"); // 4 bytes\n  data_.add(buf_, 1);\n  decoder_->onData(data_, true);\n  ASSERT_THAT(data_.length(), 0);\n  // Verify parsing attributes\n  ASSERT_THAT(decoder_->getAttributes().at(\"user\"), \"postgres\");\n  ASSERT_THAT(decoder_->getAttributes().at(\"database\"), \"testdb\");\n  // This attribute should not be found\n  ASSERT_THAT(decoder_->getAttributes().find(\"no\"), decoder_->getAttributes().end());\n\n  // Now feed normal message with 1bytes as command.\n  data_.add(\"P\");\n  // Add length.\n  data_.writeBEInt<uint32_t>(6); // 4 bytes of length + 2 bytes of data.\n  data_.add(\"AB\");\n  decoder_->onData(data_, true);\n  ASSERT_THAT(data_.length(), 0);\n}\n\n// Test verifies that when Startup message does not carry\n// \"database\" attribute, it is derived from \"user\".\nTEST_F(PostgresProxyDecoderTest, StartupMessageNoAttr) {\n  decoder_->setStartup(true);\n\n  buf_[0] = '\\0';\n  // Startup message has the following structure:\n  // Length (4 bytes) - payload and length field\n  // version (4 bytes)\n  // Attributes: key/value pairs separated by '\\0'\n  
data_.writeBEInt<uint32_t>(37);\n  // Add version code\n  data_.writeBEInt<uint32_t>(0x00030000);\n  // user-postgres key-pair\n  data_.add(\"user\"); // 4 bytes\n  data_.add(buf_, 1);\n  data_.add(\"postgres\"); // 8 bytes\n  data_.add(buf_, 1);\n  // database-test-db key-pair\n  // Some other attribute\n  data_.add(\"attribute\"); // 9 bytes\n  data_.add(buf_, 1);\n  data_.add(\"blah\"); // 4 bytes\n  data_.add(buf_, 1);\n  decoder_->onData(data_, true);\n  ASSERT_THAT(data_.length(), 0);\n\n  // Verify parsing attributes\n  ASSERT_THAT(decoder_->getAttributes().at(\"user\"), \"postgres\");\n  ASSERT_THAT(decoder_->getAttributes().at(\"database\"), \"postgres\");\n  // This attribute should not be found\n  ASSERT_THAT(decoder_->getAttributes().find(\"no\"), decoder_->getAttributes().end());\n}\n\n// Test processing messages which map 1:1 with buffer.\n// The buffer contains just a single entire message and\n// nothing more.\nTEST_F(PostgresProxyDecoderTest, ReadingBufferSingleMessages) {\n\n  // Feed empty buffer - should not crash.\n  decoder_->onData(data_, true);\n\n  // Put one byte. This is not enough to parse the message and that byte\n  // should stay in the buffer.\n  data_.add(\"P\");\n  decoder_->onData(data_, true);\n  ASSERT_THAT(data_.length(), 1);\n\n  // Add length of 4 bytes. 
It would mean completely empty message.\n  // but it should be consumed.\n  data_.writeBEInt<uint32_t>(4);\n  decoder_->onData(data_, true);\n  ASSERT_THAT(data_.length(), 0);\n\n  // Create a message with 5 additional bytes.\n  data_.add(\"P\");\n  // Add length.\n  data_.writeBEInt<uint32_t>(9); // 4 bytes of length field + 5 of data.\n  data_.add(buf_, 5);\n  decoder_->onData(data_, true);\n  ASSERT_THAT(data_.length(), 0);\n}\n\n// Test simulates situation when decoder is called with incomplete message.\n// The message should not be processed until the buffer is filled\n// with missing bytes.\nTEST_F(PostgresProxyDecoderTest, ReadingBufferLargeMessages) {\n  // Fill the buffer with message of 100 bytes long\n  // but the buffer contains only 98 bytes.\n  // It should not be processed.\n  data_.add(\"P\");\n  // Add length.\n  data_.writeBEInt<uint32_t>(100); // This also includes length field\n  data_.add(buf_, 94);\n  decoder_->onData(data_, true);\n  // The buffer contains command (1 byte), length (4 bytes) and 94 bytes of message.\n  ASSERT_THAT(data_.length(), 99);\n\n  // Add 2 missing bytes and feed again to decoder.\n  data_.add(\"AB\");\n  decoder_->onData(data_, true);\n  ASSERT_THAT(data_.length(), 0);\n}\n\n// Test simulates situation when a buffer contains more than one\n// message. 
Call to the decoder should consume only one message\n// at a time and only when the buffer contains the entire message.\nTEST_F(PostgresProxyDecoderTest, TwoMessagesInOneBuffer) {\n  // Create the first message of 50 bytes long (+1 for command).\n  data_.add(\"P\");\n  // Add length.\n  data_.writeBEInt<uint32_t>(50);\n  data_.add(buf_, 46);\n\n  // Create the second message of 50 + 46 bytes (+1 for command).\n  data_.add(\"P\");\n  // Add length.\n  data_.writeBEInt<uint32_t>(96);\n  data_.add(buf_, 46);\n  data_.add(buf_, 46);\n\n  // The buffer contains two messaged:\n  // 1st: command (1 byte), length (4 bytes), 46 bytes of data\n  // 2nd: command (1 byte), length (4 bytes), 92 bytes of data\n  ASSERT_THAT(data_.length(), 148);\n  // Process the first message.\n  decoder_->onData(data_, true);\n  ASSERT_THAT(data_.length(), 97);\n  // Process the second message.\n  decoder_->onData(data_, true);\n  ASSERT_THAT(data_.length(), 0);\n}\n\nTEST_F(PostgresProxyDecoderTest, Unknown) {\n  // Create invalid message. 
The first byte is invalid \"=\"\n  // Message must be at least 5 bytes to be parsed.\n  EXPECT_CALL(callbacks_, incMessagesUnknown()).Times(1);\n  createPostgresMsg(data_, \"=\", \"some not important string which will be ignored anyways\");\n  decoder_->onData(data_, true);\n}\n\n// Test if each frontend command calls incMessagesFrontend() method.\nTEST_P(PostgresProxyFrontendDecoderTest, FrontendInc) {\n  EXPECT_CALL(callbacks_, incMessagesFrontend()).Times(1);\n  createPostgresMsg(data_, GetParam(), \"SELECT 1;\");\n  decoder_->onData(data_, true);\n}\n\n// Run the above test for each frontend message.\nINSTANTIATE_TEST_SUITE_P(FrontEndMessagesTests, PostgresProxyFrontendDecoderTest,\n                         ::testing::Values(\"B\", \"C\", \"d\", \"c\", \"f\", \"D\", \"E\", \"H\", \"F\", \"p\", \"P\",\n                                           \"p\", \"Q\", \"S\", \"X\"));\n\n// Test if X message triggers incRollback and sets proper state in transaction.\nTEST_F(PostgresProxyFrontendDecoderTest, TerminateMessage) {\n  // Set decoder state NOT to be in_transaction.\n  decoder_->getSession().setInTransaction(false);\n  EXPECT_CALL(callbacks_, incTransactionsRollback()).Times(0);\n  createPostgresMsg(data_, \"X\");\n  decoder_->onData(data_, true);\n\n  // Now set the decoder to be in_transaction state.\n  decoder_->getSession().setInTransaction(true);\n  EXPECT_CALL(callbacks_, incTransactionsRollback()).Times(1);\n  createPostgresMsg(data_, \"X\");\n  decoder_->onData(data_, true);\n  ASSERT_FALSE(decoder_->getSession().inTransaction());\n}\n\n// Query message should invoke filter's callback message\nTEST_F(PostgresProxyFrontendDecoderTest, QueryMessage) {\n  EXPECT_CALL(callbacks_, processQuery).Times(1);\n  createPostgresMsg(data_, \"Q\", \"SELECT * FROM whatever;\");\n  decoder_->onData(data_, true);\n}\n\n// Parse message has optional Query name which may be in front of actual\n// query statement. 
This test verifies that both formats are processed\n// correctly.\nTEST_F(PostgresProxyFrontendDecoderTest, ParseMessage) {\n  std::string query = \"SELECT * FROM whatever;\";\n  std::string query_name, query_params;\n\n  // Should be called twice with the same query.\n  EXPECT_CALL(callbacks_, processQuery(query)).Times(2);\n\n  // Set params to be zero.\n  query_params.reserve(2);\n  query_params += '\\0';\n  query_params += '\\0';\n\n  // Message without optional query name.\n  query_name.reserve(1);\n  query_name += '\\0';\n  createPostgresMsg(data_, \"P\", query_name + query + query_params);\n  decoder_->onData(data_, true);\n\n  // Message with optional name query_name\n  query_name.clear();\n  query_name.reserve(5);\n  query_name += \"P0_8\";\n  query_name += '\\0';\n  createPostgresMsg(data_, \"P\", query_name + query + query_params);\n  decoder_->onData(data_, true);\n}\n\n// Test if each backend command calls incMessagesBackend()) method.\nTEST_P(PostgresProxyBackendDecoderTest, BackendInc) {\n  EXPECT_CALL(callbacks_, incMessagesBackend()).Times(1);\n  createPostgresMsg(data_, GetParam(), \"Some not important message\");\n  decoder_->onData(data_, false);\n}\n\n// Run the above test for each backend message.\nINSTANTIATE_TEST_SUITE_P(BackendMessagesTests, PostgresProxyBackendDecoderTest,\n                         ::testing::Values(\"R\", \"K\", \"2\", \"3\", \"C\", \"d\", \"c\", \"G\", \"H\", \"D\", \"I\",\n                                           \"E\", \"V\", \"v\", \"n\", \"N\", \"A\", \"t\", \"S\", \"1\", \"s\", \"Z\",\n                                           \"T\"));\n// Test parsing backend messages.\n// The parser should react only to the first word until the space.\nTEST_F(PostgresProxyBackendDecoderTest, ParseStatement) {\n  // Payload contains a space after the keyword\n  // Rollback counter should be bumped up.\n  EXPECT_CALL(callbacks_, incTransactionsRollback());\n  createPostgresMsg(data_, \"C\", \"ROLLBACK 123\");\n  
decoder_->onData(data_, false);\n  data_.drain(data_.length());\n\n  // Now try just keyword without a space at the end.\n  EXPECT_CALL(callbacks_, incTransactionsRollback());\n  createPostgresMsg(data_, \"C\", \"ROLLBACK\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n\n  // Partial message should be ignored.\n  EXPECT_CALL(callbacks_, incTransactionsRollback()).Times(0);\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other));\n  createPostgresMsg(data_, \"C\", \"ROLL\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n\n  // Keyword without a space  should be ignored.\n  EXPECT_CALL(callbacks_, incTransactionsRollback()).Times(0);\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other));\n  createPostgresMsg(data_, \"C\", \"ROLLBACK123\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n}\n\n// Test Backend messages and make sure that they\n// trigger proper stats updates.\nTEST_F(PostgresProxyDecoderTest, Backend) {\n  // C message\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other));\n  createPostgresMsg(data_, \"C\", \"BEGIN 123\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n  ASSERT_TRUE(decoder_->getSession().inTransaction());\n\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Other));\n  createPostgresMsg(data_, \"C\", \"START TR\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Noop));\n  EXPECT_CALL(callbacks_, incTransactionsCommit());\n  createPostgresMsg(data_, \"C\", \"COMMIT\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Select));\n  EXPECT_CALL(callbacks_, incTransactionsCommit());\n  createPostgresMsg(data_, \"C\", \"SELECT\");\n  decoder_->onData(data_, false);\n 
 data_.drain(data_.length());\n\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Noop));\n  EXPECT_CALL(callbacks_, incTransactionsRollback());\n  createPostgresMsg(data_, \"C\", \"ROLLBACK\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Insert));\n  EXPECT_CALL(callbacks_, incTransactionsCommit());\n  createPostgresMsg(data_, \"C\", \"INSERT 1\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Update));\n  EXPECT_CALL(callbacks_, incTransactionsCommit());\n  createPostgresMsg(data_, \"C\", \"UPDATE 123\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n\n  EXPECT_CALL(callbacks_, incStatements(DecoderCallbacks::StatementType::Delete));\n  EXPECT_CALL(callbacks_, incTransactionsCommit());\n  createPostgresMsg(data_, \"C\", \"DELETE 88\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n}\n\n// Test checks deep inspection of the R message.\n// During login/authentication phase client and server exchange\n// multiple R messages. Only payload with length is 8 and\n// payload with uint32 number equal to 0 indicates\n// successful authentication.\nTEST_F(PostgresProxyBackendDecoderTest, AuthenticationMsg) {\n  // Create authentication message which does not\n  // mean that authentication was OK. 
The number of\n  // sessions must not be increased.\n  EXPECT_CALL(callbacks_, incSessionsUnencrypted()).Times(0);\n  createPostgresMsg(data_, \"R\", \"blah blah\");\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n\n  // Create the correct payload which means that\n  // authentication completed successfully.\n  EXPECT_CALL(callbacks_, incSessionsUnencrypted());\n  data_.add(\"R\");\n  // Add length.\n  data_.writeBEInt<uint32_t>(8);\n  // Add 4-byte code.\n  data_.writeBEInt<uint32_t>(0);\n  decoder_->onData(data_, false);\n  data_.drain(data_.length());\n}\n\n// Test check parsing of E message. The message\n// indicates error.\nTEST_P(PostgresProxyErrorTest, ParseErrorMsgs) {\n  EXPECT_CALL(callbacks_, incErrors(std::get<1>(GetParam())));\n  createPostgresMsg(data_, \"E\", std::get<0>(GetParam()));\n  decoder_->onData(data_, false);\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    PostgresProxyErrorTestSuite, PostgresProxyErrorTest,\n    ::testing::Values(\n        std::make_tuple(\"blah blah\", DecoderCallbacks::ErrorType::Unknown),\n        std::make_tuple(\"SERRORC1234\", DecoderCallbacks::ErrorType::Error),\n        std::make_tuple(\"SERRORVERRORC1234\", DecoderCallbacks::ErrorType::Error),\n        std::make_tuple(\"SFATALVFATALC22012\", DecoderCallbacks::ErrorType::Fatal),\n        std::make_tuple(\"SPANICVPANICC22012\", DecoderCallbacks::ErrorType::Panic),\n        // This is the real German message in Postgres > 9.6. It contains keyword\n        // in English with V prefix.\n        std::make_tuple(\"SPANIKVPANICC42501Mkonnte Datei »pg_wal/000000010000000100000096« nicht \"\n                        \"öffnen: Permission deniedFxlog.cL3229RXLogFileInit\",\n                        DecoderCallbacks::ErrorType::Panic),\n        // This is German message indicating error. The comment field contains word PANIC.\n        // Since we do not decode other languages, it should go into Other bucket.\n        // This situation can only happen in Postgres < 9.6. 
Starting with version 9.6\n        // messages must have severity in English with prefix V.\n        std::make_tuple(\"SFEHLERCP0001MMy PANIC ugly messageFpl_exec.cL3216Rexec_stmt_raise\",\n                        DecoderCallbacks::ErrorType::Unknown)));\n\n// Test parsing N message. It indicate notice\n// and carries additional information about the\n// purpose of the message.\nTEST_P(PostgresProxyNoticeTest, ParseNoticeMsgs) {\n  EXPECT_CALL(callbacks_, incNotices(std::get<1>(GetParam())));\n  createPostgresMsg(data_, \"N\", std::get<0>(GetParam()));\n  decoder_->onData(data_, false);\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    PostgresProxyNoticeTestSuite, PostgresProxyNoticeTest,\n    ::testing::Values(std::make_tuple(\"blah blah\", DecoderCallbacks::NoticeType::Unknown),\n                      std::make_tuple(\"SblalalaC2345\", DecoderCallbacks::NoticeType::Unknown),\n                      std::make_tuple(\"SblahVWARNING23345\", DecoderCallbacks::NoticeType::Warning),\n                      std::make_tuple(\"SNOTICEERRORbbal4\", DecoderCallbacks::NoticeType::Notice),\n                      std::make_tuple(\"SINFOVblabla\", DecoderCallbacks::NoticeType::Info),\n                      std::make_tuple(\"SDEBUGDEBUG\", DecoderCallbacks::NoticeType::Debug),\n                      std::make_tuple(\"SLOGGGGINFO\", DecoderCallbacks::NoticeType::Log)));\n\n// Test checks if the decoder can detect initial message which indicates\n// that protocol uses encryption.\nTEST_P(PostgresProxyFrontendEncrDecoderTest, EncyptedTraffic) {\n  // Set decoder to wait for initial message.\n  decoder_->setStartup(true);\n\n  // Initial state is no-encryption.\n  ASSERT_FALSE(decoder_->encrypted());\n\n  // Create SSLRequest.\n  EXPECT_CALL(callbacks_, incSessionsEncrypted());\n  // Add length.\n  data_.writeBEInt<uint32_t>(8);\n  // 1234 in the most significant 16 bits, and some code in the least significant 16 bits.\n  // Add 4 bytes long code\n  data_.writeBEInt<uint32_t>(GetParam());\n  
decoder_->onData(data_, false);\n  ASSERT_TRUE(decoder_->encrypted());\n  // Decoder should drain data.\n  ASSERT_THAT(data_.length(), 0);\n\n  // Now when decoder detected encrypted traffic is should not\n  // react to any messages (even not encrypted ones).\n  EXPECT_CALL(callbacks_, incMessagesFrontend()).Times(0);\n\n  createPostgresMsg(data_, \"P\", \"Some message just to fill the payload.\");\n  decoder_->onData(data_, true);\n  // Decoder should drain data.\n  ASSERT_THAT(data_.length(), 0);\n}\n\n// Run encryption tests.\n// 80877103 is SSL code\n// 80877104 is GSS code\nINSTANTIATE_TEST_SUITE_P(FrontendEncryptedMessagesTests, PostgresProxyFrontendEncrDecoderTest,\n                         ::testing::Values(80877103, 80877104));\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc",
    "content": "#include <gmock/gmock.h>\n#include <gtest/gtest.h>\n\n#include <tuple>\n\n#include \"extensions/filters/network/postgres_proxy/postgres_filter.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/extensions/filters/network/postgres_proxy/postgres_test_utils.h\"\n#include \"test/mocks/network/mocks.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\nusing testing::ReturnRef;\nusing ::testing::WithArgs;\n\n// Decoder mock.\nclass MockDecoderTest : public Decoder {\npublic:\n  MOCK_METHOD(bool, onData, (Buffer::Instance&, bool), (override));\n  MOCK_METHOD(PostgresSession&, getSession, (), (override));\n};\n\n// Fixture class.\nclass PostgresFilterTest\n    : public ::testing::TestWithParam<\n          std::tuple<std::function<void(PostgresFilter*, Buffer::Instance&, bool)>,\n                     std::function<uint32_t(const PostgresFilter*)>>> {\npublic:\n  PostgresFilterTest() {\n    config_ = std::make_shared<PostgresFilterConfig>(stat_prefix_, true, scope_);\n    filter_ = std::make_unique<PostgresFilter>(config_);\n\n    filter_->initializeReadFilterCallbacks(filter_callbacks_);\n  }\n\n  void setMetadata() {\n    EXPECT_CALL(filter_callbacks_, connection()).WillRepeatedly(ReturnRef(connection_));\n    EXPECT_CALL(connection_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_));\n    ON_CALL(stream_info_, setDynamicMetadata(NetworkFilterNames::get().PostgresProxy, _))\n        .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) {\n          stream_info_.metadata_.mutable_filter_metadata()->insert(\n              Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\n                  NetworkFilterNames::get().PostgresProxy, obj));\n        }));\n  }\n\n  Stats::IsolatedStoreImpl scope_;\n  std::string stat_prefix_{\"test.\"};\n  std::unique_ptr<PostgresFilter> filter_;\n  PostgresFilterConfigSharedPtr config_;\n  
NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info_;\n\n  // These variables are used internally in tests.\n  Buffer::OwnedImpl data_;\n  char buf_[256];\n};\n\nTEST_F(PostgresFilterTest, NewConnection) {\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onNewConnection());\n}\n\n// Test reading buffer until the buffer is exhausted\n// or decoder indicates that there is not enough data in a buffer\n// to process a message.\nTEST_P(PostgresFilterTest, ReadData) {\n  // Create mock decoder, obtain raw pointer to it (required for EXPECT_CALL)\n  // and pass the decoder to filter.\n  std::unique_ptr<MockDecoderTest> decoder = std::make_unique<MockDecoderTest>();\n  MockDecoderTest* decoderPtr = decoder.get();\n  filter_->setDecoder(std::move(decoder));\n\n  data_.add(buf_, 256);\n\n  // Simulate reading entire buffer.\n  EXPECT_CALL(*decoderPtr, onData)\n      .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> bool {\n        data.drain(data.length());\n        return true;\n      })));\n  std::get<0>(GetParam())(filter_.get(), data_, false);\n  ASSERT_THAT(std::get<1>(GetParam())(filter_.get()), 0);\n\n  // Simulate reading entire data in two steps.\n  EXPECT_CALL(*decoderPtr, onData)\n      .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> bool {\n        data.drain(100);\n        return true;\n      })))\n      .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> bool {\n        data.drain(156);\n        return true;\n      })));\n  std::get<0>(GetParam())(filter_.get(), data_, false);\n  ASSERT_THAT(std::get<1>(GetParam())(filter_.get()), 0);\n\n  // Simulate reading 3 packets. The first two were processed correctly and\n  // for the third one there was not enough data. 
There should be 56 bytes\n  // of unprocessed data.\n  EXPECT_CALL(*decoderPtr, onData)\n      .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> bool {\n        data.drain(100);\n        return true;\n      })))\n      .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> bool {\n        data.drain(100);\n        return true;\n      })))\n      .WillOnce(WithArgs<0, 1>(Invoke([](Buffer::Instance& data, bool) -> bool {\n        data.drain(0);\n        return false;\n      })));\n  std::get<0>(GetParam())(filter_.get(), data_, false);\n  ASSERT_THAT(std::get<1>(GetParam())(filter_.get()), 56);\n}\n\n// Parameterized test:\n// First value in the tuple is method taking buffer with received data.\n// Second value in the tuple is method returning how many bytes are left after processing.\nINSTANTIATE_TEST_SUITE_P(ProcessDataTests, PostgresFilterTest,\n                         ::testing::Values(std::make_tuple(&PostgresFilter::onData,\n                                                           &PostgresFilter::getFrontendBufLength),\n                                           std::make_tuple(&PostgresFilter::onWrite,\n                                                           &PostgresFilter::getBackendBufLength)));\n\n// Test generates various postgres payloads and feeds them into filter.\n// It expects that certain statistics are updated.\nTEST_F(PostgresFilterTest, BackendMsgsStats) {\n  // pretend that startup message has been received.\n  static_cast<DecoderImpl*>(filter_->getDecoder())->setStartup(false);\n\n  // unknown message\n  createPostgresMsg(data_, \"=\", \"blah blah blah\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().messages_unknown_.value(), 1);\n\n  filter_->getDecoder()->getSession().setInTransaction(true);\n  createPostgresMsg(data_, \"C\", \"COMMIT\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().statements_.value(), 1);\n  
ASSERT_THAT(filter_->getStats().transactions_.value(), 0);\n  ASSERT_THAT(filter_->getStats().transactions_commit_.value(), 1);\n\n  createPostgresMsg(data_, \"C\", \"ROLLBACK 234\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().transactions_.value(), 0);\n  ASSERT_THAT(filter_->getStats().statements_.value(), 2);\n  ASSERT_THAT(filter_->getStats().statements_other_.value(), 0);\n  ASSERT_THAT(filter_->getStats().transactions_rollback_.value(), 1);\n\n  createPostgresMsg(data_, \"C\", \"SELECT blah\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().statements_.value(), 3);\n  ASSERT_THAT(filter_->getStats().statements_select_.value(), 1);\n\n  createPostgresMsg(data_, \"C\", \"INSERT 123\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().statements_.value(), 4);\n  ASSERT_THAT(filter_->getStats().statements_insert_.value(), 1);\n\n  createPostgresMsg(data_, \"C\", \"DELETE 123\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().statements_.value(), 5);\n  ASSERT_THAT(filter_->getStats().statements_delete_.value(), 1);\n\n  createPostgresMsg(data_, \"C\", \"UPDATE 123\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().statements_.value(), 6);\n  ASSERT_THAT(filter_->getStats().statements_update_.value(), 1);\n\n  createPostgresMsg(data_, \"C\", \"BEGIN 123\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().statements_.value(), 7);\n  ASSERT_THAT(filter_->getStats().statements_other_.value(), 1);\n}\n\n// Test sends series of E type error messages to the filter and\n// verifies that statistic counters are increased.\nTEST_F(PostgresFilterTest, ErrorMsgsStats) {\n  // Pretend that startup message has been received.\n  static_cast<DecoderImpl*>(filter_->getDecoder())->setStartup(false);\n\n  createPostgresMsg(data_, \"E\", \"SERRORVERRORC22012\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().errors_.value(), 
1);\n  ASSERT_THAT(filter_->getStats().errors_error_.value(), 1);\n\n  createPostgresMsg(data_, \"E\", \"SFATALVFATALC22012\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().errors_.value(), 2);\n  ASSERT_THAT(filter_->getStats().errors_fatal_.value(), 1);\n\n  createPostgresMsg(data_, \"E\", \"SPANICVPANICC22012\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().errors_.value(), 3);\n  ASSERT_THAT(filter_->getStats().errors_panic_.value(), 1);\n\n  createPostgresMsg(data_, \"E\", \"SBLAHBLAHC22012\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().errors_.value(), 4);\n  ASSERT_THAT(filter_->getStats().errors_unknown_.value(), 1);\n}\n\n// Test sends series of N type messages to the filter and verifies\n// that corresponding stats counters are updated.\nTEST_F(PostgresFilterTest, NoticeMsgsStats) {\n  // Pretend that startup message has been received.\n  static_cast<DecoderImpl*>(filter_->getDecoder())->setStartup(false);\n\n  createPostgresMsg(data_, \"N\", \"SblalalaC2345\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().notices_.value(), 1);\n  ASSERT_THAT(filter_->getStats().notices_unknown_.value(), 1);\n\n  createPostgresMsg(data_, \"N\", \"SblahVWARNING23345\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().notices_.value(), 2);\n  ASSERT_THAT(filter_->getStats().notices_warning_.value(), 1);\n\n  createPostgresMsg(data_, \"N\", \"SNOTICEERRORbbal4\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().notices_.value(), 3);\n  ASSERT_THAT(filter_->getStats().notices_notice_.value(), 1);\n\n  createPostgresMsg(data_, \"N\", \"SINFOVblabla\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().notices_.value(), 4);\n  ASSERT_THAT(filter_->getStats().notices_info_.value(), 1);\n\n  createPostgresMsg(data_, \"N\", \"SDEBUGDEBUG\");\n  filter_->onWrite(data_, false);\n  
ASSERT_THAT(filter_->getStats().notices_.value(), 5);\n  ASSERT_THAT(filter_->getStats().notices_debug_.value(), 1);\n\n  createPostgresMsg(data_, \"N\", \"SLOGGGGINFO\");\n  filter_->onWrite(data_, false);\n  ASSERT_THAT(filter_->getStats().notices_.value(), 6);\n  ASSERT_THAT(filter_->getStats().notices_log_.value(), 1);\n}\n\n// Encrypted sessions are detected based on the first received message.\nTEST_F(PostgresFilterTest, EncryptedSessionStats) {\n  data_.writeBEInt<uint32_t>(8);\n  // 1234 in the most significant 16 bits and some code in the least significant 16 bits.\n  data_.writeBEInt<uint32_t>(80877103); // SSL code.\n  filter_->onData(data_, false);\n  ASSERT_THAT(filter_->getStats().sessions_.value(), 1);\n  ASSERT_THAT(filter_->getStats().sessions_encrypted_.value(), 1);\n}\n\n// Test verifies that incorrect SQL statement does not create\n// Postgres metadata.\nTEST_F(PostgresFilterTest, MetadataIncorrectSQL) {\n  // Pretend that startup message has been received.\n  static_cast<DecoderImpl*>(filter_->getDecoder())->setStartup(false);\n  setMetadata();\n\n  createPostgresMsg(data_, \"Q\", \"BLAH blah blah\");\n  filter_->onData(data_, false);\n\n  // SQL statement was wrong. 
No metadata should have been created.\n  ASSERT_THAT(filter_->connection().streamInfo().dynamicMetadata().filter_metadata().contains(\n                  NetworkFilterNames::get().PostgresProxy),\n              false);\n  ASSERT_THAT(filter_->getStats().statements_parse_error_.value(), 1);\n  ASSERT_THAT(filter_->getStats().statements_parsed_.value(), 0);\n}\n\n// Test verifies that Postgres metadata is created for correct SQL statement.\n// and it happens only when parse_sql flag is true.\nTEST_F(PostgresFilterTest, QueryMessageMetadata) {\n  // Pretend that startup message has been received.\n  static_cast<DecoderImpl*>(filter_->getDecoder())->setStartup(false);\n  setMetadata();\n\n  // Disable creating parsing SQL and creating metadata.\n  filter_->getConfig()->enable_sql_parsing_ = false;\n  createPostgresMsg(data_, \"Q\", \"SELECT * FROM whatever\");\n  filter_->onData(data_, false);\n\n  ASSERT_THAT(filter_->connection().streamInfo().dynamicMetadata().filter_metadata().contains(\n                  NetworkFilterNames::get().PostgresProxy),\n              false);\n  ASSERT_THAT(filter_->getStats().statements_parse_error_.value(), 0);\n  ASSERT_THAT(filter_->getStats().statements_parsed_.value(), 0);\n\n  // Now enable SQL parsing and creating metadata.\n  filter_->getConfig()->enable_sql_parsing_ = true;\n  filter_->onData(data_, false);\n\n  auto& filter_meta = filter_->connection().streamInfo().dynamicMetadata().filter_metadata().at(\n      NetworkFilterNames::get().PostgresProxy);\n  auto& fields = filter_meta.fields();\n\n  ASSERT_THAT(fields.size(), 1);\n  ASSERT_THAT(fields.contains(\"whatever\"), true);\n\n  const auto& operations = fields.at(\"whatever\").list_value();\n  ASSERT_EQ(\"select\", operations.values(0).string_value());\n\n  ASSERT_THAT(filter_->getStats().statements_parse_error_.value(), 0);\n  ASSERT_THAT(filter_->getStats().statements_parsed_.value(), 1);\n}\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc",
    "content": "#include \"test/integration/fake_upstream.h\"\n#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\nclass PostgresIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                public BaseIntegrationTest {\n\n  std::string postgresConfig() {\n    return fmt::format(\n        TestEnvironment::readFileToStringForTest(TestEnvironment::runfilesPath(\n            \"test/extensions/filters/network/postgres_proxy/postgres_test_config.yaml\")),\n        Platform::null_device_path, Network::Test::getLoopbackAddressString(GetParam()),\n        Network::Test::getLoopbackAddressString(GetParam()),\n        Network::Test::getAnyAddressString(GetParam()));\n  }\n\npublic:\n  PostgresIntegrationTest() : BaseIntegrationTest(GetParam(), postgresConfig()){};\n\n  void SetUp() override { BaseIntegrationTest::initialize(); }\n};\nINSTANTIATE_TEST_SUITE_P(IpVersions, PostgresIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));\n\n// Test that the filter is properly chained and reacts to successful login\n// message.\nTEST_P(PostgresIntegrationTest, Login) {\n  std::string str;\n  std::string recv;\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n  // Send the startup message.\n  Buffer::OwnedImpl data;\n  std::string rcvd;\n  char buf[32];\n\n  memset(buf, 0, sizeof(buf));\n  // Add length.\n  data.writeBEInt<uint32_t>(12);\n  // Add 8 bytes of some data.\n  data.add(buf, 8);\n  
ASSERT_TRUE(tcp_client->write(data.toString()));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(data.toString().length(), &rcvd));\n  data.drain(data.length());\n\n  // TCP session is up. Just send the AuthenticationOK downstream.\n  data.add(\"R\");\n  // Add length.\n  data.writeBEInt<uint32_t>(8);\n  uint32_t code = 0;\n  data.add(&code, sizeof(code));\n\n  rcvd.clear();\n  ASSERT_TRUE(fake_upstream_connection->write(data.toString()));\n  rcvd.append(data.toString());\n  tcp_client->waitForData(rcvd, true);\n\n  tcp_client->close();\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n\n  // Make sure that the successful login bumped up the number of sessions.\n  test_server_->waitForCounterEq(\"postgres.postgres_stats.sessions\", 1);\n}\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/postgres_proxy/postgres_message_test.cc",
    "content": "#include <gmock/gmock.h>\n#include <gtest/gtest.h>\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/postgres_proxy/postgres_message.h\"\n\n#include \"fmt/printf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n// Tests for individual types used in Postgres messages.\n//\n// Integer types.\n\n// Fixture class for testing Integer types.\ntemplate <typename T> class IntTest : public testing::Test {\npublic:\n  T field_;\n  Buffer::OwnedImpl data_;\n};\n\nusing IntTypes = ::testing::Types<Int32, Int16, Int8>;\nTYPED_TEST_SUITE(IntTest, IntTypes);\n\nTYPED_TEST(IntTest, BasicRead) {\n  this->data_.template writeBEInt<decltype(std::declval<TypeParam>().get())>(12);\n  uint64_t pos = 0;\n  uint64_t left = this->data_.length();\n  ASSERT_TRUE(this->field_.read(this->data_, pos, left));\n\n  ASSERT_THAT(this->field_.toString(), \"[12]\");\n  // pos should be moved forward by the number of bytes read.\n  ASSERT_THAT(pos, sizeof(TypeParam));\n  ASSERT_THAT(12, this->field_.get());\n\n  // Make sure that all bytes have been read from the buffer.\n  ASSERT_THAT(left, 0);\n}\n\nTYPED_TEST(IntTest, ReadWithLeftovers) {\n  this->data_.template writeBEInt<decltype(std::declval<TypeParam>().get())>(12);\n  // Write 1 byte more.\n  this->data_.template writeBEInt<uint8_t>(11);\n  uint64_t pos = 0;\n  uint64_t left = this->data_.length();\n  ASSERT_TRUE(this->field_.read(this->data_, pos, left));\n  ASSERT_THAT(this->field_.toString(), \"[12]\");\n  // pos should be moved forward by the number of bytes read.\n  ASSERT_THAT(pos, sizeof(TypeParam));\n\n  // Make sure that all bytes have been read from the buffer.\n  ASSERT_THAT(left, 1);\n}\n\nTYPED_TEST(IntTest, ReadAtOffset) {\n  // write 1 byte before the actual value.\n  this->data_.template writeBEInt<uint8_t>(11);\n  this->data_.template writeBEInt<decltype(std::declval<TypeParam>().get())>(12);\n  uint64_t pos = 1;\n  
uint64_t left = this->data_.length() - 1;\n  ASSERT_TRUE(this->field_.read(this->data_, pos, left));\n  ASSERT_THAT(this->field_.toString(), \"[12]\");\n  // pos should be moved forward by the number of bytes read.\n  ASSERT_THAT(pos, 1 + sizeof(TypeParam));\n  // Nothing should be left to read.\n  ASSERT_THAT(left, 0);\n}\n\nTYPED_TEST(IntTest, NotEnoughData) {\n  this->data_.template writeBEInt<decltype(std::declval<TypeParam>().get())>(12);\n  // Start from offset 1. There is not enough data in the buffer for the required type.\n  uint64_t pos = 1;\n  uint64_t left = this->data_.length() - pos;\n  ASSERT_FALSE(this->field_.read(this->data_, pos, left));\n}\n\n// Byte1 should format content as char.\nTEST(Byte1, Formatting) {\n  Byte1 field;\n\n  Buffer::OwnedImpl data;\n  data.add(\"I\");\n\n  uint64_t pos = 0;\n  uint64_t left = 1;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 1);\n  ASSERT_THAT(left, 0);\n\n  ASSERT_THAT(field.toString(), \"[I]\");\n}\n\n// Tests for String type.\nTEST(StringType, SingleString) {\n  String field;\n\n  Buffer::OwnedImpl data;\n  data.add(\"test\");\n  data.writeBEInt<uint8_t>(0);\n  uint64_t pos = 0;\n  uint64_t left = 5;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 5);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_THAT(out, \"[test]\");\n}\n\nTEST(StringType, MultipleStrings) {\n  String field;\n\n  // Add 3 strings.\n  Buffer::OwnedImpl data;\n  data.add(\"test1\");\n  data.writeBEInt<uint8_t>(0);\n  data.add(\"test2\");\n  data.writeBEInt<uint8_t>(0);\n  data.add(\"test3\");\n  data.writeBEInt<uint8_t>(0);\n  uint64_t pos = 0;\n  uint64_t left = 3 * 6;\n\n  // Read the first string.\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 1 * 6);\n  ASSERT_THAT(left, 2 * 6);\n  auto out = field.toString();\n  ASSERT_THAT(out, \"[test1]\");\n\n  // Read the second string.\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 2 * 6);\n  
ASSERT_THAT(left, 1 * 6);\n  out = field.toString();\n  ASSERT_THAT(out, \"[test2]\");\n\n  // Read the third string.\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 3 * 6);\n  ASSERT_THAT(left, 0);\n  out = field.toString();\n  ASSERT_THAT(out, \"[test3]\");\n}\n\nTEST(StringType, NoTerminatingByte) {\n  String field;\n\n  Buffer::OwnedImpl data;\n  data.add(\"test\");\n  uint64_t pos = 0;\n  uint64_t left = 4;\n  ASSERT_FALSE(field.read(data, pos, left));\n}\n\n// ByteN type is always placed at the end of Postgres message.\n// There is no explicit message length. Length must be deduced from\n// \"length\" field on Postgres message.\nTEST(ByteN, BasicTest) {\n  ByteN field;\n\n  Buffer::OwnedImpl data;\n  // Write 11 bytes. We will read only 10 to make sure\n  // that len is used, not buffer's length.\n  for (auto i = 0; i < 11; i++) {\n    data.writeBEInt<uint8_t>(i);\n  }\n  uint64_t pos = 0;\n  uint64_t left = 10;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 10);\n  // One byte should be left in the buffer.\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_THAT(out, \"[0 1 2 3 4 5 6 7 8 9]\");\n}\n\nTEST(ByteN, NotEnoughData) {\n  ByteN field;\n\n  Buffer::OwnedImpl data;\n  // Write 10 bytes, but set message length to be 11.\n  for (auto i = 0; i < 10; i++) {\n    data.writeBEInt<uint8_t>(i);\n  }\n  uint64_t pos = 0;\n  uint64_t left = 11;\n  ASSERT_FALSE(field.read(data, pos, left));\n}\n\nTEST(ByteN, Empty) {\n  ByteN field;\n\n  Buffer::OwnedImpl data;\n  // Write nothing to data buffer.\n  uint64_t pos = 0;\n  uint64_t left = 0;\n  ASSERT_TRUE(field.read(data, pos, left));\n\n  auto out = field.toString();\n  ASSERT_THAT(out, \"[]\");\n}\n\n// VarByteN type. It contains 4 bytes length field with value which follows.\nTEST(VarByteN, BasicTest) {\n  VarByteN field;\n\n  Buffer::OwnedImpl data;\n  // Write VarByteN with length equal to zero. 
No value follows.\n  data.writeBEInt<uint32_t>(0);\n\n  // Write value with 5 bytes.\n  data.writeBEInt<uint32_t>(5);\n  for (auto i = 0; i < 5; i++) {\n    data.writeBEInt<uint8_t>(10 + i);\n  }\n\n  // Write special case value with length -1. No value follows.\n  data.writeBEInt<int32_t>(-1);\n\n  uint64_t pos = 0;\n  uint64_t left = 4 + 4 + 5 + 4;\n  uint64_t expected_left = left;\n\n  // Read the first value.\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 4);\n  expected_left -= 4;\n  ASSERT_THAT(left, expected_left);\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"0 bytes\") != std::string::npos);\n\n  // Read the second value.\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 4 + 4 + 5);\n  expected_left -= (4 + 5);\n  ASSERT_THAT(left, expected_left);\n  out = field.toString();\n  ASSERT_TRUE(out.find(\"5 bytes\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"10 11 12 13 14\") != std::string::npos);\n\n  // Read the third value.\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 4 + 4 + 5 + 4);\n  expected_left -= 4;\n  ASSERT_THAT(left, expected_left);\n  out = field.toString();\n  ASSERT_TRUE(out.find(\"-1 bytes\") != std::string::npos);\n}\n\nTEST(VarByteN, NotEnoughLengthData) {\n  VarByteN field;\n\n  Buffer::OwnedImpl data;\n  // Write 3 bytes. 
Minimum for this type is 4 bytes of length.\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint8_t>(1);\n  data.writeBEInt<uint8_t>(2);\n\n  uint64_t pos = 0;\n  uint64_t left = 3;\n  ASSERT_FALSE(field.read(data, pos, left));\n}\n\nTEST(VarByteN, NotEnoughValueData) {\n  VarByteN field;\n\n  Buffer::OwnedImpl data;\n  // Write length of the value to be 5 bytes, but supply only 4 bytes.\n  data.writeBEInt<int32_t>(5);\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint8_t>(1);\n  data.writeBEInt<uint8_t>(2);\n  data.writeBEInt<uint8_t>(3);\n\n  uint64_t pos = 0;\n  uint64_t left = 5 + 4;\n  ASSERT_FALSE(field.read(data, pos, left));\n}\n\n// Array composite type tests.\nTEST(Array, SingleInt) {\n  Array<Int32> field;\n\n  Buffer::OwnedImpl data;\n  // Write the number of elements in the array.\n  data.writeBEInt<uint16_t>(1);\n  data.writeBEInt<uint32_t>(123);\n\n  uint64_t pos = 0;\n  uint64_t left = 2 + 4;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 6);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"Array of 1\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"123\") != std::string::npos);\n}\n\nTEST(Array, MultipleInts) {\n  Array<Int8> field;\n\n  Buffer::OwnedImpl data;\n  // Write 3 elements into array.\n  data.writeBEInt<uint16_t>(3);\n  data.writeBEInt<uint8_t>(211);\n  data.writeBEInt<uint8_t>(212);\n  data.writeBEInt<uint8_t>(213);\n\n  uint64_t pos = 0;\n  uint64_t left = 2 + 3 * 1;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 5);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"Array of 3\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"211\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"212\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"213\") != std::string::npos);\n}\n\nTEST(Array, Empty) {\n  Array<Int16> field;\n\n  Buffer::OwnedImpl data;\n  // Write 0 elements into array.\n  data.writeBEInt<uint16_t>(0);\n\n  
uint64_t pos = 0;\n  uint64_t left = 2;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 2);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"Array of 0\") != std::string::npos);\n}\n\n// Test situation when there is not enough data to read the length of the Array.\nTEST(Array, NotEnoughDataForLength) {\n  Array<Int16> field;\n\n  Buffer::OwnedImpl data;\n  // Data field is 2 bytes long. Write just one byte.\n  data.writeBEInt<uint8_t>(1);\n\n  uint64_t pos = 0;\n  uint64_t left = 1;\n  ASSERT_FALSE(field.read(data, pos, left));\n}\n\n// Test situation when there is not enough data in the buffer to read one of the elements\n// in the array.\nTEST(Array, NotEnoughDataForValues) {\n  Array<Int32> field;\n\n  Buffer::OwnedImpl data;\n  // There will be 2 elements in the array.\n  // The first element is 4 bytes long.\n  // The second element should be 4 bytes long but is only 2 bytes long.\n  data.writeBEInt<uint16_t>(2);\n  data.writeBEInt<uint32_t>(101);\n  data.writeBEInt<uint16_t>(102);\n\n  uint64_t pos = 0;\n  uint64_t left = 2 + 4 + 2;\n  ASSERT_FALSE(field.read(data, pos, left));\n}\n\n// Repeated composite type tests.\nTEST(Repeated, BasicTestWithStrings) {\n  Repeated<String> field;\n\n  Buffer::OwnedImpl data;\n  // Write some data to simulate message header.\n  // It will be ignored.\n  data.writeBEInt<uint32_t>(101);\n  data.writeBEInt<uint8_t>(102);\n  // Now write 3 strings. 
Each terminated by zero byte.\n  data.add(\"test1\");\n  data.writeBEInt<uint8_t>(0);\n  data.add(\"test2\");\n  data.writeBEInt<uint8_t>(0);\n  data.add(\"test3\");\n  data.writeBEInt<uint8_t>(0);\n  uint64_t pos = 5;\n  uint64_t left = 3 * 6;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 5 + 3 * 6);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"test1\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"test2\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"test3\") != std::string::npos);\n}\n\n// Test verifies that read fails when there is less\n// bytes in the buffer than bytes needed to read to the end of the message.\nTEST(Repeated, NotEnoughData) {\n  Repeated<String> field;\n\n  Buffer::OwnedImpl data;\n  // Write some data to simulate message header.\n  // It will be ignored.\n  data.writeBEInt<uint32_t>(101);\n  data.writeBEInt<uint8_t>(102);\n  data.add(\"test\");\n\n  // \"test\" with terminating zero is 5 bytes.\n  // Set \"left\" to indicate that 6 bytes are needed.\n  uint64_t pos = 5;\n  uint64_t left = 5 + 6;\n  ASSERT_FALSE(field.read(data, pos, left));\n}\n\n// Test verifies that entire read fails when one of\n// subordinate reads fails.\nTEST(Repeated, NotEnoughDataForSecondString) {\n  Repeated<String> field;\n\n  Buffer::OwnedImpl data;\n  // Write some data to simulate message header.\n  // It will be ignored.\n  data.writeBEInt<uint32_t>(101);\n  data.writeBEInt<uint8_t>(102);\n  // Now write 3 strings. 
Each terminated by zero byte.\n  data.add(\"test1\");\n  data.writeBEInt<uint8_t>(0);\n  data.add(\"test2\");\n  // Do not write terminating zero.\n  // Read should fail here.\n  uint64_t pos = 5;\n  uint64_t left = 6 + 5;\n  ASSERT_FALSE(field.read(data, pos, left));\n}\n\n// Sequence composite type tests.\nTEST(Sequence, Int32SingleValue) {\n  Sequence<Int32> field;\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint32_t>(101);\n\n  uint64_t pos = 0;\n  uint64_t left = 4;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 4);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"101\") != std::string::npos);\n}\n\nTEST(Sequence, Int16SingleValue) {\n  Sequence<Int16> field;\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint16_t>(101);\n\n  uint64_t pos = 0;\n  uint64_t left = 2;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 2);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"101\") != std::string::npos);\n}\n\nTEST(Sequence, BasicMultipleValues1) {\n  Sequence<Int32, String> field;\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint32_t>(101);\n  data.add(\"test\");\n  data.writeBEInt<uint8_t>(0);\n\n  uint64_t pos = 0;\n  uint64_t left = 4 + 5;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, 4 + 5);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"101\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"test\") != std::string::npos);\n}\n\nTEST(Sequence, BasicMultipleValues2) {\n  Sequence<Int32, Int16> field;\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint32_t>(100);\n  data.writeBEInt<uint16_t>(101);\n\n  uint64_t pos = 0;\n  uint64_t left = 4 + 2;\n  uint64_t expected_pos = left;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, expected_pos);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"100\") != std::string::npos);\n  
ASSERT_TRUE(out.find(\"101\") != std::string::npos);\n}\n\nTEST(Sequence, BasicMultipleValues3) {\n  Sequence<Int32, Int16, Int32, Int16> field;\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint32_t>(100);\n  data.writeBEInt<uint16_t>(101);\n  data.writeBEInt<uint32_t>(102);\n  data.writeBEInt<uint16_t>(103);\n\n  uint64_t pos = 0;\n  uint64_t left = 4 + 2 + 4 + 2;\n  uint64_t expected_pos = left;\n  ASSERT_TRUE(field.read(data, pos, left));\n  ASSERT_THAT(pos, expected_pos);\n  ASSERT_THAT(left, 0);\n\n  auto out = field.toString();\n  ASSERT_TRUE(out.find(\"100\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"101\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"102\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"103\") != std::string::npos);\n}\n\n// Test versifies that read fails when reading of one element\n// in Sequence fails.\nTEST(Sequence, NotEnoughData) {\n  Sequence<Int32, String> field;\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint32_t>(101);\n  // Do not write terminating zero for the string.\n  data.add(\"test\");\n\n  uint64_t pos = 0;\n  uint64_t left = 4 + 4;\n  ASSERT_FALSE(field.read(data, pos, left));\n}\n\n// Tests for Message interface and helper function createMsgBodyReader.\nTEST(PostgresMessage, SingleFieldInt32) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Int32>();\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint32_t>(12);\n  ASSERT_TRUE(msg->read(data, 4));\n  auto out = msg->toString();\n  ASSERT_THAT(out, \"[12]\");\n}\n\nTEST(PostgresMessage, SingleFieldInt16) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Int16>();\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint16_t>(12);\n  ASSERT_TRUE(msg->read(data, 2));\n  auto out = msg->toString();\n  ASSERT_THAT(out, \"[12]\");\n}\n\nTEST(PostgresMessage, SingleByteN) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<ByteN>();\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint8_t>(1);\n  
data.writeBEInt<uint8_t>(2);\n  data.writeBEInt<uint8_t>(3);\n  data.writeBEInt<uint8_t>(4);\n  ASSERT_TRUE(msg->read(data, 5 * 1));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"0\") != std::string::npos); // NOLINT\n  ASSERT_TRUE(out.find(\"1\") != std::string::npos); // NOLINT\n  ASSERT_TRUE(out.find(\"2\") != std::string::npos); // NOLINT\n  ASSERT_TRUE(out.find(\"3\") != std::string::npos); // NOLINT\n  ASSERT_TRUE(out.find(\"4\") != std::string::npos); // NOLINT\n}\n\nTEST(PostgresMessage, MultipleValues1) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Int32, Int16>();\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint32_t>(12);\n  data.writeBEInt<uint16_t>(13);\n  ASSERT_TRUE(msg->read(data, 4 + 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"12\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n}\n\nTEST(PostgresMessage, MultipleValues2) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Int16, Int32, Int16>();\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint16_t>(13);\n  data.writeBEInt<uint32_t>(14);\n  data.writeBEInt<uint16_t>(15);\n  ASSERT_TRUE(msg->read(data, 2 + 4 + 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"14\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"15\") != std::string::npos);\n}\n\nTEST(PostgresMessage, MultipleValues3) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Int32, Int16, Int32, Int16>();\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint32_t>(12);\n  data.writeBEInt<uint16_t>(13);\n  data.writeBEInt<uint32_t>(14);\n  data.writeBEInt<uint16_t>(15);\n  ASSERT_TRUE(msg->read(data, 4 + 2 + 4 + 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"12\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"14\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"15\") != std::string::npos);\n}\n\nTEST(PostgresMessage, 
MultipleValues4) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Int16, Int32, Int16, Int32, Int16>();\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint16_t>(13);\n  data.writeBEInt<uint32_t>(14);\n  data.writeBEInt<uint16_t>(15);\n  data.writeBEInt<uint32_t>(16);\n  data.writeBEInt<uint16_t>(17);\n  ASSERT_TRUE(msg->read(data, 2 + 4 + 2 + 4 + 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"14\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"15\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"16\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"17\") != std::string::npos);\n}\n\nTEST(PostgresMessage, MultipleValues5) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Int32, Int16, Int32, Int16, Int32, Int16>();\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<uint32_t>(12);\n  data.writeBEInt<uint16_t>(13);\n  data.writeBEInt<uint32_t>(14);\n  data.writeBEInt<uint16_t>(15);\n  data.writeBEInt<uint32_t>(16);\n  data.writeBEInt<uint16_t>(17);\n  ASSERT_TRUE(msg->read(data, 4 + 2 + 4 + 2 + 4 + 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"12\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"14\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"15\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"16\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"17\") != std::string::npos);\n}\n\nTEST(PostgresMessage, MultipleValues6) {\n  std::unique_ptr<Message> msg =\n      createMsgBodyReader<String, Int32, Int16, Int32, Int16, Int32, Int16>();\n\n  Buffer::OwnedImpl data;\n  data.add(\"test\");\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint32_t>(12);\n  data.writeBEInt<uint16_t>(13);\n  data.writeBEInt<uint32_t>(14);\n  data.writeBEInt<uint16_t>(15);\n  data.writeBEInt<uint32_t>(16);\n  data.writeBEInt<uint16_t>(17);\n  ASSERT_TRUE(msg->read(data, 5 + 4 + 2 + 4 + 2 + 4 + 2));\n  auto out = 
msg->toString();\n  ASSERT_TRUE(out.find(\"test\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"12\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"14\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"15\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"16\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"17\") != std::string::npos);\n}\n\nTEST(PostgresMessage, MultipleValues7) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<String, Array<Int32>>();\n\n  Buffer::OwnedImpl data;\n  data.add(\"test\");\n  data.writeBEInt<uint8_t>(0);\n\n  // Array of 3 elements.\n  data.writeBEInt<int16_t>(3);\n  data.writeBEInt<uint32_t>(13);\n  data.writeBEInt<uint32_t>(14);\n  data.writeBEInt<uint32_t>(15);\n  ASSERT_TRUE(msg->read(data, 5 + 2 + 3 * 4));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"test\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"14\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"15\") != std::string::npos);\n}\n\nTEST(PostgresMessage, ArraySet1) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Array<Int16>>();\n\n  Buffer::OwnedImpl data;\n  // There will be 3 elements in the array.\n  data.writeBEInt<int16_t>(3);\n  data.writeBEInt<uint16_t>(13);\n  data.writeBEInt<uint16_t>(14);\n  data.writeBEInt<uint16_t>(15);\n  ASSERT_TRUE(msg->read(data, 2 + 3 * 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"14\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"15\") != std::string::npos);\n}\n\nTEST(PostgresMessage, ArraySet2) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Array<VarByteN>, Int16>();\n\n  Buffer::OwnedImpl data;\n  // Array of 1 element of VarByteN.\n  data.writeBEInt<int16_t>(1);\n  // VarByteN of 5 bytes long.\n  data.writeBEInt<int32_t>(5);\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint8_t>(1);\n  
data.writeBEInt<uint8_t>(2);\n  data.writeBEInt<uint8_t>(3);\n  data.writeBEInt<uint8_t>(114);\n\n  // 16-bits value.\n  data.writeBEInt<uint16_t>(115);\n\n  ASSERT_TRUE(msg->read(data, 2 + 4 + 5 + 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"114\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"115\") != std::string::npos);\n}\n\nTEST(PostgresMessage, ArraySet3) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Array<Int16>, Array<VarByteN>, Int16>();\n\n  Buffer::OwnedImpl data;\n  // There will be 3 elements in the array.\n  data.writeBEInt<int16_t>(3);\n  data.writeBEInt<uint16_t>(13);\n  data.writeBEInt<uint16_t>(14);\n  data.writeBEInt<uint16_t>(15);\n\n  // Array of 1 element of VarByteN.\n  data.writeBEInt<int16_t>(1);\n  // VarByteN of 5 bytes long.\n  data.writeBEInt<int32_t>(5);\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint8_t>(1);\n  data.writeBEInt<uint8_t>(2);\n  data.writeBEInt<uint8_t>(3);\n  data.writeBEInt<uint8_t>(4);\n\n  // 16-bits value.\n  data.writeBEInt<uint16_t>(115);\n\n  ASSERT_TRUE(msg->read(data, 2 + 3 * 2 + 2 + 4 + 5 + 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"115\") != std::string::npos);\n}\n\nTEST(PostgresMessage, ArraySet4) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Array<VarByteN>, Array<Int16>>();\n\n  Buffer::OwnedImpl data;\n  // Array of 1 element of VarByteN.\n  data.writeBEInt<int16_t>(1);\n  // VarByteN of 5 bytes long.\n  data.writeBEInt<int32_t>(5);\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint8_t>(111);\n  data.writeBEInt<uint8_t>(2);\n  data.writeBEInt<uint8_t>(3);\n  data.writeBEInt<uint8_t>(4);\n\n  // Array of 2 elements in the second array.\n  data.writeBEInt<int16_t>(2);\n  data.writeBEInt<uint16_t>(113);\n  data.writeBEInt<uint16_t>(114);\n\n  ASSERT_TRUE(msg->read(data, 2 + 4 + 5 + 2 + 2 * 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"111\") != std::string::npos);\n 
 ASSERT_TRUE(out.find(\"114\") != std::string::npos);\n}\n\nTEST(PostgresMessage, ArraySet5) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Array<Int16>, Array<VarByteN>, Array<Int16>>();\n\n  Buffer::OwnedImpl data;\n  // There will be 3 elements in the first array.\n  data.writeBEInt<int16_t>(3);\n  data.writeBEInt<uint16_t>(13);\n  data.writeBEInt<uint16_t>(14);\n  data.writeBEInt<uint16_t>(15);\n\n  // Array of 1 element of VarByteN.\n  data.writeBEInt<int16_t>(1);\n  // VarByteN of 5 bytes long.\n  data.writeBEInt<int32_t>(5);\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint8_t>(1);\n  data.writeBEInt<uint8_t>(2);\n  data.writeBEInt<uint8_t>(3);\n  data.writeBEInt<uint8_t>(4);\n\n  // Array of 2 elements in the third array.\n  data.writeBEInt<int16_t>(2);\n  data.writeBEInt<uint16_t>(113);\n  data.writeBEInt<uint16_t>(114);\n\n  ASSERT_TRUE(msg->read(data, 2 + 3 * 2 + 2 + 4 + 5 + 2 + 2 * 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"114\") != std::string::npos);\n}\n\nTEST(PostgresMessage, ArraySet6) {\n  std::unique_ptr<Message> msg =\n      createMsgBodyReader<String, Array<Int16>, Array<VarByteN>, Array<Int16>>();\n\n  Buffer::OwnedImpl data;\n  // Write string.\n  data.add(\"test\");\n  data.writeBEInt<int8_t>(0);\n\n  // There will be 3 elements in the first array.\n  data.writeBEInt<int16_t>(3);\n  data.writeBEInt<uint16_t>(13);\n  data.writeBEInt<uint16_t>(14);\n  data.writeBEInt<uint16_t>(15);\n\n  // Array of 1 element of VarByteN.\n  data.writeBEInt<int16_t>(1);\n  // VarByteN of 5 bytes long.\n  data.writeBEInt<int32_t>(5);\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint8_t>(1);\n  data.writeBEInt<uint8_t>(2);\n  data.writeBEInt<uint8_t>(3);\n  data.writeBEInt<uint8_t>(4);\n\n  // Array of 2 elements in the third array.\n  data.writeBEInt<int16_t>(2);\n  data.writeBEInt<uint16_t>(113);\n  data.writeBEInt<uint16_t>(114);\n\n  ASSERT_TRUE(msg->read(data, 5 + 
2 + 3 * 2 + 2 + 4 + 5 + 2 + 2 * 2));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"test\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"13\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"114\") != std::string::npos);\n}\n\nTEST(PostgresMessage, Repeated1) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Repeated<String>>();\n\n  Buffer::OwnedImpl data;\n  // Write 3 strings.\n  data.add(\"test1\");\n  data.writeBEInt<int8_t>(0);\n  data.add(\"test2\");\n  data.writeBEInt<int8_t>(0);\n  data.add(\"test3\");\n  data.writeBEInt<int8_t>(0);\n\n  ASSERT_TRUE(msg->read(data, 3 * 6));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"test1\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"test2\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"test3\") != std::string::npos);\n}\n\nTEST(PostgresMessage, Repeated2) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Int32, Repeated<String>>();\n\n  Buffer::OwnedImpl data;\n  data.writeBEInt<int32_t>(115);\n  // Write 3 strings.\n  data.add(\"test1\");\n  data.writeBEInt<int8_t>(0);\n  data.add(\"test2\");\n  data.writeBEInt<int8_t>(0);\n  data.add(\"test3\");\n  data.writeBEInt<int8_t>(0);\n\n  ASSERT_TRUE(msg->read(data, 4 + 3 * 6));\n  auto out = msg->toString();\n  ASSERT_TRUE(out.find(\"115\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"test1\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"test2\") != std::string::npos);\n  ASSERT_TRUE(out.find(\"test3\") != std::string::npos);\n}\n\nTEST(PostgresMessage, NotEnoughData) {\n  std::unique_ptr<Message> msg = createMsgBodyReader<Int32, String>();\n  Buffer::OwnedImpl data;\n  // Write only 3 bytes into the buffer.\n  data.writeBEInt<uint8_t>(0);\n  data.writeBEInt<uint8_t>(1);\n  data.writeBEInt<uint8_t>(2);\n\n  ASSERT_FALSE(msg->read(data, 3));\n}\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/postgres_proxy/postgres_test_config.yaml",
    "content": "admin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: \"{}\"\n      port_value: 0\nstatic_resources:\n  clusters:\n    name: cluster_0\n    type: STATIC\n    load_assignment:\n      cluster_name: cluster_0\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: \"{}\"\n                port_value: 0\n  listeners:\n    name: listener_0\n    address:\n      socket_address:\n        address: \"{}\"\n        port_value: 0\n    filter_chains:\n      - filters:\n          - name: postgres\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.postgres_proxy.v3alpha.PostgresProxy\n              stat_prefix: postgres_stats\n          - name: tcp\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy\n              stat_prefix: tcp_stats\n              cluster: cluster_0\n"
  },
  {
    "path": "test/extensions/filters/network/postgres_proxy/postgres_test_utils.cc",
    "content": "#include \"test/extensions/filters/network/postgres_proxy/postgres_test_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\n// Helper function to create postgres messages.\nvoid createPostgresMsg(Buffer::Instance& data, std::string type, std::string payload) {\n  data.drain(data.length());\n  ASSERT(1 == type.length());\n  data.add(type);\n  data.writeBEInt<uint32_t>(4 + payload.length());\n  data.add(payload);\n}\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/postgres_proxy/postgres_test_utils.h",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace PostgresProxy {\n\nvoid createPostgresMsg(Buffer::Instance& data, std::string type, std::string payload = \"\");\n\n} // namespace PostgresProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"ratelimit_test\",\n    srcs = [\"ratelimit_test.cc\"],\n    extension_name = \"envoy.filters.network.ratelimit\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/network/ratelimit:ratelimit_lib\",\n        \"//test/extensions/filters/common/ratelimit:ratelimit_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/ratelimit:ratelimit_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.ratelimit\",\n    deps = [\n        \"//source/extensions/filters/network/ratelimit:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/ratelimit/config_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.validate.h\"\n\n#include \"extensions/filters/network/ratelimit/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RateLimitFilter {\n\nTEST(RateLimitFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(RateLimitConfigFactory().createFilterFactoryFromProto(\n                   envoy::extensions::filters::network::ratelimit::v3::RateLimit(), context),\n               ProtoValidationException);\n}\n\nTEST(RateLimitFilterConfigTest, CorrectProto) {\n  const std::string yaml = R\"EOF(\n  stat_prefix: my_stat_prefix\n  domain: fake_domain\n  descriptors:\n    entries:\n       key: my_key\n       value: my_value\n  timeout: 2s\n  rate_limit_service:\n    grpc_service:\n      envoy_grpc:\n        cluster_name: ratelimit_cluster\n  )EOF\";\n\n  envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{};\n  TestUtility::loadFromYaml(yaml, proto_config);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  EXPECT_CALL(context.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n        return std::make_unique<NiceMock<Grpc::MockAsyncClientFactory>>();\n      }));\n\n  RateLimitConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  
cb(connection);\n}\n\nTEST(RateLimitFilterConfigTest, EmptyProto) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  NiceMock<Server::MockInstance> instance;\n  RateLimitConfigFactory factory;\n\n  envoy::extensions::filters::network::ratelimit::v3::RateLimit empty_proto_config =\n      *dynamic_cast<envoy::extensions::filters::network::ratelimit::v3::RateLimit*>(\n          factory.createEmptyConfigProto().get());\n  EXPECT_THROW(factory.createFilterFactoryFromProto(empty_proto_config, context), EnvoyException);\n}\n\nTEST(RateLimitFilterConfigTest, IncorrectProto) {\n  std::string yaml_string = R\"EOF(\nstat_prefix: my_stat_prefix\ndomain: fake_domain\ndescriptors:\n- entries:\n  - key: my_key\n    value: my_value\nip_allowlist: '12'\n  )EOF\";\n\n  envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config;\n  EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config), EnvoyException,\n                          \"ip_allowlist: Cannot find field\");\n}\n\n// Test that the deprecated extension name still functions.\nTEST(RateLimitFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.ratelimit\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedNetworkFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace RateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/ratelimit/ratelimit_test.cc",
    "content": "#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/ratelimit/ratelimit.h\"\n\n#include \"test/extensions/filters/common/ratelimit/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/ratelimit/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::WithArgs;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RateLimitFilter {\n\nclass RateLimitFilterTest : public testing::Test {\npublic:\n  void SetUpTest(const std::string& yaml) {\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.tcp_filter_enabled\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.tcp_filter_enforcing\", 100))\n        .WillByDefault(Return(true));\n\n    envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{};\n    TestUtility::loadFromYaml(yaml, proto_config, false, true);\n    config_ = std::make_shared<Config>(proto_config, stats_store_, runtime_);\n    client_ = new Filters::Common::RateLimit::MockClient();\n    filter_ = std::make_unique<Filter>(config_, Filters::Common::RateLimit::ClientPtr{client_});\n    filter_->initializeReadFilterCallbacks(filter_callbacks_);\n\n    // NOP currently.\n    filter_->onAboveWriteBufferHighWatermark();\n    filter_->onBelowWriteBufferLowWatermark();\n  }\n\n  ~RateLimitFilterTest() override {\n    for (const Stats::GaugeSharedPtr& gauge : stats_store_.gauges()) {\n      EXPECT_EQ(0U, gauge->value());\n    }\n  }\n\n  const 
std::string filter_config_ = R\"EOF(\ndomain: foo\ndescriptors:\n- entries:\n   - key: hello\n     value: world\n   - key: foo\n     value: bar\n- entries:\n   - key: foo2\n     value: bar2\nstat_prefix: name\n)EOF\";\n\n  const std::string fail_close_config_ = R\"EOF(\ndomain: foo\ndescriptors:\n- entries:\n   - key: hello\n     value: world\n   - key: foo\n     value: bar\n- entries:\n   - key: foo2\n     value: bar2\nstat_prefix: name\nfailure_mode_deny: true\n)EOF\";\n\n  Stats::TestUtil::TestStore stats_store_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  ConfigSharedPtr config_;\n  Filters::Common::RateLimit::MockClient* client_;\n  std::unique_ptr<Filter> filter_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  Filters::Common::RateLimit::RequestCallbacks* request_callbacks_{};\n};\n\nTEST_F(RateLimitFilterTest, OK) {\n  InSequence s;\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"hello\", \"world\"}, {\"foo\", \"bar\"}}}, {{{\"foo2\", \"bar2\"}}}}),\n                              testing::A<Tracing::Span&>()))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_CALL(filter_callbacks_, continueReading());\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  
filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose);\n\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.ok\").value());\n}\n\nTEST_F(RateLimitFilterTest, OverLimit) {\n  InSequence s;\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\", _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.over_limit\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.cx_closed\").value());\n}\n\nTEST_F(RateLimitFilterTest, OverLimitNotEnforcing) {\n  InSequence s;\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\", _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.tcp_filter_enforcing\", 100))\n      
.WillOnce(Return(false));\n  EXPECT_CALL(filter_callbacks_.connection_, close(_)).Times(0);\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  EXPECT_CALL(filter_callbacks_, continueReading());\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.over_limit\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ratelimit.name.cx_closed\").value());\n}\n\nTEST_F(RateLimitFilterTest, Error) {\n  InSequence s;\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\", _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_CALL(filter_callbacks_, continueReading());\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.error\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.failure_mode_allowed\").value());\n}\n\nTEST_F(RateLimitFilterTest, Disconnect) {\n  InSequence s;\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\", _, _))\n      .WillOnce(\n          
WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel());\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.total\").value());\n}\n\nTEST_F(RateLimitFilterTest, ImmediateOK) {\n  InSequence s;\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(filter_callbacks_, continueReading()).Times(0);\n  EXPECT_CALL(*client_, limit(_, \"foo\", _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.ok\").value());\n}\n\nTEST_F(RateLimitFilterTest, ImmediateError) {\n  InSequence s;\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(filter_callbacks_, continueReading()).Times(0);\n  EXPECT_CALL(*client_, limit(_, \"foo\", _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, 
nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, cancel()).Times(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.error\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.failure_mode_allowed\").value());\n}\n\nTEST_F(RateLimitFilterTest, RuntimeDisable) {\n  InSequence s;\n  SetUpTest(filter_config_);\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.tcp_filter_enabled\", 100))\n      .WillOnce(Return(false));\n  EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0);\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n}\n\nTEST_F(RateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) {\n  InSequence s;\n  SetUpTest(fail_close_config_);\n\n  EXPECT_CALL(*client_, limit(_, \"foo\", _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false));\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false));\n\n  EXPECT_CALL(*client_, 
cancel()).Times(0);\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.total\").value());\n  EXPECT_EQ(1U, stats_store_.counter(\"ratelimit.name.error\").value());\n  EXPECT_EQ(0U, stats_store_.counter(\"ratelimit.name.failure_mode_allowed\").value());\n}\n\n} // namespace RateLimitFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rbac/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.rbac\",\n    deps = [\n        \"//source/extensions/filters/network/rbac:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/rbac/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"filter_test\",\n    srcs = [\"filter_test.cc\"],\n    extension_name = \"envoy.filters.network.rbac\",\n    deps = [\n        \"//source/extensions/filters/common/rbac:utility_lib\",\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/rbac:rbac_filter\",\n        \"//test/mocks/network:network_mocks\",\n        \"@envoy_api//envoy/config/rbac/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/rbac/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"integration_test\",\n    srcs = [\"integration_test.cc\"],\n    extension_name = \"envoy.filters.network.rbac\",\n    deps = [\n        \"//source/extensions/filters/network/echo:config\",\n        \"//source/extensions/filters/network/rbac:config\",\n        \"//test/integration:integration_lib\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/rbac/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/rbac/config_test.cc",
    "content": "#include \"envoy/config/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.validate.h\"\n\n#include \"extensions/filters/network/rbac/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"fmt/printf.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RBACFilter {\nnamespace {\n\nconst std::string header = R\"EOF(\n{ \"header\": {\"name\": \"key\", \"exact_match\": \"value\"} }\n)EOF\";\n\n} // namespace\n\nclass RoleBasedAccessControlNetworkFilterConfigFactoryTest : public testing::Test {\npublic:\n  void validateRule(const std::string& policy_json) {\n    checkRule(fmt::sprintf(policy_json, header));\n  }\n\nprivate:\n  void checkRule(const std::string& policy_json) {\n    envoy::config::rbac::v3::Policy policy_proto{};\n    TestUtility::loadFromJson(policy_json, policy_proto);\n\n    envoy::extensions::filters::network::rbac::v3::RBAC config{};\n    config.set_stat_prefix(\"test\");\n    (*config.mutable_rules()->mutable_policies())[\"foo\"] = policy_proto;\n\n    NiceMock<Server::Configuration::MockFactoryContext> context;\n    RoleBasedAccessControlNetworkFilterConfigFactory factory;\n    EXPECT_THROW(factory.createFilterFactoryFromProto(config, context), Envoy::EnvoyException);\n\n    config.clear_rules();\n    (*config.mutable_shadow_rules()->mutable_policies())[\"foo\"] = policy_proto;\n    EXPECT_THROW(factory.createFilterFactoryFromProto(config, context), Envoy::EnvoyException);\n  }\n};\n\nTEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, ValidProto) {\n  envoy::config::rbac::v3::Policy policy;\n  policy.add_permissions()->set_any(true);\n  policy.add_principals()->set_any(true);\n  envoy::extensions::filters::network::rbac::v3::RBAC config;\n  
config.set_stat_prefix(\"stats\");\n  (*config.mutable_rules()->mutable_policies())[\"foo\"] = policy;\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RoleBasedAccessControlNetworkFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, EmptyProto) {\n  RoleBasedAccessControlNetworkFilterConfigFactory factory;\n  EXPECT_NE(nullptr, dynamic_cast<envoy::extensions::filters::network::rbac::v3::RBAC*>(\n                         factory.createEmptyConfigProto().get()));\n}\n\nTEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, InvalidPermission) {\n  validateRule(R\"EOF(\n{\n  \"permissions\": [ { \"any\": true }, { \"and_rules\": { \"rules\": [ { \"any\": true }, %s ] } } ],\n  \"principals\": [ { \"any\": true } ]\n}\n)EOF\");\n\n  validateRule(R\"EOF(\n{\n  \"permissions\": [ { \"any\": true }, { \"or_rules\": { \"rules\": [ { \"any\": true }, %s ] } } ],\n  \"principals\": [ { \"any\": true } ]\n}\n)EOF\");\n\n  validateRule(R\"EOF(\n{\n  \"permissions\": [ { \"any\": true }, { \"not_rule\": %s } ],\n  \"principals\": [ { \"any\": true } ]\n}\n)EOF\");\n\n  validateRule(R\"EOF(\n{\n  \"permissions\": [ { \"any\": true }, %s ],\n  \"principals\": [ { \"any\": true } ]\n}\n)EOF\");\n}\n\nTEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, InvalidPrincipal) {\n  validateRule(R\"EOF(\n{\n  \"principals\": [ { \"any\": true }, { \"and_ids\": { \"ids\": [ { \"any\": true }, %s ] } } ],\n  \"permissions\": [ { \"any\": true } ]\n}\n)EOF\");\n\n  validateRule(R\"EOF(\n{\n  \"principals\": [ { \"any\": true }, { \"or_ids\": { \"ids\": [ { \"any\": true }, %s ] } } ],\n  \"permissions\": [ { \"any\": true } ]\n}\n)EOF\");\n\n  validateRule(R\"EOF(\n{\n  \"principals\": [ { \"any\": true }, { \"not_id\": %s } 
],\n  \"permissions\": [ { \"any\": true } ]\n}\n)EOF\");\n\n  validateRule(R\"EOF(\n{\n  \"principals\": [ { \"any\": true }, %s ],\n  \"permissions\": [ { \"any\": true } ]\n}\n)EOF\");\n}\n\n} // namespace RBACFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rbac/filter_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.h\"\n\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/common/rbac/utility.h\"\n#include \"extensions/filters/network/rbac/rbac_filter.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/mocks/network/mocks.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RBACFilter {\n\nclass RoleBasedAccessControlNetworkFilterTest : public testing::Test {\npublic:\n  RoleBasedAccessControlFilterConfigSharedPtr\n  setupConfig(bool with_policy = true, bool continuous = false,\n              envoy::config::rbac::v3::RBAC::Action action = envoy::config::rbac::v3::RBAC::ALLOW) {\n\n    envoy::extensions::filters::network::rbac::v3::RBAC config;\n    config.set_stat_prefix(\"tcp.\");\n\n    if (with_policy) {\n      envoy::config::rbac::v3::Policy policy;\n      auto policy_rules = policy.add_permissions()->mutable_or_rules();\n      policy_rules->add_rules()->mutable_requested_server_name()->set_hidden_envoy_deprecated_regex(\n          \".*cncf.io\");\n      policy_rules->add_rules()->set_destination_port(123);\n      policy.add_principals()->set_any(true);\n      config.mutable_rules()->set_action(action);\n      (*config.mutable_rules()->mutable_policies())[\"foo\"] = policy;\n\n      envoy::config::rbac::v3::Policy shadow_policy;\n      auto shadow_policy_rules = shadow_policy.add_permissions()->mutable_or_rules();\n      shadow_policy_rules->add_rules()->mutable_requested_server_name()->set_exact(\"xyz.cncf.io\");\n      shadow_policy_rules->add_rules()->set_destination_port(456);\n      shadow_policy.add_principals()->set_any(true);\n      config.mutable_shadow_rules()->set_action(action);\n      (*config.mutable_shadow_rules()->mutable_policies())[\"bar\"] = 
shadow_policy;\n    }\n\n    if (continuous) {\n      config.set_enforcement_type(envoy::extensions::filters::network::rbac::v3::RBAC::CONTINUOUS);\n    }\n\n    return std::make_shared<RoleBasedAccessControlFilterConfig>(config, store_);\n  }\n\n  RoleBasedAccessControlNetworkFilterTest() : config_(setupConfig()) {\n    EXPECT_CALL(callbacks_, connection()).WillRepeatedly(ReturnRef(callbacks_.connection_));\n    EXPECT_CALL(callbacks_.connection_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_));\n\n    filter_ = std::make_unique<RoleBasedAccessControlFilter>(config_);\n    filter_->initializeReadFilterCallbacks(callbacks_);\n  }\n\n  void setDestinationPort(uint16_t port) {\n    address_ = Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", port, false);\n    EXPECT_CALL(stream_info_, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address_));\n  }\n\n  void setRequestedServerName(std::string server_name) {\n    requested_server_name_ = server_name;\n    ON_CALL(callbacks_.connection_, requestedServerName())\n        .WillByDefault(Return(requested_server_name_));\n  }\n\n  void checkAccessLogMetadata(bool expected) {\n    auto filter_meta = stream_info_.dynamicMetadata().filter_metadata().at(\n        Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace);\n    EXPECT_EQ(expected,\n              filter_meta.fields()\n                  .at(Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().AccessLogKey)\n                  .bool_value());\n  }\n\n  void setMetadata() {\n    ON_CALL(stream_info_, setDynamicMetadata(NetworkFilterNames::get().Rbac, _))\n        .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) {\n          stream_info_.metadata_.mutable_filter_metadata()->insert(\n              Protobuf::MapPair<std::string, ProtobufWkt::Struct>(NetworkFilterNames::get().Rbac,\n                                                                  obj));\n        }));\n\n    ON_CALL(stream_info_,\n 
           setDynamicMetadata(\n                Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, _))\n        .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) {\n          stream_info_.metadata_.mutable_filter_metadata()->insert(\n              Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\n                  Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, obj));\n        }));\n  }\n\n  NiceMock<Network::MockReadFilterCallbacks> callbacks_;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info_;\n  Stats::IsolatedStoreImpl store_;\n  Buffer::OwnedImpl data_;\n  RoleBasedAccessControlFilterConfigSharedPtr config_;\n\n  std::unique_ptr<RoleBasedAccessControlFilter> filter_;\n  Network::Address::InstanceConstSharedPtr address_;\n  std::string requested_server_name_;\n};\n\nTEST_F(RoleBasedAccessControlNetworkFilterTest, AllowedWithOneTimeEnforcement) {\n  setDestinationPort(123);\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n\n  // Call onData() twice, should only increase stats once.\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false));\n  EXPECT_EQ(1U, config_->stats().allowed_.value());\n  EXPECT_EQ(0U, config_->stats().denied_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_allowed_.value());\n  EXPECT_EQ(1U, config_->stats().shadow_denied_.value());\n}\n\nTEST_F(RoleBasedAccessControlNetworkFilterTest, AllowedWithContinuousEnforcement) {\n  config_ = setupConfig(true, true /* continuous enforcement */);\n  filter_ = std::make_unique<RoleBasedAccessControlFilter>(config_);\n  filter_->initializeReadFilterCallbacks(callbacks_);\n  setDestinationPort(123);\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n\n  // Call onData() twice, should increase stats twice.\n  EXPECT_EQ(Network::FilterStatus::Continue, 
filter_->onData(data_, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false));\n  EXPECT_EQ(2U, config_->stats().allowed_.value());\n  EXPECT_EQ(0U, config_->stats().denied_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_allowed_.value());\n  EXPECT_EQ(2U, config_->stats().shadow_denied_.value());\n}\n\nTEST_F(RoleBasedAccessControlNetworkFilterTest, RequestedServerName) {\n  setDestinationPort(999);\n  setRequestedServerName(\"www.cncf.io\");\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n\n  // Call onData() twice, should only increase stats once.\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false));\n  EXPECT_EQ(1U, config_->stats().allowed_.value());\n  EXPECT_EQ(0U, config_->stats().denied_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_allowed_.value());\n  EXPECT_EQ(1U, config_->stats().shadow_denied_.value());\n}\n\nTEST_F(RoleBasedAccessControlNetworkFilterTest, AllowedWithNoPolicy) {\n  config_ = setupConfig(false /* with_policy */);\n  filter_ = std::make_unique<RoleBasedAccessControlFilter>(config_);\n  filter_->initializeReadFilterCallbacks(callbacks_);\n  setDestinationPort(0);\n\n  // Allow access and no metric change when there is no policy.\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false));\n  EXPECT_EQ(0U, config_->stats().allowed_.value());\n  EXPECT_EQ(0U, config_->stats().denied_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_allowed_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_denied_.value());\n}\n\nTEST_F(RoleBasedAccessControlNetworkFilterTest, Denied) {\n  setDestinationPort(456);\n  setMetadata();\n\n  EXPECT_CALL(callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)).Times(2);\n\n  // Call onData() twice, should only increase stats once.\n  
EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data_, false));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data_, false));\n  EXPECT_EQ(0U, config_->stats().allowed_.value());\n  EXPECT_EQ(1U, config_->stats().denied_.value());\n  EXPECT_EQ(1U, config_->stats().shadow_allowed_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_denied_.value());\n\n  auto filter_meta =\n      stream_info_.dynamicMetadata().filter_metadata().at(NetworkFilterNames::get().Rbac);\n  EXPECT_EQ(\"bar\", filter_meta.fields().at(\"shadow_effective_policy_id\").string_value());\n  EXPECT_EQ(\"allowed\", filter_meta.fields().at(\"shadow_engine_result\").string_value());\n}\n\n// Log Tests\nTEST_F(RoleBasedAccessControlNetworkFilterTest, ShouldLog) {\n  config_ = setupConfig(true, false, envoy::config::rbac::v3::RBAC::LOG);\n  filter_ = std::make_unique<RoleBasedAccessControlFilter>(config_);\n  filter_->initializeReadFilterCallbacks(callbacks_);\n\n  setDestinationPort(123);\n  setMetadata();\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false));\n  EXPECT_EQ(1U, config_->stats().allowed_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_denied_.value());\n\n  checkAccessLogMetadata(true);\n}\n\nTEST_F(RoleBasedAccessControlNetworkFilterTest, ShouldNotLog) {\n  config_ = setupConfig(true, false, envoy::config::rbac::v3::RBAC::LOG);\n  filter_ = std::make_unique<RoleBasedAccessControlFilter>(config_);\n  filter_->initializeReadFilterCallbacks(callbacks_);\n\n  setDestinationPort(456);\n  setMetadata();\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false));\n  EXPECT_EQ(1U, config_->stats().allowed_.value());\n  EXPECT_EQ(0U, config_->stats().shadow_denied_.value());\n\n  checkAccessLogMetadata(false);\n}\n\nTEST_F(RoleBasedAccessControlNetworkFilterTest, AllowNoChangeLog) {\n  setDestinationPort(123);\n  setMetadata();\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false));\n\n  
// Check that Allow action does not set access log metadata\n  EXPECT_EQ(stream_info_.dynamicMetadata().filter_metadata().end(),\n            stream_info_.dynamicMetadata().filter_metadata().find(\n                Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace));\n}\n\n} // namespace RBACFilter\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rbac/integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.h\"\n#include \"envoy/extensions/filters/network/rbac/v3/rbac.pb.validate.h\"\n\n#include \"extensions/filters/network/rbac/config.h\"\n\n#include \"test/integration/integration.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"fmt/printf.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RBAC {\nnamespace {\n\nstd::string rbac_config;\n\n} // namespace\n\nclass RoleBasedAccessControlNetworkFilterIntegrationTest\n    : public testing::TestWithParam<Network::Address::IpVersion>,\n      public BaseIntegrationTest {\npublic:\n  RoleBasedAccessControlNetworkFilterIntegrationTest()\n      : BaseIntegrationTest(GetParam(), rbac_config) {}\n\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    rbac_config = absl::StrCat(ConfigHelper::baseConfig(), R\"EOF(\n    filter_chains:\n      filters:\n       -  name: rbac\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.filter.network.rbac.v2.RBAC\n            stat_prefix: tcp.\n            rules:\n              policies:\n                \"foo\":\n                  permissions:\n                    - any: true\n                  principals:\n                    - not_id:\n                        any: true\n       -  name: envoy.filters.network.echo\n)EOF\");\n  }\n\n  void initializeFilter(const std::string& config) {\n    config_helper_.addConfigModifier([config](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      envoy::config::listener::v3::Filter filter;\n      TestUtility::loadFromYaml(config, filter);\n      ASSERT_GT(bootstrap.mutable_static_resources()->listeners_size(), 0);\n      auto l = bootstrap.mutable_static_resources()->mutable_listeners(0);\n      ASSERT_GT(l->filter_chains_size(), 0);\n      
ASSERT_GT(l->filter_chains(0).filters_size(), 0);\n      l->mutable_filter_chains(0)->mutable_filters(0)->Swap(&filter);\n    });\n\n    BaseIntegrationTest::initialize();\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RoleBasedAccessControlNetworkFilterIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(RoleBasedAccessControlNetworkFilterIntegrationTest, Allowed) {\n  initializeFilter(R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.network.rbac.v2.RBAC\n  stat_prefix: tcp.\n  rules:\n    policies:\n      \"allow_all\":\n        permissions:\n          - any: true\n        principals:\n          - any: true\n  shadow_rules:\n    policies:\n      \"deny_all\":\n        permissions:\n          - any: true\n        principals:\n          - not_id:\n              any: true\n)EOF\");\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  ASSERT_TRUE(tcp_client->connected());\n  tcp_client->close();\n\n  test_server_->waitForCounterGe(\"tcp.rbac.allowed\", 1);\n  EXPECT_EQ(0U, test_server_->counter(\"tcp.rbac.denied\")->value());\n  EXPECT_EQ(0U, test_server_->counter(\"tcp.rbac.shadow_allowed\")->value());\n  test_server_->waitForCounterGe(\"tcp.rbac.shadow_denied\", 1);\n}\n\nTEST_P(RoleBasedAccessControlNetworkFilterIntegrationTest, Denied) {\n  initializeFilter(R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.network.rbac.v2.RBAC\n  stat_prefix: tcp.\n  rules:\n    policies:\n      \"deny_all\":\n        permissions:\n          - any: true\n        principals:\n          - not_id:\n              any: true\n  shadow_rules:\n    policies:\n      \"allow_all\":\n        permissions:\n          - any: true\n        principals:\n          - any: true\n)EOF\");\n  IntegrationTcpClientPtr 
tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\", false, false));\n  tcp_client->waitForDisconnect();\n\n  EXPECT_EQ(0U, test_server_->counter(\"tcp.rbac.allowed\")->value());\n  EXPECT_EQ(1U, test_server_->counter(\"tcp.rbac.denied\")->value());\n  EXPECT_EQ(1U, test_server_->counter(\"tcp.rbac.shadow_allowed\")->value());\n  EXPECT_EQ(0U, test_server_->counter(\"tcp.rbac.shadow_denied\")->value());\n}\n\nTEST_P(RoleBasedAccessControlNetworkFilterIntegrationTest, DeniedWithDenyAction) {\n  useListenerAccessLog(\"%CONNECTION_TERMINATION_DETAILS%\");\n  initializeFilter(R\"EOF(\nname: rbac\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.network.rbac.v2.RBAC\n  stat_prefix: tcp.\n  rules:\n    action: DENY\n    policies:\n      \"deny all\":\n        permissions:\n          - any: true\n        principals:\n          - any: true\n)EOF\");\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\", false, false));\n  tcp_client->waitForDisconnect();\n\n  EXPECT_EQ(0U, test_server_->counter(\"tcp.rbac.allowed\")->value());\n  EXPECT_EQ(1U, test_server_->counter(\"tcp.rbac.denied\")->value());\n  // Note the whitespace in the policy id is replaced by '_'.\n  EXPECT_THAT(waitForAccessLog(listener_access_log_name_),\n              testing::HasSubstr(\"rbac_access_denied_matched_policy[deny_all]\"));\n}\n\n} // namespace RBAC\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_benchmark_test\",\n    \"envoy_extension_cc_benchmark_binary\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"command_splitter_impl_test\",\n    srcs = [\"command_splitter_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.redis_proxy\",\n    # This test takes a while to run specially under tsan.\n    # Shard it to avoid test timeout.\n    shard_count = 2,\n    deps = [\n        \":redis_mocks\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/network/common/redis:fault_lib\",\n        \"//source/extensions/filters/network/redis_proxy:command_splitter_lib\",\n        \"//source/extensions/filters/network/redis_proxy:router_interface\",\n        \"//test/extensions/filters/network/common/redis:redis_mocks\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"conn_pool_impl_test\",\n    srcs = [\"conn_pool_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.redis_proxy\",\n    deps = [\n        \":redis_mocks\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/filters/network/common/redis:utility_lib\",\n        \"//source/extensions/filters/network/redis_proxy:conn_pool_lib\",\n        \"//test/extensions/clusters/redis:redis_cluster_mocks\",\n   
     \"//test/extensions/common/redis:mocks_lib\",\n        \"//test/extensions/filters/network/common/redis:redis_mocks\",\n        \"//test/extensions/filters/network/common/redis:test_utils_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:cluster_mocks\",\n        \"//test/mocks/upstream:cluster_update_callbacks_handle_mocks\",\n        \"//test/mocks/upstream:cluster_update_callbacks_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/mocks/upstream:thread_local_cluster_mocks\",\n        \"@envoy_api//envoy/config/cluster/redis:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"proxy_filter_test\",\n    srcs = [\"proxy_filter_test.cc\"],\n    extension_name = \"envoy.filters.network.redis_proxy\",\n    deps = [\n        \":redis_mocks\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/extensions/filters/network/redis_proxy:proxy_filter_lib\",\n        \"//test/extensions/filters/network/common/redis:redis_mocks\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"redis_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/extensions/common/redis:cluster_refresh_manager_interface\",\n        \"//source/extensions/filters/network/common/redis:client_interface\",\n        
\"//source/extensions/filters/network/common/redis:codec_lib\",\n        \"//source/extensions/filters/network/common/redis:fault_interface\",\n        \"//source/extensions/filters/network/redis_proxy:command_splitter_interface\",\n        \"//source/extensions/filters/network/redis_proxy:conn_pool_interface\",\n        \"//source/extensions/filters/network/redis_proxy:router_interface\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.redis_proxy\",\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/network/redis_proxy:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_benchmark_binary(\n    name = \"command_lookup_speed_test\",\n    srcs = [\"command_lookup_speed_test.cc\"],\n    extension_name = \"envoy.filters.network.redis_proxy\",\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \":redis_mocks\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/network/redis_proxy:command_splitter_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_extension_benchmark_test(\n    name = \"command_lookup_speed_test_benchmark_test\",\n    benchmark_binary = \"command_lookup_speed_test\",\n    extension_name = \"envoy.filters.network.redis_proxy\",\n)\n\nenvoy_extension_cc_test(\n    name = \"router_impl_test\",\n    srcs = [\"router_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.redis_proxy\",\n    deps = [\n        \":redis_mocks\",\n        
\"//source/extensions/filters/network/redis_proxy:router_lib\",\n        \"//test/extensions/filters/network/common/redis:redis_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"redis_proxy_integration_test\",\n    srcs = [\"redis_proxy_integration_test.cc\"],\n    extension_name = \"envoy.filters.network.redis_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/common/redis:fault_lib\",\n        \"//source/extensions/filters/network/redis_proxy:config\",\n        \"//test/integration:integration_lib\",\n    ],\n)\n\nenvoy_extension_cc_benchmark_binary(\n    name = \"command_split_speed_test\",\n    srcs = [\"command_split_speed_test.cc\"],\n    extension_name = \"envoy.filters.network.redis_proxy\",\n    external_deps = [\n        \"benchmark\",\n    ],\n    deps = [\n        \":redis_mocks\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/network/redis_proxy:command_splitter_lib\",\n        \"//source/extensions/filters/network/redis_proxy:router_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_extension_benchmark_test(\n    name = \"command_split_speed_test_benchmark_test\",\n    benchmark_binary = \"command_split_speed_test\",\n    extension_name = \"envoy.filters.network.redis_proxy\",\n)\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc",
    "content": "// Note: this should be run with --compilation_mode=opt, and would benefit from a\n// quiescent system with disabled cstate power management.\n\n#include <chrono>\n#include <string>\n#include <vector>\n\n#include \"common/common/fmt.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/network/common/redis/client_impl.h\"\n#include \"extensions/filters/network/common/redis/supported_commands.h\"\n#include \"extensions/filters/network/redis_proxy/command_splitter_impl.h\"\n\n#include \"test/extensions/filters/network/redis_proxy/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"benchmark/benchmark.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nclass NoOpSplitCallbacks : public CommandSplitter::SplitCallbacks {\npublic:\n  NoOpSplitCallbacks() = default;\n  ~NoOpSplitCallbacks() override = default;\n\n  bool connectionAllowed() override { return true; }\n  void onAuth(const std::string&) override {}\n  void onAuth(const std::string&, const std::string&) override {}\n  void onResponse(Common::Redis::RespValuePtr&&) override {}\n};\n\nclass NullRouterImpl : public Router {\n  RouteSharedPtr upstreamPool(std::string&) override { return nullptr; }\n};\n\nclass CommandLookUpSpeedTest {\npublic:\n  void makeBulkStringArray(Common::Redis::RespValue& value,\n                           const std::vector<std::string>& strings) {\n    std::vector<Common::Redis::RespValue> values(strings.size());\n    for (uint64_t i = 0; i < strings.size(); i++) {\n      values[i].type(Common::Redis::RespType::BulkString);\n      values[i].asString() = strings[i];\n    }\n\n    value.type(Common::Redis::RespType::Array);\n    value.asArray().swap(values);\n  }\n\n  void makeRequests() {\n    for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) {\n      
Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n      makeBulkStringArray(*request, {command, \"hello\"});\n      splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n    }\n\n    for (const std::string& command : Common::Redis::SupportedCommands::evalCommands()) {\n      Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n      makeBulkStringArray(*request, {command, \"hello\"});\n      splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n    }\n  }\n\n  Router* router_{new NullRouterImpl()};\n  Stats::IsolatedStoreImpl store_;\n  Event::SimulatedTimeSystem time_system_;\n  NiceMock<MockFaultManager> fault_manager_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  CommandSplitter::InstanceImpl splitter_{\n      RouterPtr{router_}, store_, \"redis.foo.\",\n      time_system_,       false,  std::make_unique<NiceMock<MockFaultManager>>(fault_manager_)};\n  NoOpSplitCallbacks callbacks_;\n  CommandSplitter::SplitRequestPtr handle_;\n};\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n\nstatic void BM_MakeRequests(benchmark::State& state) {\n  Envoy::Extensions::NetworkFilters::RedisProxy::CommandLookUpSpeedTest context;\n\n  for (auto _ : state) {\n    context.makeRequests();\n  }\n}\nBENCHMARK(BM_MakeRequests);\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/command_split_speed_test.cc",
    "content": "// Note: this should be run with --compilation_mode=opt, and would benefit from a\n// quiescent system with disabled cstate power management.\n\n#include <chrono>\n#include <string>\n#include <vector>\n\n#include \"common/common/fmt.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/network/common/redis/client_impl.h\"\n#include \"extensions/filters/network/common/redis/supported_commands.h\"\n#include \"extensions/filters/network/redis_proxy/command_splitter_impl.h\"\n#include \"extensions/filters/network/redis_proxy/router_impl.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"absl/types/variant.h\"\n#include \"benchmark/benchmark.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nclass CommandSplitSpeedTest {\npublic:\n  Common::Redis::RespValueSharedPtr\n  makeSharedBulkStringArray(uint64_t batch_size, uint64_t key_size, uint64_t value_size) {\n    Common::Redis::RespValueSharedPtr request{new Common::Redis::RespValue()};\n    std::vector<Common::Redis::RespValue> values(batch_size * 2 + 1);\n    values[0].type(Common::Redis::RespType::BulkString);\n    values[0].asString() = \"mset\";\n    for (uint64_t i = 1; i < batch_size * 2 + 1; i += 2) {\n      values[i].type(Common::Redis::RespType::BulkString);\n      values[i].asString() = std::string(key_size, 'k');\n      values[i + 1].type(Common::Redis::RespType::BulkString);\n      values[i + 1].asString() = std::string(value_size, 'v');\n    }\n\n    request->type(Common::Redis::RespType::Array);\n    request->asArray().swap(values);\n\n    return request;\n  }\n  using ValueOrPointer =\n      absl::variant<const Common::Redis::RespValue, Common::Redis::RespValueConstSharedPtr>;\n\n  void createShared(Common::Redis::RespValueSharedPtr request) {\n    for (uint64_t i = 1; i < request->asArray().size(); i += 2) {\n      auto single_set = std::make_shared<const 
Common::Redis::RespValue>(\n          request, Common::Redis::Utility::SetRequest::instance(), i, i + 1);\n    }\n  }\n\n  void createVariant(Common::Redis::RespValueSharedPtr request) {\n    for (uint64_t i = 1; i < request->asArray().size(); i += 2) {\n      Common::Redis::RespValue single_set(request, Common::Redis::Utility::SetRequest::instance(),\n                                          i, i + 1);\n      ValueOrPointer variant(single_set);\n    }\n  }\n\n  void createLocalCompositeArray(Common::Redis::RespValueSharedPtr& request) {\n    for (uint64_t i = 1; i < request->asArray().size(); i += 2) {\n      Common::Redis::RespValue single_set(request, Common::Redis::Utility::SetRequest::instance(),\n                                          i, i + 1);\n    }\n  }\n\n  void copy(Common::Redis::RespValueSharedPtr& request) {\n    std::vector<Common::Redis::RespValue> values(3);\n    values[0].type(Common::Redis::RespType::BulkString);\n    values[0].asString() = \"set\";\n    values[1].type(Common::Redis::RespType::BulkString);\n    values[2].type(Common::Redis::RespType::BulkString);\n    Common::Redis::RespValue single_mset;\n    single_mset.type(Common::Redis::RespType::Array);\n    single_mset.asArray().swap(values);\n\n    for (uint64_t i = 1; i < request->asArray().size(); i += 2) {\n      single_mset.asArray()[1].asString() = request->asArray()[i].asString();\n      single_mset.asArray()[2].asString() = request->asArray()[i + 1].asString();\n    }\n  }\n};\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n\nstatic void BM_Split_CompositeArray(benchmark::State& state) {\n  Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitSpeedTest context;\n  Envoy::Extensions::NetworkFilters::Common::Redis::RespValueSharedPtr request =\n      context.makeSharedBulkStringArray(state.range(0), 36, state.range(1));\n  for (auto _ : state) {\n    context.createLocalCompositeArray(request);\n  
}\n}\nBENCHMARK(BM_Split_CompositeArray)->Ranges({{1, 100}, {64, 8 << 14}});\n\nstatic void BM_Split_Copy(benchmark::State& state) {\n  Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitSpeedTest context;\n  Envoy::Extensions::NetworkFilters::Common::Redis::RespValueSharedPtr request =\n      context.makeSharedBulkStringArray(state.range(0), 36, state.range(1));\n  for (auto _ : state) {\n    context.copy(request);\n  }\n}\nBENCHMARK(BM_Split_Copy)->Ranges({{1, 100}, {64, 8 << 14}});\n\nstatic void BM_Split_CreateShared(benchmark::State& state) {\n  Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitSpeedTest context;\n  Envoy::Extensions::NetworkFilters::Common::Redis::RespValueSharedPtr request =\n      context.makeSharedBulkStringArray(state.range(0), 36, state.range(1));\n  for (auto _ : state) {\n    context.createShared(request);\n  }\n  state.counters[\"use_count\"] = request.use_count();\n}\nBENCHMARK(BM_Split_CreateShared)->Ranges({{1, 100}, {64, 8 << 14}});\n\nstatic void BM_Split_CreateVariant(benchmark::State& state) {\n  Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitSpeedTest context;\n  Envoy::Extensions::NetworkFilters::Common::Redis::RespValueSharedPtr request =\n      context.makeSharedBulkStringArray(state.range(0), 36, state.range(1));\n  for (auto _ : state) {\n    context.createVariant(request);\n  }\n  state.counters[\"use_count\"] = request.use_count();\n}\nBENCHMARK(BM_Split_CreateVariant)->Ranges({{1, 100}, {64, 8 << 14}});\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc",
    "content": "#include <cstdint>\n#include <list>\n#include <string>\n#include <vector>\n\n#include \"common/common/fmt.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/filters/network/common/redis/fault_impl.h\"\n#include \"extensions/filters/network/common/redis/supported_commands.h\"\n#include \"extensions/filters/network/redis_proxy/command_splitter_impl.h\"\n\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/extensions/filters/network/redis_proxy/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\nusing testing::_;\nusing testing::DoAll;\nusing testing::InSequence;\nusing testing::NiceMock;\nusing testing::Property;\nusing testing::Return;\nusing testing::WithArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\nnamespace CommandSplitter {\n\nclass RedisCommandSplitterImplTest : public testing::Test {\npublic:\n  RedisCommandSplitterImplTest() : RedisCommandSplitterImplTest(false) {}\n  RedisCommandSplitterImplTest(bool latency_in_macro)\n      : RedisCommandSplitterImplTest(latency_in_macro, nullptr) {}\n  RedisCommandSplitterImplTest(bool latency_in_macro, Common::Redis::FaultSharedPtr fault_ptr)\n      : latency_in_micros_(latency_in_macro) {\n    ON_CALL(*getFaultManager(), getFaultForCommand(_)).WillByDefault(Return(fault_ptr.get()));\n  }\n  void makeBulkStringArray(Common::Redis::RespValue& value,\n                           const std::vector<std::string>& strings) {\n    std::vector<Common::Redis::RespValue> values(strings.size());\n    for (uint64_t i = 0; i < strings.size(); i++) {\n      values[i].type(Common::Redis::RespType::BulkString);\n      values[i].asString() = strings[i];\n    }\n\n    value.type(Common::Redis::RespType::Array);\n    value.asArray().swap(values);\n  }\n\n  void setupMirrorPolicy() {\n   
 auto mirror_policy = std::make_shared<NiceMock<MockMirrorPolicy>>(mirror_conn_pool_shared_ptr_);\n    route_->policies_.push_back(mirror_policy);\n  }\n\n  MockFaultManager* getFaultManager() {\n    auto fault_manager_ptr = splitter_.fault_manager_.get();\n    return static_cast<MockFaultManager*>(fault_manager_ptr);\n  }\n\n  const bool latency_in_micros_;\n  ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()};\n  ConnPool::MockInstance* mirror_conn_pool_{new ConnPool::MockInstance()};\n  ConnPool::InstanceSharedPtr mirror_conn_pool_shared_ptr_{mirror_conn_pool_};\n  std::shared_ptr<NiceMock<MockRoute>> route_{\n      new NiceMock<MockRoute>(ConnPool::InstanceSharedPtr{conn_pool_})};\n  NiceMock<Stats::MockIsolatedStatsStore> store_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<MockFaultManager> fault_manager_;\n\n  Event::SimulatedTimeSystem time_system_;\n  InstanceImpl splitter_{std::make_unique<NiceMock<MockRouter>>(route_),\n                         store_,\n                         \"redis.foo.\",\n                         time_system_,\n                         latency_in_micros_,\n                         std::make_unique<NiceMock<MockFaultManager>>(fault_manager_)};\n  MockSplitCallbacks callbacks_;\n  SplitRequestPtr handle_;\n};\n\nTEST_F(RedisCommandSplitterImplTest, AuthWithNoPassword) {\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n  response.asString() = Response::get().InvalidRequest;\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {\"auth\"});\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_, dispatcher_));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.splitter.invalid_request\").value());\n}\n\nTEST_F(RedisCommandSplitterImplTest, CommandWhenAuthStillNeeded) {\n  Common::Redis::RespValue response;\n  
response.type(Common::Redis::RespType::Error);\n  response.asString() = \"NOAUTH Authentication required.\";\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(false));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {\"get\", \"foo\"});\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_, dispatcher_));\n}\n\nTEST_F(RedisCommandSplitterImplTest, InvalidRequestNotArray) {\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n  response.asString() = Response::get().InvalidRequest;\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_, dispatcher_));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.splitter.invalid_request\").value());\n}\n\nTEST_F(RedisCommandSplitterImplTest, InvalidRequestEmptyArray) {\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n  response.asString() = Response::get().InvalidRequest;\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  request->type(Common::Redis::RespType::Array);\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_, dispatcher_));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.splitter.invalid_request\").value());\n}\n\nTEST_F(RedisCommandSplitterImplTest, InvalidRequestArrayTooSmall) {\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n  response.asString() = Response::get().InvalidRequest;\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  
makeBulkStringArray(*request, {\"incr\"});\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_, dispatcher_));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.splitter.invalid_request\").value());\n}\n\nTEST_F(RedisCommandSplitterImplTest, InvalidRequestArrayNotStrings) {\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n  response.asString() = Response::get().InvalidRequest;\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {\"incr\", \"\"});\n  request->asArray()[1].type(Common::Redis::RespType::Null);\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_, dispatcher_));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.splitter.invalid_request\").value());\n}\n\nTEST_F(RedisCommandSplitterImplTest, UnsupportedCommand) {\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n  response.asString() = \"unsupported command 'newcommand'\";\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {\"newcommand\", \"hello\"});\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_, dispatcher_));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.splitter.unsupported_command\").value());\n}\n\nMATCHER_P(RespVariantEq, rhs, \"RespVariant should be equal\") {\n  const ConnPool::RespVariant& obj = arg;\n  EXPECT_EQ(obj.index(), 1);\n  EXPECT_EQ(*(absl::get<Common::Redis::RespValueConstSharedPtr>(obj)), rhs);\n  return true;\n}\n\nclass RedisSingleServerRequestTest : public RedisCommandSplitterImplTest,\n                                     public testing::WithParamInterface<std::string> {\npublic:\n  RedisSingleServerRequestTest() : 
RedisSingleServerRequestTest(false) {}\n  RedisSingleServerRequestTest(bool latency_in_micros)\n      : RedisCommandSplitterImplTest(latency_in_micros) {}\n  void makeRequest(const std::string& hash_key, Common::Redis::RespValuePtr&& request,\n                   bool mirrored = false) {\n    EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n    EXPECT_CALL(*conn_pool_, makeRequest_(hash_key, RespVariantEq(*request), _))\n        .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_)));\n    if (mirrored) {\n      EXPECT_CALL(*mirror_conn_pool_, makeRequest_(hash_key, RespVariantEq(*request), _))\n          .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&mirror_pool_callbacks_)),\n                          Return(&mirror_pool_request_)));\n    }\n    handle_ = splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n  }\n\n  void fail() {\n    Common::Redis::RespValue response;\n    response.type(Common::Redis::RespType::Error);\n    response.asString() = Response::get().UpstreamFailure;\n    EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n    pool_callbacks_->onFailure();\n  }\n\n  void respond(bool mirrored = false) {\n    Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n    Common::Redis::RespValue* response1_ptr = response1.get();\n    if (mirrored) {\n      // expect no-opt for mirrored requests\n      mirror_pool_callbacks_->onResponse(std::move(response1));\n    } else {\n      EXPECT_CALL(callbacks_, onResponse_(PointeesEq(response1_ptr)));\n      pool_callbacks_->onResponse(std::move(response1));\n    }\n  }\n\n  ConnPool::PoolCallbacks* pool_callbacks_;\n  Common::Redis::Client::MockPoolRequest pool_request_;\n\n  ConnPool::PoolCallbacks* mirror_pool_callbacks_;\n  Common::Redis::Client::MockPoolRequest mirror_pool_request_;\n};\n\nTEST_P(RedisSingleServerRequestTest, Success) {\n  InSequence s;\n\n  std::string lower_command = 
absl::AsciiStrToLower(GetParam());\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\"});\n  makeRequest(\"hello\", std::move(request));\n  EXPECT_NE(nullptr, handle_);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          10));\n  respond();\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.success\", lower_command)).value());\n};\n\nTEST_P(RedisSingleServerRequestTest, Mirrored) {\n  InSequence s;\n\n  setupMirrorPolicy();\n\n  std::string lower_command = absl::AsciiStrToLower(GetParam());\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\"});\n  makeRequest(\"hello\", std::move(request), true);\n  EXPECT_NE(nullptr, handle_);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          10));\n  respond();\n  respond(true);\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.success\", lower_command)).value());\n};\n\nTEST_P(RedisSingleServerRequestTest, MirroredFailed) {\n  InSequence s;\n\n  setupMirrorPolicy();\n\n  std::string lower_command = absl::AsciiStrToLower(GetParam());\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  
makeBulkStringArray(*request, {GetParam(), \"hello\"});\n  makeRequest(\"hello\", std::move(request), true);\n  EXPECT_NE(nullptr, handle_);\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          10));\n  // Mirrored request failure should not result in main path failure\n  mirror_pool_callbacks_->onFailure();\n  respond();\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.success\", lower_command)).value());\n};\n\nTEST_P(RedisSingleServerRequestTest, SuccessMultipleArgs) {\n  InSequence s;\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\", \"123\", \"world\"});\n  makeRequest(\"hello\", std::move(request));\n  EXPECT_NE(nullptr, handle_);\n\n  std::string lower_command = absl::AsciiStrToLower(GetParam());\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          10));\n  respond();\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.success\", lower_command)).value());\n};\n\nTEST_P(RedisSingleServerRequestTest, Fail) {\n  InSequence s;\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\"});\n  makeRequest(\"hello\", std::move(request));\n  EXPECT_NE(nullptr, 
handle_);\n\n  std::string lower_command = absl::AsciiStrToLower(GetParam());\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(5));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          5));\n  fail();\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.error\", lower_command)).value());\n};\n\nTEST_P(RedisSingleServerRequestTest, Cancel) {\n  InSequence s;\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\"});\n  makeRequest(\"hello\", std::move(request));\n  EXPECT_NE(nullptr, handle_);\n\n  EXPECT_CALL(pool_request_, cancel());\n  handle_->cancel();\n};\n\nTEST_P(RedisSingleServerRequestTest, NoUpstream) {\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\"});\n  EXPECT_CALL(*conn_pool_, makeRequest_(\"hello\", RespVariantEq(*request), _))\n      .WillOnce(Return(nullptr));\n\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n  response.asString() = Response::get().NoUpstreamHost;\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  handle_ = splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n  EXPECT_EQ(nullptr, handle_);\n  std::string lower_command = absl::AsciiStrToLower(GetParam());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + lower_command + \".total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + lower_command + \".error\").value());\n};\n\nINSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestTest, 
RedisSingleServerRequestTest,\n                         testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands()));\n\nINSTANTIATE_TEST_SUITE_P(RedisSimpleRequestCommandHandlerMixedCaseTests,\n                         RedisSingleServerRequestTest, testing::Values(\"INCR\", \"inCrBY\"));\n\nTEST_F(RedisSingleServerRequestTest, PingSuccess) {\n  InSequence s;\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {\"ping\"});\n\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::SimpleString);\n  response.asString() = \"PONG\";\n\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  handle_ = splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n  EXPECT_EQ(nullptr, handle_);\n};\n\nTEST_F(RedisSingleServerRequestTest, EvalSuccess) {\n  InSequence s;\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {\"eval\", \"return {ARGV[1]}\", \"1\", \"key\", \"arg\"});\n  makeRequest(\"key\", std::move(request));\n  EXPECT_NE(nullptr, handle_);\n\n  std::string lower_command = absl::AsciiStrToLower(\"eval\");\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          10));\n  respond();\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.success\", lower_command)).value());\n};\n\nTEST_F(RedisSingleServerRequestTest, EvalShaSuccess) {\n  InSequence s;\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, 
{\"EVALSHA\", \"return {ARGV[1]}\", \"1\", \"keykey\", \"arg\"});\n  makeRequest(\"keykey\", std::move(request));\n  EXPECT_NE(nullptr, handle_);\n\n  std::string lower_command = absl::AsciiStrToLower(\"evalsha\");\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          10));\n  respond();\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.success\", lower_command)).value());\n};\n\nTEST_F(RedisSingleServerRequestTest, EvalWrongNumberOfArgs) {\n  InSequence s;\n\n  Common::Redis::RespValuePtr request1{new Common::Redis::RespValue()};\n  Common::Redis::RespValuePtr request2{new Common::Redis::RespValue()};\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n\n  response.asString() = \"wrong number of arguments for 'eval' command\";\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  makeBulkStringArray(*request1, {\"eval\", \"return {ARGV[1]}\"});\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request1), callbacks_, dispatcher_));\n\n  response.asString() = \"wrong number of arguments for 'evalsha' command\";\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  makeBulkStringArray(*request2, {\"evalsha\", \"return {ARGV[1]}\", \"1\"});\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request2), callbacks_, dispatcher_));\n};\n\nTEST_F(RedisSingleServerRequestTest, EvalNoUpstream) {\n  InSequence s;\n\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  
Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {\"eval\", \"return {ARGV[1]}\", \"1\", \"key\", \"arg\"});\n  EXPECT_CALL(*conn_pool_, makeRequest_(\"key\", RespVariantEq(*request), _))\n      .WillOnce(Return(nullptr));\n\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n  response.asString() = Response::get().NoUpstreamHost;\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  handle_ = splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n  EXPECT_EQ(nullptr, handle_);\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.eval.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.eval.error\").value());\n};\n\nMATCHER_P(CompositeArrayEq, rhs, \"CompositeArray should be equal\") {\n  const ConnPool::RespVariant& obj = arg;\n  const auto& lhs = absl::get<const Common::Redis::RespValue>(obj);\n  EXPECT_TRUE(lhs.type() == Common::Redis::RespType::CompositeArray);\n  EXPECT_EQ(lhs.asCompositeArray().size(), rhs.size());\n  std::vector<std::string> array;\n  for (auto const& entry : lhs.asCompositeArray()) {\n    array.emplace_back(entry.asString());\n  }\n  EXPECT_EQ(array, rhs);\n  return true;\n}\n\nclass FragmentedRequestCommandHandlerTest : public RedisCommandSplitterImplTest {\npublic:\n  void makeRequest(std::vector<std::string>& request_strings,\n                   const std::list<uint64_t>& null_handle_indexes, bool mirrored) {\n    uint32_t num_gets = expected_requests_.size();\n\n    Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n    makeBulkStringArray(*request, request_strings);\n\n    pool_callbacks_.resize(num_gets);\n    mirror_pool_callbacks_.resize(num_gets);\n    std::vector<Common::Redis::Client::MockPoolRequest> tmp_pool_requests(num_gets);\n    pool_requests_.swap(tmp_pool_requests);\n    std::vector<Common::Redis::Client::MockPoolRequest> tmp_mirrored_pool_requests(num_gets);\n    
mirror_pool_requests_.swap(tmp_mirrored_pool_requests);\n\n    EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n\n    std::vector<Common::Redis::Client::MockPoolRequest> dummy_requests(num_gets);\n    for (uint32_t i = 0; i < num_gets; i++) {\n      Common::Redis::Client::PoolRequest* request_to_use = nullptr;\n      if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) ==\n          null_handle_indexes.end()) {\n        request_to_use = &pool_requests_[i];\n      }\n      Common::Redis::Client::PoolRequest* mirror_request_to_use = nullptr;\n      if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) ==\n          null_handle_indexes.end()) {\n        mirror_request_to_use = &dummy_requests[i];\n      }\n      EXPECT_CALL(*conn_pool_,\n                  makeRequest_(std::to_string(i), CompositeArrayEq(expected_requests_[i]), _))\n          .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use)));\n      if (mirrored) {\n        EXPECT_CALL(*mirror_conn_pool_,\n                    makeRequest_(std::to_string(i), CompositeArrayEq(expected_requests_[i]), _))\n            .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&mirror_pool_callbacks_[i])),\n                            Return(mirror_request_to_use)));\n      }\n    }\n\n    handle_ = splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n  }\n\n  std::vector<std::vector<std::string>> expected_requests_;\n  std::vector<ConnPool::PoolCallbacks*> pool_callbacks_;\n  std::vector<Common::Redis::Client::MockPoolRequest> pool_requests_;\n  std::vector<ConnPool::PoolCallbacks*> mirror_pool_callbacks_;\n  std::vector<Common::Redis::Client::MockPoolRequest> mirror_pool_requests_;\n};\n\nclass RedisMGETCommandHandlerTest : public FragmentedRequestCommandHandlerTest {\npublic:\n  void setup(uint32_t num_gets, const std::list<uint64_t>& null_handle_indexes,\n             bool mirrored = false) {\n    
expected_requests_.reserve(num_gets);\n    std::vector<std::string> request_strings = {\"mget\"};\n    for (uint32_t i = 0; i < num_gets; i++) {\n      request_strings.push_back(std::to_string(i));\n      expected_requests_.push_back({\"get\", std::to_string(i)});\n    }\n    makeRequest(request_strings, null_handle_indexes, mirrored);\n  }\n\n  Common::Redis::RespValuePtr response(const std::string& result) {\n    Common::Redis::RespValuePtr response = std::make_unique<Common::Redis::RespValue>();\n    response->type(Common::Redis::RespType::BulkString);\n    response->asString() = result;\n    return response;\n  }\n};\n\nTEST_F(RedisMGETCommandHandlerTest, Normal) {\n  InSequence s;\n\n  setup(2, {});\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Array);\n  std::vector<Common::Redis::RespValue> elements(2);\n  elements[0].type(Common::Redis::RespType::BulkString);\n  elements[0].asString() = \"response\";\n  elements[1].type(Common::Redis::RespType::BulkString);\n  elements[1].asString() = \"5\";\n  expected_response.asArray().swap(elements);\n\n  pool_callbacks_[1]->onResponse(response(\"5\"));\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name, \"redis.foo.command.mget.latency\"), 10));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(response(\"response\"));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.success\").value());\n};\n\nTEST_F(RedisMGETCommandHandlerTest, Mirrored) {\n  InSequence s;\n\n  setupMirrorPolicy();\n  setup(2, {}, true);\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Array);\n  
std::vector<Common::Redis::RespValue> elements(2);\n  elements[0].type(Common::Redis::RespType::BulkString);\n  elements[0].asString() = \"response\";\n  elements[1].type(Common::Redis::RespType::BulkString);\n  elements[1].asString() = \"5\";\n  expected_response.asArray().swap(elements);\n\n  pool_callbacks_[1]->onResponse(response(\"5\"));\n  mirror_pool_callbacks_[1]->onResponse(response(\"5\"));\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name, \"redis.foo.command.mget.latency\"), 10));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(response(\"response\"));\n  mirror_pool_callbacks_[0]->onResponse(response(\"response\"));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.success\").value());\n};\n\nTEST_F(RedisMGETCommandHandlerTest, NormalWithNull) {\n  InSequence s;\n\n  setup(2, {});\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Array);\n  std::vector<Common::Redis::RespValue> elements(2);\n  elements[0].type(Common::Redis::RespType::BulkString);\n  elements[0].asString() = \"response\";\n  expected_response.asArray().swap(elements);\n\n  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n  pool_callbacks_[1]->onResponse(std::move(response2));\n\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(response(\"response\"));\n};\n\nTEST_F(RedisMGETCommandHandlerTest, NoUpstreamHostForAll) {\n  // No InSequence to avoid making setup() more complicated.\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Array);\n  std::vector<Common::Redis::RespValue> elements(2);\n  
elements[0].type(Common::Redis::RespType::Error);\n  elements[0].asString() = Response::get().NoUpstreamHost;\n  elements[1].type(Common::Redis::RespType::Error);\n  elements[1].asString() = Response::get().NoUpstreamHost;\n  expected_response.asArray().swap(elements);\n\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  setup(2, {0, 1});\n  EXPECT_EQ(nullptr, handle_);\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.error\").value());\n};\n\nTEST_F(RedisMGETCommandHandlerTest, NoUpstreamHostForOne) {\n  InSequence s;\n\n  setup(2, {0});\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Array);\n  std::vector<Common::Redis::RespValue> elements(2);\n  elements[0].type(Common::Redis::RespType::Error);\n  elements[0].asString() = Response::get().NoUpstreamHost;\n  elements[1].type(Common::Redis::RespType::Error);\n  elements[1].asString() = Response::get().UpstreamFailure;\n  expected_response.asArray().swap(elements);\n\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[1]->onFailure();\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.error\").value());\n};\n\nTEST_F(RedisMGETCommandHandlerTest, Failure) {\n  InSequence s;\n\n  setup(2, {});\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Array);\n  std::vector<Common::Redis::RespValue> elements(2);\n  elements[0].type(Common::Redis::RespType::BulkString);\n  elements[0].asString() = \"response\";\n  elements[1].type(Common::Redis::RespType::Error);\n  elements[1].asString() = Response::get().UpstreamFailure;\n  expected_response.asArray().swap(elements);\n\n  pool_callbacks_[1]->onFailure();\n\n  
time_system_.setMonotonicTime(std::chrono::milliseconds(5));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name, \"redis.foo.command.mget.latency\"), 5));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(response(\"response\"));\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.error\").value());\n};\n\nTEST_F(RedisMGETCommandHandlerTest, InvalidUpstreamResponse) {\n  InSequence s;\n\n  setup(2, {});\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Array);\n  std::vector<Common::Redis::RespValue> elements(2);\n  elements[0].type(Common::Redis::RespType::Error);\n  elements[0].asString() = Response::get().UpstreamProtocolError;\n  elements[1].type(Common::Redis::RespType::Error);\n  elements[1].asString() = Response::get().UpstreamFailure;\n  expected_response.asArray().swap(elements);\n\n  pool_callbacks_[1]->onFailure();\n\n  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n  response1->type(Common::Redis::RespType::Integer);\n  response1->asInteger() = 5;\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name, \"redis.foo.command.mget.latency\"), 10));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(std::move(response1));\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mget.error\").value());\n};\n\nTEST_F(RedisMGETCommandHandlerTest, Cancel) {\n  InSequence s;\n\n  setup(2, {});\n  EXPECT_NE(nullptr, handle_);\n\n  EXPECT_CALL(pool_requests_[0], cancel());\n  EXPECT_CALL(pool_requests_[1], cancel());\n  
handle_->cancel();\n};\n\nclass RedisMSETCommandHandlerTest : public FragmentedRequestCommandHandlerTest {\npublic:\n  void setup(uint32_t num_sets, const std::list<uint64_t>& null_handle_indexes,\n             bool mirrored = false) {\n\n    expected_requests_.reserve(num_sets);\n    std::vector<std::string> request_strings = {\"mset\"};\n    for (uint32_t i = 0; i < num_sets; i++) {\n      // key\n      request_strings.push_back(std::to_string(i));\n      // value\n      request_strings.push_back(std::to_string(i));\n\n      expected_requests_.push_back({\"set\", std::to_string(i), std::to_string(i)});\n    }\n    makeRequest(request_strings, null_handle_indexes, mirrored);\n  }\n\n  Common::Redis::RespValuePtr okResponse() {\n    Common::Redis::RespValuePtr response = std::make_unique<Common::Redis::RespValue>();\n    response->type(Common::Redis::RespType::SimpleString);\n    response->asString() = Response::get().OK;\n    return response;\n  }\n};\n\nTEST_F(RedisMSETCommandHandlerTest, Normal) {\n  InSequence s;\n\n  setup(2, {});\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::SimpleString);\n  expected_response.asString() = Response::get().OK;\n\n  pool_callbacks_[1]->onResponse(okResponse());\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name, \"redis.foo.command.mset.latency\"), 10));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(okResponse());\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mset.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mset.success\").value());\n};\n\nTEST_F(RedisMSETCommandHandlerTest, Mirrored) {\n  InSequence s;\n\n  setupMirrorPolicy();\n  setup(2, {}, true);\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue 
expected_response;\n  expected_response.type(Common::Redis::RespType::SimpleString);\n  expected_response.asString() = Response::get().OK;\n\n  pool_callbacks_[1]->onResponse(okResponse());\n  mirror_pool_callbacks_[1]->onResponse(okResponse());\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name, \"redis.foo.command.mset.latency\"), 10));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(okResponse());\n  mirror_pool_callbacks_[0]->onResponse(okResponse());\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mset.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mset.success\").value());\n};\n\nTEST_F(RedisMSETCommandHandlerTest, NoUpstreamHostForAll) {\n  // No InSequence to avoid making setup() more complicated.\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Error);\n  expected_response.asString() = \"finished with 2 error(s)\";\n\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  setup(2, {0, 1});\n  EXPECT_EQ(nullptr, handle_);\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mset.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mset.error\").value());\n};\n\nTEST_F(RedisMSETCommandHandlerTest, NoUpstreamHostForOne) {\n  InSequence s;\n\n  setup(2, {0});\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Error);\n  expected_response.asString() = \"finished with 1 error(s)\";\n\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[1]->onResponse(okResponse());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mset.total\").value());\n  EXPECT_EQ(1UL, 
store_.counter(\"redis.foo.command.mset.error\").value());\n};\n\nTEST_F(RedisMSETCommandHandlerTest, Cancel) {\n  InSequence s;\n\n  setup(2, {});\n  EXPECT_NE(nullptr, handle_);\n\n  EXPECT_CALL(pool_requests_[0], cancel());\n  EXPECT_CALL(pool_requests_[1], cancel());\n  handle_->cancel();\n};\n\nTEST_F(RedisMSETCommandHandlerTest, WrongNumberOfArgs) {\n  InSequence s;\n\n  Common::Redis::RespValue response;\n  response.type(Common::Redis::RespType::Error);\n  response.asString() = \"wrong number of arguments for 'mset' command\";\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response)));\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {\"mset\", \"foo\", \"bar\", \"fizz\"});\n  EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_, dispatcher_));\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mset.total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.mset.error\").value());\n};\n\nclass RedisSplitKeysSumResultHandlerTest : public FragmentedRequestCommandHandlerTest,\n                                           public testing::WithParamInterface<std::string> {\npublic:\n  void setup(uint32_t num_commands, const std::list<uint64_t>& null_handle_indexes,\n             bool mirrored = false) {\n\n    expected_requests_.reserve(num_commands);\n    std::vector<std::string> request_strings = {GetParam()};\n    for (uint32_t i = 0; i < num_commands; i++) {\n      request_strings.push_back(std::to_string(i));\n      expected_requests_.push_back({GetParam(), std::to_string(i)});\n    }\n    makeRequest(request_strings, null_handle_indexes, mirrored);\n  }\n\n  Common::Redis::RespValuePtr response(int64_t value) {\n    Common::Redis::RespValuePtr response = std::make_unique<Common::Redis::RespValue>();\n    response->type(Common::Redis::RespType::Integer);\n    response->asInteger() = value;\n    
return response;\n  }\n};\n\nTEST_P(RedisSplitKeysSumResultHandlerTest, Normal) {\n  InSequence s;\n\n  setup(2, {});\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Integer);\n  expected_response.asInteger() = 2;\n\n  pool_callbacks_[1]->onResponse(response(1));\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(\n      store_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"redis.foo.command.\" + GetParam() + \".latency\"), 10));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(response(1));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + GetParam() + \".total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + GetParam() + \".success\").value());\n};\n\nTEST_P(RedisSplitKeysSumResultHandlerTest, Mirrored) {\n  InSequence s;\n\n  setupMirrorPolicy();\n  setup(2, {}, true);\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Integer);\n  expected_response.asInteger() = 2;\n\n  pool_callbacks_[1]->onResponse(response(1));\n  mirror_pool_callbacks_[1]->onResponse(response(1));\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(\n      store_,\n      deliverHistogramToSinks(\n          Property(&Stats::Metric::name, \"redis.foo.command.\" + GetParam() + \".latency\"), 10));\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(response(1));\n  mirror_pool_callbacks_[0]->onResponse(response(1));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + GetParam() + \".total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + GetParam() + \".success\").value());\n};\n\nTEST_P(RedisSplitKeysSumResultHandlerTest, NormalOneZero) {\n  InSequence s;\n\n  setup(2, 
{});\n  EXPECT_NE(nullptr, handle_);\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Integer);\n  expected_response.asInteger() = 1;\n\n  pool_callbacks_[1]->onResponse(response(0));\n\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  pool_callbacks_[0]->onResponse(response(1));\n\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + GetParam() + \".total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + GetParam() + \".success\").value());\n};\n\nTEST_P(RedisSplitKeysSumResultHandlerTest, NoUpstreamHostForAll) {\n  // No InSequence to avoid making setup() more complicated.\n\n  Common::Redis::RespValue expected_response;\n  expected_response.type(Common::Redis::RespType::Error);\n  expected_response.asString() = \"finished with 2 error(s)\";\n\n  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));\n  setup(2, {0, 1});\n  EXPECT_EQ(nullptr, handle_);\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + GetParam() + \".total\").value());\n  EXPECT_EQ(1UL, store_.counter(\"redis.foo.command.\" + GetParam() + \".error\").value());\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    RedisSplitKeysSumResultHandlerTest, RedisSplitKeysSumResultHandlerTest,\n    testing::ValuesIn(Common::Redis::SupportedCommands::hashMultipleSumResultCommands()));\n\nclass RedisSingleServerRequestWithLatencyMicrosTest : public RedisSingleServerRequestTest {\npublic:\n  RedisSingleServerRequestWithLatencyMicrosTest() : RedisSingleServerRequestTest(true) {}\n};\n\nTEST_P(RedisSingleServerRequestWithLatencyMicrosTest, Success) {\n  InSequence s;\n\n  std::string lower_command = absl::AsciiStrToLower(GetParam());\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\"});\n  makeRequest(\"hello\", std::move(request));\n  EXPECT_NE(nullptr, handle_);\n\n  
time_system_.setMonotonicTime(std::chrono::milliseconds(10));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          10000));\n  respond();\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.success\", lower_command)).value());\n};\n\nINSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithLatencyMicrosTest,\n                         RedisSingleServerRequestWithLatencyMicrosTest,\n                         testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands()));\n\n// In subclasses of fault test, we mock the expected faults in the constructor, as the\n// fault manager is owned by the splitter, which is also generated later in construction\n// of the base test class.\nclass RedisSingleServerRequestWithFaultTest : public RedisSingleServerRequestTest {\npublic:\n  NiceMock<Event::MockTimer>* timer_;\n  Event::TimerCb timer_cb_;\n  int delay_ms_;\n  Common::Redis::FaultSharedPtr fault_ptr_;\n};\n\nclass RedisSingleServerRequestWithErrorFaultTest : public RedisSingleServerRequestWithFaultTest {\npublic:\n  RedisSingleServerRequestWithErrorFaultTest() {\n    delay_ms_ = 0;\n    fault_ptr_ = Common::Redis::FaultManagerImpl::makeFaultForTest(\n        Common::Redis::FaultType::Error, std::chrono::milliseconds(delay_ms_));\n    ON_CALL(*getFaultManager(), getFaultForCommand(_)).WillByDefault(Return(fault_ptr_.get()));\n  }\n};\n\nTEST_P(RedisSingleServerRequestWithErrorFaultTest, Fault) {\n  InSequence s;\n\n  std::string lower_command = absl::AsciiStrToLower(GetParam());\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\"});\n\n  EXPECT_CALL(callbacks_, 
connectionAllowed()).WillOnce(Return(true));\n  EXPECT_CALL(callbacks_, onResponse_(_));\n  handle_ = splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n  EXPECT_EQ(nullptr, handle_);\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.error\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.error_fault\", lower_command)).value());\n};\n\nclass RedisSingleServerRequestWithErrorWithDelayFaultTest\n    : public RedisSingleServerRequestWithFaultTest {\npublic:\n  RedisSingleServerRequestWithErrorWithDelayFaultTest() {\n    delay_ms_ = 5;\n    fault_ptr_ = Common::Redis::FaultManagerImpl::makeFaultForTest(\n        Common::Redis::FaultType::Error, std::chrono::milliseconds(delay_ms_));\n    ON_CALL(*getFaultManager(), getFaultForCommand(_)).WillByDefault(Return(fault_ptr_.get()));\n    timer_ = new NiceMock<Event::MockTimer>();\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithErrorFaultTest,\n                         RedisSingleServerRequestWithErrorFaultTest,\n                         testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands()));\n\nTEST_P(RedisSingleServerRequestWithErrorWithDelayFaultTest, Fault) {\n  InSequence s;\n\n  std::string lower_command = absl::AsciiStrToLower(GetParam());\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\"});\n\n  // As error faults have zero latency, recorded latency is equal to the delay.\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n    timer_cb_ = timer_cb;\n    return timer_;\n  }));\n\n  handle_ = splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n  EXPECT_NE(nullptr, handle_);\n  
time_system_.setMonotonicTime(std::chrono::milliseconds(delay_ms_));\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          delay_ms_));\n  EXPECT_CALL(callbacks_, onResponse_(_));\n  timer_cb_();\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.error\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.error_fault\", lower_command)).value());\n};\n\nINSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithErrorWithDelayFaultTest,\n                         RedisSingleServerRequestWithErrorWithDelayFaultTest,\n                         testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands()));\n\nclass RedisSingleServerRequestWithDelayFaultTest : public RedisSingleServerRequestWithFaultTest {\npublic:\n  RedisSingleServerRequestWithDelayFaultTest() {\n    delay_ms_ = 15;\n    fault_ptr_ = Common::Redis::FaultManagerImpl::makeFaultForTest(\n        Common::Redis::FaultType::Delay, std::chrono::milliseconds(delay_ms_));\n    ON_CALL(*getFaultManager(), getFaultForCommand(_)).WillByDefault(Return(fault_ptr_.get()));\n    timer_ = new NiceMock<Event::MockTimer>();\n  }\n};\n\nTEST_P(RedisSingleServerRequestWithDelayFaultTest, Fault) {\n  InSequence s;\n\n  std::string lower_command = absl::AsciiStrToLower(GetParam());\n  std::string hash_key = \"hello\";\n\n  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};\n  makeBulkStringArray(*request, {GetParam(), \"hello\"});\n\n  EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));\n  EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n    timer_cb_ = timer_cb;\n    return timer_;\n  }));\n  
EXPECT_CALL(*conn_pool_, makeRequest_(hash_key, RespVariantEq(*request), _))\n      .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_)));\n\n  handle_ = splitter_.makeRequest(std::move(request), callbacks_, dispatcher_);\n\n  EXPECT_NE(nullptr, handle_);\n\n  EXPECT_CALL(store_, deliverHistogramToSinks(\n                          Property(&Stats::Metric::name,\n                                   fmt::format(\"redis.foo.command.{}.latency\", lower_command)),\n                          delay_ms_));\n  respond();\n\n  time_system_.setMonotonicTime(std::chrono::milliseconds(delay_ms_));\n  timer_cb_();\n\n  EXPECT_EQ(1UL, store_.counter(fmt::format(\"redis.foo.command.{}.total\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.success\", lower_command)).value());\n  EXPECT_EQ(1UL,\n            store_.counter(fmt::format(\"redis.foo.command.{}.delay_fault\", lower_command)).value());\n};\n\nINSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithDelayFaultTest,\n                         RedisSingleServerRequestWithDelayFaultTest,\n                         testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands()));\n\n} // namespace CommandSplitter\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/config_test.cc",
    "content": "#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/redis_proxy/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/test_runtime.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nTEST(RedisProxyFilterConfigFactoryTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(RedisProxyFilterConfigFactory().createFilterFactoryFromProto(\n                   envoy::extensions::filters::network::redis_proxy::v3::RedisProxy(), context),\n               ProtoValidationException);\n}\n\nTEST(RedisProxyFilterConfigFactoryTest, NoUpstreamDefined) {\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings settings;\n  settings.mutable_op_timeout()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(20));\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy config;\n  config.set_stat_prefix(\"foo\");\n  config.mutable_settings()->CopyFrom(settings);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  EXPECT_THROW_WITH_MESSAGE(\n      RedisProxyFilterConfigFactory().createFilterFactoryFromProto(config, context), EnvoyException,\n      \"cannot configure a redis-proxy without any upstream\");\n}\n\nTEST(RedisProxyFilterConfigFactoryTest, RedisProxyNoSettings) {\n  const std::string yaml = R\"EOF(\nprefix_routes:\n  catch_all_route:\n    cluster: fake_cluster\nstat_prefix: foo\n  )EOF\";\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config;\n  EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYamlAndValidate(yaml, proto_config),\n                          
ProtoValidationException, \"value is required\");\n}\n\nTEST(RedisProxyFilterConfigFactoryTest, RedisProxyNoOpTimeout) {\n  const std::string yaml = R\"EOF(\nprefix_routes:\n  catch_all_route:\n    cluster: fake_cluster\nstat_prefix: foo\nsettings: {}\n  )EOF\";\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config;\n  EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYamlAndValidate(yaml, proto_config),\n                          ProtoValidationException, \"embedded message failed validation\");\n}\n\nTEST(RedisProxyFilterConfigFactoryTest,\n     DEPRECATED_FEATURE_TEST(RedisProxyCorrectProtoLegacyCluster)) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.deprecated_features:envoy.config.filter.network.redis_proxy.v2.RedisProxy.cluster\",\n        \"true\"},\n       {\"envoy.deprecated_features:envoy.extensions.filters.network.redis_proxy.v3.RedisProxy.\"\n        \"hidden_envoy_deprecated_cluster\",\n        \"true\"}});\n\n  const std::string yaml = R\"EOF(\ncluster: fake_cluster\nstat_prefix: foo\nsettings:\n  op_timeout: 0.02s\n  )EOF\";\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{};\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config, true, false);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RedisProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  EXPECT_TRUE(factory.isTerminalFilter());\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST(RedisProxyFilterConfigFactoryTest,\n     DEPRECATED_FEATURE_TEST(RedisProxyCorrectProtoLegacyCatchAllCluster)) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.deprecated_features:envoy.config.filter.network.redis_proxy.v2.RedisProxy.\"\n        
\"PrefixRoutes.catch_all_cluster\",\n        \"true\"},\n       {\"envoy.deprecated_features:envoy.extensions.filters.network.redis_proxy.v3.RedisProxy.\"\n        \"PrefixRoutes.hidden_envoy_deprecated_catch_all_cluster\",\n        \"true\"}});\n  const std::string yaml = R\"EOF(\nprefix_routes:\n  catch_all_cluster: fake_cluster\nstat_prefix: foo\nsettings:\n  op_timeout: 0.02s\n  )EOF\";\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{};\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config, true, false);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RedisProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  EXPECT_TRUE(factory.isTerminalFilter());\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST(RedisProxyFilterConfigFactoryTest, RedisProxyCorrectProto) {\n  const std::string yaml = R\"EOF(\nprefix_routes:\n  catch_all_route:\n    cluster: fake_cluster\nstat_prefix: foo\nsettings:\n  op_timeout: 0.02s\n  )EOF\";\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{};\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RedisProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  EXPECT_TRUE(factory.isTerminalFilter());\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST(RedisProxyFilterConfigFactoryTest, RedisProxyEmptyProto) {\n  const std::string yaml = R\"EOF(\nprefix_routes:\n  catch_all_route:\n    cluster: fake_cluster\nstat_prefix: foo\nsettings:\n  op_timeout: 0.02s\n  )EOF\";\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RedisProxyFilterConfigFactory factory;\n  
envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config =\n      *dynamic_cast<envoy::extensions::filters::network::redis_proxy::v3::RedisProxy*>(\n          factory.createEmptyConfigProto().get());\n\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST(RedisProxyFilterConfigFactoryTest, RedisProxyFaultProto) {\n  const std::string yaml = R\"EOF(\nprefix_routes:\n  catch_all_route:\n    cluster: fake_cluster\nstat_prefix: foo\nfaults:\n- fault_type: ERROR\n  fault_enabled:\n    default_value:\n      numerator: 30\n      denominator: HUNDRED\n    runtime_key: \"bogus_key\"\n  commands:\n  - GET\n- fault_type: DELAY\n  fault_enabled:\n    default_value:\n      numerator: 20\n      denominator: HUNDRED\n    runtime_key: \"bogus_key\"\n  delay: 2s\nsettings:\n  op_timeout: 0.02s\n  )EOF\";\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{};\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config);\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RedisProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  EXPECT_TRUE(factory.isTerminalFilter());\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(RedisProxyFilterConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.redis_proxy\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedNetworkFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // 
namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/cluster/redis/redis_cluster.pb.h\"\n#include \"envoy/config/cluster/redis/redis_cluster.pb.validate.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/filters/network/common/redis/utility.h\"\n#include \"extensions/filters/network/redis_proxy/conn_pool_impl.h\"\n\n#include \"test/extensions/clusters/redis/mocks.h\"\n#include \"test/extensions/common/redis/mocks.h\"\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/extensions/filters/network/common/redis/test_utils.h\"\n#include \"test/extensions/filters/network/redis_proxy/mocks.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/cluster.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/cluster_update_callbacks.h\"\n#include \"test/mocks/upstream/cluster_update_callbacks_handle.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/thread_local_cluster.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::DoAll;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::Ref;\nusing testing::Return;\nusing testing::ReturnNew;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\nnamespace ConnPool {\n\nclass RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client::ClientFactory {\npublic:\n  void setup(bool cluster_exists = true, bool hashtagging = true,\n             uint32_t 
max_unknown_conns = 100) {\n    EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_))\n        .WillOnce(DoAll(SaveArgAddress(&update_callbacks_),\n                        ReturnNew<Upstream::MockClusterUpdateCallbacksHandle>()));\n    if (!cluster_exists) {\n      EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillOnce(Return(nullptr));\n    }\n\n    std::unique_ptr<NiceMock<Stats::MockStore>> store =\n        std::make_unique<NiceMock<Stats::MockStore>>();\n\n    upstream_cx_drained_.value_ = 0;\n    ON_CALL(*store, counter(Eq(\"upstream_cx_drained\")))\n        .WillByDefault(ReturnRef(upstream_cx_drained_));\n    ON_CALL(upstream_cx_drained_, value()).WillByDefault(Invoke([&]() -> uint64_t {\n      return upstream_cx_drained_.value_;\n    }));\n    ON_CALL(upstream_cx_drained_, inc()).WillByDefault(Invoke([&]() {\n      upstream_cx_drained_.value_++;\n    }));\n\n    max_upstream_unknown_connections_reached_.value_ = 0;\n    ON_CALL(*store, counter(Eq(\"max_upstream_unknown_connections_reached\")))\n        .WillByDefault(ReturnRef(max_upstream_unknown_connections_reached_));\n    ON_CALL(max_upstream_unknown_connections_reached_, value())\n        .WillByDefault(\n            Invoke([&]() -> uint64_t { return max_upstream_unknown_connections_reached_.value_; }));\n    ON_CALL(max_upstream_unknown_connections_reached_, inc()).WillByDefault(Invoke([&]() {\n      max_upstream_unknown_connections_reached_.value_++;\n    }));\n\n    cluster_refresh_manager_ =\n        std::make_shared<NiceMock<Extensions::Common::Redis::MockClusterRefreshManager>>();\n    auto redis_command_stats =\n        Common::Redis::RedisCommandStats::createRedisCommandStats(store->symbolTable());\n    std::shared_ptr<InstanceImpl> conn_pool_impl = std::make_shared<InstanceImpl>(\n        cluster_name_, cm_, *this, tls_,\n        Common::Redis::Client::createConnPoolSettings(20, hashtagging, true, max_unknown_conns,\n                                                      read_policy_),\n     
   api_, std::move(store), redis_command_stats, cluster_refresh_manager_);\n    conn_pool_impl->init();\n    // Set the authentication password for this connection pool.\n    conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>().auth_username_ = auth_username_;\n    conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>().auth_password_ = auth_password_;\n    conn_pool_ = std::move(conn_pool_impl);\n    test_address_ = Network::Utility::resolveUrl(\"tcp://127.0.0.1:3000\");\n  }\n\n  void makeSimpleRequest(bool create_client, const std::string& hash_key, uint64_t hash_value) {\n    auto expectHash = [&](const uint64_t hash) {\n      return [&, hash](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n        EXPECT_EQ(context->computeHashKey().value(), hash);\n        return cm_.thread_local_cluster_.lb_.host_;\n      };\n    };\n\n    EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n        .WillOnce(Invoke(expectHash(hash_value)));\n    if (create_client) {\n      client_ = new NiceMock<Common::Redis::Client::MockClient>();\n      EXPECT_CALL(*this, create_(_)).WillOnce(Return(client_));\n    }\n    Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n    MockPoolCallbacks callbacks;\n    std::list<Common::Redis::Client::ClientCallbacks*> client_callbacks;\n    Common::Redis::Client::MockPoolRequest active_request;\n    EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())\n        .WillRepeatedly(Return(test_address_));\n    EXPECT_CALL(*client_, makeRequest_(Ref(*value), _))\n        .WillOnce(Invoke(\n            [&](const Common::Redis::RespValue&, Common::Redis::Client::ClientCallbacks& callbacks)\n                -> Common::Redis::Client::PoolRequest* {\n              client_callbacks.push_back(&callbacks);\n              return &active_request;\n            }));\n    Common::Redis::Client::PoolRequest* request =\n        conn_pool_->makeRequest(hash_key, value, 
callbacks);\n    EXPECT_NE(nullptr, request);\n    EXPECT_NE(nullptr, client_callbacks.back());\n\n    EXPECT_CALL(active_request, cancel());\n    request->cancel();\n  }\n\n  void makeRequest(Common::Redis::Client::MockClient* client,\n                   Common::Redis::RespValueSharedPtr& value, MockPoolCallbacks& callbacks,\n                   Common::Redis::Client::MockPoolRequest& active_request,\n                   bool create_client = true) {\n    EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n        .WillOnce(\n            Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n              EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(\"hash_key\"));\n              EXPECT_EQ(context->metadataMatchCriteria(), nullptr);\n              EXPECT_EQ(context->downstreamConnection(), nullptr);\n              return this->cm_.thread_local_cluster_.lb_.host_;\n            }));\n    if (create_client) {\n      EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));\n    }\n    EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())\n        .WillRepeatedly(Return(this->test_address_));\n    EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request));\n    Common::Redis::Client::PoolRequest* request =\n        this->conn_pool_->makeRequest(\"hash_key\", value, callbacks);\n    EXPECT_NE(nullptr, request);\n  }\n\n  absl::node_hash_map<Upstream::HostConstSharedPtr, InstanceImpl::ThreadLocalActiveClientPtr>&\n  clientMap() {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    return conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>().client_map_;\n  }\n\n  InstanceImpl::ThreadLocalActiveClient* clientMap(Upstream::HostConstSharedPtr host) {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    return conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>().client_map_[host].get();\n  
}\n\n  absl::node_hash_map<std::string, Upstream::HostConstSharedPtr>& hostAddressMap() {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    return conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>().host_address_map_;\n  }\n\n  std::list<Upstream::HostSharedPtr>& createdViaRedirectHosts() {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    return conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>()\n        .created_via_redirect_hosts_;\n  }\n\n  std::list<InstanceImpl::ThreadLocalActiveClientPtr>& clientsToDrain() {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    return conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>().clients_to_drain_;\n  }\n\n  InstanceImpl::ThreadLocalPool& threadLocalPool() {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    return conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>();\n  }\n\n  Event::TimerPtr& drainTimer() {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    return conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>().drain_timer_;\n  }\n\n  void drainClients() {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    conn_pool_impl->tls_->getTyped<InstanceImpl::ThreadLocalPool>().drainClients();\n  }\n\n  Stats::Counter& upstreamCxDrained() {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    return conn_pool_impl->redis_cluster_stats_.upstream_cx_drained_;\n  }\n\n  Stats::Counter& maxUpstreamUnknownConnectionsReached() {\n    InstanceImpl* conn_pool_impl = dynamic_cast<InstanceImpl*>(conn_pool_.get());\n    return conn_pool_impl->redis_cluster_stats_.max_upstream_unknown_connections_reached_;\n  }\n\n  // Common::Redis::Client::ClientFactory\n  Common::Redis::Client::ClientPtr 
create(Upstream::HostConstSharedPtr host, Event::Dispatcher&,\n                                          const Common::Redis::Client::Config&,\n                                          const Common::Redis::RedisCommandStatsSharedPtr&,\n                                          Stats::Scope&, const std::string& username,\n                                          const std::string& password) override {\n    EXPECT_EQ(auth_username_, username);\n    EXPECT_EQ(auth_password_, password);\n    return Common::Redis::Client::ClientPtr{create_(host)};\n  }\n\n  void testReadPolicy(\n      envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ReadPolicy\n          read_policy,\n      NetworkFilters::Common::Redis::Client::ReadPolicy expected_read_policy) {\n    InSequence s;\n\n    read_policy_ = read_policy;\n    setup();\n\n    Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n    Common::Redis::Client::MockPoolRequest auth_request, active_request, readonly_request;\n    MockPoolCallbacks callbacks;\n    Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();\n\n    EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n        .WillOnce(\n            Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n              EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(\"hash_key\"));\n              EXPECT_EQ(context->metadataMatchCriteria(), nullptr);\n              EXPECT_EQ(context->downstreamConnection(), nullptr);\n              auto redis_context =\n                  dynamic_cast<Clusters::Redis::RedisLoadBalancerContext*>(context);\n              EXPECT_EQ(redis_context->readPolicy(), expected_read_policy);\n              return cm_.thread_local_cluster_.lb_.host_;\n            }));\n    EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));\n    EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, 
address())\n        .WillRepeatedly(Return(test_address_));\n    EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request));\n    Common::Redis::Client::PoolRequest* request =\n        conn_pool_->makeRequest(\"hash_key\", value, callbacks);\n    EXPECT_NE(nullptr, request);\n\n    EXPECT_CALL(active_request, cancel());\n    EXPECT_CALL(callbacks, onFailure_());\n    EXPECT_CALL(*client, close());\n    tls_.shutdownThread();\n  }\n\n  void respond(MockPoolCallbacks& callbacks, Common::Redis::Client::MockClient* client) {\n    EXPECT_CALL(callbacks, onResponse_(_));\n    client->client_callbacks_.back()->onResponse(std::make_unique<Common::Redis::RespValue>());\n    EXPECT_EQ(0,\n              conn_pool_->tls_->getTyped<InstanceImpl::ThreadLocalPool>().pending_requests_.size());\n  }\n\n  void verifyInvalidMoveResponse(Common::Redis::Client::MockClient* client,\n                                 const std::string& host_address, bool create_client) {\n    Common::Redis::RespValueSharedPtr request_value = std::make_shared<Common::Redis::RespValue>();\n    Common::Redis::Client::MockPoolRequest active_request;\n    MockPoolCallbacks callbacks;\n    makeRequest(client, request_value, callbacks, active_request, create_client);\n    Common::Redis::RespValuePtr moved_response{new Common::Redis::RespValue()};\n    moved_response->type(Common::Redis::RespType::Error);\n    moved_response->asString() = \"MOVE 1111 \" + host_address;\n    EXPECT_CALL(callbacks, onResponse_(Ref(moved_response)));\n    EXPECT_FALSE(client->client_callbacks_.back()->onRedirection(std::move(moved_response),\n                                                                 host_address, false));\n  }\n\n  MOCK_METHOD(Common::Redis::Client::Client*, create_, (Upstream::HostConstSharedPtr host));\n\n  const std::string cluster_name_{\"fake_cluster\"};\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  std::shared_ptr<InstanceImpl> 
conn_pool_;\n  Upstream::ClusterUpdateCallbacks* update_callbacks_{};\n  Common::Redis::Client::MockClient* client_{};\n  Network::Address::InstanceConstSharedPtr test_address_;\n  std::string auth_username_;\n  std::string auth_password_;\n  NiceMock<Api::MockApi> api_;\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ReadPolicy\n      read_policy_ = envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::\n          ConnPoolSettings::MASTER;\n  NiceMock<Stats::MockCounter> upstream_cx_drained_;\n  NiceMock<Stats::MockCounter> max_upstream_unknown_connections_reached_;\n  std::shared_ptr<NiceMock<Extensions::Common::Redis::MockClusterRefreshManager>>\n      cluster_refresh_manager_;\n};\n\nTEST_F(RedisConnPoolImplTest, Basic) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n  Common::Redis::Client::MockPoolRequest active_request;\n  MockPoolCallbacks callbacks;\n  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n        EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(\"hash_key\"));\n        EXPECT_EQ(context->metadataMatchCriteria(), nullptr);\n        EXPECT_EQ(context->downstreamConnection(), nullptr);\n        return cm_.thread_local_cluster_.lb_.host_;\n      }));\n  EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));\n  EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())\n      .WillRepeatedly(Return(test_address_));\n  EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request));\n  Common::Redis::Client::PoolRequest* request =\n      conn_pool_->makeRequest(\"hash_key\", value, callbacks);\n  EXPECT_NE(nullptr, request);\n\n  EXPECT_CALL(active_request, cancel());\n  
EXPECT_CALL(callbacks, onFailure_());\n  EXPECT_CALL(*client, close());\n  tls_.shutdownThread();\n};\n\nTEST_F(RedisConnPoolImplTest, BasicRespVariant) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue value;\n  Common::Redis::Client::MockPoolRequest active_request;\n  MockPoolCallbacks callbacks;\n  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n        EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(\"hash_key\"));\n        EXPECT_EQ(context->metadataMatchCriteria(), nullptr);\n        EXPECT_EQ(context->downstreamConnection(), nullptr);\n        return cm_.thread_local_cluster_.lb_.host_;\n      }));\n  EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));\n  EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())\n      .WillRepeatedly(Return(test_address_));\n  EXPECT_CALL(*client, makeRequest_(Eq(value), _)).WillOnce(Return(&active_request));\n  Common::Redis::Client::PoolRequest* request =\n      conn_pool_->makeRequest(\"hash_key\", ConnPool::RespVariant(value), callbacks);\n  EXPECT_NE(nullptr, request);\n\n  EXPECT_CALL(active_request, cancel());\n  EXPECT_CALL(callbacks, onFailure_());\n  EXPECT_CALL(*client, close());\n  tls_.shutdownThread();\n};\n\nTEST_F(RedisConnPoolImplTest, ClientRequestFailed) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValue value;\n  MockPoolCallbacks callbacks;\n  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n        EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(\"hash_key\"));\n        
EXPECT_EQ(context->metadataMatchCriteria(), nullptr);\n        EXPECT_EQ(context->downstreamConnection(), nullptr);\n        return cm_.thread_local_cluster_.lb_.host_;\n      }));\n  EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));\n  EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())\n      .WillRepeatedly(Return(test_address_));\n  EXPECT_CALL(*client, makeRequest_(Eq(value), _)).WillOnce(Return(nullptr));\n  Common::Redis::Client::PoolRequest* request =\n      conn_pool_->makeRequest(\"hash_key\", ConnPool::RespVariant(value), callbacks);\n\n  // the request should be null and the callback is not called\n  EXPECT_EQ(nullptr, request);\n  EXPECT_CALL(*client, close());\n  tls_.shutdownThread();\n};\n\nTEST_F(RedisConnPoolImplTest, BasicWithReadPolicy) {\n  testReadPolicy(envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::\n                     ConnPoolSettings::PREFER_MASTER,\n                 NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary);\n  testReadPolicy(\n      envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::REPLICA,\n      NetworkFilters::Common::Redis::Client::ReadPolicy::Replica);\n  testReadPolicy(envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::\n                     ConnPoolSettings::PREFER_REPLICA,\n                 NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica);\n  testReadPolicy(\n      envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ANY,\n      NetworkFilters::Common::Redis::Client::ReadPolicy::Any);\n};\n\nTEST_F(RedisConnPoolImplTest, Hashtagging) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n  MockPoolCallbacks callbacks;\n\n  auto expectHashKey = [](const std::string& s) {\n    return [s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n      
EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(s));\n      return nullptr;\n    };\n  };\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Invoke(expectHashKey(\"foo\")));\n  conn_pool_->makeRequest(\"{foo}.bar\", value, callbacks);\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(Invoke(expectHashKey(\"foo{}{bar}\")));\n  conn_pool_->makeRequest(\"foo{}{bar}\", value, callbacks);\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Invoke(expectHashKey(\"{bar\")));\n  conn_pool_->makeRequest(\"foo{{bar}}zap\", value, callbacks);\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Invoke(expectHashKey(\"bar\")));\n  conn_pool_->makeRequest(\"foo{bar}{zap}\", value, callbacks);\n\n  tls_.shutdownThread();\n};\n\nTEST_F(RedisConnPoolImplTest, HashtaggingNotEnabled) {\n  InSequence s;\n\n  setup(true, false); // Test with hashtagging not enabled.\n\n  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n  MockPoolCallbacks callbacks;\n\n  auto expectHashKey = [](const std::string& s) {\n    return [s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n      EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(s));\n      return nullptr;\n    };\n  };\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(Invoke(expectHashKey(\"{foo}.bar\")));\n  conn_pool_->makeRequest(\"{foo}.bar\", value, callbacks);\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(Invoke(expectHashKey(\"foo{}{bar}\")));\n  conn_pool_->makeRequest(\"foo{}{bar}\", value, callbacks);\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(Invoke(expectHashKey(\"foo{{bar}}zap\")));\n  conn_pool_->makeRequest(\"foo{{bar}}zap\", value, callbacks);\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n      
.WillOnce(Invoke(expectHashKey(\"foo{bar}{zap}\")));\n  conn_pool_->makeRequest(\"foo{bar}{zap}\", value, callbacks);\n\n  tls_.shutdownThread();\n};\n\n// ConnPool created when no cluster exists at creation time. Dynamic cluster creation and removal\n// work correctly.\nTEST_F(RedisConnPoolImplTest, NoClusterAtConstruction) {\n  InSequence s;\n\n  setup(false);\n\n  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n  MockPoolCallbacks callbacks;\n  Common::Redis::Client::PoolRequest* request =\n      conn_pool_->makeRequest(\"hash_key\", value, callbacks);\n  EXPECT_EQ(nullptr, request);\n\n  // Now add the cluster. Request to the cluster should succeed.\n  update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_);\n  // MurmurHash of \"foo\" is 9631199822919835226U\n  makeSimpleRequest(true, \"foo\", 9631199822919835226U);\n\n  // Remove the cluster. Request to the cluster should fail.\n  EXPECT_CALL(*client_, close());\n  update_callbacks_->onClusterRemoval(\"fake_cluster\");\n  request = conn_pool_->makeRequest(\"hash_key\", value, callbacks);\n  EXPECT_EQ(nullptr, request);\n\n  // Add a cluster we don't care about.\n  NiceMock<Upstream::MockThreadLocalCluster> cluster2;\n  cluster2.cluster_.info_->name_ = \"cluster2\";\n  update_callbacks_->onClusterAddOrUpdate(cluster2);\n\n  // Add the cluster back. Request to the cluster should succeed.\n  update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_);\n  // MurmurHash of \"foo\" is 9631199822919835226U\n  makeSimpleRequest(true, \"foo\", 9631199822919835226U);\n\n  // Remove a cluster we don't care about. Request to the cluster should succeed.\n  update_callbacks_->onClusterRemoval(\"some_other_cluster\");\n  // MurmurHash of \"foo\" is 9631199822919835226U\n  makeSimpleRequest(false, \"foo\", 9631199822919835226U);\n\n  // Update the cluster. This should count as a remove followed by an add. 
Request to the cluster\n  // should succeed.\n  EXPECT_CALL(*client_, close());\n  update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_);\n  // MurmurHash of \"foo\" is 9631199822919835226U\n  makeSimpleRequest(true, \"foo\", 9631199822919835226U);\n\n  // Remove the cluster to make sure we safely destruct with no cluster.\n  EXPECT_CALL(*client_, close());\n  update_callbacks_->onClusterRemoval(\"fake_cluster\");\n}\n\n// This test removes a single host from the ConnPool after learning about 2 hosts from the\n// associated load balancer.\nTEST_F(RedisConnPoolImplTest, HostRemove) {\n  setup();\n\n  MockPoolCallbacks callbacks;\n  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n  std::shared_ptr<Upstream::MockHost> host1(new Upstream::MockHost());\n  std::shared_ptr<Upstream::MockHost> host2(new Upstream::MockHost());\n  Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(host1));\n  EXPECT_CALL(*this, create_(Eq(host1))).WillOnce(Return(client1));\n\n  Common::Redis::Client::MockPoolRequest active_request1;\n  EXPECT_CALL(*host1, address()).WillRepeatedly(Return(test_address_));\n  EXPECT_CALL(*client1, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request1));\n  Common::Redis::Client::PoolRequest* request1 =\n      conn_pool_->makeRequest(\"hash_key\", value, callbacks);\n  EXPECT_NE(nullptr, request1);\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(host2));\n  EXPECT_CALL(*this, create_(Eq(host2))).WillOnce(Return(client2));\n\n  Common::Redis::Client::MockPoolRequest active_request2;\n  EXPECT_CALL(*host2, address()).WillRepeatedly(Return(test_address_));\n  EXPECT_CALL(*client2, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request2));\n  
Common::Redis::Client::PoolRequest* request2 = conn_pool_->makeRequest(\"bar\", value, callbacks);\n  EXPECT_NE(nullptr, request2);\n\n  EXPECT_CALL(*client2, close());\n  EXPECT_CALL(*host2, address()).WillRepeatedly(Return(test_address_));\n  cm_.thread_local_cluster_.cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, {host2});\n\n  EXPECT_CALL(active_request1, cancel());\n  EXPECT_CALL(active_request2, cancel());\n  EXPECT_CALL(*client1, close());\n  EXPECT_CALL(callbacks, onFailure_()).Times(2);\n  tls_.shutdownThread();\n\n  ASSERT_TRUE(testing::Mock::VerifyAndClearExpectations(host1.get()));\n  ASSERT_TRUE(testing::Mock::VerifyAndClearExpectations(host2.get()));\n  testing::Mock::AllowLeak(host1.get());\n  testing::Mock::AllowLeak(host2.get());\n}\n\n// This test removes a host from a ConnPool that was never added in the first place. No errors\n// should be encountered.\nTEST_F(RedisConnPoolImplTest, HostRemovedNeverAdded) {\n  InSequence s;\n\n  setup();\n\n  std::shared_ptr<Upstream::MockHost> host1(new Upstream::MockHost());\n  auto host1_test_address = Network::Utility::resolveUrl(\"tcp://10.0.0.1:3000\");\n  EXPECT_CALL(*host1, address()).WillOnce(Return(host1_test_address));\n  EXPECT_NO_THROW(cm_.thread_local_cluster_.cluster_.prioritySet().getMockHostSet(0)->runCallbacks(\n      {}, {host1}));\n  EXPECT_EQ(hostAddressMap().size(), 0);\n\n  tls_.shutdownThread();\n}\n\nTEST_F(RedisConnPoolImplTest, DeleteFollowedByClusterUpdateCallback) {\n  setup();\n  conn_pool_.reset();\n\n  std::shared_ptr<Upstream::Host> host(new Upstream::MockHost());\n  cm_.thread_local_cluster_.cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, {host});\n}\n\nTEST_F(RedisConnPoolImplTest, NoHost) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n  MockPoolCallbacks callbacks;\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(nullptr));\n  
Common::Redis::Client::PoolRequest* request =\n      conn_pool_->makeRequest(\"hash_key\", value, callbacks);\n  EXPECT_EQ(nullptr, request);\n\n  tls_.shutdownThread();\n}\n\nTEST_F(RedisConnPoolImplTest, RemoteClose) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n  Common::Redis::Client::MockPoolRequest active_request;\n  MockPoolCallbacks callbacks;\n  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();\n\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_));\n  EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));\n  EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())\n      .WillRepeatedly(Return(test_address_));\n  EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request));\n  conn_pool_->makeRequest(\"hash_key\", value, callbacks);\n\n  EXPECT_CALL(tls_.dispatcher_, deferredDelete_(_));\n  client->runHighWatermarkCallbacks();\n  client->runLowWatermarkCallbacks();\n  client->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  EXPECT_CALL(active_request, cancel());\n  EXPECT_CALL(callbacks, onFailure_());\n  tls_.shutdownThread();\n}\n\nTEST_F(RedisConnPoolImplTest, MakeRequestToHost) {\n  Common::Redis::RespValue value;\n  Common::Redis::Client::MockPoolRequest active_request1;\n  Common::Redis::Client::MockPoolRequest active_request2;\n  Common::Redis::Client::MockClientCallbacks callbacks1;\n  Common::Redis::Client::MockClientCallbacks callbacks2;\n  Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n  Upstream::HostConstSharedPtr host1;\n  Upstream::HostConstSharedPtr host2;\n\n  {\n    InSequence s;\n\n    setup(false);\n\n    // There is no cluster yet, so makeRequestToHost() should fail.\n    EXPECT_EQ(nullptr, 
conn_pool_->makeRequestToHost(\"10.0.0.1:3000\", value, callbacks1));\n    // Add the cluster now.\n    update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_);\n\n    EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1)));\n    EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1)))\n        .WillOnce(Return(&active_request1));\n    Common::Redis::Client::PoolRequest* request1 =\n        conn_pool_->makeRequestToHost(\"10.0.0.1:3000\", value, callbacks1);\n    EXPECT_EQ(&active_request1, request1);\n    EXPECT_EQ(host1->address()->asString(), \"10.0.0.1:3000\");\n\n    // IPv6 address returned from Redis server will not have square brackets\n    // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around\n    // the address.\n    EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2)));\n    EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2)))\n        .WillOnce(Return(&active_request2));\n    Common::Redis::Client::PoolRequest* request2 =\n        conn_pool_->makeRequestToHost(\"2001:470:813B:0:0:0:0:1:3333\", value, callbacks2);\n    EXPECT_EQ(&active_request2, request2);\n    EXPECT_EQ(host2->address()->asString(), \"[2001:470:813b::1]:3333\");\n\n    // Test with a badly specified host address (no colon, no address, no port).\n    EXPECT_EQ(conn_pool_->makeRequestToHost(\"bad\", value, callbacks1), nullptr);\n    // Test with a badly specified IPv4 address.\n    EXPECT_EQ(conn_pool_->makeRequestToHost(\"10.0.bad:3000\", value, callbacks1), nullptr);\n    // Test with a badly specified TCP port.\n    EXPECT_EQ(conn_pool_->makeRequestToHost(\"10.0.0.1:bad\", value, callbacks1), nullptr);\n    // Test with a TCP port outside of the acceptable range for a 32-bit integer.\n    EXPECT_EQ(conn_pool_->makeRequestToHost(\"10.0.0.1:4294967297\", value, callbacks1),\n              nullptr); // 2^32 + 1\n    // Test with a TCP port outside of the 
acceptable range for a TCP port (0 .. 65535).\n    EXPECT_EQ(conn_pool_->makeRequestToHost(\"10.0.0.1:65536\", value, callbacks1), nullptr);\n    // Test with a badly specified IPv6-like address.\n    EXPECT_EQ(conn_pool_->makeRequestToHost(\"bad:ipv6:3000\", value, callbacks1), nullptr);\n    // Test with a valid IPv6 address and a badly specified TCP port (out of range).\n    EXPECT_EQ(conn_pool_->makeRequestToHost(\"2001:470:813b:::70000\", value, callbacks1), nullptr);\n  }\n\n  // We cannot guarantee which order close will be called, perform these checks unsequenced\n  EXPECT_CALL(*client1, close());\n  EXPECT_CALL(*client2, close());\n  tls_.shutdownThread();\n}\n\nTEST_F(RedisConnPoolImplTest, MakeRequestToHostWithZeroMaxUnknownUpstreamConnectionLimit) {\n  InSequence s;\n\n  // Create a ConnPool with a max_upstream_unknown_connections setting of 0.\n  setup(true, true, 0);\n\n  Common::Redis::RespValue value;\n  Common::Redis::Client::MockClientCallbacks callbacks1;\n\n  // The max_unknown_upstream_connections is set to 0. Request should fail.\n  EXPECT_EQ(nullptr, conn_pool_->makeRequestToHost(\"10.0.0.1:3000\", value, callbacks1));\n  EXPECT_EQ(maxUpstreamUnknownConnectionsReached().value(), 1);\n  tls_.shutdownThread();\n}\n\n// This test forces the creation of 2 hosts (one with an IPv4 address, and the other with an IPv6\n// address) and pending requests using makeRequestToHost(). After their creation, \"new\" hosts are\n// discovered, and the original hosts are put aside to drain. 
The test then verifies the drain\n// logic.\nTEST_F(RedisConnPoolImplTest, HostsAddedAndRemovedWithDraining) {\n  setup();\n\n  Common::Redis::RespValue value;\n  Common::Redis::Client::MockPoolRequest auth_request1, active_request1;\n  Common::Redis::Client::MockPoolRequest auth_request2, active_request2;\n  Common::Redis::Client::MockClientCallbacks callbacks1;\n  Common::Redis::Client::MockClientCallbacks callbacks2;\n  Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n  Upstream::HostConstSharedPtr host1;\n  Upstream::HostConstSharedPtr host2;\n\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1)));\n  EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1)))\n      .WillOnce(Return(&active_request1));\n  Common::Redis::Client::PoolRequest* request1 =\n      conn_pool_->makeRequestToHost(\"10.0.0.1:3000\", value, callbacks1);\n  EXPECT_EQ(&active_request1, request1);\n  EXPECT_EQ(host1->address()->asString(), \"10.0.0.1:3000\");\n\n  // IPv6 address returned from Redis server will not have square brackets\n  // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around\n  // the address.\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2)));\n  EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2)))\n      .WillOnce(Return(&active_request2));\n  Common::Redis::Client::PoolRequest* request2 =\n      conn_pool_->makeRequestToHost(\"2001:470:813B:0:0:0:0:1:3333\", value, callbacks2);\n  EXPECT_EQ(&active_request2, request2);\n  EXPECT_EQ(host2->address()->asString(), \"[2001:470:813b::1]:3333\");\n\n  absl::node_hash_map<std::string, Upstream::HostConstSharedPtr>& host_address_map =\n      hostAddressMap();\n  EXPECT_EQ(host_address_map.size(), 2); // host1 and host2 have been created.\n  
EXPECT_EQ(host_address_map[host1->address()->asString()], host1);\n  EXPECT_EQ(host_address_map[host2->address()->asString()], host2);\n  EXPECT_EQ(clientMap().size(), 2);\n  EXPECT_NE(clientMap().find(host1), clientMap().end());\n  EXPECT_NE(clientMap().find(host2), clientMap().end());\n  void* host1_active_client = clientMap(host1);\n  EXPECT_EQ(createdViaRedirectHosts().size(), 2);\n  EXPECT_EQ(clientsToDrain().size(), 0);\n  EXPECT_EQ(drainTimer()->enabled(), false);\n\n  std::shared_ptr<Upstream::MockHost> new_host1(new Upstream::MockHost());\n  std::shared_ptr<Upstream::MockHost> new_host2(new Upstream::MockHost());\n  auto new_host1_test_address = Network::Utility::resolveUrl(\"tcp://10.0.0.1:3000\");\n  auto new_host2_test_address = Network::Utility::resolveUrl(\"tcp://[2001:470:813b::1]:3333\");\n  EXPECT_CALL(*new_host1, address()).WillRepeatedly(Return(new_host1_test_address));\n  EXPECT_CALL(*new_host2, address()).WillRepeatedly(Return(new_host2_test_address));\n  EXPECT_CALL(*client1, active()).WillOnce(Return(true));\n  EXPECT_CALL(*client2, active()).WillOnce(Return(false));\n  EXPECT_CALL(*client2, close());\n\n  cm_.thread_local_cluster_.cluster_.prioritySet().getMockHostSet(0)->runCallbacks(\n      {new_host1, new_host2}, {});\n\n  host_address_map = hostAddressMap();\n  EXPECT_EQ(host_address_map.size(), 2); // new_host1 and new_host2 have been added.\n  EXPECT_EQ(host_address_map[new_host1_test_address->asString()], new_host1);\n  EXPECT_EQ(host_address_map[new_host2_test_address->asString()], new_host2);\n  EXPECT_EQ(clientMap().size(), 0);\n  EXPECT_EQ(createdViaRedirectHosts().size(), 0);\n  EXPECT_EQ(clientsToDrain().size(), 1); // client2 has already been drained.\n  EXPECT_EQ(clientsToDrain().front().get(), host1_active_client); // client1 is still active.\n  EXPECT_EQ(drainTimer()->enabled(), true);\n\n  cm_.thread_local_cluster_.cluster_.prioritySet().getMockHostSet(0)->runCallbacks(\n      {}, {new_host1, new_host2});\n\n  
EXPECT_EQ(host_address_map.size(), 0); // new_host1 and new_host2 have been removed.\n  EXPECT_EQ(clientMap().size(), 0);\n  EXPECT_EQ(createdViaRedirectHosts().size(), 0);\n  EXPECT_EQ(clientsToDrain().size(), 1);\n  EXPECT_EQ(clientsToDrain().front().get(), host1_active_client);\n  EXPECT_EQ(drainTimer()->enabled(), true);\n\n  EXPECT_CALL(*client1, active()).WillOnce(Return(true));\n  drainTimer()->disableTimer();\n  drainClients();\n  EXPECT_EQ(clientsToDrain().size(), 1); // Nothing happened. client1 is still active.\n  EXPECT_EQ(drainTimer()->enabled(), true);\n\n  EXPECT_CALL(*client1, active()).Times(2).WillRepeatedly(Return(false));\n  EXPECT_CALL(*client1, close());\n  drainTimer()->disableTimer();\n  drainClients();\n  EXPECT_EQ(clientsToDrain().size(), 0); // client1 has been drained and closed.\n  EXPECT_EQ(drainTimer()->enabled(), false);\n  EXPECT_EQ(upstreamCxDrained().value(), 1);\n\n  tls_.shutdownThread();\n}\n\n// This test creates 2 hosts (one with an IPv4 address, and the other with an IPv6\n// address) and pending requests using makeRequestToHost(). After their creation, \"new\" hosts are\n// discovered (added), and the original hosts are put aside to drain. 
Destructors are then\n// called on these not yet drained clients, and the underlying connections should be closed.\nTEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithNoDraining) {\n  setup();\n\n  Common::Redis::RespValue value;\n  Common::Redis::Client::MockPoolRequest auth_request1, active_request1;\n  Common::Redis::Client::MockPoolRequest auth_request2, active_request2;\n  Common::Redis::Client::MockClientCallbacks callbacks1;\n  Common::Redis::Client::MockClientCallbacks callbacks2;\n  Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n  Upstream::HostConstSharedPtr host1;\n  Upstream::HostConstSharedPtr host2;\n\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1)));\n  EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1)))\n      .WillOnce(Return(&active_request1));\n  Common::Redis::Client::PoolRequest* request1 =\n      conn_pool_->makeRequestToHost(\"10.0.0.1:3000\", value, callbacks1);\n  EXPECT_EQ(&active_request1, request1);\n  EXPECT_EQ(host1->address()->asString(), \"10.0.0.1:3000\");\n\n  // IPv6 address returned from Redis server will not have square brackets\n  // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around\n  // the address.\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2)));\n  EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2)))\n      .WillOnce(Return(&active_request2));\n  Common::Redis::Client::PoolRequest* request2 =\n      conn_pool_->makeRequestToHost(\"2001:470:813B:0:0:0:0:1:3333\", value, callbacks2);\n  EXPECT_EQ(&active_request2, request2);\n  EXPECT_EQ(host2->address()->asString(), \"[2001:470:813b::1]:3333\");\n\n  absl::node_hash_map<std::string, Upstream::HostConstSharedPtr>& host_address_map =\n      hostAddressMap();\n  
EXPECT_EQ(host_address_map.size(), 2); // host1 and host2 have been created.\n  EXPECT_EQ(host_address_map[host1->address()->asString()], host1);\n  EXPECT_EQ(host_address_map[host2->address()->asString()], host2);\n  EXPECT_EQ(clientMap().size(), 2);\n  EXPECT_NE(clientMap().find(host1), clientMap().end());\n  EXPECT_NE(clientMap().find(host2), clientMap().end());\n  EXPECT_EQ(createdViaRedirectHosts().size(), 2);\n  EXPECT_EQ(clientsToDrain().size(), 0);\n  EXPECT_EQ(drainTimer()->enabled(), false);\n\n  std::shared_ptr<Upstream::MockHost> new_host1(new Upstream::MockHost());\n  std::shared_ptr<Upstream::MockHost> new_host2(new Upstream::MockHost());\n  auto new_host1_test_address = Network::Utility::resolveUrl(\"tcp://10.0.0.1:3000\");\n  auto new_host2_test_address = Network::Utility::resolveUrl(\"tcp://[2001:470:813b::1]:3333\");\n  EXPECT_CALL(*new_host1, address()).WillRepeatedly(Return(new_host1_test_address));\n  EXPECT_CALL(*new_host2, address()).WillRepeatedly(Return(new_host2_test_address));\n  EXPECT_CALL(*client1, active()).WillOnce(Return(true));\n  EXPECT_CALL(*client2, active()).WillOnce(Return(true));\n\n  cm_.thread_local_cluster_.cluster_.prioritySet().getMockHostSet(0)->runCallbacks(\n      {new_host1, new_host2}, {});\n\n  host_address_map = hostAddressMap();\n  EXPECT_EQ(host_address_map.size(), 2); // new_host1 and new_host2 have been added.\n  EXPECT_EQ(host_address_map[new_host1_test_address->asString()], new_host1);\n  EXPECT_EQ(host_address_map[new_host2_test_address->asString()], new_host2);\n  EXPECT_EQ(clientMap().size(), 0);\n  EXPECT_EQ(createdViaRedirectHosts().size(), 0);\n  EXPECT_EQ(clientsToDrain().size(), 2); // host1 and host2 have been put aside to drain.\n  EXPECT_EQ(drainTimer()->enabled(), true);\n\n  EXPECT_CALL(*client1, close());\n  EXPECT_CALL(*client2, close());\n  EXPECT_CALL(*client1, active()).WillOnce(Return(true));\n  EXPECT_CALL(*client2, active()).WillOnce(Return(true));\n  
EXPECT_EQ(upstreamCxDrained().value(), 0);\n\n  tls_.shutdownThread();\n}\n\n// This test creates 2 hosts (one with an IPv4 address, and the other with an IPv6\n// address) and pending requests using makeRequestToHost(). After their creation, \"new\" hosts are\n// discovered (added), and the original hosts are put aside to drain. The cluster is removed and the\n// underlying connections should be closed.\nTEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithClusterRemoval) {\n  setup();\n\n  Common::Redis::RespValue value;\n  Common::Redis::Client::MockPoolRequest auth_request1, active_request1;\n  Common::Redis::Client::MockPoolRequest auth_request2, active_request2;\n  Common::Redis::Client::MockClientCallbacks callbacks1;\n  Common::Redis::Client::MockClientCallbacks callbacks2;\n  Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n  Upstream::HostConstSharedPtr host1;\n  Upstream::HostConstSharedPtr host2;\n\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1)));\n  EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1)))\n      .WillOnce(Return(&active_request1));\n  Common::Redis::Client::PoolRequest* request1 =\n      conn_pool_->makeRequestToHost(\"10.0.0.1:3000\", value, callbacks1);\n  EXPECT_EQ(&active_request1, request1);\n  EXPECT_EQ(host1->address()->asString(), \"10.0.0.1:3000\");\n\n  // IPv6 address returned from Redis server will not have square brackets\n  // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around\n  // the address.\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2)));\n  EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2)))\n      .WillOnce(Return(&active_request2));\n  Common::Redis::Client::PoolRequest* request2 =\n      
conn_pool_->makeRequestToHost(\"2001:470:813B:0:0:0:0:1:3333\", value, callbacks2);\n  EXPECT_EQ(&active_request2, request2);\n  EXPECT_EQ(host2->address()->asString(), \"[2001:470:813b::1]:3333\");\n\n  absl::node_hash_map<std::string, Upstream::HostConstSharedPtr>& host_address_map =\n      hostAddressMap();\n  EXPECT_EQ(host_address_map.size(), 2); // host1 and host2 have been created.\n  EXPECT_EQ(host_address_map[host1->address()->asString()], host1);\n  EXPECT_EQ(host_address_map[host2->address()->asString()], host2);\n  EXPECT_EQ(clientMap().size(), 2);\n  EXPECT_NE(clientMap().find(host1), clientMap().end());\n  EXPECT_NE(clientMap().find(host2), clientMap().end());\n  EXPECT_EQ(createdViaRedirectHosts().size(), 2);\n  EXPECT_EQ(clientsToDrain().size(), 0);\n  EXPECT_EQ(drainTimer()->enabled(), false);\n\n  std::shared_ptr<Upstream::MockHost> new_host1(new Upstream::MockHost());\n  std::shared_ptr<Upstream::MockHost> new_host2(new Upstream::MockHost());\n  auto new_host1_test_address = Network::Utility::resolveUrl(\"tcp://10.0.0.1:3000\");\n  auto new_host2_test_address = Network::Utility::resolveUrl(\"tcp://[2001:470:813b::1]:3333\");\n  EXPECT_CALL(*new_host1, address()).WillRepeatedly(Return(new_host1_test_address));\n  EXPECT_CALL(*new_host2, address()).WillRepeatedly(Return(new_host2_test_address));\n  EXPECT_CALL(*client1, active()).WillOnce(Return(true));\n  EXPECT_CALL(*client2, active()).WillOnce(Return(true));\n\n  cm_.thread_local_cluster_.cluster_.prioritySet().getMockHostSet(0)->runCallbacks(\n      {new_host1, new_host2}, {});\n\n  host_address_map = hostAddressMap();\n  EXPECT_EQ(host_address_map.size(), 2); // new_host1 and new_host2 have been added.\n  EXPECT_EQ(host_address_map[new_host1_test_address->asString()], new_host1);\n  EXPECT_EQ(host_address_map[new_host2_test_address->asString()], new_host2);\n  EXPECT_EQ(clientMap().size(), 0);\n  EXPECT_EQ(createdViaRedirectHosts().size(), 0);\n  EXPECT_EQ(clientsToDrain().size(), 2); // host1 
and host2 have been put aside to drain.\n  EXPECT_EQ(drainTimer()->enabled(), true);\n\n  EXPECT_CALL(*client1, close());\n  EXPECT_CALL(*client2, close());\n  EXPECT_CALL(*client1, active()).WillOnce(Return(true));\n  EXPECT_CALL(*client2, active()).WillOnce(Return(true));\n  update_callbacks_->onClusterRemoval(\"fake_cluster\");\n\n  EXPECT_EQ(hostAddressMap().size(), 0);\n  EXPECT_EQ(clientMap().size(), 0);\n  EXPECT_EQ(clientsToDrain().size(), 0);\n  EXPECT_EQ(upstreamCxDrained().value(), 0);\n\n  tls_.shutdownThread();\n}\n\nTEST_F(RedisConnPoolImplTest, MakeRequestToRedisCluster) {\n\n  absl::optional<envoy::config::cluster::v3::Cluster::CustomClusterType> cluster_type;\n  cluster_type.emplace();\n  cluster_type->set_name(\"envoy.clusters.redis\");\n  EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, clusterType())\n      .WillOnce(ReturnRef(cluster_type));\n  EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, lbType())\n      .WillOnce(Return(Upstream::LoadBalancerType::ClusterProvided));\n\n  setup();\n\n  makeSimpleRequest(true, \"foo\", 44950);\n\n  makeSimpleRequest(false, \"bar\", 37829);\n\n  EXPECT_CALL(*client_, close());\n  tls_.shutdownThread();\n};\n\nTEST_F(RedisConnPoolImplTest, MakeRequestToRedisClusterHashtag) {\n\n  absl::optional<envoy::config::cluster::v3::Cluster::CustomClusterType> cluster_type;\n  cluster_type.emplace();\n  cluster_type->set_name(\"envoy.clusters.redis\");\n  EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, clusterType())\n      .WillOnce(ReturnRef(cluster_type));\n  EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, lbType())\n      .WillOnce(Return(Upstream::LoadBalancerType::ClusterProvided));\n\n  setup();\n\n  makeSimpleRequest(true, \"{foo}bar\", 44950);\n\n  makeSimpleRequest(false, \"foo{bar}\", 37829);\n\n  EXPECT_CALL(*client_, close());\n  tls_.shutdownThread();\n};\n\nTEST_F(RedisConnPoolImplTest, MovedRedirectionSuccess) {\n  InSequence s;\n\n  setup();\n\n  
Common::Redis::RespValueSharedPtr request_value = std::make_shared<Common::Redis::RespValue>();\n  Common::Redis::Client::MockPoolRequest active_request;\n  MockPoolCallbacks callbacks;\n  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();\n  makeRequest(client, request_value, callbacks, active_request);\n\n  Common::Redis::Client::MockPoolRequest active_request2;\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n  Upstream::HostConstSharedPtr host1;\n\n  Common::Redis::RespValuePtr moved_response{new Common::Redis::RespValue()};\n  moved_response->type(Common::Redis::RespType::Error);\n  moved_response->asString() = \"MOVED 1111 10.1.2.3:4000\";\n\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));\n  EXPECT_CALL(*client2, makeRequest_(Ref(*request_value), _)).WillOnce(Return(&active_request2));\n  EXPECT_TRUE(client->client_callbacks_.back()->onRedirection(std::move(moved_response),\n                                                              \"10.1.2.3:4000\", false));\n  EXPECT_EQ(host1->address()->asString(), \"10.1.2.3:4000\");\n\n  respond(callbacks, client2);\n\n  EXPECT_CALL(*client, close());\n  tls_.shutdownThread();\n}\n\nTEST_F(RedisConnPoolImplTest, MovedRedirectionFailure) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();\n\n  // Test with a badly specified host address (no colon, no address, no port).\n  verifyInvalidMoveResponse(client, \"bad\", true);\n\n  // Test with a badly specified IPv4 address.\n  verifyInvalidMoveResponse(client, \"10.0.bad:3000\", false);\n\n  // Test with a badly specified TCP port.\n  verifyInvalidMoveResponse(client, \"10.0.bad:3000\", false);\n\n  // Test with a TCP port outside of the acceptable range for a 32-bit integer.\n  verifyInvalidMoveResponse(client, \"10.0.0.1:4294967297\", false); // 2^32 + 1\n\n 
 // Test with a TCP port outside of the acceptable range for a TCP port (0 .. 65535).\n  verifyInvalidMoveResponse(client, \"10.0.0.1:65536\", false);\n\n  // Test with a badly specified IPv6-like address.\n  verifyInvalidMoveResponse(client, \"bad:ipv6:3000\", false);\n\n  // Test with a valid IPv6 address and a badly specified TCP port (out of range).\n  verifyInvalidMoveResponse(client, \"2001:470:813b:::70000\", false);\n\n  // Test an upstream error preventing the request from being sent.\n  MockPoolCallbacks callbacks;\n  Common::Redis::RespValueSharedPtr request3 = std::make_shared<Common::Redis::RespValue>();\n  Common::Redis::Client::MockPoolRequest active_request3;\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n  Upstream::HostConstSharedPtr host1;\n  makeRequest(client, request3, callbacks, active_request3, false);\n  Common::Redis::RespValuePtr moved_response3{new Common::Redis::RespValue()};\n  moved_response3->type(Common::Redis::RespType::Error);\n  moved_response3->asString() = \"MOVED 1111 10.1.2.3:4000\";\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));\n  EXPECT_CALL(*client2, makeRequest_(Ref(*request3), _)).WillOnce(Return(nullptr));\n  EXPECT_CALL(callbacks, onResponse_(Ref(moved_response3)));\n  EXPECT_FALSE(client->client_callbacks_.back()->onRedirection(std::move(moved_response3),\n                                                               \"10.1.2.3:4000\", false));\n  EXPECT_EQ(host1->address()->asString(), \"10.1.2.3:4000\");\n\n  EXPECT_CALL(*client, close());\n  tls_.shutdownThread();\n}\n\nTEST_F(RedisConnPoolImplTest, AskRedirectionSuccess) {\n  InSequence s;\n\n  setup();\n\n  Common::Redis::RespValueSharedPtr request_value = std::make_shared<Common::Redis::RespValue>();\n  Common::Redis::Client::MockPoolRequest active_request;\n  MockPoolCallbacks callbacks;\n  Common::Redis::Client::MockClient* client = new 
NiceMock<Common::Redis::Client::MockClient>();\n  makeRequest(client, request_value, callbacks, active_request);\n\n  Common::Redis::Client::MockPoolRequest ask_request, active_request2;\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n  Upstream::HostConstSharedPtr host1;\n\n  Common::Redis::RespValuePtr ask_response{new Common::Redis::RespValue()};\n  ask_response->type(Common::Redis::RespType::Error);\n  ask_response->asString() = \"ASK 1111 10.1.2.3:4000\";\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));\n  // Verify that the request has been properly prepended with an \"asking\" command.\n  EXPECT_CALL(*client2, makeRequest_(Ref(Common::Redis::Utility::AskingRequest::instance()), _))\n      .WillOnce(Return(&ask_request));\n  EXPECT_CALL(*client2, makeRequest_(Ref(*request_value), _)).WillOnce(Return(&active_request2));\n  EXPECT_TRUE(client->client_callbacks_.back()->onRedirection(std::move(ask_response),\n                                                              \"10.1.2.3:4000\", true));\n  EXPECT_EQ(host1->address()->asString(), \"10.1.2.3:4000\");\n\n  respond(callbacks, client2);\n\n  EXPECT_CALL(*client, close());\n  tls_.shutdownThread();\n}\n\nTEST_F(RedisConnPoolImplTest, AskRedirectionFailure) {\n  InSequence s;\n\n  setup();\n\n  MockPoolCallbacks callbacks;\n  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();\n\n  // Test an upstream error from trying to send an \"asking\" command upstream.\n  Common::Redis::Client::MockPoolRequest active_request3;\n  Common::Redis::RespValueSharedPtr request3 = std::make_shared<Common::Redis::RespValue>();\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n  Upstream::HostConstSharedPtr host1;\n  makeRequest(client, request3, callbacks, active_request3);\n  Common::Redis::RespValuePtr ask_response3{new 
Common::Redis::RespValue()};\n  ask_response3->type(Common::Redis::RespType::Error);\n  ask_response3->asString() = \"ASK 1111 10.1.2.3:4000\";\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));\n  EXPECT_CALL(*client2, makeRequest_(Ref(Common::Redis::Utility::AskingRequest::instance()), _))\n      .WillOnce(Return(nullptr));\n  EXPECT_CALL(callbacks, onResponse_(Ref(ask_response3)));\n  EXPECT_FALSE(client->client_callbacks_.back()->onRedirection(std::move(ask_response3),\n                                                               \"10.1.2.3:4000\", true));\n  EXPECT_EQ(host1->address()->asString(), \"10.1.2.3:4000\");\n\n  // Test an upstream error from trying to send the original request after the \"asking\" command is\n  // sent successfully.\n  Common::Redis::Client::MockPoolRequest active_request4, active_request5;\n  Common::Redis::RespValueSharedPtr request4 = std::make_shared<Common::Redis::RespValue>();\n  makeRequest(client, request4, callbacks, active_request4, false);\n  Common::Redis::RespValuePtr ask_response4{new Common::Redis::RespValue()};\n  ask_response4->type(Common::Redis::RespType::Error);\n  ask_response4->asString() = \"ASK 1111 10.1.2.3:4000\";\n  EXPECT_CALL(*client2, makeRequest_(Ref(Common::Redis::Utility::AskingRequest::instance()), _))\n      .WillOnce(Return(&active_request5));\n  EXPECT_CALL(*client2, makeRequest_(Ref(*request4), _)).WillOnce(Return(nullptr));\n  EXPECT_CALL(callbacks, onResponse_(Ref(ask_response4)));\n  EXPECT_FALSE(client->client_callbacks_.back()->onRedirection(std::move(ask_response4),\n                                                               \"10.1.2.3:4000\", true));\n\n  EXPECT_CALL(*client, close());\n  tls_.shutdownThread();\n}\n\nTEST_F(RedisConnPoolImplTest, MakeRequestAndRedirectFollowedByDelete) {\n  tls_.defer_delete = true;\n  std::unique_ptr<NiceMock<Stats::MockStore>> store =\n      std::make_unique<NiceMock<Stats::MockStore>>();\n  
cluster_refresh_manager_ =\n      std::make_shared<NiceMock<Extensions::Common::Redis::MockClusterRefreshManager>>();\n  auto redis_command_stats =\n      Common::Redis::RedisCommandStats::createRedisCommandStats(store->symbolTable());\n  conn_pool_ = std::make_shared<InstanceImpl>(\n      cluster_name_, cm_, *this, tls_,\n      Common::Redis::Client::createConnPoolSettings(20, true, true, 100, read_policy_), api_,\n      std::move(store), redis_command_stats, cluster_refresh_manager_);\n  conn_pool_->init();\n\n  auto& local_pool = threadLocalPool();\n  conn_pool_.reset();\n\n  // Request\n  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();\n  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();\n  Common::Redis::Client::MockPoolRequest active_request;\n  MockPoolCallbacks callbacks;\n  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n        EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(\"hash_key\"));\n        EXPECT_EQ(context->metadataMatchCriteria(), nullptr);\n        EXPECT_EQ(context->downstreamConnection(), nullptr);\n        return this->cm_.thread_local_cluster_.lb_.host_;\n      }));\n  EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));\n  EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())\n      .WillRepeatedly(Return(this->test_address_));\n  EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request));\n  EXPECT_NE(nullptr, local_pool.makeRequest(\"hash_key\", value, callbacks));\n\n  // Move redirection.\n  Common::Redis::Client::MockPoolRequest active_request2;\n  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();\n  Upstream::HostConstSharedPtr host1;\n  Common::Redis::RespValuePtr moved_response{new Common::Redis::RespValue()};\n  
moved_response->type(Common::Redis::RespType::Error);\n  moved_response->asString() = \"MOVED 1111 10.1.2.3:4000\";\n\n  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));\n  EXPECT_CALL(*client2, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request2));\n  EXPECT_TRUE(client->client_callbacks_.back()->onRedirection(std::move(moved_response),\n                                                              \"10.1.2.3:4000\", false));\n  EXPECT_EQ(host1->address()->asString(), \"10.1.2.3:4000\");\n  EXPECT_CALL(callbacks, onResponse_(_));\n  client2->client_callbacks_.back()->onResponse(std::make_unique<Common::Redis::RespValue>());\n\n  EXPECT_CALL(*client, close());\n  tls_.shutdownThread();\n}\n\n} // namespace ConnPool\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/mocks.cc",
    "content": "#include \"mocks.h\"\n\nusing testing::_;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nMockRouter::MockRouter(RouteSharedPtr route) : route_(std::move(route)) {\n  ON_CALL(*this, upstreamPool(_)).WillByDefault(Return(route_));\n}\nMockRouter::~MockRouter() = default;\n\nMockRoute::MockRoute(ConnPool::InstanceSharedPtr conn_pool) : conn_pool_(std::move(conn_pool)) {\n  ON_CALL(*this, upstream()).WillByDefault(Return(conn_pool_));\n  ON_CALL(*this, mirrorPolicies()).WillByDefault(ReturnRef(policies_));\n}\nMockRoute::~MockRoute() = default;\n\nMockMirrorPolicy::MockMirrorPolicy(ConnPool::InstanceSharedPtr conn_pool)\n    : conn_pool_(std::move(conn_pool)) {\n  ON_CALL(*this, upstream()).WillByDefault(Return(conn_pool_));\n  ON_CALL(*this, shouldMirror(_)).WillByDefault(Return(true));\n}\n\nMockFaultManager::MockFaultManager() = default;\nMockFaultManager::MockFaultManager(const MockFaultManager&) {}\nMockFaultManager::~MockFaultManager() = default;\n\nnamespace ConnPool {\n\nMockPoolCallbacks::MockPoolCallbacks() = default;\nMockPoolCallbacks::~MockPoolCallbacks() = default;\n\nMockInstance::MockInstance() = default;\nMockInstance::~MockInstance() = default;\n\n} // namespace ConnPool\n\nnamespace CommandSplitter {\n\nMockSplitRequest::MockSplitRequest() = default;\nMockSplitRequest::~MockSplitRequest() = default;\n\nMockSplitCallbacks::MockSplitCallbacks() = default;\nMockSplitCallbacks::~MockSplitCallbacks() = default;\n\nMockInstance::MockInstance() = default;\nMockInstance::~MockInstance() = default;\n\n} // namespace CommandSplitter\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/mocks.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"extensions/common/redis/cluster_refresh_manager.h\"\n#include \"extensions/filters/network/common/redis/client.h\"\n#include \"extensions/filters/network/common/redis/codec_impl.h\"\n#include \"extensions/filters/network/common/redis/fault.h\"\n#include \"extensions/filters/network/redis_proxy/command_splitter.h\"\n#include \"extensions/filters/network/redis_proxy/conn_pool.h\"\n#include \"extensions/filters/network/redis_proxy/router.h\"\n\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nclass MockRouter : public Router {\npublic:\n  MockRouter(RouteSharedPtr route);\n  ~MockRouter() override;\n\n  MOCK_METHOD(RouteSharedPtr, upstreamPool, (std::string & key));\n  RouteSharedPtr route_;\n};\n\nclass MockRoute : public Route {\npublic:\n  MockRoute(ConnPool::InstanceSharedPtr);\n  ~MockRoute() override;\n\n  MOCK_METHOD(ConnPool::InstanceSharedPtr, upstream, (), (const));\n  MOCK_METHOD(const MirrorPolicies&, mirrorPolicies, (), (const));\n  ConnPool::InstanceSharedPtr conn_pool_;\n  MirrorPolicies policies_;\n};\n\nclass MockMirrorPolicy : public MirrorPolicy {\npublic:\n  MockMirrorPolicy(ConnPool::InstanceSharedPtr);\n  ~MockMirrorPolicy() override = default;\n\n  MOCK_METHOD(ConnPool::InstanceSharedPtr, upstream, (), (const));\n  MOCK_METHOD(bool, shouldMirror, (const std::string&), (const));\n\n  ConnPool::InstanceSharedPtr conn_pool_;\n};\n\nclass MockFaultManager : public Common::Redis::FaultManager {\npublic:\n  MockFaultManager();\n  MockFaultManager(const MockFaultManager& other);\n  ~MockFaultManager() override;\n\n  MOCK_METHOD(const Common::Redis::Fault*, getFaultForCommand, (const std::string&), (const));\n};\n\nnamespace ConnPool {\n\nclass MockPoolCallbacks : public PoolCallbacks {\npublic:\n  MockPoolCallbacks();\n  
~MockPoolCallbacks() override;\n\n  void onResponse(Common::Redis::RespValuePtr&& value) override { onResponse_(value); }\n  void onFailure() override { onFailure_(); }\n\n  MOCK_METHOD(void, onResponse_, (Common::Redis::RespValuePtr & value));\n  MOCK_METHOD(void, onFailure_, ());\n};\n\nclass MockInstance : public Instance {\npublic:\n  MockInstance();\n  ~MockInstance() override;\n\n  Common::Redis::Client::PoolRequest* makeRequest(const std::string& hash_key,\n                                                  RespVariant&& request,\n                                                  PoolCallbacks& callbacks) override {\n    return makeRequest_(hash_key, request, callbacks);\n  }\n\n  MOCK_METHOD(Common::Redis::Client::PoolRequest*, makeRequest_,\n              (const std::string& hash_key, RespVariant& request, PoolCallbacks& callbacks));\n  MOCK_METHOD(bool, onRedirection, ());\n};\n} // namespace ConnPool\n\nnamespace CommandSplitter {\n\nclass MockSplitRequest : public SplitRequest {\npublic:\n  MockSplitRequest();\n  ~MockSplitRequest() override;\n\n  MOCK_METHOD(void, cancel, ());\n};\n\nclass MockSplitCallbacks : public SplitCallbacks {\npublic:\n  MockSplitCallbacks();\n  ~MockSplitCallbacks() override;\n\n  void onResponse(Common::Redis::RespValuePtr&& value) override { onResponse_(value); }\n\n  MOCK_METHOD(bool, connectionAllowed, ());\n  MOCK_METHOD(void, onAuth, (const std::string& password));\n  MOCK_METHOD(void, onAuth, (const std::string& username, const std::string& password));\n  MOCK_METHOD(void, onResponse_, (Common::Redis::RespValuePtr & value));\n};\n\nclass MockInstance : public Instance {\npublic:\n  MockInstance();\n  ~MockInstance() override;\n\n  SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks,\n                              Event::Dispatcher& dispatcher) override {\n    return SplitRequestPtr{makeRequest_(*request, callbacks, dispatcher)};\n  }\n\n  MOCK_METHOD(SplitRequest*, makeRequest_,\n 
             (const Common::Redis::RespValue& request, SplitCallbacks& callbacks,\n               Event::Dispatcher& dispatcher));\n};\n\n} // namespace CommandSplitter\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/proxy_filter_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n\n#include \"extensions/filters/network/redis_proxy/proxy_filter.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/extensions/filters/network/redis_proxy/mocks.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ByRef;\nusing testing::DoAll;\nusing testing::Eq;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\nusing testing::WithArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nenvoy::extensions::filters::network::redis_proxy::v3::RedisProxy\nparseProtoFromYaml(const std::string& yaml_string) {\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy config;\n  TestUtility::loadFromYaml(yaml_string, config);\n  return config;\n}\n\nclass RedisProxyFilterConfigTest : public testing::Test {\npublic:\n  Stats::TestUtil::TestStore store_;\n  Network::MockDrainDecision drain_decision_;\n  Runtime::MockLoader runtime_;\n  NiceMock<Api::MockApi> api_;\n};\n\nTEST_F(RedisProxyFilterConfigTest, Normal) {\n  const std::string yaml_string = R\"EOF(\n  prefix_routes:\n    catch_all_route:\n      cluster: fake_cluster\n  stat_prefix: foo\n  settings:\n    op_timeout: 0.01s\n  )EOF\";\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config =\n      parseProtoFromYaml(yaml_string);\n  ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_, api_);\n  EXPECT_EQ(\"redis.foo.\", config.stat_prefix_);\n  
EXPECT_TRUE(config.downstream_auth_username_.empty());\n  EXPECT_TRUE(config.downstream_auth_password_.empty());\n}\n\nTEST_F(RedisProxyFilterConfigTest, BadRedisProxyConfig) {\n  const std::string yaml_string = R\"EOF(\n  cluster_name: fake_cluster\n  cluster: fake_cluster\n  )EOF\";\n\n  EXPECT_THROW(parseProtoFromYaml(yaml_string), EnvoyException);\n}\n\nTEST_F(RedisProxyFilterConfigTest, DownstreamAuthPasswordSet) {\n  const std::string yaml_string = R\"EOF(\n  prefix_routes:\n    catch_all_route:\n      cluster: fake_cluster\n  stat_prefix: foo\n  settings:\n    op_timeout: 0.01s\n  downstream_auth_password:\n    inline_string: somepassword\n  )EOF\";\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config =\n      parseProtoFromYaml(yaml_string);\n  ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_, api_);\n  EXPECT_EQ(config.downstream_auth_password_, \"somepassword\");\n}\n\nTEST_F(RedisProxyFilterConfigTest, DownstreamAuthAclSet) {\n  const std::string yaml_string = R\"EOF(\n  prefix_routes:\n    catch_all_route:\n      cluster: fake_cluster\n  stat_prefix: foo\n  settings:\n    op_timeout: 0.01s\n  downstream_auth_username:\n    inline_string: someusername\n  downstream_auth_password:\n    inline_string: somepassword\n  )EOF\";\n\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config =\n      parseProtoFromYaml(yaml_string);\n  ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_, api_);\n  EXPECT_EQ(config.downstream_auth_username_, \"someusername\");\n  EXPECT_EQ(config.downstream_auth_password_, \"somepassword\");\n}\n\nclass RedisProxyFilterTest : public testing::Test, public Common::Redis::DecoderFactory {\npublic:\n  static constexpr const char* DefaultConfig = R\"EOF(\n  prefix_routes:\n    catch_all_route:\n      cluster: fake_cluster\n  stat_prefix: foo\n  settings:\n    op_timeout: 0.01s\n  )EOF\";\n\n  RedisProxyFilterTest(const std::string& 
yaml_string) {\n    envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config =\n        parseProtoFromYaml(yaml_string);\n    config_ =\n        std::make_shared<ProxyFilterConfig>(proto_config, store_, drain_decision_, runtime_, api_);\n    filter_ = std::make_unique<ProxyFilter>(*this, Common::Redis::EncoderPtr{encoder_}, splitter_,\n                                            config_);\n    filter_->initializeReadFilterCallbacks(filter_callbacks_);\n    EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n    EXPECT_EQ(1UL, config_->stats_.downstream_cx_total_.value());\n    EXPECT_EQ(1UL, config_->stats_.downstream_cx_active_.value());\n\n    // NOP currently.\n    filter_->onAboveWriteBufferHighWatermark();\n    filter_->onBelowWriteBufferLowWatermark();\n  }\n\n  RedisProxyFilterTest() : RedisProxyFilterTest(DefaultConfig) {}\n\n  ~RedisProxyFilterTest() override {\n    filter_.reset();\n    for (const Stats::GaugeSharedPtr& gauge : store_.gauges()) {\n      EXPECT_EQ(0U, gauge->value());\n    }\n  }\n\n  // Common::Redis::DecoderFactory\n  Common::Redis::DecoderPtr create(Common::Redis::DecoderCallbacks& callbacks) override {\n    decoder_callbacks_ = &callbacks;\n    return Common::Redis::DecoderPtr{decoder_};\n  }\n\n  Common::Redis::MockEncoder* encoder_{new Common::Redis::MockEncoder()};\n  Common::Redis::MockDecoder* decoder_{new Common::Redis::MockDecoder()};\n  Common::Redis::DecoderCallbacks* decoder_callbacks_{};\n  CommandSplitter::MockInstance splitter_;\n  Stats::TestUtil::TestStore store_;\n  NiceMock<Network::MockDrainDecision> drain_decision_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  ProxyFilterConfigSharedPtr config_;\n  std::unique_ptr<ProxyFilter> filter_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  NiceMock<Api::MockApi> api_;\n};\n\nclass RedisProxyFilterTestWithTwoCallbacks : public RedisProxyFilterTest {\npublic:\n  CommandSplitter::MockSplitRequest* 
request_handle1_{new CommandSplitter::MockSplitRequest()};\n  CommandSplitter::MockSplitRequest* request_handle2_{new CommandSplitter::MockSplitRequest()};\n  CommandSplitter::SplitCallbacks* request_callbacks1_;\n  CommandSplitter::SplitCallbacks* request_callbacks2_;\n\n  void decodeHelper(Buffer::Instance&) {\n    Common::Redis::RespValuePtr request1(new Common::Redis::RespValue());\n    EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _, _))\n        .WillOnce(\n            DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1_)), Return(request_handle1_)));\n    decoder_callbacks_->onRespValue(std::move(request1));\n\n    Common::Redis::RespValuePtr request2(new Common::Redis::RespValue());\n    EXPECT_CALL(splitter_, makeRequest_(Ref(*request2), _, _))\n        .WillOnce(\n            DoAll(WithArg<1>(SaveArgAddress(&request_callbacks2_)), Return(request_handle2_)));\n    decoder_callbacks_->onRespValue(std::move(request2));\n  }\n};\n\nTEST_F(RedisProxyFilterTestWithTwoCallbacks, OutOfOrderResponseWithDrainClose) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data)))\n      .WillOnce(Invoke(this, &RedisProxyFilterTestWithTwoCallbacks::decodeHelper));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n\n  EXPECT_EQ(2UL, config_->stats_.downstream_rq_total_.value());\n  EXPECT_EQ(2UL, config_->stats_.downstream_rq_active_.value());\n\n  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n  Common::Redis::RespValue* response2_ptr = response2.get();\n  request_callbacks2_->onResponse(std::move(response2));\n\n  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());\n  EXPECT_CALL(*encoder_, encode(Ref(*response1), _));\n  EXPECT_CALL(*encoder_, encode(Ref(*response2_ptr), _));\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n  EXPECT_CALL(drain_decision_, drainClose()).WillOnce(Return(true));\n  EXPECT_CALL(runtime_.snapshot_, 
featureEnabled(\"redis.drain_close_enabled\", 100))\n      .WillOnce(Return(true));\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n  request_callbacks1_->onResponse(std::move(response1));\n\n  EXPECT_EQ(1UL, config_->stats_.downstream_cx_drain_close_.value());\n}\n\nTEST_F(RedisProxyFilterTestWithTwoCallbacks, OutOfOrderResponseDownstreamDisconnectBeforeFlush) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data)))\n      .WillOnce(Invoke(this, &RedisProxyFilterTestWithTwoCallbacks::decodeHelper));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n\n  EXPECT_EQ(2UL, config_->stats_.downstream_rq_total_.value());\n  EXPECT_EQ(2UL, config_->stats_.downstream_rq_active_.value());\n\n  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());\n  request_callbacks2_->onResponse(std::move(response2));\n  EXPECT_CALL(*request_handle1_, cancel());\n\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(RedisProxyFilterTest, DownstreamDisconnectWithActive) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  CommandSplitter::MockSplitRequest* request_handle1 = new CommandSplitter::MockSplitRequest();\n  CommandSplitter::SplitCallbacks* request_callbacks1;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    Common::Redis::RespValuePtr request1(new Common::Redis::RespValue());\n    EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _, _))\n        .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1)));\n    decoder_callbacks_->onRespValue(std::move(request1));\n  }));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n\n  EXPECT_CALL(*request_handle1, cancel());\n  
filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(RedisProxyFilterTest, ImmediateResponse) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  Common::Redis::RespValuePtr request1(new Common::Redis::RespValue());\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    decoder_callbacks_->onRespValue(std::move(request1));\n  }));\n  EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _, _))\n      .WillOnce(\n          Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks,\n                     Event::Dispatcher&) -> CommandSplitter::SplitRequest* {\n            Common::Redis::RespValuePtr error(new Common::Redis::RespValue());\n            error->type(Common::Redis::RespType::Error);\n            error->asString() = \"no healthy upstream\";\n            EXPECT_CALL(*encoder_, encode(Eq(ByRef(*error)), _));\n            EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n            callbacks.onResponse(std::move(error));\n            return nullptr;\n          }));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(RedisProxyFilterTest, ProtocolError) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    throw Common::Redis::ProtocolError(\"error\");\n  }));\n\n  Common::Redis::RespValue error;\n  error.type(Common::Redis::RespType::Error);\n  error.asString() = \"downstream protocol error\";\n  EXPECT_CALL(*encoder_, encode(Eq(ByRef(error)), _));\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(fake_data, false));\n\n  EXPECT_EQ(1UL, 
store_.counter(\"redis.foo.downstream_cx_protocol_error\").value());\n}\n\nTEST_F(RedisProxyFilterTest, AuthWhenNotRequired) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  Common::Redis::RespValuePtr request(new Common::Redis::RespValue());\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    decoder_callbacks_->onRespValue(std::move(request));\n  }));\n  EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _, _))\n      .WillOnce(\n          Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks,\n                     Event::Dispatcher&) -> CommandSplitter::SplitRequest* {\n            EXPECT_TRUE(callbacks.connectionAllowed());\n            Common::Redis::RespValuePtr error(new Common::Redis::RespValue());\n            error->type(Common::Redis::RespType::Error);\n            error->asString() = \"ERR Client sent AUTH, but no password is set\";\n            EXPECT_CALL(*encoder_, encode(Eq(ByRef(*error)), _));\n            EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n            callbacks.onAuth(\"foo\");\n            // callbacks cannot be accessed now.\n            EXPECT_TRUE(filter_->connectionAllowed());\n            return nullptr;\n          }));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n}\n\nTEST_F(RedisProxyFilterTest, AuthAclWhenNotRequired) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  Common::Redis::RespValuePtr request(new Common::Redis::RespValue());\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    decoder_callbacks_->onRespValue(std::move(request));\n  }));\n  EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _, _))\n      .WillOnce(\n          Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks,\n                     Event::Dispatcher&) -> CommandSplitter::SplitRequest* {\n            
EXPECT_TRUE(callbacks.connectionAllowed());\n            Common::Redis::RespValuePtr error(new Common::Redis::RespValue());\n            error->type(Common::Redis::RespType::Error);\n            error->asString() = \"ERR Client sent AUTH, but no username-password pair is set\";\n            EXPECT_CALL(*encoder_, encode(Eq(ByRef(*error)), _));\n            EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n            callbacks.onAuth(\"foo\", \"bar\");\n            // callbacks cannot be accessed now.\n            EXPECT_TRUE(filter_->connectionAllowed());\n            return nullptr;\n          }));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n}\n\nconst std::string downstream_auth_password_config = R\"EOF(\nprefix_routes:\n  catch_all_route:\n      cluster: fake_cluster\nstat_prefix: foo\nsettings:\n  op_timeout: 0.01s\ndownstream_auth_password:\n  inline_string: somepassword\n)EOF\";\n\nclass RedisProxyFilterWithAuthPasswordTest : public RedisProxyFilterTest {\npublic:\n  RedisProxyFilterWithAuthPasswordTest() : RedisProxyFilterTest(downstream_auth_password_config) {}\n};\n\nTEST_F(RedisProxyFilterWithAuthPasswordTest, AuthPasswordCorrect) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  Common::Redis::RespValuePtr request(new Common::Redis::RespValue());\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    decoder_callbacks_->onRespValue(std::move(request));\n  }));\n  EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _, _))\n      .WillOnce(\n          Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks,\n                     Event::Dispatcher&) -> CommandSplitter::SplitRequest* {\n            EXPECT_FALSE(callbacks.connectionAllowed());\n            Common::Redis::RespValuePtr reply(new Common::Redis::RespValue());\n            reply->type(Common::Redis::RespType::SimpleString);\n            reply->asString() = 
\"OK\";\n            EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _));\n            EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n            callbacks.onAuth(\"somepassword\");\n            // callbacks cannot be accessed now.\n            EXPECT_TRUE(filter_->connectionAllowed());\n            return nullptr;\n          }));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n}\n\nTEST_F(RedisProxyFilterWithAuthPasswordTest, AuthPasswordIncorrect) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  Common::Redis::RespValuePtr request(new Common::Redis::RespValue());\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    decoder_callbacks_->onRespValue(std::move(request));\n  }));\n  EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _, _))\n      .WillOnce(\n          Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks,\n                     Event::Dispatcher&) -> CommandSplitter::SplitRequest* {\n            EXPECT_FALSE(callbacks.connectionAllowed());\n            Common::Redis::RespValuePtr reply(new Common::Redis::RespValue());\n            reply->type(Common::Redis::RespType::Error);\n            reply->asString() = \"ERR invalid password\";\n            EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _));\n            EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n            callbacks.onAuth(\"wrongpassword\");\n            // callbacks cannot be accessed now.\n            EXPECT_FALSE(filter_->connectionAllowed());\n            return nullptr;\n          }));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n}\n\nconst std::string downstream_auth_acl_config = R\"EOF(\nprefix_routes:\n  catch_all_route:\n      cluster: fake_cluster\nstat_prefix: foo\nsettings:\n  op_timeout: 0.01s\ndownstream_auth_username:\n  inline_string: someusername\ndownstream_auth_password:\n  
inline_string: somepassword\n)EOF\";\n\nclass RedisProxyFilterWithAuthAclTest : public RedisProxyFilterTest {\npublic:\n  RedisProxyFilterWithAuthAclTest() : RedisProxyFilterTest(downstream_auth_acl_config) {}\n};\n\nTEST_F(RedisProxyFilterWithAuthAclTest, AuthAclCorrect) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  Common::Redis::RespValuePtr request(new Common::Redis::RespValue());\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    decoder_callbacks_->onRespValue(std::move(request));\n  }));\n  EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _, _))\n      .WillOnce(\n          Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks,\n                     Event::Dispatcher&) -> CommandSplitter::SplitRequest* {\n            EXPECT_FALSE(callbacks.connectionAllowed());\n            Common::Redis::RespValuePtr reply(new Common::Redis::RespValue());\n            reply->type(Common::Redis::RespType::SimpleString);\n            reply->asString() = \"OK\";\n            EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _));\n            EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n            callbacks.onAuth(\"someusername\", \"somepassword\");\n            // callbacks cannot be accessed now.\n            EXPECT_TRUE(filter_->connectionAllowed());\n            return nullptr;\n          }));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n}\n\nTEST_F(RedisProxyFilterWithAuthAclTest, AuthAclUsernameIncorrect) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  Common::Redis::RespValuePtr request(new Common::Redis::RespValue());\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    decoder_callbacks_->onRespValue(std::move(request));\n  }));\n  EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _, _))\n      .WillOnce(\n          Invoke([&](const Common::Redis::RespValue&, 
CommandSplitter::SplitCallbacks& callbacks,\n                     Event::Dispatcher&) -> CommandSplitter::SplitRequest* {\n            EXPECT_FALSE(callbacks.connectionAllowed());\n            Common::Redis::RespValuePtr reply(new Common::Redis::RespValue());\n            reply->type(Common::Redis::RespType::Error);\n            reply->asString() = \"WRONGPASS invalid username-password pair\";\n            EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _));\n            EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n            callbacks.onAuth(\"wrongusername\", \"somepassword\");\n            // callbacks cannot be accessed now.\n            EXPECT_FALSE(filter_->connectionAllowed());\n            return nullptr;\n          }));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n}\n\nTEST_F(RedisProxyFilterWithAuthAclTest, AuthAclPasswordIncorrect) {\n  InSequence s;\n\n  Buffer::OwnedImpl fake_data;\n  Common::Redis::RespValuePtr request(new Common::Redis::RespValue());\n  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {\n    decoder_callbacks_->onRespValue(std::move(request));\n  }));\n  EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _, _))\n      .WillOnce(\n          Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks,\n                     Event::Dispatcher&) -> CommandSplitter::SplitRequest* {\n            EXPECT_FALSE(callbacks.connectionAllowed());\n            Common::Redis::RespValuePtr reply(new Common::Redis::RespValue());\n            reply->type(Common::Redis::RespType::Error);\n            reply->asString() = \"WRONGPASS invalid username-password pair\";\n            EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _));\n            EXPECT_CALL(filter_callbacks_.connection_, write(_, _));\n            callbacks.onAuth(\"someusername\", \"wrongpassword\");\n            // callbacks cannot be accessed now.\n            
EXPECT_FALSE(filter_->connectionAllowed());\n            return nullptr;\n          }));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false));\n}\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc",
    "content": "#include <sstream>\n#include <vector>\n\n#include \"common/common/fmt.h\"\n\n#include \"extensions/filters/network/common/redis/fault_impl.h\"\n#include \"extensions/filters/network/redis_proxy/command_splitter_impl.h\"\n\n#include \"test/integration/integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace RedisCmdSplitter = Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitter;\n\nnamespace Envoy {\nnamespace {\n\n// This is a basic redis_proxy configuration with 2 endpoints/hosts\n// in the cluster. The load balancing policy must be set\n// to random for proper test operation.\n\nconst std::string CONFIG = fmt::format(R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\nstatic_resources:\n  clusters:\n    - name: cluster_0\n      type: STATIC\n      lb_policy: RANDOM\n      load_assignment:\n        cluster_name: cluster_0\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n  listeners:\n    name: listener_0\n    address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 0\n    filter_chains:\n      filters:\n        name: redis\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy\n          stat_prefix: redis_stats\n          prefix_routes:\n            catch_all_route:\n              cluster: cluster_0\n          settings:\n            op_timeout: 5s\n)EOF\",\n                                       Platform::null_device_path);\n\n// This is a configuration with command stats enabled.\nconst std::string CONFIG_WITH_COMMAND_STATS = CONFIG + R\"EOF(\n            
enable_command_stats: true\n)EOF\";\n\n// This is a configuration with moved/ask redirection support enabled.\nconst std::string CONFIG_WITH_REDIRECTION = CONFIG + R\"EOF(\n            enable_redirection: true\n)EOF\";\n\n// This is a configuration with batching enabled.\nconst std::string CONFIG_WITH_BATCHING = CONFIG + R\"EOF(\n            max_buffer_size_before_flush: 1024 \n            buffer_flush_timeout: 0.003s \n)EOF\";\n\nconst std::string CONFIG_WITH_ROUTES_BASE = fmt::format(R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\nstatic_resources:\n  clusters:\n    - name: cluster_0\n      type: STATIC\n      lb_policy: RANDOM\n      load_assignment:\n        cluster_name: cluster_0\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n    - name: cluster_1\n      type: STATIC\n      lb_policy: RANDOM\n      load_assignment:\n        cluster_name: cluster_1\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n    - name: cluster_2\n      type: STATIC\n      lb_policy: RANDOM\n      load_assignment:\n        cluster_name: cluster_2\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n            - endpoint:\n           
     address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n  listeners:\n    name: listener_0\n    address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 0\n    filter_chains:\n      filters:\n        name: redis\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy\n          stat_prefix: redis_stats\n          settings:\n            op_timeout: 5s\n)EOF\",\n                                                        Platform::null_device_path);\n\nconst std::string CONFIG_WITH_ROUTES = CONFIG_WITH_ROUTES_BASE + R\"EOF(\n          prefix_routes:\n            catch_all_route:\n              cluster: cluster_0\n            routes:\n            - prefix: \"foo:\"\n              cluster: cluster_1\n            - prefix: \"baz:\"\n              cluster: cluster_2\n)EOF\";\n\nconst std::string CONFIG_WITH_MIRROR = CONFIG_WITH_ROUTES_BASE + R\"EOF(\n          prefix_routes:\n            catch_all_route:\n              cluster: cluster_0\n              request_mirror_policy:\n              - cluster: cluster_1\n              - cluster: cluster_2\n            routes:\n            - prefix: \"write_only:\"\n              cluster: cluster_0\n              request_mirror_policy:\n              - cluster: cluster_1\n                exclude_read_commands: true\n            - prefix: \"percentage:\"\n              cluster: cluster_0\n              request_mirror_policy:\n              - cluster: cluster_1\n                runtime_fraction:\n                  default_value:\n                    numerator: 50\n                    denominator: HUNDRED\n                  runtime_key: \"bogus_key\"\n)EOF\";\n\nconst std::string CONFIG_WITH_DOWNSTREAM_AUTH_PASSWORD_SET = CONFIG + R\"EOF(\n          downstream_auth_password: { inline_string: somepassword }\n)EOF\";\n\nconst std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = 
fmt::format(R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\nstatic_resources:\n  clusters:\n    - name: cluster_0\n      type: STATIC\n      typed_extension_protocol_options:\n        envoy.filters.network.redis_proxy:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions\n          auth_password: {{ inline_string: cluster_0_password }}\n      lb_policy: RANDOM\n      load_assignment:\n        cluster_name: cluster_0\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n    - name: cluster_1\n      type: STATIC\n      lb_policy: RANDOM\n      typed_extension_protocol_options:\n        envoy.filters.network.redis_proxy:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions\n          auth_password: {{ inline_string: cluster_1_password }}\n      load_assignment:\n        cluster_name: cluster_1\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n    - name: cluster_2\n      type: STATIC\n      typed_extension_protocol_options:\n        envoy.filters.network.redis_proxy:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions\n          auth_password: {{ inline_string: cluster_2_password }}\n      lb_policy: RANDOM\n      load_assignment:\n        cluster_name: cluster_2\n        endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: 127.0.0.1\n                    port_value: 0\n  listeners:\n    name: listener_0\n    address:\n      
socket_address:\n        address: 127.0.0.1\n        port_value: 0\n    filter_chains:\n      filters:\n        name: redis\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy\n          stat_prefix: redis_stats\n          settings:\n            op_timeout: 5s\n          prefix_routes:\n            catch_all_route:\n              cluster: cluster_0\n            routes:\n            - prefix: \"foo:\"\n              cluster: cluster_1\n            - prefix: \"baz:\"\n              cluster: cluster_2\n)EOF\",\n                                                                      Platform::null_device_path);\n\n// This is a configuration with fault injection enabled.\nconst std::string CONFIG_WITH_FAULT_INJECTION = CONFIG + R\"EOF(\n          faults:\n          - fault_type: ERROR\n            fault_enabled:\n              default_value:\n                numerator: 100\n                denominator: HUNDRED\n            commands:\n            - GET\n          - fault_type: DELAY\n            fault_enabled:\n              default_value:\n                numerator: 20\n                denominator: HUNDRED\n              runtime_key: \"bogus_key\"\n            delay: 2s\n            commands:\n            - SET\n)EOF\";\n\n// This function encodes commands as an array of bulkstrings as transmitted by Redis clients to\n// Redis servers, according to the Redis protocol.\nstd::string makeBulkStringArray(std::vector<std::string>&& command_strings) {\n  std::stringstream result;\n\n  result << \"*\" << command_strings.size() << \"\\r\\n\";\n  for (auto& command_string : command_strings) {\n    result << \"$\" << command_string.size() << \"\\r\\n\";\n    result << command_string << \"\\r\\n\";\n  }\n\n  return result.str();\n}\n\nclass RedisProxyIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                  public BaseIntegrationTest {\npublic:\n  
RedisProxyIntegrationTest(const std::string& config = CONFIG, int num_upstreams = 2)\n      : BaseIntegrationTest(GetParam(), config), num_upstreams_(num_upstreams),\n        version_(GetParam()) {}\n\n  // This method encodes a fake upstream's IP address and TCP port in the\n  // same format as one would expect from a Redis server in\n  // an ask/moved redirection error.\n\n  std::string redisAddressAndPort(FakeUpstreamPtr& upstream) {\n    std::stringstream result;\n    if (version_ == Network::Address::IpVersion::v4) {\n      result << \"127.0.0.1\"\n             << \":\";\n    } else {\n      result << \"::1\"\n             << \":\";\n    }\n    result << upstream->localAddress()->ip()->port();\n    return result.str();\n  }\n\n  void initialize() override;\n\n  /**\n   * Simple bi-directional test between a fake Redis client and Redis server.\n   * @param request supplies Redis client data to transmit to the Redis server.\n   * @param response supplies Redis server data to transmit to the client.\n   */\n  void simpleRequestAndResponse(const std::string& request, const std::string& response) {\n    return simpleRoundtripToUpstream(fake_upstreams_[0], request, response);\n  }\n\n  /**\n   * Simple bi-direction test between a fake redis client and a specific redis server.\n   * @param upstream a handle to the server that will respond to the request.\n   * @param request supplies Redis client data to transmit to the Redis server.\n   * @param response supplies Redis server data to transmit to the client.\n   */\n  void simpleRoundtripToUpstream(FakeUpstreamPtr& upstream, const std::string& request,\n                                 const std::string& response);\n\n  /**\n   * Simple bi-directional test between a fake Redis client and proxy server.\n   * @param request supplies Redis client data to transmit to the proxy.\n   * @param proxy_response supplies proxy data in response to the client's request.\n   */\n  void simpleProxyResponse(const std::string& 
request, const std::string& proxy_response);\n\n  /**\n   * A single step of a larger test involving a fake Redis client and the proxy server.\n   * @param request supplies Redis client data to transmit to the proxy.\n   * @param proxy_response supplies proxy data in response to the client's request.\n   * @param redis_client a handle to the fake redis client that sends the request.\n   */\n  void proxyResponseStep(const std::string& request, const std::string& proxy_response,\n                         IntegrationTcpClientPtr& redis_client);\n\n  /**\n   * A single step of a larger test involving a fake Redis client and a specific Redis server.\n   * @param upstream a handle to the server that will respond to the request.\n   * @param request supplies Redis client data to transmit to the Redis server.\n   * @param response supplies Redis server data to transmit to the client.\n   * @param redis_client a handle to the fake redis client that sends the request.\n   * @param fake_upstream_connection supplies a handle to connection from the proxy to the fake\n   * server.\n   * @param auth_username supplies the fake upstream's server username, if not an empty string.\n   * @param auth_password supplies the fake upstream's server password, if not an empty string.\n   */\n  void roundtripToUpstreamStep(FakeUpstreamPtr& upstream, const std::string& request,\n                               const std::string& response, IntegrationTcpClientPtr& redis_client,\n                               FakeRawConnectionPtr& fake_upstream_connection,\n                               const std::string& auth_username, const std::string& auth_password);\n  /**\n   * A upstream server expects the request on the upstream and respond with the response.\n   * @param upstream a handle to the server that will respond to the request.\n   * @param request supplies request data sent to the Redis server.\n   * @param response supplies Redis server response data to transmit to the client.\n   * @param 
fake_upstream_connection supplies a handle to connection from the proxy to the fake\n   * server.\n   * @param auth_username supplies the fake upstream's server username, if not an empty string.\n   * @param auth_password supplies the fake upstream's server password, if not an empty string.\n   */\n  void expectUpstreamRequestResponse(FakeUpstreamPtr& upstream, const std::string& request,\n                                     const std::string& response,\n                                     FakeRawConnectionPtr& fake_upstream_connection,\n                                     const std::string& auth_username = \"\",\n                                     const std::string& auth_password = \"\");\n\nprotected:\n  const int num_upstreams_;\n  const Network::Address::IpVersion version_;\n  Runtime::MockLoader* runtime_{};\n};\n\nclass RedisProxyWithRedirectionIntegrationTest : public RedisProxyIntegrationTest {\npublic:\n  RedisProxyWithRedirectionIntegrationTest()\n      : RedisProxyIntegrationTest(CONFIG_WITH_REDIRECTION, 2) {}\n\n  /**\n   * Simple bi-directional test with a fake Redis client and 2 fake Redis servers.\n   * @param target_server a handle to the second server that will respond to the request.\n   * @param request supplies client data to transmit to the first upstream server.\n   * @param redirection_response supplies the moved or ask redirection error from the first server.\n   * @param response supplies data sent by the second server back to the fake Redis client.\n   * @param asking_response supplies the target_server's response to an \"asking\" command, if\n   * appropriate.\n   */\n  void simpleRedirection(FakeUpstreamPtr& target_server, const std::string& request,\n                         const std::string& redirection_response, const std::string& response,\n                         const std::string& asking_response = \"+OK\\r\\n\");\n};\n\nclass RedisProxyWithBatchingIntegrationTest : public RedisProxyIntegrationTest {\npublic:\n  
RedisProxyWithBatchingIntegrationTest() : RedisProxyIntegrationTest(CONFIG_WITH_BATCHING, 2) {}\n};\n\nclass RedisProxyWithRoutesIntegrationTest : public RedisProxyIntegrationTest {\npublic:\n  RedisProxyWithRoutesIntegrationTest() : RedisProxyIntegrationTest(CONFIG_WITH_ROUTES, 6) {}\n};\n\nclass RedisProxyWithDownstreamAuthIntegrationTest : public RedisProxyIntegrationTest {\npublic:\n  RedisProxyWithDownstreamAuthIntegrationTest()\n      : RedisProxyIntegrationTest(CONFIG_WITH_DOWNSTREAM_AUTH_PASSWORD_SET, 2) {}\n};\n\nclass RedisProxyWithRoutesAndAuthPasswordsIntegrationTest : public RedisProxyIntegrationTest {\npublic:\n  RedisProxyWithRoutesAndAuthPasswordsIntegrationTest()\n      : RedisProxyIntegrationTest(CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS, 3) {}\n};\n\nclass RedisProxyWithMirrorsIntegrationTest : public RedisProxyIntegrationTest {\npublic:\n  RedisProxyWithMirrorsIntegrationTest() : RedisProxyIntegrationTest(CONFIG_WITH_MIRROR, 6) {}\n};\n\nclass RedisProxyWithCommandStatsIntegrationTest : public RedisProxyIntegrationTest {\npublic:\n  RedisProxyWithCommandStatsIntegrationTest()\n      : RedisProxyIntegrationTest(CONFIG_WITH_COMMAND_STATS, 2) {}\n};\n\nclass RedisProxyWithFaultInjectionIntegrationTest : public RedisProxyIntegrationTest {\npublic:\n  RedisProxyWithFaultInjectionIntegrationTest()\n      : RedisProxyIntegrationTest(CONFIG_WITH_FAULT_INJECTION, 2) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithRedirectionIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithBatchingIntegrationTest,\n                         
testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithRoutesIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithDownstreamAuthIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithRoutesAndAuthPasswordsIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithMirrorsIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithCommandStatsIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithFaultInjectionIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nvoid RedisProxyIntegrationTest::initialize() {\n  setUpstreamCount(num_upstreams_);\n  setDeterministic();\n  config_helper_.renameListener(\"redis_proxy\");\n  BaseIntegrationTest::initialize();\n}\n\nvoid RedisProxyIntegrationTest::roundtripToUpstreamStep(\n    FakeUpstreamPtr& upstream, const std::string& request, const std::string& response,\n    IntegrationTcpClientPtr& redis_client, FakeRawConnectionPtr& fake_upstream_connection,\n    const std::string& auth_username, const 
std::string& auth_password) {\n  redis_client->clearData();\n  ASSERT_TRUE(redis_client->write(request));\n\n  expectUpstreamRequestResponse(upstream, request, response, fake_upstream_connection,\n                                auth_username, auth_password);\n\n  redis_client->waitForData(response);\n  // The original response should be received by the fake Redis client.\n  EXPECT_EQ(response, redis_client->data());\n}\n\nvoid RedisProxyIntegrationTest::expectUpstreamRequestResponse(\n    FakeUpstreamPtr& upstream, const std::string& request, const std::string& response,\n    FakeRawConnectionPtr& fake_upstream_connection, const std::string& auth_username,\n    const std::string& auth_password) {\n  std::string proxy_to_server;\n  bool expect_auth_command = false;\n  std::string ok = \"+OK\\r\\n\";\n\n  if (fake_upstream_connection.get() == nullptr) {\n    expect_auth_command = (!auth_password.empty());\n    EXPECT_TRUE(upstream->waitForRawConnection(fake_upstream_connection));\n  }\n  if (expect_auth_command) {\n    std::string auth_command = (auth_username.empty())\n                                   ? 
makeBulkStringArray({\"auth\", auth_password})\n                                   : makeBulkStringArray({\"auth\", auth_username, auth_password});\n    EXPECT_TRUE(fake_upstream_connection->waitForData(auth_command.size() + request.size(),\n                                                      &proxy_to_server));\n    // The original request should be the same as the data received by the server.\n    EXPECT_EQ(auth_command + request, proxy_to_server);\n    // Send back an OK for the auth command.\n    EXPECT_TRUE(fake_upstream_connection->write(ok));\n\n  } else {\n    EXPECT_TRUE(fake_upstream_connection->waitForData(request.size(), &proxy_to_server));\n    // The original request should be the same as the data received by the server.\n    EXPECT_EQ(request, proxy_to_server);\n  }\n\n  EXPECT_TRUE(fake_upstream_connection->write(response));\n}\n\nvoid RedisProxyIntegrationTest::simpleRoundtripToUpstream(FakeUpstreamPtr& upstream,\n                                                          const std::string& request,\n                                                          const std::string& response) {\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n\n  roundtripToUpstreamStep(upstream, request, response, redis_client, fake_upstream_connection, \"\",\n                          \"\");\n\n  EXPECT_TRUE(fake_upstream_connection->close());\n  redis_client->close();\n}\n\nvoid RedisProxyIntegrationTest::proxyResponseStep(const std::string& request,\n                                                  const std::string& proxy_response,\n                                                  IntegrationTcpClientPtr& redis_client) {\n  redis_client->clearData();\n  ASSERT_TRUE(redis_client->write(request));\n  redis_client->waitForData(proxy_response);\n  // After sending the request to the proxy, the fake redis client should receive proxy_response.\n  EXPECT_EQ(proxy_response, 
redis_client->data());\n}\n\nvoid RedisProxyIntegrationTest::simpleProxyResponse(const std::string& request,\n                                                    const std::string& proxy_response) {\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  proxyResponseStep(request, proxy_response, redis_client);\n  redis_client->close();\n}\n\nvoid RedisProxyWithRedirectionIntegrationTest::simpleRedirection(\n    FakeUpstreamPtr& target_server, const std::string& request,\n    const std::string& redirection_response, const std::string& response,\n    const std::string& asking_response) {\n\n  bool asking = (redirection_response.find(\"-ASK\") != std::string::npos);\n  std::string proxy_to_server;\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  ASSERT_TRUE(redis_client->write(request));\n\n  FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2;\n\n  // Data from the client should always be routed to fake_upstreams_[0] by the load balancer.\n  EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_1));\n  EXPECT_TRUE(fake_upstream_connection_1->waitForData(request.size(), &proxy_to_server));\n  // The data in request should be received by the first server, fake_upstreams_[0].\n  EXPECT_EQ(request, proxy_to_server);\n  proxy_to_server.clear();\n\n  // Send the redirection_response from the first fake Redis server back to the proxy.\n  EXPECT_TRUE(fake_upstream_connection_1->write(redirection_response));\n  // The proxy should initiate a new connection to the fake redis server, target_server, in\n  // response.\n  EXPECT_TRUE(target_server->waitForRawConnection(fake_upstream_connection_2));\n\n  if (asking) {\n    // The server, target_server, should receive an \"asking\" command before the original request.\n    std::string asking_request = makeBulkStringArray({\"asking\"});\n    
EXPECT_TRUE(fake_upstream_connection_2->waitForData(asking_request.size() + request.size(),\n                                                        &proxy_to_server));\n    EXPECT_EQ(asking_request + request, proxy_to_server);\n    // Respond to the \"asking\" command.\n    EXPECT_TRUE(fake_upstream_connection_2->write(asking_response));\n  } else {\n    // The server, target_server, should receive request unchanged.\n    EXPECT_TRUE(fake_upstream_connection_2->waitForData(request.size(), &proxy_to_server));\n    EXPECT_EQ(request, proxy_to_server);\n  }\n\n  // Send response from the second fake Redis server, target_server, to the client.\n  EXPECT_TRUE(fake_upstream_connection_2->write(response));\n  redis_client->waitForData(response);\n  // The client should receive response unchanged.\n  EXPECT_EQ(response, redis_client->data());\n\n  EXPECT_TRUE(fake_upstream_connection_1->close());\n  EXPECT_TRUE(fake_upstream_connection_2->close());\n  redis_client->close();\n}\n\n// This test sends a simple \"get foo\" command from a fake\n// downstream client through the proxy to a fake upstream\n// Redis server. The fake server sends a valid response\n// back to the client. 
The request and response should\n// make it through the envoy proxy server code unchanged.\n\nTEST_P(RedisProxyIntegrationTest, SimpleRequestAndResponse) {\n  initialize();\n  simpleRequestAndResponse(makeBulkStringArray({\"get\", \"foo\"}), \"$3\\r\\nbar\\r\\n\");\n}\n\nTEST_P(RedisProxyWithCommandStatsIntegrationTest, MGETRequestAndResponse) {\n  initialize();\n  std::string request = makeBulkStringArray({\"mget\", \"foo\"});\n  std::string upstream_response = \"$3\\r\\nbar\\r\\n\";\n  std::string downstream_response =\n      \"*1\\r\\n\" + upstream_response; // Downstream response is array of length 1\n\n  // Make MGET request from downstream\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  redis_client->clearData();\n  ASSERT_TRUE(redis_client->write(request));\n\n  // Make GET request to upstream (MGET is turned into GETs for upstream)\n  FakeUpstreamPtr& upstream = fake_upstreams_[0];\n  FakeRawConnectionPtr fake_upstream_connection;\n  std::string auth_username = \"\";\n  std::string auth_password = \"\";\n  std::string upstream_request = makeBulkStringArray({\"get\", \"foo\"});\n  expectUpstreamRequestResponse(upstream, upstream_request, upstream_response,\n                                fake_upstream_connection, auth_username, auth_password);\n\n  // Downstream response for MGET\n  redis_client->waitForData(downstream_response);\n  EXPECT_EQ(downstream_response, redis_client->data());\n\n  // Cleanup\n  EXPECT_TRUE(fake_upstream_connection->close());\n  redis_client->close();\n}\n\n// This test sends an invalid Redis command from a fake\n// downstream client to the envoy proxy. 
Envoy will respond\n// with an invalid request error.\n\nTEST_P(RedisProxyIntegrationTest, InvalidRequest) {\n  std::stringstream error_response;\n  error_response << \"-\" << RedisCmdSplitter::Response::get().InvalidRequest << \"\\r\\n\";\n  initialize();\n  simpleProxyResponse(makeBulkStringArray({\"foo\"}), error_response.str());\n}\n\n// This test sends a simple Redis command to a fake upstream\n// Redis server. The server replies with a MOVED or ASK redirection\n// error, and that error is passed unchanged to the fake downstream\n// since redirection support has not been enabled (by default).\n\nTEST_P(RedisProxyIntegrationTest, RedirectWhenNotEnabled) {\n  std::string request = makeBulkStringArray({\"get\", \"foo\"});\n  initialize();\n  if (version_ == Network::Address::IpVersion::v4) {\n    simpleRequestAndResponse(request, \"-MOVED 1111 127.0.0.1:34123\\r\\n\");\n    simpleRequestAndResponse(request, \"-ASK 1111 127.0.0.1:34123\\r\\n\");\n  } else {\n    simpleRequestAndResponse(request, \"-MOVED 1111 ::1:34123\\r\\n\");\n    simpleRequestAndResponse(request, \"-ASK 1111 ::1:34123\\r\\n\");\n  }\n}\n\n// This test sends an AUTH command from the fake downstream client to\n// the Envoy proxy. Envoy will respond with a no-password-set error since\n// no downstream_auth_password has been set for the filter.\n\nTEST_P(RedisProxyIntegrationTest, DownstreamAuthWhenNoPasswordSet) {\n  initialize();\n  simpleProxyResponse(makeBulkStringArray({\"auth\", \"somepassword\"}),\n                      \"-ERR Client sent AUTH, but no password is set\\r\\n\");\n}\n\n// This test sends a simple Redis command to a sequence of fake upstream\n// Redis servers. The first server replies with a MOVED or ASK redirection\n// error that specifies the second upstream server in the static configuration\n// as its target. 
The target server responds to a possibly transformed\n// request, and its response is received unchanged by the fake Redis client.\n\nTEST_P(RedisProxyWithRedirectionIntegrationTest, RedirectToKnownServer) {\n  std::string request = makeBulkStringArray({\"get\", \"foo\"});\n  initialize();\n  std::stringstream redirection_error;\n  redirection_error << \"-MOVED 1111 \" << redisAddressAndPort(fake_upstreams_[1]) << \"\\r\\n\";\n  simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), \"$3\\r\\nbar\\r\\n\");\n\n  redirection_error.str(\"\");\n  redirection_error << \"-ASK 1111 \" << redisAddressAndPort(fake_upstreams_[1]) << \"\\r\\n\";\n  simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), \"$3\\r\\nbar\\r\\n\");\n}\n\n// This test sends a simple Redis commands to a sequence of fake upstream\n// Redis servers. The first server replies with a MOVED or ASK redirection\n// error that specifies an unknown upstream server not in its static configuration\n// as its target. 
The target server responds to a possibly transformed\n// request, and its response is received unchanged by the fake Redis client.\n\nTEST_P(RedisProxyWithRedirectionIntegrationTest, RedirectToUnknownServer) {\n  std::string request = makeBulkStringArray({\"get\", \"foo\"});\n  initialize();\n\n  auto endpoint =\n      Network::Utility::parseInternetAddress(Network::Test::getAnyAddressString(version_), 0);\n  FakeUpstreamPtr target_server{createFakeUpstream(endpoint, upstreamProtocol())};\n\n  std::stringstream redirection_error;\n  redirection_error << \"-MOVED 1111 \" << redisAddressAndPort(target_server) << \"\\r\\n\";\n  simpleRedirection(target_server, request, redirection_error.str(), \"$3\\r\\nbar\\r\\n\");\n\n  redirection_error.str(\"\");\n  redirection_error << \"-ASK 1111 \" << redisAddressAndPort(target_server) << \"\\r\\n\";\n  simpleRedirection(target_server, request, redirection_error.str(), \"$3\\r\\nbar\\r\\n\");\n}\n\n// This test verifies that various forms of bad MOVED/ASK redirection errors\n// from a fake Redis server are not acted upon, and are passed unchanged\n// to the fake Redis client.\n\nTEST_P(RedisProxyWithRedirectionIntegrationTest, BadRedirectStrings) {\n  initialize();\n  std::string request = makeBulkStringArray({\"get\", \"foo\"});\n\n  // Test with truncated moved errors.\n  simpleRequestAndResponse(request, \"-MOVED 1111\\r\\n\");\n  simpleRequestAndResponse(request, \"-MOVED\\r\\n\");\n  // Test with truncated ask errors.\n  simpleRequestAndResponse(request, \"-ASK 1111\\r\\n\");\n  simpleRequestAndResponse(request, \"-ASK\\r\\n\");\n  // Test with a badly specified IP address and TCP port field.\n  simpleRequestAndResponse(request, \"-MOVED 2222 badfield\\r\\n\");\n  simpleRequestAndResponse(request, \"-ASK 2222 badfield\\r\\n\");\n  // Test with a bad IP address specification.\n  if (version_ == Network::Address::IpVersion::v4) {\n    simpleRequestAndResponse(request, \"-MOVED 2222 127.0:3333\\r\\n\");\n    
simpleRequestAndResponse(request, \"-ASK 2222 127.0:3333\\r\\n\");\n  } else {\n    simpleRequestAndResponse(request, \"-MOVED 2222 ::11111:3333\\r\\n\");\n    simpleRequestAndResponse(request, \"-ASK 2222 ::11111:3333\\r\\n\");\n  }\n  // Test with a bad IP address specification (not numeric).\n  if (version_ == Network::Address::IpVersion::v4) {\n    simpleRequestAndResponse(request, \"-MOVED 2222 badaddress:3333\\r\\n\");\n    simpleRequestAndResponse(request, \"-ASK 2222 badaddress:3333\\r\\n\");\n  } else {\n    simpleRequestAndResponse(request, \"-MOVED 2222 badaddress:3333\\r\\n\");\n    simpleRequestAndResponse(request, \"-ASK 2222 badaddress:3333\\r\\n\");\n  }\n  // Test with a bad TCP port specification (out of range).\n  if (version_ == Network::Address::IpVersion::v4) {\n    simpleRequestAndResponse(request, \"-MOVED 2222 127.0.0.1:100000\\r\\n\");\n    simpleRequestAndResponse(request, \"-ASK 2222 127.0.0.1:100000\\r\\n\");\n  } else {\n    simpleRequestAndResponse(request, \"-MOVED 2222 ::1:1000000\\r\\n\");\n    simpleRequestAndResponse(request, \"-ASK 2222 ::1:1000000\\r\\n\");\n  }\n  // Test with a bad TCP port specification (not numeric).\n  if (version_ == Network::Address::IpVersion::v4) {\n    simpleRequestAndResponse(request, \"-MOVED 2222 127.0.0.1:badport\\r\\n\");\n    simpleRequestAndResponse(request, \"-ASK 2222 127.0.0.1:badport\\r\\n\");\n  } else {\n    simpleRequestAndResponse(request, \"-MOVED 2222 ::1:badport\\r\\n\");\n    simpleRequestAndResponse(request, \"-ASK 2222 ::1:badport\\r\\n\");\n  }\n}\n\n// This test verifies that an upstream connection failure during ask redirection processing is\n// handled correctly. In this case the \"asking\" command and original client request have been sent\n// to the target server, and then the connection is closed. 
The fake Redis client should receive an\n// upstream failure error in response to its request.\n\nTEST_P(RedisProxyWithRedirectionIntegrationTest, ConnectionFailureBeforeAskingResponse) {\n  initialize();\n\n  std::string request = makeBulkStringArray({\"get\", \"foo\"});\n  std::stringstream redirection_error;\n  redirection_error << \"-ASK 1111 \" << redisAddressAndPort(fake_upstreams_[1]) << \"\\r\\n\";\n\n  std::string proxy_to_server;\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  ASSERT_TRUE(redis_client->write(request));\n\n  FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2;\n\n  // Data from the client should always be routed to fake_upstreams_[0] by the load balancer.\n  EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_1));\n  EXPECT_TRUE(fake_upstream_connection_1->waitForData(request.size(), &proxy_to_server));\n  // The data in request should be received by the first server, fake_upstreams_[0].\n  EXPECT_EQ(request, proxy_to_server);\n  proxy_to_server.clear();\n\n  // Send the redirection_response from the first fake Redis server back to the proxy.\n  EXPECT_TRUE(fake_upstream_connection_1->write(redirection_error.str()));\n  // The proxy should initiate a new connection to the fake redis server, target_server, in\n  // response.\n  EXPECT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_2));\n\n  // The server, fake_upstreams_[1], should receive an \"asking\" command before the original request.\n  std::string asking_request = makeBulkStringArray({\"asking\"});\n  EXPECT_TRUE(fake_upstream_connection_2->waitForData(asking_request.size() + request.size(),\n                                                      &proxy_to_server));\n  EXPECT_EQ(asking_request + request, proxy_to_server);\n  // Close the upstream connection before responding to the \"asking\" command.\n  EXPECT_TRUE(fake_upstream_connection_2->close());\n\n  // The 
fake Redis client should receive an upstream failure error from the proxy.\n  std::stringstream error_response;\n  error_response << \"-\" << RedisCmdSplitter::Response::get().UpstreamFailure << \"\\r\\n\";\n  redis_client->waitForData(error_response.str());\n  EXPECT_EQ(error_response.str(), redis_client->data());\n\n  EXPECT_TRUE(fake_upstream_connection_1->close());\n  redis_client->close();\n}\n\n// This test verifies that a ASK redirection error as a response to an \"asking\" command is ignored.\n// This is a negative test scenario that should never happen since a Redis server will reply to an\n// \"asking\" command with either a \"cluster support not enabled\" error or \"OK\".\n\nTEST_P(RedisProxyWithRedirectionIntegrationTest, IgnoreRedirectionForAsking) {\n  initialize();\n  std::string request = makeBulkStringArray({\"get\", \"foo\"});\n  std::stringstream redirection_error, asking_response;\n  redirection_error << \"-ASK 1111 \" << redisAddressAndPort(fake_upstreams_[1]) << \"\\r\\n\";\n  asking_response << \"-ASK 1111 \" << redisAddressAndPort(fake_upstreams_[0]) << \"\\r\\n\";\n  simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), \"$3\\r\\nbar\\r\\n\",\n                    asking_response.str());\n}\n\n// This test verifies that batching works properly. If batching is enabled, when multiple\n// clients make a request to a Redis server within a certain time window, they will be batched\n// together. 
The below example, two clients send \"GET foo\", and Redis receives those two as\n// a single concatenated request.\n\nTEST_P(RedisProxyWithBatchingIntegrationTest, SimpleBatching) {\n  initialize();\n\n  const std::string& request = makeBulkStringArray({\"get\", \"foo\"});\n  const std::string& response = \"$3\\r\\nbar\\r\\n\";\n\n  std::string proxy_to_server;\n  IntegrationTcpClientPtr redis_client_1 = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  IntegrationTcpClientPtr redis_client_2 = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  ASSERT_TRUE(redis_client_1->write(request));\n  ASSERT_TRUE(redis_client_2->write(request));\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  EXPECT_TRUE(fake_upstream_connection->waitForData(request.size() * 2, &proxy_to_server));\n  // The original request should be the same as the data received by the server.\n  EXPECT_EQ(request + request, proxy_to_server);\n\n  EXPECT_TRUE(fake_upstream_connection->write(response + response));\n  redis_client_1->waitForData(response);\n  redis_client_2->waitForData(response);\n  // The original response should be received by the fake Redis client.\n  EXPECT_EQ(response, redis_client_1->data());\n  EXPECT_EQ(response, redis_client_2->data());\n\n  redis_client_1->close();\n  EXPECT_TRUE(fake_upstream_connection->close());\n  redis_client_2->close();\n  EXPECT_TRUE(fake_upstream_connection->close());\n}\n\n// This test verifies that it's possible to route keys to 3 different upstream pools.\n\nTEST_P(RedisProxyWithRoutesIntegrationTest, SimpleRequestAndResponseRoutedByPrefix) {\n  initialize();\n\n  // roundtrip to cluster_0 (catch_all route)\n  simpleRoundtripToUpstream(fake_upstreams_[0], makeBulkStringArray({\"get\", \"toto\"}),\n                            \"$3\\r\\nbar\\r\\n\");\n\n  // roundtrip to cluster_1 (prefix \"foo:\" route)\n  simpleRoundtripToUpstream(fake_upstreams_[2], 
makeBulkStringArray({\"get\", \"foo:123\"}),\n                            \"$3\\r\\nbar\\r\\n\");\n\n  // roundtrip to cluster_2 (prefix \"baz:\" route)\n  simpleRoundtripToUpstream(fake_upstreams_[4], makeBulkStringArray({\"get\", \"baz:123\"}),\n                            \"$3\\r\\nbar\\r\\n\");\n}\n\n// This test verifies that a client connection cannot issue a command to an upstream\n// server until it supplies a valid Redis AUTH command when downstream_auth_password\n// is set for the redis_proxy filter. It also verifies the errors sent by the proxy\n// when no password or the wrong password is received.\n\nTEST_P(RedisProxyWithDownstreamAuthIntegrationTest, ErrorsUntilCorrectPasswordSent) {\n  initialize();\n\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n\n  proxyResponseStep(makeBulkStringArray({\"get\", \"foo\"}), \"-NOAUTH Authentication required.\\r\\n\",\n                    redis_client);\n\n  std::stringstream error_response;\n  error_response << \"-\" << RedisCmdSplitter::Response::get().InvalidRequest << \"\\r\\n\";\n  proxyResponseStep(makeBulkStringArray({\"auth\"}), error_response.str(), redis_client);\n\n  proxyResponseStep(makeBulkStringArray({\"auth\", \"wrongpassword\"}), \"-ERR invalid password\\r\\n\",\n                    redis_client);\n\n  proxyResponseStep(makeBulkStringArray({\"get\", \"foo\"}), \"-NOAUTH Authentication required.\\r\\n\",\n                    redis_client);\n\n  proxyResponseStep(makeBulkStringArray({\"auth\", \"somepassword\"}), \"+OK\\r\\n\", redis_client);\n\n  roundtripToUpstreamStep(fake_upstreams_[0], makeBulkStringArray({\"get\", \"foo\"}), \"$3\\r\\nbar\\r\\n\",\n                          redis_client, fake_upstream_connection, \"\", \"\");\n\n  EXPECT_TRUE(fake_upstream_connection->close());\n  redis_client->close();\n}\n\n// This test verifies that upstream server connections are transparently authenticated if 
an\n// auth_password is specified for each cluster.\n\nTEST_P(RedisProxyWithRoutesAndAuthPasswordsIntegrationTest, TransparentAuthentication) {\n  initialize();\n\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  std::array<FakeRawConnectionPtr, 3> fake_upstream_connection;\n\n  // roundtrip to cluster_0 (catch_all route)\n  roundtripToUpstreamStep(fake_upstreams_[0], makeBulkStringArray({\"get\", \"toto\"}), \"$3\\r\\nbar\\r\\n\",\n                          redis_client, fake_upstream_connection[0], \"\", \"cluster_0_password\");\n\n  // roundtrip to cluster_1 (prefix \"foo:\" route)\n  roundtripToUpstreamStep(fake_upstreams_[1], makeBulkStringArray({\"get\", \"foo:123\"}),\n                          \"$3\\r\\nbar\\r\\n\", redis_client, fake_upstream_connection[1], \"\",\n                          \"cluster_1_password\");\n\n  // roundtrip to cluster_2 (prefix \"baz:\" route)\n  roundtripToUpstreamStep(fake_upstreams_[2], makeBulkStringArray({\"get\", \"baz:123\"}),\n                          \"$3\\r\\nbar\\r\\n\", redis_client, fake_upstream_connection[2], \"\",\n                          \"cluster_2_password\");\n\n  EXPECT_TRUE(fake_upstream_connection[0]->close());\n  EXPECT_TRUE(fake_upstream_connection[1]->close());\n  EXPECT_TRUE(fake_upstream_connection[2]->close());\n  redis_client->close();\n}\n\nTEST_P(RedisProxyWithMirrorsIntegrationTest, MirroredCatchAllRequest) {\n  initialize();\n\n  std::array<FakeRawConnectionPtr, 3> fake_upstream_connection;\n  const std::string& request = makeBulkStringArray({\"get\", \"toto\"});\n  const std::string& response = \"$3\\r\\nbar\\r\\n\";\n  // roundtrip to cluster_0 (catch_all route)\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  ASSERT_TRUE(redis_client->write(request));\n\n  expectUpstreamRequestResponse(fake_upstreams_[0], request, response, fake_upstream_connection[0]);\n\n  // mirror to cluster_1 and cluster_2\n  
expectUpstreamRequestResponse(fake_upstreams_[2], request, \"$4\\r\\nbar1\\r\\n\",\n                                fake_upstream_connection[1]);\n  expectUpstreamRequestResponse(fake_upstreams_[4], request, \"$4\\r\\nbar2\\r\\n\",\n                                fake_upstream_connection[2]);\n\n  redis_client->waitForData(response);\n  // The original response from the cluster_0 should be received by the fake Redis client and the\n  // response from mirrored requests are ignored.\n  EXPECT_EQ(response, redis_client->data());\n\n  EXPECT_TRUE(fake_upstream_connection[0]->close());\n  EXPECT_TRUE(fake_upstream_connection[1]->close());\n  EXPECT_TRUE(fake_upstream_connection[2]->close());\n  redis_client->close();\n}\n\nTEST_P(RedisProxyWithMirrorsIntegrationTest, MirroredWriteOnlyRequest) {\n  initialize();\n\n  std::array<FakeRawConnectionPtr, 2> fake_upstream_connection;\n  const std::string& set_request = makeBulkStringArray({\"set\", \"write_only:toto\", \"bar\"});\n  const std::string& set_response = \":1\\r\\n\";\n\n  // roundtrip to cluster_0 (write_only route)\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  ASSERT_TRUE(redis_client->write(set_request));\n\n  expectUpstreamRequestResponse(fake_upstreams_[0], set_request, set_response,\n                                fake_upstream_connection[0]);\n\n  // mirror to cluster_1\n  expectUpstreamRequestResponse(fake_upstreams_[2], set_request, \":2\\r\\n\",\n                                fake_upstream_connection[1]);\n\n  // The original response from the cluster_1 should be received by the fake Redis client\n  redis_client->waitForData(set_response);\n  EXPECT_EQ(set_response, redis_client->data());\n\n  EXPECT_TRUE(fake_upstream_connection[0]->close());\n  EXPECT_TRUE(fake_upstream_connection[1]->close());\n  redis_client->close();\n}\n\nTEST_P(RedisProxyWithMirrorsIntegrationTest, ExcludeReadCommands) {\n  initialize();\n\n  FakeRawConnectionPtr 
cluster_0_connection;\n  const std::string& get_request = makeBulkStringArray({\"get\", \"write_only:toto\"});\n  const std::string& get_response = \"$3\\r\\nbar\\r\\n\";\n\n  // roundtrip to cluster_0 (write_only route)\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  ASSERT_TRUE(redis_client->write(get_request));\n\n  expectUpstreamRequestResponse(fake_upstreams_[0], get_request, get_response,\n                                cluster_0_connection);\n\n  // command is not mirrored to cluster 1\n  FakeRawConnectionPtr cluster_1_connection;\n  EXPECT_FALSE(fake_upstreams_[2]->waitForRawConnection(cluster_1_connection,\n                                                        std::chrono::milliseconds(500)));\n\n  redis_client->waitForData(get_response);\n  EXPECT_EQ(get_response, redis_client->data());\n\n  EXPECT_TRUE(cluster_0_connection->close());\n  redis_client->close();\n}\n\nTEST_P(RedisProxyWithMirrorsIntegrationTest, EnabledViaRuntimeFraction) {\n  initialize();\n\n  std::array<FakeRawConnectionPtr, 2> fake_upstream_connection;\n  // When random_value is < 50, the percentage:* will be mirrored, random() default is 0\n  const std::string& request = makeBulkStringArray({\"get\", \"percentage:toto\"});\n  const std::string& response = \"$3\\r\\nbar\\r\\n\";\n  // roundtrip to cluster_0 (catch_all route)\n  IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort(\"redis_proxy\"));\n  ASSERT_TRUE(redis_client->write(request));\n\n  expectUpstreamRequestResponse(fake_upstreams_[0], request, response, fake_upstream_connection[0]);\n\n  // mirror to cluster_1\n  expectUpstreamRequestResponse(fake_upstreams_[2], request, \"$4\\r\\nbar1\\r\\n\",\n                                fake_upstream_connection[1]);\n\n  redis_client->waitForData(response);\n  // The original response from the cluster_0 should be received by the fake Redis client and the\n  // response from mirrored requests are ignored.\n  
EXPECT_EQ(response, redis_client->data());\n\n  EXPECT_TRUE(fake_upstream_connection[0]->close());\n  EXPECT_TRUE(fake_upstream_connection[1]->close());\n  redis_client->close();\n}\n\nTEST_P(RedisProxyWithFaultInjectionIntegrationTest, ErrorFault) {\n  std::string fault_response =\n      fmt::format(\"-{}\\r\\n\", Extensions::NetworkFilters::Common::Redis::FaultMessages::get().Error);\n  initialize();\n  simpleProxyResponse(makeBulkStringArray({\"get\", \"foo\"}), fault_response);\n\n  EXPECT_EQ(1, test_server_->counter(\"redis.redis_stats.command.get.error\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"redis.redis_stats.command.get.error_fault\")->value());\n}\n\nTEST_P(RedisProxyWithFaultInjectionIntegrationTest, DelayFault) {\n  const std::string& set_request = makeBulkStringArray({\"set\", \"write_only:toto\", \"bar\"});\n  const std::string& set_response = \":1\\r\\n\";\n  initialize();\n  simpleRequestAndResponse(set_request, set_response);\n\n  EXPECT_EQ(1, test_server_->counter(\"redis.redis_stats.command.set.success\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"redis.redis_stats.command.set.delay_fault\")->value());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/redis_proxy/router_impl_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"extensions/filters/network/redis_proxy/conn_pool_impl.h\"\n#include \"extensions/filters/network/redis_proxy/router_impl.h\"\n\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/extensions/filters/network/redis_proxy/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::Eq;\nusing testing::Matcher;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RedisProxy {\n\nenvoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes\ncreatePrefixRoutes() {\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes prefix_routes;\n  auto* routes = prefix_routes.mutable_routes();\n\n  {\n    auto* route = routes->Add();\n    route->set_prefix(\"ab\");\n    route->set_cluster(\"fake_clusterA\");\n  }\n\n  {\n    auto* route = routes->Add();\n    route->set_prefix(\"a\");\n    route->set_cluster(\"fake_clusterB\");\n  }\n\n  return prefix_routes;\n}\n\nTEST(PrefixRoutesTest, MissingCatchAll) {\n  Upstreams upstreams;\n  upstreams.emplace(\"fake_clusterA\", std::make_shared<ConnPool::MockInstance>());\n  upstreams.emplace(\"fake_clusterB\", std::make_shared<ConnPool::MockInstance>());\n\n  Runtime::MockLoader runtime_;\n\n  PrefixRoutes router(createPrefixRoutes(), std::move(upstreams), runtime_);\n\n  std::string key(\"c:bar\");\n  EXPECT_EQ(nullptr, router.upstreamPool(key));\n}\n\nTEST(PrefixRoutesTest, RoutedToCatchAll) {\n  auto upstream_c = std::make_shared<ConnPool::MockInstance>();\n\n  Upstreams upstreams;\n  upstreams.emplace(\"fake_clusterA\", std::make_shared<ConnPool::MockInstance>());\n  upstreams.emplace(\"fake_clusterB\", std::make_shared<ConnPool::MockInstance>());\n  
upstreams.emplace(\"fake_clusterC\", upstream_c);\n\n  Runtime::MockLoader runtime_;\n\n  auto prefix_routes = createPrefixRoutes();\n  prefix_routes.mutable_catch_all_route()->set_cluster(\"fake_clusterC\");\n\n  PrefixRoutes router(prefix_routes, std::move(upstreams), runtime_);\n\n  std::string key(\"c:bar\");\n  EXPECT_EQ(upstream_c, router.upstreamPool(key)->upstream());\n}\n\nTEST(PrefixRoutesTest, RoutedToLongestPrefix) {\n  auto upstream_a = std::make_shared<ConnPool::MockInstance>();\n\n  Upstreams upstreams;\n  upstreams.emplace(\"fake_clusterA\", upstream_a);\n  upstreams.emplace(\"fake_clusterB\", std::make_shared<ConnPool::MockInstance>());\n\n  Runtime::MockLoader runtime_;\n\n  PrefixRoutes router(createPrefixRoutes(), std::move(upstreams), runtime_);\n\n  std::string key(\"ab:bar\");\n  EXPECT_EQ(upstream_a, router.upstreamPool(key)->upstream());\n}\n\nTEST(PrefixRoutesTest, CaseUnsensitivePrefix) {\n  auto upstream_a = std::make_shared<ConnPool::MockInstance>();\n\n  Upstreams upstreams;\n  upstreams.emplace(\"fake_clusterA\", upstream_a);\n  upstreams.emplace(\"fake_clusterB\", std::make_shared<ConnPool::MockInstance>());\n\n  Runtime::MockLoader runtime_;\n\n  auto prefix_routes = createPrefixRoutes();\n  prefix_routes.set_case_insensitive(true);\n\n  PrefixRoutes router(prefix_routes, std::move(upstreams), runtime_);\n\n  std::string key(\"AB:bar\");\n  EXPECT_EQ(upstream_a, router.upstreamPool(key)->upstream());\n}\n\nTEST(PrefixRoutesTest, RemovePrefix) {\n  auto upstream_a = std::make_shared<ConnPool::MockInstance>();\n\n  Upstreams upstreams;\n  upstreams.emplace(\"fake_clusterA\", upstream_a);\n  upstreams.emplace(\"fake_clusterB\", std::make_shared<ConnPool::MockInstance>());\n\n  Runtime::MockLoader runtime_;\n\n  auto prefix_routes = createPrefixRoutes();\n\n  {\n    auto* route = prefix_routes.mutable_routes()->Add();\n    route->set_prefix(\"abc\");\n    route->set_cluster(\"fake_clusterA\");\n    route->set_remove_prefix(true);\n  
}\n\n  PrefixRoutes router(prefix_routes, std::move(upstreams), runtime_);\n\n  std::string key(\"abc:bar\");\n  EXPECT_EQ(upstream_a, router.upstreamPool(key)->upstream());\n  EXPECT_EQ(\":bar\", key);\n}\n\nTEST(PrefixRoutesTest, RoutedToShortestPrefix) {\n  auto upstream_b = std::make_shared<ConnPool::MockInstance>();\n\n  Upstreams upstreams;\n  upstreams.emplace(\"fake_clusterA\", std::make_shared<ConnPool::MockInstance>());\n  upstreams.emplace(\"fake_clusterB\", upstream_b);\n\n  Runtime::MockLoader runtime_;\n\n  PrefixRoutes router(createPrefixRoutes(), std::move(upstreams), runtime_);\n\n  std::string key(\"a:bar\");\n  EXPECT_EQ(upstream_b, router.upstreamPool(key)->upstream());\n  EXPECT_EQ(\"a:bar\", key);\n}\n\nTEST(PrefixRoutesTest, DifferentPrefixesSameUpstream) {\n  auto upstream_b = std::make_shared<ConnPool::MockInstance>();\n\n  Upstreams upstreams;\n  upstreams.emplace(\"fake_clusterA\", std::make_shared<ConnPool::MockInstance>());\n  upstreams.emplace(\"fake_clusterB\", upstream_b);\n\n  Runtime::MockLoader runtime_;\n\n  auto prefix_routes = createPrefixRoutes();\n\n  {\n    auto* route = prefix_routes.mutable_routes()->Add();\n    route->set_prefix(\"also_route_to_b\");\n    route->set_cluster(\"fake_clusterB\");\n  }\n\n  PrefixRoutes router(prefix_routes, std::move(upstreams), runtime_);\n\n  std::string key1(\"a:bar\");\n  EXPECT_EQ(upstream_b, router.upstreamPool(key1)->upstream());\n\n  std::string key2(\"also_route_to_b:bar\");\n  EXPECT_EQ(upstream_b, router.upstreamPool(key2)->upstream());\n}\n\nTEST(PrefixRoutesTest, DuplicatePrefix) {\n  Upstreams upstreams;\n  upstreams.emplace(\"fake_clusterA\", std::make_shared<ConnPool::MockInstance>());\n  upstreams.emplace(\"fake_clusterB\", std::make_shared<ConnPool::MockInstance>());\n  upstreams.emplace(\"this_will_throw\", std::make_shared<ConnPool::MockInstance>());\n\n  Runtime::MockLoader runtime_;\n\n  auto prefix_routes = createPrefixRoutes();\n\n  {\n    auto* route = 
prefix_routes.mutable_routes()->Add();\n    route->set_prefix(\"ab\");\n    route->set_cluster(\"this_will_throw\");\n  }\n\n  EXPECT_THROW_WITH_MESSAGE(PrefixRoutes router(prefix_routes, std::move(upstreams), runtime_),\n                            EnvoyException, \"prefix `ab` already exists.\")\n}\n\nTEST(MirrorPolicyImplTest, ShouldMirrorDefault) {\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes::Route::\n      RequestMirrorPolicy config;\n  auto upstream = std::make_shared<ConnPool::MockInstance>();\n  NiceMock<Runtime::MockLoader> runtime;\n\n  MirrorPolicyImpl policy(config, upstream, runtime);\n\n  EXPECT_EQ(true, policy.shouldMirror(\"get\"));\n  EXPECT_EQ(true, policy.shouldMirror(\"set\"));\n  EXPECT_EQ(true, policy.shouldMirror(\"GET\"));\n  EXPECT_EQ(true, policy.shouldMirror(\"SET\"));\n}\n\nTEST(MirrorPolicyImplTest, MissingUpstream) {\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes::Route::\n      RequestMirrorPolicy config;\n  NiceMock<Runtime::MockLoader> runtime;\n\n  MirrorPolicyImpl policy(config, nullptr, runtime);\n\n  EXPECT_EQ(false, policy.shouldMirror(\"get\"));\n  EXPECT_EQ(false, policy.shouldMirror(\"set\"));\n  EXPECT_EQ(false, policy.shouldMirror(\"GET\"));\n  EXPECT_EQ(false, policy.shouldMirror(\"SET\"));\n}\n\nTEST(MirrorPolicyImplTest, ExcludeReadCommands) {\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes::Route::\n      RequestMirrorPolicy config;\n  config.set_exclude_read_commands(true);\n  auto upstream = std::make_shared<ConnPool::MockInstance>();\n  NiceMock<Runtime::MockLoader> runtime;\n\n  MirrorPolicyImpl policy(config, upstream, runtime);\n\n  EXPECT_EQ(false, policy.shouldMirror(\"get\"));\n  EXPECT_EQ(true, policy.shouldMirror(\"set\"));\n  EXPECT_EQ(false, policy.shouldMirror(\"GET\"));\n  EXPECT_EQ(true, policy.shouldMirror(\"SET\"));\n}\n\nTEST(MirrorPolicyImplTest, DefaultValueZero) {\n  
envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes::Route::\n      RequestMirrorPolicy config;\n  auto* runtime_fraction = config.mutable_runtime_fraction();\n  auto* percentage = runtime_fraction->mutable_default_value();\n  percentage->set_numerator(0);\n  percentage->set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n  auto upstream = std::make_shared<ConnPool::MockInstance>();\n  NiceMock<Runtime::MockLoader> runtime;\n\n  MirrorPolicyImpl policy(config, upstream, runtime);\n\n  EXPECT_EQ(false, policy.shouldMirror(\"get\"));\n  EXPECT_EQ(false, policy.shouldMirror(\"set\"));\n}\n\nTEST(MirrorPolicyImplTest, DeterminedByRuntimeFraction) {\n  envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes::Route::\n      RequestMirrorPolicy config;\n  auto* runtime_fraction = config.mutable_runtime_fraction();\n  runtime_fraction->set_runtime_key(\"runtime_key\");\n  auto* percentage = runtime_fraction->mutable_default_value();\n  percentage->set_numerator(50);\n  percentage->set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n  auto upstream = std::make_shared<ConnPool::MockInstance>();\n\n  NiceMock<Runtime::MockLoader> runtime;\n  MirrorPolicyImpl policy(config, upstream, runtime);\n\n  EXPECT_CALL(runtime.snapshot_,\n              featureEnabled(\"runtime_key\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(50))))\n      .Times(4)\n      .WillRepeatedly(Return(true));\n  EXPECT_EQ(true, policy.shouldMirror(\"get\"));\n  EXPECT_EQ(true, policy.shouldMirror(\"set\"));\n  EXPECT_EQ(true, policy.shouldMirror(\"GET\"));\n  EXPECT_EQ(true, policy.shouldMirror(\"SET\"));\n\n  EXPECT_CALL(runtime.snapshot_,\n              featureEnabled(\"runtime_key\",\n                             Matcher<const envoy::type::v3::FractionalPercent&>(Percent(50))))\n      .Times(4)\n      .WillRepeatedly(Return(false));\n  EXPECT_EQ(false, policy.shouldMirror(\"get\"));\n  
EXPECT_EQ(false, policy.shouldMirror(\"set\"));\n  EXPECT_EQ(false, policy.shouldMirror(\"GET\"));\n  EXPECT_EQ(false, policy.shouldMirror(\"SET\"));\n}\n\n} // namespace RedisProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"mocks_lib\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//source/extensions/filters/network/rocketmq_proxy:config\",\n        \"//source/extensions/filters/network/rocketmq_proxy/router:router_lib\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    deps = [\n        \"//source/extensions/filters/network/rocketmq_proxy:config\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"protocol_test\",\n    srcs = [\"protocol_test.cc\"],\n    extension_name = \"envoy.filters.network.rocketmq_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/rocketmq_proxy:config\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"router_test\",\n    srcs = [\"router_test.cc\"],\n    extension_name = \"envoy.filters.network.rocketmq_proxy\",\n    deps = [\n        \":mocks_lib\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/rocketmq_proxy:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"topic_route_test\",\n    srcs = [\"topic_route_test.cc\"],\n    extension_name = \"envoy.filters.network.rocketmq_proxy\",\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/filters/network/rocketmq_proxy:config\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"conn_manager_test\",\n    srcs = 
[\"conn_manager_test.cc\"],\n    extension_name = \"envoy.filters.network.rocketmq_proxy\",\n    deps = [\n        \":utility_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"active_message_test\",\n    srcs = [\"active_message_test.cc\"],\n    extension_name = \"envoy.filters.network.rocketmq_proxy\",\n    deps = [\n        \":utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/extensions/filters/network/rocketmq_proxy:config\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.rocketmq_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/rocketmq_proxy:config\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"codec_test\",\n    srcs = [\"codec_test.cc\"],\n    extension_name = \"envoy.filters.network.rocketmq_proxy\",\n    deps = [\n        \":utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:registry_lib\",\n    
],\n)\n\nenvoy_extension_cc_test(\n    name = \"route_matcher_test\",\n    srcs = [\"route_matcher_test.cc\"],\n    extension_name = \"envoy.filters.network.rocketmq_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/rocketmq_proxy/router:route_matcher\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/active_message_test.cc",
    "content": "#include \"common/network/address_impl.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/active_message.h\"\n#include \"extensions/filters/network/rocketmq_proxy/config.h\"\n#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n#include \"extensions/filters/network/rocketmq_proxy/protocol.h\"\n#include \"extensions/filters/network/rocketmq_proxy/well_known_names.h\"\n\n#include \"test/extensions/filters/network/rocketmq_proxy/utility.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass ActiveMessageTest : public testing::Test {\npublic:\n  ActiveMessageTest()\n      : stats_(RocketmqFilterStats::generateStats(\"test.\", store_)),\n        config_(rocketmq_proxy_config_, factory_context_),\n        connection_manager_(config_, factory_context_.dispatcher().timeSource()) {\n    connection_manager_.initializeReadFilterCallbacks(filter_callbacks_);\n  }\n\n  ~ActiveMessageTest() override {\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  }\n\nprotected:\n  ConfigImpl::RocketmqProxyConfig rocketmq_proxy_config_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  Stats::IsolatedStoreImpl store_;\n  RocketmqFilterStats stats_;\n  ConfigImpl config_;\n  ConnectionManager connection_manager_;\n};\n\nTEST_F(ActiveMessageTest, ClusterName) {\n  std::string json = R\"EOF(\n  {\n    \"opaque\": 1,\n    \"code\": 35,\n    \"version\": 1,\n    \"language\": \"JAVA\",\n    \"serializeTypeCurrentRPC\": \"JSON\",\n    \"flag\": 0,\n    \"extFields\": {\n      \"clientID\": \"SampleClient_01\",\n      \"producerGroup\": \"PG_Example_01\",\n      \"consumerGroup\": \"CG_001\"\n    }\n  }\n  
)EOF\";\n\n  Buffer::OwnedImpl buffer;\n  buffer.writeBEInt<int32_t>(4 + 4 + json.size());\n  buffer.writeBEInt<int32_t>(json.size());\n  buffer.add(json);\n\n  bool underflow = false;\n  bool has_error = false;\n  auto cmd = Decoder::decode(buffer, underflow, has_error);\n  EXPECT_FALSE(underflow);\n  EXPECT_FALSE(has_error);\n\n  ActiveMessage activeMessage(connection_manager_, std::move(cmd));\n  EXPECT_FALSE(activeMessage.metadata()->hasTopicName());\n}\n\nTEST_F(ActiveMessageTest, FillBrokerData) {\n\n  absl::node_hash_map<int64_t, std::string> address;\n  address.emplace(0, \"1.2.3.4:10911\");\n  BrokerData broker_data(\"DefaultCluster\", \"broker-a\", std::move(address));\n\n  std::vector<BrokerData> list;\n  list.push_back(broker_data);\n\n  ActiveMessage::fillBrokerData(list, \"DefaultCluster\", \"broker-a\", 1, \"localhost:10911\");\n  ActiveMessage::fillBrokerData(list, \"DefaultCluster\", \"broker-a\", 0, \"localhost:10911\");\n  EXPECT_EQ(1, list.size());\n  for (auto& it : list) {\n    auto& address = it.brokerAddresses();\n    EXPECT_EQ(2, address.size());\n    EXPECT_STREQ(\"1.2.3.4:10911\", address[0].c_str());\n  }\n}\n\nTEST_F(ActiveMessageTest, FillAckMessageDirectiveSuccess) {\n  RemotingCommandPtr cmd = std::make_unique<RemotingCommand>();\n  ActiveMessage active_message(connection_manager_, std::move(cmd));\n\n  Buffer::OwnedImpl buffer;\n  // frame length\n  buffer.writeBEInt<int32_t>(98);\n\n  // magic code\n  buffer.writeBEInt<int32_t>(enumToSignedInt(MessageVersion::V1));\n\n  // body CRC\n  buffer.writeBEInt<int32_t>(1);\n\n  // queue Id\n  buffer.writeBEInt<int32_t>(2);\n\n  // flag\n  buffer.writeBEInt<int32_t>(3);\n\n  // queue offset\n  buffer.writeBEInt<int64_t>(4);\n\n  // physical offset\n  buffer.writeBEInt<int64_t>(5);\n\n  // system flag\n  buffer.writeBEInt<int32_t>(6);\n\n  // born timestamp\n  buffer.writeBEInt<int64_t>(7);\n\n  // born host\n  buffer.writeBEInt<int32_t>(8);\n\n  // born host port\n  
buffer.writeBEInt<int32_t>(9);\n\n  // store timestamp\n  buffer.writeBEInt<int64_t>(10);\n\n  // store host address ip:port --> long\n  Network::Address::Ipv4Instance host_address(\"127.0.0.1\", 10911);\n  const sockaddr_in* sock_addr = reinterpret_cast<const sockaddr_in*>(host_address.sockAddr());\n  buffer.writeBEInt<int32_t>(sock_addr->sin_addr.s_addr);\n  buffer.writeBEInt<int32_t>(sock_addr->sin_port);\n\n  // re-consume times\n  buffer.writeBEInt<int32_t>(11);\n\n  // transaction offset\n  buffer.writeBEInt<int64_t>(12);\n\n  // body size\n  buffer.writeBEInt<int32_t>(0);\n\n  const std::string topic = \"TopicTest\";\n\n  // topic length\n  buffer.writeBEInt<int8_t>(topic.length());\n\n  // topic data\n  buffer.add(topic);\n\n  AckMessageDirective directive(\"broker-a\", 0, connection_manager_.timeSource().monotonicTime());\n  const std::string group = \"Group\";\n  active_message.fillAckMessageDirective(buffer, group, topic, directive);\n\n  const std::string fake_topic = \"FakeTopic\";\n  active_message.fillAckMessageDirective(buffer, group, fake_topic, directive);\n\n  EXPECT_EQ(connection_manager_.getAckDirectiveTableForTest().size(), 1);\n}\n\nTEST_F(ActiveMessageTest, RecordPopRouteInfo) {\n  auto host_description = new NiceMock<Upstream::MockHostDescription>();\n\n  auto metadata = std::make_shared<envoy::config::core::v3::Metadata>();\n  ProtobufWkt::Struct topic_route_data;\n  auto* fields = topic_route_data.mutable_fields();\n\n  std::string broker_name = \"broker-a\";\n  int32_t broker_id = 0;\n\n  (*fields)[RocketmqConstants::get().ReadQueueNum] = ValueUtil::numberValue(4);\n  (*fields)[RocketmqConstants::get().WriteQueueNum] = ValueUtil::numberValue(4);\n  (*fields)[RocketmqConstants::get().ClusterName] = ValueUtil::stringValue(\"DefaultCluster\");\n  (*fields)[RocketmqConstants::get().BrokerName] = ValueUtil::stringValue(broker_name);\n  (*fields)[RocketmqConstants::get().BrokerId] = ValueUtil::numberValue(broker_id);\n  
(*fields)[RocketmqConstants::get().Perm] = ValueUtil::numberValue(6);\n  metadata->mutable_filter_metadata()->insert(Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\n      NetworkFilterNames::get().RocketmqProxy, topic_route_data));\n\n  EXPECT_CALL(*host_description, metadata()).WillRepeatedly(Return(metadata));\n\n  Upstream::HostDescriptionConstSharedPtr host_description_ptr(host_description);\n\n  Buffer::OwnedImpl buffer;\n  BufferUtility::fillRequestBuffer(buffer, RequestCode::PopMessage);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  RemotingCommandPtr cmd = Decoder::decode(buffer, underflow, has_error);\n  ActiveMessage active_message(connection_manager_, std::move(cmd));\n  active_message.recordPopRouteInfo(host_description_ptr);\n  auto custom_header = active_message.downstreamRequest()->typedCustomHeader<CommandCustomHeader>();\n  EXPECT_EQ(custom_header->targetBrokerName(), broker_name);\n  EXPECT_EQ(custom_header->targetBrokerId(), broker_id);\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/codec_test.cc",
    "content": "#include \"common/common/empty_string.h\"\n#include \"common/common/enum_to_int.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/codec.h\"\n\n#include \"test/extensions/filters/network/rocketmq_proxy/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass RocketmqCodecTest : public testing::Test {\npublic:\n  RocketmqCodecTest() = default;\n  ~RocketmqCodecTest() override = default;\n};\n\nTEST_F(RocketmqCodecTest, DecodeWithMinFrameSize) {\n  Buffer::OwnedImpl buffer;\n\n  buffer.add(std::string({'\\x00', '\\x00', '\\x01', '\\x8b'}));\n  buffer.add(std::string({'\\x00', '\\x00', '\\x01', '\\x76'}));\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_TRUE(underflow);\n  EXPECT_FALSE(has_error);\n  EXPECT_TRUE(nullptr == cmd);\n}\n\nTEST_F(RocketmqCodecTest, DecodeWithOverMaxFrameSizeData) {\n  Buffer::OwnedImpl buffer;\n\n  buffer.add(std::string({'\\x00', '\\x40', '\\x00', '\\x01'}));\n  buffer.add(std::string({'\\x00', '\\x20', '\\x00', '\\x00', '\\x00'}));\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(has_error);\n  EXPECT_TRUE(nullptr == cmd);\n}\n\nTEST_F(RocketmqCodecTest, DecodeUnsupportHeaderSerialization) {\n  Buffer::OwnedImpl buffer;\n  std::string header = \"random text suffices\";\n\n  buffer.writeBEInt<int32_t>(4 + 4 + header.size());\n  uint32_t mark = header.size();\n  mark |= (1u << 24u);\n  buffer.writeBEInt<uint32_t>(mark);\n  buffer.add(header);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(has_error);\n  
EXPECT_TRUE(nullptr == cmd);\n}\n\nTEST_F(RocketmqCodecTest, DecodeInvalidJson) {\n  Buffer::OwnedImpl buffer;\n  // Invalid json string.\n  std::string invalid_json = R\"EOF({a: 3)EOF\";\n\n  buffer.writeBEInt<int32_t>(4 + 4 + invalid_json.size());\n  buffer.writeBEInt<int32_t>(invalid_json.size());\n  buffer.add(invalid_json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(has_error);\n  EXPECT_TRUE(cmd == nullptr);\n}\n\nTEST_F(RocketmqCodecTest, DecodeCodeMissing) {\n  Buffer::OwnedImpl buffer;\n  // Invalid json string.\n  std::string invalid_json = R\"EOF({\"a\": 3})EOF\";\n\n  buffer.writeBEInt<int32_t>(4 + 4 + invalid_json.size());\n  buffer.writeBEInt<int32_t>(invalid_json.size());\n  buffer.add(invalid_json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(has_error);\n  EXPECT_TRUE(cmd == nullptr);\n}\n\nTEST_F(RocketmqCodecTest, DecodeVersionMissing) {\n  Buffer::OwnedImpl buffer;\n  // Invalid json string.\n  std::string invalid_json = R\"EOF({\"code\": 3})EOF\";\n\n  buffer.writeBEInt<int32_t>(4 + 4 + invalid_json.size());\n  buffer.writeBEInt<int32_t>(invalid_json.size());\n  buffer.add(invalid_json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(has_error);\n  EXPECT_TRUE(cmd == nullptr);\n}\n\nTEST_F(RocketmqCodecTest, DecodeOpaqueMissing) {\n  Buffer::OwnedImpl buffer;\n  // Invalid json string.\n  std::string invalid_json = R\"EOF(\n  {\n    \"code\": 3,\n    \"version\": 1\n  }\n  )EOF\";\n\n  buffer.writeBEInt<int32_t>(4 + 4 + invalid_json.size());\n  buffer.writeBEInt<int32_t>(invalid_json.size());\n  buffer.add(invalid_json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd = 
Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(has_error);\n  EXPECT_TRUE(cmd == nullptr);\n}\n\nTEST_F(RocketmqCodecTest, DecodeFlagMissing) {\n  Buffer::OwnedImpl buffer;\n  // Invalid json string.\n  std::string invalid_json = R\"EOF(\n  {\n    \"code\": 3,\n    \"version\": 1,\n    \"opaque\": 1\n  }\n  )EOF\";\n\n  buffer.writeBEInt<int32_t>(4 + 4 + invalid_json.size());\n  buffer.writeBEInt<int32_t>(invalid_json.size());\n  buffer.add(invalid_json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(has_error);\n  EXPECT_TRUE(cmd == nullptr);\n}\n\nTEST_F(RocketmqCodecTest, DecodeRequestSendMessage) {\n  Buffer::OwnedImpl buffer;\n  BufferUtility::fillRequestBuffer(buffer, RequestCode::SendMessage);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow || has_error);\n  EXPECT_EQ(request->opaque(), BufferUtility::opaque_);\n  Buffer::Instance& body = request->body();\n  EXPECT_EQ(body.toString(), BufferUtility::msg_body_);\n\n  auto header = request->typedCustomHeader<SendMessageRequestHeader>();\n\n  EXPECT_EQ(header->topic(), BufferUtility::topic_name_);\n  EXPECT_EQ(header->version(), SendMessageRequestVersion::V1);\n  EXPECT_EQ(header->queueId(), -1);\n}\n\nTEST_F(RocketmqCodecTest, DecodeRequestSendMessageV2) {\n  Buffer::OwnedImpl buffer;\n\n  BufferUtility::fillRequestBuffer(buffer, RequestCode::SendMessageV2);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow || has_error);\n  EXPECT_EQ(request->opaque(), BufferUtility::opaque_);\n\n  Buffer::Instance& body = request->body();\n\n  EXPECT_EQ(body.toString(), BufferUtility::msg_body_);\n\n  auto header = 
request->typedCustomHeader<SendMessageRequestHeader>();\n\n  EXPECT_EQ(header->topic(), BufferUtility::topic_name_);\n  EXPECT_EQ(header->version(), SendMessageRequestVersion::V2);\n  EXPECT_EQ(header->queueId(), -1);\n}\n\nTEST_F(RocketmqCodecTest, DecodeRequestSendMessageV1) {\n  std::string json = R\"EOF(\n  {\n    \"code\": 10,\n    \"version\": 1,\n    \"opaque\": 1,\n    \"flag\": 0,\n    \"extFields\": {\n      \"batch\": false,\n      \"bornTimestamp\": 1575872212297,\n      \"defaultTopic\": \"TBW102\",\n      \"defaultTopicQueueNums\": 3,\n      \"flag\": 124,\n      \"producerGroup\": \"FooBarGroup\",\n      \"queueId\": 1,\n      \"reconsumeTimes\": 0,\n      \"sysFlag\": 0,\n      \"topic\": \"FooBar\",\n      \"unitMode\": false,\n      \"properties\": \"mock_properties\",\n      \"maxReconsumeTimes\": 32\n      }\n  }\n  )EOF\";\n  Buffer::OwnedImpl buffer;\n\n  buffer.writeBEInt<int32_t>(4 + 4 + json.size());\n  buffer.writeBEInt<int32_t>(json.size());\n  buffer.add(json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_FALSE(underflow);\n  EXPECT_FALSE(has_error);\n  EXPECT_TRUE(nullptr != cmd);\n  EXPECT_EQ(10, cmd->code());\n  EXPECT_EQ(1, cmd->version());\n  EXPECT_EQ(1, cmd->opaque());\n}\n\nTEST_F(RocketmqCodecTest, DecodeSendMessageResponseWithSystemError) {\n  std::string json = R\"EOF(\n  {\n    \"code\": 1,\n    \"language\": \"JAVA\",\n    \"version\": 2,\n    \"opaque\": 1,\n    \"flag\": 1,\n    \"remark\": \"System error\",\n    \"serializeTypeCurrentRPC\": \"JSON\"\n  }\n  )EOF\";\n  Buffer::OwnedImpl buffer;\n\n  buffer.writeBEInt<int32_t>(4 + 4 + json.size());\n  buffer.writeBEInt<int32_t>(json.size());\n  buffer.add(json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd =\n      Decoder::decode(buffer, underflow, has_error, static_cast<int>(RequestCode::SendMessage));\n\n  EXPECT_FALSE(has_error);\n  EXPECT_FALSE(underflow);\n  
EXPECT_TRUE(nullptr != cmd);\n  EXPECT_STREQ(\"JAVA\", cmd->language().c_str());\n  EXPECT_STREQ(\"JSON\", cmd->serializeTypeCurrentRPC().c_str());\n  EXPECT_STREQ(\"System error\", cmd->remark().c_str());\n  EXPECT_TRUE(nullptr == cmd->customHeader());\n}\n\nTEST_F(RocketmqCodecTest, DecodeSendMessageResponseWithSystemBusy) {\n  std::string json = R\"EOF(\n  {\n    \"code\": 2,\n    \"language\": \"JAVA\",\n    \"version\": 2,\n    \"opaque\": 1,\n    \"flag\": 1,\n    \"remark\": \"System busy\",\n    \"serializeTypeCurrentRPC\": \"JSON\"\n  }\n  )EOF\";\n  Buffer::OwnedImpl buffer;\n\n  buffer.writeBEInt<int32_t>(4 + 4 + json.size());\n  buffer.writeBEInt<int32_t>(json.size());\n  buffer.add(json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd =\n      Decoder::decode(buffer, underflow, has_error, static_cast<int>(RequestCode::SendMessage));\n\n  EXPECT_FALSE(has_error);\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(nullptr != cmd);\n  EXPECT_STREQ(\"JAVA\", cmd->language().c_str());\n  EXPECT_STREQ(\"JSON\", cmd->serializeTypeCurrentRPC().c_str());\n  EXPECT_STREQ(\"System busy\", cmd->remark().c_str());\n  EXPECT_TRUE(nullptr == cmd->customHeader());\n}\n\nTEST_F(RocketmqCodecTest, DecodeSendMessageResponseWithCodeNotSupported) {\n  std::string json = R\"EOF(\n  {\n    \"code\": 3,\n    \"language\": \"JAVA\",\n    \"version\": 2,\n    \"opaque\": 1,\n    \"flag\": 1,\n    \"remark\": \"Code not supported\",\n    \"serializeTypeCurrentRPC\": \"JSON\"\n  }\n  )EOF\";\n  Buffer::OwnedImpl buffer;\n\n  buffer.writeBEInt<int32_t>(4 + 4 + json.size());\n  buffer.writeBEInt<int32_t>(json.size());\n  buffer.add(json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd =\n      Decoder::decode(buffer, underflow, has_error, static_cast<int>(RequestCode::SendMessage));\n\n  EXPECT_FALSE(has_error);\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(nullptr != cmd);\n  EXPECT_STREQ(\"JAVA\", cmd->language().c_str());\n  
EXPECT_STREQ(\"JSON\", cmd->serializeTypeCurrentRPC().c_str());\n  EXPECT_STREQ(\"Code not supported\", cmd->remark().c_str());\n  EXPECT_TRUE(nullptr == cmd->customHeader());\n}\n\nTEST_F(RocketmqCodecTest, DecodeSendMessageResponseNormal) {\n  std::string json = R\"EOF(\n  {\n    \"code\": 0,\n    \"language\": \"JAVA\",\n    \"version\": 2,\n    \"opaque\": 1,\n    \"flag\": 1,\n    \"remark\": \"OK\",\n    \"serializeTypeCurrentRPC\": \"JSON\",\n    \"extFields\": {\n      \"msgId\": \"A001\",\n      \"queueId\": \"10\",\n      \"queueOffset\": \"2\",\n      \"transactionId\": \"\"\n    }\n  }\n  )EOF\";\n  Buffer::OwnedImpl buffer;\n\n  buffer.writeBEInt<int32_t>(4 + 4 + json.size());\n  buffer.writeBEInt<int32_t>(json.size());\n  buffer.add(json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd =\n      Decoder::decode(buffer, underflow, has_error, static_cast<int>(RequestCode::SendMessage));\n\n  EXPECT_FALSE(has_error);\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(nullptr != cmd);\n  EXPECT_STREQ(\"JAVA\", cmd->language().c_str());\n  EXPECT_STREQ(\"JSON\", cmd->serializeTypeCurrentRPC().c_str());\n  EXPECT_STREQ(\"OK\", cmd->remark().c_str());\n  EXPECT_TRUE(nullptr != cmd->customHeader());\n\n  auto extHeader = cmd->typedCustomHeader<SendMessageResponseHeader>();\n\n  EXPECT_STREQ(\"A001\", extHeader->msgId().c_str());\n  EXPECT_EQ(10, extHeader->queueId());\n  EXPECT_EQ(2, extHeader->queueOffset());\n}\n\nTEST_F(RocketmqCodecTest, DecodePopMessageResponseNormal) {\n  std::string json = R\"EOF(\n  {\n    \"code\": 0,\n    \"language\": \"JAVA\",\n    \"version\": 2,\n    \"opaque\": 1,\n    \"flag\": 1,\n    \"remark\": \"OK\",\n    \"serializeTypeCurrentRPC\": \"JSON\",\n    \"extFields\": {\n      \"popTime\": \"1234\",\n      \"invisibleTime\": \"10\",\n      \"reviveQid\": \"2\",\n      \"restNum\": \"10\",\n      \"startOffsetInfo\": \"3\",\n      \"msgOffsetInfo\": \"mock_msg_offset_info\",\n      \"orderCountInfo\": 
\"mock_order_count_info\"\n    }\n  }\n  )EOF\";\n  Buffer::OwnedImpl buffer;\n\n  buffer.writeBEInt<int32_t>(4 + 4 + json.size());\n  buffer.writeBEInt<int32_t>(json.size());\n  buffer.add(json);\n\n  bool underflow = false;\n  bool has_error = false;\n\n  auto cmd =\n      Decoder::decode(buffer, underflow, has_error, static_cast<int>(RequestCode::PopMessage));\n\n  EXPECT_FALSE(has_error);\n  EXPECT_FALSE(underflow);\n  EXPECT_TRUE(nullptr != cmd);\n  EXPECT_STREQ(\"JAVA\", cmd->language().c_str());\n  EXPECT_STREQ(\"JSON\", cmd->serializeTypeCurrentRPC().c_str());\n  EXPECT_STREQ(\"OK\", cmd->remark().c_str());\n  EXPECT_TRUE(nullptr != cmd->customHeader());\n\n  auto extHeader = cmd->typedCustomHeader<PopMessageResponseHeader>();\n\n  EXPECT_EQ(1234, extHeader->popTimeForTest());\n  EXPECT_EQ(10, extHeader->invisibleTime());\n  EXPECT_EQ(2, extHeader->reviveQid());\n  EXPECT_EQ(10, extHeader->restNum());\n  EXPECT_STREQ(\"3\", extHeader->startOffsetInfo().c_str());\n  EXPECT_STREQ(\"mock_msg_offset_info\", extHeader->msgOffsetInfo().c_str());\n  EXPECT_STREQ(\"mock_order_count_info\", extHeader->orderCountInfo().c_str());\n}\n\nTEST_F(RocketmqCodecTest, DecodeRequestSendMessageV2underflow) {\n  Buffer::OwnedImpl buffer;\n\n  buffer.add(std::string({'\\x00', '\\x00', '\\x01', '\\x8b'}));\n  buffer.add(std::string({'\\x00', '\\x00', '\\x01', '\\x76'}));\n\n  std::string header_json = R\"EOF(\n  {\n    \"code\": 310,\n    \"extFields\": {\n      \"a\": \"GID_LINGCHU_TEST_0\"\n  }\n  )EOF\";\n\n  buffer.add(header_json);\n  buffer.add(std::string{\"_Apache_RocketMQ_\"});\n\n  bool underflow = false;\n  bool has_error = false;\n\n  RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error);\n\n  EXPECT_EQ(underflow, true);\n  EXPECT_EQ(has_error, false);\n}\n\nTEST_F(RocketmqCodecTest, EncodeResponseSendMessageSuccess) {\n  const int version = 285;\n  const int opaque = 4;\n  const std::string msg_id = \"1E05789ABD1F18B4AAC2895B8BE60003\";\n\n  
RemotingCommandPtr response =\n      std::make_unique<RemotingCommand>(static_cast<int>(ResponseCode::Success), version, opaque);\n\n  response->markAsResponse();\n\n  const int queue_id = 0;\n  const int queue_offset = 0;\n\n  std::unique_ptr<SendMessageResponseHeader> sendMessageResponseHeader =\n      std::make_unique<SendMessageResponseHeader>(msg_id, queue_id, queue_offset, EMPTY_STRING);\n  CommandCustomHeaderPtr extHeader(sendMessageResponseHeader.release());\n  response->customHeader(extHeader);\n\n  Buffer::OwnedImpl response_buffer;\n  Encoder::encode(response, response_buffer);\n\n  uint32_t frame_length = response_buffer.peekBEInt<uint32_t>();\n  uint32_t header_length =\n      response_buffer.peekBEInt<uint32_t>(Decoder::FRAME_HEADER_LENGTH_FIELD_SIZE);\n\n  EXPECT_EQ(header_length + Decoder::FRAME_HEADER_LENGTH_FIELD_SIZE, frame_length);\n\n  std::unique_ptr<char[]> header_data = std::make_unique<char[]>(header_length);\n  const uint32_t frame_header_content_offset =\n      Decoder::FRAME_LENGTH_FIELD_SIZE + Decoder::FRAME_HEADER_LENGTH_FIELD_SIZE;\n  response_buffer.copyOut(frame_header_content_offset, header_length, header_data.get());\n  std::string header_json(header_data.get(), header_length);\n  ProtobufWkt::Struct doc;\n  MessageUtil::loadFromJson(header_json, doc);\n  const auto& members = doc.fields();\n\n  EXPECT_EQ(members.at(\"code\").number_value(), 0);\n  EXPECT_EQ(members.at(\"version\").number_value(), version);\n  EXPECT_EQ(members.at(\"opaque\").number_value(), opaque);\n\n  const auto& extFields = members.at(\"extFields\").struct_value().fields();\n\n  EXPECT_EQ(extFields.at(\"msgId\").string_value(), msg_id);\n  EXPECT_EQ(extFields.at(\"queueId\").number_value(), queue_id);\n  EXPECT_EQ(extFields.at(\"queueOffset\").number_value(), queue_offset);\n}\n\nTEST_F(RocketmqCodecTest, DecodeQueueIdWithIncompleteBuffer) {\n  Buffer::OwnedImpl buffer;\n  // incomplete buffer\n  buffer.add(std::string({'\\x00'}));\n\n  
EXPECT_EQ(Decoder::decodeQueueId(buffer, 0), -1);\n}\n\nTEST_F(RocketmqCodecTest, DecodeQueueIdSuccess) {\n  Buffer::OwnedImpl buffer;\n  // frame length\n  buffer.writeBEInt(16);\n\n  for (int i = 0; i < 3; i++) {\n    buffer.writeBEInt(i);\n  }\n  EXPECT_EQ(Decoder::decodeQueueId(buffer, 0), 2);\n}\n\nTEST_F(RocketmqCodecTest, DecodeQueueIdFailure) {\n  Buffer::OwnedImpl buffer;\n  buffer.writeBEInt(128);\n\n  // Some random data, but incomplete frame\n  buffer.writeBEInt(12);\n\n  EXPECT_EQ(Decoder::decodeQueueId(buffer, 0), -1);\n}\n\nTEST_F(RocketmqCodecTest, DecodeQueueOffsetSuccess) {\n  Buffer::OwnedImpl buffer;\n  // frame length\n  buffer.writeBEInt(28);\n\n  // frame data\n  for (int i = 0; i < 4; i++) {\n    buffer.writeBEInt(i);\n  }\n  // write queue offset which takes up 8 bytes\n  buffer.writeBEInt<int64_t>(4);\n\n  EXPECT_EQ(Decoder::decodeQueueOffset(buffer, 0), 4);\n}\n\nTEST_F(RocketmqCodecTest, DecodeQueueOffsetFailure) {\n  Buffer::OwnedImpl buffer;\n\n  // Define length of the frame as 128 bytes\n  buffer.writeBEInt(128);\n\n  // some random data, just make sure the frame is incomplete\n  for (int i = 0; i < 6; i++) {\n    buffer.writeBEInt<int32_t>(i);\n  }\n\n  EXPECT_EQ(Decoder::decodeQueueOffset(buffer, 0), -1);\n}\n\nTEST_F(RocketmqCodecTest, DecodeMsgIdSuccess) {\n  Buffer::OwnedImpl buffer;\n\n  // frame length\n  buffer.writeBEInt<int32_t>(64);\n\n  // magic code\n  buffer.writeBEInt<int32_t>(0);\n\n  // body CRC\n  buffer.writeBEInt<int32_t>(1);\n\n  // queue Id\n  buffer.writeBEInt<int32_t>(2);\n\n  // flag\n  buffer.writeBEInt<int32_t>(3);\n\n  // queue offset\n  buffer.writeBEInt<int64_t>(4);\n\n  // physical offset\n  buffer.writeBEInt<int64_t>(5);\n\n  // system flag\n  buffer.writeBEInt<int32_t>(6);\n\n  // born timestamp\n  buffer.writeBEInt<int64_t>(7);\n\n  // born host\n  buffer.writeBEInt<int32_t>(8);\n\n  // born host port\n  buffer.writeBEInt<int32_t>(9);\n\n  // store timestamp\n  buffer.writeBEInt<int64_t>(10);\n\n  // 
store host address ip:port --> long\n  Network::Address::Ipv4Instance host_address(\"127.0.0.1\", 10911);\n  const sockaddr_in* sock_addr = reinterpret_cast<const sockaddr_in*>(host_address.sockAddr());\n  buffer.writeBEInt<int32_t>(sock_addr->sin_addr.s_addr);\n  buffer.writeBEInt<int32_t>(sock_addr->sin_port);\n  EXPECT_EQ(Decoder::decodeMsgId(buffer, 0).empty(), false);\n}\n\nTEST_F(RocketmqCodecTest, DecodeMsgIdFailure) {\n  Buffer::OwnedImpl buffer;\n\n  // frame length\n  buffer.writeBEInt<int32_t>(101);\n\n  // magic code\n  buffer.writeBEInt<int32_t>(0);\n  EXPECT_EQ(Decoder::decodeMsgId(buffer, 0).empty(), true);\n}\n\nTEST_F(RocketmqCodecTest, DecodeTopicSuccessV1) {\n  Buffer::OwnedImpl buffer;\n\n  // frame length\n  buffer.writeBEInt<int32_t>(98);\n\n  // magic code\n  buffer.writeBEInt<int32_t>(enumToSignedInt(MessageVersion::V1));\n\n  // body CRC\n  buffer.writeBEInt<int32_t>(1);\n\n  // queue Id\n  buffer.writeBEInt<int32_t>(2);\n\n  // flag\n  buffer.writeBEInt<int32_t>(3);\n\n  // queue offset\n  buffer.writeBEInt<int64_t>(4);\n\n  // physical offset\n  buffer.writeBEInt<int64_t>(5);\n\n  // system flag\n  buffer.writeBEInt<int32_t>(6);\n\n  // born timestamp\n  buffer.writeBEInt<int64_t>(7);\n\n  // born host\n  buffer.writeBEInt<int32_t>(8);\n\n  // born host port\n  buffer.writeBEInt<int32_t>(9);\n\n  // store timestamp\n  buffer.writeBEInt<int64_t>(10);\n\n  // store host address ip:port --> long\n  Network::Address::Ipv4Instance host_address(\"127.0.0.1\", 10911);\n  const sockaddr_in* sock_addr = reinterpret_cast<const sockaddr_in*>(host_address.sockAddr());\n  buffer.writeBEInt<int32_t>(sock_addr->sin_addr.s_addr);\n  buffer.writeBEInt<int32_t>(sock_addr->sin_port);\n\n  // re-consume times\n  buffer.writeBEInt<int32_t>(11);\n\n  // transaction offset\n  buffer.writeBEInt<int64_t>(12);\n\n  // body size\n  buffer.writeBEInt<int32_t>(0);\n\n  const std::string topic = \"TopicTest\";\n\n  // topic length\n  
buffer.writeBEInt<int8_t>(topic.length());\n\n  // topic data\n  buffer.add(topic);\n\n  EXPECT_STREQ(Decoder::decodeTopic(buffer, 0).c_str(), topic.c_str());\n}\n\nTEST_F(RocketmqCodecTest, DecodeTopicSuccessV2) {\n  Buffer::OwnedImpl buffer;\n\n  // frame length\n  buffer.writeBEInt<int32_t>(99);\n\n  // magic code\n  buffer.writeBEInt<int32_t>(enumToSignedInt(MessageVersion::V2));\n\n  // body CRC\n  buffer.writeBEInt<int32_t>(1);\n\n  // queue Id\n  buffer.writeBEInt<int32_t>(2);\n\n  // flag\n  buffer.writeBEInt<int32_t>(3);\n\n  // queue offset\n  buffer.writeBEInt<int64_t>(4);\n\n  // physical offset\n  buffer.writeBEInt<int64_t>(5);\n\n  // system flag\n  buffer.writeBEInt<int32_t>(6);\n\n  // born timestamp\n  buffer.writeBEInt<int64_t>(7);\n\n  // born host\n  buffer.writeBEInt<int32_t>(8);\n\n  // born host port\n  buffer.writeBEInt<int32_t>(9);\n\n  // store timestamp\n  buffer.writeBEInt<int64_t>(10);\n\n  // store host address ip:port --> long\n  Network::Address::Ipv4Instance host_address(\"127.0.0.1\", 10911);\n  const sockaddr_in* sock_addr = reinterpret_cast<const sockaddr_in*>(host_address.sockAddr());\n  buffer.writeBEInt<int32_t>(sock_addr->sin_addr.s_addr);\n  buffer.writeBEInt<int32_t>(sock_addr->sin_port);\n\n  // re-consume times\n  buffer.writeBEInt<int32_t>(11);\n\n  // transaction offset\n  buffer.writeBEInt<int64_t>(12);\n\n  // body size\n  buffer.writeBEInt<int32_t>(0);\n\n  const std::string topic = \"TopicTest\";\n\n  // topic length\n  buffer.writeBEInt<int16_t>(topic.length());\n\n  // topic data\n  buffer.add(topic);\n\n  EXPECT_STREQ(Decoder::decodeTopic(buffer, 0).c_str(), topic.c_str());\n}\n\nTEST_F(RocketmqCodecTest, DecodeTopicFailure) {\n  Buffer::OwnedImpl buffer;\n\n  // frame length\n  buffer.writeBEInt<int32_t>(64);\n\n  // magic code\n  buffer.writeBEInt<int32_t>(0);\n  EXPECT_EQ(Decoder::decodeTopic(buffer, 0).empty(), true);\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/config_test.cc",
    "content": "#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/config.h\"\n\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nusing RocketmqProxyProto = envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy;\n\nRocketmqProxyProto parseRocketmqProxyFromV3Yaml(const std::string& yaml,\n                                                bool avoid_boosting = true) {\n  RocketmqProxyProto rocketmq_proxy;\n  TestUtility::loadFromYaml(yaml, rocketmq_proxy, false, avoid_boosting);\n  return rocketmq_proxy;\n}\n\nclass RocketmqFilterConfigTestBase {\npublic:\n  void testConfig(RocketmqProxyProto& config) {\n    Network::FilterFactoryCb cb;\n    EXPECT_NO_THROW({ cb = factory_.createFilterFactoryFromProto(config, context_); });\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, addReadFilter(_));\n    cb(connection);\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  RocketmqProxyFilterConfigFactory factory_;\n};\n\nclass RocketmqFilterConfigTest : public RocketmqFilterConfigTestBase, public testing::Test {\npublic:\n  ~RocketmqFilterConfigTest() override = default;\n};\n\nTEST_F(RocketmqFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(\n      RocketmqProxyFilterConfigFactory().createFilterFactoryFromProto(\n          envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy(), context),\n      
ProtoValidationException);\n}\n\nTEST_F(RocketmqFilterConfigTest, ValidProtoConfiguration) {\n  envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy config{};\n  config.set_stat_prefix(\"my_stat_prefix\");\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RocketmqProxyFilterConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST_F(RocketmqFilterConfigTest, RocketmqProxyWithEmptyProto) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  RocketmqProxyFilterConfigFactory factory;\n  envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy config =\n      *dynamic_cast<envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy*>(\n          factory.createEmptyConfigProto().get());\n  config.set_stat_prefix(\"my_stat_prefix\");\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\nTEST_F(RocketmqFilterConfigTest, RocketmqProxyWithFullConfig) {\n  const std::string yaml = R\"EOF(\n    stat_prefix: rocketmq_incomming_stats\n    develop_mode: true\n    transient_object_life_span:\n      seconds: 30\n    )EOF\";\n  RocketmqProxyProto config = parseRocketmqProxyFromV3Yaml(yaml);\n  testConfig(config);\n}\n\nTEST_F(RocketmqFilterConfigTest, ProxyAddress) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  Server::Configuration::MockServerFactoryContext factory_context;\n  EXPECT_CALL(context, getServerFactoryContext()).WillRepeatedly(ReturnRef(factory_context));\n\n  LocalInfo::MockLocalInfo local_info;\n  EXPECT_CALL(factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n  std::shared_ptr<const Network::MockResolvedAddress> instance =\n      
std::make_shared<Network::MockResolvedAddress>(\"logical\", \"physical\");\n  EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance));\n  EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Ip));\n\n  Network::MockIp* ip = new Network::MockIp();\n  EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(ip));\n\n  std::string address(\"1.2.3.4\");\n  EXPECT_CALL(*ip, addressAsString()).WillRepeatedly(ReturnRef(address));\n  EXPECT_CALL(*ip, port()).WillRepeatedly(Return(1234));\n  ConfigImpl::RocketmqProxyConfig proxyConfig;\n  ConfigImpl configImpl(proxyConfig, context);\n\n  EXPECT_STREQ(\"1.2.3.4:1234\", configImpl.proxyAddress().c_str());\n  delete ip;\n}\n\nTEST_F(RocketmqFilterConfigTest, ProxyAddressWithDefaultPort) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  Server::Configuration::MockServerFactoryContext factory_context;\n  EXPECT_CALL(context, getServerFactoryContext()).WillRepeatedly(ReturnRef(factory_context));\n\n  LocalInfo::MockLocalInfo local_info;\n  EXPECT_CALL(factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n  std::shared_ptr<const Network::MockResolvedAddress> instance =\n      std::make_shared<Network::MockResolvedAddress>(\"logical\", \"physical\");\n  EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance));\n  EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Ip));\n\n  Network::MockIp* ip = new Network::MockIp();\n  EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(ip));\n\n  std::string address(\"1.2.3.4\");\n  EXPECT_CALL(*ip, addressAsString()).WillRepeatedly(ReturnRef(address));\n  EXPECT_CALL(*ip, port()).WillRepeatedly(Return(0));\n  ConfigImpl::RocketmqProxyConfig proxyConfig;\n  ConfigImpl configImpl(proxyConfig, context);\n\n  EXPECT_STREQ(\"1.2.3.4:10000\", configImpl.proxyAddress().c_str());\n  delete ip;\n}\n\nTEST_F(RocketmqFilterConfigTest, ProxyAddressWithNonIpType) {\n  
NiceMock<Server::Configuration::MockFactoryContext> context;\n  Server::Configuration::MockServerFactoryContext factory_context;\n  EXPECT_CALL(context, getServerFactoryContext()).WillRepeatedly(ReturnRef(factory_context));\n\n  LocalInfo::MockLocalInfo local_info;\n  EXPECT_CALL(factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n  std::shared_ptr<const Network::MockResolvedAddress> instance =\n      std::make_shared<Network::MockResolvedAddress>(\"logical\", \"physical\");\n  EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance));\n  EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Pipe));\n\n  Network::MockIp* ip = new Network::MockIp();\n  EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(ip));\n\n  std::string address(\"1.2.3.4\");\n  EXPECT_CALL(*ip, addressAsString()).WillRepeatedly(ReturnRef(address));\n  EXPECT_CALL(*ip, port()).WillRepeatedly(Return(0));\n  ConfigImpl::RocketmqProxyConfig proxyConfig;\n  ConfigImpl configImpl(proxyConfig, context);\n\n  EXPECT_STREQ(\"physical\", configImpl.proxyAddress().c_str());\n  delete ip;\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc",
    "content": "#include \"envoy/network/connection.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/config.h\"\n#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n#include \"extensions/filters/network/rocketmq_proxy/well_known_names.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/extensions/filters/network/rocketmq_proxy/utility.h\"\n#include \"test/mocks/network/connection.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nusing ConfigRocketmqProxy = envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy;\n\nclass TestConfigImpl : public ConfigImpl {\npublic:\n  TestConfigImpl(RocketmqProxyConfig config, Server::Configuration::MockFactoryContext& context,\n                 RocketmqFilterStats& stats)\n      : ConfigImpl(config, context), stats_(stats) {}\n\n  RocketmqFilterStats& stats() override { return stats_; }\n\nprivate:\n  RocketmqFilterStats stats_;\n};\n\nclass RocketmqConnectionManagerTest : public testing::Test {\npublic:\n  RocketmqConnectionManagerTest() : stats_(RocketmqFilterStats::generateStats(\"test.\", store_)) {}\n\n  ~RocketmqConnectionManagerTest() override {\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  }\n\n  void initializeFilter() { initializeFilter(\"\"); }\n\n  void initializeFilter(const std::string& yaml) {\n    if (!yaml.empty()) {\n      TestUtility::loadFromYaml(yaml, proto_config_);\n      TestUtility::validate(proto_config_);\n    }\n    config_ = std::make_unique<TestConfigImpl>(proto_config_, factory_context_, stats_);\n    conn_manager_ 
=\n        std::make_unique<ConnectionManager>(*config_, factory_context_.dispatcher().timeSource());\n    conn_manager_->initializeReadFilterCallbacks(filter_callbacks_);\n    conn_manager_->onNewConnection();\n    current_ = factory_context_.dispatcher().timeSource().monotonicTime();\n  }\n\n  void initializeCluster() {\n    Upstream::HostVector hosts;\n    hosts.emplace_back(host_);\n    priority_set_.updateHosts(\n        1,\n        Upstream::HostSetImpl::partitionHosts(std::make_shared<Upstream::HostVector>(hosts),\n                                              Upstream::HostsPerLocalityImpl::empty()),\n        nullptr, hosts, {}, 100);\n    ON_CALL(thread_local_cluster_, prioritySet()).WillByDefault(ReturnRef(priority_set_));\n    EXPECT_CALL(factory_context_.cluster_manager_, get(_))\n        .WillRepeatedly(Return(&thread_local_cluster_));\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  Stats::TestUtil::TestStore store_;\n  RocketmqFilterStats stats_;\n  ConfigRocketmqProxy proto_config_;\n\n  std::unique_ptr<TestConfigImpl> config_;\n\n  Buffer::OwnedImpl buffer_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  std::unique_ptr<ConnectionManager> conn_manager_;\n\n  Encoder encoder_;\n  Decoder decoder_;\n\n  MonotonicTime current_;\n\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_info_{\n      new NiceMock<Upstream::MockClusterInfo>()};\n  Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_info_, \"tcp://127.0.0.1:80\")};\n  Upstream::PrioritySetImpl priority_set_;\n  NiceMock<Upstream::MockThreadLocalCluster> thread_local_cluster_;\n};\n\nTEST_F(RocketmqConnectionManagerTest, OnHeartbeat) {\n  initializeFilter();\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, 
store_.counter(\"test.heartbeat\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithDecodeError) {\n  initializeFilter();\n\n  std::string json = R\"EOF(\n  {\n    \"language\": \"JAVA\",\n    \"version\": 2,\n    \"opaque\": 1,\n    \"flag\": 1,\n    \"serializeTypeCurrentRPC\": \"JSON\"\n  }\n  )EOF\";\n\n  buffer_.writeBEInt<int32_t>(4 + 4 + json.size());\n  buffer_.writeBEInt<int32_t>(json.size());\n  buffer_.add(json);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_decoding_error\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithInvalidBodyJson) {\n  initializeFilter();\n\n  RemotingCommandPtr cmd = std::make_unique<RemotingCommand>();\n  cmd->code(static_cast<int>(RequestCode::HeartBeat));\n  std::string heartbeat_data = R\"EOF({\"clientID\": \"127})EOF\";\n  cmd->body().add(heartbeat_data);\n  encoder_.encode(cmd, buffer_);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_decoding_error\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithBodyJsonLackofClientId) {\n  initializeFilter();\n\n  RemotingCommandPtr cmd = std::make_unique<RemotingCommand>();\n  cmd->code(static_cast<int>(RequestCode::HeartBeat));\n  std::string heartbeat_data = R\"EOF(\n  {\n    \"consumerDataSet\": [{}]\n  }\n  )EOF\";\n  cmd->body().add(heartbeat_data);\n  encoder_.encode(cmd, buffer_);\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(0U, 
store_.counter(\"test.request_decoding_error\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithGroupMembersMapExists) {\n  initializeFilter();\n\n  auto& group_members_map = conn_manager_->groupMembersForTest();\n  std::vector<ConsumerGroupMember> group_members;\n  ConsumerGroupMember group_member(\"127.0.0.1@90330\", *conn_manager_);\n  group_member.setLastForTest(current_);\n  group_members.emplace_back(group_member);\n  group_members_map[\"test_cg\"] = group_members;\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.heartbeat\").value());\n  EXPECT_FALSE(group_member.expired());\n  EXPECT_FALSE(group_members_map.at(\"test_cg\").empty());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithGroupMembersMapExistsButExpired) {\n  initializeFilter();\n\n  auto& group_members_map = conn_manager_->groupMembersForTest();\n  std::vector<ConsumerGroupMember> group_members;\n  ConsumerGroupMember group_member(\"127.0.0.2@90330\", *conn_manager_);\n  group_member.setLastForTest(current_ - std::chrono::seconds(31));\n  group_members.emplace_back(group_member);\n  group_members_map[\"test_cg\"] = group_members;\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.heartbeat\").value());\n  EXPECT_TRUE(group_member.expired());\n  EXPECT_TRUE(group_members_map.empty());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithGroupMembersMapExistsButLackOfClientID) {\n  initializeFilter();\n\n  auto& group_members_map = 
conn_manager_->groupMembersForTest();\n  std::vector<ConsumerGroupMember> group_members;\n  ConsumerGroupMember group_member(\"127.0.0.2@90330\", *conn_manager_);\n  group_member.setLastForTest(current_);\n  group_members.emplace_back(group_member);\n  group_members_map[\"test_cg\"] = group_members;\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.heartbeat\").value());\n  EXPECT_FALSE(group_member.expired());\n  EXPECT_FALSE(group_members_map.at(\"test_cg\").empty());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithDownstreamConnecitonClosed) {\n  initializeFilter();\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat);\n  NiceMock<Network::MockConnection> connection;\n  EXPECT_CALL(connection, state()).Times(1).WillOnce(Invoke([&]() -> Network::Connection::State {\n    return Network::Connection::State::Closed;\n  }));\n  EXPECT_CALL(filter_callbacks_, connection()).WillRepeatedly(Invoke([&]() -> Network::Connection& {\n    return connection;\n  }));\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.heartbeat\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithPurgeDirectiveTable) {\n  initializeFilter();\n\n  std::string broker_name = \"broker_name\";\n  int32_t broker_id = 0;\n  std::chrono::milliseconds delay_0(31 * 1000);\n  AckMessageDirective directive_0(broker_name, broker_id,\n                                  conn_manager_->timeSource().monotonicTime() - delay_0);\n  std::string directive_key_0 = \"key_0\";\n  conn_manager_->insertAckDirective(directive_key_0, directive_0);\n\n  
std::chrono::milliseconds delay_1(29 * 1000);\n  AckMessageDirective directive_1(broker_name, broker_id,\n                                  conn_manager_->timeSource().monotonicTime() - delay_1);\n  std::string directive_key_1 = \"key_1\";\n  conn_manager_->insertAckDirective(directive_key_1, directive_1);\n\n  EXPECT_EQ(2, conn_manager_->getAckDirectiveTableForTest().size());\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.heartbeat\").value());\n\n  EXPECT_EQ(1, conn_manager_->getAckDirectiveTableForTest().size());\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnUnregisterClient) {\n  initializeFilter();\n\n  auto& group_members_map = conn_manager_->groupMembersForTest();\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.unregister\").value());\n  EXPECT_TRUE(group_members_map.empty());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnUnregisterClientWithGroupMembersMapExists) {\n  initializeFilter();\n\n  auto& group_members_map = conn_manager_->groupMembersForTest();\n  std::vector<ConsumerGroupMember> group_members;\n  ConsumerGroupMember group_member(\"test_client_id\", *conn_manager_);\n  group_member.setLastForTest(current_);\n  group_members.emplace_back(group_member);\n  group_members_map[\"test_cg\"] = group_members;\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, 
store_.counter(\"test.unregister\").value());\n  EXPECT_FALSE(group_member.expired());\n  EXPECT_TRUE(group_members_map.empty());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnUnregisterClientWithGroupMembersMapExistsButExpired) {\n  initializeFilter();\n\n  auto& group_members_map = conn_manager_->groupMembersForTest();\n  std::vector<ConsumerGroupMember> group_members;\n  ConsumerGroupMember group_member(\"127.0.0.2@90330\", *conn_manager_);\n  group_member.setLastForTest(current_ - std::chrono::seconds(31));\n  group_members.emplace_back(group_member);\n  group_members_map[\"test_cg\"] = group_members;\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.unregister\").value());\n  EXPECT_TRUE(group_member.expired());\n  EXPECT_TRUE(group_members_map.empty());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest,\n       OnUnregisterClientWithGroupMembersMapExistsButLackOfClientID) {\n  initializeFilter();\n\n  auto& group_members_map = conn_manager_->groupMembersForTest();\n  std::vector<ConsumerGroupMember> group_members;\n  ConsumerGroupMember group_member(\"127.0.0.2@90330\", *conn_manager_);\n  group_member.setLastForTest(current_);\n  group_members.emplace_back(group_member);\n  group_members_map[\"test_cg\"] = group_members;\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.unregister\").value());\n  EXPECT_FALSE(group_member.expired());\n  EXPECT_FALSE(group_members_map.empty());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, 
OnGetTopicRoute) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test\nroute_config:\n  name: default_route\n  routes:\n    - match:\n        topic:\n          exact: test_topic\n      route:\n        cluster: fake_cluster\n)EOF\";\n  initializeFilter(yaml);\n\n  auto metadata = std::make_shared<envoy::config::core::v3::Metadata>();\n  ProtobufWkt::Struct topic_route_data;\n  auto* fields = topic_route_data.mutable_fields();\n  (*fields)[RocketmqConstants::get().ReadQueueNum] = ValueUtil::numberValue(4);\n  (*fields)[RocketmqConstants::get().WriteQueueNum] = ValueUtil::numberValue(4);\n  (*fields)[RocketmqConstants::get().ClusterName] = ValueUtil::stringValue(\"DefaultCluster\");\n  (*fields)[RocketmqConstants::get().BrokerName] = ValueUtil::stringValue(\"broker-a\");\n  (*fields)[RocketmqConstants::get().BrokerId] = ValueUtil::numberValue(0);\n  (*fields)[RocketmqConstants::get().Perm] = ValueUtil::numberValue(6);\n  metadata->mutable_filter_metadata()->insert(Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\n      NetworkFilterNames::get().RocketmqProxy, topic_route_data));\n  host_->metadata(metadata);\n  initializeCluster();\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.get_topic_route\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnGetTopicRouteWithoutRoutes) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test\nroute_config:\n  name: default_route\n  routes:\n    - match:\n        topic:\n          exact: test_another_topic\n      route:\n        cluster: fake_cluster\n)EOF\";\n  initializeFilter(yaml);\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  
EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.get_topic_route\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnGetTopicRouteWithoutCluster) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test\nroute_config:\n  name: default_route\n  routes:\n    - match:\n        topic:\n          exact: test_topic\n      route:\n        cluster: fake_cluster\n)EOF\";\n  initializeFilter(yaml);\n\n  EXPECT_CALL(factory_context_.cluster_manager_, get(_)).WillRepeatedly(Return(nullptr));\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.get_topic_route\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnGetTopicRouteInDevelopMode) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test\ndevelop_mode: true\nroute_config:\n  name: default_route\n  routes:\n    - match:\n        topic:\n          exact: test_topic\n      route:\n        cluster: fake_cluster\n)EOF\";\n  NiceMock<Server::Configuration::MockServerFactoryContext> server_factory_context;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Network::MockIp> ip;\n  std::shared_ptr<const Network::MockResolvedAddress> instance =\n      std::make_shared<Network::MockResolvedAddress>(\"logical\", \"physical\");\n  EXPECT_CALL(factory_context_, getServerFactoryContext())\n      .WillRepeatedly(ReturnRef(server_factory_context));\n  EXPECT_CALL(server_factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info));\n  EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance));\n  EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Ip));\n  EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(&ip));\n  const 
std::string address{\"1.2.3.4\"};\n  EXPECT_CALL(ip, addressAsString()).WillRepeatedly(ReturnRef(address));\n  EXPECT_CALL(ip, port()).WillRepeatedly(Return(1234));\n  initializeFilter(yaml);\n\n  auto metadata = std::make_shared<envoy::config::core::v3::Metadata>();\n  ProtobufWkt::Struct topic_route_data;\n  auto* fields = topic_route_data.mutable_fields();\n  (*fields)[RocketmqConstants::get().ReadQueueNum] = ValueUtil::numberValue(4);\n  (*fields)[RocketmqConstants::get().WriteQueueNum] = ValueUtil::numberValue(4);\n  (*fields)[RocketmqConstants::get().ClusterName] = ValueUtil::stringValue(\"DefaultCluster\");\n  (*fields)[RocketmqConstants::get().BrokerName] = ValueUtil::stringValue(\"broker-a\");\n  (*fields)[RocketmqConstants::get().BrokerId] = ValueUtil::numberValue(0);\n  (*fields)[RocketmqConstants::get().Perm] = ValueUtil::numberValue(6);\n  metadata->mutable_filter_metadata()->insert(Protobuf::MapPair<std::string, ProtobufWkt::Struct>(\n      NetworkFilterNames::get().RocketmqProxy, topic_route_data));\n  host_->metadata(metadata);\n  initializeCluster();\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.get_topic_route\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnGetConsumerListByGroup) {\n  initializeFilter();\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetConsumerListByGroup);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.get_consumer_list\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnGetConsumerListByGroupWithGroupMemberMapExists) {\n  initializeFilter();\n\n  auto& 
group_members_map = conn_manager_->groupMembersForTest();\n  std::vector<ConsumerGroupMember> group_members;\n  ConsumerGroupMember group_member(\"127.0.0.2@90330\", *conn_manager_);\n  group_member.setLastForTest(current_ - std::chrono::seconds(31));\n  group_members.emplace_back(group_member);\n  group_members_map[\"test_cg\"] = group_members;\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetConsumerListByGroup);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.get_consumer_list\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnPopMessage) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test\nroute_config:\n  name: default_route\n  routes:\n    - match:\n        topic:\n          exact: test_topic\n      route:\n        cluster: fake_cluster\n)EOF\";\n  initializeFilter(yaml);\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::PopMessage);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.pop_message\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnAckMessage) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test\nroute_config:\n  name: default_route\n  routes:\n    - match:\n        topic:\n          exact: test_topic\n      route:\n        cluster: fake_cluster\n)EOF\";\n  initializeFilter(yaml);\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::AckMessage);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.ack_message\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, 
OnData) {\n  initializeFilter();\n\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0, buffer_.length());\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnDataWithEndStream) {\n  initializeFilter();\n\n  Buffer::OwnedImpl buffer;\n  BufferUtility::fillRequestBuffer(buffer, RequestCode::SendMessageV2);\n  bool underflow, has_error;\n  RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error);\n  conn_manager_->createActiveMessage(request);\n  EXPECT_EQ(1, conn_manager_->activeMessageList().size());\n  conn_manager_->onData(buffer_, true);\n  EXPECT_TRUE(conn_manager_->activeMessageList().empty());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnDataWithMinFrameSize) {\n  initializeFilter();\n\n  buffer_.add(std::string({'\\x00', '\\x00', '\\x01', '\\x8b'}));\n  buffer_.add(std::string({'\\x00', '\\x00', '\\x01', '\\x76'}));\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnDataSendMessage) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test\nroute_config:\n  name: default_route\n  routes:\n    - match:\n        topic:\n          exact: test_topic\n      route:\n        cluster: fake_cluster\n)EOF\";\n  initializeFilter(yaml);\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::SendMessage);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.send_message_v1\").value());\n  EXPECT_EQ(\n      1U,\n      store_.gauge(\"test.send_message_v1_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  
buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnDataSendMessageV2) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test\nroute_config:\n  name: default_route\n  routes:\n    - match:\n        topic:\n          exact: test_topic\n      route:\n        cluster: fake_cluster\n)EOF\";\n  initializeFilter(yaml);\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::SendMessageV2);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.send_message_v2\").value());\n  EXPECT_EQ(\n      1U,\n      store_.gauge(\"test.send_message_v2_active\", Stats::Gauge::ImportMode::Accumulate).value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnDataWithUnsupportedCode) {\n  initializeFilter();\n\n  BufferUtility::fillRequestBuffer(buffer_, RequestCode::Unsupported);\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, OnDataInvalidFrameLength) {\n  // Test against the invalid input where frame_length <= header_length.\n  const std::string yaml = R\"EOF(\n  stat_prefix: test\n  )EOF\";\n  initializeFilter(yaml);\n  buffer_.add(\n      std::string({'\\x00', '\\x00', '\\x00', '\\x00', '\\x00', '\\x00', '\\x00', '\\x00', '\\x00'}));\n  EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n\n  buffer_.drain(buffer_.length());\n}\n\nTEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberEqual) {\n  initializeFilter();\n\n  ConsumerGroupMember m1(\"abc\", *conn_manager_);\n  ConsumerGroupMember m2(\"abc\", *conn_manager_);\n  EXPECT_TRUE(m1 == m2);\n}\n\nTEST_F(RocketmqConnectionManagerTest, 
ConsumerGroupMemberLessThan) {\n  initializeFilter();\n\n  ConsumerGroupMember m1(\"abc\", *conn_manager_);\n  ConsumerGroupMember m2(\"def\", *conn_manager_);\n  EXPECT_TRUE(m1 < m2);\n}\n\nTEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberExpired) {\n  initializeFilter();\n\n  ConsumerGroupMember member(\"Mock\", *conn_manager_);\n  EXPECT_FALSE(member.expired());\n  EXPECT_STREQ(\"Mock\", member.clientId().data());\n}\n\nTEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberRefresh) {\n  initializeFilter();\n\n  ConsumerGroupMember member(\"Mock\", *conn_manager_);\n  EXPECT_FALSE(member.expired());\n  member.setLastForTest(current_ - std::chrono::seconds(31));\n  EXPECT_TRUE(member.expired());\n  member.refresh();\n  EXPECT_FALSE(member.expired());\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/mocks.cc",
    "content": "#include \"test/extensions/filters/network/rocketmq_proxy/mocks.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/router/router_impl.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ByMove;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nMockActiveMessage::MockActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request)\n    : ActiveMessage(conn_manager, std::move(request)) {\n  route_ = std::make_shared<NiceMock<Router::MockRoute>>();\n\n  ON_CALL(*this, onError(_)).WillByDefault(Invoke([&](absl::string_view error_message) {\n    ActiveMessage::onError(error_message);\n  }));\n  ON_CALL(*this, onReset()).WillByDefault(Return());\n  ON_CALL(*this, sendResponseToDownstream()).WillByDefault(Invoke([&]() {\n    ActiveMessage::sendResponseToDownstream();\n  }));\n  ON_CALL(*this, metadata()).WillByDefault(Invoke([&]() { return ActiveMessage::metadata(); }));\n  ON_CALL(*this, route()).WillByDefault(Return(route_));\n}\nMockActiveMessage::~MockActiveMessage() = default;\n\nMockConfig::MockConfig() : stats_(RocketmqFilterStats::generateStats(\"test.\", store_)) {\n  ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_));\n  ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_));\n  ON_CALL(*this, createRouter())\n      .WillByDefault(Return(ByMove(std::make_unique<Router::RouterImpl>(cluster_manager_))));\n  ON_CALL(*this, developMode()).WillByDefault(Return(false));\n  ON_CALL(*this, proxyAddress()).WillByDefault(Return(std::string{\"1.2.3.4:1234\"}));\n}\n\nnamespace Router {\n\nMockRouteEntry::MockRouteEntry() {\n  ON_CALL(*this, clusterName()).WillByDefault(ReturnRef(cluster_name_));\n}\n\nMockRouteEntry::~MockRouteEntry() = default;\n\nMockRoute::MockRoute() { ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); }\nMockRoute::~MockRoute() = default;\n\n} // 
namespace Router\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/mocks.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/rocketmq_proxy/active_message.h\"\n#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n\n#include \"test/mocks/upstream/cluster_manager.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nnamespace Router {\nclass MockRoute;\n} // namespace Router\n\nclass MockActiveMessage : public ActiveMessage {\npublic:\n  MockActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request);\n  ~MockActiveMessage() override;\n\n  MOCK_METHOD(void, createFilterChain, ());\n  MOCK_METHOD(void, sendRequestToUpstream, ());\n  MOCK_METHOD(RemotingCommandPtr&, downstreamRequest, ());\n  MOCK_METHOD(void, sendResponseToDownstream, ());\n  MOCK_METHOD(void, onQueryTopicRoute, ());\n  MOCK_METHOD(void, onError, (absl::string_view));\n  MOCK_METHOD(ConnectionManager&, connectionManager, ());\n  MOCK_METHOD(void, onReset, ());\n  MOCK_METHOD(bool, onUpstreamData,\n              (Buffer::Instance&, bool, Tcp::ConnectionPool::ConnectionDataPtr&));\n  MOCK_METHOD(MessageMetadataSharedPtr, metadata, (), (const));\n  MOCK_METHOD(Router::RouteConstSharedPtr, route, ());\n\n  std::shared_ptr<Router::MockRoute> route_;\n};\n\nclass MockConfig : public Config {\npublic:\n  MockConfig();\n  ~MockConfig() override = default;\n\n  MOCK_METHOD(RocketmqFilterStats&, stats, ());\n  MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ());\n  MOCK_METHOD(Router::RouterPtr, createRouter, ());\n  MOCK_METHOD(bool, developMode, (), (const));\n  MOCK_METHOD(std::string, proxyAddress, ());\n  MOCK_METHOD(Router::Config&, routerConfig, ());\n\nprivate:\n  Stats::IsolatedStoreImpl store_;\n  RocketmqFilterStats stats_;\n  NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  Router::RouterPtr router_;\n};\n\nnamespace Router {\n\nclass MockRouteEntry : public RouteEntry {\npublic:\n  MockRouteEntry();\n  
~MockRouteEntry() override;\n\n  // RocketmqProxy::Router::RouteEntry\n  MOCK_METHOD(const std::string&, clusterName, (), (const));\n  MOCK_METHOD(Envoy::Router::MetadataMatchCriteria*, metadataMatchCriteria, (), (const));\n\n  std::string cluster_name_{\"fake_cluster\"};\n};\n\nclass MockRoute : public Route {\npublic:\n  MockRoute();\n  ~MockRoute() override;\n\n  // RocketmqProxy::Router::Route\n  MOCK_METHOD(const RouteEntry*, routeEntry, (), (const));\n\n  NiceMock<MockRouteEntry> route_entry_;\n};\n} // namespace Router\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/protocol_test.cc",
    "content": "#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/protocol.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass UnregisterClientRequestHeaderTest : public testing::Test {\npublic:\n  std::string client_id_{\"SampleClient_01\"};\n  std::string producer_group_{\"PG_Example_01\"};\n  std::string consumer_group_{\"CG_001\"};\n};\n\nTEST_F(UnregisterClientRequestHeaderTest, Encode) {\n  UnregisterClientRequestHeader request_header;\n  request_header.clientId(client_id_);\n  request_header.producerGroup(producer_group_);\n  request_header.consumerGroup(consumer_group_);\n\n  ProtobufWkt::Value doc;\n  request_header.encode(doc);\n\n  const auto& members = doc.struct_value().fields();\n  EXPECT_STREQ(client_id_.c_str(), members.at(\"clientID\").string_value().c_str());\n  EXPECT_STREQ(producer_group_.c_str(), members.at(\"producerGroup\").string_value().c_str());\n  EXPECT_STREQ(consumer_group_.c_str(), members.at(\"consumerGroup\").string_value().c_str());\n}\n\nTEST_F(UnregisterClientRequestHeaderTest, Decode) {\n\n  std::string json = R\"EOF(\n  {\n    \"clientID\": \"SampleClient_01\",\n    \"producerGroup\": \"PG_Example_01\",\n    \"consumerGroup\": \"CG_001\"\n  }\n  )EOF\";\n\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  UnregisterClientRequestHeader unregister_client_request_header;\n  unregister_client_request_header.decode(doc);\n  EXPECT_STREQ(client_id_.c_str(), unregister_client_request_header.clientId().c_str());\n  EXPECT_STREQ(producer_group_.c_str(), unregister_client_request_header.producerGroup().c_str());\n  EXPECT_STREQ(consumer_group_.c_str(), unregister_client_request_header.consumerGroup().c_str());\n}\n\nTEST(GetConsumerListByGroupResponseBodyTest, Encode) {\n  GetConsumerListByGroupResponseBody response_body;\n  
response_body.add(\"localhost@1\");\n  response_body.add(\"localhost@2\");\n\n  ProtobufWkt::Struct doc;\n  response_body.encode(doc);\n\n  const auto& members = doc.fields();\n  EXPECT_TRUE(members.contains(\"consumerIdList\"));\n  EXPECT_EQ(2, members.at(\"consumerIdList\").list_value().values_size());\n}\n\nclass AckMessageRequestHeaderTest : public testing::Test {\npublic:\n  std::string consumer_group{\"CG_Unit_Test\"};\n  std::string topic{\"T_UnitTest\"};\n  int32_t queue_id{1};\n  std::string extra_info{\"extra_info_UT\"};\n  int64_t offset{100};\n};\n\nTEST_F(AckMessageRequestHeaderTest, Encode) {\n  AckMessageRequestHeader ack_header;\n  ack_header.consumerGroup(consumer_group);\n  ack_header.topic(topic);\n  ack_header.queueId(queue_id);\n  ack_header.extraInfo(extra_info);\n  ack_header.offset(offset);\n\n  ProtobufWkt::Value doc;\n  ack_header.encode(doc);\n\n  const auto& members = doc.struct_value().fields();\n\n  EXPECT_TRUE(members.contains(\"consumerGroup\"));\n  EXPECT_STREQ(consumer_group.c_str(), members.at(\"consumerGroup\").string_value().c_str());\n\n  EXPECT_TRUE(members.contains(\"topic\"));\n  EXPECT_STREQ(topic.c_str(), members.at(\"topic\").string_value().c_str());\n\n  EXPECT_TRUE(members.contains(\"queueId\"));\n  EXPECT_EQ(queue_id, members.at(\"queueId\").number_value());\n\n  EXPECT_TRUE(members.contains(\"extraInfo\"));\n  EXPECT_STREQ(extra_info.c_str(), members.at(\"extraInfo\").string_value().c_str());\n\n  EXPECT_TRUE(members.contains(\"offset\"));\n  EXPECT_EQ(offset, members.at(\"offset\").number_value());\n}\n\nTEST_F(AckMessageRequestHeaderTest, Decode) {\n  std::string json = R\"EOF(\n  {\n    \"consumerGroup\": \"CG_Unit_Test\",\n    \"topic\": \"T_UnitTest\",\n    \"queueId\": 1,\n    \"extraInfo\": \"extra_info_UT\",\n    \"offset\": 100\n  }\n  )EOF\";\n\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n\n  AckMessageRequestHeader ack_header;\n  ack_header.decode(doc);\n  
ASSERT_STREQ(consumer_group.c_str(), ack_header.consumerGroup().data());\n  ASSERT_STREQ(topic.c_str(), ack_header.topic().c_str());\n  ASSERT_EQ(queue_id, ack_header.queueId());\n  ASSERT_STREQ(extra_info.c_str(), ack_header.extraInfo().data());\n  ASSERT_EQ(offset, ack_header.offset());\n}\n\nTEST_F(AckMessageRequestHeaderTest, DecodeNumSerializedAsString) {\n  std::string json = R\"EOF(\n  {\n    \"consumerGroup\": \"CG_Unit_Test\",\n    \"topic\": \"T_UnitTest\",\n    \"queueId\": \"1\",\n    \"extraInfo\": \"extra_info_UT\",\n    \"offset\": \"100\"\n  }\n  )EOF\";\n\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n\n  AckMessageRequestHeader ack_header;\n  ack_header.decode(doc);\n  ASSERT_STREQ(consumer_group.c_str(), ack_header.consumerGroup().data());\n  ASSERT_STREQ(topic.c_str(), ack_header.topic().c_str());\n  ASSERT_EQ(queue_id, ack_header.queueId());\n  ASSERT_STREQ(extra_info.c_str(), ack_header.extraInfo().data());\n  ASSERT_EQ(offset, ack_header.offset());\n}\n\nclass PopMessageRequestHeaderTest : public testing::Test {\npublic:\n  std::string consumer_group{\"CG_UT\"};\n  std::string topic{\"T_UT\"};\n  int32_t queue_id{1};\n  int32_t max_msg_nums{2};\n  int64_t invisible_time{3};\n  int64_t poll_time{4};\n  int64_t born_time{5};\n  int32_t init_mode{6};\n\n  std::string exp_type{\"exp_type_UT\"};\n  std::string exp{\"exp_UT\"};\n};\n\nTEST_F(PopMessageRequestHeaderTest, Encode) {\n  PopMessageRequestHeader pop_request_header;\n  pop_request_header.consumerGroup(consumer_group);\n  pop_request_header.topic(topic);\n  pop_request_header.queueId(queue_id);\n  pop_request_header.maxMsgNum(max_msg_nums);\n  pop_request_header.invisibleTime(invisible_time);\n  pop_request_header.pollTime(poll_time);\n  pop_request_header.bornTime(born_time);\n  pop_request_header.initMode(init_mode);\n  pop_request_header.expType(exp_type);\n  pop_request_header.exp(exp);\n\n  ProtobufWkt::Value doc;\n  
pop_request_header.encode(doc);\n\n  const auto& members = doc.struct_value().fields();\n\n  EXPECT_TRUE(members.contains(\"consumerGroup\"));\n  EXPECT_STREQ(consumer_group.c_str(), members.at(\"consumerGroup\").string_value().c_str());\n\n  EXPECT_TRUE(members.contains(\"topic\"));\n  EXPECT_STREQ(topic.c_str(), members.at(\"topic\").string_value().c_str());\n\n  EXPECT_TRUE(members.contains(\"queueId\"));\n  EXPECT_EQ(queue_id, members.at(\"queueId\").number_value());\n\n  EXPECT_TRUE(members.contains(\"maxMsgNums\"));\n  EXPECT_EQ(max_msg_nums, members.at(\"maxMsgNums\").number_value());\n\n  EXPECT_TRUE(members.contains(\"invisibleTime\"));\n  EXPECT_EQ(invisible_time, members.at(\"invisibleTime\").number_value());\n\n  EXPECT_TRUE(members.contains(\"pollTime\"));\n  EXPECT_EQ(poll_time, members.at(\"pollTime\").number_value());\n\n  EXPECT_TRUE(members.contains(\"bornTime\"));\n  EXPECT_EQ(born_time, members.at(\"bornTime\").number_value());\n\n  EXPECT_TRUE(members.contains(\"initMode\"));\n  EXPECT_EQ(init_mode, members.at(\"initMode\").number_value());\n\n  EXPECT_TRUE(members.contains(\"expType\"));\n  EXPECT_STREQ(exp_type.c_str(), members.at(\"expType\").string_value().c_str());\n\n  EXPECT_TRUE(members.contains(\"exp\"));\n  EXPECT_STREQ(exp.c_str(), members.at(\"exp\").string_value().c_str());\n}\n\nTEST_F(PopMessageRequestHeaderTest, Decode) {\n  std::string json = R\"EOF(\n  {\n    \"consumerGroup\": \"CG_UT\",\n    \"topic\": \"T_UT\",\n    \"queueId\": 1,\n    \"maxMsgNums\": 2,\n    \"invisibleTime\": 3,\n    \"pollTime\": 4,\n    \"bornTime\": 5,\n    \"initMode\": 6,\n    \"expType\": \"exp_type_UT\",\n    \"exp\": \"exp_UT\"\n  }\n  )EOF\";\n\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  PopMessageRequestHeader pop_request_header;\n  pop_request_header.decode(doc);\n\n  ASSERT_STREQ(consumer_group.c_str(), pop_request_header.consumerGroup().data());\n  ASSERT_STREQ(topic.c_str(), 
pop_request_header.topic().c_str());\n  ASSERT_EQ(queue_id, pop_request_header.queueId());\n  ASSERT_EQ(max_msg_nums, pop_request_header.maxMsgNum());\n  ASSERT_EQ(invisible_time, pop_request_header.invisibleTime());\n  ASSERT_EQ(poll_time, pop_request_header.pollTime());\n  ASSERT_EQ(born_time, pop_request_header.bornTime());\n  ASSERT_EQ(init_mode, pop_request_header.initMode());\n  ASSERT_STREQ(exp_type.c_str(), pop_request_header.expType().c_str());\n  ASSERT_STREQ(exp.c_str(), pop_request_header.exp().c_str());\n}\n\nTEST_F(PopMessageRequestHeaderTest, DecodeNumSerializedAsString) {\n  std::string json = R\"EOF(\n  {\n    \"consumerGroup\": \"CG_UT\",\n    \"topic\": \"T_UT\",\n    \"queueId\": \"1\",\n    \"maxMsgNums\": \"2\",\n    \"invisibleTime\": \"3\",\n    \"pollTime\": \"4\",\n    \"bornTime\": \"5\",\n    \"initMode\": \"6\",\n    \"expType\": \"exp_type_UT\",\n    \"exp\": \"exp_UT\"\n  }\n  )EOF\";\n\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  PopMessageRequestHeader pop_request_header;\n  pop_request_header.decode(doc);\n\n  ASSERT_STREQ(consumer_group.c_str(), pop_request_header.consumerGroup().data());\n  ASSERT_STREQ(topic.c_str(), pop_request_header.topic().c_str());\n  ASSERT_EQ(queue_id, pop_request_header.queueId());\n  ASSERT_EQ(max_msg_nums, pop_request_header.maxMsgNum());\n  ASSERT_EQ(invisible_time, pop_request_header.invisibleTime());\n  ASSERT_EQ(poll_time, pop_request_header.pollTime());\n  ASSERT_EQ(born_time, pop_request_header.bornTime());\n  ASSERT_EQ(init_mode, pop_request_header.initMode());\n  ASSERT_STREQ(exp_type.c_str(), pop_request_header.expType().c_str());\n  ASSERT_STREQ(exp.c_str(), pop_request_header.exp().c_str());\n}\n\nclass PopMessageResponseHeaderTest : public testing::Test {\npublic:\n  int64_t pop_time{1};\n  int64_t invisible_time{2};\n  int32_t revive_qid{3};\n  int64_t rest_num{4};\n\n  std::string start_offset_info{\"start\"};\n  std::string 
msg_offset_info{\"msg\"};\n  std::string order_count_info{\"order\"};\n};\n\nTEST_F(PopMessageResponseHeaderTest, Encode) {\n  PopMessageResponseHeader pop_response_header;\n  pop_response_header.popTime(pop_time);\n  pop_response_header.invisibleTime(invisible_time);\n  pop_response_header.reviveQid(revive_qid);\n  pop_response_header.restNum(rest_num);\n  pop_response_header.startOffsetInfo(start_offset_info);\n  pop_response_header.msgOffsetInfo(msg_offset_info);\n  pop_response_header.orderCountInfo(order_count_info);\n\n  ProtobufWkt::Value doc;\n  pop_response_header.encode(doc);\n\n  const auto& members = doc.struct_value().fields();\n\n  EXPECT_TRUE(members.contains(\"popTime\"));\n  EXPECT_TRUE(members.contains(\"invisibleTime\"));\n  EXPECT_TRUE(members.contains(\"reviveQid\"));\n  EXPECT_TRUE(members.contains(\"restNum\"));\n  EXPECT_TRUE(members.contains(\"startOffsetInfo\"));\n  EXPECT_TRUE(members.contains(\"msgOffsetInfo\"));\n  EXPECT_TRUE(members.contains(\"orderCountInfo\"));\n\n  EXPECT_EQ(pop_time, members.at(\"popTime\").number_value());\n  EXPECT_EQ(invisible_time, members.at(\"invisibleTime\").number_value());\n  EXPECT_EQ(revive_qid, members.at(\"reviveQid\").number_value());\n  EXPECT_EQ(rest_num, members.at(\"restNum\").number_value());\n  EXPECT_STREQ(start_offset_info.c_str(), members.at(\"startOffsetInfo\").string_value().c_str());\n  EXPECT_STREQ(msg_offset_info.c_str(), members.at(\"msgOffsetInfo\").string_value().c_str());\n  EXPECT_STREQ(order_count_info.c_str(), members.at(\"orderCountInfo\").string_value().c_str());\n}\n\nTEST_F(PopMessageResponseHeaderTest, Decode) {\n  std::string json = R\"EOF(\n  {\n    \"popTime\": 1,\n    \"invisibleTime\": 2,\n    \"reviveQid\": 3,\n    \"restNum\": 4,\n    \"startOffsetInfo\": \"start\",\n    \"msgOffsetInfo\": \"msg\",\n     \"orderCountInfo\": \"order\"\n  }\n  )EOF\";\n\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n\n  
PopMessageResponseHeader header;\n  header.decode(doc);\n\n  EXPECT_EQ(pop_time, header.popTimeForTest());\n  EXPECT_EQ(invisible_time, header.invisibleTime());\n  EXPECT_EQ(revive_qid, header.reviveQid());\n  EXPECT_EQ(rest_num, header.restNum());\n\n  EXPECT_STREQ(start_offset_info.c_str(), header.startOffsetInfo().data());\n  EXPECT_STREQ(msg_offset_info.c_str(), header.msgOffsetInfo().data());\n  EXPECT_STREQ(order_count_info.c_str(), header.orderCountInfo().data());\n}\n\nTEST_F(PopMessageResponseHeaderTest, DecodeNumSerializedAsString) {\n  std::string json = R\"EOF(\n  {\n    \"popTime\": \"1\",\n    \"invisibleTime\": \"2\",\n    \"reviveQid\": \"3\",\n    \"restNum\": \"4\",\n    \"startOffsetInfo\": \"start\",\n    \"msgOffsetInfo\": \"msg\",\n    \"orderCountInfo\": \"order\"\n  }\n  )EOF\";\n\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n\n  PopMessageResponseHeader header;\n  header.decode(doc);\n\n  EXPECT_EQ(pop_time, header.popTimeForTest());\n  EXPECT_EQ(invisible_time, header.invisibleTime());\n  EXPECT_EQ(revive_qid, header.reviveQid());\n  EXPECT_EQ(rest_num, header.restNum());\n\n  EXPECT_STREQ(start_offset_info.c_str(), header.startOffsetInfo().data());\n  EXPECT_STREQ(msg_offset_info.c_str(), header.msgOffsetInfo().data());\n  EXPECT_STREQ(order_count_info.c_str(), header.orderCountInfo().data());\n}\n\nclass SendMessageResponseHeaderTest : public testing::Test {\npublic:\n  SendMessageResponseHeader response_header_;\n};\n\nTEST_F(SendMessageResponseHeaderTest, Encode) {\n  response_header_.msgIdForTest(\"MSG_ID_01\");\n  response_header_.queueId(1);\n  response_header_.queueOffset(100);\n  response_header_.transactionId(\"TX_01\");\n  ProtobufWkt::Value doc;\n  response_header_.encode(doc);\n\n  const auto& members = doc.struct_value().fields();\n  EXPECT_TRUE(members.contains(\"msgId\"));\n  EXPECT_TRUE(members.contains(\"queueId\"));\n  EXPECT_TRUE(members.contains(\"queueOffset\"));\n  
EXPECT_TRUE(members.contains(\"transactionId\"));\n\n  EXPECT_STREQ(\"MSG_ID_01\", members.at(\"msgId\").string_value().c_str());\n  EXPECT_STREQ(\"TX_01\", members.at(\"transactionId\").string_value().c_str());\n  EXPECT_EQ(1, members.at(\"queueId\").number_value());\n  EXPECT_EQ(100, members.at(\"queueOffset\").number_value());\n}\n\nTEST_F(SendMessageResponseHeaderTest, Decode) {\n  std::string json = R\"EOF(\n  {\n    \"msgId\": \"abc\",\n    \"queueId\": 1,\n    \"queueOffset\": 10,\n    \"transactionId\": \"TX_1\"\n  }\n  )EOF\";\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  response_header_.decode(doc);\n  EXPECT_STREQ(\"abc\", response_header_.msgId().c_str());\n  EXPECT_EQ(1, response_header_.queueId());\n  EXPECT_EQ(10, response_header_.queueOffset());\n  EXPECT_STREQ(\"TX_1\", response_header_.transactionId().c_str());\n}\n\nTEST_F(SendMessageResponseHeaderTest, DecodeNumSerializedAsString) {\n  std::string json = R\"EOF(\n  {\n    \"msgId\": \"abc\",\n    \"queueId\": \"1\",\n    \"queueOffset\": \"10\",\n    \"transactionId\": \"TX_1\"\n   }\n  )EOF\";\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  response_header_.decode(doc);\n  EXPECT_STREQ(\"abc\", response_header_.msgId().c_str());\n  EXPECT_EQ(1, response_header_.queueId());\n  EXPECT_EQ(10, response_header_.queueOffset());\n  EXPECT_STREQ(\"TX_1\", response_header_.transactionId().c_str());\n}\n\nclass SendMessageRequestHeaderTest : public testing::Test {};\n\nTEST_F(SendMessageRequestHeaderTest, EncodeDefault) {\n  SendMessageRequestHeader header;\n  ProtobufWkt::Value doc;\n  header.encode(doc);\n  const auto& members = doc.struct_value().fields();\n  EXPECT_TRUE(members.contains(\"producerGroup\"));\n  EXPECT_TRUE(members.contains(\"topic\"));\n  EXPECT_TRUE(members.contains(\"defaultTopic\"));\n  EXPECT_TRUE(members.contains(\"defaultTopicQueueNums\"));\n  
EXPECT_TRUE(members.contains(\"queueId\"));\n  EXPECT_TRUE(members.contains(\"sysFlag\"));\n  EXPECT_TRUE(members.contains(\"bornTimestamp\"));\n  EXPECT_TRUE(members.contains(\"flag\"));\n  EXPECT_FALSE(members.contains(\"properties\"));\n  EXPECT_FALSE(members.contains(\"reconsumeTimes\"));\n  EXPECT_FALSE(members.contains(\"unitMode\"));\n  EXPECT_FALSE(members.contains(\"batch\"));\n  EXPECT_FALSE(members.contains(\"maxReconsumeTimes\"));\n}\n\nTEST_F(SendMessageRequestHeaderTest, EncodeOptional) {\n  SendMessageRequestHeader header;\n  header.properties(\"mock\");\n  header.reconsumeTimes(1);\n  header.unitMode(true);\n  header.batch(true);\n  header.maxReconsumeTimes(32);\n  ProtobufWkt::Value doc;\n  header.encode(doc);\n  const auto& members = doc.struct_value().fields();\n  EXPECT_TRUE(members.contains(\"producerGroup\"));\n  EXPECT_TRUE(members.contains(\"topic\"));\n  EXPECT_TRUE(members.contains(\"defaultTopic\"));\n  EXPECT_TRUE(members.contains(\"defaultTopicQueueNums\"));\n  EXPECT_TRUE(members.contains(\"queueId\"));\n  EXPECT_TRUE(members.contains(\"sysFlag\"));\n  EXPECT_TRUE(members.contains(\"bornTimestamp\"));\n  EXPECT_TRUE(members.contains(\"flag\"));\n  EXPECT_TRUE(members.contains(\"properties\"));\n  EXPECT_TRUE(members.contains(\"reconsumeTimes\"));\n  EXPECT_TRUE(members.contains(\"unitMode\"));\n  EXPECT_TRUE(members.contains(\"batch\"));\n  EXPECT_TRUE(members.contains(\"maxReconsumeTimes\"));\n\n  EXPECT_STREQ(\"mock\", members.at(\"properties\").string_value().c_str());\n  EXPECT_EQ(1, members.at(\"reconsumeTimes\").number_value());\n  EXPECT_TRUE(members.at(\"unitMode\").bool_value());\n  EXPECT_TRUE(members.at(\"batch\").bool_value());\n  EXPECT_EQ(32, members.at(\"maxReconsumeTimes\").number_value());\n}\n\nTEST_F(SendMessageRequestHeaderTest, EncodeDefaultV2) {\n  SendMessageRequestHeader header;\n  header.version(SendMessageRequestVersion::V2);\n  ProtobufWkt::Value doc;\n  header.encode(doc);\n  const auto& members = 
doc.struct_value().fields();\n  EXPECT_TRUE(members.contains(\"a\"));\n  EXPECT_TRUE(members.contains(\"b\"));\n  EXPECT_TRUE(members.contains(\"c\"));\n  EXPECT_TRUE(members.contains(\"d\"));\n  EXPECT_TRUE(members.contains(\"e\"));\n  EXPECT_TRUE(members.contains(\"f\"));\n  EXPECT_TRUE(members.contains(\"g\"));\n  EXPECT_TRUE(members.contains(\"h\"));\n  EXPECT_FALSE(members.contains(\"i\"));\n  EXPECT_FALSE(members.contains(\"j\"));\n  EXPECT_FALSE(members.contains(\"k\"));\n  EXPECT_FALSE(members.contains(\"l\"));\n  EXPECT_FALSE(members.contains(\"m\"));\n}\n\nTEST_F(SendMessageRequestHeaderTest, EncodeOptionalV2) {\n  SendMessageRequestHeader header;\n  header.properties(\"mock\");\n  header.reconsumeTimes(1);\n  header.unitMode(true);\n  header.batch(true);\n  header.maxReconsumeTimes(32);\n  header.version(SendMessageRequestVersion::V2);\n  ProtobufWkt::Value doc;\n  header.encode(doc);\n\n  const auto& members = doc.struct_value().fields();\n  EXPECT_TRUE(members.contains(\"a\"));\n  EXPECT_TRUE(members.contains(\"b\"));\n  EXPECT_TRUE(members.contains(\"c\"));\n  EXPECT_TRUE(members.contains(\"d\"));\n  EXPECT_TRUE(members.contains(\"e\"));\n  EXPECT_TRUE(members.contains(\"f\"));\n  EXPECT_TRUE(members.contains(\"g\"));\n  EXPECT_TRUE(members.contains(\"h\"));\n  EXPECT_TRUE(members.contains(\"i\"));\n  EXPECT_TRUE(members.contains(\"j\"));\n  EXPECT_TRUE(members.contains(\"k\"));\n  EXPECT_TRUE(members.contains(\"l\"));\n  EXPECT_TRUE(members.contains(\"m\"));\n\n  EXPECT_STREQ(\"mock\", members.at(\"i\").string_value().c_str());\n  EXPECT_EQ(1, members.at(\"j\").number_value());\n  EXPECT_TRUE(members.at(\"k\").bool_value());\n  EXPECT_TRUE(members.at(\"m\").bool_value());\n  EXPECT_EQ(32, members.at(\"l\").number_value());\n}\n\nTEST_F(SendMessageRequestHeaderTest, EncodeV3) {\n  SendMessageRequestHeader header;\n  header.version(SendMessageRequestVersion::V3);\n  ProtobufWkt::Value doc;\n  
header.encode(doc);\n}\n\nTEST_F(SendMessageRequestHeaderTest, DecodeV1) {\n  std::string json = R\"EOF(\n  {\n    \"batch\": false,\n    \"bornTimestamp\": 1575872212297,\n    \"defaultTopic\": \"TBW102\",\n    \"defaultTopicQueueNums\": 3,\n    \"flag\": 124,\n    \"producerGroup\": \"FooBarGroup\",\n    \"queueId\": 1,\n    \"reconsumeTimes\": 0,\n    \"sysFlag\": 0,\n    \"topic\": \"FooBar\",\n    \"unitMode\": false\n  }\n  )EOF\";\n\n  SendMessageRequestHeader header;\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  header.decode(doc);\n  EXPECT_STREQ(\"FooBar\", header.topic().c_str());\n  EXPECT_EQ(1, header.queueId());\n  EXPECT_STREQ(\"FooBarGroup\", header.producerGroup().c_str());\n  EXPECT_STREQ(\"TBW102\", header.defaultTopic().c_str());\n  EXPECT_EQ(3, header.defaultTopicQueueNumber());\n  EXPECT_EQ(0, header.sysFlag());\n  EXPECT_EQ(1575872212297, header.bornTimestamp());\n  EXPECT_EQ(124, header.flag());\n  EXPECT_STREQ(\"\", header.properties().c_str());\n  EXPECT_EQ(0, header.reconsumeTimes());\n  EXPECT_FALSE(header.unitMode());\n  EXPECT_FALSE(header.batch());\n  EXPECT_EQ(0, header.maxReconsumeTimes());\n}\n\nTEST_F(SendMessageRequestHeaderTest, DecodeV1Optional) {\n  std::string json = R\"EOF(\n  {\n    \"batch\": false,\n    \"bornTimestamp\": 1575872212297,\n    \"defaultTopic\": \"TBW102\",\n    \"defaultTopicQueueNums\": 3,\n    \"flag\": 124,\n    \"producerGroup\": \"FooBarGroup\",\n    \"queueId\": 1,\n    \"reconsumeTimes\": 0,\n    \"sysFlag\": 0,\n    \"topic\": \"FooBar\",\n    \"unitMode\": false,\n    \"properties\": \"mock_properties\",\n    \"maxReconsumeTimes\": 32\n  }\n  )EOF\";\n\n  SendMessageRequestHeader header;\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  header.decode(doc);\n  EXPECT_STREQ(\"FooBar\", header.topic().c_str());\n  EXPECT_EQ(1, header.queueId());\n  EXPECT_STREQ(\"FooBarGroup\", 
header.producerGroup().c_str());\n  EXPECT_STREQ(\"TBW102\", header.defaultTopic().c_str());\n  EXPECT_EQ(3, header.defaultTopicQueueNumber());\n  EXPECT_EQ(0, header.sysFlag());\n  EXPECT_EQ(1575872212297, header.bornTimestamp());\n  EXPECT_EQ(124, header.flag());\n  EXPECT_STREQ(\"mock_properties\", header.properties().c_str());\n  EXPECT_EQ(0, header.reconsumeTimes());\n  EXPECT_FALSE(header.unitMode());\n  EXPECT_FALSE(header.batch());\n  EXPECT_EQ(32, header.maxReconsumeTimes());\n}\n\nTEST_F(SendMessageRequestHeaderTest, DecodeV1OptionalNumSerializedAsString) {\n  std::string json = R\"EOF(\n  {\n    \"batch\": \"false\",\n    \"bornTimestamp\": \"1575872212297\",\n    \"defaultTopic\": \"TBW102\",\n    \"defaultTopicQueueNums\": \"3\",\n    \"flag\": \"124\",\n    \"producerGroup\": \"FooBarGroup\",\n    \"queueId\": \"1\",\n    \"reconsumeTimes\": \"0\",\n    \"sysFlag\": \"0\",\n    \"topic\": \"FooBar\",\n    \"unitMode\": \"false\",\n    \"properties\": \"mock_properties\",\n    \"maxReconsumeTimes\": \"32\"\n  }\n  )EOF\";\n\n  SendMessageRequestHeader header;\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  header.decode(doc);\n  EXPECT_STREQ(\"FooBar\", header.topic().c_str());\n  EXPECT_EQ(1, header.queueId());\n  EXPECT_STREQ(\"FooBarGroup\", header.producerGroup().c_str());\n  EXPECT_STREQ(\"TBW102\", header.defaultTopic().c_str());\n  EXPECT_EQ(3, header.defaultTopicQueueNumber());\n  EXPECT_EQ(0, header.sysFlag());\n  EXPECT_EQ(1575872212297, header.bornTimestamp());\n  EXPECT_EQ(124, header.flag());\n  EXPECT_STREQ(\"mock_properties\", header.properties().c_str());\n  EXPECT_EQ(0, header.reconsumeTimes());\n  EXPECT_FALSE(header.unitMode());\n  EXPECT_FALSE(header.batch());\n  EXPECT_EQ(32, header.maxReconsumeTimes());\n}\n\nTEST_F(SendMessageRequestHeaderTest, DecodeV2) {\n  std::string json = R\"EOF(\n  {\n    \"a\": \"FooBarGroup\",\n    \"b\": \"FooBar\",\n    \"c\": \"TBW102\",\n    \"d\": 
3,\n    \"e\": 1,\n    \"f\": 0,\n    \"g\": 1575872563203,\n    \"h\": 124,\n    \"j\": 0,\n    \"k\": false,\n    \"m\": false\n  }\n  )EOF\";\n\n  SendMessageRequestHeader header;\n  header.version(SendMessageRequestVersion::V2);\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  header.decode(doc);\n  EXPECT_STREQ(\"FooBar\", header.topic().c_str());\n  EXPECT_EQ(1, header.queueId());\n  EXPECT_STREQ(\"FooBarGroup\", header.producerGroup().c_str());\n  EXPECT_STREQ(\"TBW102\", header.defaultTopic().c_str());\n  EXPECT_EQ(3, header.defaultTopicQueueNumber());\n  EXPECT_EQ(0, header.sysFlag());\n  EXPECT_EQ(1575872563203, header.bornTimestamp());\n  EXPECT_EQ(124, header.flag());\n  EXPECT_STREQ(\"\", header.properties().c_str());\n  EXPECT_EQ(0, header.reconsumeTimes());\n  EXPECT_FALSE(header.unitMode());\n  EXPECT_FALSE(header.batch());\n  EXPECT_EQ(0, header.maxReconsumeTimes());\n}\n\nTEST_F(SendMessageRequestHeaderTest, DecodeV2Optional) {\n  std::string json = R\"EOF(\n  {\n    \"a\": \"FooBarGroup\",\n    \"b\": \"FooBar\",\n    \"c\": \"TBW102\",\n    \"d\": 3,\n    \"e\": 1,\n    \"f\": 0,\n    \"g\": 1575872563203,\n    \"h\": 124,\n    \"i\": \"mock_properties\",\n    \"j\": 0,\n    \"k\": false,\n    \"l\": 1,\n    \"m\": false\n  }\n  )EOF\";\n\n  SendMessageRequestHeader header;\n  header.version(SendMessageRequestVersion::V2);\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  header.decode(doc);\n  EXPECT_STREQ(\"FooBar\", header.topic().c_str());\n  EXPECT_EQ(1, header.queueId());\n  EXPECT_STREQ(\"FooBarGroup\", header.producerGroup().c_str());\n  EXPECT_STREQ(\"TBW102\", header.defaultTopic().c_str());\n  EXPECT_EQ(3, header.defaultTopicQueueNumber());\n  EXPECT_EQ(0, header.sysFlag());\n  EXPECT_EQ(1575872563203, header.bornTimestamp());\n  EXPECT_EQ(124, header.flag());\n  EXPECT_STREQ(\"mock_properties\", header.properties().c_str());\n  EXPECT_EQ(0, 
header.reconsumeTimes());\n  EXPECT_FALSE(header.unitMode());\n  EXPECT_FALSE(header.batch());\n  EXPECT_EQ(1, header.maxReconsumeTimes());\n}\n\nTEST_F(SendMessageRequestHeaderTest, DecodeV2OptionalNumSerializedAsString) {\n  std::string json = R\"EOF(\n  {\n    \"a\": \"FooBarGroup\",\n    \"b\": \"FooBar\",\n    \"c\": \"TBW102\",\n    \"d\": \"3\",\n    \"e\": \"1\",\n    \"f\": \"0\",\n    \"g\": \"1575872563203\",\n    \"h\": \"124\",\n    \"i\": \"mock_properties\",\n    \"j\": \"0\",\n    \"k\": \"false\",\n    \"l\": \"1\",\n    \"m\": \"false\"\n  }\n  )EOF\";\n\n  SendMessageRequestHeader header;\n  header.version(SendMessageRequestVersion::V2);\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  header.decode(doc);\n  EXPECT_STREQ(\"FooBar\", header.topic().c_str());\n  EXPECT_EQ(1, header.queueId());\n  EXPECT_STREQ(\"FooBarGroup\", header.producerGroup().c_str());\n  EXPECT_STREQ(\"TBW102\", header.defaultTopic().c_str());\n  EXPECT_EQ(3, header.defaultTopicQueueNumber());\n  EXPECT_EQ(0, header.sysFlag());\n  EXPECT_EQ(1575872563203, header.bornTimestamp());\n  EXPECT_EQ(124, header.flag());\n  EXPECT_STREQ(\"mock_properties\", header.properties().c_str());\n  EXPECT_EQ(0, header.reconsumeTimes());\n  EXPECT_FALSE(header.unitMode());\n  EXPECT_FALSE(header.batch());\n  EXPECT_EQ(1, header.maxReconsumeTimes());\n}\n\nTEST_F(SendMessageRequestHeaderTest, DecodeV3) {\n  std::string json = R\"EOF(\n  {\n    \"batch\": false,\n    \"bornTimestamp\": 1575872212297,\n    \"defaultTopic\": \"TBW102\",\n    \"defaultTopicQueueNums\": 3,\n    \"flag\": 124,\n    \"producerGroup\": \"FooBarGroup\",\n    \"queueId\": 1,\n    \"reconsumeTimes\": 0,\n    \"sysFlag\": 0,\n    \"topic\": \"FooBar\",\n    \"unitMode\": false\n  }\n  )EOF\";\n\n  SendMessageRequestHeader header;\n  ProtobufWkt::Value doc;\n  MessageUtil::loadFromJson(json, *(doc.mutable_struct_value()));\n  header.version(SendMessageRequestVersion::V3);\n  
header.decode(doc);\n}\n\nclass HeartbeatDataTest : public testing::Test {\npublic:\n  HeartbeatData data_;\n};\n\nTEST_F(HeartbeatDataTest, Decoding) {\n  std::string json = R\"EOF(\n  {\n    \"clientID\": \"127.0.0.1@23606\",\n    \"consumerDataSet\": [\n      {\n        \"consumeFromWhere\": \"CONSUME_FROM_LAST_OFFSET\",\n        \"consumeType\": \"CONSUME_ACTIVELY\",\n        \"groupName\": \"please_rename_unique_group_name_4\",\n        \"messageModel\": \"CLUSTERING\",\n        \"subscriptionDataSet\": [\n          {\n            \"classFilterMode\": false,\n            \"codeSet\": [],\n            \"expressionType\": \"TAG\",\n            \"subString\": \"*\",\n            \"subVersion\": 0,\n            \"tagsSet\": [],\n            \"topic\": \"test_topic\"\n          }\n        ],\n        \"unitMode\": false\n      }\n    ],\n    \"producerDataSet\": [\n      {\n        \"groupName\": \"CLIENT_INNER_PRODUCER\"\n      }\n    ]\n  }\n  )EOF\";\n\n  const char* clientId = \"127.0.0.1@23606\";\n  const char* consumerGroup = \"please_rename_unique_group_name_4\";\n\n  HeartbeatData heart_beat_data;\n  ProtobufWkt::Struct doc;\n  MessageUtil::loadFromJson(json, doc);\n\n  heart_beat_data.decode(doc);\n  EXPECT_STREQ(clientId, heart_beat_data.clientId().c_str());\n  EXPECT_EQ(1, heart_beat_data.consumerGroups().size());\n  EXPECT_STREQ(consumerGroup, heart_beat_data.consumerGroups()[0].c_str());\n}\n\nTEST_F(HeartbeatDataTest, DecodeClientIdMissing) {\n  std::string json = R\"EOF(\n  {\n    \"consumerDataSet\": [\n      {\n        \"consumeFromWhere\": \"CONSUME_FROM_LAST_OFFSET\",\n        \"consumeType\": \"CONSUME_ACTIVELY\",\n        \"groupName\": \"please_rename_unique_group_name_4\",\n        \"messageModel\": \"CLUSTERING\",\n        \"subscriptionDataSet\": [\n          {\n            \"classFilterMode\": false,\n            \"codeSet\": [],\n            \"expressionType\": \"TAG\",\n            \"subString\": \"*\",\n            \"subVersion\": 0,\n  
          \"tagsSet\": [],\n            \"topic\": \"test_topic\"\n          }\n        ],\n        \"unitMode\": false\n      }\n    ],\n    \"producerDataSet\": [\n      {\n        \"groupName\": \"CLIENT_INNER_PRODUCER\"\n      }\n    ]\n  }\n  )EOF\";\n\n  ProtobufWkt::Struct doc;\n  MessageUtil::loadFromJson(json, doc);\n  EXPECT_FALSE(data_.decode(doc));\n}\n\nTEST_F(HeartbeatDataTest, Encode) {\n  data_.clientId(\"CID_01\");\n  ProtobufWkt::Struct doc;\n  data_.encode(doc);\n  const auto& members = doc.fields();\n  EXPECT_TRUE(members.contains(\"clientID\"));\n  EXPECT_STREQ(\"CID_01\", members.at(\"clientID\").string_value().c_str());\n}\n\nclass RemotingCommandTest : public testing::Test {\npublic:\n  RemotingCommand cmd_;\n};\n\nTEST_F(RemotingCommandTest, FlagResponse) {\n  cmd_.markAsResponse();\n  EXPECT_EQ(1, cmd_.flag());\n}\n\nTEST_F(RemotingCommandTest, FlagOneway) {\n  cmd_.markAsOneway();\n  EXPECT_EQ(2, cmd_.flag());\n}\n\nTEST_F(RemotingCommandTest, Remark) {\n  const char* remark = \"OK\";\n  cmd_.remark(remark);\n  EXPECT_STREQ(remark, cmd_.remark().c_str());\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc",
    "content": "#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h\"\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h\"\n#include \"envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.validate.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/metadata.h\"\n#include \"extensions/filters/network/rocketmq_proxy/router/route_matcher.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\nnamespace Router {\n\nusing RouteConfigurationProto =\n    envoy::extensions::filters::network::rocketmq_proxy::v3::RouteConfiguration;\n\nRouteConfigurationProto parseRouteConfigurationFromV2Yaml(const std::string& yaml) {\n  RouteConfigurationProto route_config;\n  TestUtility::loadFromYaml(yaml, route_config);\n  TestUtility::validate(route_config);\n  return route_config;\n}\n\nTEST(RocketmqRouteMatcherTest, RouteWithHeaders) {\n  const std::string yaml = R\"EOF(\nname: default_route\nroutes:\n  - match:\n      topic:\n        exact: test_topic\n      headers:\n        - name: code\n          exact_match: '310'\n    route:\n      cluster: fake_cluster\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k1: v1\n)EOF\";\n\n  RouteConfigurationProto config = parseRouteConfigurationFromV2Yaml(yaml);\n\n  MessageMetadata metadata;\n  std::string topic_name = \"test_topic\";\n  metadata.setTopicName(topic_name);\n  uint64_t code = 310;\n  metadata.headers().addCopy(Http::LowerCaseString(\"code\"), code);\n  RouteMatcher matcher(config);\n  const Envoy::Router::MetadataMatchCriteria* criteria =\n      matcher.route(metadata)->routeEntry()->metadataMatchCriteria();\n  const std::vector<Envoy::Router::MetadataMatchCriterionConstSharedPtr>& mmc =\n  
    criteria->metadataMatchCriteria();\n\n  ProtobufWkt::Value v1;\n  v1.set_string_value(\"v1\");\n  HashedValue hv1(v1);\n\n  EXPECT_EQ(1, mmc.size());\n  EXPECT_EQ(\"k1\", mmc[0]->name());\n  EXPECT_EQ(hv1, mmc[0]->value());\n}\n\n} // namespace Router\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/router_test.cc",
    "content": "#include \"extensions/filters/network/rocketmq_proxy/config.h\"\n#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n#include \"extensions/filters/network/rocketmq_proxy/router/router.h\"\n#include \"extensions/filters/network/rocketmq_proxy/well_known_names.h\"\n\n#include \"test/extensions/filters/network/rocketmq_proxy/mocks.h\"\n#include \"test/extensions/filters/network/rocketmq_proxy/utility.h\"\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ContainsRegex;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\nnamespace Router {\n\nclass RocketmqRouterTestBase {\npublic:\n  RocketmqRouterTestBase()\n      : config_(rocketmq_proxy_config_, context_),\n        cluster_info_(std::make_shared<Upstream::MockClusterInfo>()) {\n    conn_manager_ =\n        std::make_unique<ConnectionManager>(config_, context_.dispatcher().timeSource());\n    conn_manager_->initializeReadFilterCallbacks(filter_callbacks_);\n  }\n\n  ~RocketmqRouterTestBase() { filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); }\n\n  void initializeRouter() {\n    router_ = std::make_unique<RouterImpl>(context_.clusterManager());\n    EXPECT_EQ(nullptr, router_->downstreamConnection());\n  }\n\n  void initSendMessageRequest(std::string topic_name = \"test_topic\", bool is_oneway = false) {\n    RemotingCommandPtr request = std::make_unique<RemotingCommand>();\n    request->code(static_cast<int>(RequestCode::SendMessageV2));\n    if (is_oneway) {\n      request->flag(2);\n    }\n    SendMessageRequestHeader* header = new SendMessageRequestHeader();\n    absl::string_view t = topic_name;\n    header->topic(t);\n    CommandCustomHeaderPtr custom_header(header);\n    request->customHeader(custom_header);\n    active_message_ =\n        std::make_unique<NiceMock<MockActiveMessage>>(*conn_manager_, 
std::move(request));\n\n    // Not yet implemented:\n    EXPECT_EQ(nullptr, router_->metadataMatchCriteria());\n  }\n\n  void initPopMessageRequest() {\n    Buffer::OwnedImpl buffer;\n    BufferUtility::fillRequestBuffer(buffer, RequestCode::PopMessage);\n\n    bool underflow = false;\n    bool has_error = false;\n\n    RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error);\n\n    active_message_ =\n        std::make_unique<NiceMock<MockActiveMessage>>(*conn_manager_, std::move(request));\n  }\n\n  void initAckMessageRequest() {\n    Buffer::OwnedImpl buffer;\n    BufferUtility::fillRequestBuffer(buffer, RequestCode::AckMessage);\n\n    bool underflow = false;\n    bool has_error = false;\n\n    RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error);\n\n    active_message_ =\n        std::make_unique<NiceMock<MockActiveMessage>>(*conn_manager_, std::move(request));\n  }\n\n  void initOneWayAckMessageRequest() {\n    RemotingCommandPtr request = std::make_unique<RemotingCommand>();\n    request->code(static_cast<int>(RequestCode::AckMessage));\n    request->flag(2);\n    std::unique_ptr<AckMessageRequestHeader> header = std::make_unique<AckMessageRequestHeader>();\n    header->consumerGroup(\"test_cg\");\n    header->topic(\"test_topic\");\n    header->queueId(0);\n    header->extraInfo(\"test_extra\");\n    header->offset(1);\n    CommandCustomHeaderPtr ptr(header.release());\n    request->customHeader(ptr);\n    active_message_ =\n        std::make_unique<NiceMock<MockActiveMessage>>(*conn_manager_, std::move(request));\n  }\n\n  void startRequest() { router_->sendRequestToUpstream(*active_message_); }\n\n  void connectUpstream() {\n    context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_);\n  }\n\n  void startRequestWithExistingConnection() {\n    EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_))\n        .WillOnce(\n            Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> 
Tcp::ConnectionPool::Cancellable* {\n              context_.cluster_manager_.tcp_conn_pool_.newConnectionImpl(cb);\n              context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_);\n              return nullptr;\n            }));\n    router_->sendRequestToUpstream(*active_message_);\n  }\n\n  void receiveEmptyResponse() {\n    Buffer::OwnedImpl buffer;\n    router_->onAboveWriteBufferHighWatermark();\n    router_->onBelowWriteBufferLowWatermark();\n    router_->onUpstreamData(buffer, false);\n  }\n\n  void receiveSendMessageResponse(bool end_stream) {\n    Buffer::OwnedImpl buffer;\n    BufferUtility::fillResponseBuffer(buffer, RequestCode::SendMessageV2, ResponseCode::Success);\n    router_->onUpstreamData(buffer, end_stream);\n  }\n\n  void receivePopMessageResponse() {\n    Buffer::OwnedImpl buffer;\n    BufferUtility::fillResponseBuffer(buffer, RequestCode::PopMessage, ResponseCode::Success);\n    router_->onUpstreamData(buffer, false);\n  }\n\n  void receiveAckMessageResponse() {\n    Buffer::OwnedImpl buffer;\n    BufferUtility::fillResponseBuffer(buffer, RequestCode::AckMessage, ResponseCode::Success);\n    router_->onUpstreamData(buffer, false);\n  }\n\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  ConfigImpl::RocketmqProxyConfig rocketmq_proxy_config_;\n  ConfigImpl config_;\n  std::unique_ptr<ConnectionManager> conn_manager_;\n\n  std::unique_ptr<Router> router_;\n\n  std::unique_ptr<NiceMock<MockActiveMessage>> active_message_;\n  NiceMock<Network::MockClientConnection> upstream_connection_;\n\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_info_;\n  NiceMock<Upstream::MockThreadLocalCluster> thread_local_cluster_;\n};\n\nclass RocketmqRouterTest : public RocketmqRouterTestBase, public testing::Test {};\n\nTEST_F(RocketmqRouterTest, PoolRemoteConnectionFailure) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  
EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*remote connection failure*.\"));\n      }));\n\n  startRequest();\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(\n      Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n}\n\nTEST_F(RocketmqRouterTest, PoolTimeout) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*timeout*.\"));\n      }));\n  EXPECT_CALL(*active_message_, onReset());\n\n  startRequest();\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(\n      Tcp::ConnectionPool::PoolFailureReason::Timeout);\n}\n\nTEST_F(RocketmqRouterTest, PoolLocalConnectionFailure) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*local connection failure*.\"));\n      }));\n  EXPECT_CALL(*active_message_, onReset());\n\n  startRequest();\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(\n      Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure);\n}\n\nTEST_F(RocketmqRouterTest, PoolOverflowFailure) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*overflow*.\"));\n      }));\n  EXPECT_CALL(*active_message_, onReset());\n\n  startRequest();\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(\n      Tcp::ConnectionPool::PoolFailureReason::Overflow);\n}\n\nTEST_F(RocketmqRouterTest, ClusterMaintenanceMode) {\n  
initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*Cluster under maintenance*.\"));\n      }));\n  EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.cluster_.info_, maintenanceMode())\n      .WillOnce(Return(true));\n  EXPECT_CALL(*active_message_, onReset());\n\n  startRequest();\n}\n\nTEST_F(RocketmqRouterTest, NoHealthyHosts) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*No host available*.\"));\n      }));\n  EXPECT_CALL(context_.cluster_manager_, tcpConnPoolForCluster(\"fake_cluster\", _, _))\n      .WillOnce(Return(nullptr));\n  EXPECT_CALL(*active_message_, onReset());\n\n  startRequest();\n}\n\nTEST_F(RocketmqRouterTest, NoRouteForRequest) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*No route for current request*.\"));\n      }));\n  EXPECT_CALL(*active_message_, route()).WillRepeatedly(Return(nullptr));\n  EXPECT_CALL(*active_message_, onReset());\n\n  startRequest();\n}\n\nTEST_F(RocketmqRouterTest, NoCluster) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onReset());\n  EXPECT_CALL(context_.cluster_manager_, get(_)).WillRepeatedly(Return(nullptr));\n\n  startRequest();\n}\n\nTEST_F(RocketmqRouterTest, CallWithEmptyResponse) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  startRequest();\n  connectUpstream();\n\n  EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0);\n  EXPECT_CALL(*active_message_, 
onReset()).Times(0);\n\n  receiveEmptyResponse();\n}\n\nTEST_F(RocketmqRouterTest, OneWayRequest) {\n  initializeRouter();\n  initSendMessageRequest(\"test_topic\", true);\n  startRequest();\n\n  EXPECT_CALL(*active_message_, onReset());\n\n  connectUpstream();\n\n  EXPECT_TRUE(active_message_->metadata()->isOneWay());\n}\n\nTEST_F(RocketmqRouterTest, ReceiveSendMessageResponse) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  startRequest();\n  connectUpstream();\n\n  EXPECT_CALL(*active_message_, sendResponseToDownstream());\n  EXPECT_CALL(*active_message_, onReset());\n\n  receiveSendMessageResponse(false);\n}\n\nTEST_F(RocketmqRouterTest, ReceivePopMessageResponse) {\n  initializeRouter();\n  initPopMessageRequest();\n\n  startRequest();\n  connectUpstream();\n\n  EXPECT_CALL(*active_message_, sendResponseToDownstream());\n  EXPECT_CALL(*active_message_, onReset());\n\n  receivePopMessageResponse();\n}\n\nTEST_F(RocketmqRouterTest, ReceiveAckMessageResponse) {\n  initializeRouter();\n  initAckMessageRequest();\n\n  startRequest();\n  connectUpstream();\n\n  EXPECT_CALL(*active_message_, sendResponseToDownstream());\n  EXPECT_CALL(*active_message_, onReset());\n\n  receiveAckMessageResponse();\n}\n\nTEST_F(RocketmqRouterTest, OneWayAckMessage) {\n  initializeRouter();\n  initOneWayAckMessageRequest();\n\n  startRequest();\n\n  EXPECT_CALL(*active_message_, onReset());\n\n  connectUpstream();\n}\n\nTEST_F(RocketmqRouterTest, ReceivedSendMessageResponseWithDecodeError) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*Failed to decode response*.\"));\n      }));\n\n  EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n\n  startRequest();\n  connectUpstream();\n  std::string json = R\"EOF(\n  {\n    \"language\": \"JAVA\",\n    
\"version\": 2,\n    \"opaque\": 1,\n    \"flag\": 1,\n    \"serializeTypeCurrentRPC\": \"JSON\"\n  }\n  )EOF\";\n  Buffer::OwnedImpl buffer;\n  buffer.writeBEInt<int32_t>(4 + 4 + json.size());\n  buffer.writeBEInt<int32_t>(json.size());\n  buffer.add(json);\n\n  EXPECT_CALL(*active_message_, onReset()).WillRepeatedly(Invoke([&]() -> void {\n    conn_manager_->deferredDelete(**conn_manager_->activeMessageList().begin());\n  }));\n  EXPECT_CALL(*active_message_, onReset());\n\n  LinkedList::moveIntoList(std::move(active_message_), conn_manager_->activeMessageList());\n  router_->onUpstreamData(buffer, false);\n}\n\nTEST_F(RocketmqRouterTest, ReceivedSendMessageResponseWithStreamEnd) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n\n  startRequest();\n  connectUpstream();\n\n  EXPECT_CALL(*active_message_, sendResponseToDownstream());\n  EXPECT_CALL(*active_message_, onReset());\n\n  receiveSendMessageResponse(true);\n}\n\nTEST_F(RocketmqRouterTest, UpstreamRemoteCloseMidResponse) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*Connection to upstream is closed*.\"));\n      }));\n\n  startRequest();\n  connectUpstream();\n\n  EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0);\n  EXPECT_CALL(*active_message_, onReset());\n\n  router_->onEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(RocketmqRouterTest, UpstreamLocalCloseMidResponse) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_))\n      .Times(1)\n      .WillOnce(Invoke([&](absl::string_view error_message) -> void {\n        EXPECT_THAT(error_message, ContainsRegex(\".*Connection to upstream has been closed*.\"));\n      }));\n\n  startRequest();\n  
connectUpstream();\n\n  EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0);\n  EXPECT_CALL(*active_message_, onReset());\n\n  router_->onEvent(Network::ConnectionEvent::LocalClose);\n}\n\nTEST_F(RocketmqRouterTest, UpstreamConnected) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  startRequest();\n  connectUpstream();\n\n  EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0);\n  EXPECT_CALL(*active_message_, onReset()).Times(0);\n\n  router_->onEvent(Network::ConnectionEvent::Connected);\n}\n\nTEST_F(RocketmqRouterTest, StartRequestWithExistingConnection) {\n  initializeRouter();\n  initSendMessageRequest();\n\n  EXPECT_CALL(*active_message_, onError(_)).Times(0);\n  EXPECT_CALL(*active_message_, onReset()).Times(0);\n\n  startRequestWithExistingConnection();\n}\n\n} // namespace Router\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc",
    "content": "#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/rocketmq_proxy/topic_route.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nTEST(TopicRouteTest, Serialization) {\n  QueueData queue_data(\"broker-a\", 8, 8, 6);\n  ProtobufWkt::Struct doc;\n  queue_data.encode(doc);\n\n  const auto& members = doc.fields();\n\n  ASSERT_STREQ(\"broker-a\", members.at(\"brokerName\").string_value().c_str());\n  ASSERT_EQ(queue_data.brokerName(), members.at(\"brokerName\").string_value());\n  ASSERT_EQ(queue_data.readQueueNum(), members.at(\"readQueueNums\").number_value());\n  ASSERT_EQ(queue_data.writeQueueNum(), members.at(\"writeQueueNums\").number_value());\n  ASSERT_EQ(queue_data.perm(), members.at(\"perm\").number_value());\n}\n\nTEST(BrokerDataTest, Serialization) {\n  absl::node_hash_map<int64_t, std::string> broker_addrs;\n  std::string dummy_address(\"127.0.0.1:10911\");\n  for (int64_t i = 0; i < 3; i++) {\n    broker_addrs[i] = dummy_address;\n  }\n  std::string cluster(\"DefaultCluster\");\n  std::string broker_name(\"broker-a\");\n  BrokerData broker_data(cluster, broker_name, std::move(broker_addrs));\n\n  ProtobufWkt::Struct doc;\n  broker_data.encode(doc);\n\n  const auto& members = doc.fields();\n\n  ASSERT_STREQ(cluster.c_str(), members.at(\"cluster\").string_value().c_str());\n  ASSERT_STREQ(broker_name.c_str(), members.at(\"brokerName\").string_value().c_str());\n}\n\nTEST(TopicRouteDataTest, Serialization) {\n  TopicRouteData topic_route_data;\n\n  for (int i = 0; i < 16; i++) {\n    topic_route_data.queueData().push_back(QueueData(\"broker-a\", 8, 8, 6));\n  }\n\n  std::string cluster(\"DefaultCluster\");\n  std::string broker_name(\"broker-a\");\n  std::string dummy_address(\"127.0.0.1:10911\");\n\n  for (int i = 0; i < 16; i++) {\n    absl::node_hash_map<int64_t, std::string> 
broker_addrs;\n    for (int64_t i = 0; i < 3; i++) {\n      broker_addrs[i] = dummy_address;\n    }\n    topic_route_data.brokerData().emplace_back(\n        BrokerData(cluster, broker_name, std::move(broker_addrs)));\n  }\n  ProtobufWkt::Struct doc;\n  EXPECT_NO_THROW(topic_route_data.encode(doc));\n  MessageUtil::getJsonStringFromMessage(doc);\n}\n\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/utility.cc",
    "content": "#include \"test/extensions/filters/network/rocketmq_proxy/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nconst std::string BufferUtility::topic_name_ = \"test_topic\";\nconst std::string BufferUtility::client_id_ = \"test_client_id\";\nconst std::string BufferUtility::producer_group_ = \"test_pg\";\nconst std::string BufferUtility::consumer_group_ = \"test_cg\";\nconst std::string BufferUtility::extra_info_ = \"test_extra\";\nconst std::string BufferUtility::msg_body_ = \"_Apache_RocketMQ_\";\nconst int BufferUtility::queue_id_ = 1;\nint BufferUtility::opaque_ = 0;\n\nvoid BufferUtility::fillRequestBuffer(Buffer::OwnedImpl& buffer, RequestCode code) {\n\n  RemotingCommandPtr cmd = std::make_unique<RemotingCommand>();\n  cmd->code(static_cast<int>(code));\n  cmd->opaque(++opaque_);\n\n  switch (code) {\n  case RequestCode::SendMessage: {\n    std::unique_ptr<SendMessageRequestHeader> header = std::make_unique<SendMessageRequestHeader>();\n    header->topic(topic_name_);\n    header->version(SendMessageRequestVersion::V1);\n    std::string msg_body = msg_body_;\n    cmd->body().add(msg_body);\n    CommandCustomHeaderPtr ptr(header.release());\n    cmd->customHeader(ptr);\n  } break;\n\n  case RequestCode::HeartBeat: {\n    std::string heartbeat_data = R\"EOF(\n    {\n      \"clientID\": \"127.0.0.1@90330\",\n      \"consumerDataSet\": [\n        {\n          \"consumeFromWhere\": \"CONSUME_FROM_FIRST_OFFSET\",\n          \"consumeType\": \"CONSUME_PASSIVELY\",\n          \"groupName\": \"test_cg\",\n          \"messageModel\": \"CLUSTERING\",\n          \"subscriptionDataSet\": [\n            {\n              \"classFilterMode\": false,\n              \"codeSet\": [],\n              \"expressionType\": \"TAG\",\n              \"subString\": \"*\",\n              \"subVersion\": 1575630587925,\n              \"tagsSet\": [],\n              \"topic\": 
\"test_topic\"\n            },\n            {\n              \"classFilterMode\": false,\n              \"codeSet\": [],\n              \"expressionType\": \"TAG\",\n              \"subString\": \"*\",\n              \"subVersion\": 1575630587945,\n              \"tagsSet\": [],\n              \"topic\": \"%RETRY%please_rename_unique_group_name_4\"\n            }\n          ],\n         \"unitMode\": false\n        }\n      ],\n      \"producerDataSet\": [\n        {\n          \"groupName\": \"CLIENT_INNER_PRODUCER\"\n        }\n      ]\n    }\n    )EOF\";\n    cmd->body().add(heartbeat_data);\n  } break;\n\n  case RequestCode::UnregisterClient: {\n    std::unique_ptr<UnregisterClientRequestHeader> header =\n        std::make_unique<UnregisterClientRequestHeader>();\n    header->clientId(client_id_);\n    header->consumerGroup(consumer_group_);\n    CommandCustomHeaderPtr ptr(header.release());\n    cmd->customHeader(ptr);\n    break;\n  }\n\n  case RequestCode::GetRouteInfoByTopic: {\n    std::unique_ptr<GetRouteInfoRequestHeader> header =\n        std::make_unique<GetRouteInfoRequestHeader>();\n    header->topic(topic_name_);\n    CommandCustomHeaderPtr ptr(header.release());\n    cmd->customHeader(ptr);\n    break;\n  }\n\n  case RequestCode::GetConsumerListByGroup: {\n    std::unique_ptr<GetConsumerListByGroupRequestHeader> header =\n        std::make_unique<GetConsumerListByGroupRequestHeader>();\n    header->consumerGroup(consumer_group_);\n    CommandCustomHeaderPtr ptr(header.release());\n    cmd->customHeader(ptr);\n    break;\n  }\n\n  case RequestCode::SendMessageV2: {\n    std::unique_ptr<SendMessageRequestHeader> header = std::make_unique<SendMessageRequestHeader>();\n    header->topic(topic_name_);\n    header->version(SendMessageRequestVersion::V2);\n    header->producerGroup(producer_group_);\n    std::string msg_body = msg_body_;\n    cmd->body().add(msg_body);\n    CommandCustomHeaderPtr ptr(header.release());\n    cmd->customHeader(ptr);\n    
break;\n  }\n\n  case RequestCode::PopMessage: {\n    std::unique_ptr<PopMessageRequestHeader> header = std::make_unique<PopMessageRequestHeader>();\n    header->consumerGroup(consumer_group_);\n    header->topic(topic_name_);\n    header->queueId(queue_id_);\n    header->maxMsgNum(32);\n    header->invisibleTime(6000);\n    header->pollTime(3000);\n    header->bornTime(1000);\n    header->initMode(4);\n\n    CommandCustomHeaderPtr ptr(header.release());\n    cmd->customHeader(ptr);\n    break;\n  }\n\n  case RequestCode::AckMessage: {\n    std::unique_ptr<AckMessageRequestHeader> header = std::make_unique<AckMessageRequestHeader>();\n    header->consumerGroup(consumer_group_);\n    header->topic(topic_name_);\n    header->queueId(queue_id_);\n    header->extraInfo(extra_info_);\n    header->offset(1);\n    CommandCustomHeaderPtr ptr(header.release());\n    cmd->customHeader(ptr);\n    break;\n  }\n\n  default:\n    break;\n  }\n  Encoder encoder_;\n  buffer.drain(buffer.length());\n  encoder_.encode(cmd, buffer);\n}\n\nvoid BufferUtility::fillResponseBuffer(Buffer::OwnedImpl& buffer, RequestCode req_code,\n                                       ResponseCode resp_code) {\n  RemotingCommandPtr cmd = std::make_unique<RemotingCommand>();\n  cmd->code(static_cast<int>(resp_code));\n  cmd->opaque(opaque_);\n\n  switch (req_code) {\n  case RequestCode::SendMessageV2: {\n    std::unique_ptr<SendMessageResponseHeader> header =\n        std::make_unique<SendMessageResponseHeader>();\n    header->msgIdForTest(\"MSG_ID_01\");\n    header->queueId(1);\n    header->queueOffset(100);\n    header->transactionId(\"TX_01\");\n    break;\n  }\n  case RequestCode::PopMessage: {\n    std::unique_ptr<PopMessageResponseHeader> header = std::make_unique<PopMessageResponseHeader>();\n    header->popTime(1587386521445);\n    header->invisibleTime(50000);\n    header->reviveQid(5);\n    std::string msg_offset_info = \"0 6 147\";\n    header->msgOffsetInfo(msg_offset_info);\n    std::string 
start_offset_info = \"0 6 147\";\n    header->startOffsetInfo(start_offset_info);\n    CommandCustomHeaderPtr ptr(header.release());\n    cmd->customHeader(ptr);\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\xD5'}));\n    cmd->body().add(std::string({'\\xDA', '\\xA3', '\\x20', '\\xA7'}));\n    cmd->body().add(std::string({'\\x01', '\\xE5', '\\x9A', '\\x3E'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x06'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x00'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x00'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x93'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x00'}));\n    cmd->body().add(std::string({'\\x00', '\\x4A', '\\xE0', '\\x46'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x00'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x01', '\\x71'}));\n    cmd->body().add(std::string({'\\x97', '\\x98', '\\x71', '\\xB6'}));\n    cmd->body().add(std::string({'\\x0A', '\\x65', '\\xC4', '\\x91'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x1A', '\\xF4'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x01', '\\x71'}));\n    cmd->body().add(std::string({'\\x97', '\\x98', '\\x71', '\\xAF'}));\n    cmd->body().add(std::string({'\\x0A', '\\x65', '\\xC1', '\\x2D'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x1F', '\\x53'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x00'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x00'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x00'}));\n    cmd->body().add(std::string({'\\x00', '\\x00', '\\x00', '\\x11'}));\n    cmd->body().add(std::string(\"Hello RocketMQ 52\"));\n    cmd->body().add(std::string({'\\x04'}));\n    cmd->body().add(std::string(\"mesh\"));\n    cmd->body().add(std::string({'\\x00', '\\x65'}));\n    
cmd->body().add(std::string(\"TRACE_ON\"));\n    cmd->body().add(std::string({'\\x01'}));\n    cmd->body().add(std::string(\"true\"));\n    cmd->body().add(std::string({'\\x02'}));\n    cmd->body().add(std::string(\"MSG_REGION\"));\n    cmd->body().add(std::string({'\\x01'}));\n    cmd->body().add(std::string(\"DefaultRegion\"));\n    cmd->body().add(std::string({'\\x02'}));\n    cmd->body().add(std::string(\"UNIQ_KEY\"));\n    cmd->body().add(std::string({'\\x01'}));\n    cmd->body().add(std::string(\"1EE10882893E18B4AAC2664649B60034\"));\n    cmd->body().add(std::string({'\\x02'}));\n    cmd->body().add(std::string(\"WAIT\"));\n    cmd->body().add(std::string({'\\x01'}));\n    cmd->body().add(std::string(\"true\"));\n    cmd->body().add(std::string({'\\x02'}));\n    cmd->body().add(std::string(\"TAGS\"));\n    cmd->body().add(std::string({'\\x01'}));\n    cmd->body().add(std::string(\"TagA\"));\n    cmd->body().add(std::string({'\\x02'}));\n    break;\n  }\n  default:\n    break;\n  }\n  Encoder encoder_;\n  buffer.drain(buffer.length());\n  encoder_.encode(cmd, buffer);\n}\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/network/rocketmq_proxy/utility.h",
    "content": "#pragma once\n\n#include \"extensions/filters/network/rocketmq_proxy/config.h\"\n#include \"extensions/filters/network/rocketmq_proxy/conn_manager.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace RocketmqProxy {\n\nclass BufferUtility {\npublic:\n  static void fillRequestBuffer(Buffer::OwnedImpl& buffer, RequestCode code);\n  static void fillResponseBuffer(Buffer::OwnedImpl& buffer, RequestCode req_code,\n                                 ResponseCode resp_code);\n\n  const static std::string topic_name_;\n  const static std::string client_id_;\n  const static std::string producer_group_;\n  const static std::string consumer_group_;\n  const static std::string msg_body_;\n  const static std::string extra_info_;\n  const static int queue_id_;\n  static int opaque_;\n};\n} // namespace RocketmqProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/filters/network/sni_cluster/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"sni_cluster_test\",\n    srcs = [\"sni_cluster_test.cc\"],\n    extension_name = \"envoy.filters.network.sni_cluster\",\n    deps = [\n        \"//source/extensions/filters/network/sni_cluster\",\n        \"//source/extensions/filters/network/sni_cluster:config\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/sni_cluster/sni_cluster_test.cc",
    "content": "#include \"common/tcp_proxy/tcp_proxy.h\"\n\n#include \"extensions/filters/network/sni_cluster/config.h\"\n#include \"extensions/filters/network/sni_cluster/sni_cluster.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniCluster {\n\n// Test that a SniCluster filter config works.\nTEST(SniCluster, ConfigTest) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  SniClusterNetworkFilterConfigFactory factory;\n\n  Network::FilterFactoryCb cb =\n      factory.createFilterFactoryFromProto(*factory.createEmptyConfigProto(), context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addReadFilter(_));\n  cb(connection);\n}\n\n// Test that per connection filter config is set if SNI is available\nTEST(SniCluster, SetTcpProxyClusterOnlyIfSniIsPresent) {\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks;\n\n  NiceMock<StreamInfo::MockStreamInfo> stream_info;\n  ON_CALL(filter_callbacks.connection_, streamInfo()).WillByDefault(ReturnRef(stream_info));\n  ON_CALL(Const(filter_callbacks.connection_), streamInfo()).WillByDefault(ReturnRef(stream_info));\n\n  SniClusterFilter filter;\n  filter.initializeReadFilterCallbacks(filter_callbacks);\n\n  // no sni\n  {\n    ON_CALL(filter_callbacks.connection_, requestedServerName())\n        .WillByDefault(Return(EMPTY_STRING));\n    filter.onNewConnection();\n\n    EXPECT_FALSE(stream_info.filterState()->hasData<TcpProxy::PerConnectionCluster>(\n        TcpProxy::PerConnectionCluster::key()));\n  }\n\n  // with sni\n  {\n    ON_CALL(filter_callbacks.connection_, requestedServerName())\n        .WillByDefault(Return(\"filter_state_cluster\"));\n    
filter.onNewConnection();\n\n    EXPECT_TRUE(stream_info.filterState()->hasData<TcpProxy::PerConnectionCluster>(\n        TcpProxy::PerConnectionCluster::key()));\n\n    auto per_connection_cluster =\n        stream_info.filterState()->getDataReadOnly<TcpProxy::PerConnectionCluster>(\n            TcpProxy::PerConnectionCluster::key());\n    EXPECT_EQ(per_connection_cluster.value(), \"filter_state_cluster\");\n  }\n}\n\n} // namespace SniCluster\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"proxy_filter_test\",\n    srcs = [\"proxy_filter_test.cc\"],\n    extension_name = \"envoy.filters.network.sni_dynamic_forward_proxy\",\n    deps = [\n        \"//source/extensions/filters/network:well_known_names\",\n        \"//source/extensions/filters/network/sni_dynamic_forward_proxy:config\",\n        \"//test/extensions/common/dynamic_forward_proxy:mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/upstream:basic_resource_limit_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"proxy_filter_integration_test\",\n    srcs = [\"proxy_filter_integration_test.cc\"],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    extension_name = \"envoy.filters.network.sni_dynamic_forward_proxy\",\n    deps = [\n        \"//source/extensions/clusters/dynamic_forward_proxy:cluster\",\n        \"//source/extensions/filters/listener/tls_inspector:config\",\n        \"//source/extensions/filters/network/sni_dynamic_forward_proxy:config\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/integration:http_integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/ssl_utility.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass SniDynamicProxyFilterIntegrationTest\n    : public testing::TestWithParam<Network::Address::IpVersion>,\n      public Event::TestUsingSimulatedTime,\n      public HttpIntegrationTest {\npublic:\n  // This test is using HTTP integration test to use the utilities to pass SNI from downstream\n  // to upstream. The config being tested is tcp_proxy.\n  SniDynamicProxyFilterIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(),\n                            ConfigHelper::tcpProxyConfig()) {}\n\n  void setup(uint64_t max_hosts = 1024, uint32_t max_pending_requests = 1024) {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP1);\n\n    config_helper_.addListenerFilter(ConfigHelper::tlsInspectorFilter());\n\n    config_helper_.addConfigModifier([this, max_hosts, max_pending_requests](\n                                         envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // Switch predefined cluster_0 to CDS filesystem sourcing.\n      bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path());\n      bootstrap.mutable_static_resources()->clear_clusters();\n\n      const std::string filter =\n          fmt::format(R\"EOF(\nname: envoy.filters.http.dynamic_forward_proxy\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig\n  dns_cache_config:\n    name: foo\n    
dns_lookup_family: {}\n    max_hosts: {}\n    dns_cache_circuit_breaker:\n      max_pending_requests: {}\n  port_value: {}\n)EOF\",\n                      Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts,\n                      max_pending_requests, fake_upstreams_[0]->localAddress()->ip()->port());\n      config_helper_.addNetworkFilter(filter);\n    });\n\n    // Setup the initial CDS cluster.\n    cluster_.mutable_connect_timeout()->CopyFrom(\n        Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n    cluster_.set_name(\"cluster_0\");\n    cluster_.set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED);\n\n    const std::string cluster_type_config = fmt::format(\n        R\"EOF(\nname: envoy.clusters.dynamic_forward_proxy\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig\n  dns_cache_config:\n    name: foo\n    dns_lookup_family: {}\n    max_hosts: {}\n    dns_cache_circuit_breaker:\n      max_pending_requests: {}\n)EOF\",\n        Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests);\n\n    TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type());\n\n    // Load the CDS cluster and wait for it to initialize.\n    cds_helper_.setCds({cluster_});\n    HttpIntegrationTest::initialize();\n    test_server_->waitForCounterEq(\"cluster_manager.cluster_added\", 1);\n    test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 0);\n  }\n\n  void createUpstreams() override {\n    addFakeUpstream(\n        Ssl::createFakeUpstreamSslContext(upstream_cert_name_, context_manager_, factory_context_),\n        FakeHttpConnection::Type::HTTP1);\n  }\n\n  Network::ClientConnectionPtr\n  makeSslClientConnection(const Ssl::ClientSslTransportOptions& options) {\n\n    Network::Address::InstanceConstSharedPtr address =\n        Ssl::getSslAddress(version_, lookupPort(\"http\"));\n    auto client_transport_socket_factory_ptr 
=\n        Ssl::createClientSslTransportSocketFactory(options, context_manager_, *api_);\n    return dispatcher_->createClientConnection(\n        address, Network::Address::InstanceConstSharedPtr(),\n        client_transport_socket_factory_ptr->createTransportSocket({}), nullptr);\n  }\n\n  std::string upstream_cert_name_{\"server\"};\n  CdsHelper cds_helper_;\n  envoy::config::cluster::v3::Cluster cluster_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SniDynamicProxyFilterIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verify that upstream TLS works with auto verification for SAN as well as auto setting SNI.\nTEST_P(SniDynamicProxyFilterIntegrationTest, UpstreamTls) {\n  setup();\n  fake_upstreams_[0]->setReadDisableOnNewConnection(false);\n\n  codec_client_ = makeHttpConnection(\n      makeSslClientConnection(Ssl::ClientSslTransportOptions().setSni(\"localhost\")));\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(\n      *dispatcher_, fake_upstream_connection_, TestUtility::DefaultTimeout, max_request_headers_kb_,\n      max_request_headers_count_));\n\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},\n      {\":path\", \"/test/long/url\"},\n      {\":scheme\", \"http\"},\n      {\":authority\",\n       fmt::format(\"localhost:{}\", fake_upstreams_[0]->localAddress()->ip()->port())}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  checkSimpleRequestSuccess(0, 0, response.get());\n}\n\nTEST_P(SniDynamicProxyFilterIntegrationTest, CircuitBreakerInvokedUpstreamTls) {\n  setup(1024, 0);\n\n  codec_client_ = makeRawHttpConnection(\n      makeSslClientConnection(Ssl::ClientSslTransportOptions().setSni(\"localhost\")), 
absl::nullopt);\n  ASSERT_FALSE(codec_client_->connected());\n  EXPECT_EQ(1, test_server_->counter(\"dns_cache.foo.dns_rq_pending_overflow\")->value());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc",
    "content": "#include \"envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h\"\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/extensions/common/dynamic_forward_proxy/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/upstream/basic_resource_limit.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/transport_socket_match.h\"\n\nusing testing::AtLeast;\nusing testing::Eq;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace SniDynamicForwardProxy {\nnamespace {\n\nusing LoadDnsCacheEntryStatus = Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryStatus;\nusing MockLoadDnsCacheEntryResult =\n    Common::DynamicForwardProxy::MockDnsCache::MockLoadDnsCacheEntryResult;\n\nclass SniDynamicProxyFilterTest\n    : public testing::Test,\n      public Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory {\npublic:\n  SniDynamicProxyFilterTest() {\n    FilterConfig proto_config;\n    proto_config.set_port_value(443);\n    EXPECT_CALL(*dns_cache_manager_, getCache(_));\n    filter_config_ = std::make_shared<ProxyFilterConfig>(proto_config, *this, cm_);\n    filter_ = std::make_unique<ProxyFilter>(filter_config_);\n    filter_->initializeReadFilterCallbacks(callbacks_);\n\n    // Allow for an otherwise strict mock.\n    ON_CALL(callbacks_, connection()).WillByDefault(ReturnRef(connection_));\n    EXPECT_CALL(callbacks_, connection()).Times(AtLeast(0));\n  }\n\n  ~SniDynamicProxyFilterTest() override {\n    EXPECT_TRUE(\n        cm_.thread_local_cluster_.cluster_.info_->resource_manager_->pendingRequests().canCreate());\n  }\n\n  Extensions::Common::DynamicForwardProxy::DnsCacheManagerSharedPtr get() 
override {\n    return dns_cache_manager_;\n  }\n\n  std::shared_ptr<Extensions::Common::DynamicForwardProxy::MockDnsCacheManager> dns_cache_manager_{\n      new Extensions::Common::DynamicForwardProxy::MockDnsCacheManager()};\n  Upstream::MockClusterManager cm_;\n  ProxyFilterConfigSharedPtr filter_config_;\n  std::unique_ptr<ProxyFilter> filter_;\n  Network::MockReadFilterCallbacks callbacks_;\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<Upstream::MockBasicResourceLimit> pending_requests_;\n};\n\n// No SNI handling.\nTEST_F(SniDynamicProxyFilterTest, NoSNI) {\n  EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return(\"\"));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n}\n\nTEST_F(SniDynamicProxyFilterTest, LoadDnsCache) {\n  EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return(\"foo\"));\n  Upstream::ResourceAutoIncDec* circuit_breakers_{\n      new Upstream::ResourceAutoIncDec(pending_requests_)};\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n      .WillOnce(Return(circuit_breakers_));\n  Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle =\n      new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle();\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"foo\"), 443, _))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle}));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n\n  EXPECT_CALL(callbacks_, continueReading());\n  filter_->onLoadDnsCacheComplete();\n\n  EXPECT_CALL(*handle, onDestroy());\n}\n\nTEST_F(SniDynamicProxyFilterTest, LoadDnsInCache) {\n  EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return(\"foo\"));\n  Upstream::ResourceAutoIncDec* circuit_breakers_{\n      new Upstream::ResourceAutoIncDec(pending_requests_)};\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n 
     .WillOnce(Return(circuit_breakers_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"foo\"), 443, _))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::InCache, nullptr}));\n\n  EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection());\n}\n\n// Cache overflow.\nTEST_F(SniDynamicProxyFilterTest, CacheOverflow) {\n  EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return(\"foo\"));\n  Upstream::ResourceAutoIncDec* circuit_breakers_{\n      new Upstream::ResourceAutoIncDec(pending_requests_)};\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_))\n      .WillOnce(Return(circuit_breakers_));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq(\"foo\"), 443, _))\n      .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Overflow, nullptr}));\n  EXPECT_CALL(connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n}\n\nTEST_F(SniDynamicProxyFilterTest, CircuitBreakerInvoked) {\n  EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return(\"foo\"));\n  EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)).WillOnce(Return(nullptr));\n  EXPECT_CALL(connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection());\n}\n\n} // namespace\n\n} // namespace SniDynamicForwardProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/tcp_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.tcp_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/tcp_proxy/config_test.cc",
    "content": "#include <string>\n\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/tcp_proxy/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace TcpProxy {\n\nclass RouteIpListConfigTest : public testing::TestWithParam<std::string> {};\n\nINSTANTIATE_TEST_SUITE_P(IpList, RouteIpListConfigTest,\n                         ::testing::Values(R\"EOF(\"destination_ip_list\": [\n                                                  {\n                                                    \"address_prefix\": \"192.168.1.1\",\n                                                    \"prefix_len\": 32\n                                                  },\n                                                  {\n                                                    \"address_prefix\": \"192.168.1.0\",\n                                                    \"prefix_len\": 24\n                                                  }\n                                                ],\n                                                \"source_ip_list\": [\n                                                  {\n                                                    \"address_prefix\": \"192.168.0.0\",\n                                                    \"prefix_len\": 16\n                                                  },\n                                                  {\n                                                    \"address_prefix\": \"192.0.0.0\",\n                                                    \"prefix_len\": 8\n                                                  },\n                                                  {\n          
                                          \"address_prefix\": \"127.0.0.0\",\n                                                    \"prefix_len\": 8\n                                                  }\n                                                ],)EOF\",\n                                           R\"EOF(\"destination_ip_list\": [\n                                                  {\n                                                    \"address_prefix\": \"2001:abcd::\",\n                                                    \"prefix_len\": 64\n                                                  },\n                                                  {\n                                                    \"address_prefix\": \"2002:ffff::\",\n                                                    \"prefix_len\": 32\n                                                  }\n                                                ],\n                                                \"source_ip_list\": [\n                                                  {\n                                                    \"address_prefix\": \"ffee::\",\n                                                    \"prefix_len\": 128\n                                                  },\n                                                  {\n                                                    \"address_prefix\": \"2001::abcd\",\n                                                    \"prefix_len\": 64\n                                                  },\n                                                  {\n                                                    \"address_prefix\": \"1234::5678\",\n                                                    \"prefix_len\": 128\n                                                  }\n                                                ],)EOF\"));\n\nTEST_P(RouteIpListConfigTest, DEPRECATED_FEATURE_TEST(TcpProxy)) {\n  const std::string json_string = R\"EOF(\n  {\n    \"stat_prefix\": 
\"my_stat_prefix\",\n    \"cluster\": \"foobar\",\n    \"deprecated_v1\": {\n      \"routes\": [\n        {)EOF\" + GetParam() +\n                                  R\"EOF(\"destination_ports\": \"1-1024,2048-4096,12345\",\n          \"cluster\": \"fake_cluster\"\n        },\n        {\n          \"source_ports\": \"23457,23459\",\n          \"cluster\": \"fake_cluster2\"\n        }\n      ]\n    }\n  }\n  )EOF\";\n\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy proto_config;\n  TestUtility::loadFromJson(json_string, proto_config, true, false);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  ConfigFactory factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  NiceMock<Network::MockReadFilterCallbacks> readFilterCallback;\n  EXPECT_CALL(connection, addReadFilter(_))\n      .WillRepeatedly(Invoke([&readFilterCallback](Network::ReadFilterSharedPtr filter) {\n        filter->initializeReadFilterCallbacks(readFilterCallback);\n      }));\n  cb(connection);\n}\n\nTEST(ConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(ConfigFactory().createFilterFactoryFromProto(\n                   envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy(), context),\n               ProtoValidationException);\n}\n\n// Test that a minimal TcpProxy v2 config works.\nTEST(ConfigTest, ConfigTest) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  ConfigFactory factory;\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config =\n      *dynamic_cast<envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy*>(\n          factory.createEmptyConfigProto().get());\n  config.set_stat_prefix(\"prefix\");\n  config.set_cluster(\"cluster\");\n\n  EXPECT_TRUE(factory.isTerminalFilter());\n\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context);\n  
Network::MockConnection connection;\n  NiceMock<Network::MockReadFilterCallbacks> readFilterCallback;\n  EXPECT_CALL(connection, addReadFilter(_))\n      .WillRepeatedly(Invoke([&readFilterCallback](Network::ReadFilterSharedPtr filter) {\n        filter->initializeReadFilterCallbacks(readFilterCallback);\n      }));\n  cb(connection);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(ConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.tcp_proxy\";\n\n  ASSERT_NE(\n      nullptr,\n      Registry::FactoryRegistry<Server::Configuration::NamedNetworkFilterConfigFactory>::getFactory(\n          deprecated_name));\n}\n\n} // namespace TcpProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_mock\",\n    \"envoy_extension_cc_test\",\n    \"envoy_extension_cc_test_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_mock(\n    name = \"mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/thrift_proxy:conn_manager_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:protocol_interface\",\n        \"//source/extensions/filters/network/thrift_proxy:transport_interface\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:factory_base_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:filter_interface\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_interface\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_ratelimit_interface\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:printers_lib\",\n    ],\n)\n\nenvoy_extension_cc_test_library(\n    name = \"integration_lib\",\n    srcs = [\"integration.cc\"],\n    hdrs = [\"integration.h\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/thrift_proxy:config\",\n        \"//source/extensions/filters/network/thrift_proxy:conn_manager_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/router:config\",\n        \"//test/integration:integration_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    extension_name = 
\"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:byte_order_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:thrift_lib\",\n        \"//test/common/buffer:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"app_exception_impl_test\",\n    srcs = [\"app_exception_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \"//source/extensions/filters/network/thrift_proxy:app_exception_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"buffer_helper_test\",\n    srcs = [\"buffer_helper_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:buffer_helper_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"conn_state_test\",\n    srcs = [\"conn_state_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/thrift_proxy:conn_state_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"auto_transport_impl_test\",\n    srcs = [\"auto_transport_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:auto_transport_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = 
\"framed_transport_impl_test\",\n    srcs = [\"framed_transport_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:framed_transport_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"header_transport_impl_test\",\n    srcs = [\"header_transport_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:header_transport_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"unframed_transport_impl_test\",\n    srcs = [\"unframed_transport_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:unframed_transport_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"auto_protocol_impl_test\",\n    srcs = [\"auto_protocol_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:auto_protocol_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"binary_protocol_impl_test\",\n    srcs = [\"binary_protocol_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        
\"//source/extensions/filters/network/thrift_proxy:binary_protocol_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"compact_protocol_impl_test\",\n    srcs = [\"compact_protocol_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:compact_protocol_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"twitter_protocol_impl_test\",\n    srcs = [\"twitter_protocol_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:framed_transport_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:twitter_protocol_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \"//source/extensions/filters/network/thrift_proxy:config\",\n        \"//source/extensions/filters/network/thrift_proxy/router:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"conn_manager_test\",\n    srcs = [\"conn_manager_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:config\",\n        
\"//source/extensions/filters/network/thrift_proxy:conn_manager_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters:filter_interface\",\n        \"//source/extensions/filters/network/thrift_proxy/router:config\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_interface\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"decoder_test\",\n    srcs = [\"decoder_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:app_exception_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:decoder_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"metadata_test\",\n    srcs = [\"metadata_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/thrift_proxy:metadata_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"router_test\",\n    srcs = [\"router_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:app_exception_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/router:config\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        
\"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:registry_lib\",\n        \"@envoy_api//envoy/config/filter/thrift/router/v2alpha1:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"router_ratelimit_test\",\n    srcs = [\"router_ratelimit_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":mocks\",\n        \":utility_lib\",\n        \"//include/envoy/common:base_includes\",\n        \"//source/common/network:address_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:config\",\n        \"//source/extensions/filters/network/thrift_proxy/router:config\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_ratelimit_lib\",\n        \"//test/mocks/ratelimit:ratelimit_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"route_matcher_test\",\n    srcs = [\"route_matcher_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/router:config\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_interface\",\n        \"//source/extensions/filters/network/thrift_proxy/router:router_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/filter/thrift/router/v2alpha1:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"thrift_object_impl_test\",\n    srcs = [\"thrift_object_impl_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        
\":mocks\",\n        \":utility_lib\",\n        \"//source/extensions/filters/network/thrift_proxy:thrift_object_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:registry_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"integration_test\",\n    srcs = [\"integration_test.cc\"],\n    data = [\n        \"//test/extensions/filters/network/thrift_proxy/driver:generate_fixture\",\n    ],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    tags = [\"fails_on_windows\"],\n    deps = [\n        \":integration_lib\",\n        \":utility_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:printers_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"translation_integration_test\",\n    srcs = [\"translation_integration_test.cc\"],\n    data = [\n        \"//test/extensions/filters/network/thrift_proxy/driver:generate_fixture\",\n    ],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    tags = [\"fails_on_windows\"],\n    deps = [\n        \":integration_lib\",\n        \":utility_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:printers_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/app_exception_impl_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::InSequence;\nusing testing::Ref;\nusing testing::StrictMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nTEST(AppExceptionImplTest, CopyConstructor) {\n  AppException app_ex(AppExceptionType::InternalError, \"msg\");\n  AppException copy(app_ex);\n\n  EXPECT_EQ(app_ex.type_, copy.type_);\n  EXPECT_STREQ(\"msg\", copy.what());\n}\n\nTEST(AppExceptionImplTest, TestEncode) {\n  AppException app_ex(AppExceptionType::InternalError, \"msg\");\n\n  MessageMetadata metadata;\n  metadata.setMethodName(\"method\");\n  metadata.setSequenceId(99);\n  metadata.setMessageType(MessageType::Call);\n\n  StrictMock<MockProtocol> proto;\n  Buffer::OwnedImpl buffer;\n\n  InSequence dummy;\n  EXPECT_CALL(proto, writeMessageBegin(Ref(buffer), Ref(metadata)))\n      .WillOnce(Invoke([&](Buffer::Instance&, const MessageMetadata& metadata) -> void {\n        EXPECT_EQ(\"method\", metadata.methodName());\n        EXPECT_EQ(99, metadata.sequenceId());\n        EXPECT_EQ(MessageType::Exception, metadata.messageType());\n      }));\n  EXPECT_CALL(proto, writeStructBegin(Ref(buffer), \"TApplicationException\"));\n  EXPECT_CALL(proto, writeFieldBegin(Ref(buffer), \"message\", FieldType::String, 1));\n  EXPECT_CALL(proto, writeString(Ref(buffer), \"msg\"));\n  EXPECT_CALL(proto, writeFieldEnd(Ref(buffer)));\n  EXPECT_CALL(proto, writeFieldBegin(Ref(buffer), \"type\", FieldType::I32, 2));\n  EXPECT_CALL(proto, writeInt32(Ref(buffer), static_cast<int>(AppExceptionType::InternalError)));\n  EXPECT_CALL(proto, writeFieldEnd(Ref(buffer)));\n  EXPECT_CALL(proto, writeFieldBegin(Ref(buffer), \"\", FieldType::Stop, 0));\n  EXPECT_CALL(proto, writeStructEnd(Ref(buffer)));\n  
EXPECT_CALL(proto, writeMessageEnd(Ref(buffer)));\n\n  EXPECT_EQ(DirectResponse::ResponseType::Exception, app_ex.encode(metadata, proto, buffer));\n}\n\nTEST(AppExceptionImplTest, TestEncodeEmptyMetadata) {\n  AppException app_ex(AppExceptionType::InternalError, \"msg\");\n\n  MessageMetadata metadata;\n  StrictMock<MockProtocol> proto;\n  Buffer::OwnedImpl buffer;\n\n  InSequence dummy;\n  EXPECT_CALL(proto, writeMessageBegin(Ref(buffer), Ref(metadata)))\n      .WillOnce(Invoke([&](Buffer::Instance&, const MessageMetadata& metadata) -> void {\n        EXPECT_EQ(\"\", metadata.methodName());\n        EXPECT_EQ(0, metadata.sequenceId());\n        EXPECT_EQ(MessageType::Exception, metadata.messageType());\n      }));\n  EXPECT_CALL(proto, writeStructBegin(Ref(buffer), \"TApplicationException\"));\n  EXPECT_CALL(proto, writeFieldBegin(Ref(buffer), \"message\", FieldType::String, 1));\n  EXPECT_CALL(proto, writeString(Ref(buffer), \"msg\"));\n  EXPECT_CALL(proto, writeFieldEnd(Ref(buffer)));\n  EXPECT_CALL(proto, writeFieldBegin(Ref(buffer), \"type\", FieldType::I32, 2));\n  EXPECT_CALL(proto, writeInt32(Ref(buffer), static_cast<int>(AppExceptionType::InternalError)));\n  EXPECT_CALL(proto, writeFieldEnd(Ref(buffer)));\n  EXPECT_CALL(proto, writeFieldBegin(Ref(buffer), \"\", FieldType::Stop, 0));\n  EXPECT_CALL(proto, writeStructEnd(Ref(buffer)));\n  EXPECT_CALL(proto, writeMessageEnd(Ref(buffer)));\n\n  EXPECT_EQ(DirectResponse::ResponseType::Exception, app_ex.encode(metadata, proto, buffer));\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/auto_protocol_impl_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/auto_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/binary_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/compact_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/twitter_protocol_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass AutoProtocolTest : public testing::Test {\npublic:\n  void resetMetadata() {\n    metadata_.setMethodName(\"-\");\n    metadata_.setMessageType(MessageType::Oneway);\n    metadata_.setSequenceId(-1);\n  }\n\n  void expectMetadata(const std::string& name, MessageType msg_type, int32_t seq_id) {\n    EXPECT_TRUE(metadata_.hasMethodName());\n    EXPECT_EQ(name, metadata_.methodName());\n\n    EXPECT_TRUE(metadata_.hasMessageType());\n    EXPECT_EQ(msg_type, metadata_.messageType());\n\n    EXPECT_TRUE(metadata_.hasSequenceId());\n    EXPECT_EQ(seq_id, metadata_.sequenceId());\n\n    EXPECT_FALSE(metadata_.hasFrameSize());\n    EXPECT_FALSE(metadata_.hasProtocol());\n    EXPECT_FALSE(metadata_.hasAppException());\n    EXPECT_EQ(metadata_.headers().size(), 0);\n  }\n\n  void expectDefaultMetadata() { expectMetadata(\"-\", MessageType::Oneway, -1); }\n\n  MessageMetadata metadata_;\n};\n\nTEST(ProtocolNames, FromType) {\n  for (int i = 0; i <= static_cast<int>(ProtocolType::LastProtocolType); i++) {\n    auto type = static_cast<ProtocolType>(i);\n    EXPECT_NE(\"\", ProtocolNames::get().fromType(type));\n  
}\n}\n\nTEST_F(AutoProtocolTest, NotEnoughData) {\n  // Too short for any auto detection\n  {\n    Buffer::OwnedImpl buffer;\n    AutoProtocolImpl proto;\n    resetMetadata();\n\n    buffer.writeByte(0);\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n    expectDefaultMetadata();\n  }\n\n  // Binary protocol, but too short to distinguish Twitter protocol\n  {\n    AutoProtocolImpl proto;\n    resetMetadata();\n\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int16_t>(0x8001);\n\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n  }\n}\n\nTEST_F(AutoProtocolTest, UnknownProtocol) {\n  Buffer::OwnedImpl buffer;\n  AutoProtocolImpl proto;\n  resetMetadata();\n\n  buffer.writeBEInt<int16_t>(0x0102);\n\n  EXPECT_THROW_WITH_MESSAGE(proto.readMessageBegin(buffer, metadata_), EnvoyException,\n                            \"unknown thrift auto protocol message start 0102\");\n  expectDefaultMetadata();\n}\n\nTEST_F(AutoProtocolTest, ReadMessageBegin) {\n  // Binary Protocol\n  {\n    AutoProtocolImpl proto;\n    resetMetadata();\n\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int16_t>(0x8001);\n    buffer.writeByte(0);\n    buffer.writeByte(MessageType::Call);\n    buffer.writeBEInt<int32_t>(8);\n    buffer.add(\"the_name\");\n    buffer.writeBEInt<int32_t>(1);\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, metadata_));\n    expectMetadata(\"the_name\", MessageType::Call, 1);\n    EXPECT_EQ(buffer.length(), 0);\n    EXPECT_EQ(proto.name(), \"binary(auto)\");\n    EXPECT_EQ(proto.type(), ProtocolType::Binary);\n  }\n\n  // Compact protocol\n  {\n    AutoProtocolImpl proto;\n    resetMetadata();\n\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int16_t>(0x8221);\n    buffer.writeBEInt<int16_t>(0x8202); // 0x0102\n    buffer.writeByte(8);\n    buffer.add(\"the_name\");\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, metadata_));\n    expectMetadata(\"the_name\", MessageType::Call, 0x0102);\n    EXPECT_EQ(buffer.length(), 0);\n 
   EXPECT_EQ(proto.name(), \"compact(auto)\");\n    EXPECT_EQ(proto.type(), ProtocolType::Compact);\n  }\n\n  // Twitter protocol\n  {\n    AutoProtocolImpl proto;\n    resetMetadata();\n\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int16_t>(0x8001);\n    buffer.writeByte(0);\n    buffer.writeByte(MessageType::Call);\n    buffer.writeBEInt<int32_t>(TwitterProtocolImpl::upgradeMethodName().length());\n    buffer.add(TwitterProtocolImpl::upgradeMethodName());\n    buffer.writeBEInt<int32_t>(1);\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, metadata_));\n    expectMetadata(TwitterProtocolImpl::upgradeMethodName(), MessageType::Call, 1);\n    EXPECT_EQ(buffer.length(), 0);\n    EXPECT_EQ(proto.name(), \"twitter(auto)\");\n    EXPECT_EQ(proto.type(), ProtocolType::Twitter);\n  }\n}\n\nTEST_F(AutoProtocolTest, ReadDelegation) {\n  auto* proto = new NiceMock<MockProtocol>();\n  AutoProtocolImpl auto_proto;\n  auto_proto.setProtocol(ProtocolPtr{proto});\n\n  // readMessageBegin\n  Buffer::OwnedImpl buffer;\n  resetMetadata();\n\n  EXPECT_CALL(*proto, readMessageBegin(Ref(buffer), Ref(metadata_))).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readMessageBegin(buffer, metadata_));\n\n  // readMessageEnd\n  EXPECT_CALL(*proto, readMessageEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readMessageEnd(buffer));\n\n  // readStructBegin\n  std::string name;\n  EXPECT_CALL(*proto, readStructBegin(Ref(buffer), Ref(name))).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readStructBegin(buffer, name));\n\n  // readStructEnd\n  EXPECT_CALL(*proto, readStructEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readStructEnd(buffer));\n\n  // readFieldBegin\n  FieldType field_type = FieldType::Stop;\n  int16_t field_id = 1;\n\n  EXPECT_CALL(*proto, readFieldBegin(Ref(buffer), Ref(name), Ref(field_type), Ref(field_id)))\n      .WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readFieldBegin(buffer, name, field_type, field_id));\n\n  
// readFieldEnd\n  EXPECT_CALL(*proto, readFieldEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readFieldEnd(buffer));\n\n  // readMapBegin\n  FieldType value_type = FieldType::Stop;\n  uint32_t size = 1;\n\n  EXPECT_CALL(*proto, readMapBegin(Ref(buffer), Ref(field_type), Ref(value_type), Ref(size)))\n      .WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readMapBegin(buffer, field_type, value_type, size));\n\n  // readMapEnd\n  EXPECT_CALL(*proto, readMapEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readMapEnd(buffer));\n\n  // readListBegin\n  EXPECT_CALL(*proto, readListBegin(Ref(buffer), Ref(field_type), Ref(size)))\n      .WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readListBegin(buffer, field_type, size));\n\n  // readListEnd\n  EXPECT_CALL(*proto, readListEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readListEnd(buffer));\n\n  // readSetBegin\n  EXPECT_CALL(*proto, readSetBegin(Ref(buffer), Ref(field_type), Ref(size))).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readSetBegin(buffer, field_type, size));\n\n  // readSetEnd\n  EXPECT_CALL(*proto, readSetEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.readSetEnd(buffer));\n\n  // readBool\n  {\n    bool value;\n    EXPECT_CALL(*proto, readBool(Ref(buffer), Ref(value))).WillOnce(Return(true));\n    EXPECT_TRUE(auto_proto.readBool(buffer, value));\n  }\n\n  // readByte\n  {\n    uint8_t value;\n    EXPECT_CALL(*proto, readByte(Ref(buffer), Ref(value))).WillOnce(Return(true));\n    EXPECT_TRUE(auto_proto.readByte(buffer, value));\n  }\n\n  // readInt16\n  {\n    int16_t value;\n    EXPECT_CALL(*proto, readInt16(Ref(buffer), Ref(value))).WillOnce(Return(true));\n    EXPECT_TRUE(auto_proto.readInt16(buffer, value));\n  }\n\n  // readInt32\n  {\n    int32_t value;\n    EXPECT_CALL(*proto, readInt32(Ref(buffer), Ref(value))).WillOnce(Return(true));\n    EXPECT_TRUE(auto_proto.readInt32(buffer, value));\n  }\n\n  // 
readInt64\n  {\n    int64_t value;\n    EXPECT_CALL(*proto, readInt64(Ref(buffer), Ref(value))).WillOnce(Return(true));\n    EXPECT_TRUE(auto_proto.readInt64(buffer, value));\n  }\n\n  // readDouble\n  {\n    double value;\n    EXPECT_CALL(*proto, readDouble(Ref(buffer), Ref(value))).WillOnce(Return(true));\n    EXPECT_TRUE(auto_proto.readDouble(buffer, value));\n  }\n\n  // readString\n  {\n    std::string value = \"x\";\n    EXPECT_CALL(*proto, readString(Ref(buffer), Ref(value))).WillOnce(Return(true));\n    EXPECT_TRUE(auto_proto.readString(buffer, value));\n  }\n\n  // readBinary\n  {\n    std::string value = \"x\";\n    EXPECT_CALL(*proto, readBinary(Ref(buffer), Ref(value))).WillOnce(Return(true));\n    EXPECT_TRUE(auto_proto.readBinary(buffer, value));\n  }\n}\n\nTEST_F(AutoProtocolTest, WriteDelegation) {\n  auto* proto = new NiceMock<MockProtocol>();\n  AutoProtocolImpl auto_proto;\n  auto_proto.setProtocol(ProtocolPtr{proto});\n\n  // writeMessageBegin\n  Buffer::OwnedImpl buffer;\n  EXPECT_CALL(*proto, writeMessageBegin(Ref(buffer), Ref(metadata_)));\n  auto_proto.writeMessageBegin(buffer, metadata_);\n\n  // writeMessageEnd\n  EXPECT_CALL(*proto, writeMessageEnd(Ref(buffer)));\n  auto_proto.writeMessageEnd(buffer);\n\n  // writeStructBegin\n  EXPECT_CALL(*proto, writeStructBegin(Ref(buffer), \"name\"));\n  auto_proto.writeStructBegin(buffer, \"name\");\n\n  // writeStructEnd\n  EXPECT_CALL(*proto, writeStructEnd(Ref(buffer)));\n  auto_proto.writeStructEnd(buffer);\n\n  // writeFieldBegin\n  EXPECT_CALL(*proto, writeFieldBegin(Ref(buffer), \"name\", FieldType::Stop, 100));\n  auto_proto.writeFieldBegin(buffer, \"name\", FieldType::Stop, 100);\n\n  // writeFieldEnd\n  EXPECT_CALL(*proto, writeFieldEnd(Ref(buffer)));\n  auto_proto.writeFieldEnd(buffer);\n\n  // writeMapBegin\n  EXPECT_CALL(*proto, writeMapBegin(Ref(buffer), FieldType::I32, FieldType::String, 100));\n  auto_proto.writeMapBegin(buffer, FieldType::I32, FieldType::String, 100);\n\n  // 
writeMapEnd\n  EXPECT_CALL(*proto, writeMapEnd(Ref(buffer)));\n  auto_proto.writeMapEnd(buffer);\n\n  // writeListBegin\n  EXPECT_CALL(*proto, writeListBegin(Ref(buffer), FieldType::String, 100));\n  auto_proto.writeListBegin(buffer, FieldType::String, 100);\n\n  // writeListEnd\n  EXPECT_CALL(*proto, writeListEnd(Ref(buffer)));\n  auto_proto.writeListEnd(buffer);\n\n  // writeSetBegin\n  EXPECT_CALL(*proto, writeSetBegin(Ref(buffer), FieldType::String, 100));\n  auto_proto.writeSetBegin(buffer, FieldType::String, 100);\n\n  // writeSetEnd\n  EXPECT_CALL(*proto, writeSetEnd(Ref(buffer)));\n  auto_proto.writeSetEnd(buffer);\n\n  // writeBool\n  EXPECT_CALL(*proto, writeBool(Ref(buffer), true));\n  auto_proto.writeBool(buffer, true);\n\n  // writeByte\n  EXPECT_CALL(*proto, writeByte(Ref(buffer), 100));\n  auto_proto.writeByte(buffer, 100);\n\n  // writeInt16\n  EXPECT_CALL(*proto, writeInt16(Ref(buffer), 100));\n  auto_proto.writeInt16(buffer, 100);\n\n  // writeInt32\n  EXPECT_CALL(*proto, writeInt32(Ref(buffer), 100));\n  auto_proto.writeInt32(buffer, 100);\n\n  // writeInt64\n  EXPECT_CALL(*proto, writeInt64(Ref(buffer), 100));\n  auto_proto.writeInt64(buffer, 100);\n\n  // writeDouble\n  EXPECT_CALL(*proto, writeDouble(Ref(buffer), 10.0));\n  auto_proto.writeDouble(buffer, 10.0);\n\n  // writeString\n  EXPECT_CALL(*proto, writeString(Ref(buffer), \"string\"));\n  auto_proto.writeString(buffer, \"string\");\n\n  // writeBinary\n  EXPECT_CALL(*proto, writeBinary(Ref(buffer), \"binary\"));\n  auto_proto.writeBinary(buffer, \"binary\");\n}\n\n// Test that protocol-upgrade methods are delegated to the detected protocol.\nTEST_F(AutoProtocolTest, ProtocolUpgradeDelegation) {\n  auto* proto = new NiceMock<MockProtocol>();\n  AutoProtocolImpl auto_proto;\n  auto_proto.setProtocol(ProtocolPtr{proto});\n\n  // supportsUpgrade\n  EXPECT_CALL(*proto, supportsUpgrade()).WillOnce(Return(true));\n  EXPECT_TRUE(auto_proto.supportsUpgrade());\n\n  // upgradeRequestDecoder\n  {\n 
   DecoderEventHandlerSharedPtr handler{new MockDecoderEventHandler()};\n    EXPECT_CALL(*proto, upgradeRequestDecoder()).WillOnce(Return(handler));\n    EXPECT_EQ(auto_proto.upgradeRequestDecoder().get(), handler.get());\n  }\n\n  // upgradeResponse\n  {\n    NiceMock<MockDecoderEventHandler> handler;\n    DirectResponse* response = new NiceMock<MockDirectResponse>();\n    EXPECT_CALL(*proto, upgradeResponse(Ref(handler)))\n        .WillOnce(Invoke([&](const DecoderEventHandler&) -> DirectResponsePtr {\n          return DirectResponsePtr{response};\n        }));\n    EXPECT_EQ(response, auto_proto.upgradeResponse(handler).get());\n  }\n\n  // attemptUpgrade\n  {\n    ThriftConnectionState state;\n    NiceMock<MockTransport> transport;\n    Buffer::OwnedImpl buffer;\n    ThriftObject* obj = new NiceMock<MockThriftObject>();\n    EXPECT_CALL(*proto, attemptUpgrade(Ref(transport), Ref(state), Ref(buffer)))\n        .WillOnce(\n            Invoke([&](Transport&, ThriftConnectionState&, Buffer::Instance&) -> ThriftObjectPtr {\n              return ThriftObjectPtr{obj};\n            }));\n    EXPECT_EQ(obj, auto_proto.attemptUpgrade(transport, state, buffer).get());\n  }\n\n  // completeUpgrade\n  {\n    ThriftConnectionState state;\n    NiceMock<MockThriftObject> obj;\n    EXPECT_CALL(*proto, completeUpgrade(Ref(state), Ref(obj)));\n    auto_proto.completeUpgrade(state, obj);\n  }\n}\n\nTEST_F(AutoProtocolTest, Name) {\n  AutoProtocolImpl proto;\n  EXPECT_EQ(proto.name(), \"auto\");\n}\n\nTEST_F(AutoProtocolTest, Type) {\n  AutoProtocolImpl proto;\n  EXPECT_EQ(proto.type(), ProtocolType::Auto);\n}\n\nTEST_F(AutoProtocolTest, SetUnexpectedType) {\n  Buffer::OwnedImpl buffer;\n  AutoProtocolImpl proto;\n  resetMetadata();\n\n  buffer.writeBEInt<int16_t>(0x0102);\n\n  proto.setType(ProtocolType::Auto);\n  EXPECT_THROW_WITH_MESSAGE(proto.readMessageBegin(buffer, metadata_), EnvoyException,\n                            \"unknown thrift auto protocol message start 
0102\");\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/auto_transport_impl_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/auto_transport_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Ref;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nTEST(TransportNames, FromType) {\n  for (int i = 0; i <= static_cast<int>(TransportType::LastTransportType); i++) {\n    TransportType type = static_cast<TransportType>(i);\n    EXPECT_NE(\"\", TransportNames::get().fromType(type));\n  }\n}\n\nTEST(AutoTransportTest, NotEnoughData) {\n  Buffer::OwnedImpl buffer;\n  AutoTransportImpl transport;\n  MessageMetadata metadata;\n\n  EXPECT_FALSE(transport.decodeFrameStart(buffer, metadata));\n  EXPECT_THAT(metadata, IsEmptyMetadata());\n\n  addRepeated(buffer, 7, 0);\n\n  EXPECT_FALSE(transport.decodeFrameStart(buffer, metadata));\n  EXPECT_THAT(metadata, IsEmptyMetadata());\n}\n\nTEST(AutoTransportTest, UnknownTransport) {\n  AutoTransportImpl transport;\n\n  // Looks like unframed, but fails protocol check.\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(0);\n    buffer.writeBEInt<int32_t>(0);\n\n    MessageMetadata metadata;\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"unknown thrift auto transport frame start 00 00 00 00 00 00 00 00\");\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n\n  // Looks like framed, but fails protocol check.\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(0xFF);\n    buffer.writeBEInt<int32_t>(0);\n\n    MessageMetadata metadata;\n    
EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"unknown thrift auto transport frame start 00 00 00 ff 00 00 00 00\");\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n}\n\nTEST(AutoTransportTest, DecodeFrameStart) {\n  // Framed transport + binary protocol\n  {\n    AutoTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(0xFF);\n    buffer.writeBEInt<int16_t>(0x8001);\n    buffer.writeBEInt<int16_t>(0);\n\n    MessageMetadata metadata;\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, HasOnlyFrameSize(255U));\n    EXPECT_EQ(transport.name(), \"framed(auto)\");\n    EXPECT_EQ(transport.type(), TransportType::Framed);\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // Framed transport + compact protocol\n  {\n    AutoTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(0xFFF);\n    buffer.writeBEInt<int16_t>(0x8201);\n    buffer.writeBEInt<int16_t>(0);\n\n    MessageMetadata metadata;\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, HasOnlyFrameSize(4095U));\n    EXPECT_EQ(transport.name(), \"framed(auto)\");\n    EXPECT_EQ(transport.type(), TransportType::Framed);\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // Unframed transport + binary protocol\n  {\n    AutoTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int16_t>(0x8001);\n    addRepeated(buffer, 6, 0);\n\n    MessageMetadata metadata;\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n    EXPECT_EQ(transport.name(), \"unframed(auto)\");\n    EXPECT_EQ(transport.type(), TransportType::Unframed);\n    EXPECT_EQ(buffer.length(), 8);\n  }\n\n  // Unframed transport + compact protocol\n  {\n    AutoTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int16_t>(0x8201);\n    
addRepeated(buffer, 6, 0);\n\n    MessageMetadata metadata;\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n    EXPECT_EQ(transport.name(), \"unframed(auto)\");\n    EXPECT_EQ(transport.type(), TransportType::Unframed);\n    EXPECT_EQ(buffer.length(), 8);\n  }\n\n  // Header transport + binary protocol\n  {\n    AutoTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(0xFF);\n    buffer.writeBEInt<int16_t>(0x0FFF); // header magic\n    buffer.writeBEInt<int16_t>(0x0000);\n    buffer.writeBEInt<int32_t>(0xEE); // sequence id\n    buffer.writeBEInt<int16_t>(1);\n    buffer.writeBEInt<int32_t>(0); // protocol (binary), 0 transforms + padding\n    buffer.writeBEInt<int16_t>(0x8001);\n\n    MessageMetadata metadata;\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, HasFrameSize(241U));\n    EXPECT_THAT(metadata, HasProtocol(ProtocolType::Binary));\n    EXPECT_THAT(metadata, HasSequenceId(0xEE));\n    EXPECT_EQ(transport.name(), \"header(auto)\");\n    EXPECT_EQ(transport.type(), TransportType::Header);\n    EXPECT_EQ(buffer.length(), 2);\n  }\n\n  // Header transport + compact protocol\n  {\n    AutoTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(0xFF);\n    buffer.writeBEInt<int16_t>(0x0FFF); // header magic\n    buffer.writeBEInt<int16_t>(0x0000);\n    buffer.writeBEInt<int32_t>(0xEE); // sequence id\n    buffer.writeBEInt<int16_t>(1);\n    buffer.writeBEInt<int32_t>(0x02000000); // protocol (binary), 0 transforms + padding\n    buffer.writeBEInt<int16_t>(0x8201);\n\n    MessageMetadata metadata;\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, HasFrameSize(241U));\n    EXPECT_THAT(metadata, HasProtocol(ProtocolType::Compact));\n    EXPECT_THAT(metadata, HasSequenceId(0xEE));\n    EXPECT_EQ(transport.name(), \"header(auto)\");\n    
EXPECT_EQ(transport.type(), TransportType::Header);\n    EXPECT_EQ(buffer.length(), 2);\n  }\n}\n\nTEST(AutoTransportTest, DecodeFrameEnd) {\n  AutoTransportImpl transport;\n  Buffer::OwnedImpl buffer;\n  buffer.writeBEInt<int32_t>(0xFF);\n  buffer.writeBEInt<int16_t>(0x8001);\n  buffer.writeBEInt<int16_t>(0);\n\n  MessageMetadata metadata;\n  EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n\n  EXPECT_EQ(buffer.length(), 4);\n\n  EXPECT_TRUE(transport.decodeFrameEnd(buffer));\n}\n\nTEST(AutoTransportTest, EncodeFrame) {\n  MockTransport* mock_transport = new NiceMock<MockTransport>();\n\n  AutoTransportImpl transport;\n  transport.setTransport(TransportPtr{mock_transport});\n\n  MessageMetadata metadata;\n  Buffer::OwnedImpl buffer;\n  Buffer::OwnedImpl message;\n\n  EXPECT_CALL(*mock_transport, encodeFrame(Ref(buffer), Ref(metadata), Ref(message)));\n  transport.encodeFrame(buffer, metadata, message);\n}\n\nTEST(AutoTransportTest, Name) {\n  AutoTransportImpl transport;\n  EXPECT_EQ(transport.name(), \"auto\");\n}\n\nTEST(AutoTransportTest, Type) {\n  AutoTransportImpl transport;\n  EXPECT_EQ(transport.type(), TransportType::Auto);\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/binary_protocol_impl_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/binary_protocol_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass BinaryProtocolTest : public testing::Test {\npublic:\n  void resetMetadata() {\n    metadata_.setMethodName(\"-\");\n    metadata_.setMessageType(MessageType::Oneway);\n    metadata_.setSequenceId(1);\n  }\n\n  void expectMetadata(const std::string& name, MessageType msg_type, int32_t seq_id) {\n    EXPECT_TRUE(metadata_.hasMethodName());\n    EXPECT_EQ(name, metadata_.methodName());\n\n    EXPECT_TRUE(metadata_.hasMessageType());\n    EXPECT_EQ(msg_type, metadata_.messageType());\n\n    EXPECT_TRUE(metadata_.hasSequenceId());\n    EXPECT_EQ(seq_id, metadata_.sequenceId());\n\n    EXPECT_FALSE(metadata_.hasFrameSize());\n    EXPECT_FALSE(metadata_.hasProtocol());\n    EXPECT_FALSE(metadata_.hasAppException());\n    EXPECT_EQ(metadata_.headers().size(), 0);\n  }\n\n  void expectDefaultMetadata() { expectMetadata(\"-\", MessageType::Oneway, 1); }\n\n  MessageMetadata metadata_;\n};\n\nclass LaxBinaryProtocolTest : public BinaryProtocolTest {};\n\nTEST_F(BinaryProtocolTest, Name) {\n  BinaryProtocolImpl proto;\n  EXPECT_EQ(proto.name(), \"binary\");\n}\n\nTEST_F(BinaryProtocolTest, ReadMessageBegin) {\n  BinaryProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    addRepeated(buffer, 11, 'x');\n\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 11);\n  }\n\n  // Wrong protocol version\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    
buffer.writeBEInt<int16_t>(0x0102);\n    addRepeated(buffer, 10, 'x');\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMessageBegin(buffer, metadata_), EnvoyException,\n                              \"invalid binary protocol version 0x0102 != 0x8001\");\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 12);\n  }\n\n  // Invalid message type\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8001);\n    buffer.writeByte('x');\n    buffer.writeByte(static_cast<int8_t>(MessageType::LastMessageType) + 1);\n    addRepeated(buffer, 8, 'x');\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMessageBegin(buffer, metadata_), EnvoyException,\n                              fmt::format(\"invalid binary protocol message type {}\",\n                                          static_cast<int8_t>(MessageType::LastMessageType) + 1));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 12);\n  }\n\n  // Empty name\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8001);\n    buffer.writeByte('x');\n    buffer.writeByte(MessageType::Call);\n    buffer.writeBEInt<int32_t>(0);\n    buffer.writeBEInt<int32_t>(1234);\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, metadata_));\n    expectMetadata(\"\", MessageType::Call, 1234);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Insufficient data after checking name length\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8001);\n    buffer.writeByte('x');\n    buffer.writeByte(MessageType::Call);\n    buffer.writeBEInt<int32_t>(4); // name length\n    buffer.add(\"abcd\");\n\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 12);\n  }\n\n  // Named message\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8001);\n    buffer.writeByte(0);\n    buffer.writeByte(MessageType::Call);\n    
buffer.writeBEInt<int32_t>(8);\n    buffer.add(\"the_name\");\n    buffer.writeBEInt<int32_t>(5678);\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, metadata_));\n    expectMetadata(\"the_name\", MessageType::Call, 5678);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(BinaryProtocolTest, ReadMessageEnd) {\n  Buffer::OwnedImpl buffer;\n  BinaryProtocolImpl proto;\n\n  EXPECT_TRUE(proto.readMessageEnd(buffer));\n}\n\nTEST_F(BinaryProtocolTest, ReadStructBegin) {\n  Buffer::OwnedImpl buffer;\n  BinaryProtocolImpl proto;\n  std::string name = \"-\";\n\n  EXPECT_TRUE(proto.readStructBegin(buffer, name));\n  EXPECT_EQ(name, \"\");\n}\n\nTEST_F(BinaryProtocolTest, ReadStructEnd) {\n  Buffer::OwnedImpl buffer;\n  BinaryProtocolImpl proto;\n\n  EXPECT_TRUE(proto.readStructEnd(buffer));\n}\n\nTEST_F(BinaryProtocolTest, ReadFieldBegin) {\n  BinaryProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    EXPECT_FALSE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"-\");\n    EXPECT_EQ(field_type, FieldType::String);\n    EXPECT_EQ(field_id, 1);\n  }\n\n  // Stop field\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(FieldType::Stop);\n\n    EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"\");\n    EXPECT_EQ(field_type, FieldType::Stop);\n    EXPECT_EQ(field_id, 0);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Insufficient data for non-stop field\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(FieldType::I32);\n\n    EXPECT_FALSE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"-\");\n    
EXPECT_EQ(field_type, FieldType::String);\n    EXPECT_EQ(field_id, 1);\n  }\n\n  // Non-stop field\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(FieldType::I32);\n    buffer.writeBEInt<int16_t>(99);\n\n    EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"\");\n    EXPECT_EQ(field_type, FieldType::I32);\n    EXPECT_EQ(field_id, 99);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // field id < 0\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(FieldType::I32);\n    buffer.writeBEInt<int16_t>(-1);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readFieldBegin(buffer, name, field_type, field_id),\n                              EnvoyException, \"invalid binary protocol field id -1\");\n    EXPECT_EQ(name, \"-\");\n    EXPECT_EQ(field_type, FieldType::String);\n    EXPECT_EQ(field_id, 1);\n    EXPECT_EQ(buffer.length(), 3);\n  }\n}\n\nTEST_F(BinaryProtocolTest, ReadFieldEnd) {\n  Buffer::OwnedImpl buffer;\n  BinaryProtocolImpl proto;\n  EXPECT_TRUE(proto.readFieldEnd(buffer));\n}\n\nTEST_F(BinaryProtocolTest, ReadMapBegin) {\n  BinaryProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    addRepeated(buffer, 5, 0);\n\n    EXPECT_FALSE(proto.readMapBegin(buffer, key_type, value_type, size));\n    EXPECT_EQ(key_type, FieldType::String);\n    EXPECT_EQ(value_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 5);\n  }\n\n  // Invalid map size\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    
buffer.writeByte(FieldType::I32);\n    buffer.writeByte(FieldType::I32);\n    buffer.writeBEInt<int32_t>(-1);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMapBegin(buffer, key_type, value_type, size),\n                              EnvoyException, \"negative binary protocol map size -1\");\n    EXPECT_EQ(key_type, FieldType::String);\n    EXPECT_EQ(value_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 6);\n  }\n\n  // Valid map start\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(FieldType::I32);\n    buffer.writeByte(FieldType::Double);\n    buffer.writeBEInt<int32_t>(10);\n\n    EXPECT_TRUE(proto.readMapBegin(buffer, key_type, value_type, size));\n    EXPECT_EQ(key_type, FieldType::I32);\n    EXPECT_EQ(value_type, FieldType::Double);\n    EXPECT_EQ(size, 10);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(BinaryProtocolTest, ReadMapEnd) {\n  Buffer::OwnedImpl buffer;\n  BinaryProtocolImpl proto;\n  EXPECT_TRUE(proto.readMapEnd(buffer));\n}\n\nTEST_F(BinaryProtocolTest, ReadListBegin) {\n  BinaryProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    addRepeated(buffer, 4, 0);\n\n    EXPECT_FALSE(proto.readListBegin(buffer, elem_type, size));\n    EXPECT_EQ(elem_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // Invalid list size\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(FieldType::I32);\n    buffer.writeBEInt<int32_t>(-1);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readListBegin(buffer, elem_type, size), EnvoyException,\n                              \"negative binary protocol list/set size -1\");\n    EXPECT_EQ(elem_type, FieldType::String);\n    EXPECT_EQ(size, 
1);\n    EXPECT_EQ(buffer.length(), 5);\n  }\n\n  // Valid list start\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(FieldType::I32);\n    buffer.writeBEInt<int32_t>(10);\n\n    EXPECT_TRUE(proto.readListBegin(buffer, elem_type, size));\n    EXPECT_EQ(elem_type, FieldType::I32);\n    EXPECT_EQ(size, 10);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(BinaryProtocolTest, ReadListEnd) {\n  Buffer::OwnedImpl buffer;\n  BinaryProtocolImpl proto;\n  EXPECT_TRUE(proto.readListEnd(buffer));\n}\n\nTEST_F(BinaryProtocolTest, ReadSetBegin) {\n  BinaryProtocolImpl proto;\n\n  // Test only the happy path, since this method is just delegated to readListBegin()\n  Buffer::OwnedImpl buffer;\n  FieldType elem_type = FieldType::String;\n  uint32_t size = 1;\n\n  buffer.writeByte(FieldType::I32);\n  buffer.writeBEInt<int32_t>(10);\n\n  EXPECT_TRUE(proto.readSetBegin(buffer, elem_type, size));\n  EXPECT_EQ(elem_type, FieldType::I32);\n  EXPECT_EQ(size, 10);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST_F(BinaryProtocolTest, ReadSetEnd) {\n  Buffer::OwnedImpl buffer;\n  BinaryProtocolImpl proto;\n  EXPECT_TRUE(proto.readSetEnd(buffer));\n}\n\nTEST_F(BinaryProtocolTest, ReadIntegerTypes) {\n  BinaryProtocolImpl proto;\n\n  // Bool\n  {\n    Buffer::OwnedImpl buffer;\n    bool value = false;\n\n    EXPECT_FALSE(proto.readBool(buffer, value));\n    EXPECT_FALSE(value);\n\n    buffer.writeByte(1);\n    EXPECT_TRUE(proto.readBool(buffer, value));\n    EXPECT_TRUE(value);\n    EXPECT_EQ(buffer.length(), 0);\n\n    buffer.writeByte(0);\n    EXPECT_TRUE(proto.readBool(buffer, value));\n    EXPECT_FALSE(value);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Byte\n  {\n    Buffer::OwnedImpl buffer;\n    uint8_t value = 1;\n\n    EXPECT_FALSE(proto.readByte(buffer, value));\n    EXPECT_EQ(value, 1);\n\n    buffer.writeByte(0);\n    EXPECT_TRUE(proto.readByte(buffer, value));\n    EXPECT_EQ(value, 0);\n    
EXPECT_EQ(buffer.length(), 0);\n\n    buffer.writeByte(0xFF);\n    EXPECT_TRUE(proto.readByte(buffer, value));\n    EXPECT_EQ(value, 0xFF);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Int16\n  {\n    Buffer::OwnedImpl buffer;\n    int16_t value = 1;\n\n    buffer.writeByte(0);\n    EXPECT_FALSE(proto.readInt16(buffer, value));\n    EXPECT_EQ(value, 1);\n\n    buffer.writeByte(0);\n    EXPECT_TRUE(proto.readInt16(buffer, value));\n    EXPECT_EQ(value, 0);\n    EXPECT_EQ(buffer.length(), 0);\n\n    buffer.writeByte(0x01);\n    buffer.writeByte(0x02);\n    EXPECT_TRUE(proto.readInt16(buffer, value));\n    EXPECT_EQ(value, 0x0102);\n    EXPECT_EQ(buffer.length(), 0);\n\n    addRepeated(buffer, 2, 0xFF);\n    EXPECT_TRUE(proto.readInt16(buffer, value));\n    EXPECT_EQ(value, -1);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Int32\n  {\n    Buffer::OwnedImpl buffer;\n    int32_t value = 1;\n\n    addRepeated(buffer, 3, 0);\n    EXPECT_FALSE(proto.readInt32(buffer, value));\n    EXPECT_EQ(value, 1);\n\n    buffer.writeByte(0);\n    EXPECT_TRUE(proto.readInt32(buffer, value));\n    EXPECT_EQ(value, 0);\n    EXPECT_EQ(buffer.length(), 0);\n\n    addSeq(buffer, {0x01, 0x02, 0x03, 0x04});\n    EXPECT_TRUE(proto.readInt32(buffer, value));\n    EXPECT_EQ(value, 0x01020304);\n    EXPECT_EQ(buffer.length(), 0);\n\n    addRepeated(buffer, 4, 0xFF);\n    EXPECT_TRUE(proto.readInt32(buffer, value));\n    EXPECT_EQ(value, -1);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Int64\n  {\n    Buffer::OwnedImpl buffer;\n    int64_t value = 1;\n\n    addRepeated(buffer, 7, 0);\n    EXPECT_FALSE(proto.readInt64(buffer, value));\n    EXPECT_EQ(value, 1);\n\n    buffer.writeByte(0);\n    EXPECT_TRUE(proto.readInt64(buffer, value));\n    EXPECT_EQ(value, 0);\n    EXPECT_EQ(buffer.length(), 0);\n\n    addSeq(buffer, {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08});\n    EXPECT_TRUE(proto.readInt64(buffer, value));\n    EXPECT_EQ(value, 0x0102030405060708);\n    
EXPECT_EQ(buffer.length(), 0);\n\n    addRepeated(buffer, 8, 0xFF);\n    EXPECT_TRUE(proto.readInt64(buffer, value));\n    EXPECT_EQ(value, -1);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(BinaryProtocolTest, ReadDouble) {\n  BinaryProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    double value = 1.0;\n    addRepeated(buffer, 7, 0);\n    EXPECT_FALSE(proto.readDouble(buffer, value));\n    EXPECT_EQ(value, 1.0);\n    EXPECT_EQ(buffer.length(), 7);\n  }\n\n  // double value\n  {\n    Buffer::OwnedImpl buffer;\n    double value = 1.0;\n\n    // 01000000 00001000 00000000 0000000 00000000 00000000 00000000 000000000 = 3\n    // c.f. https://en.wikipedia.org/wiki/Double-precision_floating-point_format\n    addSeq(buffer, {0x40, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00});\n\n    EXPECT_TRUE(proto.readDouble(buffer, value));\n    EXPECT_EQ(value, 3.0);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(BinaryProtocolTest, ReadString) {\n  BinaryProtocolImpl proto;\n\n  // Insufficient data to read length\n  {\n    Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    addRepeated(buffer, 3, 0);\n\n    EXPECT_FALSE(proto.readString(buffer, value));\n    EXPECT_EQ(value, \"-\");\n    EXPECT_EQ(buffer.length(), 3);\n  }\n\n  // Insufficient data to read string\n  {\n    Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    buffer.writeBEInt<int32_t>(1);\n\n    EXPECT_FALSE(proto.readString(buffer, value));\n    EXPECT_EQ(value, \"-\");\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // Invalid length\n  {\n    Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    buffer.writeBEInt<int32_t>(-1);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readString(buffer, value), EnvoyException,\n                              \"negative binary protocol string/binary length -1\");\n    EXPECT_EQ(value, \"-\");\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // empty string\n  {\n    Buffer::OwnedImpl buffer;\n    
std::string value = \"-\";\n\n    buffer.writeBEInt<int32_t>(0);\n\n    EXPECT_TRUE(proto.readString(buffer, value));\n    EXPECT_EQ(value, \"\");\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // non-empty string\n  {\n    Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    buffer.writeBEInt<int32_t>(6);\n    buffer.add(\"string\");\n\n    EXPECT_TRUE(proto.readString(buffer, value));\n    EXPECT_EQ(value, \"string\");\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(BinaryProtocolTest, ReadBinary) {\n  // Test only the happy path, since this method is just delegated to readString()\n  BinaryProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  std::string value = \"-\";\n\n  buffer.writeBEInt<int32_t>(6);\n  buffer.add(\"binary\");\n\n  EXPECT_TRUE(proto.readBinary(buffer, value));\n  EXPECT_EQ(value, \"binary\");\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST_F(BinaryProtocolTest, WriteMessageBegin) {\n  BinaryProtocolImpl proto;\n\n  // Named call\n  {\n    metadata_.setMethodName(\"message\");\n    metadata_.setMessageType(MessageType::Call);\n    metadata_.setSequenceId(1);\n\n    Buffer::OwnedImpl buffer;\n    proto.writeMessageBegin(buffer, metadata_);\n    EXPECT_EQ(std::string(\"\\x80\\x1\\0\\x1\\0\\0\\0\\x7message\\0\\0\\0\\x1\", 19), buffer.toString());\n  }\n\n  // Unnamed oneway\n  {\n    metadata_.setMethodName(\"\");\n    metadata_.setMessageType(MessageType::Oneway);\n    metadata_.setSequenceId(2);\n\n    Buffer::OwnedImpl buffer;\n    proto.writeMessageBegin(buffer, metadata_);\n    EXPECT_EQ(std::string(\"\\x80\\x1\\0\\x4\\0\\0\\0\\0\\0\\0\\0\\x2\", 12), buffer.toString());\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteMessageEnd) {\n  BinaryProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeMessageEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(BinaryProtocolTest, WriteStructBegin) {\n  BinaryProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeStructBegin(buffer, \"unused\");\n  EXPECT_EQ(0, 
buffer.length());\n}\n\nTEST_F(BinaryProtocolTest, WriteStructEnd) {\n  BinaryProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeStructEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(BinaryProtocolTest, WriteFieldBegin) {\n  BinaryProtocolImpl proto;\n\n  // Stop field\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeFieldBegin(buffer, \"unused\", FieldType::Stop, 1);\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n\n  // Normal field\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeFieldBegin(buffer, \"unused\", FieldType::I32, 1);\n    EXPECT_EQ(std::string(\"\\x8\\0\\x1\", 3), buffer.toString());\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteFieldEnd) {\n  BinaryProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeFieldEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(BinaryProtocolTest, WriteMapBegin) {\n  BinaryProtocolImpl proto;\n\n  // Non-empty map\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeMapBegin(buffer, FieldType::I32, FieldType::String, 3);\n    EXPECT_EQ(std::string(\"\\x8\\xb\\0\\0\\0\\x3\", 6), buffer.toString());\n  }\n\n  // Empty map\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeMapBegin(buffer, FieldType::I32, FieldType::String, 0);\n    EXPECT_EQ(std::string(\"\\x8\\xb\\0\\0\\0\\0\", 6), buffer.toString());\n  }\n\n  // Oversized map\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(\n        proto.writeMapBegin(buffer, FieldType::I32, FieldType::String, 3000000000), EnvoyException,\n        \"illegal binary protocol map size 3000000000\");\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteMapEnd) {\n  BinaryProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeMapEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(BinaryProtocolTest, WriteListBegin) {\n  BinaryProtocolImpl proto;\n\n  // Non-empty list\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeListBegin(buffer, FieldType::String, 3);\n    
EXPECT_EQ(std::string(\"\\xb\\0\\0\\0\\x3\", 5), buffer.toString());\n  }\n\n  // Empty list\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeListBegin(buffer, FieldType::String, 0);\n    EXPECT_EQ(std::string(\"\\xb\\0\\0\\0\\0\", 5), buffer.toString());\n  }\n\n  // Oversized list\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(proto.writeListBegin(buffer, FieldType::String, 3000000000),\n                              EnvoyException, \"illegal binary protocol list/set size 3000000000\");\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteListEnd) {\n  BinaryProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeListEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(BinaryProtocolTest, WriteSetBegin) {\n  BinaryProtocolImpl proto;\n\n  // Only test the happy path, as this shares an implementation with writeListBegin\n  // Non-empty list\n  Buffer::OwnedImpl buffer;\n  proto.writeSetBegin(buffer, FieldType::String, 3);\n  EXPECT_EQ(std::string(\"\\xb\\0\\0\\0\\x3\", 5), buffer.toString());\n}\n\nTEST_F(BinaryProtocolTest, WriteSetEnd) {\n  BinaryProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeSetEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(BinaryProtocolTest, WriteBool) {\n  BinaryProtocolImpl proto;\n\n  // True\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeBool(buffer, true);\n    EXPECT_EQ(\"\\x1\", buffer.toString());\n  }\n\n  // False\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeBool(buffer, false);\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteByte) {\n  BinaryProtocolImpl proto;\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeByte(buffer, -1);\n    EXPECT_EQ(\"\\xFF\", buffer.toString());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeByte(buffer, 127);\n    EXPECT_EQ(\"\\x7F\", buffer.toString());\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteInt16) {\n  BinaryProtocolImpl proto;\n\n  {\n    Buffer::OwnedImpl 
buffer;\n    proto.writeInt16(buffer, -1);\n    EXPECT_EQ(\"\\xFF\\xFF\", buffer.toString());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt16(buffer, 0x0102);\n    EXPECT_EQ(\"\\x1\\x2\", buffer.toString());\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteInt32) {\n  BinaryProtocolImpl proto;\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt32(buffer, -1);\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\", buffer.toString());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt32(buffer, 0x01020304);\n    EXPECT_EQ(\"\\x1\\x2\\x3\\x4\", buffer.toString());\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteInt64) {\n  BinaryProtocolImpl proto;\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt64(buffer, -1);\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\", buffer.toString());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt64(buffer, 0x0102030405060708);\n    EXPECT_EQ(\"\\x1\\x2\\x3\\x4\\x5\\x6\\x7\\x8\", buffer.toString());\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteDouble) {\n  BinaryProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeDouble(buffer, 3.0);\n  EXPECT_EQ(std::string(\"\\x40\\x8\\0\\0\\0\\0\\0\\0\", 8), buffer.toString());\n}\n\nTEST_F(BinaryProtocolTest, WriteString) {\n  BinaryProtocolImpl proto;\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeString(buffer, \"abc\");\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\x3\"\n                          \"abc\",\n                          7),\n              buffer.toString());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeString(buffer, \"\");\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\", 4), buffer.toString());\n  }\n}\n\nTEST_F(BinaryProtocolTest, WriteBinary) {\n  BinaryProtocolImpl proto;\n\n  // Happy path only, since this is just a synonym for writeString\n  Buffer::OwnedImpl buffer;\n  proto.writeBinary(buffer, \"abc\");\n  EXPECT_EQ(std::string(\"\\0\\0\\0\\x3\"\n                        \"abc\",\n                        7),\n       
     buffer.toString());\n}\n\nTEST_F(LaxBinaryProtocolTest, Name) {\n  LaxBinaryProtocolImpl proto;\n  EXPECT_EQ(proto.name(), \"binary/non-strict\");\n}\n\nTEST_F(LaxBinaryProtocolTest, ReadMessageBegin) {\n  LaxBinaryProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    addRepeated(buffer, 8, 'x');\n\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 8);\n  }\n\n  // Invalid message type\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int32_t>(0);\n    buffer.writeByte(static_cast<int8_t>(MessageType::LastMessageType) + 1);\n    addRepeated(buffer, 4, 'x');\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMessageBegin(buffer, metadata_), EnvoyException,\n                              fmt::format(\"invalid (lax) binary protocol message type {}\",\n                                          static_cast<int8_t>(MessageType::LastMessageType) + 1));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 9);\n  }\n\n  // Empty name\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int32_t>(0);\n    buffer.writeByte(MessageType::Call);\n    buffer.writeBEInt<int32_t>(1234);\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, metadata_));\n    expectMetadata(\"\", MessageType::Call, 1234);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Insufficient data after checking name length\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int32_t>(1); // name length\n    buffer.writeByte(MessageType::Call);\n    buffer.writeBEInt<int32_t>(1234);\n\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 9);\n  }\n\n  // Named message\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int32_t>(8);\n    buffer.add(\"the_name\");\n    
buffer.writeByte(MessageType::Call);\n    buffer.writeBEInt<int32_t>(5678);\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, metadata_));\n    expectMetadata(\"the_name\", MessageType::Call, 5678);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(LaxBinaryProtocolTest, WriteMessageBegin) {\n  LaxBinaryProtocolImpl proto;\n\n  // Named call\n  {\n    metadata_.setMethodName(\"message\");\n    metadata_.setMessageType(MessageType::Call);\n    metadata_.setSequenceId(1);\n\n    Buffer::OwnedImpl buffer;\n    proto.writeMessageBegin(buffer, metadata_);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\x7message\\x1\\0\\0\\0\\x1\", 16), buffer.toString());\n  }\n\n  // Unnamed oneway\n  {\n    metadata_.setMethodName(\"\");\n    metadata_.setMessageType(MessageType::Oneway);\n    metadata_.setSequenceId(2);\n\n    Buffer::OwnedImpl buffer;\n    proto.writeMessageBegin(buffer, metadata_);\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\0\\x4\\0\\0\\0\\x2\", 9), buffer.toString());\n  }\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/buffer_helper_test.cc",
    "content": "#include <limits>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nTEST(BufferHelperTest, DrainDouble) {\n  Buffer::OwnedImpl buffer;\n\n  // c.f. https://en.wikipedia.org/wiki/Double-precision_floating-point_format\n  // 01000000 00001000 00000000 0000000 00000000 00000000 00000000 000000000 = 3\n  addSeq(buffer, {0x40, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00});\n\n  // 11111111 11101111 11111111 1111111 11111111 11111111 11111111 111111111 = -DBL_MAX\n  addSeq(buffer, {0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});\n\n  EXPECT_EQ(BufferHelper::drainBEDouble(buffer), 3.0);\n  EXPECT_EQ(BufferHelper::drainBEDouble(buffer), std::numeric_limits<double>::lowest());\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(BufferHelperTest, PeekVarInt32) {\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeByte(0);\n    buffer.writeByte(0x7F);\n    addSeq(buffer, {0xFF, 0x01});                   // 0xFF\n    addSeq(buffer, {0xFF, 0xFF, 0x03});             // 0xFFFF\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0x07});       // 0xFFFFFF\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x07}); // 0x7FFFFFFF\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x0F}); // 0xFFFFFFFF\n\n    int size = 0;\n    EXPECT_EQ(BufferHelper::peekVarIntI32(buffer, 0, size), 0);\n    EXPECT_EQ(size, 1);\n\n    EXPECT_EQ(BufferHelper::peekVarIntI32(buffer, 1, size), 0x7F);\n    EXPECT_EQ(size, 1);\n\n    EXPECT_EQ(BufferHelper::peekVarIntI32(buffer, 2, size), 0xFF);\n    EXPECT_EQ(size, 2);\n\n    EXPECT_EQ(BufferHelper::peekVarIntI32(buffer, 4, size), 0xFFFF);\n    EXPECT_EQ(size, 3);\n\n    
EXPECT_EQ(BufferHelper::peekVarIntI32(buffer, 7, size), 0xFFFFFF);\n    EXPECT_EQ(size, 4);\n\n    EXPECT_EQ(BufferHelper::peekVarIntI32(buffer, 11, size), 0x7FFFFFFF);\n    EXPECT_EQ(size, 5);\n\n    EXPECT_EQ(BufferHelper::peekVarIntI32(buffer, 16, size), 0xFFFFFFFF);\n    EXPECT_EQ(size, 5);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    int size = 0;\n    EXPECT_THROW_WITH_MESSAGE(BufferHelper::peekVarIntI32(buffer, 0, size), EnvoyException,\n                              \"buffer underflow\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    int size = 0;\n    buffer.writeByte(0);\n    EXPECT_THROW_WITH_MESSAGE(BufferHelper::peekVarIntI32(buffer, 1, size), EnvoyException,\n                              \"buffer underflow\");\n  }\n}\n\nTEST(BufferHelperTest, PeekVarInt32BufferUnderflow) {\n  Buffer::OwnedImpl buffer;\n  int size = 0;\n\n  for (int i = 1; i < 5; i++) {\n    buffer.writeByte(0x80);\n    EXPECT_EQ(BufferHelper::peekVarIntI32(buffer, 0, size), 0);\n    EXPECT_EQ(size, -i);\n  }\n\n  buffer.writeByte(0x80);\n  EXPECT_THROW_WITH_MESSAGE(BufferHelper::peekVarIntI32(buffer, 0, size), EnvoyException,\n                            \"invalid compact protocol varint i32\");\n}\n\nTEST(BufferHelperTest, PeekZigZagI32) {\n  Buffer::OwnedImpl buffer;\n  buffer.writeByte(0);                            // unzigzag(0) = 0\n  buffer.writeByte(1);                            // unzigzag(1) = -1\n  buffer.writeByte(2);                            // unzigzag(2) = 1\n  addSeq(buffer, {0xFE, 0x01});                   // unzigzag(0xFE) = 127\n  addSeq(buffer, {0xFF, 0x01});                   // unzigzag(0xFF) = -128\n  addSeq(buffer, {0xFF, 0xFF, 0x03});             // unzigzag(0xFFFF) = -32768\n  addSeq(buffer, {0xFF, 0xFF, 0xFF, 0x07});       // unzigzag(0xFFFFFF) = -8388608\n  addSeq(buffer, {0xFE, 0xFF, 0xFF, 0xFF, 0x07}); // unzigzag(0x7FFFFFFE) = 0x3FFFFFFF\n  addSeq(buffer, {0xFE, 0xFF, 0xFF, 0xFF, 0x0F}); // unzigzag(0xFFFFFFFE) = 0x7FFFFFFF\n  addSeq(buffer, 
{0xFF, 0xFF, 0xFF, 0xFF, 0x0F}); // unzigzag(0xFFFFFFFF) = 0x80000000\n\n  int size = 0;\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 0, size), 0);\n  EXPECT_EQ(size, 1);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 1, size), -1);\n  EXPECT_EQ(size, 1);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 2, size), 1);\n  EXPECT_EQ(size, 1);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 3, size), 127);\n  EXPECT_EQ(size, 2);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 5, size), -128);\n  EXPECT_EQ(size, 2);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 7, size), -32768);\n  EXPECT_EQ(size, 3);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 10, size), -8388608);\n  EXPECT_EQ(size, 4);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 14, size), 0x3FFFFFFF);\n  EXPECT_EQ(size, 5);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 19, size), 0x7FFFFFFF);\n  EXPECT_EQ(size, 5);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 24, size), 0x80000000);\n  EXPECT_EQ(size, 5);\n}\n\nTEST(BufferHelperTest, PeekZigZagI32BufferUnderflow) {\n  Buffer::OwnedImpl buffer;\n  int size = 0;\n\n  for (int i = 1; i < 5; i++) {\n    buffer.writeByte(0x80);\n    EXPECT_EQ(BufferHelper::peekZigZagI32(buffer, 0, size), 0);\n    EXPECT_EQ(size, -i);\n  }\n\n  buffer.writeByte(0x80);\n  EXPECT_THROW_WITH_MESSAGE(BufferHelper::peekZigZagI32(buffer, 0, size), EnvoyException,\n                            \"invalid compact protocol zig-zag i32\");\n}\n\nTEST(BufferHelperTest, PeekZigZagI64) {\n  Buffer::OwnedImpl buffer;\n  buffer.writeByte(0);                            // unzigzag(0) = 0\n  buffer.writeByte(1);                            // unzigzag(1) = -1\n  buffer.writeByte(2);                            // unzigzag(2) = 1\n  addSeq(buffer, {0xFF, 0xFF, 0x03});             // unzigzag(0xFFFF) = -32768\n  addSeq(buffer, {0xFE, 0xFF, 0xFF, 0xFF, 0x0F}); // unzigzag(0xFFFFFFFE) = 0x7FFFFFFF\n\n  // unzigzag(0xFFFF FFFFFFFE) = 0x7FFF FFFFFFFF\n  addSeq(buffer, 
{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F});\n\n  // unzigzag(0x7FFFFFFF FFFFFFFE) = 0x3FFFFFFF FFFFFFFF\n  addSeq(buffer, {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F});\n\n  // unzigzag(0xFFFFFFFF FFFFFFFF) = 0x80000000 00000000 (-2^63)\n  addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01});\n\n  int size = 0;\n  EXPECT_EQ(BufferHelper::peekZigZagI64(buffer, 0, size), 0);\n  EXPECT_EQ(size, 1);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI64(buffer, 1, size), -1);\n  EXPECT_EQ(size, 1);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI64(buffer, 2, size), 1);\n  EXPECT_EQ(size, 1);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI64(buffer, 3, size), -32768);\n  EXPECT_EQ(size, 3);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI64(buffer, 6, size), 0x7FFFFFFF);\n  EXPECT_EQ(size, 5);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI64(buffer, 11, size), 0x7FFFFFFFFFFF);\n  EXPECT_EQ(size, 7);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI64(buffer, 18, size), 0x3FFFFFFFFFFFFFFF);\n  EXPECT_EQ(size, 9);\n\n  EXPECT_EQ(BufferHelper::peekZigZagI64(buffer, 27, size), 0x8000000000000000);\n  EXPECT_EQ(size, 10);\n}\n\nTEST(BufferHelperTest, PeekZigZagI64BufferUnderflow) {\n  Buffer::OwnedImpl buffer;\n  int size = 0;\n\n  for (int i = 1; i < 10; i++) {\n    buffer.writeByte(0x80);\n    EXPECT_EQ(BufferHelper::peekZigZagI64(buffer, 0, size), 0);\n    EXPECT_EQ(size, -i);\n  }\n\n  buffer.writeByte(0x80);\n  EXPECT_THROW_WITH_MESSAGE(BufferHelper::peekZigZagI64(buffer, 0, size), EnvoyException,\n                            \"invalid compact protocol zig-zag i64\");\n}\n\nTEST(BufferHelperTest, WriteDouble) {\n  // See the DrainDouble test.\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeBEDouble(buffer, 3.0);\n    EXPECT_EQ(std::string(\"\\x40\\x8\\0\\0\\0\\0\\0\\0\", 8), buffer.toString());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeBEDouble(buffer, std::numeric_limits<double>::lowest());\n    
EXPECT_EQ(\"\\xFF\\xEF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteVarIntI32) {\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI32(buffer, 0);\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI32(buffer, 1);\n    EXPECT_EQ(\"\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI32(buffer, 128);\n    EXPECT_EQ(\"\\x80\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI32(buffer, (1 << 14) + 1);\n    EXPECT_EQ(\"\\x81\\x80\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI32(buffer, (1 << 28) + 1);\n    EXPECT_EQ(\"\\x81\\x80\\x80\\x80\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI32(buffer, std::numeric_limits<int32_t>::max());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\x7\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI32(buffer, -1);\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xF\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI32(buffer, std::numeric_limits<int32_t>::min());\n    EXPECT_EQ(\"\\x80\\x80\\x80\\x80\\x8\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteVarIntI64) {\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, 0);\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, 1);\n    EXPECT_EQ(\"\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, 128);\n    EXPECT_EQ(\"\\x80\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, (1 << 14) + 1);\n    EXPECT_EQ(\"\\x81\\x80\\x1\", 
buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, (1 << 28) + 1);\n    EXPECT_EQ(\"\\x81\\x80\\x80\\x80\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, (static_cast<int64_t>(1) << 56) + 1);\n    EXPECT_EQ(\"\\x81\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, std::numeric_limits<int32_t>::max());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\x7\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, std::numeric_limits<int64_t>::max());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\x7F\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, -1);\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, std::numeric_limits<int32_t>::min());\n    EXPECT_EQ(\"\\x80\\x80\\x80\\x80\\xF8\\xFF\\xFF\\xFF\\xFF\\x1\", buffer.toString());\n  }\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeVarIntI64(buffer, std::numeric_limits<int64_t>::min());\n    EXPECT_EQ(\"\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x1\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteZigZagI32) {\n  // zigzag(0) = 0\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI32(buffer, 0);\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n\n  // zigzag(-1) = 1\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI32(buffer, -1);\n    EXPECT_EQ(\"\\x1\", buffer.toString());\n  }\n\n  // zigzag(1) = 2\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI32(buffer, 1);\n    EXPECT_EQ(\"\\x2\", buffer.toString());\n  }\n\n  // zigzag(127) = 0xFE\n  {\n    Buffer::OwnedImpl buffer;\n    
BufferHelper::writeZigZagI32(buffer, 127);\n    EXPECT_EQ(\"\\xFE\\x1\", buffer.toString());\n  }\n\n  // zigzag(128) = 0x100\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI32(buffer, 128);\n    EXPECT_EQ(\"\\x80\\x2\", buffer.toString());\n  }\n\n  // zigzag(-128) = 0xFF\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI32(buffer, -128);\n    EXPECT_EQ(\"\\xFF\\x1\", buffer.toString());\n  }\n\n  // zigzag(0x7FFFFFFF) = 0xFFFFFFFE\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI32(buffer, std::numeric_limits<int32_t>::max());\n    EXPECT_EQ(\"\\xFE\\xFF\\xFF\\xFF\\xF\", buffer.toString());\n  }\n\n  // zigzag(0x80000000) = 0xFFFFFFFF\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI32(buffer, std::numeric_limits<int32_t>::min());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xF\", buffer.toString());\n  }\n}\n\nTEST(BufferHelperTest, WriteZigZagI64) {\n  // zigzag(0) = 0\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI64(buffer, 0);\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n\n  // zigzag(-1) = 1\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI64(buffer, -1);\n    EXPECT_EQ(\"\\x1\", buffer.toString());\n  }\n\n  // zigzag(1) = 2\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI64(buffer, 1);\n    EXPECT_EQ(\"\\x2\", buffer.toString());\n  }\n\n  // zigzag(127) = 0xFE\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI64(buffer, 127);\n    EXPECT_EQ(\"\\xFE\\x1\", buffer.toString());\n  }\n\n  // zigzag(128) = 0x100\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI64(buffer, 128);\n    EXPECT_EQ(\"\\x80\\x2\", buffer.toString());\n  }\n\n  // zigzag(-128) = 0xFF\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI64(buffer, -128);\n    EXPECT_EQ(\"\\xFF\\x1\", buffer.toString());\n  }\n\n  // zigzag(0x7FFFFFFF) = 0xFFFFFFFE\n  {\n    Buffer::OwnedImpl buffer;\n    
BufferHelper::writeZigZagI64(buffer, std::numeric_limits<int32_t>::max());\n    EXPECT_EQ(\"\\xFE\\xFF\\xFF\\xFF\\xF\", buffer.toString());\n  }\n\n  // zigzag(0x80000000) = 0xFFFFFFFF\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI64(buffer, std::numeric_limits<int32_t>::min());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xF\", buffer.toString());\n  }\n\n  // zigzag(0x7FFFFFFF FFFFFFFF) = 0xFFFFFFFFFFFFFFFE\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI64(buffer, std::numeric_limits<int64_t>::max());\n    EXPECT_EQ(\"\\xFE\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\x1\", buffer.toString());\n  }\n\n  // zigzag(0x8000000000000000) = 0xFFFFFFFFFFFFFFFF\n  {\n    Buffer::OwnedImpl buffer;\n    BufferHelper::writeZigZagI64(buffer, std::numeric_limits<int64_t>::min());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\x1\", buffer.toString());\n  }\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/compact_protocol_impl_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/compact_protocol_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::Values;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass CompactProtocolTest : public testing::Test {\npublic:\n  void resetMetadata() {\n    metadata_.setMethodName(\"-\");\n    metadata_.setMessageType(MessageType::Oneway);\n    metadata_.setSequenceId(1);\n  }\n\n  void expectMetadata(const std::string& name, MessageType msg_type, int32_t seq_id) {\n    EXPECT_TRUE(metadata_.hasMethodName());\n    EXPECT_EQ(name, metadata_.methodName());\n\n    EXPECT_TRUE(metadata_.hasMessageType());\n    EXPECT_EQ(msg_type, metadata_.messageType());\n\n    EXPECT_TRUE(metadata_.hasSequenceId());\n    EXPECT_EQ(seq_id, metadata_.sequenceId());\n\n    EXPECT_FALSE(metadata_.hasFrameSize());\n    EXPECT_FALSE(metadata_.hasProtocol());\n    EXPECT_FALSE(metadata_.hasAppException());\n    EXPECT_EQ(metadata_.headers().size(), 0);\n  }\n\n  void expectDefaultMetadata() { expectMetadata(\"-\", MessageType::Oneway, 1); }\n\n  MessageMetadata metadata_;\n};\n\nTEST_F(CompactProtocolTest, Name) {\n  CompactProtocolImpl proto;\n  EXPECT_EQ(proto.name(), \"compact\");\n}\n\nTEST_F(CompactProtocolTest, ReadMessageBegin) {\n  CompactProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    addRepeated(buffer, 3, 'x');\n\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 3);\n  }\n\n  // Wrong protocol version\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x0102);\n    addRepeated(buffer, 2, 
'x');\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMessageBegin(buffer, metadata_), EnvoyException,\n                              \"invalid compact protocol version 0x0102 != 0x8201\");\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // Invalid message type\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    // Message type is encoded in the 3 highest order bits of the second byte.\n    int8_t invalid_msg_type = static_cast<int8_t>(MessageType::LastMessageType) + 1;\n    buffer.writeBEInt<int16_t>(static_cast<int16_t>(0x8201 | (invalid_msg_type << 5)));\n    addRepeated(buffer, 2, 'x');\n\n    EXPECT_THROW_WITH_MESSAGE(\n        proto.readMessageBegin(buffer, metadata_), EnvoyException,\n        absl::StrCat(\"invalid compact protocol message type \", invalid_msg_type));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // Insufficient data to read message id\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8221);\n    addRepeated(buffer, 2, 0x81);\n\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // Invalid sequence id encoding\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8221);\n    addSeq(buffer, {0x81, 0x81, 0x81, 0x81, 0x81, 0}); // > 32 bit varint\n    buffer.writeByte(0);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMessageBegin(buffer, metadata_), EnvoyException,\n                              \"invalid compact protocol varint i32\");\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 9);\n  }\n\n  // Insufficient data to read message name length\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8221);\n    buffer.writeByte(32);\n    buffer.writeByte(0x81); // unterminated varint\n\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n    
expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // Insufficient data to read message name\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8221);\n    buffer.writeByte(32);\n    buffer.writeByte(10);\n    buffer.add(\"partial\");\n\n    EXPECT_FALSE(proto.readMessageBegin(buffer, metadata_));\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 11);\n  }\n\n  // Empty name\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8221);\n    buffer.writeByte(32);\n    buffer.writeByte(0);\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, metadata_));\n    expectMetadata(\"\", MessageType::Call, 32);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Invalid name length encoding\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8221);\n    buffer.writeByte(32);\n    addSeq(buffer, {0x81, 0x81, 0x81, 0x81, 0x81, 0}); // > 32 bit varint\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMessageBegin(buffer, metadata_), EnvoyException,\n                              \"invalid compact protocol varint i32\");\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 9);\n  }\n\n  // Invalid name length\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8221);\n    buffer.writeByte(32);\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}); // -1\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMessageBegin(buffer, metadata_), EnvoyException,\n                              \"negative compact protocol message name length -1\");\n    expectDefaultMetadata();\n    EXPECT_EQ(buffer.length(), 8);\n  }\n\n  // Named message\n  {\n    Buffer::OwnedImpl buffer;\n    resetMetadata();\n\n    buffer.writeBEInt<int16_t>(0x8221);\n    buffer.writeBEInt<int16_t>(0x8202); // 0x0102\n    buffer.writeByte(8);\n    buffer.add(\"the_name\");\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, metadata_));\n    
expectMetadata(\"the_name\", MessageType::Call, 0x102);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(CompactProtocolTest, ReadMessageEnd) {\n  Buffer::OwnedImpl buffer;\n  CompactProtocolImpl proto;\n\n  EXPECT_TRUE(proto.readMessageEnd(buffer));\n}\n\nTEST_F(CompactProtocolTest, ReadStruct) {\n  Buffer::OwnedImpl buffer;\n  CompactProtocolImpl proto;\n  std::string name = \"-\";\n\n  EXPECT_TRUE(proto.readStructBegin(buffer, name));\n  EXPECT_EQ(name, \"\");\n\n  EXPECT_TRUE(proto.readStructEnd(buffer));\n\n  EXPECT_THROW_WITH_MESSAGE(proto.readStructEnd(buffer), EnvoyException,\n                            \"invalid check for compact protocol struct end\")\n}\n\nTEST_F(CompactProtocolTest, ReadFieldBegin) {\n  CompactProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    EXPECT_FALSE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"-\");\n    EXPECT_EQ(field_type, FieldType::String);\n    EXPECT_EQ(field_id, 1);\n  }\n\n  // Stop field\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(0xF0);\n\n    EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"\");\n    EXPECT_EQ(field_type, FieldType::Stop);\n    EXPECT_EQ(field_id, 0);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Long-form field header, insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(0x05);\n\n    EXPECT_FALSE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"-\");\n    EXPECT_EQ(field_type, FieldType::String);\n    EXPECT_EQ(field_id, 1);\n    EXPECT_EQ(buffer.length(), 1);\n  }\n\n  // Long-form field 
header, insufficient data for field id (or invalid field id encoding)\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(0x05);\n    buffer.writeByte(0x81);\n\n    EXPECT_FALSE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"-\");\n    EXPECT_EQ(field_type, FieldType::String);\n    EXPECT_EQ(field_id, 1);\n    EXPECT_EQ(buffer.length(), 2);\n\n    addRepeated(buffer, 4, 0x81);\n    EXPECT_THROW_WITH_MESSAGE(proto.readFieldBegin(buffer, name, field_type, field_id),\n                              EnvoyException, \"invalid compact protocol zig-zag i32\");\n    EXPECT_EQ(name, \"-\");\n    EXPECT_EQ(field_type, FieldType::String);\n    EXPECT_EQ(field_id, 1);\n    EXPECT_EQ(buffer.length(), 6);\n  }\n\n  // Long-form field header, field id > 32767\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(0x05);\n    addSeq(buffer, {0x80, 0x80, 0x04}); // zigzag(0x10000) = 0x8000\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readFieldBegin(buffer, name, field_type, field_id),\n                              EnvoyException, \"invalid compact protocol field id 32768\");\n    EXPECT_EQ(name, \"-\");\n    EXPECT_EQ(field_type, FieldType::String);\n    EXPECT_EQ(field_id, 1);\n    EXPECT_EQ(buffer.length(), 4);\n  }\n\n  // Long-form field header, field id < 0\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(0x05);\n    addSeq(buffer, {0x01}); // zigzag(1) = -1\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readFieldBegin(buffer, name, field_type, field_id),\n                              EnvoyException, \"invalid compact protocol field id -1\");\n    EXPECT_EQ(name, \"-\");\n    EXPECT_EQ(field_type, FieldType::String);\n    
EXPECT_EQ(field_id, 1);\n    EXPECT_EQ(buffer.length(), 2);\n  }\n\n  // Unknown compact protocol field type\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(0x0D);\n    buffer.writeByte(0x04);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readFieldBegin(buffer, name, field_type, field_id),\n                              EnvoyException, \"unknown compact protocol field type 13\");\n    EXPECT_EQ(name, \"-\");\n    EXPECT_EQ(field_type, FieldType::String);\n    EXPECT_EQ(field_id, 1);\n    EXPECT_EQ(buffer.length(), 2);\n  }\n\n  // Valid long-form field-header\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(0x05);\n    buffer.writeByte(0x04);\n\n    EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"\");\n    EXPECT_EQ(field_type, FieldType::I32);\n    EXPECT_EQ(field_id, 2);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Valid short-form field header (must follow a valid long-form header)\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name = \"-\";\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n\n    buffer.writeByte(0xF5);\n\n    EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"\");\n    EXPECT_EQ(field_type, FieldType::I32);\n    EXPECT_EQ(field_id, 17);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(CompactProtocolTest, ReadFieldEnd) {\n  Buffer::OwnedImpl buffer;\n  CompactProtocolImpl proto;\n  EXPECT_TRUE(proto.readFieldEnd(buffer));\n}\n\nTEST_F(CompactProtocolTest, ReadMapBegin) {\n  CompactProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    
buffer.writeByte(0x81); // unterminated varint\n\n    EXPECT_FALSE(proto.readMapBegin(buffer, key_type, value_type, size));\n    EXPECT_EQ(key_type, FieldType::String);\n    EXPECT_EQ(value_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 1);\n  }\n\n  // Invalid map size encoding\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    addSeq(buffer, {0x81, 0x81, 0x81, 0x81, 0x81, 0x00});\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMapBegin(buffer, key_type, value_type, size),\n                              EnvoyException, \"invalid compact protocol varint i32\");\n    EXPECT_EQ(key_type, FieldType::String);\n    EXPECT_EQ(value_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 6);\n  }\n\n  // Invalid map size\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}); // -1\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMapBegin(buffer, key_type, value_type, size),\n                              EnvoyException, \"negative compact protocol map size -1\");\n    EXPECT_EQ(key_type, FieldType::String);\n    EXPECT_EQ(value_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 5);\n  }\n\n  // Insufficient data after reading map size\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(2);\n\n    EXPECT_FALSE(proto.readMapBegin(buffer, key_type, value_type, size));\n    EXPECT_EQ(key_type, FieldType::String);\n    EXPECT_EQ(value_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 1);\n  }\n\n  // Empty map\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = 
FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(0);\n\n    EXPECT_TRUE(proto.readMapBegin(buffer, key_type, value_type, size));\n    EXPECT_EQ(key_type, FieldType::Stop);\n    EXPECT_EQ(value_type, FieldType::Stop);\n    EXPECT_EQ(size, 0);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Non-empty map\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    addSeq(buffer, {0x80, 0x01}); // 0x80\n    buffer.writeByte(0x57);\n\n    EXPECT_TRUE(proto.readMapBegin(buffer, key_type, value_type, size));\n    EXPECT_EQ(key_type, FieldType::I32);\n    EXPECT_EQ(value_type, FieldType::Double);\n    EXPECT_EQ(size, 128);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Unknown key type\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(0x02);\n    buffer.writeByte(0xD7);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMapBegin(buffer, key_type, value_type, size),\n                              EnvoyException, \"unknown compact protocol field type 13\");\n    EXPECT_EQ(key_type, FieldType::String);\n    EXPECT_EQ(value_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 2);\n  }\n\n  // Unknown value type\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType key_type = FieldType::String;\n    FieldType value_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(0x02);\n    buffer.writeByte(0x5D);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readMapBegin(buffer, key_type, value_type, size),\n                              EnvoyException, \"unknown compact protocol field type 13\");\n    EXPECT_EQ(key_type, FieldType::String);\n    EXPECT_EQ(value_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 2);\n  
}\n}\n\nTEST_F(CompactProtocolTest, ReadMapEnd) {\n  Buffer::OwnedImpl buffer;\n  CompactProtocolImpl proto;\n  EXPECT_TRUE(proto.readMapEnd(buffer));\n}\n\nTEST_F(CompactProtocolTest, ReadListBegin) {\n  CompactProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    EXPECT_FALSE(proto.readListBegin(buffer, elem_type, size));\n    EXPECT_EQ(elem_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Short-form list header\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(0xE5);\n\n    EXPECT_TRUE(proto.readListBegin(buffer, elem_type, size));\n    EXPECT_EQ(elem_type, FieldType::I32);\n    EXPECT_EQ(size, 14);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Long-form list header, insufficient data to read size\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(0xF5);\n    buffer.writeByte(0x81);\n\n    EXPECT_FALSE(proto.readListBegin(buffer, elem_type, size));\n    EXPECT_EQ(elem_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 2);\n  }\n\n  // Long-form list header, invalid size encoding\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(0xF5);\n    addSeq(buffer, {0x81, 0x81, 0x81, 0x81, 0x81, 0}); // > 32 bit varint\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readListBegin(buffer, elem_type, size), EnvoyException,\n                              \"invalid compact protocol varint i32\");\n    EXPECT_EQ(elem_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 7);\n  }\n\n  // Long-form list header, illegal size\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    
buffer.writeByte(0xF5);\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}); // -1\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readListBegin(buffer, elem_type, size), EnvoyException,\n                              \"negative compact protocol list/set size -1\");\n    EXPECT_EQ(elem_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 6);\n  }\n\n  // Long-form list header\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(0xF5);\n    addSeq(buffer, {0x80, 0x01}); // 0x80\n\n    EXPECT_TRUE(proto.readListBegin(buffer, elem_type, size));\n    EXPECT_EQ(elem_type, FieldType::I32);\n    EXPECT_EQ(size, 128);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Unknown list type\n  {\n    Buffer::OwnedImpl buffer;\n    FieldType elem_type = FieldType::String;\n    uint32_t size = 1;\n\n    buffer.writeByte(0x1D);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readListBegin(buffer, elem_type, size), EnvoyException,\n                              \"unknown compact protocol field type 13\");\n    EXPECT_EQ(elem_type, FieldType::String);\n    EXPECT_EQ(size, 1);\n    EXPECT_EQ(buffer.length(), 1);\n  }\n}\n\nTEST_F(CompactProtocolTest, ReadListEnd) {\n  Buffer::OwnedImpl buffer;\n  CompactProtocolImpl proto;\n  EXPECT_TRUE(proto.readListEnd(buffer));\n}\n\nTEST_F(CompactProtocolTest, ReadSetBegin) {\n  CompactProtocolImpl proto;\n\n  // Test only the happy path, since this method is just delegated to readListBegin()\n  Buffer::OwnedImpl buffer;\n  FieldType elem_type = FieldType::String;\n  uint32_t size = 0;\n\n  buffer.writeByte(0x15);\n\n  EXPECT_TRUE(proto.readSetBegin(buffer, elem_type, size));\n  EXPECT_EQ(elem_type, FieldType::I32);\n  EXPECT_EQ(size, 1);\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST_F(CompactProtocolTest, ReadSetEnd) {\n  Buffer::OwnedImpl buffer;\n  CompactProtocolImpl proto;\n  EXPECT_TRUE(proto.readSetEnd(buffer));\n}\n\nTEST_F(CompactProtocolTest, 
ReadBool) {\n  CompactProtocolImpl proto;\n\n  // Bool field values are encoded in the field type\n  {\n    Buffer::OwnedImpl buffer;\n    std::string name;\n    FieldType field_type = FieldType::String;\n    int16_t field_id = 1;\n    bool value = false;\n\n    buffer.writeByte(0x01);\n    buffer.writeByte(0x04);\n\n    EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"\");\n    EXPECT_EQ(field_type, FieldType::Bool);\n    EXPECT_EQ(field_id, 2);\n    EXPECT_EQ(buffer.length(), 0);\n\n    EXPECT_TRUE(proto.readBool(buffer, value));\n    EXPECT_TRUE(value);\n\n    // readFieldEnd clears stored bool value\n    EXPECT_TRUE(proto.readFieldEnd(buffer));\n    EXPECT_FALSE(proto.readBool(buffer, value));\n\n    buffer.writeByte(0x02);\n    buffer.writeByte(0x06);\n\n    EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(name, \"\");\n    EXPECT_EQ(field_type, FieldType::Bool);\n    EXPECT_EQ(field_id, 3);\n    EXPECT_EQ(buffer.length(), 0);\n\n    EXPECT_TRUE(proto.readBool(buffer, value));\n    EXPECT_FALSE(value);\n\n    // readFieldEnd clears stored bool value\n    EXPECT_TRUE(proto.readFieldEnd(buffer));\n    EXPECT_FALSE(proto.readBool(buffer, value));\n  }\n\n  // Outside of the readFieldBegin/End pair (with boolean type), readBool expects a byte.\n  {\n    Buffer::OwnedImpl buffer;\n    bool value = false;\n\n    EXPECT_FALSE(proto.readBool(buffer, value));\n    EXPECT_FALSE(value);\n\n    buffer.writeByte(1);\n    EXPECT_TRUE(proto.readBool(buffer, value));\n    EXPECT_TRUE(value);\n    EXPECT_EQ(buffer.length(), 0);\n\n    buffer.writeByte(0);\n    EXPECT_TRUE(proto.readBool(buffer, value));\n    EXPECT_FALSE(value);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(CompactProtocolTest, ReadIntegerTypes) {\n  CompactProtocolImpl proto;\n\n  // Byte\n  {\n    Buffer::OwnedImpl buffer;\n    uint8_t value = 1;\n\n    EXPECT_FALSE(proto.readByte(buffer, value));\n    EXPECT_EQ(value, 
1);\n\n    buffer.writeByte(0);\n    EXPECT_TRUE(proto.readByte(buffer, value));\n    EXPECT_EQ(value, 0);\n    EXPECT_EQ(buffer.length(), 0);\n\n    buffer.writeByte(0xFF);\n    EXPECT_TRUE(proto.readByte(buffer, value));\n    EXPECT_EQ(value, 0xFF);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Int16\n  {\n    Buffer::OwnedImpl buffer;\n    int16_t value = 1;\n\n    // Insufficient data\n    EXPECT_FALSE(proto.readInt16(buffer, value));\n    EXPECT_EQ(value, 1);\n\n    // Still insufficient\n    buffer.writeByte(0x80);\n    EXPECT_FALSE(proto.readInt16(buffer, value));\n    EXPECT_EQ(value, 1);\n    buffer.drain(1);\n\n    addSeq(buffer, {0xFE, 0xFF, 0x03}); // zigzag(0xFFFE) = 0x7FFF\n    EXPECT_TRUE(proto.readInt16(buffer, value));\n    EXPECT_EQ(value, 32767);\n    EXPECT_EQ(buffer.length(), 0);\n\n    addSeq(buffer, {0xFF, 0xFF, 0x03}); // zigzag(0xFFFF) = 0x8000\n    EXPECT_TRUE(proto.readInt16(buffer, value));\n    EXPECT_EQ(value, -32768);\n    EXPECT_EQ(buffer.length(), 0);\n\n    // More than 32 bits\n    value = 1;\n    addSeq(buffer, {0x81, 0x81, 0x81, 0x81, 0x81, 0}); // > 32 bit varint\n    EXPECT_THROW_WITH_MESSAGE(proto.readInt16(buffer, value), EnvoyException,\n                              \"invalid compact protocol zig-zag i32\");\n    EXPECT_EQ(value, 1);\n    EXPECT_EQ(buffer.length(), 6);\n    buffer.drain(6);\n\n    // Within the encoding's range, but too large for i16\n    value = 1;\n    addSeq(buffer, {0xFE, 0xFF, 0x0F}); // zigzag(0x3FFFE) = 0x1FFFF\n    EXPECT_THROW_WITH_MESSAGE(proto.readInt16(buffer, value), EnvoyException,\n                              \"compact protocol i16 exceeds allowable range 131071\");\n    EXPECT_EQ(buffer.length(), 3);\n  }\n\n  // Int32\n  {\n    Buffer::OwnedImpl buffer;\n    int32_t value = 1;\n\n    // Insufficient data\n    EXPECT_FALSE(proto.readInt32(buffer, value));\n    EXPECT_EQ(value, 1);\n\n    // Still insufficient\n    buffer.writeByte(0x80);\n    EXPECT_FALSE(proto.readInt32(buffer, 
value));\n    EXPECT_EQ(value, 1);\n    buffer.drain(1);\n\n    addSeq(buffer, {0xFE, 0xFF, 0xFF, 0xFF, 0x0F}); // zigzag(0xFFFFFFFE) = 0x7FFFFFFF\n    EXPECT_TRUE(proto.readInt32(buffer, value));\n    EXPECT_EQ(value, std::numeric_limits<int32_t>::max());\n\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x0F}); // zigzag(0xFFFFFFFF) = 0x80000000\n    EXPECT_TRUE(proto.readInt32(buffer, value));\n    EXPECT_EQ(value, std::numeric_limits<int32_t>::min());\n\n    // More than 32 bits\n    value = 1;\n    addSeq(buffer, {0x81, 0x81, 0x81, 0x81, 0x81, 0}); // > 32 bit varint\n    EXPECT_THROW_WITH_MESSAGE(proto.readInt32(buffer, value), EnvoyException,\n                              \"invalid compact protocol zig-zag i32\");\n    EXPECT_EQ(value, 1);\n    EXPECT_EQ(buffer.length(), 6);\n  }\n\n  // Int64\n  {\n    Buffer::OwnedImpl buffer;\n    int64_t value = 1;\n\n    // Insufficient data\n    EXPECT_FALSE(proto.readInt64(buffer, value));\n    EXPECT_EQ(value, 1);\n\n    // Still insufficient\n    buffer.writeByte(0x80);\n    EXPECT_FALSE(proto.readInt64(buffer, value));\n    EXPECT_EQ(value, 1);\n    buffer.drain(1);\n\n    // zigzag(0xFFFFFFFFFFFFFFFE) = 0x7FFFFFFFFFFFFFFF\n    addSeq(buffer, {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01});\n    EXPECT_TRUE(proto.readInt64(buffer, value));\n    EXPECT_EQ(value, std::numeric_limits<int64_t>::max());\n\n    // zigzag(0xFFFFFFFFFFFFFFFF) = 0x8000000000000000\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01});\n    EXPECT_TRUE(proto.readInt64(buffer, value));\n    EXPECT_EQ(value, std::numeric_limits<int64_t>::min());\n\n    // More than 64 bits\n    value = 1;\n    addRepeated(buffer, 11, 0x81); // > 64 bit varint\n    EXPECT_THROW_WITH_MESSAGE(proto.readInt64(buffer, value), EnvoyException,\n                              \"invalid compact protocol zig-zag i64\");\n    EXPECT_EQ(value, 1);\n    EXPECT_EQ(buffer.length(), 11);\n  }\n}\n\nTEST_F(CompactProtocolTest, ReadDouble) 
{\n  CompactProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    double value = 1.0;\n    addRepeated(buffer, 7, 0);\n    EXPECT_FALSE(proto.readDouble(buffer, value));\n    EXPECT_EQ(value, 1.0);\n    EXPECT_EQ(buffer.length(), 7);\n  }\n\n  // double value\n  {\n    Buffer::OwnedImpl buffer;\n    double value = 1.0;\n\n    // 01000000 00001000 00000000 0000000 00000000 00000000 00000000 000000000 = 3\n    // c.f. https://en.wikipedia.org/wiki/Double-precision_floating-point_format\n    buffer.writeByte(0x40);\n    buffer.writeByte(0x08);\n    addRepeated(buffer, 6, 0);\n\n    EXPECT_TRUE(proto.readDouble(buffer, value));\n    EXPECT_EQ(value, 3.0);\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(CompactProtocolTest, ReadString) {\n  CompactProtocolImpl proto;\n\n  // Insufficient data\n  {\n    Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    EXPECT_FALSE(proto.readString(buffer, value));\n    EXPECT_EQ(value, \"-\");\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Insufficient data to read length\n  {\n    Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    buffer.writeByte(0x81);\n\n    EXPECT_FALSE(proto.readString(buffer, value));\n    EXPECT_EQ(value, \"-\");\n    EXPECT_EQ(buffer.length(), 1);\n  }\n\n  // Insufficient data to read string\n  {\n    Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    buffer.writeByte(0x4);\n\n    EXPECT_FALSE(proto.readString(buffer, value));\n    EXPECT_EQ(value, \"-\");\n    EXPECT_EQ(buffer.length(), 1);\n  }\n\n  // Invalid length\n  {\n    Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}); // -1\n\n    EXPECT_THROW_WITH_MESSAGE(proto.readString(buffer, value), EnvoyException,\n                              \"negative compact protocol string/binary length -1\");\n    EXPECT_EQ(value, \"-\");\n    EXPECT_EQ(buffer.length(), 5);\n  }\n\n  // empty string\n  {\n    
Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    buffer.writeByte(0);\n\n    EXPECT_TRUE(proto.readString(buffer, value));\n    EXPECT_EQ(value, \"\");\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // non-empty string\n  {\n    Buffer::OwnedImpl buffer;\n    std::string value = \"-\";\n\n    buffer.writeByte(0x06);\n    buffer.add(\"string\");\n\n    EXPECT_TRUE(proto.readString(buffer, value));\n    EXPECT_EQ(value, \"string\");\n    EXPECT_EQ(buffer.length(), 0);\n  }\n}\n\nTEST_F(CompactProtocolTest, ReadBinary) {\n  // Test only the happy path, since this method is just delegated to readString()\n  CompactProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  std::string value = \"-\";\n\n  buffer.writeByte(0x06);\n  buffer.add(\"string\");\n\n  EXPECT_TRUE(proto.readBinary(buffer, value));\n  EXPECT_EQ(value, \"string\");\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nclass CompactProtocolFieldTypeTest : public testing::TestWithParam<uint8_t> {};\n\nTEST_P(CompactProtocolFieldTypeTest, ConvertsToFieldType) {\n  uint8_t compact_field_type = GetParam();\n\n  CompactProtocolImpl proto;\n  std::string name = \"-\";\n  int8_t invalid_field_type = static_cast<int8_t>(FieldType::LastFieldType) + 1;\n  FieldType field_type = static_cast<FieldType>(invalid_field_type);\n  int16_t field_id = 0;\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeByte(compact_field_type);\n    buffer.writeByte(0x02); // zigzag(2) = 1\n\n    EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_LE(field_type, FieldType::LastFieldType);\n  }\n\n  {\n    // Long form field header\n    Buffer::OwnedImpl buffer;\n    proto.writeFieldBegin(buffer, \"-\", field_type, 100);\n    if (field_type == FieldType::Bool) {\n      proto.writeBool(buffer, compact_field_type == 1);\n    }\n\n    uint8_t* data = static_cast<uint8_t*>(buffer.linearize(1));\n    EXPECT_NE(nullptr, data);\n    EXPECT_EQ(compact_field_type, *data);\n  
}\n}\n\nINSTANTIATE_TEST_SUITE_P(CompactFieldTypes, CompactProtocolFieldTypeTest,\n                         Values(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12));\n\nTEST_F(CompactProtocolTest, WriteMessageBegin) {\n  CompactProtocolImpl proto;\n\n  // Named call\n  {\n    metadata_.setMethodName(\"message\");\n    metadata_.setMessageType(MessageType::Call);\n    metadata_.setSequenceId(1);\n\n    Buffer::OwnedImpl buffer;\n    proto.writeMessageBegin(buffer, metadata_);\n    EXPECT_EQ(std::string(\"\\x82\\x21\\x1\\x7message\", 11), buffer.toString());\n  }\n\n  // Unnamed oneway\n  {\n    metadata_.setMethodName(\"\");\n    metadata_.setMessageType(MessageType::Oneway);\n    metadata_.setSequenceId(2);\n\n    Buffer::OwnedImpl buffer;\n    proto.writeMessageBegin(buffer, metadata_);\n    EXPECT_EQ(std::string(\"\\x82\\x81\\x2\\0\", 4), buffer.toString());\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteMessageEnd) {\n  CompactProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeMessageEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(CompactProtocolTest, WriteStruct) {\n  CompactProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n\n  proto.writeStructBegin(buffer, \"unused\");\n  proto.writeStructEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n\n  // struct begin/end always appear in nested pairs\n  EXPECT_THROW_WITH_MESSAGE(proto.writeStructEnd(buffer), EnvoyException,\n                            \"invalid write of compact protocol struct end\")\n}\n\nTEST_F(CompactProtocolTest, WriteFieldBegin) {\n  // Stop field\n  {\n    CompactProtocolImpl proto;\n    Buffer::OwnedImpl buffer;\n    proto.writeFieldBegin(buffer, \"unused\", FieldType::Stop, 1);\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n\n  {\n    CompactProtocolImpl proto;\n\n    // Short form\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::I32, 1);\n      EXPECT_EQ(\"\\x15\", buffer.toString());\n    }\n\n    // Long 
form\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::Struct, 17);\n      EXPECT_EQ(std::string(\"\\xC\\x22\", 2), buffer.toString());\n    }\n\n    // Short form\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::Byte, 32);\n      EXPECT_EQ(\"\\xF3\", buffer.toString());\n    }\n\n    // Short form\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::String, 33);\n      EXPECT_EQ(\"\\x18\", buffer.toString());\n    }\n  }\n\n  {\n    CompactProtocolImpl proto;\n\n    // Long form\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::I32, 16);\n      EXPECT_EQ(std::string(\"\\x5\\x20\", 2), buffer.toString());\n    }\n\n    // Short form\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::Struct, 17);\n      EXPECT_EQ(\"\\x1C\", buffer.toString());\n    }\n\n    // Long form\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::Byte, 33);\n      EXPECT_EQ(std::string(\"\\x3\\x42\", 2), buffer.toString());\n    }\n\n    // Long form (3 bytes)\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::String, 64);\n      EXPECT_EQ(std::string(\"\\x8\\x80\\x1\", 3), buffer.toString());\n    }\n  }\n\n  // Unknown field type\n  {\n    CompactProtocolImpl proto;\n    Buffer::OwnedImpl buffer;\n\n    int8_t invalid_field_type = static_cast<int8_t>(FieldType::LastFieldType) + 1;\n    FieldType field_type = static_cast<FieldType>(invalid_field_type);\n\n    EXPECT_THROW_WITH_MESSAGE(proto.writeFieldBegin(buffer, \"unused\", field_type, 1),\n                              EnvoyException,\n                              absl::StrCat(\"unknown protocol field type \", invalid_field_type));\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteFieldEnd) {\n  
CompactProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeFieldEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(CompactProtocolTest, WriteBoolField) {\n  // Boolean struct fields are encoded with custom types to save a byte\n\n  // Short form field\n  {\n    CompactProtocolImpl proto;\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::Bool, 8);\n      EXPECT_EQ(0, buffer.length());\n      proto.writeBool(buffer, true);\n      EXPECT_EQ(\"\\x81\", buffer.toString());\n    }\n\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::Bool, 12);\n      EXPECT_EQ(0, buffer.length());\n      proto.writeBool(buffer, false);\n      EXPECT_EQ(\"\\x42\", buffer.toString());\n    }\n  }\n\n  // Long form field\n  {\n    CompactProtocolImpl proto;\n\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::Bool, 16);\n      EXPECT_EQ(0, buffer.length());\n      proto.writeBool(buffer, true);\n      EXPECT_EQ(std::string(\"\\x1\\x20\", 2), buffer.toString());\n    }\n\n    {\n      Buffer::OwnedImpl buffer;\n      proto.writeFieldBegin(buffer, \"unused\", FieldType::Bool, 32);\n      EXPECT_EQ(0, buffer.length());\n      proto.writeBool(buffer, false);\n      EXPECT_EQ(std::string(\"\\x2\\x40\", 2), buffer.toString());\n    }\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteMapBegin) {\n  CompactProtocolImpl proto;\n\n  // Empty map\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeMapBegin(buffer, FieldType::I32, FieldType::Bool, 0);\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n\n  // Non-empty map\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeMapBegin(buffer, FieldType::I32, FieldType::Bool, 3);\n    EXPECT_EQ(\"\\3\\x51\", buffer.toString());\n  }\n\n  // Oversized map\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(\n        proto.writeMapBegin(buffer, FieldType::I32, 
FieldType::Bool, 3000000000), EnvoyException,\n        \"illegal compact protocol map size 3000000000\");\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteMapEnd) {\n  CompactProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeMapEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(CompactProtocolTest, WriteListBegin) {\n  CompactProtocolImpl proto;\n\n  // Empty list\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeListBegin(buffer, FieldType::I32, 0);\n    EXPECT_EQ(\"\\x5\", buffer.toString());\n  }\n\n  // List (short form)\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeListBegin(buffer, FieldType::I32, 14);\n    EXPECT_EQ(\"\\xE5\", buffer.toString());\n  }\n\n  // List (long form)\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeListBegin(buffer, FieldType::Bool, 15);\n    EXPECT_EQ(\"\\xF1\\xF\", buffer.toString());\n  }\n\n  // Oversized list\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(proto.writeListBegin(buffer, FieldType::I32, 3000000000),\n                              EnvoyException, \"illegal compact protocol list/set size 3000000000\");\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteListEnd) {\n  CompactProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeListEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(CompactProtocolTest, WriteSetBegin) {\n  CompactProtocolImpl proto;\n\n  // Empty set only, as writeSetBegin delegates to writeListBegin.\n  Buffer::OwnedImpl buffer;\n  proto.writeSetBegin(buffer, FieldType::I32, 0);\n  EXPECT_EQ(\"\\x5\", buffer.toString());\n}\n\nTEST_F(CompactProtocolTest, WriteSetEnd) {\n  CompactProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeSetEnd(buffer);\n  EXPECT_EQ(0, buffer.length());\n}\n\nTEST_F(CompactProtocolTest, WriteBool) {\n  CompactProtocolImpl proto;\n\n  // Non-field bools (see WriteBoolField test)\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeBool(buffer, true);\n    EXPECT_EQ(\"\\x1\", buffer.toString());\n  }\n\n  {\n   
 Buffer::OwnedImpl buffer;\n    proto.writeBool(buffer, false);\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteByte) {\n  CompactProtocolImpl proto;\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeByte(buffer, -1);\n    EXPECT_EQ(\"\\xFF\", buffer.toString());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeByte(buffer, 127);\n    EXPECT_EQ(\"\\x7F\", buffer.toString());\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteInt16) {\n  CompactProtocolImpl proto;\n\n  // zigzag(1) = 2\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt16(buffer, 1);\n    EXPECT_EQ(\"\\x2\", buffer.toString());\n  }\n\n  // zigzag(128) = 256 (0x200)\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt16(buffer, 128);\n    EXPECT_EQ(\"\\x80\\x2\", buffer.toString());\n  }\n\n  // zigzag(-1) = 1\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt16(buffer, -1);\n    EXPECT_EQ(\"\\x1\", buffer.toString());\n  }\n\n  // zigzag(32767) = 65534 (0xFFFE)\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt16(buffer, std::numeric_limits<int16_t>::max());\n    EXPECT_EQ(\"\\xFE\\xFF\\x3\", buffer.toString());\n  }\n\n  // zigzag(-32768) = 65535 (0xFFFF)\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt16(buffer, std::numeric_limits<int16_t>::min());\n    EXPECT_EQ(\"\\xFF\\xFF\\x3\", buffer.toString());\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteInt32) {\n  CompactProtocolImpl proto;\n\n  // zigzag(1) = 2\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt32(buffer, 1);\n    EXPECT_EQ(\"\\x2\", buffer.toString());\n  }\n\n  // zigzag(128) = 256 (0x200)\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt32(buffer, 128);\n    EXPECT_EQ(\"\\x80\\x2\", buffer.toString());\n  }\n\n  // zigzag(-1) = 1\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt32(buffer, -1);\n    EXPECT_EQ(\"\\x1\", buffer.toString());\n  }\n\n  // zigzag(0x7FFFFFFF) = 0xFFFFFFFE\n  {\n    Buffer::OwnedImpl buffer;\n    
proto.writeInt32(buffer, std::numeric_limits<int32_t>::max());\n    EXPECT_EQ(\"\\xFE\\xFF\\xFF\\xFF\\xF\", buffer.toString());\n  }\n\n  // zigzag(0x80000000) = 0xFFFFFFFF\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt32(buffer, std::numeric_limits<int32_t>::min());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xF\", buffer.toString());\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteInt64) {\n  CompactProtocolImpl proto;\n\n  // zigzag(1) = 2\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt64(buffer, 1);\n    EXPECT_EQ(\"\\x2\", buffer.toString());\n  }\n\n  // zigzag(128) = 256 (0x200)\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt64(buffer, 128);\n    EXPECT_EQ(\"\\x80\\x2\", buffer.toString());\n  }\n\n  // zigzag(-1) = 1\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt64(buffer, -1);\n    EXPECT_EQ(\"\\x1\", buffer.toString());\n  }\n\n  // zigzag(0x7FFFFFFF FFFFFFFF) = 0xFFFFFFFF FFFFFFFE\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt64(buffer, std::numeric_limits<int64_t>::max());\n    EXPECT_EQ(\"\\xFE\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\x1\", buffer.toString());\n  }\n\n  // zigzag(0x80000000 00000000) = 0xFFFFFFFF FFFFFFFF\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeInt64(buffer, std::numeric_limits<int64_t>::min());\n    EXPECT_EQ(\"\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\x1\", buffer.toString());\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteDouble) {\n  CompactProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeDouble(buffer, 3.0);\n  EXPECT_EQ(std::string(\"\\x40\\x8\\0\\0\\0\\0\\0\\0\", 8), buffer.toString());\n}\n\nTEST_F(CompactProtocolTest, WriteString) {\n  CompactProtocolImpl proto;\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeString(buffer, \"abc\");\n    EXPECT_EQ(std::string(\"\\x3\"\n                          \"abc\",\n                          4),\n              buffer.toString());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    std::string data(192, 'a');\n    
proto.writeString(buffer, data);\n    EXPECT_EQ(std::string(\"\\xC0\\x1\") + data, buffer.toString());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    proto.writeString(buffer, \"\");\n    EXPECT_EQ(std::string(\"\\0\", 1), buffer.toString());\n  }\n}\n\nTEST_F(CompactProtocolTest, WriteBinary) {\n  CompactProtocolImpl proto;\n\n  // writeBinary is an alias for writeString\n  Buffer::OwnedImpl buffer;\n  proto.writeBinary(buffer, \"abc\");\n  EXPECT_EQ(std::string(\"\\x3\"\n                        \"abc\",\n                        4),\n            buffer.toString());\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/config_test.cc",
    "content": "#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/thrift_proxy/config.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/factory_base.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace {\n\nstd::vector<envoy::extensions::filters::network::thrift_proxy::v3::TransportType>\ngetTransportTypes() {\n  std::vector<envoy::extensions::filters::network::thrift_proxy::v3::TransportType> v;\n  int transport = envoy::extensions::filters::network::thrift_proxy::v3::TransportType_MIN;\n  while (transport <= envoy::extensions::filters::network::thrift_proxy::v3::TransportType_MAX) {\n    v.push_back(static_cast<envoy::extensions::filters::network::thrift_proxy::v3::TransportType>(\n        transport));\n    transport++;\n  }\n  return v;\n}\n\nstd::vector<envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType>\ngetProtocolTypes() {\n  std::vector<envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType> v;\n  int protocol = envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType_MIN;\n  while (protocol <= envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType_MAX) {\n    v.push_back(\n        static_cast<envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType>(protocol));\n    protocol++;\n  }\n  return v;\n}\n\nenvoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy\nparseThriftProxyFromV2Yaml(const std::string& yaml) {\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy thrift_proxy;\n  
TestUtility::loadFromYaml(yaml, thrift_proxy);\n  return thrift_proxy;\n}\n\n} // namespace\n\nclass ThriftFilterConfigTestBase {\npublic:\n  void testConfig(envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy& config) {\n    Network::FilterFactoryCb cb;\n    EXPECT_NO_THROW({ cb = factory_.createFilterFactoryFromProto(config, context_); });\n    EXPECT_TRUE(factory_.isTerminalFilter());\n\n    Network::MockConnection connection;\n    EXPECT_CALL(connection, addReadFilter(_));\n    cb(connection);\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  ThriftProxyFilterConfigFactory factory_;\n};\n\nclass ThriftFilterConfigTest : public testing::Test, public ThriftFilterConfigTestBase {};\n\nclass ThriftFilterTransportConfigTest\n    : public testing::TestWithParam<\n          envoy::extensions::filters::network::thrift_proxy::v3::TransportType>,\n      public ThriftFilterConfigTestBase {};\n\nINSTANTIATE_TEST_SUITE_P(TransportTypes, ThriftFilterTransportConfigTest,\n                         testing::ValuesIn(getTransportTypes()));\n\nclass ThriftFilterProtocolConfigTest\n    : public testing::TestWithParam<\n          envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType>,\n      public ThriftFilterConfigTestBase {};\n\nINSTANTIATE_TEST_SUITE_P(ProtocolTypes, ThriftFilterProtocolConfigTest,\n                         testing::ValuesIn(getProtocolTypes()));\n\nTEST_F(ThriftFilterConfigTest, ValidateFail) {\n  EXPECT_THROW(factory_.createFilterFactoryFromProto(\n                   envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy(), context_),\n               ProtoValidationException);\n}\n\nTEST_F(ThriftFilterConfigTest, ValidProtoConfiguration) {\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config{};\n  config.set_stat_prefix(\"my_stat_prefix\");\n\n  testConfig(config);\n}\n\nTEST_P(ThriftFilterTransportConfigTest, ValidProtoConfiguration) {\n  
envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config{};\n  config.set_stat_prefix(\"my_stat_prefix\");\n  config.set_transport(GetParam());\n  testConfig(config);\n}\n\nTEST_P(ThriftFilterProtocolConfigTest, ValidProtoConfiguration) {\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config{};\n  config.set_stat_prefix(\"my_stat_prefix\");\n  config.set_protocol(GetParam());\n  testConfig(config);\n}\n\nTEST_F(ThriftFilterConfigTest, ThriftProxyWithEmptyProto) {\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config =\n      *dynamic_cast<envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy*>(\n          factory_.createEmptyConfigProto().get());\n  config.set_stat_prefix(\"my_stat_prefix\");\n\n  testConfig(config);\n}\n\n// Test config with an invalid cluster_header.\nTEST_F(ThriftFilterConfigTest, RouterConfigWithInvalidClusterHeader) {\n  const std::string yaml = R\"EOF(\nstat_prefix: thrift\nroute_config:\n  name: local_route\n  routes:\n    match:\n      method_name: A\n    route:\n      cluster_header: A\nthrift_filters:\n  - name: envoy.filters.thrift.router\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config =\n      parseThriftProxyFromV2Yaml(yaml);\n  std::string header = \"A\";\n  header.push_back('\\000'); // Add an invalid character for http header.\n  config.mutable_route_config()->mutable_routes()->at(0).mutable_route()->set_cluster_header(\n      header);\n  EXPECT_THROW(factory_.createFilterFactoryFromProto(config, context_), ProtoValidationException);\n}\n\n// Test config with an explicitly defined router filter.\nTEST_F(ThriftFilterConfigTest, ThriftProxyWithExplicitRouterConfig) {\n  const std::string yaml = R\"EOF(\nstat_prefix: thrift\nroute_config:\n  name: local_route\nthrift_filters:\n  - name: envoy.filters.thrift.router\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config =\n      
parseThriftProxyFromV2Yaml(yaml);\n  testConfig(config);\n}\n\n// Test config with an unknown filter.\nTEST_F(ThriftFilterConfigTest, ThriftProxyWithUnknownFilter) {\n  const std::string yaml = R\"EOF(\nstat_prefix: thrift\nroute_config:\n  name: local_route\nthrift_filters:\n  - name: no_such_filter\n  - name: envoy.filters.thrift.router\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config =\n      parseThriftProxyFromV2Yaml(yaml);\n\n  EXPECT_THROW_WITH_REGEX(factory_.createFilterFactoryFromProto(config, context_), EnvoyException,\n                          \"no_such_filter\");\n}\n\n// Test config with multiple filters.\nTEST_F(ThriftFilterConfigTest, ThriftProxyWithMultipleFilters) {\n  const std::string yaml = R\"EOF(\nstat_prefix: ingress\nroute_config:\n  name: local_route\nthrift_filters:\n  - name: envoy.filters.thrift.mock_filter\n    typed_config:\n      \"@type\": type.googleapis.com/google.protobuf.Struct\n      value:\n        key: value\n  - name: envoy.filters.thrift.router\n)EOF\";\n\n  ThriftFilters::MockFilterConfigFactory factory;\n  Registry::InjectFactory<ThriftFilters::NamedThriftFilterConfigFactory> registry(factory);\n\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config =\n      parseThriftProxyFromV2Yaml(yaml);\n  testConfig(config);\n\n  EXPECT_EQ(1, factory.config_struct_.fields_size());\n  EXPECT_EQ(\"value\", factory.config_struct_.fields().at(\"key\").string_value());\n  EXPECT_EQ(\"thrift.ingress.\", factory.config_stat_prefix_);\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/conn_manager_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.validate.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/binary_protocol_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n#include \"extensions/filters/network/thrift_proxy/config.h\"\n#include \"extensions/filters/network/thrift_proxy/conn_manager.h\"\n#include \"extensions/filters/network/thrift_proxy/framed_transport_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/header_transport_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass TestConfigImpl : public ConfigImpl {\npublic:\n  TestConfigImpl(envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy proto_config,\n                 Server::Configuration::MockFactoryContext& context,\n                 ThriftFilters::DecoderFilterSharedPtr decoder_filter, ThriftFilterStats& stats)\n      : ConfigImpl(proto_config, context), decoder_filter_(decoder_filter), stats_(stats) {}\n\n  // ConfigImpl\n  ThriftFilterStats& stats() override { return stats_; }\n  void createFilterChain(ThriftFilters::FilterChainFactoryCallbacks& callbacks) override {\n    if (custom_filter_) {\n      
callbacks.addDecoderFilter(custom_filter_);\n    }\n    callbacks.addDecoderFilter(decoder_filter_);\n  }\n  TransportPtr createTransport() override {\n    if (transport_) {\n      return TransportPtr{transport_};\n    }\n    return ConfigImpl::createTransport();\n  }\n  ProtocolPtr createProtocol() override {\n    if (protocol_) {\n      return ProtocolPtr{protocol_};\n    }\n    return ConfigImpl::createProtocol();\n  }\n\n  ThriftFilters::DecoderFilterSharedPtr custom_filter_;\n  ThriftFilters::DecoderFilterSharedPtr decoder_filter_;\n  ThriftFilterStats& stats_;\n  MockTransport* transport_{};\n  MockProtocol* protocol_{};\n};\n\nclass ThriftConnectionManagerTest : public testing::Test {\npublic:\n  ThriftConnectionManagerTest() : stats_(ThriftFilterStats::generateStats(\"test.\", store_)) {}\n  ~ThriftConnectionManagerTest() override {\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  }\n\n  void initializeFilter() { initializeFilter(\"\"); }\n\n  void initializeFilter(const std::string& yaml) {\n    // Destroy any existing filter first.\n    filter_ = nullptr;\n\n    for (const auto& counter : store_.counters()) {\n      counter->reset();\n    }\n\n    if (yaml.empty()) {\n      proto_config_.set_stat_prefix(\"test\");\n    } else {\n      TestUtility::loadFromYaml(yaml, proto_config_);\n      TestUtility::validate(proto_config_);\n    }\n\n    proto_config_.set_stat_prefix(\"test\");\n\n    decoder_filter_ = std::make_shared<NiceMock<ThriftFilters::MockDecoderFilter>>();\n\n    config_ = std::make_unique<TestConfigImpl>(proto_config_, context_, decoder_filter_, stats_);\n    if (custom_transport_) {\n      config_->transport_ = custom_transport_;\n    }\n    if (custom_protocol_) {\n      config_->protocol_ = custom_protocol_;\n    }\n    if (custom_filter_) {\n      config_->custom_filter_ = custom_filter_;\n    }\n\n    ON_CALL(random_, random()).WillByDefault(Return(42));\n    filter_ = std::make_unique<ConnectionManager>(\n     
   *config_, random_, filter_callbacks_.connection_.dispatcher_.timeSource());\n    filter_->initializeReadFilterCallbacks(filter_callbacks_);\n    filter_->onNewConnection();\n\n    // NOP currently.\n    filter_->onAboveWriteBufferHighWatermark();\n    filter_->onBelowWriteBufferLowWatermark();\n  }\n\n  void writeMessage(Buffer::Instance& buffer, TransportType transport_type,\n                    ProtocolType protocol_type, MessageType msg_type, int32_t seq_id) {\n    Buffer::OwnedImpl msg;\n    ProtocolPtr proto = NamedProtocolConfigFactory::getFactory(protocol_type).createProtocol();\n    MessageMetadata metadata;\n    metadata.setProtocol(protocol_type);\n    metadata.setMethodName(\"name\");\n    metadata.setMessageType(msg_type);\n    metadata.setSequenceId(seq_id);\n\n    proto->writeMessageBegin(msg, metadata);\n    proto->writeStructBegin(msg, \"response\");\n    proto->writeFieldBegin(msg, \"success\", FieldType::String, 0);\n    proto->writeString(msg, \"field\");\n    proto->writeFieldEnd(msg);\n    proto->writeFieldBegin(msg, \"\", FieldType::Stop, 0);\n    proto->writeStructEnd(msg);\n    proto->writeMessageEnd(msg);\n\n    TransportPtr transport =\n        NamedTransportConfigFactory::getFactory(transport_type).createTransport();\n    transport->encodeFrame(buffer, metadata, msg);\n  }\n\n  void writeFramedBinaryMessage(Buffer::Instance& buffer, MessageType msg_type, int32_t seq_id) {\n    writeMessage(buffer, TransportType::Framed, ProtocolType::Binary, msg_type, seq_id);\n  }\n\n  void writeComplexFramedBinaryMessage(Buffer::Instance& buffer, MessageType msg_type,\n                                       int32_t seq_id) {\n    Buffer::OwnedImpl msg;\n    ProtocolPtr proto =\n        NamedProtocolConfigFactory::getFactory(ProtocolType::Binary).createProtocol();\n    MessageMetadata metadata;\n    metadata.setMethodName(\"name\");\n    metadata.setMessageType(msg_type);\n    metadata.setSequenceId(seq_id);\n\n    proto->writeMessageBegin(msg, 
metadata);\n    proto->writeStructBegin(msg, \"wrapper\"); // call args struct or response struct\n    proto->writeFieldBegin(msg, \"wrapper_field\", FieldType::Struct, 0); // call arg/response success\n\n    proto->writeStructBegin(msg, \"payload\");\n    proto->writeFieldBegin(msg, \"f1\", FieldType::Bool, 1);\n    proto->writeBool(msg, true);\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"f2\", FieldType::Byte, 2);\n    proto->writeByte(msg, 2);\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"f3\", FieldType::Double, 3);\n    proto->writeDouble(msg, 3.0);\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"f4\", FieldType::I16, 4);\n    proto->writeInt16(msg, 4);\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"f5\", FieldType::I32, 5);\n    proto->writeInt32(msg, 5);\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"f6\", FieldType::I64, 6);\n    proto->writeInt64(msg, 6);\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"f7\", FieldType::String, 7);\n    proto->writeString(msg, \"seven\");\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"f8\", FieldType::Map, 8);\n    proto->writeMapBegin(msg, FieldType::I32, FieldType::I32, 1);\n    proto->writeInt32(msg, 8);\n    proto->writeInt32(msg, 8);\n    proto->writeMapEnd(msg);\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"f9\", FieldType::List, 9);\n    proto->writeListBegin(msg, FieldType::I32, 1);\n    proto->writeInt32(msg, 8);\n    proto->writeListEnd(msg);\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"f10\", FieldType::Set, 10);\n    proto->writeSetBegin(msg, FieldType::I32, 1);\n    proto->writeInt32(msg, 8);\n    proto->writeSetEnd(msg);\n    proto->writeFieldEnd(msg);\n\n    proto->writeFieldBegin(msg, \"\", FieldType::Stop, 0); // payload stop field\n    proto->writeStructEnd(msg);\n    proto->writeFieldEnd(msg);\n\n    
proto->writeFieldBegin(msg, \"\", FieldType::Stop, 0); // wrapper stop field\n    proto->writeStructEnd(msg);\n    proto->writeMessageEnd(msg);\n\n    TransportPtr transport =\n        NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport();\n    transport->encodeFrame(buffer, metadata, msg);\n  }\n\n  void writePartialFramedBinaryMessage(Buffer::Instance& buffer, MessageType msg_type,\n                                       int32_t seq_id, bool start) {\n    Buffer::OwnedImpl frame;\n    writeFramedBinaryMessage(frame, msg_type, seq_id);\n\n    if (start) {\n      buffer.move(frame, 27);\n    } else {\n      frame.drain(27);\n      buffer.move(frame);\n    }\n  }\n\n  void writeVoidFramedBinaryMessage(Buffer::Instance& buffer, int32_t seq_id) {\n    Buffer::OwnedImpl msg;\n    ProtocolPtr proto =\n        NamedProtocolConfigFactory::getFactory(ProtocolType::Binary).createProtocol();\n    MessageMetadata metadata;\n    metadata.setMethodName(\"name\");\n    metadata.setMessageType(MessageType::Reply);\n    metadata.setSequenceId(seq_id);\n\n    proto->writeMessageBegin(msg, metadata);\n    proto->writeStructBegin(msg, \"\");\n    proto->writeFieldBegin(msg, \"\", FieldType::Stop, 0);\n    proto->writeStructEnd(msg);\n    proto->writeMessageEnd(msg);\n\n    TransportPtr transport =\n        NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport();\n    transport->encodeFrame(buffer, metadata, msg);\n  }\n\n  void writeFramedBinaryTApplicationException(Buffer::Instance& buffer, int32_t seq_id) {\n    Buffer::OwnedImpl msg;\n    ProtocolPtr proto =\n        NamedProtocolConfigFactory::getFactory(ProtocolType::Binary).createProtocol();\n    MessageMetadata metadata;\n    metadata.setMethodName(\"name\");\n    metadata.setMessageType(MessageType::Exception);\n    metadata.setSequenceId(seq_id);\n\n    proto->writeMessageBegin(msg, metadata);\n    proto->writeStructBegin(msg, \"\");\n    proto->writeFieldBegin(msg, \"\", 
FieldType::String, 1);\n    proto->writeString(msg, \"error\");\n    proto->writeFieldEnd(msg);\n    proto->writeFieldBegin(msg, \"\", FieldType::I32, 2);\n    proto->writeInt32(msg, 1);\n    proto->writeFieldEnd(msg);\n    proto->writeFieldBegin(msg, \"\", FieldType::Stop, 0);\n    proto->writeStructEnd(msg);\n    proto->writeMessageEnd(msg);\n\n    TransportPtr transport =\n        NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport();\n    transport->encodeFrame(buffer, metadata, msg);\n  }\n\n  void writeFramedBinaryIDLException(Buffer::Instance& buffer, int32_t seq_id) {\n    Buffer::OwnedImpl msg;\n    ProtocolPtr proto =\n        NamedProtocolConfigFactory::getFactory(ProtocolType::Binary).createProtocol();\n    MessageMetadata metadata;\n    metadata.setMethodName(\"name\");\n    metadata.setMessageType(MessageType::Reply);\n    metadata.setSequenceId(seq_id);\n\n    proto->writeMessageBegin(msg, metadata);\n    proto->writeStructBegin(msg, \"\");\n    proto->writeFieldBegin(msg, \"\", FieldType::Struct, 2);\n\n    proto->writeStructBegin(msg, \"\");\n    proto->writeFieldBegin(msg, \"\", FieldType::String, 1);\n    proto->writeString(msg, \"err\");\n    proto->writeFieldEnd(msg);\n    proto->writeFieldBegin(msg, \"\", FieldType::Stop, 0);\n    proto->writeStructEnd(msg);\n\n    proto->writeFieldEnd(msg);\n    proto->writeFieldBegin(msg, \"\", FieldType::Stop, 0);\n    proto->writeStructEnd(msg);\n    proto->writeMessageEnd(msg);\n\n    TransportPtr transport =\n        NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport();\n    transport->encodeFrame(buffer, metadata, msg);\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  std::shared_ptr<ThriftFilters::MockDecoderFilter> decoder_filter_;\n  Stats::TestUtil::TestStore store_;\n  ThriftFilterStats stats_;\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy proto_config_;\n\n  std::unique_ptr<TestConfigImpl> 
config_;\n\n  Buffer::OwnedImpl buffer_;\n  Buffer::OwnedImpl write_buffer_;\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  std::unique_ptr<ConnectionManager> filter_;\n  MockTransport* custom_transport_{};\n  MockProtocol* custom_protocol_{};\n  ThriftFilters::DecoderFilterSharedPtr custom_filter_;\n};\n\nTEST_F(ThriftConnectionManagerTest, OnDataHandlesThriftCall) {\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_oneway\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_invalid_type\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, OnDataHandlesThriftOneWay) {\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Oneway, 0x0F);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_oneway\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_invalid_type\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, 
OnDataHandlesStopIterationAndResume) {\n  initializeFilter();\n\n  writeFramedBinaryMessage(buffer_, MessageType::Oneway, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter_, messageBegin(_)).WillOnce(Return(FilterStatus::StopIteration));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n\n  // Nothing further happens: we're stopped.\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  EXPECT_EQ(42, callbacks->streamId());\n  EXPECT_EQ(TransportType::Framed, callbacks->downstreamTransportType());\n  EXPECT_EQ(ProtocolType::Binary, callbacks->downstreamProtocolType());\n  EXPECT_EQ(&filter_callbacks_.connection_, callbacks->connection());\n\n  // Resume processing.\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  callbacks->continueDecoding();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_oneway\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_invalid_type\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(0U, stats_.request_active_.value());\n}\n\nTEST_F(ThriftConnectionManagerTest, OnDataHandlesFrameSplitAcrossBuffers) {\n  initializeFilter();\n\n  writePartialFramedBinaryMessage(buffer_, MessageType::Call, 0x10, true);\n  EXPECT_EQ(filter_->onData(buffer_, 
false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0, buffer_.length());\n\n  // Complete the buffer\n  writePartialFramedBinaryMessage(buffer_, MessageType::Call, 0x10, false);\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0, buffer_.length());\n\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_decoding_error\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, OnDataHandlesInvalidMsgType) {\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Reply, 0x0F); // reply is invalid for a request\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.request_oneway\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_invalid_type\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, OnDataHandlesProtocolError) {\n  initializeFilter();\n  addSeq(buffer_, {\n                      0x00, 0x00, 0x00, 0x1f,                     // framed: 31 bytes\n                      0x80, 0x01, 0x00, 0x01,                     // binary, call\n                      0x00, 0x00, 0x00, 0x04, 'n', 'a', 'm', 'e', // message name\n                      0x00, 0x00, 0x00, 0x01,                     // sequence id\n                      0x08, 0xff, 0xff                            // illegal field id\n                  });\n\n  std::string err = \"invalid binary protocol field id -1\";\n  addSeq(write_buffer_, {\n                            0x00, 0x00, 0x00, 0x42,                     // framed: 66 bytes\n                            0x80, 0x01, 0x00, 0x03,                     // binary, exception\n                            0x00, 0x00, 0x00, 0x04, 
'n', 'a', 'm', 'e', // message name\n                            0x00, 0x00, 0x00, 0x01,                     // sequence id\n                            0x0b, 0x00, 0x01,                           // begin string field\n                        });\n  write_buffer_.writeBEInt<uint32_t>(err.length());\n  write_buffer_.add(err);\n  addSeq(write_buffer_, {\n                            0x08, 0x00, 0x02,       // begin i32 field\n                            0x00, 0x00, 0x00, 0x07, // protocol error\n                            0x00,                   // stop field\n                        });\n\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        EXPECT_EQ(write_buffer_.toString(), buffer.toString());\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(0U, stats_.request_active_.value());\n}\n\nTEST_F(ThriftConnectionManagerTest, OnDataHandlesProtocolErrorDuringMessageBegin) {\n  initializeFilter();\n  addSeq(buffer_, {\n                      0x00, 0x00, 0x00, 0x1d,                     // framed: 29 bytes\n                      0x80, 0x01, 0x00, 0xff,                     // binary, invalid type\n                      0x00, 0x00, 0x00, 0x04, 'n', 'a', 'm', 'e', // message name\n                      0x00, 0x00, 0x00, 0x01,                     // sequence id\n                      0x00,                                       // stop field\n                  });\n\n  EXPECT_CALL(filter_callbacks_.connection_, 
close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  EXPECT_EQ(1U, store_.counter(\"test.request_decoding_error\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, OnDataHandlesTransportApplicationException) {\n  initializeFilter();\n  addSeq(buffer_, {\n                      0x00, 0x00, 0x00, 0x64, // header: 100 bytes\n                      0x0f, 0xff, 0x00, 0x00, // magic, flags\n                      0x00, 0x00, 0x00, 0x01, // sequence id\n                      0x00, 0x01, 0x00, 0x02, // header size 4, binary proto, 2 transforms\n                      0x01, 0x02, 0x00, 0x00, // transforms: 1, 2; padding\n                  });\n\n  std::string err = \"Unknown transform 1\";\n  uint8_t len = 41 + err.length();\n  addSeq(write_buffer_, {\n                            0x00, 0x00, 0x00, len,  // header frame size\n                            0x0f, 0xff, 0x00, 0x00, // magic, flags\n                            0x00, 0x00, 0x00, 0x00, // sequence id 0\n                            0x00, 0x01, 0x00, 0x00, // header size 4, binary, 0 transforms\n                            0x00, 0x00,             // header padding\n                            0x80, 0x01, 0x00, 0x03, // binary, exception\n                            0x00, 0x00, 0x00, 0x00, // message name \"\"\n                            0x00, 0x00, 0x00, 0x00, // sequence id\n                            0x0b, 0x00, 0x01,       // begin string field\n                        });\n  write_buffer_.writeBEInt<int32_t>(err.length());\n  write_buffer_.add(err);\n  addSeq(write_buffer_, {\n                            0x08, 0x00, 0x02,       // begin i32 field\n                            0x00, 0x00, 0x00, 0x05, // missing result\n                            0x00,                   // stop field\n                        });\n\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, true))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, 
bool) -> void {\n        EXPECT_EQ(write_buffer_.toString(), buffer.toString());\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n}\n\n// Tests that OnData handles non-thrift input. Regression test for crash on invalid input.\nTEST_F(ThriftConnectionManagerTest, OnDataHandlesGarbageRequest) {\n  initializeFilter();\n  addRepeated(buffer_, 8, 0);\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_decoding_error\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n}\n\nTEST_F(ThriftConnectionManagerTest, OnEvent) {\n  // No active calls\n  {\n    initializeFilter();\n    filter_->onEvent(Network::ConnectionEvent::RemoteClose);\n    filter_->onEvent(Network::ConnectionEvent::LocalClose);\n    EXPECT_EQ(0U, store_.counter(\"test.cx_destroy_local_with_active_rq\").value());\n    EXPECT_EQ(0U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n  }\n\n  // Remote close mid-request\n  {\n    initializeFilter();\n    addSeq(buffer_, {\n                        0x00, 0x00, 0x00, 0x1d,                     // framed: 29 bytes\n                        0x80, 0x01, 0x00, 0x01,                     // binary proto, call type\n                        0x00, 0x00, 0x00, 0x04, 'n', 'a', 'm', 'e', // message name\n                        0x00, 0x00, 0x00, 0x0F,                     // seq id\n                    });\n    EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n    
filter_->onEvent(Network::ConnectionEvent::RemoteClose);\n\n    EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  }\n\n  // Local close mid-request\n  {\n    initializeFilter();\n    addSeq(buffer_, {\n                        0x00, 0x00, 0x00, 0x1d,                     // framed: 29 bytes\n                        0x80, 0x01, 0x00, 0x01,                     // binary proto, call type\n                        0x00, 0x00, 0x00, 0x04, 'n', 'a', 'm', 'e', // message name\n                        0x00, 0x00, 0x00, 0x0F,                     // seq id\n                    });\n    EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n    filter_->onEvent(Network::ConnectionEvent::LocalClose);\n\n    EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_local_with_active_rq\").value());\n\n    buffer_.drain(buffer_.length());\n\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  }\n\n  // Remote close before response\n  {\n    initializeFilter();\n    writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n    EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n    filter_->onEvent(Network::ConnectionEvent::RemoteClose);\n\n    EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_remote_with_active_rq\").value());\n\n    buffer_.drain(buffer_.length());\n\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  }\n\n  // Local close before response\n  {\n    initializeFilter();\n    writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n    EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n    EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, 
deferredDelete_(_)).Times(1);\n    filter_->onEvent(Network::ConnectionEvent::LocalClose);\n\n    EXPECT_EQ(1U, store_.counter(\"test.cx_destroy_local_with_active_rq\").value());\n\n    buffer_.drain(buffer_.length());\n\n    filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  }\n}\n\nTEST_F(ThriftConnectionManagerTest, Routing) {\n  const std::string yaml = R\"EOF(\ntransport: FRAMED\nprotocol: BINARY\nstat_prefix: test\nroute_config:\n  name: \"routes\"\n  routes:\n    - match:\n        method_name: name\n      route:\n        cluster: cluster\n)EOF\";\n\n  initializeFilter(yaml);\n  writeFramedBinaryMessage(buffer_, MessageType::Oneway, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter_, messageBegin(_)).WillOnce(Return(FilterStatus::StopIteration));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n\n  Router::RouteConstSharedPtr route = callbacks->route();\n  EXPECT_NE(nullptr, route);\n  EXPECT_NE(nullptr, route->routeEntry());\n  EXPECT_EQ(\"cluster\", route->routeEntry()->clusterName());\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  callbacks->continueDecoding();\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n}\n\nTEST_F(ThriftConnectionManagerTest, RequestAndResponse) {\n  initializeFilter();\n  writeComplexFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  
EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  writeComplexFramedBinaryMessage(write_buffer_, MessageType::Reply, 0x0F);\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_reply\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_invalid_type\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, RequestAndVoidResponse) {\n  initializeFilter();\n  writeComplexFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  writeVoidFramedBinaryMessage(write_buffer_, 0x0F);\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, 
deferredDelete_(_)).Times(1);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_reply\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_invalid_type\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n}\n\n// Tests that the downstream request's sequence number is used for the response.\nTEST_F(ThriftConnectionManagerTest, RequestAndResponseSequenceIdHandling) {\n  initializeFilter();\n  writeComplexFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  writeComplexFramedBinaryMessage(write_buffer_, MessageType::Reply, 0xFF);\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  Buffer::OwnedImpl response_buffer;\n  writeComplexFramedBinaryMessage(response_buffer, MessageType::Reply, 0x0F);\n\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        EXPECT_EQ(response_buffer.toString(), buffer.toString());\n      }));\n  
EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_reply\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_invalid_type\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, RequestAndExceptionResponse) {\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  writeFramedBinaryTApplicationException(write_buffer_, 0x0F);\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, 
stats_.request_active_.value());\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_reply\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_invalid_type\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, RequestAndErrorResponse) {\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  writeFramedBinaryIDLException(write_buffer_, 0x0F);\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_reply\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_invalid_type\").value());\n  EXPECT_EQ(0U, 
store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_error\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, RequestAndInvalidResponse) {\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  // Call is not valid in a response\n  writeFramedBinaryMessage(write_buffer_, MessageType::Call, 0x0F);\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Complete, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_reply\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_invalid_type\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, RequestAndResponseProtocolError) {\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, 
setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  // illegal field id\n  addSeq(write_buffer_, {\n                            0x00, 0x00, 0x00, 0x1f,                     // framed: 31 bytes\n                            0x80, 0x01, 0x00, 0x02,                     // binary, reply\n                            0x00, 0x00, 0x00, 0x04, 'n', 'a', 'm', 'e', // message name\n                            0x00, 0x00, 0x00, 0x01,                     // sequence id\n                            0x08, 0xff, 0xff                            // illegal field id\n                        });\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, true));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Reset, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_reply\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_invalid_type\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_decoding_error\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, 
RequestAndTransportApplicationException) {\n  initializeFilter();\n  writeMessage(buffer_, TransportType::Header, ProtocolType::Binary, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  // Response with unknown transform\n  addSeq(write_buffer_, {\n                            0x00, 0x00, 0x00, 0x64, // header: 100 bytes\n                            0x0f, 0xff, 0x00, 0x00, // magic, flags\n                            0x00, 0x00, 0x00, 0x01, // sequence id\n                            0x00, 0x01, 0x00, 0x02, // header size 4, binary proto, 2 transforms\n                            0x01, 0x02, 0x00, 0x00, // transforms: 1, 2; padding\n                        });\n\n  HeaderTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Reset, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_reply\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_invalid_type\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n 
 EXPECT_EQ(1U, store_.counter(\"test.response_decoding_error\").value());\n}\n\n// Tests that a request is routed and a non-thrift response is handled.\nTEST_F(ThriftConnectionManagerTest, RequestAndGarbageResponse) {\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  addRepeated(write_buffer_, 8, 0);\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Reset, callbacks->upstreamData(write_buffer_));\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_reply\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_exception\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_invalid_type\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_success\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n}\n\nTEST_F(ThriftConnectionManagerTest, PipelinedRequestAndResponse) {\n  initializeFilter();\n\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x01);\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x02);\n\n  std::list<ThriftFilters::DecoderFilterCallbacks*> callbacks{};\n  
EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillRepeatedly(Invoke(\n          [&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks.push_back(&cb); }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(2U, stats_.request_active_.value());\n  EXPECT_EQ(2U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(2U, store_.counter(\"test.request_call\").value());\n\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(2);\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n\n  writeFramedBinaryMessage(write_buffer_, MessageType::Reply, 0x01);\n  callbacks.front()->startUpstreamResponse(transport, proto);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Complete,\n            callbacks.front()->upstreamData(write_buffer_));\n  callbacks.pop_front();\n  EXPECT_EQ(1U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_reply\").value());\n\n  writeFramedBinaryMessage(write_buffer_, MessageType::Reply, 0x02);\n  callbacks.front()->startUpstreamResponse(transport, proto);\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Complete,\n            callbacks.front()->upstreamData(write_buffer_));\n  callbacks.pop_front();\n  EXPECT_EQ(2U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(2U, store_.counter(\"test.response_reply\").value());\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n\n  EXPECT_EQ(0U, stats_.request_active_.value());\n}\n\nTEST_F(ThriftConnectionManagerTest, ResetDownstreamConnection) {\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), 
Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n\n  EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_));\n  callbacks->resetDownstreamConnection();\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(0U, stats_.request_active_.value());\n}\n\nTEST_F(ThriftConnectionManagerTest, DownstreamProtocolUpgrade) {\n  custom_transport_ = new NiceMock<MockTransport>();\n  custom_protocol_ = new NiceMock<MockProtocol>();\n  initializeFilter();\n\n  EXPECT_CALL(*custom_transport_, decodeFrameStart(_, _)).WillOnce(Return(true));\n  EXPECT_CALL(*custom_protocol_, readMessageBegin(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setSequenceId(0);\n        metadata.setMessageType(MessageType::Call);\n        metadata.setProtocolUpgradeMessage(true);\n        return true;\n      }));\n  EXPECT_CALL(*custom_protocol_, supportsUpgrade()).Times(AnyNumber()).WillRepeatedly(Return(true));\n\n  MockDecoderEventHandler* upgrade_decoder = new NiceMock<MockDecoderEventHandler>();\n  EXPECT_CALL(*custom_protocol_, upgradeRequestDecoder())\n      .WillOnce(Invoke([&]() -> DecoderEventHandlerSharedPtr {\n        return DecoderEventHandlerSharedPtr{upgrade_decoder};\n      }));\n  EXPECT_CALL(*upgrade_decoder, messageBegin(_)).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*custom_protocol_, readStructBegin(_, _)).WillOnce(Return(true));\n  EXPECT_CALL(*upgrade_decoder, structBegin(_)).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*custom_protocol_, readFieldBegin(_, _, _, _))\n      .WillOnce(Invoke(\n          [&](Buffer::Instance&, std::string&, FieldType& field_type, int16_t& field_id) -> bool {\n            field_type = FieldType::Stop;\n            
field_id = 0;\n            return true;\n          }));\n  EXPECT_CALL(*custom_protocol_, readStructEnd(_)).WillOnce(Return(true));\n  EXPECT_CALL(*upgrade_decoder, structEnd()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*custom_protocol_, readMessageEnd(_)).WillOnce(Return(true));\n  EXPECT_CALL(*upgrade_decoder, messageEnd()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*custom_transport_, decodeFrameEnd(_)).WillOnce(Return(true));\n  EXPECT_CALL(*upgrade_decoder, transportEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  MockDirectResponse* direct_response = new NiceMock<MockDirectResponse>();\n\n  EXPECT_CALL(*custom_protocol_, upgradeResponse(Ref(*upgrade_decoder)))\n      .WillOnce(Invoke([&](const DecoderEventHandler&) -> DirectResponsePtr {\n        return DirectResponsePtr{direct_response};\n      }));\n\n  EXPECT_CALL(*direct_response, encode(_, Ref(*custom_protocol_), _))\n      .WillOnce(Invoke([&](MessageMetadata&, Protocol&,\n                           Buffer::Instance& buffer) -> DirectResponse::ResponseType {\n        buffer.add(\"response\");\n        return DirectResponse::ResponseType::SuccessReply;\n      }));\n  EXPECT_CALL(*custom_transport_, encodeFrame(_, _, _))\n      .WillOnce(Invoke(\n          [&](Buffer::Instance& buffer, const MessageMetadata&, Buffer::Instance& message) -> void {\n            EXPECT_EQ(\"response\", message.toString());\n            buffer.add(\"transport-encoded response\");\n          }));\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        EXPECT_EQ(\"transport-encoded response\", buffer.toString());\n      }));\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n}\n\n// Tests multiple filters are invoked in the correct order.\nTEST_F(ThriftConnectionManagerTest, OnDataHandlesThriftCallWithMultipleFilters) {\n  auto* filter = new 
NiceMock<ThriftFilters::MockDecoderFilter>();\n  custom_filter_.reset(filter);\n  initializeFilter();\n\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  InSequence s;\n  EXPECT_CALL(*filter, messageBegin(_)).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*decoder_filter_, messageBegin(_)).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*filter, messageEnd()).WillOnce(Return(FilterStatus::Continue));\n  EXPECT_CALL(*decoder_filter_, messageEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n}\n\n// Tests stop iteration/resume with multiple filters.\nTEST_F(ThriftConnectionManagerTest, OnDataResumesWithNextFilter) {\n  auto* filter = new NiceMock<ThriftFilters::MockDecoderFilter>();\n  custom_filter_.reset(filter);\n\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_));\n\n  // First filter stops iteration.\n  {\n    EXPECT_CALL(*filter, messageBegin(_)).WillOnce(Return(FilterStatus::StopIteration));\n    EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n    EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n    EXPECT_EQ(1U, stats_.request_active_.value());\n  }\n\n  // Resume processing.\n  {\n    InSequence s;\n    EXPECT_CALL(*decoder_filter_, messageBegin(_)).WillOnce(Return(FilterStatus::Continue));\n    EXPECT_CALL(*filter, messageEnd()).WillOnce(Return(FilterStatus::Continue));\n    EXPECT_CALL(*decoder_filter_, 
messageEnd()).WillOnce(Return(FilterStatus::Continue));\n    callbacks->continueDecoding();\n  }\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n}\n\n// Tests stop iteration/resume with multiple filters when iteration is stopped during transportEnd.\nTEST_F(ThriftConnectionManagerTest, OnDataResumesWithNextFilterOnTransportEnd) {\n  auto* filter = new NiceMock<ThriftFilters::MockDecoderFilter>();\n  custom_filter_.reset(filter);\n\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_));\n\n  // First filter stops iteration.\n  {\n    InSequence s;\n    EXPECT_CALL(*filter, transportBegin(_)).WillOnce(Return(FilterStatus::Continue));\n    EXPECT_CALL(*decoder_filter_, transportBegin(_)).WillOnce(Return(FilterStatus::Continue));\n    EXPECT_CALL(*filter, transportEnd()).WillOnce(Return(FilterStatus::StopIteration));\n    EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n    EXPECT_EQ(0U, store_.counter(\"test.request\").value());\n    EXPECT_EQ(1U, stats_.request_active_.value());\n  }\n\n  // Resume processing.\n  {\n    InSequence s;\n    EXPECT_CALL(*decoder_filter_, transportEnd()).WillOnce(Return(FilterStatus::Continue));\n    callbacks->continueDecoding();\n  }\n\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n}\n\n// Tests multiple filters where one invokes sendLocalReply with a successful reply.\nTEST_F(ThriftConnectionManagerTest, 
OnDataWithFilterSendsLocalReply) {\n  auto* filter = new NiceMock<ThriftFilters::MockDecoderFilter>();\n  custom_filter_.reset(filter);\n\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_));\n\n  NiceMock<MockDirectResponse> direct_response;\n  EXPECT_CALL(direct_response, encode(_, _, _))\n      .WillOnce(Invoke([&](MessageMetadata&, Protocol&,\n                           Buffer::Instance& buffer) -> DirectResponse::ResponseType {\n        buffer.add(\"response\");\n        return DirectResponse::ResponseType::SuccessReply;\n      }));\n\n  // First filter sends local reply.\n  EXPECT_CALL(*filter, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr) -> FilterStatus {\n        callbacks->sendLocalReply(direct_response, false);\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        EXPECT_EQ(8, buffer.drainBEInt<int32_t>());\n        EXPECT_EQ(\"response\", buffer.toString());\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_success\").value());\n}\n\n// Tests multiple filters where one invokes sendLocalReply with an error 
reply.\nTEST_F(ThriftConnectionManagerTest, OnDataWithFilterSendsLocalErrorReply) {\n  auto* filter = new NiceMock<ThriftFilters::MockDecoderFilter>();\n  custom_filter_.reset(filter);\n\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_));\n\n  NiceMock<MockDirectResponse> direct_response;\n  EXPECT_CALL(direct_response, encode(_, _, _))\n      .WillOnce(Invoke([&](MessageMetadata&, Protocol&,\n                           Buffer::Instance& buffer) -> DirectResponse::ResponseType {\n        buffer.add(\"response\");\n        return DirectResponse::ResponseType::ErrorReply;\n      }));\n\n  // First filter sends local reply.\n  EXPECT_CALL(*filter, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr) -> FilterStatus {\n        callbacks->sendLocalReply(direct_response, false);\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        EXPECT_EQ(8, buffer.drainBEInt<int32_t>());\n        EXPECT_EQ(\"response\", buffer.toString());\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_error\").value());\n}\n\n// sendLocalReply does nothing, when the remote closed the 
connection.\nTEST_F(ThriftConnectionManagerTest, OnDataWithFilterSendLocalReplyRemoteClosedConnection) {\n  auto* filter = new NiceMock<ThriftFilters::MockDecoderFilter>();\n  custom_filter_.reset(filter);\n\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_));\n\n  NiceMock<MockDirectResponse> direct_response;\n  EXPECT_CALL(direct_response, encode(_, _, _)).Times(0);\n\n  // First filter sends local reply.\n  EXPECT_CALL(*filter, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr) -> FilterStatus {\n        callbacks->sendLocalReply(direct_response, false);\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(filter_callbacks_.connection_, write(_, false)).Times(0);\n  EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1);\n\n  // Remote closes the connection.\n  filter_callbacks_.connection_.state_ = Network::Connection::State::Closed;\n  EXPECT_EQ(filter_->onData(buffer_, true), Network::FilterStatus::StopIteration);\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(0U, stats_.request_active_.value());\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(0U, store_.counter(\"test.response_error\").value());\n}\n\n// Tests a decoder filter that modifies data.\nTEST_F(ThriftConnectionManagerTest, DecoderFiltersModifyRequests) {\n  auto* filter = new NiceMock<ThriftFilters::MockDecoderFilter>();\n  custom_filter_.reset(filter);\n\n  initializeFilter();\n  writeFramedBinaryMessage(buffer_, 
MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* callbacks{};\n  EXPECT_CALL(*filter, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_));\n\n  Http::LowerCaseString key{\"key\"};\n\n  EXPECT_CALL(*filter, transportBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_THAT(*metadata, HasNoHeaders());\n        metadata->headers().addCopy(key, \"value\");\n        return FilterStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filter_, transportBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        const Http::HeaderEntry* header = metadata->headers().get(key);\n        EXPECT_NE(nullptr, header);\n        EXPECT_EQ(\"value\", header->value().getStringView());\n        return FilterStatus::Continue;\n      }));\n\n  EXPECT_CALL(*filter, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_EQ(\"name\", metadata->methodName());\n        metadata->setMethodName(\"alternate\");\n        return FilterStatus::Continue;\n      }));\n  EXPECT_CALL(*decoder_filter_, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_EQ(\"alternate\", metadata->methodName());\n        return FilterStatus::Continue;\n      }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n  EXPECT_EQ(1U, stats_.request_active_.value());\n}\n\nTEST_F(ThriftConnectionManagerTest, TransportEndWhenRemoteClose) {\n  initializeFilter();\n  writeComplexFramedBinaryMessage(buffer_, MessageType::Call, 0x0F);\n\n  ThriftFilters::DecoderFilterCallbacks* 
callbacks{};\n  EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_))\n      .WillOnce(\n          Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; }));\n\n  EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration);\n  EXPECT_EQ(1U, store_.counter(\"test.request_call\").value());\n\n  writeComplexFramedBinaryMessage(write_buffer_, MessageType::Reply, 0x0F);\n\n  FramedTransportImpl transport;\n  BinaryProtocolImpl proto;\n  callbacks->startUpstreamResponse(transport, proto);\n\n  // Remote closes the connection.\n  filter_callbacks_.connection_.state_ = Network::Connection::State::Closed;\n  EXPECT_EQ(ThriftFilters::ResponseStatus::Reset, callbacks->upstreamData(write_buffer_));\n  EXPECT_EQ(0U, store_.counter(\"test.response\").value());\n  EXPECT_EQ(1U, store_.counter(\"test.response_decoding_error\").value());\n\n  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/conn_state_test.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/conn_state.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n// Test behavior of nextSequenceId()\nTEST(ThriftConnectionStateTest, NextSequenceId) {\n  // Default sequence ids\n  {\n    ThriftConnectionState cs;\n\n    EXPECT_EQ(0, cs.nextSequenceId());\n    EXPECT_EQ(1, cs.nextSequenceId());\n  }\n\n  // Overflow is handled without producing negative values.\n  {\n    ThriftConnectionState cs(std::numeric_limits<int32_t>::max());\n\n    EXPECT_EQ(std::numeric_limits<int32_t>::max(), cs.nextSequenceId());\n    EXPECT_EQ(0, cs.nextSequenceId());\n  }\n}\n\n// Test how markUpgraded/upgradedAttempts/isUpgraded when upgrade is successful.\nTEST(ThriftConnectionStateTest, TestUpgradeSucceeded) {\n  ThriftConnectionState cs;\n  EXPECT_FALSE(cs.upgradeAttempted());\n  EXPECT_FALSE(cs.isUpgraded());\n\n  cs.markUpgraded();\n  EXPECT_TRUE(cs.upgradeAttempted());\n  EXPECT_TRUE(cs.isUpgraded());\n}\n\n// Test how markUpgraded/upgradedAttempts/isUpgraded when upgrade fails.\nTEST(ThriftConnectionStateTest, TestUpgradeFailed) {\n  ThriftConnectionState cs;\n  EXPECT_FALSE(cs.upgradeAttempted());\n  EXPECT_FALSE(cs.isUpgraded());\n\n  cs.markUpgradeFailed();\n  EXPECT_TRUE(cs.upgradeAttempted());\n  EXPECT_FALSE(cs.isUpgraded());\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/decoder_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/decoder.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::Combine;\nusing testing::DoAll;\nusing testing::ExpectationSet;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::SetArgReferee;\nusing testing::StrictMock;\nusing ::testing::TestParamInfo;\nusing testing::Values;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace {\n\nExpectationSet expectValue(MockProtocol& proto, MockDecoderEventHandler& handler,\n                           FieldType field_type, bool result = true) {\n  ExpectationSet s;\n  switch (field_type) {\n  case FieldType::Bool:\n    s += EXPECT_CALL(proto, readBool(_, _)).WillOnce(Return(result));\n    if (result) {\n      s += EXPECT_CALL(handler, boolValue(_)).WillOnce(Return(FilterStatus::Continue));\n    }\n    break;\n  case FieldType::Byte:\n    s += EXPECT_CALL(proto, readByte(_, _)).WillOnce(Return(result));\n    if (result) {\n      s += EXPECT_CALL(handler, byteValue(_)).WillOnce(Return(FilterStatus::Continue));\n    }\n    break;\n  case FieldType::Double:\n    s += EXPECT_CALL(proto, readDouble(_, _)).WillOnce(Return(result));\n    if (result) {\n      s += EXPECT_CALL(handler, doubleValue(_)).WillOnce(Return(FilterStatus::Continue));\n    }\n    break;\n  case FieldType::I16:\n    s += EXPECT_CALL(proto, readInt16(_, _)).WillOnce(Return(result));\n  
  if (result) {\n      s += EXPECT_CALL(handler, int16Value(_)).WillOnce(Return(FilterStatus::Continue));\n    }\n    break;\n  case FieldType::I32:\n    s += EXPECT_CALL(proto, readInt32(_, _)).WillOnce(Return(result));\n    if (result) {\n      s += EXPECT_CALL(handler, int32Value(_)).WillOnce(Return(FilterStatus::Continue));\n    }\n    break;\n  case FieldType::I64:\n    s += EXPECT_CALL(proto, readInt64(_, _)).WillOnce(Return(result));\n    if (result) {\n      s += EXPECT_CALL(handler, int64Value(_)).WillOnce(Return(FilterStatus::Continue));\n    }\n    break;\n  case FieldType::String:\n    s += EXPECT_CALL(proto, readString(_, _)).WillOnce(Return(result));\n    if (result) {\n      s += EXPECT_CALL(handler, stringValue(_)).WillOnce(Return(FilterStatus::Continue));\n    }\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  return s;\n}\n\nExpectationSet expectContainerStart(MockProtocol& proto, MockDecoderEventHandler& handler,\n                                    FieldType field_type, FieldType inner_type) {\n  int16_t field_id = 1;\n  uint32_t size = 1;\n\n  ExpectationSet s;\n  switch (field_type) {\n  case FieldType::Struct:\n    s += EXPECT_CALL(proto, readStructBegin(_, _)).WillOnce(Return(true));\n    s += EXPECT_CALL(handler, structBegin(absl::string_view()))\n             .WillOnce(Return(FilterStatus::Continue));\n    s += EXPECT_CALL(proto, readFieldBegin(_, _, _, _))\n             .WillOnce(\n                 DoAll(SetArgReferee<2>(inner_type), SetArgReferee<3>(field_id), Return(true)));\n    s += EXPECT_CALL(handler, fieldBegin(absl::string_view(), _, _))\n             .WillOnce(Invoke([=](absl::string_view, FieldType& ft, int16_t& id) -> FilterStatus {\n               EXPECT_EQ(inner_type, ft);\n               EXPECT_EQ(field_id, id);\n               return FilterStatus::Continue;\n             }));\n    break;\n  case FieldType::List:\n    s += EXPECT_CALL(proto, readListBegin(_, _, _))\n             
.WillOnce(DoAll(SetArgReferee<1>(inner_type), SetArgReferee<2>(size), Return(true)));\n    s += EXPECT_CALL(handler, listBegin(_, _))\n             .WillOnce(Invoke([=](FieldType& t, uint32_t& s) -> FilterStatus {\n               EXPECT_EQ(inner_type, t);\n               EXPECT_EQ(size, s);\n               return FilterStatus::Continue;\n             }));\n    break;\n  case FieldType::Map:\n    s += EXPECT_CALL(proto, readMapBegin(_, _, _, _))\n             .WillOnce(DoAll(SetArgReferee<1>(inner_type), SetArgReferee<2>(inner_type),\n                             SetArgReferee<3>(size), Return(true)));\n    s += EXPECT_CALL(handler, mapBegin(_, _, _))\n             .WillOnce(Invoke([=](FieldType& kt, FieldType& vt, uint32_t& s) -> FilterStatus {\n               EXPECT_EQ(inner_type, kt);\n               EXPECT_EQ(inner_type, vt);\n               EXPECT_EQ(size, s);\n               return FilterStatus::Continue;\n             }));\n    break;\n  case FieldType::Set:\n    s += EXPECT_CALL(proto, readSetBegin(_, _, _))\n             .WillOnce(DoAll(SetArgReferee<1>(inner_type), SetArgReferee<2>(size), Return(true)));\n    s += EXPECT_CALL(handler, setBegin(_, _))\n             .WillOnce(Invoke([=](FieldType& t, uint32_t& s) -> FilterStatus {\n               EXPECT_EQ(inner_type, t);\n               EXPECT_EQ(size, s);\n               return FilterStatus::Continue;\n             }));\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  return s;\n}\n\nExpectationSet expectContainerEnd(MockProtocol& proto, MockDecoderEventHandler& handler,\n                                  FieldType field_type) {\n  ExpectationSet s;\n  switch (field_type) {\n  case FieldType::Struct:\n    s += EXPECT_CALL(proto, readFieldEnd(_)).WillOnce(Return(true));\n    s += EXPECT_CALL(handler, fieldEnd()).WillOnce(Return(FilterStatus::Continue));\n    s += EXPECT_CALL(proto, readFieldBegin(_, _, _, _))\n             .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n 
   s += EXPECT_CALL(proto, readStructEnd(_)).WillOnce(Return(true));\n    s += EXPECT_CALL(handler, structEnd()).WillOnce(Return(FilterStatus::Continue));\n    break;\n  case FieldType::List:\n    s += EXPECT_CALL(proto, readListEnd(_)).WillOnce(Return(true));\n    s += EXPECT_CALL(handler, listEnd()).WillOnce(Return(FilterStatus::Continue));\n    break;\n  case FieldType::Map:\n    s += EXPECT_CALL(proto, readMapEnd(_)).WillOnce(Return(true));\n    s += EXPECT_CALL(handler, mapEnd()).WillOnce(Return(FilterStatus::Continue));\n    break;\n  case FieldType::Set:\n    s += EXPECT_CALL(proto, readSetEnd(_)).WillOnce(Return(true));\n    s += EXPECT_CALL(handler, setEnd()).WillOnce(Return(FilterStatus::Continue));\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  return s;\n}\n\n} // namespace\n\nclass DecoderStateMachineTestBase {\npublic:\n  DecoderStateMachineTestBase() : metadata_(std::make_shared<MessageMetadata>()) {}\n  virtual ~DecoderStateMachineTestBase() = default;\n\n  NiceMock<MockProtocol> proto_;\n  MessageMetadataSharedPtr metadata_;\n  NiceMock<MockDecoderEventHandler> handler_;\n};\n\nclass DecoderStateMachineNonValueTest : public DecoderStateMachineTestBase,\n                                        public testing::TestWithParam<ProtocolState> {};\n\nstatic std::string protoStateParamToString(const TestParamInfo<ProtocolState>& params) {\n  return ProtocolStateNameValues::name(params.param);\n}\n\nINSTANTIATE_TEST_SUITE_P(NonValueProtocolStates, DecoderStateMachineNonValueTest,\n                         Values(ProtocolState::MessageBegin, ProtocolState::MessageEnd,\n                                ProtocolState::StructBegin, ProtocolState::StructEnd,\n                                ProtocolState::FieldBegin, ProtocolState::FieldEnd,\n                                ProtocolState::MapBegin, ProtocolState::MapEnd,\n                                ProtocolState::ListBegin, ProtocolState::ListEnd,\n                                
ProtocolState::SetBegin, ProtocolState::SetEnd),\n                         protoStateParamToString);\n\nclass DecoderStateMachineTest : public testing::Test, public DecoderStateMachineTestBase {};\nclass DecoderStateMachineValueTest : public DecoderStateMachineTestBase,\n                                     public testing::TestWithParam<FieldType> {};\n\nINSTANTIATE_TEST_SUITE_P(PrimitiveFieldTypes, DecoderStateMachineValueTest,\n                         Values(FieldType::Bool, FieldType::Byte, FieldType::Double, FieldType::I16,\n                                FieldType::I32, FieldType::I64, FieldType::String),\n                         fieldTypeParamToString);\n\nclass DecoderStateMachineNestingTest\n    : public DecoderStateMachineTestBase,\n      public testing::TestWithParam<std::tuple<FieldType, FieldType, FieldType>> {};\n\nstatic std::string nestedFieldTypesParamToString(\n    const TestParamInfo<std::tuple<FieldType, FieldType, FieldType>>& params) {\n  FieldType outer_field_type, inner_type, value_type;\n  std::tie(outer_field_type, inner_type, value_type) = params.param;\n  return fmt::format(\"{}Of{}Of{}\", fieldTypeToString(outer_field_type),\n                     fieldTypeToString(inner_type), fieldTypeToString(value_type));\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    NestedTypes, DecoderStateMachineNestingTest,\n    Combine(Values(FieldType::Struct, FieldType::List, FieldType::Map, FieldType::Set),\n            Values(FieldType::Struct, FieldType::List, FieldType::Map, FieldType::Set),\n            Values(FieldType::Bool, FieldType::Byte, FieldType::Double, FieldType::I16,\n                   FieldType::I32, FieldType::I64, FieldType::String)),\n    nestedFieldTypesParamToString);\n\nTEST_P(DecoderStateMachineNonValueTest, NoData) {\n  ProtocolState state = GetParam();\n  Buffer::OwnedImpl buffer;\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n  dsm.setCurrentState(state);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  
EXPECT_EQ(dsm.currentState(), state);\n}\n\nTEST_P(DecoderStateMachineValueTest, NoFieldValueData) {\n  FieldType field_type = GetParam();\n\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(std::string(\"\")), SetArgReferee<2>(field_type),\n                      SetArgReferee<3>(1), Return(true)));\n  expectValue(proto_, handler_, field_type, false);\n  expectValue(proto_, handler_, field_type, true);\n  EXPECT_CALL(proto_, readFieldEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(proto_, readFieldBegin(Ref(buffer), _, _, _)).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::FieldBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::FieldValue);\n\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::FieldBegin);\n}\n\nTEST_P(DecoderStateMachineValueTest, FieldValue) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(std::string(\"\")), SetArgReferee<2>(field_type),\n                      SetArgReferee<3>(1), Return(true)));\n\n  expectValue(proto_, handler_, field_type);\n\n  EXPECT_CALL(proto_, readFieldEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(proto_, readFieldBegin(Ref(buffer), _, _, _)).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::FieldBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::FieldBegin);\n}\n\nTEST_F(DecoderStateMachineTest, NoListValueData) {\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readListBegin(Ref(buffer), _, _))\n      
.WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(1), Return(true)));\n  EXPECT_CALL(proto_, readInt32(Ref(buffer), _)).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::ListBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::ListValue);\n}\n\nTEST_F(DecoderStateMachineTest, EmptyList) {\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readListBegin(Ref(buffer), _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(0), Return(true)));\n  EXPECT_CALL(proto_, readListEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::ListBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::ListEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, ListValue) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readListBegin(Ref(buffer), _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(1), Return(true)));\n\n  expectValue(proto_, handler_, field_type);\n\n  EXPECT_CALL(proto_, readListEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::ListBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::ListEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, IncompleteListValue) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readListBegin(Ref(buffer), _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(1), Return(true)));\n\n  expectValue(proto_, handler_, field_type, false);\n\n  DecoderStateMachine dsm(proto_, metadata_, 
handler_);\n\n  dsm.setCurrentState(ProtocolState::ListBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::ListValue);\n\n  expectValue(proto_, handler_, field_type);\n\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::ListEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, MultipleListValues) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readListBegin(Ref(buffer), _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(5), Return(true)));\n\n  for (int i = 0; i < 5; i++) {\n    expectValue(proto_, handler_, field_type);\n  }\n\n  EXPECT_CALL(proto_, readListEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::ListBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::ListEnd);\n}\n\nTEST_F(DecoderStateMachineTest, NoMapKeyData) {\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(FieldType::String),\n                      SetArgReferee<3>(1), Return(true)));\n  EXPECT_CALL(proto_, readInt32(Ref(buffer), _)).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::MapBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapKey);\n}\n\nTEST_F(DecoderStateMachineTest, NoMapValueData) {\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(FieldType::String),\n                      SetArgReferee<3>(1), Return(true)));\n  
EXPECT_CALL(proto_, readInt32(Ref(buffer), _)).WillOnce(Return(true));\n  EXPECT_CALL(proto_, readString(Ref(buffer), _)).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::MapBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapValue);\n}\n\nTEST_F(DecoderStateMachineTest, EmptyMap) {\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(FieldType::String),\n                      SetArgReferee<3>(0), Return(true)));\n  EXPECT_CALL(proto_, readMapEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::MapBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, MapKeyValue) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(FieldType::String),\n                      SetArgReferee<3>(1), Return(true)));\n\n  expectValue(proto_, handler_, field_type);        // key\n  expectValue(proto_, handler_, FieldType::String); // value\n\n  EXPECT_CALL(proto_, readMapEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::MapBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, MapValueValue) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _))\n      
.WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(field_type),\n                      SetArgReferee<3>(1), Return(true)));\n\n  expectValue(proto_, handler_, FieldType::I32); // key\n  expectValue(proto_, handler_, field_type);     // value\n\n  EXPECT_CALL(proto_, readMapEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::MapBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, IncompleteMapKey) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(FieldType::I32),\n                      SetArgReferee<3>(1), Return(true)));\n\n  expectValue(proto_, handler_, field_type, false); // key\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::MapBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapKey);\n\n  expectValue(proto_, handler_, field_type);     // key\n  expectValue(proto_, handler_, FieldType::I32); // value\n\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, IncompleteMapValue) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(field_type),\n                      SetArgReferee<3>(1), Return(true)));\n\n  expectValue(proto_, handler_, FieldType::I32);    // key\n  expectValue(proto_, handler_, field_type, false); // value\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  
dsm.setCurrentState(ProtocolState::MapBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapValue);\n\n  expectValue(proto_, handler_, field_type); // value\n\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, MultipleMapKeyValues) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(field_type),\n                      SetArgReferee<3>(5), Return(true)));\n\n  for (int i = 0; i < 5; i++) {\n    expectValue(proto_, handler_, FieldType::I32); // key\n    expectValue(proto_, handler_, field_type);     // value\n  }\n\n  EXPECT_CALL(proto_, readMapEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::MapBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd);\n}\n\nTEST_F(DecoderStateMachineTest, NoSetValueData) {\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readSetBegin(Ref(buffer), _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(1), Return(true)));\n  EXPECT_CALL(proto_, readInt32(Ref(buffer), _)).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::SetBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::SetValue);\n}\n\nTEST_F(DecoderStateMachineTest, EmptySet) {\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readSetBegin(Ref(buffer), _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(0), Return(true)));\n  EXPECT_CALL(proto_, 
readSetEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::SetBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::SetEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, SetValue) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readSetBegin(Ref(buffer), _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(1), Return(true)));\n\n  expectValue(proto_, handler_, field_type);\n\n  EXPECT_CALL(proto_, readSetEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::SetBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::SetEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, IncompleteSetValue) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readSetBegin(Ref(buffer), _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(1), Return(true)));\n\n  expectValue(proto_, handler_, field_type, false);\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::SetBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::SetValue);\n\n  expectValue(proto_, handler_, field_type);\n\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::SetEnd);\n}\n\nTEST_P(DecoderStateMachineValueTest, MultipleSetValues) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readSetBegin(Ref(buffer), _, _))\n      .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(5), Return(true)));\n\n  for (int i = 0; i < 5; i++) 
{\n    expectValue(proto_, handler_, field_type);\n  }\n\n  EXPECT_CALL(proto_, readSetEnd(Ref(buffer))).WillOnce(Return(false));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  dsm.setCurrentState(ProtocolState::SetBegin);\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::SetEnd);\n}\n\nTEST_F(DecoderStateMachineTest, EmptyStruct) {\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMessageBegin(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(proto_, readStructBegin(Ref(buffer), _)).WillOnce(Return(true));\n  EXPECT_CALL(proto_, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n  EXPECT_CALL(proto_, readStructEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(proto_, readMessageEnd(Ref(buffer))).WillOnce(Return(true));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::Done);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::Done);\n}\n\nTEST_P(DecoderStateMachineValueTest, SingleFieldStruct) {\n  FieldType field_type = GetParam();\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  EXPECT_CALL(proto_, readMessageBegin(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(handler_, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_TRUE(metadata->hasMethodName());\n        EXPECT_TRUE(metadata->hasMessageType());\n        
EXPECT_TRUE(metadata->hasSequenceId());\n        EXPECT_EQ(\"name\", metadata->methodName());\n        EXPECT_EQ(MessageType::Call, metadata->messageType());\n        EXPECT_EQ(100U, metadata->sequenceId());\n        return FilterStatus::Continue;\n      }));\n\n  EXPECT_CALL(proto_, readStructBegin(Ref(buffer), _)).WillOnce(Return(true));\n  EXPECT_CALL(handler_, structBegin(absl::string_view())).WillOnce(Return(FilterStatus::Continue));\n\n  int16_t field_id = 1;\n  EXPECT_CALL(proto_, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(field_type), SetArgReferee<3>(field_id), Return(true)));\n  EXPECT_CALL(handler_, fieldBegin(absl::string_view(), _, _))\n      .WillOnce(Invoke([&](absl::string_view, FieldType& ft, int16_t& id) -> FilterStatus {\n        EXPECT_EQ(field_type, ft);\n        EXPECT_EQ(field_id, id);\n        return FilterStatus::Continue;\n      }));\n\n  expectValue(proto_, handler_, field_type);\n\n  EXPECT_CALL(proto_, readFieldEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler_, fieldEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_CALL(proto_, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n\n  EXPECT_CALL(proto_, readStructEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler_, structEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_CALL(proto_, readMessageEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler_, messageEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::Done);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::Done);\n}\n\nTEST_F(DecoderStateMachineTest, MultiFieldStruct) {\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  std::vector<FieldType> field_types = {FieldType::Bool,  FieldType::Byte, FieldType::Double,\n                                        FieldType::I16,   
FieldType::I32,  FieldType::I64,\n                                        FieldType::String};\n\n  EXPECT_CALL(proto_, readMessageBegin(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(handler_, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_TRUE(metadata->hasMethodName());\n        EXPECT_TRUE(metadata->hasMessageType());\n        EXPECT_TRUE(metadata->hasSequenceId());\n        EXPECT_EQ(\"name\", metadata->methodName());\n        EXPECT_EQ(MessageType::Call, metadata->messageType());\n        EXPECT_EQ(100U, metadata->sequenceId());\n        return FilterStatus::Continue;\n      }));\n\n  EXPECT_CALL(proto_, readStructBegin(Ref(buffer), _)).WillOnce(Return(true));\n  EXPECT_CALL(handler_, structBegin(absl::string_view())).WillOnce(Return(FilterStatus::Continue));\n\n  int16_t field_id = 1;\n  for (FieldType field_type : field_types) {\n    EXPECT_CALL(proto_, readFieldBegin(Ref(buffer), _, _, _))\n        .WillOnce(DoAll(SetArgReferee<2>(field_type), SetArgReferee<3>(field_id), Return(true)));\n    EXPECT_CALL(handler_, fieldBegin(absl::string_view(), _, _))\n        .WillOnce(Invoke([=](absl::string_view, FieldType& ft, int16_t& id) -> FilterStatus {\n          EXPECT_EQ(field_type, ft);\n          EXPECT_EQ(field_id, id);\n          return FilterStatus::Continue;\n        }));\n    field_id++;\n\n    expectValue(proto_, handler_, field_type);\n\n    EXPECT_CALL(proto_, readFieldEnd(Ref(buffer))).WillOnce(Return(true));\n    EXPECT_CALL(handler_, fieldEnd()).WillOnce(Return(FilterStatus::Continue));\n  }\n\n  EXPECT_CALL(proto_, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n  EXPECT_CALL(proto_, 
readStructEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler_, structEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_CALL(proto_, readMessageEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler_, messageEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::Done);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::Done);\n}\n\nTEST_P(DecoderStateMachineNestingTest, NestedTypes) {\n  FieldType outer_field_type, inner_type, value_type;\n  std::tie(outer_field_type, inner_type, value_type) = GetParam();\n\n  Buffer::OwnedImpl buffer;\n  InSequence dummy;\n\n  // start of message and outermost struct\n  EXPECT_CALL(proto_, readMessageBegin(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(handler_, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_TRUE(metadata->hasMethodName());\n        EXPECT_TRUE(metadata->hasMessageType());\n        EXPECT_TRUE(metadata->hasSequenceId());\n        EXPECT_EQ(\"name\", metadata->methodName());\n        EXPECT_EQ(MessageType::Call, metadata->messageType());\n        EXPECT_EQ(100U, metadata->sequenceId());\n        return FilterStatus::Continue;\n      }));\n\n  expectContainerStart(proto_, handler_, FieldType::Struct, outer_field_type);\n\n  expectContainerStart(proto_, handler_, outer_field_type, inner_type);\n\n  int outer_reps = outer_field_type == FieldType::Map ? 2 : 1;\n  for (int i = 0; i < outer_reps; i++) {\n    expectContainerStart(proto_, handler_, inner_type, value_type);\n\n    int inner_reps = inner_type == FieldType::Map ? 
2 : 1;\n    for (int j = 0; j < inner_reps; j++) {\n      expectValue(proto_, handler_, value_type);\n    }\n\n    expectContainerEnd(proto_, handler_, inner_type);\n  }\n\n  expectContainerEnd(proto_, handler_, outer_field_type);\n\n  // end of message and outermost struct\n  expectContainerEnd(proto_, handler_, FieldType::Struct);\n\n  EXPECT_CALL(proto_, readMessageEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler_, messageEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  DecoderStateMachine dsm(proto_, metadata_, handler_);\n\n  EXPECT_EQ(dsm.run(buffer), ProtocolState::Done);\n  EXPECT_EQ(dsm.currentState(), ProtocolState::Done);\n}\n\nTEST(DecoderTest, OnData) {\n  NiceMock<MockTransport> transport;\n  NiceMock<MockProtocol> proto;\n  NiceMock<MockDecoderCallbacks> callbacks;\n  StrictMock<MockDecoderEventHandler> handler;\n  ON_CALL(callbacks, newDecoderEventHandler()).WillByDefault(ReturnRef(handler));\n\n  InSequence dummy;\n  Decoder decoder(transport, proto, callbacks);\n  Buffer::OwnedImpl buffer;\n\n  EXPECT_CALL(transport, decodeFrameStart(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setFrameSize(100);\n        return true;\n      }));\n  EXPECT_CALL(handler, transportBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_TRUE(metadata->hasFrameSize());\n        EXPECT_EQ(100U, metadata->frameSize());\n        return FilterStatus::Continue;\n      }));\n\n  EXPECT_CALL(proto, readMessageBegin(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(handler, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        
EXPECT_TRUE(metadata->hasMethodName());\n        EXPECT_TRUE(metadata->hasMessageType());\n        EXPECT_TRUE(metadata->hasSequenceId());\n        EXPECT_EQ(\"name\", metadata->methodName());\n        EXPECT_EQ(MessageType::Call, metadata->messageType());\n        EXPECT_EQ(100U, metadata->sequenceId());\n        return FilterStatus::Continue;\n      }));\n\n  EXPECT_CALL(proto, readStructBegin(Ref(buffer), _)).WillOnce(Return(true));\n  EXPECT_CALL(handler, structBegin(absl::string_view())).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_CALL(proto, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n  EXPECT_CALL(proto, readStructEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler, structEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_CALL(proto, readMessageEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler, messageEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_CALL(transport, decodeFrameEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler, transportEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  bool underflow = false;\n  EXPECT_EQ(FilterStatus::Continue, decoder.onData(buffer, underflow));\n  EXPECT_TRUE(underflow);\n}\n\nTEST(DecoderTest, OnDataWithProtocolHint) {\n  NiceMock<MockTransport> transport;\n  NiceMock<MockProtocol> proto;\n  NiceMock<MockDecoderCallbacks> callbacks;\n  StrictMock<MockDecoderEventHandler> handler;\n  ON_CALL(callbacks, newDecoderEventHandler()).WillByDefault(ReturnRef(handler));\n\n  InSequence dummy;\n  Decoder decoder(transport, proto, callbacks);\n  Buffer::OwnedImpl buffer;\n\n  EXPECT_CALL(transport, decodeFrameStart(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setFrameSize(100);\n        metadata.setProtocol(ProtocolType::Binary);\n        return true;\n      }));\n  EXPECT_CALL(proto, 
type()).WillOnce(Return(ProtocolType::Auto));\n  EXPECT_CALL(proto, setType(ProtocolType::Binary));\n  EXPECT_CALL(handler, transportBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_TRUE(metadata->hasFrameSize());\n        EXPECT_EQ(100U, metadata->frameSize());\n\n        EXPECT_TRUE(metadata->hasProtocol());\n        EXPECT_EQ(ProtocolType::Binary, metadata->protocol());\n\n        return FilterStatus::Continue;\n      }));\n\n  EXPECT_CALL(proto, readMessageBegin(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(handler, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_TRUE(metadata->hasMethodName());\n        EXPECT_TRUE(metadata->hasMessageType());\n        EXPECT_TRUE(metadata->hasSequenceId());\n        EXPECT_EQ(\"name\", metadata->methodName());\n        EXPECT_EQ(MessageType::Call, metadata->messageType());\n        EXPECT_EQ(100U, metadata->sequenceId());\n        return FilterStatus::Continue;\n      }));\n\n  EXPECT_CALL(proto, readStructBegin(Ref(buffer), _)).WillOnce(Return(true));\n  EXPECT_CALL(handler, structBegin(absl::string_view())).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_CALL(proto, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n  EXPECT_CALL(proto, readStructEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler, structEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_CALL(proto, readMessageEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler, messageEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  EXPECT_CALL(transport, decodeFrameEnd(Ref(buffer))).WillOnce(Return(true));\n  
EXPECT_CALL(handler, transportEnd()).WillOnce(Return(FilterStatus::Continue));\n\n  bool underflow = false;\n  EXPECT_EQ(FilterStatus::Continue, decoder.onData(buffer, underflow));\n  EXPECT_TRUE(underflow);\n}\n\nTEST(DecoderTest, OnDataWithInconsistentProtocolHint) {\n  NiceMock<MockTransport> transport;\n  NiceMock<MockProtocol> proto;\n  NiceMock<MockDecoderCallbacks> callbacks;\n  StrictMock<MockDecoderEventHandler> handler;\n  ON_CALL(callbacks, newDecoderEventHandler()).WillByDefault(ReturnRef(handler));\n\n  InSequence dummy;\n  Decoder decoder(transport, proto, callbacks);\n  Buffer::OwnedImpl buffer;\n\n  EXPECT_CALL(transport, decodeFrameStart(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setFrameSize(100);\n        metadata.setProtocol(ProtocolType::Binary);\n        return true;\n      }));\n  EXPECT_CALL(proto, type()).WillRepeatedly(Return(ProtocolType::Compact));\n\n  bool underflow = false;\n  EXPECT_THROW_WITH_MESSAGE(decoder.onData(buffer, underflow), EnvoyException,\n                            \"transport reports protocol binary, but configured for compact\");\n}\n\nTEST(DecoderTest, OnDataThrowsTransportAppException) {\n  NiceMock<MockTransport> transport;\n  NiceMock<MockProtocol> proto;\n  NiceMock<MockDecoderCallbacks> callbacks;\n  StrictMock<MockDecoderEventHandler> handler;\n  ON_CALL(callbacks, newDecoderEventHandler()).WillByDefault(ReturnRef(handler));\n\n  InSequence dummy;\n  Decoder decoder(transport, proto, callbacks);\n  Buffer::OwnedImpl buffer;\n\n  EXPECT_CALL(transport, decodeFrameStart(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setAppException(AppExceptionType::InvalidTransform, \"unknown xform\");\n        return true;\n      }));\n\n  bool underflow = false;\n  EXPECT_THROW_WITH_MESSAGE(decoder.onData(buffer, underflow), AppException, \"unknown 
xform\");\n}\n\nTEST(DecoderTest, OnDataResumes) {\n  NiceMock<MockTransport> transport;\n  NiceMock<MockProtocol> proto;\n  NiceMock<MockDecoderCallbacks> callbacks;\n  NiceMock<MockDecoderEventHandler> handler;\n  ON_CALL(callbacks, newDecoderEventHandler()).WillByDefault(ReturnRef(handler));\n\n  InSequence dummy;\n\n  Decoder decoder(transport, proto, callbacks);\n  Buffer::OwnedImpl buffer;\n  buffer.add(\"x\");\n\n  EXPECT_CALL(transport, decodeFrameStart(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setFrameSize(100);\n        return true;\n      }));\n  EXPECT_CALL(proto, readMessageBegin(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(proto, readStructBegin(_, _)).WillOnce(Return(false));\n\n  bool underflow = false;\n  EXPECT_EQ(FilterStatus::Continue, decoder.onData(buffer, underflow));\n  EXPECT_TRUE(underflow);\n\n  EXPECT_CALL(proto, readStructBegin(_, _)).WillOnce(Return(true));\n  EXPECT_CALL(proto, readFieldBegin(_, _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n  EXPECT_CALL(proto, readStructEnd(_)).WillOnce(Return(true));\n  EXPECT_CALL(proto, readMessageEnd(_)).WillOnce(Return(true));\n  EXPECT_CALL(transport, decodeFrameEnd(_)).WillOnce(Return(true));\n\n  EXPECT_EQ(FilterStatus::Continue, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow); // buffer.length() == 1\n}\n\nTEST(DecoderTest, OnDataResumesTransportFrameStart) {\n  StrictMock<MockTransport> transport;\n  StrictMock<MockProtocol> proto;\n  NiceMock<MockDecoderCallbacks> callbacks;\n  NiceMock<MockDecoderEventHandler> handler;\n  ON_CALL(callbacks, newDecoderEventHandler()).WillByDefault(ReturnRef(handler));\n\n  EXPECT_CALL(transport, 
name()).Times(AnyNumber());\n  EXPECT_CALL(proto, name()).Times(AnyNumber());\n\n  InSequence dummy;\n\n  Decoder decoder(transport, proto, callbacks);\n  Buffer::OwnedImpl buffer;\n  bool underflow = false;\n\n  EXPECT_CALL(transport, decodeFrameStart(Ref(buffer), _)).WillOnce(Return(false));\n  EXPECT_EQ(FilterStatus::Continue, decoder.onData(buffer, underflow));\n  EXPECT_TRUE(underflow);\n\n  EXPECT_CALL(transport, decodeFrameStart(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setFrameSize(100);\n        return true;\n      }));\n  EXPECT_CALL(proto, readMessageBegin(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(proto, readStructBegin(_, _)).WillOnce(Return(true));\n  EXPECT_CALL(proto, readFieldBegin(_, _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n  EXPECT_CALL(proto, readStructEnd(_)).WillOnce(Return(true));\n  EXPECT_CALL(proto, readMessageEnd(_)).WillOnce(Return(true));\n  EXPECT_CALL(transport, decodeFrameEnd(_)).WillOnce(Return(true));\n\n  underflow = false;\n  EXPECT_EQ(FilterStatus::Continue, decoder.onData(buffer, underflow));\n  EXPECT_TRUE(underflow); // buffer.length() == 0\n}\n\nTEST(DecoderTest, OnDataResumesTransportFrameEnd) {\n  StrictMock<MockTransport> transport;\n  StrictMock<MockProtocol> proto;\n  NiceMock<MockDecoderCallbacks> callbacks;\n  NiceMock<MockDecoderEventHandler> handler;\n  ON_CALL(callbacks, newDecoderEventHandler()).WillByDefault(ReturnRef(handler));\n\n  EXPECT_CALL(transport, name()).Times(AnyNumber());\n  EXPECT_CALL(proto, name()).Times(AnyNumber());\n\n  InSequence dummy;\n\n  Decoder decoder(transport, proto, callbacks);\n  Buffer::OwnedImpl buffer;\n\n  EXPECT_CALL(transport, 
decodeFrameStart(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setFrameSize(100);\n        return true;\n      }));\n  EXPECT_CALL(proto, readMessageBegin(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(proto, readStructBegin(_, _)).WillOnce(Return(true));\n  EXPECT_CALL(proto, readFieldBegin(_, _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n  EXPECT_CALL(proto, readStructEnd(_)).WillOnce(Return(true));\n  EXPECT_CALL(proto, readMessageEnd(_)).WillOnce(Return(true));\n  EXPECT_CALL(transport, decodeFrameEnd(_)).WillOnce(Return(false));\n\n  bool underflow = false;\n  EXPECT_EQ(FilterStatus::Continue, decoder.onData(buffer, underflow));\n  EXPECT_TRUE(underflow);\n\n  EXPECT_CALL(transport, decodeFrameEnd(_)).WillOnce(Return(true));\n  EXPECT_EQ(FilterStatus::Continue, decoder.onData(buffer, underflow));\n  EXPECT_TRUE(underflow); // buffer.length() == 0\n}\n\nTEST(DecoderTest, OnDataHandlesStopIterationAndResumes) {\n  StrictMock<MockTransport> transport;\n  EXPECT_CALL(transport, name()).WillRepeatedly(ReturnRef(transport.name_));\n\n  StrictMock<MockProtocol> proto;\n  EXPECT_CALL(proto, name()).WillRepeatedly(ReturnRef(proto.name_));\n\n  NiceMock<MockDecoderCallbacks> callbacks;\n  StrictMock<MockDecoderEventHandler> handler;\n  ON_CALL(callbacks, newDecoderEventHandler()).WillByDefault(ReturnRef(handler));\n\n  InSequence dummy;\n  Decoder decoder(transport, proto, callbacks);\n  Buffer::OwnedImpl buffer;\n  bool underflow = true;\n\n  EXPECT_CALL(transport, decodeFrameStart(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setFrameSize(100);\n        return 
true;\n      }));\n  EXPECT_CALL(handler, transportBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_TRUE(metadata->hasFrameSize());\n        EXPECT_EQ(100U, metadata->frameSize());\n\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_EQ(FilterStatus::StopIteration, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow);\n\n  EXPECT_CALL(proto, readMessageBegin(Ref(buffer), _))\n      .WillOnce(Invoke([&](Buffer::Instance&, MessageMetadata& metadata) -> bool {\n        metadata.setMethodName(\"name\");\n        metadata.setMessageType(MessageType::Call);\n        metadata.setSequenceId(100);\n        return true;\n      }));\n  EXPECT_CALL(handler, messageBegin(_))\n      .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus {\n        EXPECT_TRUE(metadata->hasMethodName());\n        EXPECT_TRUE(metadata->hasMessageType());\n        EXPECT_TRUE(metadata->hasSequenceId());\n        EXPECT_EQ(\"name\", metadata->methodName());\n        EXPECT_EQ(MessageType::Call, metadata->messageType());\n        EXPECT_EQ(100U, metadata->sequenceId());\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_EQ(FilterStatus::StopIteration, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow);\n\n  EXPECT_CALL(proto, readStructBegin(Ref(buffer), _)).WillOnce(Return(true));\n  EXPECT_CALL(handler, structBegin(absl::string_view()))\n      .WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_EQ(FilterStatus::StopIteration, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow);\n\n  FieldType field_type = FieldType::I32;\n  int16_t field_id = 1;\n  EXPECT_CALL(proto, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(field_type), SetArgReferee<3>(field_id), Return(true)));\n  EXPECT_CALL(handler, fieldBegin(absl::string_view(), _, _))\n      .WillOnce(Invoke([&](absl::string_view, FieldType& ft, int16_t& id) -> FilterStatus {\n  
      EXPECT_EQ(field_type, ft);\n        EXPECT_EQ(field_id, id);\n        return FilterStatus::StopIteration;\n      }));\n  EXPECT_EQ(FilterStatus::StopIteration, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow);\n\n  EXPECT_CALL(proto, readInt32(_, _)).WillOnce(Return(true));\n  EXPECT_CALL(handler, int32Value(_)).WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_EQ(FilterStatus::StopIteration, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow);\n\n  EXPECT_CALL(proto, readFieldEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler, fieldEnd()).WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_EQ(FilterStatus::StopIteration, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow);\n\n  EXPECT_CALL(proto, readFieldBegin(Ref(buffer), _, _, _))\n      .WillOnce(DoAll(SetArgReferee<2>(FieldType::Stop), Return(true)));\n  EXPECT_CALL(proto, readStructEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler, structEnd()).WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_EQ(FilterStatus::StopIteration, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow);\n\n  EXPECT_CALL(proto, readMessageEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler, messageEnd()).WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_EQ(FilterStatus::StopIteration, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow);\n\n  EXPECT_CALL(transport, decodeFrameEnd(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(handler, transportEnd()).WillOnce(Return(FilterStatus::StopIteration));\n  EXPECT_EQ(FilterStatus::StopIteration, decoder.onData(buffer, underflow));\n  EXPECT_FALSE(underflow);\n\n  EXPECT_EQ(FilterStatus::Continue, decoder.onData(buffer, underflow));\n  EXPECT_TRUE(underflow);\n}\n\n#define TEST_NAME(X) EXPECT_EQ(ProtocolStateNameValues::name(ProtocolState::X), #X);\n\nTEST(ProtocolStateNameValuesTest, ValidNames) { ALL_PROTOCOL_STATES(TEST_NAME) }\n\n} // namespace 
ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_binary\")\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nfilegroup(\n    name = \"generate_fixture\",\n    srcs = [\"generate_fixture.sh\"],\n    data = [\n        \":client\",\n        \":server\",\n    ],\n)\n\npy_binary(\n    name = \"client\",\n    srcs = [\"client.py\"],\n    python_version = \"PY2\",\n    deps = [\n        \"//test/extensions/filters/network/thrift_proxy/driver/fbthrift:fbthrift_lib\",\n        \"//test/extensions/filters/network/thrift_proxy/driver/finagle:finagle_lib\",\n        \"//test/extensions/filters/network/thrift_proxy/driver/generated/example:example_lib\",\n        \"@com_github_twitter_common_rpc//:twitter_common_rpc\",\n    ],\n)\n\npy_binary(\n    name = \"server\",\n    srcs = [\"server.py\"],\n    python_version = \"PY2\",\n    deps = [\n        \"//test/extensions/filters/network/thrift_proxy/driver/fbthrift:fbthrift_lib\",\n        \"//test/extensions/filters/network/thrift_proxy/driver/finagle:finagle_lib\",\n        \"//test/extensions/filters/network/thrift_proxy/driver/generated/example:example_lib\",\n        \"@com_github_twitter_common_rpc//:twitter_common_rpc\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/README.md",
    "content": "Thrift Integration Test Driver\n==============================\n\nThe code in this package provides `client.py` and `server.py` which\ncan be used as a thrift client and server pair. Both scripts support\nall the Thrift transport and protocol variations that Envoy's Thrift\nproxy supports (or will eventually support):\n\nTransports: framed, unframed, header\nProtocols: binary, compact, json, ttwitter (e.g., finagle-thrift)\n\nThe client script can be configured to write its request and the\nserver's response to a file. The server script can be configured to\nreturn successful responses, IDL-defined exceptions, or server\n(application) exceptions.\n\nEnvoy's thrift_proxy integration tests use the `generate_fixtures.sh`\nscript to create request and response files for various combinations\nof transport, protocol, service multiplexing. In addition, the\nintegration tests generate IDL and application exception responses.\nThe generated data is used with the Envoy's integration test\ninfrastructure to simulate downstream and upstream connections.\nGenerated files are used instead of running the client and server\nscripts directly to eliminate the need to select a Thrift upstream\nserver port (or determine its self-selected port).\n\nRegenerating example.thrift\n---------------------------\n\nInstall the Apache thrift library (from source or a package) so that\nthe `thrift` command is available. The `generate_bindings.sh` script\nwill regenerate the Python bindings which are checked into the\nrepository.\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/client.py",
    "content": "#!/usr/bin/env python\n\nimport argparse\nimport io\nimport sys\n\nfrom generated.example import Example\nfrom generated.example.ttypes import (Param, TheWorks, AppException)\n\nfrom thrift import Thrift\nfrom thrift.protocol import (TBinaryProtocol, TCompactProtocol, TJSONProtocol, TMultiplexedProtocol)\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom fbthrift import THeaderTransport\nfrom twitter.common.rpc.finagle.protocol import TFinagleProtocol\n\n\nclass TRecordingTransport(TTransport.TTransportBase):\n\n  def __init__(self, underlying, writehandle, readhandle):\n    self._underlying = underlying\n    self._whandle = writehandle\n    self._rhandle = readhandle\n\n  def isOpen(self):\n    return self._underlying.isOpen()\n\n  def open(self):\n    if not self._underlying.isOpen():\n      self._underlying.open()\n\n  def close(self):\n    self._underlying.close()\n    self._whandle.close()\n    self._rhandle.close()\n\n  def read(self, sz):\n    buf = self._underlying.read(sz)\n    if len(buf) != 0:\n      self._rhandle.write(buf)\n    return buf\n\n  def write(self, buf):\n    if len(buf) != 0:\n      self._whandle.write(buf)\n    self._underlying.write(buf)\n\n  def flush(self):\n    self._underlying.flush()\n    self._whandle.flush()\n    self._rhandle.flush()\n\n\ndef main(cfg, reqhandle, resphandle):\n  if cfg.unix:\n    if cfg.addr == \"\":\n      sys.exit(\"invalid unix domain socket: {}\".format(cfg.addr))\n    socket = TSocket.TSocket(unix_socket=cfg.addr)\n  else:\n    try:\n      (host, port) = cfg.addr.rsplit(\":\", 1)\n      if host == \"\":\n        host = \"localhost\"\n      socket = TSocket.TSocket(host=host, port=int(port))\n    except ValueError:\n      sys.exit(\"invalid address: {}\".format(cfg.addr))\n\n  transport = TRecordingTransport(socket, reqhandle, resphandle)\n\n  if cfg.transport == \"framed\":\n    transport = TTransport.TFramedTransport(transport)\n  elif cfg.transport == 
\"unframed\":\n    transport = TTransport.TBufferedTransport(transport)\n  elif cfg.transport == \"header\":\n    transport = THeaderTransport.THeaderTransport(\n        transport,\n        client_type=THeaderTransport.CLIENT_TYPE.HEADER,\n    )\n\n    if cfg.headers is not None:\n      pairs = cfg.headers.split(\",\")\n      for p in pairs:\n        key, value = p.split(\"=\")\n        transport.set_header(key, value)\n\n    if cfg.protocol == \"binary\":\n      transport.set_protocol_id(THeaderTransport.T_BINARY_PROTOCOL)\n    elif cfg.protocol == \"compact\":\n      transport.set_protocol_id(THeaderTransport.T_COMPACT_PROTOCOL)\n    else:\n      sys.exit(\"header transport cannot be used with protocol {0}\".format(cfg.protocol))\n  else:\n    sys.exit(\"unknown transport {0}\".format(cfg.transport))\n\n  transport.open()\n\n  if cfg.protocol == \"binary\":\n    protocol = TBinaryProtocol.TBinaryProtocol(transport)\n  elif cfg.protocol == \"compact\":\n    protocol = TCompactProtocol.TCompactProtocol(transport)\n  elif cfg.protocol == \"json\":\n    protocol = TJSONProtocol.TJSONProtocol(transport)\n  elif cfg.protocol == \"finagle\":\n    protocol = TFinagleProtocol(transport, client_id=\"thrift-playground\")\n  else:\n    sys.exit(\"unknown protocol {0}\".format(cfg.protocol))\n\n  if cfg.service is not None:\n    protocol = TMultiplexedProtocol.TMultiplexedProtocol(protocol, cfg.service)\n\n  client = Example.Client(protocol)\n\n  try:\n    if cfg.method == \"ping\":\n      client.ping()\n      print(\"client: pinged\")\n    elif cfg.method == \"poke\":\n      client.poke()\n      print(\"client: poked\")\n    elif cfg.method == \"add\":\n      if len(cfg.params) != 2:\n        sys.exit(\"add takes 2 arguments, got: {0}\".format(cfg.params))\n\n      a = int(cfg.params[0])\n      b = int(cfg.params[1])\n      v = client.add(a, b)\n      print(\"client: added {0} + {1} = {2}\".format(a, b, v))\n    elif cfg.method == \"execute\":\n      param = 
Param(return_fields=cfg.params,\n                    the_works=TheWorks(\n                        field_1=True,\n                        field_2=0x7f,\n                        field_3=0x7fff,\n                        field_4=0x7fffffff,\n                        field_5=0x7fffffffffffffff,\n                        field_6=-1.5,\n                        field_7=u\"string is UTF-8: \\U0001f60e\",\n                        field_8=b\"binary is bytes: \\x80\\x7f\\x00\\x01\",\n                        field_9={\n                            1: \"one\",\n                            2: \"two\",\n                            3: \"three\"\n                        },\n                        field_10=[1, 2, 4, 8],\n                        field_11=set([\"a\", \"b\", \"c\"]),\n                        field_12=False,\n                    ))\n\n      try:\n        result = client.execute(param)\n        print(\"client: executed {0}: {1}\".format(param, result))\n      except AppException as e:\n        print(\"client: execute failed with IDL Exception: {0}\".format(e.why))\n    else:\n      sys.exit(\"unknown method {0}\".format(cfg.method))\n  except Thrift.TApplicationException as e:\n    print(\"client exception: {0}: {1}\".format(e.type, e.message))\n\n  if cfg.request is None:\n    req = \"\".join([\"%02X \" % ord(x) for x in reqhandle.getvalue()]).strip()\n    print(\"request: {}\".format(req))\n  if cfg.response is None:\n    resp = \"\".join([\"%02X \" % ord(x) for x in resphandle.getvalue()]).strip()\n    print(\"response: {}\".format(resp))\n\n  transport.close()\n\n\nif __name__ == \"__main__\":\n  parser = argparse.ArgumentParser(description=\"Thrift client tool.\",)\n  parser.add_argument(\n      \"method\",\n      metavar=\"METHOD\",\n      help=\"Name of the service method to invoke.\",\n  )\n  parser.add_argument(\n      \"params\",\n      metavar=\"PARAMS\",\n      nargs=\"*\",\n      help=\"Method parameters\",\n  )\n  parser.add_argument(\n      \"-a\",\n      
\"--addr\",\n      metavar=\"ADDR\",\n      dest=\"addr\",\n      required=True,\n      help=\"Target address for requests in the form host:port. The host is optional. If --unix\" +\n      \" is set, the address is the socket name.\",\n  )\n  parser.add_argument(\n      \"-m\",\n      \"--multiplex\",\n      metavar=\"SERVICE\",\n      dest=\"service\",\n      help=\"Enable service multiplexing and set the service name.\",\n  )\n  parser.add_argument(\n      \"-p\",\n      \"--protocol\",\n      dest=\"protocol\",\n      default=\"binary\",\n      choices=[\"binary\", \"compact\", \"json\", \"finagle\"],\n      help=\"selects a protocol.\",\n  )\n  parser.add_argument(\n      \"--request\",\n      metavar=\"FILE\",\n      dest=\"request\",\n      help=\"Writes the Thrift request to a file.\",\n  )\n  parser.add_argument(\n      \"--response\",\n      metavar=\"FILE\",\n      dest=\"response\",\n      help=\"Writes the Thrift response to a file.\",\n  )\n  parser.add_argument(\n      \"-t\",\n      \"--transport\",\n      dest=\"transport\",\n      default=\"framed\",\n      choices=[\"framed\", \"unframed\", \"header\"],\n      help=\"selects a transport.\",\n  )\n  parser.add_argument(\n      \"-u\",\n      \"--unix\",\n      dest=\"unix\",\n      action=\"store_true\",\n  )\n  parser.add_argument(\n      \"--headers\",\n      dest=\"headers\",\n      metavar=\"KEY=VALUE[,KEY=VALUE]\",\n      help=\"list of comma-delimited, key value pairs to include as transport headers.\",\n  )\n\n  cfg = parser.parse_args()\n\n  reqhandle = io.BytesIO()\n  resphandle = io.BytesIO()\n  if cfg.request is not None:\n    try:\n      reqhandle = io.open(cfg.request, \"wb\")\n    except IOError as e:\n      sys.exit(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n  if cfg.response is not None:\n    try:\n      resphandle = io.open(cfg.response, \"wb\")\n    except IOError as e:\n      sys.exit(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n  try:\n    main(cfg, 
reqhandle, resphandle)\n  except Thrift.TException as tx:\n    sys.exit(\"Unhandled Thrift Exception: {0}\".format(tx.message))\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/example.thrift",
    "content": "// TheWorks contains one instance of each type of field. Envoy does not\n// concern itself with the optionality of fields, so we leave it\n// defaulted.\nstruct TheWorks {\n  1: bool field_1,\n  2: i8 field_2,\n  3: i16 field_3,\n  4: i32 field_4,\n  5: i64 field_5,\n  6: double field_6,\n  7: string field_7,\n  8: binary field_8,\n  9: map<i32, string> field_9,\n  10: list<i32> field_10,\n  11: set<string> field_11,\n  12: bool field_12,\n}\n\nstruct Param {\n  1: list<string> return_fields,\n  2: TheWorks the_works,\n}\n\nstruct Result {\n  1: TheWorks the_works,\n}\n\nexception AppException {\n  1: string why,\n}\n\nservice Example {\n  void ping(),\n\n  oneway void poke(),\n\n  i32 add(1:i32 a, 2:i32 b),\n\n  Result execute(1:Param input) throws (1:AppException appex),\n}\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/fbthrift/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_library\")\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\nload(\"@thrift_pip3//:requirements.bzl\", \"requirement\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\npy_library(\n    name = \"fbthrift_lib\",\n    srcs = [\n        \"THeaderTransport.py\",\n        \"__init__.py\",\n    ],\n    deps = [\n        requirement(\"thrift\"),\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/fbthrift/THeaderTransport.py",
    "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n# INFO:(zuercher):  Adapted from\n# https://github.com/facebook/fbthrift/blob/b090870/thrift/lib/py/transport/THeaderTransport.py\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nif sys.version_info[0] >= 3:\n  from http import server\n  BaseHTTPServer = server\n  xrange = range\n  from io import BytesIO as StringIO\n  PY3 = True\nelse:\n  import BaseHTTPServer\n  from cStringIO import StringIO\n  PY3 = False\n\nfrom struct import pack, unpack\nimport zlib\n\nfrom thrift.Thrift import TApplicationException\nfrom thrift.transport.TTransport import TTransportException, TTransportBase, CReadableTransport\n\n# INFO:(zuercher): Instead of importing these constants from TBinaryProtocol and TCompactProtocol\nBINARY_PROTO_ID = 0x80\nCOMPACT_PROTO_ID = 0x82\n\n\n# INFO:(zuercher): Copied from:\n# https://github.com/facebook/fbthrift/blob/b090870/thrift/lib/py/protocol/TCompactProtocol.py\ndef getVarint(n):\n  out = []\n  while True:\n    if n & ~0x7f == 0:\n      out.append(n)\n      break\n    else:\n      out.append((n & 0xff) | 0x80)\n      n 
= n >> 7\n  if sys.version_info[0] >= 3:\n    return bytes(out)\n  else:\n    return b''.join(map(chr, out))\n\n\n# INFO:(zuercher): Copied from\n# https://github.com/facebook/fbthrift/blob/b090870/thrift/lib/py/protocol/TCompactProtocol.py\ndef readVarint(trans):\n  result = 0\n  shift = 0\n  while True:\n    x = trans.read(1)\n    byte = ord(x)\n    result |= (byte & 0x7f) << shift\n    if byte >> 7 == 0:\n      return result\n    shift += 7\n\n\n# Import the snappy module if it is available\ntry:\n  import snappy\nexcept ImportError:\n  # If snappy is not available, don't fail immediately.\n  # Only raise an error if we actually ever need to perform snappy\n  # compression.\n  class DummySnappy(object):\n\n    def compress(self, buf):\n      raise TTransportException(TTransportException.INVALID_TRANSFORM,\n                                'snappy module not available')\n\n    def decompress(self, buf):\n      raise TTransportException(TTransportException.INVALID_TRANSFORM,\n                                'snappy module not available')\n\n  snappy = DummySnappy()  # type: ignore\n\n# Definitions from THeader.h\n\n\nclass CLIENT_TYPE:\n  HEADER = 0\n  FRAMED_DEPRECATED = 1\n  UNFRAMED_DEPRECATED = 2\n  HTTP_SERVER = 3\n  HTTP_CLIENT = 4\n  FRAMED_COMPACT = 5\n  HEADER_SASL = 6\n  HTTP_GET = 7\n  UNKNOWN = 8\n  UNFRAMED_COMPACT_DEPRECATED = 9\n\n\nclass HEADER_FLAG:\n  SUPPORT_OUT_OF_ORDER = 0x01\n  DUPLEX_REVERSE = 0x08\n  SASL = 0x10\n\n\nclass TRANSFORM:\n  NONE = 0x00\n  ZLIB = 0x01\n  HMAC = 0x02\n  SNAPPY = 0x03\n  QLZ = 0x04\n  ZSTD = 0x05\n\n\nclass INFO:\n  NORMAL = 1\n  PERSISTENT = 2\n\n\nT_BINARY_PROTOCOL = 0\nT_COMPACT_PROTOCOL = 2\nHEADER_MAGIC = 0x0FFF0000\nPACKED_HEADER_MAGIC = pack(b'!H', HEADER_MAGIC >> 16)\nHEADER_MASK = 0xFFFF0000\nFLAGS_MASK = 0x0000FFFF\nHTTP_SERVER_MAGIC = 0x504F5354  # POST\nHTTP_CLIENT_MAGIC = 0x48545450  # HTTP\nHTTP_GET_CLIENT_MAGIC = 0x47455420  # GET\nHTTP_HEAD_CLIENT_MAGIC = 0x48454144  # HEAD\nBIG_FRAME_MAGIC = 
0x42494746  # BIGF\nMAX_FRAME_SIZE = 0x3FFFFFFF\nMAX_BIG_FRAME_SIZE = 2**61 - 1\n\n\nclass THeaderTransport(TTransportBase, CReadableTransport):\n  \"\"\"Transport that sends headers.  Also understands framed/unframed/HTTP\n    transports and will do the right thing\"\"\"\n\n  __max_frame_size = MAX_FRAME_SIZE\n\n  # Defaults to current user, but there is also a setter below.\n  __identity = None\n  IDENTITY_HEADER = \"identity\"\n  ID_VERSION_HEADER = \"id_version\"\n  ID_VERSION = \"1\"\n\n  def __init__(self, trans, client_types=None, client_type=None):\n    self.__trans = trans\n    self.__rbuf = StringIO()\n    self.__rbuf_frame = False\n    self.__wbuf = StringIO()\n    self.seq_id = 0\n    self.__flags = 0\n    self.__read_transforms = []\n    self.__write_transforms = []\n    self.__supported_client_types = set(client_types or (CLIENT_TYPE.HEADER,))\n    self.__proto_id = T_COMPACT_PROTOCOL  # default to compact like c++\n    self.__client_type = client_type or CLIENT_TYPE.HEADER\n    self.__read_headers = {}\n    self.__read_persistent_headers = {}\n    self.__write_headers = {}\n    self.__write_persistent_headers = {}\n\n    self.__supported_client_types.add(self.__client_type)\n\n    # If we support unframed binary / framed binary also support compact\n    if CLIENT_TYPE.UNFRAMED_DEPRECATED in self.__supported_client_types:\n      self.__supported_client_types.add(CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED)\n    if CLIENT_TYPE.FRAMED_DEPRECATED in self.__supported_client_types:\n      self.__supported_client_types.add(CLIENT_TYPE.FRAMED_COMPACT)\n\n  def set_header_flag(self, flag):\n    self.__flags |= flag\n\n  def clear_header_flag(self, flag):\n    self.__flags &= ~flag\n\n  def header_flags(self):\n    return self.__flags\n\n  def set_max_frame_size(self, size):\n    if size > MAX_BIG_FRAME_SIZE:\n      raise TTransportException(TTransportException.INVALID_FRAME_SIZE,\n                                \"Cannot set max frame size > %s\" % 
MAX_BIG_FRAME_SIZE)\n    if size > MAX_FRAME_SIZE and self.__client_type != CLIENT_TYPE.HEADER:\n      raise TTransportException(\n          TTransportException.INVALID_FRAME_SIZE,\n          \"Cannot set max frame size > %s for clients other than HEADER\" % MAX_FRAME_SIZE)\n    self.__max_frame_size = size\n\n  def get_peer_identity(self):\n    if self.IDENTITY_HEADER in self.__read_headers:\n      if self.__read_headers[self.ID_VERSION_HEADER] == self.ID_VERSION:\n        return self.__read_headers[self.IDENTITY_HEADER]\n    return None\n\n  def set_identity(self, identity):\n    self.__identity = identity\n\n  def get_protocol_id(self):\n    return self.__proto_id\n\n  def set_protocol_id(self, proto_id):\n    self.__proto_id = proto_id\n\n  def set_header(self, str_key, str_value):\n    self.__write_headers[str_key] = str_value\n\n  def get_write_headers(self):\n    return self.__write_headers\n\n  def get_headers(self):\n    return self.__read_headers\n\n  def clear_headers(self):\n    self.__write_headers.clear()\n\n  def set_persistent_header(self, str_key, str_value):\n    self.__write_persistent_headers[str_key] = str_value\n\n  def get_write_persistent_headers(self):\n    return self.__write_persistent_headers\n\n  def clear_persistent_headers(self):\n    self.__write_persistent_headers.clear()\n\n  def add_transform(self, trans_id):\n    self.__write_transforms.append(trans_id)\n\n  def _reset_protocol(self):\n    # HTTP calls that are one way need to flush here.\n    if self.__client_type == CLIENT_TYPE.HTTP_SERVER:\n      self.flush()\n    # set to anything except unframed\n    self.__client_type = CLIENT_TYPE.UNKNOWN\n    # Read header bytes to check which protocol to decode\n    self.readFrame(0)\n\n  def getTransport(self):\n    return self.__trans\n\n  def isOpen(self):\n    return self.getTransport().isOpen()\n\n  def open(self):\n    return self.getTransport().open()\n\n  def close(self):\n    return self.getTransport().close()\n\n  def 
read(self, sz):\n    ret = self.__rbuf.read(sz)\n    if len(ret) == sz:\n      return ret\n\n    if self.__client_type in (CLIENT_TYPE.UNFRAMED_DEPRECATED,\n                              CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED):\n      return ret + self.getTransport().readAll(sz - len(ret))\n\n    self.readFrame(sz - len(ret))\n    return ret + self.__rbuf.read(sz - len(ret))\n\n  readAll = read  # TTransportBase.readAll does a needless copy here.\n\n  def readFrame(self, req_sz):\n    self.__rbuf_frame = True\n    word1 = self.getTransport().readAll(4)\n    sz = unpack(b'!I', word1)[0]\n    proto_id = word1[0] if PY3 else ord(word1[0])\n    if proto_id == BINARY_PROTO_ID:\n      # unframed\n      self.__client_type = CLIENT_TYPE.UNFRAMED_DEPRECATED\n      self.__proto_id = T_BINARY_PROTOCOL\n      if req_sz <= 4:  # check for reads < 0.\n        self.__rbuf = StringIO(word1)\n      else:\n        self.__rbuf = StringIO(word1 + self.getTransport().read(req_sz - 4))\n    elif proto_id == COMPACT_PROTO_ID:\n      self.__client_type = CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED\n      self.__proto_id = T_COMPACT_PROTOCOL\n      if req_sz <= 4:  # check for reads < 0.\n        self.__rbuf = StringIO(word1)\n      else:\n        self.__rbuf = StringIO(word1 + self.getTransport().read(req_sz - 4))\n    elif sz == HTTP_SERVER_MAGIC:\n      self.__client_type = CLIENT_TYPE.HTTP_SERVER\n      mf = self.getTransport().handle.makefile('rb', -1)\n\n      self.handler = RequestHandler(mf, 'client_address:port', '')\n      self.header = self.handler.wfile\n      self.__rbuf = StringIO(self.handler.data)\n    else:\n      if sz == BIG_FRAME_MAGIC:\n        sz = unpack(b'!Q', self.getTransport().readAll(8))[0]\n      # could be header format or framed.  
Check next two bytes.\n      magic = self.getTransport().readAll(2)\n      proto_id = magic[0] if PY3 else ord(magic[0])\n      if proto_id == COMPACT_PROTO_ID:\n        self.__client_type = CLIENT_TYPE.FRAMED_COMPACT\n        self.__proto_id = T_COMPACT_PROTOCOL\n        _frame_size_check(sz, self.__max_frame_size, header=False)\n        self.__rbuf = StringIO(magic + self.getTransport().readAll(sz - 2))\n      elif proto_id == BINARY_PROTO_ID:\n        self.__client_type = CLIENT_TYPE.FRAMED_DEPRECATED\n        self.__proto_id = T_BINARY_PROTOCOL\n        _frame_size_check(sz, self.__max_frame_size, header=False)\n        self.__rbuf = StringIO(magic + self.getTransport().readAll(sz - 2))\n      elif magic == PACKED_HEADER_MAGIC:\n        self.__client_type = CLIENT_TYPE.HEADER\n        _frame_size_check(sz, self.__max_frame_size)\n        # flags(2), seq_id(4), header_size(2)\n        n_header_meta = self.getTransport().readAll(8)\n        self.__flags, self.seq_id, header_size = unpack(b'!HIH', n_header_meta)\n        data = StringIO()\n        data.write(magic)\n        data.write(n_header_meta)\n        data.write(self.getTransport().readAll(sz - 10))\n        data.seek(10)\n        self.read_header_format(sz - 10, header_size, data)\n      else:\n        self.__client_type = CLIENT_TYPE.UNKNOWN\n        raise TTransportException(TTransportException.INVALID_CLIENT_TYPE,\n                                  \"Could not detect client transport type\")\n\n    if self.__client_type not in self.__supported_client_types:\n      raise TTransportException(TTransportException.INVALID_CLIENT_TYPE,\n                                \"Client type {} not supported on server\".format(self.__client_type))\n\n  def read_header_format(self, sz, header_size, data):\n    # clear out any previous transforms\n    self.__read_transforms = []\n\n    header_size = header_size * 4\n    if header_size > sz:\n      raise TTransportException(TTransportException.INVALID_FRAME_SIZE,\n        
                        \"Header size is larger than frame\")\n    end_header = header_size + data.tell()\n\n    self.__proto_id = readVarint(data)\n    num_headers = readVarint(data)\n\n    if self.__proto_id == 1 and self.__client_type != \\\n            CLIENT_TYPE.HTTP_SERVER:\n      raise TTransportException(TTransportException.INVALID_CLIENT_TYPE,\n                                \"Trying to recv JSON encoding over binary\")\n\n    # Read the headers.  Data for each header varies.\n    for _ in range(0, num_headers):\n      trans_id = readVarint(data)\n      if trans_id == TRANSFORM.ZLIB:\n        self.__read_transforms.insert(0, trans_id)\n      elif trans_id == TRANSFORM.SNAPPY:\n        self.__read_transforms.insert(0, trans_id)\n      elif trans_id == TRANSFORM.HMAC:\n        raise TApplicationException(TApplicationException.INVALID_TRANSFORM,\n                                    \"Hmac transform is no longer supported: %i\" % trans_id)\n      else:\n        # TApplicationException will be sent back to client\n        raise TApplicationException(TApplicationException.INVALID_TRANSFORM,\n                                    \"Unknown transform in client request: %i\" % trans_id)\n\n    # Clear out previous info headers.\n    self.__read_headers.clear()\n\n    # Read the info headers.\n    while data.tell() < end_header:\n      info_id = readVarint(data)\n      if info_id == INFO.NORMAL:\n        _read_info_headers(data, end_header, self.__read_headers)\n      elif info_id == INFO.PERSISTENT:\n        _read_info_headers(data, end_header, self.__read_persistent_headers)\n      else:\n        break  # Unknown header.  
Stop info processing.\n\n    if self.__read_persistent_headers:\n      self.__read_headers.update(self.__read_persistent_headers)\n\n    # Skip the rest of the header\n    data.seek(end_header)\n\n    payload = data.read(sz - header_size)\n\n    # Read the data section.\n    self.__rbuf = StringIO(self.untransform(payload))\n\n  def write(self, buf):\n    self.__wbuf.write(buf)\n\n  def transform(self, buf):\n    for trans_id in self.__write_transforms:\n      if trans_id == TRANSFORM.ZLIB:\n        buf = zlib.compress(buf)\n      elif trans_id == TRANSFORM.SNAPPY:\n        buf = snappy.compress(buf)\n      else:\n        raise TTransportException(TTransportException.INVALID_TRANSFORM,\n                                  \"Unknown transform during send\")\n    return buf\n\n  def untransform(self, buf):\n    for trans_id in self.__read_transforms:\n      if trans_id == TRANSFORM.ZLIB:\n        buf = zlib.decompress(buf)\n      elif trans_id == TRANSFORM.SNAPPY:\n        buf = snappy.decompress(buf)\n      if trans_id not in self.__write_transforms:\n        self.__write_transforms.append(trans_id)\n    return buf\n\n  def flush(self):\n    self.flushImpl(False)\n\n  def onewayFlush(self):\n    self.flushImpl(True)\n\n  def _flushHeaderMessage(self, buf, wout, wsz):\n    \"\"\"Write a message for CLIENT_TYPE.HEADER\n\n        @param buf(StringIO): Buffer to write message to\n        @param wout(str): Payload\n        @param wsz(int): Payload length\n        \"\"\"\n    transform_data = StringIO()\n    # For now, all transforms don't require data.\n    num_transforms = len(self.__write_transforms)\n    for trans_id in self.__write_transforms:\n      transform_data.write(getVarint(trans_id))\n\n    # Add in special flags.\n    if self.__identity:\n      self.__write_headers[self.ID_VERSION_HEADER] = self.ID_VERSION\n      self.__write_headers[self.IDENTITY_HEADER] = self.__identity\n\n    info_data = StringIO()\n\n    # Write persistent kv-headers\n    
_flush_info_headers(info_data, self.get_write_persistent_headers(), INFO.PERSISTENT)\n\n    # Write non-persistent kv-headers\n    _flush_info_headers(info_data, self.__write_headers, INFO.NORMAL)\n\n    header_data = StringIO()\n    header_data.write(getVarint(self.__proto_id))\n    header_data.write(getVarint(num_transforms))\n\n    header_size = transform_data.tell() + header_data.tell() + \\\n        info_data.tell()\n\n    padding_size = 4 - (header_size % 4)\n    header_size = header_size + padding_size\n\n    # MAGIC(2) | FLAGS(2) + SEQ_ID(4) + HEADER_SIZE(2)\n    wsz += header_size + 10\n    if wsz > MAX_FRAME_SIZE:\n      buf.write(pack(b\"!I\", BIG_FRAME_MAGIC))\n      buf.write(pack(b\"!Q\", wsz))\n    else:\n      buf.write(pack(b\"!I\", wsz))\n    buf.write(pack(b\"!HH\", HEADER_MAGIC >> 16, self.__flags))\n    buf.write(pack(b\"!I\", self.seq_id))\n    buf.write(pack(b\"!H\", header_size // 4))\n\n    buf.write(header_data.getvalue())\n    buf.write(transform_data.getvalue())\n    buf.write(info_data.getvalue())\n\n    # Pad out the header with 0x00\n    for _ in range(0, padding_size, 1):\n      buf.write(pack(b\"!c\", b'\\0'))\n\n    # Send data section\n    buf.write(wout)\n\n  def flushImpl(self, oneway):\n    wout = self.__wbuf.getvalue()\n    wout = self.transform(wout)\n    wsz = len(wout)\n\n    # reset wbuf before write/flush to preserve state on underlying failure\n    self.__wbuf.seek(0)\n    self.__wbuf.truncate()\n\n    if self.__proto_id == 1 and self.__client_type != CLIENT_TYPE.HTTP_SERVER:\n      raise TTransportException(TTransportException.INVALID_CLIENT_TYPE,\n                                \"Trying to send JSON encoding over binary\")\n\n    buf = StringIO()\n    if self.__client_type == CLIENT_TYPE.HEADER:\n      self._flushHeaderMessage(buf, wout, wsz)\n    elif self.__client_type in (CLIENT_TYPE.FRAMED_DEPRECATED, CLIENT_TYPE.FRAMED_COMPACT):\n      buf.write(pack(b\"!i\", wsz))\n      buf.write(wout)\n    elif 
self.__client_type in (CLIENT_TYPE.UNFRAMED_DEPRECATED,\n                                CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED):\n      buf.write(wout)\n    elif self.__client_type == CLIENT_TYPE.HTTP_SERVER:\n      # Reset the client type if we sent something -\n      # oneway calls via HTTP expect a status response otherwise\n      buf.write(self.header.getvalue())\n      buf.write(wout)\n      self.__client_type == CLIENT_TYPE.HEADER\n    elif self.__client_type == CLIENT_TYPE.UNKNOWN:\n      raise TTransportException(TTransportException.INVALID_CLIENT_TYPE, \"Unknown client type\")\n\n    # We don't include the framing bytes as part of the frame size check\n    frame_size = buf.tell() - (4 if wsz < MAX_FRAME_SIZE else 12)\n    _frame_size_check(frame_size,\n                      self.__max_frame_size,\n                      header=self.__client_type == CLIENT_TYPE.HEADER)\n    self.getTransport().write(buf.getvalue())\n    if oneway:\n      self.getTransport().onewayFlush()\n    else:\n      self.getTransport().flush()\n\n  # Implement the CReadableTransport interface.\n  @property\n  def cstringio_buf(self):\n    if not self.__rbuf_frame:\n      self.readFrame(0)\n    return self.__rbuf\n\n  def cstringio_refill(self, prefix, reqlen):\n    # self.__rbuf will already be empty here because fastproto doesn't\n    # ask for a refill until the previous buffer is empty.  
Therefore,\n    # we can start reading new frames immediately.\n\n    # On unframed clients, there is a chance there is something left\n    # in rbuf, and the read pointer is not advanced by fastproto\n    # so seek to the end to be safe\n    self.__rbuf.seek(0, 2)\n    while len(prefix) < reqlen:\n      prefix += self.read(reqlen)\n    self.__rbuf = StringIO(prefix)\n    return self.__rbuf\n\n\ndef _serialize_string(str_):\n  if PY3 and not isinstance(str_, bytes):\n    str_ = str_.encode()\n  return getVarint(len(str_)) + str_\n\n\ndef _flush_info_headers(info_data, write_headers, type):\n  if (len(write_headers) > 0):\n    info_data.write(getVarint(type))\n    info_data.write(getVarint(len(write_headers)))\n    write_headers_iter = write_headers.items()\n    for str_key, str_value in write_headers_iter:\n      info_data.write(_serialize_string(str_key))\n      info_data.write(_serialize_string(str_value))\n    write_headers.clear()\n\n\ndef _read_string(bufio, buflimit):\n  str_sz = readVarint(bufio)\n  if str_sz + bufio.tell() > buflimit:\n    raise TTransportException(TTransportException.INVALID_FRAME_SIZE, \"String read too big\")\n  return bufio.read(str_sz)\n\n\ndef _read_info_headers(data, end_header, read_headers):\n  num_keys = readVarint(data)\n  for _ in xrange(num_keys):\n    str_key = _read_string(data, end_header)\n    str_value = _read_string(data, end_header)\n    read_headers[str_key] = str_value\n\n\ndef _frame_size_check(sz, set_max_size, header=True):\n  if sz > set_max_size or (not header and sz > MAX_FRAME_SIZE):\n    raise TTransportException(TTransportException.INVALID_FRAME_SIZE,\n                              \"%s transport frame was too large\" % 'Header' if header else 'Framed')\n\n\nclass RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n\n  # Same as superclass function, but append 'POST' because we\n  # stripped it in the calling function.  
Would be nice if\n  # we had an ungetch instead\n  def handle_one_request(self):\n    self.raw_requestline = self.rfile.readline()\n    if not self.raw_requestline:\n      self.close_connection = 1\n      return\n    self.raw_requestline = \"POST\" + self.raw_requestline\n    if not self.parse_request():\n      # An error code has been sent, just exit\n      return\n    mname = 'do_' + self.command\n    if not hasattr(self, mname):\n      self.send_error(501, \"Unsupported method (%r)\" % self.command)\n      return\n    method = getattr(self, mname)\n    method()\n\n  def setup(self):\n    self.rfile = self.request\n    self.wfile = StringIO()  # New output buffer\n\n  def finish(self):\n    if not self.rfile.closed:\n      self.rfile.close()\n    # leave wfile open for reading.\n\n  def do_POST(self):\n    if int(self.headers['Content-Length']) > 0:\n      self.data = self.rfile.read(int(self.headers['Content-Length']))\n    else:\n      self.data = \"\"\n\n    # Prepare a response header, to be sent later.\n    self.send_response(200)\n    self.send_header(\"content-type\", \"application/x-thrift\")\n    self.end_headers()\n\n\n# INFO:(zuercher): Added to simplify usage\nclass THeaderTransportFactory:\n\n  def __init__(self, proto_id):\n    self.__proto_id = proto_id\n\n  def getTransport(self, trans):\n    header_trans = THeaderTransport(trans, client_type=CLIENT_TYPE.HEADER)\n    header_trans.set_protocol_id(self.__proto_id)\n    return header_trans\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/fbthrift/__init__.py",
    "content": ""
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/finagle/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_library\")\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\nload(\"@thrift_pip3//:requirements.bzl\", \"requirement\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\npy_library(\n    name = \"finagle_lib\",\n    srcs = [\n        \"TFinagleServerProcessor.py\",\n        \"TFinagleServerProtocol.py\",\n        \"__init__.py\",\n    ],\n    deps = [\n        \"@com_github_twitter_common_finagle_thrift//:twitter_common_finagle_thrift\",\n        \"@com_github_twitter_common_rpc//:twitter_common_rpc\",\n        requirement(\"thrift\"),\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/finagle/TFinagleServerProcessor.py",
    "content": "import logging\n\nfrom thrift.Thrift import TProcessor, TMessageType, TException\nfrom thrift.protocol import TProtocolDecorator\nfrom gen.twitter.finagle.thrift.ttypes import (ConnectionOptions, UpgradeReply)\n\n# Matches twitter/common/rpc/finagle/protocol.py\nUPGRADE_METHOD = \"__can__finagle__trace__v3__\"\n\n\n# Twitter's TFinagleProcessor only works for the client side of an RPC.\nclass TFinagleServerProcessor(TProcessor):\n\n  def __init__(self, underlying):\n    self._underlying = underlying\n\n  def process(self, iprot, oprot):\n    try:\n      if iprot.upgraded() is not None:\n        return self._underlying.process(iprot, oprot)\n    except AttributeError as e:\n      logging.exception(\"underlying protocol object is not a TFinagleServerProtocol\", e)\n      return self._underlying.process(iprot, oprot)\n\n    (name, ttype, seqid) = iprot.readMessageBegin()\n    if ttype != TMessageType.CALL and ttype != TMessageType.ONEWAY:\n      raise TException(\"TFinagle protocol only supports CALL & ONEWAY\")\n\n    # Check if this is an upgrade request.\n    if name == UPGRADE_METHOD:\n      connection_options = ConnectionOptions()\n      connection_options.read(iprot)\n      iprot.readMessageEnd()\n\n      oprot.writeMessageBegin(UPGRADE_METHOD, TMessageType.REPLY, seqid)\n      upgrade_reply = UpgradeReply()\n      upgrade_reply.write(oprot)\n      oprot.writeMessageEnd()\n      oprot.trans.flush()\n\n      iprot.set_upgraded(True)\n      oprot.set_upgraded(True)\n      return True\n\n    # Not upgraded. 
Replay the message begin to the underlying processor.\n    iprot.set_upgraded(False)\n    oprot.set_upgraded(False)\n    msg = (name, ttype, seqid)\n    return self._underlying.process(StoredMessageProtocol(iprot, msg), oprot)\n\n\nclass StoredMessageProtocol(TProtocolDecorator.TProtocolDecorator):\n\n  def __init__(self, protocol, messageBegin):\n    TProtocolDecorator.TProtocolDecorator.__init__(self, protocol)\n    self.messageBegin = messageBegin\n\n  def readMessageBegin(self):\n    return self.messageBegin\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/finagle/TFinagleServerProtocol.py",
    "content": "from thrift.protocol import TBinaryProtocol\nfrom gen.twitter.finagle.thrift.ttypes import (RequestHeader, ResponseHeader)\n\n\nclass TFinagleServerProtocolFactory(object):\n\n  def getProtocol(self, trans):\n    return TFinagleServerProtocol(trans)\n\n\nclass TFinagleServerProtocol(TBinaryProtocol.TBinaryProtocol):\n\n  def __init__(self, *args, **kw):\n    self._last_request = None\n    self._upgraded = None\n    TBinaryProtocol.TBinaryProtocol.__init__(self, *args, **kw)\n\n  def upgraded(self):\n    return self._upgraded\n\n  def set_upgraded(self, upgraded):\n    self._upgraded = upgraded\n\n  def writeMessageBegin(self, *args, **kwargs):\n    if self._upgraded:\n      header = ResponseHeader()  # .. TODO set some fields\n      header.write(self)\n    return TBinaryProtocol.TBinaryProtocol.writeMessageBegin(self, *args, **kwargs)\n\n  def readMessageBegin(self, *args, **kwargs):\n    if self._upgraded:\n      header = RequestHeader()\n      header.read(self)\n      self._last_request = header\n    return TBinaryProtocol.TBinaryProtocol.readMessageBegin(self, *args, **kwargs)\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/finagle/__init__.py",
    "content": ""
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/generate_bindings.sh",
    "content": "#!/bin/bash\n\n# Generates the thrift bindings for example.thrift. Requires that\n# apache-thrift's thrift generator is installed and on the path.\n\nDIR=$(cd \"$(dirname \"$0\")\" && pwd)\ncd \"${DIR}\" || exit 1\n\nthrift --gen py --out ./generated example.thrift\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh",
    "content": "#!/bin/bash\n\n# Generates request and response fixtures for integration tests.\n\n# Usage: generate_fixture.sh <transport> <protocol> -s [multiplex-service] -H [headers] method [param...]\n\nset -e\n\nfunction usage() {\n    echo \"Usage: $0 <mode> <transport> <protocol> -s [multiplex-service] -H [headers] method [param...]\"\n    echo \"where mode is success, exception, or idl-exception\"\n    exit 1\n}\n\nFIXTURE_DIR=\"${TEST_TMPDIR}\"\nmkdir -p \"${FIXTURE_DIR}\"\n\nDRIVER_DIR=\"${TEST_SRCDIR}/envoy/test/extensions/filters/network/thrift_proxy/driver\"\n\nif [[ -z \"${TEST_UDSDIR}\" ]]; then\n    TEST_UDSDIR=$(mktemp -d /tmp/envoy_test_thrift.XXXXXX)\nfi\n\nMODE=\"$1\"\nTRANSPORT=\"$2\"\nPROTOCOL=\"$3\"\n\nif ! shift 3; then\n    usage\nfi\n\nif [[ -z \"${MODE}\" || -z \"${TRANSPORT}\" || -z \"${PROTOCOL}\" ]]; then\n    usage\nfi\n\nMULTIPLEX=\nHEADERS=\nwhile getopts \":s:H:\" opt; do\n    case ${opt} in\n        s)\n            MULTIPLEX=$OPTARG\n            ;;\n        H)\n            HEADERS=$OPTARG\n            ;;\n\n        \\?)\n            echo \"Invalid Option: -$OPTARG\" >&2\n            exit 1\n            ;;\n        :)\n            echo \"Invalid Option: -$OPTARG requires an argument\" >&2\n            exit 1\n            ;;\n    esac\ndone\nshift $((OPTIND -1))\n\nMETHOD=\"$1\"\nif [[ \"${METHOD}\" == \"\" ]]; then\n    usage\nfi\nshift\n\nSOCKET=\"${TEST_UDSDIR}/fixture.sock\"\nrm -f \"${SOCKET}\"\n\nSERVICE_FLAGS=(\"--addr\" \"${SOCKET}\"\n               \"--unix\"\n               \"--response\" \"${MODE}\"\n               \"--transport\" \"${TRANSPORT}\"\n               \"--protocol\" \"${PROTOCOL}\")\n\nif [[ -n \"$MULTIPLEX\" ]]; then\n    SERVICE_FLAGS+=(\"--multiplex\")\n    SERVICE_FLAGS+=(\"${MULTIPLEX}\")\n\n    REQUEST_FILE=\"${FIXTURE_DIR}/${TRANSPORT}-${PROTOCOL}-${MULTIPLEX}-${MODE}.request\"\n    RESPONSE_FILE=\"${FIXTURE_DIR}/${TRANSPORT}-${PROTOCOL}-${MULTIPLEX}-${MODE}.response\"\nelse\n    
REQUEST_FILE=\"${FIXTURE_DIR}/${TRANSPORT}-${PROTOCOL}-${MODE}.request\"\n    RESPONSE_FILE=\"${FIXTURE_DIR}/${TRANSPORT}-${PROTOCOL}-${MODE}.response\"\nfi\n\n# start server\n\"${DRIVER_DIR}/server\" \"${SERVICE_FLAGS[@]}\" &\nSERVER_PID=\"$!\"\n\ntrap 'kill ${SERVER_PID}' EXIT;\n\nwhile [[ ! -a \"${SOCKET}\" ]]; do\n    sleep 0.1\n\n    if ! kill -0 \"${SERVER_PID}\"; then\n        echo \"server failed to start\"\n        exit 1\n    fi\ndone\n\nif [[ -n \"$HEADERS\" ]]; then\n    SERVICE_FLAGS+=(\"--headers\")\n    SERVICE_FLAGS+=(\"$HEADERS\")\nfi\n\n\"${DRIVER_DIR}/client\" \"${SERVICE_FLAGS[@]}\" \\\n                       --request \"${REQUEST_FILE}\" \\\n                       --response \"${RESPONSE_FILE}\" \\\n                       \"${METHOD}\" \"$@\"\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/generated/__init__.py",
    "content": ""
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/generated/example/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_library\")\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\nload(\"@thrift_pip3//:requirements.bzl\", \"requirement\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\npy_library(\n    name = \"example_lib\",\n    srcs = [\n        \"Example.py\",\n        \"__init__.py\",\n        \"constants.py\",\n        \"ttypes.py\",\n    ],\n    deps = [\n        requirement(\"thrift\"),\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/generated/example/Example-remote",
    "content": "#!/usr/bin/env python\n#\n# Autogenerated by Thrift Compiler (0.11.0)\n#\n# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING\n#\n#  options string: py\n#\n\nimport sys\nimport pprint\nif sys.version_info[0] > 2:\n    from urllib.parse import urlparse\nelse:\n    from urlparse import urlparse\nfrom thrift.transport import TTransport, TSocket, TSSLSocket, THttpClient\nfrom thrift.protocol.TBinaryProtocol import TBinaryProtocol\n\nfrom example import Example\nfrom example.ttypes import *\n\nif len(sys.argv) <= 1 or sys.argv[1] == '--help':\n    print('')\n    print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] [-novalidate] [-ca_certs certs] [-keyfile keyfile] [-certfile certfile] function [arg1 [arg2...]]')\n    print('')\n    print('Functions:')\n    print('  void ping()')\n    print('  void poke()')\n    print('  i32 add(i32 a, i32 b)')\n    print('  Result execute(Param input)')\n    print('')\n    sys.exit(0)\n\npp = pprint.PrettyPrinter(indent=2)\nhost = 'localhost'\nport = 9090\nuri = ''\nframed = False\nssl = False\nvalidate = True\nca_certs = None\nkeyfile = None\ncertfile = None\nhttp = False\nargi = 1\n\nif sys.argv[argi] == '-h':\n    parts = sys.argv[argi + 1].split(':')\n    host = parts[0]\n    if len(parts) > 1:\n        port = int(parts[1])\n    argi += 2\n\nif sys.argv[argi] == '-u':\n    url = urlparse(sys.argv[argi + 1])\n    parts = url[1].split(':')\n    host = parts[0]\n    if len(parts) > 1:\n        port = int(parts[1])\n    else:\n        port = 80\n    uri = url[2]\n    if url[4]:\n        uri += '?%s' % url[4]\n    http = True\n    argi += 2\n\nif sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':\n    framed = True\n    argi += 1\n\nif sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':\n    ssl = True\n    argi += 1\n\nif sys.argv[argi] == '-novalidate':\n    validate = False\n    argi += 1\n\nif sys.argv[argi] == '-ca_certs':\n    ca_certs = sys.argv[argi+1]\n    argi += 
2\n\nif sys.argv[argi] == '-keyfile':\n    keyfile = sys.argv[argi+1]\n    argi += 2\n\nif sys.argv[argi] == '-certfile':\n    certfile = sys.argv[argi+1]\n    argi += 2\n\ncmd = sys.argv[argi]\nargs = sys.argv[argi + 1:]\n\nif http:\n    transport = THttpClient.THttpClient(host, port, uri)\nelse:\n    if ssl:\n        socket = TSSLSocket.TSSLSocket(host, port, validate=validate, ca_certs=ca_certs, keyfile=keyfile, certfile=certfile)\n    else:\n        socket = TSocket.TSocket(host, port)\n    if framed:\n        transport = TTransport.TFramedTransport(socket)\n    else:\n        transport = TTransport.TBufferedTransport(socket)\nprotocol = TBinaryProtocol(transport)\nclient = Example.Client(protocol)\ntransport.open()\n\nif cmd == 'ping':\n    if len(args) != 0:\n        print('ping requires 0 args')\n        sys.exit(1)\n    pp.pprint(client.ping())\n\nelif cmd == 'poke':\n    if len(args) != 0:\n        print('poke requires 0 args')\n        sys.exit(1)\n    pp.pprint(client.poke())\n\nelif cmd == 'add':\n    if len(args) != 2:\n        print('add requires 2 args')\n        sys.exit(1)\n    pp.pprint(client.add(eval(args[0]), eval(args[1]),))\n\nelif cmd == 'execute':\n    if len(args) != 1:\n        print('execute requires 1 args')\n        sys.exit(1)\n    pp.pprint(client.execute(eval(args[0]),))\n\nelse:\n    print('Unrecognized method %s' % cmd)\n    sys.exit(1)\n\ntransport.close()\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/generated/example/Example.py",
    "content": "#\n# Autogenerated by Thrift Compiler (0.11.0)\n#\n# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING\n#\n#  options string: py\n#\n\nfrom thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException\nfrom thrift.protocol.TProtocol import TProtocolException\nfrom thrift.TRecursive import fix_spec\n\nimport sys\nimport logging\nfrom .ttypes import *\nfrom thrift.Thrift import TProcessor\nfrom thrift.transport import TTransport\nall_structs = []\n\n\nclass Iface(object):\n    def ping(self):\n        pass\n\n    def poke(self):\n        pass\n\n    def add(self, a, b):\n        \"\"\"\n        Parameters:\n         - a\n         - b\n        \"\"\"\n        pass\n\n    def execute(self, input):\n        \"\"\"\n        Parameters:\n         - input\n        \"\"\"\n        pass\n\n\nclass Client(Iface):\n    def __init__(self, iprot, oprot=None):\n        self._iprot = self._oprot = iprot\n        if oprot is not None:\n            self._oprot = oprot\n        self._seqid = 0\n\n    def ping(self):\n        self.send_ping()\n        self.recv_ping()\n\n    def send_ping(self):\n        self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)\n        args = ping_args()\n        args.write(self._oprot)\n        self._oprot.writeMessageEnd()\n        self._oprot.trans.flush()\n\n    def recv_ping(self):\n        iprot = self._iprot\n        (fname, mtype, rseqid) = iprot.readMessageBegin()\n        if mtype == TMessageType.EXCEPTION:\n            x = TApplicationException()\n            x.read(iprot)\n            iprot.readMessageEnd()\n            raise x\n        result = ping_result()\n        result.read(iprot)\n        iprot.readMessageEnd()\n        return\n\n    def poke(self):\n        self.send_poke()\n\n    def send_poke(self):\n        self._oprot.writeMessageBegin('poke', TMessageType.ONEWAY, self._seqid)\n        args = poke_args()\n        args.write(self._oprot)\n        
self._oprot.writeMessageEnd()\n        self._oprot.trans.flush()\n\n    def add(self, a, b):\n        \"\"\"\n        Parameters:\n         - a\n         - b\n        \"\"\"\n        self.send_add(a, b)\n        return self.recv_add()\n\n    def send_add(self, a, b):\n        self._oprot.writeMessageBegin('add', TMessageType.CALL, self._seqid)\n        args = add_args()\n        args.a = a\n        args.b = b\n        args.write(self._oprot)\n        self._oprot.writeMessageEnd()\n        self._oprot.trans.flush()\n\n    def recv_add(self):\n        iprot = self._iprot\n        (fname, mtype, rseqid) = iprot.readMessageBegin()\n        if mtype == TMessageType.EXCEPTION:\n            x = TApplicationException()\n            x.read(iprot)\n            iprot.readMessageEnd()\n            raise x\n        result = add_result()\n        result.read(iprot)\n        iprot.readMessageEnd()\n        if result.success is not None:\n            return result.success\n        raise TApplicationException(TApplicationException.MISSING_RESULT, \"add failed: unknown result\")\n\n    def execute(self, input):\n        \"\"\"\n        Parameters:\n         - input\n        \"\"\"\n        self.send_execute(input)\n        return self.recv_execute()\n\n    def send_execute(self, input):\n        self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)\n        args = execute_args()\n        args.input = input\n        args.write(self._oprot)\n        self._oprot.writeMessageEnd()\n        self._oprot.trans.flush()\n\n    def recv_execute(self):\n        iprot = self._iprot\n        (fname, mtype, rseqid) = iprot.readMessageBegin()\n        if mtype == TMessageType.EXCEPTION:\n            x = TApplicationException()\n            x.read(iprot)\n            iprot.readMessageEnd()\n            raise x\n        result = execute_result()\n        result.read(iprot)\n        iprot.readMessageEnd()\n        if result.success is not None:\n            return result.success\n  
      if result.appex is not None:\n            raise result.appex\n        raise TApplicationException(TApplicationException.MISSING_RESULT, \"execute failed: unknown result\")\n\n\nclass Processor(Iface, TProcessor):\n    def __init__(self, handler):\n        self._handler = handler\n        self._processMap = {}\n        self._processMap[\"ping\"] = Processor.process_ping\n        self._processMap[\"poke\"] = Processor.process_poke\n        self._processMap[\"add\"] = Processor.process_add\n        self._processMap[\"execute\"] = Processor.process_execute\n\n    def process(self, iprot, oprot):\n        (name, type, seqid) = iprot.readMessageBegin()\n        if name not in self._processMap:\n            iprot.skip(TType.STRUCT)\n            iprot.readMessageEnd()\n            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))\n            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)\n            x.write(oprot)\n            oprot.writeMessageEnd()\n            oprot.trans.flush()\n            return\n        else:\n            self._processMap[name](self, seqid, iprot, oprot)\n        return True\n\n    def process_ping(self, seqid, iprot, oprot):\n        args = ping_args()\n        args.read(iprot)\n        iprot.readMessageEnd()\n        result = ping_result()\n        try:\n            self._handler.ping()\n            msg_type = TMessageType.REPLY\n        except TTransport.TTransportException:\n            raise\n        except TApplicationException as ex:\n            logging.exception('TApplication exception in handler')\n            msg_type = TMessageType.EXCEPTION\n            result = ex\n        except Exception:\n            logging.exception('Unexpected exception in handler')\n            msg_type = TMessageType.EXCEPTION\n            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')\n        oprot.writeMessageBegin(\"ping\", msg_type, seqid)\n        
result.write(oprot)\n        oprot.writeMessageEnd()\n        oprot.trans.flush()\n\n    def process_poke(self, seqid, iprot, oprot):\n        args = poke_args()\n        args.read(iprot)\n        iprot.readMessageEnd()\n        try:\n            self._handler.poke()\n        except TTransport.TTransportException:\n            raise\n        except Exception:\n            logging.exception('Exception in oneway handler')\n\n    def process_add(self, seqid, iprot, oprot):\n        args = add_args()\n        args.read(iprot)\n        iprot.readMessageEnd()\n        result = add_result()\n        try:\n            result.success = self._handler.add(args.a, args.b)\n            msg_type = TMessageType.REPLY\n        except TTransport.TTransportException:\n            raise\n        except TApplicationException as ex:\n            logging.exception('TApplication exception in handler')\n            msg_type = TMessageType.EXCEPTION\n            result = ex\n        except Exception:\n            logging.exception('Unexpected exception in handler')\n            msg_type = TMessageType.EXCEPTION\n            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')\n        oprot.writeMessageBegin(\"add\", msg_type, seqid)\n        result.write(oprot)\n        oprot.writeMessageEnd()\n        oprot.trans.flush()\n\n    def process_execute(self, seqid, iprot, oprot):\n        args = execute_args()\n        args.read(iprot)\n        iprot.readMessageEnd()\n        result = execute_result()\n        try:\n            result.success = self._handler.execute(args.input)\n            msg_type = TMessageType.REPLY\n        except TTransport.TTransportException:\n            raise\n        except AppException as appex:\n            msg_type = TMessageType.REPLY\n            result.appex = appex\n        except TApplicationException as ex:\n            logging.exception('TApplication exception in handler')\n            msg_type = TMessageType.EXCEPTION\n  
          result = ex\n        except Exception:\n            logging.exception('Unexpected exception in handler')\n            msg_type = TMessageType.EXCEPTION\n            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')\n        oprot.writeMessageBegin(\"execute\", msg_type, seqid)\n        result.write(oprot)\n        oprot.writeMessageEnd()\n        oprot.trans.flush()\n\n# HELPER FUNCTIONS AND STRUCTURES\n\n\nclass ping_args(object):\n\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('ping_args')\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\nall_structs.append(ping_args)\nping_args.thrift_spec = (\n)\n\n\nclass ping_result(object):\n\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('ping_result')\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\nall_structs.append(ping_result)\nping_result.thrift_spec = (\n)\n\n\nclass poke_args(object):\n\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('poke_args')\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\nall_structs.append(poke_args)\npoke_args.thrift_spec = (\n)\n\n\nclass add_args(object):\n    \"\"\"\n    Attributes:\n     - a\n     - b\n    \"\"\"\n\n\n    def __init__(self, a=None, b=None,):\n        self.a = a\n        self.b = b\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            if fid == 1:\n                if ftype == TType.I32:\n                    self.a = iprot.readI32()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 2:\n                if ftype == TType.I32:\n                    self.b = iprot.readI32()\n                else:\n                    iprot.skip(ftype)\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        
oprot.writeStructBegin('add_args')\n        if self.a is not None:\n            oprot.writeFieldBegin('a', TType.I32, 1)\n            oprot.writeI32(self.a)\n            oprot.writeFieldEnd()\n        if self.b is not None:\n            oprot.writeFieldBegin('b', TType.I32, 2)\n            oprot.writeI32(self.b)\n            oprot.writeFieldEnd()\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\nall_structs.append(add_args)\nadd_args.thrift_spec = (\n    None,  # 0\n    (1, TType.I32, 'a', None, None, ),  # 1\n    (2, TType.I32, 'b', None, None, ),  # 2\n)\n\n\nclass add_result(object):\n    \"\"\"\n    Attributes:\n     - success\n    \"\"\"\n\n\n    def __init__(self, success=None,):\n        self.success = success\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            if fid == 0:\n                if ftype == TType.I32:\n                    self.success = iprot.readI32()\n                else:\n                    iprot.skip(ftype)\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not 
None:\n            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('add_result')\n        if self.success is not None:\n            oprot.writeFieldBegin('success', TType.I32, 0)\n            oprot.writeI32(self.success)\n            oprot.writeFieldEnd()\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\nall_structs.append(add_result)\nadd_result.thrift_spec = (\n    (0, TType.I32, 'success', None, None, ),  # 0\n)\n\n\nclass execute_args(object):\n    \"\"\"\n    Attributes:\n     - input\n    \"\"\"\n\n\n    def __init__(self, input=None,):\n        self.input = input\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            if fid == 1:\n                if ftype == TType.STRUCT:\n                    self.input = Param()\n                    self.input.read(iprot)\n                else:\n                    iprot.skip(ftype)\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('execute_args')\n        if self.input is not None:\n            oprot.writeFieldBegin('input', TType.STRUCT, 1)\n            self.input.write(oprot)\n            oprot.writeFieldEnd()\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\nall_structs.append(execute_args)\nexecute_args.thrift_spec = (\n    None,  # 0\n    (1, TType.STRUCT, 'input', [Param, None], None, ),  # 1\n)\n\n\nclass execute_result(object):\n    \"\"\"\n    Attributes:\n     - success\n     - appex\n    \"\"\"\n\n\n    def __init__(self, success=None, appex=None,):\n        self.success = success\n        self.appex = appex\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            if fid == 0:\n                if ftype == TType.STRUCT:\n                    self.success = Result()\n                    self.success.read(iprot)\n                else:\n                    iprot.skip(ftype)\n            elif fid == 1:\n                if ftype == TType.STRUCT:\n                    self.appex = AppException()\n                    self.appex.read(iprot)\n            
    else:\n                    iprot.skip(ftype)\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('execute_result')\n        if self.success is not None:\n            oprot.writeFieldBegin('success', TType.STRUCT, 0)\n            self.success.write(oprot)\n            oprot.writeFieldEnd()\n        if self.appex is not None:\n            oprot.writeFieldBegin('appex', TType.STRUCT, 1)\n            self.appex.write(oprot)\n            oprot.writeFieldEnd()\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\nall_structs.append(execute_result)\nexecute_result.thrift_spec = (\n    (0, TType.STRUCT, 'success', [Result, None], None, ),  # 0\n    (1, TType.STRUCT, 'appex', [AppException, None], None, ),  # 1\n)\nfix_spec(all_structs)\ndel all_structs\n\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/generated/example/__init__.py",
    "content": "__all__ = ['ttypes', 'constants', 'Example']\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/generated/example/constants.py",
    "content": "#\n# Autogenerated by Thrift Compiler (0.11.0)\n#\n# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING\n#\n#  options string: py\n#\n\nfrom thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException\nfrom thrift.protocol.TProtocol import TProtocolException\nfrom thrift.TRecursive import fix_spec\n\nimport sys\nfrom .ttypes import *\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/generated/example/ttypes.py",
    "content": "#\n# Autogenerated by Thrift Compiler (0.11.0)\n#\n# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING\n#\n#  options string: py\n#\n\nfrom thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException\nfrom thrift.protocol.TProtocol import TProtocolException\nfrom thrift.TRecursive import fix_spec\n\nimport sys\n\nfrom thrift.transport import TTransport\nall_structs = []\n\n\nclass TheWorks(object):\n    \"\"\"\n    Attributes:\n     - field_1\n     - field_2\n     - field_3\n     - field_4\n     - field_5\n     - field_6\n     - field_7\n     - field_8\n     - field_9\n     - field_10\n     - field_11\n     - field_12\n    \"\"\"\n\n\n    def __init__(self, field_1=None, field_2=None, field_3=None, field_4=None, field_5=None, field_6=None, field_7=None, field_8=None, field_9=None, field_10=None, field_11=None, field_12=None,):\n        self.field_1 = field_1\n        self.field_2 = field_2\n        self.field_3 = field_3\n        self.field_4 = field_4\n        self.field_5 = field_5\n        self.field_6 = field_6\n        self.field_7 = field_7\n        self.field_8 = field_8\n        self.field_9 = field_9\n        self.field_10 = field_10\n        self.field_11 = field_11\n        self.field_12 = field_12\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            if fid == 1:\n                if ftype == TType.BOOL:\n                    self.field_1 = iprot.readBool()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 2:\n                if ftype == TType.BYTE:\n                    
self.field_2 = iprot.readByte()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 3:\n                if ftype == TType.I16:\n                    self.field_3 = iprot.readI16()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 4:\n                if ftype == TType.I32:\n                    self.field_4 = iprot.readI32()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 5:\n                if ftype == TType.I64:\n                    self.field_5 = iprot.readI64()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 6:\n                if ftype == TType.DOUBLE:\n                    self.field_6 = iprot.readDouble()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 7:\n                if ftype == TType.STRING:\n                    self.field_7 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 8:\n                if ftype == TType.STRING:\n                    self.field_8 = iprot.readBinary()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 9:\n                if ftype == TType.MAP:\n                    self.field_9 = {}\n                    (_ktype1, _vtype2, _size0) = iprot.readMapBegin()\n                    for _i4 in range(_size0):\n                        _key5 = iprot.readI32()\n                        _val6 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()\n                        self.field_9[_key5] = _val6\n                    iprot.readMapEnd()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 10:\n                if ftype == TType.LIST:\n                    self.field_10 = []\n                    (_etype10, _size7) = iprot.readListBegin()\n    
                for _i11 in range(_size7):\n                        _elem12 = iprot.readI32()\n                        self.field_10.append(_elem12)\n                    iprot.readListEnd()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 11:\n                if ftype == TType.SET:\n                    self.field_11 = set()\n                    (_etype16, _size13) = iprot.readSetBegin()\n                    for _i17 in range(_size13):\n                        _elem18 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()\n                        self.field_11.add(_elem18)\n                    iprot.readSetEnd()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 12:\n                if ftype == TType.BOOL:\n                    self.field_12 = iprot.readBool()\n                else:\n                    iprot.skip(ftype)\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('TheWorks')\n        if self.field_1 is not None:\n            oprot.writeFieldBegin('field_1', TType.BOOL, 1)\n            oprot.writeBool(self.field_1)\n            oprot.writeFieldEnd()\n        if self.field_2 is not None:\n            oprot.writeFieldBegin('field_2', TType.BYTE, 2)\n            oprot.writeByte(self.field_2)\n            oprot.writeFieldEnd()\n        if self.field_3 is not None:\n            oprot.writeFieldBegin('field_3', TType.I16, 3)\n            oprot.writeI16(self.field_3)\n            oprot.writeFieldEnd()\n        if self.field_4 is not None:\n            oprot.writeFieldBegin('field_4', TType.I32, 4)\n            oprot.writeI32(self.field_4)\n       
     oprot.writeFieldEnd()\n        if self.field_5 is not None:\n            oprot.writeFieldBegin('field_5', TType.I64, 5)\n            oprot.writeI64(self.field_5)\n            oprot.writeFieldEnd()\n        if self.field_6 is not None:\n            oprot.writeFieldBegin('field_6', TType.DOUBLE, 6)\n            oprot.writeDouble(self.field_6)\n            oprot.writeFieldEnd()\n        if self.field_7 is not None:\n            oprot.writeFieldBegin('field_7', TType.STRING, 7)\n            oprot.writeString(self.field_7.encode('utf-8') if sys.version_info[0] == 2 else self.field_7)\n            oprot.writeFieldEnd()\n        if self.field_8 is not None:\n            oprot.writeFieldBegin('field_8', TType.STRING, 8)\n            oprot.writeBinary(self.field_8)\n            oprot.writeFieldEnd()\n        if self.field_9 is not None:\n            oprot.writeFieldBegin('field_9', TType.MAP, 9)\n            oprot.writeMapBegin(TType.I32, TType.STRING, len(self.field_9))\n            for kiter19, viter20 in self.field_9.items():\n                oprot.writeI32(kiter19)\n                oprot.writeString(viter20.encode('utf-8') if sys.version_info[0] == 2 else viter20)\n            oprot.writeMapEnd()\n            oprot.writeFieldEnd()\n        if self.field_10 is not None:\n            oprot.writeFieldBegin('field_10', TType.LIST, 10)\n            oprot.writeListBegin(TType.I32, len(self.field_10))\n            for iter21 in self.field_10:\n                oprot.writeI32(iter21)\n            oprot.writeListEnd()\n            oprot.writeFieldEnd()\n        if self.field_11 is not None:\n            oprot.writeFieldBegin('field_11', TType.SET, 11)\n            oprot.writeSetBegin(TType.STRING, len(self.field_11))\n            for iter22 in self.field_11:\n                oprot.writeString(iter22.encode('utf-8') if sys.version_info[0] == 2 else iter22)\n            oprot.writeSetEnd()\n            oprot.writeFieldEnd()\n        if self.field_12 is not None:\n            
oprot.writeFieldBegin('field_12', TType.BOOL, 12)\n            oprot.writeBool(self.field_12)\n            oprot.writeFieldEnd()\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\n\n\nclass Param(object):\n    \"\"\"\n    Attributes:\n     - return_fields\n     - the_works\n    \"\"\"\n\n\n    def __init__(self, return_fields=None, the_works=None,):\n        self.return_fields = return_fields\n        self.the_works = the_works\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            if fid == 1:\n                if ftype == TType.LIST:\n                    self.return_fields = []\n                    (_etype26, _size23) = iprot.readListBegin()\n                    for _i27 in range(_size23):\n                        _elem28 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()\n                        self.return_fields.append(_elem28)\n                    iprot.readListEnd()\n                else:\n                    iprot.skip(ftype)\n            elif fid == 2:\n                if ftype == TType.STRUCT:\n                    self.the_works = TheWorks()\n                    self.the_works.read(iprot)\n                
else:\n                    iprot.skip(ftype)\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('Param')\n        if self.return_fields is not None:\n            oprot.writeFieldBegin('return_fields', TType.LIST, 1)\n            oprot.writeListBegin(TType.STRING, len(self.return_fields))\n            for iter29 in self.return_fields:\n                oprot.writeString(iter29.encode('utf-8') if sys.version_info[0] == 2 else iter29)\n            oprot.writeListEnd()\n            oprot.writeFieldEnd()\n        if self.the_works is not None:\n            oprot.writeFieldBegin('the_works', TType.STRUCT, 2)\n            self.the_works.write(oprot)\n            oprot.writeFieldEnd()\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\n\n\nclass Result(object):\n    \"\"\"\n    Attributes:\n     - the_works\n    \"\"\"\n\n\n    def __init__(self, the_works=None,):\n        self.the_works = the_works\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, 
ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            if fid == 1:\n                if ftype == TType.STRUCT:\n                    self.the_works = TheWorks()\n                    self.the_works.read(iprot)\n                else:\n                    iprot.skip(ftype)\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('Result')\n        if self.the_works is not None:\n            oprot.writeFieldBegin('the_works', TType.STRUCT, 1)\n            self.the_works.write(oprot)\n            oprot.writeFieldEnd()\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\n\n\nclass AppException(TException):\n    \"\"\"\n    Attributes:\n     - why\n    \"\"\"\n\n\n    def __init__(self, why=None,):\n        self.why = why\n\n    def read(self, iprot):\n        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:\n            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])\n            return\n        iprot.readStructBegin()\n        while True:\n            (fname, ftype, fid) = iprot.readFieldBegin()\n            if ftype == TType.STOP:\n                break\n            if fid == 1:\n             
   if ftype == TType.STRING:\n                    self.why = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()\n                else:\n                    iprot.skip(ftype)\n            else:\n                iprot.skip(ftype)\n            iprot.readFieldEnd()\n        iprot.readStructEnd()\n\n    def write(self, oprot):\n        if oprot._fast_encode is not None and self.thrift_spec is not None:\n            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))\n            return\n        oprot.writeStructBegin('AppException')\n        if self.why is not None:\n            oprot.writeFieldBegin('why', TType.STRING, 1)\n            oprot.writeString(self.why.encode('utf-8') if sys.version_info[0] == 2 else self.why)\n            oprot.writeFieldEnd()\n        oprot.writeFieldStop()\n        oprot.writeStructEnd()\n\n    def validate(self):\n        return\n\n    def __str__(self):\n        return repr(self)\n\n    def __repr__(self):\n        L = ['%s=%r' % (key, value)\n             for key, value in self.__dict__.items()]\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not (self == other)\nall_structs.append(TheWorks)\nTheWorks.thrift_spec = (\n    None,  # 0\n    (1, TType.BOOL, 'field_1', None, None, ),  # 1\n    (2, TType.BYTE, 'field_2', None, None, ),  # 2\n    (3, TType.I16, 'field_3', None, None, ),  # 3\n    (4, TType.I32, 'field_4', None, None, ),  # 4\n    (5, TType.I64, 'field_5', None, None, ),  # 5\n    (6, TType.DOUBLE, 'field_6', None, None, ),  # 6\n    (7, TType.STRING, 'field_7', 'UTF8', None, ),  # 7\n    (8, TType.STRING, 'field_8', 'BINARY', None, ),  # 8\n    (9, TType.MAP, 'field_9', (TType.I32, None, TType.STRING, 'UTF8', False), None, ),  # 9\n    (10, TType.LIST, 'field_10', (TType.I32, None, 
False), None, ),  # 10\n    (11, TType.SET, 'field_11', (TType.STRING, 'UTF8', False), None, ),  # 11\n    (12, TType.BOOL, 'field_12', None, None, ),  # 12\n)\nall_structs.append(Param)\nParam.thrift_spec = (\n    None,  # 0\n    (1, TType.LIST, 'return_fields', (TType.STRING, 'UTF8', False), None, ),  # 1\n    (2, TType.STRUCT, 'the_works', [TheWorks, None], None, ),  # 2\n)\nall_structs.append(Result)\nResult.thrift_spec = (\n    None,  # 0\n    (1, TType.STRUCT, 'the_works', [TheWorks, None], None, ),  # 1\n)\nall_structs.append(AppException)\nAppException.thrift_spec = (\n    None,  # 0\n    (1, TType.STRING, 'why', 'UTF8', None, ),  # 1\n)\nfix_spec(all_structs)\ndel all_structs\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/driver/server.py",
    "content": "#!/usr/bin/env python\n\nimport argparse\nimport logging\nimport sys\n\nfrom generated.example import Example\nfrom generated.example.ttypes import (Result, TheWorks, AppException)\n\nfrom thrift import Thrift, TMultiplexedProcessor\nfrom thrift.protocol import TBinaryProtocol, TCompactProtocol, TJSONProtocol\nfrom thrift.server import TServer\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom fbthrift import THeaderTransport\nfrom finagle import TFinagleServerProcessor, TFinagleServerProtocol\n\n\nclass SuccessHandler:\n\n  def ping(self):\n    print(\"server: ping()\")\n\n  def poke(self):\n    print(\"server: poke()\")\n\n  def add(self, a, b):\n    result = a + b\n    print(\"server: add({0}, {1}) = {2}\".format(a, b, result))\n    return result\n\n  def execute(self, param):\n    print(\"server: execute({0})\".format(param))\n    if \"all\" in param.return_fields:\n      return Result(param.the_works)\n    elif \"none\" in param.return_fields:\n      return Result(TheWorks())\n    the_works = TheWorks()\n    for field, value in vars(param.the_works).items():\n      if field in param.return_fields:\n        setattr(the_works, field, value)\n    return Result(the_works)\n\n\nclass IDLExceptionHandler:\n\n  def ping(self):\n    print(\"server: ping()\")\n\n  def poke(self):\n    print(\"server: poke()\")\n\n  def add(self, a, b):\n    result = a + b\n    print(\"server: add({0}, {1}) = {2}\".format(a, b, result))\n    return result\n\n  def execute(self, param):\n    print(\"server: app error: execute failed\")\n    raise AppException(\"execute failed\")\n\n\nclass ExceptionHandler:\n\n  def ping(self):\n    print(\"server: ping failure\")\n    raise Thrift.TApplicationException(\n        type=Thrift.TApplicationException.INTERNAL_ERROR,\n        message=\"for ping\",\n    )\n\n  def poke(self):\n    print(\"server: poke failure\")\n    raise Thrift.TApplicationException(\n        
type=Thrift.TApplicationException.INTERNAL_ERROR,\n        message=\"for poke\",\n    )\n\n  def add(self, a, b):\n    print(\"server: add failure\")\n    raise Thrift.TApplicationException(\n        type=Thrift.TApplicationException.INTERNAL_ERROR,\n        message=\"for add\",\n    )\n\n  def execute(self, param):\n    print(\"server: execute failure\")\n    raise Thrift.TApplicationException(\n        type=Thrift.TApplicationException.INTERNAL_ERROR,\n        message=\"for execute\",\n    )\n\n\ndef main(cfg):\n  if cfg.unix:\n    if cfg.addr == \"\":\n      sys.exit(\"invalid listener unix domain socket: {}\".format(cfg.addr))\n  else:\n    try:\n      (host, port) = cfg.addr.rsplit(\":\", 1)\n      port = int(port)\n    except ValueError:\n      sys.exit(\"invalid listener address: {}\".format(cfg.addr))\n\n  if cfg.response == \"success\":\n    handler = SuccessHandler()\n  elif cfg.response == \"idl-exception\":\n    handler = IDLExceptionHandler()\n  elif cfg.response == \"exception\":\n    # squelch traceback for the exception we throw\n    logging.getLogger().setLevel(logging.CRITICAL)\n    handler = ExceptionHandler()\n  else:\n    sys.exit(\"unknown server response mode {0}\".format(cfg.response))\n\n  processor = Example.Processor(handler)\n  if cfg.service is not None:\n    # wrap processor with multiplexor\n    multi = TMultiplexedProcessor.TMultiplexedProcessor()\n    multi.registerProcessor(cfg.service, processor)\n    processor = multi\n\n  if cfg.protocol == \"finagle\":\n    # wrap processor with finagle request/response header handler\n    processor = TFinagleServerProcessor.TFinagleServerProcessor(processor)\n\n  if cfg.unix:\n    transport = TSocket.TServerSocket(unix_socket=cfg.addr)\n  else:\n    transport = TSocket.TServerSocket(host=host, port=port)\n\n  if cfg.transport == \"framed\":\n    transport_factory = TTransport.TFramedTransportFactory()\n  elif cfg.transport == \"unframed\":\n    transport_factory = 
TTransport.TBufferedTransportFactory()\n  elif cfg.transport == \"header\":\n    if cfg.protocol == \"binary\":\n      transport_factory = THeaderTransport.THeaderTransportFactory(\n          THeaderTransport.T_BINARY_PROTOCOL)\n    elif cfg.protocol == \"compact\":\n      transport_factory = THeaderTransport.THeaderTransportFactory(\n          THeaderTransport.T_COMPACT_PROTOCOL)\n    else:\n      sys.exit(\"header transport cannot be used with protocol {0}\".format(cfg.protocol))\n  else:\n    sys.exit(\"unknown transport {0}\".format(cfg.transport))\n\n  if cfg.protocol == \"binary\":\n    protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()\n  elif cfg.protocol == \"compact\":\n    protocol_factory = TCompactProtocol.TCompactProtocolFactory()\n  elif cfg.protocol == \"json\":\n    protocol_factory = TJSONProtocol.TJSONProtocolFactory()\n  elif cfg.protocol == \"finagle\":\n    protocol_factory = TFinagleServerProtocol.TFinagleServerProtocolFactory()\n  else:\n    sys.exit(\"unknown protocol {0}\".format(cfg.protocol))\n\n  print(\"Thrift Server listening on {0} for {1} {2} requests\".format(cfg.addr, cfg.transport,\n                                                                     cfg.protocol))\n  if cfg.service is not None:\n    print(\"Thrift Server service name {0}\".format(cfg.service))\n  if cfg.response == \"idl-exception\":\n    print(\"Thrift Server will throw IDL exceptions when defined\")\n  elif cfg.response == \"exception\":\n    print(\"Thrift Server will throw Thrift exceptions for all messages\")\n\n  server = TServer.TThreadedServer(processor, transport, transport_factory, protocol_factory)\n  try:\n    server.serve()\n  except KeyboardInterrupt:\n    print\n\n\nif __name__ == \"__main__\":\n  logging.basicConfig()\n  parser = argparse.ArgumentParser(description=\"Thrift server to match client.py.\")\n  parser.add_argument(\n      \"-a\",\n      \"--addr\",\n      metavar=\"ADDR\",\n      dest=\"addr\",\n      default=\":0\",\n      
help=\"Listener address for server in the form host:port. The host is optional. If --unix\" +\n      \" is set, the address is the socket name.\",\n  )\n  parser.add_argument(\n      \"-m\",\n      \"--multiplex\",\n      metavar=\"SERVICE\",\n      dest=\"service\",\n      help=\"Enable service multiplexing and set the service name.\",\n  )\n  parser.add_argument(\n      \"-p\",\n      \"--protocol\",\n      help=\"Selects a protocol.\",\n      dest=\"protocol\",\n      default=\"binary\",\n      choices=[\"binary\", \"compact\", \"json\", \"finagle\"],\n  )\n  parser.add_argument(\n      \"-r\",\n      \"--response\",\n      dest=\"response\",\n      default=\"success\",\n      choices=[\"success\", \"idl-exception\", \"exception\"],\n      help=\"Controls how the server responds to requests\",\n  )\n  parser.add_argument(\n      \"-t\",\n      \"--transport\",\n      help=\"Selects a transport.\",\n      dest=\"transport\",\n      default=\"framed\",\n      choices=[\"framed\", \"unframed\", \"header\"],\n  )\n  parser.add_argument(\n      \"-u\",\n      \"--unix\",\n      dest=\"unix\",\n      action=\"store_true\",\n  )\n  cfg = parser.parse_args()\n\n  try:\n    main(cfg)\n  except Thrift.TException as tx:\n    sys.exit(\"Thrift exception: {0}\".format(tx.message))\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/filters/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"pass_through_filter_test\",\n    srcs = [\"pass_through_filter_test.cc\"],\n    extension_name = \"envoy.filters.network.thrift_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/thrift_proxy/filters:pass_through_filter_lib\",\n        \"//test/extensions/filters/network/thrift_proxy:mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/filters/pass_through_filter_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"extensions/filters/network/thrift_proxy/filters/pass_through_filter.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace ThriftFilters {\n\nusing namespace Envoy::Extensions::NetworkFilters;\n\nclass ThriftPassThroughDecoderFilterTest : public testing::Test {\npublic:\n  class Filter : public PassThroughDecoderFilter {\n  public:\n    DecoderFilterCallbacks* decoderFilterCallbacks() { return decoder_callbacks_; }\n  };\n\n  void initialize() {\n    filter_ = std::make_unique<Filter>();\n    filter_->setDecoderFilterCallbacks(filter_callbacks_);\n  }\n\n  std::unique_ptr<Filter> filter_;\n  NiceMock<MockDecoderFilterCallbacks> filter_callbacks_;\n  ThriftProxy::MessageMetadataSharedPtr request_metadata_;\n};\n\n// Tests that each method returns ThriftProxy::FilterStatus::Continue.\nTEST_F(ThriftPassThroughDecoderFilterTest, AllMethodsAreImplementedTrivially) {\n  initialize();\n\n  EXPECT_EQ(&filter_callbacks_, filter_->decoderFilterCallbacks());\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->transportBegin(request_metadata_));\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_));\n  {\n    std::string dummy_str = \"dummy\";\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->structBegin(dummy_str));\n  }\n  {\n    std::string dummy_str = \"dummy\";\n    ThriftProxy::FieldType dummy_ft{ThriftProxy::FieldType::I32};\n    int16_t dummy_id{1};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue,\n              filter_->fieldBegin(dummy_str, dummy_ft, dummy_id));\n  }\n  {\n    bool dummy_val{false};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, 
filter_->boolValue(dummy_val));\n  }\n  {\n    uint8_t dummy_val{0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->byteValue(dummy_val));\n  }\n  {\n    int16_t dummy_val{0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int16Value(dummy_val));\n  }\n  {\n    int32_t dummy_val{0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int32Value(dummy_val));\n  }\n  {\n    int64_t dummy_val{0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int64Value(dummy_val));\n  }\n  {\n    double dummy_val{0.0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->doubleValue(dummy_val));\n  }\n  {\n    std::string dummy_str = \"dummy\";\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->stringValue(dummy_str));\n  }\n  {\n    ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32;\n    uint32_t dummy_size{1};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue,\n              filter_->mapBegin(dummy_ft, dummy_ft, dummy_size));\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->mapEnd());\n  }\n  {\n    ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32;\n    uint32_t dummy_size{1};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->listBegin(dummy_ft, dummy_size));\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->listEnd());\n  }\n  {\n    ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32;\n    uint32_t dummy_size{1};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->setBegin(dummy_ft, dummy_size));\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->setEnd());\n  }\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->structEnd());\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->fieldEnd());\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageEnd());\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->transportEnd());\n\n  EXPECT_NO_THROW(filter_->onDestroy());\n}\n\n} // namespace 
ThriftFilters\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"ratelimit_test\",\n    srcs = [\"ratelimit_test.cc\"],\n    extension_name = \"envoy.filters.thrift.ratelimit\",\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/network/thrift_proxy/filters/ratelimit:ratelimit_lib\",\n        \"//test/extensions/filters/common/ratelimit:ratelimit_mocks\",\n        \"//test/extensions/filters/network/thrift_proxy:mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/ratelimit:ratelimit_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.filters.thrift.ratelimit\",\n    deps = [\n        \"//source/extensions/filters/network/thrift_proxy/filters/ratelimit:config\",\n        \"//test/extensions/filters/network/thrift_proxy:mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.pb.validate.h\"\n\n#include \"extensions/filters/network/thrift_proxy/filters/ratelimit/config.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ThriftFilters {\nnamespace RateLimitFilter {\nnamespace {\n\nenvoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit\nparseRateLimitFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) {\n  envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit rate_limit;\n  TestUtility::loadFromYaml(yaml, rate_limit, false, avoid_boosting);\n  return rate_limit;\n}\n\n} // namespace\n\nTEST(RateLimitFilterConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(\n      RateLimitFilterConfig().createFilterFactoryFromProto(\n          envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit(),\n          \"stats\", context),\n      ProtoValidationException);\n}\n\nTEST(RateLimitFilterConfigTest, RateLimitFilterCorrectProto) {\n  const std::string yaml_string = R\"EOF(\ndomain: \"test\"\ntimeout: \"1.337s\"\nrate_limit_service:\n  grpc_service:\n    envoy_grpc:\n      cluster_name: ratelimit_cluster\n  )EOF\";\n\n  auto proto_config = parseRateLimitFromV3Yaml(yaml_string);\n\n  NiceMock<Server::Configuration::MockFactoryContext> context;\n\n  EXPECT_CALL(context.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _))\n      .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n        
return std::make_unique<NiceMock<Grpc::MockAsyncClientFactory>>();\n      }));\n\n  RateLimitFilterConfig factory;\n  auto cb = factory.createFilterFactoryFromProto(proto_config, \"stats\", context);\n  NetworkFilters::ThriftProxy::ThriftFilters::MockFilterChainFactoryCallbacks filter_callback;\n  EXPECT_CALL(filter_callback, addDecoderFilter(_));\n  cb(filter_callback);\n}\n\n} // namespace RateLimitFilter\n} // namespace ThriftFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc",
    "content": "#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/http/headers.h\"\n\n#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h\"\n\n#include \"test/extensions/filters/common/ratelimit/mocks.h\"\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/ratelimit/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::SetArgReferee;\nusing testing::WithArgs;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ThriftFilters {\nnamespace RateLimitFilter {\n\nusing namespace Envoy::Extensions::NetworkFilters;\n\nclass ThriftRateLimitFilterTest : public testing::Test {\npublic:\n  ThriftRateLimitFilterTest() {\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.thrift_filter_enabled\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.thrift_filter_enforcing\", 100))\n        .WillByDefault(Return(true));\n    ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.test_key.thrift_filter_enabled\", 100))\n        .WillByDefault(Return(true));\n  }\n\n  void setupTest(const std::string& yaml) {\n    envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit\n        proto_config{};\n    TestUtility::loadFromYaml(yaml, 
proto_config, false, true);\n\n    config_ = std::make_shared<Config>(proto_config, local_info_, stats_store_, runtime_, cm_);\n\n    request_metadata_ = std::make_shared<ThriftProxy::MessageMetadata>();\n\n    client_ = new Filters::Common::RateLimit::MockClient();\n    filter_ = std::make_unique<Filter>(config_, Filters::Common::RateLimit::ClientPtr{client_});\n    filter_->setDecoderFilterCallbacks(filter_callbacks_);\n    filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear();\n    filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.emplace_back(\n        route_rate_limit_);\n  }\n\n  const std::string fail_close_config_ = R\"EOF(\n  domain: foo\n  failure_mode_deny: true\n  )EOF\";\n\n  const std::string filter_config_ = R\"EOF(\n  domain: foo\n  )EOF\";\n\n  NiceMock<Stats::MockIsolatedStatsStore> stats_store_;\n  ConfigSharedPtr config_;\n  Filters::Common::RateLimit::MockClient* client_;\n  std::unique_ptr<Filter> filter_;\n  NiceMock<ThriftProxy::ThriftFilters::MockDecoderFilterCallbacks> filter_callbacks_;\n  Filters::Common::RateLimit::RequestCallbacks* request_callbacks_{};\n  ThriftProxy::MessageMetadataSharedPtr request_metadata_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Buffer::OwnedImpl data_;\n  Buffer::OwnedImpl response_data_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<ThriftProxy::Router::MockRateLimitPolicyEntry> route_rate_limit_;\n  std::vector<RateLimit::Descriptor> descriptor_{{{{\"descriptor_key\", \"descriptor_value\"}}}};\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n};\n\nTEST_F(ThriftRateLimitFilterTest, NoRoute) {\n  setupTest(filter_config_);\n\n  EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr));\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->transportBegin(request_metadata_));\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, 
filter_->messageBegin(request_metadata_));\n  {\n    std::string dummy_str = \"dummy\";\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->structBegin(dummy_str));\n  }\n  {\n    std::string dummy_str = \"dummy\";\n    ThriftProxy::FieldType dummy_ft{ThriftProxy::FieldType::I32};\n    int16_t dummy_id{1};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue,\n              filter_->fieldBegin(dummy_str, dummy_ft, dummy_id));\n  }\n  {\n    bool dummy_val{false};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->boolValue(dummy_val));\n  }\n  {\n    uint8_t dummy_val{0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->byteValue(dummy_val));\n  }\n  {\n    int16_t dummy_val{0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int16Value(dummy_val));\n  }\n  {\n    int32_t dummy_val{0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int32Value(dummy_val));\n  }\n  {\n    int64_t dummy_val{0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int64Value(dummy_val));\n  }\n  {\n    double dummy_val{0.0};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->doubleValue(dummy_val));\n  }\n  {\n    std::string dummy_str = \"dummy\";\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->stringValue(dummy_str));\n  }\n  {\n    ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32;\n    uint32_t dummy_size{1};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue,\n              filter_->mapBegin(dummy_ft, dummy_ft, dummy_size));\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->mapEnd());\n  }\n  {\n    ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32;\n    uint32_t dummy_size{1};\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->listBegin(dummy_ft, dummy_size));\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->listEnd());\n  }\n  {\n    ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32;\n    uint32_t dummy_size{1};\n    
EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->setBegin(dummy_ft, dummy_size));\n    EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->setEnd());\n  }\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->structEnd());\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->fieldEnd());\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageEnd());\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->transportEnd());\n}\n\nTEST_F(ThriftRateLimitFilterTest, NoCluster) {\n  setupTest(filter_config_);\n\n  ON_CALL(cm_, get(_)).WillByDefault(Return(nullptr));\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_));\n}\n\nTEST_F(ThriftRateLimitFilterTest, NoApplicableRateLimit) {\n  setupTest(filter_config_);\n\n  filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear();\n  EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0);\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_));\n}\n\nTEST_F(ThriftRateLimitFilterTest, NoDescriptor) {\n  setupTest(filter_config_);\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(1);\n  EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0);\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_));\n}\n\nTEST_F(ThriftRateLimitFilterTest, RuntimeDisabled) {\n  setupTest(filter_config_);\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.thrift_filter_enabled\", 100))\n      .WillOnce(Return(false));\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_));\n}\n\nTEST_F(ThriftRateLimitFilterTest, OkResponse) {\n  setupTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0))\n      .Times(1);\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _))\n      
.WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  request_metadata_->headers().addCopy(ThriftProxy::Headers::get().ClientId, \"clientid\");\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited))\n      .Times(0);\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"ratelimit.ok\").value());\n}\n\nTEST_F(ThriftRateLimitFilterTest, ImmediateOkResponse) {\n  setupTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, 
filter_->messageBegin(request_metadata_));\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"ratelimit.ok\").value());\n}\n\nTEST_F(ThriftRateLimitFilterTest, ImmediateErrorResponse) {\n  setupTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n\n  EXPECT_CALL(*client_, limit(_, \"foo\",\n                              testing::ContainerEq(std::vector<RateLimit::Descriptor>{\n                                  {{{\"descriptor_key\", \"descriptor_value\"}}}}),\n                              _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr,\n                               nullptr);\n          })));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_));\n\n  EXPECT_EQ(\n      1U,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"ratelimit.error\").value());\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"ratelimit.failure_mode_allowed\")\n                    .value());\n}\n\nTEST_F(ThriftRateLimitFilterTest, ErrorResponse) {\n  setupTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_));\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  
request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageEnd());\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited))\n      .Times(0);\n\n  EXPECT_EQ(\n      1U,\n      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"ratelimit.error\").value());\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"ratelimit.failure_mode_allowed\")\n                    .value());\n}\n\nTEST_F(ThriftRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) {\n  setupTest(fail_close_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_));\n\n  EXPECT_CALL(filter_callbacks_, sendLocalReply(_, false))\n      .WillOnce(Invoke([&](const ThriftProxy::DirectResponse& response, bool) -> void {\n        const auto& app_ex = dynamic_cast<const ThriftProxy::AppException&>(response);\n        EXPECT_STREQ(\"limiter error\", app_ex.what());\n        EXPECT_EQ(ThriftProxy::AppExceptionType::InternalError, app_ex.type_);\n      }));\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError));\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(\n      1U,\n      
cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"ratelimit.error\").value());\n  EXPECT_EQ(0U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"ratelimit.failure_mode_allowed\")\n                    .value());\n}\n\nTEST_F(ThriftRateLimitFilterTest, LimitResponse) {\n  setupTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_));\n\n  EXPECT_CALL(filter_callbacks_, sendLocalReply(_, false))\n      .WillOnce(Invoke([&](const ThriftProxy::DirectResponse& response, bool) -> void {\n        const auto& app_ex = dynamic_cast<const ThriftProxy::AppException&>(response);\n        EXPECT_STREQ(\"over limit\", app_ex.what());\n        EXPECT_EQ(ThriftProxy::AppExceptionType::InternalError, app_ex.type_);\n      }));\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited));\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"ratelimit.over_limit\")\n                .value());\n}\n\nTEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) {\n  setupTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          
WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_));\n\n  Http::HeaderMapPtr rl_headers{new Http::TestRequestHeaderMapImpl{\n      {\"x-ratelimit-limit\", \"1000\"}, {\"x-ratelimit-remaining\", \"0\"}, {\"retry-after\", \"33\"}}};\n\n  EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0);\n\n  // TODO(zuercher): Headers are currently ignored, but sendLocalReply is the place to pass them.\n  EXPECT_CALL(filter_callbacks_, sendLocalReply(_, false));\n  EXPECT_CALL(filter_callbacks_.stream_info_,\n              setResponseFlag(StreamInfo::ResponseFlag::RateLimited));\n\n  Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl(*rl_headers)};\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr,\n                               std::move(h), nullptr);\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"ratelimit.over_limit\")\n                .value());\n}\n\nTEST_F(ThriftRateLimitFilterTest, LimitResponseRuntimeDisabled) {\n  setupTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_));\n\n  EXPECT_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.thrift_filter_enforcing\", 100))\n      .WillOnce(Return(false));\n  EXPECT_CALL(filter_callbacks_, continueDecoding());\n  request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, 
nullptr, nullptr,\n                               nullptr);\n\n  EXPECT_EQ(1U,\n            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter(\"ratelimit.over_limit\")\n                .value());\n}\n\nTEST_F(ThriftRateLimitFilterTest, ResetDuringCall) {\n  setupTest(filter_config_);\n  InSequence s;\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _))\n      .WillOnce(SetArgReferee<1>(descriptor_));\n  EXPECT_CALL(*client_, limit(_, _, _, _))\n      .WillOnce(\n          WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {\n            request_callbacks_ = &callbacks;\n          })));\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_));\n\n  EXPECT_CALL(*client_, cancel());\n  filter_->onDestroy();\n}\n\nTEST_F(ThriftRateLimitFilterTest, RouteRateLimitDisabledForRouteKey) {\n  route_rate_limit_.disable_key_ = \"test_key\";\n  setupTest(filter_config_);\n\n  ON_CALL(runtime_.snapshot_, featureEnabled(\"ratelimit.test_key.thrift_filter_enabled\", 100))\n      .WillByDefault(Return(false));\n\n  EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0);\n  EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0);\n\n  EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_));\n}\n\nTEST_F(ThriftRateLimitFilterTest, ConfigValueTest) {\n  std::string stage_filter_config = R\"EOF(\n  {\n    \"domain\": \"foo\",\n    \"stage\": 5,\n  }\n  )EOF\";\n\n  setupTest(stage_filter_config);\n\n  EXPECT_EQ(5UL, config_->stage());\n  EXPECT_EQ(\"foo\", config_->domain());\n}\n\nTEST_F(ThriftRateLimitFilterTest, DefaultConfigValueTest) {\n  std::string stage_filter_config = R\"EOF(\n  {\n    \"domain\": \"foo\"\n  }\n  )EOF\";\n\n  setupTest(stage_filter_config);\n\n  EXPECT_EQ(0UL, config_->stage());\n  EXPECT_EQ(\"foo\", config_->domain());\n}\n\n} // namespace RateLimitFilter\n} // namespace ThriftFilters\n} // namespace 
Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/framed_transport_impl_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/framed_transport_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nTEST(FramedTransportTest, Name) {\n  FramedTransportImpl transport;\n  EXPECT_EQ(transport.name(), \"framed\");\n}\n\nTEST(FramedTransportTest, Type) {\n  FramedTransportImpl transport;\n  EXPECT_EQ(transport.type(), TransportType::Framed);\n}\n\nTEST(FramedTransportTest, NotEnoughData) {\n  Buffer::OwnedImpl buffer;\n  FramedTransportImpl transport;\n  MessageMetadata metadata;\n\n  EXPECT_FALSE(transport.decodeFrameStart(buffer, metadata));\n  EXPECT_THAT(metadata, IsEmptyMetadata());\n\n  addRepeated(buffer, 3, 0);\n\n  EXPECT_FALSE(transport.decodeFrameStart(buffer, metadata));\n  EXPECT_THAT(metadata, IsEmptyMetadata());\n}\n\nTEST(FramedTransportTest, InvalidFrameSize) {\n  FramedTransportImpl transport;\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(-1);\n\n    MessageMetadata metadata;\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"invalid thrift framed transport frame size -1\");\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(0x7fffffff);\n\n    MessageMetadata metadata;\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"invalid thrift framed transport frame size 2147483647\");\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n}\n\nTEST(FramedTransportTest, DecodeFrameStart) {\n  FramedTransportImpl transport;\n\n  Buffer::OwnedImpl buffer;\n  
buffer.writeBEInt<int32_t>(100);\n\n  EXPECT_EQ(buffer.length(), 4);\n\n  MessageMetadata metadata;\n  EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n  EXPECT_THAT(metadata, HasOnlyFrameSize(100U));\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(FramedTransportTest, DecodeFrameEnd) {\n  FramedTransportImpl transport;\n\n  Buffer::OwnedImpl buffer;\n\n  EXPECT_TRUE(transport.decodeFrameEnd(buffer));\n}\n\nTEST(FramedTransportTest, EncodeFrame) {\n  FramedTransportImpl transport;\n\n  {\n    MessageMetadata metadata;\n    Buffer::OwnedImpl message;\n    message.add(\"fake message\");\n\n    Buffer::OwnedImpl buffer;\n    transport.encodeFrame(buffer, metadata, message);\n\n    EXPECT_EQ(0, message.length());\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\xC\"\n                          \"fake message\",\n                          16),\n              buffer.toString());\n  }\n\n  {\n    MessageMetadata metadata;\n    Buffer::OwnedImpl message;\n    Buffer::OwnedImpl buffer;\n    EXPECT_THROW_WITH_MESSAGE(transport.encodeFrame(buffer, metadata, message), EnvoyException,\n                              \"invalid thrift framed transport frame size 0\");\n  }\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/header_transport_impl_test.cc",
    "content": "#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/header_transport_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace {\n\nclass MockBuffer : public Envoy::MockBuffer {\npublic:\n  MockBuffer() = default;\n  ~MockBuffer() override = default;\n\n  MOCK_METHOD(uint64_t, length, (), (const));\n};\n\nMessageMetadataSharedPtr mkMessageMetadata(uint32_t num_headers) {\n  MessageMetadataSharedPtr metadata = std::make_shared<MessageMetadata>();\n\n  while (num_headers-- > 0) {\n    metadata->headers().addCopy(Http::LowerCaseString(\"x\"), \"y\");\n  }\n  return metadata;\n}\n\n} // namespace\n\nTEST(HeaderTransportTest, Name) {\n  HeaderTransportImpl transport;\n  EXPECT_EQ(transport.name(), \"header\");\n}\n\nTEST(HeaderTransportTest, NotEnoughData) {\n  HeaderTransportImpl transport;\n  MessageMetadata metadata;\n\n  // Empty buffer\n  {\n    Buffer::OwnedImpl buffer;\n    EXPECT_FALSE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n\n  // Too short for minimum header\n  {\n    Buffer::OwnedImpl buffer;\n    addRepeated(buffer, 13, 0);\n    EXPECT_FALSE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n\n  // Missing header data\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(1); 
// header size / 4\n    addRepeated(buffer, 3, 0);\n    EXPECT_FALSE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n}\n\nTEST(HeaderTransportTest, InvalidFrameSize) {\n  HeaderTransportImpl transport;\n  MessageMetadata metadata;\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(-1);\n    addRepeated(buffer, 10, 0);\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"invalid thrift header transport frame size -1\");\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.writeBEInt<int32_t>(0x7fffffff);\n    addRepeated(buffer, 10, 0);\n\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"invalid thrift header transport frame size 2147483647\");\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n}\n\nTEST(HeaderTransportTest, InvalidMagic) {\n  HeaderTransportImpl transport;\n  Buffer::OwnedImpl buffer;\n  MessageMetadata metadata;\n\n  buffer.writeBEInt<int32_t>(0x100);\n  buffer.writeBEInt<int16_t>(0x0123);\n  addRepeated(buffer, 8, 0);\n  EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                            \"invalid thrift header transport magic 0123\");\n  EXPECT_THAT(metadata, IsEmptyMetadata());\n}\n\nTEST(HeaderTransportTest, InvalidHeaderSize) {\n  HeaderTransportImpl transport;\n  MessageMetadata metadata;\n\n  // Minimum header size is 1 = 4 bytes\n  {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(0x100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(0);\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"no header data\");\n    
EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n\n  // Minimum header size is 1 = 4 bytes\n  {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(0x100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(-1);\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"invalid thrift header transport header size -4 (ffff)\");\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n\n  // Max header size is 16384 = 65536 bytes\n  {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(0x100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(0x4001);\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"invalid thrift header transport header size 65540 (4001)\");\n    EXPECT_THAT(metadata, IsEmptyMetadata());\n  }\n\n  // Header data extends past stated header size.\n  {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(0x100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1);                  // sequence number\n    buffer.writeBEInt<int16_t>(1);                  // 4 bytes\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}); // var int -1, exceeds header size\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"unable to read header transport protocol id: header too small\");\n  }\n\n  // Partial var-int at end of header\n  {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(0x100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1);            // sequence 
number\n    buffer.writeBEInt<int16_t>(1);            // 4 bytes\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF}); // partial var int\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"unable to read header transport protocol id: header too small\");\n  }\n}\n\nTEST(HeaderTransportTest, InvalidProto) {\n  HeaderTransportImpl transport;\n  MessageMetadata metadata;\n\n  {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(1); // size 4\n    addSeq(buffer, {1, 0, 0, 0});  // 1 = json, 0 = num transforms, pad, pad\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"Unknown protocol 1\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(1); // size 4\n    addSeq(buffer, {3, 0, 0, 0});  // 3 = invalid proto, 0 = num transforms, pad, pad\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"Unknown protocol 3\");\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1);                  // sequence number\n    buffer.writeBEInt<int16_t>(2);                  // size 8\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}); // -1 = invalid proto\n    addSeq(buffer, {0, 0, 0});                      // 0 transforms and padding\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              
\"Unknown protocol -1\");\n  }\n}\n\nTEST(HeaderTransportTest, NoTransformsOrInfo) {\n  HeaderTransportImpl transport;\n\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(1); // size 4\n    addSeq(buffer, {0, 0, 0, 0});  // 0 = binary proto, 0 = num transforms, pad, pad\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, HasFrameSize(86U));\n    EXPECT_THAT(metadata, HasProtocol(ProtocolType::Binary));\n    EXPECT_THAT(metadata, HasSequenceId(1));\n    EXPECT_THAT(metadata, HasNoHeaders());\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n\n    buffer.writeBEInt<int32_t>(101);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(2); // sequence number\n    buffer.writeBEInt<int16_t>(1); // size 4\n    addSeq(buffer, {2, 0, 0, 0});  // 2 = compact proto, 0 = num transforms, pad, pad\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, HasFrameSize(87U));\n    EXPECT_THAT(metadata, HasProtocol(ProtocolType::Compact));\n    EXPECT_THAT(metadata, HasSequenceId(2));\n    EXPECT_THAT(metadata, HasNoHeaders());\n  }\n}\n\nTEST(HeaderTransportTest, TransformErrors) {\n  MessageMetadata metadata;\n\n  // Invalid number of transforms\n  {\n    HeaderTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1);                  // sequence number\n    buffer.writeBEInt<int16_t>(2);                  // size 8\n    buffer.writeByte(0);                            // binary proto\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 
0x1F}); // -1 = invalid num transforms\n    addSeq(buffer, {0, 0});                         // padding\n\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"invalid header transport transform count -1\");\n  }\n\n  // Unknown transform ids\n  for (uint8_t xform_id = 1; xform_id < 5; xform_id++) {\n    HeaderTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1);       // sequence number\n    buffer.writeBEInt<int16_t>(1);       // size 4\n    addSeq(buffer, {0, 1, xform_id, 0}); // 0 = binary proto, 1 = num transforms, xform id, pad\n\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, HasFrameSize(86U));\n    EXPECT_THAT(metadata, HasProtocol(ProtocolType::Binary));\n    EXPECT_THAT(metadata, HasAppException(AppExceptionType::MissingResult,\n                                          absl::StrCat(\"Unknown transform \", xform_id)));\n  }\n\n  // Only the first of multiple errors is reported\n  {\n    HeaderTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(1); // size 4\n    addSeq(buffer, {0, 2, 1, 2});  // 0 = binary proto, 2 = num transforms, xform id 1, xform id 2\n\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, HasFrameSize(86U));\n    EXPECT_THAT(metadata, HasProtocol(ProtocolType::Binary));\n    EXPECT_THAT(metadata, HasAppException(AppExceptionType::MissingResult, \"Unknown transform 1\"));\n  }\n}\n\nTEST(HeaderTransportTest, InvalidInfoBlock) {\n  // Unknown info block id\n  {\n    HeaderTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n   
 MessageMetadata metadata;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(1); // size 4\n    addSeq(buffer, {0, 0, 2, 0});  // 0 = binary proto, 0 = num transforms, 2 = unknown info id, pad\n\n    // Unknown info id is ignored.\n    EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n    EXPECT_THAT(metadata, HasFrameSize(86U));\n    EXPECT_THAT(metadata, HasProtocol(ProtocolType::Binary));\n    EXPECT_THAT(metadata, HasSequenceId(1));\n    EXPECT_THAT(metadata, HasNoHeaders());\n    EXPECT_EQ(buffer.length(), 0);\n  }\n\n  // Num headers info info block id 1 must be >= 0\n  {\n    HeaderTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(3); // size 12\n    addSeq(buffer, {0, 0, 1});     // 0 = binary proto, 0 = num transforms, 1 key-value\n    addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}); // -1 headers\n    addSeq(buffer, {0, 0, 0, 0});\n\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"invalid header transport header count -1\");\n  }\n\n  // Header key length exceeds max allowed size\n  {\n    HeaderTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(2); // size 8\n    addSeq(buffer, {0, 0, 1, 1});  // 0 = binary proto, 0 = num transforms, 1 key-value, 1 = num kvs\n    addSeq(buffer, {0x80, 0x80, 0x40}); // var int 0x100000\n    buffer.writeByte(0);\n\n 
   EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"header transport header key: value 1048576 exceeds max i16 (32767)\");\n  }\n\n  // Header key extends past stated header size\n  {\n    HeaderTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(2); // size 8\n    addSeq(buffer, {0, 0, 1, 1});  // 0 = binary proto, 0 = num transforms, 1 key-value, 1 = num kvs\n    buffer.writeByte(4);           // exceeds specified header size\n    buffer.add(\"key_\");\n\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"unable to read header transport header key: header too small\");\n  }\n\n  // Header key ends at stated header size (no value)\n  {\n    HeaderTransportImpl transport;\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n\n    buffer.writeBEInt<int32_t>(100);\n    buffer.writeBEInt<int16_t>(0x0FFF);\n    buffer.writeBEInt<int16_t>(0);\n    buffer.writeBEInt<int32_t>(1); // sequence number\n    buffer.writeBEInt<int16_t>(2); // size 8\n    addSeq(buffer, {0, 0, 1, 1});  // 0 = binary proto, 0 = num transforms, 1 key-value, 1 = num kvs\n    buffer.writeByte(3);           // head ends with key, no room for value\n    buffer.add(\"abc\");\n    buffer.writeByte(0);\n\n    EXPECT_THROW_WITH_MESSAGE(transport.decodeFrameStart(buffer, metadata), EnvoyException,\n                              \"unable to read header transport header value: header too small\");\n  }\n}\n\nTEST(HeaderTransportTest, InfoBlock) {\n  HeaderTransportImpl transport;\n  Buffer::OwnedImpl buffer;\n  MessageMetadata metadata;\n  metadata.headers().addCopy(Http::LowerCaseString(\"not\"), \"empty\");\n\n  
buffer.writeBEInt<int32_t>(200);\n  buffer.writeBEInt<int16_t>(0x0FFF);\n  buffer.writeBEInt<int16_t>(0);\n  buffer.writeBEInt<int32_t>(1);  // sequence number\n  buffer.writeBEInt<int16_t>(38); // size 152\n  addSeq(buffer, {0, 0, 1, 3}); // 0 = binary proto, 0 = num transforms, 1 = key value, 3 = num kvs\n  buffer.writeByte(3);\n  buffer.add(\"key\");\n  buffer.writeByte(5);\n  buffer.add(\"value\");\n  buffer.writeByte(4);\n  buffer.add(\"key2\");\n  addSeq(buffer, {0x80, 0x01}); // var int 128\n  buffer.add(std::string(128, 'x'));\n  buffer.writeByte(0); // empty key\n  buffer.writeByte(0); // empty value\n  buffer.writeByte(0); // padding\n\n  Http::TestRequestHeaderMapImpl expected_headers;\n  expected_headers.addCopy(Http::LowerCaseString(\"not\"), \"empty\");\n  expected_headers.addCopy(Http::LowerCaseString(\"key\"), \"value\");\n  expected_headers.addCopy(Http::LowerCaseString(\"key2\"), std::string(128, 'x'));\n  expected_headers.addCopy(Http::LowerCaseString(\"\"), \"\");\n\n  EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n  EXPECT_THAT(metadata, HasFrameSize(38U));\n\n  EXPECT_EQ(expected_headers, metadata.headers());\n  EXPECT_EQ(buffer.length(), 0);\n}\n\nTEST(HeaderTransportTest, DecodeFrameEnd) {\n  HeaderTransportImpl transport;\n  Buffer::OwnedImpl buffer;\n  EXPECT_TRUE(transport.decodeFrameEnd(buffer));\n}\n\nTEST(HeaderTransportImpl, TestEncodeFrame) {\n  HeaderTransportImpl transport;\n\n  // No message\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n    Buffer::OwnedImpl msg;\n\n    EXPECT_THROW_WITH_MESSAGE(transport.encodeFrame(buffer, metadata, msg), EnvoyException,\n                              \"invalid thrift header transport message size 0\");\n  }\n\n  // No protocol\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n    Buffer::OwnedImpl msg;\n    msg.add(\"fake message\");\n\n    EXPECT_THROW_WITH_MESSAGE(transport.encodeFrame(buffer, metadata, msg), EnvoyException,\n               
               \"missing header transport protocol\");\n  }\n\n  // Illegal protocol\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n    metadata.setProtocol(ProtocolType::Auto);\n    Buffer::OwnedImpl msg;\n    msg.add(\"fake message\");\n\n    EXPECT_THROW_WITH_MESSAGE(transport.encodeFrame(buffer, metadata, msg), EnvoyException,\n                              \"invalid header transport protocol auto\");\n  }\n\n  // Message too large\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n    metadata.setProtocol(ProtocolType::Binary);\n\n    MockBuffer msg;\n    EXPECT_CALL(msg, length()).WillOnce(Return(0x40000000));\n\n    EXPECT_THROW_WITH_MESSAGE(transport.encodeFrame(buffer, metadata, msg), EnvoyException,\n                              \"invalid thrift header transport frame size 1073741838\");\n  }\n\n  // Too many headers\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadataSharedPtr metadata = mkMessageMetadata(32769);\n    metadata->setProtocol(ProtocolType::Binary);\n\n    Buffer::OwnedImpl msg;\n    msg.add(\"fake message\");\n\n    EXPECT_THROW_WITH_MESSAGE(transport.encodeFrame(buffer, *metadata, msg), EnvoyException,\n                              \"invalid thrift header transport too many headers 32769\");\n  }\n\n  // Header string too large\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n    metadata.setProtocol(ProtocolType::Binary);\n    metadata.headers().addCopy(Http::LowerCaseString(\"key\"), std::string(32768, 'x'));\n\n    Buffer::OwnedImpl msg;\n    msg.add(\"fake message\");\n\n    EXPECT_THROW_WITH_MESSAGE(transport.encodeFrame(buffer, metadata, msg), EnvoyException,\n                              \"header string too long: 32768\");\n  }\n\n  // Header info block too large\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n    metadata.setProtocol(ProtocolType::Binary);\n    metadata.headers().addCopy(Http::LowerCaseString(\"k1\"), std::string(16384, 'x'));\n    
metadata.headers().addCopy(Http::LowerCaseString(\"k2\"), std::string(16384, 'x'));\n    metadata.headers().addCopy(Http::LowerCaseString(\"k3\"), std::string(16384, 'x'));\n    metadata.headers().addCopy(Http::LowerCaseString(\"k4\"), std::string(16384, 'x'));\n\n    Buffer::OwnedImpl msg;\n    msg.add(\"fake message\");\n\n    EXPECT_THROW_WITH_MESSAGE(transport.encodeFrame(buffer, metadata, msg), EnvoyException,\n                              \"invalid thrift header transport header size 65568\");\n  }\n\n  // Trivial frame with binary protocol\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n    metadata.setProtocol(ProtocolType::Binary);\n    Buffer::OwnedImpl msg;\n    msg.add(\"fake message\");\n\n    transport.encodeFrame(buffer, metadata, msg);\n\n    EXPECT_EQ(0, msg.length());\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\x1a\"\n                          \"\\xf\\xff\\0\\0\"\n                          \"\\0\\0\\0\\0\"\n                          \"\\0\\x1\"\n                          \"\\0\\0\\0\\0\"\n                          \"fake message\",\n                          30),\n              buffer.toString());\n  }\n\n  // Trivial frame with compact protocol\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n    metadata.setProtocol(ProtocolType::Compact);\n    metadata.setSequenceId(10);\n    Buffer::OwnedImpl msg;\n    msg.add(\"fake message\");\n\n    transport.encodeFrame(buffer, metadata, msg);\n\n    EXPECT_EQ(0, msg.length());\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\x1a\"\n                          \"\\xf\\xff\\0\\0\"\n                          \"\\0\\0\\0\\x0a\"\n                          \"\\0\\x1\"     // header size = 4\n                          \"\\x2\\0\\0\\0\" // compact, no transforms, padding\n                          \"fake message\",\n                          30),\n              buffer.toString());\n  }\n\n  // Frame with headers\n  {\n    Buffer::OwnedImpl buffer;\n    MessageMetadata metadata;\n    
metadata.setProtocol(ProtocolType::Compact);\n    metadata.setSequenceId(10);\n    metadata.headers().addCopy(Http::LowerCaseString(\"key\"), \"value\");\n    metadata.headers().addCopy(Http::LowerCaseString(\"\"), \"\");\n    Buffer::OwnedImpl msg;\n    msg.add(\"fake message\");\n\n    transport.encodeFrame(buffer, metadata, msg);\n\n    EXPECT_EQ(0, msg.length());\n    EXPECT_EQ(std::string(\"\\0\\0\\0\\x2a\"\n                          \"\\xf\\xff\\0\\0\"\n                          \"\\0\\0\\0\\x0a\"\n                          \"\\0\\x5\"          // header size = 20\n                          \"\\x2\\0\"          // compact, no transforms\n                          \"\\x1\\x2\"         // header info block, 2 headers\n                          \"\\x3key\\x5value\" // first header\n                          \"\\0\\0\"           // second header\n                          \"\\0\\0\\0\\0\"       // padding\n                          \"fake message\",\n                          46),\n              buffer.toString());\n  }\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/integration.cc",
    "content": "#include \"test/extensions/filters/network/thrift_proxy/integration.h\"\n\n#include <algorithm>\n#include <fstream>\n\n#include \"test/test_common/environment.h\"\n\n#include \"absl/strings/str_join.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nstd::string PayloadOptions::modeName() const {\n  switch (mode_) {\n  case DriverMode::Success:\n    return \"success\";\n  case DriverMode::IDLException:\n    return \"idl-exception\";\n  case DriverMode::Exception:\n    return \"exception\";\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nstd::string PayloadOptions::transportName() const {\n  switch (transport_) {\n  case TransportType::Framed:\n    return \"framed\";\n  case TransportType::Unframed:\n    return \"unframed\";\n  case TransportType::Header:\n    return \"header\";\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nstd::string PayloadOptions::protocolName() const {\n  switch (protocol_) {\n  case ProtocolType::Binary:\n    return \"binary\";\n  case ProtocolType::Compact:\n    return \"compact\";\n  case ProtocolType::Twitter:\n    return \"finagle\";\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\nstd::string BaseThriftIntegrationTest::thrift_config_;\n\nvoid BaseThriftIntegrationTest::preparePayloads(const PayloadOptions& options,\n                                                Buffer::Instance& request_buffer,\n                                                Buffer::Instance& response_buffer) {\n  std::vector<std::string> args = {\n      TestEnvironment::runfilesPath(\n          \"test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh\"),\n      options.modeName(),\n      options.transportName(),\n      options.protocolName(),\n  };\n\n  if (options.service_name_) {\n    args.push_back(\"-s\");\n    args.push_back(*options.service_name_);\n  }\n\n  if (!options.headers_.empty()) {\n    args.push_back(\"-H\");\n\n    std::vector<std::string> 
headers;\n    std::transform(options.headers_.begin(), options.headers_.end(), std::back_inserter(headers),\n                   [](const std::pair<std::string, std::string>& header) -> std::string {\n                     return header.first + \"=\" + header.second;\n                   });\n    args.push_back(absl::StrJoin(headers, \",\"));\n  }\n\n  args.push_back(options.method_name_);\n  std::copy(options.method_args_.begin(), options.method_args_.end(), std::back_inserter(args));\n\n  TestEnvironment::exec(args);\n\n  std::stringstream file_base;\n  file_base << \"{{ test_tmpdir }}/\" << options.transportName() << \"-\" << options.protocolName()\n            << \"-\";\n  if (options.service_name_) {\n    file_base << *options.service_name_ << \"-\";\n  }\n  file_base << options.modeName();\n\n  readAll(file_base.str() + \".request\", request_buffer);\n  readAll(file_base.str() + \".response\", response_buffer);\n}\n\nvoid BaseThriftIntegrationTest::readAll(std::string file, Buffer::Instance& buffer) {\n  file = TestEnvironment::substitute(file, version_);\n\n  std::string data = api_->fileSystem().fileReadToEnd(file);\n  buffer.add(data);\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/integration.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"extensions/filters/network/thrift_proxy/thrift.h\"\n\n#include \"test/integration/integration.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\n/**\n * DriverMode represents the modes the test driver server modes.\n */\nenum class DriverMode {\n  // Server returns successful responses.\n  Success,\n\n  // Server throws IDL-defined exceptions.\n  IDLException,\n\n  // Server throws application exceptions.\n  Exception,\n};\n\nstruct PayloadOptions {\n  PayloadOptions(TransportType transport, ProtocolType protocol, DriverMode mode,\n                 absl::optional<std::string> service_name, std::string method_name,\n                 std::vector<std::string> method_args = {},\n                 std::vector<std::pair<std::string, std::string>> headers = {})\n      : transport_(transport), protocol_(protocol), mode_(mode), service_name_(service_name),\n        method_name_(method_name), method_args_(method_args), headers_(headers) {}\n\n  std::string modeName() const;\n  std::string transportName() const;\n  std::string protocolName() const;\n\n  const TransportType transport_;\n  const ProtocolType protocol_;\n  const DriverMode mode_;\n  const absl::optional<std::string> service_name_;\n  const std::string method_name_;\n  const std::vector<std::string> method_args_;\n  const std::vector<std::pair<std::string, std::string>> headers_;\n};\n\nclass BaseThriftIntegrationTest : public BaseIntegrationTest {\npublic:\n  BaseThriftIntegrationTest()\n      : BaseIntegrationTest(Network::Address::IpVersion::v4, thrift_config_) {}\n\n  /**\n   * Given PayloadOptions, generate a client request and server response and store the\n   * data in the given Buffers.\n   */\n  void preparePayloads(const PayloadOptions& options, Buffer::Instance& request_buffer,\n                       Buffer::Instance& response_buffer);\n\nprotected:\n  // Tests should 
use a static SetUpTestSuite method to initialize this field with a suitable\n  // configuration.\n  static std::string thrift_config_;\n\nprivate:\n  void readAll(std::string file, Buffer::Instance& buffer);\n};\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"extensions/filters/network/thrift_proxy/buffer_helper.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/integration.h\"\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::Combine;\nusing testing::HasSubstr;\nusing ::testing::TestParamInfo;\nusing testing::Values;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass ThriftConnManagerIntegrationTest\n    : public testing::TestWithParam<std::tuple<TransportType, ProtocolType, bool>>,\n      public BaseThriftIntegrationTest {\npublic:\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    thrift_config_ = absl::StrCat(ConfigHelper::baseConfig(), R\"EOF(\n    filter_chains:\n      filters:\n        - name: thrift\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProxy\n            stat_prefix: thrift_stats\n            route_config:\n              name: \"routes\"\n              routes:\n                - match:\n                    service_name: \"svcname\"\n                  route:\n                    cluster: \"cluster_0\"\n                - match:\n                    method_name: \"execute\"\n                    headers:\n                    - name: \"x-header-1\"\n                      exact_match: \"x-value-1\"\n                    - name: \"x-header-2\"\n                      safe_regex_match:\n                        google_re2: {}\n                        regex: \"0.[5-9]\"\n                    - name: \"x-header-3\"\n                      range_match:\n                        start: 100\n                        end: 200\n                    - name: \"x-header-4\"\n                      prefix_match: \"user_id:\"\n                    - name: 
\"x-header-5\"\n                      suffix_match: \"asdf\"\n                  route:\n                    cluster: \"cluster_1\"\n                - match:\n                    method_name: \"execute\"\n                  route:\n                    cluster: \"cluster_2\"\n                - match:\n                    method_name: \"poke\"\n                  route:\n                    cluster: \"cluster_3\"\n      )EOF\");\n  }\n\n  void initializeCall(DriverMode mode) {\n    std::tie(transport_, protocol_, multiplexed_) = GetParam();\n\n    absl::optional<std::string> service_name;\n    if (multiplexed_) {\n      service_name = \"svcname\";\n    }\n\n    std::vector<std::pair<std::string, std::string>> headers;\n    if (transport_ == TransportType::Header) {\n      headers.push_back(std::make_pair(\"x-header-1\", \"x-value-1\"));\n      headers.push_back(std::make_pair(\"x-header-2\", \"0.6\"));\n      headers.push_back(std::make_pair(\"x-header-3\", \"150\"));\n      headers.push_back(std::make_pair(\"x-header-4\", \"user_id:10\"));\n      headers.push_back(std::make_pair(\"x-header-5\", \"garbage_asdf\"));\n    }\n\n    PayloadOptions options(transport_, protocol_, mode, service_name, \"execute\", {}, headers);\n    preparePayloads(options, request_bytes_, response_bytes_);\n    ASSERT(request_bytes_.length() > 0);\n    ASSERT(response_bytes_.length() > 0);\n    initializeCommon();\n  }\n\n  void initializeOneway() {\n    std::tie(transport_, protocol_, multiplexed_) = GetParam();\n\n    absl::optional<std::string> service_name;\n    if (multiplexed_) {\n      service_name = \"svcname\";\n    }\n\n    PayloadOptions options(transport_, protocol_, DriverMode::Success, service_name, \"poke\");\n    preparePayloads(options, request_bytes_, response_bytes_);\n    ASSERT(request_bytes_.length() > 0);\n    ASSERT(response_bytes_.length() == 0);\n    initializeCommon();\n  }\n\n  // We allocate as many upstreams as there are clusters, with each upstream being 
allocated\n  // to clusters in the order they're defined in the bootstrap config.\n  void initializeCommon() {\n    setUpstreamCount(4);\n\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      for (int i = 1; i < 4; i++) {\n        auto* c = bootstrap.mutable_static_resources()->add_clusters();\n        c->MergeFrom(bootstrap.static_resources().clusters()[0]);\n        c->set_name(absl::StrCat(\"cluster_\", i));\n      }\n    });\n\n    BaseThriftIntegrationTest::initialize();\n  }\n\nprotected:\n  // Multiplexed requests are handled by the service name route match,\n  // while oneway's are handled by the \"poke\" method. All other requests\n  // are handled by \"execute\".\n  FakeUpstream* getExpectedUpstream(bool oneway) {\n    int upstreamIdx = 2;\n    if (multiplexed_) {\n      upstreamIdx = 0;\n    } else if (oneway) {\n      upstreamIdx = 3;\n    } else if (transport_ == TransportType::Header) {\n      upstreamIdx = 1;\n    }\n\n    return fake_upstreams_[upstreamIdx].get();\n  }\n\n  TransportType transport_;\n  ProtocolType protocol_;\n  bool multiplexed_;\n\n  std::string result_;\n\n  Buffer::OwnedImpl request_bytes_;\n  Buffer::OwnedImpl response_bytes_;\n};\n\nstatic std::string\nparamToString(const TestParamInfo<std::tuple<TransportType, ProtocolType, bool>>& params) {\n  TransportType transport;\n  ProtocolType protocol;\n  bool multiplexed;\n  std::tie(transport, protocol, multiplexed) = params.param;\n\n  std::string transport_name = transportNameForTest(transport);\n  std::string protocol_name = protocolNameForTest(protocol);\n\n  if (multiplexed) {\n    return fmt::format(\"{}{}Multiplexed\", transport_name, protocol_name);\n  }\n  return fmt::format(\"{}{}\", transport_name, protocol_name);\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    TransportAndProtocol, ThriftConnManagerIntegrationTest,\n    Combine(Values(TransportType::Framed, TransportType::Unframed, TransportType::Header),\n            
Values(ProtocolType::Binary, ProtocolType::Compact), Values(false, true)),\n    paramToString);\n\nTEST_P(ThriftConnManagerIntegrationTest, Success) {\n  initializeCall(DriverMode::Success);\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(request_bytes_.toString()));\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  FakeUpstream* expected_upstream = getExpectedUpstream(false);\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(request_bytes_.length(), &data));\n  Buffer::OwnedImpl upstream_request(data);\n  EXPECT_EQ(request_bytes_.toString(), upstream_request.toString());\n\n  ASSERT_TRUE(fake_upstream_connection->write(response_bytes_.toString()));\n\n  tcp_client->waitForData(response_bytes_.toString());\n  tcp_client->close();\n\n  EXPECT_TRUE(TestUtility::buffersEqual(Buffer::OwnedImpl(tcp_client->data()), response_bytes_));\n\n  Stats::CounterSharedPtr counter = test_server_->counter(\"thrift.thrift_stats.request_call\");\n  EXPECT_EQ(1U, counter->value());\n  counter = test_server_->counter(\"thrift.thrift_stats.response_success\");\n  EXPECT_EQ(1U, counter->value());\n}\n\nTEST_P(ThriftConnManagerIntegrationTest, IDLException) {\n  initializeCall(DriverMode::IDLException);\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(request_bytes_.toString()));\n\n  FakeUpstream* expected_upstream = getExpectedUpstream(false);\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(request_bytes_.length(), &data));\n  Buffer::OwnedImpl upstream_request(data);\n  EXPECT_EQ(request_bytes_.toString(), upstream_request.toString());\n\n  
ASSERT_TRUE(fake_upstream_connection->write(response_bytes_.toString()));\n\n  tcp_client->waitForData(response_bytes_.toString());\n  tcp_client->close();\n\n  EXPECT_TRUE(TestUtility::buffersEqual(Buffer::OwnedImpl(tcp_client->data()), response_bytes_));\n\n  Stats::CounterSharedPtr counter = test_server_->counter(\"thrift.thrift_stats.request_call\");\n  EXPECT_EQ(1U, counter->value());\n  counter = test_server_->counter(\"thrift.thrift_stats.response_error\");\n  EXPECT_EQ(1U, counter->value());\n}\n\nTEST_P(ThriftConnManagerIntegrationTest, Exception) {\n  initializeCall(DriverMode::Exception);\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(request_bytes_.toString()));\n\n  FakeUpstream* expected_upstream = getExpectedUpstream(false);\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(request_bytes_.length(), &data));\n  Buffer::OwnedImpl upstream_request(data);\n  EXPECT_EQ(request_bytes_.toString(), upstream_request.toString());\n\n  ASSERT_TRUE(fake_upstream_connection->write(response_bytes_.toString()));\n\n  tcp_client->waitForData(response_bytes_.toString());\n  tcp_client->close();\n\n  EXPECT_TRUE(TestUtility::buffersEqual(Buffer::OwnedImpl(tcp_client->data()), response_bytes_));\n\n  Stats::CounterSharedPtr counter = test_server_->counter(\"thrift.thrift_stats.request_call\");\n  EXPECT_EQ(1U, counter->value());\n  counter = test_server_->counter(\"thrift.thrift_stats.response_exception\");\n  EXPECT_EQ(1U, counter->value());\n}\n\nTEST_P(ThriftConnManagerIntegrationTest, EarlyClose) {\n  initializeCall(DriverMode::Success);\n\n  const std::string partial_request =\n      request_bytes_.toString().substr(0, request_bytes_.length() - 5);\n\n  FakeUpstream* expected_upstream = getExpectedUpstream(false);\n\n  
IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(partial_request));\n  tcp_client->close();\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n\n  test_server_->waitForCounterGe(\"thrift.thrift_stats.cx_destroy_remote_with_active_rq\", 1);\n\n  Stats::CounterSharedPtr counter =\n      test_server_->counter(\"thrift.thrift_stats.cx_destroy_remote_with_active_rq\");\n  EXPECT_EQ(1U, counter->value());\n}\n\n// Tests when the downstream client closes before completing a request but an upstream has already\n// been connected/assigned.\nTEST_P(ThriftConnManagerIntegrationTest, EarlyCloseWithUpstream) {\n  initializeCall(DriverMode::Success);\n\n  const std::string partial_request =\n      request_bytes_.toString().substr(0, request_bytes_.length() - 5);\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(partial_request));\n\n  FakeUpstream* expected_upstream = getExpectedUpstream(false);\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n\n  tcp_client->close();\n\n  test_server_->waitForCounterGe(\"thrift.thrift_stats.cx_destroy_remote_with_active_rq\", 1);\n\n  Stats::CounterSharedPtr counter =\n      test_server_->counter(\"thrift.thrift_stats.cx_destroy_remote_with_active_rq\");\n  EXPECT_EQ(1U, counter->value());\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/9037.\nTEST_P(ThriftConnManagerIntegrationTest, EarlyUpstreamClose) {\n  initializeCall(DriverMode::Success);\n\n  const std::string partial_request =\n      request_bytes_.toString().substr(0, request_bytes_.length() - 5);\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(request_bytes_.toString()));\n\n  
FakeUpstream* expected_upstream = getExpectedUpstream(false);\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(request_bytes_.length(), &data));\n  Buffer::OwnedImpl upstream_request(data);\n  EXPECT_EQ(request_bytes_.toString(), upstream_request.toString());\n\n  ASSERT_TRUE(fake_upstream_connection->close());\n\n  tcp_client->waitForDisconnect();\n\n  EXPECT_THAT(tcp_client->data(), HasSubstr(\"connection failure\"));\n\n  Stats::CounterSharedPtr counter = test_server_->counter(\"thrift.thrift_stats.request_call\");\n  EXPECT_EQ(1U, counter->value());\n  counter = test_server_->counter(\"thrift.thrift_stats.response_exception\");\n  EXPECT_EQ(1U, counter->value());\n}\n\nTEST_P(ThriftConnManagerIntegrationTest, Oneway) {\n  initializeOneway();\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(request_bytes_.toString()));\n\n  FakeUpstream* expected_upstream = getExpectedUpstream(true);\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(request_bytes_.length(), &data));\n  Buffer::OwnedImpl upstream_request(data);\n  EXPECT_TRUE(TestUtility::buffersEqual(upstream_request, request_bytes_));\n  EXPECT_EQ(request_bytes_.toString(), upstream_request.toString());\n\n  tcp_client->close();\n\n  Stats::CounterSharedPtr counter = test_server_->counter(\"thrift.thrift_stats.request_oneway\");\n  EXPECT_EQ(1U, counter->value());\n}\n\nTEST_P(ThriftConnManagerIntegrationTest, OnewayEarlyClose) {\n  initializeOneway();\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(request_bytes_.toString()));\n  tcp_client->close();\n\n  
FakeUpstream* expected_upstream = getExpectedUpstream(true);\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(request_bytes_.length(), &data));\n  Buffer::OwnedImpl upstream_request(data);\n  EXPECT_EQ(request_bytes_.toString(), upstream_request.toString());\n\n  Stats::CounterSharedPtr counter = test_server_->counter(\"thrift.thrift_stats.request_oneway\");\n  EXPECT_EQ(1U, counter->value());\n}\n\nTEST_P(ThriftConnManagerIntegrationTest, OnewayEarlyClosePartialRequest) {\n  initializeOneway();\n\n  const std::string partial_request =\n      request_bytes_.toString().substr(0, request_bytes_.length() - 1);\n\n  FakeUpstream* expected_upstream = getExpectedUpstream(true);\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(partial_request));\n  tcp_client->close();\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n\n  test_server_->waitForCounterGe(\"thrift.thrift_stats.cx_destroy_remote_with_active_rq\", 1);\n\n  Stats::CounterSharedPtr counter =\n      test_server_->counter(\"thrift.thrift_stats.cx_destroy_remote_with_active_rq\");\n  EXPECT_EQ(1U, counter->value());\n}\n\nclass ThriftTwitterConnManagerIntegrationTest : public ThriftConnManagerIntegrationTest {};\n\nINSTANTIATE_TEST_SUITE_P(FramedTwitter, ThriftTwitterConnManagerIntegrationTest,\n                         Combine(Values(TransportType::Framed), Values(ProtocolType::Twitter),\n                                 Values(false, true)),\n                         paramToString);\n\n// Because of the protocol upgrade requests and the difficulty of separating them, we test this\n// protocol independently.\nTEST_P(ThriftTwitterConnManagerIntegrationTest, Success) {\n  
initializeCall(DriverMode::Success);\n\n  uint32_t upgrade_request_size = request_bytes_.peekBEInt<uint32_t>() + 4;\n  Buffer::OwnedImpl upgrade_request_bytes;\n  upgrade_request_bytes.move(request_bytes_, upgrade_request_size);\n\n  uint32_t upgrade_response_size = response_bytes_.peekBEInt<uint32_t>() + 4;\n  Buffer::OwnedImpl upgrade_response_bytes;\n  upgrade_response_bytes.move(response_bytes_, upgrade_response_size);\n\n  // Upgrade request/response happens without an upstream.\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(upgrade_request_bytes.toString()));\n  tcp_client->waitForData(upgrade_response_bytes.toString());\n  EXPECT_TRUE(\n      TestUtility::buffersEqual(Buffer::OwnedImpl(tcp_client->data()), upgrade_response_bytes));\n\n  // First real request triggers upstream connection.\n  ASSERT_TRUE(tcp_client->write(request_bytes_.toString()));\n  FakeRawConnectionPtr fake_upstream_connection;\n  FakeUpstream* expected_upstream = getExpectedUpstream(false);\n  ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection));\n\n  // Check that upstream receives the upgrade request\n  std::string upgrade_data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(upgrade_request_size, &upgrade_data));\n  Buffer::OwnedImpl upstream_upgrade_request(upgrade_data);\n  EXPECT_EQ(upgrade_request_bytes.toString(), upstream_upgrade_request.toString());\n\n  // Respond with successful upgrade reply.\n  ASSERT_TRUE(fake_upstream_connection->write(upgrade_response_bytes.toString()));\n\n  // Check that upstream receives the real request.\n  // TODO(zuercher): fix FakeRawConnection to allow data to be reset so we don't have to account\n  // for the upgrade message that we already checked.\n  std::string data;\n  ASSERT_TRUE(\n      fake_upstream_connection->waitForData(upgrade_request_size + request_bytes_.length(), &data));\n  Buffer::OwnedImpl 
upstream_request(data.substr(upgrade_request_size));\n  EXPECT_EQ(request_bytes_.toString(), upstream_request.toString());\n\n  // Respond to request.\n  ASSERT_TRUE(fake_upstream_connection->write(response_bytes_.toString()));\n\n  // TODO(zuercher): likewise fix IntegrationTcpClient to allow data to be reset so we don't have\n  // to account for the upgrade response we already checked.\n  tcp_client->waitForData(response_bytes_.toString(), false);\n  tcp_client->close();\n\n  EXPECT_TRUE(TestUtility::buffersEqual(\n      Buffer::OwnedImpl(tcp_client->data().substr(upgrade_response_size)), response_bytes_));\n\n  Stats::CounterSharedPtr counter = test_server_->counter(\"thrift.thrift_stats.request_call\");\n  EXPECT_EQ(2U, counter->value());\n  counter = test_server_->counter(\"thrift.thrift_stats.response_success\");\n  EXPECT_EQ(2U, counter->value());\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/metadata_test.cc",
    "content": "#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nTEST(MessageMetadataTest, Fields) {\n  MessageMetadata metadata;\n\n  EXPECT_FALSE(metadata.hasFrameSize());\n  EXPECT_THROW(metadata.frameSize(), absl::bad_optional_access);\n  metadata.setFrameSize(100);\n  EXPECT_TRUE(metadata.hasFrameSize());\n  EXPECT_EQ(100, metadata.frameSize());\n\n  EXPECT_FALSE(metadata.hasProtocol());\n  EXPECT_THROW(metadata.protocol(), absl::bad_optional_access);\n  metadata.setProtocol(ProtocolType::Binary);\n  EXPECT_TRUE(metadata.hasProtocol());\n  EXPECT_EQ(ProtocolType::Binary, metadata.protocol());\n\n  EXPECT_FALSE(metadata.hasMethodName());\n  EXPECT_THROW(metadata.methodName(), absl::bad_optional_access);\n  metadata.setMethodName(\"method\");\n  EXPECT_TRUE(metadata.hasMethodName());\n  EXPECT_EQ(\"method\", metadata.methodName());\n\n  EXPECT_FALSE(metadata.hasMessageType());\n  EXPECT_THROW(metadata.messageType(), absl::bad_optional_access);\n  metadata.setMessageType(MessageType::Call);\n  EXPECT_TRUE(metadata.hasMessageType());\n  EXPECT_EQ(MessageType::Call, metadata.messageType());\n\n  EXPECT_FALSE(metadata.hasSequenceId());\n  EXPECT_THROW(metadata.sequenceId(), absl::bad_optional_access);\n  metadata.setSequenceId(101);\n  EXPECT_TRUE(metadata.hasSequenceId());\n  EXPECT_EQ(101, metadata.sequenceId());\n\n  EXPECT_FALSE(metadata.hasAppException());\n  EXPECT_THROW(metadata.appExceptionType(), absl::bad_optional_access);\n  EXPECT_THROW(metadata.appExceptionMessage(), absl::bad_optional_access);\n  metadata.setAppException(AppExceptionType::InternalError, \"oops\");\n  EXPECT_TRUE(metadata.hasAppException());\n  EXPECT_EQ(AppExceptionType::InternalError, metadata.appExceptionType());\n  EXPECT_EQ(\"oops\", 
metadata.appExceptionMessage());\n}\n\nTEST(MessageMetadataTest, Headers) {\n  MessageMetadata metadata;\n\n  EXPECT_EQ(metadata.headers().size(), 0);\n  metadata.headers().addCopy(Http::LowerCaseString(\"k\"), \"v\");\n  EXPECT_EQ(metadata.headers().size(), 1);\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/mocks.cc",
    "content": "#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n\n#include <memory>\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\n\n// Provide a specialization for ProtobufWkt::Struct (for MockFilterConfigFactory)\ntemplate <>\nvoid MessageUtil::validate(const ProtobufWkt::Struct&, ProtobufMessage::ValidationVisitor&) {}\n\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nMockConfig::MockConfig() = default;\nMockConfig::~MockConfig() = default;\n\nMockTransport::MockTransport() {\n  ON_CALL(*this, name()).WillByDefault(ReturnRef(name_));\n  ON_CALL(*this, type()).WillByDefault(Return(type_));\n}\nMockTransport::~MockTransport() = default;\n\nMockProtocol::MockProtocol() {\n  ON_CALL(*this, name()).WillByDefault(ReturnRef(name_));\n  ON_CALL(*this, type()).WillByDefault(Return(type_));\n  ON_CALL(*this, setType(_)).WillByDefault(Invoke([&](ProtocolType type) -> void {\n    type_ = type;\n  }));\n  ON_CALL(*this, supportsUpgrade()).WillByDefault(Return(false));\n}\nMockProtocol::~MockProtocol() = default;\n\nMockDecoderCallbacks::MockDecoderCallbacks() = default;\nMockDecoderCallbacks::~MockDecoderCallbacks() = default;\n\nMockDecoderEventHandler::MockDecoderEventHandler() = default;\nMockDecoderEventHandler::~MockDecoderEventHandler() = default;\n\nMockDirectResponse::MockDirectResponse() = default;\nMockDirectResponse::~MockDirectResponse() = default;\n\nMockThriftObject::MockThriftObject() = default;\nMockThriftObject::~MockThriftObject() = default;\n\nnamespace ThriftFilters {\n\nMockFilterChainFactoryCallbacks::MockFilterChainFactoryCallbacks() = default;\nMockFilterChainFactoryCallbacks::~MockFilterChainFactoryCallbacks() = default;\n\nMockDecoderFilter::MockDecoderFilter() {\n  ON_CALL(*this, transportBegin(_)).WillByDefault(Return(FilterStatus::Continue));\n  
ON_CALL(*this, transportEnd()).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, messageBegin(_)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, messageEnd()).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, structBegin(_)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, structEnd()).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, fieldBegin(_, _, _)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, fieldEnd()).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, boolValue(_)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, byteValue(_)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, int16Value(_)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, int32Value(_)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, int64Value(_)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, doubleValue(_)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, stringValue(_)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, mapBegin(_, _, _)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, mapEnd()).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, listBegin(_, _)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, listEnd()).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, setBegin(_, _)).WillByDefault(Return(FilterStatus::Continue));\n  ON_CALL(*this, setEnd()).WillByDefault(Return(FilterStatus::Continue));\n}\nMockDecoderFilter::~MockDecoderFilter() = default;\n\nMockDecoderFilterCallbacks::MockDecoderFilterCallbacks() {\n  route_ = std::make_shared<NiceMock<Router::MockRoute>>();\n\n  ON_CALL(*this, streamId()).WillByDefault(Return(stream_id_));\n  ON_CALL(*this, connection()).WillByDefault(Return(&connection_));\n  ON_CALL(*this, route()).WillByDefault(Return(route_));\n  ON_CALL(*this, 
streamInfo()).WillByDefault(ReturnRef(stream_info_));\n}\nMockDecoderFilterCallbacks::~MockDecoderFilterCallbacks() = default;\n\nMockFilterConfigFactory::MockFilterConfigFactory() : name_(\"envoy.filters.thrift.mock_filter\") {\n  mock_filter_ = std::make_shared<NiceMock<MockDecoderFilter>>();\n}\n\nMockFilterConfigFactory::~MockFilterConfigFactory() = default;\n\nFilterFactoryCb MockFilterConfigFactory::createFilterFactoryFromProto(\n    const Protobuf::Message& proto_config, const std::string& stats_prefix,\n    Server::Configuration::FactoryContext& context) {\n  UNREFERENCED_PARAMETER(context);\n\n  config_struct_ = dynamic_cast<const ProtobufWkt::Struct&>(proto_config);\n  config_stat_prefix_ = stats_prefix;\n\n  return [this](FilterChainFactoryCallbacks& callbacks) -> void {\n    callbacks.addDecoderFilter(mock_filter_);\n  };\n}\n\n} // namespace ThriftFilters\n\nnamespace Router {\n\nMockRateLimitPolicyEntry::MockRateLimitPolicyEntry() {\n  ON_CALL(*this, disableKey()).WillByDefault(ReturnRef(disable_key_));\n}\nMockRateLimitPolicyEntry::~MockRateLimitPolicyEntry() = default;\n\nMockRateLimitPolicy::MockRateLimitPolicy() {\n  ON_CALL(*this, empty()).WillByDefault(Return(true));\n  ON_CALL(*this, getApplicableRateLimit(_)).WillByDefault(ReturnRef(rate_limit_policy_entry_));\n}\nMockRateLimitPolicy::~MockRateLimitPolicy() = default;\n\nMockRouteEntry::MockRouteEntry() {\n  ON_CALL(*this, clusterName()).WillByDefault(ReturnRef(cluster_name_));\n  ON_CALL(*this, rateLimitPolicy()).WillByDefault(ReturnRef(rate_limit_policy_));\n  ON_CALL(*this, clusterHeader()).WillByDefault(ReturnRef(cluster_header_));\n}\nMockRouteEntry::~MockRouteEntry() = default;\n\nMockRoute::MockRoute() { ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); }\nMockRoute::~MockRoute() = default;\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/router/router.h\"\n\n#include \"extensions/filters/network/thrift_proxy/conn_manager.h\"\n#include \"extensions/filters/network/thrift_proxy/conn_state.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/factory_base.h\"\n#include \"extensions/filters/network/thrift_proxy/filters/filter.h\"\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n#include \"extensions/filters/network/thrift_proxy/protocol.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router_ratelimit.h\"\n#include \"extensions/filters/network/thrift_proxy/transport.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass MockConfig : public Config {\npublic:\n  MockConfig();\n  ~MockConfig() override;\n\n  // ThriftProxy::Config\n  MOCK_METHOD(ThriftFilters::FilterChainFactory&, filterFactory, ());\n  MOCK_METHOD(ThriftFilterStats&, stats, ());\n  MOCK_METHOD(DecoderPtr, createDecoder, (DecoderCallbacks&));\n  MOCK_METHOD(Router::Config&, routerConfig, ());\n};\n\nclass MockTransport : public Transport {\npublic:\n  MockTransport();\n  ~MockTransport() override;\n\n  // ThriftProxy::Transport\n  MOCK_METHOD(const std::string&, name, (), (const));\n  MOCK_METHOD(TransportType, type, (), (const));\n  MOCK_METHOD(bool, decodeFrameStart, (Buffer::Instance&, MessageMetadata&));\n  MOCK_METHOD(bool, decodeFrameEnd, (Buffer::Instance&));\n  MOCK_METHOD(void, encodeFrame, (Buffer::Instance&, const MessageMetadata&, Buffer::Instance&));\n\n  std::string name_{\"mock\"};\n  TransportType type_{TransportType::Auto};\n};\n\nclass MockProtocol : public Protocol {\npublic:\n  MockProtocol();\n  ~MockProtocol() 
override;\n\n  // ThriftProxy::Protocol\n  MOCK_METHOD(const std::string&, name, (), (const));\n  MOCK_METHOD(ProtocolType, type, (), (const));\n  MOCK_METHOD(void, setType, (ProtocolType));\n  MOCK_METHOD(bool, readMessageBegin, (Buffer::Instance & buffer, MessageMetadata& metadata));\n  MOCK_METHOD(bool, readMessageEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(bool, readStructBegin, (Buffer::Instance & buffer, std::string& name));\n  MOCK_METHOD(bool, readStructEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(bool, readFieldBegin,\n              (Buffer::Instance & buffer, std::string& name, FieldType& field_type,\n               int16_t& field_id));\n  MOCK_METHOD(bool, readFieldEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(bool, readMapBegin,\n              (Buffer::Instance & buffer, FieldType& key_type, FieldType& value_type,\n               uint32_t& size));\n  MOCK_METHOD(bool, readMapEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(bool, readListBegin,\n              (Buffer::Instance & buffer, FieldType& elem_type, uint32_t& size));\n  MOCK_METHOD(bool, readListEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(bool, readSetBegin,\n              (Buffer::Instance & buffer, FieldType& elem_type, uint32_t& size));\n  MOCK_METHOD(bool, readSetEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(bool, readBool, (Buffer::Instance & buffer, bool& value));\n  MOCK_METHOD(bool, readByte, (Buffer::Instance & buffer, uint8_t& value));\n  MOCK_METHOD(bool, readInt16, (Buffer::Instance & buffer, int16_t& value));\n  MOCK_METHOD(bool, readInt32, (Buffer::Instance & buffer, int32_t& value));\n  MOCK_METHOD(bool, readInt64, (Buffer::Instance & buffer, int64_t& value));\n  MOCK_METHOD(bool, readDouble, (Buffer::Instance & buffer, double& value));\n  MOCK_METHOD(bool, readString, (Buffer::Instance & buffer, std::string& value));\n  MOCK_METHOD(bool, readBinary, (Buffer::Instance & buffer, std::string& value));\n\n  MOCK_METHOD(void, writeMessageBegin,\n              
(Buffer::Instance & buffer, const MessageMetadata& metadata));\n  MOCK_METHOD(void, writeMessageEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(void, writeStructBegin, (Buffer::Instance & buffer, const std::string& name));\n  MOCK_METHOD(void, writeStructEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(void, writeFieldBegin,\n              (Buffer::Instance & buffer, const std::string& name, FieldType field_type,\n               int16_t field_id));\n  MOCK_METHOD(void, writeFieldEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(void, writeMapBegin,\n              (Buffer::Instance & buffer, FieldType key_type, FieldType value_type, uint32_t size));\n  MOCK_METHOD(void, writeMapEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(void, writeListBegin,\n              (Buffer::Instance & buffer, FieldType elem_type, uint32_t size));\n  MOCK_METHOD(void, writeListEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(void, writeSetBegin, (Buffer::Instance & buffer, FieldType elem_type, uint32_t size));\n  MOCK_METHOD(void, writeSetEnd, (Buffer::Instance & buffer));\n  MOCK_METHOD(void, writeBool, (Buffer::Instance & buffer, bool value));\n  MOCK_METHOD(void, writeByte, (Buffer::Instance & buffer, uint8_t value));\n  MOCK_METHOD(void, writeInt16, (Buffer::Instance & buffer, int16_t value));\n  MOCK_METHOD(void, writeInt32, (Buffer::Instance & buffer, int32_t value));\n  MOCK_METHOD(void, writeInt64, (Buffer::Instance & buffer, int64_t value));\n  MOCK_METHOD(void, writeDouble, (Buffer::Instance & buffer, double value));\n  MOCK_METHOD(void, writeString, (Buffer::Instance & buffer, const std::string& value));\n  MOCK_METHOD(void, writeBinary, (Buffer::Instance & buffer, const std::string& value));\n  MOCK_METHOD(bool, supportsUpgrade, ());\n  MOCK_METHOD(DecoderEventHandlerSharedPtr, upgradeRequestDecoder, ());\n  MOCK_METHOD(DirectResponsePtr, upgradeResponse, (const DecoderEventHandler&));\n  MOCK_METHOD(ThriftObjectPtr, attemptUpgrade,\n              (Transport&, 
ThriftConnectionState&, Buffer::Instance&));\n  MOCK_METHOD(void, completeUpgrade, (ThriftConnectionState&, ThriftObject&));\n\n  std::string name_{\"mock\"};\n  ProtocolType type_{ProtocolType::Auto};\n};\n\nclass MockDecoderCallbacks : public DecoderCallbacks {\npublic:\n  MockDecoderCallbacks();\n  ~MockDecoderCallbacks() override;\n\n  // ThriftProxy::DecoderCallbacks\n  MOCK_METHOD(DecoderEventHandler&, newDecoderEventHandler, ());\n};\n\nclass MockDecoderEventHandler : public DecoderEventHandler {\npublic:\n  MockDecoderEventHandler();\n  ~MockDecoderEventHandler() override;\n\n  // ThriftProxy::DecoderEventHandler\n  MOCK_METHOD(FilterStatus, transportBegin, (MessageMetadataSharedPtr metadata));\n  MOCK_METHOD(FilterStatus, transportEnd, ());\n  MOCK_METHOD(FilterStatus, messageBegin, (MessageMetadataSharedPtr metadata));\n  MOCK_METHOD(FilterStatus, messageEnd, ());\n  MOCK_METHOD(FilterStatus, structBegin, (const absl::string_view name));\n  MOCK_METHOD(FilterStatus, structEnd, ());\n  MOCK_METHOD(FilterStatus, fieldBegin,\n              (const absl::string_view name, FieldType& msg_type, int16_t& field_id));\n  MOCK_METHOD(FilterStatus, fieldEnd, ());\n  MOCK_METHOD(FilterStatus, boolValue, (bool& value));\n  MOCK_METHOD(FilterStatus, byteValue, (uint8_t & value));\n  MOCK_METHOD(FilterStatus, int16Value, (int16_t & value));\n  MOCK_METHOD(FilterStatus, int32Value, (int32_t & value));\n  MOCK_METHOD(FilterStatus, int64Value, (int64_t & value));\n  MOCK_METHOD(FilterStatus, doubleValue, (double& value));\n  MOCK_METHOD(FilterStatus, stringValue, (absl::string_view value));\n  MOCK_METHOD(FilterStatus, mapBegin,\n              (FieldType & key_type, FieldType& value_type, uint32_t& size));\n  MOCK_METHOD(FilterStatus, mapEnd, ());\n  MOCK_METHOD(FilterStatus, listBegin, (FieldType & elem_type, uint32_t& size));\n  MOCK_METHOD(FilterStatus, listEnd, ());\n  MOCK_METHOD(FilterStatus, setBegin, (FieldType & elem_type, uint32_t& size));\n  
MOCK_METHOD(FilterStatus, setEnd, ());\n};\n\nclass MockDirectResponse : public DirectResponse {\npublic:\n  MockDirectResponse();\n  ~MockDirectResponse() override;\n\n  // ThriftProxy::DirectResponse\n  MOCK_METHOD(DirectResponse::ResponseType, encode,\n              (MessageMetadata&, Protocol&, Buffer::Instance&), (const));\n};\n\nclass MockThriftObject : public ThriftObject {\npublic:\n  MockThriftObject();\n  ~MockThriftObject() override;\n\n  MOCK_METHOD(ThriftFieldPtrList&, fields, (), (const));\n  MOCK_METHOD(bool, onData, (Buffer::Instance&));\n};\n\nnamespace Router {\nclass MockRoute;\n} // namespace Router\n\nnamespace ThriftFilters {\n\nclass MockFilterChainFactoryCallbacks : public FilterChainFactoryCallbacks {\npublic:\n  MockFilterChainFactoryCallbacks();\n  ~MockFilterChainFactoryCallbacks() override;\n\n  MOCK_METHOD(void, addDecoderFilter, (DecoderFilterSharedPtr));\n};\n\nclass MockDecoderFilter : public DecoderFilter {\npublic:\n  MockDecoderFilter();\n  ~MockDecoderFilter() override;\n\n  // ThriftProxy::ThriftFilters::DecoderFilter\n  MOCK_METHOD(void, onDestroy, ());\n  MOCK_METHOD(void, setDecoderFilterCallbacks, (DecoderFilterCallbacks & callbacks));\n  MOCK_METHOD(void, resetUpstreamConnection, ());\n\n  // ThriftProxy::DecoderEventHandler\n  MOCK_METHOD(FilterStatus, transportBegin, (MessageMetadataSharedPtr metadata));\n  MOCK_METHOD(FilterStatus, transportEnd, ());\n  MOCK_METHOD(FilterStatus, messageBegin, (MessageMetadataSharedPtr metadata));\n  MOCK_METHOD(FilterStatus, messageEnd, ());\n  MOCK_METHOD(FilterStatus, structBegin, (absl::string_view name));\n  MOCK_METHOD(FilterStatus, structEnd, ());\n  MOCK_METHOD(FilterStatus, fieldBegin,\n              (absl::string_view name, FieldType& msg_type, int16_t& field_id));\n  MOCK_METHOD(FilterStatus, fieldEnd, ());\n  MOCK_METHOD(FilterStatus, boolValue, (bool& value));\n  MOCK_METHOD(FilterStatus, byteValue, (uint8_t & value));\n  MOCK_METHOD(FilterStatus, int16Value, (int16_t & 
value));\n  MOCK_METHOD(FilterStatus, int32Value, (int32_t & value));\n  MOCK_METHOD(FilterStatus, int64Value, (int64_t & value));\n  MOCK_METHOD(FilterStatus, doubleValue, (double& value));\n  MOCK_METHOD(FilterStatus, stringValue, (absl::string_view value));\n  MOCK_METHOD(FilterStatus, mapBegin,\n              (FieldType & key_type, FieldType& value_type, uint32_t& size));\n  MOCK_METHOD(FilterStatus, mapEnd, ());\n  MOCK_METHOD(FilterStatus, listBegin, (FieldType & elem_type, uint32_t& size));\n  MOCK_METHOD(FilterStatus, listEnd, ());\n  MOCK_METHOD(FilterStatus, setBegin, (FieldType & elem_type, uint32_t& size));\n  MOCK_METHOD(FilterStatus, setEnd, ());\n};\n\nclass MockDecoderFilterCallbacks : public DecoderFilterCallbacks {\npublic:\n  MockDecoderFilterCallbacks();\n  ~MockDecoderFilterCallbacks() override;\n\n  // ThriftProxy::ThriftFilters::DecoderFilterCallbacks\n  MOCK_METHOD(uint64_t, streamId, (), (const));\n  MOCK_METHOD(const Network::Connection*, connection, (), (const));\n  MOCK_METHOD(void, continueDecoding, ());\n  MOCK_METHOD(Router::RouteConstSharedPtr, route, ());\n  MOCK_METHOD(TransportType, downstreamTransportType, (), (const));\n  MOCK_METHOD(ProtocolType, downstreamProtocolType, (), (const));\n  MOCK_METHOD(void, sendLocalReply, (const DirectResponse&, bool));\n  MOCK_METHOD(void, startUpstreamResponse, (Transport&, Protocol&));\n  MOCK_METHOD(ResponseStatus, upstreamData, (Buffer::Instance&));\n  MOCK_METHOD(void, resetDownstreamConnection, ());\n  MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ());\n\n  uint64_t stream_id_{1};\n  NiceMock<Network::MockConnection> connection_;\n  NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n  std::shared_ptr<Router::MockRoute> route_;\n};\n\nclass MockFilterConfigFactory : public NamedThriftFilterConfigFactory {\npublic:\n  MockFilterConfigFactory();\n  ~MockFilterConfigFactory() override;\n\n  FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& proto_config,\n             
                  const std::string& stats_prefix,\n                               Server::Configuration::FactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ProtobufWkt::Struct>();\n  }\n\n  std::string name() const override { return name_; }\n\n  ProtobufWkt::Struct config_struct_;\n  std::string config_stat_prefix_;\n\nprivate:\n  std::shared_ptr<MockDecoderFilter> mock_filter_;\n  const std::string name_;\n};\n\n} // namespace ThriftFilters\n\nnamespace Router {\n\nclass MockRateLimitPolicyEntry : public RateLimitPolicyEntry {\npublic:\n  MockRateLimitPolicyEntry();\n  ~MockRateLimitPolicyEntry() override;\n\n  MOCK_METHOD(uint32_t, stage, (), (const));\n  MOCK_METHOD(const std::string&, disableKey, (), (const));\n  MOCK_METHOD(void, populateDescriptors,\n              (const RouteEntry&, std::vector<RateLimit::Descriptor>&, const std::string&,\n               const MessageMetadata&, const Network::Address::Instance&),\n              (const));\n\n  std::string disable_key_;\n};\n\nclass MockRateLimitPolicy : public RateLimitPolicy {\npublic:\n  MockRateLimitPolicy();\n  ~MockRateLimitPolicy() override;\n\n  MOCK_METHOD(bool, empty, (), (const));\n  MOCK_METHOD(const std::vector<std::reference_wrapper<const RateLimitPolicyEntry>>&,\n              getApplicableRateLimit, (uint32_t), (const));\n\n  std::vector<std::reference_wrapper<const RateLimitPolicyEntry>> rate_limit_policy_entry_;\n};\n\nclass MockRouteEntry : public RouteEntry {\npublic:\n  MockRouteEntry();\n  ~MockRouteEntry() override;\n\n  // ThriftProxy::Router::RouteEntry\n  MOCK_METHOD(const std::string&, clusterName, (), (const));\n  MOCK_METHOD(const Envoy::Router::MetadataMatchCriteria*, metadataMatchCriteria, (), (const));\n  MOCK_METHOD(const Envoy::Router::TlsContextMatchCriteria*, tlsContextMatchCriteria, (), (const));\n  MOCK_METHOD(RateLimitPolicy&, rateLimitPolicy, (), (const));\n  MOCK_METHOD(bool, 
stripServiceName, (), (const));\n  MOCK_METHOD(const Http::LowerCaseString&, clusterHeader, (), (const));\n\n  std::string cluster_name_{\"fake_cluster\"};\n  Http::LowerCaseString cluster_header_{\"\"};\n  NiceMock<MockRateLimitPolicy> rate_limit_policy_;\n};\n\nclass MockRoute : public Route {\npublic:\n  MockRoute();\n  ~MockRoute() override;\n\n  // ThriftProxy::Router::Route\n  MOCK_METHOD(const RouteEntry*, routeEntry, (), (const));\n\n  NiceMock<MockRouteEntry> route_entry_;\n};\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/requirements.txt",
    "content": "thrift==0.13.0 \\\n    --hash=sha256:9af1c86bf73433afc6010ed376a6c6aca2b54099cc0d61895f640870a9ae7d89\nsix==1.15.0 \\\n    --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \\\n    --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/route_matcher_test.cc",
    "content": "#include \"envoy/config/filter/thrift/router/v2alpha1/router.pb.h\"\n#include \"envoy/config/filter/thrift/router/v2alpha1/router.pb.validate.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/route.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/route.pb.validate.h\"\n\n#include \"common/config/metadata.h\"\n\n#include \"extensions/filters/network/thrift_proxy/router/config.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\nnamespace {\n\nenvoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration\nparseRouteConfigurationFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) {\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration route_config;\n  TestUtility::loadFromYaml(yaml, route_config, false, avoid_boosting);\n  TestUtility::validate(route_config);\n  return route_config;\n}\n\nTEST(ThriftRouteMatcherTest, RouteByMethodNameWithNoInversion) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      cluster: \"cluster1\"\n  - match:\n      method_name: \"method2\"\n    route:\n      cluster: \"cluster2\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n  metadata.setMethodName(\"unknown\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n  metadata.setMethodName(\"METHOD1\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n\n  metadata.setMethodName(\"method1\");\n  RouteConstSharedPtr route = 
matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"method2\");\n  RouteConstSharedPtr route2 = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route2);\n  EXPECT_EQ(\"cluster2\", route2->routeEntry()->clusterName());\n}\n\nTEST(ThriftRouteMatcherTest, RouteByMethodNameWithInversion) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      cluster: \"cluster1\"\n  - match:\n      method_name: \"method2\"\n      invert: true\n    route:\n      cluster: \"cluster2\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  RouteConstSharedPtr route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster2\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"unknown\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster2\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"METHOD1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster2\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"method1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"method2\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n}\n\nTEST(ThriftRouteMatcherTest, RouteByAnyMethodNameWithNoInversion) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      cluster: \"cluster1\"\n  - match:\n      method_name: \"\"\n    route:\n      cluster: \"cluster2\"\n)EOF\";\n\n  
envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n\n  {\n    MessageMetadata metadata;\n    metadata.setMethodName(\"method1\");\n    RouteConstSharedPtr route = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route);\n    EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n\n    metadata.setMethodName(\"anything\");\n    RouteConstSharedPtr route2 = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route2);\n    EXPECT_EQ(\"cluster2\", route2->routeEntry()->clusterName());\n  }\n\n  {\n    MessageMetadata metadata;\n    RouteConstSharedPtr route2 = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route2);\n    EXPECT_EQ(\"cluster2\", route2->routeEntry()->clusterName());\n  }\n}\n\nTEST(ThriftRouteMatcherTest, RouteByAnyMethodNameWithInversion) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"\"\n      invert: true\n    route:\n      cluster: \"cluster2\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  EXPECT_THROW(new RouteMatcher(config), EnvoyException);\n}\n\nTEST(ThriftRouteMatcherTest, RouteByServiceNameWithNoInversion) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      cluster: \"cluster1\"\n  - match:\n      service_name: \"service2\"\n    route:\n      cluster: \"cluster2\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n  metadata.setMethodName(\"unknown\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 0));\n  metadata.setMethodName(\"METHOD1\");\n  EXPECT_EQ(nullptr, matcher.route(metadata, 
0));\n\n  metadata.setMethodName(\"service2:method1\");\n  RouteConstSharedPtr route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster2\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"service2:method2\");\n  RouteConstSharedPtr route2 = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route2);\n  EXPECT_EQ(\"cluster2\", route2->routeEntry()->clusterName());\n}\n\nTEST(ThriftRouteMatcherTest, RouteByServiceNameWithInversion) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      cluster: \"cluster1\"\n  - match:\n      service_name: \"service2\"\n      invert: true\n    route:\n      cluster: \"cluster2\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  RouteConstSharedPtr route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster2\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"unknown\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster2\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"METHOD1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster2\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"method1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n\n  metadata.setMethodName(\"service2:method1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n}\n\nTEST(ThriftRouteMatcherTest, RouteByAnyServiceNameWithNoInversion) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      cluster: \"cluster1\"\n  - match:\n      service_name: 
\"\"\n    route:\n      cluster: \"cluster2\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n\n  {\n    MessageMetadata metadata;\n    metadata.setMethodName(\"method1\");\n    RouteConstSharedPtr route = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route);\n    EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n\n    metadata.setMethodName(\"anything\");\n    RouteConstSharedPtr route2 = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route2);\n    EXPECT_EQ(\"cluster2\", route2->routeEntry()->clusterName());\n  }\n\n  {\n    MessageMetadata metadata;\n    RouteConstSharedPtr route2 = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route2);\n    EXPECT_EQ(\"cluster2\", route2->routeEntry()->clusterName());\n  }\n}\n\nTEST(ThriftRouteMatcherTest, RouteByAnyServiceNameWithInversion) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      service_name: \"\"\n      invert: true\n    route:\n      cluster: \"cluster2\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  EXPECT_THROW(new RouteMatcher(config), EnvoyException);\n}\n\nTEST(ThriftRouteMatcherTest, RouteByExactHeaderMatcher) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n      headers:\n      - name: \"x-header-1\"\n        exact_match: \"x-value-1\"\n    route:\n      cluster: \"cluster1\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  RouteConstSharedPtr route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.setMethodName(\"method1\");\n  route = matcher.route(metadata, 0);\n  
EXPECT_EQ(nullptr, route);\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-header-1\"), \"x-value-1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n}\n\nTEST(ThriftRouteMatcherTest, RouteByRegexHeaderMatcher) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n      headers:\n      - name: \"x-version\"\n        safe_regex_match:\n          google_re2: {}\n          regex: \"0.[5-9]\"\n    route:\n      cluster: \"cluster1\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  RouteConstSharedPtr route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.setMethodName(\"method1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-version\"), \"0.1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n  metadata.headers().remove(Http::LowerCaseString(\"x-version\"));\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-version\"), \"0.8\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n}\n\nTEST(ThriftRouteMatcherTest, RouteByRangeHeaderMatcher) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n      headers:\n      - name: \"x-user-id\"\n        range_match:\n          start: 100\n          end: 200\n    route:\n      cluster: \"cluster1\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  RouteConstSharedPtr route = 
matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.setMethodName(\"method1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-user-id\"), \"50\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n  metadata.headers().remove(Http::LowerCaseString(\"x-user-id\"));\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-user-id\"), \"199\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n}\n\nTEST(ThriftRouteMatcherTest, RouteByPresentHeaderMatcher) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n      headers:\n      - name: \"x-user-id\"\n        present_match: true\n    route:\n      cluster: \"cluster1\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  RouteConstSharedPtr route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.setMethodName(\"method1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-user-id\"), \"50\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n  metadata.headers().remove(Http::LowerCaseString(\"x-user-id\"));\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-user-id\"), \"\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n}\n\nTEST(ThriftRouteMatcherTest, RouteByPrefixHeaderMatcher) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n      headers:\n      - name: \"x-header-1\"\n    
    prefix_match: \"user_id:\"\n    route:\n      cluster: \"cluster1\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  RouteConstSharedPtr route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.setMethodName(\"method1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-header-1\"), \"500\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n  metadata.headers().remove(Http::LowerCaseString(\"x-header-1\"));\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-header-1\"), \"user_id:500\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n}\n\nTEST(ThriftRouteMatcherTest, RouteBySuffixHeaderMatcher) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n      headers:\n      - name: \"x-header-1\"\n        suffix_match: \"asdf\"\n    route:\n      cluster: \"cluster1\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  RouteConstSharedPtr route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.setMethodName(\"method1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-header-1\"), \"asdfvalue\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n  metadata.headers().remove(Http::LowerCaseString(\"x-header-1\"));\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-header-1\"), \"valueasdfvalue\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, 
route);\n  metadata.headers().remove(Http::LowerCaseString(\"x-header-1\"));\n\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-header-1\"), \"value:asdf\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n}\n\nTEST(ThriftRouteMatcherTest, RouteByClusterHeader) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"\"\n    route:\n      cluster_header: \"x-cluster\"\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  RouteConstSharedPtr route;\n\n  // No method nor header.\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  // Method, but no header.\n  metadata.setMethodName(\"method1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  // The wrong header is present.\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-something\"), \"cluster1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_EQ(nullptr, route);\n\n  // Header is present.\n  metadata.headers().addCopy(Http::LowerCaseString(\"x-cluster\"), \"cluster1\");\n  route = matcher.route(metadata, 0);\n  EXPECT_NE(nullptr, route);\n  EXPECT_EQ(\"cluster1\", route->routeEntry()->clusterName());\n}\n\nTEST(ThriftRouteMatcherTest, WeightedClusters) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      weighted_clusters:\n        clusters:\n          - name: cluster1\n            weight: 30\n          - name: cluster2\n            weight: 30\n          - name: cluster3\n            weight: 40\n  - match:\n      method_name: \"method2\"\n    route:\n      weighted_clusters:\n        clusters:\n          - name: cluster1\n            weight: 2000\n          - name: cluster2\n            weight: 3000\n     
     - name: cluster3\n            weight: 5000\n)EOF\";\n\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n\n  {\n    metadata.setMethodName(\"method1\");\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 29)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", matcher.route(metadata, 30)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", matcher.route(metadata, 59)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", matcher.route(metadata, 60)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", matcher.route(metadata, 99)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 100)->routeEntry()->clusterName());\n  }\n\n  {\n    metadata.setMethodName(\"method2\");\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 0)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 1999)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", matcher.route(metadata, 2000)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster2\", matcher.route(metadata, 4999)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", matcher.route(metadata, 5000)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster3\", matcher.route(metadata, 9999)->routeEntry()->clusterName());\n    EXPECT_EQ(\"cluster1\", matcher.route(metadata, 10000)->routeEntry()->clusterName());\n  }\n}\n\nTEST(ThriftRouteMatcherTest, WeightedClusterMissingWeight) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method2\"\n    route:\n      weighted_clusters:\n        clusters:\n          - name: cluster1\n            weight: 20000\n          - name: cluster2\n          - name: cluster3\n            weight: 5000\n)EOF\";\n\n  const 
envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n  EXPECT_THROW(RouteMatcher m(config), EnvoyException);\n}\n\nTEST(ThriftRouteMatcherTest, RouteActionMetadataMatch) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      cluster: cluster1\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k1: v1\n            k2: v2\n  - match:\n      method_name: \"method2\"\n    route:\n      cluster: cluster2\n)EOF\";\n\n  const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n\n  // match with metadata\n  {\n    metadata.setMethodName(\"method1\");\n    RouteConstSharedPtr route = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n\n    const Envoy::Router::MetadataMatchCriteria* criteria =\n        route->routeEntry()->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n    const std::vector<Envoy::Router::MetadataMatchCriterionConstSharedPtr>& mmc =\n        criteria->metadataMatchCriteria();\n    EXPECT_EQ(2, mmc.size());\n\n    ProtobufWkt::Value v1, v2;\n    v1.set_string_value(\"v1\");\n    v2.set_string_value(\"v2\");\n    HashedValue hv1(v1), hv2(v2);\n\n    EXPECT_EQ(\"k1\", mmc[0]->name());\n    EXPECT_EQ(hv1, mmc[0]->value());\n\n    EXPECT_EQ(\"k2\", mmc[1]->name());\n    EXPECT_EQ(hv2, mmc[1]->value());\n  }\n\n  // match with no metadata\n  {\n    metadata.setMethodName(\"method2\");\n    RouteConstSharedPtr route = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n    EXPECT_EQ(nullptr, route->routeEntry()->metadataMatchCriteria());\n  }\n}\n\nTEST(ThriftRouteMatcherTest, WeightedClusterMetadataMatch) {\n  const 
std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      weighted_clusters:\n        clusters:\n          - name: cluster1\n            weight: 2000\n            metadata_match:\n              filter_metadata:\n                envoy.lb:\n                  k1: v1\n                  k2: v2\n          - name: cluster2\n            weight: 3000\n            metadata_match:\n              filter_metadata:\n                not.envoy.lb:\n                  k1: v1\n                  k2: v2\n          - name: cluster3\n            weight: 5000\n            metadata_match:\n              filter_metadata:\n                envoy.lb:\n                  k3: v3\n)EOF\";\n\n  const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  metadata.setMethodName(\"method1\");\n  ProtobufWkt::Value v1, v2, v3;\n  v1.set_string_value(\"v1\");\n  v2.set_string_value(\"v2\");\n  v3.set_string_value(\"v3\");\n  HashedValue hv1(v1), hv2(v2), hv3(v3);\n\n  // match with multiple sets of weighted cluster metadata criteria defined\n  {\n    RouteConstSharedPtr route = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n\n    const Envoy::Router::MetadataMatchCriteria* criteria =\n        route->routeEntry()->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n    const std::vector<Envoy::Router::MetadataMatchCriterionConstSharedPtr>& mmc =\n        criteria->metadataMatchCriteria();\n    EXPECT_EQ(2, mmc.size());\n\n    EXPECT_EQ(\"k1\", mmc[0]->name());\n    EXPECT_EQ(hv1, mmc[0]->value());\n\n    EXPECT_EQ(\"k2\", mmc[1]->name());\n    EXPECT_EQ(hv2, mmc[1]->value());\n\n    EXPECT_EQ(Http::LowerCaseString{\"\"}, route->routeEntry()->clusterHeader());\n  }\n\n  // match with weighted cluster with different metadata key\n  {\n    
RouteConstSharedPtr route = matcher.route(metadata, 2001);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n    EXPECT_EQ(nullptr, route->routeEntry()->metadataMatchCriteria());\n  }\n\n  // weighted cluster match with single metadata entry\n  {\n    RouteConstSharedPtr route = matcher.route(metadata, 5001);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n\n    const Envoy::Router::MetadataMatchCriteria* criteria =\n        route->routeEntry()->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n    const std::vector<Envoy::Router::MetadataMatchCriterionConstSharedPtr>& mmc =\n        criteria->metadataMatchCriteria();\n    EXPECT_EQ(1, mmc.size());\n\n    EXPECT_EQ(\"k3\", mmc[0]->name());\n    EXPECT_EQ(hv3, mmc[0]->value());\n  }\n}\n\nTEST(ThriftRouteMatcherTest, WeightedClusterRouteActionMetadataMatchMerged) {\n  const std::string yaml = R\"EOF(\nname: config\nroutes:\n  - match:\n      method_name: \"method1\"\n    route:\n      metadata_match:\n        filter_metadata:\n          envoy.lb:\n            k1: v1\n            k2: v2\n      weighted_clusters:\n        clusters:\n          - name: cluster1\n            weight: 2000\n            metadata_match:\n              filter_metadata:\n                envoy.lb:\n                  k3: v3\n          - name: cluster2\n            weight: 3000\n          - name: cluster3\n            weight: 5000\n            metadata_match:\n              filter_metadata:\n                envoy.lb:\n                  k2: v3\n)EOF\";\n\n  const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config =\n      parseRouteConfigurationFromV3Yaml(yaml);\n  RouteMatcher matcher(config);\n  MessageMetadata metadata;\n  metadata.setMethodName(\"method1\");\n  ProtobufWkt::Value v1, v2, v3;\n  v1.set_string_value(\"v1\");\n  v2.set_string_value(\"v2\");\n  v3.set_string_value(\"v3\");\n  HashedValue hv1(v1), hv2(v2), hv3(v3);\n\n  // match 
with weighted cluster metadata and route action metadata\n  {\n    RouteConstSharedPtr route = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n\n    const Envoy::Router::MetadataMatchCriteria* criteria =\n        route->routeEntry()->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n    const std::vector<Envoy::Router::MetadataMatchCriterionConstSharedPtr>& mmc =\n        criteria->metadataMatchCriteria();\n    EXPECT_EQ(3, mmc.size());\n\n    EXPECT_EQ(\"k1\", mmc[0]->name());\n    EXPECT_EQ(hv1, mmc[0]->value());\n\n    EXPECT_EQ(\"k2\", mmc[1]->name());\n    EXPECT_EQ(hv2, mmc[1]->value());\n\n    EXPECT_EQ(\"k3\", mmc[2]->name());\n    EXPECT_EQ(hv3, mmc[2]->value());\n  }\n\n  // match with just route action metadata\n  {\n    RouteConstSharedPtr route = matcher.route(metadata, 2001);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n\n    const Envoy::Router::MetadataMatchCriteria* criteria =\n        route->routeEntry()->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n    const std::vector<Envoy::Router::MetadataMatchCriterionConstSharedPtr>& mmc =\n        criteria->metadataMatchCriteria();\n    EXPECT_EQ(2, mmc.size());\n\n    EXPECT_EQ(\"k1\", mmc[0]->name());\n    EXPECT_EQ(hv1, mmc[0]->value());\n\n    EXPECT_EQ(\"k2\", mmc[1]->name());\n    EXPECT_EQ(hv2, mmc[1]->value());\n  }\n\n  // match with weighted cluster metadata and route action metadata merged\n  {\n    RouteConstSharedPtr route = matcher.route(metadata, 5001);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n\n    const Envoy::Router::MetadataMatchCriteria* criteria =\n        route->routeEntry()->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n    const std::vector<Envoy::Router::MetadataMatchCriterionConstSharedPtr>& mmc =\n        criteria->metadataMatchCriteria();\n    EXPECT_EQ(2, mmc.size());\n\n    EXPECT_EQ(\"k1\", 
mmc[0]->name());\n    EXPECT_EQ(hv1, mmc[0]->value());\n\n    EXPECT_EQ(\"k2\", mmc[1]->name());\n    EXPECT_EQ(hv3, mmc[1]->value());\n  }\n}\n\n// Test that the route entry has metadata match criteria when using a cluster header.\nTEST(ThriftRouteMatcherTest, ClusterHeaderMetadataMatch) {\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config;\n  {\n    config.set_name(\"config\");\n    auto* route = config.add_routes();\n    route->mutable_match()->set_method_name(\"method1\");\n    auto* action = route->mutable_route();\n    action->set_cluster_header(\"header_name\");\n    auto* metadata = action->mutable_metadata_match();\n    Envoy::Config::Metadata::mutableMetadataValue(*metadata, \"envoy.lb\", \"k1\")\n        .set_string_value(\"v1\");\n    Envoy::Config::Metadata::mutableMetadataValue(*metadata, \"envoy.lb\", \"k2\")\n        .set_string_value(\"v2\");\n\n    auto* route2 = config.add_routes();\n    route2->mutable_match()->set_method_name(\"method2\");\n    auto* action2 = route2->mutable_route();\n    action2->set_cluster(\"cluster2\");\n  }\n\n  RouteMatcher matcher(config);\n\n  // match with metadata\n  {\n    MessageMetadata metadata;\n    metadata.setMethodName(\"method1\");\n    metadata.headers().addCopy(Http::LowerCaseString{\"header_name\"}, \"cluster1\");\n    RouteConstSharedPtr route = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n\n    EXPECT_EQ(Http::LowerCaseString{\"header_name\"}, route->routeEntry()->clusterHeader());\n\n    const Envoy::Router::MetadataMatchCriteria* criteria =\n        route->routeEntry()->metadataMatchCriteria();\n    EXPECT_NE(nullptr, criteria);\n    const std::vector<Envoy::Router::MetadataMatchCriterionConstSharedPtr>& mmc =\n        criteria->metadataMatchCriteria();\n    EXPECT_EQ(2, mmc.size());\n\n    ProtobufWkt::Value v1, v2;\n    v1.set_string_value(\"v1\");\n    v2.set_string_value(\"v2\");\n    HashedValue 
hv1(v1), hv2(v2);\n\n    EXPECT_EQ(\"k1\", mmc[0]->name());\n    EXPECT_EQ(hv1, mmc[0]->value());\n\n    EXPECT_EQ(\"k2\", mmc[1]->name());\n    EXPECT_EQ(hv2, mmc[1]->value());\n  }\n\n  // match with no metadata\n  {\n    MessageMetadata metadata;\n    metadata.setMethodName(\"method2\");\n    RouteConstSharedPtr route = matcher.route(metadata, 0);\n    EXPECT_NE(nullptr, route);\n    EXPECT_NE(nullptr, route->routeEntry());\n    EXPECT_EQ(nullptr, route->routeEntry()->metadataMatchCriteria());\n\n    EXPECT_EQ(Http::LowerCaseString{\"\"}, route->routeEntry()->clusterHeader());\n  }\n}\n\n// Tests that weighted cluster route entries can be configured to strip the service name.\nTEST(RouteMatcherTest, WeightedClusterWithStripServiceEnabled) {\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config;\n  {\n    config.set_name(\"config\");\n    auto* route = config.add_routes();\n    route->mutable_match()->set_method_name(\"method1\");\n    auto* action = route->mutable_route();\n    auto* cluster1 = action->mutable_weighted_clusters()->add_clusters();\n    cluster1->set_name(\"cluster1\");\n    cluster1->mutable_weight()->set_value(50);\n    auto* cluster2 = action->mutable_weighted_clusters()->add_clusters();\n    cluster2->set_name(\"cluster2\");\n    cluster2->mutable_weight()->set_value(50);\n    action->set_strip_service_name(true);\n  }\n\n  RouteMatcher matcher(config);\n\n  MessageMetadata metadata;\n  metadata.setMethodName(\"method1\");\n\n  EXPECT_TRUE(matcher.route(metadata, 0)->routeEntry()->stripServiceName());\n}\n\n// Tests that dynamic route entries can be configured to strip the service name.\nTEST(RouteMatcherTest, ClusterHeaderWithStripServiceEnabled) {\n  envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config;\n  {\n    config.set_name(\"config\");\n    auto* route = config.add_routes();\n    route->mutable_match()->set_method_name(\"method1\");\n    auto* action = route->mutable_route();\n  
  action->set_cluster_header(\"header_name\");\n    action->set_strip_service_name(true);\n  }\n\n  RouteMatcher matcher(config);\n\n  MessageMetadata metadata;\n  metadata.setMethodName(\"method1\");\n  metadata.headers().addCopy(Http::LowerCaseString{\"header_name\"}, \"cluster1\");\n\n  EXPECT_TRUE(matcher.route(metadata, 0)->routeEntry()->stripServiceName());\n}\n\n} // namespace\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.validate.h\"\n#include \"envoy/ratelimit/ratelimit.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/filters/network/thrift_proxy/config.h\"\n#include \"extensions/filters/network/thrift_proxy/metadata.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/mocks/ratelimit/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::ContainerEq;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\nnamespace {\n\nclass ThriftRateLimitConfigurationTest : public testing::Test {\npublic:\n  void initialize(const std::string& yaml, bool avoid_boosting = true) {\n    envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config;\n    TestUtility::loadFromYaml(yaml, config, false, avoid_boosting);\n    initialize(config);\n  }\n\n  void initialize(envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy& config) {\n    config_ = std::make_unique<ThriftProxy::ConfigImpl>(config, factory_context_);\n  }\n\n  MessageMetadata& genMetadata(const std::string& method_name) {\n    metadata_ = std::make_shared<MessageMetadata>();\n    metadata_->setMethodName(method_name);\n    return *metadata_;\n  }\n\n  std::unique_ptr<ThriftProxy::ConfigImpl> config_;\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context_;\n  Network::Address::Ipv4Instance default_remote_address_{\"10.0.0.1\"};\n  MessageMetadataSharedPtr 
metadata_;\n};\n\nTEST_F(ThriftRateLimitConfigurationTest, NoApplicableRateLimit) {\n  const std::string yaml = R\"EOF(\nroute_config:\n  name: config\n  routes:\n    - match: { method_name: \"foo\" }\n      route:\n        cluster: thrift\n        rate_limits:\n          - actions:\n              - remote_address: {}\n    - match: { method_name: \"bar\" }\n      route: { cluster: thrift }\n)EOF\";\n\n  initialize(yaml);\n\n  EXPECT_EQ(0U, config_->route(genMetadata(\"bar\"), 0)\n                    ->routeEntry()\n                    ->rateLimitPolicy()\n                    .getApplicableRateLimit(0)\n                    .size());\n}\n\nTEST_F(ThriftRateLimitConfigurationTest, NoRateLimitPolicy) {\n  const std::string yaml = R\"EOF(\nroute_config:\n  name: config\n  routes:\n    - match: { method_name: \"bar\" }\n      route: { cluster: thrift }\n)EOF\";\n\n  initialize(yaml);\n\n  auto route = config_->route(genMetadata(\"bar\"), 0)->routeEntry();\n  EXPECT_EQ(0U, route->rateLimitPolicy().getApplicableRateLimit(0).size());\n  EXPECT_TRUE(route->rateLimitPolicy().empty());\n}\n\nTEST_F(ThriftRateLimitConfigurationTest, TestGetApplicableRateLimit) {\n  const std::string yaml = R\"EOF(\nroute_config:\n  name: config\n  routes:\n    - match: { method_name: \"foo\" }\n      route:\n        cluster: thrift\n        rate_limits:\n          - actions:\n              - remote_address: {}\n)EOF\";\n\n  initialize(yaml);\n\n  auto route = config_->route(genMetadata(\"foo\"), 0)->routeEntry();\n  EXPECT_FALSE(route->rateLimitPolicy().empty());\n  std::vector<std::reference_wrapper<const RateLimitPolicyEntry>> rate_limits =\n      route->rateLimitPolicy().getApplicableRateLimit(0);\n  EXPECT_EQ(1U, rate_limits.size());\n\n  std::vector<Envoy::RateLimit::Descriptor> descriptors;\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route, descriptors, \"\", *metadata_, default_remote_address_);\n  }\n\n  
EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"remote_address\", \"10.0.0.1\"}}}}),\n              ContainerEq(descriptors));\n}\n\nTEST_F(ThriftRateLimitConfigurationTest, Stages) {\n  const std::string yaml = R\"EOF(\nroute_config:\n  name: config\n  routes:\n    - match: { method_name: \"foo\" }\n      route:\n        cluster: thrift\n        rate_limits:\n          - stage: 1\n            actions:\n              - remote_address: {}\n          - actions:\n              - destination_cluster: {}\n          - actions:\n              - destination_cluster: {}\n              - source_cluster: {}\n)EOF\";\n\n  initialize(yaml);\n\n  auto route = config_->route(genMetadata(\"foo\"), 0)->routeEntry();\n  std::vector<std::reference_wrapper<const RateLimitPolicyEntry>> rate_limits =\n      route->rateLimitPolicy().getApplicableRateLimit(0);\n  EXPECT_EQ(2U, rate_limits.size());\n\n  std::vector<Envoy::RateLimit::Descriptor> descriptors;\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route, descriptors, \"service_cluster\", *metadata_,\n                                   default_remote_address_);\n  }\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>(\n                  {{{{\"destination_cluster\", \"thrift\"}}},\n                   {{{\"destination_cluster\", \"thrift\"}, {\"source_cluster\", \"service_cluster\"}}}}),\n              testing::ContainerEq(descriptors));\n\n  descriptors.clear();\n  rate_limits = route->rateLimitPolicy().getApplicableRateLimit(1);\n  EXPECT_EQ(1U, rate_limits.size());\n\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route, descriptors, \"service_cluster\", *metadata_,\n                                   default_remote_address_);\n  }\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"remote_address\", \"10.0.0.1\"}}}}),\n              testing::ContainerEq(descriptors));\n\n  rate_limits = 
route->rateLimitPolicy().getApplicableRateLimit(10);\n  EXPECT_TRUE(rate_limits.empty());\n}\n\n// Test that rate limiter stages work with weighted cluster route entries.\nTEST_F(ThriftRateLimitConfigurationTest, WeightedClusterStages) {\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config;\n  {\n    auto* route_config = config.mutable_route_config();\n    route_config->set_name(\"config\");\n    auto* route = route_config->add_routes();\n    route->mutable_match()->set_method_name(\"foo\");\n    auto* action = route->mutable_route();\n    auto* cluster1 = action->mutable_weighted_clusters()->add_clusters();\n    cluster1->set_name(\"thrift\");\n    cluster1->mutable_weight()->set_value(50);\n    auto* cluster2 = action->mutable_weighted_clusters()->add_clusters();\n    cluster2->set_name(\"thrift2\");\n    cluster2->mutable_weight()->set_value(50);\n\n    auto* limit1 = action->add_rate_limits();\n    limit1->mutable_stage()->set_value(1);\n    limit1->add_actions()->mutable_remote_address();\n\n    action->add_rate_limits()->add_actions()->mutable_destination_cluster();\n\n    auto* limit3 = action->add_rate_limits();\n    limit3->add_actions()->mutable_destination_cluster();\n    limit3->add_actions()->mutable_source_cluster();\n  }\n  initialize(config);\n\n  auto route = config_->route(genMetadata(\"foo\"), 0)->routeEntry();\n  std::vector<std::reference_wrapper<const RateLimitPolicyEntry>> rate_limits =\n      route->rateLimitPolicy().getApplicableRateLimit(0);\n  EXPECT_EQ(2U, rate_limits.size());\n\n  std::vector<Envoy::RateLimit::Descriptor> descriptors;\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route, descriptors, \"service_cluster\", *metadata_,\n                                   default_remote_address_);\n  }\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>(\n                  {{{{\"destination_cluster\", \"thrift\"}}},\n                   
{{{\"destination_cluster\", \"thrift\"}, {\"source_cluster\", \"service_cluster\"}}}}),\n              testing::ContainerEq(descriptors));\n\n  descriptors.clear();\n  rate_limits = route->rateLimitPolicy().getApplicableRateLimit(1);\n  EXPECT_EQ(1U, rate_limits.size());\n\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route, descriptors, \"service_cluster\", *metadata_,\n                                   default_remote_address_);\n  }\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"remote_address\", \"10.0.0.1\"}}}}),\n              testing::ContainerEq(descriptors));\n\n  rate_limits = route->rateLimitPolicy().getApplicableRateLimit(10);\n  EXPECT_TRUE(rate_limits.empty());\n}\n\n// Test that rate limiter stages work with dynamic route entries.\nTEST_F(ThriftRateLimitConfigurationTest, ClusterHeaderStages) {\n  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config;\n  {\n    auto* route_config = config.mutable_route_config();\n    route_config->set_name(\"config\");\n    auto* route = route_config->add_routes();\n    route->mutable_match()->set_method_name(\"foo\");\n    auto* action = route->mutable_route();\n    action->set_cluster_header(\"header_name\");\n\n    auto* limit1 = action->add_rate_limits();\n    limit1->mutable_stage()->set_value(1);\n    limit1->add_actions()->mutable_remote_address();\n\n    action->add_rate_limits()->add_actions()->mutable_destination_cluster();\n\n    auto* limit3 = action->add_rate_limits();\n    limit3->add_actions()->mutable_destination_cluster();\n    limit3->add_actions()->mutable_source_cluster();\n  }\n  initialize(config);\n\n  auto& metadata = genMetadata(\"foo\");\n  metadata.headers().addCopy(Http::LowerCaseString{\"header_name\"}, \"thrift\");\n\n  // Keep hold of route, it's a newly minted shared pointer.\n  auto route = config_->route(metadata, 0);\n  auto* route_entry = route->routeEntry();\n\n  
std::vector<std::reference_wrapper<const RateLimitPolicyEntry>> rate_limits =\n      route_entry->rateLimitPolicy().getApplicableRateLimit(0);\n\n  EXPECT_EQ(2U, rate_limits.size());\n\n  std::vector<Envoy::RateLimit::Descriptor> descriptors;\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route_entry, descriptors, \"service_cluster\", *metadata_,\n                                   default_remote_address_);\n  }\n\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>(\n                  {{{{\"destination_cluster\", \"thrift\"}}},\n                   {{{\"destination_cluster\", \"thrift\"}, {\"source_cluster\", \"service_cluster\"}}}}),\n              testing::ContainerEq(descriptors));\n\n  descriptors.clear();\n  rate_limits = route_entry->rateLimitPolicy().getApplicableRateLimit(1);\n  EXPECT_EQ(1U, rate_limits.size());\n\n  for (const RateLimitPolicyEntry& rate_limit : rate_limits) {\n    rate_limit.populateDescriptors(*route_entry, descriptors, \"service_cluster\", *metadata_,\n                                   default_remote_address_);\n  }\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"remote_address\", \"10.0.0.1\"}}}}),\n              testing::ContainerEq(descriptors));\n\n  rate_limits = route_entry->rateLimitPolicy().getApplicableRateLimit(10);\n  EXPECT_TRUE(rate_limits.empty());\n}\n\nclass ThriftRateLimitPolicyEntryTest : public testing::Test {\npublic:\n  void initialize(const std::string& yaml) {\n    envoy::config::route::v3::RateLimit rate_limit;\n    TestUtility::loadFromYaml(yaml, rate_limit);\n\n    rate_limit_entry_ = std::make_unique<RateLimitPolicyEntryImpl>(rate_limit);\n    descriptors_.clear();\n  }\n\n  std::unique_ptr<RateLimitPolicyEntryImpl> rate_limit_entry_;\n  MessageMetadata metadata_;\n  NiceMock<MockRouteEntry> route_;\n  std::vector<Envoy::RateLimit::Descriptor> descriptors_;\n  Network::Address::Ipv4Instance 
default_remote_address_{\"10.0.0.1\"};\n};\n\nTEST_F(ThriftRateLimitPolicyEntryTest, RateLimitPolicyEntryMembers) {\n  std::string yaml = R\"EOF(\nstage: 2\ndisable_key: \"no_ratelimit\"\nactions:\n  - remote_address: {}\n  )EOF\";\n\n  initialize(yaml);\n\n  EXPECT_EQ(2UL, rate_limit_entry_->stage());\n  EXPECT_EQ(\"no_ratelimit\", rate_limit_entry_->disableKey());\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, RemoteAddressAction) {\n  std::string yaml = R\"EOF(\nactions:\n  - remote_address: {}\n  )EOF\";\n\n  initialize(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", metadata_,\n                                         default_remote_address_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"remote_address\", \"10.0.0.1\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, RemoteAddressActionNoDescriptorIfPipeAddr) {\n  std::string yaml = R\"EOF(\nactions:\n  - remote_address: {}\n  )EOF\";\n\n  initialize(yaml);\n\n  Network::Address::PipeInstance pipe_address(\"/hello\");\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"\", metadata_, pipe_address);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, SourceClusterAction) {\n  std::string yaml = R\"EOF(\nactions:\n  - source_cluster: {}\n  )EOF\";\n\n  initialize(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_THAT(\n      std::vector<Envoy::RateLimit::Descriptor>({{{{\"source_cluster\", \"service_cluster\"}}}}),\n      testing::ContainerEq(descriptors_));\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, DestinationClusterAction) {\n  std::string yaml = R\"EOF(\nactions:\n  - destination_cluster: {}\n  )EOF\";\n\n  initialize(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n            
                             default_remote_address_);\n  EXPECT_THAT(\n      std::vector<Envoy::RateLimit::Descriptor>({{{{\"destination_cluster\", \"fake_cluster\"}}}}),\n      testing::ContainerEq(descriptors_));\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, RequestHeadersAction) {\n  std::string yaml = R\"EOF(\nactions:\n  - request_headers:\n      header_name: x-header-name\n      descriptor_key: my_header_name\n  )EOF\";\n\n  initialize(yaml);\n  metadata_.headers().addCopy(Http::LowerCaseString{\"x-header-name\"}, \"test_value\");\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"my_header_name\", \"test_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, RequestHeadersActionNoMatch) {\n  std::string yaml = R\"EOF(\nactions:\n  - request_headers:\n      header_name: x-header-name\n      descriptor_key: my_header_name\n  )EOF\";\n\n  initialize(yaml);\n  metadata_.headers().addCopy(Http::LowerCaseString{\"x-not-header-name\"}, \"test_value\");\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, RequestHeadersActionMethodName) {\n  std::string yaml = R\"EOF(\nactions:\n  - request_headers:\n      header_name: \":method-name\"\n      descriptor_key: method_name\n  )EOF\";\n\n  initialize(yaml);\n  metadata_.setMethodName(\"foo\");\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"method_name\", \"foo\"}}}}),\n              
testing::ContainerEq(descriptors_));\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, RequestHeadersActionMethodNameMissing) {\n  std::string yaml = R\"EOF(\nactions:\n  - request_headers:\n      header_name: \":method-name\"\n      descriptor_key: method_name\n  )EOF\";\n\n  initialize(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, GenericKeyAction) {\n  std::string yaml = R\"EOF(\nactions:\n  - generic_key:\n      descriptor_value: fake_key\n  )EOF\";\n\n  initialize(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"generic_key\", \"fake_key\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, HeaderValueActionMatch) {\n  std::string yaml = R\"EOF(\nactions:\n  - header_value_match:\n      descriptor_value: fake_value\n      headers:\n        - name: x-header-name\n          exact_match: test_value\n  )EOF\";\n\n  initialize(yaml);\n  metadata_.headers().addCopy(Http::LowerCaseString{\"x-header-name\"}, \"test_value\");\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"header_match\", \"fake_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, HeaderValueActionValueMismatch) {\n  std::string yaml = R\"EOF(\nactions:\n  - header_value_match:\n      descriptor_value: fake_value\n      headers:\n        - name: x-header-name\n          exact_match: test_value\n  )EOF\";\n\n  initialize(yaml);\n  
metadata_.headers().addCopy(Http::LowerCaseString{\"x-header-name\"}, \"not_test_value\");\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, HeaderValueActionNegateMatch) {\n  std::string yaml = R\"EOF(\nactions:\n  - header_value_match:\n      descriptor_value: fake_value\n      expect_match: false\n      headers:\n        - name: x-header-name\n          exact_match: test_value\n  )EOF\";\n\n  initialize(yaml);\n  metadata_.headers().addCopy(Http::LowerCaseString{\"x-header-name\"}, \"test_value\");\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, HeaderValueActionNegatedMatchProducesDescriptors) {\n  std::string yaml = R\"EOF(\nactions:\n  - header_value_match:\n      descriptor_value: fake_value\n      expect_match: false\n      headers:\n        - name: x-header-name\n          exact_match: test_value\n  )EOF\";\n\n  initialize(yaml);\n  metadata_.headers().addCopy(Http::LowerCaseString{\"x-header-name\"}, \"not_test_value\");\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_THAT(std::vector<Envoy::RateLimit::Descriptor>({{{{\"header_match\", \"fake_value\"}}}}),\n              testing::ContainerEq(descriptors_));\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, CompoundAction) {\n  std::string yaml = R\"EOF(\nactions:\n  - destination_cluster: {}\n  - source_cluster: {}\n  )EOF\";\n\n  initialize(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         
default_remote_address_);\n  EXPECT_THAT(\n      std::vector<Envoy::RateLimit::Descriptor>(\n          {{{{\"destination_cluster\", \"fake_cluster\"}, {\"source_cluster\", \"service_cluster\"}}}}),\n      testing::ContainerEq(descriptors_));\n}\n\nTEST_F(ThriftRateLimitPolicyEntryTest, CompoundActionNoDescriptor) {\n  std::string yaml = R\"EOF(\nactions:\n  - destination_cluster: {}\n  - header_value_match:\n      descriptor_value: fake_value\n      headers:\n        - name: x-header-name\n          exact_match: test_value\n  )EOF\";\n\n  initialize(yaml);\n\n  rate_limit_entry_->populateDescriptors(route_, descriptors_, \"service_cluster\", metadata_,\n                                         default_remote_address_);\n  EXPECT_TRUE(descriptors_.empty());\n}\n\n} // namespace\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/router_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/filter/thrift/router/v2alpha1/router.pb.h\"\n#include \"envoy/config/filter/thrift/router/v2alpha1/router.pb.validate.h\"\n#include \"envoy/tcp/conn_pool.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/app_exception_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/router/config.h\"\n#include \"extensions/filters/network/thrift_proxy/router/router_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ContainsRegex;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::Values;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace Router {\nnamespace {\n\nclass TestNamedTransportConfigFactory : public NamedTransportConfigFactory {\npublic:\n  TestNamedTransportConfigFactory(std::function<MockTransport*()> f) : f_(f) {}\n\n  TransportPtr createTransport() override { return TransportPtr{f_()}; }\n  std::string name() const override { return TransportNames::get().FRAMED; }\n\n  std::function<MockTransport*()> f_;\n};\n\nclass TestNamedProtocolConfigFactory : public NamedProtocolConfigFactory {\npublic:\n  TestNamedProtocolConfigFactory(std::function<MockProtocol*()> f) : f_(f) {}\n\n  ProtocolPtr createProtocol() override { return ProtocolPtr{f_()}; }\n  std::string name() const override { return ProtocolNames::get().BINARY; }\n\n  std::function<MockProtocol*()> f_;\n};\n\n} // 
namespace\n\nclass ThriftRouterTestBase {\npublic:\n  ThriftRouterTestBase()\n      : transport_factory_([&]() -> MockTransport* {\n          ASSERT(transport_ == nullptr);\n          transport_ = new NiceMock<MockTransport>();\n          if (mock_transport_cb_) {\n            mock_transport_cb_(transport_);\n          }\n          return transport_;\n        }),\n        protocol_factory_([&]() -> MockProtocol* {\n          ASSERT(protocol_ == nullptr);\n          protocol_ = new NiceMock<MockProtocol>();\n          if (mock_protocol_cb_) {\n            mock_protocol_cb_(protocol_);\n          }\n          return protocol_;\n        }),\n        transport_register_(transport_factory_), protocol_register_(protocol_factory_) {}\n\n  void initializeRouter() {\n    route_ = new NiceMock<MockRoute>();\n    route_ptr_.reset(route_);\n\n    router_ = std::make_unique<Router>(context_.clusterManager(), \"test\", context_.scope());\n\n    EXPECT_EQ(nullptr, router_->downstreamConnection());\n\n    router_->setDecoderFilterCallbacks(callbacks_);\n  }\n\n  void initializeMetadata(MessageType msg_type, std::string method = \"method\") {\n    msg_type_ = msg_type;\n\n    metadata_ = std::make_shared<MessageMetadata>();\n    metadata_->setMethodName(method);\n    metadata_->setMessageType(msg_type_);\n    metadata_->setSequenceId(1);\n  }\n\n  void startRequest(MessageType msg_type, std::string method = \"method\",\n                    const bool strip_service_name = false) {\n    EXPECT_EQ(FilterStatus::Continue, router_->transportBegin(metadata_));\n\n    EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_));\n    EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n    EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n\n    if (strip_service_name) {\n      EXPECT_CALL(route_entry_, stripServiceName()).WillOnce(Return(true));\n    }\n\n    initializeMetadata(msg_type, method);\n\n    EXPECT_CALL(callbacks_, 
downstreamTransportType()).WillOnce(Return(TransportType::Framed));\n    EXPECT_CALL(callbacks_, downstreamProtocolType()).WillOnce(Return(ProtocolType::Binary));\n    EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_));\n\n    EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_));\n    EXPECT_EQ(&connection_, router_->downstreamConnection());\n\n    // Not yet implemented:\n    EXPECT_EQ(absl::optional<uint64_t>(), router_->computeHashKey());\n    EXPECT_EQ(nullptr, router_->metadataMatchCriteria());\n    EXPECT_EQ(nullptr, router_->downstreamHeaders());\n  }\n\n  void connectUpstream() {\n    EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_))\n        .WillOnce(Invoke([&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void {\n          upstream_callbacks_ = &cb;\n        }));\n\n    conn_state_.reset();\n    EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, connectionState())\n        .WillRepeatedly(\n            Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); }));\n    EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, setConnectionState_(_))\n        .WillOnce(Invoke(\n            [&](Tcp::ConnectionPool::ConnectionStatePtr& cs) -> void { conn_state_.swap(cs); }));\n\n    EXPECT_CALL(*protocol_, writeMessageBegin(_, _))\n        .WillOnce(Invoke([&](Buffer::Instance&, const MessageMetadata& metadata) -> void {\n          EXPECT_EQ(metadata_->methodName(), metadata.methodName());\n          EXPECT_EQ(metadata_->messageType(), metadata.messageType());\n          EXPECT_EQ(metadata_->sequenceId(), metadata.sequenceId());\n        }));\n\n    EXPECT_CALL(callbacks_, continueDecoding());\n    context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_);\n\n    EXPECT_NE(nullptr, upstream_callbacks_);\n  }\n\n  void startRequestWithExistingConnection(MessageType msg_type) {\n    
EXPECT_EQ(FilterStatus::Continue, router_->transportBegin({}));\n\n    EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_));\n    EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n    EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n\n    initializeMetadata(msg_type);\n\n    EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_))\n        .WillOnce(Invoke([&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void {\n          upstream_callbacks_ = &cb;\n        }));\n\n    if (!conn_state_) {\n      conn_state_ = std::make_unique<ThriftConnectionState>();\n    }\n    EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, connectionState())\n        .WillRepeatedly(\n            Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); }));\n\n    EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_));\n    EXPECT_EQ(&connection_, router_->downstreamConnection());\n\n    // Not yet implemented:\n    EXPECT_EQ(absl::optional<uint64_t>(), router_->computeHashKey());\n    EXPECT_EQ(nullptr, router_->metadataMatchCriteria());\n    EXPECT_EQ(nullptr, router_->downstreamHeaders());\n\n    EXPECT_CALL(callbacks_, downstreamTransportType()).WillOnce(Return(TransportType::Framed));\n    EXPECT_CALL(callbacks_, downstreamProtocolType()).WillOnce(Return(ProtocolType::Binary));\n\n    mock_protocol_cb_ = [&](MockProtocol* protocol) -> void {\n      ON_CALL(*protocol, type()).WillByDefault(Return(ProtocolType::Binary));\n      EXPECT_CALL(*protocol, writeMessageBegin(_, _))\n          .WillOnce(Invoke([&](Buffer::Instance&, const MessageMetadata& metadata) -> void {\n            EXPECT_EQ(metadata_->methodName(), metadata.methodName());\n            EXPECT_EQ(metadata_->messageType(), metadata.messageType());\n            EXPECT_EQ(metadata_->sequenceId(), metadata.sequenceId());\n          }));\n    };\n    
EXPECT_CALL(callbacks_, continueDecoding()).Times(0);\n    EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_))\n        .WillOnce(\n            Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* {\n              context_.cluster_manager_.tcp_conn_pool_.newConnectionImpl(cb);\n              context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_);\n              return nullptr;\n            }));\n\n    EXPECT_EQ(FilterStatus::Continue, router_->messageBegin(metadata_));\n    EXPECT_NE(nullptr, upstream_callbacks_);\n  }\n\n  void sendTrivialStruct(FieldType field_type) {\n    EXPECT_CALL(*protocol_, writeStructBegin(_, \"\"));\n    EXPECT_EQ(FilterStatus::Continue, router_->structBegin({}));\n\n    int16_t id = 1;\n    EXPECT_CALL(*protocol_, writeFieldBegin(_, \"\", field_type, id));\n    EXPECT_EQ(FilterStatus::Continue, router_->fieldBegin({}, field_type, id));\n\n    sendTrivialValue(field_type);\n\n    EXPECT_CALL(*protocol_, writeFieldEnd(_));\n    EXPECT_EQ(FilterStatus::Continue, router_->fieldEnd());\n\n    EXPECT_CALL(*protocol_, writeFieldBegin(_, \"\", FieldType::Stop, 0));\n    EXPECT_CALL(*protocol_, writeStructEnd(_));\n    EXPECT_EQ(FilterStatus::Continue, router_->structEnd());\n  }\n\n  void sendTrivialValue(FieldType field_type) {\n    switch (field_type) {\n    case FieldType::Bool: {\n      bool v = true;\n      EXPECT_CALL(*protocol_, writeBool(_, v));\n      EXPECT_EQ(FilterStatus::Continue, router_->boolValue(v));\n    } break;\n    case FieldType::Byte: {\n      uint8_t v = 2;\n      EXPECT_CALL(*protocol_, writeByte(_, v));\n      EXPECT_EQ(FilterStatus::Continue, router_->byteValue(v));\n    } break;\n    case FieldType::I16: {\n      int16_t v = 3;\n      EXPECT_CALL(*protocol_, writeInt16(_, v));\n      EXPECT_EQ(FilterStatus::Continue, router_->int16Value(v));\n    } break;\n    case FieldType::I32: {\n      int32_t v = 4;\n      EXPECT_CALL(*protocol_, writeInt32(_, 
v));\n      EXPECT_EQ(FilterStatus::Continue, router_->int32Value(v));\n    } break;\n    case FieldType::I64: {\n      int64_t v = 5;\n      EXPECT_CALL(*protocol_, writeInt64(_, v));\n      EXPECT_EQ(FilterStatus::Continue, router_->int64Value(v));\n    } break;\n    case FieldType::Double: {\n      double v = 6.0;\n      EXPECT_CALL(*protocol_, writeDouble(_, v));\n      EXPECT_EQ(FilterStatus::Continue, router_->doubleValue(v));\n    } break;\n    case FieldType::String: {\n      std::string v = \"seven\";\n      EXPECT_CALL(*protocol_, writeString(_, v));\n      EXPECT_EQ(FilterStatus::Continue, router_->stringValue(v));\n    } break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  void completeRequest() {\n    EXPECT_CALL(*protocol_, writeMessageEnd(_));\n    EXPECT_CALL(*transport_, encodeFrame(_, _, _));\n    EXPECT_CALL(upstream_connection_, write(_, false));\n\n    if (msg_type_ == MessageType::Oneway) {\n      EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, released(Ref(upstream_connection_)));\n    }\n\n    EXPECT_EQ(FilterStatus::Continue, router_->messageEnd());\n    EXPECT_EQ(FilterStatus::Continue, router_->transportEnd());\n  }\n\n  void returnResponse() {\n    Buffer::OwnedImpl buffer;\n\n    EXPECT_CALL(callbacks_, startUpstreamResponse(_, _));\n\n    EXPECT_CALL(callbacks_, upstreamData(Ref(buffer)))\n        .WillOnce(Return(ThriftFilters::ResponseStatus::MoreData));\n    upstream_callbacks_->onUpstreamData(buffer, false);\n\n    EXPECT_CALL(callbacks_, upstreamData(Ref(buffer)))\n        .WillOnce(Return(ThriftFilters::ResponseStatus::Complete));\n    EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, released(Ref(upstream_connection_)));\n    upstream_callbacks_->onUpstreamData(buffer, false);\n  }\n\n  void destroyRouter() {\n    router_->onDestroy();\n    router_.reset();\n  }\n\n  TestNamedTransportConfigFactory transport_factory_;\n  TestNamedProtocolConfigFactory protocol_factory_;\n  
Registry::InjectFactory<NamedTransportConfigFactory> transport_register_;\n  Registry::InjectFactory<NamedProtocolConfigFactory> protocol_register_;\n\n  std::function<void(MockTransport*)> mock_transport_cb_{};\n  std::function<void(MockProtocol*)> mock_protocol_cb_{};\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  NiceMock<Network::MockClientConnection> connection_;\n  NiceMock<ThriftFilters::MockDecoderFilterCallbacks> callbacks_;\n  NiceMock<MockTransport>* transport_{};\n  NiceMock<MockProtocol>* protocol_{};\n  NiceMock<MockRoute>* route_{};\n  NiceMock<MockRouteEntry> route_entry_;\n  NiceMock<Upstream::MockHostDescription>* host_{};\n  Tcp::ConnectionPool::ConnectionStatePtr conn_state_;\n\n  RouteConstSharedPtr route_ptr_;\n  std::unique_ptr<Router> router_;\n\n  std::string cluster_name_{\"cluster\"};\n\n  MessageType msg_type_{MessageType::Call};\n  MessageMetadataSharedPtr metadata_;\n\n  Tcp::ConnectionPool::UpstreamCallbacks* upstream_callbacks_{};\n  NiceMock<Network::MockClientConnection> upstream_connection_;\n};\n\nclass ThriftRouterTest : public testing::Test, public ThriftRouterTestBase {\npublic:\n};\n\nclass ThriftRouterFieldTypeTest : public testing::TestWithParam<FieldType>,\n                                  public ThriftRouterTestBase {\npublic:\n};\n\nINSTANTIATE_TEST_SUITE_P(PrimitiveFieldTypes, ThriftRouterFieldTypeTest,\n                         Values(FieldType::Bool, FieldType::Byte, FieldType::I16, FieldType::I32,\n                                FieldType::I64, FieldType::Double, FieldType::String),\n                         fieldTypeParamToString);\n\nclass ThriftRouterContainerTest : public testing::TestWithParam<FieldType>,\n                                  public ThriftRouterTestBase {\npublic:\n};\n\nINSTANTIATE_TEST_SUITE_P(ContainerFieldTypes, ThriftRouterContainerTest,\n                         Values(FieldType::Map, FieldType::List, FieldType::Set),\n                         
fieldTypeParamToString);\n\nTEST_F(ThriftRouterTest, PoolRemoteConnectionFailure) {\n  initializeRouter();\n\n  startRequest(MessageType::Call);\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*connection failure.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(\n      ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n}\n\nTEST_F(ThriftRouterTest, PoolLocalConnectionFailure) {\n  initializeRouter();\n\n  startRequest(MessageType::Call);\n\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(\n      ConnectionPool::PoolFailureReason::LocalConnectionFailure);\n}\n\nTEST_F(ThriftRouterTest, PoolTimeout) {\n  initializeRouter();\n\n  startRequest(MessageType::Call);\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*connection failure.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(ConnectionPool::PoolFailureReason::Timeout);\n}\n\nTEST_F(ThriftRouterTest, PoolOverflowFailure) {\n  initializeRouter();\n\n  startRequest(MessageType::Call);\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*too many connections.*\"));\n  
      EXPECT_TRUE(end_stream);\n      }));\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(ConnectionPool::PoolFailureReason::Overflow,\n                                                       true);\n}\n\nTEST_F(ThriftRouterTest, PoolConnectionFailureWithOnewayMessage) {\n  initializeRouter();\n  startRequest(MessageType::Oneway);\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _)).Times(0);\n  EXPECT_CALL(callbacks_, resetDownstreamConnection());\n  context_.cluster_manager_.tcp_conn_pool_.poolFailure(\n      ConnectionPool::PoolFailureReason::RemoteConnectionFailure);\n\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, NoRoute) {\n  initializeRouter();\n  initializeMetadata(MessageType::Call);\n\n  EXPECT_CALL(callbacks_, route()).WillOnce(Return(nullptr));\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::UnknownMethod, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*no route.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n  EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_));\n  EXPECT_EQ(1U, context_.scope().counterFromString(\"test.route_missing\").value());\n}\n\nTEST_F(ThriftRouterTest, NoCluster) {\n  initializeRouter();\n  initializeMetadata(MessageType::Call);\n\n  EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_));\n  EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n  EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n  EXPECT_CALL(context_.cluster_manager_, get(Eq(cluster_name_))).WillOnce(Return(nullptr));\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        
EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*unknown cluster.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n  EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_));\n  EXPECT_EQ(1U, context_.scope().counterFromString(\"test.unknown_cluster\").value());\n}\n\nTEST_F(ThriftRouterTest, ClusterMaintenanceMode) {\n  initializeRouter();\n  initializeMetadata(MessageType::Call);\n\n  EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_));\n  EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n  EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n  EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.cluster_.info_, maintenanceMode())\n      .WillOnce(Return(true));\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*maintenance mode.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n  EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_));\n  EXPECT_EQ(1U, context_.scope().counterFromString(\"test.upstream_rq_maintenance_mode\").value());\n}\n\nTEST_F(ThriftRouterTest, NoHealthyHosts) {\n  initializeRouter();\n  initializeMetadata(MessageType::Call);\n\n  EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_));\n  EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_));\n  EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_));\n  EXPECT_CALL(context_.cluster_manager_, tcpConnPoolForCluster(cluster_name_, _, _))\n      .WillOnce(Return(nullptr));\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n 
       auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*no healthy upstream.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n\n  EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_));\n  EXPECT_EQ(1U, context_.scope().counterFromString(\"test.no_healthy_upstream\").value());\n}\n\nTEST_F(ThriftRouterTest, TruncatedResponse) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n  sendTrivialStruct(FieldType::String);\n  completeRequest();\n\n  Buffer::OwnedImpl buffer;\n\n  EXPECT_CALL(callbacks_, startUpstreamResponse(_, _));\n  EXPECT_CALL(callbacks_, upstreamData(Ref(buffer)))\n      .WillOnce(Return(ThriftFilters::ResponseStatus::MoreData));\n  EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, released(Ref(upstream_connection_)));\n  EXPECT_CALL(callbacks_, resetDownstreamConnection());\n\n  upstream_callbacks_->onUpstreamData(buffer, true);\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, UpstreamRemoteCloseMidResponse) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*connection failure.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::RemoteClose);\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, UpstreamLocalCloseMidResponse) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::LocalClose);\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, 
UpstreamCloseAfterResponse) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n  sendTrivialStruct(FieldType::String);\n  completeRequest();\n\n  upstream_callbacks_->onEvent(Network::ConnectionEvent::LocalClose);\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, UpstreamDataTriggersReset) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n  sendTrivialStruct(FieldType::String);\n  completeRequest();\n\n  Buffer::OwnedImpl buffer;\n\n  EXPECT_CALL(callbacks_, startUpstreamResponse(_, _));\n  EXPECT_CALL(callbacks_, upstreamData(Ref(buffer)))\n      .WillOnce(Return(ThriftFilters::ResponseStatus::Reset));\n  EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n\n  upstream_callbacks_->onUpstreamData(buffer, true);\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, UnexpectedUpstreamRemoteClose) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n  sendTrivialStruct(FieldType::String);\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*connection failure.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n  router_->onEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(ThriftRouterTest, UnexpectedUpstreamLocalClose) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n  sendTrivialStruct(FieldType::String);\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), 
ContainsRegex(\".*connection failure.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n  router_->onEvent(Network::ConnectionEvent::RemoteClose);\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/9037.\nTEST_F(ThriftRouterTest, DontCloseConnectionTwice) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n  sendTrivialStruct(FieldType::String);\n\n  EXPECT_CALL(callbacks_, sendLocalReply(_, _))\n      .WillOnce(Invoke([&](const DirectResponse& response, bool end_stream) -> void {\n        auto& app_ex = dynamic_cast<const AppException&>(response);\n        EXPECT_EQ(AppExceptionType::InternalError, app_ex.type_);\n        EXPECT_THAT(app_ex.what(), ContainsRegex(\".*connection failure.*\"));\n        EXPECT_TRUE(end_stream);\n      }));\n  router_->onEvent(Network::ConnectionEvent::RemoteClose);\n\n  // Connection close shouldn't happen in onDestroy(), since it's been handled.\n  EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush)).Times(0);\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, UnexpectedRouterDestroyBeforeUpstreamConnect) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n\n  EXPECT_EQ(1, context_.cluster_manager_.tcp_conn_pool_.handles_.size());\n  EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_.handles_.front(),\n              cancel(Tcp::ConnectionPool::CancelPolicy::Default));\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, UnexpectedRouterDestroy) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n  EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush));\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, ProtocolUpgrade) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n\n  EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_))\n      .WillOnce(Invoke(\n          [&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void { 
upstream_callbacks_ = &cb; }));\n\n  conn_state_.reset();\n  EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, connectionState())\n      .WillRepeatedly(\n          Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); }));\n  EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, setConnectionState_(_))\n      .WillOnce(Invoke(\n          [&](Tcp::ConnectionPool::ConnectionStatePtr& cs) -> void { conn_state_.swap(cs); }));\n\n  EXPECT_CALL(*protocol_, supportsUpgrade()).WillOnce(Return(true));\n\n  MockThriftObject* upgrade_response = new NiceMock<MockThriftObject>();\n\n  EXPECT_CALL(*protocol_, attemptUpgrade(_, _, _))\n      .WillOnce(Invoke(\n          [&](Transport&, ThriftConnectionState&, Buffer::Instance& buffer) -> ThriftObjectPtr {\n            buffer.add(\"upgrade request\");\n            return ThriftObjectPtr{upgrade_response};\n          }));\n  EXPECT_CALL(upstream_connection_, write(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        EXPECT_EQ(\"upgrade request\", buffer.toString());\n      }));\n\n  context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_);\n  EXPECT_NE(nullptr, upstream_callbacks_);\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_CALL(*upgrade_response, onData(Ref(buffer))).WillOnce(Return(false));\n  upstream_callbacks_->onUpstreamData(buffer, false);\n\n  EXPECT_CALL(*upgrade_response, onData(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(*protocol_, completeUpgrade(_, Ref(*upgrade_response)));\n  EXPECT_CALL(callbacks_, continueDecoding());\n  EXPECT_CALL(*protocol_, writeMessageBegin(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, const MessageMetadata& metadata) -> void {\n        EXPECT_EQ(metadata_->methodName(), metadata.methodName());\n        EXPECT_EQ(metadata_->messageType(), metadata.messageType());\n        EXPECT_EQ(metadata_->sequenceId(), metadata.sequenceId());\n      }));\n  
upstream_callbacks_->onUpstreamData(buffer, false);\n\n  // Then the actual request...\n  sendTrivialStruct(FieldType::String);\n  completeRequest();\n  returnResponse();\n  destroyRouter();\n}\n\n// Test the case where an upgrade will occur, but the conn pool\n// returns immediately with a valid, but never, used connection.\nTEST_F(ThriftRouterTest, ProtocolUpgradeOnExistingUnusedConnection) {\n  initializeRouter();\n\n  EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_))\n      .WillOnce(Invoke(\n          [&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void { upstream_callbacks_ = &cb; }));\n\n  conn_state_.reset();\n  EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, connectionState())\n      .WillRepeatedly(\n          Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); }));\n  EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, setConnectionState_(_))\n      .WillOnce(Invoke(\n          [&](Tcp::ConnectionPool::ConnectionStatePtr& cs) -> void { conn_state_.swap(cs); }));\n\n  MockThriftObject* upgrade_response = new NiceMock<MockThriftObject>();\n\n  EXPECT_CALL(upstream_connection_, write(_, false))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void {\n        EXPECT_EQ(\"upgrade request\", buffer.toString());\n      }));\n\n  // Simulate an existing connection that's never been used.\n  EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_))\n      .WillOnce(\n          Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* {\n            context_.cluster_manager_.tcp_conn_pool_.newConnectionImpl(cb);\n\n            EXPECT_CALL(*protocol_, supportsUpgrade()).WillOnce(Return(true));\n\n            EXPECT_CALL(*protocol_, attemptUpgrade(_, _, _))\n                .WillOnce(Invoke([&](Transport&, ThriftConnectionState&,\n                                     Buffer::Instance& 
buffer) -> ThriftObjectPtr {\n                  buffer.add(\"upgrade request\");\n                  return ThriftObjectPtr{upgrade_response};\n                }));\n\n            context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_);\n            return nullptr;\n          }));\n\n  startRequest(MessageType::Call);\n\n  EXPECT_NE(nullptr, upstream_callbacks_);\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_CALL(*upgrade_response, onData(Ref(buffer))).WillOnce(Return(false));\n  upstream_callbacks_->onUpstreamData(buffer, false);\n\n  EXPECT_CALL(*upgrade_response, onData(Ref(buffer))).WillOnce(Return(true));\n  EXPECT_CALL(*protocol_, completeUpgrade(_, Ref(*upgrade_response)));\n  EXPECT_CALL(callbacks_, continueDecoding());\n  EXPECT_CALL(*protocol_, writeMessageBegin(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, const MessageMetadata& metadata) -> void {\n        EXPECT_EQ(metadata_->methodName(), metadata.methodName());\n        EXPECT_EQ(metadata_->messageType(), metadata.messageType());\n        EXPECT_EQ(metadata_->sequenceId(), metadata.sequenceId());\n      }));\n  upstream_callbacks_->onUpstreamData(buffer, false);\n\n  // Then the actual request...\n  sendTrivialStruct(FieldType::String);\n  completeRequest();\n  returnResponse();\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, ProtocolUpgradeSkippedOnExistingConnection) {\n  initializeRouter();\n  startRequest(MessageType::Call);\n\n  EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_))\n      .WillOnce(Invoke(\n          [&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void { upstream_callbacks_ = &cb; }));\n\n  conn_state_ = std::make_unique<ThriftConnectionState>();\n  EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, connectionState())\n      .WillRepeatedly(\n          Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); }));\n\n  EXPECT_CALL(*protocol_, 
supportsUpgrade()).WillOnce(Return(true));\n\n  // Protocol determines that connection state shows upgrade already occurred\n  EXPECT_CALL(*protocol_, attemptUpgrade(_, _, _))\n      .WillOnce(Invoke([&](Transport&, ThriftConnectionState&,\n                           Buffer::Instance&) -> ThriftObjectPtr { return nullptr; }));\n\n  EXPECT_CALL(*protocol_, writeMessageBegin(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, const MessageMetadata& metadata) -> void {\n        EXPECT_EQ(metadata_->methodName(), metadata.methodName());\n        EXPECT_EQ(metadata_->messageType(), metadata.messageType());\n        EXPECT_EQ(metadata_->sequenceId(), metadata.sequenceId());\n      }));\n  EXPECT_CALL(callbacks_, continueDecoding());\n\n  context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_);\n  EXPECT_NE(nullptr, upstream_callbacks_);\n\n  // Then the actual request...\n  sendTrivialStruct(FieldType::String);\n  completeRequest();\n  returnResponse();\n  destroyRouter();\n}\n\nTEST_P(ThriftRouterFieldTypeTest, OneWay) {\n  FieldType field_type = GetParam();\n\n  initializeRouter();\n  startRequest(MessageType::Oneway);\n  connectUpstream();\n  sendTrivialStruct(field_type);\n  completeRequest();\n  destroyRouter();\n}\n\nTEST_P(ThriftRouterFieldTypeTest, Call) {\n  FieldType field_type = GetParam();\n\n  initializeRouter();\n  startRequest(MessageType::Call);\n  connectUpstream();\n  sendTrivialStruct(field_type);\n  completeRequest();\n  returnResponse();\n  destroyRouter();\n}\n\n// Ensure the service name gets stripped when strip_service_name = true.\nTEST_P(ThriftRouterFieldTypeTest, StripServiceNameEnabled) {\n  FieldType field_type = GetParam();\n\n  initializeRouter();\n  startRequest(MessageType::Call, \"Service:method\", true);\n  connectUpstream();\n  sendTrivialStruct(field_type);\n  completeRequest();\n\n  EXPECT_EQ(\"method\", metadata_->methodName());\n\n  returnResponse();\n  destroyRouter();\n}\n\n// Ensure the service name prefix 
isn't stripped when strip_service_name = false.\nTEST_P(ThriftRouterFieldTypeTest, StripServiceNameDisabled) {\n  FieldType field_type = GetParam();\n\n  initializeRouter();\n  startRequest(MessageType::Call, \"Service:method\", false);\n  connectUpstream();\n  sendTrivialStruct(field_type);\n  completeRequest();\n\n  EXPECT_EQ(\"Service:method\", metadata_->methodName());\n\n  returnResponse();\n  destroyRouter();\n}\n\nTEST_F(ThriftRouterTest, CallWithExistingConnection) {\n  initializeRouter();\n\n  // Simulate previous sequence id usage.\n  conn_state_ = std::make_unique<ThriftConnectionState>(3);\n\n  startRequestWithExistingConnection(MessageType::Call);\n  sendTrivialStruct(FieldType::I32);\n  completeRequest();\n\n  EXPECT_EQ(3, metadata_->sequenceId());\n\n  returnResponse();\n  destroyRouter();\n}\n\nTEST_P(ThriftRouterContainerTest, DecoderFilterCallbacks) {\n  FieldType field_type = GetParam();\n  int16_t field_id = 1;\n\n  initializeRouter();\n\n  startRequest(MessageType::Oneway);\n  connectUpstream();\n\n  EXPECT_CALL(*protocol_, writeStructBegin(_, \"\"));\n  EXPECT_EQ(FilterStatus::Continue, router_->structBegin({}));\n\n  EXPECT_CALL(*protocol_, writeFieldBegin(_, \"\", field_type, field_id));\n  EXPECT_EQ(FilterStatus::Continue, router_->fieldBegin({}, field_type, field_id));\n\n  FieldType container_type = FieldType::I32;\n  uint32_t size{};\n\n  switch (field_type) {\n  case FieldType::Map:\n    size = 2;\n    EXPECT_CALL(*protocol_, writeMapBegin(_, container_type, container_type, size));\n    EXPECT_EQ(FilterStatus::Continue, router_->mapBegin(container_type, container_type, size));\n    for (int i = 0; i < 2; i++) {\n      EXPECT_CALL(*protocol_, writeInt32(_, i));\n      EXPECT_EQ(FilterStatus::Continue, router_->int32Value(i));\n      int j = i + 100;\n      EXPECT_CALL(*protocol_, writeInt32(_, j));\n      EXPECT_EQ(FilterStatus::Continue, router_->int32Value(j));\n    }\n    EXPECT_CALL(*protocol_, writeMapEnd(_));\n    
EXPECT_EQ(FilterStatus::Continue, router_->mapEnd());\n    break;\n  case FieldType::List:\n    size = 3;\n    EXPECT_CALL(*protocol_, writeListBegin(_, container_type, size));\n    EXPECT_EQ(FilterStatus::Continue, router_->listBegin(container_type, size));\n    for (int i = 0; i < 3; i++) {\n      EXPECT_CALL(*protocol_, writeInt32(_, i));\n      EXPECT_EQ(FilterStatus::Continue, router_->int32Value(i));\n    }\n    EXPECT_CALL(*protocol_, writeListEnd(_));\n    EXPECT_EQ(FilterStatus::Continue, router_->listEnd());\n    break;\n  case FieldType::Set:\n    size = 4;\n    EXPECT_CALL(*protocol_, writeSetBegin(_, container_type, size));\n    EXPECT_EQ(FilterStatus::Continue, router_->setBegin(container_type, size));\n    for (int i = 0; i < 4; i++) {\n      EXPECT_CALL(*protocol_, writeInt32(_, i));\n      EXPECT_EQ(FilterStatus::Continue, router_->int32Value(i));\n    }\n    EXPECT_CALL(*protocol_, writeSetEnd(_));\n    EXPECT_EQ(FilterStatus::Continue, router_->setEnd());\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n\n  EXPECT_CALL(*protocol_, writeFieldEnd(_));\n  EXPECT_EQ(FilterStatus::Continue, router_->fieldEnd());\n\n  EXPECT_CALL(*protocol_, writeFieldBegin(_, _, FieldType::Stop, 0));\n  EXPECT_CALL(*protocol_, writeStructEnd(_));\n  EXPECT_EQ(FilterStatus::Continue, router_->structEnd());\n\n  completeRequest();\n  destroyRouter();\n}\n\n} // namespace Router\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/thrift_object_impl_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/thrift_object_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/mocks.h\"\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Expectation;\nusing testing::ExpectationSet;\nusing testing::InSequence;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\nusing testing::Values;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass ThriftObjectImplTestBase {\npublic:\n  virtual ~ThriftObjectImplTestBase() = default;\n\n  Expectation expectValue(FieldType field_type) {\n    switch (field_type) {\n    case FieldType::Bool:\n      return EXPECT_CALL(protocol_, readBool(Ref(buffer_), _))\n          .WillOnce(Invoke([](Buffer::Instance&, bool& value) -> bool {\n            value = true;\n            return true;\n          }));\n    case FieldType::Byte:\n      return EXPECT_CALL(protocol_, readByte(Ref(buffer_), _))\n          .WillOnce(Invoke([](Buffer::Instance&, uint8_t& value) -> bool {\n            value = 1;\n            return true;\n          }));\n    case FieldType::Double:\n      return EXPECT_CALL(protocol_, readDouble(Ref(buffer_), _))\n          .WillOnce(Invoke([](Buffer::Instance&, double& value) -> bool {\n            value = 2.0;\n            return true;\n          }));\n    case FieldType::I16:\n      return EXPECT_CALL(protocol_, readInt16(Ref(buffer_), _))\n          .WillOnce(Invoke([](Buffer::Instance&, int16_t& value) -> bool {\n            value = 3;\n            return true;\n          }));\n    case FieldType::I32:\n      return EXPECT_CALL(protocol_, readInt32(Ref(buffer_), _))\n          .WillOnce(Invoke([](Buffer::Instance&, int32_t& value) -> bool {\n            value = 
4;\n            return true;\n          }));\n    case FieldType::I64:\n      return EXPECT_CALL(protocol_, readInt64(Ref(buffer_), _))\n          .WillOnce(Invoke([](Buffer::Instance&, int64_t& value) -> bool {\n            value = 5;\n            return true;\n          }));\n    case FieldType::String:\n      return EXPECT_CALL(protocol_, readString(Ref(buffer_), _))\n          .WillOnce(Invoke([](Buffer::Instance&, std::string& value) -> bool {\n            value = \"six\";\n            return true;\n          }));\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  Expectation expectFieldBegin(FieldType field_type, int16_t field_id) {\n    return EXPECT_CALL(protocol_, readFieldBegin(Ref(buffer_), _, _, _))\n        .WillOnce(\n            Invoke([=](Buffer::Instance&, std::string&, FieldType& type, int16_t& id) -> bool {\n              type = field_type;\n              id = field_id;\n              return true;\n            }));\n  }\n\n  Expectation expectFieldEnd() {\n    return EXPECT_CALL(protocol_, readFieldEnd(Ref(buffer_))).WillOnce(Return(true));\n  }\n\n  ExpectationSet expectField(FieldType field_type, int16_t field_id) {\n    ExpectationSet s;\n    s += expectFieldBegin(field_type, field_id);\n    s += expectValue(field_type);\n    s += expectFieldEnd();\n    return s;\n  }\n\n  Expectation expectStopField() { return expectFieldBegin(FieldType::Stop, 0); }\n\n  void checkValue(FieldType field_type, const ThriftValue& value) {\n    EXPECT_EQ(field_type, value.type());\n\n    switch (field_type) {\n    case FieldType::Bool:\n      EXPECT_EQ(true, value.getValueTyped<bool>());\n      break;\n    case FieldType::Byte:\n      EXPECT_EQ(1, value.getValueTyped<uint8_t>());\n      break;\n    case FieldType::Double:\n      EXPECT_EQ(2.0, value.getValueTyped<double>());\n      break;\n    case FieldType::I16:\n      EXPECT_EQ(3, value.getValueTyped<int16_t>());\n      break;\n    case FieldType::I32:\n      EXPECT_EQ(4, 
value.getValueTyped<int32_t>());\n      break;\n    case FieldType::I64:\n      EXPECT_EQ(5, value.getValueTyped<int64_t>());\n      break;\n    case FieldType::String:\n      EXPECT_EQ(\"six\", value.getValueTyped<std::string>());\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  void checkFieldValue(const ThriftField& field) {\n    const ThriftValue& value = field.getValue();\n    checkValue(field.fieldType(), value);\n  }\n\n  NiceMock<MockTransport> transport_;\n  NiceMock<MockProtocol> protocol_;\n  Buffer::OwnedImpl buffer_;\n};\n\nclass ThriftObjectImplTest : public testing::Test, public ThriftObjectImplTestBase {};\n\n// Test parsing an empty struct (just a stop field).\nTEST_F(ThriftObjectImplTest, ParseEmptyStruct) {\n  ThriftObjectImpl thrift_obj(transport_, protocol_);\n\n  InSequence s;\n  EXPECT_CALL(transport_, decodeFrameStart(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(transport_, decodeFrameEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  EXPECT_TRUE(thrift_obj.onData(buffer_));\n  EXPECT_TRUE(thrift_obj.fields().empty());\n}\n\nclass ThriftObjectImplValueTest : public ThriftObjectImplTestBase,\n                                  public testing::TestWithParam<FieldType> {};\n\nINSTANTIATE_TEST_SUITE_P(PrimitiveFieldTypes, ThriftObjectImplValueTest,\n                         Values(FieldType::Bool, FieldType::Byte, FieldType::Double, FieldType::I16,\n                                FieldType::I32, FieldType::I64, FieldType::String),\n                         fieldTypeParamToString);\n\n// Test parsing a struct with a single field with a simple 
value.\nTEST_P(ThriftObjectImplValueTest, ParseSingleValueStruct) {\n  FieldType field_type = GetParam();\n\n  ThriftObjectImpl thrift_obj(transport_, protocol_);\n\n  InSequence s;\n  EXPECT_CALL(transport_, decodeFrameStart(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectField(field_type, 1);\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(transport_, decodeFrameEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  EXPECT_TRUE(thrift_obj.onData(buffer_));\n  EXPECT_EQ(1, thrift_obj.fields().size());\n  EXPECT_EQ(field_type, thrift_obj.fields().front()->fieldType());\n  EXPECT_EQ(1, thrift_obj.fields().front()->fieldId());\n  checkFieldValue(*thrift_obj.fields().front());\n}\n\n// Test parsing nested structs (struct -> struct -> simple field).\nTEST_P(ThriftObjectImplValueTest, ParseNestedSingleValueStruct) {\n  FieldType field_type = GetParam();\n\n  ThriftObjectImpl thrift_obj(transport_, protocol_);\n\n  InSequence s;\n  EXPECT_CALL(transport_, decodeFrameStart(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectFieldBegin(FieldType::Struct, 1);\n\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectField(field_type, 2);\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  expectFieldEnd();\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(transport_, 
decodeFrameEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  EXPECT_TRUE(thrift_obj.onData(buffer_));\n  EXPECT_EQ(1, thrift_obj.fields().size());\n  const ThriftField& field = *thrift_obj.fields().front();\n  EXPECT_EQ(FieldType::Struct, field.fieldType());\n\n  const ThriftStructValue& nested = field.getValue().getValueTyped<ThriftStructValue>();\n  EXPECT_EQ(1, nested.fields().size());\n  EXPECT_EQ(field_type, nested.fields().front()->fieldType());\n  EXPECT_EQ(2, nested.fields().front()->fieldId());\n  checkFieldValue(*nested.fields().front());\n}\n\n// Test parsing a struct with a single list field (struct -> list).\nTEST_P(ThriftObjectImplValueTest, ParseNestedListValue) {\n  FieldType field_type = GetParam();\n\n  ThriftObjectImpl thrift_obj(transport_, protocol_);\n\n  InSequence s;\n  EXPECT_CALL(transport_, decodeFrameStart(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectFieldBegin(FieldType::List, 1);\n\n  EXPECT_CALL(protocol_, readListBegin(Ref(buffer_), _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, FieldType& type, uint32_t& size) -> bool {\n        type = field_type;\n        size = 2;\n        return true;\n      }));\n  expectValue(field_type);\n  expectValue(field_type);\n  EXPECT_CALL(protocol_, readListEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  expectFieldEnd();\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(transport_, decodeFrameEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  EXPECT_TRUE(thrift_obj.onData(buffer_));\n  EXPECT_EQ(1, thrift_obj.fields().size());\n  const ThriftField& field = *thrift_obj.fields().front();\n  EXPECT_EQ(1, field.fieldId());\n  EXPECT_EQ(FieldType::List, field.fieldType());\n\n  const 
ThriftListValue& nested = field.getValue().getValueTyped<ThriftListValue>();\n  EXPECT_EQ(field_type, nested.elementType());\n  EXPECT_EQ(2, nested.elements().size());\n  for (auto& value : nested.elements()) {\n    checkValue(field_type, *value);\n  }\n}\n\n// Test parsing a struct with a single set field (struct -> set).\nTEST_P(ThriftObjectImplValueTest, ParseNestedSetValue) {\n  FieldType field_type = GetParam();\n\n  ThriftObjectImpl thrift_obj(transport_, protocol_);\n\n  InSequence s;\n  EXPECT_CALL(transport_, decodeFrameStart(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectFieldBegin(FieldType::Set, 1);\n\n  EXPECT_CALL(protocol_, readSetBegin(Ref(buffer_), _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, FieldType& type, uint32_t& size) -> bool {\n        type = field_type;\n        size = 2;\n        return true;\n      }));\n  expectValue(field_type);\n  expectValue(field_type);\n  EXPECT_CALL(protocol_, readSetEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  expectFieldEnd();\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(transport_, decodeFrameEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  EXPECT_TRUE(thrift_obj.onData(buffer_));\n  EXPECT_EQ(1, thrift_obj.fields().size());\n  const ThriftField& field = *thrift_obj.fields().front();\n  EXPECT_EQ(1, field.fieldId());\n  EXPECT_EQ(FieldType::Set, field.fieldType());\n\n  const ThriftSetValue& nested = field.getValue().getValueTyped<ThriftSetValue>();\n  EXPECT_EQ(field_type, nested.elementType());\n  EXPECT_EQ(2, nested.elements().size());\n  for (auto& value : nested.elements()) {\n    checkValue(field_type, *value);\n  }\n}\n\n// Test parsing a struct with a single map field 
(struct -> map).\nTEST_P(ThriftObjectImplValueTest, ParseNestedMapValue) {\n  FieldType field_type = GetParam();\n\n  ThriftObjectImpl thrift_obj(transport_, protocol_);\n\n  InSequence s;\n  EXPECT_CALL(transport_, decodeFrameStart(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectFieldBegin(FieldType::Map, 1);\n\n  EXPECT_CALL(protocol_, readMapBegin(Ref(buffer_), _, _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, FieldType& key_type, FieldType& value_type,\n                           uint32_t& size) -> bool {\n        key_type = field_type;\n        value_type = FieldType::String;\n        size = 2;\n        return true;\n      }));\n  expectValue(field_type);\n  expectValue(FieldType::String);\n  expectValue(field_type);\n  expectValue(FieldType::String);\n  EXPECT_CALL(protocol_, readMapEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  expectFieldEnd();\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(transport_, decodeFrameEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  EXPECT_TRUE(thrift_obj.onData(buffer_));\n  EXPECT_EQ(1, thrift_obj.fields().size());\n  const ThriftField& field = *thrift_obj.fields().front();\n  EXPECT_EQ(1, field.fieldId());\n  EXPECT_EQ(FieldType::Map, field.fieldType());\n\n  const ThriftMapValue& nested = field.getValue().getValueTyped<ThriftMapValue>();\n  EXPECT_EQ(field_type, nested.keyType());\n  EXPECT_EQ(FieldType::String, nested.valueType());\n  EXPECT_EQ(2, nested.elements().size());\n  for (auto& value : nested.elements()) {\n    checkValue(field_type, *value.first);\n    checkValue(FieldType::String, *value.second);\n  }\n}\n\n// Test a struct with a map -> list -> set -> map -> list -> set -> 
struct.\nTEST_F(ThriftObjectImplTest, DeeplyNestedStruct) {\n  ThriftObjectImpl thrift_obj(transport_, protocol_);\n\n  InSequence s;\n  EXPECT_CALL(transport_, decodeFrameStart(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectFieldBegin(FieldType::Map, 1);\n\n  EXPECT_CALL(protocol_, readMapBegin(Ref(buffer_), _, _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, FieldType& key_type, FieldType& value_type,\n                           uint32_t& size) -> bool {\n        key_type = FieldType::I32;\n        value_type = FieldType::List;\n        size = 1;\n        return true;\n      }));\n  expectValue(FieldType::I32);\n  EXPECT_CALL(protocol_, readListBegin(Ref(buffer_), _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, FieldType& elem_type, uint32_t& size) -> bool {\n        elem_type = FieldType::Set;\n        size = 1;\n        return true;\n      }));\n  EXPECT_CALL(protocol_, readSetBegin(Ref(buffer_), _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, FieldType& elem_type, uint32_t& size) -> bool {\n        elem_type = FieldType::Map;\n        size = 1;\n        return true;\n      }));\n\n  EXPECT_CALL(protocol_, readMapBegin(Ref(buffer_), _, _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, FieldType& key_type, FieldType& value_type,\n                           uint32_t& size) -> bool {\n        key_type = FieldType::I32;\n        value_type = FieldType::List;\n        size = 1;\n        return true;\n      }));\n  expectValue(FieldType::I32);\n  EXPECT_CALL(protocol_, readListBegin(Ref(buffer_), _, _))\n      .WillOnce(Invoke([&](Buffer::Instance&, FieldType& elem_type, uint32_t& size) -> bool {\n        elem_type = FieldType::Set;\n        size = 1;\n        return true;\n      }));\n  EXPECT_CALL(protocol_, readSetBegin(Ref(buffer_), _, _))\n      
.WillOnce(Invoke([&](Buffer::Instance&, FieldType& elem_type, uint32_t& size) -> bool {\n        elem_type = FieldType::Struct;\n        size = 1;\n        return true;\n      }));\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectField(FieldType::I64, 100);\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readSetEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readListEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMapEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readSetEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readListEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMapEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  expectFieldEnd();\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(transport_, decodeFrameEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  EXPECT_TRUE(thrift_obj.onData(buffer_));\n  EXPECT_EQ(1, thrift_obj.fields().size());\n\n  EXPECT_EQ(FieldType::Map, thrift_obj.fields().front()->fieldType());\n  const ThriftMapValue& map_value =\n      thrift_obj.fields().front()->getValue().getValueTyped<ThriftMapValue>();\n  EXPECT_EQ(1, map_value.elements().size());\n\n  const ThriftListValue& list_value =\n      map_value.elements().front().second->getValueTyped<ThriftListValue>();\n  EXPECT_EQ(1, list_value.elements().size());\n\n  const ThriftSetValue& set_value = list_value.elements().front()->getValueTyped<ThriftSetValue>();\n  EXPECT_EQ(1, set_value.elements().size());\n\n  const ThriftMapValue& map_value2 = set_value.elements().front()->getValueTyped<ThriftMapValue>();\n  EXPECT_EQ(1, map_value2.elements().size());\n\n  const ThriftListValue& list_value2 =\n      
map_value2.elements().front().second->getValueTyped<ThriftListValue>();\n  EXPECT_EQ(1, list_value2.elements().size());\n\n  const ThriftSetValue& set_value2 =\n      list_value2.elements().front()->getValueTyped<ThriftSetValue>();\n  EXPECT_EQ(1, set_value2.elements().size());\n\n  const ThriftStructValue& struct_value =\n      set_value2.elements().front()->getValueTyped<ThriftStructValue>();\n  EXPECT_EQ(1, struct_value.fields().size());\n\n  EXPECT_EQ(5, struct_value.fields().front()->getValue().getValueTyped<int64_t>());\n}\n\n// Tests when caller requests wrong value type.\nTEST_F(ThriftObjectImplTest, WrongValueType) {\n  ThriftObjectImpl thrift_obj(transport_, protocol_);\n\n  InSequence s;\n  EXPECT_CALL(transport_, decodeFrameStart(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readStructBegin(Ref(buffer_), _)).WillOnce(Return(true));\n  expectField(FieldType::String, 1);\n  expectStopField();\n  EXPECT_CALL(protocol_, readStructEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(protocol_, readMessageEnd(Ref(buffer_))).WillOnce(Return(true));\n  EXPECT_CALL(transport_, decodeFrameEnd(Ref(buffer_))).WillOnce(Return(true));\n\n  EXPECT_TRUE(thrift_obj.onData(buffer_));\n  EXPECT_EQ(1, thrift_obj.fields().size());\n\n  const ThriftValue& value = thrift_obj.fields().front()->getValue();\n  EXPECT_THROW_WITH_MESSAGE(value.getValueTyped<int32_t>(), EnvoyException,\n                            fmt::format(\"expected field type {}, got {}\",\n                                        static_cast<int>(FieldType::I32),\n                                        static_cast<int>(FieldType::String)));\n}\n\n} // Namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/translation_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h\"\n\n#include \"extensions/filters/network/well_known_names.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/integration.h\"\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::Combine;\nusing ::testing::TestParamInfo;\nusing testing::Values;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass ThriftTranslationIntegrationTest\n    : public testing::TestWithParam<\n          std::tuple<TransportType, ProtocolType, TransportType, ProtocolType>>,\n      public BaseThriftIntegrationTest {\npublic:\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    thrift_config_ = absl::StrCat(ConfigHelper::baseConfig(), R\"EOF(\n    filter_chains:\n      filters:\n        - name: thrift\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProxy\n            stat_prefix: thrift_stats\n            route_config:\n              name: \"routes\"\n              routes:\n                - match:\n                    method_name: \"add\"\n                  route:\n                    cluster: \"cluster_0\"\n      )EOF\");\n  }\n\n  void initialize() override {\n    TransportType downstream_transport, upstream_transport;\n    ProtocolType downstream_protocol, upstream_protocol;\n    std::tie(downstream_transport, downstream_protocol, upstream_transport, upstream_protocol) =\n        GetParam();\n\n    auto upstream_transport_proto = transportTypeToProto(upstream_transport);\n    auto upstream_protocol_proto = protocolTypeToProto(upstream_protocol);\n\n    envoy::extensions::filters::network::thrift_proxy::v3::ThriftProtocolOptions proto_opts;\n    
proto_opts.set_transport(upstream_transport_proto);\n    proto_opts.set_protocol(upstream_protocol_proto);\n\n    config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* opts = bootstrap.mutable_static_resources()\n                       ->mutable_clusters(0)\n                       ->mutable_typed_extension_protocol_options();\n      (*opts)[NetworkFilterNames::get().ThriftProxy].PackFrom(proto_opts);\n    });\n\n    // Invent some varying, but deterministic, values to add. We use the add method instead of\n    // execute because the default execute params contains a set and the ordering can vary across\n    // generated payloads.\n    std::vector<std::string> args({\n        fmt::format(\"{}\", (static_cast<int>(downstream_transport) << 8) +\n                              static_cast<int>(downstream_protocol)),\n        fmt::format(\"{}\", (static_cast<int>(upstream_transport) << 8) +\n                              static_cast<int>(upstream_protocol)),\n    });\n\n    PayloadOptions downstream_opts(downstream_transport, downstream_protocol, DriverMode::Success,\n                                   {}, \"add\", args);\n    preparePayloads(downstream_opts, downstream_request_bytes_, downstream_response_bytes_);\n\n    PayloadOptions upstream_opts(upstream_transport, upstream_protocol, DriverMode::Success, {},\n                                 \"add\", args);\n    preparePayloads(upstream_opts, upstream_request_bytes_, upstream_response_bytes_);\n\n    BaseThriftIntegrationTest::initialize();\n  }\n\n  Buffer::OwnedImpl downstream_request_bytes_;\n  Buffer::OwnedImpl downstream_response_bytes_;\n  Buffer::OwnedImpl upstream_request_bytes_;\n  Buffer::OwnedImpl upstream_response_bytes_;\n};\n\nstatic std::string paramToString(\n    const TestParamInfo<std::tuple<TransportType, ProtocolType, TransportType, ProtocolType>>&\n        params) {\n  TransportType downstream_transport, upstream_transport;\n  ProtocolType 
downstream_protocol, upstream_protocol;\n  std::tie(downstream_transport, downstream_protocol, upstream_transport, upstream_protocol) =\n      params.param;\n\n  return fmt::format(\"From{}{}To{}{}\", transportNameForTest(downstream_transport),\n                     protocolNameForTest(downstream_protocol),\n                     transportNameForTest(upstream_transport),\n                     protocolNameForTest(upstream_protocol));\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    TransportsAndProtocols, ThriftTranslationIntegrationTest,\n    Combine(Values(TransportType::Framed, TransportType::Unframed, TransportType::Header),\n            Values(ProtocolType::Binary, ProtocolType::Compact),\n            Values(TransportType::Framed, TransportType::Unframed, TransportType::Header),\n            Values(ProtocolType::Binary, ProtocolType::Compact)),\n    paramToString);\n\n// Tests that the proxy will translate between different downstream and upstream transports and\n// protocols.\nTEST_P(ThriftTranslationIntegrationTest, Translates) {\n  initialize();\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(downstream_request_bytes_.toString()));\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(upstream_request_bytes_.length(), &data));\n  Buffer::OwnedImpl upstream_request(data);\n  EXPECT_EQ(upstream_request_bytes_.toString(), upstream_request.toString());\n\n  ASSERT_TRUE(fake_upstream_connection->write(upstream_response_bytes_.toString()));\n\n  tcp_client->waitForData(downstream_response_bytes_.toString());\n  tcp_client->close();\n\n  EXPECT_TRUE(\n      TestUtility::buffersEqual(Buffer::OwnedImpl(tcp_client->data()), downstream_response_bytes_));\n\n  Stats::CounterSharedPtr counter = test_server_->counter(\"thrift.thrift_stats.request_call\");\n  
EXPECT_EQ(1U, counter->value());\n  counter = test_server_->counter(\"thrift.thrift_stats.response_success\");\n  EXPECT_EQ(1U, counter->value());\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/common/exception.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/framed_transport_impl.h\"\n#include \"extensions/filters/network/thrift_proxy/twitter_protocol_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nclass TestTwitterProtocolImpl : public TwitterProtocolImpl {\npublic:\n  void readRequestHeaderForTest(Buffer::Instance& buffer, MessageMetadata& metadata) {\n    ThriftObjectPtr thrift_obj = newHeader();\n    EXPECT_TRUE(thrift_obj->onData(buffer));\n    updateMetadataWithRequestHeader(*thrift_obj, metadata);\n  }\n  void readResponseHeaderForTest(Buffer::Instance& buffer, MessageMetadata& metadata) {\n    ThriftObjectPtr thrift_obj = newHeader();\n    EXPECT_TRUE(thrift_obj->onData(buffer));\n    updateMetadataWithResponseHeader(*thrift_obj, metadata);\n  }\n  void writeRequestHeaderForTest(Buffer::Instance& buffer, const MessageMetadata& metadata) {\n    writeRequestHeader(buffer, metadata);\n  }\n  void writeResponseHeaderForTest(Buffer::Instance& buffer, const MessageMetadata& metadata) {\n    writeResponseHeader(buffer, metadata);\n  }\n};\n\nclass TwitterProtocolTest : public testing::Test {\npublic:\n  void clearMetadata() { metadata_ = std::make_shared<MessageMetadata>(); }\n\n  void resetMetadata() {\n    clearMetadata();\n    metadata_->setMethodName(\"-\");\n    metadata_->setMessageType(MessageType::Oneway);\n    metadata_->setSequenceId(1);\n  }\n\n  void expectMetadata(const std::string& name, MessageType msg_type, int32_t seq_id) {\n    EXPECT_TRUE(metadata_->hasMethodName());\n    EXPECT_EQ(name, metadata_->methodName());\n\n    EXPECT_TRUE(metadata_->hasMessageType());\n    EXPECT_EQ(msg_type, 
metadata_->messageType());\n\n    EXPECT_TRUE(metadata_->hasSequenceId());\n    EXPECT_EQ(seq_id, metadata_->sequenceId());\n\n    EXPECT_FALSE(metadata_->hasFrameSize());\n    EXPECT_FALSE(metadata_->hasProtocol());\n    EXPECT_FALSE(metadata_->hasAppException());\n    EXPECT_EQ(metadata_->headers().size(), 0);\n  }\n\n  void addMessageStart(Buffer::Instance& buffer, const std::string& name = \"the_name\",\n                       MessageType msg_type = MessageType::Call, int32_t seq_id = 101) {\n    buffer.writeBEInt<int16_t>(0x8001);\n    buffer.writeByte(0);\n    buffer.writeByte(msg_type);\n    buffer.writeBEInt<int32_t>(name.length());\n    buffer.add(name);\n    buffer.writeBEInt<int32_t>(seq_id);\n  }\n\n  void addUpgradedMessageStart(Buffer::Instance& buffer, const std::string& name = \"the_name\",\n                               MessageType msg_type = MessageType::Call, int32_t seq_id = 101,\n                               int64_t trace_id = 1, int64_t span_id = 2,\n                               const std::string& client_id = \"test_client\") {\n    clearMetadata();\n\n    TestTwitterProtocolImpl proto;\n    metadata_->setTraceId(trace_id);\n    metadata_->setSpanId(span_id);\n    metadata_->headers().addCopy(Http::LowerCaseString(\":client-id\"), client_id);\n\n    proto.writeRequestHeaderForTest(buffer, *metadata_);\n    addMessageStart(buffer, name, msg_type, seq_id);\n\n    clearMetadata();\n  }\n\n  void addUpgradedReplyStart(Buffer::Instance& buffer, const std::string& name = \"the_name\",\n                             MessageType msg_type = MessageType::Reply, int32_t seq_id = 101,\n                             int64_t trace_id = 1, int64_t span_id = 2) {\n    clearMetadata();\n\n    TestTwitterProtocolImpl proto;\n\n    metadata_->mutableSpans().emplace_back(trace_id, \"\", span_id, absl::optional<int64_t>(),\n                                           AnnotationList(), BinaryAnnotationList(), false);\n    
metadata_->headers().addCopy(Http::LowerCaseString(\"test-header\"), \"test-header-value\");\n\n    proto.writeResponseHeaderForTest(buffer, *metadata_);\n    addMessageStart(buffer, name, msg_type, seq_id);\n\n    clearMetadata();\n  }\n\n  void addUpgradeMessage(Buffer::Instance& buffer, int32_t seq_id = 100) {\n    addMessageStart(buffer, TwitterProtocolImpl::upgradeMethodName(), MessageType::Call, seq_id);\n  }\n\n  // Mutates the given TwitterProtocolImpl to be in the upgraded state for processing request\n  // (call/oneway) messages.\n  void upgradeRequestProto(TwitterProtocolImpl& proto) {\n    Buffer::OwnedImpl buffer;\n    clearMetadata();\n\n    addUpgradeMessage(buffer);\n    buffer.writeByte(0); // empty connection options\n\n    EXPECT_TRUE(proto.readMessageBegin(buffer, *metadata_));\n\n    DecoderEventHandlerSharedPtr decoder = proto.upgradeRequestDecoder();\n    EXPECT_NE(nullptr, decoder);\n\n    std::string name;\n    FieldType field_type;\n    int16_t field_id;\n\n    EXPECT_TRUE(proto.readStructBegin(buffer, name));\n    EXPECT_EQ(FilterStatus::Continue, decoder->structBegin(name));\n\n    EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n    EXPECT_EQ(FieldType::Stop, field_type);\n    EXPECT_EQ(FilterStatus::Continue, decoder->fieldBegin(name, field_type, field_id));\n\n    EXPECT_TRUE(proto.readStructEnd(buffer));\n    EXPECT_EQ(FilterStatus::Continue, decoder->structEnd());\n\n    EXPECT_TRUE(proto.readMessageEnd(buffer));\n    EXPECT_EQ(FilterStatus::Continue, decoder->messageEnd());\n    EXPECT_EQ(FilterStatus::Continue, decoder->transportEnd());\n    EXPECT_EQ(buffer.length(), 0);\n\n    DirectResponsePtr response = proto.upgradeResponse(*decoder);\n    EXPECT_NE(nullptr, response);\n    EXPECT_TRUE(proto.upgraded().has_value());\n    EXPECT_TRUE(proto.upgraded().value());\n  }\n\n  // Mutates the given TwitterProtocolImpl to be in the upgraded state for processing response\n  // (reply/exception) messages.\n  void 
upgradeResponseProto(TwitterProtocolImpl& proto) {\n    FramedTransportImpl transport;\n    ThriftConnectionState conn_state;\n    clearMetadata();\n\n    ThriftObjectPtr response_decoder;\n    {\n      Buffer::OwnedImpl buffer;\n\n      response_decoder = proto.attemptUpgrade(transport, conn_state, buffer);\n      EXPECT_NE(nullptr, response_decoder);\n    }\n\n    {\n      Buffer::OwnedImpl buffer;\n      buffer.writeBEInt<int32_t>(TwitterProtocolImpl::upgradeMethodName().length() + 13);\n      addSeq(buffer, {\n                         0x80, 0x01, 0x00, 0x02, // binary, reply\n                     });\n      buffer.writeBEInt<int32_t>(TwitterProtocolImpl::upgradeMethodName().length());\n      buffer.add(TwitterProtocolImpl::upgradeMethodName());\n      buffer.writeBEInt<int32_t>(0);\n      buffer.writeByte(0); // upgrade response stop field\n\n      EXPECT_TRUE(response_decoder->onData(buffer));\n    }\n\n    proto.completeUpgrade(conn_state, *response_decoder);\n\n    EXPECT_TRUE(conn_state.upgradeAttempted());\n    EXPECT_TRUE(conn_state.isUpgraded());\n    EXPECT_TRUE(proto.upgraded().has_value());\n    EXPECT_TRUE(proto.upgraded().value());\n  }\n\n  MessageMetadataSharedPtr metadata_{new MessageMetadata()};\n};\n\nTEST_F(TwitterProtocolTest, Name) {\n  TwitterProtocolImpl proto;\n  EXPECT_EQ(proto.name(), \"twitter\");\n}\n\nTEST_F(TwitterProtocolTest, Type) {\n  TwitterProtocolImpl proto;\n  EXPECT_EQ(proto.type(), ProtocolType::Twitter);\n}\n\n// Tests readMessageBegin with insufficient data.\nTEST_F(TwitterProtocolTest, ReadMessageBeginInsufficientData) {\n  TwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  resetMetadata();\n\n  addRepeated(buffer, 11, 'x');\n\n  EXPECT_FALSE(proto.readMessageBegin(buffer, *metadata_));\n  expectMetadata(\"-\", MessageType::Oneway, 1);\n  EXPECT_EQ(buffer.length(), 11);\n  EXPECT_FALSE(proto.upgraded().has_value());\n}\n\n// Tests readMessageBegin when the initial message does not upgrade to twitter 
protocol.\nTEST_F(TwitterProtocolTest, ReadMessageBeginNoUpgrade) {\n  TwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  resetMetadata();\n\n  addMessageStart(buffer);\n\n  EXPECT_TRUE(proto.readMessageBegin(buffer, *metadata_));\n  expectMetadata(\"the_name\", MessageType::Call, 101);\n  EXPECT_FALSE(metadata_->isProtocolUpgradeMessage());\n\n  EXPECT_EQ(buffer.length(), 0);\n  EXPECT_TRUE(proto.upgraded().has_value());\n  EXPECT_FALSE(proto.upgraded().value());\n\n  // Upgrade only works on first request attempt\n  addMessageStart(buffer, proto.upgradeMethodName());\n\n  EXPECT_TRUE(proto.readMessageBegin(buffer, *metadata_));\n  EXPECT_EQ(proto.upgradeMethodName(), metadata_->methodName());\n  EXPECT_FALSE(metadata_->isProtocolUpgradeMessage());\n  EXPECT_TRUE(proto.upgraded().has_value());\n  EXPECT_FALSE(proto.upgraded().value());\n}\n\n// Tests readMessageBegin when the initial message upgrades to twitter protocol\nTEST_F(TwitterProtocolTest, ReadMessageBeginWithUpgrade) {\n  TwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  resetMetadata();\n\n  addUpgradeMessage(buffer);\n\n  EXPECT_TRUE(proto.readMessageBegin(buffer, *metadata_));\n  expectMetadata(TwitterProtocolImpl::upgradeMethodName(), MessageType::Call, 100);\n  EXPECT_EQ(buffer.length(), 0);\n  EXPECT_FALSE(proto.upgraded().has_value());\n  EXPECT_TRUE(metadata_->isProtocolUpgradeMessage());\n}\n\n// Tests readMessageBegin/upgradeRequestDecoder/upgradeResponse sequence.\nTEST_F(TwitterProtocolTest, RequestUpgradeSequence) {\n  TwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  resetMetadata();\n\n  addUpgradeMessage(buffer);\n  buffer.writeByte(0); // empty connection options\n\n  EXPECT_TRUE(proto.readMessageBegin(buffer, *metadata_));\n  expectMetadata(TwitterProtocolImpl::upgradeMethodName(), MessageType::Call, 100);\n  EXPECT_EQ(buffer.length(), 1);\n  EXPECT_FALSE(proto.upgraded().has_value());\n  EXPECT_TRUE(proto.supportsUpgrade());\n\n  DecoderEventHandlerSharedPtr 
decoder = proto.upgradeRequestDecoder();\n  EXPECT_NE(nullptr, decoder);\n\n  std::string name;\n  FieldType field_type;\n  int16_t field_id;\n\n  EXPECT_TRUE(proto.readStructBegin(buffer, name));\n  EXPECT_EQ(FilterStatus::Continue, decoder->structBegin(name));\n\n  EXPECT_TRUE(proto.readFieldBegin(buffer, name, field_type, field_id));\n  EXPECT_EQ(FieldType::Stop, field_type);\n  EXPECT_EQ(FilterStatus::Continue, decoder->fieldBegin(name, field_type, field_id));\n\n  EXPECT_TRUE(proto.readStructEnd(buffer));\n  EXPECT_EQ(FilterStatus::Continue, decoder->structEnd());\n\n  EXPECT_TRUE(proto.readMessageEnd(buffer));\n  EXPECT_EQ(FilterStatus::Continue, decoder->messageEnd());\n  EXPECT_EQ(FilterStatus::Continue, decoder->transportEnd());\n  EXPECT_EQ(buffer.length(), 0);\n\n  DirectResponsePtr response = proto.upgradeResponse(*decoder);\n  EXPECT_NE(nullptr, response);\n\n  Buffer::OwnedImpl response_buffer;\n  EXPECT_EQ(DirectResponse::ResponseType::SuccessReply,\n            response->encode(*metadata_, proto, response_buffer));\n\n  Buffer::OwnedImpl expected_buffer;\n  addSeq(expected_buffer,\n         {\n             0x80,\n             0x01,\n             0x00,\n             0x02, // binary, reply\n             0x00,\n             0x00,\n             0x00,\n             static_cast<uint8_t>(TwitterProtocolImpl::upgradeMethodName().length()),\n         });\n  expected_buffer.add(TwitterProtocolImpl::upgradeMethodName());\n  addSeq(expected_buffer, {\n                              0x00, 0x00, 0x00, 0x64, // sequence number\n                              0x00,                   // upgrade response stop field\n                          });\n  EXPECT_EQ(expected_buffer.toString(), response_buffer.toString());\n\n  EXPECT_TRUE(proto.upgraded().has_value());\n  EXPECT_TRUE(proto.upgraded().value());\n}\n\n// Tests happy path attemptUpgrade/completeUpgrade sequence\nTEST_F(TwitterProtocolTest, ResponseUpgradeSequence) {\n  FramedTransportImpl transport;\n  
TwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  ThriftConnectionState conn_state;\n  resetMetadata();\n\n  EXPECT_TRUE(proto.supportsUpgrade());\n\n  ThriftObjectPtr response_decoder = proto.attemptUpgrade(transport, conn_state, buffer);\n  EXPECT_FALSE(conn_state.upgradeAttempted());\n  EXPECT_FALSE(conn_state.isUpgraded());\n  EXPECT_NE(nullptr, response_decoder);\n\n  Buffer::OwnedImpl expected_buffer;\n  expected_buffer.writeBEInt<int32_t>(TwitterProtocolImpl::upgradeMethodName().length() + 13);\n  addSeq(expected_buffer, {\n                              0x80, 0x01, 0x00, 0x01, // binary, call\n                          });\n  expected_buffer.writeBEInt<int32_t>(TwitterProtocolImpl::upgradeMethodName().length());\n  expected_buffer.add(TwitterProtocolImpl::upgradeMethodName());\n  expected_buffer.writeBEInt<int32_t>(0);\n  expected_buffer.writeByte(0); // connection options stop field\n  EXPECT_EQ(expected_buffer.toString(), buffer.toString());\n\n  Buffer::OwnedImpl response_buffer;\n  response_buffer.writeBEInt<int32_t>(TwitterProtocolImpl::upgradeMethodName().length() + 13);\n  addSeq(response_buffer, {\n                              0x80, 0x01, 0x00, 0x02, // binary, reply\n                          });\n  response_buffer.writeBEInt<int32_t>(TwitterProtocolImpl::upgradeMethodName().length());\n  response_buffer.add(TwitterProtocolImpl::upgradeMethodName());\n  response_buffer.writeBEInt<int32_t>(0);\n  response_buffer.writeByte(0); // upgrade response stop field\n\n  EXPECT_TRUE(response_decoder->onData(response_buffer));\n\n  EXPECT_FALSE(conn_state.upgradeAttempted());\n  EXPECT_FALSE(conn_state.isUpgraded());\n  EXPECT_FALSE(proto.upgraded().has_value());\n\n  proto.completeUpgrade(conn_state, *response_decoder);\n\n  EXPECT_TRUE(conn_state.upgradeAttempted());\n  EXPECT_TRUE(conn_state.isUpgraded());\n  EXPECT_TRUE(proto.upgraded().has_value());\n  EXPECT_TRUE(proto.upgraded().value());\n\n  // Test that a subsequent upgrade attempt is 
skipped (since we've already upgraded)\n  {\n    TwitterProtocolImpl proto2;\n    EXPECT_FALSE(proto2.upgraded().has_value());\n\n    EXPECT_EQ(nullptr, proto2.attemptUpgrade(transport, conn_state, buffer));\n    EXPECT_TRUE(proto2.upgraded().has_value());\n    EXPECT_TRUE(proto2.upgraded().value());\n  }\n}\n\n// Tests rejection of the attemptUpgrade/completeUpgrade sequence\nTEST_F(TwitterProtocolTest, ResponseUpgradeRejectedSequence) {\n  FramedTransportImpl transport;\n  TwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  ThriftConnectionState conn_state;\n  resetMetadata();\n\n  EXPECT_TRUE(proto.supportsUpgrade());\n\n  ThriftObjectPtr response_decoder = proto.attemptUpgrade(transport, conn_state, buffer);\n  EXPECT_FALSE(conn_state.upgradeAttempted());\n  EXPECT_FALSE(conn_state.isUpgraded());\n  EXPECT_NE(nullptr, response_decoder);\n  EXPECT_NE(0, buffer.length());\n\n  Buffer::OwnedImpl response_buffer;\n  std::string response_err =\n      fmt::format(\"Unknown function {}\", TwitterProtocolImpl::upgradeMethodName());\n  response_buffer.writeBEInt<int32_t>(TwitterProtocolImpl::upgradeMethodName().length() +\n                                      response_err.length() + 27);\n  addSeq(response_buffer, {\n                              0x80, 0x01, 0x00, 0x03, // binary, exception\n                          });\n  response_buffer.writeBEInt<int32_t>(TwitterProtocolImpl::upgradeMethodName().length());\n  response_buffer.add(TwitterProtocolImpl::upgradeMethodName());\n  response_buffer.writeBEInt<int32_t>(0);\n  addSeq(response_buffer, {\n                              0x0B, 0x00, 0x01, // string field 1\n                          });\n  response_buffer.writeBEInt<int32_t>(response_err.length());\n  response_buffer.add(response_err);\n  addSeq(response_buffer,\n         {\n             0x08, 0x00, 0x02, // int field 2\n             0x00, 0x00, 0x00, static_cast<uint8_t>(AppExceptionType::UnknownMethod),\n             0x00, // stop field\n         });\n\n  
EXPECT_TRUE(response_decoder->onData(response_buffer));\n\n  EXPECT_FALSE(conn_state.upgradeAttempted());\n  EXPECT_FALSE(conn_state.isUpgraded());\n  EXPECT_FALSE(proto.upgraded().has_value());\n\n  proto.completeUpgrade(conn_state, *response_decoder);\n\n  EXPECT_TRUE(conn_state.upgradeAttempted());\n  EXPECT_FALSE(conn_state.isUpgraded());\n  EXPECT_TRUE(proto.upgraded().has_value());\n  EXPECT_FALSE(proto.upgraded().value());\n}\n\n// Tests parsing a RequestHeader\nTEST_F(TwitterProtocolTest, ParseRequestHeader) {\n  TestTwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer,\n         {\n             0x0A, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // trace_id\n             0x0A, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, // span_id\n             0x0A, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, // parent_span_id\n             0x02, 0x00, 0x05, 0x01,                                           // sampled\n             0x0C, 0x00, 0x06,                                                 // client-id struct\n             0x0B, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10,                         // string, length 16\n             0x74, 0x68, 0x72, 0x69, 0x66, 0x74, 0x2D, 0x63, 0x6C, 0x69, 0x65,\n             0x6E, 0x74, 0x2D, 0x69, 0x64,\n             0x00,                                                             // stop client-id\n             0x0A, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, // flags\n             0x0F, 0x00, 0x08, 0x0C, 0x00, 0x00, 0x00, 0x02,                   // contexts, size 2\n             0x0B, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x6B, 0x31,             // key string\n             0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x76, 0x31,             // value string\n             0x00,                                                             // stop context 1\n             0x0B, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x6B, 0x32,             // key string\n             
0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x76, 0x32,             // value string\n             0x00,                                                             // stop context 2\n             0x0B, 0x00, 0x09, 0x00, 0x00, 0x00, 0x04, 0x64, 0x65, 0x73, 0x74, // dest\n             0x0F, 0x00, 0x0A, 0x0C, 0x00, 0x00, 0x00, 0x02,                   // delegations (2)\n             0x0B, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x73, 0x31,             // src string\n             0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x64, 0x31,             // dst string\n             0x00,                                                             // stop delegation 1\n             0x0B, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x73, 0x32,             // src string\n             0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x64, 0x32,             // dst string\n             0x00,                                                             // stop delegation 2\n             0x0A, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // trace_id_high\n             0x00,                                                             // stop\n         });\n\n  proto.readRequestHeaderForTest(buffer, *metadata_);\n\n  EXPECT_TRUE(metadata_->traceId());\n  EXPECT_EQ(1, *metadata_->traceId());\n  EXPECT_TRUE(metadata_->traceIdHigh());\n  EXPECT_EQ(2, *metadata_->traceIdHigh());\n  EXPECT_TRUE(metadata_->spanId());\n  EXPECT_EQ(100, *metadata_->spanId());\n  EXPECT_TRUE(metadata_->parentSpanId());\n  EXPECT_EQ(10, *metadata_->parentSpanId());\n  EXPECT_TRUE(metadata_->sampled().has_value());\n  EXPECT_TRUE(metadata_->sampled());\n  EXPECT_TRUE(metadata_->flags());\n  EXPECT_EQ(5, *metadata_->flags());\n\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(6, test_headers.size());\n\n  EXPECT_EQ(\"thrift-client-id\", test_headers.get_(\":client-id\"));\n  EXPECT_EQ(\"dest\", test_headers.get_(\":dest\"));\n\n  // Delegations\n  EXPECT_EQ(\"d1\", 
test_headers.get_(\":d:s1\"));\n  EXPECT_EQ(\"d2\", test_headers.get_(\":d:s2\"));\n\n  // Contexts\n  EXPECT_EQ(\"v1\", test_headers.get_(\"k1\"));\n  EXPECT_EQ(\"v2\", test_headers.get_(\"k2\"));\n}\n\n// Tests parsing an empty RequestHeader\nTEST_F(TwitterProtocolTest, ParseEmptyRequestHeader) {\n  TestTwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {\n                     0x00,\n                 });\n\n  proto.readRequestHeaderForTest(buffer, *metadata_);\n\n  // trace_id and span_id are not optional fields, so they get default values when missing\n  EXPECT_TRUE(metadata_->traceId());\n  EXPECT_EQ(0, *metadata_->traceId());\n  EXPECT_FALSE(metadata_->traceIdHigh());\n  EXPECT_TRUE(metadata_->spanId());\n  EXPECT_EQ(0, *metadata_->spanId());\n  EXPECT_FALSE(metadata_->parentSpanId());\n  EXPECT_FALSE(metadata_->sampled().has_value());\n  EXPECT_FALSE(metadata_->flags());\n  EXPECT_TRUE(metadata_->spans().empty());\n\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(0, test_headers.size());\n}\n\n// Test writing a RequestHeader\nTEST_F(TwitterProtocolTest, WriteRequestHeader) {\n  metadata_->setTraceId(1);\n  metadata_->setTraceIdHigh(2);\n  metadata_->setSpanId(100);\n  metadata_->setParentSpanId(10);\n  metadata_->setSampled(true);\n  metadata_->setFlags(5);\n  Http::HeaderMap& headers = metadata_->headers();\n  headers.addCopy(Http::LowerCaseString(\":client-id\"), \"thrift-client-id\");\n  headers.addCopy(Http::LowerCaseString(\":dest\"), \"dest\");\n  headers.addCopy(Http::LowerCaseString(\":d:s1\"), \"d1\");\n  headers.addCopy(Http::LowerCaseString(\"key\"), \"value\");\n  headers.addCopy(Http::LowerCaseString(\"\"), \"value\"); // ignored\n\n  TestTwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeRequestHeaderForTest(buffer, *metadata_);\n  clearMetadata();\n\n  proto.readRequestHeaderForTest(buffer, *metadata_);\n\n  EXPECT_EQ(1, *metadata_->traceId());\n  EXPECT_EQ(2, 
*metadata_->traceIdHigh());\n  EXPECT_EQ(100, *metadata_->spanId());\n  EXPECT_EQ(10, *metadata_->parentSpanId());\n  EXPECT_TRUE(*metadata_->sampled());\n  EXPECT_EQ(5, *metadata_->flags());\n\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(4, test_headers.size());\n  EXPECT_EQ(\"thrift-client-id\", test_headers.get_(\":client-id\"));\n  EXPECT_EQ(\"dest\", test_headers.get_(\":dest\"));\n  EXPECT_EQ(\"d1\", test_headers.get_(\":d:s1\"));\n  EXPECT_EQ(\"value\", test_headers.get_(\"key\"));\n}\n\n// Test writing a mostly empty RequestHeader\nTEST_F(TwitterProtocolTest, WriteMostlyEmptyRequestHeader) {\n  TestTwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeRequestHeaderForTest(buffer, *metadata_);\n  clearMetadata();\n\n  proto.readRequestHeaderForTest(buffer, *metadata_);\n\n  EXPECT_EQ(0, *metadata_->traceId());\n  EXPECT_EQ(0, *metadata_->spanId());\n\n  EXPECT_FALSE(metadata_->traceIdHigh());\n  EXPECT_FALSE(metadata_->parentSpanId());\n  EXPECT_FALSE(metadata_->sampled());\n  EXPECT_FALSE(metadata_->flags());\n\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(0, test_headers.size());\n}\n\n// Tests parsing of ResponseHeader structs\nTEST_F(TwitterProtocolTest, ParseResponseHeader) {\n  TestTwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer,\n         {\n             0x0F, 0x00, 0x01, 0x0C, 0x00, 0x00, 0x00, 0x02,                   // spans list, size 2\n             0x0A, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // trace_id\n             0x0B, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x73, 0x31,             // name\n             0x0A, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, // span_id\n             0x0A, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, // parent_span_id\n             0x0F, 0x00, 0x06, 0x0C, 0x00, 0x00, 0x00, 0x02,                   // annotations (2)\n             0x0A, 0x00, 0x01, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, // timestamp\n             0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x61, 0x31,             // value\n             0x00,                                                             // stop annotation 1\n             0x0A, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x0D, 0x40, // timestamp\n             0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x61, 0x32,             // value\n             0x0C, 0x00, 0x03,                                                 // endpoint struct\n             0x08, 0x00, 0x01, 0xC0, 0xA8, 0x00, 0x01,                         // ipv4\n             0x06, 0x00, 0x02, 0x1F, 0x40,                                     // port\n             0x0B, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x73, 0x65, 0x72, 0x76, // service_name\n             0x69, 0x63, 0x65,\n             0x00,                                                             // stop endpoint\n             0x00,                                                             // stop annotation 2\n             0x0F, 0x00, 0x08, 0x0C, 0x00, 0x00, 0x00, 0x02,                   // bin anno (2)\n             0x0B, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x62, 0x61, 0x6B, 0x31, // key\n             0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x62, 0x61, 0x76, 0x31, // value\n             0x08, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06,                         // annotation_type\n             0x00,                                                             // stop bi anno 1\n             0x0B, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x62, 0x61, 0x6B, 0x32, // key\n             0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x62, 0x61, 0x76, 0x32, // value\n             0x08, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06,                         // annotation_type\n             0x0C, 0x00, 0x04,                                                 // endpoint struct\n             0x08, 0x00, 0x01, 0xC0, 0xA8, 0x00, 0x02,                         // ipv4\n             0x06, 0x00, 0x02, 0x23, 
0x28,                                     // port\n             0x00,                                                             // stop endpoint\n             0x00,                                                             // stop bin anno 2\n             0x02, 0x00, 0x09, 0x01,                                           // debug\n             0x00,                                                             // stop span 1\n             0x0A, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // trace_id\n             0x0B, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x73, 0x32,             // name\n             0x0A, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC8, // span_id\n             0x02, 0x00, 0x09, 0x00,                                           // debug\n             0x00,                                                             // stop span 2\n             0x0F, 0x00, 0x02, 0x0C, 0x00, 0x00, 0x00, 0x02,                   // contexts, size 2\n             0x0B, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x6B, 0x31,             // key\n             0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x76, 0x31,             // value\n             0x00,                                                             // stop context 1\n             0x0B, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x6B, 0x32,             // key\n             0x0B, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x76, 0x32,             // value\n             0x00,                                                             // stop context 2\n             0x00,                                                             // stop span 2\n         });\n\n  clearMetadata();\n  proto.readResponseHeaderForTest(buffer, *metadata_);\n\n  EXPECT_EQ(2, metadata_->spans().size());\n  {\n    const Span& span = metadata_->spans().front();\n    EXPECT_EQ(1, span.trace_id_);\n    EXPECT_EQ(\"s1\", span.name_);\n    EXPECT_EQ(100, span.span_id_);\n    EXPECT_TRUE(span.parent_span_id_);\n    EXPECT_EQ(10, 
*span.parent_span_id_);\n    EXPECT_EQ(2, span.annotations_.size());\n    {\n      const Annotation& anno = span.annotations_.front();\n      EXPECT_EQ(100000, anno.timestamp_);\n      EXPECT_EQ(\"a1\", anno.value_);\n      EXPECT_FALSE(anno.host_);\n    }\n    {\n      const Annotation& anno = span.annotations_.back();\n      EXPECT_EQ(200000, anno.timestamp_);\n      EXPECT_EQ(\"a2\", anno.value_);\n      EXPECT_TRUE(anno.host_);\n      EXPECT_EQ(0xC0A80001, static_cast<uint32_t>(anno.host_->ipv4_)); // 192.168.0.1\n      EXPECT_EQ(8000, anno.host_->port_);\n      EXPECT_EQ(\"service\", anno.host_->service_name_);\n    }\n    EXPECT_EQ(2, span.binary_annotations_.size());\n    {\n      const BinaryAnnotation& anno = span.binary_annotations_.front();\n      EXPECT_EQ(\"bak1\", anno.key_);\n      EXPECT_EQ(\"bav1\", anno.value_);\n      EXPECT_EQ(AnnotationType::String, anno.annotation_type_);\n    }\n    {\n      const BinaryAnnotation& anno = span.binary_annotations_.back();\n      EXPECT_EQ(\"bak2\", anno.key_);\n      EXPECT_EQ(\"bav2\", anno.value_);\n      EXPECT_EQ(AnnotationType::String, anno.annotation_type_);\n      EXPECT_TRUE(anno.host_);\n      EXPECT_EQ(0xC0A80002, static_cast<uint32_t>(anno.host_->ipv4_)); // 192.168.0.2\n      EXPECT_EQ(9000, anno.host_->port_);\n      EXPECT_EQ(\"\", anno.host_->service_name_);\n    }\n    EXPECT_TRUE(span.debug_);\n  }\n  {\n    const Span& span = metadata_->spans().back();\n    EXPECT_EQ(2, span.trace_id_);\n    EXPECT_EQ(\"s2\", span.name_);\n    EXPECT_EQ(200, span.span_id_);\n    EXPECT_FALSE(span.parent_span_id_);\n    EXPECT_TRUE(span.annotations_.empty());\n    EXPECT_TRUE(span.binary_annotations_.empty());\n    EXPECT_FALSE(span.debug_);\n  }\n\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(2, test_headers.size());\n  EXPECT_EQ(\"v1\", test_headers.get_(\"k1\"));\n  EXPECT_EQ(\"v2\", test_headers.get_(\"k2\"));\n}\n\n// Tests parsing of an empty ResponseHeader 
struct\nTEST_F(TwitterProtocolTest, ParseEmptyResponseHeader) {\n  TestTwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  addSeq(buffer, {\n                     0x00,\n                 });\n\n  proto.readResponseHeaderForTest(buffer, *metadata_);\n\n  EXPECT_TRUE(metadata_->spans().empty());\n\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(0, test_headers.size());\n}\n\n// Test writing a ResponseHeader\nTEST_F(TwitterProtocolTest, WriteResponseHeader) {\n  Http::HeaderMap& headers = metadata_->headers();\n  headers.addCopy(Http::LowerCaseString(\"key1\"), \"value1\");\n  headers.addCopy(Http::LowerCaseString(\"key2\"), \"value2\");\n\n  SpanList& spans = metadata_->mutableSpans();\n  spans.emplace_back(1, \"s1\", 100, absl::optional<int64_t>(10),\n                     AnnotationList({\n                         Annotation(100000, \"a1\", {Endpoint(0xC0A80001, 0, \"\")}),\n                         Annotation(100001, \"a2\", {}),\n                     }),\n                     BinaryAnnotationList({\n                         BinaryAnnotation(\"bak1\", \"bav1\", AnnotationType::I32,\n                                          {\n                                              Endpoint(0xC0A80002, 80, \"service_name\"),\n                                          }),\n                         BinaryAnnotation(\"bak2\", \"bav2\", AnnotationType::String, {}),\n                     }),\n                     true);\n  spans.emplace_back(2, \"s2\", 200, absl::optional<int64_t>(), AnnotationList(),\n                     BinaryAnnotationList(), false);\n  TestTwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeResponseHeaderForTest(buffer, *metadata_);\n\n  clearMetadata();\n  proto.readResponseHeaderForTest(buffer, *metadata_);\n\n  EXPECT_FALSE(metadata_->traceId());\n  EXPECT_FALSE(metadata_->traceIdHigh());\n  EXPECT_FALSE(metadata_->spanId());\n  EXPECT_FALSE(metadata_->parentSpanId());\n  
EXPECT_FALSE(metadata_->sampled());\n  EXPECT_FALSE(metadata_->flags());\n\n  const SpanList& read_spans = metadata_->spans();\n  EXPECT_EQ(2, read_spans.size());\n\n  const Span& span1 = read_spans.front();\n  EXPECT_EQ(1, span1.trace_id_);\n  EXPECT_EQ(\"s1\", span1.name_);\n  EXPECT_EQ(100, span1.span_id_);\n  EXPECT_EQ(10, *span1.parent_span_id_);\n  EXPECT_EQ(2, span1.annotations_.size());\n  const Annotation& anno1 = span1.annotations_.front();\n  EXPECT_EQ(100000, anno1.timestamp_);\n  EXPECT_EQ(\"a1\", anno1.value_);\n  EXPECT_TRUE(anno1.host_);\n  EXPECT_EQ(0xC0A80001, anno1.host_->ipv4_);\n  EXPECT_EQ(0, anno1.host_->port_);\n  EXPECT_EQ(\"\", anno1.host_->service_name_);\n  EXPECT_EQ(2, span1.binary_annotations_.size());\n  const Annotation& anno2 = span1.annotations_.back();\n  EXPECT_EQ(100001, anno2.timestamp_);\n  EXPECT_EQ(\"a2\", anno2.value_);\n  EXPECT_FALSE(anno2.host_);\n  const BinaryAnnotation& bin_anno1 = span1.binary_annotations_.front();\n  EXPECT_EQ(\"bak1\", bin_anno1.key_);\n  EXPECT_EQ(\"bav1\", bin_anno1.value_);\n  EXPECT_EQ(AnnotationType::I32, bin_anno1.annotation_type_);\n  EXPECT_TRUE(bin_anno1.host_);\n  EXPECT_EQ(0xC0A80002, bin_anno1.host_->ipv4_);\n  EXPECT_EQ(80, bin_anno1.host_->port_);\n  EXPECT_EQ(\"service_name\", bin_anno1.host_->service_name_);\n  const BinaryAnnotation& bin_anno2 = span1.binary_annotations_.back();\n  EXPECT_EQ(\"bak2\", bin_anno2.key_);\n  EXPECT_EQ(\"bav2\", bin_anno2.value_);\n  EXPECT_EQ(AnnotationType::String, bin_anno2.annotation_type_);\n  EXPECT_FALSE(bin_anno2.host_);\n\n  const Span& span2 = read_spans.back();\n  EXPECT_EQ(2, span2.trace_id_);\n  EXPECT_EQ(\"s2\", span2.name_);\n  EXPECT_EQ(200, span2.span_id_);\n  EXPECT_FALSE(span2.parent_span_id_);\n  EXPECT_TRUE(span2.annotations_.empty());\n  EXPECT_TRUE(span2.binary_annotations_.empty());\n  EXPECT_FALSE(span2.debug_);\n\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(\"value1\", 
test_headers.get_(\"key1\"));\n  EXPECT_EQ(\"value2\", test_headers.get_(\"key2\"));\n}\n\n// Test writing an empty ResponseHeader\nTEST_F(TwitterProtocolTest, WriteEmptyResponseHeader) {\n  MessageMetadata metadata;\n  TestTwitterProtocolImpl proto;\n  Buffer::OwnedImpl buffer;\n  proto.writeResponseHeaderForTest(buffer, *metadata_);\n\n  clearMetadata();\n  proto.readResponseHeaderForTest(buffer, *metadata_);\n\n  EXPECT_FALSE(metadata_->traceId());\n  EXPECT_FALSE(metadata_->traceIdHigh());\n  EXPECT_FALSE(metadata_->spanId());\n  EXPECT_FALSE(metadata_->parentSpanId());\n  EXPECT_FALSE(metadata_->sampled());\n  EXPECT_FALSE(metadata_->flags());\n\n  EXPECT_TRUE(metadata_->spans().empty());\n\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(0, test_headers.size());\n}\n\nTEST_F(TwitterProtocolTest, TestUpgradedRequestMessageBegin) {\n  TwitterProtocolImpl proto;\n  upgradeRequestProto(proto);\n\n  Buffer::OwnedImpl buffer;\n  addUpgradedMessageStart(buffer);\n\n  MessageMetadata metadata;\n  EXPECT_TRUE(proto.readMessageBegin(buffer, *metadata_));\n  EXPECT_EQ(\"the_name\", metadata_->methodName());\n  EXPECT_EQ(MessageType::Call, metadata_->messageType());\n  EXPECT_EQ(101, metadata_->sequenceId());\n  EXPECT_EQ(1, *metadata_->traceId());\n  EXPECT_EQ(2, *metadata_->spanId());\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(\"test_client\", test_headers.get_(\":client-id\"));\n}\n\nTEST_F(TwitterProtocolTest, TestUpgradedRequestMessageContinuation) {\n  Buffer::OwnedImpl buffer;\n  addUpgradedMessageStart(buffer);\n\n  for (uint64_t split = 1; split < buffer.length() - 1; split++) {\n    TwitterProtocolImpl proto;\n    upgradeRequestProto(proto);\n\n    Buffer::OwnedImpl partial_buffer;\n    uint8_t* data = static_cast<uint8_t*>(buffer.linearize(buffer.length()));\n    partial_buffer.add(data, split);\n    EXPECT_FALSE(proto.readMessageBegin(partial_buffer, *metadata_));\n\n    
partial_buffer.add(data + split, buffer.length() - split);\n    EXPECT_TRUE(proto.readMessageBegin(partial_buffer, *metadata_));\n\n    EXPECT_EQ(\"the_name\", metadata_->methodName());\n    EXPECT_EQ(MessageType::Call, metadata_->messageType());\n    EXPECT_EQ(101, metadata_->sequenceId());\n    EXPECT_EQ(1, *metadata_->traceId());\n    EXPECT_EQ(2, *metadata_->spanId());\n    Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n    EXPECT_EQ(\"test_client\", test_headers.get_(\":client-id\"));\n  }\n}\n\nTEST_F(TwitterProtocolTest, TestUpgradedReplyMessageBegin) {\n  TwitterProtocolImpl proto;\n  upgradeResponseProto(proto);\n\n  Buffer::OwnedImpl buffer;\n  addUpgradedReplyStart(buffer);\n\n  EXPECT_TRUE(proto.readMessageBegin(buffer, *metadata_));\n  EXPECT_EQ(\"the_name\", metadata_->methodName());\n  EXPECT_EQ(MessageType::Reply, metadata_->messageType());\n  EXPECT_EQ(101, metadata_->sequenceId());\n\n  EXPECT_EQ(1, metadata_->spans().size());\n  EXPECT_EQ(1, metadata_->spans().front().trace_id_);\n  EXPECT_EQ(2, metadata_->spans().front().span_id_);\n  Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n  EXPECT_EQ(\"test-header-value\", test_headers.get_(\"test-header\"));\n}\n\nTEST_F(TwitterProtocolTest, TestUpgradedReplyMessageContinuation) {\n  Buffer::OwnedImpl buffer;\n  addUpgradedReplyStart(buffer);\n\n  for (uint64_t split = 1; split < buffer.length() - 1; split++) {\n    TwitterProtocolImpl proto;\n    upgradeResponseProto(proto);\n\n    Buffer::OwnedImpl partial_buffer;\n    uint8_t* data = static_cast<uint8_t*>(buffer.linearize(buffer.length()));\n    partial_buffer.add(data, split);\n    EXPECT_FALSE(proto.readMessageBegin(partial_buffer, *metadata_));\n\n    partial_buffer.add(data + split, buffer.length() - split);\n    EXPECT_TRUE(proto.readMessageBegin(partial_buffer, *metadata_));\n\n    EXPECT_EQ(\"the_name\", metadata_->methodName());\n    EXPECT_EQ(MessageType::Reply, metadata_->messageType());\n    
EXPECT_EQ(101, metadata_->sequenceId());\n\n    EXPECT_EQ(1, metadata_->spans().size());\n    EXPECT_EQ(1, metadata_->spans().front().trace_id_);\n    EXPECT_EQ(2, metadata_->spans().front().span_id_);\n    Http::TestRequestHeaderMapImpl test_headers(metadata_->headers());\n    EXPECT_EQ(\"test-header-value\", test_headers.get_(\"test-header\"));\n  }\n}\n\nTEST_F(TwitterProtocolTest, TestUpgradedWriteMessageBegin) {\n  TwitterProtocolImpl proto;\n  upgradeRequestProto(proto);\n\n  metadata_->setMethodName(\"message\");\n  metadata_->setSequenceId(1);\n  metadata_->setTraceId(1);\n  metadata_->mutableSpans().emplace_back(100, \"\", 100, absl::optional<int64_t>(), AnnotationList(),\n                                         BinaryAnnotationList(), false);\n\n  {\n    // Call\n    Buffer::OwnedImpl buffer;\n    metadata_->setMessageType(MessageType::Call);\n    proto.writeMessageBegin(buffer, *metadata_);\n\n    EXPECT_EQ(std::string(\"\\xA\\0\\x1\\0\\0\\0\\0\\0\\0\\0\\x1\" // trace_id\n                          \"\\xA\\0\\x2\\0\\0\\0\\0\\0\\0\\0\\0\"  // span_id\n                          \"\\0\"                        // end request header\n                          \"\\x80\\x1\\0\\x1\\0\\0\\0\\x7message\\0\\0\\0\\x1\",\n                          42),\n              buffer.toString());\n  }\n  {\n    // Oneway\n    Buffer::OwnedImpl buffer;\n    metadata_->setMessageType(MessageType::Oneway);\n    proto.writeMessageBegin(buffer, *metadata_);\n\n    EXPECT_EQ(std::string(\"\\xA\\0\\x1\\0\\0\\0\\0\\0\\0\\0\\x1\" // trace_id\n                          \"\\xA\\0\\x2\\0\\0\\0\\0\\0\\0\\0\\0\"  // span_id\n                          \"\\0\"                        // end request header\n                          \"\\x80\\x1\\0\\x4\\0\\0\\0\\x7message\\0\\0\\0\\x1\",\n                          42),\n              buffer.toString());\n  }\n\n  {\n    // Reply\n    Buffer::OwnedImpl buffer;\n    metadata_->setMessageType(MessageType::Reply);\n    
proto.writeMessageBegin(buffer, *metadata_);\n\n    EXPECT_EQ(std::string(\"\\xF\\0\\x1\\xC\\0\\0\\0\\x1\"       // spans\n                          \"\\xA\\0\\x1\\0\\0\\0\\0\\0\\0\\0\\x64\" // span: trace_id\n                          \"\\xB\\0\\x3\\0\\0\\0\\0\"           // span: name\n                          \"\\xA\\0\\x4\\0\\0\\0\\0\\0\\0\\0\\x64\" // span: id\n                          \"\\xF\\0\\x6\\xC\\0\\0\\0\\0\"        // span: annotations\n                          \"\\xF\\0\\x8\\xC\\0\\0\\0\\0\"        // span: binary_annotations\n                          \"\\x2\\0\\x9\\0\"                 // span: debug\n                          \"\\0\"                         // end span\n                          \"\\0\"                         // end response header\n                          \"\\x80\\x1\\0\\x2\\0\\0\\0\\x7message\\0\\0\\0\\x1\",\n                          78),\n              buffer.toString());\n  }\n\n  {\n    // Exception\n    Buffer::OwnedImpl buffer;\n    metadata_->setMessageType(MessageType::Exception);\n    proto.writeMessageBegin(buffer, *metadata_);\n\n    EXPECT_EQ(std::string(\"\\xF\\0\\x1\\xC\\0\\0\\0\\x1\"       // spans\n                          \"\\xA\\0\\x1\\0\\0\\0\\0\\0\\0\\0\\x64\" // span: trace_id\n                          \"\\xB\\0\\x3\\0\\0\\0\\0\"           // span: name\n                          \"\\xA\\0\\x4\\0\\0\\0\\0\\0\\0\\0\\x64\" // span: id\n                          \"\\xF\\0\\x6\\xC\\0\\0\\0\\0\"        // span: annotations\n                          \"\\xF\\0\\x8\\xC\\0\\0\\0\\0\"        // span: binary_annotations\n                          \"\\x2\\0\\x9\\0\"                 // span: debug\n                          \"\\0\"                         // end span\n                          \"\\0\"                         // end response header\n                          \"\\x80\\x1\\0\\x3\\0\\0\\0\\x7message\\0\\0\\0\\x1\",\n                          78),\n              buffer.toString());\n  }\n}\n\n// Tests 
isUpgradePrefix\nTEST_F(TwitterProtocolTest, IsUpgradePrefix) {\n  EXPECT_EQ(27, TwitterProtocolImpl::upgradeMethodName().length());\n\n  // Doesn't start with magic bytes\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string(12, 'x'));\n    EXPECT_FALSE(TwitterProtocolImpl::isUpgradePrefix(buffer));\n  }\n\n  // Message name length too short\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string(\"\\x80\\x01xx\\0\\0\\0\\x1Axxxx\", 12));\n    EXPECT_FALSE(TwitterProtocolImpl::isUpgradePrefix(buffer));\n  }\n\n  // Message name length too long\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string(\"\\x80\\x01xx\\0\\0\\0\\x1Cxxxx\", 12));\n    EXPECT_FALSE(TwitterProtocolImpl::isUpgradePrefix(buffer));\n  }\n\n  // Message name doesn't match expected\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string(\"\\x80\\x01xx\\0\\0\\0\\x1Bname\", 12));\n    EXPECT_FALSE(TwitterProtocolImpl::isUpgradePrefix(buffer));\n  }\n\n  // Message name doesn't match expected\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string(\"\\x80\\x01xx\\0\\0\\0\\x1B__caNOPE\", 16));\n    EXPECT_FALSE(TwitterProtocolImpl::isUpgradePrefix(buffer));\n  }\n\n  // Minimal match\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string(\"\\x80\\x01xx\\0\\0\\0\\x1B__ca\", 12));\n    EXPECT_TRUE(TwitterProtocolImpl::isUpgradePrefix(buffer));\n  }\n\n  // Complete match\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string(\"\\x80\\x01xx\\0\\0\\0\\x1B\", 8) + TwitterProtocolImpl::upgradeMethodName());\n    EXPECT_TRUE(TwitterProtocolImpl::isUpgradePrefix(buffer));\n  }\n\n  // Extra data\n  {\n    Buffer::OwnedImpl buffer;\n    buffer.add(std::string(\"\\x80\\x01xx\\0\\0\\0\\x1B\", 8) + TwitterProtocolImpl::upgradeMethodName() +\n               \"xxx\");\n    EXPECT_TRUE(TwitterProtocolImpl::isUpgradePrefix(buffer));\n  }\n}\n\n} // Namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/unframed_transport_impl_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/thrift_proxy/unframed_transport_impl.h\"\n\n#include \"test/extensions/filters/network/thrift_proxy/utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\n\nTEST(UnframedTransportTest, Name) {\n  UnframedTransportImpl transport;\n  EXPECT_EQ(transport.name(), \"unframed\");\n}\n\nTEST(UnframedTransportTest, Type) {\n  UnframedTransportImpl transport;\n  EXPECT_EQ(transport.type(), TransportType::Unframed);\n}\n\nTEST(UnframedTransportTest, DecodeFrameStart) {\n  UnframedTransportImpl transport;\n\n  Buffer::OwnedImpl buffer;\n  buffer.writeBEInt<uint32_t>(0xDEADBEEF);\n  EXPECT_EQ(buffer.length(), 4);\n\n  MessageMetadata metadata;\n  EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata));\n  EXPECT_THAT(metadata, IsEmptyMetadata());\n  EXPECT_EQ(buffer.length(), 4);\n}\n\nTEST(UnframedTransportTest, DecodeFrameStartWithNoData) {\n  UnframedTransportImpl transport;\n\n  Buffer::OwnedImpl buffer;\n  MessageMetadata metadata;\n  EXPECT_FALSE(transport.decodeFrameStart(buffer, metadata));\n  EXPECT_THAT(metadata, IsEmptyMetadata());\n}\n\nTEST(UnframedTransportTest, DecodeFrameEnd) {\n  UnframedTransportImpl transport;\n\n  Buffer::OwnedImpl buffer;\n  EXPECT_TRUE(transport.decodeFrameEnd(buffer));\n}\n\nTEST(UnframedTransportTest, EncodeFrame) {\n  UnframedTransportImpl transport;\n\n  MessageMetadata metadata;\n\n  Buffer::OwnedImpl message;\n  message.add(\"fake message\");\n\n  Buffer::OwnedImpl buffer;\n  transport.encodeFrame(buffer, metadata, message);\n\n  EXPECT_EQ(0, message.length());\n  EXPECT_EQ(\"fake message\", buffer.toString());\n}\n\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/thrift_proxy/utility.h",
    "content": "#pragma once\n\n#include <initializer_list>\n\n#include \"envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/byte_order.h\"\n\n#include \"extensions/filters/network/thrift_proxy/thrift.h\"\n\n#include \"test/common/buffer/utility.h\"\n\n#include \"absl/strings/ascii.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::TestParamInfo;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ThriftProxy {\nnamespace {\n\nusing Envoy::Buffer::addRepeated; // NOLINT(misc-unused-using-decls)\nusing Envoy::Buffer::addSeq;      // NOLINT(misc-unused-using-decls)\n\ninline std::string fieldTypeToString(const FieldType& field_type) {\n  switch (field_type) {\n  case FieldType::Stop:\n    return \"Stop\";\n  case FieldType::Void:\n    return \"Void\";\n  case FieldType::Bool:\n    return \"Bool\";\n  case FieldType::Byte:\n    return \"Byte\";\n  case FieldType::Double:\n    return \"Double\";\n  case FieldType::I16:\n    return \"I16\";\n  case FieldType::I32:\n    return \"I32\";\n  case FieldType::I64:\n    return \"I64\";\n  case FieldType::String:\n    return \"String\";\n  case FieldType::Struct:\n    return \"Struct\";\n  case FieldType::Map:\n    return \"Map\";\n  case FieldType::Set:\n    return \"Set\";\n  case FieldType::List:\n    return \"List\";\n  default:\n    return \"UnknownFieldType\";\n  }\n}\n\ninline std::string fieldTypeParamToString(const TestParamInfo<FieldType>& params) {\n  return fieldTypeToString(params.param);\n}\n\ninline envoy::extensions::filters::network::thrift_proxy::v3::TransportType\ntransportTypeToProto(TransportType transport_type) {\n  switch (transport_type) {\n  case TransportType::Framed:\n    return envoy::extensions::filters::network::thrift_proxy::v3::FRAMED;\n  case TransportType::Unframed:\n    return envoy::extensions::filters::network::thrift_proxy::v3::UNFRAMED;\n  
case TransportType::Header:\n    return envoy::extensions::filters::network::thrift_proxy::v3::HEADER;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\ninline envoy::extensions::filters::network::thrift_proxy::v3::ProtocolType\nprotocolTypeToProto(ProtocolType protocol_type) {\n  switch (protocol_type) {\n  case ProtocolType::Binary:\n    return envoy::extensions::filters::network::thrift_proxy::v3::BINARY;\n  case ProtocolType::Compact:\n    return envoy::extensions::filters::network::thrift_proxy::v3::COMPACT;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\ninline std::string transportNameForTest(TransportType transport_type) {\n  std::string name = TransportNames::get().fromType(transport_type);\n  name[0] = absl::ascii_toupper(name[0]);\n  return name;\n}\n\ninline std::string protocolNameForTest(ProtocolType protocol_type) {\n  std::string name = ProtocolNames::get().fromType(protocol_type);\n  name[0] = absl::ascii_toupper(name[0]);\n  return name;\n}\n\nMATCHER(IsEmptyMetadata, \"\") {\n  if (arg.hasFrameSize()) {\n    *result_listener << \"has a frame size of \" << arg.frameSize();\n    return false;\n  }\n  if (arg.hasProtocol()) {\n    *result_listener << \"has a protocol of \" << ProtocolNames::get().fromType(arg.protocol());\n    return false;\n  }\n  if (arg.hasMethodName()) {\n    *result_listener << \"has a method name of \" << arg.methodName();\n    return false;\n  }\n  if (arg.hasSequenceId()) {\n    *result_listener << \"has a sequence id \" << arg.sequenceId();\n    return false;\n  }\n  if (arg.hasMessageType()) {\n    *result_listener << \"has a message type of \" << static_cast<int>(arg.messageType());\n    return false;\n  }\n  if (arg.headers().size() > 0) {\n    *result_listener << \"has \" << arg.headers().size() << \" headers\";\n    return false;\n  }\n  if (arg.hasAppException()) {\n    *result_listener << \"has an app exception\";\n    return false;\n  }\n  return true;\n}\n\nMATCHER_P(HasOnlyFrameSize, n, \"\") {\n  
return arg.hasFrameSize() && arg.frameSize() == n && !arg.hasProtocol() && !arg.hasMethodName() &&\n         !arg.hasSequenceId() && !arg.hasMessageType() && arg.headers().size() == 0 &&\n         !arg.hasAppException();\n}\n\nMATCHER_P(HasFrameSize, n, \"\") {\n  if (!arg.hasFrameSize()) {\n    *result_listener << \"has no frame size\";\n    return false;\n  }\n  *result_listener << \"has frame size = \" << arg.frameSize();\n  return arg.frameSize() == n;\n}\n\nMATCHER_P(HasProtocol, p, \"\") { return arg.hasProtocol() && arg.protocol() == p; }\nMATCHER_P(HasSequenceId, id, \"\") { return arg.hasSequenceId() && arg.sequenceId() == id; }\nMATCHER(HasNoHeaders, \"\") { return arg.headers().size() == 0; }\n\nMATCHER_P2(HasAppException, t, m, \"\") {\n  if (!arg.hasAppException()) {\n    *result_listener << \"has no exception\";\n    return false;\n  }\n\n  if (arg.appExceptionType() != t) {\n    *result_listener << \"has exception with type \" << static_cast<int>(arg.appExceptionType());\n    return false;\n  }\n\n  if (std::string(m) != arg.appExceptionMessage()) {\n    *result_listener << \"has exception with message \" << arg.appExceptionMessage();\n    return false;\n  }\n\n  return true;\n}\n\n} // namespace\n} // namespace ThriftProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//bazel:envoy_select.bzl\",\n    \"envoy_select_wasm\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/filters/network/wasm/test_data:test_cpp.wasm\",\n    ]),\n    extension_name = \"envoy.filters.network.wasm\",\n    deps = [\n        \"//source/common/common:base64_lib\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/crypto:utility_lib\",\n        \"//source/extensions/common/crypto:utility_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/filters/network/wasm:config\",\n        \"//test/extensions/filters/network/wasm/test_data:test_cpp_plugin\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/wasm/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"wasm_filter_test\",\n    srcs = [\"wasm_filter_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/filters/network/wasm/test_data:logging_rust.wasm\",\n        \"//test/extensions/filters/network/wasm/test_data:test_cpp.wasm\",\n    ]),\n    extension_name = \"envoy.filters.network.wasm\",\n    deps = [\n        \"//source/extensions/filters/network/wasm:wasm_filter_lib\",\n        \"//test/extensions/filters/network/wasm/test_data:test_cpp_plugin\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:wasm_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/wasm/config_test.cc",
    "content": "#include \"envoy/extensions/filters/network/wasm/v3/wasm.pb.validate.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/common/hex.h\"\n#include \"common/crypto/utility.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/filters/network/wasm/config.h\"\n#include \"extensions/filters/network/wasm/wasm_filter.h\"\n\n#include \"test/mocks/server/mocks.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Wasm {\n\nclass WasmNetworkFilterConfigTest : public testing::TestWithParam<std::string> {\nprotected:\n  WasmNetworkFilterConfigTest() : api_(Api::createApiForTest(stats_store_)) {\n    ON_CALL(context_, api()).WillByDefault(ReturnRef(*api_));\n    ON_CALL(context_, scope()).WillByDefault(ReturnRef(stats_store_));\n    ON_CALL(context_, listenerMetadata()).WillByDefault(ReturnRef(listener_metadata_));\n    ON_CALL(context_, initManager()).WillByDefault(ReturnRef(init_manager_));\n    ON_CALL(context_, clusterManager()).WillByDefault(ReturnRef(cluster_manager_));\n    ON_CALL(context_, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n  }\n\n  void SetUp() override { Envoy::Extensions::Common::Wasm::clearCodeCacheForTesting(); }\n\n  void initializeForRemote() {\n    retry_timer_ = new Event::MockTimer();\n\n    EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) {\n      retry_timer_cb_ = timer_cb;\n      return retry_timer_;\n    }));\n  }\n\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr api_;\n  envoy::config::core::v3::Metadata listener_metadata_;\n  Init::ManagerImpl init_manager_{\"init_manager\"};\n  NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  Init::ExpectableWatcherImpl init_watcher_;\n  
NiceMock<Event::MockDispatcher> dispatcher_;\n  Event::MockTimer* retry_timer_;\n  Event::TimerCb retry_timer_cb_;\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto testing_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    \"v8\",\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    \"wavm\",\n#endif\n    \"null\");\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmNetworkFilterConfigTest, testing_values);\n\nTEST_P(WasmNetworkFilterConfigTest, YamlLoadFromFileWasm) {\n  if (GetParam() == \"null\") {\n    return;\n  }\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        local:\n          filename: \"{{ test_rundir }}/test/extensions/filters/network/wasm/test_data/test_cpp.wasm\"\n  )EOF\"));\n\n  envoy::extensions::filters::network::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addFilter(_));\n  cb(connection);\n}\n\nTEST_P(WasmNetworkFilterConfigTest, YamlLoadInlineWasm) {\n  const std::string code =\n      GetParam() != \"null\"\n          ? 
TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n                \"{{ test_rundir }}/test/extensions/filters/network/wasm/test_data/test_cpp.wasm\"))\n          : \"NetworkTestCpp\";\n  EXPECT_FALSE(code.empty());\n  const std::string yaml = absl::StrCat(R\"EOF(\n  config:\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                        GetParam(), R\"EOF(\"\n      code:\n        local: { inline_bytes: \")EOF\",\n                                        Base64::encode(code.data(), code.size()), R\"EOF(\" }\n  )EOF\");\n\n  envoy::extensions::filters::network::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_);\n  EXPECT_CALL(init_watcher_, ready());\n  context_.initManager().initialize(init_watcher_);\n  EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, addFilter(_));\n  cb(connection);\n}\n\nTEST_P(WasmNetworkFilterConfigTest, YamlLoadInlineBadCode) {\n  const std::string yaml = absl::StrCat(R\"EOF(\n  config:\n    name: \"test\"\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                        GetParam(), R\"EOF(\"\n      code:\n        local: { inline_string: \"bad code\" }\n  )EOF\");\n\n  envoy::extensions::filters::network::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, context_),\n                            Extensions::Common::Wasm::WasmException,\n                            \"Unable to create Wasm network filter test\");\n}\n\nTEST_P(WasmNetworkFilterConfigTest, YamlLoadInlineBadCodeFailOpenNackConfig) {\n  const std::string yaml = absl::StrCat(R\"EOF(\n  config:\n    name: \"test\"\n 
   fail_open: true\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                        GetParam(), R\"EOF(\"\n      code:\n        local: { inline_string: \"bad code\" }\n  )EOF\");\n\n  envoy::extensions::filters::network::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  WasmFilterConfig factory;\n  EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, context_),\n                            Extensions::Common::Wasm::WasmException,\n                            \"Unable to create Wasm network filter test\");\n}\n\nTEST_P(WasmNetworkFilterConfigTest, FilterConfigFailOpen) {\n  if (GetParam() == \"null\") {\n    return;\n  }\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n  config:\n    fail_open: true\n    vm_config:\n      runtime: \"envoy.wasm.runtime.)EOF\",\n                                                                    GetParam(), R\"EOF(\"\n      code:\n        local:\n          filename: \"{{ test_rundir }}/test/extensions/filters/network/wasm/test_data/test_cpp.wasm\"\n  )EOF\"));\n\n  envoy::extensions::filters::network::wasm::v3::Wasm proto_config;\n  TestUtility::loadFromYaml(yaml, proto_config);\n  NetworkFilters::Wasm::FilterConfig filter_config(proto_config, context_);\n  filter_config.wasm()->fail(proxy_wasm::FailState::RuntimeError, \"\");\n  EXPECT_EQ(filter_config.createFilter(), nullptr);\n}\n\n} // namespace Wasm\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/wasm/test_data/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\nload(\"//bazel/wasm:wasm.bzl\", \"envoy_wasm_cc_binary\", \"wasm_rust_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nwasm_rust_binary(\n    name = \"logging_rust.wasm\",\n    srcs = [\"logging_rust/src/lib.rs\"],\n    deps = [\n        \"//bazel/external/cargo:log\",\n        \"//bazel/external/cargo:proxy_wasm\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"test_cpp_plugin\",\n    srcs = [\n        \"test_cpp.cc\",\n        \"test_cpp_null_plugin.cc\",\n    ],\n    copts = [\"-DNULL_PLUGIN=1\"],\n    deps = [\n        \"//external:abseil_node_hash_map\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:c_smart_ptr_lib\",\n        \"//source/extensions/common/wasm:wasm_hdr\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/common/wasm:well_known_names\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"test_cpp.wasm\",\n    srcs = [\"test_cpp.cc\"],\n    deps = [\n        \"@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/wasm/test_data/logging_rust/Cargo.toml",
    "content": "[package]\ndescription = \"Proxy-Wasm logging test\"\nname = \"logging_rust\"\nversion = \"0.0.1\"\nauthors = [\"Piotr Sikora <piotrsikora@google.com>\"]\nedition = \"2018\"\n\n[dependencies]\nproxy-wasm = \"0.1\"\nlog = \"0.4\"\n\n[lib]\ncrate-type = [\"cdylib\"]\npath = \"src/*.rs\"\n\n[profile.release]\nlto = true\nopt-level = 3\npanic = \"abort\"\n\n[raze]\nworkspace_path = \"//bazel/external/cargo\"\ngenmode = \"Remote\"\n\n[raze.crates.log.'0.4.11']\nadditional_flags = [\"--cfg=atomic_cas\"]\n"
  },
  {
    "path": "test/extensions/filters/network/wasm/test_data/logging_rust/src/lib.rs",
    "content": "use log::trace;\nuse proxy_wasm::hostcalls;\nuse proxy_wasm::traits::{Context, StreamContext};\nuse proxy_wasm::types::*;\n\n#[no_mangle]\npub fn _start() {\n    proxy_wasm::set_log_level(LogLevel::Trace);\n    proxy_wasm::set_stream_context(|context_id, _| -> Box<dyn StreamContext> {\n        Box::new(TestStream { context_id })\n    });\n}\n\nstruct TestStream {\n    context_id: u32,\n}\n\nimpl Context for TestStream {}\n\nimpl StreamContext for TestStream {\n    fn on_new_connection(&mut self) -> Action {\n        trace!(\"onNewConnection {}\", self.context_id);\n        Action::Continue\n    }\n\n    fn on_downstream_data(&mut self, data_size: usize, end_of_stream: bool) -> Action {\n        if let Some(data) = self.get_downstream_data(0, data_size) {\n            trace!(\n                \"onDownstreamData {} len={} end_stream={}\\n{}\",\n                self.context_id,\n                data_size,\n                end_of_stream as u32,\n                String::from_utf8(data).unwrap()\n            );\n        }\n        hostcalls::set_buffer(BufferType::DownstreamData, 0, data_size, b\"write\").unwrap();\n        Action::Continue\n    }\n\n    fn on_upstream_data(&mut self, data_size: usize, end_of_stream: bool) -> Action {\n        if let Some(data) = self.get_upstream_data(0, data_size) {\n            trace!(\n                \"onUpstreamData {} len={} end_stream={}\\n{}\",\n                self.context_id,\n                data_size,\n                end_of_stream as u32,\n                String::from_utf8(data).unwrap()\n            );\n        }\n        Action::Continue\n    }\n\n    fn on_downstream_close(&mut self, peer_type: PeerType) {\n        trace!(\n            \"onDownstreamConnectionClose {} {}\",\n            self.context_id,\n            peer_type as u32,\n        );\n    }\n\n    fn on_upstream_close(&mut self, peer_type: PeerType) {\n        trace!(\n            \"onUpstreamConnectionClose {} {}\",\n            
self.context_id,\n            peer_type as u32,\n        );\n    }\n}\n"
  },
  {
    "path": "test/extensions/filters/network/wasm/test_data/test_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <string>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics.h\"\n#else\n#include \"include/proxy-wasm/null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(NetworkTestCpp)\n\nstatic int* badptr = nullptr;\n\nclass ExampleContext : public Context {\npublic:\n  explicit ExampleContext(uint32_t id, RootContext* root) : Context(id, root) {}\n\n  FilterStatus onNewConnection() override;\n  FilterStatus onDownstreamData(size_t data_length, bool end_stream) override;\n  FilterStatus onUpstreamData(size_t data_length, bool end_stream) override;\n  void onForeignFunction(uint32_t, uint32_t) override;\n  void onDownstreamConnectionClose(CloseType close_type) override;\n  void onUpstreamConnectionClose(CloseType close_type) override;\n};\nstatic RegisterContextFactory register_ExampleContext(CONTEXT_FACTORY(ExampleContext));\n\nFilterStatus ExampleContext::onNewConnection() {\n  logTrace(\"onNewConnection \" + std::to_string(id()));\n  return FilterStatus::Continue;\n}\n\nFilterStatus ExampleContext::onDownstreamData(size_t data_length, bool end_stream) {\n  WasmDataPtr data = getBufferBytes(WasmBufferType::NetworkDownstreamData, 0, data_length);\n  logTrace(\"onDownstreamData \" + std::to_string(id()) + \" len=\" + std::to_string(data_length) +\n           \" end_stream=\" + std::to_string(end_stream) + \"\\n\" + std::string(data->view()));\n  setBuffer(WasmBufferType::NetworkDownstreamData, 0, 5, \"write\");\n  return FilterStatus::Continue;\n}\n\nFilterStatus ExampleContext::onUpstreamData(size_t data_length, bool end_stream) {\n  WasmDataPtr data = getBufferBytes(WasmBufferType::NetworkUpstreamData, 0, data_length);\n  logTrace(\"onUpstreamData \" + std::to_string(id()) + \" len=\" + std::to_string(data_length) +\n           \" end_stream=\" + std::to_string(end_stream) + \"\\n\" + std::string(data->view()));\n  return FilterStatus::Continue;\n}\n\nvoid ExampleContext::onForeignFunction(uint32_t, uint32_t) {\n  
logTrace(\"before segv\");\n  *badptr = 1;\n  logTrace(\"after segv\");\n}\n\nvoid ExampleContext::onDownstreamConnectionClose(CloseType close_type) {\n  logTrace(\"onDownstreamConnectionClose \" + std::to_string(id()) + \" \" +\n           std::to_string(static_cast<uint32_t>(close_type)));\n}\n\nvoid ExampleContext::onUpstreamConnectionClose(CloseType close_type) {\n  logTrace(\"onUpstreamConnectionClose \" + std::to_string(id()) + \" \" +\n           std::to_string(static_cast<uint32_t>(close_type)));\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/filters/network/wasm/test_data/test_cpp_null_plugin.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace proxy_wasm {\nnamespace null_plugin {\nnamespace NetworkTestCpp {\nNullPluginRegistry* context_registry_;\n} // namespace NetworkTestCpp\n\nRegisterNullVmPluginFactory register_common_wasm_test_cpp_plugin(\"NetworkTestCpp\", []() {\n  return std::make_unique<NullPlugin>(NetworkTestCpp::context_registry_);\n});\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "test/extensions/filters/network/wasm/wasm_filter_test.cc",
    "content": "#include \"envoy/server/lifecycle_notifier.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/filters/network/wasm/wasm_filter.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/test_common/wasm_base.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Eq;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace Wasm {\n\nusing Envoy::Extensions::Common::Wasm::Context;\nusing Envoy::Extensions::Common::Wasm::Plugin;\nusing Envoy::Extensions::Common::Wasm::PluginSharedPtr;\nusing Envoy::Extensions::Common::Wasm::Wasm;\nusing proxy_wasm::ContextBase;\n\nclass TestFilter : public Context {\npublic:\n  TestFilter(Wasm* wasm, uint32_t root_context_id, PluginSharedPtr plugin)\n      : Context(wasm, root_context_id, plugin) {}\n  MOCK_CONTEXT_LOG_;\n\n  void testClose() { onCloseTCP(); }\n};\n\nclass TestRoot : public Context {\npublic:\n  TestRoot(Wasm* wasm, const std::shared_ptr<Plugin>& plugin) : Context(wasm, plugin) {}\n  MOCK_CONTEXT_LOG_;\n};\n\nclass WasmNetworkFilterTest : public Common::Wasm::WasmNetworkFilterTestBase<\n                                  testing::TestWithParam<std::tuple<std::string, std::string>>> {\npublic:\n  WasmNetworkFilterTest() = default;\n  ~WasmNetworkFilterTest() override = default;\n\n  void setupConfig(const std::string& code, std::string vm_configuration, bool fail_open = false) {\n    if (code.empty()) {\n      setupWasmCode(vm_configuration);\n    } else {\n      code_ = code;\n    }\n    setupBase(\n        std::get<0>(GetParam()), code_,\n        [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n          return new TestRoot(wasm, plugin);\n        },\n        \"\" /* root_id */, \"\" /* vm_configuration */, fail_open);\n  }\n\n  void setupFilter() { setupFilterBase<TestFilter>(\"\"); }\n\n  TestFilter& filter() { return *static_cast<TestFilter*>(context_.get()); 
}\n\nprivate:\n  void setupWasmCode(std::string vm_configuration) {\n    if (std::get<0>(GetParam()) == \"null\") {\n      code_ = \"NetworkTestCpp\";\n    } else {\n      if (std::get<1>(GetParam()) == \"cpp\") {\n        code_ = TestEnvironment::readFileToStringForTest(TestEnvironment::runfilesPath(\n            \"test/extensions/filters/network/wasm/test_data/test_cpp.wasm\"));\n      } else {\n        code_ = TestEnvironment::readFileToStringForTest(TestEnvironment::runfilesPath(absl::StrCat(\n            \"test/extensions/filters/network/wasm/test_data/\", vm_configuration + \"_rust.wasm\")));\n      }\n    }\n    EXPECT_FALSE(code_.empty());\n  }\n\nprotected:\n  std::string code_;\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto testing_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    std::make_tuple(\"v8\", \"cpp\"), std::make_tuple(\"v8\", \"rust\"),\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    std::make_tuple(\"wavm\", \"cpp\"), std::make_tuple(\"wavm\", \"rust\"),\n#endif\n    std::make_tuple(\"null\", \"cpp\"));\nINSTANTIATE_TEST_SUITE_P(RuntimesAndLanguages, WasmNetworkFilterTest, testing_values);\n\n// Bad code in initial config.\nTEST_P(WasmNetworkFilterTest, BadCode) {\n  setupConfig(\"bad code\", \"\");\n  EXPECT_EQ(wasm_, nullptr);\n  setupFilter();\n  filter().isFailed();\n  EXPECT_CALL(read_filter_callbacks_.connection_,\n              close(Envoy::Network::ConnectionCloseType::FlushWrite));\n  EXPECT_EQ(Network::FilterStatus::StopIteration, filter().onNewConnection());\n}\n\nTEST_P(WasmNetworkFilterTest, BadCodeFailOpen) {\n  setupConfig(\"bad code\", \"\", true);\n  EXPECT_EQ(wasm_, nullptr);\n  setupFilter();\n  filter().isFailed();\n  EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection());\n}\n\n// Test happy path.\nTEST_P(WasmNetworkFilterTest, HappyPath) {\n  setupConfig(\"\", \"logging\");\n  setupFilter();\n\n  
EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view(\"onNewConnection 2\"))));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection());\n\n  Buffer::OwnedImpl fake_downstream_data(\"Fake\");\n  EXPECT_CALL(filter(), log_(spdlog::level::trace,\n                             Eq(absl::string_view(\"onDownstreamData 2 len=4 end_stream=0\\nFake\"))));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter().onData(fake_downstream_data, false));\n  EXPECT_EQ(fake_downstream_data.toString(), \"write\");\n\n  Buffer::OwnedImpl fake_upstream_data(\"Done\");\n  EXPECT_CALL(filter(), log_(spdlog::level::trace,\n                             Eq(absl::string_view(\"onUpstreamData 2 len=4 end_stream=1\\nDone\"))));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::trace, Eq(absl::string_view(\"onUpstreamConnectionClose 2 0\"))));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter().onWrite(fake_upstream_data, true));\n  filter().onAboveWriteBufferHighWatermark();\n  filter().onBelowWriteBufferLowWatermark();\n\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::trace, Eq(absl::string_view(\"onDownstreamConnectionClose 2 1\"))));\n  read_filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite);\n  // Noop.\n  read_filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite);\n  filter().testClose();\n}\n\nTEST_P(WasmNetworkFilterTest, CloseDownstreamFirst) {\n  setupConfig(\"\", \"logging\");\n  setupFilter();\n\n  EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view(\"onNewConnection 2\"))));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection());\n\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::trace, Eq(absl::string_view(\"onDownstreamConnectionClose 2 1\"))));\n  write_filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite);\n  
read_filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite);\n}\n\nTEST_P(WasmNetworkFilterTest, CloseStream) {\n  setupConfig(\"\", \"logging\");\n  setupFilter();\n\n  // No Context, does nothing.\n  filter().onEvent(Network::ConnectionEvent::RemoteClose);\n  Buffer::OwnedImpl fake_upstream_data(\"Done\");\n  EXPECT_EQ(Network::FilterStatus::Continue, filter().onWrite(fake_upstream_data, true));\n  Buffer::OwnedImpl fake_downstream_data(\"Fake\");\n  EXPECT_EQ(Network::FilterStatus::Continue, filter().onData(fake_downstream_data, false));\n\n  // Create context.\n  EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view(\"onNewConnection 2\"))));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection());\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::trace, Eq(absl::string_view(\"onDownstreamConnectionClose 2 1\"))));\n  EXPECT_CALL(filter(),\n              log_(spdlog::level::trace, Eq(absl::string_view(\"onDownstreamConnectionClose 2 2\"))));\n\n  filter().onEvent(static_cast<Network::ConnectionEvent>(9999)); // Does nothing.\n  filter().onEvent(Network::ConnectionEvent::RemoteClose);\n  filter().closeStream(proxy_wasm::WasmStreamType::Downstream);\n  filter().closeStream(proxy_wasm::WasmStreamType::Upstream);\n}\n\nTEST_P(WasmNetworkFilterTest, SegvFailOpen) {\n  if (std::get<0>(GetParam()) != \"v8\" || std::get<1>(GetParam()) != \"cpp\") {\n    return;\n  }\n  setupConfig(\"\", \"logging\", true);\n  EXPECT_TRUE(plugin_->fail_open_);\n  setupFilter();\n\n  EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view(\"onNewConnection 2\"))));\n  EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection());\n\n  EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view(\"before segv\"))));\n  filter().onForeignFunction(0, 0);\n  EXPECT_TRUE(wasm_->wasm()->isFailed());\n\n  Buffer::OwnedImpl fake_downstream_data(\"Fake\");\n  // No logging expected.\n  
EXPECT_EQ(Network::FilterStatus::Continue, filter().onData(fake_downstream_data, false));\n}\n\n} // namespace Wasm\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/zookeeper_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"filter_test\",\n    srcs = [\n        \"filter_test.cc\",\n    ],\n    extension_name = \"envoy.filters.network.zookeeper_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/zookeeper_proxy:config\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\n        \"config_test.cc\",\n    ],\n    extension_name = \"envoy.filters.network.zookeeper_proxy\",\n    deps = [\n        \"//source/extensions/filters/network/zookeeper_proxy:config\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/network/zookeeper_proxy/config_test.cc",
    "content": "#include \"envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/zookeeper_proxy/config.h\"\n\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\nusing ZooKeeperProxyProtoConfig =\n    envoy::extensions::filters::network::zookeeper_proxy::v3::ZooKeeperProxy;\n\nTEST(ZookeeperFilterConfigTest, ValidateFail) {\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  EXPECT_THROW(\n      ZooKeeperConfigFactory().createFilterFactoryFromProto(ZooKeeperProxyProtoConfig(), context),\n      ProtoValidationException);\n}\n\nTEST(ZookeeperFilterConfigTest, InvalidStatPrefix) {\n  const std::string yaml = R\"EOF(\nstat_prefix: \"\"\n  )EOF\";\n\n  ZooKeeperProxyProtoConfig proto_config;\n  EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException);\n}\n\nTEST(ZookeeperFilterConfigTest, InvalidMaxPacketBytes) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test_prefix\nmax_packet_bytes: -1\n  )EOF\";\n\n  ZooKeeperProxyProtoConfig proto_config;\n  EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), EnvoyException);\n}\n\nTEST(ZookeeperFilterConfigTest, SimpleConfig) {\n  const std::string yaml = R\"EOF(\nstat_prefix: test_prefix\n  )EOF\";\n\n  ZooKeeperProxyProtoConfig proto_config;\n  TestUtility::loadFromYamlAndValidate(yaml, proto_config, false, true);\n\n  testing::NiceMock<Server::Configuration::MockFactoryContext> context;\n  ZooKeeperConfigFactory factory;\n\n  Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context);\n  Network::MockConnection connection;\n  EXPECT_CALL(connection, 
addFilter(_));\n  cb(connection);\n}\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/network/zookeeper_proxy/filter_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/zookeeper_proxy/decoder.h\"\n#include \"extensions/filters/network/zookeeper_proxy/filter.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace NetworkFilters {\nnamespace ZooKeeperProxy {\n\nbool protoMapEq(const ProtobufWkt::Struct& obj, const std::map<std::string, std::string>& rhs) {\n  EXPECT_TRUE(!rhs.empty());\n  for (auto const& entry : rhs) {\n    EXPECT_EQ(obj.fields().at(entry.first).string_value(), entry.second);\n  }\n  return true;\n}\n\nMATCHER_P(MapEq, rhs, \"\") { return protoMapEq(arg, rhs); }\n\nclass ZooKeeperFilterTest : public testing::Test {\npublic:\n  ZooKeeperFilterTest() { ENVOY_LOG_MISC(info, \"test\"); }\n\n  void initialize() {\n    config_ = std::make_shared<ZooKeeperFilterConfig>(stat_prefix_, 1048576, scope_);\n    filter_ = std::make_unique<ZooKeeperFilter>(config_, time_system_);\n    filter_->initializeReadFilterCallbacks(filter_callbacks_);\n  }\n\n  Buffer::OwnedImpl encodeConnect(const bool readonly = false, const uint64_t zxid = 100,\n                                  const uint32_t session_timeout = 10,\n                                  const uint32_t session_id = 200,\n                                  const std::string& passwd = \"\") const {\n    Buffer::OwnedImpl buffer;\n    const uint32_t message_size = readonly ? 
28 + passwd.length() + 1 : 28 + passwd.length();\n\n    buffer.writeBEInt<uint32_t>(message_size);\n    buffer.writeBEInt<uint32_t>(0); // Protocol version.\n    buffer.writeBEInt<uint64_t>(zxid);\n    buffer.writeBEInt<uint32_t>(session_timeout);\n    buffer.writeBEInt<uint64_t>(session_id);\n    addString(buffer, passwd);\n\n    if (readonly) {\n      const char readonly_flag = 0b1;\n      buffer.add(std::string(1, readonly_flag));\n    }\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeConnectResponse(const bool readonly = false,\n                                          const uint32_t session_timeout = 10,\n                                          const uint32_t session_id = 200,\n                                          const std::string& passwd = \"\") const {\n    Buffer::OwnedImpl buffer;\n    const uint32_t message_size = readonly ? 20 + passwd.length() + 1 : 20 + passwd.length();\n\n    buffer.writeBEInt<uint32_t>(message_size);\n    buffer.writeBEInt<uint32_t>(0); // Protocol version.\n    buffer.writeBEInt<uint32_t>(session_timeout);\n    buffer.writeBEInt<uint64_t>(session_id);\n    addString(buffer, passwd);\n\n    if (readonly) {\n      const char readonly_flag = 0b1;\n      buffer.add(std::string(1, readonly_flag));\n    }\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeResponseHeader(const int32_t xid, const int64_t zxid,\n                                         const int32_t error) const {\n    Buffer::OwnedImpl buffer;\n    const uint32_t message_size = 16;\n\n    buffer.writeBEInt<uint32_t>(message_size);\n    buffer.writeBEInt<uint32_t>(xid);\n    buffer.writeBEInt<uint64_t>(zxid);\n    buffer.writeBEInt<uint32_t>(error);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeWatchEvent(const std::string& path, const int32_t event_type,\n                                     const int32_t client_state) const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<uint32_t>(28 + path.size());\n    
buffer.writeBEInt<uint32_t>(enumToSignedInt(XidCodes::WatchXid));\n    buffer.writeBEInt<uint64_t>(1000);\n    buffer.writeBEInt<uint32_t>(0);\n    buffer.writeBEInt<uint32_t>(event_type);\n    buffer.writeBEInt<uint32_t>(client_state);\n    addString(buffer, path);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeBadMessage() const {\n    Buffer::OwnedImpl buffer;\n\n    // Bad length.\n    buffer.writeBEInt<uint32_t>(1);\n    // Trailing int.\n    buffer.writeBEInt<uint32_t>(3);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeTooBigMessage() const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<uint32_t>(1048577);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeBiggerThanLengthMessage() const {\n    Buffer::OwnedImpl buffer;\n\n    // Craft a delete request with a path that's longer than\n    // the declared message length.\n    buffer.writeBEInt<int32_t>(50);\n    buffer.writeBEInt<int32_t>(1000);\n    // Opcode.\n    buffer.writeBEInt<int32_t>(enumToSignedInt(OpCodes::Delete));\n    // Path.\n    addString(buffer, std::string(2 * 1024 * 1024, '*'));\n    // Version.\n    buffer.writeBEInt<int32_t>(-1);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodePing() const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<uint32_t>(8);\n    buffer.writeBEInt<int32_t>(enumToSignedInt(XidCodes::PingXid));\n    buffer.writeBEInt<uint32_t>(enumToInt(OpCodes::Ping));\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeUnknownOpcode() const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<uint32_t>(8);\n    buffer.writeBEInt<int32_t>(1000);\n    buffer.writeBEInt<uint32_t>(200);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeCloseRequest() const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<uint32_t>(8);\n    buffer.writeBEInt<int32_t>(1000);\n    buffer.writeBEInt<int32_t>(enumToSignedInt(OpCodes::Close));\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeAuth(const std::string& scheme) 
const {\n    const std::string credential = \"p@sswd\";\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<uint32_t>(28 + scheme.length() + credential.length());\n    buffer.writeBEInt<int32_t>(enumToSignedInt(XidCodes::AuthXid));\n    buffer.writeBEInt<int32_t>(enumToSignedInt(OpCodes::SetAuth));\n    // Type.\n    buffer.writeBEInt<int32_t>(0);\n    addString(buffer, scheme);\n    addString(buffer, credential);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl\n  encodePathWatch(const std::string& path, const bool watch,\n                  const int32_t opcode = enumToSignedInt(OpCodes::GetData)) const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(13 + path.length());\n    buffer.writeBEInt<int32_t>(1000);\n    // Opcode.\n    buffer.writeBEInt<int32_t>(opcode);\n    // Path.\n    addString(buffer, path);\n    // Watch.\n    const char watch_flag = watch ? 0b1 : 0b0;\n    buffer.add(std::string(1, watch_flag));\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodePathVersion(const std::string& path, const int32_t version,\n                                      const int32_t opcode = enumToSignedInt(OpCodes::GetData),\n                                      const bool txn = false) const {\n    Buffer::OwnedImpl buffer;\n\n    if (!txn) {\n      buffer.writeBEInt<int32_t>(16 + path.length());\n      buffer.writeBEInt<int32_t>(1000);\n      buffer.writeBEInt<int32_t>(opcode);\n    }\n\n    // Path.\n    addString(buffer, path);\n    // Version\n    buffer.writeBEInt<int32_t>(version);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodePath(const std::string& path, const int32_t opcode) const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(8 + path.length());\n    buffer.writeBEInt<int32_t>(1000);\n    // Opcode.\n    buffer.writeBEInt<int32_t>(opcode);\n    // Path.\n    addString(buffer, path);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodePathLongerThanBuffer(const std::string& path,\n                   
                            const int32_t opcode) const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(8 + path.length());\n    buffer.writeBEInt<int32_t>(1000);\n    buffer.writeBEInt<int32_t>(opcode);\n    buffer.writeBEInt<uint32_t>(path.length() * 2);\n    buffer.add(path);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl\n  encodeCreateRequest(const std::string& path, const std::string& data, const CreateFlags flags,\n                      const bool txn = false,\n                      const int32_t opcode = enumToSignedInt(OpCodes::Create)) const {\n    Buffer::OwnedImpl buffer;\n\n    if (!txn) {\n      buffer.writeBEInt<int32_t>(24 + path.length() + data.length());\n      buffer.writeBEInt<int32_t>(1000);\n      buffer.writeBEInt<int32_t>(opcode);\n    }\n\n    // Path.\n    addString(buffer, path);\n    // Data.\n    addString(buffer, data);\n    // Acls.\n    buffer.writeBEInt<int32_t>(0);\n    // Flags.\n    buffer.writeBEInt<int32_t>(static_cast<int32_t>(flags));\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeSetRequest(const std::string& path, const std::string& data,\n                                     const int32_t version, const bool txn = false) const {\n    Buffer::OwnedImpl buffer;\n\n    if (!txn) {\n      buffer.writeBEInt<int32_t>(20 + path.length() + data.length());\n      buffer.writeBEInt<int32_t>(1000);\n      buffer.writeBEInt<int32_t>(enumToSignedInt(OpCodes::SetData));\n    }\n\n    // Path.\n    addString(buffer, path);\n    // Data.\n    addString(buffer, data);\n    // Version.\n    buffer.writeBEInt<int32_t>(version);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeDeleteRequest(const std::string& path, const int32_t version) const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(16 + path.length());\n    buffer.writeBEInt<int32_t>(1000);\n    // Opcode.\n    buffer.writeBEInt<int32_t>(enumToSignedInt(OpCodes::Delete));\n    // Path.\n    addString(buffer, path);\n    // 
Version.\n    buffer.writeBEInt<int32_t>(version);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeSetAclRequest(const std::string& path, const std::string& scheme,\n                                        const std::string& credential,\n                                        const int32_t version) const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(32 + path.length() + scheme.length() + credential.length());\n    buffer.writeBEInt<int32_t>(1000);\n    // Opcode.\n    buffer.writeBEInt<int32_t>(enumToSignedInt(OpCodes::SetAcl));\n    // Path.\n    addString(buffer, path);\n\n    // Acls.\n    buffer.writeBEInt<int32_t>(1);\n    // Type.\n    buffer.writeBEInt<int32_t>(0);\n    // Scheme.\n    addString(buffer, scheme);\n    // Credential.\n    addString(buffer, credential);\n\n    // Version.\n    buffer.writeBEInt<int32_t>(version);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeReconfigRequest(const std::string& joining, const std::string& leaving,\n                                          const std::string& new_members, int64_t config_id) const {\n    Buffer::OwnedImpl buffer;\n\n    buffer.writeBEInt<int32_t>(28 + joining.length() + leaving.length() + new_members.length());\n    buffer.writeBEInt<int32_t>(1000);\n    buffer.writeBEInt<int32_t>(enumToSignedInt(OpCodes::Reconfig));\n    addString(buffer, joining);\n    addString(buffer, leaving);\n    addString(buffer, new_members);\n    buffer.writeBEInt<int64_t>(config_id);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl encodeSetWatchesRequest(const std::vector<std::string>& dataw,\n                                            const std::vector<std::string>& existw,\n                                            const std::vector<std::string>& childw,\n                                            int32_t xid = 1000) const {\n    Buffer::OwnedImpl buffer;\n    Buffer::OwnedImpl watches_buffer;\n\n    addStrings(watches_buffer, dataw);\n    addStrings(watches_buffer, 
existw);\n    addStrings(watches_buffer, childw);\n\n    buffer.writeBEInt<int32_t>(8 + watches_buffer.length());\n    buffer.writeBEInt<int32_t>(xid);\n    buffer.writeBEInt<int32_t>(enumToSignedInt(OpCodes::SetWatches));\n    buffer.add(watches_buffer);\n\n    return buffer;\n  }\n\n  Buffer::OwnedImpl\n  encodeMultiRequest(const std::vector<std::pair<int32_t, Buffer::OwnedImpl>>& ops) const {\n    Buffer::OwnedImpl buffer;\n    Buffer::OwnedImpl requests;\n\n    for (const auto& op_pair : ops) {\n      // Header.\n      requests.writeBEInt<int32_t>(op_pair.first);\n      requests.add(std::string(1, 0b0));\n      requests.writeBEInt<int32_t>(-1);\n\n      // Payload.\n      requests.add(op_pair.second);\n    }\n\n    // Done header.\n    requests.writeBEInt<int32_t>(-1);\n    requests.add(std::string(1, 0b1));\n    requests.writeBEInt<int32_t>(-1);\n\n    // Multi prefix.\n    buffer.writeBEInt<int32_t>(8 + requests.length());\n    buffer.writeBEInt<int32_t>(1000);\n    buffer.writeBEInt<int32_t>(enumToSignedInt(OpCodes::Multi));\n\n    // Requests.\n    buffer.add(requests);\n\n    return buffer;\n  }\n\n  void addString(Buffer::OwnedImpl& buffer, const std::string& str) const {\n    buffer.writeBEInt<uint32_t>(str.length());\n    buffer.add(str);\n  }\n\n  void addStrings(Buffer::OwnedImpl& buffer, const std::vector<std::string>& watches) const {\n    buffer.writeBEInt<uint32_t>(watches.size());\n\n    for (const auto& watch : watches) {\n      addString(buffer, watch);\n    }\n  }\n\n  using StrStrMap = std::map<std::string, std::string>;\n\n  void expectSetDynamicMetadata(const std::vector<StrStrMap>& values) {\n    EXPECT_CALL(filter_callbacks_.connection_, streamInfo())\n        .WillRepeatedly(ReturnRef(stream_info_));\n\n    auto& call = EXPECT_CALL(stream_info_, setDynamicMetadata(_, _));\n\n    for (const auto& value : values) {\n      call.WillOnce(Invoke([value](const std::string& key, const ProtobufWkt::Struct& obj) -> void {\n        
EXPECT_STREQ(key.c_str(), \"envoy.filters.network.zookeeper_proxy\");\n        protoMapEq(obj, value);\n      }));\n    }\n  }\n\n  void testCreate(CreateFlags flags, const OpCodes opcode = OpCodes::Create) {\n    initialize();\n    Buffer::OwnedImpl data =\n        encodeCreateRequest(\"/foo\", \"bar\", flags, false, enumToSignedInt(opcode));\n    std::string opname = \"create\";\n\n    switch (opcode) {\n    case OpCodes::CreateContainer:\n      opname = \"createcontainer\";\n      break;\n    case OpCodes::CreateTtl:\n      opname = \"createttl\";\n      break;\n    default:\n      break;\n    }\n\n    expectSetDynamicMetadata(\n        {{{\"opname\", opname}, {\"path\", \"/foo\"}, {\"create_type\", createFlagsToString(flags)}},\n         {{\"bytes\", \"35\"}}});\n\n    EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false));\n\n    switch (opcode) {\n    case OpCodes::Create:\n      EXPECT_EQ(1UL, config_->stats().create_rq_.value());\n      break;\n    case OpCodes::CreateContainer:\n      EXPECT_EQ(1UL, config_->stats().createcontainer_rq_.value());\n      break;\n    case OpCodes::CreateTtl:\n      EXPECT_EQ(1UL, config_->stats().createttl_rq_.value());\n      break;\n    default:\n      break;\n    }\n\n    EXPECT_EQ(35UL, config_->stats().request_bytes_.value());\n    EXPECT_EQ(0UL, config_->stats().decoder_error_.value());\n  }\n\n  void testRequest(Buffer::OwnedImpl& data, const std::vector<StrStrMap>& metadata_values,\n                   const Stats::Counter& stat, const uint64_t request_bytes) {\n    expectSetDynamicMetadata(metadata_values);\n    EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false));\n    EXPECT_EQ(1UL, stat.value());\n    EXPECT_EQ(request_bytes, config_->stats().request_bytes_.value());\n    EXPECT_EQ(0UL, config_->stats().decoder_error_.value());\n  }\n\n  void testResponse(const std::vector<StrStrMap>& metadata_values, const Stats::Counter& stat,\n                    uint32_t xid = 
1000) {\n    Buffer::OwnedImpl data = encodeResponseHeader(xid, 2000, 0);\n\n    expectSetDynamicMetadata(metadata_values);\n    EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(data, false));\n    EXPECT_EQ(1UL, stat.value());\n    EXPECT_EQ(20UL, config_->stats().response_bytes_.value());\n    EXPECT_EQ(0UL, config_->stats().decoder_error_.value());\n    const auto histogram_name =\n        fmt::format(\"test.zookeeper.{}_latency\", metadata_values[0].find(\"opname\")->second);\n    EXPECT_NE(absl::nullopt, findHistogram(histogram_name));\n  }\n\n  Stats::HistogramOptConstRef findHistogram(const std::string& name) {\n    Stats::StatNameManagedStorage storage(name, scope_.symbolTable());\n    return scope_.findHistogram(storage.statName());\n  }\n\n  Stats::TestUtil::TestStore scope_;\n  ZooKeeperFilterConfigSharedPtr config_;\n  std::unique_ptr<ZooKeeperFilter> filter_;\n  std::string stat_prefix_{\"test.zookeeper\"};\n  NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> stream_info_;\n  Event::SimulatedTimeSystem time_system_;\n};\n\nTEST_F(ZooKeeperFilterTest, Connect) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeConnect();\n\n  testRequest(data, {{{\"opname\", \"connect\"}}, {{\"bytes\", \"32\"}}}, config_->stats().connect_rq_, 32);\n\n  data = encodeConnectResponse();\n  expectSetDynamicMetadata({{{\"opname\", \"connect_response\"},\n                             {\"protocol_version\", \"0\"},\n                             {\"timeout\", \"10\"},\n                             {\"readonly\", \"0\"}},\n                            {{\"bytes\", \"24\"}}});\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(data, false));\n  EXPECT_EQ(1UL, config_->stats().connect_resp_.value());\n  EXPECT_EQ(24UL, config_->stats().response_bytes_.value());\n  EXPECT_EQ(0UL, config_->stats().decoder_error_.value());\n  EXPECT_NE(absl::nullopt, 
findHistogram(\"test.zookeeper.connect_response_latency\"));\n}\n\nTEST_F(ZooKeeperFilterTest, ConnectReadonly) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeConnect(true);\n\n  testRequest(data, {{{\"opname\", \"connect_readonly\"}}, {{\"bytes\", \"33\"}}},\n              config_->stats().connect_readonly_rq_, 33);\n\n  data = encodeConnectResponse(true);\n  expectSetDynamicMetadata({{{\"opname\", \"connect_response\"},\n                             {\"protocol_version\", \"0\"},\n                             {\"timeout\", \"10\"},\n                             {\"readonly\", \"1\"}},\n                            {{\"bytes\", \"25\"}}});\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(data, false));\n  EXPECT_EQ(1UL, config_->stats().connect_resp_.value());\n  EXPECT_EQ(25UL, config_->stats().response_bytes_.value());\n  EXPECT_EQ(0UL, config_->stats().decoder_error_.value());\n  EXPECT_NE(absl::nullopt, findHistogram(\"test.zookeeper.connect_response_latency\"));\n}\n\nTEST_F(ZooKeeperFilterTest, Fallback) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeBadMessage();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(0UL, config_->stats().connect_rq_.value());\n  EXPECT_EQ(0UL, config_->stats().connect_readonly_rq_.value());\n  EXPECT_EQ(1UL, config_->stats().decoder_error_.value());\n}\n\nTEST_F(ZooKeeperFilterTest, PacketTooBig) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeTooBigMessage();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(1UL, config_->stats().decoder_error_.value());\n}\n\nTEST_F(ZooKeeperFilterTest, PacketBiggerThanLength) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeBiggerThanLengthMessage();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(1UL, config_->stats().decoder_error_.value());\n}\n\nTEST_F(ZooKeeperFilterTest, UnknownOpcode) {\n  
initialize();\n\n  Buffer::OwnedImpl data = encodeUnknownOpcode();\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(1UL, config_->stats().decoder_error_.value());\n}\n\nTEST_F(ZooKeeperFilterTest, BufferSmallerThanStringLength) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePathLongerThanBuffer(\"/foo\", enumToSignedInt(OpCodes::Sync));\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(1UL, config_->stats().decoder_error_.value());\n}\n\nTEST_F(ZooKeeperFilterTest, PingRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePing();\n\n  testRequest(data, {{{\"opname\", \"ping\"}}, {{\"bytes\", \"12\"}}}, config_->stats().ping_rq_, 12);\n  testResponse({{{\"opname\", \"ping_response\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().ping_resp_, enumToSignedInt(XidCodes::PingXid));\n}\n\nTEST_F(ZooKeeperFilterTest, AuthRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeAuth(\"digest\");\n\n  testRequest(data, {{{\"opname\", \"auth\"}}, {{\"bytes\", \"36\"}}},\n              scope_.counter(\"test.zookeeper.auth.digest_rq\"), 36);\n  testResponse({{{\"opname\", \"auth_response\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().auth_resp_, enumToSignedInt(XidCodes::AuthXid));\n}\n\nTEST_F(ZooKeeperFilterTest, GetDataRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePathWatch(\"/foo\", true);\n\n  testRequest(data,\n              {{{\"opname\", \"getdata\"}, {\"path\", \"/foo\"}, {\"watch\", \"true\"}}, {{\"bytes\", \"21\"}}},\n              config_->stats().getdata_rq_, 21);\n  testResponse({{{\"opname\", \"getdata_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().getdata_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, GetDataRequestEmptyPath) {\n  initialize();\n\n  // It's 
valid to see an empty string as the path, which gets treated as /\n  // by the server.\n  Buffer::OwnedImpl data = encodePathWatch(\"\", true);\n\n  testRequest(data, {{{\"opname\", \"getdata\"}, {\"path\", \"\"}, {\"watch\", \"true\"}}, {{\"bytes\", \"17\"}}},\n              config_->stats().getdata_rq_, 17);\n  testResponse({{{\"opname\", \"getdata_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().getdata_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, CreateRequestPersistent) { testCreate(CreateFlags::Persistent); }\n\nTEST_F(ZooKeeperFilterTest, CreateRequestPersistentSequential) {\n  testCreate(CreateFlags::PersistentSequential);\n  testResponse({{{\"opname\", \"create_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().create_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, CreateRequestEphemeral) { testCreate(CreateFlags::Ephemeral); }\n\nTEST_F(ZooKeeperFilterTest, CreateRequestEphemeralSequential) {\n  testCreate(CreateFlags::EphemeralSequential);\n  testResponse({{{\"opname\", \"create_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().create_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, CreateRequestContainer) {\n  testCreate(CreateFlags::Container, OpCodes::CreateContainer);\n  testResponse(\n      {{{\"opname\", \"createcontainer_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().createcontainer_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, CreateRequestTTL) {\n  testCreate(CreateFlags::PersistentWithTtl, OpCodes::CreateTtl);\n  testResponse(\n      {{{\"opname\", \"createttl_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().createttl_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, CreateRequestTTLSequential) {\n  testCreate(CreateFlags::PersistentSequentialWithTtl, OpCodes::CreateTtl);\n  testResponse(\n      
{{{\"opname\", \"createttl_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().createttl_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, CreateRequest2) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeCreateRequest(\"/foo\", \"bar\", CreateFlags::Persistent, false,\n                                               enumToSignedInt(OpCodes::Create2));\n\n  testRequest(\n      data,\n      {{{\"opname\", \"create2\"}, {\"path\", \"/foo\"}, {\"create_type\", \"persistent\"}}, {{\"bytes\", \"35\"}}},\n      config_->stats().create2_rq_, 35);\n  testResponse({{{\"opname\", \"create2_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().create2_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, SetRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeSetRequest(\"/foo\", \"bar\", -1);\n\n  testRequest(data, {{{\"opname\", \"setdata\"}, {\"path\", \"/foo\"}}, {{\"bytes\", \"31\"}}},\n              config_->stats().setdata_rq_, 31);\n  testResponse({{{\"opname\", \"setdata_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().setdata_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, GetChildrenRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePathWatch(\"/foo\", false, enumToSignedInt(OpCodes::GetChildren));\n\n  testRequest(\n      data, {{{\"opname\", \"getchildren\"}, {\"path\", \"/foo\"}, {\"watch\", \"false\"}}, {{\"bytes\", \"21\"}}},\n      config_->stats().getchildren_rq_, 21);\n  testResponse(\n      {{{\"opname\", \"getchildren_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().getchildren_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, GetChildrenRequest2) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePathWatch(\"/foo\", false, enumToSignedInt(OpCodes::GetChildren2));\n\n  testRequest(\n      data, {{{\"opname\", \"getchildren2\"}, {\"path\", 
\"/foo\"}, {\"watch\", \"false\"}}, {{\"bytes\", \"21\"}}},\n      config_->stats().getchildren2_rq_, 21);\n  testResponse(\n      {{{\"opname\", \"getchildren2_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().getchildren2_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, DeleteRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeDeleteRequest(\"/foo\", -1);\n\n  testRequest(data,\n              {{{\"opname\", \"delete\"}, {\"path\", \"/foo\"}, {\"version\", \"-1\"}}, {{\"bytes\", \"24\"}}},\n              config_->stats().delete_rq_, 24);\n  testResponse({{{\"opname\", \"delete_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().delete_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, ExistsRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePathWatch(\"/foo\", false, enumToSignedInt(OpCodes::Exists));\n\n  testRequest(data,\n              {{{\"opname\", \"exists\"}, {\"path\", \"/foo\"}, {\"watch\", \"false\"}}, {{\"bytes\", \"21\"}}},\n              config_->stats().exists_rq_, 21);\n  testResponse({{{\"opname\", \"exists_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().exists_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, GetAclRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePath(\"/foo\", enumToSignedInt(OpCodes::GetAcl));\n\n  testRequest(data, {{{\"opname\", \"getacl\"}, {\"path\", \"/foo\"}}, {{\"bytes\", \"20\"}}},\n              config_->stats().getacl_rq_, 20);\n  testResponse({{{\"opname\", \"getacl_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().getacl_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, SetAclRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeSetAclRequest(\"/foo\", \"digest\", \"passwd\", -1);\n\n  testRequest(data,\n              {{{\"opname\", \"setacl\"}, {\"path\", \"/foo\"}, 
{\"version\", \"-1\"}}, {{\"bytes\", \"52\"}}},\n              config_->stats().setacl_rq_, 52);\n  testResponse({{{\"opname\", \"setacl_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().setacl_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, SyncRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePath(\"/foo\", enumToSignedInt(OpCodes::Sync));\n\n  testRequest(data, {{{\"opname\", \"sync\"}, {\"path\", \"/foo\"}}, {{\"bytes\", \"20\"}}},\n              config_->stats().sync_rq_, 20);\n  testResponse({{{\"opname\", \"sync_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().sync_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, GetEphemeralsRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePath(\"/foo\", enumToSignedInt(OpCodes::GetEphemerals));\n\n  testRequest(data, {{{\"opname\", \"getephemerals\"}, {\"path\", \"/foo\"}}, {{\"bytes\", \"20\"}}},\n              config_->stats().getephemerals_rq_, 20);\n  testResponse(\n      {{{\"opname\", \"getephemerals_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().getephemerals_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, GetAllChildrenNumberRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePath(\"/foo\", enumToSignedInt(OpCodes::GetAllChildrenNumber));\n\n  testRequest(data, {{{\"opname\", \"getallchildrennumber\"}, {\"path\", \"/foo\"}}, {{\"bytes\", \"20\"}}},\n              config_->stats().getallchildrennumber_rq_, 20);\n  testResponse({{{\"opname\", \"getallchildrennumber_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}},\n                {{\"bytes\", \"20\"}}},\n               config_->stats().getallchildrennumber_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, CheckRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePathVersion(\"/foo\", 100, enumToSignedInt(OpCodes::Check));\n\n  expectSetDynamicMetadata({{{\"bytes\", 
\"24\"}}});\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(1UL, config_->stats().check_rq_.value());\n  EXPECT_EQ(24UL, config_->stats().request_bytes_.value());\n  EXPECT_EQ(0UL, config_->stats().decoder_error_.value());\n\n  testResponse({{{\"opname\", \"check_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().check_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, MultiRequest) {\n  initialize();\n\n  Buffer::OwnedImpl create1 = encodeCreateRequest(\"/foo\", \"1\", CreateFlags::Persistent, true);\n  Buffer::OwnedImpl create2 = encodeCreateRequest(\"/bar\", \"1\", CreateFlags::Persistent, true);\n  Buffer::OwnedImpl check1 = encodePathVersion(\"/foo\", 100, enumToSignedInt(OpCodes::Check), true);\n  Buffer::OwnedImpl set1 = encodeSetRequest(\"/bar\", \"2\", -1, true);\n\n  std::vector<std::pair<int32_t, Buffer::OwnedImpl>> ops;\n  ops.push_back(std::make_pair(enumToSignedInt(OpCodes::Create), std::move(create1)));\n  ops.push_back(std::make_pair(enumToSignedInt(OpCodes::Create), std::move(create2)));\n  ops.push_back(std::make_pair(enumToSignedInt(OpCodes::Check), std::move(check1)));\n  ops.push_back(std::make_pair(enumToSignedInt(OpCodes::SetData), std::move(set1)));\n\n  Buffer::OwnedImpl data = encodeMultiRequest(ops);\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false));\n  EXPECT_EQ(1UL, config_->stats().multi_rq_.value());\n  EXPECT_EQ(128UL, config_->stats().request_bytes_.value());\n  EXPECT_EQ(2UL, config_->stats().create_rq_.value());\n  EXPECT_EQ(1UL, config_->stats().setdata_rq_.value());\n  EXPECT_EQ(1UL, config_->stats().check_rq_.value());\n  EXPECT_EQ(0UL, config_->stats().decoder_error_.value());\n\n  testResponse({{{\"opname\", \"multi_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().multi_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, ReconfigRequest) {\n  
initialize();\n\n  Buffer::OwnedImpl data = encodeReconfigRequest(\"s1\", \"s2\", \"s3\", 1000);\n\n  testRequest(data, {{{\"opname\", \"reconfig\"}}, {{\"bytes\", \"38\"}}}, config_->stats().reconfig_rq_,\n              38);\n  testResponse({{{\"opname\", \"reconfig_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().reconfig_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, SetWatchesRequestControlXid) {\n  initialize();\n\n  const std::vector<std::string> dataw = {\"/foo\", \"/bar\"};\n  const std::vector<std::string> existw = {\"/foo1\", \"/bar1\"};\n  const std::vector<std::string> childw = {\"/foo2\", \"/bar2\"};\n\n  Buffer::OwnedImpl data =\n      encodeSetWatchesRequest(dataw, existw, childw, enumToSignedInt(XidCodes::SetWatchesXid));\n\n  testRequest(data, {{{\"opname\", \"setwatches\"}}, {{\"bytes\", \"76\"}}},\n              config_->stats().setwatches_rq_, 76);\n  testResponse(\n      {{{\"opname\", \"setwatches_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().setwatches_resp_, enumToSignedInt(XidCodes::SetWatchesXid));\n}\n\nTEST_F(ZooKeeperFilterTest, SetWatchesRequest) {\n  initialize();\n\n  const std::vector<std::string> dataw = {\"/foo\", \"/bar\"};\n  const std::vector<std::string> existw = {\"/foo1\", \"/bar1\"};\n  const std::vector<std::string> childw = {\"/foo2\", \"/bar2\"};\n\n  Buffer::OwnedImpl data = encodeSetWatchesRequest(dataw, existw, childw);\n\n  testRequest(data, {{{\"opname\", \"setwatches\"}}, {{\"bytes\", \"76\"}}},\n              config_->stats().setwatches_rq_, 76);\n  testResponse(\n      {{{\"opname\", \"setwatches_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().setwatches_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, CheckWatchesRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePathVersion(\"/foo\", enumToSignedInt(WatcherType::Children),\n                        
                     enumToSignedInt(OpCodes::CheckWatches));\n\n  testRequest(data, {{{\"opname\", \"checkwatches\"}, {\"path\", \"/foo\"}}, {{\"bytes\", \"24\"}}},\n              config_->stats().checkwatches_rq_, 24);\n  testResponse(\n      {{{\"opname\", \"checkwatches_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().checkwatches_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, RemoveWatchesRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodePathVersion(\"/foo\", enumToSignedInt(WatcherType::Data),\n                                             enumToSignedInt(OpCodes::RemoveWatches));\n\n  testRequest(data, {{{\"opname\", \"removewatches\"}, {\"path\", \"/foo\"}}, {{\"bytes\", \"24\"}}},\n              config_->stats().removewatches_rq_, 24);\n  testResponse(\n      {{{\"opname\", \"removewatches_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n      config_->stats().removewatches_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, CloseRequest) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeCloseRequest();\n\n  testRequest(data, {{{\"opname\", \"close\"}}, {{\"bytes\", \"12\"}}}, config_->stats().close_rq_, 12);\n  testResponse({{{\"opname\", \"close_resp\"}, {\"zxid\", \"2000\"}, {\"error\", \"0\"}}, {{\"bytes\", \"20\"}}},\n               config_->stats().close_resp_);\n}\n\nTEST_F(ZooKeeperFilterTest, WatchEvent) {\n  initialize();\n\n  Buffer::OwnedImpl data = encodeWatchEvent(\"/foo\", 1, 0);\n  expectSetDynamicMetadata({{{\"opname\", \"watch_event\"},\n                             {\"event_type\", \"1\"},\n                             {\"client_state\", \"0\"},\n                             {\"zxid\", \"1000\"},\n                             {\"error\", \"0\"}},\n                            {{\"bytes\", \"36\"}}});\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(data, false));\n  EXPECT_EQ(1UL, config_->stats().watch_event_.value());\n  
EXPECT_EQ(36UL, config_->stats().response_bytes_.value());\n  EXPECT_EQ(0UL, config_->stats().decoder_error_.value());\n}\n\nTEST_F(ZooKeeperFilterTest, MissingXid) {\n  initialize();\n\n  const auto& stat = config_->stats().getdata_resp_;\n  Buffer::OwnedImpl data = encodeResponseHeader(1000, 2000, 0);\n\n  EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(data, false));\n  EXPECT_EQ(0UL, stat.value());\n  EXPECT_EQ(0UL, config_->stats().response_bytes_.value());\n  EXPECT_EQ(1UL, config_->stats().decoder_error_.value());\n}\n\n} // namespace ZooKeeperProxy\n} // namespace NetworkFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n    \"envoy_extension_cc_test_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test_library(\n    name = \"dns_filter_test_lib\",\n    srcs = [\"dns_filter_test_utils.cc\"],\n    hdrs = [\"dns_filter_test_utils.h\"],\n    extension_name = \"envoy.filters.udp_listener.dns_filter\",\n    deps = [\n        \"//source/extensions/filters/udp/dns_filter:dns_filter_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"dns_filter_test\",\n    srcs = [\"dns_filter_test.cc\"],\n    extension_name = \"envoy.filters.udp_listener.dns_filter\",\n    deps = [\n        \":dns_filter_test_lib\",\n        \"//source/extensions/filters/udp/dns_filter:dns_filter_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:listener_factory_context_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"dns_filter_integration_test\",\n    srcs = [\"dns_filter_integration_test.cc\"],\n    extension_name = \"envoy.filters.udp_listener.dns_filter\",\n    deps = [\n        \":dns_filter_test_lib\",\n        \"//source/extensions/filters/udp/dns_filter:config\",\n        \"//source/extensions/filters/udp/dns_filter:dns_filter_lib\",\n        \"//test/integration:integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"dns_filter_utils_test\",\n    srcs = [\"dns_filter_utils_test.cc\"],\n    extension_name = \"envoy.filters.udp_listener.dns_filter\",\n    deps = [\n        
\":dns_filter_test_lib\",\n        \"//source/extensions/filters/udp/dns_filter:config\",\n        \"//source/extensions/filters/udp/dns_filter:dns_filter_lib\",\n        \"//test/integration:integration_lib\",\n        \"@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"dns_filter_fuzz_test\",\n    srcs = [\"dns_filter_fuzz_test.cc\"],\n    corpus = \"dns_filter_corpus\",\n    deps = [\n        \"//source/extensions/filters/udp/dns_filter:dns_filter_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_corpus/88c22fd07c15d34576b085cb3e869e5da9b23b3f",
    "content": ""
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_corpus/95bcb3090cb222d80fa4fee7b88e84b99ae408b1",
    "content": ""
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_corpus/e9c8cd789e907d07e56e7e4d998e3de6c0550b9d",
    "content": ""
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_corpus/f1220105b4e868a7ce4d908eefbec7f403e5ddb8",
    "content": ""
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc",
    "content": "#include \"common/common/logger.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\nnamespace {\n\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  static const auto local = Network::Utility::parseInternetAddressAndPort(\"127.0.2.1:5353\");\n  static const auto peer = Network::Utility::parseInternetAddressAndPort(\"127.0.2.1:55088\");\n\n  static NiceMock<Random::MockRandomGenerator> random;\n  static NiceMock<Stats::MockHistogram> histogram;\n  histogram.unit_ = Stats::Histogram::Unit::Milliseconds;\n  static Api::ApiPtr api = Api::createApiForTest();\n  static NiceMock<Stats::MockCounter> mock_query_buffer_underflow;\n  static NiceMock<Stats::MockCounter> mock_record_name_overflow;\n  static NiceMock<Stats::MockCounter> query_parsing_failure;\n  static DnsParserCounters counters(mock_query_buffer_underflow, mock_record_name_overflow,\n                                    query_parsing_failure);\n\n  FuzzedDataProvider data_provider(buf, len);\n  Buffer::InstancePtr query_buffer = std::make_unique<Buffer::OwnedImpl>();\n\n  while (data_provider.remaining_bytes()) {\n    const std::string query = data_provider.ConsumeRandomLengthString(1024);\n    query_buffer->add(query.data(), query.size());\n\n    const uint16_t retry_count = data_provider.ConsumeIntegralInRange<uint16_t>(0, 3);\n    DnsMessageParser message_parser(true, api->timeSource(), retry_count, random, histogram);\n    uint64_t offset = data_provider.ConsumeIntegralInRange<uint64_t>(0, query.size());\n\n    const uint8_t fuzz_function = data_provider.ConsumeIntegralInRange<uint8_t>(0, 2);\n    switch (fuzz_function) {\n    case 0: {\n   
   DnsQueryContextPtr query_context =\n          std::make_unique<DnsQueryContext>(local, peer, counters, retry_count);\n      bool result = message_parser.parseDnsObject(query_context, query_buffer);\n      UNREFERENCED_PARAMETER(result);\n    } break;\n\n    case 1: {\n      DnsQueryRecordPtr ptr = message_parser.parseDnsQueryRecord(query_buffer, offset);\n      UNREFERENCED_PARAMETER(ptr);\n    } break;\n\n    case 2: {\n      DnsAnswerRecordPtr ptr = message_parser.parseDnsAnswerRecord(query_buffer, offset);\n      UNREFERENCED_PARAMETER(ptr);\n    } break;\n    } // end case\n    query_buffer->drain(query_buffer->length());\n  }\n}\n} // namespace\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter.h\"\n\n#include \"test/integration/integration.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"dns_filter_test_utils.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\nnamespace {\n\nclass DnsFilterIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                 public BaseIntegrationTest {\npublic:\n  DnsFilterIntegrationTest()\n      : BaseIntegrationTest(GetParam(), configToUse()), api_(Api::createApiForTest()),\n        counters_(mock_query_buffer_underflow_, mock_record_name_overflow_,\n                  query_parsing_failure_) {\n    setupResponseParser();\n  }\n\n  void setupResponseParser() {\n    histogram_.unit_ = Stats::Histogram::Unit::Milliseconds;\n    response_parser_ = std::make_unique<DnsMessageParser>(\n        true /* recursive queries */, api_->timeSource(), 0 /* retries */, random_, histogram_);\n  }\n\n  static std::string configToUse() {\n    return fmt::format(R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\nstatic_resources:\n  clusters:\n    name: cluster_0\n    load_assignment:\n      cluster_name: cluster_0\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: {}\n                port_value: 0\n    )EOF\",\n                       Platform::null_device_path,\n                       Network::Test::getLoopbackAddressString(GetParam()));\n  }\n\n  Network::Address::InstanceConstSharedPtr getListenerBindAddressAndPort() {\n    auto addr = Network::Utility::parseInternetAddressAndPort(\n        fmt::format(\"{}:{}\", Envoy::Network::Test::getLoopbackAddressUrlString(version_), 0),\n        false);\n\n    ASSERT(addr != nullptr);\n\n    addr = 
Network::Test::findOrCheckFreePort(addr, Network::Socket::Type::Datagram);\n    ASSERT(addr != nullptr && addr->ip() != nullptr);\n\n    return addr;\n  }\n\n  envoy::config::listener::v3::Listener\n  getListener0(Network::Address::InstanceConstSharedPtr& addr) {\n    auto config = fmt::format(R\"EOF(\nname: listener_0\nreuse_port: true\naddress:\n  socket_address:\n    address: {}\n    port_value: 0\n    protocol: udp\nlistener_filters:\n  name: \"envoy.filters.udp.dns_filter\"\n  typed_config:\n    '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig'\n    stat_prefix: \"my_prefix\"\n    client_config:\n      resolver_timeout: 1s\n      upstream_resolvers:\n      - socket_address:\n          address: {}\n          port_value: {}\n      max_pending_lookups: 256\n    server_config:\n      inline_dns_table:\n        external_retry_count: 0\n        known_suffixes:\n        - suffix: \"foo1.com\"\n        - suffix: \"cluster_0\"\n        virtual_domains:\n        - name: \"www.foo1.com\"\n          endpoint:\n            address_list:\n              address:\n              - 10.0.0.1\n              - 10.0.0.2\n              - 10.0.0.3\n              - 10.0.0.4\n        - name: \"cluster.foo1.com\"\n          endpoint:\n            cluster_name: \"cluster_0\"\n        - name: \"web.foo1.com\"\n          endpoint:\n            service_list:\n              services:\n              - service_name: \"http\"\n                protocol: {{ name: \"tcp\" }}\n                ttl: 43200s\n                targets:\n                - cluster_name: \"cluster_0\"\n                  weight: 10\n                  priority: 40\n                  port: 80\n              - service_name: \"https\"\n                protocol: {{ name: \"tcp\" }}\n                ttl: 43200s\n                targets:\n                - cluster_name: \"cluster_0\"\n                  weight: 20\n                  priority: 10\n)EOF\",\n                              
addr->ip()->addressAsString(), addr->ip()->addressAsString(),\n                              addr->ip()->port());\n    return TestUtility::parseYaml<envoy::config::listener::v3::Listener>(config);\n  }\n\n  envoy::config::listener::v3::Listener\n  getListener1(Network::Address::InstanceConstSharedPtr& addr) {\n    auto config = fmt::format(R\"EOF(\nname: listener_1\naddress:\n  socket_address:\n    address: {}\n    port_value: {}\n    protocol: udp\nlistener_filters:\n  name: \"envoy.filters.udp.dns_filter\"\n  typed_config:\n    '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig'\n    stat_prefix: \"external_resolver\"\n    server_config:\n      inline_dns_table:\n        external_retry_count: 0\n        known_suffixes:\n        - suffix: \"google.com\"\n        virtual_domains:\n        - name: \"www.google.com\"\n          endpoint:\n            address_list:\n              address:\n              - 42.42.42.42\n              - 2607:42:42::42:42\n)EOF\",\n                              addr->ip()->addressAsString(), addr->ip()->port());\n    return TestUtility::parseYaml<envoy::config::listener::v3::Listener>(config);\n  }\n\n  void setup(uint32_t upstream_count) {\n    udp_fake_upstream_ = true;\n    if (upstream_count > 1) {\n      setDeterministic();\n      setUpstreamCount(upstream_count);\n      config_helper_.addConfigModifier(\n          [upstream_count](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n            for (uint32_t i = 1; i < upstream_count; i++) {\n              bootstrap.mutable_static_resources()\n                  ->mutable_clusters(0)\n                  ->mutable_load_assignment()\n                  ->mutable_endpoints(0)\n                  ->add_lb_endpoints()\n                  ->mutable_endpoint()\n                  ->MergeFrom(ConfigHelper::buildEndpoint(\n                      Network::Test::getLoopbackAddressString(GetParam())));\n            }\n          });\n    }\n\n    
config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto addr_port = getListenerBindAddressAndPort();\n      auto listener_0 = getListener0(addr_port);\n      auto listener_1 = getListener1(addr_port);\n      bootstrap.mutable_static_resources()->add_listeners()->MergeFrom(listener_0);\n      bootstrap.mutable_static_resources()->add_listeners()->MergeFrom(listener_1);\n    });\n\n    BaseIntegrationTest::initialize();\n  }\n\n  void requestResponseWithListenerAddress(const Network::Address::Instance& listener_address,\n                                          const std::string& data_to_send,\n                                          Network::UdpRecvData& response_datagram) {\n    Network::Test::UdpSyncPeer client(version_);\n    client.write(data_to_send, listener_address);\n    client.recv(response_datagram);\n  }\n\n  Api::ApiPtr api_;\n  NiceMock<Stats::MockHistogram> histogram_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Stats::MockCounter> mock_query_buffer_underflow_;\n  NiceMock<Stats::MockCounter> mock_record_name_overflow_;\n  NiceMock<Stats::MockCounter> query_parsing_failure_;\n  DnsParserCounters counters_;\n  std::unique_ptr<DnsMessageParser> response_parser_;\n  DnsQueryContextPtr query_ctx_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, DnsFilterIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(DnsFilterIntegrationTest, ExternalLookupTest) {\n  setup(0);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n\n  Network::UdpRecvData response;\n  std::string query =\n      Utils::buildQueryForDomain(\"www.google.com\", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  
requestResponseWithListenerAddress(*listener_address, query, response);\n\n  query_ctx_ = response_parser_->createQueryContext(response, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(1, query_ctx_->answers_.size());\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n}\n\nTEST_P(DnsFilterIntegrationTest, ExternalLookupTestIPv6) {\n  setup(0);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n\n  Network::UdpRecvData response;\n  std::string query =\n      Utils::buildQueryForDomain(\"www.google.com\", DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN);\n  requestResponseWithListenerAddress(*listener_address, query, response);\n\n  query_ctx_ = response_parser_->createQueryContext(response, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(1, query_ctx_->answers_.size());\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n}\n\nTEST_P(DnsFilterIntegrationTest, LocalLookupTest) {\n  setup(0);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n\n  Network::UdpRecvData response;\n  std::string query =\n      Utils::buildQueryForDomain(\"www.foo1.com\", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  requestResponseWithListenerAddress(*listener_address, query, response);\n\n  query_ctx_ = response_parser_->createQueryContext(response, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(4, query_ctx_->answers_.size());\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n}\n\nTEST_P(DnsFilterIntegrationTest, ClusterLookupTest) {\n  setup(2);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const 
auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n\n  uint16_t record_type;\n  if (listener_address->ip()->ipv6()) {\n    record_type = DNS_RECORD_TYPE_AAAA;\n  } else {\n    record_type = DNS_RECORD_TYPE_A;\n  }\n\n  Network::UdpRecvData response;\n  std::string query = Utils::buildQueryForDomain(\"cluster_0\", record_type, DNS_RECORD_CLASS_IN);\n  requestResponseWithListenerAddress(*listener_address, query, response);\n\n  query_ctx_ = response_parser_->createQueryContext(response, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(2, query_ctx_->answers_.size());\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n}\n\nTEST_P(DnsFilterIntegrationTest, ClusterEndpointLookupTest) {\n  setup(2);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n\n  uint16_t record_type;\n  if (listener_address->ip()->ipv6()) {\n    record_type = DNS_RECORD_TYPE_AAAA;\n  } else {\n    record_type = DNS_RECORD_TYPE_A;\n  }\n\n  Network::UdpRecvData response;\n  std::string query =\n      Utils::buildQueryForDomain(\"cluster.foo1.com\", record_type, DNS_RECORD_CLASS_IN);\n  requestResponseWithListenerAddress(*listener_address, query, response);\n\n  query_ctx_ = response_parser_->createQueryContext(response, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(2, query_ctx_->answers_.size());\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n}\n\nTEST_P(DnsFilterIntegrationTest, ClusterEndpointWithPortServiceRecordLookupTest) {\n  setup(2);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", 
Network::Test::getLoopbackAddressUrlString(version_), port));\n\n  const std::string service(\"_http._tcp.web.foo1.com\");\n  Network::UdpRecvData response;\n  std::string query = Utils::buildQueryForDomain(service, DNS_RECORD_TYPE_SRV, DNS_RECORD_CLASS_IN);\n  requestResponseWithListenerAddress(*listener_address, query, response);\n\n  query_ctx_ = response_parser_->createQueryContext(response, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(2, query_ctx_->answers_.size());\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.second->type_, DNS_RECORD_TYPE_SRV);\n\n    DnsSrvRecord* srv_rec = dynamic_cast<DnsSrvRecord*>(answer.second.get());\n\n    EXPECT_EQ(service, srv_rec->name_);\n    EXPECT_EQ(43200, srv_rec->ttl_.count());\n\n    EXPECT_EQ(1, srv_rec->targets_.size());\n    const auto& target = srv_rec->targets_.begin();\n\n    EXPECT_EQ(10, target->second.weight);\n    EXPECT_EQ(40, target->second.priority);\n    EXPECT_EQ(80, target->second.port);\n  }\n}\n\nTEST_P(DnsFilterIntegrationTest, ClusterEndpointWithoutPortServiceRecordLookupTest) {\n  constexpr size_t endpoints = 2;\n  setup(endpoints);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n\n  const std::string service(\"_https._tcp.web.foo1.com\");\n  Network::UdpRecvData response;\n  std::string query = Utils::buildQueryForDomain(service, DNS_RECORD_TYPE_SRV, DNS_RECORD_CLASS_IN);\n  requestResponseWithListenerAddress(*listener_address, query, response);\n\n  query_ctx_ = response_parser_->createQueryContext(response, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(endpoints, query_ctx_->answers_.size());\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, 
response_parser_->getQueryResponseCode());\n\n  std::set<uint16_t> ports;\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.second->type_, DNS_RECORD_TYPE_SRV);\n\n    DnsSrvRecord* srv_rec = dynamic_cast<DnsSrvRecord*>(answer.second.get());\n\n    EXPECT_EQ(service, srv_rec->name_);\n    EXPECT_EQ(43200, srv_rec->ttl_.count());\n\n    EXPECT_EQ(1, srv_rec->targets_.size());\n    const auto& target = srv_rec->targets_.begin();\n\n    EXPECT_EQ(20, target->second.weight);\n    EXPECT_EQ(10, target->second.priority);\n\n    // The port is unspecified and automatically assigned by the cluster\n    EXPECT_NE(0, target->second.priority);\n    ports.emplace(target->second.port);\n  }\n\n  EXPECT_EQ(endpoints, ports.size());\n}\n} // namespace\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_test.cc",
    "content": "#include \"envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h\"\n#include \"envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter_constants.h\"\n#include \"extensions/filters/udp/dns_filter/dns_filter_utils.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/listener_factory_context.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"dns_filter_test_utils.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::AnyNumber;\nusing testing::AtLeast;\nusing testing::InSequence;\nusing testing::Mock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\nnamespace {\n\nApi::IoCallUint64Result makeNoError(uint64_t rc) {\n  auto no_error = Api::ioCallUint64ResultNoError();\n  no_error.rc_ = rc;\n  return no_error;\n}\n\nclass DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime {\npublic:\n  DnsFilterTest()\n      : listener_address_(Network::Utility::parseInternetAddressAndPort(\"127.0.2.1:5353\")),\n        api_(Api::createApiForTest(random_)),\n        counters_(mock_query_buffer_underflow_, mock_record_name_overflow_,\n                  query_parsing_failure_) {\n    udp_response_.addresses_.local_ = listener_address_;\n    udp_response_.addresses_.peer_ = listener_address_;\n    udp_response_.buffer_ = std::make_unique<Buffer::OwnedImpl>();\n\n    setupResponseParser();\n    EXPECT_CALL(callbacks_, udpListener()).Times(AtLeast(0));\n    EXPECT_CALL(callbacks_.udp_listener_, send(_))\n        .WillRepeatedly(\n            Invoke([this](const Network::UdpSendData& send_data) -> Api::IoCallUint64Result {\n              
udp_response_.buffer_->drain(udp_response_.buffer_->length());\n              udp_response_.buffer_->move(send_data.buffer_);\n              return makeNoError(udp_response_.buffer_->length());\n            }));\n    EXPECT_CALL(callbacks_.udp_listener_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_));\n  }\n\n  ~DnsFilterTest() override { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); }\n\n  void setupResponseParser() {\n    histogram_.unit_ = Stats::Histogram::Unit::Milliseconds;\n    response_parser_ = std::make_unique<DnsMessageParser>(\n        true /* recursive queries */, api_->timeSource(), 0 /* retries */, random_, histogram_);\n  }\n\n  void setup(const std::string& yaml) {\n    envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig config;\n    TestUtility::loadFromYamlAndValidate(yaml, config);\n    auto store = stats_store_.createScope(\"dns_scope\");\n    ON_CALL(listener_factory_, scope()).WillByDefault(ReturnRef(*store));\n    ON_CALL(listener_factory_, api()).WillByDefault(ReturnRef(*api_));\n    ON_CALL(random_, random()).WillByDefault(Return(3));\n    ON_CALL(listener_factory_, random()).WillByDefault(ReturnRef(random_));\n\n    resolver_ = std::make_shared<Network::MockDnsResolver>();\n    ON_CALL(dispatcher_, createDnsResolver(_, _)).WillByDefault(Return(resolver_));\n\n    config_ = std::make_shared<DnsFilterEnvoyConfig>(listener_factory_, config);\n    filter_ = std::make_unique<DnsFilter>(callbacks_, config_);\n  }\n\n  void sendQueryFromClient(const std::string& peer_address, const std::string& buffer) {\n    Network::UdpRecvData data{};\n    data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(peer_address);\n    data.addresses_.local_ = listener_address_;\n    data.buffer_ = std::make_unique<Buffer::OwnedImpl>(buffer);\n    data.receive_time_ = MonotonicTime(std::chrono::seconds(0));\n    filter_->onData(data);\n  }\n\n  const Network::Address::InstanceConstSharedPtr listener_address_;\n  
NiceMock<Random::MockRandomGenerator> random_;\n  Api::ApiPtr api_;\n  DnsFilterEnvoyConfigSharedPtr config_;\n  NiceMock<Stats::MockCounter> mock_query_buffer_underflow_;\n  NiceMock<Stats::MockCounter> mock_record_name_overflow_;\n  NiceMock<Stats::MockCounter> query_parsing_failure_;\n  DnsParserCounters counters_;\n  DnsQueryContextPtr query_ctx_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  Network::MockUdpReadFilterCallbacks callbacks_;\n  Network::UdpRecvData udp_response_;\n  NiceMock<Filesystem::MockInstance> file_system_;\n  NiceMock<Stats::MockHistogram> histogram_;\n  NiceMock<Server::Configuration::MockListenerFactoryContext> listener_factory_;\n  Stats::IsolatedStoreImpl stats_store_;\n  std::shared_ptr<Network::MockDnsResolver> resolver_;\n  std::unique_ptr<DnsFilter> filter_;\n  std::unique_ptr<DnsMessageParser> response_parser_;\n\n  const std::string forward_query_off_config = R\"EOF(\nstat_prefix: \"my_prefix\"\nserver_config:\n  inline_dns_table:\n    external_retry_count: 3\n    known_suffixes:\n    - suffix: foo1.com\n    - suffix: foo2.com\n    - suffix: foo3.com\n    - suffix: foo16.com\n    - suffix: thisismydomainforafivehundredandtwelvebytetest.com\n    virtual_domains:\n    - name: \"www.foo1.com\"\n      endpoint:\n        address_list:\n          address:\n          - \"10.0.0.1\"\n          - \"10.0.0.2\"\n    - name: \"www.foo2.com\"\n      endpoint:\n        address_list:\n          address:\n          - \"2001:8a:c1::2800:7\"\n          - \"2001:8a:c1::2800:8\"\n          - \"2001:8a:c1::2800:9\"\n    - name: \"www.foo3.com\"\n      endpoint:\n        address_list:\n          address:\n          - \"10.0.3.1\"\n    - name: \"www.foo16.com\"\n      endpoint:\n        address_list:\n          address:\n          - \"10.0.16.1\"\n          - \"10.0.16.2\"\n          - \"10.0.16.3\"\n          - \"10.0.16.4\"\n          - \"10.0.16.5\"\n          - \"10.0.16.6\"\n          - \"10.0.16.7\"\n          - \"10.0.16.8\"\n          - 
\"10.0.16.9\"\n          - \"10.0.16.10\"\n          - \"10.0.16.11\"\n          - \"10.0.16.12\"\n          - \"10.0.16.13\"\n          - \"10.0.16.14\"\n          - \"10.0.16.15\"\n          - \"10.0.16.16\"\n    - name: www.supercalifragilisticexpialidocious.thisismydomainforafivehundredandtwelvebytetest.com\n      endpoint:\n        address_list:\n          address:\n          - \"2001:8a:c1::2801:0001\"\n          - \"2001:8a:c1::2801:0002\"\n          - \"2001:8a:c1::2801:0003\"\n          - \"2001:8a:c1::2801:0004\"\n          - \"2001:8a:c1::2801:0005\"\n          - \"2001:8a:c1::2801:0006\"\n          - \"2001:8a:c1::2801:0007\"\n          - \"2001:8a:c1::2801:0008\"\n)EOF\";\n\n  const std::string forward_query_on_config = R\"EOF(\nstat_prefix: \"my_prefix\"\nclient_config:\n  resolver_timeout: 1s\n  upstream_resolvers:\n  - socket_address:\n      address: \"1.1.1.1\"\n      port_value: 53\n  - socket_address:\n      address: \"8.8.8.8\"\n      port_value: 53\n  - socket_address:\n      address: \"8.8.4.4\"\n      port_value: 53\n  max_pending_lookups: 1\nserver_config:\n  inline_dns_table:\n    external_retry_count: 0\n    known_suffixes:\n    - suffix: foo1.com\n    - suffix: foo2.com\n    virtual_domains:\n      - name: \"www.foo1.com\"\n        endpoint:\n          address_list:\n            address:\n            - \"10.0.0.1\"\n)EOF\";\n\n  const std::string external_dns_table_config = R\"EOF(\nstat_prefix: \"my_prefix\"\nclient_config:\n  resolver_timeout: 1s\n  upstream_resolvers:\n  - socket_address:\n      address: \"1.1.1.1\"\n      port_value: 53\n  max_pending_lookups: 256\nserver_config:\n  external_dns_table:\n    filename: {}\n)EOF\";\n\n  const std::string external_dns_table_json = R\"EOF(\n{\n  \"external_retry_count\": 3,\n  \"known_suffixes\": [ { \"suffix\": \"com\" } ],\n  \"virtual_domains\": [\n    {\n      \"name\": \"www.external_foo1.com\",\n      \"endpoint\": { \"address_list\": { \"address\": [ \"10.0.0.1\", \"10.0.0.2\" ] } 
}\n    },\n    {\n      \"name\": \"www.external_foo2.com\",\n      \"endpoint\": { \"address_list\": { \"address\": [ \"2001:8a:c1::2800:7\" ] } }\n    },\n    {\n      \"name\": \"www.external_foo3.com\",\n      \"endpoint\": { \"address_list\": { \"address\": [ \"10.0.3.1\" ] } }\n    }\n  ]\n}\n)EOF\";\n\n  const std::string external_dns_table_yaml = R\"EOF(\nexternal_retry_count: 3\nknown_suffixes:\n  - suffix: \"com\"\nvirtual_domains:\n  - name: \"www.external_foo1.com\"\n    endpoint:\n      address_list:\n        address:\n        - \"10.0.0.1\"\n  - name: \"www.external_foo1.com\"\n    endpoint:\n      address_list:\n        address:\n        - \"10.0.0.2\"\n  - name: \"www.external_foo2.com\"\n    endpoint:\n      address_list:\n        address:\n        - \"2001:8a:c1::2800:7\"\n  - name: \"www.external_foo3.com\"\n    endpoint:\n      address_list:\n        address:\n        - \"10.0.3.1\"\n)EOF\";\n\n  const std::string max_records_table_yaml = R\"EOF(\nexternal_retry_count: 3\nknown_suffixes:\n  - suffix: \"ermac.com\"\nvirtual_domains:\n  - name: \"one.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.1\" ] }\n  - name: \"two.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.2\" ] }\n  - name: \"three.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.3\" ] }\n  - name: \"four.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.4\" ] }\n  - name: \"five.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.5\" ] }\n  - name: \"six.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.6\" ] }\n  - name: \"seven.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.7\" ] }\n  - name: \"eight.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.8\" ] }\n  - name: \"nine.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.9\" ] }\n  - name: 
\"ten.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.10\" ] }\n  - name: \"eleven.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.11\" ] }\n  - name: \"twelve.web.ermac.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.17.12\" ] }\n  - name: \"web.ermac.com\"\n    endpoint:\n      service_list:\n        services:\n        - service_name: \"http\"\n          protocol: { number: 6 }\n          ttl: 86400s\n          targets: [\n            { host_name: \"one.web.ermac.com\" , weight: 120, priority: 10, port: 80 },\n            { host_name: \"two.web.ermac.com\", weight: 110, priority: 10, port: 80 },\n            { host_name: \"three.web.ermac.com\", weight: 100, priority: 10, port: 80 },\n            { host_name: \"four.web.ermac.com\", weight: 90, priority: 10, port: 80 },\n            { host_name: \"five.web.ermac.com\" , weight: 80, priority: 10, port: 80 },\n            { host_name: \"six.web.ermac.com\", weight: 70, priority: 10, port: 80 },\n            { host_name: \"seven.web.ermac.com\", weight: 60, priority: 10, port: 80 },\n            { host_name: \"eight.web.ermac.com\", weight: 50, priority: 10, port: 80 },\n            { host_name: \"nine.web.ermac.com\" , weight: 40, priority: 10, port: 80 },\n            { host_name: \"ten.web.ermac.com\", weight: 30, priority: 10, port: 80 },\n            { host_name: \"eleven.web.ermac.com\", weight: 20, priority: 10, port: 80 },\n            { host_name: \"twelve.web.ermac.com\", weight: 10, priority: 10, port: 80 }\n          ]\n)EOF\";\n\n  const std::string external_dns_table_services_yaml = R\"EOF(\nexternal_retry_count: 3\nknown_suffixes:\n  - suffix: \"subzero.com\"\nvirtual_domains:\n  - name: \"primary.voip.subzero.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.3.1\" ] }\n  - name: \"secondary.voip.subzero.com\"\n    endpoint:\n      address_list: { address: [ \"10.0.3.2\" ] }\n  - name: \"backup.voip.subzero.com\"\n    
endpoint:\n      address_list: { address: [ \"10.0.3.3\" ] }\n  - name: \"emergency.voip.subzero.com\"\n    endpoint:\n      address_list: { address: [ \"2200:823f::cafe:beef\" ] }\n  - name: \"voip.subzero.com\"\n    endpoint:\n      service_list:\n        services:\n        - service_name: \"sip\"\n          protocol: { number: 6 }\n          ttl: 86400s\n          targets: [\n            { host_name: \"primary.voip.subzero.com\" , weight: 30, priority: 10, port: 5060 },\n            { host_name: \"secondary.voip.subzero.com\", weight: 20, priority: 10, port: 5061 },\n            { host_name: \"backup.voip.subzero.com\", weight: 10, priority: 10, port: 5062 },\n            { host_name: \"emergency.voip.subzero.com\", weight: 40, priority: 10, port: 5063 }\n          ]\n  - name: \"web.subzero.com\"\n    endpoint:\n      service_list:\n        services:\n        - service_name: \"http\"\n          protocol: { name: \"tcp\" }\n          ttl: 43200s\n          port: 80\n          targets:\n          - name:\n              cluster_name: \"fake_http_cluster_0\"\n            weight: 10\n            priority: 1\n        - service_name: \"https\"\n          protocol: { name: \"tcp\" }\n          ttl: 43200s\n          targets:\n          - name:\n              cluster_name: \"fake_http_cluster_1\"\n            weight: 10\n            priority: 1\n        - service_name: \"for_coverage_no_protocol_defined_so_record_is_skipped\"\n          ttl: 86400s\n          targets:\n          - name:\n              cluster_name: \"fake_http_cluster_3\"\n            weight: 3\n            priority: 99\n)EOF\";\n};\n\nTEST_F(DnsFilterTest, InvalidQuery) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  sendQueryFromClient(\"10.0.0.1:1000\", \"hello\");\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n  
EXPECT_EQ(0, query_ctx_->answers_.size());\n\n  // Validate stats\n  EXPECT_EQ(0, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value());\n  EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used());\n  EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used());\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n}\n\nTEST_F(DnsFilterTest, MaxQueryAndResponseSizeTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  std::string domain(\n      \"www.supercalifragilisticexpialidocious.thisismydomainforafivehundredandtwelvebytetest.com\");\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n  EXPECT_LT(udp_response_.buffer_->length(), Utils::MAX_UDP_DNS_SIZE);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n  // There are 8 addresses, however, since the domain is part of the answer record, each\n  // serialized answer is over 100 bytes in size, there is room for 3 before the next\n  // serialized answer puts the buffer over the 512 byte limit. 
The query itself is also\n  // around 100 bytes.\n  EXPECT_EQ(3, query_ctx_->answers_.size());\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value());\n\n  // Although there are only 3 answers returned, the filter did find 8 records for the query\n  EXPECT_EQ(8, config_->stats().local_aaaa_record_answers_.value());\n  EXPECT_EQ(0, config_->stats().downstream_rx_invalid_queries_.value());\n  EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used());\n  EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used());\n}\n\nTEST_F(DnsFilterTest, InvalidQueryNameTooLongTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  std::string domain = \"www.\" + std::string(256, 'a') + \".com\";\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n\n  // Validate stats\n  EXPECT_EQ(0, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value());\n  EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used());\n  EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used());\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n}\n\nTEST_F(DnsFilterTest, InvalidLabelNameTooLongTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  std::string domain(64, 'a');\n  domain += \".com\";\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = 
response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n\n  // Validate stats\n  EXPECT_EQ(0, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value());\n  EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used());\n  EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used());\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n}\n\nTEST_F(DnsFilterTest, SingleTypeAQuery) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n\n  const std::string domain(\"www.foo3.com\");\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(1, query_ctx_->answers_.size());\n\n  // Verify that we have an answer record for the queried domain\n\n  const DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second;\n\n  // Verify the address returned\n  const std::list<std::string> expected{\"10.0.3.1\"};\n\n  Utils::verifyAddress(expected, answer);\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().known_domain_queries_.value());\n  EXPECT_EQ(1, config_->stats().local_a_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().a_record_queries_.value());\n  EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used());\n  EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used());\n}\n\nTEST_F(DnsFilterTest, 
RepeatedTypeAQuerySuccess) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  constexpr size_t loopCount = 5;\n  const std::string domain(\"www.foo3.com\");\n  size_t total_query_bytes = 0;\n\n  for (size_t i = 0; i < loopCount; i++) {\n    const std::string query =\n        Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n    total_query_bytes += query.size();\n    ASSERT_FALSE(query.empty());\n    sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n    query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n    EXPECT_TRUE(query_ctx_->parse_status_);\n\n    EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n    EXPECT_EQ(1, query_ctx_->answers_.size());\n\n    // Verify that we have an answer record for the queried domain\n    const DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second;\n\n    // Verify the address returned\n    std::list<std::string> expected{\"10.0.3.1\"};\n    Utils::verifyAddress(expected, answer);\n  }\n\n  // Validate stats\n  EXPECT_EQ(loopCount, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(loopCount, config_->stats().known_domain_queries_.value());\n  EXPECT_EQ(loopCount, config_->stats().local_a_record_answers_.value());\n  EXPECT_EQ(loopCount, config_->stats().a_record_queries_.value());\n}\n\nTEST_F(DnsFilterTest, LocalTypeAQueryFail) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  const std::string query =\n      Utils::buildQueryForDomain(\"www.foo2.com\", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n\n  // Validate stats\n  EXPECT_EQ(1, 
config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().known_domain_queries_.value());\n  EXPECT_EQ(0, config_->stats().local_a_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().unanswered_queries_.value());\n}\n\nTEST_F(DnsFilterTest, LocalTypeAAAAQuerySuccess) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  std::list<std::string> expected{\"2001:8a:c1::2800:7\", \"2001:8a:c1::2800:8\", \"2001:8a:c1::2800:9\"};\n  const std::string domain(\"www.foo2.com\");\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(expected.size(), query_ctx_->answers_.size());\n\n  // Verify the address returned\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.first, domain);\n    Utils::verifyAddress(expected, answer.second);\n  }\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().known_domain_queries_.value());\n  EXPECT_EQ(3, config_->stats().local_aaaa_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value());\n}\n\nTEST_F(DnsFilterTest, ExternalResolutionReturnSingleAddress) {\n  InSequence s;\n\n  auto timeout_timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1);\n\n  const std::string expected_address(\"130.207.244.251\");\n  const std::string domain(\"www.foobaz.com\");\n  setup(forward_query_on_config);\n\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  
ASSERT_FALSE(query.empty());\n\n  // Verify that we are calling the resolver with the expected name\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(domain, _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n\n  // Send a query to for a name not in our configuration\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  EXPECT_CALL(*timeout_timer, disableTimer()).Times(AnyNumber());\n\n  // Execute resolve callback\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({expected_address}));\n\n  // parse the result\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(1, query_ctx_->answers_.size());\n\n  std::list<std::string> expected{expected_address};\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.first, domain);\n    Utils::verifyAddress(expected, answer.second);\n  }\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().external_a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().external_a_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().unanswered_queries_.value());\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get()));\n}\n\nTEST_F(DnsFilterTest, ExternalResolutionIpv6SingleAddress) {\n  InSequence s;\n\n  auto timeout_timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1);\n\n  const std::string expected_address(\"2a04:4e42:d::323\");\n  const std::string domain(\"www.foobaz.com\");\n\n  setup(forward_query_on_config);\n\n  // Verify that we are calling the 
resolver with the expected name\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(domain, _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  // Send a query to for a name not in our configuration\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  EXPECT_CALL(*timeout_timer, disableTimer()).Times(1);\n\n  // Execute resolve callback\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({expected_address}));\n\n  // parse the result\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(1, query_ctx_->answers_.size());\n\n  std::list<std::string> expected{expected_address};\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.first, domain);\n    Utils::verifyAddress(expected, answer.second);\n  }\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().external_aaaa_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().external_aaaa_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().unanswered_queries_.value());\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get()));\n}\n\nTEST_F(DnsFilterTest, ExternalResolutionReturnMultipleAddresses) {\n  InSequence s;\n\n  auto timeout_timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1);\n\n  const std::list<std::string> expected_address{\"130.207.244.251\", \"130.207.244.252\",\n               
                                 \"130.207.244.253\", \"130.207.244.254\"};\n  const std::string domain(\"www.foobaz.com\");\n  setup(forward_query_on_config);\n\n  // Verify that we are calling the resolver with the expected name\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(domain, _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  // Send a query to for a name not in our configuration\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  EXPECT_CALL(*timeout_timer, disableTimer()).Times(1);\n\n  // Execute resolve callback\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({expected_address}));\n\n  // parse the result\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(expected_address.size(), query_ctx_->answers_.size());\n\n  EXPECT_LT(udp_response_.buffer_->length(), Utils::MAX_UDP_DNS_SIZE);\n\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.first, domain);\n    Utils::verifyAddress(expected_address, answer.second);\n  }\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().external_a_record_queries_.value());\n  EXPECT_EQ(expected_address.size(), config_->stats().external_a_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().unanswered_queries_.value());\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get()));\n}\n\nTEST_F(DnsFilterTest, ExternalResolutionReturnNoAddresses) {\n 
 InSequence s;\n\n  auto timeout_timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1);\n\n  const std::string domain(\"www.foobaz.com\");\n  setup(forward_query_on_config);\n\n  // Verify that we are calling the resolver with the expected name\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(domain, _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  // Send a query to for a name not in our configuration\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  EXPECT_CALL(*timeout_timer, disableTimer()).Times(1);\n\n  // Execute resolve callback\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({}));\n\n  // parse the result\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().external_a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().external_a_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().unanswered_queries_.value());\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get()));\n}\n\nTEST_F(DnsFilterTest, ExternalResolutionTimeout) {\n  InSequence s;\n\n  auto timeout_timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1);\n\n  const std::string domain(\"www.foobaz.com\");\n  setup(forward_query_on_config);\n\n  
const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  EXPECT_CALL(*resolver_, resolve(domain, _, _)).WillOnce(Return(&resolver_->active_query_));\n\n  // Send a query to for a name not in our configuration\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n  simTime().advanceTimeWait(std::chrono::milliseconds(1500));\n\n  // Execute timeout timer callback\n  timeout_timer->invokeCallback();\n\n  // parse the result\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().external_a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().external_a_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().unanswered_queries_.value());\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get()));\n}\n\nTEST_F(DnsFilterTest, ExternalResolutionTimeout2) {\n  InSequence s;\n\n  auto timeout_timer = new NiceMock<Event::MockTimer>(&dispatcher_);\n  EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1);\n\n  const std::string domain(\"www.foobaz.com\");\n  setup(forward_query_on_config);\n\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n\n  // Verify that we are calling the resolver with the expected name\n  Network::DnsResolver::ResolveCb resolve_cb;\n  EXPECT_CALL(*resolver_, resolve(domain, _, _))\n      .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_)));\n\n  // Send a query to for a name not in our 
configuration\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n  simTime().advanceTimeWait(std::chrono::milliseconds(1500));\n\n  // Execute timeout timer callback\n  timeout_timer->invokeCallback();\n\n  // Execute resolve callback. This should harmlessly return and not alter\n  // the response received by the client. Even though we are returning a successful\n  // response, the client does not get an answer\n  resolve_cb(Network::DnsResolver::ResolutionStatus::Success,\n             TestUtility::makeDnsResponse({\"130.207.244.251\"}));\n\n  // parse the result\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().external_a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().external_a_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().unanswered_queries_.value());\n\n  EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get()));\n}\n\nTEST_F(DnsFilterTest, ExternalResolutionExceedMaxPendingLookups) {\n  InSequence s;\n\n  const std::string domain(\"www.foobaz.com\");\n  setup(forward_query_on_config);\n  const std::string query1 =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query1.empty());\n\n  const std::string query2 =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query2.empty());\n\n  const std::string query3 =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query3.empty());\n\n  // Send the first query. 
This will remain 'in-flight'\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n  EXPECT_CALL(*resolver_, resolve(domain, _, _));\n  sendQueryFromClient(\"10.0.0.1:1000\", query1);\n\n  // Send the second query. This will remain 'in-flight' also\n  EXPECT_CALL(dispatcher_, createTimer_(_));\n  EXPECT_CALL(*resolver_, resolve(domain, _, _));\n  sendQueryFromClient(\"10.0.0.1:1000\", query2);\n\n  // The third query should be rejected since pending queries (2) > 1, and\n  // we've disabled retries. The client will get a response for this single\n  // query\n  sendQueryFromClient(\"10.0.0.1:1000\", query3);\n\n  // Parse the result for the third query. Since the first two queries are\n  // still in flight, the third query is the only one to generate a response\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n  EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode());\n\n  // Validate stats\n  EXPECT_EQ(3, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().external_a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().external_a_record_answers_.value());\n  EXPECT_EQ(2, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().unanswered_queries_.value());\n}\n\nTEST_F(DnsFilterTest, ConsumeExternalJsonTableTest) {\n  InSequence s;\n\n  std::string temp_path =\n      TestEnvironment::writeStringToFileForTest(\"dns_table.json\", external_dns_table_json);\n  std::string config_to_use = fmt::format(external_dns_table_config, temp_path);\n  setup(config_to_use);\n\n  const std::string domain(\"www.external_foo1.com\");\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n\n  ASSERT_FALSE(query.empty());\n  sendQueryFromClient(\"10.0.0.1:1000\", 
query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(2, query_ctx_->answers_.size());\n\n  // Verify the address returned\n  const std::list<std::string> expected{\"10.0.0.1\", \"10.0.0.2\"};\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.first, domain);\n    Utils::verifyAddress(expected, answer.second);\n  }\n\n  // Validate stats\n  ASSERT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  ASSERT_EQ(1, config_->stats().known_domain_queries_.value());\n  ASSERT_EQ(2, config_->stats().local_a_record_answers_.value());\n  ASSERT_EQ(1, config_->stats().a_record_queries_.value());\n}\n\nTEST_F(DnsFilterTest, ConsumeExternalJsonTableTestNoIpv6Answer) {\n  InSequence s;\n\n  std::string temp_path =\n      TestEnvironment::writeStringToFileForTest(\"dns_table.json\", external_dns_table_json);\n  std::string config_to_use = fmt::format(external_dns_table_config, temp_path);\n  setup(config_to_use);\n\n  const std::string domain(\"www.external_foo1.com\");\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN);\n\n  ASSERT_FALSE(query.empty());\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n\n  // Validate stats\n  ASSERT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  ASSERT_EQ(1, config_->stats().known_domain_queries_.value());\n  ASSERT_EQ(0, config_->stats().local_a_record_answers_.value());\n  ASSERT_EQ(0, config_->stats().local_aaaa_record_answers_.value());\n  ASSERT_EQ(0, config_->stats().a_record_queries_.value());\n  
ASSERT_EQ(1, config_->stats().aaaa_record_queries_.value());\n}\n\nTEST_F(DnsFilterTest, ConsumeExternalYamlTableTest) {\n  InSequence s;\n\n  std::string temp_path =\n      TestEnvironment::writeStringToFileForTest(\"dns_table.yaml\", external_dns_table_yaml);\n  std::string config_to_use = fmt::format(external_dns_table_config, temp_path);\n  setup(config_to_use);\n\n  const std::string domain(\"www.external_foo1.com\");\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n\n  ASSERT_FALSE(query.empty());\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(2, query_ctx_->answers_.size());\n\n  // Verify the address returned\n  const std::list<std::string> expected{\"10.0.0.1\", \"10.0.0.2\"};\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.first, domain);\n    Utils::verifyAddress(expected, answer.second);\n  }\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().known_domain_queries_.value());\n  EXPECT_EQ(2, config_->stats().local_a_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().a_record_queries_.value());\n}\n\nTEST_F(DnsFilterTest, RawBufferTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  const std::string domain(\"www.foo3.com\");\n\n  constexpr char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x03, 0x77, 
0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for\n      0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com\n      0x00, 0x01,                               // Query Type - A\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(1, query_ctx_->answers_.size());\n\n  // Verify that we have an answer record for the queried domain\n  const DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second;\n\n  // Verify the address returned\n  const std::list<std::string> expected{\"10.0.3.1\"};\n  Utils::verifyAddress(expected, answer);\n}\n\nTEST_F(DnsFilterTest, InvalidAnswersInQueryTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  const std::string domain(\"www.foo3.com\");\n\n  // Answer count is non-zero in a query.\n  constexpr char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x01,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for\n      0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com\n      0x00, 0x01,                               // Query Type - A\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query 
= Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n}\n\nTEST_F(DnsFilterTest, InvalidQueryNameTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n\n  // In this buffer the name segment sizes are incorrect. The filter will indicate that the parsing\n  // failed\n  constexpr char dns_request[] = {\n      0x36, 0x6c,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x02, 0x77, 0x77, 0x77, 0x03, 0x66, 0x6f, // Query record for\n      0x6f, 0x33, 0x01, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com\n      0x00, 0x01,                               // Query Type - A\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n\n  EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value());\n}\n\nTEST_F(DnsFilterTest, InvalidAnswerNameTest) {\n  InSequence s;\n\n  // In this buffer the name label is incorrect for the answer. The labels are separated\n  // by periods and not the segment length. 
The filter will indicate that the parsing failed\n  constexpr unsigned char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x81, 0x80,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x01,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x01,                               // Additional RRs\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Record Type\n      0x00, 0x01,                               // Record Class\n      0x69, 0x70, 0x76, 0x36, 0x2e, 0x68,       // Answer record for\n      0x65, 0x2e, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Answer Record Type\n      0x00, 0x01,                               // Answer Record Class\n      0x00, 0x00, 0x01, 0x19,                   // Answer TTL\n      0x00, 0x04,                               // Answer Data Length\n      0x42, 0xdc, 0x02, 0x4b,                   // Answer IP Address\n      0x00,                                     // Additional RR\n      0x00, 0x29, 0x10, 0x00,                   // UDP Payload Size (4096)\n      0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n\n  Network::UdpRecvData data{};\n  data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(\"10.0.0.1:1000\");\n  data.addresses_.local_ = listener_address_;\n  data.buffer_ = std::make_unique<Buffer::OwnedImpl>(dns_request, count);\n  data.receive_time_ = MonotonicTime(std::chrono::seconds(0));\n\n  query_ctx_ = response_parser_->createQueryContext(data, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  // We should have zero parsed answers\n  
EXPECT_TRUE(query_ctx_->answers_.empty());\n}\n\nTEST_F(DnsFilterTest, InvalidAnswerTypeTest) {\n  InSequence s;\n\n  // In this buffer the answer type is incorrect for the given query. The answer is a NS\n  // type when an A record was requested. This should not happen on the wire.\n  constexpr unsigned char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x81, 0x80,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x01,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x01,                               // Additional RRs\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Record Type\n      0x00, 0x01,                               // Record Class\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x02,                               // Answer Record Type\n      0x00, 0x01,                               // Answer Record Class\n      0x00, 0x00, 0x01, 0x19,                   // Answer TTL\n      0x00, 0x04,                               // Answer Data Length\n      0x42, 0xdc, 0x02, 0x4b,                   // Answer IP Address\n      0x00,                                     // Additional RR\n      0x00, 0x29, 0x10, 0x00,                   // UDP Payload Size (4096)\n      0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n\n  Network::UdpRecvData data{};\n  data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(\"10.0.0.1:1000\");\n  data.addresses_.local_ = listener_address_;\n  data.buffer_ = std::make_unique<Buffer::OwnedImpl>(dns_request, count);\n  data.receive_time_ = 
MonotonicTime(std::chrono::seconds(0));\n\n  query_ctx_ = response_parser_->createQueryContext(data, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  // We should have zero parsed answers\n  EXPECT_TRUE(query_ctx_->answers_.empty());\n}\n\nTEST_F(DnsFilterTest, InvalidAnswerClassTest) {\n  InSequence s;\n\n  // In this buffer the answer class is incorrect for the given query. The answer is a CH\n  // class when an IN class was requested. This should not happen on the wire.\n  constexpr unsigned char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x81, 0x80,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x01,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x01,                               // Additional RRs\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Record Type\n      0x00, 0x01,                               // Record Class\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Answer Record Type\n      0x00, 0x03,                               // Answer Record Class\n      0x00, 0x00, 0x01, 0x19,                   // Answer TTL\n      0x00, 0x04,                               // Answer Data Length\n      0x42, 0xdc, 0x02, 0x4b,                   // Answer IP Address\n      0x00,                                     // Additional RR\n      0x00, 0x29, 0x10, 0x00,                   // UDP Payload Size (4096)\n      0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n\n  Network::UdpRecvData data{};\n  data.addresses_.peer_ = 
Network::Utility::parseInternetAddressAndPort(\"10.0.0.1:1000\");\n  data.addresses_.local_ = listener_address_;\n  data.buffer_ = std::make_unique<Buffer::OwnedImpl>(dns_request, count);\n  data.receive_time_ = MonotonicTime(std::chrono::seconds(0));\n\n  query_ctx_ = response_parser_->createQueryContext(data, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  // We should have zero parsed answers\n  EXPECT_TRUE(query_ctx_->answers_.empty());\n}\n\nTEST_F(DnsFilterTest, InvalidAnswerAddressTest) {\n  InSequence s;\n\n  // In this buffer the address in the answer record is invalid. The IP should\n  // fail to parse. The class suggests it's an IPv6 address but there are only 4\n  // bytes available.\n  constexpr unsigned char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x81, 0x80,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x01,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x01,                               // Additional RRs\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Record Type\n      0x00, 0x01,                               // Record Class\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x1c,                               // Answer Record Type\n      0x00, 0x01,                               // Answer Record Class\n      0x00, 0x00, 0x01, 0x19,                   // Answer TTL\n      0x00, 0x10,                               // Answer Data Length\n      0x42, 0xdc, 0x02, 0x4b,                   // Answer IP Address\n      0x00,                                     // Additional RR\n      0x00, 0x29, 0x10, 0x00,                  
 // UDP Payload Size (4096)\n      0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n\n  Network::UdpRecvData data{};\n  data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(\"10.0.0.1:1000\");\n  data.addresses_.local_ = listener_address_;\n  data.buffer_ = std::make_unique<Buffer::OwnedImpl>(dns_request, count);\n  data.receive_time_ = MonotonicTime(std::chrono::seconds(0));\n\n  setup(forward_query_off_config);\n  query_ctx_ = response_parser_->createQueryContext(data, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  // We should have one parsed query\n  EXPECT_FALSE(query_ctx_->queries_.empty());\n\n  // We should have zero parsed answers due to the IP parsing failure\n  EXPECT_TRUE(query_ctx_->answers_.empty());\n}\n\nTEST_F(DnsFilterTest, InvalidAnswerDataLengthTest) {\n  InSequence s;\n\n  // In this buffer the answer data length is invalid (zero). This should not\n  // occur in data on the wire.\n  constexpr unsigned char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x81, 0x80,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x01,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x01,                               // Additional RRs\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Record Type\n      0x00, 0x01,                               // Record Class\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Answer Record Type\n      0x00, 0x01,                               // Answer Record Class\n      0x00, 0x00, 
0x01, 0x19,                   // Answer TTL\n      0x00, 0x00,                               // Answer Data Length\n      0x42, 0xdc, 0x02, 0x4b,                   // Answer IP Address\n      0x00,                                     // Additional RR (we do not parse this)\n      0x00, 0x29, 0x10, 0x00,                   // UDP Payload Size (4096)\n      0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n\n  Network::UdpRecvData data{};\n  data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(\"10.0.0.1:1000\");\n  data.addresses_.local_ = listener_address_;\n  data.buffer_ = std::make_unique<Buffer::OwnedImpl>(dns_request, count);\n  data.receive_time_ = MonotonicTime(std::chrono::seconds(0));\n\n  query_ctx_ = response_parser_->createQueryContext(data, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  // We should have zero parsed answers\n  EXPECT_TRUE(query_ctx_->answers_.empty());\n}\n\nTEST_F(DnsFilterTest, TruncatedAnswerRecordTest) {\n  InSequence s;\n\n  // In this buffer the answer record is truncated. 
The filter should indicate\n  // a parsing failure\n  constexpr unsigned char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x81, 0x80,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x01,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Record Type\n      0x00, 0x01,                               // Record Class\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Answer Record Type\n      0x00, 0x01,                               // Answer Record Class\n      0x00, 0x00, 0x01, 0x19,                   // Answer TTL\n                                                // Remaining data is truncated\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n\n  Network::UdpRecvData data{};\n  data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(\"10.0.0.1:1000\");\n  data.addresses_.local_ = listener_address_;\n  data.buffer_ = std::make_unique<Buffer::OwnedImpl>(dns_request, count);\n  data.receive_time_ = MonotonicTime(std::chrono::seconds(0));\n\n  setup(forward_query_off_config);\n  query_ctx_ = response_parser_->createQueryContext(data, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  // We should have one parsed query\n  EXPECT_FALSE(query_ctx_->queries_.empty());\n\n  // We should have zero parsed answers due to the IP parsing failure\n  EXPECT_TRUE(query_ctx_->answers_.empty());\n}\n\nTEST_F(DnsFilterTest, TruncatedQueryBufferTest) {\n  InSequence s;\n\n  // In 
this buffer the query record is truncated. The filter should indicate\n  // a parsing failure\n  constexpr unsigned char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x01,                               // Additional RRs\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01                                // Record Type\n                                                // Truncated bytes here\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n\n  Network::UdpRecvData data{};\n  data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(\"10.0.0.1:1000\");\n  data.addresses_.local_ = listener_address_;\n  data.buffer_ = std::make_unique<Buffer::OwnedImpl>(dns_request, count);\n  data.receive_time_ = MonotonicTime(std::chrono::seconds(0));\n\n  query_ctx_ = response_parser_->createQueryContext(data, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  // We should have zero parsed answers\n  EXPECT_TRUE(query_ctx_->answers_.empty());\n}\n\nTEST_F(DnsFilterTest, InvalidQueryClassAndAnswerTypeTest) {\n  InSequence s;\n\n  // In this buffer the answer type is unsupported, and the query class is unsupported.\n  constexpr unsigned char dns_request[] = {\n      0x36, 0x6b,                               // Transaction ID\n      0x81, 0x80,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x01,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x01,                               // Additional RRs\n      
0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x01,                               // Record Type\n      0x00, 0x02,                               // Record Class\n      0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for\n      0x65, 0x03, 0x6e, 0x65, 0x74, 0x00,       // ipv6.he.net\n      0x00, 0x17,                               // Answer Record Type\n      0x00, 0x01,                               // Answer Record Class\n      0x00, 0x00, 0x01, 0x19,                   // Answer TTL\n      0x00, 0x04,                               // Answer Data Length\n      0x42, 0xdc, 0x02, 0x4b,                   // Answer IP Address\n      0x00,                                     // Additional RR (we do not parse this)\n      0x00, 0x29, 0x10, 0x00,                   // UDP Payload Size (4096)\n      0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n\n  Network::UdpRecvData data{};\n  data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(\"10.0.0.1:1000\");\n  data.addresses_.local_ = listener_address_;\n  data.buffer_ = std::make_unique<Buffer::OwnedImpl>(dns_request, count);\n  data.receive_time_ = MonotonicTime(std::chrono::seconds(0));\n\n  query_ctx_ = response_parser_->createQueryContext(data, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n\n  // We should have zero parsed queries or answers\n  EXPECT_TRUE(query_ctx_->queries_.empty());\n  EXPECT_TRUE(query_ctx_->answers_.empty());\n}\n\nTEST_F(DnsFilterTest, InvalidQueryNameTest2) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  // In this buffer the name segment sizes are incorrect. The first segment points\n  // past the end of the buffer. 
The filter will indicate that the parsing failed.\n  constexpr char dns_request[] = {\n      0x36, 0x6c,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x4c, 0x77, 0x77, 0x77, 0x03, 0x66, 0x6f, // Query record for\n      0x6f, 0x33, 0x01, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com\n      0x00, 0x01,                               // Query Type - A\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n\n  // TODO(abaptiste): underflow/overflow stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value());\n}\n\nTEST_F(DnsFilterTest, MultipleQueryCountTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  // In this buffer we have 2 queries for two different domains. This is a rare case\n  // and serves to validate that we handle the protocol correctly. 
We will return an\n  // error to the client since most implementations will send the two questions as two\n  // separate DNS queries\n  constexpr char dns_request[] = {\n      0x36, 0x6d,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x02,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // begin query record for\n      0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com\n      0x00, 0x01,                               // Query Type - A\n      0x00, 0x01,                               // Query Class - IN\n      0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for\n      0x6f, 0x31, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo1.com\n      0x00, 0x01,                               // Query Type - A\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n\n  EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value());\n  EXPECT_EQ(0, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n}\n\nTEST_F(DnsFilterTest, InvalidQueryCountTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  // In this buffer the Questions count is zero. 
This is an invalid query and is handled as such.\n  constexpr char dns_request[] = {\n      0x36, 0x6f,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x00,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for\n      0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com\n      0x00, 0x01,                               // Query Type - A\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n\n  EXPECT_EQ(0, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n}\n\nTEST_F(DnsFilterTest, InvalidNameLabelTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  // In this buffer the name label is not formatted as the RFC specifies. 
The\n  // label separators are periods and not the label length\n  constexpr char dns_request[] = {\n      0x36, 0x6f,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x77, 0x77, 0x77, 0x2e, 0x66, 0x6f, 0x6f, // Query record for\n      0x33, 0x2e, 0x63, 0x6f, 0x6d, 0x00,       // www.foo3.com\n      0x00, 0x01,                               // Query Type - A\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n\n  EXPECT_EQ(0, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n}\n\nTEST_F(DnsFilterTest, NotImplementedQueryTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  // This buffer requests a CNAME record which we do not support. 
We respond to the client with a\n  // \"not implemented\" response code\n  constexpr char dns_request[] = {\n      0x36, 0x70,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for\n      0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com\n      0x00, 0x05,                               // Query Type - CNAME\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NOT_IMPLEMENTED, response_parser_->getQueryResponseCode());\n\n  EXPECT_EQ(0, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(0, config_->stats().downstream_rx_invalid_queries_.value());\n}\n\nTEST_F(DnsFilterTest, NotImplementedAuthorityRRTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  // This buffer specifies that 4 Authority Resource records exist. 
We should return a\n  // \"not implemented\" response code\n  constexpr char dns_request[] = {\n      0x36, 0x70,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x04,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for\n      0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com\n      0x00, 0x05,                               // Query Type - CNAME\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NOT_IMPLEMENTED, response_parser_->getQueryResponseCode());\n}\n\nTEST_F(DnsFilterTest, NoTransactionIdTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  // This buffer has an invalid Transaction ID. 
We should return an error\n  // to the client\n  constexpr char dns_request[] = {\n      0x00, 0x00,                               // Transaction ID\n      0x01, 0x20,                               // Flags\n      0x00, 0x01,                               // Questions\n      0x00, 0x00,                               // Answers\n      0x00, 0x00,                               // Authority RRs\n      0x00, 0x00,                               // Additional RRs\n      0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for\n      0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com\n      0x00, 0x05,                               // Query Type - CNAME\n      0x00, 0x01,                               // Query Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n}\n\nTEST_F(DnsFilterTest, InvalidShortBufferTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  // This is an invalid query. 
Envoy should handle the packet and indicate a parsing failure\n  constexpr char dns_request[] = {0x1c};\n  const std::string query = Utils::buildQueryFromBytes(dns_request, 1);\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_FALSE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode());\n\n  EXPECT_EQ(0, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value());\n}\n\nTEST_F(DnsFilterTest, RandomizeFirstAnswerTest) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  const std::string domain(\"www.foo16.com\");\n\n  const std::string query =\n      Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n\n  // Although 16 addresses are defined, only 8 are returned\n  EXPECT_EQ(8, query_ctx_->answers_.size());\n\n  // We shuffle the list of addresses when we read the config, and in the case of more than\n  // 8 defined addresses, we randomize the initial starting index. 
We should not end up with\n  // the first answer being the first defined address, or the answers appearing in the same\n  // order as they are defined.\n  const std::list<std::string> defined_order{\"10.0.16.1\", \"10.0.16.2\", \"10.0.16.3\", \"10.0.16.4\",\n                                             \"10.0.16.5\", \"10.0.16.6\", \"10.0.16.7\", \"10.0.16.8\"};\n  auto defined_answer_iter = defined_order.begin();\n  for (const auto& answer : query_ctx_->answers_) {\n    const auto resolved_address = answer.second->ip_addr_->ip()->addressAsString();\n    EXPECT_NE(0L, resolved_address.compare(*defined_answer_iter++));\n  }\n}\n\nTEST_F(DnsFilterTest, ConsumeExternalTableWithServicesTest) {\n  InSequence s;\n\n  std::string temp_path =\n      TestEnvironment::writeStringToFileForTest(\"dns_table.yaml\", external_dns_table_services_yaml);\n  std::string config_to_use = fmt::format(external_dns_table_config, temp_path);\n  setup(config_to_use);\n\n  const std::string service(\"_sip._tcp.voip.subzero.com\");\n\n  const std::string query =\n      Utils::buildQueryForDomain(service, DNS_RECORD_TYPE_SRV, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n\n  std::map<uint16_t, std::string> validation_weight_map = {\n      {10, \"backup.voip.subzero.com\"},\n      {20, \"secondary.voip.subzero.com\"},\n      {30, \"primary.voip.subzero.com\"},\n      {40, \"emergency.voip.subzero.com\"},\n  };\n\n  std::map<uint16_t, std::string> validation_port_map = {\n      {5062, \"backup.voip.subzero.com\"},\n      {5061, \"secondary.voip.subzero.com\"},\n      {5060, \"primary.voip.subzero.com\"},\n      {5063, \"emergency.voip.subzero.com\"},\n  };\n\n  // Validate the weight for each SRV record. 
The TTL and priority are the same value for each\n  // entry\n  EXPECT_EQ(validation_weight_map.size(), query_ctx_->answers_.size());\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.second->type_, DNS_RECORD_TYPE_SRV);\n\n    DnsSrvRecord* srv_rec = dynamic_cast<DnsSrvRecord*>(answer.second.get());\n\n    EXPECT_STREQ(\"_sip._tcp.voip.subzero.com\", srv_rec->name_.c_str());\n    EXPECT_EQ(86400, srv_rec->ttl_.count());\n\n    EXPECT_EQ(1, srv_rec->targets_.size());\n    const auto target = srv_rec->targets_.begin();\n    const auto target_name = target->first;\n    const auto& attributes = target->second;\n\n    EXPECT_EQ(10, attributes.priority);\n    auto expected_target = validation_weight_map[attributes.weight];\n    EXPECT_EQ(expected_target, target_name);\n\n    auto port_entry = validation_port_map[attributes.port];\n    EXPECT_EQ(expected_target, port_entry);\n  }\n\n  // Validate additional records from the SRV query. Remove a matching\n  // entry to ensure that we are getting unique addresses in the additional\n  // records\n  std::map<std::string, std::string> target_map = {\n      {\"primary.voip.subzero.com\", \"10.0.3.1\"},\n      {\"secondary.voip.subzero.com\", \"10.0.3.2\"},\n      {\"backup.voip.subzero.com\", \"10.0.3.3\"},\n      {\"emergency.voip.subzero.com\", \"2200:823f::cafe:beef\"},\n  };\n  const size_t target_size = target_map.size();\n\n  EXPECT_EQ(target_map.size(), query_ctx_->additional_.size());\n  for (const auto& [hostname, address] : query_ctx_->additional_) {\n    const auto& entry = target_map.find(hostname);\n    EXPECT_NE(entry, target_map.end());\n    Utils::verifyAddress({entry->second}, address);\n    target_map.erase(hostname);\n  }\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().known_domain_queries_.value());\n  EXPECT_EQ(target_size, config_->stats().local_srv_record_answers_.value());\n  EXPECT_EQ(target_size - 1, 
config_->stats().local_a_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().local_aaaa_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().srv_record_queries_.value());\n}\n\nTEST_F(DnsFilterTest, SrvTargetResolution) {\n  InSequence s;\n\n  std::string temp_path =\n      TestEnvironment::writeStringToFileForTest(\"dns_table.yaml\", external_dns_table_services_yaml);\n  std::string config_to_use = fmt::format(external_dns_table_config, temp_path);\n  setup(config_to_use);\n\n  struct RecordProperties {\n    uint16_t type;\n    std::string address;\n  };\n\n  const std::map<std::string, struct RecordProperties> target_map = {\n      {\"primary.voip.subzero.com\", {DNS_RECORD_TYPE_A, \"10.0.3.1\"}},\n      {\"secondary.voip.subzero.com\", {DNS_RECORD_TYPE_A, \"10.0.3.2\"}},\n      {\"backup.voip.subzero.com\", {DNS_RECORD_TYPE_A, \"10.0.3.3\"}},\n      {\"emergency.voip.subzero.com\", {DNS_RECORD_TYPE_AAAA, \"2200:823f::cafe:beef\"}},\n  };\n\n  for (const auto& [domain, properties] : target_map) {\n    const uint16_t address_type = properties.type;\n    const std::string& ip = properties.address;\n\n    const std::string query = Utils::buildQueryForDomain(domain, address_type, DNS_RECORD_CLASS_IN);\n    ASSERT_FALSE(query.empty());\n    sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n    query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n    EXPECT_TRUE(query_ctx_->parse_status_);\n    EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n    EXPECT_EQ(1, query_ctx_->answers_.size());\n\n    const DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second;\n    Utils::verifyAddress({ip}, answer);\n  }\n\n  // Validate stats\n  EXPECT_EQ(target_map.size(), config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(target_map.size(), config_->stats().known_domain_queries_.value());\n  EXPECT_EQ(target_map.size() - 1, config_->stats().local_a_record_answers_.value());\n  
EXPECT_EQ(1, config_->stats().local_aaaa_record_answers_.value());\n  EXPECT_EQ(target_map.size() - 1, config_->stats().a_record_queries_.value());\n  EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value());\n}\n\nTEST_F(DnsFilterTest, NonExistentClusterServiceLookup) {\n  InSequence s;\n\n  std::string temp_path =\n      TestEnvironment::writeStringToFileForTest(\"dns_table.yaml\", external_dns_table_services_yaml);\n  std::string config_to_use = fmt::format(external_dns_table_config, temp_path);\n  setup(config_to_use);\n\n  const std::string service(\"_http._tcp.web.subzero.com\");\n\n  const std::string query =\n      Utils::buildQueryForDomain(service, DNS_RECORD_TYPE_SRV, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(0, query_ctx_->answers_.size());\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(1, config_->stats().known_domain_queries_.value());\n  EXPECT_EQ(0, config_->stats().local_srv_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().srv_record_queries_.value());\n}\n\nTEST_F(DnsFilterTest, SrvRecordQuery) {\n  InSequence s;\n\n  setup(forward_query_off_config);\n  // This buffer requests a SRV record\n  constexpr char dns_request[] = {\n      0x32, 0x6e,             // Transaction ID\n      0x01, 0x00,             // Flags\n      0x00, 0x01,             // Questions\n      0x00, 0x00,             // Answers\n      0x00, 0x00,             // Authority RRs\n      0x00, 0x00,             // Additional RRs\n      0x05, 0x5f, 0x6c, 0x64, // SRV query for\n      0x61, 0x70, 0x04, 0x5f, // _ldap._tcp.Default-First-Site-Name._sites.dc._msdcs.utelsystems.local\n      0x74, 0x63, 0x70, 0x17, 0x44, 0x65, 0x66, 0x61, 0x75, 
0x6c, 0x74, 0x2d, 0x46, 0x69,\n      0x72, 0x73, 0x74, 0x2d, 0x53, 0x69, 0x74, 0x65, 0x2d, 0x4e, 0x61, 0x6d, 0x65, 0x06,\n      0x5f, 0x73, 0x69, 0x74, 0x65, 0x73, 0x02, 0x64, 0x63, 0x06, 0x5f, 0x6d, 0x73, 0x64,\n      0x63, 0x73, 0x0b, 0x75, 0x74, 0x65, 0x6c, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x73,\n      0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x00, 0x00, 0x21, // Type - SRV (0x21 -> 33)\n      0x00, 0x01                                            // Class - IN\n  };\n\n  constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]);\n  const std::string query = Utils::buildQueryFromBytes(dns_request, count);\n  sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode());\n  EXPECT_EQ(1, query_ctx_->queries_.size());\n\n  const auto& parsed_query = query_ctx_->queries_.front();\n  EXPECT_EQ(parsed_query->type_, DNS_RECORD_TYPE_SRV);\n  EXPECT_STREQ(\"_ldap._tcp.Default-First-Site-Name._sites.dc._msdcs.utelsystems.local\",\n               parsed_query->name_.c_str());\n\n  // Validate stats\n  EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value());\n  EXPECT_EQ(0, config_->stats().known_domain_queries_.value());\n  EXPECT_EQ(0, config_->stats().local_srv_record_answers_.value());\n  EXPECT_EQ(1, config_->stats().srv_record_queries_.value());\n}\n\nTEST_F(DnsFilterTest, SrvQueryMaxRecords) {\n  InSequence s;\n\n  std::string temp_path =\n      TestEnvironment::writeStringToFileForTest(\"dns_table.yaml\", max_records_table_yaml);\n  std::string config_to_use = fmt::format(external_dns_table_config, temp_path);\n  setup(config_to_use);\n\n  const std::string service{\"_http._tcp.web.ermac.com\"};\n  const std::string query =\n      Utils::buildQueryForDomain(service, DNS_RECORD_TYPE_SRV, DNS_RECORD_CLASS_IN);\n  ASSERT_FALSE(query.empty());\n  
sendQueryFromClient(\"10.0.0.1:1000\", query);\n\n  query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_);\n  EXPECT_TRUE(query_ctx_->parse_status_);\n  EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode());\n\n  // We can only serialize 7 records before reaching the 512 byte limit\n  EXPECT_LT(query_ctx_->answers_.size(), MAX_RETURNED_RECORDS);\n  EXPECT_LT(query_ctx_->additional_.size(), MAX_RETURNED_RECORDS);\n\n  const std::list<std::string> hosts{\n      \"one.web.ermac.com\",  \"two.web.ermac.com\", \"three.web.ermac.com\", \"four.web.ermac.com\",\n      \"five.web.ermac.com\", \"six.web.ermac.com\", \"seven.web.ermac.com\",\n  };\n\n  // Verify the service name and targets are sufficiently randomized\n  size_t exact_matches = 0;\n  auto host = hosts.begin();\n  for (const auto& answer : query_ctx_->answers_) {\n    EXPECT_EQ(answer.second->type_, DNS_RECORD_TYPE_SRV);\n    DnsSrvRecord* srv_rec = dynamic_cast<DnsSrvRecord*>(answer.second.get());\n\n    EXPECT_STREQ(service.c_str(), srv_rec->name_.c_str());\n\n    const auto target = srv_rec->targets_.begin();\n    const auto target_name = target->first;\n    exact_matches += (target_name.compare(*host++) == 0);\n  }\n  EXPECT_LT(exact_matches, hosts.size());\n\n  // Verify that the additional records are not in the same order as the configuration\n  exact_matches = 0;\n  host = hosts.begin();\n  for (const auto& answer : query_ctx_->additional_) {\n    exact_matches += (answer.first.compare(*host++) == 0);\n  }\n  EXPECT_LT(exact_matches, hosts.size());\n}\n\n} // namespace\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc",
    "content": "#include \"dns_filter_test_utils.h\"\n\n#include \"common/common/random_generator.h\"\n\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\nnamespace Utils {\n\nstd::string buildQueryFromBytes(const char* bytes, const size_t count) {\n  std::string query;\n  for (size_t i = 0; i < count; i++) {\n    query.append(static_cast<const char*>(&bytes[i]), 1);\n  }\n  return query;\n}\n\nstd::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint16_t rec_class) {\n  Random::RandomGeneratorImpl random_;\n  struct DnsMessageParser::DnsHeader query {};\n  uint16_t id = random_.random() & 0xFFFF;\n\n  // Generate a random query ID\n  query.id = id;\n\n  // Signify that this is a query\n  query.flags.qr = 0;\n\n  // This should usually be zero\n  query.flags.opcode = 0;\n\n  query.flags.aa = 0;\n  query.flags.tc = 0;\n\n  // Set Recursion flags (at least one bit set so that the flags are not all zero)\n  query.flags.rd = 1;\n  query.flags.ra = 0;\n\n  // reserved flag is not set\n  query.flags.z = 0;\n\n  // Set the authenticated flags to zero\n  query.flags.ad = 0;\n  query.flags.cd = 0;\n\n  query.questions = 1;\n  query.answers = 0;\n  query.authority_rrs = 0;\n  query.additional_rrs = 0;\n\n  Buffer::OwnedImpl buffer;\n  buffer.writeBEInt<uint16_t>(query.id);\n\n  uint16_t flags;\n  ::memcpy(&flags, static_cast<void*>(&query.flags), sizeof(uint16_t));\n  buffer.writeBEInt<uint16_t>(flags);\n\n  buffer.writeBEInt<uint16_t>(query.questions);\n  buffer.writeBEInt<uint16_t>(query.answers);\n  buffer.writeBEInt<uint16_t>(query.authority_rrs);\n  buffer.writeBEInt<uint16_t>(query.additional_rrs);\n\n  DnsQueryRecord query_rec(name, rec_type, rec_class);\n  query_rec.serialize(buffer);\n  return buffer.toString();\n}\n\nvoid verifyAddress(const std::list<std::string>& addresses, const DnsAnswerRecordPtr& answer) {\n  ASSERT_TRUE(answer != nullptr);\n  
ASSERT_TRUE(answer->ip_addr_ != nullptr);\n\n  const auto resolved_address = answer->ip_addr_->ip()->addressAsString();\n  if (addresses.size() == 1) {\n    const auto expected = addresses.begin();\n    ASSERT_EQ(*expected, resolved_address);\n    return;\n  }\n\n  const auto iter = std::find(addresses.begin(), addresses.end(), resolved_address);\n  ASSERT_TRUE(iter != addresses.end());\n}\n\n} // namespace Utils\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h",
    "content": "#pragma once\n\n#include \"extensions/filters/udp/dns_filter/dns_filter.h\"\n#include \"extensions/filters/udp/dns_filter/dns_filter_constants.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\nnamespace Utils {\n\nstatic constexpr uint64_t MAX_UDP_DNS_SIZE{512};\n\nstd::string buildQueryFromBytes(const char* bytes, const size_t count);\nstd::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint16_t rec_class);\nvoid verifyAddress(const std::list<std::string>& addresses, const DnsAnswerRecordPtr& answer);\nsize_t getResponseQueryCount(DnsMessageParser& parser);\n\n} // namespace Utils\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc",
    "content": "#include \"envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h\"\n#include \"envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h\"\n\n#include \"common/network/address_impl.h\"\n\n#include \"extensions/filters/udp/dns_filter/dns_filter_utils.h\"\n\n#include \"test/test_common/environment.h\"\n\n#include \"dns_filter_test_utils.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace DnsFilter {\nnamespace Utils {\nnamespace {\n\nclass DnsFilterUtilsTest : public testing::Test {};\n\nTEST_F(DnsFilterUtilsTest, UtilsProtoNameTest) {\n  std::map<uint16_t, std::string> proto_name_test = {\n      {6, \"tcp\"},\n      {17, \"udp\"},\n      {155, \"\"},\n  };\n\n  using envoy::data::dns::v3::DnsTable;\n\n  for (const auto& proto : proto_name_test) {\n    DnsTable::DnsServiceProtocol p;\n    p.set_number(proto.first);\n    const std::string proto_name = Utils::getProtoName(p);\n\n    EXPECT_STREQ(proto_name.c_str(), proto.second.c_str());\n  }\n}\n\nTEST_F(DnsFilterUtilsTest, ServiceNameSynthesisTest) {\n  struct DnsServiceTestData {\n    const std::string name;\n    const std::string proto;\n    const std::string domain;\n    const std::string expected;\n  } service_data[] = {\n      // When creating the full service name, we prepend an underscore if necessary\n      {\"name1\", \"proto1\", \"test.com\", \"_name1._proto1.test.com\"},\n      {\"name2\", \"_proto2\", \"test2.com\", \"_name2._proto2.test2.com\"},\n      {\"_name3\", \"proto3\", \"test3.com\", \"_name3._proto3.test3.com\"},\n      {\"name4\", \"proto4\", \"_sites.test4.com\", \"_name4._proto4._sites.test4.com\"},\n  };\n\n  for (auto& ptr : service_data) {\n    const std::string result = Utils::buildServiceName(ptr.name, ptr.proto, ptr.domain);\n    EXPECT_STREQ(ptr.expected.c_str(), result.c_str());\n  }\n}\n\nTEST_F(DnsFilterUtilsTest, ServiceNameParsingTest) {\n  struct DnsServiceTestData {\n    const 
std::string domain;\n    const std::string expected_service;\n    const std::string expected_proto;\n  } service_data[] = {\n      // Service names and protocols must begin with an underscore\n      {\"_ldap._tcp.Default-First-Site-Name._sites.dc._msdcs.utelsystems.local\", \"ldap\", \"tcp\"},\n      {\"_ldap._tcp._sites.dc._msdcs.utelsystems.local\", \"ldap\", \"tcp\"},\n      {\"_ldap._nottcp._sites.dc._msdcs.utelsystems.local\", \"ldap\", \"nottcp\"},\n      {\"ldap.tcp._sites.dc._msdcs.utelsystems.local\", \"\", \"\"},\n      {\".tcp._sites.dc._msdcs.utelsystems.local\", \"\", \"\"},\n      {\"\", \"\", \"\"},\n  };\n\n  for (auto& ptr : service_data) {\n    const absl::string_view service = Utils::getServiceFromName(ptr.domain);\n    const std::string service_str(service);\n    EXPECT_STREQ(ptr.expected_service.c_str(), service_str.c_str());\n\n    const absl::string_view proto = Utils::getProtoFromName(ptr.domain);\n    const std::string proto_str(proto);\n    EXPECT_STREQ(ptr.expected_proto.c_str(), proto_str.c_str());\n  }\n}\n\nTEST_F(DnsFilterUtilsTest, GetAddressRecordTypeTest) {\n  const std::string pipe_path(Platform::null_device_path);\n  const auto pipe = std::make_shared<Network::Address::PipeInstance>(pipe_path, 600);\n  auto addr_type = getAddressRecordType(pipe);\n  EXPECT_EQ(addr_type, absl::nullopt);\n\n  const auto ipv6addr = Network::Utility::parseInternetAddress(\"fec0:1::1\", 0);\n  addr_type = getAddressRecordType(ipv6addr);\n  EXPECT_TRUE(addr_type.has_value());\n  EXPECT_EQ(addr_type.value(), DNS_RECORD_TYPE_AAAA);\n\n  const auto ipv4addr = Network::Utility::parseInternetAddress(\"127.0.0.1\", 0);\n  addr_type = getAddressRecordType(ipv4addr);\n  EXPECT_TRUE(addr_type.has_value());\n  EXPECT_EQ(addr_type.value(), DNS_RECORD_TYPE_A);\n}\n\n} // namespace\n} // namespace Utils\n} // namespace DnsFilter\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/udp/udp_proxy/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"udp_proxy_filter_test\",\n    srcs = [\"udp_proxy_filter_test.cc\"],\n    extension_name = \"envoy.filters.udp_listener.udp_proxy\",\n    deps = [\n        \"//source/common/common:hash_lib\",\n        \"//source/extensions/filters/udp/udp_proxy:udp_proxy_filter_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:socket_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:cluster_update_callbacks_handle_mocks\",\n        \"//test/mocks/upstream:cluster_update_callbacks_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/mocks/upstream:thread_local_cluster_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"hash_policy_impl_test\",\n    srcs = [\"hash_policy_impl_test.cc\"],\n    extension_name = \"envoy.filters.udp_listener.udp_proxy\",\n    deps = [\n        \"//source/common/common:hash_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/extensions/filters/udp/udp_proxy:hash_policy_lib\",\n        \"@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"udp_proxy_integration_test\",\n    srcs = [\"udp_proxy_integration_test.cc\"],\n    extension_name = \"envoy.filters.udp_listener.udp_proxy\",\n    deps = [\n        \"//source/extensions/filters/udp/udp_proxy:config\",\n        \"//test/integration:integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/filters/udp/udp_proxy/hash_policy_impl_test.cc",
    "content": "#include \"envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/udp/udp_proxy/hash_policy_impl.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace UdpProxy {\nnamespace {\n\nusing namespace envoy::extensions::filters::udp::udp_proxy::v3;\n\nclass HashPolicyImplBaseTest : public testing::Test {\npublic:\n  HashPolicyImplBaseTest()\n      : HashPolicyImplBaseTest(Network::Utility::parseInternetAddressAndPort(\"10.0.0.1:1000\")) {}\n\n  HashPolicyImplBaseTest(Network::Address::InstanceConstSharedPtr&& peer_address)\n      : peer_address_(std::move(peer_address)) {}\n\n  void setup() {\n    hash_policy_config_ = config_.add_hash_policies();\n    hash_policy_config_->clear_policy_specifier();\n    additionalSetup();\n\n    hash_policy_ = std::make_unique<HashPolicyImpl>(config_.hash_policies());\n  }\n\n  virtual void additionalSetup(){\n      // Nothing to do here.\n  };\n\n  std::unique_ptr<const HashPolicyImpl> hash_policy_;\n  UdpProxyConfig config_;\n  UdpProxyConfig::HashPolicy* hash_policy_config_;\n  const Network::Address::InstanceConstSharedPtr peer_address_;\n};\n\nclass HashPolicyImplSourceIpTest : public HashPolicyImplBaseTest {\npublic:\n  HashPolicyImplSourceIpTest() : pipe_address_(Network::Utility::resolveUrl(\"unix://test_pipe\")) {}\n\n  void additionalSetup() override { hash_policy_config_->set_source_ip(true); }\n\n  const Network::Address::InstanceConstSharedPtr pipe_address_;\n};\n\n// Check invalid policy type\nTEST_F(HashPolicyImplBaseTest, NotSupportedPolicy) {\n  EXPECT_DEATH(setup(), \".*panic: not reached.*\");\n}\n\n// Check if generate correct hash\nTEST_F(HashPolicyImplSourceIpTest, SourceIpHash) {\n  setup();\n\n  auto generated_hash = HashUtil::xxHash64(peer_address_->ip()->addressAsString());\n  auto hash = 
hash_policy_->generateHash(*peer_address_);\n\n  EXPECT_EQ(generated_hash, hash.value());\n}\n\n// Check that returns null hash in case of unix domain socket(pipe) type\nTEST_F(HashPolicyImplSourceIpTest, SourceIpWithUnixDomainSocketType) {\n  setup();\n\n  auto hash = hash_policy_->generateHash(*pipe_address_);\n\n  EXPECT_FALSE(hash.has_value());\n}\n\n} // namespace\n} // namespace UdpProxy\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc",
    "content": "#include \"envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h\"\n#include \"envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.validate.h\"\n\n#include \"common/common/hash.h\"\n#include \"common/network/socket_impl.h\"\n#include \"common/network/socket_option_impl.h\"\n\n#include \"extensions/filters/udp/udp_proxy/udp_proxy_filter.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/socket.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/cluster_update_callbacks.h\"\n#include \"test/mocks/upstream/cluster_update_callbacks_handle.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/mocks/upstream/thread_local_cluster.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::AtLeast;\nusing testing::ByMove;\nusing testing::DoAll;\nusing testing::InSequence;\nusing testing::InvokeWithoutArgs;\nusing testing::Return;\nusing testing::ReturnNew;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace UdpFilters {\nnamespace UdpProxy {\nnamespace {\n\nclass TestUdpProxyFilter : public UdpProxyFilter {\npublic:\n  using UdpProxyFilter::UdpProxyFilter;\n\n  MOCK_METHOD(Network::SocketPtr, createSocket, (const Upstream::HostConstSharedPtr& host));\n};\n\nApi::IoCallUint64Result makeNoError(uint64_t rc) {\n  auto no_error = Api::ioCallUint64ResultNoError();\n  no_error.rc_ = rc;\n  return no_error;\n}\n\nApi::IoCallUint64Result makeError(int sys_errno) {\n  return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(sys_errno),\n                                                    Network::IoSocketError::deleteIoError));\n}\n\nclass UdpProxyFilterTest : public testing::Test {\npublic:\n  struct TestSession {\n    TestSession(UdpProxyFilterTest& parent,\n                const Network::Address::InstanceConstSharedPtr& upstream_address)\n        : 
parent_(parent), upstream_address_(upstream_address),\n          socket_(new NiceMock<Network::MockSocket>()) {\n      ON_CALL(*socket_, ipVersion()).WillByDefault(Return(upstream_address_->ip()->version()));\n    }\n\n    void expectSetIpTransparentSocketOption() {\n      EXPECT_CALL(*socket_->io_handle_, setOption(_, _, _, _))\n          .WillRepeatedly(Invoke([this](int level, int optname, const void* optval,\n                                        socklen_t) -> Api::SysCallIntResult {\n            sock_opts_[level][optname] = *reinterpret_cast<const int*>(optval);\n            return Api::SysCallIntResult{0, 0};\n          }));\n    }\n\n    void expectWriteToUpstream(const std::string& data, int sys_errno = 0,\n                               const Network::Address::Ip* local_ip = nullptr) {\n      EXPECT_CALL(*idle_timer_, enableTimer(parent_.config_->sessionTimeout(), nullptr));\n      EXPECT_CALL(*socket_->io_handle_, sendmsg(_, 1, 0, _, _))\n          .WillOnce(Invoke(\n              [this, data, local_ip, sys_errno](\n                  const Buffer::RawSlice* slices, uint64_t, int,\n                  const Network::Address::Ip* self_ip,\n                  const Network::Address::Instance& peer_address) -> Api::IoCallUint64Result {\n                EXPECT_EQ(data, absl::string_view(static_cast<const char*>(slices[0].mem_),\n                                                  slices[0].len_));\n                EXPECT_EQ(peer_address, *upstream_address_);\n                if (local_ip == nullptr) {\n                  EXPECT_EQ(nullptr, self_ip);\n                } else {\n                  EXPECT_EQ(self_ip->addressAsString(), local_ip->addressAsString());\n                }\n                // For suppression of clang-tidy NewDeleteLeaks rule, don't use the ternary\n                // operator.\n                if (sys_errno == 0) {\n                  return makeNoError(data.size());\n                } else {\n                  return makeError(sys_errno);\n  
              }\n              }));\n    }\n\n    void recvDataFromUpstream(const std::string& data, int recv_sys_errno = 0,\n                              int send_sys_errno = 0) {\n      EXPECT_CALL(*idle_timer_, enableTimer(parent_.config_->sessionTimeout(), nullptr));\n\n      EXPECT_CALL(*socket_->io_handle_, supportsUdpGro());\n      EXPECT_CALL(*socket_->io_handle_, supportsMmsg());\n      // Return the datagram.\n      EXPECT_CALL(*socket_->io_handle_, recvmsg(_, 1, _, _))\n          .WillOnce(\n              Invoke([this, data, recv_sys_errno](\n                         Buffer::RawSlice* slices, const uint64_t, uint32_t,\n                         Network::IoHandle::RecvMsgOutput& output) -> Api::IoCallUint64Result {\n                if (recv_sys_errno != 0) {\n                  return makeError(recv_sys_errno);\n                } else {\n                  ASSERT(data.size() <= slices[0].len_);\n                  memcpy(slices[0].mem_, data.data(), data.size());\n                  output.msg_[0].peer_address_ = upstream_address_;\n                  return makeNoError(data.size());\n                }\n              }));\n      if (recv_sys_errno == 0) {\n        // Send the datagram downstream.\n        EXPECT_CALL(parent_.callbacks_.udp_listener_, send(_))\n            .WillOnce(Invoke([data, send_sys_errno](\n                                 const Network::UdpSendData& send_data) -> Api::IoCallUint64Result {\n              // TODO(mattklein123): Verify peer/local address.\n              EXPECT_EQ(send_data.buffer_.toString(), data);\n              if (send_sys_errno == 0) {\n                send_data.buffer_.drain(send_data.buffer_.length());\n                return makeNoError(data.size());\n              } else {\n                return makeError(send_sys_errno);\n              }\n            }));\n        // Return an EAGAIN result.\n        EXPECT_CALL(*socket_->io_handle_, supportsUdpGro());\n        EXPECT_CALL(*socket_->io_handle_, 
supportsMmsg());\n        EXPECT_CALL(*socket_->io_handle_, recvmsg(_, 1, _, _))\n            .WillOnce(Return(ByMove(Api::IoCallUint64Result(\n                0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(),\n                                   Network::IoSocketError::deleteIoError)))));\n      }\n\n      // Kick off the receive.\n      file_event_cb_(Event::FileReadyType::Read);\n    }\n\n    UdpProxyFilterTest& parent_;\n    const Network::Address::InstanceConstSharedPtr upstream_address_;\n    Event::MockTimer* idle_timer_{};\n    NiceMock<Network::MockSocket>* socket_;\n    std::map<int, std::map<int, int>> sock_opts_;\n    Event::FileReadyCb file_event_cb_;\n  };\n\n  UdpProxyFilterTest()\n      : UdpProxyFilterTest(Network::Utility::parseInternetAddressAndPort(peer_ip_address_)) {}\n\n  explicit UdpProxyFilterTest(Network::Address::InstanceConstSharedPtr&& peer_address)\n      : os_calls_(&os_sys_calls_),\n        upstream_address_(Network::Utility::parseInternetAddressAndPort(upstream_ip_address_)),\n        peer_address_(std::move(peer_address)) {\n    // Disable strict mock warnings.\n    ON_CALL(os_sys_calls_, supportsIpTransparent()).WillByDefault(Return(true));\n    EXPECT_CALL(callbacks_, udpListener()).Times(AtLeast(0));\n    EXPECT_CALL(*cluster_manager_.thread_local_cluster_.lb_.host_, address())\n        .WillRepeatedly(Return(upstream_address_));\n    EXPECT_CALL(*cluster_manager_.thread_local_cluster_.lb_.host_, health())\n        .WillRepeatedly(Return(Upstream::Host::Health::Healthy));\n  }\n\n  ~UdpProxyFilterTest() override { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); }\n\n  void setup(const std::string& yaml, bool has_cluster = true) {\n    envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig config;\n    TestUtility::loadFromYamlAndValidate(yaml, config);\n    config_ = std::make_shared<UdpProxyFilterConfig>(cluster_manager_, time_system_, stats_store_,\n                                              
       config);\n    EXPECT_CALL(cluster_manager_, addThreadLocalClusterUpdateCallbacks_(_))\n        .WillOnce(DoAll(SaveArgAddress(&cluster_update_callbacks_),\n                        ReturnNew<Upstream::MockClusterUpdateCallbacksHandle>()));\n    if (has_cluster) {\n      EXPECT_CALL(cluster_manager_, get(_));\n    } else {\n      EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(nullptr));\n    }\n    filter_ = std::make_unique<TestUdpProxyFilter>(callbacks_, config_);\n  }\n\n  void recvDataFromDownstream(const std::string& peer_address, const std::string& local_address,\n                              const std::string& buffer) {\n    Network::UdpRecvData data;\n    data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(peer_address);\n    data.addresses_.local_ = Network::Utility::parseInternetAddressAndPort(local_address);\n    data.buffer_ = std::make_unique<Buffer::OwnedImpl>(buffer);\n    data.receive_time_ = MonotonicTime(std::chrono::seconds(0));\n    filter_->onData(data);\n  }\n\n  void expectSessionCreate(const Network::Address::InstanceConstSharedPtr& address) {\n    test_sessions_.emplace_back(*this, address);\n    TestSession& new_session = test_sessions_.back();\n    new_session.idle_timer_ = new Event::MockTimer(&callbacks_.udp_listener_.dispatcher_);\n    EXPECT_CALL(*filter_, createSocket(_))\n        .WillOnce(Return(ByMove(Network::SocketPtr{test_sessions_.back().socket_})));\n    EXPECT_CALL(\n        *new_session.socket_->io_handle_,\n        createFileEvent_(_, _, Event::PlatformDefaultTriggerType, Event::FileReadyType::Read))\n        .WillOnce(DoAll(SaveArg<1>(&new_session.file_event_cb_), Return(nullptr)));\n    // Internal Buffer is Empty, flush will be a no-op\n    ON_CALL(callbacks_.udp_listener_, flush())\n        .WillByDefault(\n            InvokeWithoutArgs([]() -> Api::IoCallUint64Result { return makeNoError(0); }));\n  }\n\n  std::shared_ptr<NiceMock<Upstream::MockHost>>\n  createHost(const 
Network::Address::InstanceConstSharedPtr& host_address) {\n    auto host = std::make_shared<NiceMock<Upstream::MockHost>>();\n    ON_CALL(*host, address()).WillByDefault(Return(host_address));\n    ON_CALL(*host, health()).WillByDefault(Return(Upstream::Host::Health::Healthy));\n    return host;\n  }\n\n  void checkTransferStats(uint64_t rx_bytes, uint64_t rx_datagrams, uint64_t tx_bytes,\n                          uint64_t tx_datagrams) {\n    EXPECT_EQ(rx_bytes, config_->stats().downstream_sess_rx_bytes_.value());\n    EXPECT_EQ(rx_datagrams, config_->stats().downstream_sess_rx_datagrams_.value());\n    EXPECT_EQ(tx_bytes, config_->stats().downstream_sess_tx_bytes_.value());\n    EXPECT_EQ(tx_datagrams, config_->stats().downstream_sess_tx_datagrams_.value());\n  }\n\n  void checkSocketOptions(TestSession& session, const Network::SocketOptionName& ipv4_option,\n                          int ipv4_expect, const Network::SocketOptionName& ipv6_option,\n                          int ipv6_expect) {\n    EXPECT_EQ(ipv4_expect, session.sock_opts_[ipv4_option.level()][ipv4_option.option()]);\n    EXPECT_EQ(ipv6_expect, session.sock_opts_[ipv6_option.level()][ipv6_option.option()]);\n  }\n\n  void\n  ensureIpTransparentSocketOptions(const Network::Address::InstanceConstSharedPtr& upstream_address,\n                                   const std::string& local_address, int ipv4_expect,\n                                   int ipv6_expect) {\n    setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\nuse_original_src_ip: true\n    )EOF\");\n\n    expectSessionCreate(upstream_address);\n    test_sessions_[0].expectSetIpTransparentSocketOption();\n    test_sessions_[0].expectWriteToUpstream(\"hello\", 0, peer_address_->ip());\n    recvDataFromDownstream(peer_address_->asString(), local_address, \"hello\");\n\n    checkSocketOptions(test_sessions_[0], ENVOY_SOCKET_IP_TRANSPARENT, ipv4_expect,\n                       ENVOY_SOCKET_IPV6_TRANSPARENT, ipv6_expect);\n    EXPECT_EQ(1, 
config_->stats().downstream_sess_total_.value());\n    EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n    checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);\n\n    test_sessions_[0].recvDataFromUpstream(\"world\");\n    checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 5 /*tx_bytes*/, 1 /*tx_datagrams*/);\n  }\n\n  bool isTransparentSocketOptionsSupported() {\n    for (const auto& option_name : transparent_options_) {\n      if (!option_name.hasValue()) {\n        return false;\n      }\n    }\n\n    return true;\n  }\n\n  Api::MockOsSysCalls os_sys_calls_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls_;\n  Upstream::MockClusterManager cluster_manager_;\n  NiceMock<MockTimeSystem> time_system_;\n  Stats::IsolatedStoreImpl stats_store_;\n  UdpProxyFilterConfigSharedPtr config_;\n  Network::MockUdpReadFilterCallbacks callbacks_;\n  Upstream::ClusterUpdateCallbacks* cluster_update_callbacks_{};\n  std::unique_ptr<TestUdpProxyFilter> filter_;\n  std::vector<TestSession> test_sessions_;\n  const Network::Address::InstanceConstSharedPtr upstream_address_;\n  const Network::Address::InstanceConstSharedPtr peer_address_;\n  const std::vector<Network::SocketOptionName> transparent_options_{ENVOY_SOCKET_IP_TRANSPARENT,\n                                                                    ENVOY_SOCKET_IPV6_TRANSPARENT};\n  inline static const std::string upstream_ip_address_ = \"20.0.0.1:443\";\n  inline static const std::string peer_ip_address_ = \"10.0.0.1:1000\";\n};\n\nclass UdpProxyFilterIpv6Test : public UdpProxyFilterTest {\npublic:\n  UdpProxyFilterIpv6Test()\n      : UdpProxyFilterIpv6Test(\n            Network::Utility::parseInternetAddressAndPort(upstream_ipv6_address_)) {}\n\n  explicit UdpProxyFilterIpv6Test(Network::Address::InstanceConstSharedPtr&& upstream_address_v6)\n      : UdpProxyFilterTest(Network::Utility::parseInternetAddressAndPort(peer_ipv6_address_)),\n        
upstream_address_v6_(std::move(upstream_address_v6)) {\n    EXPECT_CALL(*cluster_manager_.thread_local_cluster_.lb_.host_, address())\n        .WillRepeatedly(Return(upstream_address_v6_));\n  }\n\n  const Network::Address::InstanceConstSharedPtr upstream_address_v6_;\n  inline static const std::string upstream_ipv6_address_ = \"[2001:db8:85a3::8a2e:370:7334]:443\";\n  inline static const std::string peer_ipv6_address_ = \"[2001:db8:85a3::9a2e:370:7334]:1000\";\n};\n\nclass UdpProxyFilterIpv4Ipv6Test : public UdpProxyFilterIpv6Test {\npublic:\n  UdpProxyFilterIpv4Ipv6Test()\n      : UdpProxyFilterIpv6Test(Network::Utility::parseInternetAddressAndPort(\n            UdpProxyFilterIpv6Test::upstream_ipv6_address_, false)) {}\n\n  void ensureNoIpTransparentSocketOptions() {\n    expectSessionCreate(upstream_address_v6_);\n    test_sessions_[0].expectWriteToUpstream(\"hello\");\n    recvDataFromDownstream(\"[2001:db8:85a3::9a2e:370:7334]:1000\",\n                           \"[2001:db8:85a3::9a2e:370:7335]:80\", \"hello\");\n\n    checkSocketOptions(test_sessions_[0], ENVOY_SOCKET_IP_TRANSPARENT, 0,\n                       ENVOY_SOCKET_IPV6_TRANSPARENT, 0);\n    EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n    EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n    checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);\n\n    test_sessions_[0].recvDataFromUpstream(\"world\");\n    checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 5 /*tx_bytes*/, 1 /*tx_datagrams*/);\n  }\n};\n\n// Basic UDP proxy flow with a single session.\nTEST_F(UdpProxyFilterTest, BasicFlow) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  
EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n  checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);\n  test_sessions_[0].recvDataFromUpstream(\"world\");\n  checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 5 /*tx_bytes*/, 1 /*tx_datagrams*/);\n\n  test_sessions_[0].expectWriteToUpstream(\"hello2\");\n  test_sessions_[0].expectWriteToUpstream(\"hello3\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello2\");\n  checkTransferStats(11 /*rx_bytes*/, 2 /*rx_datagrams*/, 5 /*tx_bytes*/, 1 /*tx_datagrams*/);\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello3\");\n  checkTransferStats(17 /*rx_bytes*/, 3 /*rx_datagrams*/, 5 /*tx_bytes*/, 1 /*tx_datagrams*/);\n\n  test_sessions_[0].recvDataFromUpstream(\"world2\");\n  checkTransferStats(17 /*rx_bytes*/, 3 /*rx_datagrams*/, 11 /*tx_bytes*/, 2 /*tx_datagrams*/);\n  test_sessions_[0].recvDataFromUpstream(\"world3\");\n  checkTransferStats(17 /*rx_bytes*/, 3 /*rx_datagrams*/, 17 /*tx_bytes*/, 3 /*tx_datagrams*/);\n}\n\n// Idle timeout flow.\nTEST_F(UdpProxyFilterTest, IdleTimeout) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n\n  test_sessions_[0].idle_timer_->invokeCallback();\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());\n\n  expectSessionCreate(upstream_address_);\n  test_sessions_[1].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(2, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, 
config_->stats().downstream_sess_active_.value());\n}\n\n// Verify downstream send and receive error handling.\nTEST_F(UdpProxyFilterTest, SendReceiveErrorHandling) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  filter_->onReceiveError(Api::IoError::IoErrorCode::UnknownError);\n  EXPECT_EQ(1, config_->stats().downstream_sess_rx_errors_.value());\n\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);\n  EXPECT_EQ(5, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_\n                   .upstream_cx_tx_bytes_total_.value());\n\n  test_sessions_[0].recvDataFromUpstream(\"world2\", 0, SOCKET_ERROR_MSG_SIZE);\n  checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);\n  EXPECT_EQ(6, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_\n                   .upstream_cx_rx_bytes_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_tx_errors_.value());\n\n  test_sessions_[0].recvDataFromUpstream(\"world2\", SOCKET_ERROR_MSG_SIZE, 0);\n  checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);\n  EXPECT_EQ(6, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_\n                   .upstream_cx_rx_bytes_total_.value());\n  EXPECT_EQ(1, TestUtility::findCounter(\n                   cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_,\n                   \"udp.sess_rx_errors\")\n                   ->value());\n\n  test_sessions_[0].expectWriteToUpstream(\"hello\", SOCKET_ERROR_MSG_SIZE);\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  checkTransferStats(10 /*rx_bytes*/, 2 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);\n  EXPECT_EQ(5, 
cluster_manager_.thread_local_cluster_.cluster_.info_->stats_\n                   .upstream_cx_tx_bytes_total_.value());\n  EXPECT_EQ(1, TestUtility::findCounter(\n                   cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_,\n                   \"udp.sess_tx_errors\")\n                   ->value());\n}\n\n// No upstream host handling.\nTEST_F(UdpProxyFilterTest, NoUpstreamHost) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  EXPECT_CALL(cluster_manager_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(nullptr));\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_\n                   .upstream_cx_none_healthy_.value());\n}\n\n// No cluster at filter creation.\nTEST_F(UdpProxyFilterTest, NoUpstreamClusterAtCreation) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\",\n        false);\n\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_no_route_.value());\n}\n\n// Dynamic cluster addition and removal handling.\nTEST_F(UdpProxyFilterTest, ClusterDynamicAddAndRemoval) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\",\n        false);\n\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_no_route_.value());\n  EXPECT_EQ(0, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());\n\n  // Add a cluster that we don't care about.\n  NiceMock<Upstream::MockThreadLocalCluster> other_thread_local_cluster;\n  other_thread_local_cluster.cluster_.info_->name_ = \"other_cluster\";\n  cluster_update_callbacks_->onClusterAddOrUpdate(other_thread_local_cluster);\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", 
\"hello\");\n  EXPECT_EQ(2, config_->stats().downstream_sess_no_route_.value());\n  EXPECT_EQ(0, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());\n\n  // Now add the cluster we care about.\n  cluster_update_callbacks_->onClusterAddOrUpdate(cluster_manager_.thread_local_cluster_);\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n\n  // Remove a cluster we don't care about.\n  cluster_update_callbacks_->onClusterRemoval(\"other_cluster\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n\n  // Remove the cluster we do care about. This should purge all sessions.\n  cluster_update_callbacks_->onClusterRemoval(\"fake_cluster\");\n  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());\n}\n\n// Hitting the maximum per-cluster connection/session circuit breaker.\nTEST_F(UdpProxyFilterTest, MaxSessionsCircuitBreaker) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  // Allow only a single session.\n  cluster_manager_.thread_local_cluster_.cluster_.info_->resetResourceManager(1, 0, 0, 0, 0);\n\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n\n  // This should hit the session circuit breaker.\n  recvDataFromDownstream(\"10.0.0.2:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(\n      1,\n      cluster_manager_.thread_local_cluster_.cluster_.info_->stats_.upstream_cx_overflow_.value());\n  EXPECT_EQ(1, 
config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n\n  // Timing out the 1st session should allow us to create another.\n  test_sessions_[0].idle_timer_->invokeCallback();\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());\n  expectSessionCreate(upstream_address_);\n  test_sessions_[1].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.2:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(2, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n}\n\n// Verify that all sessions for a host are removed when a host is removed.\nTEST_F(UdpProxyFilterTest, RemoveHostSessions) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n\n  cluster_manager_.thread_local_cluster_.cluster_.priority_set_.runUpdateCallbacks(\n      0, {}, {cluster_manager_.thread_local_cluster_.lb_.host_});\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());\n\n  expectSessionCreate(upstream_address_);\n  test_sessions_[1].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(2, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n}\n\n// In this case the host becomes unhealthy, but we get the same host back, so just keep using the\n// current session.\nTEST_F(UdpProxyFilterTest, HostUnhealthyPickSameHost) {\n  InSequence 
s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n\n  EXPECT_CALL(*cluster_manager_.thread_local_cluster_.lb_.host_, health())\n      .WillRepeatedly(Return(Upstream::Host::Health::Unhealthy));\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n}\n\n// Make sure that we are able to create a new session if there is an available healthy host and\n// our current host is unhealthy.\nTEST_F(UdpProxyFilterTest, HostUnhealthyPickDifferentHost) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n\n  EXPECT_CALL(*cluster_manager_.thread_local_cluster_.lb_.host_, health())\n      .WillRepeatedly(Return(Upstream::Host::Health::Unhealthy));\n  auto new_host_address = Network::Utility::parseInternetAddressAndPort(\"20.0.0.2:443\");\n  auto new_host = createHost(new_host_address);\n  EXPECT_CALL(cluster_manager_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(new_host));\n  expectSessionCreate(new_host_address);\n  test_sessions_[1].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  EXPECT_EQ(2, config_->stats().downstream_sess_total_.value());\n  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());\n}\n\n// Make sure socket option is set correctly if 
use_original_src_ip is set.\nTEST_F(UdpProxyFilterTest, SocketOptionForUseOriginalSrcIp) {\n  if (!isTransparentSocketOptionsSupported()) {\n    // The option is not supported on this platform. Just skip the test.\n    GTEST_SKIP();\n  }\n  EXPECT_CALL(os_sys_calls_, supportsIpTransparent());\n\n  InSequence s;\n\n  ensureIpTransparentSocketOptions(upstream_address_, \"10.0.0.2:80\", 1, 0);\n}\n\n// Make sure socket option is set correctly if use_original_src_ip is set in case of ipv6.\nTEST_F(UdpProxyFilterIpv6Test, SocketOptionForUseOriginalSrcIpInCaseOfIpv6) {\n  if (!isTransparentSocketOptionsSupported()) {\n    // The option is not supported on this platform. Just skip the test.\n    GTEST_SKIP();\n  }\n  EXPECT_CALL(os_sys_calls_, supportsIpTransparent());\n\n  InSequence s;\n\n  ensureIpTransparentSocketOptions(upstream_address_v6_, \"[2001:db8:85a3::9a2e:370:7335]:80\", 0, 1);\n}\n\n// Make sure socket options should not be set if use_original_src_ip is not set.\nTEST_F(UdpProxyFilterIpv4Ipv6Test, NoSocketOptionIfUseOriginalSrcIpIsNotSet) {\n  if (!isTransparentSocketOptionsSupported()) {\n    // The option is not supported on this platform. Just skip the test.\n    GTEST_SKIP();\n  }\n\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\nuse_original_src_ip: false\n  )EOF\");\n\n  ensureNoIpTransparentSocketOptions();\n}\n\n// Make sure socket options should not be set if use_original_src_ip is not mentioned.\nTEST_F(UdpProxyFilterIpv4Ipv6Test, NoSocketOptionIfUseOriginalSrcIpIsNotMentioned) {\n  if (!isTransparentSocketOptionsSupported()) {\n    // The option is not supported on this platform. 
Just skip the test.\n    GTEST_SKIP();\n  }\n\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  ensureNoIpTransparentSocketOptions();\n}\n\n// Make sure exit when use the use_original_src_ip but platform does not support ip\n// transparent option.\nTEST_F(UdpProxyFilterTest, ExitIpTransparentNoPlatformSupport) {\n  EXPECT_CALL(os_sys_calls_, supportsIpTransparent()).WillOnce(Return(false));\n\n  InSequence s;\n\n  auto config = R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\nuse_original_src_ip: true\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      setup(config), EnvoyException,\n      \"The platform does not support either IP_TRANSPARENT or IPV6_TRANSPARENT. Or the envoy is \"\n      \"not running with the CAP_NET_ADMIN capability.\");\n}\n\n// Make sure hash policy with source_ip is created.\nTEST_F(UdpProxyFilterTest, HashPolicyWithSourceIp) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\nhash_policies:\n- source_ip: true\n  )EOF\");\n\n  EXPECT_NE(nullptr, config_->hashPolicy());\n}\n\n// Make sure validation fails if source_ip is false.\nTEST_F(UdpProxyFilterTest, ValidateHashPolicyWithSourceIp) {\n  InSequence s;\n  auto config = R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\nhash_policies:\n- source_ip: false\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(setup(config), EnvoyException,\n                          \"caused by HashPolicyValidationError\\\\.SourceIp: \\\\[\\\"value must equal \"\n                          \"\\\" %!q\\\\(bool=true\\\\)\\\\]\");\n}\n\n// Make sure hash policy is null if it is not mentioned.\nTEST_F(UdpProxyFilterTest, NoHashPolicy) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  EXPECT_EQ(nullptr, config_->hashPolicy());\n}\n\n// Expect correct hash is created if hash_policy with source_ip is mentioned.\nTEST_F(UdpProxyFilterTest, HashWithSourceIp) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: 
foo\ncluster: fake_cluster\nhash_policies:\n- source_ip: true\n  )EOF\");\n\n  auto host = createHost(upstream_address_);\n  auto generated_hash = HashUtil::xxHash64(\"10.0.0.1\");\n  EXPECT_CALL(cluster_manager_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(Invoke([host, generated_hash](\n                           Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n        auto hash = context->computeHashKey();\n        EXPECT_TRUE(hash.has_value());\n        EXPECT_EQ(generated_hash, hash.value());\n        return host;\n      }));\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  test_sessions_[0].recvDataFromUpstream(\"world\");\n}\n\n// Expect null hash value if hash_policy is not mentioned.\nTEST_F(UdpProxyFilterTest, NullHashWithoutHashPolicy) {\n  InSequence s;\n\n  setup(R\"EOF(\nstat_prefix: foo\ncluster: fake_cluster\n  )EOF\");\n\n  auto host = createHost(upstream_address_);\n  EXPECT_CALL(cluster_manager_.thread_local_cluster_.lb_, chooseHost(_))\n      .WillOnce(\n          Invoke([host](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {\n            auto hash = context->computeHashKey();\n            EXPECT_FALSE(hash.has_value());\n            return host;\n          }));\n  expectSessionCreate(upstream_address_);\n  test_sessions_[0].expectWriteToUpstream(\"hello\");\n  recvDataFromDownstream(\"10.0.0.1:1000\", \"10.0.0.2:80\", \"hello\");\n  test_sessions_[0].recvDataFromUpstream(\"world\");\n}\n\n} // namespace\n} // namespace UdpProxy\n} // namespace UdpFilters\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"test/integration/integration.h\"\n#include \"test/test_common/network_utility.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass UdpProxyIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                public BaseIntegrationTest {\npublic:\n  UdpProxyIntegrationTest() : BaseIntegrationTest(GetParam(), configToUse()) {}\n\n  static std::string configToUse() {\n    return absl::StrCat(ConfigHelper::baseUdpListenerConfig(), R\"EOF(\n    listener_filters:\n      name: udp_proxy\n      typed_config:\n        '@type': type.googleapis.com/envoy.extensions.filters.udp.udp_proxy.v3.UdpProxyConfig\n        stat_prefix: foo\n        cluster: cluster_0\n      )EOF\");\n  }\n\n  void setup(uint32_t upstream_count) {\n    udp_fake_upstream_ = true;\n    if (upstream_count > 1) {\n      setDeterministic();\n      setUpstreamCount(upstream_count);\n      config_helper_.addConfigModifier(\n          [upstream_count](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n            for (uint32_t i = 1; i < upstream_count; i++) {\n              bootstrap.mutable_static_resources()\n                  ->mutable_clusters(0)\n                  ->mutable_load_assignment()\n                  ->mutable_endpoints(0)\n                  ->add_lb_endpoints()\n                  ->mutable_endpoint()\n                  ->MergeFrom(ConfigHelper::buildEndpoint(\n                      Network::Test::getLoopbackAddressString(GetParam())));\n            }\n          });\n    }\n    BaseIntegrationTest::initialize();\n  }\n\n  void requestResponseWithListenerAddress(const Network::Address::Instance& listener_address) {\n    // Send datagram to be proxied.\n    Network::Test::UdpSyncPeer client(version_);\n    client.write(\"hello\", listener_address);\n\n    // Wait for the upstream datagram.\n    Network::UdpRecvData request_datagram;\n    
ASSERT_TRUE(fake_upstreams_[0]->waitForUdpDatagram(request_datagram));\n    EXPECT_EQ(\"hello\", request_datagram.buffer_->toString());\n\n    // Respond from the upstream.\n    fake_upstreams_[0]->sendUdpDatagram(\"world1\", request_datagram.addresses_.peer_);\n    Network::UdpRecvData response_datagram;\n    client.recv(response_datagram);\n    EXPECT_EQ(\"world1\", response_datagram.buffer_->toString());\n    EXPECT_EQ(listener_address.asString(), response_datagram.addresses_.peer_->asString());\n\n    EXPECT_EQ(5, test_server_->counter(\"udp.foo.downstream_sess_rx_bytes\")->value());\n    EXPECT_EQ(1, test_server_->counter(\"udp.foo.downstream_sess_rx_datagrams\")->value());\n    EXPECT_EQ(5, test_server_->counter(\"cluster.cluster_0.upstream_cx_tx_bytes_total\")->value());\n    EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.udp.sess_tx_datagrams\")->value());\n\n    EXPECT_EQ(6, test_server_->counter(\"cluster.cluster_0.upstream_cx_rx_bytes_total\")->value());\n    EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.udp.sess_rx_datagrams\")->value());\n    // The stat is incremented after the send so there is a race condition and we must wait for\n    // the counter to be incremented.\n    test_server_->waitForCounterEq(\"udp.foo.downstream_sess_tx_bytes\", 6);\n    test_server_->waitForCounterEq(\"udp.foo.downstream_sess_tx_datagrams\", 1);\n\n    EXPECT_EQ(1, test_server_->counter(\"udp.foo.downstream_sess_total\")->value());\n    EXPECT_EQ(1, test_server_->gauge(\"udp.foo.downstream_sess_active\")->value());\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, UdpProxyIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Make sure that we gracefully fail if the user does not configure reuse port and concurrency is\n// > 1.\nTEST_P(UdpProxyIntegrationTest, NoReusePort) {\n  concurrency_ = 2;\n  // Do not wait for listeners to start as 
the listener will fail.\n  defer_listener_finalization_ = true;\n  setup(1);\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_rejected\", 1);\n}\n\n// Basic loopback test.\nTEST_P(UdpProxyIntegrationTest, HelloWorldOnLoopback) {\n  setup(1);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n  requestResponseWithListenerAddress(*listener_address);\n}\n\n// Verifies calling sendmsg with a non-local address. Note that this test is only fully complete for\n// IPv4. See the comment below for more details.\nTEST_P(UdpProxyIntegrationTest, HelloWorldOnNonLocalAddress) {\n  setup(1);\n  const uint32_t port = lookupPort(\"listener_0\");\n  Network::Address::InstanceConstSharedPtr listener_address;\n  if (version_ == Network::Address::IpVersion::v4) {\n    // Kernel regards any 127.x.x.x as local address.\n    listener_address = std::make_shared<Network::Address::Ipv4Instance>(\n#if defined(__APPLE__) || defined(WIN32)\n        \"127.0.0.1\",\n#else\n        \"127.0.0.3\",\n#endif\n        port);\n  } else {\n    // IPv6 doesn't allow any non-local source address for sendmsg. And the only\n    // local address guaranteed in tests in loopback. Unfortunately, even if it's not\n    // specified, kernel will pick this address as source address. So this test\n    // only checks if IoSocketHandle::sendmsg() sets up CMSG_DATA correctly,\n    // i.e. 
cmsg_len is big enough when that code path is executed.\n    listener_address = std::make_shared<Network::Address::Ipv6Instance>(\"::1\", port);\n  }\n\n  requestResponseWithListenerAddress(*listener_address);\n}\n\n// Make sure multiple clients are routed correctly to a single upstream host.\nTEST_P(UdpProxyIntegrationTest, MultipleClients) {\n  setup(1);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n\n  Network::Test::UdpSyncPeer client1(version_);\n  client1.write(\"client1_hello\", *listener_address);\n\n  Network::Test::UdpSyncPeer client2(version_);\n  client2.write(\"client2_hello\", *listener_address);\n  client2.write(\"client2_hello_2\", *listener_address);\n\n  Network::UdpRecvData client1_request_datagram;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForUdpDatagram(client1_request_datagram));\n  EXPECT_EQ(\"client1_hello\", client1_request_datagram.buffer_->toString());\n\n  Network::UdpRecvData client2_request_datagram;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForUdpDatagram(client2_request_datagram));\n  EXPECT_EQ(\"client2_hello\", client2_request_datagram.buffer_->toString());\n  ASSERT_TRUE(fake_upstreams_[0]->waitForUdpDatagram(client2_request_datagram));\n  EXPECT_EQ(\"client2_hello_2\", client2_request_datagram.buffer_->toString());\n\n  // We should not be getting datagrams from the same peer.\n  EXPECT_NE(*client1_request_datagram.addresses_.peer_, *client2_request_datagram.addresses_.peer_);\n\n  // Send two datagrams back to client 2.\n  fake_upstreams_[0]->sendUdpDatagram(\"client2_world\", client2_request_datagram.addresses_.peer_);\n  fake_upstreams_[0]->sendUdpDatagram(\"client2_world_2\", client2_request_datagram.addresses_.peer_);\n  Network::UdpRecvData response_datagram;\n  client2.recv(response_datagram);\n  EXPECT_EQ(\"client2_world\", 
response_datagram.buffer_->toString());\n  client2.recv(response_datagram);\n  EXPECT_EQ(\"client2_world_2\", response_datagram.buffer_->toString());\n\n  // Send 1 datagram back to client 1.\n  fake_upstreams_[0]->sendUdpDatagram(\"client1_world\", client1_request_datagram.addresses_.peer_);\n  client1.recv(response_datagram);\n  EXPECT_EQ(\"client1_world\", response_datagram.buffer_->toString());\n}\n\n// Make sure sessions correctly forward to the same upstream host when there are multiple upstream\n// hosts.\nTEST_P(UdpProxyIntegrationTest, MultipleUpstreams) {\n  setup(2);\n  const uint32_t port = lookupPort(\"listener_0\");\n  const auto listener_address = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n\n  Network::Test::UdpSyncPeer client(version_);\n  client.write(\"hello1\", *listener_address);\n  client.write(\"hello2\", *listener_address);\n  Network::UdpRecvData request_datagram;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForUdpDatagram(request_datagram));\n  EXPECT_EQ(\"hello1\", request_datagram.buffer_->toString());\n  ASSERT_TRUE(fake_upstreams_[0]->waitForUdpDatagram(request_datagram));\n  EXPECT_EQ(\"hello2\", request_datagram.buffer_->toString());\n\n  fake_upstreams_[0]->sendUdpDatagram(\"world1\", request_datagram.addresses_.peer_);\n  fake_upstreams_[0]->sendUdpDatagram(\"world2\", request_datagram.addresses_.peer_);\n  Network::UdpRecvData response_datagram;\n  client.recv(response_datagram);\n  EXPECT_EQ(\"world1\", response_datagram.buffer_->toString());\n  client.recv(response_datagram);\n  EXPECT_EQ(\"world2\", response_datagram.buffer_->toString());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/grpc_credentials/aws_iam/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n    \"envoy_select_google_grpc\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"aws_iam_grpc_credentials_test\",\n    srcs = envoy_select_google_grpc([\"aws_iam_grpc_credentials_test.cc\"]),\n    data = [\"//test/config/integration/certs\"],\n    deps = [\n        \"//source/extensions/grpc_credentials:well_known_names\",\n        \"//source/extensions/grpc_credentials/aws_iam:config\",\n        \"//test/common/grpc:grpc_client_integration_test_harness_lib\",\n        \"//test/integration:integration_lib\",\n        \"@envoy_api//envoy/config/grpc_credential/v2alpha:pkg_cc_proto\",\n    ] + envoy_select_google_grpc([\"//source/common/grpc:google_async_client_lib\"]),\n)\n"
  },
  {
    "path": "test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc",
    "content": "#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/grpc_credential/v3/aws_iam.pb.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/common/utility.h\"\n#include \"common/grpc/google_async_client_impl.h\"\n\n#include \"extensions/grpc_credentials/well_known_names.h\"\n\n#include \"test/common/grpc/grpc_client_integration_test_harness.h\"\n#include \"test/integration/fake_upstream.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\n// AWS IAM credential validation tests.\nclass GrpcAwsIamClientIntegrationTest : public GrpcSslClientIntegrationTest {\npublic:\n  void SetUp() override {\n    GrpcSslClientIntegrationTest::SetUp();\n    TestEnvironment::setEnvVar(\"AWS_ACCESS_KEY_ID\", \"test_akid\", 1);\n    TestEnvironment::setEnvVar(\"AWS_SECRET_ACCESS_KEY\", \"test_secret\", 1);\n  }\n\n  void TearDown() override {\n    GrpcSslClientIntegrationTest::TearDown();\n    TestEnvironment::unsetEnvVar(\"AWS_REGION\");\n    TestEnvironment::unsetEnvVar(\"AWS_ACCESS_KEY_ID\");\n    TestEnvironment::unsetEnvVar(\"AWS_SECRET_ACCESS_KEY\");\n  }\n\n  void expectExtraHeaders(FakeStream& fake_stream) override {\n    if (call_credentials_ != CallCredentials::FromPlugin) {\n      return;\n    }\n    AssertionResult result = fake_stream.waitForHeadersComplete();\n    RELEASE_ASSERT(result, result.message());\n    Http::TestRequestHeaderMapImpl stream_headers(fake_stream.headers());\n    const auto auth_header = stream_headers.get_(\"Authorization\");\n    const auto auth_parts = StringUtil::splitToken(auth_header, \", \", false);\n    ASSERT_EQ(4, auth_parts.size());\n    EXPECT_EQ(\"AWS4-HMAC-SHA256\", auth_parts[0]);\n    EXPECT_TRUE(absl::StartsWith(auth_parts[1], \"Credential=test_akid/\"));\n    EXPECT_TRUE(absl::EndsWith(auth_parts[1],\n                               fmt::format(\"{}/{}/aws4_request\", region_name_, service_name_)));\n    
EXPECT_EQ(\"SignedHeaders=host;x-amz-content-sha256;x-amz-date\", auth_parts[2]);\n    // We don't verify correctness off the signature here, as this is part of the signer unit tests.\n    EXPECT_TRUE(absl::StartsWith(auth_parts[3], \"Signature=\"));\n  }\n\n  envoy::config::core::v3::GrpcService createGoogleGrpcConfig() override {\n    auto config = GrpcSslClientIntegrationTest::createGoogleGrpcConfig();\n    auto* google_grpc = config.mutable_google_grpc();\n    google_grpc->set_credentials_factory_name(credentials_factory_name_);\n    auto* ssl_creds = google_grpc->mutable_channel_credentials()->mutable_ssl_credentials();\n    ssl_creds->mutable_root_certs()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n\n    switch (call_credentials_) {\n    case CallCredentials::FromPlugin: {\n      std::string config_yaml;\n      switch (region_location_) {\n      case RegionLocation::InEnvironment:\n        TestEnvironment::setEnvVar(\"AWS_REGION\", region_name_, 1);\n        ABSL_FALLTHROUGH_INTENDED;\n      case RegionLocation::NotProvided:\n        config_yaml = fmt::format(R\"EOF(\n  \"@type\": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig\n  service_name: {}\n  )EOF\",\n                                  service_name_);\n        break;\n      case RegionLocation::InConfig:\n        config_yaml = fmt::format(R\"EOF(\n  \"@type\": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig\n  service_name: {}\n  region: {}\n  )EOF\",\n                                  service_name_, region_name_);\n        break;\n      }\n\n      auto* plugin_config = google_grpc->add_call_credentials()->mutable_from_plugin();\n      plugin_config->set_name(credentials_factory_name_);\n      Envoy::TestUtility::loadFromYaml(config_yaml, *plugin_config->mutable_typed_config());\n      return config;\n    }\n    case CallCredentials::AccessToken:\n      
google_grpc->add_call_credentials()->mutable_access_token()->assign(\"foo\");\n      return config;\n    default:\n      return config;\n    }\n  }\n  enum class RegionLocation {\n    NotProvided,\n    InEnvironment,\n    InConfig,\n  };\n\n  enum class CallCredentials {\n    FromPlugin,\n    AccessToken,\n  };\n\n  RegionLocation region_location_ = RegionLocation::NotProvided;\n  CallCredentials call_credentials_ = CallCredentials::FromPlugin;\n  std::string service_name_{};\n  std::string region_name_{};\n  std::string credentials_factory_name_{};\n};\n\nINSTANTIATE_TEST_SUITE_P(SslIpVersionsClientType, GrpcAwsIamClientIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS);\n\nTEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_ConfigRegion) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  service_name_ = \"test_service\";\n  region_name_ = \"test_region_static\";\n  region_location_ = RegionLocation::InConfig;\n  credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\nTEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_EnvRegion) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  service_name_ = \"test_service\";\n  region_name_ = \"test_region_env\";\n  region_location_ = RegionLocation::InEnvironment;\n  credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\nTEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_NoRegion) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  service_name_ = \"test_service\";\n  region_name_ = \"test_region_env\";\n  region_location_ = RegionLocation::NotProvided;\n  credentials_factory_name_ = 
Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam;\n  EXPECT_THROW_WITH_REGEX(initialize();, EnvoyException, \"AWS region\");\n}\n\nTEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_UnexpectedCallCredentials) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  call_credentials_ = CallCredentials::AccessToken;\n  credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/grpc_credentials/file_based_metadata/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n    \"envoy_select_google_grpc\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"file_based_metadata_grpc_credentials_test\",\n    srcs = [\"file_based_metadata_grpc_credentials_test.cc\"],\n    data = [\"//test/config/integration/certs\"],\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \"//source/extensions/grpc_credentials:well_known_names\",\n        \"//source/extensions/grpc_credentials/file_based_metadata:config\",\n        \"//test/common/grpc:grpc_client_integration_test_harness_lib\",\n        \"//test/integration:integration_lib\",\n        \"@envoy_api//envoy/config/grpc_credential/v2alpha:pkg_cc_proto\",\n    ] + envoy_select_google_grpc([\"//source/common/grpc:google_async_client_lib\"]),\n)\n"
  },
  {
    "path": "test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc",
    "content": "#ifdef ENVOY_GOOGLE_GRPC\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/grpc_credential/v3/file_based_metadata.pb.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/grpc/google_async_client_impl.h\"\n\n#include \"extensions/grpc_credentials/file_based_metadata/config.h\"\n#include \"extensions/grpc_credentials/well_known_names.h\"\n\n#include \"test/common/grpc/grpc_client_integration_test_harness.h\"\n#include \"test/integration/fake_upstream.h\"\n#include \"test/test_common/environment.h\"\n\nnamespace Envoy {\nnamespace Grpc {\nnamespace {\n\n// FileBasedMetadata credential validation tests.\nclass GrpcFileBasedMetadataClientIntegrationTest : public GrpcSslClientIntegrationTest {\npublic:\n  void expectExtraHeaders(FakeStream& fake_stream) override {\n    AssertionResult result = fake_stream.waitForHeadersComplete();\n    RELEASE_ASSERT(result, result.message());\n    Http::TestRequestHeaderMapImpl stream_headers(fake_stream.headers());\n    if (!header_value_1_.empty()) {\n      EXPECT_EQ(header_prefix_1_ + header_value_1_, stream_headers.get_(header_key_1_));\n    }\n    if (!header_value_2_.empty()) {\n      EXPECT_EQ(header_value_2_, stream_headers.get_(\"authorization\"));\n    }\n  }\n\n  envoy::config::core::v3::GrpcService createGoogleGrpcConfig() override {\n    auto config = GrpcClientIntegrationTest::createGoogleGrpcConfig();\n    auto* google_grpc = config.mutable_google_grpc();\n    google_grpc->set_credentials_factory_name(credentials_factory_name_);\n    auto* ssl_creds = google_grpc->mutable_channel_credentials()->mutable_ssl_credentials();\n    ssl_creds->mutable_root_certs()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n    if (!header_value_1_.empty()) {\n      const std::string yaml1 = fmt::format(R\"EOF(\n\"@type\": type.googleapis.com/envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig        \nsecret_data:\n  
inline_string: {}\nheader_key: {}\nheader_prefix: {}\n)EOF\",\n                                            header_value_1_, header_key_1_, header_prefix_1_);\n      auto* plugin_config = google_grpc->add_call_credentials()->mutable_from_plugin();\n      plugin_config->set_name(credentials_factory_name_);\n      envoy::config::grpc_credential::v3::FileBasedMetadataConfig metadata_config;\n      Envoy::TestUtility::loadFromYaml(yaml1, *plugin_config->mutable_typed_config());\n    }\n    if (!header_value_2_.empty()) {\n      // uses default key/prefix\n      const std::string yaml2 = fmt::format(R\"EOF(\n\"@type\": type.googleapis.com/envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig        \nsecret_data:\n  inline_string: {}\n)EOF\",\n                                            header_value_2_);\n      envoy::config::grpc_credential::v3::FileBasedMetadataConfig metadata_config2;\n      auto* plugin_config2 = google_grpc->add_call_credentials()->mutable_from_plugin();\n      plugin_config2->set_name(credentials_factory_name_);\n      Envoy::TestUtility::loadFromYaml(yaml2, *plugin_config2->mutable_typed_config());\n    }\n    if (!access_token_value_.empty()) {\n      google_grpc->add_call_credentials()->set_access_token(access_token_value_);\n    }\n    return config;\n  }\n\n  std::string header_key_1_{};\n  std::string header_value_1_{};\n  std::string header_value_2_{};\n  std::string header_prefix_1_{};\n  std::string access_token_value_{};\n  std::string credentials_factory_name_{};\n};\n\n// Parameterize the loopback test server socket address and gRPC client type.\nINSTANTIATE_TEST_SUITE_P(SslIpVersionsClientType, GrpcFileBasedMetadataClientIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Validate that a simple request-reply unary RPC works with FileBasedMetadata auth.\nTEST_P(GrpcFileBasedMetadataClientIntegrationTest, FileBasedMetadataGrpcAuthRequest) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  
header_key_1_ = \"header1\";\n  header_prefix_1_ = \"prefix1\";\n  header_value_1_ = \"secretvalue\";\n  credentials_factory_name_ =\n      Extensions::GrpcCredentials::GrpcCredentialsNames::get().FileBasedMetadata;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that two separate metadata plugins work with FileBasedMetadata auth.\nTEST_P(GrpcFileBasedMetadataClientIntegrationTest, DoubleFileBasedMetadataGrpcAuthRequest) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  header_key_1_ = \"header1\";\n  header_prefix_1_ = \"prefix1\";\n  header_value_1_ = \"secretvalue\";\n  header_value_2_ = \"secret2\";\n  credentials_factory_name_ =\n      Extensions::GrpcCredentials::GrpcCredentialsNames::get().FileBasedMetadata;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that FileBasedMetadata auth plugin works without a config loaded\nTEST_P(GrpcFileBasedMetadataClientIntegrationTest, EmptyFileBasedMetadataGrpcAuthRequest) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  credentials_factory_name_ =\n      Extensions::GrpcCredentials::GrpcCredentialsNames::get().FileBasedMetadata;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n  request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\n// Validate that FileBasedMetadata auth plugin works with extra credentials configured\nTEST_P(GrpcFileBasedMetadataClientIntegrationTest, ExtraConfigFileBasedMetadataGrpcAuthRequest) {\n  SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc);\n  access_token_value_ = \"testaccesstoken\";\n  header_key_1_ = \"header1\";\n  header_prefix_1_ = \"prefix1\";\n  header_value_1_ = \"secretvalue\";\n  credentials_factory_name_ =\n      Extensions::GrpcCredentials::GrpcCredentialsNames::get().FileBasedMetadata;\n  initialize();\n  auto request = createRequest(empty_metadata_);\n 
 request->sendReply();\n  dispatcher_helper_.runDispatcher();\n}\n\nclass MockAuthContext : public ::grpc::AuthContext {\npublic:\n  ~MockAuthContext() override = default;\n  MOCK_METHOD(bool, IsPeerAuthenticated, (), (const, override));\n  MOCK_METHOD(std::vector<grpc::string_ref>, GetPeerIdentity, (), (const, override));\n  MOCK_METHOD(std::string, GetPeerIdentityPropertyName, (), (const, override));\n  MOCK_METHOD(std::vector<grpc::string_ref>, FindPropertyValues, (const std::string& name),\n              (const, override));\n  MOCK_METHOD(::grpc::AuthPropertyIterator, begin, (), (const, override));\n  MOCK_METHOD(::grpc::AuthPropertyIterator, end, (), (const, override));\n  MOCK_METHOD(void, AddProperty, (const std::string& key, const grpc::string_ref& value),\n              (override));\n  MOCK_METHOD(bool, SetPeerIdentityPropertyName, (const std::string& name), (override));\n};\n\nTEST(GrpcFileBasedMetadata, MissingSecretData) {\n  const std::string yaml = R\"EOF(\nsecret_data:\n  filename: missing-file\n)EOF\";\n  envoy::config::grpc_credential::v3::FileBasedMetadataConfig metadata_config;\n  Envoy::TestUtility::loadFromYaml(yaml, metadata_config);\n  Api::ApiPtr api = Api::createApiForTest();\n  Extensions::GrpcCredentials::FileBasedMetadata::FileBasedMetadataAuthenticator authenticator(\n      metadata_config, *api);\n\n  MockAuthContext context;\n  std::multimap<grpc::string, grpc::string> metadata;\n  auto status =\n      authenticator.GetMetadata(grpc::string_ref(), grpc::string_ref(), context, &metadata);\n  EXPECT_EQ(grpc::StatusCode::NOT_FOUND, status.error_code());\n}\n\n} // namespace\n} // namespace Grpc\n} // namespace Envoy\n#endif\n"
  },
  {
    "path": "test/extensions/health_checkers/redis/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"redis_test\",\n    srcs = [\"redis_test.cc\"],\n    extension_name = \"envoy.health_checkers.redis\",\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/extensions/health_checkers/redis\",\n        \"//source/extensions/health_checkers/redis:utility\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/extensions/filters/network/common/redis:redis_mocks\",\n        \"//test/extensions/filters/network/common/redis:test_utils_lib\",\n        \"//test/extensions/filters/network/redis_proxy:redis_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/mocks/upstream:health_check_event_logger_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.health_checkers.redis\",\n    deps = [\n        \"//source/common/upstream:health_checker_lib\",\n        \"//source/extensions/health_checkers/redis:config\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:health_checker_factory_context_mocks\",\n        \"//test/mocks/upstream:health_checker_mocks\",\n        
\"//test/mocks/upstream:priority_set_mocks\",\n        \"@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/health_checkers/redis/config_test.cc",
    "content": "#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\n#include \"common/upstream/health_checker_impl.h\"\n\n#include \"extensions/health_checkers/redis/config.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/health_checker_factory_context.h\"\n#include \"test/mocks/upstream/health_checker.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HealthCheckers {\nnamespace RedisHealthChecker {\nnamespace {\n\nusing CustomRedisHealthChecker = Extensions::HealthCheckers::RedisHealthChecker::RedisHealthChecker;\n\nTEST(HealthCheckerFactoryTest, DEPRECATED_FEATURE_TEST(CreateRedisDeprecated)) {\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: envoy.health_checkers.redis\n      config:\n        key: foo\n    )EOF\";\n\n  NiceMock<Server::Configuration::MockHealthCheckerFactoryContext> context;\n\n  RedisHealthCheckerFactory factory;\n  EXPECT_NE(nullptr, dynamic_cast<CustomRedisHealthChecker*>(\n                         factory\n                             .createCustomHealthChecker(\n                                 Upstream::parseHealthCheckFromV3Yaml(yaml, false), context)\n                             .get()));\n}\n\nTEST(HealthCheckerFactoryTest, CreateRedis) {\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": 
type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n        key: foo\n    )EOF\";\n\n  NiceMock<Server::Configuration::MockHealthCheckerFactoryContext> context;\n\n  RedisHealthCheckerFactory factory;\n  EXPECT_NE(\n      nullptr,\n      dynamic_cast<CustomRedisHealthChecker*>(\n          factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context)\n              .get()));\n}\n\nTEST(HealthCheckerFactoryTest, DEPRECATED_FEATURE_TEST(CreateRedisWithoutKeyDeprecated)) {\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: envoy.health_checkers.redis\n      config:\n    )EOF\";\n\n  NiceMock<Server::Configuration::MockHealthCheckerFactoryContext> context;\n\n  RedisHealthCheckerFactory factory;\n  EXPECT_NE(nullptr, dynamic_cast<CustomRedisHealthChecker*>(\n                         factory\n                             .createCustomHealthChecker(\n                                 Upstream::parseHealthCheckFromV3Yaml(yaml, false), context)\n                             .get()));\n}\n\nTEST(HealthCheckerFactoryTest, CreateRedisWithoutKey) {\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n    )EOF\";\n\n  NiceMock<Server::Configuration::MockHealthCheckerFactoryContext> context;\n\n  RedisHealthCheckerFactory factory;\n  EXPECT_NE(\n      nullptr,\n      dynamic_cast<CustomRedisHealthChecker*>(\n          factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context)\n              .get()));\n}\n\nTEST(HealthCheckerFactoryTest, CreateRedisWithLogHCFailure) {\n  const 
std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n    always_log_health_check_failures: true\n    )EOF\";\n\n  NiceMock<Server::Configuration::MockHealthCheckerFactoryContext> context;\n\n  RedisHealthCheckerFactory factory;\n  EXPECT_NE(\n      nullptr,\n      dynamic_cast<CustomRedisHealthChecker*>(\n          factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context)\n              .get()));\n}\n\nTEST(HealthCheckerFactoryTest, CreateRedisViaUpstreamHealthCheckerFactory) {\n  const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n        key: foo\n    )EOF\";\n\n  NiceMock<Upstream::MockClusterMockPrioritySet> cluster;\n  Runtime::MockLoader runtime;\n  Random::MockRandomGenerator random;\n  Event::MockDispatcher dispatcher;\n  AccessLog::MockAccessLogManager log_manager;\n  NiceMock<Api::MockApi> api;\n\n  EXPECT_NE(nullptr,\n            dynamic_cast<CustomRedisHealthChecker*>(\n                Upstream::HealthCheckerFactory::create(\n                    Upstream::parseHealthCheckFromV3Yaml(yaml), cluster, runtime, dispatcher,\n                    log_manager, ProtobufMessage::getStrictValidationVisitor(), api)\n                    .get()));\n}\n} // namespace\n} // namespace RedisHealthChecker\n} // namespace HealthCheckers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/health_checkers/redis/redis_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h\"\n\n#include \"extensions/health_checkers/redis/redis.h\"\n#include \"extensions/health_checkers/redis/utility.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/extensions/filters/network/common/redis/mocks.h\"\n#include \"test/extensions/filters/network/redis_proxy/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/mocks/upstream/health_check_event_logger.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n\nusing testing::_;\nusing testing::DoAll;\nusing testing::InSequence;\nusing testing::NiceMock;\nusing testing::Ref;\nusing testing::Return;\nusing testing::WithArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace HealthCheckers {\nnamespace RedisHealthChecker {\n\nclass RedisHealthCheckerTest\n    : public testing::Test,\n      public Extensions::NetworkFilters::Common::Redis::Client::ClientFactory {\npublic:\n  RedisHealthCheckerTest()\n      : cluster_(new NiceMock<Upstream::MockClusterMockPrioritySet>()),\n        event_logger_(new Upstream::MockHealthCheckEventLogger()), api_(Api::createApiForTest()) {}\n\n  void setup() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n    )EOF\";\n\n    const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml);\n    const auto& 
redis_config = getRedisHealthCheckConfig(\n        health_check_config, ProtobufMessage::getStrictValidationVisitor());\n\n    health_checker_ = std::make_shared<RedisHealthChecker>(\n        *cluster_, health_check_config, redis_config, dispatcher_, runtime_,\n        Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this);\n  }\n\n  void setupWithAuth() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n    )EOF\";\n\n    const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml);\n    const auto& redis_config = getRedisHealthCheckConfig(\n        health_check_config, ProtobufMessage::getStrictValidationVisitor());\n\n    std::string auth_yaml = R\"EOF(\n    auth_username: { inline_string: \"test user\" }\n    auth_password: { inline_string: \"test password\" }\n    )EOF\";\n    envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions proto_config{};\n    TestUtility::loadFromYaml(auth_yaml, proto_config);\n\n    Upstream::ProtocolOptionsConfigConstSharedPtr options = std::make_shared<\n        const Envoy::Extensions::NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl>(\n        proto_config);\n\n    EXPECT_CALL(*cluster_->info_, extensionProtocolOptions(_)).WillRepeatedly(Return(options));\n\n    health_checker_ = std::make_shared<RedisHealthChecker>(\n        *cluster_, health_check_config, redis_config, dispatcher_, runtime_,\n        Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this);\n  }\n\n  void setupAlwaysLogHealthCheckFailures() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    
always_log_health_check_failures: true\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n    )EOF\";\n\n    const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml);\n    const auto& redis_config = getRedisHealthCheckConfig(\n        health_check_config, ProtobufMessage::getStrictValidationVisitor());\n\n    health_checker_ = std::make_shared<RedisHealthChecker>(\n        *cluster_, health_check_config, redis_config, dispatcher_, runtime_,\n        Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this);\n  }\n\n  void setupExistsHealthcheck() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n        key: foo\n    )EOF\";\n\n    const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml);\n    const auto& redis_config = getRedisHealthCheckConfig(\n        health_check_config, ProtobufMessage::getStrictValidationVisitor());\n\n    health_checker_ = std::make_shared<RedisHealthChecker>(\n        *cluster_, health_check_config, redis_config, dispatcher_, runtime_,\n        Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this);\n  }\n\n  void setupExistsHealthcheckWithAuth() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n        key: foo\n    )EOF\";\n\n    const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml);\n    const auto& 
redis_config = getRedisHealthCheckConfig(\n        health_check_config, ProtobufMessage::getStrictValidationVisitor());\n\n    std::string auth_yaml = R\"EOF(\n    auth_username: { inline_string: \"test user\" }\n    auth_password: { inline_string: \"test password\" }\n    )EOF\";\n    envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions proto_config{};\n    TestUtility::loadFromYaml(auth_yaml, proto_config);\n\n    Upstream::ProtocolOptionsConfigConstSharedPtr options = std::make_shared<\n        const Envoy::Extensions::NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl>(\n        proto_config);\n\n    EXPECT_CALL(*cluster_->info_, extensionProtocolOptions(_)).WillRepeatedly(Return(options));\n\n    health_checker_ = std::make_shared<RedisHealthChecker>(\n        *cluster_, health_check_config, redis_config, dispatcher_, runtime_,\n        Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this);\n  }\n\n  void setupExistsHealthcheckDeprecated(bool avoid_boosting = true) {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n    custom_health_check:\n      name: envoy.health_checkers.redis\n      config:\n        key: foo\n    )EOF\";\n\n    const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml, avoid_boosting);\n    const auto& redis_config = getRedisHealthCheckConfig(\n        health_check_config, ProtobufMessage::getStrictValidationVisitor());\n\n    health_checker_ = std::make_shared<RedisHealthChecker>(\n        *cluster_, health_check_config, redis_config, dispatcher_, runtime_,\n        Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this);\n  }\n\n  void setupDontReuseConnection() {\n    const std::string yaml = R\"EOF(\n    timeout: 1s\n    interval: 1s\n    no_traffic_interval: 5s\n    interval_jitter: 1s\n    unhealthy_threshold: 1\n    healthy_threshold: 1\n 
   reuse_connection: false\n    custom_health_check:\n      name: redis\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis\n    )EOF\";\n\n    const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml);\n    const auto& redis_config = getRedisHealthCheckConfig(\n        health_check_config, ProtobufMessage::getStrictValidationVisitor());\n\n    health_checker_ = std::make_shared<RedisHealthChecker>(\n        *cluster_, health_check_config, redis_config, dispatcher_, runtime_,\n        Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this);\n  }\n\n  Extensions::NetworkFilters::Common::Redis::Client::ClientPtr\n  create(Upstream::HostConstSharedPtr, Event::Dispatcher&,\n         const Extensions::NetworkFilters::Common::Redis::Client::Config&,\n         const Extensions::NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr&,\n         Stats::Scope&, const std::string& username, const std::string& password) override {\n    EXPECT_EQ(auth_username_, username);\n    EXPECT_EQ(auth_password_, password);\n    return Extensions::NetworkFilters::Common::Redis::Client::ClientPtr{create_()};\n  }\n\n  MOCK_METHOD(Extensions::NetworkFilters::Common::Redis::Client::Client*, create_, ());\n\n  void expectSessionCreate() {\n    interval_timer_ = new Event::MockTimer(&dispatcher_);\n    timeout_timer_ = new Event::MockTimer(&dispatcher_);\n  }\n\n  void expectClientCreate() {\n    client_ = new Extensions::NetworkFilters::Common::Redis::Client::MockClient();\n    EXPECT_CALL(*this, create_()).WillOnce(Return(client_));\n    EXPECT_CALL(*client_, addConnectionCallbacks(_));\n  }\n\n  void expectExistsRequestCreate() {\n    EXPECT_CALL(*client_, makeRequest_(Ref(RedisHealthChecker::existsHealthCheckRequest(\"\")), _))\n        .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_)));\n    EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  }\n\n  void 
expectPingRequestCreate() {\n    EXPECT_CALL(*client_, makeRequest_(Ref(RedisHealthChecker::pingHealthCheckRequest()), _))\n        .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_)));\n    EXPECT_CALL(*timeout_timer_, enableTimer(_, _));\n  }\n\n  void exerciseStubs() {\n    Upstream::HostSharedPtr host = Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:100\");\n    RedisHealthChecker::RedisActiveHealthCheckSessionPtr session =\n        std::make_unique<RedisHealthChecker::RedisActiveHealthCheckSession>(*health_checker_, host);\n\n    EXPECT_TRUE(session->disableOutlierEvents());\n    EXPECT_EQ(session->opTimeout(),\n              std::chrono::milliseconds(2000)); // Timeout is 1s is test configurations.\n    EXPECT_FALSE(session->enableHashtagging());\n    EXPECT_TRUE(session->enableRedirection());\n    EXPECT_EQ(session->maxBufferSizeBeforeFlush(), 0);\n    EXPECT_EQ(session->bufferFlushTimeoutInMs(), std::chrono::milliseconds(1));\n    EXPECT_EQ(session->maxUpstreamUnknownConnections(), 0);\n    EXPECT_FALSE(session->enableCommandStats());\n    session->onDeferredDeleteBase(); // This must be called to pass assertions in the destructor.\n  }\n\n  std::shared_ptr<Upstream::MockClusterMockPrioritySet> cluster_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  Upstream::MockHealthCheckEventLogger* event_logger_{};\n  Event::MockTimer* timeout_timer_{};\n  Event::MockTimer* interval_timer_{};\n  Extensions::NetworkFilters::Common::Redis::Client::MockClient* client_{};\n  Extensions::NetworkFilters::Common::Redis::Client::MockPoolRequest pool_request_;\n  Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks* pool_callbacks_{};\n  std::shared_ptr<RedisHealthChecker> health_checker_;\n  Api::ApiPtr api_;\n  std::string auth_username_;\n  std::string auth_password_;\n};\n\nTEST_F(RedisHealthCheckerTest, PingWithAuth) {\n  InSequence s;\n\n  auth_username_ = \"test 
user\";\n  auth_password_ = \"test password\";\n\n  setupWithAuth();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectClientCreate();\n  expectPingRequestCreate();\n  health_checker_->start();\n\n  client_->runHighWatermarkCallbacks();\n  client_->runLowWatermarkCallbacks();\n\n  // Success\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  NetworkFilters::Common::Redis::RespValuePtr response(\n      new NetworkFilters::Common::Redis::RespValue());\n  response->type(NetworkFilters::Common::Redis::RespType::SimpleString);\n  response->asString() = \"PONG\";\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Failure, invalid auth\n  EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  response->type(NetworkFilters::Common::Redis::RespType::Error);\n  response->asString() = \"WRONGPASS invalid username-password pair\";\n  pool_callbacks_->onResponse(std::move(response));\n\n  EXPECT_CALL(*client_, close());\n\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.network_failure\").value());\n}\n\nTEST_F(RedisHealthCheckerTest, ExistsWithAuth) {\n  InSequence s;\n\n  auth_username_ = \"test user\";\n  auth_password_ = \"test password\";\n\n  setupExistsHealthcheckWithAuth();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      
Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectClientCreate();\n  expectExistsRequestCreate();\n  health_checker_->start();\n\n  client_->runHighWatermarkCallbacks();\n  client_->runLowWatermarkCallbacks();\n\n  // Success\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  NetworkFilters::Common::Redis::RespValuePtr response(\n      new NetworkFilters::Common::Redis::RespValue());\n  response->type(NetworkFilters::Common::Redis::RespType::Integer);\n  response->asInteger() = 0;\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectExistsRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Failure, invalid auth\n  EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  response->type(NetworkFilters::Common::Redis::RespType::Error);\n  response->asString() = \"WRONGPASS invalid username-password pair\";\n  pool_callbacks_->onResponse(std::move(response));\n\n  EXPECT_CALL(*client_, close());\n\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n}\n\nTEST_F(RedisHealthCheckerTest, PingAndVariousFailures) {\n  InSequence s;\n  setup();\n\n  // Exercise stubbed out interfaces for coverage.\n  exerciseStubs();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectClientCreate();\n  expectPingRequestCreate();\n  health_checker_->start();\n\n  client_->runHighWatermarkCallbacks();\n  client_->runLowWatermarkCallbacks();\n\n  // 
Success\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  NetworkFilters::Common::Redis::RespValuePtr response(\n      new NetworkFilters::Common::Redis::RespValue());\n  response->type(NetworkFilters::Common::Redis::RespType::SimpleString);\n  response->asString() = \"PONG\";\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Failure\n  EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Redis failure via disconnect\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  pool_callbacks_->onFailure();\n  client_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  expectClientCreate();\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Timeout\n  EXPECT_CALL(pool_request_, cancel());\n  EXPECT_CALL(*client_, close());\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  timeout_timer_->invokeCallback();\n\n  expectClientCreate();\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Shutdown with active request.\n  EXPECT_CALL(pool_request_, cancel());\n  EXPECT_CALL(*client_, close());\n\n  EXPECT_EQ(5UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(3UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(2UL, 
cluster_->info_->stats_store_.counter(\"health_check.network_failure\").value());\n}\n\nTEST_F(RedisHealthCheckerTest, FailuresLogging) {\n  InSequence s;\n  setupAlwaysLogHealthCheckFailures();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectClientCreate();\n  expectPingRequestCreate();\n  health_checker_->start();\n\n  client_->runHighWatermarkCallbacks();\n  client_->runLowWatermarkCallbacks();\n\n  // Success\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  NetworkFilters::Common::Redis::RespValuePtr response(\n      new NetworkFilters::Common::Redis::RespValue());\n  response->type(NetworkFilters::Common::Redis::RespType::SimpleString);\n  response->asString() = \"PONG\";\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Failure\n  EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, false));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Fail again\n  EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, false));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Shutdown with active request.\n  EXPECT_CALL(pool_request_, cancel());\n  EXPECT_CALL(*client_, close());\n\n  EXPECT_EQ(4UL, 
cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.network_failure\").value());\n}\n\nTEST_F(RedisHealthCheckerTest, LogInitialFailure) {\n  InSequence s;\n  setup();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectClientCreate();\n  expectPingRequestCreate();\n  health_checker_->start();\n\n  client_->runHighWatermarkCallbacks();\n  client_->runLowWatermarkCallbacks();\n\n  // Redis failure via disconnect\n  EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  pool_callbacks_->onFailure();\n  client_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  expectClientCreate();\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Success\n  EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  NetworkFilters::Common::Redis::RespValuePtr response(\n      new NetworkFilters::Common::Redis::RespValue());\n  response->type(NetworkFilters::Common::Redis::RespType::SimpleString);\n  response->asString() = \"PONG\";\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Shutdown with active request.\n  EXPECT_CALL(pool_request_, cancel());\n  EXPECT_CALL(*client_, close());\n\n  EXPECT_EQ(3UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(1UL, 
cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.network_failure\").value());\n}\n\nTEST_F(RedisHealthCheckerTest, DEPRECATED_FEATURE_TEST(ExistsDeprecated)) {\n  InSequence s;\n  setupExistsHealthcheckDeprecated(false);\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectClientCreate();\n  expectExistsRequestCreate();\n  health_checker_->start();\n\n  client_->runHighWatermarkCallbacks();\n  client_->runLowWatermarkCallbacks();\n\n  // Success\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  NetworkFilters::Common::Redis::RespValuePtr response(\n      new NetworkFilters::Common::Redis::RespValue());\n  response->type(NetworkFilters::Common::Redis::RespType::Integer);\n  response->asInteger() = 0;\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectExistsRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Failure, exists\n  EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  response->type(NetworkFilters::Common::Redis::RespType::Integer);\n  response->asInteger() = 1;\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectExistsRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Failure, no value\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  pool_callbacks_->onResponse(std::move(response));\n\n  EXPECT_CALL(*client_, close());\n\n  EXPECT_EQ(3UL, 
cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n}\n\nTEST_F(RedisHealthCheckerTest, Exists) {\n  InSequence s;\n  setupExistsHealthcheck();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectClientCreate();\n  expectExistsRequestCreate();\n  health_checker_->start();\n\n  client_->runHighWatermarkCallbacks();\n  client_->runLowWatermarkCallbacks();\n\n  // Success\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  NetworkFilters::Common::Redis::RespValuePtr response(\n      new NetworkFilters::Common::Redis::RespValue());\n  response->type(NetworkFilters::Common::Redis::RespType::Integer);\n  response->asInteger() = 0;\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectExistsRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Failure, exists\n  EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  response->type(NetworkFilters::Common::Redis::RespType::Integer);\n  response->asInteger() = 1;\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectExistsRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Failure, no value\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  pool_callbacks_->onResponse(std::move(response));\n\n  EXPECT_CALL(*client_, close());\n\n  EXPECT_EQ(3UL, 
cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n}\n\nTEST_F(RedisHealthCheckerTest, ExistsRedirected) {\n  InSequence s;\n  setupExistsHealthcheck();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectClientCreate();\n  expectExistsRequestCreate();\n  health_checker_->start();\n\n  client_->runHighWatermarkCallbacks();\n  client_->runLowWatermarkCallbacks();\n\n  // Success with moved redirection\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  NetworkFilters::Common::Redis::RespValuePtr moved_response{\n      new NetworkFilters::Common::Redis::RespValue()};\n  moved_response->type(NetworkFilters::Common::Redis::RespType::Error);\n  moved_response->asString() = \"MOVED 1111 127.0.0.1:81\"; // exact values not important\n  pool_callbacks_->onRedirection(std::move(moved_response), \"127.0.0.1:81\", false);\n\n  expectExistsRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Success with ask redirection\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  NetworkFilters::Common::Redis::RespValuePtr ask_response{\n      new NetworkFilters::Common::Redis::RespValue()};\n  ask_response->type(NetworkFilters::Common::Redis::RespType::Error);\n  ask_response->asString() = \"ASK 1111 127.0.0.1:81\"; // exact values not important\n  pool_callbacks_->onRedirection(std::move(ask_response), \"127.0.0.1:81\", true);\n\n  EXPECT_CALL(*client_, close());\n\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  
EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n}\n\n// Tests that redis client will behave appropriately when reuse_connection is false.\nTEST_F(RedisHealthCheckerTest, NoConnectionReuse) {\n  InSequence s;\n  setupDontReuseConnection();\n\n  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {\n      Upstream::makeTestHost(cluster_->info_, \"tcp://127.0.0.1:80\")};\n\n  expectSessionCreate();\n  expectClientCreate();\n  expectPingRequestCreate();\n  health_checker_->start();\n\n  // The connection will close on success.\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*client_, close());\n  NetworkFilters::Common::Redis::RespValuePtr response(\n      new NetworkFilters::Common::Redis::RespValue());\n  response->type(NetworkFilters::Common::Redis::RespType::SimpleString);\n  response->asString() = \"PONG\";\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectClientCreate();\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // The connection will close on failure.\n  EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _));\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  EXPECT_CALL(*client_, close());\n  response = std::make_unique<NetworkFilters::Common::Redis::RespValue>();\n  pool_callbacks_->onResponse(std::move(response));\n\n  expectClientCreate();\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Redis failure via disconnect, the connection was closed by the other end.\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  pool_callbacks_->onFailure();\n  client_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  expectClientCreate();\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Timeout, the connection will be closed.\n  EXPECT_CALL(pool_request_, 
cancel());\n  EXPECT_CALL(*client_, close());\n  EXPECT_CALL(*timeout_timer_, disableTimer());\n  EXPECT_CALL(*interval_timer_, enableTimer(_, _));\n  timeout_timer_->invokeCallback();\n\n  expectClientCreate();\n  expectPingRequestCreate();\n  interval_timer_->invokeCallback();\n\n  // Shutdown with active request.\n  EXPECT_CALL(pool_request_, cancel());\n  EXPECT_CALL(*client_, close());\n\n  // The metrics expected after all tests have run.\n  EXPECT_EQ(5UL, cluster_->info_->stats_store_.counter(\"health_check.attempt\").value());\n  EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter(\"health_check.success\").value());\n  EXPECT_EQ(3UL, cluster_->info_->stats_store_.counter(\"health_check.failure\").value());\n  EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter(\"health_check.network_failure\").value());\n}\n\n} // namespace RedisHealthChecker\n} // namespace HealthCheckers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/internal_redirect/previous_routes/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.internal_redirect_predicates.previous_routes\",\n    deps = [\n        \"//source/common/stream_info:filter_state_lib\",\n        \"//source/extensions/internal_redirect:well_known_names\",\n        \"//source/extensions/internal_redirect/previous_routes:config\",\n        \"@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/internal_redirect/previous_routes/config_test.cc",
    "content": "#include \"envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/router/internal_redirect.h\"\n\n#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"extensions/internal_redirect/previous_routes/config.h\"\n#include \"extensions/internal_redirect/well_known_names.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing namespace testing;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace InternalRedirect {\nnamespace {\n\nclass PreviousRoutesTest : public testing::Test {\nprotected:\n  PreviousRoutesTest() : filter_state_(StreamInfo::FilterState::LifeSpan::FilterChain) {\n    factory_ = Registry::FactoryRegistry<Router::InternalRedirectPredicateFactory>::getFactory(\n        InternalRedirectPredicateValues::get().PreviousRoutesPredicate);\n    config_ = factory_->createEmptyConfigProto();\n  }\n\n  StreamInfo::FilterStateImpl filter_state_;\n  Router::InternalRedirectPredicateFactory* factory_;\n  ProtobufTypes::MessagePtr config_;\n};\n\nTEST_F(PreviousRoutesTest, TargetIsOnlyTakenOnce) {\n  std::string current_route_name = \"fake_current_route\";\n  // Create the predicate for the first time. 
It should remember nothing in the\n  // filter state, so it allows the redirect.\n  {\n    auto predicate = factory_->createInternalRedirectPredicate(*config_, current_route_name);\n    ASSERT(predicate);\n\n    EXPECT_TRUE(predicate->acceptTargetRoute(filter_state_, \"route_1\", false, false));\n    // New filter state data is created with route name.\n    EXPECT_TRUE(filter_state_.hasDataWithName(\n        \"envoy.internal_redirect.previous_routes_predicate_state.fake_current_route\"));\n  }\n\n  // The second predicate should see the previously taken route.\n  {\n    auto predicate = factory_->createInternalRedirectPredicate(*config_, current_route_name);\n    ASSERT(predicate);\n\n    EXPECT_FALSE(predicate->acceptTargetRoute(filter_state_, \"route_1\", false, false));\n  }\n}\n\nTEST_F(PreviousRoutesTest, RoutesAreIndependent) {\n  // Create the predicate on route_0.\n  {\n    auto predicate = factory_->createInternalRedirectPredicate(*config_, \"route_0\");\n    ASSERT(predicate);\n\n    EXPECT_TRUE(predicate->acceptTargetRoute(filter_state_, \"route_2\", false, false));\n    // New filter state data is created with route name.\n    EXPECT_TRUE(filter_state_.hasDataWithName(\n        \"envoy.internal_redirect.previous_routes_predicate_state.route_0\"));\n  }\n\n  // The predicate created on route_1 should also allow a redirect to route_2\n  {\n    auto predicate = factory_->createInternalRedirectPredicate(*config_, \"route_1\");\n    ASSERT(predicate);\n\n    EXPECT_TRUE(predicate->acceptTargetRoute(filter_state_, \"route_2\", false, false));\n    // New filter state data is created with route name.\n    EXPECT_TRUE(filter_state_.hasDataWithName(\n        \"envoy.internal_redirect.previous_routes_predicate_state.route_1\"));\n  }\n}\n\n} // namespace\n} // namespace InternalRedirect\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"envoy_quic_alarm_test\",\n    srcs = [\"envoy_quic_alarm_test.cc\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_alarm_lib\",\n        \"//source/extensions/quic_listeners/quiche/platform:envoy_quic_clock_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_writer_test\",\n    srcs = [\"envoy_quic_writer_test.cc\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/common/network:io_socket_error_lib\",\n        \"//source/common/network:udp_packet_writer_handler_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_packet_writer_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_proof_source_test\",\n    srcs = [\"envoy_quic_proof_source_test.cc\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"@com_googlesource_quiche//:quic_core_versions_lib\",\n        
\"@com_googlesource_quiche//:quic_test_tools_test_certificates_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_proof_verifier_test\",\n    srcs = [\"envoy_quic_proof_verifier_test.cc\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"@com_googlesource_quiche//:quic_test_tools_test_certificates_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_server_stream_test\",\n    srcs = [\"envoy_quic_server_stream_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_test_utils_for_envoy_lib\",\n        \":test_utils_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_server_connection_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_server_session_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/http:stream_decoder_mock\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@com_googlesource_quiche//:quic_core_http_spdy_session_lib\",\n        \"@com_googlesource_quiche//:quic_test_tools_session_peer_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_client_stream_test\",\n    srcs = [\"envoy_quic_client_stream_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_test_utils_for_envoy_lib\",\n        \":test_utils_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib\",\n        
\"//source/extensions/quic_listeners/quiche:envoy_quic_client_connection_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_client_session_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/http:stream_decoder_mock\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@com_googlesource_quiche//:quic_core_http_spdy_session_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_server_session_test\",\n    srcs = [\"envoy_quic_server_session_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_test_utils_for_envoy_lib\",\n        \":test_proof_source_lib\",\n        \":test_utils_lib\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/extensions/quic_listeners/quiche:codec_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_server_connection_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_server_session_lib\",\n        \"//source/server:configuration_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/http:stream_decoder_mock\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:global_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"@com_googlesource_quiche//:quic_test_tools_config_peer_lib\",\n        \"@com_googlesource_quiche//:quic_test_tools_server_session_base_peer\",\n        \"@com_googlesource_quiche//:quic_test_tools_test_utils_interface_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = 
\"envoy_quic_client_session_test\",\n    srcs = [\"envoy_quic_client_session_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_test_utils_for_envoy_lib\",\n        \":test_utils_lib\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/extensions/quic_listeners/quiche:codec_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_client_connection_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_client_session_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/http:stream_decoder_mock\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"active_quic_listener_test\",\n    srcs = [\"active_quic_listener_test.cc\"],\n    tags = [\n        \"fails_on_windows\",\n        \"nofips\",\n    ],\n    deps = [\n        \":quic_test_utils_for_envoy_lib\",\n        \":test_utils_lib\",\n        \"//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib\",\n        \"//source/extensions/quic_listeners/quiche:active_quic_listener_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_utils_lib\",\n        \"//source/extensions/quic_listeners/quiche:udp_gso_batch_writer_lib\",\n        \"//source/server:configuration_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"@com_googlesource_quiche//:quic_test_tools_crypto_server_config_peer_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    
],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_dispatcher_test\",\n    srcs = [\"envoy_quic_dispatcher_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_test_utils_for_envoy_lib\",\n        \":test_proof_source_lib\",\n        \":test_utils_lib\",\n        \"//include/envoy/stats:stats_macros\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_dispatcher_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_server_session_lib\",\n        \"//source/server:configuration_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:global_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_proof_source_lib\",\n    hdrs = [\"test_proof_source.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_base_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"@com_googlesource_quiche//:quic_test_tools_test_certificates_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_proof_verifier_lib\",\n    hdrs = [\"test_proof_verifier.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_base_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_test_utils_for_envoy_lib\",\n    srcs = [\"crypto_test_utils_for_envoy.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":test_proof_source_lib\",\n        \":test_proof_verifier_lib\",\n        
\"@com_googlesource_quiche//:quic_test_tools_test_utils_interface_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"quic_io_handle_wrapper_test\",\n    srcs = [\"quic_io_handle_wrapper_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche:quic_io_handle_wrapper_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_utils_test\",\n    srcs = [\"envoy_quic_utils_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_test_utils_for_envoy_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_utils_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"active_quic_listener_config_test\",\n    srcs = [\"active_quic_listener_config_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/common/config:utility_lib\",\n        \"//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_simulated_watermark_buffer_test\",\n    srcs = [\"envoy_quic_simulated_watermark_buffer_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\"//source/extensions/quic_listeners/quiche:envoy_quic_simulated_watermark_buffer_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"test_utils_lib\",\n    hdrs = [\"test_utils.h\"],\n    external_deps = [\"bazel_runfiles\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche:quic_filter_manager_connection_lib\",\n        \"//test/test_common:environment_lib\",\n        \"@com_googlesource_quiche//:quic_core_http_spdy_session_lib\",\n        \"@com_googlesource_quiche//:quic_test_tools_first_flight_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/active_quic_listener_config_test.cc",
    "content": "#include \"common/config/utility.h\"\n\n#include \"extensions/quic_listeners/quiche/active_quic_listener.h\"\n#include \"extensions/quic_listeners/quiche/active_quic_listener_config.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nclass ActiveQuicListenerFactoryPeer {\npublic:\n  static quic::QuicConfig& quicConfig(ActiveQuicListenerFactory& factory) {\n    return factory.quic_config_;\n  }\n  static envoy::config::core::v3::RuntimeFeatureFlag&\n  runtimeEnabled(ActiveQuicListenerFactory& factory) {\n    return factory.enabled_;\n  }\n};\n\nTEST(ActiveQuicListenerConfigTest, CreateActiveQuicListenerFactory) {\n  std::string listener_name = QuicListenerName;\n  auto& config_factory =\n      Config::Utility::getAndCheckFactoryByName<Server::ActiveUdpListenerConfigFactory>(\n          listener_name);\n  ProtobufTypes::MessagePtr config = config_factory.createEmptyConfigProto();\n\n  std::string yaml = R\"EOF(\n    max_concurrent_streams: 10\n    idle_timeout: {\n      seconds: 2\n    }\n    enabled:\n      default_value: true\n      runtime_key: foo_key\n  )EOF\";\n  TestUtility::loadFromYaml(yaml, *config);\n  Network::ActiveUdpListenerFactoryPtr listener_factory =\n      config_factory.createActiveUdpListenerFactory(*config, /*concurrency=*/1);\n  EXPECT_NE(nullptr, listener_factory);\n  quic::QuicConfig& quic_config = ActiveQuicListenerFactoryPeer::quicConfig(\n      dynamic_cast<ActiveQuicListenerFactory&>(*listener_factory));\n  EXPECT_EQ(10u, quic_config.GetMaxBidirectionalStreamsToSend());\n  EXPECT_EQ(10u, quic_config.GetMaxUnidirectionalStreamsToSend());\n  EXPECT_EQ(2000u, quic_config.IdleNetworkTimeout().ToMilliseconds());\n  // Default value if not present in config.\n  EXPECT_EQ(20000u, quic_config.max_time_before_crypto_handshake().ToMilliseconds());\n  envoy::config::core::v3::RuntimeFeatureFlag& runtime_enabled =\n      ActiveQuicListenerFactoryPeer::runtimeEnabled(\n 
         dynamic_cast<ActiveQuicListenerFactory&>(*listener_factory));\n  EXPECT_EQ(true, runtime_enabled.default_value().value());\n  EXPECT_EQ(\"foo_key\", runtime_enabled.runtime_key());\n}\n\nTEST(ActiveQuicListenerConfigTest, QuicListenerFlagNotConfigured) {\n  std::string listener_name = QuicListenerName;\n  auto& config_factory =\n      Config::Utility::getAndCheckFactoryByName<Server::ActiveUdpListenerConfigFactory>(\n          listener_name);\n  ProtobufTypes::MessagePtr config = config_factory.createEmptyConfigProto();\n\n  std::string yaml = R\"EOF(\n    max_concurrent_streams: 10\n    idle_timeout: {\n      seconds: 2\n    }\n  )EOF\";\n  TestUtility::loadFromYaml(yaml, *config);\n  Network::ActiveUdpListenerFactoryPtr listener_factory =\n      config_factory.createActiveUdpListenerFactory(*config, /*concurrency=*/1);\n  EXPECT_NE(nullptr, listener_factory);\n  envoy::config::core::v3::RuntimeFeatureFlag& runtime_enabled =\n      ActiveQuicListenerFactoryPeer::runtimeEnabled(\n          dynamic_cast<ActiveQuicListenerFactory&>(*listener_factory));\n  auto& quic_config =\n      dynamic_cast<const envoy::config::listener::v3::QuicProtocolOptions&>(*config);\n  EXPECT_FALSE(runtime_enabled.has_default_value());\n  EXPECT_FALSE(quic_config.has_enabled());\n  EXPECT_EQ(\"\", runtime_enabled.runtime_key());\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/active_quic_listener_test.cc",
    "content": "#include <cstdlib>\n#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/base.pb.validate.h\"\n#include \"envoy/network/exception.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/crypto/crypto_protocol.h\"\n#include \"quiche/quic/test_tools/crypto_test_utils.h\"\n#include \"quiche/quic/test_tools/quic_dispatcher_peer.h\"\n#include \"quiche/quic/test_tools/quic_test_utils.h\"\n#include \"quiche/quic/test_tools/quic_crypto_server_config_peer.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"server/configuration_impl.h\"\n#include \"common/common/logger.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/udp_packet_writer_handler_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"extensions/quic_listeners/quiche/active_quic_listener.h\"\n#include \"test/extensions/quic_listeners/quiche/test_utils.h\"\n#include \"test/extensions/quic_listeners/quiche/test_proof_source.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"absl/time/time.h\"\n#include \"gtest/gtest.h\"\n#include \"gmock/gmock.h\"\n#include \"extensions/quic_listeners/quiche/active_quic_listener_config.h\"\n#include \"extensions/quic_listeners/quiche/platform/envoy_quic_clock.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/quic_listeners/quiche/udp_gso_batch_writer.h\"\n\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace 
Quic {\n\nclass ActiveQuicListenerPeer {\npublic:\n  static EnvoyQuicDispatcher* quicDispatcher(ActiveQuicListener& listener) {\n    return listener.quic_dispatcher_.get();\n  }\n\n  static quic::QuicCryptoServerConfig& cryptoConfig(ActiveQuicListener& listener) {\n    return *listener.crypto_config_;\n  }\n\n  static bool enabled(ActiveQuicListener& listener) { return listener.enabled_.enabled(); }\n};\n\nclass ActiveQuicListenerFactoryPeer {\npublic:\n  static envoy::config::core::v3::RuntimeFeatureFlag&\n  runtimeEnabled(ActiveQuicListenerFactory* factory) {\n    return factory->enabled_;\n  }\n};\n\nclass ActiveQuicListenerTest : public QuicMultiVersionTest {\nprotected:\n  using Socket =\n      Network::NetworkListenSocket<Network::NetworkSocketTrait<Network::Socket::Type::Datagram>>;\n\n  ActiveQuicListenerTest()\n      : version_(GetParam().first), api_(Api::createApiForTest(simulated_time_system_)),\n        dispatcher_(api_->allocateDispatcher(\"test_thread\")), clock_(*dispatcher_),\n        local_address_(Network::Test::getCanonicalLoopbackAddress(version_)),\n        connection_handler_(*dispatcher_, absl::nullopt), quic_version_([]() {\n          if (GetParam().second == QuicVersionType::GquicQuicCrypto) {\n            return quic::CurrentSupportedVersionsWithQuicCrypto();\n          }\n          bool use_http3 = GetParam().second == QuicVersionType::Iquic;\n          SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3);\n          SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3);\n          return quic::CurrentSupportedVersions();\n        }()[0]) {}\n\n  template <typename A, typename B>\n  std::unique_ptr<A> staticUniquePointerCast(std::unique_ptr<B>&& source) {\n    return std::unique_ptr<A>{static_cast<A*>(source.release())};\n  }\n\n  void SetUp() override {\n    envoy::config::bootstrap::v3::LayeredRuntime config;\n    config.add_layers()->mutable_admin_layer();\n    loader_ = 
std::make_unique<Runtime::ScopedLoaderSingleton>(\n        Runtime::LoaderPtr{new Runtime::LoaderImpl(*dispatcher_, tls_, config, local_info_, store_,\n                                                   generator_, validation_visitor_, *api_)});\n\n    listen_socket_ =\n        std::make_shared<Network::UdpListenSocket>(local_address_, nullptr, /*bind*/ true);\n    listen_socket_->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions());\n    listen_socket_->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions());\n\n    ON_CALL(listener_config_, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_));\n    ON_CALL(socket_factory_, getListenSocket()).WillByDefault(Return(listen_socket_));\n\n    // Use UdpGsoBatchWriter to perform non-batched writes for the purpose of this test, if it is\n    // supported.\n    ON_CALL(listener_config_, udpPacketWriterFactory())\n        .WillByDefault(Return(\n            std::reference_wrapper<Network::UdpPacketWriterFactory>(udp_packet_writer_factory_)));\n    ON_CALL(udp_packet_writer_factory_, createUdpPacketWriter(_, _))\n        .WillByDefault(Invoke(\n            [&](Network::IoHandle& io_handle, Stats::Scope& scope) -> Network::UdpPacketWriterPtr {\n#if UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT\n              return std::make_unique<Quic::UdpGsoBatchWriter>(io_handle, scope);\n#else\n              UNREFERENCED_PARAMETER(scope);\n              return std::make_unique<Network::UdpDefaultWriter>(io_handle);\n#endif\n            }));\n\n    listener_factory_ = createQuicListenerFactory(yamlForQuicConfig());\n    EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager_));\n    quic_listener_ =\n        staticUniquePointerCast<ActiveQuicListener>(listener_factory_->createActiveUdpListener(\n            0, connection_handler_, *dispatcher_, listener_config_));\n    quic_dispatcher_ = ActiveQuicListenerPeer::quicDispatcher(*quic_listener_);\n    
quic::QuicCryptoServerConfig& crypto_config =\n        ActiveQuicListenerPeer::cryptoConfig(*quic_listener_);\n    quic::test::QuicCryptoServerConfigPeer crypto_config_peer(&crypto_config);\n    auto proof_source = std::make_unique<TestProofSource>();\n    filter_chain_ = &proof_source->filterChain();\n    crypto_config_peer.ResetProofSource(std::move(proof_source));\n    simulated_time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), *dispatcher_,\n                                             Event::Dispatcher::RunType::NonBlock);\n\n    // The state of whether client hellos can be buffered or not is different before and after\n    // the first packet processed by the listener. This only matters in tests. Force an event\n    // to get it into a consistent state.\n    dispatcher_->post([this]() { quic_listener_->onReadReady(); });\n\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  Network::ActiveUdpListenerFactoryPtr createQuicListenerFactory(const std::string& yaml) {\n    std::string listener_name = QuicListenerName;\n    auto& config_factory =\n        Config::Utility::getAndCheckFactoryByName<Server::ActiveUdpListenerConfigFactory>(\n            listener_name);\n    ProtobufTypes::MessagePtr config_proto = config_factory.createEmptyConfigProto();\n    TestUtility::loadFromYaml(yaml, *config_proto);\n    return config_factory.createActiveUdpListenerFactory(*config_proto, /*concurrency=*/1);\n  }\n\n  void maybeConfigureMocks(int connection_count) {\n    if (quic_version_.UsesTls()) {\n      return;\n    }\n    EXPECT_CALL(listener_config_, filterChainFactory()).Times(connection_count);\n    EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _))\n        .Times(connection_count)\n        .WillRepeatedly(Invoke([](Network::Connection& connection,\n                                  const std::vector<Network::FilterFactoryCb>& filter_factories) {\n          EXPECT_EQ(1u, filter_factories.size());\n          
Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories);\n          return true;\n        }));\n    EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::Connected))\n        .Times(connection_count);\n    EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose))\n        .Times(connection_count);\n\n    testing::Sequence seq;\n    for (int i = 0; i < connection_count; ++i) {\n      auto read_filter = std::make_shared<Network::MockReadFilter>();\n      filter_factories_.push_back(\n          {Network::FilterFactoryCb([read_filter, this](Network::FilterManager& filter_manager) {\n            filter_manager.addReadFilter(read_filter);\n            read_filter->callbacks_->connection().addConnectionCallbacks(\n                network_connection_callbacks_);\n          })});\n      // Stop iteration to avoid calling getRead/WriteBuffer().\n      EXPECT_CALL(*read_filter, onNewConnection())\n          .WillOnce(Return(Network::FilterStatus::StopIteration));\n      read_filters_.push_back(std::move(read_filter));\n      // A Sequence must be used to allow multiple EXPECT_CALL().WillOnce()\n      // calls for the same object.\n      EXPECT_CALL(*filter_chain_, networkFilterFactories())\n          .InSequence(seq)\n          .WillOnce(ReturnRef(filter_factories_.back()));\n    }\n  }\n\n  void sendCHLO(quic::QuicConnectionId connection_id) {\n    client_sockets_.push_back(std::make_unique<Socket>(local_address_, nullptr, /*bind*/ false));\n    Buffer::OwnedImpl payload = generateChloPacketToSend(\n        quic_version_, quic_config_, ActiveQuicListenerPeer::cryptoConfig(*quic_listener_),\n        connection_id, clock_, envoyIpAddressToQuicSocketAddress(local_address_->ip()),\n        envoyIpAddressToQuicSocketAddress(local_address_->ip()), \"test.example.org\");\n    Buffer::RawSliceVector slice = payload.getRawSlices();\n    ASSERT_EQ(1u, slice.size());\n    // Send a full CHLO to 
finish 0-RTT handshake.\n    auto send_rc = Network::Utility::writeToSocket(client_sockets_.back()->ioHandle(), slice.data(),\n                                                   1, nullptr, *listen_socket_->localAddress());\n    ASSERT_EQ(slice[0].len_, send_rc.rc_);\n\n#if defined(__APPLE__)\n    // This sleep makes the tests pass more reliably. Some debugging showed that without this,\n    // no packet is received when the event loop is running.\n    // TODO(ggreenway): make tests more reliable, and handle packet loss during the tests, possibly\n    // by retransmitting on a timer.\n    ::usleep(1000);\n#endif\n  }\n\n  void readFromClientSockets() {\n    for (auto& client_socket : client_sockets_) {\n      Buffer::InstancePtr result_buffer(new Buffer::OwnedImpl());\n      const uint64_t bytes_to_read = 11;\n      uint64_t bytes_read = 0;\n      int retry = 0;\n\n      do {\n        Api::IoCallUint64Result result =\n            client_socket->ioHandle().read(*result_buffer, bytes_to_read - bytes_read);\n\n        if (result.ok()) {\n          bytes_read += result.rc_;\n        } else if (retry == 10 || result.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) {\n          break;\n        }\n\n        if (bytes_read == bytes_to_read) {\n          break;\n        }\n\n        retry++;\n        absl::SleepFor(absl::Milliseconds(10));\n      } while (true);\n    }\n  }\n\n  void TearDown() override {\n    if (quic_listener_ != nullptr) {\n      quic_listener_->onListenerShutdown();\n    }\n    // Trigger alarm to fire before listener destruction.\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n    Runtime::LoaderSingleton::clear();\n  }\n\nprotected:\n  virtual std::string yamlForQuicConfig() {\n    return R\"EOF(\n    enabled:\n      default_value: true\n      runtime_key: quic.enabled\n)EOF\";\n  }\n\n  Network::Address::IpVersion version_;\n  Event::SimulatedTimeSystemHelper simulated_time_system_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr 
dispatcher_;\n  EnvoyQuicClock clock_;\n  Network::Address::InstanceConstSharedPtr local_address_;\n  Network::SocketSharedPtr listen_socket_;\n  Network::SocketPtr client_socket_;\n  std::shared_ptr<Network::MockReadFilter> read_filter_;\n  Network::MockConnectionCallbacks network_connection_callbacks_;\n  NiceMock<Network::MockListenerConfig> listener_config_;\n  NiceMock<Network::MockUdpPacketWriterFactory> udp_packet_writer_factory_;\n  quic::QuicConfig quic_config_;\n  Server::ConnectionHandlerImpl connection_handler_;\n  std::unique_ptr<ActiveQuicListener> quic_listener_;\n  Network::ActiveUdpListenerFactoryPtr listener_factory_;\n  NiceMock<Network::MockListenSocketFactory> socket_factory_;\n  EnvoyQuicDispatcher* quic_dispatcher_;\n  std::unique_ptr<Runtime::ScopedLoaderSingleton> loader_;\n\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Stats::TestUtil::TestStore store_;\n  Random::MockRandomGenerator generator_;\n  Random::MockRandomGenerator rand_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  Init::MockManager init_manager_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n\n  std::list<std::unique_ptr<Socket>> client_sockets_;\n  std::list<std::shared_ptr<Network::MockReadFilter>> read_filters_;\n  Network::MockFilterChainManager filter_chain_manager_;\n  // The following two containers must guarantee pointer stability as addresses\n  // of elements are saved in expectations before new elements are added.\n  std::list<std::vector<Network::FilterFactoryCb>> filter_factories_;\n  const Network::MockFilterChain* filter_chain_;\n  quic::ParsedQuicVersion quic_version_;\n};\n\nINSTANTIATE_TEST_SUITE_P(ActiveQuicListenerTests, ActiveQuicListenerTest,\n                         testing::ValuesIn(generateTestParam()), testParamsToString);\n\nTEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) {\n  auto option = std::make_unique<Network::MockSocketOption>();\n  EXPECT_CALL(*option, setOption(_, 
envoy::config::core::v3::SocketOption::STATE_BOUND))\n      .WillOnce(Return(false));\n  auto options = std::make_shared<std::vector<Network::Socket::OptionConstSharedPtr>>();\n  options->emplace_back(std::move(option));\n  quic_listener_.reset();\n  EXPECT_THROW_WITH_REGEX(\n      (void)std::make_unique<ActiveQuicListener>(\n          0, 1, *dispatcher_, connection_handler_, listen_socket_, listener_config_, quic_config_,\n          options, false,\n          ActiveQuicListenerFactoryPeer::runtimeEnabled(\n              static_cast<ActiveQuicListenerFactory*>(listener_factory_.get()))),\n      Network::CreateListenerException, \"Failed to apply socket options.\");\n}\n\nTEST_P(ActiveQuicListenerTest, ReceiveCHLO) {\n  quic::QuicBufferedPacketStore* const buffered_packets =\n      quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_);\n  maybeConfigureMocks(/* connection_count = */ 1);\n  sendCHLO(quic::test::TestConnectionId(1));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  EXPECT_FALSE(buffered_packets->HasChlosBuffered());\n  EXPECT_FALSE(quic_dispatcher_->session_map().empty());\n  readFromClientSockets();\n}\n\nTEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) {\n  quic::QuicBufferedPacketStore* const buffered_packets =\n      quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_);\n  const uint32_t count = (ActiveQuicListener::kNumSessionsToCreatePerLoop * 2) + 1;\n  maybeConfigureMocks(count);\n\n  // Generate one more CHLO than can be processed immediately.\n  for (size_t i = 1; i <= count; ++i) {\n    sendCHLO(quic::test::TestConnectionId(i));\n  }\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  // The first kNumSessionsToCreatePerLoop were processed immediately, the next\n  // kNumSessionsToCreatePerLoop were buffered for the next run of the event loop, and the last one\n  // was buffered to the subsequent event loop.\n  EXPECT_EQ(2, quic_listener_->eventLoopsWithBufferedChlosForTest());\n\n 
 for (size_t i = 1; i <= count; ++i) {\n    EXPECT_FALSE(buffered_packets->HasBufferedPackets(quic::test::TestConnectionId(i)));\n  }\n  EXPECT_FALSE(buffered_packets->HasChlosBuffered());\n  EXPECT_FALSE(quic_dispatcher_->session_map().empty());\n  readFromClientSockets();\n}\n\nTEST_P(ActiveQuicListenerTest, QuicProcessingDisabledAndEnabled) {\n  maybeConfigureMocks(/* connection_count = */ 2);\n  EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_));\n  sendCHLO(quic::test::TestConnectionId(1));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(quic_dispatcher_->session_map().size(), 1);\n\n  Runtime::LoaderSingleton::getExisting()->mergeValues({{\"quic.enabled\", \" false\"}});\n  sendCHLO(quic::test::TestConnectionId(2));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  // If listener was enabled, there should have been session created for active connection.\n  EXPECT_EQ(quic_dispatcher_->session_map().size(), 1);\n  EXPECT_FALSE(ActiveQuicListenerPeer::enabled(*quic_listener_));\n\n  Runtime::LoaderSingleton::getExisting()->mergeValues({{\"quic.enabled\", \" true\"}});\n  sendCHLO(quic::test::TestConnectionId(2));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(quic_dispatcher_->session_map().size(), 2);\n  EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_));\n}\n\nclass ActiveQuicListenerEmptyFlagConfigTest : public ActiveQuicListenerTest {\nprotected:\n  std::string yamlForQuicConfig() override {\n    return R\"EOF(\n    max_concurrent_streams: 10\n  )EOF\";\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(ActiveQuicListenerEmptyFlagConfigTests,\n                         ActiveQuicListenerEmptyFlagConfigTest,\n                         testing::ValuesIn(generateTestParam()), testParamsToString);\n\n// Quic listener should be enabled by default, if not enabled explicitly in config.\nTEST_P(ActiveQuicListenerEmptyFlagConfigTest, ReceiveFullQuicCHLO) {\n  quic::QuicBufferedPacketStore* const 
buffered_packets =\n      quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_);\n  maybeConfigureMocks(/* connection_count = */ 1);\n  sendCHLO(quic::test::TestConnectionId(1));\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  EXPECT_FALSE(buffered_packets->HasChlosBuffered());\n  EXPECT_FALSE(quic_dispatcher_->session_map().empty());\n  EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_));\n  readFromClientSockets();\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file defines platform dependent test utility functions which is declared\n// in quiche/quic/test_tools/crypto_test_utils.h.\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#pragma GCC diagnostic ignored \"-Wtype-limits\"\n#endif\n\n#include \"quiche/quic/test_tools/crypto_test_utils.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include <memory>\n#include \"test/extensions/quic_listeners/quiche/test_proof_verifier.h\"\n#include \"test/extensions/quic_listeners/quiche/test_proof_source.h\"\n\nnamespace quic {\nnamespace test {\nnamespace crypto_test_utils {\n// NOLINTNEXTLINE(readability-identifier-naming)\nstd::unique_ptr<ProofSource> ProofSourceForTesting() {\n  return std::make_unique<Envoy::Quic::TestProofSource>();\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstd::unique_ptr<ProofVerifier> ProofVerifierForTesting() {\n  return std::make_unique<Envoy::Quic::TestProofVerifier>();\n}\n\n// NOLINTNEXTLINE(readability-identifier-naming)\nstd::unique_ptr<ProofVerifyContext> ProofVerifyContextForTesting() {\n  // No context needed for fake verifier.\n  return nullptr;\n}\n\n} // namespace crypto_test_utils\n} // namespace test\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_alarm.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h\"\n#include \"extensions/quic_listeners/quiche/platform/envoy_quic_clock.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing Envoy::Event::Dispatcher;\nusing quic::QuicTime;\n\nnamespace Envoy {\nnamespace Quic {\n\nclass TestDelegate : public quic::QuicAlarm::Delegate {\npublic:\n  TestDelegate() = default;\n\n  // quic::QuicAlarm::Delegate\n  void OnAlarm() override { fired_ = true; }\n\n  bool fired() const { return fired_; }\n  void set_fired(bool fired) { fired_ = fired; }\n\nprivate:\n  bool fired_{false};\n};\n\nclass EnvoyQuicAlarmTest : public ::testing::Test {\npublic:\n  EnvoyQuicAlarmTest()\n      : api_(Api::createApiForTest(time_system_)),\n        dispatcher_(api_->allocateDispatcher(\"test_thread\")), clock_(*dispatcher_),\n        alarm_factory_(*dispatcher_, clock_) {}\n\n  void advanceMsAndLoop(int64_t delay_ms) {\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(delay_ms), *dispatcher_,\n                                   Dispatcher::RunType::NonBlock);\n  }\n\nprotected:\n  Event::SimulatedTimeSystemHelper time_system_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  EnvoyQuicClock clock_;\n  EnvoyQuicAlarmFactory alarm_factory_;\n  quic::QuicConnectionArena arena_;\n};\n\nTEST_F(EnvoyQuicAlarmTest, CreateAlarmByFactory) {\n  auto unowned_delegate = new TestDelegate();\n  quic::QuicAlarm* alarm = alarm_factory_.CreateAlarm(unowned_delegate);\n  alarm->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  // Advance 9us, alarm shouldn't fire.\n  advanceMsAndLoop(9);\n  EXPECT_FALSE(unowned_delegate->fired());\n  // Advance 1us, alarm should have fired.\n  advanceMsAndLoop(1);\n  EXPECT_TRUE(unowned_delegate->fired());\n  delete alarm;\n\n  
unowned_delegate = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm_ptr = alarm_factory_.CreateAlarm(\n      quic::QuicArenaScopedPtr<quic::QuicAlarm::Delegate>(unowned_delegate), &arena_);\n  EXPECT_FALSE(alarm_ptr->IsSet());\n  unowned_delegate = new TestDelegate();\n  alarm_ptr = alarm_factory_.CreateAlarm(\n      quic::QuicArenaScopedPtr<quic::QuicAlarm::Delegate>(unowned_delegate), nullptr);\n  EXPECT_FALSE(alarm_ptr->IsSet());\n}\n\nTEST_F(EnvoyQuicAlarmTest, CreateAlarmAndCancel) {\n  auto unowned_delegate1 = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm1(alarm_factory_.CreateAlarm(unowned_delegate1));\n  alarm1->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  EXPECT_TRUE(alarm1->IsSet());\n  auto unowned_delegate2 = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm2(alarm_factory_.CreateAlarm(unowned_delegate2));\n  alarm2->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  EXPECT_TRUE(alarm2->IsSet());\n\n  alarm1->Cancel();\n  EXPECT_FALSE(alarm1->IsSet());\n  // Advance 10us, alarm1 shouldn't fire, but alarm2 should.\n  advanceMsAndLoop(10);\n  EXPECT_TRUE(unowned_delegate2->fired());\n  EXPECT_FALSE(unowned_delegate1->fired());\n}\n\nTEST_F(EnvoyQuicAlarmTest, CreateAlarmAndReset) {\n  auto unowned_delegate1 = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm1(alarm_factory_.CreateAlarm(unowned_delegate1));\n  alarm1->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  auto unowned_delegate2 = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm2(alarm_factory_.CreateAlarm(unowned_delegate2));\n  alarm2->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  EXPECT_TRUE(alarm2->IsSet());\n\n  // Reset alarm1 to a different deadline.\n  alarm1->Cancel();\n  alarm1->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(5));\n  // Advance 9us, alarm1 should have fired but alarm2 shouldn't.\n  
advanceMsAndLoop(9);\n  EXPECT_TRUE(unowned_delegate1->fired());\n  EXPECT_FALSE(unowned_delegate2->fired());\n\n  advanceMsAndLoop(1);\n  EXPECT_TRUE(unowned_delegate2->fired());\n}\n\nTEST_F(EnvoyQuicAlarmTest, CreateAlarmAndUpdate) {\n  auto unowned_delegate1 = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm1(alarm_factory_.CreateAlarm(unowned_delegate1));\n  alarm1->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  auto unowned_delegate2 = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm2(alarm_factory_.CreateAlarm(unowned_delegate2));\n  alarm2->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  EXPECT_TRUE(alarm2->IsSet());\n\n  // Update alarm1 to an earlier deadline.\n  alarm1->Update(clock_.Now() + QuicTime::Delta::FromMilliseconds(5),\n                 quic::QuicTime::Delta::Zero());\n  // Advance 9us, alarm1 should have fired but alarm2 shouldn't.\n  advanceMsAndLoop(9);\n  EXPECT_TRUE(unowned_delegate1->fired());\n  EXPECT_FALSE(unowned_delegate2->fired());\n\n  advanceMsAndLoop(1);\n  EXPECT_TRUE(unowned_delegate2->fired());\n}\n\nTEST_F(EnvoyQuicAlarmTest, PostponeDeadline) {\n  auto unowned_delegate = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm(alarm_factory_.CreateAlarm(unowned_delegate));\n  alarm->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  advanceMsAndLoop(9);\n  EXPECT_FALSE(unowned_delegate->fired());\n  // Postpone deadline to a later time.\n  alarm->Update(clock_.Now() + QuicTime::Delta::FromMilliseconds(5), quic::QuicTime::Delta::Zero());\n  advanceMsAndLoop(1);\n  EXPECT_EQ(10, (clock_.Now() - quic::QuicTime::Zero()).ToMilliseconds());\n  // alarm shouldn't fire at old deadline.\n  EXPECT_FALSE(unowned_delegate->fired());\n\n  advanceMsAndLoop(4);\n  // alarm should fire at new deadline.\n  EXPECT_TRUE(unowned_delegate->fired());\n}\n\nTEST_F(EnvoyQuicAlarmTest, SetAlarmToPastTime) {\n  advanceMsAndLoop(100);\n  EXPECT_EQ(100, 
(clock_.Now() - quic::QuicTime::Zero()).ToMilliseconds());\n  auto unowned_delegate = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm(alarm_factory_.CreateAlarm(unowned_delegate));\n  // Alarm will be active 1ms after Update() for the purpose of avoiding firing\n  // in the same event loop.\n  alarm->Set(clock_.Now() - QuicTime::Delta::FromMilliseconds(10));\n  EXPECT_FALSE(unowned_delegate->fired());\n  advanceMsAndLoop(1);\n  EXPECT_TRUE(unowned_delegate->fired());\n}\n\nTEST_F(EnvoyQuicAlarmTest, UpdateAlarmWithPastDeadline) {\n  auto unowned_delegate = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm(alarm_factory_.CreateAlarm(unowned_delegate));\n  alarm->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  advanceMsAndLoop(9);\n  EXPECT_EQ(9, (clock_.Now() - quic::QuicTime::Zero()).ToMilliseconds());\n  EXPECT_FALSE(unowned_delegate->fired());\n  // Alarm will be active 1ms after Update() for the purpose of avoiding firing\n  // in the same event loop.\n  alarm->Update(clock_.Now() - QuicTime::Delta::FromMilliseconds(1), quic::QuicTime::Delta::Zero());\n  advanceMsAndLoop(1);\n  EXPECT_TRUE(unowned_delegate->fired());\n  unowned_delegate->set_fired(false);\n  advanceMsAndLoop(1);\n  // alarm shouldn't fire at the original deadline.\n  EXPECT_FALSE(unowned_delegate->fired());\n}\n\nTEST_F(EnvoyQuicAlarmTest, CancelActiveAlarm) {\n  advanceMsAndLoop(100);\n  EXPECT_EQ(100, (clock_.Now() - quic::QuicTime::Zero()).ToMilliseconds());\n  auto unowned_delegate = new TestDelegate();\n  quic::QuicArenaScopedPtr<quic::QuicAlarm> alarm(alarm_factory_.CreateAlarm(unowned_delegate));\n  // alarm becomes active upon Set().\n  alarm->Set(clock_.Now() - QuicTime::Delta::FromMilliseconds(10));\n  alarm->Cancel();\n  dispatcher_->run(Dispatcher::RunType::NonBlock);\n  EXPECT_FALSE(unowned_delegate->fired());\n}\n\nTEST_F(EnvoyQuicAlarmTest, CancelUponDestruction) {\n  auto unowned_delegate = new TestDelegate();\n  
quic::QuicAlarm* alarm = alarm_factory_.CreateAlarm(unowned_delegate);\n  // alarm becomes active upon Set().\n  alarm->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));\n  // delegate should be destroyed with alarm.\n  delete alarm;\n  // alarm firing callback should have been cancelled, otherwise the delegate\n  // would be used after free.\n  advanceMsAndLoop(10);\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc",
    "content": "#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/crypto/null_encrypter.h\"\n#include \"quiche/quic/test_tools/crypto_test_utils.h\"\n#include \"quiche/quic/test_tools/quic_test_utils.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_session.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_connection.h\"\n#include \"extensions/quic_listeners/quiche/codec_impl.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection_helper.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"test/extensions/quic_listeners/quiche/test_utils.h\"\n\n#include \"envoy/stats/stats_macros.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/stream_decoder.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Quic {\n\nclass TestEnvoyQuicClientConnection : public EnvoyQuicClientConnection {\npublic:\n  TestEnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id,\n                                quic::QuicConnectionHelperInterface& helper,\n                                quic::QuicAlarmFactory& alarm_factory,\n                                quic::QuicPacketWriter& writer,\n                                const quic::ParsedQuicVersionVector& supported_versions,\n                                Event::Dispatcher& dispatcher,\n                                
Network::ConnectionSocketPtr&& connection_socket)\n      : EnvoyQuicClientConnection(server_connection_id, helper, alarm_factory, &writer, false,\n                                  supported_versions, dispatcher, std::move(connection_socket)) {\n    SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE);\n    SetEncrypter(quic::ENCRYPTION_FORWARD_SECURE,\n                 std::make_unique<quic::NullEncrypter>(quic::Perspective::IS_CLIENT));\n  }\n\n  MOCK_METHOD(void, SendConnectionClosePacket, (quic::QuicErrorCode, const std::string&));\n  MOCK_METHOD(bool, SendControlFrame, (const quic::QuicFrame& frame));\n\n  using EnvoyQuicClientConnection::connectionStats;\n};\n\nclass TestQuicCryptoClientStream : public quic::QuicCryptoClientStream {\npublic:\n  TestQuicCryptoClientStream(const quic::QuicServerId& server_id, quic::QuicSession* session,\n                             std::unique_ptr<quic::ProofVerifyContext> verify_context,\n                             quic::QuicCryptoClientConfig* crypto_config,\n                             ProofHandler* proof_handler, bool has_application_state)\n      : quic::QuicCryptoClientStream(server_id, session, std::move(verify_context), crypto_config,\n                                     proof_handler, has_application_state) {}\n\n  bool encryption_established() const override { return true; }\n};\n\nclass TestEnvoyQuicClientSession : public EnvoyQuicClientSession {\npublic:\n  TestEnvoyQuicClientSession(const quic::QuicConfig& config,\n                             const quic::ParsedQuicVersionVector& supported_versions,\n                             std::unique_ptr<EnvoyQuicClientConnection> connection,\n                             const quic::QuicServerId& server_id,\n                             quic::QuicCryptoClientConfig* crypto_config,\n                             quic::QuicClientPushPromiseIndex* push_promise_index,\n                             Event::Dispatcher& dispatcher, uint32_t send_buffer_limit)\n      : 
EnvoyQuicClientSession(config, supported_versions, std::move(connection), server_id,\n                               crypto_config, push_promise_index, dispatcher, send_buffer_limit) {}\n\n  std::unique_ptr<quic::QuicCryptoClientStreamBase> CreateQuicCryptoStream() override {\n    return std::make_unique<TestQuicCryptoClientStream>(\n        server_id(), this, crypto_config()->proof_verifier()->CreateDefaultContext(),\n        crypto_config(), this, true);\n  }\n};\n\nclass EnvoyQuicClientSessionTest : public testing::TestWithParam<bool> {\npublic:\n  EnvoyQuicClientSessionTest()\n      : api_(Api::createApiForTest(time_system_)),\n        dispatcher_(api_->allocateDispatcher(\"test_thread\")), connection_helper_(*dispatcher_),\n        alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() {\n          SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam());\n          SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam());\n          return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0);\n        }()),\n        peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(),\n                                                        12345)),\n        self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(),\n                                                        54321)),\n        quic_connection_(new TestEnvoyQuicClientConnection(\n            quic::test::TestConnectionId(), connection_helper_, alarm_factory_, writer_,\n            quic_version_, *dispatcher_, createConnectionSocket(peer_addr_, self_addr_, nullptr))),\n        crypto_config_(quic::test::crypto_test_utils::ProofVerifierForTesting()),\n        envoy_quic_session_(quic_config_, quic_version_,\n                            std::unique_ptr<TestEnvoyQuicClientConnection>(quic_connection_),\n                            quic::QuicServerId(\"example.com\", 443, false), 
&crypto_config_, nullptr,\n                            *dispatcher_,\n                            /*send_buffer_limit*/ 1024 * 1024),\n        http_connection_(envoy_quic_session_, http_connection_callbacks_) {\n    EXPECT_EQ(time_system_.systemTime(), envoy_quic_session_.streamInfo().startTime());\n    EXPECT_EQ(EMPTY_STRING, envoy_quic_session_.nextProtocol());\n    EXPECT_EQ(Http::Protocol::Http3, http_connection_.protocol());\n\n    time_system_.advanceTimeWait(std::chrono::milliseconds(1));\n    ON_CALL(writer_, WritePacket(_, _, _, _, _))\n        .WillByDefault(testing::Return(quic::WriteResult(quic::WRITE_STATUS_OK, 1)));\n  }\n\n  void SetUp() override {\n    envoy_quic_session_.Initialize();\n    setQuicConfigWithDefaultValues(envoy_quic_session_.config());\n    envoy_quic_session_.OnConfigNegotiated();\n    envoy_quic_session_.addConnectionCallbacks(network_connection_callbacks_);\n    envoy_quic_session_.setConnectionStats(\n        {read_total_, read_current_, write_total_, write_current_, nullptr, nullptr});\n    EXPECT_EQ(&read_total_, &quic_connection_->connectionStats().read_total_);\n  }\n\n  void TearDown() override {\n    if (quic_connection_->connected()) {\n      EXPECT_CALL(*quic_connection_,\n                  SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n      EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n      envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush);\n    }\n  }\n\n  EnvoyQuicClientStream& sendGetRequest(Http::ResponseDecoder& response_decoder,\n                                        Http::StreamCallbacks& stream_callbacks) {\n    auto& stream =\n        dynamic_cast<EnvoyQuicClientStream&>(http_connection_.newStream(response_decoder));\n    stream.getStream().addCallbacks(stream_callbacks);\n\n    std::string host(\"www.abc.com\");\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":authority\", host}, {\":method\", 
\"GET\"}, {\":path\", \"/\"}};\n    stream.encodeHeaders(request_headers, true);\n    return stream;\n  }\n\nprotected:\n  Event::SimulatedTimeSystemHelper time_system_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  EnvoyQuicConnectionHelper connection_helper_;\n  EnvoyQuicAlarmFactory alarm_factory_;\n  quic::ParsedQuicVersionVector quic_version_;\n  testing::NiceMock<quic::test::MockPacketWriter> writer_;\n  Network::Address::InstanceConstSharedPtr peer_addr_;\n  Network::Address::InstanceConstSharedPtr self_addr_;\n  TestEnvoyQuicClientConnection* quic_connection_;\n  quic::QuicConfig quic_config_;\n  quic::QuicCryptoClientConfig crypto_config_;\n  TestEnvoyQuicClientSession envoy_quic_session_;\n  Network::MockConnectionCallbacks network_connection_callbacks_;\n  Http::MockServerConnectionCallbacks http_connection_callbacks_;\n  testing::StrictMock<Stats::MockCounter> read_total_;\n  testing::StrictMock<Stats::MockGauge> read_current_;\n  testing::StrictMock<Stats::MockCounter> write_total_;\n  testing::StrictMock<Stats::MockGauge> write_current_;\n  QuicHttpClientConnectionImpl http_connection_;\n};\n\nINSTANTIATE_TEST_SUITE_P(EnvoyQuicClientSessionTests, EnvoyQuicClientSessionTest,\n                         testing::ValuesIn({true, false}));\n\nTEST_P(EnvoyQuicClientSessionTest, NewStream) {\n  Http::MockResponseDecoder response_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks);\n\n  quic::QuicHeaderList headers;\n  headers.OnHeaderBlockStart();\n  headers.OnHeader(\":status\", \"200\");\n  headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);\n  // Response headers should be propagated to decoder.\n  EXPECT_CALL(response_decoder, decodeHeaders_(_, /*end_stream=*/true))\n      .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& decoded_headers, bool) {\n        EXPECT_EQ(\"200\", decoded_headers->getStatusValue());\n 
     }));\n  stream.OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers);\n}\n\nTEST_P(EnvoyQuicClientSessionTest, OnResetFrame) {\n  Http::MockResponseDecoder response_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks);\n\n  // G-QUIC or IETF bi-directional stream.\n  quic::QuicStreamId stream_id = stream.id();\n  quic::QuicRstStreamFrame rst1(/*control_frame_id=*/1u, stream_id,\n                                quic::QUIC_ERROR_PROCESSING_STREAM, /*bytes_written=*/0u);\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::RemoteReset, _));\n  stream.OnStreamReset(rst1);\n}\n\nTEST_P(EnvoyQuicClientSessionTest, OnGoAwayFrame) {\n  Http::MockResponseDecoder response_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n\n  EXPECT_CALL(http_connection_callbacks_, onGoAway(Http::GoAwayErrorCode::NoError));\n  if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) {\n    envoy_quic_session_.OnHttp3GoAway(4u);\n  } else {\n    quic::QuicGoAwayFrame goaway;\n    quic_connection_->OnGoAwayFrame(goaway);\n  }\n}\n\nTEST_P(EnvoyQuicClientSessionTest, ConnectionClose) {\n  std::string error_details(\"dummy details\");\n  quic::QuicErrorCode error(quic::QUIC_INVALID_FRAME_DATA);\n  quic::QuicConnectionCloseFrame frame(quic_version_[0].transport_version, error, error_details,\n                                       /* transport_close_frame_type = */ 0);\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose));\n  quic_connection_->OnConnectionCloseFrame(frame);\n  EXPECT_EQ(absl::StrCat(quic::QuicErrorCodeToString(error), \" with details: \", error_details),\n            envoy_quic_session_.transportFailureReason());\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n}\n\nTEST_P(EnvoyQuicClientSessionTest, ConnectionCloseWithActiveStream) {\n  
Http::MockResponseDecoder response_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks);\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));\n  envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush);\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n  EXPECT_TRUE(stream.write_side_closed() && stream.reading_stopped());\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_connection.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_stream.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection_helper.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\n#include \"test/extensions/quic_listeners/quiche/test_utils.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/http/stream_decoder.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nusing testing::_;\nusing testing::Invoke;\n\nclass EnvoyQuicClientStreamTest : public testing::TestWithParam<bool> {\npublic:\n  EnvoyQuicClientStreamTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")),\n        connection_helper_(*dispatcher_),\n        alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() {\n          SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam());\n          SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam());\n          return quic::CurrentSupportedVersions()[0];\n        }()),\n        peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(),\n                                                        12345)),\n        self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(),\n                                                        54321)),\n        quic_connection_(new EnvoyQuicClientConnection(\n            quic::test::TestConnectionId(), connection_helper_, alarm_factory_, &writer_,\n            /*owns_writer=*/false, {quic_version_}, *dispatcher_,\n            createConnectionSocket(peer_addr_, self_addr_, nullptr))),\n        quic_session_(quic_config_, 
{quic_version_}, quic_connection_, *dispatcher_,\n                      quic_config_.GetInitialStreamFlowControlWindowToSend() * 2),\n        stream_id_(quic::VersionUsesHttp3(quic_version_.transport_version) ? 4u : 5u),\n        quic_stream_(new EnvoyQuicClientStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL)),\n        request_headers_{{\":authority\", host_}, {\":method\", \"POST\"}, {\":path\", \"/\"}},\n        request_trailers_{{\"trailer-key\", \"trailer-value\"}} {\n    quic_stream_->setResponseDecoder(stream_decoder_);\n    quic_stream_->addCallbacks(stream_callbacks_);\n    quic_session_.ActivateStream(std::unique_ptr<EnvoyQuicClientStream>(quic_stream_));\n    EXPECT_CALL(quic_session_, ShouldYield(_)).WillRepeatedly(testing::Return(false));\n    EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n        .WillRepeatedly(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset,\n                                  quic::StreamSendingState state, bool,\n                                  quiche::QuicheOptional<quic::EncryptionLevel>) {\n          return quic::QuicConsumedData{write_length, state != quic::NO_FIN};\n        }));\n    EXPECT_CALL(writer_, WritePacket(_, _, _, _, _))\n        .WillRepeatedly(Invoke([](const char*, size_t buf_len, const quic::QuicIpAddress&,\n                                  const quic::QuicSocketAddress&, quic::PerPacketOptions*) {\n          return quic::WriteResult{quic::WRITE_STATUS_OK, static_cast<int>(buf_len)};\n        }));\n  }\n\n  void SetUp() override {\n    quic_session_.Initialize();\n    setQuicConfigWithDefaultValues(quic_session_.config());\n    quic_session_.OnConfigNegotiated();\n    quic_connection_->setUpConnectionSocket();\n    response_headers_.OnHeaderBlockStart();\n    response_headers_.OnHeader(\":status\", \"200\");\n    response_headers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0,\n                                       /*compressed_header_bytes=*/0);\n\n    
trailers_.OnHeaderBlockStart();\n    trailers_.OnHeader(\"key1\", \"value1\");\n    if (!quic::VersionUsesHttp3(quic_version_.transport_version)) {\n      // \":final-offset\" is required and stripped off by quic.\n      trailers_.OnHeader(\":final-offset\", absl::StrCat(\"\", response_body_.length()));\n    }\n    trailers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);\n  }\n\n  void TearDown() override {\n    if (quic_connection_->connected()) {\n      quic_connection_->CloseConnection(\n          quic::QUIC_NO_ERROR, \"Closed by application\",\n          quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);\n    }\n  }\n\nprotected:\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  EnvoyQuicConnectionHelper connection_helper_;\n  EnvoyQuicAlarmFactory alarm_factory_;\n  testing::NiceMock<quic::test::MockPacketWriter> writer_;\n  quic::ParsedQuicVersion quic_version_;\n  quic::QuicConfig quic_config_;\n  Network::Address::InstanceConstSharedPtr peer_addr_;\n  Network::Address::InstanceConstSharedPtr self_addr_;\n  EnvoyQuicClientConnection* quic_connection_;\n  MockEnvoyQuicClientSession quic_session_;\n  quic::QuicStreamId stream_id_;\n  EnvoyQuicClientStream* quic_stream_;\n  Http::MockResponseDecoder stream_decoder_;\n  Http::MockStreamCallbacks stream_callbacks_;\n  std::string host_{\"www.abc.com\"};\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Http::TestRequestTrailerMapImpl request_trailers_;\n  quic::QuicHeaderList response_headers_;\n  quic::QuicHeaderList trailers_;\n  Buffer::OwnedImpl request_body_{\"Hello world\"};\n  std::string response_body_{\"OK\\n\"};\n};\n\nINSTANTIATE_TEST_SUITE_P(EnvoyQuicClientStreamTests, EnvoyQuicClientStreamTest,\n                         testing::ValuesIn({true, false}));\n\nTEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) {\n  EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions());\n  quic_stream_->encodeHeaders(request_headers_, 
false);\n  quic_stream_->encodeData(request_body_, false);\n  quic_stream_->encodeTrailers(request_trailers_);\n\n  EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false))\n      .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) {\n        EXPECT_EQ(\"200\", headers->getStatusValue());\n      }));\n  quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(),\n                                   response_headers_);\n  EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());\n\n  EXPECT_CALL(stream_decoder_, decodeData(_, _))\n      .Times(testing::AtMost(2))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool finished_reading) {\n        EXPECT_EQ(response_body_, buffer.toString());\n        EXPECT_FALSE(finished_reading);\n      }))\n      // Depends on QUIC version, there may be an empty STREAM_FRAME with FIN. But\n      // since there is trailers, finished_reading should always be false.\n      .WillOnce(Invoke([](Buffer::Instance& buffer, bool finished_reading) {\n        EXPECT_FALSE(finished_reading);\n        EXPECT_EQ(0, buffer.length());\n      }));\n  std::string data = response_body_;\n  if (quic::VersionUsesHttp3(quic_version_.transport_version)) {\n    std::unique_ptr<char[]> data_buffer;\n    quic::QuicByteCount data_frame_header_length =\n        quic::HttpEncoder::SerializeDataFrameHeader(response_body_.length(), &data_buffer);\n    quiche::QuicheStringPiece data_frame_header(data_buffer.get(), data_frame_header_length);\n    data = absl::StrCat(data_frame_header, response_body_);\n  }\n  quic::QuicStreamFrame frame(stream_id_, false, 0, data);\n  quic_stream_->OnStreamFrame(frame);\n\n  EXPECT_CALL(stream_decoder_, decodeTrailers_(_))\n      .WillOnce(Invoke([](const Http::ResponseTrailerMapPtr& headers) {\n        Http::LowerCaseString key1(\"key1\");\n        Http::LowerCaseString key2(\":final-offset\");\n        EXPECT_EQ(\"value1\", 
headers->get(key1)->value().getStringView());\n        EXPECT_EQ(nullptr, headers->get(key2));\n      }));\n  quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_);\n}\n\nTEST_P(EnvoyQuicClientStreamTest, OutOfOrderTrailers) {\n  if (quic::VersionUsesHttp3(quic_version_.transport_version)) {\n    EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n    return;\n  }\n  quic_stream_->encodeHeaders(request_headers_, true);\n  EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false))\n      .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) {\n        EXPECT_EQ(\"200\", headers->getStatusValue());\n      }));\n  quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(),\n                                   response_headers_);\n  EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());\n\n  // Trailer should be delivered to HCM later after body arrives.\n  quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_);\n\n  std::string data = response_body_;\n  if (quic::VersionUsesHttp3(quic_version_.transport_version)) {\n    std::unique_ptr<char[]> data_buffer;\n    quic::QuicByteCount data_frame_header_length =\n        quic::HttpEncoder::SerializeDataFrameHeader(response_body_.length(), &data_buffer);\n    quiche::QuicheStringPiece data_frame_header(data_buffer.get(), data_frame_header_length);\n    data = absl::StrCat(data_frame_header, response_body_);\n  }\n  quic::QuicStreamFrame frame(stream_id_, false, 0, data);\n  EXPECT_CALL(stream_decoder_, decodeData(_, _))\n      .Times(testing::AtMost(2))\n      .WillOnce(Invoke([this](Buffer::Instance& buffer, bool finished_reading) {\n        EXPECT_EQ(response_body_, buffer.toString());\n        EXPECT_FALSE(finished_reading);\n      }))\n      // Depends on QUIC version, there may be an empty STREAM_FRAME with FIN. 
But\n      // since there is trailers, finished_reading should always be false.\n      .WillOnce(Invoke([](Buffer::Instance& buffer, bool finished_reading) {\n        EXPECT_FALSE(finished_reading);\n        EXPECT_EQ(0, buffer.length());\n      }));\n\n  EXPECT_CALL(stream_decoder_, decodeTrailers_(_))\n      .WillOnce(Invoke([](const Http::ResponseTrailerMapPtr& headers) {\n        Http::LowerCaseString key1(\"key1\");\n        Http::LowerCaseString key2(\":final-offset\");\n        EXPECT_EQ(\"value1\", headers->get(key1)->value().getStringView());\n        EXPECT_EQ(nullptr, headers->get(key2));\n      }));\n  quic_stream_->OnStreamFrame(frame);\n}\n\nTEST_P(EnvoyQuicClientStreamTest, WatermarkSendBuffer) {\n  // Bump connection flow control window large enough not to cause connection\n  // level flow control blocked.\n  quic::QuicWindowUpdateFrame window_update(\n      quic::kInvalidControlFrameId,\n      quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024);\n  quic_session_.OnWindowUpdateFrame(window_update);\n\n  request_headers_.addCopy(\":content-length\", \"32770\"); // 32KB + 2 byte\n  quic_stream_->encodeHeaders(request_headers_, /*end_stream=*/false);\n  // Encode 32kB request body. first 16KB should be written out right away. The\n  // rest should be buffered. 
The high watermark is 16KB, so this call should\n  // make the send buffer reach its high watermark.\n  std::string request(32 * 1024 + 1, 'a');\n  Buffer::OwnedImpl buffer(request);\n  EXPECT_CALL(stream_callbacks_, onAboveWriteBufferHighWatermark());\n  quic_stream_->encodeData(buffer, false);\n\n  EXPECT_EQ(0u, buffer.length());\n  EXPECT_TRUE(quic_stream_->IsFlowControlBlocked());\n\n  // Receive a WINDOW_UPDATE frame not large enough to drain half of the send\n  // buffer.\n  quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(),\n                                             16 * 1024 + 8 * 1024);\n  quic_stream_->OnWindowUpdateFrame(window_update1);\n  EXPECT_FALSE(quic_stream_->IsFlowControlBlocked());\n  quic_session_.OnCanWrite();\n  EXPECT_TRUE(quic_stream_->IsFlowControlBlocked());\n\n  // Receive another WINDOW_UPDATE frame to drain the send buffer till below low\n  // watermark.\n  quic::QuicWindowUpdateFrame window_update2(quic::kInvalidControlFrameId, quic_stream_->id(),\n                                             16 * 1024 + 8 * 1024 + 1024);\n  quic_stream_->OnWindowUpdateFrame(window_update2);\n  EXPECT_FALSE(quic_stream_->IsFlowControlBlocked());\n  EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([this]() {\n    std::string rest_request(1, 'a');\n    Buffer::OwnedImpl buffer(rest_request);\n    quic_stream_->encodeData(buffer, true);\n  }));\n  quic_session_.OnCanWrite();\n  EXPECT_TRUE(quic_stream_->IsFlowControlBlocked());\n\n  quic::QuicWindowUpdateFrame window_update3(quic::kInvalidControlFrameId, quic_stream_->id(),\n                                             32 * 1024 + 1024);\n  quic_stream_->OnWindowUpdateFrame(window_update3);\n  quic_session_.OnCanWrite();\n\n  EXPECT_TRUE(quic_stream_->local_end_stream_);\n  EXPECT_TRUE(quic_stream_->write_side_closed());\n  EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n}\n\n// Tests that headers and trailers buffered in 
send buffer contribute towards buffer watermark\n// limits. Only IETF QUIC writes them on data stream, gQUIC writes them on dedicated headers stream\n// and only contributes to connection watermark buffer.\nTEST_P(EnvoyQuicClientStreamTest, HeadersContributeToWatermarkIquic) {\n  if (!quic::VersionUsesHttp3(quic_version_.transport_version)) {\n    EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n    return;\n  }\n\n  // Bump connection flow control window large enough not to cause connection level flow control\n  // blocked\n  quic::QuicWindowUpdateFrame window_update(\n      quic::kInvalidControlFrameId,\n      quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024);\n  quic_session_.OnWindowUpdateFrame(window_update);\n\n  // Make the stream blocked by congestion control.\n  EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n      .WillOnce(Invoke([](quic::QuicStreamId, size_t /*write_length*/, quic::QuicStreamOffset,\n                          quic::StreamSendingState state, bool,\n                          quiche::QuicheOptional<quic::EncryptionLevel>) {\n        return quic::QuicConsumedData{0u, state != quic::NO_FIN};\n      }));\n  quic_stream_->encodeHeaders(request_headers_, /*end_stream=*/false);\n\n  // Encode 16kB -10 bytes request body. 
Because the high watermark is 16KB, with previously\n  // buffered headers, this call should make the send buffers reach their high watermark.\n  std::string request(16 * 1024 - 10, 'a');\n  Buffer::OwnedImpl buffer(request);\n  EXPECT_CALL(stream_callbacks_, onAboveWriteBufferHighWatermark());\n  quic_stream_->encodeData(buffer, false);\n  EXPECT_EQ(0u, buffer.length());\n\n  // Unblock writing now, and this will write out 16kB data and cause stream to\n  // be blocked by the flow control limit.\n  EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n      .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset,\n                          quic::StreamSendingState state, bool,\n                          quiche::QuicheOptional<quic::EncryptionLevel>) {\n        return quic::QuicConsumedData{write_length, state != quic::NO_FIN};\n      }));\n  EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark());\n  quic_session_.OnCanWrite();\n  EXPECT_TRUE(quic_stream_->IsFlowControlBlocked());\n\n  // Update flow control window to write all the buffered data.\n  quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(),\n                                             32 * 1024);\n  quic_stream_->OnWindowUpdateFrame(window_update1);\n  EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n      .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset,\n                          quic::StreamSendingState state, bool,\n                          quiche::QuicheOptional<quic::EncryptionLevel>) {\n        return quic::QuicConsumedData{write_length, state != quic::NO_FIN};\n      }));\n  quic_session_.OnCanWrite();\n  // No data should be buffered at this point.\n\n  EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n      .WillOnce(Invoke([](quic::QuicStreamId, size_t, quic::QuicStreamOffset,\n                          quic::StreamSendingState state, bool,\n                   
       quiche::QuicheOptional<quic::EncryptionLevel>) {\n        return quic::QuicConsumedData{0u, state != quic::NO_FIN};\n      }));\n  // Send more data. If watermark bytes counting were not cleared in previous\n  // OnCanWrite, this write would have caused the stream to exceed its high watermark.\n  std::string request1(16 * 1024 - 3, 'a');\n  Buffer::OwnedImpl buffer1(request1);\n  quic_stream_->encodeData(buffer1, false);\n  // Buffering more trailers will cause stream to reach high watermark, but\n  // because trailers closes the stream, no callback should be triggered.\n  quic_stream_->encodeTrailers(request_trailers_);\n\n  EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc",
    "content": "#include <openssl/evp.h>\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/quic_dispatcher.h\"\n#include \"quiche/quic/test_tools/quic_dispatcher_peer.h\"\n#include \"quiche/quic/test_tools/crypto_test_utils.h\"\n#include \"quiche/quic/test_tools/quic_test_utils.h\"\n#include \"quiche/common/platform/api/quiche_text_utils.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include <memory>\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection_helper.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"extensions/quic_listeners/quiche/platform/envoy_quic_clock.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_dispatcher.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_session.h\"\n#include \"test/extensions/quic_listeners/quiche/test_proof_source.h\"\n#include \"test/extensions/quic_listeners/quiche/test_utils.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/transport_sockets/well_known_names.h\"\n#include \"server/configuration_impl.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Quic {\n\nnamespace {\nconst size_t kNumSessionsToCreatePerLoopForTests = 16;\n}\n\nclass EnvoyQuicDispatcherTest : public QuicMultiVersionTest,\n                                protected Logger::Loggable<Logger::Id::main> 
{\npublic:\n  EnvoyQuicDispatcherTest()\n      : version_(GetParam().first), api_(Api::createApiForTest(time_system_)),\n        dispatcher_(api_->allocateDispatcher(\"test_thread\")),\n        listen_socket_(std::make_unique<Network::NetworkListenSocket<\n                           Network::NetworkSocketTrait<Network::Socket::Type::Datagram>>>(\n            Network::Test::getCanonicalLoopbackAddress(version_), nullptr, /*bind*/ true)),\n        connection_helper_(*dispatcher_), proof_source_(new TestProofSource()),\n        crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(),\n                       std::unique_ptr<TestProofSource>(proof_source_),\n                       quic::KeyExchangeSource::Default()),\n        version_manager_([]() {\n          if (GetParam().second == QuicVersionType::GquicQuicCrypto) {\n            return quic::CurrentSupportedVersionsWithQuicCrypto();\n          }\n          bool use_http3 = GetParam().second == QuicVersionType::Iquic;\n          SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3);\n          SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3);\n          return quic::CurrentSupportedVersions();\n        }()),\n        quic_version_(version_manager_.GetSupportedVersions()[0]),\n        listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()),\n                                            POOL_GAUGE(listener_config_.listenerScope()),\n                                            POOL_HISTOGRAM(listener_config_.listenerScope()))}),\n        per_worker_stats_({ALL_PER_HANDLER_LISTENER_STATS(\n            POOL_COUNTER_PREFIX(listener_config_.listenerScope(), \"worker.\"),\n            POOL_GAUGE_PREFIX(listener_config_.listenerScope(), \"worker.\"))}),\n        connection_handler_(*dispatcher_, absl::nullopt),\n        envoy_quic_dispatcher_(\n            &crypto_config_, quic_config_, &version_manager_,\n            
std::make_unique<EnvoyQuicConnectionHelper>(*dispatcher_),\n            std::make_unique<EnvoyQuicAlarmFactory>(*dispatcher_, *connection_helper_.GetClock()),\n            quic::kQuicDefaultConnectionIdLength, connection_handler_, listener_config_,\n            listener_stats_, per_worker_stats_, *dispatcher_, *listen_socket_),\n        connection_id_(quic::test::TestConnectionId(1)) {\n    auto writer = new testing::NiceMock<quic::test::MockPacketWriter>();\n    envoy_quic_dispatcher_.InitializeWithWriter(writer);\n    EXPECT_CALL(*writer, WritePacket(_, _, _, _, _))\n        .WillRepeatedly(Return(quic::WriteResult(quic::WRITE_STATUS_OK, 0)));\n  }\n\n  void SetUp() override {\n    // Advance time a bit because QuicTime regards 0 as uninitialized timestamp.\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), *dispatcher_,\n                                   Event::Dispatcher::RunType::NonBlock);\n    EXPECT_CALL(listener_config_, perConnectionBufferLimitBytes())\n        .WillRepeatedly(Return(1024 * 1024));\n  }\n\n  void TearDown() override {\n    quic::QuicBufferedPacketStore* buffered_packets =\n        quic::test::QuicDispatcherPeer::GetBufferedPackets(&envoy_quic_dispatcher_);\n    EXPECT_FALSE(buffered_packets->HasChlosBuffered());\n    EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_));\n\n    envoy_quic_dispatcher_.Shutdown();\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  void processValidChloPacketAndCheckStatus(bool should_buffer) {\n    quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4\n                                          ? 
quic::QuicIpAddress::Loopback4()\n                                          : quic::QuicIpAddress::Loopback6(),\n                                      54321);\n    quic::QuicBufferedPacketStore* buffered_packets =\n        quic::test::QuicDispatcherPeer::GetBufferedPackets(&envoy_quic_dispatcher_);\n    if (!should_buffer) {\n      // Set QuicDispatcher::new_sessions_allowed_per_event_loop_ to\n      // |kNumSessionsToCreatePerLoopForTests| so that received CHLOs can be\n      // processed immediately.\n      envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests);\n      EXPECT_FALSE(buffered_packets->HasChlosBuffered());\n      EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_));\n    }\n\n    // Create a Quic Crypto or TLS1.3 CHLO packet.\n    EnvoyQuicClock clock(*dispatcher_);\n    Buffer::OwnedImpl payload = generateChloPacketToSend(\n        quic_version_, quic_config_, crypto_config_, connection_id_, clock,\n        envoyIpAddressToQuicSocketAddress(listen_socket_->localAddress()->ip()), peer_addr,\n        \"test.example.org\");\n    Buffer::RawSliceVector slice = payload.getRawSlices();\n    ASSERT(slice.size() == 1);\n    auto encrypted_packet = std::make_unique<quic::QuicEncryptedPacket>(\n        static_cast<char*>(slice[0].mem_), slice[0].len_);\n    std::unique_ptr<quic::QuicReceivedPacket> received_packet =\n        std::unique_ptr<quic::QuicReceivedPacket>(\n            quic::test::ConstructReceivedPacket(*encrypted_packet, clock.Now()));\n\n    envoy_quic_dispatcher_.ProcessPacket(\n        envoyIpAddressToQuicSocketAddress(listen_socket_->localAddress()->ip()), peer_addr,\n        *received_packet);\n\n    if (should_buffer) {\n      // Incoming CHLO packet is buffered, because ProcessPacket() is called before\n      // ProcessBufferedChlos().\n      EXPECT_TRUE(buffered_packets->HasChlosBuffered());\n      EXPECT_TRUE(buffered_packets->HasBufferedPackets(connection_id_));\n\n      // Process the buffered 
CHLO now.\n      envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests);\n    }\n\n    EXPECT_FALSE(buffered_packets->HasChlosBuffered());\n    EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_));\n\n    // A new QUIC connection is created and its filter installed based on self and peer address.\n    EXPECT_EQ(1u, envoy_quic_dispatcher_.session_map().size());\n    quic::QuicSession* session =\n        envoy_quic_dispatcher_.session_map().find(connection_id_)->second.get();\n    ASSERT(session != nullptr);\n    EXPECT_TRUE(session->IsEncryptionEstablished());\n    EXPECT_EQ(1u, connection_handler_.numConnections());\n    auto envoy_connection = static_cast<EnvoyQuicServerSession*>(session);\n    EXPECT_EQ(\"test.example.org\", envoy_connection->requestedServerName());\n    EXPECT_EQ(peer_addr,\n              envoyIpAddressToQuicSocketAddress(envoy_connection->remoteAddress()->ip()));\n    ASSERT(envoy_connection->localAddress() != nullptr);\n    EXPECT_EQ(*listen_socket_->localAddress(), *envoy_connection->localAddress());\n  }\n\n  void processValidChloPacketAndInitializeFilters(bool should_buffer) {\n    Network::MockFilterChainManager filter_chain_manager;\n    std::shared_ptr<Network::MockReadFilter> read_filter(new Network::MockReadFilter());\n    Network::MockConnectionCallbacks network_connection_callbacks;\n    testing::StrictMock<Stats::MockCounter> read_total;\n    testing::StrictMock<Stats::MockGauge> read_current;\n    testing::StrictMock<Stats::MockCounter> write_total;\n    testing::StrictMock<Stats::MockGauge> write_current;\n\n    std::vector<Network::FilterFactoryCb> filter_factory(\n        {[&](Network::FilterManager& filter_manager) {\n          filter_manager.addReadFilter(read_filter);\n          read_filter->callbacks_->connection().addConnectionCallbacks(\n              network_connection_callbacks);\n          read_filter->callbacks_->connection().setConnectionStats(\n              {read_total, 
read_current, write_total, write_current, nullptr, nullptr});\n        }});\n    EXPECT_CALL(proof_source_->filterChain(), networkFilterFactories())\n        .WillOnce(ReturnRef(filter_factory));\n    EXPECT_CALL(listener_config_, filterChainFactory());\n    EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _))\n        .WillOnce(Invoke([](Network::Connection& connection,\n                            const std::vector<Network::FilterFactoryCb>& filter_factories) {\n          EXPECT_EQ(1u, filter_factories.size());\n          Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories);\n          return true;\n        }));\n    EXPECT_CALL(*read_filter, onNewConnection())\n        // Stop iteration to avoid calling getRead/WriteBuffer().\n        .WillOnce(Return(Network::FilterStatus::StopIteration));\n    EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected));\n\n    processValidChloPacketAndCheckStatus(should_buffer);\n    EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n    // Shutdown() to close the connection.\n    envoy_quic_dispatcher_.Shutdown();\n  }\n\n  bool quicVersionUsesTls() { return quic_version_.UsesTls(); }\n\nprotected:\n  Network::Address::IpVersion version_;\n  Event::SimulatedTimeSystemHelper time_system_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  Network::SocketPtr listen_socket_;\n  EnvoyQuicConnectionHelper connection_helper_;\n  TestProofSource* proof_source_;\n  quic::QuicCryptoServerConfig crypto_config_;\n  quic::QuicConfig quic_config_;\n  quic::QuicVersionManager version_manager_;\n  quic::ParsedQuicVersion quic_version_;\n  testing::NiceMock<Network::MockListenerConfig> listener_config_;\n  Server::ListenerStats listener_stats_;\n  Server::PerHandlerListenerStats per_worker_stats_;\n  Server::ConnectionHandlerImpl connection_handler_;\n  EnvoyQuicDispatcher envoy_quic_dispatcher_;\n 
 const quic::QuicConnectionId connection_id_;\n};\n\nINSTANTIATE_TEST_SUITE_P(EnvoyQuicDispatcherTests, EnvoyQuicDispatcherTest,\n                         testing::ValuesIn(generateTestParam()), testParamsToString);\n\nTEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponCHLO) {\n  if (quicVersionUsesTls()) {\n    // QUICHE doesn't support 0-RTT TLS1.3 handshake yet.\n    processValidChloPacketAndCheckStatus(false);\n    // Shutdown() to close the connection.\n    envoy_quic_dispatcher_.Shutdown();\n    return;\n  }\n  processValidChloPacketAndInitializeFilters(false);\n}\n\nTEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponBufferedCHLO) {\n  if (quicVersionUsesTls()) {\n    // QUICHE doesn't support 0-RTT TLS1.3 handshake yet.\n    processValidChloPacketAndCheckStatus(true);\n    // Shutdown() to close the connection.\n    envoy_quic_dispatcher_.Shutdown();\n    return;\n  }\n  processValidChloPacketAndInitializeFilters(true);\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc",
    "content": "#include <memory>\n#include <string>\n#include <vector>\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_proof_source.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"quiche/quic/test_tools/test_certificates.h\"\n\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\n\nnamespace Quic {\n\nclass TestGetProofCallback : public quic::ProofSource::Callback {\npublic:\n  TestGetProofCallback(bool& called, bool should_succeed, const std::string& server_config,\n                       quic::QuicTransportVersion& version, quiche::QuicheStringPiece chlo_hash,\n                       Network::FilterChain& filter_chain)\n      : called_(called), should_succeed_(should_succeed), server_config_(server_config),\n        version_(version), chlo_hash_(chlo_hash), expected_filter_chain_(filter_chain) {\n    ON_CALL(client_context_config_, cipherSuites)\n        .WillByDefault(ReturnRef(\n            Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CIPHER_SUITES));\n    ON_CALL(client_context_config_, ecdhCurves)\n        .WillByDefault(\n            ReturnRef(Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CURVES));\n    const std::string alpn(\"h2,http/1.1\");\n    ON_CALL(client_context_config_, alpnProtocols()).WillByDefault(ReturnRef(alpn));\n    const std::string empty_string;\n    ON_CALL(client_context_config_, serverNameIndication()).WillByDefault(ReturnRef(empty_string));\n    ON_CALL(client_context_config_, signingAlgorithmsForTest())\n        .WillByDefault(ReturnRef(empty_string));\n    ON_CALL(client_context_config_, certificateValidationContext())\n       
 .WillByDefault(Return(&cert_validation_ctx_config_));\n\n    // Getting the last cert in the chain as the root CA cert.\n    std::string cert_chain(quic::test::kTestCertificateChainPem);\n    const std::string& root_ca_cert =\n        cert_chain.substr(cert_chain.rfind(\"-----BEGIN CERTIFICATE-----\"));\n    const std::string path_string(\"some_path\");\n    ON_CALL(cert_validation_ctx_config_, caCert()).WillByDefault(ReturnRef(root_ca_cert));\n    ON_CALL(cert_validation_ctx_config_, caCertPath()).WillByDefault(ReturnRef(path_string));\n    ON_CALL(cert_validation_ctx_config_, trustChainVerification)\n        .WillByDefault(Return(envoy::extensions::transport_sockets::tls::v3::\n                                  CertificateValidationContext::VERIFY_TRUST_CHAIN));\n    ON_CALL(cert_validation_ctx_config_, allowExpiredCertificate()).WillByDefault(Return(true));\n    const std::string crl_list;\n    ON_CALL(cert_validation_ctx_config_, certificateRevocationList())\n        .WillByDefault(ReturnRef(crl_list));\n    ON_CALL(cert_validation_ctx_config_, certificateRevocationListPath())\n        .WillByDefault(ReturnRef(path_string));\n    const std::vector<std::string> empty_string_list;\n    ON_CALL(cert_validation_ctx_config_, verifySubjectAltNameList())\n        .WillByDefault(ReturnRef(empty_string_list));\n    const std::vector<envoy::type::matcher::v3::StringMatcher> san_matchers;\n    ON_CALL(cert_validation_ctx_config_, subjectAltNameMatchers())\n        .WillByDefault(ReturnRef(san_matchers));\n    ON_CALL(cert_validation_ctx_config_, verifyCertificateHashList())\n        .WillByDefault(ReturnRef(empty_string_list));\n    ON_CALL(cert_validation_ctx_config_, verifyCertificateSpkiList())\n        .WillByDefault(ReturnRef(empty_string_list));\n    verifier_ =\n        std::make_unique<EnvoyQuicProofVerifier>(store_, client_context_config_, time_system_);\n  }\n\n  // quic::ProofSource::Callback\n  void Run(bool ok, const 
quic::QuicReferenceCountedPointer<quic::ProofSource::Chain>& chain,\n           const quic::QuicCryptoProof& proof,\n           std::unique_ptr<quic::ProofSource::Details> details) override {\n    called_ = true;\n    if (!should_succeed_) {\n      EXPECT_FALSE(ok);\n      return;\n    };\n    EXPECT_TRUE(ok);\n    EXPECT_EQ(2, chain->certs.size());\n    std::string error;\n    EXPECT_EQ(quic::QUIC_SUCCESS,\n              verifier_->VerifyProof(\"www.example.org\", 54321, server_config_, version_, chlo_hash_,\n                                     chain->certs, proof.leaf_cert_scts, proof.signature, nullptr,\n                                     &error, nullptr, nullptr))\n        << error;\n    EXPECT_EQ(&expected_filter_chain_,\n              &static_cast<EnvoyQuicProofSourceDetails*>(details.get())->filterChain());\n  }\n\nprivate:\n  bool& called_;\n  bool should_succeed_;\n  const std::string& server_config_;\n  const quic::QuicTransportVersion& version_;\n  quiche::QuicheStringPiece chlo_hash_;\n  Network::FilterChain& expected_filter_chain_;\n  NiceMock<Stats::MockStore> store_;\n  Event::GlobalTimeSystem time_system_;\n  NiceMock<Ssl::MockClientContextConfig> client_context_config_;\n  NiceMock<Ssl::MockCertificateValidationContextConfig> cert_validation_ctx_config_;\n  std::unique_ptr<EnvoyQuicProofVerifier> verifier_;\n};\n\nclass TestSignatureCallback : public quic::ProofSource::SignatureCallback {\npublic:\n  TestSignatureCallback(bool expect_success) : expect_success_(expect_success) {}\n  ~TestSignatureCallback() override { EXPECT_TRUE(run_called_); }\n\n  // quic::ProofSource::SignatureCallback\n  void Run(bool ok, std::string, std::unique_ptr<quic::ProofSource::Details>) override {\n    EXPECT_EQ(expect_success_, ok);\n    run_called_ = true;\n  }\n\nprivate:\n  bool expect_success_;\n  bool run_called_{false};\n};\n\nclass EnvoyQuicProofSourceTest : public ::testing::Test {\npublic:\n  EnvoyQuicProofSourceTest()\n      : 
server_address_(quic::QuicIpAddress::Loopback4(), 12345),\n        client_address_(quic::QuicIpAddress::Loopback4(), 54321),\n        transport_socket_factory_(std::make_unique<Ssl::MockServerContextConfig>()),\n        listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()),\n                                            POOL_GAUGE(listener_config_.listenerScope()),\n                                            POOL_HISTOGRAM(listener_config_.listenerScope()))}),\n        proof_source_(listen_socket_, filter_chain_manager_, listener_stats_) {}\n\n  void expectCertChainAndPrivateKey(const std::string& cert, bool expect_private_key) {\n    EXPECT_CALL(listen_socket_, ioHandle()).Times(expect_private_key ? 2u : 1u);\n    EXPECT_CALL(filter_chain_manager_, findFilterChain(_))\n        .WillRepeatedly(Invoke([&](const Network::ConnectionSocket& connection_socket) {\n          EXPECT_EQ(*quicAddressToEnvoyAddressInstance(server_address_),\n                    *connection_socket.localAddress());\n          EXPECT_EQ(*quicAddressToEnvoyAddressInstance(client_address_),\n                    *connection_socket.remoteAddress());\n          EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic,\n                    connection_socket.detectedTransportProtocol());\n          EXPECT_EQ(\"h2\", connection_socket.requestedApplicationProtocols()[0]);\n          return &filter_chain_;\n        }));\n    EXPECT_CALL(filter_chain_, transportSocketFactory())\n        .WillRepeatedly(ReturnRef(transport_socket_factory_));\n\n    std::vector<std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>> tls_cert_configs{\n        std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>(tls_cert_config_)};\n    EXPECT_CALL(dynamic_cast<const Ssl::MockServerContextConfig&>(\n                    transport_socket_factory_.serverContextConfig()),\n                tlsCertificates())\n        .WillRepeatedly(Return(tls_cert_configs));\n    
EXPECT_CALL(tls_cert_config_, certificateChain()).WillOnce(ReturnRef(cert));\n    if (expect_private_key) {\n      EXPECT_CALL(tls_cert_config_, privateKey()).WillOnce(ReturnRef(pkey_));\n    }\n  }\n\n  void testGetProof(bool expect_success) {\n    bool called = false;\n    auto callback = std::make_unique<TestGetProofCallback>(called, expect_success, server_config_,\n                                                           version_, chlo_hash_, filter_chain_);\n    proof_source_.GetProof(server_address_, client_address_, hostname_, server_config_, version_,\n                           chlo_hash_, std::move(callback));\n    EXPECT_TRUE(called);\n  }\n\nprotected:\n  std::string hostname_{\"www.fake.com\"};\n  quic::QuicSocketAddress server_address_;\n  quic::QuicSocketAddress client_address_;\n  quic::QuicTransportVersion version_{quic::QUIC_VERSION_UNSUPPORTED};\n  quiche::QuicheStringPiece chlo_hash_{\"aaaaa\"};\n  std::string server_config_{\"Server Config\"};\n  std::string expected_certs_{quic::test::kTestCertificateChainPem};\n  std::string pkey_{quic::test::kTestCertificatePrivateKeyPem};\n  Network::MockFilterChain filter_chain_;\n  Network::MockFilterChainManager filter_chain_manager_;\n  Network::MockListenSocket listen_socket_;\n  testing::NiceMock<Network::MockListenerConfig> listener_config_;\n  QuicServerTransportSocketFactory transport_socket_factory_;\n  Ssl::MockTlsCertificateConfig tls_cert_config_;\n  Server::ListenerStats listener_stats_;\n  EnvoyQuicProofSource proof_source_;\n};\n\nTEST_F(EnvoyQuicProofSourceTest, TestGetProof) {\n  expectCertChainAndPrivateKey(expected_certs_, true);\n  testGetProof(true);\n}\n\nTEST_F(EnvoyQuicProofSourceTest, GetProofFailNoFilterChain) {\n  bool called = false;\n  auto callback = std::make_unique<TestGetProofCallback>(called, false, server_config_, version_,\n                                                         chlo_hash_, filter_chain_);\n  EXPECT_CALL(listen_socket_, ioHandle());\n  
EXPECT_CALL(filter_chain_manager_, findFilterChain(_))\n      .WillRepeatedly(Invoke([&](const Network::ConnectionSocket&) { return nullptr; }));\n  proof_source_.GetProof(server_address_, client_address_, hostname_, server_config_, version_,\n                         chlo_hash_, std::move(callback));\n  EXPECT_TRUE(called);\n}\n\nTEST_F(EnvoyQuicProofSourceTest, GetProofFailInvalidCert) {\n  std::string invalid_cert{R\"(-----BEGIN CERTIFICATE-----\n    invalid certificate\n    -----END CERTIFICATE-----)\"};\n  expectCertChainAndPrivateKey(invalid_cert, false);\n  testGetProof(false);\n}\n\nTEST_F(EnvoyQuicProofSourceTest, GetProofFailInvalidPublicKeyInCert) {\n  // This is a valid cert with RSA public key. But we don't support RSA key with\n  // length < 1024.\n  std::string cert_with_rsa_1024{R\"(-----BEGIN CERTIFICATE-----\nMIIC2jCCAkOgAwIBAgIUDBHEwlCvLGh3w0O8VwIW+CjYXY8wDQYJKoZIhvcNAQEL\nBQAwfzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk1BMRIwEAYDVQQHDAlDYW1icmlk\nZ2UxDzANBgNVBAoMBkdvb2dsZTEOMAwGA1UECwwFZW52b3kxDTALBgNVBAMMBHRl\nc3QxHzAdBgkqhkiG9w0BCQEWEGRhbnpoQGdvb2dsZS5jb20wHhcNMjAwODA0MTg1\nOTQ4WhcNMjEwODA0MTg1OTQ4WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTUEx\nEjAQBgNVBAcMCUNhbWJyaWRnZTEPMA0GA1UECgwGR29vZ2xlMQ4wDAYDVQQLDAVl\nbnZveTENMAsGA1UEAwwEdGVzdDEfMB0GCSqGSIb3DQEJARYQZGFuemhAZ29vZ2xl\nLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAykCZNjxws+sNfnp18nsp\n+7LN81J/RSwAHLkGnwEtd3OxSUuiCYHgYlyuEAwJdf99+SaFrgcA4LvYJ/Mhm/fZ\nmsnpfsAvoQ49+ax0fm1x56ii4KgNiu9iFsWwwVmkHkgjlRcRsmhr4WeIf14Yvpqs\nJNsbNVSCZ4GLQ2V6BqIHlhcCAwEAAaNTMFEwHQYDVR0OBBYEFDO1KPYcdRmeKDvL\nH2Yzj8el2Xe1MB8GA1UdIwQYMBaAFDO1KPYcdRmeKDvLH2Yzj8el2Xe1MA8GA1Ud\nEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEAnwWVmwSK9TDml7oHGBavzOC1\nf/lOd5zz2e7Tu2pUtx1sX1tlKph1D0ANpJwxRV78R2hjmynLSl7h4Ual9NMubqkD\nx96rVeUbRJ/qU4//nNM/XQa9vIAIcTZ0jFhmb0c3R4rmoqqC3vkSDwtaE5yuS5T4\nGUy+n0vQNB0cXGzgcGI=\n-----END CERTIFICATE-----)\"};\n  expectCertChainAndPrivateKey(cert_with_rsa_1024, false);\n  
testGetProof(false);\n}\n\nTEST_F(EnvoyQuicProofSourceTest, UnexpectedPrivateKey) {\n  EXPECT_CALL(listen_socket_, ioHandle());\n  EXPECT_CALL(filter_chain_manager_, findFilterChain(_))\n      .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return &filter_chain_; }));\n  auto server_context_config = std::make_unique<Ssl::MockServerContextConfig>();\n  auto server_context_config_ptr = server_context_config.get();\n  QuicServerTransportSocketFactory transport_socket_factory(std::move(server_context_config));\n  EXPECT_CALL(filter_chain_, transportSocketFactory())\n      .WillRepeatedly(ReturnRef(transport_socket_factory));\n\n  Ssl::MockTlsCertificateConfig tls_cert_config;\n  std::vector<std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>> tls_cert_configs{\n      std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>(tls_cert_config)};\n  EXPECT_CALL(*server_context_config_ptr, tlsCertificates())\n      .WillRepeatedly(Return(tls_cert_configs));\n  std::string rsa_pkey_1024_len(R\"(-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQC79hDq/OwN3ke3EF6Ntdi9R+VSrl9MStk992l1us8lZhq+e0zU\nOlvxbUeZ8wyVkzs1gqI1it1IwF+EpdGhHhjggZjg040GD3HWSuyCzpHh+nLwJxtQ\nD837PCg0zl+TnKv1YjY3I1F3trGhIqfd2B6pgaJ4hpr+0hdqnKP0Htd4DwIDAQAB\nAoGASNypUD59Tx70k+1fifWNMEq3heacgJmfPxsyoXWqKSg8g8yOStLYo20mTXJf\nVXg+go7CTJkpELOqE2SoL5nYMD0D/YIZCgDx85k0GWHdA6udNn4to95ZTeZPrBHx\nT0QNQHnZI3A7RwLinO60IRY0NYzhkTEBxIuvIY6u0DVbrAECQQDpshbxK3DHc7Yi\nAu7BUsxP8RbG4pP5IIVoD4YvJuwUkdrfrwejqTdkfchJJc+Gu/+h8vy7eASPHLLT\nNBk5wFoPAkEAzeaKnx0CgNs0RX4+sSF727FroD98VUM38OFEJQ6U9OAWGvaKd8ey\nyAYUjR2Sl5ZRyrwWv4IqyWgUGhZqNG0CAQJAPTjjm8DGpenhcB2WkNzxG4xMbEQV\ngfGMIYvXmmi29liTn4AKH00IbvIo00jtih2cRcATh8VUZG2fR4dhiGik7wJAWSwS\nNwzaS7IjtkERp6cHvELfiLxV/Zsp/BGjcKUbD96I1E6X834ySHyRo/f9x9bbP4Es\nHO6j1yxTIGU6w8++AQJACdFPnRidOaj5oJmcZq0s6WGTYfegjTOKgi5KQzO0FTwG\nqGm130brdD+1U1EJnEFmleLZ/W6mEi3MxcKpWOpTqQ==\n-----END RSA PRIVATE KEY-----)\");\n  EXPECT_CALL(tls_cert_config, 
privateKey()).WillOnce(ReturnRef(rsa_pkey_1024_len));\n  proof_source_.ComputeTlsSignature(server_address_, client_address_, hostname_,\n                                    SSL_SIGN_RSA_PSS_RSAE_SHA256, \"payload\",\n                                    std::make_unique<TestSignatureCallback>(false));\n}\n\nTEST_F(EnvoyQuicProofSourceTest, InvalidPrivateKey) {\n  EXPECT_CALL(listen_socket_, ioHandle());\n  EXPECT_CALL(filter_chain_manager_, findFilterChain(_))\n      .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return &filter_chain_; }));\n  auto server_context_config = std::make_unique<Ssl::MockServerContextConfig>();\n  auto server_context_config_ptr = server_context_config.get();\n  QuicServerTransportSocketFactory transport_socket_factory(std::move(server_context_config));\n  EXPECT_CALL(filter_chain_, transportSocketFactory())\n      .WillRepeatedly(ReturnRef(transport_socket_factory));\n\n  Ssl::MockTlsCertificateConfig tls_cert_config;\n  std::vector<std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>> tls_cert_configs{\n      std::reference_wrapper<const Envoy::Ssl::TlsCertificateConfig>(tls_cert_config)};\n  EXPECT_CALL(*server_context_config_ptr, tlsCertificates())\n      .WillRepeatedly(Return(tls_cert_configs));\n  std::string invalid_pkey(\"abcdefg\");\n  EXPECT_CALL(tls_cert_config, privateKey()).WillOnce(ReturnRef(invalid_pkey));\n  proof_source_.ComputeTlsSignature(server_address_, client_address_, hostname_,\n                                    SSL_SIGN_RSA_PSS_RSAE_SHA256, \"payload\",\n                                    std::make_unique<TestSignatureCallback>(false));\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_test.cc",
    "content": "#include <algorithm>\n#include <memory>\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h\"\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/test_time.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"quiche/quic/core/crypto/certificate_view.h\"\n#include \"quiche/quic/test_tools/test_certificates.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Quic {\n\nclass EnvoyQuicProofVerifierTest : public testing::Test {\npublic:\n  EnvoyQuicProofVerifierTest()\n      : root_ca_cert_(cert_chain_.substr(cert_chain_.rfind(\"-----BEGIN CERTIFICATE-----\"))),\n        leaf_cert_([=]() {\n          std::stringstream pem_stream(cert_chain_);\n          std::vector<std::string> chain = quic::CertificateView::LoadPemFromStream(&pem_stream);\n          return chain[0];\n        }()) {\n    ON_CALL(client_context_config_, cipherSuites)\n        .WillByDefault(ReturnRef(\n            Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CIPHER_SUITES));\n    ON_CALL(client_context_config_, ecdhCurves)\n        .WillByDefault(\n            ReturnRef(Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CURVES));\n    ON_CALL(client_context_config_, alpnProtocols()).WillByDefault(ReturnRef(alpn_));\n    ON_CALL(client_context_config_, serverNameIndication()).WillByDefault(ReturnRef(empty_string_));\n    ON_CALL(client_context_config_, signingAlgorithmsForTest()).WillByDefault(ReturnRef(sig_algs_));\n    ON_CALL(client_context_config_, certificateValidationContext())\n        .WillByDefault(Return(&cert_validation_ctx_config_));\n  }\n\n  // Since this cert chain contains an expired cert, we can flip allow_expired_cert to test the code\n  // paths for BoringSSL cert verification success and failure.\n  void 
configCertVerificationDetails(bool allow_expired_cert) {\n    // Getting the last cert in the chain as the root CA cert.\n    EXPECT_CALL(cert_validation_ctx_config_, caCert()).WillRepeatedly(ReturnRef(root_ca_cert_));\n    EXPECT_CALL(cert_validation_ctx_config_, caCertPath()).WillRepeatedly(ReturnRef(path_string_));\n    EXPECT_CALL(cert_validation_ctx_config_, trustChainVerification)\n        .WillRepeatedly(Return(envoy::extensions::transport_sockets::tls::v3::\n                                   CertificateValidationContext::VERIFY_TRUST_CHAIN));\n    EXPECT_CALL(cert_validation_ctx_config_, allowExpiredCertificate())\n        .WillRepeatedly(Return(allow_expired_cert));\n    EXPECT_CALL(cert_validation_ctx_config_, certificateRevocationList())\n        .WillRepeatedly(ReturnRef(empty_string_));\n    EXPECT_CALL(cert_validation_ctx_config_, certificateRevocationListPath())\n        .WillRepeatedly(ReturnRef(path_string_));\n    EXPECT_CALL(cert_validation_ctx_config_, verifySubjectAltNameList())\n        .WillRepeatedly(ReturnRef(empty_string_list_));\n    EXPECT_CALL(cert_validation_ctx_config_, subjectAltNameMatchers())\n        .WillRepeatedly(ReturnRef(san_matchers_));\n    EXPECT_CALL(cert_validation_ctx_config_, verifyCertificateHashList())\n        .WillRepeatedly(ReturnRef(empty_string_list_));\n    EXPECT_CALL(cert_validation_ctx_config_, verifyCertificateSpkiList())\n        .WillRepeatedly(ReturnRef(empty_string_list_));\n    verifier_ =\n        std::make_unique<EnvoyQuicProofVerifier>(store_, client_context_config_, time_system_);\n  }\n\nprotected:\n  const std::string path_string_{\"some_path\"};\n  const std::string alpn_{\"h2,http/1.1\"};\n  const std::string sig_algs_{\"rsa_pss_rsae_sha256\"};\n  const std::vector<envoy::type::matcher::v3::StringMatcher> san_matchers_;\n  const std::string empty_string_;\n  const std::vector<std::string> empty_string_list_;\n  const std::string cert_chain_{quic::test::kTestCertificateChainPem};\n  const 
std::string root_ca_cert_;\n  const std::string leaf_cert_;\n  NiceMock<Stats::MockStore> store_;\n  Event::GlobalTimeSystem time_system_;\n  NiceMock<Ssl::MockClientContextConfig> client_context_config_;\n  Ssl::MockCertificateValidationContextConfig cert_validation_ctx_config_;\n  std::unique_ptr<EnvoyQuicProofVerifier> verifier_;\n};\n\nTEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainSuccess) {\n  configCertVerificationDetails(true);\n  std::unique_ptr<quic::CertificateView> cert_view =\n      quic::CertificateView::ParseSingleCertificate(leaf_cert_);\n  const std::string ocsp_response;\n  const std::string cert_sct;\n  std::string error_details;\n  EXPECT_EQ(quic::QUIC_SUCCESS,\n            verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321,\n                                       {leaf_cert_}, ocsp_response, cert_sct, nullptr,\n                                       &error_details, nullptr, nullptr))\n      << error_details;\n}\n\nTEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureFromSsl) {\n  configCertVerificationDetails(false);\n  std::unique_ptr<quic::CertificateView> cert_view =\n      quic::CertificateView::ParseSingleCertificate(leaf_cert_);\n  const std::string ocsp_response;\n  const std::string cert_sct;\n  std::string error_details;\n  EXPECT_EQ(quic::QUIC_FAILURE,\n            verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321,\n                                       {leaf_cert_}, ocsp_response, cert_sct, nullptr,\n                                       &error_details, nullptr, nullptr))\n      << error_details;\n  EXPECT_EQ(\"X509_verify_cert: certificate verification error at depth 1: certificate has expired\",\n            error_details);\n}\n\nTEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidLeafCert) {\n  configCertVerificationDetails(true);\n  const std::string ocsp_response;\n  const std::string cert_sct;\n  std::string error_details;\n  const 
std::vector<std::string> certs{\"invalid leaf cert\"};\n  EXPECT_EQ(quic::QUIC_FAILURE,\n            verifier_->VerifyCertChain(\"www.google.com\", 54321, certs, ocsp_response, cert_sct,\n                                       nullptr, &error_details, nullptr, nullptr));\n  EXPECT_EQ(\"d2i_X509: fail to parse DER\", error_details);\n}\n\nTEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureLeafCertWithGarbage) {\n  configCertVerificationDetails(true);\n  std::unique_ptr<quic::CertificateView> cert_view =\n      quic::CertificateView::ParseSingleCertificate(leaf_cert_);\n  const std::string ocsp_response;\n  const std::string cert_sct;\n  std::string cert_with_trailing_garbage = absl::StrCat(leaf_cert_, \"AAAAAA\");\n  std::string error_details;\n  EXPECT_EQ(quic::QUIC_FAILURE,\n            verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321,\n                                       {cert_with_trailing_garbage}, ocsp_response, cert_sct,\n                                       nullptr, &error_details, nullptr, nullptr))\n      << error_details;\n  EXPECT_EQ(\"There is trailing garbage in DER.\", error_details);\n}\n\nTEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidHost) {\n  configCertVerificationDetails(true);\n  std::unique_ptr<quic::CertificateView> cert_view =\n      quic::CertificateView::ParseSingleCertificate(leaf_cert_);\n  const std::string ocsp_response;\n  const std::string cert_sct;\n  std::string error_details;\n  EXPECT_EQ(quic::QUIC_FAILURE,\n            verifier_->VerifyCertChain(\"unknown.org\", 54321, {leaf_cert_}, ocsp_response, cert_sct,\n                                       nullptr, &error_details, nullptr, nullptr))\n      << error_details;\n  EXPECT_EQ(\"Leaf certificate doesn't match hostname: unknown.org\", error_details);\n}\n\nTEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureEmptyCertChain) {\n  configCertVerificationDetails(true);\n  std::unique_ptr<quic::CertificateView> 
cert_view =\n      quic::CertificateView::ParseSingleCertificate(leaf_cert_);\n  quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED};\n  quiche::QuicheStringPiece chlo_hash{\"aaaaa\"};\n  std::string server_config{\"Server Config\"};\n  const std::string ocsp_response;\n  const std::string cert_sct;\n  std::string error_details;\n  const std::vector<std::string> certs;\n  EXPECT_EQ(quic::QUIC_FAILURE,\n            verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321,\n                                   server_config, version, chlo_hash, certs, cert_sct, \"signature\",\n                                   nullptr, &error_details, nullptr, nullptr));\n  EXPECT_EQ(\"Received empty cert chain.\", error_details);\n}\n\nTEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureInvalidLeafCert) {\n  configCertVerificationDetails(true);\n  std::unique_ptr<quic::CertificateView> cert_view =\n      quic::CertificateView::ParseSingleCertificate(leaf_cert_);\n  quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED};\n  quiche::QuicheStringPiece chlo_hash{\"aaaaa\"};\n  std::string server_config{\"Server Config\"};\n  const std::string ocsp_response;\n  const std::string cert_sct;\n  std::string error_details;\n  const std::vector<std::string> certs{\"invalid leaf cert\"};\n  EXPECT_EQ(quic::QUIC_FAILURE,\n            verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321,\n                                   server_config, version, chlo_hash, certs, cert_sct, \"signature\",\n                                   nullptr, &error_details, nullptr, nullptr));\n  EXPECT_EQ(\"Invalid leaf cert.\", error_details);\n}\n\nTEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureUnsupportedECKey) {\n  configCertVerificationDetails(true);\n  quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED};\n  quiche::QuicheStringPiece chlo_hash{\"aaaaa\"};\n  std::string server_config{\"Server Config\"};\n  const 
std::string ocsp_response;\n  const std::string cert_sct;\n  std::string error_details;\n  // This is a EC cert with secp384r1 curve which is not supported by Envoy.\n  const std::string certs{R\"(-----BEGIN CERTIFICATE-----\nMIICkDCCAhagAwIBAgIUTZbykU9eQL3GdrNlodxrOJDecIQwCgYIKoZIzj0EAwIw\nfzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk1BMRIwEAYDVQQHDAlDYW1icmlkZ2Ux\nDzANBgNVBAoMBkdvb2dsZTEOMAwGA1UECwwFZW52b3kxDTALBgNVBAMMBHRlc3Qx\nHzAdBgkqhkiG9w0BCQEWEGRhbnpoQGdvb2dsZS5jb20wHhcNMjAwODA1MjAyMDI0\nWhcNMjIwODA1MjAyMDI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTUExEjAQ\nBgNVBAcMCUNhbWJyaWRnZTEPMA0GA1UECgwGR29vZ2xlMQ4wDAYDVQQLDAVlbnZv\neTENMAsGA1UEAwwEdGVzdDEfMB0GCSqGSIb3DQEJARYQZGFuemhAZ29vZ2xlLmNv\nbTB2MBAGByqGSM49AgEGBSuBBAAiA2IABGRaEAtVq+xHXfsF4R/j+mqVN2E29ZYL\noFlvnelKeeT2B51bSfUv+X+Ci1BSa2OxPCVS6o0vpcF6YOlz4CS7QcXZIoRfhsv7\nO2Hz/IdxAPhX/gdK/70T1x+V/6nvIHiiw6NTMFEwHQYDVR0OBBYEFF75rDce6xNJ\nGfpKbUg4emG2KWRMMB8GA1UdIwQYMBaAFF75rDce6xNJGfpKbUg4emG2KWRMMA8G\nA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDaAAwZQIxAIyZghTK3cmyrRWkxfQ7\nxEc11gujcT8nbytYbM6jodKwcbtR6SOmLx2ychXrCMm2ZAIwXqmrTYBtrbqb3mBx\nVdGXMAjeXhnOnPvmDi5hUz/uvI+Pg6cNmUoCRwSCnK/DazhA\n-----END CERTIFICATE-----)\"};\n  std::stringstream pem_stream(certs);\n  std::vector<std::string> chain = quic::CertificateView::LoadPemFromStream(&pem_stream);\n  std::unique_ptr<quic::CertificateView> cert_view =\n      quic::CertificateView::ParseSingleCertificate(chain[0]);\n  ASSERT(cert_view);\n  EXPECT_EQ(quic::QUIC_FAILURE,\n            verifier_->VerifyProof(\"www.google.com\", 54321, server_config, version, chlo_hash,\n                                   chain, cert_sct, \"signature\", nullptr, &error_details, nullptr,\n                                   nullptr));\n  EXPECT_EQ(\"Invalid leaf cert, only P-256 ECDSA certificates are supported\", error_details);\n}\n\nTEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureInvalidSignature) {\n  configCertVerificationDetails(true);\n  std::unique_ptr<quic::CertificateView> cert_view =\n      
quic::CertificateView::ParseSingleCertificate(leaf_cert_);\n  quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED};\n  quiche::QuicheStringPiece chlo_hash{\"aaaaa\"};\n  std::string server_config{\"Server Config\"};\n  const std::string ocsp_response;\n  const std::string cert_sct;\n  std::string error_details;\n  EXPECT_EQ(quic::QUIC_FAILURE,\n            verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321,\n                                   server_config, version, chlo_hash, {leaf_cert_}, cert_sct,\n                                   \"signature\", nullptr, &error_details, nullptr, nullptr));\n  EXPECT_EQ(\"Signature is not valid.\", error_details);\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc",
    "content": "#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/crypto/null_encrypter.h\"\n#include \"quiche/quic/core/quic_crypto_server_stream.h\"\n#include \"quiche/quic/core/quic_utils.h\"\n#include \"quiche/quic/core/quic_versions.h\"\n#include \"quiche/quic/test_tools/crypto_test_utils.h\"\n#include \"quiche/quic/test_tools/quic_connection_peer.h\"\n#include \"quiche/quic/test_tools/quic_server_session_base_peer.h\"\n#include \"quiche/quic/test_tools/quic_test_utils.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include <string>\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_session.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_stream.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_connection.h\"\n#include \"extensions/quic_listeners/quiche/codec_impl.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection_helper.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"test/extensions/quic_listeners/quiche/test_proof_source.h\"\n#include \"test/extensions/quic_listeners/quiche/test_utils.h\"\n#include \"extensions/transport_sockets/well_known_names.h\"\n\n#include \"envoy/stats/stats_macros.h\"\n#include \"common/event/libevent_scheduler.h\"\n#include \"server/configuration_impl.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/stream_decoder.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/global.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing 
testing::AnyNumber;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Quic {\n\nclass TestEnvoyQuicServerConnection : public EnvoyQuicServerConnection {\npublic:\n  TestEnvoyQuicServerConnection(quic::QuicConnectionHelperInterface& helper,\n                                quic::QuicAlarmFactory& alarm_factory,\n                                quic::QuicPacketWriter& writer,\n                                const quic::ParsedQuicVersionVector& supported_versions,\n                                Network::Socket& listen_socket)\n      : EnvoyQuicServerConnection(quic::test::TestConnectionId(),\n                                  quic::QuicSocketAddress(quic::QuicIpAddress::Loopback4(), 12345),\n                                  helper, alarm_factory, &writer, /*owns_writer=*/false,\n                                  supported_versions, listen_socket) {}\n\n  Network::Connection::ConnectionStats& connectionStats() const {\n    return EnvoyQuicConnection::connectionStats();\n  }\n\n  MOCK_METHOD(void, SendConnectionClosePacket, (quic::QuicErrorCode, const std::string&));\n  MOCK_METHOD(bool, SendControlFrame, (const quic::QuicFrame& frame));\n};\n\n// Derive to have simpler priority mechanism.\nclass TestEnvoyQuicServerSession : public EnvoyQuicServerSession {\npublic:\n  using EnvoyQuicServerSession::EnvoyQuicServerSession;\n\n  bool ShouldYield(quic::QuicStreamId /*stream_id*/) override {\n    // Never yield to other stream so that it's easier to predict stream write\n    // behavior.\n    return false;\n  }\n};\n\nclass ProofSourceDetailsSetter {\npublic:\n  virtual ~ProofSourceDetailsSetter() = default;\n\n  virtual void setProofSourceDetails(std::unique_ptr<EnvoyQuicProofSourceDetails> details) = 0;\n};\n\nclass TestQuicCryptoServerStream : public quic::QuicCryptoServerStream,\n                                   public ProofSourceDetailsSetter {\npublic:\n  ~TestQuicCryptoServerStream() override = 
default;\n\n  explicit TestQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config,\n                                      quic::QuicCompressedCertsCache* compressed_certs_cache,\n                                      quic::QuicSession* session,\n                                      quic::QuicCryptoServerStreamBase::Helper* helper)\n      : quic::QuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {}\n\n  bool encryption_established() const override { return true; }\n\n  const EnvoyQuicProofSourceDetails* ProofSourceDetails() const override { return details_.get(); }\n\n  void setProofSourceDetails(std::unique_ptr<EnvoyQuicProofSourceDetails> details) override {\n    details_ = std::move(details);\n  }\n\nprivate:\n  std::unique_ptr<EnvoyQuicProofSourceDetails> details_;\n};\n\nclass TestEnvoyQuicTlsServerHandshaker : public quic::TlsServerHandshaker,\n                                         public ProofSourceDetailsSetter {\npublic:\n  ~TestEnvoyQuicTlsServerHandshaker() override = default;\n\n  TestEnvoyQuicTlsServerHandshaker(quic::QuicSession* session,\n                                   const quic::QuicCryptoServerConfig& crypto_config)\n      : quic::TlsServerHandshaker(session, crypto_config),\n        params_(new quic::QuicCryptoNegotiatedParameters) {\n    params_->cipher_suite = 1;\n  }\n\n  bool encryption_established() const override { return true; }\n  const EnvoyQuicProofSourceDetails* ProofSourceDetails() const override { return details_.get(); }\n  void setProofSourceDetails(std::unique_ptr<EnvoyQuicProofSourceDetails> details) override {\n    details_ = std::move(details);\n  }\n  const quic::QuicCryptoNegotiatedParameters& crypto_negotiated_params() const override {\n    return *params_;\n  }\n\nprivate:\n  std::unique_ptr<EnvoyQuicProofSourceDetails> details_;\n  quic::QuicReferenceCountedPointer<quic::QuicCryptoNegotiatedParameters> params_;\n};\n\nclass EnvoyQuicServerSessionTest : public 
testing::TestWithParam<bool> {\npublic:\n  EnvoyQuicServerSessionTest()\n      : api_(Api::createApiForTest(time_system_)),\n        dispatcher_(api_->allocateDispatcher(\"test_thread\")), connection_helper_(*dispatcher_),\n        alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() {\n          SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam());\n          SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam());\n          return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0);\n        }()),\n        quic_connection_(new TestEnvoyQuicServerConnection(\n            connection_helper_, alarm_factory_, writer_, quic_version_, *listener_config_.socket_)),\n        crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(),\n                       std::make_unique<TestProofSource>(), quic::KeyExchangeSource::Default()),\n        envoy_quic_session_(quic_config_, quic_version_,\n                            std::unique_ptr<TestEnvoyQuicServerConnection>(quic_connection_),\n                            /*visitor=*/nullptr, &crypto_stream_helper_, &crypto_config_,\n                            &compressed_certs_cache_, *dispatcher_,\n                            /*send_buffer_limit*/ quic::kDefaultFlowControlSendWindow * 1.5,\n                            listener_config_),\n        read_filter_(new Network::MockReadFilter()) {\n\n    EXPECT_EQ(time_system_.systemTime(), envoy_quic_session_.streamInfo().startTime());\n    EXPECT_EQ(EMPTY_STRING, envoy_quic_session_.nextProtocol());\n\n    // Advance time and trigger update of Dispatcher::approximateMonotonicTime()\n    // because zero QuicTime is considered uninitialized.\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(1), *dispatcher_,\n                                   Event::Dispatcher::RunType::NonBlock);\n    connection_helper_.GetClock()->Now();\n\n    ON_CALL(writer_, WritePacket(_, _, _, _, _))\n        
.WillByDefault(Invoke([](const char*, size_t buf_len, const quic::QuicIpAddress&,\n                                 const quic::QuicSocketAddress&, quic::PerPacketOptions*) {\n          return quic::WriteResult{quic::WRITE_STATUS_OK, static_cast<int>(buf_len)};\n        }));\n    ON_CALL(crypto_stream_helper_, CanAcceptClientHello(_, _, _, _, _)).WillByDefault(Return(true));\n  }\n\n  void SetUp() override {\n    envoy_quic_session_.Initialize();\n    setQuicConfigWithDefaultValues(envoy_quic_session_.config());\n    envoy_quic_session_.OnConfigNegotiated();\n    quic::test::QuicConfigPeer::SetNegotiated(envoy_quic_session_.config(), true);\n    quic::test::QuicConnectionPeer::SetAddressValidated(quic_connection_);\n    // Switch to a encryption forward secure crypto stream.\n    quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr);\n    quic::QuicCryptoServerStreamBase* crypto_stream = nullptr;\n    if (quic_version_[0].handshake_protocol == quic::PROTOCOL_QUIC_CRYPTO) {\n      auto test_crypto_stream = new TestQuicCryptoServerStream(\n          &crypto_config_, &compressed_certs_cache_, &envoy_quic_session_, &crypto_stream_helper_);\n      crypto_stream = test_crypto_stream;\n      crypto_stream_ = test_crypto_stream;\n    } else {\n      auto test_crypto_stream =\n          new TestEnvoyQuicTlsServerHandshaker(&envoy_quic_session_, crypto_config_);\n      crypto_stream = test_crypto_stream;\n      crypto_stream_ = test_crypto_stream;\n    }\n    quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, crypto_stream);\n    quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE);\n    quic_connection_->SetEncrypter(\n        quic::ENCRYPTION_FORWARD_SECURE,\n        std::make_unique<quic::NullEncrypter>(quic::Perspective::IS_SERVER));\n  }\n\n  bool installReadFilter() {\n    // Setup read filter.\n    envoy_quic_session_.addReadFilter(read_filter_);\n    EXPECT_EQ(Http::Protocol::Http3,\n   
           read_filter_->callbacks_->connection().streamInfo().protocol().value());\n    EXPECT_EQ(envoy_quic_session_.id(), read_filter_->callbacks_->connection().id());\n    EXPECT_EQ(&envoy_quic_session_, &read_filter_->callbacks_->connection());\n    read_filter_->callbacks_->connection().addConnectionCallbacks(network_connection_callbacks_);\n    read_filter_->callbacks_->connection().setConnectionStats(\n        {read_total_, read_current_, write_total_, write_current_, nullptr, nullptr});\n    EXPECT_EQ(&read_total_, &quic_connection_->connectionStats().read_total_);\n    EXPECT_CALL(*read_filter_, onNewConnection()).WillOnce(Invoke([this]() {\n      // Create ServerConnection instance and setup callbacks for it.\n      http_connection_ = std::make_unique<QuicHttpServerConnectionImpl>(envoy_quic_session_,\n                                                                        http_connection_callbacks_);\n      EXPECT_EQ(Http::Protocol::Http3, http_connection_->protocol());\n      // Stop iteration to avoid calling getRead/WriteBuffer().\n      return Network::FilterStatus::StopIteration;\n    }));\n    return envoy_quic_session_.initializeReadFilters();\n  }\n\n  quic::QuicStream* createNewStream(Http::MockRequestDecoder& request_decoder,\n                                    Http::MockStreamCallbacks& stream_callbacks) {\n    EXPECT_CALL(http_connection_callbacks_, newStream(_, false))\n        .WillOnce(Invoke([&request_decoder, &stream_callbacks](Http::ResponseEncoder& encoder,\n                                                               bool) -> Http::RequestDecoder& {\n          encoder.getStream().addCallbacks(stream_callbacks);\n          return request_decoder;\n        }));\n    quic::QuicStreamId stream_id =\n        quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 
4u : 5u;\n    return envoy_quic_session_.GetOrCreateStream(stream_id);\n  }\n\n  void TearDown() override {\n    if (quic_connection_->connected()) {\n      EXPECT_CALL(*quic_connection_,\n                  SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n      EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n      EXPECT_CALL(*quic_connection_, SendControlFrame(_))\n          .Times(testing::AtMost(1))\n          .WillOnce(Invoke([](const quic::QuicFrame&) { return false; }));\n      envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush);\n    }\n  }\n\nprotected:\n  Event::SimulatedTimeSystemHelper time_system_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  EnvoyQuicConnectionHelper connection_helper_;\n  EnvoyQuicAlarmFactory alarm_factory_;\n  quic::ParsedQuicVersionVector quic_version_;\n  testing::NiceMock<quic::test::MockPacketWriter> writer_;\n  testing::NiceMock<Network::MockListenerConfig> listener_config_;\n  TestEnvoyQuicServerConnection* quic_connection_;\n  quic::QuicConfig quic_config_;\n  quic::QuicCryptoServerConfig crypto_config_;\n  testing::NiceMock<quic::test::MockQuicCryptoServerStreamHelper> crypto_stream_helper_;\n  ProofSourceDetailsSetter* crypto_stream_;\n  TestEnvoyQuicServerSession envoy_quic_session_;\n  quic::QuicCompressedCertsCache compressed_certs_cache_{100};\n  std::shared_ptr<Network::MockReadFilter> read_filter_;\n  Network::MockConnectionCallbacks network_connection_callbacks_;\n  Http::MockServerConnectionCallbacks http_connection_callbacks_;\n  testing::StrictMock<Stats::MockCounter> read_total_;\n  testing::StrictMock<Stats::MockGauge> read_current_;\n  testing::StrictMock<Stats::MockCounter> write_total_;\n  testing::StrictMock<Stats::MockGauge> write_current_;\n  Http::ServerConnectionPtr http_connection_;\n};\n\nINSTANTIATE_TEST_SUITE_P(EnvoyQuicServerSessionTests, EnvoyQuicServerSessionTest,\n                         
testing::ValuesIn({true, false}));\n\nTEST_P(EnvoyQuicServerSessionTest, NewStream) {\n  installReadFilter();\n\n  Http::MockRequestDecoder request_decoder;\n  EXPECT_CALL(http_connection_callbacks_, newStream(_, false))\n      .WillOnce(testing::ReturnRef(request_decoder));\n  quic::QuicStreamId stream_id =\n      quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u;\n  auto stream =\n      reinterpret_cast<quic::QuicSpdyStream*>(envoy_quic_session_.GetOrCreateStream(stream_id));\n  // Receive a GET request on created stream.\n  quic::QuicHeaderList headers;\n  headers.OnHeaderBlockStart();\n  std::string host(\"www.abc.com\");\n  headers.OnHeader(\":authority\", host);\n  headers.OnHeader(\":method\", \"GET\");\n  headers.OnHeader(\":path\", \"/\");\n  headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);\n  // Request headers should be propagated to decoder.\n  EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true))\n      .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) {\n        EXPECT_EQ(host, decoded_headers->getHostValue());\n        EXPECT_EQ(\"/\", decoded_headers->getPathValue());\n        EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue());\n      }));\n  stream->OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers);\n}\n\nTEST_P(EnvoyQuicServerSessionTest, InvalidIncomingStreamId) {\n  quic::SetVerbosityLogThreshold(1);\n  installReadFilter();\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  // IETF stream 5 and G-Quic stream 2 are server initiated.\n  quic::QuicStreamId stream_id =\n      quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 
5u : 2u;\n  std::string data(\"aaaa\");\n  quic::QuicStreamFrame stream_frame(stream_id, false, 0, data);\n  EXPECT_CALL(http_connection_callbacks_, newStream(_, false)).Times(0);\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket((quic::VersionUsesHttp3(quic_version_[0].transport_version)\n                                             ? quic::QUIC_HTTP_STREAM_WRONG_DIRECTION\n                                             : quic::QUIC_INVALID_STREAM_ID),\n                                        \"Data for nonexistent stream\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n\n  envoy_quic_session_.OnStreamFrame(stream_frame);\n}\n\nTEST_P(EnvoyQuicServerSessionTest, NoNewStreamForInvalidIncomingStream) {\n  installReadFilter();\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  // IETF stream 5 and G-Quic stream 2 are server initiated.\n  quic::QuicStreamId stream_id =\n      quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 5u : 2u;\n  EXPECT_CALL(http_connection_callbacks_, newStream(_, false)).Times(0);\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::VersionUsesHttp3(quic_version_[0].transport_version)\n                                            ? 
quic::QUIC_HTTP_STREAM_WRONG_DIRECTION\n                                            : quic::QUIC_INVALID_STREAM_ID,\n                                        \"Data for nonexistent stream\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n\n  // Stream creation on closed connection should fail.\n  EXPECT_EQ(nullptr, envoy_quic_session_.GetOrCreateStream(stream_id));\n}\n\nTEST_P(EnvoyQuicServerSessionTest, OnResetFrame) {\n  installReadFilter();\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  quic::QuicStream* stream1 = createNewStream(request_decoder, stream_callbacks);\n  quic::QuicRstStreamFrame rst1(/*control_frame_id=*/1u, stream1->id(),\n                                quic::QUIC_ERROR_PROCESSING_STREAM, /*bytes_written=*/0u);\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::RemoteReset, _));\n  if (!quic::VersionUsesHttp3(quic_version_[0].transport_version)) {\n    EXPECT_CALL(*quic_connection_, SendControlFrame(_))\n        .WillOnce(Invoke([stream_id = stream1->id()](const quic::QuicFrame& frame) {\n          EXPECT_EQ(stream_id, frame.rst_stream_frame->stream_id);\n          EXPECT_EQ(quic::QUIC_RST_ACKNOWLEDGEMENT, frame.rst_stream_frame->error_code);\n          return false;\n        }));\n  } else {\n  }\n  stream1->OnStreamReset(rst1);\n\n  EXPECT_CALL(http_connection_callbacks_, newStream(_, false))\n      .WillOnce(Invoke([&request_decoder, &stream_callbacks](Http::ResponseEncoder& encoder,\n                                                             bool) -> Http::RequestDecoder& {\n        encoder.getStream().addCallbacks(stream_callbacks);\n        return request_decoder;\n      }));\n  quic::QuicStream* stream2 = envoy_quic_session_.GetOrCreateStream(stream1->id() + 4u);\n  quic::QuicRstStreamFrame rst2(/*control_frame_id=*/1u, stream2->id(), quic::QUIC_REFUSED_STREAM,\n                                /*bytes_written=*/0u);\n  
EXPECT_CALL(stream_callbacks,\n              onResetStream(Http::StreamResetReason::RemoteRefusedStreamReset, _));\n  stream2->OnStreamReset(rst2);\n}\n\nTEST_P(EnvoyQuicServerSessionTest, ConnectionClose) {\n  installReadFilter();\n\n  std::string error_details(\"dummy details\");\n  quic::QuicErrorCode error(quic::QUIC_INVALID_FRAME_DATA);\n  quic::QuicConnectionCloseFrame frame(quic_version_[0].transport_version, error, error_details,\n                                       /* transport_close_frame_type = */ 0);\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose));\n  quic_connection_->OnConnectionCloseFrame(frame);\n  EXPECT_EQ(absl::StrCat(quic::QuicErrorCodeToString(error), \" with details: \", error_details),\n            envoy_quic_session_.transportFailureReason());\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, ConnectionCloseWithActiveStream) {\n  installReadFilter();\n\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  quic::QuicStream* stream = createNewStream(request_decoder, stream_callbacks);\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));\n  envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush);\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n  EXPECT_TRUE(stream->write_side_closed() && stream->reading_stopped());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, NoFlushWithDataToWrite) {\n  installReadFilter();\n\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  quic::QuicStream* stream = createNewStream(request_decoder, stream_callbacks);\n  
envoy_quic_session_.MarkConnectionLevelWriteBlocked(stream->id());\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));\n  // Even though the stream is write blocked, connection should be closed\n  // immediately.\n  envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush);\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n  EXPECT_TRUE(stream->write_side_closed() && stream->reading_stopped());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, FlushCloseWithDataToWrite) {\n  installReadFilter();\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  quic::QuicStream* stream = createNewStream(request_decoder, stream_callbacks);\n\n  envoy_quic_session_.MarkConnectionLevelWriteBlocked(stream->id());\n  EXPECT_TRUE(envoy_quic_session_.HasDataToWrite());\n  // Connection shouldn't be closed right away as there is a stream write blocked.\n  envoy_quic_session_.close(Network::ConnectionCloseType::FlushWrite);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));\n  // Unblock that stream to trigger actual connection close.\n  envoy_quic_session_.OnCanWrite();\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n  EXPECT_FALSE(quic_connection_->connected());\n}\n\n// Tests that a write event after flush close should update the delay close\n// 
timer.\nTEST_P(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) {\n  installReadFilter();\n  // Drive congestion control manually.\n  auto send_algorithm = new testing::NiceMock<quic::test::MockSendAlgorithm>;\n  quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm);\n  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));\n  EXPECT_CALL(*send_algorithm, GetCongestionWindow()).WillRepeatedly(Return(quic::kDefaultTCPMSS));\n  EXPECT_CALL(*send_algorithm, PacingRate(_)).WillRepeatedly(Return(quic::QuicBandwidth::Zero()));\n  EXPECT_CALL(*send_algorithm, BandwidthEstimate())\n      .WillRepeatedly(Return(quic::QuicBandwidth::Zero()));\n\n  EXPECT_CALL(*quic_connection_, SendControlFrame(_)).Times(AnyNumber());\n\n  // Bump connection flow control window large enough not to interfere\n  // stream writing.\n  envoy_quic_session_.flow_controller()->UpdateSendWindowOffset(\n      10 * quic::kDefaultFlowControlSendWindow);\n\n  envoy_quic_session_.setDelayedCloseTimeout(std::chrono::milliseconds(100));\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  // Create a stream and write enough data to make it blocked.\n  auto stream =\n      dynamic_cast<EnvoyQuicServerStream*>(createNewStream(request_decoder, stream_callbacks));\n\n  // Receive a GET request on created stream.\n  quic::QuicHeaderList request_headers;\n  request_headers.OnHeaderBlockStart();\n  std::string host(\"www.abc.com\");\n  request_headers.OnHeader(\":authority\", host);\n  request_headers.OnHeader(\":method\", \"GET\");\n  request_headers.OnHeader(\":path\", \"/\");\n  request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);\n  // Request headers should be propagated to decoder.\n  EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true))\n      .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) {\n        EXPECT_EQ(host, 
decoded_headers->getHostValue());\n        EXPECT_EQ(\"/\", decoded_headers->getPathValue());\n        EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue());\n      }));\n  stream->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(),\n                             request_headers);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\":content-length\", \"32770\"}}; // 32KB + 2 bytes\n\n  stream->encodeHeaders(response_headers, false);\n  std::string response(32 * 1024 + 1, 'a');\n  Buffer::OwnedImpl buffer(response);\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  stream->encodeData(buffer, false);\n  // Stream become write blocked.\n  EXPECT_TRUE(envoy_quic_session_.HasDataToWrite());\n  EXPECT_TRUE(stream->IsFlowControlBlocked());\n  EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked());\n\n  // Connection shouldn't be closed right away as there is a stream write blocked.\n  envoy_quic_session_.close(Network::ConnectionCloseType::FlushWrite);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  // Another write event without updating flow control window shouldn't trigger\n  // connection close, but it should update the timer.\n  envoy_quic_session_.OnCanWrite();\n  EXPECT_TRUE(envoy_quic_session_.HasDataToWrite());\n\n  // Timer shouldn't fire at original deadline.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  
EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));\n  // Advance the time to fire connection close timer.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n  EXPECT_FALSE(quic_connection_->connected());\n}\n\n// Tests that if delay close timeout is not configured, flush close will not act\n// based on timeout.\nTEST_P(EnvoyQuicServerSessionTest, FlushCloseNoTimeout) {\n  installReadFilter();\n  // Switch to a encryption forward secure crypto stream.\n  quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr);\n  quic::test::QuicServerSessionBasePeer::SetCryptoStream(\n      &envoy_quic_session_,\n      new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_,\n                                     &envoy_quic_session_, &crypto_stream_helper_));\n  quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE);\n  quic_connection_->SetEncrypter(\n      quic::ENCRYPTION_FORWARD_SECURE,\n      std::make_unique<quic::NullEncrypter>(quic::Perspective::IS_SERVER));\n  // Drive congestion control manually.\n  auto send_algorithm = new testing::NiceMock<quic::test::MockSendAlgorithm>;\n  quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm);\n  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));\n  EXPECT_CALL(*send_algorithm, GetCongestionWindow()).WillRepeatedly(Return(quic::kDefaultTCPMSS));\n  EXPECT_CALL(*send_algorithm, PacingRate(_)).WillRepeatedly(Return(quic::QuicBandwidth::Zero()));\n  EXPECT_CALL(*send_algorithm, BandwidthEstimate())\n      .WillRepeatedly(Return(quic::QuicBandwidth::Zero()));\n\n  EXPECT_CALL(*quic_connection_, 
SendControlFrame(_)).Times(AnyNumber());\n\n  // Bump connection flow control window large enough not to interfere\n  // stream writing.\n  envoy_quic_session_.flow_controller()->UpdateSendWindowOffset(\n      10 * quic::kDefaultFlowControlSendWindow);\n\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  // Create a stream and write enough data to make it blocked.\n  auto stream =\n      dynamic_cast<EnvoyQuicServerStream*>(createNewStream(request_decoder, stream_callbacks));\n\n  // Receive a GET request on created stream.\n  quic::QuicHeaderList request_headers;\n  request_headers.OnHeaderBlockStart();\n  std::string host(\"www.abc.com\");\n  request_headers.OnHeader(\":authority\", host);\n  request_headers.OnHeader(\":method\", \"GET\");\n  request_headers.OnHeader(\":path\", \"/\");\n  request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);\n  // Request headers should be propagated to decoder.\n  EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true))\n      .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) {\n        EXPECT_EQ(host, decoded_headers->getHostValue());\n        EXPECT_EQ(\"/\", decoded_headers->getPathValue());\n        EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue());\n      }));\n  stream->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(),\n                             request_headers);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\":content-length\", \"32770\"}}; // 32KB + 2 bytes\n\n  stream->encodeHeaders(response_headers, false);\n  std::string response(32 * 1024 + 1, 'a');\n  Buffer::OwnedImpl buffer(response);\n  stream->encodeData(buffer, true);\n  // Stream become write blocked.\n  EXPECT_TRUE(envoy_quic_session_.HasDataToWrite());\n  
EXPECT_TRUE(stream->IsFlowControlBlocked());\n  EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked());\n\n  // Connection shouldn't be closed right away as there is a stream write blocked.\n  envoy_quic_session_.close(Network::ConnectionCloseType::FlushWrite);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n  // Another write event without updating flow control window shouldn't trigger\n  // connection close.\n  envoy_quic_session_.OnCanWrite();\n  EXPECT_TRUE(envoy_quic_session_.HasDataToWrite());\n\n  // No timeout set, so alarm shouldn't fire.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  // Force close connection.\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));\n  EXPECT_CALL(*quic_connection_, SendControlFrame(_))\n      .Times(testing::AtMost(1))\n      .WillOnce(Invoke([](const quic::QuicFrame&) { return false; }));\n  envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush);\n}\n\nTEST_P(EnvoyQuicServerSessionTest, FlushCloseWithTimeout) {\n  installReadFilter();\n  envoy_quic_session_.setDelayedCloseTimeout(std::chrono::milliseconds(100));\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  quic::QuicStream* stream = createNewStream(request_decoder, stream_callbacks);\n\n  envoy_quic_session_.MarkConnectionLevelWriteBlocked(stream->id());\n  EXPECT_TRUE(envoy_quic_session_.HasDataToWrite());\n  // Connection shouldn't be closed right away as there is a stream write blocked.\n  
envoy_quic_session_.close(Network::ConnectionCloseType::FlushWrite);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  // Advance the time a bit and try to close again. The delay close timer\n  // shouldn't be rescheduled by this call.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  envoy_quic_session_.close(Network::ConnectionCloseType::FlushWriteAndDelay);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));\n  // Advance the time to fire connection close timer.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n  EXPECT_FALSE(quic_connection_->connected());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithTimeout) {\n  installReadFilter();\n  envoy_quic_session_.setDelayedCloseTimeout(std::chrono::milliseconds(100));\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  quic::QuicStream* stream = createNewStream(request_decoder, stream_callbacks);\n\n  envoy_quic_session_.MarkConnectionLevelWriteBlocked(stream->id());\n  EXPECT_TRUE(envoy_quic_session_.HasDataToWrite());\n  // Connection shouldn't be closed right away as there is a stream write blocked.\n  envoy_quic_session_.close(Network::ConnectionCloseType::FlushWriteAndDelay);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n  // Unblocking the stream shouldn't close the 
connection as it should be\n  // delayed.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  envoy_quic_session_.OnCanWrite();\n  // delay close alarm should have been rescheduled.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));\n  // Advance the time to fire connection close timer.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n  EXPECT_FALSE(quic_connection_->connected());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, FlusWriteTransitToFlushWriteWithDelay) {\n  installReadFilter();\n  envoy_quic_session_.setDelayedCloseTimeout(std::chrono::milliseconds(100));\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  quic::QuicStream* stream = createNewStream(request_decoder, stream_callbacks);\n\n  envoy_quic_session_.MarkConnectionLevelWriteBlocked(stream->id());\n  EXPECT_TRUE(envoy_quic_session_.HasDataToWrite());\n  // Connection shouldn't be closed right away as there is a stream write blocked.\n  envoy_quic_session_.close(Network::ConnectionCloseType::FlushWrite);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_,\n                               
  Event::Dispatcher::RunType::NonBlock);\n  // The closing behavior should be changed.\n  envoy_quic_session_.close(Network::ConnectionCloseType::FlushWriteAndDelay);\n  // Unblocking the stream shouldn't close the connection as it should be\n  // delayed.\n  envoy_quic_session_.OnCanWrite();\n\n  // delay close alarm should have been rescheduled.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));\n  // Advance the time to fire connection close timer.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n  EXPECT_FALSE(quic_connection_->connected());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithNoPendingData) {\n  installReadFilter();\n  envoy_quic_session_.setDelayedCloseTimeout(std::chrono::milliseconds(100));\n  // This close should be delayed as configured.\n  envoy_quic_session_.close(Network::ConnectionCloseType::FlushWriteAndDelay);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  // Advance the time a bit and try to close again. 
The delay close timer\n  // shouldn't be rescheduled by this call.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  envoy_quic_session_.close(Network::ConnectionCloseType::FlushWriteAndDelay);\n  EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state());\n\n  EXPECT_CALL(*quic_connection_,\n              SendConnectionClosePacket(quic::QUIC_NO_ERROR, \"Closed by application\"));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n  // Advance the time to fire connection close timer.\n  time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_,\n                                 Event::Dispatcher::RunType::NonBlock);\n  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, ShutdownNotice) {\n  installReadFilter();\n  testing::NiceMock<quic::test::MockHttp3DebugVisitor> debug_visitor;\n  envoy_quic_session_.set_debug_visitor(&debug_visitor);\n  if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) {\n    EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_));\n  } else {\n    // This is a no-op for pre-HTTP3 versions of QUIC.\n  }\n  http_connection_->shutdownNotice();\n}\n\nTEST_P(EnvoyQuicServerSessionTest, GoAway) {\n  installReadFilter();\n  testing::NiceMock<quic::test::MockHttp3DebugVisitor> debug_visitor;\n  envoy_quic_session_.set_debug_visitor(&debug_visitor);\n  if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) {\n    EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_));\n  } else {\n    EXPECT_CALL(*quic_connection_, SendControlFrame(_));\n  }\n  http_connection_->goAway();\n}\n\nTEST_P(EnvoyQuicServerSessionTest, InitializeFilterChain) {\n  Network::MockFilterChain filter_chain;\n  crypto_stream_->setProofSourceDetails(\n      std::make_unique<EnvoyQuicProofSourceDetails>(filter_chain));\n  
std::vector<Network::FilterFactoryCb> filter_factory{[this](\n                                                           Network::FilterManager& filter_manager) {\n    filter_manager.addReadFilter(read_filter_);\n    read_filter_->callbacks_->connection().addConnectionCallbacks(network_connection_callbacks_);\n    read_filter_->callbacks_->connection().setConnectionStats(\n        {read_total_, read_current_, write_total_, write_current_, nullptr, nullptr});\n  }};\n  EXPECT_CALL(filter_chain, networkFilterFactories()).WillOnce(ReturnRef(filter_factory));\n  EXPECT_CALL(*read_filter_, onNewConnection())\n      // Stop iteration to avoid calling getRead/WriteBuffer().\n      .WillOnce(Return(Network::FilterStatus::StopIteration));\n  EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _))\n      .WillOnce(Invoke([](Network::Connection& connection,\n                          const std::vector<Network::FilterFactoryCb>& filter_factories) {\n        EXPECT_EQ(1u, filter_factories.size());\n        Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories);\n        return true;\n      }));\n  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::Connected));\n  if (!quic_version_[0].UsesTls()) {\n    envoy_quic_session_.SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE);\n  } else {\n    EXPECT_CALL(*quic_connection_, SendControlFrame(_));\n    envoy_quic_session_.OnTlsHandshakeComplete();\n  }\n  EXPECT_EQ(nullptr, envoy_quic_session_.socketOptions());\n  EXPECT_TRUE(quic_connection_->connectionSocket()->ioHandle().isOpen());\n  EXPECT_TRUE(quic_connection_->connectionSocket()->ioHandle().close().ok());\n  EXPECT_FALSE(quic_connection_->connectionSocket()->ioHandle().isOpen());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, NetworkConnectionInterface) {\n  installReadFilter();\n  EXPECT_EQ(dispatcher_.get(), &envoy_quic_session_.dispatcher());\n  
EXPECT_TRUE(envoy_quic_session_.readEnabled());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) {\n  // Switch to a encryption forward secure crypto stream.\n  quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr);\n  quic::test::QuicServerSessionBasePeer::SetCryptoStream(\n      &envoy_quic_session_,\n      new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_,\n                                     &envoy_quic_session_, &crypto_stream_helper_));\n  quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE);\n  quic_connection_->SetEncrypter(\n      quic::ENCRYPTION_FORWARD_SECURE,\n      std::make_unique<quic::NullEncrypter>(quic::Perspective::IS_SERVER));\n  // Drive congestion control manually.\n  auto send_algorithm = new testing::NiceMock<quic::test::MockSendAlgorithm>;\n  quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm);\n  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));\n  EXPECT_CALL(*send_algorithm, GetCongestionWindow()).WillRepeatedly(Return(quic::kDefaultTCPMSS));\n  EXPECT_CALL(*send_algorithm, PacingRate(_)).WillRepeatedly(Return(quic::QuicBandwidth::Zero()));\n  EXPECT_CALL(*send_algorithm, BandwidthEstimate())\n      .WillRepeatedly(Return(quic::QuicBandwidth::Zero()));\n  EXPECT_CALL(*quic_connection_, SendControlFrame(_)).Times(AnyNumber());\n\n  // Bump connection flow control window large enough not to interfere\n  // stream writing.\n  envoy_quic_session_.flow_controller()->UpdateSendWindowOffset(\n      10 * quic::kDefaultFlowControlSendWindow);\n  installReadFilter();\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  EXPECT_CALL(http_connection_callbacks_, newStream(_, false))\n      .WillOnce(Invoke([&request_decoder, &stream_callbacks](Http::ResponseEncoder& encoder,\n                                                             bool) -> Http::RequestDecoder& 
{\n        encoder.getStream().addCallbacks(stream_callbacks);\n        return request_decoder;\n      }));\n  quic::QuicStreamId stream_id =\n      quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u;\n  auto stream1 =\n      dynamic_cast<EnvoyQuicServerStream*>(envoy_quic_session_.GetOrCreateStream(stream_id));\n\n  // Receive a GET request on created stream.\n  quic::QuicHeaderList request_headers;\n  request_headers.OnHeaderBlockStart();\n  std::string host(\"www.abc.com\");\n  request_headers.OnHeader(\":authority\", host);\n  request_headers.OnHeader(\":method\", \"GET\");\n  request_headers.OnHeader(\":path\", \"/\");\n  request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);\n  // Request headers should be propagated to decoder.\n  EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true))\n      .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) {\n        EXPECT_EQ(host, decoded_headers->getHostValue());\n        EXPECT_EQ(\"/\", decoded_headers->getPathValue());\n        EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue());\n      }));\n  stream1->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(),\n                              request_headers);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\":content-length\", \"32770\"}}; // 32KB + 2 bytes\n\n  stream1->encodeHeaders(response_headers, false);\n  std::string response(32 * 1024 + 1, 'a');\n  Buffer::OwnedImpl buffer(response);\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  stream1->encodeData(buffer, false);\n  EXPECT_TRUE(stream1->IsFlowControlBlocked());\n  EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked());\n\n  // Receive another request and send back response to trigger connection level\n  // send buffer watermark.\n  
Http::MockRequestDecoder request_decoder2;\n  Http::MockStreamCallbacks stream_callbacks2;\n  EXPECT_CALL(http_connection_callbacks_, newStream(_, false))\n      .WillOnce(Invoke([&request_decoder2, &stream_callbacks2](Http::ResponseEncoder& encoder,\n                                                               bool) -> Http::RequestDecoder& {\n        encoder.getStream().addCallbacks(stream_callbacks2);\n        return request_decoder2;\n      }));\n  auto stream2 =\n      dynamic_cast<EnvoyQuicServerStream*>(envoy_quic_session_.GetOrCreateStream(stream_id + 4));\n  EXPECT_CALL(request_decoder2, decodeHeaders_(_, /*end_stream=*/true))\n      .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) {\n        EXPECT_EQ(host, decoded_headers->getHostValue());\n        EXPECT_EQ(\"/\", decoded_headers->getPathValue());\n        EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue());\n      }));\n  stream2->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(),\n                              request_headers);\n  stream2->encodeHeaders(response_headers, false);\n  // This response will trigger both stream and connection's send buffer watermark upper limits.\n  Buffer::OwnedImpl buffer2(response);\n  EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark)\n      .WillOnce(Invoke(\n          [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); }));\n  EXPECT_CALL(stream_callbacks2, onAboveWriteBufferHighWatermark()).Times(2);\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  stream2->encodeData(buffer2, false);\n\n  // Receive another request, the new stream should be notified about connection\n  // high watermark reached upon creation.\n  Http::MockRequestDecoder request_decoder3;\n  Http::MockStreamCallbacks stream_callbacks3;\n  EXPECT_CALL(http_connection_callbacks_, newStream(_, false))\n      
.WillOnce(Invoke([&request_decoder3, &stream_callbacks3](Http::ResponseEncoder& encoder,\n                                                               bool) -> Http::RequestDecoder& {\n        encoder.getStream().addCallbacks(stream_callbacks3);\n        return request_decoder3;\n      }));\n  EXPECT_CALL(stream_callbacks3, onAboveWriteBufferHighWatermark());\n  auto stream3 =\n      dynamic_cast<EnvoyQuicServerStream*>(envoy_quic_session_.GetOrCreateStream(stream_id + 8));\n  EXPECT_CALL(request_decoder3, decodeHeaders_(_, /*end_stream=*/true))\n      .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) {\n        EXPECT_EQ(host, decoded_headers->getHostValue());\n        EXPECT_EQ(\"/\", decoded_headers->getPathValue());\n        EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue());\n      }));\n  stream3->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(),\n                              request_headers);\n\n  // Update flow control window for stream1.\n  quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, stream1->id(),\n                                             32 * 1024);\n  stream1->OnWindowUpdateFrame(window_update1);\n  EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([stream1]() {\n    // Write rest response to stream1.\n    std::string rest_response(1, 'a');\n    Buffer::OwnedImpl buffer(rest_response);\n    stream1->encodeData(buffer, true);\n  }));\n  envoy_quic_session_.OnCanWrite();\n  EXPECT_TRUE(stream1->IsFlowControlBlocked());\n\n  // Update flow control window for stream2.\n  quic::QuicWindowUpdateFrame window_update2(quic::kInvalidControlFrameId, stream2->id(),\n                                             32 * 1024);\n  stream2->OnWindowUpdateFrame(window_update2);\n  EXPECT_CALL(stream_callbacks2, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([stream2]() {\n    // Write rest response to stream2.\n    
std::string rest_response(1, 'a');\n    Buffer::OwnedImpl buffer(rest_response);\n    stream2->encodeData(buffer, true);\n  }));\n  // Writing out another 16k on stream2 will trigger connection's send buffer\n  // come down below low watermark.\n  EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark)\n      .WillOnce(Invoke([this]() {\n        // This call shouldn't be propagate to stream1 and stream2 because they both wrote to the\n        // end of stream.\n        http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark();\n      }));\n  EXPECT_CALL(stream_callbacks3, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([=]() {\n    std::string super_large_response(40 * 1024, 'a');\n    Buffer::OwnedImpl buffer(super_large_response);\n    // This call will buffer 24k on stream3, raise the buffered bytes above\n    // high watermarks of the stream and connection.\n    // But callback will not propagate to stream_callback3 as the steam is\n    // ended locally.\n    stream3->encodeData(buffer, true);\n  }));\n  EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark());\n  envoy_quic_session_.OnCanWrite();\n  EXPECT_TRUE(stream2->IsFlowControlBlocked());\n\n  // Resetting stream3 should lower the buffered bytes, but callbacks will not\n  // be triggered because reset callback has been already triggered.\n  EXPECT_CALL(stream_callbacks3, onResetStream(Http::StreamResetReason::LocalReset, \"\"));\n  // Connection buffered data book keeping should also be updated.\n  EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark());\n  stream3->resetStream(Http::StreamResetReason::LocalReset);\n\n  // Update flow control window for stream1.\n  quic::QuicWindowUpdateFrame window_update3(quic::kInvalidControlFrameId, stream1->id(),\n                                             48 * 1024);\n  stream1->OnWindowUpdateFrame(window_update3);\n  // Update flow control window for stream2.\n  
quic::QuicWindowUpdateFrame window_update4(quic::kInvalidControlFrameId, stream2->id(),\n                                             48 * 1024);\n  stream2->OnWindowUpdateFrame(window_update4);\n  envoy_quic_session_.OnCanWrite();\n\n  EXPECT_TRUE(stream1->write_side_closed());\n  EXPECT_TRUE(stream2->write_side_closed());\n}\n\nTEST_P(EnvoyQuicServerSessionTest, HeadersContributeToWatermarkGquic) {\n  if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) {\n    installReadFilter();\n    return;\n  }\n  // Switch to a encryption forward secure crypto stream.\n  quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr);\n  quic::test::QuicServerSessionBasePeer::SetCryptoStream(\n      &envoy_quic_session_,\n      new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_,\n                                     &envoy_quic_session_, &crypto_stream_helper_));\n  quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE);\n  quic_connection_->SetEncrypter(\n      quic::ENCRYPTION_FORWARD_SECURE,\n      std::make_unique<quic::NullEncrypter>(quic::Perspective::IS_SERVER));\n  // Drive congestion control manually.\n  auto send_algorithm = new testing::NiceMock<quic::test::MockSendAlgorithm>;\n  quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm);\n  EXPECT_CALL(*send_algorithm, PacingRate(_)).WillRepeatedly(Return(quic::QuicBandwidth::Zero()));\n  EXPECT_CALL(*send_algorithm, BandwidthEstimate())\n      .WillRepeatedly(Return(quic::QuicBandwidth::Zero()));\n  EXPECT_CALL(*quic_connection_, SendControlFrame(_)).Times(AnyNumber());\n\n  // Bump connection flow control window large enough not to interfere\n  // stream writing.\n  envoy_quic_session_.flow_controller()->UpdateSendWindowOffset(\n      10 * quic::kDefaultFlowControlSendWindow);\n  installReadFilter();\n  Http::MockRequestDecoder request_decoder;\n  Http::MockStreamCallbacks stream_callbacks;\n  
EXPECT_CALL(http_connection_callbacks_, newStream(_, false))\n      .WillOnce(Invoke([&request_decoder, &stream_callbacks](Http::ResponseEncoder& encoder,\n                                                             bool) -> Http::RequestDecoder& {\n        encoder.getStream().addCallbacks(stream_callbacks);\n        return request_decoder;\n      }));\n  quic::QuicStreamId stream_id =\n      quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u;\n  auto stream1 =\n      dynamic_cast<EnvoyQuicServerStream*>(envoy_quic_session_.GetOrCreateStream(stream_id));\n\n  // Receive a GET request on created stream.\n  quic::QuicHeaderList request_headers;\n  request_headers.OnHeaderBlockStart();\n  std::string host(\"www.abc.com\");\n  request_headers.OnHeader(\":authority\", host);\n  request_headers.OnHeader(\":method\", \"GET\");\n  request_headers.OnHeader(\":path\", \"/\");\n  request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);\n  // Request headers should be propagated to decoder.\n  EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true))\n      .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) {\n        EXPECT_EQ(host, decoded_headers->getHostValue());\n        EXPECT_EQ(\"/\", decoded_headers->getPathValue());\n        EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue());\n      }));\n  stream1->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(),\n                              request_headers);\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  // Make connection congestion control blocked so headers are buffered.\n  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false));\n  stream1->encodeHeaders(response_headers, false);\n  // Buffer a response slightly smaller than connection level watermark, but\n  // with the previously buffered headers, this write should 
reach high\n  // watermark.\n  std::string response(24 * 1024 - 1, 'a');\n  Buffer::OwnedImpl buffer(response);\n  // Triggered twice, once by stream, the other time by connection.\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()).Times(2);\n  EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark)\n      .WillOnce(Invoke(\n          [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); }));\n  stream1->encodeData(buffer, false);\n  EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked());\n\n  // Write the buffered data out till stream is flow control blocked. Both\n  // stream and connection level buffers should drop below watermark.\n  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));\n  EXPECT_CALL(*send_algorithm, GetCongestionWindow()).WillRepeatedly(Return(quic::kDefaultTCPMSS));\n  EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark)\n      .WillOnce(Invoke(\n          [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); }));\n  EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(2);\n  envoy_quic_session_.OnCanWrite();\n  EXPECT_TRUE(stream1->IsFlowControlBlocked());\n\n  // Buffer more response because of flow control. 
The buffered bytes become just below connection\n  // level high watermark.\n  std::string response1(16 * 1024 - 20, 'a');\n  Buffer::OwnedImpl buffer1(response1);\n  EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());\n  stream1->encodeData(buffer1, false);\n\n  // Make connection congestion control blocked again.\n  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false));\n  // Buffering the trailers will cause connection to reach high watermark.\n  EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark)\n      .WillOnce(Invoke(\n          [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); }));\n  Http::TestResponseTrailerMapImpl response_trailers{{\"trailer-key\", \"trailer-value\"}};\n  stream1->encodeTrailers(response_trailers);\n\n  EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark)\n      .WillOnce(Invoke(\n          [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); }));\n  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::LocalReset, _));\n  stream1->resetStream(Http::StreamResetReason::LocalReset);\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc",
    "content": "#include <string>\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/test_tools/quic_connection_peer.h\"\n#include \"quiche/quic/test_tools/quic_session_peer.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"common/event/libevent_scheduler.h\"\n#include \"common/http/headers.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection_helper.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_connection.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_session.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_server_stream.h\"\n\n#include \"test/extensions/quic_listeners/quiche/test_utils.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/http/stream_decoder.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Quic {\n\nclass EnvoyQuicServerStreamTest : public testing::TestWithParam<bool> {\npublic:\n  EnvoyQuicServerStreamTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")),\n        connection_helper_(*dispatcher_),\n        alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() {\n          SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam());\n          SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam());\n          return quic::CurrentSupportedVersions()[0];\n        }()),\n        listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()),\n                                            POOL_GAUGE(listener_config_.listenerScope()),\n              
                              POOL_HISTOGRAM(listener_config_.listenerScope()))}),\n        quic_connection_(quic::test::TestConnectionId(),\n                         quic::QuicSocketAddress(quic::QuicIpAddress::Any6(), 12345),\n                         connection_helper_, alarm_factory_, &writer_,\n                         /*owns_writer=*/false, {quic_version_}, *listener_config_.socket_),\n        quic_session_(quic_config_, {quic_version_}, &quic_connection_, *dispatcher_,\n                      quic_config_.GetInitialStreamFlowControlWindowToSend() * 2),\n        stream_id_(VersionUsesHttp3(quic_version_.transport_version) ? 4u : 5u),\n        quic_stream_(new EnvoyQuicServerStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL)),\n        response_headers_{{\":status\", \"200\"}, {\"response-key\", \"response-value\"}},\n        response_trailers_{{\"trailer-key\", \"trailer-value\"}} {\n    quic_stream_->setRequestDecoder(stream_decoder_);\n    quic_stream_->addCallbacks(stream_callbacks_);\n    quic::test::QuicConnectionPeer::SetAddressValidated(&quic_connection_);\n    quic_session_.ActivateStream(std::unique_ptr<EnvoyQuicServerStream>(quic_stream_));\n    EXPECT_CALL(quic_session_, ShouldYield(_)).WillRepeatedly(testing::Return(false));\n    EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n        .WillRepeatedly(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset,\n                                  quic::StreamSendingState state, bool,\n                                  quiche::QuicheOptional<quic::EncryptionLevel>) {\n          return quic::QuicConsumedData{write_length, state != quic::NO_FIN};\n        }));\n    EXPECT_CALL(writer_, WritePacket(_, _, _, _, _))\n        .WillRepeatedly(Invoke([](const char*, size_t buf_len, const quic::QuicIpAddress&,\n                                  const quic::QuicSocketAddress&, quic::PerPacketOptions*) {\n          return quic::WriteResult{quic::WRITE_STATUS_OK, 
static_cast<int>(buf_len)};\n        }));\n  }\n\n  void SetUp() override {\n    quic_session_.Initialize();\n    setQuicConfigWithDefaultValues(quic_session_.config());\n    quic_session_.OnConfigNegotiated();\n    request_headers_.OnHeaderBlockStart();\n    request_headers_.OnHeader(\":authority\", host_);\n    request_headers_.OnHeader(\":method\", \"POST\");\n    request_headers_.OnHeader(\":path\", \"/\");\n    request_headers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0,\n                                      /*compressed_header_bytes=*/0);\n\n    trailers_.OnHeaderBlockStart();\n    trailers_.OnHeader(\"key1\", \"value1\");\n    if (!quic::VersionUsesHttp3(quic_version_.transport_version)) {\n      // \":final-offset\" is required and stripped off by quic.\n      trailers_.OnHeader(\":final-offset\", absl::StrCat(\"\", request_body_.length()));\n    }\n    trailers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);\n  }\n\n  void TearDown() override {\n    if (quic_connection_.connected()) {\n      quic_session_.close(Network::ConnectionCloseType::NoFlush);\n    }\n  }\n\n  std::string bodyToStreamPayload(const std::string& body) {\n    std::string data = body;\n    if (quic::VersionUsesHttp3(quic_version_.transport_version)) {\n      std::unique_ptr<char[]> data_buffer;\n      quic::QuicByteCount data_frame_header_length =\n          quic::HttpEncoder::SerializeDataFrameHeader(body.length(), &data_buffer);\n      quiche::QuicheStringPiece data_frame_header(data_buffer.get(), data_frame_header_length);\n      data = absl::StrCat(data_frame_header, body);\n    }\n    return data;\n  }\n\n  size_t sendRequest(const std::string& payload, bool fin, size_t decoder_buffer_high_watermark) {\n    EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false))\n        .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) {\n          EXPECT_EQ(host_, headers->getHostValue());\n          EXPECT_EQ(\"/\", 
headers->getPathValue());\n          EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue());\n        }));\n    quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(),\n                                     request_headers_);\n    EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());\n\n    EXPECT_CALL(stream_decoder_, decodeData(_, _))\n        .WillOnce(Invoke([&](Buffer::Instance& buffer, bool finished_reading) {\n          EXPECT_EQ(payload, buffer.toString());\n          EXPECT_EQ(fin, finished_reading);\n          if (!finished_reading && buffer.length() > decoder_buffer_high_watermark) {\n            quic_stream_->readDisable(true);\n          }\n        }));\n    std::string data = bodyToStreamPayload(payload);\n    quic::QuicStreamFrame frame(stream_id_, fin, 0, data);\n    quic_stream_->OnStreamFrame(frame);\n    return data.length();\n  }\n\nprotected:\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  EnvoyQuicConnectionHelper connection_helper_;\n  EnvoyQuicAlarmFactory alarm_factory_;\n  testing::NiceMock<quic::test::MockPacketWriter> writer_;\n  quic::ParsedQuicVersion quic_version_;\n  quic::QuicConfig quic_config_;\n  testing::NiceMock<Network::MockListenerConfig> listener_config_;\n  Server::ListenerStats listener_stats_;\n  EnvoyQuicServerConnection quic_connection_;\n  MockEnvoyQuicSession quic_session_;\n  quic::QuicStreamId stream_id_;\n  EnvoyQuicServerStream* quic_stream_;\n  Http::MockRequestDecoder stream_decoder_;\n  Http::MockStreamCallbacks stream_callbacks_;\n  quic::QuicHeaderList request_headers_;\n  Http::TestResponseHeaderMapImpl response_headers_;\n  Http::TestResponseTrailerMapImpl response_trailers_;\n  quic::QuicHeaderList trailers_;\n  std::string host_{\"www.abc.com\"};\n  std::string request_body_{\"Hello world\"};\n};\n\nINSTANTIATE_TEST_SUITE_P(EnvoyQuicServerStreamTests, EnvoyQuicServerStreamTest,\n                         testing::ValuesIn({true, 
false}));\n\nTEST_P(EnvoyQuicServerStreamTest, GetRequestAndResponse) {\n  quic::QuicHeaderList request_headers;\n  request_headers.OnHeaderBlockStart();\n  request_headers.OnHeader(\":authority\", host_);\n  request_headers.OnHeader(\":method\", \"GET\");\n  request_headers.OnHeader(\":path\", \"/\");\n  request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0,\n                                   /*compressed_header_bytes=*/0);\n\n  EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/true))\n      .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) {\n        EXPECT_EQ(host_, headers->getHostValue());\n        EXPECT_EQ(\"/\", headers->getPathValue());\n        EXPECT_EQ(Http::Headers::get().MethodValues.Get, headers->getMethodValue());\n      }));\n  quic_stream_->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(),\n                                   request_headers);\n  EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());\n  quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/true);\n}\n\nTEST_P(EnvoyQuicServerStreamTest, PostRequestAndResponse) {\n  EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions());\n  sendRequest(request_body_, true, request_body_.size() * 2);\n  quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false);\n  quic_stream_->encodeTrailers(response_trailers_);\n}\n\nTEST_P(EnvoyQuicServerStreamTest, DecodeHeadersBodyAndTrailers) {\n  sendRequest(request_body_, false, request_body_.size() * 2);\n  EXPECT_CALL(stream_decoder_, decodeTrailers_(_))\n      .WillOnce(Invoke([](const Http::RequestTrailerMapPtr& headers) {\n        Http::LowerCaseString key1(\"key1\");\n        Http::LowerCaseString key2(\":final-offset\");\n        EXPECT_EQ(\"value1\", headers->get(key1)->value().getStringView());\n        EXPECT_EQ(nullptr, headers->get(key2));\n      }));\n  quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), 
trailers_);\n  EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n}\n\nTEST_P(EnvoyQuicServerStreamTest, OutOfOrderTrailers) {\n  EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n  if (quic::VersionUsesHttp3(quic_version_.transport_version)) {\n    return;\n  }\n  EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false))\n      .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) {\n        EXPECT_EQ(host_, headers->getHostValue());\n        EXPECT_EQ(\"/\", headers->getPathValue());\n        EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue());\n      }));\n  quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(),\n                                   request_headers_);\n  EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());\n\n  // Trailer should be delivered to HCM later after body arrives.\n  quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_);\n\n  std::string data = bodyToStreamPayload(request_body_);\n  quic::QuicStreamFrame frame(stream_id_, false, 0, data);\n  EXPECT_CALL(stream_decoder_, decodeData(_, _))\n      .WillOnce(Invoke([this](Buffer::Instance& buffer, bool finished_reading) {\n        EXPECT_EQ(request_body_, buffer.toString());\n        EXPECT_FALSE(finished_reading);\n      }));\n\n  EXPECT_CALL(stream_decoder_, decodeTrailers_(_))\n      .WillOnce(Invoke([](const Http::RequestTrailerMapPtr& headers) {\n        Http::LowerCaseString key1(\"key1\");\n        Http::LowerCaseString key2(\":final-offset\");\n        EXPECT_EQ(\"value1\", headers->get(key1)->value().getStringView());\n        EXPECT_EQ(nullptr, headers->get(key2));\n      }));\n  quic_stream_->OnStreamFrame(frame);\n}\n\nTEST_P(EnvoyQuicServerStreamTest, ReadDisableUponLargePost) {\n  std::string large_request(1024, 'a');\n  // Sending such large request will cause read to be disabled.\n  size_t payload_offset = sendRequest(large_request, 
false, 512);\n  EXPECT_FALSE(quic_stream_->HasBytesToRead());\n  // Disable reading one more time.\n  quic_stream_->readDisable(true);\n  std::string second_part_request = bodyToStreamPayload(\"bbb\");\n  // Receiving more data shouldn't push the receiving pipe line as the stream\n  // should have been marked blocked.\n  quic::QuicStreamFrame frame(stream_id_, false, payload_offset, second_part_request);\n  EXPECT_CALL(stream_decoder_, decodeData(_, _)).Times(0);\n  quic_stream_->OnStreamFrame(frame);\n\n  // Re-enable reading just once shouldn't unblock stream.\n  quic_stream_->readDisable(false);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  // This data frame should also be buffered.\n  std::string last_part_request = bodyToStreamPayload(\"ccc\");\n  quic::QuicStreamFrame frame2(stream_id_, true, payload_offset + second_part_request.length(),\n                               last_part_request);\n  quic_stream_->OnStreamFrame(frame2);\n\n  // Unblock stream now. The remaining data in the receiving buffer should be\n  // pushed to upstream.\n  EXPECT_CALL(stream_decoder_, decodeData(_, _))\n      .WillOnce(Invoke([](Buffer::Instance& buffer, bool finished_reading) {\n        std::string rest_request = \"bbbccc\";\n        EXPECT_EQ(rest_request.size(), buffer.length());\n        EXPECT_EQ(rest_request, buffer.toString());\n        EXPECT_TRUE(finished_reading);\n      }));\n  quic_stream_->readDisable(false);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n\n  EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n}\n\n// Tests that ReadDisable() doesn't cause re-entry of OnBodyAvailable().\nTEST_P(EnvoyQuicServerStreamTest, ReadDisableAndReEnableImmediately) {\n  EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false))\n      .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) {\n        EXPECT_EQ(host_, headers->getHostValue());\n        EXPECT_EQ(\"/\", headers->getPathValue());\n        
EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue());\n      }));\n  quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(),\n                                   request_headers_);\n  EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());\n\n  std::string payload(1024, 'a');\n  EXPECT_CALL(stream_decoder_, decodeData(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool finished_reading) {\n        EXPECT_EQ(payload, buffer.toString());\n        EXPECT_FALSE(finished_reading);\n        quic_stream_->readDisable(true);\n        // Re-enable reading should not trigger another decodeData.\n        quic_stream_->readDisable(false);\n      }));\n  std::string data = bodyToStreamPayload(payload);\n  quic::QuicStreamFrame frame(stream_id_, false, 0, data);\n  quic_stream_->OnStreamFrame(frame);\n\n  std::string last_part_request = bodyToStreamPayload(\"bbb\");\n  quic::QuicStreamFrame frame2(stream_id_, true, data.length(), last_part_request);\n  EXPECT_CALL(stream_decoder_, decodeData(_, _))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool finished_reading) {\n        EXPECT_EQ(\"bbb\", buffer.toString());\n        EXPECT_TRUE(finished_reading);\n      }));\n\n  quic_stream_->OnStreamFrame(frame2);\n  EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n}\n\n// Tests that the stream with a send buffer whose high limit is 16k and low\n// limit is 8k sends over 32kB response.\nTEST_P(EnvoyQuicServerStreamTest, WatermarkSendBuffer) {\n  sendRequest(request_body_, true, request_body_.size() * 2);\n\n  // Bump connection flow control window large enough not to cause connection\n  // level flow control blocked.\n  quic::QuicWindowUpdateFrame window_update(\n      quic::kInvalidControlFrameId,\n      quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024);\n  quic_session_.OnWindowUpdateFrame(window_update);\n\n  // 32KB + 2 byte. 
The initial stream flow control window is 16k.\n  response_headers_.addCopy(\":content-length\", \"32770\");\n  quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false);\n\n  // Encode 32kB response body. first 16KB should be written out right away. The\n  // rest should be buffered. The high watermark is 16KB, so this call should\n  // make the send buffer reach its high watermark.\n  std::string response(32 * 1024 + 1, 'a');\n  Buffer::OwnedImpl buffer(response);\n  EXPECT_CALL(stream_callbacks_, onAboveWriteBufferHighWatermark());\n  quic_stream_->encodeData(buffer, false);\n\n  EXPECT_EQ(0u, buffer.length());\n  EXPECT_TRUE(quic_stream_->IsFlowControlBlocked());\n\n  // Receive a WINDOW_UPDATE frame not large enough to drain half of the send\n  // buffer.\n  quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(),\n                                             16 * 1024 + 8 * 1024);\n  quic_stream_->OnWindowUpdateFrame(window_update1);\n  EXPECT_FALSE(quic_stream_->IsFlowControlBlocked());\n  quic_session_.OnCanWrite();\n  EXPECT_TRUE(quic_stream_->IsFlowControlBlocked());\n\n  // Receive another WINDOW_UPDATE frame to drain the send buffer till below low\n  // watermark.\n  quic::QuicWindowUpdateFrame window_update2(quic::kInvalidControlFrameId, quic_stream_->id(),\n                                             16 * 1024 + 8 * 1024 + 1024);\n  quic_stream_->OnWindowUpdateFrame(window_update2);\n  EXPECT_FALSE(quic_stream_->IsFlowControlBlocked());\n  EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([this]() {\n    std::string rest_response(1, 'a');\n    Buffer::OwnedImpl buffer(rest_response);\n    quic_stream_->encodeData(buffer, true);\n  }));\n  quic_session_.OnCanWrite();\n  EXPECT_TRUE(quic_stream_->IsFlowControlBlocked());\n\n  quic::QuicWindowUpdateFrame window_update3(quic::kInvalidControlFrameId, quic_stream_->id(),\n                                             32 * 1024 + 
1024);\n  quic_stream_->OnWindowUpdateFrame(window_update3);\n  quic_session_.OnCanWrite();\n\n  EXPECT_TRUE(quic_stream_->local_end_stream_);\n  EXPECT_TRUE(quic_stream_->write_side_closed());\n}\n\nTEST_P(EnvoyQuicServerStreamTest, HeadersContributeToWatermarkIquic) {\n  if (!quic::VersionUsesHttp3(quic_version_.transport_version)) {\n    EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n    return;\n  }\n\n  sendRequest(request_body_, true, request_body_.size() * 2);\n\n  // Bump connection flow control window large enough not to cause connection level flow control\n  // blocked\n  quic::QuicWindowUpdateFrame window_update(\n      quic::kInvalidControlFrameId,\n      quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024);\n  quic_session_.OnWindowUpdateFrame(window_update);\n\n  // Make the stream blocked by congestion control.\n  EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n      .WillOnce(Invoke([](quic::QuicStreamId, size_t /*write_length*/, quic::QuicStreamOffset,\n                          quic::StreamSendingState state, bool,\n                          quiche::QuicheOptional<quic::EncryptionLevel>) {\n        return quic::QuicConsumedData{0u, state != quic::NO_FIN};\n      }));\n  quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false);\n\n  // Encode 16kB -10 bytes request body. 
Because the high watermark is 16KB, with previously\n  // buffered headers, this call should make the send buffers reach their high watermark.\n  std::string response(16 * 1024 - 10, 'a');\n  Buffer::OwnedImpl buffer(response);\n  EXPECT_CALL(stream_callbacks_, onAboveWriteBufferHighWatermark());\n  quic_stream_->encodeData(buffer, false);\n  EXPECT_EQ(0u, buffer.length());\n\n  // Unblock writing now, and this will write out 16kB data and cause stream to\n  // be blocked by the flow control limit.\n  EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n      .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset,\n                          quic::StreamSendingState state, bool,\n                          quiche::QuicheOptional<quic::EncryptionLevel>) {\n        return quic::QuicConsumedData{write_length, state != quic::NO_FIN};\n      }));\n  EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark());\n  quic_session_.OnCanWrite();\n  EXPECT_TRUE(quic_stream_->IsFlowControlBlocked());\n\n  // Update flow control window to write all the buffered data.\n  quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(),\n                                             32 * 1024);\n  quic_stream_->OnWindowUpdateFrame(window_update1);\n  EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n      .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset,\n                          quic::StreamSendingState state, bool,\n                          quiche::QuicheOptional<quic::EncryptionLevel>) {\n        return quic::QuicConsumedData{write_length, state != quic::NO_FIN};\n      }));\n  quic_session_.OnCanWrite();\n  // No data should be buffered at this point.\n\n  EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _))\n      .WillRepeatedly(Invoke([](quic::QuicStreamId, size_t, quic::QuicStreamOffset,\n                                quic::StreamSendingState state, bool,\n     
                           quiche::QuicheOptional<quic::EncryptionLevel>) {\n        return quic::QuicConsumedData{0u, state != quic::NO_FIN};\n      }));\n  // Send more data. If watermark bytes counting were not cleared in previous\n  // OnCanWrite, this write would have caused the stream to exceed its high watermark.\n  std::string response1(16 * 1024 - 3, 'a');\n  Buffer::OwnedImpl buffer1(response1);\n  quic_stream_->encodeData(buffer1, false);\n  // Buffering more trailers will cause stream to reach high watermark, but\n  // because trailers closes the stream, no callback should be triggered.\n  quic_stream_->encodeTrailers(response_trailers_);\n\n  EXPECT_CALL(stream_callbacks_, onResetStream(_, _));\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_simulated_watermark_buffer_test.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_simulated_watermark_buffer.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nclass EnvoyQuicSimulatedWatermarkBufferTest : public ::testing::Test,\n                                              protected Logger::Loggable<Logger::Id::testing> {\npublic:\n  EnvoyQuicSimulatedWatermarkBufferTest()\n      : simulated_watermark_buffer_(\n            low_watermark_, high_watermark_, [this]() { onBelowLowWatermark(); },\n            [this]() { onAboveHighWatermark(); }, ENVOY_LOGGER()) {}\n\n  void onAboveHighWatermark() { ++above_high_watermark_; }\n\n  void onBelowLowWatermark() { ++below_low_watermark_; }\n\nprotected:\n  size_t above_high_watermark_{0};\n  size_t below_low_watermark_{0};\n  uint32_t high_watermark_{100};\n  uint32_t low_watermark_{60};\n  EnvoyQuicSimulatedWatermarkBuffer simulated_watermark_buffer_;\n};\n\nTEST_F(EnvoyQuicSimulatedWatermarkBufferTest, InitialState) {\n  EXPECT_TRUE(simulated_watermark_buffer_.isBelowLowWatermark());\n  EXPECT_FALSE(simulated_watermark_buffer_.isAboveHighWatermark());\n  EXPECT_EQ(high_watermark_, simulated_watermark_buffer_.highWatermark());\n}\n\nTEST_F(EnvoyQuicSimulatedWatermarkBufferTest, GoAboveHighWatermarkAndComeDown) {\n  simulated_watermark_buffer_.checkHighWatermark(low_watermark_ + 1);\n  EXPECT_EQ(0U, above_high_watermark_);\n  // Even though the buffered data is above low watermark, the buffer is still regarded\n  // as below watermark because it didn't reach high watermark.\n  EXPECT_TRUE(simulated_watermark_buffer_.isBelowLowWatermark());\n  simulated_watermark_buffer_.checkLowWatermark(low_watermark_ - 1);\n  // Going down below low watermark shouldn't trigger callback as it never\n  // reached high watermark.\n  EXPECT_EQ(0U, below_low_watermark_);\n\n  simulated_watermark_buffer_.checkHighWatermark(high_watermark_ + 1);\n  EXPECT_EQ(1U, above_high_watermark_);\n  
EXPECT_TRUE(simulated_watermark_buffer_.isAboveHighWatermark());\n  EXPECT_FALSE(simulated_watermark_buffer_.isBelowLowWatermark());\n\n  simulated_watermark_buffer_.checkHighWatermark(high_watermark_ + 10);\n  EXPECT_EQ(1U, above_high_watermark_);\n\n  simulated_watermark_buffer_.checkLowWatermark(low_watermark_);\n  EXPECT_EQ(0U, below_low_watermark_);\n\n  simulated_watermark_buffer_.checkHighWatermark(high_watermark_ + 10);\n  // Crossing high watermark continuously shouldn't trigger callback.\n  EXPECT_EQ(1U, above_high_watermark_);\n\n  // Crossing low watermark after coming down from high watermark should trigger\n  // callback and change status.\n  simulated_watermark_buffer_.checkLowWatermark(low_watermark_ - 1);\n  EXPECT_EQ(1U, below_low_watermark_);\n  EXPECT_TRUE(simulated_watermark_buffer_.isBelowLowWatermark());\n  EXPECT_FALSE(simulated_watermark_buffer_.isAboveHighWatermark());\n}\n\nTEST_F(EnvoyQuicSimulatedWatermarkBufferTest, NoWatermarkSpecified) {\n  EnvoyQuicSimulatedWatermarkBuffer buffer(\n      0, 0, [this]() { onBelowLowWatermark(); }, [this]() { onAboveHighWatermark(); },\n      ENVOY_LOGGER());\n  buffer.checkHighWatermark(10);\n  EXPECT_EQ(0U, above_high_watermark_);\n\n  simulated_watermark_buffer_.checkLowWatermark(0);\n  EXPECT_EQ(0U, below_low_watermark_);\n  EXPECT_TRUE(simulated_watermark_buffer_.isBelowLowWatermark());\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/test_tools/quic_test_utils.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Quic {\n\nTEST(EnvoyQuicUtilsTest, ConversionBetweenQuicAddressAndEnvoyAddress) {\n  // Mock out socket() system call to test both V4 and V6 address conversion.\n  testing::NiceMock<Envoy::Api::MockOsSysCalls> os_sys_calls;\n  TestThreadsafeSingletonInjector<Envoy::Api::OsSysCallsImpl> os_calls{&os_sys_calls};\n  ON_CALL(os_sys_calls, socket(_, _, _)).WillByDefault(Return(Api::SysCallSocketResult{1, 0}));\n  ON_CALL(os_sys_calls, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0}));\n\n  quic::QuicSocketAddress quic_uninitialized_addr;\n  EXPECT_EQ(nullptr, quicAddressToEnvoyAddressInstance(quic_uninitialized_addr));\n\n  for (const std::string& ip_str : {\"fd00:0:0:1::1\", \"1.2.3.4\"}) {\n    quic::QuicIpAddress quic_ip;\n    quic_ip.FromString(ip_str);\n    quic::QuicSocketAddress quic_addr(quic_ip, 12345);\n    Network::Address::InstanceConstSharedPtr envoy_addr =\n        quicAddressToEnvoyAddressInstance(quic_addr);\n    EXPECT_EQ(quic_addr.ToString(), envoy_addr->asStringView());\n    EXPECT_EQ(quic_addr, envoyIpAddressToQuicSocketAddress(envoy_addr->ip()));\n  }\n}\n\nTEST(EnvoyQuicUtilsTest, HeadersConversion) {\n  spdy::SpdyHeaderBlock headers_block;\n  headers_block[\":authority\"] = \"www.google.com\";\n  headers_block[\":path\"] = \"/index.hml\";\n  headers_block[\":scheme\"] = \"https\";\n  auto envoy_headers = 
spdyHeaderBlockToEnvoyHeaders<Http::RequestHeaderMapImpl>(headers_block);\n  EXPECT_EQ(headers_block.size(), envoy_headers->size());\n  EXPECT_EQ(\"www.google.com\", envoy_headers->getHostValue());\n  EXPECT_EQ(\"/index.hml\", envoy_headers->getPathValue());\n  EXPECT_EQ(\"https\", envoy_headers->getSchemeValue());\n\n  quic::QuicHeaderList quic_headers = quic::test::AsHeaderList(headers_block);\n  auto envoy_headers2 = quicHeadersToEnvoyHeaders<Http::RequestHeaderMapImpl>(quic_headers);\n  EXPECT_EQ(*envoy_headers, *envoy_headers2);\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc",
    "content": "#include <sys/types.h>\n\n#include <memory>\n#include <string>\n\n#include \"common/network/address_impl.h\"\n#include \"common/network/io_socket_error_impl.h\"\n#include \"common/network/udp_packet_writer_handler_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_packet_writer.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Quic {\n\nclass EnvoyQuicWriterTest : public ::testing::Test {\npublic:\n  EnvoyQuicWriterTest()\n      : envoy_quic_writer_(std::make_unique<Network::UdpDefaultWriter>(socket_.ioHandle())) {\n    self_address_.FromString(\"::\");\n    quic::QuicIpAddress peer_ip;\n    peer_ip.FromString(\"::1\");\n    peer_address_ = quic::QuicSocketAddress(peer_ip, /*port=*/123);\n    ON_CALL(os_sys_calls_, socket(_, _, _)).WillByDefault(Return(Api::SysCallSocketResult{3, 0}));\n    ON_CALL(os_sys_calls_, close(3)).WillByDefault(Return(Api::SysCallIntResult{0, 0}));\n  }\n\n  void verifySendData(const std::string& content, const msghdr* message) {\n    EXPECT_EQ(peer_address_.ToString(), Network::Address::addressFromSockAddr(\n                                            *reinterpret_cast<sockaddr_storage*>(message->msg_name),\n                                            message->msg_namelen, /*v6only=*/false)\n                                            ->asString());\n    cmsghdr* const cmsg = CMSG_FIRSTHDR(message);\n    auto pktinfo = reinterpret_cast<in6_pktinfo*>(CMSG_DATA(cmsg));\n    EXPECT_EQ(0, memcmp(self_address_.GetIPv6().s6_addr, pktinfo->ipi6_addr.s6_addr,\n                        sizeof(pktinfo->ipi6_addr.s6_addr)));\n    EXPECT_EQ(1, message->msg_iovlen);\n    iovec iov = message->msg_iov[0];\n    EXPECT_EQ(content, std::string(reinterpret_cast<char*>(iov.iov_base), iov.iov_len));\n  }\n\nprotected:\n 
 testing::NiceMock<Api::MockOsSysCalls> os_sys_calls_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls_{&os_sys_calls_};\n  testing::NiceMock<Network::MockListenSocket> socket_;\n  quic::QuicIpAddress self_address_;\n  quic::QuicSocketAddress peer_address_;\n  EnvoyQuicPacketWriter envoy_quic_writer_;\n};\n\nTEST_F(EnvoyQuicWriterTest, AssertOnNonNullPacketOption) {\n  std::string str(\"Hello World!\");\n  EXPECT_DEBUG_DEATH(envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_,\n                                                    peer_address_,\n                                                    reinterpret_cast<quic::PerPacketOptions*>(0x1)),\n                     \"\");\n}\n\nTEST_F(EnvoyQuicWriterTest, SendSuccessfully) {\n  std::string str(\"Hello World!\");\n\n  EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _))\n      .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) {\n        verifySendData(str, message);\n        return Api::SysCallSizeResult{static_cast<ssize_t>(str.length()), 0};\n      }));\n  quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_,\n                                                            peer_address_, nullptr);\n  EXPECT_EQ(quic::WRITE_STATUS_OK, result.status);\n  EXPECT_EQ(str.length(), result.bytes_written);\n  EXPECT_FALSE(envoy_quic_writer_.IsWriteBlocked());\n}\n\nTEST_F(EnvoyQuicWriterTest, SendBlocked) {\n  std::string str(\"Hello World!\");\n  EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _))\n      .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) {\n        verifySendData(str, message);\n        return Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN};\n      }));\n  quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_,\n                                                            peer_address_, nullptr);\n  EXPECT_EQ(quic::WRITE_STATUS_BLOCKED, result.status);\n  
EXPECT_EQ(static_cast<int>(Api::IoError::IoErrorCode::Again), result.error_code);\n  EXPECT_TRUE(envoy_quic_writer_.IsWriteBlocked());\n  // Writing while blocked is not allowed.\n#ifdef NDEBUG\n  EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _))\n      .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) {\n        verifySendData(str, message);\n        return Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN};\n      }));\n#endif\n  EXPECT_DEBUG_DEATH(envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_,\n                                                    peer_address_, nullptr),\n                     \"\");\n  envoy_quic_writer_.SetWritable();\n  EXPECT_FALSE(envoy_quic_writer_.IsWriteBlocked());\n}\n\nTEST_F(EnvoyQuicWriterTest, SendFailure) {\n  std::string str(\"Hello World!\");\n  EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _))\n      .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) {\n        verifySendData(str, message);\n        return Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP};\n      }));\n  quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_,\n                                                            peer_address_, nullptr);\n  EXPECT_EQ(quic::WRITE_STATUS_ERROR, result.status);\n  EXPECT_EQ(static_cast<int>(Api::IoError::IoErrorCode::NoSupport), result.error_code);\n  EXPECT_FALSE(envoy_quic_writer_.IsWriteBlocked());\n}\n\nTEST_F(EnvoyQuicWriterTest, SendFailureMessageTooBig) {\n  std::string str(\"Hello World!\");\n  EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _))\n      .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) {\n        verifySendData(str, message);\n        return Api::SysCallSizeResult{-1, SOCKET_ERROR_MSG_SIZE};\n      }));\n  quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_,\n                                                            peer_address_, nullptr);\n  // 
Currently MessageSize should be propagated through error_code. This test\n  // would fail if QUICHE changes to propagate through status in the future.\n  EXPECT_EQ(quic::WRITE_STATUS_ERROR, result.status);\n  EXPECT_EQ(static_cast<int>(Api::IoError::IoErrorCode::MessageTooBig), result.error_code);\n  EXPECT_FALSE(envoy_quic_writer_.IsWriteBlocked());\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/integration/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"quic_http_integration_test\",\n    size = \"medium\",\n    srcs = [\"quic_http_integration_test.cc\"],\n    data = [\"//test/config/integration/certs\"],\n    tags = [\n        \"fails_on_windows\",\n        \"nofips\",\n    ],\n    deps = [\n        \"//source/extensions/filters/http/dynamo:config\",\n        \"//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib\",\n        \"//source/extensions/quic_listeners/quiche:codec_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_client_connection_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_client_session_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib\",\n        \"//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib\",\n        \"//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib\",\n        \"//source/extensions/resource_monitors/injected_resource:config\",\n        \"//test/extensions/quic_listeners/quiche:quic_test_utils_for_envoy_lib\",\n        \"//test/extensions/quic_listeners/quiche:test_utils_lib\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/overload/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc",
    "content": "#include <openssl/x509_vfy.h>\n\n#include <cstddef>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/overload/v3/overload.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/ssl_utility.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/http/quic_client_push_promise_index.h\"\n#include \"quiche/quic/core/quic_utils.h\"\n#include \"quiche/quic/test_tools/quic_test_utils.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_session.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_client_connection.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_connection_helper.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_packet_writer.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"extensions/quic_listeners/quiche/quic_transport_socket_factory.h\"\n#include \"test/extensions/quic_listeners/quiche/test_utils.h\"\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nclass CodecClientCallbacksForTest : public Http::CodecClientCallbacks {\npublic:\n  void onStreamDestroy() override {}\n\n  void onStreamReset(Http::StreamResetReason reason) override {\n    last_stream_reset_reason_ = reason;\n  }\n\n  
Http::StreamResetReason last_stream_reset_reason_{Http::StreamResetReason::LocalReset};\n};\n\nvoid updateResource(AtomicFileUpdater& updater, double pressure) {\n  updater.update(absl::StrCat(pressure));\n}\n\nstd::unique_ptr<QuicClientTransportSocketFactory>\ncreateQuicClientTransportSocketFactory(const Ssl::ClientSslTransportOptions& options, Api::Api& api,\n                                       const std::string& san_to_match) {\n  std::string yaml_plain = R\"EOF(\n  common_tls_context:\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/cacert.pem\"\n)EOF\";\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml_plain), tls_context);\n  auto* common_context = tls_context.mutable_common_tls_context();\n\n  if (options.alpn_) {\n    common_context->add_alpn_protocols(\"h3\");\n  }\n  if (options.san_) {\n    common_context->mutable_validation_context()->add_match_subject_alt_names()->set_exact(\n        san_to_match);\n  }\n  for (const std::string& cipher_suite : options.cipher_suites_) {\n    common_context->mutable_tls_params()->add_cipher_suites(cipher_suite);\n  }\n  if (!options.sni_.empty()) {\n    tls_context.set_sni(options.sni_);\n  }\n\n  common_context->mutable_tls_params()->set_tls_minimum_protocol_version(options.tls_version_);\n  common_context->mutable_tls_params()->set_tls_maximum_protocol_version(options.tls_version_);\n\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> mock_factory_ctx;\n  ON_CALL(mock_factory_ctx, api()).WillByDefault(testing::ReturnRef(api));\n  auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ClientContextConfigImpl>(\n      tls_context, options.sigalgs_, mock_factory_ctx);\n  return std::make_unique<QuicClientTransportSocketFactory>(std::move(cfg));\n}\n\nclass QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVersionTest 
{\npublic:\n  QuicHttpIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP3, GetParam().first,\n                            ConfigHelper::quicHttpProxyConfig()),\n        supported_versions_([]() {\n          if (GetParam().second == QuicVersionType::GquicQuicCrypto) {\n            return quic::CurrentSupportedVersionsWithQuicCrypto();\n          }\n          bool use_http3 = GetParam().second == QuicVersionType::Iquic;\n          SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3);\n          SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3);\n          return quic::CurrentSupportedVersions();\n        }()),\n        conn_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *conn_helper_.GetClock()),\n        injected_resource_filename_1_(TestEnvironment::temporaryPath(\"injected_resource_1\")),\n        injected_resource_filename_2_(TestEnvironment::temporaryPath(\"injected_resource_2\")),\n        file_updater_1_(injected_resource_filename_1_),\n        file_updater_2_(injected_resource_filename_2_) {}\n\n  ~QuicHttpIntegrationTest() override { cleanupUpstreamAndDownstream(); }\n\n  Network::ClientConnectionPtr makeClientConnectionWithOptions(\n      uint32_t port, const Network::ConnectionSocket::OptionsSharedPtr& options) override {\n    // Setting socket options is not supported.\n    ASSERT(!options);\n    server_addr_ = Network::Utility::resolveUrl(\n        fmt::format(\"udp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port));\n    Network::Address::InstanceConstSharedPtr local_addr =\n        Network::Test::getCanonicalLoopbackAddress(version_);\n    // Initiate a QUIC connection with the highest supported version. 
If not\n    // supported by server, this connection will fail.\n    // TODO(danzh) Implement retry upon version mismatch and modify test frame work to specify a\n    // different version set on server side to test that.\n    auto connection = std::make_unique<EnvoyQuicClientConnection>(\n        getNextConnectionId(), server_addr_, conn_helper_, alarm_factory_,\n        quic::ParsedQuicVersionVector{supported_versions_[0]}, local_addr, *dispatcher_, nullptr);\n    quic_connection_ = connection.get();\n    auto session = std::make_unique<EnvoyQuicClientSession>(\n        quic_config_, supported_versions_, std::move(connection), server_id_, crypto_config_.get(),\n        &push_promise_index_, *dispatcher_, 0);\n    session->Initialize();\n    return session;\n  }\n\n  // This call may fail because of INVALID_VERSION, because QUIC connection doesn't support\n  // in-connection version negotiation.\n  // TODO(#8479) Propagate INVALID_VERSION error to caller and let caller to use server advertised\n  // version list to create a new connection with mutually supported version and make client codec\n  // again.\n  IntegrationCodecClientPtr makeRawHttpConnection(\n      Network::ClientConnectionPtr&& conn,\n      absl::optional<envoy::config::core::v3::Http2ProtocolOptions> http2_options) override {\n    IntegrationCodecClientPtr codec =\n        HttpIntegrationTest::makeRawHttpConnection(std::move(conn), http2_options);\n    if (codec->disconnected()) {\n      // Connection may get closed during version negotiation or handshake.\n      ENVOY_LOG(error, \"Fail to connect to server with error: {}\",\n                codec->connection()->transportFailureReason());\n    } else {\n      codec->setCodecClientCallbacks(client_codec_callback_);\n    }\n    return codec;\n  }\n\n  quic::QuicConnectionId getNextConnectionId() {\n    if (designated_connection_ids_.empty()) {\n      return quic::QuicUtils::CreateRandomConnectionId();\n    }\n    quic::QuicConnectionId cid = 
designated_connection_ids_.front();\n    designated_connection_ids_.pop_front();\n    return cid;\n  }\n\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      envoy::extensions::transport_sockets::quic::v3::QuicDownstreamTransport\n          quic_transport_socket_config;\n      auto tls_context = quic_transport_socket_config.mutable_downstream_tls_context();\n      ConfigHelper::initializeTls(ConfigHelper::ServerSslOptions().setRsaCert(true).setTlsV13(true),\n                                  *tls_context->mutable_common_tls_context());\n      auto* filter_chain =\n          bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0);\n      auto* transport_socket = filter_chain->mutable_transport_socket();\n      transport_socket->mutable_typed_config()->PackFrom(quic_transport_socket_config);\n\n      bootstrap.mutable_static_resources()->mutable_listeners(0)->set_reuse_port(set_reuse_port_);\n\n      const std::string overload_config =\n          fmt::format(R\"EOF(\n        refresh_interval:\n          seconds: 0\n          nanos: 1000000\n        resource_monitors:\n          - name: \"envoy.resource_monitors.injected_resource_1\"\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig\n              filename: \"{}\"\n          - name: \"envoy.resource_monitors.injected_resource_2\"\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig\n              filename: \"{}\"\n        actions:\n          - name: \"envoy.overload_actions.stop_accepting_requests\"\n            triggers:\n              - name: \"envoy.resource_monitors.injected_resource_1\"\n                threshold:\n                  value: 0.95\n          - name: 
\"envoy.overload_actions.stop_accepting_connections\"\n            triggers:\n              - name: \"envoy.resource_monitors.injected_resource_1\"\n                threshold:\n                  value: 0.9\n          - name: \"envoy.overload_actions.disable_http_keepalive\"\n            triggers:\n              - name: \"envoy.resource_monitors.injected_resource_2\"\n                threshold:\n                  value: 0.8\n      )EOF\",\n                      injected_resource_filename_1_, injected_resource_filename_2_);\n      *bootstrap.mutable_overload_manager() =\n          TestUtility::parseYaml<envoy::config::overload::v3::OverloadManager>(overload_config);\n    });\n    config_helper_.addConfigModifier(\n        [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n               hcm) {\n          hcm.mutable_drain_timeout()->clear_seconds();\n          hcm.mutable_drain_timeout()->set_nanos(500 * 1000 * 1000);\n          EXPECT_EQ(hcm.codec_type(), envoy::extensions::filters::network::http_connection_manager::\n                                          v3::HttpConnectionManager::HTTP3);\n        });\n\n    updateResource(file_updater_1_, 0);\n    updateResource(file_updater_2_, 0);\n    HttpIntegrationTest::initialize();\n    registerTestServerPorts({\"http\"});\n    crypto_config_ =\n        std::make_unique<quic::QuicCryptoClientConfig>(std::make_unique<EnvoyQuicProofVerifier>(\n            stats_store_,\n            createQuicClientTransportSocketFactory(\n                Ssl::ClientSslTransportOptions().setAlpn(true).setSan(true), *api_, san_to_match_)\n                ->clientContextConfig(),\n            timeSystem()));\n  }\n\n  void testMultipleQuicConnections() {\n    concurrency_ = 8;\n    set_reuse_port_ = true;\n    initialize();\n    std::vector<IntegrationCodecClientPtr> codec_clients;\n    for (size_t i = 1; i <= concurrency_; ++i) {\n      // The BPF filter and ActiveQuicListener::destination() look at 
the 1st word of connection id\n      // in the packet header. And currently all QUIC versions support 8 bytes connection id. So\n      // create connections with the first 4 bytes of connection id different from each\n      // other so they should be evenly distributed.\n      designated_connection_ids_.push_back(quic::test::TestConnectionId(i << 32));\n      codec_clients.push_back(makeHttpConnection(lookupPort(\"http\")));\n    }\n    constexpr auto timeout_first = std::chrono::seconds(15);\n    constexpr auto timeout_subsequent = std::chrono::milliseconds(10);\n    if (GetParam().first == Network::Address::IpVersion::v4) {\n      test_server_->waitForCounterEq(\"listener.0.0.0.0_0.downstream_cx_total\", 8u, timeout_first);\n    } else {\n      test_server_->waitForCounterEq(\"listener.[__]_0.downstream_cx_total\", 8u, timeout_first);\n    }\n    for (size_t i = 0; i < concurrency_; ++i) {\n      if (GetParam().first == Network::Address::IpVersion::v4) {\n        test_server_->waitForGaugeEq(\n            fmt::format(\"listener.0.0.0.0_0.worker_{}.downstream_cx_active\", i), 1u,\n            timeout_subsequent);\n        test_server_->waitForCounterEq(\n            fmt::format(\"listener.0.0.0.0_0.worker_{}.downstream_cx_total\", i), 1u,\n            timeout_subsequent);\n      } else {\n        test_server_->waitForGaugeEq(\n            fmt::format(\"listener.[__]_0.worker_{}.downstream_cx_active\", i), 1u,\n            timeout_subsequent);\n        test_server_->waitForCounterEq(\n            fmt::format(\"listener.[__]_0.worker_{}.downstream_cx_total\", i), 1u,\n            timeout_subsequent);\n      }\n    }\n    for (size_t i = 0; i < concurrency_; ++i) {\n      codec_clients[i]->close();\n    }\n  }\n\nprotected:\n  quic::QuicConfig quic_config_;\n  quic::QuicServerId server_id_{\"lyft.com\", 443, false};\n  std::string san_to_match_{\"spiffe://lyft.com/backend-team\"};\n  quic::QuicClientPushPromiseIndex push_promise_index_;\n  
quic::ParsedQuicVersionVector supported_versions_;\n  std::unique_ptr<quic::QuicCryptoClientConfig> crypto_config_;\n  EnvoyQuicConnectionHelper conn_helper_;\n  EnvoyQuicAlarmFactory alarm_factory_;\n  CodecClientCallbacksForTest client_codec_callback_;\n  Network::Address::InstanceConstSharedPtr server_addr_;\n  EnvoyQuicClientConnection* quic_connection_{nullptr};\n  bool set_reuse_port_{false};\n  const std::string injected_resource_filename_1_;\n  const std::string injected_resource_filename_2_;\n  AtomicFileUpdater file_updater_1_;\n  AtomicFileUpdater file_updater_2_;\n  std::list<quic::QuicConnectionId> designated_connection_ids_;\n};\n\nINSTANTIATE_TEST_SUITE_P(QuicHttpIntegrationTests, QuicHttpIntegrationTest,\n                         testing::ValuesIn(generateTestParam()), testParamsToString);\n\nTEST_P(QuicHttpIntegrationTest, GetRequestAndEmptyResponse) {\n  testRouterHeaderOnlyRequestAndResponse();\n}\n\nTEST_P(QuicHttpIntegrationTest, GetRequestAndResponseWithBody) {\n  initialize();\n  sendRequestAndVerifyResponse(default_request_headers_, /*request_size=*/0,\n                               default_response_headers_, /*response_size=*/1024,\n                               /*backend_index*/ 0);\n}\n\nTEST_P(QuicHttpIntegrationTest, PostRequestAndResponseWithBody) {\n  testRouterRequestAndResponseWithBody(1024, 512, false);\n}\n\nTEST_P(QuicHttpIntegrationTest, PostRequestWithBigHeadersAndResponseWithBody) {\n  testRouterRequestAndResponseWithBody(1024, 512, true);\n}\n\nTEST_P(QuicHttpIntegrationTest, RouterUpstreamDisconnectBeforeRequestcomplete) {\n  testRouterUpstreamDisconnectBeforeRequestComplete();\n}\n\nTEST_P(QuicHttpIntegrationTest, RouterUpstreamDisconnectBeforeResponseComplete) {\n  testRouterUpstreamDisconnectBeforeResponseComplete();\n  EXPECT_EQ(Http::StreamResetReason::RemoteReset, client_codec_callback_.last_stream_reset_reason_);\n}\n\nTEST_P(QuicHttpIntegrationTest, RouterDownstreamDisconnectBeforeRequestComplete) {\n  
testRouterDownstreamDisconnectBeforeRequestComplete();\n}\n\nTEST_P(QuicHttpIntegrationTest, RouterDownstreamDisconnectBeforeResponseComplete) {\n  testRouterDownstreamDisconnectBeforeResponseComplete();\n}\n\nTEST_P(QuicHttpIntegrationTest, RouterUpstreamResponseBeforeRequestComplete) {\n  testRouterUpstreamResponseBeforeRequestComplete();\n}\n\nTEST_P(QuicHttpIntegrationTest, Retry) { testRetry(); }\n\nTEST_P(QuicHttpIntegrationTest, UpstreamReadDisabledOnGiantResponseBody) {\n  config_helper_.setBufferLimits(/*upstream_buffer_limit=*/1024, /*downstream_buffer_limit=*/1024);\n  testRouterRequestAndResponseWithBody(/*request_size=*/512, /*response_size=*/1024 * 1024, false);\n}\n\nTEST_P(QuicHttpIntegrationTest, DownstreamReadDisabledOnGiantPost) {\n  config_helper_.setBufferLimits(/*upstream_buffer_limit=*/1024, /*downstream_buffer_limit=*/1024);\n  testRouterRequestAndResponseWithBody(/*request_size=*/1024 * 1024, /*response_size=*/1024, false);\n}\n\n// Tests that a connection idle times out after 1s and starts delayed close.\nTEST_P(QuicHttpIntegrationTest, TestDelayedConnectionTeardownTimeoutTrigger) {\n  config_helper_.addFilter(\"{ name: envoy.filters.http.dynamo, typed_config: { \\\"@type\\\": \"\n                           \"type.googleapis.com/google.protobuf.Empty } }\");\n  config_helper_.setBufferLimits(1024, 1024);\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) {\n        // 200ms.\n        hcm.mutable_delayed_close_timeout()->set_nanos(200000000);\n        hcm.mutable_drain_timeout()->set_seconds(1);\n        hcm.mutable_common_http_protocol_options()->mutable_idle_timeout()->set_seconds(1);\n      });\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                            
                     {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  codec_client_->sendData(*request_encoder_, 1024 * 65, false);\n\n  response->waitForEndStream();\n  // The delayed close timeout should trigger since client is not closing the connection.\n  EXPECT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(5000)));\n  EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose);\n  EXPECT_EQ(test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value(),\n            1);\n}\n\nTEST_P(QuicHttpIntegrationTest, MultipleQuicConnectionsWithBPF) { testMultipleQuicConnections(); }\n\nTEST_P(QuicHttpIntegrationTest, MultipleQuicConnectionsNoBPF) {\n  config_helper_.addRuntimeOverride(\n      \"envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing\", \"false\");\n\n  testMultipleQuicConnections();\n}\n\nTEST_P(QuicHttpIntegrationTest, ConnectionMigration) {\n  concurrency_ = 2;\n  set_reuse_port_ = true;\n  initialize();\n  uint32_t old_port = lookupPort(\"http\");\n  codec_client_ = makeHttpConnection(old_port);\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  codec_client_->sendData(*request_encoder_, 1024u, false);\n\n  // Change to a new port by switching socket, 
and connection should still continue.\n  Network::Address::InstanceConstSharedPtr local_addr =\n      Network::Test::getCanonicalLoopbackAddress(version_);\n  quic_connection_->switchConnectionSocket(\n      createConnectionSocket(server_addr_, local_addr, nullptr));\n  EXPECT_NE(old_port, local_addr->ip()->port());\n  // Send the rest data.\n  codec_client_->sendData(*request_encoder_, 1024u, true);\n  waitForNextUpstreamRequest(0, TestUtility::DefaultTimeout);\n  // Send response headers, and end_stream if there is no response body.\n  const Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  size_t response_size{5u};\n  upstream_request_->encodeHeaders(response_headers, false);\n  upstream_request_->encodeData(response_size, true);\n  response->waitForEndStream();\n  verifyResponse(std::move(response), \"200\", response_headers, std::string(response_size, 'a'));\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1024u * 2, upstream_request_->bodyLength());\n  cleanupUpstreamAndDownstream();\n}\n\nTEST_P(QuicHttpIntegrationTest, StopAcceptingConnectionsWhenOverloaded) {\n  initialize();\n\n  // Put envoy in overloaded state and check that it doesn't accept the new client connection.\n  updateResource(file_updater_1_, 0.9);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.stop_accepting_connections.active\",\n                               1);\n  codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort(\"http\"))), absl::nullopt);\n  EXPECT_TRUE(codec_client_->disconnected());\n\n  // Reduce load a little to allow the connection to be accepted connection.\n  updateResource(file_updater_1_, 0.8);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.stop_accepting_connections.active\",\n                               0);\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n 
 waitForNextUpstreamRequest(0);\n  // Send response headers, but hold response body for now.\n  upstream_request_->encodeHeaders(default_response_headers_, /*end_stream=*/false);\n\n  updateResource(file_updater_1_, 0.95);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.stop_accepting_requests.active\", 1);\n  // Existing request should be able to finish.\n  upstream_request_->encodeData(10, true);\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n  // New request should be rejected.\n  auto response2 = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  response2->waitForEndStream();\n  EXPECT_EQ(\"503\", response2->headers().getStatusValue());\n  EXPECT_EQ(\"envoy overloaded\", response2->body());\n  codec_client_->close();\n\n  EXPECT_TRUE(makeRawHttpConnection(makeClientConnection((lookupPort(\"http\"))), absl::nullopt)\n                  ->disconnected());\n}\n\nTEST_P(QuicHttpIntegrationTest, NoNewStreamsWhenOverloaded) {\n  initialize();\n  updateResource(file_updater_1_, 0.7);\n\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n\n  // Send a complete request and start a second.\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest(0);\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  auto response2 = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest(0);\n\n  // Enable the disable-keepalive overload action. 
This should send a shutdown notice before\n  // encoding the headers.\n  updateResource(file_updater_2_, 0.9);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.disable_http_keepalive.active\", 1);\n\n  upstream_request_->encodeHeaders(default_response_headers_, /*end_stream=*/false);\n  upstream_request_->encodeData(10, true);\n\n  response2->waitForHeaders();\n  EXPECT_TRUE(codec_client_->waitForDisconnect());\n\n  EXPECT_TRUE(codec_client_->sawGoAway());\n  codec_client_->close();\n}\n\nTEST_P(QuicHttpIntegrationTest, AdminDrainDrainsListeners) {\n  testAdminDrain(Http::CodecClient::Type::HTTP1);\n}\n\nTEST_P(QuicHttpIntegrationTest, CertVerificationFailure) {\n  san_to_match_ = \"www.random_domain.com\";\n  initialize();\n  codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort(\"http\"))), absl::nullopt);\n  EXPECT_FALSE(codec_client_->connected());\n  std::string failure_reason =\n      GetParam().second == QuicVersionType::GquicQuicCrypto\n          ? \"QUIC_PROOF_INVALID with details: Proof invalid: X509_verify_cert: certificate \"\n            \"verification error at depth 0: ok\"\n          : \"QUIC_HANDSHAKE_FAILED with details: TLS handshake failure (ENCRYPTION_HANDSHAKE) 46: \"\n            \"certificate unknown\";\n  EXPECT_EQ(failure_reason, codec_client_->connection()->transportFailureReason());\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"quiche_platform_test\",\n    srcs = [\"quiche_platform_test.cc\"],\n    external_deps = [\"quiche_common_platform\"],\n    deps = [\n        \"@com_googlesource_quiche//:quiche_common_platform\",\n        \"@com_googlesource_quiche//:quiche_common_platform_endian\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http2_platform_test\",\n    srcs = [\"http2_platform_test.cc\"],\n    external_deps = [\"quiche_http2_platform\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche/platform:flags_impl_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@com_googlesource_quiche//:http2_test_tools_random\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"quic_platform_test\",\n    srcs = select({\n        \"//bazel:linux\": [\"quic_platform_test.cc\"],\n        \"//conditions:default\": [],\n    }),\n    copts = select({\n        \"//bazel:windows_x86_64\": [],\n        \"//conditions:default\": [\"-Wno-unused-parameter\"],\n    }),\n    data = [\"//test/extensions/transport_sockets/tls/test_data:certs\"],\n    external_deps = [\"quiche_quic_platform\"],\n    tags = [\"nofips\"],\n    deps = [\n        \":quic_platform_epoll_clock_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//source/extensions/quic_listeners/quiche/platform:flags_impl_lib\",\n        \"//test/common/buffer:utility_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/extensions/transport_sockets/tls:ssl_test_utils\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"//test/test_common:utility_lib\",\n        
\"@com_googlesource_quiche//:epoll_server_lib\",\n        \"@com_googlesource_quiche//:quic_core_buffer_allocator_lib\",\n        \"@com_googlesource_quiche//:quic_core_error_codes_lib\",\n        \"@com_googlesource_quiche//:quic_core_types_lib\",\n        \"@com_googlesource_quiche//:quic_platform_expect_bug\",\n        \"@com_googlesource_quiche//:quic_platform_mem_slice_span\",\n        \"@com_googlesource_quiche//:quic_platform_mem_slice_storage\",\n        \"@com_googlesource_quiche//:quic_platform_mock_log\",\n        \"@com_googlesource_quiche//:quic_platform_port_utils\",\n        \"@com_googlesource_quiche//:quic_platform_sleep\",\n        \"@com_googlesource_quiche//:quic_platform_system_event_loop\",\n        \"@com_googlesource_quiche//:quic_platform_test\",\n        \"@com_googlesource_quiche//:quic_platform_test_output\",\n        \"@com_googlesource_quiche//:quic_platform_thread\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"spdy_platform_test\",\n    srcs = [\"spdy_platform_test.cc\"],\n    external_deps = [\"quiche_spdy_platform\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche/platform:flags_impl_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@com_googlesource_quiche//:spdy_platform\",\n        \"@com_googlesource_quiche//:spdy_platform_test_helpers\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"epoll_server_platform_impl_lib\",\n    hdrs = [\n        \"epoll_address_test_utils_impl.h\",\n        \"epoll_bug_impl.h\",\n        \"epoll_expect_bug_impl.h\",\n        \"epoll_export_impl.h\",\n        \"epoll_logging_impl.h\",\n        \"epoll_ptr_util_impl.h\",\n        \"epoll_test_impl.h\",\n        \"epoll_thread_impl.h\",\n        \"epoll_time_impl.h\",\n    ],\n    external_deps = [\"abseil_time\"],\n    deps = [\n        \":quic_platform_expect_bug_impl_lib\",\n        \":quic_platform_thread_impl_lib\",\n        
\"//include/envoy/network:address_interface\",\n        \"//source/extensions/quic_listeners/quiche/platform:quic_platform_base_impl_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_epoll_clock_lib\",\n    srcs = select({\n        \"//bazel:linux\": [\"quic_epoll_clock.cc\"],\n        \"//conditions:default\": [],\n    }),\n    hdrs = select({\n        \"//bazel:linux\": [\"quic_epoll_clock.h\"],\n        \"//conditions:default\": [],\n    }),\n    tags = [\"nofips\"],\n    deps = [\n        \"@com_googlesource_quiche//:quic_core_clock_lib\",\n        \"@com_googlesource_quiche//:quic_platform\",\n        \"@com_googlesource_quiche//:quic_platform_epoll_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_epoll_impl_lib\",\n    hdrs = [\"quic_epoll_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\"@com_googlesource_quiche//:epoll_server_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_expect_bug_impl_lib\",\n    hdrs = [\"quic_expect_bug_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"@com_googlesource_quiche//:quic_platform_base\",\n        \"@com_googlesource_quiche//:quic_platform_mock_log\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_mock_log_impl_lib\",\n    hdrs = [\"quic_mock_log_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\"@com_googlesource_quiche//:quic_platform_base\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_port_utils_impl_lib\",\n    srcs = [\"quic_port_utils_impl.cc\"],\n    hdrs = [\"quic_port_utils_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/common/network:utility_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_test_mem_slice_vector_impl_lib\",\n    hdrs = [\"quic_test_mem_slice_vector_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        
\"//include/envoy/buffer:buffer_interface\",\n        \"@com_googlesource_quiche//:quic_platform_mem_slice_span\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_sleep_impl_lib\",\n    hdrs = [\"quic_sleep_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\"@com_googlesource_quiche//:quic_core_time_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_system_event_loop_impl_lib\",\n    hdrs = [\"quic_system_event_loop_impl.h\"],\n    tags = [\"nofips\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_thread_impl_lib\",\n    hdrs = [\"quic_thread_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//include/envoy/thread:thread_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//test/test_common:thread_factory_for_test_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_test_impl_lib\",\n    hdrs = [\"quic_test_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\"//source/common/common:assert_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"quic_platform_test_output_impl_lib\",\n    srcs = [\"quic_test_output_impl.cc\"],\n    hdrs = [\"quic_test_output_impl.h\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//test/test_common:file_system_for_test_lib\",\n        \"@com_googlesource_quiche//:quic_platform_base\",\n        \"@com_googlesource_quiche//:quiche_common_platform\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"quiche_common_platform_test_impl_lib\",\n    hdrs = [\"quiche_test_impl.h\"],\n)\n\nenvoy_cc_test_library(\n    name = \"spdy_platform_test_helpers_impl_lib\",\n    hdrs = [\"spdy_test_helpers_impl.h\"],\n    deps = [\n        \":quic_platform_expect_bug_impl_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"envoy_quic_clock_test\",\n    srcs = [\"envoy_quic_clock_test.cc\"],\n    tags = [\"nofips\"],\n    deps = [\n        \"//source/extensions/quic_listeners/quiche/platform:envoy_quic_clock_lib\",\n        
\"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/envoy_quic_clock_test.cc",
    "content": "#include <memory>\n\n#include \"extensions/quic_listeners/quiche/platform/envoy_quic_clock.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nTEST(EnvoyQuicClockTest, TestNow) {\n  Event::SimulatedTimeSystemHelper time_system;\n  Api::ApiPtr api = Api::createApiForTest(time_system);\n  Event::DispatcherPtr dispatcher = api->allocateDispatcher(\"test_thread\");\n  EnvoyQuicClock clock(*dispatcher);\n  uint64_t mono_time = std::chrono::duration_cast<std::chrono::microseconds>(\n                           time_system.monotonicTime().time_since_epoch())\n                           .count();\n  uint64_t sys_time = std::chrono::duration_cast<std::chrono::microseconds>(\n                          time_system.systemTime().time_since_epoch())\n                          .count();\n  // Advance time by 1000000us.\n  time_system.advanceTimeWait(std::chrono::microseconds(1000000));\n  EXPECT_EQ(mono_time + 1000000, (clock.Now() - quic::QuicTime::Zero()).ToMicroseconds());\n  EXPECT_EQ(sys_time + 1000000, clock.WallNow().ToUNIXMicroseconds());\n\n  // Advance time by 10us.\n  time_system.advanceTimeWait(std::chrono::microseconds(10));\n  EXPECT_EQ(mono_time + 1000000 + 10, (clock.Now() - quic::QuicTime::Zero()).ToMicroseconds());\n  EXPECT_EQ(sys_time + 1000000 + 10, clock.WallNow().ToUNIXMicroseconds());\n\n  // Advance time by 2ms.\n  time_system.advanceTimeWait(std::chrono::milliseconds(2));\n  EXPECT_EQ(mono_time + 1000000 + 10 + 2 * 1000,\n            (clock.Now() - quic::QuicTime::Zero()).ToMicroseconds());\n  EXPECT_EQ(sys_time + 1000000 + 10 + 2 * 1000, clock.WallNow().ToUNIXMicroseconds());\n}\n\n// Tests that Now() should never go back.\nTEST(EnvoyQuicClockTest, TestMonotonicityWithReadTimeSystem) {\n  Event::TestRealTimeSystem time_system;\n  Api::ApiPtr api = 
Api::createApiForTest(time_system);\n  Event::DispatcherPtr dispatcher = api->allocateDispatcher(\"test_thread\");\n  EnvoyQuicClock clock(*dispatcher);\n  quic::QuicTime last_now = clock.Now();\n  for (int i = 0; i < 1000; ++i) {\n    quic::QuicTime now = clock.Now();\n    ASSERT_LE(last_now, now);\n    last_now = now;\n  }\n}\n\nTEST(EnvoyQuicClockTest, ApproximateNow) {\n  Event::SimulatedTimeSystemHelper time_system;\n  Api::ApiPtr api = Api::createApiForTest(time_system);\n  Event::DispatcherPtr dispatcher = api->allocateDispatcher(\"test_thread\");\n  EnvoyQuicClock clock(*dispatcher);\n\n  // ApproximateTime() is cached, it not change only because time passes.\n  const int kDeltaMicroseconds = 10;\n  quic::QuicTime approximate_now1 = clock.ApproximateNow();\n  time_system.advanceTimeWait(std::chrono::microseconds(kDeltaMicroseconds));\n  quic::QuicTime approximate_now2 = clock.ApproximateNow();\n  EXPECT_EQ(approximate_now1, approximate_now2);\n\n  // Calling Now() updates ApproximateTime().\n  quic::QuicTime now = clock.Now();\n  approximate_now2 = clock.ApproximateNow();\n  EXPECT_EQ(now, approximate_now2);\n  EXPECT_EQ(now, approximate_now1 + quic::QuicTime::Delta::FromMicroseconds(kDeltaMicroseconds));\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/epoll_address_test_utils_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <sys/socket.h>\n\n#include <algorithm>\n\n#include \"envoy/network/address.h\"\n\n#include \"test/test_common/environment.h\"\n\nnamespace epoll_server {\n\nnamespace {\n\nint addressFamilyUnderTestHelper() {\n  std::vector<Envoy::Network::Address::IpVersion> versions =\n      Envoy::TestEnvironment::getIpVersionsForTest();\n  if (std::find(versions.begin(), versions.end(), Envoy::Network::Address::IpVersion::v4) !=\n      versions.end()) {\n    return AF_INET;\n  }\n  if (std::find(versions.begin(), versions.end(), Envoy::Network::Address::IpVersion::v6) !=\n      versions.end()) {\n    return AF_INET6;\n  }\n  return -1;\n}\n\n} // namespace\n\n// Returns the address family to be used for test. Return v4 if the environment\n// supports v4 only or both v4 and v6. Otherwise return v6 or an invalid value.\nint AddressFamilyUnderTestImpl() {\n  static const int version = addressFamilyUnderTestHelper();\n  return version;\n}\n\n} // namespace epoll_server\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/epoll_bug_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h\"\n\n#define EPOLL_BUG_IMPL QUIC_BUG_IMPL\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/epoll_expect_bug_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"test/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h\"\n\n#define EXPECT_EPOLL_BUG_IMPL EXPECT_QUIC_BUG_IMPL\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/epoll_export_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#define EPOLL_EXPORT\n#define EPOLL_EXPORT_PRIVATE\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/epoll_logging_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"extensions/quic_listeners/quiche/platform/quic_logging_impl.h\"\n\nnamespace epoll_server {\n\n#define EPOLL_LOG_IMPL(severity) QUICHE_LOG_IMPL(severity)\n#define EPOLL_VLOG_IMPL(verbosity) QUICHE_VLOG_IMPL(verbosity)\n\n#define EPOLL_PLOG_IMPL(severity) QUICHE_PLOG_IMPL(severity)\n\n#define EPOLL_DVLOG_IMPL(verbosity) QUICHE_DVLOG_IMPL(verbosity)\n\n} // namespace epoll_server\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/epoll_ptr_util_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <memory>\n\nnamespace epoll_server {\n\ntemplate <typename T, typename... Args> std::unique_ptr<T> EpollMakeUniqueImpl(Args&&... args) {\n  return std::make_unique<T>(std::forward<Args>(args)...);\n}\n\n} // namespace epoll_server\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/epoll_test_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing EpollTestImpl = ::testing::Test;\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/epoll_thread_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"test/extensions/quic_listeners/quiche/platform/quic_thread_impl.h\"\n\nnamespace epoll_server {\n\nusing EpollThreadImpl = quic::QuicThreadImpl;\n\n} // namespace epoll_server\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/epoll_time_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"absl/time/clock.h\"\n\nnamespace epoll_server {\n\ninline int64_t WallTimeNowInUsecImpl() { return absl::GetCurrentTimeNanos() / 1000; }\n\n} // namespace epoll_server\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/http2_platform_test.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <memory>\n#include <string>\n\n#include \"extensions/quic_listeners/quiche/platform/flags_impl.h\"\n\n#include \"test/test_common/logging.h\"\n\n#include \"gtest/gtest.h\"\n#include \"quiche/http2/platform/api/http2_bug_tracker.h\"\n#include \"quiche/http2/platform/api/http2_containers.h\"\n#include \"quiche/http2/platform/api/http2_estimate_memory_usage.h\"\n#include \"quiche/http2/platform/api/http2_flags.h\"\n#include \"quiche/http2/platform/api/http2_logging.h\"\n#include \"quiche/http2/platform/api/http2_macros.h\"\n#include \"quiche/http2/test_tools/http2_random.h\"\n\n// Basic tests to validate functioning of the QUICHE http2 platform\n// implementation. For platform APIs in which the implementation is a simple\n// typedef/passthrough to a std:: or absl:: construct, the tests are kept\n// minimal, and serve primarily to verify the APIs compile and link without\n// issue.\n\nnamespace http2 {\nnamespace {\n\nTEST(Http2PlatformTest, Http2BugTracker) {\n  EXPECT_DEBUG_DEATH(HTTP2_BUG << \"Here is a bug,\", \" bug\");\n  EXPECT_DEBUG_DEATH(HTTP2_BUG_IF(true) << \"There is a bug,\", \" bug\");\n  EXPECT_LOG_NOT_CONTAINS(\"error\", \"\", HTTP2_BUG_IF(false) << \"A feature is not a bug.\");\n\n  EXPECT_EQ(true, FLAGS_http2_always_log_bugs_for_tests);\n}\n\nTEST(Http2PlatformTest, Http2Deque) {\n  http2::Http2Deque<int> deque;\n  deque.push_back(10);\n  EXPECT_EQ(10, deque.back());\n}\n\nTEST(Http2PlatformTest, Http2EstimateMemoryUsage) {\n  std::string s = \"foo\";\n  // Stubbed out to always return 0.\n  EXPECT_EQ(0, http2::Http2EstimateMemoryUsage(s));\n}\n\nTEST(Http2PlatformTest, Http2Log) {\n  // HTTP2_LOG macros are defined to QUIC_LOG macros, which is tested in\n  // QuicPlatformTest. 
Here we just make sure HTTP2_LOG macros compile.\n  HTTP2_LOG(INFO) << \"INFO log may not show up by default.\";\n  HTTP2_LOG(ERROR) << \"ERROR log should show up by default.\";\n\n  // VLOG are only emitted if INFO is enabled and verbosity level is high enough.\n  HTTP2_VLOG(1) << \"VLOG(1)\";\n\n  HTTP2_DLOG(INFO) << \"DLOG(INFO)\";\n  HTTP2_DLOG(ERROR) << \"DLOG(ERROR)\";\n\n  HTTP2_DLOG_IF(ERROR, true) << \"DLOG_IF(ERROR, true)\";\n  HTTP2_DLOG_IF(ERROR, false) << \"DLOG_IF(ERROR, false)\";\n\n  HTTP2_DVLOG(2) << \"DVLOG(2)\";\n\n  HTTP2_DVLOG_IF(3, true) << \"DVLOG_IF(3, true)\";\n  HTTP2_DVLOG_IF(4, false) << \"DVLOG_IF(4, false)\";\n\n  HTTP2_DLOG_EVERY_N(ERROR, 2) << \"DLOG_EVERY_N(ERROR, 2)\";\n}\n\nTEST(Http2PlatformTest, Http2StringPiece) {\n  std::string s = \"bar\";\n  quiche::QuicheStringPiece sp(s);\n  EXPECT_EQ('b', sp[0]);\n}\n\nTEST(Http2PlatformTest, Http2Macro) {\n  EXPECT_DEBUG_DEATH(HTTP2_UNREACHABLE(), \"\");\n  EXPECT_DEATH(HTTP2_DIE_IF_NULL(nullptr), \"\");\n}\n\nTEST(Http2PlatformTest, Http2Flags) {\n  auto& flag_registry = quiche::FlagRegistry::GetInstance();\n  flag_registry.ResetFlags();\n  EXPECT_FALSE(GetHttp2ReloadableFlag(http2_testonly_default_false));\n  SetHttp2ReloadableFlag(http2_testonly_default_false, true);\n  EXPECT_TRUE(GetHttp2ReloadableFlag(http2_testonly_default_false));\n\n  for (std::string s : {\"1\", \"t\", \"true\", \"TRUE\", \"y\", \"yes\", \"Yes\"}) {\n    SetHttp2ReloadableFlag(http2_testonly_default_false, false);\n    EXPECT_FALSE(GetHttp2ReloadableFlag(http2_testonly_default_false));\n    EXPECT_TRUE(flag_registry.FindFlag(\"http2_reloadable_flag_http2_testonly_default_false\")\n                    ->SetValueFromString(s));\n    EXPECT_TRUE(GetHttp2ReloadableFlag(http2_testonly_default_false));\n  }\n  for (std::string s : {\"0\", \"f\", \"false\", \"FALSE\", \"n\", \"no\", \"No\"}) {\n    SetHttp2ReloadableFlag(http2_testonly_default_false, true);\n    
EXPECT_TRUE(GetHttp2ReloadableFlag(http2_testonly_default_false));\n    EXPECT_TRUE(flag_registry.FindFlag(\"http2_reloadable_flag_http2_testonly_default_false\")\n                    ->SetValueFromString(s));\n    EXPECT_FALSE(GetHttp2ReloadableFlag(http2_testonly_default_false));\n  }\n  for (std::string s : {\"some\", \"invalid\", \"values\", \"\"}) {\n    SetHttp2ReloadableFlag(http2_testonly_default_false, false);\n    EXPECT_FALSE(GetHttp2ReloadableFlag(http2_testonly_default_false));\n    EXPECT_FALSE(flag_registry.FindFlag(\"http2_reloadable_flag_http2_testonly_default_false\")\n                     ->SetValueFromString(s));\n    EXPECT_FALSE(GetHttp2ReloadableFlag(http2_testonly_default_false));\n  }\n}\n\n} // namespace\n} // namespace http2\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_epoll_clock.cc",
    "content": "// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"test/extensions/quic_listeners/quiche/platform/quic_epoll_clock.h\"\n\nnamespace quic {\n\nQuicEpollClock::QuicEpollClock(epoll_server::SimpleEpollServer* epoll_server)\n    : epoll_server_(epoll_server), largest_time_(QuicTime::Zero()) {}\n\nQuicTime QuicEpollClock::ApproximateNow() const {\n  return CreateTimeFromMicroseconds(epoll_server_->ApproximateNowInUsec());\n}\n\nQuicTime QuicEpollClock::Now() const {\n  QuicTime now = CreateTimeFromMicroseconds(epoll_server_->NowInUsec());\n\n  if (now <= largest_time_) {\n    // Time not increasing, return |largest_time_|.\n    return largest_time_;\n  }\n\n  largest_time_ = now;\n  return largest_time_;\n}\n\nQuicWallTime QuicEpollClock::WallNow() const {\n  return QuicWallTime::FromUNIXMicroseconds(epoll_server_->ApproximateNowInUsec());\n}\n\nQuicTime QuicEpollClock::ConvertWallTimeToQuicTime(const QuicWallTime& walltime) const {\n  return QuicTime::Zero() + QuicTime::Delta::FromMicroseconds(walltime.ToUNIXMicroseconds());\n}\n\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_epoll_clock.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"quiche/epoll_server/simple_epoll_server.h\"\n#include \"quiche/quic/core/quic_clock.h\"\n#include \"quiche/quic/core/quic_time.h\"\n\nnamespace quic {\n\n// Clock to efficiently retrieve an approximately accurate time from an\n// epoll_server::SimpleEpollServer.\nclass QuicEpollClock : public QuicClock {\npublic:\n  explicit QuicEpollClock(epoll_server::SimpleEpollServer* epoll_server);\n\n  QuicEpollClock(const QuicEpollClock&) = delete;\n  QuicEpollClock& operator=(const QuicEpollClock&) = delete;\n\n  ~QuicEpollClock() override = default;\n\n  // Returns the approximate current time as a QuicTime object.\n  QuicTime ApproximateNow() const override;\n\n  // Returns the current time as a QuicTime object.\n  // Note: this uses significant resources, please use only if needed.\n  QuicTime Now() const override;\n\n  // Returns the current time as a QuicWallTime object.\n  // Note: this uses significant resources, please use only if needed.\n  QuicWallTime WallNow() const override;\n\n  // Override to do less work in this implementation. The epoll clock is\n  // already based on system (unix epoch) time, no conversion required.\n  QuicTime ConvertWallTimeToQuicTime(const QuicWallTime& walltime) const override;\n\nprotected:\n  epoll_server::SimpleEpollServer* epoll_server_;\n  // Largest time returned from Now() so far.\n  mutable QuicTime largest_time_;\n};\n\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_epoll_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"quiche/epoll_server/simple_epoll_server.h\"\n\nnamespace quic {\n\nusing QuicEpollServerImpl = epoll_server::SimpleEpollServer;\nusing QuicEpollEventImpl = epoll_server::EpollEvent;\nusing QuicEpollAlarmBaseImpl = epoll_server::EpollAlarm;\nusing QuicEpollCallbackInterfaceImpl = epoll_server::EpollCallbackInterface;\n\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"quiche/quic/platform/api/quic_logging.h\"\n#include \"quiche/quic/platform/api/quic_mock_log.h\"\n\n#define EXPECT_QUIC_BUG_IMPL(statement, regex)                                                     \\\n  EXPECT_QUIC_DFATAL_IMPL(statement, testing::ContainsRegex(regex))\n\n#define EXPECT_QUIC_PEER_BUG_IMPL(statement, regex)                                                \\\n  EXPECT_QUIC_LOG_IMPL(statement, ERROR, testing::ContainsRegex(regex))\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_mock_log_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <string>\n\n#include \"common/common/assert.h\"\n\n#include \"gmock/gmock.h\"\n#include \"quiche/quic/platform/api/quic_logging.h\"\n\nnamespace quic {\n\n// A QuicEnvoyMockLog object captures QUIC_LOG() messages emitted between StartCapturingLogs() and\n// destruction(or StopCapturingLogs()).\nclass QuicEnvoyMockLog : public QuicLogSink {\npublic:\n  QuicEnvoyMockLog() = default;\n\n  ~QuicEnvoyMockLog() override {\n    if (is_capturing_) {\n      StopCapturingLogs();\n    }\n  }\n\n  MOCK_METHOD(void, Log, (QuicLogLevel level, const std::string& message));\n\n  void StartCapturingLogs() {\n    ASSERT(!is_capturing_);\n    is_capturing_ = true;\n    original_sink_ = SetLogSink(this);\n  }\n\n  void StopCapturingLogs() {\n    ASSERT(is_capturing_);\n    is_capturing_ = false;\n    SetLogSink(original_sink_);\n  }\n\nprivate:\n  QuicLogSink* original_sink_;\n  bool is_capturing_{false};\n};\n\n// ScopedDisableExitOnDFatal is used to disable exiting the program when we encounter a\n// QUIC_LOG(DFATAL) within the current block. 
After we leave the current block, the previous\n// behavior is restored.\nclass ScopedDisableExitOnDFatal {\npublic:\n  ScopedDisableExitOnDFatal() : previous_value_(IsDFatalExitDisabled()) {\n    SetDFatalExitDisabled(true);\n  }\n\n  ScopedDisableExitOnDFatal(const ScopedDisableExitOnDFatal&) = delete;\n  ScopedDisableExitOnDFatal& operator=(const ScopedDisableExitOnDFatal&) = delete;\n\n  ~ScopedDisableExitOnDFatal() { SetDFatalExitDisabled(previous_value_); }\n\nprivate:\n  const bool previous_value_;\n};\n\n} // namespace quic\n\nusing QuicMockLogImpl = quic::QuicEnvoyMockLog;\n\n#define CREATE_QUIC_MOCK_LOG_IMPL(log) QuicMockLog log\n\n#define EXPECT_QUIC_LOG_CALL_IMPL(log) EXPECT_CALL(log, Log(testing::_, testing::_))\n\n#define EXPECT_QUIC_LOG_CALL_CONTAINS_IMPL(log, level, content)                                    \\\n  EXPECT_CALL(log, Log(quic::level, testing::HasSubstr(content)))\n\n// Not part of the api exposed by quic_mock_log.h. This is used by\n// quic_expect_bug_impl.h.\n#define EXPECT_QUIC_LOG_IMPL(statement, level, matcher)                                            \\\n  do {                                                                                             \\\n    quic::QuicEnvoyMockLog mock_log;                                                               \\\n    EXPECT_CALL(mock_log, Log(quic::level, matcher)).Times(testing::AtLeast(1));                   \\\n    mock_log.StartCapturingLogs();                                                                 \\\n    { statement; }                                                                                 \\\n    mock_log.StopCapturingLogs();                                                                  \\\n    if (!testing::Mock::VerifyAndClear(&mock_log)) {                                               \\\n      GTEST_NONFATAL_FAILURE_(\"\");                                                                 \\\n    }                                                            
                                  \\\n  } while (false)\n\n#define EXPECT_QUIC_DFATAL_IMPL(statement, matcher)                                                \\\n  EXPECT_QUIC_LOG_IMPL(                                                                            \\\n      {                                                                                            \\\n        quic::ScopedDisableExitOnDFatal disable_exit_on_dfatal;                                    \\\n        statement;                                                                                 \\\n      },                                                                                           \\\n      DFATAL, matcher)\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <netinet/in.h>\n\n#include <fstream>\n\n#include \"common/memory/stats.h\"\n#include \"common/network/socket_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/quic_listeners/quiche/platform/flags_impl.h\"\n\n#include \"test/common/buffer/utility.h\"\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/extensions/quic_listeners/quiche/platform/quic_epoll_clock.h\"\n#include \"test/extensions/transport_sockets/tls/ssl_test_utility.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"fmt/printf.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"quiche/common/platform/api/quiche_string_piece.h\"\n#include \"quiche/epoll_server/fake_simple_epoll_server.h\"\n#include \"quiche/quic/platform/api/quic_aligned.h\"\n#include \"quiche/quic/platform/api/quic_bug_tracker.h\"\n#include \"quiche/quic/platform/api/quic_cert_utils.h\"\n#include \"quiche/quic/platform/api/quic_client_stats.h\"\n#include \"quiche/quic/platform/api/quic_containers.h\"\n#include \"quiche/quic/platform/api/quic_estimate_memory_usage.h\"\n#include \"quiche/quic/platform/api/quic_expect_bug.h\"\n#include \"quiche/quic/platform/api/quic_exported_stats.h\"\n#include \"quiche/quic/platform/api/quic_file_utils.h\"\n#include \"quiche/quic/platform/api/quic_flags.h\"\n#include \"quiche/quic/platform/api/quic_hostname_utils.h\"\n#include \"quiche/quic/platform/api/quic_logging.h\"\n#include \"quiche/quic/platform/api/quic_macros.h\"\n#include 
\"quiche/quic/platform/api/quic_map_util.h\"\n#include \"quiche/quic/platform/api/quic_mem_slice.h\"\n#include \"quiche/quic/platform/api/quic_mem_slice_span.h\"\n#include \"quiche/quic/platform/api/quic_mem_slice_storage.h\"\n#include \"quiche/quic/platform/api/quic_mock_log.h\"\n#include \"quiche/quic/platform/api/quic_mutex.h\"\n#include \"quiche/quic/platform/api/quic_pcc_sender.h\"\n#include \"quiche/quic/platform/api/quic_port_utils.h\"\n#include \"quiche/quic/platform/api/quic_ptr_util.h\"\n#include \"quiche/quic/platform/api/quic_server_stats.h\"\n#include \"quiche/quic/platform/api/quic_sleep.h\"\n#include \"quiche/quic/platform/api/quic_stack_trace.h\"\n#include \"quiche/quic/platform/api/quic_stream_buffer_allocator.h\"\n#include \"quiche/quic/platform/api/quic_system_event_loop.h\"\n#include \"quiche/quic/platform/api/quic_test.h\"\n#include \"quiche/quic/platform/api/quic_test_output.h\"\n#include \"quiche/quic/platform/api/quic_thread.h\"\n#include \"quiche/quic/platform/api/quic_uint128.h\"\n\n// Basic tests to validate functioning of the QUICHE quic platform\n// implementation. 
For platform APIs in which the implementation is a simple\n// typedef/passthrough to a std:: or absl:: construct, the tests are kept\n// minimal, and serve primarily to verify the APIs compile and link without\n// issue.\n\nusing testing::_;\nusing testing::HasSubstr;\nusing testing::Return;\n\nnamespace quic {\nnamespace {\n\nclass QuicPlatformTest : public testing::Test {\nprotected:\n  QuicPlatformTest()\n      : log_level_(GetLogger().level()), verbosity_log_threshold_(GetVerbosityLogThreshold()) {\n    SetVerbosityLogThreshold(0);\n    GetLogger().set_level(ERROR);\n  }\n\n  ~QuicPlatformTest() override {\n    SetVerbosityLogThreshold(verbosity_log_threshold_);\n    GetLogger().set_level(log_level_);\n  }\n\n  const QuicLogLevel log_level_;\n  const int verbosity_log_threshold_;\n};\n\nTEST_F(QuicPlatformTest, QuicAlignOf) { EXPECT_LT(0, QUIC_ALIGN_OF(int)); }\n\nenum class TestEnum { ZERO = 0, ONE, TWO, COUNT };\n\nTEST_F(QuicPlatformTest, QuicBugTracker) {\n  EXPECT_DEBUG_DEATH(QUIC_BUG << \"Here is a bug,\", \" bug\");\n  EXPECT_DEBUG_DEATH(QUIC_BUG_IF(true) << \"There is a bug,\", \" bug\");\n  EXPECT_LOG_NOT_CONTAINS(\"error\", \"\", QUIC_BUG_IF(false) << \"A feature is not a bug.\");\n\n  EXPECT_LOG_CONTAINS(\"error\", \" bug\", QUIC_PEER_BUG << \"Everywhere's a bug,\");\n  EXPECT_LOG_CONTAINS(\"error\", \" here\", QUIC_PEER_BUG_IF(true) << \"Including here.\");\n  EXPECT_LOG_NOT_CONTAINS(\"error\", \"\", QUIC_PEER_BUG_IF(false) << \"But not there.\");\n}\n\nTEST_F(QuicPlatformTest, QuicClientStats) {\n  // Just make sure they compile.\n  QUIC_CLIENT_HISTOGRAM_ENUM(\"my.enum.histogram\", TestEnum::ONE, TestEnum::COUNT, \"doc\");\n  QUIC_CLIENT_HISTOGRAM_BOOL(\"my.bool.histogram\", false, \"doc\");\n  QUIC_CLIENT_HISTOGRAM_TIMES(\"my.timing.histogram\", QuicTime::Delta::FromSeconds(5),\n                              QuicTime::Delta::FromSeconds(1), QuicTime::Delta::FromSecond(3600),\n                              100, \"doc\");\n  
QUIC_CLIENT_HISTOGRAM_COUNTS(\"my.count.histogram\", 123, 0, 1000, 100, \"doc\");\n  QuicClientSparseHistogram(\"my.sparse.histogram\", 345);\n  // Make sure compiler doesn't report unused-parameter error.\n  bool should_be_used;\n  QUIC_CLIENT_HISTOGRAM_BOOL(\"my.bool.histogram\", should_be_used, \"doc\");\n}\n\nTEST_F(QuicPlatformTest, QuicExpectBug) {\n  auto bug = [](const char* error_message) { QUIC_BUG << error_message; };\n\n  auto peer_bug = [](const char* error_message) { QUIC_PEER_BUG << error_message; };\n\n  EXPECT_QUIC_BUG(bug(\"bug one is expected\"), \"bug one\");\n  EXPECT_QUIC_BUG(bug(\"bug two is expected\"), \"bug two\");\n\n  EXPECT_QUIC_PEER_BUG(peer_bug(\"peer_bug_1 is expected\"), \"peer_bug_1\");\n  EXPECT_QUIC_PEER_BUG(peer_bug(\"peer_bug_2 is expected\"), \"peer_bug_2\");\n}\n\nTEST_F(QuicPlatformTest, QuicExportedStats) {\n  // Just make sure they compile.\n  QUIC_HISTOGRAM_ENUM(\"my.enum.histogram\", TestEnum::ONE, TestEnum::COUNT, \"doc\");\n  QUIC_HISTOGRAM_BOOL(\"my.bool.histogram\", false, \"doc\");\n  QUIC_HISTOGRAM_TIMES(\"my.timing.histogram\", QuicTime::Delta::FromSeconds(5),\n                       QuicTime::Delta::FromSeconds(1), QuicTime::Delta::FromSecond(3600), 100,\n                       \"doc\");\n  QUIC_HISTOGRAM_COUNTS(\"my.count.histogram\", 123, 0, 1000, 100, \"doc\");\n}\n\nTEST_F(QuicPlatformTest, QuicHostnameUtils) {\n  EXPECT_FALSE(QuicHostnameUtils::IsValidSNI(\"!!\"));\n  EXPECT_FALSE(QuicHostnameUtils::IsValidSNI(\"envoyproxy\"));\n  EXPECT_TRUE(QuicHostnameUtils::IsValidSNI(\"www.envoyproxy.io\"));\n  EXPECT_EQ(\"lyft.com\", QuicHostnameUtils::NormalizeHostname(\"lyft.com\"));\n  EXPECT_EQ(\"google.com\", QuicHostnameUtils::NormalizeHostname(\"google.com...\"));\n  EXPECT_EQ(\"quicwg.org\", QuicHostnameUtils::NormalizeHostname(\"QUICWG.ORG\"));\n}\n\nTEST_F(QuicPlatformTest, QuicUnorderedMap) {\n  QuicUnorderedMap<std::string, int> umap;\n  umap.insert({\"foo\", 2});\n  EXPECT_EQ(2, 
umap[\"foo\"]);\n}\n\nTEST_F(QuicPlatformTest, QuicUnorderedSet) {\n  QuicUnorderedSet<std::string> uset({\"foo\", \"bar\"});\n  EXPECT_EQ(1, uset.count(\"bar\"));\n  EXPECT_EQ(0, uset.count(\"qux\"));\n}\n\nTEST_F(QuicPlatformTest, QuicQueue) {\n  QuicQueue<int> queue;\n  queue.push(10);\n  EXPECT_EQ(10, queue.back());\n}\n\nTEST_F(QuicPlatformTest, QuicInlinedVector) {\n  QuicInlinedVector<int, 5> vec;\n  vec.push_back(3);\n  EXPECT_EQ(3, vec[0]);\n}\n\nTEST_F(QuicPlatformTest, QuicEstimateMemoryUsage) {\n  std::string s = \"foo\";\n  // Stubbed out to always return 0.\n  EXPECT_EQ(0, QuicEstimateMemoryUsage(s));\n}\n\nTEST_F(QuicPlatformTest, QuicMapUtil) {\n  std::map<std::string, int> stdmap = {{\"one\", 1}, {\"two\", 2}, {\"three\", 3}};\n  EXPECT_TRUE(QuicContainsKey(stdmap, \"one\"));\n  EXPECT_FALSE(QuicContainsKey(stdmap, \"zero\"));\n\n  QuicUnorderedMap<int, int> umap = {{1, 1}, {2, 4}, {3, 9}};\n  EXPECT_TRUE(QuicContainsKey(umap, 2));\n  EXPECT_FALSE(QuicContainsKey(umap, 10));\n\n  QuicUnorderedSet<std::string> uset({\"foo\", \"bar\"});\n  EXPECT_TRUE(QuicContainsKey(uset, \"foo\"));\n  EXPECT_FALSE(QuicContainsKey(uset, \"abc\"));\n\n  std::vector<int> stdvec = {1, 2, 3};\n  EXPECT_TRUE(QuicContainsValue(stdvec, 1));\n  EXPECT_FALSE(QuicContainsValue(stdvec, 0));\n}\n\nTEST_F(QuicPlatformTest, QuicMockLog) {\n  ASSERT_EQ(ERROR, GetLogger().level());\n\n  {\n    // Test a mock log that is not capturing logs.\n    CREATE_QUIC_MOCK_LOG(log);\n    EXPECT_QUIC_LOG_CALL(log).Times(0);\n    QUIC_LOG(ERROR) << \"This should be logged but not captured by the mock.\";\n  }\n\n  // Test nested mock logs.\n  CREATE_QUIC_MOCK_LOG(outer_log);\n  outer_log.StartCapturingLogs();\n\n  {\n    // Test a mock log that captures logs.\n    CREATE_QUIC_MOCK_LOG(inner_log);\n    inner_log.StartCapturingLogs();\n\n    EXPECT_QUIC_LOG_CALL_CONTAINS(inner_log, ERROR, \"Inner log message\");\n    QUIC_LOG(ERROR) << \"Inner log message should be captured.\";\n\n    // 
Destruction of inner_log should restore the QUIC log sink to outer_log.\n  }\n\n  EXPECT_QUIC_LOG_CALL_CONTAINS(outer_log, ERROR, \"Outer log message\");\n  QUIC_LOG(ERROR) << \"Outer log message should be captured.\";\n}\n\nTEST_F(QuicPlatformTest, QuicServerStats) {\n  // Just make sure they compile.\n  QUIC_SERVER_HISTOGRAM_ENUM(\"my.enum.histogram\", TestEnum::ONE, TestEnum::COUNT, \"doc\");\n  QUIC_SERVER_HISTOGRAM_BOOL(\"my.bool.histogram\", false, \"doc\");\n  QUIC_SERVER_HISTOGRAM_TIMES(\"my.timing.histogram\", QuicTime::Delta::FromSeconds(5),\n                              QuicTime::Delta::FromSeconds(1), QuicTime::Delta::FromSecond(3600),\n                              100, \"doc\");\n  QUIC_SERVER_HISTOGRAM_COUNTS(\"my.count.histogram\", 123, 0, 1000, 100, \"doc\");\n}\n\nTEST_F(QuicPlatformTest, QuicStackTraceTest) {\n#if !defined(ENVOY_CONFIG_COVERAGE) && !defined(GCC_COMPILER)\n  // This doesn't work in coverage build because part of the stacktrace will be overwritten by\n  // __llvm_coverage_mapping\n  // Stack trace under gcc with optimizations on (-c opt) doesn't include the test name\n  EXPECT_THAT(QuicStackTrace(), HasSubstr(\"QuicStackTraceTest\"));\n#endif\n}\n\nTEST_F(QuicPlatformTest, QuicSleep) { QuicSleep(QuicTime::Delta::FromMilliseconds(20)); }\n\nTEST_F(QuicPlatformTest, QuicThread) {\n  class AdderThread : public QuicThread {\n  public:\n    AdderThread(int* value, int increment)\n        : QuicThread(\"adder_thread\"), value_(value), increment_(increment) {}\n\n    ~AdderThread() override = default;\n\n  protected:\n    void Run() override { *value_ += increment_; }\n\n  private:\n    int* value_;\n    int increment_;\n  };\n\n  int value = 0;\n\n  // A QuicThread that is never started, which is ok.\n  { AdderThread t0(&value, 1); }\n  EXPECT_EQ(0, value);\n\n  // A QuicThread that is started and joined as usual.\n  {\n    AdderThread t1(&value, 1);\n    t1.Start();\n    t1.Join();\n  }\n  EXPECT_EQ(1, value);\n\n  // QuicThread will 
panic if it's started but not joined.\n  EXPECT_DEATH({ AdderThread(&value, 2).Start(); },\n               \"QuicThread should be joined before destruction\");\n}\n\nTEST_F(QuicPlatformTest, QuicUint128) {\n  QuicUint128 i = MakeQuicUint128(16777216, 315);\n  EXPECT_EQ(315, QuicUint128Low64(i));\n  EXPECT_EQ(16777216, QuicUint128High64(i));\n}\n\nTEST_F(QuicPlatformTest, QuicPtrUtil) {\n  auto p = QuicWrapUnique(new std::string(\"aaa\"));\n  EXPECT_EQ(\"aaa\", *p);\n}\n\nTEST_F(QuicPlatformTest, QuicLog) {\n  // By default, tests emit logs at level ERROR or higher.\n  ASSERT_EQ(ERROR, GetLogger().level());\n\n  int i = 0;\n\n  QUIC_LOG(INFO) << (i = 10);\n  QUIC_LOG_IF(INFO, false) << i++;\n  QUIC_LOG_IF(INFO, true) << i++;\n  EXPECT_EQ(0, i);\n\n  EXPECT_LOG_CONTAINS(\"error\", \"i=11\", QUIC_LOG(ERROR) << \"i=\" << (i = 11));\n  EXPECT_EQ(11, i);\n\n  QUIC_LOG_IF(ERROR, false) << i++;\n  EXPECT_EQ(11, i);\n\n  EXPECT_LOG_CONTAINS(\"error\", \"i=11\", QUIC_LOG_IF(ERROR, true) << \"i=\" << i++);\n  EXPECT_EQ(12, i);\n\n  // Set QUIC log level to INFO, since VLOG is emitted at the INFO level.\n  GetLogger().set_level(INFO);\n\n  ASSERT_EQ(0, GetVerbosityLogThreshold());\n\n  QUIC_VLOG(1) << (i = 1);\n  EXPECT_EQ(12, i);\n\n  SetVerbosityLogThreshold(1);\n\n  EXPECT_LOG_CONTAINS(\"info\", \"i=1\", QUIC_VLOG(1) << \"i=\" << (i = 1));\n  EXPECT_EQ(1, i);\n\n  errno = SOCKET_ERROR_INVAL;\n  EXPECT_LOG_CONTAINS(\"info\", \"i=3:\", QUIC_PLOG(INFO) << \"i=\" << (i = 3));\n  EXPECT_EQ(3, i);\n}\n\n#ifdef NDEBUG\n#define VALUE_BY_COMPILE_MODE(debug_mode_value, release_mode_value) release_mode_value\n#else\n#define VALUE_BY_COMPILE_MODE(debug_mode_value, release_mode_value) debug_mode_value\n#endif\n\nTEST_F(QuicPlatformTest, LogIoManipulators) {\n  GetLogger().set_level(ERROR);\n  QUIC_DLOG(ERROR) << \"aaaa\" << std::endl;\n  EXPECT_LOG_CONTAINS(\"error\", \"aaaa\\n\\n\", QUIC_LOG(ERROR) << \"aaaa\" << std::endl << std::endl);\n  EXPECT_LOG_NOT_CONTAINS(\"error\", 
\"aaaa\\n\\n\\n\",\n                          QUIC_LOG(ERROR) << \"aaaa\" << std::endl\n                                          << std::endl);\n\n  EXPECT_LOG_CONTAINS(\"error\", \"42 in octal is 52\",\n                      QUIC_LOG(ERROR) << 42 << \" in octal is \" << std::oct << 42);\n}\n\nTEST_F(QuicPlatformTest, QuicDLog) {\n  int i = 0;\n\n  GetLogger().set_level(ERROR);\n\n  QUIC_DLOG(INFO) << (i = 10);\n  QUIC_DLOG_IF(INFO, false) << i++;\n  QUIC_DLOG_IF(INFO, true) << i++;\n  EXPECT_EQ(0, i);\n\n  GetLogger().set_level(INFO);\n\n  QUIC_DLOG(INFO) << (i = 10);\n  QUIC_DLOG_IF(INFO, false) << i++;\n  EXPECT_EQ(VALUE_BY_COMPILE_MODE(10, 0), i);\n\n  QUIC_DLOG_IF(INFO, true) << (i = 11);\n  EXPECT_EQ(VALUE_BY_COMPILE_MODE(11, 0), i);\n\n  ASSERT_EQ(0, GetVerbosityLogThreshold());\n\n  QUIC_DVLOG(1) << (i = 1);\n  EXPECT_EQ(VALUE_BY_COMPILE_MODE(11, 0), i);\n\n  SetVerbosityLogThreshold(1);\n\n  QUIC_DVLOG(1) << (i = 1);\n  EXPECT_EQ(VALUE_BY_COMPILE_MODE(1, 0), i);\n\n  QUIC_DVLOG_IF(1, false) << (i = 2);\n  EXPECT_EQ(VALUE_BY_COMPILE_MODE(1, 0), i);\n\n  QUIC_DVLOG_IF(1, true) << (i = 2);\n  EXPECT_EQ(VALUE_BY_COMPILE_MODE(2, 0), i);\n}\n\n#undef VALUE_BY_COMPILE_MODE\n\nTEST_F(QuicPlatformTest, QuicCHECK) {\n  CHECK(1 == 1);\n  CHECK(1 == 1) << \" 1 == 1 is forever true.\";\n\n  EXPECT_DEBUG_DEATH({ DCHECK(false) << \" Supposed to fail in debug mode.\"; },\n                     \"CHECK failed:.* Supposed to fail in debug mode.\");\n  EXPECT_DEBUG_DEATH({ DCHECK(false); }, \"CHECK failed\");\n\n  EXPECT_DEATH({ CHECK(false) << \" Supposed to fail in all modes.\"; },\n               \"CHECK failed:.* Supposed to fail in all modes.\");\n  EXPECT_DEATH({ CHECK(false); }, \"CHECK failed\");\n}\n\n// Test the behaviors of the cross products of\n//\n//   {QUIC_LOG, QUIC_DLOG} x {FATAL, DFATAL} x {debug, release}\nTEST_F(QuicPlatformTest, QuicFatalLog) {\n#ifdef NDEBUG\n  // Release build\n  EXPECT_DEATH(QUIC_LOG(FATAL) << \"Should abort 0\", \"Should abort 
0\");\n  QUIC_LOG(DFATAL) << \"Should not abort\";\n  QUIC_DLOG(FATAL) << \"Should compile out\";\n  QUIC_DLOG(DFATAL) << \"Should compile out\";\n#else\n  // Debug build\n  EXPECT_DEATH(QUIC_LOG(FATAL) << \"Should abort 1\", \"Should abort 1\");\n  EXPECT_DEATH(QUIC_LOG(DFATAL) << \"Should abort 2\", \"Should abort 2\");\n  EXPECT_DEATH(QUIC_DLOG(FATAL) << \"Should abort 3\", \"Should abort 3\");\n  EXPECT_DEATH(QUIC_DLOG(DFATAL) << \"Should abort 4\", \"Should abort 4\");\n#endif\n}\n\nTEST_F(QuicPlatformTest, QuicBranchPrediction) {\n  GetLogger().set_level(INFO);\n\n  if (QUIC_PREDICT_FALSE(rand() % RAND_MAX == 123456789)) {\n    QUIC_LOG(INFO) << \"Go buy some lottery tickets.\";\n  } else {\n    QUIC_LOG(INFO) << \"As predicted.\";\n  }\n}\n\nTEST_F(QuicPlatformTest, QuicNotReached) {\n#ifdef NDEBUG\n  QUIC_NOTREACHED(); // Expect no-op.\n#else\n  EXPECT_DEATH(QUIC_NOTREACHED(), \"not reached\");\n#endif\n}\n\nTEST_F(QuicPlatformTest, QuicMutex) {\n  QuicMutex mu;\n\n  QuicWriterMutexLock wmu(&mu);\n  mu.AssertReaderHeld();\n  mu.WriterUnlock();\n  {\n    QuicReaderMutexLock rmu(&mu);\n    mu.AssertReaderHeld();\n  }\n  mu.WriterLock();\n}\n\nTEST_F(QuicPlatformTest, QuicNotification) {\n  QuicNotification notification;\n  EXPECT_FALSE(notification.HasBeenNotified());\n  notification.Notify();\n  notification.WaitForNotification();\n  EXPECT_TRUE(notification.HasBeenNotified());\n}\n\nTEST_F(QuicPlatformTest, QuicCertUtils) {\n  bssl::UniquePtr<X509> x509_cert =\n      Envoy::Extensions::TransportSockets::Tls::readCertFromFile(Envoy::TestEnvironment::substitute(\n          \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  // Encode X509 cert with DER encoding.\n  unsigned char* der = nullptr;\n  int len = i2d_X509(x509_cert.get(), &der);\n  ASSERT_GT(len, 0);\n  quiche::QuicheStringPiece out;\n  QuicCertUtils::ExtractSubjectNameFromDERCert(\n      quiche::QuicheStringPiece(reinterpret_cast<const char*>(der), len), 
&out);\n  EXPECT_EQ(\"0z1\\v0\\t\\x6\\x3U\\x4\\x6\\x13\\x2US1\\x13\"\n            \"0\\x11\\x6\\x3U\\x4\\b\\f\\nCalifornia1\\x16\"\n            \"0\\x14\\x6\\x3U\\x4\\a\\f\\rSan Francisco1\\r\"\n            \"0\\v\\x6\\x3U\\x4\\n\\f\\x4Lyft1\\x19\"\n            \"0\\x17\\x6\\x3U\\x4\\v\\f\\x10Lyft Engineering1\\x14\"\n            \"0\\x12\\x6\\x3U\\x4\\x3\\f\\vTest Server\",\n            out);\n  OPENSSL_free(static_cast<void*>(der));\n}\n\nTEST_F(QuicPlatformTest, QuicTestOutput) {\n  Envoy::TestEnvironment::setEnvVar(\"QUIC_TEST_OUTPUT_DIR\", \"/tmp\", /*overwrite=*/false);\n\n  // Set log level to INFO to see the test output path in log.\n  GetLogger().set_level(INFO);\n\n  EXPECT_LOG_NOT_CONTAINS(\"warn\", \"\", QuicRecordTrace(\"quic_test_output.1\", \"output 1 content\\n\"));\n  EXPECT_LOG_NOT_CONTAINS(\"error\", \"\", QuicRecordTrace(\"quic_test_output.2\", \"output 2 content\\n\"));\n  EXPECT_LOG_CONTAINS(\"info\", \"Recorded test output into\",\n                      QuicRecordTrace(\"quic_test_output.3\", \"output 3 content\\n\"));\n\n  std::string content4{\"output 4 content\\n\"};\n  const testing::TestInfo* test_info = testing::UnitTest::GetInstance()->current_test_info();\n\n  std::string timestamp = absl::FormatTime(\"%Y%m%d%H%M%S\", absl::Now(), absl::LocalTimeZone());\n\n  std::string filename = fmt::sprintf(\"%s.%s.%s.%s.qtr\", test_info->name(),\n                                      test_info->test_case_name(), \"quic_test_output.4\", timestamp);\n\n  EXPECT_LOG_CONTAINS(\"info\", \"Recorded test output into\", QuicSaveTestOutput(filename, content4));\n\n  std::string content;\n  EXPECT_TRUE(QuicLoadTestOutput(filename, &content));\n  EXPECT_EQ(\"output 4 content\\n\", content);\n  EXPECT_FALSE(QuicLoadTestOutput(\"nonexisting_file\", &content));\n}\n\nTEST_F(QuicPlatformTest, ApproximateNowInUsec) {\n  epoll_server::test::FakeSimpleEpollServer epoll_server;\n  QuicEpollClock clock(&epoll_server);\n\n  epoll_server.set_now_in_usec(1000000);\n  
EXPECT_EQ(1000000, (clock.ApproximateNow() - QuicTime::Zero()).ToMicroseconds());\n  EXPECT_EQ(1u, clock.WallNow().ToUNIXSeconds());\n  EXPECT_EQ(1000000u, clock.WallNow().ToUNIXMicroseconds());\n\n  epoll_server.AdvanceBy(5);\n  EXPECT_EQ(1000005, (clock.ApproximateNow() - QuicTime::Zero()).ToMicroseconds());\n  EXPECT_EQ(1u, clock.WallNow().ToUNIXSeconds());\n  EXPECT_EQ(1000005u, clock.WallNow().ToUNIXMicroseconds());\n\n  epoll_server.AdvanceBy(10 * 1000000);\n  EXPECT_EQ(11u, clock.WallNow().ToUNIXSeconds());\n  EXPECT_EQ(11000005u, clock.WallNow().ToUNIXMicroseconds());\n}\n\nTEST_F(QuicPlatformTest, NowInUsec) {\n  epoll_server::test::FakeSimpleEpollServer epoll_server;\n  QuicEpollClock clock(&epoll_server);\n\n  epoll_server.set_now_in_usec(1000000);\n  EXPECT_EQ(1000000, (clock.Now() - QuicTime::Zero()).ToMicroseconds());\n\n  epoll_server.AdvanceBy(5);\n  EXPECT_EQ(1000005, (clock.Now() - QuicTime::Zero()).ToMicroseconds());\n}\n\nTEST_F(QuicPlatformTest, MonotonicityWithRealEpollClock) {\n  epoll_server::SimpleEpollServer epoll_server;\n  QuicEpollClock clock(&epoll_server);\n\n  quic::QuicTime last_now = clock.Now();\n  for (int i = 0; i < 1e5; ++i) {\n    quic::QuicTime now = clock.Now();\n\n    ASSERT_LE(last_now, now);\n\n    last_now = now;\n  }\n}\n\nTEST_F(QuicPlatformTest, MonotonicityWithFakeEpollClock) {\n  epoll_server::test::FakeSimpleEpollServer epoll_server;\n  QuicEpollClock clock(&epoll_server);\n\n  epoll_server.set_now_in_usec(100);\n  quic::QuicTime last_now = clock.Now();\n\n  epoll_server.set_now_in_usec(90);\n  quic::QuicTime now = clock.Now();\n\n  ASSERT_EQ(last_now, now);\n}\n\nTEST_F(QuicPlatformTest, QuicFlags) {\n  auto& flag_registry = quiche::FlagRegistry::GetInstance();\n  flag_registry.ResetFlags();\n\n  EXPECT_FALSE(GetQuicReloadableFlag(quic_testonly_default_false));\n  EXPECT_TRUE(GetQuicReloadableFlag(quic_testonly_default_true));\n  SetQuicReloadableFlag(quic_testonly_default_false, true);\n  
EXPECT_TRUE(GetQuicReloadableFlag(quic_testonly_default_false));\n\n  EXPECT_FALSE(GetQuicRestartFlag(quic_testonly_default_false));\n  EXPECT_TRUE(GetQuicRestartFlag(quic_testonly_default_true));\n  SetQuicRestartFlag(quic_testonly_default_false, true);\n  EXPECT_TRUE(GetQuicRestartFlag(quic_testonly_default_false));\n\n  EXPECT_EQ(200, GetQuicFlag(FLAGS_quic_time_wait_list_seconds));\n  SetQuicFlag(FLAGS_quic_time_wait_list_seconds, 100);\n  EXPECT_EQ(100, GetQuicFlag(FLAGS_quic_time_wait_list_seconds));\n\n  flag_registry.ResetFlags();\n  EXPECT_FALSE(GetQuicReloadableFlag(quic_testonly_default_false));\n  EXPECT_TRUE(GetQuicRestartFlag(quic_testonly_default_true));\n  EXPECT_EQ(200, GetQuicFlag(FLAGS_quic_time_wait_list_seconds));\n  flag_registry.FindFlag(\"quic_reloadable_flag_quic_testonly_default_false\")\n      ->SetValueFromString(\"true\");\n  flag_registry.FindFlag(\"quic_restart_flag_quic_testonly_default_true\")->SetValueFromString(\"0\");\n  flag_registry.FindFlag(\"quic_time_wait_list_seconds\")->SetValueFromString(\"100\");\n  EXPECT_TRUE(GetQuicReloadableFlag(quic_testonly_default_false));\n  EXPECT_FALSE(GetQuicRestartFlag(quic_testonly_default_true));\n  EXPECT_EQ(100, GetQuicFlag(FLAGS_quic_time_wait_list_seconds));\n}\n\nTEST_F(QuicPlatformTest, QuicPccSender) {\n  EXPECT_DEATH(quic::CreatePccSender(/*clock=*/nullptr, /*rtt_stats=*/nullptr,\n                                     /*unacked_packets=*/nullptr, /*random=*/nullptr,\n                                     /*stats=*/nullptr,\n                                     /*initial_congestion_window=*/0,\n                                     /*max_congestion_window=*/0),\n               \"PccSender is not supported.\");\n}\n\nclass FileUtilsTest : public testing::Test {\npublic:\n  FileUtilsTest() : dir_path_(Envoy::TestEnvironment::temporaryPath(\"quic_file_util_test\")) {\n    files_to_remove_.push(dir_path_);\n  }\n\nprotected:\n  void SetUp() override { 
Envoy::TestEnvironment::createPath(dir_path_); }\n\n  void TearDown() override {\n    while (!files_to_remove_.empty()) {\n      const std::string& f = files_to_remove_.top();\n      Envoy::TestEnvironment::removePath(f);\n      files_to_remove_.pop();\n    }\n  }\n\n  void addSubDirs(std::list<std::string> sub_dirs) {\n    for (const std::string& dir_name : sub_dirs) {\n      const std::string full_path = dir_path_ + \"/\" + dir_name;\n      Envoy::TestEnvironment::createPath(full_path);\n      files_to_remove_.push(full_path);\n    }\n  }\n\n  void addFiles(std::list<std::string> files) {\n    for (const std::string& file_name : files) {\n      const std::string full_path = dir_path_ + \"/\" + file_name;\n      { const std::ofstream file(full_path); }\n      files_to_remove_.push(full_path);\n    }\n  }\n\n  const std::string dir_path_;\n  std::stack<std::string> files_to_remove_;\n};\n\nTEST_F(FileUtilsTest, ReadDirContents) {\n  addSubDirs({\"sub_dir1\", \"sub_dir2\", \"sub_dir1/sub_dir1_1\"});\n  addFiles({\"file\", \"sub_dir1/sub_file1\", \"sub_dir1/sub_dir1_1/sub_file1_1\", \"sub_dir2/sub_file2\"});\n\n  EXPECT_THAT(ReadFileContents(dir_path_),\n              testing::UnorderedElementsAre(dir_path_ + \"/file\", dir_path_ + \"/sub_dir1/sub_file1\",\n                                            dir_path_ + \"/sub_dir1/sub_dir1_1/sub_file1_1\",\n                                            dir_path_ + \"/sub_dir2/sub_file2\"));\n}\n\nTEST_F(FileUtilsTest, ReadFileContents) {\n  const std::string data = \"test string\\ntest\";\n  const std::string file_path =\n      Envoy::TestEnvironment::writeStringToFileForTest(\"test_envoy\", data);\n  std::string output;\n  ReadFileContents(file_path, &output);\n  EXPECT_EQ(data, output);\n}\n\nTEST_F(QuicPlatformTest, PickUnsedPort) {\n  int port = QuicPickServerPortForTestsOrDie();\n  std::vector<Envoy::Network::Address::IpVersion> supported_versions =\n      Envoy::TestEnvironment::getIpVersionsForTest();\n  for (auto 
ip_version : supported_versions) {\n    Envoy::Network::Address::InstanceConstSharedPtr addr =\n        Envoy::Network::Test::getCanonicalLoopbackAddress(ip_version);\n    Envoy::Network::Address::InstanceConstSharedPtr addr_with_port =\n        Envoy::Network::Utility::getAddressWithPort(*addr, port);\n    Envoy::Network::SocketImpl sock(Envoy::Network::Socket::Type::Datagram, addr_with_port);\n    // binding of given port should success.\n    EXPECT_EQ(0, sock.bind(addr_with_port).rc_);\n  }\n}\n\nTEST_F(QuicPlatformTest, FailToPickUnsedPort) {\n  Envoy::Api::MockOsSysCalls os_sys_calls;\n  Envoy::TestThreadsafeSingletonInjector<Envoy::Api::OsSysCallsImpl> os_calls(&os_sys_calls);\n  // Actually create sockets.\n  EXPECT_CALL(os_sys_calls, socket(_, _, _)).WillRepeatedly([](int domain, int type, int protocol) {\n    os_fd_t fd = ::socket(domain, type, protocol);\n    return Envoy::Api::SysCallSocketResult{fd, errno};\n  });\n  // Fail bind call's to mimic port exhaustion.\n  EXPECT_CALL(os_sys_calls, bind(_, _, _))\n      .WillRepeatedly(Return(Envoy::Api::SysCallIntResult{-1, SOCKET_ERROR_ADDR_IN_USE}));\n  EXPECT_DEATH(QuicPickServerPortForTestsOrDie(), \"Failed to pick a port for test.\");\n}\n\nTEST_F(QuicPlatformTest, TestEnvoyQuicBufferAllocator) {\n  QuicStreamBufferAllocator allocator;\n  Envoy::Stats::TestUtil::MemoryTest memory_test;\n  if (memory_test.mode() == Envoy::Stats::TestUtil::MemoryTest::Mode::Disabled) {\n    return;\n  }\n  char* p = allocator.New(1024);\n  EXPECT_NE(nullptr, p);\n  EXPECT_GT(memory_test.consumedBytes(), 0);\n  memset(p, 'a', 1024);\n  allocator.Delete(p);\n  EXPECT_EQ(memory_test.consumedBytes(), 0);\n}\n\nTEST_F(QuicPlatformTest, TestSystemEventLoop) {\n  // These two interfaces are no-op in Envoy. 
The test just makes sure they\n  // build.\n  QuicRunSystemEventLoopIteration();\n  QuicSystemEventLoop(\"dummy\");\n}\n\nQUIC_MUST_USE_RESULT bool dummyTestFunction() { return false; }\n\nTEST_F(QuicPlatformTest, TestQuicMacros) {\n  // Just make sure it compiles.\n  EXPECT_FALSE(dummyTestFunction());\n  int a QUIC_UNUSED;\n}\n\nTEST(EnvoyQuicMemSliceTest, ConstructMemSliceFromBuffer) {\n  std::string str(512, 'b');\n  // Fragment needs to out-live buffer.\n  bool fragment_releaser_called = false;\n  Envoy::Buffer::BufferFragmentImpl fragment(\n      str.data(), str.length(),\n      [&fragment_releaser_called](const void*, size_t, const Envoy::Buffer::BufferFragmentImpl*) {\n        // Used to verify that mem slice release appropriately.\n        fragment_releaser_called = true;\n      });\n  Envoy::Buffer::OwnedImpl buffer;\n  EXPECT_DEBUG_DEATH(quic::QuicMemSlice slice0{quic::QuicMemSliceImpl(buffer, 0)}, \"\");\n  std::string str2(1024, 'a');\n  // str2 is copied.\n  buffer.add(str2);\n  EXPECT_EQ(1u, buffer.getRawSlices().size());\n  buffer.addBufferFragment(fragment);\n\n  quic::QuicMemSlice slice1{quic::QuicMemSliceImpl(buffer, str2.length())};\n  EXPECT_EQ(str.length(), buffer.length());\n  EXPECT_EQ(str2, std::string(slice1.data(), slice1.length()));\n  std::string str2_old = str2; // NOLINT(performance-unnecessary-copy-initialization)\n  // slice1 is released, but str2 should not be affected.\n  slice1.Reset();\n  EXPECT_TRUE(slice1.empty());\n  EXPECT_EQ(nullptr, slice1.data());\n  EXPECT_EQ(str2_old, str2);\n\n  quic::QuicMemSlice slice2{quic::QuicMemSliceImpl(buffer, str.length())};\n  EXPECT_EQ(0, buffer.length());\n  EXPECT_EQ(str.data(), slice2.data());\n  EXPECT_EQ(str, std::string(slice2.data(), slice2.length()));\n  slice2.Reset();\n  EXPECT_TRUE(slice2.empty());\n  EXPECT_EQ(nullptr, slice2.data());\n  EXPECT_TRUE(fragment_releaser_called);\n}\n\nTEST(EnvoyQuicMemSliceTest, ConstructQuicMemSliceSpan) {\n  Envoy::Buffer::OwnedImpl buffer;\n  
std::string str(1024, 'a');\n  buffer.add(str);\n  quic::QuicMemSlice slice{quic::QuicMemSliceImpl(buffer, str.length())};\n\n  QuicMemSliceSpan span(&slice);\n  EXPECT_EQ(1024u, span.total_length());\n  EXPECT_EQ(str, span.GetData(0));\n}\n\nTEST(EnvoyQuicMemSliceTest, QuicMemSliceStorage) {\n  std::string str(512, 'a');\n  iovec iov = {const_cast<char*>(str.data()), str.length()};\n  SimpleBufferAllocator allocator;\n  QuicMemSliceStorage storage(&iov, 1, &allocator, 1024);\n  // Test copy constructor.\n  QuicMemSliceStorage other = storage;\n  QuicMemSliceSpan span = storage.ToSpan();\n  EXPECT_EQ(1u, span.NumSlices());\n  EXPECT_EQ(str.length(), span.total_length());\n  EXPECT_EQ(str, span.GetData(0));\n  QuicMemSliceSpan span_other = other.ToSpan();\n  EXPECT_EQ(1u, span_other.NumSlices());\n  EXPECT_EQ(str, span_other.GetData(0));\n  EXPECT_NE(span_other.GetData(0).data(), span.GetData(0).data());\n}\n\n} // namespace\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.h\"\n\n#include \"envoy/network/address.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n\nnamespace quic {\n\nint QuicPickServerPortForTestsOrDieImpl() {\n  std::vector<Envoy::Network::Address::IpVersion> supported_versions =\n      Envoy::TestEnvironment::getIpVersionsForTest();\n  ASSERT(!supported_versions.empty());\n  // Checking availability under corresponding supported version if test\n  // supports v4 only or v6 only.\n  // If it supports both v4 and v6, checking availability under v6 with IPV6_V6ONLY\n  // set to false is sufficient because such socket can be used on v4-mapped\n  // v6 address.\n  const Envoy::Network::Address::IpVersion ip_version =\n      supported_versions.size() == 1 ? supported_versions[0]\n                                     : Envoy::Network::Address::IpVersion::v6;\n  auto addr_port = Envoy::Network::Utility::parseInternetAddressAndPort(\n      fmt::format(\"{}:{}\", Envoy::Network::Test::getAnyAddressUrlString(ip_version), /*port*/ 0),\n      /*v6only*/ false);\n  ASSERT(addr_port != nullptr);\n  addr_port =\n      Envoy::Network::Test::findOrCheckFreePort(addr_port, Envoy::Network::Socket::Type::Datagram);\n  if (addr_port != nullptr && addr_port->ip() != nullptr) {\n    // Find a port.\n    return addr_port->ip()->port();\n  }\n  RELEASE_ASSERT(false, \"Failed to pick a port for test.\");\n}\n\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\nnamespace quic {\n\nint QuicPickServerPortForTestsOrDieImpl();\ninline void QuicRecyclePortImpl(int) {\n  // No-op with current port picking implementation.\n}\n\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_sleep_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"quiche/quic/core/quic_time.h\"\n\nnamespace quic {\n\ninline void QuicSleepImpl(QuicTime::Delta duration) {\n  absl::SleepFor(absl::Microseconds(duration.ToMicroseconds()));\n}\n\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_system_event_loop_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <string>\n\ninline void QuicRunSystemEventLoopIterationImpl() {\n  // No-op.\n}\n\nclass QuicSystemEventLoopImpl {\npublic:\n  // Only used by quic_client_bin.cc which is not required in Envoy.\n  QuicSystemEventLoopImpl(std::string /*context_name*/) {}\n};\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_test_impl.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/common/assert.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n// TODO(mpwarres): implement once QUICHE flag mechanism is defined.\nclass QuicFlagSaverImpl {};\n\n// No special setup needed for tests to use threads.\nclass ScopedEnvironmentForThreadsImpl {};\n\nusing QuicTestImpl = ::testing::Test;\n\ntemplate <class T> using QuicTestWithParamImpl = ::testing::TestWithParam<T>;\n\ninline std::string QuicGetTestMemoryCachePathImpl() {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // TODO(mpwarres): implement\n}\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_test_mem_slice_vector_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h\"\n\nnamespace quic {\nnamespace test {\n\nclass QuicTestMemSliceVectorImpl {\npublic:\n  explicit QuicTestMemSliceVectorImpl(std::vector<std::pair<char*, size_t>> buffers) {\n    for (auto it : buffers) {\n      auto fragment = new Envoy::Buffer::BufferFragmentImpl(\n          it.first, it.second,\n          [](const void*, size_t, const Envoy::Buffer::BufferFragmentImpl* fragment) {\n            delete fragment;\n          });\n      buffer_.addBufferFragment(*fragment);\n    }\n  }\n\n  QuicMemSliceSpanImpl span() { return QuicMemSliceSpanImpl(buffer_); }\n\nprivate:\n  Envoy::Buffer::OwnedImpl buffer_;\n};\n\n} // namespace test\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_test_output_impl.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"test/extensions/quic_listeners/quiche/platform/quic_test_output_impl.h\"\n\n#include <cstdlib>\n\n#include \"test/test_common/file_system_for_test.h\"\n\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"fmt/printf.h\"\n#include \"gtest/gtest.h\"\n#include \"quiche/quic/platform/api/quic_logging.h\"\n\nnamespace quic {\nnamespace {\n\nvoid QuicRecordTestOutputToFile(const std::string& filename, quiche::QuicheStringPiece data) {\n  const char* output_dir_env = std::getenv(\"QUIC_TEST_OUTPUT_DIR\");\n  if (output_dir_env == nullptr) {\n    QUIC_LOG(WARNING) << \"Could not save test output since QUIC_TEST_OUTPUT_DIR is not set\";\n    return;\n  }\n\n  std::string output_dir = output_dir_env;\n  if (output_dir.empty()) {\n    QUIC_LOG(WARNING) << \"Could not save test output since QUIC_TEST_OUTPUT_DIR is empty\";\n    return;\n  }\n\n  if (output_dir.back() != '/') {\n    output_dir += '/';\n  }\n\n  Envoy::Filesystem::Instance& file_system = Envoy::Filesystem::fileSystemForTest();\n  if (!file_system.directoryExists(output_dir)) {\n    QUIC_LOG(ERROR) << \"Directory does not exist while writing test output: \" << output_dir;\n    return;\n  }\n\n  static constexpr Envoy::Filesystem::FlagSet DefaultFlags{\n      1 << Envoy::Filesystem::File::Operation::Read |\n      1 << Envoy::Filesystem::File::Operation::Write |\n      1 << Envoy::Filesystem::File::Operation::Create};\n\n  const std::string output_path = output_dir + filename;\n  Envoy::Filesystem::FilePtr file = file_system.createFile(output_path);\n  if (!file->open(DefaultFlags).rc_) {\n    QUIC_LOG(ERROR) << \"Failed to open test output file: \" << output_path;\n    return;\n  }\n\n  if (file->write(data).rc_ != static_cast<ssize_t>(data.size())) 
{\n    QUIC_LOG(ERROR) << \"Failed to write to test output file: \" << output_path;\n  } else {\n    QUIC_LOG(INFO) << \"Recorded test output into \" << output_path;\n  }\n\n  file->close();\n}\n} // namespace\n\nvoid QuicSaveTestOutputImpl(quiche::QuicheStringPiece filename, quiche::QuicheStringPiece data) {\n  QuicRecordTestOutputToFile(filename.data(), data);\n}\n\nbool QuicLoadTestOutputImpl(quiche::QuicheStringPiece filename, std::string* data) {\n  const char* read_dir_env = std::getenv(\"QUIC_TEST_OUTPUT_DIR\");\n  if (read_dir_env == nullptr) {\n    QUIC_LOG(WARNING) << \"Could not load test output since QUIC_TEST_OUTPUT_DIR is not set\";\n    return false;\n  }\n\n  std::string read_dir = read_dir_env;\n  if (read_dir.empty()) {\n    QUIC_LOG(WARNING) << \"Could not load test output since QUIC_TEST_OUTPUT_DIR is empty\";\n    return false;\n  }\n\n  if (read_dir.back() != '/') {\n    read_dir += '/';\n  }\n\n  const std::string read_path = read_dir + filename.data();\n\n  Envoy::Filesystem::Instance& file_system = Envoy::Filesystem::fileSystemForTest();\n  if (!file_system.fileExists(read_path)) {\n    QUIC_LOG(ERROR) << \"Test output file does not exist: \" << read_path;\n    return false;\n  }\n  *data = file_system.fileReadToEnd(read_path);\n  return true;\n}\n\nvoid QuicRecordTraceImpl(quiche::QuicheStringPiece identifier, quiche::QuicheStringPiece data) {\n  const testing::TestInfo* test_info = testing::UnitTest::GetInstance()->current_test_info();\n\n  std::string timestamp = absl::FormatTime(\"%Y%m%d%H%M%S\", absl::Now(), absl::LocalTimeZone());\n\n  std::string filename = fmt::sprintf(\"%s.%s.%s.%s.qtr\", test_info->name(),\n                                      test_info->test_case_name(), identifier.data(), timestamp);\n\n  QuicRecordTestOutputToFile(filename, data);\n}\n\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_test_output_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"quiche/common/platform/api/quiche_string_piece.h\"\n\nnamespace quic {\n\nvoid QuicSaveTestOutputImpl(quiche::QuicheStringPiece filename, quiche::QuicheStringPiece data);\n\nbool QuicLoadTestOutputImpl(quiche::QuicheStringPiece filename, std::string* data);\n\nvoid QuicRecordTraceImpl(quiche::QuicheStringPiece identifier, quiche::QuicheStringPiece data);\n\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quic_thread_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include <string>\n\n#include \"envoy/thread/thread.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"test/test_common/thread_factory_for_test.h\"\n\n#include \"absl/synchronization/notification.h\"\n\nnamespace quic {\n\n// A class representing a thread of execution in QUIC.\nclass QuicThreadImpl {\npublic:\n  QuicThreadImpl(const std::string& /*name*/)\n      : thread_factory_(Envoy::Thread::threadFactoryForTest()) {}\n\n  QuicThreadImpl(const QuicThreadImpl&) = delete;\n  QuicThreadImpl& operator=(const QuicThreadImpl&) = delete;\n\n  virtual ~QuicThreadImpl() {\n    if (thread_ != nullptr) {\n      PANIC(\"QuicThread should be joined before destruction.\");\n    }\n  }\n\n  void Start() {\n    if (thread_ != nullptr || thread_is_set_.HasBeenNotified()) {\n      PANIC(\"QuicThread can only be started once.\");\n    }\n    thread_ = thread_factory_.createThread([this]() {\n      thread_is_set_.WaitForNotification();\n      this->Run();\n    });\n    thread_is_set_.Notify();\n  }\n\n  void Join() {\n    if (thread_ == nullptr) {\n      PANIC(\"QuicThread has not been started.\");\n    }\n    thread_->join();\n    thread_ = nullptr;\n  }\n\nprotected:\n  virtual void Run() {\n    // We don't want this function to be pure virtual, because it will be called if:\n    // 1. An object of a derived class calls Start(), which starts the child thread\n    // but has not called Run() yet.\n    // 2. The destructor of the derived class is called, but not the destructor\n    // of this base class.\n    // 3. 
The child thread calls QuicThreadImpl::Run()(this function), since the destructor of the\n    // derived class has been called.\n  }\n\nprivate:\n  Envoy::Thread::ThreadPtr thread_;\n  Envoy::Thread::ThreadFactory& thread_factory_;\n  absl::Notification thread_is_set_; // Whether |thread_| is set in parent.\n};\n\n} // namespace quic\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quiche_platform_test.cc",
    "content": "// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"gtest/gtest.h\"\n#include \"quiche/common/platform/api/quiche_arraysize.h\"\n#include \"quiche/common/platform/api/quiche_endian.h\"\n#include \"quiche/common/platform/api/quiche_optional.h\"\n#include \"quiche/common/platform/api/quiche_ptr_util.h\"\n#include \"quiche/common/platform/api/quiche_string_piece.h\"\n\nnamespace quiche {\n\nTEST(QuichePlatformTest, Arraysize) {\n  int array[] = {0, 1, 2, 3, 4};\n  EXPECT_EQ(5, QUICHE_ARRAYSIZE(array));\n}\n\nTEST(QuichePlatformTest, StringPiece) {\n  std::string s = \"bar\";\n  QuicheStringPiece sp(s);\n  EXPECT_EQ('b', sp[0]);\n}\n\nTEST(QuichePlatformTest, WrapUnique) {\n  auto p = QuicheWrapUnique(new int(6));\n  EXPECT_EQ(6, *p);\n}\n\nTEST(QuichePlatformTest, TestQuicheOptional) {\n  QuicheOptional<int32_t> maybe_a;\n  EXPECT_FALSE(maybe_a.has_value());\n  maybe_a = 1;\n  EXPECT_EQ(1, *maybe_a);\n}\n\n} // namespace quiche\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/quiche_test_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace quiche {\nnamespace test {\n\nusing QuicheTest = ::testing::Test;\n\ntemplate <class T> using QuicheTestWithParamImpl = ::testing::TestWithParam<T>;\n\n} // namespace test\n} // namespace quiche\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/spdy_platform_test.cc",
    "content": "#include <functional>\n#include <string>\n\n#include \"extensions/quic_listeners/quiche/platform/flags_impl.h\"\n\n#include \"test/test_common/logging.h\"\n\n#include \"gtest/gtest.h\"\n#include \"quiche/spdy/platform/api/spdy_bug_tracker.h\"\n#include \"quiche/spdy/platform/api/spdy_containers.h\"\n#include \"quiche/spdy/platform/api/spdy_endianness_util.h\"\n#include \"quiche/spdy/platform/api/spdy_estimate_memory_usage.h\"\n#include \"quiche/spdy/platform/api/spdy_flags.h\"\n#include \"quiche/spdy/platform/api/spdy_logging.h\"\n#include \"quiche/spdy/platform/api/spdy_test_helpers.h\"\n\n// Basic tests to validate functioning of the QUICHE spdy platform\n// implementation. For platform APIs in which the implementation is a simple\n// typedef/passthrough to a std:: or absl:: construct, the tests are kept\n// minimal, and serve primarily to verify the APIs compile and link without\n// issue.\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace QuicListeners {\nnamespace Quiche {\nnamespace {\n\nTEST(SpdyPlatformTest, SpdyBugTracker) {\n  EXPECT_DEBUG_DEATH(SPDY_BUG << \"Here is a bug,\", \" bug\");\n  EXPECT_DEBUG_DEATH(SPDY_BUG_IF(true) << \"There is a bug,\", \" bug\");\n  EXPECT_LOG_NOT_CONTAINS(\"error\", \"\", SPDY_BUG_IF(false) << \"A feature is not a bug.\");\n\n  EXPECT_EQ(true, FLAGS_spdy_always_log_bugs_for_tests);\n}\n\nTEST(SpdyPlatformTest, SpdyHashMap) {\n  spdy::SpdyHashMap<std::string, int> hmap;\n  hmap.insert({\"foo\", 2});\n  EXPECT_EQ(2, hmap[\"foo\"]);\n}\n\nTEST(SpdyPlatformTest, SpdyHashSet) {\n  spdy::SpdyHashSet<std::string, spdy::SpdyHash<std::string>, std::equal_to<std::string>> hset(\n      {\"foo\", \"bar\"});\n  EXPECT_EQ(1, hset.count(\"bar\"));\n  EXPECT_EQ(0, hset.count(\"qux\"));\n}\n\nTEST(SpdyPlatformTest, SpdyEndianness) {\n  EXPECT_EQ(0x1234, spdy::SpdyNetToHost16(spdy::SpdyHostToNet16(0x1234)));\n  EXPECT_EQ(0x12345678, spdy::SpdyNetToHost32(spdy::SpdyHostToNet32(0x12345678)));\n}\n\nTEST(SpdyPlatformTest, 
SpdyEstimateMemoryUsage) {\n  std::string s = \"foo\";\n  // Stubbed out to always return 0.\n  EXPECT_EQ(0, spdy::SpdyEstimateMemoryUsage(s));\n}\n\nTEST(SpdyPlatformTest, SpdyLog) {\n  // SPDY_LOG macros are defined to QUIC_LOG macros, which is tested in\n  // QuicPlatformTest. Here we just make sure SPDY_LOG macros compile.\n  SPDY_LOG(INFO) << \"INFO log may not show up by default.\";\n  SPDY_LOG(ERROR) << \"ERROR log should show up by default.\";\n\n  // VLOG is only emitted if INFO is enabled and verbosity level is high enough.\n  SPDY_VLOG(1) << \"VLOG(1)\";\n\n  SPDY_DLOG(INFO) << \"DLOG(INFO)\";\n  SPDY_DLOG(ERROR) << \"DLOG(ERROR)\";\n\n  SPDY_DLOG_IF(ERROR, true) << \"DLOG_IF(ERROR, true)\";\n  SPDY_DLOG_IF(ERROR, false) << \"DLOG_IF(ERROR, false)\";\n\n  SPDY_DVLOG(2) << \"DVLOG(2)\";\n\n  SPDY_DVLOG_IF(3, true) << \"DVLOG_IF(3, true)\";\n  SPDY_DVLOG_IF(4, false) << \"DVLOG_IF(4, false)\";\n}\n\nTEST(SpdyPlatformTest, SpdyString) {\n  std::string s = \"foo\";\n  EXPECT_EQ('o', s[1]);\n}\n\nTEST(SpdyPlatformTest, SpdyTestHelpers) {\n  auto bug = [](const char* error_message) { SPDY_BUG << error_message; };\n\n  EXPECT_SPDY_BUG(bug(\"bug one is expected\"), \"bug one\");\n  EXPECT_SPDY_BUG(bug(\"bug two is expected\"), \"bug two\");\n}\n\nTEST(SpdyPlatformTest, SpdyFlags) {\n  auto& flag_registry = quiche::FlagRegistry::GetInstance();\n  flag_registry.ResetFlags();\n  EXPECT_FALSE(GetSpdyReloadableFlag(spdy_testonly_default_false));\n  EXPECT_FALSE(GetSpdyRestartFlag(spdy_testonly_default_false));\n\n  flag_registry.FindFlag(\"spdy_reloadable_flag_spdy_testonly_default_false\")\n      ->SetValueFromString(\"true\");\n  EXPECT_TRUE(GetSpdyReloadableFlag(spdy_testonly_default_false));\n  EXPECT_FALSE(GetSpdyRestartFlag(spdy_testonly_default_false));\n\n  flag_registry.ResetFlags();\n  flag_registry.FindFlag(\"spdy_restart_flag_spdy_testonly_default_false\")\n      ->SetValueFromString(\"yes\");\n  
EXPECT_FALSE(GetSpdyReloadableFlag(spdy_testonly_default_false));\n  EXPECT_TRUE(GetSpdyRestartFlag(spdy_testonly_default_false));\n}\n\n} // namespace\n} // namespace Quiche\n} // namespace QuicListeners\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/platform/spdy_test_helpers_impl.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n//\n// This file is part of the QUICHE platform implementation, and is not to be\n// consumed or referenced directly by other Envoy code. It serves purely as a\n// porting layer for QUICHE.\n\n#include \"test/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h\"\n\n#define EXPECT_SPDY_BUG_IMPL EXPECT_QUIC_BUG_IMPL\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc",
    "content": "#include <cstddef>\n#include <memory>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/network/address_impl.h\"\n\n#include \"extensions/quic_listeners/quiche/quic_io_handle_wrapper.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Quic {\n\nclass QuicIoHandleWrapperTest : public testing::Test {\npublic:\n  QuicIoHandleWrapperTest() : wrapper_(std::make_unique<QuicIoHandleWrapper>(socket_.ioHandle())) {\n    EXPECT_TRUE(wrapper_->isOpen());\n    EXPECT_FALSE(socket_.ioHandle().isOpen());\n  }\n  ~QuicIoHandleWrapperTest() override = default;\n\nprotected:\n  testing::NiceMock<Network::MockListenSocket> socket_;\n  std::unique_ptr<QuicIoHandleWrapper> wrapper_;\n  testing::StrictMock<Envoy::Api::MockOsSysCalls> os_sys_calls_;\n  TestThreadsafeSingletonInjector<Envoy::Api::OsSysCallsImpl> os_calls_{&os_sys_calls_};\n};\n\nTEST_F(QuicIoHandleWrapperTest, Close) {\n  EXPECT_TRUE(wrapper_->close().ok());\n  EXPECT_FALSE(wrapper_->isOpen());\n}\n\nTEST_F(QuicIoHandleWrapperTest, DelegateIoHandleCalls) {\n  // TODO(fcoras): seems we could do without the fd in the tests lower. 
Can we remove it?\n  os_fd_t fd = socket_.ioHandle().fdDoNotUse();\n  char data[5];\n  Buffer::RawSlice slice{data, 5};\n  EXPECT_CALL(os_sys_calls_, readv(fd, _, 1)).WillOnce(Return(Api::SysCallSizeResult{5u, 0}));\n  wrapper_->readv(5, &slice, 1);\n\n  EXPECT_CALL(os_sys_calls_, writev(fd, _, 1)).WillOnce(Return(Api::SysCallSizeResult{5u, 0}));\n  wrapper_->writev(&slice, 1);\n\n  EXPECT_CALL(os_sys_calls_, socket(AF_INET6, SOCK_STREAM, 0))\n      .WillRepeatedly(Return(Api::SysCallSocketResult{1, 0}));\n  EXPECT_CALL(os_sys_calls_, close(1)).WillRepeatedly(Return(Api::SysCallIntResult{0, 0}));\n\n  Network::Address::InstanceConstSharedPtr addr(new Network::Address::Ipv4Instance(12345));\n  EXPECT_CALL(os_sys_calls_, sendmsg(fd, _, 0)).WillOnce(Return(Api::SysCallSizeResult{5u, 0}));\n  wrapper_->sendmsg(&slice, 1, 0, /*self_ip=*/nullptr, *addr);\n\n  wrapper_->domain();\n\n  EXPECT_CALL(os_sys_calls_, getsockname(_, _, _))\n      .WillOnce(Invoke([](os_fd_t, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallIntResult {\n        addr->sa_family = AF_INET6;\n        *addrlen = sizeof(sockaddr_in6);\n        return Api::SysCallIntResult{0, 0};\n      }));\n  addr = wrapper_->localAddress();\n\n  EXPECT_CALL(os_sys_calls_, getpeername(_, _, _))\n      .WillOnce(Invoke([](os_fd_t, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallIntResult {\n        addr->sa_family = AF_INET6;\n        *addrlen = sizeof(sockaddr_in6);\n        return Api::SysCallIntResult{0, 0};\n      }));\n  addr = wrapper_->peerAddress();\n\n  Network::IoHandle::RecvMsgOutput output(1, nullptr);\n  EXPECT_CALL(os_sys_calls_, recvmsg(fd, _, 0)).WillOnce(Invoke([](os_fd_t, msghdr* msg, int) {\n    sockaddr_storage ss;\n    auto ipv6_addr = reinterpret_cast<sockaddr_in6*>(&ss);\n    memset(ipv6_addr, 0, sizeof(sockaddr_in6));\n    ipv6_addr->sin6_family = AF_INET6;\n    ipv6_addr->sin6_addr = in6addr_loopback;\n    ipv6_addr->sin6_port = htons(54321);\n    
*reinterpret_cast<sockaddr_in6*>(msg->msg_name) = *ipv6_addr;\n    msg->msg_namelen = sizeof(sockaddr_in6);\n    msg->msg_controllen = 0;\n    return Api::SysCallSizeResult{5u, 0};\n  }));\n  wrapper_->recvmsg(&slice, 1, /*self_port=*/12345, output);\n\n  size_t num_packet_per_call = 1u;\n  Network::IoHandle::RecvMsgOutput output2(num_packet_per_call, nullptr);\n  RawSliceArrays slices(num_packet_per_call,\n                        absl::FixedArray<Buffer::RawSlice>({Buffer::RawSlice{data, 5}}));\n  EXPECT_CALL(os_sys_calls_, recvmmsg(fd, _, num_packet_per_call, _, nullptr))\n      .WillOnce(Invoke([](os_fd_t, struct mmsghdr*, unsigned int, int, struct timespec*) {\n        return Api::SysCallIntResult{1u, 0};\n      }));\n  wrapper_->recvmmsg(slices, /*self_port=*/12345, output2);\n\n  EXPECT_TRUE(wrapper_->close().ok());\n\n  // Following calls shouldn't be delegated.\n  wrapper_->readv(5, &slice, 1);\n  wrapper_->writev(&slice, 1);\n  wrapper_->sendmsg(&slice, 1, 0, /*self_ip=*/nullptr, *addr);\n  EXPECT_DEBUG_DEATH(wrapper_->recvmsg(&slice, 1, /*self_port=*/12345, output),\n                     \"recvmmsg is called after close\");\n  EXPECT_DEBUG_DEATH(wrapper_->recvmmsg(slices, /*self_port=*/12345, output2),\n                     \"recvmmsg is called after close\");\n\n  EXPECT_CALL(os_sys_calls_, supportsUdpGro());\n  wrapper_->supportsUdpGro();\n\n  EXPECT_CALL(os_sys_calls_, supportsMmsg());\n  wrapper_->supportsMmsg();\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/test_proof_source.h",
    "content": "#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#pragma GCC diagnostic ignored \"-Wtype-limits\"\n#endif\n\n#include \"quiche/quic/test_tools/test_certificates.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include <memory>\n\n#include \"test/mocks/network/mocks.h\"\n#include \"extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// A test ProofSource which always provide a hard-coded test certificate in\n// QUICHE and a fake signature.\nclass TestProofSource : public EnvoyQuicProofSourceBase {\npublic:\n  quic::QuicReferenceCountedPointer<quic::ProofSource::Chain>\n  GetCertChain(const quic::QuicSocketAddress& /*server_address*/,\n               const quic::QuicSocketAddress& /*client_address*/,\n               const std::string& /*hostname*/) override {\n    return cert_chain_;\n  }\n\n  const Network::MockFilterChain& filterChain() const { return filter_chain_; }\n\nprotected:\n  void signPayload(const quic::QuicSocketAddress& /*server_address*/,\n                   const quic::QuicSocketAddress& /*client_address*/,\n                   const std::string& /*hostname*/, uint16_t /*signature_algorithm*/,\n                   quiche::QuicheStringPiece in,\n                   std::unique_ptr<quic::ProofSource::SignatureCallback> callback) override {\n    callback->Run(true, absl::StrCat(\"Fake signature for { \", in, \" }\"),\n                  std::make_unique<EnvoyQuicProofSourceDetails>(filter_chain_));\n  }\n\nprivate:\n  quic::QuicReferenceCountedPointer<quic::ProofSource::Chain> cert_chain_{\n      new quic::ProofSource::Chain(\n          std::vector<std::string>{std::string(quic::test::kTestCertificate)})};\n\n  Network::MockFilterChain filter_chain_;\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/test_proof_verifier.h",
    "content": "#include \"extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\n// A test quic::ProofVerifier which always approves the certs and signature.\nclass TestProofVerifier : public EnvoyQuicProofVerifierBase {\npublic:\n  //  quic::ProofVerifier\n  quic::QuicAsyncStatus\n  VerifyCertChain(const std::string& /*hostname*/, const uint16_t /*port*/,\n                  const std::vector<std::string>& /*certs*/, const std::string& /*ocsp_response*/,\n                  const std::string& /*cert_sct*/, const quic::ProofVerifyContext* /*context*/,\n                  std::string* /*error_details*/,\n                  std::unique_ptr<quic::ProofVerifyDetails>* /*details*/,\n                  std::unique_ptr<quic::ProofVerifierCallback> /*callback*/) override {\n    return quic::QUIC_SUCCESS;\n  }\n\nprotected:\n  // EnvoyQuicProofVerifierBase\n  bool verifySignature(const std::string& /*server_config*/, absl::string_view /*chlo_hash*/,\n                       const std::string& /*cert*/, const std::string& /*signature*/,\n                       std::string* /*error_details*/) override {\n    return true;\n  }\n};\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/quic_listeners/quiche/test_utils.h",
    "content": "#include \"extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n#pragma GCC diagnostic ignored \"-Winvalid-offsetof\"\n#endif\n\n#include \"quiche/quic/core/http/quic_spdy_session.h\"\n#include \"quiche/quic/core/http/quic_spdy_client_session.h\"\n#include \"quiche/quic/test_tools/quic_test_utils.h\"\n#include \"quiche/quic/test_tools/first_flight.h\"\n#include \"quiche/quic/core/quic_utils.h\"\n#include \"quiche/quic/test_tools/crypto_test_utils.h\"\n#include \"quiche/quic/test_tools/quic_config_peer.h\"\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n\n#include \"extensions/quic_listeners/quiche/envoy_quic_utils.h\"\n#include \"test/test_common/environment.h\"\n\nnamespace Envoy {\nnamespace Quic {\n\nclass MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterManagerConnectionImpl {\npublic:\n  MockEnvoyQuicSession(const quic::QuicConfig& config,\n                       const quic::ParsedQuicVersionVector& supported_versions,\n                       EnvoyQuicConnection* connection, Event::Dispatcher& dispatcher,\n                       uint32_t send_buffer_limit)\n      : quic::QuicSpdySession(connection, /*visitor=*/nullptr, config, supported_versions),\n        QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit) {\n    crypto_stream_ = std::make_unique<quic::test::MockQuicCryptoStream>(this);\n  }\n\n  // From QuicSession.\n  MOCK_METHOD(quic::QuicSpdyStream*, CreateIncomingStream, (quic::QuicStreamId id));\n  MOCK_METHOD(quic::QuicSpdyStream*, CreateIncomingStream, (quic::PendingStream * pending));\n  MOCK_METHOD(quic::QuicSpdyStream*, CreateOutgoingBidirectionalStream, ());\n  MOCK_METHOD(quic::QuicSpdyStream*, CreateOutgoingUnidirectionalStream, ());\n  MOCK_METHOD(bool, ShouldCreateIncomingStream, (quic::QuicStreamId id));\n  MOCK_METHOD(bool, 
ShouldCreateOutgoingBidirectionalStream, ());\n  MOCK_METHOD(bool, ShouldCreateOutgoingUnidirectionalStream, ());\n  MOCK_METHOD(quic::QuicConsumedData, WritevData,\n              (quic::QuicStreamId id, size_t write_length, quic::QuicStreamOffset offset,\n               quic::StreamSendingState state, quic::TransmissionType type,\n               quiche::QuicheOptional<quic::EncryptionLevel> level));\n  MOCK_METHOD(bool, ShouldYield, (quic::QuicStreamId id));\n\n  absl::string_view requestedServerName() const override {\n    return {GetCryptoStream()->crypto_negotiated_params().sni};\n  }\n\n  quic::QuicCryptoStream* GetMutableCryptoStream() override { return crypto_stream_.get(); }\n\n  const quic::QuicCryptoStream* GetCryptoStream() const override { return crypto_stream_.get(); }\n\n  using quic::QuicSpdySession::ActivateStream;\n\nprotected:\n  bool hasDataToWrite() override { return HasDataToWrite(); }\n\nprivate:\n  std::unique_ptr<quic::QuicCryptoStream> crypto_stream_;\n};\n\nclass MockEnvoyQuicClientSession : public quic::QuicSpdyClientSession,\n                                   public QuicFilterManagerConnectionImpl {\npublic:\n  MockEnvoyQuicClientSession(const quic::QuicConfig& config,\n                             const quic::ParsedQuicVersionVector& supported_versions,\n                             EnvoyQuicConnection* connection, Event::Dispatcher& dispatcher,\n                             uint32_t send_buffer_limit)\n      : quic::QuicSpdyClientSession(config, supported_versions, connection,\n                                    quic::QuicServerId(\"example.com\", 443, false), &crypto_config_,\n                                    nullptr),\n        QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit),\n        crypto_config_(quic::test::crypto_test_utils::ProofVerifierForTesting()) {}\n\n  // From QuicSession.\n  MOCK_METHOD(quic::QuicSpdyClientStream*, CreateIncomingStream, (quic::QuicStreamId id));\n  
MOCK_METHOD(quic::QuicSpdyClientStream*, CreateIncomingStream, (quic::PendingStream * pending));\n  MOCK_METHOD(quic::QuicSpdyClientStream*, CreateOutgoingBidirectionalStream, ());\n  MOCK_METHOD(quic::QuicSpdyClientStream*, CreateOutgoingUnidirectionalStream, ());\n  MOCK_METHOD(bool, ShouldCreateIncomingStream, (quic::QuicStreamId id));\n  MOCK_METHOD(bool, ShouldCreateOutgoingBidirectionalStream, ());\n  MOCK_METHOD(bool, ShouldCreateOutgoingUnidirectionalStream, ());\n  MOCK_METHOD(quic::QuicConsumedData, WritevData,\n              (quic::QuicStreamId id, size_t write_length, quic::QuicStreamOffset offset,\n               quic::StreamSendingState state, quic::TransmissionType type,\n               quiche::QuicheOptional<quic::EncryptionLevel> level));\n  MOCK_METHOD(bool, ShouldYield, (quic::QuicStreamId id));\n\n  absl::string_view requestedServerName() const override {\n    return {GetCryptoStream()->crypto_negotiated_params().sni};\n  }\n\n  using quic::QuicSpdySession::ActivateStream;\n\nprotected:\n  bool hasDataToWrite() override { return HasDataToWrite(); }\n\nprivate:\n  quic::QuicCryptoClientConfig crypto_config_;\n};\n\nBuffer::OwnedImpl\ngenerateChloPacketToSend(quic::ParsedQuicVersion quic_version, quic::QuicConfig& quic_config,\n                         quic::QuicCryptoServerConfig& crypto_config,\n                         quic::QuicConnectionId connection_id, quic::QuicClock& clock,\n                         const quic::QuicSocketAddress& server_address,\n                         const quic::QuicSocketAddress& client_address, std::string sni) {\n  if (quic_version.UsesTls()) {\n    std::unique_ptr<quic::QuicReceivedPacket> packet =\n        std::move(quic::test::GetFirstFlightOfPackets(quic_version, quic_config, connection_id)[0]);\n    return Buffer::OwnedImpl(packet->data(), packet->length());\n  }\n  quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO(\n      &clock, quic_version.transport_version, 
&crypto_config);\n  chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ});\n  chlo.SetStringPiece(quic::kSNI, sni);\n  quic::CryptoHandshakeMessage full_chlo;\n  quic::QuicReferenceCountedPointer<quic::QuicSignedServerConfig> signed_config(\n      new quic::QuicSignedServerConfig);\n  quic::QuicCompressedCertsCache cache(\n      quic::QuicCompressedCertsCache::kQuicCompressedCertsCacheSize);\n  quic::test::crypto_test_utils::GenerateFullCHLO(chlo, &crypto_config, server_address,\n                                                  client_address, quic_version.transport_version,\n                                                  &clock, signed_config, &cache, &full_chlo);\n  // Overwrite version label to the version passed in.\n  full_chlo.SetVersion(quic::kVER, quic_version);\n  quic::QuicConfig quic_config_tmp;\n  quic_config_tmp.ToHandshakeMessage(&full_chlo, quic_version.transport_version);\n\n  std::string packet_content(full_chlo.GetSerialized().AsStringPiece());\n  quic::ParsedQuicVersionVector supported_versions{quic_version};\n  auto encrypted_packet =\n      std::unique_ptr<quic::QuicEncryptedPacket>(quic::test::ConstructEncryptedPacket(\n          connection_id, quic::EmptyQuicConnectionId(),\n          /*version_flag=*/true, /*reset_flag*/ false,\n          /*packet_number=*/1, packet_content, quic::CONNECTION_ID_PRESENT,\n          quic::CONNECTION_ID_ABSENT, quic::PACKET_4BYTE_PACKET_NUMBER, &supported_versions));\n\n  return Buffer::OwnedImpl(encrypted_packet->data(), encrypted_packet->length());\n}\n\nvoid setQuicConfigWithDefaultValues(quic::QuicConfig* config) {\n  quic::test::QuicConfigPeer::SetReceivedMaxBidirectionalStreams(\n      config, quic::kDefaultMaxStreamsPerConnection);\n  quic::test::QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(\n      config, quic::kDefaultMaxStreamsPerConnection);\n  quic::test::QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional(\n      config, quic::kMinimumFlowControlSendWindow);\n  
quic::test::QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional(\n      config, quic::kMinimumFlowControlSendWindow);\n  quic::test::QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional(\n      config, quic::kMinimumFlowControlSendWindow);\n  quic::test::QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow(\n      config, quic::kMinimumFlowControlSendWindow);\n}\n\nenum class QuicVersionType {\n  GquicQuicCrypto,\n  GquicTls,\n  Iquic,\n};\n\n// A test suite with variation of ip version and a knob to turn on/off IETF QUIC implementation.\nclass QuicMultiVersionTest\n    : public testing::TestWithParam<std::pair<Network::Address::IpVersion, QuicVersionType>> {};\n\nstd::vector<std::pair<Network::Address::IpVersion, QuicVersionType>> generateTestParam() {\n  std::vector<std::pair<Network::Address::IpVersion, QuicVersionType>> param;\n  for (auto ip_version : TestEnvironment::getIpVersionsForTest()) {\n    param.emplace_back(ip_version, QuicVersionType::GquicQuicCrypto);\n    param.emplace_back(ip_version, QuicVersionType::GquicTls);\n    param.emplace_back(ip_version, QuicVersionType::Iquic);\n  }\n\n  return param;\n}\n\nstd::string testParamsToString(\n    const ::testing::TestParamInfo<std::pair<Network::Address::IpVersion, QuicVersionType>>&\n        params) {\n  std::string ip_version = params.param.first == Network::Address::IpVersion::v4 ? \"IPv4\" : \"IPv6\";\n  switch (params.param.second) {\n  case QuicVersionType::GquicQuicCrypto:\n    return absl::StrCat(ip_version, \"_UseGQuicWithQuicCrypto\");\n  case QuicVersionType::GquicTls:\n    return absl::StrCat(ip_version, \"_UseGQuicWithTLS\");\n  case QuicVersionType::Iquic:\n    return absl::StrCat(ip_version, \"_UseHttp3\");\n  }\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\n} // namespace Quic\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/resource_monitors/fixed_heap/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"fixed_heap_monitor_test\",\n    srcs = [\"fixed_heap_monitor_test.cc\"],\n    extension_name = \"envoy.resource_monitors.fixed_heap\",\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//source/extensions/resource_monitors/fixed_heap:fixed_heap_monitor\",\n        \"@envoy_api//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.resource_monitors.fixed_heap\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/resource_monitors/fixed_heap:config\",\n        \"//source/server:resource_monitor_config_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"@envoy_api//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/resource_monitors/fixed_heap/config_test.cc",
    "content": "#include \"envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.pb.h\"\n#include \"envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"server/resource_monitor_config_impl.h\"\n\n#include \"extensions/resource_monitors/fixed_heap/config.h\"\n\n#include \"test/mocks/event/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace FixedHeapMonitor {\nnamespace {\n\nTEST(FixedHeapMonitorFactoryTest, CreateMonitor) {\n  auto factory =\n      Registry::FactoryRegistry<Server::Configuration::ResourceMonitorFactory>::getFactory(\n          \"envoy.resource_monitors.fixed_heap\");\n  EXPECT_NE(factory, nullptr);\n\n  envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig config;\n  config.set_max_heap_size_bytes(std::numeric_limits<uint64_t>::max());\n  Event::MockDispatcher dispatcher;\n  Api::ApiPtr api = Api::createApiForTest();\n  Server::Configuration::ResourceMonitorFactoryContextImpl context(\n      dispatcher, *api, ProtobufMessage::getStrictValidationVisitor());\n  auto monitor = factory->createResourceMonitor(config, context);\n  EXPECT_NE(monitor, nullptr);\n}\n\n} // namespace\n} // namespace FixedHeapMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/resource_monitors/fixed_heap/fixed_heap_monitor_test.cc",
    "content": "#include \"envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.pb.h\"\n\n#include \"extensions/resource_monitors/fixed_heap/fixed_heap_monitor.h\"\n\n#include \"absl/types/optional.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace FixedHeapMonitor {\nnamespace {\n\nclass MockMemoryStatsReader : public MemoryStatsReader {\npublic:\n  MockMemoryStatsReader() = default;\n\n  MOCK_METHOD(uint64_t, reservedHeapBytes, ());\n  MOCK_METHOD(uint64_t, unmappedHeapBytes, ());\n};\n\nclass ResourcePressure : public Server::ResourceMonitor::Callbacks {\npublic:\n  void onSuccess(const Server::ResourceUsage& usage) override {\n    pressure_ = usage.resource_pressure_;\n  }\n\n  void onFailure(const EnvoyException& error) override { error_ = error; }\n\n  bool hasPressure() const { return pressure_.has_value(); }\n  bool hasError() const { return error_.has_value(); }\n\n  double pressure() const { return *pressure_; }\n\nprivate:\n  absl::optional<double> pressure_;\n  absl::optional<EnvoyException> error_;\n};\n\nTEST(FixedHeapMonitorTest, ComputesCorrectUsage) {\n  envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig config;\n  config.set_max_heap_size_bytes(1000);\n  auto stats_reader = std::make_unique<MockMemoryStatsReader>();\n  EXPECT_CALL(*stats_reader, reservedHeapBytes()).WillOnce(testing::Return(800));\n  EXPECT_CALL(*stats_reader, unmappedHeapBytes()).WillOnce(testing::Return(100));\n  std::unique_ptr<FixedHeapMonitor> monitor(new FixedHeapMonitor(config, std::move(stats_reader)));\n\n  ResourcePressure resource;\n  monitor->updateResourceUsage(resource);\n  EXPECT_TRUE(resource.hasPressure());\n  EXPECT_FALSE(resource.hasError());\n  EXPECT_EQ(resource.pressure(), 0.7);\n}\n\nTEST(FixedHeapMonitorTest, ComputeUsageWithRealMemoryStats) {\n  envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig config;\n  uint64_t 
max_heap = 1024 * 1024 * 1024;\n  config.set_max_heap_size_bytes(max_heap);\n  auto stats_reader = std::make_unique<MemoryStatsReader>();\n  const double expected_usage =\n      (stats_reader->reservedHeapBytes() - stats_reader->unmappedHeapBytes()) /\n      static_cast<double>(max_heap);\n  std::unique_ptr<FixedHeapMonitor> monitor(new FixedHeapMonitor(config, std::move(stats_reader)));\n\n  ResourcePressure resource;\n  monitor->updateResourceUsage(resource);\n  EXPECT_NEAR(resource.pressure(), expected_usage, 0.0005);\n}\n\n} // namespace\n} // namespace FixedHeapMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/resource_monitors/injected_resource/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"injected_resource_monitor_test\",\n    srcs = [\"injected_resource_monitor_test.cc\"],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/resource_monitors/injected_resource:injected_resource_monitor\",\n        \"//source/server:resource_monitor_config_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/resource_monitor/injected_resource/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.resource_monitors.injected_resource\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/resource_monitors/injected_resource:config\",\n        \"//source/server:resource_monitor_config_lib\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/config/resource_monitor/injected_resource/v2alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/resource_monitors/injected_resource/config_test.cc",
    "content": "#include \"envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.pb.h\"\n#include \"envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n\n#include \"server/resource_monitor_config_impl.h\"\n\n#include \"extensions/resource_monitors/injected_resource/config.h\"\n\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace InjectedResourceMonitor {\nnamespace {\n\nTEST(InjectedResourceMonitorFactoryTest, CreateMonitor) {\n  auto factory =\n      Registry::FactoryRegistry<Server::Configuration::ResourceMonitorFactory>::getFactory(\n          \"envoy.resource_monitors.injected_resource\");\n  ASSERT_NE(factory, nullptr);\n\n  envoy::config::resource_monitor::injected_resource::v2alpha::InjectedResourceConfig config;\n  config.set_filename(TestEnvironment::temporaryPath(\"injected_resource\"));\n  Api::ApiPtr api = Api::createApiForTest();\n  Event::DispatcherPtr dispatcher(api->allocateDispatcher(\"test_thread\"));\n  Server::Configuration::ResourceMonitorFactoryContextImpl context(\n      *dispatcher, *api, ProtobufMessage::getStrictValidationVisitor());\n  Server::ResourceMonitorPtr monitor = factory->createResourceMonitor(config, context);\n  EXPECT_NE(monitor, nullptr);\n}\n\n} // namespace\n} // namespace InjectedResourceMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/resource_monitors/injected_resource/injected_resource_monitor_test.cc",
    "content": "#include \"envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.pb.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"server/resource_monitor_config_impl.h\"\n\n#include \"extensions/resource_monitors/injected_resource/injected_resource_monitor.h\"\n\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace ResourceMonitors {\nnamespace InjectedResourceMonitor {\nnamespace {\n\nclass TestableInjectedResourceMonitor : public InjectedResourceMonitor {\npublic:\n  TestableInjectedResourceMonitor(\n      const envoy::config::resource_monitor::injected_resource::v2alpha::InjectedResourceConfig&\n          config,\n      Server::Configuration::ResourceMonitorFactoryContext& context)\n      : InjectedResourceMonitor(config, context), dispatcher_(context.dispatcher()) {}\n\nprotected:\n  void onFileChanged() override {\n    InjectedResourceMonitor::onFileChanged();\n    dispatcher_.exit();\n  }\n\nprivate:\n  Event::Dispatcher& dispatcher_;\n};\n\nclass MockedCallbacks : public Server::ResourceMonitor::Callbacks {\npublic:\n  MOCK_METHOD(void, onSuccess, (const Server::ResourceUsage&));\n  MOCK_METHOD(void, onFailure, (const EnvoyException&));\n};\n\nclass InjectedResourceMonitorTest : public testing::Test {\nprotected:\n  InjectedResourceMonitorTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test_thread\")),\n        resource_filename_(TestEnvironment::temporaryPath(\"injected_resource\")),\n        file_updater_(resource_filename_), monitor_(createMonitor()) {}\n\n  void updateResource(const std::string& contents) {\n    file_updater_.update(contents);\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n    
monitor_->updateResourceUsage(cb_);\n  }\n\n  void updateResource(double pressure) { updateResource(absl::StrCat(pressure)); }\n\n  std::unique_ptr<InjectedResourceMonitor> createMonitor() {\n    envoy::config::resource_monitor::injected_resource::v2alpha::InjectedResourceConfig config;\n    config.set_filename(resource_filename_);\n    Server::Configuration::ResourceMonitorFactoryContextImpl context(\n        *dispatcher_, *api_, ProtobufMessage::getStrictValidationVisitor());\n    return std::make_unique<TestableInjectedResourceMonitor>(config, context);\n  }\n\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  const std::string resource_filename_;\n  AtomicFileUpdater file_updater_;\n  MockedCallbacks cb_;\n  std::unique_ptr<InjectedResourceMonitor> monitor_;\n};\n\nTEST_F(InjectedResourceMonitorTest, ReportsCorrectPressure) {\n  EXPECT_CALL(cb_, onSuccess(Server::ResourceUsage{0.6}));\n  updateResource(0.6);\n\n  EXPECT_CALL(cb_, onSuccess(Server::ResourceUsage{0.7}));\n  updateResource(0.7);\n}\n\nMATCHER_P(ExceptionContains, rhs, \"\") { return absl::StrContains(arg.what(), rhs); }\n\nTEST_F(InjectedResourceMonitorTest, ReportsParseError) {\n  EXPECT_CALL(cb_, onFailure(ExceptionContains(\"failed to parse injected resource pressure\")));\n  updateResource(\"bad content\");\n}\n\nTEST_F(InjectedResourceMonitorTest, ReportsErrorForOutOfRangePressure) {\n  EXPECT_CALL(cb_, onFailure(ExceptionContains(\"pressure out of range\")));\n  updateResource(-1);\n\n  EXPECT_CALL(cb_, onFailure(ExceptionContains(\"pressure out of range\")));\n  updateResource(2);\n}\n\nTEST_F(InjectedResourceMonitorTest, ReportsErrorOnFileRead) {\n  EXPECT_CALL(cb_, onFailure(ExceptionContains(\"Invalid path\")));\n  monitor_->updateResourceUsage(cb_);\n}\n\n} // namespace\n} // namespace InjectedResourceMonitor\n} // namespace ResourceMonitors\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/retry/host/omit_canary_hosts/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.retry_host_predicates.omit_canary_hosts\",\n    deps = [\n        \"//source/extensions/retry/host/omit_canary_hosts:config\",\n        \"//test/mocks/upstream:host_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/retry/host/omit_canary_hosts/config_test.cc",
    "content": "#include \"envoy/registry/registry.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"extensions/retry/host/omit_canary_hosts/config.h\"\n\n#include \"test/mocks/upstream/host.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing namespace testing;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\nnamespace {\n\nTEST(OmitCanaryHostsRetryPredicateTest, PredicateTest) {\n  auto factory = Registry::FactoryRegistry<Upstream::RetryHostPredicateFactory>::getFactory(\n      \"envoy.retry_host_predicates.omit_canary_hosts\");\n\n  ASSERT_NE(nullptr, factory);\n\n  ProtobufWkt::Struct config;\n  auto predicate = factory->createHostPredicate(config, 3);\n\n  auto host1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  auto host2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n\n  ON_CALL(*host1, canary()).WillByDefault(Return(false));\n  ON_CALL(*host2, canary()).WillByDefault(Return(true));\n\n  ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host1));\n  ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host2));\n  predicate->onHostAttempted(host1);\n}\n\nTEST(OmitCanaryHostsRetryPredicateTest, EmptyConfig) {\n  auto factory = Registry::FactoryRegistry<Upstream::RetryHostPredicateFactory>::getFactory(\n      \"envoy.retry_host_predicates.omit_canary_hosts\");\n\n  ASSERT_NE(nullptr, factory);\n\n  ProtobufTypes::MessagePtr config = factory->createEmptyConfigProto();\n  EXPECT_TRUE(dynamic_cast<envoy::config::retry::omit_canary_hosts::v2::OmitCanaryHostsPredicate*>(\n      config.get()));\n}\n\n} // namespace\n} // namespace Host\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/retry/host/omit_host_metadata/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.retry_host_predicates.omit_host_metadata\",\n    deps = [\n        \"//source/extensions/retry/host/omit_host_metadata:config\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"@envoy_api//envoy/extensions/retry/host/omit_host_metadata/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/retry/host/omit_host_metadata/config_test.cc",
    "content": "#include \"envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"extensions/retry/host/omit_host_metadata/omit_host_metadata.h\"\n\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing namespace testing;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\nnamespace {\n\nTEST(OmitHostsRetryPredicateTest, PredicateTest) {\n  auto factory = Registry::FactoryRegistry<Upstream::RetryHostPredicateFactory>::getFactory(\n      \"envoy.retry_host_predicates.omit_host_metadata\");\n\n  ASSERT_NE(nullptr, factory);\n\n  envoy::extensions::retry::host::omit_host_metadata::v3::OmitHostMetadataConfig config;\n  auto empty = factory->createEmptyConfigProto();\n  empty->MergeFrom(config);\n  auto predicate = factory->createHostPredicate(*empty, 3);\n\n  auto host = std::make_shared<NiceMock<Upstream::MockHost>>();\n\n  // Test: if no metadata match criteria defined, the host should not be rejected.\n  ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host));\n\n  auto* metadata_match = config.mutable_metadata_match();\n  Envoy::Config::Metadata::mutableMetadataValue(\n      *metadata_match, Envoy::Config::MetadataFilters::get().ENVOY_LB, \"key\")\n      .set_string_value(\"value\");\n  empty->MergeFrom(config);\n  predicate = factory->createHostPredicate(*empty, 3);\n\n  // Test: if host doesn't have metadata, it should not be rejected.\n  ON_CALL(*host, metadata())\n      .WillByDefault(Return(std::make_shared<envoy::config::core::v3::Metadata>()));\n\n  ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host));\n\n  // Test: if host has matching metadata, it should be rejected.\n  ON_CALL(*host, metadata())\n      .WillByDefault(Return(std::make_shared<envoy::config::core::v3::Metadata>(\n          
TestUtility::parseYaml<envoy::config::core::v3::Metadata>(\n              R\"EOF(\n          filter_metadata:\n            envoy.lb:\n              key: \"value\"\n        )EOF\"))));\n\n  ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host));\n\n  // Test: if host doesn't have matching metadata, it should not be rejected.\n  ON_CALL(*host, metadata())\n      .WillByDefault(Return(std::make_shared<envoy::config::core::v3::Metadata>(\n          TestUtility::parseYaml<envoy::config::core::v3::Metadata>(\n              R\"EOF(\n          filter_metadata:\n            envoy.lb:\n              key1: \"value1\"\n        )EOF\"))));\n\n  ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host));\n\n  // Test: if host metadata has matching key but not the value, it should not be rejected.\n  ON_CALL(*host, metadata())\n      .WillByDefault(Return(std::make_shared<envoy::config::core::v3::Metadata>(\n          TestUtility::parseYaml<envoy::config::core::v3::Metadata>(\n              R\"EOF(\n          filter_metadata:\n            envoy.lb:\n              key: \"value1\"\n        )EOF\"))));\n\n  ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host));\n\n  predicate->onHostAttempted(host);\n}\n} // namespace\n} // namespace Host\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/retry/host/previous_hosts/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.retry_host_predicates.previous_hosts\",\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/extensions/retry/host/previous_hosts:config\",\n        \"//test/mocks/upstream:host_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/retry/host/previous_hosts/config_test.cc",
    "content": "#include \"envoy/registry/registry.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"common/network/address_impl.h\"\n\n#include \"extensions/retry/host/previous_hosts/config.h\"\n\n#include \"test/mocks/upstream/host.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing namespace testing;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Host {\nnamespace {\n\nTEST(PreviousHostsRetryPredicateConfigTest, PredicateTest) {\n  auto factory = Registry::FactoryRegistry<Upstream::RetryHostPredicateFactory>::getFactory(\n      \"envoy.retry_host_predicates.previous_hosts\");\n\n  ASSERT_NE(nullptr, factory);\n\n  ProtobufWkt::Struct config;\n  auto predicate = factory->createHostPredicate(config, 3);\n\n  auto host1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  auto host1_address = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 123);\n  ON_CALL(*host1, address()).WillByDefault(Return(host1_address));\n\n  auto host2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  auto host2_address = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 456);\n  ON_CALL(*host2, address()).WillByDefault(Return(host2_address));\n\n  ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host1));\n  ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host2));\n\n  predicate->onHostAttempted(host1);\n\n  ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host1));\n  ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host2));\n\n  predicate->onHostAttempted(host2);\n\n  ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host1));\n  ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host2));\n}\n\nTEST(PreviousHostsRetryPredicateConfigTest, EmptyConfig) {\n  auto factory = Registry::FactoryRegistry<Upstream::RetryHostPredicateFactory>::getFactory(\n      \"envoy.retry_host_predicates.previous_hosts\");\n\n  ASSERT_NE(nullptr, factory);\n\n  ProtobufTypes::MessagePtr config = 
factory->createEmptyConfigProto();\n  EXPECT_TRUE(dynamic_cast<envoy::config::retry::previous_hosts::v2::PreviousHostsPredicate*>(\n      config.get()));\n}\n\n} // namespace\n} // namespace Host\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/retry/priority/previous_priorities/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.retry_priorities.previous_priorities\",\n    deps = [\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//source/extensions/retry/priority:well_known_names\",\n        \"//source/extensions/retry/priority/previous_priorities:config\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/mocks/upstream:host_set_mocks\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n        \"@envoy_api//envoy/config/retry/previous_priorities:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/retry/priority/previous_priorities/config_test.cc",
    "content": "#include \"envoy/config/retry/previous_priorities/previous_priorities_config.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/upstream/retry.h\"\n\n#include \"common/protobuf/message_validator_impl.h\"\n\n#include \"extensions/retry/priority/previous_priorities/config.h\"\n#include \"extensions/retry/priority/well_known_names.h\"\n\n#include \"test/mocks/upstream/host.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing namespace testing;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Retry {\nnamespace Priority {\nnamespace {\n\nclass RetryPriorityTest : public testing::Test {\npublic:\n  void initialize(const Upstream::HealthyLoad& original_healthy_priority_load,\n                  const Upstream::DegradedLoad& original_degraded_priority_load) {\n    auto factory = Registry::FactoryRegistry<Upstream::RetryPriorityFactory>::getFactory(\n        RetryPriorityValues::get().PreviousPrioritiesRetryPriority);\n\n    envoy::config::retry::previous_priorities::PreviousPrioritiesConfig config;\n    config.set_update_frequency(update_frequency_);\n    // Use createEmptyConfigProto to exercise that code path. 
This ensures the proto returned\n    // by that method is compatible with the downcast in createRetryPriority.\n    auto empty = factory->createEmptyConfigProto();\n    empty->MergeFrom(config);\n    retry_priority_ =\n        factory->createRetryPriority(*empty, ProtobufMessage::getStrictValidationVisitor(), 3);\n    original_priority_load_ = Upstream::HealthyAndDegradedLoad{original_healthy_priority_load,\n                                                               original_degraded_priority_load};\n  }\n\n  void addHosts(size_t priority, int count, int healthy_count, int degraded_count = 0) {\n    auto host_set = priority_set_.getMockHostSet(priority);\n\n    ASSERT(count >= healthy_count + degraded_count);\n\n    host_set->hosts_.resize(count);\n    host_set->healthy_hosts_.resize(healthy_count);\n    host_set->degraded_hosts_.resize(degraded_count);\n    host_set->runCallbacks({}, {});\n  }\n\n  void verifyPriorityLoads(const Upstream::HealthyLoad& expected_healthy_priority_load,\n                           const Upstream::DegradedLoad& expected_degraded_priority_load,\n                           absl::optional<Upstream::RetryPriority::PriorityMappingFunc>\n                               priority_mapping_func = absl::nullopt) {\n    const auto& priority_loads = retry_priority_->determinePriorityLoad(\n        priority_set_, original_priority_load_,\n        priority_mapping_func.value_or(Upstream::RetryPriority::defaultPriorityMapping));\n    // Unwrapping gives a nicer gtest error.\n    ASSERT_EQ(priority_loads.healthy_priority_load_.get(), expected_healthy_priority_load.get());\n    ASSERT_EQ(priority_loads.degraded_priority_load_.get(), expected_degraded_priority_load.get());\n  }\n\n  std::vector<Upstream::MockHostSet> host_sets_;\n  uint32_t update_frequency_{1};\n  NiceMock<Upstream::MockPrioritySet> priority_set_;\n  Upstream::RetryPrioritySharedPtr retry_priority_;\n  Upstream::HealthyAndDegradedLoad 
original_priority_load_;\n};\n\nTEST_F(RetryPriorityTest, DefaultFrequency) {\n  const Upstream::HealthyLoad original_priority_load({100, 0});\n  const Upstream::DegradedLoad original_degraded_priority_load({0, 0});\n\n  initialize(original_priority_load, original_degraded_priority_load);\n  addHosts(0, 2, 2);\n  addHosts(1, 2, 2);\n\n  auto host1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host1, priority()).WillByDefault(Return(0));\n\n  auto host2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host2, priority()).WillByDefault(Return(1));\n\n  // Before any hosts attempted, load should be unchanged.\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n\n  const Upstream::HealthyLoad expected_priority_load({0, 100});\n  const Upstream::DegradedLoad expected_degraded_priority_load({0, 0});\n\n  // After attempting a host in P0, P1 should receive all the load.\n  retry_priority_->onHostAttempted(host1);\n  verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load);\n\n  // After we've tried host2, we've attempted all priorities and should reset back to the original\n  // priority load.\n  retry_priority_->onHostAttempted(host2);\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n}\n\nTEST_F(RetryPriorityTest, PriorityMappingCallback) {\n  const Upstream::HealthyLoad original_priority_load({100, 0});\n  const Upstream::DegradedLoad original_degraded_priority_load({0, 0});\n\n  initialize(original_priority_load, original_degraded_priority_load);\n  addHosts(0, 2, 2);\n  addHosts(1, 2, 2);\n\n  auto host1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  EXPECT_CALL(*host1, priority()).Times(0);\n\n  auto host2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  EXPECT_CALL(*host2, priority()).Times(0);\n\n  Upstream::RetryPriority::PriorityMappingFunc priority_mapping_func =\n      [&](const Upstream::HostDescription& host) -> 
absl::optional<uint32_t> {\n    if (&host == host1.get()) {\n      return 0;\n    }\n    ASSERT(&host == host2.get());\n    return 1;\n  };\n\n  const Upstream::HealthyLoad expected_priority_load({0, 100});\n  const Upstream::DegradedLoad expected_degraded_priority_load({0, 0});\n\n  // After attempting a host in P0, P1 should receive all the load.\n  retry_priority_->onHostAttempted(host1);\n  verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load,\n                      priority_mapping_func);\n\n  // With a mapping function that doesn't recognize host2, results will remain the same as after\n  // only trying host1.\n  retry_priority_->onHostAttempted(host2);\n  Upstream::RetryPriority::PriorityMappingFunc priority_mapping_func_no_host2 =\n      [&](const Upstream::HostDescription& host) -> absl::optional<uint32_t> {\n    if (&host == host1.get()) {\n      return 0;\n    }\n    ASSERT(&host == host2.get());\n    return absl::nullopt;\n  };\n  verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load,\n                      priority_mapping_func_no_host2);\n\n  // After we've tried host2, we've attempted all priorities and should reset back to the original\n  // priority load.\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load,\n                      priority_mapping_func);\n}\n\n// Tests that we handle all hosts being unhealthy in the original priority set.\nTEST_F(RetryPriorityTest, NoHealthyUpstreams) {\n  const Upstream::HealthyLoad original_priority_load({0, 0, 0});\n  const Upstream::DegradedLoad original_degraded_priority_load({0, 0});\n\n  initialize(original_priority_load, original_degraded_priority_load);\n  addHosts(0, 10, 0);\n  addHosts(1, 10, 0);\n  addHosts(2, 10, 0);\n\n  auto host1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host1, priority()).WillByDefault(Return(0));\n\n  auto host2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host2, 
priority()).WillByDefault(Return(1));\n\n  auto host3 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host3, priority()).WillByDefault(Return(2));\n\n  // Before any hosts attempted, load should be unchanged.\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n\n  {\n    // After attempting a host in P0, load should remain unchanged.\n    const Upstream::HealthyLoad expected_priority_load({0, 0, 0});\n    const Upstream::DegradedLoad expected_degraded_priority_load({0, 0, 0});\n\n    retry_priority_->onHostAttempted(host1);\n    verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n  }\n}\n\n// Tests that spillover happens as we ignore attempted priorities.\nTEST_F(RetryPriorityTest, DefaultFrequencyUnhealthyPriorities) {\n  const Upstream::HealthyLoad original_priority_load({42, 28, 30});\n  const Upstream::DegradedLoad original_degraded_priority_load({0, 0, 0});\n\n  initialize(original_priority_load, original_degraded_priority_load);\n  addHosts(0, 10, 3);\n  addHosts(1, 10, 2);\n  addHosts(2, 10, 10);\n\n  auto host1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host1, priority()).WillByDefault(Return(0));\n\n  auto host2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host2, priority()).WillByDefault(Return(1));\n\n  auto host3 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host3, priority()).WillByDefault(Return(2));\n\n  // Before any hosts attempted, load should be unchanged.\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n\n  {\n    // After attempting a host in P0, load should be split between P1 and P2 since P1 is degraded.\n    const Upstream::HealthyLoad expected_priority_load({0, 28, 72});\n    const Upstream::DegradedLoad expected_degraded_priority_load({0, 0, 0});\n    retry_priority_->onHostAttempted(host1);\n    verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load);\n  
}\n\n  // After we've tried host2, everything should go to P2.\n  const Upstream::HealthyLoad expected_priority_load({0, 0, 100});\n  const Upstream::DegradedLoad expected_degraded_priority_load({0, 0, 0});\n  retry_priority_->onHostAttempted(host2);\n  verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load);\n\n  // Once we've exhausted all priorities, we should return to the original load.\n  retry_priority_->onHostAttempted(host3);\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n}\n\n// Tests that spillover happens as we ignore attempted priorities for degraded\n// hosts.\nTEST_F(RetryPriorityTest, DefaultFrequencyUnhealthyPrioritiesDegradedLoad) {\n  const Upstream::HealthyLoad original_priority_load({0, 0, 0});\n  const Upstream::DegradedLoad original_degraded_priority_load({42, 28, 30});\n\n  initialize(original_priority_load, original_degraded_priority_load);\n  addHosts(0, 10, 0, 3);\n  addHosts(1, 10, 0, 2);\n  addHosts(2, 10, 0, 10);\n\n  auto host1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host1, priority()).WillByDefault(Return(0));\n\n  auto host2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host2, priority()).WillByDefault(Return(1));\n\n  auto host3 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host3, priority()).WillByDefault(Return(2));\n\n  // Before any hosts attempted, load should be unchanged.\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n\n  {\n    // After attempting a host in P0, load should be split between P1 and P2 since P1 is degraded.\n    const Upstream::HealthyLoad expected_priority_load({0, 0, 0});\n    const Upstream::DegradedLoad expected_degraded_priority_load({0, 28, 72});\n    retry_priority_->onHostAttempted(host1);\n    verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load);\n  }\n\n  // After we've tried host2, everything should go to P2.\n  const 
Upstream::HealthyLoad expected_priority_load({0, 0, 0});\n  const Upstream::DegradedLoad expected_degraded_priority_load({0, 0, 100});\n  retry_priority_->onHostAttempted(host2);\n  verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load);\n\n  // Once we've exhausted all priorities, we should return to the original load.\n  retry_priority_->onHostAttempted(host3);\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n}\n\n// Tests that we account for spillover between healthy and degraded priority load.\nTEST_F(RetryPriorityTest, DefaultFrequencyUnhealthyPrioritiesDegradedLoadSpillover) {\n  const Upstream::HealthyLoad original_priority_load({0, 100, 0});\n  const Upstream::DegradedLoad original_degraded_priority_load({0, 0, 0});\n\n  initialize(original_priority_load, original_degraded_priority_load);\n  addHosts(0, 10, 0, 3);\n  addHosts(1, 10, 9, 1);\n  addHosts(2, 10, 2, 0);\n\n  auto host1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host1, priority()).WillByDefault(Return(0));\n\n  auto host2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host2, priority()).WillByDefault(Return(1));\n\n  auto host3 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host3, priority()).WillByDefault(Return(2));\n\n  // Before any hosts attempted, load should be unchanged.\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n\n  {\n    // After attempting a host in P1, load should be split between P2 (healthy),\n    // and P0, P2 (degraded).\n    const Upstream::HealthyLoad expected_priority_load({0, 0, 40});\n    const Upstream::DegradedLoad expected_degraded_priority_load({60, 0, 0});\n    retry_priority_->onHostAttempted(host2);\n    verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load);\n  }\n\n  // After we've tried host3, everything should go to P0 (degraded).\n  const Upstream::HealthyLoad expected_priority_load({0, 0, 
0});\n  const Upstream::DegradedLoad expected_degraded_priority_load({100, 0, 0});\n  retry_priority_->onHostAttempted(host3);\n  verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load);\n\n  // Once we've exhausted all priorities, we should return to the original load.\n  retry_priority_->onHostAttempted(host1);\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n}\n\n// Tests that we can override the frequency at which we update the priority load with the\n// update_frequency parameter.\nTEST_F(RetryPriorityTest, OverriddenFrequency) {\n  update_frequency_ = 2;\n\n  const Upstream::HealthyLoad original_priority_load({100, 0});\n  const Upstream::DegradedLoad original_degraded_priority_load({0, 0});\n\n  initialize(original_priority_load, original_degraded_priority_load);\n  addHosts(0, 2, 2);\n  addHosts(1, 2, 2);\n\n  auto host1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host1, priority()).WillByDefault(Return(0));\n\n  auto host2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  ON_CALL(*host2, priority()).WillByDefault(Return(1));\n\n  // Before any hosts attempted, load should be unchanged.\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n\n  // After attempting a single host in P0, we should leave the priority load unchanged.\n  retry_priority_->onHostAttempted(host1);\n  verifyPriorityLoads(original_priority_load, original_degraded_priority_load);\n\n  // After a second attempt, the priority load should change.\n  const Upstream::HealthyLoad expected_priority_load({0, 100});\n  const Upstream::DegradedLoad expected_degraded_priority_load({0, 0});\n  retry_priority_->onHostAttempted(host1);\n  verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load);\n}\n\n// Tests that an invalid frequency results into a config error.\nTEST_F(RetryPriorityTest, OverriddenFrequencyInvalidValue) {\n  update_frequency_ = 0;\n\n  const 
Upstream::HealthyLoad original_priority_load({100, 0});\n  const Upstream::DegradedLoad original_degraded_priority_load({0, 0});\n\n  EXPECT_THROW_WITH_REGEX(initialize(original_priority_load, original_degraded_priority_load),\n                          EnvoyException,\n                          \"Proto constraint validation failed.*value must be greater than.*\");\n}\n\n} // namespace\n} // namespace Priority\n} // namespace Retry\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/common/statsd/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"statsd_test\",\n    srcs = [\"statsd_test.cc\"],\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/stat_sinks/common/statsd:statsd_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"udp_statsd_test\",\n    srcs = [\"udp_statsd_test.cc\"],\n    deps = [\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/stat_sinks/common/statsd:statsd_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/stats_sinks/common/statsd/statsd_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n\n#include \"common/network/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/stat_sinks/common/statsd/statsd.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/host.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Common {\nnamespace Statsd {\nnamespace {\n\nclass TcpStatsdSinkTest : public testing::Test {\npublic:\n  TcpStatsdSinkTest() {\n    sink_ = std::make_unique<TcpStatsdSink>(\n        local_info_, \"fake_cluster\", tls_, cluster_manager_,\n        cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_);\n  }\n\n  void expectCreateConnection() {\n    connection_ = new NiceMock<Network::MockClientConnection>();\n    Upstream::MockHost::MockCreateConnectionData conn_info;\n    conn_info.connection_ = connection_;\n    conn_info.host_description_ = Upstream::makeTestHost(\n        std::make_unique<NiceMock<Upstream::MockClusterInfo>>(), \"tcp://127.0.0.1:80\");\n\n    EXPECT_CALL(cluster_manager_, tcpConnForCluster_(\"fake_cluster\", _))\n        .WillOnce(Return(conn_info));\n    EXPECT_CALL(*connection_, setConnectionStats(_));\n    EXPECT_CALL(*connection_, connect());\n  }\n\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  std::unique_ptr<TcpStatsdSink> sink_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  
Network::MockClientConnection* connection_{};\n  NiceMock<Stats::MockMetricSnapshot> snapshot_;\n};\n\nTEST_F(TcpStatsdSinkTest, EmptyFlush) {\n  InSequence s;\n  expectCreateConnection();\n  EXPECT_CALL(*connection_, write(BufferStringEqual(\"\"), _));\n  sink_->flush(snapshot_);\n}\n\nTEST_F(TcpStatsdSinkTest, BasicFlow) {\n  InSequence s;\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.latch_ = 1;\n  counter.used_ = true;\n  snapshot_.counters_.push_back({1, counter});\n\n  NiceMock<Stats::MockGauge> gauge;\n  gauge.name_ = \"test_gauge\";\n  gauge.value_ = 2;\n  gauge.used_ = true;\n  snapshot_.gauges_.push_back(gauge);\n\n  expectCreateConnection();\n  EXPECT_CALL(*connection_,\n              write(BufferStringEqual(\"envoy.test_counter:1|c\\nenvoy.test_gauge:2|g\\n\"), _));\n  sink_->flush(snapshot_);\n\n  connection_->runHighWatermarkCallbacks();\n  connection_->runLowWatermarkCallbacks();\n\n  // Test a disconnect. We should connect again.\n  connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);\n\n  expectCreateConnection();\n\n  NiceMock<Stats::MockHistogram> timer;\n  timer.name_ = \"test_timer\";\n  EXPECT_CALL(*connection_, write(BufferStringEqual(\"envoy.test_timer:5|ms\\n\"), _));\n  sink_->onHistogramComplete(timer, 5);\n\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));\n  tls_.shutdownThread();\n}\n\nTEST_F(TcpStatsdSinkTest, SiSuffix) {\n  InSequence s;\n  expectCreateConnection();\n\n  NiceMock<Stats::MockHistogram> items;\n  items.name_ = \"items\";\n  items.unit_ = Stats::Histogram::Unit::Unspecified;\n\n  EXPECT_CALL(*connection_, write(BufferStringEqual(\"envoy.items:1|ms\\n\"), _));\n  sink_->onHistogramComplete(items, 1);\n\n  NiceMock<Stats::MockHistogram> information;\n  information.name_ = \"information\";\n  information.unit_ = Stats::Histogram::Unit::Bytes;\n\n  EXPECT_CALL(*connection_, write(BufferStringEqual(\"envoy.information:2|ms\\n\"), _));\n  
sink_->onHistogramComplete(information, 2);\n\n  NiceMock<Stats::MockHistogram> duration_micro;\n  duration_micro.name_ = \"duration\";\n  duration_micro.unit_ = Stats::Histogram::Unit::Microseconds;\n\n  EXPECT_CALL(*connection_, write(BufferStringEqual(\"envoy.duration:3|ms\\n\"), _));\n  sink_->onHistogramComplete(duration_micro, 3);\n\n  NiceMock<Stats::MockHistogram> duration_milli;\n  duration_milli.name_ = \"duration\";\n  duration_milli.unit_ = Stats::Histogram::Unit::Milliseconds;\n\n  EXPECT_CALL(*connection_, write(BufferStringEqual(\"envoy.duration:4|ms\\n\"), _));\n  sink_->onHistogramComplete(duration_milli, 4);\n\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));\n  tls_.shutdownThread();\n}\n\n// Verify that when there is no statsd host we correctly empty all output buffers so we don't\n// infinitely buffer.\nTEST_F(TcpStatsdSinkTest, NoHost) {\n  InSequence s;\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.latch_ = 1;\n  counter.used_ = true;\n  snapshot_.counters_.push_back({1, counter});\n\n  Upstream::MockHost::MockCreateConnectionData conn_info;\n  EXPECT_CALL(cluster_manager_, tcpConnForCluster_(\"fake_cluster\", _))\n      .WillOnce(Return(conn_info))\n      .WillOnce(Return(conn_info));\n  sink_->flush(snapshot_);\n\n  // Flush again to make sure we correctly drain the buffer and the output buffer is empty.\n  sink_->flush(snapshot_);\n}\n\nTEST_F(TcpStatsdSinkTest, WithCustomPrefix) {\n  sink_ = std::make_unique<TcpStatsdSink>(\n      local_info_, \"fake_cluster\", tls_, cluster_manager_,\n      cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_, \"test_prefix\");\n\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.latch_ = 1;\n  counter.used_ = true;\n  snapshot_.counters_.push_back({1, counter});\n\n  expectCreateConnection();\n  EXPECT_CALL(*connection_, write(BufferStringEqual(\"test_prefix.test_counter:1|c\\n\"), 
_));\n  sink_->flush(snapshot_);\n}\n\nTEST_F(TcpStatsdSinkTest, BufferReallocate) {\n  InSequence s;\n\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.latch_ = 1;\n  counter.used_ = true;\n\n  snapshot_.counters_.resize(2000, {1, counter});\n\n  expectCreateConnection();\n  EXPECT_CALL(*connection_, write(_, _))\n      .WillOnce(Invoke([](Buffer::Instance& buffer, bool) -> void {\n        std::string compare;\n        for (int i = 0; i < 2000; i++) {\n          compare += \"envoy.test_counter:1|c\\n\";\n        }\n        EXPECT_EQ(compare, buffer.toString());\n        buffer.drain(buffer.length());\n      }));\n  sink_->flush(snapshot_);\n}\n\nTEST_F(TcpStatsdSinkTest, Overflow) {\n  InSequence s;\n\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.latch_ = 1;\n  counter.used_ = true;\n  snapshot_.counters_.push_back({1, counter});\n\n  // Synthetically set buffer above high watermark. Make sure we don't write anything.\n  cluster_manager_.thread_local_cluster_.cluster_.info_->stats().upstream_cx_tx_bytes_buffered_.set(\n      1024 * 1024 * 17);\n  sink_->flush(snapshot_);\n\n  // Lower and make sure we write.\n  cluster_manager_.thread_local_cluster_.cluster_.info_->stats().upstream_cx_tx_bytes_buffered_.set(\n      1024 * 1024 * 15);\n  expectCreateConnection();\n  EXPECT_CALL(*connection_, write(BufferStringEqual(\"envoy.test_counter:1|c\\n\"), _));\n  sink_->flush(snapshot_);\n\n  // Raise and make sure we don't write and kill connection.\n  cluster_manager_.thread_local_cluster_.cluster_.info_->stats().upstream_cx_tx_bytes_buffered_.set(\n      1024 * 1024 * 17);\n  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));\n  sink_->flush(snapshot_);\n\n  EXPECT_EQ(2UL, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_\n                     .counter(\"statsd.cx_overflow\")\n                     .value());\n  tls_.shutdownThread();\n}\n\n} // 
namespace\n} // namespace Statsd\n} // namespace Common\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/stat_sinks/common/statsd/statsd.h\"\n\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"spdlog/spdlog.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Common {\nnamespace Statsd {\nnamespace {\n\nclass MockWriter : public UdpStatsdSink::Writer {\npublic:\n  MOCK_METHOD(void, write, (const std::string& message));\n  MOCK_METHOD(void, writeBuffer, (Buffer::Instance & buffer));\n\n  void delegateBufferFake() {\n    ON_CALL(*this, writeBuffer).WillByDefault([this](Buffer::Instance& buffer) {\n      this->buffer_writes.push_back(buffer.toString());\n    });\n  }\n\n  std::vector<std::string> buffer_writes;\n};\n\n// Skipping this test as Datagram sockets are not currently supported by UDS on Windows\n#ifndef WIN32\n// Regression test for https://github.com/envoyproxy/envoy/issues/8911\nTEST(UdpOverUdsStatsdSinkTest, InitWithPipeAddress) {\n  auto uds_address = std::make_shared<Network::Address::PipeInstance>(\n      TestEnvironment::unixDomainSocketPath(\"udstest.1.sock\"));\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  UdpStatsdSink sink(tls_, uds_address, false);\n\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.used_ = true;\n  counter.latch_ = 1;\n  snapshot.counters_.push_back({1, counter});\n\n  // Flush before the server is running. 
This will fail.\n  sink.flush(snapshot);\n\n  // Start the server.\n  Network::SocketImpl sock(Network::Socket::Type::Datagram, uds_address);\n  RELEASE_ASSERT(sock.setBlockingForTest(false).rc_ != -1, \"\");\n  sock.bind(uds_address);\n\n  // Do the flush which should have somewhere to write now.\n  sink.flush(snapshot);\n  Buffer::OwnedImpl receive_buffer;\n  sock.ioHandle().read(receive_buffer, 32);\n  EXPECT_EQ(\"envoy.test_counter:1|c\", receive_buffer.toString());\n}\n#endif\n\nclass UdpStatsdSinkTest : public testing::TestWithParam<Network::Address::IpVersion> {};\nINSTANTIATE_TEST_SUITE_P(IpVersions, UdpStatsdSinkTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(UdpStatsdSinkTest, InitWithIpAddress) {\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  Network::Test::UdpSyncPeer server(GetParam());\n  UdpStatsdSink sink(tls_, server.localAddress(), false);\n\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.used_ = true;\n  counter.latch_ = 1;\n  snapshot.counters_.push_back({1, counter});\n\n  NiceMock<Stats::MockGauge> gauge;\n  gauge.name_ = \"test_gauge\";\n  gauge.value_ = 1;\n  gauge.used_ = true;\n  snapshot.gauges_.push_back(gauge);\n\n  sink.flush(snapshot);\n  Network::UdpRecvData data;\n  server.recv(data);\n  EXPECT_EQ(\"envoy.test_counter:1|c\", data.buffer_->toString());\n  Network::UdpRecvData data2;\n  server.recv(data2);\n  EXPECT_EQ(\"envoy.test_gauge:1|g\", data2.buffer_->toString());\n\n  NiceMock<Stats::MockHistogram> timer;\n  timer.name_ = \"test_timer\";\n  sink.onHistogramComplete(timer, 5);\n  Network::UdpRecvData data3;\n  server.recv(data3);\n  EXPECT_EQ(\"envoy.test_timer:5|ms\", data3.buffer_->toString());\n\n  tls_.shutdownThread();\n}\n\nclass UdpStatsdSinkWithTagsTest : public testing::TestWithParam<Network::Address::IpVersion> 
{};\nINSTANTIATE_TEST_SUITE_P(IpVersions, UdpStatsdSinkWithTagsTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(UdpStatsdSinkWithTagsTest, InitWithIpAddress) {\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  Network::Test::UdpSyncPeer server(GetParam());\n  UdpStatsdSink sink(tls_, server.localAddress(), true);\n\n  std::vector<Stats::Tag> tags = {Stats::Tag{\"node\", \"test\"}};\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.used_ = true;\n  counter.latch_ = 1;\n  counter.setTags(tags);\n  snapshot.counters_.push_back({1, counter});\n\n  NiceMock<Stats::MockGauge> gauge;\n  gauge.name_ = \"test_gauge\";\n  gauge.value_ = 1;\n  gauge.used_ = true;\n  gauge.setTags(tags);\n  snapshot.gauges_.push_back(gauge);\n\n  sink.flush(snapshot);\n  Network::UdpRecvData data;\n  server.recv(data);\n  EXPECT_EQ(\"envoy.test_counter:1|c|#node:test\", data.buffer_->toString());\n  Network::UdpRecvData data2;\n  server.recv(data2);\n  EXPECT_EQ(\"envoy.test_gauge:1|g|#node:test\", data2.buffer_->toString());\n\n  NiceMock<Stats::MockHistogram> timer;\n  timer.name_ = \"test_timer\";\n  timer.setTags(tags);\n  sink.onHistogramComplete(timer, 5);\n  Network::UdpRecvData data3;\n  server.recv(data3);\n  EXPECT_EQ(\"envoy.test_timer:5|ms|#node:test\", data3.buffer_->toString());\n\n  tls_.shutdownThread();\n}\n\nTEST(UdpStatsdSinkTest, CheckActualStats) {\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  auto writer_ptr = std::make_shared<NiceMock<MockWriter>>();\n  writer_ptr->delegateBufferFake();\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), 1024);\n\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.used_ = true;\n  counter.latch_ = 1;\n  snapshot.counters_.push_back({1, 
counter});\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr), writeBuffer(_))\n      .Times(1);\n  sink.flush(snapshot);\n  EXPECT_EQ(writer_ptr->buffer_writes.size(), 1);\n  EXPECT_EQ(writer_ptr->buffer_writes.at(0), \"envoy.test_counter:1|c\");\n  counter.used_ = false;\n\n  NiceMock<Stats::MockGauge> gauge;\n  gauge.name_ = \"test_gauge\";\n  gauge.value_ = 1;\n  gauge.used_ = true;\n  snapshot.gauges_.push_back(gauge);\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr), writeBuffer(_));\n  sink.flush(snapshot);\n  EXPECT_EQ(writer_ptr->buffer_writes.size(), 2);\n  EXPECT_EQ(writer_ptr->buffer_writes.at(1), \"envoy.test_gauge:1|g\");\n\n  NiceMock<Stats::MockHistogram> timer;\n  timer.name_ = \"test_timer\";\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.test_timer:5|ms\"));\n  sink.onHistogramComplete(timer, 5);\n\n  tls_.shutdownThread();\n}\n\nTEST(UdpStatsdSinkTest, CheckMetricLargerThanBuffer) {\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  auto writer_ptr = std::make_shared<NiceMock<MockWriter>>();\n  writer_ptr->delegateBufferFake();\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  uint64_t buffer_size = 4;\n  UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), buffer_size);\n\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.used_ = true;\n  counter.latch_ = 1;\n  snapshot.counters_.push_back({1, counter});\n\n  // Expect the metric to skip the buffer\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.test_counter:1|c\"));\n  sink.flush(snapshot);\n  counter.used_ = false;\n\n  NiceMock<Stats::MockGauge> gauge;\n  gauge.name_ = \"test_gauge\";\n  gauge.value_ = 1;\n  gauge.used_ = true;\n  snapshot.gauges_.push_back(gauge);\n\n  // Expect the metric to skip the buffer\n  
EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.test_gauge:1|g\"));\n  sink.flush(snapshot);\n\n  tls_.shutdownThread();\n}\n\nTEST(UdpStatsdSinkTest, CheckBufferedWritesWithinBufferSize) {\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  auto writer_ptr = std::make_shared<NiceMock<MockWriter>>();\n  writer_ptr->delegateBufferFake();\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  uint64_t buffer_size = 1024;\n  UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), buffer_size);\n\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.used_ = true;\n  counter.latch_ = 1;\n  snapshot.counters_.push_back({1, counter});\n\n  NiceMock<Stats::MockGauge> gauge;\n  gauge.name_ = \"test_gauge\";\n  gauge.value_ = 1;\n  gauge.used_ = true;\n  snapshot.gauges_.push_back(gauge);\n\n  // Expect both metrics to be present in single write\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr), writeBuffer(_))\n      .Times(1);\n  sink.flush(snapshot);\n  EXPECT_EQ(writer_ptr->buffer_writes.size(), 1);\n  EXPECT_EQ(writer_ptr->buffer_writes.at(0), \"envoy.test_counter:1|c\\nenvoy.test_gauge:1|g\");\n\n  tls_.shutdownThread();\n}\n\nTEST(UdpStatsdSinkTest, CheckBufferedWritesExceedingBufferSize) {\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  auto writer_ptr = std::make_shared<NiceMock<MockWriter>>();\n  writer_ptr->delegateBufferFake();\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  uint64_t buffer_size = 64;\n  UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), buffer_size);\n\n  NiceMock<Stats::MockCounter> counter_1;\n  counter_1.name_ = \"test_counter_1\";\n  counter_1.used_ = true;\n  counter_1.latch_ = 1;\n  snapshot.counters_.push_back({1, counter_1});\n\n  NiceMock<Stats::MockCounter> counter_2;\n  counter_2.name_ = \"test_counter_2\";\n  counter_2.used_ = true;\n  counter_2.latch_ = 1;\n  snapshot.counters_.push_back({1, 
counter_2});\n\n  NiceMock<Stats::MockGauge> gauge;\n  gauge.name_ = \"test_gauge\";\n  gauge.value_ = 1;\n  gauge.used_ = true;\n  snapshot.gauges_.push_back(gauge);\n\n  // Expect the metrics to exceed the buffer size and be split across two writes\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr), writeBuffer(_))\n      .Times(2);\n  sink.flush(snapshot);\n  EXPECT_EQ(writer_ptr->buffer_writes.size(), 2);\n  EXPECT_EQ(writer_ptr->buffer_writes.at(0), \"envoy.test_counter_1:1|c\\nenvoy.test_counter_2:1|c\");\n  EXPECT_EQ(writer_ptr->buffer_writes.at(1), \"envoy.test_gauge:1|g\");\n\n  tls_.shutdownThread();\n}\n\nTEST(UdpStatsdSinkTest, CheckActualStatsWithCustomPrefix) {\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  auto writer_ptr = std::make_shared<NiceMock<MockWriter>>();\n  writer_ptr->delegateBufferFake();\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  UdpStatsdSink sink(tls_, writer_ptr, false, \"test_prefix\", 1024);\n\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.used_ = true;\n  counter.latch_ = 1;\n  snapshot.counters_.push_back({1, counter});\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr), writeBuffer(_));\n  sink.flush(snapshot);\n  EXPECT_EQ(writer_ptr->buffer_writes.size(), 1);\n  EXPECT_EQ(writer_ptr->buffer_writes.at(0), \"test_prefix.test_counter:1|c\");\n  counter.used_ = false;\n\n  tls_.shutdownThread();\n}\n\nTEST(UdpStatsdSinkTest, SiSuffix) {\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  auto writer_ptr = std::make_shared<NiceMock<MockWriter>>();\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  UdpStatsdSink sink(tls_, writer_ptr, false);\n\n  NiceMock<Stats::MockHistogram> items;\n  items.name_ = \"items\";\n  items.unit_ = Stats::Histogram::Unit::Unspecified;\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.items:1|ms\"));\n  sink.onHistogramComplete(items, 1);\n\n  
NiceMock<Stats::MockHistogram> information;\n  information.name_ = \"information\";\n  information.unit_ = Stats::Histogram::Unit::Bytes;\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.information:2|ms\"));\n  sink.onHistogramComplete(information, 2);\n\n  NiceMock<Stats::MockHistogram> duration_micro;\n  duration_micro.name_ = \"duration\";\n  duration_micro.unit_ = Stats::Histogram::Unit::Microseconds;\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.duration:3|ms\"));\n  sink.onHistogramComplete(duration_micro, 3);\n\n  NiceMock<Stats::MockHistogram> duration_milli;\n  duration_milli.name_ = \"duration\";\n  duration_milli.unit_ = Stats::Histogram::Unit::Milliseconds;\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.duration:4|ms\"));\n  sink.onHistogramComplete(duration_milli, 4);\n\n  tls_.shutdownThread();\n}\n\nTEST(UdpStatsdSinkWithTagsTest, CheckActualStats) {\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  auto writer_ptr = std::make_shared<NiceMock<MockWriter>>();\n  writer_ptr->delegateBufferFake();\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  UdpStatsdSink sink(tls_, writer_ptr, true, getDefaultPrefix(), 1024);\n\n  std::vector<Stats::Tag> tags = {Stats::Tag{\"key1\", \"value1\"}, Stats::Tag{\"key2\", \"value2\"}};\n  NiceMock<Stats::MockCounter> counter;\n  counter.name_ = \"test_counter\";\n  counter.used_ = true;\n  counter.latch_ = 1;\n  counter.setTags(tags);\n  snapshot.counters_.push_back({1, counter});\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr), writeBuffer(_));\n  sink.flush(snapshot);\n  EXPECT_EQ(writer_ptr->buffer_writes.size(), 1);\n  EXPECT_EQ(writer_ptr->buffer_writes.at(0), \"envoy.test_counter:1|c|#key1:value1,key2:value2\");\n  counter.used_ = false;\n\n  NiceMock<Stats::MockGauge> gauge;\n  gauge.name_ = 
\"test_gauge\";\n  gauge.value_ = 1;\n  gauge.used_ = true;\n  gauge.setTags(tags);\n  snapshot.gauges_.push_back(gauge);\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr), writeBuffer(_));\n  sink.flush(snapshot);\n  EXPECT_EQ(writer_ptr->buffer_writes.size(), 2);\n  EXPECT_EQ(writer_ptr->buffer_writes.at(1), \"envoy.test_gauge:1|g|#key1:value1,key2:value2\");\n\n  NiceMock<Stats::MockHistogram> timer;\n  timer.name_ = \"test_timer\";\n  timer.setTags(tags);\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.test_timer:5|ms|#key1:value1,key2:value2\"));\n  sink.onHistogramComplete(timer, 5);\n\n  tls_.shutdownThread();\n}\n\nTEST(UdpStatsdSinkWithTagsTest, SiSuffix) {\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  auto writer_ptr = std::make_shared<NiceMock<MockWriter>>();\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  UdpStatsdSink sink(tls_, writer_ptr, true);\n\n  std::vector<Stats::Tag> tags = {Stats::Tag{\"key1\", \"value1\"}, Stats::Tag{\"key2\", \"value2\"}};\n\n  NiceMock<Stats::MockHistogram> items;\n  items.name_ = \"items\";\n  items.unit_ = Stats::Histogram::Unit::Unspecified;\n  items.setTags(tags);\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.items:1|ms|#key1:value1,key2:value2\"));\n  sink.onHistogramComplete(items, 1);\n\n  NiceMock<Stats::MockHistogram> information;\n  information.name_ = \"information\";\n  information.unit_ = Stats::Histogram::Unit::Bytes;\n  information.setTags(tags);\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.information:2|ms|#key1:value1,key2:value2\"));\n  sink.onHistogramComplete(information, 2);\n\n  NiceMock<Stats::MockHistogram> duration_micro;\n  duration_micro.name_ = \"duration\";\n  duration_micro.unit_ = Stats::Histogram::Unit::Microseconds;\n  duration_micro.setTags(tags);\n\n  
EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.duration:3|ms|#key1:value1,key2:value2\"));\n  sink.onHistogramComplete(duration_micro, 3);\n\n  NiceMock<Stats::MockHistogram> duration_milli;\n  duration_milli.name_ = \"duration\";\n  duration_milli.unit_ = Stats::Histogram::Unit::Milliseconds;\n  duration_milli.setTags(tags);\n\n  EXPECT_CALL(*std::dynamic_pointer_cast<NiceMock<MockWriter>>(writer_ptr),\n              write(\"envoy.duration:4|ms|#key1:value1,key2:value2\"));\n  sink.onHistogramComplete(duration_milli, 4);\n\n  tls_.shutdownThread();\n}\n\n} // namespace\n} // namespace Statsd\n} // namespace Common\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/dog_statsd/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.stat_sinks.dog_statsd\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/stat_sinks/dog_statsd:config\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/stats_sinks/dog_statsd/config_test.cc",
    "content": "#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/well_known_names.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/stat_sinks/common/statsd/statsd.h\"\n#include \"extensions/stat_sinks/dog_statsd/config.h\"\n#include \"extensions/stat_sinks/well_known_names.h\"\n\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace DogStatsd {\nnamespace {\n\nclass DogStatsdConfigLoopbackTest : public testing::TestWithParam<Network::Address::IpVersion> {};\nINSTANTIATE_TEST_SUITE_P(IpVersions, DogStatsdConfigLoopbackTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(DogStatsdConfigLoopbackTest, ValidUdpIp) {\n  const std::string name = StatsSinkNames::get().DogStatsd;\n\n  envoy::config::metrics::v3::DogStatsdSink sink_config;\n  envoy::config::core::v3::Address& address = *sink_config.mutable_address();\n  envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address();\n  socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP);\n  Network::Address::InstanceConstSharedPtr loopback_flavor =\n      Network::Test::getCanonicalLoopbackAddress(GetParam());\n  socket_address.set_address(loopback_flavor->ip()->addressAsString());\n  socket_address.set_port_value(8125);\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n\n  ProtobufTypes::MessagePtr message = 
factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  EXPECT_NE(sink, nullptr);\n  auto udp_sink = dynamic_cast<Common::Statsd::UdpStatsdSink*>(sink.get());\n  EXPECT_NE(udp_sink, nullptr);\n  EXPECT_EQ(udp_sink->getUseTagForTest(), true);\n  EXPECT_EQ(udp_sink->getPrefix(), Common::Statsd::getDefaultPrefix());\n}\n\n// Negative test for protoc-gen-validate constraints for dog_statsd.\nTEST(DogStatsdConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  EXPECT_THROW(\n      DogStatsdSinkFactory().createStatsSink(envoy::config::metrics::v3::DogStatsdSink(), server),\n      ProtoValidationException);\n}\n\nTEST_P(DogStatsdConfigLoopbackTest, CustomBufferSize) {\n  const std::string name = StatsSinkNames::get().DogStatsd;\n\n  envoy::config::metrics::v3::DogStatsdSink sink_config;\n  sink_config.mutable_max_bytes_per_datagram()->set_value(128);\n  envoy::config::core::v3::Address& address = *sink_config.mutable_address();\n  envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address();\n  socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP);\n  Network::Address::InstanceConstSharedPtr loopback_flavor =\n      Network::Test::getCanonicalLoopbackAddress(GetParam());\n  socket_address.set_address(loopback_flavor->ip()->addressAsString());\n  socket_address.set_port_value(8125);\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = 
factory->createStatsSink(*message, server);\n  ASSERT_NE(sink, nullptr);\n  auto udp_sink = dynamic_cast<Common::Statsd::UdpStatsdSink*>(sink.get());\n  ASSERT_NE(udp_sink, nullptr);\n  EXPECT_EQ(udp_sink->getBufferSizeForTest(), 128);\n}\n\nTEST_P(DogStatsdConfigLoopbackTest, DefaultBufferSize) {\n  const std::string name = StatsSinkNames::get().DogStatsd;\n\n  envoy::config::metrics::v3::DogStatsdSink sink_config;\n  envoy::config::core::v3::Address& address = *sink_config.mutable_address();\n  envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address();\n  socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP);\n  Network::Address::InstanceConstSharedPtr loopback_flavor =\n      Network::Test::getCanonicalLoopbackAddress(GetParam());\n  socket_address.set_address(loopback_flavor->ip()->addressAsString());\n  socket_address.set_port_value(8125);\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  ASSERT_NE(sink, nullptr);\n  auto udp_sink = dynamic_cast<Common::Statsd::UdpStatsdSink*>(sink.get());\n  ASSERT_NE(udp_sink, nullptr);\n  // Expect default buffer size of 0 (no buffering)\n  EXPECT_EQ(udp_sink->getBufferSizeForTest(), 0);\n}\n\nTEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) {\n  const std::string name = StatsSinkNames::get().DogStatsd;\n\n  envoy::config::metrics::v3::DogStatsdSink sink_config;\n  envoy::config::core::v3::Address& address = *sink_config.mutable_address();\n  envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address();\n  
socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP);\n  Network::Address::InstanceConstSharedPtr loopback_flavor =\n      Network::Test::getCanonicalLoopbackAddress(GetParam());\n  socket_address.set_address(loopback_flavor->ip()->addressAsString());\n  socket_address.set_port_value(8125);\n\n  const std::string customPrefix = \"prefix.test\";\n  sink_config.set_prefix(customPrefix);\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  ASSERT_NE(sink, nullptr);\n  auto udp_sink = dynamic_cast<Common::Statsd::UdpStatsdSink*>(sink.get());\n  ASSERT_NE(udp_sink, nullptr);\n  EXPECT_EQ(udp_sink->getPrefix(), customPrefix);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(DogStatsdConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.dog_statsd\";\n\n  ASSERT_NE(nullptr, Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(\n                         deprecated_name));\n}\n\n} // namespace\n} // namespace DogStatsd\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/hystrix/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.stat_sinks.hystrix\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/stat_sinks/hystrix:config\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"hystrix_test\",\n    srcs = [\"hystrix_test.cc\"],\n    extension_name = \"envoy.stat_sinks.hystrix\",\n    deps = [\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/stat_sinks/hystrix:hystrix_lib\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:admin_stream_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"hystrix_integration_test\",\n    srcs = [\"hystrix_integration_test.cc\"],\n    extension_name = \"envoy.stat_sinks.hystrix\",\n    deps = [\n        \"//source/extensions/stat_sinks/hystrix:config\",\n        \"//test/integration:http_protocol_integration_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/stats_sinks/hystrix/config_test.cc",
    "content": "#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/stat_sinks/hystrix/config.h\"\n#include \"extensions/stat_sinks/hystrix/hystrix.h\"\n#include \"extensions/stat_sinks/well_known_names.h\"\n\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Hystrix {\nnamespace {\n\nTEST(StatsConfigTest, ValidHystrixSink) {\n  const std::string name = StatsSinkNames::get().Hystrix;\n\n  envoy::config::metrics::v3::HystrixSink sink_config;\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  EXPECT_NE(sink, nullptr);\n  EXPECT_NE(dynamic_cast<Hystrix::HystrixSink*>(sink.get()), nullptr);\n}\n\n} // namespace\n} // namespace Hystrix\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc",
    "content": "#include \"test/integration/http_protocol_integration.h\"\n\nusing testing::HasSubstr;\nusing testing::Not;\nusing testing::StartsWith;\n\nnamespace Envoy {\n\nclass HystrixIntegrationTest : public HttpProtocolIntegrationTest {};\n\nINSTANTIATE_TEST_SUITE_P(Protocols, HystrixIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams(\n                             {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2},\n                             {FakeHttpConnection::Type::HTTP1})),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nTEST_P(HystrixIntegrationTest, NoChunkEncoding) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* metrics_sink = bootstrap.add_stats_sinks();\n    metrics_sink->set_name(\"envoy.stat_sinks.hystrix\");\n    bootstrap.mutable_stats_flush_interval()->CopyFrom(\n        Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n  });\n  initialize();\n\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    // For HTTP/1.1 we use a raw client to make absolutely sure there is no chunk encoding.\n    std::string response;\n    auto connection = createConnectionDriver(\n        lookupPort(\"admin\"), \"GET /hystrix_event_stream HTTP/1.1\\r\\nHost: admin\\r\\n\\r\\n\",\n        [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void {\n          response.append(data.toString());\n          if (response.find(\"rollingCountCollapsedRequests\") != std::string::npos) {\n            conn.close(Network::ConnectionCloseType::NoFlush);\n          }\n        });\n    connection->run();\n    EXPECT_THAT(response, StartsWith(\"HTTP/1.1 200 OK\\r\\n\"));\n    // Make sure that the response is not actually chunk encoded, but it does have the hystrix flush\n    // trailer.\n    EXPECT_THAT(response, Not(HasSubstr(\"chunked\")));\n    
EXPECT_THAT(response, Not(HasSubstr(\"3\\r\\n:\\n\\n\")));\n    EXPECT_THAT(response, HasSubstr(\":\\n\\n\"));\n    connection->close();\n  } else {\n    codec_client_ = makeHttpConnection(lookupPort(\"admin\"));\n    auto response = codec_client_->makeHeaderOnlyRequest(\n        Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                       {\":path\", \"/hystrix_event_stream\"},\n                                       {\":scheme\", \"http\"},\n                                       {\":authority\", \"admin\"}});\n    response->waitForBodyData(1);\n    EXPECT_THAT(response->body(), HasSubstr(\"rollingCountCollapsedRequests\"));\n    codec_client_->close();\n  }\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/hystrix/hystrix_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <sstream>\n\n#include \"common/json/json_loader.h\"\n\n#include \"extensions/stat_sinks/hystrix/hystrix.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/server/admin_stream.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n\n#include \"absl/strings/str_split.h\"\n#include \"circllhist.h\"\n#include \"fmt/printf.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::HasSubstr;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Hystrix {\nnamespace {\n\nclass ClusterTestInfo {\n\npublic:\n  ClusterTestInfo(const std::string cluster_name) : cluster_name_(cluster_name) {\n    ON_CALL(cluster_, info()).WillByDefault(Return(cluster_info_ptr_));\n    ON_CALL(*cluster_info_, name()).WillByDefault(testing::ReturnRefOfCopy(cluster_name_));\n    ON_CALL(*cluster_info_, statsScope()).WillByDefault(ReturnRef(cluster_stats_scope_));\n\n    // Set gauge value.\n    membership_total_gauge_.name_ = \"membership_total\";\n    ON_CALL(cluster_stats_scope_, gauge(\"membership_total\", Stats::Gauge::ImportMode::NeverImport))\n        .WillByDefault(ReturnRef(membership_total_gauge_));\n    ON_CALL(membership_total_gauge_, value()).WillByDefault(Return(5));\n\n    // Attach counters.\n    setCounterForTest(success_counter_, \"upstream_rq_2xx\");\n    setCounterForTest(error_5xx_counter_, \"upstream_rq_5xx\");\n    setCounterForTest(retry_5xx_counter_, \"retry.upstream_rq_5xx\");\n    setCounterForTest(error_4xx_counter_, \"upstream_rq_4xx\");\n    setCounterForTest(retry_4xx_counter_, 
\"retry.upstream_rq_4xx\");\n    setCountersToZero();\n  }\n\n  // Attach the counter to cluster_stat_scope and set default value.\n  void setCounterForTest(NiceMock<Stats::MockCounter>& counter, std::string counter_name) {\n    counter.name_ = counter_name;\n    ON_CALL(cluster_stats_scope_, counter(counter_name)).WillByDefault(ReturnRef(counter));\n  }\n\n  void setCountersToZero() {\n    ON_CALL(error_5xx_counter_, value()).WillByDefault(Return(0));\n    ON_CALL(retry_5xx_counter_, value()).WillByDefault(Return(0));\n    ON_CALL(error_4xx_counter_, value()).WillByDefault(Return(0));\n    ON_CALL(retry_4xx_counter_, value()).WillByDefault(Return(0));\n    ON_CALL(success_counter_, value()).WillByDefault(Return(0));\n  }\n\n  // Set counter return values to simulate traffic\n  void setCounterReturnValues(const uint64_t i, const uint64_t success_step,\n                              const uint64_t error_4xx_step, const uint64_t error_4xx_retry_step,\n                              const uint64_t error_5xx_step, const uint64_t error_5xx_retry_step,\n                              const uint64_t timeout_step, const uint64_t timeout_retry_step,\n                              const uint64_t rejected_step) {\n    ON_CALL(error_5xx_counter_, value()).WillByDefault(Return((i + 1) * error_5xx_step));\n    ON_CALL(retry_5xx_counter_, value()).WillByDefault(Return((i + 1) * error_5xx_retry_step));\n    ON_CALL(error_4xx_counter_, value()).WillByDefault(Return((i + 1) * error_4xx_step));\n    ON_CALL(retry_4xx_counter_, value()).WillByDefault(Return((i + 1) * error_4xx_retry_step));\n    ON_CALL(success_counter_, value()).WillByDefault(Return((i + 1) * success_step));\n    cluster_info_->stats().upstream_rq_timeout_.add(timeout_step);\n    cluster_info_->stats().upstream_rq_per_try_timeout_.add(timeout_retry_step);\n    cluster_info_->stats().upstream_rq_pending_overflow_.add(rejected_step);\n  }\n\n  NiceMock<Upstream::MockClusterMockPrioritySet> cluster_;\n  
Upstream::MockClusterInfo* cluster_info_ = new NiceMock<Upstream::MockClusterInfo>();\n  Upstream::ClusterInfoConstSharedPtr cluster_info_ptr_{cluster_info_};\n\n  NiceMock<Stats::MockStore> stats_store_;\n  NiceMock<Stats::MockStore> cluster_stats_scope_;\n  const std::string cluster_name_;\n\n  NiceMock<Stats::MockGauge> membership_total_gauge_;\n  NiceMock<Stats::MockCounter> success_counter_;\n  NiceMock<Stats::MockCounter> error_5xx_counter_;\n  NiceMock<Stats::MockCounter> retry_5xx_counter_;\n  NiceMock<Stats::MockCounter> error_4xx_counter_;\n  NiceMock<Stats::MockCounter> retry_4xx_counter_;\n};\n\nclass HistogramWrapper {\npublic:\n  HistogramWrapper() : histogram_(hist_alloc()) {}\n\n  ~HistogramWrapper() { hist_free(histogram_); }\n\n  const histogram_t* getHistogram() { return histogram_; }\n\n  void setHistogramValues(const std::vector<uint64_t>& values) {\n    for (uint64_t value : values) {\n      hist_insert_intscale(histogram_, value, 0, 1);\n    }\n  }\n\nprivate:\n  histogram_t* histogram_;\n};\n\nclass HystrixSinkTest : public testing::Test {\npublic:\n  HystrixSinkTest() { sink_ = std::make_unique<HystrixSink>(server_, window_size_); }\n\n  Buffer::OwnedImpl createClusterAndCallbacks() {\n    // Set cluster.\n    cluster_map_.emplace(cluster1_name_, cluster1_.cluster_);\n    ON_CALL(server_, clusterManager()).WillByDefault(ReturnRef(cluster_manager_));\n    ON_CALL(cluster_manager_, clusters()).WillByDefault(Return(cluster_map_));\n\n    Buffer::OwnedImpl buffer;\n    auto encode_callback = [&buffer](Buffer::Instance& data, bool) {\n      // Set callbacks to send data to buffer. 
This will append to the end of the buffer, so\n      // multiple calls will all be dumped one after another into this buffer.\n      buffer.add(data);\n    };\n    ON_CALL(callbacks_, encodeData(_, _)).WillByDefault(Invoke(encode_callback));\n    return buffer;\n  }\n\n  void addClusterToMap(const std::string& cluster_name,\n                       NiceMock<Upstream::MockClusterMockPrioritySet>& cluster) {\n    cluster_map_.emplace(cluster_name, cluster);\n    // Redefining since cluster_map_ is returned by value.\n    ON_CALL(cluster_manager_, clusters()).WillByDefault(Return(cluster_map_));\n  }\n\n  void removeClusterFromMap(const std::string& cluster_name) {\n    cluster_map_.erase(cluster_name);\n    // Redefining since cluster_map_ is returned by value.\n    ON_CALL(cluster_manager_, clusters()).WillByDefault(Return(cluster_map_));\n  }\n\n  void addSecondClusterHelper(Buffer::OwnedImpl& buffer) {\n    buffer.drain(buffer.length());\n    cluster2_.setCountersToZero();\n    addClusterToMap(cluster2_name_, cluster2_.cluster_);\n  }\n\n  absl::node_hash_map<std::string, std::string>\n  addSecondClusterAndSendDataHelper(Buffer::OwnedImpl& buffer, const uint64_t success_step,\n                                    const uint64_t error_step, const uint64_t timeout_step,\n                                    const uint64_t success_step2, const uint64_t error_step2,\n                                    const uint64_t timeout_step2) {\n\n    // Add new cluster.\n    addSecondClusterHelper(buffer);\n\n    // Generate data to both clusters.\n    for (uint64_t i = 0; i < (window_size_ + 1); i++) {\n      buffer.drain(buffer.length());\n      cluster1_.setCounterReturnValues(i, success_step, error_step, 0, 0, 0, timeout_step, 0, 0);\n      cluster2_.setCounterReturnValues(i, success_step2, error_step2, 0, 0, 0, timeout_step2, 0, 0);\n      sink_->flush(snapshot_);\n    }\n\n    return buildClusterMap(buffer.toString());\n  }\n\n  void 
removeSecondClusterHelper(Buffer::OwnedImpl& buffer) {\n    buffer.drain(buffer.length());\n    removeClusterFromMap(cluster2_name_);\n    sink_->flush(snapshot_);\n  }\n\n  void validateResults(const std::string& data_message, uint64_t success_step, uint64_t error_step,\n                       uint64_t timeout_step, uint64_t timeout_retry_step, uint64_t rejected_step,\n                       uint64_t window_size) {\n    // Convert to json object.\n    Json::ObjectSharedPtr json_data_message = Json::Factory::loadFromString(data_message);\n    EXPECT_EQ(json_data_message->getInteger(\"rollingCountSemaphoreRejected\"),\n              (window_size * rejected_step))\n        << \"window_size=\" << window_size << \", rejected_step=\" << rejected_step;\n    EXPECT_EQ(json_data_message->getInteger(\"rollingCountSuccess\"), (window_size * success_step))\n        << \"window_size=\" << window_size << \", success_step=\" << success_step;\n    EXPECT_EQ(json_data_message->getInteger(\"rollingCountTimeout\"),\n              (window_size * (timeout_step + timeout_retry_step)))\n        << \"window_size=\" << window_size << \", timeout_step=\" << timeout_step\n        << \", timeout_retry_step=\" << timeout_retry_step;\n    EXPECT_EQ(json_data_message->getInteger(\"errorCount\"),\n              (window_size * (error_step - timeout_step)))\n        << \"window_size=\" << window_size << \", error_step=\" << error_step\n        << \", timeout_step=\" << timeout_step;\n    uint64_t total = error_step + success_step + rejected_step + timeout_retry_step;\n    EXPECT_EQ(json_data_message->getInteger(\"requestCount\"), (window_size * total))\n        << \"window_size=\" << window_size << \", total=\" << total;\n\n    if (total != 0) {\n      EXPECT_EQ(json_data_message->getInteger(\"errorPercentage\"),\n                (static_cast<uint64_t>(100 * (static_cast<double>(total - success_step) /\n                                              static_cast<double>(total)))))\n          << 
\"total=\" << total << \", success_step=\" << success_step;\n\n    } else {\n      EXPECT_EQ(json_data_message->getInteger(\"errorPercentage\"), 0);\n    }\n  }\n\n  absl::node_hash_map<std::string, std::string> buildClusterMap(absl::string_view data_message) {\n    absl::node_hash_map<std::string, std::string> cluster_message_map;\n    std::vector<std::string> messages =\n        absl::StrSplit(data_message, \"data: \", absl::SkipWhitespace());\n    for (auto message : messages) {\n      // Arrange message to remove \":\" that comes from the keepalive sync.\n      absl::RemoveExtraAsciiWhitespace(&message);\n      std::string clear_message(absl::StripSuffix(message, \":\"));\n      Json::ObjectSharedPtr json_message = Json::Factory::loadFromString(clear_message);\n      if (absl::StrContains(json_message->getString(\"type\"), \"HystrixCommand\")) {\n        std::string cluster_name(json_message->getString(\"name\"));\n        cluster_message_map[cluster_name] = message;\n      }\n    }\n    return cluster_message_map;\n  }\n\n  TestRandomGenerator rand_;\n  uint64_t window_size_ = rand_.random() % 10 + 5; // Arbitrary reasonable number.\n  const std::string cluster1_name_{\"test_cluster1\"};\n  ClusterTestInfo cluster1_{cluster1_name_};\n\n  // Second cluster for \"end and remove cluster\" tests.\n  const std::string cluster2_name_{\"test_cluster2\"};\n  ClusterTestInfo cluster2_{cluster2_name_};\n\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  NiceMock<Server::Configuration::MockServerFactoryContext> server_;\n  Upstream::ClusterManager::ClusterInfoMap cluster_map_;\n\n  std::unique_ptr<HystrixSink> sink_;\n  NiceMock<Stats::MockMetricSnapshot> snapshot_;\n  NiceMock<Upstream::MockClusterManager> cluster_manager_;\n};\n\nTEST_F(HystrixSinkTest, EmptyFlush) {\n  InSequence s;\n  Buffer::OwnedImpl buffer = createClusterAndCallbacks();\n  // Register callback to sink.\n  sink_->registerConnection(&callbacks_);\n  sink_->flush(snapshot_);\n  
absl::node_hash_map<std::string, std::string> cluster_message_map =\n      buildClusterMap(buffer.toString());\n  validateResults(cluster_message_map[cluster1_name_], 0, 0, 0, 0, 0, window_size_);\n}\n\nTEST_F(HystrixSinkTest, BasicFlow) {\n  InSequence s;\n  Buffer::OwnedImpl buffer = createClusterAndCallbacks();\n  // Register callback to sink.\n  sink_->registerConnection(&callbacks_);\n\n  // Only success traffic, check randomly increasing traffic\n  // Later in the test we'll \"shortcut\" by constant traffic\n  uint64_t traffic_counter = 0;\n\n  sink_->flush(snapshot_); // init window with 0\n  for (uint64_t i = 0; i < (window_size_ - 1); i++) {\n    buffer.drain(buffer.length());\n    traffic_counter += rand_.random() % 1000;\n    ON_CALL(cluster1_.success_counter_, value()).WillByDefault(Return(traffic_counter));\n    sink_->flush(snapshot_);\n  }\n\n  absl::node_hash_map<std::string, std::string> cluster_message_map =\n      buildClusterMap(buffer.toString());\n\n  Json::ObjectSharedPtr json_buffer =\n      Json::Factory::loadFromString(cluster_message_map[cluster1_name_]);\n  EXPECT_EQ(json_buffer->getInteger(\"rollingCountSuccess\"), traffic_counter);\n  EXPECT_EQ(json_buffer->getInteger(\"requestCount\"), traffic_counter);\n  EXPECT_EQ(json_buffer->getInteger(\"errorCount\"), 0);\n  EXPECT_EQ(json_buffer->getInteger(\"errorPercentage\"), 0);\n\n  // Check mixed traffic.\n  // Values are unimportant - they represent traffic statistics, and for the purpose of the test any\n  // arbitrary number will do. 
Only restriction is that errors >= timeouts, since in Envoy timeouts\n  // are counted as errors and therefore the code that prepares the stream for the dashboard deducts\n  // the number of timeouts from total number of errors.\n  const uint64_t success_step = 13;\n  const uint64_t error_4xx_step = 12;\n  const uint64_t error_4xx_retry_step = 11;\n  const uint64_t error_5xx_step = 10;\n  const uint64_t error_5xx_retry_step = 9;\n  const uint64_t timeout_step = 8;\n  const uint64_t timeout_retry_step = 7;\n  const uint64_t rejected_step = 6;\n\n  for (uint64_t i = 0; i < (window_size_ + 1); i++) {\n    buffer.drain(buffer.length());\n    cluster1_.setCounterReturnValues(i, success_step, error_4xx_step, error_4xx_retry_step,\n                                     error_5xx_step, error_5xx_retry_step, timeout_step,\n                                     timeout_retry_step, rejected_step);\n    sink_->flush(snapshot_);\n  }\n\n  std::string rolling_map = sink_->printRollingWindows();\n  EXPECT_NE(std::string::npos, rolling_map.find(cluster1_name_ + \".total\"))\n      << \"cluster1_name = \" << cluster1_name_;\n\n  cluster_message_map = buildClusterMap(buffer.toString());\n\n  // Check stream format and data.\n  validateResults(cluster_message_map[cluster1_name_], success_step,\n                  error_4xx_step + error_4xx_retry_step + error_5xx_step + error_5xx_retry_step,\n                  timeout_step, timeout_retry_step, rejected_step, window_size_);\n\n  // Check the values are reset.\n  buffer.drain(buffer.length());\n  sink_->resetRollingWindow();\n  sink_->flush(snapshot_);\n  cluster_message_map = buildClusterMap(buffer.toString());\n  validateResults(cluster_message_map[cluster1_name_], 0, 0, 0, 0, 0, window_size_);\n}\n\n//\nTEST_F(HystrixSinkTest, Disconnect) {\n  InSequence s;\n  Buffer::OwnedImpl buffer = createClusterAndCallbacks();\n\n  sink_->flush(snapshot_);\n  EXPECT_EQ(buffer.length(), 0);\n\n  // Register callback to sink.\n  
sink_->registerConnection(&callbacks_);\n\n  // Arbitrary numbers for testing. Make sure error > timeout.\n  uint64_t success_step = 1;\n\n  for (uint64_t i = 0; i < (window_size_ + 1); i++) {\n    buffer.drain(buffer.length());\n    ON_CALL(cluster1_.success_counter_, value()).WillByDefault(Return((i + 1) * success_step));\n    sink_->flush(snapshot_);\n  }\n\n  EXPECT_NE(buffer.length(), 0);\n  absl::node_hash_map<std::string, std::string> cluster_message_map =\n      buildClusterMap(buffer.toString());\n  Json::ObjectSharedPtr json_buffer =\n      Json::Factory::loadFromString(cluster_message_map[cluster1_name_]);\n  EXPECT_EQ(json_buffer->getInteger(\"rollingCountSuccess\"), (success_step * window_size_));\n\n  // Disconnect.\n  buffer.drain(buffer.length());\n  sink_->unregisterConnection(&callbacks_);\n  sink_->flush(snapshot_);\n  EXPECT_EQ(buffer.length(), 0);\n\n  // Reconnect.\n  buffer.drain(buffer.length());\n  sink_->registerConnection(&callbacks_);\n  ON_CALL(cluster1_.success_counter_, value()).WillByDefault(Return(success_step));\n  sink_->flush(snapshot_);\n  EXPECT_NE(buffer.length(), 0);\n  cluster_message_map = buildClusterMap(buffer.toString());\n  json_buffer = Json::Factory::loadFromString(cluster_message_map[cluster1_name_]);\n  EXPECT_EQ(json_buffer->getInteger(\"rollingCountSuccess\"), 0);\n}\n\nTEST_F(HystrixSinkTest, AddCluster) {\n  InSequence s;\n  // Register callback to sink.\n  sink_->registerConnection(&callbacks_);\n\n  // Arbitrary values for testing. 
Make sure error > timeout.\n  const uint64_t success_step = 6;\n  const uint64_t error_step = 3;\n  const uint64_t timeout_step = 1;\n\n  const uint64_t success_step2 = 44;\n  const uint64_t error_step2 = 33;\n  const uint64_t timeout_step2 = 22;\n\n  Buffer::OwnedImpl buffer = createClusterAndCallbacks();\n\n  // Add cluster and \"run\" some traffic.\n  absl::node_hash_map<std::string, std::string> cluster_message_map =\n      addSecondClusterAndSendDataHelper(buffer, success_step, error_step, timeout_step,\n                                        success_step2, error_step2, timeout_step2);\n\n  // Expect that add worked.\n  ASSERT_NE(cluster_message_map.find(cluster1_name_), cluster_message_map.end())\n      << \"cluster1_name = \" << cluster1_name_;\n  ASSERT_NE(cluster_message_map.find(cluster2_name_), cluster_message_map.end())\n      << \"cluster2_name = \" << cluster2_name_;\n\n  // Check stream format and data.\n  validateResults(cluster_message_map[cluster1_name_], success_step, error_step, timeout_step, 0, 0,\n                  window_size_);\n  validateResults(cluster_message_map[cluster2_name_], success_step2, error_step2, timeout_step2, 0,\n                  0, window_size_);\n}\n\nTEST_F(HystrixSinkTest, AddAndRemoveClusters) {\n  InSequence s;\n  // Register callback to sink.\n  sink_->registerConnection(&callbacks_);\n\n  // Arbitrary values for testing. 
Make sure error > timeout.\n  const uint64_t success_step = 436;\n  const uint64_t error_step = 547;\n  const uint64_t timeout_step = 156;\n\n  const uint64_t success_step2 = 309;\n  const uint64_t error_step2 = 934;\n  const uint64_t timeout_step2 = 212;\n\n  Buffer::OwnedImpl buffer = createClusterAndCallbacks();\n\n  // Add cluster and \"run\" some traffic.\n  addSecondClusterAndSendDataHelper(buffer, success_step, error_step, timeout_step, success_step2,\n                                    error_step2, timeout_step2);\n\n  // Remove cluster and flush data to sink.\n  removeSecondClusterHelper(buffer);\n\n  // Check that removed worked.\n  absl::node_hash_map<std::string, std::string> cluster_message_map =\n      buildClusterMap(buffer.toString());\n  ASSERT_NE(cluster_message_map.find(cluster1_name_), cluster_message_map.end())\n      << \"cluster1_name = \" << cluster1_name_;\n  ASSERT_EQ(cluster_message_map.find(cluster2_name_), cluster_message_map.end())\n      << \"cluster2_name = \" << cluster2_name_;\n\n  // Add cluster again and flush data to sink.\n  addSecondClusterHelper(buffer);\n\n  sink_->flush(snapshot_);\n\n  // Check that add worked.\n  cluster_message_map = buildClusterMap(buffer.toString());\n  ASSERT_NE(cluster_message_map.find(cluster1_name_), cluster_message_map.end())\n      << \"cluster1_name = \" << cluster1_name_;\n  ASSERT_NE(cluster_message_map.find(cluster2_name_), cluster_message_map.end())\n      << \"cluster2_name = \" << cluster2_name_;\n\n  // Check that old values of test_cluster2 were deleted.\n  validateResults(cluster_message_map[cluster2_name_], 0, 0, 0, 0, 0, window_size_);\n}\n\nTEST_F(HystrixSinkTest, HistogramTest) {\n  InSequence s;\n\n  // Create histogram for the Hystrix sink to read.\n  auto histogram = std::make_shared<NiceMock<Stats::MockParentHistogram>>();\n  histogram->name_ = \"cluster.\" + cluster1_name_ + \".upstream_rq_time\";\n  histogram->setTagExtractedName(\"cluster.upstream_rq_time\");\n  
histogram->addTag(Stats::Tag{Config::TagNames::get().CLUSTER_NAME, cluster1_name_});\n  histogram->used_ = true;\n\n  // Init with data such that the quantile value is equal to the quantile.\n  std::vector<uint64_t> h1_interval_values;\n  for (size_t i = 0; i < 100; ++i) {\n    h1_interval_values.push_back(i);\n  }\n\n  HistogramWrapper hist1_interval;\n  hist1_interval.setHistogramValues(h1_interval_values);\n\n  Stats::HistogramStatisticsImpl h1_interval_statistics(hist1_interval.getHistogram());\n  ON_CALL(*histogram, intervalStatistics())\n      .WillByDefault(testing::ReturnRef(h1_interval_statistics));\n  snapshot_.histograms_.push_back(*histogram);\n\n  Buffer::OwnedImpl buffer = createClusterAndCallbacks();\n  // Register callback to sink.\n  sink_->registerConnection(&callbacks_);\n  sink_->flush(snapshot_);\n\n  absl::node_hash_map<std::string, std::string> cluster_message_map =\n      buildClusterMap(buffer.toString());\n\n  Json::ObjectSharedPtr latency = Json::Factory::loadFromString(cluster_message_map[cluster1_name_])\n                                      ->getObject(\"latencyExecute\");\n\n  // Data was added such that the value equals the quantile:\n  // \"latencyExecute\": {\"99.5\": 99.500000, \"95\": 95.000000, \"90\": 90.000000, \"100\": 100.000000, \"0\":\n  // 0.000000, \"25\": 25.000000, \"99\": 99.000000, \"50\": 50.000000, \"75\": 75.000000}.\n  for (const double quantile : hystrix_quantiles) {\n    EXPECT_EQ(quantile * 100, latency->getDouble(fmt::sprintf(\"%g\", quantile * 100)));\n  }\n}\n\nTEST_F(HystrixSinkTest, HystrixEventStreamHandler) {\n  InSequence s;\n  Buffer::OwnedImpl buffer = createClusterAndCallbacks();\n  // Register callback to sink.\n  sink_->registerConnection(&callbacks_);\n\n  // This value doesn't matter in handlerHystrixEventStream\n  absl::string_view path_and_query;\n\n  Http::TestResponseHeaderMapImpl response_headers;\n\n  NiceMock<Server::MockAdminStream> admin_stream_mock;\n  
NiceMock<Network::MockConnection> connection_mock;\n\n  auto addr_instance_ = Envoy::Network::Utility::parseInternetAddress(\"2.3.4.5\", 123, false);\n\n  Http::MockHttp1StreamEncoderOptions stream_encoder_options;\n  ON_CALL(admin_stream_mock, getDecoderFilterCallbacks()).WillByDefault(ReturnRef(callbacks_));\n  ON_CALL(admin_stream_mock, http1StreamEncoderOptions())\n      .WillByDefault(Return(Http::Http1StreamEncoderOptionsOptRef(stream_encoder_options)));\n  ON_CALL(callbacks_, connection()).WillByDefault(Return(&connection_mock));\n  ON_CALL(connection_mock, remoteAddress()).WillByDefault(ReturnRef(addr_instance_));\n\n  EXPECT_CALL(stream_encoder_options, disableChunkEncoding());\n  ASSERT_EQ(\n      sink_->handlerHystrixEventStream(path_and_query, response_headers, buffer, admin_stream_mock),\n      Http::Code::OK);\n\n  // Check that response_headers has been set correctly\n  EXPECT_EQ(response_headers.ContentType()->value(), \"text/event-stream\");\n  EXPECT_EQ(response_headers.get_(\"cache-control\"), \"no-cache\");\n  EXPECT_EQ(response_headers.Connection()->value(), \"close\");\n  EXPECT_EQ(response_headers.get_(\"access-control-allow-origin\"), \"*\");\n  EXPECT_THAT(response_headers.get_(\"access-control-allow-headers\"), HasSubstr(\"Accept\"));\n}\n\n} // namespace\n} // namespace Hystrix\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/metrics_service/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.stat_sinks.metrics_service\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/extensions/stat_sinks/metrics_service:config\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"metrics_service_test\",\n    srcs = [\"grpc_metrics_service_impl_test.cc\"],\n    extension_name = \"envoy.stat_sinks.metrics_service\",\n    deps = [\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/stat_sinks/metrics_service:metrics_service_grpc_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"@envoy_api//envoy/service/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"metrics_service_integration_test\",\n    srcs = [\"metrics_service_integration_test.cc\"],\n    extension_name = \"envoy.stat_sinks.metrics_service\",\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/stats:histogram_lib\",\n        \"//source/extensions/stat_sinks/metrics_service:config\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        
\"//test/integration:http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/metrics/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/stats_sinks/metrics_service/config_test.cc",
    "content": "#include \"envoy/registry/registry.h\"\n\n#include \"extensions/stat_sinks/metrics_service/config.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace MetricsService {\nnamespace {\n\n// Test that the deprecated extension name still functions.\nTEST(MetricsServiceConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.metrics_service\";\n\n  ASSERT_NE(nullptr, Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(\n                         deprecated_name));\n}\n\n} // namespace\n} // namespace MetricsService\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc",
    "content": "#include \"envoy/service/metrics/v3/metrics_service.pb.h\"\n\n#include \"extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\nusing namespace std::chrono_literals;\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace MetricsService {\nnamespace {\n\nclass GrpcMetricsStreamerImplTest : public testing::Test {\npublic:\n  using MockMetricsStream = Grpc::MockAsyncStream;\n  using MetricsServiceCallbacks =\n      Grpc::AsyncStreamCallbacks<envoy::service::metrics::v3::StreamMetricsResponse>;\n\n  GrpcMetricsStreamerImplTest() {\n    EXPECT_CALL(*factory_, create()).WillOnce(Invoke([this] {\n      return Grpc::RawAsyncClientPtr{async_client_};\n    }));\n    streamer_ = std::make_unique<GrpcMetricsStreamerImpl>(\n        Grpc::AsyncClientFactoryPtr{factory_}, local_info_,\n        envoy::config::core::v3::ApiVersion::AUTO);\n  }\n\n  void expectStreamStart(MockMetricsStream& stream, MetricsServiceCallbacks** callbacks_to_set) {\n    EXPECT_CALL(*async_client_, startRaw(_, _, _, _))\n        .WillOnce(Invoke([&stream, callbacks_to_set](absl::string_view, absl::string_view,\n                                                     Grpc::RawAsyncStreamCallbacks& callbacks,\n                                                     const Http::AsyncClient::StreamOptions&) {\n          *callbacks_to_set = dynamic_cast<MetricsServiceCallbacks*>(&callbacks);\n          return &stream;\n        }));\n  }\n\n  LocalInfo::MockLocalInfo local_info_;\n  Grpc::MockAsyncClient* async_client_{new NiceMock<Grpc::MockAsyncClient>};\n  Grpc::MockAsyncClientFactory* factory_{new 
Grpc::MockAsyncClientFactory};\n  GrpcMetricsStreamerImplPtr streamer_;\n};\n\n// Test basic metrics streaming flow.\nTEST_F(GrpcMetricsStreamerImplTest, BasicFlow) {\n  InSequence s;\n\n  // Start a stream and send first message.\n  MockMetricsStream stream1;\n  MetricsServiceCallbacks* callbacks1;\n  expectStreamStart(stream1, &callbacks1);\n  EXPECT_CALL(local_info_, node());\n  EXPECT_CALL(stream1, sendMessageRaw_(_, false));\n  envoy::service::metrics::v3::StreamMetricsMessage message_metrics1;\n  streamer_->send(message_metrics1);\n  // Verify that sending an empty response message doesn't do anything bad.\n  callbacks1->onReceiveMessage(\n      std::make_unique<envoy::service::metrics::v3::StreamMetricsResponse>());\n}\n\n// Test that stream failure is handled correctly.\nTEST_F(GrpcMetricsStreamerImplTest, StreamFailure) {\n  InSequence s;\n\n  EXPECT_CALL(*async_client_, startRaw(_, _, _, _))\n      .WillOnce(\n          Invoke([](absl::string_view, absl::string_view, Grpc::RawAsyncStreamCallbacks& callbacks,\n                    const Http::AsyncClient::StreamOptions&) {\n            callbacks.onRemoteClose(Grpc::Status::Internal, \"bad\");\n            return nullptr;\n          }));\n  EXPECT_CALL(local_info_, node());\n  envoy::service::metrics::v3::StreamMetricsMessage message_metrics1;\n  streamer_->send(message_metrics1);\n}\n\nclass MockGrpcMetricsStreamer : public GrpcMetricsStreamer {\npublic:\n  // GrpcMetricsStreamer\n  MOCK_METHOD(void, send, (envoy::service::metrics::v3::StreamMetricsMessage & message));\n};\n\nclass MetricsServiceSinkTest : public testing::Test {\npublic:\n  MetricsServiceSinkTest() = default;\n\n  NiceMock<Stats::MockMetricSnapshot> snapshot_;\n  Event::SimulatedTimeSystem time_system_;\n  std::shared_ptr<MockGrpcMetricsStreamer> streamer_{new MockGrpcMetricsStreamer()};\n};\n\nTEST_F(MetricsServiceSinkTest, CheckSendCall) {\n  MetricsServiceSink sink(streamer_, time_system_, false);\n\n  auto counter = 
std::make_shared<NiceMock<Stats::MockCounter>>();\n  counter->name_ = \"test_counter\";\n  counter->latch_ = 1;\n  counter->used_ = true;\n  snapshot_.counters_.push_back({1, *counter});\n\n  auto gauge = std::make_shared<NiceMock<Stats::MockGauge>>();\n  gauge->name_ = \"test_gauge\";\n  gauge->value_ = 1;\n  gauge->used_ = true;\n  snapshot_.gauges_.push_back(*gauge);\n\n  auto histogram = std::make_shared<NiceMock<Stats::MockParentHistogram>>();\n  histogram->name_ = \"test_histogram\";\n  histogram->used_ = true;\n\n  EXPECT_CALL(*streamer_, send(_));\n\n  sink.flush(snapshot_);\n}\n\nTEST_F(MetricsServiceSinkTest, CheckStatsCount) {\n  MetricsServiceSink sink(streamer_, time_system_, false);\n\n  auto counter = std::make_shared<NiceMock<Stats::MockCounter>>();\n  counter->name_ = \"test_counter\";\n  counter->value_ = 100;\n  counter->used_ = true;\n  snapshot_.counters_.push_back({1, *counter});\n\n  auto gauge = std::make_shared<NiceMock<Stats::MockGauge>>();\n  gauge->name_ = \"test_gauge\";\n  gauge->value_ = 1;\n  gauge->used_ = true;\n  snapshot_.gauges_.push_back(*gauge);\n\n  EXPECT_CALL(*streamer_, send(_))\n      .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) {\n        EXPECT_EQ(2, message.envoy_metrics_size());\n      }));\n  sink.flush(snapshot_);\n\n  // Verify only newly added metrics come after endFlush call.\n  gauge->used_ = false;\n  EXPECT_CALL(*streamer_, send(_))\n      .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) {\n        EXPECT_EQ(1, message.envoy_metrics_size());\n      }));\n  sink.flush(snapshot_);\n}\n\n// Test that verifies counters are correctly reported as current value when configured to do so.\nTEST_F(MetricsServiceSinkTest, ReportCountersValues) {\n  MetricsServiceSink sink(streamer_, time_system_, false);\n\n  auto counter = std::make_shared<NiceMock<Stats::MockCounter>>();\n  counter->name_ = \"test_counter\";\n  counter->value_ = 100;\n  counter->used_ = 
true;\n  snapshot_.counters_.push_back({1, *counter});\n\n  EXPECT_CALL(*streamer_, send(_))\n      .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) {\n        EXPECT_EQ(1, message.envoy_metrics_size());\n        EXPECT_EQ(100, message.envoy_metrics(0).metric(0).counter().value());\n      }));\n  sink.flush(snapshot_);\n}\n\n// Test that verifies counters are reported as the delta between flushes when configured to do so.\nTEST_F(MetricsServiceSinkTest, ReportCountersAsDeltas) {\n  MetricsServiceSink sink(streamer_, time_system_, true);\n\n  auto counter = std::make_shared<NiceMock<Stats::MockCounter>>();\n  counter->name_ = \"test_counter\";\n  counter->value_ = 100;\n  counter->used_ = true;\n  snapshot_.counters_.push_back({1, *counter});\n\n  EXPECT_CALL(*streamer_, send(_))\n      .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) {\n        EXPECT_EQ(1, message.envoy_metrics_size());\n        EXPECT_EQ(1, message.envoy_metrics(0).metric(0).counter().value());\n      }));\n  sink.flush(snapshot_);\n}\n\n} // namespace\n} // namespace MetricsService\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/metrics/v3/metrics_service.pb.h\"\n#include \"envoy/service/metrics/v3/metrics_service.pb.h\"\n\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/common.h\"\n#include \"common/stats/histogram_impl.h\"\n#include \"common/version/version.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::AssertionResult;\n\nnamespace Envoy {\nnamespace {\n\nclass MetricsServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest,\n                                      public HttpIntegrationTest {\npublic:\n  MetricsServiceIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {}\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // metrics_service cluster for Envoy gRPC.\n      auto* metrics_service_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      metrics_service_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      metrics_service_cluster->set_name(\"metrics_service\");\n      metrics_service_cluster->mutable_http2_protocol_options();\n      // metrics_service gRPC service definition.\n      auto* metrics_sink = bootstrap.add_stats_sinks();\n      metrics_sink->set_name(\"envoy.stat_sinks.metrics_service\");\n      envoy::config::metrics::v3::MetricsServiceConfig config;\n      setGrpcService(*config.mutable_grpc_service(), \"metrics_service\",\n                     fake_upstreams_.back()->localAddress());\n      config.set_transport_api_version(apiVersion());\n      
metrics_sink->mutable_typed_config()->PackFrom(config);\n      // Shrink reporting period down to 1s to make test not take forever.\n      bootstrap.mutable_stats_flush_interval()->CopyFrom(\n          Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n    });\n\n    HttpIntegrationTest::initialize();\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult waitForMetricsServiceConnection() {\n    return fake_upstreams_[1]->waitForHttpConnection(*dispatcher_,\n                                                     fake_metrics_service_connection_);\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult waitForMetricsStream() {\n    return fake_metrics_service_connection_->waitForNewStream(*dispatcher_,\n                                                              metrics_service_request_);\n  }\n\n  ABSL_MUST_USE_RESULT\n  AssertionResult waitForMetricsRequest() {\n    bool known_summary_exists = false;\n    bool known_histogram_exists = false;\n    bool known_counter_exists = false;\n    bool known_gauge_exists = false;\n\n    // Sometimes stats do not come in the first flush cycle, this loop ensures that we wait till\n    // required stats are flushed.\n    // TODO(ramaraochavali): Figure out a more robust way to find out all required stats have been\n    // flushed.\n    while (!(known_counter_exists && known_gauge_exists && known_histogram_exists)) {\n      envoy::service::metrics::v3::StreamMetricsMessage request_msg;\n      VERIFY_ASSERTION(metrics_service_request_->waitForGrpcMessage(*dispatcher_, request_msg));\n      EXPECT_EQ(\"POST\", metrics_service_request_->headers().getMethodValue());\n      EXPECT_EQ(TestUtility::getVersionedMethodPath(\"envoy.service.metrics.{}.MetricsService\",\n                                                    \"StreamMetrics\", apiVersion()),\n                metrics_service_request_->headers().getPathValue());\n      EXPECT_EQ(\"application/grpc\", metrics_service_request_->headers().getContentTypeValue());\n      
EXPECT_TRUE(request_msg.envoy_metrics_size() > 0);\n      const Protobuf::RepeatedPtrField<::io::prometheus::client::MetricFamily>& envoy_metrics =\n          request_msg.envoy_metrics();\n\n      for (const ::io::prometheus::client::MetricFamily& metrics_family : envoy_metrics) {\n        if (metrics_family.name() == \"cluster.cluster_0.membership_change\" &&\n            metrics_family.type() == ::io::prometheus::client::MetricType::COUNTER) {\n          known_counter_exists = true;\n          EXPECT_EQ(1, metrics_family.metric(0).counter().value());\n        }\n        if (metrics_family.name() == \"cluster.cluster_0.membership_total\" &&\n            metrics_family.type() == ::io::prometheus::client::MetricType::GAUGE) {\n          known_gauge_exists = true;\n          EXPECT_EQ(1, metrics_family.metric(0).gauge().value());\n        }\n        if (metrics_family.name() == \"cluster.cluster_0.upstream_rq_time\" &&\n            metrics_family.type() == ::io::prometheus::client::MetricType::SUMMARY) {\n          known_summary_exists = true;\n          Stats::HistogramStatisticsImpl empty_statistics;\n          EXPECT_EQ(metrics_family.metric(0).summary().quantile_size(),\n                    empty_statistics.supportedQuantiles().size());\n        }\n        if (metrics_family.name() == \"cluster.cluster_0.upstream_rq_time\" &&\n            metrics_family.type() == ::io::prometheus::client::MetricType::HISTOGRAM) {\n          known_histogram_exists = true;\n          EXPECT_EQ(metrics_family.metric(0).histogram().bucket_size(),\n                    Stats::HistogramSettingsImpl::defaultBuckets().size());\n        }\n        ASSERT(metrics_family.metric(0).has_timestamp_ms());\n        if (known_counter_exists && known_gauge_exists && known_histogram_exists) {\n          break;\n        }\n      }\n    }\n    EXPECT_TRUE(known_counter_exists);\n    EXPECT_TRUE(known_gauge_exists);\n    EXPECT_TRUE(known_summary_exists);\n    EXPECT_TRUE(known_histogram_exists);\n\n   
 return AssertionSuccess();\n  }\n\n  void cleanup() {\n    if (fake_metrics_service_connection_ != nullptr) {\n      AssertionResult result = fake_metrics_service_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_metrics_service_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n  }\n\n  FakeHttpConnectionPtr fake_metrics_service_connection_;\n  FakeStreamPtr metrics_service_request_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, MetricsServiceIntegrationTest,\n                         VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Test a basic metric service flow.\nTEST_P(MetricsServiceIntegrationTest, BasicFlow) {\n  initialize();\n  // Send an empty request so that histogram values merged for cluster_0.\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-lyft-user-id\", \"123\"}};\n  sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  ASSERT_TRUE(waitForMetricsServiceConnection());\n  ASSERT_TRUE(waitForMetricsStream());\n  ASSERT_TRUE(waitForMetricsRequest());\n\n  // Send an empty response and end the stream. 
This should never happen but make sure nothing\n  // breaks and we make a new stream on a follow up request.\n  metrics_service_request_->startGrpcStream();\n  envoy::service::metrics::v3::StreamMetricsResponse response_msg;\n  metrics_service_request_->sendGrpcMessage(response_msg);\n  metrics_service_request_->finishGrpcStream(Grpc::Status::Ok);\n\n  switch (clientType()) {\n  case Grpc::ClientType::EnvoyGrpc:\n    test_server_->waitForGaugeEq(\"cluster.metrics_service.upstream_rq_active\", 0);\n    break;\n  case Grpc::ClientType::GoogleGrpc:\n    test_server_->waitForCounterGe(\"grpc.metrics_service.streams_closed_0\", 1);\n    break;\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n  cleanup();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/statsd/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.stat_sinks.statsd\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/extensions/stat_sinks/statsd:config\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/stats_sinks/statsd/config_test.cc",
    "content": "#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/config/well_known_names.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/stat_sinks/common/statsd/statsd.h\"\n#include \"extensions/stat_sinks/statsd/config.h\"\n#include \"extensions/stat_sinks/well_known_names.h\"\n\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Statsd {\nnamespace {\n\nTEST(StatsConfigTest, ValidTcpStatsd) {\n  const std::string name = StatsSinkNames::get().Statsd;\n\n  envoy::config::metrics::v3::StatsdSink sink_config;\n  sink_config.set_tcp_cluster_name(\"fake_cluster\");\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  EXPECT_NE(sink, nullptr);\n  EXPECT_NE(dynamic_cast<Common::Statsd::TcpStatsdSink*>(sink.get()), nullptr);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(StatsConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.statsd\";\n\n  ASSERT_NE(nullptr, Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(\n                         deprecated_name));\n}\n\nclass StatsConfigParameterizedTest : public 
testing::TestWithParam<Network::Address::IpVersion> {};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, StatsConfigParameterizedTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(StatsConfigParameterizedTest, UdpSinkDefaultPrefix) {\n  const std::string name = StatsSinkNames::get().Statsd;\n  const auto& defaultPrefix = Common::Statsd::getDefaultPrefix();\n\n  envoy::config::metrics::v3::StatsdSink sink_config;\n  envoy::config::core::v3::Address& address = *sink_config.mutable_address();\n  envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address();\n  socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP);\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    socket_address.set_address(\"127.0.0.1\");\n  } else {\n    socket_address.set_address(\"::1\");\n  }\n  socket_address.set_port_value(8125);\n  EXPECT_EQ(sink_config.prefix(), \"\");\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  ASSERT_NE(sink, nullptr);\n\n  auto udp_sink = dynamic_cast<Common::Statsd::UdpStatsdSink*>(sink.get());\n  ASSERT_NE(udp_sink, nullptr);\n  EXPECT_EQ(udp_sink->getPrefix(), defaultPrefix);\n}\n\nTEST_P(StatsConfigParameterizedTest, UdpSinkCustomPrefix) {\n  const std::string name = StatsSinkNames::get().Statsd;\n  const std::string customPrefix = \"prefix.test\";\n\n  envoy::config::metrics::v3::StatsdSink sink_config;\n  envoy::config::core::v3::Address& address = *sink_config.mutable_address();\n  
envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address();\n  socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP);\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    socket_address.set_address(\"127.0.0.1\");\n  } else {\n    socket_address.set_address(\"::1\");\n  }\n  socket_address.set_port_value(8125);\n  sink_config.set_prefix(customPrefix);\n  EXPECT_NE(sink_config.prefix(), \"\");\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  ASSERT_NE(sink, nullptr);\n\n  auto udp_sink = dynamic_cast<Common::Statsd::UdpStatsdSink*>(sink.get());\n  ASSERT_NE(udp_sink, nullptr);\n  EXPECT_EQ(udp_sink->getPrefix(), customPrefix);\n}\n\nTEST(StatsConfigTest, TcpSinkDefaultPrefix) {\n  const std::string name = StatsSinkNames::get().Statsd;\n\n  envoy::config::metrics::v3::StatsdSink sink_config;\n  const auto& defaultPrefix = Common::Statsd::getDefaultPrefix();\n  sink_config.set_tcp_cluster_name(\"fake_cluster\");\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n  EXPECT_EQ(sink_config.prefix(), \"\");\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  ASSERT_NE(sink, nullptr);\n\n  auto tcp_sink = dynamic_cast<Common::Statsd::TcpStatsdSink*>(sink.get());\n  
ASSERT_NE(tcp_sink, nullptr);\n  EXPECT_EQ(tcp_sink->getPrefix(), defaultPrefix);\n}\n\nTEST(StatsConfigTest, TcpSinkCustomPrefix) {\n  const std::string name = StatsSinkNames::get().Statsd;\n\n  envoy::config::metrics::v3::StatsdSink sink_config;\n  std::string prefix = \"prefixTest\";\n  sink_config.set_tcp_cluster_name(\"fake_cluster\");\n  ASSERT_NE(sink_config.prefix(), prefix);\n  sink_config.set_prefix(prefix);\n  EXPECT_EQ(sink_config.prefix(), prefix);\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  ASSERT_NE(sink, nullptr);\n\n  auto tcp_sink = dynamic_cast<Common::Statsd::TcpStatsdSink*>(sink.get());\n  ASSERT_NE(tcp_sink, nullptr);\n  EXPECT_EQ(tcp_sink->getPrefix(), prefix);\n}\n\nclass StatsConfigLoopbackTest : public testing::TestWithParam<Network::Address::IpVersion> {};\nINSTANTIATE_TEST_SUITE_P(IpVersions, StatsConfigLoopbackTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(StatsConfigLoopbackTest, ValidUdpIpStatsd) {\n  const std::string name = StatsSinkNames::get().Statsd;\n\n  envoy::config::metrics::v3::StatsdSink sink_config;\n  envoy::config::core::v3::Address& address = *sink_config.mutable_address();\n  envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address();\n  socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP);\n  auto loopback_flavor = Network::Test::getCanonicalLoopbackAddress(GetParam());\n  socket_address.set_address(loopback_flavor->ip()->addressAsString());\n  
socket_address.set_port_value(8125);\n\n  Server::Configuration::StatsSinkFactory* factory =\n      Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(name);\n  ASSERT_NE(factory, nullptr);\n\n  ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto();\n  TestUtility::jsonConvert(sink_config, *message);\n\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  Stats::SinkPtr sink = factory->createStatsSink(*message, server);\n  EXPECT_NE(sink, nullptr);\n  EXPECT_NE(dynamic_cast<Common::Statsd::UdpStatsdSink*>(sink.get()), nullptr);\n  EXPECT_EQ(dynamic_cast<Common::Statsd::UdpStatsdSink*>(sink.get())->getUseTagForTest(), false);\n}\n\n// Negative test for protoc-gen-validate constraints for statsd.\nTEST(StatsdConfigTest, ValidateFail) {\n  NiceMock<Server::Configuration::MockServerFactoryContext> server;\n  EXPECT_THROW(\n      StatsdSinkFactory().createStatsSink(envoy::config::metrics::v3::StatsdSink(), server),\n      ProtoValidationException);\n}\n\n} // namespace\n} // namespace Statsd\n} // namespace StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/wasm/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//bazel:envoy_select.bzl\",\n    \"envoy_select_wasm\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/stats_sinks/wasm/test_data:test_context_cpp.wasm\",\n    ]),\n    extension_name = \"envoy.stat_sinks.wasm\",\n    deps = [\n        \"//source/extensions/stat_sinks/wasm:config\",\n        \"//test/extensions/stats_sinks/wasm/test_data:test_context_cpp_plugin\",\n        \"//test/mocks/server:server_mocks\",\n        \"@envoy_api//envoy/extensions/stat_sinks/wasm/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"wasm_stat_sink_test\",\n    srcs = [\"wasm_stat_sink_test.cc\"],\n    data = envoy_select_wasm([\n        \"//test/extensions/stats_sinks/wasm/test_data:test_context_cpp.wasm\",\n    ]),\n    extension_name = \"envoy.stat_sinks.wasm\",\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//test/extensions/stats_sinks/wasm/test_data:test_context_cpp_plugin\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:wasm_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/stats_sinks/wasm/config_test.cc",
    "content": "#include \"envoy/extensions/stat_sinks/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/stat_sinks/wasm/config.h\"\n#include \"extensions/stat_sinks/wasm/wasm_stat_sink_impl.h\"\n#include \"extensions/stat_sinks/well_known_names.h\"\n\n#include \"test/mocks/server/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace StatSinks {\nnamespace Wasm {\n\nclass WasmStatSinkConfigTest : public testing::TestWithParam<std::string> {\nprotected:\n  WasmStatSinkConfigTest() {\n    config_.mutable_config()->mutable_vm_config()->set_runtime(\n        absl::StrCat(\"envoy.wasm.runtime.\", GetParam()));\n    if (GetParam() != \"null\") {\n      config_.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename(\n          TestEnvironment::substitute(\n              \"{{ test_rundir \"\n              \"}}/test/extensions/stats_sinks/wasm/test_data/test_context_cpp.wasm\"));\n    } else {\n      config_.mutable_config()\n          ->mutable_vm_config()\n          ->mutable_code()\n          ->mutable_local()\n          ->set_inline_bytes(\"CommonWasmTestContextCpp\");\n    }\n    config_.mutable_config()->set_name(\"test\");\n  }\n\n  void initializeWithConfig(const envoy::extensions::stat_sinks::wasm::v3::Wasm& config) {\n    auto factory = Registry::FactoryRegistry<Server::Configuration::StatsSinkFactory>::getFactory(\n        StatsSinkNames::get().Wasm);\n    ASSERT_NE(factory, nullptr);\n    api_ = Api::createApiForTest(stats_store_);\n    EXPECT_CALL(context_, api()).WillRepeatedly(testing::ReturnRef(*api_));\n    EXPECT_CALL(context_, initManager()).WillRepeatedly(testing::ReturnRef(init_manager_));\n    
EXPECT_CALL(context_, lifecycleNotifier())\n        .WillRepeatedly(testing::ReturnRef(lifecycle_notifier_));\n    sink_ = factory->createStatsSink(config, context_);\n    EXPECT_CALL(init_watcher_, ready());\n    init_manager_.initialize(init_watcher_);\n  }\n\n  envoy::extensions::stat_sinks::wasm::v3::Wasm config_;\n  testing::NiceMock<Server::Configuration::MockServerFactoryContext> context_;\n  testing::NiceMock<Server::MockServerLifecycleNotifier> lifecycle_notifier_;\n  Init::ExpectableWatcherImpl init_watcher_;\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr api_;\n  Init::ManagerImpl init_manager_{\"init_manager\"};\n  Stats::SinkPtr sink_;\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto testing_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    \"v8\",\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    \"wavm\",\n#endif\n    \"null\");\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmStatSinkConfigTest, testing_values);\n\nTEST_P(WasmStatSinkConfigTest, CreateWasmFromEmpty) {\n  envoy::extensions::stat_sinks::wasm::v3::Wasm config;\n  EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config), Extensions::Common::Wasm::WasmException,\n                            \"Unable to create Wasm Stat Sink \");\n}\n\nTEST_P(WasmStatSinkConfigTest, CreateWasmFailOpen) {\n  envoy::extensions::stat_sinks::wasm::v3::Wasm config;\n  config.mutable_config()->set_fail_open(true);\n  EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config), Extensions::Common::Wasm::WasmException,\n                            \"Unable to create Wasm Stat Sink \");\n}\n\nTEST_P(WasmStatSinkConfigTest, CreateWasmFromWASM) {\n  initializeWithConfig(config_);\n\n  EXPECT_NE(sink_, nullptr);\n  NiceMock<Stats::MockMetricSnapshot> snapshot;\n  sink_->flush(snapshot);\n  NiceMock<Stats::MockHistogram> histogram;\n  sink_->onHistogramComplete(histogram, 0);\n}\n\n} // namespace Wasm\n} // namespace 
StatSinks\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/stats_sinks/wasm/test_data/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\nload(\"//bazel/wasm:wasm.bzl\", \"envoy_wasm_cc_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"test_context_cpp_plugin\",\n    srcs = [\n        \"test_context_cpp.cc\",\n        \"test_context_cpp_null_plugin.cc\",\n    ],\n    copts = [\"-DNULL_PLUGIN=1\"],\n    deps = [\n        \"//source/extensions/common/wasm:wasm_hdr\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//source/extensions/common/wasm:well_known_names\",\n        \"//source/extensions/common/wasm/ext:envoy_null_plugin\",\n    ],\n)\n\nenvoy_wasm_cc_binary(\n    name = \"test_context_cpp.wasm\",\n    srcs = [\"test_context_cpp.cc\"],\n    deps = [\n        \"//source/extensions/common/wasm/ext:envoy_proxy_wasm_api_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/stats_sinks/wasm/test_data/test_context_cpp.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <string>\n#include <unordered_map>\n#include <vector>\n\n#ifndef NULL_PLUGIN\n#include \"proxy_wasm_intrinsics.h\"\n#include \"source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h\"\n#else\n#include \"extensions/common/wasm/ext/envoy_null_plugin.h\"\n#endif\n\nSTART_WASM_PLUGIN(CommonWasmTestContextCpp)\n\nclass TestContext : public EnvoyContext {\npublic:\n  explicit TestContext(uint32_t id, RootContext* root) : EnvoyContext(id, root) {}\n};\n\nclass TestRootContext : public EnvoyRootContext {\npublic:\n  explicit TestRootContext(uint32_t id, std::string_view root_id) : EnvoyRootContext(id, root_id) {}\n\n  void onStatsUpdate(uint32_t result_size) override;\n  bool onDone() override;\n};\n\nstatic RegisterContextFactory register_TestContext(CONTEXT_FACTORY(TestContext),\n                                                   ROOT_FACTORY(TestRootContext));\n\nvoid TestRootContext::onStatsUpdate(uint32_t result_size) {\n  logWarn(\"TestRootContext::onStat\");\n  auto stats_buffer = getBufferBytes(WasmBufferType::CallData, 0, result_size);\n  auto stats = parseStatResults(stats_buffer->view());\n  for (auto& e : stats.counters) {\n    logInfo(\"TestRootContext::onStat \" + std::string(e.name) + \":\" + std::to_string(e.delta));\n  }\n  for (auto& e : stats.gauges) {\n    logInfo(\"TestRootContext::onStat \" + std::string(e.name) + \":\" + std::to_string(e.value));\n  }\n}\n\nbool TestRootContext::onDone() {\n  logWarn(\"TestRootContext::onDone \" + std::to_string(id()));\n  return true;\n}\n\nEND_WASM_PLUGIN\n"
  },
  {
    "path": "test/extensions/stats_sinks/wasm/test_data/test_context_cpp_null_plugin.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"include/proxy-wasm/null_plugin.h\"\n\nnamespace proxy_wasm {\nnamespace null_plugin {\nnamespace CommonWasmTestContextCpp {\nNullPluginRegistry* context_registry_;\n} // namespace CommonWasmTestContextCpp\n\nRegisterNullVmPluginFactory\n    register_common_wasm_test_context_cpp_plugin(\"CommonWasmTestContextCpp\", []() {\n      return std::make_unique<NullPlugin>(CommonWasmTestContextCpp::context_registry_);\n    });\n\n} // namespace null_plugin\n} // namespace proxy_wasm\n"
  },
  {
    "path": "test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc",
    "content": "#include \"envoy/server/lifecycle_notifier.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/wasm_base.h\"\n\n#include \"absl/types/optional.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Eq;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\nclass TestContext : public ::Envoy::Extensions::Common::Wasm::Context {\npublic:\n  using ::Envoy::Extensions::Common::Wasm::Context::Context;\n  ~TestContext() override = default;\n  using ::Envoy::Extensions::Common::Wasm::Context::log;\n  proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override {\n    std::cerr << std::string(message) << \"\\n\";\n    log_(static_cast<spdlog::level::level_enum>(level), message);\n    Extensions::Common::Wasm::Context::log(static_cast<spdlog::level::level_enum>(level), message);\n    return proxy_wasm::WasmResult::Ok;\n  }\n  MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message));\n};\n\nclass WasmCommonContextTest\n    : public Common::Wasm::WasmTestBase<testing::TestWithParam<std::string>> {\npublic:\n  WasmCommonContextTest() = default;\n\n  void setup(const std::string& code, std::string root_id = \"\") {\n    setupBase(\n        GetParam(), code,\n        [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {\n          return new TestContext(wasm, plugin);\n        },\n        root_id);\n  }\n  void setupContext() {\n    context_ = std::make_unique<TestContext>(wasm_->wasm().get(), root_context_->id(), plugin_);\n    context_->onCreate();\n  }\n\n  TestContext& rootContext() { return *static_cast<TestContext*>(root_context_); }\n  TestContext& context() { return *context_; }\n\n  std::unique_ptr<TestContext> context_;\n};\n\n// NB: this is required by VC++ which can not handle the use of macros in the macro definitions\n// used by INSTANTIATE_TEST_SUITE_P.\nauto 
testing_values = testing::Values(\n#if defined(ENVOY_WASM_V8)\n    \"v8\",\n#endif\n#if defined(ENVOY_WASM_WAVM)\n    \"wavm\",\n#endif\n    \"null\");\nINSTANTIATE_TEST_SUITE_P(Runtimes, WasmCommonContextTest, testing_values);\n\nTEST_P(WasmCommonContextTest, OnStat) {\n  std::string code;\n  NiceMock<Stats::MockMetricSnapshot> snapshot_;\n  if (GetParam() != \"null\") {\n    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(absl::StrCat(\n        \"{{ test_rundir }}/test/extensions/stats_sinks/wasm/test_data/test_context_cpp.wasm\")));\n  } else {\n    // The name of the Null VM plugin.\n    code = \"CommonWasmTestContextCpp\";\n  }\n  EXPECT_FALSE(code.empty());\n  setup(code);\n  setupContext();\n\n  EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq(\"TestRootContext::onStat\")));\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::info, Eq(\"TestRootContext::onStat upstream_rq_2xx:1\")));\n\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::info, Eq(\"TestRootContext::onStat upstream_rq_5xx:2\")));\n\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::info, Eq(\"TestRootContext::onStat membership_total:3\")));\n\n  EXPECT_CALL(rootContext(),\n              log_(spdlog::level::info, Eq(\"TestRootContext::onStat duration_total:4\")));\n\n  EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq(\"TestRootContext::onDone 1\")));\n\n  NiceMock<Stats::MockCounter> success_counter;\n  success_counter.name_ = \"upstream_rq_2xx\";\n  success_counter.latch_ = 1;\n  success_counter.used_ = true;\n\n  NiceMock<Stats::MockCounter> error_5xx_counter;\n  error_5xx_counter.name_ = \"upstream_rq_5xx\";\n  error_5xx_counter.latch_ = 1;\n  error_5xx_counter.used_ = true;\n\n  snapshot_.counters_.push_back({1, success_counter});\n  snapshot_.counters_.push_back({2, error_5xx_counter});\n\n  NiceMock<Stats::MockGauge> membership_total;\n  membership_total.name_ = \"membership_total\";\n  
membership_total.value_ = 3;\n  membership_total.used_ = true;\n  snapshot_.gauges_.push_back(membership_total);\n\n  NiceMock<Stats::MockGauge> duration_total;\n  duration_total.name_ = \"duration_total\";\n  duration_total.value_ = 4;\n  duration_total.used_ = true;\n  snapshot_.gauges_.push_back(duration_total);\n\n  rootContext().onStatsUpdate(snapshot_);\n}\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/common/ot/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"opentracing_driver_impl_test\",\n    srcs = [\n        \"opentracing_driver_impl_test.cc\",\n    ],\n    extension_name = \"envoy.tracers.dynamic_ot\",\n    # TODO(wrowe): envoy_extension_ rules don't currently exclude windows extensions\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//source/extensions/tracers/dynamic_ot:dynamic_opentracing_driver_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"@io_opentracing_cpp//mocktracer\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc",
    "content": "#include <memory>\n\n#include \"extensions/tracers/common/ot/opentracing_driver_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"opentracing/mocktracer/in_memory_recorder.h\"\n#include \"opentracing/mocktracer/tracer.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Common {\nnamespace Ot {\nnamespace {\n\nclass TestDriver : public OpenTracingDriver {\npublic:\n  TestDriver(OpenTracingDriver::PropagationMode propagation_mode,\n             const opentracing::mocktracer::PropagationOptions& propagation_options,\n             Stats::Scope& scope)\n      : OpenTracingDriver{scope}, propagation_mode_{propagation_mode} {\n    opentracing::mocktracer::MockTracerOptions options;\n    auto recorder = new opentracing::mocktracer::InMemoryRecorder{};\n    recorder_ = recorder;\n    options.recorder.reset(recorder);\n    options.propagation_options = propagation_options;\n    tracer_ = std::make_shared<opentracing::mocktracer::MockTracer>(std::move(options));\n  }\n\n  const opentracing::mocktracer::InMemoryRecorder& recorder() const { return *recorder_; }\n\n  // OpenTracingDriver\n  opentracing::Tracer& tracer() override { return *tracer_; }\n\n  PropagationMode propagationMode() const override { return propagation_mode_; }\n\nprivate:\n  const OpenTracingDriver::PropagationMode propagation_mode_;\n  const opentracing::mocktracer::InMemoryRecorder* recorder_;\n  std::shared_ptr<opentracing::mocktracer::MockTracer> tracer_;\n};\n\nclass OpenTracingDriverTest : public testing::Test {\npublic:\n  void\n  setupValidDriver(OpenTracingDriver::PropagationMode propagation_mode =\n                       OpenTracingDriver::PropagationMode::SingleHeader,\n                   const opentracing::mocktracer::PropagationOptions& propagation_options = {}) {\n    driver_ = 
std::make_unique<TestDriver>(propagation_mode, propagation_options, stats_);\n  }\n\n  const std::string operation_name_{\"test\"};\n  Http::TestRequestHeaderMapImpl request_headers_{\n      {\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-request-id\", \"foo\"}};\n  const Http::TestResponseHeaderMapImpl response_headers_{{\":status\", \"500\"}};\n  SystemTime start_time_;\n\n  std::unique_ptr<TestDriver> driver_;\n  Stats::TestUtil::TestStore stats_;\n\n  NiceMock<Tracing::MockConfig> config_;\n};\n\nTEST_F(OpenTracingDriverTest, FlushSpanWithTag) {\n  setupValidDriver();\n\n  Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n  first_span->setTag(\"abc\", \"123\");\n  first_span->finishSpan();\n\n  const std::map<std::string, opentracing::Value> expected_tags = {\n      {\"abc\", std::string{\"123\"}},\n      {opentracing::ext::span_kind, std::string{opentracing::ext::span_kind_rpc_server}}};\n\n  EXPECT_EQ(1, driver_->recorder().spans().size());\n  EXPECT_EQ(expected_tags, driver_->recorder().top().tags);\n}\n\nTEST_F(OpenTracingDriverTest, FlushSpanWithLog) {\n  setupValidDriver();\n\n  Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n  const auto timestamp =\n      SystemTime{std::chrono::duration_cast<SystemTime::duration>(std::chrono::hours{123})};\n  first_span->log(timestamp, \"abc\");\n  first_span->finishSpan();\n\n  const std::vector<opentracing::LogRecord> expected_logs = {\n      {timestamp, {{\"event\", std::string{\"abc\"}}}}};\n\n  EXPECT_EQ(1, driver_->recorder().spans().size());\n  EXPECT_EQ(expected_logs, driver_->recorder().top().logs);\n}\n\nTEST_F(OpenTracingDriverTest, FlushSpanWithBaggage) {\n  setupValidDriver();\n\n  Tracing::SpanPtr first_span = 
driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n  first_span->setBaggage(\"abc\", \"123\");\n  first_span->finishSpan();\n\n  const std::map<std::string, std::string> expected_baggage = {{\"abc\", \"123\"}};\n\n  EXPECT_EQ(1, driver_->recorder().spans().size());\n  EXPECT_EQ(expected_baggage, driver_->recorder().top().span_context.baggage);\n}\n\nTEST_F(OpenTracingDriverTest, TagSamplingFalseByDecision) {\n  setupValidDriver(OpenTracingDriver::PropagationMode::TracerNative, {});\n\n  Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, false});\n  first_span->finishSpan();\n\n  const std::map<std::string, opentracing::Value> expected_tags = {\n      {opentracing::ext::sampling_priority, 0},\n      {opentracing::ext::span_kind, std::string{opentracing::ext::span_kind_rpc_server}}};\n\n  EXPECT_EQ(1, driver_->recorder().spans().size());\n  EXPECT_EQ(expected_tags, driver_->recorder().top().tags);\n}\n\nTEST_F(OpenTracingDriverTest, TagSamplingFalseByFlag) {\n  setupValidDriver(OpenTracingDriver::PropagationMode::TracerNative, {});\n\n  Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n  first_span->setSampled(false);\n  first_span->finishSpan();\n\n  const std::map<std::string, opentracing::Value> expected_tags = {\n      {opentracing::ext::sampling_priority, 0},\n      {opentracing::ext::span_kind, std::string{opentracing::ext::span_kind_rpc_server}}};\n\n  EXPECT_EQ(1, driver_->recorder().spans().size());\n  EXPECT_EQ(expected_tags, driver_->recorder().top().tags);\n}\n\nTEST_F(OpenTracingDriverTest, TagSpanKindClient) {\n  
setupValidDriver(OpenTracingDriver::PropagationMode::TracerNative, {});\n\n  ON_CALL(config_, operationName()).WillByDefault(testing::Return(Tracing::OperationName::Egress));\n\n  Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n  first_span->finishSpan();\n\n  const std::map<std::string, opentracing::Value> expected_tags = {\n      {opentracing::ext::span_kind, std::string{opentracing::ext::span_kind_rpc_client}}};\n\n  EXPECT_EQ(1, driver_->recorder().spans().size());\n  EXPECT_EQ(expected_tags, driver_->recorder().top().tags);\n}\n\nTEST_F(OpenTracingDriverTest, TagSpanKindServer) {\n  setupValidDriver(OpenTracingDriver::PropagationMode::TracerNative, {});\n\n  ON_CALL(config_, operationName()).WillByDefault(testing::Return(Tracing::OperationName::Ingress));\n\n  Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n  first_span->finishSpan();\n\n  const std::map<std::string, opentracing::Value> expected_tags = {\n      {opentracing::ext::span_kind, std::string{opentracing::ext::span_kind_rpc_server}}};\n\n  EXPECT_EQ(1, driver_->recorder().spans().size());\n  EXPECT_EQ(expected_tags, driver_->recorder().top().tags);\n}\n\nTEST_F(OpenTracingDriverTest, InjectFailure) {\n  for (OpenTracingDriver::PropagationMode propagation_mode :\n       {OpenTracingDriver::PropagationMode::SingleHeader,\n        OpenTracingDriver::PropagationMode::TracerNative}) {\n    opentracing::mocktracer::PropagationOptions propagation_options;\n    propagation_options.inject_error_code = std::make_error_code(std::errc::bad_message);\n    setupValidDriver(propagation_mode, propagation_options);\n\n    Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                     
                          start_time_, {Tracing::Reason::Sampling, true});\n\n    const auto span_context_injection_error_count =\n        stats_.counter(\"tracing.opentracing.span_context_injection_error\").value();\n    EXPECT_FALSE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext));\n    span->injectContext(request_headers_);\n\n    EXPECT_EQ(span_context_injection_error_count + 1,\n              stats_.counter(\"tracing.opentracing.span_context_injection_error\").value());\n  }\n}\n\nTEST_F(OpenTracingDriverTest, ExtractWithUnindexedHeader) {\n  opentracing::mocktracer::PropagationOptions propagation_options;\n  propagation_options.propagation_key = \"unindexed-header\";\n  setupValidDriver(OpenTracingDriver::PropagationMode::TracerNative, propagation_options);\n\n  Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n  first_span->injectContext(request_headers_);\n\n  Tracing::SpanPtr second_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                    start_time_, {Tracing::Reason::Sampling, true});\n  second_span->finishSpan();\n  first_span->finishSpan();\n\n  auto spans = driver_->recorder().spans();\n  EXPECT_EQ(spans.at(1).span_context.span_id, spans.at(0).references.at(0).span_id);\n}\n\n} // namespace\n} // namespace Ot\n} // namespace Common\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/datadog/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"datadog_tracer_impl_test\",\n    srcs = [\n        \"datadog_tracer_impl_test.cc\",\n    ],\n    extension_name = \"envoy.tracers.datadog\",\n    # TODO(wrowe): envoy_extension_ rules don't currently exclude windows extensions\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//source/common/common:base64_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/tracers/datadog:datadog_tracer_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:thread_local_cluster_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.tracers.datadog\",\n    # TODO(wrowe): envoy_extension_ rules don't currently exclude windows extensions\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//source/extensions/tracers/datadog:config\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/tracers/datadog/config_test.cc",
    "content": "#include \"envoy/config/trace/v3/datadog.pb.h\"\n#include \"envoy/config/trace/v3/datadog.pb.validate.h\"\n#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n\n#include \"extensions/tracers/datadog/config.h\"\n\n#include \"test/mocks/server/tracer_factory.h\"\n#include \"test/mocks/server/tracer_factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Eq;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Datadog {\nnamespace {\n\nTEST(DatadogTracerConfigTest, DatadogHttpTracer) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  EXPECT_CALL(context.server_factory_context_.cluster_manager_, get(Eq(\"fake_cluster\")))\n      .WillRepeatedly(\n          Return(&context.server_factory_context_.cluster_manager_.thread_local_cluster_));\n  ON_CALL(*context.server_factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_,\n          features())\n      .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: datadog\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.DatadogConfig\n      collector_cluster: fake_cluster\n      service_name: fake_file\n   )EOF\";\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  DatadogTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr datadog_tracer = factory.createHttpTracer(*message, context);\n  EXPECT_NE(nullptr, datadog_tracer);\n}\n\n} // namespace\n} // namespace Datadog\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/datadog/datadog_tracer_impl_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <sstream>\n#include <string>\n\n#include \"envoy/config/trace/v3/datadog.pb.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/datadog/datadog_tracer_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/thread_local_cluster.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::DoAll;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::StrictMock;\nusing testing::WithArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Datadog {\nnamespace {\n\nclass DatadogDriverTest : public testing::Test {\npublic:\n  void setup(envoy::config::trace::v3::DatadogConfig& datadog_config, bool init_timer) {\n    cm_.thread_local_cluster_.cluster_.info_->name_ = \"fake_cluster\";\n    ON_CALL(cm_, httpAsyncClientForCluster(\"fake_cluster\"))\n        .WillByDefault(ReturnRef(cm_.async_client_));\n\n    if (init_timer) {\n      timer_ = new NiceMock<Event::MockTimer>(&tls_.dispatcher_);\n      EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(900), _));\n    }\n\n    driver_ = std::make_unique<Driver>(datadog_config, cm_, stats_, tls_, runtime_);\n  }\n\n  void setupValidDriver() {\n    EXPECT_CALL(cm_, 
get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n    ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features())\n        .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n    const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    )EOF\";\n    envoy::config::trace::v3::DatadogConfig datadog_config;\n    TestUtility::loadFromYaml(yaml_string, datadog_config);\n\n    setup(datadog_config, true);\n  }\n\n  const std::string operation_name_{\"test\"};\n  Http::TestRequestHeaderMapImpl request_headers_{\n      {\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-request-id\", \"foo\"}};\n  const Http::TestResponseHeaderMapImpl response_headers_{{\":status\", \"500\"}};\n  SystemTime start_time_;\n\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  std::unique_ptr<Driver> driver_;\n  NiceMock<Event::MockTimer>* timer_;\n  Stats::TestUtil::TestStore stats_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n\n  NiceMock<Tracing::MockConfig> config_;\n};\n\nTEST_F(DatadogDriverTest, InitializeDriver) {\n  {\n    envoy::config::trace::v3::DatadogConfig datadog_config;\n\n    EXPECT_THROW(setup(datadog_config, false), EnvoyException);\n  }\n\n  {\n    // Valid config but not valid cluster.\n    EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillOnce(Return(nullptr));\n\n    const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    )EOF\";\n    envoy::config::trace::v3::DatadogConfig datadog_config;\n    TestUtility::loadFromYaml(yaml_string, datadog_config);\n\n    EXPECT_THROW(setup(datadog_config, false), EnvoyException);\n  }\n\n  {\n    EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n    ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features())\n        
.WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n    const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    )EOF\";\n    envoy::config::trace::v3::DatadogConfig datadog_config;\n    TestUtility::loadFromYaml(yaml_string, datadog_config);\n\n    setup(datadog_config, true);\n  }\n}\n\nTEST_F(DatadogDriverTest, AllowCollectorClusterToBeAddedViaApi) {\n  EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features())\n      .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, addedViaApi()).WillByDefault(Return(true));\n\n  const std::string yaml_string = R\"EOF(\n  collector_cluster: fake_cluster\n  )EOF\";\n  envoy::config::trace::v3::DatadogConfig datadog_config;\n  TestUtility::loadFromYaml(yaml_string, datadog_config);\n\n  setup(datadog_config, true);\n}\n\nTEST_F(DatadogDriverTest, FlushSpansTimer) {\n  setupValidDriver();\n\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback;\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(1));\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callback = &callbacks;\n\n            EXPECT_EQ(\"fake_cluster\", message->headers().getHostValue());\n            EXPECT_EQ(\"application/msgpack\", message->headers().getContentTypeValue());\n\n            return &request;\n          }));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, 
{Tracing::Reason::Sampling, true});\n  span->finishSpan();\n\n  // Timer should be re-enabled.\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(900), _));\n\n  timer_->invokeCallback();\n\n  EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.timer_flushed\").value());\n  EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.traces_sent\").value());\n\n  Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n\n  callback->onSuccess(request, std::move(msg));\n\n  EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_skipped_no_cluster\").value());\n  EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.reports_sent\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_dropped\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_failed\").value());\n}\n\nTEST_F(DatadogDriverTest, NoBody) {\n  setupValidDriver();\n\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback;\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(1));\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callback = &callbacks;\n\n            EXPECT_EQ(\"fake_cluster\", message->headers().getHostValue());\n            EXPECT_EQ(\"application/msgpack\", message->headers().getContentTypeValue());\n\n            return &request;\n          }));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n  span->finishSpan();\n\n  // Timer should be re-enabled.\n  
EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(900), _));\n\n  timer_->invokeCallback();\n\n  EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.timer_flushed\").value());\n  EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.traces_sent\").value());\n\n  Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{\n      new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"content-length\", \"0\"}}}));\n  callback->onSuccess(request, std::move(msg));\n\n  EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_skipped_no_cluster\").value());\n  EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.reports_sent\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_dropped\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_failed\").value());\n}\n\nTEST_F(DatadogDriverTest, SkipReportIfCollectorClusterHasBeenRemoved) {\n  Upstream::ClusterUpdateCallbacks* cluster_update_callbacks;\n  EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_))\n      .WillOnce(DoAll(SaveArgAddress(&cluster_update_callbacks), Return(nullptr)));\n\n  setupValidDriver();\n\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(900), _)).Times(AnyNumber());\n\n  // Verify the effect of onClusterAddOrUpdate()/onClusterRemoval() on reporting logic,\n  // keeping in mind that they will be called both for relevant and irrelevant clusters.\n\n  {\n    // Simulate removal of the relevant cluster.\n    cluster_update_callbacks->onClusterRemoval(\"fake_cluster\");\n\n    // Verify that no report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0);\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0);\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n    timer_->invokeCallback();\n\n    // Verify observability.\n    EXPECT_EQ(1U, 
stats_.counter(\"tracing.datadog.timer_flushed\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.traces_sent\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_dropped\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_failed\").value());\n  }\n\n  {\n    // Simulate addition of an irrelevant cluster.\n    NiceMock<Upstream::MockThreadLocalCluster> unrelated_cluster;\n    unrelated_cluster.cluster_.info_->name_ = \"unrelated_cluster\";\n    cluster_update_callbacks->onClusterAddOrUpdate(unrelated_cluster);\n\n    // Verify that no report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0);\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0);\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n    timer_->invokeCallback();\n\n    // Verify observability.\n    EXPECT_EQ(2U, stats_.counter(\"tracing.datadog.timer_flushed\").value());\n    EXPECT_EQ(2U, stats_.counter(\"tracing.datadog.traces_sent\").value());\n    EXPECT_EQ(2U, stats_.counter(\"tracing.datadog.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_dropped\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_failed\").value());\n  }\n\n  {\n    // Simulate addition of the relevant cluster.\n    cluster_update_callbacks->onClusterAddOrUpdate(cm_.thread_local_cluster_);\n\n    // Verify that report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"fake_cluster\"))\n        .WillOnce(ReturnRef(cm_.async_client_));\n    
Http::MockAsyncClientRequest request(&cm_.async_client_);\n    Http::AsyncClient::Callbacks* callback{};\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n        .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request)));\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n    timer_->invokeCallback();\n\n    // Complete in-flight request.\n    callback->onFailure(request, Http::AsyncClient::FailureReason::Reset);\n\n    // Verify observability.\n    EXPECT_EQ(3U, stats_.counter(\"tracing.datadog.timer_flushed\").value());\n    EXPECT_EQ(3U, stats_.counter(\"tracing.datadog.traces_sent\").value());\n    EXPECT_EQ(2U, stats_.counter(\"tracing.datadog.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_dropped\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.reports_failed\").value());\n  }\n\n  {\n    // Simulate removal of an irrelevant cluster.\n    cluster_update_callbacks->onClusterRemoval(\"unrelated_cluster\");\n\n    // Verify that report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"fake_cluster\"))\n        .WillOnce(ReturnRef(cm_.async_client_));\n    Http::MockAsyncClientRequest request(&cm_.async_client_);\n    Http::AsyncClient::Callbacks* callback{};\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n        .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request)));\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n    timer_->invokeCallback();\n\n    // Complete in-flight request.\n    Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n     
   Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}}));\n    callback->onSuccess(request, std::move(msg));\n\n    // Verify observability.\n    EXPECT_EQ(4U, stats_.counter(\"tracing.datadog.timer_flushed\").value());\n    EXPECT_EQ(4U, stats_.counter(\"tracing.datadog.traces_sent\").value());\n    EXPECT_EQ(2U, stats_.counter(\"tracing.datadog.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.datadog.reports_sent\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.reports_dropped\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.datadog.reports_failed\").value());\n  }\n}\n\nTEST_F(DatadogDriverTest, CancelInflightRequestsOnDestruction) {\n  setupValidDriver();\n\n  StrictMock<Http::MockAsyncClientRequest> request1(&cm_.async_client_),\n      request2(&cm_.async_client_), request3(&cm_.async_client_), request4(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback{};\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(1));\n\n  // Expect 4 separate report requests to be made.\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request1)))\n      .WillOnce(Return(&request2))\n      .WillOnce(Return(&request3))\n      .WillOnce(Return(&request4));\n  // Expect timer to be re-enabled on each tick.\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(900), _)).Times(4);\n\n  // Trigger 1st report request.\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  timer_->invokeCallback();\n  // Trigger 2nd report request.\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  
timer_->invokeCallback();\n  // Trigger 3rd report request.\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  timer_->invokeCallback();\n  // Trigger 4th report request.\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  timer_->invokeCallback();\n\n  Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}}));\n  // Simulate completion of the 2nd report request.\n  callback->onSuccess(request2, std::move(msg));\n\n  // Simulate failure of the 3rd report request.\n  callback->onFailure(request3, Http::AsyncClient::FailureReason::Reset);\n\n  // Expect 1st and 4th requests to be cancelled on destruction.\n  EXPECT_CALL(request1, cancel());\n  EXPECT_CALL(request4, cancel());\n\n  // Trigger destruction.\n  driver_.reset();\n}\n\n} // namespace\n} // namespace Datadog\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/dynamic_ot/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"dynamic_opentracing_driver_impl_test\",\n    srcs = [\n        \"dynamic_opentracing_driver_impl_test.cc\",\n    ],\n    data = [\n        \"@io_opentracing_cpp//mocktracer:libmocktracer_plugin.so\",\n    ],\n    extension_name = \"envoy.tracers.dynamic_ot\",\n    # TODO(wrowe): envoy_extension_ rules don't currently exclude windows extensions\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/tracers/dynamic_ot:dynamic_opentracing_driver_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    data = [\n        \"@io_opentracing_cpp//mocktracer:libmocktracer_plugin.so\",\n    ],\n    extension_name = \"envoy.tracers.dynamic_ot\",\n    # TODO(wrowe): envoy_extension_ rules don't currently exclude windows extensions\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//source/extensions/tracers/dynamic_ot:config\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/tracers/dynamic_ot/config_test.cc",
    "content": "#include \"envoy/config/trace/v3/dynamic_ot.pb.h\"\n#include \"envoy/config/trace/v3/dynamic_ot.pb.validate.h\"\n#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n\n#include \"extensions/tracers/dynamic_ot/config.h\"\n\n#include \"test/mocks/server/tracer_factory.h\"\n#include \"test/mocks/server/tracer_factory_context.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"fmt/printf.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Eq;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace DynamicOt {\nnamespace {\n\nTEST(DynamicOtTracerConfigTest, DynamicOpentracingHttpTracer) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  EXPECT_CALL(context.server_factory_context_.cluster_manager_, get(Eq(\"fake_cluster\")))\n      .WillRepeatedly(\n          Return(&context.server_factory_context_.cluster_manager_.thread_local_cluster_));\n  ON_CALL(*context.server_factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_,\n          features())\n      .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n  const std::string yaml_string = fmt::sprintf(\n      R\"EOF(\n  http:\n    name: envoy.tracers.dynamic_ot\n    config:\n      library: %s\n      config:\n        output_file: fake_file\n  )EOF\",\n      TestEnvironment::runfilesPath(\"mocktracer/libmocktracer_plugin.so\", \"io_opentracing_cpp\"));\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  DynamicOpenTracingTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  const Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context);\n  EXPECT_NE(nullptr, tracer);\n}\n\n// Test that the deprecated extension name still 
functions.\nTEST(DynamicOtTracerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.dynamic.ot\";\n\n  ASSERT_NE(nullptr, Registry::FactoryRegistry<Server::Configuration::TracerFactory>::getFactory(\n                         deprecated_name));\n}\n\n} // namespace\n} // namespace DynamicOt\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc",
    "content": "#include <memory>\n\n#include \"common/http/header_map_impl.h\"\n\n#include \"extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"fmt/printf.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace DynamicOt {\nnamespace {\n\nclass DynamicOpenTracingDriverTest : public testing::Test {\npublic:\n  void setup(const std::string& library, const std::string& tracer_config) {\n    driver_ = std::make_unique<DynamicOpenTracingDriver>(stats_, library, tracer_config);\n  }\n\n  void setupValidDriver() { setup(library_path_, tracer_config_); }\n\n  const std::string library_path_ =\n      TestEnvironment::runfilesPath(\"mocktracer/libmocktracer_plugin.so\", \"io_opentracing_cpp\");\n  const std::string spans_file_ = TestEnvironment::temporaryDirectory() + \"/spans.json\";\n  const std::string tracer_config_ = fmt::sprintf(R\"EOF(\n      {\n        \"output_file\": \"%s\"\n      }\n    )EOF\",\n                                                  spans_file_);\n  std::unique_ptr<DynamicOpenTracingDriver> driver_;\n  Stats::IsolatedStoreImpl stats_;\n\n  const std::string operation_name_{\"test\"};\n  Http::TestRequestHeaderMapImpl request_headers_{\n      {\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-request-id\", \"foo\"}};\n  SystemTime start_time_;\n  NiceMock<Tracing::MockConfig> config_;\n};\n\nTEST_F(DynamicOpenTracingDriverTest, FormatErrorMessage) {\n  const std::error_code error_code = std::make_error_code(std::errc::permission_denied);\n  EXPECT_EQ(error_code.message(), DynamicOpenTracingDriver::formatErrorMessage(error_code, \"\"));\n  EXPECT_EQ(error_code.message() + \": abc\",\n            DynamicOpenTracingDriver::formatErrorMessage(error_code, 
\"abc\"));\n}\n\nTEST_F(DynamicOpenTracingDriverTest, InitializeDriver) {\n  {\n    std::string invalid_library = \"abc123\";\n    std::string invalid_config = R\"EOF(\n      {\"fake\" : \"fake\"}\n    )EOF\";\n\n    EXPECT_THROW(setup(invalid_library, invalid_config), EnvoyException);\n  }\n\n  {\n    std::string empty_config = \"{}\";\n\n    EXPECT_THROW(setup(library_path_, empty_config), EnvoyException);\n  }\n}\n\n// This test fails under gcc, please see https://github.com/envoyproxy/envoy/issues/7647\n// for more details.\n#ifndef GCC_COMPILER\nTEST_F(DynamicOpenTracingDriverTest, FlushSpans) {\n  setupValidDriver();\n\n  {\n    Tracing::SpanPtr first_span = driver_->startSpan(\n        config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true});\n    first_span->finishSpan();\n    driver_->tracer().Close();\n  }\n\n  driver_ = nullptr;\n\n  const Json::ObjectSharedPtr spans_json =\n      TestEnvironment::jsonLoadFromString(TestEnvironment::readFileToStringForTest(spans_file_));\n  EXPECT_NE(spans_json, nullptr);\n  EXPECT_EQ(spans_json->asObjectArray().size(), 1);\n}\n#endif\n\n} // namespace\n} // namespace DynamicOt\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/lightstep/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"lightstep_tracer_impl_test\",\n    srcs = [\n        \"lightstep_tracer_impl_test.cc\",\n    ],\n    extension_name = \"envoy.tracers.lightstep\",\n    # TODO(wrowe): envoy_extension_ rules don't currently exclude windows extensions\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//source/common/common:base64_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/tracers/lightstep:lightstep_tracer_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:thread_local_cluster_mocks\",\n        \"//test/test_common:global_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.tracers.lightstep\",\n    # TODO(wrowe): envoy_extension_ rules don't currently exclude windows extensions\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//source/extensions/tracers/lightstep:config\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/tracers/lightstep/config_test.cc",
    "content": "#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/config/trace/v3/lightstep.pb.h\"\n#include \"envoy/config/trace/v3/lightstep.pb.validate.h\"\n\n#include \"extensions/tracers/lightstep/config.h\"\n\n#include \"test/mocks/server/tracer_factory.h\"\n#include \"test/mocks/server/tracer_factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Eq;\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Lightstep {\nnamespace {\n\nTEST(LightstepTracerConfigTest, LightstepHttpTracer) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  EXPECT_CALL(context.server_factory_context_.cluster_manager_, get(Eq(\"fake_cluster\")))\n      .WillRepeatedly(\n          Return(&context.server_factory_context_.cluster_manager_.thread_local_cluster_));\n  ON_CALL(*context.server_factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_,\n          features())\n      .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: lightstep\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.LightstepConfig\n      collector_cluster: fake_cluster\n      access_token_file: fake_file\n   )EOF\";\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  LightstepTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr lightstep_tracer = factory.createHttpTracer(*message, context);\n  EXPECT_NE(nullptr, lightstep_tracer);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(LightstepTracerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = 
\"envoy.lightstep\";\n\n  ASSERT_NE(nullptr, Registry::FactoryRegistry<Server::Configuration::TracerFactory>::getFactory(\n                         deprecated_name));\n}\n\n} // namespace\n} // namespace Lightstep\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc",
    "content": "#include <chrono>\n#include <memory>\n#include <sstream>\n#include <string>\n\n#include \"envoy/config/trace/v3/lightstep.pb.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/lightstep/lightstep_tracer_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/thread_local_cluster.h\"\n#include \"test/test_common/global.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AtLeast;\nusing testing::DoAll;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::WithArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Lightstep {\n\nstatic Http::ResponseMessagePtr makeSuccessResponse() {\n  Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n\n  msg->trailers(\n      Http::ResponseTrailerMapPtr{new Http::TestResponseTrailerMapImpl{{\"grpc-status\", \"0\"}}});\n  std::unique_ptr<Protobuf::Message> collector_response =\n      lightstep::Transporter::MakeCollectorResponse();\n  EXPECT_NE(collector_response, nullptr);\n  msg->body().add(*Grpc::Common::serializeToGrpcFrame(*collector_response));\n  return 
msg;\n}\n\nnamespace {\n\nclass LightStepDriverTest : public testing::Test {\npublic:\n  LightStepDriverTest() : grpc_context_(*symbol_table_) {}\n\n  void setup(envoy::config::trace::v3::LightstepConfig& lightstep_config, bool init_timer,\n             Common::Ot::OpenTracingDriver::PropagationMode propagation_mode =\n                 Common::Ot::OpenTracingDriver::PropagationMode::TracerNative) {\n    std::unique_ptr<lightstep::LightStepTracerOptions> opts(\n        new lightstep::LightStepTracerOptions());\n    opts->access_token = \"sample_token\";\n    opts->component_name = \"component\";\n\n    cm_.thread_local_cluster_.cluster_.info_->name_ = \"fake_cluster\";\n    ON_CALL(cm_, httpAsyncClientForCluster(\"fake_cluster\"))\n        .WillByDefault(ReturnRef(cm_.async_client_));\n\n    if (init_timer) {\n      timer_ = new NiceMock<Event::MockTimer>(&tls_.dispatcher_);\n      EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(1000), _)).Times(AtLeast(1));\n    }\n\n    driver_ = std::make_unique<LightStepDriver>(lightstep_config, cm_, stats_, tls_, runtime_,\n                                                std::move(opts), propagation_mode, grpc_context_);\n  }\n\n  void setupValidDriver(int min_flush_spans = LightStepDriver::DefaultMinFlushSpans,\n                        Common::Ot::OpenTracingDriver::PropagationMode propagation_mode =\n                            Common::Ot::OpenTracingDriver::PropagationMode::TracerNative) {\n    EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n    ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features())\n        .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n    EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.flush_interval_ms\", _))\n        .Times(AtLeast(1))\n        .WillRepeatedly(Return(1000));\n\n    EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.min_flush_spans\",\n                                        
       LightStepDriver::DefaultMinFlushSpans))\n        .Times(AtLeast(1))\n        .WillRepeatedly(Return(min_flush_spans));\n\n    const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    )EOF\";\n    envoy::config::trace::v3::LightstepConfig lightstep_config;\n    TestUtility::loadFromYaml(yaml_string, lightstep_config);\n\n    setup(lightstep_config, true, propagation_mode);\n  }\n\n  const std::string operation_name_{\"test\"};\n  Http::TestRequestHeaderMapImpl request_headers_{\n      {\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-request-id\", \"foo\"}};\n  const Http::TestResponseHeaderMapImpl response_headers_{{\":status\", \"500\"}};\n  SystemTime start_time_;\n  StreamInfo::MockStreamInfo stream_info_;\n\n  Stats::TestSymbolTable symbol_table_;\n  Grpc::ContextImpl grpc_context_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Stats::MockIsolatedStatsStore> stats_;\n  std::unique_ptr<LightStepDriver> driver_;\n  NiceMock<Event::MockTimer>* timer_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<Random::MockRandomGenerator> random_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n\n  NiceMock<Tracing::MockConfig> config_;\n};\n\nTEST_F(LightStepDriverTest, LightStepLogger) {\n  LightStepLogger logger;\n\n  // Verify calls to logger don't crash.\n  logger(lightstep::LogLevel::debug, \"abc\");\n  logger(lightstep::LogLevel::info, \"abc\");\n  logger(lightstep::LogLevel::error, \"abc\");\n}\n\nTEST_F(LightStepDriverTest, InitializeDriver) {\n  {\n    envoy::config::trace::v3::LightstepConfig lightstep_config;\n\n    EXPECT_THROW(setup(lightstep_config, false), EnvoyException);\n  }\n\n  {\n    // Valid config but not valid cluster.\n    EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillOnce(Return(nullptr));\n\n    const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    )EOF\";\n    envoy::config::trace::v3::LightstepConfig 
lightstep_config;\n    TestUtility::loadFromYaml(yaml_string, lightstep_config);\n\n    EXPECT_THROW(setup(lightstep_config, false), EnvoyException);\n  }\n\n  {\n    // Valid config, but upstream cluster does not support http2.\n    EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n    ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features()).WillByDefault(Return(0));\n\n    const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    )EOF\";\n    envoy::config::trace::v3::LightstepConfig lightstep_config;\n    TestUtility::loadFromYaml(yaml_string, lightstep_config);\n\n    EXPECT_THROW(setup(lightstep_config, false), EnvoyException);\n  }\n\n  {\n    EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n    ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features())\n        .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n    const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    )EOF\";\n    envoy::config::trace::v3::LightstepConfig lightstep_config;\n    TestUtility::loadFromYaml(yaml_string, lightstep_config);\n\n    setup(lightstep_config, true);\n  }\n}\n\nTEST_F(LightStepDriverTest, DeferredTlsInitialization) {\n  EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features())\n      .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n  const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    )EOF\";\n  envoy::config::trace::v3::LightstepConfig lightstep_config;\n  TestUtility::loadFromYaml(yaml_string, lightstep_config);\n\n  std::unique_ptr<lightstep::LightStepTracerOptions> opts(new lightstep::LightStepTracerOptions());\n  opts->access_token = \"sample_token\";\n  opts->component_name = \"component\";\n\n  ON_CALL(cm_, 
httpAsyncClientForCluster(\"fake_cluster\"))\n      .WillByDefault(ReturnRef(cm_.async_client_));\n\n  auto propagation_mode = Common::Ot::OpenTracingDriver::PropagationMode::TracerNative;\n\n  tls_.defer_data = true;\n  driver_ = std::make_unique<LightStepDriver>(lightstep_config, cm_, stats_, tls_, runtime_,\n                                              std::move(opts), propagation_mode, grpc_context_);\n  tls_.call();\n}\n\nTEST_F(LightStepDriverTest, AllowCollectorClusterToBeAddedViaApi) {\n  EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features())\n      .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, addedViaApi()).WillByDefault(Return(true));\n\n  const std::string yaml_string = R\"EOF(\n  collector_cluster: fake_cluster\n  )EOF\";\n  envoy::config::trace::v3::LightstepConfig lightstep_config;\n  TestUtility::loadFromYaml(yaml_string, lightstep_config);\n\n  setup(lightstep_config, true);\n}\n\nTEST_F(LightStepDriverTest, FlushSeveralSpans) {\n  setupValidDriver(2);\n\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback = nullptr;\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callback = &callbacks;\n\n            EXPECT_EQ(\"/lightstep.collector.CollectorService/Report\",\n                      message->headers().getPathValue());\n            EXPECT_EQ(\"fake_cluster\", message->headers().getHostValue());\n            EXPECT_EQ(\"application/grpc\", 
message->headers().getContentTypeValue());\n\n            return &request;\n          }));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.request_timeout\", 5000U))\n      .WillOnce(Return(5000U));\n\n  Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n\n  // Currently not possible to access the operation from the span, but this\n  // invocation will make sure setting the operation does not cause a crash!\n  first_span->setOperation(\"myOperation\");\n  first_span->finishSpan();\n\n  Tracing::SpanPtr second_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                    start_time_, {Tracing::Reason::Sampling, true});\n  second_span->finishSpan();\n\n  Tracing::SpanPtr third_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n  third_span->finishSpan();\n\n  callback->onSuccess(request, makeSuccessResponse());\n\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"grpc.lightstep.collector.CollectorService.Report.success\")\n                    .value());\n\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"grpc.lightstep.collector.CollectorService.Report.total\")\n                    .value());\n  EXPECT_EQ(2U, stats_.counter(\"tracing.lightstep.spans_sent\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.lightstep.reports_skipped_no_cluster\").value());\n}\n\nTEST_F(LightStepDriverTest, SkipReportIfCollectorClusterHasBeenRemoved) {\n  Upstream::ClusterUpdateCallbacks* cluster_update_callbacks;\n  EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_))\n      .WillOnce(DoAll(SaveArgAddress(&cluster_update_callbacks), 
Return(nullptr)));\n\n  setupValidDriver(1);\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.request_timeout\", 5000U))\n      .WillRepeatedly(Return(5000U));\n\n  // Verify the effect of onClusterAddOrUpdate()/onClusterRemoval() on reporting logic,\n  // keeping in mind that they will be called both for relevant and irrelevant clusters.\n\n  {\n    // Simulate removal of the relevant cluster.\n    cluster_update_callbacks->onClusterRemoval(\"fake_cluster\");\n\n    // Verify that no report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0);\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0);\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n    driver_->flush();\n\n    // Verify observability.\n    EXPECT_EQ(1U, stats_.counter(\"tracing.lightstep.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.lightstep.spans_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.lightstep.spans_dropped\").value());\n  }\n\n  {\n    // Simulate addition of an irrelevant cluster.\n    NiceMock<Upstream::MockThreadLocalCluster> unrelated_cluster;\n    unrelated_cluster.cluster_.info_->name_ = \"unrelated_cluster\";\n    cluster_update_callbacks->onClusterAddOrUpdate(unrelated_cluster);\n\n    // Verify that no report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0);\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0);\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n    driver_->flush();\n\n    // Verify observability.\n    EXPECT_EQ(2U, stats_.counter(\"tracing.lightstep.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, 
stats_.counter(\"tracing.lightstep.spans_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.lightstep.spans_dropped\").value());\n  }\n\n  {\n    // Simulate addition of the relevant cluster.\n    cluster_update_callbacks->onClusterAddOrUpdate(cm_.thread_local_cluster_);\n\n    // Verify that report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"fake_cluster\"))\n        .WillOnce(ReturnRef(cm_.async_client_));\n    Http::MockAsyncClientRequest request(&cm_.async_client_);\n    Http::AsyncClient::Callbacks* callback{};\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n        .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request)));\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n    driver_->flush();\n\n    // Complete in-flight request.\n    callback->onFailure(request, Http::AsyncClient::FailureReason::Reset);\n\n    // Verify observability.\n    EXPECT_EQ(2U, stats_.counter(\"tracing.lightstep.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.lightstep.spans_sent\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.lightstep.spans_dropped\").value());\n  }\n\n  {\n    // Simulate removal of an irrelevant cluster.\n    cluster_update_callbacks->onClusterRemoval(\"unrelated_cluster\");\n\n    // Verify that report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"fake_cluster\"))\n        .WillOnce(ReturnRef(cm_.async_client_));\n    Http::MockAsyncClientRequest request(&cm_.async_client_);\n    Http::AsyncClient::Callbacks* callback{};\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n        .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request)));\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n              
      {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n    driver_->flush();\n\n    // Complete in-flight request.\n    Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}}));\n    callback->onSuccess(request, std::move(msg));\n\n    // Verify observability.\n    EXPECT_EQ(2U, stats_.counter(\"tracing.lightstep.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.lightstep.spans_sent\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.lightstep.spans_dropped\").value());\n  }\n}\n\nTEST_F(LightStepDriverTest, FlushOneFailure) {\n  setupValidDriver(1);\n\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback = nullptr;\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callback = &callbacks;\n\n            EXPECT_EQ(\"/lightstep.collector.CollectorService/Report\",\n                      message->headers().getPathValue());\n            EXPECT_EQ(\"fake_cluster\", message->headers().getHostValue());\n            EXPECT_EQ(\"application/grpc\", message->headers().getContentTypeValue());\n\n            return &request;\n          }));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.request_timeout\", 5000U))\n      .WillOnce(Return(5000U));\n\n  Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                   start_time_, {Tracing::Reason::Sampling, true});\n\n  first_span->finishSpan();\n\n  
Tracing::SpanPtr second_span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                                    start_time_, {Tracing::Reason::Sampling, true});\n\n  second_span->finishSpan();\n\n  callback->onFailure(request, Http::AsyncClient::FailureReason::Reset);\n\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"grpc.lightstep.collector.CollectorService.Report.failure\")\n                    .value());\n  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_\n                    .counter(\"grpc.lightstep.collector.CollectorService.Report.total\")\n                    .value());\n  EXPECT_EQ(1U, stats_.counter(\"tracing.lightstep.spans_dropped\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.lightstep.reports_skipped_no_cluster\").value());\n}\n\nTEST_F(LightStepDriverTest, FlushWithActiveReport) {\n  setupValidDriver(1);\n\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback = nullptr;\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callback = &callbacks;\n\n            EXPECT_EQ(\"/lightstep.collector.CollectorService/Report\",\n                      message->headers().getPathValue());\n            EXPECT_EQ(\"fake_cluster\", message->headers().getHostValue());\n            EXPECT_EQ(\"application/grpc\", message->headers().getContentTypeValue());\n\n            return &request;\n          }));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.request_timeout\", 5000U))\n      .WillOnce(Return(5000U));\n\n  
driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  driver_->flush();\n\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  driver_->flush();\n\n  EXPECT_EQ(1U, stats_.counter(\"tracing.lightstep.spans_dropped\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.lightstep.reports_skipped_no_cluster\").value());\n\n  EXPECT_CALL(request, cancel());\n\n  driver_.reset();\n}\n\nTEST_F(LightStepDriverTest, OnFullWithActiveReport) {\n  setupValidDriver(1);\n\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback = nullptr;\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callback = &callbacks;\n\n            EXPECT_EQ(\"/lightstep.collector.CollectorService/Report\",\n                      message->headers().getPathValue());\n            EXPECT_EQ(\"fake_cluster\", message->headers().getHostValue());\n            EXPECT_EQ(\"application/grpc\", message->headers().getContentTypeValue());\n\n            return &request;\n          }));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.request_timeout\", 5000U))\n      .WillOnce(Return(5000U));\n\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  driver_->flush();\n\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, 
start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n\n  EXPECT_EQ(1U, stats_.counter(\"tracing.lightstep.spans_dropped\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.lightstep.reports_skipped_no_cluster\").value());\n\n  EXPECT_CALL(request, cancel());\n\n  driver_.reset();\n}\n\nTEST_F(LightStepDriverTest, FlushSpansTimer) {\n  setupValidDriver();\n\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback = nullptr;\n\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& /*message*/, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callback = &callbacks;\n\n            return &request;\n          }));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n  span->finishSpan();\n\n  // Timer should be re-enabled.\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(1000), _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.request_timeout\", 5000U))\n      .WillOnce(Return(5000U));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.flush_interval_ms\", 1000U))\n      .WillOnce(Return(1000U));\n\n  timer_->invokeCallback();\n\n  callback->onSuccess(request, makeSuccessResponse());\n\n  EXPECT_EQ(1U, stats_.counter(\"tracing.lightstep.timer_flushed\").value());\n  EXPECT_EQ(1U, 
stats_.counter(\"tracing.lightstep.spans_sent\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.lightstep.reports_skipped_no_cluster\").value());\n}\n\nTEST_F(LightStepDriverTest, CancelRequestOnDestruction) {\n  setupValidDriver(1);\n\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback = nullptr;\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& /*message*/, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n            callback = &callbacks;\n\n            return &request;\n          }));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.request_timeout\", 5000U))\n      .WillOnce(Return(5000U));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n  span->finishSpan();\n\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n\n  EXPECT_CALL(request, cancel());\n\n  driver_.reset();\n}\n\nTEST_F(LightStepDriverTest, SerializeAndDeserializeContext) {\n  for (Common::Ot::OpenTracingDriver::PropagationMode propagation_mode :\n       {Common::Ot::OpenTracingDriver::PropagationMode::SingleHeader,\n        Common::Ot::OpenTracingDriver::PropagationMode::TracerNative}) {\n    setupValidDriver(LightStepDriver::DefaultMinFlushSpans, propagation_mode);\n\n    // Supply bogus context, that will be simply ignored.\n    const std::string invalid_context = \"notvalidcontext\";\n    request_headers_.setCopy(Http::CustomHeaders::get().OtSpanContext, 
invalid_context);\n    stats_.counter(\"tracing.opentracing.span_context_extraction_error\").reset();\n    driver_->startSpan(config_, request_headers_, operation_name_, start_time_,\n                       {Tracing::Reason::Sampling, true});\n    EXPECT_EQ(1U, stats_.counter(\"tracing.opentracing.span_context_extraction_error\").value());\n\n    std::string injected_ctx(request_headers_.get_(Http::CustomHeaders::get().OtSpanContext));\n    EXPECT_FALSE(injected_ctx.empty());\n\n    // Supply empty context.\n    request_headers_.remove(Http::CustomHeaders::get().OtSpanContext);\n    Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                               start_time_, {Tracing::Reason::Sampling, true});\n\n    EXPECT_FALSE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext));\n    span->injectContext(request_headers_);\n\n    injected_ctx = std::string(request_headers_.get_(Http::CustomHeaders::get().OtSpanContext));\n    EXPECT_FALSE(injected_ctx.empty());\n\n    // Context can be parsed fine.\n    const opentracing::Tracer& tracer = driver_->tracer();\n    std::string context = Base64::decode(injected_ctx);\n    std::istringstream iss{context, std::ios::binary};\n    EXPECT_TRUE(tracer.Extract(iss));\n\n    // Supply parent context, request_headers has properly populated x-ot-span-context.\n    Tracing::SpanPtr span_with_parent = driver_->startSpan(\n        config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true});\n    request_headers_.remove(Http::CustomHeaders::get().OtSpanContext);\n    span_with_parent->injectContext(request_headers_);\n    injected_ctx = std::string(request_headers_.get_(Http::CustomHeaders::get().OtSpanContext));\n    EXPECT_FALSE(injected_ctx.empty());\n  }\n}\n\nTEST_F(LightStepDriverTest, MultiplePropagationModes) {\n  const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    propagation_modes:\n    - 
ENVOY\n    - LIGHTSTEP\n    - B3\n    - TRACE_CONTEXT\n    )EOF\";\n  envoy::config::trace::v3::LightstepConfig lightstep_config;\n  TestUtility::loadFromYaml(yaml_string, lightstep_config);\n\n  EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features())\n      .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.flush_interval_ms\", _))\n      .Times(AtLeast(1))\n      .WillRepeatedly(Return(1000));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.lightstep.min_flush_spans\",\n                                             LightStepDriver::DefaultMinFlushSpans))\n      .Times(AtLeast(1))\n      .WillRepeatedly(Return(1));\n\n  setup(lightstep_config, true);\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  EXPECT_FALSE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext));\n  span->injectContext(request_headers_);\n  EXPECT_TRUE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext));\n  EXPECT_TRUE(request_headers_.has(\"ot-tracer-traceid\"));\n  EXPECT_TRUE(request_headers_.has(\"x-b3-traceid\"));\n  EXPECT_TRUE(request_headers_.has(\"traceparent\"));\n}\n\nTEST_F(LightStepDriverTest, SpawnChild) {\n  setupValidDriver();\n\n  Tracing::SpanPtr parent = driver_->startSpan(config_, request_headers_, operation_name_,\n                                               start_time_, {Tracing::Reason::Sampling, true});\n  parent->injectContext(request_headers_);\n\n  Tracing::SpanPtr childViaHeaders = driver_->startSpan(\n      config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true});\n  Tracing::SpanPtr childViaSpawn = parent->spawnChild(config_, operation_name_, start_time_);\n\n  
Http::TestRequestHeaderMapImpl base1{{\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-request-id\", \"foo\"}};\n  Http::TestRequestHeaderMapImpl base2{{\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-request-id\", \"foo\"}};\n\n  childViaHeaders->injectContext(base1);\n  childViaSpawn->injectContext(base2);\n\n  std::string base1_context =\n      Base64::decode(std::string(base1.get_(Http::CustomHeaders::get().OtSpanContext)));\n  std::string base2_context =\n      Base64::decode(std::string(base2.get_(Http::CustomHeaders::get().OtSpanContext)));\n\n  EXPECT_FALSE(base1_context.empty());\n  EXPECT_FALSE(base2_context.empty());\n}\n\nTEST_F(LightStepDriverTest, GetAndSetBaggage) {\n  setupValidDriver();\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  std::string key = \"key1\";\n  std::string value = \"value1\";\n  span->setBaggage(key, value);\n  EXPECT_EQ(span->getBaggage(key), value);\n}\n\n} // namespace\n} // namespace Lightstep\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/opencensus/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"tracer_test\",\n    srcs = [\"tracer_test.cc\"],\n    extension_name = \"envoy.tracers.opencensus\",\n    # TODO(wrowe): envoy_extension_ rules don't currently exclude windows extensions\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//source/extensions/tracers/opencensus:opencensus_tracer_impl\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.tracers.opencensus\",\n    # TODO(wrowe): envoy_extension_ rules don't currently exclude windows extensions\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//source/extensions/tracers/opencensus:config\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/tracers/opencensus/config_test.cc",
    "content": "#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/config/trace/v3/opencensus.pb.h\"\n#include \"envoy/config/trace/v3/opencensus.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/tracers/opencensus/config.h\"\n\n#include \"test/mocks/server/tracer_factory.h\"\n#include \"test/mocks/server/tracer_factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"opencensus/trace/sampler.h\"\n#include \"opencensus/trace/trace_config.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace OpenCensus {\n\nTEST(OpenCensusTracerConfigTest, InvalidStackdriverConfiguration) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  OpenCensusTracerFactory factory;\n\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: envoy.tracers.opencensus\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      stackdriver_exporter_enabled: true\n      stackdriver_grpc_service:\n        envoy_grpc:\n          cluster_name: stackdriver\n  )EOF\";\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message, context)), EnvoyException,\n                            \"Opencensus stackdriver tracer only support GoogleGrpc.\");\n}\n\nTEST(OpenCensusTracerConfigTest, InvalidOcagentConfiguration) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  OpenCensusTracerFactory factory;\n\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: envoy.tracers.opencensus\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      ocagent_exporter_enabled: true\n      ocagent_grpc_service:\n   
     envoy_grpc:\n          cluster_name: opencensus\n  )EOF\";\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message, context)), EnvoyException,\n                            \"Opencensus ocagent tracer only supports GoogleGrpc.\");\n}\n\nTEST(OpenCensusTracerConfigTest, OpenCensusHttpTracer) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: envoy.tracers.opencensus\n  )EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  OpenCensusTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context);\n  EXPECT_NE(nullptr, tracer);\n}\n\nTEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerWithTypedConfig) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: opencensus\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      trace_config:\n        rate_limiting_sampler:\n          qps: 123\n        max_number_of_attributes: 12\n        max_number_of_annotations: 34\n        max_number_of_message_events: 56\n        max_number_of_links: 78\n      stdout_exporter_enabled: true\n      stackdriver_exporter_enabled: true\n      stackdriver_project_id: test_project_id\n      zipkin_exporter_enabled: true\n      zipkin_url: http://127.0.0.1:9411/api/v2/spans\n      ocagent_exporter_enabled: true\n      ocagent_address: 127.0.0.1:55678\n      
incoming_trace_context: b3\n      incoming_trace_context: trace_context\n      incoming_trace_context: grpc_trace_bin\n      incoming_trace_context: cloud_trace_context\n      outgoing_trace_context: trace_context\n  )EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  OpenCensusTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context);\n  EXPECT_NE(nullptr, tracer);\n\n  // Reset TraceParams back to default.\n  ::opencensus::trace::TraceConfig::SetCurrentTraceParams(\n      {32, 32, 128, 32, ::opencensus::trace::ProbabilitySampler(1e-4)});\n}\n\nTEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerGrpc) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: opencensus\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      trace_config:\n        rate_limiting_sampler:\n          qps: 123\n        max_number_of_attributes: 12\n        max_number_of_annotations: 34\n        max_number_of_message_events: 56\n        max_number_of_links: 78\n      ocagent_exporter_enabled: true\n      ocagent_grpc_service:\n        google_grpc:\n          target_uri: 127.0.0.1:55678\n          stat_prefix: test\n      incoming_trace_context: b3\n      incoming_trace_context: trace_context\n      incoming_trace_context: grpc_trace_bin\n      incoming_trace_context: cloud_trace_context\n      outgoing_trace_context: trace_context\n  )EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  OpenCensusTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), 
ProtobufMessage::getStrictValidationVisitor(), factory);\n#ifdef ENVOY_GOOGLE_GRPC\n  Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context);\n  EXPECT_NE(nullptr, tracer);\n\n  // Reset TraceParams back to default.\n  ::opencensus::trace::TraceConfig::SetCurrentTraceParams(\n      {32, 32, 128, 32, ::opencensus::trace::ProbabilitySampler(1e-4)});\n#else\n  EXPECT_THROW_WITH_MESSAGE(\n      (factory.createHttpTracer(*message, context)), EnvoyException,\n      \"Opencensus tracer: cannot handle ocagent google grpc service, google grpc is not built in.\");\n#endif\n}\n\nTEST(OpenCensusTracerConfigTest, ShouldCreateAtMostOneOpenCensusTracer) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  OpenCensusTracerFactory factory;\n\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: envoy.tracers.opencensus\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      trace_config:\n        rate_limiting_sampler:\n          qps: 123\n  )EOF\";\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  auto message_one = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr tracer_one = factory.createHttpTracer(*message_one, context);\n  EXPECT_NE(nullptr, tracer_one);\n\n  auto message_two = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr tracer_two = factory.createHttpTracer(*message_two, context);\n  // Verify that no new tracer has been created.\n  EXPECT_EQ(tracer_two, tracer_one);\n}\n\nTEST(OpenCensusTracerConfigTest, ShouldCacheFirstCreatedTracerUsingStrongReference) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  OpenCensusTracerFactory factory;\n\n  const std::string 
yaml_string = R\"EOF(\n  http:\n    name: envoy.tracers.opencensus\n  )EOF\";\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  auto message_one = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  std::weak_ptr<Tracing::HttpTracer> tracer_one = factory.createHttpTracer(*message_one, context);\n  // Verify that tracer factory keeps a strong reference.\n  EXPECT_NE(nullptr, tracer_one.lock());\n\n  auto message_two = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr tracer_two = factory.createHttpTracer(*message_two, context);\n  EXPECT_NE(nullptr, tracer_two);\n  // Verify that no new tracer has been created.\n  EXPECT_EQ(tracer_two, tracer_one.lock());\n}\n\nTEST(OpenCensusTracerConfigTest, ShouldNotCacheInvalidConfiguration) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  OpenCensusTracerFactory factory;\n\n  const std::string yaml_one = R\"EOF(\n  http:\n    name: envoy.tracers.opencensus\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      ocagent_exporter_enabled: true\n      ocagent_grpc_service:\n        envoy_grpc:\n          cluster_name: opencensus\n  )EOF\";\n  envoy::config::trace::v3::Tracing configuration_one;\n  TestUtility::loadFromYaml(yaml_one, configuration_one);\n\n  auto message_one = Config::Utility::translateToFactoryConfig(\n      configuration_one.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message_one, context)), EnvoyException,\n                            \"Opencensus ocagent tracer only supports GoogleGrpc.\");\n\n  const std::string yaml_two = R\"EOF(\n  http:\n    name: envoy.tracers.opencensus\n    typed_config:\n      \"@type\": 
type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      ocagent_exporter_enabled: true\n      ocagent_grpc_service:\n        google_grpc:\n          target_uri: 127.0.0.1:55678\n          stat_prefix: test\n  )EOF\";\n  envoy::config::trace::v3::Tracing configuration_two;\n  TestUtility::loadFromYaml(yaml_two, configuration_two);\n\n  auto message_two = Config::Utility::translateToFactoryConfig(\n      configuration_two.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n#ifdef ENVOY_GOOGLE_GRPC\n  Tracing::HttpTracerSharedPtr tracer_two = factory.createHttpTracer(*message_two, context);\n  // Verify that a new tracer has been created despite an earlier failed attempt.\n  EXPECT_NE(nullptr, tracer_two);\n#else\n  EXPECT_THROW_WITH_MESSAGE(\n      (factory.createHttpTracer(*message_two, context)), EnvoyException,\n      \"Opencensus tracer: cannot handle ocagent google grpc service, google grpc is not built in.\");\n#endif\n}\n\nTEST(OpenCensusTracerConfigTest, ShouldRejectSubsequentCreateAttemptsWithDifferentConfig) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  OpenCensusTracerFactory factory;\n\n  const std::string yaml_one = R\"EOF(\n  http:\n    name: envoy.tracers.opencensus\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      trace_config:\n        rate_limiting_sampler:\n          qps: 123\n  )EOF\";\n  envoy::config::trace::v3::Tracing configuration_one;\n  TestUtility::loadFromYaml(yaml_one, configuration_one);\n\n  auto message_one = Config::Utility::translateToFactoryConfig(\n      configuration_one.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr tracer_one = factory.createHttpTracer(*message_one, context);\n  EXPECT_NE(nullptr, tracer_one);\n\n  const std::string yaml_two = R\"EOF(\n  http:\n    name: envoy.tracers.opencensus\n    typed_config:\n      \"@type\": 
type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      trace_config:\n        rate_limiting_sampler:\n          qps: 321\n  )EOF\";\n  envoy::config::trace::v3::Tracing configuration_two;\n  TestUtility::loadFromYaml(yaml_two, configuration_two);\n\n  auto message_two = Config::Utility::translateToFactoryConfig(\n      configuration_two.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  // Verify that OpenCensus is only configured once in a lifetime.\n  EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message_two, context)), EnvoyException,\n                            \"Opencensus has already been configured with a different config.\");\n}\n\nTEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerStackdriverGrpc) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: opencensus\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig\n      stackdriver_exporter_enabled: true\n      stackdriver_grpc_service:\n        google_grpc:\n          target_uri: 127.0.0.1:55678\n          stat_prefix: test\n        initial_metadata:\n        - key: foo\n          value: bar\n  )EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  OpenCensusTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n#ifdef ENVOY_GOOGLE_GRPC\n  Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context);\n  EXPECT_NE(nullptr, tracer);\n#else\n  EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message, context)), EnvoyException,\n                            \"Opencensus tracer: cannot handle stackdriver google grpc service, \"\n                            \"google grpc is not built in.\");\n#endif\n}\n\n} // namespace OpenCensus\n} // namespace 
Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/opencensus/tracer_test.cc",
    "content": "// Usage:\n// bazel run //test/extensions/tracers/opencensus:tracer_test -- -l debug\n\n#include <cstdint>\n#include <iostream>\n#include <vector>\n\n#include \"envoy/config/trace/v3/opencensus.pb.h\"\n\n#include \"common/common/base64.h\"\n\n#include \"extensions/tracers/opencensus/opencensus_tracer_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"opencensus/trace/exporter/span_data.h\"\n#include \"opencensus/trace/exporter/span_exporter.h\"\n#include \"opencensus/trace/propagation/b3.h\"\n#include \"opencensus/trace/propagation/cloud_trace_context.h\"\n#include \"opencensus/trace/propagation/grpc_trace_bin.h\"\n#include \"opencensus/trace/propagation/trace_context.h\"\n#include \"opencensus/trace/span.h\"\n#include \"opencensus/trace/span_id.h\"\n\nusing testing::NiceMock;\n\nnamespace opencensus {\nnamespace trace {\nnamespace exporter {\n\nclass SpanExporterTestPeer {\npublic:\n  static constexpr auto& exportForTesting = SpanExporter::ExportForTesting;\n};\n\n} // namespace exporter\n} // namespace trace\n} // namespace opencensus\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace OpenCensus {\n\nusing envoy::config::trace::v3::OpenCensusConfig;\nusing ::opencensus::trace::exporter::SpanData;\nusing ::opencensus::trace::exporter::SpanExporter;\n\nnamespace {\n\n// Custom export handler. 
We register this as an OpenCensus trace exporter, and\n// use it to catch the spans we produce.\nclass SpanCatcher : public SpanExporter::Handler {\npublic:\n  void Export(const std::vector<SpanData>& spans) override {\n    absl::MutexLock lock(&mu_);\n    for (const auto& span : spans) {\n      spans_.emplace_back(span);\n    }\n  }\n\n  // Returns generated SpanData, and clears the catcher.\n  std::vector<SpanData> catchSpans() {\n    // OpenCensus's trace exporter is running in a background thread, waiting\n    // for a periodic export. Force it to flush right now.\n    opencensus::trace::exporter::SpanExporterTestPeer::exportForTesting();\n    absl::MutexLock lock(&mu_);\n    std::vector<SpanData> ret = std::move(spans_);\n    spans_.clear();\n    return ret;\n  }\n\nprivate:\n  mutable absl::Mutex mu_;\n  std::vector<SpanData> spans_ ABSL_GUARDED_BY(mu_);\n};\n\n// Use a Singleton SpanCatcher.\nSpanCatcher* getSpanCatcher() {\n  static auto g_span_catcher = new SpanCatcher();\n  return g_span_catcher;\n}\n\n// Call this before generating spans to register the exporter that catches them.\nvoid registerSpanCatcher() {\n  static bool done_once = false;\n  if (done_once) {\n    return;\n  }\n  SpanExporter::RegisterHandler(absl::WrapUnique(getSpanCatcher()));\n  done_once = true;\n}\n\n} // namespace\n\n// Create a Span via the driver, test all of the Tracing::Span API, and verify\n// the produced SpanData.\nTEST(OpenCensusTracerTest, Span) {\n  registerSpanCatcher();\n  OpenCensusConfig oc_config;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  std::unique_ptr<Tracing::Driver> driver(\n      new OpenCensus::Driver(oc_config, local_info, *Api::createApiForTest()));\n\n  NiceMock<Tracing::MockConfig> config;\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-request-id\", \"foo\"}};\n  const std::string operation_name{\"my_operation_1\"};\n  SystemTime start_time;\n\n  {\n    Tracing::SpanPtr span = 
driver->startSpan(config, request_headers, operation_name, start_time,\n                                              {Tracing::Reason::Sampling, true});\n    span->setOperation(\"different_name\");\n    span->setTag(\"my_key\", \"my_value\");\n    span->log(start_time, \"my annotation\");\n    // injectContext is tested in another unit test.\n    Tracing::SpanPtr child = span->spawnChild(config, \"child_span\", start_time);\n    child->finishSpan();\n    span->setSampled(false); // Abandon tracer.\n    span->finishSpan();\n\n    // Baggage methods are a noop in opencensus and won't affect events.\n    span->setBaggage(\"baggage_key\", \"baggage_value\");\n    ASSERT_EQ(\"\", span->getBaggage(\"baggage_key\"));\n  }\n\n  // Retrieve SpanData from the OpenCensus trace exporter.\n  std::vector<SpanData> spans = getSpanCatcher()->catchSpans();\n  ASSERT_EQ(2, spans.size());\n  ::opencensus::trace::SpanId parent_span_id;\n\n  // Check contents of parent span.\n  {\n    const auto& sd = (spans[0].name() == operation_name) ? spans[0] : spans[1];\n    ENVOY_LOG_MISC(debug, \"{}\", sd.DebugString());\n\n    EXPECT_EQ(\"different_name\", sd.name());\n    EXPECT_TRUE(sd.context().IsValid());\n    EXPECT_TRUE(sd.context().trace_options().IsSampled());\n    ::opencensus::trace::SpanId zeros;\n    EXPECT_EQ(zeros, sd.parent_span_id());\n    parent_span_id = sd.context().span_id();\n\n    ASSERT_EQ(3, sd.annotations().events().size());\n    EXPECT_EQ(\"my annotation\", sd.annotations().events()[0].event().description());\n    EXPECT_EQ(\"spawnChild\", sd.annotations().events()[1].event().description());\n    EXPECT_EQ(\"setSampled\", sd.annotations().events()[2].event().description());\n    EXPECT_TRUE(sd.has_ended());\n  }\n\n  // And child span.\n  {\n    const auto& sd = (spans[0].name() == \"child_span\") ? 
spans[0] : spans[1];\n    ENVOY_LOG_MISC(debug, \"{}\", sd.DebugString());\n\n    EXPECT_EQ(\"child_span\", sd.name());\n    EXPECT_TRUE(sd.context().IsValid());\n    EXPECT_TRUE(sd.context().trace_options().IsSampled());\n    EXPECT_EQ(parent_span_id, sd.parent_span_id());\n    EXPECT_TRUE(sd.has_ended());\n  }\n}\n\nnamespace {\n\nusing testing::PrintToString;\n\nMATCHER_P2(ContainHeader, header, expected_value,\n           \"contains the header \" + PrintToString(header) + \" with value \" +\n               PrintToString(expected_value)) {\n  const auto found_value = arg.get(Http::LowerCaseString(header));\n  if (found_value == nullptr) {\n    return false;\n  }\n  return found_value->value().getStringView() == expected_value;\n}\n\n// Given incoming headers, test that trace context propagation works and generates all the expected\n// outgoing headers.\nvoid testIncomingHeaders(\n    const std::initializer_list<std::pair<const char*, const char*>>& headers) {\n  registerSpanCatcher();\n  OpenCensusConfig oc_config;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  oc_config.add_incoming_trace_context(OpenCensusConfig::NONE);\n  oc_config.add_incoming_trace_context(OpenCensusConfig::B3);\n  oc_config.add_incoming_trace_context(OpenCensusConfig::TRACE_CONTEXT);\n  oc_config.add_incoming_trace_context(OpenCensusConfig::GRPC_TRACE_BIN);\n  oc_config.add_incoming_trace_context(OpenCensusConfig::CLOUD_TRACE_CONTEXT);\n  oc_config.add_outgoing_trace_context(OpenCensusConfig::NONE);\n  oc_config.add_outgoing_trace_context(OpenCensusConfig::B3);\n  oc_config.add_outgoing_trace_context(OpenCensusConfig::TRACE_CONTEXT);\n  oc_config.add_outgoing_trace_context(OpenCensusConfig::GRPC_TRACE_BIN);\n  oc_config.add_outgoing_trace_context(OpenCensusConfig::CLOUD_TRACE_CONTEXT);\n  std::unique_ptr<Tracing::Driver> driver(\n      new OpenCensus::Driver(oc_config, local_info, *Api::createApiForTest()));\n  NiceMock<Tracing::MockConfig> config;\n  Http::TestRequestHeaderMapImpl 
request_headers{\n      {\":path\", \"/\"},\n      {\":method\", \"GET\"},\n      {\"x-request-id\", \"foo\"},\n  };\n  for (const auto& kv : headers) {\n    request_headers.addCopy(Http::LowerCaseString(kv.first), kv.second);\n  }\n\n  const std::string operation_name{\"my_operation_2\"};\n  SystemTime start_time;\n  Http::TestRequestHeaderMapImpl injected_headers;\n  {\n    Tracing::SpanPtr span = driver->startSpan(config, request_headers, operation_name, start_time,\n                                              {Tracing::Reason::Sampling, false});\n    span->injectContext(injected_headers);\n    span->finishSpan();\n  }\n\n  // Retrieve SpanData from the OpenCensus trace exporter.\n  std::vector<SpanData> spans = getSpanCatcher()->catchSpans();\n  ASSERT_EQ(1, spans.size());\n  const auto& sd = spans[0];\n  ENVOY_LOG_MISC(debug, \"{}\", sd.DebugString());\n\n  // Check contents.\n  EXPECT_TRUE(sd.has_remote_parent());\n  EXPECT_EQ(\"6162636465666768\", sd.parent_span_id().ToHex());\n  EXPECT_EQ(\"404142434445464748494a4b4c4d4e4f\", sd.context().trace_id().ToHex());\n  EXPECT_TRUE(sd.context().trace_options().IsSampled())\n      << \"parent was sampled, child should be also\";\n\n  // Check injectContext.\n  // The SpanID is unpredictable so re-serialize context to check it.\n  const auto& ctx = sd.context();\n  const auto& hdrs = injected_headers;\n  EXPECT_THAT(hdrs, ContainHeader(\"traceparent\",\n                                  ::opencensus::trace::propagation::ToTraceParentHeader(ctx)));\n  {\n    std::string expected = ::opencensus::trace::propagation::ToGrpcTraceBinHeader(ctx);\n    expected = Base64::encode(expected.data(), expected.size(), /*add_padding=*/false);\n    EXPECT_THAT(hdrs, ContainHeader(\"grpc-trace-bin\", expected));\n  }\n  EXPECT_THAT(hdrs,\n              ContainHeader(\"x-cloud-trace-context\",\n                            ::opencensus::trace::propagation::ToCloudTraceContextHeader(ctx)));\n  EXPECT_THAT(hdrs, 
ContainHeader(\"x-b3-traceid\", \"404142434445464748494a4b4c4d4e4f\"));\n  EXPECT_THAT(\n      hdrs, ContainHeader(\"x-b3-spanid\", ::opencensus::trace::propagation::ToB3SpanIdHeader(ctx)));\n  EXPECT_THAT(hdrs, ContainHeader(\"x-b3-sampled\", \"1\"));\n}\n} // namespace\n\nTEST(OpenCensusTracerTest, PropagateTraceParentContext) {\n  testIncomingHeaders({{\"traceparent\", \"00-404142434445464748494a4b4c4d4e4f-6162636465666768-01\"}});\n}\n\nTEST(OpenCensusTracerTest, PropagateGrpcTraceBinContext) {\n  testIncomingHeaders({{\"grpc-trace-bin\", \"AABAQUJDREVGR0hJSktMTU5PAWFiY2RlZmdoAgE\"}});\n}\n\nTEST(OpenCensusTracerTest, PropagateCloudTraceContext) {\n  testIncomingHeaders(\n      {{\"x-cloud-trace-context\", \"404142434445464748494a4b4c4d4e4f/7017280452245743464;o=1\"}});\n}\n\nTEST(OpenCensusTracerTest, PropagateB3Context) {\n  testIncomingHeaders({{\"x-b3-traceid\", \"404142434445464748494a4b4c4d4e4f\"},\n                       {\"x-b3-spanid\", \"6162636465666768\"},\n                       {\"x-b3-sampled\", \"1\"}});\n}\n\nTEST(OpenCensusTracerTest, PropagateB3ContextWithDebugFlag) {\n  testIncomingHeaders({{\"x-b3-traceid\", \"404142434445464748494a4b4c4d4e4f\"},\n                       {\"x-b3-spanid\", \"6162636465666768\"},\n                       {\"x-b3-flags\", \"1\"}}); // Debug flag causes sampling.\n}\n\nnamespace {\n\n// Create a Span using the given config and return how many spans made it to\n// the exporter (either zero or one).\nint SamplerTestHelper(const OpenCensusConfig& oc_config) {\n  registerSpanCatcher();\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  std::unique_ptr<Tracing::Driver> driver(\n      new OpenCensus::Driver(oc_config, local_info, *Api::createApiForTest()));\n  auto span = ::opencensus::trace::Span::StartSpan(\"test_span\");\n  span.End();\n  // Retrieve SpanData from the OpenCensus trace exporter.\n  std::vector<SpanData> spans = getSpanCatcher()->catchSpans();\n  EXPECT_GE(spans.size(), 0);\n  
EXPECT_LE(spans.size(), 1);\n  if (!spans.empty()) {\n    EXPECT_TRUE(spans[0].context().trace_options().IsSampled());\n  }\n  return spans.size();\n}\n\n} // namespace\n\n// Test constant_sampler that's always on.\nTEST(OpenCensusTracerTest, ConstantSamplerAlwaysOn) {\n  OpenCensusConfig oc_config;\n  oc_config.mutable_trace_config()->mutable_constant_sampler()->set_decision(\n      ::opencensus::proto::trace::v1::ConstantSampler::ALWAYS_ON);\n  EXPECT_EQ(1, SamplerTestHelper(oc_config));\n}\n\n// Test constant_sampler that's always off.\nTEST(OpenCensusTracerTest, ConstantSamplerAlwaysOff) {\n  OpenCensusConfig oc_config;\n  oc_config.mutable_trace_config()->mutable_constant_sampler()->set_decision(\n      ::opencensus::proto::trace::v1::ConstantSampler::ALWAYS_OFF);\n  EXPECT_EQ(0, SamplerTestHelper(oc_config));\n}\n\n// Test probability_sampler that's always on.\nTEST(OpenCensusTracerTest, ProbabilitySamplerAlwaysOn) {\n  OpenCensusConfig oc_config;\n  oc_config.mutable_trace_config()->mutable_probability_sampler()->set_samplingprobability(1.0);\n  EXPECT_EQ(1, SamplerTestHelper(oc_config));\n}\n\n// Test probability_sampler that's always off.\nTEST(OpenCensusTracerTest, ProbabilitySamplerAlwaysOff) {\n  OpenCensusConfig oc_config;\n  oc_config.mutable_trace_config()->mutable_probability_sampler()->set_samplingprobability(0.0);\n  EXPECT_EQ(0, SamplerTestHelper(oc_config));\n}\n\n} // namespace OpenCensus\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/xray/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"xray_test\",\n    srcs = [\n        \"localized_sampling_test.cc\",\n        \"tracer_test.cc\",\n        \"util_test.cc\",\n        \"xray_tracer_impl_test.cc\",\n    ],\n    extension_name = \"envoy.tracers.xray\",\n    deps = [\n        \"//source/extensions/tracers/xray:xray_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:tracer_factory_context_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.tracers.xray\",\n    deps = [\n        \"//source/extensions/tracers/xray:config\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:tracer_factory_context_mocks\",\n        \"//test/mocks/server:tracer_factory_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"xray_fuzz_test\",\n    srcs = [\"fuzz_test.cc\"],\n    corpus = \"wildcard_matcher_corpus\",\n    deps = [\n        \"//source/extensions/tracers/xray:xray_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/tracers/xray/config_test.cc",
    "content": "#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/config/trace/v3/xray.pb.h\"\n#include \"envoy/config/trace/v3/xray.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/tracers/xray/config.h\"\n\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/tracer_factory.h\"\n#include \"test/mocks/server/tracer_factory_context.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::Throw;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\nnamespace {\n\nTEST(XRayTracerConfigTest, XRayHttpTracerWithTypedConfig) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: xray\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2alpha.XRayConfig\n      daemon_endpoint:\n        protocol: UDP\n        address: 127.0.0.1\n        port_value: 2000\n      segment_name: AwsAppMesh\n      sampling_rule_manifest:\n        filename: \"rules.json\")EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  XRayTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr xray_tracer = factory.createHttpTracer(*message, context);\n  ASSERT_NE(nullptr, xray_tracer);\n}\n\nTEST(XRayTracerConfigTest, XRayHttpTracerWithInvalidFileName) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  NiceMock<Api::MockApi> api;\n  NiceMock<Filesystem::MockInstance> file_system;\n\n  // fake invalid file\n  EXPECT_CALL(file_system, fileReadToEnd(\"rules.json\"))\n      .WillRepeatedly(Throw(EnvoyException(\"failed to open file.\")));\n  EXPECT_CALL(api, 
fileSystem()).WillRepeatedly(ReturnRef(file_system));\n  EXPECT_CALL(context.server_factory_context_, api()).WillRepeatedly(ReturnRef(api));\n\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: xray\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2alpha.XRayConfig\n      daemon_endpoint:\n        protocol: UDP\n        address: 127.0.0.1\n        port_value: 2000\n      segment_name: AwsAppMesh\n      sampling_rule_manifest:\n        filename: \"rules.json\")EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  XRayTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n\n  Tracing::HttpTracerSharedPtr xray_tracer = factory.createHttpTracer(*message, context);\n  ASSERT_NE(nullptr, xray_tracer);\n}\n\nTEST(XRayTracerConfigTest, ProtocolNotUDPThrows) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: xray\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2alpha.XRayConfig\n      daemon_endpoint:\n        protocol: TCP\n        address: 127.0.0.1\n        port_value: 2000\n      segment_name: AwsAppMesh\n      sampling_rule_manifest:\n        filename: \"rules.json\")EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  XRayTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n\n  ASSERT_THROW(factory.createHttpTracer(*message, context), EnvoyException);\n}\n\nTEST(XRayTracerConfigTest, UsingNamedPortThrows) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: xray\n    
typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2alpha.XRayConfig\n      daemon_endpoint:\n        protocol: UDP\n        address: 127.0.0.1\n        named_port: SMTP\n      segment_name: AwsAppMesh\n      sampling_rule_manifest:\n        filename: \"rules.json\")EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  XRayTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n\n  ASSERT_THROW(factory.createHttpTracer(*message, context), EnvoyException);\n}\n\nTEST(XRayTracerConfigTest, XRayHttpTracerWithSegmentFieldsTypedConfig) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n\n  const std::string yaml_string = R\"EOF(\n    http:\n      name: xray\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.config.trace.v3.XRayConfig\n        daemon_endpoint:\n          protocol: UDP\n          address: 127.0.0.1\n          port_value: 2000\n        segment_name: AwsAppMesh\n        sampling_rule_manifest:\n          filename: \"rules.json\"\n        segment_fields:\n          origin: AWS::Origin::Name\n          aws:\n            key: value\n            list:\n              - test\n              - test\n    )EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  XRayTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr xray_tracer = factory.createHttpTracer(*message, context);\n  ASSERT_NE(nullptr, xray_tracer);\n}\n\n} // namespace\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/xray/fuzz_test.cc",
    "content": "#include \"extensions/tracers/xray/util.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\n// TODO(@marcomagdy): @htuch suggests to compare results with re2 (after replacing * with .* and ?\n// with '.' and doing proper regex escaping)\nDEFINE_FUZZER(const uint8_t* buf, size_t len) {\n  absl::string_view pattern, input;\n  if (len > 1) {\n    pattern = absl::string_view(reinterpret_cast<const char*>(buf), len / 2);\n    input = absl::string_view(reinterpret_cast<const char*>(buf + len / 2), len - len / 2);\n    wildcardMatch(pattern, input);\n  } else { // buf is a single byte, use it for both pattern and input\n    absl::string_view sv(reinterpret_cast<const char*>(buf), len);\n    wildcardMatch(sv, \"hello\");\n    wildcardMatch(\"*\", sv);\n    wildcardMatch(\"?\", sv);\n  }\n}\n\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/xray/localized_sampling_test.cc",
    "content": "#include \"extensions/tracers/xray/localized_sampling.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nnamespace {\n\nclass LocalizedSamplingStrategyTest : public ::testing::Test {\nprotected:\n  Event::SimulatedTimeSystem time_system_;\n};\n\nTEST_F(LocalizedSamplingStrategyTest, EmptyRules) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  LocalizedSamplingStrategy strategy{\"\", random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, BadJson) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  LocalizedSamplingStrategy strategy{\"{{}\", random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, ValidCustomRules) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_FALSE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, InvalidRate) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 1.5\n    }\n  ],\n 
 \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, InvalidFixedTarget) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 4.2,\n      \"rate\": 0.1\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingRate) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingFixedTarget) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"rate\": 0.5\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  
ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, WrongVersion) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto wrong_version = R\"EOF(\n{\n  \"version\": 1,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.5\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{wrong_version, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, MissingVersion) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto missing_version = R\"EOF(\n{\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.5\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{missing_version, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, MissingDefaultRules) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ]\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, CustomRuleHostIsNotString) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  
\"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": null,\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, CustomRuleHttpMethodIsNotString) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": 42,\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, CustomRuleUrlPathIsNotString) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": { \"another\": \"object\" },\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingFixedTarget) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": 
\"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingRate) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, CustomRuleArrayElementWithWrongType) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0\n    },\n    \"should be an array, not string\"\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeFixedRate) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": -1,\n      \"rate\": 0.05\n    }\n  ],\n  
\"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeRate) {\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/api/move/*\",\n      \"fixed_target\": 0,\n      \"rate\": 0.05\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": -0.1\n  }\n}\n  )EOF\";\n  LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_};\n  ASSERT_TRUE(strategy.usingDefaultManifest());\n}\n\nTEST_F(LocalizedSamplingStrategyTest, TraceOnlyFromReservoir) {\n  NiceMock<Random::MockRandomGenerator> rng;\n  EXPECT_CALL(rng, random()).WillRepeatedly(Return(90));\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"*\",\n      \"fixed_target\": 1,\n      \"rate\": 0.5\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.5\n  }\n}\n  )EOF\";\n\n  LocalizedSamplingStrategy strategy{rules_json, rng, time_system_};\n  ASSERT_FALSE(strategy.usingDefaultManifest());\n\n  SamplingRequest req;\n  ASSERT_TRUE(strategy.shouldTrace(req)); // first one should be traced\n  int i = 10;\n  while (i-- > 0) {\n    ASSERT_FALSE(strategy.shouldTrace(req));\n  }\n}\n\nTEST_F(LocalizedSamplingStrategyTest, TraceFromReservoirAndByRate) {\n  NiceMock<Random::MockRandomGenerator> rng;\n  EXPECT_CALL(rng, random()).WillRepeatedly(Return(1));\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n     
 \"http_method\": \"*\",\n      \"url_path\": \"*\",\n      \"fixed_target\": 1,\n      \"rate\": 0.1\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 1,\n    \"rate\": 0.5\n  }\n}\n  )EOF\";\n\n  LocalizedSamplingStrategy strategy{rules_json, rng, time_system_};\n  ASSERT_FALSE(strategy.usingDefaultManifest());\n\n  SamplingRequest req;\n  int i = 10;\n  while (i-- > 0) {\n    ASSERT_TRUE(strategy.shouldTrace(req));\n  }\n}\n\nTEST_F(LocalizedSamplingStrategyTest, NoMatchingHost) {\n  NiceMock<Random::MockRandomGenerator> rng;\n  // this following value doesn't affect the test\n  EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/));\n  // the following rules say:\n  // \"Sample 1 request/sec then 90% of the requests there after. Requests must have example.com as\n  // its host\"\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"Player moves.\",\n      \"host\": \"example.com\",\n      \"http_method\": \"*\",\n      \"url_path\": \"*\",\n      \"fixed_target\": 1,\n      \"rate\": 0.9\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 0,\n    \"rate\": 0\n  }\n}\n  )EOF\";\n\n  LocalizedSamplingStrategy strategy{rules_json, rng, time_system_};\n  ASSERT_FALSE(strategy.usingDefaultManifest());\n\n  SamplingRequest req;\n  req.host_ = \"amazon.com\"; // host does not match, so default rules apply.\n  int i = 10;\n  while (i-- > 0) {\n    ASSERT_FALSE(strategy.shouldTrace(req));\n  }\n}\n\nTEST_F(LocalizedSamplingStrategyTest, NoMatchingHttpMethod) {\n  NiceMock<Random::MockRandomGenerator> rng;\n  // this following value doesn't affect the test\n  EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/));\n  // the following rules say:\n  // \"Sample 1 request/sec then 90% of the requests there after. 
Requests must have example.com as\n  // its host\"\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"Player moves.\",\n      \"host\": \"*\",\n      \"http_method\": \"POST\",\n      \"url_path\": \"*\",\n      \"fixed_target\": 1,\n      \"rate\": 0.9\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 0,\n    \"rate\": 0\n  }\n}\n  )EOF\";\n\n  LocalizedSamplingStrategy strategy{rules_json, rng, time_system_};\n  ASSERT_FALSE(strategy.usingDefaultManifest());\n\n  SamplingRequest req;\n  req.http_method_ = \"GET\"; // method does not match, so default rules apply.\n  int i = 10;\n  while (i-- > 0) {\n    ASSERT_FALSE(strategy.shouldTrace(req));\n  }\n}\n\nTEST_F(LocalizedSamplingStrategyTest, NoMatchingPath) {\n  NiceMock<Random::MockRandomGenerator> rng;\n  // this following value doesn't affect the test\n  EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/));\n  // the following rules say:\n  // \"Sample 1 request/sec then 90% of the requests there after. Requests must have example.com as\n  // its host\"\n  constexpr auto rules_json = R\"EOF(\n{\n  \"version\": 2,\n  \"rules\": [\n    {\n      \"description\": \"X-Ray rule\",\n      \"host\": \"*\",\n      \"http_method\": \"*\",\n      \"url_path\": \"/available/*\",\n      \"fixed_target\": 1,\n      \"rate\": 0.9\n    }\n  ],\n  \"default\": {\n    \"fixed_target\": 0,\n    \"rate\": 0\n  }\n}\n  )EOF\";\n\n  LocalizedSamplingStrategy strategy{rules_json, rng, time_system_};\n  ASSERT_FALSE(strategy.usingDefaultManifest());\n\n  SamplingRequest req;\n  req.http_url_ = \"/\"; // method does not match, so default rules apply.\n  int i = 10;\n  while (i-- > 0) {\n    ASSERT_FALSE(strategy.shouldTrace(req));\n  }\n}\n\n} // namespace\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/xray/tracer_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"source/extensions/tracers/xray/daemon.pb.h\"\n\n#include \"extensions/tracers/xray/tracer.h\"\n#include \"extensions/tracers/xray/xray_configuration.h\"\n\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/str_split.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nnamespace {\nusing ::testing::_;\nusing ::testing::Invoke;\nusing ::testing::Return;\nusing namespace source::extensions::tracers::xray;\n\nstruct MockDaemonBroker : DaemonBroker {\n  MockDaemonBroker(const std::string& endpoint) { UNREFERENCED_PARAMETER(endpoint); }\n  MOCK_METHOD(void, send, (std::string const&), (const, override));\n};\n\nclass XRayTracerTest : public ::testing::Test {\npublic:\n  XRayTracerTest() : broker_(std::make_unique<MockDaemonBroker>(\"127.0.0.1:2000\")) {}\n\n  absl::flat_hash_map<std::string, ProtobufWkt::Value> aws_metadata_;\n  NiceMock<Server::MockInstance> server_;\n  std::unique_ptr<MockDaemonBroker> broker_;\n};\n\nTEST_F(XRayTracerTest, SerializeSpanTest) {\n  constexpr auto expected_span_name = \"Service 1\";\n  constexpr auto expected_origin_name = \"AWS::Service::Proxy\";\n  constexpr auto expected_aws_key_value = \"test_value\";\n  constexpr auto expected_operation_name = \"Create\";\n  constexpr auto expected_http_method = \"POST\";\n  constexpr auto expected_http_url = \"/first/second\";\n  constexpr auto expected_user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X)\";\n  constexpr uint32_t expected_status_code = 202;\n  constexpr uint32_t expected_content_length = 1337;\n  constexpr auto expected_client_ip = \"10.0.0.100\";\n  
constexpr auto expected_x_forwarded_for = false;\n  constexpr auto expected_upstream_address = \"10.0.0.200\";\n\n  auto on_send = [&](const std::string& json) {\n    ASSERT_FALSE(json.empty());\n    daemon::Segment s;\n    MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor());\n    ASSERT_FALSE(s.trace_id().empty());\n    ASSERT_FALSE(s.id().empty());\n    ASSERT_EQ(1, s.annotations().size());\n    ASSERT_TRUE(s.parent_id().empty());\n    ASSERT_STREQ(expected_span_name, s.name().c_str());\n    ASSERT_STREQ(expected_origin_name, s.origin().c_str());\n    ASSERT_STREQ(expected_aws_key_value, s.aws().fields().at(\"key\").string_value().c_str());\n    ASSERT_STREQ(expected_http_method,\n                 s.http().request().fields().at(\"method\").string_value().c_str());\n    ASSERT_STREQ(expected_http_url, s.http().request().fields().at(\"url\").string_value().c_str());\n    ASSERT_STREQ(expected_user_agent,\n                 s.http().request().fields().at(\"user_agent\").string_value().c_str());\n    ASSERT_DOUBLE_EQ(expected_status_code,\n                     s.http().response().fields().at(\"status\").number_value());\n    ASSERT_DOUBLE_EQ(expected_content_length,\n                     s.http().response().fields().at(\"content_length\").number_value());\n    ASSERT_STREQ(expected_client_ip,\n                 s.http().request().fields().at(\"client_ip\").string_value().c_str());\n    ASSERT_EQ(expected_x_forwarded_for,\n              s.http().request().fields().at(\"x_forwarded_for\").bool_value());\n    ASSERT_STREQ(expected_upstream_address, s.annotations().at(\"upstream_address\").c_str());\n  };\n\n  EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send));\n  aws_metadata_.insert({\"key\", ValueUtil::stringValue(expected_aws_key_value)});\n  Tracer tracer{expected_span_name, expected_origin_name, aws_metadata_,\n                std::move(broker_), server_.timeSource(), server_.api().randomGenerator()};\n  auto span = 
tracer.startSpan(expected_operation_name, server_.timeSource().systemTime(),\n                               absl::nullopt /*headers*/);\n  span->setTag(\"http.method\", expected_http_method);\n  span->setTag(\"http.url\", expected_http_url);\n  span->setTag(\"user_agent\", expected_user_agent);\n  span->setTag(\"http.status_code\", absl::StrFormat(\"%d\", expected_status_code));\n  span->setTag(\"response_size\", absl::StrFormat(\"%d\", expected_content_length));\n  span->setTag(\"peer.address\", expected_client_ip);\n  span->setTag(\"upstream_address\", expected_upstream_address);\n  span->finishSpan();\n}\n\nTEST_F(XRayTracerTest, NonSampledSpansNotSerialized) {\n  Tracer tracer{\"\" /*span name*/,   \"\" /*origin*/,        aws_metadata_,\n                std::move(broker_), server_.timeSource(), server_.api().randomGenerator()};\n  auto span = tracer.createNonSampledSpan();\n  span->finishSpan();\n}\n\nTEST_F(XRayTracerTest, BaggageNotImplemented) {\n  Tracer tracer{\"\" /*span name*/,   \"\" /*origin*/,        aws_metadata_,\n                std::move(broker_), server_.timeSource(), server_.api().randomGenerator()};\n  auto span = tracer.createNonSampledSpan();\n  span->setBaggage(\"baggage_key\", \"baggage_value\");\n  span->finishSpan();\n\n  // Baggage isn't supported so getBaggage should always return empty\n  ASSERT_EQ(\"\", span->getBaggage(\"baggage_key\"));\n}\n\nTEST_F(XRayTracerTest, ChildSpanHasParentInfo) {\n  NiceMock<Tracing::MockConfig> config;\n  constexpr auto expected_span_name = \"Service 1\";\n  constexpr auto expected_operation_name = \"Create\";\n  const auto& broker = *broker_;\n  Tracer tracer{expected_span_name,   \"\",\n                aws_metadata_,        std::move(broker_),\n                server_.timeSource(), server_.api().randomGenerator()};\n  // Span id taken from random generator\n  EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(999));\n  auto parent_span = tracer.startSpan(expected_operation_name, 
server_.timeSource().systemTime(),\n                                      absl::nullopt /*headers*/);\n\n  const XRay::Span* xray_parent_span = static_cast<XRay::Span*>(parent_span.get());\n  auto on_send = [&](const std::string& json) {\n    ASSERT_FALSE(json.empty());\n    daemon::Segment s;\n    MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor());\n    // Hex encoded 64 bit identifier\n    ASSERT_STREQ(\"00000000000003e7\", s.parent_id().c_str());\n    ASSERT_STREQ(expected_span_name, s.name().c_str());\n    ASSERT_STREQ(xray_parent_span->traceId().c_str(), s.trace_id().c_str());\n    ASSERT_STREQ(\"0000003d25bebe62\", s.id().c_str());\n  };\n\n  EXPECT_CALL(broker, send(_)).WillOnce(Invoke(on_send));\n\n  // Span id taken from random generator\n  EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(262626262626));\n  auto child =\n      parent_span->spawnChild(config, expected_operation_name, server_.timeSource().systemTime());\n  child->finishSpan();\n}\n\nTEST_F(XRayTracerTest, UseExistingHeaderInformation) {\n  XRayHeader xray_header;\n  xray_header.trace_id_ = \"a\";\n  xray_header.parent_id_ = \"b\";\n  constexpr auto span_name = \"my span\";\n  constexpr auto operation_name = \"my operation\";\n\n  Tracer tracer{span_name,\n                \"\",\n                aws_metadata_,\n                std::move(broker_),\n                server_.timeSource(),\n                server_.api().randomGenerator()};\n  auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header);\n\n  const XRay::Span* xray_span = static_cast<XRay::Span*>(span.get());\n  ASSERT_STREQ(xray_header.trace_id_.c_str(), xray_span->traceId().c_str());\n  ASSERT_STREQ(xray_header.parent_id_.c_str(), xray_span->parentId().c_str());\n}\n\nTEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) {\n  constexpr auto span_name = \"my span\";\n  constexpr auto operation_name = \"my operation\";\n\n  Tracer tracer{span_name,\n            
    \"\",\n                aws_metadata_,\n                std::move(broker_),\n                server_.timeSource(),\n                server_.api().randomGenerator()};\n  auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(),\n                               absl::nullopt /*headers*/);\n  Http::TestRequestHeaderMapImpl request_headers;\n  span->injectContext(request_headers);\n  auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader});\n  ASSERT_NE(header, nullptr);\n  ASSERT_NE(header->value().getStringView().find(\"Root=\"), absl::string_view::npos);\n  ASSERT_NE(header->value().getStringView().find(\"Parent=\"), absl::string_view::npos);\n  ASSERT_NE(header->value().getStringView().find(\"Sampled=1\"), absl::string_view::npos);\n}\n\nTEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) {\n  constexpr auto span_name = \"my span\";\n  Tracer tracer{span_name,\n                \"\",\n                aws_metadata_,\n                std::move(broker_),\n                server_.timeSource(),\n                server_.api().randomGenerator()};\n  auto span = tracer.createNonSampledSpan();\n  Http::TestRequestHeaderMapImpl request_headers;\n  span->injectContext(request_headers);\n  auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader});\n  ASSERT_NE(header, nullptr);\n  ASSERT_NE(header->value().getStringView().find(\"Root=\"), absl::string_view::npos);\n  ASSERT_NE(header->value().getStringView().find(\"Parent=\"), absl::string_view::npos);\n  ASSERT_NE(header->value().getStringView().find(\"Sampled=0\"), absl::string_view::npos);\n}\n\nTEST_F(XRayTracerTest, TraceIDFormatTest) {\n  constexpr auto span_name = \"my span\";\n  Tracer tracer{span_name,\n                \"\",\n                aws_metadata_,\n                std::move(broker_),\n                server_.timeSource(),\n                server_.api().randomGenerator()};\n  auto span = tracer.createNonSampledSpan(); // startSpan and 
createNonSampledSpan use the same\n                                             // logic to create a trace ID\n  XRay::Span* xray_span = span.get();\n  std::vector<std::string> parts = absl::StrSplit(xray_span->traceId(), absl::ByChar('-'));\n  ASSERT_EQ(3, parts.size());\n  ASSERT_EQ(1, parts[0].length());\n  ASSERT_EQ(8, parts[1].length());\n  ASSERT_EQ(24, parts[2].length());\n}\n\nclass XRayDaemonTest : public testing::TestWithParam<Network::Address::IpVersion> {};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, XRayDaemonTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(XRayDaemonTest, VerifyUdpPacketContents) {\n  absl::flat_hash_map<std::string, ProtobufWkt::Value> aws_metadata;\n  NiceMock<Server::MockInstance> server;\n  Network::Test::UdpSyncPeer xray_fake_daemon(GetParam());\n  const std::string daemon_endpoint = xray_fake_daemon.localAddress()->asString();\n  Tracer tracer{\"my_segment\",        \"origin\",\n                aws_metadata,        std::make_unique<DaemonBrokerImpl>(daemon_endpoint),\n                server.timeSource(), server.api().randomGenerator()};\n  auto span = tracer.startSpan(\"ingress\" /*operation name*/, server.timeSource().systemTime(),\n                               absl::nullopt /*headers*/);\n\n  span->setTag(\"http.status_code\", \"202\");\n  span->finishSpan();\n\n  Network::UdpRecvData datagram;\n  xray_fake_daemon.recv(datagram);\n\n  const std::string header_json = R\"EOF({\"format\":\"json\",\"version\":1})EOF\";\n  // The UDP datagram contains two independent, consecutive JSON documents; a header and a body.\n  const std::string payload = datagram.buffer_->toString();\n  // Make sure the payload has enough data.\n  ASSERT_GT(payload.length(), header_json.length());\n  // Skip the header since we're only interested in the body.\n  const std::string body = payload.substr(header_json.length());\n\n  EXPECT_EQ(0, 
payload.find(header_json));\n\n  // Deserialize the body to verify it.\n  source::extensions::tracers::xray::daemon::Segment seg;\n  MessageUtil::loadFromJson(body, seg, ProtobufMessage::getNullValidationVisitor());\n  EXPECT_STREQ(\"my_segment\", seg.name().c_str());\n  for (auto&& f : seg.http().request().fields()) {\n    // there should only be a single field\n    EXPECT_EQ(202, f.second.number_value());\n  }\n}\n\n} // namespace\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/xray/util_test.cc",
    "content": "#include \"extensions/tracers/xray/util.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nTEST(XRayWildcardTest, MatchingEmpty) {\n  ASSERT_TRUE(wildcardMatch(\"\", \"\"));\n  ASSERT_FALSE(wildcardMatch(\"\", \"42\"));\n  ASSERT_TRUE(wildcardMatch(\"*\", \"\"));\n  ASSERT_FALSE(wildcardMatch(\"?\", \"\"));\n}\n\nTEST(XRayWildcardTest, MatchIdentityCaseInsensitive) {\n  ASSERT_TRUE(wildcardMatch(\"foo\", \"foo\"));\n  ASSERT_TRUE(wildcardMatch(\"foo\", \"FOO\"));\n  ASSERT_TRUE(wildcardMatch(\"foo\", \"Foo\"));\n  ASSERT_TRUE(wildcardMatch(\"6543210\", \"6543210\"));\n}\n\nTEST(XRayWildcardTest, MatchIdentityExtra) {\n  ASSERT_FALSE(wildcardMatch(\"foo\", \"foob\"));\n  ASSERT_FALSE(wildcardMatch(\"foo\", \"xfoo\"));\n  ASSERT_FALSE(wildcardMatch(\"foo\", \"bar\"));\n}\n\nTEST(XRayWildcardTest, SingleWildcard) {\n  ASSERT_FALSE(wildcardMatch(\"f?o\", \"boo\"));\n  ASSERT_TRUE(wildcardMatch(\"fo?\", \"foo\"));\n}\n\nTEST(XRayWildcardTest, MultipleWildcards) {\n  ASSERT_FALSE(wildcardMatch(\"f??\", \"boo\"));\n  ASSERT_TRUE(wildcardMatch(\"he??o\", \"Hello\"));\n  ASSERT_TRUE(wildcardMatch(\"?o?\", \"foo\"));\n}\n\nTEST(XRayWildcardTest, GlobMatch) {\n  ASSERT_TRUE(wildcardMatch(\"f?o*ba*\", \"foobazbar\"));\n  ASSERT_TRUE(wildcardMatch(\"*oo\", \"foo\"));\n  ASSERT_TRUE(wildcardMatch(\"*o?\", \"foo\"));\n  ASSERT_TRUE(wildcardMatch(\"mis*spell\", \"mistily spell\"));\n  ASSERT_TRUE(wildcardMatch(\"mis*spell\", \"misspell\"));\n}\n\nTEST(XRayWildcardTest, GlobMismatch) {\n  ASSERT_FALSE(wildcardMatch(\"foo*\", \"fo0\"));\n  ASSERT_FALSE(wildcardMatch(\"fo*obar\", \"foobaz\"));\n  ASSERT_FALSE(wildcardMatch(\"mis*spellx\", \"mispellx\"));\n  ASSERT_FALSE(wildcardMatch(\"f?*\", \"boo\"));\n}\n\nTEST(XRayWildcardTest, OnlyGlob) {\n  ASSERT_TRUE(wildcardMatch(\"*\", \"foo\"));\n  ASSERT_TRUE(wildcardMatch(\"*\", \"anything\"));\n  ASSERT_TRUE(wildcardMatch(\"*\", \"12354\"));\n  
ASSERT_TRUE(wildcardMatch(\"*\", \"UPPERCASE\"));\n  ASSERT_TRUE(wildcardMatch(\"*\", \"miXEDcaSe\"));\n  ASSERT_TRUE(wildcardMatch(\"*******\", \"Envoy\"));\n}\n\nTEST(XRayWildcardTest, LengthAtLeastTwo) {\n  EXPECT_FALSE(wildcardMatch(\"??*\", \"a\"));\n  EXPECT_TRUE(wildcardMatch(\"??*\", \"aa\"));\n  EXPECT_TRUE(wildcardMatch(\"??*\", \"aaa\"));\n}\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/xray/wildcard_matcher_corpus/example",
    "content": "/match/on/th?s/*/route*\n"
  },
  {
    "path": "test/extensions/tracers/xray/xray_tracer_impl_test.cc",
    "content": "#include <string>\n\n#include \"extensions/tracers/xray/tracer.h\"\n#include \"extensions/tracers/xray/xray_configuration.h\"\n#include \"extensions/tracers/xray/xray_tracer_impl.h\"\n\n#include \"test/mocks/server/tracer_factory_context.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace XRay {\n\nnamespace {\n\nclass XRayDriverTest : public ::testing::Test {\npublic:\n  const std::string operation_name_ = \"test_operation_name\";\n  absl::flat_hash_map<std::string, ProtobufWkt::Value> aws_metadata_;\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Tracing::MockConfig> tracing_config_;\n  Http::TestRequestHeaderMapImpl request_headers_{\n      {\":authority\", \"api.amazon.com\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}};\n};\n\nTEST_F(XRayDriverTest, XRayTraceHeaderNotSampled) {\n  request_headers_.addCopy(XRayTraceHeader, \"Root=1-272793;Parent=5398ad8;Sampled=0\");\n\n  XRayConfiguration config{\"\" /*daemon_endpoint*/, \"test_segment_name\", \"\" /*sampling_rules*/,\n                           \"\" /*origin*/, aws_metadata_};\n  Driver driver(config, context_);\n\n  Tracing::Decision tracing_decision{Tracing::Reason::Sampling, false /*sampled*/};\n  Envoy::SystemTime start_time;\n  auto span = driver.startSpan(tracing_config_, request_headers_, operation_name_, start_time,\n                               tracing_decision);\n  ASSERT_NE(span, nullptr);\n  auto* xray_span = static_cast<XRay::Span*>(span.get());\n  ASSERT_FALSE(xray_span->sampled());\n}\n\nTEST_F(XRayDriverTest, XRayTraceHeaderSampled) {\n  request_headers_.addCopy(XRayTraceHeader, \"Root=1-272793;Parent=5398ad8;Sampled=1\");\n\n  XRayConfiguration config{\"\" /*daemon_endpoint*/, 
\"test_segment_name\", \"\" /*sampling_rules*/,\n                           \"\" /*origin*/, aws_metadata_};\n  Driver driver(config, context_);\n\n  Tracing::Decision tracing_decision{Tracing::Reason::Sampling, false /*sampled*/};\n  Envoy::SystemTime start_time;\n  auto span = driver.startSpan(tracing_config_, request_headers_, operation_name_, start_time,\n                               tracing_decision);\n  ASSERT_NE(span, nullptr);\n}\n\nTEST_F(XRayDriverTest, XRayTraceHeaderSamplingUnknown) {\n  request_headers_.addCopy(XRayTraceHeader, \"Root=1-272793;Parent=5398ad8\");\n\n  XRayConfiguration config{\"\" /*daemon_endpoint*/, \"test_segment_name\", \"\" /*sampling_rules*/,\n                           \"\" /*origin*/, aws_metadata_};\n  Driver driver(config, context_);\n\n  Tracing::Decision tracing_decision{Tracing::Reason::Sampling, false /*sampled*/};\n  Envoy::SystemTime start_time;\n  auto span = driver.startSpan(tracing_config_, request_headers_, operation_name_, start_time,\n                               tracing_decision);\n  // sampling should fall back to the default manifest since:\n  // a) there is no sampling decision in the X-Ray header\n  // b) there are no sampling rules passed, so the default rules apply (1 req/sec and 5% after that\n  // within that second)\n  ASSERT_NE(span, nullptr);\n}\n\nTEST_F(XRayDriverTest, NoXRayTracerHeader) {\n  XRayConfiguration config{\"\" /*daemon_endpoint*/, \"test_segment_name\", \"\" /*sampling_rules*/,\n                           \"\" /*origin*/, aws_metadata_};\n  Driver driver(config, context_);\n\n  Tracing::Decision tracing_decision{Tracing::Reason::Sampling, false /*sampled*/};\n  Envoy::SystemTime start_time;\n  auto span = driver.startSpan(tracing_config_, request_headers_, operation_name_, start_time,\n                               tracing_decision);\n  // sampling should fall back to the default manifest since:\n  // a) there is no X-Ray header to determine the sampling decision\n  // b) there are no 
sampling rules passed, so the default rules apply (1 req/sec and 5% after that\n  // within that second)\n  ASSERT_NE(span, nullptr);\n}\n\n} // namespace\n} // namespace XRay\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/zipkin/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"zipkin_test\",\n    srcs = [\n        \"span_buffer_test.cc\",\n        \"span_context_extractor_test.cc\",\n        \"tracer_test.cc\",\n        \"zipkin_core_types_test.cc\",\n        \"zipkin_tracer_impl_test.cc\",\n    ],\n    extension_name = \"envoy.tracers.zipkin\",\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/common:hex_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/extensions/tracers/zipkin:zipkin_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n        \"//test/mocks/upstream:thread_local_cluster_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.tracers.zipkin\",\n    deps = [\n        \"//source/extensions/tracers/zipkin:config\",\n        \"//test/mocks/server:tracer_factory_context_mocks\",\n        
\"//test/mocks/server:tracer_factory_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/trace/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/tracers/zipkin/config_test.cc",
    "content": "#include \"envoy/config/trace/v3/http_tracer.pb.h\"\n#include \"envoy/config/trace/v3/zipkin.pb.h\"\n#include \"envoy/config/trace/v3/zipkin.pb.validate.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/tracers/zipkin/config.h\"\n\n#include \"test/mocks/server/tracer_factory.h\"\n#include \"test/mocks/server/tracer_factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::Eq;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\nnamespace {\n\nTEST(ZipkinTracerConfigTest, ZipkinHttpTracer) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n\n  EXPECT_CALL(context.server_factory_context_.cluster_manager_, get(Eq(\"fake_cluster\")))\n      .WillRepeatedly(\n          Return(&context.server_factory_context_.cluster_manager_.thread_local_cluster_));\n\n  const std::string yaml_string = R\"EOF(\n  http:\n    name: zipkin\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n      collector_cluster: fake_cluster\n      collector_endpoint: /api/v1/spans\n      collector_endpoint_version: HTTP_JSON\n  )EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  ZipkinTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr zipkin_tracer = factory.createHttpTracer(*message, context);\n  EXPECT_NE(nullptr, zipkin_tracer);\n}\n\nTEST(ZipkinTracerConfigTest, ZipkinHttpTracerWithTypedConfig) {\n  NiceMock<Server::Configuration::MockTracerFactoryContext> context;\n\n  EXPECT_CALL(context.server_factory_context_.cluster_manager_, get(Eq(\"fake_cluster\")))\n      .WillRepeatedly(\n          Return(&context.server_factory_context_.cluster_manager_.thread_local_cluster_));\n\n  const std::string yaml_string = 
R\"EOF(\n  http:\n    name: zipkin\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n      collector_cluster: fake_cluster\n      collector_endpoint: /api/v2/spans\n      collector_endpoint_version: HTTP_PROTO\n  )EOF\";\n\n  envoy::config::trace::v3::Tracing configuration;\n  TestUtility::loadFromYaml(yaml_string, configuration);\n\n  ZipkinTracerFactory factory;\n  auto message = Config::Utility::translateToFactoryConfig(\n      configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory);\n  Tracing::HttpTracerSharedPtr zipkin_tracer = factory.createHttpTracer(*message, context);\n  EXPECT_NE(nullptr, zipkin_tracer);\n}\n\n// Test that the deprecated extension name still functions.\nTEST(ZipkinTracerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {\n  const std::string deprecated_name = \"envoy.zipkin\";\n\n  ASSERT_NE(nullptr, Registry::FactoryRegistry<Server::Configuration::TracerFactory>::getFactory(\n                         deprecated_name));\n}\n\n} // namespace\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/zipkin/span_buffer_test.cc",
    "content": "#include \"envoy/config/trace/v3/zipkin.pb.h\"\n\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"extensions/tracers/zipkin/span_buffer.h\"\n#include \"extensions/tracers/zipkin/util.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_format.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::HasSubstr;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\nnamespace {\n\n// If this default timestamp is wrapped as double (using ValueUtil::numberValue()) and then it is\n// serialized using Protobuf::util::MessageToJsonString, it renders as: 1.58432429547687e+15.\nconstexpr uint64_t DEFAULT_TEST_TIMESTAMP = 1584324295476870;\nconstexpr uint64_t DEFAULT_TEST_DURATION = 2584324295476870;\nconst Util::Replacements DEFAULT_TEST_REPLACEMENTS = {\n    {\"DEFAULT_TEST_TIMESTAMP\", std::to_string(DEFAULT_TEST_TIMESTAMP)}};\nconst Util::Replacements DEFAULT_TEST_DURATIONS = {\n    {\"DEFAULT_TEST_DURATION\", std::to_string(DEFAULT_TEST_DURATION)}};\n\nenum class IpType { V4, V6 };\n\nEndpoint createEndpoint(const IpType ip_type) {\n  Endpoint endpoint;\n  endpoint.setAddress(ip_type == IpType::V6\n                          ? 
Envoy::Network::Utility::parseInternetAddress(\n                                \"2001:db8:85a3::8a2e:370:4444\", 7334, true)\n                          : Envoy::Network::Utility::parseInternetAddress(\"1.2.3.4\", 8080, false));\n  endpoint.setServiceName(\"service1\");\n  return endpoint;\n}\n\nAnnotation createAnnotation(const absl::string_view value, const IpType ip_type) {\n  Annotation annotation;\n  annotation.setValue(value.data());\n  annotation.setTimestamp(DEFAULT_TEST_TIMESTAMP);\n  annotation.setEndpoint(createEndpoint(ip_type));\n  return annotation;\n}\n\nBinaryAnnotation createTag() {\n  BinaryAnnotation tag;\n  tag.setKey(\"response_size\");\n  // ensure duration replacement doesn't override this value.\n  tag.setValue(std::to_string(DEFAULT_TEST_DURATION));\n  return tag;\n}\n\nSpan createSpan(const std::vector<absl::string_view>& annotation_values, const IpType ip_type) {\n  Event::SimulatedTimeSystem simulated_time_system;\n  Span span(simulated_time_system);\n  span.setId(1);\n  span.setTraceId(1);\n  span.setDuration(DEFAULT_TEST_DURATION);\n  std::vector<Annotation> annotations;\n  annotations.reserve(annotation_values.size());\n  for (absl::string_view value : annotation_values) {\n    annotations.push_back(createAnnotation(value, ip_type));\n  }\n  span.setAnnotations(annotations);\n  span.setBinaryAnnotations({createTag()});\n  return span;\n}\n\n// To render a string with DEFAULT_TEST_TIMESTAMP and DEFAULT_TEST_DURATION placeholder with\n// DEFAULT_TEST_TIMESTAMP and DEFAULT_TEST_DURATION values.\nstd::string withDefaultTimestampAndDuration(const std::string& expected) {\n  const auto with_default_timestamp = absl::StrReplaceAll(expected, DEFAULT_TEST_REPLACEMENTS);\n  return absl::StrReplaceAll(with_default_timestamp, DEFAULT_TEST_DURATIONS);\n}\n\n// To wrap JSON array string in a object for JSON string comparison through JsonStringEq test\n// utility. 
Every DEFAULT_TEST_TIMESTAMP and DEFAULT_TEST_DURATION strings found in array_string\n// will be replaced by DEFAULT_TEST_REPLACEMENTS and DEFAULT_TEST_DURATIONS respectively. i.e. to\n// replace every DEFAULT_TEST_TIMESTAMP string occurrence with DEFAULT_TEST_TIMESTAMP value (the\n// same with DEFAULT_TEST_DURATION).\nstd::string wrapAsObject(absl::string_view array_string) {\n  return withDefaultTimestampAndDuration(absl::StrFormat(R\"({\"root\":%s})\", array_string));\n}\n\nvoid expectSerializedBuffer(SpanBuffer& buffer, const bool delay_allocation,\n                            const std::vector<std::string>& expected_list) {\n  Event::SimulatedTimeSystem test_time;\n\n  EXPECT_EQ(0ULL, buffer.pendingSpans());\n  EXPECT_EQ(\"[]\", buffer.serialize());\n\n  if (delay_allocation) {\n    EXPECT_FALSE(buffer.addSpan(createSpan({\"cs\", \"sr\"}, IpType::V4)));\n    buffer.allocateBuffer(expected_list.size() + 1);\n  }\n\n  // Add span after allocation, but missing required annotations should be false.\n  EXPECT_FALSE(buffer.addSpan(Span(test_time.timeSystem())));\n  EXPECT_FALSE(buffer.addSpan(createSpan({\"aa\"}, IpType::V4)));\n\n  for (uint64_t i = 0; i < expected_list.size(); i++) {\n    buffer.addSpan(createSpan({\"cs\", \"sr\"}, IpType::V4));\n    EXPECT_EQ(i + 1, buffer.pendingSpans());\n    EXPECT_THAT(wrapAsObject(expected_list.at(i)), JsonStringEq(wrapAsObject(buffer.serialize())));\n  }\n\n  // Add a valid span. 
Valid means can be serialized to v2.\n  EXPECT_TRUE(buffer.addSpan(createSpan({\"cs\"}, IpType::V4)));\n  // While the span is valid, however the buffer is full.\n  EXPECT_FALSE(buffer.addSpan(createSpan({\"cs\", \"sr\"}, IpType::V4)));\n\n  buffer.clear();\n  EXPECT_EQ(0ULL, buffer.pendingSpans());\n  EXPECT_EQ(\"[]\", buffer.serialize());\n}\n\ntemplate <typename Type> std::string serializedMessageToJson(const std::string& serialized) {\n  Type message;\n  message.ParseFromString(serialized);\n  std::string json;\n  Protobuf::util::MessageToJsonString(message, &json);\n  return json;\n}\n\nTEST(ZipkinSpanBufferTest, TestSerializeTimestamp) {\n  const std::string default_timestamp_string = std::to_string(DEFAULT_TEST_TIMESTAMP);\n\n  ProtobufWkt::Struct object;\n  auto* fields = object.mutable_fields();\n  Util::Replacements replacements;\n  (*fields)[\"timestamp\"] = Util::uint64Value(DEFAULT_TEST_TIMESTAMP, \"timestamp\", replacements);\n\n  ASSERT_EQ(1, replacements.size());\n  EXPECT_EQ(absl::StrCat(\"\\\"timestamp\\\":\\\"\", default_timestamp_string, \"\\\"\"),\n            replacements.at(0).first);\n  EXPECT_EQ(absl::StrCat(\"\\\"timestamp\\\":\", default_timestamp_string), replacements.at(0).second);\n}\n\nTEST(ZipkinSpanBufferTest, ConstructBuffer) {\n  const std::string expected1 =\n      withDefaultTimestampAndDuration(R\"([{\"traceId\":\"0000000000000001\",)\"\n                                      R\"(\"name\":\"\",)\"\n                                      R\"(\"id\":\"0000000000000001\",)\"\n                                      R\"(\"duration\":DEFAULT_TEST_DURATION,)\"\n                                      R\"(\"annotations\":[{\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                                      R\"(\"value\":\"cs\",)\"\n                                      R\"(\"endpoint\":{\"ipv4\":\"1.2.3.4\",)\"\n                                      R\"(\"port\":8080,)\"\n                                      
R\"(\"serviceName\":\"service1\"}},)\"\n                                      R\"({\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                                      R\"(\"value\":\"sr\",)\"\n                                      R\"(\"endpoint\":{\"ipv4\":\"1.2.3.4\",)\"\n                                      R\"(\"port\":8080,)\"\n                                      R\"(\"serviceName\":\"service1\"}}],)\"\n                                      R\"(\"binaryAnnotations\":[{\"key\":\"response_size\",)\"\n                                      R\"(\"value\":\"DEFAULT_TEST_DURATION\"}]}])\");\n\n  const std::string expected2 =\n      withDefaultTimestampAndDuration(R\"([{\"traceId\":\"0000000000000001\",)\"\n                                      R\"(\"name\":\"\",)\"\n                                      R\"(\"id\":\"0000000000000001\",)\"\n                                      R\"(\"duration\":DEFAULT_TEST_DURATION,)\"\n                                      R\"(\"annotations\":[{\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                                      R\"(\"value\":\"cs\",)\"\n                                      R\"(\"endpoint\":{\"ipv4\":\"1.2.3.4\",)\"\n                                      R\"(\"port\":8080,)\"\n                                      R\"(\"serviceName\":\"service1\"}},)\"\n                                      R\"({\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                                      R\"(\"value\":\"sr\",)\"\n                                      R\"(\"endpoint\":{\"ipv4\":\"1.2.3.4\",)\"\n                                      R\"(\"port\":8080,)\"\n                                      R\"(\"serviceName\":\"service1\"}}],)\"\n                                      R\"(\"binaryAnnotations\":[{\"key\":\"response_size\",)\"\n                                      R\"(\"value\":\"DEFAULT_TEST_DURATION\"}]},)\"\n                                      R\"({\"traceId\":\"0000000000000001\",)\"\n                                      
R\"(\"name\":\"\",)\"\n                                      R\"(\"id\":\"0000000000000001\",)\"\n                                      R\"(\"duration\":DEFAULT_TEST_DURATION,)\"\n                                      R\"(\"annotations\":[{\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                                      R\"(\"value\":\"cs\",)\"\n                                      R\"(\"endpoint\":{\"ipv4\":\"1.2.3.4\",)\"\n                                      R\"(\"port\":8080,)\"\n                                      R\"(\"serviceName\":\"service1\"}},)\"\n                                      R\"({\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                                      R\"(\"value\":\"sr\",)\"\n                                      R\"(\"endpoint\":{\"ipv4\":\"1.2.3.4\",)\"\n                                      R\"(\"port\":8080,)\"\n                                      R\"(\"serviceName\":\"service1\"}}],)\"\n                                      R\"(\"binaryAnnotations\":[{\"key\":\"response_size\",)\"\n                                      R\"(\"value\":\"DEFAULT_TEST_DURATION\"}]}])\");\n  const bool shared = true;\n  const bool delay_allocation = true;\n\n  SpanBuffer buffer1(envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1,\n                     shared);\n  expectSerializedBuffer(buffer1, delay_allocation, {expected1, expected2});\n\n  // Prepare 3 slots, since we will add one more inside the `expectSerializedBuffer` function.\n  SpanBuffer buffer2(envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1,\n                     shared, 3);\n  expectSerializedBuffer(buffer2, !delay_allocation, {expected1, expected2});\n}\n\nTEST(ZipkinSpanBufferTest, SerializeSpan) {\n  const bool shared = true;\n  SpanBuffer buffer1(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON, shared, 2);\n  buffer1.addSpan(createSpan({\"cs\"}, IpType::V4));\n  EXPECT_THAT(wrapAsObject(\"[{\"\n                           
R\"(\"traceId\":\"0000000000000001\",)\"\n                           R\"(\"id\":\"0000000000000001\",)\"\n                           R\"(\"kind\":\"CLIENT\",)\"\n                           R\"(\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                           R\"(\"duration\":DEFAULT_TEST_DURATION,)\"\n                           R\"(\"localEndpoint\":{)\"\n                           R\"(\"serviceName\":\"service1\",)\"\n                           R\"(\"ipv4\":\"1.2.3.4\",)\"\n                           R\"(\"port\":8080},)\"\n                           R\"(\"tags\":{)\"\n                           R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"},)\"\n                           \"}]\"),\n              JsonStringEq(wrapAsObject(buffer1.serialize())));\n\n  SpanBuffer buffer1_v6(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON, shared, 2);\n  buffer1_v6.addSpan(createSpan({\"cs\"}, IpType::V6));\n  EXPECT_THAT(wrapAsObject(\"[{\"\n                           R\"(\"traceId\":\"0000000000000001\",)\"\n                           R\"(\"id\":\"0000000000000001\",)\"\n                           R\"(\"kind\":\"CLIENT\",)\"\n                           R\"(\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                           R\"(\"duration\":DEFAULT_TEST_DURATION,)\"\n                           R\"(\"localEndpoint\":{)\"\n                           R\"(\"serviceName\":\"service1\",)\"\n                           R\"(\"ipv6\":\"2001:db8:85a3::8a2e:370:4444\",)\"\n                           R\"(\"port\":7334},)\"\n                           R\"(\"tags\":{)\"\n                           R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"},)\"\n                           \"}]\"),\n              JsonStringEq(wrapAsObject(buffer1_v6.serialize())));\n\n  SpanBuffer buffer2(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON, shared, 2);\n  buffer2.addSpan(createSpan({\"cs\", \"sr\"}, IpType::V4));\n  EXPECT_THAT(wrapAsObject(\"[{\"\n                           
R\"(\"traceId\":\"0000000000000001\",)\"\n                           R\"(\"id\":\"0000000000000001\",)\"\n                           R\"(\"kind\":\"CLIENT\",)\"\n                           R\"(\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                           R\"(\"duration\":DEFAULT_TEST_DURATION,)\"\n                           R\"(\"localEndpoint\":{)\"\n                           R\"(\"serviceName\":\"service1\",)\"\n                           R\"(\"ipv4\":\"1.2.3.4\",)\"\n                           R\"(\"port\":8080},)\"\n                           R\"(\"tags\":{)\"\n                           R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"}},)\"\n                           R\"({)\"\n                           R\"(\"traceId\":\"0000000000000001\",)\"\n                           R\"(\"id\":\"0000000000000001\",)\"\n                           R\"(\"kind\":\"SERVER\",)\"\n                           R\"(\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                           R\"(\"duration\":DEFAULT_TEST_DURATION,)\"\n                           R\"(\"localEndpoint\":{)\"\n                           R\"(\"serviceName\":\"service1\",)\"\n                           R\"(\"ipv4\":\"1.2.3.4\",)\"\n                           R\"(\"port\":8080},)\"\n                           R\"(\"tags\":{)\"\n                           R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"},)\"\n                           R\"(\"shared\":true)\"\n                           \"}]\"),\n              JsonStringEq(wrapAsObject(buffer2.serialize())));\n\n  SpanBuffer buffer3(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON, !shared, 2);\n  buffer3.addSpan(createSpan({\"cs\", \"sr\"}, IpType::V4));\n  EXPECT_THAT(wrapAsObject(\"[{\"\n                           R\"(\"traceId\":\"0000000000000001\",)\"\n                           R\"(\"id\":\"0000000000000001\",)\"\n                           R\"(\"kind\":\"CLIENT\",)\"\n                           R\"(\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n             
              R\"(\"duration\":DEFAULT_TEST_DURATION,)\"\n                           R\"(\"localEndpoint\":{)\"\n                           R\"(\"serviceName\":\"service1\",)\"\n                           R\"(\"ipv4\":\"1.2.3.4\",)\"\n                           R\"(\"port\":8080},)\"\n                           R\"(\"tags\":{)\"\n                           R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"}},)\"\n                           R\"({)\"\n                           R\"(\"traceId\":\"0000000000000001\",)\"\n                           R\"(\"id\":\"0000000000000001\",)\"\n                           R\"(\"kind\":\"SERVER\",)\"\n                           R\"(\"timestamp\":DEFAULT_TEST_TIMESTAMP,)\"\n                           R\"(\"duration\":DEFAULT_TEST_DURATION,)\"\n                           R\"(\"localEndpoint\":{)\"\n                           R\"(\"serviceName\":\"service1\",)\"\n                           R\"(\"ipv4\":\"1.2.3.4\",)\"\n                           R\"(\"port\":8080},)\"\n                           R\"(\"tags\":{)\"\n                           R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"})\"\n                           \"}]\"),\n              JsonStringEq(wrapAsObject(buffer3.serialize())));\n\n  SpanBuffer buffer4(envoy::config::trace::v3::ZipkinConfig::HTTP_PROTO, shared, 2);\n  buffer4.addSpan(createSpan({\"cs\"}, IpType::V4));\n  EXPECT_EQ(withDefaultTimestampAndDuration(\"{\"\n                                            R\"(\"spans\":[{)\"\n                                            R\"(\"traceId\":\"AAAAAAAAAAE=\",)\"\n                                            R\"(\"id\":\"AQAAAAAAAAA=\",)\"\n                                            R\"(\"kind\":\"CLIENT\",)\"\n                                            R\"(\"timestamp\":\"DEFAULT_TEST_TIMESTAMP\",)\"\n                                            R\"(\"duration\":\"DEFAULT_TEST_DURATION\",)\"\n                                            R\"(\"localEndpoint\":{)\"\n            
                                R\"(\"serviceName\":\"service1\",)\"\n                                            R\"(\"ipv4\":\"AQIDBA==\",)\"\n                                            R\"(\"port\":8080},)\"\n                                            R\"(\"tags\":{)\"\n                                            R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"})\"\n                                            \"}]}\"),\n            serializedMessageToJson<zipkin::proto3::ListOfSpans>(buffer4.serialize()));\n\n  SpanBuffer buffer4_v6(envoy::config::trace::v3::ZipkinConfig::HTTP_PROTO, shared, 2);\n  buffer4_v6.addSpan(createSpan({\"cs\"}, IpType::V6));\n  EXPECT_EQ(withDefaultTimestampAndDuration(\"{\"\n                                            R\"(\"spans\":[{)\"\n                                            R\"(\"traceId\":\"AAAAAAAAAAE=\",)\"\n                                            R\"(\"id\":\"AQAAAAAAAAA=\",)\"\n                                            R\"(\"kind\":\"CLIENT\",)\"\n                                            R\"(\"timestamp\":\"DEFAULT_TEST_TIMESTAMP\",)\"\n                                            R\"(\"duration\":\"DEFAULT_TEST_DURATION\",)\"\n                                            R\"(\"localEndpoint\":{)\"\n                                            R\"(\"serviceName\":\"service1\",)\"\n                                            R\"(\"ipv6\":\"IAENuIWjAAAAAIouA3BERA==\",)\"\n                                            R\"(\"port\":7334},)\"\n                                            R\"(\"tags\":{)\"\n                                            R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"})\"\n                                            \"}]}\"),\n            serializedMessageToJson<zipkin::proto3::ListOfSpans>(buffer4_v6.serialize()));\n\n  SpanBuffer buffer5(envoy::config::trace::v3::ZipkinConfig::HTTP_PROTO, shared, 2);\n  buffer5.addSpan(createSpan({\"cs\", \"sr\"}, IpType::V4));\n  
EXPECT_EQ(withDefaultTimestampAndDuration(\"{\"\n                                            R\"(\"spans\":[{)\"\n                                            R\"(\"traceId\":\"AAAAAAAAAAE=\",)\"\n                                            R\"(\"id\":\"AQAAAAAAAAA=\",)\"\n                                            R\"(\"kind\":\"CLIENT\",)\"\n                                            R\"(\"timestamp\":\"DEFAULT_TEST_TIMESTAMP\",)\"\n                                            R\"(\"duration\":\"DEFAULT_TEST_DURATION\",)\"\n                                            R\"(\"localEndpoint\":{)\"\n                                            R\"(\"serviceName\":\"service1\",)\"\n                                            R\"(\"ipv4\":\"AQIDBA==\",)\"\n                                            R\"(\"port\":8080},)\"\n                                            R\"(\"tags\":{)\"\n                                            R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"}},)\"\n                                            R\"({)\"\n                                            R\"(\"traceId\":\"AAAAAAAAAAE=\",)\"\n                                            R\"(\"id\":\"AQAAAAAAAAA=\",)\"\n                                            R\"(\"kind\":\"SERVER\",)\"\n                                            R\"(\"timestamp\":\"DEFAULT_TEST_TIMESTAMP\",)\"\n                                            R\"(\"duration\":\"DEFAULT_TEST_DURATION\",)\"\n                                            R\"(\"localEndpoint\":{)\"\n                                            R\"(\"serviceName\":\"service1\",)\"\n                                            R\"(\"ipv4\":\"AQIDBA==\",)\"\n                                            R\"(\"port\":8080},)\"\n                                            R\"(\"tags\":{)\"\n                                            R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"},)\"\n                                            R\"(\"shared\":true)\"\n           
                                 \"}]}\"),\n            serializedMessageToJson<zipkin::proto3::ListOfSpans>(buffer5.serialize()));\n\n  SpanBuffer buffer6(envoy::config::trace::v3::ZipkinConfig::HTTP_PROTO, !shared, 2);\n  buffer6.addSpan(createSpan({\"cs\", \"sr\"}, IpType::V4));\n  EXPECT_EQ(withDefaultTimestampAndDuration(\"{\"\n                                            R\"(\"spans\":[{)\"\n                                            R\"(\"traceId\":\"AAAAAAAAAAE=\",)\"\n                                            R\"(\"id\":\"AQAAAAAAAAA=\",)\"\n                                            R\"(\"kind\":\"CLIENT\",)\"\n                                            R\"(\"timestamp\":\"DEFAULT_TEST_TIMESTAMP\",)\"\n                                            R\"(\"duration\":\"DEFAULT_TEST_DURATION\",)\"\n                                            R\"(\"localEndpoint\":{)\"\n                                            R\"(\"serviceName\":\"service1\",)\"\n                                            R\"(\"ipv4\":\"AQIDBA==\",)\"\n                                            R\"(\"port\":8080},)\"\n                                            R\"(\"tags\":{)\"\n                                            R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"}},)\"\n                                            R\"({)\"\n                                            R\"(\"traceId\":\"AAAAAAAAAAE=\",)\"\n                                            R\"(\"id\":\"AQAAAAAAAAA=\",)\"\n                                            R\"(\"kind\":\"SERVER\",)\"\n                                            R\"(\"timestamp\":\"DEFAULT_TEST_TIMESTAMP\",)\"\n                                            R\"(\"duration\":\"DEFAULT_TEST_DURATION\",)\"\n                                            R\"(\"localEndpoint\":{)\"\n                                            R\"(\"serviceName\":\"service1\",)\"\n                                            R\"(\"ipv4\":\"AQIDBA==\",)\"\n                      
                      R\"(\"port\":8080},)\"\n                                            R\"(\"tags\":{)\"\n                                            R\"(\"response_size\":\"DEFAULT_TEST_DURATION\"})\"\n                                            \"}]}\"),\n            serializedMessageToJson<zipkin::proto3::ListOfSpans>(buffer6.serialize()));\n}\n\nTEST(ZipkinSpanBufferTest, TestSerializeTimestampInTheFuture) {\n  ProtobufWkt::Struct objectWithScientificNotation;\n  auto* objectWithScientificNotationFields = objectWithScientificNotation.mutable_fields();\n  (*objectWithScientificNotationFields)[\"timestamp\"] = ValueUtil::numberValue(\n      DEFAULT_TEST_TIMESTAMP); // the value of DEFAULT_TEST_TIMESTAMP is 1584324295476870.\n  const auto objectWithScientificNotationJson =\n      MessageUtil::getJsonStringFromMessage(objectWithScientificNotation, false, true);\n  // Since we use ValueUtil::numberValue to set the timestamp, we expect to\n  // see the value is rendered with scientific notation (1.58432429547687e+15).\n  EXPECT_EQ(R\"({\"timestamp\":1.58432429547687e+15})\", objectWithScientificNotationJson);\n\n  ProtobufWkt::Struct object;\n  auto* objectFields = object.mutable_fields();\n  Util::Replacements replacements;\n  (*objectFields)[\"timestamp\"] =\n      Util::uint64Value(DEFAULT_TEST_TIMESTAMP, \"timestamp\", replacements);\n  const auto objectJson = MessageUtil::getJsonStringFromMessage(object, false, true);\n  // We still have \"1584324295476870\" from MessageUtil::getJsonStringFromMessage here.\n  EXPECT_EQ(R\"({\"timestamp\":\"1584324295476870\"})\", objectJson);\n  // However, then the replacement correctly replaces \"1584324295476870\" with 1584324295476870\n  // (without quotes).\n  EXPECT_EQ(R\"({\"timestamp\":1584324295476870})\", absl::StrReplaceAll(objectJson, replacements));\n\n  SpanBuffer bufferDeprecatedJsonV1(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON, true, 2);\n  bufferDeprecatedJsonV1.addSpan(createSpan({\"cs\"}, 
IpType::V4));\n  // We do \"HasSubstr\" here since we could not compare the serialized JSON of a ProtobufWkt::Struct\n  // object, since the positions of keys are not consistent between calls.\n  EXPECT_THAT(bufferDeprecatedJsonV1.serialize(), HasSubstr(R\"(\"timestamp\":1584324295476870)\"));\n  EXPECT_THAT(bufferDeprecatedJsonV1.serialize(),\n              Not(HasSubstr(R\"(\"timestamp\":1.58432429547687e+15)\")));\n  EXPECT_THAT(bufferDeprecatedJsonV1.serialize(),\n              Not(HasSubstr(R\"(\"timestamp\":\"1584324295476870\")\")));\n  EXPECT_THAT(bufferDeprecatedJsonV1.serialize(), HasSubstr(R\"(\"duration\":2584324295476870)\"));\n  EXPECT_THAT(bufferDeprecatedJsonV1.serialize(),\n              Not(HasSubstr(R\"(\"duration\":2.584324295476870e+15)\")));\n  EXPECT_THAT(bufferDeprecatedJsonV1.serialize(),\n              Not(HasSubstr(R\"(\"duration\":\"2584324295476870\")\")));\n\n  SpanBuffer bufferJsonV2(\n      envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1, true, 2);\n  bufferJsonV2.addSpan(createSpan({\"cs\"}, IpType::V4));\n  EXPECT_THAT(bufferJsonV2.serialize(), HasSubstr(R\"(\"timestamp\":1584324295476870)\"));\n  EXPECT_THAT(bufferJsonV2.serialize(), Not(HasSubstr(R\"(\"timestamp\":1.58432429547687e+15)\")));\n  EXPECT_THAT(bufferJsonV2.serialize(), Not(HasSubstr(R\"(\"timestamp\":\"1584324295476870\")\")));\n  EXPECT_THAT(bufferJsonV2.serialize(), HasSubstr(R\"(\"duration\":2584324295476870)\"));\n  EXPECT_THAT(bufferJsonV2.serialize(), Not(HasSubstr(R\"(\"duration\":2.584324295476870e+15)\")));\n  EXPECT_THAT(bufferJsonV2.serialize(), Not(HasSubstr(R\"(\"duration\":\"2584324295476870\")\")));\n}\n\n} // namespace\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/zipkin/span_context_extractor_test.cc",
    "content": "#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/zipkin/span_context.h\"\n#include \"extensions/tracers/zipkin/span_context_extractor.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\nnamespace {\n\nconst std::string trace_id{\"0000000000000001\"};\nconst std::string trace_id_high{\"0000000000000009\"};\nconst std::string span_id{\"0000000000000003\"};\nconst std::string parent_id{\"0000000000000002\"};\n\n} // namespace\n\nTEST(ZipkinSpanContextExtractorTest, Largest) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"b3\", fmt::format(\"{}{}-{}-1-{}\", trace_id_high, trace_id, span_id, parent_id)}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_TRUE(context.second);\n  EXPECT_EQ(3, context.first.id());\n  EXPECT_EQ(2, context.first.parentId());\n  EXPECT_TRUE(context.first.is128BitTraceId());\n  EXPECT_EQ(1, context.first.traceId());\n  EXPECT_EQ(9, context.first.traceIdHigh());\n  EXPECT_TRUE(context.first.sampled());\n  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, WithoutParentDebug) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"b3\", fmt::format(\"{}{}-{}-d\", trace_id_high, trace_id, span_id)}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_TRUE(context.second);\n  EXPECT_EQ(3, context.first.id());\n  EXPECT_EQ(0, context.first.parentId());\n  EXPECT_TRUE(context.first.is128BitTraceId());\n  EXPECT_EQ(1, context.first.traceId());\n  EXPECT_EQ(9, context.first.traceIdHigh());\n  EXPECT_TRUE(context.first.sampled());\n  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, MalformedUuid) {\n  
Http::TestRequestHeaderMapImpl request_headers{{\"b3\", \"b970dafd-0d95-40aa-95d8-1d8725aebe40\"}};\n  SpanContextExtractor extractor(request_headers);\n  EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                            \"Invalid input: invalid trace id b970dafd-0d95-40\");\n  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, MiddleOfString) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"b3\", fmt::format(\"{}{}-{},\", trace_id, trace_id, span_id)}};\n  SpanContextExtractor extractor(request_headers);\n  EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                            \"Invalid input: truncated\");\n  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, DebugOnly) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"b3\", \"d\"}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_FALSE(context.second);\n  EXPECT_EQ(0, context.first.id());\n  EXPECT_EQ(0, context.first.parentId());\n  EXPECT_FALSE(context.first.is128BitTraceId());\n  EXPECT_EQ(0, context.first.traceId());\n  EXPECT_EQ(0, context.first.traceIdHigh());\n  EXPECT_FALSE(context.first.sampled());\n  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, Sampled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"b3\", \"1\"}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_FALSE(context.second);\n  EXPECT_EQ(0, context.first.id());\n  EXPECT_EQ(0, context.first.parentId());\n  EXPECT_FALSE(context.first.is128BitTraceId());\n  EXPECT_EQ(0, context.first.traceId());\n  EXPECT_EQ(0, context.first.traceIdHigh());\n  EXPECT_FALSE(context.first.sampled());\n  
EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, SampledFalse) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"b3\", \"0\"}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_FALSE(context.second);\n  EXPECT_EQ(0, context.first.id());\n  EXPECT_EQ(0, context.first.parentId());\n  EXPECT_FALSE(context.first.is128BitTraceId());\n  EXPECT_EQ(0, context.first.traceId());\n  EXPECT_EQ(0, context.first.traceIdHigh());\n  EXPECT_FALSE(context.first.sampled());\n  EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, IdNotYetSampled128) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"b3\", fmt::format(\"{}{}-{}\", trace_id_high, trace_id, span_id)}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_TRUE(context.second);\n  EXPECT_EQ(3, context.first.id());\n  EXPECT_EQ(0, context.first.parentId());\n  EXPECT_TRUE(context.first.is128BitTraceId());\n  EXPECT_EQ(1, context.first.traceId());\n  EXPECT_EQ(9, context.first.traceIdHigh());\n  EXPECT_TRUE(context.first.sampled());\n  EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, false}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, IdsUnsampled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"b3\", fmt::format(\"{}-{}-0\", trace_id, span_id)}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_TRUE(context.second);\n  EXPECT_EQ(3, context.first.id());\n  EXPECT_EQ(0, context.first.parentId());\n  EXPECT_FALSE(context.first.is128BitTraceId());\n  EXPECT_EQ(1, context.first.traceId());\n  EXPECT_EQ(0, context.first.traceIdHigh());\n  EXPECT_TRUE(context.first.sampled());\n  EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, 
true}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, ParentUnsampled) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"b3\", fmt::format(\"{}-{}-0-{}\", trace_id, span_id, parent_id)}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_TRUE(context.second);\n  EXPECT_EQ(3, context.first.id());\n  EXPECT_EQ(2, context.first.parentId());\n  EXPECT_FALSE(context.first.is128BitTraceId());\n  EXPECT_EQ(1, context.first.traceId());\n  EXPECT_EQ(0, context.first.traceIdHigh());\n  EXPECT_TRUE(context.first.sampled());\n  EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, ParentDebug) {\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\"b3\", fmt::format(\"{}-{}-d-{}\", trace_id, span_id, parent_id)}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_TRUE(context.second);\n  EXPECT_EQ(3, context.first.id());\n  EXPECT_EQ(2, context.first.parentId());\n  EXPECT_FALSE(context.first.is128BitTraceId());\n  EXPECT_EQ(1, context.first.traceId());\n  EXPECT_EQ(0, context.first.traceIdHigh());\n  EXPECT_TRUE(context.first.sampled());\n  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, IdsWithDebug) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"b3\", fmt::format(\"{}-{}-d\", trace_id, span_id)}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(true);\n  EXPECT_TRUE(context.second);\n  EXPECT_EQ(3, context.first.id());\n  EXPECT_EQ(0, context.first.parentId());\n  EXPECT_FALSE(context.first.is128BitTraceId());\n  EXPECT_EQ(1, context.first.traceId());\n  EXPECT_EQ(0, context.first.traceIdHigh());\n  EXPECT_TRUE(context.first.sampled());\n  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, 
false}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, WithoutSampled) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"b3\", fmt::format(\"{}-{}\", trace_id, span_id)}};\n  SpanContextExtractor extractor(request_headers);\n  auto context = extractor.extractSpanContext(false);\n  EXPECT_TRUE(context.second);\n  EXPECT_EQ(3, context.first.id());\n  EXPECT_EQ(0, context.first.parentId());\n  EXPECT_FALSE(context.first.is128BitTraceId());\n  EXPECT_EQ(1, context.first.traceId());\n  EXPECT_EQ(0, context.first.traceIdHigh());\n  EXPECT_FALSE(context.first.sampled());\n  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true}));\n}\n\nTEST(ZipkinSpanContextExtractorTest, TooBig) {\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}{}{}-{}-{}\", trace_id, trace_id, trace_id, span_id, trace_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: too long\");\n    EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, false}));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}{}-{}-1-{}a\", trace_id_high, trace_id, span_id, parent_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: too long\");\n  }\n}\n\nTEST(ZipkinSpanContextExtractorTest, Empty) {\n  Http::TestRequestHeaderMapImpl request_headers{{\"b3\", \"\"}};\n  SpanContextExtractor extractor(request_headers);\n  EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                            \"Invalid input: empty\");\n}\n\nTEST(ZipkinSpanContextExtractorTest, InvalidInput) {\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"X-B3-TraceId\", trace_id_high + 
trace_id.substr(0, 15) + \"!\"}, {\"X-B3-SpanId\", span_id}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              fmt::format(\"Invalid traceid_high {} or tracid {}\", trace_id_high,\n                                          trace_id.substr(0, 15) + \"!\"));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}!{}-{}\", trace_id.substr(0, 15), trace_id, span_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(\n        extractor.extractSpanContext(true), ExtractorException,\n        fmt::format(\"Invalid input: invalid trace id high {}!\", trace_id.substr(0, 15)));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}{}!-{}\", trace_id, trace_id.substr(0, 15), span_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(\n        extractor.extractSpanContext(true), ExtractorException,\n        fmt::format(\"Invalid input: invalid trace id {}!\", trace_id.substr(0, 15)));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}!-{}\", trace_id.substr(0, 15), span_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(\n        extractor.extractSpanContext(true), ExtractorException,\n        fmt::format(\"Invalid input: invalid trace id {}!\", trace_id.substr(0, 15)));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"b3\", fmt::format(\"{}!{}\", trace_id, span_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: not exists span id\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}!\", 
trace_id, span_id.substr(0, 15))}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(\n        extractor.extractSpanContext(true), ExtractorException,\n        fmt::format(\"Invalid input: invalid span id {}!\", span_id.substr(0, 15)));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}!0\", trace_id, span_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: not exists sampling field\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}-c\", trace_id, span_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: invalid sampling flag c\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}-d!{}\", trace_id, span_id, parent_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}-d-{}!\", trace_id, span_id, parent_id.substr(0, 15))}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(\n        extractor.extractSpanContext(true), ExtractorException,\n        fmt::format(\"Invalid input: invalid parent id {}!\", parent_id.substr(0, 15)));\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"b3\", \"-\"}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true}));\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), 
ExtractorException,\n                              \"Invalid input: invalid sampling flag -\");\n  }\n}\n\nTEST(ZipkinSpanContextExtractorTest, Truncated) {\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"b3\", \"-1\"}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"b3\", \"1-\"}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"b3\", \"1-\"}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"b3\", trace_id.substr(0, 15)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"b3\", trace_id}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\"b3\", trace_id + \"-\"}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}\", 
trace_id.substr(0, 15), span_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}\", trace_id, span_id.substr(0, 15))}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}-\", trace_id, span_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}-1-\", trace_id, span_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}-1-{}\", trace_id, span_id, parent_id.substr(0, 15))}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\"b3\", fmt::format(\"{}-{}-{}{}\", trace_id, span_id, trace_id, trace_id)}};\n    SpanContextExtractor extractor(request_headers);\n    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,\n                              \"Invalid input: truncated\");\n  }\n}\n\n} // namespace 
Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/zipkin/tracer_test.cc",
    "content": "#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"extensions/tracers/zipkin/tracer.h\"\n#include \"extensions/tracers/zipkin/util.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\nnamespace {\n\nclass TestReporterImpl : public Reporter {\npublic:\n  TestReporterImpl(int value) : value_(value) {}\n  void reportSpan(Span&& span) override { reported_spans_.push_back(span); }\n  int getValue() { return value_; }\n  std::vector<Span>& reportedSpans() { return reported_spans_; }\n\nprivate:\n  int value_;\n  std::vector<Span> reported_spans_;\n};\n\nclass ZipkinTracerTest : public testing::Test {\nprotected:\n  Event::SimulatedTimeSystem time_system_;\n};\n\nTEST_F(ZipkinTracerTest, SpanCreation) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:9000\");\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  Tracer tracer(\"my_service_name\", addr, random_generator, false, true, time_system_);\n  SystemTime timestamp = time_system_.systemTime();\n\n  NiceMock<Tracing::MockConfig> config;\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress));\n\n  // ==============\n  // Test the creation of a root span --> CS\n  // ==============\n  ON_CALL(random_generator, random()).WillByDefault(Return(1000));\n  time_system_.advanceTimeWait(std::chrono::milliseconds(1));\n  SpanPtr root_span = 
tracer.startSpan(config, \"my_span\", timestamp);\n\n  EXPECT_EQ(\"my_span\", root_span->name());\n  EXPECT_NE(0LL, root_span->startTime());\n  EXPECT_NE(0ULL, root_span->traceId());            // trace id must be set\n  EXPECT_FALSE(root_span->isSetTraceIdHigh());      // by default, should be using 64 bit trace id\n  EXPECT_EQ(root_span->traceId(), root_span->id()); // span id and trace id must be the same\n  EXPECT_FALSE(root_span->isSetParentId());         // no parent set\n  // span's timestamp must be set\n  EXPECT_EQ(\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count(),\n      root_span->timestamp());\n\n  // A CS annotation must have been added\n  EXPECT_EQ(1ULL, root_span->annotations().size());\n  Annotation ann = root_span->annotations()[0];\n  EXPECT_EQ(CLIENT_SEND, ann.value());\n  // annotation's timestamp must be set\n  EXPECT_EQ(\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count(),\n      ann.timestamp());\n  EXPECT_TRUE(ann.isSetEndpoint());\n  Endpoint endpoint = ann.endpoint();\n  EXPECT_EQ(\"my_service_name\", endpoint.serviceName());\n\n  // The tracer must have been properly set\n  EXPECT_EQ(dynamic_cast<TracerInterface*>(&tracer), root_span->tracer());\n\n  // Duration is not set at span-creation time\n  EXPECT_FALSE(root_span->isSetDuration());\n\n  // ==============\n  // Test the creation of a shared-context span --> SR\n  // ==============\n\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress));\n\n  SpanContext root_span_context(*root_span);\n  SpanPtr server_side_shared_context_span =\n      tracer.startSpan(config, \"my_span\", timestamp, root_span_context);\n\n  EXPECT_NE(0LL, server_side_shared_context_span->startTime());\n\n  EXPECT_EQ(\"my_span\", server_side_shared_context_span->name());\n\n  // trace id must be the same in the CS and SR sides\n  EXPECT_EQ(root_span->traceId(), 
server_side_shared_context_span->traceId());\n\n  // span id must be the same in the CS and SR sides\n  EXPECT_EQ(root_span->id(), server_side_shared_context_span->id());\n\n  // The parent should be the same as in the CS side (none in this case)\n  EXPECT_FALSE(server_side_shared_context_span->isSetParentId());\n\n  // span timestamp should not be set (it was set in the CS side)\n  EXPECT_FALSE(server_side_shared_context_span->isSetTimestamp());\n\n  // An SR annotation must have been added\n  EXPECT_EQ(1ULL, server_side_shared_context_span->annotations().size());\n  ann = server_side_shared_context_span->annotations()[0];\n  EXPECT_EQ(SERVER_RECV, ann.value());\n  // annotation's timestamp must be set\n  EXPECT_EQ(\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count(),\n      ann.timestamp());\n  EXPECT_TRUE(ann.isSetEndpoint());\n  endpoint = ann.endpoint();\n  EXPECT_EQ(\"my_service_name\", endpoint.serviceName());\n\n  // The tracer must have been properly set\n  EXPECT_EQ(dynamic_cast<TracerInterface*>(&tracer), server_side_shared_context_span->tracer());\n\n  // Duration is not set at span-creation time\n  EXPECT_FALSE(server_side_shared_context_span->isSetDuration());\n\n  // ==============\n  // Test the creation of a child span --> CS\n  // ==============\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress));\n\n  ON_CALL(random_generator, random()).WillByDefault(Return(2000));\n  SpanContext server_side_context(*server_side_shared_context_span);\n  SpanPtr child_span = tracer.startSpan(config, \"my_child_span\", timestamp, server_side_context);\n\n  EXPECT_EQ(\"my_child_span\", child_span->name());\n  EXPECT_NE(0LL, child_span->startTime());\n\n  // trace id must be retained\n  EXPECT_NE(0ULL, child_span->traceId());\n  EXPECT_EQ(server_side_shared_context_span->traceId(), child_span->traceId());\n\n  // span id and trace id must NOT be the same\n  
EXPECT_NE(child_span->traceId(), child_span->id());\n\n  // parent should be the previous span\n  EXPECT_TRUE(child_span->isSetParentId());\n  EXPECT_EQ(server_side_shared_context_span->id(), child_span->parentId());\n\n  // span's timestamp must be set\n  EXPECT_EQ(\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count(),\n      child_span->timestamp());\n\n  // A CS annotation must have been added\n  EXPECT_EQ(1ULL, child_span->annotations().size());\n  ann = child_span->annotations()[0];\n  EXPECT_EQ(CLIENT_SEND, ann.value());\n  // Annotation's timestamp must be set\n  EXPECT_EQ(\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count(),\n      ann.timestamp());\n  EXPECT_TRUE(ann.isSetEndpoint());\n  endpoint = ann.endpoint();\n  EXPECT_EQ(\"my_service_name\", endpoint.serviceName());\n\n  // The tracer must have been properly set\n  EXPECT_EQ(dynamic_cast<TracerInterface*>(&tracer), child_span->tracer());\n\n  // Duration is not set at span-creation time\n  EXPECT_FALSE(child_span->isSetDuration());\n\n  // ==============\n  // Test the creation of a shared-context span with a parent --> SR\n  // ==============\n\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress));\n  TestRandomGenerator generator;\n  const uint64_t generated_parent_id = generator.random();\n  SpanContext modified_root_span_context(root_span_context.traceIdHigh(),\n                                         root_span_context.traceId(), root_span_context.id(),\n                                         generated_parent_id, root_span_context.sampled());\n  SpanPtr new_shared_context_span =\n      tracer.startSpan(config, \"new_shared_context_span\", timestamp, modified_root_span_context);\n  EXPECT_NE(0LL, new_shared_context_span->startTime());\n\n  EXPECT_EQ(\"new_shared_context_span\", new_shared_context_span->name());\n\n  // trace id must be the same in the CS and SR 
sides\n  EXPECT_EQ(root_span->traceId(), new_shared_context_span->traceId());\n\n  // span id must be the same in the CS and SR sides\n  EXPECT_EQ(root_span->id(), new_shared_context_span->id());\n\n  // The parent should be the same as in the CS side\n  EXPECT_TRUE(new_shared_context_span->isSetParentId());\n  EXPECT_EQ(modified_root_span_context.parentId(), new_shared_context_span->parentId());\n\n  // span timestamp should not be set (it was set in the CS side)\n  EXPECT_FALSE(new_shared_context_span->isSetTimestamp());\n\n  // An SR annotation must have been added\n  EXPECT_EQ(1ULL, new_shared_context_span->annotations().size());\n  ann = new_shared_context_span->annotations()[0];\n  EXPECT_EQ(SERVER_RECV, ann.value());\n  // annotation's timestamp must be set\n  EXPECT_EQ(\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count(),\n      ann.timestamp());\n  EXPECT_TRUE(ann.isSetEndpoint());\n  endpoint = ann.endpoint();\n  EXPECT_EQ(\"my_service_name\", endpoint.serviceName());\n\n  // The tracer must have been properly set\n  EXPECT_EQ(dynamic_cast<TracerInterface*>(&tracer), new_shared_context_span->tracer());\n\n  // Duration is not set at span-creation time\n  EXPECT_FALSE(new_shared_context_span->isSetDuration());\n}\n\nTEST_F(ZipkinTracerTest, FinishSpan) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:9000\");\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  Tracer tracer(\"my_service_name\", addr, random_generator, false, true, time_system_);\n  SystemTime timestamp = time_system_.systemTime();\n\n  // ==============\n  // Test finishing a span containing a CS annotation\n  // ==============\n\n  NiceMock<Tracing::MockConfig> config;\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress));\n\n  // Creates a root-span with a CS annotation\n  SpanPtr span = tracer.startSpan(config, \"my_span\", 
timestamp);\n  span->setSampled(true);\n\n  // Finishing a root span with a CS annotation must add a CR annotation\n  span->finish();\n  EXPECT_EQ(2ULL, span->annotations().size());\n\n  // Check the CS annotation added at span-creation time\n  Annotation ann = span->annotations()[0];\n  EXPECT_EQ(CLIENT_SEND, ann.value());\n\n  // Annotation's timestamp must be set\n  EXPECT_EQ(\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count(),\n      ann.timestamp());\n  EXPECT_TRUE(ann.isSetEndpoint());\n  Endpoint endpoint = ann.endpoint();\n  EXPECT_EQ(\"my_service_name\", endpoint.serviceName());\n\n  // Check the CR annotation added when ending the span\n  ann = span->annotations()[1];\n  EXPECT_EQ(CLIENT_RECV, ann.value());\n  EXPECT_NE(0ULL, ann.timestamp()); // annotation's timestamp must be set\n  EXPECT_TRUE(ann.isSetEndpoint());\n  endpoint = ann.endpoint();\n  EXPECT_EQ(\"my_service_name\", endpoint.serviceName());\n\n  // ==============\n  // Test finishing a span containing an SR annotation\n  // ==============\n\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress));\n\n  SpanContext context(*span);\n  SpanPtr server_side = tracer.startSpan(config, \"my_span\", timestamp, context);\n\n  // Associate a reporter with the tracer\n  TestReporterImpl* reporter_object = new TestReporterImpl(135);\n  ReporterPtr reporter_ptr(reporter_object);\n  tracer.setReporter(std::move(reporter_ptr));\n\n  // Finishing a server-side span with an SR annotation must add an SS annotation\n  server_side->finish();\n  EXPECT_EQ(2ULL, server_side->annotations().size());\n\n  // Test if the reporter's reportSpan method was actually called upon finishing the span\n  EXPECT_EQ(1ULL, reporter_object->reportedSpans().size());\n\n  // Check the SR annotation added at span-creation time\n  ann = server_side->annotations()[0];\n  EXPECT_EQ(SERVER_RECV, ann.value());\n  // Annotation's timestamp must be set\n  
EXPECT_EQ(\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count(),\n      ann.timestamp());\n  EXPECT_TRUE(ann.isSetEndpoint());\n  endpoint = ann.endpoint();\n  EXPECT_EQ(\"my_service_name\", endpoint.serviceName());\n\n  // Check the SS annotation added when ending the span\n  ann = server_side->annotations()[1];\n  EXPECT_EQ(SERVER_SEND, ann.value());\n  EXPECT_NE(0ULL, ann.timestamp()); // annotation's timestamp must be set\n  EXPECT_TRUE(ann.isSetEndpoint());\n  endpoint = ann.endpoint();\n  EXPECT_EQ(\"my_service_name\", endpoint.serviceName());\n}\n\nTEST_F(ZipkinTracerTest, FinishNotSampledSpan) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:9000\");\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  Tracer tracer(\"my_service_name\", addr, random_generator, false, true, time_system_);\n  SystemTime timestamp = time_system_.systemTime();\n\n  // ==============\n  // Test finishing a span that is marked as not sampled\n  // ==============\n\n  NiceMock<Tracing::MockConfig> config;\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress));\n\n  // Associate a reporter with the tracer\n  TestReporterImpl* reporter_object = new TestReporterImpl(135);\n  ReporterPtr reporter_ptr(reporter_object);\n  tracer.setReporter(std::move(reporter_ptr));\n\n  // Creates a root-span with a CS annotation\n  SpanPtr span = tracer.startSpan(config, \"my_span\", timestamp);\n  span->setSampled(false);\n  span->finish();\n\n  // Test if the reporter's reportSpan method was NOT called upon finishing the span\n  EXPECT_EQ(0ULL, reporter_object->reportedSpans().size());\n}\n\nTEST_F(ZipkinTracerTest, SpanSampledPropagatedToChild) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:9000\");\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  Tracer 
tracer(\"my_service_name\", addr, random_generator, false, true, time_system_);\n  SystemTime timestamp = time_system_.systemTime();\n\n  NiceMock<Tracing::MockConfig> config;\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress));\n\n  // Create parent span\n  SpanPtr parent_span = tracer.startSpan(config, \"parent_span\", timestamp);\n  parent_span->setSampled(true);\n\n  SpanContext parent_context1(*parent_span);\n  SpanPtr child_span1 = tracer.startSpan(config, \"child_span 1\", timestamp, parent_context1);\n\n  // Test that child span sampled flag is true\n  EXPECT_TRUE(child_span1->sampled());\n\n  parent_span->setSampled(false);\n  SpanContext parent_context2(*parent_span);\n  SpanPtr child_span2 = tracer.startSpan(config, \"child_span 2\", timestamp, parent_context2);\n\n  // Test that sampled flag is false\n  EXPECT_FALSE(child_span2->sampled());\n}\n\nTEST_F(ZipkinTracerTest, RootSpan128bitTraceId) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:9000\");\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  Tracer tracer(\"my_service_name\", addr, random_generator, true, true, time_system_);\n  SystemTime timestamp = time_system_.systemTime();\n\n  NiceMock<Tracing::MockConfig> config;\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress));\n\n  // Create root span\n  SpanPtr root_span = tracer.startSpan(config, \"root_span\", timestamp);\n\n  // Test that high 64 bit trace id is set\n  EXPECT_TRUE(root_span->isSetTraceIdHigh());\n}\n\n// This test checks that when configured to use shared span context, a child span\n// is created with the same id as the parent span.\nTEST_F(ZipkinTracerTest, SharedSpanContext) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:9000\");\n  NiceMock<Random::MockRandomGenerator> random_generator;\n\n  const bool 
shared_span_context = true;\n  Tracer tracer(\"my_service_name\", addr, random_generator, false, shared_span_context,\n                time_system_);\n  const SystemTime timestamp = time_system_.systemTime();\n\n  NiceMock<Tracing::MockConfig> config;\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress));\n\n  // Create parent span\n  SpanPtr parent_span = tracer.startSpan(config, \"parent_span\", timestamp);\n  SpanContext parent_context(*parent_span);\n\n  // An CS annotation must have been added\n  EXPECT_EQ(1ULL, parent_span->annotations().size());\n  Annotation ann = parent_span->annotations()[0];\n  EXPECT_EQ(CLIENT_SEND, ann.value());\n\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress));\n\n  SpanPtr child_span = tracer.startSpan(config, \"child_span\", timestamp, parent_context);\n\n  EXPECT_EQ(parent_span->id(), child_span->id());\n\n  // An SR annotation must have been added\n  EXPECT_EQ(1ULL, child_span->annotations().size());\n  ann = child_span->annotations()[0];\n  EXPECT_EQ(SERVER_RECV, ann.value());\n}\n\n// This test checks that when configured to NOT use shared span context, a child span\n// is created with a different id to the parent span.\nTEST_F(ZipkinTracerTest, NotSharedSpanContext) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:9000\");\n  NiceMock<Random::MockRandomGenerator> random_generator;\n\n  const bool shared_span_context = false;\n  Tracer tracer(\"my_service_name\", addr, random_generator, false, shared_span_context,\n                time_system_);\n  const SystemTime timestamp = time_system_.systemTime();\n\n  NiceMock<Tracing::MockConfig> config;\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress));\n\n  // Create parent span\n  SpanPtr parent_span = tracer.startSpan(config, \"parent_span\", timestamp);\n  SpanContext 
parent_context(*parent_span);\n\n  // An CS annotation must have been added\n  EXPECT_EQ(1ULL, parent_span->annotations().size());\n  Annotation ann = parent_span->annotations()[0];\n  EXPECT_EQ(CLIENT_SEND, ann.value());\n\n  ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress));\n\n  SpanPtr child_span = tracer.startSpan(config, \"child_span\", timestamp, parent_context);\n\n  EXPECT_EQ(parent_span->id(), child_span->parentId());\n\n  // An SR annotation must have been added\n  EXPECT_EQ(1ULL, child_span->annotations().size());\n  ann = child_span->annotations()[0];\n  EXPECT_EQ(SERVER_RECV, ann.value());\n}\n\n} // namespace\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/zipkin/zipkin_core_types_test.cc",
    "content": "#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n#include \"extensions/tracers/zipkin/zipkin_core_types.h\"\n\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\nnamespace {\n\nTEST(ZipkinCoreTypesEndpointTest, defaultConstructor) {\n  Endpoint ep;\n  Util::Replacements replacements;\n\n  EXPECT_EQ(\"\", ep.serviceName());\n  EXPECT_TRUE(\n      TestUtility::protoEqual(TestUtility::jsonToStruct(R\"({\"ipv4\":\"\",\"port\":0,\"serviceName\":\"\"})\"),\n                              ep.toStruct(replacements)));\n\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddress(\"127.0.0.1\");\n  ep.setAddress(addr);\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(R\"({\"ipv4\":\"127.0.0.1\",\"port\":0,\"serviceName\":\"\"})\"),\n      ep.toStruct(replacements)));\n\n  addr = Network::Utility::parseInternetAddressAndPort(\n      \"[2001:0db8:85a3:0000:0000:8a2e:0370:4444]:7334\");\n  ep.setAddress(addr);\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(\n          R\"({\"ipv6\":\"2001:db8:85a3::8a2e:370:4444\",\"port\":7334,\"serviceName\":\"\"})\"),\n      ep.toStruct(replacements)));\n  EXPECT_TRUE(replacements.empty());\n\n  ep.setServiceName(\"my_service\");\n  EXPECT_EQ(\"my_service\", ep.serviceName());\n\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(\n          R\"({\"ipv6\":\"2001:db8:85a3::8a2e:370:4444\",\"port\":7334,\"serviceName\":\"my_service\"})\"),\n      ep.toStruct(replacements)));\n  EXPECT_TRUE(replacements.empty());\n}\n\nTEST(ZipkinCoreTypesEndpointTest, customConstructor) {\n  Network::Address::InstanceConstSharedPtr addr =\n      
Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:3306\");\n  Endpoint ep(std::string(\"my_service\"), addr);\n  Util::Replacements replacements;\n\n  EXPECT_EQ(\"my_service\", ep.serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(R\"({\"ipv4\":\"127.0.0.1\",\"port\":3306,\"serviceName\":\"my_service\"})\"),\n      ep.toStruct(replacements)));\n  EXPECT_TRUE(replacements.empty());\n\n  addr = Network::Utility::parseInternetAddressAndPort(\n      \"[2001:0db8:85a3:0000:0000:8a2e:0370:4444]:7334\");\n  ep.setAddress(addr);\n\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(\n          R\"({\"ipv6\":\"2001:db8:85a3::8a2e:370:4444\",\"port\":7334,\"serviceName\":\"my_service\"})\"),\n      ep.toStruct(replacements)));\n  EXPECT_TRUE(replacements.empty());\n}\n\nTEST(ZipkinCoreTypesEndpointTest, copyOperator) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:3306\");\n  Endpoint ep1(std::string(\"my_service\"), addr);\n  Endpoint& ep2(ep1);\n  Util::Replacements replacements;\n\n  EXPECT_EQ(\"my_service\", ep1.serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(R\"({\"ipv4\":\"127.0.0.1\",\"port\":3306,\"serviceName\":\"my_service\"})\"),\n      ep1.toStruct(replacements)));\n\n  EXPECT_EQ(ep1.serviceName(), ep2.serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(ep1.toStruct(replacements), ep2.toStruct(replacements)));\n  EXPECT_TRUE(replacements.empty());\n}\n\nTEST(ZipkinCoreTypesEndpointTest, assignmentOperator) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:3306\");\n  Endpoint ep1(std::string(\"my_service\"), addr);\n  Endpoint& ep2 = ep1;\n  Util::Replacements replacements;\n\n  EXPECT_EQ(\"my_service\", ep1.serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(\n      
TestUtility::jsonToStruct(R\"({\"ipv4\":\"127.0.0.1\",\"port\":3306,\"serviceName\":\"my_service\"})\"),\n      ep1.toStruct(replacements)));\n\n  EXPECT_EQ(ep1.serviceName(), ep2.serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(ep1.toStruct(replacements), ep2.toStruct(replacements)));\n  EXPECT_TRUE(replacements.empty());\n}\n\nTEST(ZipkinCoreTypesAnnotationTest, defaultConstructor) {\n  Annotation ann;\n  Util::Replacements replacements;\n\n  EXPECT_EQ(0ULL, ann.timestamp());\n  EXPECT_EQ(\"\", ann.value());\n  EXPECT_FALSE(ann.isSetEndpoint());\n\n  Event::SimulatedTimeSystem test_time;\n  uint64_t timestamp = std::chrono::duration_cast<std::chrono::microseconds>(\n                           test_time.timeSystem().systemTime().time_since_epoch())\n                           .count();\n  ann.setTimestamp(timestamp);\n  EXPECT_EQ(timestamp, ann.timestamp());\n\n  ann.setValue(CLIENT_SEND);\n  EXPECT_EQ(CLIENT_SEND, ann.value());\n\n  std::string expected_json = R\"({\"timestamp\":\")\" + std::to_string(timestamp) + R\"(\")\" +\n                              R\"(,\"value\":\")\" + CLIENT_SEND + R\"(\"})\";\n  EXPECT_TRUE(TestUtility::protoEqual(TestUtility::jsonToStruct(expected_json),\n                                      ann.toStruct(replacements)));\n  EXPECT_EQ(1, replacements.size());\n\n  replacements.clear();\n  // Test the copy-semantics flavor of setEndpoint\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:3306\");\n  Endpoint ep(std::string(\"my_service\"), addr);\n  ann.setEndpoint(ep);\n  EXPECT_TRUE(ann.isSetEndpoint());\n  EXPECT_EQ(\"my_service\", ann.endpoint().serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(R\"({\"ipv4\":\"127.0.0.1\",\"port\":3306,\"serviceName\":\"my_service\"})\"),\n      (const_cast<Endpoint&>(ann.endpoint())).toStruct(replacements)));\n\n  expected_json = R\"({\"timestamp\":\")\" + std::to_string(timestamp) + 
R\"(\")\" + R\"(,\"value\":\")\" +\n                  CLIENT_SEND +\n                  R\"(\",\"endpoint\":{\"ipv4\":)\"\n                  R\"(\"127.0.0.1\",\"port\":3306,\"serviceName\":\"my_service\"}})\";\n  EXPECT_TRUE(TestUtility::protoEqual(TestUtility::jsonToStruct(expected_json),\n                                      ann.toStruct(replacements)));\n\n  EXPECT_EQ(1, replacements.size());\n\n  replacements.clear();\n  // Test the move-semantics flavor of setEndpoint\n  addr = Network::Utility::parseInternetAddressAndPort(\"192.168.1.1:5555\");\n  Endpoint ep2(std::string(\"my_service_2\"), addr);\n  ann.setEndpoint(std::move(ep2));\n  EXPECT_TRUE(ann.isSetEndpoint());\n  EXPECT_EQ(\"my_service_2\", ann.endpoint().serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(\n          R\"({\"ipv4\":\"192.168.1.1\",\"port\":5555,\"serviceName\":\"my_service_2\"})\"),\n      (const_cast<Endpoint&>(ann.endpoint())).toStruct(replacements)));\n  EXPECT_TRUE(replacements.empty());\n\n  replacements.clear();\n  expected_json = R\"({\"timestamp\":\")\" + std::to_string(timestamp) + R\"(\")\" + R\"(,\"value\":\")\" +\n                  CLIENT_SEND +\n                  R\"(\",\"endpoint\":{\"ipv4\":\"192.168.1.1\",)\"\n                  R\"(\"port\":5555,\"serviceName\":\"my_service_2\"}})\";\n  EXPECT_TRUE(TestUtility::protoEqual(TestUtility::jsonToStruct(expected_json),\n                                      ann.toStruct(replacements)));\n  EXPECT_EQ(1, replacements.size());\n\n  replacements.clear();\n  // Test change endpoint service name.\n  ann.changeEndpointServiceName(\"NEW_SERVICE_NAME\");\n  EXPECT_EQ(\"NEW_SERVICE_NAME\", ann.endpoint().serviceName());\n  expected_json = R\"({\"timestamp\":\")\" + std::to_string(timestamp) + R\"(\")\" + R\"(,\"value\":\")\" +\n                  CLIENT_SEND +\n                  R\"(\",\"endpoint\":{\"ipv4\":\"192.168.1.1\",)\"\n                  
R\"(\"port\":5555,\"serviceName\":\"NEW_SERVICE_NAME\"}})\";\n  EXPECT_TRUE(TestUtility::protoEqual(TestUtility::jsonToStruct(expected_json),\n                                      ann.toStruct(replacements)));\n  EXPECT_EQ(1, replacements.size());\n}\n\nTEST(ZipkinCoreTypesAnnotationTest, customConstructor) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:3306\");\n  Endpoint ep(std::string(\"my_service\"), addr);\n  Event::SimulatedTimeSystem test_time;\n  uint64_t timestamp = std::chrono::duration_cast<std::chrono::microseconds>(\n                           test_time.timeSystem().systemTime().time_since_epoch())\n                           .count();\n  Annotation ann(timestamp, CLIENT_SEND, ep);\n  Util::Replacements replacements;\n\n  EXPECT_EQ(timestamp, ann.timestamp());\n  EXPECT_EQ(CLIENT_SEND, ann.value());\n  EXPECT_TRUE(ann.isSetEndpoint());\n\n  EXPECT_EQ(\"my_service\", ann.endpoint().serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(R\"({\"ipv4\":\"127.0.0.1\",\"port\":3306,\"serviceName\":\"my_service\"})\"),\n      (const_cast<Endpoint&>(ann.endpoint())).toStruct(replacements)));\n  EXPECT_TRUE(replacements.empty());\n\n  std::string expected_json = R\"({\"timestamp\":\")\" + std::to_string(timestamp) + R\"(\")\" +\n                              R\"(,\"value\":\")\" + CLIENT_SEND +\n                              R\"(\",\"endpoint\":{\"ipv4\":\"127.0.0.1\",)\"\n                              R\"(\"port\":3306,\"serviceName\":\"my_service\"}})\";\n  EXPECT_TRUE(TestUtility::protoEqual(TestUtility::jsonToStruct(expected_json),\n                                      ann.toStruct(replacements)));\n  EXPECT_EQ(1, replacements.size());\n}\n\nTEST(ZipkinCoreTypesAnnotationTest, copyConstructor) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:3306\");\n  Endpoint 
ep(std::string(\"my_service\"), addr);\n  Event::SimulatedTimeSystem test_time;\n  uint64_t timestamp = std::chrono::duration_cast<std::chrono::microseconds>(\n                           test_time.timeSystem().systemTime().time_since_epoch())\n                           .count();\n  Annotation ann(timestamp, CLIENT_SEND, ep);\n  Annotation& ann2(ann);\n  Util::Replacements replacements;\n\n  EXPECT_EQ(ann.value(), ann2.value());\n  EXPECT_EQ(ann.timestamp(), ann2.timestamp());\n  EXPECT_EQ(ann.isSetEndpoint(), ann2.isSetEndpoint());\n  EXPECT_TRUE(TestUtility::protoEqual(ann.toStruct(replacements), ann2.toStruct(replacements)));\n  EXPECT_EQ(ann.endpoint().serviceName(), ann2.endpoint().serviceName());\n}\n\nTEST(ZipkinCoreTypesAnnotationTest, assignmentOperator) {\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:3306\");\n  Endpoint ep(std::string(\"my_service\"), addr);\n  Event::SimulatedTimeSystem test_time;\n  uint64_t timestamp = std::chrono::duration_cast<std::chrono::microseconds>(\n                           test_time.timeSystem().systemTime().time_since_epoch())\n                           .count();\n  Annotation ann(timestamp, CLIENT_SEND, ep);\n  Annotation& ann2 = ann;\n  Util::Replacements replacements;\n\n  EXPECT_EQ(ann.value(), ann2.value());\n  EXPECT_EQ(ann.timestamp(), ann2.timestamp());\n  EXPECT_EQ(ann.isSetEndpoint(), ann2.isSetEndpoint());\n  EXPECT_TRUE(TestUtility::protoEqual(ann.toStruct(replacements), ann2.toStruct(replacements)));\n  EXPECT_EQ(ann.endpoint().serviceName(), ann2.endpoint().serviceName());\n}\n\nTEST(ZipkinCoreTypesBinaryAnnotationTest, defaultConstructor) {\n  BinaryAnnotation ann;\n  Util::Replacements replacements;\n\n  EXPECT_EQ(\"\", ann.key());\n  EXPECT_EQ(\"\", ann.value());\n  EXPECT_FALSE(ann.isSetEndpoint());\n  EXPECT_EQ(AnnotationType::STRING, ann.annotationType());\n\n  ann.setKey(\"key\");\n  EXPECT_EQ(\"key\", ann.key());\n\n  
ann.setValue(\"value\");\n  EXPECT_EQ(\"value\", ann.value());\n\n  std::string expected_json = R\"({\"key\":\"key\",\"value\":\"value\"})\";\n  EXPECT_TRUE(TestUtility::protoEqual(TestUtility::jsonToStruct(expected_json),\n                                      ann.toStruct(replacements)));\n\n  // Test the copy-semantics flavor of setEndpoint\n\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"127.0.0.1:3306\");\n  Endpoint ep(std::string(\"my_service\"), addr);\n  ann.setEndpoint(ep);\n  EXPECT_TRUE(ann.isSetEndpoint());\n  EXPECT_EQ(\"my_service\", ann.endpoint().serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(R\"({\"ipv4\":\"127.0.0.1\",\"port\":3306,\"serviceName\":\"my_service\"})\"),\n      (const_cast<Endpoint&>(ann.endpoint())).toStruct(replacements)));\n\n  expected_json = \"{\"\n                  R\"(\"key\":\"key\",\"value\":\"value\",)\"\n                  R\"(\"endpoint\":)\"\n                  R\"({\"ipv4\":\"127.0.0.1\",\"port\":3306,\"serviceName\":\"my_service\"})\"\n                  \"}\";\n  EXPECT_TRUE(TestUtility::protoEqual(TestUtility::jsonToStruct(expected_json),\n                                      ann.toStruct(replacements)));\n\n  // Test the move-semantics flavor of setEndpoint\n  addr = Network::Utility::parseInternetAddressAndPort(\"192.168.1.1:5555\");\n  Endpoint ep2(std::string(\"my_service_2\"), addr);\n  ann.setEndpoint(ep2);\n  EXPECT_TRUE(ann.isSetEndpoint());\n  EXPECT_EQ(\"my_service_2\", ann.endpoint().serviceName());\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(\n          R\"({\"ipv4\":\"192.168.1.1\",\"port\":5555,\"serviceName\":\"my_service_2\"})\"),\n      (const_cast<Endpoint&>(ann.endpoint())).toStruct(replacements)));\n  expected_json = \"{\"\n                  R\"(\"key\":\"key\",\"value\":\"value\",)\"\n                  R\"(\"endpoint\":)\"\n                  
R\"({\"ipv4\":\"192.168.1.1\",\"port\":5555,\"serviceName\":\"my_service_2\"})\"\n                  \"}\";\n  EXPECT_TRUE(TestUtility::protoEqual(TestUtility::jsonToStruct(expected_json),\n                                      ann.toStruct(replacements)));\n}\n\nTEST(ZipkinCoreTypesBinaryAnnotationTest, customConstructor) {\n  BinaryAnnotation ann(\"key\", \"value\");\n  Util::Replacements replacements;\n\n  EXPECT_EQ(\"key\", ann.key());\n  EXPECT_EQ(\"value\", ann.value());\n  EXPECT_FALSE(ann.isSetEndpoint());\n  EXPECT_EQ(AnnotationType::STRING, ann.annotationType());\n  std::string expected_json = R\"({\"key\":\"key\",\"value\":\"value\"})\";\n  EXPECT_TRUE(TestUtility::protoEqual(TestUtility::jsonToStruct(expected_json),\n                                      ann.toStruct(replacements)));\n}\n\nTEST(ZipkinCoreTypesBinaryAnnotationTest, copyConstructor) {\n  BinaryAnnotation ann(\"key\", \"value\");\n  BinaryAnnotation& ann2(ann);\n  Util::Replacements replacements;\n\n  EXPECT_EQ(ann.value(), ann2.value());\n  EXPECT_EQ(ann.key(), ann2.key());\n  EXPECT_EQ(ann.isSetEndpoint(), ann2.isSetEndpoint());\n  EXPECT_TRUE(TestUtility::protoEqual(ann.toStruct(replacements), ann2.toStruct(replacements)));\n  EXPECT_EQ(ann.annotationType(), ann2.annotationType());\n}\n\nTEST(ZipkinCoreTypesBinaryAnnotationTest, assignmentOperator) {\n  BinaryAnnotation ann(\"key\", \"value\");\n  BinaryAnnotation& ann2 = ann;\n  Util::Replacements replacements;\n\n  EXPECT_EQ(ann.value(), ann2.value());\n  EXPECT_EQ(ann.key(), ann2.key());\n  EXPECT_EQ(ann.isSetEndpoint(), ann2.isSetEndpoint());\n  EXPECT_TRUE(TestUtility::protoEqual(ann.toStruct(replacements), ann2.toStruct(replacements)));\n  EXPECT_EQ(ann.annotationType(), ann2.annotationType());\n}\n\nTEST(ZipkinCoreTypesSpanTest, defaultConstructor) {\n  Event::SimulatedTimeSystem test_time;\n  Span span(test_time.timeSystem());\n  Util::Replacements replacements;\n\n  EXPECT_EQ(0ULL, span.id());\n  EXPECT_EQ(0ULL, 
span.traceId());\n  EXPECT_EQ(\"\", span.name());\n  EXPECT_EQ(0ULL, span.annotations().size());\n  EXPECT_EQ(0ULL, span.binaryAnnotations().size());\n  EXPECT_EQ(\"0000000000000000\", span.idAsHexString());\n  EXPECT_EQ(\"0000000000000000\", span.parentIdAsHexString());\n  EXPECT_EQ(\"0000000000000000\", span.traceIdAsHexString());\n  EXPECT_EQ(0LL, span.startTime());\n  EXPECT_FALSE(span.debug());\n  EXPECT_FALSE(span.isSetDuration());\n  EXPECT_FALSE(span.isSetParentId());\n  EXPECT_FALSE(span.isSetTimestamp());\n  EXPECT_FALSE(span.isSetTraceIdHigh());\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(\n          R\"({\"traceId\":\"0000000000000000\",\"name\":\"\",\"id\":\"0000000000000000\"})\"),\n      span.toStruct(replacements)));\n\n  uint64_t id = Util::generateRandom64(test_time.timeSystem());\n  std::string id_hex = Hex::uint64ToHex(id);\n  span.setId(id);\n  EXPECT_EQ(id, span.id());\n  EXPECT_EQ(id_hex, span.idAsHexString());\n\n  id = Util::generateRandom64(test_time.timeSystem());\n  id_hex = Hex::uint64ToHex(id);\n  span.setParentId(id);\n  EXPECT_EQ(id, span.parentId());\n  EXPECT_EQ(id_hex, span.parentIdAsHexString());\n  EXPECT_TRUE(span.isSetParentId());\n\n  id = Util::generateRandom64(test_time.timeSystem());\n  id_hex = Hex::uint64ToHex(id);\n  span.setTraceId(id);\n  EXPECT_EQ(id, span.traceId());\n  EXPECT_EQ(id_hex, span.traceIdAsHexString());\n\n  id = Util::generateRandom64(test_time.timeSystem());\n  id_hex = Hex::uint64ToHex(id);\n  span.setTraceIdHigh(id);\n  EXPECT_EQ(id, span.traceIdHigh());\n  EXPECT_TRUE(span.isSetTraceIdHigh());\n\n  int64_t timestamp = std::chrono::duration_cast<std::chrono::microseconds>(\n                          test_time.timeSystem().systemTime().time_since_epoch())\n                          .count();\n  span.setTimestamp(timestamp);\n  EXPECT_EQ(timestamp, span.timestamp());\n  EXPECT_TRUE(span.isSetTimestamp());\n\n  int64_t start_time = 
std::chrono::duration_cast<std::chrono::microseconds>(\n                           test_time.timeSystem().monotonicTime().time_since_epoch())\n                           .count();\n  span.setStartTime(start_time);\n  EXPECT_EQ(start_time, span.startTime());\n\n  span.setDuration(3000LL);\n  EXPECT_EQ(3000LL, span.duration());\n  EXPECT_TRUE(span.isSetDuration());\n\n  span.setName(\"span_name\");\n  EXPECT_EQ(\"span_name\", span.name());\n\n  span.setDebug();\n  EXPECT_TRUE(span.debug());\n\n  Endpoint endpoint;\n  Annotation ann;\n  BinaryAnnotation bann;\n  std::vector<Annotation> annotations;\n  std::vector<BinaryAnnotation> binary_annotations;\n\n  endpoint.setServiceName(\"my_service_name\");\n  Network::Address::InstanceConstSharedPtr addr =\n      Network::Utility::parseInternetAddressAndPort(\"192.168.1.2:3306\");\n  endpoint.setAddress(addr);\n\n  ann.setValue(CLIENT_SEND);\n  ann.setTimestamp(timestamp);\n  ann.setEndpoint(endpoint);\n\n  annotations.push_back(ann);\n  span.setAnnotations(annotations);\n  EXPECT_EQ(1ULL, span.annotations().size());\n\n  bann.setKey(LOCAL_COMPONENT);\n  bann.setValue(\"my_component_name\");\n  bann.setEndpoint(endpoint);\n\n  binary_annotations.push_back(bann);\n  span.setBinaryAnnotations(binary_annotations);\n  EXPECT_EQ(1ULL, span.binaryAnnotations().size());\n\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(\n          R\"({\"traceId\":\")\" + span.traceIdAsHexString() + R\"(\",\"name\":\"span_name\",\"id\":\")\" +\n          span.idAsHexString() + R\"(\",\"parentId\":\")\" + span.parentIdAsHexString() +\n          R\"(\",\"timestamp\":\")\" + std::to_string(span.timestamp()) +\n          R\"(\")\"\n          R\"(,\"duration\":\"3000\",)\"\n          R\"(\"annotations\":[)\"\n          R\"({\"timestamp\":\")\" +\n          std::to_string(span.timestamp()) +\n          R\"(\")\"\n          R\"(,\"value\":\"cs\",\"endpoint\":)\"\n          
R\"({\"ipv4\":\"192.168.1.2\",\"port\":3306,\"serviceName\":\"my_service_name\"}}],)\"\n          R\"(\"binaryAnnotations\":[{\"key\":\"lc\",\"value\":\"my_component_name\",\"endpoint\":)\"\n          R\"({\"ipv4\":\"192.168.1.2\",\"port\":3306,\"serviceName\":\"my_service_name\"}}]})\"),\n      span.toStruct(replacements)));\n  EXPECT_EQ(3, replacements.size());\n\n  // Test the copy-semantics flavor of addAnnotation and addBinaryAnnotation\n\n  ann.setValue(SERVER_SEND);\n  span.addAnnotation(ann);\n  bann.setKey(\"http.return_code\");\n  bann.setValue(\"200\");\n  span.addBinaryAnnotation(bann);\n\n  EXPECT_EQ(2ULL, span.annotations().size());\n  EXPECT_EQ(2ULL, span.binaryAnnotations().size());\n\n  // Test the move-semantics flavor of addAnnotation and addBinaryAnnotation\n\n  ann.setValue(SERVER_RECV);\n  Annotation ann_copy(ann);\n  span.addAnnotation(std::move(ann));\n  bann.setKey(\"http.return_code\");\n  bann.setValue(\"400\");\n  span.addBinaryAnnotation(std::move(bann));\n\n  EXPECT_EQ(3ULL, span.annotations().size());\n  EXPECT_EQ(3ULL, span.binaryAnnotations().size());\n\n  replacements.clear();\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(\n          R\"({\"traceId\":\")\" + span.traceIdAsHexString() + R\"(\",\"name\":\"span_name\",\"id\":\")\" +\n          span.idAsHexString() + R\"(\",\"parentId\":\")\" + span.parentIdAsHexString() +\n          R\"(\",\"timestamp\":\")\" + std::to_string(span.timestamp()) +\n          R\"(\")\"\n          R\"(,\"duration\":\"3000\",)\"\n          R\"(\"annotations\":[)\"\n          R\"({\"timestamp\":\")\" +\n          std::to_string(timestamp) +\n          R\"(\")\"\n          R\"(,\"value\":\"cs\",\"endpoint\":)\"\n          R\"({\"ipv4\":\"192.168.1.2\",\"port\":3306,\"serviceName\":\"my_service_name\"}},)\"\n          R\"({\"timestamp\":\")\" +\n          std::to_string(timestamp) +\n          R\"(\")\"\n          R\"(,\"value\":\"ss\",)\"\n          
R\"(\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"my_service_name\"}},)\"\n          R\"({\"timestamp\":\")\" +\n          std::to_string(timestamp) +\n          R\"(\")\"\n          R\"(,\"value\":\"sr\",\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"my_service_name\"}}],)\"\n          R\"(\"binaryAnnotations\":[{\"key\":\"lc\",\"value\":\"my_component_name\",)\"\n          R\"(\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"my_service_name\"}},)\"\n          R\"({\"key\":\"http.return_code\",\"value\":\"200\",)\"\n          R\"(\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"my_service_name\"}},)\"\n          R\"({\"key\":\"http.return_code\",\"value\":\"400\",)\"\n          R\"(\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"my_service_name\"}}]})\"),\n      span.toStruct(replacements)));\n  EXPECT_EQ(5, replacements.size());\n\n  // Test setSourceServiceName and setDestinationServiceName\n\n  ann_copy.setValue(CLIENT_RECV);\n  span.addAnnotation(ann_copy);\n  span.setServiceName(\"NEW_SERVICE_NAME\");\n  replacements.clear();\n  EXPECT_TRUE(TestUtility::protoEqual(\n      TestUtility::jsonToStruct(\n          R\"({\"traceId\":\")\" + span.traceIdAsHexString() + R\"(\",\"name\":\"span_name\",\"id\":\")\" +\n          span.idAsHexString() + R\"(\",\"parentId\":\")\" + span.parentIdAsHexString() +\n          R\"(\",\"timestamp\":\")\" + std::to_string(span.timestamp()) +\n          R\"(\")\"\n          R\"(,\"duration\":\"3000\",)\"\n          R\"(\"annotations\":[)\"\n          R\"({\"timestamp\":\")\" +\n          std::to_string(timestamp) +\n          R\"(\")\"\n          R\"(,\"value\":\"cs\",\"endpoint\":)\"\n          R\"({\"ipv4\":\"192.168.1.2\",\"port\":3306,\"serviceName\":\"NEW_SERVICE_NAME\"}},)\"\n          R\"({\"timestamp\":\")\" +\n        
  std::to_string(timestamp) +\n          R\"(\")\"\n          R\"(,\"value\":\"ss\",)\"\n          R\"(\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"NEW_SERVICE_NAME\"}},)\"\n          R\"({\"timestamp\":\")\" +\n          std::to_string(timestamp) +\n          R\"(\")\"\n          R\"(,\"value\":\"sr\",\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"NEW_SERVICE_NAME\"}},)\"\n          R\"({\"timestamp\":\")\" +\n          std::to_string(timestamp) +\n          R\"(\")\"\n          R\"(,\"value\":\"cr\",\"endpoint\":)\"\n          R\"({\"ipv4\":\"192.168.1.2\",\"port\":3306,\"serviceName\":\"NEW_SERVICE_NAME\"}}],)\"\n          R\"(\"binaryAnnotations\":[{\"key\":\"lc\",\"value\":\"my_component_name\",)\"\n          R\"(\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"my_service_name\"}},)\"\n          R\"({\"key\":\"http.return_code\",\"value\":\"200\",)\"\n          R\"(\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"my_service_name\"}},)\"\n          R\"({\"key\":\"http.return_code\",\"value\":\"400\",)\"\n          R\"(\"endpoint\":{\"ipv4\":\"192.168.1.2\",\"port\":3306,)\"\n          R\"(\"serviceName\":\"my_service_name\"}}]})\"),\n      span.toStruct(replacements)));\n  EXPECT_EQ(6, replacements.size());\n}\n\nTEST(ZipkinCoreTypesSpanTest, copyConstructor) {\n  Event::SimulatedTimeSystem test_time;\n  Span span(test_time.timeSystem());\n  Util::Replacements replacements;\n\n  uint64_t id = Util::generateRandom64(test_time.timeSystem());\n  std::string id_hex = Hex::uint64ToHex(id);\n  span.setId(id);\n  span.setParentId(id);\n  span.setTraceId(id);\n  int64_t timestamp = std::chrono::duration_cast<std::chrono::microseconds>(\n                          test_time.timeSystem().systemTime().time_since_epoch())\n                          .count();\n  span.setTimestamp(timestamp);\n  
span.setDuration(3000LL);\n  span.setName(\"span_name\");\n\n  Span span2(span);\n\n  EXPECT_EQ(span.id(), span2.id());\n  EXPECT_EQ(span.parentId(), span2.parentId());\n  EXPECT_EQ(span.traceId(), span2.traceId());\n  EXPECT_EQ(span.name(), span2.name());\n  EXPECT_EQ(span.annotations().size(), span2.annotations().size());\n  EXPECT_EQ(span.binaryAnnotations().size(), span2.binaryAnnotations().size());\n  EXPECT_EQ(span.idAsHexString(), span2.idAsHexString());\n  EXPECT_EQ(span.parentIdAsHexString(), span2.parentIdAsHexString());\n  EXPECT_EQ(span.traceIdAsHexString(), span2.traceIdAsHexString());\n  EXPECT_EQ(span.timestamp(), span2.timestamp());\n  EXPECT_EQ(span.duration(), span2.duration());\n  EXPECT_EQ(span.startTime(), span2.startTime());\n  EXPECT_EQ(span.debug(), span2.debug());\n  EXPECT_EQ(span.isSetDuration(), span2.isSetDuration());\n  EXPECT_EQ(span.isSetParentId(), span2.isSetParentId());\n  EXPECT_EQ(span.isSetTimestamp(), span2.isSetTimestamp());\n  EXPECT_EQ(span.isSetTraceIdHigh(), span2.isSetTraceIdHigh());\n}\n\nTEST(ZipkinCoreTypesSpanTest, assignmentOperator) {\n  Event::SimulatedTimeSystem test_time;\n  Span span(test_time.timeSystem());\n  Util::Replacements replacements;\n\n  uint64_t id = Util::generateRandom64(test_time.timeSystem());\n  std::string id_hex = Hex::uint64ToHex(id);\n  span.setId(id);\n  span.setParentId(id);\n  span.setTraceId(id);\n  int64_t timestamp = std::chrono::duration_cast<std::chrono::microseconds>(\n                          test_time.timeSystem().systemTime().time_since_epoch())\n                          .count();\n  span.setTimestamp(timestamp);\n  span.setDuration(3000LL);\n  span.setName(\"span_name\");\n\n  Span span2 = span;\n\n  EXPECT_EQ(span.id(), span2.id());\n  EXPECT_EQ(span.parentId(), span2.parentId());\n  EXPECT_EQ(span.traceId(), span2.traceId());\n  EXPECT_EQ(span.name(), span2.name());\n  EXPECT_EQ(span.annotations().size(), span2.annotations().size());\n  
EXPECT_EQ(span.binaryAnnotations().size(), span2.binaryAnnotations().size());\n  EXPECT_EQ(span.idAsHexString(), span2.idAsHexString());\n  EXPECT_EQ(span.parentIdAsHexString(), span2.parentIdAsHexString());\n  EXPECT_EQ(span.traceIdAsHexString(), span2.traceIdAsHexString());\n  EXPECT_EQ(span.timestamp(), span2.timestamp());\n  EXPECT_EQ(span.duration(), span2.duration());\n  EXPECT_EQ(span.startTime(), span2.startTime());\n  EXPECT_EQ(span.debug(), span2.debug());\n  EXPECT_EQ(span.isSetDuration(), span2.isSetDuration());\n  EXPECT_EQ(span.isSetParentId(), span2.isSetParentId());\n  EXPECT_EQ(span.isSetTimestamp(), span2.isSetTimestamp());\n  EXPECT_EQ(span.isSetTraceIdHigh(), span2.isSetTraceIdHigh());\n}\n\nTEST(ZipkinCoreTypesSpanTest, setTag) {\n  Event::SimulatedTimeSystem test_time;\n  Span span(test_time.timeSystem());\n\n  span.setTag(\"key1\", \"value1\");\n  span.setTag(\"key2\", \"value2\");\n\n  EXPECT_EQ(2ULL, span.binaryAnnotations().size());\n\n  BinaryAnnotation bann = span.binaryAnnotations()[0];\n  EXPECT_EQ(\"key1\", bann.key());\n  EXPECT_EQ(\"value1\", bann.value());\n\n  bann = span.binaryAnnotations()[1];\n  EXPECT_EQ(\"key2\", bann.key());\n  EXPECT_EQ(\"value2\", bann.value());\n}\n\n} // namespace\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc",
    "content": "#include <chrono>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/trace/v3/zipkin.pb.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/tracing/http_tracer_impl.h\"\n\n#include \"extensions/tracers/zipkin/zipkin_core_constants.h\"\n#include \"extensions/tracers/zipkin/zipkin_tracer_impl.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/thread_local_cluster.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::DoAll;\nusing testing::Eq;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::StrictMock;\nusing testing::WithArg;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Tracers {\nnamespace Zipkin {\nnamespace {\n\nclass ZipkinDriverTest : public testing::Test {\npublic:\n  ZipkinDriverTest() : time_source_(test_time_.timeSystem()) {}\n\n  void setup(envoy::config::trace::v3::ZipkinConfig& zipkin_config, bool init_timer) {\n    cm_.thread_local_cluster_.cluster_.info_->name_ = \"fake_cluster\";\n    ON_CALL(cm_, httpAsyncClientForCluster(\"fake_cluster\"))\n        .WillByDefault(ReturnRef(cm_.async_client_));\n\n    if (init_timer) {\n      timer_ = new NiceMock<Event::MockTimer>(&tls_.dispatcher_);\n      EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(5000), _));\n    }\n\n    driver_ = std::make_unique<Driver>(zipkin_config, cm_, stats_, tls_, runtime_, local_info_,\n                                       random_, 
time_source_);\n  }\n\n  void setupValidDriver(const std::string& version) {\n    EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n\n    const std::string yaml_string = fmt::format(R\"EOF(\n    collector_cluster: fake_cluster\n    collector_endpoint: /api/v1/spans\n    collector_endpoint_version: {}\n    )EOF\",\n                                                version);\n    envoy::config::trace::v3::ZipkinConfig zipkin_config;\n    TestUtility::loadFromYaml(yaml_string, zipkin_config);\n\n    setup(zipkin_config, true);\n  }\n\n  void expectValidFlushSeveralSpans(const std::string& version, const std::string& content_type) {\n    setupValidDriver(version);\n\n    Http::MockAsyncClientRequest request(&cm_.async_client_);\n    Http::AsyncClient::Callbacks* callback;\n    const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n\n    EXPECT_CALL(cm_.async_client_,\n                send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n        .WillOnce(\n            Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& callbacks,\n                       const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {\n              callback = &callbacks;\n\n              EXPECT_EQ(\"/api/v1/spans\", message->headers().getPathValue());\n              EXPECT_EQ(\"fake_cluster\", message->headers().getHostValue());\n              EXPECT_EQ(content_type, message->headers().getContentTypeValue());\n\n              return &request;\n            }));\n\n    EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.min_flush_spans\", 5))\n        .Times(2)\n        .WillRepeatedly(Return(2));\n    EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.request_timeout\", 5000U))\n        .WillOnce(Return(5000U));\n\n    Tracing::SpanPtr first_span = driver_->startSpan(\n        config_, request_headers_, operation_name_, start_time_, 
{Tracing::Reason::Sampling, true});\n    first_span->finishSpan();\n\n    Tracing::SpanPtr second_span = driver_->startSpan(\n        config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true});\n    second_span->finishSpan();\n\n    Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"202\"}}}));\n\n    callback->onSuccess(request, std::move(msg));\n\n    EXPECT_EQ(2U, stats_.counter(\"tracing.zipkin.spans_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.reports_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_dropped\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_failed\").value());\n\n    callback->onFailure(request, Http::AsyncClient::FailureReason::Reset);\n\n    EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.reports_failed\").value());\n  }\n\n  // TODO(#4160): Currently time_system_ is initialized from DangerousDeprecatedTestTime, which uses\n  // real time, not mock-time. 
When that is switched to use mock-time instead, I think\n  // generateRandom64() may not be as random as we want, and we'll need to inject entropy\n  // appropriate for the test.\n  uint64_t generateRandom64() { return Util::generateRandom64(time_source_); }\n\n  const std::string operation_name_{\"test\"};\n  Http::TestRequestHeaderMapImpl request_headers_{\n      {\":authority\", \"api.lyft.com\"}, {\":path\", \"/\"}, {\":method\", \"GET\"}, {\"x-request-id\", \"foo\"}};\n  SystemTime start_time_;\n  StreamInfo::MockStreamInfo stream_info_;\n\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  std::unique_ptr<Driver> driver_;\n  NiceMock<Event::MockTimer>* timer_;\n  NiceMock<Stats::MockIsolatedStatsStore> stats_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Random::MockRandomGenerator> random_;\n\n  NiceMock<Tracing::MockConfig> config_;\n  Event::SimulatedTimeSystem test_time_;\n  TimeSource& time_source_;\n};\n\nTEST_F(ZipkinDriverTest, InitializeDriver) {\n  {\n    // Empty config\n    envoy::config::trace::v3::ZipkinConfig zipkin_config;\n\n    EXPECT_THROW(setup(zipkin_config, false), EnvoyException);\n  }\n\n  {\n    // Valid config but collector cluster doesn't exists.\n    EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillOnce(Return(nullptr));\n    const std::string yaml_string = R\"EOF(\n    collector_cluster: fake_cluster\n    collector_endpoint: /api/v1/spans\n    )EOF\";\n    envoy::config::trace::v3::ZipkinConfig zipkin_config;\n    TestUtility::loadFromYaml(yaml_string, zipkin_config);\n\n    EXPECT_THROW(setup(zipkin_config, false), EnvoyException);\n  }\n\n  {\n    // valid config\n    EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n    ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features()).WillByDefault(Return(0));\n\n    const std::string yaml_string = R\"EOF(\n    collector_cluster: 
fake_cluster\n    collector_endpoint: /api/v1/spans\n    )EOF\";\n    envoy::config::trace::v3::ZipkinConfig zipkin_config;\n    TestUtility::loadFromYaml(yaml_string, zipkin_config);\n\n    setup(zipkin_config, true);\n  }\n}\n\nTEST_F(ZipkinDriverTest, AllowCollectorClusterToBeAddedViaApi) {\n  EXPECT_CALL(cm_, get(Eq(\"fake_cluster\"))).WillRepeatedly(Return(&cm_.thread_local_cluster_));\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features()).WillByDefault(Return(0));\n  ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, addedViaApi()).WillByDefault(Return(true));\n\n  const std::string yaml_string = R\"EOF(\n  collector_cluster: fake_cluster\n  collector_endpoint: /api/v1/spans\n  )EOF\";\n  envoy::config::trace::v3::ZipkinConfig zipkin_config;\n  TestUtility::loadFromYaml(yaml_string, zipkin_config);\n\n  setup(zipkin_config, true);\n}\n\nTEST_F(ZipkinDriverTest, FlushSeveralSpans) {\n  expectValidFlushSeveralSpans(\"HTTP_JSON_V1\", \"application/json\");\n}\n\nTEST_F(ZipkinDriverTest, FlushSeveralSpansHttpJsonV1) {\n  expectValidFlushSeveralSpans(\"HTTP_JSON_V1\", \"application/json\");\n}\n\nTEST_F(ZipkinDriverTest, FlushSeveralSpansHttpJson) {\n  expectValidFlushSeveralSpans(\"HTTP_JSON\", \"application/json\");\n}\n\nTEST_F(ZipkinDriverTest, FlushSeveralSpansHttpProto) {\n  expectValidFlushSeveralSpans(\"HTTP_PROTO\", \"application/x-protobuf\");\n}\n\nTEST_F(ZipkinDriverTest, FlushOneSpanReportFailure) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  Http::MockAsyncClientRequest request(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback;\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(\n          Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& callbacks,\n                     const Http::AsyncClient::RequestOptions&) -> 
Http::AsyncClient::Request* {\n            callback = &callbacks;\n\n            EXPECT_EQ(\"/api/v1/spans\", message->headers().getPathValue());\n            EXPECT_EQ(\"fake_cluster\", message->headers().getHostValue());\n            EXPECT_EQ(\"application/json\", message->headers().getContentTypeValue());\n\n            return &request;\n          }));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.min_flush_spans\", 5))\n      .WillOnce(Return(1));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.request_timeout\", 5000U))\n      .WillOnce(Return(5000U));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n  span->finishSpan();\n\n  Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}}));\n\n  // AsyncClient can fail with valid HTTP headers\n  callback->onSuccess(request, std::move(msg));\n\n  EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.spans_sent\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_skipped_no_cluster\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_sent\").value());\n  EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.reports_dropped\").value());\n  EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_failed\").value());\n}\n\nTEST_F(ZipkinDriverTest, SkipReportIfCollectorClusterHasBeenRemoved) {\n  Upstream::ClusterUpdateCallbacks* cluster_update_callbacks;\n  EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_))\n      .WillOnce(DoAll(SaveArgAddress(&cluster_update_callbacks), Return(nullptr)));\n\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.min_flush_spans\", 5))\n      .WillRepeatedly(Return(1));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.request_timeout\", 
5000U))\n      .WillRepeatedly(Return(5000U));\n\n  // Verify the effect of onClusterAddOrUpdate()/onClusterRemoval() on reporting logic,\n  // keeping in mind that they will be called both for relevant and irrelevant clusters.\n\n  {\n    // Simulate removal of the relevant cluster.\n    cluster_update_callbacks->onClusterRemoval(\"fake_cluster\");\n\n    // Verify that no report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0);\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0);\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n\n    // Verify observability.\n    EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.spans_sent\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_dropped\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_failed\").value());\n  }\n\n  {\n    // Simulate addition of an irrelevant cluster.\n    NiceMock<Upstream::MockThreadLocalCluster> unrelated_cluster;\n    unrelated_cluster.cluster_.info_->name_ = \"unrelated_cluster\";\n    cluster_update_callbacks->onClusterAddOrUpdate(unrelated_cluster);\n\n    // Verify that no report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0);\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0);\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n\n    // Verify observability.\n    EXPECT_EQ(2U, stats_.counter(\"tracing.zipkin.spans_sent\").value());\n    EXPECT_EQ(2U, 
stats_.counter(\"tracing.zipkin.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_dropped\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_failed\").value());\n  }\n\n  {\n    // Simulate addition of the relevant cluster.\n    cluster_update_callbacks->onClusterAddOrUpdate(cm_.thread_local_cluster_);\n\n    // Verify that report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"fake_cluster\"))\n        .WillOnce(ReturnRef(cm_.async_client_));\n    Http::MockAsyncClientRequest request(&cm_.async_client_);\n    Http::AsyncClient::Callbacks* callback{};\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n        .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request)));\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n\n    // Complete in-flight request.\n    callback->onFailure(request, Http::AsyncClient::FailureReason::Reset);\n\n    // Verify observability.\n    EXPECT_EQ(3U, stats_.counter(\"tracing.zipkin.spans_sent\").value());\n    EXPECT_EQ(2U, stats_.counter(\"tracing.zipkin.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_dropped\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.reports_failed\").value());\n  }\n\n  {\n    // Simulate removal of an irrelevant cluster.\n    cluster_update_callbacks->onClusterRemoval(\"unrelated_cluster\");\n\n    // Verify that report will be sent.\n    EXPECT_CALL(cm_, httpAsyncClientForCluster(\"fake_cluster\"))\n        .WillOnce(ReturnRef(cm_.async_client_));\n    Http::MockAsyncClientRequest request(&cm_.async_client_);\n    
Http::AsyncClient::Callbacks* callback{};\n    EXPECT_CALL(cm_.async_client_, send_(_, _, _))\n        .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request)));\n\n    // Trigger flush of a span.\n    driver_\n        ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                    {Tracing::Reason::Sampling, true})\n        ->finishSpan();\n\n    // Complete in-flight request.\n    Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n        Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"202\"}}}));\n    callback->onSuccess(request, std::move(msg));\n\n    // Verify observability.\n    EXPECT_EQ(4U, stats_.counter(\"tracing.zipkin.spans_sent\").value());\n    EXPECT_EQ(2U, stats_.counter(\"tracing.zipkin.reports_skipped_no_cluster\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.reports_sent\").value());\n    EXPECT_EQ(0U, stats_.counter(\"tracing.zipkin.reports_dropped\").value());\n    EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.reports_failed\").value());\n  }\n}\n\nTEST_F(ZipkinDriverTest, CancelInflightRequestsOnDestruction) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  StrictMock<Http::MockAsyncClientRequest> request1(&cm_.async_client_),\n      request2(&cm_.async_client_), request3(&cm_.async_client_), request4(&cm_.async_client_);\n  Http::AsyncClient::Callbacks* callback{};\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n\n  // Expect 4 separate report requests to be made.\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)))\n      .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request1)))\n      .WillOnce(Return(&request2))\n      .WillOnce(Return(&request3))\n      .WillOnce(Return(&request4));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.min_flush_spans\", 5))\n      .Times(4)\n      .WillRepeatedly(Return(1));\n  
EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.request_timeout\", 5000U))\n      .Times(4)\n      .WillRepeatedly(Return(5000U));\n\n  // Trigger 1st report request.\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  // Trigger 2nd report request.\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  // Trigger 3rd report request.\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n  // Trigger 4th report request.\n  driver_\n      ->startSpan(config_, request_headers_, operation_name_, start_time_,\n                  {Tracing::Reason::Sampling, true})\n      ->finishSpan();\n\n  Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(\n      Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}}));\n\n  // Simulate completion of the 2nd report request.\n  callback->onSuccess(request2, std::move(msg));\n\n  // Simulate failure of the 3rd report request.\n  callback->onFailure(request3, Http::AsyncClient::FailureReason::Reset);\n\n  // Expect 1st and 4th requests to be cancelled on destruction.\n  EXPECT_CALL(request1, cancel());\n  EXPECT_CALL(request4, cancel());\n\n  // Trigger destruction.\n  driver_.reset();\n}\n\nTEST_F(ZipkinDriverTest, FlushSpansTimer) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  const absl::optional<std::chrono::milliseconds> timeout(std::chrono::seconds(5));\n  EXPECT_CALL(cm_.async_client_,\n              send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout)));\n\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.min_flush_spans\", 5))\n      .WillOnce(Return(5));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, 
request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n  span->finishSpan();\n\n  // Timer should be re-enabled.\n  EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(5000), _));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.request_timeout\", 5000U))\n      .WillOnce(Return(5000U));\n  EXPECT_CALL(runtime_.snapshot_, getInteger(\"tracing.zipkin.flush_interval_ms\", 5000U))\n      .WillOnce(Return(5000U));\n\n  timer_->invokeCallback();\n\n  EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.timer_flushed\").value());\n  EXPECT_EQ(1U, stats_.counter(\"tracing.zipkin.spans_sent\").value());\n}\n\nTEST_F(ZipkinDriverTest, NoB3ContextSampledTrue) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID));\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID));\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  ZipkinSpanPtr zipkin_span(dynamic_cast<ZipkinSpan*>(span.release()));\n  EXPECT_TRUE(zipkin_span->span().sampled());\n}\n\nTEST_F(ZipkinDriverTest, NoB3ContextSampledFalse) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID));\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID));\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, false});\n\n  ZipkinSpanPtr zipkin_span(dynamic_cast<ZipkinSpan*>(span.release()));\n  
EXPECT_FALSE(zipkin_span->span().sampled());\n}\n\nTEST_F(ZipkinDriverTest, PropagateB3NoSampleDecisionSampleTrue) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  ZipkinSpanPtr zipkin_span(dynamic_cast<ZipkinSpan*>(span.release()));\n  EXPECT_TRUE(zipkin_span->span().sampled());\n}\n\nTEST_F(ZipkinDriverTest, PropagateB3NoSampleDecisionSampleFalse) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, false});\n\n  ZipkinSpanPtr zipkin_span(dynamic_cast<ZipkinSpan*>(span.release()));\n  EXPECT_FALSE(zipkin_span->span().sampled());\n}\n\nTEST_F(ZipkinDriverTest, PropagateB3NotSampled) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID));\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID));\n\n  // Only context header set is B3 sampled to 
indicate trace should not be sampled\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SAMPLED, NOT_SAMPLED);\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  request_headers_.remove(ZipkinCoreConstants::get().X_B3_SAMPLED);\n\n  span->injectContext(request_headers_);\n\n  auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED);\n\n  // Check B3 sampled flag is set to not sample\n  EXPECT_EQ(NOT_SAMPLED, sampled_entry->value().getStringView());\n}\n\nTEST_F(ZipkinDriverTest, PropagateB3NotSampledWithFalse) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID));\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID));\n\n  // Only context header set is B3 sampled to indicate trace should not be sampled (using legacy\n  // 'false' value)\n  const std::string sampled = \"false\";\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SAMPLED, sampled);\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  request_headers_.remove(ZipkinCoreConstants::get().X_B3_SAMPLED);\n\n  span->injectContext(request_headers_);\n\n  auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED);\n  // Check B3 sampled flag is set to not sample\n  EXPECT_EQ(NOT_SAMPLED, sampled_entry->value().getStringView());\n}\n\nTEST_F(ZipkinDriverTest, PropagateB3SampledWithTrue) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID));\n  EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID));\n\n  // Only context header set is B3 sampled to 
indicate trace should be sampled (using legacy\n  // 'true' value)\n  const std::string sampled = \"true\";\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SAMPLED, sampled);\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, false});\n\n  request_headers_.remove(ZipkinCoreConstants::get().X_B3_SAMPLED);\n\n  span->injectContext(request_headers_);\n\n  auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED);\n  // Check B3 sampled flag is set to sample\n  EXPECT_EQ(SAMPLED, sampled_entry->value().getStringView());\n}\n\nTEST_F(ZipkinDriverTest, PropagateB3SampleFalse) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SAMPLED, NOT_SAMPLED);\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  ZipkinSpanPtr zipkin_span(dynamic_cast<ZipkinSpan*>(span.release()));\n  EXPECT_FALSE(zipkin_span->span().sampled());\n}\n\nTEST_F(ZipkinDriverTest, ZipkinSpanTest) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  // ====\n  // Test effective setTag()\n  // ====\n\n  request_headers_.remove(Http::CustomHeaders::get().OtSpanContext);\n\n  // New span will have a CS annotation\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  ZipkinSpanPtr 
zipkin_span(dynamic_cast<ZipkinSpan*>(span.release()));\n  zipkin_span->setTag(\"key\", \"value\");\n\n  Span& zipkin_zipkin_span = zipkin_span->span();\n  EXPECT_EQ(1ULL, zipkin_zipkin_span.binaryAnnotations().size());\n  EXPECT_EQ(\"key\", zipkin_zipkin_span.binaryAnnotations()[0].key());\n  EXPECT_EQ(\"value\", zipkin_zipkin_span.binaryAnnotations()[0].value());\n\n  // ====\n  // Test setTag() with SR annotated span\n  // ====\n\n  const std::string trace_id = Hex::uint64ToHex(generateRandom64());\n  const std::string span_id = Hex::uint64ToHex(generateRandom64());\n  const std::string parent_id = Hex::uint64ToHex(generateRandom64());\n  const std::string context = trace_id + \";\" + span_id + \";\" + parent_id + \";\" + CLIENT_SEND;\n\n  request_headers_.setCopy(Http::CustomHeaders::get().OtSpanContext, context);\n\n  // New span will have an SR annotation\n  Tracing::SpanPtr span2 = driver_->startSpan(config_, request_headers_, operation_name_,\n                                              start_time_, {Tracing::Reason::Sampling, true});\n\n  ZipkinSpanPtr zipkin_span2(dynamic_cast<ZipkinSpan*>(span2.release()));\n  zipkin_span2->setTag(\"key2\", \"value2\");\n\n  Span& zipkin_zipkin_span2 = zipkin_span2->span();\n  EXPECT_EQ(1ULL, zipkin_zipkin_span2.binaryAnnotations().size());\n  EXPECT_EQ(\"key2\", zipkin_zipkin_span2.binaryAnnotations()[0].key());\n  EXPECT_EQ(\"value2\", zipkin_zipkin_span2.binaryAnnotations()[0].value());\n\n  // ====\n  // Test setTag() with empty annotations vector\n  // ====\n  Tracing::SpanPtr span3 = driver_->startSpan(config_, request_headers_, operation_name_,\n                                              start_time_, {Tracing::Reason::Sampling, true});\n  ZipkinSpanPtr zipkin_span3(dynamic_cast<ZipkinSpan*>(span3.release()));\n  Span& zipkin_zipkin_span3 = zipkin_span3->span();\n\n  std::vector<Annotation> annotations;\n  zipkin_zipkin_span3.setAnnotations(annotations);\n\n  zipkin_span3->setTag(\"key3\", \"value3\");\n  
EXPECT_EQ(1ULL, zipkin_zipkin_span3.binaryAnnotations().size());\n  EXPECT_EQ(\"key3\", zipkin_zipkin_span3.binaryAnnotations()[0].key());\n  EXPECT_EQ(\"value3\", zipkin_zipkin_span3.binaryAnnotations()[0].value());\n\n  // ====\n  // Test effective log()\n  // ====\n\n  Tracing::SpanPtr span4 = driver_->startSpan(config_, request_headers_, operation_name_,\n                                              start_time_, {Tracing::Reason::Sampling, true});\n  const auto timestamp =\n      SystemTime{std::chrono::duration_cast<SystemTime::duration>(std::chrono::hours{123})};\n  const auto timestamp_count =\n      std::chrono::duration_cast<std::chrono::microseconds>(timestamp.time_since_epoch()).count();\n  span4->log(timestamp, \"abc\");\n\n  ZipkinSpanPtr zipkin_span4(dynamic_cast<ZipkinSpan*>(span4.release()));\n  Span& zipkin_zipkin_span4 = zipkin_span4->span();\n  EXPECT_FALSE(zipkin_zipkin_span4.annotations().empty());\n  EXPECT_EQ(timestamp_count, zipkin_zipkin_span4.annotations().back().timestamp());\n  EXPECT_EQ(\"abc\", zipkin_zipkin_span4.annotations().back().value());\n\n  // ====\n  // Test baggage noop\n  // ====\n  Tracing::SpanPtr span5 = driver_->startSpan(config_, request_headers_, operation_name_,\n                                              start_time_, {Tracing::Reason::Sampling, true});\n  span5->setBaggage(\"baggage_key\", \"baggage_value\");\n  EXPECT_EQ(\"\", span5->getBaggage(\"baggage_key\"));\n}\n\nTEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3HeadersTest) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  const std::string trace_id = Hex::uint64ToHex(generateRandom64());\n  const std::string span_id = Hex::uint64ToHex(generateRandom64());\n  const std::string parent_id = Hex::uint64ToHex(generateRandom64());\n\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, trace_id);\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, span_id);\n  
request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID, parent_id);\n\n  // New span will have an SR annotation - so its span and parent ids will be\n  // the same as the supplied span context (i.e. shared context)\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  ZipkinSpanPtr zipkin_span(dynamic_cast<ZipkinSpan*>(span.release()));\n\n  EXPECT_EQ(trace_id, zipkin_span->span().traceIdAsHexString());\n  EXPECT_EQ(span_id, zipkin_span->span().idAsHexString());\n  EXPECT_EQ(parent_id, zipkin_span->span().parentIdAsHexString());\n  EXPECT_TRUE(zipkin_span->span().sampled());\n}\n\nTEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3HeadersEmptyParentSpanTest) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  // Root span so have same trace and span id\n  const std::string id = Hex::uint64ToHex(generateRandom64());\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, id);\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, id);\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SAMPLED, SAMPLED);\n\n  // Set parent span id to empty string, to ensure it is ignored\n  const std::string parent_span_id = \"\";\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID, parent_span_id);\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  ZipkinSpanPtr zipkin_span(dynamic_cast<ZipkinSpan*>(span.release()));\n  EXPECT_TRUE(zipkin_span->span().sampled());\n}\n\nTEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3Headers128TraceIdTest) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  const uint64_t trace_id_high = generateRandom64();\n  const uint64_t trace_id_low = generateRandom64();\n  const 
std::string trace_id = Hex::uint64ToHex(trace_id_high) + Hex::uint64ToHex(trace_id_low);\n  const std::string span_id = Hex::uint64ToHex(generateRandom64());\n  const std::string parent_id = Hex::uint64ToHex(generateRandom64());\n\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, trace_id);\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, span_id);\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID, parent_id);\n\n  // New span will have an SR annotation - so its span and parent ids will be\n  // the same as the supplied span context (i.e. shared context)\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  ZipkinSpanPtr zipkin_span(dynamic_cast<ZipkinSpan*>(span.release()));\n\n  EXPECT_EQ(trace_id_high, zipkin_span->span().traceIdHigh());\n  EXPECT_EQ(trace_id_low, zipkin_span->span().traceId());\n  EXPECT_EQ(trace_id, zipkin_span->span().traceIdAsHexString());\n  EXPECT_EQ(span_id, zipkin_span->span().idAsHexString());\n  EXPECT_EQ(parent_id, zipkin_span->span().parentIdAsHexString());\n  EXPECT_TRUE(zipkin_span->span().sampled());\n}\n\nTEST_F(ZipkinDriverTest, ZipkinSpanContextFromInvalidTraceIdB3HeadersTest) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, std::string(\"xyz\"));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, 
true});\n  EXPECT_NE(nullptr, dynamic_cast<Tracing::NullSpan*>(span.get()));\n}\n\nTEST_F(ZipkinDriverTest, ZipkinSpanContextFromInvalidSpanIdB3HeadersTest) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, std::string(\"xyz\"));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n  EXPECT_NE(nullptr, dynamic_cast<Tracing::NullSpan*>(span.get()));\n}\n\nTEST_F(ZipkinDriverTest, ZipkinSpanContextFromInvalidParentIdB3HeadersTest) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID,\n                                   std::string(\"xyz\"));\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n  EXPECT_NE(nullptr, dynamic_cast<Tracing::NullSpan*>(span.get()));\n}\n\nTEST_F(ZipkinDriverTest, ExplicitlySetSampledFalse) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, true});\n\n  
span->setSampled(false);\n\n  request_headers_.remove(ZipkinCoreConstants::get().X_B3_SAMPLED);\n\n  span->injectContext(request_headers_);\n\n  auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED);\n  // Check B3 sampled flag is set to not sample\n  EXPECT_EQ(NOT_SAMPLED, sampled_entry->value().getStringView());\n}\n\nTEST_F(ZipkinDriverTest, ExplicitlySetSampledTrue) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, false});\n\n  span->setSampled(true);\n\n  request_headers_.remove(ZipkinCoreConstants::get().X_B3_SAMPLED);\n\n  span->injectContext(request_headers_);\n\n  auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED);\n  // Check B3 sampled flag is set to sample\n  EXPECT_EQ(SAMPLED, sampled_entry->value().getStringView());\n}\n\nTEST_F(ZipkinDriverTest, DuplicatedHeader) {\n  setupValidDriver(\"HTTP_JSON_V1\");\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID,\n                                   Hex::uint64ToHex(generateRandom64()));\n  Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_,\n                                             start_time_, {Tracing::Reason::Sampling, false});\n\n  using DupCallback = std::function<bool(absl::string_view key)>;\n  DupCallback dup_callback = [](absl::string_view key) -> bool {\n    static absl::flat_hash_map<std::string, bool> dup;\n    if (dup.find(key) == dup.end()) {\n      dup[key] = true;\n      return false;\n    }\n    
return true;\n  };\n\n  span->setSampled(true);\n  span->injectContext(request_headers_);\n  request_headers_.iterate(\n      [&dup_callback](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n        dup_callback(header.key().getStringView());\n        return Http::HeaderMap::Iterate::Continue;\n      });\n}\n\n} // namespace\n} // namespace Zipkin\n} // namespace Tracers\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/alts/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n    \"envoy_select_google_grpc\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.transport_sockets.alts\",\n    deps = [\n        \"//source/common/singleton:manager_impl_lib\",\n        \"//source/extensions/transport_sockets/alts:config\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"tsi_frame_protector_test\",\n    srcs = [\"tsi_frame_protector_test.cc\"],\n    extension_name = \"envoy.transport_sockets.alts\",\n    deps = [\n        \"//source/extensions/transport_sockets/alts:tsi_frame_protector\",\n        \"//test/mocks/buffer:buffer_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"tsi_handshaker_test\",\n    srcs = [\"tsi_handshaker_test.cc\"],\n    extension_name = \"envoy.transport_sockets.alts\",\n    # Fails intermittently on local build\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/extensions/transport_sockets/alts:tsi_handshaker\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/event:event_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"tsi_socket_test\",\n    srcs = [\"tsi_socket_test.cc\"],\n    extension_name = \"envoy.transport_sockets.alts\",\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//source/extensions/transport_sockets/alts:tsi_socket\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/network:network_mocks\",\n    
],\n)\n\nenvoy_extension_cc_test(\n    name = \"noop_transport_socket_callbacks_test\",\n    srcs = [\"noop_transport_socket_callbacks_test.cc\"],\n    extension_name = \"envoy.transport_sockets.alts\",\n    deps = [\n        \"//source/extensions/transport_sockets/alts:noop_transport_socket_callbacks_lib\",\n        \"//test/mocks/network:network_mocks\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"alts_integration_test\",\n    srcs = envoy_select_google_grpc([\"alts_integration_test.cc\"]),\n    extension_name = \"envoy.transport_sockets.alts\",\n    external_deps = [\n        \"grpc_alts_fake_handshaker_server\",\n        \"grpc_alts_handshaker_proto\",\n        \"grpc_alts_transport_security_common_proto\",\n    ],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:connection_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/transport_sockets/alts:config\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/transport_socket/alts/v2alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/transport_sockets/alts/alts_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/transport_sockets/alts/v3/alts.pb.h\"\n\n#include \"common/common/thread.h\"\n\n#include \"extensions/transport_sockets/alts/config.h\"\n\n#ifdef major\n#undef major\n#endif\n#ifdef minor\n#undef minor\n#endif\n\n#include \"test/core/tsi/alts/fake_handshaker/fake_handshaker_server.h\"\n#include \"test/core/tsi/alts/fake_handshaker/handshaker.grpc.pb.h\"\n#include \"test/core/tsi/alts/fake_handshaker/handshaker.pb.h\"\n#include \"test/core/tsi/alts/fake_handshaker/transport_security_common.pb.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/integration.h\"\n#include \"test/integration/server.h\"\n#include \"test/integration/utility.h\"\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"gmock/gmock.h\"\n#include \"grpcpp/grpcpp.h\"\n#include \"grpcpp/impl/codegen/service_type.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\nnamespace {\n\n// Fake handshaker message, copied from grpc::gcp::FakeHandshakerService implementation.\nconstexpr char kClientInitFrame[] = \"ClientInit\";\n\n// Hollowed out implementation of HandshakerService that is dysfunctional, but\n// responds correctly to the first client request, capturing client and server\n// ALTS versions in the process.\nclass CapturingHandshakerService : public grpc::gcp::HandshakerService::Service {\npublic:\n  CapturingHandshakerService() = default;\n\n  grpc::Status\n  DoHandshake(grpc::ServerContext*,\n              grpc::ServerReaderWriter<grpc::gcp::HandshakerResp, grpc::gcp::HandshakerReq>* stream)\n      override {\n    grpc::gcp::HandshakerReq request;\n    
grpc::gcp::HandshakerResp response;\n    while (stream->Read(&request)) {\n      if (request.has_client_start()) {\n        client_versions = request.client_start().rpc_versions();\n        // Sets response to make first request successful.\n        response.set_out_frames(kClientInitFrame);\n        response.set_bytes_consumed(0);\n        response.mutable_status()->set_code(grpc::StatusCode::OK);\n      } else if (request.has_server_start()) {\n        server_versions = request.server_start().rpc_versions();\n        response.mutable_status()->set_code(grpc::StatusCode::CANCELLED);\n      }\n      stream->Write(response);\n      request.Clear();\n    }\n    return grpc::Status::OK;\n  }\n\n  // Storing client and server RPC versions for later verification.\n  grpc::gcp::RpcProtocolVersions client_versions;\n  grpc::gcp::RpcProtocolVersions server_versions;\n};\n\nclass AltsIntegrationTestBase : public testing::TestWithParam<Network::Address::IpVersion>,\n                                public HttpIntegrationTest {\npublic:\n  AltsIntegrationTestBase(const std::string& server_peer_identity,\n                          const std::string& client_peer_identity, bool server_connect_handshaker,\n                          bool client_connect_handshaker, bool capturing_handshaker = false)\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()),\n        server_peer_identity_(server_peer_identity), client_peer_identity_(client_peer_identity),\n        server_connect_handshaker_(server_connect_handshaker),\n        client_connect_handshaker_(client_connect_handshaker),\n        capturing_handshaker_(capturing_handshaker) {}\n\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* transport_socket = bootstrap.mutable_static_resources()\n                                   ->mutable_listeners(0)\n                                   ->mutable_filter_chains(0)\n               
                    ->mutable_transport_socket();\n      transport_socket->set_name(\"envoy.transport_sockets.alts\");\n      envoy::extensions::transport_sockets::alts::v3::Alts alts_config;\n      if (!server_peer_identity_.empty()) {\n        alts_config.add_peer_service_accounts(server_peer_identity_);\n      }\n      alts_config.set_handshaker_service(fakeHandshakerServerAddress(server_connect_handshaker_));\n      transport_socket->mutable_typed_config()->PackFrom(alts_config);\n    });\n    HttpIntegrationTest::initialize();\n    registerTestServerPorts({\"http\"});\n  }\n\n  void SetUp() override {\n    fake_handshaker_server_thread_ = api_->threadFactory().createThread([this]() {\n      std::unique_ptr<grpc::Service> service;\n      if (capturing_handshaker_) {\n        capturing_handshaker_service_ = new CapturingHandshakerService();\n        service = std::unique_ptr<grpc::Service>{capturing_handshaker_service_};\n      } else {\n        capturing_handshaker_service_ = nullptr;\n        service = grpc::gcp::CreateFakeHandshakerService();\n      }\n\n      std::string server_address = Network::Test::getLoopbackAddressUrlString(version_) + \":0\";\n      grpc::ServerBuilder builder;\n      builder.AddListeningPort(server_address, grpc::InsecureServerCredentials(),\n                               &fake_handshaker_server_port_);\n      builder.RegisterService(service.get());\n\n      fake_handshaker_server_ = builder.BuildAndStart();\n      fake_handshaker_server_ci_.setReady();\n      fake_handshaker_server_->Wait();\n    });\n\n    fake_handshaker_server_ci_.waitReady();\n\n    NiceMock<Server::Configuration::MockTransportSocketFactoryContext> mock_factory_ctx;\n    // We fake the singleton manager for the client, since it doesn't need to manage ALTS global\n    // state, this is done by the test server instead.\n    // TODO(htuch): Make this a proper mock.\n    class FakeSingletonManager : public Singleton::Manager {\n    public:\n      
Singleton::InstanceSharedPtr get(const std::string&, Singleton::SingletonFactoryCb) override {\n        return nullptr;\n      }\n    };\n    FakeSingletonManager fsm;\n    ON_CALL(mock_factory_ctx, singletonManager()).WillByDefault(ReturnRef(fsm));\n    UpstreamAltsTransportSocketConfigFactory factory;\n\n    envoy::extensions::transport_sockets::alts::v3::Alts alts_config;\n    alts_config.set_handshaker_service(fakeHandshakerServerAddress(client_connect_handshaker_));\n    if (!client_peer_identity_.empty()) {\n      alts_config.add_peer_service_accounts(client_peer_identity_);\n    }\n    ProtobufTypes::MessagePtr config = factory.createEmptyConfigProto();\n    TestUtility::jsonConvert(alts_config, *config);\n    ENVOY_LOG_MISC(info, \"{}\", config->DebugString());\n\n    client_alts_ = factory.createTransportSocketFactory(*config, mock_factory_ctx);\n  }\n\n  void TearDown() override {\n    HttpIntegrationTest::cleanupUpstreamAndDownstream();\n    dispatcher_->clearDeferredDeleteList();\n    if (fake_handshaker_server_ != nullptr) {\n      fake_handshaker_server_->Shutdown();\n    }\n    fake_handshaker_server_thread_->join();\n  }\n\n  Network::ClientConnectionPtr makeAltsConnection() {\n    Network::Address::InstanceConstSharedPtr address = getAddress(version_, lookupPort(\"http\"));\n    return dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(),\n                                               client_alts_->createTransportSocket(nullptr),\n                                               nullptr);\n  }\n\n  std::string fakeHandshakerServerAddress(bool connect_to_handshaker) {\n    if (connect_to_handshaker) {\n      return absl::StrCat(Network::Test::getLoopbackAddressUrlString(version_), \":\",\n                          std::to_string(fake_handshaker_server_port_));\n    }\n    return wrongHandshakerServerAddress();\n  }\n\n  std::string wrongHandshakerServerAddress() { return \" \"; }\n\n  
Network::Address::InstanceConstSharedPtr getAddress(const Network::Address::IpVersion& version,\n                                                      int port) {\n    std::string url =\n        \"tcp://\" + Network::Test::getLoopbackAddressUrlString(version) + \":\" + std::to_string(port);\n    return Network::Utility::resolveUrl(url);\n  }\n\n  const std::string server_peer_identity_;\n  const std::string client_peer_identity_;\n  bool server_connect_handshaker_;\n  bool client_connect_handshaker_;\n  Thread::ThreadPtr fake_handshaker_server_thread_;\n  std::unique_ptr<grpc::Server> fake_handshaker_server_;\n  ConditionalInitializer fake_handshaker_server_ci_;\n  int fake_handshaker_server_port_{};\n  Network::TransportSocketFactoryPtr client_alts_;\n  bool capturing_handshaker_;\n  CapturingHandshakerService* capturing_handshaker_service_;\n};\n\nclass AltsIntegrationTestValidPeer : public AltsIntegrationTestBase {\npublic:\n  // FakeHandshake server sends \"peer_identity\" as peer service account. 
Set this\n  // information into config to pass validation.\n  AltsIntegrationTestValidPeer()\n      : AltsIntegrationTestBase(\"peer_identity\", \"\",\n                                /* server_connect_handshaker */ true,\n                                /* client_connect_handshaker */ true) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestValidPeer,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verifies that when received peer service account passes validation, the alts\n// handshake succeeds.\nTEST_P(AltsIntegrationTestValidPeer, RouterRequestAndResponseWithBodyNoBuffer) {\n  ConnectionCreationFunction creator = [this]() -> Network::ClientConnectionPtr {\n    return makeAltsConnection();\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\nclass AltsIntegrationTestEmptyPeer : public AltsIntegrationTestBase {\npublic:\n  AltsIntegrationTestEmptyPeer()\n      : AltsIntegrationTestBase(\"\", \"\",\n                                /* server_connect_handshaker */ true,\n                                /* client_connect_handshaker */ true) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestEmptyPeer,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verifies that when peer service account is not set into config, the alts\n// handshake succeeds.\nTEST_P(AltsIntegrationTestEmptyPeer, RouterRequestAndResponseWithBodyNoBuffer) {\n  ConnectionCreationFunction creator = [this]() -> Network::ClientConnectionPtr {\n    return makeAltsConnection();\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\nclass AltsIntegrationTestClientInvalidPeer : public AltsIntegrationTestBase {\npublic:\n  AltsIntegrationTestClientInvalidPeer()\n      : AltsIntegrationTestBase(\"\", 
\"invalid_client_identity\",\n                                /* server_connect_handshaker */ true,\n                                /* client_connect_handshaker */ true) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestClientInvalidPeer,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verifies that when client receives peer service account which does not match\n// any account in config, the handshake will fail and client closes connection.\nTEST_P(AltsIntegrationTestClientInvalidPeer, ClientValidationFail) {\n  initialize();\n  codec_client_ = makeRawHttpConnection(makeAltsConnection(), absl::nullopt);\n  EXPECT_FALSE(codec_client_->connected());\n}\n\nclass AltsIntegrationTestServerInvalidPeer : public AltsIntegrationTestBase {\npublic:\n  AltsIntegrationTestServerInvalidPeer()\n      : AltsIntegrationTestBase(\"invalid_server_identity\", \"\",\n                                /* server_connect_handshaker */ true,\n                                /* client_connect_handshaker */ true) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestServerInvalidPeer,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verifies that when Envoy receives peer service account which does not match\n// any account in config, the handshake will fail and Envoy closes connection.\nTEST_P(AltsIntegrationTestServerInvalidPeer, ServerValidationFail) {\n  initialize();\n\n  testing::NiceMock<Network::MockConnectionCallbacks> client_callbacks;\n  Network::ClientConnectionPtr client_conn = makeAltsConnection();\n  client_conn->addConnectionCallbacks(client_callbacks);\n  EXPECT_CALL(client_callbacks, onEvent(Network::ConnectionEvent::Connected));\n  client_conn->connect();\n\n  EXPECT_CALL(client_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n  
    .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nclass AltsIntegrationTestClientWrongHandshaker : public AltsIntegrationTestBase {\npublic:\n  AltsIntegrationTestClientWrongHandshaker()\n      : AltsIntegrationTestBase(\"\", \"\",\n                                /* server_connect_handshaker */ true,\n                                /* client_connect_handshaker */ false) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestClientWrongHandshaker,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verifies that when client connects to the wrong handshaker server, handshake fails\n// and connection closes.\nTEST_P(AltsIntegrationTestClientWrongHandshaker, ConnectToWrongHandshakerAddress) {\n  initialize();\n  codec_client_ = makeRawHttpConnection(makeAltsConnection(), absl::nullopt);\n  EXPECT_FALSE(codec_client_->connected());\n}\n\nclass AltsIntegrationTestCapturingHandshaker : public AltsIntegrationTestBase {\npublic:\n  AltsIntegrationTestCapturingHandshaker()\n      : AltsIntegrationTestBase(\"\", \"\",\n                                /* server_connect_handshaker */ true,\n                                /* client_connect_handshaker */ true,\n                                /* capturing_handshaker */ true) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestCapturingHandshaker,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verifies that handshake request should include ALTS version.\nTEST_P(AltsIntegrationTestCapturingHandshaker, CheckAltsVersion) {\n  initialize();\n  codec_client_ = makeRawHttpConnection(makeAltsConnection(), absl::nullopt);\n  EXPECT_FALSE(codec_client_->connected());\n  
EXPECT_EQ(capturing_handshaker_service_->client_versions.max_rpc_version().major(),\n            capturing_handshaker_service_->server_versions.max_rpc_version().major());\n  EXPECT_EQ(capturing_handshaker_service_->client_versions.max_rpc_version().minor(),\n            capturing_handshaker_service_->server_versions.max_rpc_version().minor());\n  EXPECT_EQ(capturing_handshaker_service_->client_versions.min_rpc_version().major(),\n            capturing_handshaker_service_->server_versions.min_rpc_version().major());\n  EXPECT_EQ(capturing_handshaker_service_->client_versions.min_rpc_version().minor(),\n            capturing_handshaker_service_->server_versions.min_rpc_version().minor());\n  EXPECT_NE(0, capturing_handshaker_service_->client_versions.max_rpc_version().major());\n  EXPECT_NE(0, capturing_handshaker_service_->client_versions.max_rpc_version().minor());\n  EXPECT_NE(0, capturing_handshaker_service_->client_versions.min_rpc_version().major());\n  EXPECT_NE(0, capturing_handshaker_service_->client_versions.min_rpc_version().minor());\n}\n\n} // namespace\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/alts/config_test.cc",
    "content": "#include \"common/protobuf/protobuf.h\"\n#include \"common/singleton/manager_impl.h\"\n\n#include \"extensions/transport_sockets/alts/config.h\"\n\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing Envoy::Server::Configuration::MockTransportSocketFactoryContext;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\nnamespace {\n\nTEST(UpstreamAltsConfigTest, CreateSocketFactory) {\n  NiceMock<MockTransportSocketFactoryContext> factory_context;\n  Singleton::ManagerImpl singleton_manager{Thread::threadFactoryForTest()};\n  EXPECT_CALL(factory_context, singletonManager()).WillRepeatedly(ReturnRef(singleton_manager));\n  UpstreamAltsTransportSocketConfigFactory factory;\n\n  ProtobufTypes::MessagePtr config = factory.createEmptyConfigProto();\n\n  std::string yaml = R\"EOF(\n  handshaker_service: 169.254.169.254:8080\n  peer_service_accounts: [\"server-sa\"]\n  )EOF\";\n  TestUtility::loadFromYaml(yaml, *config);\n\n  auto socket_factory = factory.createTransportSocketFactory(*config, factory_context);\n\n  EXPECT_NE(nullptr, socket_factory);\n  EXPECT_TRUE(socket_factory->implementsSecureTransport());\n}\n\nTEST(DownstreamAltsConfigTest, CreateSocketFactory) {\n  NiceMock<MockTransportSocketFactoryContext> factory_context;\n  Singleton::ManagerImpl singleton_manager{Thread::threadFactoryForTest()};\n  EXPECT_CALL(factory_context, singletonManager()).WillRepeatedly(ReturnRef(singleton_manager));\n  DownstreamAltsTransportSocketConfigFactory factory;\n\n  ProtobufTypes::MessagePtr config = factory.createEmptyConfigProto();\n\n  std::string yaml = R\"EOF(\n  handshaker_service: 169.254.169.254:8080\n  peer_service_accounts: [\"server-sa\"]\n  )EOF\";\n  TestUtility::loadFromYaml(yaml, *config);\n\n  auto socket_factory = factory.createTransportSocketFactory(*config, factory_context, {});\n\n  
EXPECT_NE(nullptr, socket_factory);\n  EXPECT_TRUE(socket_factory->implementsSecureTransport());\n}\n\n} // namespace\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/alts/noop_transport_socket_callbacks_test.cc",
    "content": "#include \"envoy/network/transport_socket.h\"\n\n#include \"common/network/io_socket_handle_impl.h\"\n\n#include \"extensions/transport_sockets/alts/noop_transport_socket_callbacks.h\"\n\n#include \"test/mocks/network/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\nnamespace {\n\nclass TestTransportSocketCallbacks : public Network::TransportSocketCallbacks {\npublic:\n  explicit TestTransportSocketCallbacks(Network::Connection& connection)\n      : io_handle_(std::make_unique<Network::IoSocketHandleImpl>()), connection_(connection) {}\n\n  ~TestTransportSocketCallbacks() override = default;\n  Network::IoHandle& ioHandle() override { return *io_handle_; }\n  const Network::IoHandle& ioHandle() const override { return *io_handle_; }\n  Network::Connection& connection() override { return connection_; }\n  bool shouldDrainReadBuffer() override { return false; }\n  void setReadBufferReady() override { set_read_buffer_ready_ = true; }\n  void raiseEvent(Network::ConnectionEvent) override { event_raised_ = true; }\n  void flushWriteBuffer() override { write_buffer_flushed_ = true; }\n\n  bool event_raised() const { return event_raised_; }\n  bool set_read_buffer_ready() const { return set_read_buffer_ready_; }\n  bool write_buffer_flushed() const { return write_buffer_flushed_; }\n\nprivate:\n  bool event_raised_{false};\n  bool set_read_buffer_ready_{false};\n  bool write_buffer_flushed_{false};\n  Network::IoHandlePtr io_handle_;\n  Network::Connection& connection_;\n};\n\nclass NoOpTransportSocketCallbacksTest : public testing::Test {\nprotected:\n  NoOpTransportSocketCallbacksTest()\n      : wrapper_callbacks_(connection_), wrapped_callbacks_(wrapper_callbacks_) {}\n\n  Network::MockConnection connection_;\n  TestTransportSocketCallbacks wrapper_callbacks_;\n  NoOpTransportSocketCallbacks wrapped_callbacks_;\n};\n\nTEST_F(NoOpTransportSocketCallbacksTest, 
TestAllCallbacks) {\n  EXPECT_EQ(&wrapper_callbacks_.ioHandle(), &wrapped_callbacks_.ioHandle());\n  EXPECT_EQ(&connection_, &wrapped_callbacks_.connection());\n  EXPECT_FALSE(wrapped_callbacks_.shouldDrainReadBuffer());\n\n  wrapped_callbacks_.setReadBufferReady();\n  EXPECT_FALSE(wrapper_callbacks_.set_read_buffer_ready());\n  wrapped_callbacks_.raiseEvent(Network::ConnectionEvent::Connected);\n  EXPECT_FALSE(wrapper_callbacks_.event_raised());\n  wrapped_callbacks_.flushWriteBuffer();\n  EXPECT_FALSE(wrapper_callbacks_.write_buffer_flushed());\n}\n\n} // namespace\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/alts/tsi_frame_protector_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/transport_sockets/alts/tsi_frame_protector.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"src/core/tsi/fake_transport_security.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\nnamespace {\n\nusing namespace std::string_literals;\n\n/**\n * Test with fake frame protector. The protected frame header is 4 byte length (little endian,\n * include header itself) and following the body.\n */\nclass TsiFrameProtectorTest : public testing::Test {\npublic:\n  TsiFrameProtectorTest()\n      : raw_frame_protector_(tsi_create_fake_zero_copy_grpc_protector(nullptr)),\n        frame_protector_(CFrameProtectorPtr{raw_frame_protector_}) {}\n\nprotected:\n  tsi_zero_copy_grpc_protector* raw_frame_protector_;\n  TsiFrameProtector frame_protector_;\n};\n\nTEST_F(TsiFrameProtectorTest, Protect) {\n  {\n    Buffer::OwnedImpl input, encrypted;\n    input.add(\"foo\");\n\n    EXPECT_EQ(TSI_OK, frame_protector_.protect(input, encrypted));\n    EXPECT_EQ(\"\\x07\\0\\0\\0foo\"s, encrypted.toString());\n  }\n\n  {\n    Buffer::OwnedImpl input, encrypted;\n    input.add(\"foo\");\n\n    EXPECT_EQ(TSI_OK, frame_protector_.protect(input, encrypted));\n    EXPECT_EQ(\"\\x07\\0\\0\\0foo\"s, encrypted.toString());\n\n    input.add(\"bar\");\n    EXPECT_EQ(TSI_OK, frame_protector_.protect(input, encrypted));\n    EXPECT_EQ(\"\\x07\\0\\0\\0foo\\x07\\0\\0\\0bar\"s, encrypted.toString());\n  }\n\n  {\n    Buffer::OwnedImpl input, encrypted;\n    input.add(std::string(20000, 'a'));\n\n    EXPECT_EQ(TSI_OK, frame_protector_.protect(input, encrypted));\n\n    // fake frame protector will split long buffer to 2 \"encrypted\" frames with length 16K.\n    std::string expected =\n        \"\\0\\x40\\0\\0\"s + std::string(16380, 'a') + \"\\x28\\x0e\\0\\0\"s + std::string(3620, 'a');\n    EXPECT_EQ(expected, encrypted.toString());\n  
}\n}\n\nTEST_F(TsiFrameProtectorTest, ProtectError) {\n  const tsi_zero_copy_grpc_protector_vtable* vtable = raw_frame_protector_->vtable;\n  tsi_zero_copy_grpc_protector_vtable mock_vtable = *raw_frame_protector_->vtable;\n  mock_vtable.protect = [](tsi_zero_copy_grpc_protector*, grpc_slice_buffer*, grpc_slice_buffer*) {\n    return TSI_INTERNAL_ERROR;\n  };\n  raw_frame_protector_->vtable = &mock_vtable;\n\n  Buffer::OwnedImpl input, encrypted;\n  input.add(\"foo\");\n\n  EXPECT_EQ(TSI_INTERNAL_ERROR, frame_protector_.protect(input, encrypted));\n\n  raw_frame_protector_->vtable = vtable;\n}\n\nTEST_F(TsiFrameProtectorTest, Unprotect) {\n  {\n    Buffer::OwnedImpl input, decrypted;\n    input.add(\"\\x07\\0\\0\\0bar\"s);\n\n    EXPECT_EQ(TSI_OK, frame_protector_.unprotect(input, decrypted));\n    EXPECT_EQ(\"bar\", decrypted.toString());\n  }\n\n  {\n    Buffer::OwnedImpl input, decrypted;\n    input.add(\"\\x0a\\0\\0\\0foo\"s);\n\n    EXPECT_EQ(TSI_OK, frame_protector_.unprotect(input, decrypted));\n    EXPECT_EQ(\"\", decrypted.toString());\n\n    input.add(\"bar\");\n    EXPECT_EQ(TSI_OK, frame_protector_.unprotect(input, decrypted));\n    EXPECT_EQ(\"foobar\", decrypted.toString());\n  }\n\n  {\n    Buffer::OwnedImpl input, decrypted;\n    input.add(\"\\0\\x40\\0\\0\"s + std::string(16380, 'a'));\n    input.add(\"\\x28\\x0e\\0\\0\"s + std::string(3620, 'a'));\n\n    EXPECT_EQ(TSI_OK, frame_protector_.unprotect(input, decrypted));\n    EXPECT_EQ(std::string(20000, 'a'), decrypted.toString());\n  }\n}\nTEST_F(TsiFrameProtectorTest, UnprotectError) {\n  const tsi_zero_copy_grpc_protector_vtable* vtable = raw_frame_protector_->vtable;\n  tsi_zero_copy_grpc_protector_vtable mock_vtable = *raw_frame_protector_->vtable;\n  mock_vtable.unprotect = [](tsi_zero_copy_grpc_protector*, grpc_slice_buffer*,\n                             grpc_slice_buffer*) { return TSI_INTERNAL_ERROR; };\n  raw_frame_protector_->vtable = &mock_vtable;\n\n  Buffer::OwnedImpl input, 
decrypted;\n  input.add(\"\\x0a\\0\\0\\0foo\"s);\n\n  EXPECT_EQ(TSI_INTERNAL_ERROR, frame_protector_.unprotect(input, decrypted));\n\n  raw_frame_protector_->vtable = vtable;\n}\n\n} // namespace\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/alts/tsi_handshaker_test.cc",
    "content": "#include \"extensions/transport_sockets/alts/tsi_handshaker.h\"\n\n#include \"test/mocks/event/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"src/core/tsi/fake_transport_security.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\nnamespace {\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::SaveArg;\n\nclass MockTsiHandshakerCallbacks : public TsiHandshakerCallbacks {\npublic:\n  void onNextDone(NextResultPtr&& result) override { onNextDone_(result.get()); }\n  MOCK_METHOD(void, onNextDone_, (NextResult*));\n\n  void expectDone(tsi_result status, Buffer::Instance& to_send, CHandshakerResultPtr& result) {\n    EXPECT_CALL(*this, onNextDone_(_))\n        .WillOnce(Invoke([&, status](TsiHandshakerCallbacks::NextResult* next_result) {\n          EXPECT_EQ(status, next_result->status_);\n          to_send.add(*next_result->to_send_);\n          result.swap(next_result->result_);\n        }));\n  }\n};\n\nclass TsiHandshakerTest : public testing::Test {\npublic:\n  TsiHandshakerTest()\n      : server_handshaker_({tsi_create_fake_handshaker(0)}, dispatcher_),\n        client_handshaker_({tsi_create_fake_handshaker(1)}, dispatcher_) {\n    server_handshaker_.setHandshakerCallbacks(server_callbacks_);\n    client_handshaker_.setHandshakerCallbacks(client_callbacks_);\n  }\n\nprotected:\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  MockTsiHandshakerCallbacks server_callbacks_;\n  MockTsiHandshakerCallbacks client_callbacks_;\n  TsiHandshaker server_handshaker_;\n  TsiHandshaker client_handshaker_;\n};\n\nTEST_F(TsiHandshakerTest, DoHandshake) {\n  InSequence s;\n\n  Buffer::OwnedImpl server_sent;\n  Buffer::OwnedImpl client_sent;\n\n  CHandshakerResultPtr client_result;\n  CHandshakerResultPtr server_result;\n\n  client_callbacks_.expectDone(TSI_OK, client_sent, client_result);\n  
client_handshaker_.next(server_sent); // Initially server_sent is empty.\n  EXPECT_EQ(nullptr, client_result);\n  EXPECT_EQ(\"CLIENT_INIT\", client_sent.toString().substr(4));\n\n  server_callbacks_.expectDone(TSI_OK, server_sent, server_result);\n  server_handshaker_.next(client_sent);\n  EXPECT_EQ(nullptr, client_result);\n  EXPECT_EQ(\"SERVER_INIT\", server_sent.toString().substr(4));\n\n  client_callbacks_.expectDone(TSI_OK, client_sent, client_result);\n  client_handshaker_.next(server_sent);\n  EXPECT_EQ(nullptr, client_result);\n  EXPECT_EQ(\"CLIENT_FINISHED\", client_sent.toString().substr(4));\n\n  server_callbacks_.expectDone(TSI_OK, server_sent, server_result);\n  server_handshaker_.next(client_sent);\n  EXPECT_NE(nullptr, server_result);\n  EXPECT_EQ(\"SERVER_FINISHED\", server_sent.toString().substr(4));\n\n  client_callbacks_.expectDone(TSI_OK, client_sent, client_result);\n  client_handshaker_.next(server_sent);\n  EXPECT_NE(nullptr, client_result);\n  EXPECT_EQ(\"\", client_sent.toString());\n\n  tsi_peer client_peer;\n  EXPECT_EQ(TSI_OK, tsi_handshaker_result_extract_peer(client_result.get(), &client_peer));\n  EXPECT_EQ(1, client_peer.property_count);\n  EXPECT_STREQ(\"certificate_type\", client_peer.properties[0].name);\n  absl::string_view client_certificate_type{client_peer.properties[0].value.data,\n                                            client_peer.properties[0].value.length};\n  EXPECT_EQ(\"FAKE\", client_certificate_type);\n\n  tsi_peer server_peer;\n  EXPECT_EQ(TSI_OK, tsi_handshaker_result_extract_peer(server_result.get(), &server_peer));\n  EXPECT_EQ(1, server_peer.property_count);\n  EXPECT_STREQ(\"certificate_type\", server_peer.properties[0].name);\n  absl::string_view server_certificate_type{server_peer.properties[0].value.data,\n                                            server_peer.properties[0].value.length};\n  EXPECT_EQ(\"FAKE\", server_certificate_type);\n\n  tsi_peer_destruct(&client_peer);\n  
tsi_peer_destruct(&server_peer);\n}\n\nTEST_F(TsiHandshakerTest, IncompleteData) {\n  InSequence s;\n\n  Buffer::OwnedImpl server_sent;\n  Buffer::OwnedImpl client_sent;\n\n  CHandshakerResultPtr client_result;\n  CHandshakerResultPtr server_result;\n\n  client_callbacks_.expectDone(TSI_OK, client_sent, client_result);\n  client_handshaker_.next(server_sent); // Initially server_sent is empty.\n  EXPECT_EQ(nullptr, client_result);\n  EXPECT_EQ(\"CLIENT_INIT\", client_sent.toString().substr(4));\n\n  client_sent.drain(3); // make data incomplete\n  server_callbacks_.expectDone(TSI_INCOMPLETE_DATA, server_sent, server_result);\n  server_handshaker_.next(client_sent);\n  EXPECT_EQ(nullptr, client_result);\n  EXPECT_EQ(\"\", server_sent.toString());\n}\n\nTEST_F(TsiHandshakerTest, DeferredDelete) {\n  InSequence s;\n\n  TsiHandshakerPtr handshaker{new TsiHandshaker({tsi_create_fake_handshaker(0)}, dispatcher_)};\n  handshaker->deferredDelete();\n  // The handshaker is now in dispatcher_ to delete queue.\n  EXPECT_EQ(dispatcher_.to_delete_.back().get(), handshaker.get());\n  handshaker.release();\n}\n\nTEST_F(TsiHandshakerTest, DeleteOnDone) {\n  InSequence s;\n\n  TsiHandshakerPtr handshaker(new TsiHandshaker({tsi_create_fake_handshaker(1)}, dispatcher_));\n  handshaker->setHandshakerCallbacks(client_callbacks_);\n\n  Buffer::OwnedImpl empty;\n  std::function<void()> done;\n\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&done));\n\n  handshaker->next(empty);\n  handshaker->deferredDelete();\n\n  // Make sure the handshaker is not in dispatcher_ queue, since the next call is not done.\n  EXPECT_NE(dispatcher_.to_delete_.back().get(), handshaker.get());\n\n  // After deferredDelete, the callback should be never invoked, in real use it might be already\n  // a dangling pointer.\n  EXPECT_CALL(client_callbacks_, onNextDone_(_)).Times(0);\n\n  // Simulate the next call is completed.\n  done();\n\n  // The handshaker is now in dispatcher_ to delete queue.\n  
EXPECT_EQ(dispatcher_.to_delete_.back().get(), handshaker.get());\n  handshaker.release();\n}\n\n} // namespace\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/alts/tsi_socket_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/transport_sockets/alts/tsi_socket.h\"\n\n#include \"test/mocks/network/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"src/core/tsi/fake_transport_security.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Alts {\nnamespace {\n\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nclass TsiSocketTest : public testing::Test {\nprotected:\n  TsiSocketTest() {\n    server_.handshaker_factory_ = [](Event::Dispatcher& dispatcher,\n                                     const Network::Address::InstanceConstSharedPtr&,\n                                     const Network::Address::InstanceConstSharedPtr&) {\n      CHandshakerPtr handshaker{tsi_create_fake_handshaker(/*is_client=*/0)};\n\n      return std::make_unique<TsiHandshaker>(std::move(handshaker), dispatcher);\n    };\n\n    client_.handshaker_factory_ = [](Event::Dispatcher& dispatcher,\n                                     const Network::Address::InstanceConstSharedPtr&,\n                                     const Network::Address::InstanceConstSharedPtr&) {\n      CHandshakerPtr handshaker{tsi_create_fake_handshaker(/*is_client=*/1)};\n\n      return std::make_unique<TsiHandshaker>(std::move(handshaker), dispatcher);\n    };\n  }\n\n  void TearDown() override {\n    client_.tsi_socket_->closeSocket(Network::ConnectionEvent::LocalClose);\n    server_.tsi_socket_->closeSocket(Network::ConnectionEvent::RemoteClose);\n  }\n\n  void initialize(HandshakeValidator server_validator, HandshakeValidator client_validator) {\n    server_.raw_socket_ = new NiceMock<Network::MockTransportSocket>();\n\n    server_.tsi_socket_ =\n        std::make_unique<TsiSocket>(server_.handshaker_factory_, server_validator,\n                                    Network::TransportSocketPtr{server_.raw_socket_});\n\n    client_.raw_socket_ = new 
NiceMock<Network::MockTransportSocket>();\n\n    client_.tsi_socket_ =\n        std::make_unique<TsiSocket>(client_.handshaker_factory_, client_validator,\n                                    Network::TransportSocketPtr{client_.raw_socket_});\n\n    ON_CALL(client_.callbacks_.connection_, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n    ON_CALL(server_.callbacks_.connection_, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n\n    ON_CALL(client_.callbacks_.connection_, id()).WillByDefault(Return(11));\n    ON_CALL(server_.callbacks_.connection_, id()).WillByDefault(Return(12));\n\n    ON_CALL(*client_.raw_socket_, doWrite(_, _))\n        .WillByDefault(Invoke([&](Buffer::Instance& buffer, bool) {\n          Network::IoResult result = {Network::PostIoAction::KeepOpen, buffer.length(), false};\n          client_to_server_.move(buffer);\n          return result;\n        }));\n    ON_CALL(*server_.raw_socket_, doWrite(_, _))\n        .WillByDefault(Invoke([&](Buffer::Instance& buffer, bool) {\n          Network::IoResult result = {Network::PostIoAction::KeepOpen, buffer.length(), false};\n          server_to_client_.move(buffer);\n          return result;\n        }));\n\n    ON_CALL(*client_.raw_socket_, doRead(_)).WillByDefault(Invoke([&](Buffer::Instance& buffer) {\n      Network::IoResult result = {Network::PostIoAction::KeepOpen, server_to_client_.length(),\n                                  false};\n      buffer.move(server_to_client_);\n      return result;\n    }));\n    ON_CALL(*server_.raw_socket_, doRead(_)).WillByDefault(Invoke([&](Buffer::Instance& buffer) {\n      Network::IoResult result = {Network::PostIoAction::KeepOpen, client_to_server_.length(),\n                                  false};\n      buffer.move(client_to_server_);\n      return result;\n    }));\n\n    client_.tsi_socket_->setTransportSocketCallbacks(client_.callbacks_);\n\n    server_.tsi_socket_->setTransportSocketCallbacks(server_.callbacks_);\n  }\n\n  void 
expectIoResult(Network::IoResult expected, Network::IoResult actual) {\n    EXPECT_EQ(expected.action_, actual.action_);\n    EXPECT_EQ(expected.bytes_processed_, actual.bytes_processed_);\n    EXPECT_EQ(expected.end_stream_read_, actual.end_stream_read_);\n  }\n\n  std::string makeFakeTsiFrame(const std::string& payload) {\n    uint32_t length = static_cast<uint32_t>(payload.length()) + 4;\n    std::string frame;\n    frame.reserve(length);\n    frame.push_back(static_cast<uint8_t>(length));\n    length >>= 8;\n    frame.push_back(static_cast<uint8_t>(length));\n    length >>= 8;\n    frame.push_back(static_cast<uint8_t>(length));\n    length >>= 8;\n    frame.push_back(static_cast<uint8_t>(length));\n\n    frame.append(payload);\n    return frame;\n  }\n\n  void doFakeInitHandshake() {\n    EXPECT_CALL(*client_.raw_socket_, doWrite(_, false));\n    client_.tsi_socket_->onConnected();\n    expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                   client_.tsi_socket_->doWrite(client_.write_buffer_, false));\n    EXPECT_EQ(makeFakeTsiFrame(\"CLIENT_INIT\"), client_to_server_.toString());\n\n    EXPECT_CALL(*server_.raw_socket_, doRead(_));\n    EXPECT_CALL(*server_.raw_socket_, doWrite(_, false));\n    expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                   server_.tsi_socket_->doRead(server_.read_buffer_));\n    EXPECT_EQ(makeFakeTsiFrame(\"SERVER_INIT\"), server_to_client_.toString());\n    EXPECT_EQ(0L, server_.read_buffer_.length());\n  }\n\n  void doHandshakeAndExpectSuccess() {\n    doFakeInitHandshake();\n\n    EXPECT_CALL(*client_.raw_socket_, doRead(_));\n    EXPECT_CALL(*client_.raw_socket_, doWrite(_, false));\n    expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                   client_.tsi_socket_->doRead(client_.read_buffer_));\n    EXPECT_EQ(makeFakeTsiFrame(\"CLIENT_FINISHED\"), client_to_server_.toString());\n    EXPECT_EQ(0L, client_.read_buffer_.length());\n\n    
EXPECT_CALL(*server_.raw_socket_, doRead(_));\n    EXPECT_CALL(*server_.raw_socket_, doWrite(_, false));\n    EXPECT_CALL(server_.callbacks_, raiseEvent(Network::ConnectionEvent::Connected));\n    expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                   server_.tsi_socket_->doRead(server_.read_buffer_));\n    EXPECT_EQ(makeFakeTsiFrame(\"SERVER_FINISHED\"), server_to_client_.toString());\n\n    EXPECT_CALL(*client_.raw_socket_, doRead(_));\n    EXPECT_CALL(client_.callbacks_, raiseEvent(Network::ConnectionEvent::Connected));\n    expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                   client_.tsi_socket_->doRead(client_.read_buffer_));\n  }\n\n  void expectTransferDataFromClientToServer(const std::string& data) {\n\n    EXPECT_EQ(0L, server_.read_buffer_.length());\n    EXPECT_EQ(0L, client_.read_buffer_.length());\n\n    EXPECT_EQ(\"\", client_.tsi_socket_->protocol());\n\n    EXPECT_CALL(*client_.raw_socket_, doWrite(_, false));\n    expectIoResult({Network::PostIoAction::KeepOpen, 21UL, false},\n                   client_.tsi_socket_->doWrite(client_.write_buffer_, false));\n    EXPECT_EQ(makeFakeTsiFrame(data), client_to_server_.toString());\n\n    EXPECT_CALL(*server_.raw_socket_, doRead(_));\n    expectIoResult({Network::PostIoAction::KeepOpen, 21UL, false},\n                   server_.tsi_socket_->doRead(server_.read_buffer_));\n    EXPECT_EQ(data, server_.read_buffer_.toString());\n  }\n\n  struct SocketForTest {\n    HandshakerFactory handshaker_factory_;\n    std::unique_ptr<TsiSocket> tsi_socket_;\n    NiceMock<Network::MockTransportSocket>* raw_socket_{};\n    NiceMock<Network::MockTransportSocketCallbacks> callbacks_;\n    Buffer::OwnedImpl read_buffer_;\n    Buffer::OwnedImpl write_buffer_;\n  };\n\n  SocketForTest client_;\n  SocketForTest server_;\n\n  Buffer::OwnedImpl client_to_server_;\n  Buffer::OwnedImpl server_to_client_;\n\n  NiceMock<Event::MockDispatcher> dispatcher_;\n};\n\nstatic const 
std::string ClientToServerData = \"hello from client\";\n\nTEST_F(TsiSocketTest, DoesNotHaveSsl) {\n  initialize(nullptr, nullptr);\n  EXPECT_EQ(nullptr, client_.tsi_socket_->ssl());\n\n  const auto& socket_ = *client_.tsi_socket_;\n  EXPECT_EQ(nullptr, socket_.ssl());\n}\n\nTEST_F(TsiSocketTest, HandshakeWithoutValidationAndTransferData) {\n  // pass a nullptr validator to skip validation.\n  initialize(nullptr, nullptr);\n\n  client_.write_buffer_.add(ClientToServerData);\n\n  doHandshakeAndExpectSuccess();\n  expectTransferDataFromClientToServer(ClientToServerData);\n}\n\nTEST_F(TsiSocketTest, HandshakeWithSucessfulValidationAndTransferData) {\n  auto validator = [](const tsi_peer&, std::string&) { return true; };\n  initialize(validator, validator);\n\n  client_.write_buffer_.add(ClientToServerData);\n\n  doHandshakeAndExpectSuccess();\n  expectTransferDataFromClientToServer(ClientToServerData);\n}\n\nTEST_F(TsiSocketTest, HandshakeValidationFail) {\n  auto validator = [](const tsi_peer&, std::string&) { return false; };\n  initialize(validator, validator);\n\n  client_.write_buffer_.add(ClientToServerData);\n\n  doFakeInitHandshake();\n\n  EXPECT_CALL(*client_.raw_socket_, doRead(_));\n  EXPECT_CALL(*client_.raw_socket_, doWrite(_, false));\n  expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                 client_.tsi_socket_->doRead(client_.read_buffer_));\n  EXPECT_EQ(makeFakeTsiFrame(\"CLIENT_FINISHED\"), client_to_server_.toString());\n  EXPECT_EQ(0L, client_.read_buffer_.length());\n\n  EXPECT_CALL(*server_.raw_socket_, doRead(_));\n  EXPECT_CALL(server_.callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  // doRead won't immediately fail, but it will result connection close.\n  expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                 server_.tsi_socket_->doRead(server_.read_buffer_));\n  EXPECT_EQ(0, server_to_client_.length());\n}\n\nTEST_F(TsiSocketTest, HandshakerCreationFail) {\n  
client_.handshaker_factory_ =\n      [](Event::Dispatcher&, const Network::Address::InstanceConstSharedPtr&,\n         const Network::Address::InstanceConstSharedPtr&) { return nullptr; };\n  auto validator = [](const tsi_peer&, std::string&) { return true; };\n  initialize(validator, validator);\n\n  EXPECT_CALL(*client_.raw_socket_, doWrite(_, _)).Times(0);\n  EXPECT_CALL(client_.callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  client_.tsi_socket_->onConnected();\n  expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                 client_.tsi_socket_->doWrite(client_.write_buffer_, false));\n  EXPECT_EQ(\"\", client_to_server_.toString());\n\n  EXPECT_CALL(*server_.raw_socket_, doRead(_));\n  EXPECT_CALL(*server_.raw_socket_, doWrite(_, _)).Times(0);\n  expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                 server_.tsi_socket_->doRead(server_.read_buffer_));\n  EXPECT_EQ(\"\", server_to_client_.toString());\n}\n\nTEST_F(TsiSocketTest, HandshakeWithUnusedData) {\n  initialize(nullptr, nullptr);\n\n  doFakeInitHandshake();\n  EXPECT_CALL(*client_.raw_socket_, doRead(_));\n  EXPECT_CALL(*client_.raw_socket_, doWrite(_, false));\n  expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                 client_.tsi_socket_->doRead(client_.read_buffer_));\n  EXPECT_EQ(makeFakeTsiFrame(\"CLIENT_FINISHED\"), client_to_server_.toString());\n  EXPECT_EQ(0L, client_.read_buffer_.length());\n\n  // Inject unused data\n  client_to_server_.add(makeFakeTsiFrame(ClientToServerData));\n\n  EXPECT_CALL(*server_.raw_socket_, doRead(_));\n  EXPECT_CALL(*server_.raw_socket_, doWrite(_, false));\n  EXPECT_CALL(server_.callbacks_, raiseEvent(Network::ConnectionEvent::Connected));\n  expectIoResult({Network::PostIoAction::KeepOpen, 21UL, false},\n                 server_.tsi_socket_->doRead(server_.read_buffer_));\n  EXPECT_EQ(makeFakeTsiFrame(\"SERVER_FINISHED\"), server_to_client_.toString());\n  
EXPECT_EQ(ClientToServerData, server_.read_buffer_.toString());\n\n  EXPECT_CALL(*client_.raw_socket_, doRead(_));\n  EXPECT_CALL(client_.callbacks_, raiseEvent(Network::ConnectionEvent::Connected));\n  expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                 client_.tsi_socket_->doRead(client_.read_buffer_));\n}\n\nTEST_F(TsiSocketTest, HandshakeWithUnusedDataAndEndOfStream) {\n  initialize(nullptr, nullptr);\n\n  doFakeInitHandshake();\n  EXPECT_CALL(*client_.raw_socket_, doRead(_));\n  EXPECT_CALL(*client_.raw_socket_, doWrite(_, false));\n  expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                 client_.tsi_socket_->doRead(client_.read_buffer_));\n  EXPECT_EQ(makeFakeTsiFrame(\"CLIENT_FINISHED\"), client_to_server_.toString());\n  EXPECT_EQ(0L, client_.read_buffer_.length());\n\n  // Inject unused data\n  client_to_server_.add(makeFakeTsiFrame(ClientToServerData));\n\n  EXPECT_CALL(*server_.raw_socket_, doRead(_)).WillOnce(Invoke([&](Buffer::Instance& buffer) {\n    Network::IoResult result = {Network::PostIoAction::KeepOpen, client_to_server_.length(), true};\n    buffer.move(client_to_server_);\n    return result;\n  }));\n  EXPECT_CALL(*server_.raw_socket_, doWrite(_, false));\n  EXPECT_CALL(server_.callbacks_, raiseEvent(Network::ConnectionEvent::Connected));\n  expectIoResult({Network::PostIoAction::KeepOpen, 21UL, true},\n                 server_.tsi_socket_->doRead(server_.read_buffer_));\n  EXPECT_EQ(makeFakeTsiFrame(\"SERVER_FINISHED\"), server_to_client_.toString());\n  EXPECT_EQ(ClientToServerData, server_.read_buffer_.toString());\n\n  EXPECT_CALL(*client_.raw_socket_, doRead(_));\n  EXPECT_CALL(client_.callbacks_, raiseEvent(Network::ConnectionEvent::Connected));\n  expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                 client_.tsi_socket_->doRead(client_.read_buffer_));\n}\n\nTEST_F(TsiSocketTest, HandshakeWithImmediateReadError) {\n  initialize(nullptr, nullptr);\n\n  
EXPECT_CALL(*client_.raw_socket_, doRead(_)).WillOnce(Invoke([&](Buffer::Instance& buffer) {\n    Network::IoResult result = {Network::PostIoAction::Close, server_to_client_.length(), false};\n    buffer.move(server_to_client_);\n    return result;\n  }));\n  EXPECT_CALL(*client_.raw_socket_, doWrite(_, false)).Times(0);\n  expectIoResult({Network::PostIoAction::Close, 0UL, false},\n                 client_.tsi_socket_->doRead(client_.read_buffer_));\n  EXPECT_EQ(\"\", client_to_server_.toString());\n  EXPECT_EQ(0L, client_.read_buffer_.length());\n}\n\nTEST_F(TsiSocketTest, HandshakeWithReadError) {\n  initialize(nullptr, nullptr);\n\n  doFakeInitHandshake();\n\n  EXPECT_CALL(*client_.raw_socket_, doRead(_)).WillOnce(Invoke([&](Buffer::Instance& buffer) {\n    Network::IoResult result = {Network::PostIoAction::Close, server_to_client_.length(), false};\n    buffer.move(server_to_client_);\n    return result;\n  }));\n  EXPECT_CALL(*client_.raw_socket_, doWrite(_, false)).Times(0);\n  EXPECT_CALL(client_.callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  expectIoResult({Network::PostIoAction::KeepOpen, 0UL, false},\n                 client_.tsi_socket_->doRead(client_.read_buffer_));\n  EXPECT_EQ(\"\", client_to_server_.toString());\n  EXPECT_EQ(0L, client_.read_buffer_.length());\n}\n\nTEST_F(TsiSocketTest, HandshakeWithInternalError) {\n  auto raw_handshaker = tsi_create_fake_handshaker(/* is_client= */ 1);\n  const tsi_handshaker_vtable* vtable = raw_handshaker->vtable;\n  tsi_handshaker_vtable mock_vtable = *vtable;\n  mock_vtable.next = [](tsi_handshaker*, const unsigned char*, size_t, const unsigned char**,\n                        size_t*, tsi_handshaker_result**, tsi_handshaker_on_next_done_cb,\n                        void*) { return TSI_INTERNAL_ERROR; };\n  raw_handshaker->vtable = &mock_vtable;\n\n  client_.handshaker_factory_ = [&](Event::Dispatcher& dispatcher,\n                                    const 
Network::Address::InstanceConstSharedPtr&,\n                                    const Network::Address::InstanceConstSharedPtr&) {\n    CHandshakerPtr handshaker{raw_handshaker};\n\n    return std::make_unique<TsiHandshaker>(std::move(handshaker), dispatcher);\n  };\n\n  initialize(nullptr, nullptr);\n\n  EXPECT_CALL(client_.callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush));\n  // doWrite won't immediately fail, but it will result connection close.\n  client_.tsi_socket_->onConnected();\n\n  raw_handshaker->vtable = vtable;\n}\n\nclass TsiSocketFactoryTest : public testing::Test {\nprotected:\n  void SetUp() override {\n    auto handshaker_factory = [](Event::Dispatcher& dispatcher,\n                                 const Network::Address::InstanceConstSharedPtr&,\n                                 const Network::Address::InstanceConstSharedPtr&) {\n      CHandshakerPtr handshaker{tsi_create_fake_handshaker(/*is_client=*/0)};\n\n      return std::make_unique<TsiHandshaker>(std::move(handshaker), dispatcher);\n    };\n\n    socket_factory_ = std::make_unique<TsiSocketFactory>(handshaker_factory, nullptr);\n  }\n  Network::TransportSocketFactoryPtr socket_factory_;\n};\n\nTEST_F(TsiSocketFactoryTest, CreateTransportSocket) {\n  EXPECT_NE(nullptr, socket_factory_->createTransportSocket(nullptr));\n}\n\nTEST_F(TsiSocketFactoryTest, ImplementsSecureTransport) {\n  EXPECT_TRUE(socket_factory_->implementsSecureTransport());\n}\n\n} // namespace\n} // namespace Alts\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"passthrough_test\",\n    srcs = [\"passthrough_test.cc\"],\n    deps = [\n        \"//source/extensions/transport_sockets/common:passthrough_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/network:transport_socket_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/transport_sockets/common/passthrough_test.cc",
    "content": "#include \"extensions/transport_sockets/common/passthrough.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/network/transport_socket.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace {\n\nclass PassthroughTest : public testing::Test {\nprotected:\n  void SetUp() override {\n    auto inner_socket = std::make_unique<NiceMock<Network::MockTransportSocket>>();\n    inner_socket_ = inner_socket.get();\n    passthrough_socket_ = std::make_unique<PassthroughSocket>(std::move(inner_socket));\n  }\n\n  NiceMock<Network::MockTransportSocket>* inner_socket_;\n  std::unique_ptr<PassthroughSocket> passthrough_socket_;\n};\n\n// Test setTransportSocketCallbacks method defers to inner socket\nTEST_F(PassthroughTest, SetTransportSocketCallbacksDefersToInnerSocket) {\n  auto transport_callbacks = std::make_unique<NiceMock<Network::MockTransportSocketCallbacks>>();\n  EXPECT_CALL(*inner_socket_, setTransportSocketCallbacks(Ref(*transport_callbacks))).Times(1);\n  passthrough_socket_->setTransportSocketCallbacks(*transport_callbacks);\n}\n\n// Test protocol method defers to inner socket\nTEST_F(PassthroughTest, ProtocolDefersToInnerSocket) {\n  EXPECT_CALL(*inner_socket_, protocol()).Times(1);\n  passthrough_socket_->protocol();\n}\n\n// Test failureReason method defers to inner socket\nTEST_F(PassthroughTest, FailureReasonDefersToInnerSocket) {\n  EXPECT_CALL(*inner_socket_, failureReason()).Times(1);\n  passthrough_socket_->failureReason();\n}\n\n// Test canFlushClose method defers to inner socket\nTEST_F(PassthroughTest, CanFlushCloseDefersToInnerSocket) {\n  EXPECT_CALL(*inner_socket_, canFlushClose()).Times(1);\n  passthrough_socket_->canFlushClose();\n}\n\n// Test closeSocket method defers to inner socket\nTEST_F(PassthroughTest, CloseSocketDefersToInnerSocket) {\n  
EXPECT_CALL(*inner_socket_, closeSocket(testing::Eq(Network::ConnectionEvent::LocalClose)))\n      .Times(1);\n  passthrough_socket_->closeSocket(Network::ConnectionEvent::LocalClose);\n}\n\n// Test doRead method defers to inner socket\nTEST_F(PassthroughTest, DoReadDefersToInnerSocket) {\n  auto buff = Buffer::OwnedImpl(\"data\");\n  EXPECT_CALL(*inner_socket_, doRead(BufferEqual(&buff))).Times(1);\n  passthrough_socket_->doRead(buff);\n}\n\n// Test doWrite method defers to inner socket\nTEST_F(PassthroughTest, DoWriteDefersToInnerSocket) {\n  auto buff = Buffer::OwnedImpl(\"data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&buff), false)).Times(1);\n  passthrough_socket_->doWrite(buff, false);\n}\n\n// Test onConnected method defers to inner socket\nTEST_F(PassthroughTest, OnConnectedDefersToInnerSocket) {\n  EXPECT_CALL(*inner_socket_, onConnected()).Times(1);\n  passthrough_socket_->onConnected();\n}\n\n// Test ssl method defers to inner socket\nTEST_F(PassthroughTest, SslDefersToInnerSocket) {\n  EXPECT_CALL(*inner_socket_, ssl()).Times(1);\n  passthrough_socket_->ssl();\n}\n\n} // namespace\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/transport_sockets/proxy_protocol/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"proxy_protocol_test\",\n    srcs = [\"proxy_protocol_test.cc\"],\n    extension_name = \"envoy.transport_sockets.upstream_proxy_protocol\",\n    deps = [\n        \"//include/envoy/network:proxy_protocol_options_lib\",\n        \"//source/extensions/common/proxy_protocol:proxy_protocol_header_lib\",\n        \"//source/extensions/transport_sockets/proxy_protocol:upstream_proxy_protocol\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/network:io_handle_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/network:transport_socket_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"proxy_protocol_integration_test\",\n    srcs = [\"proxy_protocol_integration_test.cc\"],\n    extension_name = \"envoy.transport_sockets.upstream_proxy_protocol\",\n    deps = [\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//source/extensions/transport_sockets/proxy_protocol:upstream_config\",\n        \"//test/integration:integration_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/transport_sockets/proxy_protocol/proxy_protocol_integration_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/core/v3/proxy_protocol.pb.h\"\n#include \"envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.pb.h\"\n\n#include \"test/integration/integration.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass ProxyProtocolIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                     public BaseIntegrationTest {\npublic:\n  ProxyProtocolIntegrationTest()\n      : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) {}\n\n  void TearDown() override {\n    test_server_.reset();\n    fake_upstreams_.clear();\n  }\n\n  void setup(envoy::config::core::v3::ProxyProtocolConfig_Version version, bool health_checks,\n             std::string inner_socket) {\n    version_ = version;\n    health_checks_ = health_checks;\n    inner_socket_ = inner_socket;\n  }\n\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* transport_socket =\n          bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket();\n      transport_socket->set_name(\"envoy.transport_sockets.upstream_proxy_protocol\");\n      envoy::config::core::v3::TransportSocket inner_socket;\n      inner_socket.set_name(inner_socket_);\n      envoy::config::core::v3::ProxyProtocolConfig proxy_proto_config;\n      proxy_proto_config.set_version(version_);\n      envoy::extensions::transport_sockets::proxy_protocol::v3::ProxyProtocolUpstreamTransport\n          proxy_proto_transport;\n      proxy_proto_transport.mutable_transport_socket()->MergeFrom(inner_socket);\n      proxy_proto_transport.mutable_config()->MergeFrom(proxy_proto_config);\n      transport_socket->mutable_typed_config()->PackFrom(proxy_proto_transport);\n\n      if (health_checks_) {\n        auto* cluster = 
bootstrap.mutable_static_resources()->mutable_clusters(0);\n        cluster->set_close_connections_on_host_health_failure(false);\n        cluster->mutable_common_lb_config()->mutable_healthy_panic_threshold()->set_value(0);\n        cluster->add_health_checks()->mutable_timeout()->set_seconds(20);\n        cluster->mutable_health_checks(0)->mutable_reuse_connection()->set_value(true);\n        cluster->mutable_health_checks(0)->mutable_interval()->set_seconds(1);\n        cluster->mutable_health_checks(0)->mutable_no_traffic_interval()->set_seconds(1);\n        cluster->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(1);\n        cluster->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(1);\n        cluster->mutable_health_checks(0)->mutable_tcp_health_check();\n        cluster->mutable_health_checks(0)->mutable_tcp_health_check()->mutable_send()->set_text(\n            \"50696E67\");\n        cluster->mutable_health_checks(0)->mutable_tcp_health_check()->add_receive()->set_text(\n            \"506F6E67\");\n      }\n    });\n    BaseIntegrationTest::initialize();\n  }\n\n  FakeRawConnectionPtr fake_upstream_connection_;\n\nprivate:\n  envoy::config::core::v3::ProxyProtocolConfig_Version version_;\n  bool health_checks_;\n  std::string inner_socket_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtocolIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Test sending proxy protocol v1\nTEST_P(ProxyProtocolIntegrationTest, TestV1ProxyProtocol) {\n  setup(envoy::config::core::v3::ProxyProtocolConfig::V1, false,\n        \"envoy.transport_sockets.raw_buffer\");\n  initialize();\n\n  auto listener_port = lookupPort(\"listener_0\");\n  auto tcp_client = makeTcpConnection(listener_port);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_));\n\n  std::string observed_data;\n  
ASSERT_TRUE(tcp_client->write(\"data\"));\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForData(48, &observed_data));\n    EXPECT_THAT(observed_data, testing::StartsWith(\"PROXY TCP4 127.0.0.1 127.0.0.1 \"));\n  } else if (GetParam() == Network::Address::IpVersion::v6) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForData(36, &observed_data));\n    EXPECT_THAT(observed_data, testing::StartsWith(\"PROXY TCP6 ::1 ::1 \"));\n  }\n  EXPECT_THAT(observed_data, testing::EndsWith(absl::StrCat(\" \", listener_port, \"\\r\\ndata\")));\n\n  auto previous_data = observed_data;\n  observed_data.clear();\n  ASSERT_TRUE(tcp_client->write(\" more data\"));\n  ASSERT_TRUE(fake_upstream_connection_->waitForData(previous_data.length() + 10, &observed_data));\n  EXPECT_EQ(previous_data + \" more data\", observed_data);\n\n  tcp_client->close();\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n}\n\n// Test header is sent unencrypted using a TLS inner socket\nTEST_P(ProxyProtocolIntegrationTest, TestTLSSocket) {\n  setup(envoy::config::core::v3::ProxyProtocolConfig::V1, false, \"envoy.transport_sockets.tls\");\n  initialize();\n\n  auto listener_port = lookupPort(\"listener_0\");\n  auto tcp_client = makeTcpConnection(listener_port);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_));\n\n  ASSERT_TRUE(tcp_client->write(\"data\"));\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForData(\n        fake_upstream_connection_->waitForInexactMatch(\"PROXY TCP4 127.0.0.1 127.0.0.1 \")));\n  } else if (GetParam() == Network::Address::IpVersion::v6) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForData(\n        fake_upstream_connection_->waitForInexactMatch(\"PROXY TCP6 ::1 ::1 \")));\n  }\n\n  tcp_client->close();\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  
ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n}\n\n// Test sending proxy protocol health check\nTEST_P(ProxyProtocolIntegrationTest, TestProxyProtocolHealthCheck) {\n  setup(envoy::config::core::v3::ProxyProtocolConfig::V1, true,\n        \"envoy.transport_sockets.raw_buffer\");\n  FakeRawConnectionPtr fake_upstream_health_connection;\n  on_server_init_function_ = [&](void) -> void {\n    std::string observed_data;\n    ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection));\n    if (GetParam() == Network::Address::IpVersion::v4) {\n      ASSERT_TRUE(fake_upstream_health_connection->waitForData(48, &observed_data));\n      EXPECT_THAT(observed_data, testing::StartsWith(\"PROXY TCP4 127.0.0.1 127.0.0.1 \"));\n    } else if (GetParam() == Network::Address::IpVersion::v6) {\n      ASSERT_TRUE(fake_upstream_health_connection->waitForData(36, &observed_data));\n      EXPECT_THAT(observed_data, testing::StartsWith(\"PROXY TCP6 ::1 ::1 \"));\n    }\n    ASSERT_TRUE(fake_upstream_health_connection->write(\"Pong\"));\n  };\n\n  initialize();\n\n  ASSERT_TRUE(fake_upstream_health_connection->close());\n  ASSERT_TRUE(fake_upstream_health_connection->waitForDisconnect());\n}\n\n// Test sending proxy protocol v2\nTEST_P(ProxyProtocolIntegrationTest, TestV2ProxyProtocol) {\n  setup(envoy::config::core::v3::ProxyProtocolConfig::V2, false,\n        \"envoy.transport_sockets.raw_buffer\");\n  initialize();\n\n  auto listener_port = lookupPort(\"listener_0\");\n  auto tcp_client = makeTcpConnection(listener_port);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_));\n\n  std::string observed_data;\n  ASSERT_TRUE(tcp_client->write(\"data\"));\n  if (GetParam() == Envoy::Network::Address::IpVersion::v4) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForData(32, &observed_data));\n    // - signature\n    // - version and command type, address family and protocol, length of addresses\n    // - src 
address, dest address\n    auto header_start = \"\\x0d\\x0a\\x0d\\x0a\\x00\\x0d\\x0a\\x51\\x55\\x49\\x54\\x0a\\\n                         \\x21\\x11\\x00\\x0c\\\n                         \\x7f\\x00\\x00\\x01\\x7f\\x00\\x00\\x01\";\n    EXPECT_THAT(observed_data, testing::StartsWith(header_start));\n    EXPECT_EQ(static_cast<uint8_t>(observed_data[26]), listener_port >> 8);\n    EXPECT_EQ(static_cast<uint8_t>(observed_data[27]), listener_port & 0xFF);\n  } else if (GetParam() == Envoy::Network::Address::IpVersion::v6) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForData(56, &observed_data));\n    // - signature\n    // - version and command type, address family and protocol, length of addresses\n    // - src address\n    // - dest address\n    auto header_start = \"\\x0d\\x0a\\x0d\\x0a\\x00\\x0d\\x0a\\x51\\x55\\x49\\x54\\x0a\\\n                         \\x21\\x21\\x00\\x24\\\n                         \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\\n                         \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\";\n    EXPECT_THAT(observed_data, testing::StartsWith(header_start));\n    EXPECT_EQ(static_cast<uint8_t>(observed_data[50]), listener_port >> 8);\n    EXPECT_EQ(static_cast<uint8_t>(observed_data[51]), listener_port & 0xFF);\n  }\n  EXPECT_THAT(observed_data, testing::EndsWith(\"data\"));\n\n  auto previous_data = observed_data;\n  observed_data.clear();\n  ASSERT_TRUE(tcp_client->write(\" more data\"));\n  ASSERT_TRUE(fake_upstream_connection_->waitForData(previous_data.length() + 10, &observed_data));\n  EXPECT_EQ(previous_data + \" more data\", observed_data);\n\n  tcp_client->close();\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n}\n\n} // namespace\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc",
    "content": "#include \"envoy/config/core/v3/proxy_protocol.pb.h\"\n#include \"envoy/network/proxy_protocol.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n\n#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n#include \"extensions/transport_sockets/proxy_protocol/proxy_protocol.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/network/io_handle.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/network/transport_socket.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnNull;\nusing testing::ReturnRef;\n\nusing envoy::config::core::v3::ProxyProtocolConfig;\nusing envoy::config::core::v3::ProxyProtocolConfig_Version;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace ProxyProtocol {\nnamespace {\n\nclass ProxyProtocolTest : public testing::Test {\npublic:\n  void initialize(ProxyProtocolConfig_Version version,\n                  Network::TransportSocketOptionsSharedPtr socket_options) {\n    auto inner_socket = std::make_unique<NiceMock<Network::MockTransportSocket>>();\n    inner_socket_ = inner_socket.get();\n    ON_CALL(transport_callbacks_, ioHandle()).WillByDefault(ReturnRef(io_handle_));\n    proxy_protocol_socket_ = std::make_unique<UpstreamProxyProtocolSocket>(std::move(inner_socket),\n                                                                           socket_options, version);\n    proxy_protocol_socket_->setTransportSocketCallbacks(transport_callbacks_);\n    proxy_protocol_socket_->onConnected();\n  }\n\n  NiceMock<Network::MockTransportSocket>* inner_socket_;\n  NiceMock<Network::MockIoHandle> io_handle_;\n  std::unique_ptr<UpstreamProxyProtocolSocket> proxy_protocol_socket_;\n  
NiceMock<Network::MockTransportSocketCallbacks> transport_callbacks_;\n};\n\n// Test injects PROXY protocol header only once\nTEST_F(ProxyProtocolTest, InjectesHeaderOnlyOnce) {\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://174.2.2.222:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://172.0.0.1:80\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV1Header(\"174.2.2.222\", \"172.0.0.1\", 50000, 80,\n                                          Network::Address::IpVersion::v4, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr);\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  auto msg2 = Buffer::OwnedImpl(\"more data\");\n\n  {\n    InSequence s;\n    EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n    EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg2), false)).Times(1);\n  }\n\n  proxy_protocol_socket_->doWrite(msg, false);\n  proxy_protocol_socket_->doWrite(msg2, false);\n}\n\n// Test returned bytes processed includes the PROXY protocol header\nTEST_F(ProxyProtocolTest, BytesProcessedIncludesProxyProtocolHeader) {\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://174.2.2.222:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://172.0.0.1:80\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV1Header(\"174.2.2.222\", \"172.0.0.1\", 50000, 80,\n                                          
Network::Address::IpVersion::v4, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr);\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  auto msg2 = Buffer::OwnedImpl(\"more data\");\n  {\n    InSequence s;\n    EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false))\n        .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg.length(), false}));\n    EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg2), false))\n        .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg2.length(), false}));\n  }\n\n  auto resp = proxy_protocol_socket_->doWrite(msg, false);\n  EXPECT_EQ(expected_buff.length() + msg.length(), resp.bytes_processed_);\n  auto resp2 = proxy_protocol_socket_->doWrite(msg2, false);\n  EXPECT_EQ(msg2.length(), resp2.bytes_processed_);\n}\n\n// Test returns KeepOpen action when write error is Again\nTEST_F(ProxyProtocolTest, ReturnsKeepOpenWhenWriteErrorIsAgain) {\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://174.2.2.222:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://172.0.0.1:80\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV1Header(\"174.2.2.222\", \"172.0.0.1\", 50000, 80,\n                                          Network::Address::IpVersion::v4, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr);\n\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  {\n    InSequence s;\n    EXPECT_CALL(io_handle_, 
write(BufferStringEqual(expected_buff.toString())))\n        .WillOnce(Invoke([&](Buffer::Instance&) -> Api::IoCallUint64Result {\n          return Api::IoCallUint64Result(\n              0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(),\n                                 Network::IoSocketError::deleteIoError));\n        }));\n    EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n        .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n          auto length = buffer.length();\n          buffer.drain(length);\n          return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n        }));\n    EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false))\n        .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg.length(), false}));\n  }\n\n  auto resp = proxy_protocol_socket_->doWrite(msg, false);\n  EXPECT_EQ(Network::PostIoAction::KeepOpen, resp.action_);\n  auto resp2 = proxy_protocol_socket_->doWrite(msg, false);\n  EXPECT_EQ(Network::PostIoAction::KeepOpen, resp2.action_);\n}\n\n// Test returns Close action when write error is not Again\nTEST_F(ProxyProtocolTest, ReturnsCloseWhenWriteErrorIsNotAgain) {\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://174.2.2.222:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://172.0.0.1:80\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV1Header(\"174.2.2.222\", \"172.0.0.1\", 50000, 80,\n                                          Network::Address::IpVersion::v4, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr);\n\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  {\n    InSequence s;\n    EXPECT_CALL(io_handle_, write(_))\n        .WillOnce(Invoke([&](Buffer::Instance&) -> Api::IoCallUint64Result {\n          return 
Api::IoCallUint64Result(0,\n                                         Api::IoErrorPtr(new Network::IoSocketError(EADDRNOTAVAIL),\n                                                         Network::IoSocketError::deleteIoError));\n        }));\n  }\n\n  auto resp = proxy_protocol_socket_->doWrite(msg, false);\n  EXPECT_EQ(Network::PostIoAction::Close, resp.action_);\n}\n\n// Test injects V1 PROXY protocol using upstream addresses when transport options are null\nTEST_F(ProxyProtocolTest, V1IPV4LocalAddressWhenTransportOptionsAreNull) {\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://174.2.2.222:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://172.0.0.1:80\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV1Header(\"174.2.2.222\", \"172.0.0.1\", 50000, 80,\n                                          Network::Address::IpVersion::v4, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr);\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n\n  proxy_protocol_socket_->doWrite(msg, false);\n}\n\n// Test injects V1 PROXY protocol using upstream addresses when header options are null\nTEST_F(ProxyProtocolTest, V1IPV4LocalAddressesWhenHeaderOptionsAreNull) {\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://174.2.2.222:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://172.0.0.1:80\");\n  
Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV1Header(\"174.2.2.222\", \"172.0.0.1\", 50000, 80,\n                                          Network::Address::IpVersion::v4, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1,\n             std::make_shared<Network::TransportSocketOptionsImpl>());\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = 43;\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n\n  proxy_protocol_socket_->doWrite(msg, false);\n}\n\n// Test injects V1 PROXY protocol using upstream addresses when header options are null\nTEST_F(ProxyProtocolTest, V1IPV6LocalAddressesWhenHeaderOptionsAreNull) {\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://[a:b:c:d::]:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://[e:b:c:f::]:8080\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV1Header(\"a:b:c:d::\", \"e:b:c:f::\", 50000, 8080,\n                                          Network::Address::IpVersion::v6, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1,\n             std::make_shared<Network::TransportSocketOptionsImpl>());\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = 
Buffer::OwnedImpl(\"some data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n\n  proxy_protocol_socket_->doWrite(msg, false);\n}\n\n// Test injects V1 PROXY protocol for downstream IPV4 addresses\nTEST_F(ProxyProtocolTest, V1IPV4DownstreamAddresses) {\n  auto src_addr = Network::Address::InstanceConstSharedPtr(\n      new Network::Address::Ipv4Instance(\"202.168.0.13\", 52000));\n  auto dst_addr = Network::Address::InstanceConstSharedPtr(\n      new Network::Address::Ipv4Instance(\"174.2.2.222\", 80));\n  Network::TransportSocketOptionsSharedPtr socket_options =\n      std::make_shared<Network::TransportSocketOptionsImpl>(\n          \"\", std::vector<std::string>{}, std::vector<std::string>{}, absl::nullopt,\n          absl::optional<Network::ProxyProtocolData>(\n              Network::ProxyProtocolData{src_addr, dst_addr}));\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://174.2.2.222:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://172.0.0.1:8080\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV1Header(\"202.168.0.13\", \"174.2.2.222\", 52000, 80,\n                                          Network::Address::IpVersion::v4, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, socket_options);\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n\n  proxy_protocol_socket_->doWrite(msg, false);\n}\n\n// Test injects V1 PROXY protocol for downstream IPV6 
addresses\nTEST_F(ProxyProtocolTest, V1IPV6DownstreamAddresses) {\n  auto src_addr =\n      Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance(\"1::2:3\", 52000));\n  auto dst_addr =\n      Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance(\"a:b:c:d::\", 80));\n  Network::TransportSocketOptionsSharedPtr socket_options =\n      std::make_shared<Network::TransportSocketOptionsImpl>(\n          \"\", std::vector<std::string>{}, std::vector<std::string>{}, absl::nullopt,\n          absl::optional<Network::ProxyProtocolData>(\n              Network::ProxyProtocolData{src_addr, dst_addr}));\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://[a:b:c:d::]:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://[e:b:c:f::]:8080\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV1Header(\"1::2:3\", \"a:b:c:d::\", 52000, 80,\n                                          Network::Address::IpVersion::v6, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, socket_options);\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n\n  proxy_protocol_socket_->doWrite(msg, false);\n}\n\n// Test injects V2 PROXY protocol using upstream addresses when transport options are null\nTEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenTransportOptionsAreNull) {\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://1.2.3.4:773\");\n  
transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://0.1.1.2:513\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV2LocalHeader(expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, nullptr);\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n\n  proxy_protocol_socket_->doWrite(msg, false);\n}\n\n// Test injects V2 PROXY protocol using upstream addresses when header options are null\nTEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenHeaderOptionsAreNull) {\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://1.2.3.4:773\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://0.1.1.2:513\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV2LocalHeader(expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2,\n             std::make_shared<Network::TransportSocketOptionsImpl>());\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n\n  proxy_protocol_socket_->doWrite(msg, false);\n}\n\n// Test injects 
V2 PROXY protocol for downstream IPV4 addresses\nTEST_F(ProxyProtocolTest, V2IPV4DownstreamAddresses) {\n  auto src_addr =\n      Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv4Instance(\"1.2.3.4\", 773));\n  auto dst_addr =\n      Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv4Instance(\"0.1.1.2\", 513));\n  Network::TransportSocketOptionsSharedPtr socket_options =\n      std::make_shared<Network::TransportSocketOptionsImpl>(\n          \"\", std::vector<std::string>{}, std::vector<std::string>{}, absl::nullopt,\n          absl::optional<Network::ProxyProtocolData>(\n              Network::ProxyProtocolData{src_addr, dst_addr}));\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://0.1.1.2:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://3.3.3.3:80\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV2Header(\"1.2.3.4\", \"0.1.1.2\", 773, 513,\n                                          Network::Address::IpVersion::v4, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options);\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n\n  proxy_protocol_socket_->doWrite(msg, false);\n}\n\n// Test injects V2 PROXY protocol for downstream IPV6 addresses\nTEST_F(ProxyProtocolTest, V2IPV6DownstreamAddresses) {\n  auto src_addr =\n      Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance(\"1:2:3::4\", 8));\n  auto dst_addr = 
Network::Address::InstanceConstSharedPtr(\n      new Network::Address::Ipv6Instance(\"1:100:200:3::\", 2));\n  Network::TransportSocketOptionsSharedPtr socket_options =\n      std::make_shared<Network::TransportSocketOptionsImpl>(\n          \"\", std::vector<std::string>{}, std::vector<std::string>{}, absl::nullopt,\n          absl::optional<Network::ProxyProtocolData>(\n              Network::ProxyProtocolData{src_addr, dst_addr}));\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://[1:100:200:3::]:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://[e:b:c:f::]:8080\");\n  Buffer::OwnedImpl expected_buff{};\n  Common::ProxyProtocol::generateV2Header(\"1:2:3::4\", \"1:100:200:3::\", 8, 2,\n                                          Network::Address::IpVersion::v6, expected_buff);\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options);\n\n  EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString())))\n      .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result {\n        auto length = buffer.length();\n        buffer.drain(length);\n        return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));\n      }));\n  auto msg = Buffer::OwnedImpl(\"some data\");\n  EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1);\n\n  proxy_protocol_socket_->doWrite(msg, false);\n}\n\n// Test onConnected calls inner onConnected\nTEST_F(ProxyProtocolTest, OnConnectedCallsInnerOnConnected) {\n  auto src_addr =\n      Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance(\"1:2:3::4\", 8));\n  auto dst_addr = Network::Address::InstanceConstSharedPtr(\n      new Network::Address::Ipv6Instance(\"1:100:200:3::\", 2));\n  Network::TransportSocketOptionsSharedPtr socket_options =\n      std::make_shared<Network::TransportSocketOptionsImpl>(\n          
\"\", std::vector<std::string>{}, std::vector<std::string>{}, absl::nullopt,\n          absl::optional<Network::ProxyProtocolData>(\n              Network::ProxyProtocolData{src_addr, dst_addr}));\n  transport_callbacks_.connection_.local_address_ =\n      Network::Utility::resolveUrl(\"tcp://[1:100:200:3::]:50000\");\n  transport_callbacks_.connection_.remote_address_ =\n      Network::Utility::resolveUrl(\"tcp://[e:b:c:f::]:8080\");\n  initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options);\n\n  EXPECT_CALL(*inner_socket_, onConnected()).Times(1);\n  proxy_protocol_socket_->onConnected();\n}\n\nclass ProxyProtocolSocketFactoryTest : public testing::Test {\npublic:\n  void initialize() {\n    auto inner_factory = std::make_unique<NiceMock<Network::MockTransportSocketFactory>>();\n    inner_factory_ = inner_factory.get();\n    factory_ = std::make_unique<UpstreamProxyProtocolSocketFactory>(std::move(inner_factory),\n                                                                    ProxyProtocolConfig());\n  }\n\n  NiceMock<Network::MockTransportSocketFactory>* inner_factory_;\n  std::unique_ptr<UpstreamProxyProtocolSocketFactory> factory_;\n};\n\n// Test createTransportSocket returns nullptr if inner call returns nullptr\nTEST_F(ProxyProtocolSocketFactoryTest, CreateSocketReturnsNullWhenInnerFactoryReturnsNull) {\n  initialize();\n  EXPECT_CALL(*inner_factory_, createTransportSocket(_)).WillOnce(ReturnNull());\n  ASSERT_EQ(nullptr, factory_->createTransportSocket(nullptr));\n}\n\n// Test implementsSecureTransport calls inner factory\nTEST_F(ProxyProtocolSocketFactoryTest, ImplementsSecureTransportCallInnerFactory) {\n  initialize();\n  EXPECT_CALL(*inner_factory_, implementsSecureTransport()).WillOnce(Return(true));\n  ASSERT_TRUE(factory_->implementsSecureTransport());\n}\n\n} // namespace\n} // namespace ProxyProtocol\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy"
  },
  {
    "path": "test/extensions/transport_sockets/tap/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"tap_config_impl_test\",\n    srcs = [\"tap_config_impl_test.cc\"],\n    extension_name = \"envoy.transport_sockets.tap\",\n    deps = [\n        \"//source/extensions/transport_sockets/tap:tap_config_impl\",\n        \"//test/extensions/common/tap:common\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/transport_sockets/tap/tap_config_impl_test.cc",
    "content": "#include \"common/network/address_impl.h\"\n\n#include \"extensions/transport_sockets/tap/tap_config_impl.h\"\n\n#include \"test/extensions/common/tap/common.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\nusing testing::_;\nusing testing::ByMove;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tap {\nnamespace {\n\nnamespace TapCommon = Extensions::Common::Tap;\n\nclass MockSocketTapConfig : public SocketTapConfig {\npublic:\n  PerSocketTapperPtr createPerSocketTapper(const Network::Connection& connection) override {\n    return PerSocketTapperPtr{createPerSocketTapper_(connection)};\n  }\n\n  Extensions::Common::Tap::PerTapSinkHandleManagerPtr\n  createPerTapSinkHandleManager(uint64_t trace_id) override {\n    return Extensions::Common::Tap::PerTapSinkHandleManagerPtr{\n        createPerTapSinkHandleManager_(trace_id)};\n  }\n\n  MOCK_METHOD(PerSocketTapper*, createPerSocketTapper_, (const Network::Connection& connection));\n  MOCK_METHOD(Extensions::Common::Tap::PerTapSinkHandleManager*, createPerTapSinkHandleManager_,\n              (uint64_t trace_id));\n  MOCK_METHOD(uint32_t, maxBufferedRxBytes, (), (const));\n  MOCK_METHOD(uint32_t, maxBufferedTxBytes, (), (const));\n  MOCK_METHOD(Extensions::Common::Tap::Matcher::MatchStatusVector, createMatchStatusVector, (),\n              (const));\n  MOCK_METHOD(const Extensions::Common::Tap::Matcher&, rootMatcher, (), (const));\n  MOCK_METHOD(bool, streaming, (), (const));\n  MOCK_METHOD(TimeSource&, timeSource, (), (const));\n};\n\nclass PerSocketTapperImplTest : public testing::Test {\npublic:\n  void setup(bool streaming) {\n    connection_.local_address_ =\n        std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1000);\n    ON_CALL(connection_, id()).WillByDefault(Return(1));\n    
EXPECT_CALL(*config_, createPerTapSinkHandleManager_(1)).WillOnce(Return(sink_manager_));\n    EXPECT_CALL(*config_, createMatchStatusVector())\n        .WillOnce(Return(ByMove(TapCommon::Matcher::MatchStatusVector(1))));\n    EXPECT_CALL(*config_, rootMatcher()).WillRepeatedly(ReturnRef(matcher_));\n    EXPECT_CALL(matcher_, onNewStream(_))\n        .WillOnce(Invoke([this](TapCommon::Matcher::MatchStatusVector& statuses) {\n          statuses_ = &statuses;\n          statuses[0].matches_ = true;\n          statuses[0].might_change_status_ = false;\n        }));\n    EXPECT_CALL(*config_, streaming()).WillRepeatedly(Return(streaming));\n    EXPECT_CALL(*config_, maxBufferedRxBytes()).WillRepeatedly(Return(1024));\n    EXPECT_CALL(*config_, maxBufferedTxBytes()).WillRepeatedly(Return(1024));\n    EXPECT_CALL(*config_, timeSource()).WillRepeatedly(ReturnRef(time_system_));\n    time_system_.setSystemTime(std::chrono::seconds(0));\n    tapper_ = std::make_unique<PerSocketTapperImpl>(config_, connection_);\n  }\n\n  std::shared_ptr<MockSocketTapConfig> config_{std::make_shared<MockSocketTapConfig>()};\n  // Raw pointer, returned via mock to unique_ptr.\n  TapCommon::MockPerTapSinkHandleManager* sink_manager_ =\n      new TapCommon::MockPerTapSinkHandleManager;\n  std::unique_ptr<PerSocketTapperImpl> tapper_;\n  std::vector<TapCommon::MatcherPtr> matchers_{1};\n  TapCommon::MockMatcher matcher_{matchers_};\n  TapCommon::Matcher::MatchStatusVector* statuses_;\n  NiceMock<Network::MockConnection> connection_;\n  Event::SimulatedTimeSystem time_system_;\n};\n\n// Verify the full streaming flow.\nTEST_F(PerSocketTapperImplTest, StreamingFlow) {\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nsocket_streamed_trace_segment:\n  trace_id: 1\n  connection:\n    local_address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 1000\n    remote_address:\n      socket_address:\n        address: 10.0.0.3\n      
  port_value: 50000\n)EOF\")));\n  setup(true);\n\n  InSequence s;\n\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nsocket_streamed_trace_segment:\n  trace_id: 1\n  event:\n    timestamp: 1970-01-01T00:00:00Z\n    read:\n      data:\n        as_bytes: aGVsbG8=\n)EOF\")));\n  tapper_->onRead(Buffer::OwnedImpl(\"hello\"), 5);\n\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nsocket_streamed_trace_segment:\n  trace_id: 1\n  event:\n    timestamp: 1970-01-01T00:00:01Z\n    write:\n      data:\n        as_bytes: d29ybGQ=\n      end_stream: true\n)EOF\")));\n  time_system_.setSystemTime(std::chrono::seconds(1));\n  tapper_->onWrite(Buffer::OwnedImpl(\"world\"), 5, true);\n\n  EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual(\n                                  R\"EOF(\nsocket_streamed_trace_segment:\n  trace_id: 1\n  event:\n    timestamp: 1970-01-01T00:00:02Z\n    closed: {}\n)EOF\")));\n  time_system_.setSystemTime(std::chrono::seconds(2));\n  tapper_->closeSocket(Network::ConnectionEvent::RemoteClose);\n}\n\n} // namespace\n} // namespace Tap\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"ssl_socket_test\",\n    srcs = [\n        \"ssl_certs_test.h\",\n        \"ssl_socket_test.cc\",\n    ],\n    data = [\n        \"gen_unittest_certs.sh\",\n        # TODO(mattklein123): We should consolidate all of our test certs in a single place as\n        # right now we have a bunch of duplication which is confusing.\n        \"//test/config/integration/certs\",\n        \"//test/extensions/transport_sockets/tls/test_data:certs\",\n        \"//test/extensions/transport_sockets/tls/ocsp:gen_ocsp_data\",\n    ],\n    external_deps = [\"ssl\"],\n    shard_count = 4,\n    # TODO(wrowe): Diagnose timeout error on Windows (skipped for the moment)\n    tags = [\"fails_on_windows\"],\n    deps = [\n        \":test_private_key_method_provider_test_lib\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:transport_socket_options_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//source/extensions/transport_sockets/tls:ssl_socket_lib\",\n        \"//source/extensions/transport_sockets/tls:utility_lib\",\n        
\"//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib\",\n        \"//test/extensions/transport_sockets/tls/test_data:cert_infos\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:io_handle_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"context_impl_test\",\n    srcs = [\n        \"context_impl_test.cc\",\n        \"ssl_certs_test.h\",\n    ],\n    data = [\n        \"gen_unittest_certs.sh\",\n        \"//test/extensions/transport_sockets/tls/ocsp:gen_ocsp_data\",\n        \"//test/extensions/transport_sockets/tls/test_data:certs\",\n    ],\n    # Fails intermittantly on local build\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":ssl_test_utils\",\n        \"//source/common/common:base64_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/extensions/transport_sockets/tls/test_data:cert_infos\",\n        \"//test/mocks/init:init_mocks\",\n        
\"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"io_handle_bio_test\",\n    srcs = [\"io_handle_bio_test.cc\"],\n    external_deps = [\"ssl\"],\n    deps = [\n        \":ssl_test_utils\",\n        \"//source/extensions/transport_sockets/tls:ssl_socket_lib\",\n        \"//test/mocks/network:io_handle_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\n        \"utility_test.cc\",\n    ],\n    data = [\n        \"gen_unittest_certs.sh\",\n        \"//test/extensions/transport_sockets/tls/ocsp:gen_ocsp_data\",\n        \"//test/extensions/transport_sockets/tls/test_data:certs\",\n    ],\n    external_deps = [\"ssl\"],\n    deps = [\n        \":ssl_test_utils\",\n        \"//source/extensions/transport_sockets/tls:utility_lib\",\n        \"//test/extensions/transport_sockets/tls/test_data:cert_infos\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"ssl_test_utils\",\n    srcs = [\n        \"ssl_test_utility.h\",\n    ],\n    deps = [\n        \"//source/extensions/transport_sockets/tls:utility_lib\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_private_key_method_provider_test_lib\",\n    srcs = [\n        \"test_private_key_method_provider.cc\",\n    ],\n    hdrs = [\n        
\"test_private_key_method_provider.h\",\n    ],\n    external_deps = [\"ssl\"],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/ssl/private_key:private_key_config_interface\",\n        \"//include/envoy/ssl/private_key:private_key_interface\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"handshaker_test\",\n    srcs = [\"handshaker_test.cc\"],\n    data = [\n        \"gen_unittest_certs.sh\",\n        \"//test/config/integration/certs\",\n        \"//test/extensions/transport_sockets/tls/test_data:certs\",\n    ],\n    external_deps = [\"ssl\"],\n    # TODO(sunjayBhatia): Diagnose openssl DLL load issue on Windows\n    # See: https://github.com/envoyproxy/envoy/pull/13276\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":ssl_socket_test\",\n        \":ssl_test_utils\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/transport_sockets/tls:ssl_handshaker_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/context_impl_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"envoy/admin/v3/certs.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n\n#include \"common/common/base64.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/secret/sds_api.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/context_impl.h\"\n#include \"extensions/transport_sockets/tls/utility.h\"\n\n#include \"test/extensions/transport_sockets/tls/ssl_certs_test.h\"\n#include \"test/extensions/transport_sockets/tls/ssl_test_utility.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/san_dns3_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/san_ip_cert_info.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/secret/mocks.h\"\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n#include \"openssl/x509v3.h\"\n\nusing Envoy::Protobuf::util::MessageDifferencer;\nusing testing::EndsWith;\nusing testing::NiceMock;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nnamespace {\nconst std::vector<std::string>& knownCipherSuites() {\n  CONSTRUCT_ON_FIRST_USE(std::vector<std::string>, {\"ECDHE-ECDSA-AES128-GCM-SHA256\",\n                                                    \"ECDHE-RSA-AES128-GCM-SHA256\",\n                                
                    \"ECDHE-ECDSA-AES256-GCM-SHA384\",\n                                                    \"ECDHE-RSA-AES256-GCM-SHA384\",\n                                                    \"ECDHE-ECDSA-CHACHA20-POLY1305\",\n                                                    \"ECDHE-RSA-CHACHA20-POLY1305\",\n                                                    \"ECDHE-PSK-CHACHA20-POLY1305\",\n                                                    \"ECDHE-ECDSA-AES128-SHA\",\n                                                    \"ECDHE-RSA-AES128-SHA\",\n                                                    \"ECDHE-PSK-AES128-CBC-SHA\",\n                                                    \"ECDHE-ECDSA-AES256-SHA\",\n                                                    \"ECDHE-RSA-AES256-SHA\",\n                                                    \"ECDHE-PSK-AES256-CBC-SHA\",\n                                                    \"AES128-GCM-SHA256\",\n                                                    \"AES256-GCM-SHA384\",\n                                                    \"AES128-SHA\",\n                                                    \"PSK-AES128-CBC-SHA\",\n                                                    \"AES256-SHA\",\n                                                    \"PSK-AES256-CBC-SHA\",\n                                                    \"DES-CBC3-SHA\"});\n}\n} // namespace\n\nclass SslLibraryCipherSuiteSupport : public ::testing::TestWithParam<std::string> {};\n\nINSTANTIATE_TEST_SUITE_P(CipherSuites, SslLibraryCipherSuiteSupport,\n                         ::testing::ValuesIn(knownCipherSuites()));\n\n// Tests for whether new cipher suites are added. 
When they are, they must be added to\n// knownCipherSuites() so that this test can detect if they are removed in the future.\nTEST_F(SslLibraryCipherSuiteSupport, CipherSuitesNotAdded) {\n  bssl::UniquePtr<SSL_CTX> ctx(SSL_CTX_new(TLS_method()));\n  EXPECT_NE(0, SSL_CTX_set_strict_cipher_list(ctx.get(), \"ALL\"));\n\n  std::vector<std::string> present_cipher_suites;\n  for (const SSL_CIPHER* cipher : SSL_CTX_get_ciphers(ctx.get())) {\n    present_cipher_suites.push_back(SSL_CIPHER_get_name(cipher));\n  }\n  EXPECT_THAT(present_cipher_suites, testing::IsSubsetOf(knownCipherSuites()));\n}\n\n// Test that no previously supported cipher suites were removed from the SSL library. If a cipher\n// suite is removed, it must be added to the release notes as an incompatible change, because it can\n// cause previously loadable configurations to no longer load if they reference the cipher suite.\nTEST_P(SslLibraryCipherSuiteSupport, CipherSuitesNotRemoved) {\n  bssl::UniquePtr<SSL_CTX> ctx(SSL_CTX_new(TLS_method()));\n  EXPECT_NE(0, SSL_CTX_set_strict_cipher_list(ctx.get(), GetParam().c_str()));\n}\n\nclass SslContextImplTest : public SslCertsTest {\nprotected:\n  Event::SimulatedTimeSystem time_system_;\n  ContextManagerImpl manager_{time_system_};\n};\n\nTEST_F(SslContextImplTest, TestDnsNameMatching) {\n  EXPECT_TRUE(ContextImpl::dnsNameMatch(\"lyft.com\", \"lyft.com\"));\n  EXPECT_TRUE(ContextImpl::dnsNameMatch(\"a.lyft.com\", \"*.lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"a.b.lyft.com\", \"*.lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"foo.test.com\", \"*.lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"lyft.com\", \"*.lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"alyft.com\", \"*.lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"alyft.com\", \"*lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"lyft.com\", \"*lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"\", \"*lyft.com\"));\n  
EXPECT_FALSE(ContextImpl::dnsNameMatch(\"lyft.com\", \"\"));\n}\n\nTEST_F(SslContextImplTest, TestDnsNameMatchingLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fix_wildcard_matching\", \"false\"}});\n  EXPECT_TRUE(ContextImpl::dnsNameMatch(\"lyft.com\", \"lyft.com\"));\n  EXPECT_TRUE(ContextImpl::dnsNameMatch(\"a.lyft.com\", \"*.lyft.com\"));\n  // Legacy behavior\n  EXPECT_TRUE(ContextImpl::dnsNameMatch(\"a.b.lyft.com\", \"*.lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"foo.test.com\", \"*.lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"lyft.com\", \"*.lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"alyft.com\", \"*.lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"alyft.com\", \"*lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"lyft.com\", \"*lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"\", \"*lyft.com\"));\n  EXPECT_FALSE(ContextImpl::dnsNameMatch(\"lyft.com\", \"\"));\n}\n\nTEST_F(SslContextImplTest, TestVerifySubjectAltNameDNSMatched) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  std::vector<std::string> verify_subject_alt_name_list = {\"server1.example.com\",\n                                                           \"server2.example.com\"};\n  EXPECT_TRUE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list));\n}\n\nTEST_F(SslContextImplTest, TestMatchSubjectAltNameDNSMatched) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_hidden_envoy_deprecated_regex(\".*.example.com\");\n  std::vector<Matchers::StringMatcherImpl> subject_alt_name_matchers;\n  
subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));\n  EXPECT_TRUE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers));\n}\n\nTEST_F(SslContextImplTest, TestMatchSubjectAltNameWildcardDNSMatched) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\"));\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_exact(\"api.example.com\");\n  std::vector<Matchers::StringMatcherImpl> subject_alt_name_matchers;\n  subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));\n  EXPECT_TRUE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers));\n}\n\nTEST_F(SslContextImplTest, TestMultiLevelMatch) {\n  // san_multiple_dns_cert matches *.example.com\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\"));\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_exact(\"foo.api.example.com\");\n  std::vector<Matchers::StringMatcherImpl> subject_alt_name_matchers;\n  subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));\n  EXPECT_FALSE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers));\n}\n\nTEST_F(SslContextImplTest, TestMultiLevelMatchLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fix_wildcard_matching\", \"false\"}});\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\"));\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_exact(\"foo.api.example.com\");\n  std::vector<Matchers::StringMatcherImpl> 
subject_alt_name_matchers;\n  subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));\n  EXPECT_TRUE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers));\n}\n\nTEST_F(SslContextImplTest, TestVerifySubjectAltNameURIMatched) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  std::vector<std::string> verify_subject_alt_name_list = {\"spiffe://lyft.com/fake-team\",\n                                                           \"spiffe://lyft.com/test-team\"};\n  EXPECT_TRUE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list));\n}\n\nTEST_F(SslContextImplTest, TestVerifySubjectAltMultiDomain) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\"));\n  std::vector<std::string> verify_subject_alt_name_list = {\"https://a.www.example.com\"};\n  EXPECT_FALSE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list));\n}\n\nTEST_F(SslContextImplTest, TestVerifySubjectAltMultiDomainLegacy) {\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.fix_wildcard_matching\", \"false\"}});\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\"));\n  std::vector<std::string> verify_subject_alt_name_list = {\"https://a.www.example.com\"};\n  EXPECT_TRUE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list));\n}\n\nTEST_F(SslContextImplTest, TestMatchSubjectAltNameURIMatched) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_hidden_envoy_deprecated_regex(\"spiffe://lyft.com/.*-team\");\n  std::vector<Matchers::StringMatcherImpl> subject_alt_name_matchers;\n  subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));\n  EXPECT_TRUE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers));\n}\n\nTEST_F(SslContextImplTest, TestVerifySubjectAltNameNotMatched) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  std::vector<std::string> verify_subject_alt_name_list = {\"foo\", \"bar\"};\n  EXPECT_FALSE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list));\n}\n\nTEST_F(SslContextImplTest, TestMatchSubjectAltNameNotMatched) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  envoy::type::matcher::v3::StringMatcher matcher;\n  matcher.set_hidden_envoy_deprecated_regex(\".*.foo.com\");\n  std::vector<Matchers::StringMatcherImpl> subject_alt_name_matchers;\n  subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher));\n  EXPECT_FALSE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers));\n}\n\nTEST_F(SslContextImplTest, TestCipherSuites) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites: \"-ALL:+[AES128-SHA|BOGUS1-SHA256]:BOGUS2-SHA:AES256-SHA\"\n  )EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context);\n  ClientContextConfigImpl cfg(tls_context, factory_context_);\n  EXPECT_THROW_WITH_MESSAGE(\n      manager_.createSslClientContext(store_, cfg), 
EnvoyException,\n      \"Failed to initialize cipher suites \"\n      \"-ALL:+[AES128-SHA|BOGUS1-SHA256]:BOGUS2-SHA:AES256-SHA. The following \"\n      \"ciphers were rejected when tried individually: BOGUS1-SHA256, BOGUS2-SHA\");\n}\n\nTEST_F(SslContextImplTest, TestExpiringCert) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n )EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context);\n\n  ClientContextConfigImpl cfg(tls_context, factory_context_);\n  Envoy::Ssl::ClientContextSharedPtr context(manager_.createSslClientContext(store_, cfg));\n\n  // This is a total hack, but right now we generate the cert and it expires in 15 days only in the\n  // first second that it's valid. This can become invalid and then cause slower tests to fail.\n  // Optimally we would make the cert valid for 15 days and 23 hours, but that is not easy to do\n  // with the command line so we have this for now. 
Good enough.\n  EXPECT_TRUE(15 == context->daysUntilFirstCertExpires() ||\n              14 == context->daysUntilFirstCertExpires());\n}\n\nTEST_F(SslContextImplTest, TestExpiredCert) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/expired_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/expired_key.pem\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context);\n  ClientContextConfigImpl cfg(tls_context, factory_context_);\n  Envoy::Ssl::ClientContextSharedPtr context(manager_.createSslClientContext(store_, cfg));\n  EXPECT_EQ(0U, context->daysUntilFirstCertExpires());\n}\n\nTEST_F(SslContextImplTest, TestGetCertInformation) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context);\n  ClientContextConfigImpl cfg(tls_context, factory_context_);\n\n  Envoy::Ssl::ClientContextSharedPtr context(manager_.createSslClientContext(store_, cfg));\n  // This is similar to the hack above, but right now we generate the ca_cert and it expires in 15\n  // days only in the first second that it's valid. 
We will partially match for up until Days until\n  // Expiration: 1.\n  // For the cert_chain, it is dynamically created when we run_envoy_test.sh which changes the\n  // serial number with\n  // every build. For cert_chain output, we check only for the certificate path.\n  std::string ca_cert_json = absl::StrCat(R\"EOF({\n \"path\": \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\",\n \"serial_number\": \")EOF\",\n                                          TEST_NO_SAN_CERT_SERIAL, R\"EOF(\",\n \"subject_alt_names\": [],\n }\n)EOF\");\n\n  std::string cert_chain_json = R\"EOF({\n \"path\": \"{{ test_tmpdir }}/unittestcert.pem\",\n }\n)EOF\";\n\n  std::string ca_cert_partial_output(TestEnvironment::substitute(ca_cert_json));\n  std::string cert_chain_partial_output(TestEnvironment::substitute(cert_chain_json));\n  envoy::admin::v3::CertificateDetails certificate_details, cert_chain_details;\n  TestUtility::loadFromJson(ca_cert_partial_output, certificate_details);\n  TestUtility::loadFromJson(cert_chain_partial_output, cert_chain_details);\n\n  MessageDifferencer message_differencer;\n  message_differencer.set_scope(MessageDifferencer::Scope::PARTIAL);\n  EXPECT_TRUE(message_differencer.Compare(certificate_details, *context->getCaCertInformation()));\n  EXPECT_TRUE(\n      message_differencer.Compare(cert_chain_details, *context->getCertChainInformation()[0]));\n}\n\nTEST_F(SslContextImplTest, TestGetCertInformationWithSAN) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_cert.pem\"\n)EOF\";\n\n  
envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context);\n  ClientContextConfigImpl cfg(tls_context, factory_context_);\n\n  Envoy::Ssl::ClientContextSharedPtr context(manager_.createSslClientContext(store_, cfg));\n  std::string ca_cert_json = absl::StrCat(R\"EOF({\n \"path\": \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_cert.pem\",\n \"serial_number\": \")EOF\",\n                                          TEST_SAN_DNS3_CERT_SERIAL, R\"EOF(\",\n \"subject_alt_names\": [\n  {\n   \"dns\": \"server1.example.com\"\n  }\n ]\n }\n)EOF\");\n\n  std::string cert_chain_json = R\"EOF({\n \"path\": \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\",\n }\n)EOF\";\n\n  // This is similar to the hack above, but right now we generate the ca_cert and it expires in 15\n  // days only in the first second that it's valid. We will partially match for up until Days until\n  // Expiration: 1.\n  // For the cert_chain, it is dynamically created when we run_envoy_test.sh which changes the\n  // serial number with\n  // every build. 
For cert_chain output, we check only for the certificate path.\n  std::string ca_cert_partial_output(TestEnvironment::substitute(ca_cert_json));\n  std::string cert_chain_partial_output(TestEnvironment::substitute(cert_chain_json));\n  envoy::admin::v3::CertificateDetails certificate_details, cert_chain_details;\n  TestUtility::loadFromJson(ca_cert_partial_output, certificate_details);\n  TestUtility::loadFromJson(cert_chain_partial_output, cert_chain_details);\n\n  MessageDifferencer message_differencer;\n  message_differencer.set_scope(MessageDifferencer::Scope::PARTIAL);\n  EXPECT_TRUE(message_differencer.Compare(certificate_details, *context->getCaCertInformation()));\n  EXPECT_TRUE(\n      message_differencer.Compare(cert_chain_details, *context->getCertChainInformation()[0]));\n}\n\nTEST_F(SslContextImplTest, TestGetCertInformationWithIPSAN) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_ip_chain.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_ip_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_ip_cert.pem\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context);\n  ClientContextConfigImpl cfg(tls_context, factory_context_);\n\n  Envoy::Ssl::ClientContextSharedPtr context(manager_.createSslClientContext(store_, cfg));\n  std::string ca_cert_json = absl::StrCat(R\"EOF({\n \"path\": \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_ip_cert.pem\",\n \"serial_number\": \")EOF\",\n                                          TEST_SAN_IP_CERT_SERIAL, R\"EOF(\",\n \"subject_alt_names\": [\n  {\n   \"ip_address\": 
\"1.1.1.1\"\n  }\n ]\n }\n)EOF\");\n\n  std::string cert_chain_json = R\"EOF({\n \"path\": \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_ip_chain.pem\",\n }\n)EOF\";\n\n  // This is similar to the hack above, but right now we generate the ca_cert and it expires in 15\n  // days only in the first second that it's valid. We will partially match for up until Days until\n  // Expiration: 1.\n  // For the cert_chain, it is dynamically created when we run_envoy_test.sh which changes the\n  // serial number with\n  // every build. For cert_chain output, we check only for the certificate path.\n  std::string ca_cert_partial_output(TestEnvironment::substitute(ca_cert_json));\n  std::string cert_chain_partial_output(TestEnvironment::substitute(cert_chain_json));\n  envoy::admin::v3::CertificateDetails certificate_details, cert_chain_details;\n  TestUtility::loadFromJson(ca_cert_partial_output, certificate_details);\n  TestUtility::loadFromJson(cert_chain_partial_output, cert_chain_details);\n\n  MessageDifferencer message_differencer;\n  message_differencer.set_scope(MessageDifferencer::Scope::PARTIAL);\n  EXPECT_TRUE(message_differencer.Compare(certificate_details, *context->getCaCertInformation()));\n  EXPECT_TRUE(\n      message_differencer.Compare(cert_chain_details, *context->getCertChainInformation()[0]));\n}\n\nstd::string convertTimeCertInfoToCertDetails(std::string cert_info_time) {\n  return TestUtility::convertTime(cert_info_time, \"%b %e %H:%M:%S %Y GMT\", \"%Y-%m-%dT%H:%M:%SZ\");\n}\n\nTEST_F(SslContextImplTest, TestGetCertInformationWithExpiration) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\"\n    validation_context:\n      trusted_ca:\n      
  filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_cert.pem\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context);\n  ClientContextConfigImpl cfg(tls_context, factory_context_);\n\n  Envoy::Ssl::ClientContextSharedPtr context(manager_.createSslClientContext(store_, cfg));\n  std::string ca_cert_json =\n      absl::StrCat(R\"EOF({\n \"path\": \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_cert.pem\",\n \"serial_number\": \")EOF\",\n                   TEST_SAN_DNS3_CERT_SERIAL, R\"EOF(\",\n \"subject_alt_names\": [\n  {\n   \"dns\": \"server1.example.com\"\n  }\n ],\n \"valid_from\": \")EOF\",\n                   convertTimeCertInfoToCertDetails(TEST_SAN_DNS3_CERT_NOT_BEFORE), R\"EOF(\",\n \"expiration_time\": \")EOF\",\n                   convertTimeCertInfoToCertDetails(TEST_SAN_DNS3_CERT_NOT_AFTER), R\"EOF(\"\n }\n)EOF\");\n\n  const std::string ca_cert_partial_output(TestEnvironment::substitute(ca_cert_json));\n  envoy::admin::v3::CertificateDetails certificate_details;\n  TestUtility::loadFromJson(ca_cert_partial_output, certificate_details);\n\n  MessageDifferencer message_differencer;\n  message_differencer.set_scope(MessageDifferencer::Scope::PARTIAL);\n  EXPECT_TRUE(message_differencer.Compare(certificate_details, *context->getCaCertInformation()));\n}\n\nTEST_F(SslContextImplTest, TestNoCert) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext config;\n  ClientContextConfigImpl cfg(config, factory_context_);\n  Envoy::Ssl::ClientContextSharedPtr context(manager_.createSslClientContext(store_, cfg));\n  EXPECT_EQ(nullptr, context->getCaCertInformation());\n  EXPECT_TRUE(context->getCertChainInformation().empty());\n}\n\n// Multiple RSA certificates are rejected.\nTEST_F(SslContextImplTest, AtMostOneRsaCert) {\n  
envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned2_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  EXPECT_THROW_WITH_REGEX(manager_.createSslServerContext(store_, server_context_config, {}),\n                          EnvoyException,\n                          \"at most one certificate of a given type may be specified\");\n}\n\n// Multiple ECDSA certificates are rejected.\nTEST_F(SslContextImplTest, AtMostOneEcdsaCert) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned2_ecdsa_p256_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n  )EOF\";\n  
TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  EXPECT_THROW_WITH_REGEX(manager_.createSslServerContext(store_, server_context_config, {}),\n                          EnvoyException,\n                          \"at most one certificate of a given type may be specified\");\n}\n\n// Certificates with no subject CN and no SANs are rejected.\nTEST_F(SslContextImplTest, MustHaveSubjectOrSAN) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_subject_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_subject_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  EXPECT_THROW_WITH_REGEX(manager_.createSslServerContext(store_, server_context_config, {}),\n                          EnvoyException, \"has neither subject CN nor SAN names\");\n}\n\nclass SslServerContextImplOcspTest : public SslContextImplTest {\npublic:\n  Envoy::Ssl::ServerContextSharedPtr loadConfig(ServerContextConfigImpl& cfg) {\n    return manager_.createSslServerContext(store_, cfg, std::vector<std::string>{});\n  }\n\n  Envoy::Ssl::ServerContextSharedPtr loadConfigYaml(const std::string& yaml) {\n    envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n    TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context);\n    ServerContextConfigImpl cfg(tls_context, factory_context_);\n    return loadConfig(cfg);\n  }\n};\n\nTEST_F(SslServerContextImplOcspTest, TestFilenameOcspStapleConfigLoads) 
{\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der\"\n  ocsp_staple_policy: must_staple\n  )EOF\";\n  loadConfigYaml(tls_context_yaml);\n}\n\nTEST_F(SslServerContextImplOcspTest, TestInlineBytesOcspStapleConfigLoads) {\n  auto der_response = TestEnvironment::readFileToStringForTest(\n      TestEnvironment::substitute(\"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der\"));\n  auto base64_response = Base64::encode(der_response.c_str(), der_response.length(), true);\n  const std::string tls_context_yaml = fmt::format(R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{{{ test_tmpdir }}}}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{{{ test_tmpdir }}}}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n       inline_bytes: \"{}\"\n  ocsp_staple_policy: must_staple\n  )EOF\",\n                                                   base64_response);\n\n  loadConfigYaml(tls_context_yaml);\n}\n\nTEST_F(SslServerContextImplOcspTest, TestInlineStringOcspStapleConfigFails) {\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n       inline_string: \"abcd\"\n  ocsp_staple_policy: must_staple\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException,\n                            \"OCSP staple cannot be provided via inline_string\");\n}\n\nTEST_F(SslServerContextImplOcspTest, 
TestMismatchedOcspStapleConfigFails) {\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der\"\n  ocsp_staple_policy: must_staple\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException,\n                            \"OCSP response does not match its TLS certificate\");\n}\n\nTEST_F(SslServerContextImplOcspTest, TestStaplingRequiredWithoutStapleConfigFails) {\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n  ocsp_staple_policy: must_staple\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException,\n                            \"Required OCSP response is missing from TLS context\");\n}\n\nTEST_F(SslServerContextImplOcspTest, TestUnsuccessfulOcspResponseConfigFails) {\n  std::vector<uint8_t> data = {\n      // SEQUENCE\n      0x30, 3,\n      // OcspResponseStatus - InternalError\n      0xau, 1, 2,\n      // no response bytes\n  };\n  std::string der_response(data.begin(), data.end());\n  auto base64_response = Base64::encode(der_response.c_str(), der_response.length(), true);\n  const std::string tls_context_yaml = fmt::format(R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{{{ test_tmpdir }}}}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{{{ test_tmpdir }}}}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n       inline_bytes: \"{}\"\n  ocsp_staple_policy: must_staple\n  
)EOF\",\n                                                   base64_response);\n\n  EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException,\n                            \"OCSP response was unsuccessful\");\n}\n\nTEST_F(SslServerContextImplOcspTest, TestMustStapleCertWithoutStapleConfigFails) {\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem\"\n  ocsp_staple_policy: lenient_stapling\n  )EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException,\n                            \"OCSP response is required for must-staple certificate\");\n}\n\nTEST_F(SslServerContextImplOcspTest, TestMustStapleCertWithoutStapleFeatureFlagOff) {\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem\"\n  ocsp_staple_policy: lenient_stapling\n  )EOF\";\n\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.require_ocsp_response_for_must_staple_certs\", \"false\"}});\n  loadConfigYaml(tls_context_yaml);\n}\n\nTEST_F(SslServerContextImplOcspTest, TestGetCertInformationWithOCSP) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext 
tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context);\n  auto context = loadConfigYaml(yaml);\n\n  constexpr absl::string_view this_update = \"This Update: \";\n  constexpr absl::string_view next_update = \"Next Update: \";\n\n  auto ocsp_text_details =\n      absl::StrSplit(TestEnvironment::readFileToStringForTest(\n                         TestEnvironment::substitute(\n                             \"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp_details.txt\"),\n                         true),\n                     '\\n');\n  std::string valid_from, expiration;\n  for (const auto& detail : ocsp_text_details) {\n    std::string::size_type pos = detail.find(this_update);\n    if (pos != std::string::npos) {\n      valid_from = detail.substr(pos + this_update.size());\n      continue;\n    }\n\n    pos = detail.find(next_update);\n    if (pos != std::string::npos) {\n      expiration = detail.substr(pos + next_update.size());\n      continue;\n    }\n  }\n\n  std::string ocsp_json = absl::StrCat(R\"EOF({\n\"valid_from\": \")EOF\",\n                                       convertTimeCertInfoToCertDetails(valid_from), R\"EOF(\",\n\"expiration\": \")EOF\",\n                                       convertTimeCertInfoToCertDetails(expiration), R\"EOF(\"\n}\n)EOF\");\n\n  envoy::admin::v3::CertificateDetails::OcspDetails ocsp_details;\n  TestUtility::loadFromJson(ocsp_json, ocsp_details);\n\n  MessageDifferencer message_differencer;\n  message_differencer.set_scope(MessageDifferencer::Scope::PARTIAL);\n  EXPECT_TRUE(message_differencer.Compare(ocsp_details,\n                                          context->getCertChainInformation()[0]->ocsp_details()));\n}\n\nclass SslServerContextImplTicketTest : public SslContextImplTest {\npublic:\n  void loadConfig(ServerContextConfigImpl& cfg) {\n    Envoy::Ssl::ServerContextSharedPtr server_ctx(\n        manager_.createSslServerContext(store_, cfg, std::vector<std::string>{}));\n  }\n\n  void 
loadConfigV2(envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext& cfg) {\n    // Must add a certificate for the config to be considered valid.\n    envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n        cfg.mutable_common_tls_context()->add_tls_certificates();\n    server_cert->mutable_certificate_chain()->set_filename(\n        TestEnvironment::substitute(\"{{ test_tmpdir }}/unittestcert.pem\"));\n    server_cert->mutable_private_key()->set_filename(\n        TestEnvironment::substitute(\"{{ test_tmpdir }}/unittestkey.pem\"));\n\n    ServerContextConfigImpl server_context_config(cfg, factory_context_);\n    loadConfig(server_context_config);\n  }\n\n  void loadConfigYaml(const std::string& yaml, bool avoid_boosting = true) {\n    envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n    TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context, false,\n                              avoid_boosting);\n    ServerContextConfigImpl cfg(tls_context, factory_context_);\n    loadConfig(cfg);\n  }\n};\n\nTEST_F(SslServerContextImplTicketTest, TicketKeySuccess) {\n  // Both keys are valid; no error should be thrown\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_b\"\n)EOF\";\n  EXPECT_NO_THROW(loadConfigYaml(yaml));\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeyInvalidLen) {\n  // First key is valid, second key isn't. 
Should throw if any keys are invalid.\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_wrong_len\"\n)EOF\";\n  EXPECT_THROW(loadConfigYaml(yaml), EnvoyException);\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeyInvalidCannotRead) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/this_file_does_not_exist\"\n)EOF\";\n  EXPECT_THROW(loadConfigYaml(yaml), std::exception);\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeyNone) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext cfg;\n  EXPECT_NO_THROW(loadConfigV2(cfg));\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeyInlineBytesSuccess) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext cfg;\n  cfg.mutable_session_ticket_keys()->add_keys()->set_inline_bytes(std::string(80, '\\0'));\n  EXPECT_NO_THROW(loadConfigV2(cfg));\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeyInlineStringSuccess) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext cfg;\n  cfg.mutable_session_ticket_keys()->add_keys()->set_inline_string(std::string(80, '\\0'));\n  EXPECT_NO_THROW(loadConfigV2(cfg));\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeyInlineBytesFailTooBig) {\n  
envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext cfg;\n  cfg.mutable_session_ticket_keys()->add_keys()->set_inline_bytes(std::string(81, '\\0'));\n  EXPECT_THROW(loadConfigV2(cfg), EnvoyException);\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeyInlineStringFailTooBig) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext cfg;\n  cfg.mutable_session_ticket_keys()->add_keys()->set_inline_string(std::string(81, '\\0'));\n  EXPECT_THROW(loadConfigV2(cfg), EnvoyException);\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeyInlineBytesFailTooSmall) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext cfg;\n  cfg.mutable_session_ticket_keys()->add_keys()->set_inline_bytes(std::string(79, '\\0'));\n  EXPECT_THROW(loadConfigV2(cfg), EnvoyException);\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeyInlineStringFailTooSmall) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext cfg;\n  cfg.mutable_session_ticket_keys()->add_keys()->set_inline_string(std::string(79, '\\0'));\n  EXPECT_THROW(loadConfigV2(cfg), EnvoyException);\n}\n\nTEST_F(SslServerContextImplTicketTest, TicketKeySdsNotReady) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      tls_context.mutable_common_tls_context()->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"));\n\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  NiceMock<Random::MockRandomGenerator> random;\n  Stats::IsolatedStoreImpl stats;\n  
NiceMock<Upstream::MockClusterManager> cluster_manager;\n  NiceMock<Init::MockManager> init_manager;\n  EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info));\n  EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  // EXPECT_CALL(factory_context_, random()).WillOnce(ReturnRef(random));\n  EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats));\n  EXPECT_CALL(factory_context_, clusterManager()).WillOnce(ReturnRef(cluster_manager));\n  EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  auto* sds_secret_configs = tls_context.mutable_session_ticket_keys_sds_secret_config();\n  sds_secret_configs->set_name(\"abc.com\");\n  sds_secret_configs->mutable_sds_config();\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  // When sds secret is not downloaded, config is not ready.\n  EXPECT_FALSE(server_context_config.isReady());\n  // Set various callbacks to config.\n  NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  server_context_config.setSecretUpdateCallback(\n      [&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n  server_context_config.setSecretUpdateCallback([]() {});\n}\n\n// Validate that client context config with static TLS ticket encryption keys is created\n// successfully.\nTEST_F(SslServerContextImplTicketTest, StaticTickeyKey) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n\n  const std::string yaml = R\"EOF(\nname: \"abc.com\"\nsession_ticket_keys:\n  keys:\n    - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n    - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_b\"\n)EOF\";\n\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config);\n  factory_context_.secretManager().addStaticSecret(secret_config);\n\n  
envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      tls_context.mutable_common_tls_context()->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"));\n\n  tls_context.mutable_session_ticket_keys_sds_secret_config()->set_name(\"abc.com\");\n\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n\n  EXPECT_TRUE(server_context_config.isReady());\n  ASSERT_EQ(server_context_config.sessionTicketKeys().size(), 2);\n}\n\nTEST_F(SslServerContextImplTicketTest, CRLSuccess) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  EXPECT_NO_THROW(loadConfigYaml(yaml));\n}\n\nTEST_F(SslServerContextImplTicketTest, CRLInvalid) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n   
 validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/not_a_crl.crl\"\n)EOF\";\n  EXPECT_THROW_WITH_REGEX(loadConfigYaml(yaml), EnvoyException,\n                          \"^Failed to load CRL from .*/not_a_crl.crl$\");\n}\n\nTEST_F(SslServerContextImplTicketTest, CRLWithNoCA) {\n  const std::string yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n    validation_context:\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/not_a_crl.crl\"\n)EOF\";\n  EXPECT_THROW_WITH_REGEX(loadConfigYaml(yaml), EnvoyException,\n                          \"^Failed to load CRL from .* without trusted CA$\");\n}\n\nTEST_F(SslServerContextImplTicketTest, VerifySanWithNoCA) {\n  const std::string yaml = R\"EOF(\n       common_tls_context:\n          tls_certificates:\n            certificate_chain:\n              filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n            private_key:\n              filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n          validation_context:\n            match_subject_alt_names:\n              exact : \"spiffe://lyft.com/testclient\"\n)EOF\";\n  EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(yaml), EnvoyException,\n                            \"SAN-based verification of peer certificates without trusted CA \"\n                            \"is insecure and not allowed\");\n}\n\nTEST_F(SslServerContextImplTicketTest, StatelessSessionResumptionEnabledByDefault) {\n  
envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  EXPECT_FALSE(server_context_config.disableStatelessSessionResumption());\n}\n\n// Validate that explicitly setting disable_stateless_session_resumption to false keeps\n// stateless session resumption enabled.\nTEST_F(SslServerContextImplTicketTest, StatelessSessionResumptionExplicitlyEnabled) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  disable_stateless_session_resumption: false\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  EXPECT_FALSE(server_context_config.disableStatelessSessionResumption());\n}\n\n// Validate that setting disable_stateless_session_resumption to true disables stateless\n// session resumption.\nTEST_F(SslServerContextImplTicketTest, StatelessSessionResumptionDisabled) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  disable_stateless_session_resumption: true\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  
EXPECT_TRUE(server_context_config.disableStatelessSessionResumption());\n}\n\nTEST_F(SslServerContextImplTicketTest, StatelessSessionResumptionEnabledWhenKeyIsConfigured) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  EXPECT_FALSE(server_context_config.disableStatelessSessionResumption());\n}\n\nclass ClientContextConfigImplTest : public SslCertsTest {};\n\n// Validate that empty SNI (according to C string rules) fails config validation.\nTEST_F(ClientContextConfigImplTest, EmptyServerNameIndication) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context;\n\n  tls_context.set_sni(std::string(\"\\000\", 1));\n  EXPECT_THROW_WITH_MESSAGE(\n      ClientContextConfigImpl client_context_config(tls_context, factory_context), EnvoyException,\n      \"SNI names containing NULL-byte are not allowed\");\n  tls_context.set_sni(std::string(\"a\\000b\", 3));\n  EXPECT_THROW_WITH_MESSAGE(\n      ClientContextConfigImpl client_context_config(tls_context, factory_context), EnvoyException,\n      \"SNI names containing NULL-byte are not allowed\");\n}\n\n// Validate that values other than a hex-encoded SHA-256 fail config validation.\nTEST_F(ClientContextConfigImplTest, InvalidCertificateHash) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext 
tls_context;\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context;\n  tls_context.mutable_common_tls_context()\n      ->mutable_validation_context()\n      // This is valid hex-encoded string, but it doesn't represent SHA-256 (80 vs 64 chars).\n      ->add_verify_certificate_hash(\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n                                    \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\");\n  ClientContextConfigImpl client_context_config(tls_context, factory_context);\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n  EXPECT_THROW_WITH_REGEX(manager.createSslClientContext(store, client_context_config),\n                          EnvoyException, \"Invalid hex-encoded SHA-256 .*\");\n}\n\n// Validate that values other than a base64-encoded SHA-256 fail config validation.\nTEST_F(ClientContextConfigImplTest, InvalidCertificateSpki) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context;\n  tls_context.mutable_common_tls_context()\n      ->mutable_validation_context()\n      // Not a base64-encoded string.\n      ->add_verify_certificate_spki(\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\");\n  ClientContextConfigImpl client_context_config(tls_context, factory_context);\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n  EXPECT_THROW_WITH_REGEX(manager.createSslClientContext(store, client_context_config),\n                          EnvoyException, \"Invalid base64-encoded SHA-256 .*\");\n}\n\n// Validate that 2048-bit RSA certificates load successfully.\nTEST_F(ClientContextConfigImplTest, RSA2048Cert) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  const std::string tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    
filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n  manager.createSslClientContext(store, client_context_config);\n}\n\n// Validate that 1024-bit RSA certificates are rejected.\nTEST_F(ClientContextConfigImplTest, RSA1024Cert) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  const std::string tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_1024_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_1024_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n\n  std::string error_msg(\n      \"Failed to load certificate chain from .*selfsigned_rsa_1024_cert.pem, only RSA certificates \"\n#ifdef BORINGSSL_FIPS\n      \"with 2048-bit or 3072-bit keys are supported in FIPS mode\"\n#else\n      \"with 2048-bit or larger keys are supported\"\n#endif\n  );\n  EXPECT_THROW_WITH_REGEX(manager.createSslClientContext(store, client_context_config),\n                          
EnvoyException, error_msg);\n}\n\n// Validate that 3072-bit RSA certificates load successfully.\nTEST_F(ClientContextConfigImplTest, RSA3072Cert) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  const std::string tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_3072_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_3072_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n  manager.createSslClientContext(store, client_context_config);\n}\n\n// Validate that 4096-bit RSA certificates load successfully in non-FIPS builds, but are rejected\n// in FIPS builds.\nTEST_F(ClientContextConfigImplTest, RSA4096Cert) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  const std::string tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_4096_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_4096_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n#ifdef BORINGSSL_FIPS\n  
EXPECT_THROW_WITH_REGEX(\n      manager.createSslClientContext(store, client_context_config), EnvoyException,\n      \"Failed to load certificate chain from .*selfsigned_rsa_4096_cert.pem, only RSA certificates \"\n      \"with 2048-bit or 3072-bit keys are supported in FIPS mode\");\n#else\n  manager.createSslClientContext(store, client_context_config);\n#endif\n}\n\n// Validate that P256 ECDSA certs load.\nTEST_F(ClientContextConfigImplTest, P256EcdsaCert) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  const std::string tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n  manager.createSslClientContext(store, client_context_config);\n}\n\n// Validate that non-P256 ECDSA certs are rejected.\nTEST_F(ClientContextConfigImplTest, NonP256EcdsaCert) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  const std::string tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p384_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p384_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            
*tls_context.mutable_common_tls_context()->add_tls_certificates());\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n  EXPECT_THROW_WITH_REGEX(manager.createSslClientContext(store, client_context_config),\n                          EnvoyException,\n                          \"Failed to load certificate chain from .*selfsigned_ecdsa_p384_cert.pem, \"\n                          \"only P-256 ECDSA certificates are supported\");\n}\n\n// Multiple TLS certificates are not yet supported.\n// TODO(PiotrSikora): Support multiple TLS certificates.\nTEST_F(ClientContextConfigImplTest, MultipleTlsCertificates) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  const std::string tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  EXPECT_THROW_WITH_MESSAGE(\n      ClientContextConfigImpl client_context_config(tls_context, factory_context_), EnvoyException,\n      \"Multiple TLS certificates are not supported for client contexts\");\n}\n\n// Validate context config does not support handling both static TLS certificate and dynamic TLS\n// certificate.\nTEST_F(ClientContextConfigImplTest, TlsCertificatesAndSdsConfig) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  const 
std::string tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  tls_context.mutable_common_tls_context()->add_tls_certificate_sds_secret_configs();\n  EXPECT_THROW_WITH_MESSAGE(\n      ClientContextConfigImpl client_context_config(tls_context, factory_context_), EnvoyException,\n      \"Multiple TLS certificates are not supported for client contexts\");\n}\n\n// Validate context config supports SDS, and is marked as not ready if secrets are not yet\n// downloaded.\nTEST_F(ClientContextConfigImplTest, SecretNotReady) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info));\n  EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats));\n  EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  auto sds_secret_configs =\n      tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add();\n  sds_secret_configs->set_name(\"abc.com\");\n  sds_secret_configs->mutable_sds_config();\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n  // When sds secret is not downloaded, config is not ready.\n  EXPECT_FALSE(client_context_config.isReady());\n  // Set various callbacks to config.\n  
NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  client_context_config.setSecretUpdateCallback(\n      [&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n  client_context_config.setSecretUpdateCallback([]() {});\n}\n\n// Validate client context config supports SDS, and is marked as not ready if dynamic\n// certificate validation context is not yet downloaded.\nTEST_F(ClientContextConfigImplTest, ValidationContextNotReady) {\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      tls_context.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info));\n  EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats));\n  EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  auto sds_secret_configs =\n      tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config();\n  sds_secret_configs->set_name(\"abc.com\");\n  sds_secret_configs->mutable_sds_config();\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n  // When sds secret is not downloaded, config is not ready.\n  EXPECT_FALSE(client_context_config.isReady());\n  // Set various callbacks to 
config.\n  NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  client_context_config.setSecretUpdateCallback(\n      [&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n  client_context_config.setSecretUpdateCallback([]() {});\n}\n\n// Validate that client context config with static TLS certificates is created successfully.\nTEST_F(ClientContextConfigImplTest, StaticTlsCertificates) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n\n  const std::string yaml = R\"EOF(\nname: \"abc.com\"\ntls_certificate:\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n)EOF\";\n\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  tls_context.mutable_common_tls_context()\n      ->mutable_tls_certificate_sds_secret_configs()\n      ->Add()\n      ->set_name(\"abc.com\");\n\n  factory_context_.secretManager().addStaticSecret(secret_config);\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n\n  const std::string cert_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)),\n            client_context_config.tlsCertificates()[0].get().certificateChain());\n  const std::string key_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(key_pem)),\n            client_context_config.tlsCertificates()[0].get().privateKey());\n}\n\n// Validate that client context config with password-protected TLS certificates is created\n// 
successfully.\nTEST_F(ClientContextConfigImplTest, PasswordProtectedTlsCertificates) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n  secret_config.set_name(\"abc.com\");\n\n  auto* tls_certificate = secret_config.mutable_tls_certificate();\n  tls_certificate->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_cert.pem\"));\n  tls_certificate->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_key.pem\"));\n  tls_certificate->mutable_password()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_password.txt\"));\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  tls_context.mutable_common_tls_context()\n      ->mutable_tls_certificate_sds_secret_configs()\n      ->Add()\n      ->set_name(\"abc.com\");\n\n  factory_context_.secretManager().addStaticSecret(secret_config);\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n\n  const std::string cert_pem =\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_cert.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)),\n            client_context_config.tlsCertificates()[0].get().certificateChain());\n  const std::string key_pem =\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_key.pem\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(key_pem)),\n            client_context_config.tlsCertificates()[0].get().privateKey());\n  const std::string password_file =\n      \"{{ test_rundir \"\n      
\"}}/test/extensions/transport_sockets/tls/test_data/password_protected_password.txt\";\n  EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(password_file)),\n            client_context_config.tlsCertificates()[0].get().password());\n}\n\n// Validate that not supplying a passphrase for password-protected TLS certificates\n// triggers a failure.\nTEST_F(ClientContextConfigImplTest, PasswordNotSuppliedTlsCertificates) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n  secret_config.set_name(\"abc.com\");\n\n  auto* tls_certificate = secret_config.mutable_tls_certificate();\n  tls_certificate->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_cert.pem\"));\n  const std::string private_key_path = TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_key.pem\");\n  tls_certificate->mutable_private_key()->set_filename(private_key_path);\n  // Don't supply the password.\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  tls_context.mutable_common_tls_context()\n      ->mutable_tls_certificate_sds_secret_configs()\n      ->Add()\n      ->set_name(\"abc.com\");\n\n  factory_context_.secretManager().addStaticSecret(secret_config);\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n  EXPECT_THROW_WITH_REGEX(manager.createSslClientContext(store, client_context_config),\n                          EnvoyException,\n                          absl::StrCat(\"Failed to load private key from \", private_key_path));\n}\n\n// Validate that client context config with static certificate validation context is created\n// 
successfully.\nTEST_F(ClientContextConfigImplTest, StaticCertificateValidationContext) {\n  envoy::extensions::transport_sockets::tls::v3::Secret tls_certificate_secret_config;\n  const std::string tls_certificate_yaml = R\"EOF(\n  name: \"abc.com\"\n  tls_certificate:\n    certificate_chain:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n    private_key:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            tls_certificate_secret_config);\n  factory_context_.secretManager().addStaticSecret(tls_certificate_secret_config);\n  envoy::extensions::transport_sockets::tls::v3::Secret\n      certificate_validation_context_secret_config;\n  const std::string certificate_validation_context_yaml = R\"EOF(\n    name: \"def.com\"\n    validation_context:\n      trusted_ca: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\" }\n      allow_expired_certificate: true\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(certificate_validation_context_yaml),\n                            certificate_validation_context_secret_config);\n  factory_context_.secretManager().addStaticSecret(certificate_validation_context_secret_config);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  tls_context.mutable_common_tls_context()\n      ->mutable_tls_certificate_sds_secret_configs()\n      ->Add()\n      ->set_name(\"abc.com\");\n  tls_context.mutable_common_tls_context()\n      ->mutable_validation_context_sds_secret_config()\n      ->set_name(\"def.com\");\n  ClientContextConfigImpl client_context_config(tls_context, factory_context_);\n\n  const std::string cert_pem =\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\";\n  
EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)),\n            client_context_config.certificateValidationContext()->caCert());\n}\n\n// Validate that constructor of client context config throws an exception when static TLS\n// certificate is missing.\nTEST_F(ClientContextConfigImplTest, MissingStaticSecretTlsCertificates) {\n  envoy::extensions::transport_sockets::tls::v3::Secret secret_config;\n\n  const std::string yaml = R\"EOF(\nname: \"abc.com\"\ntls_certificate:\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n)EOF\";\n\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config);\n\n  factory_context_.secretManager().addStaticSecret(secret_config);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  tls_context.mutable_common_tls_context()\n      ->mutable_tls_certificate_sds_secret_configs()\n      ->Add()\n      ->set_name(\"missing\");\n\n  EXPECT_THROW_WITH_MESSAGE(\n      ClientContextConfigImpl client_context_config(tls_context, factory_context_), EnvoyException,\n      \"Unknown static secret: missing\");\n}\n\n// Validate that constructor of client context config throws an exception when static certificate\n// validation context is missing.\nTEST_F(ClientContextConfigImplTest, MissingStaticCertificateValidationContext) {\n  envoy::extensions::transport_sockets::tls::v3::Secret tls_certificate_secret_config;\n  const std::string tls_certificate_yaml = R\"EOF(\n    name: \"abc.com\"\n    tls_certificate:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n 
   )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            tls_certificate_secret_config);\n  factory_context_.secretManager().addStaticSecret(tls_certificate_secret_config);\n  envoy::extensions::transport_sockets::tls::v3::Secret\n      certificate_validation_context_secret_config;\n  const std::string certificate_validation_context_yaml = R\"EOF(\n      name: \"def.com\"\n      validation_context:\n        trusted_ca: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\" }\n        allow_expired_certificate: true\n    )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(certificate_validation_context_yaml),\n                            certificate_validation_context_secret_config);\n  factory_context_.secretManager().addStaticSecret(certificate_validation_context_secret_config);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  tls_context.mutable_common_tls_context()\n      ->mutable_tls_certificate_sds_secret_configs()\n      ->Add()\n      ->set_name(\"abc.com\");\n  tls_context.mutable_common_tls_context()\n      ->mutable_validation_context_sds_secret_config()\n      ->set_name(\"missing\");\n  EXPECT_THROW_WITH_MESSAGE(\n      ClientContextConfigImpl client_context_config(tls_context, factory_context_), EnvoyException,\n      \"Unknown static certificate validation context: missing\");\n}\n\nclass ServerContextConfigImplTest : public SslCertsTest {};\n\n// Multiple TLS certificates are supported.\nTEST_F(ServerContextConfigImplTest, MultipleTlsCertificates) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  EXPECT_THROW_WITH_MESSAGE(\n      ServerContextConfigImpl client_context_config(tls_context, factory_context_), EnvoyException,\n      \"No TLS certificates found for server context\");\n  const std::string rsa_tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    
filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n  )EOF\";\n  const std::string ecdsa_tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(rsa_tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  TestUtility::loadFromYaml(TestEnvironment::substitute(ecdsa_tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  auto tls_certs = server_context_config.tlsCertificates();\n  ASSERT_EQ(2, tls_certs.size());\n  EXPECT_THAT(tls_certs[0].get().privateKeyPath(), EndsWith(\"selfsigned_key.pem\"));\n  EXPECT_THAT(tls_certs[1].get().privateKeyPath(), EndsWith(\"selfsigned_ecdsa_p256_key.pem\"));\n}\n\nTEST_F(ServerContextConfigImplTest, TlsCertificatesAndSdsConfig) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  EXPECT_THROW_WITH_MESSAGE(\n      ServerContextConfigImpl server_context_config(tls_context, factory_context_), EnvoyException,\n      \"No TLS certificates found for server context\");\n  const std::string tls_certificate_yaml = R\"EOF(\n  certificate_chain:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n  private_key:\n    filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n  )EOF\";\n  
TestUtility::loadFromYaml(TestEnvironment::substitute(tls_certificate_yaml),\n                            *tls_context.mutable_common_tls_context()->add_tls_certificates());\n  tls_context.mutable_common_tls_context()->add_tls_certificate_sds_secret_configs();\n  EXPECT_THROW_WITH_MESSAGE(\n      ServerContextConfigImpl server_context_config(tls_context, factory_context_), EnvoyException,\n      \"SDS and non-SDS TLS certificates may not be mixed in server contexts\");\n}\n\nTEST_F(ServerContextConfigImplTest, MultiSdsConfig) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  tls_context.mutable_common_tls_context()->add_tls_certificate_sds_secret_configs();\n  tls_context.mutable_common_tls_context()->add_tls_certificate_sds_secret_configs();\n  EXPECT_THROW_WITH_REGEX(\n      TestUtility::validate<envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext>(\n          tls_context),\n      EnvoyException, \"Proto constraint validation failed\");\n}\n\nTEST_F(ServerContextConfigImplTest, SecretNotReady) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info));\n  EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats));\n  EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  auto sds_secret_configs =\n      tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add();\n  sds_secret_configs->set_name(\"abc.com\");\n  sds_secret_configs->mutable_sds_config();\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  // When sds secret is not downloaded, 
config is not ready.\n  EXPECT_FALSE(server_context_config.isReady());\n  // Set various callbacks to config.\n  NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  server_context_config.setSecretUpdateCallback(\n      [&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n  server_context_config.setSecretUpdateCallback([]() {});\n}\n\n// Validate server context config supports SDS, and is marked as not ready if dynamic\n// certificate validation context is not yet downloaded.\nTEST_F(ServerContextConfigImplTest, ValidationContextNotReady) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      tls_context.mutable_common_tls_context()->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  Stats::IsolatedStoreImpl stats;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info));\n  EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats));\n  EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  auto sds_secret_configs =\n      tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config();\n  sds_secret_configs->set_name(\"abc.com\");\n  sds_secret_configs->mutable_sds_config();\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  // When sds secret is not 
downloaded, config is not ready.\n  EXPECT_FALSE(server_context_config.isReady());\n  // Set various callbacks to config.\n  NiceMock<Secret::MockSecretCallbacks> secret_callback;\n  server_context_config.setSecretUpdateCallback(\n      [&secret_callback]() { secret_callback.onAddOrUpdateSecret(); });\n  server_context_config.setSecretUpdateCallback([]() {});\n}\n\n// TlsCertificate messages must have a cert for servers.\nTEST_F(ServerContextConfigImplTest, TlsCertificateNonEmpty) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  tls_context.mutable_common_tls_context()->add_tls_certificates();\n  ServerContextConfigImpl client_context_config(tls_context, factory_context_);\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  Stats::IsolatedStoreImpl store;\n  EXPECT_THROW_WITH_MESSAGE(\n      Envoy::Ssl::ServerContextSharedPtr server_ctx(\n          manager.createSslServerContext(store, client_context_config, std::vector<std::string>{})),\n      EnvoyException, \"Server TlsCertificates must have a certificate specified\");\n}\n\n// Cannot ignore certificate expiration without a trusted CA.\nTEST_F(ServerContextConfigImplTest, InvalidIgnoreCertsNoCA) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx =\n          tls_context.mutable_common_tls_context()->mutable_validation_context();\n\n  server_validation_ctx->set_allow_expired_certificate(true);\n\n  EXPECT_THROW_WITH_MESSAGE(\n      ServerContextConfigImpl server_context_config(tls_context, factory_context_), EnvoyException,\n      \"Certificate validity period is always ignored without trusted CA\");\n\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      tls_context.mutable_common_tls_context()->add_tls_certificates();\n  
server_cert->mutable_certificate_chain()->set_filename(\n      TestEnvironment::substitute(\"{{ test_tmpdir }}/unittestcert.pem\"));\n  server_cert->mutable_private_key()->set_filename(\n      TestEnvironment::substitute(\"{{ test_tmpdir }}/unittestkey.pem\"));\n\n  server_validation_ctx->set_allow_expired_certificate(false);\n\n  EXPECT_NO_THROW(ServerContextConfigImpl server_context_config(tls_context, factory_context_));\n\n  server_validation_ctx->set_allow_expired_certificate(true);\n\n  EXPECT_THROW_WITH_MESSAGE(\n      ServerContextConfigImpl server_context_config(tls_context, factory_context_), EnvoyException,\n      \"Certificate validity period is always ignored without trusted CA\");\n\n  // But once you add a trusted CA, you should be able to create the context.\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n\n  EXPECT_NO_THROW(ServerContextConfigImpl server_context_config(tls_context, factory_context_));\n}\n\nTEST_F(ServerContextConfigImplTest, PrivateKeyMethodLoadFailureNoProvider) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  NiceMock<Ssl::MockContextManager> context_manager;\n  NiceMock<Ssl::MockPrivateKeyMethodManager> private_key_method_manager;\n  EXPECT_CALL(factory_context_, sslContextManager()).WillOnce(ReturnRef(context_manager));\n  EXPECT_CALL(context_manager, privateKeyMethodManager())\n      .WillOnce(ReturnRef(private_key_method_manager));\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key_provider:\n        provider_name: mock_provider\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            test_value: 
100\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n  EXPECT_THROW_WITH_REGEX(\n      ServerContextConfigImpl server_context_config(tls_context, factory_context_), EnvoyException,\n      \"Failed to load incomplete certificate from \");\n}\n\nTEST_F(ServerContextConfigImplTest, PrivateKeyMethodLoadFailureNoMethod) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  tls_context.mutable_common_tls_context()->add_tls_certificates();\n  Stats::IsolatedStoreImpl store;\n  NiceMock<Ssl::MockContextManager> context_manager;\n  NiceMock<Ssl::MockPrivateKeyMethodManager> private_key_method_manager;\n  auto private_key_method_provider_ptr =\n      std::make_shared<NiceMock<Ssl::MockPrivateKeyMethodProvider>>();\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(time_system);\n  EXPECT_CALL(factory_context_, sslContextManager()).WillOnce(ReturnRef(context_manager));\n  EXPECT_CALL(context_manager, privateKeyMethodManager())\n      .WillOnce(ReturnRef(private_key_method_manager));\n  EXPECT_CALL(private_key_method_manager, createPrivateKeyMethodProvider(_, _))\n      .WillOnce(Return(private_key_method_provider_ptr));\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key_provider:\n        provider_name: mock_provider\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            test_value: 100\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n  EXPECT_THROW_WITH_MESSAGE(\n      Envoy::Ssl::ServerContextSharedPtr server_ctx(\n          manager.createSslServerContext(store, 
server_context_config, std::vector<std::string>{})),\n      EnvoyException, \"Failed to get BoringSSL private key method from provider\");\n}\n\nTEST_F(ServerContextConfigImplTest, PrivateKeyMethodLoadSuccess) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  NiceMock<Ssl::MockContextManager> context_manager;\n  NiceMock<Ssl::MockPrivateKeyMethodManager> private_key_method_manager;\n  auto private_key_method_provider_ptr =\n      std::make_shared<NiceMock<Ssl::MockPrivateKeyMethodProvider>>();\n  EXPECT_CALL(factory_context_, sslContextManager()).WillOnce(ReturnRef(context_manager));\n  EXPECT_CALL(context_manager, privateKeyMethodManager())\n      .WillOnce(ReturnRef(private_key_method_manager));\n  EXPECT_CALL(private_key_method_manager, createPrivateKeyMethodProvider(_, _))\n      .WillOnce(Return(private_key_method_provider_ptr));\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key_provider:\n        provider_name: mock_provider\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            test_value: 100\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n  ServerContextConfigImpl server_context_config(tls_context, factory_context_);\n}\n\nTEST_F(ServerContextConfigImplTest, PrivateKeyMethodLoadFailureBothKeyAndMethod) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  NiceMock<Ssl::MockContextManager> context_manager;\n  NiceMock<Ssl::MockPrivateKeyMethodManager> private_key_method_manager;\n  auto private_key_method_provider_ptr =\n      std::make_shared<NiceMock<Ssl::MockPrivateKeyMethodProvider>>();\n  EXPECT_CALL(factory_context_, 
sslContextManager()).WillOnce(ReturnRef(context_manager));\n  EXPECT_CALL(context_manager, privateKeyMethodManager())\n      .WillOnce(ReturnRef(private_key_method_manager));\n  EXPECT_CALL(private_key_method_manager, createPrivateKeyMethodProvider(_, _))\n      .WillOnce(Return(private_key_method_provider_ptr));\n  const std::string tls_context_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n      private_key_provider:\n        provider_name: mock_provider\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            test_value: 100\n  )EOF\";\n  TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context);\n  EXPECT_THROW_WITH_MESSAGE(\n      ServerContextConfigImpl server_context_config(tls_context, factory_context_), EnvoyException,\n      \"Certificate configuration can't have both private_key and private_key_provider\");\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/gen_unittest_certs.sh",
    "content": "#!/bin/bash\n#\n# Create a test certificate with a 15-day expiration for SSL tests.\n\nset -e\n\nTEST_CERT_DIR=\"${TEST_TMPDIR}\"\n\nmkdir -p \"${TEST_CERT_DIR}\"\n\nexport OPENSSL_CONF=\"${TEST_CERT_DIR}\"/openssl.cnf\n(cat << EOF\n[ req ]\ndefault_bits            = 2048\ndistinguished_name      = req_distinguished_name\n\n[ req_distinguished_name ]\ncountryName                     = Country Name (2 letter code)\ncountryName_default             = AU\ncountryName_min                 = 2\ncountryName_max                 = 2\n\nstateOrProvinceName             = State or Province Name (full name)\nstateOrProvinceName_default     = Some-State\n\nlocalityName                    = Locality Name (eg, city)\n\n0.organizationName              = Organization Name (eg, company)\n0.organizationName_default      = Internet Widgits Pty Ltd\n\norganizationalUnitName          = Organizational Unit Name (eg, section)\n\ncommonName                      = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max                  = 64\n\nemailAddress                    = Email Address\nemailAddress_max                = 64\nEOF\n) > \"${OPENSSL_CONF}\"\n\nopenssl genrsa -out \"${TEST_CERT_DIR}/unittestkey.pem\" 2048\nopenssl req -new -key \"${TEST_CERT_DIR}/unittestkey.pem\" -out \"${TEST_CERT_DIR}/unittestcert.csr\" \\\n    -sha256 <<EOF\nUS\nCalifornia\nSan Francisco\nLyft\nTest\nUnit Test CA\nunittest@lyft.com\n\n\nEOF\nopenssl x509 -req -days 15 -in \"${TEST_CERT_DIR}/unittestcert.csr\" -sha256 \\\n    -signkey \"${TEST_CERT_DIR}/unittestkey.pem\" -out \"${TEST_CERT_DIR}/unittestcert.pem\"\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/handshaker_test.cc",
    "content": "#include <openssl/ssl3.h>\n\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/ssl/handshaker.h\"\n\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"extensions/transport_sockets/tls/ssl_handshaker.h\"\n\n#include \"test/extensions/transport_sockets/tls/ssl_certs_test.h\"\n#include \"test/mocks/network/connection.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"openssl/evp.h\"\n#include \"openssl/hmac.h\"\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace {\n\nusing ::testing::NiceMock;\nusing ::testing::Return;\nusing ::testing::StrictMock;\n\n// A callback shaped like pem_password_cb.\n// See https://www.openssl.org/docs/man1.1.0/man3/pem_password_cb.html.\nint pemPasswordCallback(char* buf, int buf_size, int, void* u) {\n  if (u == nullptr) {\n    return 0;\n  }\n  std::string passphrase = *reinterpret_cast<std::string*>(u);\n  RELEASE_ASSERT(buf_size >= static_cast<int>(passphrase.size()),\n                 \"Passphrase was larger than buffer.\");\n  memcpy(buf, passphrase.data(), passphrase.size());\n  return passphrase.size();\n}\n\nclass MockHandshakeCallbacks : public Ssl::HandshakeCallbacks {\npublic:\n  ~MockHandshakeCallbacks() override = default;\n  MOCK_METHOD(Network::Connection&, connection, (), (const, override));\n  MOCK_METHOD(void, onSuccess, (SSL*), (override));\n  MOCK_METHOD(void, onFailure, (), (override));\n  MOCK_METHOD(Network::TransportSocketCallbacks*, transportSocketCallbacks, (), (override));\n};\n\nclass HandshakerTest : public SslCertsTest {\nprotected:\n  HandshakerTest()\n      : dispatcher_(api_->allocateDispatcher(\"test_thread\")), stream_info_(api_->timeSource()),\n        client_ctx_(SSL_CTX_new(TLS_method())), server_ctx_(SSL_CTX_new(TLS_method())) {}\n\n  void SetUp() override {\n    // Set up key and cert, initialize two SSL objects and a pair of BIOs for\n    // 
handshaking.\n    auto key = makeKey();\n    auto cert = makeCert();\n    auto chain = std::vector<CRYPTO_BUFFER*>{cert.get()};\n\n    server_ssl_ = bssl::UniquePtr<SSL>(SSL_new(server_ctx_.get()));\n    SSL_set_accept_state(server_ssl_.get());\n    ASSERT_NE(key, nullptr);\n    ASSERT_EQ(1, SSL_set_chain_and_key(server_ssl_.get(), chain.data(), chain.size(), key.get(),\n                                       nullptr));\n\n    client_ssl_ = bssl::UniquePtr<SSL>(SSL_new(client_ctx_.get()));\n    SSL_set_connect_state(client_ssl_.get());\n\n    ASSERT_EQ(1, BIO_new_bio_pair(&client_bio_, kBufferLength, &server_bio_, kBufferLength));\n\n    BIO_up_ref(client_bio_);\n    BIO_up_ref(server_bio_);\n    SSL_set0_rbio(client_ssl_.get(), client_bio_);\n    SSL_set0_wbio(client_ssl_.get(), client_bio_);\n    SSL_set0_rbio(server_ssl_.get(), server_bio_);\n    SSL_set0_wbio(server_ssl_.get(), server_bio_);\n  }\n\n  // Read in key.pem and return a new private key.\n  bssl::UniquePtr<EVP_PKEY> makeKey() {\n    std::string file = TestEnvironment::readFileToStringForTest(\n        TestEnvironment::substitute(\"{{ test_tmpdir }}/unittestkey.pem\"));\n    std::string passphrase = \"\";\n    bssl::UniquePtr<BIO> bio(BIO_new_mem_buf(file.data(), file.size()));\n\n    bssl::UniquePtr<EVP_PKEY> key(EVP_PKEY_new());\n\n    RSA* rsa = PEM_read_bio_RSAPrivateKey(bio.get(), nullptr, &pemPasswordCallback, &passphrase);\n    RELEASE_ASSERT(rsa != nullptr, \"PEM_read_bio_RSAPrivateKey failed.\");\n    RELEASE_ASSERT(1 == EVP_PKEY_assign_RSA(key.get(), rsa), \"EVP_PKEY_assign_RSA failed.\");\n    return key;\n  }\n\n  // Read in cert.pem and return a certificate.\n  bssl::UniquePtr<CRYPTO_BUFFER> makeCert() {\n    std::string file = TestEnvironment::readFileToStringForTest(\n        TestEnvironment::substitute(\"{{ test_tmpdir }}/unittestcert.pem\"));\n    bssl::UniquePtr<BIO> bio(BIO_new_mem_buf(file.data(), file.size()));\n\n    uint8_t* data = nullptr;\n    long len = 0;\n    
RELEASE_ASSERT(\n        PEM_bytes_read_bio(&data, &len, nullptr, PEM_STRING_X509, bio.get(), nullptr, nullptr),\n        \"PEM_bytes_read_bio failed\");\n    bssl::UniquePtr<uint8_t> tmp(data); // Prevents memory leak.\n    return bssl::UniquePtr<CRYPTO_BUFFER>(CRYPTO_BUFFER_new(data, len, nullptr));\n  }\n\n  const size_t kBufferLength{100};\n\n  Event::DispatcherPtr dispatcher_;\n  StreamInfo::StreamInfoImpl stream_info_;\n\n  BIO *client_bio_, *server_bio_;\n  bssl::UniquePtr<SSL_CTX> client_ctx_, server_ctx_;\n  bssl::UniquePtr<SSL> client_ssl_, server_ssl_;\n};\n\nTEST_F(HandshakerTest, NormalOperation) {\n  NiceMock<Network::MockConnection> mock_connection;\n  ON_CALL(mock_connection, state).WillByDefault(Return(Network::Connection::State::Closed));\n\n  NiceMock<MockHandshakeCallbacks> handshake_callbacks;\n  EXPECT_CALL(handshake_callbacks, onSuccess).Times(1);\n  ON_CALL(handshake_callbacks, connection()).WillByDefault(ReturnRef(mock_connection));\n\n  SslHandshakerImpl handshaker(std::move(server_ssl_), 0, &handshake_callbacks);\n\n  auto post_io_action = Network::PostIoAction::KeepOpen; // default enum\n\n  // Run the handshakes from the client and server until SslHandshakerImpl decides\n  // we're done and returns PostIoAction::Close.\n  while (post_io_action != Network::PostIoAction::Close) {\n    SSL_do_handshake(client_ssl_.get());\n    post_io_action = handshaker.doHandshake();\n  }\n\n  EXPECT_EQ(post_io_action, Network::PostIoAction::Close);\n}\n\n// We induce some kind of BIO mismatch and force the SSL_do_handshake to\n// return an error code without error handling, i.e. not SSL_ERROR_WANT_READ\n// or _WRITE or _PRIVATE_KEY_OPERATION.\nTEST_F(HandshakerTest, ErrorCbOnAbnormalOperation) {\n  // We make a new BIO, set it as the `rbio`/`wbio` for the client SSL object,\n  // and break the BIO pair connecting the two SSL objects. 
Now handshaking will\n  // fail, likely with SSL_ERROR_SSL.\n  BIO* bio = BIO_new(BIO_s_socket());\n  SSL_set_bio(client_ssl_.get(), bio, bio);\n\n  StrictMock<MockHandshakeCallbacks> handshake_callbacks;\n  EXPECT_CALL(handshake_callbacks, onFailure).Times(1);\n\n  SslHandshakerImpl handshaker(std::move(server_ssl_), 0, &handshake_callbacks);\n\n  auto post_io_action = Network::PostIoAction::KeepOpen; // default enum\n\n  while (post_io_action != Network::PostIoAction::Close) {\n    SSL_do_handshake(client_ssl_.get());\n    post_io_action = handshaker.doHandshake();\n  }\n\n  // In the error case, SslHandshakerImpl also closes the connection.\n  EXPECT_EQ(post_io_action, Network::PostIoAction::Close);\n}\n\n// Example SslHandshakerImpl demonstrating special-case behavior which necessitates\n// extra SSL_ERROR case handling. Here, we induce an SSL_ERROR_WANT_X509_LOOKUP,\n// check for it in the handshaker, faux-trigger the lookup, and then proceed as\n// normal.\nclass SslHandshakerImplForTest : public SslHandshakerImpl {\npublic:\n  SslHandshakerImplForTest(bssl::UniquePtr<SSL> ssl_ptr,\n                           Ssl::HandshakeCallbacks* handshake_callbacks,\n                           std::function<void()> requested_cert_cb)\n      : SslHandshakerImpl(std::move(ssl_ptr), 0, handshake_callbacks),\n        requested_cert_cb_(requested_cert_cb) {\n    SSL_set_cert_cb(\n        ssl(), [](SSL*, void* arg) -> int { return *static_cast<bool*>(arg) ? 
1 : -1; },\n        &cert_cb_ok_);\n  }\n\n  Network::PostIoAction doHandshake() override {\n    RELEASE_ASSERT(state() != Ssl::SocketState::HandshakeComplete &&\n                       state() != Ssl::SocketState::ShutdownSent,\n                   \"Handshaker state was either complete or sent.\");\n\n    int rc = SSL_do_handshake(ssl());\n    if (rc == 1) {\n      setState(Ssl::SocketState::HandshakeComplete);\n      handshakeCallbacks()->onSuccess(ssl());\n      return Network::PostIoAction::Close;\n    } else {\n      switch (SSL_get_error(ssl(), rc)) {\n      case SSL_ERROR_WANT_READ:\n      case SSL_ERROR_WANT_WRITE:\n        return Network::PostIoAction::KeepOpen;\n      case SSL_ERROR_WANT_X509_LOOKUP:\n        // Special case. Once this lookup is requested, we flip the bit and allow\n        // the handshake to proceed.\n        requested_cert_cb_();\n        return Network::PostIoAction::KeepOpen;\n      default:\n        handshakeCallbacks()->onFailure();\n        return Network::PostIoAction::Close;\n      }\n    }\n  }\n\n  void setCertCbOk() { cert_cb_ok_ = true; }\n\nprivate:\n  std::function<void()> requested_cert_cb_;\n  bool cert_cb_ok_{false};\n};\n\nTEST_F(HandshakerTest, NormalOperationWithSslHandshakerImplForTest) {\n  ::testing::MockFunction<void()> requested_cert_cb;\n\n  StrictMock<MockHandshakeCallbacks> handshake_callbacks;\n  EXPECT_CALL(handshake_callbacks, onSuccess).Times(1);\n\n  SslHandshakerImplForTest handshaker(std::move(server_ssl_), &handshake_callbacks,\n                                      requested_cert_cb.AsStdFunction());\n\n  EXPECT_CALL(requested_cert_cb, Call).WillOnce([&]() { handshaker.setCertCbOk(); });\n\n  auto post_io_action = Network::PostIoAction::KeepOpen; // default enum\n\n  while (post_io_action != Network::PostIoAction::Close) {\n    SSL_do_handshake(client_ssl_.get());\n    post_io_action = handshaker.doHandshake();\n  }\n\n  EXPECT_EQ(post_io_action, Network::PostIoAction::Close);\n}\n\n} // namespace\n} 
// namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/integration/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"ssl_integration_test\",\n    srcs = [\n        \"ssl_integration_test.cc\",\n        \"ssl_integration_test.h\",\n    ],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:connection_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/integration:http_integration_lib\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc",
    "content": "#include \"ssl_integration_test.h\"\n\n#include <memory>\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/tap/v3/common.pb.h\"\n#include \"envoy/data/tap/v3/wrapper.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/transport_sockets/tap/v3/tap.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/connection_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nvoid SslIntegrationTestBase::initialize() {\n  config_helper_.addSslConfig(ConfigHelper::ServerSslOptions()\n                                  .setRsaCert(server_rsa_cert_)\n                                  .setRsaCertOcspStaple(server_rsa_cert_ocsp_staple_)\n                                  .setEcdsaCert(server_ecdsa_cert_)\n                                  .setEcdsaCertOcspStaple(server_ecdsa_cert_ocsp_staple_)\n                                  .setOcspStapleRequired(ocsp_staple_required_)\n                                  .setTlsV13(server_tlsv1_3_)\n                                  .setExpectClientEcdsaCert(client_ecdsa_cert_));\n  HttpIntegrationTest::initialize();\n\n  context_manager_ =\n      std::make_unique<Extensions::TransportSockets::Tls::ContextManagerImpl>(timeSystem());\n\n  
registerTestServerPorts({\"http\"});\n}\n\nvoid SslIntegrationTestBase::TearDown() {\n  HttpIntegrationTest::cleanupUpstreamAndDownstream();\n  codec_client_.reset();\n  context_manager_.reset();\n}\n\nNetwork::ClientConnectionPtr\nSslIntegrationTestBase::makeSslClientConnection(const ClientSslTransportOptions& options) {\n  Network::Address::InstanceConstSharedPtr address = getSslAddress(version_, lookupPort(\"http\"));\n  if (debug_with_s_client_) {\n    const std::string s_client_cmd = TestEnvironment::substitute(\n        \"openssl s_client -connect \" + address->asString() +\n            \" -showcerts -debug -msg -CAfile \"\n            \"{{ test_rundir }}/test/config/integration/certs/cacert.pem \"\n            \"-servername lyft.com -cert \"\n            \"{{ test_rundir }}/test/config/integration/certs/clientcert.pem \"\n            \"-key \"\n            \"{{ test_rundir }}/test/config/integration/certs/clientkey.pem \",\n        version_);\n    ENVOY_LOG_MISC(debug, \"Executing {}\", s_client_cmd);\n    RELEASE_ASSERT(::system(s_client_cmd.c_str()) == 0, \"\");\n  }\n  auto client_transport_socket_factory_ptr =\n      createClientSslTransportSocketFactory(options, *context_manager_, *api_);\n  return dispatcher_->createClientConnection(\n      address, Network::Address::InstanceConstSharedPtr(),\n      client_transport_socket_factory_ptr->createTransportSocket({}), nullptr);\n}\n\nvoid SslIntegrationTestBase::checkStats() {\n  const uint32_t expected_handshakes = debug_with_s_client_ ? 
2 : 1;\n  Stats::CounterSharedPtr counter = test_server_->counter(listenerStatPrefix(\"ssl.handshake\"));\n  EXPECT_EQ(expected_handshakes, counter->value());\n  counter->reset();\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SslIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(SslIntegrationTest, RouterRequestAndResponseWithGiantBodyBuffer) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  testRouterRequestAndResponseWithBody(16 * 1024 * 1024, 16 * 1024 * 1024, false, false, &creator);\n  checkStats();\n}\n\nTEST_P(SslIntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\nTEST_P(SslIntegrationTest, RouterRequestAndResponseWithBodyNoBufferHttp2) {\n  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n  config_helper_.setClientCodec(envoy::extensions::filters::network::http_connection_manager::v3::\n                                    HttpConnectionManager::AUTO);\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(ClientSslTransportOptions().setAlpn(true));\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\nTEST_P(SslIntegrationTest, RouterRequestAndResponseWithBodyNoBufferVerifySAN) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(ClientSslTransportOptions().setSan(true));\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\nTEST_P(SslIntegrationTest, 
RouterRequestAndResponseWithBodyNoBufferHttp2VerifySAN) {\n  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(ClientSslTransportOptions().setAlpn(true).setSan(true));\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\nTEST_P(SslIntegrationTest, RouterHeaderOnlyRequestAndResponse) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  testRouterHeaderOnlyRequestAndResponse(&creator);\n  checkStats();\n}\n\nTEST_P(SslIntegrationTest, RouterUpstreamDisconnectBeforeResponseComplete) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  testRouterUpstreamDisconnectBeforeResponseComplete(&creator);\n  checkStats();\n}\n\nTEST_P(SslIntegrationTest, RouterDownstreamDisconnectBeforeRequestComplete) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  testRouterDownstreamDisconnectBeforeRequestComplete(&creator);\n  checkStats();\n}\n\nTEST_P(SslIntegrationTest, RouterDownstreamDisconnectBeforeResponseComplete) {\n#if defined(__APPLE__) || defined(WIN32)\n  // Skip this test on OS X + Windows: we can't detect the early close on non-Linux, and we\n  // won't clean up the upstream connection until it times out. 
See #4294.\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    return;\n  }\n#endif\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  testRouterDownstreamDisconnectBeforeResponseComplete(&creator);\n  checkStats();\n}\n\n// This test must be here vs integration_admin_test so that it tests a server with loaded certs.\nTEST_P(SslIntegrationTest, AdminCertEndpoint) {\n  initialize();\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"admin\"), \"GET\", \"/certs\", \"\", downstreamProtocol(), version_);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// Validate certificate selection across different certificate types and client TLS versions.\nclass SslCertficateIntegrationTest\n    : public testing::TestWithParam<\n          std::tuple<Network::Address::IpVersion,\n                     envoy::extensions::transport_sockets::tls::v3::TlsParameters::TlsProtocol>>,\n      public SslIntegrationTestBase {\npublic:\n  SslCertficateIntegrationTest() : SslIntegrationTestBase(std::get<0>(GetParam())) {\n    server_tlsv1_3_ = true;\n  }\n\n  Network::ClientConnectionPtr\n  makeSslClientConnection(const ClientSslTransportOptions& options) override {\n    ClientSslTransportOptions modified_options{options};\n    modified_options.setTlsVersion(tls_version_);\n    return SslIntegrationTestBase::makeSslClientConnection(modified_options);\n  }\n\n  void TearDown() override { SslIntegrationTestBase::TearDown(); };\n\n  ClientSslTransportOptions rsaOnlyClientOptions() {\n    if (tls_version_ == envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3) {\n      return ClientSslTransportOptions().setSigningAlgorithmsForTest(\"rsa_pss_rsae_sha256\");\n    } else {\n      return ClientSslTransportOptions().setCipherSuites({\"ECDHE-RSA-AES128-GCM-SHA256\"});\n    }\n  }\n\n  
ClientSslTransportOptions ecdsaOnlyClientOptions() {\n    auto options = ClientSslTransportOptions().setClientEcdsaCert(true);\n    if (tls_version_ == envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3) {\n      return options.setSigningAlgorithmsForTest(\"ecdsa_secp256r1_sha256\");\n    } else {\n      return options.setCipherSuites({\"ECDHE-ECDSA-AES128-GCM-SHA256\"});\n    }\n  }\n\n  static std::string ipClientVersionTestParamsToString(\n      const ::testing::TestParamInfo<\n          std::tuple<Network::Address::IpVersion,\n                     envoy::extensions::transport_sockets::tls::v3::TlsParameters::TlsProtocol>>&\n          params) {\n    return fmt::format(\"{}_TLSv1_{}\",\n                       std::get<0>(params.param) == Network::Address::IpVersion::v4 ? \"IPv4\"\n                                                                                    : \"IPv6\",\n                       std::get<1>(params.param) - 1);\n  }\n\n  const envoy::extensions::transport_sockets::tls::v3::TlsParameters::TlsProtocol tls_version_{\n      std::get<1>(GetParam())};\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    IpVersionsClientVersions, SslCertficateIntegrationTest,\n    testing::Combine(\n        testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n        testing::Values(envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_2,\n                        envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3)),\n    SslCertficateIntegrationTest::ipClientVersionTestParamsToString);\n\n// Server with an RSA certificate and a client with RSA/ECDSA cipher suites works.\nTEST_P(SslCertficateIntegrationTest, ServerRsa) {\n  server_rsa_cert_ = true;\n  server_ecdsa_cert_ = false;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// Server with an ECDSA 
certificate and a client with RSA/ECDSA cipher suites works.\nTEST_P(SslCertficateIntegrationTest, ServerEcdsa) {\n  server_rsa_cert_ = false;\n  server_ecdsa_cert_ = true;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// Server with RSA/ECDSAs certificates and a client with RSA/ECDSA cipher suites works.\nTEST_P(SslCertficateIntegrationTest, ServerRsaEcdsa) {\n  server_rsa_cert_ = true;\n  server_ecdsa_cert_ = true;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// Server with an RSA certificate and a client with only RSA cipher suites works.\nTEST_P(SslCertficateIntegrationTest, ClientRsaOnly) {\n  server_rsa_cert_ = true;\n  server_ecdsa_cert_ = false;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(rsaOnlyClientOptions());\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// Server has only an ECDSA certificate, client is only RSA capable, leads to a connection fail.\nTEST_P(SslCertficateIntegrationTest, ServerEcdsaClientRsaOnly) {\n  server_rsa_cert_ = false;\n  server_ecdsa_cert_ = true;\n  initialize();\n  auto codec_client =\n      makeRawHttpConnection(makeSslClientConnection(rsaOnlyClientOptions()), absl::nullopt);\n  EXPECT_FALSE(codec_client->connected());\n  const std::string counter_name = listenerStatPrefix(\"ssl.connection_error\");\n  Stats::CounterSharedPtr counter = test_server_->counter(counter_name);\n  test_server_->waitForCounterGe(counter_name, 1);\n  EXPECT_EQ(1U, counter->value());\n  counter->reset();\n}\n\n// Server with RSA/ECDSA certificates and a client with 
only RSA cipher suites works.\nTEST_P(SslCertficateIntegrationTest, ServerRsaEcdsaClientRsaOnly) {\n  server_rsa_cert_ = true;\n  server_ecdsa_cert_ = true;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(rsaOnlyClientOptions());\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// Server has only an RSA certificate, client is only ECDSA capable, leads to connection fail.\nTEST_P(SslCertficateIntegrationTest, ServerRsaClientEcdsaOnly) {\n  server_rsa_cert_ = true;\n  server_ecdsa_cert_ = false;\n  client_ecdsa_cert_ = true;\n  initialize();\n  EXPECT_FALSE(\n      makeRawHttpConnection(makeSslClientConnection(ecdsaOnlyClientOptions()), absl::nullopt)\n          ->connected());\n  const std::string counter_name = listenerStatPrefix(\"ssl.connection_error\");\n  Stats::CounterSharedPtr counter = test_server_->counter(counter_name);\n  test_server_->waitForCounterGe(counter_name, 1);\n  EXPECT_EQ(1U, counter->value());\n  counter->reset();\n}\n\n// Server has only an ECDSA certificate, client is only ECDSA capable works.\nTEST_P(SslCertficateIntegrationTest, ServerEcdsaClientEcdsaOnly) {\n  server_rsa_cert_ = false;\n  server_ecdsa_cert_ = true;\n  client_ecdsa_cert_ = true;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(ecdsaOnlyClientOptions());\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// Server has RSA/ECDSA certificates, client is only ECDSA capable works.\nTEST_P(SslCertficateIntegrationTest, ServerRsaEcdsaClientEcdsaOnly) {\n  server_rsa_cert_ = true;\n  server_ecdsa_cert_ = true;\n  client_ecdsa_cert_ = true;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(ecdsaOnlyClientOptions());\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, 
false, false, &creator);\n  checkStats();\n}\n\n// Server has an RSA certificate with an OCSP response works.\nTEST_P(SslCertficateIntegrationTest, ServerRsaOnlyOcspResponse) {\n  server_rsa_cert_ = true;\n  server_rsa_cert_ocsp_staple_ = true;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(rsaOnlyClientOptions());\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// Server has an ECDSA certificate with an OCSP response works.\nTEST_P(SslCertficateIntegrationTest, ServerEcdsaOnlyOcspResponse) {\n  server_ecdsa_cert_ = true;\n  server_ecdsa_cert_ocsp_staple_ = true;\n  client_ecdsa_cert_ = true;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(ecdsaOnlyClientOptions());\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// Server has two certificates one with and one without OCSP response works under optional policy.\nTEST_P(SslCertficateIntegrationTest, BothEcdsaAndRsaOnlyRsaOcspResponse) {\n  server_rsa_cert_ = true;\n  server_rsa_cert_ocsp_staple_ = true;\n  server_ecdsa_cert_ = true;\n  client_ecdsa_cert_ = true;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection(ecdsaOnlyClientOptions());\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// Server has ECDSA and RSA certificates with OCSP responses and stapling required policy works.\nTEST_P(SslCertficateIntegrationTest, BothEcdsaAndRsaWithOcspResponseStaplingRequired) {\n  server_rsa_cert_ = true;\n  server_rsa_cert_ocsp_staple_ = true;\n  server_ecdsa_cert_ = true;\n  server_ecdsa_cert_ocsp_staple_ = true;\n  ocsp_staple_required_ = true;\n  client_ecdsa_cert_ = true;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    
return makeSslClientConnection(ecdsaOnlyClientOptions());\n  };\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n}\n\n// TODO(zuercher): write an additional OCSP integration test that validates behavior with an\n// expired OCSP response. (Requires OCSP client-side support in upstream TLS.)\n\n// TODO(mattklein123): Move this into a dedicated integration test for the tap transport socket as\n// well as add more tests.\nclass SslTapIntegrationTest : public SslIntegrationTest {\npublic:\n  void initialize() override {\n    // TODO(mattklein123): Merge/use the code in ConfigHelper::setTapTransportSocket().\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // The test supports tapping either the downstream or upstream connection, but not both.\n      if (upstream_tap_) {\n        setupUpstreamTap(bootstrap);\n      } else {\n        setupDownstreamTap(bootstrap);\n      }\n    });\n    SslIntegrationTest::initialize();\n    // This confuses our socket counting.\n    debug_with_s_client_ = false;\n  }\n\n  void setupUpstreamTap(envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* transport_socket =\n        bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket();\n    transport_socket->set_name(\"envoy.transport_sockets.tap\");\n    envoy::config::core::v3::TransportSocket raw_transport_socket;\n    raw_transport_socket.set_name(\"envoy.transport_sockets.raw_buffer\");\n    envoy::extensions::transport_sockets::tap::v3::Tap tap_config =\n        createTapConfig(raw_transport_socket);\n    tap_config.mutable_transport_socket()->MergeFrom(raw_transport_socket);\n    transport_socket->mutable_typed_config()->PackFrom(tap_config);\n  }\n\n  void setupDownstreamTap(envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* filter_chain =\n        
bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0);\n    // Configure inner SSL transport socket based on existing config.\n    envoy::config::core::v3::TransportSocket ssl_transport_socket;\n    auto* transport_socket = filter_chain->mutable_transport_socket();\n    ssl_transport_socket.Swap(transport_socket);\n    // Configure outer tap transport socket.\n    transport_socket->set_name(\"envoy.transport_sockets.tap\");\n    envoy::extensions::transport_sockets::tap::v3::Tap tap_config =\n        createTapConfig(ssl_transport_socket);\n    tap_config.mutable_transport_socket()->MergeFrom(ssl_transport_socket);\n    transport_socket->mutable_typed_config()->PackFrom(tap_config);\n  }\n\n  envoy::extensions::transport_sockets::tap::v3::Tap\n  createTapConfig(const envoy::config::core::v3::TransportSocket& inner_transport) {\n    envoy::extensions::transport_sockets::tap::v3::Tap tap_config;\n    tap_config.mutable_common_config()->mutable_static_config()->mutable_match()->set_any_match(\n        true);\n    auto* output_config =\n        tap_config.mutable_common_config()->mutable_static_config()->mutable_output_config();\n    if (max_rx_bytes_.has_value()) {\n      output_config->mutable_max_buffered_rx_bytes()->set_value(max_rx_bytes_.value());\n    }\n    if (max_tx_bytes_.has_value()) {\n      output_config->mutable_max_buffered_tx_bytes()->set_value(max_tx_bytes_.value());\n    }\n\n    auto* output_sink = output_config->mutable_sinks()->Add();\n    output_sink->set_format(format_);\n    output_sink->mutable_file_per_tap()->set_path_prefix(path_prefix_);\n    tap_config.mutable_transport_socket()->MergeFrom(inner_transport);\n    return tap_config;\n  }\n\n  std::string path_prefix_ = TestEnvironment::temporaryPath(\"ssl_trace\");\n  envoy::config::tap::v3::OutputSink::Format format_{\n      envoy::config::tap::v3::OutputSink::PROTO_BINARY};\n  absl::optional<uint64_t> max_rx_bytes_;\n  absl::optional<uint64_t> max_tx_bytes_;\n 
 bool upstream_tap_{};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SslTapIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Validate two back-to-back requests with binary proto output.\nTEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) {\n  initialize();\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n\n  // First request (ID will be +1 since the client will also bump).\n  const uint64_t first_id = Network::ConnectionImpl::nextGlobalIdForTest() + 1;\n  codec_client_ = makeHttpConnection(creator());\n  Http::TestRequestHeaderMapImpl post_request_headers{\n      {\":method\", \"POST\"},    {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"},\n      {\":authority\", \"host\"}, {\"x-lyft-user-id\", \"123\"},   {\"x-forwarded-for\", \"10.0.0.1\"}};\n  auto response =\n      sendRequestAndWaitForResponse(post_request_headers, 128, default_response_headers_, 256);\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(128, upstream_request_->bodyLength());\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(256, response->body().size());\n  checkStats();\n  envoy::config::core::v3::Address expected_local_address;\n  Network::Utility::addressToProtobufAddress(*codec_client_->connection()->remoteAddress(),\n                                             expected_local_address);\n  envoy::config::core::v3::Address expected_remote_address;\n  Network::Utility::addressToProtobufAddress(*codec_client_->connection()->localAddress(),\n                                             expected_remote_address);\n  codec_client_->close();\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_destroy\", 1);\n  envoy::data::tap::v3::TraceWrapper trace;\n  TestUtility::loadFromFile(fmt::format(\"{}_{}.pb\", 
path_prefix_, first_id), trace, *api_);\n  // Validate general expected properties in the trace.\n  EXPECT_EQ(first_id, trace.socket_buffered_trace().trace_id());\n  EXPECT_THAT(expected_local_address,\n              ProtoEq(trace.socket_buffered_trace().connection().local_address()));\n  EXPECT_THAT(expected_remote_address,\n              ProtoEq(trace.socket_buffered_trace().connection().remote_address()));\n  ASSERT_GE(trace.socket_buffered_trace().events().size(), 2);\n  EXPECT_TRUE(absl::StartsWith(trace.socket_buffered_trace().events(0).read().data().as_bytes(),\n                               \"POST /test/long/url HTTP/1.1\"));\n  EXPECT_TRUE(absl::StartsWith(trace.socket_buffered_trace().events(1).write().data().as_bytes(),\n                               \"HTTP/1.1 200 OK\"));\n  EXPECT_FALSE(trace.socket_buffered_trace().read_truncated());\n  EXPECT_FALSE(trace.socket_buffered_trace().write_truncated());\n\n  // Verify a second request hits a different file.\n  const uint64_t second_id = Network::ConnectionImpl::nextGlobalIdForTest() + 1;\n  codec_client_ = makeHttpConnection(creator());\n  Http::TestRequestHeaderMapImpl get_request_headers{\n      {\":method\", \"GET\"},     {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"},\n      {\":authority\", \"host\"}, {\"x-lyft-user-id\", \"123\"},   {\"x-forwarded-for\", \"10.0.0.1\"}};\n  response =\n      sendRequestAndWaitForResponse(get_request_headers, 128, default_response_headers_, 256);\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(128, upstream_request_->bodyLength());\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(256, response->body().size());\n  checkStats();\n  codec_client_->close();\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_destroy\", 2);\n  TestUtility::loadFromFile(fmt::format(\"{}_{}.pb\", path_prefix_, second_id), trace, *api_);\n  // Validate second connection ID.\n  
EXPECT_EQ(second_id, trace.socket_buffered_trace().trace_id());\n  ASSERT_GE(trace.socket_buffered_trace().events().size(), 2);\n  EXPECT_TRUE(absl::StartsWith(trace.socket_buffered_trace().events(0).read().data().as_bytes(),\n                               \"GET /test/long/url HTTP/1.1\"));\n  EXPECT_TRUE(absl::StartsWith(trace.socket_buffered_trace().events(1).write().data().as_bytes(),\n                               \"HTTP/1.1 200 OK\"));\n  EXPECT_FALSE(trace.socket_buffered_trace().read_truncated());\n  EXPECT_FALSE(trace.socket_buffered_trace().write_truncated());\n}\n\n// Verify that truncation works correctly across multiple transport socket frames.\nTEST_P(SslTapIntegrationTest, TruncationWithMultipleDataFrames) {\n  max_rx_bytes_ = 4;\n  max_tx_bytes_ = 5;\n\n  initialize();\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n\n  const uint64_t id = Network::ConnectionImpl::nextGlobalIdForTest() + 1;\n  codec_client_ = makeHttpConnection(creator());\n  const Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  auto result = codec_client_->startRequest(request_headers);\n  auto decoder = std::move(result.second);\n  Buffer::OwnedImpl data1(\"one\");\n  result.first.encodeData(data1, false);\n  Buffer::OwnedImpl data2(\"two\");\n  result.first.encodeData(data2, true);\n  waitForNextUpstreamRequest();\n  const Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  upstream_request_->encodeHeaders(response_headers, false);\n  Buffer::OwnedImpl data3(\"three\");\n  upstream_request_->encodeData(data3, false);\n  decoder->waitForBodyData(5);\n  Buffer::OwnedImpl data4(\"four\");\n  upstream_request_->encodeData(data4, true);\n  decoder->waitForEndStream();\n\n  checkStats();\n  codec_client_->close();\n  
test_server_->waitForCounterGe(\"http.config_test.downstream_cx_destroy\", 1);\n\n  envoy::data::tap::v3::TraceWrapper trace;\n  TestUtility::loadFromFile(fmt::format(\"{}_{}.pb\", path_prefix_, id), trace, *api_);\n\n  ASSERT_EQ(trace.socket_buffered_trace().events().size(), 2);\n  EXPECT_TRUE(trace.socket_buffered_trace().events(0).read().data().truncated());\n  EXPECT_TRUE(trace.socket_buffered_trace().events(1).write().data().truncated());\n  EXPECT_TRUE(trace.socket_buffered_trace().read_truncated());\n  EXPECT_TRUE(trace.socket_buffered_trace().write_truncated());\n}\n\n// Validate a single request with text proto output.\nTEST_P(SslTapIntegrationTest, RequestWithTextProto) {\n  format_ = envoy::config::tap::v3::OutputSink::PROTO_TEXT;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  const uint64_t id = Network::ConnectionImpl::nextGlobalIdForTest() + 1;\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  checkStats();\n  codec_client_->close();\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_destroy\", 1);\n  envoy::data::tap::v3::TraceWrapper trace;\n  TestUtility::loadFromFile(fmt::format(\"{}_{}.pb_text\", path_prefix_, id), trace, *api_);\n  // Test some obvious properties.\n  EXPECT_TRUE(absl::StartsWith(trace.socket_buffered_trace().events(0).read().data().as_bytes(),\n                               \"POST /test/long/url HTTP/1.1\"));\n  EXPECT_TRUE(absl::StartsWith(trace.socket_buffered_trace().events(1).write().data().as_bytes(),\n                               \"HTTP/1.1 200 OK\"));\n  EXPECT_TRUE(trace.socket_buffered_trace().read_truncated());\n  EXPECT_FALSE(trace.socket_buffered_trace().write_truncated());\n}\n\n// Validate a single request with JSON (body as string) output. 
This test uses an upstream tap.\nTEST_P(SslTapIntegrationTest, RequestWithJsonBodyAsStringUpstreamTap) {\n  upstream_tap_ = true;\n  max_rx_bytes_ = 5;\n  max_tx_bytes_ = 4;\n\n  format_ = envoy::config::tap::v3::OutputSink::JSON_BODY_AS_STRING;\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection({});\n  };\n  const uint64_t id = Network::ConnectionImpl::nextGlobalIdForTest() + 2;\n  testRouterRequestAndResponseWithBody(512, 1024, false, false, &creator);\n  checkStats();\n  codec_client_->close();\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_destroy\", 1);\n  test_server_.reset();\n\n  // This must be done after server shutdown so that connection pool connections are closed and\n  // the tap written.\n  envoy::data::tap::v3::TraceWrapper trace;\n  TestUtility::loadFromFile(fmt::format(\"{}_{}.json\", path_prefix_, id), trace, *api_);\n\n  // Test some obvious properties.\n  EXPECT_EQ(trace.socket_buffered_trace().events(0).write().data().as_string(), \"POST\");\n  EXPECT_EQ(trace.socket_buffered_trace().events(1).read().data().as_string(), \"HTTP/\");\n  EXPECT_TRUE(trace.socket_buffered_trace().read_truncated());\n  EXPECT_TRUE(trace.socket_buffered_trace().write_truncated());\n}\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/integration/ssl_integration_test.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/server.h\"\n#include \"test/integration/ssl_utility.h\"\n#include \"test/mocks/secret/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nclass SslIntegrationTestBase : public HttpIntegrationTest {\npublic:\n  SslIntegrationTestBase(Network::Address::IpVersion ip_version)\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ip_version) {}\n\n  void initialize() override;\n\n  void TearDown();\n\n  Network::ClientConnectionPtr makeSslConn() { return makeSslClientConnection({}); }\n  virtual Network::ClientConnectionPtr\n  makeSslClientConnection(const ClientSslTransportOptions& options);\n  void checkStats();\n\nprotected:\n  bool server_tlsv1_3_{false};\n  bool server_rsa_cert_{true};\n  bool server_rsa_cert_ocsp_staple_{false};\n  bool server_ecdsa_cert_{false};\n  bool server_ecdsa_cert_ocsp_staple_{false};\n  bool ocsp_staple_required_{false};\n  bool client_ecdsa_cert_{false};\n  // Set this true to debug SSL handshake issues with openssl s_client. The\n  // verbose trace will be in the logs, openssl must be installed separately.\n  bool debug_with_s_client_{false};\n\nprivate:\n  std::unique_ptr<ContextManager> context_manager_;\n};\n\nclass SslIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                           public SslIntegrationTestBase {\npublic:\n  SslIntegrationTest() : SslIntegrationTestBase(GetParam()) {}\n  void TearDown() override { SslIntegrationTestBase::TearDown(); };\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/io_handle_bio_test.cc",
    "content": "#include \"common/network/io_socket_error_impl.h\"\n\n#include \"extensions/transport_sockets/tls/io_handle_bio.h\"\n\n#include \"test/mocks/network/io_handle.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"openssl/ssl.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\nclass IoHandleBioTest : public testing::Test {\npublic:\n  IoHandleBioTest() { bio_ = BIO_new_io_handle(&io_handle_); }\n  ~IoHandleBioTest() override { BIO_free(bio_); }\n\n  BIO* bio_;\n  NiceMock<Network::MockIoHandle> io_handle_;\n};\n\nTEST_F(IoHandleBioTest, TestMiscApis) {\n  EXPECT_EQ(bio_->method->destroy(nullptr), 0);\n  EXPECT_EQ(bio_->method->bread(nullptr, nullptr, 0), 0);\n\n  EXPECT_DEATH(bio_->method->ctrl(bio_, BIO_C_GET_FD, 0, nullptr), \"should not be called\");\n  EXPECT_DEATH(bio_->method->ctrl(bio_, BIO_C_SET_FD, 0, nullptr), \"should not be called\");\n\n  int ret = bio_->method->ctrl(bio_, BIO_CTRL_RESET, 0, nullptr);\n  EXPECT_EQ(ret, 0);\n\n  ret = bio_->method->ctrl(bio_, BIO_CTRL_FLUSH, 0, nullptr);\n  EXPECT_EQ(ret, 1);\n\n  ret = bio_->method->ctrl(bio_, BIO_CTRL_SET_CLOSE, 1, nullptr);\n  EXPECT_EQ(ret, 1);\n\n  ret = bio_->method->ctrl(bio_, BIO_CTRL_GET_CLOSE, 0, nullptr);\n  EXPECT_EQ(ret, 1);\n\n  EXPECT_CALL(io_handle_, close())\n      .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result{\n          0, Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError)})));\n  bio_->init = 1;\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/ocsp/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"ocsp_test\",\n    srcs = [\n        \"ocsp_test.cc\",\n    ],\n    data = [\n        \":gen_ocsp_data\",\n    ],\n    external_deps = [\"ssl\"],\n    # TODO: Diagnose intermittent failure on Windows; this script uses the\n    # locally deployed openssl for test cert creation and manipulation, rather\n    # than envoy's current build of the most current openssl tool\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \"//source/common/filesystem:filesystem_lib\",\n        \"//source/extensions/transport_sockets/tls:utility_lib\",\n        \"//source/extensions/transport_sockets/tls/ocsp:ocsp_lib\",\n        \"//test/extensions/transport_sockets/tls:ssl_socket_test\",\n        \"//test/extensions/transport_sockets/tls:ssl_test_utils\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"asn1_utility_test\",\n    srcs = [\n        \"asn1_utility_test.cc\",\n    ],\n    external_deps = [\"ssl\"],\n    deps = [\n        \"//source/extensions/transport_sockets/tls/ocsp:asn1_utility_lib\",\n        \"//test/extensions/transport_sockets/tls:ssl_test_utils\",\n    ],\n)\n\nfilegroup(\n    name = \"gen_ocsp_data\",\n    srcs = [\"gen_unittest_ocsp_data.sh\"],\n)\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/ocsp/asn1_utility_test.cc",
    "content": "#include <limits>\n\n#include \"extensions/transport_sockets/tls/ocsp/asn1_utility.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace Ocsp {\n\nnamespace {\n\nclass Asn1UtilityTest : public testing::Test {\npublic:\n  // DER encoding of a single TLV `ASN.1` element.\n  // returns a pointer to the underlying buffer and transfers\n  // ownership to the caller.\n  uint8_t* asn1Encode(CBS& cbs, std::string& value, unsigned tag) {\n    bssl::ScopedCBB cbb;\n    CBB child;\n    auto data_head = reinterpret_cast<const uint8_t*>(value.c_str());\n\n    EXPECT_TRUE(CBB_init(cbb.get(), 0));\n    EXPECT_TRUE(CBB_add_asn1(cbb.get(), &child, tag));\n    EXPECT_TRUE(CBB_add_bytes(&child, data_head, value.size()));\n\n    uint8_t* buf;\n    size_t buf_len;\n    EXPECT_TRUE(CBB_finish(cbb.get(), &buf, &buf_len));\n\n    CBS_init(&cbs, buf, buf_len);\n    return buf;\n  }\n\n  template <typename T>\n  void expectParseResultErrorOnWrongTag(std::function<ParsingResult<T>(CBS&)> parse) {\n    CBS cbs;\n    CBS_init(&cbs, asn1_true.data(), asn1_true.size());\n    EXPECT_NO_THROW(absl::get<1>(parse(cbs)));\n  }\n\n  const std::vector<uint8_t> asn1_true = {0x1u, 1, 0xff};\n  const std::vector<uint8_t> asn1_empty_seq = {0x30, 0};\n};\n\nTEST_F(Asn1UtilityTest, ParseMethodsWrongTagTest) {\n  expectParseResultErrorOnWrongTag<std::vector<std::vector<uint8_t>>>([](CBS& cbs) {\n    return Asn1Utility::parseSequenceOf<std::vector<uint8_t>>(cbs, Asn1Utility::parseOctetString);\n  });\n  expectParseResultErrorOnWrongTag<std::string>(Asn1Utility::parseOid);\n  expectParseResultErrorOnWrongTag<Envoy::SystemTime>(Asn1Utility::parseGeneralizedTime);\n  expectParseResultErrorOnWrongTag<std::string>(Asn1Utility::parseInteger);\n  
expectParseResultErrorOnWrongTag<std::vector<uint8_t>>(Asn1Utility::parseOctetString);\n}\n\nTEST_F(Asn1UtilityTest, ToStringTest) {\n  CBS cbs;\n  absl::string_view str = \"test\";\n  CBS_init(&cbs, reinterpret_cast<const uint8_t*>(str.data()), str.size());\n  EXPECT_EQ(str, Asn1Utility::cbsToString(cbs));\n}\n\nTEST_F(Asn1UtilityTest, ParseSequenceOfEmptySequenceTest) {\n  CBS cbs;\n  CBS_init(&cbs, asn1_empty_seq.data(), asn1_empty_seq.size());\n\n  std::vector<std::vector<uint8_t>> vec;\n  auto actual = absl::get<0>(\n      Asn1Utility::parseSequenceOf<std::vector<uint8_t>>(cbs, Asn1Utility::parseOctetString));\n  EXPECT_EQ(vec, actual);\n}\n\nTEST_F(Asn1UtilityTest, ParseSequenceOfMultipleElementSequenceTest) {\n  std::vector<uint8_t> octet_seq = {\n      // SEQUENCE OF 3 2-byte elements\n      0x30,\n      3 * (2 + 2),\n      // 1st OCTET STRING\n      0x4u,\n      2,\n      0x1,\n      0x2,\n      // 2nd OCTET STRING\n      0x4u,\n      2,\n      0x3,\n      0x4,\n      // 3rd OCTET STRING\n      0x4u,\n      2,\n      0x5,\n      0x6,\n  };\n  CBS cbs;\n  CBS_init(&cbs, octet_seq.data(), octet_seq.size());\n\n  std::vector<std::vector<uint8_t>> vec = {{0x1, 0x2}, {0x3, 0x4}, {0x5, 0x6}};\n  auto actual = absl::get<0>(\n      Asn1Utility::parseSequenceOf<std::vector<uint8_t>>(cbs, Asn1Utility::parseOctetString));\n  EXPECT_EQ(vec, actual);\n}\n\nTEST_F(Asn1UtilityTest, SequenceOfLengthMismatchErrorTest) {\n  std::vector<uint8_t> malformed = {\n      // SEQUENCE OF length wrongfully 2 instead of 4 bytes\n      0x30,\n      3,\n      // 1st OCTET STRING\n      0x4u,\n      2,\n      0x1,\n      0x2,\n  };\n  CBS cbs;\n  CBS_init(&cbs, malformed.data(), malformed.size());\n\n  EXPECT_EQ(\"Input is not a well-formed ASN.1 OCTETSTRING\",\n            absl::get<1>(Asn1Utility::parseSequenceOf<std::vector<uint8_t>>(\n                cbs, Asn1Utility::parseOctetString)));\n}\n\nTEST_F(Asn1UtilityTest, SequenceOfMixedTypeErrorTest) {\n  std::vector<uint8_t> 
mixed_type = {\n      // SEQUENCE OF 1 OCTET STRING and 1 BOOLEAN\n      0x30,\n      7,\n      // OCTET STRING\n      0x4u,\n      2,\n      0x1,\n      0x2,\n      // BOOLEAN true\n      0x1u,\n      1,\n      0xff,\n  };\n  CBS cbs;\n  CBS_init(&cbs, mixed_type.data(), mixed_type.size());\n\n  EXPECT_EQ(\"Input is not a well-formed ASN.1 OCTETSTRING\",\n            absl::get<1>(Asn1Utility::parseSequenceOf<std::vector<uint8_t>>(\n                cbs, Asn1Utility::parseOctetString)));\n}\n\nTEST_F(Asn1UtilityTest, GetOptionalTest) {\n  CBS cbs;\n  CBS_init(&cbs, asn1_true.data(), asn1_true.size());\n\n  const uint8_t* start = CBS_data(&cbs);\n  EXPECT_EQ(absl::nullopt, absl::get<0>(Asn1Utility::getOptional(cbs, CBS_ASN1_INTEGER)));\n  EXPECT_EQ(start, CBS_data(&cbs));\n\n  CBS value = absl::get<0>(Asn1Utility::getOptional(cbs, CBS_ASN1_BOOLEAN)).value();\n  EXPECT_EQ(0xff, *CBS_data(&value));\n}\n\nTEST_F(Asn1UtilityTest, GetOptionalMissingValueTest) {\n  std::vector<uint8_t> missing_val_bool = {0x1u, 1};\n  CBS cbs;\n  CBS_init(&cbs, missing_val_bool.data(), missing_val_bool.size());\n\n  auto res = Asn1Utility::getOptional(cbs, CBS_ASN1_BOOLEAN);\n  EXPECT_TRUE(absl::holds_alternative<absl::string_view>(res));\n  EXPECT_EQ(\"Failed to parse ASN.1 element tag\", absl::get<1>(res));\n}\n\nTEST_F(Asn1UtilityTest, ParseOptionalTest) {\n  std::vector<uint8_t> nothing;\n  std::vector<uint8_t> explicit_optional_true = {0, 3, 0x1u, 1, 0xff};\n  std::vector<uint8_t> missing_val_bool = {0x1u, 1};\n\n  auto parse_bool = [](CBS& cbs) -> bool {\n    int res;\n    CBS_get_asn1_bool(&cbs, &res);\n    return res;\n  };\n\n  auto parse_bool_fail = [](CBS&) -> ParsingResult<bool> {\n    std::cout << \"failing\" << std::endl;\n    return absl::string_view{\"failed\"};\n  };\n\n  {\n    CBS cbs_explicit_optional_true;\n    CBS_init(&cbs_explicit_optional_true, explicit_optional_true.data(),\n             explicit_optional_true.size());\n\n    absl::optional<bool> expected(true);\n 
   EXPECT_EQ(expected, absl::get<0>(Asn1Utility::parseOptional<bool>(cbs_explicit_optional_true,\n                                                                      parse_bool, 0)));\n  }\n\n  {\n    CBS cbs_empty_seq;\n    CBS_init(&cbs_empty_seq, asn1_empty_seq.data(), asn1_empty_seq.size());\n    EXPECT_EQ(absl::nullopt, absl::get<0>(Asn1Utility::parseOptional<bool>(\n                                 cbs_empty_seq, parse_bool, CBS_ASN1_BOOLEAN)));\n  }\n\n  {\n    CBS cbs_nothing;\n    CBS_init(&cbs_nothing, nothing.data(), nothing.size());\n\n    EXPECT_EQ(absl::nullopt, absl::get<0>(Asn1Utility::parseOptional<bool>(cbs_nothing, parse_bool,\n                                                                           CBS_ASN1_BOOLEAN)));\n  }\n\n  {\n    CBS cbs_missing_val;\n    CBS_init(&cbs_missing_val, missing_val_bool.data(), missing_val_bool.size());\n\n    EXPECT_EQ(\"Failed to parse ASN.1 element tag\",\n              absl::get<1>(\n                  Asn1Utility::parseOptional<bool>(cbs_missing_val, parse_bool, CBS_ASN1_BOOLEAN)));\n  }\n\n  {\n    CBS cbs_explicit_optional_true;\n    CBS_init(&cbs_explicit_optional_true, explicit_optional_true.data(),\n             explicit_optional_true.size());\n\n    EXPECT_EQ(\"failed\", absl::get<1>(Asn1Utility::parseOptional<bool>(cbs_explicit_optional_true,\n                                                                      parse_bool_fail, 0)));\n  }\n}\n\nTEST_F(Asn1UtilityTest, ParseOidTest) {\n  std::string oid = \"1.1.1.1.1.1.1\";\n\n  bssl::ScopedCBB cbb;\n  CBB child;\n  ASSERT_TRUE(CBB_init(cbb.get(), 0));\n  ASSERT_TRUE(CBB_add_asn1(cbb.get(), &child, CBS_ASN1_OBJECT));\n  ASSERT_TRUE(CBB_add_asn1_oid_from_text(&child, oid.c_str(), oid.size()));\n\n  uint8_t* buf;\n  size_t buf_len;\n  CBS cbs;\n  ASSERT_TRUE(CBB_finish(cbb.get(), &buf, &buf_len));\n  CBS_init(&cbs, buf, buf_len);\n  bssl::UniquePtr<uint8_t> scoped(buf);\n\n  EXPECT_EQ(oid, 
absl::get<0>(Asn1Utility::parseOid(cbs)));\n}\n\nTEST_F(Asn1UtilityTest, ParseOidInvalidValueTest) {\n  // 0x80 is not valid within an OID\n  std::vector<uint8_t> invalid_oid = {0x6, 0x6, 0x29, 0x80, 0x1, 0x1, 0x1, 0x1};\n\n  CBS cbs;\n  CBS_init(&cbs, invalid_oid.data(), invalid_oid.size());\n\n  EXPECT_EQ(\"Failed to parse oid\", absl::get<1>(Asn1Utility::parseOid(cbs)));\n}\n\nTEST_F(Asn1UtilityTest, ParseGeneralizedTimeWrongFormatErrorTest) {\n  std::string invalid_time = \"\";\n  CBS cbs;\n  bssl::UniquePtr<uint8_t> scoped(asn1Encode(cbs, invalid_time, CBS_ASN1_GENERALIZEDTIME));\n  Asn1Utility::parseGeneralizedTime(cbs);\n  EXPECT_EQ(\"Input is not a well-formed ASN.1 GENERALIZEDTIME\",\n            absl::get<absl::string_view>(Asn1Utility::parseGeneralizedTime(cbs)));\n}\n\nTEST_F(Asn1UtilityTest, ParseGeneralizedTimeTest) {\n  std::string time = \"20070614185900z\";\n  std::string expected_time = \"20070614185900\";\n\n  CBS cbs;\n  bssl::UniquePtr<uint8_t> scoped(asn1Encode(cbs, time, CBS_ASN1_GENERALIZEDTIME));\n  absl::Time expected = TestUtility::parseTime(expected_time, \"%E4Y%m%d%H%M%S\");\n  auto actual = absl::get<Envoy::SystemTime>(Asn1Utility::parseGeneralizedTime(cbs));\n\n  EXPECT_EQ(absl::ToChronoTime(expected), actual);\n}\n\nTEST_F(Asn1UtilityTest, TestParseGeneralizedTimeRejectsNonUTCTime) {\n  std::string local_time = \"20070601145918\";\n  CBS cbs;\n  bssl::UniquePtr<uint8_t> scoped(asn1Encode(cbs, local_time, CBS_ASN1_GENERALIZEDTIME));\n\n  EXPECT_EQ(\"GENERALIZEDTIME must be in UTC\",\n            absl::get<absl::string_view>(Asn1Utility::parseGeneralizedTime(cbs)));\n}\n\nTEST_F(Asn1UtilityTest, TestParseGeneralizedTimeInvalidTime) {\n  std::string ymd = \"20070601Z\";\n  CBS cbs;\n  bssl::UniquePtr<uint8_t> scoped(asn1Encode(cbs, ymd, CBS_ASN1_GENERALIZEDTIME));\n\n  EXPECT_EQ(\"Error parsing string of GENERALIZEDTIME format\",\n            absl::get<1>(Asn1Utility::parseGeneralizedTime(cbs)));\n}\n\n// Taken from\n// 
https://boringssl.googlesource.com/boringssl/+/master/crypto/bytestring/cbb.c#531\n// because boringssl_fips does not yet implement `CBB_add_asn1_int64`\nvoid cbbAddAsn1Int64(CBB* cbb, int64_t value) {\n  if (value >= 0) {\n    ASSERT_TRUE(CBB_add_asn1_uint64(cbb, value));\n    return;\n  }\n\n  // Skip past bytes that are purely sign extension.\n  int start;\n  for (start = 7; start > 0; start--) {\n    uint8_t byte = (value >> start * 8) & 0xFF;\n    if (byte != 0xFF) {\n      break;\n    }\n\n    uint8_t next_byte = (value >> (start - 1) * 8) & 0xFF;\n    if ((next_byte & 0x80) == 0) {\n      break;\n    }\n  }\n\n  CBB child;\n  ASSERT_TRUE(CBB_add_asn1(cbb, &child, CBS_ASN1_INTEGER));\n  for (int i = start; i >= 0; i--) {\n    uint8_t byte = (value >> i * 8) & 0xFF;\n    ASSERT_TRUE(CBB_add_u8(&child, byte));\n  }\n  CBB_flush(cbb);\n}\n\nTEST_F(Asn1UtilityTest, ParseIntegerTest) {\n  std::vector<std::pair<int64_t, std::string>> integers = {\n      {1, \"01\"}, {10, \"0a\"}, {1000000, \"0f4240\"}, {-1, \"-01\"}, {-128, \"-80\"},\n  };\n  bssl::ScopedCBB cbb;\n  CBS cbs;\n  uint8_t* buf;\n  size_t buf_len;\n  for (auto const& int_and_hex : integers) {\n    ASSERT_TRUE(CBB_init(cbb.get(), 0));\n    cbbAddAsn1Int64(cbb.get(), int_and_hex.first);\n    ASSERT_TRUE(CBB_finish(cbb.get(), &buf, &buf_len));\n\n    CBS_init(&cbs, buf, buf_len);\n    bssl::UniquePtr<uint8_t> scoped_buf(buf);\n\n    EXPECT_EQ(int_and_hex.second, absl::get<0>(Asn1Utility::parseInteger(cbs)));\n    cbb.Reset();\n  }\n}\n\nTEST_F(Asn1UtilityTest, ParseOctetStringTest) {\n  std::vector<uint8_t> data = {0x1, 0x2, 0x3};\n  std::string data_str(data.begin(), data.end());\n  CBS cbs;\n  bssl::UniquePtr<uint8_t> scoped(asn1Encode(cbs, data_str, CBS_ASN1_OCTETSTRING));\n\n  EXPECT_EQ(data, absl::get<0>(Asn1Utility::parseOctetString(cbs)));\n}\n\nTEST_F(Asn1UtilityTest, SkipOptionalPresentAdvancesTest) {\n  CBS cbs;\n  CBS_init(&cbs, asn1_empty_seq.data(), asn1_empty_seq.size());\n\n  const uint8_t* start = 
CBS_data(&cbs);\n  EXPECT_NO_THROW(absl::get<0>(Asn1Utility::skipOptional(cbs, CBS_ASN1_SEQUENCE)));\n  EXPECT_EQ(start + 2, CBS_data(&cbs));\n}\n\nTEST_F(Asn1UtilityTest, SkipOptionalNotPresentDoesNotAdvanceTest) {\n  CBS cbs;\n  CBS_init(&cbs, asn1_empty_seq.data(), asn1_empty_seq.size());\n\n  const uint8_t* start = CBS_data(&cbs);\n  EXPECT_NO_THROW(absl::get<0>(Asn1Utility::skipOptional(cbs, CBS_ASN1_BOOLEAN)));\n  EXPECT_EQ(start, CBS_data(&cbs));\n}\n\nTEST_F(Asn1UtilityTest, SkipOptionalMalformedTagTest) {\n  std::vector<uint8_t> malformed_seq = {0x30};\n  CBS cbs;\n  CBS_init(&cbs, malformed_seq.data(), malformed_seq.size());\n\n  EXPECT_EQ(\"Failed to parse ASN.1 element tag\",\n            absl::get<1>(Asn1Utility::skipOptional(cbs, CBS_ASN1_SEQUENCE)));\n}\n\nTEST_F(Asn1UtilityTest, SkipMalformedTagTest) {\n  std::vector<uint8_t> malformed_seq = {0x30};\n  CBS cbs;\n  CBS_init(&cbs, malformed_seq.data(), malformed_seq.size());\n\n  EXPECT_EQ(\"Failed to parse ASN.1 element\",\n            absl::get<1>(Asn1Utility::skip(cbs, CBS_ASN1_SEQUENCE)));\n}\n\n} // namespace\n\n} // namespace Ocsp\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/ocsp/gen_unittest_ocsp_data.sh",
    "content": "#!/bin/bash\n#\n# Create test certificates and OCSP responses for them for unittests.\n\nset -e\n\ntrap cleanup EXIT\ncleanup() {\n  rm -f ./*_index*\n  rm -f ./*.csr\n  rm -f ./*.cnf\n  rm -f ./*_serial*\n}\n\n[[ -z \"${TEST_TMPDIR}\" ]] && TEST_TMPDIR=\"$(cd \"$(dirname \"$0\")\" && pwd)\"\n\nTEST_OCSP_DIR=\"${TEST_TMPDIR}/ocsp_test_data\"\nmkdir -p \"${TEST_OCSP_DIR}\"\n\nrm -f \"${TEST_OCSP_DIR}\"/*\n\ncd \"$TEST_OCSP_DIR\" || exit 1\n\n##################################################\n# Make the configuration file\n##################################################\n\n# $1=<certificate name> $2=<CA name>\ngenerate_config() {\ntouch \"${1}_index.txt\"\necho \"unique_subject = no\" > \"${1}_index.txt.attr\"\necho 1000 > \"${1}_serial\"\n\n(cat << EOF\n[ req ]\ndefault_bits            = 2048\ndistinguished_name      = req_distinguished_name\n\n[ req_distinguished_name ]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = $1\ncommonName_default = $1\ncommonName_max  = 64\n\n[ ca ]\ndefault_ca = CA_default\n\n[ CA_default ]\ndir           = ${TEST_OCSP_DIR}\ncerts         = ${TEST_OCSP_DIR}\nnew_certs_dir = ${TEST_OCSP_DIR}\ndatabase      = ${TEST_OCSP_DIR}/$2_index.txt\nserial        = ${TEST_OCSP_DIR}/$2_serial\n\nprivate_key   = ${TEST_OCSP_DIR}/$2_key.pem\ncertificate   = ${TEST_OCSP_DIR}/$2_cert.pem\n\ndefault_days  = 375\ndefault_md    = sha256\npreserve      = no\npolicy        = policy_default\n\n[ policy_default ]\ncountryName             = optional\nstateOrProvinceName     = optional\norganizationName        = optional\norganizationalUnitName  = optional\ncommonName              = supplied\nemailAddress      
      = optional\n\n\n[ v3_ca ]\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always,issuer\nbasicConstraints = critical, CA:true\nkeyUsage = critical, digitalSignature, cRLSign, keyCertSign\n\n[ must_staple ]\ntlsfeature = status_request\nEOF\n) > \"${1}.cnf\"\n}\n\n# $1=<CA name> $2=[issuer name]\ngenerate_ca() {\n  local extra_args=()\n  if [[ -n \"$2\" ]]; then\n      extra_args=(-CA \"${2}_cert.pem\" -CAkey \"${2}_key.pem\" -CAcreateserial)\n  fi\n  openssl genrsa -out \"${1}_key.pem\" 2048\n  openssl req -new -key \"${1}_key.pem\" -out \"${1}_cert.csr\" \\\n    -config \"${1}.cnf\" -batch -sha256\n  openssl x509 -req \\\n    -in \"${1}_cert.csr\" -signkey \"${1}_key.pem\" -out \"${1}_cert.pem\" \\\n    -extensions v3_ca -extfile \"${1}.cnf\" \"${extra_args[@]}\"\n}\n\n# $1=<certificate name> $2=<CA name> $3=[req args]\ngenerate_rsa_cert() {\n  openssl genrsa -out \"${1}_key.pem\" 2048\n  openssl req -new -key \"${1}_key.pem\" -out \"${1}_cert.csr\" -config \"${1}.cnf\" -batch -sha256\n  openssl ca -config \"${1}.cnf\" -notext -batch -in \"${1}_cert.csr\" -out \"${1}_cert.pem\" \"${@:3}\"\n}\n\n# $1=<certificate name> $2=<CA name> $3=[req args]\ngenerate_ecdsa_cert() {\n  openssl ecparam -name secp256r1 -genkey -out \"${1}_key.pem\"\n  openssl req -new -key \"${1}_key.pem\" -out \"${1}_cert.csr\" -config \"${1}.cnf\" -batch -sha256\n  openssl ca -config \"${1}.cnf\" -notext -batch -in \"${1}_cert.csr\" -out \"${1}_cert.pem\" \"${@:3}\"\n}\n\n# $1=<certificate name> $2=<CA name> $3=<test name> $4=[extra args]\ngenerate_ocsp_response() {\n  # Generate an OCSP request\n  openssl ocsp -CAfile \"${2}_cert.pem\" -issuer \"${2}_cert.pem\" \\\n    -cert \"${1}_cert.pem\" -reqout \"${3}_ocsp_req.der\"\n\n  # Generate the OCSP response\n  openssl ocsp -CA \"${2}_cert.pem\" \\\n    -rkey \"${2}_key.pem\" -rsigner \"${2}_cert.pem\" -index \"${2}_index.txt\" \\\n    -reqin \"${3}_ocsp_req.der\" -respout \"${3}_ocsp_resp.der\" \"${@:4}\"\n}\n\n# 
$1=<certificate name> $2=<CA name>\nrevoke_certificate() {\n  openssl ca -revoke \"${1}_cert.pem\" -keyfile \"${2}_key.pem\" -cert \"${2}_cert.pem\" -config \"${2}.cnf\"\n}\n\n# $1=<test name> $2=<CA name>\ndump_ocsp_details() {\n  openssl ocsp -respin \"${1}_ocsp_resp.der\" -issuer \"${2}_cert.pem\" -resp_text \\\n    -out \"${1}_ocsp_resp_details.txt\"\n}\n\n# Set up the CA\ngenerate_config ca ca\ngenerate_ca ca\n\n# Set up an intermediate CA with a different database\ngenerate_config intermediate_ca intermediate_ca\ngenerate_ca intermediate_ca ca\n\n# Generate valid cert and OCSP response\ngenerate_config good ca\ngenerate_rsa_cert good ca\ngenerate_ocsp_response good ca good -ndays 7\ndump_ocsp_details good ca\n\n# Generate OCSP response with the responder key hash instead of name\ngenerate_ocsp_response good ca responder_key_hash -resp_key_id\n\n# Generate and revoke a cert and create OCSP response\ngenerate_config revoked ca\ngenerate_rsa_cert revoked ca -extensions must_staple\nrevoke_certificate revoked ca\ngenerate_ocsp_response revoked ca revoked\n\n# Create OCSP response for cert unknown to the CA\ngenerate_ocsp_response good intermediate_ca unknown\n\n# Generate cert with ECDSA key and OCSP response\ngenerate_config ecdsa ca\ngenerate_ecdsa_cert ecdsa ca\ngenerate_ocsp_response ecdsa ca ecdsa\n\n# Generate an OCSP request/response for multiple certs\nopenssl ocsp -CAfile ca_cert.pem -issuer ca_cert.pem \\\n  -cert good_cert.pem -cert revoked_cert.pem -reqout multiple_cert_ocsp_req.der\nopenssl ocsp -CA ca_cert.pem \\\n  -rkey ca_key.pem -rsigner ca_cert.pem -index ca_index.txt \\\n  -reqin multiple_cert_ocsp_req.der -respout multiple_cert_ocsp_resp.der\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/ocsp/ocsp_test.cc",
    "content": "#include \"common/filesystem/filesystem_impl.h\"\n\n#include \"extensions/transport_sockets/tls/ocsp/ocsp.h\"\n#include \"extensions/transport_sockets/tls/utility.h\"\n\n#include \"test/extensions/transport_sockets/tls/ssl_test_utility.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"openssl/x509v3.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace Ocsp {\n\nnamespace {\n\nnamespace CertUtility = Envoy::Extensions::TransportSockets::Tls::Utility;\n\nclass OcspFullResponseParsingTest : public testing::Test {\npublic:\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    TestEnvironment::exec({TestEnvironment::runfilesPath(\n        \"test/extensions/transport_sockets/tls/ocsp/gen_unittest_ocsp_data.sh\")});\n  }\n\n  std::string fullPath(std::string filename) {\n    return TestEnvironment::substitute(\"{{ test_tmpdir }}/ocsp_test_data/\" + filename);\n  }\n\n  std::vector<uint8_t> readFile(std::string filename) {\n    auto str = TestEnvironment::readFileToStringForTest(fullPath(filename));\n    return {str.begin(), str.end()};\n  }\n\n  void setup(std::string response_filename) {\n    auto der_response = readFile(response_filename);\n    response_ = std::make_unique<OcspResponseWrapper>(der_response, time_system_);\n    EXPECT_EQ(response_->rawBytes(), der_response);\n  }\n\n  void expectSuccessful() {\n    EXPECT_EQ(OcspResponseStatus::Successful, response_->getResponseStatus());\n  }\n\n  void expectCertificateMatches(std::string cert_filename) {\n    auto cert_ = readCertFromFile(fullPath(cert_filename));\n    EXPECT_TRUE(response_->matchesCertificate(*cert_));\n  }\n\nprotected:\n  Event::SimulatedTimeSystem time_system_;\n  OcspResponseWrapperPtr 
response_;\n};\n\nTEST_F(OcspFullResponseParsingTest, GoodCertTest) {\n  setup(\"good_ocsp_resp.der\");\n  expectSuccessful();\n  expectCertificateMatches(\"good_cert.pem\");\n\n  auto cert = readCertFromFile(fullPath(\"revoked_cert.pem\"));\n  EXPECT_FALSE(response_->matchesCertificate(*cert));\n\n  // Contains nextUpdate that is in the future\n  EXPECT_FALSE(response_->isExpired());\n  EXPECT_GT(response_->secondsUntilExpiration(), 0);\n}\n\nTEST_F(OcspFullResponseParsingTest, RevokedCertTest) {\n  setup(\"revoked_ocsp_resp.der\");\n  expectSuccessful();\n  expectCertificateMatches(\"revoked_cert.pem\");\n  EXPECT_TRUE(response_->isExpired());\n  EXPECT_EQ(response_->secondsUntilExpiration(), 0);\n}\n\nTEST_F(OcspFullResponseParsingTest, UnknownCertTest) {\n  setup(\"unknown_ocsp_resp.der\");\n  expectSuccessful();\n  expectCertificateMatches(\"good_cert.pem\");\n  EXPECT_TRUE(response_->isExpired());\n}\n\nTEST_F(OcspFullResponseParsingTest, ExpiredResponseTest) {\n  auto next_week = time_system_.systemTime() + std::chrono::hours(8 * 24);\n  time_system_.setSystemTime(next_week);\n  setup(\"good_ocsp_resp.der\");\n  // nextUpdate is present but in the past\n  EXPECT_TRUE(response_->isExpired());\n  EXPECT_EQ(response_->secondsUntilExpiration(), 0);\n}\n\nTEST_F(OcspFullResponseParsingTest, ThisUpdateAfterNowTest) {\n  auto past_time = TestUtility::parseTime(\"2000 01 01\", \"%Y %m %d\");\n  time_system_.setSystemTime(absl::ToChronoTime(past_time));\n  EXPECT_LOG_CONTAINS(\"warning\", \"OCSP Response thisUpdate field is set in the future\",\n                      setup(\"good_ocsp_resp.der\"));\n}\n\nTEST_F(OcspFullResponseParsingTest, ResponderIdKeyHashTest) {\n  setup(\"responder_key_hash_ocsp_resp.der\");\n  expectSuccessful();\n  expectCertificateMatches(\"good_cert.pem\");\n  EXPECT_TRUE(response_->isExpired());\n}\n\nTEST_F(OcspFullResponseParsingTest, MultiCertResponseTest) {\n  auto resp_bytes = readFile(\"multiple_cert_ocsp_resp.der\");\n  
EXPECT_THROW_WITH_MESSAGE(OcspResponseWrapper response(resp_bytes, time_system_), EnvoyException,\n                            \"OCSP Response must be for one certificate only\");\n}\n\nTEST_F(OcspFullResponseParsingTest, UnsuccessfulResponseTest) {\n  std::vector<uint8_t> data = {\n      // SEQUENCE\n      0x30, 3,\n      // OcspResponseStatus - InternalError\n      0xau, 1, 2,\n      // no response bytes\n  };\n  EXPECT_THROW_WITH_MESSAGE(OcspResponseWrapper response(data, time_system_), EnvoyException,\n                            \"OCSP response was unsuccessful\");\n}\n\nTEST_F(OcspFullResponseParsingTest, NoResponseBodyTest) {\n  std::vector<uint8_t> data = {\n      // SEQUENCE\n      0x30, 3,\n      // OcspResponseStatus - Success\n      0xau, 1, 0,\n      // no response bytes\n  };\n  EXPECT_THROW_WITH_MESSAGE(OcspResponseWrapper response(data, time_system_), EnvoyException,\n                            \"OCSP response has no body\");\n}\n\nTEST_F(OcspFullResponseParsingTest, OnlyOneResponseInByteStringTest) {\n  auto resp_bytes = readFile(\"good_ocsp_resp.der\");\n  auto resp2_bytes = readFile(\"revoked_ocsp_resp.der\");\n  resp_bytes.insert(resp_bytes.end(), resp2_bytes.begin(), resp2_bytes.end());\n\n  EXPECT_THROW_WITH_MESSAGE(OcspResponseWrapper response_wrapper(resp_bytes, time_system_),\n                            EnvoyException, \"Data contained more than a single OCSP response\");\n}\n\nTEST_F(OcspFullResponseParsingTest, ParseOcspResponseWrongTagTest) {\n  auto resp_bytes = readFile(\"good_ocsp_resp.der\");\n  // Change the SEQUENCE tag to an `OCTETSTRING` tag\n  resp_bytes[0] = 0x4u;\n  EXPECT_THROW_WITH_MESSAGE(OcspResponseWrapper response_wrapper(resp_bytes, time_system_),\n                            EnvoyException, \"OCSP Response is not a well-formed ASN.1 SEQUENCE\");\n}\n\nclass Asn1OcspUtilityTest : public testing::Test {\npublic:\n  void expectResponseStatus(uint8_t code, OcspResponseStatus expected) {\n    std::vector<uint8_t> 
asn1_enum = {0xau, 1, code};\n    CBS cbs;\n    CBS_init(&cbs, asn1_enum.data(), asn1_enum.size());\n\n    EXPECT_EQ(expected, Asn1OcspUtility::parseResponseStatus(cbs));\n  }\n\n  void expectThrowOnWrongTag(std::function<void(CBS&)> parse) {\n    CBS cbs;\n    CBS_init(&cbs, asn1_true.data(), asn1_true.size());\n    EXPECT_THROW(parse(cbs), EnvoyException);\n  }\n\n  const std::vector<uint8_t> asn1_true = {0x1u, 1, 0xff};\n};\n\nTEST_F(Asn1OcspUtilityTest, ParseResponseStatusTest) {\n  expectResponseStatus(0, OcspResponseStatus::Successful);\n  expectResponseStatus(1, OcspResponseStatus::MalformedRequest);\n  expectResponseStatus(2, OcspResponseStatus::InternalError);\n  expectResponseStatus(3, OcspResponseStatus::TryLater);\n  expectResponseStatus(5, OcspResponseStatus::SigRequired);\n  expectResponseStatus(6, OcspResponseStatus::Unauthorized);\n}\n\nTEST_F(Asn1OcspUtilityTest, ParseMethodWrongTagTest) {\n  expectThrowOnWrongTag(Asn1OcspUtility::parseResponseBytes);\n  expectThrowOnWrongTag(Asn1OcspUtility::parseBasicOcspResponse);\n  expectThrowOnWrongTag(Asn1OcspUtility::parseResponseData);\n  expectThrowOnWrongTag(Asn1OcspUtility::parseSingleResponse);\n  expectThrowOnWrongTag(Asn1OcspUtility::parseCertId);\n  expectThrowOnWrongTag(Asn1OcspUtility::parseResponseStatus);\n}\n\nTEST_F(Asn1OcspUtilityTest, ParseResponseDataBadResponderIdVariantTest) {\n  std::vector<uint8_t> data = {\n      // SEQUENCE\n      0x30,\n      6,\n      // version\n      0,\n      1,\n      0,\n      // Invalid Responder ID tag 3\n      3,\n      1,\n      0,\n  };\n  CBS cbs;\n  CBS_init(&cbs, data.data(), data.size());\n  EXPECT_THROW_WITH_MESSAGE(Asn1OcspUtility::parseResponseData(cbs), EnvoyException,\n                            \"Unknown choice for Responder ID: 3\");\n}\n\nTEST_F(Asn1OcspUtilityTest, ParseOcspResponseBytesMissingTest) {\n  std::vector<uint8_t> data = {\n      // SEQUENCE\n      0x30, 3,\n      // OcspResponseStatus - InternalError\n      0xau, 1, 2,\n      // 
no response bytes\n  };\n  CBS cbs;\n  CBS_init(&cbs, data.data(), data.size());\n  auto response = Asn1OcspUtility::parseOcspResponse(cbs);\n  EXPECT_EQ(response->status_, OcspResponseStatus::InternalError);\n  EXPECT_TRUE(response->response_ == nullptr);\n}\n\nTEST_F(Asn1OcspUtilityTest, ParseResponseStatusUnknownVariantTest) {\n  std::vector<uint8_t> bad_enum_variant = {0xau, 1, 4};\n  CBS cbs;\n  CBS_init(&cbs, bad_enum_variant.data(), bad_enum_variant.size());\n  EXPECT_THROW_WITH_MESSAGE(Asn1OcspUtility::parseResponseStatus(cbs), EnvoyException,\n                            \"Unknown OCSP Response Status variant: 4\");\n}\n\nTEST_F(Asn1OcspUtilityTest, ParseResponseBytesNoOctetStringTest) {\n  std::string oid_str = \"1.1.1.1.1.1.1\";\n  bssl::ScopedCBB cbb;\n  CBB seq, oid, obj;\n  uint8_t* buf;\n  size_t buf_len;\n\n  ASSERT_TRUE(CBB_init(cbb.get(), 0));\n  ASSERT_TRUE(CBB_add_asn1(cbb.get(), &seq, CBS_ASN1_SEQUENCE));\n  ASSERT_TRUE(CBB_add_asn1(&seq, &oid, CBS_ASN1_OBJECT));\n  ASSERT_TRUE(CBB_add_asn1_oid_from_text(&oid, oid_str.c_str(), oid_str.size()));\n  // Empty sequence instead of `OCTETSTRING` with the response\n  ASSERT_TRUE(CBB_add_asn1(&seq, &obj, CBS_ASN1_SEQUENCE));\n  ASSERT_TRUE(CBB_finish(cbb.get(), &buf, &buf_len));\n\n  CBS cbs;\n  CBS_init(&cbs, buf, buf_len);\n  bssl::UniquePtr<uint8_t> scoped(buf);\n\n  EXPECT_THROW_WITH_MESSAGE(Asn1OcspUtility::parseResponseBytes(cbs), EnvoyException,\n                            \"Expected ASN.1 OCTETSTRING for response\");\n}\n\nTEST_F(Asn1OcspUtilityTest, ParseResponseBytesUnknownResponseTypeTest) {\n  std::string oid_str = \"1.1.1.1.1.1.1\";\n  bssl::ScopedCBB cbb;\n  CBB seq, oid, obj;\n  uint8_t* buf;\n  size_t buf_len;\n\n  ASSERT_TRUE(CBB_init(cbb.get(), 0));\n  ASSERT_TRUE(CBB_add_asn1(cbb.get(), &seq, CBS_ASN1_SEQUENCE));\n  ASSERT_TRUE(CBB_add_asn1(&seq, &oid, CBS_ASN1_OBJECT));\n  ASSERT_TRUE(CBB_add_asn1_oid_from_text(&oid, oid_str.c_str(), oid_str.size()));\n  
ASSERT_TRUE(CBB_add_asn1(&seq, &obj, CBS_ASN1_OCTETSTRING));\n  ASSERT_TRUE(CBB_add_bytes(&obj, reinterpret_cast<const uint8_t*>(\"\\x1\\x2\\x3\"), 3));\n  ASSERT_TRUE(CBB_finish(cbb.get(), &buf, &buf_len));\n\n  CBS cbs;\n  CBS_init(&cbs, buf, buf_len);\n  bssl::UniquePtr<uint8_t> scoped(buf);\n\n  EXPECT_THROW_WITH_MESSAGE(Asn1OcspUtility::parseResponseBytes(cbs), EnvoyException,\n                            \"Unknown OCSP Response type with OID: 1.1.1.1.1.1.1\");\n}\n\n} // namespace\n\n} // namespace Ocsp\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/ssl_certs_test.h",
    "content": "#pragma once\n\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::ReturnRef;\n\nnamespace Envoy {\nclass SslCertsTest : public testing::Test {\npublic:\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    TestEnvironment::exec({TestEnvironment::runfilesPath(\n        \"test/extensions/transport_sockets/tls/gen_unittest_certs.sh\")});\n    TestEnvironment::exec({TestEnvironment::runfilesPath(\n        \"test/extensions/transport_sockets/tls/ocsp/gen_unittest_ocsp_data.sh\")});\n  }\n\nprotected:\n  SslCertsTest() : api_(Api::createApiForTest(store_, time_system_)) {\n    ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_));\n  }\n\n  Event::SimulatedTimeSystem time_system_;\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context_;\n  Stats::IsolatedStoreImpl store_;\n  Api::ApiPtr api_;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/ssl_socket_test.cc",
    "content": "#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/network/transport_socket.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/transport_socket_options_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/context_impl.h\"\n#include \"extensions/transport_sockets/tls/private_key/private_key_manager_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/extensions/transport_sockets/tls/ssl_certs_test.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/ca_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/extensions_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/password_protected_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/san_dns2_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/san_dns3_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/san_dns4_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/san_dns_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/san_uri_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert_info.h\"\n#include 
\"test/extensions/transport_sockets/tls/test_private_key_method_provider.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/io_handle.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/secret/mocks.h\"\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"openssl/ssl.h\"\n\nusing testing::_;\nusing testing::ContainsRegex;\nusing testing::DoAll;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::StrictMock;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace {\n\n/**\n * A base class to hold the options for testUtil() and testUtilV2().\n */\nclass TestUtilOptionsBase {\npublic:\n  const std::vector<std::string>& expectedClientCertUri() const {\n    return expected_client_cert_uri_;\n  }\n  const std::string& expectedServerStats() const { return expected_server_stats_; }\n  bool expectSuccess() const { return expect_success_; }\n  Network::Address::IpVersion version() const { return version_; }\n\nprotected:\n  TestUtilOptionsBase(bool expect_success, Network::Address::IpVersion version)\n      : expect_success_(expect_success), version_(version) {}\n\n  void setExpectedClientCertUri(const std::string& expected_client_cert_uri) {\n    expected_client_cert_uri_ = {expected_client_cert_uri};\n  }\n\n  void setExpectedServerStats(const std::string& expected_server_stats) {\n    
expected_server_stats_ = expected_server_stats;\n  }\n\nprivate:\n  const bool expect_success_;\n  const Network::Address::IpVersion version_;\n\n  std::string expected_server_stats_;\n  std::vector<std::string> expected_client_cert_uri_;\n};\n\n/**\n * A class to hold the options for testUtil().\n */\nclass TestUtilOptions : public TestUtilOptionsBase {\npublic:\n  TestUtilOptions(const std::string& client_ctx_yaml, const std::string& server_ctx_yaml,\n                  bool expect_success, Network::Address::IpVersion version)\n      : TestUtilOptionsBase(expect_success, version), client_ctx_yaml_(client_ctx_yaml),\n        server_ctx_yaml_(server_ctx_yaml), expect_no_cert_(false), expect_no_cert_chain_(false),\n        expect_private_key_method_(false),\n        expected_server_close_event_(Network::ConnectionEvent::RemoteClose) {\n    if (expect_success) {\n      setExpectedServerStats(\"ssl.handshake\");\n    } else {\n      setExpectedServerStats(\"ssl.fail_verify_error\");\n    }\n  }\n\n  const std::string& clientCtxYaml() const { return client_ctx_yaml_; }\n  const std::string& serverCtxYaml() const { return server_ctx_yaml_; }\n\n  TestUtilOptions& setExpectedServerStats(const std::string& expected_server_stats) {\n    TestUtilOptionsBase::setExpectedServerStats(expected_server_stats);\n    return *this;\n  }\n\n  bool expectNoCert() const { return expect_no_cert_; }\n\n  TestUtilOptions& setExpectNoCert() {\n    expect_no_cert_ = true;\n    return *this;\n  }\n\n  bool expectNoCertChain() const { return expect_no_cert_chain_; }\n\n  TestUtilOptions& setExpectNoCertChain() {\n    expect_no_cert_chain_ = true;\n    return *this;\n  }\n\n  TestUtilOptions& setExpectedClientCertUri(const std::string& expected_client_cert_uri) {\n    TestUtilOptionsBase::setExpectedClientCertUri(expected_client_cert_uri);\n    return *this;\n  }\n\n  TestUtilOptions& setExpectedSha256Digest(const std::string& expected_sha256_digest) {\n    expected_sha256_digest_ = 
expected_sha256_digest;\n    return *this;\n  }\n\n  const std::string& expectedSha256Digest() const { return expected_sha256_digest_; }\n\n  TestUtilOptions& setExpectedSha1Digest(const std::string& expected_sha1_digest) {\n    expected_sha1_digest_ = expected_sha1_digest;\n    return *this;\n  }\n\n  const std::string& expectedSha1Digest() const { return expected_sha1_digest_; }\n\n  TestUtilOptions& setExpectedLocalUri(const std::string& expected_local_uri) {\n    expected_local_uri_ = {expected_local_uri};\n    return *this;\n  }\n\n  const std::vector<std::string>& expectedLocalUri() const { return expected_local_uri_; }\n\n  TestUtilOptions& setExpectedSerialNumber(const std::string& expected_serial_number) {\n    expected_serial_number_ = expected_serial_number;\n    return *this;\n  }\n\n  const std::string& expectedSerialNumber() const { return expected_serial_number_; }\n\n  TestUtilOptions& setExpectedPeerIssuer(const std::string& expected_peer_issuer) {\n    expected_peer_issuer_ = expected_peer_issuer;\n    return *this;\n  }\n\n  const std::string& expectedPeerIssuer() const { return expected_peer_issuer_; }\n\n  TestUtilOptions& setExpectedPeerSubject(const std::string& expected_peer_subject) {\n    expected_peer_subject_ = expected_peer_subject;\n    return *this;\n  }\n\n  const std::string& expectedPeerSubject() const { return expected_peer_subject_; }\n\n  TestUtilOptions& setExpectedLocalSubject(const std::string& expected_local_subject) {\n    expected_local_subject_ = expected_local_subject;\n    return *this;\n  }\n\n  const std::string& expectedLocalSubject() const { return expected_local_subject_; }\n\n  TestUtilOptions& setExpectedPeerCert(const std::string& expected_peer_cert) {\n    expected_peer_cert_ = expected_peer_cert;\n    return *this;\n  }\n\n  const std::string& expectedPeerCert() const { return expected_peer_cert_; }\n\n  TestUtilOptions& setExpectedPeerCertChain(const std::string& expected_peer_cert_chain) {\n    
expected_peer_cert_chain_ = expected_peer_cert_chain;\n    return *this;\n  }\n\n  const std::string& expectedPeerCertChain() const { return expected_peer_cert_chain_; }\n\n  TestUtilOptions& setExpectedValidFromTimePeerCert(const std::string& expected_valid_from) {\n    expected_valid_from_peer_cert_ = expected_valid_from;\n    return *this;\n  }\n\n  const std::string& expectedValidFromTimePeerCert() const {\n    return expected_valid_from_peer_cert_;\n  }\n\n  TestUtilOptions& setExpectedExpirationTimePeerCert(const std::string& expected_expiration) {\n    expected_expiration_peer_cert_ = expected_expiration;\n    return *this;\n  }\n\n  const std::string& expectedExpirationTimePeerCert() const {\n    return expected_expiration_peer_cert_;\n  }\n\n  TestUtilOptions& setPrivateKeyMethodExpected(bool expected_method) {\n    expect_private_key_method_ = expected_method;\n    return *this;\n  }\n\n  bool expectedPrivateKeyMethod() const { return expect_private_key_method_; }\n\n  TestUtilOptions& setExpectedServerCloseEvent(Network::ConnectionEvent expected_event) {\n    expected_server_close_event_ = expected_event;\n    return *this;\n  }\n\n  Network::ConnectionEvent expectedServerCloseEvent() const { return expected_server_close_event_; }\n\n  TestUtilOptions& setExpectedOcspResponse(const std::string& expected_ocsp_response) {\n    expected_ocsp_response_ = expected_ocsp_response;\n    return *this;\n  }\n\n  const std::string& expectedOcspResponse() const { return expected_ocsp_response_; }\n\n  TestUtilOptions& enableOcspStapling() {\n    ocsp_stapling_enabled_ = true;\n    return *this;\n  }\n\n  bool ocspStaplingEnabled() const { return ocsp_stapling_enabled_; }\n\nprivate:\n  const std::string client_ctx_yaml_;\n  const std::string server_ctx_yaml_;\n\n  bool expect_no_cert_;\n  bool expect_no_cert_chain_;\n  bool expect_private_key_method_;\n  Network::ConnectionEvent expected_server_close_event_;\n  std::string expected_sha256_digest_;\n  std::string 
expected_sha1_digest_;\n  std::vector<std::string> expected_local_uri_;\n  std::string expected_serial_number_;\n  std::string expected_peer_issuer_;\n  std::string expected_peer_subject_;\n  std::string expected_local_subject_;\n  std::string expected_peer_cert_;\n  std::string expected_peer_cert_chain_;\n  std::string expected_valid_from_peer_cert_;\n  std::string expected_expiration_peer_cert_;\n  std::string expected_ocsp_response_;\n  bool ocsp_stapling_enabled_{false};\n};\n\nvoid testUtil(const TestUtilOptions& options) {\n  Event::SimulatedTimeSystem time_system;\n\n  Stats::TestUtil::TestStore server_stats_store;\n  Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, time_system);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      server_factory_context;\n  ON_CALL(server_factory_context, api()).WillByDefault(ReturnRef(*server_api));\n\n  // For private key method testing.\n  NiceMock<Ssl::MockContextManager> context_manager;\n  Extensions::PrivateKeyMethodProvider::TestPrivateKeyMethodFactory test_factory;\n  Registry::InjectFactory<Ssl::PrivateKeyMethodProviderInstanceFactory>\n      test_private_key_method_factory(test_factory);\n  PrivateKeyMethodManagerImpl private_key_method_manager;\n  if (options.expectedPrivateKeyMethod()) {\n    EXPECT_CALL(server_factory_context, sslContextManager())\n        .WillOnce(ReturnRef(context_manager))\n        .WillRepeatedly(ReturnRef(context_manager));\n    EXPECT_CALL(context_manager, privateKeyMethodManager())\n        .WillOnce(ReturnRef(private_key_method_manager))\n        .WillRepeatedly(ReturnRef(private_key_method_manager));\n  }\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext server_tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(options.serverCtxYaml()),\n                            server_tls_context);\n  auto server_cfg =\n      std::make_unique<ServerContextConfigImpl>(server_tls_context, 
server_factory_context);\n  ContextManagerImpl manager(*time_system);\n  ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager,\n                                                   server_stats_store, std::vector<std::string>{});\n\n  Event::DispatcherPtr dispatcher = server_api->allocateDispatcher(\"test_thread\");\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(options.version()), nullptr, true);\n  Network::MockTcpListenerCallbacks callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Network::ListenerPtr listener =\n      dispatcher->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(options.clientCtxYaml()),\n                            client_tls_context);\n\n  Stats::TestUtil::TestStore client_stats_store;\n  Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      client_factory_context;\n  ON_CALL(client_factory_context, api()).WillByDefault(ReturnRef(*client_api));\n\n  auto client_cfg =\n      std::make_unique<ClientContextConfigImpl>(client_tls_context, client_factory_context);\n  ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager,\n                                                   client_stats_store);\n  Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection(\n      socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      client_ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n  Network::ConnectionPtr server_connection;\n  Network::MockConnectionCallbacks server_connection_callbacks;\n  StreamInfo::MockStreamInfo stream_info;\n  EXPECT_CALL(callbacks, onAccept_(_))\n      
.WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection = dispatcher->createServerConnection(\n            std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr),\n            stream_info);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n      }));\n\n  if (options.ocspStaplingEnabled()) {\n    const SslHandshakerImpl* ssl_socket =\n        dynamic_cast<const SslHandshakerImpl*>(client_connection->ssl().get());\n    SSL_enable_ocsp_stapling(ssl_socket->ssl());\n  }\n\n  Network::MockConnectionCallbacks client_connection_callbacks;\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n  client_connection->connect();\n\n  size_t connect_count = 0;\n  auto connect_second_time = [&]() {\n    if (++connect_count == 2) {\n      if (!options.expectedSha256Digest().empty()) {\n        // Assert twice to ensure a cached value is returned and still valid.\n        EXPECT_EQ(options.expectedSha256Digest(),\n                  server_connection->ssl()->sha256PeerCertificateDigest());\n        EXPECT_EQ(options.expectedSha256Digest(),\n                  server_connection->ssl()->sha256PeerCertificateDigest());\n      }\n      if (!options.expectedSha1Digest().empty()) {\n        // Assert twice to ensure a cached value is returned and still valid.\n        EXPECT_EQ(options.expectedSha1Digest(),\n                  server_connection->ssl()->sha1PeerCertificateDigest());\n        EXPECT_EQ(options.expectedSha1Digest(),\n                  server_connection->ssl()->sha1PeerCertificateDigest());\n      }\n      // Assert twice to ensure a cached value is returned and still valid.\n      EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate());\n      EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate());\n\n      if (!options.expectedLocalUri().empty()) {\n        // Assert twice to ensure a 
cached value is returned and still valid.\n        EXPECT_EQ(options.expectedLocalUri(), server_connection->ssl()->uriSanLocalCertificate());\n        EXPECT_EQ(options.expectedLocalUri(), server_connection->ssl()->uriSanLocalCertificate());\n      }\n      EXPECT_EQ(options.expectedSerialNumber(),\n                server_connection->ssl()->serialNumberPeerCertificate());\n      if (!options.expectedPeerIssuer().empty()) {\n        EXPECT_EQ(options.expectedPeerIssuer(), server_connection->ssl()->issuerPeerCertificate());\n      }\n      if (!options.expectedPeerSubject().empty()) {\n        EXPECT_EQ(options.expectedPeerSubject(),\n                  server_connection->ssl()->subjectPeerCertificate());\n      }\n      if (!options.expectedLocalSubject().empty()) {\n        EXPECT_EQ(options.expectedLocalSubject(),\n                  server_connection->ssl()->subjectLocalCertificate());\n      }\n      if (!options.expectedPeerCert().empty()) {\n        std::string urlencoded = absl::StrReplaceAll(\n            options.expectedPeerCert(),\n            {{\"\\n\", \"%0A\"}, {\" \", \"%20\"}, {\"+\", \"%2B\"}, {\"/\", \"%2F\"}, {\"=\", \"%3D\"}});\n        // Assert twice to ensure a cached value is returned and still valid.\n        EXPECT_EQ(urlencoded, server_connection->ssl()->urlEncodedPemEncodedPeerCertificate());\n        EXPECT_EQ(urlencoded, server_connection->ssl()->urlEncodedPemEncodedPeerCertificate());\n      }\n      if (!options.expectedPeerCertChain().empty()) {\n        std::string cert_chain = absl::StrReplaceAll(\n            options.expectedPeerCertChain(),\n            {{\"\\n\", \"%0A\"}, {\" \", \"%20\"}, {\"+\", \"%2B\"}, {\"/\", \"%2F\"}, {\"=\", \"%3D\"}});\n        // Assert twice to ensure a cached value is returned and still valid.\n        EXPECT_EQ(cert_chain, server_connection->ssl()->urlEncodedPemEncodedPeerCertificateChain());\n        EXPECT_EQ(cert_chain, server_connection->ssl()->urlEncodedPemEncodedPeerCertificateChain());\n      
}\n      if (!options.expectedValidFromTimePeerCert().empty()) {\n        const std::string formatted = TestUtility::formatTime(\n            server_connection->ssl()->validFromPeerCertificate().value(), \"%b %e %H:%M:%S %Y GMT\");\n        EXPECT_EQ(options.expectedValidFromTimePeerCert(), formatted);\n      }\n      if (!options.expectedExpirationTimePeerCert().empty()) {\n        const std::string formatted = TestUtility::formatTime(\n            server_connection->ssl()->expirationPeerCertificate().value(), \"%b %e %H:%M:%S %Y GMT\");\n        EXPECT_EQ(options.expectedExpirationTimePeerCert(), formatted);\n      }\n      if (options.expectNoCert()) {\n        EXPECT_FALSE(server_connection->ssl()->peerCertificatePresented());\n        EXPECT_FALSE(server_connection->ssl()->validFromPeerCertificate().has_value());\n        EXPECT_FALSE(server_connection->ssl()->expirationPeerCertificate().has_value());\n        EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->sha256PeerCertificateDigest());\n        EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->sha1PeerCertificateDigest());\n        EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->urlEncodedPemEncodedPeerCertificate());\n        EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->subjectPeerCertificate());\n        EXPECT_EQ(std::vector<std::string>{}, server_connection->ssl()->dnsSansPeerCertificate());\n      }\n      if (options.expectNoCertChain()) {\n        EXPECT_EQ(EMPTY_STRING,\n                  server_connection->ssl()->urlEncodedPemEncodedPeerCertificateChain());\n      }\n\n      const SslHandshakerImpl* ssl_socket =\n          dynamic_cast<const SslHandshakerImpl*>(client_connection->ssl().get());\n      SSL* client_ssl_socket = ssl_socket->ssl();\n      const uint8_t* response_head;\n      size_t response_len;\n      SSL_get0_ocsp_response(client_ssl_socket, &response_head, &response_len);\n      std::string ocsp_response{reinterpret_cast<const char*>(response_head), response_len};\n      
EXPECT_EQ(options.expectedOcspResponse(), ocsp_response);\n\n      // By default, the session is not created with session resumption. The\n      // client should see a session ID but the server should not.\n      EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->sessionId());\n      EXPECT_NE(EMPTY_STRING, client_connection->ssl()->sessionId());\n\n      server_connection->close(Network::ConnectionCloseType::NoFlush);\n      client_connection->close(Network::ConnectionCloseType::NoFlush);\n      dispatcher->exit();\n    }\n  };\n\n  size_t close_count = 0;\n  auto close_second_time = [&close_count, &dispatcher]() {\n    if (++close_count == 2) {\n      dispatcher->exit();\n    }\n  };\n\n  if (options.expectSuccess()) {\n    EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n    EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n    EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n    EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n  } else {\n    EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { close_second_time(); }));\n    EXPECT_CALL(server_connection_callbacks, onEvent(options.expectedServerCloseEvent()))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { close_second_time(); }));\n  }\n\n  dispatcher->run(Event::Dispatcher::RunType::Block);\n\n  if (!options.expectedServerStats().empty()) {\n    EXPECT_EQ(1UL, server_stats_store.counter(options.expectedServerStats()).value());\n  }\n}\n\n/**\n * A class to hold the options for testUtilV2().\n */\nclass TestUtilOptionsV2 : public TestUtilOptionsBase 
{\npublic:\n  TestUtilOptionsV2(\n      const envoy::config::listener::v3::Listener& listener,\n      const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& client_ctx_proto,\n      bool expect_success, Network::Address::IpVersion version)\n      : TestUtilOptionsBase(expect_success, version), listener_(listener),\n        client_ctx_proto_(client_ctx_proto), transport_socket_options_(nullptr) {\n    if (expect_success) {\n      setExpectedServerStats(\"ssl.handshake\").setExpectedClientStats(\"ssl.handshake\");\n    } else {\n      setExpectedServerStats(\"ssl.fail_verify_error\")\n          .setExpectedClientStats(\"ssl.connection_error\");\n    }\n  }\n\n  const envoy::config::listener::v3::Listener& listener() const { return listener_; }\n  const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& clientCtxProto() const {\n    return client_ctx_proto_;\n  }\n  const std::string& expectedClientStats() const { return expected_client_stats_; }\n\n  TestUtilOptionsV2& setExpectedServerStats(const std::string& expected_server_stats) {\n    TestUtilOptionsBase::setExpectedServerStats(expected_server_stats);\n    return *this;\n  }\n\n  TestUtilOptionsV2& setExpectedClientCertUri(const std::string& expected_client_cert_uri) {\n    TestUtilOptionsBase::setExpectedClientCertUri(expected_client_cert_uri);\n    return *this;\n  }\n\n  TestUtilOptionsV2& setExpectedClientStats(const std::string& expected_client_stats) {\n    expected_client_stats_ = expected_client_stats;\n    return *this;\n  }\n\n  TestUtilOptionsV2& setClientSession(const std::string& client_session) {\n    client_session_ = client_session;\n    return *this;\n  }\n\n  const std::string& clientSession() const { return client_session_; }\n\n  TestUtilOptionsV2& setExpectedProtocolVersion(const std::string& expected_protocol_version) {\n    expected_protocol_version_ = expected_protocol_version;\n    return *this;\n  }\n\n  const std::string& expectedProtocolVersion() 
const { return expected_protocol_version_; }\n\n  TestUtilOptionsV2& setExpectedCiphersuite(const std::string& expected_cipher_suite) {\n    expected_cipher_suite_ = expected_cipher_suite;\n    return *this;\n  }\n\n  const std::string& expectedCiphersuite() const { return expected_cipher_suite_; }\n\n  TestUtilOptionsV2& setExpectedServerCertDigest(const std::string& expected_server_cert_digest) {\n    expected_server_cert_digest_ = expected_server_cert_digest;\n    return *this;\n  }\n\n  const std::string& expectedServerCertDigest() const { return expected_server_cert_digest_; }\n\n  TestUtilOptionsV2&\n  setExpectedRequestedServerName(const std::string& expected_requested_server_name) {\n    expected_requested_server_name_ = expected_requested_server_name;\n    return *this;\n  }\n\n  const std::string& expectedRequestedServerName() const { return expected_requested_server_name_; }\n\n  TestUtilOptionsV2& setExpectedALPNProtocol(const std::string& expected_alpn_protocol) {\n    expected_alpn_protocol_ = expected_alpn_protocol;\n    return *this;\n  }\n\n  const std::string& expectedALPNProtocol() const { return expected_alpn_protocol_; }\n\n  TestUtilOptionsV2&\n  setTransportSocketOptions(Network::TransportSocketOptionsSharedPtr transport_socket_options) {\n    transport_socket_options_ = transport_socket_options;\n    return *this;\n  }\n\n  Network::TransportSocketOptionsSharedPtr transportSocketOptions() const {\n    return transport_socket_options_;\n  }\n\n  TestUtilOptionsV2& setExpectedTransportFailureReasonContains(\n      const std::string& expected_transport_failure_reason_contains) {\n    expected_transport_failure_reason_contains_ = expected_transport_failure_reason_contains;\n    return *this;\n  }\n\n  const std::string& expectedTransportFailureReasonContains() const {\n    return expected_transport_failure_reason_contains_;\n  }\n\nprivate:\n  const envoy::config::listener::v3::Listener& listener_;\n  const 
envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& client_ctx_proto_;\n  std::string expected_client_stats_;\n\n  std::string client_session_;\n  std::string expected_cipher_suite_;\n  std::string expected_protocol_version_;\n  std::string expected_server_cert_digest_;\n  std::string expected_requested_server_name_;\n  std::string expected_alpn_protocol_;\n  Network::TransportSocketOptionsSharedPtr transport_socket_options_;\n  std::string expected_transport_failure_reason_contains_;\n};\n\nconst std::string testUtilV2(const TestUtilOptionsV2& options) {\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(*time_system);\n  std::string new_session = EMPTY_STRING;\n\n  // SNI-based selection logic isn't happening in SslSocket anymore.\n  ASSERT(options.listener().filter_chains().size() == 1);\n  const auto& filter_chain = options.listener().filter_chains(0);\n  std::vector<std::string> server_names(filter_chain.filter_chain_match().server_names().begin(),\n                                        filter_chain.filter_chain_match().server_names().end());\n  Stats::TestUtil::TestStore server_stats_store;\n  Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, time_system);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      server_factory_context;\n  ON_CALL(server_factory_context, api()).WillByDefault(ReturnRef(*server_api));\n\n  auto server_cfg = std::make_unique<ServerContextConfigImpl>(\n      filter_chain.hidden_envoy_deprecated_tls_context(), server_factory_context);\n  ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager,\n                                                   server_stats_store, server_names);\n\n  Event::DispatcherPtr dispatcher(server_api->allocateDispatcher(\"test_thread\"));\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(options.version()), nullptr, true);\n  
NiceMock<Network::MockTcpListenerCallbacks> callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Network::ListenerPtr listener =\n      dispatcher->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  Stats::TestUtil::TestStore client_stats_store;\n  Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      client_factory_context;\n  ON_CALL(client_factory_context, api()).WillByDefault(ReturnRef(*client_api));\n\n  auto client_cfg =\n      std::make_unique<ClientContextConfigImpl>(options.clientCtxProto(), client_factory_context);\n  ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager,\n                                                   client_stats_store);\n  Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection(\n      socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      client_ssl_socket_factory.createTransportSocket(options.transportSocketOptions()), nullptr);\n\n  if (!options.clientSession().empty()) {\n    const SslHandshakerImpl* ssl_socket =\n        dynamic_cast<const SslHandshakerImpl*>(client_connection->ssl().get());\n    SSL* client_ssl_socket = ssl_socket->ssl();\n    SSL_CTX* client_ssl_context = SSL_get_SSL_CTX(client_ssl_socket);\n    SSL_SESSION* client_ssl_session =\n        SSL_SESSION_from_bytes(reinterpret_cast<const uint8_t*>(options.clientSession().data()),\n                               options.clientSession().size(), client_ssl_context);\n    int rc = SSL_set_session(client_ssl_socket, client_ssl_session);\n    ASSERT(rc == 1);\n    SSL_SESSION_free(client_ssl_session);\n  }\n\n  Network::ConnectionPtr server_connection;\n  Network::MockConnectionCallbacks server_connection_callbacks;\n  StreamInfo::MockStreamInfo stream_info;\n  EXPECT_CALL(callbacks, onAccept_(_))\n      
.WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        std::string sni = options.transportSocketOptions() != nullptr &&\n                                  options.transportSocketOptions()->serverNameOverride().has_value()\n                              ? options.transportSocketOptions()->serverNameOverride().value()\n                              : options.clientCtxProto().sni();\n        socket->setRequestedServerName(sni);\n        server_connection = dispatcher->createServerConnection(\n            std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr),\n            stream_info);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n      }));\n\n  Network::MockConnectionCallbacks client_connection_callbacks;\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n  client_connection->connect();\n\n  size_t connect_count = 0;\n  auto connect_second_time = [&]() {\n    if (++connect_count == 2) {\n      if (!options.expectedServerCertDigest().empty()) {\n        EXPECT_EQ(options.expectedServerCertDigest(),\n                  client_connection->ssl()->sha256PeerCertificateDigest());\n      }\n      if (!options.expectedALPNProtocol().empty()) {\n        EXPECT_EQ(options.expectedALPNProtocol(), client_connection->nextProtocol());\n      }\n      EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate());\n      const SslHandshakerImpl* ssl_socket =\n          dynamic_cast<const SslHandshakerImpl*>(client_connection->ssl().get());\n      SSL* client_ssl_socket = ssl_socket->ssl();\n      if (!options.expectedProtocolVersion().empty()) {\n        // Assert twice to ensure a cached value is returned and still valid.\n        EXPECT_EQ(options.expectedProtocolVersion(), client_connection->ssl()->tlsVersion());\n        EXPECT_EQ(options.expectedProtocolVersion(), client_connection->ssl()->tlsVersion());\n      }\n      if 
(!options.expectedCiphersuite().empty()) {\n        EXPECT_EQ(options.expectedCiphersuite(), client_connection->ssl()->ciphersuiteString());\n        const SSL_CIPHER* cipher =\n            SSL_get_cipher_by_value(client_connection->ssl()->ciphersuiteId());\n        EXPECT_NE(nullptr, cipher);\n        EXPECT_EQ(options.expectedCiphersuite(), SSL_CIPHER_get_name(cipher));\n      }\n\n      absl::optional<std::string> server_ssl_requested_server_name;\n      const SslHandshakerImpl* server_ssl_socket =\n          dynamic_cast<const SslHandshakerImpl*>(server_connection->ssl().get());\n      SSL* server_ssl = server_ssl_socket->ssl();\n      auto requested_server_name = SSL_get_servername(server_ssl, TLSEXT_NAMETYPE_host_name);\n      if (requested_server_name != nullptr) {\n        server_ssl_requested_server_name = std::string(requested_server_name);\n      }\n\n      if (!options.expectedRequestedServerName().empty()) {\n        EXPECT_TRUE(server_ssl_requested_server_name.has_value());\n        EXPECT_EQ(options.expectedRequestedServerName(), server_ssl_requested_server_name.value());\n      } else {\n        EXPECT_FALSE(server_ssl_requested_server_name.has_value());\n      }\n\n      SSL_SESSION* client_ssl_session = SSL_get_session(client_ssl_socket);\n      EXPECT_TRUE(SSL_SESSION_is_resumable(client_ssl_session));\n      uint8_t* session_data;\n      size_t session_len;\n      int rc = SSL_SESSION_to_bytes(client_ssl_session, &session_data, &session_len);\n      ASSERT(rc == 1);\n      new_session = std::string(reinterpret_cast<char*>(session_data), session_len);\n      OPENSSL_free(session_data);\n      server_connection->close(Network::ConnectionCloseType::NoFlush);\n      client_connection->close(Network::ConnectionCloseType::NoFlush);\n      dispatcher->exit();\n    }\n  };\n\n  size_t close_count = 0;\n  auto close_second_time = [&close_count, &dispatcher]() {\n    if (++close_count == 2) {\n      dispatcher->exit();\n    }\n  };\n\n  if 
(options.expectSuccess()) {\n    EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n    EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n          EXPECT_EQ(options.expectedRequestedServerName(),\n                    server_connection->requestedServerName());\n          connect_second_time();\n        }));\n    EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n    EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n  } else {\n    EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { close_second_time(); }));\n    EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { close_second_time(); }));\n  }\n\n  dispatcher->run(Event::Dispatcher::RunType::Block);\n\n  if (!options.expectedServerStats().empty()) {\n    EXPECT_EQ(1UL, server_stats_store.counter(options.expectedServerStats()).value())\n        << options.expectedServerStats();\n  }\n\n  if (!options.expectedClientStats().empty()) {\n    EXPECT_EQ(1UL, client_stats_store.counter(options.expectedClientStats()).value());\n  }\n\n  if (options.expectSuccess()) {\n    EXPECT_EQ(\"\", client_connection->transportFailureReason());\n    EXPECT_EQ(\"\", server_connection->transportFailureReason());\n  } else {\n    EXPECT_THAT(std::string(client_connection->transportFailureReason()),\n                ContainsRegex(options.expectedTransportFailureReasonContains()));\n    EXPECT_NE(\"\", server_connection->transportFailureReason());\n  }\n\n  return new_session;\n}\n\n// Configure the listener with 
unittest{cert,key}.pem and ca_cert.pem.\n// Configure the client with expired_san_uri_{cert,key}.pem\nvoid configureServerAndExpiredClientCertificate(\n    envoy::config::listener::v3::Listener& listener,\n    envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& client) {\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(\n      TestEnvironment::substitute(\"{{ test_tmpdir }}/unittestcert.pem\"));\n  server_cert->mutable_private_key()->set_filename(\n      TestEnvironment::substitute(\"{{ test_tmpdir }}/unittestkey.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/expired_san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/expired_san_uri_key.pem\"));\n}\n\n} // namespace\n\nclass SslSocketTest : public SslCertsTest,\n                      public 
testing::WithParamInterface<Network::Address::IpVersion> {\nprotected:\n  SslSocketTest()\n      : dispatcher_(api_->allocateDispatcher(\"test_thread\")), stream_info_(api_->timeSource()) {}\n\n  void testClientSessionResumption(const std::string& server_ctx_yaml,\n                                   const std::string& client_ctx_yaml, bool expect_reuse,\n                                   const Network::Address::IpVersion version);\n\n  Event::DispatcherPtr dispatcher_;\n  StreamInfo::StreamInfoImpl stream_info_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SslSocketTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(SslSocketTest, GetCertDigest) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH)\n               .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH)\n               .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, GetCertDigestInvalidFiles) {\n  const 
std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(\n      test_options.setExpectedSha256Digest(\"\").setExpectedSha1Digest(\"\").setExpectedSerialNumber(\n          \"\"));\n}\n\nTEST_P(SslSocketTest, GetCertDigestInline) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n\n  // From test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem.\n  server_cert->mutable_certificate_chain()->set_inline_bytes(\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n          \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\")));\n\n  // From test/extensions/transport_sockets/tls/test_data/san_dns_key.pem.\n  server_cert->mutable_private_key()->set_inline_bytes(\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n          \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\")));\n\n  // From test/extensions/transport_sockets/tls/test_data/ca_certificates.pem.\n  
filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n      ->mutable_common_tls_context()\n      ->mutable_validation_context()\n      ->mutable_trusted_ca()\n      ->set_inline_bytes(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n          \"{{ test_rundir \"\n          \"}}/test/extensions/transport_sockets/tls/test_data/ca_certificates.pem\")));\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_ctx;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client_ctx.mutable_common_tls_context()->add_tls_certificates();\n\n  // From test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem.\n  client_cert->mutable_certificate_chain()->set_inline_bytes(\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n          \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\")));\n\n  // From test/extensions/transport_sockets/tls/test_data/san_uri_key.pem.\n  client_cert->mutable_private_key()->set_inline_bytes(\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n          \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\")));\n\n  TestUtilOptionsV2 test_options(listener, client_ctx, true, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedServerCertDigest(TEST_SAN_DNS_CERT_256_HASH));\n}\n\nTEST_P(SslSocketTest, GetCertDigestServerCertWithIntermediateCA) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    
tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH)\n               .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH)\n               .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, GetCertDigestServerCertWithoutCommonName) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_only_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_only_dns_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH)\n               .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH)\n               .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, 
GetUriWithUriSan) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      verify_subject_alt_name: \"spiffe://lyft.com/test-team\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n               .setExpectedSerialNumber(TEST_SAN_URI_CERT_SERIAL));\n}\n\n// Verify that IP SANs work with an IPv4 address specified in the validation context.\nTEST_P(SslSocketTest, Ipv4San) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/upstreamcacert.pem\"\n      verify_subject_alt_name: \"127.0.0.1\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostcert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostkey.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options);\n}\n\n// Verify that IP SANs work with an IPv6 address specified 
in the validation context.\nTEST_P(SslSocketTest, Ipv6San) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/upstreamcacert.pem\"\n      verify_subject_alt_name: \"::1\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostcert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostkey.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options);\n}\n\nTEST_P(SslSocketTest, GetNoUriWithDnsSan) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n)EOF\";\n\n  // The SAN field only has DNS, expect \"\" for uriSanPeerCertificate().\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedSerialNumber(TEST_SAN_DNS_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, NoCert) {\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  const std::string server_ctx_yaml = 
R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.no_certificate\")\n               .setExpectNoCert()\n               .setExpectNoCertChain());\n}\n\n// Prefer ECDSA certificate when multiple RSA certificates are present and the\n// client is RSA/ECDSA capable. We validate TLSv1.2 only here, since we validate\n// the e2e behavior on TLSv1.2/1.3 in ssl_integration_test.\nTEST_P(SslSocketTest, MultiCertPreferEcdsa) {\n  const std::string client_ctx_yaml = absl::StrCat(R\"EOF(\n    common_tls_context:\n      tls_params:\n        tls_minimum_protocol_version: TLSv1_2\n        tls_maximum_protocol_version: TLSv1_2\n        cipher_suites:\n        - ECDHE-ECDSA-AES128-GCM-SHA256\n        - ECDHE-RSA-AES128-GCM-SHA256\n      validation_context:\n        verify_certificate_hash: )EOF\",\n                                                   TEST_SELFSIGNED_ECDSA_P256_CERT_256_HASH);\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options);\n}\n\nTEST_P(SslSocketTest, 
GetUriWithLocalUriSan) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedLocalUri(\"spiffe://lyft.com/test-team\")\n               .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, GetSubjectsWithBothCerts) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n  require_client_certificate: true\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)\n               .setExpectedPeerIssuer(\n                   \"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\")\n               .setExpectedPeerSubject(\n                   \"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\")\n               .setExpectedLocalSubject(\n                   \"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\"));\n}\n\nTEST_P(SslSocketTest, GetPeerCert) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n  require_client_certificate: true\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  std::string expected_peer_cert =\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n          \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"));\n  
testUtil(test_options.setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)\n               .setExpectedPeerIssuer(\n                   \"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\")\n               .setExpectedPeerSubject(\n                   \"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\")\n               .setExpectedLocalSubject(\n                   \"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\")\n               .setExpectedPeerCert(expected_peer_cert));\n}\n\nTEST_P(SslSocketTest, GetPeerCertAcceptUntrusted) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem\"\n      trust_chain_verification: ACCEPT_UNTRUSTED\n  require_client_certificate: true\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  std::string expected_peer_cert =\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n          \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"));\n  testUtil(test_options.setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)\n               .setExpectedPeerIssuer(\n                  
 \"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\")\n               .setExpectedPeerSubject(\n                   \"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\")\n               .setExpectedLocalSubject(\n                   \"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\")\n               .setExpectedPeerCert(expected_peer_cert));\n}\n\nTEST_P(SslSocketTest, NoCertUntrustedNotPermitted) {\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem\"\n      trust_chain_verification: VERIFY_TRUST_CHAIN\n      verify_certificate_hash: \"0000000000000000000000000000000000000000000000000000000000000000\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_no_cert\"));\n}\n\nTEST_P(SslSocketTest, NoCertUntrustedPermitted) {\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem\"\n      trust_chain_verification: ACCEPT_UNTRUSTED\n      
verify_certificate_hash: \"0000000000000000000000000000000000000000000000000000000000000000\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.no_certificate\")\n               .setExpectNoCert()\n               .setExpectNoCertChain());\n}\n\nTEST_P(SslSocketTest, GetPeerCertChain) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_chain.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n  require_client_certificate: true\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  std::string expected_peer_cert_chain =\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n          \"{{ test_rundir \"\n          \"}}/test/extensions/transport_sockets/tls/test_data/no_san_chain.pem\"));\n  testUtil(test_options.setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)\n               .setExpectedPeerCertChain(expected_peer_cert_chain));\n}\n\nTEST_P(SslSocketTest, GetIssueExpireTimesPeerCert) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n  require_client_certificate: true\n)EOF\";\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)\n               .setExpectedValidFromTimePeerCert(TEST_NO_SAN_CERT_NOT_BEFORE)\n               .setExpectedExpirationTimePeerCert(TEST_NO_SAN_CERT_NOT_AFTER));\n}\n\nTEST_P(SslSocketTest, FailedClientAuthCaVerificationNoClientCert) {\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n  require_client_certificate: true\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_no_cert\"));\n}\n\nTEST_P(SslSocketTest, FailedClientAuthCaVerification) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      
certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n}\n\nTEST_P(SslSocketTest, FailedClientAuthSanVerificationNoClientCert) {\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      verify_subject_alt_name: \"example.com\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_no_cert\"));\n}\n\nTEST_P(SslSocketTest, FailedClientAuthSanVerification) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      verify_subject_alt_name: \"example.com\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_san\"));\n}\n\nTEST_P(SslSocketTest, X509ExtensionsCertificateSerialNumber) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/extensions_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/extensions_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/extensions_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/extensions_key.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n  require_client_certificate: true\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedSerialNumber(TEST_EXTENSIONS_CERT_SERIAL));\n}\n\n// By default, expired certificates are not permitted.\nTEST_P(SslSocketTest, FailedClientCertificateDefaultExpirationVerification) {\n  
envoy::config::listener::v3::Listener listener;\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n\n  configureServerAndExpiredClientCertificate(listener, client);\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_CERTIFICATE_EXPIRED\"));\n}\n\n// Expired certificates will not be accepted when explicitly disallowed via\n// allow_expired_certificate.\nTEST_P(SslSocketTest, FailedClientCertificateExpirationVerification) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n\n  configureServerAndExpiredClientCertificate(listener, client);\n\n  listener.mutable_filter_chains(0)\n      ->mutable_hidden_envoy_deprecated_tls_context()\n      ->mutable_common_tls_context()\n      ->mutable_validation_context()\n      ->set_allow_expired_certificate(false);\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_CERTIFICATE_EXPIRED\"));\n}\n\n// Expired certificates will be accepted when explicitly allowed via allow_expired_certificate.\nTEST_P(SslSocketTest, ClientCertificateExpirationAllowedVerification) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n\n  configureServerAndExpiredClientCertificate(listener, client);\n\n  listener.mutable_filter_chains(0)\n      ->mutable_hidden_envoy_deprecated_tls_context()\n      ->mutable_common_tls_context()\n      ->mutable_validation_context()\n      ->set_allow_expired_certificate(true);\n\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  
testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_CERTIFICATE_EXPIRED\"));\n}\n\n// Allow expired certificates, but add a certificate hash requirement so it still fails.\nTEST_P(SslSocketTest, FailedClientCertAllowExpiredBadHashVerification) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n\n  configureServerAndExpiredClientCertificate(listener, client);\n\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = listener.mutable_filter_chains(0)\n                                  ->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n\n  server_validation_ctx->set_allow_expired_certificate(true);\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedServerStats(\"ssl.fail_verify_cert_hash\")\n                 .setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_CERTIFICATE_EXPIRED\"));\n}\n\n// Allow expired certificates, but use the wrong CA so it should fail still.\nTEST_P(SslSocketTest, FailedClientCertAllowServerExpiredWrongCAVerification) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n\n  configureServerAndExpiredClientCertificate(listener, client);\n\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = listener.mutable_filter_chains(0)\n                                  
->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n\n  server_validation_ctx->set_allow_expired_certificate(true);\n\n  // This fake CA was not used to sign the client's certificate.\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedTransportFailureReasonContains(\"TLSV1_ALERT_UNKNOWN_CA\"));\n}\n\nTEST_P(SslSocketTest, ClientCertificateHashVerification) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = absl::StrCat(R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      verify_certificate_hash: \")EOF\",\n                                                   TEST_SAN_URI_CERT_256_HASH, \"\\\"\");\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n               .setExpectedSerialNumber(TEST_SAN_URI_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, 
ClientCertificateHashVerificationNoCA) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = absl::StrCat(R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      verify_certificate_hash: \")EOF\",\n                                                   TEST_SAN_URI_CERT_256_HASH, \"\\\"\");\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n               .setExpectedSerialNumber(TEST_SAN_URI_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, ClientCertificateHashListVerification) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = 
filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_hash(TEST_SAN_URI_CERT_256_HASH);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedServerCertDigest(TEST_SAN_DNS_CERT_256_HASH));\n\n  // Works even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, ClientCertificateHashListVerificationNoCA) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  
server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_hash(TEST_SAN_URI_CERT_256_HASH);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedServerCertDigest(TEST_SAN_DNS_CERT_256_HASH));\n\n  // Works even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashVerificationNoClientCertificate) {\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  
const std::string server_ctx_yaml = absl::StrCat(R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      verify_certificate_hash: \")EOF\",\n                                                   TEST_SAN_URI_CERT_256_HASH, \"\\\"\");\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_no_cert\"));\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashVerificationNoCANoClientCertificate) {\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  const std::string server_ctx_yaml = absl::StrCat(R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      verify_certificate_hash: \")EOF\",\n                                                   TEST_SAN_URI_CERT_256_HASH, \"\\\"\");\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_no_cert\"));\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashVerificationWrongClientCertificate) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = absl::StrCat(R\"EOF(\n  
common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      verify_certificate_hash: \")EOF\",\n                                                   TEST_SAN_URI_CERT_256_HASH, \"\\\"\");\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_cert_hash\"));\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashVerificationNoCAWrongClientCertificate) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = absl::StrCat(R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      verify_certificate_hash: \")EOF\",\n                                                   TEST_SAN_URI_CERT_256_HASH, \"\\\"\");\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_cert_hash\"));\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashVerificationWrongCA) {\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n)EOF\";\n\n  const std::string server_ctx_yaml = absl::StrCat(R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem\"\n      verify_certificate_hash: \")EOF\",\n                                                   TEST_SAN_URI_CERT_256_HASH, \"\\\"\");\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n}\n\nTEST_P(SslSocketTest, CertificatesWithPassword) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_key.pem\"));\n  server_cert->mutable_password()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_password.txt\"));\n  
envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_hash(TEST_PASSWORD_PROTECTED_CERT_256_HASH);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_key.pem\"));\n  client_cert->mutable_password()->set_inline_string(\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n          \"{{ test_rundir \"\n          \"}}/test/extensions/transport_sockets/tls/test_data/password_protected_password.txt\")));\n\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedServerCertDigest(TEST_PASSWORD_PROTECTED_CERT_256_HASH));\n\n  // Works even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, 
ClientCertificateSpkiVerification) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedServerCertDigest(TEST_SAN_DNS_CERT_256_HASH));\n\n  // Works even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, ClientCertificateSpkiVerificationNoCA) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n   
   \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedServerCertDigest(TEST_SAN_DNS_CERT_256_HASH));\n\n  // Works even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateSpkiVerificationNoClientCertificate) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  
server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedServerStats(\"ssl.fail_verify_no_cert\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_HANDSHAKE_FAILURE\"));\n\n  // Fails even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateSpkiVerificationNoCANoClientCertificate) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n\n  TestUtilOptionsV2 
test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedServerStats(\"ssl.fail_verify_no_cert\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_HANDSHAKE_FAILURE\"));\n\n  // Fails even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateSpkiVerificationWrongClientCertificate) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      
client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedServerStats(\"ssl.fail_verify_cert_hash\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_CERTIFICATE_UNKNOWN\"));\n\n  // Fails even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateSpkiVerificationNoCAWrongClientCertificate) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  
server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedServerStats(\"ssl.fail_verify_cert_hash\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_CERTIFICATE_UNKNOWN\"));\n\n  // Fails even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateSpkiVerificationWrongCA) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n 
     server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem\"));\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedTransportFailureReasonContains(\"TLSV1_ALERT_UNKNOWN_CA\"));\n\n  // Fails even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, ClientCertificateHashAndSpkiVerification) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedServerCertDigest(TEST_SAN_DNS_CERT_256_HASH));\n\n  // Works even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, 
ClientCertificateHashAndSpkiVerificationNoCA) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"));\n\n  TestUtilOptionsV2 
test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n                 .setExpectedServerCertDigest(TEST_SAN_DNS_CERT_256_HASH));\n\n  // Works even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashAndSpkiVerificationNoClientCertificate) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n\n  TestUtilOptionsV2 test_options(listener, client, false, 
GetParam());\n  testUtilV2(test_options.setExpectedServerStats(\"ssl.fail_verify_no_cert\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_HANDSHAKE_FAILURE\"));\n\n  // Fails even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashAndSpkiVerificationNoCANoClientCertificate) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedServerStats(\"ssl.fail_verify_no_cert\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_HANDSHAKE_FAILURE\"));\n\n  // Fails even 
with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashAndSpkiVerificationWrongClientCertificate) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedServerStats(\"ssl.fail_verify_cert_hash\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_CERTIFICATE_UNKNOWN\"));\n\n  // Fails even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashAndSpkiVerificationNoCAWrongClientCertificate) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  
envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedServerStats(\"ssl.fail_verify_cert_hash\")\n                 .setExpectedTransportFailureReasonContains(\"SSLV3_ALERT_CERTIFICATE_UNKNOWN\"));\n\n  // Fails even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\nTEST_P(SslSocketTest, FailedClientCertificateHashAndSpkiVerificationWrongCA) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  
->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem\"));\n  server_validation_ctx->add_verify_certificate_hash(\n      \"0000000000000000000000000000000000000000000000000000000000000000\");\n  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"));\n\n  TestUtilOptionsV2 test_options(listener, client, false, GetParam());\n  testUtilV2(test_options.setExpectedTransportFailureReasonContains(\"TLSV1_ALERT_UNKNOWN_CA\"));\n\n  // Fails even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n}\n\n// Make sure that we do not flush code and do an immediate close if we have not completed the\n// handshake.\nTEST_P(SslSocketTest, FlushCloseDuringHandshake) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_certificates.pem\"\n)EOF\";\n\n  
envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), tls_context);\n  auto server_cfg = std::make_unique<ServerContextConfigImpl>(tls_context, factory_context_);\n  ContextManagerImpl manager(time_system_);\n  Stats::TestUtil::TestStore server_stats_store;\n  ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager,\n                                                   server_stats_store, std::vector<std::string>{});\n\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n  Network::MockTcpListenerCallbacks callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Network::ListenerPtr listener =\n      dispatcher_->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection(\n      socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      Network::Test::createRawBufferSocket(), nullptr);\n  client_connection->connect();\n  Network::MockConnectionCallbacks client_connection_callbacks;\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n\n  Network::ConnectionPtr server_connection;\n  Network::MockConnectionCallbacks server_connection_callbacks;\n  EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection = dispatcher_->createServerConnection(\n            std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr),\n            stream_info_);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n        Buffer::OwnedImpl data(\"hello\");\n        server_connection->write(data, false);\n        server_connection->close(Network::ConnectionCloseType::FlushWrite);\n      }));\n\n  
EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Test that half-close is sent and received correctly\nTEST_P(SslSocketTest, HalfClose) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_certificates.pem\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext server_tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), server_tls_context);\n  auto server_cfg = std::make_unique<ServerContextConfigImpl>(server_tls_context, factory_context_);\n  ContextManagerImpl manager(time_system_);\n  Stats::TestUtil::TestStore server_stats_store;\n  ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager,\n                                                   server_stats_store, std::vector<std::string>{});\n\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n  Network::MockTcpListenerCallbacks listener_callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Network::ListenerPtr listener =\n      dispatcher_->createListener(socket, listener_callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n  std::shared_ptr<Network::MockReadFilter> server_read_filter(new Network::MockReadFilter());\n  
std::shared_ptr<Network::MockReadFilter> client_read_filter(new Network::MockReadFilter());\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), tls_context);\n  auto client_cfg = std::make_unique<ClientContextConfigImpl>(tls_context, factory_context_);\n  Stats::TestUtil::TestStore client_stats_store;\n  ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager,\n                                                   client_stats_store);\n  Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection(\n      socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      client_ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n  client_connection->enableHalfClose(true);\n  client_connection->addReadFilter(client_read_filter);\n  client_connection->connect();\n  Network::MockConnectionCallbacks client_connection_callbacks;\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n\n  Network::ConnectionPtr server_connection;\n  Network::MockConnectionCallbacks server_connection_callbacks;\n  EXPECT_CALL(listener_callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection = dispatcher_->createServerConnection(\n            std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr),\n            stream_info_);\n        server_connection->enableHalfClose(true);\n        server_connection->addReadFilter(server_read_filter);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n        Buffer::OwnedImpl data(\"hello\");\n        server_connection->write(data, true);\n      }));\n\n  EXPECT_CALL(*server_read_filter, onNewConnection())\n      
.WillOnce(Return(Network::FilterStatus::Continue));\n  EXPECT_CALL(*client_read_filter, onNewConnection())\n      .WillOnce(Return(Network::FilterStatus::Continue));\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected));\n  EXPECT_CALL(*client_read_filter, onData(BufferStringEqual(\"hello\"), true))\n      .WillOnce(Invoke([&](Buffer::Instance&, bool) -> Network::FilterStatus {\n        Buffer::OwnedImpl buffer(\"world\");\n        client_connection->write(buffer, true);\n        return Network::FilterStatus::Continue;\n      }));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(*server_read_filter, onData(BufferStringEqual(\"world\"), true));\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\nTEST_P(SslSocketTest, ClientAuthMultipleCAs) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_certificates.pem\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext server_tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), server_tls_context);\n  auto server_cfg = std::make_unique<ServerContextConfigImpl>(server_tls_context, factory_context_);\n  ContextManagerImpl manager(time_system_);\n  Stats::TestUtil::TestStore server_stats_store;\n  ServerSslSocketFactory 
server_ssl_socket_factory(std::move(server_cfg), manager,\n                                                   server_stats_store, std::vector<std::string>{});\n\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n  Network::MockTcpListenerCallbacks callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Network::ListenerPtr listener =\n      dispatcher_->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), tls_context);\n  auto client_cfg = std::make_unique<ClientContextConfigImpl>(tls_context, factory_context_);\n  Stats::TestUtil::TestStore client_stats_store;\n  ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store);\n  Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection(\n      socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n\n  // Verify that server sent list with 2 acceptable client certificate CA names.\n  const SslHandshakerImpl* ssl_socket =\n      dynamic_cast<const SslHandshakerImpl*>(client_connection->ssl().get());\n  SSL_set_cert_cb(\n      ssl_socket->ssl(),\n      [](SSL* ssl, void*) -> int {\n        STACK_OF(X509_NAME)* list = SSL_get_client_CA_list(ssl);\n        EXPECT_NE(nullptr, list);\n        EXPECT_EQ(2U, sk_X509_NAME_num(list));\n        return 1;\n      },\n     
 nullptr);\n\n  client_connection->connect();\n\n  Network::ConnectionPtr server_connection;\n  Network::MockConnectionCallbacks server_connection_callbacks;\n  EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection = dispatcher_->createServerConnection(\n            std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr),\n            stream_info_);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n      }));\n\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        server_connection->close(Network::ConnectionCloseType::NoFlush);\n        client_connection->close(Network::ConnectionCloseType::NoFlush);\n        dispatcher_->exit();\n      }));\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(1UL, server_stats_store.counter(\"ssl.handshake\").value());\n}\n\nnamespace {\n\n// Test connecting with a client to server1, then trying to reuse the session on server2\nvoid testTicketSessionResumption(const std::string& server_ctx_yaml1,\n                                 const std::vector<std::string>& server_names1,\n                                 const std::string& server_ctx_yaml2,\n                                 const std::vector<std::string>& server_names2,\n                                 const std::string& client_ctx_yaml, bool expect_reuse,\n                                 const Network::Address::IpVersion ip_version,\n                                 const uint32_t expected_lifetime_hint = 0) {\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(*time_system);\n\n  Stats::TestUtil::TestStore server_stats_store;\n  Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, 
time_system);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      server_factory_context;\n  ON_CALL(server_factory_context, api()).WillByDefault(ReturnRef(*server_api));\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext server_tls_context1;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml1), server_tls_context1);\n  auto server_cfg1 =\n      std::make_unique<ServerContextConfigImpl>(server_tls_context1, server_factory_context);\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext server_tls_context2;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml2), server_tls_context2);\n  auto server_cfg2 =\n      std::make_unique<ServerContextConfigImpl>(server_tls_context2, server_factory_context);\n  ServerSslSocketFactory server_ssl_socket_factory1(std::move(server_cfg1), manager,\n                                                    server_stats_store, server_names1);\n  ServerSslSocketFactory server_ssl_socket_factory2(std::move(server_cfg2), manager,\n                                                    server_stats_store, server_names2);\n\n  auto socket1 = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(ip_version), nullptr, true);\n  auto socket2 = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(ip_version), nullptr, true);\n  NiceMock<Network::MockTcpListenerCallbacks> callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Event::DispatcherPtr dispatcher(server_api->allocateDispatcher(\"test_thread\"));\n  Network::ListenerPtr listener1 =\n      dispatcher->createListener(socket1, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n  Network::ListenerPtr listener2 =\n      dispatcher->createListener(socket2, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_tls_context;\n  
TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), client_tls_context);\n\n  Stats::TestUtil::TestStore client_stats_store;\n  Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      client_factory_context;\n  ON_CALL(client_factory_context, api()).WillByDefault(ReturnRef(*client_api));\n\n  auto client_cfg =\n      std::make_unique<ClientContextConfigImpl>(client_tls_context, client_factory_context);\n  ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store);\n  Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection(\n      socket1->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n\n  Network::MockConnectionCallbacks client_connection_callbacks;\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n  client_connection->connect();\n\n  SSL_SESSION* ssl_session = nullptr;\n  Network::ConnectionPtr server_connection;\n  StreamInfo::StreamInfoImpl stream_info(time_system);\n  EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        Network::TransportSocketFactory& tsf = socket->localAddress() == socket1->localAddress()\n                                                   ? 
server_ssl_socket_factory1\n                                                   : server_ssl_socket_factory2;\n        server_connection = dispatcher->createServerConnection(\n            std::move(socket), tsf.createTransportSocket(nullptr), stream_info);\n      }));\n\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        const SslHandshakerImpl* ssl_socket =\n            dynamic_cast<const SslHandshakerImpl*>(client_connection->ssl().get());\n        ssl_session = SSL_get1_session(ssl_socket->ssl());\n        EXPECT_TRUE(SSL_SESSION_is_resumable(ssl_session));\n        if (expected_lifetime_hint) {\n          auto lifetime_hint = SSL_SESSION_get_ticket_lifetime_hint(ssl_session);\n          EXPECT_TRUE(lifetime_hint <= expected_lifetime_hint);\n        }\n        client_connection->close(Network::ConnectionCloseType::NoFlush);\n        server_connection->close(Network::ConnectionCloseType::NoFlush);\n        dispatcher->exit();\n      }));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n\n  dispatcher->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(0UL, server_stats_store.counter(\"ssl.session_reused\").value());\n  EXPECT_EQ(0UL, client_stats_store.counter(\"ssl.session_reused\").value());\n\n  client_connection = dispatcher->createClientConnection(\n      socket2->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n  const SslHandshakerImpl* ssl_socket =\n      dynamic_cast<const SslHandshakerImpl*>(client_connection->ssl().get());\n  SSL_set_session(ssl_socket->ssl(), ssl_session);\n  SSL_SESSION_free(ssl_session);\n\n  client_connection->connect();\n\n  Network::MockConnectionCallbacks server_connection_callbacks;\n  StreamInfo::StreamInfoImpl 
stream_info2(time_system);\n  EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        Network::TransportSocketFactory& tsf = socket->localAddress() == socket1->localAddress()\n                                                   ? server_ssl_socket_factory1\n                                                   : server_ssl_socket_factory2;\n        server_connection = dispatcher->createServerConnection(\n            std::move(socket), tsf.createTransportSocket(nullptr), stream_info2);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n      }));\n\n  // Different tests have different order of whether client or server gets Connected event\n  // first, so always wait until both have happened.\n  size_t connect_count = 0;\n  auto connect_second_time = [&connect_count, &dispatcher, &server_connection, &client_connection,\n                              expect_reuse]() {\n    connect_count++;\n    if (connect_count == 2) {\n      if (expect_reuse) {\n        EXPECT_NE(EMPTY_STRING, server_connection->ssl()->sessionId());\n        EXPECT_EQ(server_connection->ssl()->sessionId(), client_connection->ssl()->sessionId());\n      } else {\n        EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->sessionId());\n      }\n      client_connection->close(Network::ConnectionCloseType::NoFlush);\n      server_connection->close(Network::ConnectionCloseType::NoFlush);\n      dispatcher->exit();\n    }\n  };\n\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n  
EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n\n  dispatcher->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(expect_reuse ? 1UL : 0UL, server_stats_store.counter(\"ssl.session_reused\").value());\n  EXPECT_EQ(expect_reuse ? 1UL : 0UL, client_stats_store.counter(\"ssl.session_reused\").value());\n}\n\nvoid testSupportForStatelessSessionResumption(const std::string& server_ctx_yaml,\n                                              const std::string& client_ctx_yaml,\n                                              bool expect_support,\n                                              const Network::Address::IpVersion ip_version) {\n  Event::SimulatedTimeSystem time_system;\n  ContextManagerImpl manager(*time_system);\n\n  Stats::IsolatedStoreImpl server_stats_store;\n  Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, time_system);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      server_factory_context;\n  ON_CALL(server_factory_context, api()).WillByDefault(ReturnRef(*server_api));\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext server_tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), server_tls_context);\n  auto server_cfg =\n      std::make_unique<ServerContextConfigImpl>(server_tls_context, server_factory_context);\n\n  ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager,\n                                                   server_stats_store, {});\n  auto tcp_socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(ip_version), nullptr, true);\n  NiceMock<Network::MockTcpListenerCallbacks> callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Event::DispatcherPtr dispatcher(server_api->allocateDispatcher(\"test_thread\"));\n  Network::ListenerPtr listener =\n      dispatcher->createListener(tcp_socket, callbacks, 
true, ENVOY_TCP_BACKLOG_SIZE);\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), client_tls_context);\n\n  Stats::IsolatedStoreImpl client_stats_store;\n  Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      client_factory_context;\n  ON_CALL(client_factory_context, api()).WillByDefault(ReturnRef(*client_api));\n\n  auto client_cfg =\n      std::make_unique<ClientContextConfigImpl>(client_tls_context, client_factory_context);\n  ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store);\n  Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection(\n      tcp_socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n\n  Network::MockConnectionCallbacks client_connection_callbacks;\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n  client_connection->connect();\n\n  StreamInfo::StreamInfoImpl stream_info(time_system);\n  Network::ConnectionPtr server_connection;\n  EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection = dispatcher->createServerConnection(\n            std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr),\n            stream_info);\n\n        const SslHandshakerImpl* ssl_socket =\n            dynamic_cast<const SslHandshakerImpl*>(server_connection->ssl().get());\n        SSL* server_ssl_socket = ssl_socket->ssl();\n        SSL_CTX* server_ssl_context = SSL_get_SSL_CTX(server_ssl_socket);\n        if (expect_support) {\n          EXPECT_EQ(0, (SSL_CTX_get_options(server_ssl_context) & SSL_OP_NO_TICKET));\n        } else {\n          
EXPECT_EQ(SSL_OP_NO_TICKET, (SSL_CTX_get_options(server_ssl_context) & SSL_OP_NO_TICKET));\n        }\n      }));\n\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        client_connection->close(Network::ConnectionCloseType::NoFlush);\n        server_connection->close(Network::ConnectionCloseType::NoFlush);\n        dispatcher->exit();\n      }));\n\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n  dispatcher->run(Event::Dispatcher::RunType::Block);\n}\n\n} // namespace\n\nTEST_P(SslSocketTest, TicketSessionResumption) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testTicketSessionResumption(server_ctx_yaml, {}, server_ctx_yaml, {}, client_ctx_yaml, true,\n                              GetParam());\n}\n\nTEST_P(SslSocketTest, TicketSessionResumptionCustomTimeout) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      tls_minimum_protocol_version: TLSv1_0\n      tls_maximum_protocol_version: TLSv1_2\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n  session_timeout: 2307s\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  
testTicketSessionResumption(server_ctx_yaml, {}, server_ctx_yaml, {}, client_ctx_yaml, true,\n                              GetParam(), 2307);\n}\n\nTEST_P(SslSocketTest, TicketSessionResumptionWithClientCA) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  testTicketSessionResumption(server_ctx_yaml, {}, server_ctx_yaml, {}, client_ctx_yaml, true,\n                              GetParam());\n}\n\nTEST_P(SslSocketTest, TicketSessionResumptionRotateKey) {\n  const std::string server_ctx_yaml1 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string server_ctx_yaml2 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n 
   keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_b\"\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testTicketSessionResumption(server_ctx_yaml1, {}, server_ctx_yaml2, {}, client_ctx_yaml, true,\n                              GetParam());\n}\n\nTEST_P(SslSocketTest, TicketSessionResumptionWrongKey) {\n  const std::string server_ctx_yaml1 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string server_ctx_yaml2 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_b\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testTicketSessionResumption(server_ctx_yaml1, {}, server_ctx_yaml2, {}, client_ctx_yaml, false,\n                              GetParam());\n}\n\n// Sessions cannot be resumed even though the server certificates are the same,\n// because of the different SNI requirements.\nTEST_P(SslSocketTest, TicketSessionResumptionDifferentServerNames) {\n  const std::string server_ctx_yaml1 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        
filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string server_ctx_yaml2 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  std::vector<std::string> server_names1 = {\"server1.example.com\"};\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testTicketSessionResumption(server_ctx_yaml1, server_names1, server_ctx_yaml2, {},\n                              client_ctx_yaml, false, GetParam());\n}\n\n// Sessions can be resumed because the server certificates are different but the CN/SANs and\n// issuer are identical\nTEST_P(SslSocketTest, TicketSessionResumptionDifferentServerCert) {\n  const std::string server_ctx_yaml1 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string server_ctx_yaml2 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns2_cert.pem\"\n      private_key:\n        
filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns2_key.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testTicketSessionResumption(server_ctx_yaml1, {}, server_ctx_yaml2, {}, client_ctx_yaml, true,\n                              GetParam());\n}\n\n// Sessions cannot be resumed because the server certificates are different, CN/SANs are identical,\n// but the issuer is different.\nTEST_P(SslSocketTest, TicketSessionResumptionDifferentServerCertIntermediateCA) {\n  const std::string server_ctx_yaml1 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string server_ctx_yaml2 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testTicketSessionResumption(server_ctx_yaml1, {}, server_ctx_yaml2, {}, client_ctx_yaml, false,\n                              GetParam());\n}\n\n// Sessions cannot be resumed because the server certificates are different 
and the SANs\n// are not identical\nTEST_P(SslSocketTest, TicketSessionResumptionDifferentServerCertDifferentSAN) {\n  const std::string server_ctx_yaml1 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string server_ctx_yaml2 = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_key.pem\"\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testTicketSessionResumption(server_ctx_yaml1, {}, server_ctx_yaml2, {}, client_ctx_yaml, false,\n                              GetParam());\n}\n\nTEST_P(SslSocketTest, StatelessSessionResumptionDisabled) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  disable_stateless_session_resumption: true\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testSupportForStatelessSessionResumption(server_ctx_yaml, client_ctx_yaml, false, GetParam());\n}\n\nTEST_P(SslSocketTest, SatelessSessionResumptionEnabledExplicitly) {\n  const 
std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n  disable_stateless_session_resumption: false\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testSupportForStatelessSessionResumption(server_ctx_yaml, client_ctx_yaml, true, GetParam());\n}\n\nTEST_P(SslSocketTest, StatelessSessionResumptionEnabledByDefault) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n    common_tls_context:\n  )EOF\";\n\n  testSupportForStatelessSessionResumption(server_ctx_yaml, client_ctx_yaml, true, GetParam());\n}\n\n// Test that if two listeners use the same cert and session ticket key, but\n// different client CA, that sessions cannot be resumed.\nTEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n  require_client_certificate: true\n)EOF\";\n\n  const std::string server2_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem\"\n  require_client_certificate: true\n  session_ticket_keys:\n    keys:\n      filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context1;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), tls_context1);\n  auto server_cfg = std::make_unique<ServerContextConfigImpl>(tls_context1, factory_context_);\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context2;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server2_ctx_yaml), tls_context2);\n  auto server2_cfg = std::make_unique<ServerContextConfigImpl>(tls_context2, factory_context_);\n  ContextManagerImpl manager(time_system_);\n  Stats::TestUtil::TestStore server_stats_store;\n  ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager,\n                                                   server_stats_store, std::vector<std::string>{});\n  ServerSslSocketFactory server2_ssl_socket_factory(std::move(server2_cfg), manager,\n                                                    server_stats_store, std::vector<std::string>{});\n\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n  auto socket2 = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n  Network::MockTcpListenerCallbacks callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Network::ListenerPtr listener =\n      dispatcher_->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n  Network::ListenerPtr listener2 =\n      dispatcher_->createListener(socket2, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      
certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), tls_context);\n\n  auto client_cfg = std::make_unique<ClientContextConfigImpl>(tls_context, factory_context_);\n  Stats::TestUtil::TestStore client_stats_store;\n  ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store);\n  Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection(\n      socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n\n  Network::MockConnectionCallbacks client_connection_callbacks;\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n  client_connection->connect();\n\n  SSL_SESSION* ssl_session = nullptr;\n  Network::ConnectionPtr server_connection;\n  Network::MockConnectionCallbacks server_connection_callbacks;\n  EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void {\n        Network::TransportSocketFactory& tsf =\n            accepted_socket->localAddress() == socket->localAddress() ? 
server_ssl_socket_factory\n                                                                      : server2_ssl_socket_factory;\n        server_connection = dispatcher_->createServerConnection(\n            std::move(accepted_socket), tsf.createTransportSocket(nullptr), stream_info_);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n      }));\n\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected));\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        const SslHandshakerImpl* ssl_socket =\n            dynamic_cast<const SslHandshakerImpl*>(client_connection->ssl().get());\n        ssl_session = SSL_get1_session(ssl_socket->ssl());\n        EXPECT_TRUE(SSL_SESSION_is_resumable(ssl_session));\n        server_connection->close(Network::ConnectionCloseType::NoFlush);\n        client_connection->close(Network::ConnectionCloseType::NoFlush);\n        dispatcher_->exit();\n      }));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(1UL, server_stats_store.counter(\"ssl.handshake\").value());\n  EXPECT_EQ(1UL, client_stats_store.counter(\"ssl.handshake\").value());\n\n  client_connection = dispatcher_->createClientConnection(\n      socket2->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n  const SslHandshakerImpl* ssl_socket =\n      dynamic_cast<const SslHandshakerImpl*>(client_connection->ssl().get());\n  SSL_set_session(ssl_socket->ssl(), ssl_session);\n  SSL_SESSION_free(ssl_session);\n\n  client_connection->connect();\n\n  
EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void {\n        Network::TransportSocketFactory& tsf =\n            accepted_socket->localAddress() == socket->localAddress() ? server_ssl_socket_factory\n                                                                      : server2_ssl_socket_factory;\n        server_connection = dispatcher_->createServerConnection(\n            std::move(accepted_socket), tsf.createTransportSocket(nullptr), stream_info_);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n      }));\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(1UL, client_stats_store.counter(\"ssl.connection_error\").value());\n  EXPECT_EQ(0UL, server_stats_store.counter(\"ssl.session_reused\").value());\n  EXPECT_EQ(0UL, client_stats_store.counter(\"ssl.session_reused\").value());\n}\n\nvoid SslSocketTest::testClientSessionResumption(const std::string& server_ctx_yaml,\n                                                const std::string& client_ctx_yaml,\n                                                bool expect_reuse,\n                                                const Network::Address::IpVersion version) {\n  InSequence s;\n\n  ContextManagerImpl manager(time_system_);\n\n  Stats::TestUtil::TestStore server_stats_store;\n  Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, time_system_);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      server_factory_context;\n  ON_CALL(server_factory_context, api()).WillByDefault(ReturnRef(*server_api));\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext 
server_ctx_proto;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), server_ctx_proto);\n  auto server_cfg =\n      std::make_unique<ServerContextConfigImpl>(server_ctx_proto, server_factory_context);\n  ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager,\n                                                   server_stats_store, std::vector<std::string>{});\n\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(version), nullptr, true);\n  NiceMock<Network::MockTcpListenerCallbacks> callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system_);\n  Event::DispatcherPtr dispatcher(server_api->allocateDispatcher(\"test_thread\"));\n  Network::ListenerPtr listener =\n      dispatcher->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  Network::ConnectionPtr server_connection;\n  Network::MockConnectionCallbacks server_connection_callbacks;\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_ctx_proto;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), client_ctx_proto);\n\n  Stats::TestUtil::TestStore client_stats_store;\n  Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system_);\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext>\n      client_factory_context;\n  ON_CALL(client_factory_context, api()).WillByDefault(ReturnRef(*client_api));\n\n  auto client_cfg =\n      std::make_unique<ClientContextConfigImpl>(client_ctx_proto, client_factory_context);\n  ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager,\n                                                   client_stats_store);\n  Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection(\n      socket->localAddress(), 
Network::Address::InstanceConstSharedPtr(),\n      client_ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n\n  Network::MockConnectionCallbacks client_connection_callbacks;\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n  client_connection->connect();\n\n  size_t connect_count = 0;\n  auto connect_second_time = [&connect_count, &server_connection]() {\n    if (++connect_count == 2) {\n      server_connection->close(Network::ConnectionCloseType::NoFlush);\n    }\n  };\n\n  size_t close_count = 0;\n  auto close_second_time = [&close_count, &dispatcher]() {\n    if (++close_count == 2) {\n      dispatcher->exit();\n    }\n  };\n\n  // WillRepeatedly doesn't work with InSequence.\n  EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection = dispatcher->createServerConnection(\n            std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr),\n            stream_info_);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n      }));\n\n  const bool expect_tls13 =\n      client_ctx_proto.common_tls_context().tls_params().tls_maximum_protocol_version() ==\n          envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3 &&\n      server_ctx_proto.common_tls_context().tls_params().tls_maximum_protocol_version() ==\n          envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3;\n\n  // The order of \"Connected\" events depends on the version of the TLS protocol (1.3 or older).\n  if (expect_tls13) {\n    EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n    EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n  
} else {\n    EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n    EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n  }\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { close_second_time(); }));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { close_second_time(); }));\n\n  dispatcher->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(0UL, server_stats_store.counter(\"ssl.session_reused\").value());\n  EXPECT_EQ(0UL, client_stats_store.counter(\"ssl.session_reused\").value());\n\n  connect_count = 0;\n  close_count = 0;\n\n  client_connection = dispatcher->createClientConnection(\n      socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      client_ssl_socket_factory.createTransportSocket(nullptr), nullptr);\n  client_connection->addConnectionCallbacks(client_connection_callbacks);\n  client_connection->connect();\n\n  // WillRepeatedly doesn't work with InSequence.\n  EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection = dispatcher->createServerConnection(\n            std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr),\n            stream_info_);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n      }));\n\n  // The order of \"Connected\" events depends on the version of the TLS protocol (1.3 or older),\n  // and whether or not the session was successfully resumed.\n  if (expect_tls13 || expect_reuse) {\n    
EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n    EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n  } else {\n    EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n    EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { connect_second_time(); }));\n  }\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { close_second_time(); }));\n  EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { close_second_time(); }));\n\n  dispatcher->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(expect_reuse ? 1UL : 0UL, server_stats_store.counter(\"ssl.session_reused\").value());\n  EXPECT_EQ(expect_reuse ? 
1UL : 0UL, client_stats_store.counter(\"ssl.session_reused\").value());\n}\n\n// Test client session resumption using default settings (should be enabled).\nTEST_P(SslSocketTest, ClientSessionResumptionDefault) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n)EOF\";\n\n  testClientSessionResumption(server_ctx_yaml, client_ctx_yaml, true, GetParam());\n}\n\n// Make sure client session resumption is not happening with TLS 1.0-1.2 when it's disabled.\nTEST_P(SslSocketTest, ClientSessionResumptionDisabledTls12) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      tls_minimum_protocol_version: TLSv1_0\n      tls_maximum_protocol_version: TLSv1_2\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n  max_session_keys: 0\n)EOF\";\n\n  testClientSessionResumption(server_ctx_yaml, client_ctx_yaml, false, GetParam());\n}\n\n// Test client session resumption with TLS 1.0-1.2.\nTEST_P(SslSocketTest, ClientSessionResumptionEnabledTls12) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      tls_minimum_protocol_version: TLSv1_0\n      tls_maximum_protocol_version: TLSv1_2\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      tls_minimum_protocol_version: TLSv1_0\n      
tls_maximum_protocol_version: TLSv1_2\n  max_session_keys: 2\n)EOF\";\n\n  testClientSessionResumption(server_ctx_yaml, client_ctx_yaml, true, GetParam());\n}\n\n// Make sure client session resumption is not happening with TLS 1.3 when it's disabled.\nTEST_P(SslSocketTest, ClientSessionResumptionDisabledTls13) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      tls_minimum_protocol_version: TLSv1_3\n      tls_maximum_protocol_version: TLSv1_3\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      tls_minimum_protocol_version: TLSv1_3\n      tls_maximum_protocol_version: TLSv1_3\n  max_session_keys: 0\n)EOF\";\n\n  testClientSessionResumption(server_ctx_yaml, client_ctx_yaml, false, GetParam());\n}\n\n// Test client session resumption with TLS 1.3 (it's different than in older versions of TLS).\nTEST_P(SslSocketTest, ClientSessionResumptionEnabledTls13) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      tls_minimum_protocol_version: TLSv1_3\n      tls_maximum_protocol_version: TLSv1_3\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n)EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      tls_minimum_protocol_version: TLSv1_3\n      tls_maximum_protocol_version: TLSv1_3\n  max_session_keys: 2\n)EOF\";\n\n  testClientSessionResumption(server_ctx_yaml, client_ctx_yaml, true, GetParam());\n}\n\nTEST_P(SslSocketTest, SslError) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ 
test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem\"\n      verify_certificate_hash: \"7B:0C:3F:0D:97:0E:FC:16:70:11:7A:0C:35:75:54:6B:17:AB:CF:20:D8:AA:A0:ED:87:08:0F:FB:60:4C:40:77\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), tls_context);\n  auto server_cfg = std::make_unique<ServerContextConfigImpl>(tls_context, factory_context_);\n  ContextManagerImpl manager(time_system_);\n  Stats::TestUtil::TestStore server_stats_store;\n  ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager,\n                                                   server_stats_store, std::vector<std::string>{});\n\n  auto socket = std::make_shared<Network::TcpListenSocket>(\n      Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n  Network::MockTcpListenerCallbacks callbacks;\n  Network::MockConnectionHandler connection_handler;\n  Network::ListenerPtr listener =\n      dispatcher_->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE);\n\n  Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection(\n      socket->localAddress(), Network::Address::InstanceConstSharedPtr(),\n      Network::Test::createRawBufferSocket(), nullptr);\n  client_connection->connect();\n  Buffer::OwnedImpl bad_data(\"bad_handshake_data\");\n  client_connection->write(bad_data, false);\n\n  Network::ConnectionPtr server_connection;\n  Network::MockConnectionCallbacks server_connection_callbacks;\n  EXPECT_CALL(callbacks, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection = dispatcher_->createServerConnection(\n            std::move(socket), 
server_ssl_socket_factory.createTransportSocket(nullptr),\n            stream_info_);\n        server_connection->addConnectionCallbacks(server_connection_callbacks);\n      }));\n\n  EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        client_connection->close(Network::ConnectionCloseType::NoFlush);\n        dispatcher_->exit();\n      }));\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(1UL, server_stats_store.counter(\"ssl.connection_error\").value());\n}\n\nstatic TestUtilOptionsV2 createProtocolTestOptions(\n    const envoy::config::listener::v3::Listener& listener,\n    const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& client_ctx,\n    Network::Address::IpVersion version, std::string protocol) {\n  std::string stats = \"ssl.versions.\" + protocol;\n  TestUtilOptionsV2 options(listener, client_ctx, true, version);\n  options.setExpectedServerStats(stats).setExpectedClientStats(stats);\n  return options.setExpectedProtocolVersion(protocol);\n}\n\nTEST_P(SslSocketTest, ProtocolVersions) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::TlsParameters* server_params =\n      
filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->mutable_tls_params();\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsParameters* client_params =\n      client.mutable_common_tls_context()->mutable_tls_params();\n\n  // Connection using defaults (client & server) succeeds, negotiating TLSv1.2.\n  TestUtilOptionsV2 tls_v1_2_test_options =\n      createProtocolTestOptions(listener, client, GetParam(), \"TLSv1.2\");\n  testUtilV2(tls_v1_2_test_options);\n\n  // Connection using defaults (client & server) succeeds, negotiating TLSv1.2,\n  // even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(tls_v1_2_test_options);\n  client.set_allow_renegotiation(false);\n\n  // Connection using TLSv1.0 (client) and defaults (server) succeeds.\n  client_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  client_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  TestUtilOptionsV2 tls_v1_test_options =\n      createProtocolTestOptions(listener, client, GetParam(), \"TLSv1\");\n  testUtilV2(tls_v1_test_options);\n  client_params->clear_tls_minimum_protocol_version();\n  client_params->clear_tls_maximum_protocol_version();\n\n  // Connection using TLSv1.1 (client) and defaults (server) succeeds.\n  client_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_1);\n  client_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_1);\n  TestUtilOptionsV2 tls_v1_1_test_options =\n      createProtocolTestOptions(listener, client, GetParam(), \"TLSv1.1\");\n  testUtilV2(tls_v1_1_test_options);\n  
client_params->clear_tls_minimum_protocol_version();\n  client_params->clear_tls_maximum_protocol_version();\n\n  // Connection using TLSv1.2 (client) and defaults (server) succeeds.\n  client_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_2);\n  client_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_2);\n  testUtilV2(tls_v1_2_test_options);\n  client_params->clear_tls_minimum_protocol_version();\n  client_params->clear_tls_maximum_protocol_version();\n\n  // Connection using TLSv1.3 (client) and defaults (server) succeeds.\n  client_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  client_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  TestUtilOptionsV2 tls_v1_3_test_options =\n      createProtocolTestOptions(listener, client, GetParam(), \"TLSv1.3\");\n  TestUtilOptionsV2 error_test_options(listener, client, false, GetParam());\n  error_test_options.setExpectedServerStats(\"ssl.connection_error\")\n      .setExpectedTransportFailureReasonContains(\"TLSV1_ALERT_PROTOCOL_VERSION\");\n  testUtilV2(tls_v1_3_test_options);\n  client_params->clear_tls_minimum_protocol_version();\n  client_params->clear_tls_maximum_protocol_version();\n\n  // Connection using TLSv1.0-1.3 (client) and defaults (server) succeeds.\n  client_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  client_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  testUtilV2(tls_v1_3_test_options);\n  client_params->clear_tls_minimum_protocol_version();\n  client_params->clear_tls_maximum_protocol_version();\n\n  // Connection using TLSv1.0 (client) and TLSv1.0-1.3 (server) 
succeeds.\n  client_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  client_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  server_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  server_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  testUtilV2(tls_v1_test_options);\n  client_params->clear_tls_minimum_protocol_version();\n  client_params->clear_tls_maximum_protocol_version();\n  server_params->clear_tls_minimum_protocol_version();\n  server_params->clear_tls_maximum_protocol_version();\n\n  // Connection using TLSv1.3 (client) and TLSv1.0-1.3 (server) succeeds.\n  client_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  client_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  server_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  server_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  testUtilV2(tls_v1_3_test_options);\n  client_params->clear_tls_minimum_protocol_version();\n  client_params->clear_tls_maximum_protocol_version();\n  server_params->clear_tls_minimum_protocol_version();\n  server_params->clear_tls_maximum_protocol_version();\n\n  TestUtilOptionsV2 unsupported_protocol_test_options(listener, client, false, GetParam());\n  unsupported_protocol_test_options.setExpectedServerStats(\"ssl.connection_error\")\n      .setExpectedTransportFailureReasonContains(\"UNSUPPORTED_PROTOCOL\");\n\n  // Connection using defaults (client) and TLSv1.0 (server) fails.\n  
server_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  server_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  testUtilV2(unsupported_protocol_test_options);\n  server_params->clear_tls_minimum_protocol_version();\n  server_params->clear_tls_maximum_protocol_version();\n\n  // Connection using defaults (client) and TLSv1.1 (server) fails.\n  server_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_1);\n  server_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_1);\n  testUtilV2(unsupported_protocol_test_options);\n  server_params->clear_tls_minimum_protocol_version();\n  server_params->clear_tls_maximum_protocol_version();\n\n  // Connection using defaults (client) and TLSv1.2 (server) succeeds.\n  server_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_2);\n  server_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_2);\n  testUtilV2(tls_v1_2_test_options);\n  server_params->clear_tls_minimum_protocol_version();\n  server_params->clear_tls_maximum_protocol_version();\n\n  // Connection using defaults (client) and TLSv1.3 (server) fails.\n  server_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  server_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  testUtilV2(error_test_options);\n  server_params->clear_tls_minimum_protocol_version();\n  server_params->clear_tls_maximum_protocol_version();\n\n  // Connection using defaults (client) and TLSv1.0-1.3 (server) succeeds.\n  
server_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  server_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  testUtilV2(tls_v1_2_test_options);\n  server_params->clear_tls_minimum_protocol_version();\n  server_params->clear_tls_maximum_protocol_version();\n\n  // Connection using TLSv1.0-TLSv1.3 (client) and TLSv1.0 (server) succeeds.\n  client_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  client_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  server_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  server_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  testUtilV2(tls_v1_test_options);\n  client_params->clear_tls_minimum_protocol_version();\n  client_params->clear_tls_maximum_protocol_version();\n  server_params->clear_tls_minimum_protocol_version();\n  server_params->clear_tls_maximum_protocol_version();\n\n  // Connection using TLSv1.0-TLSv1.3 (client) and TLSv1.3 (server) succeeds.\n  client_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0);\n  client_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  server_params->set_tls_minimum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  server_params->set_tls_maximum_protocol_version(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3);\n  testUtilV2(tls_v1_3_test_options);\n  client_params->clear_tls_minimum_protocol_version();\n  
client_params->clear_tls_maximum_protocol_version();\n  server_params->clear_tls_minimum_protocol_version();\n  server_params->clear_tls_maximum_protocol_version();\n}\n\nTEST_P(SslSocketTest, ALPN) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CommonTlsContext* server_ctx =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()->mutable_common_tls_context();\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::CommonTlsContext* client_ctx =\n      client.mutable_common_tls_context();\n\n  // Connection using defaults (client & server) succeeds, no ALPN is negotiated.\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options);\n\n  // Connection using defaults (client & server) succeeds, no ALPN is negotiated,\n  // even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n  client.set_allow_renegotiation(false);\n\n  // Client connects without ALPN to a server with \"test\" ALPN, no ALPN is negotiated.\n  server_ctx->add_alpn_protocols(\"test\");\n  testUtilV2(test_options);\n  server_ctx->clear_alpn_protocols();\n\n  // Client connects with \"test\" ALPN to a server without 
ALPN, no ALPN is negotiated.\n  client_ctx->add_alpn_protocols(\"test\");\n  testUtilV2(test_options);\n\n  client_ctx->clear_alpn_protocols();\n\n  // Client connects with \"test\" ALPN to a server with \"test\" ALPN, \"test\" ALPN is negotiated.\n  client_ctx->add_alpn_protocols(\"test\");\n  server_ctx->add_alpn_protocols(\"test\");\n  test_options.setExpectedALPNProtocol(\"test\");\n  testUtilV2(test_options);\n  test_options.setExpectedALPNProtocol(\"\");\n  client_ctx->clear_alpn_protocols();\n  server_ctx->clear_alpn_protocols();\n\n  // Client connects with \"test\" ALPN to a server with \"test\" ALPN, \"test\" ALPN is negotiated,\n  // even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  client_ctx->add_alpn_protocols(\"test\");\n  server_ctx->add_alpn_protocols(\"test\");\n  test_options.setExpectedALPNProtocol(\"test\");\n  testUtilV2(test_options);\n  test_options.setExpectedALPNProtocol(\"\");\n  client.set_allow_renegotiation(false);\n  client_ctx->clear_alpn_protocols();\n  server_ctx->clear_alpn_protocols();\n\n  // Client connects with \"test\" ALPN to a server with \"test2\" ALPN, no ALPN is negotiated.\n  client_ctx->add_alpn_protocols(\"test\");\n  server_ctx->add_alpn_protocols(\"test2\");\n  testUtilV2(test_options);\n  client_ctx->clear_alpn_protocols();\n  server_ctx->clear_alpn_protocols();\n\n  // Client attempts to configure ALPN that is too large.\n  client_ctx->add_alpn_protocols(std::string(100000, 'a'));\n  EXPECT_THROW_WITH_MESSAGE(testUtilV2(test_options), EnvoyException,\n                            \"Invalid ALPN protocol string\");\n}\n\nTEST_P(SslSocketTest, CipherSuites) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n        
  ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::TlsParameters* server_params =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->mutable_tls_params();\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsParameters* client_params =\n      client.mutable_common_tls_context()->mutable_tls_params();\n\n  // Connection using defaults (client & server) succeeds.\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options);\n\n  // Connection using defaults (client & server) succeeds, even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n  client.set_allow_renegotiation(false);\n\n  // Client connects with one of the supported cipher suites, connection succeeds.\n  std::string common_cipher_suite = \"ECDHE-RSA-CHACHA20-POLY1305\";\n  client_params->add_cipher_suites(common_cipher_suite);\n  server_params->add_cipher_suites(common_cipher_suite);\n  server_params->add_cipher_suites(\"ECDHE-RSA-AES128-GCM-SHA256\");\n  TestUtilOptionsV2 cipher_test_options(listener, client, true, GetParam());\n  cipher_test_options.setExpectedCiphersuite(common_cipher_suite);\n  std::string stats = \"ssl.ciphers.\" + common_cipher_suite;\n  cipher_test_options.setExpectedServerStats(stats).setExpectedClientStats(stats);\n  testUtilV2(cipher_test_options);\n  client_params->clear_cipher_suites();\n  server_params->clear_cipher_suites();\n\n  // Client connects with 
unsupported cipher suite, connection fails.\n  client_params->add_cipher_suites(\"ECDHE-RSA-AES128-GCM-SHA256\");\n  server_params->add_cipher_suites(\"ECDHE-RSA-CHACHA20-POLY1305\");\n  TestUtilOptionsV2 error_test_options(listener, client, false, GetParam());\n  error_test_options.setExpectedServerStats(\"ssl.connection_error\");\n  testUtilV2(error_test_options);\n  client_params->clear_cipher_suites();\n  server_params->clear_cipher_suites();\n\n  // Verify that ECDHE-RSA-CHACHA20-POLY1305 is not offered by default in FIPS builds.\n  client_params->add_cipher_suites(common_cipher_suite);\n#ifdef BORINGSSL_FIPS\n  testUtilV2(error_test_options);\n#else\n  testUtilV2(cipher_test_options);\n#endif\n  client_params->clear_cipher_suites();\n}\n\nTEST_P(SslSocketTest, EcdhCurves) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::TlsParameters* server_params =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->mutable_tls_params();\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  envoy::extensions::transport_sockets::tls::v3::TlsParameters* client_params =\n      client.mutable_common_tls_context()->mutable_tls_params();\n\n  // Connection using 
defaults (client & server) succeeds.\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options);\n\n  // Connection using defaults (client & server) succeeds, even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(test_options);\n  client.set_allow_renegotiation(false);\n\n  // Client connects with one of the supported ECDH curves, connection succeeds.\n  client_params->add_ecdh_curves(\"X25519\");\n  server_params->add_ecdh_curves(\"X25519\");\n  server_params->add_ecdh_curves(\"P-256\");\n  server_params->add_cipher_suites(\"ECDHE-RSA-AES128-GCM-SHA256\");\n  TestUtilOptionsV2 ecdh_curves_test_options(listener, client, true, GetParam());\n  std::string stats = \"ssl.curves.X25519\";\n  ecdh_curves_test_options.setExpectedServerStats(stats).setExpectedClientStats(stats);\n  testUtilV2(ecdh_curves_test_options);\n  client_params->clear_ecdh_curves();\n  server_params->clear_ecdh_curves();\n  server_params->clear_cipher_suites();\n\n  // Client connects with unsupported ECDH curve, connection fails.\n  client_params->add_ecdh_curves(\"X25519\");\n  server_params->add_ecdh_curves(\"P-256\");\n  server_params->add_cipher_suites(\"ECDHE-RSA-AES128-GCM-SHA256\");\n\n  TestUtilOptionsV2 error_test_options(listener, client, false, GetParam());\n  error_test_options.setExpectedServerStats(\"ssl.connection_error\");\n  testUtilV2(error_test_options);\n\n  client_params->clear_ecdh_curves();\n  server_params->clear_ecdh_curves();\n  server_params->clear_cipher_suites();\n\n  // Verify that X25519 is not offered by default in FIPS builds.\n  client_params->add_ecdh_curves(\"X25519\");\n  server_params->add_cipher_suites(\"ECDHE-RSA-AES128-GCM-SHA256\");\n#ifdef BORINGSSL_FIPS\n  testUtilV2(error_test_options);\n#else\n  testUtilV2(ecdh_curves_test_options);\n#endif\n  client_params->clear_ecdh_curves();\n  server_params->clear_cipher_suites();\n}\n\nTEST_P(SslSocketTest, SignatureAlgorithms) {\n  
envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*\n      server_validation_ctx = filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n                                  ->mutable_common_tls_context()\n                                  ->mutable_validation_context();\n  server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  // Server ECDSA certificate.\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"));\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  // Client RSA certificate.\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =\n      client.mutable_common_tls_context()->add_tls_certificates();\n  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"));\n\n  // Connection using defaults (client & server) succeeds.\n  TestUtilOptionsV2 
algorithm_test_options(listener, client, true, GetParam());\n  algorithm_test_options.setExpectedClientCertUri(\"spiffe://lyft.com/test-team\")\n      .setExpectedServerStats(\"ssl.sigalgs.rsa_pss_rsae_sha256\")\n      .setExpectedClientStats(\"ssl.sigalgs.ecdsa_secp256r1_sha256\");\n  testUtilV2(algorithm_test_options);\n\n  // Connection using defaults (client & server) succeeds, even with client renegotiation.\n  client.set_allow_renegotiation(true);\n  testUtilV2(algorithm_test_options);\n  client.set_allow_renegotiation(false);\n}\n\nTEST_P(SslSocketTest, RevokedCertificate) {\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n\n  // This should fail, since the certificate has been revoked.\n  const std::string revoked_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n)EOF\";\n\n  TestUtilOptions revoked_test_options(revoked_client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(revoked_test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n\n  // This should succeed, since the cert isn't revoked.\n  const std::string successful_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/san_dns2_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns2_key.pem\"\n)EOF\";\n\n  TestUtilOptions successful_test_options(successful_client_ctx_yaml, server_ctx_yaml, true,\n                                          GetParam());\n  testUtil(successful_test_options.setExpectedSerialNumber(TEST_SAN_DNS2_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, RevokedCertificateCRLInTrustedCA) {\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert_with_crl.pem\"\n)EOF\";\n\n  // This should fail, since the certificate has been revoked.\n  const std::string revoked_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"\n)EOF\";\n\n  TestUtilOptions revoked_test_options(revoked_client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(revoked_test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n\n  // This should succeed, since the cert isn't revoked.\n  const std::string successful_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns2_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns2_key.pem\"\n)EOF\";\n  
TestUtilOptions successful_test_options(successful_client_ctx_yaml, server_ctx_yaml, true,\n                                          GetParam());\n  testUtil(successful_test_options.setExpectedSerialNumber(TEST_SAN_DNS2_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, RevokedIntermediateCertificate) {\n\n  // This should succeed, since the crl chain is complete.\n  //\n  // Trust chain contains:\n  //  - Root authority certificate (i.e., ca_cert.pem)\n  //  - Intermediate authority certificate (i.e., intermediate_ca_cert.pem)\n  //\n  // Certificate revocation list contains:\n  //  - Root authority certificate revocation list (i.e., ca_cert.crl)\n  //  - Intermediate authority certificate revocation list (i.e., intermediate_ca_cert.crl)\n  const std::string complete_server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_chain.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_chain.crl\"\n)EOF\";\n\n  // This should fail, since the crl chain is incomplete.\n  //\n  // Trust chain contains:\n  //  - Root authority certificate (i.e., ca_cert.pem)\n  //  - Intermediate authority certificate (i.e., intermediate_ca_cert.pem)\n  //\n  // Certificate revocation list contains:\n  //  - Root authority certificate revocation list (i.e., ca_cert.crl)\n  //\n  // Certificate revocation list omits:\n  //  - Root authority certificate revocation list (i.e., ca_cert.crl)\n  const std::string incomplete_server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        
filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_chain.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert.crl\"\n)EOF\";\n\n  // This should fail, since the certificate has been revoked.\n  const std::string revoked_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\"\n)EOF\";\n\n  // This should succeed, since the certificate has not been revoked.\n  const std::string unrevoked_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns4_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns4_key.pem\"\n)EOF\";\n\n  // Ensure that incomplete crl chains fail with revoked certificates.\n  TestUtilOptions incomplete_revoked_test_options(revoked_client_ctx_yaml,\n                                                  incomplete_server_ctx_yaml, false, GetParam());\n  testUtil(incomplete_revoked_test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n\n  // Ensure that incomplete crl chains fail with unrevoked certificates.\n  TestUtilOptions incomplete_unrevoked_test_options(unrevoked_client_ctx_yaml,\n                                                    incomplete_server_ctx_yaml, false, GetParam());\n  testUtil(incomplete_unrevoked_test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n\n  // Ensure that complete crl chains fail with revoked certificates.\n 
 TestUtilOptions complete_revoked_test_options(revoked_client_ctx_yaml, complete_server_ctx_yaml,\n                                                false, GetParam());\n  testUtil(complete_revoked_test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n\n  // Ensure that complete crl chains succeed with unrevoked certificates.\n  TestUtilOptions complete_unrevoked_test_options(unrevoked_client_ctx_yaml,\n                                                  complete_server_ctx_yaml, true, GetParam());\n  testUtil(complete_unrevoked_test_options.setExpectedSerialNumber(TEST_SAN_DNS4_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, RevokedIntermediateCertificateCRLInTrustedCA) {\n\n  // This should succeed, since the crl chain is complete.\n  //\n  // Trust chain contains:\n  //  - Root authority certificate (i.e., ca_cert.pem)\n  //  - Root authority certificate revocation list (i.e., ca_cert.crl)\n  //  - Intermediate authority certificate (i.e., intermediate_ca_cert.pem)\n  //  - Intermediate authority certificate revocation list (i.e., intermediate_ca_cert.crl)\n  const std::string complete_server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_chain_with_crl_chain.pem\"\n)EOF\";\n\n  // This should fail, since the crl chain is incomplete.\n  //\n  // Trust chain contains:\n  //  - Root authority certificate (i.e., ca_cert.pem)\n  //  - Intermediate authority certificate (i.e., intermediate_ca_cert.pem)\n  //  - Intermediate authority certificate revocation list (i.e., intermediate_ca_cert.crl)\n  //\n  // Trust chain omits:\n  //  - Root authority certificate revocation list (i.e., ca_cert.crl)\n  const std::string 
incomplete_server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_chain_with_crl.pem\"\n)EOF\";\n\n  // This should fail, since the certificate has been revoked.\n  const std::string revoked_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\"\n)EOF\";\n\n  // This should succeed, since the certificate has not been revoked.\n  const std::string unrevoked_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns4_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns4_key.pem\"\n)EOF\";\n\n  // Ensure that incomplete crl chains fail with revoked certificates.\n  TestUtilOptions incomplete_revoked_test_options(revoked_client_ctx_yaml,\n                                                  incomplete_server_ctx_yaml, false, GetParam());\n  testUtil(incomplete_revoked_test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n\n  // Ensure that incomplete crl chains fail with unrevoked certificates.\n  TestUtilOptions incomplete_unrevoked_test_options(unrevoked_client_ctx_yaml,\n                                                    incomplete_server_ctx_yaml, false, GetParam());\n  
testUtil(incomplete_unrevoked_test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n\n  // Ensure that complete crl chains fail with revoked certificates.\n  TestUtilOptions complete_revoked_test_options(revoked_client_ctx_yaml, complete_server_ctx_yaml,\n                                                false, GetParam());\n  testUtil(complete_revoked_test_options.setExpectedServerStats(\"ssl.fail_verify_error\"));\n\n  // Ensure that complete crl chains succeed with unrevoked certificates.\n  TestUtilOptions complete_unrevoked_test_options(unrevoked_client_ctx_yaml,\n                                                  complete_server_ctx_yaml, true, GetParam());\n  testUtil(complete_unrevoked_test_options.setExpectedSerialNumber(TEST_SAN_DNS4_CERT_SERIAL));\n}\n\nTEST_P(SslSocketTest, GetRequestedServerName) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  client.set_sni(\"lyft.com\");\n\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedRequestedServerName(\"lyft.com\"));\n}\n\nTEST_P(SslSocketTest, OverrideRequestedServerName) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = 
listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  client.set_sni(\"lyft.com\");\n\n  Network::TransportSocketOptionsSharedPtr transport_socket_options(\n      new Network::TransportSocketOptionsImpl(\"example.com\"));\n\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedRequestedServerName(\"example.com\")\n                 .setTransportSocketOptions(transport_socket_options));\n}\n\nTEST_P(SslSocketTest, OverrideRequestedServerNameWithoutSniInUpstreamTlsContext) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n\n  
Network::TransportSocketOptionsSharedPtr transport_socket_options(\n      new Network::TransportSocketOptionsImpl(\"example.com\"));\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n  testUtilV2(test_options.setExpectedRequestedServerName(\"example.com\")\n                 .setTransportSocketOptions(transport_socket_options));\n}\n\nTEST_P(SslSocketTest, OverrideApplicationProtocols) {\n  envoy::config::listener::v3::Listener listener;\n  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();\n  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()\n          ->mutable_common_tls_context()\n          ->add_tls_certificates();\n  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\"));\n  envoy::extensions::transport_sockets::tls::v3::CommonTlsContext* server_ctx =\n      filter_chain->mutable_hidden_envoy_deprecated_tls_context()->mutable_common_tls_context();\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;\n  TestUtilOptionsV2 test_options(listener, client, true, GetParam());\n\n  // Client connects without ALPN to a server with \"test\" ALPN, no ALPN is negotiated.\n  server_ctx->add_alpn_protocols(\"test\");\n  testUtilV2(test_options);\n  server_ctx->clear_alpn_protocols();\n  // Override client side ALPN, \"test\" ALPN is used.\n  server_ctx->add_alpn_protocols(\"test\");\n  auto transport_socket_options = std::make_shared<Network::TransportSocketOptionsImpl>(\n      \"\", std::vector<std::string>{}, std::vector<std::string>{\"foo\", \"test\", \"bar\"});\n\n  
testUtilV2(test_options.setExpectedALPNProtocol(\"test\").setTransportSocketOptions(\n      transport_socket_options));\n\n  // Set fallback ALPN on the client side ALPN, \"test\" ALPN is used since no ALPN is specified\n  // in the config.\n  server_ctx->add_alpn_protocols(\"test\");\n  transport_socket_options = std::make_shared<Network::TransportSocketOptionsImpl>(\n      \"\", std::vector<std::string>{}, std::vector<std::string>{}, \"test\");\n  testUtilV2(test_options.setExpectedALPNProtocol(\"test\").setTransportSocketOptions(\n      transport_socket_options));\n\n  // Update the client TLS config to specify ALPN. The fallback value should no longer be used.\n  // Note that the server prefers \"test\" over \"bar\", but since the client only configures \"bar\",\n  // the resulting ALPN will be \"bar\" even though \"test\" is included in the fallback.\n  server_ctx->add_alpn_protocols(\"bar\");\n  client.mutable_common_tls_context()->add_alpn_protocols(\"bar\");\n  testUtilV2(test_options.setExpectedALPNProtocol(\"bar\").setTransportSocketOptions(\n      transport_socket_options));\n}\n\n// Validate that if downstream secrets are not yet downloaded from SDS server, Envoy creates\n// NotReadySslSocket object to handle downstream connection.\nTEST_P(SslSocketTest, DownstreamNotReadySslSocket) {\n  Stats::TestUtil::TestStore stats_store;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n  EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info));\n  EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store));\n  EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager));\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext 
tls_context;\n  auto sds_secret_configs =\n      tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add();\n  sds_secret_configs->set_name(\"abc.com\");\n  sds_secret_configs->mutable_sds_config();\n  auto server_cfg = std::make_unique<ServerContextConfigImpl>(tls_context, factory_context);\n  EXPECT_TRUE(server_cfg->tlsCertificates().empty());\n  EXPECT_FALSE(server_cfg->isReady());\n\n  ContextManagerImpl manager(time_system_);\n  ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, stats_store,\n                                                   std::vector<std::string>{});\n  auto transport_socket = server_ssl_socket_factory.createTransportSocket(nullptr);\n  EXPECT_EQ(EMPTY_STRING, transport_socket->protocol());\n  EXPECT_EQ(nullptr, transport_socket->ssl());\n  Buffer::OwnedImpl buffer;\n  Network::IoResult result = transport_socket->doRead(buffer);\n  EXPECT_EQ(Network::PostIoAction::Close, result.action_);\n  result = transport_socket->doWrite(buffer, true);\n  EXPECT_EQ(Network::PostIoAction::Close, result.action_);\n  EXPECT_EQ(\"TLS error: Secret is not supplied by SDS\", transport_socket->failureReason());\n}\n\n// Validate that if upstream secrets are not yet downloaded from SDS server, Envoy creates\n// NotReadySslSocket object to handle upstream connection.\nTEST_P(SslSocketTest, UpstreamNotReadySslSocket) {\n  Stats::TestUtil::TestStore stats_store;\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context;\n  NiceMock<Init::MockManager> init_manager;\n  NiceMock<Event::MockDispatcher> dispatcher;\n  EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info));\n  EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store));\n  EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager));\n  EXPECT_CALL(factory_context, 
dispatcher()).WillRepeatedly(ReturnRef(dispatcher));\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  auto sds_secret_configs =\n      tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add();\n  sds_secret_configs->set_name(\"abc.com\");\n  sds_secret_configs->mutable_sds_config();\n  auto client_cfg = std::make_unique<ClientContextConfigImpl>(tls_context, factory_context);\n  EXPECT_TRUE(client_cfg->tlsCertificates().empty());\n  EXPECT_FALSE(client_cfg->isReady());\n\n  ContextManagerImpl manager(time_system_);\n  ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, stats_store);\n  auto transport_socket = client_ssl_socket_factory.createTransportSocket(nullptr);\n  EXPECT_EQ(EMPTY_STRING, transport_socket->protocol());\n  EXPECT_EQ(nullptr, transport_socket->ssl());\n  Buffer::OwnedImpl buffer;\n  Network::IoResult result = transport_socket->doRead(buffer);\n  EXPECT_EQ(Network::PostIoAction::Close, result.action_);\n  result = transport_socket->doWrite(buffer, true);\n  EXPECT_EQ(Network::PostIoAction::Close, result.action_);\n  EXPECT_EQ(\"TLS error: Secret is not supplied by SDS\", transport_socket->failureReason());\n}\n\nTEST_P(SslSocketTest, TestTransportSocketCallback) {\n  // Make MockTransportSocketCallbacks.\n  Network::MockIoHandle io_handle;\n  NiceMock<Network::MockTransportSocketCallbacks> callbacks;\n  ON_CALL(callbacks, ioHandle()).WillByDefault(ReturnRef(io_handle));\n\n  // Make SslSocket.\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context;\n  Stats::TestUtil::TestStore stats_store;\n  ON_CALL(factory_context, stats()).WillByDefault(ReturnRef(stats_store));\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  ON_CALL(factory_context, localInfo()).WillByDefault(ReturnRef(local_info));\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  auto client_cfg = 
std::make_unique<ClientContextConfigImpl>(tls_context, factory_context);\n\n  ContextManagerImpl manager(time_system_);\n  ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, stats_store);\n\n  Network::TransportSocketPtr transport_socket =\n      client_ssl_socket_factory.createTransportSocket(nullptr);\n\n  SslSocket* ssl_socket = dynamic_cast<SslSocket*>(transport_socket.get());\n\n  // If no transport socket callbacks have been set, this method should return nullptr.\n  EXPECT_EQ(ssl_socket->transportSocketCallbacks(), nullptr);\n\n  // Otherwise, it should return a pointer to the set callbacks object.\n  ssl_socket->setTransportSocketCallbacks(callbacks);\n  EXPECT_EQ(ssl_socket->transportSocketCallbacks(), &callbacks);\n}\n\nclass SslReadBufferLimitTest : public SslSocketTest {\nprotected:\n  void initialize() {\n    TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml_),\n                              downstream_tls_context_);\n    auto server_cfg =\n        std::make_unique<ServerContextConfigImpl>(downstream_tls_context_, factory_context_);\n    manager_ = std::make_unique<ContextManagerImpl>(time_system_);\n    server_ssl_socket_factory_ = std::make_unique<ServerSslSocketFactory>(\n        std::move(server_cfg), *manager_, server_stats_store_, std::vector<std::string>{});\n\n    socket_ = std::make_shared<Network::TcpListenSocket>(\n        Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true);\n    listener_ =\n        dispatcher_->createListener(socket_, listener_callbacks_, true, ENVOY_TCP_BACKLOG_SIZE);\n\n    TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml_), upstream_tls_context_);\n    auto client_cfg =\n        std::make_unique<ClientContextConfigImpl>(upstream_tls_context_, factory_context_);\n\n    client_ssl_socket_factory_ = std::make_unique<ClientSslSocketFactory>(\n        std::move(client_cfg), *manager_, client_stats_store_);\n    auto transport_socket = 
client_ssl_socket_factory_->createTransportSocket(nullptr);\n    client_transport_socket_ = transport_socket.get();\n    client_connection_ = dispatcher_->createClientConnection(\n        socket_->localAddress(), source_address_, std::move(transport_socket), nullptr);\n    client_connection_->addConnectionCallbacks(client_callbacks_);\n    client_connection_->connect();\n    read_filter_ = std::make_shared<Network::MockReadFilter>();\n  }\n\n  void readBufferLimitTest(uint32_t read_buffer_limit, uint32_t expected_chunk_size,\n                           uint32_t write_size, uint32_t num_writes, bool reserve_write_space) {\n    initialize();\n\n    EXPECT_CALL(listener_callbacks_, onAccept_(_))\n        .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n          server_connection_ = dispatcher_->createServerConnection(\n              std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr),\n              stream_info_);\n          server_connection_->setBufferLimits(read_buffer_limit);\n          server_connection_->addConnectionCallbacks(server_callbacks_);\n          server_connection_->addReadFilter(read_filter_);\n          EXPECT_EQ(\"\", server_connection_->nextProtocol());\n          EXPECT_EQ(read_buffer_limit, server_connection_->bufferLimit());\n        }));\n\n    EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    uint32_t filter_seen = 0;\n\n    EXPECT_CALL(*read_filter_, onNewConnection());\n    EXPECT_CALL(*read_filter_, onData(_, _))\n        .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) -> Network::FilterStatus {\n          EXPECT_GE(expected_chunk_size, data.length());\n          filter_seen += data.length();\n          data.drain(data.length());\n          if (filter_seen == (write_size * num_writes)) {\n            
server_connection_->close(Network::ConnectionCloseType::FlushWrite);\n          }\n          return Network::FilterStatus::StopIteration;\n        }));\n\n    EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n          EXPECT_EQ((write_size * num_writes), filter_seen);\n          dispatcher_->exit();\n        }));\n\n    for (uint32_t i = 0; i < num_writes; i++) {\n      Buffer::OwnedImpl data(std::string(write_size, 'a'));\n\n      // Incredibly contrived way of making sure that the write buffer has an empty chain in it.\n      if (reserve_write_space) {\n        Buffer::RawSlice iovecs[2];\n        EXPECT_EQ(2UL, data.reserve(16384, iovecs, 2));\n        iovecs[0].len_ = 0;\n        iovecs[1].len_ = 0;\n        data.commit(iovecs, 2);\n      }\n\n      client_connection_->write(data, false);\n    }\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    EXPECT_EQ(0UL, server_stats_store_.counter(\"ssl.connection_error\").value());\n    EXPECT_EQ(0UL, client_stats_store_.counter(\"ssl.connection_error\").value());\n  }\n\n  void singleWriteTest(uint32_t read_buffer_limit, uint32_t bytes_to_write) {\n    MockWatermarkBuffer* client_write_buffer = nullptr;\n    MockBufferFactory* factory = new StrictMock<MockBufferFactory>;\n    dispatcher_ = api_->allocateDispatcher(\"test_thread\", Buffer::WatermarkFactoryPtr{factory});\n\n    // By default, expect 4 buffers to be created - the client and server read and write buffers.\n    EXPECT_CALL(*factory, create_(_, _, _))\n        .Times(2)\n        .WillOnce(Invoke([&](std::function<void()> below_low, std::function<void()> above_high,\n                             std::function<void()> above_overflow) -> Buffer::Instance* {\n          client_write_buffer = new MockWatermarkBuffer(below_low, above_high, above_overflow);\n          return client_write_buffer;\n        }))\n        
.WillRepeatedly(Invoke([](std::function<void()> below_low, std::function<void()> above_high,\n                                  std::function<void()> above_overflow) -> Buffer::Instance* {\n          return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow);\n        }));\n\n    initialize();\n\n    EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::Connected))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n    EXPECT_CALL(listener_callbacks_, onAccept_(_))\n        .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n          server_connection_ = dispatcher_->createServerConnection(\n              std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr),\n              stream_info_);\n          server_connection_->setBufferLimits(read_buffer_limit);\n          server_connection_->addConnectionCallbacks(server_callbacks_);\n          server_connection_->addReadFilter(read_filter_);\n          EXPECT_EQ(\"\", server_connection_->nextProtocol());\n          EXPECT_EQ(read_buffer_limit, server_connection_->bufferLimit());\n        }));\n\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n    EXPECT_CALL(*read_filter_, onNewConnection());\n    EXPECT_CALL(*read_filter_, onData(_, _)).Times(testing::AnyNumber());\n\n    std::string data_to_write(bytes_to_write, 'a');\n    Buffer::OwnedImpl buffer_to_write(data_to_write);\n    std::string data_written;\n    EXPECT_CALL(*client_write_buffer, move(_))\n        .WillRepeatedly(DoAll(AddBufferToStringWithoutDraining(&data_written),\n                              Invoke(client_write_buffer, &MockWatermarkBuffer::baseMove)));\n    EXPECT_CALL(*client_write_buffer, drain(_)).Times(2).WillOnce(Invoke([&](uint64_t n) -> void {\n      client_write_buffer->baseDrain(n);\n      dispatcher_->exit();\n    }));\n    client_connection_->write(buffer_to_write, false);\n    
dispatcher_->run(Event::Dispatcher::RunType::Block);\n    EXPECT_EQ(data_to_write, data_written);\n\n    disconnect();\n  }\n\n  void disconnect() {\n    EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));\n    EXPECT_CALL(server_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose))\n        .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n\n    client_connection_->close(Network::ConnectionCloseType::NoFlush);\n    dispatcher_->run(Event::Dispatcher::RunType::Block);\n  }\n\n  Stats::TestUtil::TestStore server_stats_store_;\n  Stats::TestUtil::TestStore client_stats_store_;\n  std::shared_ptr<Network::TcpListenSocket> socket_;\n  Network::MockTcpListenerCallbacks listener_callbacks_;\n  Network::MockConnectionHandler connection_handler_;\n  const std::string server_ctx_yaml_ = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/unittestkey.pem\"\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n)EOF\";\n\n  const std::string client_ctx_yaml_ = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem\"\n)EOF\";\n\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext downstream_tls_context_;\n  std::unique_ptr<ContextManagerImpl> manager_;\n  Network::TransportSocketFactoryPtr server_ssl_socket_factory_;\n  Network::ListenerPtr listener_;\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext upstream_tls_context_;\n  Envoy::Ssl::ClientContextSharedPtr client_ctx_;\n  
Network::TransportSocketFactoryPtr client_ssl_socket_factory_;\n  Network::ClientConnectionPtr client_connection_;\n  Network::TransportSocket* client_transport_socket_{};\n  Network::ConnectionPtr server_connection_;\n  NiceMock<Network::MockConnectionCallbacks> server_callbacks_;\n  std::shared_ptr<Network::MockReadFilter> read_filter_;\n  StrictMock<Network::MockConnectionCallbacks> client_callbacks_;\n  Network::Address::InstanceConstSharedPtr source_address_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SslReadBufferLimitTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(SslReadBufferLimitTest, NoLimit) {\n  readBufferLimitTest(0, 256 * 1024, 256 * 1024, 1, false);\n}\n\nTEST_P(SslReadBufferLimitTest, NoLimitReserveSpace) { readBufferLimitTest(0, 512, 512, 1, true); }\n\nTEST_P(SslReadBufferLimitTest, NoLimitSmallWrites) {\n  readBufferLimitTest(0, 256 * 1024, 1, 256 * 1024, false);\n}\n\nTEST_P(SslReadBufferLimitTest, SomeLimit) {\n  readBufferLimitTest(32 * 1024, 32 * 1024, 256 * 1024, 1, false);\n}\n\nTEST_P(SslReadBufferLimitTest, WritesSmallerThanBufferLimit) { singleWriteTest(5 * 1024, 1024); }\n\nTEST_P(SslReadBufferLimitTest, WritesLargerThanBufferLimit) { singleWriteTest(1024, 5 * 1024); }\n\nTEST_P(SslReadBufferLimitTest, TestBind) {\n  std::string address_string = TestUtility::getIpv4Loopback();\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    source_address_ = Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv4Instance(address_string, 0, nullptr)};\n  } else {\n    address_string = \"::1\";\n    source_address_ = Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv6Instance(address_string, 0, nullptr)};\n  }\n\n  initialize();\n\n  EXPECT_CALL(listener_callbacks_, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        
server_connection_ = dispatcher_->createServerConnection(\n            std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr),\n            stream_info_);\n        server_connection_->addConnectionCallbacks(server_callbacks_);\n        server_connection_->addReadFilter(read_filter_);\n        EXPECT_EQ(\"\", server_connection_->nextProtocol());\n      }));\n\n  EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::Connected))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  EXPECT_EQ(address_string, server_connection_->remoteAddress()->ip()->addressAsString());\n\n  disconnect();\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/6617\nTEST_P(SslReadBufferLimitTest, SmallReadsIntoSameSlice) {\n  // write_size * num_writes must be large enough to cause buffer reserving fragmentation,\n  // but smaller than one reservation so the expected slice to be 1.\n  const uint32_t write_size = 1;\n  const uint32_t num_writes = 12 * 1024;\n  const uint32_t read_buffer_limit = write_size * num_writes;\n  const uint32_t expected_chunk_size = write_size * num_writes;\n\n  initialize();\n\n  EXPECT_CALL(listener_callbacks_, onAccept_(_))\n      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {\n        server_connection_ = dispatcher_->createServerConnection(\n            std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr),\n            stream_info_);\n        server_connection_->setBufferLimits(read_buffer_limit);\n        server_connection_->addConnectionCallbacks(server_callbacks_);\n        server_connection_->addReadFilter(read_filter_);\n        EXPECT_EQ(\"\", server_connection_->nextProtocol());\n        EXPECT_EQ(read_buffer_limit, server_connection_->bufferLimit());\n      }));\n\n  EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::Connected))\n      
.WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n\n  uint32_t filter_seen = 0;\n\n  EXPECT_CALL(*read_filter_, onNewConnection());\n  EXPECT_CALL(*read_filter_, onData(_, _))\n      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) -> Network::FilterStatus {\n        EXPECT_GE(expected_chunk_size, data.length());\n        EXPECT_EQ(1, data.getRawSlices().size());\n        filter_seen += data.length();\n        data.drain(data.length());\n        if (filter_seen == (write_size * num_writes)) {\n          server_connection_->close(Network::ConnectionCloseType::FlushWrite);\n        }\n        return Network::FilterStatus::StopIteration;\n      }));\n\n  EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose))\n      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void {\n        EXPECT_EQ((write_size * num_writes), filter_seen);\n        dispatcher_->exit();\n      }));\n\n  for (uint32_t i = 0; i < num_writes; i++) {\n    Buffer::OwnedImpl data(std::string(write_size, 'a'));\n    client_transport_socket_->doWrite(data, false);\n  }\n\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n}\n\n// Test asynchronous signing (ECDHE) using a private key provider.\nTEST_P(SslSocketTest, RsaPrivateKeyProviderAsyncSignSuccess) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            sync_mode: false\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  const std::string successful_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - ECDHE-RSA-AES128-GCM-SHA256\n)EOF\";\n\n  TestUtilOptions successful_test_options(successful_client_ctx_yaml, server_ctx_yaml, true,\n                                          GetParam());\n  testUtil(successful_test_options.setPrivateKeyMethodExpected(true));\n}\n\n// Test asynchronous decryption (RSA).\nTEST_P(SslSocketTest, RsaPrivateKeyProviderAsyncDecryptSuccess) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: decrypt\n            sync_mode: false\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  const std::string successful_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  TestUtilOptions successful_test_options(successful_client_ctx_yaml, server_ctx_yaml, true,\n                                          GetParam());\n  testUtil(successful_test_options.setPrivateKeyMethodExpected(true));\n}\n\n// Test synchronous signing (ECDHE).\nTEST_P(SslSocketTest, RsaPrivateKeyProviderSyncSignSuccess) {\n  const std::string server_ctx_yaml = 
R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            sync_mode: true\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  const std::string successful_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - ECDHE-RSA-AES128-GCM-SHA256\n)EOF\";\n\n  TestUtilOptions successful_test_options(successful_client_ctx_yaml, server_ctx_yaml, true,\n                                          GetParam());\n  testUtil(successful_test_options.setPrivateKeyMethodExpected(true));\n}\n\n// Test synchronous decryption (RSA).\nTEST_P(SslSocketTest, RsaPrivateKeyProviderSyncDecryptSuccess) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: decrypt\n            sync_mode: true\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n 
 const std::string successful_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  TestUtilOptions successful_test_options(successful_client_ctx_yaml, server_ctx_yaml, true,\n                                          GetParam());\n  testUtil(successful_test_options.setPrivateKeyMethodExpected(true));\n}\n\n// Test asynchronous signing (ECDHE) failure (invalid signature).\nTEST_P(SslSocketTest, RsaPrivateKeyProviderAsyncSignFailure) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            sync_mode: false\n            crypto_error: true\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  const std::string failing_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - ECDHE-RSA-AES128-GCM-SHA256\n)EOF\";\n\n  TestUtilOptions failing_test_options(failing_client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(failing_test_options.setPrivateKeyMethodExpected(true).setExpectedServerStats(\n      \"ssl.connection_error\"));\n}\n\n// Test synchronous signing (ECDHE) failure (invalid signature).\nTEST_P(SslSocketTest, RsaPrivateKeyProviderSyncSignFailure) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ 
test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            sync_mode: true\n            crypto_error: true\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  const std::string failing_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - ECDHE-RSA-AES128-GCM-SHA256\n)EOF\";\n\n  TestUtilOptions failing_test_options(failing_client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(failing_test_options.setPrivateKeyMethodExpected(true).setExpectedServerStats(\n      \"ssl.connection_error\"));\n}\n\n// Test the sign operation return with an error.\nTEST_P(SslSocketTest, RsaPrivateKeyProviderSignFailure) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            method_error: true\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  const std::string failing_client_ctx_yaml = R\"EOF(\n  
common_tls_context:\n    tls_params:\n      cipher_suites:\n      - ECDHE-RSA-AES128-GCM-SHA256\n)EOF\";\n\n  TestUtilOptions failing_test_options(failing_client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(failing_test_options.setPrivateKeyMethodExpected(true).setExpectedServerStats(\n      \"ssl.connection_error\"));\n}\n\n// Test the decrypt operation return with an error.\nTEST_P(SslSocketTest, RsaPrivateKeyProviderDecryptFailure) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: decrypt\n            method_error: true\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  const std::string failing_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  TestUtilOptions failing_test_options(failing_client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(failing_test_options.setPrivateKeyMethodExpected(true).setExpectedServerStats(\n      \"ssl.connection_error\"));\n}\n\n// Test the sign operation return with an error in complete.\nTEST_P(SslSocketTest, RsaPrivateKeyProviderAsyncSignCompleteFailure) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: 
test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            async_method_error: true\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  const std::string failing_client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - ECDHE-RSA-AES128-GCM-SHA256\n)EOF\";\n\n  TestUtilOptions failing_test_options(failing_client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(failing_test_options.setPrivateKeyMethodExpected(true)\n               .setExpectedServerCloseEvent(Network::ConnectionEvent::LocalClose)\n               .setExpectedServerStats(\"ssl.connection_error\"));\n}\n\n// Test the decrypt operation return with an error in complete.\nTEST_P(SslSocketTest, RsaPrivateKeyProviderAsyncDecryptCompleteFailure) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_tmpdir }}/unittestcert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: decrypt\n            async_method_error: true\n            mode: rsa\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n      crl:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"\n)EOF\";\n  const std::string failing_client_ctx_yaml 
= R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  TestUtilOptions failing_test_options(failing_client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(failing_test_options.setPrivateKeyMethodExpected(true)\n               .setExpectedServerCloseEvent(Network::ConnectionEvent::LocalClose)\n               .setExpectedServerStats(\"ssl.connection_error\"));\n}\n\n// Test having one cert with private key method and another with just\n// private key.\nTEST_P(SslSocketTest, RsaPrivateKeyProviderMultiCertSuccess) {\n  const std::string client_ctx_yaml = absl::StrCat(R\"EOF(\n    common_tls_context:\n      tls_params:\n        tls_minimum_protocol_version: TLSv1_2\n        tls_maximum_protocol_version: TLSv1_2\n        cipher_suites:\n        - ECDHE-ECDSA-AES128-GCM-SHA256\n        - ECDHE-RSA-AES128-GCM-SHA256\n      validation_context:\n        verify_certificate_hash: )EOF\",\n                                                   TEST_SELFSIGNED_ECDSA_P256_CERT_256_HASH);\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            sync_mode: false\n            mode: rsa\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, 
GetParam());\n  testUtil(test_options.setPrivateKeyMethodExpected(true));\n}\n\n// Test having two certs with private key methods. This will\n// synchronously fail because the second certificate is a ECDSA one and\n// the RSA method can't handle it.\nTEST_P(SslSocketTest, RsaPrivateKeyProviderMultiCertFail) {\n  const std::string client_ctx_yaml = absl::StrCat(R\"EOF(\n    common_tls_context:\n      tls_params:\n        tls_minimum_protocol_version: TLSv1_2\n        tls_maximum_protocol_version: TLSv1_2\n        cipher_suites:\n        - ECDHE-ECDSA-AES128-GCM-SHA256\n        - ECDHE-RSA-AES128-GCM-SHA256\n      validation_context:\n        verify_certificate_hash: )EOF\",\n                                                   TEST_SELFSIGNED_ECDSA_P256_CERT_256_HASH);\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            sync_mode: false\n            mode: rsa\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n            expected_operation: sign\n            sync_mode: false\n            mode: rsa\n)EOF\";\n\n  TestUtilOptions failing_test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  
EXPECT_THROW_WITH_MESSAGE(testUtil(failing_test_options.setPrivateKeyMethodExpected(true)),\n                            EnvoyException, \"Private key is not RSA.\")\n}\n\n// Test ECDSA private key method provider mode.\nTEST_P(SslSocketTest, EcdsaPrivateKeyProviderSuccess) {\n  const std::string client_ctx_yaml = absl::StrCat(R\"EOF(\n    common_tls_context:\n      tls_params:\n        tls_minimum_protocol_version: TLSv1_2\n        tls_maximum_protocol_version: TLSv1_2\n        cipher_suites:\n        - ECDHE-ECDSA-AES128-GCM-SHA256\n      validation_context:\n        verify_certificate_hash: )EOF\",\n                                                   TEST_SELFSIGNED_ECDSA_P256_CERT_256_HASH);\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n            expected_operation: sign\n            mode: ecdsa\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setPrivateKeyMethodExpected(true));\n}\n\n// Test having two certs with different private key method modes. It's expected that the ECDSA\n// provider mode is being used. 
RSA provider mode is set to fail with \"async_method_error\", but\n// that's not happening.\nTEST_P(SslSocketTest, RsaAndEcdsaPrivateKeyProviderMultiCertSuccess) {\n  const std::string client_ctx_yaml = absl::StrCat(R\"EOF(\n    common_tls_context:\n      tls_params:\n        tls_minimum_protocol_version: TLSv1_2\n        tls_maximum_protocol_version: TLSv1_2\n        cipher_suites:\n        - ECDHE-ECDSA-AES128-GCM-SHA256\n        - ECDHE-RSA-AES128-GCM-SHA256\n      validation_context:\n        verify_certificate_hash: )EOF\",\n                                                   TEST_SELFSIGNED_ECDSA_P256_CERT_256_HASH);\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            sync_mode: false\n            async_method_error: true\n            mode: rsa\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n            expected_operation: sign\n            mode: ecdsa\n)EOF\";\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setPrivateKeyMethodExpected(true));\n}\n\n// Test having two certs with different private key method modes. 
ECDSA provider is set to fail.\nTEST_P(SslSocketTest, RsaAndEcdsaPrivateKeyProviderMultiCertFail) {\n  const std::string client_ctx_yaml = absl::StrCat(R\"EOF(\n    common_tls_context:\n      tls_params:\n        tls_minimum_protocol_version: TLSv1_2\n        tls_maximum_protocol_version: TLSv1_2\n        cipher_suites:\n        - ECDHE-ECDSA-AES128-GCM-SHA256\n        - ECDHE-RSA-AES128-GCM-SHA256\n      validation_context:\n        verify_certificate_hash: )EOF\",\n                                                   TEST_SELFSIGNED_ECDSA_P256_CERT_256_HASH);\n\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_tmpdir }}/unittestkey.pem\"\n            expected_operation: sign\n            sync_mode: false\n            mode: rsa\n    - certificate_chain:\n        filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem\"\n      private_key_provider:\n        provider_name: test\n        typed_config:\n          \"@type\": type.googleapis.com/google.protobuf.Struct\n          value:\n            private_key_file: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem\"\n            expected_operation: sign\n            async_method_error: true\n            mode: ecdsa\n)EOF\";\n  TestUtilOptions failing_test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(failing_test_options.setPrivateKeyMethodExpected(true)\n               .setExpectedServerCloseEvent(Network::ConnectionEvent::LocalClose)\n               
.setExpectedServerStats(\"ssl.connection_error\"));\n}\n\nTEST_P(SslSocketTest, TestStaplesOcspResponseSuccess) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der\"\n  ocsp_staple_policy: lenient_stapling\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n\n  std::string ocsp_response_path = \"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der\";\n  std::string expected_response =\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(ocsp_response_path));\n\n  testUtil(test_options.enableOcspStapling()\n               .setExpectedOcspResponse(expected_response)\n               .setExpectedServerStats(\"ssl.ocsp_staple_responses\"));\n}\n\nTEST_P(SslSocketTest, TestNoOcspStapleWhenNotEnabledOnClient) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der\"\n  ocsp_staple_policy: must_staple\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options);\n}\n\nTEST_P(SslSocketTest, 
TestOcspStapleOmittedOnSkipStaplingAndResponseExpired) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/unknown_ocsp_resp.der\"\n  ocsp_staple_policy: lenient_stapling\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.ocsp_staple_omitted\").enableOcspStapling());\n}\n\nTEST_P(SslSocketTest, TestConnectionFailsOnStapleRequiredAndOcspExpired) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/unknown_ocsp_resp.der\"\n  ocsp_staple_policy: must_staple\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.ocsp_staple_failed\").enableOcspStapling());\n}\n\nTEST_P(SslSocketTest, TestConnectionSucceedsWhenRejectOnExpiredNoOcspResponse) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ 
test_tmpdir }}/ocsp_test_data/good_key.pem\"\n  ocsp_staple_policy: strict_stapling\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.ocsp_staple_omitted\").enableOcspStapling());\n}\n\nTEST_P(SslSocketTest, TestConnectionFailsWhenRejectOnExpiredAndResponseExpired) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/unknown_ocsp_resp.der\"\n  ocsp_staple_policy: strict_stapling\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.ocsp_staple_failed\").enableOcspStapling());\n}\n\nTEST_P(SslSocketTest, TestConnectionFailsWhenCertIsMustStapleAndResponseExpired) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_ocsp_resp.der\"\n  ocsp_staple_policy: lenient_stapling\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  
TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.ocsp_staple_failed\").enableOcspStapling());\n}\n\nTEST_P(SslSocketTest, TestConnectionSucceedsForMustStapleCertExpirationValidationOff) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_ocsp_resp.der\"\n  ocsp_staple_policy: must_staple\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.check_ocsp_policy\", \"false\"}});\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  std::string ocsp_response_path = \"{{ test_tmpdir }}/ocsp_test_data/revoked_ocsp_resp.der\";\n  std::string expected_response =\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(ocsp_response_path));\n  testUtil(test_options.enableOcspStapling()\n               .setExpectedServerStats(\"ssl.ocsp_staple_responses\")\n               .setExpectedOcspResponse(expected_response));\n}\n\nTEST_P(SslSocketTest, TestConnectionSucceedsForMustStapleCertNoValidationNoResponse) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem\"\n  ocsp_staple_policy: lenient_stapling\n  )EOF\";\n\n  const std::string client_ctx_yaml = 
R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  TestScopedRuntime scoped_runtime;\n  Runtime::LoaderSingleton::getExisting()->mergeValues(\n      {{\"envoy.reloadable_features.require_ocsp_response_for_must_staple_certs\", \"false\"},\n       {\"envoy.reloadable_features.check_ocsp_policy\", \"false\"}});\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.ocsp_staple_omitted\")\n               .enableOcspStapling()\n               .setExpectedOcspResponse(\"\"));\n}\n\nTEST_P(SslSocketTest, TestFilterMultipleCertsFilterByOcspPolicyFallbackOnFirst) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der\"\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/ecdsa_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/ecdsa_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/ecdsa_ocsp_resp.der\"\n  ocsp_staple_policy: must_staple\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - ECDHE-ECDSA-AES128-GCM-SHA256\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  std::string ocsp_response_path = \"{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der\";\n  std::string expected_response =\n      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(ocsp_response_path));\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());\n  testUtil(test_options.enableOcspStapling()\n               
.setExpectedServerStats(\"ssl.ocsp_staple_responses\")\n               .setExpectedOcspResponse(expected_response));\n}\n\nTEST_P(SslSocketTest, TestConnectionFailsOnMultipleCertificatesNonePassOcspPolicy) {\n  const std::string server_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_certificates:\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/revoked_ocsp_resp.der\"\n    - certificate_chain:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/ecdsa_cert.pem\"\n      private_key:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/ecdsa_key.pem\"\n      ocsp_staple:\n        filename: \"{{ test_tmpdir }}/ocsp_test_data/ecdsa_ocsp_resp.der\"\n  ocsp_staple_policy: must_staple\n  )EOF\";\n\n  const std::string client_ctx_yaml = R\"EOF(\n  common_tls_context:\n    tls_params:\n      cipher_suites:\n      - ECDHE-ECDSA-AES128-GCM-SHA256\n      - TLS_RSA_WITH_AES_128_GCM_SHA256\n)EOF\";\n\n  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());\n  testUtil(test_options.setExpectedServerStats(\"ssl.ocsp_staple_failed\").enableOcspStapling());\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/ssl_test_utility.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"test/test_common/environment.h\"\n\n#include \"gtest/gtest.h\"\n#include \"openssl/ssl.h\"\n#include \"openssl/x509v3.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\n\ninline bssl::UniquePtr<X509> readCertFromFile(const std::string& path) {\n  const std::string& file_content = TestEnvironment::readFileToStringForTest(path);\n  bssl::UniquePtr<BIO> bio(BIO_new_mem_buf(file_content.c_str(), file_content.size()));\n  bssl::UniquePtr<X509> cert(PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr));\n  EXPECT_NE(cert, nullptr);\n  return cert;\n}\n\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nfilegroup(\n    name = \"certs\",\n    srcs = glob([\"*\"]),\n)\n\nenvoy_cc_test_library(\n    name = \"cert_infos\",\n    hdrs = glob([\"*info.h\"]),\n)\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/README.md",
    "content": "# What are the identities, certificates and keys\nThere are 15 identities:\n- **CA**: Certificate Authority for **No SAN**, **SAN With URI** and **SAN With\n  DNS**. It has the self-signed certificate *ca_cert.pem*. *ca_key.pem* is its\n  private key. Additionally, we create a CRL for this CA (*ca_cert.crl*) that\n  revokes the certificate *san_dns_cert.pem*.\n- **Intermediate CA**: Intermediate Certificate Authority, signed by the **CA**.\n  It has the certificate *intermediate_ca_cert.pem\". *intermediate_ca_key.pem*\n  is its private key.\n- **Fake CA**: Fake Certificate Authority used to validate verification logic.\n  It has the self-signed certificate *fake_ca_cert.pem\"*. *fake_ca_key.pem\" is\n  its private key.\n- **No SAN**: It has the certificate *no_san_cert.pem*, signed by the **CA**.\n  The certificate does not have SAN field. *no_san_key.pem* is its private key.\n  A certificate chain named *no_san_chain.pem* is generated by using the\n  **Intermediate CA** along with the *no_san_cert.pem* certificate.\n- **SAN With DNS**: It has the certificate *san_dns_cert.pem*, which is signed\n  by the **CA** using the config *san_dns_cert.cfg*. The certificate has SAN\n  field of DNS type. *san_dns_key.pem* is its private key. A second certificate\n  and key, using the same config, is *san_dns2_cert*. A third certificate and key,\n  using the same config, but signed by the **Intermediate CA** is *san_dns3_cert*,\n  its certificate chain is *san_dns3_chain.pem*.\n- **SAN With Multiple DNS**: Same as *SAN With DNS* except there are multiple\n  SANs (including wildcard domain). It has certificate *san_multiple_dns_cert.pem*,\n  *san_multiple_dns_key.pem* is its private key.\n- **SAN only**: Same as *SAN With DNS* except that the certificate doesn't have the\n  CommonName set. 
It has certificate *san_only_dns_cert.pem*, *san_only_dns_key.pem*\n  is its private key.\n- **SAN With URI**: It has the certificate *san_uri_cert.pem*, which is signed\n  by the **CA** using the config *san_uri_cert.cfg*. The certificate has SAN\n  field of URI type. *san_uri_key.pem* is its private key.\n- **Password-protected**: The password-protected certificate *password_protected_cert.pem*,\n  using the config *san_uri_cert.cfg*. *password_protected_key.pem* is\n  its private key encrypted using the password supplied in *password_protectted_password.txt*.\n- **Self-signed**: The self-signed certificate *selfsigned_cert.pem*, using the\n  config *selfsigned_cert.cfg*. *selfsigned_key.pem* is its private key.\n- **Self-signed RSA 1024**: The self-signed certificate *selfsigned_rsa_1024_cert.pem*,\n  using the config *selfsigned_cert.cfg*. *selfsigned_rsa_1024_key.pem* is\n  its private key.\n- **Self-signed ECDSA P-256**: The self-signed certificate *selfsigned_ecdsa_p256_cert.pem*,\n  using the config *selfsigned_cert.cfg*. *selfsigned_ecdsa_p256_key.pem* is\n  its private key.\n- **Self-signed ECDSA P-384**: The self-signed certificate *selfsigned_ecdsa_p384_cert.pem*,\n  using the config *selfsigned_cert.cfg*. *selfsigned_ecdsa_p256_key.pem* is\n  its private key.\n- **Expired**: A self-signed, expired certificate *expired_cert.pem*,\n  using the config *selfsigned_cert.cfg*. *expired_key.pem* is its private\n  key.\n- **Expired With URI**: A self-signed, expired certificate *expired_san_uri_cert.pem*,\n  using the config *san_uri_cert.cfg*. *expired_san_uri_key.pem* is its private\n  key.\n\n# How to update certificates\n**certs.sh** has the commands to generate all files except the private key\nfiles. Running certs.sh directly will cause the certificate files to be\nregenerated. 
So if you want to regenerate a particular file, please copy the\ncorresponding commands from certs.sh and execute them in command line.\n\nNote that macOS is unable to generate the expired unit test cert starting\nwith its switch from OpenSSL to LibreSSL in High Sierra (10.13). Specifically,\nthat version of the openssl command will not accept a non-positive \"-days\"\nparameter.\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/aes_128_key",
    "content": "�J��w\u0016sXyP�\\;���"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ca_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test CA\ncommonName_default = Test CA\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:TRUE\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:TRUE\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[ca]\ndefault_ca = CA_default\n\n[CA_default]\ndatabase = crl_index.txt\ncrlnumber = crl_number\n\ndefault_days = 3650\ndefault_crl_days = 3650\ndefault_md = sha256\npreserve = no\nunique_subject = no\n\n[crl_ext]\nauthorityKeyIdentifier = keyid:always,issuer:always\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ca_cert.crl",
    "content": "-----BEGIN X509 CRL-----\nMIIB+DCB4QIBATANBgkqhkiG9w0BAQsFADB2MQswCQYDVQQGEwJVUzETMBEGA1UE\nCAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwE\nTHlmdDEZMBcGA1UECwwQTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBD\nQRcNMjAwODIwMTY1NzUzWhcNMzAwODE4MTY1NzUzWjAnMCUCFG+5aeRHWk8uigny\no72012gx2DLbFw0yMDA4MjAxNjU3NTNaoA4wDDAKBgNVHRQEAwIBADANBgkqhkiG\n9w0BAQsFAAOCAQEAJA/YKjDv8Is9aysvQYlwEiQyTKR2j42zxHr71cS/YCZgEL9h\nJwnrK9Oo+EIO8nSfZfa35G/WvFknzeGrqwCH+amAQk0rWy7rdY3mAcCXgS9Ee5C5\nFzTWKYRFmCp/pFtm0PuqrCVf6IMe8GJ3WpleOFI1DkSpZXEOR4VBh5VU29DFVoWz\n9AvKIcwkYtZ49bojh7HP1db+14ovjC1miakWkW1l6U6/pfTwH8ViP28L5yyyp70l\nI0gtw/OXjb0VZ/F2YVkB4f4Yhhs9+pLfpyHN3hEYl0oQt73WPbylPUrbKY1Gp4Qx\n5BQF3Ub+aOIskQ1/8QDBWommJNj//7NwVvBO2w==\n-----END X509 CRL-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ca_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtgwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ2WhcNMjIw\nODIwMTY1NzQ2WjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAKmqkuzB22hUhvai26KtFtia0yfQgyBYtWx8MCxw\n1XI+CeOC1JsYbEozm2ze+ytxlS+1Yr8U2Sb7D43AuVd27HeQllMT7DP5JV6mQkQG\n4ms1yTz8oN4H1V6au3Gy6K8BZOf7rY+1yiJMzG2yqC3ipShD8up/RXmXQWInSv9G\nU6lU7ZK+bK6IezsPEUPiFVzfxspQDMCSLSLi3jZmD4S0Uld4d6pFG21pWBSSRxI8\nd8xJkqqOAMc400V65rnaHm96uwvcjeWZGiI50HwhfTVjiztzcCblN1qt/Es7yhBs\neQFr+2b8N04zCMDLlL7grn9imW/XLiVSRvVrkXDyqIUmwRECAwEAAaNjMGEwDwYD\nVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFNPBCyPjw6jy\nMr5fVME/eDgTVQPZMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgTVQPZMA0G\nCSqGSIb3DQEBCwUAA4IBAQAVnbqXnnVFGz83S2tkry/Xq0wviklVWOsIn+emjV13\nh4SC4WgQWHViIwVz1XjUvpguIgMg+R1uE9cPrs0G2Pi6JkrzJB3Btre6QeRvMGhN\nT4HsEdyfg2KvcNj0VhkHAFD53R0g9UZywxyVcTFwjFDE4ELUSRGcYsF3lH0AU/cP\nhMLm0TBsUaPDxHUSpKqn63oAjKh2BN9r8Ecg9ii7I0nYakWtjz9W8e9CPTrVHxrl\nV6/lXdvkXEEtblCnNonawqYSbd4Rqs7duk2jgVX9zjguA57/0wmxeb+dDUYvVFMp\nxFQdzaxezPS7myjbpTP+TUyWhbTZDJLKxhMf9LAOV3A1\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ca_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_CA_CERT_256_HASH[] =\n    \"e7ca585384165af15ce8aa8d5a497ace7bc476a57a9b0f6746145e04ec338cf4\";\nconstexpr char TEST_CA_CERT_1_HASH[] = \"c1753b553e3446c9120d17699c1f9484e2b8b7f7\";\nconstexpr char TEST_CA_CERT_SPKI[] = \"gumR8sG2aAYQhqyhVB5nceZhRb0k+QBnGC+R8/ChhCg=\";\nconstexpr char TEST_CA_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832d8\";\nconstexpr char TEST_CA_CERT_NOT_BEFORE[] = \"Aug 20 16:57:46 2020 GMT\";\nconstexpr char TEST_CA_CERT_NOT_AFTER[] = \"Aug 20 16:57:46 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ca_cert_with_crl.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtgwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ2WhcNMjIw\nODIwMTY1NzQ2WjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAKmqkuzB22hUhvai26KtFtia0yfQgyBYtWx8MCxw\n1XI+CeOC1JsYbEozm2ze+ytxlS+1Yr8U2Sb7D43AuVd27HeQllMT7DP5JV6mQkQG\n4ms1yTz8oN4H1V6au3Gy6K8BZOf7rY+1yiJMzG2yqC3ipShD8up/RXmXQWInSv9G\nU6lU7ZK+bK6IezsPEUPiFVzfxspQDMCSLSLi3jZmD4S0Uld4d6pFG21pWBSSRxI8\nd8xJkqqOAMc400V65rnaHm96uwvcjeWZGiI50HwhfTVjiztzcCblN1qt/Es7yhBs\neQFr+2b8N04zCMDLlL7grn9imW/XLiVSRvVrkXDyqIUmwRECAwEAAaNjMGEwDwYD\nVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFNPBCyPjw6jy\nMr5fVME/eDgTVQPZMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgTVQPZMA0G\nCSqGSIb3DQEBCwUAA4IBAQAVnbqXnnVFGz83S2tkry/Xq0wviklVWOsIn+emjV13\nh4SC4WgQWHViIwVz1XjUvpguIgMg+R1uE9cPrs0G2Pi6JkrzJB3Btre6QeRvMGhN\nT4HsEdyfg2KvcNj0VhkHAFD53R0g9UZywxyVcTFwjFDE4ELUSRGcYsF3lH0AU/cP\nhMLm0TBsUaPDxHUSpKqn63oAjKh2BN9r8Ecg9ii7I0nYakWtjz9W8e9CPTrVHxrl\nV6/lXdvkXEEtblCnNonawqYSbd4Rqs7duk2jgVX9zjguA57/0wmxeb+dDUYvVFMp\nxFQdzaxezPS7myjbpTP+TUyWhbTZDJLKxhMf9LAOV3A1\n-----END CERTIFICATE-----\n-----BEGIN X509 
CRL-----\nMIIB+DCB4QIBATANBgkqhkiG9w0BAQsFADB2MQswCQYDVQQGEwJVUzETMBEGA1UE\nCAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwE\nTHlmdDEZMBcGA1UECwwQTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBD\nQRcNMjAwODIwMTY1NzUzWhcNMzAwODE4MTY1NzUzWjAnMCUCFG+5aeRHWk8uigny\no72012gx2DLbFw0yMDA4MjAxNjU3NTNaoA4wDDAKBgNVHRQEAwIBADANBgkqhkiG\n9w0BAQsFAAOCAQEAJA/YKjDv8Is9aysvQYlwEiQyTKR2j42zxHr71cS/YCZgEL9h\nJwnrK9Oo+EIO8nSfZfa35G/WvFknzeGrqwCH+amAQk0rWy7rdY3mAcCXgS9Ee5C5\nFzTWKYRFmCp/pFtm0PuqrCVf6IMe8GJ3WpleOFI1DkSpZXEOR4VBh5VU29DFVoWz\n9AvKIcwkYtZ49bojh7HP1db+14ovjC1miakWkW1l6U6/pfTwH8ViP28L5yyyp70l\nI0gtw/OXjb0VZ/F2YVkB4f4Yhhs9+pLfpyHN3hEYl0oQt73WPbylPUrbKY1Gp4Qx\n5BQF3Ub+aOIskQ1/8QDBWommJNj//7NwVvBO2w==\n-----END X509 CRL-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ca_certificates.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIUFphcD2peb8k4I8znXQlQrleU3t4wDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB0Zha2UgQ0EwHhcNMjAwODIwMTY1NzQ3WhcNMjIw\nODIwMTY1NzQ3WjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHRmFrZSBDQTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAMseSZfV6e+UfQYjNv1wEi5O4Rv4SXDLyPrxB0UE\n2Wny8Mji4BsLWHI1v8+kXnjw1cCiRa7rZQvdtQ8NunUyLyl1NNAePDqKSMZf8r8o\nUsNd7WgT1ziE0CJ/4KLrL/3ednv4d67Pj7njACke8IzrIVTB1Gfk7oROcmXcYNHl\nmuKbWPnFz4TdWptP5QYh24E3GxrceD7tDeW9yGndfSVb7LaOXNhU9tg/pVG+xIea\n77H+6uKZWSh2rGYZdhedBZwPC6b0xuPpcJj/HVPkt5AO+xWhyloSlbmgNypnxKre\nolHvzNbDINgg8UQwUE68sE91ErzbQt4Kcf2NcgVM7LgBGXcCAwEAAaNjMGEwDwYD\nVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFKtN9263XXVl\na4+dQ0aPKPh+WGS1MB8GA1UdIwQYMBaAFKtN9263XXVla4+dQ0aPKPh+WGS1MA0G\nCSqGSIb3DQEBCwUAA4IBAQBBjy26Xe1kCvhJR2cDvWHyGOiGGHUAD74FVzGDhsrD\nivxQ+x6rU2W3zQ2Wxx4ODIyl47yIFnn5UzmgAA0chsNw7t2j+5R62ikEV+vn8YxL\nrK72RV4KzW/XdHcaHY+DdgboHlYYFd7e7rbKODpfNIMB8TeoD9z2YtMefmcHnwEr\nM22h45aAJOaHi/oR8K1BPpwa1Ubkf5bVSdCM1bO3sJZcjdC41dZ1orLdtgcPmSgu\ngbWDzFs8kDHetOTgNn2OuJSm94njQgStSMAY5820BylYQi9uidnrUooK6sKf+Ioc\nO9HhERkI0RpAZHl/mHw8WJHGCEPMqokKC8I8OTVHRwkN\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtgwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ2WhcNMjIw\nODIwMTY1NzQ2WjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAKmqkuzB22hUhvai26KtFtia0yfQgyBYtWx8MCxw\n1XI+CeOC1JsYbEozm2ze+ytxlS+1Yr8U2Sb7D43AuVd27HeQllMT7DP5JV6mQkQG\n4ms1yTz8oN4H1V6au3Gy6K8BZOf7rY+1yiJMzG2yqC3ipShD8up/RXmXQWInSv9G\nU6lU7ZK+bK6IezsPEUPiFVzfxspQDMCSLSLi3jZmD4S0Uld4d6pFG21pWBSSRxI8\nd8xJkqqOAMc400V65rnaHm96uwvcjeWZGiI50HwhfTVjiztzcCblN1qt/Es7yhBs\neQFr+2b8N04zCMDLlL7grn9imW/XLiVSRvVrkXDyqIUmwRECAwEAAaNjMGEwDwYD\nVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFNPBCyPjw6jy\nMr5fVME/eDgTVQPZMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgTVQPZMA0G\nCSqGSIb3DQEBCwUAA4IBAQAVnbqXnnVFGz83S2tkry/Xq0wviklVWOsIn+emjV13\nh4SC4WgQWHViIwVz1XjUvpguIgMg+R1uE9cPrs0G2Pi6JkrzJB3Btre6QeRvMGhN\nT4HsEdyfg2KvcNj0VhkHAFD53R0g9UZywxyVcTFwjFDE4ELUSRGcYsF3lH0AU/cP\nhMLm0TBsUaPDxHUSpKqn63oAjKh2BN9r8Ecg9ii7I0nYakWtjz9W8e9CPTrVHxrl\nV6/lXdvkXEEtblCnNonawqYSbd4Rqs7duk2jgVX9zjguA57/0wmxeb+dDUYvVFMp\nxFQdzaxezPS7myjbpTP+TUyWhbTZDJLKxhMf9LAOV3A1\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ca_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAqaqS7MHbaFSG9qLboq0W2JrTJ9CDIFi1bHwwLHDVcj4J44LU\nmxhsSjObbN77K3GVL7VivxTZJvsPjcC5V3bsd5CWUxPsM/klXqZCRAbiazXJPPyg\n3gfVXpq7cbLorwFk5/utj7XKIkzMbbKoLeKlKEPy6n9FeZdBYidK/0ZTqVTtkr5s\nroh7Ow8RQ+IVXN/GylAMwJItIuLeNmYPhLRSV3h3qkUbbWlYFJJHEjx3zEmSqo4A\nxzjTRXrmudoeb3q7C9yN5ZkaIjnQfCF9NWOLO3NwJuU3Wq38SzvKEGx5AWv7Zvw3\nTjMIwMuUvuCuf2KZb9cuJVJG9WuRcPKohSbBEQIDAQABAoIBAHzq9ZoTcPNzqJrW\ndVdkkTmRZH4f6ytZpNnm5aESwlSGkMlNfjNi2/gV3w1dbxghXHHv3nBJbUhh+KWp\ni8x15sxxS/yG4pWxjm4NgySK3RGKXEvmxJo0skghLHkd/Gs9X4ndp3/znIt8d9cc\nfu6pvGfU/3jfk8teLemSfIlOwjIc/nOeYDkTKDdAIcJYfCJf36h1NTd6a7gvGuok\nGU6PQ//AJyu4tzLRJKSV1llNkXVtqMF3ABF+MtONvfJrD7J0hCDXeg06ZFZjuhsv\nReMbDeItAdrXl62RAWGWgsr6FkzrkW24pOo5zYZqQV1yKgc3vBnp+JLseWgGSbBE\nFQb0lzECgYEA1wU6fkSx0n5Na8B04Y+T/8zA5a1gdGZv+G45EmzL3Av2HHdoMdEC\nAEzn+sCJq3vSwq1/puY2EQPW/SRwpjsnWVlhDGyLfqNcGspbXEw8FLhrQn8OOFnV\nUxz1mgJ6+//LCT8QdFnvXb1vQ1UPYte5hlK5I5mzDYoVKhsxwr+9e4MCgYEAygCI\npLZuhC4vjKusMnMlL2EbSxW+JryahhCBWURtuN+S32QDdRAKRDREDnunlAoDE+c9\nwLJPft4V0uAWKYxEqUATwfZonrIougIc4MlmsvIB2qLjL3JhRZ1vZg5tP2z1kQQx\nhnTaz9T97JGBSqvfz0IuWP+CYcfd4BL63nRmCNsCgYEAncdOZr1NpuHbOcgdfC55\nMWkz2tVgQ8mvPCLVajm/VZbJX5YXNQbjawkD6LJD0zunKgdZ9kRTRCUHKvYEW1Zo\nx12Yox87NQ/2VApNA+tE4wd4XXESJDN3H+fOKNbNHAFVIVHPZzFfPe/1pvjRs3wM\nvQ4SoJb2FmdOpHkUCI3eIrUCgYEAoPF77YBM3ZPO3s2JDUkgv3cnP9O7BTIQC/Dq\nni0fHsVDSClvd72n8F+UOdXb1t+DREBhuz5WiEZ6LOHNxpyzuAYNt+STxp08RbeZ\nOqA86yZWw9Q0HoGLbuPY9f9Ym8g8f6ZPr0k7bYvOJMZptQzr7T5VnyM0xqNv7DAz\nITqscTECgYBZHZGuenCk+6BFrJf5sRXwNKIG8jb4nVP0lMpRisAeLBTCTI2fP7p9\nKj4FRSkeAMnlcKBXarNPkmi5YOZ6E+16mxILEmF6nBiv3orMmHAO0Hts7JlL9jZy\n1U/tcbLU4ZipnRboRIdcg8IByI6ty9AnzqAdhbHCqCwVxe/hDK8jhQ==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/certs.sh",
    "content": "#!/bin/bash\n\nset -e\n\nreadonly DEFAULT_VALIDITY_DAYS=${DEFAULT_VALIDITY_DAYS:-730}\nreadonly HERE=$(cd \"$(dirname \"$0\")\" && pwd)\n\ncd \"$HERE\" || exit 1\ntrap cleanup EXIT\n\ncleanup() {\n    rm ./*csr\n    rm ./*srl\n    rm ./crl_*\n    rm ./intermediate_crl_*\n}\n\n# $1=<CA name> $2=[issuer name]\ngenerate_ca() {\n    local extra_args=()\n    if [[ -n \"$2\" ]]; then\n\textra_args=(-CA \"${2}_cert.pem\" -CAkey \"${2}_key.pem\" -CAcreateserial);\n    else\n\textra_args=(-signkey \"${1}_key.pem\");\n    fi\n    openssl genrsa -out \"${1}_key.pem\" 2048\n    openssl req -new -key \"${1}_key.pem\" -out \"${1}_cert.csr\" -config \"${1}_cert.cfg\" -batch -sha256\n    openssl x509 -req -days \"${DEFAULT_VALIDITY_DAYS}\" -in \"${1}_cert.csr\" -out \"${1}_cert.pem\" \\\n\t    -extensions v3_ca -extfile \"${1}_cert.cfg\" \"${extra_args[@]}\"\n    generate_info_header \"$1\"\n}\n\n# $1=<certificate name> $2=[key size] $3=[password]\ngenerate_rsa_key() {\n    local keysize extra_args=()\n    keysize=\"${2:-2048}\"\n    if [[ -n \"$3\" ]]; then\n\techo -n \"$3\" > \"${1}_password.txt\"\n\textra_args=(-aes128 -passout \"file:${1}_password.txt\")\n    fi\n    openssl genrsa -out \"${1}_key.pem\" \"${extra_args[@]}\" \"$keysize\"\n}\n\n# $1=<certificate name> $2=[curve]\ngenerate_ecdsa_key() {\n    local curve\n    curve=\"${2:-secp256r1}\"\n    openssl ecparam -name \"$curve\" -genkey -out \"${1}_key.pem\"\n}\n\n# $1=<certificate name>\ngenerate_info_header() {\n    local prefix\n    prefix=\"TEST_$(echo \"$1\" | tr '[:lower:]' '[:upper:]')\"\n    {\n\techo \"// NOLINT(namespace-envoy)\"\n\techo \"constexpr char ${prefix}_CERT_256_HASH[] =\"\n\techo \"    \\\"$(openssl x509 -in \"${1}_cert.pem\" -outform DER | openssl dgst -sha256 | cut -d\" \" -f2)\\\";\"\n\techo \"constexpr char ${prefix}_CERT_1_HASH[] = \\\"$(openssl x509 -in \"${1}_cert.pem\" -outform DER | openssl dgst -sha1 | cut -d\" \" -f2)\\\";\"\n\techo \"constexpr char ${prefix}_CERT_SPKI[] = 
\\\"$(openssl x509 -in \"${1}_cert.pem\" -noout -pubkey | openssl pkey -pubin -outform DER | openssl dgst -sha256 -binary | openssl enc -base64)\\\";\"\n\techo \"constexpr char ${prefix}_CERT_SERIAL[] = \\\"$(openssl x509 -in \"${1}_cert.pem\" -noout -serial | cut -d\"=\" -f2 | awk '{print tolower($0)}')\\\";\"\n\techo \"constexpr char ${prefix}_CERT_NOT_BEFORE[] = \\\"$(openssl x509 -in \"${1}_cert.pem\" -noout -startdate | cut -d\"=\" -f2)\\\";\"\n\techo \"constexpr char ${prefix}_CERT_NOT_AFTER[] = \\\"$(openssl x509 -in \"${1}_cert.pem\" -noout -enddate | cut -d\"=\" -f2)\\\";\"\n    } > \"${1}_cert_info.h\"\n}\n\n# $1=<certificate name> $2=<CA name> $3=[days]\ngenerate_x509_cert() {\n    local days extra_args=()\n    days=\"${3:-${DEFAULT_VALIDITY_DAYS}}\"\n    if [[ -f \"${1}_password.txt\" ]]; then\n\textra_args=(-passin \"file:${1}_password.txt\")\n    fi\n    openssl req -new -key \"${1}_key.pem\" -out \"${1}_cert.csr\" -config \"${1}_cert.cfg\" -batch -sha256 \"${extra_args[@]}\"\n    openssl x509 -req -days \"$days\" -in \"${1}_cert.csr\" -sha256 -CA \"${2}_cert.pem\" -CAkey \\\n\t    \"${2}_key.pem\" -CAcreateserial -out \"${1}_cert.pem\" -extensions v3_ca -extfile \"${1}_cert.cfg\" \"${extra_args[@]}\"\n    generate_info_header \"$1\"\n}\n\n# $1=<certificate name> $2=<CA name> $3=[days]\n#\n# Generate a certificate without a subject CN. 
For this to work, the config\n# must have an empty [req_distinguished_name] section.\ngenerate_x509_cert_nosubject() {\n    local days\n    days=\"${3:-${DEFAULT_VALIDITY_DAYS}}\"\n    openssl req -new -key \"${1}_key.pem\" -out \"${1}_cert.csr\" -config \"${1}_cert.cfg\" -subj / -batch -sha256\n    openssl x509 -req -days \"$days\" -in \"${1}_cert.csr\" -sha256 -CA \"${2}_cert.pem\" -CAkey \\\n\t    \"${2}_key.pem\" -CAcreateserial -out \"${1}_cert.pem\" -extensions v3_ca -extfile \"${1}_cert.cfg\"\n    generate_info_header \"$1\"\n}\n\n# $1=<certificate name> $2=[certificate file name]\ngenerate_selfsigned_x509_cert() {\n    local output_prefix\n    output_prefix=\"${2:-$1}\"\n    openssl req -new -x509 -days \"${DEFAULT_VALIDITY_DAYS}\" -key \"${1}_key.pem\" -out \"${output_prefix}_cert.pem\" -config \"${1}_cert.cfg\" -batch -sha256\n    generate_info_header \"$output_prefix\"\n}\n\n# Generate ca_cert.pem.\ngenerate_ca ca\n\n# Generate intermediate_ca_cert.pem.\ngenerate_ca intermediate_ca ca\n\n# Concatenate intermediate_ca_cert.pem and ca_cert.pem to create valid certificate chain.\ncat intermediate_ca_cert.pem ca_cert.pem > intermediate_ca_cert_chain.pem\n\n# Generate fake_ca_cert.pem.\ngenerate_ca fake_ca\n\n# Concatenate Fake CA (fake_ca_cert.pem) and Test CA (ca_cert.pem) to create CA file with multiple entries.\ncat fake_ca_cert.pem ca_cert.pem > ca_certificates.pem\n\n# Generate no_san_cert.pem.\ngenerate_rsa_key no_san\ngenerate_x509_cert no_san ca\n\n# Concatenate no_san_cert.pem and Test Intermediate CA (intermediate_ca_cert.pem) to create valid certificate chain.\ncat no_san_cert.pem intermediate_ca_cert.pem > no_san_chain.pem\n\n# Generate san_dns_cert.pem.\ngenerate_rsa_key san_dns\ngenerate_x509_cert san_dns ca\n\n# Generate san_dns2_cert.pem (duplicate of san_dns_cert.pem, but with a different private key).\ncp -f san_dns_cert.cfg san_dns2_cert.cfg\ngenerate_rsa_key san_dns2\ngenerate_x509_cert san_dns2 ca\nrm -f san_dns2_cert.cfg\n\n# Generate 
san_dns3_cert.pm (signed by intermediate_ca_cert.pem).\ncp -f san_dns_cert.cfg san_dns3_cert.cfg\ngenerate_rsa_key san_dns3\ngenerate_x509_cert san_dns3 intermediate_ca\nrm -f san_dns3_cert.cfg\n\n# Concatenate san_dns3_cert.pem and Test Intermediate CA (intermediate_ca_cert.pem) to create valid certificate chain.\ncat san_dns3_cert.pem intermediate_ca_cert.pem > san_dns3_chain.pem\n\n# Generate san_dns4_cert.pm (signed by intermediate_ca_cert.pem).\ncp -f san_dns_cert.cfg san_dns4_cert.cfg\ngenerate_rsa_key san_dns4\ngenerate_x509_cert san_dns4 intermediate_ca\nrm -f san_dns4_cert.cfg\n\n# Generate san_multiple_dns_cert.pem.\ngenerate_rsa_key san_multiple_dns\ngenerate_x509_cert san_multiple_dns ca\n\n# Generate san_only_dns_cert.pem.\ngenerate_rsa_key san_only_dns\ngenerate_x509_cert san_only_dns ca\n\n# Generate san_uri_cert.pem.\ngenerate_rsa_key san_uri\ngenerate_x509_cert san_uri ca\n\n# Generate san_ip_cert.pem.\ngenerate_rsa_key san_ip\ngenerate_x509_cert san_ip ca\n\n# Concatenate san_ip_cert.pem and Test Intermediate CA (intermediate_ca_cert.pem) to create valid certificate chain.\ncat san_ip_cert.pem intermediate_ca_cert.pem > san_ip_chain.pem\n\n# Generate certificate with extensions\ngenerate_rsa_key extensions\ngenerate_x509_cert extensions ca\n\n# Generate password_protected_cert.pem.\ncp -f san_uri_cert.cfg password_protected_cert.cfg\ngenerate_rsa_key password_protected \"\" \"p4ssw0rd\"\ngenerate_x509_cert password_protected ca\nrm -f password_protected_cert.cfg\n\n# Generate selfsigned*_cert.pem.\ngenerate_rsa_key selfsigned\ngenerate_selfsigned_x509_cert selfsigned\ngenerate_selfsigned_x509_cert selfsigned selfsigned2\n\n# Generate selfsigned_rsa_1024.pem\ncp -f selfsigned_cert.cfg selfsigned_rsa_1024_cert.cfg\ngenerate_rsa_key selfsigned_rsa_1024 1024\ngenerate_selfsigned_x509_cert selfsigned_rsa_1024\nrm -f selfsigned_rsa_1024_cert.cfg\n\n# Generate selfsigned_rsa_3072.pem\ncp -f selfsigned_cert.cfg 
selfsigned_rsa_3072_cert.cfg\ngenerate_rsa_key selfsigned_rsa_3072 3072\ngenerate_selfsigned_x509_cert selfsigned_rsa_3072\nrm -f selfsigned_rsa_3072_cert.cfg\n\n# Generate selfsigned_rsa_4096.pem\ncp -f selfsigned_cert.cfg selfsigned_rsa_4096_cert.cfg\ngenerate_rsa_key selfsigned_rsa_4096 4096\ngenerate_selfsigned_x509_cert selfsigned_rsa_4096\nrm -f selfsigned_rsa_4096_cert.cfg\n\n# Generate selfsigned_ecdsa_p256_cert.pem.\ncp -f selfsigned_cert.cfg selfsigned_ecdsa_p256_cert.cfg\ngenerate_ecdsa_key selfsigned_ecdsa_p256\ngenerate_selfsigned_x509_cert selfsigned_ecdsa_p256\ngenerate_selfsigned_x509_cert selfsigned_ecdsa_p256 selfsigned2_ecdsa_p256\nrm -f selfsigned_ecdsa_p256_cert.cfg\n\n# Generate selfsigned_ecdsa_p384_cert.pem.\ncp -f selfsigned_cert.cfg selfsigned_ecdsa_p384_cert.cfg\ngenerate_ecdsa_key selfsigned_ecdsa_p384 secp384r1\ngenerate_selfsigned_x509_cert selfsigned_ecdsa_p384\nrm -f selfsigned_ecdsa_p384_cert.cfg\n\n# Generate long_validity_cert.pem as a self-signed, with expiry that exceeds 32bit time_t.\ncp -f selfsigned_cert.cfg long_validity_cert.cfg\ngenerate_rsa_key long_validity\ngenerate_x509_cert long_validity ca 18250\nrm -f long_validity_cert.cfg\n\n# Generate expired_cert.pem as a self-signed, expired cert (will fail on macOS 10.13+ because of negative days value).\ncp -f selfsigned_cert.cfg expired_cert.cfg\ngenerate_rsa_key expired\ngenerate_x509_cert expired ca -365\nrm -f expired_cert.cfg\n\n# Generate expired_san_uri_cert.pem as a CA signed, expired cert (will fail on macOS 10.13+ because of negative days value).\ncp -f san_uri_cert.cfg expired_san_uri_cert.cfg\ngenerate_rsa_key expired_san_uri\ngenerate_x509_cert expired_san_uri ca -365\nrm -f expired_san_uri_cert.cfg\n\n# Initialize information for root CRL process\ntouch crl_index.txt crl_index.txt.attr\necho 00 > crl_number\n\n# Revoke the certificate and generate a CRL (using root)\nopenssl ca -revoke san_dns_cert.pem -keyfile ca_key.pem -cert ca_cert.pem -config 
ca_cert.cfg\nopenssl ca -gencrl -keyfile ca_key.pem -cert ca_cert.pem -out ca_cert.crl -config ca_cert.cfg\ncat ca_cert.pem ca_cert.crl > ca_cert_with_crl.pem\n\n# Initialize information for intermediate CRL process\ntouch intermediate_crl_index.txt intermediate_crl_index.txt.attr\necho 00 > intermediate_crl_number\n\n# Revoke the certificate and generate a CRL (using intermediate)\nopenssl ca -revoke san_dns3_cert.pem -keyfile intermediate_ca_key.pem -cert intermediate_ca_cert.pem -config intermediate_ca_cert.cfg\nopenssl ca -gencrl -keyfile intermediate_ca_key.pem -cert intermediate_ca_cert.pem -out intermediate_ca_cert.crl -config intermediate_ca_cert.cfg\ncat ca_cert.crl intermediate_ca_cert.crl > intermediate_ca_cert_chain.crl\ncat ca_cert.pem intermediate_ca_cert.pem intermediate_ca_cert.crl > intermediate_ca_cert_chain_with_crl.pem\ncat ca_cert.pem intermediate_ca_cert.pem ca_cert.crl intermediate_ca_cert.crl > intermediate_ca_cert_chain_with_crl_chain.pem\n\n# Write session ticket key files\nopenssl rand 80 > ticket_key_a\nopenssl rand 80 > ticket_key_b\nopenssl rand 79 > ticket_key_wrong_len\n\n# Generate a certificate with no subject CN and no altnames.\ngenerate_rsa_key no_subject\ngenerate_x509_cert_nosubject no_subject ca\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/expired_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEHDCCAwSgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMuMwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzUzWhcNMTkw\nODIxMTY1NzUzWjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD0Qsguvgn0tLOSVLDUNzFfdnlq7Zu2Wl4G\nVVNNZ6wVgraTxR3BAVBzJqSBz1ALZXCRCnInjbAQKUJP6Sl9xRXNcJzMAQih5Bh5\ncW7fqlFf/DuewGrtgqWaJghTSsCPWP67K920/0aPnvfN6wvLH/KmXEMk76ZnsSpj\nhvyQT1DW2qswRLz/FzMKIbL/OhElN8z3tDGButN8AhLbLVGcblGJQhME+IgcxEGk\n1aqEvL5qXRn1Ho31g8TEDR7kKLPRcuOMGbpKdE3097FpceXDzIeNqB9ctEdYRGnx\nkSOK4pLPMSLJ+jTHZ6rQUX9OunfztWwdRjvBh/wAYpO8k9JnRBt5AgMBAAGjgZ0w\ngZowDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUH\nAwIGCCsGAQUFBwMBMB4GA1UdEQQXMBWCE3NlcnZlcjEuZXhhbXBsZS5jb20wHQYD\nVR0OBBYEFE6Wyo6lz5MuVSBa9uXEK/fRc4eTMB8GA1UdIwQYMBaAFNPBCyPjw6jy\nMr5fVME/eDgTVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQA+qVS3SiaHAOTpeAoAlrD3\na8IWtfDbK/AVpXU7PoDtyrBT21ZHpFcluBhi7TSQeaOAFJIMKf80d9ly7sOqdJZH\nzju0NxVBr9jPlr9COBJ0lZfD6dSUbefEUmslni9rd9pWm1cghjbC1LtDfoN2+N2n\nM9sIh48hWw+d5EZMKVMPpsoKRUokTfq6uqUAx/ddalX0edvq4iUyQTPxqJAfOf1H\nutDIGJCdmmR9qHfLedOsGIq4xzVsgAxgvxG+3utkegf8rkRRvHGoV6yykYM1guDV\nnsalrTQxvro3XDa5Jy/yFjvEzb0ONnZJvt3EN9zY8sEUy1tdA9jsuKvbXMIXNwve\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/expired_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_EXPIRED_CERT_256_HASH[] =\n    \"67fead0bf8263f4392a41490c8e38fb2bf388a6cb7892f8ecdfad7bd7cee03e2\";\nconstexpr char TEST_EXPIRED_CERT_1_HASH[] = \"a0857f4e1b98864db1c5b50f57068da09db87547\";\nconstexpr char TEST_EXPIRED_CERT_SPKI[] = \"6rf+Ws8Kf5n1aYNWbBCw0vmKa6uz81L66vjX2JRscqY=\";\nconstexpr char TEST_EXPIRED_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832e3\";\nconstexpr char TEST_EXPIRED_CERT_NOT_BEFORE[] = \"Aug 20 16:57:53 2020 GMT\";\nconstexpr char TEST_EXPIRED_CERT_NOT_AFTER[] = \"Aug 21 16:57:53 2019 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/expired_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA9ELILr4J9LSzklSw1DcxX3Z5au2btlpeBlVTTWesFYK2k8Ud\nwQFQcyakgc9QC2VwkQpyJ42wEClCT+kpfcUVzXCczAEIoeQYeXFu36pRX/w7nsBq\n7YKlmiYIU0rAj1j+uyvdtP9Gj573zesLyx/yplxDJO+mZ7EqY4b8kE9Q1tqrMES8\n/xczCiGy/zoRJTfM97QxgbrTfAIS2y1RnG5RiUITBPiIHMRBpNWqhLy+al0Z9R6N\n9YPExA0e5Ciz0XLjjBm6SnRN9PexaXHlw8yHjagfXLRHWERp8ZEjiuKSzzEiyfo0\nx2eq0FF/Trp387VsHUY7wYf8AGKTvJPSZ0QbeQIDAQABAoIBAAzLhsL0D39zC8kJ\niK3FKl/uy9NvGO4Hc89vHsr6OEh0LW42MCXRFM7DYnCuVGoUaT4fxvUNdgCSuLeI\n5jSBY2/8iyogj7wzP5j0+TLcMCEQDGt3duJ0KoqVnXAHhWi857MUTCvUB3U0hZSS\n0G5WgRZ31MiViSj7XdYS1x4UGY73kTb907wQf8fBe6LxgyH7SMUh6tjW03NpdF4Y\nkiLdfRlpAAzQ1KOHm5jp7ertz0THs45tn0kiPNf6LGHJFncNO1ZNEPYVbutiVzWB\nyddJSfFM2e7Ng9ujSCIH5N5qC9S/gRb1bIubuD4tolonqWz5uyfXRP2fispuKLpZ\nv3IQSwUCgYEA/LpxNRv3+zR2CRZy/t/uby5V2AvxQb85EPtaBicuA52D4L8ZcgFi\n6FNkg1u79cgWo8JPmYt+Fuo4kK6/3+DbIq+VMvy54guFrUfbf8EV/34HKWZ0qP+q\nrCDeBKZIcjRQejRJViNAIHjL6tX5cn3AsU0LnW5Wg8931E3NNalEj2sCgYEA92xH\nNbgAuvwhFS8pM0S7xpn3EjBjA3CBvAkRPtK3PynaRmuv/6Jopba+a6BWAL3LPxTR\nXMGactmH3tJ+gll2F9hi4LPJHJUpVyOs96QS1R9z+jz7fI3obiaaS9X+G5/GMrqj\ntS1+Rt0DCtSfpSSWiaD+hv4RmNrd1dcjisjnrasCgYAsu5NuIxUxkYeS9DZGhrxd\nUYZAR1zclkwzhUnZ7PVoDmlZTnnig/xNwvT3izUpu9DGC46rTtVymOKKOWQc7F/d\nM9bkv3bnW8K9eQqRAkwyfMwnt8rOXvKGpQqnKAeKr1fPSZHxfSvMk5UEa/tLGJcS\nyuBTbURC8GX1XhAEf4FzIQKBgQCAKxH0TN3r/sGwdJ62FSvkZEttm60e0gAShOvv\nDD6qtKaChjREi0AEX9bib8s9VRPdGI6fqxaz2eWdUpMAORGIpqoJ1ngem1Iv9qYa\nHl83EbUFcuOfkO9PaCXTlQWuD6UfEDnPl8JrOP/jc2NDq+FjaAeRrDk3YmE57dlW\ngl4pQwKBgQDaKRH6cA/rh5C7eO887Qh0z+yQ3tRQz67vSBGNgCq9MMNb4qJl0ShW\n/57a4K+AvJGcx5ig29SYFyiFLT8Eh7nitYYaPNOwGOSQFnz/BW0PjzacMdZkwDD0\nMXSoc0jxyvwbLtoyjJns8DbNskYpDC3RrYJWbCDuX5R3uoqxkqm3mw==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/expired_san_uri_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEJDCCAwygAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMuQwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzUzWhcNMTkw\nODIxMTY1NzUzWjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIUKXZ/OzIb7B5HYNQl5LpbF1T3N9v97iz\nlBgC1BAHlsAXSOEYfMsTwmhq+M4Q+3dPE3IuzrxJZVb8nM9shhGL6eJzuWVfd50F\nkeUdZiK+dLTbgEQCWxdF6moTWabogwFAefpzC6hoe0MMhocVyhR0khg9l40kUMZy\nRDMHfyxIToPl7FUlsvN49pxopYyd5b3BG2idf56bPZmAIs90+SHeGKpEpRD+2CCO\n3Ka3TI+0tEE9y7nxP/fniUh1IBD5jPCq9jiEr0czC6H/whvLDiyK+O0HSfRtwGQs\njZhy05vb+X0WyP4BhIHPS8MMAGfK2Tz3SWaZSj4T/fcFNgvvtl6rAgMBAAGjgaUw\ngaIwDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUH\nAwIGCCsGAQUFBwMBMCYGA1UdEQQfMB2GG3NwaWZmZTovL2x5ZnQuY29tL3Rlc3Qt\ndGVhbTAdBgNVHQ4EFgQUAf/jCDomlt3JoThIvlh0sIsBMoEwHwYDVR0jBBgwFoAU\n08ELI+PDqPIyvl9UwT94OBNVA9kwDQYJKoZIhvcNAQELBQADggEBAC7+Dhq7+PEb\nLnvUYDH3LTQfJrD+E5gZi9QMtCmuREMHA4IOHhVC7yQryhDKQH5nMvqtD1rEkpZ/\nlPuxjm8QdBGjlJqxj4M8aKuWrdZyoZG7UGOL7VrPTiXDbDmNXtvxqEFW5rbm7mMW\nHpS1WjWLPEJVM9KUeEaQYYTn6YRtnj5WMqPn/hMUm7CTnSB1AC6UOvyYRrBXNSi9\niGC3TBqp+qRh4dqyU67ekezw6G0kKlvY4xdF+FiMj0r0FZW7WTgKGsh5BbvRZh4R\n9lwBJY9DAvgZIin17wC374phUQF/uS33gUUEZtwJy0wTxjGhHjxOtTERwNvVj1Jl\nV5Zy+IBHOy4=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/expired_san_uri_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_EXPIRED_SAN_URI_CERT_256_HASH[] =\n    \"216d8f237b0cf119f679fd3066d3d499055b47307d621c7559c2f38fdb8945c9\";\nconstexpr char TEST_EXPIRED_SAN_URI_CERT_1_HASH[] = \"79c133a7ab42b99c5966838503c3063308e5cacd\";\nconstexpr char TEST_EXPIRED_SAN_URI_CERT_SPKI[] = \"BwhAeB2kJm3djaNsty8L3SJu0p2wyl2PecoKk2pTDhw=\";\nconstexpr char TEST_EXPIRED_SAN_URI_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832e4\";\nconstexpr char TEST_EXPIRED_SAN_URI_CERT_NOT_BEFORE[] = \"Aug 20 16:57:53 2020 GMT\";\nconstexpr char TEST_EXPIRED_SAN_URI_CERT_NOT_AFTER[] = \"Aug 21 16:57:53 2019 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/expired_san_uri_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAyFCl2fzsyG+weR2DUJeS6WxdU9zfb/e4s5QYAtQQB5bAF0jh\nGHzLE8JoavjOEPt3TxNyLs68SWVW/JzPbIYRi+nic7llX3edBZHlHWYivnS024BE\nAlsXRepqE1mm6IMBQHn6cwuoaHtDDIaHFcoUdJIYPZeNJFDGckQzB38sSE6D5exV\nJbLzePacaKWMneW9wRtonX+emz2ZgCLPdPkh3hiqRKUQ/tggjtymt0yPtLRBPcu5\n8T/354lIdSAQ+YzwqvY4hK9HMwuh/8Ibyw4sivjtB0n0bcBkLI2YctOb2/l9Fsj+\nAYSBz0vDDABnytk890lmmUo+E/33BTYL77ZeqwIDAQABAoIBAGjRMofHfks/jlNB\n6zWfqsMz7f31BIH9WQu2wX2xY5Xk/yzv+MEsg8MaJjNbb9CZFQwuu7/cc8/gFE1o\n7Gz4FnRk9Rbi+zWiTGretcnVaPbJ8N0u1+d3rMzr5YeskNb3vg70BOZwQjYx0Zrb\nRhJTxLJexApuo+9NyRl/sEgK9qv6ZCCU06tsrAWoPrhjWvONi96Uu93JT/D6mxvF\n9K4vDoMh53Ytl310AGaJ0as9K88L96LF1tvjKQBU/1GcnnViqpDPnQUvGPBofT2G\nIoRndoOqta3aM1lqsWxbMr+fQPFn7CnfJ05qXlacZBtB4XGejjZUxsljLQ2IGWiR\nQaSk72kCgYEA8rbVBWOecJ3OasHbmCNUgkFhIpH7UFGqePPdumTpMHqCsHg0k/up\nf3kK5CFglZS5hJ3WkavyIitsBEr2jV+7onF0EdFP74M0doUQ43Hwr5FEehtjZEgg\n2ZDrZ/GbT1bloUM7nHJf6QveKyrSAJa/uew3tCJA6QaGrZJqTOUl7+0CgYEA00es\nhY6dAb68RqN7NfaQ6IQijrTJQ0Y0MWx0BPHQ0/PpoAILtlQ4KcoKKinGv5Mdc6m/\nj49CrOl+myaWPCcMbAG6XU8q86Dt9p5ye7doIdNoR7WbpkTTsZUvwag5lg2ss7P+\nuNn/bCb4MSuSJXRvCHmVDywvMwV/5QQGKLAVRfcCgYBuHIwf2cXziWQwAqrBJqYc\nQdTzfg7prWMNZHVfLpCsMahArwgLaszGy/8o6AuNUN+pToIdrEg5QEHM4MVh9eCG\nHbpuqur82iVe75dt2C89N0Y9r1c0E5Tzn09s5Kt1HxMmSf4tR6ZDb+Lm6pJDOiDZ\nFFVhH3kbPp6y+A3JvNutMQKBgQCRHGUtxemoTSxbB+WUBqk4SajCgwDIp+spFugI\njV10dc48C6unquh4K0AX/WF4Kr7LtgbPvwF4aVZ/kV0WK2uosvchJ0LE2f8Wp3TR\n7AIlWWIB/oXNCPCsw15tj/EdUn8PXczRgUihK5LDkr1p5DJui7MJHLu+O0TAMqdY\nQrgZuQKBgQCQW86IQz2Ruk1dXHC+7PXTnGQ9ptDsFQ+MlGFyfmwaVNAB2Y8okCW0\ncla6FcZg6nMBPZHhUBRvFDsVs3JjEU5OlAKGA2Y5KLNJ2ilHhetHHMowi6X6doM6\ntRHug80HPvvv6jjAU8gmk0OeKnx++T9m4wV5KpbFvdpubTmRFc2zRw==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/extensions_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Cert\ncommonName_default = Test Cert\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectKeyIdentifier = hash\n1.2.3.4.5.6.7.8 = ASN1:UTF8String:Something\n1.2.3.4.5.6.7.9 = DER:30:03:01:01:FF\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n1.2.3.4.5.6.7.8 = ASN1:UTF8String:Something\n1.2.3.4.5.6.7.9 = DER:30:03:01:01:FF"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/extensions_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEJDCCAwygAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMuAwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ5WhcNMjIw\nODIwMTY1NzQ5WjB4MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzESMBAGA1UEAwwJVGVzdCBDZXJ0MIIBIjANBgkqhkiG\n9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0sP+MO/B/+GFa6SnLxeeXYH6+ZDN9lkixC04\nstiWrmMDzD2VpmXvUIh0cIo7T5Do1aQJZ0vq8wO2SL1cB2YCjZ4EYyJHgs1zWmjD\n+vdi0TYQ7GhxXNZtlBmv4g5BtMnSkk+VFseFuOtbPtheL2JD1CCJ/gkqjiQQ0kk4\noir1nPgrlSJye71ODvT8ffKyZwsxl6JblNIX9+kXupolp+w4hL1svLI49BAojGUi\nwAJsdnm3KI3EDFcjLExglEZyKtM7eD/fxXfvseF+rn9j3Gph7+EmjfKAgue81I3X\nBV7qJMODtmNaLTm6WuEokfPan1w2w6lqCTXxhxwQjLZf+ScTfwIDAQABo4GnMIGk\nMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMC\nBggrBgEFBQcDATAdBgNVHQ4EFgQUjvCzskCOhF3Q9IhFI0tSD86Fp8UwHwYDVR0j\nBBgwFoAU08ELI+PDqPIyvl9UwT94OBNVA9kwFgYHKgMEBQYHCAQLDAlTb21ldGhp\nbmcwEAYHKgMEBQYHCQQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGYaqkqEiqPI\ntNm7aUIS0mJq0KZ5azEWnIMPSV8iUktPY6m15jYfIPeCwOgr592XSO20LMU0jaJS\nN0cKPCU6W7kJ12Fyv1AzajmNEAiwPTTSRoIr6Hupr+YAPv5jQR8E1N203dXtz2vt\nRNG161UfSmc31/uot5tCrw3VOvhngtxTz9FTxz50tyvllBRN/m+/eoJCIZA5iZk/\n5EdnSSVnAuNvCmNvWZrPi7mLDe0zGlbA6qMe+ph5uuOt9Fyuly4He6GJULKq0vGr\nqRNGU6GNXslkIYbiJjNmzedxegJqGnVyzH/CCJFIr3YFtzlzP0ThHz7J7HqK/ILn\nBctMbxggu80=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/extensions_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_EXTENSIONS_CERT_256_HASH[] =\n    \"1ed6b087e4d1d93bd523032df13b9379f6d438796cdfc3b6ae9b3138a3a330a2\";\nconstexpr char TEST_EXTENSIONS_CERT_1_HASH[] = \"8269ea5591173cc89db0459639f793529221f138\";\nconstexpr char TEST_EXTENSIONS_CERT_SPKI[] = \"tw9gBe4VKhobfMjBTM6uEO1s8jehNBKRVtUtS2ltWA0=\";\nconstexpr char TEST_EXTENSIONS_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832e0\";\nconstexpr char TEST_EXTENSIONS_CERT_NOT_BEFORE[] = \"Aug 20 16:57:49 2020 GMT\";\nconstexpr char TEST_EXTENSIONS_CERT_NOT_AFTER[] = \"Aug 20 16:57:49 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/extensions_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA0sP+MO/B/+GFa6SnLxeeXYH6+ZDN9lkixC04stiWrmMDzD2V\npmXvUIh0cIo7T5Do1aQJZ0vq8wO2SL1cB2YCjZ4EYyJHgs1zWmjD+vdi0TYQ7Ghx\nXNZtlBmv4g5BtMnSkk+VFseFuOtbPtheL2JD1CCJ/gkqjiQQ0kk4oir1nPgrlSJy\ne71ODvT8ffKyZwsxl6JblNIX9+kXupolp+w4hL1svLI49BAojGUiwAJsdnm3KI3E\nDFcjLExglEZyKtM7eD/fxXfvseF+rn9j3Gph7+EmjfKAgue81I3XBV7qJMODtmNa\nLTm6WuEokfPan1w2w6lqCTXxhxwQjLZf+ScTfwIDAQABAoIBAQCkZPdp2Djri050\nDbPRiYyexvmpm4bq9c+mUNd+uG2fi9+0ePy3E588o5KfDVMB1relnBMxnQefkW+n\nAPw9URYAvBYT9PLuVa36AOmMYVWUdaVxoG17yk0iTX7gxdn+V8WFFcnmQK5O9FTx\nifa84tqhJNg9a4zj3cG5JB+mbfex/H2I5pGjIkzU+e/+0aSXn7yw8KQdIyKeOHce\nJVyhFHdFaWiErEIN+GTc7A4RKajRdmqZP8Pl7WfJw70xjrkkpKwVb9biNYyPmL8I\ny5ChP+fCiYkvohl1gGbz4bnL73PXI/JeKYYVpv97tdZCgKVvJxfb+o+pbeXL1rRU\nfZq6i3QhAoGBAPbgqjllgVSjVlW8+UnKYeZqs1YDsiiQ1FeYiFlHW/TdThkynkvZ\nEQCck2UCKO4mkqXpdBxcygaZbeodFi9g7qhzFfWI4YLSlnlObgMzhS24QRz90dBd\nXe64M94yV1Dxm7mX/hsqTB5Qw1gkyefnTpntd/d8EdP+PIFZktgsf98nAoGBANqN\nuiA38EXyUlWUfBxkwAdLHYftQYzbqY/prUNyGJxjAwsSqacAhaWxDd9eju8gj4RP\n/EWwugFDnUso5L8XRIjLEjIFeif55i0kKLASputO7Q1VTArDhaLcLN8QlP762Ke+\nPfZE4N0KQpVMzrjq2hXGZ+BeBI8HGlpJ/ufczt/pAoGBAK68YX9yetGfMlJJCODT\nn+tohjpRL15K9CBz073tJgbwRIDNNQFWSBR4vlJ17uZw4scPr6ELg+qb2hIkODmc\nkA8mNdqt4X+o8dw2SON3KcIscdnFRSYHj56RPhT86JRgDeqUy8QKcgna/Ah9MXM0\nYovMamb+p9e2ULj031ymUnNtAoGASfWgQPrtIp6IYlGnVmx5134B9lfB0Qsc7Iil\ni6d20KVpCDw+kTdKAvJRwX9cW2vnZV0z/9l4eKkK975U+p3DIY+nDqpv1ktpK2mL\nI1qMhiDlc5ej7hiQhrZ/sNUJtufonYtxqH9SoTqxoZimdEl1l9zWlkT6wfQjZN6Z\nlplQoNECgYAtrjTBZxTA39uKmTd1bljs/Jxrz74KpfWxMAlhiN1zEnD7/LaRnsug\n3uDR8QFm5uSzgwnGQxN+IobCdpbxhLxvm5BCfn+0q8j+fpEKLWacAQyRGAi5lVcZ\nxRymZ0Dso5e6iW6KBuXQs71g8cWbbdDvCjBGvns14z8SPewVxBglGw==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/fake_ca_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Fake CA\ncommonName_default = Fake CA\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:TRUE\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:TRUE\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIUFphcD2peb8k4I8znXQlQrleU3t4wDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB0Zha2UgQ0EwHhcNMjAwODIwMTY1NzQ3WhcNMjIw\nODIwMTY1NzQ3WjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHRmFrZSBDQTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAMseSZfV6e+UfQYjNv1wEi5O4Rv4SXDLyPrxB0UE\n2Wny8Mji4BsLWHI1v8+kXnjw1cCiRa7rZQvdtQ8NunUyLyl1NNAePDqKSMZf8r8o\nUsNd7WgT1ziE0CJ/4KLrL/3ednv4d67Pj7njACke8IzrIVTB1Gfk7oROcmXcYNHl\nmuKbWPnFz4TdWptP5QYh24E3GxrceD7tDeW9yGndfSVb7LaOXNhU9tg/pVG+xIea\n77H+6uKZWSh2rGYZdhedBZwPC6b0xuPpcJj/HVPkt5AO+xWhyloSlbmgNypnxKre\nolHvzNbDINgg8UQwUE68sE91ErzbQt4Kcf2NcgVM7LgBGXcCAwEAAaNjMGEwDwYD\nVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFKtN9263XXVl\na4+dQ0aPKPh+WGS1MB8GA1UdIwQYMBaAFKtN9263XXVla4+dQ0aPKPh+WGS1MA0G\nCSqGSIb3DQEBCwUAA4IBAQBBjy26Xe1kCvhJR2cDvWHyGOiGGHUAD74FVzGDhsrD\nivxQ+x6rU2W3zQ2Wxx4ODIyl47yIFnn5UzmgAA0chsNw7t2j+5R62ikEV+vn8YxL\nrK72RV4KzW/XdHcaHY+DdgboHlYYFd7e7rbKODpfNIMB8TeoD9z2YtMefmcHnwEr\nM22h45aAJOaHi/oR8K1BPpwa1Ubkf5bVSdCM1bO3sJZcjdC41dZ1orLdtgcPmSgu\ngbWDzFs8kDHetOTgNn2OuJSm94njQgStSMAY5820BylYQi9uidnrUooK6sKf+Ioc\nO9HhERkI0RpAZHl/mHw8WJHGCEPMqokKC8I8OTVHRwkN\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/fake_ca_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_FAKE_CA_CERT_256_HASH[] =\n    \"c765f9b756e41de667ec7622765100383dbd18723afa1903ed59ec2ae05e9dd8\";\nconstexpr char TEST_FAKE_CA_CERT_1_HASH[] = \"5a5b6a7148d8649db0c20469a8d2db245edd95cb\";\nconstexpr char TEST_FAKE_CA_CERT_SPKI[] = \"UmI2PwrPQmISzkZopazA3nRtCGvWZ8r26UBYNHjYEOI=\";\nconstexpr char TEST_FAKE_CA_CERT_SERIAL[] = \"16985c0f6a5e6fc93823cce75d0950ae5794dede\";\nconstexpr char TEST_FAKE_CA_CERT_NOT_BEFORE[] = \"Aug 20 16:57:47 2020 GMT\";\nconstexpr char TEST_FAKE_CA_CERT_NOT_AFTER[] = \"Aug 20 16:57:47 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/fake_ca_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEAyx5Jl9Xp75R9BiM2/XASLk7hG/hJcMvI+vEHRQTZafLwyOLg\nGwtYcjW/z6ReePDVwKJFrutlC921Dw26dTIvKXU00B48OopIxl/yvyhSw13taBPX\nOITQIn/gousv/d52e/h3rs+PueMAKR7wjOshVMHUZ+TuhE5yZdxg0eWa4ptY+cXP\nhN1am0/lBiHbgTcbGtx4Pu0N5b3Iad19JVvsto5c2FT22D+lUb7Eh5rvsf7q4plZ\nKHasZhl2F50FnA8LpvTG4+lwmP8dU+S3kA77FaHKWhKVuaA3KmfEqt6iUe/M1sMg\n2CDxRDBQTrywT3USvNtC3gpx/Y1yBUzsuAEZdwIDAQABAoIBAQCQISXHgBtHze4J\ndOOIYwYDpiVGW6CBUZmDt5SY/R/pdmI1qgesKRjRsiIxjUkcOIj08DDSNma1TNU1\noWA12S5uFKUtKMdEIuwRF/SAgHR5/SSBPdLyYAvw7xzPfmPHmRwqS91hRheOp+1t\nQG8hD5fPNNsr/d7iM2gpr39kcbC+KonRXUNN/EKRGuRF328PNj7CwfhG778e2PU6\n4mv5tU8I9ivAnUaOmWYgckeVjyOAwJzwVsg7hTRQzKlxXQLSArvIoDORtQApzrCk\nv63HNa0OFx7M+PyEfDXsDVdup6RkRMM43KVrybC87iS1yXkwhh4yonj80jgYdR/2\n/xeFfFeBAoGBAO00jECyWNT/doz0ODuyPNEIJSRjF7+kSfgURQ8c4XnuKAgXbEDY\n4DzyekEXQt+X6EydsdGcKrHeW3MBQg972t4PS9sr3R3lvAmQrv1tD98VIKj+WqFs\nHgOz9SLESASSSfu2HuBqoFWSnGliY5F4e5v0kUAisZ+uEEOX55gBj69fAoGBANs2\nUpcsAfySvzL8F8ANe/O06Jc71q59NpOr034g9QFkLo0sqtcjQ9AplOKFOcIQpCxK\nWERJl2sxjVYctJotYpBEDSzMZCCAiP98ThEeyzUr8rl9wxaJAGd4uEiF7WaXCYZG\nHqVoRD/hJXFRHq+hNNX9tHpCFt4kbxYjl14cowTpAoGBAOwyCP5bGM1vW7LbdCst\nPhW/EIz9BybTBry6IlH8JiDtnnsAJ9Eio5bMtCOgb/yJHM09sVTRNkpK6bDIrekR\nIU1A2f3x3YnpuqWjpju/n7A5L35XWBOovvJc/VmTLUswB+9W18QLKIBNLuZZDGBM\nInmWvBOq+EfSYWuNFRVtaI/tAoGBALIglEAvDHY2sJM3cf0nd99y08WAZFRpft3E\nNBk7ops80u74quIiz8no4AiphWOCrkUnLE5hFl+OZnDg5oiZ9IuSILqUY5BdofUp\nBy62PR251oSQAuTe8qsh121nFp/Uf3LSq7hw33Sg+RH5ftvsPMjeXbcy2O4t4O62\ngRuCXZ1xAoGAQvb/O0HRVo1ZeOIEHqFiQLXdF9neV50PYEFKpdWcLQqKjuZv5zsr\njMfuknxveGEhpC4ahalKdF65cxIaNMPogS3mz+ogiplF0F0jlc0EfF7ctymb0rsI\ncZL6hBXoPZaA2gkUj3TIVSQ0Ro2f1SOu78a2VlEXJB3MDGSOMmcAV38=\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Intermediate CA\ncommonName_default = Test Intermediate CA\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:TRUE, pathlen:0\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:TRUE, pathlen:0\nkeyUsage = critical, cRLSign, keyCertSign\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[ca]\ndefault_ca = CA_default\n\n[CA_default]\ndatabase = intermediate_crl_index.txt\ncrlnumber = intermediate_crl_number\n\ndefault_days = 3650\ndefault_crl_days = 3650\ndefault_md = sha256\npreserve = no\nunique_subject = no\n\n[crl_ext]\nauthorityKeyIdentifier = keyid:always,issuer:always\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert.crl",
    "content": "-----BEGIN X509 CRL-----\nMIICNDCCARwCAQEwDQYJKoZIhvcNAQELBQAwgYMxCzAJBgNVBAYTAlVTMRMwEQYD\nVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQK\nDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMR0wGwYDVQQDDBRUZXN0\nIEludGVybWVkaWF0ZSBDQRcNMjAwODI3MTUxMDUxWhcNMzAwODI1MTUxMDUxWjBU\nMBoCCQCSJ/gDfP7k5RcNMjAwODI3MTUxMDUxWjAaAgkAoqs4rEF5siEXDTIwMDgy\nNzE1MDk0MlowGgIJAPgJXSbIs3whFw0yMDA4MjcxNTA4MTJaoA4wDDAKBgNVHRQE\nAwIBADANBgkqhkiG9w0BAQsFAAOCAQEAbEQ4RcSFSpOXK3B9TdgJSeV6+/52Dewn\nXtUpwh4YxHCnVk+3Zwfr7zneGU6E/d3UrCXk9oJI0TybLxhXVOw7kEbE3UlIFO5O\nGFT/2m1Wt7/O/m5LMjFIa4Y2mi2XcNEbrL+HBIW3Ylvs6gvlQnEHvhUvr6pxXxsZ\nQJKvHlTIlQbH4SUjqSl+QrBJqg6OerUx4FSc3+8o28rG3GeM7W9F8a2pZThYHb77\nBgdotTh65To2z4GnFbZHvolnxWyz/MWwrkiZFAnGOB1y+nlyrI4a4K/GltiG4vTH\nuVv80JOgUd+bB8TV948Wc4lEDiHjY085moWcAxKIDOYOg151ntvrfw==\n-----END X509 CRL-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID4zCCAsugAwIBAgIJAPDlREG4TLxSMA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMRAw\nDgYDVQQDDAdUZXN0IENBMB4XDTIwMDgyNzE1MTA1MVoXDTIyMDgyNzE1MTA1MVow\ngYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1T\nYW4gRnJhbmNpc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2lu\nZWVyaW5nMR0wGwYDVQQDDBRUZXN0IEludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAKGa0pSi8MI88LdnHT8oZJVDpZ6qa9ooYP6m\n3S+xIxRBOVOGEs0a1dxko5iAfWgJJRF8igT1bRQAlNsnK/lZpGOOo8txjWsTQPFD\nFLUSVVn78lnXvyYlIhAMqmhIAmSK+qo02TcsncBNa/iCT9aH9SEf0a7xjiAcyfm6\n3XHScpPC0o47tICqKnkMCJvOxi4yKM0SWIxNpnoXq/ixxcipsA8QFQ4GU2r9LBSB\n8+7+couSKdPDR0AvDA/t47P8pNxQHCCzvspEtX5wGMEOVVhAsh1GgwMNyYh9IbUO\nTkHqR45D2ky+x6emPDPdAgqzh4xDpMR4V+hsPQ/GWdXpKLfKjdECAwEAAaNmMGQw\nEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFKbQ\ndxTWui6GjBedEeOPwmCUdTCwMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgT\nVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQBFEUnhtsWzHM/xoy/vlntkdau/TYrpLQkE\nKXIeDbjtjLnMBKXu5z3epKWrSV12kEQWrkARNTuhW4+5XkZUA9gb2MPMXh2yse4u\nOy3rRhhzbXMas45IFbXqnI+xuS/99vJQrhjXGB49dbUV9P9dJ4hj2Uc27UmAx/zB\n6MHKDXhpRw/R6MXPtpQpZjDTEA4QU2yuwHWOZ56QHJ/aj0QZJvSbdK6DDY7VSKEC\n2yCX1d+S0lF5Hcgrhhi4Me2sKA7JQwKLDDlXMOKpcI0vMZAoKXPbA+tVBQUZICZU\n/t9YW49y8KWCjXK113JDIxQOtTCV8SaLnV9+qL+3ckbih6gz76Vw\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_chain.crl",
    "content": "-----BEGIN X509 CRL-----\nMIIB+DCB4QIBATANBgkqhkiG9w0BAQsFADB2MQswCQYDVQQGEwJVUzETMBEGA1UE\nCAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwE\nTHlmdDEZMBcGA1UECwwQTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBD\nQRcNMjAwODIwMTY1NzUzWhcNMzAwODE4MTY1NzUzWjAnMCUCFG+5aeRHWk8uigny\no72012gx2DLbFw0yMDA4MjAxNjU3NTNaoA4wDDAKBgNVHRQEAwIBADANBgkqhkiG\n9w0BAQsFAAOCAQEAJA/YKjDv8Is9aysvQYlwEiQyTKR2j42zxHr71cS/YCZgEL9h\nJwnrK9Oo+EIO8nSfZfa35G/WvFknzeGrqwCH+amAQk0rWy7rdY3mAcCXgS9Ee5C5\nFzTWKYRFmCp/pFtm0PuqrCVf6IMe8GJ3WpleOFI1DkSpZXEOR4VBh5VU29DFVoWz\n9AvKIcwkYtZ49bojh7HP1db+14ovjC1miakWkW1l6U6/pfTwH8ViP28L5yyyp70l\nI0gtw/OXjb0VZ/F2YVkB4f4Yhhs9+pLfpyHN3hEYl0oQt73WPbylPUrbKY1Gp4Qx\n5BQF3Ub+aOIskQ1/8QDBWommJNj//7NwVvBO2w==\n-----END X509 CRL-----\n-----BEGIN X509 CRL-----\nMIICNDCCARwCAQEwDQYJKoZIhvcNAQELBQAwgYMxCzAJBgNVBAYTAlVTMRMwEQYD\nVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQK\nDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMR0wGwYDVQQDDBRUZXN0\nIEludGVybWVkaWF0ZSBDQRcNMjAwODI3MTUxMDUxWhcNMzAwODI1MTUxMDUxWjBU\nMBoCCQCSJ/gDfP7k5RcNMjAwODI3MTUxMDUxWjAaAgkAoqs4rEF5siEXDTIwMDgy\nNzE1MDk0MlowGgIJAPgJXSbIs3whFw0yMDA4MjcxNTA4MTJaoA4wDDAKBgNVHRQE\nAwIBADANBgkqhkiG9w0BAQsFAAOCAQEAbEQ4RcSFSpOXK3B9TdgJSeV6+/52Dewn\nXtUpwh4YxHCnVk+3Zwfr7zneGU6E/d3UrCXk9oJI0TybLxhXVOw7kEbE3UlIFO5O\nGFT/2m1Wt7/O/m5LMjFIa4Y2mi2XcNEbrL+HBIW3Ylvs6gvlQnEHvhUvr6pxXxsZ\nQJKvHlTIlQbH4SUjqSl+QrBJqg6OerUx4FSc3+8o28rG3GeM7W9F8a2pZThYHb77\nBgdotTh65To2z4GnFbZHvolnxWyz/MWwrkiZFAnGOB1y+nlyrI4a4K/GltiG4vTH\nuVv80JOgUd+bB8TV948Wc4lEDiHjY085moWcAxKIDOYOg151ntvrfw==\n-----END X509 CRL-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_chain.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID4zCCAsugAwIBAgIJAPDlREG4TLxSMA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMRAw\nDgYDVQQDDAdUZXN0IENBMB4XDTIwMDgyNzE1MTA1MVoXDTIyMDgyNzE1MTA1MVow\ngYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1T\nYW4gRnJhbmNpc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2lu\nZWVyaW5nMR0wGwYDVQQDDBRUZXN0IEludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAKGa0pSi8MI88LdnHT8oZJVDpZ6qa9ooYP6m\n3S+xIxRBOVOGEs0a1dxko5iAfWgJJRF8igT1bRQAlNsnK/lZpGOOo8txjWsTQPFD\nFLUSVVn78lnXvyYlIhAMqmhIAmSK+qo02TcsncBNa/iCT9aH9SEf0a7xjiAcyfm6\n3XHScpPC0o47tICqKnkMCJvOxi4yKM0SWIxNpnoXq/ixxcipsA8QFQ4GU2r9LBSB\n8+7+couSKdPDR0AvDA/t47P8pNxQHCCzvspEtX5wGMEOVVhAsh1GgwMNyYh9IbUO\nTkHqR45D2ky+x6emPDPdAgqzh4xDpMR4V+hsPQ/GWdXpKLfKjdECAwEAAaNmMGQw\nEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFKbQ\ndxTWui6GjBedEeOPwmCUdTCwMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgT\nVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQBFEUnhtsWzHM/xoy/vlntkdau/TYrpLQkE\nKXIeDbjtjLnMBKXu5z3epKWrSV12kEQWrkARNTuhW4+5XkZUA9gb2MPMXh2yse4u\nOy3rRhhzbXMas45IFbXqnI+xuS/99vJQrhjXGB49dbUV9P9dJ4hj2Uc27UmAx/zB\n6MHKDXhpRw/R6MXPtpQpZjDTEA4QU2yuwHWOZ56QHJ/aj0QZJvSbdK6DDY7VSKEC\n2yCX1d+S0lF5Hcgrhhi4Me2sKA7JQwKLDDlXMOKpcI0vMZAoKXPbA+tVBQUZICZU\n/t9YW49y8KWCjXK113JDIxQOtTCV8SaLnV9+qL+3ckbih6gz76Vw\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtgwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ2WhcNMjIw\nODIwMTY1NzQ2WjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAKmqkuzB22hUhvai26KtFtia0yfQgyBYtWx8MCxw\n1XI+CeOC1JsYbEozm2ze+ytxlS+1Yr8U2Sb7D43AuVd27HeQllMT7DP5JV6mQkQG\n4ms1yTz8oN4H1V6au3Gy6K8BZOf7rY+1yiJMzG2yqC3ipShD8up/RXmXQWInSv9G\nU6lU7ZK+bK6IezsPEUPiFVzfxspQDMCSLSLi3jZmD4S0Uld4d6pFG21pWBSSRxI8\nd8xJkqqOAMc400V65rnaHm96uwvcjeWZGiI50HwhfTVjiztzcCblN1qt/Es7yhBs\neQFr+2b8N04zCMDLlL7grn9imW/XLiVSRvVrkXDyqIUmwRECAwEAAaNjMGEwDwYD\nVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFNPBCyPjw6jy\nMr5fVME/eDgTVQPZMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgTVQPZMA0G\nCSqGSIb3DQEBCwUAA4IBAQAVnbqXnnVFGz83S2tkry/Xq0wviklVWOsIn+emjV13\nh4SC4WgQWHViIwVz1XjUvpguIgMg+R1uE9cPrs0G2Pi6JkrzJB3Btre6QeRvMGhN\nT4HsEdyfg2KvcNj0VhkHAFD53R0g9UZywxyVcTFwjFDE4ELUSRGcYsF3lH0AU/cP\nhMLm0TBsUaPDxHUSpKqn63oAjKh2BN9r8Ecg9ii7I0nYakWtjz9W8e9CPTrVHxrl\nV6/lXdvkXEEtblCnNonawqYSbd4Rqs7duk2jgVX9zjguA57/0wmxeb+dDUYvVFMp\nxFQdzaxezPS7myjbpTP+TUyWhbTZDJLKxhMf9LAOV3A1\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_chain_with_crl.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtgwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ2WhcNMjIw\nODIwMTY1NzQ2WjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAKmqkuzB22hUhvai26KtFtia0yfQgyBYtWx8MCxw\n1XI+CeOC1JsYbEozm2ze+ytxlS+1Yr8U2Sb7D43AuVd27HeQllMT7DP5JV6mQkQG\n4ms1yTz8oN4H1V6au3Gy6K8BZOf7rY+1yiJMzG2yqC3ipShD8up/RXmXQWInSv9G\nU6lU7ZK+bK6IezsPEUPiFVzfxspQDMCSLSLi3jZmD4S0Uld4d6pFG21pWBSSRxI8\nd8xJkqqOAMc400V65rnaHm96uwvcjeWZGiI50HwhfTVjiztzcCblN1qt/Es7yhBs\neQFr+2b8N04zCMDLlL7grn9imW/XLiVSRvVrkXDyqIUmwRECAwEAAaNjMGEwDwYD\nVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFNPBCyPjw6jy\nMr5fVME/eDgTVQPZMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgTVQPZMA0G\nCSqGSIb3DQEBCwUAA4IBAQAVnbqXnnVFGz83S2tkry/Xq0wviklVWOsIn+emjV13\nh4SC4WgQWHViIwVz1XjUvpguIgMg+R1uE9cPrs0G2Pi6JkrzJB3Btre6QeRvMGhN\nT4HsEdyfg2KvcNj0VhkHAFD53R0g9UZywxyVcTFwjFDE4ELUSRGcYsF3lH0AU/cP\nhMLm0TBsUaPDxHUSpKqn63oAjKh2BN9r8Ecg9ii7I0nYakWtjz9W8e9CPTrVHxrl\nV6/lXdvkXEEtblCnNonawqYSbd4Rqs7duk2jgVX9zjguA57/0wmxeb+dDUYvVFMp\nxFQdzaxezPS7myjbpTP+TUyWhbTZDJLKxhMf9LAOV3A1\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIID4zCCAsugAwIBAgIJAPDlREG4TLxSMA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMRAw\nDgYDVQQDDAdUZXN0IENBMB4XDTIwMDgyNzE1MTA1MVoXDTIyMDgyNzE1MTA1MVow\ngYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1T\nYW4gRnJhbmNpc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2lu\nZWVyaW5nMR0wGwYDVQQDDBRUZXN0IEludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAKGa0pSi8MI88LdnHT8oZJVDpZ6qa9ooYP6m\n3S+xIxRBOVOGEs0a1dxko5iAfWgJJRF8igT1bRQAlNsnK/lZpGOOo8txjWsTQPFD\nFLUSVVn78lnXvyYlIhAMqmhIAmSK+qo02TcsncBNa/iCT9aH9SEf0a7xjiAcyfm6\n3XHScpPC0o47tICqKnkMCJvOxi4yKM0SWIxNpnoXq/ixxcipsA8QFQ4GU2r9LBSB\n8+7+couSKdPDR0AvDA/t47P8pNxQHCCzvspEtX5wGMEOVVhAsh1GgwMNyYh9IbUO\nTkHqR45D2ky+x6emPDPdAgqzh4xDpMR4V+hsPQ/GWdXpKLfKjdECAwEAAaNmMGQw\nEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFKbQ\ndxTWui6GjBedEeOPwmCUdTCwMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgT\nVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQBFEUnhtsWzHM/xoy/vlntkdau/TYrpLQkE\nKXIeDbjtjLnMBKXu5z3epKWrSV12kEQWrkARNTuhW4+5XkZUA9gb2MPMXh2yse4u\nOy3rRhhzbXMas45IFbXqnI+xuS/99vJQrhjXGB49dbUV9P9dJ4hj2Uc27UmAx/zB\n6MHKDXhpRw/R6MXPtpQpZjDTEA4QU2yuwHWOZ56QHJ/aj0QZJvSbdK6DDY7VSKEC\n2yCX1d+S0lF5Hcgrhhi4Me2sKA7JQwKLDDlXMOKpcI0vMZAoKXPbA+tVBQUZICZU\n/t9YW49y8KWCjXK113JDIxQOtTCV8SaLnV9+qL+3ckbih6gz76Vw\n-----END CERTIFICATE-----\n-----BEGIN X509 CRL-----\nMIICNDCCARwCAQEwDQYJKoZIhvcNAQELBQAwgYMxCzAJBgNVBAYTAlVTMRMwEQYD\nVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQK\nDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMR0wGwYDVQQDDBRUZXN0\nIEludGVybWVkaWF0ZSBDQRcNMjAwODI3MTUxMDUxWhcNMzAwODI1MTUxMDUxWjBU\nMBoCCQCSJ/gDfP7k5RcNMjAwODI3MTUxMDUxWjAaAgkAoqs4rEF5siEXDTIwMDgy\nNzE1MDk0MlowGgIJAPgJXSbIs3whFw0yMDA4MjcxNTA4MTJaoA4wDDAKBgNVHRQE\nAwIBADANBgkqhkiG9w0BAQsFAAOCAQEAbEQ4RcSFSpOXK3B9TdgJSeV6+/52Dewn\nXtUpwh4YxHCnVk+3Zwfr7zneGU6E/d3UrCXk9oJI0TybLxhXVOw7kEbE3UlIFO5O\nGFT/2m1Wt7/O/m5LMjFIa4Y2mi2XcNEbrL+HBIW3Ylvs6gvlQnEHvhUvr6pxXxsZ\nQJKvHlTIlQbH4SUjqSl+QrBJqg6OerUx4FSc3+8o28rG3GeM7W9F8a2pZThYHb77\nBgdotTh65To2z4GnFbZHvolnxWyz/MWwrkiZFAnGOB1y+nlyrI4a4K/GltiG4vTH\nuVv80JOgUd+bB8TV948Wc4lEDiHjY085moWcAxKIDOYOg151ntvrfw==\n-----END X509 CRL-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_chain_with_crl_chain.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtgwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ2WhcNMjIw\nODIwMTY1NzQ2WjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAKmqkuzB22hUhvai26KtFtia0yfQgyBYtWx8MCxw\n1XI+CeOC1JsYbEozm2ze+ytxlS+1Yr8U2Sb7D43AuVd27HeQllMT7DP5JV6mQkQG\n4ms1yTz8oN4H1V6au3Gy6K8BZOf7rY+1yiJMzG2yqC3ipShD8up/RXmXQWInSv9G\nU6lU7ZK+bK6IezsPEUPiFVzfxspQDMCSLSLi3jZmD4S0Uld4d6pFG21pWBSSRxI8\nd8xJkqqOAMc400V65rnaHm96uwvcjeWZGiI50HwhfTVjiztzcCblN1qt/Es7yhBs\neQFr+2b8N04zCMDLlL7grn9imW/XLiVSRvVrkXDyqIUmwRECAwEAAaNjMGEwDwYD\nVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFNPBCyPjw6jy\nMr5fVME/eDgTVQPZMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgTVQPZMA0G\nCSqGSIb3DQEBCwUAA4IBAQAVnbqXnnVFGz83S2tkry/Xq0wviklVWOsIn+emjV13\nh4SC4WgQWHViIwVz1XjUvpguIgMg+R1uE9cPrs0G2Pi6JkrzJB3Btre6QeRvMGhN\nT4HsEdyfg2KvcNj0VhkHAFD53R0g9UZywxyVcTFwjFDE4ELUSRGcYsF3lH0AU/cP\nhMLm0TBsUaPDxHUSpKqn63oAjKh2BN9r8Ecg9ii7I0nYakWtjz9W8e9CPTrVHxrl\nV6/lXdvkXEEtblCnNonawqYSbd4Rqs7duk2jgVX9zjguA57/0wmxeb+dDUYvVFMp\nxFQdzaxezPS7myjbpTP+TUyWhbTZDJLKxhMf9LAOV3A1\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIID4zCCAsugAwIBAgIJAPDlREG4TLxSMA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMRAw\nDgYDVQQDDAdUZXN0IENBMB4XDTIwMDgyNzE1MTA1MVoXDTIyMDgyNzE1MTA1MVow\ngYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1T\nYW4gRnJhbmNpc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2lu\nZWVyaW5nMR0wGwYDVQQDDBRUZXN0IEludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAKGa0pSi8MI88LdnHT8oZJVDpZ6qa9ooYP6m\n3S+xIxRBOVOGEs0a1dxko5iAfWgJJRF8igT1bRQAlNsnK/lZpGOOo8txjWsTQPFD\nFLUSVVn78lnXvyYlIhAMqmhIAmSK+qo02TcsncBNa/iCT9aH9SEf0a7xjiAcyfm6\n3XHScpPC0o47tICqKnkMCJvOxi4yKM0SWIxNpnoXq/ixxcipsA8QFQ4GU2r9LBSB\n8+7+couSKdPDR0AvDA/t47P8pNxQHCCzvspEtX5wGMEOVVhAsh1GgwMNyYh9IbUO\nTkHqR45D2ky+x6emPDPdAgqzh4xDpMR4V+hsPQ/GWdXpKLfKjdECAwEAAaNmMGQw\nEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFKbQ\ndxTWui6GjBedEeOPwmCUdTCwMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgT\nVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQBFEUnhtsWzHM/xoy/vlntkdau/TYrpLQkE\nKXIeDbjtjLnMBKXu5z3epKWrSV12kEQWrkARNTuhW4+5XkZUA9gb2MPMXh2yse4u\nOy3rRhhzbXMas45IFbXqnI+xuS/99vJQrhjXGB49dbUV9P9dJ4hj2Uc27UmAx/zB\n6MHKDXhpRw/R6MXPtpQpZjDTEA4QU2yuwHWOZ56QHJ/aj0QZJvSbdK6DDY7VSKEC\n2yCX1d+S0lF5Hcgrhhi4Me2sKA7JQwKLDDlXMOKpcI0vMZAoKXPbA+tVBQUZICZU\n/t9YW49y8KWCjXK113JDIxQOtTCV8SaLnV9+qL+3ckbih6gz76Vw\n-----END CERTIFICATE-----\n-----BEGIN X509 CRL-----\nMIIB+DCB4QIBATANBgkqhkiG9w0BAQsFADB2MQswCQYDVQQGEwJVUzETMBEGA1UE\nCAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwE\nTHlmdDEZMBcGA1UECwwQTHlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBD\nQRcNMjAwODIwMTY1NzUzWhcNMzAwODE4MTY1NzUzWjAnMCUCFG+5aeRHWk8uigny\no72012gx2DLbFw0yMDA4MjAxNjU3NTNaoA4wDDAKBgNVHRQEAwIBADANBgkqhkiG\n9w0BAQsFAAOCAQEAJA/YKjDv8Is9aysvQYlwEiQyTKR2j42zxHr71cS/YCZgEL9h\nJwnrK9Oo+EIO8nSfZfa35G/WvFknzeGrqwCH+amAQk0rWy7rdY3mAcCXgS9Ee5C5\nFzTWKYRFmCp/pFtm0PuqrCVf6IMe8GJ3WpleOFI1DkSpZXEOR4VBh5VU29DFVoWz\n9AvKIcwkYtZ49bojh7HP1db+14ovjC1miakWkW1l6U6/pfTwH8ViP28L5yyyp70l\nI0gtw/OXjb0VZ/F2YVkB4f4Yhhs9+pLfpyHN3hEYl0oQt73WPbylPUrbKY1Gp4Qx\n5BQF3Ub+aOIskQ1/8QDBWommJNj//7NwVvBO2w==\n-----END X509 CRL-----\n-----BEGIN X509 CRL-----\nMIICNDCCARwCAQEwDQYJKoZIhvcNAQELBQAwgYMxCzAJBgNVBAYTAlVTMRMwEQYD\nVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQK\nDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMR0wGwYDVQQDDBRUZXN0\nIEludGVybWVkaWF0ZSBDQRcNMjAwODI3MTUxMDUxWhcNMzAwODI1MTUxMDUxWjBU\nMBoCCQCSJ/gDfP7k5RcNMjAwODI3MTUxMDUxWjAaAgkAoqs4rEF5siEXDTIwMDgy\nNzE1MDk0MlowGgIJAPgJXSbIs3whFw0yMDA4MjcxNTA4MTJaoA4wDDAKBgNVHRQE\nAwIBADANBgkqhkiG9w0BAQsFAAOCAQEAbEQ4RcSFSpOXK3B9TdgJSeV6+/52Dewn\nXtUpwh4YxHCnVk+3Zwfr7zneGU6E/d3UrCXk9oJI0TybLxhXVOw7kEbE3UlIFO5O\nGFT/2m1Wt7/O/m5LMjFIa4Y2mi2XcNEbrL+HBIW3Ylvs6gvlQnEHvhUvr6pxXxsZ\nQJKvHlTIlQbH4SUjqSl+QrBJqg6OerUx4FSc3+8o28rG3GeM7W9F8a2pZThYHb77\nBgdotTh65To2z4GnFbZHvolnxWyz/MWwrkiZFAnGOB1y+nlyrI4a4K/GltiG4vTH\nuVv80JOgUd+bB8TV948Wc4lEDiHjY085moWcAxKIDOYOg151ntvrfw==\n-----END X509 CRL-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/intermediate_ca_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_INTERMEDIATE_CA_CERT_256_HASH[] =\n    \"98214d5d7fdbdc78bb1a54e8710dbd9ee4fda96b2c06cd2dae5cf1fa28b38c67\";\nconstexpr char TEST_INTERMEDIATE_CA_CERT_1_HASH[] = \"32075bf7b8cb1976abdd0c3fe3bf5f540562d0cc\";\nconstexpr char TEST_INTERMEDIATE_CA_CERT_SPKI[] = \"ygQ/ZoU9iXZ7TYmNPOW/L9gcA/x3PViSBPMLncpTS7E=\";\nconstexpr char TEST_INTERMEDIATE_CA_CERT_SERIAL[] = \"f0e54441b84cbc52\";\nconstexpr char TEST_INTERMEDIATE_CA_CERT_NOT_BEFORE[] = \"Aug 27 15:10:51 2020 GMT\";\nconstexpr char TEST_INTERMEDIATE_CA_CERT_NOT_AFTER[] = \"Aug 27 15:10:51 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/intermediate_ca_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEogIBAAKCAQEAoZrSlKLwwjzwt2cdPyhklUOlnqpr2ihg/qbdL7EjFEE5U4YS\nzRrV3GSjmIB9aAklEXyKBPVtFACU2ycr+VmkY46jy3GNaxNA8UMUtRJVWfvyWde/\nJiUiEAyqaEgCZIr6qjTZNyydwE1r+IJP1of1IR/RrvGOIBzJ+brdcdJyk8LSjju0\ngKoqeQwIm87GLjIozRJYjE2meher+LHFyKmwDxAVDgZTav0sFIHz7v5yi5Ip08NH\nQC8MD+3js/yk3FAcILO+ykS1fnAYwQ5VWECyHUaDAw3JiH0htQ5OQepHjkPaTL7H\np6Y8M90CCrOHjEOkxHhX6Gw9D8ZZ1ekot8qN0QIDAQABAoIBAHWMHhxLoQBIP3fI\nIzfrc6429GNC5wxaTqMsULr+B9wVzVL5tPdHy8Nx2KV24MEp1bKwedw9gMgJ3DzE\nc3H+SFq6SF4VDJ6T9KsI9ij3WuoHHzNGgR2H6QzviYwsxMDJbqcbpdxCMcTNC/pJ\nHzWW0zDy6hxsS8sD5c4mpCjEwN7EsOwAIYwHge6j1rNEM6/Dt9NrBJueV5vHsAPa\n1uO70AskT6dfX9yLJjShalxMUd4NFcbCjB7rYFQA19SQBOuIR99/173RELtJzemT\nwbTm8WaHa77b3GNjqCDPgZQGXfWVz5WMeD7NM1/fa4IKav8hBAZZ6LSLDlkWeroH\nWJAyn4ECgYEA124TMeTTtVPvT1D52Na6t/PKHl+zLpzw7jXW8K64YWMuBb5Wy41L\nlGOsUngfqBCap973mUvFKT+Qx9eAslx8GU/t+mv7c+TFrwwLC+7m6uRSenGdSsgq\nQpZML4UvzONmNWd7Wfl/HweMdI6TGkkv7gd6N32txJV/6nwpuNLr2fkCgYEAwAnS\ntOqaWpq41HL5lFBTy5bWOjP6HdjWYcKq6j8O2wtzPud3Ci5WuUyKqtpjXvHDMQm/\npTm/DTBFKIyLGtqUOe/eKa9ypbBsA83xflvTLTIYrbkxMdIvWDTEfduQkd4AqaK0\nRPIJKGFj1dB45ssVgFSKplaCm+YoukuMZ9+RiJkCgYBHzEXVcSAl5M1zL9e1LwuX\n10ZbjSWYuXGK0qLKIFEPFBD4lRE0UekIkZeS/LoAN1hz7lmy24/gSXElSWt5Bl3N\nydfUiDyLyYkg5/Cej4aibmoWqNr1W3TKMHE/vTHLJVahIdX4jLCTVN640fOw5Qd2\nW0OeTuKlC/ZP3gXBtXopcQKBgBSjOZyttabVB9X1ATNaKpcvf5hDq7Z3bhqVdA62\ndh8+LMVu1JEoh7NkMWbJD3LNon9rn8fe29RMArKn3+8j3FgZmhN6wmRiAEmsYS6G\nKbyqrJJG262R+/qLuS4ZYYD7jbCtRpg+NpO7W6JHxCZxXr8q1renhmPAqVHT+qPT\nF+3JAoGAZRR4IFaEAwxrL9mxBKaxWdUjQRt5RoZ4CiUITimSySUjGCqGE4PEfTeu\nekTQfRotCuQw4Nf1sOFtPpFW+nzfuSrwB4kDc8ovuzE4xLKFltYj7gAYRxFoajWG\nLLetweEQOP5flC7UShpdhND+oysqtZJsRFW+2HVrrpaYq0gnnUM=\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/long_validity_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEHjCCAwagAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMuIwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwIBcNMjAwODIwMTY1NzUzWhgPMjA3\nMDA4MDgxNjU3NTNaMHoxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh\nMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQL\nDBBMeWZ0IEVuZ2luZWVyaW5nMRQwEgYDVQQDDAtUZXN0IFNlcnZlcjCCASIwDQYJ\nKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOk963Ntn2TcZQeMlC6DQqUVZ0B2Jm/8\nKO9840C2u76p/Imcyp6JePLxs0OsY8aHSn9dkFrERXz+DMM4yqM/2AdjyHGvEkun\nr7eXnbC3QuOBDdFLl8CF6QRY87x5kqOi5AqLlJQwtFr9XiIB8J0oXYdjti08JMIe\nM9ETv7mCnPm53WBJsdhKqDcdGgYuGeo0KrG3KTELmcNDG2AQb09tP7rmnf/XKaLv\nmEny7Eeyg2bOmvbBVrnHcfVW/3rXcryPkIlWxhEw3P3kLKTtD5MmjHH4EF1EOHhq\nX/KGjBfFHT1F7A5IOJxWZjEq7fenZ5cXO+GZWjDtHL0cOiOPkbY9bEMCAwEAAaOB\nnTCBmjAMBgNVHRMBAf8EAjAAMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEF\nBQcDAgYIKwYBBQUHAwEwHgYDVR0RBBcwFYITc2VydmVyMS5leGFtcGxlLmNvbTAd\nBgNVHQ4EFgQUL9kDea8hH6DLCh4MmAiI9ROPn9wwHwYDVR0jBBgwFoAU08ELI+PD\nqPIyvl9UwT94OBNVA9kwDQYJKoZIhvcNAQELBQADggEBAH2Hd+qSCCcNUqZId1TN\n0P0OVQKUxUDs8yAC6wDOF4WyRsPTf5GucSYMmt0Ji0HfL85PMJy4f0Ks6ud6KMbz\n0xyMJdT2hY4ddqxWHc4FO1loqu9hwMR+6KPnF8o581H83QpGRcLFVGpAUKDE5jgj\ncBACXwuzLMuLKblJq3ZIwqIKowQXjQNN/i8B5WMEr7YFOIuAt+cj9FcPu8jxCyDW\nK5/MjVJGwbNf7X8llhPobp3Vu98UA7RdNtrXfLU8N6DEqJ0Awf6spuVG4RX4CDzb\nAZIlGN3OLJJqXYmeKGQQuFiy1ntEVbK6Q9qqxxD+vhzN+LsJOdlUUHITRPXVYcr7\nyJY=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/long_validity_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_LONG_VALIDITY_CERT_256_HASH[] =\n    \"0a8fef8beef315e5858fa6d9c9ce662a58d28d3bc85fa30cd124753cc1898fcc\";\nconstexpr char TEST_LONG_VALIDITY_CERT_1_HASH[] = \"ca96d23826c9a7e65adce050c77e55f0d8f49a3a\";\nconstexpr char TEST_LONG_VALIDITY_CERT_SPKI[] = \"o6Oi3fVVgo1X+KEHbkqpsG+ze/SwFzyJqSoBXzgOpJg=\";\nconstexpr char TEST_LONG_VALIDITY_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832e2\";\nconstexpr char TEST_LONG_VALIDITY_CERT_NOT_BEFORE[] = \"Aug 20 16:57:53 2020 GMT\";\nconstexpr char TEST_LONG_VALIDITY_CERT_NOT_AFTER[] = \"Aug  8 16:57:53 2070 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/long_validity_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA6T3rc22fZNxlB4yULoNCpRVnQHYmb/wo73zjQLa7vqn8iZzK\nnol48vGzQ6xjxodKf12QWsRFfP4MwzjKoz/YB2PIca8SS6evt5edsLdC44EN0UuX\nwIXpBFjzvHmSo6LkCouUlDC0Wv1eIgHwnShdh2O2LTwkwh4z0RO/uYKc+bndYEmx\n2EqoNx0aBi4Z6jQqsbcpMQuZw0MbYBBvT20/uuad/9cpou+YSfLsR7KDZs6a9sFW\nucdx9Vb/etdyvI+QiVbGETDc/eQspO0PkyaMcfgQXUQ4eGpf8oaMF8UdPUXsDkg4\nnFZmMSrt96dnlxc74ZlaMO0cvRw6I4+Rtj1sQwIDAQABAoIBAQDZ+HHXV3UETytj\nWK8KoMRhfpUVmtTlBqRC21WNHrhpwHYvLIX2jsanU9WRwsMjvz+Rs6C6En5WBx1q\nO+KzmXCLx2aFR3UzmqwOSPSnNzydW/fLxnc1Bm/zDiYJ1xuBLTMr37Gko7eI6QtV\ngLaaSCTP6BFSSMiZWJxYIbqk5TlJ7e4Fp1xSkFwK9p+5lrZMBudzayT1wburbHPH\nOh2K2+QsJHn5Big/lEl3vH3L/wXdEgVCYmOuOrKodt+oIJL/DHyLIfeSq85WdBeG\nadkOSjpoFIs7zTNh/S9Dy1qMJNWHgGQy9kCelHdNhb7aTFMCTWSAtpyH3H3J43Sd\nPN6jniZRAoGBAPZ4DmRkNAO3OSTV0ddtEESCFWEUd3oNyzWhR06AbDg0KENmB/QE\nmP6Y2u7MXzW6Qn6z9OJGhMOsFeaBQL1DLl3gcPxA+GNmKQursfpOIBRwJREDuiB5\nYaC63PlG48SWSm+s53xmedOULwqcJbHv0iAcBf56BTB7wriALCWUQAdHAoGBAPJC\n66r5XWFMbNhwXFO7W09EOlQVS1z49LhLdIyv1TygkGZmAxFhU6UekqmsZMJVahF1\nXj1shVzXPwpiDy0ewyrR5mMlFTN4XzBHMj2fGJKyzdel4PAhzureKz1l+GMXmLvO\n8EgoOxAvTMZ5ckFd781siA8e42se9ftRztQmniklAoGAIvHquHkslspHo91dHzor\ncEsxVM5eEkQaZ8QyJAM5VM6wVr4EqqhsOMzbXbhEV43NF8HUGrD86fFgOYph73Oz\nA0RrQJjFnvJBJ5J0bZTeH4e2+a6ZmTkcinl0EzPl312fuC2/tHejFkiQUuTIw23J\nURwoDWini7RAa4BgwgM/AMkCgYBbLPZ3FXygdtvsJXWKCdRWo830AUOozQWFKtjF\nbhOpyqAt+ayj6OZLGZOHChuty+msgmiiY2pZw6S1KqydhZtcTT0RCU7OXTot+E2f\nRXZBF8xCoepjVcYDPPQ3Gl5+JM5VuI9UCNgZwkN52xE2Kr1qqbMXRb+/fBqFqIdX\n7uTNhQKBgQDKvNwIqERaOe2cltnFSRI37wwA2udaoh0qspJXmyUWHgUmSalTTlMt\n7bEw8x408PZN2e/M8ogTl73g76HgiTyzo5NDotP+L2TEkX0XKMt/7dtCgqgJhVyA\nP4WuOI2T3M+6sqDsY4xuUvfE13DQOd4hI1A6dFItdXgLfPUlTymQmg==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/no_san_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Server\ncommonName_default = Test Server\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/no_san_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID+jCCAuKgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtowDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ3WhcNMjIw\nODIwMTY1NzQ3WjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCmTELbIJkET8uMy5zIjg5NWtzxf2PdehX/\nJEW61ugESH7xnWl0PV8/HUHV5xgUS8gGvtFBmSetaZSOqpaMsZyVCqR4SyX2TtGr\nVhepmuO0bSLUJQJ1oGNiAdwBn4ZArlbbL3rGIUmaXxz04Re8N19JQgdqBgbugoCS\n/iKnJpuGabCmuf9J4AYm5Yun3hkGDifpyqrdZ8C/YjkZ2+AYKH8DVDioXpziwN+W\nkfSecZXMMDW90t0VC7lDGzwf/Eey/mt7zGfecUEHUVRi6vtjBxZ6o+1mjCwLhMAr\nClWdMCJel4gpgyfpz7jIBPRP/XAGlWYkkY4/WiBm41TJa+ujUqndAgMBAAGjfDB6\nMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMC\nBggrBgEFBQcDATAdBgNVHQ4EFgQUvtgXVkSjpv0MyirSEN8RGT8vzaMwHwYDVR0j\nBBgwFoAU08ELI+PDqPIyvl9UwT94OBNVA9kwDQYJKoZIhvcNAQELBQADggEBACeA\nJ/KXgot/kPl2diZrmykg8bMtz7KPVGmU/e2OprlKbPTgiuLKzWLxC3JZf4u0LyK/\navdPMtDplitEpmTLYQPTqIHNR7ravC8wa2BDbOSbBsWLAVdIppLibvp4USN5ZbYT\n1suA3s+cP3IC7fz5WgD7eihkg0in20np1yWElmD0TuvA9p9IHojePp4sbpU3/eFQ\nACyj5575cdQSIKFJuHkc8zAoLpH7y0Zvfmy0w2jziWv/7fZYznTVvbEBI6CPx7+w\nYK4ZjmljQgXdABhQZN06J4NTPfkUByJSXOkdhf4N0nJ8/SaKoKU09otqB7N4oqt9\nYi7YZVI5N+UYUFX1J5E=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_NO_SAN_CERT_256_HASH[] =\n    \"acead6f9ce47637c5aad9843cd465de6ec5de94f887fa0dc87b61df6c3ccd5e0\";\nconstexpr char TEST_NO_SAN_CERT_1_HASH[] = \"e032f61b4823257c75ee75ad3b8c921746c88d28\";\nconstexpr char TEST_NO_SAN_CERT_SPKI[] = \"NOz4phTttuT4U3FoTQ1SUif1dLeN6Ifk0lxDxRF7oSI=\";\nconstexpr char TEST_NO_SAN_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832da\";\nconstexpr char TEST_NO_SAN_CERT_NOT_BEFORE[] = \"Aug 20 16:57:47 2020 GMT\";\nconstexpr char TEST_NO_SAN_CERT_NOT_AFTER[] = \"Aug 20 16:57:47 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/no_san_chain.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIID+jCCAuKgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtowDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ3WhcNMjIw\nODIwMTY1NzQ3WjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCmTELbIJkET8uMy5zIjg5NWtzxf2PdehX/\nJEW61ugESH7xnWl0PV8/HUHV5xgUS8gGvtFBmSetaZSOqpaMsZyVCqR4SyX2TtGr\nVhepmuO0bSLUJQJ1oGNiAdwBn4ZArlbbL3rGIUmaXxz04Re8N19JQgdqBgbugoCS\n/iKnJpuGabCmuf9J4AYm5Yun3hkGDifpyqrdZ8C/YjkZ2+AYKH8DVDioXpziwN+W\nkfSecZXMMDW90t0VC7lDGzwf/Eey/mt7zGfecUEHUVRi6vtjBxZ6o+1mjCwLhMAr\nClWdMCJel4gpgyfpz7jIBPRP/XAGlWYkkY4/WiBm41TJa+ujUqndAgMBAAGjfDB6\nMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMC\nBggrBgEFBQcDATAdBgNVHQ4EFgQUvtgXVkSjpv0MyirSEN8RGT8vzaMwHwYDVR0j\nBBgwFoAU08ELI+PDqPIyvl9UwT94OBNVA9kwDQYJKoZIhvcNAQELBQADggEBACeA\nJ/KXgot/kPl2diZrmykg8bMtz7KPVGmU/e2OprlKbPTgiuLKzWLxC3JZf4u0LyK/\navdPMtDplitEpmTLYQPTqIHNR7ravC8wa2BDbOSbBsWLAVdIppLibvp4USN5ZbYT\n1suA3s+cP3IC7fz5WgD7eihkg0in20np1yWElmD0TuvA9p9IHojePp4sbpU3/eFQ\nACyj5575cdQSIKFJuHkc8zAoLpH7y0Zvfmy0w2jziWv/7fZYznTVvbEBI6CPx7+w\nYK4ZjmljQgXdABhQZN06J4NTPfkUByJSXOkdhf4N0nJ8/SaKoKU09otqB7N4oqt9\nYi7YZVI5N+UYUFX1J5E=\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIID4zCCAsugAwIBAgIJAPDlREG4TLxSMA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMRAw\nDgYDVQQDDAdUZXN0IENBMB4XDTIwMDgyNzE1MTA1MVoXDTIyMDgyNzE1MTA1MVow\ngYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1T\nYW4gRnJhbmNpc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2lu\nZWVyaW5nMR0wGwYDVQQDDBRUZXN0IEludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAKGa0pSi8MI88LdnHT8oZJVDpZ6qa9ooYP6m\n3S+xIxRBOVOGEs0a1dxko5iAfWgJJRF8igT1bRQAlNsnK/lZpGOOo8txjWsTQPFD\nFLUSVVn78lnXvyYlIhAMqmhIAmSK+qo02TcsncBNa/iCT9aH9SEf0a7xjiAcyfm6\n3XHScpPC0o47tICqKnkMCJvOxi4yKM0SWIxNpnoXq/ixxcipsA8QFQ4GU2r9LBSB\n8+7+couSKdPDR0AvDA/t47P8pNxQHCCzvspEtX5wGMEOVVhAsh1GgwMNyYh9IbUO\nTkHqR45D2ky+x6emPDPdAgqzh4xDpMR4V+hsPQ/GWdXpKLfKjdECAwEAAaNmMGQw\nEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFKbQ\ndxTWui6GjBedEeOPwmCUdTCwMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgT\nVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQBFEUnhtsWzHM/xoy/vlntkdau/TYrpLQkE\nKXIeDbjtjLnMBKXu5z3epKWrSV12kEQWrkARNTuhW4+5XkZUA9gb2MPMXh2yse4u\nOy3rRhhzbXMas45IFbXqnI+xuS/99vJQrhjXGB49dbUV9P9dJ4hj2Uc27UmAx/zB\n6MHKDXhpRw/R6MXPtpQpZjDTEA4QU2yuwHWOZ56QHJ/aj0QZJvSbdK6DDY7VSKEC\n2yCX1d+S0lF5Hcgrhhi4Me2sKA7JQwKLDDlXMOKpcI0vMZAoKXPbA+tVBQUZICZU\n/t9YW49y8KWCjXK113JDIxQOtTCV8SaLnV9+qL+3ckbih6gz76Vw\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/no_san_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEApkxC2yCZBE/LjMucyI4OTVrc8X9j3XoV/yRFutboBEh+8Z1p\ndD1fPx1B1ecYFEvIBr7RQZknrWmUjqqWjLGclQqkeEsl9k7Rq1YXqZrjtG0i1CUC\ndaBjYgHcAZ+GQK5W2y96xiFJml8c9OEXvDdfSUIHagYG7oKAkv4ipyabhmmwprn/\nSeAGJuWLp94ZBg4n6cqq3WfAv2I5GdvgGCh/A1Q4qF6c4sDflpH0nnGVzDA1vdLd\nFQu5Qxs8H/xHsv5re8xn3nFBB1FUYur7YwcWeqPtZowsC4TAKwpVnTAiXpeIKYMn\n6c+4yAT0T/1wBpVmJJGOP1ogZuNUyWvro1Kp3QIDAQABAoIBAQCGTOplS1MoT0UE\n5N8ufMMCJ9JtWkMNd25SSjniJXLlGqMif8zNak8Eip1DoUkqAvV8tRgL0SNhAfZO\nrWe+IiZhm2GuIIp8+7SKmZGskIE6kjThfS4aTrhUS0ohKXLjw6gzhYmyfumROSUK\ngsqHX3SdliykrE9myZAKpiDYOu65Qk3hUKZLoPYlF1d1f2lmJ239PFRCpmYnFdV4\nL7zMkf9Wtz3zPS8AKYRrF8Y/BgqnV0HkFFANhbd+4T8LS4nvb8/C6ix6MYUbEPPh\nREgUrYcajqAM6r6TLYecrfOMcF2CEhPmJTWL8AIXqcaNG1fO1BrC8hOnZUwF+d1/\n9Kel2kNhAoGBANsTdfXXmdQg+XfEXzDMozCDEcmlIE7iqZTOgiRNth0RF/DnWaIe\nOmmK5tAaAf3shPALmZ6EgV3NljZCEOynQWhsoMcjPnpgBxjgmyV86fK+cBq/zjYC\njFTqhHuy6LMPYcje1nrc/murGHcycEXwZ/k9lSe67jIdPZe3b5RGMCj1AoGBAMJT\nkRSEPydvXPjSidHOve6l7vaJM4SVUxbjj3TfFPCexdZMaXlBJibajKPLkfD4sz4E\nusjHRn8ERpMf3fOdYIt/m7Dicc7yIpcC8naae1zMx5Xw/5SLOezpmNqnPtv8i5s3\nppGKzOPOgSrrJuY3EYYKBl3GWR5HEKjglNRpzYxJAoGAbfiM5EXAIG4VDHv01MhE\nuttwmGpdl3vMaLhpN9QSsPCdtf5MTkciR0zEpH4Jte9+pAkOqjYVwoKq/r2XRVSI\nTuUIqC4W9NMzfNdyyhS0U3gpSiQyXYZV72Jx8q5ShWAZU/4YaEB7Lo0KkPsuxMzz\nRHgXYYRCgcGP45LfXe0Fy+kCgYAzTw1HdYOXTfgmWrC5DPVpRbbKsX9Ayoydxofi\nO+Szg7iI1LQkp+cC60GnHeDy1ze8cFNBIByk/OQFhKx93lfXfWIGWkPieFxZsxWB\n61gXMJqiJHfXKMuqmiMZ2H7oZs4VtaPtWdCvdZ71ThF/R5sA4TS9Hdn5/JmRfXrO\newEiwQKBgQDKSn1mhqwU1VxFFMI9bsBjCyyx/FO/SmJm9SV4cKBOHFP9aBKEUHOy\nF4pSupFg7XDDbpJDigpSFJrKHhFL0W/lX/dAbH6FwB4nI2JzirhP3ir9bmuKvPOm\nyf1553WkxtO1Cs49WhnoyLeQEZKoDG97RrgfH7Ji1WpASyLt/LEDtg==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/no_subject_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/no_subject_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIDgDCCAmigAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMuUwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzU0WhcNMjIw\nODIwMTY1NzU0WjAAMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4tc4\n6+KRo34aNE9FffGgnwfRqiyNnUddwKLfrdmXFMA7pcGOP/83bjDxUlh+fEWdbYNt\nv5lL7zGfeb/lvH/he2GjbdhjqBISvW2u7sKEipp1RWLeoulNMqwu8311QywFWQZi\nQwSCjOStnzXrrWc1Wqm7Mbu1DJSQlHbQ21TOxUy49Zif5IgH2Sprt3/djVwUMio6\nYRJwn6hG6dx9Ni5fa4tUdvHRdrS2/LJpoQDTbagd1vXuafObG8Dxsed1JtCmFGGU\n8iwnHQl9D4tQgBikiZFvw5MfaWIJi/cZPzsCLf6OdfaKcyHPfosf2uk5y8Mq+gwr\nsaVbHfq2Agq6dm7z3wIDAQABo3wwejAMBgNVHRMBAf8EAjAAMAsGA1UdDwQEAwIF\n4DAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwHQYDVR0OBBYEFDzATZmJ\neTIG0y4UXTqJNF0Ej9mmMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgTVQPZ\nMA0GCSqGSIb3DQEBCwUAA4IBAQA0/7vgXo+vpskjOHo/MFd70egSI40/G7S6w6VR\nVH7mrr+jMi61AddLMQrbVrM4K4j0IMuXhGjJQl2cECKOqdO3Scd1FohrHo6VKUx+\nGefef1zTpsmFDjmGKRfo3r2R3SHXR+QN+JVR2+KfjdAz2oGwMm1dNSUyYcZiQz76\nZ5JCfTGWziUfRyempSe1/957bSK9PmFpQvs3BuaqtetDPD+A/NU00zttncoLHmjN\nh4trPHz9MStNWAees3aEwYmarog1k4RdBGH7kDyNdY8nfKksRwYREj/tdUnOKyF3\nejX+0lg3aNl9Pc2v4Lw7Wz6TaP/yq7Efztw+EQVpQTrhDkxg\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/no_subject_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_NO_SUBJECT_CERT_256_HASH[] =\n    \"e09376964b796cbe17b7e7f57658ebb8c693cefc6dcde433cf2b734c984963ce\";\nconstexpr char TEST_NO_SUBJECT_CERT_1_HASH[] = \"cb9ad234e1607095e421fcb9e39a955ed3a1b5b6\";\nconstexpr char TEST_NO_SUBJECT_CERT_SPKI[] = \"YISd5qWqfaFjWOEi73JkosxN2gS2WgKvLrw6GLe164A=\";\nconstexpr char TEST_NO_SUBJECT_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832e5\";\nconstexpr char TEST_NO_SUBJECT_CERT_NOT_BEFORE[] = \"Aug 20 16:57:54 2020 GMT\";\nconstexpr char TEST_NO_SUBJECT_CERT_NOT_AFTER[] = \"Aug 20 16:57:54 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/no_subject_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEA4tc46+KRo34aNE9FffGgnwfRqiyNnUddwKLfrdmXFMA7pcGO\nP/83bjDxUlh+fEWdbYNtv5lL7zGfeb/lvH/he2GjbdhjqBISvW2u7sKEipp1RWLe\noulNMqwu8311QywFWQZiQwSCjOStnzXrrWc1Wqm7Mbu1DJSQlHbQ21TOxUy49Zif\n5IgH2Sprt3/djVwUMio6YRJwn6hG6dx9Ni5fa4tUdvHRdrS2/LJpoQDTbagd1vXu\nafObG8Dxsed1JtCmFGGU8iwnHQl9D4tQgBikiZFvw5MfaWIJi/cZPzsCLf6OdfaK\ncyHPfosf2uk5y8Mq+gwrsaVbHfq2Agq6dm7z3wIDAQABAoIBAD4y6Wa0n16HC0Rg\noBtij7pLeJAC22nO0cQqZM7UpPqJo1FV+11M67QJM4JyxWvp4rrOmN86d3nEz4eg\nuKuxHPCp2xgZh2nkWsMaOic5qwIeTB2UeJuPAR3t3InjUq3yehzpBTR/HhTn9zt6\nm37Wl+ieNbfzUjd63RH4alhZrsCmPr/udY4wmfXPaPwXOiP5mpkN8J+MpvZZXqdL\nBnDGWbDt4zKWvjTC+s+pGiYgc5aP+Rt5h1qZrW0LekYXauKKocIrgXWHvdYEc2hc\nVm8i8ut6P1jufQSlwLWq8P0kKAmqLIrgZtq0IvrUnUmYJR+0PatD1FKsax+0AdGD\nExFbK4ECgYEA83xpqsKGccZPPOU0FSnopUMi6Xd+QfXzKEoZ2t0QUdctDjcuRuKY\nw0RyRh6LYgNSJqoUtZoTbg+LIuCUAIaReVnhEFF+34qvKo90/ct/TJ3SmPlqLG4p\nzXBy5l2g2xbW6NvsTurw3RFUViDjHUns/oVz7YqadFeC3XCIa+VCwiECgYEA7n/O\nCQoZZ0FrxKOfi7ZH1R/4VXp4exeufsZfFlAo35NKpEQCjdy33a+0Rkfe7JmcM+2P\nx7nHnZSd3sA07ULqAhCO4h+XYNjzpUIBxOFPxsbjhl5gmDflgN3ksQrXl86chJCJ\n6KIa0DGr8fRMRtksZi/oZX4hwT/pw3Z1G1xm9f8CgYBidh8s5HkFrVIBqBZFsKrZ\nmaqWirHN7q2jz0NhVB/zWHZp1zqpg5yO6jDj1ho7SmiAJis1vNa6nSKAuhxfkgtl\n7UisFqidsTFgnp6Kuy4BR5kQL1wWF35kC2MLcTVJI5VImRaYWz2HyCg4pBfzkh96\nVXEpz/Dhtzm+XnFsYEt8wQKBgBYHHdt94EzGIRL/XcwEoOvRURlr/1a3IDm7Tqr7\n6lqD+x/i3C2IFj9WbBsVpkRKB8BQD7iNsx8dy9r3+6tyQ7S/HMNL0apzP0WjLBCo\nErs0QW12qAtHFl5poCat7q2pupEP3HztA/b4r33gZ28fvdcOU7OUC8ASjB+ugDBv\nv163AoGBAMJ/l9mm26jYNsjRyvAL8U2T9W1TUXHp37mR+6k1m8x3GCy7CN2tDN9E\n7f+5OHEBmsvGZjYn9lI1CDL0z0HXNuIyrk8xrHukGlxWjay4WWF4xoChlx/Y0k8v\nUTHT7d5JziVbLDE3kOa/0t+Tx8YIBYIsVTMTix3PiJRZVWc59NH7\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/not_a_crl.crl",
    "content": "-----BEGIN X509 CRL-----\nTOTALLY_NOT_A_CRL_HERE\n-----END X509 CRL-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/password_protected_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEJDCCAwygAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMuEwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ5WhcNMjIw\nODIwMTY1NzQ5WjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDxJ08cathEgr87cKUavqWQfiIo9oQlECKW\ndkm81N1bwl48c/va+mjwnxS9nidiY9o3cBOd/KUMscgwu7S5rlq36LiAZVEMaMaL\nBnBBMVh1SM0jBkisFkn69dIeMVcebrS08ElWBDP4rI1dvRx8Z4vVfRWqV6/XqihJ\n2363GiQl61dYlIFADyKREa4r4/YudP7kyULCNLU4u+hSGMyEmu87TG3RyBC49QqP\ngK7z9zzOxOAY5fyIVr1k8Ptk3uw5fg/9dEyDCnnK8Apa7FmbrEGQ14mmx3iqPO1q\nNOMW+9HnMoGx5IfpP2ejRqwoSg/5TAicZagfY3xB9bhML3zERuAJAgMBAAGjgaUw\ngaIwDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUH\nAwIGCCsGAQUFBwMBMCYGA1UdEQQfMB2GG3NwaWZmZTovL2x5ZnQuY29tL3Rlc3Qt\ndGVhbTAdBgNVHQ4EFgQUDg8/hNzFFyWjnynrSveWEtz7f50wHwYDVR0jBBgwFoAU\n08ELI+PDqPIyvl9UwT94OBNVA9kwDQYJKoZIhvcNAQELBQADggEBADg/EN89IG3g\n4GmIqvfyW4u0R+s35nseLIzGZekX6FIW2Y3f269HL8Ax4KqMWFve1R7eFRvJVS5h\ntfq0k+Tf7p63NMez6bbuVvNyD+A0xWrjNGDDWy6YQwGuvdrTiN0vXkqX+GxDFKlP\n/e7ta0ctu3sJ436GeMvNldXMplRKEBe1AgqUXDt4/8PaYRJz5/AxO5w9c/2+PFBa\nefHLuwYxIkcdmvNt5PhrnNP8WUThUgvY5L2LlNCO9o5tPJqk021dcNLnU4uAeA5B\n4colPpyG0aDOHG3+aGOO99nKs2msLZ5iCu7A1ls6vlBZRB6MA3eXfKy5nHzfMLnP\nDHks2LKIt/4=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/password_protected_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_PASSWORD_PROTECTED_CERT_256_HASH[] =\n    \"782d98b2ab8d30eafa5afd1b859bf0102cc4f05bc9b3a00a9bc17020bf599d01\";\nconstexpr char TEST_PASSWORD_PROTECTED_CERT_1_HASH[] = \"51bbc4e271a268905bb88b8b2548b09d363bfc32\";\nconstexpr char TEST_PASSWORD_PROTECTED_CERT_SPKI[] = \"Z+lFjvLkwAiSsIrAE0WYZHwGeyJ9Vr7jNMcK7J3q0tE=\";\nconstexpr char TEST_PASSWORD_PROTECTED_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832e1\";\nconstexpr char TEST_PASSWORD_PROTECTED_CERT_NOT_BEFORE[] = \"Aug 20 16:57:49 2020 GMT\";\nconstexpr char TEST_PASSWORD_PROTECTED_CERT_NOT_AFTER[] = \"Aug 20 16:57:49 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/password_protected_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nProc-Type: 4,ENCRYPTED\nDEK-Info: AES-128-CBC,043C862E0838958EB0440FDD45B7797B\n\nxda9UdWs4HVHSS96fADHKVsmnBkgW9pE5sBUm28IcMCklrmca0oWm39SmSDYM41z\nmUCF+1IDAGBJoLNGjZ/BLF3TqL2O/Tu2vwsHjnAdHD8Gf2Xfu266ueJHLrSv4AxD\nzi9C6CAxHipLulcrWXT+tFREfK0E4yQ73eCqZBh/7gngC3Pj2HQQkJqexIO7DdVv\nHgjI3DbFpTMVLyjAlXSkry/adMwF4pAmq1/RMGA3hisrgHIpMhGFnuT+vZkz85JG\ntNuFxKtc3PhR9dHgesOiMR5jyFJ3xD5jaLZ8AaDoGTerOeKMAjneBale7rBfTzhY\nB+P8lpjxbCldFHPskqCnxupDkDitznmo4oTFo8vqGFPsX0SsM/UUkPo/M4SX+d0o\n5Qr2D+CElhxAuHGLSAo297LDikXosKyoNFTuOZJh+gd5uUyKglqPavvgM4wAQZDT\nIGs+yKlkCXil7OV2hjuJam5haCuXSEZEwFZt5TSXzbQsd865gKRc44RJpZtkguI/\nzuSchul3e5Sc5oqS0RREO1+DamDFAxinM6cq4WSsEH01LwqVkzKhYT841BBlQlSK\nzzYL+brlRHT/42eyWfXdGTWHEV5DRkRR8K9BTpYLoHNBX9EBT8BEQmtlDBml7VJD\nA/eC7/KpdPxgZ8Wh/iZosM6GaTFKekxN9YpOnwWZXsy+cSnTH7tgARgPARvdc4F0\nw+YAQv9Tt7k+v6SsyTB4QThqXVLOjQZS0IFnFniNK7CqgxB+FpiC2iBl1FL6jsL4\nQEZG0bV4/c5whBlcCevSbk13cRVUuoO6yFw4JYTkXmpx85LodVMlfqJ7ep73kqIM\n6S1coaCtA/v9YawG4db6Jc+ii7G4CX0f17/UzTGQ7nE3w9ekSPBRU/ES1gDBtUtu\n4QfAnQ3NGDwOnZPRffcUlSLhBjMUnfkwk2HUHMDdWkvIGWjlIbVkb6zzEZouYoqL\naGRP40x0ufhwz3+YvQlA3bJqc9+Bi0w24kLtteweBR6Kq+f+3dPlI03wXM6Q92fa\nLkJAgRkz2flw+nh7CMnzR1zfLS8ShsfpYe9Amq+ZHv3EPHMEJ5jd9OBvmjDfeCD0\ndLufp95ZOovcz8Ye1JrGnAX8tRLS7IvQ/APXVTrkxUsmNpz4TBSAPFo3EN2FLYF/\nIoMW0w087ErFCSdgKiTWtfOcTMP5cLoDYWt8vELUPtUbe8mi7S1pEJGFSJ9CUeKg\nh8D4iefVsiYUtS/uBlCCHUTlGefhWVECoSpUXGVWe67ieLvLQifnvLijkVMjQchZ\nE0AZXj3ECIgAnBOHBsThyrC0qBqjMFAopF6ELBkfMkug/xhTqaPi3c15FQXj9ufO\ntTT6nCHTE5TgZrMYs4ekOvE/Mk0CHGRNIDGlVOszXMmxfso2H6bBEz148POqokKe\nkLrFIEigrX2KtBbDq2euJJy2b8SqClbeHUrXhjpidKDqCzsj5nOUNUZxPdHPJnog\nAdz6udYHQIz78zyXE7MqLGttaroMs7hXz/dz1awfDeisUZyZJ/IV9CszcHHDSJga\ng9FeAGNcybPjQlXKuvNuFnIChcB6q567TrNDc5Rk5vWk7cl8y8fGL01IIXoAtdwm\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/password_protected_password.txt",
    "content": "p4ssw0rd"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns2_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEHDCCAwSgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtwwDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ4WhcNMjIw\nODIwMTY1NzQ4WjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDjauzUWGVwbu+eUcVo2L9WUU3kY9ExepQb\nSfWOv2GTpwR49m5sRdISlUY1SVgd2ML9rgMUKoy188qFuo1di8/2HnuSQ9tcverb\ntgSwIWRu6Ki5iH3StLppi7Uhp+XXmKg+Z7cwe1VArxPkXfmu7V55lMsVULr2OkmG\nN08T3KStmZdk9iHFk9vXpr9xCqIxUxWa9vq3mMKgdbbSHn03vo/doXdgDSUbvaJ5\nB4B3DkmETVYkoaSgu03d5GC8gT61oZ7UPaBfATo3Zht0MQm9EfD+VcUJ/PQYb9PU\nI0dRiuAo64ppwkxx6IcsvFht8LR5CnAYK8D72snOZQW6tlWW25n1AgMBAAGjgZ0w\ngZowDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUH\nAwIGCCsGAQUFBwMBMB4GA1UdEQQXMBWCE3NlcnZlcjEuZXhhbXBsZS5jb20wHQYD\nVR0OBBYEFDt17X7vt95vcW3hmBqD35IEz50eMB8GA1UdIwQYMBaAFNPBCyPjw6jy\nMr5fVME/eDgTVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQCk1kHb5UucEN9rM0vV9wyE\n3PAJiCpSUTYQCWTvdYNE4Hr0BEO8SkGGEwnEM32P6b9lyev11rQJN5Av5eWivNlS\n+ejwy1X276GZrvg3aKj8ypITkAlyoX+97JUsM+x6F77d1wdGZWiyr9VnnPC9wOW/\nNbER60Mt9ZjR9RZjIk2ME7S7sYx95dM3CzHMpB/9/YRXKo0dw4sm6i517/HVEs7j\npUpMOyWTbrXWm/QCXDG0mwWOJzcmu+XBiWCmTAd3RvcJvdrk90JS3cHWH5C2lmgx\nyToMDd/21nKmROmgmBFhwmgvAfx2uGQevx2s4Va1txeMRQS/Lc6VnolATI3hH9aX\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns2_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SAN_DNS2_CERT_256_HASH[] =\n    \"8d4a2b9321f29cfc8bfbe28e8ec1069c9f87b900fcbf37e6beb1c5f24b8d3d68\";\nconstexpr char TEST_SAN_DNS2_CERT_1_HASH[] = \"7badb47d6d1c1b8cb7eebde80328e2ae34563922\";\nconstexpr char TEST_SAN_DNS2_CERT_SPKI[] = \"RMX61rJ5+5ZBkIkGU0NgDRCXuIKMKkMNrrL81ed0I4Y=\";\nconstexpr char TEST_SAN_DNS2_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832dc\";\nconstexpr char TEST_SAN_DNS2_CERT_NOT_BEFORE[] = \"Aug 20 16:57:48 2020 GMT\";\nconstexpr char TEST_SAN_DNS2_CERT_NOT_AFTER[] = \"Aug 20 16:57:48 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns2_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA42rs1FhlcG7vnlHFaNi/VlFN5GPRMXqUG0n1jr9hk6cEePZu\nbEXSEpVGNUlYHdjC/a4DFCqMtfPKhbqNXYvP9h57kkPbXL3q27YEsCFkbuiouYh9\n0rS6aYu1Iafl15ioPme3MHtVQK8T5F35ru1eeZTLFVC69jpJhjdPE9ykrZmXZPYh\nxZPb16a/cQqiMVMVmvb6t5jCoHW20h59N76P3aF3YA0lG72ieQeAdw5JhE1WJKGk\noLtN3eRgvIE+taGe1D2gXwE6N2YbdDEJvRHw/lXFCfz0GG/T1CNHUYrgKOuKacJM\nceiHLLxYbfC0eQpwGCvA+9rJzmUFurZVltuZ9QIDAQABAoIBAQCvdPiBoC2rQDvp\nNweKq+d7bkEdW5GzjfIk17v3xVpAM9S0itSI/plkMv028X5lkK5oUFAZx/ZGD3CK\nGnMk4FCzf7MxFt9wvyorF1SomyrEkwZSFqeU4FEOS3aVH9epriLn+tcQNaeBv1Im\nmAaiWorQXOcOa/nDLRc60QNWvJ3lPK1Divdul+iaGH8ahKlFUgH0U5+f0fo3aLqs\nPZqCq1Bh/8pc3TJ6+Re5RzqXGwauxjgWMAV9MmDBg8SbEAOUYjmzBld+Q+0ThSTS\nNNEBbvevbGBdJfh6gCx5mWlRBbuTZBuq1v6aWIbiVLRPPS6vh7hsdpvDWx25OAM6\nVjEOf8klAoGBAPrNigbZGUAGgd9DklCddRG3vex2WhJoeVH0tne6r2x5RZ7+4i6h\nGR/jdONXAIT1PnthO85UnOnxH0ccsAruCbYNKS8qa8wxSgwzziqb4tgolef0Vi9o\n1PoXnN6IyxuHLMZYkcxclHSYPiiVDskZJfh2sBbSP4XJzwC+EAv8x8mTAoGBAOgh\nVPj1V67jpMqIg430MHHAtzXhcepeyKzV3wc+dhGO/5c0lyWlINqGy0jUbL2OxmYW\n86h9yY4a7n6XX8H04LWpEkCblUQa6EIl4h+tKmnDz5xPotyPhcDP/ijbTgE1ulXh\nrMywkGmB266RrynQFhW4xIE6ehziZaAKC0rMXSNXAoGAVhzYdX15I/iYAvaKeE93\n8Ltr5JLGXmessC0bsYRR0ybYdr0BIA2EOOS6L4yhywUBjQ9Xn4KB1flKSfpZn4cX\nooBmUaF5HcUWrZXkBdE8VAerYNcN+H1AExakb+v8ANmqz/d9J/PprRbQ0NJNH1MZ\nLHRkBZHu6ZrfDlwz2knSty0CgYAv08ZNMUeqqg6nCC+KzPg6R8m32UZ+Up8oNTyp\nom3zOjJhNh26TOaS/9jcnZ/2AnfQSwRS/+/KTrQ68vdw4AJ4i2hOVYAQszaNgn6v\nph8qAYVCIIpexVd6naAxTNYqm6ZT0Zan6e0nP/FCa5PampwiR5p9Qc4tK1xi+A7R\nj3/VKwKBgQDHlSxggUrhPxGyAO/MMCmGrzxtu/Z/74eeXiJ8miwuHYnaeXIujomH\nA9mCFgQLhkuONYdOf/uND6McgIbhy/No/hfD6Nh6qTet5Y/poZIlh3BMDuUhHIRJ\nhkxUN4+oe4tfeQVtCgkuU5UTYbVefn2YSeeqWTneu4h3ep3Mk3oAZQ==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns3_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEHzCCAwegAwIBAgIJAJIn+AN8/uTlMA0GCSqGSIb3DQEBCwUAMIGDMQswCQYD\nVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5j\naXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQTHlmdCBFbmdpbmVlcmluZzEd\nMBsGA1UEAwwUVGVzdCBJbnRlcm1lZGlhdGUgQ0EwHhcNMjAwODI3MTUxMDUxWhcN\nMjIwODI3MTUxMDUxWjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5p\nYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UE\nCwwQTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0G\nCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD38zde1ZyufkkXHWmvuXc2MCjs7lDc\n04BJP0JQV6FiKcYW+nLCfyGHNS1Qir5RPvbbfmKk0TC4Hs4Z30SbFx9wjlRVAgkR\nyJST8xAuGjWm7wjbyEc0tBi23pJFLKcXXmN+PgNDpCWRCOiJxLCBqTYzD6nd337x\nQdCkl1HBPWvrgWMQIkzbn2ZwblNfjSjKfzTlhQOinmXGu1sEakdooXMgP7LNbrVc\nUt5xxe6qB870CjQ/peplKAN8FFeTrDAn82BjWcJ1lvU7UY1ehOwhxmDRULP4Kvsl\nuV3rFwQFqUSQ5Rto2ZL68zWExh455UGtlMLr6DpkgvOtSRKre5opBTjFAgMBAAGj\ngZ0wgZowDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYB\nBQUHAwIGCCsGAQUFBwMBMB4GA1UdEQQXMBWCE3NlcnZlcjEuZXhhbXBsZS5jb20w\nHQYDVR0OBBYEFKMjeX0IaNSsDyaPKBjJfaW5h7r0MB8GA1UdIwQYMBaAFKbQdxTW\nui6GjBedEeOPwmCUdTCwMA0GCSqGSIb3DQEBCwUAA4IBAQAn1R2bOj8fIR5HHIqv\nSEjXTesdOj0g3QT5iNkbDGUw5LfWnMldkdNAIfDV+r0ikEwG1xGUFko8xZGeMoI2\nhut8kLilnnJtg8oO8yXuWqBngdzrl/kcNDXleJEkjq0av5VilKc3ZkJ8U/FHKBZ0\nN5c+edjy+ab8vMC/XSWZVKQUdbmr9Ag+bZ0s1b70/OZxbfRi2GjLtP+QzJaRNOnI\nbofVfr5BHQdNk3iTMCBJgtrBfx1vlNyi2t7YC6fY+i6gg4zkP5dH7DHti16jVM31\nZ0Ad6xsksQGb8MReoPzgnXa2zGRnqIWWuKbbeJ7j0mWjX84/abjU4Hgqh85NuErK\nIOr9\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns3_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SAN_DNS3_CERT_256_HASH[] =\n    \"f121778059c3de638fdded5ebb1f98f280dd1204915401e1edf5d263f42aa65d\";\nconstexpr char TEST_SAN_DNS3_CERT_1_HASH[] = \"48574e7d11f906d0600406765a9cb5e9825fbd1b\";\nconstexpr char TEST_SAN_DNS3_CERT_SPKI[] = \"IqBAwWghNQiaw+Qsm/vFmQOSpNJSYpLcGNwU0xhFaoA=\";\nconstexpr char TEST_SAN_DNS3_CERT_SERIAL[] = \"9227f8037cfee4e5\";\nconstexpr char TEST_SAN_DNS3_CERT_NOT_BEFORE[] = \"Aug 27 15:10:51 2020 GMT\";\nconstexpr char TEST_SAN_DNS3_CERT_NOT_AFTER[] = \"Aug 27 15:10:51 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEHzCCAwegAwIBAgIJAJIn+AN8/uTlMA0GCSqGSIb3DQEBCwUAMIGDMQswCQYD\nVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5j\naXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQTHlmdCBFbmdpbmVlcmluZzEd\nMBsGA1UEAwwUVGVzdCBJbnRlcm1lZGlhdGUgQ0EwHhcNMjAwODI3MTUxMDUxWhcN\nMjIwODI3MTUxMDUxWjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5p\nYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UE\nCwwQTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0G\nCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD38zde1ZyufkkXHWmvuXc2MCjs7lDc\n04BJP0JQV6FiKcYW+nLCfyGHNS1Qir5RPvbbfmKk0TC4Hs4Z30SbFx9wjlRVAgkR\nyJST8xAuGjWm7wjbyEc0tBi23pJFLKcXXmN+PgNDpCWRCOiJxLCBqTYzD6nd337x\nQdCkl1HBPWvrgWMQIkzbn2ZwblNfjSjKfzTlhQOinmXGu1sEakdooXMgP7LNbrVc\nUt5xxe6qB870CjQ/peplKAN8FFeTrDAn82BjWcJ1lvU7UY1ehOwhxmDRULP4Kvsl\nuV3rFwQFqUSQ5Rto2ZL68zWExh455UGtlMLr6DpkgvOtSRKre5opBTjFAgMBAAGj\ngZ0wgZowDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYB\nBQUHAwIGCCsGAQUFBwMBMB4GA1UdEQQXMBWCE3NlcnZlcjEuZXhhbXBsZS5jb20w\nHQYDVR0OBBYEFKMjeX0IaNSsDyaPKBjJfaW5h7r0MB8GA1UdIwQYMBaAFKbQdxTW\nui6GjBedEeOPwmCUdTCwMA0GCSqGSIb3DQEBCwUAA4IBAQAn1R2bOj8fIR5HHIqv\nSEjXTesdOj0g3QT5iNkbDGUw5LfWnMldkdNAIfDV+r0ikEwG1xGUFko8xZGeMoI2\nhut8kLilnnJtg8oO8yXuWqBngdzrl/kcNDXleJEkjq0av5VilKc3ZkJ8U/FHKBZ0\nN5c+edjy+ab8vMC/XSWZVKQUdbmr9Ag+bZ0s1b70/OZxbfRi2GjLtP+QzJaRNOnI\nbofVfr5BHQdNk3iTMCBJgtrBfx1vlNyi2t7YC6fY+i6gg4zkP5dH7DHti16jVM31\nZ0Ad6xsksQGb8MReoPzgnXa2zGRnqIWWuKbbeJ7j0mWjX84/abjU4Hgqh85NuErK\nIOr9\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIID4zCCAsugAwIBAgIJAPDlREG4TLxSMA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMRAw\nDgYDVQQDDAdUZXN0IENBMB4XDTIwMDgyNzE1MTA1MVoXDTIyMDgyNzE1MTA1MVow\ngYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1T\nYW4gRnJhbmNpc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2lu\nZWVyaW5nMR0wGwYDVQQDDBRUZXN0IEludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAKGa0pSi8MI88LdnHT8oZJVDpZ6qa9ooYP6m\n3S+xIxRBOVOGEs0a1dxko5iAfWgJJRF8igT1bRQAlNsnK/lZpGOOo8txjWsTQPFD\nFLUSVVn78lnXvyYlIhAMqmhIAmSK+qo02TcsncBNa/iCT9aH9SEf0a7xjiAcyfm6\n3XHScpPC0o47tICqKnkMCJvOxi4yKM0SWIxNpnoXq/ixxcipsA8QFQ4GU2r9LBSB\n8+7+couSKdPDR0AvDA/t47P8pNxQHCCzvspEtX5wGMEOVVhAsh1GgwMNyYh9IbUO\nTkHqR45D2ky+x6emPDPdAgqzh4xDpMR4V+hsPQ/GWdXpKLfKjdECAwEAAaNmMGQw\nEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFKbQ\ndxTWui6GjBedEeOPwmCUdTCwMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgT\nVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQBFEUnhtsWzHM/xoy/vlntkdau/TYrpLQkE\nKXIeDbjtjLnMBKXu5z3epKWrSV12kEQWrkARNTuhW4+5XkZUA9gb2MPMXh2yse4u\nOy3rRhhzbXMas45IFbXqnI+xuS/99vJQrhjXGB49dbUV9P9dJ4hj2Uc27UmAx/zB\n6MHKDXhpRw/R6MXPtpQpZjDTEA4QU2yuwHWOZ56QHJ/aj0QZJvSbdK6DDY7VSKEC\n2yCX1d+S0lF5Hcgrhhi4Me2sKA7JQwKLDDlXMOKpcI0vMZAoKXPbA+tVBQUZICZU\n/t9YW49y8KWCjXK113JDIxQOtTCV8SaLnV9+qL+3ckbih6gz76Vw\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEA9/M3XtWcrn5JFx1pr7l3NjAo7O5Q3NOAST9CUFehYinGFvpy\nwn8hhzUtUIq+UT72235ipNEwuB7OGd9EmxcfcI5UVQIJEciUk/MQLho1pu8I28hH\nNLQYtt6SRSynF15jfj4DQ6QlkQjoicSwgak2Mw+p3d9+8UHQpJdRwT1r64FjECJM\n259mcG5TX40oyn805YUDop5lxrtbBGpHaKFzID+yzW61XFLeccXuqgfO9Ao0P6Xq\nZSgDfBRXk6wwJ/NgY1nCdZb1O1GNXoTsIcZg0VCz+Cr7Jbld6xcEBalEkOUbaNmS\n+vM1hMYeOeVBrZTC6+g6ZILzrUkSq3uaKQU4xQIDAQABAoIBAF9qlTBFe9oadv4S\nL42UDbAosFcdDrksGv4U/ev1DQG9maFWF7uJONnTOPD5s2uIk268IDHU3q68DAwg\nw0H7j/m8i+96rQrO6X5V1zAmnebzrbEGTYCWnbwhwqAkItj3VcpLCu6IyHwbtYDu\n1A3RlC3qJTBgxXUKIsR0Bt4aSOGDUzFm6EQAahon8Y4tnCAn77keKrkIqVAS7VuM\n9TBR8PCWIoxVcgSbwp7swUrx1xh+ij9EnDvpYUEThZE0LG43Pnub+v89Ic8uprmo\n1NC1sNNw98wP7PcAhtTed4t5jZ2gpxnUBNgl4abiCZkouCrUp8bo7oPyH1nLHZBk\n/Ei4QAECgYEA/9wgXV19QoLCXs3sMbB75vnJ2i6fA6Na9ClSpV3t8/CfFyX3rlnJ\nl64XB3VpRL06i7qtMfDPFwfuwrmC6wuGjMWjfHm6cJuXwXa41VzjSx7KGpMRGnUo\nCSn5k4qliVRRQ/gZl1HTLC8G0y2cO0OJ+cRcHcEskb+ezrUTDPP7VAECgYEA+BX7\nGOslCtm3CvXDdEMFYFf/0pywD/3RD0qQVlhsbed8W805u+ooFNYfiGa7WNuuntZu\nosRwqhV+eXqGMWT8RNP5CdGNVQjNBdekUFP8ciNiEINFjkbx6AGUMrRA089AhHbE\noA8sMvHoimV3QhBscwpjK31eEEb3S5nO8dgNlMUCgYApcwbHOVMGiFpms2N1NUsN\nQyAhOXZHR5p/fYZHBe17nFqZjGwQkbkn2mseqTWxjhpC8q4GLiXObFmQsC72Vvqo\nKYhvrOS4Q2yVSd9tqUulKsrA/VxC9gHTYjptXYOtuLwlj7bNndLhEIlBgt0ReMKM\ndPQbWeUXg3x9rjX03j20AQKBgQCDc/I3A/H+y9nnns3QPusMX9SYHppT/uPVvGa6\nuePKYIxOgHIFFk/iqorO1dCThuQCGhFJgdlI5la7u6Z7ac0aE0+0WisPUbiBTgAs\n6DOtrv35TfV/LCxmCOAj9csb8Id5KQ2K2j9LR082dgk5Ika0tl5R3v4GlfPXRUqY\nbyjTGQKBgHA5EA5bwjW+LoWDQV6+Cng88csm8W03PoogDQc0kDIxCVsvyInY6h0B\nOzDMNxJ/Ii9Ta4/e3RXss8ld8Sig6iWG2Fmb/dQCKqavz/mI3aqjiIfEdmufHXcx\n2RjFl6VX3M6QKXPttjHhCxIw8CNy6lyc5rgGCua+kkxqCyBwDMnF\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns4_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEHzCCAwegAwIBAgIJAJIn+AN8/uTmMA0GCSqGSIb3DQEBCwUAMIGDMQswCQYD\nVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5j\naXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQTHlmdCBFbmdpbmVlcmluZzEd\nMBsGA1UEAwwUVGVzdCBJbnRlcm1lZGlhdGUgQ0EwHhcNMjAwODI3MTUxMDUxWhcN\nMjIwODI3MTUxMDUxWjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5p\nYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UE\nCwwQTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0G\nCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDbLPShq+sqjKzdpeFgW6A7jRv1LuD\noj3TERmUd/EEexkAWKz5thpIfR0Y1YAQk69b7ZblXxUcKc3OFdsQM0Zksi3k9dK2\ncGL1xkqUtiZj1i7wA83F1Urs7rxjFRN1UlrC3jPZHwtDz5J24pw88gAnDZL/PHRR\nYHTwZNTlbJiIq+FWza+ZlX09WHnwM9hnjNkOfutiu+nifEvrnwloNi/IbckOngIr\n26X4GrtZcuz7SqkRD+zLku/YpH5FpU+ADg4x1lbz//N85qMrlXXGPx/ziDsBpfGc\n00vdlVu7L2tP+oftq/uZ3pVyQy9aj2amu1z+v4ZNG+SUcG1Qq+1pvRxPAgMBAAGj\ngZ0wgZowDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYB\nBQUHAwIGCCsGAQUFBwMBMB4GA1UdEQQXMBWCE3NlcnZlcjEuZXhhbXBsZS5jb20w\nHQYDVR0OBBYEFPpOgSQfZnHZ8g73cEXul0nKdXteMB8GA1UdIwQYMBaAFKbQdxTW\nui6GjBedEeOPwmCUdTCwMA0GCSqGSIb3DQEBCwUAA4IBAQAtwWaTFQTPUR2z5G1A\nHbbgElSmLO80wsuA2BVevSKdwnWN/MpyM+lXJS4OUFDSTNbGIan/2c7Xu3suJURY\nnMoPQFr3z73Q3qZhCIHKbJDVfH+N3oa1xUbkLb2UQK5KMwq29UqpKGG1oKdvJlC3\nz2/B+peIEpkW8iMCMjRdpslhW3vuTH+A6IeDjG7hzgbaBcbHY7GcfMx4XAP3sB+4\nXFYUsLsxuUhqy/j4zHjiUXmwoMQuON7yxhmpeO46xg+qcxzmNX+IAPADfmH8jS+l\ntVORCUn8BbMYdX+ceU7/naHdFX5ltH3tmCST5xGEb4lRJ/2NttrTvEWImZmB2fLX\nNJMm\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns4_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SAN_DNS4_CERT_256_HASH[] =\n    \"bacedca770148c6e353bf449edae8d2b97188528118be0846eea7d5da73e72ba\";\nconstexpr char TEST_SAN_DNS4_CERT_1_HASH[] = \"46bec336a88879335207923081013e5cdcc4e24c\";\nconstexpr char TEST_SAN_DNS4_CERT_SPKI[] = \"h5L1TnkoSRao/0KnRZX7re4dsuaZL0rqco3bg+H5/Xg=\";\nconstexpr char TEST_SAN_DNS4_CERT_SERIAL[] = \"9227f8037cfee4e6\";\nconstexpr char TEST_SAN_DNS4_CERT_NOT_BEFORE[] = \"Aug 27 15:10:51 2020 GMT\";\nconstexpr char TEST_SAN_DNS4_CERT_NOT_AFTER[] = \"Aug 27 15:10:51 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns4_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAw2yz0oavrKoys3aXhYFugO40b9S7g6I90xEZlHfxBHsZAFis\n+bYaSH0dGNWAEJOvW+2W5V8VHCnNzhXbEDNGZLIt5PXStnBi9cZKlLYmY9Yu8APN\nxdVK7O68YxUTdVJawt4z2R8LQ8+SduKcPPIAJw2S/zx0UWB08GTU5WyYiKvhVs2v\nmZV9PVh58DPYZ4zZDn7rYrvp4nxL658JaDYvyG3JDp4CK9ul+Bq7WXLs+0qpEQ/s\ny5Lv2KR+RaVPgA4OMdZW8//zfOajK5V1xj8f84g7AaXxnNNL3ZVbuy9rT/qH7av7\nmd6VckMvWo9mprtc/r+GTRvklHBtUKvtab0cTwIDAQABAoIBAGSBPkYyoJyzd5nW\njvzJYTSLzYoKJVoAkb1AWSCDQwAj8uq7sFJItmG7fsBr5Q4hdhlYKrfSW7/9O+wW\nT3CYfkaBYJsVVgYf7LUsAuVSfJHx8Hfd7PEmzf7mlRp8F2LKNQVtU8sBbUC1u0TL\nLMEnUK7FC2mbBNNpQRr1MIzBef0nSnGtI8paWLY++bxdyIIcikevaLVkaLekmt5K\n4Esp3URBuBo8uCjuu8fv/II/m+NoFeDs/eUqrJ60DWJXcRdiJR2xZEbDnotSSmAi\nHt/31BuFrJnqygTZJZP31OqViUu7GwerDHWlhqQigM+ZAsnaDzS0h0y51bghfgdr\n5Uc9dukCgYEA5tBKGoKEOYu84OzH9RhBKvQb0f4fYTFkzBWeieKnaW0tZ/DAjcaz\nri5uHyyaHx1idJ5LKMKSz26uGJqnQQ4OeWZK2P1isaLEHRAv4+KiVDmh7rDkYLAr\nX3x/DYIVs9kAjRByqgusEHlQeoysEK1PKbZOozEuL8P5qVnFTj8h6jMCgYEA2L/U\n1V+Cr0rFB8Abmz8IoC3dJTCmqk7nnVgx9FCEuRQ0fW6sRaIpmHJC9qYVFk2efX4K\ngd0Uf7WdQgUoXdsKLEMVamGl8aVi/NGLNF/86+ej5iGYByAc6SSMpDLmNMcckC6f\nTMPfnztl+xFV9Cm1C+khIPSuuu3FyyKVAyxBoXUCgYAinArwjK+M/XIV/AtDqh5j\ndXUSeNt1tHVb0MYUAON/gHp1gnktCSV6McYbcJGvprVTrUSx2PMwnXUHhXEc5SGw\nfHEutlftMO3dOcckkaArpvSsGOQ1x1TJnZ5jhSUB8cEOZe/7pYJoefhiV+OZIJOl\nOovyFnbJfMZb6XUXdYn9VwKBgHfulW7HXYFvBtzGAMCDPVucZRxZ8i6UYHZgwatl\nDjt2oaCyJ2KUWx2+Q61EsrBXnJXlsxyuXtRXgr9wKSjdtaBOMkBd7btq/v3TWIpP\nfpxt3PPOrNdsfnLGxYdpnpWfgv+IvJ52V3s9G62zpvLmTQZdJfadTT2jqWlAJmHO\nEDatAoGBAKzd+CFyAE9yiDN31t1/GpEUJrhAGppNP3J/N7np3AjKT4n6lKS62d5U\nI7zZJbyQBo3RoGgmtC08+HsnSz1vOSQbjWW87aq+6GiolGwDF8r0703YSxwVDQBw\n4tR5lWFUTNwKms0lnERBNmvf+6mdE851HkYMzzR9J6fjWBpOfwxc\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Server\ncommonName_default = Test Server\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nDNS.1 = server1.example.com\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEHDCCAwSgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMtswDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ3WhcNMjIw\nODIwMTY1NzQ3WjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGfhKM9k+YQm9SAj413Au17AYKjItwtV4S\nZGELiT5aIq3RkdlhHYeEHODtjAGcTaTRYFT2yciKGR37Sm5JXT/0RyQ5Yf2Kpzfl\nIcyZQvc+gfG/nIm+3/lY5mBUe8LPSwj/+Xydu2vuR8muBwlklBD6DH0KjW1vP4/6\nhovgiX+54liXPIp/yJ/EaLWZTDMs66/jum3u1CwT0vhpVcjXQ6+JHPpd35vaBrsf\n57XyTlhBEjM2GsGjOtATZ8pBD2SZSS0D4sO25tv+4GvNmqmP7fJDUn+DJloxBukj\nydkFIah/K6kHEkgg/+YNUCRMimU/oEHZowmn+Yq5F/xdOVM/jK0NAgMBAAGjgZ0w\ngZowDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUH\nAwIGCCsGAQUFBwMBMB4GA1UdEQQXMBWCE3NlcnZlcjEuZXhhbXBsZS5jb20wHQYD\nVR0OBBYEFOr1/iHXL5x7hJJKC51lZBeTnsTdMB8GA1UdIwQYMBaAFNPBCyPjw6jy\nMr5fVME/eDgTVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQBH+Mo8BBrGuVlFHwFMrHA/\n4qQLoJ6HJNsTIFh0m3u6KKypvvW0Zj5FL2NEu/HIMCyfVSLl0K1ou0tTKcKhuf/v\nMPFbbdr9s+o8ofsbVUzFvWKNV2uDkq00te8Gyinf+aUOvmsAFEDCOSZnzhErD7yJ\n5vAP+5vb15aWEau4ZLYcp8wpdpMgaIN3xp98TSNkLh4XH7BX/sxlsyObRTEL0II5\nqhrE/clNZGW2Cs4eRfnz8ydjvRfhBuG0zUcEUrt7YHZFCEiavO49Vw8j1HKw9KA1\nxyzWDRDnx9wx9pY4EoB82XFfyANqtZ/UBPauaEBlTfCKRSNFI0eRakhaLLPFp+Y/\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SAN_DNS_CERT_256_HASH[] =\n    \"bea6f1e274b1d0fc97c116d05ecf6a631d6603cb930ecacd80fa3f29cee4fb46\";\nconstexpr char TEST_SAN_DNS_CERT_1_HASH[] = \"c0e8754ef7ddc8115aee75a971a1267da94924aa\";\nconstexpr char TEST_SAN_DNS_CERT_SPKI[] = \"LNBf5NasAGvgyEuDtvYnXs5yqFMA0/xYHlSAlF1cwgM=\";\nconstexpr char TEST_SAN_DNS_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832db\";\nconstexpr char TEST_SAN_DNS_CERT_NOT_BEFORE[] = \"Aug 20 16:57:47 2020 GMT\";\nconstexpr char TEST_SAN_DNS_CERT_NOT_AFTER[] = \"Aug 20 16:57:47 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_dns_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAxn4SjPZPmEJvUgI+NdwLtewGCoyLcLVeEmRhC4k+WiKt0ZHZ\nYR2HhBzg7YwBnE2k0WBU9snIihkd+0puSV0/9EckOWH9iqc35SHMmUL3PoHxv5yJ\nvt/5WOZgVHvCz0sI//l8nbtr7kfJrgcJZJQQ+gx9Co1tbz+P+oaL4Il/ueJYlzyK\nf8ifxGi1mUwzLOuv47pt7tQsE9L4aVXI10OviRz6Xd+b2ga7H+e18k5YQRIzNhrB\nozrQE2fKQQ9kmUktA+LDtubb/uBrzZqpj+3yQ1J/gyZaMQbpI8nZBSGofyupBxJI\nIP/mDVAkTIplP6BB2aMJp/mKuRf8XTlTP4ytDQIDAQABAoIBAD5WPbEzcc/ZnWhd\n9/emQa9CjqsldG1TKFYSXD/pPEHGflxAt8o+Id8lyqYPwDm8ULTI0iYnPOnR9Y6D\n05sKxBkNdpzm3196IJ2/PagsVcW1vZx1pvThFkgLIdmmfInpgMwTKEXJJJ5SwU38\nO8Dwl1xQK7zyqZm0lp0RQECmLgzMMMVIr7sa2uOEQJ9hVWUEu3fXsniYh5hDqWOx\nx367sU2L2qEOyWzZDedm/b4xuDMZ+SK0W0dhgenLsagiUmGQcmBeg5qjE6Lcclzh\nqsly3Jcix/djTHHCdcZxdTCKDD8BEjTsddmdX9UtDR5yxT0smIh+Bm40gSy0fM3y\nU5zsagECgYEA+TB9+5ahAz//PpMN+XewSQPOF25zFHBVuCuNjHXi07gbCrGV6anS\nSeXSOZxnXmVjavXOgFXykTRqvGme122x/rsO6ekED87UMPkkd60LjNbNUjBual3y\nrRwhvfD6sXjpKmq/D8/xAeXT6lRZV9dhu3RB0u4iQnhvnuGhjYIVC0ECgYEAy+re\nPN84zQVhYOOkFVwMtA7f8hkObTo5C4MSjQW4UpWdKTM5H1/DEpFQNDR5txr3JNTj\noFtT0l7BzlXCwrR/kvkJ7oleYPKT8EjbS6TfYrzA5A+WSBJgooIbPKlX/GjTzkOV\nbjjjY8lkkMsXtR9iWae+A9n4IFxy0CGgdaxaKs0CgYEAz2rswrkrxv/DHzvamvdk\ndjJDnr9h7PZgc49VIECUEPUjFTCifz/K+ZXh+fBVHn64VTTLsYlZAkKmQbSM8fat\n/UGI9Wzu45WejDEliLQydAXsG+iST9lpgWx0LZ1jnsvtEVCPPsMwcgry7Q9O2s49\n3IG1tLRhMgD7sD1Rcu9/QsECgYA2R6WypGS1RlpedRoHSYJkJ5V7Fypc7uOiadh1\nS3F+Ii88eVMaQ95kQeQMkW9Yy0fAOR8CX20XkXubVf7K30saL0pygYRq4Ad7LZma\nMB/6Y24hBvkk5Cdu+p0Pb7BpEupWfGz5cxEsTb6EQFtmLh166gjNc6b1zkYNmvXg\nfOlffQKBgG8qDbr0xUNNfUFvuwJKio2JvZksVGo7yrmB160BJIv7NzKOdEw+clpQ\nHbZjSkHwQqTEk+5LEXPJ3h9YPcYwrDi7I5zfxqbq09eAu9HmJmSoeQC3THUPcurn\nMHukFnGbSxAT1u/rEqFu80734WyU+Vog2L0nn93K3oVR2qWEqhBj\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_ip_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Server\ncommonName_default = Test Server\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nIP.1 = 1.1.1.1"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_ip_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEAjCCAuqgAwIBAgIJAN9r4+ZJ2zR4MA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMRAw\nDgYDVQQDDAdUZXN0IENBMB4XDTIwMDMwNjA2MTQwN1oXDTIyMDMwNjA2MTQwN1ow\nejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh\nbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5naW5l\nZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOC\nAQ8AMIIBCgKCAQEAqnrHQboGfxBxa5tyUapHm7c5gkXaN6DsuHu2+rKf8p13ZtO4\ncXMsvu9wM8QSSsO8Y1UuPBtAkAgq1MRfRZ3POBDXNH4Nw6WaLldLNH1I52q0zaFA\nctzYpgsYBeBMalmj4nqPO+0V5NZ4ahTuFNSVke1XAdNYqGzVrR0jtMn+RKJ7lzjg\n9CoSKADKCDJ3XdKhouKFlrnoFcRUtJgwHkOL6iN+6p/d+qkRTCpgplNhtoclnMrg\n1IWaCzuhtIMus8ygx1kuT6pC/NkqiTOmcifcw4tjDhaQd5LAAMFwsAxyGieP4/bE\nVAMC+EYffkhydQ1NB3Wnn357W5KTaDzIylJTLwIDAQABo4GOMIGLMAwGA1UdEwEB\n/wQCMAAwCwYDVR0PBAQDAgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD\nATAPBgNVHREECDAGhwQBAQEBMB0GA1UdDgQWBBQmouQGOJvqwhFeX6fNNaIWVVcA\nfDAfBgNVHSMEGDAWgBSLgpmApTtprdcKFiTUZxUkLNRhZTANBgkqhkiG9w0BAQsF\nAAOCAQEALlA8Jl2H+Q3bXfhMsND/pSaGpRDMbA/v8F4eEurlxvTvAw6CLjB5tSBP\nxl/dW0sGIokKdWvV0TUnGhUPysXiytC8/oVJfSQ0BBsyfhhI7ZHxt4I4vkr+CeP+\nXfPSUt3xWutRXHr9DYqZxw7huUWiUsSCR0giu2f6WIgYI8DlULcWmVZOxFE+JjLv\nrpeJxJMF1lbx7577zp3e9W1in7ktQMgmsNo7IfAx5OfmnEkbQj+3XVhQU1d9r9Da\nCJIoaI0nDEbfRIzYbNrU1y1DAwTSG+M/hH6XDDBaLz7dyq9i48AvS7AuvWZIYOf0\n5we3dPbVba/eRtoAvVnCY/4GbS0xYg==\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_ip_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SAN_IP_CERT_HASH[] =\n    \"eca5dc9d474d36e93ac409dc4518e40e106e8671ea00ab37c0db3a04a283ba29\";\nconstexpr char TEST_SAN_IP_CERT_SPKI[] = \"Q5bol66HDGlC5Kr1kq6VzLy3fKxWXqr6m+z0HPhxM2o=\";\nconstexpr char TEST_SAN_IP_CERT_SERIAL[] = \"df6be3e649db3478\";\nconstexpr char TEST_SAN_IP_CERT_NOT_BEFORE[] = \"Mar  6 06:14:07 2020 GMT\";\nconstexpr char TEST_SAN_IP_CERT_NOT_AFTER[] = \"Mar  6 06:14:07 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_ip_chain.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEAjCCAuqgAwIBAgIJAN9r4+ZJ2zR4MA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMRAw\nDgYDVQQDDAdUZXN0IENBMB4XDTIwMDMwNjA2MTQwN1oXDTIyMDMwNjA2MTQwN1ow\nejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh\nbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5naW5l\nZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOC\nAQ8AMIIBCgKCAQEAqnrHQboGfxBxa5tyUapHm7c5gkXaN6DsuHu2+rKf8p13ZtO4\ncXMsvu9wM8QSSsO8Y1UuPBtAkAgq1MRfRZ3POBDXNH4Nw6WaLldLNH1I52q0zaFA\nctzYpgsYBeBMalmj4nqPO+0V5NZ4ahTuFNSVke1XAdNYqGzVrR0jtMn+RKJ7lzjg\n9CoSKADKCDJ3XdKhouKFlrnoFcRUtJgwHkOL6iN+6p/d+qkRTCpgplNhtoclnMrg\n1IWaCzuhtIMus8ygx1kuT6pC/NkqiTOmcifcw4tjDhaQd5LAAMFwsAxyGieP4/bE\nVAMC+EYffkhydQ1NB3Wnn357W5KTaDzIylJTLwIDAQABo4GOMIGLMAwGA1UdEwEB\n/wQCMAAwCwYDVR0PBAQDAgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD\nATAPBgNVHREECDAGhwQBAQEBMB0GA1UdDgQWBBQmouQGOJvqwhFeX6fNNaIWVVcA\nfDAfBgNVHSMEGDAWgBSLgpmApTtprdcKFiTUZxUkLNRhZTANBgkqhkiG9w0BAQsF\nAAOCAQEALlA8Jl2H+Q3bXfhMsND/pSaGpRDMbA/v8F4eEurlxvTvAw6CLjB5tSBP\nxl/dW0sGIokKdWvV0TUnGhUPysXiytC8/oVJfSQ0BBsyfhhI7ZHxt4I4vkr+CeP+\nXfPSUt3xWutRXHr9DYqZxw7huUWiUsSCR0giu2f6WIgYI8DlULcWmVZOxFE+JjLv\nrpeJxJMF1lbx7577zp3e9W1in7ktQMgmsNo7IfAx5OfmnEkbQj+3XVhQU1d9r9Da\nCJIoaI0nDEbfRIzYbNrU1y1DAwTSG+M/hH6XDDBaLz7dyq9i48AvS7AuvWZIYOf0\n5we3dPbVba/eRtoAvVnCY/4GbS0xYg==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIID4zCCAsugAwIBAgIJAPDlREG4TLxSMA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2luZWVyaW5nMRAw\nDgYDVQQDDAdUZXN0IENBMB4XDTIwMDgyNzE1MTA1MVoXDTIyMDgyNzE1MTA1MVow\ngYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1T\nYW4gRnJhbmNpc2NvMQ0wCwYDVQQKDARMeWZ0MRkwFwYDVQQLDBBMeWZ0IEVuZ2lu\nZWVyaW5nMR0wGwYDVQQDDBRUZXN0IEludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAKGa0pSi8MI88LdnHT8oZJVDpZ6qa9ooYP6m\n3S+xIxRBOVOGEs0a1dxko5iAfWgJJRF8igT1bRQAlNsnK/lZpGOOo8txjWsTQPFD\nFLUSVVn78lnXvyYlIhAMqmhIAmSK+qo02TcsncBNa/iCT9aH9SEf0a7xjiAcyfm6\n3XHScpPC0o47tICqKnkMCJvOxi4yKM0SWIxNpnoXq/ixxcipsA8QFQ4GU2r9LBSB\n8+7+couSKdPDR0AvDA/t47P8pNxQHCCzvspEtX5wGMEOVVhAsh1GgwMNyYh9IbUO\nTkHqR45D2ky+x6emPDPdAgqzh4xDpMR4V+hsPQ/GWdXpKLfKjdECAwEAAaNmMGQw\nEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFKbQ\ndxTWui6GjBedEeOPwmCUdTCwMB8GA1UdIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgT\nVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQBFEUnhtsWzHM/xoy/vlntkdau/TYrpLQkE\nKXIeDbjtjLnMBKXu5z3epKWrSV12kEQWrkARNTuhW4+5XkZUA9gb2MPMXh2yse4u\nOy3rRhhzbXMas45IFbXqnI+xuS/99vJQrhjXGB49dbUV9P9dJ4hj2Uc27UmAx/zB\n6MHKDXhpRw/R6MXPtpQpZjDTEA4QU2yuwHWOZ56QHJ/aj0QZJvSbdK6DDY7VSKEC\n2yCX1d+S0lF5Hcgrhhi4Me2sKA7JQwKLDDlXMOKpcI0vMZAoKXPbA+tVBQUZICZU\n/t9YW49y8KWCjXK113JDIxQOtTCV8SaLnV9+qL+3ckbih6gz76Vw\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_ip_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAqnrHQboGfxBxa5tyUapHm7c5gkXaN6DsuHu2+rKf8p13ZtO4\ncXMsvu9wM8QSSsO8Y1UuPBtAkAgq1MRfRZ3POBDXNH4Nw6WaLldLNH1I52q0zaFA\nctzYpgsYBeBMalmj4nqPO+0V5NZ4ahTuFNSVke1XAdNYqGzVrR0jtMn+RKJ7lzjg\n9CoSKADKCDJ3XdKhouKFlrnoFcRUtJgwHkOL6iN+6p/d+qkRTCpgplNhtoclnMrg\n1IWaCzuhtIMus8ygx1kuT6pC/NkqiTOmcifcw4tjDhaQd5LAAMFwsAxyGieP4/bE\nVAMC+EYffkhydQ1NB3Wnn357W5KTaDzIylJTLwIDAQABAoIBAHKOtKznG4xw+TZt\nK6HTAfGSaxOuatfszIWfpLLMK4014VF4vj2GPrBDZ6txezVAWKyLRdlgGr9urZFy\nL+8w/1lWf7anagppkrTHNiMceUwAuWRgklVdMDxLwiXYtqDUgbsjznJfe9HdiOcI\nD4Bx5dw8l7YDVlU/ZUrCBQdsoO9vArJuLavBBifC+wimXdGkxtGWDtA0l0asOYge\nkipUnXenlzEFCx2D2zJ507xC4fOfV0NN/cmwA7pzfIHFMbWAlu3hWEzDmwie/mI7\nsf/jRjFMBvDAXn08nVC5V70AjCKF4PaJu1ckn/dFJL0q0abT/XwSpjh1j0Kz2UCG\nKGakLaECgYEA3tPze5BocpdzTgMM43PW3Tlg9qQNv0h7vHVyjI1IiiL837eq64Ce\nzJjnYCufRtQmgMhQgGc1FkXfGipUXpzyLLuylJlvi7QBdKp10cg/qx/JX+WOODrj\nJDthJveHaQukTQKZd04LwFm4NCxJDntjq9a4DTF1fEbBD9RhCUVztVECgYEAw9vQ\nfVh2oUNoPFol/N++RjoCL9DxZUR6EXa5jMAx+GZjCCHe96KsYMN8WhwiMl2xg/Ei\ngaIFu8baqSqqNwFBledXTEyRBHIEIjHPK2zzMo+FfFaI/Q/d7ea9NOEKZyVelv+N\npIoIQ0Dq3pgBqhsOA1ZTsCoDKWkH+H+pWvvdYH8CgYEA2QRsWqemUhSWxB/Maank\nVckQWqYkjWu2tzFbwu/G+mJhDjH/USYPgwxYLB3QHld4uGxfmu7pkSuzwfP//7Wz\nV0XnAzr8nH9P+6QY45u4Bp4Krk8QWEth9WOvlxf01aX0+tvRQ5TLdSbsdEO9U1Fx\nHWceEhtdZRAz74rUhwcRTkECgYAT5060rjy//p2lJHQf0oz/jf2S5mThqafOBoas\nqwto9t6xgN+rwrhJzRPiBGHlRGg5xFOBYPm8L2UhM9jAZjxIA453joBeN0f8QZuQ\nCi5cwQELta+OKAKhnoUzznrEKKQILEg0pGJak0pK/J+Ordkqtacji+FuX+ITesW3\nHUlN+QKBgCBJEIgfG2LyK072JBA3Sqk2N3zGIuFb6MSNKccEkGg7s5OLa+ap7E9f\n7GIQ/A2hxr/NO7OHoH1WNfkSi69+1Ujn9wBq8ARsldNXoEPypKnPCAGmV4NfNn4R\n/IueqraaSFXYBWfnxNp4sXqwqoExXCG20+rNV4pf06lvBqkCyFrC\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Server\ncommonName_default = Test Server\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nDNS.1 = *.example.com\nDNS.2 = server2.example.com\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEKzCCAxOgAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMt0wDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ4WhcNMjIw\nODIwMTY1NzQ4WjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNzlTlpi24s27HKk3rbmLQOqkAG1eOBzvc\nzh619xElOns+vYhjJ2EppkCSQ2cXJ0kjA5gLrseJMQaWwvAK9REBrK0zCdO/1Ww6\nB+JtdnPCsahQaISkXTfK5mr5Ds9MnEhj1dWCSi4FX10zxL14PCEPnoLiBcI6UjUj\n6nT+4i1qcmEdAm30If+6NxWK+WOx5A1amRBrvSeLNLZZ4RdY6+uqf1r/A1ycZ8fs\nFDsFGZkxyREI6Q8dEs54XLo20qnhP5doRoPdvBTyzx6HMuj4dkmv9AIueU1a6ZY8\nkMG7zk/CvY+M0Mnav1Uey7FYs6OoKmLWIQiJMn9c6PRa8ga43IgNAgMBAAGjgaww\ngakwDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUH\nAwIGCCsGAQUFBwMBMC0GA1UdEQQmMCSCDSouZXhhbXBsZS5jb22CE3NlcnZlcjIu\nZXhhbXBsZS5jb20wHQYDVR0OBBYEFBoKznrRgwq7F19bkOnV+ZIiIVZ5MB8GA1Ud\nIwQYMBaAFNPBCyPjw6jyMr5fVME/eDgTVQPZMA0GCSqGSIb3DQEBCwUAA4IBAQB6\nyJ4dbMaq+p2t6R1MIQQLRrOWrpN1iYBzy+1bYU+afalLbjOXsOVWiRSN2YrcisM3\ngYxhv5ATZlD3qccPRsSpUmVOBaSH03skA87okq8cHWqeDRjmhMdMGRHEWMZd8rb5\nB43cjuzyHyRWBsPXHRG2oNKNr2BmdqbjiyhksGfub0D5xVov4XL5TZSMdwoPYy2J\nsPWyX3Dm09iuQdAZI1hDbgYV1ebZ4tGrzoybWTYlf970/4J+0sae/mLxloc5+4Ut\nQjQ+959pE+h0fiHNAq87GkooxALJfS8ahK26icHquWa6fnY2C35NOpmIOe2ghh57\niBqbIUnubYigvoKARkCQ\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SAN_MULTIPLE_DNS_CERT_256_HASH[] =\n    \"a633dc2b712d40064ce2ab7fcf7959b80dc08ba25adbdbe8af608a0044086787\";\nconstexpr char TEST_SAN_MULTIPLE_DNS_CERT_1_HASH[] = \"50a10bc7dd486f635c81f9a083d870edd0cc2084\";\nconstexpr char TEST_SAN_MULTIPLE_DNS_CERT_SPKI[] = \"dUdLBaYYAEu+5fyYltqRLjn/uW9lFs63LKC5ChYLCpU=\";\nconstexpr char TEST_SAN_MULTIPLE_DNS_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832dd\";\nconstexpr char TEST_SAN_MULTIPLE_DNS_CERT_NOT_BEFORE[] = \"Aug 20 16:57:48 2020 GMT\";\nconstexpr char TEST_SAN_MULTIPLE_DNS_CERT_NOT_AFTER[] = \"Aug 20 16:57:48 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_multiple_dns_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAzc5U5aYtuLNuxypN625i0DqpABtXjgc73M4etfcRJTp7Pr2I\nYydhKaZAkkNnFydJIwOYC67HiTEGlsLwCvURAaytMwnTv9VsOgfibXZzwrGoUGiE\npF03yuZq+Q7PTJxIY9XVgkouBV9dM8S9eDwhD56C4gXCOlI1I+p0/uItanJhHQJt\n9CH/ujcVivljseQNWpkQa70nizS2WeEXWOvrqn9a/wNcnGfH7BQ7BRmZMckRCOkP\nHRLOeFy6NtKp4T+XaEaD3bwU8s8ehzLo+HZJr/QCLnlNWumWPJDBu85Pwr2PjNDJ\n2r9VHsuxWLOjqCpi1iEIiTJ/XOj0WvIGuNyIDQIDAQABAoIBAB0YS1Y//Py8mS+c\n4nEjJOMNN/Ek7GbOzP61QRA2gn0B3ISNJOfnnEvYDg7faoaSbjt9+ZPFPL4/aRAN\nNQRHiwTylXW+sOnQ0UEO9krFUS44EXR0ojPeXdHYa00Uc+f/vBgpFcsak5D8318/\nNA34ajAcF6K+b6vxDUQrlCiobwU6LxNBvoZl92HlliRVFSM+b5R0YGESJP2N3a0T\nrq+KsRm3NCKDtGklpmzLqaRfDhmTqIjI2tS739uNkvbxAZihuIV5Xb5rHXQrXgP4\nWZX1X11XwAzrDHib4jAhOs2fBDn6CDEronexw+tjQoeexCgSB/dgOjWJKyUJVXP+\nhKo+RH0CgYEA/ofMTpHIEnBSXE19Zz0txCNrhHpbVSKVF+H5aXVou/WkkRTZT64g\njUgBlf608vWJRx67Ae4GxCVkwXFt6J5hQIS4zk83SfKPh9wwvJpDJpcBUB1n7IVe\nuqRLO1Xr6+0pTziERUJhnU+ldjELa7Ca6E01wfm7zIF+GRaB7MR4o6sCgYEAzv6E\nhHQ3Ae7EiFX1hXfmwWKdZqgQHaDUI5Fe6bsiIQXxXgYhkeUFLOx2mdx1NwDusGY0\nQF4VOZ3H9m+SqZ6C55YbeigDGOJwi5s4nk8kX4/JBHGGRhG9gsLd6Tzbg5wnr1gz\nG4iVGdbaeDHfUhw/wzhsywBvlCdOYhjPPm8ZyycCgYBhm1nx113OFFzBIMYyTPe7\n+4lDqFhV8YkTO4kVs/yR98VADaq4L6I0C6TwDFj/wsD+EP7N4qWdmmlk9Uacqf/J\nahwkoaQmK2p3Qy09ZjrSFZ5fRqePwCVvlYl0G6L5Ol1ZD+dJWAdu+BgPlwc2KOd+\npf0zgb1O02mz3Tm9vio1OQKBgHSZkKJgKKBRFKpL6Zr+eKEU4N9z6mgWMYm7mMCG\n5mm9xEIXqA9b9/dfWEvBkNGkF9CMMgDUPpaPF5XtMauUm97CgsXkBiKdY5JwpIsg\nv7eI8k4reNy9n4gqJwD8hBb1/rDaOSfYX00H51bvmQHS4eQhQ79yhmfpYFEH8q4T\nvW7fAoGBAJryMX7ccbycdhwqIGrLIpQtocuxQuaoRdVOWFMpbNM97WvGb9MKFRSQ\ntss816u6o+/5bSSnTxbaYni3VH1pQDfUT8jKr/Nrn/gifL2f89BpGhmGHjpQQ3ap\nP7uWy3cHYEVLdsx8ok55ECLDOAWsKwwlIMDZAoMLeeVS7kWjd4IW\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_only_dns_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nDNS.1 = server1.example.com\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_only_dns_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEBjCCAu6gAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMt4wDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ4WhcNMjIw\nODIwMTY1NzQ4WjBkMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB\nANaD2q+imvPbSEbePA5JPKNHfwpx3W6PrxXgNSp13BEplNy99CVVBExRj59D+hrW\nrWebPFunP5W+zrx4FhDzlT+x7pCLS8rQiAOFDOXnaX5Qd14lMSTpFqlpmOYD6TN7\ne/JrIHvoQf8eyJjVDwMgLBJBUyr/8Ulz8ijlZgjfbHDez5ZUtiIbtz4Is7wn1qRT\n8InomZSfgs5QexY9oUI6e/noPuWq5XaVEU2Vk8XoQZE/76zNFzDOlI4i6p5vYRhg\ne2jxvdYcy6Qb31XaG85uXBAYtx9X7sxfl18GxBebVerZnCs6Ue/lTXtMJmAzRgUs\n3sWAt5hxj1EGJoNMTgUSEr8CAwEAAaOBnTCBmjAMBgNVHRMBAf8EAjAAMAsGA1Ud\nDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwHgYDVR0RBBcw\nFYITc2VydmVyMS5leGFtcGxlLmNvbTAdBgNVHQ4EFgQUzBR904O+dDxReWTOwELy\nkhGkxu8wHwYDVR0jBBgwFoAU08ELI+PDqPIyvl9UwT94OBNVA9kwDQYJKoZIhvcN\nAQELBQADggEBAEJa8gI+QUP++nqnnWbMVqtPACLPk6VH/yyddO9KcVaqI5kmh6x/\nPPXYN6JhJsVKSH7HuiG197kup8aI4TJnva7qOTmHHwbaPAqQQaHi9nuZsSgF0KQa\nm2N0BdI7y40K8EMEs/QMXCCj4/N14Rq92+hPHg2Khgdt2gMZCDxnuNTJg/eoKkRm\nnijtaD0ELjDQ3vRGCiVq6K74HSa3Srs3NjOLhWN/c1W/Z93ADzqmG/Y+EK50yXnP\nEltv4lg8kOEB0PZ5/+1cQLqOd/SSokH7fYi3vBp01kD3qcxE4XBluVQmgsWOqWOe\nrPJHor95BVKbvT9wEIr0lSw3uAazJw4qPrw=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_only_dns_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SAN_ONLY_DNS_CERT_256_HASH[] =\n    \"30971c6b904e2edb83c15156692df1683f12f5201d9afa628bc1ba455335e444\";\nconstexpr char TEST_SAN_ONLY_DNS_CERT_1_HASH[] = \"6f06962f2f96565c00b6ae3f4816a620f034dd74\";\nconstexpr char TEST_SAN_ONLY_DNS_CERT_SPKI[] = \"yrWBu7Wj0LH75IlYOwKyOco6LXqHMlSn7BYyXKbvRSk=\";\nconstexpr char TEST_SAN_ONLY_DNS_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832de\";\nconstexpr char TEST_SAN_ONLY_DNS_CERT_NOT_BEFORE[] = \"Aug 20 16:57:48 2020 GMT\";\nconstexpr char TEST_SAN_ONLY_DNS_CERT_NOT_AFTER[] = \"Aug 20 16:57:48 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_only_dns_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEA1oPar6Ka89tIRt48Dkk8o0d/CnHdbo+vFeA1KnXcESmU3L30\nJVUETFGPn0P6GtatZ5s8W6c/lb7OvHgWEPOVP7HukItLytCIA4UM5edpflB3XiUx\nJOkWqWmY5gPpM3t78msge+hB/x7ImNUPAyAsEkFTKv/xSXPyKOVmCN9scN7PllS2\nIhu3PgizvCfWpFPwieiZlJ+CzlB7Fj2hQjp7+eg+5arldpURTZWTxehBkT/vrM0X\nMM6UjiLqnm9hGGB7aPG91hzLpBvfVdobzm5cEBi3H1fuzF+XXwbEF5tV6tmcKzpR\n7+VNe0wmYDNGBSzexYC3mHGPUQYmg0xOBRISvwIDAQABAoIBAQC6eGm77UR3IDHm\n8L4RyRX99WN4p9xNhHc1M+3jWAqRnKy7ah/1575RvUB0uLmLZPvdqTLBlVQPjorV\niCneWG9vq/NuELg0uCrTrG+21f98/uOUog3jQP8jCxkPMW0hBIGNDBRzwUTFSXQk\nbmibVfcZc1GhelS7fh+N70NQCS+RuHSygR9PTiEc+AiDe1035VaGHoj2jinC3ZmF\np+RG18fT14/RBofwO04cgANQvjeyQAx7MoolFcR2aaBrIZGlVXVxLGJNbEfC3JkQ\n/ggUUkOyrVVpW6c7U4KnWNR5UXaQSm36sh36JArQwa0ahX/xfy7Dp37Jb96bD8jH\nFgNtnydZAoGBAP7CKL88qsnPpWuMTQX7P3swK366KC7csDLTiOqqYW0J2C7DGTl0\nBo33TithAR+omfyBCWa9cZBx0AiiFHXsvur7ms37NTZ3GIu8oYO1cXBIUTyJqlnH\nWmMik730RIuGuo6blRih+a19mDPP4H7E/Y8MT+ozaO65ajsTUF5iGkDLAoGBANeP\nfJz6/suZEEXP3ImZ0kc4j8gGjrYoTIp7j/7qXpJCDXMBtIsAI1QT9M5SGnrLxw0t\n4X6HEq8LbDNr0hwbYatuZXnwARjSYgyKu6f/xuS9M3Kp6as5ElpFbhPGkmrrbidj\n4rZUpbi6xsWqpikq+DjqDJX5eYaw5c9SNJujVXtdAoGBAL2Kx7ZA9PDhxTmXUVc5\n76k/1Zyb9HmpSI8IQ74UthfiUy23inTsllPZxBcSPTvIvYjYmJ4QJQZLJ3TE4mQ6\nmHFMcYj/dj/nrCOLXMAZpyTbKVXZNx1E7UME3tddfgIxY2vm5a9GE/W4lB04dNbc\nyao8htjBFzlGSntZi1ots0qxAoGBANUn+EurD2X5F9RQ9X3D8+GllthvU5PhSb7u\n7Ldgvt/WHUKKfyOsoUK08TFMkw62j0EDC6Z4nwslOnumtQBCO1SwN/xaTltjPr20\nOlioXt82ELfkOUKtju5hFbbMd2DvKeAdr8Qo9C810vUICINREGAHYSa0qW19Y+Kd\nIZR98TupAoGARuEEDfc9Xyg9JP4pX6Ikjf5fMcmWI+qWQzKxhy/exWhSM3WutiLY\ntpPb+4FrTgsA/9AayERGHFWNqXEvXt1Z+ONUvPZDMqjsdJ8KS9ZPF7DiHchdym9d\nW7Hj7QSit1euLAhiaK5hkaEj2/130YSo/xN4rYu9m5Z8UeQ0+4LIFUM=\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_uri_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Server\ncommonName_default = Test Server\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nURI.1 = spiffe://lyft.com/test-team\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEJDCCAwygAwIBAgIUb7lp5EdaTy6KCfKjvbTXaDHYMt8wDQYJKoZIhvcNAQEL\nBQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODIwMTY1NzQ4WhcNMjIw\nODIwMTY1NzQ4WjB6MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW\nMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ\nTHlmdCBFbmdpbmVlcmluZzEUMBIGA1UEAwwLVGVzdCBTZXJ2ZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCp+vVW1lCAmJk1Em5+ToWF0G/s/obRNWMn\naMKXqCtWwFgodjAD2+YGc+0Zk5CCng3L0Pt22wVjvqc/KCzLNeF8KJBm7cWWHlVl\ndR0nkzM0EVGPkZzls+7igK8SKwEeeKZVUDhFJBfoybD+76ZzNU9hwZP5mG5JrDUP\no70ywH/wiyc3Fkm8znsYIeLQKKNtwsh8M8jb1y4OBg9Kiri7Wr/74Ty1yiFWGeSR\naefG/gMae6v0eGyAID1R6tLJk+j9fD/LkxVduAEMve4xoBdqytSw0g/xGbkNd/qt\n0NBaNOAM8HT2eGvE6nXOUMxxLUF3A3+pHWSd1NWmXG2aFmg0jmQ7AgMBAAGjgaUw\ngaIwDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUH\nAwIGCCsGAQUFBwMBMCYGA1UdEQQfMB2GG3NwaWZmZTovL2x5ZnQuY29tL3Rlc3Qt\ndGVhbTAdBgNVHQ4EFgQUQgDceE1I9E8ZtZf9tykd1my2BRowHwYDVR0jBBgwFoAU\n08ELI+PDqPIyvl9UwT94OBNVA9kwDQYJKoZIhvcNAQELBQADggEBAJ75nNh3myc8\nw9m754OKhUkVSEWTwaPzAF3TWyO54sfTUa2kjqx7mufYjB/2joVXjnuISsNFYi0P\nvOx/YxZv8XNx4fg/n0U23Q6XAsgjWmqrEgg3nogfuSGRgOkcdmTNQFluqBJsQ7UE\nYmx19jjEyKO8SiXvHO2iA5CWYllayyAkvieZOLG0VOP0+kLraRwsSW0AsejWP/IF\nlTLvqR3m6qTpwBCkmqDs9uT0Q53rzvJK6/UYah05SqMsKp64zjMepfwh72S6ujzA\nOoLSx9vVJj5wkG3bsk6sv4g3zDOpLToYKJsapX5ysLrho4qRozLz+m1429aBGzjT\nzeSZNjAJ6Ig=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_uri_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SAN_URI_CERT_256_HASH[] =\n    \"1de3c3c66bfd116269ecc0db4a58a20b69ec4db1a513c23aafe0bf0c3c5cf337\";\nconstexpr char TEST_SAN_URI_CERT_1_HASH[] = \"ceb3cbb8d8704d558bea7e584fb388a78c9d3501\";\nconstexpr char TEST_SAN_URI_CERT_SPKI[] = \"+OFm7W6LtcY++mvNq8/meuLhczsRjsA0dCBbeCfyAx0=\";\nconstexpr char TEST_SAN_URI_CERT_SERIAL[] = \"6fb969e4475a4f2e8a09f2a3bdb4d76831d832df\";\nconstexpr char TEST_SAN_URI_CERT_NOT_BEFORE[] = \"Aug 20 16:57:48 2020 GMT\";\nconstexpr char TEST_SAN_URI_CERT_NOT_AFTER[] = \"Aug 20 16:57:48 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/san_uri_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpgIBAAKCAQEAqfr1VtZQgJiZNRJufk6FhdBv7P6G0TVjJ2jCl6grVsBYKHYw\nA9vmBnPtGZOQgp4Ny9D7dtsFY76nPygsyzXhfCiQZu3Flh5VZXUdJ5MzNBFRj5Gc\n5bPu4oCvEisBHnimVVA4RSQX6Mmw/u+mczVPYcGT+ZhuSaw1D6O9MsB/8IsnNxZJ\nvM57GCHi0CijbcLIfDPI29cuDgYPSoq4u1q/++E8tcohVhnkkWnnxv4DGnur9Hhs\ngCA9UerSyZPo/Xw/y5MVXbgBDL3uMaAXasrUsNIP8Rm5DXf6rdDQWjTgDPB09nhr\nxOp1zlDMcS1BdwN/qR1kndTVplxtmhZoNI5kOwIDAQABAoIBAQCHHnboEHTtxGAF\nUCGFReXd+VL/kVSZ+VZYfW4yH/xTeZKal1iTuLmhZ4WSTzsQmZNd8UHzuScPov5P\nth/laW9UwuoG5/uo2XQIso/6ZYjOWvX6qkWOUvoSEzkXuBeroTyXlK0mSQq4gZux\nC8/ExRvVGYwHXKRO7BVB2WcMcPlCvA5ZsHyDelvCZMZpqMehfQfF3R12Zge+DDew\n4GG6pGb88pkCKH/Q2HG4PLWL/1zat781QVxTNlAh4Pro8Vd0ocXvlRoQdzwOprIC\n+Vqo84b05a+Sa881Qd/GCV+vDDofQXwwKs2PkuZHJMZQcgHYAPusYjhXKdDyDnnG\n4SJSIWgBAoGBAODvsFHBNXYoFT4XMd6gCrafZp+WQhaPq8w/mXiKfsGJW4kqrfCa\ntghGVdkNVPcJpYeIqX0q4Z2n3P2cW/n2q0GPOtT6nAlboFRDWsI8Np9/xsKhEmOQ\ns7FbAnkF6LX6IqdGL899V5o66m8LQOOvl1UNp+tzoyvCGp2r5msPRW3zAoGBAMF0\nYINdIu3+0/2nb0sLBdyyyslBKYsNCSwZSnns5eR+qXIu9I3bCPVe+lu76pN8SwHr\nfbVdVHb5sdu7d8ZFHrNM4VMSg2ATYdf/CmBV0j4dAvIlwGgx1Sp2xVmRaJTJ3MOx\n+qBlSpLMlXkgdRUUxlSG3qYrmUAVeoVKM9/1xxqZAoGBAICIVl+RIQSAiQru5B9L\nonOSiGi+ysrrlAblfMQFl8Ltw3GRAs3PCh+WWW3rEVcMmxrke707aKrXBg+hbmla\ni6etubeD8O/j/KzJxC3jjG7968zBj6l3QUciMzqGpuB9q5mcIKvW+tz1t4p2isDQ\nUJBYelCvxli11NGuLb+VfV4DAoGBALCZf52MAV4H+hgdqEB2QJS3dt2AKd9xOAVU\n1SM2uqr7Yc4FxWRWJvhwCtaBVmPq3EJVar2NBJVT3iR8H5wU7zC5Jx0B0oJ7Tx+y\nCBPnZHUlYNIS2nGDxFfucrwQPMtO98SCDgwsS4Z1UqJvmdJz05XIRSqmtFDHCsDw\n+yghIvBJAoGBAJ3AqsM5YcHh4NFw4Dgfv/jbyMEdJdbNvcEjpSCm48EBDggX/1k8\n1uabDaBDsjtSZJUneuXz6PuUogYv+YMH9Jgm38ehtxnhcUn/Hd2Aq78Y7dE2Oqsm\nfg4HX3gn8LNMwW8H3dBVEsFuJEOB3FQrtVlSq9yLg3IvRC88uQzpl2uK\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned2_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEIDCCAwigAwIBAgIUTFKj6zzkQHxTvlGhgpi3dvsLcN4wDQYJKoZIhvcNAQEL\nBQAwejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTIwMDgyMDE2NTc0OVoX\nDTIyMDgyMDE2NTc0OVowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3Ju\naWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNV\nBAsMEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA08SQ0+LaGtukI2UlaXdeA4eCOOIh\ng/3di4zgDh0eu3UqNTud1gDDAdSkyQ/GbRK/6CDN9+Ct58sAjg8cbemZSvtrN4RW\n+HIFXAJuEWFO3TXtl/y5R64uzIy8ICVn5psrwPxAD2KdxNWP7zSOBcSxlaOSPDyH\noaMrjLE/NerFSSBHtueRnmJ1BkuWi5YbwnaRq/eUA2F8i3uJU0E7hFoV2KRtZ59n\n15OfGbIfR28T6drpaUOzL5uZf8ZTVa6+S3j3x9Kc0DmVhbZwHDySPhBwOkAw4476\nMtHkV05aj5ibfu12Vg+eH0YD/L1HLyVBMoyOYX+WNK9l06kOKoeQSXr+KwIDAQAB\no4GdMIGaMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXgMB0GA1UdJQQWMBQGCCsG\nAQUFBwMCBggrBgEFBQcDATAeBgNVHREEFzAVghNzZXJ2ZXIxLmV4YW1wbGUuY29t\nMB0GA1UdDgQWBBT1SuDJCIggmeXOPjB7yCsVlHIkYDAfBgNVHSMEGDAWgBT1SuDJ\nCIggmeXOPjB7yCsVlHIkYDANBgkqhkiG9w0BAQsFAAOCAQEArVA858JgIaw4ryWQ\ncsOe+ecib2wBuPCvTtQEk+c0vIfQfykDG38s2yFVah/VmYt8fbMK/EkuDwnX8Smt\n+ZGVUshzPu2NlpqEmKDwl6cHaMzrt0Ltwz8zkKCigPPRgRJedmRR3u+Sz4ZhKkCL\nfWutGz0t4WPEKJzZjyzDnT0lkdJZ24Ye0PvceYoArwxfkjKIiuj59+FK5IpntXwT\nT9WuizvQKjawdbwOE8qVYkJ/YB3IgSEqXyQCOVXciCi7gDucK/f5XRy/qk+E7sYq\nQWbB6F4LUWYGz139B5k/j9nyXlgpEbshQzWtWUIrhtrJEgBfGKOA7JnGA5B89n45\n3I6Qjw==\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned2_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SELFSIGNED2_CERT_256_HASH[] =\n    \"7e33b4d81ef8518d9527967093b489d58da724f5b72c3330932789fbe0ed6089\";\nconstexpr char TEST_SELFSIGNED2_CERT_1_HASH[] = \"f07aea4eabfbad0d9b60053bd2fecc925733be5e\";\nconstexpr char TEST_SELFSIGNED2_CERT_SPKI[] = \"VlFyLiwhtbwHIPk7OHp4XGXGhCgDfYHOyiBEh0VGowM=\";\nconstexpr char TEST_SELFSIGNED2_CERT_SERIAL[] = \"4c52a3eb3ce4407c53be51a18298b776fb0b70de\";\nconstexpr char TEST_SELFSIGNED2_CERT_NOT_BEFORE[] = \"Aug 20 16:57:49 2020 GMT\";\nconstexpr char TEST_SELFSIGNED2_CERT_NOT_AFTER[] = \"Aug 20 16:57:49 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned2_ecdsa_p256_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIClTCCAjqgAwIBAgIUGcHYDSF6P9mnFjI3j53FFgtDjI4wCgYIKoZIzj0EAwIw\nejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh\nbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5naW5l\nZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTIwMDgyMDE2NTc1MloXDTIy\nMDgyMDE2NTc1MlowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM\nEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMFkwEwYHKoZI\nzj0CAQYIKoZIzj0DAQcDQgAEo6UZ9lqsYUZ0azeH+5BTfSSjEv9153YAQBQY9Fgq\nQel1VQnQU/O40tOchP4rAiXghiK/Cu5938dQoo0k3T4tOKOBnTCBmjAMBgNVHRMB\nAf8EAjAAMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH\nAwEwHgYDVR0RBBcwFYITc2VydmVyMS5leGFtcGxlLmNvbTAdBgNVHQ4EFgQUYikA\nTWJCGKp4SAki0qkraMiM8UYwHwYDVR0jBBgwFoAUYikATWJCGKp4SAki0qkraMiM\n8UYwCgYIKoZIzj0EAwIDSQAwRgIhAPSCRDqqtTCTyPLN2r5dLORIrgI6IFQavS6q\nw7h1uETKAiEAtKqwGv+L+Qr0fQSW0hQG2PkZbZBkbjZshnv9x0kOXg4=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned2_ecdsa_p256_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SELFSIGNED2_ECDSA_P256_CERT_256_HASH[] =\n    \"4a3a4a14179d8af55cd89b0613e987e49335e826da53672933ab115247416baf\";\nconstexpr char TEST_SELFSIGNED2_ECDSA_P256_CERT_1_HASH[] =\n    \"024359839fb45be9e70c3a01594c64d9955ce6e8\";\nconstexpr char TEST_SELFSIGNED2_ECDSA_P256_CERT_SPKI[] =\n    \"/dozXU2pNDFTzYxiPFcysW+ZaZP063Nn3gfc3LHjQEI=\";\nconstexpr char TEST_SELFSIGNED2_ECDSA_P256_CERT_SERIAL[] =\n    \"19c1d80d217a3fd9a71632378f9dc5160b438c8e\";\nconstexpr char TEST_SELFSIGNED2_ECDSA_P256_CERT_NOT_BEFORE[] = \"Aug 20 16:57:52 2020 GMT\";\nconstexpr char TEST_SELFSIGNED2_ECDSA_P256_CERT_NOT_AFTER[] = \"Aug 20 16:57:52 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_cert.cfg",
    "content": "[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = v3_req\nx509_extensions = v3_ca\n\n[req_distinguished_name]\ncountryName = US\ncountryName_default = US\nstateOrProvinceName = California\nstateOrProvinceName_default = California\nlocalityName = San Francisco\nlocalityName_default = San Francisco\norganizationName = Lyft\norganizationName_default = Lyft\norganizationalUnitName = Lyft Engineering\norganizationalUnitName_default = Lyft Engineering\ncommonName = Test Server\ncommonName_default = Test Server\ncommonName_max  = 64\n\n[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\n\n[v3_ca]\nbasicConstraints = critical, CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nextendedKeyUsage = clientAuth, serverAuth\nsubjectAltName = @alt_names\nsubjectKeyIdentifier = hash\nauthorityKeyIdentifier = keyid:always\n\n[alt_names]\nDNS.1 = server1.example.com\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIEIDCCAwigAwIBAgIUDJW42Svjo33Z8p0GBQWcEBralBEwDQYJKoZIhvcNAQEL\nBQAwejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTIwMDgyMDE2NTc0OVoX\nDTIyMDgyMDE2NTc0OVowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3Ju\naWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNV\nBAsMEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA08SQ0+LaGtukI2UlaXdeA4eCOOIh\ng/3di4zgDh0eu3UqNTud1gDDAdSkyQ/GbRK/6CDN9+Ct58sAjg8cbemZSvtrN4RW\n+HIFXAJuEWFO3TXtl/y5R64uzIy8ICVn5psrwPxAD2KdxNWP7zSOBcSxlaOSPDyH\noaMrjLE/NerFSSBHtueRnmJ1BkuWi5YbwnaRq/eUA2F8i3uJU0E7hFoV2KRtZ59n\n15OfGbIfR28T6drpaUOzL5uZf8ZTVa6+S3j3x9Kc0DmVhbZwHDySPhBwOkAw4476\nMtHkV05aj5ibfu12Vg+eH0YD/L1HLyVBMoyOYX+WNK9l06kOKoeQSXr+KwIDAQAB\no4GdMIGaMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXgMB0GA1UdJQQWMBQGCCsG\nAQUFBwMCBggrBgEFBQcDATAeBgNVHREEFzAVghNzZXJ2ZXIxLmV4YW1wbGUuY29t\nMB0GA1UdDgQWBBT1SuDJCIggmeXOPjB7yCsVlHIkYDAfBgNVHSMEGDAWgBT1SuDJ\nCIggmeXOPjB7yCsVlHIkYDANBgkqhkiG9w0BAQsFAAOCAQEAkCKh8Xt2PEPBTBR/\nWETDmuGDjQ21A2xJVJpDbp/KwX5z/IUz/+ML8d/IMw0IFFPeck2H366dpeBjaTA8\nDRwHWrkC+Yy3PSfdmK58bvGiTVqWjVn09y4yHYDqH365RjpzpR1bqjMmKZWzYPm2\nEmZDEHcQFDW4zLrOQGY9jOFzEOahS/gwjKiOUfVFifF16Xn3IQ/uaq0dD1SZu03i\nGANbGw4kXLw9BXSuM8JvwFJjLRSNDIFWarhHRsbkU/oS+rkNoV2YaQJNjBsv8/jr\nYghmgl2nnGxJP6La1HLhT9J/+b60nfDrKOH1dcxxhyn/WRT4jN94K7CM/8q5+dEF\ndw9T9Q==\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SELFSIGNED_CERT_256_HASH[] =\n    \"6a29bb9917f595807471c79731ca7c40f3994ed8ba13a068a4e8ba6e18d69cc4\";\nconstexpr char TEST_SELFSIGNED_CERT_1_HASH[] = \"e8d95bd6180cea11dc316845b8a5619acbfae523\";\nconstexpr char TEST_SELFSIGNED_CERT_SPKI[] = \"VlFyLiwhtbwHIPk7OHp4XGXGhCgDfYHOyiBEh0VGowM=\";\nconstexpr char TEST_SELFSIGNED_CERT_SERIAL[] = \"0c95b8d92be3a37dd9f29d0605059c101ada9411\";\nconstexpr char TEST_SELFSIGNED_CERT_NOT_BEFORE[] = \"Aug 20 16:57:49 2020 GMT\";\nconstexpr char TEST_SELFSIGNED_CERT_NOT_AFTER[] = \"Aug 20 16:57:49 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIClDCCAjqgAwIBAgIUEuu+Bc6XaYBJR9bbnTUvgjLZdVEwCgYIKoZIzj0EAwIw\nejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh\nbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5naW5l\nZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTIwMDgyMDE2NTc1MloXDTIy\nMDgyMDE2NTc1MlowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM\nEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMFkwEwYHKoZI\nzj0CAQYIKoZIzj0DAQcDQgAEo6UZ9lqsYUZ0azeH+5BTfSSjEv9153YAQBQY9Fgq\nQel1VQnQU/O40tOchP4rAiXghiK/Cu5938dQoo0k3T4tOKOBnTCBmjAMBgNVHRMB\nAf8EAjAAMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH\nAwEwHgYDVR0RBBcwFYITc2VydmVyMS5leGFtcGxlLmNvbTAdBgNVHQ4EFgQUYikA\nTWJCGKp4SAki0qkraMiM8UYwHwYDVR0jBBgwFoAUYikATWJCGKp4SAki0qkraMiM\n8UYwCgYIKoZIzj0EAwIDSAAwRQIhAOv5KAtSHusXpbf+FM4fzzOU3vQFHguEZ3Jt\n0lNKUXdhAiBjJSmq+QgwSMxWwcyw5y+4yXk2uyx088Qlx9LPjTssOA==\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SELFSIGNED_ECDSA_P256_CERT_256_HASH[] =\n    \"aad05923e18b702a34097624c9793be3e940d1572515be521d667cb7d5388c0d\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P256_CERT_1_HASH[] =\n    \"7f8e9ed0b41930e184e95bda5a79f39410e68fcd\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P256_CERT_SPKI[] =\n    \"/dozXU2pNDFTzYxiPFcysW+ZaZP063Nn3gfc3LHjQEI=\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P256_CERT_SERIAL[] =\n    \"12ebbe05ce9769804947d6db9d352f8232d97551\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P256_CERT_NOT_BEFORE[] = \"Aug 20 16:57:52 2020 GMT\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P256_CERT_NOT_AFTER[] = \"Aug 20 16:57:52 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem",
    "content": "-----BEGIN EC PARAMETERS-----\nBggqhkjOPQMBBw==\n-----END EC PARAMETERS-----\n-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEICH3LfsmyG+tH/ReKgwTnZdOdpNobgHyEm5cUyqoZR8FoAoGCCqGSM49\nAwEHoUQDQgAEo6UZ9lqsYUZ0azeH+5BTfSSjEv9153YAQBQY9FgqQel1VQnQU/O4\n0tOchP4rAiXghiK/Cu5938dQoo0k3T4tOA==\n-----END EC PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p384_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIC0TCCAlegAwIBAgIUK3vFP5efbjrsST+0HOuEbStmpqcwCgYIKoZIzj0EAwIw\nejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh\nbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5naW5l\nZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTIwMDgyMDE2NTc1MloXDTIy\nMDgyMDE2NTc1MlowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM\nEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMHYwEAYHKoZI\nzj0CAQYFK4EEACIDYgAEe/vtHmcsAmMdU9wWtMPTwFiq2kjzcGTtY+jW+wIke4vw\nepFcfICqaun5gHINHjj+658vZ+vReXxGYp69ag0pD7yZ3SMm6Rh5gBZS7ir6UKyS\nZ2PaICQFAda7Hct91v+/o4GdMIGaMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXg\nMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAeBgNVHREEFzAVghNzZXJ2\nZXIxLmV4YW1wbGUuY29tMB0GA1UdDgQWBBSCufDHpMHMnoQjKSTERGecDel0+DAf\nBgNVHSMEGDAWgBSCufDHpMHMnoQjKSTERGecDel0+DAKBggqhkjOPQQDAgNoADBl\nAjAMaiB3UKxH/vsS9Tu8FD6TItlABPBQ5TVb3nXG7BLJnf6pggh3oec+vGx7lUhN\n3MQCMQCyENXYrmhqyvPUVgrD4w1WqmT9cH7u8ysX9jDDSdCAO3Gk5qIbvf345WoX\nbn8jdfM=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p384_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SELFSIGNED_ECDSA_P384_CERT_256_HASH[] =\n    \"686326b5427f43d04394606c75223180779478004b2112820b5affb6d84763b9\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P384_CERT_1_HASH[] =\n    \"58b63003620b27457a8445520c73b3ee7530a6d4\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P384_CERT_SPKI[] =\n    \"sCNwIyDKE4zM8AEswNRUdSQ7xLCOINkhzAT/XiJ08Mk=\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P384_CERT_SERIAL[] =\n    \"2b7bc53f979f6e3aec493fb41ceb846d2b66a6a7\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P384_CERT_NOT_BEFORE[] = \"Aug 20 16:57:52 2020 GMT\";\nconstexpr char TEST_SELFSIGNED_ECDSA_P384_CERT_NOT_AFTER[] = \"Aug 20 16:57:52 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p384_key.pem",
    "content": "-----BEGIN EC PARAMETERS-----\nBgUrgQQAIg==\n-----END EC PARAMETERS-----\n-----BEGIN EC PRIVATE KEY-----\nMIGkAgEBBDCisSgFfvgPparwappqTwcIOxTfMdrhTXUHGZiMh/FsxMaskGbPXW+W\nRj9dNUd98q+gBwYFK4EEACKhZANiAAR7++0eZywCYx1T3Ba0w9PAWKraSPNwZO1j\n6Nb7AiR7i/B6kVx8gKpq6fmAcg0eOP7rny9n69F5fEZinr1qDSkPvJndIybpGHmA\nFlLuKvpQrJJnY9ogJAUB1rsdy33W/78=\n-----END EC PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpgIBAAKCAQEA08SQ0+LaGtukI2UlaXdeA4eCOOIhg/3di4zgDh0eu3UqNTud\n1gDDAdSkyQ/GbRK/6CDN9+Ct58sAjg8cbemZSvtrN4RW+HIFXAJuEWFO3TXtl/y5\nR64uzIy8ICVn5psrwPxAD2KdxNWP7zSOBcSxlaOSPDyHoaMrjLE/NerFSSBHtueR\nnmJ1BkuWi5YbwnaRq/eUA2F8i3uJU0E7hFoV2KRtZ59n15OfGbIfR28T6drpaUOz\nL5uZf8ZTVa6+S3j3x9Kc0DmVhbZwHDySPhBwOkAw4476MtHkV05aj5ibfu12Vg+e\nH0YD/L1HLyVBMoyOYX+WNK9l06kOKoeQSXr+KwIDAQABAoIBAQCA4r6+pu/Vrt5M\nSJAni2XlwgCovEBnxAelmnz3OhPtRZcFxR4AFICbSZ9tW7OJyQKRtf2FovJVBAF3\ndIz5/FW3BCQyKtJ/7W2DnVr+KvpDukG74i2mCcYj6nfJHNsKyGAt2ZjnhD6/gLdU\nf+j8a8UlbONg0/o8j0G/JFVzbErcgLSCfTO4jZUWxpKmgDF0sNa1V+Rn3YpPIs+Y\nH+3EEf5VpOL/xy7dAuFFupTFXN9u45/4GgzymF8U1t9gPE87Xw1Tdkmc9o+YGJhX\nBIaKfQxF8a2CVnASwkhU3unrWsm+wAxjOBDuzwsuU0kM77N2hm4mXtYGFW7VepE8\neqlfDiOxAoGBAO7BOXD53okrPB4mwKxdisMhvgT7OH7Q5mRWK69peoNOYL48UXls\nbBVbrF5nSQ2d0pcOVoUb7nVlqI8IBbpF6P3E3hgb9r6HyUnloFF0MeqqRX8SQD2Q\nD1ezEgdthZity+4n/DAua4EZbLNrE5vbrey6nLF3tcyJYO7dJJCSFI7zAoGBAOMQ\nVIKCBBMOK5DuD5B+Rvru2C2S+MiOg9Lc1Gdpmyix0WLT4B6Q7ucTxqw93KkGGaUo\nQVWOm297SyKcVe+5JXTyknfVjQK1PwkQjTzL39N9za5sHPy+x3v97xIvsgamyRUy\ni+JLwpdJ8AvNgvfeXrNwFPwHtGy5JV0OqBQEzlHpAoGBAOC31UzUofQRUYA/kVGq\nqZSICcW+fqo+DNubj3qGmN1FxCDJvRXASuq/YjlolYXPagrrX9vhMkwk5sXZEysD\n6gwQY8nlaBRk0bxP+NkxuFna8ZZsozJOiWJOFUYQe5H84tjBocfnqR/Vwk4NFmO4\nHLeX3SvcNKzTEP1B/8xi8h6pAoGBAJt/ZnCNu16VWK6C6XPJlrBn+vGS8f7e4lX4\nVxmOz+wZgkxMzvH/zSvAdlEkeEXkYJGjNMLzy2yYIcdNCukFgSpKqJWaUFwnZUi+\n5gpZkCnUwJOPhHZt7Ez0hNGOijtV4uU6XWOXAGb9AoACuIso5O+7dRN37055qKXG\n0MIM3IJpAoGBAMTc58+cYEHTrM+JBg8c8V9ERirYHiMrOBHGOfGCEwQi4IC1AJqY\nCrFf62zG7TjIPJEfwxblAqr03aYNQ+hl8lLKxiUMhBUdq7h6KPewuGUEkP0AWKbr\n8fhGn5aLCZox/qRNKK8QBpzeOfWDzIm7DiW1s+1fH4OuMfB/t8htX3d3\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_1024_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIDGzCCAoSgAwIBAgIUPdUTBkqScjb8A/YYlu46TxTVCy4wDQYJKoZIhvcNAQEL\nBQAwejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTIwMDgyMDE2NTc1MFoX\nDTIyMDgyMDE2NTc1MFowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3Ju\naWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNV\nBAsMEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMIGfMA0G\nCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDhRnCNu+t+gHLANlaFJ01uzSU83jSWUspI\nW9E1cRrzmdadpS4NW+G+yGaAiTQbfaV6ES53i4Lm+f4/DknrbhJ2NQzKABm0wkFw\nBeVcYV+s7XVFyrjZ6WqCzOI07VHMb9yO1GZhxDlbQ63+rmMRaCKCYnpgEayC+4qq\n/PBk0QJ6owIDAQABo4GdMIGaMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXgMB0G\nA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAeBgNVHREEFzAVghNzZXJ2ZXIx\nLmV4YW1wbGUuY29tMB0GA1UdDgQWBBQKQHAMgFz+7HL4/1uVBOMTg+4hRTAfBgNV\nHSMEGDAWgBQKQHAMgFz+7HL4/1uVBOMTg+4hRTANBgkqhkiG9w0BAQsFAAOBgQBa\nhZNq2DyCRjsabkfgNA0fkTsx2UzHstJJoV/kzsQBlHd8O6jzvnOi10aixKs+vL+D\ngZkKK0xwItsftoFS2ejw2ta5G0iNU2vZx6QROi/kyuZV4mbZqm5NXr03Vdf6gMTV\nOlD+/4X/CE4AE1n1wdzd3FTv5sVF5Pa9XNyzdZZYUA==\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_1024_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SELFSIGNED_RSA_1024_CERT_256_HASH[] =\n    \"8ae57e961f5c02ac1f638ddaf8f5c22ad2f1669590d041a48529b9b5d7278845\";\nconstexpr char TEST_SELFSIGNED_RSA_1024_CERT_1_HASH[] = \"e35e22060f35ae148831fefc5c3fdd616c589113\";\nconstexpr char TEST_SELFSIGNED_RSA_1024_CERT_SPKI[] =\n    \"5i/SCHfhjVYUlH3GY0AczvDq3JJkZ3pn3WbDsBg9k1U=\";\nconstexpr char TEST_SELFSIGNED_RSA_1024_CERT_SERIAL[] = \"3dd513064a927236fc03f61896ee3a4f14d50b2e\";\nconstexpr char TEST_SELFSIGNED_RSA_1024_CERT_NOT_BEFORE[] = \"Aug 20 16:57:50 2020 GMT\";\nconstexpr char TEST_SELFSIGNED_RSA_1024_CERT_NOT_AFTER[] = \"Aug 20 16:57:50 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_1024_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIICXgIBAAKBgQDhRnCNu+t+gHLANlaFJ01uzSU83jSWUspIW9E1cRrzmdadpS4N\nW+G+yGaAiTQbfaV6ES53i4Lm+f4/DknrbhJ2NQzKABm0wkFwBeVcYV+s7XVFyrjZ\n6WqCzOI07VHMb9yO1GZhxDlbQ63+rmMRaCKCYnpgEayC+4qq/PBk0QJ6owIDAQAB\nAoGBAL5YNVwc75N41vzALQTfO5cmsZMnscledNsE8dW1Fu/ECco0OqBBBZKdY7ax\nt5yO4tYsVFXgRKADlacT3LOX0vIGOIMfkNaw9XDJ4fAfKvIOMor1ZIR201lPW7GT\nbQRox37QYXfB8lkYrSgwnAz0dkewYjC67BPglNVjTPZuCCoRAkEA+Ki4yKc27m6k\n9d3Qj8VnvJ+6a92zvFjcLQAsE7hkzkOU3+GwSHFQH3zW0qcyZCUM+fQq/+cMz8th\nruT0hyxkWwJBAOfs/YuWIkPGIvQ94ps3kCN5uJ75pwqRdCpjhWvlkLZeh4RyFvBC\nDw6VNASZrTkmLqGfsFsQzv/Psr8GcUACdVkCQQCyhB61HycWoiSXxvlWXjiNeHbT\nvcStsXXQcNE0Dyt/0ZDt8g8m0wLcuDlZ3mMU9myMfjjooy3VeIPWldTfvoUFAkEA\n58xgWIu0MWKteskDXDj7UcmNMS65ugFIRjlzlzQ5H1x5O3G/5V9QpzWOaO6jR9QW\nlSkCXPH+wYP31Z59wAj0+QJAKOUMrAeTjr567kew+JdroFVgVsg2MySxbvrqPaRd\nLEqeih4bfkjVwQ2ePJN+JHH3/uS9emHew3yzk3ArpygKhw==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_3072_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIFIDCCA4igAwIBAgIUOOB5XMw0htWTihr0AemjTtLOz/swDQYJKoZIhvcNAQEL\nBQAwejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTIwMDgyMDE2NTc1MVoX\nDTIyMDgyMDE2NTc1MVowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3Ju\naWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNV\nBAsMEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMIIBojAN\nBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEA56YCTajmrZ3Vxp/Qr1L2ZNAFlA/o\ndVzmL5Wwf755Ct+zQyMqPTYfikBujjkIQ0wGpmroU2kNRqQMfa+3fHtm4ewzePBp\noUf1cJf1R5uakhFt1pFant4a5Yhpry6kn0IaIVsRwQeSATOWqrMm6wmsHR8Mh4bA\nNg2bxgL6DuF1AM5gy7/+ETzV0Uf1UmjvFn32d0kT7jTn5LpYIAD6fhGoQLX4sz3h\nCsbzIhCaGg3A0KFzfOFa7lz0pa4zchK62qXvQ2yvcP2dCyIF4xpVkJeVLZOQrram\nXkhHu592drVL24Yq8d/mL3buVos/uCyQfhwFgRc0lSStbZ6kfzbpZKM7UnsYRuL1\nGUCdTXBI2tLn0Q8bij26noRCZdv1L0ph6hgNpP/E0ck9+vGDEU/mK62KwZum++84\nKpDeCAbX2k/fESU0+QxtZ5EphpgALlA1janTu3fMwhqn1YaUvpNbn7XkyFpmkek6\n5Pg6tv5FtmXozOqKN3hfBgWgp2KEBt2kfrmJAgMBAAGjgZ0wgZowDAYDVR0TAQH/\nBAIwADALBgNVHQ8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMB\nMB4GA1UdEQQXMBWCE3NlcnZlcjEuZXhhbXBsZS5jb20wHQYDVR0OBBYEFC5FWdkc\newZN0NTmvy+Eq2bhEjwYMB8GA1UdIwQYMBaAFC5FWdkcewZN0NTmvy+Eq2bhEjwY\nMA0GCSqGSIb3DQEBCwUAA4IBgQBTzugFFjGNugM0Yar1LXVpi2Z59WPKsSvwqxSM\np4JhhLnuQRg5uRz2MbJ3P5V7HbuCRurc6TepqNKwSpCoD0GlWUDUdIvcmXyaeBtO\nBeM/5Z0wXjJzS2UOUkXQFePhmC8/qxbHmKgzPcU0EmEsEDlQQN0HwOTJIct1Ya1y\nRCIQj2C6pkQiOKpBtXaDt0KO7UB8LJp3APM9D5B7+YJVXq7hE9g2UOgWE1FuC3bF\n1O2CurXxVm64Jtp+lCFCsmzSESAeydV09GhJryksdmmTpo5nqfsluGb7cWF3CbZi\nJ7ey7YzadNwAK7x6bNjYSE/IHZSATXNKex5A32jh/2ks1XZkGCuQeqnEwmjl5p+z\nXtzQh7WA0Z0Wo3ubBAlvsY01I4euAvIDnXSVlBx87ABJOEfDszph34npYjSvn8OJ\nd2u7BlmC2Y1bNQXRl1ohAmYDTkv7LBMolOPdqFiWM7mr7hq1N8KLSQrpnjbmZqyk\nIwAaRrZR9tcCP/0jiZwDu8sx2hU=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_3072_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SELFSIGNED_RSA_3072_CERT_256_HASH[] =\n    \"58b04d28cb84cb5ea918435b82308b4b17364ae251a97c4cf0ffcb2a71d33c51\";\nconstexpr char TEST_SELFSIGNED_RSA_3072_CERT_1_HASH[] = \"efd3d5cfd00f6947b9d864e69b736a9690210009\";\nconstexpr char TEST_SELFSIGNED_RSA_3072_CERT_SPKI[] =\n    \"kT9uq6WUtb/2hlkLjRg4bl0y5M1xVGT2QTpXDi+9gPM=\";\nconstexpr char TEST_SELFSIGNED_RSA_3072_CERT_SERIAL[] = \"38e0795ccc3486d5938a1af401e9a34ed2cecffb\";\nconstexpr char TEST_SELFSIGNED_RSA_3072_CERT_NOT_BEFORE[] = \"Aug 20 16:57:51 2020 GMT\";\nconstexpr char TEST_SELFSIGNED_RSA_3072_CERT_NOT_AFTER[] = \"Aug 20 16:57:51 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_3072_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIG4wIBAAKCAYEA56YCTajmrZ3Vxp/Qr1L2ZNAFlA/odVzmL5Wwf755Ct+zQyMq\nPTYfikBujjkIQ0wGpmroU2kNRqQMfa+3fHtm4ewzePBpoUf1cJf1R5uakhFt1pFa\nnt4a5Yhpry6kn0IaIVsRwQeSATOWqrMm6wmsHR8Mh4bANg2bxgL6DuF1AM5gy7/+\nETzV0Uf1UmjvFn32d0kT7jTn5LpYIAD6fhGoQLX4sz3hCsbzIhCaGg3A0KFzfOFa\n7lz0pa4zchK62qXvQ2yvcP2dCyIF4xpVkJeVLZOQrramXkhHu592drVL24Yq8d/m\nL3buVos/uCyQfhwFgRc0lSStbZ6kfzbpZKM7UnsYRuL1GUCdTXBI2tLn0Q8bij26\nnoRCZdv1L0ph6hgNpP/E0ck9+vGDEU/mK62KwZum++84KpDeCAbX2k/fESU0+Qxt\nZ5EphpgALlA1janTu3fMwhqn1YaUvpNbn7XkyFpmkek65Pg6tv5FtmXozOqKN3hf\nBgWgp2KEBt2kfrmJAgMBAAECggGAK6nF6193w6fpgJvlGPb5TH1NX+Azv9N36+Y6\nM4nyhNMvlesN7iXCgius6w/i99RfuISUMAywHO2VESfK3SLhfJoqBZTDREws/kEW\nUZ4J35h7bKNPQny48yToTsXq773k8P6adRj5M1LHBnvoTQswrviIQw9qMYYVI5iy\nC5YBmNpNsnoI6LTf1KfMnPD8h+/MQiW7JP5B64IYVEf670JixXfHXfm+7mobsAMT\nGPAYtPqJqzNLEXzh/Ey+k7TmFYqLF/H3fefGE0teCI92/CS8LgwFrXGuaZlqtoF7\nhN5I/gn+GzQ/CBFXFxyZmLdKd6COIZ5qN4p96vOBTdjReVKfyY+tEK5BzoV81dO6\nlHP6mu9YomPp29ZaAtt0KmgXSB8FUvvELDmH3+etGrGMmJbtjn5ZL8a+zkCAl9At\nufcW132FJ7pp8GqmV/5eC3LGL5C2MUJLCMkeabdrXUUvb1iUrvKEX5aCZe3X95ph\nvmg5y6f+jT3bKobpgyOXoZjXGdmBAoHBAP82jeOq69oX3O4/fPL85Ms2gnNbTm17\nganQ9XE5MT0rqPLX4C8zhEcQMPIEX/Jls8n8AQREqmB0u3yBPEUVM9VTzreO3E84\nfr3ps42Ek311wYCEEAaGbTl+m/wW8JNwv5m4ShgwocVZVYoHxmUyRPmt5FUVgmWl\n4diMut01gWXzBUASczWS6Ct7h/S5VkX1dXZTm3rxupQkgT1MMQTJA5NksmP7JgKM\n5mOrJ2i4pkl95B+9+giR5yIAZEwC1gCaUQKBwQDoXNrIgSJnXjqyjKTwVs/o1eFq\n47qu7DnQJl88CJ0WnRHApUpx54r12hT8D8tFSuR6GuTbsZTQFQk/15LXuXFyA+Yk\nDk9b+5L63OOXpqxiJeip/bfHdxccoC6TAFgyPFcu4XQNWMdzYZIXy5wtR+/aCxIA\nEUIcOHQbZoSlTOo/EqxJowxaTy+06by4fABJMuQ6j+3SFqT0K4ebDAGflnthXdV1\novAMoOGxgbgD++Gh6dDR8WMDazom7CYL9SHJpbkCgcEA6M9TUEQzMphiKdQwn1ij\nPd1T0veP67+m4MqFm+uYRzz0udLD50sPd+QFQkFLZUvxXmhMZSBea+8QuNsLp9lX\nl1sET946iun2dDgkWwx9nw00n4/ZLpqiOLiOgsBKZbWMkyXSMY7rg6OirG+7Q8U/\ntqQyjos4kJFqHKkELZcJm369Y11xS4Xf1QqzU0NeuCLaLD7IrrakhU9ntgszDDJy\n2JobQalxWxI9eN5A7P7EcnYtC+e5lufB+PpyhARyQ8qBAoHAVGwgJUcdu9a1lteY\nuaWg5mSHAPH6isFTA/hdE4/Bm0y3/XlQge/xQ
f/ZLfeDEAr01hRdYCC2A6AQ+zwU\nowJCYH1gKWt1k+LkdPa6TggFi6dgUBrZocP4BrOAGHLAmIfp2SOaqZq+5IHntXOJ\nq73+PAdXo+ShU2WDcc0elnxKg14CcMKno0wv9hgSNCkBZZby6wAOFQ2bIweJvmBv\nWo8vBqI0AwiPFNeDCJ4rwG7MfKpZ7103by+PMkMMU73dZ8kpAoHAcQa0S+LQPi00\nQGLaePUR5YcCEHY+rrPRFWskMMsEHuDsAg7t0Q0XxwVxMK6ZJX1AjPyhKn+qMedq\nBiPk2Yq5eHeR9FDOSZeNQsADj8yeKuGTfxaGUMfp1A0RQXW/k+PS+AkxPiXKmQX3\nUNYI9M2rCp4TGqa2lYbMWQYYiTwkVXxdUPi/84cKwVw/WE+2BvxNA33fcnBA3b4D\n263f601U3og9sVSpMrSmwPEwaFWhs+ZLQ0fXfHWKiwhRbx2gT4gj\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_4096_cert.pem",
    "content": "-----BEGIN CERTIFICATE-----\nMIIGIDCCBAigAwIBAgIUPSy7q/7IsDI0afQtGZMZXRqJixgwDQYJKoZIhvcNAQEL\nBQAwejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM\nDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n\naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTIwMDgyMDE2NTc1MloX\nDTIyMDgyMDE2NTc1MlowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3Ju\naWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNV\nBAsMEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMIICIjAN\nBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA3KxLzjDw7nsaEvdSpYUrFvrtrZ4n\nwHAXo6IE+Mlb4J1VqrO4S8OmV4d2ydGuSOPelqprq8efnxJXHhUsvYAb+HXSpZs6\ncn9Sp98qe9X2sNyof6PwGz4eUcz4CD50OYeVXYI0RcnL9regqdG/4eQThW/KcYaR\nXFCaRUPZAACXM+6RgayKffs1K7v7PZmvYUXi2G0a4QqclK+hvsjD7eMCBOI2ovoV\naUDmzDqtW9mwtRDY/0bdCw375bFXoczMi//HbV2aqlfU6z0R5VEq+kQsv5j4atXn\ny2CsDfsjC8DjQmVuTjCDpYCU6nMWXfg+mzTjw0Q/UP/RlBL/aMjgzK36LgtxQ4Q7\nuOMXm21kNMg2D/UFSFdOUFw263d9auwGV3sP8C7exXQrwYukJyNVDbOg/TeZqD10\n4ChcWruffJfIIeQOX73+2CWk6oUEPx7egV9tSq3wue/DPSb61T0M0oCUTLRLHEhh\n7UXec+bWwSeBZKq6jynwSwIj1+H0n/4QifnWLHCkrHmn2DcHT2ceAXyd10RAyteO\noIYplR7xd4OSH0l6jLxYtLf7qV0l3MvcbczcQdEIRi90ANokbZffobOZnxba7Z6Y\nG58n3k1xq9mPjC9XriMjUeNSzBHDVPBGNQ2jKfDXgidPBT97AxD3tQXPQqZChBH2\nxn5d3rUKL9A/198CAwEAAaOBnTCBmjAMBgNVHRMBAf8EAjAAMAsGA1UdDwQEAwIF\n4DAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwHgYDVR0RBBcwFYITc2Vy\ndmVyMS5leGFtcGxlLmNvbTAdBgNVHQ4EFgQULFYCD1W5+0o6I88v2lPXxBxwxYQw\nHwYDVR0jBBgwFoAULFYCD1W5+0o6I88v2lPXxBxwxYQwDQYJKoZIhvcNAQELBQAD\nggIBADkFABWLcPZh2b0ZqnQuxfAD8gzN09NgGBMUjzA6WHqIDTUoRSf+TfNXkS91\nRMHsr3Ql6FCxHeD1s8VE20/NZNZ5Rm0zIRb/5gSZx9QSYIHuTM04OVqkeZB7qh/8\nxrCvlhdQRJ+Cx+9w11yMrwJxcHvTQdq/bkOPYwKJxC+1WVBC7w39VWO9nlmElUBk\n8QzoqBlNXYQRAUJCuYVLtvxrKBIERDhl2TTMFKEtw+dqBmZv/2q+eWjB1cZT6uhs\n+VX3UyCVF1Kt8pNJv9LJuQH/sj2JYUvcm/9kcIL5jQpi7WUXrUwOdWT+p2f2++4I\n32E35jHP1lMdHOKoe+aCF9W95yfF74AjbkbnGe2kxyxv6j9eSY99MI7Wz5DNiz2c\n2D7ng32AtiWgTc5c1e41+yDrPUKW0aNmr/FFfEvfmvN+R0kXJLH/xbfhH2+Ys54c\nFvmQPcV+al8WGYvZUguUQ8x7qbNH+4yECv0IvUXCA
WaCl2z8IRkUDBP9mo0qpYGa\nnG0NewkLyupR8NPTUvD01gMGrg+ifhLpCcUv9Rx9IGDLRGkBSHNvQCaP8gh0PFQo\nvfB0ZzPoRj8muEdGUvNfxpwvZEzal5XOTGPWIvOtufdb3sTLNAhfNYhSy6V0O91D\npB9F5jdJ1iT8ERVjdFtBCLZG00ES1N202LRq/5Y6xd2NIiz3\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_4096_cert_info.h",
    "content": "// NOLINT(namespace-envoy)\nconstexpr char TEST_SELFSIGNED_RSA_4096_CERT_256_HASH[] =\n    \"1008ef4f43c00538d6e722c547b08ea14ca7715dda89201fcbcca6c5f05dd02c\";\nconstexpr char TEST_SELFSIGNED_RSA_4096_CERT_1_HASH[] = \"c28478bc776e2cdf7932817168114dc4ecbe67b8\";\nconstexpr char TEST_SELFSIGNED_RSA_4096_CERT_SPKI[] =\n    \"gpM7le+KGkypSguhq/cbzhEoqTAV5MMkQRQr0WW7+jk=\";\nconstexpr char TEST_SELFSIGNED_RSA_4096_CERT_SERIAL[] = \"3d2cbbabfec8b0323469f42d1993195d1a898b18\";\nconstexpr char TEST_SELFSIGNED_RSA_4096_CERT_NOT_BEFORE[] = \"Aug 20 16:57:52 2020 GMT\";\nconstexpr char TEST_SELFSIGNED_RSA_4096_CERT_NOT_AFTER[] = \"Aug 20 16:57:52 2022 GMT\";\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/selfsigned_rsa_4096_key.pem",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIJKgIBAAKCAgEA3KxLzjDw7nsaEvdSpYUrFvrtrZ4nwHAXo6IE+Mlb4J1VqrO4\nS8OmV4d2ydGuSOPelqprq8efnxJXHhUsvYAb+HXSpZs6cn9Sp98qe9X2sNyof6Pw\nGz4eUcz4CD50OYeVXYI0RcnL9regqdG/4eQThW/KcYaRXFCaRUPZAACXM+6RgayK\nffs1K7v7PZmvYUXi2G0a4QqclK+hvsjD7eMCBOI2ovoVaUDmzDqtW9mwtRDY/0bd\nCw375bFXoczMi//HbV2aqlfU6z0R5VEq+kQsv5j4atXny2CsDfsjC8DjQmVuTjCD\npYCU6nMWXfg+mzTjw0Q/UP/RlBL/aMjgzK36LgtxQ4Q7uOMXm21kNMg2D/UFSFdO\nUFw263d9auwGV3sP8C7exXQrwYukJyNVDbOg/TeZqD104ChcWruffJfIIeQOX73+\n2CWk6oUEPx7egV9tSq3wue/DPSb61T0M0oCUTLRLHEhh7UXec+bWwSeBZKq6jynw\nSwIj1+H0n/4QifnWLHCkrHmn2DcHT2ceAXyd10RAyteOoIYplR7xd4OSH0l6jLxY\ntLf7qV0l3MvcbczcQdEIRi90ANokbZffobOZnxba7Z6YG58n3k1xq9mPjC9XriMj\nUeNSzBHDVPBGNQ2jKfDXgidPBT97AxD3tQXPQqZChBH2xn5d3rUKL9A/198CAwEA\nAQKCAgBjNbL/CFHJZd7bGAY4FB7DPh0NKUWF5t852eWSVDTSTunszM/WgCeQwn5N\nTc+lRY0U/5lYqW7B/WTdSXfL2oS4t3/0pE0qnMN0ZYicFB3MdefrOrEADnVjIVuB\nmVJ3atVkLN2DYq4oRXYd4wb5b6i3HQNO9sjzT+mLqm0s6xcPafpQ6ll5Oi81clYJ\n48ZNXTYXRzYGZtbuoed99pTfpmeJ+onFy9lEcMFSoC7AVKZ1QRXN7vYBuk+9fHWB\ncKmlZ2EpciPO4Z4ABHy/NDa2PWBA420L2w9TCCtSPc4LwrRpbWCiFn2ZMq3Ame08\nYhcDgEvFxjERIXH/uGrLmUQ3wR/wLcNTgEm1BaQ2CcZpmo53UTyILPswNNAW9TEh\nE92VtySMI9BKU+xvleFChlARRF9TKKUOTN50XfI4YWfCk05toFD4DK+Pjsi4mnXy\ncMRmBnJLvQCgUDhD1Wky4RFNDuWWe4GN9+/sgEPHDWZ59u5pHE9XQq3jMfJs5Glv\nkIJ5RWKbHLr34Y7Ql2KebHD17BK0hgfMIVCoCKzPBr2Yq6kpcio2mX+seM0f/dOw\nnowSWHPaeSaVqShOF9HguHZcPG1n6OZ2Es57VlgX32r++1cSMUUIeeUCc3942EtO\nNWIe+F2mmptRC8EU7o8tfAaKKZGaAU/EyP9t/wKbM5RwzKg4gQKCAQEA8lKxEmoT\nG0M295xOAcphXLyg+D1oeg01XbmmmHpYRYr8AjMWqkI3vo9GqW3JMcASwwMxdqFC\nEshtNFDzC8EXDontHh6HY2X3+5OVIG+PSCeoafhZg7i3cXUnr02KhVihQ1o59Ehd\nEirOD3BPOPcWajZwj/3ST1/sFU5VuHnCKtvalpSqRxkQ40hpUDPWdqSP+Cmu0V1W\nATrgYjS5R3W8SAfSO0vEYtEcy5WrI30uYqgpjl0baYOphenVXr2QvbIr9gZhCg44\nZTzSmpvHABUax2Y1YS20jGTK459fjWP6Rarho2ywtE+0M4kGFdX63DCc3WdhIFXb\n1tUQe5KZyoA6ZwKCAQEA6SDJDANenAdPap7ue2uYoftHTCFnXLQ1iI+C8eYiwaEB\nubwIjqHYL8a+6EKszRTpjVS30egcnwIu1cfdWP/f9UuEXpxwMOatR1mdNymWLQaA\nSCwdRPjmw2FSjBy3ineb3JwAV98jNf48hpVMo
NORPBBJikNUEZh9A0w987CxFJd5\naG7HNwSB51qd8qL4hDIhngDZE+nk4t2t0zfHQo8rpd+kTHKT86G/1xv9OpPJ1ip6\nJ40UiF82unnphyEw+tMvX0NazRQ5yr6Blt4/ti9TYBhzajElSzxNknmJXS5fh5CV\nqccGNtqcMEM1ZOOIZL0hfAick8Fix100WSqIUdn7yQKCAQEAzvt5CnyWo2PPUMMv\nEnJ2pYrth/yUACTGH6+TMIzYV1Z3Gb3fr0BucxPYzKHdB+TDYdWZa3YkhPyz4sr+\nP10IP7/qTR7QkEOqBTazW4hO/9f4D8Q1WcDu2De01RVLq8jfvjMUa6df8EOlzSQm\nV0ce20xy61toR6pscmp959wA0TEHzjVgGOi9NxZD9mDA4IiB/PxFucmay+Qpg1j7\npaFYWMJtfX6jWbC6gt7t1pt3goeXL2YyIvd2Vs88FPeg6ZeF21jpjHhHDZpdXfiP\ni297dzTDhaVPodf6QWGBcbkjLlze7QPLD8lA6AAztXUYGPphEvDZ0o/j/uXxdUvs\nlz7eIwKCAQEAkUh3Ql9PNJlxqpaFNLEnG7tHAXDHw4Qi4InfJvUur9CxDJjcFFdU\nLOIE0yXKHrxrmEI1lEC8pXNs0JmPnglH/swjH/sKB8+aKR711Qy7pC+q/k9Ppk4f\nMZ+TG/JBh69mmKM3q4tJnDpmadDdQlTW68GSsvGmkJihP87hxN5WXOjRJn3y9JyA\ngDd+u1lI2smLHbaVkc1vcNqJm1nledtTbjnjYCj/fxNMRLgSpdC4NhmeqstHAn2j\n065qFGZg6c92dD27dpamNW1nwSecOONhaD+UnZcnEK3yAdhv/HwL2KYIPxfjDYhL\nUeBwhAEwXOdo2K1dNs+LyYaSOwdAkRHx0QKCAQEAs2w8C1QiDJRqJnPed91NfMUZ\noY3HO2sMfB3mzAhLNiqwizYQtciJIynN4Y7jdCTGAvvRVahqaGsI/G+2Ge8jhLKp\nYyxvKXU7dsX0baoglNSrgbG+5aFw5RN22vB2sfYa4sFoNnKTIOwZosAa/opddoOO\nANBxF2oNMd4qk584921YUXjgnROLQAmCUDZlt8KSDVrgOHEA5LvNR6otdkP4aoLV\nVsUAWs+S7Pj4fvxnwuEP03lzgQB0mAm/GycLzqpAplxkg0sC3Rg1C+Q+T2Gv8MjI\n5SwBj9TsrKciLSHEVTGbEIftDbwx+HecFPVBOOfq9CJcbXLoyrpSC+yXwHsOng==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ticket_key_a",
    "content": "\u0001\u001bp\tZD\u0007\t=\u001d+;MJ^k|8Ĵ\u000b\u0013f`\u0006A\u000fgy㌰{FԴ烗\u0003v'N[p"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ticket_key_b",
    "content": "\u000e\\Roy\u0011\u0011*/rdHgڇߋH\u00190y\u001b$gղЛ=!utW<Yᠵ0\n2v\\"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_data/ticket_key_wrong_len",
    "content": "HIycN'\fU\"\rK%擒p~Wv Qtܸ.w7@JҨ7a#G\u001f-\u001e\u0016,Dj0\u001eA;q"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_private_key_method_provider.cc",
    "content": "#include \"test/extensions/transport_sockets/tls/test_private_key_method_provider.h\"\n\n#include <memory>\n\n#include \"envoy/api/api.h\"\n\n#include \"openssl/ssl.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace PrivateKeyMethodProvider {\n\nvoid TestPrivateKeyConnection::delayed_op() {\n  const std::chrono::milliseconds timeout_0ms{0};\n\n  timer_ = dispatcher_.createTimer([this]() -> void {\n    finished_ = true;\n    this->cb_.onPrivateKeyMethodComplete();\n  });\n  timer_->enableTimer(timeout_0ms);\n}\n\nstatic int calculateDigest(const EVP_MD* md, const uint8_t* in, size_t in_len, unsigned char* hash,\n                           unsigned int* hash_len) {\n  bssl::ScopedEVP_MD_CTX ctx;\n\n  // Calculate the message digest for signing.\n  if (!EVP_DigestInit_ex(ctx.get(), md, nullptr) || !EVP_DigestUpdate(ctx.get(), in, in_len) ||\n      !EVP_DigestFinal_ex(ctx.get(), hash, hash_len)) {\n    return 0;\n  }\n  return 1;\n}\n\nstatic ssl_private_key_result_t ecdsaPrivateKeySign(SSL* ssl, uint8_t* out, size_t* out_len,\n                                                    size_t max_out, uint16_t signature_algorithm,\n                                                    const uint8_t* in, size_t in_len) {\n  unsigned char hash[EVP_MAX_MD_SIZE];\n  unsigned int hash_len;\n  TestPrivateKeyConnection* ops = static_cast<TestPrivateKeyConnection*>(\n      SSL_get_ex_data(ssl, TestPrivateKeyMethodProvider::ecdsaConnectionIndex()));\n  unsigned int out_len_unsigned;\n\n  if (!ops) {\n    return ssl_private_key_failure;\n  }\n\n  if (ops->test_options_.method_error_) {\n    // Have an artificial test failure.\n    return ssl_private_key_failure;\n  }\n\n  if (!ops->test_options_.sign_expected_) {\n    return ssl_private_key_failure;\n  }\n\n  const EVP_MD* md = SSL_get_signature_algorithm_digest(signature_algorithm);\n  if (!md) {\n    return ssl_private_key_failure;\n  }\n\n  if (!calculateDigest(md, in, in_len, hash, &hash_len)) {\n    return 
ssl_private_key_failure;\n  }\n\n  bssl::UniquePtr<EC_KEY> ec_key(EVP_PKEY_get1_EC_KEY(ops->getPrivateKey()));\n  if (!ec_key) {\n    return ssl_private_key_failure;\n  }\n\n  // Borrow \"out\" because it has been already initialized to the max_out size.\n  if (!ECDSA_sign(0, hash, hash_len, out, &out_len_unsigned, ec_key.get())) {\n    return ssl_private_key_failure;\n  }\n\n  if (ops->test_options_.sync_mode_) {\n    // Return immediately with the results.\n    if (out_len_unsigned > max_out) {\n      return ssl_private_key_failure;\n    }\n    *out_len = out_len_unsigned;\n    return ssl_private_key_success;\n  }\n\n  ops->output_.assign(out, out + out_len_unsigned);\n  // Tell SSL socket that the operation is ready to be called again.\n  ops->delayed_op();\n\n  return ssl_private_key_retry;\n}\n\nstatic ssl_private_key_result_t ecdsaPrivateKeyDecrypt(SSL*, uint8_t*, size_t*, size_t,\n                                                       const uint8_t*, size_t) {\n  return ssl_private_key_failure;\n}\n\nstatic ssl_private_key_result_t rsaPrivateKeySign(SSL* ssl, uint8_t* out, size_t* out_len,\n                                                  size_t max_out, uint16_t signature_algorithm,\n                                                  const uint8_t* in, size_t in_len) {\n  TestPrivateKeyConnection* ops = static_cast<TestPrivateKeyConnection*>(\n      SSL_get_ex_data(ssl, TestPrivateKeyMethodProvider::rsaConnectionIndex()));\n  unsigned char hash[EVP_MAX_MD_SIZE] = {0};\n  unsigned int hash_len = EVP_MAX_MD_SIZE;\n  std::vector<uint8_t> in2;\n\n  if (!ops) {\n    return ssl_private_key_failure;\n  }\n\n  if (ops->test_options_.method_error_) {\n    return ssl_private_key_failure;\n  }\n\n  if (!ops->test_options_.sign_expected_) {\n    return ssl_private_key_failure;\n  }\n\n  const EVP_MD* md = SSL_get_signature_algorithm_digest(signature_algorithm);\n  if (!md) {\n    return ssl_private_key_failure;\n  }\n\n  in2.assign(in, in + in_len);\n\n  // If crypto 
error is set, we'll modify the incoming token by flipping\n  // the bits.\n  if (ops->test_options_.crypto_error_) {\n    for (size_t i = 0; i < in_len; i++) {\n      in2[i] = ~in2[i];\n    }\n  }\n\n  if (!calculateDigest(md, in2.data(), in_len, hash, &hash_len)) {\n    return ssl_private_key_failure;\n  }\n\n  RSA* rsa = EVP_PKEY_get0_RSA(ops->getPrivateKey());\n  if (rsa == nullptr) {\n    return ssl_private_key_failure;\n  }\n\n  // Perform RSA signing.\n  if (SSL_is_signature_algorithm_rsa_pss(signature_algorithm)) {\n    if (!RSA_sign_pss_mgf1(rsa, out_len, out, max_out, hash, hash_len, md, nullptr, -1)) {\n      return ssl_private_key_failure;\n    }\n  } else {\n    unsigned int out_len_unsigned;\n    if (!RSA_sign(EVP_MD_type(md), hash, hash_len, out, &out_len_unsigned, rsa)) {\n      return ssl_private_key_failure;\n    }\n    if (out_len_unsigned > max_out) {\n      return ssl_private_key_failure;\n    }\n    *out_len = out_len_unsigned;\n  }\n\n  if (ops->test_options_.sync_mode_) {\n    return ssl_private_key_success;\n  }\n\n  ops->output_.assign(out, out + *out_len);\n  ops->delayed_op();\n\n  return ssl_private_key_retry;\n}\n\nstatic ssl_private_key_result_t rsaPrivateKeyDecrypt(SSL* ssl, uint8_t* out, size_t* out_len,\n                                                     size_t max_out, const uint8_t* in,\n                                                     size_t in_len) {\n  TestPrivateKeyConnection* ops = static_cast<TestPrivateKeyConnection*>(\n      SSL_get_ex_data(ssl, TestPrivateKeyMethodProvider::rsaConnectionIndex()));\n\n  if (!ops) {\n    return ssl_private_key_failure;\n  }\n\n  if (ops->test_options_.method_error_) {\n    return ssl_private_key_failure;\n  }\n\n  if (!ops->test_options_.decrypt_expected_) {\n    return ssl_private_key_failure;\n  }\n\n  RSA* rsa = EVP_PKEY_get0_RSA(ops->getPrivateKey());\n  if (rsa == nullptr) {\n    return ssl_private_key_failure;\n  }\n\n  if (!RSA_decrypt(rsa, out_len, out, max_out, in, in_len, 
RSA_NO_PADDING)) {\n    return ssl_private_key_failure;\n  }\n\n  if (ops->test_options_.sync_mode_) {\n    return ssl_private_key_success;\n  }\n\n  ops->output_.assign(out, out + *out_len);\n  ops->delayed_op();\n\n  return ssl_private_key_retry;\n}\n\nstatic ssl_private_key_result_t privateKeyComplete(SSL* ssl, uint8_t* out, size_t* out_len,\n                                                   size_t max_out, int id) {\n  TestPrivateKeyConnection* ops = static_cast<TestPrivateKeyConnection*>(SSL_get_ex_data(ssl, id));\n\n  if (!ops->finished_) {\n    // The operation didn't finish yet, retry.\n    return ssl_private_key_retry;\n  }\n\n  if (ops->test_options_.async_method_error_) {\n    return ssl_private_key_failure;\n  }\n\n  if (ops->output_.size() > max_out) {\n    return ssl_private_key_failure;\n  }\n\n  std::copy(ops->output_.begin(), ops->output_.end(), out);\n  *out_len = ops->output_.size();\n\n  return ssl_private_key_success;\n}\n\nstatic ssl_private_key_result_t rsaPrivateKeyComplete(SSL* ssl, uint8_t* out, size_t* out_len,\n                                                      size_t max_out) {\n  return privateKeyComplete(ssl, out, out_len, max_out,\n                            TestPrivateKeyMethodProvider::rsaConnectionIndex());\n}\n\nstatic ssl_private_key_result_t ecdsaPrivateKeyComplete(SSL* ssl, uint8_t* out, size_t* out_len,\n                                                        size_t max_out) {\n  return privateKeyComplete(ssl, out, out_len, max_out,\n                            TestPrivateKeyMethodProvider::ecdsaConnectionIndex());\n}\n\nSsl::BoringSslPrivateKeyMethodSharedPtr\nTestPrivateKeyMethodProvider::getBoringSslPrivateKeyMethod() {\n  return method_;\n}\n\nbool TestPrivateKeyMethodProvider::checkFips() {\n  if (mode_ == \"rsa\") {\n    RSA* rsa_private_key = EVP_PKEY_get0_RSA(pkey_.get());\n    if (rsa_private_key == nullptr || !RSA_check_fips(rsa_private_key)) {\n      return false;\n    }\n  } else { // if (mode_ == 
\"ecdsa\")\n    const EC_KEY* ecdsa_private_key = EVP_PKEY_get0_EC_KEY(pkey_.get());\n    if (ecdsa_private_key == nullptr || !EC_KEY_check_fips(ecdsa_private_key)) {\n      return false;\n    }\n  }\n  return true;\n}\n\nTestPrivateKeyConnection::TestPrivateKeyConnection(\n    Ssl::PrivateKeyConnectionCallbacks& cb, Event::Dispatcher& dispatcher,\n    bssl::UniquePtr<EVP_PKEY> pkey, TestPrivateKeyConnectionTestOptions& test_options)\n    : test_options_(test_options), cb_(cb), dispatcher_(dispatcher), pkey_(std::move(pkey)) {}\n\nvoid TestPrivateKeyMethodProvider::registerPrivateKeyMethod(SSL* ssl,\n                                                            Ssl::PrivateKeyConnectionCallbacks& cb,\n                                                            Event::Dispatcher& dispatcher) {\n  TestPrivateKeyConnection* ops;\n  // In multi-cert case, when the same provider is used in different modes with the same SSL object,\n  // we need to keep both rsa and ecdsa connection objects in store because the test options for the\n  // two certificates may be different. We need to be able to deduct in the signing, decryption, and\n  // completion functions which options to use, so we associate the connection objects to the same\n  // SSL object using different user data indexes.\n  //\n  // Another way to do this would be to store both test options in one connection object.\n  int index = mode_ == \"rsa\" ? TestPrivateKeyMethodProvider::rsaConnectionIndex()\n                             : TestPrivateKeyMethodProvider::ecdsaConnectionIndex();\n\n  // Check if there is another certificate of the same mode associated with the context. 
This would\n  // be an error.\n  ops = static_cast<TestPrivateKeyConnection*>(SSL_get_ex_data(ssl, index));\n  if (ops != nullptr) {\n    throw EnvoyException(\n        \"Can't distinguish between two registered providers for the same SSL object.\");\n  }\n\n  ops = new TestPrivateKeyConnection(cb, dispatcher, bssl::UpRef(pkey_), test_options_);\n  SSL_set_ex_data(ssl, index, ops);\n}\n\nvoid TestPrivateKeyMethodProvider::unregisterPrivateKeyMethod(SSL* ssl) {\n  int index = mode_ == \"rsa\" ? TestPrivateKeyMethodProvider::rsaConnectionIndex()\n                             : TestPrivateKeyMethodProvider::ecdsaConnectionIndex();\n  TestPrivateKeyConnection* ops =\n      static_cast<TestPrivateKeyConnection*>(SSL_get_ex_data(ssl, index));\n  SSL_set_ex_data(ssl, index, nullptr);\n  delete ops;\n}\n\nstatic int createIndex() {\n  int index = SSL_get_ex_new_index(0, nullptr, nullptr, nullptr, nullptr);\n  RELEASE_ASSERT(index >= 0, \"Failed to get SSL user data index.\");\n  return index;\n}\n\nint TestPrivateKeyMethodProvider::rsaConnectionIndex() {\n  CONSTRUCT_ON_FIRST_USE(int, createIndex());\n}\n\nint TestPrivateKeyMethodProvider::ecdsaConnectionIndex() {\n  CONSTRUCT_ON_FIRST_USE(int, createIndex());\n}\n\nTestPrivateKeyMethodProvider::TestPrivateKeyMethodProvider(\n    const ProtobufWkt::Any& typed_config,\n    Server::Configuration::TransportSocketFactoryContext& factory_context) {\n  std::string private_key_path;\n\n  auto config = MessageUtil::anyConvert<ProtobufWkt::Struct>(typed_config);\n\n  for (auto& value_it : config.fields()) {\n    auto& value = value_it.second;\n    if (value_it.first == \"private_key_file\" &&\n        value.kind_case() == ProtobufWkt::Value::kStringValue) {\n      private_key_path = value.string_value();\n    }\n    if (value_it.first == \"sync_mode\" && value.kind_case() == ProtobufWkt::Value::kBoolValue) {\n      test_options_.sync_mode_ = value.bool_value();\n    }\n    if (value_it.first == \"crypto_error\" && value.kind_case() 
== ProtobufWkt::Value::kBoolValue) {\n      test_options_.crypto_error_ = value.bool_value();\n    }\n    if (value_it.first == \"method_error\" && value.kind_case() == ProtobufWkt::Value::kBoolValue) {\n      test_options_.method_error_ = value.bool_value();\n    }\n    if (value_it.first == \"async_method_error\" &&\n        value.kind_case() == ProtobufWkt::Value::kBoolValue) {\n      test_options_.async_method_error_ = value.bool_value();\n    }\n    if (value_it.first == \"expected_operation\" &&\n        value.kind_case() == ProtobufWkt::Value::kStringValue) {\n      if (value.string_value() == \"decrypt\") {\n        test_options_.decrypt_expected_ = true;\n      } else if (value.string_value() == \"sign\") {\n        test_options_.sign_expected_ = true;\n      }\n    }\n    if (value_it.first == \"mode\" && value.kind_case() == ProtobufWkt::Value::kStringValue) {\n      mode_ = value.string_value();\n    }\n  }\n\n  std::string private_key = factory_context.api().fileSystem().fileReadToEnd(private_key_path);\n  bssl::UniquePtr<BIO> bio(\n      BIO_new_mem_buf(const_cast<char*>(private_key.data()), private_key.size()));\n  bssl::UniquePtr<EVP_PKEY> pkey(PEM_read_bio_PrivateKey(bio.get(), nullptr, nullptr, nullptr));\n  if (pkey == nullptr) {\n    throw EnvoyException(\"Failed to read private key from disk.\");\n  }\n\n  method_ = std::make_shared<SSL_PRIVATE_KEY_METHOD>();\n\n  // Have two modes, \"rsa\" and \"ecdsa\", for testing multi-cert use cases.\n  if (mode_ == \"rsa\") {\n    if (EVP_PKEY_id(pkey.get()) != EVP_PKEY_RSA) {\n      throw EnvoyException(\"Private key is not RSA.\");\n    }\n    method_->sign = rsaPrivateKeySign;\n    method_->decrypt = rsaPrivateKeyDecrypt;\n    method_->complete = rsaPrivateKeyComplete;\n  } else if (mode_ == \"ecdsa\") {\n    if (EVP_PKEY_id(pkey.get()) != EVP_PKEY_EC) {\n      throw EnvoyException(\"Private key is not ECDSA.\");\n    }\n    method_->sign = ecdsaPrivateKeySign;\n    method_->decrypt = 
ecdsaPrivateKeyDecrypt;\n    method_->complete = ecdsaPrivateKeyComplete;\n  } else {\n    throw EnvoyException(\"Unknown test provider mode, supported modes are \\\"rsa\\\" and \\\"ecdsa\\\".\");\n  }\n\n  pkey_ = std::move(pkey);\n}\n\n} // namespace PrivateKeyMethodProvider\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/test_private_key_method_provider.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/ssl/private_key/private_key.h\"\n#include \"envoy/ssl/private_key/private_key_config.h\"\n\n#include \"common/config/utility.h\"\n#include \"common/protobuf/utility.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace PrivateKeyMethodProvider {\n\nstruct TestPrivateKeyConnectionTestOptions {\n  // Return private key method value directly without asynchronous operation.\n  bool sync_mode_{};\n\n  // The \"decrypt\" private key method is expected to he called.\n  bool decrypt_expected_{};\n\n  // The \"sign\" private key method is expected to he called.\n  bool sign_expected_{};\n\n  // Add a cryptographic error (invalid signature, incorrect decryption).\n  bool crypto_error_{};\n\n  // Return an error from the private key method.\n  bool method_error_{};\n\n  // Return an error from the private key method completion function.\n  bool async_method_error_{};\n};\n\n// An example private key method provider for testing the decrypt() and sign()\n// functionality.\nclass TestPrivateKeyConnection {\npublic:\n  TestPrivateKeyConnection(Ssl::PrivateKeyConnectionCallbacks& cb, Event::Dispatcher& dispatcher,\n                           bssl::UniquePtr<EVP_PKEY> pkey,\n                           TestPrivateKeyConnectionTestOptions& test_options);\n  EVP_PKEY* getPrivateKey() { return pkey_.get(); }\n  void delayed_op();\n  // Store the output data temporarily.\n  std::vector<uint8_t> output_;\n  // The complete callback can return other value than \"retry\" only after\n  // onPrivateKeyMethodComplete() function has been called. 
This is controlled by \"finished\"\n  // variable.\n  bool finished_{};\n  TestPrivateKeyConnectionTestOptions& test_options_;\n\nprivate:\n  Ssl::PrivateKeyConnectionCallbacks& cb_;\n  Event::Dispatcher& dispatcher_;\n  bssl::UniquePtr<EVP_PKEY> pkey_;\n  // A zero-length timer controls the callback.\n  Event::TimerPtr timer_;\n};\n\nclass TestPrivateKeyMethodProvider : public virtual Ssl::PrivateKeyMethodProvider {\npublic:\n  TestPrivateKeyMethodProvider(\n      const ProtobufWkt::Any& typed_config,\n      Server::Configuration::TransportSocketFactoryContext& factory_context);\n  // Ssl::PrivateKeyMethodProvider\n  void registerPrivateKeyMethod(SSL* ssl, Ssl::PrivateKeyConnectionCallbacks& cb,\n                                Event::Dispatcher& dispatcher) override;\n  void unregisterPrivateKeyMethod(SSL* ssl) override;\n  bool checkFips() override;\n  Ssl::BoringSslPrivateKeyMethodSharedPtr getBoringSslPrivateKeyMethod() override;\n\n  static int rsaConnectionIndex();\n  static int ecdsaConnectionIndex();\n\nprivate:\n  Ssl::BoringSslPrivateKeyMethodSharedPtr method_{};\n  bssl::UniquePtr<EVP_PKEY> pkey_;\n  TestPrivateKeyConnectionTestOptions test_options_;\n  std::string mode_;\n};\n\nclass TestPrivateKeyMethodFactory : public Ssl::PrivateKeyMethodProviderInstanceFactory {\npublic:\n  // Ssl::PrivateKeyMethodProviderInstanceFactory\n  Ssl::PrivateKeyMethodProviderSharedPtr createPrivateKeyMethodProviderInstance(\n      const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& config,\n      Server::Configuration::TransportSocketFactoryContext& factory_context) override {\n    return std::make_shared<TestPrivateKeyMethodProvider>(config.typed_config(), factory_context);\n  }\n\n  std::string name() const override { return \"test\"; };\n};\n\n} // namespace PrivateKeyMethodProvider\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/transport_sockets/tls/utility_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"extensions/transport_sockets/tls/utility.h\"\n\n#include \"test/extensions/transport_sockets/tls/ssl_test_utility.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/long_validity_cert_info.h\"\n#include \"test/extensions/transport_sockets/tls/test_data/san_dns_cert_info.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/time/time.h\"\n#include \"gtest/gtest.h\"\n#include \"openssl/x509v3.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace TransportSockets {\nnamespace Tls {\nnamespace {\n\nTEST(UtilityTest, TestGetSubjectAlternateNamesWithDNS) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  const auto& subject_alt_names = Utility::getSubjectAltNames(*cert, GEN_DNS);\n  EXPECT_EQ(1, subject_alt_names.size());\n}\n\nTEST(UtilityTest, TestMultipleGetSubjectAlternateNamesWithDNS) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir \"\n      \"}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\"));\n  const auto& subject_alt_names = Utility::getSubjectAltNames(*cert, GEN_DNS);\n  EXPECT_EQ(2, subject_alt_names.size());\n}\n\nTEST(UtilityTest, TestGetSubjectAlternateNamesWithUri) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"));\n  const auto& subject_alt_names = Utility::getSubjectAltNames(*cert, GEN_URI);\n  EXPECT_EQ(1, subject_alt_names.size());\n}\n\nTEST(UtilityTest, TestGetSubjectAlternateNamesWithNoSAN) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem\"));\n  const auto& uri_subject_alt_names = Utility::getSubjectAltNames(*cert, GEN_URI);\n  EXPECT_EQ(0, uri_subject_alt_names.size());\n}\n\nTEST(UtilityTest, TestGetSubject) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  EXPECT_EQ(\"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\",\n            Utility::getSubjectFromCertificate(*cert));\n}\n\nTEST(UtilityTest, TestGetIssuer) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  EXPECT_EQ(\"CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\",\n            Utility::getIssuerFromCertificate(*cert));\n}\n\nTEST(UtilityTest, TestGetSerialNumber) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  EXPECT_EQ(TEST_SAN_DNS_CERT_SERIAL, Utility::getSerialNumberFromCertificate(*cert));\n}\n\nTEST(UtilityTest, TestDaysUntilExpiration) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  // Set a known date (2033-05-18 03:33:20 UTC) so that we get fixed output from this test.\n  const time_t known_date_time = 2000000000;\n  Event::SimulatedTimeSystem time_source;\n  time_source.setSystemTime(std::chrono::system_clock::from_time_t(known_date_time));\n\n  // Get expiration time from the certificate info.\n  const absl::Time expiration =\n      TestUtility::parseTime(TEST_SAN_DNS_CERT_NOT_AFTER, \"%b %e %H:%M:%S %Y GMT\");\n\n  int days = std::difftime(absl::ToTimeT(expiration), known_date_time) / (60 * 60 
* 24);\n  EXPECT_EQ(days, Utility::getDaysUntilExpiration(cert.get(), time_source));\n}\n\nTEST(UtilityTest, TestDaysUntilExpirationWithNull) {\n  Event::SimulatedTimeSystem time_source;\n  EXPECT_EQ(std::numeric_limits<int>::max(), Utility::getDaysUntilExpiration(nullptr, time_source));\n}\n\nTEST(UtilityTest, TestValidFrom) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  const std::string formatted =\n      TestUtility::formatTime(Utility::getValidFrom(*cert), \"%b %e %H:%M:%S %Y GMT\");\n  EXPECT_EQ(TEST_SAN_DNS_CERT_NOT_BEFORE, formatted);\n}\n\nTEST(UtilityTest, TestExpirationTime) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\"));\n  const std::string formatted =\n      TestUtility::formatTime(Utility::getExpirationTime(*cert), \"%b %e %H:%M:%S %Y GMT\");\n  EXPECT_EQ(TEST_SAN_DNS_CERT_NOT_AFTER, formatted);\n}\n\nTEST(UtilityTest, TestLongExpirationTime) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/long_validity_cert.pem\"));\n  const std::string formatted =\n      TestUtility::formatTime(Utility::getExpirationTime(*cert), \"%b %e %H:%M:%S %Y GMT\");\n  EXPECT_EQ(TEST_LONG_VALIDITY_CERT_NOT_AFTER, formatted);\n}\n\nTEST(UtilityTest, GetLastCryptoError) {\n  // Clearing the error stack leaves us with no error to get.\n  ERR_clear_error();\n  EXPECT_FALSE(Utility::getLastCryptoError().has_value());\n\n  ERR_put_error(ERR_LIB_SSL, 0, ERR_R_MALLOC_FAILURE, __FILE__, __LINE__);\n  EXPECT_EQ(Utility::getLastCryptoError().value(),\n            \"error:10000041:SSL routines:OPENSSL_internal:malloc failure\");\n\n  // We consumed the last error, so back to not having an error to get.\n  
EXPECT_FALSE(Utility::getLastCryptoError().has_value());\n}\n\nTEST(UtilityTest, TestGetCertificationExtensionValue) {\n  bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/extensions_cert.pem\"));\n  EXPECT_EQ(\"\\xc\\x9Something\", Utility::getCertificateExtensionValue(*cert, \"1.2.3.4.5.6.7.8\"));\n  EXPECT_EQ(\"\\x30\\x3\\x1\\x1\\xFF\", Utility::getCertificateExtensionValue(*cert, \"1.2.3.4.5.6.7.9\"));\n  EXPECT_EQ(\"\", Utility::getCertificateExtensionValue(*cert, \"1.2.3.4.5.6.7.10\"));\n  EXPECT_EQ(\"\", Utility::getCertificateExtensionValue(*cert, \"1.2.3.4\"));\n  EXPECT_EQ(\"\", Utility::getCertificateExtensionValue(*cert, \"\"));\n  EXPECT_EQ(\"\", Utility::getCertificateExtensionValue(*cert, \"foo\"));\n}\n\n} // namespace\n} // namespace Tls\n} // namespace TransportSockets\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/upstreams/http/tcp/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"upstream_request_test\",\n    srcs = [\"upstream_request_test.cc\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/router:router_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/upstreams/http/tcp:upstream_request_lib\",\n        \"//test/common/http:common_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/router:router_filter_interface\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/upstreams/http/tcp/upstream_request_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/router/config_impl.h\"\n#include \"common/router/router.h\"\n#include \"common/router/upstream_request.h\"\n\n#include \"extensions/common/proxy_protocol/proxy_protocol_header.h\"\n#include \"extensions/upstreams/http/tcp/upstream_request.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/router/router_filter_interface.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/tcp/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing Envoy::Http::TestRequestHeaderMapImpl;\nusing Envoy::Router::UpstreamRequest;\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Upstreams {\nnamespace Http {\nnamespace Tcp {\n\nclass TcpConnPoolTest : public ::testing::Test {\npublic:\n  TcpConnPoolTest() : host_(std::make_shared<NiceMock<Upstream::MockHost>>()) {\n    NiceMock<Router::MockRouteEntry> route_entry;\n    NiceMock<Upstream::MockClusterManager> cm;\n    EXPECT_CALL(cm, tcpConnPoolForCluster(_, _, _)).WillOnce(Return(&mock_pool_));\n    conn_pool_ = std::make_unique<TcpConnPool>(cm, true, route_entry, Envoy::Http::Protocol::Http11,\n                                               nullptr);\n  }\n\n  std::unique_ptr<TcpConnPool> conn_pool_;\n  Envoy::Tcp::ConnectionPool::MockInstance mock_pool_;\n  Router::MockGenericConnectionPoolCallbacks mock_generic_callbacks_;\n  std::shared_ptr<NiceMock<Upstream::MockHost>> host_;\n  NiceMock<Envoy::ConnectionPool::MockCancellable> cancellable_;\n};\n\nTEST_F(TcpConnPoolTest, Basic) {\n  NiceMock<Network::MockClientConnection> connection;\n\n  EXPECT_CALL(mock_pool_, 
newConnection(_)).WillOnce(Return(&cancellable_));\n  conn_pool_->newStream(&mock_generic_callbacks_);\n\n  EXPECT_CALL(mock_generic_callbacks_, upstreamToDownstream());\n  EXPECT_CALL(mock_generic_callbacks_, onPoolReady(_, _, _, _));\n  auto data = std::make_unique<NiceMock<Envoy::Tcp::ConnectionPool::MockConnectionData>>();\n  EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection));\n  conn_pool_->onPoolReady(std::move(data), host_);\n}\n\nTEST_F(TcpConnPoolTest, OnPoolFailure) {\n  EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_));\n  conn_pool_->newStream(&mock_generic_callbacks_);\n\n  EXPECT_CALL(mock_generic_callbacks_, onPoolFailure(_, _, _));\n  conn_pool_->onPoolFailure(Envoy::Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure,\n                            host_);\n\n  // Make sure that the pool failure nulled out the pending request.\n  EXPECT_FALSE(conn_pool_->cancelAnyPendingStream());\n}\n\nTEST_F(TcpConnPoolTest, Cancel) {\n  // Initially cancel should fail as there is no pending request.\n  EXPECT_FALSE(conn_pool_->cancelAnyPendingStream());\n\n  EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_));\n  conn_pool_->newStream(&mock_generic_callbacks_);\n\n  // Canceling should now return true as there was an active request.\n  EXPECT_TRUE(conn_pool_->cancelAnyPendingStream());\n\n  // A second cancel should return false as there is not a pending request.\n  EXPECT_FALSE(conn_pool_->cancelAnyPendingStream());\n}\n\nclass TcpUpstreamTest : public ::testing::Test {\npublic:\n  TcpUpstreamTest() {\n    mock_router_filter_.requests_.push_back(std::make_unique<UpstreamRequest>(\n        mock_router_filter_, std::make_unique<NiceMock<Router::MockGenericConnPool>>()));\n    auto data = std::make_unique<NiceMock<Envoy::Tcp::ConnectionPool::MockConnectionData>>();\n    EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection_));\n    
tcp_upstream_ =\n        std::make_unique<TcpUpstream>(mock_router_filter_.requests_.front().get(), std::move(data));\n  }\n  ~TcpUpstreamTest() override { EXPECT_CALL(mock_router_filter_, config()).Times(AnyNumber()); }\n\nprotected:\n  NiceMock<Network::MockClientConnection> connection_;\n  NiceMock<Router::MockRouterFilterInterface> mock_router_filter_;\n  Envoy::Tcp::ConnectionPool::MockConnectionData* mock_connection_data_;\n  std::unique_ptr<TcpUpstream> tcp_upstream_;\n  TestRequestHeaderMapImpl request_{{\":method\", \"CONNECT\"},\n                                    {\":path\", \"/\"},\n                                    {\":protocol\", \"bytestream\"},\n                                    {\":scheme\", \"https\"},\n                                    {\":authority\", \"host\"}};\n};\n\nTEST_F(TcpUpstreamTest, Basic) {\n  // Swallow the request headers and generate response headers.\n  EXPECT_CALL(connection_, write(_, false)).Times(0);\n  EXPECT_CALL(mock_router_filter_, onUpstreamHeaders(200, _, _, false));\n  tcp_upstream_->encodeHeaders(request_, false);\n\n  // Proxy the data.\n  EXPECT_CALL(connection_, write(BufferStringEqual(\"foo\"), false));\n  Buffer::OwnedImpl buffer(\"foo\");\n  tcp_upstream_->encodeData(buffer, false);\n\n  // Metadata is swallowed.\n  Envoy::Http::MetadataMapVector metadata_map_vector;\n  tcp_upstream_->encodeMetadata(metadata_map_vector);\n\n  // Forward data.\n  Buffer::OwnedImpl response1(\"bar\");\n  EXPECT_CALL(mock_router_filter_, onUpstreamData(BufferStringEqual(\"bar\"), _, false));\n  tcp_upstream_->onUpstreamData(response1, false);\n\n  Buffer::OwnedImpl response2(\"eep\");\n  EXPECT_CALL(mock_router_filter_, onUpstreamHeaders(_, _, _, _)).Times(0);\n  EXPECT_CALL(mock_router_filter_, onUpstreamData(BufferStringEqual(\"eep\"), _, false));\n  tcp_upstream_->onUpstreamData(response2, false);\n}\n\nTEST_F(TcpUpstreamTest, V1Header) {\n  envoy::config::core::v3::ProxyProtocolConfig* proxy_config =\n      
mock_router_filter_.route_entry_.connect_config_->mutable_proxy_protocol_config();\n  proxy_config->set_version(envoy::config::core::v3::ProxyProtocolConfig::V1);\n  mock_router_filter_.client_connection_.remote_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 5);\n  mock_router_filter_.client_connection_.local_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"4.5.6.7\", 8);\n\n  Buffer::OwnedImpl expected_data;\n  Extensions::Common::ProxyProtocol::generateProxyProtoHeader(\n      *proxy_config, mock_router_filter_.client_connection_, expected_data);\n\n  // encodeHeaders now results in the proxy proto header being sent.\n  EXPECT_CALL(connection_, write(BufferEqual(&expected_data), false));\n  tcp_upstream_->encodeHeaders(request_, false);\n\n  // Data is proxied as usual.\n  EXPECT_CALL(connection_, write(BufferStringEqual(\"foo\"), false));\n  Buffer::OwnedImpl buffer(\"foo\");\n  tcp_upstream_->encodeData(buffer, false);\n}\n\nTEST_F(TcpUpstreamTest, V2Header) {\n  envoy::config::core::v3::ProxyProtocolConfig* proxy_config =\n      mock_router_filter_.route_entry_.connect_config_->mutable_proxy_protocol_config();\n  proxy_config->set_version(envoy::config::core::v3::ProxyProtocolConfig::V2);\n  mock_router_filter_.client_connection_.remote_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"1.2.3.4\", 5);\n  mock_router_filter_.client_connection_.local_address_ =\n      std::make_shared<Network::Address::Ipv4Instance>(\"4.5.6.7\", 8);\n\n  Buffer::OwnedImpl expected_data;\n  Extensions::Common::ProxyProtocol::generateProxyProtoHeader(\n      *proxy_config, mock_router_filter_.client_connection_, expected_data);\n\n  // encodeHeaders now results in the proxy proto header being sent.\n  EXPECT_CALL(connection_, write(BufferEqual(&expected_data), false));\n  tcp_upstream_->encodeHeaders(request_, false);\n\n  // Data is proxied as usual.\n  EXPECT_CALL(connection_, 
write(BufferStringEqual(\"foo\"), false));\n  Buffer::OwnedImpl buffer(\"foo\");\n  tcp_upstream_->encodeData(buffer, false);\n}\n\nTEST_F(TcpUpstreamTest, TrailersEndStream) {\n  // Swallow the headers.\n  tcp_upstream_->encodeHeaders(request_, false);\n\n  EXPECT_CALL(connection_, write(BufferStringEqual(\"\"), true));\n  Envoy::Http::TestRequestTrailerMapImpl trailers{{\"foo\", \"bar\"}};\n  tcp_upstream_->encodeTrailers(trailers);\n}\n\nTEST_F(TcpUpstreamTest, HeaderEndStreamHalfClose) {\n  EXPECT_CALL(connection_, write(BufferStringEqual(\"\"), true));\n  tcp_upstream_->encodeHeaders(request_, true);\n}\n\nTEST_F(TcpUpstreamTest, ReadDisable) {\n  EXPECT_CALL(connection_, readDisable(true));\n  tcp_upstream_->readDisable(true);\n\n  EXPECT_CALL(connection_, readDisable(false));\n  tcp_upstream_->readDisable(false);\n\n  // Once the connection is closed, don't touch it.\n  connection_.state_ = Network::Connection::State::Closed;\n  EXPECT_CALL(connection_, readDisable(_)).Times(0);\n  tcp_upstream_->readDisable(true);\n}\n\nTEST_F(TcpUpstreamTest, UpstreamEvent) {\n  // Make sure upstream disconnects result in stream reset.\n  EXPECT_CALL(mock_router_filter_,\n              onUpstreamReset(Envoy::Http::StreamResetReason::ConnectionTermination, \"\", _));\n  tcp_upstream_->onEvent(Network::ConnectionEvent::RemoteClose);\n}\n\nTEST_F(TcpUpstreamTest, Watermarks) {\n  EXPECT_CALL(mock_router_filter_, callbacks()).Times(AnyNumber());\n  EXPECT_CALL(mock_router_filter_.callbacks_, onDecoderFilterAboveWriteBufferHighWatermark());\n  tcp_upstream_->onAboveWriteBufferHighWatermark();\n\n  EXPECT_CALL(mock_router_filter_.callbacks_, onDecoderFilterBelowWriteBufferLowWatermark());\n  tcp_upstream_->onBelowWriteBufferLowWatermark();\n}\n\n} // namespace Tcp\n} // namespace Http\n} // namespace Upstreams\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/watchdog/abort_action/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"abort_action_test\",\n    srcs = [\"abort_action_test.cc\"],\n    extension_name = \"envoy.watchdog.abort_action\",\n    external_deps = [\n        \"abseil_synchronization\",\n    ],\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:guarddog_config_interface\",\n        \"//source/extensions/watchdog/abort_action:abort_action_lib\",\n        \"//source/extensions/watchdog/abort_action:config\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/watchdog/abort_action/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.watchdog.abort_action\",\n    tags = [\"skip_on_windows\"],\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:guarddog_config_interface\",\n        \"//source/extensions/watchdog/abort_action:abort_action_lib\",\n        \"//source/extensions/watchdog/abort_action:config\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/watchdog/abort_action/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/watchdog/abort_action/abort_action_test.cc",
    "content": "#include <csignal>\n#include <memory>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/watchdog/abort_action/v3alpha/abort_action.pb.h\"\n#include \"envoy/server/guarddog_config.h\"\n#include \"envoy/thread/thread.h\"\n\n#include \"extensions/watchdog/abort_action/abort_action.h\"\n#include \"extensions/watchdog/abort_action/config.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/notification.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace AbortAction {\nnamespace {\n\nusing AbortActionConfig = envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig;\n\nclass AbortActionTest : public testing::Test {\nprotected:\n  AbortActionTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"test\")),\n        context_({*api_, *dispatcher_, stats_, \"test\"}) {}\n\n  Stats::TestUtil::TestStore stats_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  Server::Configuration::GuardDogActionFactoryContext context_;\n  std::unique_ptr<Server::Configuration::GuardDogAction> action_;\n\n  // Used to synchronize with the main thread\n  absl::Notification child_ready_;\n};\n\nTEST_F(AbortActionTest, ShouldNotAbortIfNoTids) {\n  AbortActionConfig config;\n  config.mutable_wait_duration()->set_nanos(1000000);\n  action_ = std::make_unique<AbortAction>(config, context_);\n\n  // Create empty vector and run the action.\n  const auto now = api_->timeSource().monotonicTime();\n  const std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs = {};\n\n  // Should not signal or panic since there are no TIDs.\n  action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::KILL, tid_ltt_pairs, now);\n}\n\n// insufficient signal support on 
Windows.\nTEST_F(AbortActionTest, CanKillThread) {\n  AbortActionConfig config;\n  config.mutable_wait_duration()->set_seconds(1);\n  action_ = std::make_unique<AbortAction>(config, context_);\n\n  auto die_function = [this]() -> void {\n    // Create a thread that we'll kill\n    Thread::ThreadId tid;\n    Thread::ThreadPtr thread = api_->threadFactory().createThread([this, &tid]() -> void {\n      tid = api_->threadFactory().currentThreadId();\n\n      child_ready_.Notify();\n\n      dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit);\n    });\n\n    child_ready_.WaitForNotification();\n\n    // Create vector with child tid and run the action.\n    const auto now = api_->timeSource().monotonicTime();\n    const std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs = {{tid, now}};\n\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::KILL, tid_ltt_pairs, now);\n  };\n\n  EXPECT_DEATH(die_function(), \"\");\n}\n\nvoid handler(int sig, siginfo_t* /*siginfo*/, void* /*context*/) {\n  std::cout << \"Eating signal :\" << std::to_string(sig) << \". 
will ignore it.\" << std::endl;\n  signal(SIGABRT, SIG_IGN);\n}\n\nTEST_F(AbortActionTest, PanicsIfThreadDoesNotDie) {\n  AbortActionConfig config;\n  config.mutable_wait_duration()->set_seconds(1);\n  action_ = std::make_unique<AbortAction>(config, context_);\n\n  auto die_function = [this]() -> void {\n    // Create a thread that we try to kill\n    Thread::ThreadId tid;\n    Thread::ThreadPtr thread = api_->threadFactory().createThread([this, &tid]() -> void {\n      tid = api_->threadFactory().currentThreadId();\n\n      // Prepare signal handler to eat SIGABRT for the child thread.\n      struct sigaction saction;\n      std::memset(&saction, 0, sizeof(saction));\n      saction.sa_flags = SA_SIGINFO;\n      saction.sa_sigaction = &handler;\n      sigaction(SIGABRT, &saction, nullptr);\n\n      child_ready_.Notify();\n\n      dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit);\n    });\n\n    child_ready_.WaitForNotification();\n\n    // Create vector with child tid and run the action.\n    const auto now = api_->timeSource().monotonicTime();\n    const std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs = {{tid, now}};\n\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::KILL, tid_ltt_pairs, now);\n  };\n\n  EXPECT_DEATH(die_function(), \"aborting from Watchdog AbortAction instead\");\n}\n\n} // namespace\n} // namespace AbortAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/watchdog/abort_action/config_test.cc",
    "content": "#include \"envoy/extensions/watchdog/abort_action/v3alpha/abort_action.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/guarddog_config.h\"\n\n#include \"extensions/watchdog/abort_action/config.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace AbortAction {\nnamespace {\n\nTEST(AbortActionFactoryTest, CanCreateAction) {\n  auto factory =\n      Registry::FactoryRegistry<Server::Configuration::GuardDogActionFactory>::getFactory(\n          \"envoy.watchdog.abort_action\");\n  ASSERT_NE(factory, nullptr);\n\n  // Create config and mock context\n  envoy::config::bootstrap::v3::Watchdog::WatchdogAction config;\n  TestUtility::loadFromJson(\n      R\"EOF(\n        {\n          \"config\": {\n            \"name\": \"envoy.watchdog.abort_action\",\n            \"typed_config\": {\n\t      \"@type\": \"type.googleapis.com/udpa.type.v1.TypedStruct\",\n\t      \"type_url\": \"type.googleapis.com/envoy.extensions.watchdog.abort_action.v3alpha.AbortActionConfig\",\n\t      \"value\": {\n\t\t\"wait_duration\": \"2s\",\n\t      }\n            }\n          },\n        }\n      )EOF\",\n      config);\n\n  Stats::TestUtil::TestStore stats_;\n  Event::MockDispatcher dispatcher;\n  Api::ApiPtr api = Api::createApiForTest();\n  Server::Configuration::GuardDogActionFactoryContext context{*api, dispatcher, stats_, \"test\"};\n\n  EXPECT_NE(factory->createGuardDogActionFromProto(config, context), nullptr);\n}\n\n} // namespace\n} // namespace AbortAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/watchdog/profile_action/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\nload(\n    \"//test/extensions:extensions_build_system.bzl\",\n    \"envoy_extension_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_extension_cc_test(\n    name = \"profile_action_test\",\n    srcs = [\"profile_action_test.cc\"],\n    extension_name = \"envoy.watchdog.profile_action\",\n    external_deps = [\n        \"abseil_synchronization\",\n    ],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:guarddog_config_interface\",\n        \"//source/common/filesystem:directory_lib\",\n        \"//source/common/profiler:profiler_lib\",\n        \"//source/extensions/watchdog/profile_action:config\",\n        \"//source/extensions/watchdog/profile_action:profile_action_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_extension_cc_test(\n    name = \"config_test\",\n    srcs = [\"config_test.cc\"],\n    extension_name = \"envoy.watchdog.profile_action\",\n    deps = [\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:guarddog_config_interface\",\n        \"//source/extensions/watchdog/profile_action:config\",\n        \"//source/extensions/watchdog/profile_action:profile_action_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/extensions/watchdog/profile_action/config_test.cc",
    "content": "#include \"envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/guarddog_config.h\"\n\n#include \"extensions/watchdog/profile_action/config.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace ProfileAction {\nnamespace {\n\nTEST(ProfileActionFactoryTest, CanCreateAction) {\n  auto factory =\n      Registry::FactoryRegistry<Server::Configuration::GuardDogActionFactory>::getFactory(\n          \"envoy.watchdog.profile_action\");\n  ASSERT_NE(factory, nullptr);\n\n  // Create config and mock context\n  envoy::config::bootstrap::v3::Watchdog::WatchdogAction config;\n  TestUtility::loadFromJson(\n      R\"EOF(\n        {\n          \"config\": {\n            \"name\": \"envoy.watchdog.profile_action\",\n            \"typed_config\": {\n\t      \"@type\": \"type.googleapis.com/udpa.type.v1.TypedStruct\",\n\t      \"type_url\": \"type.googleapis.com/envoy.extensions.watchdog.profile_action.v3alpha.ProfileActionConfig\",\n\t      \"value\": {\n\t\t\"profile_duration\": \"2s\",\n\t\t\"profile_path\": \"/tmp/envoy/\",\n\t\t\"max_profiles\": \"20\"\n\t      }\n            }\n          },\n        }\n      )EOF\",\n      config);\n\n  Stats::TestUtil::TestStore stats;\n  Event::MockDispatcher dispatcher;\n  Api::ApiPtr api = Api::createApiForTest(stats);\n  Server::Configuration::GuardDogActionFactoryContext context{*api, dispatcher, stats, \"test\"};\n\n  EXPECT_CALL(dispatcher, createTimer_(_));\n  EXPECT_NE(factory->createGuardDogActionFromProto(config, context), nullptr);\n}\n\n} // namespace\n} // namespace ProfileAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/extensions/watchdog/profile_action/profile_action_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h\"\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/server/guarddog_config.h\"\n#include \"envoy/thread/thread.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/filesystem/directory.h\"\n#include \"common/profiler/profiler.h\"\n\n#include \"extensions/watchdog/profile_action/config.h\"\n#include \"extensions/watchdog/profile_action/profile_action.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/substitute.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Watchdog {\nnamespace ProfileAction {\nnamespace {\n\nclass ProfileActionTest : public testing::Test {\nprotected:\n  ProfileActionTest()\n      : time_system_(std::make_unique<Event::SimulatedTimeSystem>()),\n        api_(Api::createApiForTest(stats_, *time_system_)),\n        dispatcher_(api_->allocateDispatcher(\"test\")),\n        context_({*api_, *dispatcher_, stats_, \"test\"}), test_path_(generateTestPath()) {}\n\n  // Generates a unique path for a testcase.\n  static std::string generateTestPath() {\n    const ::testing::TestInfo* const test_info =\n        ::testing::UnitTest::GetInstance()->current_test_info();\n\n    std::string test_path = TestEnvironment::temporaryPath(\n        absl::StrJoin({test_info->test_suite_name(), test_info->name()}, \"/\"));\n    TestEnvironment::createPath(test_path);\n\n    return test_path;\n  }\n\n  // Counts the number of non-empty profiles found within a directory.\n  int 
countNumberOfProfileInPath(const std::string& path) {\n    int nonempty_profiles_found = 0;\n    Filesystem::Directory directory(path);\n\n    for (const Filesystem::DirectoryEntry& entry : directory) {\n      const std::string full_path = path + \"/\" + entry.name_;\n\n      // Count if its a non-empty file with the prefix of profiles.\n      if (entry.type_ == Filesystem::FileType::Regular &&\n          absl::StartsWith(entry.name_, \"ProfileAction\") &&\n          api_->fileSystem().fileSize(full_path) > 0) {\n        nonempty_profiles_found++;\n      }\n    }\n\n    return nonempty_profiles_found;\n  }\n\n  void waitForOutstandingNotify() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {\n    mutex_.Await(absl::Condition(\n        +[](int* outstanding_notifies) -> bool { return *outstanding_notifies > 0; },\n        &outstanding_notifies_));\n    outstanding_notifies_ -= 1;\n  }\n\n  Stats::TestUtil::TestStore stats_;\n  std::unique_ptr<Event::TestTimeSystem> time_system_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  Server::Configuration::GuardDogActionFactoryContext context_;\n  std::unique_ptr<Server::Configuration::GuardDogAction> action_;\n  // Path for the test case to dump any profiles to.\n  const std::string test_path_;\n  // Used to synchronize with the dispatch thread\n  absl::Mutex mutex_;\n  int outstanding_notifies_ ABSL_GUARDED_BY(mutex_) = 0;\n};\n\nTEST_F(ProfileActionTest, CanDoSingleProfile) {\n  // Create configuration.\n  envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config;\n  config.set_profile_path(test_path_);\n  config.mutable_profile_duration()->set_seconds(1);\n\n  // Create the ProfileAction before we start running the dispatcher\n  // otherwise the timer created will in ProfileActions ctor will\n  // not be thread safe.\n  action_ = std::make_unique<ProfileAction>(config, context_);\n  Thread::ThreadPtr thread = api_->threadFactory().createThread(\n      [this]() -> void { 
dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); });\n\n  // Create vector of relevant threads\n  const auto now = api_->timeSource().monotonicTime();\n  std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs = {\n      {Thread::ThreadId(10), now}};\n\n  // Check that we can do at least a single profile\n  dispatcher_->post([&tid_ltt_pairs, &now, this]() -> void {\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now);\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 1;\n  });\n\n  absl::MutexLock lock(&mutex_);\n  waitForOutstandingNotify();\n  time_system_->advanceTimeWait(std::chrono::seconds(2));\n\n  dispatcher_->exit();\n  thread->join();\n\n#ifdef PROFILER_AVAILABLE\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 1);\n#else\n  // Profiler won't run in this case, so there should be no files generated.\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 0);\n#endif\n}\n\nTEST_F(ProfileActionTest, CanDoMultipleProfiles) {\n  // Create configuration.\n  envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config;\n  config.set_profile_path(test_path_);\n  config.mutable_profile_duration()->set_seconds(1);\n  // Create the ProfileAction before we start running the dispatcher\n  // otherwise the timer created will in ProfileActions ctor will\n  // not be thread safe.\n  action_ = std::make_unique<ProfileAction>(config, context_);\n  Thread::ThreadPtr thread = api_->threadFactory().createThread(\n      [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); });\n\n  // Create vector of relevant threads\n  const auto now = api_->timeSource().monotonicTime();\n  std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs = {\n      {Thread::ThreadId(10), now}};\n\n  // Check that we can do at least a single profile\n  dispatcher_->post([&tid_ltt_pairs, &now, this]() -> void {\n    
action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now);\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 1;\n  });\n\n  absl::MutexLock lock(&mutex_);\n  waitForOutstandingNotify();\n  time_system_->advanceTimeWait(std::chrono::seconds(2));\n\n#ifdef PROFILER_AVAILABLE\n  ASSERT_EQ(countNumberOfProfileInPath(test_path_), 1);\n#else\n  // Profiler won't run in this case, so there should be no files generated.\n  ASSERT_EQ(countNumberOfProfileInPath(test_path_), 0);\n#endif\n\n  // Check we can do multiple profiles\n  dispatcher_->post([&tid_ltt_pairs, &now, this]() -> void {\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now);\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 1;\n  });\n\n  waitForOutstandingNotify();\n  time_system_->advanceTimeWait(std::chrono::seconds(2));\n\n  dispatcher_->exit();\n  thread->join();\n\n#ifdef PROFILER_AVAILABLE\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 2);\n#else\n  // Profiler won't run in this case, so there should be no files generated.\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 0);\n#endif\n}\n\nTEST_F(ProfileActionTest, CannotTriggerConcurrentProfiles) {\n  // Create configuration.\n  envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config;\n  TestUtility::loadFromJson(absl::Substitute(R\"EOF({ \"profile_path\": \"$0\", })EOF\", test_path_),\n                            config);\n  // Create the ProfileAction before we start running the dispatcher\n  // otherwise the timer created will in ProfileActions ctor will\n  // not be thread safe.\n  action_ = std::make_unique<ProfileAction>(config, context_);\n  Thread::ThreadPtr thread = api_->threadFactory().createThread(\n      [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); });\n\n  // Create vector of relevant threads\n  const auto now = api_->timeSource().monotonicTime();\n  
std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs = {\n      {Thread::ThreadId(10), now}};\n\n  dispatcher_->post([&, this]() -> void {\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now);\n\n    // This subsequent call should fail since the one prior starts a profile.\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now);\n\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 1;\n  });\n\n  absl::MutexLock lock(&mutex_);\n  waitForOutstandingNotify();\n  time_system_->advanceTimeWait(std::chrono::seconds(6));\n\n  dispatcher_->exit();\n  thread->join();\n#ifdef PROFILER_AVAILABLE\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 1);\n#else\n  // Profiler won't run in this case, so there should be no files generated.\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 0);\n#endif\n}\n\nTEST_F(ProfileActionTest, ShouldNotProfileIfDirectoryDoesNotExist) {\n  // Create configuration.\n  envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config;\n  const std::string nonexistant_path = test_path_ + \"/nonexistant_dir/\";\n  TestUtility::loadFromJson(\n      absl::Substitute(R\"EOF({ \"profile_path\": \"$0\", })EOF\", nonexistant_path), config);\n  // Create the ProfileAction before we start running the dispatcher\n  // otherwise the timer created will in ProfileActions ctor will\n  // not be thread safe.\n  action_ = std::make_unique<ProfileAction>(config, context_);\n  Thread::ThreadPtr thread = api_->threadFactory().createThread(\n      [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); });\n\n  // Create vector of relevant threads\n  const auto now = api_->timeSource().monotonicTime();\n  std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs = {\n      {Thread::ThreadId(10), now}};\n\n  dispatcher_->post([&, this]() -> void {\n    
action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now);\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 1;\n  });\n\n  absl::MutexLock lock(&mutex_);\n  waitForOutstandingNotify();\n  time_system_->advanceTimeWait(std::chrono::seconds(6));\n\n  dispatcher_->exit();\n  thread->join();\n\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 0);\n  EXPECT_FALSE(api_->fileSystem().directoryExists(nonexistant_path));\n}\n\nTEST_F(ProfileActionTest, ShouldNotProfileIfNoTids) {\n  // Create configuration.\n  envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config;\n  TestUtility::loadFromJson(absl::Substitute(R\"EOF({ \"profile_path\": \"$0\"})EOF\", test_path_),\n                            config);\n  // Create the ProfileAction before we start running the dispatcher\n  // otherwise the timer created will in ProfileActions ctor will\n  // not be thread safe.\n  action_ = std::make_unique<ProfileAction>(config, context_);\n  Thread::ThreadPtr thread = api_->threadFactory().createThread(\n      [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); });\n\n  // Test that no profiles are created given empty vector of valid TIDs\n  dispatcher_->post([this]() -> void {\n    std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs;\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs,\n                 api_->timeSource().monotonicTime());\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 1;\n  });\n\n  absl::MutexLock lock(&mutex_);\n  waitForOutstandingNotify();\n  time_system_->advanceTimeWait(std::chrono::seconds(2));\n\n  dispatcher_->exit();\n  thread->join();\n\n  // No profiles should have been created\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 0);\n}\n\nTEST_F(ProfileActionTest, ShouldSaturatedMaxProfiles) {\n  // Create configuration that we'll run until it saturates.\n  
envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config;\n  config.set_profile_path(test_path_);\n  config.mutable_profile_duration()->set_seconds(1);\n  config.set_max_profiles(1);\n\n  // Create the ProfileAction before we start running the dispatcher\n  // otherwise the timer created will in ProfileActions ctor will\n  // not be thread safe.\n  action_ = std::make_unique<ProfileAction>(config, context_);\n  Thread::ThreadPtr thread = api_->threadFactory().createThread(\n      [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); });\n\n  // Create vector of relevant threads\n  const auto now = api_->timeSource().monotonicTime();\n  std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs = {\n      {Thread::ThreadId(10), now}};\n\n  dispatcher_->post([&, this]() -> void {\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now);\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 1;\n  });\n\n  absl::MutexLock lock(&mutex_);\n  waitForOutstandingNotify();\n  time_system_->advanceTimeWait(std::chrono::seconds(2));\n\n  // check that the profile is created!\n#ifdef PROFILER_AVAILABLE\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 1);\n#else\n  // Profiler won't run in this case, so there should be no files generated.\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 0);\n#endif\n\n  // Do another run of the watchdog action. 
It shouldn't have run again.\n  dispatcher_->post([&, this]() -> void {\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now);\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 1;\n  });\n\n  waitForOutstandingNotify();\n\n  // If the callback had scheduled (it shouldn't as we've saturated the profile\n  // count) advancing time to make it run.\n  time_system_->advanceTimeWait(std::chrono::seconds(2));\n  dispatcher_->exit();\n  thread->join();\n\n#ifdef PROFILER_AVAILABLE\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 1);\n#else\n  // Profiler won't run in this case, so there should be no files generated.\n  EXPECT_EQ(countNumberOfProfileInPath(test_path_), 0);\n#endif\n}\n\n// The attempted counter should be updated on profile attempts that don't\n// interfere with an existing profile the action is running.\n// The successfully captured profile should be updated only if we captured the profile.\nTEST_F(ProfileActionTest, ShouldUpdateCountersCorrectly) {\n  envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config;\n  config.set_profile_path(test_path_);\n  config.mutable_profile_duration()->set_seconds(1);\n\n  // Create the ProfileAction before we start running the dispatcher\n  // otherwise the timer created will in ProfileActions ctor will\n  // not be thread safe.\n  action_ = std::make_unique<ProfileAction>(config, context_);\n  Thread::ThreadPtr thread = api_->threadFactory().createThread(\n      [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); });\n  std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs;\n\n  // This will fail since no TIDs are provided.\n  dispatcher_->post([this, &tid_ltt_pairs]() -> void {\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs,\n                 api_->timeSource().monotonicTime());\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 
1;\n  });\n\n  {\n    absl::MutexLock lock(&mutex_);\n    waitForOutstandingNotify();\n    time_system_->advanceTimeWait(std::chrono::seconds(2));\n  }\n\n  // Check the counters are correct on a fail\n  EXPECT_EQ(TestUtility::findCounter(stats_, \"test.profile_action.attempted\")->value(), 1);\n  EXPECT_EQ(TestUtility::findCounter(stats_, \"test.profile_action.successfully_captured\")->value(),\n            0);\n\n  // Run a profile that will succeed.\n  const auto now = api_->timeSource().monotonicTime();\n  tid_ltt_pairs.emplace_back(Thread::ThreadId(10), now);\n\n  dispatcher_->post([this, &tid_ltt_pairs, &now]() -> void {\n    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now);\n    absl::MutexLock lock(&mutex_);\n    outstanding_notifies_ += 1;\n  });\n\n  {\n    absl::MutexLock lock(&mutex_);\n    waitForOutstandingNotify();\n    time_system_->advanceTimeWait(std::chrono::seconds(2));\n  }\n\n#ifdef PROFILER_AVAILABLE\n  // Check the counters are correct on success\n  EXPECT_EQ(TestUtility::findCounter(stats_, \"test.profile_action.attempted\")->value(), 2);\n  EXPECT_EQ(TestUtility::findCounter(stats_, \"test.profile_action.successfully_captured\")->value(),\n            1);\n#else\n  EXPECT_EQ(TestUtility::findCounter(stats_, \"test.profile_action.attempted\")->value(), 2);\n  EXPECT_EQ(TestUtility::findCounter(stats_, \"test.profile_action.successfully_captured\")->value(),\n            0);\n#endif\n\n  dispatcher_->exit();\n  thread->join();\n}\n\n} // namespace\n} // namespace ProfileAction\n} // namespace Watchdog\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/fuzz/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_proto_library(\n    name = \"common_proto\",\n    srcs = [\"common.proto\"],\n    deps = [\"@envoy_api//envoy/config/core/v3:pkg\"],\n)\n\nexports_files([\"headers.dict\"])\n\nenvoy_cc_test_library(\n    name = \"main\",\n    srcs = [\"main.cc\"],\n    external_deps = [\n        \"abseil_symbolize\",\n    ],\n    deps = [\n        \":fuzz_runner_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n    ] + select({\n        \"//bazel:disable_signal_trace\": [],\n        \"//conditions:default\": [\"//source/common/signal:sigaction_lib\"],\n    }),\n)\n\nenvoy_cc_test_library(\n    name = \"fuzz_runner_lib\",\n    srcs = [\"fuzz_runner.cc\"],\n    hdrs = [\"fuzz_runner.h\"],\n    deps = [\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/event:libevent_lib\",\n        \"//source/common/http/http2:codec_lib\",\n        \"//source/exe:process_wide_lib\",\n        \"//test/test_common:environment_lib\",\n        \"@com_github_google_libprotobuf_mutator//:libprotobuf_mutator\",\n        \"@org_llvm_releases_compiler_rt//:fuzzed_data_provider\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    hdrs = [\"utility.h\"],\n    deps = [\n        \":common_proto_cc_proto\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/network:resolver_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//test/common/stream_info:test_util\",\n        
\"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/fuzz/README.md",
    "content": "# Envoy fuzz testing\n\nEnvoy is fuzz tested via [OSS-Fuzz](https://github.com/google/oss-fuzz). We follow the best\npractices described in the OSS-Fuzz [ideal integration\npage](https://github.com/google/oss-fuzz/blob/master/docs/ideal_integration.md).\n\n## Test environment\n\nTests should be unit test-like, fast and not require writable access to the filesystem (beyond\ntemporary files), network (including loopback) or multiple processes. See the [ClusterFuzz\nenvironment](https://github.com/google/oss-fuzz/blob/master/docs/fuzzer_environment.md) for further\ndetails.\n\n## Corpus\n\nEvery fuzz test must comes with a *corpus*. A corpus is a set of files that provide example valid\ninputs. Fuzzing libraries will use this seed corpus to drive mutations, e.g. via evolutionary\nfuzzing, to explore interesting parts of the state space.\n\nThe corpus also acts as a quick regression test for evaluating the fuzz tests without the help of a\nfuzzing library.\n\nThe corpus is located in a directory underneath the fuzz test. E.g. suppose you have\n[`test/common/common/base64_fuzz_test.cc`](../../test/common/common/base64_fuzz_test.cc), a corpus\ndirectory [`test/common/common/base64_corpus`](../../test/common/common/base64_corpus) should exist,\npopulated with files that will act as the corpus.\n\n## Test driver\n\nYour fuzz test will ultimately be driven by a simple interface:\n\n```c++\nDEFINE_FUZZER(const uint8_t* data, size_t size) {\n  // Your test code goes here\n}\n```\n\nIt is up to your test `DEFINE_FUZZER` implementation to map this buffer of data to meaningful\nsemantics, e.g. a stream of network bytes or a protobuf binary input.\n\nThe fuzz test will be executed in three environments:\n\n1. Under Envoy's fuzz test driver when run in the Envoy repository with `bazel test\n   //test/path/to/some_fuzz_test`. This provides a litmus test indicating that the test passes CI\n   and basic sanitizers just on the supplied corpus.\n   \n1. 
Using the libFuzzer fuzzing engine and ASAN when run in the Envoy repository with `bazel run\n   //test/path/to/some_fuzz_test --config asan-fuzzer`. This is where real fuzzing\n   takes place locally. The built binary can take libFuzzer command-line flags, including the number\n   of runs and the maximum input length.\n\n3. Via fuzzing library test drivers in OSS-Fuzz. This is where the real fuzzing takes place on a VM\n   cluster and the seed corpus is used by fuzzers to explore the state space.\n\n## Defining a new fuzz test\n\n1. Write a fuzz test module implementing the `DEFINE_FUZZER` interface. E.g.\n   [`test/common/common/base64_fuzz_test.cc`](../../test/common/common/base64_fuzz_test.cc).\n\n2. Define an `envoy_cc_fuzz_test` target, see `base64_fuzz_test` in\n   [`test/common/common/BUILD`](../../test/common/common/BUILD).\n\n3. Create the seed corpus directory and populate it with at least one example input. E.g.\n   [`test/common/common/base64_corpus`](../../test/common/common/base64_corpus).\n\n4. Run the `envoy_cc_fuzz_test` target to test against the seed corpus. E.g. `bazel test\n   //test/common/common:base64_fuzz_test`.\n   \n5. Run the `*_fuzz_test` target against libFuzzer. E.g. `bazel run\n   //test/common/common:base64_fuzz_test --config asan-fuzzer`.\n   \n## Protobuf fuzz tests\n\nWe also have integration with [libprotobuf-mutator](https://github.com/google/libprotobuf-mutator),\nallowing tests built on a protobuf input to work directly with a typed protobuf object, rather than\na raw buffer. The interface to this is as described at\nhttps://github.com/google/libprotobuf-mutator#integrating-with-libfuzzer:\n\n```c++\nDEFINE_PROTO_FUZZER(const MyMessageType& input) {\n  // Your test code goes here\n}\n```\n\n## Running fuzz tests locally\n\nWithin the Envoy repository, we have various `*_fuzz_test` targets. 
When run under `bazel test`,\nthese will exercise the corpus as inputs but not actually link and run against any fuzzer (e.g.\n[`libfuzzer`](https://llvm.org/docs/LibFuzzer.html)).\n\nTo get actual fuzzing performed, the `*_fuzz_test` target needs to be built with `--config\nasan-fuzzer`. This links the target to the libFuzzer fuzzing engine. This is recommended when\nwriting new fuzz tests to check if they pick up any low hanging fruit (i.e. what you can find on\nyour local machine vs. the fuzz cluster). The binary takes the location of the seed corpus\ndirectory. Fuzzing continues indefinitely until a bug is found or the number of iterations it should\nperform is specified with `-runs`. For example,\n\n`bazel run //test/common/common:base64_fuzz_test --config asan-fuzzer\n-- test/common/common/base64_corpus -runs=1000`\n\nThe fuzzer prints information to stderr:\n\n```\nINFO: Seed: 774517650\nINFO: Loaded 1 modules   (1090433 guards): 1090433 [0x8875600, 0x8c9e404), \nINFO: -max_len is not provided; libFuzzer will not generate inputs larger than 4096 bytes\nINFO: A corpus is not provided, starting from an empty corpus\n#2\tINITED cov: 47488 ft: 30 corp: 1/1b lim: 4 exec/s: 0 rss: 139Mb\n#5\tNEW    cov: 47499 ft: 47 corp: 2/3b lim: 4 exec/s: 0 rss: 139Mb L: 2/2 MS: 3 ChangeByte-ShuffleBytes-InsertByte-\n...\n#13145\tNEW    cov: 47506 ft: 205 corp: 17/501b lim: 128 exec/s: 6572 rss: 150Mb L: 128/128 MS: 1 CrossOver-\n#16384\tpulse  cov: 47506 ft: 205 corp: 17/501b lim: 156 exec/s: 8192 rss: 151Mb\n#32768\tpulse  cov: 47506 ft: 205 corp: 17/501b lim: 317 exec/s: 6553 rss: 157Mb\n#39442\tNEW    cov: 47506 ft: 224 corp: 18/890b lim: 389 exec/s: 6573 rss: 160Mb L: 389/389 MS: 2 InsertByte-CrossOver-\n```\n\nEach output line reports statistics such as:\n* `cov:` -- the number of code blocks or edges in the program's control flow graph observed so\n  far. 
The number grows as the fuzzer finds interesting inputs.\n* `exec/s:` -- the number of fuzzer iterations per second.\n* `corp:` -- size of the corpus in memory (number of elements, size in bytes)\n* `rss:` -- memory consumption.\n\n\n## Running fuzz tests in OSS-Fuzz\n\nFuzzing against other engines is also performed by the\n[oss-fuzz](https://github.com/google/oss-fuzz) project, with results provided on the [ClusterFuzz\ndashboard](https://oss-fuzz.com).\n\nIt is possible to run against fuzzers locally by using the `oss-fuzz` Docker image. This will\nprovide fuzzing against other fuzzing engines.\n\n1. `git clone https://github.com/google/oss-fuzz.git`\n2. `cd oss-fuzz`\n3. `python infra/helper.py build_image envoy`\n4. `python infra/helper.py build_fuzzers --sanitizer=address envoy <path to envoy source tree>`. The\n   path to the Envoy source tree can be omitted if you want to consume Envoy from GitHub at\n   HEAD/master.\n5. `python infra/helper.py run_fuzzer envoy <fuzz test target>`. The fuzz test target will be the\n   test name, e.g. `server_fuzz_test`.\n\nIf there is a crash, `run_fuzzer` will emit a log line along the lines of:\n\n```\nartifact_prefix='./'; Test unit written to ./crash-db2ee19f50162f2079dc0c5ba24fd0e3dcb8b9bc\n```\n\nThe test input can be found in `build/out/envoy`, e.g.\n`build/out/envoy/crash-db2ee19f50162f2079dc0c5ba24fd0e3dcb8b9bc`. For protobuf fuzz tests, this will\nbe in text proto format.\n\nTo test and validate fixes to the crash, add it to the corpus in the Envoy source directory for the\ntest, e.g. for `server_fuzz_test` this is `test/server/corpus`, and run `bazel test`, e.g. `bazel\ntest //test/server:server_fuzz_test`. 
These crash cases can be added to the corpus in followup PRs\nto provide fuzzers some interesting starting points for invalid inputs.\n\n## Coverage reports\n\nCoverage reports, where individual lines are annotated with fuzzing hit counts, are a useful way to\nunderstand the scope and efficacy of the Envoy fuzzers. You can generate fuzz coverage reports both locally, and using the OSS-Fuzz infrastructure.\n\nTo generate fuzz coverage reports locally (see [Coverage builds](bazel/README.md)), run\n```\nFUZZ_COVERAGE=true test/run_envoy_bazel_coverage.sh\n```\nThis generates a coverage report after running the fuzz targets for one minute against the fuzzing engine libfuzzer and using the checked-in corpus as an initial seed.\n\nOtherwise, you can generate reports from the\nClusterFuzz corpus following the general ClusterFuzz [instructions for profiling\nsetup](https://github.com/google/oss-fuzz/blob/master/docs/code_coverage.md).\n\nTo filter out unrelated artifacts (e.g. Bazel cache, libfuzzer src), the following profile command\ncan be used:\n\n```bash\npython infra/helper.py profile envoy -- \\\n  -ignore-filename-regex='proc/self/cwd/bazel-out.*' \\\n  -ignore-filename-regex='proc/self/cwd/external.*' \\\n  -ignore-filename-regex='proc/self/cwd/test.*' \\\n  -ignore-filename-regex='.*\\.cache.*' \\\n  -ignore-filename-regex='src/libfuzzer.*'\n```\n"
  },
  {
    "path": "test/fuzz/common.proto",
    "content": "syntax = \"proto3\";\n\npackage test.fuzz;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/core/v3/address.proto\";\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/wrappers.proto\";\n\nimport \"validate/validate.proto\";\n\n// Common fuzzing input types.\n\nmessage Headers {\n  repeated envoy.config.core.v3.HeaderValue headers = 1;\n}\n\nmessage Metadata {\n  map<string, string> metadata = 1;\n}\n\nmessage HttpBody {\n  // The bytes that will be used as the request body.\n  repeated string data = 1 [(validate.rules).repeated .min_items = 1];\n}\n\n// HttpBody cannot efficiently create serialized protos.\n// Use ProtoBody instead to test grpc data.\nmessage ProtoBody {\n  // The proto message that will be serialized and used as the request body.\n  google.protobuf.Any message = 1 [(validate.rules).any.required = true];\n\n  // The size (in bytes) of each buffer when forming the requests.\n  uint64 chunk_size = 2 [(validate.rules).uint64 = {gt: 0, lt: 8192}];\n}\n\nmessage HttpData {\n  Headers headers = 1;\n\n  oneof body {\n    HttpBody http_body = 2;\n    ProtoBody proto_body = 4;\n  }\n\n  Headers trailers = 3;\n}\n\nmessage StreamInfo {\n  envoy.config.core.v3.Metadata dynamic_metadata = 1;\n  uint64 start_time = 2;\n  google.protobuf.UInt32Value response_code = 3;\n  envoy.config.core.v3.Metadata upstream_metadata = 4;\n  string requested_server_name = 5;\n  envoy.config.core.v3.Address address = 6;\n  envoy.config.core.v3.Address upstream_local_address = 7;\n}\n"
  },
  {
    "path": "test/fuzz/fuzz_runner.cc",
    "content": "#include \"test/fuzz/fuzz_runner.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/common/utility.h\"\n#include \"common/event/libevent.h\"\n#include \"common/http/http2/codec_impl.h\"\n\n#include \"exe/process_wide.h\"\n\n#include \"test/test_common/environment.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\nspdlog::level::level_enum Runner::log_level_;\n\nuint32_t PerTestEnvironment::test_num_;\n\nPerTestEnvironment::PerTestEnvironment()\n    : per_test_num_(test_num_++),\n      test_tmpdir_(TestEnvironment::temporaryPath(fmt::format(\"fuzz_{}\", per_test_num_))),\n      test_id_(std::to_string(HashUtil::xxHash64(test_tmpdir_))) {\n  TestEnvironment::createPath(test_tmpdir_);\n}\n\nPerTestEnvironment::~PerTestEnvironment() { TestEnvironment::removePath(test_tmpdir_); }\n\nvoid Runner::setupEnvironment(int argc, char** argv, spdlog::level::level_enum default_log_level) {\n  // We hold on to process_wide to provide RAII cleanup of process-wide\n  // state.\n  ProcessWide process_wide;\n  TestEnvironment::initializeOptions(argc, argv);\n\n  const auto environment_log_level = TestEnvironment::getOptions().logLevel();\n  // We only override the default log level if it looks like we're debugging;\n  // otherwise the default environment log level might override the default and\n  // spew too much when running under a fuzz engine.\n  log_level_ =\n      environment_log_level <= spdlog::level::debug ? environment_log_level : default_log_level;\n  // This needs to work in both the Envoy test shim and oss-fuzz build environments, so we can't\n  // allocate in main.cc. 
Instead, just create these non-PODs to live forever, since we don't get a\n  // shutdown hook (see\n  // https://github.com/llvm-mirror/compiler-rt/blob/master/lib/fuzzer/FuzzerInterface.h).\n  static auto* lock = new Thread::MutexBasicLockable();\n  static auto* logging_context =\n      new Logger::Context(log_level_, TestEnvironment::getOptions().logFormat(), *lock, false);\n  UNREFERENCED_PARAMETER(logging_context);\n\n  // Suppress all libprotobuf non-fatal logging as long as this object exists.\n  // For fuzzing, this prevents logging when parsing text-format protos fails,\n  // deprecated fields are used, etc.\n  // https://github.com/protocolbuffers/protobuf/blob/204f99488ce1ef74565239cf3963111ae4c774b7/src/google/protobuf/stubs/logging.h#L223\n  if (log_level_ > spdlog::level::debug) {\n    ABSL_ATTRIBUTE_UNUSED static auto* log_silencer = new Protobuf::LogSilencer();\n  }\n}\n\n} // namespace Fuzz\n} // namespace Envoy\n\n// LLVMFuzzerInitialize() is called by LibFuzzer once before fuzzing starts.\n// NOLINTNEXTLINE(readability-identifier-naming)\nextern \"C\" int LLVMFuzzerInitialize(int* argc, char*** argv) {\n  // Before parsing gmock flags, set the default value of flag --gmock_verbose to \"error\".\n  // This suppresses logs from NiceMock objects, which can be noisy and provide little value.\n  testing::GMOCK_FLAG(verbose) = \"error\";\n  testing::InitGoogleMock(argc, *argv);\n  Envoy::Fuzz::Runner::setupEnvironment(1, *argv, spdlog::level::critical);\n  return 0;\n}\n"
  },
  {
    "path": "test/fuzz/fuzz_runner.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <cwchar>\n\n// Bring in DEFINE_PROTO_FUZZER definition as per\n// https://github.com/google/libprotobuf-mutator#integrating-with-libfuzzer.\n#include \"libprotobuf_mutator/src/libfuzzer/libfuzzer_macro.h\"\n// Bring in FuzzedDataProvider, see\n// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider\n#include \"fuzzer/FuzzedDataProvider.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Fuzz {\n\n// Each test may need a sub-environment of that provided by //test/test_common:environment_lib,\n// since each fuzz invocation runs in the same process, but might want a distinct tmp sandbox for\n// example.\nclass PerTestEnvironment {\npublic:\n  PerTestEnvironment();\n  ~PerTestEnvironment();\n\n  std::string temporaryPath(const std::string& path) const { return test_tmpdir_ + \"/\" + path; }\n  const std::string& testId() const { return test_id_; }\n\nprivate:\n  static uint32_t test_num_;\n  const uint32_t per_test_num_;\n  const std::string test_tmpdir_;\n  const std::string test_id_;\n};\n\nclass Runner {\npublic:\n  /**\n   * Setup the environment for fuzz testing. 
Multiple execute() runs may be\n   * invoked in this environment.\n   * @param argc number of command-line args.\n   * @param argv array of command-line args.\n   * @param default_loglevel default log level (overridable with -l).\n   */\n  static void setupEnvironment(int argc, char** argv, spdlog::level::level_enum default_log_level);\n\n  /**\n   * @return spdlog::level::level_enum the log level for the fuzzer.\n   */\n  static spdlog::level::level_enum logLevel() { return log_level_; }\n\nprivate:\n  static spdlog::level::level_enum log_level_;\n};\n\n} // namespace Fuzz\n} // namespace Envoy\n\n// Fuzz test startup hook, see\n// https://llvm.org/docs/LibFuzzer.html#startup-initialization.\nextern \"C\" int LLVMFuzzerInitialize(int* argc, char*** argv);\n\n// See https://llvm.org/docs/LibFuzzer.html#fuzz-target.\nextern \"C\" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);\n\n#ifdef PERSISTENT_FUZZER\n#define PERSISTENT_FUZZ_VAR static\n#else\n#define PERSISTENT_FUZZ_VAR\n#endif\n\n#define DEFINE_TEST_ONE_INPUT_IMPL                                                                 \\\n  extern \"C\" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {                        \\\n    EnvoyTestOneInput(data, size);                                                                 \\\n    return 0;                                                                                      \\\n  }\n\n/**\n * Define a fuzz test. This should be used to define a fuzz_cc_fuzz_test_target with:\n *\n * DEFINE_FUZZER(const uint8_t* buf, size_t len) {\n *   // Do some test stuff with buf/len.\n *   return 0;\n * }\n */\n#define DEFINE_FUZZER                                                                              \\\n  static void EnvoyTestOneInput(const uint8_t* buf, size_t len);                                   \\\n  DEFINE_TEST_ONE_INPUT_IMPL                                                                       \\\n  static void EnvoyTestOneInput\n"
  },
  {
    "path": "test/fuzz/headers.dict",
    "content": "# Dictionary to populate HeaderValueOptions\n\":path\"\n\":method\"\n\":scheme\"\n\":status\"\n\":authority\"\n\"host\"\n\"keep-alive\"\n\":protocol\"\n\"set-cookie\"\n\"upgrade\"\n\"via\"\n\"te\"\n\"user-agent\"\n\"content-length\"\n\"chunked\"\n\"transfer-encoding\"\n"
  },
  {
    "path": "test/fuzz/main.cc",
    "content": "// This is an Envoy test driver for fuzz tests. Unlike regular Envoy tests, we\n// operate in a more restricted environment, comparable to what oss-fuzz uses. We\n// use the same Envoy::Fuzz::Runner library that oss-fuzz\n// (https://github.com/google/oss-fuzz) links against, providing the ability to\n// develop tests purely inside the Envoy repository and also to regression test\n// fuzz tests\n// (https://github.com/google/oss-fuzz/blob/master/docs/ideal_integration.md#regression-testing).\n//\n// Below, we use a similar approach to\n// https://github.com/grpc/grpc/blob/master/test/core/util/fuzzer_corpus_test.cc,\n// where gtest parameterized tests are used to iterate over the corpus. This is\n// neat, as we get features likes --gtest_filter to select over the corpus\n// and the reporting features of gtest.\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/debugging/symbolize.h\"\n\n#ifdef ENVOY_HANDLE_SIGNALS\n#include \"common/signal/signal_action.h\"\n#endif\n\n#include \"gtest/gtest.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace {\n\n// List of paths for files in the test corpus.\nstd::vector<std::string> test_corpus_;\n\nclass FuzzerCorpusTest : public testing::TestWithParam<std::string> {\nprotected:\n  FuzzerCorpusTest() : api_(Api::createApiForTest()) {}\n\n  Api::ApiPtr api_;\n};\n\nTEST_P(FuzzerCorpusTest, RunOneCorpusFile) {\n  ENVOY_LOG_MISC(info, \"Corpus file: {}\", GetParam());\n  const std::string buf = api_->fileSystem().fileReadToEnd(GetParam());\n  // Everything from here on is the same as under the fuzzer lib.\n  LLVMFuzzerTestOneInput(reinterpret_cast<const uint8_t*>(buf.c_str()), buf.size());\n}\n\nINSTANTIATE_TEST_SUITE_P(CorpusExamples, FuzzerCorpusTest, testing::ValuesIn(test_corpus_));\n\n} // namespace\n} // namespace Envoy\n\nint 
main(int argc, char** argv) {\n  Envoy::TestEnvironment::initializeTestMain(argv[0]);\n\n  // Expected usage: <test path> <corpus paths..> [other gtest flags]\n  RELEASE_ASSERT(argc >= 2, \"\");\n  // Consider any file after the test path which doesn't have a - prefix to be a corpus entry.\n  uint32_t input_args = 0;\n  // Ensure we cleanup API resources before we jump into the tests, the test API creates a singleton\n  // time system that we don't want to leak into gtest.\n  {\n    Envoy::Api::ApiPtr api = Envoy::Api::createApiForTest();\n    for (int i = 1; i < argc; ++i) {\n      const std::string arg{argv[i]};\n      if (arg.empty() || arg[0] == '-') {\n        break;\n      }\n      ++input_args;\n      // Outputs from envoy_directory_genrule might be directories or we might\n      // have artisanal files.\n      if (api->fileSystem().directoryExists(arg)) {\n        const auto paths = Envoy::TestUtility::listFiles(arg, true);\n        Envoy::test_corpus_.insert(Envoy::test_corpus_.begin(), paths.begin(), paths.end());\n      } else {\n        Envoy::test_corpus_.emplace_back(arg);\n      }\n    }\n  }\n  argc -= input_args;\n  for (ssize_t i = 1; i < argc; ++i) {\n    argv[i] = argv[i + input_args];\n  }\n\n  testing::InitGoogleTest(&argc, argv);\n  testing::InitGoogleMock(&argc, argv);\n  Envoy::Fuzz::Runner::setupEnvironment(argc, argv, spdlog::level::info);\n\n  return RUN_ALL_TESTS();\n}\n"
  },
  {
    "path": "test/fuzz/utility.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/common/empty_string.h\"\n#include \"common/network/resolver_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/common/stream_info/test_util.h\"\n#include \"test/fuzz/common.pb.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"nghttp2/nghttp2.h\"\n\n// Strong assertion that applies across all compilation modes and doesn't rely\n// on gtest, which only provides soft fails that don't trip oss-fuzz failures.\n#define FUZZ_ASSERT(x) RELEASE_ASSERT(x, \"\")\n\nnamespace Envoy {\nnamespace Fuzz {\n\n// The HeaderMap code assumes that input does not contain certain characters, and\n// this is validated by the HTTP parser. Some fuzzers will create strings with\n// these characters, however, and this creates not very interesting fuzz test\n// failures as an assertion is rapidly hit in the LowerCaseString constructor\n// before we get to anything interesting.\n//\n// This method will replace any of those characters found with spaces.\ninline std::string replaceInvalidCharacters(absl::string_view string) {\n  std::string filtered;\n  filtered.reserve(string.length());\n  for (const char& c : string) {\n    switch (c) {\n    case '\\0':\n      FALLTHRU;\n    case '\\r':\n      FALLTHRU;\n    case '\\n':\n      filtered.push_back(' ');\n      break;\n    default:\n      filtered.push_back(c);\n    }\n  }\n  return filtered;\n}\n\n// Replace invalid host characters.\ninline std::string replaceInvalidHostCharacters(absl::string_view string) {\n  std::string filtered;\n  filtered.reserve(string.length());\n  for (const char& c : string) {\n    if (nghttp2_check_authority(reinterpret_cast<const uint8_t*>(&c), 1)) {\n      filtered.push_back(c);\n    } else {\n      filtered.push_back('0');\n    }\n  }\n  return filtered;\n}\n\ninline 
envoy::config::core::v3::Metadata\nreplaceInvalidStringValues(const envoy::config::core::v3::Metadata& upstream_metadata) {\n  envoy::config::core::v3::Metadata processed = upstream_metadata;\n  for (auto& metadata_struct : *processed.mutable_filter_metadata()) {\n    // Metadata fields consist of keyed Structs, which is a map of dynamically typed values. These\n    // values can be null, a number, a string, a boolean, a list of values, or a recursive struct.\n    // This clears any invalid characters in string values. It may not be likely a coverage-driven\n    // fuzzer will explore recursive structs, so this case is not handled here.\n    for (auto& field : *metadata_struct.second.mutable_fields()) {\n      if (field.second.kind_case() == ProtobufWkt::Value::kStringValue) {\n        field.second.set_string_value(replaceInvalidCharacters(field.second.string_value()));\n      }\n    }\n  }\n  return processed;\n}\n\n// Convert from test proto Headers to a variant of TestHeaderMapImpl. Validate proto if you intend\n// to sanitize for invalid header characters.\ntemplate <class T>\ninline T fromHeaders(\n    const test::fuzz::Headers& headers,\n    const absl::node_hash_set<std::string>& ignore_headers = absl::node_hash_set<std::string>(),\n    absl::node_hash_set<std::string> include_headers = absl::node_hash_set<std::string>()) {\n  T header_map;\n  for (const auto& header : headers.headers()) {\n    if (ignore_headers.find(absl::AsciiStrToLower(header.key())) == ignore_headers.end()) {\n      header_map.addCopy(header.key(), header.value());\n    }\n    include_headers.erase(absl::AsciiStrToLower(header.key()));\n  }\n  // Add dummy headers for non-present headers that must be included.\n  for (const auto& header : include_headers) {\n    header_map.addCopy(header, \"dummy\");\n  }\n  return header_map;\n}\n\n// Convert from test proto Metadata to MetadataMap\ninline Http::MetadataMapVector fromMetadata(const test::fuzz::Metadata& metadata) {\n  
Http::MetadataMapVector metadata_map_vector;\n  if (!metadata.metadata().empty()) {\n    Http::MetadataMap metadata_map;\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    for (const auto& pair : metadata.metadata()) {\n      metadata_map_ptr->insert(pair);\n    }\n    metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  }\n  return metadata_map_vector;\n}\n\n// Convert from HeaderMap to test proto Headers.\ninline test::fuzz::Headers toHeaders(const Http::HeaderMap& headers) {\n  test::fuzz::Headers fuzz_headers;\n  headers.iterate([&fuzz_headers](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    auto* fuzz_header = fuzz_headers.add_headers();\n    fuzz_header->set_key(std::string(header.key().getStringView()));\n    fuzz_header->set_value(std::string(header.value().getStringView()));\n    return Http::HeaderMap::Iterate::Continue;\n  });\n  return fuzz_headers;\n}\n\nconst std::string TestSubjectPeer =\n    \"CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\";\n\ninline std::unique_ptr<TestStreamInfo> fromStreamInfo(const test::fuzz::StreamInfo& stream_info) {\n  // Set mocks' default string return value to be an empty string.\n  // TODO(asraa): Speed up this function, which is slowed because of the use of mocks.\n  testing::DefaultValue<const std::string&>::Set(EMPTY_STRING);\n  auto test_stream_info = std::make_unique<TestStreamInfo>();\n  test_stream_info->metadata_ = stream_info.dynamic_metadata();\n  // Truncate recursive filter metadata fields.\n  // TODO(asraa): Resolve MessageToJsonString failure on recursive filter metadata.\n  for (auto& pair : *test_stream_info->metadata_.mutable_filter_metadata()) {\n    std::string value;\n    pair.second.SerializeToString(&value);\n    pair.second.ParseFromString(value.substr(0, 128));\n  }\n  // libc++ clocks don't track at nanosecond on macOS.\n  const auto start_time =\n      
static_cast<uint64_t>(std::numeric_limits<std::chrono::nanoseconds::rep>::max()) <\n              stream_info.start_time()\n          ? 0\n          : stream_info.start_time() / 1000;\n  test_stream_info->start_time_ = SystemTime(std::chrono::microseconds(start_time));\n  if (stream_info.has_response_code()) {\n    test_stream_info->response_code_ = stream_info.response_code().value();\n  }\n  test_stream_info->setRequestedServerName(stream_info.requested_server_name());\n  auto upstream_host = std::make_shared<NiceMock<Upstream::MockHostDescription>>();\n  auto upstream_metadata = std::make_shared<envoy::config::core::v3::Metadata>(\n      replaceInvalidStringValues(stream_info.upstream_metadata()));\n  ON_CALL(*upstream_host, metadata()).WillByDefault(testing::Return(upstream_metadata));\n  test_stream_info->upstream_host_ = upstream_host;\n  auto address = stream_info.has_address()\n                     ? Envoy::Network::Address::resolveProtoAddress(stream_info.address())\n                     : Network::Utility::resolveUrl(\"tcp://10.0.0.1:443\");\n  auto upstream_local_address =\n      stream_info.has_upstream_local_address()\n          ? 
Envoy::Network::Address::resolveProtoAddress(stream_info.upstream_local_address())\n          : Network::Utility::resolveUrl(\"tcp://10.0.0.1:10000\");\n  test_stream_info->upstream_local_address_ = upstream_local_address;\n  test_stream_info->downstream_local_address_ = address;\n  test_stream_info->downstream_direct_remote_address_ = address;\n  test_stream_info->downstream_remote_address_ = address;\n  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();\n  ON_CALL(*connection_info, subjectPeerCertificate())\n      .WillByDefault(testing::ReturnRef(TestSubjectPeer));\n  test_stream_info->setDownstreamSslConnection(connection_info);\n  return test_stream_info;\n}\n\n// Parses http or proto body into chunks.\ninline std::vector<std::string> parseHttpData(const test::fuzz::HttpData& data) {\n  std::vector<std::string> data_chunks;\n\n  if (data.has_http_body()) {\n    data_chunks.reserve(data.http_body().data_size());\n    for (const std::string& http_data : data.http_body().data()) {\n      data_chunks.push_back(http_data);\n    }\n  } else if (data.has_proto_body()) {\n    const std::string serialized = data.proto_body().message().value();\n    data_chunks = absl::StrSplit(serialized, absl::ByLength(data.proto_body().chunk_size()));\n  }\n\n  return data_chunks;\n}\n\n} // namespace Fuzz\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_binary\")\nload(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_binary\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n    \"envoy_select_hot_restart\",\n    \"envoy_select_new_codecs_in_integration_tests\",\n    \"envoy_sh_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"ads_integration_lib\",\n    srcs = [\n        \"ads_integration.cc\",\n    ],\n    hdrs = [\n        \"ads_integration.h\",\n    ],\n    data = [\n        \"//test/config/integration:server_xds_files\",\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/version:version_lib\",\n        \"//source/extensions/filters/network/redis_proxy:config\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"ads_integration_test\",\n    size = \"enormous\",\n    srcs = [\"ads_integration_test.cc\"],\n    deps = [\n        \":ads_integration_lib\",\n        \":http_integration_lib\",\n        
\"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"api_listener_integration_test\",\n    srcs = [\"api_listener_integration_test.cc\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//test/mocks/http:stream_encoder_mock\",\n        \"//test/server:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"api_version_integration_test\",\n    srcs = [\"api_version_integration_test.cc\"],\n    deps = [\n        \":http_integration_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2/core:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\npy_binary(\n    name = \"capture_fuzz_gen\",\n    srcs = [\"capture_fuzz_gen.py\"],\n    licenses = [\"notice\"],  # Apache 2\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":capture_fuzz_proto_py_proto\",\n        \"@envoy_api//envoy/data/tap/v2alpha:pkg_py_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"capture_fuzz_proto\",\n    srcs = [\":capture_fuzz.proto\"],\n)\n\nenvoy_proto_library(\n    name = \"h2_capture_fuzz_proto\",\n    srcs = [\":h2_capture_fuzz.proto\"],\n)\n\nenvoy_cc_test(\n    name = \"cds_integration_test\",\n    srcs = [\"cds_integration_test.cc\"],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        
\"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"eds_integration_test\",\n    srcs = [\"eds_integration_test.cc\"],\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//test/config:utility_lib\",\n        \"//test/integration/filters:eds_ready_filter_config_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"filter_manager_integration_proto\",\n    srcs = [\":filter_manager_integration_test.proto\"],\n)\n\nenvoy_cc_test(\n    name = \"filter_manager_integration_test\",\n    srcs = [\n        \"filter_manager_integration_test.cc\",\n    ],\n    deps = [\n        \":filter_manager_integration_proto_cc_proto\",\n        \":http_integration_lib\",\n        \":integration_lib\",\n        \"//source/extensions/filters/listener/original_dst:config\",\n        \"//source/extensions/filters/listener/tls_inspector:config\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        
\"//source/extensions/filters/network/echo:config\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/server:utility_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"cluster_filter_integration_test\",\n    srcs = [\"cluster_filter_integration_test.cc\"],\n    deps = [\n        \":integration_lib\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/config:utility_lib\",\n        \"//test/test_common:registry_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"custom_cluster_integration_test\",\n    srcs = [\"custom_cluster_integration_test.cc\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/upstream:load_balancer_lib\",\n        \"//test/config:utility_lib\",\n        \"//test/integration/clusters:custom_static_cluster\",\n        \"//test/test_common:network_utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"vhds_integration_test\",\n    srcs = [\"vhds_integration_test.cc\"],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = 
\"drain_close_integration_test\",\n    srcs = [\n        \"drain_close_integration_test.cc\",\n    ],\n    deps = [\n        \":http_protocol_integration_lib\",\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nexports_files([\"test_utility.sh\"])\n\nenvoy_cc_test_binary(\n    name = \"hotrestart_main\",\n    srcs = [\"hotrestart_main.cc\"],\n    external_deps = [\n        \"abseil_symbolize\",\n    ],\n    stamped = True,\n    deps = [\n        \"//source/exe:envoy_main_common_with_core_extensions_lib\",\n        \"//source/exe:platform_impl_lib\",\n    ],\n)\n\nenvoy_sh_test(\n    name = \"hotrestart_test\",\n    size = \"enormous\",\n    srcs = envoy_select_hot_restart([\n        \"hotrestart_test.sh\",\n    ]),\n    cc_binary = [\":hotrestart_main\"],\n    data = [\n        \"test_utility.sh\",\n        \"//test/config/integration:server_config_files\",\n        \"//tools:socket_passing\",\n    ],\n    # Hot restart does not apply on Windows, skipping\n    tags = [\"skip_on_windows\"],\n)\n\nenvoy_sh_test(\n    name = \"run_envoy_test\",\n    srcs = [\"run_envoy_test.sh\"],\n    cc_binary = [\":hotrestart_main\"],\n    data = [\n        \"test_utility.sh\",\n        \"//test/config/integration:server_config_files\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"alpn_selection_integration_test\",\n    srcs = [\n        \"alpn_selection_integration_test.cc\",\n    ],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"header_integration_test\",\n    srcs = [\n        \"header_integration_test.cc\",\n    ],\n    deps = [\n        
\":http_integration_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/protobuf\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http2_integration_test\",\n    srcs = [\n        \"http2_integration_test.cc\",\n        \"http2_integration_test.h\",\n    ],\n    shard_count = 4,\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/buffer:config\",\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//test/common/http/http2:http2_frame\",\n        \"//test/integration/filters:metadata_stop_all_filter_config_lib\",\n        \"//test/integration/filters:request_metadata_filter_config_lib\",\n        \"//test/integration/filters:response_metadata_filter_config_lib\",\n        \"//test/integration/filters:set_response_code_filter_config_proto_cc_proto\",\n        \"//test/integration/filters:set_response_code_filter_lib\",\n        \"//test/integration/filters:stop_iteration_and_continue\",\n        \"//test/integration/filters:test_socket_interface_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/upstream:retry_priority_factory_mocks\",\n        \"//test/mocks/upstream:retry_priority_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@com_google_absl//absl/synchronization\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        
\"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http_subset_lb_integration_test\",\n    srcs = [\n        \"http_subset_lb_integration_test.cc\",\n    ],\n    # Consistently times out in CI on Windows, but observed to pass locally\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"transport_socket_match_integration_test\",\n    srcs = [\n        \"transport_socket_match_integration_test.cc\",\n    ],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"header_casing_integration_test\",\n    srcs = [\n        \"header_casing_integration_test.cc\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http_timeout_integration_test\",\n    srcs = [\n        \"http_timeout_integration_test.cc\",\n        \"http_timeout_integration_test.h\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        
\"@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"protocol_integration_test\",\n    srcs = [\n        \"protocol_integration_test.cc\",\n    ],\n    # As this test has many H1/H2/v4/v6 tests it takes a while to run.\n    # Shard it enough to bring the run time in line with other integration tests.\n    shard_count = 5,\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_protocol_integration_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/filters/http/buffer:config\",\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//test/integration/filters:continue_headers_only_inject_body\",\n        \"//test/integration/filters:encoder_decoder_buffer_filter_lib\",\n        \"//test/integration/filters:local_reply_during_encoding_filter_lib\",\n        \"//test/integration/filters:random_pause_filter_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http2_upstream_integration_test\",\n    srcs = [\n        \"http2_upstream_integration_test.cc\",\n        \"http2_upstream_integration_test.h\",\n    ],\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/access_loggers/grpc:http_config\",\n        \"//source/extensions/filters/http/buffer:config\",\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//test/integration/filters:encoder_decoder_buffer_filter_lib\",\n        \"//test/integration/filters:random_pause_filter_lib\",\n        
\"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"integration_admin_test\",\n    srcs = [\n        \"integration_admin_test.cc\",\n        \"integration_admin_test.h\",\n    ],\n    deps = [\n        \":http_protocol_integration_lib\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//source/common/stats:histogram_lib\",\n        \"//source/common/stats:stats_matcher_lib\",\n        \"//source/extensions/filters/http/buffer:config\",\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_host_predicate_lib\",\n    srcs = [\n        \"test_host_predicate.h\",\n        \"test_host_predicate_config.h\",\n    ],\n    deps = [\n        \"//include/envoy/upstream:retry_interface\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"http_integration_lib\",\n    srcs = [\n        \"http_integration.cc\",\n    ],\n    hdrs = [\n        \"http_integration.h\",\n    ],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":integration_lib\",\n        \":test_host_predicate_lib\",\n        \"//include/envoy/event:timer_interface\",\n        \"//source/common/common:thread_annotations\",\n        \"//source/extensions/filters/http/on_demand:config\",\n        \"//source/extensions/filters/http/router:config\",\n        \"//source/extensions/filters/network/http_connection_manager:config\",\n        
\"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/integration/filters:add_body_filter_config_lib\",\n        \"//test/integration/filters:add_trailers_filter_config_lib\",\n        \"//test/integration/filters:call_decodedata_once_filter_config_lib\",\n        \"//test/integration/filters:decode_headers_return_stop_all_filter_config_lib\",\n        \"//test/integration/filters:encode_headers_return_stop_all_filter_config_lib\",\n        \"//test/integration/filters:headers_only_filter_config_lib\",\n        \"//test/integration/filters:modify_buffer_filter_config_lib\",\n        \"//test/integration/filters:passthrough_filter_config_lib\",\n        \"//test/integration/filters:pause_filter_lib\",\n        \"//test/integration/filters:wait_for_whole_request_and_response_config_lib\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"http_protocol_integration_lib\",\n    srcs = [\n        \"http_protocol_integration.cc\",\n    ],\n    hdrs = [\n        \"http_protocol_integration.h\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//test/common/upstream:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"idle_timeout_integration_test\",\n    srcs = [\"idle_timeout_integration_test.cc\"],\n    # As this test has many pauses for idle timeouts, it takes a while to run.\n    # Shard it enough to bring the run time in line with other integration tests.\n    shard_count = 2,\n    # Consistently fails in CI on Windows, but observed to pass locally\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_protocol_integration_lib\",\n        
\"//test/integration/filters:backpressure_filter_config_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"integration_stream_decoder_lib\",\n    srcs = [\n        \"integration_stream_decoder.cc\",\n    ],\n    hdrs = [\n        \"integration_stream_decoder.h\",\n    ],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/http:metadata_interface\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    srcs = [\n        \"utility.cc\",\n    ],\n    hdrs = [\n        \"utility.h\",\n    ],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/api:api_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/http:codec_client_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:test_time_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"integration_tcp_client_lib\",\n    srcs = [\n        \"integration_tcp_client.cc\",\n    ],\n    hdrs = [\n        \"integration_tcp_client.h\",\n    ],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:address_interface\",\n        
\"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:listen_socket_interface\",\n        \"//include/envoy/network:socket_interface\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"fake_upstream_lib\",\n    srcs = [\n        \"fake_upstream.cc\",\n    ],\n    hdrs = [\n        \"fake_upstream.h\",\n    ],\n    copts = envoy_select_new_codecs_in_integration_tests(\n        [\"-DENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS\"],\n        \"@envoy\",\n    ),\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/grpc:status\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/network:connection_handler_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/common:basic_resource_lib\",\n        \"//source/common/common:callback_impl_lib\",\n        \"//source/common/common:linked_object\",\n        \"//source/common/common:lock_guard_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http/http1:codec_legacy_lib\",\n        \"//source/common/http/http1:codec_lib\",\n        \"//source/common/http/http2:codec_legacy_lib\",\n        \"//source/common/http/http2:codec_lib\",\n        \"//source/common/network:connection_balancer_lib\",\n        \"//source/common/network:filter_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        
\"//source/common/network:udp_default_writer_config\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/server:active_raw_udp_listener_config\",\n        \"//source/server:connection_handler_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:test_time_system_interface\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"base_integration_test_lib\",\n    srcs = [\n        \"base_integration_test.cc\",\n    ],\n    hdrs = [\n        \"base_integration_test.h\",\n    ],\n    deps = [\n        \":autonomous_upstream_lib\",\n        \":fake_upstream_lib\",\n        \":integration_tcp_client_lib\",\n        \":server_lib\",\n        \":utility_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//source/extensions/transport_sockets/tls:ssl_socket_lib\",\n        \"//source/server:process_context_lib\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/config:utility_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = 
\"server_lib\",\n    srcs = [\n        \"server.cc\",\n    ],\n    hdrs = [\n        \"server.h\",\n    ],\n    deps = [\n        \":server_stats_interface\",\n        \":tcp_dump\",\n        \":utility_lib\",\n        \"//include/envoy/server:options_interface\",\n        \"//include/envoy/server:process_context_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:lock_guard_lib\",\n        \"//source/common/common:logger_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/stats:allocator_lib\",\n        \"//source/common/thread_local:thread_local_lib\",\n        \"//source/server:drain_manager_lib\",\n        \"//source/server:hot_restart_nop_lib\",\n        \"//source/server:listener_hooks_lib\",\n        \"//source/server:options_lib\",\n        \"//source/server:process_context_lib\",\n        \"//source/server:server_lib\",\n        \"//test/common/runtime:utility_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:test_time_system_interface\",\n        \"//test/test_common:utility_lib\",\n        \"@com_google_absl//absl/synchronization\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"autonomous_upstream_lib\",\n    srcs = [\n        \"autonomous_upstream.cc\",\n    ],\n    hdrs = [\n        \"autonomous_upstream.h\",\n    ],\n    deps = [\n        \":fake_upstream_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"integration_lib\",\n    srcs = [\n        \"ssl_utility.cc\",\n    ],\n    hdrs = [\n        \"integration.h\",\n        \"ssl_utility.h\",\n    ],\n    copts = envoy_select_new_codecs_in_integration_tests(\n        [\"-DENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS\"],\n        \"@envoy\",\n    ),\n    data = 
[\"//test/common/runtime:filesystem_test_data\"],\n    deps = [\n        \":autonomous_upstream_lib\",\n        \":base_integration_test_lib\",\n        \":fake_upstream_lib\",\n        \":integration_stream_decoder_lib\",\n        \":integration_tcp_client_lib\",\n        \":server_lib\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/grpc:status\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/server:configuration_interface\",\n        \"//include/envoy/server:hot_restart_interface\",\n        \"//include/envoy/server:options_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/thread:thread_interface\",\n        \"//source/common/api:api_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/buffer:zero_copy_input_stream_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:basic_resource_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:version_converter_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/grpc:codec_lib\",\n        \"//source/common/grpc:common_lib\",\n        \"//source/common/http:codec_client_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http/http1:codec_legacy_lib\",\n        \"//source/common/http/http1:codec_lib\",\n        \"//source/common/http/http2:codec_legacy_lib\",\n        \"//source/common/http/http2:codec_lib\",\n        \"//source/common/local_info:local_info_lib\",\n        
\"//source/common/network:filter_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//source/common/thread_local:thread_local_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/access_loggers/file:config\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/extensions/transport_sockets/tap:config\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//source/server:connection_handler_lib\",\n        \"//source/server:drain_manager_lib\",\n        \"//source/server:hot_restart_nop_lib\",\n        \"//source/server:listener_hooks_lib\",\n        \"//source/server:process_context_lib\",\n        \"//source/server:server_lib\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/common/runtime:utility_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/config:utility_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/upstream:retry_priority_factory_mocks\",\n        \"//test/mocks/upstream:retry_priority_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:test_time_system_interface\",\n        \"//test/test_common:utility_lib\",\n        
\"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"integration_test\",\n    srcs = [\n        \"integration_test.cc\",\n        \"integration_test.h\",\n    ],\n    shard_count = 2,\n    # Times out on Windows\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/extensions/filters/http/cors:config\",\n        \"//source/extensions/filters/http/grpc_http1_bridge:config\",\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//test/integration/filters:clear_route_cache_filter_lib\",\n        \"//test/integration/filters:encoder_decoder_buffer_filter_lib\",\n        \"//test/integration/filters:process_context_lib\",\n        \"//test/integration/filters:stop_iteration_and_continue\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"redirect_integration_test\",\n    srcs = [\n        \"redirect_integration_test.cc\",\n    ],\n    deps = [\n        \":http_protocol_integration_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/internal_redirect/allow_listed_routes:config\",\n        \"//source/extensions/internal_redirect/previous_routes:config\",\n        \"//source/extensions/internal_redirect/safe_cross_scheme:config\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto\",\n        
\"@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"websocket_integration_test\",\n    srcs = [\n        \"websocket_integration_test.cc\",\n        \"websocket_integration_test.h\",\n    ],\n    # Consistently fails in CI on Windows, but observed to pass locally\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_protocol_integration_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/access_loggers/file:config\",\n        \"//source/extensions/filters/http/buffer:config\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"echo_integration_test\",\n    srcs = [\n        \"echo_integration_test.cc\",\n    ],\n    tags = [\n        # Uncomment this line to run this test repeatedly in exclusive mode if not using docker-sandbox,\n        # or RBE, see comments in AddRemoveListener.\n        # \"exclusive\",\n    ],\n    deps = [\n        \":integration_lib\",\n        \"//source/extensions/filters/network/echo:config\",\n        \"//test/server:utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"socket_interface_integration_test\",\n    srcs = [\"socket_interface_integration_test.cc\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/network:socket_interface_lib\",\n        \"//source/extensions/filters/network/echo:config\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"stats_integration_test\",\n    srcs = [\"stats_integration_test.cc\"],\n    # The symbol table cluster memory tests take a while to run specially under tsan.\n    # Shard it to avoid test timeout.\n    
shard_count = 2,\n    deps = [\n        \":integration_lib\",\n        \"//source/common/memory:stats_lib\",\n        \"//source/extensions/filters/http/router:config\",\n        \"//source/extensions/filters/network/http_connection_manager:config\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"load_stats_integration_test\",\n    srcs = [\"load_stats_integration_test.cc\"],\n    # Consistently timing out on Windows, observed to pass locally\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//test/config:utility_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/load_stats/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"hds_integration_test\",\n    srcs = [\"hds_integration_test.cc\"],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    shard_count = 2,\n    # Alternately timing out and failing in CI on windows; observed to pass locally\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_integration_lib\",\n        \":integration_lib\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/upstream:health_checker_lib\",\n        
\"//source/common/upstream:health_discovery_service_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/config:utility_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:resources_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/health/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"header_prefix_integration_test\",\n    srcs = [\"header_prefix_integration_test.cc\"],\n    coverage = False,\n    deps = [\n        \":http_protocol_integration_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"overload_integration_test\",\n    srcs = [\"overload_integration_test.cc\"],\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_protocol_integration_lib\",\n        \"//test/common/config:dummy_config_proto_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/overload/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"proxy_proto_integration_test\",\n    srcs = [\n        \"proxy_proto_integration_test.cc\",\n        \"proxy_proto_integration_test.h\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:codec_client_lib\",\n        \"//source/extensions/filters/listener/proxy_protocol:config\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"rtds_integration_test\",\n    srcs = [\"rtds_integration_test.cc\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        
\"@envoy_api//envoy/service/runtime/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"extension_discovery_integration_test\",\n    srcs = [\"extension_discovery_integration_test.cc\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/integration/filters:set_response_code_filter_config_proto_cc_proto\",\n        \"//test/integration/filters:set_response_code_filter_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/extension/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"server_stats_interface\",\n    hdrs = [\"server_stats.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/stats:stats_interface\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"sds_static_integration_test\",\n    srcs = [\n        \"sds_static_integration_test.cc\",\n    ],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:connection_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"sds_dynamic_integration_test\",\n    srcs = [\n        \"sds_dynamic_integration_test.cc\",\n    ],\n    data = [\n        
\"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:connection_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/secret/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"sds_generic_secret_integration_test\",\n    srcs = [\n        \"sds_generic_secret_integration_test.cc\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//include/envoy/registry\",\n        \"//source/common/grpc:common_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"tcp_proxy_integration_proto\",\n    srcs = [\":tcp_proxy_integration_test.proto\"],\n)\n\nenvoy_cc_test(\n    name = \"tcp_proxy_integration_test\",\n    srcs = [\n        
\"tcp_proxy_integration_test.cc\",\n        \"tcp_proxy_integration_test.h\",\n    ],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    shard_count = 2,\n    # Fails intermittantly on local build\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":integration_lib\",\n        \":tcp_proxy_integration_proto_cc_proto\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/access_loggers/file:config\",\n        \"//source/extensions/filters/network/common:factory_base_lib\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/network/tcp_proxy/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"tcp_tunneling_integration_test\",\n    srcs = [\n        \"tcp_tunneling_integration_test.cc\",\n    ],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_integration_lib\",\n        \":http_protocol_integration_lib\",\n        
\"//source/extensions/filters/network/tcp_proxy:config\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/network/tcp_proxy/v2:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"tcp_conn_pool_integration_test\",\n    srcs = [\n        \"tcp_conn_pool_integration_test.cc\",\n    ],\n    deps = [\n        \":integration_lib\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//test/server:utility_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"tcp_dump\",\n    srcs = [\"tcp_dump.cc\"],\n    hdrs = [\"tcp_dump.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:fmt_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"uds_integration_test\",\n    srcs = [\n        \"uds_integration_test.cc\",\n        \"uds_integration_test.h\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/http:codec_client_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/test_common:environment_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"version_integration_test\",\n    srcs = [\"version_integration_test.cc\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/extensions/filters/http/ip_tagging:config\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"dynamic_validation_integration_test\",\n    srcs = [\"dynamic_validation_integration_test.cc\"],\n    data = [\"//test/config/integration:server_xds_files\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/test_common:registry_lib\",\n        
\"@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"xds_integration_test\",\n    srcs = [\"xds_integration_test.cc\"],\n    data = [\n        \"//test/config/integration:server_xds_files\",\n        \"//test/config/integration/certs\",\n    ],\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \":http_integration_lib\",\n        \":http_protocol_integration_lib\",\n        \"//source/extensions/filters/listener/tls_inspector:config\",\n        \"//source/extensions/filters/listener/tls_inspector:tls_inspector_lib\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"xfcc_integration_test\",\n    srcs = [\n        \"xfcc_integration_test.cc\",\n        \"xfcc_integration_test.h\",\n    ],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n\nH1_FUZZ_LIB_DEPS = [\n    \":capture_fuzz_proto_cc_proto\",\n    \":http_integration_lib\",\n    
\"//source/common/common:assert_lib\",\n    \"//source/common/common:logger_lib\",\n    \"//test/fuzz:fuzz_runner_lib\",\n    \"//test/integration:integration_lib\",\n    \"//test/test_common:environment_lib\",\n]\n\nenvoy_cc_test_library(\n    name = \"h1_fuzz_lib\",\n    srcs = [\"h1_fuzz.cc\"],\n    hdrs = [\"h1_fuzz.h\"],\n    deps = H1_FUZZ_LIB_DEPS,\n)\n\nenvoy_cc_test_library(\n    name = \"h1_fuzz_persistent_lib\",\n    srcs = [\"h1_fuzz.cc\"],\n    hdrs = [\"h1_fuzz.h\"],\n    copts = [\"-DPERSISTENT_FUZZER\"],\n    deps = H1_FUZZ_LIB_DEPS,\n)\n\nenvoy_cc_fuzz_test(\n    name = \"h1_capture_fuzz_test\",\n    srcs = [\"h1_capture_fuzz_test.cc\"],\n    corpus = \"h1_corpus\",\n    deps = [\":h1_fuzz_lib\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"h1_capture_persistent_fuzz_test\",\n    srcs = [\"h1_capture_fuzz_test.cc\"],\n    copts = [\"-DPERSISTENT_FUZZER\"],\n    corpus = \"h1_corpus\",\n    deps = [\":h1_fuzz_persistent_lib\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"h1_capture_direct_response_fuzz_test\",\n    srcs = [\"h1_capture_direct_response_fuzz_test.cc\"],\n    corpus = \"h1_corpus\",\n    deps = [\n        \":h1_fuzz_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"h1_capture_direct_response_persistent_fuzz_test\",\n    srcs = [\"h1_capture_direct_response_fuzz_test.cc\"],\n    copts = [\"-DPERSISTENT_FUZZER\"],\n    corpus = \"h1_corpus\",\n    deps = [\n        \":h1_fuzz_persistent_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nH2_FUZZ_LIB_DEPS = [\n    \":h2_capture_fuzz_proto_cc_proto\",\n    \":http_integration_lib\",\n    \"//source/common/common:assert_lib\",\n    \"//source/common/common:logger_lib\",\n    \"//test/common/http/http2:http2_frame\",\n    \"//test/fuzz:fuzz_runner_lib\",\n    \"//test/fuzz:utility_lib\",\n    
\"//test/integration:integration_lib\",\n    \"//test/test_common:environment_lib\",\n]\n\nenvoy_cc_test_library(\n    name = \"h2_fuzz_lib\",\n    srcs = [\"h2_fuzz.cc\"],\n    hdrs = [\"h2_fuzz.h\"],\n    deps = H2_FUZZ_LIB_DEPS,\n)\n\nenvoy_cc_test_library(\n    name = \"h2_fuzz_persistent_lib\",\n    srcs = [\"h2_fuzz.cc\"],\n    hdrs = [\"h2_fuzz.h\"],\n    copts = [\"-DPERSISTENT_FUZZER\"],\n    deps = H2_FUZZ_LIB_DEPS,\n)\n\nenvoy_cc_fuzz_test(\n    name = \"h2_capture_fuzz_test\",\n    srcs = [\"h2_capture_fuzz_test.cc\"],\n    corpus = \"h2_corpus\",\n    deps = [\":h2_fuzz_lib\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"h2_capture_persistent_fuzz_test\",\n    srcs = [\"h2_capture_fuzz_test.cc\"],\n    copts = [\"-DPERSISTENT_FUZZER\"],\n    corpus = \"h2_corpus\",\n    deps = [\":h2_fuzz_persistent_lib\"],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"h2_capture_direct_response_fuzz_test\",\n    srcs = [\"h2_capture_direct_response_fuzz_test.cc\"],\n    corpus = \"h2_corpus\",\n    deps = [\n        \":h2_fuzz_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"h2_capture_direct_response_persistent_fuzz_test\",\n    srcs = [\"h2_capture_direct_response_fuzz_test.cc\"],\n    copts = [\"-DPERSISTENT_FUZZER\"],\n    corpus = \"h2_corpus\",\n    deps = [\n        \":h2_fuzz_persistent_lib\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"scoped_rds_integration_test\",\n    srcs = [\n        \"scoped_rds_integration_test.cc\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:connection_lib\",\n        \"//source/common/network:utility_lib\",\n        
\"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"listener_lds_integration_test\",\n    srcs = [\n        \"listener_lds_integration_test.cc\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:connection_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//test/common/grpc:grpc_client_integration_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"listener_filter_integration_test\",\n    srcs = [\n        \"listener_filter_integration_test.cc\",\n    ],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \":integration_lib\",\n        \"//source/common/config:api_version_lib\",\n        \"//source/common/event:dispatcher_includes\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/extensions/access_loggers/file:config\",\n        
\"//source/extensions/filters/listener/tls_inspector:config\",\n        \"//source/extensions/filters/listener/tls_inspector:tls_inspector_lib\",\n        \"//source/extensions/filters/network/echo:config\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"cx_limit_integration_test\",\n    srcs = [\"cx_limit_integration_test.cc\"],\n    deps = [\n        \":http_integration_lib\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//test/config:utility_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"local_reply_integration_test\",\n    srcs = [\n        \"local_reply_integration_test.cc\",\n    ],\n    deps = [\n        \":http_integration_lib\",\n        \":http_protocol_integration_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"health_check_integration_test\",\n    srcs = [\"health_check_integration_test.cc\"],\n    deps = [\n        \":http_integration_lib\",\n        \":integration_lib\",\n        \"//test/common/http/http2:http2_frame\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/integration/README.md",
    "content": "# Writing new tests\n\nThe Envoy integration test framework is designed to make it simple to test downstream-Envoy-upstream\ncommunication. In the common case, one\n\n- Sends a request from downstream through Envoy\n- Verifies the request is received upstream, and possibly inspects elements of the headers or body\n- Sends a response from upstream through Envoy\n- Verifies the request is received downstream, again possibly inspecting headers and body\n\nFor the simplest variant of this, one could do the following.\n\n```c++\n// start Envoy, set up the fake upstreams.\ninitialize();\n\n// Create a client aimed at Envoy’s default HTTP port.\ncodec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n\n// Create some request headers.\nHttp::TestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                        {\":path\", \"/test/long/url\"},\n                                        {\":scheme\", \"http\"},\n                                        {\":authority\", \"host\"}};\n\n// Send the request headers from the client, wait until they are received upstream. When they\n// are received, send the default response headers from upstream and wait until they are\n// received at by client\nsendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n// Verify the proxied request was received upstream, as expected.\nEXPECT_TRUE(upstream_request_->complete());\nEXPECT_EQ(0U, upstream_request_->bodyLength());\n// Verify the proxied response was received downstream, as expected.\nEXPECT_TRUE(response_->complete());\nEXPECT_STREQ(\"200\", response_->headers().Status()->value().c_str());\nEXPECT_EQ(0U, response_->body().size());\n```\n\nOnce you have the basic end-to-end test, it is fairly straight forward to modify it to test more\ninteresting corner cases. 
There are existing tests which send requests with bodies, have\ndownstream or upstream disconnect or time out, send multiple simultaneous requests on the same\nconnection, etc. Given that many of the disconnect/timeout cases are covered, a common case for\ntesting is covering a newly added configuration option.\n\nMost of Envoy's tests have been migrated from using [`json flatfiles`](../config/integration/) to\nusing a basic configuration defined as `basic_config` in [`config/utility.cc`](../config/utility.cc).\nThis configuration may be modified before the call to `initialize()`.\nThe [`ConfigHelper`](../config/utility.h) has utilities for common alterations such as:\n\n```c++\n// Set the default protocol to HTTP2\nsetDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n```\n\nor\n\n```c++\n// Add a buffering filter on the request path\nconfig_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER);\n```\n\nFor other edits which are less likely reusable, one can add config modifiers. Config modifiers\nallow arbitrary modification of Envoy’s configuration just before Envoy is brought up. One can add\na config modifier to alter the bootstrap proto, one which affects the first `HttpConnectionManager`\nobject in the config, or mix and match. 
Config modifiers are operated on in the order they are\nadded, so one could, for example, modify the default `HttpConnectionManager`, duplicate the listening\nconfig, and then change the first `HttpConnectionManager` to be different from the second.\n\nAn example of modifying the bootstrap proto to overwrite runtime defaults:\n\n```c++\nconfig_helper_.addRuntimeOverride(\"Foo\", \"bar\");\n});\n```\n\nAn example of modifying `HttpConnectionManager` to change Envoy’s HTTP/1.1 processing:\n\n```c++\nconfig_helper_.addConfigModifier([&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void {\n  envoy::api::v2::core::Http1ProtocolOptions options;\n  options.mutable_allow_absolute_url()->set_value(true);\n  hcm.mutable_http_protocol_options()->CopyFrom(options);\n};);\n```\n\nAn example of modifying `HttpConnectionManager` to add an additional upstream\ncluster:\n\n```c++\n   config_helper_.addConfigModifier([](envoy::config::bootstrap::v2::Bootstrap& bootstrap) {\n      bootstrap.mutable_rate_limit_service()->set_cluster_name(\"ratelimit\");\n      auto* ratelimit_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      ratelimit_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      ratelimit_cluster->set_name(\"ratelimit\");\n      ratelimit_cluster->mutable_http2_protocol_options();\n    });\n```\n\nIn addition to the existing test framework, which allows for carefully timed interaction and ordering of events between downstream, Envoy, and Upstream, there is now an “autonomous” framework which simplifies the common case where the timing is not essential (or bidirectional streaming is desired). When AutonomousUpstream is used, by setting `autonomous_upstream_ = true` before `initialize()`, upstream will by default create AutonomousHttpConnections for each incoming connection and AutonomousStreams for each incoming stream. 
By default, the streams will respond to each complete request with “200 OK” and 10 bytes of payload, but this behavior can be altered by setting various request headers, as documented in [`autonomous_upstream.h`](autonomous_upstream.h)\n\n## Common Problems\n- If a response body length does not match the `content-length` header, any mock calls to wait for the response completion such as `sendRequestAndWaitForResponse` or `response_->waitForEndStream` could time out. Also, any asserts that the response was completed such as `EXPECT_TRUE(response_->complete())` could fail. Make sure that the response body length matches the `content-length` header.\n\n# Extending the test framework\n\nThe Envoy integration test framework is most definitely a work in progress.\nWhen encountering features which do not exist (actions for the autonomous\nbackend, testing new sections of configuration) please use your best judgement\nif the changes will be needed by one specific test file, or will be likely\nreused in other integration tests. If it's likely be reused, please add the\nappropriate functions to existing utilities or add new test utilities. If it's\nlikely a one-off change, it can be scoped to the existing test file.\n\n# Debugging integration tests\n\nThe Envoy integration test framework is generally designed to fast-fail when\nthings go wrong, with an explanatory message such as\n\"Timed out waiting for new connection.\"\nbut it's not always clear what the underlying cause is. Because there are many\nEnvoy components under test, often the best tool for debugging is to try to get\na run of the test with `--test_arg=\"-l debug\"`, or `--test_arg=\"-l trace\"` ideally\nwith a clean run (if it's a code change which caused test breakage) or a comparable\ntest (if it's a new test failing). Looking at the conn_manager_impl.cc\nlogs, router.cc logs, and fake_upstream.cc logs, can often give you a feel for\nwhat unexpected event is occurring. 
If that doesn't help, following the GDB\ninstructions to run the test in a debugger or sprinkling cerrs around are both often helpful.\n\n# Deflaking tests\n\nThe instructions below assume the developer is running tests natively with bazel\nrather than in docker. For developers using docker the best workaround today is\nto replace `//test/...` on the relevant `ci/do_ci.sh`with the command lines\nreferenced below and remember to back those changes out before sending the fix\nupstream!\n\n## Reproducing test flakes\n\nThe first step of fixing test flakes is reproducing the test flake. In general\nif you have written a test which flakes, you can start by running\n\n``\nbazel test [test_name] --runs_per_test=1000\n``\n\nWhich runs the full test many times. If this works, great!  If not, it's worth\ntrying to stress your system more by running more tests in parallel, by setting\n`--jobs` and `--local_resources.`\n\nOnce you've managed to reproduce a failure it may be beneficial to limit your\ntest run to the specific failing test(s) with `--gtest_filter`. This may cause\nthe test to flake less often (i.e. if two tests are interfering with each other,\nscoping to your specific test name may harm rather than help reproducibility.)\nbut if it works it lets you iterate faster.\n\nAnother helpful tip for debugging is turn turn up Envoy trace logs with\n`--test_arg=\"-l trace\"`. 
Again if the test failure is due to a race, this may make\nit harder to reproduce, and it may also hide any custom logging you add, but it's a\nhandy thing to know of to follow the general flow.\n\nThe full command might look something like\n\n```\nbazel test //test/integration:http2_upstream_integration_test \\\n--test_arg=--gtest_filter=\"IpVersions/Http2UpstreamIntegrationTest.RouterRequestAndResponseWithBodyNoBuffer/IPv6\" \\\n--jobs 60 --local_test_jobs=60 --runs_per_test=1000 --test_arg=\"-l trace\"\n```\n\n## Debugging test flakes\n\nOnce you've managed to reproduce your test flake, you get to figure out what's\ngoing on. If your failure mode isn't documented below, ideally some combination\nof cerr << logging and trace logs will help you sort out what is going on (and\nplease add to this document as you figure it out!)\n\n\n"
  },
  {
    "path": "test/integration/ads_integration.cc",
    "content": "#include \"test/integration/ads_integration.h\"\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/config/protobuf_link_hacks.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::AssertionResult;\n\nnamespace Envoy {\n\nAdsIntegrationTest::AdsIntegrationTest(const envoy::config::core::v3::ApiVersion api_version)\n    : HttpIntegrationTest(\n          Http::CodecClient::Type::HTTP2, ipVersion(),\n          ConfigHelper::adsBootstrap(\n              sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? 
\"GRPC\" : \"DELTA_GRPC\", api_version)) {\n  use_lds_ = false;\n  create_xds_upstream_ = true;\n  tls_xds_upstream_ = true;\n  sotw_or_delta_ = sotwOrDelta();\n  api_version_ = api_version;\n}\n\nvoid AdsIntegrationTest::TearDown() { cleanUpXdsConnection(); }\n\nenvoy::config::cluster::v3::Cluster AdsIntegrationTest::buildCluster(const std::string& name) {\n  return ConfigHelper::buildCluster(name, \"ROUND_ROBIN\", api_version_);\n}\n\nenvoy::config::cluster::v3::Cluster AdsIntegrationTest::buildTlsCluster(const std::string& name) {\n  return ConfigHelper::buildTlsCluster(name, \"ROUND_ROBIN\", api_version_);\n}\n\nenvoy::config::cluster::v3::Cluster AdsIntegrationTest::buildRedisCluster(const std::string& name) {\n  return ConfigHelper::buildCluster(name, \"MAGLEV\", api_version_);\n}\n\nenvoy::config::endpoint::v3::ClusterLoadAssignment\nAdsIntegrationTest::buildClusterLoadAssignment(const std::string& name) {\n  return ConfigHelper::buildClusterLoadAssignment(\n      name, Network::Test::getLoopbackAddressString(ipVersion()),\n      fake_upstreams_[0]->localAddress()->ip()->port(), api_version_);\n}\n\nenvoy::config::endpoint::v3::ClusterLoadAssignment\nAdsIntegrationTest::buildTlsClusterLoadAssignment(const std::string& name) {\n  return ConfigHelper::buildClusterLoadAssignment(\n      name, Network::Test::getLoopbackAddressString(ipVersion()), 8443, api_version_);\n}\n\nenvoy::config::listener::v3::Listener\nAdsIntegrationTest::buildListener(const std::string& name, const std::string& route_config,\n                                  const std::string& stat_prefix) {\n  return ConfigHelper::buildListener(name, route_config,\n                                     Network::Test::getLoopbackAddressString(ipVersion()),\n                                     stat_prefix, api_version_);\n}\n\nenvoy::config::listener::v3::Listener\nAdsIntegrationTest::buildRedisListener(const std::string& name, const std::string& cluster) {\n  std::string redis = fmt::format(\n      
R\"EOF(\n        filters:\n        - name: redis\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy\n            settings:\n              op_timeout: 1s\n            stat_prefix: {}\n            prefix_routes:\n              catch_all_route:\n                cluster: {}\n    )EOF\",\n      name, cluster);\n  return ConfigHelper::buildBaseListener(name, Network::Test::getLoopbackAddressString(ipVersion()),\n                                         redis, api_version_);\n}\n\nenvoy::config::route::v3::RouteConfiguration\nAdsIntegrationTest::buildRouteConfig(const std::string& name, const std::string& cluster) {\n  return ConfigHelper::buildRouteConfig(name, cluster, api_version_);\n}\n\nvoid AdsIntegrationTest::makeSingleRequest() {\n  registerTestServerPorts({\"http\"});\n  testRouterHeaderOnlyRequestAndResponse();\n  cleanupUpstreamAndDownstream();\n}\n\nvoid AdsIntegrationTest::initialize() { initializeAds(false); }\n\nvoid AdsIntegrationTest::initializeAds(const bool rate_limiting) {\n  config_helper_.addConfigModifier([this, &rate_limiting](\n                                       envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* ads_config = bootstrap.mutable_dynamic_resources()->mutable_ads_config();\n    if (rate_limiting) {\n      ads_config->mutable_rate_limit_settings();\n    }\n    auto* grpc_service = ads_config->add_grpc_services();\n    setGrpcService(*grpc_service, \"ads_cluster\", xds_upstream_->localAddress());\n    auto* ads_cluster = bootstrap.mutable_static_resources()->add_clusters();\n    ads_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n    ads_cluster->set_name(\"ads_cluster\");\n    envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext context;\n    auto* validation_context = context.mutable_common_tls_context()->mutable_validation_context();\n    validation_context->mutable_trusted_ca()->set_filename(\n        
TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n    validation_context->add_match_subject_alt_names()->set_suffix(\"lyft.com\");\n    if (clientType() == Grpc::ClientType::GoogleGrpc) {\n      auto* google_grpc = grpc_service->mutable_google_grpc();\n      auto* ssl_creds = google_grpc->mutable_channel_credentials()->mutable_ssl_credentials();\n      ssl_creds->mutable_root_certs()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n    }\n    ads_cluster->mutable_transport_socket()->set_name(\"envoy.transport_sockets.tls\");\n    ads_cluster->mutable_transport_socket()->mutable_typed_config()->PackFrom(context);\n  });\n  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  HttpIntegrationTest::initialize();\n  if (xds_stream_ == nullptr) {\n    createXdsConnection();\n    AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    xds_stream_->startGrpcStream();\n  }\n}\n\nvoid AdsIntegrationTest::testBasicFlow() {\n  // Send initial configuration, validate we can process a request.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_0\"}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {\"route_config_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_0\"}, {}, {}));\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n  makeSingleRequest();\n  const ProtobufWkt::Timestamp first_active_listener_ts_1 =\n      getListenersConfigDump().dynamic_listeners(0).active_state().last_updated();\n  const ProtobufWkt::Timestamp first_active_cluster_ts_1 =\n      getClustersConfigDump().dynamic_active_clusters()[0].last_updated();\n  const ProtobufWkt::Timestamp first_route_config_ts_1 =\n      getRoutesConfigDump().dynamic_route_configs()[0].last_updated();\n\n  // Upgrade RDS/CDS/EDS to a newer config, validate we can process a request.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {buildCluster(\"cluster_1\"), 
buildCluster(\"cluster_2\")},\n      {buildCluster(\"cluster_1\"), buildCluster(\"cluster_2\")}, {\"cluster_0\"}, \"2\");\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 2);\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment,\n      {buildClusterLoadAssignment(\"cluster_1\"), buildClusterLoadAssignment(\"cluster_2\")},\n      {buildClusterLoadAssignment(\"cluster_1\"), buildClusterLoadAssignment(\"cluster_2\")},\n      {\"cluster_0\"}, \"2\");\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 0);\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_2\", \"cluster_1\"}, {\"cluster_2\", \"cluster_1\"},\n                                      {\"cluster_0\"}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"2\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"2\",\n                                      {\"cluster_2\", \"cluster_1\"}, {}, {}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_1\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_1\")}, {}, \"2\");\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"2\",\n                                      {\"route_config_0\"}, {}, {}));\n\n  makeSingleRequest();\n  const ProtobufWkt::Timestamp first_active_listener_ts_2 =\n      getListenersConfigDump().dynamic_listeners(0).active_state().last_updated();\n  const ProtobufWkt::Timestamp first_active_cluster_ts_2 =\n      getClustersConfigDump().dynamic_active_clusters()[0].last_updated();\n  const ProtobufWkt::Timestamp first_route_config_ts_2 =\n      getRoutesConfigDump().dynamic_route_configs()[0].last_updated();\n\n  // 
Upgrade LDS/RDS, validate we can process a request.\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener,\n      {buildListener(\"listener_1\", \"route_config_1\"),\n       buildListener(\"listener_2\", \"route_config_2\")},\n      {buildListener(\"listener_1\", \"route_config_1\"),\n       buildListener(\"listener_2\", \"route_config_2\")},\n      {\"listener_0\"}, \"2\");\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"2\",\n                                      {\"route_config_2\", \"route_config_1\", \"route_config_0\"},\n                                      {\"route_config_2\", \"route_config_1\"}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"2\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"2\",\n                                      {\"route_config_2\", \"route_config_1\"}, {},\n                                      {\"route_config_0\"}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration,\n      {buildRouteConfig(\"route_config_1\", \"cluster_1\"),\n       buildRouteConfig(\"route_config_2\", \"cluster_1\")},\n      {buildRouteConfig(\"route_config_1\", \"cluster_1\"),\n       buildRouteConfig(\"route_config_2\", \"cluster_1\")},\n      {\"route_config_0\"}, \"3\");\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"3\",\n                                      {\"route_config_2\", \"route_config_1\"}, {}, {}));\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 2);\n  makeSingleRequest();\n  const ProtobufWkt::Timestamp first_active_listener_ts_3 =\n      getListenersConfigDump().dynamic_listeners(0).active_state().last_updated();\n  const ProtobufWkt::Timestamp first_active_cluster_ts_3 =\n      
getClustersConfigDump().dynamic_active_clusters()[0].last_updated();\n  const ProtobufWkt::Timestamp first_route_config_ts_3 =\n      getRoutesConfigDump().dynamic_route_configs()[0].last_updated();\n\n  // Expect last_updated timestamps to be updated in a predictable way\n  // For the listener configs in this example, 1 == 2 < 3.\n  EXPECT_EQ(first_active_listener_ts_2, first_active_listener_ts_1);\n  EXPECT_GT(first_active_listener_ts_3, first_active_listener_ts_2);\n  // For the cluster configs in this example, 1 < 2 == 3.\n  EXPECT_GT(first_active_cluster_ts_2, first_active_cluster_ts_1);\n  EXPECT_EQ(first_active_cluster_ts_3, first_active_cluster_ts_2);\n  // For the route configs in this example, 1 < 2 < 3.\n  EXPECT_GT(first_route_config_ts_2, first_route_config_ts_1);\n  EXPECT_GT(first_route_config_ts_3, first_route_config_ts_2);\n}\n\nenvoy::admin::v3::ClustersConfigDump AdsIntegrationTest::getClustersConfigDump() {\n  auto message_ptr =\n      test_server_->server().admin().getConfigTracker().getCallbacksMap().at(\"clusters\")();\n  return dynamic_cast<const envoy::admin::v3::ClustersConfigDump&>(*message_ptr);\n}\n\nenvoy::admin::v3::ListenersConfigDump AdsIntegrationTest::getListenersConfigDump() {\n  auto message_ptr =\n      test_server_->server().admin().getConfigTracker().getCallbacksMap().at(\"listeners\")();\n  return dynamic_cast<const envoy::admin::v3::ListenersConfigDump&>(*message_ptr);\n}\n\nenvoy::admin::v3::RoutesConfigDump AdsIntegrationTest::getRoutesConfigDump() {\n  auto message_ptr =\n      test_server_->server().admin().getConfigTracker().getCallbacksMap().at(\"routes\")();\n  return dynamic_cast<const envoy::admin::v3::RoutesConfigDump&>(*message_ptr);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/ads_integration.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/config/utility.h\"\n#include \"test/integration/http_integration.h\"\n\nnamespace Envoy {\n\nclass AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public HttpIntegrationTest {\npublic:\n  AdsIntegrationTest(const envoy::config::core::v3::ApiVersion api_version);\n  AdsIntegrationTest() : AdsIntegrationTest(envoy::config::core::v3::ApiVersion::V2) {}\n\n  void TearDown() override;\n\n  envoy::config::cluster::v3::Cluster buildCluster(const std::string& name);\n\n  envoy::config::cluster::v3::Cluster buildTlsCluster(const std::string& name);\n\n  envoy::config::cluster::v3::Cluster buildRedisCluster(const std::string& name);\n\n  envoy::config::endpoint::v3::ClusterLoadAssignment\n  buildClusterLoadAssignment(const std::string& name);\n\n  envoy::config::endpoint::v3::ClusterLoadAssignment\n  buildTlsClusterLoadAssignment(const std::string& name);\n\n  envoy::config::listener::v3::Listener buildListener(const std::string& name,\n                                                      const std::string& route_config,\n                                                      const std::string& stat_prefix = \"ads_test\");\n\n  envoy::config::listener::v3::Listener buildRedisListener(const std::string& name,\n                                                           const std::string& cluster);\n\n  envoy::config::route::v3::RouteConfiguration buildRouteConfig(const std::string& name,\n                                                                const std::string& cluster);\n\n  void makeSingleRequest();\n\n  void initialize() override;\n  void initializeAds(const 
bool rate_limiting);\n\n  void testBasicFlow();\n\n  envoy::admin::v3::ClustersConfigDump getClustersConfigDump();\n  envoy::admin::v3::ListenersConfigDump getListenersConfigDump();\n  envoy::admin::v3::RoutesConfigDump getRoutesConfigDump();\n\n  envoy::config::core::v3::ApiVersion api_version_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/ads_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/grpc/status.h\"\n\n#include \"common/config/protobuf_link_hacks.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/version/version.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/ads_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::AssertionResult;\n\nnamespace Envoy {\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTest,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Validate basic config delivery and upgrade.\nTEST_P(AdsIntegrationTest, Basic) {\n  initialize();\n  testBasicFlow();\n}\n\n// Validate basic config delivery and upgrade with RateLimiting.\nTEST_P(AdsIntegrationTest, BasicWithRateLimiting) {\n  initializeAds(true);\n  testBasicFlow();\n}\n\n// Validate that we can recover from failures.\nTEST_P(AdsIntegrationTest, Failure) {\n  initialize();\n\n  // Send initial configuration, failing each xDS once (via a type mismatch), validate we can\n  // process a request.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().Cluster, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n\n  EXPECT_TRUE(compareDiscoveryRequest(\n      Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true,\n      Grpc::Status::WellKnownGrpcStatus::Internal,\n      fmt::format(\"does not match the message-wide type URL {}\", Config::TypeUrl::get().Cluster)));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildCluster(\"cluster_0\")},\n      {buildCluster(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {}, {}, true,\n                                      Grpc::Status::WellKnownGrpcStatus::Internal,\n                                      fmt::format(\"does not match the message-wide type URL {}\",\n                                                  Config::TypeUrl::get().ClusterLoadAssignment)));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_0\"}, {}, {}));\n  
sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().Listener, {buildRouteConfig(\"listener_0\", \"route_config_0\")},\n      {buildRouteConfig(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(\n      Config::TypeUrl::get().Listener, \"\", {}, {}, {}, true,\n      Grpc::Status::WellKnownGrpcStatus::Internal,\n      fmt::format(\"does not match the message-wide type URL {}\", Config::TypeUrl::get().Listener)));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {\"route_config_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().RouteConfiguration, {buildListener(\"route_config_0\", \"cluster_0\")},\n      {buildListener(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {}, {}, true,\n                                      Grpc::Status::WellKnownGrpcStatus::Internal,\n                                      fmt::format(\"does not match the message-wide type URL {}\",\n                                                  Config::TypeUrl::get().RouteConfiguration)));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n\n  
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_0\"}, {}, {}));\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n\n  makeSingleRequest();\n}\n\n// Validate that xds can support a mix of v2 and v3 type url.\nTEST_P(AdsIntegrationTest, MixV2V3TypeUrlInDiscoveryResponse) {\n  config_helper_.addRuntimeOverride(\n      \"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade\", \"true\");\n  initialize();\n\n  // Send initial configuration.\n  // Discovery response with v3 type url.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::getTypeUrl<envoy::config::cluster::v3::Cluster>(\n          envoy::config::core::v3::ApiVersion::V3),\n      {buildCluster(\"cluster_0\")}, {buildCluster(\"cluster_0\")}, {}, \"1\", false);\n  // Discovery response with v2 type url.\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n  // Discovery response with v3 type url.\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::getTypeUrl<envoy::config::listener::v3::Listener>(\n          envoy::config::core::v3::ApiVersion::V3),\n      {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\", false);\n  // Discovery response with v2 type url.\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n\n  // Validate that we can process a request.\n  makeSingleRequest();\n}\n\n// 
Validate that the request with duplicate listeners is rejected.\nTEST_P(AdsIntegrationTest, DuplicateWarmingListeners) {\n  initialize();\n\n  // Send initial configuration, validate we can process a request.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n\n  // Send duplicate listeners and validate that the update is rejected.\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener,\n      {buildListener(\"duplicae_listener\", \"route_config_0\"),\n       buildListener(\"duplicae_listener\", \"route_config_0\")},\n      {buildListener(\"duplicae_listener\", \"route_config_0\"),\n       buildListener(\"duplicae_listener\", \"route_config_0\")},\n      {}, \"1\");\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_rejected\", 1);\n}\n\n// Regression test for the use-after-free crash when processing RDS update (#3953).\nTEST_P(AdsIntegrationTest, RdsAfterLdsWithNoRdsChanges) {\n  initialize();\n\n  // Send initial configuration.\n  
sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n\n  // Validate that we can process a request.\n  makeSingleRequest();\n\n  // Update existing LDS (change stat_prefix).\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\", \"rds_crash\")},\n      {buildListener(\"listener_0\", \"route_config_0\", \"rds_crash\")}, {}, \"2\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 2);\n\n  // Update existing RDS (no changes).\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"2\");\n\n  // Validate that we can process a request again\n  makeSingleRequest();\n}\n\n// Regression test for #11877, validate behavior of EDS updates 
when a cluster is updated and\n// an active cluster is replaced by a newer cluster undergoing warming.\nTEST_P(AdsIntegrationTest, CdsEdsReplacementWarming) {\n  initialize();\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_0\"}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {\"route_config_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n\n  
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_0\"}, {}, {}));\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n  makeSingleRequest();\n\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {buildTlsCluster(\"cluster_0\")},\n      {buildTlsCluster(\"cluster_0\")}, {}, \"2\");\n  // Inconsistent SotW and delta behaviors for warming, see\n  // https://github.com/envoyproxy/envoy/issues/11477#issuecomment-657855029.\n  if (sotw_or_delta_ != Grpc::SotwOrDelta::Delta) {\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                        {\"cluster_0\"}, {}, {}));\n  }\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildTlsClusterLoadAssignment(\"cluster_0\")},\n      {buildTlsClusterLoadAssignment(\"cluster_0\")}, {}, \"2\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"2\", {}, {}, {}, true));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"2\",\n                                      {\"cluster_0\"}, {}, {}));\n}\n\n// Validate that the request with duplicate clusters in the initial request during server init is\n// rejected.\nTEST_P(AdsIntegrationTest, DuplicateInitialClusters) {\n  initialize();\n\n  // Send initial configuration, failing each xDS once (via a type mismatch), validate we can\n  // process a request.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster,\n      {buildCluster(\"duplicate_cluster\"), 
buildCluster(\"duplicate_cluster\")},\n      {buildCluster(\"duplicate_cluster\"), buildCluster(\"duplicate_cluster\")}, {}, \"1\");\n\n  test_server_->waitForCounterGe(\"cluster_manager.cds.update_rejected\", 1);\n}\n\n// Validates that removing a redis cluster does not crash Envoy.\n// Regression test for issue https://github.com/envoyproxy/envoy/issues/7990.\nTEST_P(AdsIntegrationTest, RedisClusterRemoval) {\n  initialize();\n\n  // Send initial configuration with a redis cluster and a redis proxy listener.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {buildRedisCluster(\"redis_cluster\")},\n      {buildRedisCluster(\"redis_cluster\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"redis_cluster\"}, {\"redis_cluster\"}, {}));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"redis_cluster\")},\n      {buildClusterLoadAssignment(\"redis_cluster\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildRedisListener(\"listener_0\", \"redis_cluster\")},\n      {buildRedisListener(\"listener_0\", \"redis_cluster\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"redis_cluster\"}, {}, {}));\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n\n  // Validate that redis listener is successfully created.\n  
test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n\n  // Now send a CDS update, removing redis cluster added above.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {buildCluster(\"cluster_2\")}, {buildCluster(\"cluster_2\")},\n      {\"redis_cluster\"}, \"2\");\n\n  // Validate that the cluster is removed successfully.\n  test_server_->waitForCounterGe(\"cluster_manager.cluster_removed\", 1);\n}\n\n// Validate that the request with duplicate clusters in the subsequent requests (warming clusters)\n// is rejected.\nTEST_P(AdsIntegrationTest, DuplicateWarmingClusters) {\n  initialize();\n\n  // Send initial configuration, validate we can process a request.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n\n  
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_0\"}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {\"route_config_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_0\"}, {}, {}));\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n  makeSingleRequest();\n\n  // Send duplicate warming clusters and validate that the update is rejected.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster,\n      {buildCluster(\"duplicate_cluster\"), buildCluster(\"duplicate_cluster\")},\n      {buildCluster(\"duplicate_cluster\"), buildCluster(\"duplicate_cluster\")}, {}, \"2\");\n  test_server_->waitForCounterGe(\"cluster_manager.cds.update_rejected\", 1);\n}\n\n// Verify CDS is paused during cluster warming.\nTEST_P(AdsIntegrationTest, CdsPausedDuringWarming) {\n  initialize();\n\n  // Send initial configuration, validate we can process a request.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n  
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_0\"}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {\"route_config_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_0\"}, {}, {}));\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n  makeSingleRequest();\n\n  // Send the first warming cluster.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {buildCluster(\"warming_cluster_1\")},\n      
{buildCluster(\"warming_cluster_1\")}, {\"cluster_0\"}, \"2\");\n\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 1);\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"warming_cluster_1\"}, {\"warming_cluster_1\"}, {\"cluster_0\"}));\n\n  // Send the second warming cluster.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {buildCluster(\"warming_cluster_2\")},\n      {buildCluster(\"warming_cluster_2\")}, {}, \"3\");\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 2);\n  // We would've got a Cluster discovery request with version 2 here, had the CDS not been paused.\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"warming_cluster_2\", \"warming_cluster_1\"},\n                                      {\"warming_cluster_2\"}, {}));\n\n  // Finish warming the clusters.\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment,\n      {buildClusterLoadAssignment(\"warming_cluster_1\"),\n       buildClusterLoadAssignment(\"warming_cluster_2\")},\n      {buildClusterLoadAssignment(\"warming_cluster_1\"),\n       buildClusterLoadAssignment(\"warming_cluster_2\")},\n      {\"cluster_0\"}, \"2\");\n\n  // Validate that clusters are warmed.\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 0);\n\n  // CDS is resumed and EDS response was acknowledged.\n  if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) {\n    // Envoy will ACK both Cluster messages. Since they arrived while CDS was paused, they aren't\n    // sent until CDS is unpaused. 
Since version 3 has already arrived by the time the version 2\n    // ACK goes out, they're both acknowledging version 3.\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"3\", {}, {}, {}));\n  }\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"3\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"2\",\n                                      {\"warming_cluster_2\", \"warming_cluster_1\"}, {}, {}));\n}\n\n// Validate that warming listeners are removed when left out of SOTW update.\nTEST_P(AdsIntegrationTest, RemoveWarmingListener) {\n  initialize();\n\n  // Send initial configuration to start workers, validate we can process a request.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n\n  
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_0\"}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {\"route_config_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_0\"}, {}, {}));\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n  makeSingleRequest();\n\n  // Send a listener without its route, so it will be added as warming.\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener,\n      {buildListener(\"listener_0\", \"route_config_0\"),\n       buildListener(\"warming_listener_1\", \"nonexistent_route\")},\n      {buildListener(\"warming_listener_1\", \"nonexistent_route\")}, {}, \"2\");\n  test_server_->waitForGaugeEq(\"listener_manager.total_listeners_warming\", 1);\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"nonexistent_route\", \"route_config_0\"},\n                                      {\"nonexistent_route\"}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"2\", {}, {}, {}));\n\n  // Send a request removing the warming listener.\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      
{buildListener(\"listener_0\", \"route_config_0\")}, {\"warming_listener_1\"}, \"3\");\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_0\"}, {}, {\"nonexistent_route\"}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"3\", {}, {}, {}));\n\n  // The warming listener should be successfully removed.\n  test_server_->waitForCounterEq(\"listener_manager.listener_removed\", 1);\n  test_server_->waitForGaugeEq(\"listener_manager.total_listeners_warming\", 0);\n}\n\n// Verify cluster warming is finished only on named EDS response.\nTEST_P(AdsIntegrationTest, ClusterWarmingOnNamedResponse) {\n  initialize();\n\n  // Send initial configuration, validate we can process a request.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", 
\"route_config_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_0\"}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {\"route_config_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_0\"}, {}, {}));\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n  makeSingleRequest();\n\n  // Send the first warming cluster.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {buildCluster(\"warming_cluster_1\")},\n      {buildCluster(\"warming_cluster_1\")}, {\"cluster_0\"}, \"2\");\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 1);\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"warming_cluster_1\"}, {\"warming_cluster_1\"}, {\"cluster_0\"}));\n\n  // Send the second warming cluster.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {buildCluster(\"warming_cluster_2\")},\n      {buildCluster(\"warming_cluster_2\")}, {}, \"3\");\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 2);\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"warming_cluster_2\", 
\"warming_cluster_1\"},\n                                      {\"warming_cluster_2\"}, {}));\n\n  // Finish warming the first cluster.\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment,\n      {buildClusterLoadAssignment(\"warming_cluster_1\")},\n      {buildClusterLoadAssignment(\"warming_cluster_1\")}, {}, \"2\");\n\n  // Envoy will not finish warming of the second cluster because of the missing load assignments\n  // i,e. no named EDS response.\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 1);\n\n  // Disconnect and reconnect the stream.\n  xds_stream_->finishGrpcStream(Grpc::Status::Internal);\n\n  AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n  RELEASE_ASSERT(result, result.message());\n  xds_stream_->startGrpcStream();\n\n  // Envoy will not finish warming of the second cluster because of the missing load assignments\n  // i,e. no named EDS response even after disconnect and reconnect.\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 1);\n\n  // Finish warming the second cluster.\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment,\n      {buildClusterLoadAssignment(\"warming_cluster_2\")},\n      {buildClusterLoadAssignment(\"warming_cluster_2\")}, {}, \"3\");\n\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 0);\n}\n\n// Regression test for the use-after-free crash when processing RDS update (#3953).\nTEST_P(AdsIntegrationTest, RdsAfterLdsWithRdsChange) {\n  initialize();\n\n  // Send initial configuration.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, 
\"1\");\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n\n  // Validate that we can process a request.\n  makeSingleRequest();\n\n  // Update existing LDS (change stat_prefix).\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {buildCluster(\"cluster_1\")}, {buildCluster(\"cluster_1\")},\n      {\"cluster_0\"}, \"2\");\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_1\")},\n      {buildClusterLoadAssignment(\"cluster_1\")}, {\"cluster_0\"}, \"2\");\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\", \"rds_crash\")},\n      {buildListener(\"listener_0\", \"route_config_0\", \"rds_crash\")}, {}, \"2\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 2);\n\n  // Update existing RDS (migrate traffic to cluster_1).\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_1\")},\n      
{buildRouteConfig(\"route_config_0\", \"cluster_1\")}, {}, \"2\");\n\n  // Validate that we can process a request after RDS update\n  test_server_->waitForCounterGe(\"http.ads_test.rds.route_config_0.config_reload\", 2);\n  makeSingleRequest();\n}\n\n// Regression test for the use-after-free crash when a listener awaiting an RDS update is destroyed\n// (#6116).\nTEST_P(AdsIntegrationTest, RdsAfterLdsInvalidated) {\n\n  initialize();\n\n  // STEP 1: Initial setup\n  // ---------------------\n\n  // Initial request for any cluster, respond with cluster_0 version 1\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n\n  // Initial request for load assignment for cluster_0, respond with version 1\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n\n  // Request for updates to cluster_0 version 1, no response\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n\n  // Initial request for any listener, respond with listener_0 version 1\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, 
\"1\");\n\n  // Request for updates to load assignment version 1, no response\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_0\"}, {}, {}));\n\n  // Initial request for route_config_0 (referenced by listener_0), respond with version 1\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {\"route_config_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\");\n\n  // Wait for initial listener to be created successfully. Any subsequent listeners will then use\n  // the dynamic InitManager (see ListenerImpl::initManager).\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n\n  // STEP 2: Listener with dynamic InitManager\n  // -----------------------------------------\n\n  // Request for updates to listener_0 version 1, respond with version 2. 
Under the hood, this\n  // registers RdsRouteConfigSubscription's init target with the new ListenerImpl instance.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_1\")},\n      {buildListener(\"listener_0\", \"route_config_1\")}, {}, \"2\");\n\n  // Request for updates to route_config_0 version 1, and initial request for route_config_1\n  // (referenced by listener_0), don't respond yet!\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_0\"}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"1\",\n                                      {\"route_config_1\", \"route_config_0\"}, {\"route_config_1\"},\n                                      {}));\n\n  // STEP 3: \"New listener, who dis?\"\n  // --------------------------------\n\n  // Request for updates to listener_0 version 2, respond with version 3 (updated stats prefix).\n  // This should blow away the previous ListenerImpl instance, which is still waiting for\n  // route_config_1...\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"2\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_1\", \"omg\")},\n      {buildListener(\"listener_0\", \"route_config_1\", \"omg\")}, {}, \"3\");\n\n  // Respond to prior request for route_config_1. Under the hood, this invokes\n  // RdsRouteConfigSubscription::runInitializeCallbackIfAny, which references the defunct\n  // ListenerImpl instance. 
We should not crash in this event!\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig(\"route_config_1\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_1\", \"cluster_0\")}, {\"route_config_0\"}, \"1\");\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 2);\n}\n\nclass AdsFailIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest,\n                               public HttpIntegrationTest {\npublic:\n  AdsFailIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(),\n                            ConfigHelper::adsBootstrap(\n                                sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? \"GRPC\" : \"DELTA_GRPC\",\n                                envoy::config::core::v3::ApiVersion::V2)) {\n    create_xds_upstream_ = true;\n    use_lds_ = false;\n    sotw_or_delta_ = sotwOrDelta();\n  }\n\n  void TearDown() override { cleanUpXdsConnection(); }\n\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* grpc_service =\n          bootstrap.mutable_dynamic_resources()->mutable_ads_config()->add_grpc_services();\n      setGrpcService(*grpc_service, \"ads_cluster\", xds_upstream_->localAddress());\n      auto* ads_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      ads_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      ads_cluster->set_name(\"ads_cluster\");\n    });\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n    HttpIntegrationTest::initialize();\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsFailIntegrationTest,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Validate that we don't crash on failed ADS stream.\nTEST_P(AdsFailIntegrationTest, ConnectDisconnect) {\n  initialize();\n  
createXdsConnection();\n  ASSERT_TRUE(xds_connection_->waitForNewStream(*dispatcher_, xds_stream_));\n  xds_stream_->startGrpcStream();\n  xds_stream_->finishGrpcStream(Grpc::Status::Internal);\n}\n\nclass AdsConfigIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest,\n                                 public HttpIntegrationTest {\npublic:\n  AdsConfigIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(),\n                            ConfigHelper::adsBootstrap(\n                                sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? \"GRPC\" : \"DELTA_GRPC\",\n                                envoy::config::core::v3::ApiVersion::V2)) {\n    create_xds_upstream_ = true;\n    use_lds_ = false;\n    sotw_or_delta_ = sotwOrDelta();\n  }\n\n  void TearDown() override { cleanUpXdsConnection(); }\n\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* grpc_service =\n          bootstrap.mutable_dynamic_resources()->mutable_ads_config()->add_grpc_services();\n      setGrpcService(*grpc_service, \"ads_cluster\", xds_upstream_->localAddress());\n      auto* ads_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      ads_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      ads_cluster->set_name(\"ads_cluster\");\n\n      // Add EDS static Cluster that uses ADS as config Source.\n      auto* ads_eds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      ads_eds_cluster->set_name(\"ads_eds_cluster\");\n      ads_eds_cluster->set_type(envoy::config::cluster::v3::Cluster::EDS);\n      auto* eds_cluster_config = ads_eds_cluster->mutable_eds_cluster_config();\n      auto* eds_config = eds_cluster_config->mutable_eds_config();\n      eds_config->mutable_ads();\n    });\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n    HttpIntegrationTest::initialize();\n  
}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsConfigIntegrationTest,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// This is s regression validating that we don't crash on EDS static Cluster that uses ADS.\nTEST_P(AdsConfigIntegrationTest, EdsClusterWithAdsConfigSource) {\n  initialize();\n  createXdsConnection();\n  ASSERT_TRUE(xds_connection_->waitForNewStream(*dispatcher_, xds_stream_));\n  xds_stream_->startGrpcStream();\n  xds_stream_->finishGrpcStream(Grpc::Status::Ok);\n}\n\n// Validates that the initial xDS request batches all resources referred to in static config\nTEST_P(AdsIntegrationTest, XdsBatching) {\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    bootstrap.mutable_dynamic_resources()->clear_cds_config();\n    bootstrap.mutable_dynamic_resources()->clear_lds_config();\n\n    auto static_resources = bootstrap.mutable_static_resources();\n    static_resources->add_clusters()->MergeFrom(buildCluster(\"eds_cluster\"));\n    static_resources->add_clusters()->MergeFrom(buildCluster(\"eds_cluster2\"));\n\n    static_resources->add_listeners()->MergeFrom(buildListener(\"rds_listener\", \"route_config\"));\n    static_resources->add_listeners()->MergeFrom(buildListener(\"rds_listener2\", \"route_config2\"));\n  });\n\n  on_server_init_function_ = [this]() {\n    createXdsConnection();\n    ASSERT_TRUE(xds_connection_->waitForNewStream(*dispatcher_, xds_stream_));\n    xds_stream_->startGrpcStream();\n\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                        {\"eds_cluster2\", \"eds_cluster\"},\n                                        {\"eds_cluster2\", \"eds_cluster\"}, {}, true));\n    sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n        Config::TypeUrl::get().ClusterLoadAssignment,\n        {buildClusterLoadAssignment(\"eds_cluster\"), 
buildClusterLoadAssignment(\"eds_cluster2\")},\n        {buildClusterLoadAssignment(\"eds_cluster\"), buildClusterLoadAssignment(\"eds_cluster2\")}, {},\n        \"1\");\n\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                        {\"route_config2\", \"route_config\"},\n                                        {\"route_config2\", \"route_config\"}, {}));\n    sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n        Config::TypeUrl::get().RouteConfiguration,\n        {buildRouteConfig(\"route_config2\", \"eds_cluster2\"),\n         buildRouteConfig(\"route_config\", \"dummy_cluster\")},\n        {buildRouteConfig(\"route_config2\", \"eds_cluster2\"),\n         buildRouteConfig(\"route_config\", \"dummy_cluster\")},\n        {}, \"1\");\n  };\n\n  initialize();\n}\n\n// Validates that listeners can be removed before server start.\nTEST_P(AdsIntegrationTest, ListenerDrainBeforeServerStart) {\n  initialize();\n\n  // Initial request for cluster, response for cluster_0.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"1\");\n\n  // Initial request for load assignment for cluster_0, respond with version 1\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\");\n  // Request for updates to cluster_0 version 
1, no response\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"1\", {}, {}, {}));\n\n  // Initial request for any listener, respond with listener_0 version 1\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      Config::TypeUrl::get().Listener, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\");\n\n  // Request for updates to load assignment version 1, no response\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"cluster_0\"}, {}, {}));\n\n  // Initial request for route_config_0 (referenced by listener_0), respond with version 1\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                      {\"route_config_0\"}, {\"route_config_0\"}, {}));\n\n  test_server_->waitForGaugeGe(\"listener_manager.total_listeners_active\", 1);\n  // Before server is started, even though listeners are added to active list\n  // we mark them as \"warming\" in config dump since they're not initialized yet.\n  ASSERT_EQ(getListenersConfigDump().dynamic_listeners().size(), 1);\n  EXPECT_TRUE(getListenersConfigDump().dynamic_listeners(0).has_warming_state());\n\n  // Remove listener.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, \"1\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(Config::TypeUrl::get().Listener, {},\n                                                               {}, {\"listener_0\"}, \"2\");\n  test_server_->waitForGaugeEq(\"listener_manager.total_listeners_active\", 0);\n}\n\n// Validate that Node message is well formed.\nTEST_P(AdsIntegrationTest, NodeMessage) {\n  initialize();\n  API_NO_BOOST(envoy::api::v2::DiscoveryRequest) sotw_request;\n  
API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) delta_request;\n  const envoy::api::v2::core::Node* node = nullptr;\n  if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) {\n    EXPECT_TRUE(xds_stream_->waitForGrpcMessage(*dispatcher_, sotw_request));\n    EXPECT_TRUE(sotw_request.has_node());\n    node = &sotw_request.node();\n  } else {\n    EXPECT_TRUE(xds_stream_->waitForGrpcMessage(*dispatcher_, delta_request));\n    EXPECT_TRUE(delta_request.has_node());\n    node = &delta_request.node();\n  }\n  envoy::config::core::v3::BuildVersion build_version_msg;\n  Config::VersionConverter::upgrade(node->user_agent_build_version(), build_version_msg);\n  EXPECT_THAT(build_version_msg, ProtoEq(VersionInfo::buildVersion()));\n  EXPECT_GE(node->extensions().size(), 0);\n  EXPECT_EQ(0, node->client_features().size());\n  xds_stream_->finishGrpcStream(Grpc::Status::Ok);\n}\n\n// Check if EDS cluster defined in file is loaded before ADS request and used as xDS server\nclass AdsClusterFromFileIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest,\n                                          public HttpIntegrationTest {\npublic:\n  AdsClusterFromFileIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(),\n                            ConfigHelper::adsBootstrap(\n                                sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? 
\"GRPC\" : \"DELTA_GRPC\",\n                                envoy::config::core::v3::ApiVersion::V2)) {\n    create_xds_upstream_ = true;\n    use_lds_ = false;\n    sotw_or_delta_ = sotwOrDelta();\n  }\n\n  void TearDown() override { cleanUpXdsConnection(); }\n\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* grpc_service =\n          bootstrap.mutable_dynamic_resources()->mutable_ads_config()->add_grpc_services();\n      setGrpcService(*grpc_service, \"ads_cluster\", xds_upstream_->localAddress());\n      // Define ADS cluster\n      auto* ads_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      ads_cluster->set_name(\"ads_cluster\");\n      ads_cluster->mutable_http2_protocol_options();\n      ads_cluster->set_type(envoy::config::cluster::v3::Cluster::EDS);\n      auto* ads_cluster_config = ads_cluster->mutable_eds_cluster_config();\n      auto* ads_cluster_eds_config = ads_cluster_config->mutable_eds_config();\n      // Define port of ADS cluster\n      TestEnvironment::PortMap port_map_;\n      port_map_[\"upstream_0\"] = xds_upstream_->localAddress()->ip()->port();\n      // Path to EDS for ads_cluster\n      const std::string eds_path = TestEnvironment::temporaryFileSubstitute(\n          \"test/config/integration/server_xds.eds.ads_cluster.yaml\", port_map_, version_);\n      ads_cluster_eds_config->set_path(eds_path);\n\n      // Add EDS static Cluster that uses ADS as config Source.\n      auto* ads_eds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      ads_eds_cluster->set_name(\"ads_eds_cluster\");\n      ads_eds_cluster->set_type(envoy::config::cluster::v3::Cluster::EDS);\n      auto* eds_cluster_config = ads_eds_cluster->mutable_eds_cluster_config();\n      auto* eds_config = eds_cluster_config->mutable_eds_config();\n      eds_config->mutable_ads();\n    });\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n   
 HttpIntegrationTest::initialize();\n  }\n\n  envoy::config::endpoint::v3::ClusterLoadAssignment\n  buildClusterLoadAssignment(const std::string& name) {\n    return TestUtility::parseYaml<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n        fmt::format(R\"EOF(\n      cluster_name: {}\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: {}\n                port_value: {}\n    )EOF\",\n                    name, Network::Test::getLoopbackAddressString(ipVersion()),\n                    fake_upstreams_[0]->localAddress()->ip()->port()));\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsClusterFromFileIntegrationTest,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Validate if ADS cluster defined as EDS will be loaded from file and connection with ADS cluster\n// will be established.\nTEST_P(AdsClusterFromFileIntegrationTest, BasicTestWidsAdsEndpointLoadedFromFile) {\n  initialize();\n  createXdsConnection();\n  ASSERT_TRUE(xds_connection_->waitForNewStream(*dispatcher_, xds_stream_));\n  xds_stream_->startGrpcStream();\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"ads_eds_cluster\"}, {\"ads_eds_cluster\"}, {}));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"ads_eds_cluster\")},\n      {buildClusterLoadAssignment(\"ads_eds_cluster\")}, {}, \"1\");\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}));\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"1\",\n                                      {\"ads_eds_cluster\"}, {}, {}));\n}\n\nclass AdsIntegrationTestWithRtds : public AdsIntegrationTest {\npublic:\n  AdsIntegrationTestWithRtds() = default;\n\n  
void initialize() override {\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* layered_runtime = bootstrap.mutable_layered_runtime();\n      auto* layer = layered_runtime->add_layers();\n      layer->set_name(\"foobar\");\n      auto* rtds_layer = layer->mutable_rtds_layer();\n      rtds_layer->set_name(\"ads_rtds_layer\");\n      auto* rtds_config = rtds_layer->mutable_rtds_config();\n      rtds_config->mutable_ads();\n\n      auto* ads_config = bootstrap.mutable_dynamic_resources()->mutable_ads_config();\n      ads_config->set_set_node_on_first_message_only(true);\n    });\n    AdsIntegrationTest::initialize();\n  }\n\n  void testBasicFlow() {\n    // Test that runtime discovery request comes first and cluster discovery request comes after\n    // runtime was loaded.\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, \"\", {\"ads_rtds_layer\"},\n                                        {\"ads_rtds_layer\"}, {}, true));\n    auto some_rtds_layer = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n      name: ads_rtds_layer\n      layer:\n        foo: bar\n        baz: meh\n    )EOF\");\n    sendDiscoveryResponse<envoy::service::runtime::v3::Runtime>(\n        Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, \"1\");\n\n    test_server_->waitForCounterGe(\"runtime.load_success\", 1);\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, false));\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, \"1\", {\"ads_rtds_layer\"}, {},\n                                        {}, false));\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTestWithRtds,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\nTEST_P(AdsIntegrationTestWithRtds, Basic) {\n  initialize();\n  testBasicFlow();\n}\n\nclass AdsIntegrationTestWithRtdsAndSecondaryClusters : 
public AdsIntegrationTestWithRtds {\npublic:\n  AdsIntegrationTestWithRtdsAndSecondaryClusters() = default;\n\n  void initialize() override {\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // Add secondary cluster to the list of static resources.\n      auto* eds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      eds_cluster->set_name(\"eds_cluster\");\n      eds_cluster->set_type(envoy::config::cluster::v3::Cluster::EDS);\n      auto* eds_cluster_config = eds_cluster->mutable_eds_cluster_config();\n      eds_cluster_config->mutable_eds_config()->mutable_ads();\n    });\n    AdsIntegrationTestWithRtds::initialize();\n  }\n\n  void testBasicFlow() {\n    // Test that runtime discovery request comes first followed by the cluster load assignment\n    // discovery request for secondary cluster and then CDS discovery request.\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, \"\", {\"ads_rtds_layer\"},\n                                        {\"ads_rtds_layer\"}, {}, true));\n    auto some_rtds_layer = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n      name: ads_rtds_layer\n      layer:\n        foo: bar\n        baz: meh\n    )EOF\");\n    sendDiscoveryResponse<envoy::service::runtime::v3::Runtime>(\n        Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, \"1\");\n\n    test_server_->waitForCounterGe(\"runtime.load_success\", 1);\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                        {\"eds_cluster\"}, {\"eds_cluster\"}, {}, false));\n    sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n        Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"eds_cluster\")},\n        {buildClusterLoadAssignment(\"eds_cluster\")}, {}, \"1\");\n\n    
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, \"1\", {\"ads_rtds_layer\"}, {},\n                                        {}, false));\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, false));\n    sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n        Config::TypeUrl::get().Cluster, {buildCluster(\"cluster_0\")}, {buildCluster(\"cluster_0\")},\n        {}, \"1\");\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTestWithRtdsAndSecondaryClusters,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\nTEST_P(AdsIntegrationTestWithRtdsAndSecondaryClusters, Basic) {\n  initialize();\n  testBasicFlow();\n}\n\n// Check if EDS cluster defined in file is loaded before ADS request and used as xDS server\nclass AdsClusterV3Test : public AdsIntegrationTest {\npublic:\n  AdsClusterV3Test() : AdsIntegrationTest(envoy::config::core::v3::ApiVersion::V3) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsClusterV3Test,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Verify CDS is paused during cluster warming.\nTEST_P(AdsClusterV3Test, CdsPausedDuringWarming) {\n  initialize();\n\n  const auto cds_type_url = Config::getTypeUrl<envoy::config::cluster::v3::Cluster>(\n      envoy::config::core::v3::ApiVersion::V3);\n  const auto eds_type_url = Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      envoy::config::core::v3::ApiVersion::V3);\n  const auto lds_type_url = Config::getTypeUrl<envoy::config::listener::v3::Listener>(\n      envoy::config::core::v3::ApiVersion::V3);\n  const auto rds_type_url = Config::getTypeUrl<envoy::config::route::v3::RouteConfiguration>(\n      envoy::config::core::v3::ApiVersion::V3);\n\n  // Send initial configuration, validate we can process a request.\n  EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, \"\", {}, {}, {}, true));\n  
sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      cds_type_url, {buildCluster(\"cluster_0\")}, {buildCluster(\"cluster_0\")}, {}, \"1\", false);\n  EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, \"\", {\"cluster_0\"}, {\"cluster_0\"}, {}));\n\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      eds_type_url, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"1\", false);\n\n  EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(lds_type_url, \"\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::listener::v3::Listener>(\n      lds_type_url, {buildListener(\"listener_0\", \"route_config_0\")},\n      {buildListener(\"listener_0\", \"route_config_0\")}, {}, \"1\", false);\n\n  EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, \"1\", {\"cluster_0\"}, {}, {}));\n  EXPECT_TRUE(\n      compareDiscoveryRequest(rds_type_url, \"\", {\"route_config_0\"}, {\"route_config_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      rds_type_url, {buildRouteConfig(\"route_config_0\", \"cluster_0\")},\n      {buildRouteConfig(\"route_config_0\", \"cluster_0\")}, {}, \"1\", false);\n\n  EXPECT_TRUE(compareDiscoveryRequest(lds_type_url, \"1\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(rds_type_url, \"1\", {\"route_config_0\"}, {}, {}));\n\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n  makeSingleRequest();\n\n  // Send the first warming cluster.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      cds_type_url, {buildCluster(\"warming_cluster_1\")}, {buildCluster(\"warming_cluster_1\")},\n      {\"cluster_0\"}, \"2\", false);\n\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 1);\n\n  EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, \"1\", {\"warming_cluster_1\"},\n                                     
 {\"warming_cluster_1\"}, {\"cluster_0\"}));\n\n  // Send the second warming cluster.\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      cds_type_url, {buildCluster(\"warming_cluster_2\")}, {buildCluster(\"warming_cluster_2\")}, {},\n      \"3\", false);\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 2);\n  // We would've got a Cluster discovery request with version 2 here, had the CDS not been paused.\n\n  EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, \"1\", {\"warming_cluster_2\", \"warming_cluster_1\"},\n                                      {\"warming_cluster_2\"}, {}));\n\n  // Finish warming the clusters.\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      eds_type_url,\n      {buildClusterLoadAssignment(\"warming_cluster_1\"),\n       buildClusterLoadAssignment(\"warming_cluster_2\")},\n      {buildClusterLoadAssignment(\"warming_cluster_1\"),\n       buildClusterLoadAssignment(\"warming_cluster_2\")},\n      {\"cluster_0\"}, \"2\", false);\n\n  // Validate that clusters are warmed.\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 0);\n\n  // CDS is resumed and EDS response was acknowledged.\n  if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) {\n    // Envoy will ACK both Cluster messages. Since they arrived while CDS was paused, they aren't\n    // sent until CDS is unpaused. 
Since version 3 has already arrived by the time the version 2\n    // ACK goes out, they're both acknowledging version 3.\n    EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, \"3\", {}, {}, {}));\n  }\n  EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, \"3\", {}, {}, {}));\n  EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, \"2\", {\"warming_cluster_2\", \"warming_cluster_1\"},\n                                      {}, {}));\n}\n\n// Validates that the initial xDS request batches all resources referred to in static config\nTEST_P(AdsClusterV3Test, XdsBatching) {\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    bootstrap.mutable_dynamic_resources()->clear_cds_config();\n    bootstrap.mutable_dynamic_resources()->clear_lds_config();\n\n    auto static_resources = bootstrap.mutable_static_resources();\n    static_resources->add_clusters()->MergeFrom(buildCluster(\"eds_cluster\"));\n    static_resources->add_clusters()->MergeFrom(buildCluster(\"eds_cluster2\"));\n\n    static_resources->add_listeners()->MergeFrom(buildListener(\"rds_listener\", \"route_config\"));\n    static_resources->add_listeners()->MergeFrom(buildListener(\"rds_listener2\", \"route_config2\"));\n  });\n\n  on_server_init_function_ = [this]() {\n    createXdsConnection();\n    ASSERT_TRUE(xds_connection_->waitForNewStream(*dispatcher_, xds_stream_));\n    xds_stream_->startGrpcStream();\n\n    const auto eds_type_url =\n        Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n            envoy::config::core::v3::ApiVersion::V3);\n    const auto rds_type_url = Config::getTypeUrl<envoy::config::route::v3::RouteConfiguration>(\n        envoy::config::core::v3::ApiVersion::V3);\n\n    EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, \"\", {\"eds_cluster2\", \"eds_cluster\"},\n                                        {\"eds_cluster2\", \"eds_cluster\"}, {}, true));\n    
sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n        eds_type_url,\n        {buildClusterLoadAssignment(\"eds_cluster\"), buildClusterLoadAssignment(\"eds_cluster2\")},\n        {buildClusterLoadAssignment(\"eds_cluster\"), buildClusterLoadAssignment(\"eds_cluster2\")}, {},\n        \"1\", false);\n\n    EXPECT_TRUE(compareDiscoveryRequest(rds_type_url, \"\", {\"route_config2\", \"route_config\"},\n                                        {\"route_config2\", \"route_config\"}, {}));\n    sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n        rds_type_url,\n        {buildRouteConfig(\"route_config2\", \"eds_cluster2\"),\n         buildRouteConfig(\"route_config\", \"dummy_cluster\")},\n        {buildRouteConfig(\"route_config2\", \"eds_cluster2\"),\n         buildRouteConfig(\"route_config\", \"dummy_cluster\")},\n        {}, \"1\", false);\n  };\n\n  initialize();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/alpn_selection_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/http/utility.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/context_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/integration/http_integration.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass AlpnSelectionIntegrationTest : public testing::Test, public HttpIntegrationTest {\npublic:\n  AlpnSelectionIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1,\n                            TestEnvironment::getIpVersionsForTest().front(),\n                            ConfigHelper::httpProxyConfig()) {}\n\n  void initialize() override {\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP1);\n    setUpstreamProtocol(use_h2_ ? 
FakeHttpConnection::Type::HTTP2\n                                : FakeHttpConnection::Type::HTTP1);\n    config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* static_resources = bootstrap.mutable_static_resources();\n      auto* cluster = static_resources->mutable_clusters(0);\n\n      if (use_h2_) {\n        cluster->mutable_http2_protocol_options();\n      }\n      const std::string transport_socket_yaml = absl::StrFormat(\n          R\"EOF(\nname: tls\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n  common_tls_context:\n    alpn_protocols: [ %s ]\n    tls_certificates:\n    - certificate_chain: { filename: \"%s\" }\n      private_key: { filename: \"%s\" }\n )EOF\",\n          absl::StrJoin(configured_alpn_, \",\"),\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientcert.pem\"),\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientkey.pem\"));\n      auto* transport_socket = cluster->mutable_transport_socket();\n      TestUtility::loadFromYaml(transport_socket_yaml, *transport_socket);\n    });\n    HttpIntegrationTest::initialize();\n  }\n\n  Network::TransportSocketFactoryPtr createUpstreamSslContext() {\n    envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n    const std::string yaml = absl::StrFormat(\n        R\"EOF(\ncommon_tls_context:\n  alpn_protocols: [%s]\n  tls_certificates:\n  - certificate_chain: { filename: \"%s\" }\n    private_key: { filename: \"%s\" }\n  validation_context:\n    trusted_ca: { filename: \"%s\" }\nrequire_client_certificate: true\n)EOF\",\n        absl::StrJoin(upstream_alpn_, \",\"),\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcert.pem\"),\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamkey.pem\"),\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/cacert.pem\"));\n    
TestUtility::loadFromYaml(yaml, tls_context);\n    auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n        tls_context, factory_context_);\n    static Stats::Scope* upstream_stats_store = new Stats::IsolatedStoreImpl();\n    return std::make_unique<Extensions::TransportSockets::Tls::ServerSslSocketFactory>(\n        std::move(cfg), context_manager_, *upstream_stats_store, std::vector<std::string>{});\n  }\n\n  void createUpstreams() override {\n    auto endpoint = upstream_address_fn_(0);\n    fake_upstreams_.emplace_back(new FakeUpstream(\n        createUpstreamSslContext(), endpoint->ip()->port(),\n        use_h2_ ? FakeHttpConnection::Type::HTTP2 : FakeHttpConnection::Type::HTTP1,\n        endpoint->ip()->version(), timeSystem()));\n  }\n\n  bool use_h2_{};\n  std::vector<std::string> upstream_alpn_;\n  std::vector<std::string> configured_alpn_;\n};\n\n// No upstream ALPN is specified in the protocol, but we successfully negotiate h2 ALPN\n// due to the default ALPN set through the HTTP/2 conn pool.\nTEST_F(AlpnSelectionIntegrationTest, Http2UpstreamMatchingAlpn) {\n  use_h2_ = true;\n  upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http2);\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  waitForNextUpstreamRequest();\n  EXPECT_EQ(Http::Utility::AlpnNames::get().Http2,\n            fake_upstream_connection_->connection().nextProtocol());\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// No upstream ALPN is specified in the protocol and we fail to negotiate h2 ALPN\n// since the upstream doesn't list h2 in its ALPN list. 
Note that the call still goes\n// through because ALPN negotiation failure doesn't necessarily fail the call.\n// TODO(snowp): We should actually fail the handshake in case of negotiation failure,\n// fix that and update these tests.\nTEST_F(AlpnSelectionIntegrationTest, Http2UpstreamMismatchingAlpn) {\n  use_h2_ = true;\n  upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http11);\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  waitForNextUpstreamRequest();\n  // No ALPN negotiated.\n  EXPECT_EQ(\"\", fake_upstream_connection_->connection().nextProtocol());\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// The upstream supports h2,custom-alpn, and we configure the upstream TLS context to negotiate\n// custom-alpn. 
No attempt to negotiate h2 should happen, so we should select custom-alpn.\nTEST_F(AlpnSelectionIntegrationTest, Http2UpstreamConfiguredALPN) {\n  use_h2_ = true;\n  upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http2);\n  upstream_alpn_.emplace_back(\"custom-alpn\");\n  configured_alpn_.emplace_back(\"custom-alpn\");\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  waitForNextUpstreamRequest();\n  EXPECT_EQ(\"custom-alpn\", fake_upstream_connection_->connection().nextProtocol());\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// No upstream ALPN is specified in the protocol, but we successfully negotiate http/1.1 ALPN\n// due to the default ALPN set through the HTTP/1.1 conn pool.\nTEST_F(AlpnSelectionIntegrationTest, Http11UpstreaMatchingAlpn) {\n  upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http11);\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  waitForNextUpstreamRequest();\n  EXPECT_EQ(Http::Utility::AlpnNames::get().Http11,\n            fake_upstream_connection_->connection().nextProtocol());\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// The upstream only lists h2 but we attempt to negotiate http/1.1 due to the default ALPN set by\n// the conn pool. This results in no protocol being negotiated. 
Note that the call still goes\n// through because ALPN negotiation failure doesn't necessarily fail the call.\nTEST_F(AlpnSelectionIntegrationTest, Http11UpstreaMismatchingAlpn) {\n  upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http2);\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  waitForNextUpstreamRequest();\n  // No ALPN selected.\n  EXPECT_EQ(\"\", fake_upstream_connection_->connection().nextProtocol());\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// The upstream supports http/1.1,custom-alpn, and we configure the upstream TLS context to\n// negotiate custom-alpn. No attempt to negotiate http/1.1 should happen, so we should select\n// custom-alpn.\n// TODO(snowp): We should actually fail the handshake in case of negotiation failure,\n// fix that and update these tests.\nTEST_F(AlpnSelectionIntegrationTest, Http11UpstreamConfiguredALPN) {\n  upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http11);\n  upstream_alpn_.emplace_back(\"custom-alpn\");\n  configured_alpn_.emplace_back(\"custom-alpn\");\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  waitForNextUpstreamRequest();\n  EXPECT_EQ(\"custom-alpn\", fake_upstream_connection_->connection().nextProtocol());\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/api_listener_integration_test.cc",
    "content": "#include \"test/integration/integration.h\"\n#include \"test/mocks/http/stream_encoder.h\"\n#include \"test/server/utility.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/notification.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace {\n\nclass ApiListenerIntegrationTest : public BaseIntegrationTest,\n                                   public testing::TestWithParam<Network::Address::IpVersion> {\npublic:\n  ApiListenerIntegrationTest() : BaseIntegrationTest(GetParam(), bootstrapConfig()) {\n    use_lds_ = false;\n    autonomous_upstream_ = true;\n    defer_listener_finalization_ = true;\n  }\n\n  void SetUp() override {\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // currently ApiListener does not trigger this wait\n      // https://github.com/envoyproxy/envoy/blob/0b92c58d08d28ba7ef0ed5aaf44f90f0fccc5dce/test/integration/integration.cc#L454\n      // Thus, the ApiListener has to be added in addition to the already existing listener in the\n      // config.\n      bootstrap.mutable_static_resources()->mutable_listeners(0)->MergeFrom(\n          Server::parseListenerFromV3Yaml(apiListenerConfig()));\n    });\n  }\n\n  void TearDown() override {\n    test_server_.reset();\n    fake_upstreams_.clear();\n  }\n\n  static std::string bootstrapConfig() {\n    // At least one empty filter chain needs to be specified.\n    return absl::StrCat(ConfigHelper::baseConfig(), R\"EOF(\n    filter_chains:\n      filters:\n    )EOF\");\n  }\n\n  static std::string apiListenerConfig() {\n    return R\"EOF(\nname: api_listener\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1\napi_listener:\n  api_listener:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n    stat_prefix: hcm\n 
   route_config:\n      virtual_hosts:\n        name: integration\n        routes:\n          route:\n            cluster: cluster_0\n          match:\n            prefix: \"/\"\n        domains: \"*\"\n      name: route_config_0\n    http_filters:\n      - name: router\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n      )EOF\";\n  }\n\n  NiceMock<Http::MockResponseEncoder> stream_encoder_;\n};\n\nACTION_P(Notify, notification) { notification->Notify(); }\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ApiListenerIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ApiListenerIntegrationTest, Basic) {\n  BaseIntegrationTest::initialize();\n  absl::Notification done;\n  test_server_->server().dispatcher().post([this, &done]() -> void {\n    ASSERT_TRUE(test_server_->server().listenerManager().apiListener().has_value());\n    ASSERT_EQ(\"api_listener\", test_server_->server().listenerManager().apiListener()->get().name());\n    ASSERT_TRUE(test_server_->server().listenerManager().apiListener()->get().http().has_value());\n    auto& http_api_listener =\n        test_server_->server().listenerManager().apiListener()->get().http()->get();\n\n    ON_CALL(stream_encoder_, getStream()).WillByDefault(ReturnRef(stream_encoder_.stream_));\n    auto& stream_decoder = http_api_listener.newStream(stream_encoder_);\n\n    // The AutonomousUpstream responds with 200 OK and a body of 10 bytes.\n    // In the http1 codec the end stream is encoded with encodeData and 0 bytes.\n    Http::TestResponseHeaderMapImpl expected_response_headers{{\":status\", \"200\"}};\n    EXPECT_CALL(stream_encoder_, encodeHeaders(_, false));\n    EXPECT_CALL(stream_encoder_, encodeData(_, false));\n    EXPECT_CALL(stream_encoder_, encodeData(BufferStringEqual(\"\"), true)).WillOnce(Notify(&done));\n\n    // Send a headers-only 
request\n    stream_decoder.decodeHeaders(\n        Http::RequestHeaderMapPtr(new Http::TestRequestHeaderMapImpl{\n            {\":method\", \"GET\"}, {\":path\", \"/api\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}}),\n        true);\n  });\n  ASSERT_TRUE(done.WaitForNotificationWithTimeout(absl::Seconds(1)));\n}\n\nTEST_P(ApiListenerIntegrationTest, DestroyWithActiveStreams) {\n  autonomous_allow_incomplete_streams_ = true;\n  BaseIntegrationTest::initialize();\n  absl::Notification done;\n\n  test_server_->server().dispatcher().post([this, &done]() -> void {\n    ASSERT_TRUE(test_server_->server().listenerManager().apiListener().has_value());\n    ASSERT_EQ(\"api_listener\", test_server_->server().listenerManager().apiListener()->get().name());\n    ASSERT_TRUE(test_server_->server().listenerManager().apiListener()->get().http().has_value());\n    auto& http_api_listener =\n        test_server_->server().listenerManager().apiListener()->get().http()->get();\n\n    ON_CALL(stream_encoder_, getStream()).WillByDefault(ReturnRef(stream_encoder_.stream_));\n    auto& stream_decoder = http_api_listener.newStream(stream_encoder_);\n\n    // Send a headers-only request\n    stream_decoder.decodeHeaders(\n        Http::RequestHeaderMapPtr(new Http::TestRequestHeaderMapImpl{\n            {\":method\", \"GET\"}, {\":path\", \"/api\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}}),\n        false);\n\n    done.Notify();\n  });\n  ASSERT_TRUE(done.WaitForNotificationWithTimeout(absl::Seconds(1)));\n  // The server should shutdown the ApiListener at the right time during server termination such\n  // that no crashes occur if termination happens when the ApiListener still has ongoing streams.\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/api_version_integration_test.cc",
    "content": "#include \"envoy/api/v2/core/config_source.pb.h\"\n#include \"envoy/api/v2/discovery.pb.validate.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.validate.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"test/integration/http_integration.h\"\n\nnamespace Envoy {\nnamespace {\n\nusing Params =\n    std::tuple<Network::Address::IpVersion, bool, envoy::config::core::v3::ApiConfigSource::ApiType,\n               envoy::config::core::v3::ApiVersion, envoy::config::core::v3::ApiVersion>;\n\nclass ApiVersionIntegrationTest : public testing::TestWithParam<Params>,\n                                  public HttpIntegrationTest {\npublic:\n  ApiVersionIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion()) {\n    use_lds_ = false;\n    create_xds_upstream_ = true;\n    tls_xds_upstream_ = false;\n    defer_listener_finalization_ = true;\n    skipPortUsageValidation();\n  }\n\n  static bool hasHiddenEnvoyDeprecated(const Protobuf::Message& message) {\n    // Do this the slow copy-based way, since this is just for test validation.\n    ProtobufTypes::MessagePtr mutable_clone;\n    mutable_clone.reset(message.New());\n    mutable_clone->MergeFrom(message);\n    Config::VersionUtil::scrubHiddenEnvoyDeprecated(*mutable_clone);\n    return !TestUtility::protoEqual(message, *mutable_clone,\n                                    /*ignore_repeated_field_ordering=*/false);\n  }\n\n  static std::string paramsToString(const testing::TestParamInfo<Params>& p) {\n    return fmt::format(\"{}_{}_{}_Resource_{}_Transport_{}\",\n                       std::get<0>(p.param) == Network::Address::IpVersion::v4 ? \"IPv4\" : \"IPv6\",\n                       std::get<1>(p.param) ? 
\"ADS\" : \"SingletonXds\",\n                       envoy::config::core::v3::ApiConfigSource::ApiType_Name(std::get<2>(p.param)),\n                       envoy::config::core::v3::ApiVersion_Name(std::get<3>(p.param)),\n                       envoy::config::core::v3::ApiVersion_Name(std::get<4>(p.param)));\n  }\n\n  Network::Address::IpVersion ipVersion() const { return std::get<0>(GetParam()); }\n  bool ads() const { return std::get<1>(GetParam()); }\n  envoy::config::core::v3::ApiConfigSource::ApiType apiType() const {\n    return std::get<2>(GetParam());\n  }\n  envoy::config::core::v3::ApiVersion resourceApiVersion() const { return std::get<3>(GetParam()); }\n  envoy::config::core::v3::ApiVersion transportApiVersion() const {\n    return std::get<4>(GetParam());\n  }\n\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* xds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      xds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      xds_cluster->set_name(\"xds_cluster\");\n      xds_cluster->mutable_http2_protocol_options();\n      if (ads()) {\n        auto* api_config_source = bootstrap.mutable_dynamic_resources()->mutable_ads_config();\n        api_config_source->set_transport_api_version(transportApiVersion());\n        api_config_source->set_api_type(apiType());\n        auto* grpc_service = api_config_source->add_grpc_services();\n        grpc_service->mutable_envoy_grpc()->set_cluster_name(\"xds_cluster\");\n      }\n    });\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n    HttpIntegrationTest::initialize();\n    if (xds_stream_ == nullptr) {\n      createXdsConnection();\n      AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n      RELEASE_ASSERT(result, result.message());\n      result = xds_stream_->waitForHeadersComplete();\n      RELEASE_ASSERT(result, result.message());\n  
    endpoint_ = std::string(xds_stream_->headers().getPathValue());\n      ENVOY_LOG_MISC(debug, \"xDS endpoint {}\", endpoint_);\n    }\n  }\n\n  void setupConfigSource(envoy::config::core::v3::ConfigSource& config_source) {\n    config_source.set_resource_api_version(resourceApiVersion());\n    if (ads()) {\n      config_source.mutable_ads();\n      return;\n    }\n    auto* api_config_source = config_source.mutable_api_config_source();\n    api_config_source->set_transport_api_version(transportApiVersion());\n    api_config_source->set_api_type(apiType());\n    if (apiType() == envoy::config::core::v3::ApiConfigSource::REST) {\n      api_config_source->add_cluster_names(\"xds_cluster\");\n      api_config_source->mutable_refresh_delay()->set_seconds(1);\n    } else {\n      auto* grpc_service = api_config_source->add_grpc_services();\n      grpc_service->mutable_envoy_grpc()->set_cluster_name(\"xds_cluster\");\n    }\n  }\n\n  AssertionResult validateDiscoveryRequest(\n      const std::string& expected_v2_sotw_endpoint, const std::string& expected_v2_delta_endpoint,\n      const std::string& expected_v2_rest_endpoint, const std::string& expected_v3_sotw_endpoint,\n      const std::string& expected_v3_delta_endpoint, const std::string& expected_v3_rest_endpoint,\n      const std::string& expected_v2_type_url, const std::string& expected_v3_type_url) {\n    // Only with ADS do we allow mixed transport/resource versions.\n    if (!ads() && resourceApiVersion() != transportApiVersion()) {\n      return AssertionSuccess();\n    }\n    std::string expected_endpoint;\n    std::string expected_type_url;\n    std::string actual_type_url;\n    const char ads_v2_sotw_endpoint[] =\n        \"/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources\";\n    const char ads_v3_sotw_endpoint[] =\n        \"/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources\";\n    const char ads_v2_delta_endpoint[] =\n        
\"/envoy.service.discovery.v2.AggregatedDiscoveryService/DeltaAggregatedResources\";\n    const char ads_v3_delta_endpoint[] =\n        \"/envoy.service.discovery.v3.AggregatedDiscoveryService/DeltaAggregatedResources\";\n    switch (transportApiVersion()) {\n    case envoy::config::core::v3::ApiVersion::AUTO:\n    case envoy::config::core::v3::ApiVersion::V2: {\n      switch (apiType()) {\n      case envoy::config::core::v3::ApiConfigSource::GRPC: {\n        API_NO_BOOST(envoy::api::v2::DiscoveryRequest) discovery_request;\n        VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request));\n        EXPECT_TRUE(!hasHiddenEnvoyDeprecated(discovery_request));\n        xds_stream_->startGrpcStream();\n        actual_type_url = discovery_request.type_url();\n        expected_endpoint = ads() ? ads_v2_sotw_endpoint : expected_v2_sotw_endpoint;\n        break;\n      }\n      case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: {\n        API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) delta_discovery_request;\n        VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, delta_discovery_request));\n        EXPECT_TRUE(!hasHiddenEnvoyDeprecated(delta_discovery_request));\n        xds_stream_->startGrpcStream();\n        actual_type_url = delta_discovery_request.type_url();\n        expected_endpoint = ads() ? 
ads_v2_delta_endpoint : expected_v2_delta_endpoint;\n        break;\n      }\n      case envoy::config::core::v3::ApiConfigSource::REST: {\n        API_NO_BOOST(envoy::api::v2::DiscoveryRequest) discovery_request;\n        VERIFY_ASSERTION(xds_stream_->waitForEndStream(*dispatcher_));\n        MessageUtil::loadFromJson(xds_stream_->body().toString(), discovery_request,\n                                  ProtobufMessage::getStrictValidationVisitor());\n        actual_type_url = discovery_request.type_url();\n        expected_endpoint = expected_v2_rest_endpoint;\n        break;\n      }\n      default:\n        NOT_REACHED_GCOVR_EXCL_LINE;\n        break;\n      }\n      break;\n    }\n    case envoy::config::core::v3::ApiVersion::V3: {\n      switch (apiType()) {\n      case envoy::config::core::v3::ApiConfigSource::GRPC: {\n        API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request;\n        VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request));\n        EXPECT_TRUE(!hasHiddenEnvoyDeprecated(discovery_request));\n        actual_type_url = discovery_request.type_url();\n        expected_endpoint = ads() ? ads_v3_sotw_endpoint : expected_v3_sotw_endpoint;\n        break;\n      }\n      case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: {\n        API_NO_BOOST(envoy::service::discovery::v3::DeltaDiscoveryRequest)\n        delta_discovery_request;\n        VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, delta_discovery_request));\n        EXPECT_TRUE(!hasHiddenEnvoyDeprecated(delta_discovery_request));\n        actual_type_url = delta_discovery_request.type_url();\n        expected_endpoint = ads() ? 
ads_v3_delta_endpoint : expected_v3_delta_endpoint;\n        break;\n      }\n      case envoy::config::core::v3::ApiConfigSource::REST: {\n        API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request;\n        VERIFY_ASSERTION(xds_stream_->waitForEndStream(*dispatcher_));\n        MessageUtil::loadFromJson(xds_stream_->body().toString(), discovery_request,\n                                  ProtobufMessage::getStrictValidationVisitor());\n        actual_type_url = discovery_request.type_url();\n        expected_endpoint = expected_v3_rest_endpoint;\n        break;\n      }\n      default:\n        NOT_REACHED_GCOVR_EXCL_LINE;\n        break;\n      }\n      break;\n    }\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n    switch (resourceApiVersion()) {\n    case envoy::config::core::v3::ApiVersion::AUTO:\n    case envoy::config::core::v3::ApiVersion::V2:\n      expected_type_url = expected_v2_type_url;\n      break;\n    case envoy::config::core::v3::ApiVersion::V3:\n      expected_type_url = expected_v3_type_url;\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n    if (endpoint_ != expected_endpoint) {\n      return AssertionFailure() << \"Expected endpoint \" << expected_endpoint << \", got \"\n                                << endpoint_;\n    }\n    if (expected_type_url != actual_type_url) {\n      return AssertionFailure() << \"Expected type URL \" << expected_type_url << \", got \"\n                                << actual_type_url;\n    }\n    return AssertionSuccess();\n  }\n\n  void TearDown() override {\n    if (xds_stream_ != nullptr) {\n      cleanUpXdsConnection();\n    }\n  }\n\n  std::string endpoint_;\n};\n\n// We manage the permutations below to reduce combinatorial explosion:\n// - We only care about testing on one IP version, there should be no\n//   material difference between v4/v6.\n// - We do care about all the different ApiConfigSource variations.\n// - We explicitly give the 
AUTO versions their own independent test suite,\n//   since they are equivalent to v2, so we want to test them once but they are\n//   mostly redundant.\n// - We treat ADS and singleton xDS differently. ADS doesn't care about REST and\n//   doesn't currently support delta xDS.\nINSTANTIATE_TEST_SUITE_P(\n    SingletonApiConfigSourcesExplicitApiVersions, ApiVersionIntegrationTest,\n    testing::Combine(testing::Values(TestEnvironment::getIpVersionsForTest()[0]),\n                     testing::Values(false),\n                     testing::Values(envoy::config::core::v3::ApiConfigSource::REST,\n                                     envoy::config::core::v3::ApiConfigSource::GRPC,\n                                     envoy::config::core::v3::ApiConfigSource::DELTA_GRPC),\n                     testing::Values(envoy::config::core::v3::ApiVersion::V2,\n                                     envoy::config::core::v3::ApiVersion::V3),\n                     testing::Values(envoy::config::core::v3::ApiVersion::V2,\n                                     envoy::config::core::v3::ApiVersion::V3)),\n    ApiVersionIntegrationTest::paramsToString);\n\nINSTANTIATE_TEST_SUITE_P(\n    SingletonApiConfigSourcesAutoApiVersions, ApiVersionIntegrationTest,\n    testing::Combine(testing::Values(TestEnvironment::getIpVersionsForTest()[0]),\n                     testing::Values(false),\n                     testing::Values(envoy::config::core::v3::ApiConfigSource::REST,\n                                     envoy::config::core::v3::ApiConfigSource::GRPC,\n                                     envoy::config::core::v3::ApiConfigSource::DELTA_GRPC),\n                     testing::Values(envoy::config::core::v3::ApiVersion::AUTO),\n                     testing::Values(envoy::config::core::v3::ApiVersion::AUTO)),\n    ApiVersionIntegrationTest::paramsToString);\n\nINSTANTIATE_TEST_SUITE_P(\n    AdsApiConfigSourcesExplicitApiVersions, ApiVersionIntegrationTest,\n    
testing::Combine(testing::Values(TestEnvironment::getIpVersionsForTest()[0]),\n                     testing::Values(true),\n                     testing::Values(envoy::config::core::v3::ApiConfigSource::GRPC,\n                                     envoy::config::core::v3::ApiConfigSource::DELTA_GRPC),\n                     testing::Values(envoy::config::core::v3::ApiVersion::V2,\n                                     envoy::config::core::v3::ApiVersion::V3),\n                     testing::Values(envoy::config::core::v3::ApiVersion::V2,\n                                     envoy::config::core::v3::ApiVersion::V3)),\n    ApiVersionIntegrationTest::paramsToString);\n\nTEST_P(ApiVersionIntegrationTest, Lds) {\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    setupConfigSource(*bootstrap.mutable_dynamic_resources()->mutable_lds_config());\n  });\n  initialize();\n  ASSERT_TRUE(validateDiscoveryRequest(\n      \"/envoy.api.v2.ListenerDiscoveryService/StreamListeners\",\n      \"/envoy.api.v2.ListenerDiscoveryService/DeltaListeners\", \"/v2/discovery:listeners\",\n      \"/envoy.service.listener.v3.ListenerDiscoveryService/StreamListeners\",\n      \"/envoy.service.listener.v3.ListenerDiscoveryService/DeltaListeners\",\n      \"/v3/discovery:listeners\", \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"type.googleapis.com/envoy.config.listener.v3.Listener\"));\n}\n\nTEST_P(ApiVersionIntegrationTest, Cds) {\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    setupConfigSource(*bootstrap.mutable_dynamic_resources()->mutable_cds_config());\n  });\n  initialize();\n  ASSERT_TRUE(validateDiscoveryRequest(\n      \"/envoy.api.v2.ClusterDiscoveryService/StreamClusters\",\n      \"/envoy.api.v2.ClusterDiscoveryService/DeltaClusters\", \"/v2/discovery:clusters\",\n      \"/envoy.service.cluster.v3.ClusterDiscoveryService/StreamClusters\",\n      
\"/envoy.service.cluster.v3.ClusterDiscoveryService/DeltaClusters\", \"/v3/discovery:clusters\",\n      \"type.googleapis.com/envoy.api.v2.Cluster\",\n      \"type.googleapis.com/envoy.config.cluster.v3.Cluster\"));\n}\n\nTEST_P(ApiVersionIntegrationTest, Eds) {\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* cluster = bootstrap.mutable_static_resources()->add_clusters();\n    cluster->MergeFrom(bootstrap.static_resources().clusters(0));\n    cluster->set_name(\"some_cluster\");\n    cluster->set_type(envoy::config::cluster::v3::Cluster::EDS);\n    setupConfigSource(*cluster->mutable_eds_cluster_config()->mutable_eds_config());\n  });\n  initialize();\n  ASSERT_TRUE(validateDiscoveryRequest(\n      \"/envoy.api.v2.EndpointDiscoveryService/StreamEndpoints\",\n      \"/envoy.api.v2.EndpointDiscoveryService/DeltaEndpoints\", \"/v2/discovery:endpoints\",\n      \"/envoy.service.endpoint.v3.EndpointDiscoveryService/StreamEndpoints\",\n      \"/envoy.service.endpoint.v3.EndpointDiscoveryService/DeltaEndpoints\",\n      \"/v3/discovery:endpoints\", \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\",\n      \"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\"));\n}\n\nTEST_P(ApiVersionIntegrationTest, Rtds) {\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    if (bootstrap.mutable_layered_runtime()->layers_size() == 0) {\n      auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers();\n      admin_layer->set_name(\"admin layer\");\n      admin_layer->mutable_admin_layer();\n    }\n    auto* rtds_layer = bootstrap.mutable_layered_runtime()->add_layers();\n    rtds_layer->set_name(\"rtds_layer\");\n    setupConfigSource(*rtds_layer->mutable_rtds_layer()->mutable_rtds_config());\n  });\n  initialize();\n  ASSERT_TRUE(validateDiscoveryRequest(\n      \"/envoy.service.discovery.v2.RuntimeDiscoveryService/StreamRuntime\",\n      
\"/envoy.service.discovery.v2.RuntimeDiscoveryService/DeltaRuntime\", \"/v2/discovery:runtime\",\n      \"/envoy.service.runtime.v3.RuntimeDiscoveryService/StreamRuntime\",\n      \"/envoy.service.runtime.v3.RuntimeDiscoveryService/DeltaRuntime\", \"/v3/discovery:runtime\",\n      \"type.googleapis.com/envoy.service.discovery.v2.Runtime\",\n      \"type.googleapis.com/envoy.service.runtime.v3.Runtime\"));\n}\n\nTEST_P(ApiVersionIntegrationTest, Rds) {\n  // TODO(htuch): this segfaults, this is likely some untested existing issue.\n  if (apiType() == envoy::config::core::v3::ApiConfigSource::DELTA_GRPC) {\n    return;\n  }\n  config_helper_.addConfigModifier(\n      [this](\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              http_connection_manager) {\n        auto* rds = http_connection_manager.mutable_rds();\n        rds->set_route_config_name(\"rds\");\n        setupConfigSource(*rds->mutable_config_source());\n      });\n  initialize();\n  ASSERT_TRUE(validateDiscoveryRequest(\n      \"/envoy.api.v2.RouteDiscoveryService/StreamRoutes\",\n      \"/envoy.api.v2.RouteDiscoveryService/DeltaRoutes\", \"/v2/discovery:routes\",\n      \"/envoy.service.route.v3.RouteDiscoveryService/StreamRoutes\",\n      \"/envoy.service.route.v3.RouteDiscoveryService/DeltaRoutes\", \"/v3/discovery:routes\",\n      \"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n      \"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\"));\n}\n\n// TODO(htuch): add VHDS tests once VHDS lands.\n// TEST_P(ApiVersionIntegrationTest, Vhds) {\n// }\n\nTEST_P(ApiVersionIntegrationTest, Srds) {\n  config_helper_.addConfigModifier(\n      [this](\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              http_connection_manager) {\n        auto* scoped_routes = http_connection_manager.mutable_scoped_routes();\n        scoped_routes->set_name(\"scoped_routes\");\n        
const std::string& scope_key_builder_config_yaml = R\"EOF(\nfragments:\n  - header_value_extractor:\n      name: Addr\n      element_separator: ;\n      element:\n        key: x-foo-key\n        separator: =\n)EOF\";\n        envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n            ScopeKeyBuilder scope_key_builder;\n        TestUtility::loadFromYaml(scope_key_builder_config_yaml,\n                                  *scoped_routes->mutable_scope_key_builder());\n        setupConfigSource(*scoped_routes->mutable_scoped_rds()->mutable_scoped_rds_config_source());\n        setupConfigSource(*scoped_routes->mutable_rds_config_source());\n      });\n  initialize();\n  ASSERT_TRUE(validateDiscoveryRequest(\n      \"/envoy.api.v2.ScopedRoutesDiscoveryService/StreamScopedRoutes\",\n      \"/envoy.api.v2.ScopedRoutesDiscoveryService/DeltaScopedRoutes\", \"/v2/discovery:scoped-routes\",\n      \"/envoy.service.route.v3.ScopedRoutesDiscoveryService/StreamScopedRoutes\",\n      \"/envoy.service.route.v3.ScopedRoutesDiscoveryService/DeltaScopedRoutes\",\n      \"/v3/discovery:scoped-routes\", \"type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration\",\n      \"type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration\"));\n}\n\nTEST_P(ApiVersionIntegrationTest, Sds) {\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    auto* transport_socket = listener->mutable_filter_chains(0)->mutable_transport_socket();\n    envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n    auto* common_tls_context = tls_context.mutable_common_tls_context();\n    auto* secret_config = common_tls_context->add_tls_certificate_sds_secret_configs();\n    secret_config->set_name(\"sds\");\n    setupConfigSource(*secret_config->mutable_sds_config());\n    
transport_socket->set_name(\"envoy.transport_sockets.tls\");\n    transport_socket->mutable_typed_config()->PackFrom(tls_context);\n  });\n  initialize();\n  ASSERT_TRUE(validateDiscoveryRequest(\n      \"/envoy.service.discovery.v2.SecretDiscoveryService/StreamSecrets\",\n      \"/envoy.service.discovery.v2.SecretDiscoveryService/DeltaSecrets\", \"/v2/discovery:secrets\",\n      \"/envoy.service.secret.v3.SecretDiscoveryService/StreamSecrets\",\n      \"/envoy.service.secret.v3.SecretDiscoveryService/DeltaSecrets\", \"/v3/discovery:secrets\",\n      \"type.googleapis.com/envoy.api.v2.auth.Secret\",\n      \"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret\"));\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/autonomous_upstream.cc",
    "content": "#include \"test/integration/autonomous_upstream.h\"\n\nnamespace Envoy {\nnamespace {\n\nvoid HeaderToInt(const char header_name[], int32_t& return_int,\n                 Http::TestResponseHeaderMapImpl& headers) {\n  const std::string header_value(headers.get_(header_name));\n  if (!header_value.empty()) {\n    uint64_t parsed_value;\n    RELEASE_ASSERT(absl::SimpleAtoi(header_value, &parsed_value) &&\n                       parsed_value < static_cast<uint32_t>(std::numeric_limits<int32_t>::max()),\n                   \"\");\n    return_int = parsed_value;\n  }\n}\n\n} // namespace\n\nconst char AutonomousStream::RESPONSE_SIZE_BYTES[] = \"response_size_bytes\";\nconst char AutonomousStream::RESPONSE_DATA_BLOCKS[] = \"response_data_blocks\";\nconst char AutonomousStream::EXPECT_REQUEST_SIZE_BYTES[] = \"expect_request_size_bytes\";\nconst char AutonomousStream::RESET_AFTER_REQUEST[] = \"reset_after_request\";\nconst char AutonomousStream::NO_TRAILERS[] = \"no_trailers\";\n\nAutonomousStream::AutonomousStream(FakeHttpConnection& parent, Http::ResponseEncoder& encoder,\n                                   AutonomousUpstream& upstream, bool allow_incomplete_streams)\n    : FakeStream(parent, encoder, upstream.timeSystem()), upstream_(upstream),\n      allow_incomplete_streams_(allow_incomplete_streams) {}\n\nAutonomousStream::~AutonomousStream() {\n  if (!allow_incomplete_streams_) {\n    RELEASE_ASSERT(complete(), \"Found that end_stream is not true\");\n  }\n}\n\n// By default, automatically send a response when the request is complete.\nvoid AutonomousStream::setEndStream(bool end_stream) {\n  FakeStream::setEndStream(end_stream);\n  if (end_stream) {\n    sendResponse();\n  }\n}\n\n// Check all the special headers and send a customized response based on them.\nvoid AutonomousStream::sendResponse() {\n  Http::TestResponseHeaderMapImpl headers(*headers_);\n  upstream_.setLastRequestHeaders(*headers_);\n\n  int32_t request_body_length = -1;\n  
HeaderToInt(EXPECT_REQUEST_SIZE_BYTES, request_body_length, headers);\n  if (request_body_length >= 0) {\n    EXPECT_EQ(request_body_length, body_.length());\n  }\n\n  if (!headers.get_(RESET_AFTER_REQUEST).empty()) {\n    encodeResetStream();\n    return;\n  }\n\n  int32_t response_body_length = 10;\n  HeaderToInt(RESPONSE_SIZE_BYTES, response_body_length, headers);\n\n  int32_t response_data_blocks = 1;\n  HeaderToInt(RESPONSE_DATA_BLOCKS, response_data_blocks, headers);\n\n  const bool send_trailers = headers.get_(NO_TRAILERS).empty();\n  const bool headers_only_response = !send_trailers && response_data_blocks == 0;\n  encodeHeaders(upstream_.responseHeaders(), headers_only_response);\n  if (!headers_only_response) {\n    for (int32_t i = 0; i < response_data_blocks; ++i) {\n      encodeData(response_body_length, i == (response_data_blocks - 1) && !send_trailers);\n    }\n    if (send_trailers) {\n      encodeTrailers(upstream_.responseTrailers());\n    }\n  }\n}\n\nAutonomousHttpConnection::AutonomousHttpConnection(AutonomousUpstream& autonomous_upstream,\n                                                   SharedConnectionWrapper& shared_connection,\n                                                   Type type, AutonomousUpstream& upstream)\n    : FakeHttpConnection(autonomous_upstream, shared_connection, type, upstream.timeSystem(),\n                         Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n                         envoy::config::core::v3::HttpProtocolOptions::ALLOW),\n      upstream_(upstream) {}\n\nHttp::RequestDecoder& AutonomousHttpConnection::newStream(Http::ResponseEncoder& response_encoder,\n                                                          bool) {\n  auto stream =\n      new AutonomousStream(*this, response_encoder, upstream_, upstream_.allow_incomplete_streams_);\n  streams_.push_back(FakeStreamPtr{stream});\n  return *(stream);\n}\n\nAutonomousUpstream::~AutonomousUpstream() {\n  // Make sure the 
dispatcher is stopped before the connections are destroyed.\n  cleanUp();\n  http_connections_.clear();\n}\n\nbool AutonomousUpstream::createNetworkFilterChain(Network::Connection& connection,\n                                                  const std::vector<Network::FilterFactoryCb>&) {\n  shared_connections_.emplace_back(new SharedConnectionWrapper(connection));\n  AutonomousHttpConnectionPtr http_connection(\n      new AutonomousHttpConnection(*this, *shared_connections_.back(), http_type_, *this));\n  testing::AssertionResult result = http_connection->initialize();\n  RELEASE_ASSERT(result, result.message());\n  http_connections_.push_back(std::move(http_connection));\n  return true;\n}\n\nbool AutonomousUpstream::createListenerFilterChain(Network::ListenerFilterManager&) { return true; }\n\nvoid AutonomousUpstream::createUdpListenerFilterChain(Network::UdpListenerFilterManager&,\n                                                      Network::UdpReadFilterCallbacks&) {}\n\nvoid AutonomousUpstream::setLastRequestHeaders(const Http::HeaderMap& headers) {\n  Thread::LockGuard lock(headers_lock_);\n  last_request_headers_ = std::make_unique<Http::TestRequestHeaderMapImpl>(headers);\n}\n\nstd::unique_ptr<Http::TestRequestHeaderMapImpl> AutonomousUpstream::lastRequestHeaders() {\n  Thread::LockGuard lock(headers_lock_);\n  return std::move(last_request_headers_);\n}\n\nvoid AutonomousUpstream::setResponseTrailers(\n    std::unique_ptr<Http::TestResponseTrailerMapImpl>&& response_trailers) {\n  Thread::LockGuard lock(headers_lock_);\n  response_trailers_ = std::move(response_trailers);\n}\n\nvoid AutonomousUpstream::setResponseHeaders(\n    std::unique_ptr<Http::TestResponseHeaderMapImpl>&& response_headers) {\n  Thread::LockGuard lock(headers_lock_);\n  response_headers_ = std::move(response_headers);\n}\n\nHttp::TestResponseTrailerMapImpl AutonomousUpstream::responseTrailers() {\n  Thread::LockGuard lock(headers_lock_);\n  Http::TestResponseTrailerMapImpl 
return_trailers = *response_trailers_;\n  return return_trailers;\n}\n\nHttp::TestResponseHeaderMapImpl AutonomousUpstream::responseHeaders() {\n  Thread::LockGuard lock(headers_lock_);\n  Http::TestResponseHeaderMapImpl return_headers = *response_headers_;\n  return return_headers;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/autonomous_upstream.h",
    "content": "#pragma once\n\n#include \"test/integration/fake_upstream.h\"\n\nnamespace Envoy {\n\nclass AutonomousUpstream;\n\n// A stream which automatically responds when the downstream request is\n// completely read. By default the response is 200: OK with 10 bytes of\n// payload. This behavior can be overridden with custom request headers defined below.\nclass AutonomousStream : public FakeStream {\npublic:\n  // The number of response bytes to send. Payload is randomized.\n  static const char RESPONSE_SIZE_BYTES[];\n  // The number of data blocks send.\n  static const char RESPONSE_DATA_BLOCKS[];\n  // If set to an integer, the AutonomousStream will expect the response body to\n  // be this large.\n  static const char EXPECT_REQUEST_SIZE_BYTES[];\n  // If set, the stream will reset when the request is complete, rather than\n  // sending a response.\n  static const char RESET_AFTER_REQUEST[];\n  // Prevents upstream from sending trailers.\n  static const char NO_TRAILERS[];\n\n  AutonomousStream(FakeHttpConnection& parent, Http::ResponseEncoder& encoder,\n                   AutonomousUpstream& upstream, bool allow_incomplete_streams);\n  ~AutonomousStream() override;\n\n  void setEndStream(bool set) EXCLUSIVE_LOCKS_REQUIRED(lock_) override;\n\nprivate:\n  AutonomousUpstream& upstream_;\n  void sendResponse() EXCLUSIVE_LOCKS_REQUIRED(lock_);\n  const bool allow_incomplete_streams_{false};\n};\n\n// An upstream which creates AutonomousStreams for new incoming streams.\nclass AutonomousHttpConnection : public FakeHttpConnection {\npublic:\n  AutonomousHttpConnection(AutonomousUpstream& autonomous_upstream,\n                           SharedConnectionWrapper& shared_connection, Type type,\n                           AutonomousUpstream& upstream);\n\n  Http::RequestDecoder& newStream(Http::ResponseEncoder& response_encoder, bool) override;\n\nprivate:\n  AutonomousUpstream& upstream_;\n  std::vector<FakeStreamPtr> streams_;\n};\n\nusing 
AutonomousHttpConnectionPtr = std::unique_ptr<AutonomousHttpConnection>;\n\n// An upstream which creates AutonomousHttpConnection for new incoming connections.\nclass AutonomousUpstream : public FakeUpstream {\npublic:\n  AutonomousUpstream(const Network::Address::InstanceConstSharedPtr& address,\n                     FakeHttpConnection::Type type, Event::TestTimeSystem& time_system,\n                     bool allow_incomplete_streams)\n      : FakeUpstream(address, type, time_system),\n        allow_incomplete_streams_(allow_incomplete_streams),\n        response_trailers_(std::make_unique<Http::TestResponseTrailerMapImpl>()),\n        response_headers_(std::make_unique<Http::TestResponseHeaderMapImpl>(\n            Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}}))) {}\n\n  AutonomousUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory, uint32_t port,\n                     FakeHttpConnection::Type type, Network::Address::IpVersion version,\n                     Event::TestTimeSystem& time_system, bool allow_incomplete_streams)\n      : FakeUpstream(std::move(transport_socket_factory), port, type, version, time_system),\n        allow_incomplete_streams_(allow_incomplete_streams),\n        response_trailers_(std::make_unique<Http::TestResponseTrailerMapImpl>()),\n        response_headers_(std::make_unique<Http::TestResponseHeaderMapImpl>(\n            Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}}))) {}\n\n  ~AutonomousUpstream() override;\n  bool\n  createNetworkFilterChain(Network::Connection& connection,\n                           const std::vector<Network::FilterFactoryCb>& filter_factories) override;\n  bool createListenerFilterChain(Network::ListenerFilterManager& listener) override;\n  void createUdpListenerFilterChain(Network::UdpListenerFilterManager& listener,\n                                    Network::UdpReadFilterCallbacks& callbacks) override;\n\n  void setLastRequestHeaders(const Http::HeaderMap& headers);\n  
std::unique_ptr<Http::TestRequestHeaderMapImpl> lastRequestHeaders();\n  void setResponseTrailers(std::unique_ptr<Http::TestResponseTrailerMapImpl>&& response_trailers);\n  void setResponseHeaders(std::unique_ptr<Http::TestResponseHeaderMapImpl>&& response_headers);\n  Http::TestResponseTrailerMapImpl responseTrailers();\n  Http::TestResponseHeaderMapImpl responseHeaders();\n  const bool allow_incomplete_streams_{false};\n\nprivate:\n  Thread::MutexBasicLockable headers_lock_;\n  std::unique_ptr<Http::TestRequestHeaderMapImpl> last_request_headers_;\n  std::unique_ptr<Http::TestResponseTrailerMapImpl> response_trailers_;\n  std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers_;\n  std::vector<AutonomousHttpConnectionPtr> http_connections_;\n  std::vector<SharedConnectionWrapperPtr> shared_connections_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/base_integration_test.cc",
    "content": "#include \"test/integration/base_integration_test.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/config/api_version.h\"\n#include \"common/event/libevent.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/integration/autonomous_upstream.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/str_join.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nusing ::testing::_;\nusing ::testing::AssertionFailure;\nusing ::testing::AssertionResult;\nusing ::testing::AssertionSuccess;\nusing ::testing::Invoke;\nusing ::testing::IsSubstring;\nusing ::testing::NiceMock;\nusing ::testing::ReturnRef;\n\nBaseIntegrationTest::BaseIntegrationTest(const InstanceConstSharedPtrFn& upstream_address_fn,\n                                         Network::Address::IpVersion version,\n                                         const std::string& config)\n    : api_(Api::createApiForTest(stats_store_)),\n      mock_buffer_factory_(new NiceMock<MockBufferFactory>),\n      dispatcher_(api_->allocateDispatcher(\"test_thread\",\n                                           Buffer::WatermarkFactoryPtr{mock_buffer_factory_})),\n      version_(version), upstream_address_fn_(upstream_address_fn),\n      
config_helper_(version, *api_, config),\n      default_log_level_(TestEnvironment::getOptions().logLevel()) {\n  // This is a hack, but there are situations where we disconnect fake upstream connections and\n  // then we expect the server connection pool to get the disconnect before the next test starts.\n  // This does not always happen. This pause should allow the server to pick up the disconnect\n  // notification and clear the pool connection if necessary. A real fix would require adding fairly\n  // complex test hooks to the server and/or spin waiting on stats, neither of which I think are\n  // necessary right now.\n  timeSystem().realSleepDoNotUseWithoutScrutiny(std::chrono::milliseconds(10));\n  ON_CALL(*mock_buffer_factory_, create_(_, _, _))\n      .WillByDefault(Invoke([](std::function<void()> below_low, std::function<void()> above_high,\n                               std::function<void()> above_overflow) -> Buffer::Instance* {\n        return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow);\n      }));\n  ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_));\n  // In ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS mode, set runtime config to use legacy codecs.\n#ifdef ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS\n  ENVOY_LOG_MISC(debug, \"Using new codecs\");\n  setNewCodecs();\n#endif\n}\n\nBaseIntegrationTest::BaseIntegrationTest(Network::Address::IpVersion version,\n                                         const std::string& config)\n    : BaseIntegrationTest(\n          [version](int) {\n            return Network::Utility::parseInternetAddress(\n                Network::Test::getLoopbackAddressString(version), 0);\n          },\n          version, config) {}\n\nNetwork::ClientConnectionPtr BaseIntegrationTest::makeClientConnection(uint32_t port) {\n  return makeClientConnectionWithOptions(port, nullptr);\n}\n\nNetwork::ClientConnectionPtr BaseIntegrationTest::makeClientConnectionWithOptions(\n    uint32_t port, const 
Network::ConnectionSocket::OptionsSharedPtr& options) {\n  Network::ClientConnectionPtr connection(dispatcher_->createClientConnection(\n      Network::Utility::resolveUrl(\n          fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version_), port)),\n      Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), options));\n\n  connection->enableHalfClose(enable_half_close_);\n  return connection;\n}\n\nvoid BaseIntegrationTest::initialize() {\n  RELEASE_ASSERT(!initialized_, \"\");\n  RELEASE_ASSERT(Event::Libevent::Global::initialized(), \"\");\n  initialized_ = true;\n\n  createUpstreams();\n  createXdsUpstream();\n  createEnvoy();\n}\n\nvoid BaseIntegrationTest::createUpstreams() {\n  for (uint32_t i = 0; i < fake_upstreams_count_; ++i) {\n    auto endpoint = upstream_address_fn_(i);\n    if (autonomous_upstream_) {\n      fake_upstreams_.emplace_back(new AutonomousUpstream(\n          endpoint, upstream_protocol_, *time_system_, autonomous_allow_incomplete_streams_));\n    } else {\n      fake_upstreams_.emplace_back(new FakeUpstream(endpoint, upstream_protocol_, *time_system_,\n                                                    enable_half_close_, udp_fake_upstream_));\n    }\n  }\n}\n\nvoid BaseIntegrationTest::createEnvoy() {\n  std::vector<uint32_t> ports;\n  for (auto& upstream : fake_upstreams_) {\n    if (upstream->localAddress()->ip()) {\n      ports.push_back(upstream->localAddress()->ip()->port());\n    }\n  }\n\n  if (use_lds_) {\n    ENVOY_LOG_MISC(debug, \"Setting up file-based LDS\");\n    // Before finalization, set up a real lds path, replacing the default /dev/null\n    std::string lds_path = TestEnvironment::temporaryPath(TestUtility::uniqueFilename());\n    config_helper_.addConfigModifier(\n        [lds_path](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          bootstrap.mutable_dynamic_resources()->mutable_lds_config()->set_path(lds_path);\n        });\n  }\n\n  // 
Note that finalize assumes that every fake_upstream_ must correspond to a bootstrap config\n  // static entry. So, if you want to manually create a fake upstream without specifying it in the\n  // config, you will need to do so *after* initialize() (which calls this function) is done.\n  config_helper_.finalize(ports);\n\n  envoy::config::bootstrap::v3::Bootstrap bootstrap = config_helper_.bootstrap();\n  if (use_lds_) {\n    // After the config has been finalized, write the final listener config to the lds file.\n    const std::string lds_path = config_helper_.bootstrap().dynamic_resources().lds_config().path();\n    API_NO_BOOST(envoy::api::v2::DiscoveryResponse) lds;\n    lds.set_version_info(\"0\");\n    for (auto& listener : config_helper_.bootstrap().static_resources().listeners()) {\n      ProtobufWkt::Any* resource = lds.add_resources();\n      resource->PackFrom(listener);\n    }\n    TestEnvironment::writeStringToFileForTest(lds_path, MessageUtil::getJsonStringFromMessage(lds),\n                                              true);\n\n    // Now that the listeners have been written to the lds file, remove them from static resources\n    // or they will not be reloadable.\n    bootstrap.mutable_static_resources()->mutable_listeners()->Clear();\n  }\n  ENVOY_LOG_MISC(debug, \"Running Envoy with configuration:\\n{}\",\n                 MessageUtil::getYamlStringFromMessage(bootstrap));\n\n  const std::string bootstrap_path = TestEnvironment::writeStringToFileForTest(\n      \"bootstrap.pb\", TestUtility::getProtobufBinaryStringFromMessage(bootstrap));\n\n  std::vector<std::string> named_ports;\n  const auto& static_resources = config_helper_.bootstrap().static_resources();\n  named_ports.reserve(static_resources.listeners_size());\n  for (int i = 0; i < static_resources.listeners_size(); ++i) {\n    named_ports.push_back(static_resources.listeners(i).name());\n  }\n  createGeneratedApiTestServer(bootstrap_path, named_ports, {false, true, false}, 
false);\n}\n\nvoid BaseIntegrationTest::setUpstreamProtocol(FakeHttpConnection::Type protocol) {\n  upstream_protocol_ = protocol;\n  if (upstream_protocol_ == FakeHttpConnection::Type::HTTP2) {\n    config_helper_.addConfigModifier(\n        [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() >= 1, \"\");\n          auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n          cluster->mutable_http2_protocol_options();\n        });\n  } else {\n    RELEASE_ASSERT(protocol == FakeHttpConnection::Type::HTTP1, \"\");\n  }\n}\n\nIntegrationTcpClientPtr\nBaseIntegrationTest::makeTcpConnection(uint32_t port,\n                                       const Network::ConnectionSocket::OptionsSharedPtr& options) {\n  return std::make_unique<IntegrationTcpClient>(*dispatcher_, *mock_buffer_factory_, port, version_,\n                                                enable_half_close_, options);\n}\n\nvoid BaseIntegrationTest::registerPort(const std::string& key, uint32_t port) {\n  port_map_[key] = port;\n}\n\nuint32_t BaseIntegrationTest::lookupPort(const std::string& key) {\n  auto it = port_map_.find(key);\n  if (it != port_map_.end()) {\n    return it->second;\n  }\n  RELEASE_ASSERT(\n      false,\n      fmt::format(\"lookupPort() called on service type '{}', which has not been added to port_map_\",\n                  key));\n}\n\nvoid BaseIntegrationTest::setUpstreamAddress(\n    uint32_t upstream_index, envoy::config::endpoint::v3::LbEndpoint& endpoint) const {\n  auto* socket_address = endpoint.mutable_endpoint()->mutable_address()->mutable_socket_address();\n  socket_address->set_address(Network::Test::getLoopbackAddressString(version_));\n  socket_address->set_port_value(fake_upstreams_[upstream_index]->localAddress()->ip()->port());\n}\n\nvoid BaseIntegrationTest::registerTestServerPorts(const std::vector<std::string>& port_names) {\n  bool 
listeners_ready = false;\n  absl::Mutex l;\n  std::vector<std::reference_wrapper<Network::ListenerConfig>> listeners;\n  test_server_->server().dispatcher().post([this, &listeners, &listeners_ready, &l]() {\n    listeners = test_server_->server().listenerManager().listeners();\n    l.Lock();\n    listeners_ready = true;\n    l.Unlock();\n  });\n  l.LockWhen(absl::Condition(&listeners_ready));\n  l.Unlock();\n\n  auto listener_it = listeners.cbegin();\n  auto port_it = port_names.cbegin();\n  for (; port_it != port_names.end() && listener_it != listeners.end(); ++port_it, ++listener_it) {\n    const auto listen_addr = listener_it->get().listenSocketFactory().localAddress();\n    if (listen_addr->type() == Network::Address::Type::Ip) {\n      ENVOY_LOG(debug, \"registered '{}' as port {}.\", *port_it, listen_addr->ip()->port());\n      registerPort(*port_it, listen_addr->ip()->port());\n    }\n  }\n  const auto admin_addr = test_server_->server().admin().socket().localAddress();\n  if (admin_addr->type() == Network::Address::Type::Ip) {\n    registerPort(\"admin\", admin_addr->ip()->port());\n  }\n}\n\nstd::string getListenerDetails(Envoy::Server::Instance& server) {\n  const auto& cbs_maps = server.admin().getConfigTracker().getCallbacksMap();\n  ProtobufTypes::MessagePtr details = cbs_maps.at(\"listeners\")();\n  auto listener_info = Protobuf::down_cast<envoy::admin::v3::ListenersConfigDump>(*details);\n  return MessageUtil::getYamlStringFromMessage(listener_info.dynamic_listeners(0).error_state());\n}\n\nvoid BaseIntegrationTest::createGeneratedApiTestServer(\n    const std::string& bootstrap_path, const std::vector<std::string>& port_names,\n    Server::FieldValidationConfig validator_config, bool allow_lds_rejection) {\n  test_server_ = IntegrationTestServer::create(\n      bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, deterministic_,\n      timeSystem(), *api_, defer_listener_finalization_, process_object_, validator_config,\n   
   concurrency_, drain_time_, drain_strategy_, use_real_stats_);\n  if (config_helper_.bootstrap().static_resources().listeners_size() > 0 &&\n      !defer_listener_finalization_) {\n\n    // Wait for listeners to be created before invoking registerTestServerPorts() below, as that\n    // needs to know about the bound listener ports.\n    Event::TestTimeSystem::RealTimeBound bound(TestUtility::DefaultTimeout);\n    const char* success = \"listener_manager.listener_create_success\";\n    const char* rejected = \"listener_manager.lds.update_rejected\";\n    for (Stats::CounterSharedPtr success_counter = test_server_->counter(success),\n                                 rejected_counter = test_server_->counter(rejected);\n         (success_counter == nullptr ||\n          success_counter->value() <\n              concurrency_ * config_helper_.bootstrap().static_resources().listeners_size()) &&\n         (!allow_lds_rejection || rejected_counter == nullptr || rejected_counter->value() == 0);\n         success_counter = test_server_->counter(success),\n                                 rejected_counter = test_server_->counter(rejected)) {\n      if (!bound.withinBound()) {\n        RELEASE_ASSERT(0, \"Timed out waiting for listeners.\");\n      }\n      if (!allow_lds_rejection) {\n        RELEASE_ASSERT(rejected_counter == nullptr || rejected_counter->value() == 0,\n                       absl::StrCat(\"Lds update failed. 
Details\\n\",\n                                    getListenerDetails(test_server_->server())));\n      }\n      // TODO(mattklein123): Switch to events and waitFor().\n      time_system_.realSleepDoNotUseWithoutScrutiny(std::chrono::milliseconds(10));\n    }\n\n    registerTestServerPorts(port_names);\n  }\n}\n\nvoid BaseIntegrationTest::createApiTestServer(const ApiFilesystemConfig& api_filesystem_config,\n                                              const std::vector<std::string>& port_names,\n                                              Server::FieldValidationConfig validator_config,\n                                              bool allow_lds_rejection) {\n  const std::string eds_path = TestEnvironment::temporaryFileSubstitute(\n      api_filesystem_config.eds_path_, port_map_, version_);\n  const std::string cds_path = TestEnvironment::temporaryFileSubstitute(\n      api_filesystem_config.cds_path_, {{\"eds_json_path\", eds_path}}, port_map_, version_);\n  const std::string rds_path = TestEnvironment::temporaryFileSubstitute(\n      api_filesystem_config.rds_path_, port_map_, version_);\n  const std::string lds_path = TestEnvironment::temporaryFileSubstitute(\n      api_filesystem_config.lds_path_, {{\"rds_json_path\", rds_path}}, port_map_, version_);\n  createGeneratedApiTestServer(TestEnvironment::temporaryFileSubstitute(\n                                   api_filesystem_config.bootstrap_path_,\n                                   {{\"cds_json_path\", cds_path}, {\"lds_json_path\", lds_path}},\n                                   port_map_, version_),\n                               port_names, validator_config, allow_lds_rejection);\n}\n\nvoid BaseIntegrationTest::sendRawHttpAndWaitForResponse(int port, const char* raw_http,\n                                                        std::string* response,\n                                                        bool disconnect_after_headers_complete) {\n  auto connection = createConnectionDriver(\n      
port, raw_http,\n      [response, disconnect_after_headers_complete](Network::ClientConnection& client,\n                                                    const Buffer::Instance& data) -> void {\n        response->append(data.toString());\n        if (disconnect_after_headers_complete && response->find(\"\\r\\n\\r\\n\") != std::string::npos) {\n          client.close(Network::ConnectionCloseType::NoFlush);\n        }\n      });\n\n  connection->run();\n}\n\nvoid BaseIntegrationTest::useListenerAccessLog(absl::string_view format) {\n  listener_access_log_name_ = TestEnvironment::temporaryPath(TestUtility::uniqueFilename());\n  ASSERT_TRUE(config_helper_.setListenerAccessLog(listener_access_log_name_, format));\n}\n\n// Assuming logs are newline delineated, return the start index of the nth entry.\n// If there are not n entries, it will return file.length() (end of the string\n// index)\nsize_t entryIndex(const std::string& file, uint32_t entry) {\n  size_t index = 0;\n  for (uint32_t i = 0; i < entry; ++i) {\n    index = file.find('\\n', index);\n    if (index == std::string::npos || index == file.length()) {\n      return file.length();\n    }\n    ++index;\n  }\n  return index;\n}\n\nstd::string BaseIntegrationTest::waitForAccessLog(const std::string& filename, uint32_t entry) {\n  // Wait a max of 1s for logs to flush to disk.\n  for (int i = 0; i < 1000; ++i) {\n    std::string contents = TestEnvironment::readFileToStringForTest(filename, false);\n    size_t index = entryIndex(contents, entry);\n    if (contents.length() > index) {\n      return contents.substr(index);\n    }\n    absl::SleepFor(absl::Milliseconds(1));\n  }\n  RELEASE_ASSERT(0, \"Timed out waiting for access log\");\n  return \"\";\n}\n\nvoid BaseIntegrationTest::createXdsUpstream() {\n  if (create_xds_upstream_ == false) {\n    return;\n  }\n  if (tls_xds_upstream_ == false) {\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  } else {\n    
envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n    auto* common_tls_context = tls_context.mutable_common_tls_context();\n    common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http2);\n    auto* tls_cert = common_tls_context->add_tls_certificates();\n    tls_cert->mutable_certificate_chain()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcert.pem\"));\n    tls_cert->mutable_private_key()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamkey.pem\"));\n    auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n        tls_context, factory_context_);\n\n    upstream_stats_store_ = std::make_unique<Stats::TestIsolatedStoreImpl>();\n    auto context = std::make_unique<Extensions::TransportSockets::Tls::ServerSslSocketFactory>(\n        std::move(cfg), context_manager_, *upstream_stats_store_, std::vector<std::string>{});\n    addFakeUpstream(std::move(context), FakeHttpConnection::Type::HTTP2);\n  }\n  xds_upstream_ = fake_upstreams_[1].get();\n}\n\nvoid BaseIntegrationTest::createXdsConnection() {\n  AssertionResult result = xds_upstream_->waitForHttpConnection(*dispatcher_, xds_connection_);\n  RELEASE_ASSERT(result, result.message());\n}\n\nvoid BaseIntegrationTest::cleanUpXdsConnection() {\n  AssertionResult result = xds_connection_->close();\n  RELEASE_ASSERT(result, result.message());\n  result = xds_connection_->waitForDisconnect();\n  RELEASE_ASSERT(result, result.message());\n  xds_connection_.reset();\n}\n\nAssertionResult BaseIntegrationTest::compareDiscoveryRequest(\n    const std::string& expected_type_url, const std::string& expected_version,\n    const std::vector<std::string>& expected_resource_names,\n    const std::vector<std::string>& expected_resource_names_added,\n    const std::vector<std::string>& expected_resource_names_removed, bool expect_node,\n    const 
Protobuf::int32 expected_error_code, const std::string& expected_error_substring) {\n  if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) {\n    return compareSotwDiscoveryRequest(expected_type_url, expected_version, expected_resource_names,\n                                       expect_node, expected_error_code, expected_error_substring);\n  } else {\n    return compareDeltaDiscoveryRequest(expected_type_url, expected_resource_names_added,\n                                        expected_resource_names_removed, expected_error_code,\n                                        expected_error_substring);\n  }\n}\n\nAssertionResult BaseIntegrationTest::compareSotwDiscoveryRequest(\n    const std::string& expected_type_url, const std::string& expected_version,\n    const std::vector<std::string>& expected_resource_names, bool expect_node,\n    const Protobuf::int32 expected_error_code, const std::string& expected_error_substring) {\n  API_NO_BOOST(envoy::api::v2::DiscoveryRequest) discovery_request;\n  VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request));\n\n  if (expect_node) {\n    EXPECT_TRUE(discovery_request.has_node());\n    EXPECT_FALSE(discovery_request.node().id().empty());\n    EXPECT_FALSE(discovery_request.node().cluster().empty());\n  } else {\n    EXPECT_FALSE(discovery_request.has_node());\n  }\n\n  if (expected_type_url != discovery_request.type_url()) {\n    return AssertionFailure() << fmt::format(\"type_url {} does not match expected {}\",\n                                             discovery_request.type_url(), expected_type_url);\n  }\n  if (!(expected_error_code == discovery_request.error_detail().code())) {\n    return AssertionFailure() << fmt::format(\"error_code {} does not match expected {}\",\n                                             discovery_request.error_detail().code(),\n                                             expected_error_code);\n  }\n  EXPECT_TRUE(\n      IsSubstring(\"\", \"\", 
expected_error_substring, discovery_request.error_detail().message()));\n  const std::vector<std::string> resource_names(discovery_request.resource_names().cbegin(),\n                                                discovery_request.resource_names().cend());\n  if (expected_resource_names != resource_names) {\n    return AssertionFailure() << fmt::format(\n               \"resources {} do not match expected {} in {}\", absl::StrJoin(resource_names, \",\"),\n               absl::StrJoin(expected_resource_names, \",\"), discovery_request.DebugString());\n  }\n  if (expected_version != discovery_request.version_info()) {\n    return AssertionFailure() << fmt::format(\"version {} does not match expected {} in {}\",\n                                             discovery_request.version_info(), expected_version,\n                                             discovery_request.DebugString());\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult compareSets(const std::set<std::string>& set1, const std::set<std::string>& set2,\n                            absl::string_view name) {\n  if (set1 == set2) {\n    return AssertionSuccess();\n  }\n  auto failure = AssertionFailure() << name << \" field not as expected.\\nExpected: {\";\n  for (const auto& x : set1) {\n    failure << x << \", \";\n  }\n  failure << \"}\\nActual: {\";\n  for (const auto& x : set2) {\n    failure << x << \", \";\n  }\n  return failure << \"}\";\n}\n\nAssertionResult BaseIntegrationTest::waitForPortAvailable(uint32_t port,\n                                                          std::chrono::milliseconds timeout) {\n  Event::TestTimeSystem::RealTimeBound bound(timeout);\n  while (bound.withinBound()) {\n    try {\n      Network::TcpListenSocket(Network::Utility::getAddressWithPort(\n                                   *Network::Test::getCanonicalLoopbackAddress(version_), port),\n                               nullptr, true);\n      return AssertionSuccess();\n    } catch (const EnvoyException&) 
{\n      // The nature of this function requires using a real sleep here.\n      timeSystem().realSleepDoNotUseWithoutScrutiny(std::chrono::milliseconds(100));\n    }\n  }\n\n  return AssertionFailure() << \"Timeout waiting for port availability\";\n}\n\nAssertionResult BaseIntegrationTest::compareDeltaDiscoveryRequest(\n    const std::string& expected_type_url,\n    const std::vector<std::string>& expected_resource_subscriptions,\n    const std::vector<std::string>& expected_resource_unsubscriptions, FakeStreamPtr& xds_stream,\n    const Protobuf::int32 expected_error_code, const std::string& expected_error_substring) {\n  API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) request;\n  VERIFY_ASSERTION(xds_stream->waitForGrpcMessage(*dispatcher_, request));\n\n  // Verify all we care about node.\n  if (!request.has_node() || request.node().id().empty() || request.node().cluster().empty()) {\n    return AssertionFailure() << \"Weird node field\";\n  }\n  if (request.type_url() != expected_type_url) {\n    return AssertionFailure() << fmt::format(\"type_url {} does not match expected {}.\",\n                                             request.type_url(), expected_type_url);\n  }\n  // Sort to ignore ordering.\n  std::set<std::string> expected_sub{expected_resource_subscriptions.begin(),\n                                     expected_resource_subscriptions.end()};\n  std::set<std::string> expected_unsub{expected_resource_unsubscriptions.begin(),\n                                       expected_resource_unsubscriptions.end()};\n  std::set<std::string> actual_sub{request.resource_names_subscribe().begin(),\n                                   request.resource_names_subscribe().end()};\n  std::set<std::string> actual_unsub{request.resource_names_unsubscribe().begin(),\n                                     request.resource_names_unsubscribe().end()};\n  auto sub_result = compareSets(expected_sub, actual_sub, \"expected_resource_subscriptions\");\n  if (!sub_result) {\n   
 return sub_result;\n  }\n  auto unsub_result =\n      compareSets(expected_unsub, actual_unsub, \"expected_resource_unsubscriptions\");\n  if (!unsub_result) {\n    return unsub_result;\n  }\n  // (We don't care about response_nonce or initial_resource_versions.)\n\n  if (request.error_detail().code() != expected_error_code) {\n    return AssertionFailure() << fmt::format(\n               \"error code {} does not match expected {}. (Error message is {}).\",\n               request.error_detail().code(), expected_error_code,\n               request.error_detail().message());\n  }\n  if (expected_error_code != Grpc::Status::WellKnownGrpcStatus::Ok &&\n      request.error_detail().message().find(expected_error_substring) == std::string::npos) {\n    return AssertionFailure() << \"\\\"\" << expected_error_substring\n                              << \"\\\" is not a substring of actual error message \\\"\"\n                              << request.error_detail().message() << \"\\\"\";\n  }\n  return AssertionSuccess();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/base_integration_test.h",
    "content": "#pragma once\n\n#include <functional>\n#include <string>\n#include <vector>\n\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/server/process_context.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/config/version_converter.h\"\n\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/config/utility.h\"\n#include \"test/integration/fake_upstream.h\"\n#include \"test/integration/integration_tcp_client.h\"\n#include \"test/integration/server.h\"\n#include \"test/integration/utility.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/test_time.h\"\n\n#include \"absl/types/optional.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\n\nstruct ApiFilesystemConfig {\n  std::string bootstrap_path_;\n  std::string cds_path_;\n  std::string eds_path_;\n  std::string lds_path_;\n  std::string rds_path_;\n};\n\n/**\n * Test fixture for all integration tests.\n */\nclass BaseIntegrationTest : protected Logger::Loggable<Logger::Id::testing> {\npublic:\n  using TestTimeSystemPtr = std::unique_ptr<Event::TestTimeSystem>;\n  using InstanceConstSharedPtrFn = std::function<Network::Address::InstanceConstSharedPtr(int)>;\n\n  // Creates a test fixture with an upstream bound to INADDR_ANY on an unspecified port using the\n  // provided IP |version|.\n  BaseIntegrationTest(Network::Address::IpVersion version,\n                      const std::string& config = ConfigHelper::httpProxyConfig());\n  BaseIntegrationTest(Network::Address::IpVersion version, TestTimeSystemPtr,\n                      const std::string& config = ConfigHelper::httpProxyConfig())\n      : BaseIntegrationTest(version, config) {}\n  // Creates a test 
fixture with a specified |upstream_address| function that provides the IP and\n  // port to use.\n  BaseIntegrationTest(const InstanceConstSharedPtrFn& upstream_address_fn,\n                      Network::Address::IpVersion version,\n                      const std::string& config = ConfigHelper::httpProxyConfig());\n  virtual ~BaseIntegrationTest() = default;\n\n  // TODO(jmarantz): Remove this once\n  // https://github.com/envoyproxy/envoy-filter-example/pull/69 is reverted.\n  static TestTimeSystemPtr realTime() { return TestTimeSystemPtr(); }\n\n  // Initialize the basic proto configuration, create fake upstreams, and start Envoy.\n  virtual void initialize();\n  // Set up the fake upstream connections. This is called by initialize() and\n  // is virtual to allow subclass overrides.\n  virtual void createUpstreams();\n  // Finalize the config and spin up an Envoy instance.\n  virtual void createEnvoy();\n  // Sets upstream_protocol_ and alters the upstream protocol in the config_helper_\n  void setUpstreamProtocol(FakeHttpConnection::Type protocol);\n  // Sets fake_upstreams_count_\n  void setUpstreamCount(uint32_t count) { fake_upstreams_count_ = count; }\n  // Skip validation that ensures that all upstream ports are referenced by the\n  // configuration generated in ConfigHelper::finalize.\n  void skipPortUsageValidation() { config_helper_.skipPortUsageValidation(); }\n  // Make test more deterministic by using a fixed RNG value.\n  void setDeterministic() { deterministic_ = true; }\n  void setNewCodecs() { config_helper_.setNewCodecs(); }\n\n  FakeHttpConnection::Type upstreamProtocol() const { return upstream_protocol_; }\n\n  IntegrationTcpClientPtr\n  makeTcpConnection(uint32_t port,\n                    const Network::ConnectionSocket::OptionsSharedPtr& options = nullptr);\n\n  // Test-wide port map.\n  void registerPort(const std::string& key, uint32_t port);\n  uint32_t lookupPort(const std::string& key);\n\n  // Set the endpoint's socket address to 
point at upstream at given index.\n  void setUpstreamAddress(uint32_t upstream_index,\n                          envoy::config::endpoint::v3::LbEndpoint& endpoint) const;\n\n  Network::ClientConnectionPtr makeClientConnection(uint32_t port);\n  virtual Network::ClientConnectionPtr\n  makeClientConnectionWithOptions(uint32_t port,\n                                  const Network::ConnectionSocket::OptionsSharedPtr& options);\n\n  void registerTestServerPorts(const std::vector<std::string>& port_names);\n  void createGeneratedApiTestServer(const std::string& bootstrap_path,\n                                    const std::vector<std::string>& port_names,\n                                    Server::FieldValidationConfig validator_config,\n                                    bool allow_lds_rejection);\n  void createApiTestServer(const ApiFilesystemConfig& api_filesystem_config,\n                           const std::vector<std::string>& port_names,\n                           Server::FieldValidationConfig validator_config,\n                           bool allow_lds_rejection);\n\n  Event::TestTimeSystem& timeSystem() { return time_system_; }\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr api_;\n  Api::ApiPtr api_for_server_stat_store_;\n  MockBufferFactory* mock_buffer_factory_; // Will point to the dispatcher's factory.\n\n  // Enable the listener access log\n  void useListenerAccessLog(absl::string_view format = \"\");\n  // Waits for the nth access log entry, defaulting to log entry 0.\n  std::string waitForAccessLog(const std::string& filename, uint32_t entry = 0);\n\n  std::string listener_access_log_name_;\n\n  // Functions for testing reloadable config (xDS)\n  void createXdsUpstream();\n  void createXdsConnection();\n  void cleanUpXdsConnection();\n\n  // See if a port can be successfully bound within the given timeout.\n  ABSL_MUST_USE_RESULT AssertionResult waitForPortAvailable(\n      uint32_t port, std::chrono::milliseconds timeout = 
TestUtility::DefaultTimeout);\n\n  // Helpers for setting up expectations and making the internal gears turn for xDS request/response\n  // sending/receiving to/from the (imaginary) xDS server. You should almost always use\n  // compareDiscoveryRequest() and sendDiscoveryResponse(), but the SotW/delta-specific versions are\n  // available if you're writing a SotW/delta-specific test.\n  // TODO(fredlas) expect_node was defaulting false here; the delta+SotW unification work restores\n  // it.\n  AssertionResult compareDiscoveryRequest(\n      const std::string& expected_type_url, const std::string& expected_version,\n      const std::vector<std::string>& expected_resource_names,\n      const std::vector<std::string>& expected_resource_names_added,\n      const std::vector<std::string>& expected_resource_names_removed, bool expect_node = true,\n      const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok,\n      const std::string& expected_error_message = \"\");\n  template <class T>\n  void sendDiscoveryResponse(const std::string& type_url, const std::vector<T>& state_of_the_world,\n                             const std::vector<T>& added_or_updated,\n                             const std::vector<std::string>& removed, const std::string& version,\n                             const bool api_downgrade = true) {\n    if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) {\n      sendSotwDiscoveryResponse(type_url, state_of_the_world, version, api_downgrade);\n    } else {\n      sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, api_downgrade);\n    }\n  }\n\n  AssertionResult compareDeltaDiscoveryRequest(\n      const std::string& expected_type_url,\n      const std::vector<std::string>& expected_resource_subscriptions,\n      const std::vector<std::string>& expected_resource_unsubscriptions,\n      const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok,\n      const std::string& 
expected_error_message = \"\") {\n    return compareDeltaDiscoveryRequest(expected_type_url, expected_resource_subscriptions,\n                                        expected_resource_unsubscriptions, xds_stream_,\n                                        expected_error_code, expected_error_message);\n  }\n\n  AssertionResult compareDeltaDiscoveryRequest(\n      const std::string& expected_type_url,\n      const std::vector<std::string>& expected_resource_subscriptions,\n      const std::vector<std::string>& expected_resource_unsubscriptions, FakeStreamPtr& stream,\n      const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok,\n      const std::string& expected_error_message = \"\");\n\n  // TODO(fredlas) expect_node was defaulting false here; the delta+SotW unification work restores\n  // it.\n  AssertionResult compareSotwDiscoveryRequest(\n      const std::string& expected_type_url, const std::string& expected_version,\n      const std::vector<std::string>& expected_resource_names, bool expect_node = true,\n      const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok,\n      const std::string& expected_error_message = \"\");\n\n  template <class T>\n  void sendSotwDiscoveryResponse(const std::string& type_url, const std::vector<T>& messages,\n                                 const std::string& version, const bool api_downgrade = true) {\n    API_NO_BOOST(envoy::api::v2::DiscoveryResponse) discovery_response;\n    discovery_response.set_version_info(version);\n    discovery_response.set_type_url(type_url);\n    for (const auto& message : messages) {\n      if (api_downgrade) {\n        discovery_response.add_resources()->PackFrom(API_DOWNGRADE(message));\n      } else {\n        discovery_response.add_resources()->PackFrom(message);\n      }\n    }\n    static int next_nonce_counter = 0;\n    discovery_response.set_nonce(absl::StrCat(\"nonce\", next_nonce_counter++));\n    
xds_stream_->sendGrpcMessage(discovery_response);\n  }\n\n  template <class T>\n  void sendDeltaDiscoveryResponse(const std::string& type_url,\n                                  const std::vector<T>& added_or_updated,\n                                  const std::vector<std::string>& removed,\n                                  const std::string& version, const bool api_downgrade = true) {\n    sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, xds_stream_, {},\n                               api_downgrade);\n  }\n  template <class T>\n  void\n  sendDeltaDiscoveryResponse(const std::string& type_url, const std::vector<T>& added_or_updated,\n                             const std::vector<std::string>& removed, const std::string& version,\n                             FakeStreamPtr& stream, const std::vector<std::string>& aliases = {},\n                             const bool api_downgrade = true) {\n    auto response = createDeltaDiscoveryResponse<T>(type_url, added_or_updated, removed, version,\n                                                    aliases, api_downgrade);\n    stream->sendGrpcMessage(response);\n  }\n\n  template <class T>\n  envoy::api::v2::DeltaDiscoveryResponse\n  createDeltaDiscoveryResponse(const std::string& type_url, const std::vector<T>& added_or_updated,\n                               const std::vector<std::string>& removed, const std::string& version,\n                               const std::vector<std::string>& aliases,\n                               const bool api_downgrade = true) {\n\n    API_NO_BOOST(envoy::api::v2::DeltaDiscoveryResponse) response;\n    response.set_system_version_info(\"system_version_info_this_is_a_test\");\n    response.set_type_url(type_url);\n    for (const auto& message : added_or_updated) {\n      auto* resource = response.add_resources();\n      ProtobufWkt::Any temp_any;\n      if (api_downgrade) {\n        temp_any.PackFrom(API_DOWNGRADE(message));\n        
resource->mutable_resource()->PackFrom(API_DOWNGRADE(message));\n      } else {\n        temp_any.PackFrom(message);\n        resource->mutable_resource()->PackFrom(message);\n      }\n      resource->set_name(TestUtility::xdsResourceName(temp_any));\n      resource->set_version(version);\n      for (const auto& alias : aliases) {\n        resource->add_aliases(alias);\n      }\n    }\n    *response.mutable_removed_resources() = {removed.begin(), removed.end()};\n    static int next_nonce_counter = 0;\n    response.set_nonce(absl::StrCat(\"nonce\", next_nonce_counter++));\n    return response;\n  }\n\nprivate:\n  Event::GlobalTimeSystem time_system_;\n\npublic:\n  Event::DispatcherPtr dispatcher_;\n\n  /**\n   * Open a connection to Envoy, send a series of bytes, and return the\n   * response. This function will continue reading response bytes until Envoy\n   * closes the connection (as a part of error handling) or (if configured true)\n   * the complete headers are read.\n   *\n   * @param port the port to connect to.\n   * @param raw_http the data to send.\n   * @param response the response data will be sent here\n   * @param if the connection should be terminated once '\\r\\n\\r\\n' has been read.\n   **/\n  void sendRawHttpAndWaitForResponse(int port, const char* raw_http, std::string* response,\n                                     bool disconnect_after_headers_complete = false);\n\n  /**\n   * Helper to create ConnectionDriver.\n   *\n   * @param port the port to connect to.\n   * @param initial_data the data to send.\n   * @param data_callback the callback on the received data.\n   **/\n  std::unique_ptr<RawConnectionDriver> createConnectionDriver(\n      uint32_t port, const std::string& initial_data,\n      std::function<void(Network::ClientConnection&, const Buffer::Instance&)>&& data_callback) {\n    Buffer::OwnedImpl buffer(initial_data);\n    return std::make_unique<RawConnectionDriver>(port, buffer, data_callback, version_,\n                           
                      *dispatcher_);\n  }\n\n  // Helper to create FakeUpstream.\n  // Creates a fake upstream bound to the specified unix domain socket path.\n  std::unique_ptr<FakeUpstream> createFakeUpstream(const std::string& uds_path,\n                                                   FakeHttpConnection::Type type) {\n    return std::make_unique<FakeUpstream>(uds_path, type, timeSystem());\n  }\n  // Creates a fake upstream bound to the specified |address|.\n  std::unique_ptr<FakeUpstream>\n  createFakeUpstream(const Network::Address::InstanceConstSharedPtr& address,\n                     FakeHttpConnection::Type type, bool enable_half_close = false,\n                     bool udp_fake_upstream = false) {\n    return std::make_unique<FakeUpstream>(address, type, timeSystem(), enable_half_close,\n                                          udp_fake_upstream);\n  }\n  // Creates a fake upstream bound to INADDR_ANY and there is no specified port.\n  std::unique_ptr<FakeUpstream> createFakeUpstream(FakeHttpConnection::Type type,\n                                                   bool enable_half_close = false) {\n    return std::make_unique<FakeUpstream>(0, type, version_, timeSystem(), enable_half_close);\n  }\n  std::unique_ptr<FakeUpstream>\n  createFakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory,\n                     FakeHttpConnection::Type type) {\n    return std::make_unique<FakeUpstream>(std::move(transport_socket_factory), 0, type, version_,\n                                          timeSystem());\n  }\n  // Helper to add FakeUpstream.\n  // Add a fake upstream bound to the specified unix domain socket path.\n  void addFakeUpstream(const std::string& uds_path, FakeHttpConnection::Type type) {\n    fake_upstreams_.emplace_back(createFakeUpstream(uds_path, type));\n  }\n  // Add a fake upstream bound to the specified |address|.\n  void addFakeUpstream(const Network::Address::InstanceConstSharedPtr& address,\n                     
  FakeHttpConnection::Type type, bool enable_half_close = false,\n                       bool udp_fake_upstream = false) {\n    fake_upstreams_.emplace_back(\n        createFakeUpstream(address, type, enable_half_close, udp_fake_upstream));\n  }\n  // Add a fake upstream bound to INADDR_ANY and there is no specified port.\n  void addFakeUpstream(FakeHttpConnection::Type type, bool enable_half_close = false) {\n    fake_upstreams_.emplace_back(createFakeUpstream(type, enable_half_close));\n  }\n  void addFakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory,\n                       FakeHttpConnection::Type type) {\n    fake_upstreams_.emplace_back(createFakeUpstream(std::move(transport_socket_factory), type));\n  }\n\nprotected:\n  bool initialized() const { return initialized_; }\n\n  std::unique_ptr<Stats::Scope> upstream_stats_store_;\n\n  // Make sure the test server will be torn down after any fake client.\n  // The test server owns the runtime, which is often accessed by client and\n  // fake upstream codecs and must outlast them.\n  IntegrationTestServerPtr test_server_;\n\n  // The IpVersion (IPv4, IPv6) to use.\n  Network::Address::IpVersion version_;\n  // IP Address to use when binding sockets on upstreams.\n  InstanceConstSharedPtrFn upstream_address_fn_;\n  // The config for envoy start-up.\n  ConfigHelper config_helper_;\n  // The ProcessObject to use when constructing the envoy server.\n  ProcessObjectOptRef process_object_{absl::nullopt};\n\n  // Steps that should be done before the envoy server starting.\n  std::function<void(IntegrationTestServer&)> on_server_ready_function_;\n\n  // Steps that should be done in parallel with the envoy server starting. E.g., xDS\n  // pre-init, control plane synchronization needed for server start.\n  std::function<void()> on_server_init_function_;\n\n  // A map of keys to port names. 
Generally the names are pulled from the v2 listener name\n  // but if a listener is created via ADS, it will be from whatever key is used with registerPort.\n  TestEnvironment::PortMap port_map_;\n\n  // The DrainStrategy that dictates the behaviour of\n  // DrainManagerImpl::drainClose().\n  Server::DrainStrategy drain_strategy_{Server::DrainStrategy::Gradual};\n\n  // Member variables for xDS testing.\n  FakeUpstream* xds_upstream_{};\n  FakeHttpConnectionPtr xds_connection_;\n  FakeStreamPtr xds_stream_;\n  bool create_xds_upstream_{false};\n  bool tls_xds_upstream_{false};\n  bool use_lds_{true}; // Use the integration framework's LDS set up.\n\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context_;\n  Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{timeSystem()};\n\n  // The fake upstreams_ are created using the context_manager, so make sure\n  // they are destroyed before it is.\n  std::vector<std::unique_ptr<FakeUpstream>> fake_upstreams_;\n\n  Grpc::SotwOrDelta sotw_or_delta_{Grpc::SotwOrDelta::Sotw};\n\n  spdlog::level::level_enum default_log_level_;\n\n  // Target number of upstreams.\n  uint32_t fake_upstreams_count_{1};\n\n  // The duration of the drain manager graceful drain period.\n  std::chrono::seconds drain_time_{1};\n\n  // The number of worker threads that the test server uses.\n  uint32_t concurrency_{1};\n\n  // If true, use AutonomousUpstream for fake upstreams.\n  bool autonomous_upstream_{false};\n\n  // If true, allow incomplete streams in AutonomousUpstream\n  // This does nothing if autonomous_upstream_ is false\n  bool autonomous_allow_incomplete_streams_{false};\n\n  bool enable_half_close_{false};\n\n  // Whether the default created fake upstreams are UDP listeners.\n  bool udp_fake_upstream_{false};\n\n  // True if test will use a fixed RNG value.\n  bool deterministic_{};\n\n  // Set true when your test will itself take care of ensuring listeners are up, and 
registering\n  // them in the port_map_.\n  bool defer_listener_finalization_{false};\n\n  // By default the test server will use custom stats to notify on increment.\n  // This override exists for tests measuring stats memory.\n  bool use_real_stats_{};\n\nprivate:\n  // The type for the Envoy-to-backend connection\n  FakeHttpConnection::Type upstream_protocol_{FakeHttpConnection::Type::HTTP1};\n  // True if initialized() has been called.\n  bool initialized_{};\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/capture_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.integration;\n\nimport \"google/protobuf/empty.proto\";\n\nmessage Event {\n  oneof event_selector {\n    // Downstream sent given bytes.\n    bytes downstream_send_bytes = 1;\n    // Downstream received some bytes.\n    google.protobuf.Empty downstream_recv_bytes = 2;\n    // Upstream sent given bytes.\n    bytes upstream_send_bytes = 3;\n    // Upstream received some bytes.\n    google.protobuf.Empty upstream_recv_bytes = 4;\n  }\n}\n\n// Test case in corpus for *_capture_fuzz_test.\nmessage CaptureFuzzTestCase {\n  repeated Event events = 1;\n}\n"
  },
  {
    "path": "test/integration/capture_fuzz_gen.py",
    "content": "\"\"\"Convert from transport socket capture to fuzz test case.\n\nConverts from envoy.data.tap.v2alpha.Trace proto to\ntest.integration.CaptureFuzzTestCase.\n\nUsage: capture_fuzz_gen.py <listener capture> [<cluster capture>]\n\"\"\"\nfrom __future__ import print_function\n\nimport functools\nimport sys\n\nfrom google.protobuf import empty_pb2\nfrom google.protobuf import text_format\n\nfrom envoy.data.tap.v2alpha import capture_pb2\nfrom test.integration import capture_fuzz_pb2\n\n\n# Collapse adjacent event in the trace that are of the same type.\ndef Coalesce(trace):\n  if not trace.events:\n    return []\n  events = [trace.events[0]]\n  for event in trace.events[1:]:\n    if events[-1].HasField('read') and event.HasField('read'):\n      events[-1].read.data += event.read.data\n    elif events[-1].HasField('write') and event.HasField('write'):\n      events[-1].write.data += event.write.data\n    else:\n      events.append(event)\n  return events\n\n\n# Convert from transport socket Event to test Event.\ndef ToTestEvent(direction, event):\n  test_event = capture_fuzz_pb2.Event()\n  if event.HasField('read'):\n    setattr(test_event, '%s_send_bytes' % direction, event.read.data)\n  elif event.HasField('write'):\n    getattr(test_event, '%s_recv_bytes' % direction).MergeFrom(empty_pb2.Empty())\n  return test_event\n\n\ndef ToDownstreamTestEvent(event):\n  return ToTestEvent('downstream', event)\n\n\ndef ToUpstreamTestEvent(event):\n  return ToTestEvent('upstream', event)\n\n\n# Zip together the listener/cluster events to produce a single trace for replay.\ndef TestCaseGen(listener_events, cluster_events):\n  test_case = capture_fuzz_pb2.CaptureFuzzTestCase()\n  if not listener_events:\n    return test_case\n  test_case.events.extend([ToDownstreamTestEvent(listener_events[0])])\n  del listener_events[0]\n  while listener_events or cluster_events:\n    if not listener_events:\n      test_case.events.extend(map(ToUpstreamTestEvent, cluster_events))\n  
    return test_case\n    if not cluster_events:\n      test_case.events.extend(map(ToDownstreamTestEvent, listener_events))\n      return test_case\n    if listener_events[0].timestamp.ToDatetime() < cluster_events[0].timestamp.ToDatetime():\n      test_case.events.extend([ToDownstreamTestEvent(listener_events[0])])\n      del listener_events[0]\n    test_case.events.extend([ToUpstreamTestEvent(cluster_events[0])])\n    del cluster_events[0]\n\n\ndef CaptureFuzzGen(listener_path, cluster_path=None):\n  listener_trace = capture_pb2.Trace()\n  with open(listener_path, 'r') as f:\n    text_format.Merge(f.read(), listener_trace)\n  listener_events = Coalesce(listener_trace)\n\n  cluster_trace = capture_pb2.Trace()\n  if cluster_path:\n    with open(cluster_path, 'r') as f:\n      text_format.Merge(f.read(), cluster_trace)\n    cluster_events = Coalesce(cluster_trace)\n\n  print(TestCaseGen(listener_events, cluster_events))\n\n\nif __name__ == '__main__':\n  if len(sys.argv) < 2 or len(sys.argv) > 3:\n    print('Usage: %s <listener capture> [<cluster capture>]' % sys.argv[0])\n    sys.exit(1)\n  CaptureFuzzGen(*sys.argv[1:])\n"
  },
  {
    "path": "test/integration/cds_integration_test.cc",
    "content": "#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/protobuf_link_hacks.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/notification.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::AssertionResult;\n\nnamespace Envoy {\nnamespace {\n\nconst char ClusterName1[] = \"cluster_1\";\nconst char ClusterName2[] = \"cluster_2\";\nconst int UpstreamIndex1 = 1;\nconst int UpstreamIndex2 = 2;\n\nclass CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public HttpIntegrationTest {\npublic:\n  CdsIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(),\n                            ConfigHelper::discoveredClustersBootstrap(\n                                sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? 
\"GRPC\" : \"DELTA_GRPC\")) {\n    use_lds_ = false;\n    sotw_or_delta_ = sotwOrDelta();\n  }\n\n  void TearDown() override {\n    if (!test_skipped_) {\n      cleanUpXdsConnection();\n    }\n  }\n\n  // Overridden to insert this stuff into the initialize() at the very beginning of\n  // HttpIntegrationTest::testRouterHeaderOnlyRequestAndResponse().\n  void initialize() override {\n    use_lds_ = false;\n    test_skipped_ = false;\n    // Controls how many addFakeUpstream() will happen in\n    // BaseIntegrationTest::createUpstreams() (which is part of initialize()).\n    // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap\n    // config that you use!\n    setUpstreamCount(1);                                  // the CDS cluster\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // CDS uses gRPC uses HTTP2.\n\n    // HttpIntegrationTest::initialize() does many things:\n    // 1) It appends to fake_upstreams_ as many as you asked for via setUpstreamCount().\n    // 2) It updates your bootstrap config with the ports your fake upstreams are actually listening\n    //    on (since you're supposed to leave them as 0).\n    // 3) It creates and starts an IntegrationTestServer - the thing that wraps the almost-actual\n    //    Envoy used in the tests.\n    // 4) Bringing up the server usually entails waiting to ensure that any listeners specified in\n    //    the bootstrap config have come up, and registering them in a port map (see lookupPort()).\n    //    However, this test needs to defer all of that to later.\n    defer_listener_finalization_ = true;\n    HttpIntegrationTest::initialize();\n\n    // Create the regular (i.e. not an xDS server) upstreams. 
We create them manually here after\n    // initialize() because finalize() expects all fake_upstreams_ to correspond to a static\n    // cluster in the bootstrap config - which we don't want since we're testing dynamic CDS!\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n    cluster1_ = ConfigHelper::buildStaticCluster(\n        ClusterName1, fake_upstreams_[UpstreamIndex1]->localAddress()->ip()->port(),\n        Network::Test::getLoopbackAddressString(ipVersion()));\n    cluster2_ = ConfigHelper::buildStaticCluster(\n        ClusterName2, fake_upstreams_[UpstreamIndex2]->localAddress()->ip()->port(),\n        Network::Test::getLoopbackAddressString(ipVersion()));\n\n    // Let Envoy establish its connection to the CDS server.\n    acceptXdsConnection();\n\n    // Do the initial compareDiscoveryRequest / sendDiscoveryResponse for cluster_1.\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n    sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                               {cluster1_}, {cluster1_}, {}, \"55\");\n\n    // We can continue the test once we're sure that Envoy's ClusterManager has made use of\n    // the DiscoveryResponse describing cluster_1 that we sent.\n    // 2 because the statically specified CDS server itself counts as a cluster.\n    test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 2);\n\n    // Wait for our statically specified listener to become ready, and register its port in the\n    // test framework's downstream listener port map.\n    test_server_->waitUntilListenersReady();\n    registerTestServerPorts({\"http\"});\n  }\n\n  // Regression test to catch the code declaring a gRPC service method for {SotW,delta}\n  // when the user's bootstrap config asks for the other type.\n  void verifyGrpcServiceMethod() {\n    
EXPECT_TRUE(xds_stream_->waitForHeadersComplete());\n    Envoy::Http::LowerCaseString path_string(\":path\");\n    std::string expected_method(sotwOrDelta() == Grpc::SotwOrDelta::Sotw\n                                    ? \"/envoy.api.v2.ClusterDiscoveryService/StreamClusters\"\n                                    : \"/envoy.api.v2.ClusterDiscoveryService/DeltaClusters\");\n    EXPECT_EQ(xds_stream_->headers().get(path_string)->value(), expected_method);\n  }\n\n  void acceptXdsConnection() {\n    AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection.\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    xds_stream_->startGrpcStream();\n    verifyGrpcServiceMethod();\n  }\n\n  envoy::config::cluster::v3::Cluster cluster1_;\n  envoy::config::cluster::v3::Cluster cluster2_;\n  // True if we decided not to run the test after all.\n  bool test_skipped_{true};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, CdsIntegrationTest,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// 1) Envoy starts up with no static clusters (other than the CDS-over-gRPC server).\n// 2) Envoy is told of a cluster via CDS.\n// 3) We send Envoy a request, which we verify is properly proxied to and served by that cluster.\n// 4) Envoy is told that cluster is gone.\n// 5) We send Envoy a request, which should 503.\n// 6) Envoy is told that the cluster is back.\n// 7) We send Envoy a request, which we verify is properly proxied to and served by that cluster.\nTEST_P(CdsIntegrationTest, CdsClusterUpDownUp) {\n  // Calls our initialize(), which includes establishing a listener, route, and cluster.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, \"/cluster1\");\n  
test_server_->waitForCounterGe(\"cluster_manager.cluster_added\", 1);\n\n  // Tell Envoy that cluster_1 is gone.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"55\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster, {}, {},\n                                                             {ClusterName1}, \"42\");\n  // We can continue the test once we're sure that Envoy's ClusterManager has made use of\n  // the DiscoveryResponse that says cluster_1 is gone.\n  test_server_->waitForCounterGe(\"cluster_manager.cluster_removed\", 1);\n\n  // Now that cluster_1 is gone, the listener (with its routing to cluster_1) should 503.\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/cluster1\", \"\", downstream_protocol_, version_, \"foo.com\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Tell Envoy that cluster_1 is back.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"42\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {cluster1_}, {cluster1_}, {}, \"413\");\n\n  // We can continue the test once we're sure that Envoy's ClusterManager has made use of\n  // the DiscoveryResponse describing cluster_1 that we sent. 
Again, 2 includes CDS server.\n  test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 2);\n\n  // Does *not* call our initialize().\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, \"/cluster1\");\n\n  cleanupUpstreamAndDownstream();\n}\n\n// Tests adding a cluster, adding another, then removing the first.\nTEST_P(CdsIntegrationTest, TwoClusters) {\n  // Calls our initialize(), which includes establishing a listener, route, and cluster.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, \"/cluster1\");\n\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Tell Envoy that cluster_2 is here.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"55\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {cluster1_, cluster2_}, {cluster2_}, {}, \"42\");\n  // The '3' includes the fake CDS server.\n  test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 3);\n\n  // A request for cluster_2 should be fine.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, \"/cluster2\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Tell Envoy that cluster_1 is gone.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"42\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {cluster2_}, {}, {ClusterName1}, \"42\");\n  // We can continue the test once we're sure that Envoy's ClusterManager has made use of\n  // the DiscoveryResponse that says cluster_1 is gone.\n  test_server_->waitForCounterGe(\"cluster_manager.cluster_removed\", 1);\n\n  // Even with cluster_1 gone, a request for cluster_2 should be fine.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, \"/cluster2\");\n  
cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Tell Envoy that cluster_1 is back.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"42\", {}, {}, {}));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n      Config::TypeUrl::get().Cluster, {cluster1_, cluster2_}, {cluster1_}, {}, \"413\");\n\n  // We can continue the test once we're sure that Envoy's ClusterManager has made use of\n  // the DiscoveryResponse describing cluster_1 that we sent. Again, 3 includes CDS server.\n  test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 3);\n\n  // Does *not* call our initialize().\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, \"/cluster1\");\n\n  cleanupUpstreamAndDownstream();\n}\n\n// Tests that when Envoy's delta xDS stream dis/reconnects, Envoy can inform the server of the\n// resources it already has: the reconnected stream need not start with a state-of-the-world update.\nTEST_P(CdsIntegrationTest, VersionsRememberedAfterReconnect) {\n  SKIP_IF_XDS_IS(Grpc::SotwOrDelta::Sotw);\n\n  // Calls our initialize(), which includes establishing a listener, route, and cluster.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, \"/cluster1\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Close the connection carrying Envoy's xDS gRPC stream...\n  AssertionResult result = xds_connection_->close();\n  RELEASE_ASSERT(result, result.message());\n  result = xds_connection_->waitForDisconnect();\n  RELEASE_ASSERT(result, result.message());\n  xds_connection_.reset();\n  // ...and reconnect it.\n  acceptXdsConnection();\n\n  // Upon reconnecting, the Envoy should tell us its current resource versions.\n  API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) request;\n  result = xds_stream_->waitForGrpcMessage(*dispatcher_, request);\n  RELEASE_ASSERT(result, result.message());\n  const auto& 
initial_resource_versions = request.initial_resource_versions();\n  EXPECT_EQ(\"55\", initial_resource_versions.at(std::string(ClusterName1)));\n  EXPECT_EQ(1, initial_resource_versions.size());\n\n  // Tell Envoy that cluster_2 is here. This update does *not* need to include cluster_1,\n  // which Envoy should already know about despite the disconnect.\n  sendDeltaDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                                  {cluster2_}, {}, \"42\");\n  // The '3' includes the fake CDS server.\n  test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 3);\n\n  // A request for cluster_1 should be fine.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, \"/cluster1\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  // A request for cluster_2 should be fine.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, \"/cluster2\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/cluster_filter_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/network/filter.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/integration/integration.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass PoliteFilter : public Network::Filter, Logger::Loggable<Logger::Id::filter> {\npublic:\n  PoliteFilter(const ProtobufWkt::StringValue& value) : greeting_(value.value()) {}\n\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override {\n    ENVOY_CONN_LOG(debug, \"polite: onData {} bytes {} end_stream\", read_callbacks_->connection(),\n                   data.length(), end_stream);\n    if (!read_greeted_) {\n      Buffer::OwnedImpl greeter(greeting_);\n      read_callbacks_->injectReadDataToFilterChain(greeter, false);\n      read_greeted_ = true;\n    }\n    return Network::FilterStatus::Continue;\n  }\n  Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override {\n    ENVOY_CONN_LOG(debug, \"polite: onWrite {} bytes {} end_stream\", write_callbacks_->connection(),\n                   data.length(), end_stream);\n    if (!write_greeted_) {\n      Buffer::OwnedImpl greeter(\"please \");\n      write_callbacks_->injectWriteDataToFilterChain(greeter, false);\n      write_greeted_ = true;\n    }\n    return Network::FilterStatus::Continue;\n  }\n  Network::FilterStatus onNewConnection() override {\n    ENVOY_CONN_LOG(debug, \"polite: new connection\", read_callbacks_->connection());\n    return Network::FilterStatus::Continue;\n  }\n\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n  }\n  void initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) override {\n    write_callbacks_ = &callbacks;\n  }\n\nprivate:\n  const std::string greeting_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  Network::WriteFilterCallbacks* 
write_callbacks_{};\n  bool read_greeted_{false};\n  bool write_greeted_{false};\n};\n\nclass PoliteFilterConfigFactory\n    : public Server::Configuration::NamedUpstreamNetworkFilterConfigFactory {\npublic:\n  Network::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message& proto_config,\n                               Server::Configuration::CommonFactoryContext&) override {\n    auto config = dynamic_cast<const ProtobufWkt::StringValue&>(proto_config);\n    return [config](Network::FilterManager& filter_manager) -> void {\n      filter_manager.addFilter(std::make_shared<PoliteFilter>(config));\n    };\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ProtobufWkt::StringValue>();\n  }\n\n  std::string name() const override { return \"envoy.upstream.polite\"; }\n};\n\nclass ClusterFilterIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                     public BaseIntegrationTest {\npublic:\n  ClusterFilterIntegrationTest()\n      : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()), registration_(factory_) {}\n\n  void initialize() override {\n    enable_half_close_ = true;\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* cluster_0 = bootstrap.mutable_static_resources()->mutable_clusters(0);\n      auto* filter = cluster_0->add_filters();\n      filter->set_name(\"envoy.upstream.polite\");\n      ProtobufWkt::StringValue config;\n      config.set_value(\"surely \");\n      filter->mutable_typed_config()->PackFrom(config);\n    });\n    BaseIntegrationTest::initialize();\n  }\n\n  PoliteFilterConfigFactory factory_;\n  Registry::InjectFactory<Server::Configuration::NamedUpstreamNetworkFilterConfigFactory>\n      registration_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ClusterFilterIntegrationTest,\n                         
testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ClusterFilterIntegrationTest, TestClusterFilter) {\n  initialize();\n\n  auto tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n  std::string observed_data;\n  ASSERT_TRUE(tcp_client->write(\"test\"));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(11, &observed_data));\n  EXPECT_EQ(\"please test\", observed_data);\n\n  observed_data.clear();\n  ASSERT_TRUE(tcp_client->write(\" everything\"));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(22, &observed_data));\n  EXPECT_EQ(\"please test everything\", observed_data);\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"yes\"));\n  tcp_client->waitForData(\"surely yes\");\n\n  ASSERT_TRUE(tcp_client->write(\"\", true));\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->write(\"\", true));\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  tcp_client->waitForDisconnect();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/clusters/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"custom_static_cluster\",\n    srcs = [\n        \"custom_static_cluster.cc\",\n    ],\n    hdrs = [\n        \"custom_static_cluster.h\",\n    ],\n    deps = [\n        \":cluster_factory_config_proto_cc_proto\",\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/upstream:cluster_factory_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/singleton:manager_impl_lib\",\n        \"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:transport_socket_config_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"cluster_factory_config_proto\",\n    srcs = [\":cluster_factory_config.proto\"],\n)\n"
  },
  {
    "path": "test/integration/clusters/cluster_factory_config.proto",
    "content": "syntax = \"proto3\";\n\npackage test.integration.clusters;\n\nmessage CustomStaticConfig {\n  uint32 priority = 1;\n  string address = 2;\n  uint32 port_value = 3;\n}"
  },
  {
    "path": "test/integration/clusters/custom_static_cluster.cc",
    "content": "#include \"custom_static_cluster.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n\nnamespace Envoy {\n\n// ClusterImplBase\nvoid CustomStaticCluster::startPreInit() {\n  Upstream::HostVector hosts{host_};\n  auto hosts_ptr = std::make_shared<Upstream::HostVector>(hosts);\n\n  priority_set_.updateHosts(\n      priority_,\n      Upstream::HostSetImpl::partitionHosts(hosts_ptr, Upstream::HostsPerLocalityImpl::empty()), {},\n      hosts, {}, absl::nullopt);\n\n  onPreInitComplete();\n}\n\nUpstream::HostSharedPtr CustomStaticCluster::makeHost() {\n  Network::Address::InstanceConstSharedPtr address =\n      Network::Utility::parseInternetAddress(address_, port_, true);\n  return Upstream::HostSharedPtr{new Upstream::HostImpl(\n      info(), \"\", address,\n      std::make_shared<const envoy::config::core::v3::Metadata>(info()->metadata()), 1,\n      envoy::config::core::v3::Locality::default_instance(),\n      envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), priority_,\n      envoy::config::core::v3::UNKNOWN)};\n}\n\nUpstream::ThreadAwareLoadBalancerPtr CustomStaticCluster::threadAwareLb() {\n  return std::make_unique<ThreadAwareLbImpl>(host_);\n}\n\nREGISTER_FACTORY(CustomStaticClusterFactoryNoLb, Upstream::ClusterFactory);\nREGISTER_FACTORY(CustomStaticClusterFactoryWithLb, Upstream::ClusterFactory);\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/clusters/custom_static_cluster.h",
    "content": "#include <chrono>\n#include <list>\n#include <string>\n#include <tuple>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/upstream/cluster_factory_impl.h\"\n\n#include \"server/transport_socket_config_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/integration/clusters/cluster_factory_config.pb.h\"\n#include \"test/integration/clusters/cluster_factory_config.pb.validate.h\"\n#include \"test/test_common/registry.h\"\n\nnamespace Envoy {\n\nclass CustomStaticCluster : public Upstream::ClusterImplBase {\npublic:\n  CustomStaticCluster(const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime,\n                      Server::Configuration::TransportSocketFactoryContextImpl& factory_context,\n                      Stats::ScopePtr&& stats_scope, bool added_via_api, uint32_t priority,\n                      std::string address, uint32_t port)\n      : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api),\n        priority_(priority), address_(std::move(address)), port_(port), host_(makeHost()) {}\n\n  InitializePhase initializePhase() const override { return InitializePhase::Primary; }\n\nprivate:\n  struct LbImpl : public Upstream::LoadBalancer {\n    LbImpl(const Upstream::HostSharedPtr& host) : host_(host) {}\n\n    Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext*) override {\n      return host_;\n    }\n    Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override {\n      return nullptr;\n    }\n\n    const Upstream::HostSharedPtr host_;\n  };\n\n  struct LbFactory : public Upstream::LoadBalancerFactory {\n    LbFactory(const Upstream::HostSharedPtr& host) : host_(host) {}\n\n    Upstream::LoadBalancerPtr create() override { 
return std::make_unique<LbImpl>(host_); }\n\n    const Upstream::HostSharedPtr host_;\n  };\n\n  struct ThreadAwareLbImpl : public Upstream::ThreadAwareLoadBalancer {\n    ThreadAwareLbImpl(const Upstream::HostSharedPtr& host) : host_(host) {}\n\n    Upstream::LoadBalancerFactorySharedPtr factory() override {\n      return std::make_shared<LbFactory>(host_);\n    }\n    void initialize() override {}\n\n    const Upstream::HostSharedPtr host_;\n  };\n\n  Upstream::ThreadAwareLoadBalancerPtr threadAwareLb();\n\n  // ClusterImplBase\n  void startPreInit() override;\n\n  Upstream::HostSharedPtr makeHost();\n\n  const uint32_t priority_;\n  const std::string address_;\n  const uint32_t port_;\n  const Upstream::HostSharedPtr host_;\n\n  friend class CustomStaticClusterFactoryBase;\n};\n\nclass CustomStaticClusterFactoryBase : public Upstream::ConfigurableClusterFactoryBase<\n                                           test::integration::clusters::CustomStaticConfig> {\nprotected:\n  CustomStaticClusterFactoryBase(const std::string& name, bool create_lb)\n      : ConfigurableClusterFactoryBase(name), create_lb_(create_lb) {}\n\nprivate:\n  std::pair<Upstream::ClusterImplBaseSharedPtr, Upstream::ThreadAwareLoadBalancerPtr>\n  createClusterWithConfig(\n      const envoy::config::cluster::v3::Cluster& cluster,\n      const test::integration::clusters::CustomStaticConfig& proto_config,\n      Upstream::ClusterFactoryContext& context,\n      Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context,\n      Stats::ScopePtr&& stats_scope) override {\n    auto new_cluster = std::make_shared<CustomStaticCluster>(\n        cluster, context.runtime(), socket_factory_context, std::move(stats_scope),\n        context.addedViaApi(), proto_config.priority(), proto_config.address(),\n        proto_config.port_value());\n    return std::make_pair(new_cluster, create_lb_ ? 
new_cluster->threadAwareLb() : nullptr);\n  }\n\n  const bool create_lb_;\n};\n\nclass CustomStaticClusterFactoryNoLb : public CustomStaticClusterFactoryBase {\npublic:\n  CustomStaticClusterFactoryNoLb()\n      : CustomStaticClusterFactoryBase(\"envoy.clusters.custom_static\", false) {}\n};\n\nclass CustomStaticClusterFactoryWithLb : public CustomStaticClusterFactoryBase {\npublic:\n  CustomStaticClusterFactoryWithLb()\n      : CustomStaticClusterFactoryBase(\"envoy.clusters.custom_static_with_lb\", true) {}\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/custom_cluster_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/integration/clusters/cluster_factory_config.pb.h\"\n#include \"test/integration/clusters/custom_static_cluster.h\"\n#include \"test/integration/http_integration.h\"\n\nnamespace Envoy {\nnamespace {\n\nconst int UpstreamIndex = 0;\n\n// Integration test for cluster extension using CustomStaticCluster.\nclass CustomClusterIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                     public HttpIntegrationTest {\npublic:\n  CustomClusterIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(), realTime()) {}\n\n  void initialize() override {\n    setUpstreamCount(1);\n    // change the configuration of the cluster_0 to a custom static cluster\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* cluster_0 = bootstrap.mutable_static_resources()->mutable_clusters(0);\n\n      if (cluster_provided_lb_) {\n        cluster_0->set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED);\n      }\n\n      envoy::config::cluster::v3::Cluster::CustomClusterType cluster_type;\n      cluster_type.set_name(cluster_provided_lb_ ? 
\"envoy.clusters.custom_static_with_lb\"\n                                                 : \"envoy.clusters.custom_static\");\n      test::integration::clusters::CustomStaticConfig config;\n      config.set_priority(10);\n      config.set_address(Network::Test::getLoopbackAddressString(ipVersion()));\n      config.set_port_value(fake_upstreams_[UpstreamIndex]->localAddress()->ip()->port());\n      cluster_type.mutable_typed_config()->PackFrom(config);\n\n      cluster_0->mutable_cluster_type()->CopyFrom(cluster_type);\n    });\n    HttpIntegrationTest::initialize();\n    test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 1);\n  }\n\n  Network::Address::IpVersion ipVersion() const { return version_; }\n  bool cluster_provided_lb_{};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, CustomClusterIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));\n\nTEST_P(CustomClusterIntegrationTest, TestRouterHeaderOnly) {\n  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex);\n}\n\nTEST_P(CustomClusterIntegrationTest, TestTwoRequests) { testTwoRequests(false); }\n\nTEST_P(CustomClusterIntegrationTest, TestTwoRequestsWithClusterLb) {\n  cluster_provided_lb_ = true;\n  testTwoRequests(false);\n}\n\nTEST_P(CustomClusterIntegrationTest, TestCustomConfig) {\n  // Calls our initialize(), which includes establishing a listener, route, and cluster.\n  initialize();\n\n  // Verify the cluster is correctly setup with the custom priority\n  const auto& cluster_map = test_server_->server().clusterManager().clusters();\n  EXPECT_EQ(1, cluster_map.size());\n  EXPECT_EQ(1, cluster_map.count(\"cluster_0\"));\n  const auto& cluster_ref = cluster_map.find(\"cluster_0\")->second;\n  const auto& hostset_per_priority = cluster_ref.get().prioritySet().hostSetsPerPriority();\n  EXPECT_EQ(11, hostset_per_priority.size());\n  const Envoy::Upstream::HostSetPtr& host_set = hostset_per_priority[10];\n  EXPECT_EQ(1, 
host_set->hosts().size());\n  EXPECT_EQ(1, host_set->healthyHosts().size());\n  EXPECT_EQ(10, host_set->priority());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/cx_limit_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/network/utility.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/integration/integration.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass ConnectionLimitIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                       public Event::TestUsingSimulatedTime,\n                                       public BaseIntegrationTest {\npublic:\n  ConnectionLimitIntegrationTest()\n      : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) {}\n\n  void setEmptyListenerLimit() {\n    config_helper_.addRuntimeOverride(\"envoy.resource_limits.listener.listener_0.connection_limit\",\n                                      \"\");\n  }\n\n  void setListenerLimit(const uint32_t num_conns) {\n    config_helper_.addRuntimeOverride(\"envoy.resource_limits.listener.listener_0.connection_limit\",\n                                      std::to_string(num_conns));\n  }\n\n  void setGlobalLimit(std::string&& num_conns) {\n    config_helper_.addRuntimeOverride(\"overload.global_downstream_max_connections\", num_conns);\n  }\n\n  void initialize() override { BaseIntegrationTest::initialize(); }\n\n  AssertionResult waitForConnections(uint32_t envoy_downstream_connections) {\n    // The multiplier of 2 is because both Envoy's downstream connections and\n    // the test server's downstream connections are counted by the global\n    // counter.\n    uint32_t expected_connections = envoy_downstream_connections * 2;\n\n    for (int i = 0; i < 10; ++i) {\n      if (Network::AcceptedSocketImpl::acceptedSocketCount() == expected_connections) {\n        return AssertionSuccess();\n      }\n      // TODO(mattklein123): Do not use a real sleep 
here. Switch to events with waitFor().\n      timeSystem().realSleepDoNotUseWithoutScrutiny(std::chrono::milliseconds(500));\n    }\n    if (Network::AcceptedSocketImpl::acceptedSocketCount() == expected_connections) {\n      return AssertionSuccess();\n    }\n    return AssertionFailure();\n  }\n\n  // Assumes a limit of 2 connections.\n  void doTest(std::function<void()> init_func, std::string&& check_stat) {\n    init_func();\n\n    std::vector<IntegrationTcpClientPtr> tcp_clients;\n    std::vector<FakeRawConnectionPtr> raw_conns;\n    tcp_clients.emplace_back(makeTcpConnection(lookupPort(\"listener_0\")));\n    raw_conns.emplace_back();\n    ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back()));\n    ASSERT_TRUE(tcp_clients.back()->connected());\n\n    tcp_clients.emplace_back(makeTcpConnection(lookupPort(\"listener_0\")));\n    raw_conns.emplace_back();\n    ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back()));\n    ASSERT_TRUE(tcp_clients.back()->connected());\n\n    tcp_clients.emplace_back(makeTcpConnection(lookupPort(\"listener_0\")));\n    raw_conns.emplace_back();\n    ASSERT_FALSE(\n        fake_upstreams_[0]->waitForRawConnection(raw_conns.back(), std::chrono::milliseconds(500)));\n    tcp_clients.back()->waitForDisconnect();\n\n    // Get rid of the client that failed to connect.\n    tcp_clients.back()->close();\n    tcp_clients.pop_back();\n\n    // Close the first connection that was successful so that we can open a new successful\n    // connection.\n    tcp_clients.front()->close();\n    ASSERT_TRUE(raw_conns.front()->waitForDisconnect());\n\n    // Make sure to not try to connect again until the acceptedSocketCount is updated.\n    ASSERT_TRUE(waitForConnections(1));\n    tcp_clients.emplace_back(makeTcpConnection(lookupPort(\"listener_0\")));\n    raw_conns.emplace_back();\n    ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back()));\n    ASSERT_TRUE(tcp_clients.back()->connected());\n\n    
const bool isV4 = (version_ == Network::Address::IpVersion::v4);\n    auto local_address = isV4 ? Network::Utility::getCanonicalIpv4LoopbackAddress()\n                              : Network::Utility::getIpv6LoopbackAddress();\n\n    const std::string counter_prefix = (isV4 ? \"listener.127.0.0.1_0.\" : \"listener.[__1]_0.\");\n\n    test_server_->waitForCounterEq(counter_prefix + check_stat, 1);\n\n    for (auto& tcp_client : tcp_clients) {\n      tcp_client->close();\n    }\n\n    tcp_clients.clear();\n    raw_conns.clear();\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ConnectionLimitIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ConnectionLimitIntegrationTest, TestListenerLimit) {\n  std::function<void()> init_func = [this]() {\n    setListenerLimit(2);\n    initialize();\n  };\n\n  doTest(init_func, \"downstream_cx_overflow\");\n}\n\nTEST_P(ConnectionLimitIntegrationTest, TestEmptyGlobalCxRuntimeLimit) {\n  const std::string log_line = \"no configured limit to the number of allowed active connections.\";\n  EXPECT_LOG_CONTAINS(\"warn\", log_line, { initialize(); });\n}\n\nTEST_P(ConnectionLimitIntegrationTest, TestEmptyListenerRuntimeLimit) {\n  const std::string log_line =\n      \"Listener connection limit runtime key \"\n      \"envoy.resource_limits.listener.listener_0.connection_limit is empty. There are currently \"\n      \"no limitations on the number of accepted connections for listener listener_0.\";\n  EXPECT_LOG_CONTAINS(\"warn\", log_line, {\n    setEmptyListenerLimit();\n    initialize();\n  });\n}\n\nTEST_P(ConnectionLimitIntegrationTest, TestGlobalLimit) {\n  std::function<void()> init_func = [this]() {\n    // Includes twice the number of connections expected because the tracking is performed via a\n    // static variable and the fake upstream has a listener. 
This causes upstream connections to the\n    // fake upstream to also be tracked as part of the global downstream connection tracking.\n    setGlobalLimit(\"4\");\n    initialize();\n  };\n\n  doTest(init_func, \"downstream_global_cx_overflow\");\n}\n\nTEST_P(ConnectionLimitIntegrationTest, TestBothLimits) {\n  std::function<void()> init_func = [this]() {\n    // Setting the listener limit to a much higher value and making sure the right stat gets\n    // incremented when both limits are set.\n    setGlobalLimit(\"4\");\n    setListenerLimit(100);\n    initialize();\n  };\n\n  doTest(init_func, \"downstream_global_cx_overflow\");\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/drain_close_integration_test.cc",
    "content": "#include \"test/integration/http_protocol_integration.h\"\n\nnamespace Envoy {\nnamespace {\n\nusing DrainCloseIntegrationTest = HttpProtocolIntegrationTest;\n\n// Add a health check filter and verify correct behavior when draining.\nTEST_P(DrainCloseIntegrationTest, DrainCloseGradual) {\n  // The probability of drain close increases over time. With a high timeout,\n  // the probability will be very low, but the rapid retries prevent this from\n  // increasing total test time.\n  drain_time_ = std::chrono::seconds(100);\n  config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter());\n  initialize();\n\n  absl::Notification drain_sequence_started;\n  test_server_->server().dispatcher().post([this, &drain_sequence_started]() {\n    test_server_->drainManager().startDrainSequence([] {});\n    drain_sequence_started.Notify();\n  });\n  drain_sequence_started.WaitForNotification();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  EXPECT_FALSE(codec_client_->disconnected());\n\n  IntegrationStreamDecoderPtr response;\n  while (!test_server_->counter(\"http.config_test.downstream_cx_drain_close\")->value()) {\n    response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n    response->waitForEndStream();\n  }\n  EXPECT_EQ(test_server_->counter(\"http.config_test.downstream_cx_drain_close\")->value(), 1L);\n\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  EXPECT_TRUE(response->complete());\n\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) {\n    EXPECT_TRUE(codec_client_->sawGoAway());\n  } else {\n    EXPECT_EQ(\"close\", response->headers().getConnectionValue());\n  }\n}\n\nTEST_P(DrainCloseIntegrationTest, DrainCloseImmediate) {\n  drain_strategy_ = Server::DrainStrategy::Immediate;\n  drain_time_ = std::chrono::seconds(100);\n  config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter());\n  initialize();\n\n  
absl::Notification drain_sequence_started;\n  test_server_->server().dispatcher().post([this, &drain_sequence_started]() {\n    test_server_->drainManager().startDrainSequence([] {});\n    drain_sequence_started.Notify();\n  });\n  drain_sequence_started.WaitForNotification();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  EXPECT_FALSE(codec_client_->disconnected());\n\n  IntegrationStreamDecoderPtr response;\n  response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  response->waitForEndStream();\n\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  EXPECT_TRUE(response->complete());\n\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) {\n    EXPECT_TRUE(codec_client_->sawGoAway());\n  } else {\n    EXPECT_EQ(\"close\", response->headers().getConnectionValue());\n  }\n}\n\nTEST_P(DrainCloseIntegrationTest, AdminDrain) { testAdminDrain(downstreamProtocol()); }\n\nTEST_P(DrainCloseIntegrationTest, AdminGracefulDrain) {\n  drain_strategy_ = Server::DrainStrategy::Immediate;\n  drain_time_ = std::chrono::seconds(999);\n  initialize();\n  uint32_t http_port = lookupPort(\"http\");\n  codec_client_ = makeHttpConnection(http_port);\n\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest(0);\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), Http::HttpStatusIs(\"200\"));\n  // The request is completed but the connection remains open.\n  EXPECT_TRUE(codec_client_->connected());\n\n  // Invoke /drain_listeners with graceful drain\n  BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"admin\"), \"POST\", \"/drain_listeners?graceful\", \"\", downstreamProtocol(), version_);\n  
EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), \"200\");\n\n  // With a 999s graceful drain period, the listener should still be open.\n  EXPECT_EQ(test_server_->counter(\"listener_manager.listener_stopped\")->value(), 0);\n\n  response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest(0);\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), Http::HttpStatusIs(\"200\"));\n\n  // Connections will terminate on request complete\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) {\n    EXPECT_TRUE(codec_client_->sawGoAway());\n  } else {\n    EXPECT_EQ(\"close\", response->headers().getConnectionValue());\n  }\n\n  // New connections can still be made.\n  auto second_codec_client_ = makeRawHttpConnection(makeClientConnection(http_port), absl::nullopt);\n  EXPECT_TRUE(second_codec_client_->connected());\n\n  // Invoke /drain_listeners and shut down listeners.\n  second_codec_client_->rawConnection().close(Network::ConnectionCloseType::NoFlush);\n  admin_response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"admin\"), \"POST\", \"/drain_listeners\", \"\", downstreamProtocol(), version_);\n  EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), \"200\");\n\n  test_server_->waitForCounterEq(\"listener_manager.listener_stopped\", 1);\n  ASSERT_TRUE(waitForPortAvailable(http_port));\n}\n\nTEST_P(DrainCloseIntegrationTest, RepeatedAdminGracefulDrain) {\n  // Use the default gradual probabilistic DrainStrategy so drainClose()\n  // behaviour isn't conflated with whether the drain sequence has started.\n  drain_time_ = std::chrono::seconds(999);\n  initialize();\n  uint32_t http_port = lookupPort(\"http\");\n  codec_client_ = makeHttpConnection(http_port);\n\n  auto response = 
codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest(0);\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  // Invoke /drain_listeners with graceful drain\n  BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"admin\"), \"POST\", \"/drain_listeners?graceful\", \"\", downstreamProtocol(), version_);\n  EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), \"200\");\n  EXPECT_EQ(test_server_->counter(\"listener_manager.listener_stopped\")->value(), 0);\n\n  admin_response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"admin\"), \"POST\", \"/drain_listeners?graceful\", \"\", downstreamProtocol(), version_);\n  EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), \"200\");\n  EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), \"200\");\n\n  response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest(0);\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), Http::HttpStatusIs(\"200\"));\n\n  admin_response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"admin\"), \"POST\", \"/drain_listeners\", \"\", downstreamProtocol(), version_);\n  EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), \"200\");\n\n  test_server_->waitForCounterEq(\"listener_manager.listener_stopped\", 1);\n  ASSERT_TRUE(waitForPortAvailable(http_port));\n}\n\nINSTANTIATE_TEST_SUITE_P(Protocols, DrainCloseIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams(\n                             {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2},\n                             {FakeHttpConnection::Type::HTTP1})),\n                         
HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/dynamic_validation_integration_test.cc",
    "content": "#include <fstream>\n\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.validate.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\n// This fake filter is used by CdsProtocolOptionsRejected.\nclass TestDynamicValidationNetworkFilter : public Network::WriteFilter {\npublic:\n  Network::FilterStatus onWrite(Buffer::Instance&, bool) override {\n    return Network::FilterStatus::Continue;\n  }\n};\n\nclass TestDynamicValidationNetworkFilterConfigFactory\n    : public Extensions::NetworkFilters::Common::FactoryBase<\n          envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy> {\npublic:\n  TestDynamicValidationNetworkFilterConfigFactory()\n      : Extensions::NetworkFilters::Common::FactoryBase<\n            envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy>(\n            \"envoy.test.dynamic_validation\") {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& /*proto_config*/,\n      Server::Configuration::FactoryContext& /*context*/) override {\n    return Network::FilterFactoryCb();\n  }\n\n  Upstream::ProtocolOptionsConfigConstSharedPtr\n  createProtocolOptionsTyped(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy&,\n                             Server::Configuration::ProtocolOptionsFactoryContext&) override {\n    return nullptr;\n  }\n};\n\n// Pretty-printing of parameterized test names.\nstd::string dynamicValidationTestParamsToString(\n    const ::testing::TestParamInfo<std::tuple<Network::Address::IpVersion, bool, bool>>& params) {\n  return fmt::format(\n      \"{}_{}_{}\",\n      
TestUtility::ipTestParamsToString(\n          ::testing::TestParamInfo<Network::Address::IpVersion>(std::get<0>(params.param), 0)),\n      std::get<1>(params.param) ? \"with_reject_unknown_fields\" : \"without_reject_unknown_fields\",\n      std::get<2>(params.param) ? \"with_ignore_unknown_fields\" : \"without_ignore_unknown_fields\");\n}\n\n// Validate unknown field handling in dynamic configuration.\nclass DynamicValidationIntegrationTest\n    : public testing::TestWithParam<std::tuple<Network::Address::IpVersion, bool, bool>>,\n      public HttpIntegrationTest {\npublic:\n  DynamicValidationIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, std::get<0>(GetParam())),\n        reject_unknown_dynamic_fields_(std::get<1>(GetParam())),\n        ignore_unknown_dynamic_fields_(std::get<2>(GetParam())) {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void createEnvoy() override {\n    registerPort(\"upstream_0\", fake_upstreams_.back()->localAddress()->ip()->port());\n    createApiTestServer(api_filesystem_config_, {\"http\"},\n                        {reject_unknown_dynamic_fields_, reject_unknown_dynamic_fields_,\n                         ignore_unknown_dynamic_fields_},\n                        allow_lds_rejection_);\n  }\n\n  ApiFilesystemConfig api_filesystem_config_;\n  const bool reject_unknown_dynamic_fields_;\n  const bool ignore_unknown_dynamic_fields_;\n  bool allow_lds_rejection_{};\n\nprivate:\n  TestDynamicValidationNetworkFilterConfigFactory factory_;\n  Registry::InjectFactory<Server::Configuration::NamedNetworkFilterConfigFactory> register_{\n      factory_};\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    IpVersions, DynamicValidationIntegrationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool(),\n                     testing::Bool()),\n    dynamicValidationTestParamsToString);\n\n// Protocol options in CDS with unknown fields are rejected if and only if 
strict.\nTEST_P(DynamicValidationIntegrationTest, CdsProtocolOptionsRejected) {\n  api_filesystem_config_ = {\n      \"test/config/integration/server_xds.bootstrap.yaml\",\n      \"test/config/integration/server_xds.cds.with_unknown_field.yaml\",\n      \"test/config/integration/server_xds.eds.yaml\",\n      \"test/config/integration/server_xds.lds.yaml\",\n      \"test/config/integration/server_xds.rds.yaml\",\n  };\n  initialize();\n  if (reject_unknown_dynamic_fields_) {\n    EXPECT_EQ(0, test_server_->counter(\"cluster_manager.cds.update_success\")->value());\n    // CDS API parsing will reject due to unknown HCM field.\n    EXPECT_EQ(1, test_server_->counter(\"cluster_manager.cds.update_rejected\")->value());\n    EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n  } else {\n    EXPECT_EQ(1, test_server_->counter(\"cluster_manager.cds.update_success\")->value());\n    if (ignore_unknown_dynamic_fields_) {\n      EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    } else {\n      EXPECT_EQ(1, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    }\n  }\n}\n\n// Network filters in LDS with unknown fields are rejected if and only if strict.\nTEST_P(DynamicValidationIntegrationTest, LdsFilterRejected) {\n  allow_lds_rejection_ = true;\n  api_filesystem_config_ = {\n      \"test/config/integration/server_xds.bootstrap.yaml\",\n      \"test/config/integration/server_xds.cds.yaml\",\n      \"test/config/integration/server_xds.eds.yaml\",\n      \"test/config/integration/server_xds.lds.with_unknown_field.yaml\",\n      \"test/config/integration/server_xds.rds.yaml\",\n  };\n  initialize();\n  if (reject_unknown_dynamic_fields_) {\n    EXPECT_EQ(0, test_server_->counter(\"listener_manager.lds.update_success\")->value());\n    // LDS API parsing will reject due to unknown HCM field.\n    EXPECT_EQ(1, test_server_->counter(\"listener_manager.lds.update_rejected\")->value());\n    
EXPECT_EQ(nullptr, test_server_->counter(\"http.router.rds.route_config_0.update_success\"));\n    EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n  } else {\n    EXPECT_EQ(1, test_server_->counter(\"listener_manager.lds.update_success\")->value());\n    EXPECT_EQ(1, test_server_->counter(\"http.router.rds.route_config_0.update_success\")->value());\n    if (ignore_unknown_dynamic_fields_) {\n      EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    } else {\n      EXPECT_EQ(1, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    }\n  }\n  EXPECT_EQ(1, test_server_->counter(\"cluster_manager.cds.update_success\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_1.update_success\")->value());\n}\n\n// Network filters in LDS config using TypedStruct with unknown fields are rejected if and only if\n// strict.\nTEST_P(DynamicValidationIntegrationTest, LdsFilterRejectedTypedStruct) {\n  allow_lds_rejection_ = true;\n  api_filesystem_config_ = {\n      \"test/config/integration/server_xds.bootstrap.yaml\",\n      \"test/config/integration/server_xds.cds.yaml\",\n      \"test/config/integration/server_xds.eds.yaml\",\n      \"test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml\",\n      \"test/config/integration/server_xds.rds.yaml\",\n  };\n  initialize();\n  if (reject_unknown_dynamic_fields_) {\n    EXPECT_EQ(0, test_server_->counter(\"listener_manager.lds.update_success\")->value());\n    // LDS API parsing will reject due to unknown HCM field.\n    EXPECT_EQ(1, test_server_->counter(\"listener_manager.lds.update_rejected\")->value());\n    EXPECT_EQ(nullptr, test_server_->counter(\"http.router.rds.route_config_0.update_success\"));\n    EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n  } else {\n    EXPECT_EQ(1, test_server_->counter(\"listener_manager.lds.update_success\")->value());\n    EXPECT_EQ(1, 
test_server_->counter(\"http.router.rds.route_config_0.update_success\")->value());\n    if (ignore_unknown_dynamic_fields_) {\n      EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    } else {\n      EXPECT_EQ(1, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    }\n  }\n  EXPECT_EQ(1, test_server_->counter(\"cluster_manager.cds.update_success\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_1.update_success\")->value());\n}\n\n// Unknown fields in RDS cause config load failure if and only if strict.\nTEST_P(DynamicValidationIntegrationTest, RdsFailedBySubscription) {\n  api_filesystem_config_ = {\n      \"test/config/integration/server_xds.bootstrap.yaml\",\n      \"test/config/integration/server_xds.cds.yaml\",\n      \"test/config/integration/server_xds.eds.yaml\",\n      \"test/config/integration/server_xds.lds.yaml\",\n      \"test/config/integration/server_xds.rds.with_unknown_field.yaml\",\n  };\n  initialize();\n  EXPECT_EQ(1, test_server_->counter(\"listener_manager.lds.update_success\")->value());\n  if (reject_unknown_dynamic_fields_) {\n    EXPECT_EQ(0, test_server_->counter(\"http.router.rds.route_config_0.update_success\")->value());\n    // Unknown fields in the config result in the update_rejected counter incremented\n    EXPECT_EQ(1, test_server_->counter(\"http.router.rds.route_config_0.update_rejected\")->value());\n    EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n  } else {\n    EXPECT_EQ(1, test_server_->counter(\"http.router.rds.route_config_0.update_success\")->value());\n    if (ignore_unknown_dynamic_fields_) {\n      EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    } else {\n      EXPECT_EQ(1, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    }\n  }\n  EXPECT_EQ(1, test_server_->counter(\"cluster_manager.cds.update_success\")->value());\n  EXPECT_EQ(1, 
test_server_->counter(\"cluster.cluster_1.update_success\")->value());\n}\n\n// Unknown fields in EDS cause config load failure if and only if strict.\nTEST_P(DynamicValidationIntegrationTest, EdsFailedBySubscription) {\n  api_filesystem_config_ = {\n      \"test/config/integration/server_xds.bootstrap.yaml\",\n      \"test/config/integration/server_xds.cds.yaml\",\n      \"test/config/integration/server_xds.eds.with_unknown_field.yaml\",\n      \"test/config/integration/server_xds.lds.yaml\",\n      \"test/config/integration/server_xds.rds.yaml\",\n  };\n  initialize();\n  EXPECT_EQ(1, test_server_->counter(\"listener_manager.lds.update_success\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"http.router.rds.route_config_0.update_success\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"cluster_manager.cds.update_success\")->value());\n  if (reject_unknown_dynamic_fields_) {\n    EXPECT_EQ(0, test_server_->counter(\"cluster.cluster_1.update_success\")->value());\n    // Unknown fields in the config result in the update_rejected counter incremented\n    EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_1.update_rejected\")->value());\n    EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n  } else {\n    EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_1.update_success\")->value());\n    if (ignore_unknown_dynamic_fields_) {\n      EXPECT_EQ(0, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    } else {\n      EXPECT_EQ(1, test_server_->counter(\"server.dynamic_unknown_fields\")->value());\n    }\n  }\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/echo_integration_test.cc",
    "content": "#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/server/utility.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\n\nstd::string echo_config;\n\nclass EchoIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                            public BaseIntegrationTest {\npublic:\n  EchoIntegrationTest() : BaseIntegrationTest(GetParam(), echo_config) {}\n\n  // Called once by the gtest framework before any EchoIntegrationTests are run.\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    echo_config = absl::StrCat(ConfigHelper::baseConfig(), R\"EOF(\n    filter_chains:\n      filters:\n        name: ratelimit\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.rate_limit.v2.RateLimit\n          domain: foo\n          stats_prefix: name\n          descriptors: [{\"key\": \"foo\", \"value\": \"bar\"}]\n      filters:\n        name: envoy.filters.network.echo\n      )EOF\");\n  }\n\n  void SetUp() override { BaseIntegrationTest::initialize(); }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, EchoIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(EchoIntegrationTest, Hello) {\n  std::string response;\n  auto connection = createConnectionDriver(\n      lookupPort(\"listener_0\"), \"hello\",\n      [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n        conn.close(Network::ConnectionCloseType::FlushWrite);\n      });\n  connection->run();\n  EXPECT_EQ(\"hello\", response);\n}\n\nTEST_P(EchoIntegrationTest, AddRemoveListener) {\n  const std::string json = TestEnvironment::substitute(R\"EOF(\nname: new_listener\naddress:\n  socket_address:\n    address: \"{{ ip_loopback_address }}\"\n    port_value: 
0\nfilter_chains:\n- filters:\n  - name: envoy.filters.network.echo\n  )EOF\",\n                                                       GetParam());\n\n  // Add the listener.\n  ConditionalInitializer listener_added_by_worker;\n  ConditionalInitializer listener_added_by_manager;\n  test_server_->setOnWorkerListenerAddedCb(\n      [&listener_added_by_worker]() -> void { listener_added_by_worker.setReady(); });\n  test_server_->server().dispatcher().post([this, json, &listener_added_by_manager]() -> void {\n    EXPECT_TRUE(test_server_->server().listenerManager().addOrUpdateListener(\n        Server::parseListenerFromV3Yaml(json), \"\", true));\n    listener_added_by_manager.setReady();\n  });\n  listener_added_by_worker.waitReady();\n  listener_added_by_manager.waitReady();\n\n  EXPECT_EQ(2UL, test_server_->server().listenerManager().listeners().size());\n  uint32_t new_listener_port = test_server_->server()\n                                   .listenerManager()\n                                   .listeners()[1]\n                                   .get()\n                                   .listenSocketFactory()\n                                   .localAddress()\n                                   ->ip()\n                                   ->port();\n\n  std::string response;\n  auto connection = createConnectionDriver(\n      lookupPort(\"listener_0\"), \"hello\",\n      [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n        conn.close(Network::ConnectionCloseType::FlushWrite);\n      });\n  connection->run();\n  EXPECT_EQ(\"hello\", response);\n\n  // Remove the listener.\n  ConditionalInitializer listener_removed;\n  test_server_->setOnWorkerListenerRemovedCb(\n      [&listener_removed]() -> void { listener_removed.setReady(); });\n  test_server_->server().dispatcher().post([this]() -> void {\n    
EXPECT_TRUE(test_server_->server().listenerManager().removeListener(\"new_listener\"));\n  });\n  listener_removed.waitReady();\n\n  // Now connect. This should fail.\n  // Allow for a few attempts, in order to handle a race (likely due to lack of\n  // LEV_OPT_CLOSE_ON_FREE, which would break listener reuse)\n  //\n  // In order for this test to work, it must be tagged as \"exclusive\" in its\n  // build file. Otherwise, it's possible that when the listener is destroyed\n  // above, another test would start listening on the released port, and this\n  // connect would unexpectedly succeed.\n  bool connect_fail = false;\n  for (int i = 0; i < 10; ++i) {\n    auto connection2 = createConnectionDriver(\n        new_listener_port, \"hello\",\n        [](Network::ClientConnection&, const Buffer::Instance&) -> void { FAIL(); });\n    connection2->waitForConnection();\n    if (connection2->connection().state() == Network::Connection::State::Closed) {\n      connect_fail = true;\n      break;\n    } else {\n      connection2->close();\n    }\n  }\n  ASSERT_TRUE(connect_fail);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/eds_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/type/v3/http.pb.h\"\n\n#include \"common/upstream/load_balancer_impl.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\n// Integration test for EDS features. EDS is consumed via filesystem\n// subscription.\nclass EdsIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                           public HttpIntegrationTest {\npublic:\n  EdsIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()),\n        codec_client_type_(envoy::type::v3::HTTP1) {}\n\n  // We need to supply the endpoints via EDS to provide health status. 
Use a\n  // filesystem delivery to simplify test mechanics.\n  void setEndpointsInPriorities(uint32_t first_priority, uint32_t second_priority,\n                                bool await_update = true) {\n    envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n    cluster_load_assignment.set_cluster_name(\"cluster_0\");\n\n    {\n      for (uint32_t i = 0; i < first_priority; ++i) {\n        auto* locality_lb_endpoints = cluster_load_assignment.add_endpoints();\n        auto* endpoint = locality_lb_endpoints->add_lb_endpoints();\n        setUpstreamAddress(i, *endpoint);\n      }\n    }\n\n    {\n      for (uint32_t i = first_priority; i < first_priority + second_priority; ++i) {\n        auto* locality_lb_endpoints = cluster_load_assignment.add_endpoints();\n        locality_lb_endpoints->set_priority(1);\n        auto* endpoint = locality_lb_endpoints->add_lb_endpoints();\n        setUpstreamAddress(i, *endpoint);\n      }\n    }\n\n    if (await_update) {\n      eds_helper_.setEdsAndWait({cluster_load_assignment}, *test_server_);\n    } else {\n      eds_helper_.setEds({cluster_load_assignment});\n    }\n  }\n\n  // We need to supply the endpoints via EDS to provide health status. 
Use a\n  // filesystem delivery to simplify test mechanics.\n  void setEndpoints(uint32_t total_endpoints, uint32_t healthy_endpoints,\n                    uint32_t degraded_endpoints, bool remaining_unhealthy = true,\n                    absl::optional<uint32_t> overprovisioning_factor = absl::nullopt,\n                    bool await_update = true) {\n    ASSERT(total_endpoints >= healthy_endpoints + degraded_endpoints);\n    envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n    cluster_load_assignment.set_cluster_name(\"cluster_0\");\n    if (overprovisioning_factor.has_value()) {\n      cluster_load_assignment.mutable_policy()->mutable_overprovisioning_factor()->set_value(\n          overprovisioning_factor.value());\n    }\n    auto* locality_lb_endpoints = cluster_load_assignment.add_endpoints();\n\n    for (uint32_t i = 0; i < total_endpoints; ++i) {\n      auto* endpoint = locality_lb_endpoints->add_lb_endpoints();\n      setUpstreamAddress(i, *endpoint);\n      // First N endpoints are degraded, next M are healthy and the remaining endpoints are\n      // unhealthy or unknown depending on remaining_unhealthy.\n      if (i < degraded_endpoints) {\n        endpoint->set_health_status(envoy::config::core::v3::DEGRADED);\n      } else if (i >= healthy_endpoints + degraded_endpoints) {\n        endpoint->set_health_status(remaining_unhealthy ? 
envoy::config::core::v3::UNHEALTHY\n                                                        : envoy::config::core::v3::UNKNOWN);\n      }\n    }\n\n    if (await_update) {\n      eds_helper_.setEdsAndWait({cluster_load_assignment}, *test_server_);\n    } else {\n      eds_helper_.setEds({cluster_load_assignment});\n    }\n  }\n\n  void initializeTest(bool http_active_hc) {\n    setUpstreamCount(4);\n    if (codec_client_type_ == envoy::type::v3::HTTP2) {\n      setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n    }\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // Switch predefined cluster_0 to CDS filesystem sourcing.\n      bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path());\n      bootstrap.mutable_static_resources()->clear_clusters();\n    });\n\n    // Set validate_clusters to false to allow us to reference a CDS cluster.\n    config_helper_.addConfigModifier(\n        [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n               hcm) { hcm.mutable_route_config()->mutable_validate_clusters()->set_value(false); });\n\n    cluster_.mutable_connect_timeout()->CopyFrom(\n        Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n    cluster_.set_name(\"cluster_0\");\n    cluster_.set_type(envoy::config::cluster::v3::Cluster::EDS);\n    auto* eds_cluster_config = cluster_.mutable_eds_cluster_config();\n    eds_cluster_config->mutable_eds_config()->set_path(eds_helper_.eds_path());\n    if (http_active_hc) {\n      auto* health_check = cluster_.add_health_checks();\n      health_check->mutable_timeout()->set_seconds(30);\n      // TODO(mattklein123): Consider using simulated time here.\n      health_check->mutable_interval()->CopyFrom(\n          Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n      health_check->mutable_no_traffic_interval()->CopyFrom(\n          
Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n      health_check->mutable_unhealthy_threshold()->set_value(1);\n      health_check->mutable_healthy_threshold()->set_value(1);\n      health_check->mutable_http_health_check()->set_path(\"/healthcheck\");\n      health_check->mutable_http_health_check()->set_codec_client_type(codec_client_type_);\n    }\n    setEndpoints(0, 0, 0, true, absl::nullopt, false);\n    cds_helper_.setCds({cluster_});\n    initialize();\n    test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 0);\n  }\n\n  envoy::type::v3::CodecClientType codec_client_type_{};\n  EdsHelper eds_helper_;\n  CdsHelper cds_helper_;\n  envoy::config::cluster::v3::Cluster cluster_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, EdsIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));\n\n// Validates that endpoints can be added and then moved to other priorities without causing crashes.\n// Primarily as a regression test for https://github.com/envoyproxy/envoy/issues/8764\nTEST_P(EdsIntegrationTest, Http2UpdatePriorities) {\n  codec_client_type_ = envoy::type::v3::HTTP2;\n  initializeTest(true);\n\n  setEndpointsInPriorities(2, 2);\n\n  setEndpointsInPriorities(4, 0);\n\n  setEndpointsInPriorities(0, 4);\n}\n\n// Verifies that a new cluster can we warmed when using HTTP/2 health checking. Regression test\n// of the issue detailed in issue #6951.\nTEST_P(EdsIntegrationTest, Http2HcClusterRewarming) {\n  codec_client_type_ = envoy::type::v3::HTTP2;\n  initializeTest(true);\n  setEndpoints(1, 0, 0, false);\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n\n  // Wait for the first HC and verify the host is healthy. 
This should warm the initial cluster.\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.membership_healthy\", 1);\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n\n  // Trigger a CDS update. This should cause a new cluster to require warming, blocked on the host\n  // being health checked.\n  cluster_.mutable_circuit_breakers()->add_thresholds()->mutable_max_connections()->set_value(100);\n  cds_helper_.setCds({cluster_});\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 1);\n  EXPECT_EQ(1, test_server_->gauge(\"cluster_manager.warming_clusters\")->value());\n\n  // We need to do a bunch of work to get a hold of second hc connection.\n  FakeHttpConnectionPtr fake_upstream_connection;\n  auto result = fake_upstreams_[0]->waitForHttpConnection(\n      *dispatcher_, fake_upstream_connection, TestUtility::DefaultTimeout, max_request_headers_kb_);\n  RELEASE_ASSERT(result, result.message());\n\n  FakeStreamPtr upstream_request;\n  result = fake_upstream_connection->waitForNewStream(*dispatcher_, upstream_request);\n  RELEASE_ASSERT(result, result.message());\n  // Wait for the stream to be completely received.\n  result = upstream_request->waitForEndStream(*dispatcher_);\n  RELEASE_ASSERT(result, result.message());\n\n  // Respond with a health check. 
This will cause the previous cluster to be destroyed inline as\n  // part of processing the response.\n  upstream_request->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, true);\n  test_server_->waitForGaugeEq(\"cluster_manager.warming_clusters\", 0);\n  EXPECT_EQ(0, test_server_->gauge(\"cluster_manager.warming_clusters\")->value());\n\n  // Since the second connection is not managed by the integration test base we need to close it\n  // ourselves.\n  result = fake_upstream_connection->close();\n  RELEASE_ASSERT(result, result.message());\n  result = fake_upstream_connection->waitForDisconnect();\n  RELEASE_ASSERT(result, result.message());\n  fake_upstream_connection.reset();\n}\n\n// Verify that a host stabilized via active health checking which is first removed from EDS and\n// then fails health checking is removed.\nTEST_P(EdsIntegrationTest, RemoveAfterHcFail) {\n  initializeTest(true);\n  setEndpoints(1, 0, 0, false);\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n\n  // Wait for the first HC and verify the host is healthy.\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.membership_healthy\", 1);\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n\n  // Clear out the host and verify the host is still healthy.\n  setEndpoints(0, 0, 0);\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n\n  // Fail HC and verify the host is gone.\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}, {\"connection\", \"close\"}}, true);\n  
test_server_->waitForGaugeEq(\"cluster.cluster_0.membership_healthy\", 0);\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n}\n\n// Verifies that endpoints are ignored until health checked when configured to.\nTEST_P(EdsIntegrationTest, EndpointWarmingSuccessfulHc) {\n  cluster_.mutable_common_lb_config()->set_ignore_new_hosts_until_first_hc(true);\n\n  // Endpoints are initially excluded.\n  initializeTest(true);\n  setEndpoints(1, 0, 0, false);\n\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_excluded\")->value());\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n\n  // Wait for the first HC and verify the host is healthy and that it is no longer being excluded.\n  // The other endpoint should still be excluded.\n  waitForNextUpstreamRequest(0);\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.membership_excluded\", 0);\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n}\n\n// Verifies that endpoints are ignored until health checked when configured to when the first\n// health check fails.\nTEST_P(EdsIntegrationTest, EndpointWarmingFailedHc) {\n  cluster_.mutable_common_lb_config()->set_ignore_new_hosts_until_first_hc(true);\n\n  // Endpoints are initially excluded.\n  initializeTest(true);\n  setEndpoints(1, 0, 0, false);\n\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_excluded\")->value());\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n\n  // Wait for the first HC and verify the host is healthy and that 
it is no longer being excluded.\n  // The other endpoint should still be excluded.\n  waitForNextUpstreamRequest(0);\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, true);\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.membership_excluded\", 0);\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n}\n\n// Validate that health status updates are consumed from EDS.\nTEST_P(EdsIntegrationTest, HealthUpdate) {\n  initializeTest(false);\n  // Initial state, no cluster members.\n  EXPECT_EQ(0, test_server_->counter(\"cluster.cluster_0.membership_change\")->value());\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n  // 2/2 healthy endpoints.\n  setEndpoints(2, 2, 0);\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.membership_change\")->value());\n  EXPECT_EQ(2, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(2, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n  // Drop to 0/2 healthy endpoints.\n  setEndpoints(2, 0, 0);\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.membership_change\")->value());\n  EXPECT_EQ(2, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n  // Increase to 1/2 healthy endpoints.\n  setEndpoints(2, 1, 0);\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.membership_change\")->value());\n  EXPECT_EQ(2, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n  // Add host and modify health to 2/3 healthy endpoints.\n  setEndpoints(3, 2, 0);\n  
EXPECT_EQ(2, test_server_->counter(\"cluster.cluster_0.membership_change\")->value());\n  EXPECT_EQ(3, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(2, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n  // Modify health to 2/3 healthy and 1/3 degraded.\n  setEndpoints(3, 2, 1);\n  EXPECT_EQ(2, test_server_->counter(\"cluster.cluster_0.membership_change\")->value());\n  EXPECT_EQ(3, test_server_->gauge(\"cluster.cluster_0.membership_total\")->value());\n  EXPECT_EQ(2, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n  EXPECT_EQ(1, test_server_->gauge(\"cluster.cluster_0.membership_degraded\")->value());\n}\n\n// Validate that overprovisioning_factor update are picked up by Envoy.\nTEST_P(EdsIntegrationTest, OverprovisioningFactorUpdate) {\n  initializeTest(false);\n  // Default overprovisioning factor.\n  setEndpoints(4, 4, 0);\n  auto get_and_compare = [this](const uint32_t expected_factor) {\n    const auto& cluster_map = test_server_->server().clusterManager().clusters();\n    EXPECT_EQ(1, cluster_map.size());\n    EXPECT_EQ(1, cluster_map.count(\"cluster_0\"));\n    const auto& cluster_ref = cluster_map.find(\"cluster_0\")->second;\n    const auto& hostset_per_priority = cluster_ref.get().prioritySet().hostSetsPerPriority();\n    EXPECT_EQ(1, hostset_per_priority.size());\n    const Envoy::Upstream::HostSetPtr& host_set = hostset_per_priority[0];\n    EXPECT_EQ(expected_factor, host_set->overprovisioningFactor());\n  };\n  get_and_compare(Envoy::Upstream::kDefaultOverProvisioningFactor);\n\n  // Use new overprovisioning factor 200.\n  setEndpoints(4, 4, 0, true, 200);\n  get_and_compare(200);\n}\n\n// Verifies that EDS update only triggers member update callbacks once per update.\nTEST_P(EdsIntegrationTest, BatchMemberUpdateCb) {\n  initializeTest(false);\n\n  uint32_t member_update_count{};\n\n  auto& priority_set = test_server_->server()\n                           
.clusterManager()\n                           .clusters()\n                           .find(\"cluster_0\")\n                           ->second.get()\n                           .prioritySet();\n\n  // Keep track of how many times we're seeing a member update callback.\n  priority_set.addMemberUpdateCb([&](const auto& hosts_added, const auto&) {\n    // We should see both hosts present in the member update callback.\n    EXPECT_EQ(2, hosts_added.size());\n    member_update_count++;\n  });\n\n  envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n  cluster_load_assignment.set_cluster_name(\"cluster_0\");\n\n  {\n    auto* locality_lb_endpoints = cluster_load_assignment.add_endpoints();\n\n    auto* endpoint = locality_lb_endpoints->add_lb_endpoints();\n    setUpstreamAddress(0, *endpoint);\n  }\n\n  auto* locality_lb_endpoints = cluster_load_assignment.add_endpoints();\n  locality_lb_endpoints->set_priority(1);\n\n  auto* endpoint = locality_lb_endpoints->add_lb_endpoints();\n  setUpstreamAddress(1, *endpoint);\n\n  eds_helper_.setEdsAndWait({cluster_load_assignment}, *test_server_);\n\n  EXPECT_EQ(1, member_update_count);\n}\n\nTEST_P(EdsIntegrationTest, StatsReadyFilter) {\n  config_helper_.addFilter(\"name: eds-ready-filter\");\n  initializeTest(false);\n\n  // Initial state: no healthy endpoints\n  EXPECT_EQ(0, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/cluster1\", \"\", downstream_protocol_, version_, \"foo.com\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"500\", response->headers().getStatusValue());\n  EXPECT_EQ(\"EDS not ready\", response->body());\n\n  cleanupUpstreamAndDownstream();\n\n  // 2/2 healthy endpoints.\n  setEndpoints(2, 2, 0);\n  EXPECT_EQ(2, test_server_->gauge(\"cluster.cluster_0.membership_healthy\")->value());\n  response = 
IntegrationUtil::makeSingleRequest(lookupPort(\"http\"), \"GET\", \"/cluster1\", \"\",\n                                                downstream_protocol_, version_, \"foo.com\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"EDS is ready\", response->body());\n\n  cleanupUpstreamAndDownstream();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/extension_discovery_integration_test.cc",
    "content": "#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/service/extension/v3/config_discovery.pb.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/filters/set_response_code_filter_config.pb.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nstd::string denyPrivateConfig() {\n  return R\"EOF(\n    prefix: \"/private\"\n    code: 403\n)EOF\";\n}\n\nstd::string allowAllConfig() { return \"code: 200\"; }\n\nstd::string invalidConfig() { return \"code: 90\"; }\n\nclass ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationParamTest,\n                                          public HttpIntegrationTest {\npublic:\n  ExtensionDiscoveryIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {}\n\n  void addDynamicFilter(const std::string& name, bool apply_without_warming,\n                        bool set_default_config = true, bool rate_limit = false) {\n    config_helper_.addConfigModifier(\n        [this, name, apply_without_warming, set_default_config, rate_limit](\n            envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                http_connection_manager) {\n          auto* filter = http_connection_manager.mutable_http_filters()->Add();\n          filter->set_name(name);\n          auto* discovery = filter->mutable_config_discovery();\n          discovery->add_type_urls(\n              \"type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig\");\n          if (set_default_config) {\n            const auto default_configuration =\n                TestUtility::parseYaml<test::integration::filters::SetResponseCodeFilterConfig>(\n                    \"code: 403\");\n            
discovery->mutable_default_config()->PackFrom(default_configuration);\n          }\n          discovery->set_apply_default_config_without_warming(apply_without_warming);\n          auto* api_config_source = discovery->mutable_config_source()->mutable_api_config_source();\n          api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n          api_config_source->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3);\n          if (rate_limit) {\n            api_config_source->mutable_rate_limit_settings()->mutable_max_tokens()->set_value(10);\n          }\n          auto* grpc_service = api_config_source->add_grpc_services();\n          setGrpcService(*grpc_service, \"ecds_cluster\", getEcdsFakeUpstream().localAddress());\n          // keep router the last\n          auto size = http_connection_manager.http_filters_size();\n          http_connection_manager.mutable_http_filters()->SwapElements(size - 2, size - 1);\n        });\n  }\n\n  void initialize() override {\n    defer_listener_finalization_ = true;\n    setUpstreamCount(1);\n    // Add an xDS cluster for extension config discovery.\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* ecds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      ecds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      ecds_cluster->set_name(\"ecds_cluster\");\n      ecds_cluster->mutable_http2_protocol_options();\n    });\n    // Make HCM do a direct response to avoid timing issues with the upstream.\n    config_helper_.addConfigModifier(\n        [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n               http_connection_manager) {\n          http_connection_manager.mutable_route_config()\n              ->mutable_virtual_hosts(0)\n              ->mutable_routes(0)\n              ->mutable_direct_response()\n              ->set_status(200);\n        
});\n    HttpIntegrationTest::initialize();\n  }\n\n  ~ExtensionDiscoveryIntegrationTest() override {\n    if (ecds_connection_ != nullptr) {\n      AssertionResult result = ecds_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = ecds_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n      ecds_connection_.reset();\n    }\n  }\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    // Create the extension config discovery upstream (fake_upstreams_[1]).\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void waitXdsStream() {\n    auto& upstream = getEcdsFakeUpstream();\n    AssertionResult result = upstream.waitForHttpConnection(*dispatcher_, ecds_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = ecds_connection_->waitForNewStream(*dispatcher_, ecds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    ecds_stream_->startGrpcStream();\n  }\n\n  void sendXdsResponse(const std::string& name, const std::string& version,\n                       const std::string& yaml_config) {\n    envoy::service::discovery::v3::DiscoveryResponse response;\n    response.set_version_info(version);\n    response.set_type_url(\"type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig\");\n    const auto configuration =\n        TestUtility::parseYaml<test::integration::filters::SetResponseCodeFilterConfig>(\n            yaml_config);\n    envoy::config::core::v3::TypedExtensionConfig typed_config;\n    typed_config.set_name(name);\n    typed_config.mutable_typed_config()->PackFrom(configuration);\n    response.add_resources()->PackFrom(typed_config);\n    ecds_stream_->sendGrpcMessage(response);\n  }\n\n  FakeUpstream& getEcdsFakeUpstream() const { return *fake_upstreams_[1]; }\n\n  FakeHttpConnectionPtr ecds_connection_{nullptr};\n  FakeStreamPtr ecds_stream_{nullptr};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, 
ExtensionDiscoveryIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS);\n\nTEST_P(ExtensionDiscoveryIntegrationTest, BasicSuccess) {\n  on_server_init_function_ = [&]() { waitXdsStream(); };\n  addDynamicFilter(\"foo\", false);\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing);\n  registerTestServerPorts({\"http\"});\n  sendXdsResponse(\"foo\", \"1\", denyPrivateConfig());\n  test_server_->waitForCounterGe(\"http.config_test.extension_config_discovery.foo.config_reload\",\n                                 1);\n  test_server_->waitUntilListenersReady();\n  test_server_->waitForGaugeGe(\"listener_manager.workers_started\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized);\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  {\n    Http::TestRequestHeaderMapImpl request_headers{\n        {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n    auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n  Http::TestRequestHeaderMapImpl banned_request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/private/key\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  {\n    auto response = codec_client_->makeHeaderOnlyRequest(banned_request_headers);\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"403\", response->headers().getStatusValue());\n  }\n  // Update again but keep the connection.\n  {\n    sendXdsResponse(\"foo\", \"2\", allowAllConfig());\n    test_server_->waitForCounterGe(\"http.config_test.extension_config_discovery.foo.config_reload\",\n                          
         2);\n    auto response = codec_client_->makeHeaderOnlyRequest(banned_request_headers);\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n}\n\nTEST_P(ExtensionDiscoveryIntegrationTest, BasicFailWithDefault) {\n  on_server_init_function_ = [&]() { waitXdsStream(); };\n  addDynamicFilter(\"foo\", false);\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing);\n  registerTestServerPorts({\"http\"});\n  sendXdsResponse(\"foo\", \"1\", invalidConfig());\n  test_server_->waitForCounterGe(\"http.config_test.extension_config_discovery.foo.config_fail\", 1);\n  test_server_->waitUntilListenersReady();\n  test_server_->waitForGaugeGe(\"listener_manager.workers_started\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized);\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"403\", response->headers().getStatusValue());\n}\n\nTEST_P(ExtensionDiscoveryIntegrationTest, BasicFailWithoutDefault) {\n  on_server_init_function_ = [&]() { waitXdsStream(); };\n  addDynamicFilter(\"foo\", false, false);\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing);\n  registerTestServerPorts({\"http\"});\n  sendXdsResponse(\"foo\", \"1\", invalidConfig());\n  
test_server_->waitForCounterGe(\"http.config_test.extension_config_discovery.foo.config_fail\", 1);\n  test_server_->waitUntilListenersReady();\n  test_server_->waitForGaugeGe(\"listener_manager.workers_started\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized);\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"500\", response->headers().getStatusValue());\n}\n\nTEST_P(ExtensionDiscoveryIntegrationTest, BasicWithoutWarming) {\n  on_server_init_function_ = [&]() { waitXdsStream(); };\n  addDynamicFilter(\"bar\", true);\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized);\n  registerTestServerPorts({\"http\"});\n  test_server_->waitUntilListenersReady();\n  test_server_->waitForGaugeGe(\"listener_manager.workers_started\", 1);\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  // Initial request uses the default config.\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  {\n    auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"403\", response->headers().getStatusValue());\n  }\n\n  // Update should cause a different response.\n  sendXdsResponse(\"bar\", \"1\", denyPrivateConfig());\n  test_server_->waitForCounterGe(\"http.config_test.extension_config_discovery.bar.config_reload\",\n     
                            1);\n  {\n    auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n}\n\nTEST_P(ExtensionDiscoveryIntegrationTest, BasicWithoutWarmingFail) {\n  on_server_init_function_ = [&]() { waitXdsStream(); };\n  addDynamicFilter(\"bar\", true);\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized);\n  registerTestServerPorts({\"http\"});\n  test_server_->waitUntilListenersReady();\n  test_server_->waitForGaugeGe(\"listener_manager.workers_started\", 1);\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  // Update should not cause a different response.\n  sendXdsResponse(\"bar\", \"1\", invalidConfig());\n  test_server_->waitForCounterGe(\"http.config_test.extension_config_discovery.bar.config_fail\", 1);\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"403\", response->headers().getStatusValue());\n}\n\nTEST_P(ExtensionDiscoveryIntegrationTest, BasicTwoSubscriptionsSameName) {\n  on_server_init_function_ = [&]() { waitXdsStream(); };\n  addDynamicFilter(\"baz\", true);\n  addDynamicFilter(\"baz\", false);\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing);\n  registerTestServerPorts({\"http\"});\n  sendXdsResponse(\"baz\", \"1\", denyPrivateConfig());\n  
test_server_->waitForCounterGe(\"http.config_test.extension_config_discovery.baz.config_reload\",\n                                 1);\n  test_server_->waitUntilListenersReady();\n  test_server_->waitForGaugeGe(\"listener_manager.workers_started\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized);\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(ExtensionDiscoveryIntegrationTest, DestroyDuringInit) {\n  // If rate limiting is enabled on the config source, gRPC mux drainage updates the requests\n  // queue size on destruction. The update calls out to stats scope nested under the extension\n  // config subscription stats scope. This test verifies that the stats scope outlasts the gRPC\n  // subscription.\n  on_server_init_function_ = [&]() { waitXdsStream(); };\n  addDynamicFilter(\"foo\", false, true);\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 1);\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing);\n  test_server_.reset();\n  auto result = ecds_connection_->waitForDisconnect();\n  RELEASE_ASSERT(result, result.message());\n  ecds_connection_.reset();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/fake_upstream.cc",
    "content": "#include \"test/integration/fake_upstream.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/http1/codec_impl.h\"\n#include \"common/http/http1/codec_impl_legacy.h\"\n#include \"common/http/http2/codec_impl_legacy.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/utility.h\"\n\n#include \"server/connection_handler_impl.h\"\n\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nusing namespace std::chrono_literals;\n\nusing std::chrono::milliseconds;\nusing testing::AssertionFailure;\nusing testing::AssertionResult;\nusing testing::AssertionSuccess;\n\nnamespace Envoy {\nFakeStream::FakeStream(FakeHttpConnection& parent, Http::ResponseEncoder& encoder,\n                       Event::TestTimeSystem& time_system)\n    : parent_(parent), encoder_(encoder), time_system_(time_system) {\n  encoder.getStream().addCallbacks(*this);\n}\n\nvoid FakeStream::decodeHeaders(Http::RequestHeaderMapPtr&& headers, bool end_stream) {\n  absl::MutexLock lock(&lock_);\n  headers_ = std::move(headers);\n  setEndStream(end_stream);\n}\n\nvoid FakeStream::decodeData(Buffer::Instance& data, bool end_stream) {\n  received_data_ = true;\n  absl::MutexLock lock(&lock_);\n  body_.add(data);\n  setEndStream(end_stream);\n}\n\nvoid FakeStream::decodeTrailers(Http::RequestTrailerMapPtr&& trailers) {\n  absl::MutexLock lock(&lock_);\n  setEndStream(true);\n  trailers_ = std::move(trailers);\n}\n\nvoid FakeStream::decodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr) {\n  for (const auto& metadata : *metadata_map_ptr) {\n    duplicated_metadata_key_count_[metadata.first]++;\n    metadata_map_.insert(metadata);\n  }\n}\n\nvoid 
FakeStream::postToConnectionThread(std::function<void()> cb) {\n  parent_.connection().dispatcher().post(cb);\n}\n\nvoid FakeStream::encode100ContinueHeaders(const Http::ResponseHeaderMap& headers) {\n  std::shared_ptr<Http::ResponseHeaderMap> headers_copy(\n      Http::createHeaderMap<Http::ResponseHeaderMapImpl>(headers));\n  parent_.connection().dispatcher().post(\n      [this, headers_copy]() -> void { encoder_.encode100ContinueHeaders(*headers_copy); });\n}\n\nvoid FakeStream::encodeHeaders(const Http::HeaderMap& headers, bool end_stream) {\n  std::shared_ptr<Http::ResponseHeaderMap> headers_copy(\n      Http::createHeaderMap<Http::ResponseHeaderMapImpl>(headers));\n  if (add_served_by_header_) {\n    headers_copy->addCopy(Http::LowerCaseString(\"x-served-by\"),\n                          parent_.connection().localAddress()->asString());\n  }\n\n  parent_.connection().dispatcher().post([this, headers_copy, end_stream]() -> void {\n    encoder_.encodeHeaders(*headers_copy, end_stream);\n  });\n}\n\nvoid FakeStream::encodeData(absl::string_view data, bool end_stream) {\n  parent_.connection().dispatcher().post([this, data, end_stream]() -> void {\n    Buffer::OwnedImpl fake_data(data.data(), data.size());\n    encoder_.encodeData(fake_data, end_stream);\n  });\n}\n\nvoid FakeStream::encodeData(uint64_t size, bool end_stream) {\n  parent_.connection().dispatcher().post([this, size, end_stream]() -> void {\n    Buffer::OwnedImpl data(std::string(size, 'a'));\n    encoder_.encodeData(data, end_stream);\n  });\n}\n\nvoid FakeStream::encodeData(Buffer::Instance& data, bool end_stream) {\n  std::shared_ptr<Buffer::Instance> data_copy = std::make_shared<Buffer::OwnedImpl>(data);\n  parent_.connection().dispatcher().post(\n      [this, data_copy, end_stream]() -> void { encoder_.encodeData(*data_copy, end_stream); });\n}\n\nvoid FakeStream::encodeTrailers(const Http::HeaderMap& trailers) {\n  std::shared_ptr<Http::ResponseTrailerMap> trailers_copy(\n      
Http::createHeaderMap<Http::ResponseTrailerMapImpl>(trailers));\n  parent_.connection().dispatcher().post(\n      [this, trailers_copy]() -> void { encoder_.encodeTrailers(*trailers_copy); });\n}\n\nvoid FakeStream::encodeResetStream() {\n  parent_.connection().dispatcher().post(\n      [this]() -> void { encoder_.getStream().resetStream(Http::StreamResetReason::LocalReset); });\n}\n\nvoid FakeStream::encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) {\n  parent_.connection().dispatcher().post(\n      [this, &metadata_map_vector]() -> void { encoder_.encodeMetadata(metadata_map_vector); });\n}\n\nvoid FakeStream::readDisable(bool disable) {\n  parent_.connection().dispatcher().post(\n      [this, disable]() -> void { encoder_.getStream().readDisable(disable); });\n}\n\nvoid FakeStream::onResetStream(Http::StreamResetReason, absl::string_view) {\n  absl::MutexLock lock(&lock_);\n  saw_reset_ = true;\n}\n\nAssertionResult FakeStream::waitForHeadersComplete(milliseconds timeout) {\n  absl::MutexLock lock(&lock_);\n  const auto reached = [this]()\n                           ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { return headers_ != nullptr; };\n  if (!time_system_.waitFor(lock_, absl::Condition(&reached), timeout)) {\n    return AssertionFailure() << \"Timed out waiting for headers.\";\n  }\n  return AssertionSuccess();\n}\n\nnamespace {\n// Perform a wait on a condition while still allowing for periodic client dispatcher runs that\n// occur on the current thread.\nbool waitForWithDispatcherRun(Event::TestTimeSystem& time_system, absl::Mutex& lock,\n                              const std::function<bool()>& condition,\n                              Event::Dispatcher& client_dispatcher, milliseconds timeout)\n    ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock) {\n  Event::TestTimeSystem::RealTimeBound bound(timeout);\n  while (bound.withinBound()) {\n    // Wake up every 5ms to run the client dispatcher.\n    if (time_system.waitFor(lock, 
absl::Condition(&condition), 5ms)) {\n      return true;\n    }\n\n    // Run the client dispatcher since we may need to process window updates, etc.\n    client_dispatcher.run(Event::Dispatcher::RunType::NonBlock);\n  }\n  return false;\n}\n} // namespace\n\nAssertionResult FakeStream::waitForData(Event::Dispatcher& client_dispatcher, uint64_t body_length,\n                                        milliseconds timeout) {\n  absl::MutexLock lock(&lock_);\n  if (!waitForWithDispatcherRun(\n          time_system_, lock_,\n          [this, body_length]()\n              ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { return (body_.length() >= body_length); },\n          client_dispatcher, timeout)) {\n    return AssertionFailure() << \"Timed out waiting for data.\";\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult FakeStream::waitForData(Event::Dispatcher& client_dispatcher,\n                                        absl::string_view data, milliseconds timeout) {\n  auto succeeded = waitForData(client_dispatcher, data.length(), timeout);\n  if (succeeded) {\n    Buffer::OwnedImpl buffer(data.data(), data.length());\n    if (!TestUtility::buffersEqual(body(), buffer)) {\n      return AssertionFailure() << body().toString() << \" not equal to \" << data;\n    }\n  }\n  return succeeded;\n}\n\nAssertionResult FakeStream::waitForEndStream(Event::Dispatcher& client_dispatcher,\n                                             milliseconds timeout) {\n  absl::MutexLock lock(&lock_);\n  if (!waitForWithDispatcherRun(\n          time_system_, lock_,\n          [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { return end_stream_; }, client_dispatcher,\n          timeout)) {\n    return AssertionFailure() << \"Timed out waiting for end of stream.\";\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult FakeStream::waitForReset(milliseconds timeout) {\n  absl::MutexLock lock(&lock_);\n  if (!time_system_.waitFor(lock_, absl::Condition(&saw_reset_), timeout)) {\n    return 
AssertionFailure() << \"Timed out waiting for reset.\";\n  }\n  return AssertionSuccess();\n}\n\nvoid FakeStream::startGrpcStream() {\n  encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n}\n\nvoid FakeStream::finishGrpcStream(Grpc::Status::GrpcStatus status) {\n  encodeTrailers(Http::TestResponseTrailerMapImpl{\n      {\"grpc-status\", std::to_string(static_cast<uint32_t>(status))}});\n}\n\n// The TestHttp1ServerConnectionImpl outlives its underlying Network::Connection\n// so must not access the Connection on teardown. To achieve this, clear the\n// read disable calls to avoid checking / editing the Connection blocked state.\nclass TestHttp1ServerConnectionImpl : public Http::Http1::ServerConnectionImpl {\npublic:\n  using Http::Http1::ServerConnectionImpl::ServerConnectionImpl;\n\n  void onMessageComplete() override {\n    ServerConnectionImpl::onMessageComplete();\n\n    if (activeRequest().has_value() && activeRequest().value().request_decoder_) {\n      // Undo the read disable from the base class - we have many tests which\n      // waitForDisconnect after a full request has been read which will not\n      // receive the disconnect if reading is disabled.\n      activeRequest().value().response_encoder_.readDisable(false);\n    }\n  }\n  ~TestHttp1ServerConnectionImpl() override {\n    if (activeRequest().has_value()) {\n      activeRequest().value().response_encoder_.clearReadDisableCallsForTests();\n    }\n  }\n};\n\nnamespace Legacy {\nclass TestHttp1ServerConnectionImpl : public Http::Legacy::Http1::ServerConnectionImpl {\npublic:\n  using Http::Legacy::Http1::ServerConnectionImpl::ServerConnectionImpl;\n\n  void onMessageComplete() override {\n    ServerConnectionImpl::onMessageComplete();\n\n    if (activeRequest().has_value() && activeRequest().value().request_decoder_) {\n      // Undo the read disable from the base class - we have many tests which\n      // waitForDisconnect after a full request has been read which will 
not\n      // receive the disconnect if reading is disabled.\n      activeRequest().value().response_encoder_.readDisable(false);\n    }\n  }\n  ~TestHttp1ServerConnectionImpl() override {\n    if (activeRequest().has_value()) {\n      activeRequest().value().response_encoder_.clearReadDisableCallsForTests();\n    }\n  }\n};\n} // namespace Legacy\n\nFakeHttpConnection::FakeHttpConnection(\n    FakeUpstream& fake_upstream, SharedConnectionWrapper& shared_connection, Type type,\n    Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb,\n    uint32_t max_request_headers_count,\n    envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n        headers_with_underscores_action)\n    : FakeConnectionBase(shared_connection, time_system), type_(type) {\n  if (type == Type::HTTP1) {\n    Http::Http1Settings http1_settings;\n    // For the purpose of testing, we always have the upstream encode the trailers if any\n    http1_settings.enable_trailers_ = true;\n    Http::Http1::CodecStats& stats = fake_upstream.http1CodecStats();\n#ifdef ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS\n    codec_ = std::make_unique<TestHttp1ServerConnectionImpl>(\n        shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb,\n        max_request_headers_count, headers_with_underscores_action);\n#else\n    codec_ = std::make_unique<Legacy::TestHttp1ServerConnectionImpl>(\n        shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb,\n        max_request_headers_count, headers_with_underscores_action);\n#endif\n  } else {\n    envoy::config::core::v3::Http2ProtocolOptions http2_options =\n        ::Envoy::Http2::Utility::initializeAndValidateOptions(\n            envoy::config::core::v3::Http2ProtocolOptions());\n    http2_options.set_allow_connect(true);\n    http2_options.set_allow_metadata(true);\n    Http::Http2::CodecStats& stats = fake_upstream.http2CodecStats();\n#ifdef 
ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS\n    codec_ = std::make_unique<Http::Http2::ServerConnectionImpl>(\n        shared_connection_.connection(), *this, stats, random_, http2_options,\n        max_request_headers_kb, max_request_headers_count, headers_with_underscores_action);\n#else\n    codec_ = std::make_unique<Http::Legacy::Http2::ServerConnectionImpl>(\n        shared_connection_.connection(), *this, stats, random_, http2_options,\n        max_request_headers_kb, max_request_headers_count, headers_with_underscores_action);\n#endif\n    ASSERT(type == Type::HTTP2);\n  }\n  shared_connection_.connection().addReadFilter(\n      Network::ReadFilterSharedPtr{new ReadFilter(*this)});\n}\n\nAssertionResult FakeConnectionBase::close(std::chrono::milliseconds timeout) {\n  ENVOY_LOG(trace, \"FakeConnectionBase close\");\n  if (!shared_connection_.connected()) {\n    return AssertionSuccess();\n  }\n  return shared_connection_.executeOnDispatcher(\n      [](Network::Connection& connection) {\n        connection.close(Network::ConnectionCloseType::FlushWrite);\n      },\n      timeout);\n}\n\nAssertionResult FakeConnectionBase::readDisable(bool disable, std::chrono::milliseconds timeout) {\n  return shared_connection_.executeOnDispatcher(\n      [disable](Network::Connection& connection) { connection.readDisable(disable); }, timeout);\n}\n\nAssertionResult FakeConnectionBase::enableHalfClose(bool enable,\n                                                    std::chrono::milliseconds timeout) {\n  return shared_connection_.executeOnDispatcher(\n      [enable](Network::Connection& connection) { connection.enableHalfClose(enable); }, timeout);\n}\n\nHttp::RequestDecoder& FakeHttpConnection::newStream(Http::ResponseEncoder& encoder, bool) {\n  absl::MutexLock lock(&lock_);\n  new_streams_.emplace_back(new FakeStream(*this, encoder, time_system_));\n  return *new_streams_.back();\n}\n\nvoid FakeHttpConnection::onGoAway(Http::GoAwayErrorCode code) {\n  ASSERT(type_ == 
Type::HTTP2);\n  // Usually indicates connection level errors, no operations are needed since\n  // the connection will be closed soon.\n  ENVOY_LOG(info, \"FakeHttpConnection receives GOAWAY: \", code);\n}\n\nAssertionResult FakeConnectionBase::waitForDisconnect(milliseconds timeout) {\n  ENVOY_LOG(trace, \"FakeConnectionBase waiting for disconnect\");\n  absl::MutexLock lock(&lock_);\n  const auto reached = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {\n    return !shared_connection_.connectedLockHeld();\n  };\n\n  if (!time_system_.waitFor(lock_, absl::Condition(&reached), timeout)) {\n    return AssertionFailure() << \"Timed out waiting for disconnect.\";\n  }\n  ENVOY_LOG(trace, \"FakeConnectionBase done waiting for disconnect\");\n  return AssertionSuccess();\n}\n\nAssertionResult FakeConnectionBase::waitForHalfClose(milliseconds timeout) {\n  absl::MutexLock lock(&lock_);\n  if (!time_system_.waitFor(lock_, absl::Condition(&half_closed_), timeout)) {\n    return AssertionFailure() << \"Timed out waiting for half close.\";\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult FakeHttpConnection::waitForNewStream(Event::Dispatcher& client_dispatcher,\n                                                     FakeStreamPtr& stream,\n                                                     std::chrono::milliseconds timeout) {\n  absl::MutexLock lock(&lock_);\n  if (!waitForWithDispatcherRun(\n          time_system_, lock_,\n          [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { return !new_streams_.empty(); },\n          client_dispatcher, timeout)) {\n    return AssertionFailure() << \"Timed out waiting for new stream.\";\n  }\n  stream = std::move(new_streams_.front());\n  new_streams_.pop_front();\n  return AssertionSuccess();\n}\n\nFakeUpstream::FakeUpstream(const std::string& uds_path, FakeHttpConnection::Type type,\n                           Event::TestTimeSystem& time_system)\n    : FakeUpstream(Network::Test::createRawBufferSocketFactory(),\n             
      Network::SocketPtr{new Network::UdsListenSocket(\n                       std::make_shared<Network::Address::PipeInstance>(uds_path))},\n                   type, time_system, false) {\n  ENVOY_LOG(info, \"starting fake server on unix domain socket {}\", uds_path);\n}\n\nstatic Network::SocketPtr\nmakeTcpListenSocket(const Network::Address::InstanceConstSharedPtr& address) {\n  return std::make_unique<Network::TcpListenSocket>(address, nullptr, true);\n}\n\nstatic Network::SocketPtr makeTcpListenSocket(uint32_t port, Network::Address::IpVersion version) {\n  return makeTcpListenSocket(Network::Utility::parseInternetAddress(\n      Network::Test::getLoopbackAddressString(version), port));\n}\n\nstatic Network::SocketPtr\nmakeUdpListenSocket(const Network::Address::InstanceConstSharedPtr& address) {\n  auto socket = std::make_unique<Network::UdpListenSocket>(address, nullptr, true);\n  // TODO(mattklein123): These options are set in multiple locations. We should centralize them for\n  // UDP listeners.\n  socket->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions());\n  socket->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions());\n  return socket;\n}\n\nFakeUpstream::FakeUpstream(const Network::Address::InstanceConstSharedPtr& address,\n                           FakeHttpConnection::Type type, Event::TestTimeSystem& time_system,\n                           bool enable_half_close, bool udp_fake_upstream)\n    : FakeUpstream(Network::Test::createRawBufferSocketFactory(),\n                   udp_fake_upstream ? makeUdpListenSocket(address) : makeTcpListenSocket(address),\n                   type, time_system, enable_half_close) {\n  ENVOY_LOG(info, \"starting fake server on socket {}:{}. Address version is {}. 
UDP={}\",\n            address->ip()->addressAsString(), address->ip()->port(),\n            Network::Test::addressVersionAsString(address->ip()->version()), udp_fake_upstream);\n}\n\nFakeUpstream::FakeUpstream(uint32_t port, FakeHttpConnection::Type type,\n                           Network::Address::IpVersion version, Event::TestTimeSystem& time_system,\n                           bool enable_half_close)\n    : FakeUpstream(Network::Test::createRawBufferSocketFactory(),\n                   makeTcpListenSocket(port, version), type, time_system, enable_half_close) {\n  ENVOY_LOG(info, \"starting fake server on port {}. Address version is {}\",\n            localAddress()->ip()->port(), Network::Test::addressVersionAsString(version));\n}\n\nFakeUpstream::FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory,\n                           uint32_t port, FakeHttpConnection::Type type,\n                           Network::Address::IpVersion version, Event::TestTimeSystem& time_system)\n    : FakeUpstream(std::move(transport_socket_factory), makeTcpListenSocket(port, version), type,\n                   time_system, false) {\n  ENVOY_LOG(info, \"starting fake SSL server on port {}. 
Address version is {}\",\n            localAddress()->ip()->port(), Network::Test::addressVersionAsString(version));\n}\n\nFakeUpstream::FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory,\n                           Network::SocketPtr&& listen_socket, FakeHttpConnection::Type type,\n                           Event::TestTimeSystem& time_system, bool enable_half_close)\n    : http_type_(type), socket_(Network::SocketSharedPtr(listen_socket.release())),\n      socket_factory_(std::make_shared<FakeListenSocketFactory>(socket_)),\n      api_(Api::createApiForTest(stats_store_)), time_system_(time_system),\n      dispatcher_(api_->allocateDispatcher(\"fake_upstream\")),\n      handler_(new Server::ConnectionHandlerImpl(*dispatcher_, 0)),\n      read_disable_on_new_connection_(true), enable_half_close_(enable_half_close),\n      listener_(*this),\n      filter_chain_(Network::Test::createEmptyFilterChain(std::move(transport_socket_factory))) {\n  thread_ = api_->threadFactory().createThread([this]() -> void { threadRoutine(); });\n  server_initialized_.waitReady();\n}\n\nFakeUpstream::~FakeUpstream() { cleanUp(); };\n\nvoid FakeUpstream::cleanUp() {\n  if (thread_.get()) {\n    dispatcher_->exit();\n    thread_->join();\n    thread_.reset();\n  }\n}\n\nbool FakeUpstream::createNetworkFilterChain(Network::Connection& connection,\n                                            const std::vector<Network::FilterFactoryCb>&) {\n  absl::MutexLock lock(&lock_);\n  if (read_disable_on_new_connection_) {\n    connection.readDisable(true);\n  }\n  auto connection_wrapper = std::make_unique<SharedConnectionWrapper>(connection);\n  LinkedList::moveIntoListBack(std::move(connection_wrapper), new_connections_);\n  return true;\n}\n\nbool FakeUpstream::createListenerFilterChain(Network::ListenerFilterManager&) { return true; }\n\nvoid FakeUpstream::createUdpListenerFilterChain(Network::UdpListenerFilterManager& udp_listener,\n                                       
         Network::UdpReadFilterCallbacks& callbacks) {\n  udp_listener.addReadFilter(std::make_unique<FakeUpstreamUdpFilter>(*this, callbacks));\n}\n\nvoid FakeUpstream::threadRoutine() {\n  handler_->addListener(absl::nullopt, listener_);\n  server_initialized_.setReady();\n  dispatcher_->run(Event::Dispatcher::RunType::Block);\n  handler_.reset();\n  {\n    absl::MutexLock lock(&lock_);\n    new_connections_.clear();\n    consumed_connections_.clear();\n  }\n}\n\nAssertionResult FakeUpstream::waitForHttpConnection(\n    Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection, milliseconds timeout,\n    uint32_t max_request_headers_kb, uint32_t max_request_headers_count,\n    envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n        headers_with_underscores_action) {\n  {\n    absl::MutexLock lock(&lock_);\n    if (!waitForWithDispatcherRun(\n            time_system_, lock_,\n            [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { return !new_connections_.empty(); },\n            client_dispatcher, timeout)) {\n      return AssertionFailure() << \"Timed out waiting for new connection.\";\n    }\n\n    connection = std::make_unique<FakeHttpConnection>(\n        *this, consumeConnection(), http_type_, time_system_, max_request_headers_kb,\n        max_request_headers_count, headers_with_underscores_action);\n  }\n  VERIFY_ASSERTION(connection->initialize());\n  if (read_disable_on_new_connection_) {\n    VERIFY_ASSERTION(connection->readDisable(false));\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult\nFakeUpstream::waitForHttpConnection(Event::Dispatcher& client_dispatcher,\n                                    std::vector<std::unique_ptr<FakeUpstream>>& upstreams,\n                                    FakeHttpConnectionPtr& connection, milliseconds timeout) {\n  if (upstreams.empty()) {\n    return AssertionFailure() << \"No upstreams configured.\";\n  }\n  Event::TestTimeSystem::RealTimeBound bound(timeout);\n  
while (bound.withinBound()) {\n    for (auto& it : upstreams) {\n      FakeUpstream& upstream = *it;\n      {\n        absl::MutexLock lock(&upstream.lock_);\n        if (!waitForWithDispatcherRun(\n                upstream.time_system_, upstream.lock_,\n                [&upstream]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(upstream.lock_) {\n                  return !upstream.new_connections_.empty();\n                },\n                client_dispatcher, 5ms)) {\n          continue;\n        }\n        connection = std::make_unique<FakeHttpConnection>(\n            upstream, upstream.consumeConnection(), upstream.http_type_, upstream.timeSystem(),\n            Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT,\n            envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n      }\n      VERIFY_ASSERTION(connection->initialize());\n      VERIFY_ASSERTION(connection->readDisable(false));\n      return AssertionSuccess();\n    }\n  }\n  return AssertionFailure() << \"Timed out waiting for HTTP connection.\";\n}\n\nAssertionResult FakeUpstream::waitForRawConnection(FakeRawConnectionPtr& connection,\n                                                   milliseconds timeout) {\n  {\n    absl::MutexLock lock(&lock_);\n    const auto reached = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {\n      return !new_connections_.empty();\n    };\n\n    ENVOY_LOG(debug, \"waiting for raw connection\");\n    if (!time_system_.waitFor(lock_, absl::Condition(&reached), timeout)) {\n      return AssertionFailure() << \"Timed out waiting for raw connection\";\n    }\n    connection = std::make_unique<FakeRawConnection>(consumeConnection(), timeSystem());\n  }\n  VERIFY_ASSERTION(connection->initialize());\n  VERIFY_ASSERTION(connection->readDisable(false));\n  VERIFY_ASSERTION(connection->enableHalfClose(enable_half_close_));\n  return AssertionSuccess();\n}\n\nSharedConnectionWrapper& FakeUpstream::consumeConnection() {\n  ASSERT(!new_connections_.empty());\n  auto* 
const connection_wrapper = new_connections_.front().get();\n  connection_wrapper->setParented();\n  connection_wrapper->moveBetweenLists(new_connections_, consumed_connections_);\n  return *connection_wrapper;\n}\n\ntesting::AssertionResult FakeUpstream::waitForUdpDatagram(Network::UdpRecvData& data_to_fill,\n                                                          std::chrono::milliseconds timeout) {\n  absl::MutexLock lock(&lock_);\n  const auto reached = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {\n    return !received_datagrams_.empty();\n  };\n\n  if (!time_system_.waitFor(lock_, absl::Condition(&reached), timeout)) {\n    return AssertionFailure() << \"Timed out waiting for UDP datagram.\";\n  }\n\n  data_to_fill = std::move(received_datagrams_.front());\n  received_datagrams_.pop_front();\n  return AssertionSuccess();\n}\n\nvoid FakeUpstream::onRecvDatagram(Network::UdpRecvData& data) {\n  absl::MutexLock lock(&lock_);\n  received_datagrams_.emplace_back(std::move(data));\n}\n\nvoid FakeUpstream::sendUdpDatagram(const std::string& buffer,\n                                   const Network::Address::InstanceConstSharedPtr& peer) {\n  dispatcher_->post([this, buffer, peer] {\n    const auto rc = Network::Utility::writeToSocket(socket_->ioHandle(), Buffer::OwnedImpl(buffer),\n                                                    nullptr, *peer);\n    EXPECT_TRUE(rc.rc_ == buffer.length());\n  });\n}\n\nAssertionResult FakeRawConnection::waitForData(uint64_t num_bytes, std::string* data,\n                                               milliseconds timeout) {\n  absl::MutexLock lock(&lock_);\n  const auto reached = [this, num_bytes]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {\n    return data_.size() == num_bytes;\n  };\n  ENVOY_LOG(debug, \"waiting for {} bytes of data\", num_bytes);\n  if (!time_system_.waitFor(lock_, absl::Condition(&reached), timeout)) {\n    return AssertionFailure() << fmt::format(\n               \"Timed out waiting for data. 
Got '{}', waiting for {} bytes.\", data_, num_bytes);\n  }\n  if (data != nullptr) {\n    *data = data_;\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult\nFakeRawConnection::waitForData(const std::function<bool(const std::string&)>& data_validator,\n                               std::string* data, milliseconds timeout) {\n  absl::MutexLock lock(&lock_);\n  const auto reached = [this, &data_validator]()\n                           ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { return data_validator(data_); };\n  ENVOY_LOG(debug, \"waiting for data\");\n  if (!time_system_.waitFor(lock_, absl::Condition(&reached), timeout)) {\n    return AssertionFailure() << \"Timed out waiting for data.\";\n  }\n  if (data != nullptr) {\n    *data = data_;\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult FakeRawConnection::write(const std::string& data, bool end_stream,\n                                         milliseconds timeout) {\n  return shared_connection_.executeOnDispatcher(\n      [data, end_stream](Network::Connection& connection) {\n        Buffer::OwnedImpl to_write(data);\n        connection.write(to_write, end_stream);\n      },\n      timeout);\n}\n\nNetwork::FilterStatus FakeRawConnection::ReadFilter::onData(Buffer::Instance& data,\n                                                            bool end_stream) {\n  absl::MutexLock lock(&parent_.lock_);\n  ENVOY_LOG(debug, \"got {} bytes, end_stream {}\", data.length(), end_stream);\n  parent_.data_.append(data.toString());\n  parent_.half_closed_ = end_stream;\n  data.drain(data.length());\n  return Network::FilterStatus::StopIteration;\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/fake_upstream.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/connection_handler.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/zero_copy_input_stream_impl.h\"\n#include \"common/common/basic_resource_impl.h\"\n#include \"common/common/callback_impl.h\"\n#include \"common/common/linked_object.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n#include \"common/grpc/codec.h\"\n#include \"common/grpc/common.h\"\n#include \"common/http/http1/codec_impl.h\"\n#include \"common/http/http2/codec_impl.h\"\n#include \"common/network/connection_balancer_impl.h\"\n#include \"common/network/filter_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/udp_default_writer_config.h\"\n#include \"common/network/udp_listener_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"server/active_raw_udp_listener_config.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/test_common/test_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n// TODO(mattklein123): A lot of code should be moved from this header file into the cc file.\n\nnamespace Envoy {\n\nclass FakeHttpConnection;\nclass FakeUpstream;\n\n/**\n * Provides a fake HTTP stream for integration testing.\n */\nclass FakeStream : public Http::RequestDecoder,\n                   public Http::StreamCallbacks,\n                   Logger::Loggable<Logger::Id::testing> {\npublic:\n  FakeStream(FakeHttpConnection& parent, Http::ResponseEncoder& encoder,\n             Event::TestTimeSystem& time_system);\n\n  uint64_t bodyLength() {\n    absl::MutexLock lock(&lock_);\n    return body_.length();\n  
}\n  Buffer::Instance& body() {\n    absl::MutexLock lock(&lock_);\n    return body_;\n  }\n  bool complete() {\n    absl::MutexLock lock(&lock_);\n    return end_stream_;\n  }\n\n  // Execute a callback using the dispatcher associated with the FakeStream's connection. This\n  // allows execution of non-interrupted sequences of operations on the fake stream which may run\n  // into trouble if client-side events are interleaved.\n  void postToConnectionThread(std::function<void()> cb);\n  void encode100ContinueHeaders(const Http::ResponseHeaderMap& headers);\n  void encodeHeaders(const Http::HeaderMap& headers, bool end_stream);\n  void encodeData(uint64_t size, bool end_stream);\n  void encodeData(Buffer::Instance& data, bool end_stream);\n  void encodeData(absl::string_view data, bool end_stream);\n  void encodeTrailers(const Http::HeaderMap& trailers);\n  void encodeResetStream();\n  void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector);\n  void readDisable(bool disable);\n  const Http::RequestHeaderMap& headers() {\n    absl::MutexLock lock(&lock_);\n    return *headers_;\n  }\n  void setAddServedByHeader(bool add_header) { add_served_by_header_ = add_header; }\n  const Http::RequestTrailerMapPtr& trailers() { return trailers_; }\n  bool receivedData() { return received_data_; }\n  Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() {\n    return encoder_.http1StreamEncoderOptions();\n  }\n  void\n  sendLocalReply(bool is_grpc_request, Http::Code code, absl::string_view body,\n                 const std::function<void(Http::ResponseHeaderMap& headers)>& /*modify_headers*/,\n                 const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                 absl::string_view /*details*/) override {\n    bool is_head_request;\n    {\n      absl::MutexLock lock(&lock_);\n      is_head_request = headers_ != nullptr &&\n                        headers_->getMethodValue() == Http::Headers::get().MethodValues.Head;\n    }\n    
Http::Utility::sendLocalReply(\n        false,\n        Http::Utility::EncodeFunctions(\n            {nullptr, nullptr,\n             [&](Http::ResponseHeaderMapPtr&& headers, bool end_stream) -> void {\n               encoder_.encodeHeaders(*headers, end_stream);\n             },\n             [&](Buffer::Instance& data, bool end_stream) -> void {\n               encoder_.encodeData(data, end_stream);\n             }}),\n        Http::Utility::LocalReplyData({is_grpc_request, code, body, grpc_status, is_head_request}));\n  }\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForHeadersComplete(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForData(Event::Dispatcher& client_dispatcher, uint64_t body_length,\n              std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForData(Event::Dispatcher& client_dispatcher, absl::string_view body,\n              std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForEndStream(Event::Dispatcher& client_dispatcher,\n                   std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForReset(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  // gRPC convenience methods.\n  void startGrpcStream();\n  void finishGrpcStream(Grpc::Status::GrpcStatus status);\n  template <class T> void sendGrpcMessage(const T& message) {\n    auto serialized_response = Grpc::Common::serializeToGrpcFrame(message);\n    encodeData(*serialized_response, false);\n    ENVOY_LOG(debug, \"Sent gRPC message: {}\", message.DebugString());\n  }\n  template <class T> void decodeGrpcFrame(T& message) {\n    EXPECT_GE(decoded_grpc_frames_.size(), 1);\n    if (decoded_grpc_frames_[0].length_ == 0) {\n      
decoded_grpc_frames_.erase(decoded_grpc_frames_.begin());\n      return;\n    }\n    Buffer::ZeroCopyInputStreamImpl stream(std::move(decoded_grpc_frames_[0].data_));\n    EXPECT_TRUE(decoded_grpc_frames_[0].flags_ == Grpc::GRPC_FH_DEFAULT);\n    EXPECT_TRUE(message.ParseFromZeroCopyStream(&stream));\n    ENVOY_LOG(debug, \"Received gRPC message: {}\", message.DebugString());\n    decoded_grpc_frames_.erase(decoded_grpc_frames_.begin());\n  }\n  template <class T>\n  ABSL_MUST_USE_RESULT testing::AssertionResult\n  waitForGrpcMessage(Event::Dispatcher& client_dispatcher, T& message,\n                     std::chrono::milliseconds timeout = TestUtility::DefaultTimeout) {\n    Event::TestTimeSystem::RealTimeBound bound(timeout);\n    ENVOY_LOG(debug, \"Waiting for gRPC message...\");\n    if (!decoded_grpc_frames_.empty()) {\n      decodeGrpcFrame(message);\n      return AssertionSuccess();\n    }\n    if (!waitForData(client_dispatcher, 5, timeout)) {\n      return testing::AssertionFailure() << \"Timed out waiting for start of gRPC message.\";\n    }\n    {\n      absl::MutexLock lock(&lock_);\n      if (!grpc_decoder_.decode(body_, decoded_grpc_frames_)) {\n        return testing::AssertionFailure()\n               << \"Couldn't decode gRPC data frame: \" << body_.toString();\n      }\n    }\n    if (decoded_grpc_frames_.empty()) {\n      if (!waitForData(client_dispatcher, grpc_decoder_.length(), bound.timeLeft())) {\n        return testing::AssertionFailure() << \"Timed out waiting for end of gRPC message.\";\n      }\n      {\n        absl::MutexLock lock(&lock_);\n        if (!grpc_decoder_.decode(body_, decoded_grpc_frames_)) {\n          return testing::AssertionFailure()\n                 << \"Couldn't decode gRPC data frame: \" << body_.toString();\n        }\n      }\n    }\n    decodeGrpcFrame(message);\n    ENVOY_LOG(debug, \"Received gRPC message: {}\", message.DebugString());\n    return AssertionSuccess();\n  }\n\n  // Http::StreamDecoder\n  void 
decodeData(Buffer::Instance& data, bool end_stream) override;\n  void decodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr) override;\n\n  // Http::RequestDecoder\n  void decodeHeaders(Http::RequestHeaderMapPtr&& headers, bool end_stream) override;\n  void decodeTrailers(Http::RequestTrailerMapPtr&& trailers) override;\n\n  // Http::StreamCallbacks\n  void onResetStream(Http::StreamResetReason reason,\n                     absl::string_view transport_failure_reason) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n  virtual void setEndStream(bool end) EXCLUSIVE_LOCKS_REQUIRED(lock_) { end_stream_ = end; }\n\n  Event::TestTimeSystem& timeSystem() { return time_system_; }\n\n  Http::MetadataMap& metadataMap() { return metadata_map_; }\n  absl::node_hash_map<std::string, uint64_t>& duplicatedMetadataKeyCount() {\n    return duplicated_metadata_key_count_;\n  }\n\nprotected:\n  absl::Mutex lock_;\n  Http::RequestHeaderMapPtr headers_ ABSL_GUARDED_BY(lock_);\n  Buffer::OwnedImpl body_ ABSL_GUARDED_BY(lock_);\n\nprivate:\n  FakeHttpConnection& parent_;\n  Http::ResponseEncoder& encoder_;\n  Http::RequestTrailerMapPtr trailers_ ABSL_GUARDED_BY(lock_);\n  bool end_stream_ ABSL_GUARDED_BY(lock_){};\n  bool saw_reset_ ABSL_GUARDED_BY(lock_){};\n  Grpc::Decoder grpc_decoder_;\n  std::vector<Grpc::Frame> decoded_grpc_frames_;\n  bool add_served_by_header_{};\n  Event::TestTimeSystem& time_system_;\n  Http::MetadataMap metadata_map_;\n  absl::node_hash_map<std::string, uint64_t> duplicated_metadata_key_count_;\n  bool received_data_{false};\n};\n\nusing FakeStreamPtr = std::unique_ptr<FakeStream>;\n\n// Encapsulates various state and functionality related to sharing a Connection object across\n// threads. With FakeUpstream fabricated objects, we have a Connection that is associated with a\n// dispatcher on a thread managed by FakeUpstream. 
We want to be able to safely invoke methods on\n// this object from other threads (e.g. the main test thread) and be able to track connection state\n// (e.g. are we disconnected and the Connection is now possibly deleted). We manage this via a\n// SharedConnectionWrapper that lives from when the Connection is added to the accepted connection\n// queue and then through the lifetime of the Fake{Raw,Http}Connection that manages the Connection\n// through active use.\nclass SharedConnectionWrapper : public Network::ConnectionCallbacks,\n                                public LinkedObject<SharedConnectionWrapper> {\npublic:\n  using DisconnectCallback = std::function<void()>;\n\n  SharedConnectionWrapper(Network::Connection& connection) : connection_(connection) {\n    connection_.addConnectionCallbacks(*this);\n  }\n\n  Common::CallbackHandle* addDisconnectCallback(DisconnectCallback callback) {\n    absl::MutexLock lock(&lock_);\n    return disconnect_callback_manager_.add(callback);\n  }\n\n  // Avoid directly removing by caller, since CallbackManager is not thread safe.\n  void removeDisconnectCallback(Common::CallbackHandle* handle) {\n    absl::MutexLock lock(&lock_);\n    handle->remove();\n  }\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override {\n    // Throughout this entire function, we know that the connection_ cannot disappear, since this\n    // callback is invoked prior to connection_ deferred delete. 
We also know by locking below,\n    // that elsewhere where we also hold lock_, that the connection cannot disappear inside the\n    // locked scope.\n    absl::MutexLock lock(&lock_);\n    if (event == Network::ConnectionEvent::RemoteClose ||\n        event == Network::ConnectionEvent::LocalClose) {\n      disconnected_ = true;\n      disconnect_callback_manager_.runCallbacks();\n    }\n  }\n\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\n  bool connected() {\n    absl::MutexLock lock(&lock_);\n    return connectedLockHeld();\n  }\n\n  bool connectedLockHeld() {\n    lock_.AssertReaderHeld(); // TODO(mattklein123): This can't be annotated because the lock\n                              // is acquired via the base connection reference. Fix this to\n                              // remove the reference.\n    return !disconnected_;\n  }\n\n  // This provides direct access to the underlying connection, but only to const methods.\n  // Stateful connection related methods should happen on the connection's dispatcher via\n  // executeOnDispatcher.\n  // thread safety violations when crossing between the test thread and FakeUpstream thread.\n  Network::Connection& connection() const { return connection_; }\n\n  // Execute some function on the connection's dispatcher. This involves a cross-thread post and\n  // wait-for-completion. 
If the connection is disconnected, either prior to post or when the\n  // dispatcher schedules the callback, we silently ignore.\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  executeOnDispatcher(std::function<void(Network::Connection&)> f,\n                      std::chrono::milliseconds timeout = TestUtility::DefaultTimeout,\n                      bool allow_disconnects = true) {\n    absl::MutexLock lock(&lock_);\n    if (disconnected_) {\n      return testing::AssertionSuccess();\n    }\n    bool callback_ready_event = false;\n    bool unexpected_disconnect = false;\n    connection_.dispatcher().post(\n        [this, f, &lock = lock_, &callback_ready_event, &unexpected_disconnect]() -> void {\n          // The use of connected() here, vs. !disconnected_, is because we want to use the lock_\n          // acquisition to briefly serialize. This avoids us entering this completion and issuing\n          // a notifyOne() until the wait() is ready to receive it below.\n          if (connected()) {\n            f(connection_);\n          } else {\n            unexpected_disconnect = true;\n          }\n          absl::MutexLock lock_guard(&lock);\n          callback_ready_event = true;\n        });\n    Event::TestTimeSystem& time_system =\n        dynamic_cast<Event::TestTimeSystem&>(connection_.dispatcher().timeSource());\n    if (!time_system.waitFor(lock_, absl::Condition(&callback_ready_event), timeout)) {\n      return testing::AssertionFailure() << \"Timed out while executing on dispatcher.\";\n    }\n    if (unexpected_disconnect && !allow_disconnects) {\n      ENVOY_LOG_MISC(warn, \"executeOnDispatcher failed due to disconnect\\n\");\n    }\n    return testing::AssertionSuccess();\n  }\n\n  absl::Mutex& lock() { return lock_; }\n\n  void setParented() {\n    absl::MutexLock lock(&lock_);\n    parented_ = true;\n  }\n\nprivate:\n  Network::Connection& connection_;\n  absl::Mutex lock_;\n  Common::CallbackManager<> disconnect_callback_manager_ 
ABSL_GUARDED_BY(lock_);\n  bool parented_ ABSL_GUARDED_BY(lock_){};\n  bool disconnected_ ABSL_GUARDED_BY(lock_){};\n};\n\nusing SharedConnectionWrapperPtr = std::unique_ptr<SharedConnectionWrapper>;\n\n/**\n * Base class for both fake raw connections and fake HTTP connections.\n */\nclass FakeConnectionBase : public Logger::Loggable<Logger::Id::testing> {\npublic:\n  virtual ~FakeConnectionBase() { ASSERT(initialized_); }\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult close(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  readDisable(bool disable, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForDisconnect(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForHalfClose(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  virtual testing::AssertionResult initialize() {\n    initialized_ = true;\n    return testing::AssertionSuccess();\n  }\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  enableHalfClose(bool enabled, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n  // The same caveats apply here as in SharedConnectionWrapper::connection().\n  Network::Connection& connection() const { return shared_connection_.connection(); }\n  bool connected() const { return shared_connection_.connected(); }\n\nprotected:\n  FakeConnectionBase(SharedConnectionWrapper& shared_connection, Event::TestTimeSystem& time_system)\n      : shared_connection_(shared_connection), lock_(shared_connection.lock()),\n        time_system_(time_system) {}\n\n  SharedConnectionWrapper& shared_connection_;\n  bool initialized_{};\n  absl::Mutex& lock_; // TODO(mattklein123): Use the shared connection lock and figure out better\n                      // guarded by annotations.\n  bool half_closed_ 
ABSL_GUARDED_BY(lock_){};\n  Event::TestTimeSystem& time_system_;\n};\n\n/**\n * Provides a fake HTTP connection for integration testing.\n */\nclass FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeConnectionBase {\npublic:\n  enum class Type { HTTP1, HTTP2 };\n\n  FakeHttpConnection(FakeUpstream& fake_upstream, SharedConnectionWrapper& shared_connection,\n                     Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb,\n                     uint32_t max_request_headers_count,\n                     envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n                         headers_with_underscores_action);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForNewStream(Event::Dispatcher& client_dispatcher, FakeStreamPtr& stream,\n                   std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  // Http::ServerConnectionCallbacks\n  Http::RequestDecoder& newStream(Http::ResponseEncoder& response_encoder, bool) override;\n  // Should only be called for HTTP2\n  void onGoAway(Http::GoAwayErrorCode code) override;\n\nprivate:\n  struct ReadFilter : public Network::ReadFilterBaseImpl {\n    ReadFilter(FakeHttpConnection& parent) : parent_(parent) {}\n\n    // Network::ReadFilter\n    Network::FilterStatus onData(Buffer::Instance& data, bool) override {\n      Http::Status status = parent_.codec_->dispatch(data);\n\n      if (Http::isCodecProtocolError(status)) {\n        ENVOY_LOG(debug, \"FakeUpstream dispatch error: {}\", status.message());\n        // We don't do a full stream shutdown like HCM, but just shutdown the\n        // connection for now.\n        read_filter_callbacks_->connection().close(\n            Network::ConnectionCloseType::FlushWriteAndDelay);\n      }\n      return Network::FilterStatus::StopIteration;\n    }\n\n    void\n    initializeReadFilterCallbacks(Network::ReadFilterCallbacks& read_filter_callbacks) override {\n      
read_filter_callbacks_ = &read_filter_callbacks;\n    }\n\n    Network::ReadFilterCallbacks* read_filter_callbacks_{};\n    FakeHttpConnection& parent_;\n  };\n\n  const Type type_;\n  Http::ServerConnectionPtr codec_;\n  std::list<FakeStreamPtr> new_streams_ ABSL_GUARDED_BY(lock_);\n  testing::NiceMock<Random::MockRandomGenerator> random_;\n};\n\nusing FakeHttpConnectionPtr = std::unique_ptr<FakeHttpConnection>;\n\n/**\n * Fake raw connection for integration testing.\n */\nclass FakeRawConnection : public FakeConnectionBase {\npublic:\n  FakeRawConnection(SharedConnectionWrapper& shared_connection, Event::TestTimeSystem& time_system)\n      : FakeConnectionBase(shared_connection, time_system) {}\n  using ValidatorFunction = const std::function<bool(const std::string&)>;\n\n  // Writes to data. If data is nullptr, discards the received data.\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForData(uint64_t num_bytes, std::string* data = nullptr,\n              std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  // Wait until data_validator returns true.\n  // example usage:\n  // std::string data;\n  // ASSERT_TRUE(waitForData(FakeRawConnection::waitForInexactMatch(\"foo\"), &data));\n  // EXPECT_EQ(data, \"foobar\");\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForData(const ValidatorFunction& data_validator, std::string* data = nullptr,\n              std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult write(const std::string& data, bool end_stream = false,\n                                 std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult initialize() override {\n    testing::AssertionResult result =\n        shared_connection_.executeOnDispatcher([this](Network::Connection& connection) {\n          connection.addReadFilter(Network::ReadFilterSharedPtr{new ReadFilter(*this)});\n       
 });\n    if (!result) {\n      return result;\n    }\n    return FakeConnectionBase::initialize();\n  }\n\n  // Creates a ValidatorFunction which returns true when data_to_wait_for is\n  // contained in the incoming data string. Unlike many of Envoy waitFor functions,\n  // it does not expect an exact match, simply the presence of data_to_wait_for.\n  static ValidatorFunction waitForInexactMatch(const char* data_to_wait_for) {\n    return [data_to_wait_for](const std::string& data) -> bool {\n      return data.find(data_to_wait_for) != std::string::npos;\n    };\n  }\n\nprivate:\n  struct ReadFilter : public Network::ReadFilterBaseImpl {\n    ReadFilter(FakeRawConnection& parent) : parent_(parent) {}\n\n    // Network::ReadFilter\n    Network::FilterStatus onData(Buffer::Instance& data, bool) override;\n\n    FakeRawConnection& parent_;\n  };\n\n  std::string data_ ABSL_GUARDED_BY(lock_);\n};\n\nusing FakeRawConnectionPtr = std::unique_ptr<FakeRawConnection>;\n\n/**\n * Provides a fake upstream server for integration testing.\n */\nclass FakeUpstream : Logger::Loggable<Logger::Id::testing>,\n                     public Network::FilterChainManager,\n                     public Network::FilterChainFactory {\npublic:\n  // Creates a fake upstream bound to the specified unix domain socket path.\n  FakeUpstream(const std::string& uds_path, FakeHttpConnection::Type type,\n               Event::TestTimeSystem& time_system);\n  // Creates a fake upstream bound to the specified |address|.\n  FakeUpstream(const Network::Address::InstanceConstSharedPtr& address,\n               FakeHttpConnection::Type type, Event::TestTimeSystem& time_system,\n               bool enable_half_close = false, bool udp_fake_upstream = false);\n\n  // Creates a fake upstream bound to INADDR_ANY and the specified |port|.\n  FakeUpstream(uint32_t port, FakeHttpConnection::Type type, Network::Address::IpVersion version,\n               Event::TestTimeSystem& time_system, bool enable_half_close = 
false);\n  FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory, uint32_t port,\n               FakeHttpConnection::Type type, Network::Address::IpVersion version,\n               Event::TestTimeSystem& time_system);\n  ~FakeUpstream() override;\n\n  FakeHttpConnection::Type httpType() { return http_type_; }\n\n  // Returns the new connection via the connection argument.\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult waitForHttpConnection(\n      Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection,\n      std::chrono::milliseconds timeout = TestUtility::DefaultTimeout,\n      uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB,\n      uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT,\n      envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction\n          headers_with_underscores_action = envoy::config::core::v3::HttpProtocolOptions::ALLOW);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForRawConnection(FakeRawConnectionPtr& connection,\n                       std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n  Network::Address::InstanceConstSharedPtr localAddress() const { return socket_->localAddress(); }\n\n  // Wait for one of the upstreams to receive a connection\n  ABSL_MUST_USE_RESULT\n  static testing::AssertionResult\n  waitForHttpConnection(Event::Dispatcher& client_dispatcher,\n                        std::vector<std::unique_ptr<FakeUpstream>>& upstreams,\n                        FakeHttpConnectionPtr& connection,\n                        std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  // Waits for 1 UDP datagram to be received.\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult\n  waitForUdpDatagram(Network::UdpRecvData& data_to_fill,\n                     std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n\n  // Send a UDP datagram on the fake upstream thread.\n  void 
sendUdpDatagram(const std::string& buffer,\n                       const Network::Address::InstanceConstSharedPtr& peer);\n\n  // Network::FilterChainManager\n  const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override {\n    return filter_chain_.get();\n  }\n\n  // Network::FilterChainFactory\n  bool\n  createNetworkFilterChain(Network::Connection& connection,\n                           const std::vector<Network::FilterFactoryCb>& filter_factories) override;\n  bool createListenerFilterChain(Network::ListenerFilterManager& listener) override;\n  void createUdpListenerFilterChain(Network::UdpListenerFilterManager& udp_listener,\n                                    Network::UdpReadFilterCallbacks& callbacks) override;\n\n  void setReadDisableOnNewConnection(bool value) { read_disable_on_new_connection_ = value; }\n  Event::TestTimeSystem& timeSystem() { return time_system_; }\n\n  // Stops the dispatcher loop and joins the listening thread.\n  void cleanUp();\n\n  Http::Http1::CodecStats& http1CodecStats() {\n    return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, stats_store_);\n  }\n\n  Http::Http2::CodecStats& http2CodecStats() {\n    return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, stats_store_);\n  }\n\nprotected:\n  Stats::IsolatedStoreImpl stats_store_;\n  const FakeHttpConnection::Type http_type_;\n\nprivate:\n  FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory,\n               Network::SocketPtr&& connection, FakeHttpConnection::Type type,\n               Event::TestTimeSystem& time_system, bool enable_half_close);\n\n  class FakeListenSocketFactory : public Network::ListenSocketFactory {\n  public:\n    FakeListenSocketFactory(Network::SocketSharedPtr socket) : socket_(socket) {}\n\n    // Network::ListenSocketFactory\n    Network::Socket::Type socketType() const override { return socket_->socketType(); }\n\n    const Network::Address::InstanceConstSharedPtr& localAddress() 
const override {\n      return socket_->localAddress();\n    }\n\n    Network::SocketSharedPtr getListenSocket() override { return socket_; }\n    Network::SocketOptRef sharedSocket() const override { return *socket_; }\n\n  private:\n    Network::SocketSharedPtr socket_;\n  };\n\n  class FakeUpstreamUdpFilter : public Network::UdpListenerReadFilter {\n  public:\n    FakeUpstreamUdpFilter(FakeUpstream& parent, Network::UdpReadFilterCallbacks& callbacks)\n        : UdpListenerReadFilter(callbacks), parent_(parent) {}\n\n    // Network::UdpListenerReadFilter\n    void onData(Network::UdpRecvData& data) override { parent_.onRecvDatagram(data); }\n    void onReceiveError(Api::IoError::IoErrorCode) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }\n\n  private:\n    FakeUpstream& parent_;\n  };\n\n  class FakeListener : public Network::ListenerConfig {\n  public:\n    FakeListener(FakeUpstream& parent)\n        : parent_(parent), name_(\"fake_upstream\"),\n          udp_listener_factory_(std::make_unique<Server::ActiveRawUdpListenerFactory>(1)),\n          udp_writer_factory_(std::make_unique<Network::UdpDefaultWriterFactory>()),\n          udp_listener_worker_router_(1), init_manager_(nullptr) {}\n\n  private:\n    // Network::ListenerConfig\n    Network::FilterChainManager& filterChainManager() override { return parent_; }\n    Network::FilterChainFactory& filterChainFactory() override { return parent_; }\n    Network::ListenSocketFactory& listenSocketFactory() override {\n      return *parent_.socket_factory_;\n    }\n    bool bindToPort() override { return true; }\n    bool handOffRestoredDestinationConnections() const override { return false; }\n    uint32_t perConnectionBufferLimitBytes() const override { return 0; }\n    std::chrono::milliseconds listenerFiltersTimeout() const override { return {}; }\n    bool continueOnListenerFiltersTimeout() const override { return false; }\n    Stats::Scope& listenerScope() override { return parent_.stats_store_; }\n    uint64_t 
listenerTag() const override { return 0; }\n    const std::string& name() const override { return name_; }\n    Network::ActiveUdpListenerFactory* udpListenerFactory() override {\n      return udp_listener_factory_.get();\n    }\n    Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override {\n      return Network::UdpPacketWriterFactoryOptRef(std::ref(*udp_writer_factory_));\n    }\n    Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override {\n      return udp_listener_worker_router_;\n    }\n    Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; }\n    envoy::config::core::v3::TrafficDirection direction() const override {\n      return envoy::config::core::v3::UNSPECIFIED;\n    }\n    const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() const override {\n      return empty_access_logs_;\n    }\n    ResourceLimit& openConnections() override { return connection_resource_; }\n    uint32_t tcpBacklogSize() const override { return ENVOY_TCP_BACKLOG_SIZE; }\n    Init::Manager& initManager() override { return *init_manager_; }\n\n    void setMaxConnections(const uint32_t num_connections) {\n      connection_resource_.setMax(num_connections);\n    }\n    void clearMaxConnections() { connection_resource_.resetMax(); }\n\n    FakeUpstream& parent_;\n    const std::string name_;\n    Network::NopConnectionBalancerImpl connection_balancer_;\n    const Network::ActiveUdpListenerFactoryPtr udp_listener_factory_;\n    const Network::UdpPacketWriterFactoryPtr udp_writer_factory_;\n    Network::UdpListenerWorkerRouterImpl udp_listener_worker_router_;\n    BasicResourceLimitImpl connection_resource_;\n    const std::vector<AccessLog::InstanceSharedPtr> empty_access_logs_;\n    std::unique_ptr<Init::Manager> init_manager_;\n  };\n\n  void threadRoutine();\n  SharedConnectionWrapper& consumeConnection() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_);\n  void onRecvDatagram(Network::UdpRecvData& data);\n\n  
Network::SocketSharedPtr socket_;\n  Network::ListenSocketFactorySharedPtr socket_factory_;\n  ConditionalInitializer server_initialized_;\n  // Guards any objects which can be altered both in the upstream thread and the\n  // main test thread.\n  absl::Mutex lock_;\n  Thread::ThreadPtr thread_;\n  Api::ApiPtr api_;\n  Event::TestTimeSystem& time_system_;\n  Event::DispatcherPtr dispatcher_;\n  Network::ConnectionHandlerPtr handler_;\n  std::list<SharedConnectionWrapperPtr> new_connections_ ABSL_GUARDED_BY(lock_);\n  // When a QueuedConnectionWrapper is popped from new_connections_, ownership is transferred to\n  // consumed_connections_. This allows later the Connection destruction (when the FakeUpstream is\n  // deleted) on the same thread that allocated the connection.\n  std::list<SharedConnectionWrapperPtr> consumed_connections_ ABSL_GUARDED_BY(lock_);\n  bool read_disable_on_new_connection_;\n  const bool enable_half_close_;\n  FakeListener listener_;\n  const Network::FilterChainSharedPtr filter_chain_;\n  std::list<Network::UdpRecvData> received_datagrams_ ABSL_GUARDED_BY(lock_);\n  Http::Http1::CodecStats::AtomicPtr http1_codec_stats_;\n  Http::Http2::CodecStats::AtomicPtr http2_codec_stats_;\n};\n\nusing FakeUpstreamPtr = std::unique_ptr<FakeUpstream>;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filter_manager_integration_test.cc",
    "content": "#include <regex>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n\n#include \"test/integration/filter_manager_integration_test.pb.h\"\n#include \"test/integration/filter_manager_integration_test.pb.validate.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/server/utility.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\n/**\n * Basic traffic throttler that emits a next chunk of the original request/response data\n * on timer tick.\n */\nclass Throttler {\npublic:\n  Throttler(Event::Dispatcher& dispatcher, std::chrono::milliseconds tick_interval,\n            uint64_t max_chunk_length, std::function<void(Buffer::Instance&, bool)> next_chunk_cb)\n      : timer_(dispatcher.createTimer([this] { onTimerTick(); })), tick_interval_(tick_interval),\n        max_chunk_length_(max_chunk_length), next_chunk_cb_(next_chunk_cb) {}\n\n  /**\n   * Throttle given given request/response data.\n   */\n  void throttle(Buffer::Instance& data, bool end_stream);\n  /**\n   * Cancel any scheduled activities (on connection close).\n   */\n  void reset();\n\nprivate:\n  void onTimerTick();\n\n  Buffer::OwnedImpl buffer_{};\n  bool end_stream_{};\n\n  const Event::TimerPtr timer_;\n  const std::chrono::milliseconds tick_interval_;\n  const uint64_t max_chunk_length_;\n  const std::function<void(Buffer::Instance&, bool)> next_chunk_cb_;\n};\n\nvoid Throttler::throttle(Buffer::Instance& data, bool end_stream) {\n  buffer_.move(data);\n  end_stream_ |= 
end_stream;\n  if (!timer_->enabled()) {\n    timer_->enableTimer(tick_interval_);\n  }\n}\n\nvoid Throttler::reset() { timer_->disableTimer(); }\n\nvoid Throttler::onTimerTick() {\n  Buffer::OwnedImpl next_chunk{};\n  if (0 < buffer_.length()) {\n    auto chunk_length = max_chunk_length_ < buffer_.length() ? max_chunk_length_ : buffer_.length();\n    next_chunk.move(buffer_, chunk_length);\n  }\n  bool end_stream = end_stream_ && 0 == buffer_.length();\n  if (0 < buffer_.length()) {\n    timer_->enableTimer(tick_interval_);\n  }\n  next_chunk_cb_(next_chunk, end_stream);\n}\n\n/**\n * Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain()\n * and WriteFilterCallbacks::injectWriteDataToFilterChain() methods in the context of a timer\n * callback.\n *\n * Emits a next chunk of the original request/response data on timer tick.\n */\nclass ThrottlerFilter : public Network::Filter, public Network::ConnectionCallbacks {\npublic:\n  ThrottlerFilter(std::chrono::milliseconds tick_interval, uint64_t max_chunk_length)\n      : tick_interval_(tick_interval), max_chunk_length_(max_chunk_length) {}\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n    read_callbacks_->connection().addConnectionCallbacks(*this);\n\n    read_throttler_ = std::make_unique<Throttler>(\n        read_callbacks_->connection().dispatcher(), tick_interval_, max_chunk_length_,\n        [this](Buffer::Instance& data, bool end_stream) {\n          read_callbacks_->injectReadDataToFilterChain(data, end_stream);\n        });\n  }\n\n  // Network::WriteFilter\n  Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override;\n  void initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) override {\n  
  write_callbacks_ = &callbacks;\n\n    write_throttler_ = std::make_unique<Throttler>(\n        write_callbacks_->connection().dispatcher(), tick_interval_, max_chunk_length_,\n        [this](Buffer::Instance& data, bool end_stream) {\n          write_callbacks_->injectWriteDataToFilterChain(data, end_stream);\n        });\n  }\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\nprivate:\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  Network::WriteFilterCallbacks* write_callbacks_{};\n\n  std::unique_ptr<Throttler> read_throttler_;\n  std::unique_ptr<Throttler> write_throttler_;\n\n  const std::chrono::milliseconds tick_interval_;\n  const uint64_t max_chunk_length_;\n};\n\n// Network::ReadFilter\nNetwork::FilterStatus ThrottlerFilter::onNewConnection() { return Network::FilterStatus::Continue; }\n\nNetwork::FilterStatus ThrottlerFilter::onData(Buffer::Instance& data, bool end_stream) {\n  read_throttler_->throttle(data, end_stream);\n  ASSERT(data.length() == 0);\n  return Network::FilterStatus::StopIteration;\n}\n\n// Network::WriteFilter\nNetwork::FilterStatus ThrottlerFilter::onWrite(Buffer::Instance& data, bool end_stream) {\n  write_throttler_->throttle(data, end_stream);\n  ASSERT(data.length() == 0);\n  return Network::FilterStatus::StopIteration;\n}\n\n// Network::ConnectionCallbacks\nvoid ThrottlerFilter::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    read_throttler_->reset();\n    write_throttler_->reset();\n  }\n}\n\n/**\n * Config factory for ThrottlerFilter.\n */\nclass ThrottlerFilterConfigFactory : public Extensions::NetworkFilters::Common::FactoryBase<\n                                         test::integration::filter_manager::Throttler> {\npublic:\n  explicit 
ThrottlerFilterConfigFactory(const std::string& name) : FactoryBase(name) {}\n\nprivate:\n  Network::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const test::integration::filter_manager::Throttler& proto_config,\n      Server::Configuration::FactoryContext&) override {\n    return [proto_config](Network::FilterManager& filter_manager) -> void {\n      filter_manager.addFilter(std::make_shared<ThrottlerFilter>(\n          std::chrono::milliseconds(proto_config.tick_interval_ms()),\n          proto_config.max_chunk_length()));\n    };\n  }\n};\n\n/**\n * Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain()\n * and WriteFilterCallbacks::injectWriteDataToFilterChain() methods in the context of\n * ReadFilter::onData() and WriteFilter::onWrite().\n *\n * Calls ReadFilterCallbacks::injectReadDataToFilterChain() /\n * WriteFilterCallbacks::injectWriteDataToFilterChain() to pass data to the next filter\n * byte-by-byte.\n */\nclass DispenserFilter : public Network::Filter {\npublic:\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n  Network::FilterStatus onNewConnection() override;\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n  }\n\n  // Network::WriteFilter\n  Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override;\n  void initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) override {\n    write_callbacks_ = &callbacks;\n  }\n\nprivate:\n  // Pass data to the next filter byte-by-byte.\n  void dispense(Buffer::Instance& data, bool end_stream,\n                std::function<void(Buffer::Instance&, bool)> next_chunk_cb);\n\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  Network::WriteFilterCallbacks* write_callbacks_{};\n};\n\n// Network::ReadFilter\nNetwork::FilterStatus DispenserFilter::onNewConnection() { return 
Network::FilterStatus::Continue; }\n\nNetwork::FilterStatus DispenserFilter::onData(Buffer::Instance& data, bool end_stream) {\n  dispense(data, end_stream, [this](Buffer::Instance& data, bool end_stream) {\n    read_callbacks_->injectReadDataToFilterChain(data, end_stream);\n  });\n  ASSERT(data.length() == 0);\n  return Network::FilterStatus::StopIteration;\n}\n\n// Network::WriteFilter\nNetwork::FilterStatus DispenserFilter::onWrite(Buffer::Instance& data, bool end_stream) {\n  dispense(data, end_stream, [this](Buffer::Instance& data, bool end_stream) {\n    write_callbacks_->injectWriteDataToFilterChain(data, end_stream);\n  });\n  ASSERT(data.length() == 0);\n  return Network::FilterStatus::StopIteration;\n}\n\n// Pass data to the next filter byte-by-byte.\nvoid DispenserFilter::dispense(Buffer::Instance& data, bool end_stream,\n                               std::function<void(Buffer::Instance&, bool)> next_chunk_cb) {\n  Buffer::OwnedImpl next_chunk{};\n  do {\n    if (0 < data.length()) {\n      next_chunk.move(data, 1);\n    }\n    next_chunk_cb(next_chunk, end_stream && 0 == data.length());\n    next_chunk.drain(next_chunk.length());\n  } while (0 < data.length());\n}\n\n/**\n * Config factory for DispenserFilter.\n */\nclass DispenserFilterConfigFactory : public Server::Configuration::NamedNetworkFilterConfigFactory {\npublic:\n  explicit DispenserFilterConfigFactory(const std::string& name) : name_(name) {}\n\n  // NamedNetworkFilterConfigFactory\n  Network::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message&,\n                               Server::Configuration::FactoryContext&) override {\n    return [](Network::FilterManager& filter_manager) -> void {\n      filter_manager.addFilter(std::make_shared<DispenserFilter>());\n    };\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return 
ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};\n  }\n\n  std::string name() const override { return name_; }\n\nprivate:\n  const std::string name_;\n};\n\n// Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain()\n// and WriteFilterCallbacks::injectWriteDataToFilterChain() methods outside of the context of\n// ReadFilter::onData() and WriteFilter::onWrite(), i.e. on timer event\nconst char inject_data_outside_callback_filter[] = \"inject-data-outside-filter-callback\";\n\n// Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain()\n// and WriteFilterCallbacks::injectWriteDataToFilterChain() methods in the context of\n// ReadFilter::onData() and WriteFilter::onWrite()\nconst char inject_data_inside_callback_filter[] = \"inject-data-inside-filter-callback\";\n\n// Do not use ReadFilterCallbacks::injectReadDataToFilterChain() and\n// WriteFilterCallbacks::injectWriteDataToFilterChain() methods at all\nconst char no_inject_data[] = \"no-inject-data\";\n\n// List of auxiliary filters to test against\nconst std::vector<std::string> auxiliary_filters() {\n  return {inject_data_outside_callback_filter, inject_data_inside_callback_filter, no_inject_data};\n}\n\n// Used to pretty print test parameters\nconst std::regex invalid_param_name_regex() { return std::regex{\"[^a-zA-Z0-9_]\"}; }\n\n/**\n * Integration test with one of auxiliary filters (listed above)\n * added to the head of the filter chain.\n *\n * Shared by tests for \"envoy.filters.network.echo\", \"envoy.filters.network.tcp_proxy\" and\n * \"envoy.filters.network.http_connection_manager\".\n */\nclass TestWithAuxiliaryFilter {\npublic:\n  explicit TestWithAuxiliaryFilter(const std::string& auxiliary_filter_name)\n      : auxiliary_filter_name_(auxiliary_filter_name) {}\n\n  virtual ~TestWithAuxiliaryFilter() = default;\n\nprotected:\n  /**\n   * Returns configuration for a given auxiliary filter.\n   *\n   * Assuming that 
representative configurations differ in the context of\n   * \"envoy.filters.network.echo\", \"envoy.filters.network.tcp_proxy\" and\n   * \"envoy.filters.network.http_connection_manager\".\n   */\n  virtual std::string filterConfig(const std::string& auxiliary_filter_name) PURE;\n\n  /**\n   * Adds an auxiliary filter to the head of the filter chain.\n   * @param config_helper helper object.\n   */\n  void addAuxiliaryFilter(ConfigHelper& config_helper) {\n    if (auxiliary_filter_name_ == no_inject_data) {\n      // we want to run the same test on unmodified filter chain and observe identical behaviour\n      return;\n    }\n    addNetworkFilter(config_helper, fmt::format(R\"EOF(\n      name: {}\n    )EOF\",\n                                                auxiliary_filter_name_) +\n                                        filterConfig(auxiliary_filter_name_));\n    // double-check the filter was actually added\n    config_helper.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      ASSERT_EQ(auxiliary_filter_name_,\n                bootstrap.static_resources().listeners(0).filter_chains(0).filters(0).name());\n    });\n  }\n\n  /**\n   * Add a network filter prior to existing filters.\n   * @param config_helper helper object.\n   * @param filter_yaml configuration snippet.\n   */\n  void addNetworkFilter(ConfigHelper& config_helper, const std::string& filter_yaml) {\n    config_helper.addConfigModifier(\n        [filter_yaml](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n          ASSERT_GT(bootstrap.mutable_static_resources()->listeners_size(), 0);\n          auto l = bootstrap.mutable_static_resources()->mutable_listeners(0);\n          ASSERT_GT(l->filter_chains_size(), 0);\n\n          auto* filter_chain = l->mutable_filter_chains(0);\n          auto* filter_list_back = filter_chain->add_filters();\n          TestUtility::loadFromYaml(filter_yaml, *filter_list_back);\n\n          // Now move it to the front.\n          
for (int i = filter_chain->filters_size() - 1; i > 0; --i) {\n            filter_chain->mutable_filters()->SwapElements(i, i - 1);\n          }\n        });\n  }\n\nprivate:\n  // one of auxiliary filters (listed at the top)\n  const std::string auxiliary_filter_name_;\n\n  // Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain()\n  // and WriteFilterCallbacks::injectWriteDataToFilterChain() methods outside of the context of\n  // ReadFilter::onData() and WriteFilter::onWrite(), i.e. on timer event\n  ThrottlerFilterConfigFactory outside_callback_config_factory_{\n      inject_data_outside_callback_filter};\n  Registry::InjectFactory<Server::Configuration::NamedNetworkFilterConfigFactory>\n      registered_outside_callback_config_factory_{outside_callback_config_factory_};\n\n  // Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain()\n  // and WriteFilterCallbacks::injectWriteDataToFilterChain() methods in the context of\n  // ReadFilter::onData() and WriteFilter::onWrite()\n  DispenserFilterConfigFactory inside_callback_config_factory_{inject_data_inside_callback_filter};\n  Registry::InjectFactory<Server::Configuration::NamedNetworkFilterConfigFactory>\n      registered_inside_callback_config_factory_{inside_callback_config_factory_};\n};\n\n/**\n * Base class for \"envoy.filters.network.echo\" and \"envoy.filters.network.tcp_proxy\" tests.\n *\n * Inherits from BaseIntegrationTest; parameterized with IP version and auxiliary filter.\n */\nclass InjectDataToFilterChainIntegrationTest\n    : public testing::TestWithParam<std::tuple<Network::Address::IpVersion, std::string>>,\n      public BaseIntegrationTest,\n      public TestWithAuxiliaryFilter {\npublic:\n  // Allows pretty printed test names of the form\n  // FooTestCase.BarInstance/IPv4_no_inject_data\n  static std::string testParamsToString(\n      const testing::TestParamInfo<std::tuple<Network::Address::IpVersion, 
std::string>>& params) {\n    return fmt::format(\n        \"{}_{}\",\n        TestUtility::ipTestParamsToString(testing::TestParamInfo<Network::Address::IpVersion>(\n            std::get<0>(params.param), params.index)),\n        std::regex_replace(std::get<1>(params.param), invalid_param_name_regex(), \"_\"));\n  }\n\n  explicit InjectDataToFilterChainIntegrationTest(const std::string& config)\n      : BaseIntegrationTest(std::get<0>(GetParam()), config),\n        TestWithAuxiliaryFilter(std::get<1>(GetParam())) {}\n\n  void SetUp() override { addAuxiliaryFilter(config_helper_); }\n\nprotected:\n  // Returns configuration for a given auxiliary filter\n  std::string filterConfig(const std::string& auxiliary_filter_name) override {\n    return auxiliary_filter_name == inject_data_outside_callback_filter ? R\"EOF(\n      typed_config:\n        \"@type\": type.googleapis.com/test.integration.filter_manager.Throttler\n        tick_interval_ms: 1\n        max_chunk_length: 5\n    )EOF\"\n                                                                        : \"\";\n  }\n};\n\n/**\n * Integration test with an auxiliary filter in front of \"envoy.filters.network.echo\".\n */\nclass InjectDataWithEchoFilterIntegrationTest : public InjectDataToFilterChainIntegrationTest {\npublic:\n  static std::string echoConfig() {\n    return absl::StrCat(ConfigHelper::baseConfig(), R\"EOF(\n    filter_chains:\n      filters:\n      - name: envoy.filters.network.echo\n      )EOF\");\n  }\n\n  InjectDataWithEchoFilterIntegrationTest()\n      : InjectDataToFilterChainIntegrationTest(echoConfig()) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    Params, InjectDataWithEchoFilterIntegrationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                     testing::ValuesIn(auxiliary_filters())),\n    InjectDataToFilterChainIntegrationTest::testParamsToString);\n\nTEST_P(InjectDataWithEchoFilterIntegrationTest, UsageOfInjectDataMethodsShouldBeUnnoticeable) 
{\n  initialize();\n\n  auto tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  tcp_client->waitForData(\"hello\");\n\n  tcp_client->close();\n}\n\nTEST_P(InjectDataWithEchoFilterIntegrationTest, FilterChainMismatch) {\n  useListenerAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    bootstrap.mutable_static_resources()\n        ->mutable_listeners(0)\n        ->mutable_filter_chains(0)\n        ->mutable_filter_chain_match()\n        ->set_transport_protocol(\"tls\");\n  });\n  initialize();\n\n  auto tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\", false, false));\n\n  std::string access_log =\n      absl::StrCat(\"NR \", StreamInfo::ResponseCodeDetails::get().FilterChainNotFound);\n  EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::HasSubstr(access_log));\n  tcp_client->waitForDisconnect();\n}\n\n/**\n * Integration test with an auxiliary filter in front of \"envoy.filters.network.tcp_proxy\".\n */\nclass InjectDataWithTcpProxyFilterIntegrationTest : public InjectDataToFilterChainIntegrationTest {\npublic:\n  InjectDataWithTcpProxyFilterIntegrationTest()\n      : InjectDataToFilterChainIntegrationTest(ConfigHelper::tcpProxyConfig()) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    Params, InjectDataWithTcpProxyFilterIntegrationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                     testing::ValuesIn(auxiliary_filters())),\n    InjectDataToFilterChainIntegrationTest::testParamsToString);\n\nTEST_P(InjectDataWithTcpProxyFilterIntegrationTest, UsageOfInjectDataMethodsShouldBeUnnoticeable) {\n  enable_half_close_ = true;\n  initialize();\n\n  auto tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  
ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n\n  std::string observed_data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(5, &observed_data));\n  EXPECT_EQ(\"hello\", observed_data);\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"hi\"));\n  tcp_client->waitForData(\"hi\");\n\n  ASSERT_TRUE(tcp_client->write(\" world!\", true));\n  observed_data.clear();\n  ASSERT_TRUE(fake_upstream_connection->waitForData(12, &observed_data));\n  EXPECT_EQ(\"hello world!\", observed_data);\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"there!\", true));\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n\n  tcp_client->waitForData(\"there!\");\n  tcp_client->waitForDisconnect();\n}\n\n/**\n * Integration test with an auxiliary filter in front of\n * \"envoy.filters.network.http_connection_manager\".\n *\n * Inherits from HttpIntegrationTest;\n * parameterized with IP version, downstream HTTP version and auxiliary filter.\n */\nclass InjectDataWithHttpConnectionManagerIntegrationTest\n    : public testing::TestWithParam<\n          std::tuple<Network::Address::IpVersion, Http::CodecClient::Type, std::string>>,\n      public HttpIntegrationTest,\n      public TestWithAuxiliaryFilter {\npublic:\n  // Allows pretty printed test names of the form\n  // FooTestCase.BarInstance/IPv4_Http_no_inject_data\n  static std::string testParamsToString(\n      const testing::TestParamInfo<\n          std::tuple<Network::Address::IpVersion, Http::CodecClient::Type, std::string>>& params) {\n    return fmt::format(\n        \"{}_{}_{}\",\n        TestUtility::ipTestParamsToString(testing::TestParamInfo<Network::Address::IpVersion>(\n            std::get<0>(params.param), params.index)),\n        (std::get<1>(params.param) == Http::CodecClient::Type::HTTP2 ? 
\"Http2\" : \"Http\"),\n        std::regex_replace(std::get<2>(params.param), invalid_param_name_regex(), \"_\"));\n  }\n\n  InjectDataWithHttpConnectionManagerIntegrationTest()\n      : HttpIntegrationTest(std::get<1>(GetParam()), std::get<0>(GetParam())),\n        TestWithAuxiliaryFilter(std::get<2>(GetParam())) {}\n\n  void SetUp() override { addAuxiliaryFilter(config_helper_); }\n\n  void TearDown() override {\n    // already cleaned up in ~HttpIntegrationTest()\n  }\n\nprotected:\n  // Returns configuration for a given auxiliary filter\n  std::string filterConfig(const std::string& auxiliary_filter_name) override {\n    return auxiliary_filter_name == inject_data_outside_callback_filter ? R\"EOF(\n      typed_config:\n        \"@type\": type.googleapis.com/test.integration.filter_manager.Throttler\n        tick_interval_ms: 1\n        max_chunk_length: 10\n    )EOF\"\n                                                                        : \"\";\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    Params, InjectDataWithHttpConnectionManagerIntegrationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                     testing::Values(Http::CodecClient::Type::HTTP1,\n                                     Http::CodecClient::Type::HTTP2),\n                     testing::ValuesIn(auxiliary_filters())),\n    InjectDataWithHttpConnectionManagerIntegrationTest::testParamsToString);\n\nTEST_P(InjectDataWithHttpConnectionManagerIntegrationTest,\n       UsageOfInjectDataMethodsShouldBeUnnoticeable) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl headers{\n      {\":method\", \"POST\"}, {\":path\", \"/api\"}, {\":authority\", \"host\"}, {\":scheme\", \"http\"}};\n  auto response = codec_client_->makeRequestWithBody(headers, \"hello!\");\n\n  waitForNextUpstreamRequest();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(\"hello!\", 
upstream_request_->body().toString());\n\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  Buffer::OwnedImpl response_data{\"greetings\"};\n  upstream_request_->encodeData(response_data, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"greetings\", response->body());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filter_manager_integration_test.proto",
    "content": "syntax = \"proto3\";\n\npackage test.integration.filter_manager;\n\nmessage Throttler {\n  int32 tick_interval_ms = 1;\n  int64 max_chunk_length = 2;\n}\n"
  },
  {
    "path": "test/integration/filters/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"add_body_filter_config_lib\",\n    srcs = [\n        \"add_body_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"local_reply_during_encoding_filter_lib\",\n    srcs = [\n        \"local_reply_during_encoding_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"continue_headers_only_inject_body\",\n    srcs = [\n        \"continue_headers_only_inject_body_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"wait_for_whole_request_and_response_config_lib\",\n    srcs = [\n        \"wait_for_whole_request_and_response.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/http:filter_interface\",\n        
\"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"add_trailers_filter_config_lib\",\n    srcs = [\n        \"add_trailers_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"backpressure_filter_config_lib\",\n    srcs = [\n        \"backpressure_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"clear_route_cache_filter_lib\",\n    srcs = [\n        \"clear_route_cache_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"eds_ready_filter_config_lib\",\n    srcs = [\n        \"eds_ready_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//include/envoy/registry\",\n        
\"//include/envoy/server:filter_config_interface\",\n        \"//source/common/stats:symbol_table_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"modify_buffer_filter_config_lib\",\n    srcs = [\n        \"modify_buffer_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"passthrough_filter_config_lib\",\n    srcs = [\n        \"passthrough_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"headers_only_filter_config_lib\",\n    srcs = [\n        \"headers_only_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"pause_filter_lib\",\n    srcs = [\n        \"pause_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        
\"//source/common/network:connection_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"process_context_lib\",\n    srcs = [\n        \"process_context_filter.cc\",\n    ],\n    hdrs = [\n        \"process_context_filter.h\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:process_context_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"set_response_code_filter_lib\",\n    srcs = [\n        \"set_response_code_filter.cc\",\n    ],\n    deps = [\n        \":set_response_code_filter_config_proto_cc_proto\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//source/extensions/filters/http/common:factory_base_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"set_response_code_filter_config_proto\",\n    srcs = [\":set_response_code_filter_config.proto\"],\n)\n\nenvoy_cc_test_library(\n    name = \"stop_iteration_and_continue\",\n    srcs = [\n        \"stop_iteration_and_continue_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/network:connection_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"common_lib\",\n    hdrs = [\n        \"common.h\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        
\"//include/envoy/registry\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"request_metadata_filter_config_lib\",\n    srcs = [\n        \"request_metadata_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"random_pause_filter_lib\",\n    srcs = [\n        \"random_pause_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//source/common/network:connection_lib\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"response_metadata_filter_config_lib\",\n    srcs = [\n        \"response_metadata_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"decode_headers_return_stop_all_filter_config_lib\",\n    srcs = [\n        \"decode_headers_return_stop_all_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        
\"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"call_decodedata_once_filter_config_lib\",\n    srcs = [\n        \"call_decodedata_once_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"encode_headers_return_stop_all_filter_config_lib\",\n    srcs = [\n        \"encode_headers_return_stop_all_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"metadata_stop_all_filter_config_lib\",\n    srcs = [\n        \"metadata_stop_all_filter.cc\",\n    ],\n    deps = [\n        \":common_lib\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = 
\"encoder_decoder_buffer_filter_lib\",\n    srcs = [\n        \"encoder_decoder_buffer_filter.cc\",\n    ],\n    deps = [\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/registry\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/extensions/filters/http/common:pass_through_filter_lib\",\n        \"//test/extensions/filters/http/common:empty_http_filter_config_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_socket_interface_lib\",\n    srcs = [\n        \"test_socket_interface.cc\",\n    ],\n    hdrs = [\n        \"test_socket_interface.h\",\n    ],\n    deps = [\n        \"//include/envoy/network:socket_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:default_socket_interface_lib\",\n        \"@com_google_absl//absl/types:optional\",\n        \"@envoy_api//envoy/extensions/network/socket_interface/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/integration/filters/add_body_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\nnamespace Envoy {\n\n// A test filter that inserts body to a header only request/response.\nclass AddBodyStreamFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"add-body-filter\";\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,\n                                          bool end_stream) override {\n    if (end_stream) {\n      Buffer::OwnedImpl body(\"body\");\n      headers.setContentLength(body.length());\n      decoder_callbacks_->addDecodedData(body, false);\n    }\n\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers,\n                                          bool end_stream) override {\n    if (end_stream) {\n      Buffer::OwnedImpl body(\"body\");\n      headers.setContentLength(body.length());\n      encoder_callbacks_->addEncodedData(body, false);\n    }\n\n    return Http::FilterHeadersStatus::Continue;\n  }\n};\n\nconstexpr char AddBodyStreamFilter::name[];\n\nstatic Registry::RegisterFactory<SimpleFilterConfig<AddBodyStreamFilter>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    encoder_register_;\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/add_trailers_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// A test filter that inserts trailers at the end of encode/decode\nclass AddTrailersStreamFilter : public Http::PassThroughFilter {\npublic:\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool end_stream) override {\n    if (end_stream) {\n      decoder_callbacks_->addDecodedTrailers().addCopy(Http::LowerCaseString(\"grpc-message\"),\n                                                       \"decode\");\n    }\n\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool end_stream) override {\n    if (end_stream) {\n      encoder_callbacks_->addEncodedTrailers().setGrpcMessage(\"encode\");\n    }\n\n    return Http::FilterDataStatus::Continue;\n  }\n};\n\nclass AddTrailersStreamFilterConfig\n    : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  AddTrailersStreamFilterConfig() : EmptyHttpFilterConfig(\"add-trailers-filter\") {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<::Envoy::AddTrailersStreamFilter>());\n    };\n  }\n};\n\n// perform static registration\nstatic Registry::RegisterFactory<AddTrailersStreamFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/backpressure_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// A filter that buffers the entire request/response, then doubles\n// the content of the filter buffer.\nclass BackpressureFilter : public Http::PassThroughFilter {\npublic:\n  void onDestroy() override { decoder_callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); }\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override {\n    decoder_callbacks_->onDecoderFilterAboveWriteBufferHighWatermark();\n    return Http::FilterHeadersStatus::Continue;\n  }\n};\n\nclass BackpressureConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  BackpressureConfig() : EmptyHttpFilterConfig(\"backpressure-filter\") {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<::Envoy::BackpressureFilter>());\n    };\n  }\n};\n\n// perform static registration\nstatic Registry::RegisterFactory<BackpressureConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/call_decodedata_once_filter.cc",
    "content": "#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\n// A filter that only allows decodeData() to be called once with fixed data length.\nclass CallDecodeDataOnceFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"call-decodedata-once-filter\";\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& header_map, bool) override {\n    const Http::HeaderEntry* entry_content =\n        header_map.get(Envoy::Http::LowerCaseString(\"content_size\"));\n    const Http::HeaderEntry* entry_added =\n        header_map.get(Envoy::Http::LowerCaseString(\"added_size\"));\n    ASSERT(entry_content != nullptr && entry_added != nullptr);\n    content_size_ = std::stoul(std::string(entry_content->value().getStringView()));\n    added_size_ = std::stoul(std::string(entry_added->value().getStringView()));\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool) override {\n    // Request data length (size 5000) + data from addDecodedData() called in dataDecode (size 1).\n    // Or data from addDecodedData() called in dataTrailers (size 1)\n    EXPECT_TRUE(data.length() == content_size_ + added_size_ || data.length() == added_size_);\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n\nprivate:\n  size_t content_size_ = 0;\n  size_t added_size_ = 0;\n};\n\nconstexpr char CallDecodeDataOnceFilter::name[];\nstatic Registry::RegisterFactory<SimpleFilterConfig<CallDecodeDataOnceFilter>,\n                                 
Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/clear_route_cache_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// A test filter that clears the route cache on creation\nclass ClearRouteCacheFilter : public Http::PassThroughFilter {\npublic:\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    callbacks.clearRouteCache();\n    Http::PassThroughFilter::setDecoderFilterCallbacks(callbacks);\n  }\n};\n\nclass ClearRouteCacheFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  ClearRouteCacheFilterConfig() : EmptyHttpFilterConfig(\"clear-route-cache\") {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<::Envoy::ClearRouteCacheFilter>());\n    };\n  }\n};\n\n// perform static registration\nstatic Registry::RegisterFactory<ClearRouteCacheFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/common.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// DRYs up the creation of a simple filter config for a filter that requires no config.\ntemplate <class T>\nclass SimpleFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  SimpleFilterConfig() : EmptyHttpFilterConfig(T::name) {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<T>());\n    };\n  }\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/continue_headers_only_inject_body_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\nnamespace Envoy {\n\n// A test filter that continues iteration of headers-only request/response without ending the\n// stream, then injects a body later.\nclass ContinueHeadersOnlyInjectBodyFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"continue-headers-only-inject-body-filter\";\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override {\n    headers.setContentLength(body_.length());\n    decoder_callbacks_->dispatcher().post([this]() -> void {\n      Buffer::OwnedImpl buffer(body_);\n      decoder_callbacks_->injectDecodedDataToFilterChain(buffer, true);\n    });\n    return Http::FilterHeadersStatus::ContinueAndDontEndStream;\n  }\n\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool) override {\n    headers.setContentLength(body_.length());\n    encoder_callbacks_->dispatcher().post([this]() -> void {\n      Buffer::OwnedImpl buffer(body_);\n      encoder_callbacks_->injectEncodedDataToFilterChain(buffer, true);\n    });\n    return Http::FilterHeadersStatus::ContinueAndDontEndStream;\n  }\n\nprivate:\n  constexpr static absl::string_view body_ = \"body\";\n};\n\nstatic Registry::RegisterFactory<SimpleFilterConfig<ContinueHeadersOnlyInjectBodyFilter>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/decode_headers_return_stop_all_filter.cc",
    "content": "#include <chrono>\n#include <string>\n\n#include \"envoy/event/timer.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\n// A filter returns StopAllIterationAndBuffer or StopAllIterationAndWatermark for headers. How the\n// filter acts depends on the headers received.\nclass DecodeHeadersReturnStopAllFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"decode-headers-return-stop-all-filter\";\n\n  // Returns Http::FilterHeadersStatus::StopAllIterationAndBuffer or\n  // Http::FilterHeadersStatus::StopAllIterationAndWatermark for headers. Triggers a timer to\n  // continue iteration after 5s.\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& header_map, bool) override {\n    const Http::HeaderEntry* entry_content =\n        header_map.get(Envoy::Http::LowerCaseString(\"content_size\"));\n    const Http::HeaderEntry* entry_added =\n        header_map.get(Envoy::Http::LowerCaseString(\"added_size\"));\n    ASSERT(entry_content != nullptr && entry_added != nullptr);\n    content_size_ = std::stoul(std::string(entry_content->value().getStringView()));\n    added_size_ = std::stoul(std::string(entry_added->value().getStringView()));\n    const Http::HeaderEntry* entry_is_first_trigger =\n        header_map.get(Envoy::Http::LowerCaseString(\"is_first_trigger\"));\n    is_first_trigger_ = entry_is_first_trigger != nullptr;\n    // Remove \"first_trigger\" headers so that if the filter is registered twice in a filter chain,\n    // it would act differently.\n    header_map.remove(Http::LowerCaseString(\"is_first_trigger\"));\n\n    
createTimerForContinue();\n\n    const Http::HeaderEntry* entry_buffer =\n        header_map.get(Envoy::Http::LowerCaseString(\"buffer_limit\"));\n    if (entry_buffer == nullptr || !is_first_trigger_) {\n      return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n    } else {\n      watermark_enabled_ = true;\n      buffer_limit_ = std::stoul(std::string(entry_buffer->value().getStringView()));\n      decoder_callbacks_->setDecoderBufferLimit(buffer_limit_);\n      header_map.remove(Http::LowerCaseString(\"buffer_limit\"));\n      return Http::FilterHeadersStatus::StopAllIterationAndWatermark;\n    }\n  }\n\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool) override {\n    ASSERT(timer_triggered_);\n    if (is_first_trigger_) {\n      if (watermark_enabled_) {\n        // High watermark reached before all data are received. The rest of the data is sent after\n        // iteration resumes.\n        EXPECT_LT(data.length(), content_size_);\n      } else {\n        // decodeData will only be called once after iteration resumes.\n        EXPECT_EQ(data.length(), content_size_);\n      }\n      Buffer::OwnedImpl added_data(std::string(added_size_, 'a'));\n      decoder_callbacks_->addDecodedData(added_data, false);\n    } else {\n      EXPECT_TRUE(data.length() == content_size_ + added_size_ ||\n                  data.length() == content_size_ + added_size_ * 2);\n    }\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    ASSERT(timer_triggered_);\n    if (is_first_trigger_) {\n      Buffer::OwnedImpl data(std::string(added_size_, 'a'));\n      decoder_callbacks_->addDecodedData(data, false);\n    }\n    return Http::FilterTrailersStatus::Continue;\n  }\n\nprivate:\n  // Creates a timer to continue iteration after conditions meet.\n  void createTimerForContinue() {\n    delay_timer_ = decoder_callbacks_->dispatcher().createTimer([this]() -> void {\n      // If 
decodeHeaders() returns StopAllIterationAndBuffer, triggers the timer when all the\n      // request data has been received. If decodeHeaders() returns StopAllIterationAndWatermark,\n      // triggers the timer when received data exceed buffer limit.\n      if ((content_size_ > 0 &&\n           decoder_callbacks_->streamInfo().bytesReceived() >= content_size_) ||\n          (watermark_enabled_ && buffer_limit_ > 0 &&\n           decoder_callbacks_->streamInfo().bytesReceived() >= buffer_limit_)) {\n        timer_triggered_ = true;\n        decoder_callbacks_->continueDecoding();\n      } else {\n        // Create a new timer to try again later.\n        createTimerForContinue();\n      }\n    });\n    delay_timer_->enableTimer(std::chrono::milliseconds(500));\n  }\n\n  Event::TimerPtr delay_timer_;\n  bool timer_triggered_ = false;\n  size_t content_size_ = 0;\n  size_t added_size_ = 0;\n  size_t buffer_limit_ = 0;\n  bool watermark_enabled_ = false;\n  bool is_first_trigger_ = false;\n};\n\nconstexpr char DecodeHeadersReturnStopAllFilter::name[];\nstatic Registry::RegisterFactory<SimpleFilterConfig<DecodeHeadersReturnStopAllFilter>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/eds_ready_filter.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// A test filter that rejects all requests if EDS isn't healthy yet, and\n// responds OK to all requests if it is.\nclass EdsReadyFilter : public Http::PassThroughFilter {\npublic:\n  EdsReadyFilter(const Stats::Scope& root_scope, Stats::SymbolTable& symbol_table)\n      : root_scope_(root_scope), stat_name_(\"cluster.cluster_0.membership_healthy\", symbol_table) {}\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override {\n    Stats::GaugeOptConstRef gauge = root_scope_.findGauge(stat_name_.statName());\n    if (!gauge.has_value()) {\n      decoder_callbacks_->sendLocalReply(Envoy::Http::Code::InternalServerError,\n                                         \"Couldn't find stat\", nullptr, absl::nullopt, \"\");\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n    if (gauge->get().value() == 0) {\n      decoder_callbacks_->sendLocalReply(Envoy::Http::Code::InternalServerError, \"EDS not ready\",\n                                         nullptr, absl::nullopt, \"\");\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n    decoder_callbacks_->sendLocalReply(Envoy::Http::Code::OK, \"EDS is ready\", nullptr,\n                                       absl::nullopt, \"\");\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\nprivate:\n  const Stats::Scope& root_scope_;\n  Stats::StatNameManagedStorage stat_name_;\n};\n\nclass EdsReadyFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  EdsReadyFilterConfig() : 
EmptyHttpFilterConfig(\"eds-ready-filter\") {}\n\n  Http::FilterFactoryCb\n  createFilter(const std::string&,\n               Server::Configuration::FactoryContext& factory_context) override {\n    return [&factory_context](Http::FilterChainFactoryCallbacks& callbacks) {\n      const Stats::Scope& scope = factory_context.api().rootScope();\n      Stats::SymbolTable& symbol_table = factory_context.scope().symbolTable();\n      callbacks.addStreamFilter(std::make_shared<EdsReadyFilter>(scope, symbol_table));\n    };\n  }\n};\n\nstatic Registry::RegisterFactory<EdsReadyFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/encode_headers_return_stop_all_filter.cc",
    "content": "#include <chrono>\n#include <string>\n\n#include \"envoy/event/timer.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\n// A filter returns StopAllIterationAndBuffer or StopAllIterationAndWatermark for headers. The\n// iteration continues after 5s.\nclass EncodeHeadersReturnStopAllFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"encode-headers-return-stop-all-filter\";\n\n  // Returns Http::FilterHeadersStatus::StopAllIterationAndBuffer or\n  // Http::FilterHeadersStatus::StopAllIterationAndWatermark for headers. Triggers a timer to\n  // continue iteration after 5s.\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& header_map, bool) override {\n    const Http::HeaderEntry* entry_content =\n        header_map.get(Envoy::Http::LowerCaseString(\"content_size\"));\n    const Http::HeaderEntry* entry_added =\n        header_map.get(Envoy::Http::LowerCaseString(\"added_size\"));\n    ASSERT(entry_content != nullptr && entry_added != nullptr);\n    content_size_ = std::stoul(std::string(entry_content->value().getStringView()));\n    added_size_ = std::stoul(std::string(entry_added->value().getStringView()));\n\n    createTimerForContinue();\n\n    Http::MetadataMap metadata_map = {{\"headers\", \"headers\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));\n\n    const Http::HeaderEntry* entry_buffer =\n        header_map.get(Envoy::Http::LowerCaseString(\"buffer_limit\"));\n    if (entry_buffer == nullptr) {\n      return 
Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n    } else {\n      watermark_enabled_ = true;\n      encoder_callbacks_->setEncoderBufferLimit(\n          std::stoul(std::string(entry_buffer->value().getStringView())));\n      return Http::FilterHeadersStatus::StopAllIterationAndWatermark;\n    }\n  }\n\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool) override {\n    ASSERT(timer_triggered_);\n    if (watermark_enabled_) {\n      // High watermark reached before all data are received. The rest of the data is sent after\n      // iteration resumes.\n      EXPECT_LT(data.length(), content_size_);\n    } else {\n      // encodeData will only be called once after iteration resumes.\n      EXPECT_EQ(data.length(), content_size_);\n    }\n    Http::MetadataMap metadata_map = {{\"data\", \"data\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));\n\n    Buffer::OwnedImpl added_data(std::string(added_size_, 'a'));\n    encoder_callbacks_->addEncodedData(added_data, false);\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override {\n    ASSERT(timer_triggered_);\n    Http::MetadataMap metadata_map = {{\"trailers\", \"trailers\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));\n\n    Buffer::OwnedImpl data(std::string(added_size_, 'a'));\n    encoder_callbacks_->addEncodedData(data, false);\n    return Http::FilterTrailersStatus::Continue;\n  }\n\nprivate:\n  // Creates a timer to continue iteration after 5s.\n  void createTimerForContinue() {\n    delay_timer_ = encoder_callbacks_->dispatcher().createTimer([this]() -> void {\n      timer_triggered_ = true;\n      encoder_callbacks_->continueEncoding();\n    });\n    
delay_timer_->enableTimer(std::chrono::seconds(5));\n  }\n\n  Event::TimerPtr delay_timer_;\n  bool timer_triggered_ = false;\n  size_t added_size_ = 0;\n  size_t content_size_ = 0;\n  bool watermark_enabled_ = false;\n};\n\nconstexpr char EncodeHeadersReturnStopAllFilter::name[];\nstatic Registry::RegisterFactory<SimpleFilterConfig<EncodeHeadersReturnStopAllFilter>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/encoder_decoder_buffer_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// A filter that buffers the entire request/response.\nclass EncoderDecoderBufferStreamFilter : public Http::PassThroughFilter {\npublic:\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool end_stream) override {\n    return end_stream ? Http::FilterHeadersStatus::Continue\n                      : Http::FilterHeadersStatus::StopIteration;\n  }\n\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool end_stream) override {\n    return end_stream ? Http::FilterDataStatus::Continue\n                      : Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool end_stream) override {\n    return end_stream ? Http::FilterHeadersStatus::Continue\n                      : Http::FilterHeadersStatus::StopIteration;\n  }\n\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool end_stream) override {\n    return end_stream ? 
Http::FilterDataStatus::Continue\n                      : Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n};\n\nclass EncoderDecoderBuffferFilterConfig\n    : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  EncoderDecoderBuffferFilterConfig() : EmptyHttpFilterConfig(\"encoder-decoder-buffer-filter\") {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<::Envoy::EncoderDecoderBufferStreamFilter>());\n    };\n  }\n};\n\n// perform static registration\nstatic Registry::RegisterFactory<EncoderDecoderBuffferFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/headers_only_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\nnamespace Envoy {\n\nclass HeaderOnlyDecoderFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"decode-headers-only\";\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override {\n    return Http::FilterHeadersStatus::ContinueAndEndStream;\n  }\n};\n\nconstexpr char HeaderOnlyDecoderFilter::name[];\nstatic Registry::RegisterFactory<SimpleFilterConfig<HeaderOnlyDecoderFilter>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    decoder_register_;\n\nclass HeaderOnlyEncoderFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"encode-headers-only\";\n\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override {\n    return Http::FilterHeadersStatus::ContinueAndEndStream;\n  }\n};\n\nconstexpr char HeaderOnlyEncoderFilter::name[];\n\nstatic Registry::RegisterFactory<SimpleFilterConfig<HeaderOnlyEncoderFilter>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    encoder_register_;\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/local_reply_during_encoding_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\nnamespace Envoy {\n\nclass LocalReplyDuringEncode : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"local-reply-during-encode\";\n\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override {\n    encoder_callbacks_->sendLocalReply(Http::Code::InternalServerError, \"\", nullptr, absl::nullopt,\n                                       \"\");\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n};\n\nconstexpr char LocalReplyDuringEncode::name[];\nstatic Registry::RegisterFactory<SimpleFilterConfig<LocalReplyDuringEncode>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/metadata_stop_all_filter.cc",
    "content": "#include <chrono>\n#include <string>\n\n#include \"envoy/event/timer.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass MetadataStopAllFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"metadata-stop-all-filter\";\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& header_map, bool) override {\n    const Http::HeaderEntry* entry_content =\n        header_map.get(Envoy::Http::LowerCaseString(\"content_size\"));\n    ASSERT(entry_content != nullptr);\n    content_size_ = std::stoul(std::string(entry_content->value().getStringView()));\n\n    createTimerForContinue();\n\n    return Http::FilterHeadersStatus::StopAllIterationAndBuffer;\n  }\n\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override {\n    ASSERT(timer_triggered_);\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    ASSERT(timer_triggered_);\n    return Http::FilterTrailersStatus::Continue;\n  }\n\n  Http::FilterMetadataStatus decodeMetadata(Http::MetadataMap&) override {\n    ASSERT(timer_triggered_);\n    return Http::FilterMetadataStatus::Continue;\n  }\n\nprivate:\n  // Creates a timer to continue iteration after conditions meet.\n  void createTimerForContinue() {\n    delay_timer_ = decoder_callbacks_->dispatcher().createTimer([this]() -> void {\n      if (content_size_ > 0 && decoder_callbacks_->streamInfo().bytesReceived() >= content_size_) {\n        timer_triggered_ = true;\n        decoder_callbacks_->continueDecoding();\n      } else {\n        // Creates a new 
timer to try again later.\n        createTimerForContinue();\n      }\n    });\n    delay_timer_->enableTimer(std::chrono::milliseconds(50));\n  }\n\n  Event::TimerPtr delay_timer_;\n  bool timer_triggered_ = false;\n  size_t content_size_ = 0;\n};\n\nconstexpr char MetadataStopAllFilter::name[];\nstatic Registry::RegisterFactory<SimpleFilterConfig<MetadataStopAllFilter>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/modify_buffer_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// A filter that buffers the entire request/response, then doubles\n// the content of the filter buffer.\nclass ModifyBufferStreamFilter : public Http::PassThroughFilter {\npublic:\n  Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override {\n    decoder_callbacks_->addDecodedData(data, true);\n\n    if (end_stream) {\n      decoder_callbacks_->modifyDecodingBuffer([](auto& buffer) {\n        // Append the buffer with itself.\n        buffer.add(buffer);\n      });\n      return Http::FilterDataStatus::Continue;\n    }\n\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n\n  Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override {\n    encoder_callbacks_->addEncodedData(data, true);\n\n    if (end_stream) {\n      encoder_callbacks_->modifyEncodingBuffer([](auto& buffer) {\n        // Append the buffer with itself.\n        buffer.add(buffer);\n      });\n      return Http::FilterDataStatus::Continue;\n    }\n\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n};\n\nclass ModifyBuffferFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  ModifyBuffferFilterConfig() : EmptyHttpFilterConfig(\"modify-buffer-filter\") {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<::Envoy::ModifyBufferStreamFilter>());\n    };\n  }\n};\n\n// perform static registration\nstatic 
Registry::RegisterFactory<ModifyBuffferFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/passthrough_filter.cc",
    "content": "#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\nnamespace Envoy {\n\n// Registers the passthrough filter for use in test.\nclass TestPassThroughFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"passthrough-filter\";\n};\n\nconstexpr char TestPassThroughFilter::name[];\nstatic Registry::RegisterFactory<SimpleFilterConfig<TestPassThroughFilter>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/pause_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/registry/registry.h\"\n\n#include \"common/network/connection_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// This filter exists to synthetically test network backup by faking TCP\n// connection back-up when an encode is finished, and unblocking it when the\n// next stream starts to decode headers.\n// Allows regression tests for https://github.com/envoyproxy/envoy/issues/4541\nclass TestPauseFilter : public Http::PassThroughFilter {\npublic:\n  // Pass in a some global filter state to ensure the Network::Connection is\n  // blocked and unblocked exactly once.\n  TestPauseFilter(absl::Mutex& encode_lock, uint32_t& number_of_encode_calls_ref,\n                  uint32_t& number_of_decode_calls_ref)\n      : encode_lock_(encode_lock), number_of_encode_calls_ref_(number_of_encode_calls_ref),\n        number_of_decode_calls_ref_(number_of_decode_calls_ref) {}\n\n  Http::FilterDataStatus decodeData(Buffer::Instance& buf, bool end_stream) override {\n    if (end_stream) {\n      absl::WriterMutexLock m(&encode_lock_);\n      number_of_decode_calls_ref_++;\n      // If this is the second stream to decode headers and we're at high watermark. 
force low\n      // watermark state\n      if (number_of_decode_calls_ref_ == 2 && connection()->aboveHighWatermark()) {\n        connection()->onWriteBufferLowWatermark();\n      }\n    }\n    return PassThroughFilter::decodeData(buf, end_stream);\n  }\n\n  Http::FilterDataStatus encodeData(Buffer::Instance& buf, bool end_stream) override {\n    if (end_stream) {\n      absl::WriterMutexLock m(&encode_lock_);\n      number_of_encode_calls_ref_++;\n      // If this is the first stream to encode headers and we're not at high watermark, force high\n      // watermark state.\n      if (number_of_encode_calls_ref_ == 1 && !connection()->aboveHighWatermark()) {\n        connection()->onWriteBufferHighWatermark();\n      }\n    }\n    return PassThroughFilter::encodeData(buf, end_stream);\n  }\n\n  Network::ConnectionImpl* connection() {\n    // As long as we're doing horrible things let's do *all* the horrible things.\n    // Assert the connection we have is a ConnectionImpl and const cast it so we\n    // can force watermark changes.\n    auto conn_impl = dynamic_cast<const Network::ConnectionImpl*>(decoder_callbacks_->connection());\n    return const_cast<Network::ConnectionImpl*>(conn_impl);\n  }\n\n  absl::Mutex& encode_lock_;\n  uint32_t& number_of_encode_calls_ref_;\n  uint32_t& number_of_decode_calls_ref_;\n};\n\nclass TestPauseFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  TestPauseFilterConfig() : EmptyHttpFilterConfig(\"pause-filter\") {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [&](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      // ABSL_GUARDED_BY insists the lock be held when the guarded variables are passed by\n      // reference.\n      absl::WriterMutexLock m(&encode_lock_);\n      callbacks.addStreamFilter(std::make_shared<::Envoy::TestPauseFilter>(\n          encode_lock_, 
number_of_encode_calls_, number_of_decode_calls_));\n    };\n  }\n\n  absl::Mutex encode_lock_;\n  uint32_t number_of_encode_calls_ ABSL_GUARDED_BY(encode_lock_) = 0;\n  uint32_t number_of_decode_calls_ ABSL_GUARDED_BY(encode_lock_) = 0;\n};\n\n// perform static registration\nstatic Registry::RegisterFactory<TestPauseFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/process_context_filter.cc",
    "content": "#include \"test/integration/filters/process_context_filter.h\"\n\n#include <memory>\n#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// A test filter that rejects all requests if the ProcessObject held by the\n// ProcessContext is unhealthy, and responds OK to all requests otherwise.\nclass ProcessContextFilter : public Http::PassThroughFilter {\npublic:\n  ProcessContextFilter(ProcessContext& process_context)\n      : process_object_(dynamic_cast<ProcessObjectForFilter&>(process_context.get())) {}\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override {\n    if (!process_object_.isHealthy()) {\n      decoder_callbacks_->sendLocalReply(Envoy::Http::Code::InternalServerError,\n                                         \"ProcessObjectForFilter is unhealthy\", nullptr,\n                                         absl::nullopt, \"\");\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n    decoder_callbacks_->sendLocalReply(Envoy::Http::Code::OK, \"ProcessObjectForFilter is healthy\",\n                                       nullptr, absl::nullopt, \"\");\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\nprivate:\n  ProcessObjectForFilter& process_object_;\n};\n\nclass ProcessContextFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  ProcessContextFilterConfig() : EmptyHttpFilterConfig(\"process-context-filter\") {}\n\n  Http::FilterFactoryCb\n  createFilter(const std::string&,\n               Server::Configuration::FactoryContext& factory_context) override {\n    return [&factory_context](Http::FilterChainFactoryCallbacks& callbacks) {\n      callbacks.addStreamFilter(\n    
      std::make_shared<ProcessContextFilter>(*factory_context.processContext()));\n    };\n  }\n};\n\nstatic Registry::RegisterFactory<ProcessContextFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/process_context_filter.h",
    "content": "#pragma once\n\n#include \"envoy/server/process_context.h\"\n\nnamespace Envoy {\n\nclass ProcessObjectForFilter : public ProcessObject {\npublic:\n  explicit ProcessObjectForFilter(bool is_healthy) : is_healthy_(is_healthy) {}\n  ~ProcessObjectForFilter() override = default;\n\n  bool isHealthy() { return is_healthy_; }\n\nprivate:\n  bool is_healthy_;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/random_pause_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/registry/registry.h\"\n\n#include \"common/network/connection_impl.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\n\n// This filter exists to synthetically test network backup by faking TCP\n// connection back-up when an encode is finished, blocking and unblocking\n// randomly.\nclass RandomPauseFilter : public Http::PassThroughFilter {\npublic:\n  RandomPauseFilter(absl::Mutex& rand_lock, TestRandomGenerator& rng)\n      : rand_lock_(rand_lock), rng_(rng) {}\n\n  Http::FilterDataStatus encodeData(Buffer::Instance& buf, bool end_stream) override {\n    absl::WriterMutexLock m(&rand_lock_);\n    uint64_t random = rng_.random();\n    // Roughly every 5th encode (5 being arbitrary) swap the watermark state.\n    if (random % 5 == 0) {\n      if (connection()->aboveHighWatermark()) {\n        connection()->onWriteBufferLowWatermark();\n      } else {\n        connection()->onWriteBufferHighWatermark();\n      }\n    }\n    return Http::PassThroughFilter::encodeData(buf, end_stream);\n  }\n\n  Network::ConnectionImpl* connection() {\n    // As long as we're doing horrible things let's do *all* the horrible things.\n    // Assert the connection we have is a ConnectionImpl and const cast it so we\n    // can force watermark changes.\n    auto conn_impl = dynamic_cast<const Network::ConnectionImpl*>(decoder_callbacks_->connection());\n    return const_cast<Network::ConnectionImpl*>(conn_impl);\n  }\n\n  absl::Mutex& rand_lock_;\n  TestRandomGenerator& rng_;\n};\n\nclass RandomPauseFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  RandomPauseFilterConfig() : EmptyHttpFilterConfig(\"random-pause-filter\") {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     
Server::Configuration::FactoryContext&) override {\n    return [&](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      absl::WriterMutexLock m(&rand_lock_);\n      if (rng_ == nullptr) {\n        // Lazily create to ensure the test seed is set.\n        rng_ = std::make_unique<TestRandomGenerator>();\n      }\n      callbacks.addStreamFilter(std::make_shared<::Envoy::RandomPauseFilter>(rand_lock_, *rng_));\n    };\n  }\n\n  absl::Mutex rand_lock_;\n  std::unique_ptr<TestRandomGenerator> rng_ ABSL_GUARDED_BY(rand_lock_);\n};\n\n// perform static registration\nstatic Registry::RegisterFactory<RandomPauseFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/request_metadata_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n// A filter tests request metadata consuming and inserting. The filter inserts new\n// metadata when decodeHeaders/Data/Trailers() are called. If the received metadata with key\n// \"consume\", the metadata will be consumed and not forwarded to the next hop.\nclass RequestMetadataStreamFilter : public Http::PassThroughFilter {\npublic:\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override {\n    Http::MetadataMap metadata_map = {{\"headers\", \"headers\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    decoder_callbacks_->addDecodedMetadata().emplace_back(std::move(metadata_map_ptr));\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override {\n    Http::MetadataMap metadata_map = {{\"data\", \"data\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    decoder_callbacks_->addDecodedMetadata().emplace_back(std::move(metadata_map_ptr));\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    Http::MetadataMap metadata_map = {{\"trailers\", \"trailers\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    decoder_callbacks_->addDecodedMetadata().emplace_back(std::move(metadata_map_ptr));\n    return Http::FilterTrailersStatus::Continue;\n  }\n\n  // If metadata_map contains key \"consume\", consumes the metadata, and replace it with a new one.\n  // The function also adds a new metadata using addDecodedMetadata().\n  
Http::FilterMetadataStatus decodeMetadata(Http::MetadataMap& metadata_map) override {\n    auto it = metadata_map.find(\"consume\");\n    if (it != metadata_map.end()) {\n      metadata_map.erase(\"consume\");\n      metadata_map.emplace(\"replace\", \"replace\");\n    }\n    metadata_map[\"metadata\"] = \"metadata\";\n    return Http::FilterMetadataStatus::Continue;\n  }\n};\n\nclass AddRequestMetadataStreamFilterConfig\n    : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  AddRequestMetadataStreamFilterConfig() : EmptyHttpFilterConfig(\"request-metadata-filter\") {}\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<::Envoy::RequestMetadataStreamFilter>());\n    };\n  }\n};\n\n// perform static registration\nstatic Registry::RegisterFactory<AddRequestMetadataStreamFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/response_metadata_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// A filter tests response metadata process. The filter inserts new\n// metadata when encodeHeaders/Data/Trailers/100ContinueHeaders/Metadata() are called, and consumes\n// metadata in encodeMetadata().\nclass ResponseMetadataStreamFilter : public Http::PassThroughFilter {\npublic:\n  // Inserts one new metadata_map.\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override {\n    Http::MetadataMap metadata_map = {{\"headers\", \"headers\"}, {\"duplicate\", \"duplicate\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // Inserts one new metadata_map.\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override {\n    Http::MetadataMap metadata_map = {{\"data\", \"data\"}, {\"duplicate\", \"duplicate\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));\n    return Http::FilterDataStatus::Continue;\n  }\n\n  // Inserts two metadata_maps by calling decoder_callbacks_->encodeMetadata() twice.\n  Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override {\n    Http::MetadataMap metadata_map = {{\"trailers\", \"trailers\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));\n    metadata_map = {{\"duplicate\", \"duplicate\"}};\n    metadata_map_ptr = 
std::make_unique<Http::MetadataMap>(metadata_map);\n    encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));\n    return Http::FilterTrailersStatus::Continue;\n  }\n\n  // Inserts two metadata_maps by calling decoder_callbacks_->encodeMetadata() twice.\n  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {\n    Http::MetadataMap metadata_map = {{\"100-continue\", \"100-continue\"}, {\"duplicate\", \"duplicate\"}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));\n    metadata_map = {{\"duplicate\", \"duplicate\"}};\n    metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  // Adds new metadata to metadata_map directly, and consumes metadata when the keys are equal to\n  // remove and metadata. 
If the key is equal to consume, replaces it with replace.\n  Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap& metadata_map) override {\n    // Adds new metadata to metadata_map directly.\n    metadata_map.emplace(\"keep\", \"keep\");\n    // Consumes metadata.\n    auto it = metadata_map.find(\"consume\");\n    if (it != metadata_map.end()) {\n      metadata_map.erase(\"consume\");\n      metadata_map.emplace(\"replace\", \"replace\");\n    }\n    it = metadata_map.find(\"metadata\");\n    if (it != metadata_map.end()) {\n      metadata_map.erase(\"metadata\");\n    }\n    return Http::FilterMetadataStatus::Continue;\n  }\n};\n\nclass AddMetadataStreamFilterConfig\n    : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  AddMetadataStreamFilterConfig() : EmptyHttpFilterConfig(\"response-metadata-filter\") {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<::Envoy::ResponseMetadataStreamFilter>());\n    };\n  }\n};\n\n// perform static registration\nstatic Registry::RegisterFactory<AddMetadataStreamFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/set_response_code_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"extensions/filters/http/common/factory_base.h\"\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/integration/filters/set_response_code_filter_config.pb.h\"\n#include \"test/integration/filters/set_response_code_filter_config.pb.validate.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\n\n// A test filter that responds directly with a code on a prefix match.\nclass SetResponseCodeFilterConfig {\npublic:\n  SetResponseCodeFilterConfig(const std::string& prefix, uint32_t code, const std::string& body,\n                              Server::Configuration::FactoryContext& context)\n      : prefix_(prefix), code_(code), body_(body), tls_slot_(context.threadLocal().allocateSlot()) {\n  }\n\n  const std::string prefix_;\n  const uint32_t code_;\n  const std::string body_;\n  // Allocate a slot to validate that it is destroyed on a main thread only.\n  ThreadLocal::SlotPtr tls_slot_;\n};\n\nclass SetResponseCodeFilter : public Http::PassThroughFilter {\npublic:\n  SetResponseCodeFilter(std::shared_ptr<SetResponseCodeFilterConfig> config) : config_(config) {}\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override {\n    if (absl::StartsWith(headers.Path()->value().getStringView(), config_->prefix_)) {\n      decoder_callbacks_->sendLocalReply(static_cast<Http::Code>(config_->code_), config_->body_,\n                                         nullptr, absl::nullopt, \"\");\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n    return Http::FilterHeadersStatus::Continue;\n  }\n\nprivate:\n  const std::shared_ptr<SetResponseCodeFilterConfig> config_;\n};\n\nclass SetResponseCodeFilterFactory : public Extensions::HttpFilters::Common::FactoryBase<\n                                         test::integration::filters::SetResponseCodeFilterConfig> {\npublic:\n  
SetResponseCodeFilterFactory() : FactoryBase(\"set-response-code-filter\") {}\n\nprivate:\n  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(\n      const test::integration::filters::SetResponseCodeFilterConfig& proto_config,\n      const std::string&, Server::Configuration::FactoryContext& context) override {\n    auto filter_config = std::make_shared<SetResponseCodeFilterConfig>(\n        proto_config.prefix(), proto_config.code(), proto_config.body(), context);\n    return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<SetResponseCodeFilter>(filter_config));\n    };\n  }\n};\n\nREGISTER_FACTORY(SetResponseCodeFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/set_response_code_filter_config.proto",
    "content": "syntax = \"proto3\";\n\npackage test.integration.filters;\n\nimport \"validate/validate.proto\";\n\nmessage SetResponseCodeFilterConfig {\n  string prefix = 1;\n  uint32 code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];\n  string body = 3;\n}\n"
  },
  {
    "path": "test/integration/filters/stop_iteration_and_continue_filter.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n\nnamespace Envoy {\n\n// A test filter that does StopIterationNoBuffer on end stream, then continues after a 0ms alarm.\nclass StopIterationAndContinueFilter : public Http::PassThroughFilter {\npublic:\n  void setEndStreamAndDecodeTimer() {\n    decode_end_stream_seen_ = true;\n    decode_delay_timer_ = decoder_callbacks_->dispatcher().createTimer(\n        [this]() -> void { decoder_callbacks_->continueDecoding(); });\n    decode_delay_timer_->enableTimer(std::chrono::seconds(0));\n  }\n\n  void setEndStreamAndEncodeTimer() {\n    encode_end_stream_seen_ = true;\n    encode_delay_timer_ = decoder_callbacks_->dispatcher().createTimer(\n        [this]() -> void { encoder_callbacks_->continueEncoding(); });\n    encode_delay_timer_->enableTimer(std::chrono::seconds(0));\n  }\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool end_stream) override {\n    if (end_stream) {\n      setEndStreamAndDecodeTimer();\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool end_stream) override {\n    RELEASE_ASSERT(!decode_end_stream_seen_, \"end stream seen twice\");\n    if (end_stream) {\n      setEndStreamAndDecodeTimer();\n    }\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool end_stream) override {\n    if (end_stream) {\n      setEndStreamAndEncodeTimer();\n      return Http::FilterHeadersStatus::StopIteration;\n    }\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool 
end_stream) override {\n    RELEASE_ASSERT(!encode_end_stream_seen_, \"end stream seen twice\");\n    if (end_stream) {\n      setEndStreamAndEncodeTimer();\n    }\n    return Http::FilterDataStatus::StopIterationNoBuffer;\n  }\n\n  Event::TimerPtr decode_delay_timer_;\n  bool decode_end_stream_seen_{};\n  Event::TimerPtr encode_delay_timer_;\n  bool encode_end_stream_seen_{};\n};\n\nclass StopIterationAndContinueFilterConfig\n    : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  StopIterationAndContinueFilterConfig()\n      : EmptyHttpFilterConfig(\"stop-iteration-and-continue-filter\") {}\n\n  Http::FilterFactoryCb createFilter(const std::string&,\n                                     Server::Configuration::FactoryContext&) override {\n    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n      callbacks.addStreamFilter(std::make_shared<::Envoy::StopIterationAndContinueFilter>());\n    };\n  }\n};\n\n// perform static registration\nstatic Registry::RegisterFactory<StopIterationAndContinueFilterConfig,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    register_;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/test_socket_interface.cc",
    "content": "#include \"test/integration/filters/test_socket_interface.h\"\n\n#include <algorithm>\n\n#include \"envoy/common/exception.h\"\n#include \"envoy/extensions/network/socket_interface/v3/default_socket_interface.pb.h\"\n#include \"envoy/network/socket.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/network/address_impl.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nApi::IoCallUint64Result TestIoSocketHandle::writev(const Buffer::RawSlice* slices,\n                                                   uint64_t num_slice) {\n  if (writev_override_) {\n    auto result = writev_override_(this, slices, num_slice);\n    if (result.has_value()) {\n      return std::move(result).value();\n    }\n  }\n  return IoSocketHandleImpl::writev(slices, num_slice);\n}\n\nIoHandlePtr TestIoSocketHandle::accept(struct sockaddr* addr, socklen_t* addrlen) {\n  auto result = Api::OsSysCallsSingleton::get().accept(fd_, addr, addrlen);\n  if (SOCKET_INVALID(result.rc_)) {\n    return nullptr;\n  }\n\n  return std::make_unique<TestIoSocketHandle>(writev_override_, result.rc_, socket_v6only_,\n                                              domain_);\n}\n\nIoHandlePtr TestSocketInterface::makeSocket(int socket_fd, bool socket_v6only,\n                                            absl::optional<int> domain) const {\n  return std::make_unique<TestIoSocketHandle>(writev_override_proc_, socket_fd, socket_v6only,\n                                              domain);\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/test_socket_interface.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/network/address.h\"\n#include \"envoy/network/socket.h\"\n\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/socket_interface_impl.h\"\n\n#include \"absl/types/optional.h\"\n\n/**\n * TestSocketInterface allows overriding the behavior of the IoHandle interface.\n */\nnamespace Envoy {\nnamespace Network {\n\nclass TestIoSocketHandle : public IoSocketHandleImpl {\npublic:\n  using WritevOverrideType = absl::optional<Api::IoCallUint64Result>(TestIoSocketHandle* io_handle,\n                                                                     const Buffer::RawSlice* slices,\n                                                                     uint64_t num_slice);\n  using WritevOverrideProc = std::function<WritevOverrideType>;\n\n  TestIoSocketHandle(WritevOverrideProc writev_override_proc, os_fd_t fd = INVALID_SOCKET,\n                     bool socket_v6only = false, absl::optional<int> domain = absl::nullopt)\n      : IoSocketHandleImpl(fd, socket_v6only, domain), writev_override_(writev_override_proc) {}\n\nprivate:\n  IoHandlePtr accept(struct sockaddr* addr, socklen_t* addrlen) override;\n  Api::IoCallUint64Result writev(const Buffer::RawSlice* slices, uint64_t num_slice) override;\n\n  const WritevOverrideProc writev_override_;\n};\n\n/**\n * TestSocketInterface allows overriding of the behavior of the IoHandle interface of\n * accepted sockets.\n * Most integration tests have deterministic order in which Envoy accepts connections.\n * For example a test with one client connection will result in two accepted sockets. 
First\n * is for the client<->Envoy connection and the second is for the Envoy<->upstream connection.\n */\n\nclass TestSocketInterface : public SocketInterfaceImpl {\npublic:\n  /**\n   * Override the behavior of the IoSocketHandleImpl::writev() method.\n   * The supplied callback is invoked with the arguments of the writev method and the index\n   * of the accepted socket.\n   * Returning absl::nullopt from the callback continues normal execution of the\n   * IoSocketHandleImpl::writev() method. Returning a Api::IoCallUint64Result from callback skips\n   * the IoSocketHandleImpl::writev() with the returned result value.\n   */\n  TestSocketInterface(TestIoSocketHandle::WritevOverrideProc writev)\n      : writev_override_proc_(writev) {}\n\nprivate:\n  // SocketInterfaceImpl\n  IoHandlePtr makeSocket(int socket_fd, bool socket_v6only,\n                         absl::optional<int> domain) const override;\n\n  const TestIoSocketHandle::WritevOverrideProc writev_override_proc_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/filters/wait_for_whole_request_and_response.cc",
    "content": "#include <string>\n\n#include \"envoy/http/filter.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"extensions/filters/http/common/pass_through_filter.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/filters/common.h\"\n\nnamespace Envoy {\n\n// A test filter that waits for the request/response to finish before continuing.\nclass WaitForWholeRequestAndResponseStreamFilter : public Http::PassThroughFilter {\npublic:\n  constexpr static char name[] = \"wait-for-whole-request-and-response-filter\";\n\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool end_stream) override {\n    if (end_stream) {\n      return Http::FilterHeadersStatus::Continue;\n    }\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool end_stream) override {\n    if (end_stream) {\n      return Http::FilterDataStatus::Continue;\n    }\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n\n  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool end_stream) override {\n    if (end_stream) {\n      return Http::FilterHeadersStatus::Continue;\n    }\n    return Http::FilterHeadersStatus::StopIteration;\n  }\n\n  Http::FilterDataStatus encodeData(Buffer::Instance&, bool end_stream) override {\n    if (end_stream) {\n      return Http::FilterDataStatus::Continue;\n    }\n    return Http::FilterDataStatus::StopIterationAndBuffer;\n  }\n};\n\nconstexpr char WaitForWholeRequestAndResponseStreamFilter::name[];\n\nstatic Registry::RegisterFactory<SimpleFilterConfig<WaitForWholeRequestAndResponseStreamFilter>,\n                                 Server::Configuration::NamedHttpFilterConfigFactory>\n    encoder_register_;\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/h1_capture_direct_response_fuzz_test.cc",
    "content": "#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"test/integration/h1_fuzz.h\"\n\nnamespace Envoy {\n\nvoid H1FuzzIntegrationTest::initialize() {\n  const std::string body = \"Response body\";\n  const std::string file_path = TestEnvironment::writeStringToFileForTest(\"test_envoy\", body);\n  const std::string prefix(\"/\");\n  const Http::Code status(Http::Code::OK);\n  config_helper_.addConfigModifier(\n      [&file_path, &prefix](\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* route_config = hcm.mutable_route_config();\n        // adding direct response mode to the default route\n        auto* default_route =\n            hcm.mutable_route_config()->mutable_virtual_hosts(0)->mutable_routes(0);\n        default_route->mutable_match()->set_prefix(prefix);\n        default_route->mutable_direct_response()->set_status(static_cast<uint32_t>(status));\n        default_route->mutable_direct_response()->mutable_body()->set_filename(file_path);\n        // adding headers to the default route\n        auto* header_value_option = route_config->mutable_response_headers_to_add()->Add();\n        header_value_option->mutable_header()->set_value(\"direct-response-enabled\");\n        header_value_option->mutable_header()->set_key(\"x-direct-response-header\");\n      });\n  HttpIntegrationTest::initialize();\n}\n\nDEFINE_PROTO_FUZZER(const test::integration::CaptureFuzzTestCase& input) {\n  RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), \"\");\n  const auto ip_version = TestEnvironment::getIpVersionsForTest()[0];\n  PERSISTENT_FUZZ_VAR H1FuzzIntegrationTest h1_fuzz_integration_test(ip_version);\n  h1_fuzz_integration_test.replay(input, true);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/h1_capture_fuzz_test.cc",
    "content": "#include \"test/integration/h1_fuzz.h\"\n\nnamespace Envoy {\nvoid H1FuzzIntegrationTest::initialize() { HttpIntegrationTest::initialize(); }\n\nDEFINE_PROTO_FUZZER(const test::integration::CaptureFuzzTestCase& input) {\n  // Pick an IP version to use for loopback, it doesn't matter which.\n  RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), \"\");\n  const auto ip_version = TestEnvironment::getIpVersionsForTest()[0];\n  PERSISTENT_FUZZ_VAR H1FuzzIntegrationTest h1_fuzz_integration_test(ip_version);\n  h1_fuzz_integration_test.replay(input, false);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/h1_corpus/BadPath.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/EnvoyHandling100Continue.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/InvalidContentLength.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/NoHost.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/RouterDownstreamDisconnectBeforeRequestComplete.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/RouterDownstreamDisconnectBeforeResponseComplete.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/RouterHeaderOnlyRequestAndResponseNoBuffer.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/RouterRequestAndResponseWithBodyNoBuffer.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/RouterUpstreamDisconnectBeforeRequestcomplete.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/RouterUpstreamDisconnectBeforeResponseComplete.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/RouterUpstreamResponseBeforeRequestComplete.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\n\n"
  },
  {
    "path": "test/integration/h1_corpus/clusterfuzz-testcase-h1_capture_fuzz_test-5696503594090496",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 0\\r\\nx-forwarded-for: -1113144117.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaadaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\344aaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234\\234aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\344aaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.-507137025.1\\r\\ntransevfer-encoding: \\r\\n\\t\\t\\t\\t\\tents \"\n}\nevents {\n  downstream_send_bytes: 
\"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: 
\"\"\n}\nevents {\n  upstream_send_bytes: \"%\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: 
\"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  
downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  
downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  
downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 
123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 
10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 
10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 
123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 
10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 
10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: 
host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 
10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url 
HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 
123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 
10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\raaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  
downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  
downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  
upstream_send_bytes: \"HTTP/1.1 654 \\002\\002\\002\\002\\002\\002\\002\\002\\002\\002\\002OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n200\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_aaaaaaaaaaaaaaaaFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: 
ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\3\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n129\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\n"
  },
  {
    "path": "test/integration/h1_corpus/clusterfuzz-testcase-h1_capture_fuzz_test-6215556767154176",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-fovwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaevents {\\n  upaaaaaaaaa\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"H\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \")\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  downstream_send_bytes: \"@\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"a\"\n}\nevents {\n  downstream_send_bytes: \"H\"\n}\nevents {\n  downstream_send_bytes: \"B\"\n}\nevents {\n  downstream_send_bytes: \"%\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nr: 10.0.0.1\\r\\ntransfer-encoding: ahunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaTaaaaaaaaa{aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\377\\377\\377\\177aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaSaaaaaaaaaaaaaaaaaaaaaRaaaaaaaaaaaaaaaaaaaaaaaaa\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033\\033aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhsto: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwchunkedinK: chunked\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"@\"\n}\nevents {\n  downstream_send_bytes: \"H\"\n}\nevents {\n  downstream_send_bytes: \"*\"\n}\nevents {\n  downstream_send_bytes: \"\\377\\377\\377\\022\"\n}\nevents {\n  downstream_send_bytes: \"UUUUUU\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"*\\000\"\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  downstream_send_bytes: \"0\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HThunked\\r\\n\\r\\n400\\r\\naaaaaevents {\\n  upaaaaaaaaa\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"H\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\n"
  },
  {
    "path": "test/integration/h1_corpus/clusterfuzz-testcase-minimized-h1_capture_fuzz_test-5675304995782656",
    "content": "events {\n}\nevents {\n  downstream_send_bytes: \"POST /tes HTTP/1.1\\r\\ncontent-type: application/grpc\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 0\\r\\nx-forwarded-for: 10.0.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaad$for: -49442\\2147483647.aaaaaaa0a\"\n}\nevents {\n  downstream_send_bytes: \"POST /r HTTP/2.2\\n\\n\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /tes HTTP/1.1\\r\\ncontent-type: application/gr/grpc\\n\\r\\n\"\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: hr: 10.0.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaad$aaa0a\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"**\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 0\\r\\nx-forwarded-for: 10.0.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaad$aaa0a\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 100\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"D\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /tes 
HTTP/1.1\\r\\ncontent-type: application/gr/grpc\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\\n\"\n}\nevents {\n  downstream_send_bytes: \"D\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 100\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 0\\r\\nx-forwarded-for: 10.0.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaad$for: -49442\\2147483647.aaaaaaa0a\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 100\\n\\r\\n\"\n}\nevents {\n  upstream_send_bytes: \"HTTP/1.1 100 OK\\r\\ntransfee.coding: chunkedinK: chunked\\r\\n\\r\\n\"\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-fkrwardedK: chunked\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: 
\"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /tes HTTP/1.1\\r\\ncontent-type: application/gr/grpc\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/?g/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.2\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.2\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/2.1\\r\\n\\r\\n\\rPOST /t/llong/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\n\\n\\n\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"D\"\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 0\\r\\nx-forwarded-for: 10.0.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaad$aaa0a\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /testg/url HTTP/2.2\\n\\n\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"**\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/?#l HTTP/1.2\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST 
/t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.2\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.2\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.2\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/2.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.2\\r\\n\\r\\n\\rPOST /t/long/?#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /tlo/ng/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.3\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\r\\n\\r\\n\\rPOST /t/long/u#l HTTP/1.1\\n\\n\\n\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"D\"\n}\nevents {\n  downstream_send_bytes: \"POST /rl HTTP/2.2\\n\\n\\n\"\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"HEAD /te/url HTTP/1.1\\r\\nhost: hos\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  downstream_send_bytes: 
\"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n}\nevents {\n  downstream_send_bytes: \"POST /rl HTTP/2.3\\n\\n\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"?\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"D\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-fkrwardedK: chunked\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"N\"\n}\nevents {\n}\n"
  },
  {
    "path": "test/integration/h1_corpus/clusterfuzz-testcase-minimized-h1_capture_fuzz_test-5738507290542080",
    "content": "events {\n}\nevents { }\nevents {   downstream_send_bytes: \"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\" }\nevents {\n  downstream_recv_bytes {\n  } }\nevents {\n  downstream_send_bytes: \"POST /test/long/ur  HTTP/1.1\\r\\nhost:  �s �r �     t �s ��     ��  �� - �r �r  ��� r:     . �  ��� r��  ��  �c�d�  : v nt  er       3     -     r�ed-for: 10.0.0.1\\r\\ntransfer-encoding:events {\\n   downstrc\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"\\005\\000\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\\003\\000\"\n}\nevents {\n  downstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long)url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents 
{\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \">\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"?\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"z\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  upstream_send_bytes: \">\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url 
HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /tes HTTP/1.1\\r\\ncontent-type: application/grpc\\n\\r\\n\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST //test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-u: chunked\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"\\005\\000\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  
downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: 
\"\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\\206\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \">\"\n}\nevents {\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"?\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"?\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  downstream_send_bytes: \"POST /t/long)url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\\003\\000\"\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /t/long/url HTTP/1.1\\r\\n\\r\\n\\r\\n\"\n}\n"
  },
  {
    "path": "test/integration/h1_corpus/embed_null.pb_text",
    "content": "events {   downstream_send_bytes: \"POST   /\\nntnt:   � \\0     \" }\n"
  },
  {
    "path": "test/integration/h1_corpus/stream_info_destructor",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: -063%\\nuser-agent: /4302450943\\n\\t\\t08856android\\363\\243x-lyft-user-id: -063%\\nuser-agent: /4;02450943\\n\\t\\t08856android\\363\\243\\201$80\\n\\t\\t\\t\\n\\t\\t\\t\\tAe1\\201\\24180\\n\\t\\t\\t\\n\\t\\t\\t\\tAe118\\tefts \"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST //urk HTTP/1.1\\r\\nshhfot: ost\\r\\n -253%\\nuser-agent: /0%\\nuser-agent: /430%\\nuser-agent:4967:18446744073709551615iOS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\201~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~a~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~-7749978774642053139~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttutttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\\326Uttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-4017153681670550988ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt|tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttstttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\\364ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\\364ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttaaaaaaaaaaaaaaaaaaaaaaa-6742158280474489582aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\n\\r\\n\"\n}\nevents {\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST /test/lon\\nte: e: h\"\n}\nevents {\n}\nevents {\n  downstream_send_bytes: \"POST //urk HTTP/1.1\\r\\nshhfot: ost\\r\\n -253%\\nuser-agent: /0%\\nuser-agent: /430%\\nuser-agent:4967:18446744073709551615iOS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\201~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~a~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~-7749978774642053139~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttuttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\n\ntttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttt\\326Utttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttt-4017153681670550988tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt|ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttstttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\\364tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\\364tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttaaaaaaaaaaaaaaaaaaaaaaa-6742158280474489582aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\n\\r\\n\"\n}\nevents {\n}"
  },
  {
    "path": "test/integration/h1_corpus/upstream_extra_crlf.pb_text",
    "content": "events {\n  downstream_send_bytes: \"POST /test/long/url HTTP/1.1\\r\\nhost: host\\r\\nx-lyft-user-id: 123\\r\\nx-forwarded-for: 10.0.0.1\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n400\\r\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\r\\n0\\r\\n\\r\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"\\r\"\n}\nevents {\n}\nevents {\n  upstream_send_bytes: \"\\nStack trace:\\n\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"\\nStack trace:\\n\"\n}\nevents {\n  upstream_send_bytes: \"\"\n}\nevents {\n  upstream_recv_bytes {\n  }\n}\nevents {\n  upstream_send_bytes: \"1\"\n}\nevents {\n}\n"
  },
  {
    "path": "test/integration/h1_fuzz.cc",
    "content": "#include \"test/integration/h1_fuzz.h\"\n\n#include <functional>\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/environment.h\"\n\nnamespace Envoy {\n\nvoid H1FuzzIntegrationTest::replay(const test::integration::CaptureFuzzTestCase& input,\n                                   bool ignore_response) {\n  PERSISTENT_FUZZ_VAR bool initialized = [this]() -> bool {\n    initialize();\n    return true;\n  }();\n  UNREFERENCED_PARAMETER(initialized);\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"http\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  for (int i = 0; i < input.events().size(); ++i) {\n    const auto& event = input.events(i);\n    ENVOY_LOG_MISC(debug, \"Processing event: {}\", event.DebugString());\n    // If we're disconnected, we fail out.\n    if (!tcp_client->connected()) {\n      ENVOY_LOG_MISC(debug, \"Disconnected, no further event processing.\");\n      break;\n    }\n    switch (event.event_selector_case()) {\n    case test::integration::Event::kDownstreamSendBytes:\n      ASSERT_TRUE(tcp_client->write(event.downstream_send_bytes(), false, false));\n      break;\n    case test::integration::Event::kDownstreamRecvBytes:\n      // TODO(htuch): Should we wait for some data?\n      break;\n    case test::integration::Event::kUpstreamSendBytes:\n      if (ignore_response) {\n        break;\n      }\n      if (fake_upstream_connection == nullptr) {\n        if (!fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection, max_wait_ms_)) {\n          // If we timed out, we fail out.\n          tcp_client->close();\n          return;\n        }\n      }\n      // If we're no longer connected, we're done.\n      if (!fake_upstream_connection->connected()) {\n        tcp_client->close();\n        return;\n      }\n      {\n        AssertionResult result = 
fake_upstream_connection->write(event.upstream_send_bytes());\n        RELEASE_ASSERT(result, result.message());\n      }\n      break;\n    case test::integration::Event::kUpstreamRecvBytes:\n      // TODO(htuch): Should we wait for some data?\n      break;\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n  }\n  if (fake_upstream_connection != nullptr) {\n    if (fake_upstream_connection->connected()) {\n      AssertionResult result = fake_upstream_connection->close();\n      RELEASE_ASSERT(result, result.message());\n    }\n    AssertionResult result = fake_upstream_connection->waitForDisconnect();\n    RELEASE_ASSERT(result, result.message());\n  }\n  tcp_client->close();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/h1_fuzz.h",
    "content": "#pragma once\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/integration/capture_fuzz.pb.h\"\n#include \"test/integration/http_integration.h\"\n\nnamespace Envoy {\n\nclass H1FuzzIntegrationTest : public HttpIntegrationTest {\npublic:\n  H1FuzzIntegrationTest(Network::Address::IpVersion version)\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, version) {}\n\n  void initialize() override;\n  void replay(const test::integration::CaptureFuzzTestCase&, bool ignore_response);\n  const std::chrono::milliseconds max_wait_ms_{10};\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/h2_capture_direct_response_fuzz_test.cc",
    "content": "#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"test/integration/h2_fuzz.h\"\n\nnamespace Envoy {\n\nvoid H2FuzzIntegrationTest::initialize() {\n  const std::string body = \"Response body\";\n  const std::string file_path = TestEnvironment::writeStringToFileForTest(\"test_envoy\", body);\n  const std::string prefix(\"/\");\n  const Http::Code status(Http::Code::OK);\n\n  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n\n  config_helper_.addConfigModifier(\n      [&file_path, &prefix](\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* route_config = hcm.mutable_route_config();\n        // adding direct response mode to the default route\n        auto* default_route =\n            hcm.mutable_route_config()->mutable_virtual_hosts(0)->mutable_routes(0);\n        default_route->mutable_match()->set_prefix(prefix);\n        default_route->mutable_direct_response()->set_status(static_cast<uint32_t>(status));\n        default_route->mutable_direct_response()->mutable_body()->set_filename(file_path);\n        // adding headers to the default route\n        auto* header_value_option = route_config->mutable_response_headers_to_add()->Add();\n        header_value_option->mutable_header()->set_value(\"direct-response-enabled\");\n        header_value_option->mutable_header()->set_key(\"x-direct-response-header\");\n      });\n  HttpIntegrationTest::initialize();\n}\n\nDEFINE_PROTO_FUZZER(const test::integration::H2CaptureFuzzTestCase& input) {\n  RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), \"\");\n  const auto ip_version = TestEnvironment::getIpVersionsForTest()[0];\n  PERSISTENT_FUZZ_VAR H2FuzzIntegrationTest h2_fuzz_integration_test(ip_version);\n  h2_fuzz_integration_test.replay(input, true);\n}\n\n} // namespace 
Envoy\n"
  },
  {
    "path": "test/integration/h2_capture_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.integration;\n\nmessage H2FramePing {\n  enum Flags {\n    NONE = 0;\n    ACK = 1;\n  }\n  Flags flags = 1;\n  bytes data = 2;\n}\n\nmessage H2FrameSettings {\n  enum Flags {\n    NONE = 0;\n    ACK = 1;\n  }\n  Flags flags = 1;\n}\n\nenum H2HeadersFlags {\n  option allow_alias = true;\n  NONE = 0;\n  END_STREAM = 1;\n  END_HEADERS = 4;\n  END_METADATA = 4;\n}\n\nmessage H2FrameHeaders {\n  repeated H2HeadersFlags flags = 1;\n  uint32 stream_index = 2;\n}\n\nmessage H2FrameContinuation {\n  repeated H2HeadersFlags flags = 1;\n  uint32 stream_index = 2;\n}\n\nmessage H2FrameData {\n  enum Flags {\n    NONE = 0;\n    END_STREAM = 1;\n  }\n  Flags flags = 1;\n  uint32 stream_index = 2;\n}\n\nmessage H2FramePriority {\n  uint32 stream_index = 1;\n  uint32 dependent_index = 2;\n}\n\n// These map to the errors defined in: https://tools.ietf.org/html/rfc7540#section-7\nenum H2ErrorCode {\n  NO_ERROR = 0;\n  PROTOCOL_ERROR = 1;\n  INTERNAL_ERROR = 2;\n  FLOW_CONTROL_ERROR = 3;\n  SETTINGS_TIMEOUT = 4;\n  STREAM_CLOSED = 5;\n  FRAME_SIZE_ERROR = 6;\n  REFUSED_STREAM = 7;\n  CANCEL = 8;\n  COMPRESSION_ERROR = 9;\n  CONNECT_ERROR = 10;\n  ENHANCE_YOUR_CLAIM = 11;\n  INADEQUATE_SECURITY = 12;\n  HTTP_1_1_REQUIRED = 13;\n}\n\nmessage H2FramePushPromise {\n  repeated H2HeadersFlags flags = 1;\n  uint32 stream_index = 2;\n  uint32 promised_stream_index = 3;\n}\n\nmessage H2FrameResetStream {\n  uint32 stream_index = 1;\n  H2ErrorCode error_code = 2;\n}\n\nmessage H2FrameGoAway {\n  uint32 last_stream_index = 1;\n  H2ErrorCode error_code = 2;\n}\n\nmessage H2FrameWindowUpdate {\n  uint32 stream_index = 1;\n  uint32 increment = 2;\n}\n\n// A header that contains invalid status\nmessage H2FrameMalformedRequest {\n  uint32 stream_index = 1;\n}\n\n// A request that is comprised of a header that has HTTP GET request with a given host and path and\n// an additional zero length header (making this a malformed request)\nmessage 
H2FrameMalformedRequestWithZerolenHeader {\n  uint32 stream_index = 1;\n  string host = 2;\n  string path = 3;\n}\n\n// A request that is comprised of a header that has HTTP GET request with a given host and path\nmessage H2FrameRequest {\n  uint32 stream_index = 1;\n  string host = 2;\n  string path = 3;\n}\n\n// A request that is comprised of a header that has HTTP POST request with a given host and path\nmessage H2FramePostRequest {\n  uint32 stream_index = 1;\n  string host = 2;\n  string path = 3;\n}\n\n// A generic frame to emit a malformed frame\nmessage H2FrameGeneric {\n  bytes frame_bytes = 1;\n}\n\nmessage Metadata {\n  map<string, string> metadata = 1;\n}\n\nmessage H2FrameMetadata {\n  H2HeadersFlags flags = 1;\n  uint32 stream_index = 2;\n  Metadata metadata = 3;\n}\n\nmessage H2TestFrame {\n  // These values map to the frame creation methods in:\n  // test/common/http/http2/http2_frame.h\n  oneof frame_type {\n    H2FramePing ping = 1;\n    H2FrameSettings settings = 2;\n    H2FrameHeaders headers = 3;\n    H2FrameContinuation continuation = 4;\n    H2FrameData data = 5;\n    H2FramePriority priority = 6;\n    H2FramePushPromise push_promise = 7;\n    H2FrameResetStream reset_stream = 8;\n    H2FrameGoAway go_away = 9;\n    H2FrameWindowUpdate window_update = 10;\n    H2FrameMalformedRequest malformed_request = 11;\n    H2FrameMalformedRequestWithZerolenHeader malformed_request_with_zerolen_header = 12;\n    H2FrameRequest request = 13;\n    H2FramePostRequest post_request = 14;\n    H2FrameGeneric generic = 15;\n    H2FrameMetadata metadata = 77;\n  }\n}\n\nmessage DownstreamSendEvent {\n  repeated H2TestFrame h2_frames = 1;\n}\n\nmessage UpstreamSendEvent {\n  repeated H2TestFrame h2_frames = 1;\n}\n\nmessage Event {\n  oneof event_selector {\n    // Frames sent from downstream.\n    DownstreamSendEvent downstream_send_event = 1;\n    // Frames sent from upstream.\n    UpstreamSendEvent upstream_send_event = 2;\n  }\n}\n\n// Test case in corpus for 
*_h2_capture_fuzz_test.\nmessage H2CaptureFuzzTestCase {\n  repeated Event events = 1;\n}\n"
  },
  {
    "path": "test/integration/h2_capture_fuzz_test.cc",
    "content": "#include \"test/integration/h2_fuzz.h\"\n\nnamespace Envoy {\nvoid H2FuzzIntegrationTest::initialize() {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() >= 1, \"\");\n    auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n    cluster->mutable_http2_protocol_options()->set_allow_metadata(true);\n  });\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_metadata(true); });\n  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n\n  HttpIntegrationTest::initialize();\n}\n\nDEFINE_PROTO_FUZZER(const test::integration::H2CaptureFuzzTestCase& input) {\n  // Pick an IP version to use for loopback, it doesn't matter which.\n  FUZZ_ASSERT(!TestEnvironment::getIpVersionsForTest().empty());\n  const auto ip_version = TestEnvironment::getIpVersionsForTest()[0];\n  PERSISTENT_FUZZ_VAR H2FuzzIntegrationTest h2_fuzz_integration_test(ip_version);\n  h2_fuzz_integration_test.replay(input, false);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/h2_corpus/metadata_test",
    "content": "events {\n  downstream_send_event {\n    h2_frames {\n      settings {\n        flags: NONE\n      }\n    }\n    h2_frames {\n      settings {\n        flags: ACK\n      }\n    }\n    h2_frames {\n      request {\n        stream_index: 1\n        host: \"host\"\n        path: \"/path/to/long/url\"\n      }\n    }\n  }\n}\nevents {\n  upstream_send_event {\n    h2_frames {\n      settings {\n        flags: NONE\n      }\n    }\n    h2_frames {\n      settings {\n        flags: ACK\n      }\n    }\n  }\n}\nevents {\n  downstream_send_event {\n    h2_frames {\n      metadata {\n        stream_index: 1\n        metadata {\n          metadata {\n              key: \"connections\"\n              value: \"15\"\n          }\n          metadata {\n            key: \"Timeout Seconds\"\n            value: \"10\"\n          }\n        }\n      }\n    }\n  }\n}"
  },
  {
    "path": "test/integration/h2_corpus/simple_test",
    "content": "events {\n  downstream_send_event {\n    h2_frames {\n      settings {\n        flags: NONE\n      }\n    }\n    h2_frames {\n      settings {\n        flags: ACK\n      }\n    }\n    h2_frames {\n      request {\n        stream_index: 1\n        host: \"host\"\n        path: \"/path/to/long/url\"\n      }\n    }\n  }\n}\nevents {\n  upstream_send_event {\n    h2_frames {\n      settings {\n        flags: NONE\n      }\n    }\n    h2_frames {\n      settings {\n        flags: ACK\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/integration/h2_fuzz.cc",
    "content": "#include \"test/integration/h2_fuzz.h\"\n\n#include <functional>\n\n#include \"common/common/assert.h\"\n#include \"common/common/base64.h\"\n#include \"common/common/logger.h\"\n\n#include \"test/test_common/environment.h\"\n\nnamespace Envoy {\n\nusing namespace Envoy::Http::Http2;\n\nnamespace {\n\nstatic Http2Frame::HeadersFlags\nunifyHeadersFlags(const Protobuf::RepeatedField<int>& headers_flags) {\n  int unified_flags = 0;\n  for (const auto& flag : headers_flags) {\n    unified_flags |= flag;\n  }\n  return static_cast<Http2Frame::HeadersFlags>(unified_flags);\n}\n\n} // namespace\n\nvoid H2FuzzIntegrationTest::sendFrame(const test::integration::H2TestFrame& proto_frame,\n                                      std::function<void(const Http2Frame&)> write_func) {\n  Http2Frame h2_frame;\n  switch (proto_frame.frame_type_case()) {\n  case test::integration::H2TestFrame::kPing:\n    ENVOY_LOG_MISC(trace, \"Sending ping frame\");\n    h2_frame = Http2Frame::makePingFrame(proto_frame.ping().data());\n    break;\n  case test::integration::H2TestFrame::kSettings: {\n    const Http2Frame::SettingsFlags settings_flags =\n        static_cast<Http2Frame::SettingsFlags>(proto_frame.settings().flags());\n    ENVOY_LOG_MISC(trace, \"Sending settings frame\");\n    h2_frame = Http2Frame::makeEmptySettingsFrame(settings_flags);\n    break;\n  }\n  case test::integration::H2TestFrame::kHeaders: {\n    const Http2Frame::HeadersFlags headers_flags = unifyHeadersFlags(proto_frame.headers().flags());\n    const uint32_t stream_idx = proto_frame.headers().stream_index();\n    ENVOY_LOG_MISC(trace, \"Sending headers frame\");\n    h2_frame = Http2Frame::makeEmptyHeadersFrame(stream_idx, headers_flags);\n    break;\n  }\n  case test::integration::H2TestFrame::kContinuation: {\n    const Http2Frame::HeadersFlags headers_flags =\n        unifyHeadersFlags(proto_frame.continuation().flags());\n    const uint32_t stream_idx = proto_frame.continuation().stream_index();\n  
  ENVOY_LOG_MISC(trace, \"Sending continuation frame\");\n    h2_frame = Http2Frame::makeEmptyContinuationFrame(stream_idx, headers_flags);\n    break;\n  }\n  case test::integration::H2TestFrame::kData: {\n    const Http2Frame::DataFlags data_flags =\n        static_cast<Http2Frame::DataFlags>(proto_frame.data().flags());\n    const uint32_t stream_idx = proto_frame.data().stream_index();\n    ENVOY_LOG_MISC(trace, \"Sending data frame\");\n    h2_frame = Http2Frame::makeEmptyDataFrame(stream_idx, data_flags);\n    break;\n  }\n  case test::integration::H2TestFrame::kPriority: {\n    const uint32_t stream_idx = proto_frame.priority().stream_index();\n    const uint32_t dependent_idx = proto_frame.priority().dependent_index();\n    ENVOY_LOG_MISC(trace, \"Sending priority frame\");\n    h2_frame = Http2Frame::makePriorityFrame(stream_idx, dependent_idx);\n    break;\n  }\n  case test::integration::H2TestFrame::kPushPromise: {\n    const Http2Frame::HeadersFlags headers_flags =\n        unifyHeadersFlags(proto_frame.push_promise().flags());\n    const uint32_t stream_idx = proto_frame.push_promise().stream_index();\n    const uint32_t promised_stream_idx = proto_frame.push_promise().promised_stream_index();\n    ENVOY_LOG_MISC(trace, \"Sending push promise frame\");\n    h2_frame =\n        Http2Frame::makeEmptyPushPromiseFrame(stream_idx, promised_stream_idx, headers_flags);\n    break;\n  }\n  case test::integration::H2TestFrame::kResetStream: {\n    const uint32_t stream_idx = proto_frame.reset_stream().stream_index();\n    const Http2Frame::ErrorCode error_code =\n        static_cast<Http2Frame::ErrorCode>(proto_frame.reset_stream().error_code());\n    ENVOY_LOG_MISC(trace, \"Sending reset stream frame\");\n    h2_frame = Http2Frame::makeResetStreamFrame(stream_idx, error_code);\n    break;\n  }\n  case test::integration::H2TestFrame::kGoAway: {\n    const uint32_t last_stream_idx = proto_frame.go_away().last_stream_index();\n    const Http2Frame::ErrorCode 
error_code =\n        static_cast<Http2Frame::ErrorCode>(proto_frame.go_away().error_code());\n    ENVOY_LOG_MISC(trace, \"Sending go-away frame\");\n    h2_frame = Http2Frame::makeEmptyGoAwayFrame(last_stream_idx, error_code);\n    break;\n  }\n  case test::integration::H2TestFrame::kWindowUpdate: {\n    const uint32_t stream_idx = proto_frame.window_update().stream_index();\n    const uint32_t increment = proto_frame.window_update().increment();\n    ENVOY_LOG_MISC(trace, \"Sending windows_update frame\");\n    h2_frame = Http2Frame::makeWindowUpdateFrame(stream_idx, increment);\n    break;\n  }\n  case test::integration::H2TestFrame::kMalformedRequest: {\n    const uint32_t stream_idx = proto_frame.malformed_request().stream_index();\n    ENVOY_LOG_MISC(trace, \"Sending malformed_request frame\");\n    h2_frame = Http2Frame::makeMalformedRequest(stream_idx);\n    break;\n  }\n  case test::integration::H2TestFrame::kMalformedRequestWithZerolenHeader: {\n    const uint32_t stream_idx = proto_frame.malformed_request_with_zerolen_header().stream_index();\n    const absl::string_view host = proto_frame.malformed_request_with_zerolen_header().host();\n    const absl::string_view path = proto_frame.malformed_request_with_zerolen_header().path();\n    ENVOY_LOG_MISC(trace, \"Sending malformed_request_with_zerolen_header\");\n    h2_frame = Http2Frame::makeMalformedRequestWithZerolenHeader(stream_idx, host, path);\n    break;\n  }\n  case test::integration::H2TestFrame::kRequest: {\n    const uint32_t stream_idx = proto_frame.request().stream_index();\n    const absl::string_view host = proto_frame.request().host();\n    const absl::string_view path = proto_frame.request().path();\n    ENVOY_LOG_MISC(trace, \"Sending request\");\n    h2_frame = Http2Frame::makeRequest(stream_idx, host, path);\n    break;\n  }\n  case test::integration::H2TestFrame::kPostRequest: {\n    const uint32_t stream_idx = proto_frame.post_request().stream_index();\n    const absl::string_view 
host = proto_frame.post_request().host();\n    const absl::string_view path = proto_frame.post_request().path();\n    ENVOY_LOG_MISC(trace, \"Sending post request\");\n    h2_frame = Http2Frame::makePostRequest(stream_idx, host, path);\n    break;\n  }\n  case test::integration::H2TestFrame::kMetadata: {\n    const Http2Frame::MetadataFlags metadata_flags =\n        static_cast<Http2Frame::MetadataFlags>(proto_frame.metadata().flags());\n    const uint32_t stream_idx = proto_frame.metadata().stream_index();\n    Http::MetadataMap metadata_map;\n    for (const auto& metadataPair : proto_frame.metadata().metadata().metadata()) {\n      metadata_map.insert(metadataPair);\n    }\n    ENVOY_LOG_MISC(trace, \"Sending metadata frame.\");\n    h2_frame =\n        Http2Frame::makeMetadataFrameFromMetadataMap(stream_idx, metadata_map, metadata_flags);\n    break;\n  }\n  case test::integration::H2TestFrame::kGeneric: {\n    const absl::string_view frame_bytes = proto_frame.generic().frame_bytes();\n    ENVOY_LOG_MISC(trace, \"Sending generic frame\");\n    h2_frame = Http2Frame::makeGenericFrame(frame_bytes);\n    break;\n  }\n  default:\n    ENVOY_LOG_MISC(debug, \"Proto-frame not supported!\");\n    break;\n  }\n\n  write_func(h2_frame);\n}\n\nvoid H2FuzzIntegrationTest::replay(const test::integration::H2CaptureFuzzTestCase& input,\n                                   bool ignore_response) {\n  PERSISTENT_FUZZ_VAR bool initialized = [this]() -> bool {\n    initialize();\n    return true;\n  }();\n  UNREFERENCED_PARAMETER(initialized);\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"http\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  bool stop_further_inputs = false;\n  bool preamble_sent = false;\n  for (int i = 0; i < input.events().size(); ++i) {\n    if (stop_further_inputs) {\n      break;\n    }\n    const auto& event = input.events(i);\n    ENVOY_LOG_MISC(debug, \"Processing event: {}\", event.DebugString());\n    // If we're 
disconnected, we fail out.\n    if (!tcp_client->connected()) {\n      ENVOY_LOG_MISC(debug, \"Disconnected, no further event processing.\");\n      break;\n    }\n    switch (event.event_selector_case()) {\n    case test::integration::Event::kDownstreamSendEvent: {\n      auto downstream_write_func = [&](const Http2Frame& h2_frame) -> void {\n        ASSERT_TRUE(tcp_client->write(std::string(h2_frame), false, false));\n      };\n      if (!preamble_sent) {\n        // Start H2 session - send hello string\n        ASSERT_TRUE(tcp_client->write(Http2Frame::Preamble, false, false));\n        preamble_sent = true;\n      }\n      for (auto& frame : event.downstream_send_event().h2_frames()) {\n        if (!tcp_client->connected()) {\n          ENVOY_LOG_MISC(debug,\n                         \"Disconnected, avoiding sending data, no further event processing.\");\n          break;\n        }\n\n        ENVOY_LOG_MISC(trace, \"sending downstream frame\");\n        sendFrame(frame, downstream_write_func);\n      }\n      break;\n    }\n    case test::integration::Event::kUpstreamSendEvent: {\n      if (ignore_response) {\n        break;\n      }\n      if (fake_upstream_connection == nullptr) {\n        if (!fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection, max_wait_ms_)) {\n          // If we timed out, we fail out.\n          if (tcp_client->connected()) {\n            tcp_client->close();\n          }\n          stop_further_inputs = true;\n          break;\n        }\n      }\n      // If we're no longer connected, we're done.\n      if (!fake_upstream_connection->connected()) {\n        if (tcp_client->connected()) {\n          tcp_client->close();\n        }\n        stop_further_inputs = true;\n        break;\n      }\n      {\n        auto upstream_write_func = [&](const Http2Frame& h2_frame) -> void {\n          AssertionResult result = fake_upstream_connection->write(std::string(h2_frame));\n          RELEASE_ASSERT(result, result.message());\n  
      };\n        for (auto& frame : event.upstream_send_event().h2_frames()) {\n          if (!fake_upstream_connection->connected()) {\n            ENVOY_LOG_MISC(\n                debug,\n                \"Upstream disconnected, avoiding sending data, no further event processing.\");\n            stop_further_inputs = true;\n            break;\n          }\n\n          ENVOY_LOG_MISC(trace, \"sending upstream frame\");\n          sendFrame(frame, upstream_write_func);\n        }\n      }\n      break;\n    }\n    default:\n      // Maybe nothing is set?\n      break;\n    }\n  }\n  if (fake_upstream_connection != nullptr) {\n    if (fake_upstream_connection->connected()) {\n      AssertionResult result = fake_upstream_connection->close();\n      RELEASE_ASSERT(result, result.message());\n    }\n    AssertionResult result = fake_upstream_connection->waitForDisconnect();\n    RELEASE_ASSERT(result, result.message());\n  }\n  if (tcp_client->connected()) {\n    tcp_client->close();\n  }\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/h2_fuzz.h",
    "content": "#pragma once\n\n#include \"common/common/assert.h\"\n#include \"common/common/logger.h\"\n\n#include \"test/common/http/http2/http2_frame.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/integration/h2_capture_fuzz.pb.h\"\n#include \"test/integration/http_integration.h\"\n\nnamespace Envoy {\n\nclass H2FuzzIntegrationTest : public HttpIntegrationTest {\npublic:\n  H2FuzzIntegrationTest(Network::Address::IpVersion version)\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, version) {}\n\n  void initialize() override;\n  void replay(const test::integration::H2CaptureFuzzTestCase&, bool ignore_response);\n  const std::chrono::milliseconds max_wait_ms_{10};\n\nprivate:\n  void sendFrame(const test::integration::H2TestFrame&,\n                 std::function<void(const Envoy::Http::Http2::Http2Frame&)>);\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/hds_integration_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/core/v3/health_check.pb.h\"\n#include \"envoy/service/health/v3/hds.pb.h\"\n#include \"envoy/type/v3/http.pb.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/upstream/health_checker_impl.h\"\n#include \"common/upstream/health_discovery_service.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/config/utility.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\n// TODO(jmarantz): switch this to simulated-time after debugging flakes.\nclass HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest,\n                           public HttpIntegrationTest {\npublic:\n  HdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {}\n\n  void createUpstreams() override {\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n    hds_upstream_ = fake_upstreams_.back().get();\n    HttpIntegrationTest::createUpstreams();\n  }\n  void initialize() override {\n    setUpstreamCount(upstream_endpoints_);\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // Setup hds and corresponding gRPC cluster.\n      auto* hds_config = bootstrap.mutable_hds_config();\n      hds_config->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n      hds_config->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\"hds_cluster\");\n      hds_config->set_transport_api_version(apiVersion());\n      auto* hds_cluster = 
bootstrap.mutable_static_resources()->add_clusters();\n      hds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      hds_cluster->mutable_circuit_breakers()->Clear();\n      hds_cluster->set_name(\"hds_cluster\");\n      hds_cluster->mutable_http2_protocol_options();\n      auto* cluster_0 = bootstrap.mutable_static_resources()->mutable_clusters(0);\n      cluster_0->clear_load_assignment();\n    });\n\n    HttpIntegrationTest::initialize();\n\n    // Endpoint connections\n    if (tls_hosts_) {\n      host_upstream_ =\n          createFakeUpstream(HttpIntegrationTest::createUpstreamTlsContext(), http_conn_type_);\n      host2_upstream_ =\n          createFakeUpstream(HttpIntegrationTest::createUpstreamTlsContext(), http_conn_type_);\n    } else {\n      host_upstream_ = createFakeUpstream(http_conn_type_);\n      host2_upstream_ = createFakeUpstream(http_conn_type_);\n    }\n  }\n\n  // Sets up a connection between Envoy and the management server.\n  void waitForHdsStream() {\n    AssertionResult result =\n        hds_upstream_->waitForHttpConnection(*dispatcher_, hds_fake_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = hds_fake_connection_->waitForNewStream(*dispatcher_, hds_stream_);\n    RELEASE_ASSERT(result, result.message());\n  }\n\n  // Envoy sends health check messages to the endpoints\n  void healthcheckEndpoints(std::string cluster2 = \"\") {\n    ASSERT_TRUE(host_upstream_->waitForHttpConnection(*dispatcher_, host_fake_connection_));\n    ASSERT_TRUE(host_fake_connection_->waitForNewStream(*dispatcher_, host_stream_));\n    ASSERT_TRUE(host_stream_->waitForEndStream(*dispatcher_));\n\n    EXPECT_EQ(host_stream_->headers().getPathValue(), \"/healthcheck\");\n    EXPECT_EQ(host_stream_->headers().getMethodValue(), \"GET\");\n    EXPECT_EQ(host_stream_->headers().getHostValue(), \"anna\");\n\n    if (!cluster2.empty()) {\n      ASSERT_TRUE(host2_upstream_->waitForHttpConnection(*dispatcher_, 
host2_fake_connection_));\n      ASSERT_TRUE(host2_fake_connection_->waitForNewStream(*dispatcher_, host2_stream_));\n      ASSERT_TRUE(host2_stream_->waitForEndStream(*dispatcher_));\n\n      EXPECT_EQ(host2_stream_->headers().getPathValue(), \"/healthcheck\");\n      EXPECT_EQ(host2_stream_->headers().getMethodValue(), \"GET\");\n      EXPECT_EQ(host2_stream_->headers().getHostValue(), cluster2);\n    }\n  }\n\n  // Clean up the connection between Envoy and the management server\n  void cleanupHdsConnection() {\n    if (hds_fake_connection_ != nullptr) {\n      AssertionResult result = hds_fake_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = hds_fake_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n  }\n\n  // Clean up connections between Envoy and endpoints\n  void cleanupHostConnections() {\n    if (host_fake_connection_ != nullptr) {\n      AssertionResult result = host_fake_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = host_fake_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n    if (host2_fake_connection_ != nullptr) {\n      AssertionResult result = host2_fake_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = host2_fake_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n  }\n\n  // Creates a basic HealthCheckSpecifier message containing one endpoint and\n  // one HTTP health_check\n  envoy::service::health::v3::HealthCheckSpecifier\n  makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType codec_type, bool use_tls) {\n    envoy::service::health::v3::HealthCheckSpecifier server_health_check_specifier_;\n    server_health_check_specifier_.mutable_interval()->set_nanos(100000000); // 0.1 seconds\n\n    auto* cluster_health_check = server_health_check_specifier_.add_cluster_health_checks();\n\n    
cluster_health_check->set_cluster_name(\"anna\");\n    Network::Utility::addressToProtobufAddress(\n        *host_upstream_->localAddress(),\n        *cluster_health_check->add_locality_endpoints()->add_endpoints()->mutable_address());\n    cluster_health_check->mutable_locality_endpoints(0)->mutable_locality()->set_region(\n        \"middle_earth\");\n    cluster_health_check->mutable_locality_endpoints(0)->mutable_locality()->set_zone(\"shire\");\n    cluster_health_check->mutable_locality_endpoints(0)->mutable_locality()->set_sub_zone(\n        \"hobbiton\");\n    auto* health_check = cluster_health_check->add_health_checks();\n    health_check->mutable_timeout()->set_seconds(MaxTimeout);\n    health_check->mutable_interval()->set_seconds(MaxTimeout);\n    health_check->mutable_unhealthy_threshold()->set_value(2);\n    health_check->mutable_healthy_threshold()->set_value(2);\n    health_check->mutable_grpc_health_check();\n    auto* http_health_check = health_check->mutable_http_health_check();\n    http_health_check->set_path(\"/healthcheck\");\n    http_health_check->set_codec_client_type(codec_type);\n    if (use_tls) {\n      // Map our transport socket matches with our matcher.\n      const std::string criteria_yaml = absl::StrFormat(\n          R\"EOF(\ntransport_socket_match_criteria:\n  good_match: \"true\"\n)EOF\");\n      health_check->MergeFrom(\n          TestUtility::parseYaml<envoy::config::core::v3::HealthCheck>(criteria_yaml));\n\n      // Create the list of all possible matches.\n      const std::string match_yaml = absl::StrFormat(\n          R\"EOF(\ntransport_socket_matches:\n- name: \"tls_socket\"\n  match:\n    good_match: \"true\"\n  transport_socket:\n    name: tls\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n      common_tls_context:\n        tls_certificates:\n        - certificate_chain: { filename: \"%s\" }\n          private_key: { filename: \"%s\" }\n  )EOF\",\n          
TestEnvironment::runfilesPath(\"test/config/integration/certs/clientcert.pem\"),\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientkey.pem\"));\n      cluster_health_check->MergeFrom(\n          TestUtility::parseYaml<envoy::service::health::v3::ClusterHealthCheck>(match_yaml));\n    }\n    return server_health_check_specifier_;\n  }\n\n  // Creates a basic HealthCheckSpecifier message containing one endpoint and\n  // one TCP health_check\n  envoy::service::health::v3::HealthCheckSpecifier makeTcpHealthCheckSpecifier() {\n    envoy::service::health::v3::HealthCheckSpecifier server_health_check_specifier_;\n    server_health_check_specifier_.mutable_interval()->set_nanos(100000000); // 0.1 seconds\n\n    auto* health_check = server_health_check_specifier_.add_cluster_health_checks();\n\n    health_check->set_cluster_name(\"anna\");\n    Network::Utility::addressToProtobufAddress(\n        *host_upstream_->localAddress(),\n        *health_check->add_locality_endpoints()->add_endpoints()->mutable_address());\n    health_check->mutable_locality_endpoints(0)->mutable_locality()->set_region(\"middle_earth\");\n    health_check->mutable_locality_endpoints(0)->mutable_locality()->set_zone(\"eriador\");\n    health_check->mutable_locality_endpoints(0)->mutable_locality()->set_sub_zone(\"rivendell\");\n\n    health_check->add_health_checks()->mutable_timeout()->set_seconds(MaxTimeout);\n    health_check->mutable_health_checks(0)->mutable_interval()->set_seconds(MaxTimeout);\n    health_check->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2);\n    health_check->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2);\n    auto* tcp_health_check = health_check->mutable_health_checks(0)->mutable_tcp_health_check();\n    tcp_health_check->mutable_send()->set_text(\"50696E67\");\n    tcp_health_check->add_receive()->set_text(\"506F6E67\");\n\n    return server_health_check_specifier_;\n  }\n\n  // Checks if Envoy 
reported the health status of an endpoint correctly\n  bool checkEndpointHealthResponse(envoy::service::health::v3::EndpointHealth endpoint,\n                                   envoy::config::core::v3::HealthStatus healthy,\n                                   Network::Address::InstanceConstSharedPtr address) {\n\n    if (healthy != endpoint.health_status()) {\n      return false;\n    }\n    if (address->ip()->port() != endpoint.endpoint().address().socket_address().port_value()) {\n      return false;\n    }\n    if (address->ip()->addressAsString() !=\n        endpoint.endpoint().address().socket_address().address()) {\n      return false;\n    }\n    return true;\n  }\n\n  // Checks if the cluster counters are correct\n  void checkCounters(int requests, int responses, int successes, int failures) {\n    EXPECT_EQ(requests, test_server_->counter(\"hds_delegate.requests\")->value());\n    EXPECT_LE(responses, test_server_->counter(\"hds_delegate.responses\")->value());\n    EXPECT_EQ(successes, test_server_->counter(\"cluster.anna.health_check.success\")->value());\n    EXPECT_EQ(failures, test_server_->counter(\"cluster.anna.health_check.failure\")->value());\n  }\n\n  void waitForEndpointHealthResponse(envoy::config::core::v3::HealthStatus healthy) {\n    ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, response_));\n    while (!checkEndpointHealthResponse(response_.endpoint_health_response().endpoints_health(0),\n                                        healthy, host_upstream_->localAddress())) {\n      ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, response_));\n      EXPECT_EQ(\"POST\", hds_stream_->headers().getMethodValue());\n      EXPECT_EQ(TestUtility::getVersionedMethodPath(\"envoy.service.{1}.{0}.HealthDiscoveryService\",\n                                                    \"StreamHealthCheck\", apiVersion(),\n                                                    /*use_alpha=*/false, serviceNamespace()),\n                
hds_stream_->headers().getPathValue());\n      EXPECT_EQ(\"application/grpc\", hds_stream_->headers().getContentTypeValue());\n    }\n  }\n\n  // check response has correct format and health response.\n  bool checkClusterEndpointHealthResponse(envoy::config::core::v3::HealthStatus healthy,\n                                          Network::Address::InstanceConstSharedPtr address,\n                                          int cluster, int locality, int endpoint) {\n    // Ensure that this grpc message is a health response.\n    if (response_.has_endpoint_health_response()) {\n      auto& health_response = response_.endpoint_health_response();\n\n      // Ensure that this response has a cluster available at the index.\n      if (health_response.cluster_endpoints_health_size() > cluster) {\n        auto& cluster_response = health_response.cluster_endpoints_health(cluster);\n\n        // Ensure that this response has a locality available at the index.\n        if (cluster_response.locality_endpoints_health_size() > locality) {\n          auto& locality_response = cluster_response.locality_endpoints_health(locality);\n\n          // Ensure that this response has a endpoint available at the index.\n          if (locality_response.endpoints_health_size() > endpoint) {\n            auto& endpoint_response = locality_response.endpoints_health(endpoint);\n\n            // Check to see if this endpoint has specified health status.\n            return checkEndpointHealthResponse(endpoint_response, healthy, address);\n          }\n        }\n      }\n    }\n\n    // Some field is missing, return false.\n    return false;\n  }\n\n  // wait until our response has desired health status for desired endpoint.\n  bool waitForClusterHealthResponse(envoy::config::core::v3::HealthStatus healthy,\n                                    Network::Address::InstanceConstSharedPtr address, int cluster,\n                                    int locality, int endpoint) {\n    // Get some 
response.\n    if (!hds_stream_->waitForGrpcMessage(*dispatcher_, response_)) {\n      return false;\n    }\n\n    // Check endpoint health status by indices.\n    while (!checkClusterEndpointHealthResponse(healthy, address, cluster, locality, endpoint)) {\n      if (!hds_stream_->waitForGrpcMessage(*dispatcher_, response_)) {\n        return false;\n      }\n\n      EXPECT_EQ(\"POST\", hds_stream_->headers().getMethodValue());\n      EXPECT_EQ(TestUtility::getVersionedMethodPath(\"envoy.service.{1}.{0}.HealthDiscoveryService\",\n                                                    \"StreamHealthCheck\", apiVersion(),\n                                                    /*use_alpha=*/false, serviceNamespace()),\n                hds_stream_->headers().getPathValue());\n      EXPECT_EQ(\"application/grpc\", hds_stream_->headers().getContentTypeValue());\n    }\n\n    return true;\n  }\n\n  const std::string serviceNamespace() const {\n    switch (apiVersion()) {\n    case envoy::config::core::v3::ApiVersion::AUTO:\n      FALLTHRU;\n    case envoy::config::core::v3::ApiVersion::V2:\n      return \"discovery\";\n    case envoy::config::core::v3::ApiVersion::V3:\n      return \"health\";\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  static constexpr uint32_t upstream_endpoints_ = 0;\n\n  FakeHttpConnectionPtr hds_fake_connection_;\n  FakeStreamPtr hds_stream_;\n  FakeUpstream* hds_upstream_{};\n  uint32_t hds_requests_{};\n  FakeUpstreamPtr host_upstream_{};\n  FakeUpstreamPtr host2_upstream_{};\n  FakeStreamPtr host_stream_;\n  FakeStreamPtr host2_stream_;\n  FakeHttpConnectionPtr host_fake_connection_;\n  FakeHttpConnectionPtr host2_fake_connection_;\n  FakeRawConnectionPtr host_fake_raw_connection_;\n  FakeHttpConnection::Type http_conn_type_{FakeHttpConnection::Type::HTTP1};\n  bool tls_hosts_{false};\n\n  static constexpr int MaxTimeout = 100;\n  envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse envoy_msg_;\n  
envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse response_;\n  envoy::service::health::v3::HealthCheckSpecifier server_health_check_specifier_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, HdsIntegrationTest,\n                         VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Tests Envoy HTTP health checking a single healthy endpoint and reporting that it is\n// indeed healthy to the server.\nTEST_P(HdsIntegrationTest, SingleEndpointHealthyHttp) {\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n  EXPECT_EQ(envoy_msg_.health_check_request().capability().health_check_protocols(0),\n            envoy::service::health::v3::Capability::HTTP);\n\n  // Server asks for health checking\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy sends a health check message to an endpoint\n  healthcheckEndpoints();\n\n  // Endpoint responds to the health check\n  host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  host_stream_->encodeData(1024, true);\n\n  // Receive updates until the one we expect arrives\n  waitForEndpointHealthResponse(envoy::config::core::v3::HEALTHY);\n\n  checkCounters(1, 2, 1, 0);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests Envoy HTTP health checking a single endpoint that times out and reporting\n// that it is unhealthy to the server.\nTEST_P(HdsIntegrationTest, SingleEndpointTimeoutHttp) {\n  initialize();\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n\n  
server_health_check_specifier_.mutable_cluster_health_checks(0)\n      ->mutable_health_checks(0)\n      ->mutable_timeout()\n      ->set_seconds(0);\n  server_health_check_specifier_.mutable_cluster_health_checks(0)\n      ->mutable_health_checks(0)\n      ->mutable_timeout()\n      ->set_nanos(100000000); // 0.1 seconds\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy sends a health check message to an endpoint\n  ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_));\n\n  // Endpoint doesn't respond to the health check\n  ASSERT_TRUE(host_fake_raw_connection_->waitForDisconnect());\n\n  // Receive updates until the one we expect arrives\n  waitForEndpointHealthResponse(envoy::config::core::v3::TIMEOUT);\n\n  checkCounters(1, 2, 0, 1);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests Envoy HTTP health checking a single unhealthy endpoint and reporting that it is\n// indeed unhealthy to the server.\nTEST_P(HdsIntegrationTest, SingleEndpointUnhealthyHttp) {\n  initialize();\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy sends a health check message to an endpoint\n  healthcheckEndpoints();\n\n  // Endpoint responds to the health check\n  
host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}, false);\n  host_stream_->encodeData(1024, true);\n\n  // Receive updates until the one we expect arrives\n  waitForEndpointHealthResponse(envoy::config::core::v3::UNHEALTHY);\n\n  checkCounters(1, 2, 0, 1);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests Envoy TCP health checking an endpoint that doesn't respond and reporting that it is\n// unhealthy to the server.\nTEST_P(HdsIntegrationTest, SingleEndpointTimeoutTcp) {\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n  EXPECT_EQ(envoy_msg_.health_check_request().capability().health_check_protocols(1),\n            envoy::service::health::v3::Capability::TCP);\n\n  // Server asks for health checking\n  server_health_check_specifier_ = makeTcpHealthCheckSpecifier();\n  server_health_check_specifier_.mutable_cluster_health_checks(0)\n      ->mutable_health_checks(0)\n      ->mutable_timeout()\n      ->set_seconds(0);\n  server_health_check_specifier_.mutable_cluster_health_checks(0)\n      ->mutable_health_checks(0)\n      ->mutable_timeout()\n      ->set_nanos(100000000); // 0.1 seconds\n\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoys asks the endpoint if it's healthy\n  ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_));\n\n  // No response from the endpoint\n  ASSERT_TRUE(host_fake_raw_connection_->waitForDisconnect());\n\n  // Receive updates until the one we expect arrives\n  waitForEndpointHealthResponse(envoy::config::core::v3::TIMEOUT);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests Envoy TCP health checking a single healthy endpoint and reporting that it is\n// 
indeed healthy to the server.\nTEST_P(HdsIntegrationTest, SingleEndpointHealthyTcp) {\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  server_health_check_specifier_ = makeTcpHealthCheckSpecifier();\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy asks the endpoint if it's healthy\n  ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_));\n  ASSERT_TRUE(\n      host_fake_raw_connection_->waitForData(FakeRawConnection::waitForInexactMatch(\"Ping\")));\n  AssertionResult result = host_fake_raw_connection_->write(\"Pong\");\n  RELEASE_ASSERT(result, result.message());\n\n  // Receive updates until the one we expect arrives\n  waitForEndpointHealthResponse(envoy::config::core::v3::HEALTHY);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests Envoy TCP health checking a single unhealthy endpoint and reporting that it is\n// indeed unhealthy to the server.\nTEST_P(HdsIntegrationTest, SingleEndpointUnhealthyTcp) {\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  server_health_check_specifier_ = makeTcpHealthCheckSpecifier();\n  server_health_check_specifier_.mutable_cluster_health_checks(0)\n      ->mutable_health_checks(0)\n      ->mutable_timeout()\n      ->set_seconds(2);\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy asks the endpoint if it's healthy\n  ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_));\n  ASSERT_TRUE(\n    
  host_fake_raw_connection_->waitForData(FakeRawConnection::waitForInexactMatch(\"Ping\")));\n  AssertionResult result = host_fake_raw_connection_->write(\"Voronoi\");\n  RELEASE_ASSERT(result, result.message());\n\n  // Receive updates until the one we expect arrives\n  waitForEndpointHealthResponse(envoy::config::core::v3::UNHEALTHY);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests that Envoy can HTTP health check two hosts that are in the same cluster, and\n// the same locality and report back the correct health statuses.\nTEST_P(HdsIntegrationTest, TwoEndpointsSameLocality) {\n  initialize();\n\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n  Network::Utility::addressToProtobufAddress(\n      *host2_upstream_->localAddress(),\n      *server_health_check_specifier_.mutable_cluster_health_checks(0)\n           ->mutable_locality_endpoints(0)\n           ->add_endpoints()\n           ->mutable_address());\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  healthcheckEndpoints(\"anna\");\n\n  // Endpoints respond to the health check\n  host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}, false);\n  host_stream_->encodeData(1024, true);\n  host2_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  host2_stream_->encodeData(1024, true);\n\n  // Receive updates until the one we expect arrives\n  ASSERT_TRUE(waitForClusterHealthResponse(envoy::config::core::v3::HEALTHY,\n                                           host2_upstream_->localAddress(), 0, 0, 1));\n\n  // Ensure we have at 
least one cluster before trying to read it.\n  ASSERT_EQ(response_.endpoint_health_response().cluster_endpoints_health_size(), 1);\n\n  // store cluster response info for easier reference.\n  const auto& cluster_response = response_.endpoint_health_response().cluster_endpoints_health(0);\n\n  // Check cluster has correct name and number of localities (1)\n  EXPECT_EQ(cluster_response.cluster_name(), \"anna\");\n  ASSERT_EQ(cluster_response.locality_endpoints_health_size(), 1);\n\n  // check the only locality and its endpoints.\n  const auto& locality_response = cluster_response.locality_endpoints_health(0);\n  EXPECT_EQ(locality_response.locality().sub_zone(), \"hobbiton\");\n  ASSERT_EQ(locality_response.endpoints_health_size(), 2);\n  EXPECT_TRUE(checkEndpointHealthResponse(locality_response.endpoints_health(0),\n                                          envoy::config::core::v3::UNHEALTHY,\n                                          host_upstream_->localAddress()));\n\n  checkCounters(1, 2, 1, 1);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests that Envoy can HTTP health check two hosts that are in the same cluster, and\n// different localities and report back the correct health statuses.\nTEST_P(HdsIntegrationTest, TwoEndpointsDifferentLocality) {\n  initialize();\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n\n  // Add endpoint\n  auto* health_check = server_health_check_specifier_.mutable_cluster_health_checks(0);\n\n  Network::Utility::addressToProtobufAddress(\n      *host2_upstream_->localAddress(),\n      *health_check->add_locality_endpoints()->add_endpoints()->mutable_address());\n  health_check->mutable_locality_endpoints(1)->mutable_locality()->set_region(\"plakias\");\n  health_check->mutable_locality_endpoints(1)->mutable_locality()->set_zone(\"fragokastelo\");\n  
health_check->mutable_locality_endpoints(1)->mutable_locality()->set_sub_zone(\"emplisi\");\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy sends health check messages to two endpoints\n  healthcheckEndpoints(\"anna\");\n\n  // Endpoint responds to the health check\n  host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}, false);\n  host_stream_->encodeData(1024, true);\n  host2_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  host2_stream_->encodeData(1024, true);\n\n  // Receive updates until the one we expect arrives\n  ASSERT_TRUE(waitForClusterHealthResponse(envoy::config::core::v3::HEALTHY,\n                                           host2_upstream_->localAddress(), 0, 1, 0));\n\n  ASSERT_EQ(response_.endpoint_health_response().cluster_endpoints_health_size(), 1);\n\n  // store cluster response info for easier reference.\n  const auto& cluster_response = response_.endpoint_health_response().cluster_endpoints_health(0);\n\n  // Check cluster has correct name and number of localities (2)\n  EXPECT_EQ(cluster_response.cluster_name(), \"anna\");\n  ASSERT_EQ(cluster_response.locality_endpoints_health_size(), 2);\n\n  // check first locality.\n  const auto& locality_resp0 = cluster_response.locality_endpoints_health(0);\n  EXPECT_EQ(locality_resp0.locality().sub_zone(), \"hobbiton\");\n  ASSERT_EQ(locality_resp0.endpoints_health_size(), 1);\n  EXPECT_TRUE(checkEndpointHealthResponse(locality_resp0.endpoints_health(0),\n                                          envoy::config::core::v3::UNHEALTHY,\n                                          host_upstream_->localAddress()));\n\n  // check 
second locality.\n  const auto& locality_resp1 = cluster_response.locality_endpoints_health(1);\n  EXPECT_EQ(locality_resp1.locality().sub_zone(), \"emplisi\");\n  ASSERT_EQ(locality_resp1.endpoints_health_size(), 1);\n\n  checkCounters(1, 2, 1, 1);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests that Envoy can HTTP health check two hosts that are in different clusters, and\n// report back the correct health statuses.\nTEST_P(HdsIntegrationTest, TwoEndpointsDifferentClusters) {\n  initialize();\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n\n  // Add endpoint\n  auto* health_check = server_health_check_specifier_.add_cluster_health_checks();\n\n  health_check->set_cluster_name(\"cat\");\n  Network::Utility::addressToProtobufAddress(\n      *host2_upstream_->localAddress(),\n      *health_check->add_locality_endpoints()->add_endpoints()->mutable_address());\n  health_check->mutable_locality_endpoints(0)->mutable_locality()->set_region(\"kounopetra\");\n  health_check->mutable_locality_endpoints(0)->mutable_locality()->set_zone(\"emplisi\");\n  health_check->mutable_locality_endpoints(0)->mutable_locality()->set_sub_zone(\"paris\");\n\n  health_check->add_health_checks()->mutable_timeout()->set_seconds(MaxTimeout);\n  health_check->mutable_health_checks(0)->mutable_interval()->set_seconds(MaxTimeout);\n  health_check->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2);\n  health_check->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2);\n  health_check->mutable_health_checks(0)->mutable_grpc_health_check();\n  health_check->mutable_health_checks(0)\n      ->mutable_http_health_check()\n      ->set_hidden_envoy_deprecated_use_http2(false);\n  health_check->mutable_health_checks(0)->mutable_http_health_check()->set_path(\"/healthcheck\");\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  
ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy sends health check messages to two endpoints\n  healthcheckEndpoints(\"cat\");\n\n  // Endpoint responds to the health check\n  host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}, false);\n  host_stream_->encodeData(1024, true);\n  host2_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  host2_stream_->encodeData(1024, true);\n\n  // Receive updates until the one we expect arrives\n  ASSERT_TRUE(waitForClusterHealthResponse(envoy::config::core::v3::HEALTHY,\n                                           host2_upstream_->localAddress(), 1, 0, 0));\n\n  ASSERT_EQ(response_.endpoint_health_response().cluster_endpoints_health_size(), 2);\n\n  // store cluster response info for easier reference.\n  const auto& cluster_resp0 = response_.endpoint_health_response().cluster_endpoints_health(0);\n  const auto& cluster_resp1 = response_.endpoint_health_response().cluster_endpoints_health(1);\n\n  // check cluster info and sizes.\n  EXPECT_EQ(cluster_resp0.cluster_name(), \"anna\");\n  ASSERT_EQ(cluster_resp0.locality_endpoints_health_size(), 1);\n  EXPECT_EQ(cluster_resp1.cluster_name(), \"cat\");\n  ASSERT_EQ(cluster_resp1.locality_endpoints_health_size(), 1);\n\n  // store locality response info for easier reference.\n  const auto& locality_resp0 = cluster_resp0.locality_endpoints_health(0);\n  const auto& locality_resp1 = cluster_resp1.locality_endpoints_health(0);\n\n  // check locality info and sizes.\n  EXPECT_EQ(locality_resp0.locality().sub_zone(), \"hobbiton\");\n  ASSERT_EQ(locality_resp0.endpoints_health_size(), 1);\n  EXPECT_EQ(locality_resp1.locality().sub_zone(), \"paris\");\n  
ASSERT_EQ(locality_resp1.endpoints_health_size(), 1);\n\n  // check endpoints.\n  EXPECT_TRUE(checkEndpointHealthResponse(locality_resp0.endpoints_health(0),\n                                          envoy::config::core::v3::UNHEALTHY,\n                                          host_upstream_->localAddress()));\n\n  checkCounters(1, 2, 0, 1);\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cat.health_check.success\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"cluster.cat.health_check.failure\")->value());\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests Envoy HTTP health checking a single endpoint, receiving an update\n// message from the management server and health checking a new endpoint\nTEST_P(HdsIntegrationTest, TestUpdateMessage) {\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy sends a health check message to an endpoint\n  healthcheckEndpoints();\n\n  // Endpoint responds to the health check\n  host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  host_stream_->encodeData(1024, true);\n\n  // Receive updates until the one we expect arrives\n  waitForEndpointHealthResponse(envoy::config::core::v3::HEALTHY);\n\n  checkCounters(1, 2, 1, 0);\n\n  cleanupHostConnections();\n\n  // New HealthCheckSpecifier message\n  envoy::service::health::v3::HealthCheckSpecifier new_message;\n  new_message.mutable_interval()->set_nanos(100000000); // 0.1 seconds\n\n  auto* health_check = new_message.add_cluster_health_checks();\n\n  
health_check->set_cluster_name(\"cat\");\n  Network::Utility::addressToProtobufAddress(\n      *host2_upstream_->localAddress(),\n      *health_check->add_locality_endpoints()->add_endpoints()->mutable_address());\n\n  health_check->mutable_locality_endpoints(0)->mutable_locality()->set_region(\"matala\");\n  health_check->mutable_locality_endpoints(0)->mutable_locality()->set_zone(\"tilburg\");\n  health_check->mutable_locality_endpoints(0)->mutable_locality()->set_sub_zone(\"rivendell\");\n\n  health_check->add_health_checks()->mutable_timeout()->set_seconds(MaxTimeout);\n  health_check->mutable_health_checks(0)->mutable_interval()->set_seconds(MaxTimeout);\n  health_check->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2);\n  health_check->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2);\n  health_check->mutable_health_checks(0)->mutable_grpc_health_check();\n  health_check->mutable_health_checks(0)\n      ->mutable_http_health_check()\n      ->set_hidden_envoy_deprecated_use_http2(false);\n  health_check->mutable_health_checks(0)->mutable_http_health_check()->set_path(\"/healthcheck\");\n\n  // Server asks for health checking with the new message\n  hds_stream_->sendGrpcMessage(new_message);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy sends a health check message to an endpoint\n  ASSERT_TRUE(host2_upstream_->waitForHttpConnection(*dispatcher_, host2_fake_connection_));\n  ASSERT_TRUE(host2_fake_connection_->waitForNewStream(*dispatcher_, host2_stream_));\n  ASSERT_TRUE(host2_stream_->waitForEndStream(*dispatcher_));\n\n  // Endpoint responds to the health check\n  host2_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"404\"}}, false);\n  host2_stream_->encodeData(1024, true);\n\n  // Receive updates until the one we expect arrives\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, response_));\n  while 
(!checkEndpointHealthResponse(response_.endpoint_health_response().endpoints_health(0),\n                                      envoy::config::core::v3::UNHEALTHY,\n                                      host2_upstream_->localAddress())) {\n    ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, response_));\n  }\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests Envoy HTTP health checking a single endpoint, receiving an update\n// message from the management server and reporting in a new interval\nTEST_P(HdsIntegrationTest, TestUpdateChangesTimer) {\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  healthcheckEndpoints();\n\n  // an update should be received after interval\n  ASSERT_TRUE(\n      hds_stream_->waitForGrpcMessage(*dispatcher_, response_, std::chrono::milliseconds(250)));\n\n  // New HealthCheckSpecifier message\n  server_health_check_specifier_.mutable_interval()->set_nanos(300000000); // 0.3 seconds\n\n  // Server asks for health checking with the new message\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // A response should not be received until the new timer is completed\n  ASSERT_FALSE(\n      hds_stream_->waitForGrpcMessage(*dispatcher_, response_, std::chrono::milliseconds(100)));\n  // Response should be received now\n  ASSERT_TRUE(\n      hds_stream_->waitForGrpcMessage(*dispatcher_, response_, std::chrono::milliseconds(400)));\n\n  // Clean up connections\n  
cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Tests Envoy HTTP health checking a single endpoint when interval hasn't been defined\nTEST_P(HdsIntegrationTest, TestDefaultTimer) {\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n\n  // Server asks for health checking\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n  server_health_check_specifier_.clear_interval();\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  healthcheckEndpoints();\n\n  // an update should be received after interval\n  ASSERT_TRUE(\n      hds_stream_->waitForGrpcMessage(*dispatcher_, response_, std::chrono::milliseconds(2500)));\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Health checks a single endpoint over TLS with HTTP/2\nTEST_P(HdsIntegrationTest, SingleEndpointHealthyTlsHttp2) {\n  // Change member variable to specify host streams to have tls transport socket.\n  tls_hosts_ = true;\n\n  // Change hosts to operate over HTTP/2 instead of default HTTP.\n  http_conn_type_ = FakeHttpConnection::Type::HTTP2;\n\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n  EXPECT_EQ(envoy_msg_.health_check_request().capability().health_check_protocols(0),\n            envoy::service::health::v3::Capability::HTTP);\n\n  // Server asks for health checking\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP2, true);\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // 
Envoy sends a health check message to an endpoint\n  healthcheckEndpoints();\n\n  // Endpoint responds to the health check\n  host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  host_stream_->encodeData(1024, true);\n\n  // Receive updates until the one we expect arrives\n  waitForEndpointHealthResponse(envoy::config::core::v3::HEALTHY);\n\n  checkCounters(1, 2, 1, 0);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Health checks a single endpoint over TLS with HTTP/1\nTEST_P(HdsIntegrationTest, SingleEndpointHealthyTlsHttp1) {\n  // Change member variable to specify host streams to have tls transport socket.\n  tls_hosts_ = true;\n\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n  EXPECT_EQ(envoy_msg_.health_check_request().capability().health_check_protocols(0),\n            envoy::service::health::v3::Capability::HTTP);\n\n  // Server asks for health checking\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, true);\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy sends a health check message to an endpoint\n  healthcheckEndpoints();\n\n  // Endpoint responds to the health check\n  host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  host_stream_->encodeData(1024, true);\n\n  // Receive updates until the one we expect arrives\n  waitForEndpointHealthResponse(envoy::config::core::v3::HEALTHY);\n\n  checkCounters(1, 2, 1, 0);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n// Attempts to health check a TLS endpoint over plaintext, which should fail.\nTEST_P(HdsIntegrationTest, 
SingleEndpointUnhealthyTlsMissingSocketMatch) {\n  // Make the endpoints expect communication over TLS.\n  tls_hosts_ = true;\n\n  initialize();\n\n  // Server <--> Envoy\n  waitForHdsStream();\n  ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_));\n  EXPECT_EQ(envoy_msg_.health_check_request().capability().health_check_protocols(0),\n            envoy::service::health::v3::Capability::HTTP);\n\n  // Make the specifier not have the TLS socket matches, so it will try to connect over plaintext.\n  server_health_check_specifier_ =\n      makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false);\n\n  hds_stream_->startGrpcStream();\n  hds_stream_->sendGrpcMessage(server_health_check_specifier_);\n  test_server_->waitForCounterGe(\"hds_delegate.requests\", ++hds_requests_);\n\n  // Envoy sends a health check message to an endpoint\n  ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_));\n\n  // Endpoint doesn't respond to the health check\n  ASSERT_TRUE(host_fake_raw_connection_->waitForDisconnect());\n\n  // Receive updates until the one we expect arrives. This should be UNHEALTHY and not TIMEOUT,\n  // because TIMEOUT occurs in the situation where there is no response from the endpoint. In this\n  // case, the endpoint does respond but it is over TLS, and HDS is trying to parse it as plaintext.\n  // It does not recognize the malformed plaintext, so it is considered a failure and UNHEALTHY is\n  // set.\n  waitForEndpointHealthResponse(envoy::config::core::v3::UNHEALTHY);\n\n  checkCounters(1, 2, 0, 1);\n\n  // Clean up connections\n  cleanupHostConnections();\n  cleanupHdsConnection();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/header_casing_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"test/integration/http_integration.h\"\n\n#include \"fake_upstream.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass HeaderCasingIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                    public HttpIntegrationTest {\npublic:\n  HeaderCasingIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void SetUp() override {\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP1);\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP1);\n  }\n\n  void initialize() override {\n    config_helper_.addConfigModifier(\n        [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n               hcm) {\n          hcm.mutable_http_protocol_options()\n              ->mutable_header_key_format()\n              ->mutable_proper_case_words();\n        });\n\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      bootstrap.mutable_static_resources()\n          ->mutable_clusters(0)\n          ->mutable_http_protocol_options()\n          ->mutable_header_key_format()\n          ->mutable_proper_case_words();\n    });\n\n    HttpIntegrationTest::initialize();\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, HeaderCasingIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(HeaderCasingIntegrationTest, VerifyCasedHeaders) {\n  initialize();\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"http\"));\n  auto request = \"GET / HTTP/1.1\\r\\nhost: host\\r\\nmy-header: foo\\r\\n\\r\\n\";\n  ASSERT_TRUE(tcp_client->write(request, 
false));\n\n  Envoy::FakeRawConnectionPtr upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(upstream_connection));\n\n  // Verify that the upstream request has proper cased headers.\n  std::string upstream_request;\n  EXPECT_TRUE(upstream_connection->waitForData(FakeRawConnection::waitForInexactMatch(\"GET /\"),\n                                               &upstream_request));\n\n  EXPECT_TRUE(absl::StrContains(upstream_request, \"My-Header: foo\"));\n  EXPECT_TRUE(absl::StrContains(upstream_request, \"Host: host\"));\n  EXPECT_TRUE(absl::StrContains(upstream_request, \"Content-Length: 0\"));\n\n  // Verify that the downstream response has proper cased headers.\n  auto response =\n      \"HTTP/1.1 503 Service Unavailable\\r\\ncontent-length: 0\\r\\nresponse-header: foo\\r\\n\\r\\n\";\n  ASSERT_TRUE(upstream_connection->write(response));\n\n  // Verify that we're at least one proper cased header.\n  tcp_client->waitForData(\"HTTP/1.1 503 Service Unavailable\\r\\nContent-Length:\", true);\n\n  tcp_client->close();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/header_integration_test.cc",
    "content": "#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/api/v2/endpoint.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/filters/http/router/v3/router.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/config/metadata.h\"\n#include \"common/http/exception.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nstd::string ipSuppressEnvoyHeadersTestParamsToString(\n    const ::testing::TestParamInfo<std::tuple<Network::Address::IpVersion, bool>>& params) {\n  return fmt::format(\n      \"{}_{}\",\n      TestUtility::ipTestParamsToString(\n          ::testing::TestParamInfo<Network::Address::IpVersion>(std::get<0>(params.param), 0)),\n      std::get<1>(params.param) ? 
\"with_x_envoy_from_router\" : \"without_x_envoy_from_router\");\n}\n\nvoid disableHeaderValueOptionAppend(\n    Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValueOption>& header_value_options) {\n  for (auto& i : header_value_options) {\n    i.mutable_append()->set_value(false);\n  }\n}\n\nconst std::string http_connection_mgr_config = R\"EOF(\nhttp_filters:\n  - name: envoy.filters.http.router\ncodec_type: HTTP1\nuse_remote_address: false\nxff_num_trusted_hops: 1\nstat_prefix: header_test\nroute_config:\n  virtual_hosts:\n    - name: no-headers\n      domains: [\"no-headers.com\"]\n      routes:\n        - match: { prefix: \"/\" }\n          route: { cluster: \"cluster_0\" }\n    - name: vhost-headers\n      domains: [\"vhost-headers.com\"]\n      request_headers_to_add:\n        - header:\n            key: \"x-vhost-request\"\n            value: \"vhost\"\n      request_headers_to_remove: [\"x-vhost-request-remove\"]\n      response_headers_to_add:\n        - header:\n            key: \"x-vhost-response\"\n            value: \"vhost\"\n      response_headers_to_remove: [\"x-vhost-response-remove\"]\n      routes:\n        - match: { prefix: \"/vhost-only\" }\n          route: { cluster: \"cluster_0\" }\n        - match: { prefix: \"/vhost-and-route\" }\n          request_headers_to_add:\n            - header:\n                key: \"x-route-request\"\n                value: \"route\"\n          request_headers_to_remove: [\"x-route-request-remove\"]\n          response_headers_to_add:\n            - header:\n                key: \"x-route-response\"\n                value: \"route\"\n          response_headers_to_remove: [\"x-route-response-remove\"]\n          route:\n            cluster: cluster_0\n        - match: { prefix: \"/vhost-route-and-weighted-clusters\" }\n          request_headers_to_add:\n            - header:\n                key: \"x-route-request\"\n                value: \"route\"\n          request_headers_to_remove: 
[\"x-route-request-remove\"]\n          response_headers_to_add:\n            - header:\n                key: \"x-route-response\"\n                value: \"route\"\n          response_headers_to_remove: [\"x-route-response-remove\"]\n          route:\n            weighted_clusters:\n              clusters:\n                - name: cluster_0\n                  weight: 100\n                  request_headers_to_add:\n                    - header:\n                        key: \"x-weighted-cluster-request\"\n                        value: \"weighted-cluster-1\"\n                  request_headers_to_remove: [\"x-weighted-cluster-request-remove\"]\n                  response_headers_to_add:\n                    - header:\n                        key: \"x-weighted-cluster-response\"\n                        value: \"weighted-cluster-1\"\n                  response_headers_to_remove: [\"x-weighted-cluster-response-remove\"]\n    - name: route-headers\n      domains: [\"route-headers.com\"]\n      routes:\n        - match: { prefix: \"/route-only\" }\n          request_headers_to_add:\n            - header:\n                key: \"x-route-request\"\n                value: \"route\"\n          request_headers_to_remove: [\"x-route-request-remove\"]\n          response_headers_to_add:\n            - header:\n                key: \"x-route-response\"\n                value: \"route\"\n          response_headers_to_remove: [\"x-route-response-remove\"]\n          route:\n            cluster: cluster_0\n    - name: xff-headers\n      domains: [\"xff-headers.com\"]\n      routes:\n        - match: { prefix: \"/test\" }\n          route:\n            cluster: cluster_0\n          request_headers_to_add:\n            - header:\n                key: \"x-real-ip\"\n                value: \"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\"\n    - name: append-same-headers\n      domains: [\"append-same-headers.com\"]\n      request_headers_to_add:\n        - header:\n            key: 
\"x-foo\"\n            value: \"value1\"\n        - header:\n            key: \"user-agent\"\n            value: \"token1\"\n      routes:\n        - match: { prefix: \"/test\" }\n          route:\n            cluster: cluster_0\n          request_headers_to_add:\n            - header:\n                key: \"x-foo\"\n                value: \"value2\"\n            - header:\n                key: \"user-agent\"\n                value: \"token2\"\n    - name: path-sanitization\n      domains: [\"path-sanitization.com\"]\n      routes:\n        - match: { prefix: \"/private\" }\n          route:\n            cluster: cluster_0\n          request_headers_to_add:\n            - header:\n                key: \"x-site\"\n                value: \"private\"\n        - match: { prefix: \"/public\" }\n          route:\n            cluster: cluster_0\n          request_headers_to_add:\n            - header:\n                key: \"x-site\"\n                value: \"public\"\n)EOF\";\n\n} // namespace\n\nclass HeaderIntegrationTest\n    : public testing::TestWithParam<std::tuple<Network::Address::IpVersion, bool>>,\n      public HttpIntegrationTest {\npublic:\n  HeaderIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, std::get<0>(GetParam())) {}\n\n  bool routerSuppressEnvoyHeaders() const { return std::get<1>(GetParam()); }\n\n  enum HeaderMode {\n    Append = 1,\n    Replace = 2,\n  };\n\n  void TearDown() override {\n    if (eds_connection_ != nullptr) {\n      AssertionResult result = eds_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = eds_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n      eds_connection_.reset();\n    }\n  }\n\n  void addHeader(Protobuf::RepeatedPtrField<envoy::config::core::v3::HeaderValueOption>* field,\n                 const std::string& key, const std::string& value, bool append) {\n    envoy::config::core::v3::HeaderValueOption* 
header_value_option = field->Add();\n    auto* mutable_header = header_value_option->mutable_header();\n    mutable_header->set_key(key);\n    mutable_header->set_value(value);\n    header_value_option->mutable_append()->set_value(append);\n  }\n\n  void prepareEDS() {\n    config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* static_resources = bootstrap.mutable_static_resources();\n      ASSERT(static_resources->clusters_size() == 1);\n\n      static_resources->mutable_clusters(0)->CopyFrom(\n          TestUtility::parseYaml<envoy::config::cluster::v3::Cluster>(\n              R\"EOF(\n                  name: cluster_0\n                  type: EDS\n                  eds_cluster_config:\n                    eds_config:\n                      api_config_source:\n                        api_type: GRPC\n                        grpc_services:\n                          envoy_grpc:\n                            cluster_name: \"eds-cluster\"\n              )EOF\"));\n\n      // TODO(zuercher): Make ConfigHelper EDS-aware and get rid of this hack:\n      // ConfigHelper expects the number of ports assigned to upstreams to match the number of\n      // static hosts assigned ports. So give it a place to put the port for our EDS host. 
This\n      // host must come before the eds-cluster's host to keep the upstreams and ports in the same\n      // order.\n      static_resources->add_clusters()->CopyFrom(\n          TestUtility::parseYaml<envoy::config::cluster::v3::Cluster>(fmt::format(\n              R\"EOF(\n                      name: unused-cluster\n                      type: STATIC\n                      lb_policy: ROUND_ROBIN\n                      load_assignment:\n                        cluster_name: unused-cluster\n                        endpoints:\n                        - lb_endpoints:\n                          - endpoint:\n                              address:\n                                socket_address:\n                                  address: {}\n                                  port_value: 0\n                  )EOF\",\n              Network::Test::getLoopbackAddressString(version_))));\n\n      static_resources->add_clusters()->CopyFrom(\n          TestUtility::parseYaml<envoy::config::cluster::v3::Cluster>(fmt::format(\n              R\"EOF(\n                      name: eds-cluster\n                      type: STATIC\n                      lb_policy: ROUND_ROBIN\n                      http2_protocol_options: {{}}\n                      connect_timeout: 5s\n                      load_assignment:\n                        cluster_name: eds-cluster\n                        endpoints:\n                        - lb_endpoints:\n                          - endpoint:\n                              address:\n                                socket_address:\n                                  address: {}\n                                  port_value: 0\n                  )EOF\",\n              Network::Test::getLoopbackAddressString(version_))));\n    });\n\n    use_eds_ = true;\n  }\n\n  void initializeFilter(HeaderMode mode, bool inject_route_config_headers) {\n    config_helper_.addConfigModifier(\n        
[&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) {\n          // Overwrite default config with our own.\n          TestUtility::loadFromYaml(http_connection_mgr_config, hcm);\n          envoy::extensions::filters::http::router::v3::Router router_config;\n          router_config.set_suppress_envoy_headers(routerSuppressEnvoyHeaders());\n          hcm.mutable_http_filters(0)->mutable_typed_config()->PackFrom(router_config);\n\n          const bool append = mode == HeaderMode::Append;\n\n          auto* route_config = hcm.mutable_route_config();\n          if (inject_route_config_headers) {\n            // Configure route config level headers.\n            addHeader(route_config->mutable_response_headers_to_add(), \"x-routeconfig-response\",\n                      \"routeconfig\", append);\n            route_config->add_response_headers_to_remove(\"x-routeconfig-response-remove\");\n            addHeader(route_config->mutable_request_headers_to_add(), \"x-routeconfig-request\",\n                      \"routeconfig\", append);\n            route_config->add_request_headers_to_remove(\"x-routeconfig-request-remove\");\n          }\n\n          if (use_eds_) {\n            addHeader(route_config->mutable_response_headers_to_add(), \"x-routeconfig-dynamic\",\n                      R\"(%UPSTREAM_METADATA([\"test.namespace\", \"key\"])%)\", append);\n\n            // Iterate over VirtualHosts, nested Routes and WeightedClusters, adding a dynamic\n            // response header.\n            for (auto& vhost : *route_config->mutable_virtual_hosts()) {\n              addHeader(vhost.mutable_response_headers_to_add(), \"x-vhost-dynamic\",\n                        R\"(vhost:%UPSTREAM_METADATA([\"test.namespace\", \"key\"])%)\", append);\n\n              for (auto& route : *vhost.mutable_routes()) {\n                addHeader(route.mutable_response_headers_to_add(), \"x-route-dynamic\",\n                          
R\"(route:%UPSTREAM_METADATA([\"test.namespace\", \"key\"])%)\", append);\n\n                if (route.has_route()) {\n                  auto* route_action = route.mutable_route();\n                  if (route_action->has_weighted_clusters()) {\n                    for (auto& c : *route_action->mutable_weighted_clusters()->mutable_clusters()) {\n                      addHeader(c.mutable_response_headers_to_add(), \"x-weighted-cluster-dynamic\",\n                                R\"(weighted:%UPSTREAM_METADATA([\"test.namespace\", \"key\"])%)\",\n                                append);\n                    }\n                  }\n                }\n              }\n            }\n          }\n\n          hcm.mutable_normalize_path()->set_value(normalize_path_);\n\n          if (append) {\n            // The config specifies append by default: no modifications needed.\n            return;\n          }\n\n          // Iterate over VirtualHosts and nested Routes, disabling header append.\n          for (auto& vhost : *route_config->mutable_virtual_hosts()) {\n            disableHeaderValueOptionAppend(*vhost.mutable_request_headers_to_add());\n            disableHeaderValueOptionAppend(*vhost.mutable_response_headers_to_add());\n\n            for (auto& route : *vhost.mutable_routes()) {\n              disableHeaderValueOptionAppend(*route.mutable_request_headers_to_add());\n              disableHeaderValueOptionAppend(*route.mutable_response_headers_to_add());\n\n              if (route.has_route()) {\n                auto* route_action = route.mutable_route();\n\n                if (route_action->has_weighted_clusters()) {\n                  for (auto& c : *route_action->mutable_weighted_clusters()->mutable_clusters()) {\n                    disableHeaderValueOptionAppend(*c.mutable_request_headers_to_add());\n                    disableHeaderValueOptionAppend(*c.mutable_response_headers_to_add());\n                  }\n                }\n              }\n            
}\n          }\n        });\n\n    initialize();\n  }\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n\n    if (use_eds_) {\n      addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n    }\n  }\n\n  void initialize() override {\n    if (use_eds_) {\n      on_server_init_function_ = [this]() {\n        AssertionResult result =\n            fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, eds_connection_);\n        RELEASE_ASSERT(result, result.message());\n        result = eds_connection_->waitForNewStream(*dispatcher_, eds_stream_);\n        RELEASE_ASSERT(result, result.message());\n        eds_stream_->startGrpcStream();\n\n        API_NO_BOOST(envoy::api::v2::DiscoveryRequest) discovery_request;\n        result = eds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request);\n        RELEASE_ASSERT(result, result.message());\n\n        API_NO_BOOST(envoy::api::v2::DiscoveryResponse) discovery_response;\n        discovery_response.set_version_info(\"1\");\n        discovery_response.set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);\n\n        auto cluster_load_assignment =\n            TestUtility::parseYaml<API_NO_BOOST(envoy::api::v2::ClusterLoadAssignment)>(fmt::format(\n                R\"EOF(\n                cluster_name: cluster_0\n                endpoints:\n                - lb_endpoints:\n                  - endpoint:\n                      address:\n                        socket_address:\n                          address: {}\n                          port_value: {}\n                    metadata:\n                      filter_metadata:\n                        test.namespace:\n                          key: metadata-value\n              )EOF\",\n                Network::Test::getLoopbackAddressString(std::get<0>(GetParam())),\n                fake_upstreams_[0]->localAddress()->ip()->port()));\n\n        discovery_response.add_resources()->PackFrom(cluster_load_assignment);\n        
eds_stream_->sendGrpcMessage(discovery_response);\n\n        // Wait for the next request to make sure the first response was consumed.\n        result = eds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request);\n        RELEASE_ASSERT(result, result.message());\n      };\n    }\n\n    HttpIntegrationTest::initialize();\n  }\n\nprotected:\n  void performRequest(Http::TestRequestHeaderMapImpl&& request_headers,\n                      Http::TestRequestHeaderMapImpl&& expected_request_headers,\n                      Http::TestResponseHeaderMapImpl&& response_headers,\n                      Http::TestResponseHeaderMapImpl&& expected_response_headers) {\n    registerTestServerPorts({\"http\"});\n    codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n    auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 0);\n\n    compareHeaders(Http::TestRequestHeaderMapImpl(upstream_request_->headers()),\n                   expected_request_headers);\n    compareHeaders(Http::TestResponseHeaderMapImpl(response->headers()), expected_response_headers);\n  }\n\n  template <class Headers, class ExpectedHeaders>\n  void compareHeaders(Headers&& headers, ExpectedHeaders& expected_headers) {\n    headers.remove(Envoy::Http::LowerCaseString{\"content-length\"});\n    headers.remove(Envoy::Http::LowerCaseString{\"date\"});\n    if (!routerSuppressEnvoyHeaders()) {\n      headers.remove(Envoy::Http::LowerCaseString{\"x-envoy-expected-rq-timeout-ms\"});\n      headers.remove(Envoy::Http::LowerCaseString{\"x-envoy-upstream-service-time\"});\n    }\n    headers.remove(Envoy::Http::LowerCaseString{\"x-forwarded-proto\"});\n    headers.remove(Envoy::Http::LowerCaseString{\"x-request-id\"});\n    headers.remove(Envoy::Http::LowerCaseString{\"x-envoy-internal\"});\n\n    EXPECT_EQ(expected_headers, headers);\n  }\n\n  bool use_eds_{false};\n  bool normalize_path_{false};\n  FakeHttpConnectionPtr eds_connection_;\n  FakeStreamPtr 
eds_stream_;\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    IpVersionsSuppressEnvoyHeaders, HeaderIntegrationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool()),\n    ipSuppressEnvoyHeadersTestParamsToString);\n\n// Validate that downstream request headers are passed upstream and upstream response headers are\n// passed downstream.\nTEST_P(HeaderIntegrationTest, TestRequestAndResponseHeaderPassThrough) {\n  initializeFilter(HeaderMode::Append, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"no-headers.com\"},\n          {\"x-request-foo\", \"downstram\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"no-headers.com\"},\n          {\"x-request-foo\", \"downstram\"},\n          {\":path\", \"/\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-return-foo\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-return-foo\", \"upstream\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates the virtual host appends upstream request headers and appends/removes upstream\n// response headers.\nTEST_P(HeaderIntegrationTest, TestVirtualHostAppendHeaderManipulation) {\n  initializeFilter(HeaderMode::Append, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/vhost-only\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-vhost-request-remove\", \"downstream\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", 
\"vhost-headers.com\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-vhost-request\", \"vhost\"},\n          {\":path\", \"/vhost-only\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-vhost-response-remove\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-vhost-response\", \"vhost\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates the virtual host replaces request headers and replaces upstream response headers.\nTEST_P(HeaderIntegrationTest, TestVirtualHostReplaceHeaderManipulation) {\n  initializeFilter(HeaderMode::Replace, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/vhost-only\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-unmodified\", \"downstream\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-unmodified\", \"downstream\"},\n          {\"x-vhost-request\", \"vhost\"},\n          {\":path\", \"/vhost-only\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-unmodified\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"upstream\"},\n          {\"x-vhost-response\", \"vhost\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates 
the route appends request headers and appends/removes upstream response headers.\nTEST_P(HeaderIntegrationTest, TestRouteAppendHeaderManipulation) {\n  initializeFilter(HeaderMode::Append, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/route-only\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"route-headers.com\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-route-request-remove\", \"downstream\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"route-headers.com\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-route-request\", \"route\"},\n          {\":path\", \"/route-only\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-route-response-remove\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-route-response\", \"route\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates the route replaces request headers and replaces/removes upstream response headers.\nTEST_P(HeaderIntegrationTest, TestRouteReplaceHeaderManipulation) {\n  initializeFilter(HeaderMode::Replace, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/route-only\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"route-headers.com\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-route-request-remove\", \"downstream\"},\n          {\"x-unmodified\", \"downstream\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"route-headers.com\"},\n          
{\"x-unmodified\", \"downstream\"},\n          {\"x-route-request\", \"route\"},\n          {\":path\", \"/route-only\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-route-response-remove\", \"upstream\"},\n          {\"x-unmodified\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"upstream\"},\n          {\"x-route-response\", \"route\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates the relationship between virtual host and route header manipulations when appending.\nTEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) {\n  initializeFilter(HeaderMode::Append, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/vhost-and-route\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-vhost-request-remove\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-route-request-remove\", \"downstream\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-route-request\", \"route\"},\n          {\"x-vhost-request\", \"vhost\"},\n          {\":path\", \"/vhost-and-route\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-vhost-response-remove\", 
\"upstream\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-route-response-remove\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-route-response\", \"route\"},\n          {\"x-vhost-response\", \"vhost\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates the relationship between virtual host and route header manipulations when replacing.\nTEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) {\n  initializeFilter(HeaderMode::Replace, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/vhost-and-route\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-unmodified\", \"request\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-unmodified\", \"request\"},\n          {\"x-route-request\", \"route\"},\n          {\"x-vhost-request\", \"vhost\"},\n          {\":path\", \"/vhost-and-route\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-unmodified\", \"response\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"response\"},\n          {\"x-route-response\", \"route\"},\n          {\"x-vhost-response\", \"vhost\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates the relationship between route 
configuration, virtual host and route header\n// manipulations when appending.\nTEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderManipulation) {\n  initializeFilter(HeaderMode::Append, true);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/vhost-and-route\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-routeconfig-request\", \"downstream\"},\n          {\"x-routeconfig-request-remove\", \"downstream\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-vhost-request-remove\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-route-request-remove\", \"downstream\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-routeconfig-request\", \"downstream\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-route-request\", \"route\"},\n          {\"x-vhost-request\", \"vhost\"},\n          {\"x-routeconfig-request\", \"routeconfig\"},\n          {\":path\", \"/vhost-and-route\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-routeconfig-response\", \"upstream\"},\n          {\"x-routeconfig-response-remove\", \"upstream\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-vhost-response-remove\", \"upstream\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-route-response-remove\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-routeconfig-response\", \"upstream\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-route-response\", \"upstream\"},\n        
  {\"x-route-response\", \"route\"},\n          {\"x-vhost-response\", \"vhost\"},\n          {\"x-routeconfig-response\", \"routeconfig\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates the relationship between route configuration, virtual host and route header\n// manipulations when replacing.\nTEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderManipulation) {\n  initializeFilter(HeaderMode::Replace, true);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/vhost-and-route\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-routeconfig-request\", \"downstream\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-unmodified\", \"request\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-unmodified\", \"request\"},\n          {\"x-route-request\", \"route\"},\n          {\"x-vhost-request\", \"vhost\"},\n          {\"x-routeconfig-request\", \"routeconfig\"},\n          {\":path\", \"/vhost-and-route\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-routeconfig-response\", \"upstream\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-unmodified\", \"response\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"response\"},\n          {\"x-route-response\", \"route\"},\n          {\"x-vhost-response\", \"vhost\"},\n          {\"x-routeconfig-response\", \"routeconfig\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates the relationship between route 
configuration, virtual host, route, and weighted\n// cluster header manipulations when appending.\nTEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHeaderManipulation) {\n  initializeFilter(HeaderMode::Append, true);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/vhost-route-and-weighted-clusters\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-routeconfig-request\", \"downstream\"},\n          {\"x-routeconfig-request-remove\", \"downstream\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-vhost-request-remove\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-route-request-remove\", \"downstream\"},\n          {\"x-weighted-cluster-request\", \"downstream\"},\n          {\"x-weighted-cluster-request-remove\", \"downstream\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-routeconfig-request\", \"downstream\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-weighted-cluster-request\", \"downstream\"},\n          {\"x-weighted-cluster-request\", \"weighted-cluster-1\"},\n          {\"x-route-request\", \"route\"},\n          {\"x-vhost-request\", \"vhost\"},\n          {\"x-routeconfig-request\", \"routeconfig\"},\n          {\":path\", \"/vhost-route-and-weighted-clusters\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-routeconfig-response\", \"upstream\"},\n          {\"x-routeconfig-response-remove\", \"upstream\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-vhost-response-remove\", \"upstream\"},\n          
{\"x-route-response\", \"upstream\"},\n          {\"x-route-response-remove\", \"upstream\"},\n          {\"x-weighted-cluster-response\", \"upstream\"},\n          {\"x-weighted-cluster-response-remove\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-routeconfig-response\", \"upstream\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-weighted-cluster-response\", \"upstream\"},\n          {\"x-weighted-cluster-response\", \"weighted-cluster-1\"},\n          {\"x-route-response\", \"route\"},\n          {\"x-vhost-response\", \"vhost\"},\n          {\"x-routeconfig-response\", \"routeconfig\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates the relationship between route configuration, virtual host, route and weighted cluster\n// header manipulations when replacing.\nTEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterReplaceHeaderManipulation) {\n  initializeFilter(HeaderMode::Replace, true);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/vhost-route-and-weighted-clusters\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-routeconfig-request\", \"downstream\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-weighted-cluster-request\", \"downstream\"},\n          {\"x-unmodified\", \"request\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-unmodified\", \"request\"},\n          {\"x-weighted-cluster-request\", \"weighted-cluster-1\"},\n          {\"x-route-request\", \"route\"},\n          {\"x-vhost-request\", \"vhost\"},\n          {\"x-routeconfig-request\", \"routeconfig\"},\n          {\":path\", 
\"/vhost-route-and-weighted-clusters\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-routeconfig-response\", \"upstream\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-weighted-cluster-response\", \"upstream\"},\n          {\"x-unmodified\", \"response\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"response\"},\n          {\"x-weighted-cluster-response\", \"weighted-cluster-1\"},\n          {\"x-route-response\", \"route\"},\n          {\"x-vhost-response\", \"vhost\"},\n          {\"x-routeconfig-response\", \"routeconfig\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates that upstream host metadata can be emitted in headers.\nTEST_P(HeaderIntegrationTest, TestDynamicHeaders) {\n  prepareEDS();\n  initializeFilter(HeaderMode::Replace, true);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/vhost-route-and-weighted-clusters\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-routeconfig-request\", \"downstream\"},\n          {\"x-vhost-request\", \"downstream\"},\n          {\"x-route-request\", \"downstream\"},\n          {\"x-weighted-cluster-request\", \"downstream\"},\n          {\"x-unmodified\", \"request\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"vhost-headers.com\"},\n          {\"x-unmodified\", \"request\"},\n          {\"x-weighted-cluster-request\", \"weighted-cluster-1\"},\n          {\"x-route-request\", \"route\"},\n          {\"x-vhost-request\", \"vhost\"},\n          {\"x-routeconfig-request\", \"routeconfig\"},\n          {\":path\", 
\"/vhost-route-and-weighted-clusters\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-routeconfig-response\", \"upstream\"},\n          {\"x-vhost-response\", \"upstream\"},\n          {\"x-route-response\", \"upstream\"},\n          {\"x-weighted-cluster-response\", \"upstream\"},\n          {\"x-unmodified\", \"response\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"response\"},\n          {\"x-weighted-cluster-response\", \"weighted-cluster-1\"},\n          {\"x-weighted-cluster-dynamic\", \"weighted:metadata-value\"},\n          {\"x-route-response\", \"route\"},\n          {\"x-route-dynamic\", \"route:metadata-value\"},\n          {\"x-vhost-response\", \"vhost\"},\n          {\"x-vhost-dynamic\", \"vhost:metadata-value\"},\n          {\"x-routeconfig-response\", \"routeconfig\"},\n          {\"x-routeconfig-dynamic\", \"metadata-value\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates that XFF gets properly parsed.\nTEST_P(HeaderIntegrationTest, TestXFFParsing) {\n  initializeFilter(HeaderMode::Replace, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"xff-headers.com\"},\n          {\"x-forwarded-for\", \"1.2.3.4, 5.6.7.8 ,9.10.11.12\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"xff-headers.com\"},\n          {\"x-forwarded-for\", \"1.2.3.4, 5.6.7.8 ,9.10.11.12\"},\n          {\"x-real-ip\", \"5.6.7.8\"},\n          {\":path\", \"/test\"},\n          {\":method\", \"GET\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          
{\":status\", \"200\"},\n          {\"x-unmodified\", \"response\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"response\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates behavior around same header appending (both predefined headers and\n// other).\nTEST_P(HeaderIntegrationTest, TestAppendSameHeaders) {\n  initializeFilter(HeaderMode::Append, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/test\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"append-same-headers.com\"},\n          {\"user-agent\", \"token3\"},\n          {\"x-foo\", \"value3\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"append-same-headers.com\"},\n          {\":path\", \"/test\"},\n          {\":method\", \"GET\"},\n          {\"user-agent\", \"token3,token2,token1\"},\n          {\"x-foo\", \"value3\"},\n          {\"x-foo\", \"value2\"},\n          {\"x-foo\", \"value1\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-unmodified\", \"response\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"response\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates behavior when normalize path is off.\n// Route selection and path to upstream are the exact string literal\n// from downstream.\nTEST_P(HeaderIntegrationTest, TestPathAndRouteWhenNormalizePathOff) {\n  normalize_path_ = false;\n  initializeFilter(HeaderMode::Append, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/private/../public\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"path-sanitization.com\"},\n  
    },\n      Http::TestRequestHeaderMapImpl{{\":authority\", \"path-sanitization.com\"},\n                                     {\":path\", \"/private/../public\"},\n                                     {\":method\", \"GET\"},\n                                     {\"x-site\", \"private\"}},\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-unmodified\", \"response\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"response\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates behavior when normalize path is on.\n// Path to decide route and path to upstream are both\n// the normalized.\nTEST_P(HeaderIntegrationTest, TestPathAndRouteOnNormalizedPath) {\n  normalize_path_ = true;\n  initializeFilter(HeaderMode::Append, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/private/../public\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"path-sanitization.com\"},\n      },\n      Http::TestRequestHeaderMapImpl{{\":authority\", \"path-sanitization.com\"},\n                                     {\":path\", \"/public\"},\n                                     {\":method\", \"GET\"},\n                                     {\"x-site\", \"public\"}},\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-unmodified\", \"response\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-unmodified\", \"response\"},\n          {\":status\", \"200\"},\n      });\n}\n\n// Validates TE header is forwarded if it contains a supported value\nTEST_P(HeaderIntegrationTest, TestTeHeaderPassthrough) {\n  
initializeFilter(HeaderMode::Append, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"no-headers.com\"},\n          {\"x-request-foo\", \"downstram\"},\n          {\"connection\", \"te, close\"},\n          {\"te\", \"trailers\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"no-headers.com\"},\n          {\":path\", \"/\"},\n          {\":method\", \"GET\"},\n          {\"x-request-foo\", \"downstram\"},\n          {\"te\", \"trailers\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-return-foo\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-return-foo\", \"upstream\"},\n          {\":status\", \"200\"},\n          {\"connection\", \"close\"},\n      });\n}\n\n// Validates TE header is stripped if it contains an unsupported value\nTEST_P(HeaderIntegrationTest, TestTeHeaderSanitized) {\n  initializeFilter(HeaderMode::Append, false);\n  performRequest(\n      Http::TestRequestHeaderMapImpl{\n          {\":method\", \"GET\"},\n          {\":path\", \"/\"},\n          {\":scheme\", \"http\"},\n          {\":authority\", \"no-headers.com\"},\n          {\"x-request-foo\", \"downstram\"},\n          {\"connection\", \"te, mike, sam, will, close\"},\n          {\"te\", \"gzip\"},\n          {\"mike\", \"foo\"},\n          {\"sam\", \"bar\"},\n          {\"will\", \"baz\"},\n      },\n      Http::TestRequestHeaderMapImpl{\n          {\":authority\", \"no-headers.com\"},\n          {\":path\", \"/\"},\n          {\":method\", \"GET\"},\n          {\"x-request-foo\", \"downstram\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          
{\"content-length\", \"0\"},\n          {\":status\", \"200\"},\n          {\"x-return-foo\", \"upstream\"},\n      },\n      Http::TestResponseHeaderMapImpl{\n          {\"server\", \"envoy\"},\n          {\"x-return-foo\", \"upstream\"},\n          {\":status\", \"200\"},\n          {\"connection\", \"close\"},\n      });\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/header_prefix_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"test/integration/http_protocol_integration.h\"\n#include \"test/integration/server.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\n// Unfortunately in the Envoy test suite, the headers singleton is initialized\n// well before server start-up, so by the time the server has parsed the\n// bootstrap proto it's too late to set it.\n//\n// Instead, set the value early and regression test the bootstrap proto's validation of prefix\n// injection. We also register a custom header to make sure that registered headers interact well\n// with the prefix override.\nHttp::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>\n    cache_control_handle(Http::CustomHeaders::get().CacheControl);\n\nstatic const char* custom_prefix_ = \"x-custom\";\n\nclass HeaderPrefixIntegrationTest : public HttpProtocolIntegrationTest {\npublic:\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    ThreadSafeSingleton<Http::PrefixValue>::get().setPrefix(custom_prefix_);\n  }\n};\n\nTEST_P(HeaderPrefixIntegrationTest, CustomHeaderPrefix) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    bootstrap.set_header_prefix(\"x-custom\");\n  });\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response =\n      sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 0);\n\n  EXPECT_TRUE(response->headers().get(\n                  Envoy::Http::LowerCaseString{\"x-custom-upstream-service-time\"}) != nullptr);\n  EXPECT_EQ(\"x-custom-upstream-service-time\",\n            response->headers().EnvoyUpstreamServiceTime()->key().getStringView());\n\n  EXPECT_TRUE(upstream_request_->headers().get(\n                  Envoy::Http::LowerCaseString{\"x-custom-expected-rq-timeout-ms\"}) != nullptr);\n  EXPECT_EQ(\"x-custom-expected-rq-timeout-ms\",\n            
upstream_request_->headers().EnvoyExpectedRequestTimeoutMs()->key().getStringView());\n}\n\n// In this case, the header prefix set in the bootstrap will not match the\n// singleton header prefix in SetUpTestSuite, and Envoy will RELEASE_ASSERT on\n// start-up.\nTEST_P(HeaderPrefixIntegrationTest, FailedCustomHeaderPrefix) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    bootstrap.set_header_prefix(\"x-custom-but-not-set\");\n  });\n  EXPECT_DEATH(initialize(), \"Attempting to change the header prefix after it has been used!\");\n}\n\nINSTANTIATE_TEST_SUITE_P(Protocols, HeaderPrefixIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams(\n                             {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2},\n                             {FakeHttpConnection::Type::HTTP1})),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/health_check_integration_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/core/v3/health_check.pb.h\"\n\n#include \"test/common/http/http2/http2_frame.h\"\n#include \"test/common/upstream/utility.h\"\n#include \"test/integration/http_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\n// Integration tests for active health checking.\n// The tests fetch the cluster configuration using CDS in order to actively start health\n// checking after Envoy and the hosts are initialized.\nclass HealthCheckIntegrationTestBase : public Event::TestUsingSimulatedTime,\n                                       public HttpIntegrationTest {\npublic:\n  HealthCheckIntegrationTestBase(\n      Network::Address::IpVersion ip_version,\n      FakeHttpConnection::Type upstream_protocol = FakeHttpConnection::Type::HTTP2)\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ip_version,\n                            ConfigHelper::discoveredClustersBootstrap(\"GRPC\")),\n        ip_version_(ip_version), upstream_protocol_(upstream_protocol) {}\n\n  // Per-cluster information including the fake connection and stream.\n  struct ClusterData {\n    const std::string name_;\n    envoy::config::cluster::v3::Cluster cluster_;\n    FakeUpstreamPtr host_upstream_;\n    FakeStreamPtr host_stream_;\n    FakeHttpConnectionPtr host_fake_connection_;\n    FakeRawConnectionPtr host_fake_raw_connection_;\n\n    ClusterData(const std::string name) : name_(name) {}\n  };\n\n  void initialize() override {\n    // The endpoints and their configuration is received as part of a CDS response, and not\n    // statically defined clusters with active health-checking because in an integration test the\n    // hosts will be able to reply to the health-check requests only after the tests framework\n    // initialization has finished. 
This follows the same initialization procedure that is executed\n    // in the CDS integration tests.\n\n    use_lds_ = false;\n    // Controls how many fake_upstreams_.emplace_back(new FakeUpstream) will happen in\n    // BaseIntegrationTest::createUpstreams() (which is part of initialize()).\n    // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap\n    // config that you use!\n    setUpstreamCount(1);                                  // the CDS cluster\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // CDS uses gRPC uses HTTP2.\n\n    // HttpIntegrationTest::initialize() does many things:\n    // 1) It appends to fake_upstreams_ as many as you asked for via setUpstreamCount().\n    // 2) It updates your bootstrap config with the ports your fake upstreams are actually listening\n    //    on (since you're supposed to leave them as 0).\n    // 3) It creates and starts an IntegrationTestServer - the thing that wraps the almost-actual\n    //    Envoy used in the tests.\n    // 4) Bringing up the server usually entails waiting to ensure that any listeners specified in\n    //    the bootstrap config have come up, and registering them in a port map (see lookupPort()).\n    //    However, this test needs to defer all of that to later.\n    defer_listener_finalization_ = true;\n    HttpIntegrationTest::initialize();\n\n    // Let Envoy establish its connection to the CDS server.\n    acceptXdsConnection();\n\n    // Expect 1 for the statically specified CDS server.\n    test_server_->waitForGaugeGe(\"cluster_manager.active_clusters\", 1);\n\n    registerTestServerPorts({\"http\"});\n\n    // Create the regular (i.e. not an xDS server) upstreams. 
We create them manually here after\n    // initialize() because finalize() expects all fake_upstreams_ to correspond to a static\n    // cluster in the bootstrap config - which we don't want since we're using dynamic CDS.\n    for (auto& cluster : clusters_) {\n      cluster.host_upstream_ = std::make_unique<FakeUpstream>(0, upstream_protocol_, version_,\n                                                              timeSystem(), enable_half_close_);\n      cluster.cluster_ = ConfigHelper::buildStaticCluster(\n          cluster.name_, cluster.host_upstream_->localAddress()->ip()->port(),\n          Network::Test::getLoopbackAddressString(ip_version_));\n    }\n  }\n\n  void acceptXdsConnection() {\n    AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection.\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    xds_stream_->startGrpcStream();\n  }\n\n  // Closes the connections to the fake hosts.\n  void cleanupHostConnections() {\n    for (auto& cluster : clusters_) {\n      auto& host_fake_connection = cluster.host_fake_connection_;\n      if (host_fake_connection != nullptr) {\n        AssertionResult result = host_fake_connection->close();\n        RELEASE_ASSERT(result, result.message());\n        result = host_fake_connection->waitForDisconnect();\n        RELEASE_ASSERT(result, result.message());\n      }\n    }\n  }\n\n  // Adds an active health check specifier to the given cluster.\n  envoy::config::core::v3::HealthCheck*\n  addHealthCheck(envoy::config::cluster::v3::Cluster& cluster) {\n    // Add general health check specifier to the cluster.\n    auto* health_check = cluster.add_health_checks();\n    health_check->mutable_timeout()->set_seconds(30);\n    health_check->mutable_interval()->CopyFrom(\n        
Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n    health_check->mutable_no_traffic_interval()->CopyFrom(\n        Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n    health_check->mutable_unhealthy_threshold()->set_value(1);\n    health_check->mutable_healthy_threshold()->set_value(1);\n    return health_check;\n  }\n\n  // The number of clusters and their names must match the clusters in the CDS integration test\n  // configuration.\n  static constexpr size_t clusters_num_ = 2;\n  std::array<ClusterData, clusters_num_> clusters_{{{\"cluster_1\"}, {\"cluster_2\"}}};\n  Network::Address::IpVersion ip_version_;\n  FakeHttpConnection::Type upstream_protocol_;\n};\n\nstruct HttpHealthCheckIntegrationTestParams {\n  Network::Address::IpVersion ip_version;\n  FakeHttpConnection::Type upstream_protocol;\n};\n\nclass HttpHealthCheckIntegrationTest\n    : public testing::TestWithParam<HttpHealthCheckIntegrationTestParams>,\n      public HealthCheckIntegrationTestBase {\npublic:\n  HttpHealthCheckIntegrationTest()\n      : HealthCheckIntegrationTestBase(GetParam().ip_version, GetParam().upstream_protocol) {}\n\n  // Returns the 4 combinations for testing:\n  // [HTTP1, HTTP2] x [IPv4, IPv6]\n  static std::vector<HttpHealthCheckIntegrationTestParams>\n  getHttpHealthCheckIntegrationTestParams() {\n    std::vector<HttpHealthCheckIntegrationTestParams> ret;\n\n    for (auto ip_version : TestEnvironment::getIpVersionsForTest()) {\n      for (auto upstream_protocol :\n           {FakeHttpConnection::Type::HTTP1, FakeHttpConnection::Type::HTTP2}) {\n        ret.push_back(HttpHealthCheckIntegrationTestParams{ip_version, upstream_protocol});\n      }\n    }\n    return ret;\n  }\n\n  static std::string protocolTestParamsToString(\n      const ::testing::TestParamInfo<HttpHealthCheckIntegrationTestParams>& params) {\n    return absl::StrCat(\n        (params.param.ip_version == Network::Address::IpVersion::v4 ? 
\"IPv4_\" : \"IPv6_\"),\n        (params.param.upstream_protocol == FakeHttpConnection::Type::HTTP2 ? \"Http2Upstream\"\n                                                                           : \"HttpUpstream\"));\n  }\n\n  void TearDown() override { cleanUpXdsConnection(); }\n\n  // Adds a HTTP active health check specifier to the given cluster, and waits for the first health\n  // check probe to be received.\n  void initHttpHealthCheck(uint32_t cluster_idx) {\n    const envoy::type::v3::CodecClientType codec_client_type =\n        (FakeHttpConnection::Type::HTTP1 == upstream_protocol_)\n            ? envoy::type::v3::CodecClientType::HTTP1\n            : envoy::type::v3::CodecClientType::HTTP2;\n\n    auto& cluster_data = clusters_[cluster_idx];\n    auto* health_check = addHealthCheck(cluster_data.cluster_);\n    health_check->mutable_http_health_check()->set_path(\"/healthcheck\");\n    health_check->mutable_http_health_check()->set_codec_client_type(codec_client_type);\n\n    // Introduce the cluster using compareDiscoveryRequest / sendDiscoveryResponse.\n    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n    sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(\n        Config::TypeUrl::get().Cluster, {cluster_data.cluster_}, {cluster_data.cluster_}, {}, \"55\");\n\n    // Wait for upstream to receive health check request.\n    ASSERT_TRUE(cluster_data.host_upstream_->waitForHttpConnection(\n        *dispatcher_, cluster_data.host_fake_connection_));\n    ASSERT_TRUE(cluster_data.host_fake_connection_->waitForNewStream(*dispatcher_,\n                                                                     cluster_data.host_stream_));\n    ASSERT_TRUE(cluster_data.host_stream_->waitForEndStream(*dispatcher_));\n\n    EXPECT_EQ(cluster_data.host_stream_->headers().getPathValue(), \"/healthcheck\");\n    EXPECT_EQ(cluster_data.host_stream_->headers().getMethodValue(), \"GET\");\n    
EXPECT_EQ(cluster_data.host_stream_->headers().getHostValue(), cluster_data.name_);\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(\n    IpVersions, HttpHealthCheckIntegrationTest,\n    testing::ValuesIn(HttpHealthCheckIntegrationTest::getHttpHealthCheckIntegrationTestParams()),\n    HttpHealthCheckIntegrationTest::protocolTestParamsToString);\n\n// Tests that a healthy endpoint returns a valid HTTP health check response.\nTEST_P(HttpHealthCheckIntegrationTest, SingleEndpointHealthyHttp) {\n  const uint32_t cluster_idx = 0;\n  initialize();\n  initHttpHealthCheck(cluster_idx);\n\n  // Endpoint responds with healthy status to the health check.\n  clusters_[cluster_idx].host_stream_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  clusters_[cluster_idx].host_stream_->encodeData(1024, true);\n\n  // Verify that Envoy detected the health check response.\n  test_server_->waitForCounterGe(\"cluster.cluster_1.health_check.success\", 1);\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_1.health_check.success\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"cluster.cluster_1.health_check.failure\")->value());\n\n  // Clean up connections.\n  cleanupHostConnections();\n}\n\n// Tests that an unhealthy endpoint returns a valid HTTP health check response.\nTEST_P(HttpHealthCheckIntegrationTest, SingleEndpointUnhealthyHttp) {\n  const uint32_t cluster_idx = 0;\n  initialize();\n  initHttpHealthCheck(cluster_idx);\n\n  // Endpoint responds to the health check with unhealthy status.\n  clusters_[cluster_idx].host_stream_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n  clusters_[cluster_idx].host_stream_->encodeData(1024, true);\n\n  test_server_->waitForCounterGe(\"cluster.cluster_1.health_check.failure\", 1);\n  EXPECT_EQ(0, test_server_->counter(\"cluster.cluster_1.health_check.success\")->value());\n  EXPECT_EQ(1, 
test_server_->counter(\"cluster.cluster_1.health_check.failure\")->value());\n\n  // Clean up connections.\n  cleanupHostConnections();\n}\n\n// Tests that no HTTP health check response results in timeout and unhealthy endpoint.\nTEST_P(HttpHealthCheckIntegrationTest, SingleEndpointTimeoutHttp) {\n  const uint32_t cluster_idx = 0;\n  initialize();\n  initHttpHealthCheck(cluster_idx);\n\n  // Increase time until timeout (30s).\n  timeSystem().advanceTimeWait(std::chrono::seconds(30));\n\n  // Endpoint doesn't reply, and a healthcheck failure occurs (due to timeout).\n  test_server_->waitForCounterGe(\"cluster.cluster_1.health_check.failure\", 1);\n  EXPECT_EQ(0, test_server_->counter(\"cluster.cluster_1.health_check.success\")->value());\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_1.health_check.failure\")->value());\n\n  // Clean up connections\n  cleanupHostConnections();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/hotrestart_main.cc",
    "content": "#include \"common/stats/utility.h\"\n\n#include \"exe/main_common.h\"\n\n// NOLINT(namespace-envoy)\n\n/**\n * Custom main() for hotrestart_test. This should be identical to\n * source/exe/main.cc, except for the registration and increment of a new gauge\n * specifically for hot_restart.test.sh.\n */\nint main(int argc, char** argv) {\n  return Envoy::MainCommon::main(argc, argv, [](Envoy::Server::Instance& server) {\n    // Creates a gauge that will be incremented once and then never touched. This is\n    // for testing parent-gauge accumulation in hot_restart_test.sh.\n    Envoy::Stats::Utility::gaugeFromElements(server.stats(),\n                                             {Envoy::Stats::DynamicName(\"hotrestart_test_gauge\")},\n                                             Envoy::Stats::Gauge::ImportMode::Accumulate)\n        .inc();\n  });\n}\n"
  },
  {
    "path": "test/integration/hotrestart_test.sh",
    "content": "#!/bin/bash\n\n# For this test we use a slightly modiified test binary, based on\n# source/exe/enovy-static. If this starts failing to run or build, ensure that\n# source/exe/main.cc and ./hotrestart_main.cc have not diverged except for\n# adding the new gauge.\nexport ENVOY_BIN=\"${TEST_SRCDIR}\"/envoy/test/integration/hotrestart_main\n# shellcheck source=test/integration/test_utility.sh\nsource \"$TEST_SRCDIR/envoy/test/integration/test_utility.sh\"\n\n# TODO(htuch): In this test script, we are duplicating work done in test_environment.cc via sed.\n# Instead, we can add a simple C++ binary that links against test_environment.cc and uses the\n# substitution methods provided there.\nJSON_TEST_ARRAY=()\n\n# Ensure that the runtime watch root exist.\nmkdir -p \"${TEST_TMPDIR}\"/test/common/runtime/test_data/current/envoy\nmkdir -p \"${TEST_TMPDIR}\"/test/common/runtime/test_data/current/envoy_override\n\n# Parameterize IPv4 and IPv6 testing.\nif [[ -z \"${ENVOY_IP_TEST_VERSIONS}\" ]] || [[ \"${ENVOY_IP_TEST_VERSIONS}\" == \"all\" ]] \\\n  || [[ \"${ENVOY_IP_TEST_VERSIONS}\" == \"v4only\" ]]; then\n  HOT_RESTART_JSON_V4=\"${TEST_TMPDIR}\"/hot_restart_v4.yaml\n  echo \"building ${HOT_RESTART_JSON_V4} ...\"\n  sed -e \"s#{{ upstream_. 
}}#0#g\" \"${TEST_SRCDIR}/envoy\"/test/config/integration/server.yaml | \\\n    sed -e \"s#{{ test_rundir }}#$TEST_SRCDIR/envoy#\" | \\\n    sed -e \"s#{{ test_tmpdir }}#$TEST_TMPDIR#\" | \\\n    sed -e \"s#{{ ip_loopback_address }}#127.0.0.1#\" | \\\n    sed -e \"s#{{ reuse_port }}#false#\" | \\\n    sed -e \"s#{{ dns_lookup_family }}#V4_ONLY#\" | \\\n    sed -e \"s#{{ null_device_path }}#/dev/null#\" | \\\n    cat > \"${HOT_RESTART_JSON_V4}\"\n  JSON_TEST_ARRAY+=(\"${HOT_RESTART_JSON_V4}\")\nfi\n\nif [[ -z \"${ENVOY_IP_TEST_VERSIONS}\" ]] || [[ \"${ENVOY_IP_TEST_VERSIONS}\" == \"all\" ]] \\\n  || [[ \"${ENVOY_IP_TEST_VERSIONS}\" == \"v6only\" ]]; then\n  HOT_RESTART_JSON_V6=\"${TEST_TMPDIR}\"/hot_restart_v6.yaml\n  sed -e \"s#{{ upstream_. }}#0#g\" \"${TEST_SRCDIR}/envoy\"/test/config/integration/server.yaml | \\\n    sed -e \"s#{{ test_rundir }}#$TEST_SRCDIR/envoy#\" | \\\n    sed -e \"s#{{ test_tmpdir }}#$TEST_TMPDIR#\" | \\\n    sed -e \"s#{{ ip_loopback_address }}#::1#\" | \\\n    sed -e \"s#{{ reuse_port }}#false#\" | \\\n    sed -e \"s#{{ dns_lookup_family }}#v6_only#\" | \\\n    sed -e \"s#{{ null_device_path }}#/dev/null#\" | \\\n    cat > \"${HOT_RESTART_JSON_V6}\"\n  JSON_TEST_ARRAY+=(\"${HOT_RESTART_JSON_V6}\")\nfi\n\n# Also test for listening on UNIX domain sockets. We use IPv4 for the\n# upstreams to avoid too much wild sedding.\nHOT_RESTART_JSON_UDS=\"${TEST_TMPDIR}\"/hot_restart_uds.yaml\nSOCKET_DIR=\"$(mktemp -d /tmp/envoy_test_hotrestart.XXXXXX)\"\nsed -e \"s#{{ socket_dir }}#${SOCKET_DIR}#\" \"${TEST_SRCDIR}/envoy\"/test/config/integration/server_unix_listener.yaml | \\\n  sed -e \"s#{{ ip_loopback_address }}#127.0.0.1#\" | \\\n  sed -e \"s#{{ null_device_path }}#/dev/null#\" | \\\n  cat > \"${HOT_RESTART_JSON_UDS}\"\nJSON_TEST_ARRAY+=(\"${HOT_RESTART_JSON_UDS}\")\n\n# Test reuse port listener.\nHOT_RESTART_JSON_REUSE_PORT=\"${TEST_TMPDIR}\"/hot_restart_v4.yaml\necho \"building ${HOT_RESTART_JSON_V4} ...\"\nsed -e \"s#{{ upstream_. 
}}#0#g\" \"${TEST_SRCDIR}/envoy\"/test/config/integration/server.yaml | \\\n  sed -e \"s#{{ test_rundir }}#$TEST_SRCDIR/envoy#\" | \\\n  sed -e \"s#{{ test_tmpdir }}#$TEST_TMPDIR#\" | \\\n  sed -e \"s#{{ ip_loopback_address }}#127.0.0.1#\" | \\\n  sed -e \"s#{{ reuse_port }}#true#\" | \\\n  sed -e \"s#{{ dns_lookup_family }}#V4_ONLY#\" | \\\n  sed -e \"s#{{ null_device_path }}#/dev/null#\" | \\\n  cat > \"${HOT_RESTART_JSON_REUSE_PORT}\"\nJSON_TEST_ARRAY+=(\"${HOT_RESTART_JSON_REUSE_PORT}\")\n\n# Shared memory size varies by architecture\nSHARED_MEMORY_SIZE=\"104\"\n[[ \"$(uname -m)\" == \"aarch64\" ]] && SHARED_MEMORY_SIZE=\"120\"\n\necho \"Hot restart test using dynamic base id\"\n\nTEST_INDEX=0\nfunction run_testsuite() {\n  local BASE_ID BASE_ID_PATH HOT_RESTART_JSON=\"$1\" FAKE_SYMBOL_TABLE=\"$2\"\n  local SOCKET_PATH=@envoy_domain_socket\n  local SOCKET_MODE=0\n  if [ -n \"$3\" ] &&  [ -n \"$4\" ]\n  then\n     SOCKET_PATH=\"$3\"\n     SOCKET_MODE=\"$4\"\n  fi\n\n  start_test validation\n  check \"${ENVOY_BIN}\" -c \"${HOT_RESTART_JSON}\" --mode validate --service-cluster cluster \\\n      --use-fake-symbol-table \"$FAKE_SYMBOL_TABLE\" --service-node node --disable-hot-restart\n\n  BASE_ID_PATH=$(mktemp 'envoy_test_base_id.XXXXXX')\n  echo \"Selected dynamic base id path ${BASE_ID_PATH}\"\n\n  # Now start the real server, hot restart it twice, and shut it all down as a\n  # basic hot restart sanity test. 
We expect SERVER_0 to exit quickly when\n  # SERVER_2 starts, and are not relying on timeouts.\n  start_test \"Starting epoch 0\"\n  ADMIN_ADDRESS_PATH_0=\"${TEST_TMPDIR}\"/admin.0.\"${TEST_INDEX}\".address\n  run_in_background_saving_pid \"${ENVOY_BIN}\" -c \"${HOT_RESTART_JSON}\" \\\n      --restart-epoch 0  --use-dynamic-base-id --base-id-path \"${BASE_ID_PATH}\" \\\n      --service-cluster cluster --service-node node --use-fake-symbol-table \"$FAKE_SYMBOL_TABLE\" \\\n      --admin-address-path \"${ADMIN_ADDRESS_PATH_0}\" \\\n      --socket-path \"${SOCKET_PATH}\" --socket-mode \"${SOCKET_MODE}\"\n\n  BASE_ID=$(cat \"${BASE_ID_PATH}\")\n  while [ -z \"${BASE_ID}\" ]; do\n      echo \"Waiting for base id\"\n      sleep 0.5\n      BASE_ID=$(cat \"${BASE_ID_PATH}\")\n  done\n\n  echo \"Selected dynamic base id ${BASE_ID}\"\n\n  SERVER_0_PID=$BACKGROUND_PID\n\n  start_test \"Updating original config listener addresses\"\n  sleep 3\n\n  UPDATED_HOT_RESTART_JSON=\"${TEST_TMPDIR}\"/hot_restart_updated.\"${TEST_INDEX}\".yaml\n  \"${TEST_SRCDIR}/envoy\"/tools/socket_passing \"-o\" \"${HOT_RESTART_JSON}\" \"-a\" \"${ADMIN_ADDRESS_PATH_0}\" \\\n    \"-u\" \"${UPDATED_HOT_RESTART_JSON}\"\n\n  # Send SIGUSR1 signal to the first server, this should not kill it. 
Also send SIGHUP which should\n  # get eaten.\n  echo \"Sending SIGUSR1/SIGHUP to first server\"\n  kill -SIGUSR1 \"${SERVER_0_PID}\"\n  kill -SIGHUP \"${SERVER_0_PID}\"\n  sleep 3\n\n  disableHeapCheck\n\n  # To ensure that we don't accidentally change the /hot_restart_version\n  # string, compare it against a hard-coded string.\n  start_test \"Checking for consistency of /hot_restart_version\"\n  CLI_HOT_RESTART_VERSION=$(\"${ENVOY_BIN}\" --hot-restart-version --base-id \"${BASE_ID}\" 2>&1)\n  EXPECTED_CLI_HOT_RESTART_VERSION=\"11.${SHARED_MEMORY_SIZE}\"\n  echo \"The Envoy's hot restart version is ${CLI_HOT_RESTART_VERSION}\"\n  echo \"Now checking that the above version is what we expected.\"\n  check [ \"${CLI_HOT_RESTART_VERSION}\" = \"${EXPECTED_CLI_HOT_RESTART_VERSION}\" ]\n\n  start_test \"Checking for consistency of /hot_restart_version with --use-fake-symbol-table ${FAKE_SYMBOL_TABLE}\"\n  CLI_HOT_RESTART_VERSION=$(\"${ENVOY_BIN}\" --hot-restart-version --base-id \"${BASE_ID}\" \\\n    --use-fake-symbol-table \"$FAKE_SYMBOL_TABLE\" 2>&1)\n  CLI_HOT_RESTART_VERSION=$(strip_fake_symbol_table_warning \"$CLI_HOT_RESTART_VERSION\" \"$FAKE_SYMBOL_TABLE\")\n  EXPECTED_CLI_HOT_RESTART_VERSION=\"11.${SHARED_MEMORY_SIZE}\"\n  check [ \"${CLI_HOT_RESTART_VERSION}\" = \"${EXPECTED_CLI_HOT_RESTART_VERSION}\" ]\n\n  start_test \"Checking for match of --hot-restart-version and admin /hot_restart_version\"\n  ADMIN_ADDRESS_0=$(cat \"${ADMIN_ADDRESS_PATH_0}\")\n  echo \"fetching hot restart version from http://${ADMIN_ADDRESS_0}/hot_restart_version ...\"\n  ADMIN_HOT_RESTART_VERSION=$(curl -sg \"http://${ADMIN_ADDRESS_0}/hot_restart_version\")\n  echo \"Fetched ADMIN_HOT_RESTART_VERSION is ${ADMIN_HOT_RESTART_VERSION}\"\n  CLI_HOT_RESTART_VERSION=$(\"${ENVOY_BIN}\" --hot-restart-version --base-id \"${BASE_ID}\" \\\n    --use-fake-symbol-table \"$FAKE_SYMBOL_TABLE\" 2>&1)\n  CLI_HOT_RESTART_VERSION=$(strip_fake_symbol_table_warning \"$CLI_HOT_RESTART_VERSION\" 
\"$FAKE_SYMBOL_TABLE\")\n  check [ \"${ADMIN_HOT_RESTART_VERSION}\" = \"${CLI_HOT_RESTART_VERSION}\" ]\n\n  start_test \"Checking server.hot_restart_generation 1\"\n  GENERATION_0=$(scrape_stat \"${ADMIN_ADDRESS_0}\" \"server.hot_restart_generation\")\n  check [ \"$GENERATION_0\" = \"1\" ];\n\n  # Verify we can see server.live in the admin port.\n  SERVER_LIVE_0=$(scrape_stat \"${ADMIN_ADDRESS_0}\" \"server.live\")\n  check [ \"$SERVER_LIVE_0\" = \"1\" ];\n\n  # Capture the value of test_gauge from the initial parent: it should be 1.\n  TEST_GAUGE_0=$(scrape_stat \"${ADMIN_ADDRESS_0}\" \"hotrestart_test_gauge\")\n  check [ \"$TEST_GAUGE_0\" = \"1\" ];\n\n  enableHeapCheck\n\n  ADMIN_ADDRESS_PATH_1=\"${TEST_TMPDIR}\"/admin.1.\"${TEST_INDEX}\".address\n  run_in_background_saving_pid \"${ENVOY_BIN}\" -c \"${UPDATED_HOT_RESTART_JSON}\" \\\n      --restart-epoch 1 --base-id \"${BASE_ID}\" --service-cluster cluster --service-node node \\\n      --use-fake-symbol-table \"$FAKE_SYMBOL_TABLE\" --admin-address-path \"${ADMIN_ADDRESS_PATH_1}\" \\\n      --socket-path \"${SOCKET_PATH}\" --socket-mode \"${SOCKET_MODE}\"\n\n  SERVER_1_PID=$BACKGROUND_PID\n\n  # Wait for stat flushing\n  sleep 7\n\n  ADMIN_ADDRESS_1=$(cat \"${ADMIN_ADDRESS_PATH_1}\")\n  SERVER_LIVE_1=$(scrape_stat \"${ADMIN_ADDRESS_1}\" \"server.live\")\n  check [ \"$SERVER_LIVE_1\" = \"1\" ];\n\n  # Check to see that the SERVER_1 accumulates the test_gauge value from\n  # SERVER_0, This will be erased once SERVER_0 terminates.\n  if [ \"$TEST_GAUGE_0\" != 0 ]; then\n    start_test \"Checking that the hotrestart_test_gauge incorporates SERVER_0 and SERVER_1.\"\n    TEST_GAUGE_1=$(scrape_stat \"${ADMIN_ADDRESS_1}\" \"hotrestart_test_gauge\")\n    check [ \"$TEST_GAUGE_1\" = \"2\" ]\n  fi\n\n  start_test \"Checking that listener addresses have not changed\"\n  HOT_RESTART_JSON_1=\"${TEST_TMPDIR}\"/hot_restart.1.\"${TEST_INDEX}\".yaml\n  \"${TEST_SRCDIR}/envoy\"/tools/socket_passing \"-o\" 
\"${UPDATED_HOT_RESTART_JSON}\" \"-a\" \"${ADMIN_ADDRESS_PATH_1}\" \\\n    \"-u\" \"${HOT_RESTART_JSON_1}\"\n  CONFIG_DIFF=$(diff \"${UPDATED_HOT_RESTART_JSON}\" \"${HOT_RESTART_JSON_1}\")\n  [[ -z \"${CONFIG_DIFF}\" ]]\n\n  # Send SIGUSR1 signal to the second server, this should not kill it, and\n  # we prove that by checking its stats after having sent it a signal.\n  start_test \"Sending SIGUSR1 to SERVER_1.\"\n  kill -SIGUSR1 \"${SERVER_1_PID}\"\n  sleep 3\n\n  start_test \"Checking server.hot_restart_generation 2\"\n  GENERATION_1=$(scrape_stat \"${ADMIN_ADDRESS_1}\" \"server.hot_restart_generation\")\n  check [ \"$GENERATION_1\" = \"2\" ];\n\n  ADMIN_ADDRESS_PATH_2=\"${TEST_TMPDIR}\"/admin.2.\"${TEST_INDEX}\".address\n  start_test \"Starting epoch 2\"\n  run_in_background_saving_pid \"${ENVOY_BIN}\" -c \"${UPDATED_HOT_RESTART_JSON}\" \\\n      --restart-epoch 2  --base-id \"${BASE_ID}\" --service-cluster cluster --service-node node \\\n      --use-fake-symbol-table \"$FAKE_SYMBOL_TABLE\" --admin-address-path \"${ADMIN_ADDRESS_PATH_2}\" \\\n      --parent-shutdown-time-s 3 \\\n      --socket-path \"${SOCKET_PATH}\" --socket-mode \"${SOCKET_MODE}\"\n\n  SERVER_2_PID=$BACKGROUND_PID\n\n  # Now wait for the SERVER_0 to exit. It should occur immediately when SERVER_2 starts, as\n  # SERVER_1 will terminate SERVER_0 when it becomes the parent.\n  start_test \"Waiting for epoch 0 to finish.\"\n  echo \"time wait ${SERVER_0_PID}\"\n  time wait \"${SERVER_0_PID}\"\n\n  # Then wait for the SERVER_1 to exit, which should happen within a few seconds\n  # due to '--parent-shutdown-time-s 3' on SERVER_2.\n  start_test \"Waiting for epoch 1 to finish.\"\n  echo \"time wait ${SERVER_1_PID}\"\n  time wait \"${SERVER_1_PID}\"\n\n  # This tests that we are retaining the generation count. For most Gauges,\n  # we erase the parent contribution when the parent exits, but\n  # server.hot_restart_generation is excluded. 
Commenting out the call to\n  # stat_merger_->retainParentGaugeValue(hot_restart_generation_stat_name_)\n  # in source/server/hot_restarting_child.cc results in this test failing,\n  # with the generation being decremented back to 1.\n  start_test \"Checking server.hot_restart_generation 2\"\n  ADMIN_ADDRESS_2=$(cat \"${ADMIN_ADDRESS_PATH_2}\")\n  GENERATION_2=$(scrape_stat \"${ADMIN_ADDRESS_2}\" \"server.hot_restart_generation\")\n  check [ \"$GENERATION_2\" = \"3\" ];\n\n  # Check to see that the SERVER_2's test_gauge value reverts bac to 1, since\n  # its parents have now exited and we have erased their gauge contributions.\n  start_test \"Check that the hotrestart_test_gauge reported in SERVER_2 excludes parent contribution\"\n  wait_status=$(wait_for_stat \"$ADMIN_ADDRESS_2\" \"hotrestart_test_gauge\" -eq 1 5)\n  echo \"$wait_status\"\n  if [[ \"$wait_status\" != success* ]]; then\n    handle_failure timeout\n  fi\n\n  start_test \"Checking that listener addresses have not changed\"\n  HOT_RESTART_JSON_2=\"${TEST_TMPDIR}\"/hot_restart.2.\"${TEST_INDEX}\".yaml\n  \"${TEST_SRCDIR}/envoy\"/tools/socket_passing \"-o\" \"${UPDATED_HOT_RESTART_JSON}\" \"-a\" \"${ADMIN_ADDRESS_PATH_2}\" \\\n    \"-u\" \"${HOT_RESTART_JSON_2}\"\n  CONFIG_DIFF=$(diff \"${UPDATED_HOT_RESTART_JSON}\" \"${HOT_RESTART_JSON_2}\")\n  [[ -z \"${CONFIG_DIFF}\" ]]\n\n  # Now term the last server, and the other one should exit also.\n  start_test \"Killing and waiting for epoch 2\"\n  kill \"${SERVER_2_PID}\"\n  wait \"${SERVER_2_PID}\"\n}\n\n# TODO(#13399): remove this helper function and the references to it, as long as\n# the references to $FAKE_SYMBOL_TABLE.\nfunction strip_fake_symbol_table_warning() {\n  local INPUT=\"$1\"\n  local FAKE_SYMBOL_TABLE=\"$2\"\n  if [ \"$FAKE_SYMBOL_TABLE\" = \"1\" ]; then\n    echo \"$INPUT\" | grep -v \"Fake symbol tables have been removed\"\n  else\n    echo \"$INPUT\"\n  fi\n}\n\n# Hotrestart in abstract namespace\nfor HOT_RESTART_JSON in 
\"${JSON_TEST_ARRAY[@]}\"\ndo\n  # Run one of the tests with real symbol tables. No need to do all of them.\n  if [ \"$TEST_INDEX\" = \"0\" ]; then\n    run_testsuite \"$HOT_RESTART_JSON\" \"0\" || exit 1\n  fi\n\n  run_testsuite \"$HOT_RESTART_JSON\" \"1\" || exit 1\ndone\n\n# Hotrestart in specified UDS\n# Real symbol tables are the default, so I had run just one with fake symbol tables\n# (Switch the \"0\" and \"1\" in the second arg in the two run_testsuite calls below).\nif [ \"$TEST_INDEX\" = \"0\" ]; then\n  run_testsuite \"${HOT_RESTART_JSON_V4}\" \"0\" \"${SOCKET_DIR}/envoy_domain_socket\" \"600\" || exit 1\nfi\n\nrun_testsuite \"${HOT_RESTART_JSON_V4}\" \"1\" \"${SOCKET_DIR}/envoy_domain_socket\" \"600\" || exit 1\n\nstart_test \"disabling hot_restart by command line.\"\nCLI_HOT_RESTART_VERSION=$(\"${ENVOY_BIN}\" --hot-restart-version --disable-hot-restart 2>&1)\ncheck [ \"disabled\" = \"${CLI_HOT_RESTART_VERSION}\" ]\n\n# Validating socket-path permission\nstart_test socket-mode for socket path\nrun_in_background_saving_pid \"${ENVOY_BIN}\" -c \"${HOT_RESTART_JSON}\" \\\n      --restart-epoch 0  --base-id 0 --base-id-path \"${BASE_ID_PATH}\" \\\n      --socket-path \"${SOCKET_DIR}\"/envoy_domain_socket --socket-mode 644 \\\n      --service-cluster cluster --service-node node --use-fake-symbol-table \"$FAKE_SYMBOL_TABLE\" \\\n      --admin-address-path \"${ADMIN_ADDRESS_PATH_0}\"\nsleep 3\nEXPECTED_SOCKET_MODE=$(stat -c '%a' \"${SOCKET_DIR}\"/envoy_domain_socket_parent_0)\ncheck [ \"644\" = \"${EXPECTED_SOCKET_MODE}\" ]\nkill \"${BACKGROUND_PID}\"\nwait \"${BACKGROUND_PID}\"\n\necho \"PASS\"\n"
  },
  {
    "path": "test/integration/http2_integration_test.cc",
    "content": "#include \"test/integration/http2_integration_test.h\"\n\n#include <algorithm>\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/random_generator.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/network/socket_option_impl.h\"\n\n#include \"test/integration/utility.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing ::testing::HasSubstr;\nusing ::testing::MatchesRegex;\n\nnamespace Envoy {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, Http2IntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(Http2IntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, false, false);\n}\n\nTEST_P(Http2IntegrationTest, RouterRequestAndResponseWithGiantBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false);\n}\n\nTEST_P(Http2IntegrationTest, FlowControlOnAndGiantBody) {\n  config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream.\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false);\n}\n\nTEST_P(Http2IntegrationTest, LargeFlowControlOnAndGiantBody) {\n  config_helper_.setBufferLimits(128 * 1024,\n                                 128 * 1024); // Set buffer limits upstream and downstream.\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false);\n}\n\nTEST_P(Http2IntegrationTest, 
RouterRequestAndResponseWithBodyAndContentLengthNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, false, true);\n}\n\nTEST_P(Http2IntegrationTest, RouterRequestAndResponseWithGiantBodyAndContentLengthNoBuffer) {\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true);\n}\n\nTEST_P(Http2IntegrationTest, FlowControlOnAndGiantBodyWithContentLength) {\n  config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream.\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true);\n}\n\nTEST_P(Http2IntegrationTest, LargeFlowControlOnAndGiantBodyWithContentLength) {\n  config_helper_.setBufferLimits(128 * 1024,\n                                 128 * 1024); // Set buffer limits upstream and downstream.\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true);\n}\n\nTEST_P(Http2IntegrationTest, RouterHeaderOnlyRequestAndResponseNoBuffer) {\n  testRouterHeaderOnlyRequestAndResponse();\n}\n\nTEST_P(Http2IntegrationTest, RouterRequestAndResponseLargeHeaderNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, true);\n}\n\nTEST_P(Http2IntegrationTest, RouterUpstreamDisconnectBeforeRequestcomplete) {\n  testRouterUpstreamDisconnectBeforeRequestComplete();\n}\n\nTEST_P(Http2IntegrationTest, RouterUpstreamDisconnectBeforeResponseComplete) {\n  testRouterUpstreamDisconnectBeforeResponseComplete();\n}\n\nTEST_P(Http2IntegrationTest, RouterDownstreamDisconnectBeforeRequestComplete) {\n  testRouterDownstreamDisconnectBeforeRequestComplete();\n}\n\nTEST_P(Http2IntegrationTest, RouterDownstreamDisconnectBeforeResponseComplete) {\n  testRouterDownstreamDisconnectBeforeResponseComplete();\n}\n\nTEST_P(Http2IntegrationTest, RouterUpstreamResponseBeforeRequestComplete) {\n  testRouterUpstreamResponseBeforeRequestComplete();\n}\n\nTEST_P(Http2IntegrationTest, Retry) { testRetry(); }\n\nTEST_P(Http2IntegrationTest, RetryAttemptCount) { 
testRetryAttemptCountHeader(); }\n\nTEST_P(Http2IntegrationTest, LargeRequestTrailersRejected) { testLargeRequestTrailers(66, 60); }\n\n// Verify downstream codec stream flush timeout.\nTEST_P(Http2IntegrationTest, CodecStreamIdleTimeout) {\n  config_helper_.setBufferLimits(1024, 1024);\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_stream_idle_timeout()->set_seconds(0);\n        constexpr uint64_t IdleTimeoutMs = 400;\n        hcm.mutable_stream_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000);\n      });\n  initialize();\n  envoy::config::core::v3::Http2ProtocolOptions http2_options;\n  http2_options.mutable_initial_stream_window_size()->set_value(65535);\n  codec_client_ = makeRawHttpConnection(makeClientConnection(lookupPort(\"http\")), http2_options);\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(70000, true);\n  test_server_->waitForCounterEq(\"http2.tx_flush_timeout\", 1);\n  response->waitForReset();\n}\n\nTEST_P(Http2IntegrationTest, Http2DownstreamKeepalive) {\n  constexpr uint64_t interval_ms = 1;\n  constexpr uint64_t timeout_ms = 250;\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_http2_protocol_options()\n            ->mutable_connection_keepalive()\n            ->mutable_interval()\n            ->set_nanos(interval_ms * 1000 * 1000);\n        hcm.mutable_http2_protocol_options()\n            ->mutable_connection_keepalive()\n            ->mutable_timeout()\n            ->set_nanos(timeout_ms * 1000 * 1000);\n      });\n  initialize();\n  codec_client_ = 
makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n\n  // This call is NOT running the event loop of the client, so downstream PINGs will\n  // not receive a response.\n  test_server_->waitForCounterEq(\"http2.keepalive_timeout\", 1,\n                                 std::chrono::milliseconds(timeout_ms * 2));\n\n  response->waitForReset();\n}\n\nstatic std::string response_metadata_filter = R\"EOF(\nname: response-metadata-filter\ntyped_config:\n  \"@type\": type.googleapis.com/google.protobuf.Empty\n)EOF\";\n\n// Verifies metadata can be sent at different locations of the responses.\nTEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends the first request.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  // Sends metadata before response header.\n  const std::string key = \"key\";\n  std::string value = std::string(80 * 1024, '1');\n  Http::MetadataMap metadata_map = {{key, value}};\n  Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  Http::MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(12, true);\n\n  // Verifies metadata is received by the client.\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(response->metadataMap().find(key)->second, value);\n\n  // Sends the second request.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  // Sends metadata after response header followed by an empty data frame with 
end_stream true.\n  value = std::string(10, '2');\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  metadata_map = {{key, value}};\n  metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  metadata_map_vector.erase(metadata_map_vector.begin());\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeData(0, true);\n\n  // Verifies metadata is received by the client.\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(response->metadataMap().find(key)->second, value);\n\n  // Sends the third request.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  // Sends metadata after response header and before data.\n  value = std::string(10, '3');\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  metadata_map = {{key, value}};\n  metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  metadata_map_vector.erase(metadata_map_vector.begin());\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeData(10, true);\n\n  // Verifies metadata is received by the client.\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(response->metadataMap().find(key)->second, value);\n\n  // Sends the fourth request.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  // Sends metadata between data frames.\n  value = std::string(10, '4');\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(10, false);\n  metadata_map = {{key, value}};\n  metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  metadata_map_vector.erase(metadata_map_vector.begin());\n  
metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeData(10, true);\n\n  // Verifies metadata is received by the client.\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(response->metadataMap().find(key)->second, value);\n\n  // Sends the fifth request.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  // Sends metadata after the last non-empty data frames.\n  value = std::string(10, '5');\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(10, false);\n  metadata_map = {{key, value}};\n  metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  metadata_map_vector.erase(metadata_map_vector.begin());\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeData(0, true);\n\n  // Verifies metadata is received by the client.\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(response->metadataMap().find(key)->second, value);\n\n  // Sends the sixth request.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  // Sends metadata before reset.\n  value = std::string(10, '6');\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(10, false);\n  metadata_map = {{key, value}};\n  metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  metadata_map_vector.erase(metadata_map_vector.begin());\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeResetStream();\n\n  // Verifies stream is reset.\n  response->waitForReset();\n  
ASSERT_FALSE(response->complete());\n}\n\nTEST_P(Http2MetadataIntegrationTest, ProxyMultipleMetadata) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends a request.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  const int size = 4;\n  std::vector<Http::MetadataMapVector> multiple_vecs(size);\n  for (int i = 0; i < size; i++) {\n    Random::RandomGeneratorImpl random;\n    int value_size = random.random() % Http::METADATA_MAX_PAYLOAD_SIZE + 1;\n    Http::MetadataMap metadata_map = {{std::string(i, 'a'), std::string(value_size, 'b')}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    multiple_vecs[i].push_back(std::move(metadata_map_ptr));\n  }\n  upstream_request_->encodeMetadata(multiple_vecs[0]);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeMetadata(multiple_vecs[1]);\n  upstream_request_->encodeData(12, false);\n  upstream_request_->encodeMetadata(multiple_vecs[2]);\n  upstream_request_->encodeData(12, false);\n  upstream_request_->encodeMetadata(multiple_vecs[3]);\n  upstream_request_->encodeData(12, true);\n\n  // Verifies multiple metadata are received by the client.\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  for (int i = 0; i < size; i++) {\n    for (const auto& metadata : *multiple_vecs[i][0]) {\n      EXPECT_EQ(response->metadataMap().find(metadata.first)->second, metadata.second);\n    }\n  }\n  EXPECT_EQ(response->metadataMap().size(), multiple_vecs.size());\n}\n\nTEST_P(Http2MetadataIntegrationTest, ProxyInvalidMetadata) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends a request.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  // Sends over-sized metadata before response header.\n  
const std::string key = \"key\";\n  std::string value = std::string(1024 * 1024, 'a');\n  Http::MetadataMap metadata_map = {{key, value}};\n  Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  Http::MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeData(12, false);\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeData(12, true);\n\n  // Verifies metadata is not received by the client.\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(response->metadataMap().size(), 0);\n}\n\nvoid verifyExpectedMetadata(Http::MetadataMap metadata_map, std::set<std::string> keys) {\n  for (const auto& key : keys) {\n    // keys are the same as their corresponding values.\n    EXPECT_EQ(metadata_map.find(key)->second, key);\n  }\n  EXPECT_EQ(metadata_map.size(), keys.size());\n}\n\nTEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) {\n  addFilters({response_metadata_filter});\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.set_proxy_100_continue(true); });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Upstream responds with headers.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  std::set<std::string> expected_metadata_keys = {\"headers\", \"duplicate\"};\n  verifyExpectedMetadata(response->metadataMap(), 
expected_metadata_keys);\n\n  // Upstream responds with headers and data.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(100, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  expected_metadata_keys.insert(\"data\");\n  verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys);\n  EXPECT_EQ(response->keyCount(\"duplicate\"), 2);\n\n  // Upstream responds with headers, data and trailers.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(10, false);\n  Http::TestResponseTrailerMapImpl response_trailers{{\"response\", \"trailer\"}};\n  upstream_request_->encodeTrailers(response_trailers);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  expected_metadata_keys.insert(\"trailers\");\n  verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys);\n  EXPECT_EQ(response->keyCount(\"duplicate\"), 3);\n\n  // Upstream responds with headers, 100-continue and data.\n  response =\n      codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                        {\":path\", \"/dynamo/url\"},\n                                                                        {\":scheme\", \"http\"},\n                                                                        {\":authority\", \"host\"},\n                                                                        {\"expect\", \"100-continue\"}},\n                                         10);\n\n  waitForNextUpstreamRequest();\n  upstream_request_->encode100ContinueHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n  
response->waitForContinueHeaders();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(100, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  expected_metadata_keys.erase(\"trailers\");\n  expected_metadata_keys.insert(\"100-continue\");\n  verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys);\n  EXPECT_EQ(response->keyCount(\"duplicate\"), 4);\n\n  // Upstream responds with headers and metadata that will not be consumed.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n  Http::MetadataMap metadata_map = {{\"aaa\", \"aaa\"}};\n  Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  Http::MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  expected_metadata_keys.erase(\"data\");\n  expected_metadata_keys.erase(\"100-continue\");\n  expected_metadata_keys.insert(\"aaa\");\n  expected_metadata_keys.insert(\"keep\");\n  verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys);\n\n  // Upstream responds with headers, data and metadata that will be consumed.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n  metadata_map = {{\"consume\", \"consume\"}};\n  metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  metadata_map_vector.clear();\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  upstream_request_->encodeMetadata(metadata_map_vector);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(100, true);\n\n  response->waitForEndStream();\n  
ASSERT_TRUE(response->complete());\n  expected_metadata_keys.erase(\"aaa\");\n  expected_metadata_keys.insert(\"data\");\n  expected_metadata_keys.insert(\"replace\");\n  verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys);\n  EXPECT_EQ(response->keyCount(\"duplicate\"), 2);\n}\n\nTEST_P(Http2MetadataIntegrationTest, ProxyMultipleMetadataReachSizeLimit) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends a request.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  // Sends multiple metadata after response header until max size limit is reached.\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  const int size = 200;\n  std::vector<Http::MetadataMapVector> multiple_vecs(size);\n  for (int i = 0; i < size; i++) {\n    Http::MetadataMap metadata_map = {{\"key\", std::string(10000, 'a')}};\n    Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n    multiple_vecs[i].push_back(std::move(metadata_map_ptr));\n    upstream_request_->encodeMetadata(multiple_vecs[i]);\n  }\n  upstream_request_->encodeData(12, true);\n\n  // Verifies reset is received.\n  response->waitForReset();\n  ASSERT_FALSE(response->complete());\n}\n\n// Verifies small metadata can be sent at different locations of a request.\nTEST_P(Http2MetadataIntegrationTest, ProxySmallMetadataInRequest) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  Http::MetadataMap metadata_map = {{\"key\", \"value\"}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendData(*request_encoder_, 1, false);\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  
codec_client_->sendData(*request_encoder_, 1, false);\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  Http::TestRequestTrailerMapImpl request_trailers{{\"request\", \"trailer\"}};\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n\n  waitForNextUpstreamRequest();\n\n  // Verifies metadata is received by upstream.\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  EXPECT_EQ(upstream_request_->metadataMap().find(\"key\")->second, \"value\");\n  EXPECT_EQ(upstream_request_->metadataMap().size(), 1);\n  EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find(\"key\")->second, 3);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n}\n\n// Verifies large metadata can be sent at different locations of a request.\nTEST_P(Http2MetadataIntegrationTest, ProxyLargeMetadataInRequest) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  std::string value = std::string(80 * 1024, '1');\n  Http::MetadataMap metadata_map = {{\"key\", value}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendData(*request_encoder_, 1, false);\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendData(*request_encoder_, 1, false);\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  Http::TestRequestTrailerMapImpl request_trailers{{\"request\", \"trailer\"}};\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n\n  waitForNextUpstreamRequest();\n\n  // Verifies metadata is received upstream.\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  EXPECT_EQ(upstream_request_->metadataMap().find(\"key\")->second, value);\n  EXPECT_EQ(upstream_request_->metadataMap().size(), 1);\n  
EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find(\"key\")->second, 3);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n}\n\nTEST_P(Http2MetadataIntegrationTest, RequestMetadataReachSizeLimit) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  std::string value = std::string(10 * 1024, '1');\n  Http::MetadataMap metadata_map = {{\"key\", value}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendData(*request_encoder_, 1, false);\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendData(*request_encoder_, 1, false);\n  for (int i = 0; i < 200; i++) {\n    codec_client_->sendMetadata(*request_encoder_, metadata_map);\n    if (codec_client_->disconnected()) {\n      break;\n    }\n  }\n\n  // Verifies client connection will be closed.\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  ASSERT_FALSE(response->complete());\n}\n\nstatic std::string request_metadata_filter = R\"EOF(\nname: request-metadata-filter\ntyped_config:\n  \"@type\": type.googleapis.com/google.protobuf.Empty\n)EOF\";\n\nTEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) {\n  addFilters({request_metadata_filter});\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.set_proxy_100_continue(true); });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends a headers only request.\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  
ASSERT_TRUE(response->complete());\n  // Verifies a headers metadata added.\n  std::set<std::string> expected_metadata_keys = {\"headers\"};\n  expected_metadata_keys.insert(\"metadata\");\n  verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys);\n\n  // Sends a headers only request with metadata. An empty data frame carries end_stream.\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  response = std::move(encoder_decoder.second);\n  Http::MetadataMap metadata_map = {{\"consume\", \"consume\"}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendData(*request_encoder_, 0, true);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  expected_metadata_keys.insert(\"data\");\n  expected_metadata_keys.insert(\"metadata\");\n  expected_metadata_keys.insert(\"replace\");\n  verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys);\n  EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find(\"metadata\")->second, 3);\n  // Verifies zero length data received, and end_stream is true.\n  EXPECT_EQ(true, upstream_request_->receivedData());\n  EXPECT_EQ(0, upstream_request_->bodyLength());\n  EXPECT_EQ(true, upstream_request_->complete());\n\n  // Sends headers, data, metadata and trailer.\n  auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder_2.first;\n  response = std::move(encoder_decoder_2.second);\n  codec_client_->sendData(*request_encoder_, 10, false);\n  metadata_map = {{\"consume\", \"consume\"}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  Http::TestRequestTrailerMapImpl request_trailers{{\"trailer\", \"trailer\"}};\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n 
 waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  expected_metadata_keys.insert(\"trailers\");\n  verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys);\n  EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find(\"metadata\")->second, 4);\n\n  // Sends headers, large data, metadata. Large data triggers decodeData() multiple times, and each\n  // time, a \"data\" metadata is added.\n  auto encoder_decoder_3 = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder_3.first;\n  response = std::move(encoder_decoder_3.second);\n  codec_client_->sendData(*request_encoder_, 100000, false);\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendData(*request_encoder_, 100000, true);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n\n  expected_metadata_keys.erase(\"trailers\");\n  verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys);\n  EXPECT_GE(upstream_request_->duplicatedMetadataKeyCount().find(\"data\")->second, 2);\n  EXPECT_GE(upstream_request_->duplicatedMetadataKeyCount().find(\"metadata\")->second, 3);\n\n  // Sends multiple metadata.\n  auto encoder_decoder_4 = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder_4.first;\n  response = std::move(encoder_decoder_4.second);\n  metadata_map = {{\"metadata1\", \"metadata1\"}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendData(*request_encoder_, 10, false);\n  metadata_map = {{\"metadata2\", \"metadata2\"}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  metadata_map = {{\"consume\", \"consume\"}};\n  
codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  expected_metadata_keys.insert(\"metadata1\");\n  expected_metadata_keys.insert(\"metadata2\");\n  expected_metadata_keys.insert(\"trailers\");\n  verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys);\n  EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find(\"metadata\")->second, 6);\n}\n\nstatic std::string decode_headers_only = R\"EOF(\nname: decode-headers-only\ntyped_config:\n  \"@type\": type.googleapis.com/google.protobuf.Empty\n)EOF\";\n\nvoid Http2MetadataIntegrationTest::runHeaderOnlyTest(bool send_request_body, size_t body_size) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.set_proxy_100_continue(true); });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends a request with body. 
Only headers will pass through filters.\n  IntegrationStreamDecoderPtr response;\n  if (send_request_body) {\n    response = codec_client_->makeRequestWithBody(\n        Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                       {\":path\", \"/test/long/url\"},\n                                       {\":scheme\", \"http\"},\n                                       {\":authority\", \"host\"}},\n        body_size);\n  } else {\n    response = codec_client_->makeHeaderOnlyRequest(\n        Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                       {\":path\", \"/test/long/url\"},\n                                       {\":scheme\", \"http\"},\n                                       {\":authority\", \"host\"}});\n  }\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n}\n\nvoid Http2MetadataIntegrationTest::verifyHeadersOnlyTest() {\n  // Verifies a headers metadata added.\n  std::set<std::string> expected_metadata_keys = {\"headers\"};\n  expected_metadata_keys.insert(\"metadata\");\n  verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys);\n\n  // Verifies zero length data received, and end_stream is true.\n  EXPECT_EQ(true, upstream_request_->receivedData());\n  EXPECT_EQ(0, upstream_request_->bodyLength());\n  EXPECT_EQ(true, upstream_request_->complete());\n}\n\nTEST_P(Http2MetadataIntegrationTest, DecodingHeadersOnlyRequestWithRequestMetadataEmptyData) {\n  addFilters({request_metadata_filter, decode_headers_only});\n\n  // Send a request with body, and body size is 0.\n  runHeaderOnlyTest(true, 0);\n  verifyHeadersOnlyTest();\n}\n\nTEST_P(Http2MetadataIntegrationTest, DecodingHeadersOnlyRequestWithRequestMetadataNoneEmptyData) {\n  addFilters({request_metadata_filter, decode_headers_only});\n  // Send a request with body, and body size is 
128.\n  runHeaderOnlyTest(true, 128);\n  verifyHeadersOnlyTest();\n}\n\nTEST_P(Http2MetadataIntegrationTest, DecodingHeadersOnlyRequestWithRequestMetadataDiffFilterOrder) {\n  addFilters({decode_headers_only, request_metadata_filter});\n  // Send a request with body, and body size is 128.\n  runHeaderOnlyTest(true, 128);\n  verifyHeadersOnlyTest();\n}\n\nTEST_P(Http2MetadataIntegrationTest, HeadersOnlyRequestWithRequestMetadata) {\n  addFilters({request_metadata_filter});\n  // Send a headers only request.\n  runHeaderOnlyTest(false, 0);\n  verifyHeadersOnlyTest();\n}\n\nvoid Http2MetadataIntegrationTest::testRequestMetadataWithStopAllFilter() {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends multiple metadata.\n  const size_t size = 10;\n  default_request_headers_.addCopy(\"content_size\", std::to_string(size));\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  Http::MetadataMap metadata_map = {{\"metadata1\", \"metadata1\"}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  codec_client_->sendData(*request_encoder_, size, false);\n  metadata_map = {{\"metadata2\", \"metadata2\"}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  metadata_map = {{\"consume\", \"consume\"}};\n  codec_client_->sendMetadata(*request_encoder_, metadata_map);\n  Http::TestRequestTrailerMapImpl request_trailers{{\"trailer\", \"trailer\"}};\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  std::set<std::string> expected_metadata_keys = {\"headers\",   \"data\",    \"metadata\", \"metadata1\",\n                                                  \"metadata2\", \"replace\", 
\"trailers\"};\n  verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys);\n  EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find(\"metadata\")->second, 6);\n}\n\nstatic std::string metadata_stop_all_filter = R\"EOF(\nname: metadata-stop-all-filter\ntyped_config:\n  \"@type\": type.googleapis.com/google.protobuf.Empty\n)EOF\";\n\nTEST_P(Http2MetadataIntegrationTest, RequestMetadataWithStopAllFilterBeforeMetadataFilter) {\n  addFilters({request_metadata_filter, metadata_stop_all_filter});\n  testRequestMetadataWithStopAllFilter();\n}\n\nTEST_P(Http2MetadataIntegrationTest, RequestMetadataWithStopAllFilterAfterMetadataFilter) {\n  addFilters({metadata_stop_all_filter, request_metadata_filter});\n  testRequestMetadataWithStopAllFilter();\n}\n\nTEST_P(Http2MetadataIntegrationTest, TestAddEncodedMetadata) {\n  config_helper_.addFilter(R\"EOF(\nname: encode-headers-return-stop-all-filter\n)EOF\");\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Upstream responds with headers, data and trailers.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  const int count = 70;\n  const int size = 1000;\n  const int added_decoded_data_size = 1;\n\n  default_response_headers_.addCopy(\"content_size\", std::to_string(count * size));\n  default_response_headers_.addCopy(\"added_size\", std::to_string(added_decoded_data_size));\n  default_response_headers_.addCopy(\"is_first_trigger\", \"value\");\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  for (int i = 0; i < count - 1; i++) {\n    upstream_request_->encodeData(size, false);\n  }\n\n  upstream_request_->encodeData(size, false);\n  Http::TestResponseTrailerMapImpl response_trailers{{\"response\", \"trailer\"}};\n  upstream_request_->encodeTrailers(response_trailers);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  
EXPECT_EQ(response->metadataMap().find(\"headers\")->second, \"headers\");\n  EXPECT_EQ(response->metadataMap().find(\"data\")->second, \"data\");\n  EXPECT_EQ(response->metadataMap().find(\"trailers\")->second, \"trailers\");\n  EXPECT_EQ(response->metadataMap().size(), 3);\n  EXPECT_EQ(count * size + added_decoded_data_size * 2, response->body().size());\n}\n\nTEST_P(Http2IntegrationTest, GrpcRouterNotFound) {\n  config_helper_.setDefaultHostAndRoute(\"foo.com\", \"/found\");\n  initialize();\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"POST\", \"/service/notfound\", \"\", downstream_protocol_, version_, \"host\",\n      Http::Headers::get().ContentTypeValues.Grpc);\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, response->headers().getContentTypeValue());\n  EXPECT_EQ(\"12\", response->headers().getGrpcStatusValue());\n}\n\nTEST_P(Http2IntegrationTest, GrpcRetry) { testGrpcRetry(); }\n\n// Verify the case where there is an HTTP/2 codec/protocol error with an active stream.\nTEST_P(Http2IntegrationTest, CodecErrorAfterStreamStart) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends a request.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  // Send bogus raw data on the connection.\n  Buffer::OwnedImpl bogus_data(\"some really bogus data\");\n  codec_client_->rawConnection().write(bogus_data, false);\n\n  // Verifies reset is received.\n  response->waitForReset();\n}\n\nTEST_P(Http2IntegrationTest, BadMagic) {\n  initialize();\n  std::string response;\n  auto connection = createConnectionDriver(\n      lookupPort(\"http\"), \"hello\",\n      [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n      });\n  
connection->run();\n  EXPECT_EQ(\"\", response);\n}\n\nTEST_P(Http2IntegrationTest, BadFrame) {\n  initialize();\n  std::string response;\n  auto connection = createConnectionDriver(\n      lookupPort(\"http\"), \"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\nhelloworldcauseanerror\",\n      [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n      });\n  connection->run();\n  EXPECT_TRUE(response.find(\"SETTINGS expected\") != std::string::npos);\n}\n\n// Send client headers, a GoAway and then a body and ensure the full request and\n// response are received.\nTEST_P(Http2IntegrationTest, GoAway) {\n  config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter());\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"}, {\":path\", \"/healthcheck\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  codec_client_->goAway();\n  codec_client_->sendData(*request_encoder_, 0, true);\n  response->waitForEndStream();\n  codec_client_->close();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(Http2IntegrationTest, Trailers) { testTrailers(1024, 2048, false, false); }\n\nTEST_P(Http2IntegrationTest, TrailersGiantBody) {\n  testTrailers(1024 * 1024, 1024 * 1024, false, false);\n}\n\nTEST_P(Http2IntegrationTest, GrpcRequestTimeout) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* route_config = hcm.mutable_route_config();\n        auto* virtual_host = route_config->mutable_virtual_hosts(0);\n        auto* route = virtual_host->mutable_routes(0);\n        
route->mutable_route()\n            ->mutable_max_stream_duration()\n            ->mutable_grpc_timeout_header_max()\n            ->set_seconds(60 * 60);\n      });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"te\", \"trailers\"},\n                                     {\"grpc-timeout\", \"1S\"}, // 1 Second\n                                     {\"content-type\", \"application/grpc\"}});\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_NE(response->headers().GrpcStatus(), nullptr);\n  EXPECT_EQ(\"4\", response->headers().getGrpcStatusValue()); // Deadline exceeded.\n  EXPECT_LT(0,\n            test_server_->counter(\"http.config_test.downstream_rq_max_duration_reached\")->value());\n}\n\n// Interleave two requests and responses and make sure that idle timeout is handled correctly.\nTEST_P(Http2IntegrationTest, IdleTimeoutWithSimultaneousRequests) {\n  FakeHttpConnectionPtr fake_upstream_connection1;\n  FakeHttpConnectionPtr fake_upstream_connection2;\n  Http::RequestEncoder* encoder1;\n  Http::RequestEncoder* encoder2;\n  FakeStreamPtr upstream_request1;\n  FakeStreamPtr upstream_request2;\n  int32_t request1_bytes = 1024;\n  int32_t request2_bytes = 512;\n\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    auto* cluster = static_resources->mutable_clusters(0);\n    auto* http_protocol_options = cluster->mutable_common_http_protocol_options();\n    auto* idle_time_out = 
http_protocol_options->mutable_idle_timeout();\n    std::chrono::milliseconds timeout(1000);\n    auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);\n    idle_time_out->set_seconds(seconds.count());\n  });\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Start request 1\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  encoder1 = &encoder_decoder.first;\n  auto response1 = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection1));\n  ASSERT_TRUE(fake_upstream_connection1->waitForNewStream(*dispatcher_, upstream_request1));\n\n  // Start request 2\n  auto encoder_decoder2 =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  encoder2 = &encoder_decoder2.first;\n  auto response2 = std::move(encoder_decoder2.second);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection2));\n  ASSERT_TRUE(fake_upstream_connection2->waitForNewStream(*dispatcher_, upstream_request2));\n\n  // Finish request 1\n  codec_client_->sendData(*encoder1, request1_bytes, true);\n  ASSERT_TRUE(upstream_request1->waitForEndStream(*dispatcher_));\n\n  // Finish request i2\n  codec_client_->sendData(*encoder2, request2_bytes, true);\n  
ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_));\n\n  // Respond to request 2\n  upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request2->encodeData(request2_bytes, true);\n  response2->waitForEndStream();\n  EXPECT_TRUE(upstream_request2->complete());\n  EXPECT_EQ(request2_bytes, upstream_request2->bodyLength());\n  EXPECT_TRUE(response2->complete());\n  EXPECT_EQ(\"200\", response2->headers().getStatusValue());\n  EXPECT_EQ(request2_bytes, response2->body().size());\n\n  // Validate that idle time is not kicked in.\n  EXPECT_EQ(0, test_server_->counter(\"cluster.cluster_0.upstream_cx_idle_timeout\")->value());\n  EXPECT_NE(0, test_server_->counter(\"cluster.cluster_0.upstream_cx_total\")->value());\n\n  // Respond to request 1\n  upstream_request1->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request1->encodeData(request1_bytes, true);\n  response1->waitForEndStream();\n  EXPECT_TRUE(upstream_request1->complete());\n  EXPECT_EQ(request1_bytes, upstream_request1->bodyLength());\n  EXPECT_TRUE(response1->complete());\n  EXPECT_EQ(\"200\", response1->headers().getStatusValue());\n  EXPECT_EQ(request1_bytes, response1->body().size());\n\n  // Do not send any requests and validate idle timeout kicks in after both the requests are done.\n  ASSERT_TRUE(fake_upstream_connection1->waitForDisconnect());\n  ASSERT_TRUE(fake_upstream_connection2->waitForDisconnect());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_idle_timeout\", 2);\n}\n\n// Test request mirroring / shadowing with an HTTP/2 downstream and a request with a body.\nTEST_P(Http2IntegrationTest, RequestMirrorWithBody) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* mirror_policy = hcm.mutable_route_config()\n                               
   ->mutable_virtual_hosts(0)\n                                  ->mutable_routes(0)\n                                  ->mutable_route()\n                                  ->add_request_mirror_policies();\n        mirror_policy->set_cluster(\"cluster_0\");\n      });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Send request with body.\n  IntegrationStreamDecoderPtr request =\n      codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                        {\":path\", \"/test/long/url\"},\n                                                                        {\":scheme\", \"http\"},\n                                                                        {\":authority\", \"host\"}},\n                                         \"hello\");\n\n  // Wait for the first request as well as the shadow.\n  waitForNextUpstreamRequest();\n\n  FakeHttpConnectionPtr fake_upstream_connection2;\n  FakeStreamPtr upstream_request2;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection2));\n  ASSERT_TRUE(fake_upstream_connection2->waitForNewStream(*dispatcher_, upstream_request2));\n  ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_));\n\n  // Make sure both requests have a body. 
Also check the shadow for the shadow headers.\n  EXPECT_EQ(\"hello\", upstream_request_->body().toString());\n  EXPECT_EQ(\"hello\", upstream_request2->body().toString());\n  EXPECT_EQ(\"host-shadow\", upstream_request2->headers().getHostValue());\n\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  request->waitForEndStream();\n  EXPECT_EQ(\"200\", request->headers().getStatusValue());\n\n  // Cleanup.\n  ASSERT_TRUE(fake_upstream_connection2->close());\n  ASSERT_TRUE(fake_upstream_connection2->waitForDisconnect());\n}\n\n// Interleave two requests and responses and make sure the HTTP2 stack handles this correctly.\nvoid Http2IntegrationTest::simultaneousRequest(int32_t request1_bytes, int32_t request2_bytes) {\n  FakeHttpConnectionPtr fake_upstream_connection1;\n  FakeHttpConnectionPtr fake_upstream_connection2;\n  Http::RequestEncoder* encoder1;\n  Http::RequestEncoder* encoder2;\n  FakeStreamPtr upstream_request1;\n  FakeStreamPtr upstream_request2;\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Start request 1\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  encoder1 = &encoder_decoder.first;\n  auto response1 = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection1));\n  ASSERT_TRUE(fake_upstream_connection1->waitForNewStream(*dispatcher_, upstream_request1));\n\n  // Start request 2\n  auto encoder_decoder2 =\n      
codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  encoder2 = &encoder_decoder2.first;\n  auto response2 = std::move(encoder_decoder2.second);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection2));\n  ASSERT_TRUE(fake_upstream_connection2->waitForNewStream(*dispatcher_, upstream_request2));\n\n  // Finish request 1\n  codec_client_->sendData(*encoder1, request1_bytes, true);\n  ASSERT_TRUE(upstream_request1->waitForEndStream(*dispatcher_));\n\n  // Finish request 2\n  codec_client_->sendData(*encoder2, request2_bytes, true);\n  ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_));\n\n  // Respond to request 2\n  upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request2->encodeData(request2_bytes, true);\n  response2->waitForEndStream();\n  EXPECT_TRUE(upstream_request2->complete());\n  EXPECT_EQ(request2_bytes, upstream_request2->bodyLength());\n  EXPECT_TRUE(response2->complete());\n  EXPECT_EQ(\"200\", response2->headers().getStatusValue());\n  EXPECT_EQ(request2_bytes, response2->body().size());\n\n  // Respond to request 1\n  upstream_request1->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request1->encodeData(request2_bytes, true);\n  response1->waitForEndStream();\n  EXPECT_TRUE(upstream_request1->complete());\n  EXPECT_EQ(request1_bytes, upstream_request1->bodyLength());\n  EXPECT_TRUE(response1->complete());\n  EXPECT_EQ(\"200\", response1->headers().getStatusValue());\n  EXPECT_EQ(request2_bytes, response1->body().size());\n\n  // Cleanup both downstream and upstream\n  
ASSERT_TRUE(fake_upstream_connection1->close());\n  ASSERT_TRUE(fake_upstream_connection1->waitForDisconnect());\n  ASSERT_TRUE(fake_upstream_connection2->close());\n  ASSERT_TRUE(fake_upstream_connection2->waitForDisconnect());\n  codec_client_->close();\n}\n\nTEST_P(Http2IntegrationTest, SimultaneousRequest) { simultaneousRequest(1024, 512); }\n\nTEST_P(Http2IntegrationTest, SimultaneousRequestWithBufferLimits) {\n  config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream.\n  simultaneousRequest(1024 * 32, 1024 * 16);\n}\n\n// Test downstream connection delayed close processing.\nTEST_P(Http2IntegrationTest, DelayedCloseAfterBadFrame) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_delayed_close_timeout()->set_nanos(1000 * 1000); });\n  initialize();\n  std::string response;\n\n  auto connection = createConnectionDriver(\n      lookupPort(\"http\"), \"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\nhelloworldcauseanerror\",\n      [&](Network::ClientConnection& connection, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n        connection.dispatcher().exit();\n      });\n\n  connection->run();\n  EXPECT_THAT(response, HasSubstr(\"SETTINGS expected\"));\n  // Due to the multiple dispatchers involved (one for the RawConnectionDriver and another for the\n  // Envoy server), it's possible the delayed close timer could fire and close the server socket\n  // prior to the data callback above firing. 
Therefore, we may either still be connected, or have\n  // received a remote close.\n  if (connection->lastConnectionEvent() == Network::ConnectionEvent::Connected) {\n    connection->run();\n  }\n  EXPECT_EQ(connection->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose);\n  EXPECT_EQ(test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value(),\n            1);\n}\n\n// Test disablement of delayed close processing on downstream connections.\nTEST_P(Http2IntegrationTest, DelayedCloseDisabled) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(0); });\n  initialize();\n  std::string response;\n  auto connection = createConnectionDriver(\n      lookupPort(\"http\"), \"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\nhelloworldcauseanerror\",\n      [&](Network::ClientConnection& connection, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n        connection.dispatcher().exit();\n      });\n\n  connection->run();\n  EXPECT_THAT(response, HasSubstr(\"SETTINGS expected\"));\n  // Due to the multiple dispatchers involved (one for the RawConnectionDriver and another for the\n  // Envoy server), it's possible for the 'connection' to receive the data and exit the dispatcher\n  // prior to the FIN being received from the server.\n  if (connection->lastConnectionEvent() == Network::ConnectionEvent::Connected) {\n    connection->run();\n  }\n  EXPECT_EQ(connection->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose);\n  EXPECT_EQ(test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value(),\n            0);\n}\n\nTEST_P(Http2IntegrationTest, PauseAndResume) {\n  config_helper_.addFilter(R\"EOF(\n  name: stop-iteration-and-continue-filter\n  typed_config:\n    \"@type\": type.googleapis.com/google.protobuf.Empty\n  
)EOF\");\n  initialize();\n\n  // Send a request with a bit of data, to trigger the filter pausing.\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  codec_client_->sendData(*request_encoder_, 1, false);\n\n  auto response = std::move(encoder_decoder.second);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n\n  // Now send the final data frame and make sure it gets proxied.\n  codec_client_->sendData(*request_encoder_, 0, true);\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  response->waitForHeaders();\n  upstream_request_->encodeData(0, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n}\n\nTEST_P(Http2IntegrationTest, PauseAndResumeHeadersOnly) {\n  config_helper_.addFilter(R\"EOF(\n  name: stop-iteration-and-continue-filter\n  typed_config:\n    \"@type\": type.googleapis.com/google.protobuf.Empty\n  )EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n}\n\n// Verify the case when we have large pending data with empty trailers. 
It should not introduce\n// stack-overflow (on ASan build). This is a regression test for\n// https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=24714.\nTEST_P(Http2IntegrationTest, EmptyTrailers) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, 100000, false);\n  Http::TestRequestTrailerMapImpl request_trailers;\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n}\n\nHttp2RingHashIntegrationTest::Http2RingHashIntegrationTest() {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n    cluster->clear_load_assignment();\n    cluster->mutable_load_assignment()->add_endpoints();\n    cluster->mutable_load_assignment()->set_cluster_name(cluster->name());\n    cluster->set_lb_policy(envoy::config::cluster::v3::Cluster::RING_HASH);\n    for (int i = 0; i < num_upstreams_; i++) {\n      auto* socket = cluster->mutable_load_assignment()\n                         ->mutable_endpoints(0)\n                         ->add_lb_endpoints()\n                         ->mutable_endpoint()\n                         ->mutable_address()\n                         ->mutable_socket_address();\n      socket->set_address(Network::Test::getLoopbackAddressString(version_));\n    }\n  });\n}\n\nHttp2RingHashIntegrationTest::~Http2RingHashIntegrationTest() {\n  if (codec_client_) {\n    codec_client_->close();\n    codec_client_ = nullptr;\n  }\n  for (auto& fake_upstream_connection : fake_upstream_connections_) {\n   
 AssertionResult result = fake_upstream_connection->close();\n    RELEASE_ASSERT(result, result.message());\n    result = fake_upstream_connection->waitForDisconnect();\n    RELEASE_ASSERT(result, result.message());\n  }\n}\n\nvoid Http2RingHashIntegrationTest::createUpstreams() {\n  for (int i = 0; i < num_upstreams_; i++) {\n    addFakeUpstream(FakeHttpConnection::Type::HTTP1);\n  }\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, Http2RingHashIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, Http2MetadataIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nvoid Http2RingHashIntegrationTest::sendMultipleRequests(\n    int request_bytes, Http::TestRequestHeaderMapImpl headers,\n    std::function<void(IntegrationStreamDecoder&)> cb) {\n  TestRandomGenerator rand;\n  const uint32_t num_requests = 50;\n  std::vector<Http::RequestEncoder*> encoders;\n  std::vector<IntegrationStreamDecoderPtr> responses;\n  std::vector<FakeStreamPtr> upstream_requests;\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  for (uint32_t i = 0; i < num_requests; ++i) {\n    auto encoder_decoder = codec_client_->startRequest(headers);\n    encoders.push_back(&encoder_decoder.first);\n    responses.push_back(std::move(encoder_decoder.second));\n    codec_client_->sendData(*encoders[i], request_bytes, true);\n  }\n\n  for (uint32_t i = 0; i < num_requests; ++i) {\n    FakeHttpConnectionPtr fake_upstream_connection;\n    ASSERT_TRUE(FakeUpstream::waitForHttpConnection(*dispatcher_, fake_upstreams_,\n                                                    fake_upstream_connection));\n    // As data and streams are interwoven, make sure waitForNewStream()\n    // ignores incoming data and waits for actual 
stream establishment.\n    upstream_requests.emplace_back();\n    ASSERT_TRUE(fake_upstream_connection->waitForNewStream(*dispatcher_, upstream_requests.back()));\n    upstream_requests.back()->setAddServedByHeader(true);\n    fake_upstream_connections_.push_back(std::move(fake_upstream_connection));\n  }\n\n  for (uint32_t i = 0; i < num_requests; ++i) {\n    ASSERT_TRUE(upstream_requests[i]->waitForEndStream(*dispatcher_));\n    upstream_requests[i]->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n    upstream_requests[i]->encodeData(rand.random() % (1024 * 2), true);\n  }\n\n  for (uint32_t i = 0; i < num_requests; ++i) {\n    responses[i]->waitForEndStream();\n    EXPECT_TRUE(upstream_requests[i]->complete());\n    EXPECT_EQ(request_bytes, upstream_requests[i]->bodyLength());\n\n    EXPECT_TRUE(responses[i]->complete());\n    cb(*responses[i]);\n  }\n}\n\nTEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieNoTtl) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* hash_policy = hcm.mutable_route_config()\n                                ->mutable_virtual_hosts(0)\n                                ->mutable_routes(0)\n                                ->mutable_route()\n                                ->add_hash_policy();\n        auto* cookie = hash_policy->mutable_cookie();\n        cookie->set_name(\"foo\");\n      });\n\n  // This test is non-deterministic, so make it extremely unlikely that not all\n  // upstreams get hit.\n  num_upstreams_ = 2;\n  std::set<std::string> served_by;\n  sendMultipleRequests(\n      1024,\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"}},\n      
[&](IntegrationStreamDecoder& response) {\n        EXPECT_EQ(\"200\", response.headers().getStatusValue());\n        EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr);\n        served_by.insert(std::string(\n            response.headers().get(Http::LowerCaseString(\"x-served-by\"))->value().getStringView()));\n      });\n  EXPECT_EQ(served_by.size(), num_upstreams_);\n}\n\nTEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithNonzeroTtlSet) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* hash_policy = hcm.mutable_route_config()\n                                ->mutable_virtual_hosts(0)\n                                ->mutable_routes(0)\n                                ->mutable_route()\n                                ->add_hash_policy();\n        auto* cookie = hash_policy->mutable_cookie();\n        cookie->set_name(\"foo\");\n        cookie->mutable_ttl()->set_seconds(15);\n      });\n\n  std::set<std::string> set_cookies;\n  sendMultipleRequests(\n      1024,\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"}},\n      [&](IntegrationStreamDecoder& response) {\n        EXPECT_EQ(\"200\", response.headers().getStatusValue());\n        std::string value(\n            response.headers().get(Http::Headers::get().SetCookie)->value().getStringView());\n        set_cookies.insert(value);\n        EXPECT_THAT(value, MatchesRegex(\"foo=.*; Max-Age=15; HttpOnly\"));\n      });\n  EXPECT_EQ(set_cookies.size(), 1);\n}\n\nTEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithZeroTtlSet) {\n  config_helper_.addConfigModifier(\n      
[&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* hash_policy = hcm.mutable_route_config()\n                                ->mutable_virtual_hosts(0)\n                                ->mutable_routes(0)\n                                ->mutable_route()\n                                ->add_hash_policy();\n        auto* cookie = hash_policy->mutable_cookie();\n        cookie->set_name(\"foo\");\n        cookie->mutable_ttl();\n      });\n\n  std::set<std::string> set_cookies;\n  sendMultipleRequests(\n      1024,\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"}},\n      [&](IntegrationStreamDecoder& response) {\n        EXPECT_EQ(\"200\", response.headers().getStatusValue());\n        std::string value(\n            response.headers().get(Http::Headers::get().SetCookie)->value().getStringView());\n        set_cookies.insert(value);\n        EXPECT_THAT(value, MatchesRegex(\"^foo=.*$\"));\n      });\n  EXPECT_EQ(set_cookies.size(), 1);\n}\n\nTEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieNoTtl) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* hash_policy = hcm.mutable_route_config()\n                                ->mutable_virtual_hosts(0)\n                                ->mutable_routes(0)\n                                ->mutable_route()\n                                ->add_hash_policy();\n        auto* cookie = hash_policy->mutable_cookie();\n        cookie->set_name(\"foo\");\n      });\n\n  std::set<std::string> served_by;\n  sendMultipleRequests(\n      1024,\n      Http::TestRequestHeaderMapImpl{{\":method\", 
\"POST\"},\n                                     {\"cookie\", \"foo=bar\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"}},\n      [&](IntegrationStreamDecoder& response) {\n        EXPECT_EQ(\"200\", response.headers().getStatusValue());\n        EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr);\n        served_by.insert(std::string(\n            response.headers().get(Http::LowerCaseString(\"x-served-by\"))->value().getStringView()));\n      });\n  EXPECT_EQ(served_by.size(), 1);\n}\n\nTEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieWithTtlSet) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* hash_policy = hcm.mutable_route_config()\n                                ->mutable_virtual_hosts(0)\n                                ->mutable_routes(0)\n                                ->mutable_route()\n                                ->add_hash_policy();\n        auto* cookie = hash_policy->mutable_cookie();\n        cookie->set_name(\"foo\");\n        cookie->mutable_ttl()->set_seconds(15);\n      });\n\n  std::set<std::string> served_by;\n  sendMultipleRequests(\n      1024,\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\"cookie\", \"foo=bar\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"}},\n      [&](IntegrationStreamDecoder& response) {\n        EXPECT_EQ(\"200\", response.headers().getStatusValue());\n        EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr);\n        served_by.insert(std::string(\n         
   response.headers().get(Http::LowerCaseString(\"x-served-by\"))->value().getStringView()));\n      });\n  EXPECT_EQ(served_by.size(), 1);\n}\n\nvoid Http2FrameIntegrationTest::startHttp2Session() {\n  ASSERT_TRUE(tcp_client_->write(Http2Frame::Preamble, false, false));\n\n  // Send empty initial SETTINGS frame.\n  auto settings = Http2Frame::makeEmptySettingsFrame();\n  ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false));\n\n  // Read initial SETTINGS frame from the server.\n  readFrame();\n\n  // Send an SETTINGS ACK.\n  settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack);\n  ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false));\n\n  // read pending SETTINGS and WINDOW_UPDATE frames\n  readFrame();\n  readFrame();\n}\n\nvoid Http2FrameIntegrationTest::beginSession() {\n  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  // set lower outbound frame limits to make tests run faster\n  config_helper_.setOutboundFramesLimits(1000, 100);\n  initialize();\n  // Set up a raw connection to easily send requests without reading responses.\n  auto options = std::make_shared<Network::Socket::Options>();\n  options->emplace_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND,\n      ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024));\n  tcp_client_ = makeTcpConnection(lookupPort(\"http\"), options);\n  startHttp2Session();\n}\n\nHttp2Frame Http2FrameIntegrationTest::readFrame() {\n  Http2Frame frame;\n  EXPECT_TRUE(tcp_client_->waitForData(frame.HeaderSize));\n  frame.setHeader(tcp_client_->data());\n  tcp_client_->clearData(frame.HeaderSize);\n  auto len = frame.payloadSize();\n  if (len) {\n    EXPECT_TRUE(tcp_client_->waitForData(len));\n    frame.setPayload(tcp_client_->data());\n    tcp_client_->clearData(len);\n  }\n  return frame;\n}\n\nvoid 
Http2FrameIntegrationTest::sendFrame(const Http2Frame& frame) {\n  ASSERT_TRUE(tcp_client_->connected());\n  ASSERT_TRUE(tcp_client_->write(std::string(frame), false, false));\n}\n\n// Regression test.\nTEST_P(Http2FrameIntegrationTest, SetDetailsTwice) {\n  autonomous_upstream_ = true;\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  beginSession();\n\n  // Send two concatenated frames, the first with too many headers, and the second an invalid frame\n  // (push_promise)\n  std::string bad_frame =\n      \"00006d0104000000014083a8749783ee3a3fbebebebebebebebebebebebebebebebebebebebebebebebebebebebe\"\n      \"bebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebe\"\n      \"bebebebebebebebebebebebebebebebebebebebebebebebebebe0001010500000000018800a065\";\n  Http2Frame request = Http2Frame::makeGenericFrameFromHexDump(bad_frame);\n  sendFrame(request);\n  tcp_client_->close();\n\n  // Expect that the details for the first frame are kept.\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"too_many_headers\"));\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, Http2FrameIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nnamespace {\nconst uint32_t ControlFrameFloodLimit = 100;\nconst uint32_t AllFrameFloodLimit = 1000;\n} // namespace\n\nSocketInterfaceSwap::SocketInterfaceSwap() {\n  Envoy::Network::SocketInterfaceSingleton::clear();\n  test_socket_interface_loader_ = std::make_unique<Envoy::Network::SocketInterfaceLoader>(\n      std::make_unique<Envoy::Network::TestSocketInterface>(\n          [writev_matcher = writev_matcher_](Envoy::Network::TestIoSocketHandle* io_handle,\n                                             const Buffer::RawSlice*,\n                                             uint64_t) -> absl::optional<Api::IoCallUint64Result> {\n            if 
(writev_matcher->shouldReturnEgain(io_handle->localAddress()->ip()->port())) {\n              return Api::IoCallUint64Result(\n                  0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(),\n                                     Network::IoSocketError::deleteIoError));\n            }\n            return absl::nullopt;\n          }));\n}\n\nSocketInterfaceSwap::~SocketInterfaceSwap() {\n  test_socket_interface_loader_.reset();\n  Envoy::Network::SocketInterfaceSingleton::initialize(previous_socket_interface_);\n}\n\nHttp2FloodMitigationTest::Http2FloodMitigationTest() {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); });\n}\n\nvoid Http2FloodMitigationTest::setNetworkConnectionBufferSize() {\n  // nghttp2 library has its own internal mitigation for outbound control frames (see\n  // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is modified\n  // to 10K in the ConnectionImpl::Http2Options::Http2Options. The mitigation is triggered when\n  // there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2 internal\n  // outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering Envoy's\n  // own flood mitigation. This can happen when a buffer large enough to contain over 10K PING or\n  // SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening the\n  // network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS frames).\n  // Set it to the arbitrarily chosen value of 32K. 
Note that this buffer has 16K lower bound.\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, \"\");\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n\n    listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024);\n  });\n}\n\nvoid Http2FloodMitigationTest::beginSession() {\n  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  // set lower outbound frame limits to make tests run faster\n  config_helper_.setOutboundFramesLimits(AllFrameFloodLimit, ControlFrameFloodLimit);\n  initialize();\n  // Set up a raw connection to easily send requests without reading responses. Also, set a small\n  // TCP receive buffer to speed up connection backup.\n  auto options = std::make_shared<Network::Socket::Options>();\n  options->emplace_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND,\n      ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024));\n  writev_matcher_->setSourcePort(lookupPort(\"http\"));\n  tcp_client_ = makeTcpConnection(lookupPort(\"http\"), options);\n  startHttp2Session();\n}\n\n// Verify that the server detects the flood of the given frame.\nvoid Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::string& flood_stat,\n                                           uint32_t num_frames) {\n  // make sure all frames can fit into 16k buffer\n  ASSERT_LE(num_frames, (16u * 1024u) / frame.size());\n  std::vector<char> buf(num_frames * frame.size());\n  for (auto pos = buf.begin(); pos != buf.end();) {\n    pos = std::copy(frame.begin(), frame.end(), pos);\n  }\n\n  ASSERT_TRUE(tcp_client_->write({buf.begin(), buf.end()}, false, false));\n\n  // Envoy's flood mitigation should kill the connection\n  tcp_client_->waitForDisconnect();\n\n  
EXPECT_EQ(1, test_server_->counter(flood_stat)->value());\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_delayed_close_timeout\", 1);\n}\n\n// Verify that the server detects the flood using specified request parameters.\nvoid Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_view path,\n                                           Http2Frame::ResponseStatus expected_http_status,\n                                           const std::string& flood_stat, uint32_t num_frames) {\n  uint32_t request_idx = 0;\n  auto request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(request_idx), host, path);\n  sendFrame(request);\n  auto frame = readFrame();\n  EXPECT_EQ(Http2Frame::Type::Headers, frame.type());\n  EXPECT_EQ(expected_http_status, frame.responseStatus());\n  writev_matcher_->setWritevReturnsEgain();\n  for (uint32_t frame = 0; frame < num_frames; ++frame) {\n    request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(++request_idx), host, path);\n    sendFrame(request);\n  }\n  tcp_client_->waitForDisconnect();\n  if (!flood_stat.empty()) {\n    EXPECT_EQ(1, test_server_->counter(flood_stat)->value());\n  }\n  EXPECT_EQ(1,\n            test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value());\n}\n\nvoid Http2FloodMitigationTest::prefillOutboundDownstreamQueue(uint32_t data_frame_count) {\n  // Set large buffer limits so the test is not affected by the flow control.\n  config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024);\n  autonomous_upstream_ = true;\n  autonomous_allow_incomplete_streams_ = true;\n  beginSession();\n\n  // Do not read from the socket and send request that causes autonomous upstream to respond\n  // with the specified number of DATA frames. 
This pre-fills downstream outbound frame queue\n  // such that the next response triggers flood protection.\n  // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames\n  // start to accumulate in the transport socket buffer.\n  writev_matcher_->setWritevReturnsEgain();\n\n  const auto request = Http2Frame::makeRequest(\n      Http2Frame::makeClientStreamId(0), \"host\", \"/test/long/url\",\n      {Http2Frame::Header(\"response_data_blocks\", absl::StrCat(data_frame_count)),\n       Http2Frame::Header(\"no_trailers\", \"0\")});\n  sendFrame(request);\n\n  // Wait for some data to arrive and then wait for the upstream_rq_active to flip to 0 to indicate\n  // that the first request has completed.\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_rx_bytes_total\", 10000);\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.upstream_rq_active\", 0);\n  // Verify that pre-fill did not trigger flood protection\n  EXPECT_EQ(0, test_server_->counter(\"http2.outbound_flood\")->value());\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, Http2FloodMitigationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(Http2FloodMitigationTest, Ping) {\n  setNetworkConnectionBufferSize();\n  beginSession();\n  writev_matcher_->setWritevReturnsEgain();\n  floodServer(Http2Frame::makePingFrame(), \"http2.outbound_control_flood\",\n              ControlFrameFloodLimit + 1);\n}\n\nTEST_P(Http2FloodMitigationTest, Settings) {\n  setNetworkConnectionBufferSize();\n  beginSession();\n  writev_matcher_->setWritevReturnsEgain();\n  floodServer(Http2Frame::makeEmptySettingsFrame(), \"http2.outbound_control_flood\",\n              ControlFrameFloodLimit + 1);\n}\n\n// Verify that the server can detect flood of internally generated 404 responses.\nTEST_P(Http2FloodMitigationTest, 404) {\n  // Change the default route to be restrictive, and send a 
request to a non existent route.\n  config_helper_.setDefaultHostAndRoute(\"foo.com\", \"/found\");\n  beginSession();\n\n  // Send requests to a non existent path to generate 404s\n  floodServer(\"host\", \"/notfound\", Http2Frame::ResponseStatus::NotFound, \"http2.outbound_flood\",\n              AllFrameFloodLimit + 1);\n}\n\n// Verify that the server can detect flood of response DATA frames\nTEST_P(Http2FloodMitigationTest, Data) {\n  // Set large buffer limits so the test is not affected by the flow control.\n  config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024);\n  autonomous_upstream_ = true;\n  autonomous_allow_incomplete_streams_ = true;\n  beginSession();\n\n  // Do not read from the socket and send request that causes autonomous upstream\n  // to respond with 1000 DATA frames. The Http2FloodMitigationTest::beginSession()\n  // sets 1000 flood limit for all frame types. Including 1 HEADERS response frame\n  // 1000 DATA frames should trigger flood protection.\n  // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start\n  // to accumulate in the transport socket buffer.\n  writev_matcher_->setWritevReturnsEgain();\n\n  const auto request = Http2Frame::makeRequest(\n      1, \"host\", \"/test/long/url\",\n      {Http2Frame::Header(\"response_data_blocks\", \"1000\"), Http2Frame::Header(\"no_trailers\", \"0\")});\n  sendFrame(request);\n\n  // Wait for connection to be flooded with outbound DATA frames and disconnected.\n  tcp_client_->waitForDisconnect();\n\n  // If the server codec had incorrectly thrown an exception on flood detection it would cause\n  // the entire upstream to be disconnected. 
Verify it is still active, and there are no destroyed\n  // connections.\n  ASSERT_EQ(1, test_server_->gauge(\"cluster.cluster_0.upstream_cx_active\")->value());\n  ASSERT_EQ(0, test_server_->counter(\"cluster.cluster_0.upstream_cx_destroy\")->value());\n  // Verify that the flood check was triggered\n  EXPECT_EQ(1, test_server_->counter(\"http2.outbound_flood\")->value());\n}\n\n// Verify that the server can detect flood triggered by a DATA frame from a decoder filter call\n// to sendLocalReply().\n// This test also verifies that RELEASE_ASSERT in the ConnectionImpl::StreamImpl::encodeDataHelper()\n// is not fired when it is called by the sendLocalReply() in the dispatching context.\nTEST_P(Http2FloodMitigationTest, DataOverflowFromDecoderFilterSendLocalReply) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        const std::string yaml_string = R\"EOF(\nname: send_local_reply_filter\ntyped_config:\n  \"@type\": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig\n  prefix: \"/call_send_local_reply\"\n  code: 404\n  body: \"something\"\n  )EOF\";\n        TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters());\n        // keep router the last\n        auto size = hcm.http_filters_size();\n        hcm.mutable_http_filters()->SwapElements(size - 2, size - 1);\n      });\n\n  // pre-fill 2 away from overflow\n  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2);\n\n  // At this point the outbound downstream frame queue should be 2 away from overflowing.\n  // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply with body.\n  // HEADERS + DATA frames should overflow the queue.\n  // Verify that connection was disconnected and appropriate counters were set.\n  auto request2 =\n      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), \"host\", \"/call_send_local_reply\");\n  
sendFrame(request2);\n\n  // Wait for connection to be flooded with outbound DATA frame and disconnected.\n  tcp_client_->waitForDisconnect();\n\n  // Verify that the upstream connection is still alive.\n  ASSERT_EQ(1, test_server_->gauge(\"cluster.cluster_0.upstream_cx_active\")->value());\n  ASSERT_EQ(0, test_server_->counter(\"cluster.cluster_0.upstream_cx_destroy\")->value());\n  // Verify that the flood check was triggered\n  EXPECT_EQ(1, test_server_->counter(\"http2.outbound_flood\")->value());\n}\n\n// Verify that the server can detect flood of response HEADERS frames\nTEST_P(Http2FloodMitigationTest, Headers) {\n  // pre-fill one away from overflow\n  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);\n\n  // Send second request which should trigger headers only response.\n  // Verify that connection was disconnected and appropriate counters were set.\n  auto request2 = Http2Frame::makeRequest(\n      Http2Frame::makeClientStreamId(1), \"host\", \"/test/long/url\",\n      {Http2Frame::Header(\"response_data_blocks\", \"0\"), Http2Frame::Header(\"no_trailers\", \"0\")});\n  sendFrame(request2);\n\n  // Wait for connection to be flooded with outbound HEADERS frame and disconnected.\n  tcp_client_->waitForDisconnect();\n\n  // If the server codec had incorrectly thrown an exception on flood detection it would cause\n  // the entire upstream to be disconnected. 
Verify it is still active, and there are no destroyed\n  // connections.\n  ASSERT_EQ(1, test_server_->gauge(\"cluster.cluster_0.upstream_cx_active\")->value());\n  ASSERT_EQ(0, test_server_->counter(\"cluster.cluster_0.upstream_cx_destroy\")->value());\n  // Verify that the flood check was triggered\n  EXPECT_EQ(1, test_server_->counter(\"http2.outbound_flood\")->value());\n}\n\n// Verify that the server can detect overflow by 100 continue response sent by Envoy itself\nTEST_P(Http2FloodMitigationTest, Envoy100ContinueHeaders) {\n  // pre-fill one away from overflow\n  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);\n\n  // Send second request which should trigger Envoy to respond with 100 continue.\n  // Verify that connection was disconnected and appropriate counters were set.\n  auto request2 = Http2Frame::makeRequest(\n      Http2Frame::makeClientStreamId(1), \"host\", \"/test/long/url\",\n      {Http2Frame::Header(\"response_data_blocks\", \"0\"), Http2Frame::Header(\"no_trailers\", \"0\"),\n       Http2Frame::Header(\"expect\", \"100-continue\")});\n  sendFrame(request2);\n\n  // Wait for connection to be flooded with outbound HEADERS frame and disconnected.\n  tcp_client_->waitForDisconnect();\n\n  // If the server codec had incorrectly thrown an exception on flood detection it would cause\n  // the entire upstream to be disconnected. 
Verify it is still active, and there are no destroyed\n  // connections.\n  ASSERT_EQ(1, test_server_->gauge(\"cluster.cluster_0.upstream_cx_active\")->value());\n  ASSERT_EQ(0, test_server_->counter(\"cluster.cluster_0.upstream_cx_destroy\")->value());\n  // The second upstream request should be reset since it is disconnected when sending 100 continue\n  // response\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.upstream_rq_tx_reset\")->value());\n  // Verify that the flood check was triggered\n  EXPECT_EQ(1, test_server_->counter(\"http2.outbound_flood\")->value());\n}\n\n// Verify that the server can detect flood triggered by a HEADERS frame from a decoder filter call\n// to sendLocalReply().\n// This test also verifies that RELEASE_ASSERT in the\n// ConnectionImpl::StreamImpl::encodeHeadersBase() is not fired when it is called by the\n// sendLocalReply() in the dispatching context.\nTEST_P(Http2FloodMitigationTest, HeadersOverflowFromDecoderFilterSendLocalReply) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        const std::string yaml_string = R\"EOF(\nname: send_local_reply_filter\ntyped_config:\n  \"@type\": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig\n  prefix: \"/call_send_local_reply\"\n  code: 404\n  )EOF\";\n        TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters());\n        // keep router the last\n        auto size = hcm.http_filters_size();\n        hcm.mutable_http_filters()->SwapElements(size - 2, size - 1);\n      });\n\n  // pre-fill one away from overflow\n  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);\n\n  // At this point the outbound downstream frame queue should be 1 away from overflowing.\n  // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply without body.\n  // Verify that connection was disconnected and appropriate counters were 
set.\n  auto request2 =\n      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), \"host\", \"/call_send_local_reply\");\n  sendFrame(request2);\n\n  // Wait for connection to be flooded with outbound HEADERS frame and disconnected.\n  tcp_client_->waitForDisconnect();\n\n  // Verify that the upstream connection is still alive.\n  ASSERT_EQ(1, test_server_->gauge(\"cluster.cluster_0.upstream_cx_active\")->value());\n  ASSERT_EQ(0, test_server_->counter(\"cluster.cluster_0.upstream_cx_destroy\")->value());\n  // Verify that the flood check was triggered\n  EXPECT_EQ(1, test_server_->counter(\"http2.outbound_flood\")->value());\n}\n\n// TODO(yanavlasov): add the same tests as above for the encoder filters.\n// This is currently blocked by the https://github.com/envoyproxy/envoy/pull/13256\n\n// Verify that the server can detect flood of RST_STREAM frames.\nTEST_P(Http2FloodMitigationTest, RST_STREAM) {\n  // Use invalid HTTP headers to trigger sending RST_STREAM frames.\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) -> void {\n        hcm.mutable_http2_protocol_options()\n            ->mutable_override_stream_error_on_invalid_http_message()\n            ->set_value(true);\n      });\n  beginSession();\n\n  uint32_t stream_index = 0;\n  auto request =\n      Http::Http2::Http2Frame::makeMalformedRequest(Http2Frame::makeClientStreamId(stream_index));\n  sendFrame(request);\n  auto response = readFrame();\n  // Make sure we've got RST_STREAM from the server\n  EXPECT_EQ(Http2Frame::Type::RstStream, response.type());\n\n  // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start\n  // to accumulate in the transport socket buffer.\n  writev_matcher_->setWritevReturnsEgain();\n\n  for (++stream_index; stream_index < ControlFrameFloodLimit + 2; ++stream_index) {\n    request =\n        
Http::Http2::Http2Frame::makeMalformedRequest(Http2Frame::makeClientStreamId(stream_index));\n    sendFrame(request);\n  }\n  tcp_client_->waitForDisconnect();\n  EXPECT_EQ(1, test_server_->counter(\"http2.outbound_control_flood\")->value());\n  EXPECT_EQ(1,\n            test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value());\n}\n\n// Verify that the server stops reading the downstream connection on protocol error.\nTEST_P(Http2FloodMitigationTest, TooManyStreams) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) -> void {\n        hcm.mutable_http2_protocol_options()->mutable_max_concurrent_streams()->set_value(2);\n      });\n  autonomous_upstream_ = true;\n  beginSession();\n  // To prevent Envoy from closing client streams the upstream connection needs to push back on\n  // writing by the upstream server. In this case Envoy will not see upstream responses and will\n  // keep client streams open, eventually maxing them out and causing client connection to be\n  // closed.\n  writev_matcher_->setSourcePort(fake_upstreams_[0]->localAddress()->ip()->port());\n\n  // Exceed the number of streams allowed by the server. 
The server should stop reading from the\n  // client.\n  floodServer(\"host\", \"/test/long/url\", Http2Frame::ResponseStatus::Ok, \"\", 3);\n}\n\nTEST_P(Http2FloodMitigationTest, EmptyHeaders) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_http2_protocol_options()\n            ->mutable_max_consecutive_inbound_frames_with_empty_payload()\n            ->set_value(0);\n      });\n  beginSession();\n\n  const auto request = Http2Frame::makeEmptyHeadersFrame(Http2Frame::makeClientStreamId(0));\n  sendFrame(request);\n\n  tcp_client_->waitForDisconnect();\n\n  EXPECT_EQ(1, test_server_->counter(\"http2.inbound_empty_frames_flood\")->value());\n  EXPECT_EQ(1,\n            test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value());\n}\n\nTEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  beginSession();\n\n  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);\n  auto request = Http2Frame::makeEmptyHeadersFrame(request_stream_id);\n  sendFrame(request);\n\n  for (int i = 0; i < 2; i++) {\n    request = Http2Frame::makeEmptyContinuationFrame(request_stream_id);\n    sendFrame(request);\n  }\n\n  tcp_client_->waitForDisconnect();\n\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"http2.inbound_empty_frames_flood\"));\n  EXPECT_EQ(1, test_server_->counter(\"http2.inbound_empty_frames_flood\")->value());\n  EXPECT_EQ(1,\n            test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value());\n}\n\nTEST_P(Http2FloodMitigationTest, EmptyData) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  beginSession();\n\n  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);\n  auto request = Http2Frame::makePostRequest(request_stream_id, 
\"host\", \"/\");\n  sendFrame(request);\n\n  for (int i = 0; i < 2; i++) {\n    request = Http2Frame::makeEmptyDataFrame(request_stream_id);\n    sendFrame(request);\n  }\n\n  tcp_client_->waitForDisconnect();\n\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"http2.inbound_empty_frames_flood\"));\n  EXPECT_EQ(1, test_server_->counter(\"http2.inbound_empty_frames_flood\")->value());\n  EXPECT_EQ(1,\n            test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value());\n}\n\nTEST_P(Http2FloodMitigationTest, PriorityIdleStream) {\n  beginSession();\n\n  floodServer(Http2Frame::makePriorityFrame(Http2Frame::makeClientStreamId(0),\n                                            Http2Frame::makeClientStreamId(1)),\n              \"http2.inbound_priority_frames_flood\",\n              Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM + 1);\n}\n\nTEST_P(Http2FloodMitigationTest, PriorityOpenStream) {\n  beginSession();\n\n  // Open stream.\n  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);\n  const auto request = Http2Frame::makeRequest(request_stream_id, \"host\", \"/\");\n  sendFrame(request);\n\n  floodServer(Http2Frame::makePriorityFrame(request_stream_id, Http2Frame::makeClientStreamId(1)),\n              \"http2.inbound_priority_frames_flood\",\n              Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 +\n                  1);\n}\n\nTEST_P(Http2FloodMitigationTest, PriorityClosedStream) {\n  autonomous_upstream_ = true;\n  beginSession();\n\n  // Open stream.\n  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);\n  const auto request = Http2Frame::makeRequest(request_stream_id, \"host\", \"/\");\n  sendFrame(request);\n  // Reading response marks this stream as closed in nghttp2.\n  auto frame = readFrame();\n  EXPECT_EQ(Http2Frame::Type::Headers, frame.type());\n\n  
floodServer(Http2Frame::makePriorityFrame(request_stream_id, Http2Frame::makeClientStreamId(1)),\n              \"http2.inbound_priority_frames_flood\",\n              Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 +\n                  1);\n}\n\nTEST_P(Http2FloodMitigationTest, WindowUpdate) {\n  beginSession();\n\n  // Open stream.\n  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);\n  const auto request = Http2Frame::makeRequest(request_stream_id, \"host\", \"/\");\n  sendFrame(request);\n\n  // Since we do not send any DATA frames, only 4 sequential WINDOW_UPDATE frames should\n  // trigger flood protection.\n  floodServer(Http2Frame::makeWindowUpdateFrame(request_stream_id, 1),\n              \"http2.inbound_window_update_frames_flood\", 4);\n}\n\n// Verify that the HTTP/2 connection is terminated upon receiving invalid HEADERS frame.\nTEST_P(Http2FloodMitigationTest, ZerolenHeader) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  beginSession();\n\n  // Send invalid request.\n  const auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(\n      Http2Frame::makeClientStreamId(0), \"host\", \"/\");\n  sendFrame(request);\n\n  tcp_client_->waitForDisconnect();\n\n  EXPECT_EQ(1, test_server_->counter(\"http2.rx_messaging_error\")->value());\n  EXPECT_EQ(1,\n            test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value());\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"http2.invalid.header.field\"));\n  // expect a downstream protocol error.\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"DPE\"));\n}\n\n// Verify that only the offending stream is terminated upon receiving invalid HEADERS frame.\nTEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  config_helper_.addConfigModifier(\n      
[](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) -> void {\n        hcm.mutable_http2_protocol_options()\n            ->mutable_override_stream_error_on_invalid_http_message()\n            ->set_value(true);\n      });\n  autonomous_upstream_ = true;\n  beginSession();\n\n  // Send invalid request.\n  uint32_t request_idx = 0;\n  auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(\n      Http2Frame::makeClientStreamId(request_idx), \"host\", \"/\");\n  sendFrame(request);\n  // Make sure we've got RST_STREAM from the server.\n  auto response = readFrame();\n  EXPECT_EQ(Http2Frame::Type::RstStream, response.type());\n\n  // Send valid request using the same connection.\n  request_idx++;\n  request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(request_idx), \"host\", \"/\");\n  sendFrame(request);\n  response = readFrame();\n  EXPECT_EQ(Http2Frame::Type::Headers, response.type());\n  EXPECT_EQ(Http2Frame::ResponseStatus::Ok, response.responseStatus());\n\n  tcp_client_->close();\n\n  EXPECT_EQ(1, test_server_->counter(\"http2.rx_messaging_error\")->value());\n  EXPECT_EQ(0,\n            test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value());\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"http2.invalid.header.field\"));\n  // expect Downstream Protocol Error\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"DPE\"));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http2_integration_test.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"test/common/http/http2/http2_frame.h\"\n#include \"test/integration/filters/test_socket_interface.h\"\n#include \"test/integration/http_integration.h\"\n\n#include \"absl/synchronization/mutex.h\"\n#include \"gtest/gtest.h\"\n\nusing Envoy::Http::Http2::Http2Frame;\n\nnamespace Envoy {\nclass Http2IntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                             public HttpIntegrationTest {\npublic:\n  Http2IntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {}\n\n  void SetUp() override { setDownstreamProtocol(Http::CodecClient::Type::HTTP2); }\n\n  void simultaneousRequest(int32_t request1_bytes, int32_t request2_bytes);\n\nprotected:\n  // Utility function to add filters.\n  void addFilters(std::vector<std::string> filters) {\n    for (const auto& filter : filters) {\n      config_helper_.addFilter(filter);\n    }\n  }\n};\n\nclass Http2RingHashIntegrationTest : public Http2IntegrationTest {\npublic:\n  Http2RingHashIntegrationTest();\n\n  ~Http2RingHashIntegrationTest() override;\n\n  void createUpstreams() override;\n\n  void sendMultipleRequests(int request_bytes, Http::TestRequestHeaderMapImpl headers,\n                            std::function<void(IntegrationStreamDecoder&)> cb);\n\n  std::vector<FakeHttpConnectionPtr> fake_upstream_connections_;\n  int num_upstreams_ = 5;\n};\n\nclass Http2MetadataIntegrationTest : public Http2IntegrationTest {\npublic:\n  void SetUp() override {\n    config_helper_.addConfigModifier(\n        [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() >= 1, \"\");\n          auto* cluster = 
bootstrap.mutable_static_resources()->mutable_clusters(0);\n          cluster->mutable_http2_protocol_options()->set_allow_metadata(true);\n        });\n    config_helper_.addConfigModifier(\n        [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_metadata(true); });\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void testRequestMetadataWithStopAllFilter();\n\n  void verifyHeadersOnlyTest();\n\n  void runHeaderOnlyTest(bool send_request_body, size_t body_size);\n};\n\nclass Http2FrameIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                  public HttpIntegrationTest {\npublic:\n  Http2FrameIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {}\n\nprotected:\n  void startHttp2Session();\n  Http2Frame readFrame();\n  void sendFrame(const Http2Frame& frame);\n  virtual void beginSession();\n\n  IntegrationTcpClientPtr tcp_client_;\n};\n\nclass SocketInterfaceSwap {\npublic:\n  // Object of this class hold the state determining the IoHandle which\n  // should return EAGAIN from the `writev` call.\n  struct IoHandleMatcher {\n    bool shouldReturnEgain(uint32_t port) const {\n      absl::ReaderMutexLock lock(&mutex_);\n      return port == port_ && writev_returns_egain_;\n    }\n\n    void setSourcePort(uint32_t port) {\n      absl::WriterMutexLock lock(&mutex_);\n      port_ = port;\n    }\n\n    void setWritevReturnsEgain() {\n      absl::WriterMutexLock lock(&mutex_);\n      writev_returns_egain_ = true;\n    }\n\n  private:\n    mutable absl::Mutex mutex_;\n    uint32_t port_ ABSL_GUARDED_BY(mutex_) = 0;\n    bool writev_returns_egain_ ABSL_GUARDED_BY(mutex_) = false;\n  };\n\n  SocketInterfaceSwap();\n  ~SocketInterfaceSwap();\n\nprotected:\n  Envoy::Network::SocketInterface* 
const previous_socket_interface_{\n      Envoy::Network::SocketInterfaceSingleton::getExisting()};\n  std::shared_ptr<IoHandleMatcher> writev_matcher_{std::make_shared<IoHandleMatcher>()};\n  std::unique_ptr<Envoy::Network::SocketInterfaceLoader> test_socket_interface_loader_;\n};\n\n// It is important that the new socket interface is installed before any I/O activity starts and\n// the previous one is restored after all I/O activity stops. Since the HttpIntegrationTest\n// destructor stops Envoy the SocketInterfaceSwap destructor needs to run after it. This order of\n// multiple inheritance ensures that SocketInterfaceSwap destructor runs after\n// Http2FrameIntegrationTest destructor completes.\nclass Http2FloodMitigationTest : public SocketInterfaceSwap, public Http2FrameIntegrationTest {\npublic:\n  Http2FloodMitigationTest();\n\nprotected:\n  void floodServer(const Http2Frame& frame, const std::string& flood_stat, uint32_t num_frames);\n  void floodServer(absl::string_view host, absl::string_view path,\n                   Http2Frame::ResponseStatus expected_http_status, const std::string& flood_stat,\n                   uint32_t num_frames);\n\n  void setNetworkConnectionBufferSize();\n  void beginSession() override;\n  void prefillOutboundDownstreamQueue(uint32_t data_frame_count);\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http2_upstream_integration_test.cc",
    "content": "#include \"test/integration/http2_upstream_integration_test.h\"\n\n#include <iostream>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/http/header_map_impl.h\"\n\n#include \"test/integration/autonomous_upstream.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, Http2UpstreamIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(Http2UpstreamIntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, false);\n}\n\nTEST_P(Http2UpstreamIntegrationTest, RouterRequestAndResponseWithZeroByteBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(0, 0, false);\n}\n\nTEST_P(Http2UpstreamIntegrationTest, RouterHeaderOnlyRequestAndResponseNoBuffer) {\n  testRouterHeaderOnlyRequestAndResponse();\n}\n\nTEST_P(Http2UpstreamIntegrationTest, RouterUpstreamDisconnectBeforeRequestcomplete) {\n  testRouterUpstreamDisconnectBeforeRequestComplete();\n}\n\nTEST_P(Http2UpstreamIntegrationTest, RouterUpstreamDisconnectBeforeResponseComplete) {\n  testRouterUpstreamDisconnectBeforeResponseComplete();\n}\n\nTEST_P(Http2UpstreamIntegrationTest, RouterDownstreamDisconnectBeforeRequestComplete) {\n  testRouterDownstreamDisconnectBeforeRequestComplete();\n}\n\nTEST_P(Http2UpstreamIntegrationTest, RouterDownstreamDisconnectBeforeResponseComplete) {\n  testRouterDownstreamDisconnectBeforeResponseComplete();\n}\n\nTEST_P(Http2UpstreamIntegrationTest, RouterUpstreamResponseBeforeRequestComplete) {\n  testRouterUpstreamResponseBeforeRequestComplete();\n}\n\nTEST_P(Http2UpstreamIntegrationTest, Retry) { testRetry(); }\n\nTEST_P(Http2UpstreamIntegrationTest, 
GrpcRetry) { testGrpcRetry(); }\n\nTEST_P(Http2UpstreamIntegrationTest, Trailers) { testTrailers(1024, 2048, true, true); }\n\n// Ensure Envoy handles streaming requests and responses simultaneously.\nvoid Http2UpstreamIntegrationTest::bidirectionalStreaming(uint32_t bytes) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Start the request.\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  // Send part of the request body and ensure it is received upstream.\n  codec_client_->sendData(*request_encoder_, bytes, false);\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, bytes));\n\n  // Start sending the response and ensure it is received downstream.\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request_->encodeData(bytes, false);\n  response->waitForBodyData(bytes);\n\n  // Finish the request.\n  codec_client_->sendTrailers(*request_encoder_,\n                              Http::TestRequestTrailerMapImpl{{\"trailer\", \"foo\"}});\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Finish the response.\n  upstream_request_->encodeTrailers(Http::TestResponseTrailerMapImpl{{\"trailer\", \"bar\"}});\n  response->waitForEndStream();\n  
EXPECT_TRUE(response->complete());\n}\n\nTEST_P(Http2UpstreamIntegrationTest, BidirectionalStreaming) { bidirectionalStreaming(1024); }\n\nTEST_P(Http2UpstreamIntegrationTest, LargeBidirectionalStreamingWithBufferLimits) {\n  config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream.\n  bidirectionalStreaming(1024 * 32);\n}\n\nTEST_P(Http2UpstreamIntegrationTest, BidirectionalStreamingReset) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Start sending the request.\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  // Send some request data.\n  codec_client_->sendData(*request_encoder_, 1024, false);\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 1024));\n\n  // Start sending the response.\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request_->encodeData(1024, false);\n  response->waitForBodyData(1024);\n\n  // Finish sending the request.\n  codec_client_->sendTrailers(*request_encoder_,\n                              Http::TestRequestTrailerMapImpl{{\"trailer\", \"foo\"}});\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Reset the stream.\n  upstream_request_->encodeResetStream();\n  response->waitForReset();\n  
EXPECT_FALSE(response->complete());\n}\n\nvoid Http2UpstreamIntegrationTest::simultaneousRequest(uint32_t request1_bytes,\n                                                       uint32_t request2_bytes,\n                                                       uint32_t response1_bytes,\n                                                       uint32_t response2_bytes) {\n  FakeStreamPtr upstream_request1;\n  FakeStreamPtr upstream_request2;\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Start request 1\n  auto encoder_decoder1 =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  Http::RequestEncoder* encoder1 = &encoder_decoder1.first;\n  auto response1 = std::move(encoder_decoder1.second);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request1));\n\n  // Start request 2\n  auto encoder_decoder2 =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  Http::RequestEncoder* encoder2 = &encoder_decoder2.first;\n  auto response2 = std::move(encoder_decoder2.second);\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request2));\n\n  // Finish request 1\n  codec_client_->sendData(*encoder1, request1_bytes, true);\n  
ASSERT_TRUE(upstream_request1->waitForEndStream(*dispatcher_));\n\n  // Finish request 2\n  codec_client_->sendData(*encoder2, request2_bytes, true);\n  ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_));\n\n  // Respond to request 2\n  upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request2->encodeData(response2_bytes, true);\n  response2->waitForEndStream();\n  EXPECT_TRUE(upstream_request2->complete());\n  EXPECT_EQ(request2_bytes, upstream_request2->bodyLength());\n  EXPECT_TRUE(response2->complete());\n  EXPECT_EQ(\"200\", response2->headers().getStatusValue());\n  EXPECT_EQ(response2_bytes, response2->body().size());\n\n  // Respond to request 1\n  upstream_request1->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request1->encodeData(response1_bytes, true);\n  response1->waitForEndStream();\n  EXPECT_TRUE(upstream_request1->complete());\n  EXPECT_EQ(request1_bytes, upstream_request1->bodyLength());\n  EXPECT_TRUE(response1->complete());\n  EXPECT_EQ(\"200\", response1->headers().getStatusValue());\n  EXPECT_EQ(response1_bytes, response1->body().size());\n}\n\nTEST_P(Http2UpstreamIntegrationTest, SimultaneousRequest) {\n  simultaneousRequest(1024, 512, 1023, 513);\n}\n\nTEST_P(Http2UpstreamIntegrationTest, LargeSimultaneousRequestWithBufferLimits) {\n  config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream.\n  simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16);\n}\n\nvoid Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_bytes, uint32_t) {\n  TestRandomGenerator rand;\n  const uint32_t num_requests = 50;\n  std::vector<Http::RequestEncoder*> encoders;\n  std::vector<IntegrationStreamDecoderPtr> responses;\n  std::vector<int> response_bytes;\n  autonomous_upstream_ = true;\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  for (uint32_t 
i = 0; i < num_requests; ++i) {\n    response_bytes.push_back(rand.random() % (1024 * 2));\n    auto headers = Http::TestRequestHeaderMapImpl{\n        {\":method\", \"POST\"},\n        {\":path\", \"/test/long/url\"},\n        {\":scheme\", \"http\"},\n        {\":authority\", \"host\"},\n        {AutonomousStream::RESPONSE_SIZE_BYTES, std::to_string(response_bytes[i])},\n        {AutonomousStream::EXPECT_REQUEST_SIZE_BYTES, std::to_string(request_bytes)}};\n    if (i % 2 == 0) {\n      headers.addCopy(AutonomousStream::RESET_AFTER_REQUEST, \"yes\");\n    }\n    auto encoder_decoder = codec_client_->startRequest(headers);\n    encoders.push_back(&encoder_decoder.first);\n    responses.push_back(std::move(encoder_decoder.second));\n    codec_client_->sendData(*encoders[i], request_bytes, true);\n  }\n\n  for (uint32_t i = 0; i < num_requests; ++i) {\n    responses[i]->waitForEndStream();\n    if (i % 2 != 0) {\n      EXPECT_TRUE(responses[i]->complete());\n      EXPECT_EQ(\"200\", responses[i]->headers().getStatusValue());\n      EXPECT_EQ(response_bytes[i], responses[i]->body().length());\n    } else {\n      // Upstream stream reset.\n      EXPECT_EQ(\"503\", responses[i]->headers().getStatusValue());\n    }\n  }\n\n  EXPECT_EQ(0, test_server_->gauge(\"http2.streams_active\")->value());\n  EXPECT_EQ(0, test_server_->gauge(\"http2.pending_send_bytes\")->value());\n}\n\nTEST_P(Http2UpstreamIntegrationTest, ManySimultaneousRequest) {\n  manySimultaneousRequests(1024, 1024);\n}\n\nTEST_P(Http2UpstreamIntegrationTest, ManyLargeSimultaneousRequestWithBufferLimits) {\n  config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream.\n  manySimultaneousRequests(1024 * 20, 1024 * 20);\n}\n\nTEST_P(Http2UpstreamIntegrationTest, ManyLargeSimultaneousRequestWithRandomBackup) {\n  config_helper_.addFilter(R\"EOF(\n  name: random-pause-filter\n  typed_config:\n    \"@type\": type.googleapis.com/google.protobuf.Empty\n  )EOF\");\n\n  
manySimultaneousRequests(1024 * 20, 1024 * 20);\n}\n\nTEST_P(Http2UpstreamIntegrationTest, UpstreamConnectionCloseWithManyStreams) {\n  config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream.\n  const uint32_t num_requests = 20;\n  std::vector<Http::RequestEncoder*> encoders;\n  std::vector<IntegrationStreamDecoderPtr> responses;\n  std::vector<FakeStreamPtr> upstream_requests;\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  for (uint32_t i = 0; i < num_requests; ++i) {\n    auto encoder_decoder =\n        codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                   {\":path\", \"/test/long/url\"},\n                                                                   {\":scheme\", \"http\"},\n                                                                   {\":authority\", \"host\"}});\n    encoders.push_back(&encoder_decoder.first);\n    responses.push_back(std::move(encoder_decoder.second));\n\n    // Ensure that we establish the first request (which will be reset) to avoid\n    // a race where the reset is detected before the upstream stream is\n    // established (#5316)\n    if (i == 0) {\n      ASSERT_TRUE(\n          fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n      upstream_requests.emplace_back();\n      ASSERT_TRUE(\n          fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_requests.back()));\n    }\n\n    if (i != 0) {\n      codec_client_->sendData(*encoders[i], 0, true);\n    }\n  }\n\n  // Reset one stream to test how reset and watermarks interact.\n  codec_client_->sendReset(*encoders[0]);\n\n  // Now drain the upstream connection.\n  for (uint32_t i = 1; i < num_requests; ++i) {\n    upstream_requests.emplace_back();\n    ASSERT_TRUE(\n        fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_requests.back()));\n  }\n 
 for (uint32_t i = 1; i < num_requests; ++i) {\n    ASSERT_TRUE(upstream_requests[i]->waitForEndStream(*dispatcher_));\n    upstream_requests[i]->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n    upstream_requests[i]->encodeData(100, false);\n  }\n  // Close the connection.\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  // Ensure the streams are all reset successfully.\n  for (uint32_t i = 1; i < num_requests; ++i) {\n    responses[i]->waitForReset();\n  }\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/6744\nTEST_P(Http2UpstreamIntegrationTest, HittingEncoderFilterLimitForGrpc) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        const std::string access_log_name =\n            TestEnvironment::temporaryPath(TestUtility::uniqueFilename());\n        // Configure just enough of an upstream access log to reference the upstream headers.\n        const std::string yaml_string = fmt::format(R\"EOF(\nname: router\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.router.v2.Router\n  upstream_log:\n    name: accesslog\n    filter:\n      not_health_check_filter: {{}}\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog\n      path: {}\n  )EOF\",\n                                                    Platform::null_device_path);\n        TestUtility::loadFromYaml(yaml_string, *hcm.mutable_http_filters(1));\n      });\n\n  // As with ProtocolIntegrationTest.HittingEncoderFilterLimit use a filter\n  // which buffers response data but in this case, make sure the sendLocalReply\n  // is gRPC.\n  config_helper_.addFilter(\"{ name: encoder-decoder-buffer-filter, typed_config: { \\\"@type\\\": \"\n                           
\"type.googleapis.com/google.protobuf.Empty } }\");\n  config_helper_.setBufferLimits(1024, 1024);\n  initialize();\n\n  // Send the request.\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"te\", \"trailers\"}});\n  auto downstream_request = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  Buffer::OwnedImpl data(\"HTTP body content goes here\");\n  codec_client_->sendData(*downstream_request, data, true);\n  waitForNextUpstreamRequest();\n\n  // Send the response headers.\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  // Now send an overly large response body. At some point, too much data will\n  // be buffered, the stream will be reset, and the connection will disconnect.\n  upstream_request_->encodeData(1024 * 65, false);\n  ASSERT_TRUE(upstream_request_->waitForReset());\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n}\n\n// Tests the default limit for the number of response headers is 100. 
Results in a stream reset if\n// exceeds.\nTEST_P(Http2UpstreamIntegrationTest, TestManyResponseHeadersRejected) {\n  // Default limit for response headers is 100.\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestResponseHeaderMapImpl many_headers(default_response_headers_);\n  for (int i = 0; i < 100; i++) {\n    many_headers.addCopy(\"many\", std::string(1, 'a'));\n  }\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(many_headers, true);\n  response->waitForEndStream();\n  // Upstream stream reset triggered.\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n}\n\n// Tests bootstrap configuration of max response headers.\nTEST_P(Http2UpstreamIntegrationTest, ManyResponseHeadersAccepted) {\n  // Set max response header count to 200.\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    auto* cluster = static_resources->mutable_clusters(0);\n    auto* http_protocol_options = cluster->mutable_common_http_protocol_options();\n    http_protocol_options->mutable_max_headers_count()->set_value(200);\n  });\n  Http::TestResponseHeaderMapImpl response_headers(default_response_headers_);\n  for (int i = 0; i < 150; i++) {\n    response_headers.addCopy(std::to_string(i), std::string(1, 'a'));\n  }\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(response_headers, false);\n  upstream_request_->encodeData(512, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n}\n\n// Tests that HTTP/2 response headers over 60 kB are rejected and result in a stream 
reset.\nTEST_P(Http2UpstreamIntegrationTest, LargeResponseHeadersRejected) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestResponseHeaderMapImpl large_headers(default_response_headers_);\n  large_headers.addCopy(\"large\", std::string(60 * 1024, 'a'));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(large_headers, true);\n  response->waitForEndStream();\n  // Upstream stream reset.\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n}\n\n// Regression test to make sure that configuring upstream logs over gRPC will not crash Envoy.\n// TODO(asraa): Test output of the upstream logs.\n// See https://github.com/envoyproxy/envoy/issues/8828.\nTEST_P(Http2UpstreamIntegrationTest, ConfigureHttpOverGrpcLogs) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        const std::string access_log_name =\n            TestEnvironment::temporaryPath(TestUtility::uniqueFilename());\n        // Configure just enough of an upstream access log to reference the upstream headers.\n        const std::string yaml_string = R\"EOF(\nname: router\ntyped_config:\n  \"@type\": type.googleapis.com/envoy.config.filter.http.router.v2.Router\n  upstream_log:\n    name: grpc_accesslog\n    filter:\n      not_health_check_filter: {}\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.accesslog.v2.HttpGrpcAccessLogConfig\n      common_config:\n        log_name: foo\n        grpc_service:\n          envoy_grpc:\n            cluster_name: cluster_0\n  )EOF\";\n        // Replace the terminal envoy.router.\n        hcm.clear_http_filters();\n        TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters());\n      });\n\n  initialize();\n\n  // Send the request.\n  codec_client_ = 
makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n\n  // Send the response headers.\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http2_upstream_integration_test.h",
    "content": "#pragma once\n\n#include \"test/integration/http_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nclass Http2UpstreamIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                     public HttpIntegrationTest {\npublic:\n  Http2UpstreamIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {}\n\n  void SetUp() override {\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void initialize() override { HttpIntegrationTest::initialize(); }\n\n  void bidirectionalStreaming(uint32_t bytes);\n  void simultaneousRequest(uint32_t request1_bytes, uint32_t request2_bytes,\n                           uint32_t response1_bytes, uint32_t response2_bytes);\n  void manySimultaneousRequests(uint32_t request_bytes, uint32_t response_bytes);\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http_integration.cc",
    "content": "#include \"test/integration/http_integration.h\"\n\n#include <functional>\n#include <list>\n#include <memory>\n#include <regex>\n#include <string>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/thread_annotations.h\"\n#include \"common/http/headers.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/context_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/integration/autonomous_upstream.h\"\n#include \"test/integration/test_host_predicate_config.h\"\n#include \"test/integration/utility.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"absl/time/time.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nusing testing::HasSubstr;\n\nenvoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::CodecType\ntypeToCodecType(Http::CodecClient::Type type) {\n  switch (type) {\n  case Http::CodecClient::Type::HTTP1:\n    return envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n        HTTP1;\n  case Http::CodecClient::Type::HTTP2:\n    return 
envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n        HTTP2;\n  case Http::CodecClient::Type::HTTP3:\n    return envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n        HTTP3;\n  default:\n    RELEASE_ASSERT(0, \"\");\n  }\n}\n\n} // namespace\n\nIntegrationCodecClient::IntegrationCodecClient(\n    Event::Dispatcher& dispatcher, Random::RandomGenerator& random,\n    Network::ClientConnectionPtr&& conn, Upstream::HostDescriptionConstSharedPtr host_description,\n    CodecClient::Type type)\n    : CodecClientProd(type, std::move(conn), host_description, dispatcher, random),\n      dispatcher_(dispatcher), callbacks_(*this), codec_callbacks_(*this) {\n  connection_->addConnectionCallbacks(callbacks_);\n  setCodecConnectionCallbacks(codec_callbacks_);\n  dispatcher.run(Event::Dispatcher::RunType::Block);\n}\n\nvoid IntegrationCodecClient::flushWrite() {\n  connection_->dispatcher().run(Event::Dispatcher::RunType::NonBlock);\n  // NOTE: We should run blocking until all the body data is flushed.\n}\n\nIntegrationStreamDecoderPtr\nIntegrationCodecClient::makeHeaderOnlyRequest(const Http::RequestHeaderMap& headers) {\n  auto response = std::make_unique<IntegrationStreamDecoder>(dispatcher_);\n  Http::RequestEncoder& encoder = newStream(*response);\n  encoder.getStream().addCallbacks(*response);\n  encoder.encodeHeaders(headers, true);\n  flushWrite();\n  return response;\n}\n\nIntegrationStreamDecoderPtr\nIntegrationCodecClient::makeRequestWithBody(const Http::RequestHeaderMap& headers,\n                                            uint64_t body_size) {\n  return makeRequestWithBody(headers, std::string(body_size, 'a'));\n}\n\nIntegrationStreamDecoderPtr\nIntegrationCodecClient::makeRequestWithBody(const Http::RequestHeaderMap& headers,\n                                            const std::string& body) {\n  auto response = std::make_unique<IntegrationStreamDecoder>(dispatcher_);\n  
Http::RequestEncoder& encoder = newStream(*response);\n  encoder.getStream().addCallbacks(*response);\n  encoder.encodeHeaders(headers, false);\n  Buffer::OwnedImpl data(body);\n  encoder.encodeData(data, true);\n  flushWrite();\n  return response;\n}\n\nvoid IntegrationCodecClient::sendData(Http::RequestEncoder& encoder, absl::string_view data,\n                                      bool end_stream) {\n  Buffer::OwnedImpl buffer_data(data.data(), data.size());\n  encoder.encodeData(buffer_data, end_stream);\n  flushWrite();\n}\n\nvoid IntegrationCodecClient::sendData(Http::RequestEncoder& encoder, Buffer::Instance& data,\n                                      bool end_stream) {\n  encoder.encodeData(data, end_stream);\n  flushWrite();\n}\n\nvoid IntegrationCodecClient::sendData(Http::RequestEncoder& encoder, uint64_t size,\n                                      bool end_stream) {\n  Buffer::OwnedImpl data(std::string(size, 'a'));\n  sendData(encoder, data, end_stream);\n}\n\nvoid IntegrationCodecClient::sendTrailers(Http::RequestEncoder& encoder,\n                                          const Http::RequestTrailerMap& trailers) {\n  encoder.encodeTrailers(trailers);\n  flushWrite();\n}\n\nvoid IntegrationCodecClient::sendReset(Http::RequestEncoder& encoder) {\n  encoder.getStream().resetStream(Http::StreamResetReason::LocalReset);\n  flushWrite();\n}\n\nvoid IntegrationCodecClient::sendMetadata(Http::RequestEncoder& encoder,\n                                          Http::MetadataMap metadata_map) {\n  Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);\n  Http::MetadataMapVector metadata_map_vector;\n  metadata_map_vector.push_back(std::move(metadata_map_ptr));\n  encoder.encodeMetadata(metadata_map_vector);\n  flushWrite();\n}\n\nstd::pair<Http::RequestEncoder&, IntegrationStreamDecoderPtr>\nIntegrationCodecClient::startRequest(const Http::RequestHeaderMap& headers) {\n  auto response = 
std::make_unique<IntegrationStreamDecoder>(dispatcher_);\n  Http::RequestEncoder& encoder = newStream(*response);\n  encoder.getStream().addCallbacks(*response);\n  encoder.encodeHeaders(headers, false);\n  flushWrite();\n  return {encoder, std::move(response)};\n}\n\nAssertionResult IntegrationCodecClient::waitForDisconnect(std::chrono::milliseconds time_to_wait) {\n  if (disconnected_) {\n    return AssertionSuccess();\n  }\n  Event::TimerPtr wait_timer;\n  bool wait_timer_triggered = false;\n  if (time_to_wait.count()) {\n    wait_timer = connection_->dispatcher().createTimer([this, &wait_timer_triggered] {\n      connection_->dispatcher().exit();\n      wait_timer_triggered = true;\n    });\n    wait_timer->enableTimer(time_to_wait);\n  }\n\n  connection_->dispatcher().run(Event::Dispatcher::RunType::Block);\n\n  // Disable the timer if it was created. This call is harmless if the timer already triggered.\n  if (wait_timer) {\n    wait_timer->disableTimer();\n  }\n\n  if (wait_timer_triggered && !disconnected_) {\n    return AssertionFailure() << \"Timed out waiting for disconnect\";\n  }\n  EXPECT_TRUE(disconnected_);\n\n  return AssertionSuccess();\n}\n\nvoid IntegrationCodecClient::ConnectionCallbacks::onEvent(Network::ConnectionEvent event) {\n  parent_.last_connection_event_ = event;\n  if (event == Network::ConnectionEvent::Connected) {\n    parent_.connected_ = true;\n    parent_.connection_->dispatcher().exit();\n  } else if (event == Network::ConnectionEvent::RemoteClose) {\n    parent_.disconnected_ = true;\n    parent_.connection_->dispatcher().exit();\n  } else {\n    if (parent_.type() == CodecClient::Type::HTTP3 && !parent_.connected_) {\n      // Before handshake gets established, any connection failure should exit the loop. I.e. a QUIC\n      // connection may fail of INVALID_VERSION if both this client doesn't support any of the\n      // versions the server advertised before handshake established. 
In this case the connection is\n      // closed locally and this is in a blocking event loop.\n      parent_.connection_->dispatcher().exit();\n    }\n    parent_.disconnected_ = true;\n  }\n}\n\nIntegrationCodecClientPtr HttpIntegrationTest::makeHttpConnection(uint32_t port) {\n  return makeHttpConnection(makeClientConnection(port));\n}\n\nIntegrationCodecClientPtr HttpIntegrationTest::makeRawHttpConnection(\n    Network::ClientConnectionPtr&& conn,\n    absl::optional<envoy::config::core::v3::Http2ProtocolOptions> http2_options) {\n  std::shared_ptr<Upstream::MockClusterInfo> cluster{new NiceMock<Upstream::MockClusterInfo>()};\n  cluster->max_response_headers_count_ = 200;\n  if (!http2_options.has_value()) {\n    http2_options = Http2::Utility::initializeAndValidateOptions(\n        envoy::config::core::v3::Http2ProtocolOptions());\n    http2_options.value().set_allow_connect(true);\n    http2_options.value().set_allow_metadata(true);\n  }\n  cluster->http2_options_ = http2_options.value();\n  cluster->http1_settings_.enable_trailers_ = true;\n  Upstream::HostDescriptionConstSharedPtr host_description{Upstream::makeTestHostDescription(\n      cluster, fmt::format(\"tcp://{}:80\", Network::Test::getLoopbackAddressUrlString(version_)))};\n  return std::make_unique<IntegrationCodecClient>(*dispatcher_, random_, std::move(conn),\n                                                  host_description, downstream_protocol_);\n}\n\nNetwork::TransportSocketFactoryPtr HttpIntegrationTest::createUpstreamTlsContext() {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  const std::string yaml = absl::StrFormat(\n      R\"EOF(\ncommon_tls_context:\n  tls_certificates:\n  - certificate_chain: { filename: \"%s\" }\n    private_key: { filename: \"%s\" }\n  validation_context:\n    trusted_ca: { filename: \"%s\" }\nrequire_client_certificate: true\n)EOF\",\n      TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcert.pem\"),\n  
    TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamkey.pem\"),\n      TestEnvironment::runfilesPath(\"test/config/integration/certs/cacert.pem\"));\n  TestUtility::loadFromYaml(yaml, tls_context);\n  auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n      tls_context, factory_context_);\n  static Stats::Scope* upstream_stats_store = new Stats::IsolatedStoreImpl();\n  return std::make_unique<Extensions::TransportSockets::Tls::ServerSslSocketFactory>(\n      std::move(cfg), context_manager_, *upstream_stats_store, std::vector<std::string>{});\n}\nIntegrationCodecClientPtr\nHttpIntegrationTest::makeHttpConnection(Network::ClientConnectionPtr&& conn) {\n  auto codec = makeRawHttpConnection(std::move(conn), absl::nullopt);\n  EXPECT_TRUE(codec->connected()) << codec->connection()->transportFailureReason();\n  return codec;\n}\n\nHttpIntegrationTest::HttpIntegrationTest(Http::CodecClient::Type downstream_protocol,\n                                         Network::Address::IpVersion version,\n                                         const std::string& config)\n    : HttpIntegrationTest::HttpIntegrationTest(\n          downstream_protocol,\n          [version](int) {\n            return Network::Utility::parseInternetAddress(\n                Network::Test::getAnyAddressString(version), 0);\n          },\n          version, config) {}\n\nHttpIntegrationTest::HttpIntegrationTest(Http::CodecClient::Type downstream_protocol,\n                                         const InstanceConstSharedPtrFn& upstream_address_fn,\n                                         Network::Address::IpVersion version,\n                                         const std::string& config)\n    : BaseIntegrationTest(upstream_address_fn, version, config),\n      downstream_protocol_(downstream_protocol) {\n  // Legacy integration tests expect the default listener to be named \"http\" for\n  // lookupPort calls.\n  
config_helper_.renameListener(\"http\");\n  config_helper_.setClientCodec(typeToCodecType(downstream_protocol_));\n}\n\nvoid HttpIntegrationTest::useAccessLog(absl::string_view format) {\n  access_log_name_ = TestEnvironment::temporaryPath(TestUtility::uniqueFilename());\n  ASSERT_TRUE(config_helper_.setAccessLog(access_log_name_, format));\n}\n\nHttpIntegrationTest::~HttpIntegrationTest() { cleanupUpstreamAndDownstream(); }\n\nvoid HttpIntegrationTest::setDownstreamProtocol(Http::CodecClient::Type downstream_protocol) {\n  downstream_protocol_ = downstream_protocol;\n  config_helper_.setClientCodec(typeToCodecType(downstream_protocol_));\n}\n\nConfigHelper::HttpModifierFunction HttpIntegrationTest::setEnableDownstreamTrailersHttp1() {\n  return [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) { hcm.mutable_http_protocol_options()->set_enable_trailers(true); };\n}\n\nConfigHelper::ConfigModifierFunction HttpIntegrationTest::setEnableUpstreamTrailersHttp1() {\n  return [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() == 1, \"\");\n    if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n      auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n      cluster->mutable_http_protocol_options()->set_enable_trailers(true);\n    }\n  };\n}\n\nIntegrationStreamDecoderPtr HttpIntegrationTest::sendRequestAndWaitForResponse(\n    const Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size,\n    const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_size,\n    int upstream_index, std::chrono::milliseconds time) {\n  ASSERT(codec_client_ != nullptr);\n  // Send the request to Envoy.\n  IntegrationStreamDecoderPtr response;\n  if (request_body_size) {\n    response = codec_client_->makeRequestWithBody(request_headers, request_body_size);\n  } else 
{\n    response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  }\n  waitForNextUpstreamRequest(upstream_index, time);\n  // Send response headers, and end_stream if there is no response body.\n  upstream_request_->encodeHeaders(response_headers, response_size == 0);\n  // Send any response data, with end_stream true.\n  if (response_size) {\n    upstream_request_->encodeData(response_size, true);\n  }\n  // Wait for the response to be read by the codec client.\n  response->waitForEndStream();\n  return response;\n}\n\nvoid HttpIntegrationTest::cleanupUpstreamAndDownstream() {\n  // Close the upstream connection first. If there's an outstanding request,\n  // closing the client may result in a FIN being sent upstream, and FakeConnectionBase::close\n  // will interpret that as an unexpected disconnect. The codec client is not\n  // subject to the same failure mode.\n  if (fake_upstream_connection_) {\n    AssertionResult result = fake_upstream_connection_->close();\n    RELEASE_ASSERT(result, result.message());\n    result = fake_upstream_connection_->waitForDisconnect();\n    RELEASE_ASSERT(result, result.message());\n    fake_upstream_connection_.reset();\n  }\n  if (codec_client_) {\n    codec_client_->close();\n  }\n}\n\nvoid HttpIntegrationTest::sendRequestAndVerifyResponse(\n    const Http::TestRequestHeaderMapImpl& request_headers, const int request_size,\n    const Http::TestResponseHeaderMapImpl& response_headers, const int response_size,\n    const int backend_idx) {\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = sendRequestAndWaitForResponse(request_headers, request_size, response_headers,\n                                                response_size, backend_idx);\n  verifyResponse(std::move(response), \"200\", response_headers, std::string(response_size, 'a'));\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(request_size, upstream_request_->bodyLength());\n  
cleanupUpstreamAndDownstream();\n}\n\nvoid HttpIntegrationTest::verifyResponse(IntegrationStreamDecoderPtr response,\n                                         const std::string& response_code,\n                                         const Http::TestResponseHeaderMapImpl& expected_headers,\n                                         const std::string& expected_body) {\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response_code, response->headers().getStatusValue());\n  expected_headers.iterate([response_headers = &response->headers()](\n                               const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    const Http::HeaderEntry* entry =\n        response_headers->get(Http::LowerCaseString{std::string(header.key().getStringView())});\n    EXPECT_NE(entry, nullptr);\n    EXPECT_EQ(header.value().getStringView(), entry->value().getStringView());\n    return Http::HeaderMap::Iterate::Continue;\n  });\n\n  EXPECT_EQ(response->body(), expected_body);\n}\n\nabsl::optional<uint64_t>\nHttpIntegrationTest::waitForNextUpstreamRequest(const std::vector<uint64_t>& upstream_indices,\n                                                std::chrono::milliseconds connection_wait_timeout) {\n  absl::optional<uint64_t> upstream_with_request;\n  // If there is no upstream connection, wait for it to be established.\n  if (!fake_upstream_connection_) {\n    AssertionResult result = AssertionFailure();\n    int upstream_index = 0;\n    Event::TestTimeSystem::RealTimeBound bound(connection_wait_timeout);\n    // Loop over the upstreams until the call times out or an upstream request is received.\n    while (!result) {\n      upstream_index = upstream_index % upstream_indices.size();\n      result = fake_upstreams_[upstream_indices[upstream_index]]->waitForHttpConnection(\n          *dispatcher_, fake_upstream_connection_, std::chrono::milliseconds(5),\n          max_request_headers_kb_, max_request_headers_count_);\n      if (result) {\n        
upstream_with_request = upstream_index;\n        break;\n      } else if (!bound.withinBound()) {\n        result = (AssertionFailure() << \"Timed out waiting for new connection.\");\n        break;\n      }\n      ++upstream_index;\n    }\n    RELEASE_ASSERT(result, result.message());\n  }\n  // Wait for the next stream on the upstream connection.\n  AssertionResult result =\n      fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_);\n  RELEASE_ASSERT(result, result.message());\n  // Wait for the stream to be completely received.\n  result = upstream_request_->waitForEndStream(*dispatcher_);\n  RELEASE_ASSERT(result, result.message());\n\n  return upstream_with_request;\n}\n\nvoid HttpIntegrationTest::waitForNextUpstreamRequest(\n    uint64_t upstream_index, std::chrono::milliseconds connection_wait_timeout) {\n  waitForNextUpstreamRequest(std::vector<uint64_t>({upstream_index}), connection_wait_timeout);\n}\n\nvoid HttpIntegrationTest::checkSimpleRequestSuccess(uint64_t expected_request_size,\n                                                    uint64_t expected_response_size,\n                                                    IntegrationStreamDecoder* response) {\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(expected_request_size, upstream_request_->bodyLength());\n\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(expected_response_size, response->body().size());\n}\n\nvoid HttpIntegrationTest::testRouterRequestAndResponseWithBody(\n    uint64_t request_size, uint64_t response_size, bool big_header, bool set_content_length_header,\n    ConnectionCreationFunction* create_connection) {\n  initialize();\n  codec_client_ = makeHttpConnection(\n      create_connection ? 
((*create_connection)()) : makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},    {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"},\n      {\":authority\", \"host\"}, {\"x-lyft-user-id\", \"123\"},   {\"x-forwarded-for\", \"10.0.0.1\"}};\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  if (set_content_length_header) {\n    request_headers.setContentLength(request_size);\n    response_headers.setContentLength(response_size);\n  }\n  if (big_header) {\n    request_headers.addCopy(\"big\", std::string(4096, 'a'));\n  }\n  auto response =\n      sendRequestAndWaitForResponse(request_headers, request_size, response_headers, response_size);\n  checkSimpleRequestSuccess(request_size, response_size, response.get());\n}\n\nIntegrationStreamDecoderPtr\nHttpIntegrationTest::makeHeaderOnlyRequest(ConnectionCreationFunction* create_connection,\n                                           int upstream_index, const std::string& path,\n                                           const std::string& authority) {\n  // This is called multiple times per test in ads_integration_test. Only call\n  // initialize() the first time.\n  if (!initialized()) {\n    initialize();\n  }\n  codec_client_ = makeHttpConnection(\n      create_connection ? 
((*create_connection)()) : makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", path},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", authority},\n                                                 {\"x-lyft-user-id\", \"123\"}};\n  return sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0,\n                                       upstream_index);\n}\n\nvoid HttpIntegrationTest::testRouterHeaderOnlyRequestAndResponse(\n    ConnectionCreationFunction* create_connection, int upstream_index, const std::string& path,\n    const std::string& authority) {\n  auto response = makeHeaderOnlyRequest(create_connection, upstream_index, path, authority);\n  checkSimpleRequestSuccess(0U, 0U, response.get());\n}\n\n// Change the default route to be restrictive, and send a request to an alternate route.\nvoid HttpIntegrationTest::testRouterNotFound() {\n  config_helper_.setDefaultHostAndRoute(\"foo.com\", \"/found\");\n  initialize();\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/notfound\", \"\", downstream_protocol_, version_);\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"404\", response->headers().getStatusValue());\n}\n\n// Change the default route to be restrictive, and send a POST to an alternate route.\nvoid HttpIntegrationTest::testRouterNotFoundWithBody() {\n  config_helper_.setDefaultHostAndRoute(\"foo.com\", \"/found\");\n  initialize();\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"POST\", \"/notfound\", \"foo\", downstream_protocol_, version_);\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"404\", response->headers().getStatusValue());\n}\n\n// Make sure virtual cluster stats 
are charged to the appropriate virtual cluster.\nvoid HttpIntegrationTest::testRouterVirtualClusters() {\n  const std::string matching_header = \"x-use-test-vcluster\";\n  config_helper_.addConfigModifier(\n      [matching_header](\n          envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) {\n        auto* route_config = hcm.mutable_route_config();\n        ASSERT_EQ(1, route_config->virtual_hosts_size());\n        auto* virtual_host = route_config->mutable_virtual_hosts(0);\n        {\n          auto* virtual_cluster = virtual_host->add_virtual_clusters();\n          virtual_cluster->set_name(\"test_vcluster\");\n          auto* headers = virtual_cluster->add_headers();\n          headers->set_name(matching_header);\n          headers->set_present_match(true);\n        }\n      });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {matching_header, \"true\"}};\n\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n  checkSimpleRequestSuccess(0, 0, response.get());\n\n  test_server_->waitForCounterEq(\"vhost.integration.vcluster.test_vcluster.upstream_rq_total\", 1);\n  test_server_->waitForCounterEq(\"vhost.integration.vcluster.other.upstream_rq_total\", 0);\n\n  Http::TestRequestHeaderMapImpl request_headers2{{\":method\", \"POST\"},\n                                                  {\":path\", \"/test/long/url\"},\n                                                  {\":scheme\", \"http\"},\n                                                  {\":authority\", \"host\"}};\n\n 
 auto response2 = sendRequestAndWaitForResponse(request_headers2, 0, default_response_headers_, 0);\n  checkSimpleRequestSuccess(0, 0, response2.get());\n\n  test_server_->waitForCounterEq(\"vhost.integration.vcluster.test_vcluster.upstream_rq_total\", 1);\n  test_server_->waitForCounterEq(\"vhost.integration.vcluster.other.upstream_rq_total\", 1);\n}\n\nvoid HttpIntegrationTest::testRouterUpstreamDisconnectBeforeRequestComplete() {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  response->waitForEndStream();\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    codec_client_->close();\n  }\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(\"upstream connect error or disconnect/reset before headers. reset reason: connection \"\n            \"termination\",\n            response->body());\n}\n\nvoid HttpIntegrationTest::testRouterUpstreamDisconnectBeforeResponseComplete(\n    ConnectionCreationFunction* create_connection) {\n  initialize();\n  codec_client_ = makeHttpConnection(\n      create_connection ? 
((*create_connection)()) : makeClientConnection((lookupPort(\"http\"))));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    response->waitForReset();\n    codec_client_->close();\n  }\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_FALSE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(0U, response->body().size());\n}\n\nvoid HttpIntegrationTest::testRouterDownstreamDisconnectBeforeRequestComplete(\n    ConnectionCreationFunction* create_connection) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(\n      create_connection ? 
((*create_connection)()) : makeClientConnection((lookupPort(\"http\"))));\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  auto response = std::move(encoder_decoder.second);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  codec_client_->close();\n\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_FALSE(response->complete());\n}\n\nvoid HttpIntegrationTest::testRouterDownstreamDisconnectBeforeResponseComplete(\n    ConnectionCreationFunction* create_connection) {\n#if defined(__APPLE__) || defined(WIN32)\n  // Skip this test on OS/X + Windows: we can't detect the early close, and we\n  // won't clean up the upstream connection until it times out. See #4294.\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    return;\n  }\n#endif\n  initialize();\n  codec_client_ = makeHttpConnection(\n      create_connection ? 
((*create_connection)()) : makeClientConnection((lookupPort(\"http\"))));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, false);\n  response->waitForBodyData(512);\n  codec_client_->close();\n\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_FALSE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n}\n\nvoid HttpIntegrationTest::testRouterUpstreamResponseBeforeRequestComplete() {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  auto response = std::move(encoder_decoder.second);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n  response->waitForEndStream();\n\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n\n  if (downstream_protocol_ == 
Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    codec_client_->close();\n  }\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n}\n\nvoid HttpIntegrationTest::testRetry() {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1024U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n}\n\n// Tests that the x-envoy-attempt-count header is properly set on the upstream request\n// and updated after the request is 
retried.\nvoid HttpIntegrationTest::testRetryAttemptCountHeader() {\n  auto host = config_helper_.createVirtualHost(\"host\", \"/test_retry\");\n  host.set_include_request_attempt_count(true);\n  host.set_include_attempt_count_in_response(true);\n  config_helper_.addVirtualHost(host);\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test_retry\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 1);\n\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n  waitForNextUpstreamRequest();\n  EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 2);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1024U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n  EXPECT_EQ(2, 
atoi(std::string(response->headers().getEnvoyAttemptCountValue()).c_str()));\n}\n\nvoid HttpIntegrationTest::testGrpcRetry() {\n  Http::TestResponseTrailerMapImpl response_trailers{{\"response1\", \"trailer1\"},\n                                                     {\"grpc-status\", \"0\"}};\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-grpc-on\", \"cancelled\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, 1024, true);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"grpc-status\", \"1\"}}, false);\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512,\n                                fake_upstreams_[0]->httpType() != FakeHttpConnection::Type::HTTP2);\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP2) {\n    upstream_request_->encodeTrailers(response_trailers);\n  }\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1024U, 
upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP2) {\n    EXPECT_THAT(*response->trailers(), HeaderMapEqualRef(&response_trailers));\n  }\n}\n\nvoid HttpIntegrationTest::testEnvoyHandling100Continue(bool additional_continue_from_upstream,\n                                                       const std::string& via) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/dynamo/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"expect\", \"100-continue\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  // The continue headers should arrive immediately.\n  response->waitForContinueHeaders();\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  // Send the rest of the request.\n  codec_client_->sendData(*request_encoder_, 10, true);\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n  // Verify the Expect header is stripped.\n  EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::Headers::get().Expect));\n  if (via.empty()) {\n    EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::Headers::get().Via));\n  } else {\n    EXPECT_EQ(via,\n              
upstream_request_->headers().get(Http::Headers::get().Via)->value().getStringView());\n  }\n\n  if (additional_continue_from_upstream) {\n    // Make sure if upstream sends an 100-Continue Envoy doesn't send its own and proxy the one\n    // from upstream!\n    upstream_request_->encode100ContinueHeaders(\n        Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n  }\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(12, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  ASSERT(response->continueHeaders() != nullptr);\n  EXPECT_EQ(\"100\", response->continueHeaders()->getStatusValue());\n  EXPECT_EQ(nullptr, response->continueHeaders()->Via());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  if (via.empty()) {\n    EXPECT_EQ(nullptr, response->headers().Via());\n  } else {\n    EXPECT_EQ(via.c_str(), response->headers().getViaValue());\n  }\n}\n\nvoid HttpIntegrationTest::testEnvoyProxying1xx(bool continue_before_upstream_complete,\n                                               bool with_encoder_filter,\n                                               bool with_multiple_1xx_headers) {\n  if (with_encoder_filter) {\n    // Because 100-continue only affects encoder filters, make sure it plays well with one.\n    config_helper_.addFilter(\"name: envoy.filters.http.cors\");\n    config_helper_.addConfigModifier(\n        [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) -> void {\n          auto* route_config = hcm.mutable_route_config();\n          auto* virtual_host = route_config->mutable_virtual_hosts(0);\n          {\n            auto* cors = virtual_host->mutable_cors();\n            cors->mutable_allow_origin_string_match()->Add()->set_exact(\"*\");\n            cors->set_allow_headers(\"content-type,x-grpc-web\");\n            cors->set_allow_methods(\"GET,POST\");\n          }\n        
});\n  }\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.set_proxy_100_continue(true); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                 {\":path\", \"/dynamo/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"expect\", \"100-continue\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  // Wait for the request headers to be received upstream.\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  if (continue_before_upstream_complete) {\n    if (with_multiple_1xx_headers) {\n      upstream_request_->encode100ContinueHeaders(\n          Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n      upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"102\"}}, false);\n      upstream_request_->encode100ContinueHeaders(\n          Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n    }\n    // This case tests sending on 100-Continue headers before the client has sent all the\n    // request data.\n    upstream_request_->encode100ContinueHeaders(\n        Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n    response->waitForContinueHeaders();\n  }\n  // Send all of the request data and wait for it to be received upstream.\n  codec_client_->sendData(*request_encoder_, 10, true);\n  
ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  if (!continue_before_upstream_complete) {\n    if (with_multiple_1xx_headers) {\n      upstream_request_->encode100ContinueHeaders(\n          Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n      upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"102\"}}, false);\n      upstream_request_->encode100ContinueHeaders(\n          Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n    }\n    // This case tests forwarding 100-Continue after the client has sent all data.\n    upstream_request_->encode100ContinueHeaders(\n        Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n    response->waitForContinueHeaders();\n  }\n  // Now send the rest of the response.\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  ASSERT(response->continueHeaders() != nullptr);\n  EXPECT_EQ(\"100\", response->continueHeaders()->getStatusValue());\n\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nvoid HttpIntegrationTest::testTwoRequests(bool network_backup) {\n  // if network_backup is false, this simply tests that Envoy can handle multiple\n  // requests on a connection.\n  //\n  // If network_backup is true, the first request will explicitly set the TCP level flow control\n  // as blocked as it finishes the encode and set a timer to unblock. 
The second stream should be\n  // created while the socket appears to be in the high watermark state, and regression tests that\n  // flow control will be corrected as the socket \"becomes unblocked\"\n  if (network_backup) {\n    config_helper_.addFilter(R\"EOF(\n  name: pause-filter\n  typed_config:\n    \"@type\": type.googleapis.com/google.protobuf.Empty\n  )EOF\");\n  }\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Request 1.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1024U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n\n  // Request 2.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 512);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(1024, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(512U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(1024U, response->body().size());\n}\n\nvoid HttpIntegrationTest::testLargeRequestUrl(uint32_t url_size, uint32_t max_headers_size) {\n  // `size` parameter dictates the size of each header that will be added to the request and `count`\n  // parameter is the number of headers to be added. The actual request byte size will exceed `size`\n  // due to the keys and other headers. 
The actual request header count will exceed `count` by four\n  // due to default headers.\n\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.mutable_max_request_headers_kb()->set_value(max_headers_size); });\n  max_request_headers_kb_ = max_headers_size;\n\n  Http::TestRequestHeaderMapImpl big_headers{{\":method\", \"GET\"},\n                                             {\":path\", \"/\" + std::string(url_size * 1024, 'a')},\n                                             {\":scheme\", \"http\"},\n                                             {\":authority\", \"host\"}};\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  if (url_size >= max_headers_size) {\n    // header size includes keys too, so expect rejection when equal\n    auto encoder_decoder = codec_client_->startRequest(big_headers);\n    auto response = std::move(encoder_decoder.second);\n\n    if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n      ASSERT_TRUE(codec_client_->waitForDisconnect());\n      EXPECT_TRUE(response->complete());\n      EXPECT_EQ(\"431\", response->headers().Status()->value().getStringView());\n    } else {\n      response->waitForReset();\n      codec_client_->close();\n    }\n  } else {\n    auto response = sendRequestAndWaitForResponse(big_headers, 0, default_response_headers_, 0);\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().Status()->value().getStringView());\n  }\n}\n\nvoid HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, uint32_t max_size,\n                                                  uint32_t max_count) {\n  useAccessLog(\"%RESPONSE_CODE_DETAILS%\");\n  // `size` parameter dictates the size of each header that will be added to the request and `count`\n  // parameter is the number of headers to be added. 
The actual request byte size will exceed `size`\n  // due to the keys and other headers. The actual request header count will exceed `count` by four\n  // due to default headers.\n\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_max_request_headers_kb()->set_value(max_size);\n        hcm.mutable_common_http_protocol_options()->mutable_max_headers_count()->set_value(\n            max_count);\n      });\n  max_request_headers_kb_ = max_size;\n  max_request_headers_count_ = max_count;\n\n  Http::TestRequestHeaderMapImpl big_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n\n  // Already added four headers.\n  for (unsigned int i = 0; i < count; i++) {\n    big_headers.addCopy(std::to_string(i), std::string(size * 1024, 'a'));\n  }\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  if (size >= max_size || count > max_count) {\n    // header size includes keys too, so expect rejection when equal\n    auto encoder_decoder = codec_client_->startRequest(big_headers);\n    auto response = std::move(encoder_decoder.second);\n\n    if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n      ASSERT_TRUE(codec_client_->waitForDisconnect());\n      EXPECT_TRUE(response->complete());\n      EXPECT_EQ(\"431\", response->headers().getStatusValue());\n    } else {\n      response->waitForReset();\n      codec_client_->close();\n    }\n  } else {\n    auto response = sendRequestAndWaitForResponse(big_headers, 0, default_response_headers_, 0);\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n  if (count > max_count) {\n    EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"too_many_headers\"));\n  }\n}\n\nvoid 
HttpIntegrationTest::testLargeRequestTrailers(uint32_t size, uint32_t max_size) {\n  // `size` parameter is the size of the trailer that will be added to the\n  // request. The actual request byte size will exceed `size` due to keys\n  // and other headers.\n\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.mutable_max_request_headers_kb()->set_value(max_size); });\n  max_request_headers_kb_ = max_size;\n  Http::TestRequestTrailerMapImpl request_trailers{{\"trailer\", \"trailer\"}};\n  request_trailers.addCopy(\"big\", std::string(size * 1024, 'a'));\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, 10, false);\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n\n  if (size >= max_size) {\n    if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n      ASSERT_TRUE(codec_client_->waitForDisconnect());\n      EXPECT_TRUE(response->complete());\n      EXPECT_EQ(\"431\", response->headers().getStatusValue());\n    } else {\n      // Expect a stream reset when the size of the trailers is larger than the maximum\n      // limit.\n      response->waitForReset();\n      codec_client_->close();\n      EXPECT_FALSE(response->complete());\n    }\n  } else {\n    waitForNextUpstreamRequest();\n    upstream_request_->encodeHeaders(default_response_headers_, true);\n    response->waitForEndStream();\n    EXPECT_TRUE(response->complete());\n  }\n}\n\nvoid HttpIntegrationTest::testManyRequestHeaders(std::chrono::milliseconds time) {\n  // This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid\n  // time-consuming asserts when using a large number 
of headers.\n  max_request_headers_kb_ = 96;\n  max_request_headers_count_ = 10005;\n\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_max_request_headers_kb()->set_value(max_request_headers_kb_);\n        hcm.mutable_common_http_protocol_options()->mutable_max_headers_count()->set_value(\n            max_request_headers_count_);\n      });\n\n  auto big_headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>(\n      {{Http::Headers::get().Method, \"GET\"},\n       {Http::Headers::get().Path, \"/test/long/url\"},\n       {Http::Headers::get().Scheme, \"http\"},\n       {Http::Headers::get().Host, \"host\"}});\n\n  for (int i = 0; i < 10000; i++) {\n    big_headers->addCopy(Http::LowerCaseString(std::to_string(i)), std::string(0, 'a'));\n  }\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response =\n      sendRequestAndWaitForResponse(*big_headers, 0, default_response_headers_, 0, 0, time);\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nvoid HttpIntegrationTest::testDownstreamResetBeforeResponseComplete() {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"cookie\", \"a=b\"},\n                                                                 {\"cookie\", \"c=d\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = 
std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, 0, true);\n  waitForNextUpstreamRequest();\n\n  EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Cookie)->value(), \"a=b; c=d\");\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, false);\n\n  response->waitForBodyData(512);\n  codec_client_->sendReset(*request_encoder_);\n\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_FALSE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n}\n\nvoid HttpIntegrationTest::testTrailers(uint64_t request_size, uint64_t response_size,\n                                       bool check_request, bool check_response) {\n  Http::TestRequestTrailerMapImpl request_trailers{{\"request1\", \"trailer1\"},\n                                                   {\"request2\", \"trailer2\"}};\n  Http::TestResponseTrailerMapImpl response_trailers{{\"response1\", \"trailer1\"},\n                                                     {\"response2\", \"trailer2\"}};\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 
{\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, request_size, false);\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(response_size, false);\n  upstream_request_->encodeTrailers(response_trailers);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(request_size, upstream_request_->bodyLength());\n  if (check_request) {\n    EXPECT_THAT(*upstream_request_->trailers(), HeaderMapEqualRef(&request_trailers));\n  } else {\n    EXPECT_EQ(upstream_request_->trailers(), nullptr);\n  }\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(response_size, response->body().size());\n  if (check_response) {\n    EXPECT_THAT(*response->trailers(), HeaderMapEqualRef(&response_trailers));\n  } else {\n    EXPECT_EQ(response->trailers(), nullptr);\n  }\n}\n\nvoid HttpIntegrationTest::testAdminDrain(Http::CodecClient::Type admin_request_type) {\n  initialize();\n\n  uint32_t http_port = lookupPort(\"http\");\n  codec_client_ = makeHttpConnection(http_port);\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"HEAD\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"}};\n  IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  waitForNextUpstreamRequest(0);\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  // Invoke drain listeners endpoint and validate that we can still work on inflight requests.\n  BufferingStreamDecoderPtr 
admin_response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"admin\"), \"POST\", \"/drain_listeners\", \"\", admin_request_type, version_);\n  EXPECT_TRUE(admin_response->complete());\n  EXPECT_EQ(\"200\", admin_response->headers().getStatusValue());\n  EXPECT_EQ(\"OK\\n\", admin_response->body());\n\n  upstream_request_->encodeData(512, true);\n\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n\n  // Wait for the response to be read by the codec client.\n  response->waitForEndStream();\n\n  ASSERT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), Http::HttpStatusIs(\"200\"));\n\n  // Validate that the listeners have been stopped.\n  test_server_->waitForCounterEq(\"listener_manager.listener_stopped\", 1);\n\n  // Validate that port is closed and can be bound by other sockets.\n  // This does not work for HTTP/3 because the port is not closed until the listener is completely\n  // destroyed. TODO(danzh) Match TCP behavior as much as possible.\n  if (downstreamProtocol() != Http::CodecClient::Type::HTTP3) {\n    ASSERT_TRUE(waitForPortAvailable(http_port));\n  }\n}\n\nvoid HttpIntegrationTest::testMaxStreamDuration() {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    auto* cluster = static_resources->mutable_clusters(0);\n    auto* http_protocol_options = cluster->mutable_common_http_protocol_options();\n    http_protocol_options->mutable_max_stream_duration()->MergeFrom(\n        ProtobufUtil::TimeUtil::MillisecondsToDuration(200));\n  });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  
ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_max_duration_reached\", 1);\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    response->waitForReset();\n    codec_client_->close();\n  }\n}\n\nvoid HttpIntegrationTest::testMaxStreamDurationWithRetry(bool invoke_retry_upstream_disconnect) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    auto* cluster = static_resources->mutable_clusters(0);\n    auto* http_protocol_options = cluster->mutable_common_http_protocol_options();\n    http_protocol_options->mutable_max_stream_duration()->MergeFrom(\n        ProtobufUtil::TimeUtil::MillisecondsToDuration(1000));\n  });\n\n  Http::TestRequestHeaderMapImpl retriable_header = Http::TestRequestHeaderMapImpl{\n      {\":method\", \"POST\"},    {\":path\", \"/test/long/url\"},     {\":scheme\", \"http\"},\n      {\":authority\", \"host\"}, {\"x-forwarded-for\", \"10.0.0.1\"}, {\"x-envoy-retry-on\", \"5xx\"}};\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(retriable_header);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  } else {\n    
ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_max_duration_reached\", 1);\n\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  if (invoke_retry_upstream_disconnect) {\n    test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_max_duration_reached\", 2);\n    if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n      ASSERT_TRUE(codec_client_->waitForDisconnect());\n    } else {\n      response->waitForReset();\n      codec_client_->close();\n    }\n\n    EXPECT_EQ(\"408\", response->headers().getStatusValue());\n  } else {\n    Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n    upstream_request_->encodeHeaders(response_headers, true);\n\n    response->waitForHeaders();\n    codec_client_->close();\n\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n}\n\nstd::string HttpIntegrationTest::listenerStatPrefix(const std::string& stat_name) {\n  if (version_ == Network::Address::IpVersion::v4) {\n    return \"listener.127.0.0.1_0.\" + stat_name;\n  }\n  return \"listener.[__1]_0.\" + stat_name;\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http_integration.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"common/http/codec_client.h\"\n#include \"common/network/filter_impl.h\"\n\n#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/printers.h\"\n\nnamespace Envoy {\n\n/**\n * HTTP codec client used during integration testing.\n */\nclass IntegrationCodecClient : public Http::CodecClientProd {\npublic:\n  IntegrationCodecClient(Event::Dispatcher& dispatcher, Random::RandomGenerator& random,\n                         Network::ClientConnectionPtr&& conn,\n                         Upstream::HostDescriptionConstSharedPtr host_description,\n                         Http::CodecClient::Type type);\n\n  IntegrationStreamDecoderPtr makeHeaderOnlyRequest(const Http::RequestHeaderMap& headers);\n  IntegrationStreamDecoderPtr makeRequestWithBody(const Http::RequestHeaderMap& headers,\n                                                  uint64_t body_size);\n  IntegrationStreamDecoderPtr makeRequestWithBody(const Http::RequestHeaderMap& headers,\n                                                  const std::string& body);\n  bool sawGoAway() const { return saw_goaway_; }\n  bool connected() const { return connected_; }\n  void sendData(Http::RequestEncoder& encoder, absl::string_view data, bool end_stream);\n  void sendData(Http::RequestEncoder& encoder, Buffer::Instance& data, bool end_stream);\n  void sendData(Http::RequestEncoder& encoder, uint64_t size, bool end_stream);\n  void sendTrailers(Http::RequestEncoder& encoder, const Http::RequestTrailerMap& trailers);\n  void sendReset(Http::RequestEncoder& encoder);\n  // Intentionally makes a copy of metadata_map.\n  void sendMetadata(Http::RequestEncoder& encoder, Http::MetadataMap metadata_map);\n  std::pair<Http::RequestEncoder&, IntegrationStreamDecoderPtr>\n  startRequest(const Http::RequestHeaderMap& headers);\n  ABSL_MUST_USE_RESULT AssertionResult\n  
waitForDisconnect(std::chrono::milliseconds time_to_wait = TestUtility::DefaultTimeout);\n  Network::ClientConnection* connection() const { return connection_.get(); }\n  Network::ConnectionEvent lastConnectionEvent() const { return last_connection_event_; }\n  Network::Connection& rawConnection() { return *connection_; }\n  bool disconnected() { return disconnected_; }\n\nprivate:\n  struct ConnectionCallbacks : public Network::ConnectionCallbacks {\n    ConnectionCallbacks(IntegrationCodecClient& parent) : parent_(parent) {}\n\n    // Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override;\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    IntegrationCodecClient& parent_;\n  };\n\n  struct CodecCallbacks : public Http::ConnectionCallbacks {\n    CodecCallbacks(IntegrationCodecClient& parent) : parent_(parent) {}\n\n    // Http::ConnectionCallbacks\n    void onGoAway(Http::GoAwayErrorCode) override { parent_.saw_goaway_ = true; }\n\n    IntegrationCodecClient& parent_;\n  };\n\n  void flushWrite();\n\n  Event::Dispatcher& dispatcher_;\n  ConnectionCallbacks callbacks_;\n  CodecCallbacks codec_callbacks_;\n  bool connected_{};\n  bool disconnected_{};\n  bool saw_goaway_{};\n  Network::ConnectionEvent last_connection_event_;\n};\n\nusing IntegrationCodecClientPtr = std::unique_ptr<IntegrationCodecClient>;\n\n/**\n * Test fixture for HTTP and HTTP/2 integration tests.\n */\nclass HttpIntegrationTest : public BaseIntegrationTest {\npublic:\n  // TODO(jmarantz): Remove this once\n  // https://github.com/envoyproxy/envoy-filter-example/pull/69 is reverted.\n  HttpIntegrationTest(Http::CodecClient::Type downstream_protocol,\n                      Network::Address::IpVersion version, TestTimeSystemPtr,\n                      const std::string& config = ConfigHelper::httpProxyConfig())\n      : HttpIntegrationTest(downstream_protocol, version, config) {}\n\n  
HttpIntegrationTest(Http::CodecClient::Type downstream_protocol,\n                      Network::Address::IpVersion version,\n                      const std::string& config = ConfigHelper::httpProxyConfig());\n\n  HttpIntegrationTest(Http::CodecClient::Type downstream_protocol,\n                      const InstanceConstSharedPtrFn& upstream_address_fn,\n                      Network::Address::IpVersion version,\n                      const std::string& config = ConfigHelper::httpProxyConfig());\n  ~HttpIntegrationTest() override;\n\nprotected:\n  void useAccessLog(absl::string_view format = \"\");\n\n  Network::TransportSocketFactoryPtr createUpstreamTlsContext();\n  IntegrationCodecClientPtr makeHttpConnection(uint32_t port);\n  // Makes a http connection object without checking its connected state.\n  virtual IntegrationCodecClientPtr makeRawHttpConnection(\n      Network::ClientConnectionPtr&& conn,\n      absl::optional<envoy::config::core::v3::Http2ProtocolOptions> http2_options);\n  // Makes a http connection object with asserting a connected state.\n  IntegrationCodecClientPtr makeHttpConnection(Network::ClientConnectionPtr&& conn);\n\n  // Sets downstream_protocol_ and alters the HTTP connection manager codec type in the\n  // config_helper_.\n  void setDownstreamProtocol(Http::CodecClient::Type type);\n\n  // Enable the encoding/decoding of Http1 trailers downstream\n  ConfigHelper::HttpModifierFunction setEnableDownstreamTrailersHttp1();\n\n  // Enable the encoding/decoding of Http1 trailers upstream\n  ConfigHelper::ConfigModifierFunction setEnableUpstreamTrailersHttp1();\n\n  // Sends |request_headers| and |request_body_size| bytes of body upstream.\n  // Configured upstream to send |response_headers| and |response_body_size|\n  // bytes of body downstream.\n  //\n  // Waits for the complete downstream response before returning.\n  // Requires |codec_client_| to be initialized.\n  IntegrationStreamDecoderPtr sendRequestAndWaitForResponse(\n      const 
Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size,\n      const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_body_size,\n      int upstream_index = 0, std::chrono::milliseconds time = TestUtility::DefaultTimeout);\n\n  // Wait for the end of stream on the next upstream stream on any of the provided fake upstreams.\n  // Sets fake_upstream_connection_ to the connection and upstream_request_ to stream.\n  // In cases where the upstream that will receive the request is not deterministic, a second\n  // upstream index may be provided, in which case both upstreams will be checked for requests.\n  absl::optional<uint64_t> waitForNextUpstreamRequest(\n      const std::vector<uint64_t>& upstream_indices,\n      std::chrono::milliseconds connection_wait_timeout = TestUtility::DefaultTimeout);\n  void waitForNextUpstreamRequest(\n      uint64_t upstream_index = 0,\n      std::chrono::milliseconds connection_wait_timeout = TestUtility::DefaultTimeout);\n\n  // Close |codec_client_| and |fake_upstream_connection_| cleanly.\n  void cleanupUpstreamAndDownstream();\n\n  // Verifies the response_headers contains the expected_headers, and response body matches given\n  // body string.\n  void verifyResponse(IntegrationStreamDecoderPtr response, const std::string& response_code,\n                      const Http::TestResponseHeaderMapImpl& expected_headers,\n                      const std::string& expected_body);\n\n  // Helper that sends a request to Envoy, and verifies if Envoy response headers and body size is\n  // the same as the expected headers map.\n  // Requires the \"http\" port has been registered.\n  void sendRequestAndVerifyResponse(const Http::TestRequestHeaderMapImpl& request_headers,\n                                    const int request_size,\n                                    const Http::TestResponseHeaderMapImpl& response_headers,\n                                    const int response_size, const int 
backend_idx);\n\n  // Check for completion of upstream_request_, and a simple \"200\" response.\n  void checkSimpleRequestSuccess(uint64_t expected_request_size, uint64_t expected_response_size,\n                                 IntegrationStreamDecoder* response);\n\n  using ConnectionCreationFunction = std::function<Network::ClientConnectionPtr()>;\n  // Sends a simple header-only HTTP request, and waits for a response.\n  IntegrationStreamDecoderPtr makeHeaderOnlyRequest(ConnectionCreationFunction* create_connection,\n                                                    int upstream_index,\n                                                    const std::string& path = \"/test/long/url\",\n                                                    const std::string& authority = \"host\");\n  void testRouterNotFound();\n  void testRouterNotFoundWithBody();\n  void testRouterVirtualClusters();\n\n  void testRouterRequestAndResponseWithBody(uint64_t request_size, uint64_t response_size,\n                                            bool big_header, bool set_content_length_header = false,\n                                            ConnectionCreationFunction* creator = nullptr);\n  void testRouterHeaderOnlyRequestAndResponse(ConnectionCreationFunction* creator = nullptr,\n                                              int upstream_index = 0,\n                                              const std::string& path = \"/test/long/url\",\n                                              const std::string& authority = \"host\");\n  void testRequestAndResponseShutdownWithActiveConnection();\n\n  // Disconnect tests\n  void testRouterUpstreamDisconnectBeforeRequestComplete();\n  void\n  testRouterUpstreamDisconnectBeforeResponseComplete(ConnectionCreationFunction* creator = nullptr);\n  void testRouterDownstreamDisconnectBeforeRequestComplete(\n      ConnectionCreationFunction* creator = nullptr);\n  void testRouterDownstreamDisconnectBeforeResponseComplete(\n      
ConnectionCreationFunction* creator = nullptr);\n  void testRouterUpstreamResponseBeforeRequestComplete();\n\n  void testTwoRequests(bool force_network_backup = false);\n  void testLargeHeaders(Http::TestRequestHeaderMapImpl request_headers,\n                        Http::TestRequestTrailerMapImpl request_trailers, uint32_t size,\n                        uint32_t max_size);\n  void testLargeRequestUrl(uint32_t url_size, uint32_t max_headers_size);\n  void testLargeRequestHeaders(uint32_t size, uint32_t count, uint32_t max_size = 60,\n                               uint32_t max_count = 100);\n  void testLargeRequestTrailers(uint32_t size, uint32_t max_size = 60);\n  void testManyRequestHeaders(std::chrono::milliseconds time = TestUtility::DefaultTimeout);\n\n  void testAddEncodedTrailers();\n  void testRetry();\n  void testRetryHittingBufferLimit();\n  void testRetryAttemptCountHeader();\n  void testGrpcRetry();\n\n  void testEnvoyHandling100Continue(bool additional_continue_from_upstream = false,\n                                    const std::string& via = \"\");\n  void testEnvoyProxying1xx(bool continue_before_upstream_complete = false,\n                            bool with_encoder_filter = false,\n                            bool with_multiple_1xx_headers = false);\n\n  // HTTP/2 client tests.\n  void testDownstreamResetBeforeResponseComplete();\n  // Test that trailers are sent. 
request_trailers_present and\n  // response_trailers_present will check if the trailers are present, otherwise\n  // makes sure they were dropped.\n  void testTrailers(uint64_t request_size, uint64_t response_size, bool request_trailers_present,\n                    bool response_trailers_present);\n  // Test /drain_listener from admin portal.\n  void testAdminDrain(Http::CodecClient::Type admin_request_type);\n  // Test max stream duration.\n  void testMaxStreamDuration();\n  void testMaxStreamDurationWithRetry(bool invoke_retry_upstream_disconnect);\n  Http::CodecClient::Type downstreamProtocol() const { return downstream_protocol_; }\n  // Prefix listener stat with IP:port, including IP version dependent loopback address.\n  std::string listenerStatPrefix(const std::string& stat_name);\n\n  // The client making requests to Envoy.\n  IntegrationCodecClientPtr codec_client_;\n  // A placeholder for the first upstream connection.\n  FakeHttpConnectionPtr fake_upstream_connection_;\n  // A placeholder for the first request received at upstream.\n  FakeStreamPtr upstream_request_;\n  // A pointer to the request encoder, if used.\n  Http::RequestEncoder* request_encoder_{nullptr};\n  // The response headers sent by sendRequestAndWaitForResponse() by default.\n  Http::TestResponseHeaderMapImpl default_response_headers_{{\":status\", \"200\"}};\n  Http::TestRequestHeaderMapImpl default_request_headers_{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  // The codec type for the client-to-Envoy connection\n  Http::CodecClient::Type downstream_protocol_{Http::CodecClient::Type::HTTP1};\n  uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB};\n  uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT};\n  std::string access_log_name_;\n  testing::NiceMock<Random::MockRandomGenerator> random_;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http_protocol_integration.cc",
    "content": "#include \"test/integration/http_protocol_integration.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\nstd::vector<HttpProtocolTestParams> HttpProtocolIntegrationTest::getProtocolTestParams(\n    const std::vector<Http::CodecClient::Type>& downstream_protocols,\n    const std::vector<FakeHttpConnection::Type>& upstream_protocols) {\n  std::vector<HttpProtocolTestParams> ret;\n\n  for (auto ip_version : TestEnvironment::getIpVersionsForTest()) {\n    for (auto downstream_protocol : downstream_protocols) {\n      for (auto upstream_protocol : upstream_protocols) {\n        ret.push_back(HttpProtocolTestParams{ip_version, downstream_protocol, upstream_protocol});\n      }\n    }\n  }\n  return ret;\n}\n\nstd::string HttpProtocolIntegrationTest::protocolTestParamsToString(\n    const ::testing::TestParamInfo<HttpProtocolTestParams>& params) {\n  return absl::StrCat(\n      (params.param.version == Network::Address::IpVersion::v4 ? \"IPv4_\" : \"IPv6_\"),\n      (params.param.downstream_protocol == Http::CodecClient::Type::HTTP2 ? \"Http2Downstream_\"\n                                                                          : \"HttpDownstream_\"),\n      (params.param.upstream_protocol == FakeHttpConnection::Type::HTTP2 ? \"Http2Upstream\"\n                                                                         : \"HttpUpstream\"));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http_protocol_integration.h",
    "content": "#pragma once\n\n#include \"test/integration/http_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nstruct HttpProtocolTestParams {\n  Network::Address::IpVersion version;\n  Http::CodecClient::Type downstream_protocol;\n  FakeHttpConnection::Type upstream_protocol;\n};\n\n// Allows easy testing of Envoy code for HTTP/HTTP2 upstream/downstream.\n//\n// Usage:\n//\n// using MyTest = HttpProtocolIntegrationTest;\n//\n// INSTANTIATE_TEST_SUITE_P(Protocols, MyTest,\n//                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n//                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n//\n//\n// TEST_P(MyTest, TestInstance) {\n// ....\n// }\nclass HttpProtocolIntegrationTest : public testing::TestWithParam<HttpProtocolTestParams>,\n                                    public HttpIntegrationTest {\npublic:\n  // By default returns 8 combinations of\n  // [HTTP  upstream / HTTP  downstream] x [Ipv4, IPv6]\n  // [HTTP  upstream / HTTP2 downstream] x [Ipv4, IPv6]\n  // [HTTP2 upstream / HTTP2 downstream] x [IPv4, Ipv6]\n  // [HTTP upstream  / HTTP2 downstream] x [IPv4, Ipv6]\n  //\n  // Upstream and downstream protocols may be changed via the input vectors.\n  // Address combinations are propagated from TestEnvironment::getIpVersionsForTest()\n  static std::vector<HttpProtocolTestParams>\n  getProtocolTestParams(const std::vector<Http::CodecClient::Type>& downstream_protocols =\n                            {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2},\n                        const std::vector<FakeHttpConnection::Type>& upstream_protocols = {\n                            FakeHttpConnection::Type::HTTP1, FakeHttpConnection::Type::HTTP2});\n\n  // Allows pretty printed test names of the form\n  // FooTestCase.BarInstance/IPv4_Http2Downstream_HttpUpstream\n  static std::string\n  protocolTestParamsToString(const 
::testing::TestParamInfo<HttpProtocolTestParams>& p);\n\n  HttpProtocolIntegrationTest()\n      : HttpIntegrationTest(GetParam().downstream_protocol, GetParam().version) {}\n\n  void SetUp() override {\n    setDownstreamProtocol(GetParam().downstream_protocol);\n    setUpstreamProtocol(GetParam().upstream_protocol);\n  }\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http_subset_lb_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.validate.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"test/integration/http_integration.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass HttpSubsetLbIntegrationTest\n    : public testing::TestWithParam<envoy::config::cluster::v3::Cluster::LbPolicy>,\n      public HttpIntegrationTest {\npublic:\n  // Returns all load balancer types except ORIGINAL_DST_LB and CLUSTER_PROVIDED.\n  static std::vector<envoy::config::cluster::v3::Cluster::LbPolicy> getSubsetLbTestParams() {\n    int first = static_cast<int>(envoy::config::cluster::v3::Cluster::LbPolicy_MIN);\n    int last = static_cast<int>(envoy::config::cluster::v3::Cluster::LbPolicy_MAX);\n    ASSERT(first < last);\n\n    std::vector<envoy::config::cluster::v3::Cluster::LbPolicy> ret;\n    for (int i = first; i <= last; i++) {\n      if (!envoy::config::cluster::v3::Cluster::LbPolicy_IsValid(i)) {\n        continue;\n      }\n\n      auto policy = static_cast<envoy::config::cluster::v3::Cluster::LbPolicy>(i);\n\n      if (policy == envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB ||\n          policy == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED ||\n          policy == envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG) {\n        continue;\n      }\n\n      ret.push_back(policy);\n    }\n\n    return ret;\n  }\n\n  // Converts an LbPolicy to strings suitable for test names.\n  static std::string subsetLbTestParamsToString(\n      const testing::TestParamInfo<envoy::config::cluster::v3::Cluster::LbPolicy>& p) {\n    const std::string& policy_name = envoy::config::cluster::v3::Cluster::LbPolicy_Name(p.param);\n    
return absl::StrReplaceAll(policy_name, {{\"_\", \"\"}});\n  }\n\n  HttpSubsetLbIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1,\n                            TestEnvironment::getIpVersionsForTest().front(),\n                            ConfigHelper::httpProxyConfig()),\n        is_hash_lb_(GetParam() == envoy::config::cluster::v3::Cluster::RING_HASH ||\n                    GetParam() == envoy::config::cluster::v3::Cluster::MAGLEV) {\n    autonomous_upstream_ = true;\n    setUpstreamCount(num_hosts_);\n\n    config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* static_resources = bootstrap.mutable_static_resources();\n      auto* cluster = static_resources->mutable_clusters(0);\n\n      cluster->set_lb_policy(GetParam());\n\n      // Create subsets based on type value of the \"type\" metadata.\n      cluster->mutable_lb_subset_config()->add_subset_selectors()->add_keys(type_key_);\n\n      cluster->clear_load_assignment();\n\n      // Create a load assignment with num_hosts_ entries with metadata split evenly between\n      // type=a and type=b.\n      auto* load_assignment = cluster->mutable_load_assignment();\n      load_assignment->set_cluster_name(cluster->name());\n      auto* endpoints = load_assignment->add_endpoints();\n      for (uint32_t i = 0; i < num_hosts_; i++) {\n        auto* lb_endpoint = endpoints->add_lb_endpoints();\n\n        // ConfigHelper will fill in ports later.\n        auto* endpoint = lb_endpoint->mutable_endpoint();\n        auto* addr = endpoint->mutable_address()->mutable_socket_address();\n        addr->set_address(Network::Test::getLoopbackAddressString(\n            TestEnvironment::getIpVersionsForTest().front()));\n        addr->set_port_value(0);\n\n        // Assign type metadata based on i.\n        auto* metadata = lb_endpoint->mutable_metadata();\n        Envoy::Config::Metadata::mutableMetadataValue(*metadata, \"envoy.lb\", type_key_)\n          
  .set_string_value((i % 2 == 0) ? \"a\" : \"b\");\n      }\n    });\n\n    config_helper_.addConfigModifier(\n        [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) {\n          auto* vhost = hcm.mutable_route_config()->mutable_virtual_hosts(0);\n\n          // Report the host's type metadata and remote address on every response.\n          auto* resp_header = vhost->add_response_headers_to_add();\n          auto* header = resp_header->mutable_header();\n          header->set_key(host_type_header_);\n          header->set_value(\n              fmt::format(R\"EOF(%UPSTREAM_METADATA([\"envoy.lb\", \"{}\"])%)EOF\", type_key_));\n\n          resp_header = vhost->add_response_headers_to_add();\n          header = resp_header->mutable_header();\n          header->set_key(host_header_);\n          header->set_value(\"%UPSTREAM_REMOTE_ADDRESS%\");\n\n          // Create routes for x-type=a and x-type=b headers.\n          vhost->clear_routes();\n          configureRoute(vhost->add_routes(), \"a\");\n          configureRoute(vhost->add_routes(), \"b\");\n        });\n  }\n\n  void configureRoute(envoy::config::route::v3::Route* route, const std::string& host_type) {\n    auto* match = route->mutable_match();\n    match->set_prefix(\"/\");\n\n    // Match the x-type header against the given host_type (a/b).\n    auto* match_header = match->add_headers();\n    match_header->set_name(type_header_);\n    match_header->set_exact_match(host_type);\n\n    // Route to cluster_0, selecting metadata type=a or type=b.\n    auto* action = route->mutable_route();\n    action->set_cluster(\"cluster_0\");\n    auto* metadata_match = action->mutable_metadata_match();\n    Envoy::Config::Metadata::mutableMetadataValue(*metadata_match, \"envoy.lb\", type_key_)\n        .set_string_value(host_type);\n\n    // Set a hash policy for hashing load balancers.\n    if (is_hash_lb_) {\n      
action->add_hash_policy()->mutable_header()->set_header_name(hash_header_);\n    }\n  };\n\n  void SetUp() override {\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP1);\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP1);\n  }\n\n  // Runs a subset lb test with the given request headers, expecting the x-host-type header to\n  // the given type (\"a\" or \"b\"). If is_hash_lb_, verifies that a single host is selected over n\n  // iterations (e.g. for maglev/hash-ring policies). Otherwise, expected more than one host to be\n  // selected over at least n iterations and at most m.\n  void runTest(Http::TestRequestHeaderMapImpl& request_headers,\n               const std::string expected_host_type, const int n = 100, const int m = 1000) {\n    ASSERT_LT(n, m);\n\n    std::set<std::string> hosts;\n    for (int i = 0; i < m; i++) {\n      Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n\n      // Send header only request.\n      IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers);\n      response->waitForEndStream();\n\n      // Expect a response from a host in the correct subset.\n      EXPECT_EQ(response->headers()\n                    .get(Envoy::Http::LowerCaseString{host_type_header_})\n                    ->value()\n                    .getStringView(),\n                expected_host_type);\n\n      // Record the upstream address.\n      hosts.emplace(response->headers()\n                        .get(Envoy::Http::LowerCaseString{host_header_})\n                        ->value()\n                        .getStringView());\n\n      if (i >= n && (is_hash_lb_ || hosts.size() > 1)) {\n        // Once we've completed n iterations, quit for hash lb policies. 
For others, keep going\n        // until we've seen multiple hosts (as expected) or reached m iterations.\n        break;\n      }\n    }\n\n    if (is_hash_lb_) {\n      EXPECT_EQ(hosts.size(), 1) << \"Expected a single unique host to be selected for \"\n                                 << envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam());\n    } else {\n      EXPECT_GT(hosts.size(), 1) << \"Expected multiple hosts to be selected for \"\n                                 << envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam());\n    }\n  }\n\n  const uint32_t num_hosts_{4};\n  const bool is_hash_lb_;\n\n  const std::string hash_header_{\"x-hash\"};\n  const std::string host_type_header_{\"x-host-type\"};\n  const std::string host_header_{\"x-host\"};\n  const std::string type_header_{\"x-type\"};\n  const std::string type_key_{\"type\"};\n\n  Http::TestRequestHeaderMapImpl type_a_request_headers_{\n      {\":method\", \"GET\"},     {\":path\", \"/test\"}, {\":scheme\", \"http\"},\n      {\":authority\", \"host\"}, {\"x-type\", \"a\"},    {\"x-hash\", \"hash-a\"}};\n  Http::TestRequestHeaderMapImpl type_b_request_headers_{\n      {\":method\", \"GET\"},     {\":path\", \"/test\"}, {\":scheme\", \"http\"},\n      {\":authority\", \"host\"}, {\"x-type\", \"b\"},    {\"x-hash\", \"hash-b\"}};\n};\n\nINSTANTIATE_TEST_SUITE_P(SubsetCompatibleLoadBalancers, HttpSubsetLbIntegrationTest,\n                         testing::ValuesIn(HttpSubsetLbIntegrationTest::getSubsetLbTestParams()),\n                         HttpSubsetLbIntegrationTest::subsetLbTestParamsToString);\n\n// Tests each subset-compatible load balancer policy with 4 hosts divided into 2 subsets.\nTEST_P(HttpSubsetLbIntegrationTest, SubsetLoadBalancer) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  runTest(type_a_request_headers_, \"a\");\n  runTest(type_b_request_headers_, \"b\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http_timeout_integration_test.cc",
    "content": "#include \"test/integration/http_timeout_integration_test.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, HttpTimeoutIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Sends a request with a global timeout specified, sleeps for longer than the\n// timeout, and ensures that a timeout is received.\nTEST_P(HttpTimeoutIntegrationTest, GlobalTimeout) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-upstream-rq-timeout-ms\", \"500\"}});\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  codec_client_->sendData(*request_encoder_, 0, true);\n\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Trigger global timeout.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(501));\n\n  // Ensure we got a timeout downstream and canceled the upstream request.\n  response->waitForHeaders();\n  ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::seconds(15)));\n\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, 
upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"504\", response->headers().getStatusValue());\n}\n\n// Testing that `x-envoy-expected-timeout-ms` header, set by egress envoy, is respected by ingress\n// envoy when `respect_expected_rq_timeout` field is enabled. Sends a request with a global timeout\n// specified, sleeps for longer than the timeout, and ensures that a timeout is received.\nTEST_P(HttpTimeoutIntegrationTest, UseTimeoutSetByEgressEnvoy) {\n  enableRespectExpectedRqTimeout(true);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-upstream-rq-timeout-ms\", \"500\"},\n                                     {\"x-envoy-expected-rq-timeout-ms\", \"300\"}});\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  codec_client_->sendData(*request_encoder_, 0, true);\n\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Trigger global timeout, populated from `x-envoy-expected-rq-timeout-ms` header.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(301));\n\n  // Ensure we got a timeout downstream and canceled the upstream request.\n  response->waitForHeaders();\n  
ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::seconds(15)));\n\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"504\", response->headers().getStatusValue());\n}\n\n// Testing that ingress envoy derives new timeout value and sets `x-envoy-expected-timeout-ms`\n// header, when timeout has not been set by egress envoy and `respect_expected_rq_timeout` field is\n// enabled. Sends a request with a global timeout specified, sleeps for longer than the timeout, and\n// ensures that a timeout is received.\nTEST_P(HttpTimeoutIntegrationTest, DeriveTimeoutInIngressEnvoy) {\n  enableRespectExpectedRqTimeout(true);\n  initialize();\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-upstream-rq-timeout-ms\", \"500\"}});\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  codec_client_->sendData(*request_encoder_, 0, true);\n\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Trigger global timeout, populated from `x-envoy-expected-rq-timeout-ms` header.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(501));\n\n  // Ensure we got a 
timeout downstream and canceled the upstream request.\n  response->waitForHeaders();\n  ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::seconds(15)));\n\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"504\", response->headers().getStatusValue());\n}\n\n// Testing that `x-envoy-expected-timeout-ms` header, set by egress envoy, is ignored by ingress\n// envoy and new value is derived. Sends a request with a global timeout specified,\n// sleeps for longer than the timeout, and ensures that a timeout is received.\nTEST_P(HttpTimeoutIntegrationTest, IgnoreTimeoutSetByEgressEnvoy) {\n  enableRespectExpectedRqTimeout(false);\n  initialize();\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-upstream-rq-timeout-ms\", \"500\"},\n                                     {\"x-envoy-expected-rq-timeout-ms\", \"600\"}});\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  codec_client_->sendData(*request_encoder_, 0, true);\n\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Trigger global timeout, populated from `x-envoy-expected-rq-timeout-ms` 
header.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(501));\n\n  // Ensure we got a timeout downstream and canceled the upstream request.\n  response->waitForHeaders();\n  ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::seconds(15)));\n\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"504\", response->headers().getStatusValue());\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/7154 in which\n// resetStream() was only called after a response timeout for upstream requests\n// that had not received headers yet. This meant that decodeData might be\n// called on a destroyed UpstreamRequest.\nTEST_P(HttpTimeoutIntegrationTest, GlobalTimeoutAfterHeadersBeforeBodyResetsUpstream) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-forwarded-for\", \"10.0.0.1\"},\n                                                 {\"x-envoy-upstream-rq-timeout-ms\", \"100\"}};\n  auto encoder_decoder = codec_client_->startRequest(request_headers);\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n\n  codec_client_->sendData(*request_encoder_, 100, true);\n\n  
ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Respond with headers, not end of stream.\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  upstream_request_->encodeHeaders(response_headers, false);\n\n  response->waitForHeaders();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n  // Trigger global timeout.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(200));\n\n  ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::seconds(15)));\n\n  response->waitForReset();\n\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n}\n\n// Sends a request with a global timeout and per try timeout specified, sleeps\n// for longer than the per try but slightly less than the global timeout.\n// Ensures that two requests are attempted and a timeout is returned\n// downstream.\nTEST_P(HttpTimeoutIntegrationTest, PerTryTimeout) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"},\n                                     {\"x-envoy-upstream-rq-timeout-ms\", \"500\"},\n                                     {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"400\"}});\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  
ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  codec_client_->sendData(*request_encoder_, 0, true);\n\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Trigger per try timeout (but not global timeout) and wait for reset.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(400));\n  ASSERT_TRUE(upstream_request_->waitForReset());\n\n  // Wait for a second request to be sent upstream. Max retry backoff is 25ms so advance time that\n  // much.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(25));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Trigger global timeout.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(100));\n  response->waitForHeaders();\n\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"504\", response->headers().getStatusValue());\n}\n\n// Sends a request with a per try timeout specified but no global timeout.\n// Ensures that two requests are attempted and a timeout is returned\n// downstream.\nTEST_P(HttpTimeoutIntegrationTest, PerTryTimeoutWithoutGlobalTimeout) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"},\n                                     
{\"x-envoy-upstream-rq-timeout-ms\", \"0\"},\n                                     {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"50\"}});\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  codec_client_->sendData(*request_encoder_, 0, true);\n\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Trigger per try timeout (but not global timeout) and wait for reset.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(50));\n  ASSERT_TRUE(upstream_request_->waitForReset());\n\n  // Wait for a second request to be sent upstream. Max retry backoff is 25ms so advance time that\n  // much. This is always less than the next request's per try timeout.\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(25));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Encode 200 response headers for the first (timed out) request.\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  upstream_request_->encodeHeaders(response_headers, true);\n\n  response->waitForHeaders();\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// With hedge_on_per_try_timeout enabled via config, sends a request with a\n// global timeout and per try timeout specified, sleeps for longer than the per\n// try but slightly less than the global timeout. 
We then have the first\n// upstream request return headers and expect those to be returned downstream\n// (which proves the request was not canceled when the timeout was hit).\nTEST_P(HttpTimeoutIntegrationTest, HedgedPerTryTimeout) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"},\n                                     {\"x-envoy-hedge-on-per-try-timeout\", \"true\"},\n                                     {\"x-envoy-upstream-rq-timeout-ms\", \"500\"},\n                                     {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"400\"}});\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  codec_client_->sendData(*request_encoder_, 0, true);\n\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Trigger per try timeout (but not global timeout).\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(400));\n\n  // Trigger retry (there's a 25ms backoff before it's issued).\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(26));\n\n  // Wait for a second request to be sent upstream\n  FakeStreamPtr upstream_request2;\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request2));\n  
ASSERT_TRUE(upstream_request2->waitForHeadersComplete());\n  ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_));\n\n  // Encode 200 response headers for the first (timed out) request.\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  upstream_request_->encodeHeaders(response_headers, true);\n\n  response->waitForHeaders();\n\n  // The second request should be reset since we used the response from the first request.\n  ASSERT_TRUE(upstream_request2->waitForReset(std::chrono::seconds(15)));\n\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(HttpTimeoutIntegrationTest, HedgedPerTryTimeoutWithBodyNoBufferFirstRequestWins) {\n  testRouterRequestAndResponseWithHedgedPerTryTimeout(1024, 512, true);\n}\n\nTEST_P(HttpTimeoutIntegrationTest, HedgedPerTryTimeoutWithBodyNoBufferSecondRequestWins) {\n  testRouterRequestAndResponseWithHedgedPerTryTimeout(1024, 512, false);\n}\n\nTEST_P(HttpTimeoutIntegrationTest,\n       HedgedPerTryTimeoutLowUpstreamBufferLimitLargeRequestFirstRequestWins) {\n  config_helper_.setBufferLimits(1024, 1024 * 1024); // Set buffer limits upstream and downstream.\n  testRouterRequestAndResponseWithHedgedPerTryTimeout(1024 * 1024, 1024, true);\n}\n\nTEST_P(HttpTimeoutIntegrationTest,\n       HedgedPerTryTimeoutLowUpstreamBufferLimitLargeRequestSecondRequestWins) {\n  config_helper_.setBufferLimits(1024, 1024 * 1024); // Set buffer limits upstream and downstream.\n  testRouterRequestAndResponseWithHedgedPerTryTimeout(1024 * 1024, 1024, false);\n}\n\nTEST_P(HttpTimeoutIntegrationTest,\n       HedgedPerTryTimeoutLowDownstreamBufferLimitLargeResponseFirstRequestWins) {\n  config_helper_.setBufferLimits(1024 * 1024, 1024); // Set buffer limits upstream and downstream.\n  testRouterRequestAndResponseWithHedgedPerTryTimeout(1024, 
1024 * 1024, true);\n}\n\nTEST_P(HttpTimeoutIntegrationTest,\n       HedgedPerTryTimeoutLowDownstreamBufferLimitLargeResponseSecondRequestWins) {\n  config_helper_.setBufferLimits(1024 * 1024, 1024); // Set buffer limits upstream and downstream.\n  testRouterRequestAndResponseWithHedgedPerTryTimeout(1024, 1024 * 1024, false);\n}\n\n// Sends a request with x-envoy-hedge-on-per-try-timeout, sleeps (with\n// simulated time) for longer than the per try timeout but shorter than the\n// global timeout, asserts that a retry is sent, and then responds with a 200\n// response on the original request and ensures the downstream sees it.\n// Request/response/header size are configurable to test flow control. If\n// first_request_wins is true, then the \"winning\" response will be sent in\n// response to the first (timed out) request. If false, the second request will\n// get the good response.\nvoid HttpTimeoutIntegrationTest::testRouterRequestAndResponseWithHedgedPerTryTimeout(\n    uint64_t request_size, uint64_t response_size, bool first_request_wins) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"x-forwarded-for\", \"10.0.0.1\"},\n                                                 {\"x-envoy-retry-on\", \"5xx\"},\n                                                 {\"x-envoy-hedge-on-per-try-timeout\", \"true\"},\n                                                 {\"x-envoy-upstream-rq-timeout-ms\", \"5000\"},\n                                                 {\"x-envoy-upstream-rq-per-try-timeout-ms\", \"400\"}};\n  auto encoder_decoder = 
codec_client_->startRequest(request_headers);\n\n  auto response = std::move(encoder_decoder.second);\n  request_encoder_ = &encoder_decoder.first;\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n\n  codec_client_->sendData(*request_encoder_, request_size, true);\n\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Trigger per try timeout (but not global timeout).\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(400));\n\n  FakeStreamPtr upstream_request2;\n  // Trigger retry (there's a 25ms backoff before it's issued).\n  timeSystem().advanceTimeWait(std::chrono::milliseconds(26));\n\n  // Wait for a second request to be sent upstream\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request2));\n  ASSERT_TRUE(upstream_request2->waitForHeadersComplete());\n  ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"}};\n  if (first_request_wins) {\n    // Encode 200 response headers for the first (timed out) request.\n    upstream_request_->encodeHeaders(response_headers, response_size == 0);\n  } else {\n    // Encode 200 response headers for the second request.\n    upstream_request2->encodeHeaders(response_headers, response_size == 0);\n  }\n\n  response->waitForHeaders();\n\n  if (first_request_wins) {\n    // The second request should be reset since we used the response from the first request.\n    ASSERT_TRUE(upstream_request2->waitForReset(std::chrono::seconds(15)));\n  } else {\n    // The first request should be reset since we used the response from the second request.\n    ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::seconds(15)));\n  }\n\n  if (response_size) {\n    if (first_request_wins) 
{\n      upstream_request_->encodeData(response_size, true);\n    } else {\n      upstream_request2->encodeData(response_size, true);\n    }\n  }\n\n  response->waitForEndStream();\n\n  codec_client_->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(upstream_request2->complete());\n  if (first_request_wins) {\n    EXPECT_EQ(request_size, upstream_request_->bodyLength());\n  } else {\n    EXPECT_EQ(request_size, upstream_request2->bodyLength());\n  }\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/http_timeout_integration_test.h",
    "content": "#pragma once\n\n#include \"envoy/extensions/filters/http/router/v3/router.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"test/integration/http_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nclass HttpTimeoutIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                   public Event::TestUsingSimulatedTime,\n                                   public HttpIntegrationTest {\npublic:\n  // Arbitrarily choose HTTP2 here, the tests for this class are around\n  // timeouts which don't have version specific behavior.\n  HttpTimeoutIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {}\n\n  void SetUp() override {\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void testRouterRequestAndResponseWithHedgedPerTryTimeout(uint64_t request_size,\n                                                           uint64_t response_size,\n                                                           bool first_request_wins);\n\n  void initialize() override {\n    if (respect_expected_rq_timeout) {\n      config_helper_.addConfigModifier(\n          [&](envoy::extensions::filters::network::http_connection_manager::v3::\n                  HttpConnectionManager& hcm) {\n            envoy::extensions::filters::http::router::v3::Router router_config;\n            router_config.set_respect_expected_rq_timeout(respect_expected_rq_timeout);\n            // TestUtility::jsonConvert(router_config,\n            // *hcm.mutable_http_filters(0)->mutable_config());\n            hcm.mutable_http_filters(0)->mutable_typed_config()->PackFrom(router_config);\n          });\n    }\n\n    HttpIntegrationTest::initialize();\n  }\n\n  void enableRespectExpectedRqTimeout(bool enable) { respect_expected_rq_timeout = enable; }\n\n  bool 
respect_expected_rq_timeout{false};\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/idle_timeout_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"test/integration/http_protocol_integration.h\"\n#include \"test/test_common/test_time.h\"\n\nusing testing::HasSubstr;\n\nnamespace Envoy {\nnamespace {\n\nclass IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest {\npublic:\n  void initialize() override {\n    useAccessLog(\"%RESPONSE_CODE_DETAILS%\");\n    config_helper_.addConfigModifier(\n        [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) -> void {\n          if (enable_global_idle_timeout_) {\n            hcm.mutable_stream_idle_timeout()->set_seconds(0);\n            hcm.mutable_stream_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000);\n          }\n          if (enable_per_stream_idle_timeout_) {\n            auto* route_config = hcm.mutable_route_config();\n            auto* virtual_host = route_config->mutable_virtual_hosts(0);\n            auto* route = virtual_host->mutable_routes(0)->mutable_route();\n            route->mutable_idle_timeout()->set_seconds(0);\n            route->mutable_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000);\n\n            auto* header = virtual_host->mutable_response_headers_to_add()->Add()->mutable_header();\n            header->set_key(\"foo\");\n            header->set_value(\"bar\");\n          }\n          if (enable_request_timeout_) {\n            hcm.mutable_request_timeout()->set_seconds(0);\n            hcm.mutable_request_timeout()->set_nanos(RequestTimeoutMs * 1000 * 1000);\n          }\n\n          // For validating encode100ContinueHeaders() timer kick.\n          hcm.set_proxy_100_continue(true);\n        });\n    HttpProtocolIntegrationTest::initialize();\n  }\n\n  IntegrationStreamDecoderPtr setupPerStreamIdleTimeoutTest(const char* method = \"GET\") {\n    initialize();\n    
codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n    auto encoder_decoder =\n        codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", method},\n                                                                   {\":path\", \"/test/long/url\"},\n                                                                   {\":scheme\", \"http\"},\n                                                                   {\":authority\", \"host\"}});\n    request_encoder_ = &encoder_decoder.first;\n    auto response = std::move(encoder_decoder.second);\n    AssertionResult result =\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_);\n    RELEASE_ASSERT(result, result.message());\n    result = upstream_request_->waitForHeadersComplete();\n    RELEASE_ASSERT(result, result.message());\n    return response;\n  }\n\n  void sleep() {\n    test_time_.timeSystem().advanceTimeWait(std::chrono::milliseconds(IdleTimeoutMs / 2));\n  }\n\n  void waitForTimeout(IntegrationStreamDecoder& response, absl::string_view stat_name = \"\",\n                      absl::string_view stat_prefix = \"http.config_test\") {\n    if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n      ASSERT_TRUE(codec_client_->waitForDisconnect());\n    } else {\n      response.waitForReset();\n      codec_client_->close();\n    }\n    if (!stat_name.empty()) {\n      EXPECT_EQ(1, test_server_->counter(fmt::format(\"{}.{}\", stat_prefix, stat_name))->value());\n    }\n  }\n\n  // TODO(htuch): This might require scaling for TSAN/ASAN/Valgrind/etc. 
Bump if\n  // this is the cause of flakes.\n  static constexpr uint64_t IdleTimeoutMs = 400;\n  static constexpr uint64_t RequestTimeoutMs = 200;\n  bool enable_global_idle_timeout_{false};\n  bool enable_per_stream_idle_timeout_{false};\n  bool enable_request_timeout_{false};\n  DangerousDeprecatedTestTime test_time_;\n};\n\nINSTANTIATE_TEST_SUITE_P(Protocols, IdleTimeoutIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n// Tests idle timeout behaviour with single request and validates that idle timer kicks in\n// after given timeout.\nTEST_P(IdleTimeoutIntegrationTest, TimeoutBasic) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    auto* cluster = static_resources->mutable_clusters(0);\n    auto* http_protocol_options = cluster->mutable_common_http_protocol_options();\n    auto* idle_time_out = http_protocol_options->mutable_idle_timeout();\n    std::chrono::milliseconds timeout(1000);\n    auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);\n    idle_time_out->set_seconds(seconds.count());\n  });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_total\", 1);\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_200\", 1);\n\n  // Do not send any requests and validate if idle time out kicks in.\n  
ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_idle_timeout\", 1);\n}\n\n// Tests idle timeout behaviour with multiple requests and validates that idle timer kicks in\n// after both the requests are done.\nTEST_P(IdleTimeoutIntegrationTest, IdleTimeoutWithTwoRequests) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    auto* cluster = static_resources->mutable_clusters(0);\n    auto* http_protocol_options = cluster->mutable_common_http_protocol_options();\n    auto* idle_time_out = http_protocol_options->mutable_idle_timeout();\n    std::chrono::milliseconds timeout(1000);\n    auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);\n    idle_time_out->set_seconds(seconds.count());\n  });\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Request 1.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_total\", 1);\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_200\", 1);\n\n  // Request 2.\n  response = codec_client_->makeRequestWithBody(default_request_headers_, 512);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(1024, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_total\", 1);\n  
test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_200\", 2);\n\n  // Do not send any requests and validate if idle time out kicks in.\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_idle_timeout\", 1);\n}\n\n// Per-stream idle timeout after having sent downstream headers.\nTEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeaders) {\n  enable_per_stream_idle_timeout_ = true;\n  auto response = setupPerStreamIdleTimeoutTest();\n\n  waitForTimeout(*response, \"downstream_rq_idle_timeout\");\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"408\", response->headers().getStatusValue());\n  auto foo = Http::LowerCaseString(\"foo\");\n  ASSERT_TRUE(response->headers().get(foo) != nullptr);\n  EXPECT_EQ(\"bar\", response->headers().get(foo)->value().getStringView());\n  EXPECT_EQ(\"stream timeout\", response->body());\n\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"stream_idle_timeout\"));\n}\n\n// Per-stream idle timeout with reads disabled.\nTEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutWithLargeBuffer) {\n  config_helper_.addFilter(R\"EOF(\n  name: backpressure-filter\n  )EOF\");\n  enable_per_stream_idle_timeout_ = true;\n  initialize();\n\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n\n  // Make sure that for HTTP/1.1 reads are enabled even though the first request\n  // ended in the \"backed up\" state.\n  auto response2 = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  response2->waitForEndStream();\n  EXPECT_TRUE(response2->complete());\n}\n\n// Per-stream idle timeout after having sent downstream head 
request.\nTEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutHeadRequestAfterDownstreamHeadRequest) {\n  enable_per_stream_idle_timeout_ = true;\n  auto response = setupPerStreamIdleTimeoutTest(\"HEAD\");\n\n  waitForTimeout(*response, \"downstream_rq_idle_timeout\");\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"408\", response->headers().getStatusValue());\n  EXPECT_EQ(fmt::format(\"{}\", strlen(\"stream timeout\")),\n            response->headers().getContentLengthValue());\n  EXPECT_EQ(\"\", response->body());\n}\n\n// Global per-stream idle timeout applies if there is no per-stream idle timeout.\nTEST_P(IdleTimeoutIntegrationTest, GlobalPerStreamIdleTimeoutAfterDownstreamHeaders) {\n  enable_per_stream_idle_timeout_ = true;\n  enable_global_idle_timeout_ = true;\n  auto response = setupPerStreamIdleTimeoutTest();\n\n  waitForTimeout(*response, \"downstream_rq_idle_timeout\");\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"408\", response->headers().getStatusValue());\n  EXPECT_EQ(\"stream timeout\", response->body());\n}\n\n// Per-stream idle timeout after having sent downstream headers+body.\nTEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeadersAndBody) {\n  enable_per_stream_idle_timeout_ = true;\n  auto response = setupPerStreamIdleTimeoutTest();\n\n  sleep();\n  codec_client_->sendData(*request_encoder_, 1, false);\n\n  waitForTimeout(*response, \"downstream_rq_idle_timeout\");\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(1U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"408\", response->headers().getStatusValue());\n  EXPECT_EQ(\"stream timeout\", response->body());\n}\n\n// Per-stream idle timeout after upstream headers have been 
sent.\nTEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterUpstreamHeaders) {\n  enable_per_stream_idle_timeout_ = true;\n  auto response = setupPerStreamIdleTimeoutTest();\n\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n\n  waitForTimeout(*response, \"downstream_rq_idle_timeout\");\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_FALSE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"\", response->body());\n}\n\n// Per-stream idle timeout after a sequence of header/data events.\nTEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterBidiData) {\n  enable_per_stream_idle_timeout_ = true;\n  auto response = setupPerStreamIdleTimeoutTest();\n\n  sleep();\n  upstream_request_->encode100ContinueHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n\n  sleep();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n\n  sleep();\n  upstream_request_->encodeData(1, false);\n\n  sleep();\n  codec_client_->sendData(*request_encoder_, 1, false);\n\n  sleep();\n  Http::TestRequestTrailerMapImpl request_trailers{{\"request1\", \"trailer1\"},\n                                                   {\"request2\", \"trailer2\"}};\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n\n  sleep();\n  upstream_request_->encodeData(1, false);\n\n  waitForTimeout(*response, \"downstream_rq_idle_timeout\");\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1U, upstream_request_->bodyLength());\n  EXPECT_FALSE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"aa\", response->body());\n}\n\n// Successful request/response when per-stream idle timeout is configured.\nTEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutRequestAndResponse) {\n  enable_per_stream_idle_timeout_ = 
true;\n  testRouterRequestAndResponseWithBody(1024, 1024, false);\n}\n\nTEST_P(IdleTimeoutIntegrationTest, RequestTimeoutConfiguredRequestResponse) {\n  enable_request_timeout_ = true;\n  testRouterHeaderOnlyRequestAndResponse();\n}\n\nTEST_P(IdleTimeoutIntegrationTest, RequestTimeoutConfiguredRequestResponseWithBody) {\n  enable_request_timeout_ = true;\n  testRouterRequestAndResponseWithBody(1024, 1024, false);\n}\n\nTEST_P(IdleTimeoutIntegrationTest, RequestTimeoutTriggersOnBodilessPost) {\n  enable_request_timeout_ = true;\n\n  auto response = setupPerStreamIdleTimeoutTest(\"POST\");\n\n  waitForTimeout(*response, \"downstream_rq_timeout\");\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"408\", response->headers().getStatusValue());\n  EXPECT_EQ(\"request timeout\", response->body());\n}\n\nTEST_P(IdleTimeoutIntegrationTest, RequestTimeoutUnconfiguredDoesNotTriggerOnBodilessPost) {\n  enable_request_timeout_ = false;\n  // with no request timeout configured, the idle timeout triggers instead\n  enable_per_stream_idle_timeout_ = true;\n\n  auto response = setupPerStreamIdleTimeoutTest(\"POST\");\n\n  waitForTimeout(*response);\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"408\", response->headers().getStatusValue());\n  EXPECT_NE(\"request timeout\", response->body());\n}\n\nTEST_P(IdleTimeoutIntegrationTest, RequestTimeoutTriggersOnRawIncompleteRequestWithHeaders) {\n  // Omitting \\r\\n\\r\\n does not indicate incomplete request in HTTP2\n  if (downstreamProtocol() == Envoy::Http::CodecClient::Type::HTTP2) {\n    return;\n  }\n  enable_request_timeout_ = true;\n\n  initialize();\n\n  std::string raw_response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.1\", &raw_response, true);\n  EXPECT_THAT(raw_response, 
testing::HasSubstr(\"request timeout\"));\n}\n\nTEST_P(IdleTimeoutIntegrationTest, RequestTimeoutDoesNotTriggerOnRawCompleteRequestWithHeaders) {\n  if (downstreamProtocol() == Envoy::Http::CodecClient::Type::HTTP2) {\n    return;\n  }\n  enable_request_timeout_ = true;\n\n  initialize();\n\n  std::string raw_response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.1\\r\\n\\r\\n\", &raw_response, true);\n  EXPECT_THAT(raw_response, testing::Not(testing::HasSubstr(\"request timeout\")));\n}\n\nTEST_P(IdleTimeoutIntegrationTest, RequestTimeoutIsDisarmedByPrematureEncodeHeaders) {\n  enable_request_timeout_ = true;\n  enable_per_stream_idle_timeout_ = true;\n\n  auto response = setupPerStreamIdleTimeoutTest(\"POST\");\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n\n  waitForTimeout(*response);\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_FALSE(response->complete());\n  EXPECT_NE(\"request timeout\", response->body());\n}\n\nTEST_P(IdleTimeoutIntegrationTest, RequestTimeoutIsNotDisarmedByEncode100ContinueHeaders) {\n  enable_request_timeout_ = true;\n\n  auto response = setupPerStreamIdleTimeoutTest(\"POST\");\n  upstream_request_->encode100ContinueHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n\n  waitForTimeout(*response, \"downstream_rq_timeout\");\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"408\", response->headers().getStatusValue());\n  EXPECT_EQ(\"request timeout\", response->body());\n}\n\n// TODO(auni53) create a test filter that hangs and does not send data upstream, which would\n// trigger a configured request_timer\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/integration.h",
    "content": "#pragma once\n// NOLINT(namespace-envoy)\n#include \"test/test_common/network_utility.h\"\n\n#include \"base_integration_test.h\"\n#include \"integration_stream_decoder.h\"\n#include \"integration_tcp_client.h\"\n"
  },
  {
    "path": "test/integration/integration_admin_test.cc",
    "content": "#include \"test/integration/integration_admin_test.h\"\n\n#include <string>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/config/api_version.h\"\n#include \"common/profiler/profiler.h\"\n#include \"common/stats/histogram_impl.h\"\n#include \"common/stats/stats_matcher_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n#include \"spdlog/spdlog.h\"\n\nusing testing::Eq;\nusing testing::HasSubstr;\nusing testing::Not;\n\nnamespace Envoy {\n\nINSTANTIATE_TEST_SUITE_P(Protocols, IntegrationAdminTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams(\n                             {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2},\n                             {FakeHttpConnection::Type::HTTP1})),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nTEST_P(IntegrationAdminTest, HealthCheck) {\n  initialize();\n\n  BufferingStreamDecoderPtr response;\n  EXPECT_EQ(\"200\", request(\"http\", \"POST\", \"/healthcheck\", response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/healthcheck/fail\", response));\n  EXPECT_EQ(\"503\", request(\"http\", \"GET\", \"/healthcheck\", response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/healthcheck/ok\", response));\n  EXPECT_EQ(\"200\", request(\"http\", \"GET\", \"/healthcheck\", response));\n}\n\nTEST_P(IntegrationAdminTest, HealthCheckWithoutServerStats) {\n  envoy::config::metrics::v3::StatsMatcher stats_matcher;\n  
stats_matcher.mutable_exclusion_list()->add_patterns()->set_prefix(\"server.\");\n  initialize(stats_matcher);\n\n  BufferingStreamDecoderPtr response;\n  EXPECT_EQ(\"200\", request(\"http\", \"POST\", \"/healthcheck\", response));\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats\", response));\n  EXPECT_THAT(response->body(), Not(HasSubstr(\"server.\")));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/healthcheck/fail\", response));\n  EXPECT_EQ(\"503\", request(\"http\", \"GET\", \"/healthcheck\", response));\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats\", response));\n  EXPECT_THAT(response->body(), Not(HasSubstr(\"server.\")));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/healthcheck/ok\", response));\n  EXPECT_EQ(\"200\", request(\"http\", \"GET\", \"/healthcheck\", response));\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats\", response));\n  EXPECT_THAT(response->body(), Not(HasSubstr(\"server.\")));\n}\n\nTEST_P(IntegrationAdminTest, HealthCheckWithBufferFilter) {\n  config_helper_.addFilter(ConfigHelper::defaultBufferFilter());\n  initialize();\n\n  BufferingStreamDecoderPtr response;\n  EXPECT_EQ(\"200\", request(\"http\", \"GET\", \"/healthcheck\", response));\n}\n\nTEST_P(IntegrationAdminTest, AdminLogging) {\n  initialize();\n\n  BufferingStreamDecoderPtr response;\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/logging\", response));\n\n  // Bad level\n  EXPECT_EQ(\"404\", request(\"admin\", \"POST\", \"/logging?level=blah\", response));\n\n  // Bad logger\n  EXPECT_EQ(\"404\", request(\"admin\", \"POST\", \"/logging?blah=info\", response));\n\n  // This is going to stomp over custom log levels that are set on the command line.\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/logging?level=warning\", response));\n  for (const Logger::Logger& logger : Logger::Registry::loggers()) {\n    EXPECT_EQ(\"warning\", logger.levelString());\n  }\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", 
\"/logging?assert=trace\", response));\n  EXPECT_EQ(spdlog::level::trace, Logger::Registry::getLog(Logger::Id::assert).level());\n\n  spdlog::string_view_t level_name = spdlog::level::level_string_views[default_log_level_];\n  EXPECT_EQ(\"200\",\n            request(\"admin\", \"POST\", fmt::format(\"/logging?level={}\", level_name), response));\n  for (const Logger::Logger& logger : Logger::Registry::loggers()) {\n    EXPECT_EQ(level_name, logger.levelString());\n  }\n}\n\nnamespace {\n\nstd::string ContentType(const BufferingStreamDecoderPtr& response) {\n  const Http::HeaderEntry* entry = response->headers().ContentType();\n  if (entry == nullptr) {\n    return \"(null)\";\n  }\n  return std::string(entry->value().getStringView());\n}\n\n} // namespace\n\nTEST_P(IntegrationAdminTest, Admin) {\n  initialize();\n\n  BufferingStreamDecoderPtr response;\n  EXPECT_EQ(\"404\", request(\"admin\", \"GET\", \"/notfound\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n  EXPECT_NE(std::string::npos, response->body().find(\"invalid path. 
admin commands are:\"))\n      << response->body();\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/help\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n  EXPECT_NE(std::string::npos, response->body().find(\"admin commands are:\")) << response->body();\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/\", response));\n  EXPECT_EQ(\"text/html; charset=UTF-8\", ContentType(response));\n  EXPECT_NE(std::string::npos, response->body().find(\"<title>Envoy Admin</title>\"))\n      << response->body();\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/server_info\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/ready\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  // Our first attempt to get recent lookups will get the error message as they\n  // are off by default.\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats/recentlookups\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n  EXPECT_THAT(response->body(), testing::HasSubstr(\"Lookup tracking is not enabled\"));\n\n  // Now enable recent-lookups tracking and check that we get a count.\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/stats/recentlookups/enable\", response));\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats/recentlookups\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n  EXPECT_TRUE(absl::StartsWith(response->body(), \"   Count Lookup\\n\")) << response->body();\n  EXPECT_LT(28, response->body().size());\n\n  // Now disable recent-lookups tracking and check that we get the error again.\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/stats/recentlookups/disable\", response));\n  EXPECT_EQ(\"200\", 
request(\"admin\", \"GET\", \"/stats/recentlookups\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n  EXPECT_THAT(response->body(), testing::HasSubstr(\"Lookup tracking is not enabled\"));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats?usedonly\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  // Testing a filter with no matches\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats?filter=foo\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  // Testing a filter with matches\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats?filter=server\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats?filter=server&usedonly\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats?format=json&usedonly\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n  validateStatsJson(response->body(), 0);\n\n  EXPECT_EQ(\"404\", request(\"admin\", \"GET\", \"/stats?format=blah\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats?format=json\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n  validateStatsJson(response->body(), 1);\n\n  // Filtering stats by a regex with one match should return just that match.\n  EXPECT_EQ(\"200\",\n            request(\"admin\", \"GET\", \"/stats?format=json&filter=^server\\\\.version$\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n  validateStatsJson(response->body(), 0);\n  EXPECT_THAT(\"{\\\"stats\\\":[{\\\"name\\\":\\\"server.version\\\",\\\"value\\\":0}]}\",\n              JsonStringEq(response->body()));\n\n  // Filtering stats by a non-full-string regex should also return 
just that match.\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats?format=json&filter=server\\\\.version\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n  validateStatsJson(response->body(), 0);\n  EXPECT_THAT(\"{\\\"stats\\\":[{\\\"name\\\":\\\"server.version\\\",\\\"value\\\":0}]}\",\n              JsonStringEq(response->body()));\n\n  // Filtering stats by a regex with no matches (\".*not_intended_to_appear.*\") should return a\n  // valid, empty, stats array.\n  EXPECT_EQ(\"200\",\n            request(\"admin\", \"GET\", \"/stats?format=json&filter=not_intended_to_appear\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n  validateStatsJson(response->body(), 0);\n  EXPECT_THAT(response->body(), Eq(\"{\\\"stats\\\":[]}\"));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats?format=prometheus\", response));\n  EXPECT_THAT(\n      response->body(),\n      HasSubstr(\"envoy_http_downstream_rq_xx{envoy_response_code_class=\\\"4\\\",envoy_http_conn_\"\n                \"manager_prefix=\\\"admin\\\"} 2\\n\"));\n  EXPECT_THAT(response->body(), HasSubstr(\"# TYPE envoy_http_downstream_rq_xx counter\\n\"));\n  EXPECT_THAT(\n      response->body(),\n      HasSubstr(\"envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class=\\\"4\\\",\"\n                \"envoy_http_conn_manager_prefix=\\\"admin\\\"} 2\\n\"));\n  EXPECT_THAT(response->body(), HasSubstr(\"# TYPE envoy_cluster_upstream_cx_active gauge\\n\"));\n  EXPECT_THAT(response->body(),\n              HasSubstr(\"envoy_cluster_upstream_cx_active{envoy_cluster_name=\\\"cluster_0\\\"} 0\\n\"));\n\n  // Test that a specific bucket config is applied. 
Buckets 1-4 (inclusive) are set in initialize().\n  for (int i = 1; i <= 4; i++) {\n    EXPECT_THAT(\n        response->body(),\n        HasSubstr(fmt::format(\"envoy_cluster_upstream_cx_connect_ms_bucket{{envoy_cluster_name=\"\n                              \"\\\"cluster_0\\\",le=\\\"{}\\\"}} 0\\n\",\n                              i)));\n  }\n\n  // Test that other histograms use the default buckets.\n  for (double bucket : Stats::HistogramSettingsImpl::defaultBuckets()) {\n    EXPECT_THAT(\n        response->body(),\n        HasSubstr(fmt::format(\"envoy_cluster_upstream_cx_length_ms_bucket{{envoy_cluster_name=\"\n                              \"\\\"cluster_0\\\",le=\\\"{0:.32g}\\\"}} 0\\n\",\n                              bucket)));\n  }\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats/prometheus\", response));\n  EXPECT_THAT(\n      response->body(),\n      HasSubstr(\"envoy_http_downstream_rq_xx{envoy_response_code_class=\\\"4\\\",envoy_http_conn_\"\n                \"manager_prefix=\\\"admin\\\"} 2\\n\"));\n  EXPECT_THAT(response->body(), HasSubstr(\"# TYPE envoy_http_downstream_rq_xx counter\\n\"));\n  EXPECT_THAT(\n      response->body(),\n      HasSubstr(\"envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class=\\\"4\\\",\"\n                \"envoy_http_conn_manager_prefix=\\\"admin\\\"} 2\\n\"));\n  EXPECT_THAT(response->body(), HasSubstr(\"# TYPE envoy_cluster_upstream_cx_active gauge\\n\"));\n  EXPECT_THAT(response->body(),\n              HasSubstr(\"envoy_cluster_upstream_cx_active{envoy_cluster_name=\\\"cluster_0\\\"} 0\\n\"));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/clusters\", response));\n  EXPECT_THAT(response->body(), HasSubstr(\"added_via_api\"));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/clusters?format=json\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n  
EXPECT_NO_THROW(Json::Factory::loadFromString(response->body()));\n\n  EXPECT_EQ(\"400\", request(\"admin\", \"POST\", \"/cpuprofiler\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/hot_restart_version\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/reset_counters\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/stats/recentlookups/enable\", response));\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/stats/recentlookups/clear\", response));\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/stats/recentlookups\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n\n  switch (GetParam().downstream_protocol) {\n  case Http::CodecClient::Type::HTTP1:\n    EXPECT_EQ(\"   Count Lookup\\n\"\n              \"\\n\"\n              \"total: 0\\n\",\n              response->body());\n    break;\n  case Http::CodecClient::Type::HTTP2:\n    EXPECT_EQ(\"   Count Lookup\\n\"\n              \"\\n\"\n              \"total: 0\\n\",\n              response->body());\n    break;\n  case Http::CodecClient::Type::HTTP3:\n    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n  }\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/certs\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/runtime\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/runtime_modify?foo=bar&foo1=bar1\", response));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/runtime?format=json\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n\n  Json::ObjectSharedPtr json = Json::Factory::loadFromString(response->body());\n  auto entries = 
json->getObject(\"entries\");\n  auto foo_obj = entries->getObject(\"foo\");\n  EXPECT_EQ(\"bar\", foo_obj->getString(\"final_value\"));\n  auto foo1_obj = entries->getObject(\"foo1\");\n  EXPECT_EQ(\"bar1\", foo1_obj->getString(\"final_value\"));\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/listeners\", response));\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n  auto listeners = test_server_->server().listenerManager().listeners();\n  auto listener_it = listeners.cbegin();\n  for (; listener_it != listeners.end(); ++listener_it) {\n    EXPECT_THAT(response->body(),\n                HasSubstr(fmt::format(\n                    \"{}::{}\", listener_it->get().name(),\n                    listener_it->get().listenSocketFactory().localAddress()->asString())));\n  }\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/listeners?format=json\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n\n  json = Json::Factory::loadFromString(response->body());\n  std::vector<Json::ObjectSharedPtr> listener_info = json->getObjectArray(\"listener_statuses\");\n  auto listener_info_it = listener_info.cbegin();\n  listeners = test_server_->server().listenerManager().listeners();\n  listener_it = listeners.cbegin();\n  for (; listener_info_it != listener_info.end() && listener_it != listeners.end();\n       ++listener_info_it, ++listener_it) {\n    auto local_address = (*listener_info_it)->getObject(\"local_address\");\n    auto socket_address = local_address->getObject(\"socket_address\");\n    EXPECT_EQ(listener_it->get().listenSocketFactory().localAddress()->ip()->addressAsString(),\n              socket_address->getString(\"address\"));\n    EXPECT_EQ(listener_it->get().listenSocketFactory().localAddress()->ip()->port(),\n              socket_address->getInteger(\"port_value\"));\n  }\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/config_dump\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n  
json = Json::Factory::loadFromString(response->body());\n  size_t index = 0;\n  const std::string expected_types[] = {\"type.googleapis.com/envoy.admin.v3.BootstrapConfigDump\",\n                                        \"type.googleapis.com/envoy.admin.v3.ClustersConfigDump\",\n                                        \"type.googleapis.com/envoy.admin.v3.ListenersConfigDump\",\n                                        \"type.googleapis.com/envoy.admin.v3.ScopedRoutesConfigDump\",\n                                        \"type.googleapis.com/envoy.admin.v3.RoutesConfigDump\",\n                                        \"type.googleapis.com/envoy.admin.v3.SecretsConfigDump\"};\n\n  for (const Json::ObjectSharedPtr& obj_ptr : json->getObjectArray(\"configs\")) {\n    EXPECT_TRUE(expected_types[index].compare(obj_ptr->getString(\"@type\")) == 0);\n    index++;\n  }\n\n  // Validate we can parse as proto.\n  envoy::admin::v3::ConfigDump config_dump;\n  TestUtility::loadFromJson(response->body(), config_dump);\n  EXPECT_EQ(6, config_dump.configs_size());\n\n  // .. 
and that we can unpack one of the entries.\n  envoy::admin::v3::RoutesConfigDump route_config_dump;\n  config_dump.configs(4).UnpackTo(&route_config_dump);\n  envoy::config::route::v3::RouteConfiguration route_config;\n  EXPECT_TRUE(route_config_dump.static_route_configs(0).route_config().UnpackTo(&route_config));\n  EXPECT_EQ(\"route_config_0\", route_config.name());\n\n  envoy::admin::v3::SecretsConfigDump secret_config_dump;\n  config_dump.configs(5).UnpackTo(&secret_config_dump);\n  EXPECT_EQ(\"secret_static_0\", secret_config_dump.static_secrets(0).name());\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/config_dump?include_eds\", response));\n  EXPECT_EQ(\"application/json\", ContentType(response));\n  json = Json::Factory::loadFromString(response->body());\n  index = 0;\n  const std::string expected_types_eds[] = {\n      \"type.googleapis.com/envoy.admin.v3.BootstrapConfigDump\",\n      \"type.googleapis.com/envoy.admin.v3.ClustersConfigDump\",\n      \"type.googleapis.com/envoy.admin.v3.EndpointsConfigDump\",\n      \"type.googleapis.com/envoy.admin.v3.ListenersConfigDump\",\n      \"type.googleapis.com/envoy.admin.v3.ScopedRoutesConfigDump\",\n      \"type.googleapis.com/envoy.admin.v3.RoutesConfigDump\",\n      \"type.googleapis.com/envoy.admin.v3.SecretsConfigDump\"};\n\n  for (const Json::ObjectSharedPtr& obj_ptr : json->getObjectArray(\"configs\")) {\n    EXPECT_TRUE(expected_types_eds[index].compare(obj_ptr->getString(\"@type\")) == 0);\n    index++;\n  }\n\n  // Validate we can parse as proto.\n  envoy::admin::v3::ConfigDump config_dump_with_eds;\n  TestUtility::loadFromJson(response->body(), config_dump_with_eds);\n  EXPECT_EQ(7, config_dump_with_eds.configs_size());\n\n  // Validate that the \"inboundonly\" does not stop the default listener.\n  response = IntegrationUtil::makeSingleRequest(lookupPort(\"admin\"), \"POST\",\n                                                \"/drain_listeners?inboundonly\", \"\",\n                              
                  downstreamProtocol(), version_);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n  EXPECT_EQ(\"OK\\n\", response->body());\n\n  // Validate that the listener stopped stat is not used and still zero.\n  EXPECT_FALSE(test_server_->counter(\"listener_manager.listener_stopped\")->used());\n  EXPECT_EQ(0, test_server_->counter(\"listener_manager.listener_stopped\")->value());\n\n  // Now validate that the drain_listeners stops the listeners.\n  response = IntegrationUtil::makeSingleRequest(lookupPort(\"admin\"), \"POST\", \"/drain_listeners\", \"\",\n                                                downstreamProtocol(), version_);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n  EXPECT_EQ(\"OK\\n\", response->body());\n\n  test_server_->waitForCounterEq(\"listener_manager.listener_stopped\", 1);\n}\n\n// Validates that the \"inboundonly\" drains inbound listeners.\nTEST_P(IntegrationAdminTest, AdminDrainInboundOnly) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* inbound_listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    inbound_listener->set_traffic_direction(envoy::config::core::v3::INBOUND);\n    inbound_listener->set_name(\"inbound_0\");\n  });\n  initialize();\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"admin\"), \"POST\", \"/drain_listeners?inboundonly\", \"\", downstreamProtocol(),\n      version_);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"text/plain; charset=UTF-8\", ContentType(response));\n  EXPECT_EQ(\"OK\\n\", response->body());\n\n  // Validate that the inbound listener has been stopped.\n 
 test_server_->waitForCounterEq(\"listener_manager.listener_stopped\", 1);\n}\n\nTEST_P(IntegrationAdminTest, AdminOnDestroyCallbacks) {\n  initialize();\n  bool test = true;\n\n  // add an handler which adds a callback to the list of callback called when connection is dropped.\n  auto callback = [&test](absl::string_view, Http::HeaderMap&, Buffer::Instance&,\n                          Server::AdminStream& admin_stream) -> Http::Code {\n    auto on_destroy_callback = [&test]() { test = false; };\n\n    // Add the on_destroy_callback to the admin_filter list of callbacks.\n    admin_stream.addOnDestroyCallback(std::move(on_destroy_callback));\n    return Http::Code::OK;\n  };\n\n  EXPECT_TRUE(\n      test_server_->server().admin().addHandler(\"/foo/bar\", \"hello\", callback, true, false));\n\n  // As part of the request, on destroy() should be called and the on_destroy_callback invoked.\n  BufferingStreamDecoderPtr response;\n  EXPECT_EQ(\"200\", request(\"admin\", \"GET\", \"/foo/bar\", response));\n  // Check that the added callback was invoked.\n  EXPECT_EQ(test, false);\n\n  // Small test to cover new statsFlushInterval() on Instance.h.\n  EXPECT_EQ(test_server_->server().statsFlushInterval(), std::chrono::milliseconds(5000));\n}\n\nTEST_P(IntegrationAdminTest, AdminCpuProfilerStart) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* admin = bootstrap.mutable_admin();\n    admin->set_profile_path(TestEnvironment::temporaryPath(\"/envoy.prof\"));\n  });\n\n  initialize();\n  BufferingStreamDecoderPtr response;\n#ifdef PROFILER_AVAILABLE\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/cpuprofiler?enable=y\", response));\n#else\n  EXPECT_EQ(\"500\", request(\"admin\", \"POST\", \"/cpuprofiler?enable=y\", response));\n#endif\n\n  EXPECT_EQ(\"200\", request(\"admin\", \"POST\", \"/cpuprofiler?enable=n\", response));\n}\n\nclass IntegrationAdminIpv4Ipv6Test : public testing::Test, public 
HttpIntegrationTest {\npublic:\n  IntegrationAdminIpv4Ipv6Test()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, Network::Address::IpVersion::v4) {}\n\n  void initialize() override {\n    config_helper_.addConfigModifier(\n        [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          auto* socket_address =\n              bootstrap.mutable_admin()->mutable_address()->mutable_socket_address();\n          socket_address->set_ipv4_compat(true);\n          socket_address->set_address(\"::\");\n        });\n    HttpIntegrationTest::initialize();\n  }\n};\n\n// Verify an IPv4 client can connect to the admin interface listening on :: when\n// IPv4 compat mode is enabled.\nTEST_F(IntegrationAdminIpv4Ipv6Test, Ipv4Ipv6Listen) {\n  if (TestEnvironment::shouldRunTestForIpVersion(Network::Address::IpVersion::v4) &&\n      TestEnvironment::shouldRunTestForIpVersion(Network::Address::IpVersion::v6)) {\n    initialize();\n    BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n        lookupPort(\"admin\"), \"GET\", \"/server_info\", \"\", downstreamProtocol(), version_);\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n}\n\n// Testing the behavior of StatsMatcher, which allows/denies the  instantiation of stats based on\n// restrictions on their names.\n//\n// Note: using 'Event::TestUsingSimulatedTime' appears to conflict with LDS in\n// StatsMatcherIntegrationTest.IncludeExact, which manifests in a coverage test\n// crash, which is really difficult to debug. See #7215. 
It's possible this is\n// due to a bad interaction between the wait-for constructs in the integration\n// test framework with sim-time.\nclass StatsMatcherIntegrationTest\n    : public testing::Test,\n      public HttpIntegrationTest,\n      public testing::WithParamInterface<Network::Address::IpVersion> {\npublic:\n  StatsMatcherIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void initialize() override {\n    config_helper_.addConfigModifier(\n        [this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          *bootstrap.mutable_stats_config()->mutable_stats_matcher() = stats_matcher_;\n        });\n    HttpIntegrationTest::initialize();\n  }\n  void makeRequest() {\n    response_ = IntegrationUtil::makeSingleRequest(lookupPort(\"admin\"), \"GET\", \"/stats\", \"\",\n                                                   downstreamProtocol(), version_);\n    ASSERT_TRUE(response_->complete());\n    EXPECT_EQ(\"200\", response_->headers().getStatusValue());\n  }\n\n  BufferingStreamDecoderPtr response_;\n  envoy::config::metrics::v3::StatsMatcher stats_matcher_;\n};\nINSTANTIATE_TEST_SUITE_P(IpVersions, StatsMatcherIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Verify that StatsMatcher prevents the printing of uninstantiated stats.\nTEST_P(StatsMatcherIntegrationTest, ExcludePrefixServerDot) {\n  stats_matcher_.mutable_exclusion_list()->add_patterns()->set_prefix(\"server.\");\n  initialize();\n  makeRequest();\n  EXPECT_THAT(response_->body(), Not(HasSubstr(\"server.\")));\n}\n\nTEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(ExcludeRequests)) {\n  stats_matcher_.mutable_exclusion_list()->add_patterns()->set_hidden_envoy_deprecated_regex(\n      \".*requests.*\");\n  initialize();\n  makeRequest();\n  EXPECT_THAT(response_->body(), 
Not(HasSubstr(\"requests\")));\n}\n\nTEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(ExcludeExact)) {\n  stats_matcher_.mutable_exclusion_list()->add_patterns()->set_exact(\"server.concurrency\");\n  initialize();\n  makeRequest();\n  EXPECT_THAT(response_->body(), Not(HasSubstr(\"server.concurrency\")));\n}\n\nTEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(ExcludeMultipleExact)) {\n  stats_matcher_.mutable_exclusion_list()->add_patterns()->set_exact(\"server.concurrency\");\n  stats_matcher_.mutable_exclusion_list()->add_patterns()->set_hidden_envoy_deprecated_regex(\n      \".*live\");\n  initialize();\n  makeRequest();\n  EXPECT_THAT(response_->body(), Not(HasSubstr(\"server.concurrency\")));\n  EXPECT_THAT(response_->body(), Not(HasSubstr(\"server.live\")));\n}\n\n// TODO(ambuc): Find a cleaner way to test this. This test has an unfortunate compromise:\n// `listener_manager.listener_create_success` must be instantiated, because BaseIntegrationTest\n// blocks on its creation (see waitForCounterGe and the suite of waitFor* functions).\n// If this invariant is changed, this test must be rewritten.\nTEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(IncludeExact)) {\n  // Stats matching does not play well with LDS, at least in test. See #7215.\n  use_lds_ = false;\n  stats_matcher_.mutable_inclusion_list()->add_patterns()->set_exact(\n      \"listener_manager.listener_create_success\");\n  initialize();\n  makeRequest();\n  EXPECT_EQ(response_->body(), \"listener_manager.listener_create_success: 1\\n\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/integration_admin_test.h",
    "content": "#pragma once\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n\n#include \"common/json/json_loader.h\"\n\n#include \"test/integration/http_protocol_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass IntegrationAdminTest : public HttpProtocolIntegrationTest {\npublic:\n  void initialize() override {\n    config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter());\n    config_helper_.addConfigModifier(\n        [](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          auto& hist_settings =\n              *bootstrap.mutable_stats_config()->mutable_histogram_bucket_settings();\n          envoy::config::metrics::v3::HistogramBucketSettings* setting = hist_settings.Add();\n          setting->mutable_match()->set_suffix(\"upstream_cx_connect_ms\");\n          setting->mutable_buckets()->Add(1);\n          setting->mutable_buckets()->Add(2);\n          setting->mutable_buckets()->Add(3);\n          setting->mutable_buckets()->Add(4);\n        });\n    HttpIntegrationTest::initialize();\n  }\n\n  void initialize(envoy::config::metrics::v3::StatsMatcher stats_matcher) {\n    config_helper_.addConfigModifier(\n        [stats_matcher](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          *bootstrap.mutable_stats_config()->mutable_stats_matcher() = stats_matcher;\n        });\n    initialize();\n  }\n\n  absl::string_view request(const std::string port_key, const std::string method,\n                            const std::string endpoint, BufferingStreamDecoderPtr& response) {\n    response = IntegrationUtil::makeSingleRequest(lookupPort(port_key), method, endpoint, \"\",\n                                                  downstreamProtocol(), version_);\n    EXPECT_TRUE(response->complete());\n    return response->headers().getStatusValue();\n  }\n\n  /**\n   * Validates that the passed in 
string conforms to output of stats in JSON format.\n   */\n  void validateStatsJson(const std::string& stats_json, const uint64_t expected_hist_count) {\n    Json::ObjectSharedPtr statsjson = Json::Factory::loadFromString(stats_json);\n    EXPECT_TRUE(statsjson->hasObject(\"stats\"));\n    uint64_t histogram_count = 0;\n    for (const Json::ObjectSharedPtr& obj_ptr : statsjson->getObjectArray(\"stats\")) {\n      if (obj_ptr->hasObject(\"histograms\")) {\n        histogram_count++;\n        const Json::ObjectSharedPtr& histograms_ptr = obj_ptr->getObject(\"histograms\");\n        // Validate that both supported_quantiles and computed_quantiles are present in JSON.\n        EXPECT_TRUE(histograms_ptr->hasObject(\"supported_quantiles\"));\n        EXPECT_TRUE(histograms_ptr->hasObject(\"computed_quantiles\"));\n\n        const std::vector<Json::ObjectSharedPtr>& computed_quantiles =\n            histograms_ptr->getObjectArray(\"computed_quantiles\");\n        EXPECT_GT(computed_quantiles.size(), 0);\n\n        // Validate that each computed_quantile has name and value objects.\n        EXPECT_TRUE(computed_quantiles[0]->hasObject(\"name\"));\n        EXPECT_TRUE(computed_quantiles[0]->hasObject(\"values\"));\n\n        // Validate that supported and computed quantiles are of the same size.\n        EXPECT_EQ(histograms_ptr->getObjectArray(\"supported_quantiles\").size(),\n                  computed_quantiles[0]->getObjectArray(\"values\").size());\n      }\n    }\n\n    // Validate that the stats JSON has expected histograms element.\n    EXPECT_EQ(expected_hist_count, histogram_count);\n  }\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/integration_stream_decoder.cc",
    "content": "#include \"test/integration/integration_stream_decoder.h\"\n\n#include <algorithm>\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <utility>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nIntegrationStreamDecoder::IntegrationStreamDecoder(Event::Dispatcher& dispatcher)\n    : dispatcher_(dispatcher) {}\n\nvoid IntegrationStreamDecoder::waitForContinueHeaders() {\n  if (!continue_headers_.get()) {\n    waiting_for_continue_headers_ = true;\n    dispatcher_.run(Event::Dispatcher::RunType::Block);\n  }\n}\n\nvoid IntegrationStreamDecoder::waitForHeaders() {\n  if (!headers_.get()) {\n    waiting_for_headers_ = true;\n    dispatcher_.run(Event::Dispatcher::RunType::Block);\n  }\n}\n\nvoid IntegrationStreamDecoder::waitForBodyData(uint64_t size) {\n  ASSERT(body_data_waiting_length_ == 0);\n  body_data_waiting_length_ = size;\n  body_data_waiting_length_ -=\n      std::min(body_data_waiting_length_, static_cast<uint64_t>(body_.size()));\n  if (body_data_waiting_length_ > 0) {\n    dispatcher_.run(Event::Dispatcher::RunType::Block);\n  }\n}\n\nvoid IntegrationStreamDecoder::waitForEndStream() {\n  if (!saw_end_stream_) {\n    waiting_for_end_stream_ = true;\n    dispatcher_.run(Event::Dispatcher::RunType::Block);\n  }\n}\n\nvoid IntegrationStreamDecoder::waitForReset() {\n  if (!saw_reset_) {\n    waiting_for_reset_ = true;\n    dispatcher_.run(Event::Dispatcher::RunType::Block);\n  }\n}\n\nvoid IntegrationStreamDecoder::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) {\n  continue_headers_ = std::move(headers);\n  if (waiting_for_continue_headers_) {\n    dispatcher_.exit();\n  }\n}\n\nvoid IntegrationStreamDecoder::decodeHeaders(Http::ResponseHeaderMapPtr&& headers,\n                                             bool end_stream) {\n  
saw_end_stream_ = end_stream;\n  headers_ = std::move(headers);\n  if ((end_stream && waiting_for_end_stream_) || waiting_for_headers_) {\n    dispatcher_.exit();\n  }\n}\n\nvoid IntegrationStreamDecoder::decodeData(Buffer::Instance& data, bool end_stream) {\n  saw_end_stream_ = end_stream;\n  body_ += data.toString();\n\n  if (end_stream && waiting_for_end_stream_) {\n    dispatcher_.exit();\n  } else if (body_data_waiting_length_ > 0) {\n    body_data_waiting_length_ -= std::min(body_data_waiting_length_, data.length());\n    if (body_data_waiting_length_ == 0) {\n      dispatcher_.exit();\n    }\n  }\n}\n\nvoid IntegrationStreamDecoder::decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) {\n  saw_end_stream_ = true;\n  trailers_ = std::move(trailers);\n  if (waiting_for_end_stream_) {\n    dispatcher_.exit();\n  }\n}\n\nvoid IntegrationStreamDecoder::decodeMetadata(Http::MetadataMapPtr&& metadata_map) {\n  // Combines newly received metadata with the existing metadata.\n  for (const auto& metadata : *metadata_map) {\n    duplicated_metadata_key_count_[metadata.first]++;\n    metadata_map_->insert(metadata);\n  }\n}\n\nvoid IntegrationStreamDecoder::onResetStream(Http::StreamResetReason reason, absl::string_view) {\n  saw_reset_ = true;\n  reset_reason_ = reason;\n  if (waiting_for_reset_) {\n    dispatcher_.exit();\n  }\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/integration_stream_decoder.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/http/metadata_interface.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n\nnamespace Envoy {\n/**\n * Stream decoder wrapper used during integration testing.\n */\nclass IntegrationStreamDecoder : public Http::ResponseDecoder, public Http::StreamCallbacks {\npublic:\n  IntegrationStreamDecoder(Event::Dispatcher& dispatcher);\n\n  const std::string& body() { return body_; }\n  bool complete() { return saw_end_stream_; }\n  bool reset() { return saw_reset_; }\n  Http::StreamResetReason resetReason() { return reset_reason_; }\n  const Http::ResponseHeaderMap* continueHeaders() { return continue_headers_.get(); }\n  const Http::ResponseHeaderMap& headers() { return *headers_; }\n  const Http::ResponseTrailerMapPtr& trailers() { return trailers_; }\n  const Http::MetadataMap& metadataMap() { return *metadata_map_; }\n  uint64_t keyCount(std::string key) { return duplicated_metadata_key_count_[key]; }\n  void waitForContinueHeaders();\n  void waitForHeaders();\n  // This function waits until body_ has at least size bytes in it (it might have more). 
clearBody()\n  // can be used if the previous body data is not relevant and the test wants to wait for a specific\n  // amount of new data without considering the existing body size.\n  void waitForBodyData(uint64_t size);\n  void waitForEndStream();\n  void waitForReset();\n  void clearBody() { body_.clear(); }\n\n  // Http::StreamDecoder\n  void decodeData(Buffer::Instance& data, bool end_stream) override;\n  void decodeMetadata(Http::MetadataMapPtr&& metadata_map) override;\n\n  // Http::ResponseDecoder\n  void decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) override;\n  void decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override;\n  void decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) override;\n\n  // Http::StreamCallbacks\n  void onResetStream(Http::StreamResetReason reason,\n                     absl::string_view transport_failure_reason) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\nprivate:\n  Event::Dispatcher& dispatcher_;\n  Http::ResponseHeaderMapPtr continue_headers_;\n  Http::ResponseHeaderMapPtr headers_;\n  Http::ResponseTrailerMapPtr trailers_;\n  Http::MetadataMapPtr metadata_map_{new Http::MetadataMap()};\n  absl::node_hash_map<std::string, uint64_t> duplicated_metadata_key_count_;\n  bool waiting_for_end_stream_{};\n  bool saw_end_stream_{};\n  std::string body_;\n  uint64_t body_data_waiting_length_{};\n  bool waiting_for_reset_{};\n  bool waiting_for_continue_headers_{};\n  bool waiting_for_headers_{};\n  bool saw_reset_{};\n  Http::StreamResetReason reset_reason_{};\n};\n\nusing IntegrationStreamDecoderPtr = std::unique_ptr<IntegrationStreamDecoder>;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/integration_tcp_client.cc",
    "content": "#include \"test/integration/integration_tcp_client.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n#include <type_traits>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/time.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/fmt.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/integration/utility.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/test_time_system.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nusing ::testing::_;\nusing ::testing::AnyNumber;\nusing ::testing::AssertionFailure;\nusing ::testing::AssertionResult;\nusing ::testing::AssertionSuccess;\nusing ::testing::AtLeast;\nusing ::testing::Invoke;\nusing ::testing::NiceMock;\n\nIntegrationTcpClient::IntegrationTcpClient(\n    Event::Dispatcher& dispatcher, MockBufferFactory& factory, uint32_t port,\n    Network::Address::IpVersion version, bool enable_half_close,\n    const Network::ConnectionSocket::OptionsSharedPtr& options)\n    : payload_reader_(new WaitForPayloadReader(dispatcher)),\n      callbacks_(new ConnectionCallbacks(*this)) {\n  EXPECT_CALL(factory, create_(_, _, _))\n      .WillOnce(Invoke([&](std::function<void()> below_low, std::function<void()> above_high,\n                           std::function<void()> above_overflow) -> Buffer::Instance* {\n        client_write_buffer_ =\n            new NiceMock<MockWatermarkBuffer>(below_low, above_high, above_overflow);\n        return client_write_buffer_;\n      }));\n\n  connection_ = dispatcher.createClientConnection(\n      Network::Utility::resolveUrl(\n          fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version), port)),\n      Network::Address::InstanceConstSharedPtr(), 
Network::Test::createRawBufferSocket(), options);\n\n  ON_CALL(*client_write_buffer_, drain(_))\n      .WillByDefault(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackDrains));\n  EXPECT_CALL(*client_write_buffer_, drain(_)).Times(AnyNumber());\n\n  connection_->enableHalfClose(enable_half_close);\n  connection_->addConnectionCallbacks(*callbacks_);\n  connection_->addReadFilter(payload_reader_);\n  connection_->connect();\n}\n\nvoid IntegrationTcpClient::close() { connection_->close(Network::ConnectionCloseType::NoFlush); }\n\nvoid IntegrationTcpClient::waitForData(const std::string& data, bool exact_match) {\n  auto found = payload_reader_->data().find(data);\n  if (found == 0 || (!exact_match && found != std::string::npos)) {\n    return;\n  }\n\n  payload_reader_->set_data_to_wait_for(data, exact_match);\n  connection_->dispatcher().run(Event::Dispatcher::RunType::Block);\n}\n\nAssertionResult IntegrationTcpClient::waitForData(size_t length,\n                                                  std::chrono::milliseconds timeout) {\n  if (payload_reader_->data().size() >= length) {\n    return AssertionSuccess();\n  }\n\n  return payload_reader_->waitForLength(length, timeout);\n}\n\nvoid IntegrationTcpClient::waitForDisconnect(bool ignore_spurious_events) {\n  Event::TimerPtr timeout_timer =\n      connection_->dispatcher().createTimer([this]() -> void { connection_->dispatcher().exit(); });\n  timeout_timer->enableTimer(TestUtility::DefaultTimeout);\n\n  if (ignore_spurious_events) {\n    while (!disconnected_ && timeout_timer->enabled()) {\n      connection_->dispatcher().run(Event::Dispatcher::RunType::Block);\n    }\n  } else {\n    connection_->dispatcher().run(Event::Dispatcher::RunType::Block);\n  }\n  EXPECT_TRUE(disconnected_);\n}\n\nvoid IntegrationTcpClient::waitForHalfClose() {\n  if (payload_reader_->readLastByte()) {\n    return;\n  }\n  connection_->dispatcher().run(Event::Dispatcher::RunType::Block);\n  
EXPECT_TRUE(payload_reader_->readLastByte());\n}\n\nvoid IntegrationTcpClient::readDisable(bool disabled) { connection_->readDisable(disabled); }\n\nAssertionResult IntegrationTcpClient::write(const std::string& data, bool end_stream, bool verify,\n                                            std::chrono::milliseconds timeout) {\n  Event::TestTimeSystem::RealTimeBound bound(timeout);\n  Buffer::OwnedImpl buffer(data);\n  if (verify) {\n    EXPECT_CALL(*client_write_buffer_, move(_));\n    if (!data.empty()) {\n      EXPECT_CALL(*client_write_buffer_, drain(_)).Times(AtLeast(1));\n    }\n  }\n\n  uint64_t bytes_expected = client_write_buffer_->bytesDrained() + data.size();\n\n  connection_->write(buffer, end_stream);\n  do {\n    connection_->dispatcher().run(Event::Dispatcher::RunType::NonBlock);\n    if (client_write_buffer_->bytesDrained() == bytes_expected || disconnected_) {\n      break;\n    }\n  } while (bound.withinBound());\n\n  if (!bound.withinBound()) {\n    return AssertionFailure() << \"Timed out completing write\";\n  } else if (verify && (disconnected_ || client_write_buffer_->bytesDrained() != bytes_expected)) {\n    return AssertionFailure()\n           << \"Failed to complete write or unexpected disconnect. disconnected_: \" << disconnected_\n           << \" bytes_drained: \" << client_write_buffer_->bytesDrained()\n           << \" bytes_expected: \" << bytes_expected;\n  }\n\n  return AssertionSuccess();\n}\n\nvoid IntegrationTcpClient::ConnectionCallbacks::onEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose) {\n    parent_.disconnected_ = true;\n    parent_.connection_->dispatcher().exit();\n  }\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/integration_tcp_client.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstddef>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/network/socket.h\"\n\n#include \"test/integration/utility.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/types/optional.h\"\n#include \"gtest/gtest.h\"\n#include \"gtest/gtest_pred_impl.h\"\n\nnamespace Envoy {\n/**\n * TCP client used during integration testing.\n */\nclass IntegrationTcpClient {\npublic:\n  IntegrationTcpClient(Event::Dispatcher& dispatcher, MockBufferFactory& factory, uint32_t port,\n                       Network::Address::IpVersion version, bool enable_half_close,\n                       const Network::ConnectionSocket::OptionsSharedPtr& options);\n\n  void close();\n  void waitForData(const std::string& data, bool exact_match = true);\n  // wait for at least `length` bytes to be received\n  ABSL_MUST_USE_RESULT AssertionResult\n  waitForData(size_t length, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n  void waitForDisconnect(bool ignore_spurious_events = false);\n  void waitForHalfClose();\n  void readDisable(bool disabled);\n  ABSL_MUST_USE_RESULT AssertionResult\n  write(const std::string& data, bool end_stream = false, bool verify = true,\n        std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);\n  const std::string& data() { return payload_reader_->data(); }\n  bool connected() const { return !disconnected_; }\n  // clear up to the `count` number of bytes of received data\n  void clearData(size_t count = std::string::npos) { payload_reader_->clearData(count); }\n\nprivate:\n  struct ConnectionCallbacks : public Network::ConnectionCallbacks {\n    ConnectionCallbacks(IntegrationTcpClient& parent) : parent_(parent) {}\n\n    // 
Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override;\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    IntegrationTcpClient& parent_;\n  };\n\n  std::shared_ptr<WaitForPayloadReader> payload_reader_;\n  std::shared_ptr<ConnectionCallbacks> callbacks_;\n  Network::ClientConnectionPtr connection_;\n  bool disconnected_{};\n  MockWatermarkBuffer* client_write_buffer_;\n};\n\nusing IntegrationTcpClientPtr = std::unique_ptr<IntegrationTcpClient>;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/integration_test.cc",
    "content": "#include \"test/integration/integration_test.h\"\n\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/network/socket_option_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/integration/autonomous_upstream.h\"\n#include \"test/integration/filters/process_context_filter.h\"\n#include \"test/integration/utility.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing Envoy::Http::Headers;\nusing Envoy::Http::HeaderValueOf;\nusing Envoy::Http::HttpStatusIs;\nusing testing::EndsWith;\nusing testing::HasSubstr;\nusing testing::Not;\n\nnamespace Envoy {\nnamespace {\n\nstd::string normalizeDate(const std::string& s) {\n  const std::regex date_regex(\"date:[^\\r]+\");\n  return std::regex_replace(s, date_regex, \"date: Mon, 01 Jan 2017 00:00:00 GMT\");\n}\n\nvoid setDisallowAbsoluteUrl(\n    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) {\n  hcm.mutable_http_protocol_options()->mutable_allow_absolute_url()->set_value(false);\n};\n\nvoid setAllowHttp10WithDefaultHost(\n    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) {\n  hcm.mutable_http_protocol_options()->set_accept_http_10(true);\n  hcm.mutable_http_protocol_options()->set_default_host_for_http_10(\"default.com\");\n}\n\n} // namespace\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, IntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         
TestUtility::ipTestParamsToString);\n\n// Verify that we gracefully handle an invalid pre-bind socket option when using reuse port.\nTEST_P(IntegrationTest, BadPrebindSocketOptionWithReusePort) {\n  // Reserve a port that we can then use on the integration listener with reuse port.\n  auto addr_socket =\n      Network::Test::bindFreeLoopbackPort(version_, Network::Socket::Type::Stream, true);\n  // Do not wait for listeners to start as the listener will fail.\n  defer_listener_finalization_ = true;\n\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    listener->set_reuse_port(true);\n    listener->mutable_address()->mutable_socket_address()->set_port_value(\n        addr_socket.second->localAddress()->ip()->port());\n    auto socket_option = listener->add_socket_options();\n    socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_PREBIND);\n    socket_option->set_level(10000);     // Invalid level.\n    socket_option->set_int_value(10000); // Invalid value.\n  });\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_failure\", 1);\n}\n\n// Verify that we gracefully handle an invalid post-bind socket option when using reuse port.\nTEST_P(IntegrationTest, BadPostbindSocketOptionWithReusePort) {\n  // Reserve a port that we can then use on the integration listener with reuse port.\n  auto addr_socket =\n      Network::Test::bindFreeLoopbackPort(version_, Network::Socket::Type::Stream, true);\n  // Do not wait for listeners to start as the listener will fail.\n  defer_listener_finalization_ = true;\n\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    listener->set_reuse_port(true);\n    
listener->mutable_address()->mutable_socket_address()->set_port_value(\n        addr_socket.second->localAddress()->ip()->port());\n    auto socket_option = listener->add_socket_options();\n    socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_BOUND);\n    socket_option->set_level(10000);     // Invalid level.\n    socket_option->set_int_value(10000); // Invalid value.\n  });\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_failure\", 1);\n}\n\n// Make sure we have correctly specified per-worker performance stats.\nTEST_P(IntegrationTest, PerWorkerStatsAndBalancing) {\n  concurrency_ = 2;\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    listener->mutable_connection_balance_config()->mutable_exact_balance();\n  });\n  initialize();\n\n  // Per-worker listener stats.\n  auto check_listener_stats = [this](uint64_t cx_active, uint64_t cx_total) {\n    if (GetParam() == Network::Address::IpVersion::v4) {\n      test_server_->waitForGaugeEq(\"listener.127.0.0.1_0.worker_0.downstream_cx_active\", cx_active);\n      test_server_->waitForGaugeEq(\"listener.127.0.0.1_0.worker_1.downstream_cx_active\", cx_active);\n      test_server_->waitForCounterEq(\"listener.127.0.0.1_0.worker_0.downstream_cx_total\", cx_total);\n      test_server_->waitForCounterEq(\"listener.127.0.0.1_0.worker_1.downstream_cx_total\", cx_total);\n    } else {\n      test_server_->waitForGaugeEq(\"listener.[__1]_0.worker_0.downstream_cx_active\", cx_active);\n      test_server_->waitForGaugeEq(\"listener.[__1]_0.worker_1.downstream_cx_active\", cx_active);\n      test_server_->waitForCounterEq(\"listener.[__1]_0.worker_0.downstream_cx_total\", cx_total);\n      test_server_->waitForCounterEq(\"listener.[__1]_0.worker_1.downstream_cx_total\", cx_total);\n    }\n  };\n  check_listener_stats(0, 0);\n\n  // Main 
thread admin listener stats.\n  test_server_->waitForCounterExists(\"listener.admin.main_thread.downstream_cx_total\");\n\n  // Per-thread watchdog stats.\n  test_server_->waitForCounterExists(\"server.main_thread.watchdog_miss\");\n  test_server_->waitForCounterExists(\"server.worker_0.watchdog_miss\");\n  test_server_->waitForCounterExists(\"server.worker_1.watchdog_miss\");\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  IntegrationCodecClientPtr codec_client2 = makeHttpConnection(lookupPort(\"http\"));\n  check_listener_stats(1, 1);\n\n  codec_client_->close();\n  codec_client2->close();\n  check_listener_stats(0, 1);\n}\n\nTEST_P(IntegrationTest, RouterDirectResponseWithBody) {\n  const std::string body = \"Response body\";\n  const std::string file_path = TestEnvironment::writeStringToFileForTest(\"test_envoy\", body);\n  static const std::string domain(\"direct.example.com\");\n  static const std::string prefix(\"/\");\n  static const Http::Code status(Http::Code::OK);\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* route_config = hcm.mutable_route_config();\n        auto* header_value_option = route_config->mutable_response_headers_to_add()->Add();\n        header_value_option->mutable_header()->set_key(\"x-additional-header\");\n        header_value_option->mutable_header()->set_value(\"example-value\");\n        header_value_option->mutable_append()->set_value(false);\n        header_value_option = route_config->mutable_response_headers_to_add()->Add();\n        header_value_option->mutable_header()->set_key(\"content-type\");\n        header_value_option->mutable_header()->set_value(\"text/html\");\n        header_value_option->mutable_append()->set_value(false);\n        // Add a wrong content-length.\n        header_value_option = route_config->mutable_response_headers_to_add()->Add();\n        
header_value_option->mutable_header()->set_key(\"content-length\");\n        header_value_option->mutable_header()->set_value(\"2000\");\n        header_value_option->mutable_append()->set_value(false);\n        auto* virtual_host = route_config->add_virtual_hosts();\n        virtual_host->set_name(domain);\n        virtual_host->add_domains(domain);\n        virtual_host->add_routes()->mutable_match()->set_prefix(prefix);\n        virtual_host->mutable_routes(0)->mutable_direct_response()->set_status(\n            static_cast<uint32_t>(status));\n        virtual_host->mutable_routes(0)->mutable_direct_response()->mutable_body()->set_filename(\n            file_path);\n      });\n  initialize();\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/\", \"\", downstream_protocol_, version_, \"direct.example.com\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"example-value\", response->headers()\n                                 .get(Envoy::Http::LowerCaseString(\"x-additional-header\"))\n                                 ->value()\n                                 .getStringView());\n  EXPECT_EQ(\"text/html\", response->headers().getContentTypeValue());\n  // Verify content-length is correct.\n  EXPECT_EQ(fmt::format(\"{}\", body.size()), response->headers().getContentLengthValue());\n  EXPECT_EQ(body, response->body());\n}\n\nTEST_P(IntegrationTest, RouterDirectResponseEmptyBody) {\n  static const std::string domain(\"direct.example.com\");\n  static const std::string prefix(\"/\");\n  static const Http::Code status(Http::Code::OK);\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* route_config = hcm.mutable_route_config();\n        auto* header_value_option = 
route_config->mutable_response_headers_to_add()->Add();\n        header_value_option->mutable_header()->set_key(\"x-additional-header\");\n        header_value_option->mutable_header()->set_value(\"example-value\");\n        header_value_option->mutable_append()->set_value(false);\n        header_value_option = route_config->mutable_response_headers_to_add()->Add();\n        header_value_option->mutable_header()->set_key(\"content-type\");\n        header_value_option->mutable_header()->set_value(\"text/html\");\n        header_value_option->mutable_append()->set_value(false);\n        // Add a wrong content-length.\n        header_value_option = route_config->mutable_response_headers_to_add()->Add();\n        header_value_option->mutable_header()->set_key(\"content-length\");\n        header_value_option->mutable_header()->set_value(\"2000\");\n        header_value_option->mutable_append()->set_value(false);\n        auto* virtual_host = route_config->add_virtual_hosts();\n        virtual_host->set_name(domain);\n        virtual_host->add_domains(domain);\n        virtual_host->add_routes()->mutable_match()->set_prefix(prefix);\n        virtual_host->mutable_routes(0)->mutable_direct_response()->set_status(\n            static_cast<uint32_t>(status));\n      });\n  initialize();\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/\", \"\", downstream_protocol_, version_, \"direct.example.com\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"example-value\", response->headers()\n                                 .get(Envoy::Http::LowerCaseString(\"x-additional-header\"))\n                                 ->value()\n                                 .getStringView());\n  // Content-type header is removed.\n  EXPECT_EQ(nullptr, response->headers().ContentType());\n  // Content-length header is correct.\n  EXPECT_EQ(\"0\", 
response->headers().getContentLengthValue());\n}\n\nTEST_P(IntegrationTest, ConnectionClose) {\n  config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter());\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response =\n      codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                          {\":path\", \"/healthcheck\"},\n                                                                          {\":authority\", \"host\"},\n                                                                          {\"connection\", \"close\"}});\n  response->waitForEndStream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"200\"));\n}\n\nTEST_P(IntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, false, false);\n}\n\nTEST_P(IntegrationTest, RouterRequestAndResponseWithGiantBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false);\n}\n\nTEST_P(IntegrationTest, FlowControlOnAndGiantBody) {\n  config_helper_.setBufferLimits(1024, 1024);\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false);\n}\n\nTEST_P(IntegrationTest, LargeFlowControlOnAndGiantBody) {\n  config_helper_.setBufferLimits(128 * 1024, 128 * 1024);\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false);\n}\n\nTEST_P(IntegrationTest, RouterRequestAndResponseWithBodyAndContentLengthNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, false, true);\n}\n\nTEST_P(IntegrationTest, RouterRequestAndResponseWithGiantBodyAndContentLengthNoBuffer) {\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true);\n}\n\nTEST_P(IntegrationTest, FlowControlOnAndGiantBodyWithContentLength) 
{\n  config_helper_.setBufferLimits(1024, 1024);\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true);\n}\n\nTEST_P(IntegrationTest, LargeFlowControlOnAndGiantBodyWithContentLength) {\n  config_helper_.setBufferLimits(128 * 1024, 128 * 1024);\n  testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true);\n}\n\nTEST_P(IntegrationTest, RouterRequestAndResponseLargeHeaderNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, true);\n}\n\nTEST_P(IntegrationTest, RouterHeaderOnlyRequestAndResponseNoBuffer) {\n  testRouterHeaderOnlyRequestAndResponse();\n}\n\nTEST_P(IntegrationTest, RouterUpstreamDisconnectBeforeRequestcomplete) {\n  testRouterUpstreamDisconnectBeforeRequestComplete();\n}\n\nTEST_P(IntegrationTest, RouterUpstreamDisconnectBeforeResponseComplete) {\n  testRouterUpstreamDisconnectBeforeResponseComplete();\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/9508\nTEST_P(IntegrationTest, ResponseFramedByConnectionCloseWithReadLimits) {\n  // Set a small buffer limit on the downstream in order to trigger a call to trigger readDisable on\n  // the upstream when proxying the response. 
Upstream limit needs to be larger so that\n  // RawBufferSocket::doRead reads the response body and detects the upstream close in the same call\n  // stack.\n  config_helper_.setBufferLimits(100000, 1);\n  initialize();\n\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n  // Disable chunk encoding to trigger framing by connection close.\n  upstream_request_->http1StreamEncoderOptions().value().get().disableChunkEncoding();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request_->encodeData(512, true);\n  ASSERT_TRUE(fake_upstream_connection_->close());\n\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"200\"));\n  EXPECT_EQ(512, response->body().size());\n}\n\nTEST_P(IntegrationTest, RouterDownstreamDisconnectBeforeRequestComplete) {\n  testRouterDownstreamDisconnectBeforeRequestComplete();\n}\n\nTEST_P(IntegrationTest, RouterDownstreamDisconnectBeforeResponseComplete) {\n  testRouterDownstreamDisconnectBeforeResponseComplete();\n}\n\nTEST_P(IntegrationTest, RouterUpstreamResponseBeforeRequestComplete) {\n  testRouterUpstreamResponseBeforeRequestComplete();\n}\n\nTEST_P(IntegrationTest, EnvoyProxyingEarly100ContinueWithEncoderFilter) {\n  testEnvoyProxying1xx(true, true);\n}\n\nTEST_P(IntegrationTest, EnvoyProxyingLate100ContinueWithEncoderFilter) {\n  testEnvoyProxying1xx(false, true);\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/10923.\nTEST_P(IntegrationTest, EnvoyProxying100ContinueWithDecodeDataPause) {\n  config_helper_.addFilter(R\"EOF(\n  name: stop-iteration-and-continue-filter\n  typed_config:\n    \"@type\": type.googleapis.com/google.protobuf.Empty\n  )EOF\");\n  testEnvoyProxying1xx(true);\n}\n\n// This is a regression for 
https://github.com/envoyproxy/envoy/issues/2715 and validates that a\n// pending request is not sent on a connection that has been half-closed.\nTEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    auto* cluster = static_resources->mutable_clusters(0);\n    // Ensure we only have one connection upstream, one request active at a time.\n    cluster->mutable_max_requests_per_connection()->set_value(1);\n    auto* circuit_breakers = cluster->mutable_circuit_breakers();\n    circuit_breakers->add_thresholds()->mutable_max_connections()->set_value(1);\n  });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Request 1.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024);\n  waitForNextUpstreamRequest();\n\n  // Request 2.\n  IntegrationCodecClientPtr codec_client2 = makeHttpConnection(lookupPort(\"http\"));\n  auto response2 = codec_client2->makeRequestWithBody(default_request_headers_, 512);\n\n  // Validate one request active, the other pending.\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.upstream_rq_active\", 1);\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.upstream_rq_pending_active\", 1);\n\n  // Response 1.\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_total\", 1);\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_200\", 1);\n\n  // Response 2.\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  
fake_upstream_connection_.reset();\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(1024, true);\n  response2->waitForEndStream();\n  codec_client2->close();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response2->complete());\n  EXPECT_EQ(\"200\", response2->headers().getStatusValue());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_total\", 2);\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_200\", 2);\n}\n\n// Test hitting the bridge filter with too many response bytes to buffer. Given\n// the headers are not proxied, the connection manager will send a local error reply.\nTEST_P(IntegrationTest, HittingGrpcFilterLimitBufferingHeaders) {\n  config_helper_.addFilter(\n      \"{ name: grpc_http1_bridge, typed_config: { \\\"@type\\\": \"\n      \"type.googleapis.com/envoy.config.filter.http.grpc_http1_bridge.v2.Config } }\");\n  config_helper_.setBufferLimits(1024, 1024);\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"application/grpc\"},\n                                     {\"x-envoy-retry-grpc-on\", \"cancelled\"}});\n  waitForNextUpstreamRequest();\n\n  // Send the overly large response. 
Because the grpc_http1_bridge filter buffers and buffer\n  // limits are exceeded, this will be translated into an unknown gRPC error.\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  upstream_request_->encodeData(1024 * 65, false);\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"200\"));\n  EXPECT_THAT(response->headers(),\n              HeaderValueOf(Headers::get().GrpcStatus, \"2\")); // Unknown gRPC error\n}\n\nTEST_P(IntegrationTest, TestSmuggling) {\n  initialize();\n  const std::string smuggled_request = \"GET / HTTP/1.1\\r\\nHost: disallowed\\r\\n\\r\\n\";\n  ASSERT_EQ(smuggled_request.length(), 36);\n  // Make sure the http parser rejects having content-length and transfer-encoding: chunked\n  // on the same request, regardless of order and spacing.\n  {\n    std::string response;\n    const std::string full_request =\n        \"GET / HTTP/1.1\\r\\nHost: host\\r\\ncontent-length: 36\\r\\ntransfer-encoding: chunked\\r\\n\\r\\n\" +\n        smuggled_request;\n    sendRawHttpAndWaitForResponse(lookupPort(\"http\"), full_request.c_str(), &response, false);\n    EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n  }\n  {\n    std::string response;\n    const std::string request = \"GET / HTTP/1.1\\r\\nHost: host\\r\\ntransfer-encoding: chunked \"\n                                \"\\r\\ncontent-length: 36\\r\\n\\r\\n\" +\n                                smuggled_request;\n    sendRawHttpAndWaitForResponse(lookupPort(\"http\"), request.c_str(), &response, false);\n    EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n  }\n  {\n    std::string response;\n    const std::string request = \"GET / HTTP/1.1\\r\\nHost: host\\r\\ntransfer-encoding: \"\n                                \"identity,chunked \\r\\ncontent-length: 
36\\r\\n\\r\\n\" +\n                                smuggled_request;\n    sendRawHttpAndWaitForResponse(lookupPort(\"http\"), request.c_str(), &response, false);\n    EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n  }\n}\n\nTEST_P(IntegrationTest, TestServerAllowChunkedLength) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_http_protocol_options()->set_allow_chunked_length(true);\n      });\n  initialize();\n\n  auto tcp_client = makeTcpConnection(lookupPort(\"http\"));\n  ASSERT_TRUE(tcp_client->write(\"POST / HTTP/1.1\\r\\n\"\n                                \"Host: host\\r\\n\"\n                                \"Content-length: 100\\r\\n\"\n                                \"Transfer-Encoding: chunked\\r\\n\\r\\n\"\n                                \"4\\r\\nbody\\r\\n\"\n                                \"0\\r\\n\\r\\n\"));\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(\n      FakeRawConnection::waitForInexactMatch(\"\\r\\n\\r\\n\"), &data));\n\n  ASSERT_THAT(data, HasSubstr(\"POST / HTTP/1.1\"));\n  ASSERT_THAT(data, HasSubstr(\"transfer-encoding: chunked\"));\n  // verify no 'content-length' header\n  ASSERT_THAT(data, Not(HasSubstr(\"ontent-length\")));\n\n  ASSERT_TRUE(\n      fake_upstream_connection->write(\"HTTP/1.1 200 OK\\r\\nTransfer-encoding: chunked\\r\\n\\r\\n\"));\n  ASSERT_TRUE(fake_upstream_connection->close());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  tcp_client->close();\n}\n\nTEST_P(IntegrationTest, TestClientAllowChunkedLength) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    
RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() == 1, \"\");\n    if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n      auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n      cluster->mutable_http_protocol_options()->set_allow_chunked_length(true);\n    }\n  });\n\n  initialize();\n\n  auto tcp_client = makeTcpConnection(lookupPort(\"http\"));\n  ASSERT_TRUE(tcp_client->write(\"GET / HTTP/1.1\\r\\nHost: host\\r\\n\\r\\n\"));\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(\n      FakeRawConnection::waitForInexactMatch(\"\\r\\n\\r\\n\"), &data));\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"HTTP/1.1 200 OK\\r\\n\"\n                                              \"Transfer-encoding: chunked\\r\\n\"\n                                              \"Content-Length: 100\\r\\n\\r\\n\"\n                                              \"4\\r\\nbody\\r\\n\"\n                                              \"0\\r\\n\\r\\n\"));\n  tcp_client->waitForData(\"\\r\\n\\r\\n\", false);\n  std::string response = tcp_client->data();\n\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 200 OK\\r\\n\"));\n  EXPECT_THAT(response, Not(HasSubstr(\"content-length\")));\n  EXPECT_THAT(response, HasSubstr(\"transfer-encoding: chunked\\r\\n\"));\n  EXPECT_THAT(response, EndsWith(\"\\r\\n\\r\\n\"));\n\n  ASSERT_TRUE(fake_upstream_connection->close());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  tcp_client->close();\n}\n\nTEST_P(IntegrationTest, BadFirstline) {\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"hello\", &response);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n}\n\nTEST_P(IntegrationTest, MissingDelimiter) {\n  useAccessLog(\"%RESPONSE_FLAGS% 
%RESPONSE_CODE_DETAILS%\");\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"),\n                                \"GET / HTTP/1.1\\r\\nHost: host\\r\\nfoo bar\\r\\n\\r\\n\", &response);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n  std::string log = waitForAccessLog(access_log_name_);\n  EXPECT_THAT(log, HasSubstr(\"http1.codec_error\"));\n  EXPECT_THAT(log, HasSubstr(\"DPE\"));\n  EXPECT_THAT(log, Not(HasSubstr(\"DC\")));\n}\n\nTEST_P(IntegrationTest, InvalidCharacterInFirstline) {\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GE(T / HTTP/1.1\\r\\nHost: host\\r\\n\\r\\n\",\n                                &response);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n}\n\nTEST_P(IntegrationTest, InvalidVersion) {\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.01\\r\\nHost: host\\r\\n\\r\\n\",\n                                &response);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n}\n\n// Expect that malformed trailers to break the connection\nTEST_P(IntegrationTest, BadTrailer) {\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"),\n                                \"POST / HTTP/1.1\\r\\n\"\n                                \"Host: host\\r\\n\"\n                                \"Transfer-Encoding: chunked\\r\\n\\r\\n\"\n                                \"4\\r\\n\"\n                                \"body\\r\\n0\\r\\n\"\n                                \"badtrailer\\r\\n\\r\\n\",\n                                &response);\n\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n}\n\n// Expect malformed headers to break the connection\nTEST_P(IntegrationTest, BadHeader) {\n  initialize();\n  std::string response;\n  
sendRawHttpAndWaitForResponse(lookupPort(\"http\"),\n                                \"POST / HTTP/1.1\\r\\n\"\n                                \"Host: host\\r\\n\"\n                                \"badHeader\\r\\n\"\n                                \"Transfer-Encoding: chunked\\r\\n\\r\\n\"\n                                \"4\\r\\n\"\n                                \"body\\r\\n0\\r\\n\\r\\n\",\n                                &response);\n\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n}\n\nTEST_P(IntegrationTest, Http10Disabled) {\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.0\\r\\n\\r\\n\", &response, true);\n  EXPECT_TRUE(response.find(\"HTTP/1.1 426 Upgrade Required\\r\\n\") == 0);\n}\n\nTEST_P(IntegrationTest, Http10DisabledWithUpgrade) {\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.0\\r\\nUpgrade: h2c\\r\\n\\r\\n\",\n                                &response, true);\n  EXPECT_TRUE(response.find(\"HTTP/1.1 426 Upgrade Required\\r\\n\") == 0);\n}\n\n// Turn HTTP/1.0 support on and verify 09 style requests work.\nTEST_P(IntegrationTest, Http09Enabled) {\n  useAccessLog();\n  autonomous_upstream_ = true;\n  config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost);\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET /\\r\\n\\r\\n\", &response, false);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.0 200 OK\\r\\n\"));\n  EXPECT_THAT(response, HasSubstr(\"connection: close\"));\n  EXPECT_THAT(response, Not(HasSubstr(\"transfer-encoding: chunked\\r\\n\")));\n\n  std::unique_ptr<Http::TestRequestHeaderMapImpl> upstream_headers =\n      reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get())->lastRequestHeaders();\n  ASSERT_TRUE(upstream_headers != nullptr);\n  EXPECT_EQ(upstream_headers->Host()->value(), \"default.com\");\n\n  
EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"HTTP/1.0\"));\n}\n\nTEST_P(IntegrationTest, Http09WithKeepalive) {\n  useAccessLog();\n  autonomous_upstream_ = true;\n  config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost);\n  initialize();\n  reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get())\n      ->setResponseHeaders(std::make_unique<Http::TestResponseHeaderMapImpl>(\n          Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}, {\"content-length\", \"0\"}})));\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET /\\r\\nConnection: keep-alive\\r\\n\\r\\n\",\n                                &response, true);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.0 200 OK\\r\\n\"));\n  EXPECT_THAT(response, HasSubstr(\"connection: keep-alive\\r\\n\"));\n}\n\n// Turn HTTP/1.0 support on and verify the request is proxied and the default host is sent upstream.\nTEST_P(IntegrationTest, Http10Enabled) {\n  autonomous_upstream_ = true;\n  config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost);\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.0\\r\\n\\r\\n\", &response, false);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.0 200 OK\\r\\n\"));\n  EXPECT_THAT(response, HasSubstr(\"connection: close\"));\n  EXPECT_THAT(response, Not(HasSubstr(\"transfer-encoding: chunked\\r\\n\")));\n\n  std::unique_ptr<Http::TestRequestHeaderMapImpl> upstream_headers =\n      reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get())->lastRequestHeaders();\n  ASSERT_TRUE(upstream_headers != nullptr);\n  EXPECT_EQ(upstream_headers->Host()->value(), \"default.com\");\n\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"HEAD / HTTP/1.0\\r\\n\\r\\n\", &response, false);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.0 200 OK\\r\\n\"));\n  EXPECT_THAT(response, HasSubstr(\"connection: close\"));\n  EXPECT_THAT(response, 
Not(HasSubstr(\"transfer-encoding: chunked\\r\\n\")));\n}\n\nTEST_P(IntegrationTest, TestInlineHeaders) {\n  autonomous_upstream_ = true;\n  config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost);\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"),\n                                \"GET / HTTP/1.1\\r\\n\"\n                                \"Host: foo.com\\r\\n\"\n                                \"Foo: bar\\r\\n\"\n                                \"User-Agent: public\\r\\n\"\n                                \"User-Agent: 123\\r\\n\"\n                                \"Eep: baz\\r\\n\\r\\n\",\n                                &response, true);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 200 OK\\r\\n\"));\n\n  std::unique_ptr<Http::TestRequestHeaderMapImpl> upstream_headers =\n      reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get())->lastRequestHeaders();\n  ASSERT_TRUE(upstream_headers != nullptr);\n  EXPECT_EQ(upstream_headers->Host()->value(), \"foo.com\");\n  EXPECT_EQ(upstream_headers->get_(\"User-Agent\"), \"public,123\");\n  ASSERT_TRUE(upstream_headers->get(Envoy::Http::LowerCaseString(\"foo\")) != nullptr);\n  EXPECT_EQ(\"bar\",\n            upstream_headers->get(Envoy::Http::LowerCaseString(\"foo\"))->value().getStringView());\n  ASSERT_TRUE(upstream_headers->get(Envoy::Http::LowerCaseString(\"eep\")) != nullptr);\n  EXPECT_EQ(\"baz\",\n            upstream_headers->get(Envoy::Http::LowerCaseString(\"eep\"))->value().getStringView());\n}\n\n// Verify for HTTP/1.0 a keep-alive header results in no connection: close.\n// Also verify existing host headers are passed through for the HTTP/1.0 case.\n// This also regression tests proper handling of trailing whitespace after key\n// values, specifically the host header.\nTEST_P(IntegrationTest, Http10WithHostandKeepAliveAndLwsNoContentLength) {\n  autonomous_upstream_ = true;\n  
config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost);\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"),\n                                \"GET / HTTP/1.0\\r\\nHost: foo.com \\r\\nConnection:Keep-alive\\r\\n\\r\\n\",\n                                &response, true);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.0 200 OK\\r\\n\"));\n  EXPECT_THAT(response, HasSubstr(\"connection: close\"));\n  EXPECT_THAT(response, Not(HasSubstr(\"connection: keep-alive\")));\n  EXPECT_THAT(response, Not(HasSubstr(\"content-length:\")));\n  EXPECT_THAT(response, Not(HasSubstr(\"transfer-encoding: chunked\\r\\n\")));\n\n  std::unique_ptr<Http::TestRequestHeaderMapImpl> upstream_headers =\n      reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get())->lastRequestHeaders();\n  ASSERT_TRUE(upstream_headers != nullptr);\n  EXPECT_EQ(upstream_headers->Host()->value(), \"foo.com\");\n}\n\nTEST_P(IntegrationTest, Http10WithHostandKeepAliveAndContentLengthAndLws) {\n  autonomous_upstream_ = true;\n  config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost);\n  initialize();\n  reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get())\n      ->setResponseHeaders(std::make_unique<Http::TestResponseHeaderMapImpl>(\n          Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}, {\"content-length\", \"10\"}})));\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"),\n                                \"GET / HTTP/1.0\\r\\nHost: foo.com \\r\\nConnection:Keep-alive\\r\\n\\r\\n\",\n                                &response, true);\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.0 200 OK\\r\\n\"));\n  EXPECT_THAT(response, Not(HasSubstr(\"connection: close\")));\n  EXPECT_THAT(response, HasSubstr(\"connection: keep-alive\"));\n  EXPECT_THAT(response, HasSubstr(\"content-length:\"));\n  EXPECT_THAT(response, Not(HasSubstr(\"transfer-encoding: chunked\\r\\n\")));\n}\n\nTEST_P(IntegrationTest, 
Pipeline) {\n  autonomous_upstream_ = true;\n  initialize();\n  std::string response;\n\n  auto connection = createConnectionDriver(\n      lookupPort(\"http\"), \"GET / HTTP/1.1\\r\\nHost: host\\r\\n\\r\\nGET / HTTP/1.1\\r\\n\\r\\n\",\n      [&](Network::ClientConnection&, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n      });\n  // First response should be success.\n  while (response.find(\"200\") == std::string::npos) {\n    connection->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 200 OK\\r\\n\"));\n\n  // Second response should be 400 (no host)\n  while (response.find(\"400\") == std::string::npos) {\n    connection->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n  connection->close();\n}\n\n// Checks to ensure that we reject the third request that is pipelined in the\n// same request\nTEST_P(IntegrationTest, PipelineWithTrailers) {\n  config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1());\n  config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1());\n  autonomous_upstream_ = true;\n  autonomous_allow_incomplete_streams_ = true;\n  initialize();\n  std::string response;\n\n  std::string good_request(\"POST / HTTP/1.1\\r\\n\"\n                           \"Host: host\\r\\n\"\n                           \"Transfer-Encoding: chunked\\r\\n\\r\\n\"\n                           \"4\\r\\n\"\n                           \"body\\r\\n0\\r\\n\"\n                           \"trailer1:t2\\r\\n\"\n                           \"trailer2:t3\\r\\n\"\n                           \"\\r\\n\");\n\n  std::string bad_request(\"POST / HTTP/1.1\\r\\n\"\n                          \"Host: host\\r\\n\"\n                          \"Transfer-Encoding: chunked\\r\\n\\r\\n\"\n                          \"4\\r\\n\"\n                          \"body\\r\\n0\\r\\n\"\n                          \"trailer1\\r\\n\"\n       
                   \"trailer2:t3\\r\\n\"\n                          \"\\r\\n\");\n\n  auto connection = createConnectionDriver(\n      lookupPort(\"http\"), absl::StrCat(good_request, good_request, bad_request),\n      [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n      });\n\n  // First response should be success.\n  size_t pos;\n  while ((pos = response.find(\"200\")) == std::string::npos) {\n    connection->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 200 OK\\r\\n\"));\n  while (response.find(\"200\", pos + 1) == std::string::npos) {\n    connection->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  while (response.find(\"400\") == std::string::npos) {\n    connection->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n  connection->close();\n}\n\n// Add a pipeline test where complete request headers in the first request merit\n// an inline sendLocalReply to make sure the \"kick\" works under the call stack\n// of dispatch as well as when a response is proxied from upstream.\nTEST_P(IntegrationTest, PipelineInline) {\n  // When deprecating this flag, set hcm.mutable_stream_error_on_invalid_http_message true.\n  config_helper_.addRuntimeOverride(\"envoy.reloadable_features.hcm_stream_error_on_invalid_message\",\n                                    \"false\");\n\n  autonomous_upstream_ = true;\n  initialize();\n  std::string response;\n\n  auto connection = createConnectionDriver(\n      lookupPort(\"http\"), \"GET / HTTP/1.1\\r\\n\\r\\nGET / HTTP/1.0\\r\\n\\r\\n\",\n      [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n      });\n\n  while (response.find(\"400\") == std::string::npos) {\n    connection->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  EXPECT_THAT(response, 
HasSubstr(\"HTTP/1.1 400 Bad Request\\r\\n\"));\n\n  while (response.find(\"426\") == std::string::npos) {\n    connection->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 426 Upgrade Required\\r\\n\"));\n  connection->close();\n}\n\nTEST_P(IntegrationTest, NoHost) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"}};\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"400\", response->headers().getStatusValue());\n}\n\nTEST_P(IntegrationTest, BadPath) {\n  config_helper_.addConfigModifier(&setDisallowAbsoluteUrl);\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"),\n                                \"GET http://api.lyft.com HTTP/1.1\\r\\nHost: host\\r\\n\\r\\n\", &response,\n                                true);\n  EXPECT_TRUE(response.find(\"HTTP/1.1 404 Not Found\\r\\n\") == 0);\n}\n\nTEST_P(IntegrationTest, AbsolutePath) {\n  // Configure www.redirect.com to send a redirect, and ensure the redirect is\n  // encountered via absolute URL.\n  auto host = config_helper_.createVirtualHost(\"www.redirect.com\", \"/\");\n  host.set_require_tls(envoy::config::route::v3::VirtualHost::ALL);\n  config_helper_.addVirtualHost(host);\n\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"),\n                                \"GET http://www.redirect.com HTTP/1.1\\r\\nHost: host\\r\\n\\r\\n\",\n                                &response, true);\n  EXPECT_FALSE(response.find(\"HTTP/1.1 404 Not Found\\r\\n\") == 0);\n}\n\nTEST_P(IntegrationTest, AbsolutePathWithPort) {\n  // Configure www.namewithport.com:1234 to send a redirect, and ensure the redirect is\n  // encountered 
via absolute URL with a port.\n  auto host = config_helper_.createVirtualHost(\"www.namewithport.com:1234\", \"/\");\n  host.set_require_tls(envoy::config::route::v3::VirtualHost::ALL);\n  config_helper_.addVirtualHost(host);\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(\n      lookupPort(\"http\"), \"GET http://www.namewithport.com:1234 HTTP/1.1\\r\\nHost: host\\r\\n\\r\\n\",\n      &response, true);\n  EXPECT_FALSE(response.find(\"HTTP/1.1 404 Not Found\\r\\n\") == 0);\n}\n\nTEST_P(IntegrationTest, AbsolutePathWithoutPort) {\n  // Add a restrictive default match, to avoid the request hitting the * / catchall.\n  config_helper_.setDefaultHostAndRoute(\"foo.com\", \"/found\");\n  // Set a matcher for www.namewithport.com:1234 and verify http://www.namewithport.com does not\n  // match\n  auto host = config_helper_.createVirtualHost(\"www.namewithport.com:1234\", \"/\");\n  host.set_require_tls(envoy::config::route::v3::VirtualHost::ALL);\n  config_helper_.addVirtualHost(host);\n  initialize();\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"),\n                                \"GET http://www.namewithport.com HTTP/1.1\\r\\nHost: host\\r\\n\\r\\n\",\n                                &response, true);\n  EXPECT_TRUE(response.find(\"HTTP/1.1 404 Not Found\\r\\n\") == 0) << response;\n}\n\n// Ensure that connect behaves the same with allow_absolute_url enabled and without\nTEST_P(IntegrationTest, Connect) {\n  const std::string& request = \"CONNECT www.somewhere.com:80 HTTP/1.1\\r\\n\\r\\n\";\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    // Clone the whole listener.\n    auto static_resources = bootstrap.mutable_static_resources();\n    auto* old_listener = static_resources->mutable_listeners(0);\n    auto* cloned_listener = static_resources->add_listeners();\n    cloned_listener->CopyFrom(*old_listener);\n    
old_listener->set_name(\"http_forward\");\n  });\n  // Set the first listener to disallow absolute URLs.\n  config_helper_.addConfigModifier(&setDisallowAbsoluteUrl);\n  initialize();\n\n  std::string response1;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), request.c_str(), &response1, true);\n\n  std::string response2;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http_forward\"), request.c_str(), &response2, true);\n\n  EXPECT_EQ(normalizeDate(response1), normalizeDate(response2));\n}\n\nTEST_P(IntegrationTest, UpstreamProtocolError) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":authority\", \"host\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  // TODO(mattklein123): Waiting for exact amount of data is a hack. 
This needs to\n  // be fixed.\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(187, &data));\n  ASSERT_TRUE(fake_upstream_connection->write(\"bad protocol data!\"));\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n}\n\nTEST_P(IntegrationTest, TestHead) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl head_request{{\":method\", \"HEAD\"},\n                                              {\":path\", \"/test/long/url\"},\n                                              {\":scheme\", \"http\"},\n                                              {\":authority\", \"host\"}};\n\n  // Without an explicit content length, assume we chunk for HTTP/1.1\n  auto response = sendRequestAndWaitForResponse(head_request, 0, default_response_headers_, 0);\n  ASSERT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"200\"));\n  EXPECT_EQ(response->headers().ContentLength(), nullptr);\n  EXPECT_THAT(response->headers(),\n              HeaderValueOf(Headers::get().TransferEncoding,\n                            Http::Headers::get().TransferEncodingValues.Chunked));\n  EXPECT_EQ(0, response->body().size());\n\n  // Preserve explicit content length.\n  Http::TestResponseHeaderMapImpl content_length_response{{\":status\", \"200\"},\n                                                          {\"content-length\", \"12\"}};\n  response = sendRequestAndWaitForResponse(head_request, 0, content_length_response, 0);\n  ASSERT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"200\"));\n  EXPECT_THAT(response->headers(), HeaderValueOf(Headers::get().ContentLength, \"12\"));\n  EXPECT_EQ(response->headers().TransferEncoding(), nullptr);\n  EXPECT_EQ(0, response->body().size());\n}\n\n// The Envoy 
HTTP/1.1 codec ASSERTs that T-E headers are cleared in\n// encodeHeaders, so to test upstreams explicitly sending T-E: chunked we have\n// to send raw HTTP.\nTEST_P(IntegrationTest, TestHeadWithExplicitTE) {\n  initialize();\n\n  auto tcp_client = makeTcpConnection(lookupPort(\"http\"));\n  ASSERT_TRUE(tcp_client->write(\"HEAD / HTTP/1.1\\r\\nHost: host\\r\\n\\r\\n\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(\n      FakeRawConnection::waitForInexactMatch(\"\\r\\n\\r\\n\"), &data));\n\n  ASSERT_TRUE(\n      fake_upstream_connection->write(\"HTTP/1.1 200 OK\\r\\nTransfer-encoding: chunked\\r\\n\\r\\n\"));\n  tcp_client->waitForData(\"\\r\\n\\r\\n\", false);\n  std::string response = tcp_client->data();\n\n  EXPECT_THAT(response, HasSubstr(\"HTTP/1.1 200 OK\\r\\n\"));\n  EXPECT_THAT(response, Not(HasSubstr(\"content-length\")));\n  EXPECT_THAT(response, HasSubstr(\"transfer-encoding: chunked\\r\\n\"));\n  EXPECT_THAT(response, EndsWith(\"\\r\\n\\r\\n\"));\n\n  ASSERT_TRUE(fake_upstream_connection->close());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  tcp_client->close();\n}\n\nTEST_P(IntegrationTest, TestBind) {\n  std::string address_string;\n  if (GetParam() == Network::Address::IpVersion::v4) {\n    address_string = TestUtility::getIpv4Loopback();\n  } else {\n    address_string = \"::1\";\n  }\n  config_helper_.setSourceAddress(address_string);\n  useAccessLog(\"%UPSTREAM_LOCAL_ADDRESS%\\n\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response =\n      codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                        {\":path\", \"/test/long/url\"},\n                                                                        {\":scheme\", 
\"http\"},\n                                                                        {\":authority\", \"host\"}},\n                                         1024);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_NE(fake_upstream_connection_, nullptr);\n  std::string address =\n      fake_upstream_connection_->connection().remoteAddress()->ip()->addressAsString();\n  EXPECT_EQ(address, address_string);\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_NE(upstream_request_, nullptr);\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  cleanupUpstreamAndDownstream();\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(address_string));\n}\n\nTEST_P(IntegrationTest, TestFailedBind) {\n  config_helper_.setSourceAddress(\"8.8.8.8\");\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  // With no ability to successfully bind on an upstream connection Envoy should\n  // send a 500.\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-upstream-rq-timeout-ms\", \"1000\"}});\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"503\"));\n  EXPECT_LT(0, test_server_->counter(\"cluster.cluster_0.bind_errors\")->value());\n}\n\nConfigHelper::HttpModifierFunction setVia(const std::string& via) {\n  return\n      [via](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) { hcm.set_via(via); };\n}\n\n// 
Validate in a basic header-only request we get via header insertion.\nTEST_P(IntegrationTest, ViaAppendHeaderOnly) {\n  config_helper_.addConfigModifier(setVia(\"bar\"));\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":authority\", \"host\"},\n                                     {\"via\", \"foo\"},\n                                     {\"connection\", \"close\"}});\n  waitForNextUpstreamRequest();\n  EXPECT_THAT(upstream_request_->headers(), HeaderValueOf(Headers::get().Via, \"foo, bar\"));\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"200\"));\n  EXPECT_THAT(response->headers(), HeaderValueOf(Headers::get().Via, \"bar\"));\n}\n\n// Validate that 100-continue works as expected with via header addition on both request and\n// response path.\nTEST_P(IntegrationTest, ViaAppendWith100Continue) {\n  config_helper_.addConfigModifier(setVia(\"foo\"));\n  testEnvoyHandling100Continue(false, \"foo\");\n}\n\n// Test delayed close semantics for downstream HTTP/1.1 connections. 
When an early response is\n// sent by Envoy, it will wait for response acknowledgment (via FIN/RST) from the client before\n// closing the socket (with a timeout for ensuring cleanup).\nTEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); });\n  // This test will trigger an early 413 Payload Too Large response due to buffer limits being\n  // exceeded. The following filter is needed since the router filter will never trigger a 413.\n  config_helper_.addFilter(\"{ name: encoder-decoder-buffer-filter, typed_config: { \\\"@type\\\": \"\n                           \"type.googleapis.com/google.protobuf.Empty } }\");\n  config_helper_.setBufferLimits(1024, 1024);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  codec_client_->sendData(*request_encoder_, 1024 * 65, false);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"413\", response->headers().getStatusValue());\n  // With no delayed close processing, Envoy will close the connection immediately after flushing\n  // and this should instead return true.\n  EXPECT_FALSE(codec_client_->waitForDisconnect(std::chrono::milliseconds(500)));\n\n  // Issue a local close and check that the client did not pick up a remote close which 
can happen\n  // when delayed close semantics are disabled.\n  codec_client_->connection()->close(Network::ConnectionCloseType::NoFlush);\n  EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::LocalClose);\n}\n\n// Test configuration of the delayed close timeout on downstream HTTP/1.1 connections. A value of 0\n// disables delayed close processing.\nTEST_P(IntegrationTest, TestDelayedConnectionTeardownConfig) {\n  config_helper_.addFilter(\"{ name: encoder-decoder-buffer-filter, typed_config: { \\\"@type\\\": \"\n                           \"type.googleapis.com/google.protobuf.Empty } }\");\n  config_helper_.setBufferLimits(1024, 1024);\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(0); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  codec_client_->sendData(*request_encoder_, 1024 * 65, false);\n\n  response->waitForEndStream();\n  // There is a potential race in the client's response processing when delayed close logic is\n  // disabled in Envoy (see https://github.com/envoyproxy/envoy/issues/2929). Depending on timing,\n  // a client may receive an RST prior to reading the response data from the socket, which may clear\n  // the receive buffers. 
Also, clients which don't flush the receive buffer upon receiving a remote\n  // close may also lose data (Envoy is susceptible to this).\n  // Therefore, avoid checking response code/payload here and instead simply look for the remote\n  // close.\n  EXPECT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(500)));\n  EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose);\n}\n\n// Test that delay closed connections are eventually force closed when the timeout triggers.\nTEST_P(IntegrationTest, TestDelayedConnectionTeardownTimeoutTrigger) {\n  config_helper_.addFilter(\"{ name: encoder-decoder-buffer-filter, typed_config: { \\\"@type\\\": \"\n                           \"type.googleapis.com/google.protobuf.Empty } }\");\n  config_helper_.setBufferLimits(1024, 1024);\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) {\n        // 200ms.\n        hcm.mutable_delayed_close_timeout()->set_nanos(200000000);\n      });\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  codec_client_->sendData(*request_encoder_, 1024 * 65, false);\n\n  response->waitForEndStream();\n  // The delayed close timeout should trigger since client is not closing the connection.\n  EXPECT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(2000)));\n  EXPECT_EQ(codec_client_->lastConnectionEvent(), 
Network::ConnectionEvent::RemoteClose);\n  EXPECT_EQ(test_server_->counter(\"http.config_test.downstream_cx_delayed_close_timeout\")->value(),\n            1);\n}\n\n// Test that if the route cache is cleared, it doesn't cause problems.\nTEST_P(IntegrationTest, TestClearingRouteCacheFilter) {\n  config_helper_.addFilter(\"{ name: clear-route-cache, typed_config: { \\\"@type\\\": \"\n                           \"type.googleapis.com/google.protobuf.Empty } }\");\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 0);\n}\n\n// Test that if no connection pools are free, Envoy fails to establish an upstream connection.\nTEST_P(IntegrationTest, NoConnectionPoolsFree) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    auto* cluster = static_resources->mutable_clusters(0);\n\n    // Somewhat contrived with 0, but this is the simplest way to test right now.\n    auto* circuit_breakers = cluster->mutable_circuit_breakers();\n    circuit_breakers->add_thresholds()->mutable_max_connection_pools()->set_value(0);\n  });\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Request 1.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024);\n\n  // Validate none active.\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.upstream_rq_active\", 0);\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.upstream_rq_pending_active\", 0);\n\n  response->waitForEndStream();\n\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_503\", 1);\n\n  EXPECT_EQ(test_server_->counter(\"cluster.cluster_0.upstream_cx_pool_overflow\")->value(), 1);\n}\n\nTEST_P(IntegrationTest, ProcessObjectHealthy) {\n  config_helper_.addFilter(\"{ 
name: process-context-filter, typed_config: { \\\"@type\\\": \"\n                           \"type.googleapis.com/google.protobuf.Empty } }\");\n\n  ProcessObjectForFilter healthy_object(true);\n  process_object_ = healthy_object;\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response =\n      codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                          {\":path\", \"/healthcheck\"},\n                                                                          {\":authority\", \"host\"},\n                                                                          {\"connection\", \"close\"}});\n  response->waitForEndStream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"200\"));\n}\n\nTEST_P(IntegrationTest, ProcessObjectUnealthy) {\n  config_helper_.addFilter(\"{ name: process-context-filter, typed_config: { \\\"@type\\\": \"\n                           \"type.googleapis.com/google.protobuf.Empty } }\");\n\n  ProcessObjectForFilter unhealthy_object(false);\n  process_object_ = unhealthy_object;\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response =\n      codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                          {\":path\", \"/healthcheck\"},\n                                                                          {\":authority\", \"host\"},\n                                                                          {\"connection\", \"close\"}});\n  response->waitForEndStream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"500\"));\n}\n\nTEST_P(IntegrationTest, 
TrailersDroppedDuringEncoding) { testTrailers(10, 10, false, false); }\n\nTEST_P(IntegrationTest, TrailersDroppedUpstream) {\n  config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1());\n  testTrailers(10, 10, false, false);\n}\n\nTEST_P(IntegrationTest, TrailersDroppedDownstream) {\n  config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1());\n  testTrailers(10, 10, false, false);\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, UpstreamEndpointIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(UpstreamEndpointIntegrationTest, TestUpstreamEndpointAddress) {\n  initialize();\n  EXPECT_STREQ(fake_upstreams_[0]->localAddress()->ip()->addressAsString().c_str(),\n               Network::Test::getLoopbackAddressString(GetParam()).c_str());\n}\n\n// Send continuous pipelined requests while not reading responses, to check\n// HTTP/1.1 response flood protection.\nTEST_P(IntegrationTest, TestFlood) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_stream_error_on_invalid_http_message()->set_value(true);\n      });\n  initialize();\n\n  // Set up a raw connection to easily send requests without reading responses.\n  Network::ClientConnectionPtr raw_connection = makeClientConnection(lookupPort(\"http\"));\n  raw_connection->connect();\n\n  // Read disable so responses will queue up.\n  uint32_t bytes_to_send = 0;\n  raw_connection->readDisable(true);\n  // Track locally queued bytes, to make sure the outbound client queue doesn't back up.\n  raw_connection->addBytesSentCallback([&](uint64_t bytes) { bytes_to_send -= bytes; });\n\n  // Keep sending requests until flood protection kicks in and kills the connection.\n  while (raw_connection->state() == Network::Connection::State::Open) {\n    // These 
requests are missing the host header, so will provoke an internally generated error\n    // response from Envoy.\n    Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\n\\r\\nGET / HTTP/1.1\\r\\n\\r\\nGET / HTTP/1.1\\r\\n\\r\\n\");\n    bytes_to_send += buffer.length();\n    raw_connection->write(buffer, false);\n    // Loop until all bytes are sent.\n    while (bytes_to_send > 0 && raw_connection->state() == Network::Connection::State::Open) {\n      raw_connection->dispatcher().run(Event::Dispatcher::RunType::NonBlock);\n    }\n  }\n\n  // Verify the connection was closed due to flood protection.\n  EXPECT_EQ(1, test_server_->counter(\"http1.response_flood\")->value());\n}\n\nTEST_P(IntegrationTest, TestFloodUpstreamErrors) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); });\n  autonomous_upstream_ = true;\n  initialize();\n\n  // Set an Upstream reply with an invalid content-length, which will be rejected by the Envoy.\n  auto response_headers = std::make_unique<Http::TestResponseHeaderMapImpl>(\n      Http::TestResponseHeaderMapImpl({{\":status\", \"200\"}, {\"content-length\", \"invalid\"}}));\n  reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get())\n      ->setResponseHeaders(std::move(response_headers));\n\n  // Set up a raw connection to easily send requests without reading responses. 
Also, set a small\n  // TCP receive buffer to speed up connection backup while proxying the response flood.\n  auto options = std::make_shared<Network::Socket::Options>();\n  options->emplace_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND,\n      ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024));\n  Network::ClientConnectionPtr raw_connection =\n      makeClientConnectionWithOptions(lookupPort(\"http\"), options);\n  raw_connection->connect();\n\n  // Read disable so responses will queue up.\n  uint32_t bytes_to_send = 0;\n  raw_connection->readDisable(true);\n  // Track locally queued bytes, to make sure the outbound client queue doesn't back up.\n  raw_connection->addBytesSentCallback([&](uint64_t bytes) { bytes_to_send -= bytes; });\n\n  // Keep sending requests until flood protection kicks in and kills the connection.\n  while (raw_connection->state() == Network::Connection::State::Open) {\n    // The upstream response is invalid, and will trigger an internally generated error response\n    // from Envoy.\n    Buffer::OwnedImpl buffer(\"GET / HTTP/1.1\\r\\nhost: foo.com\\r\\n\\r\\n\");\n    bytes_to_send += buffer.length();\n    raw_connection->write(buffer, false);\n    // Loop until all bytes are sent.\n    while (bytes_to_send > 0 && raw_connection->state() == Network::Connection::State::Open) {\n      raw_connection->dispatcher().run(Event::Dispatcher::RunType::NonBlock);\n    }\n  }\n\n  // Verify the connection was closed due to flood protection.\n  EXPECT_EQ(1, test_server_->counter(\"http1.response_flood\")->value());\n}\n\n// Make sure flood protection doesn't kick in with many requests sent serially.\nTEST_P(IntegrationTest, TestManyBadRequests) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        
hcm.mutable_stream_error_on_invalid_http_message()->set_value(true);\n      });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  Http::TestRequestHeaderMapImpl bad_request{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"}};\n\n  for (int i = 0; i < 1000; ++i) {\n    IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(bad_request);\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_THAT(response->headers(), HttpStatusIs(\"400\"));\n  }\n  EXPECT_EQ(0, test_server_->counter(\"http1.response_flood\")->value());\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/10566\nTEST_P(IntegrationTest, TestUpgradeHeaderInResponse) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT(fake_upstream_connection != nullptr);\n  ASSERT_TRUE(fake_upstream_connection->write(\"HTTP/1.1 200 OK\\r\\n\"\n                                              \"connection: upgrade\\r\\n\"\n                                              \"upgrade: h2\\r\\n\"\n                                              \"Transfer-encoding: chunked\\r\\n\\r\\n\"\n                                              \"b\\r\\nHello World\\r\\n0\\r\\n\\r\\n\",\n                                              false));\n\n  response->waitForHeaders();\n  EXPECT_EQ(nullptr, response->headers().Upgrade());\n  EXPECT_EQ(nullptr, response->headers().Connection());\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"Hello World\", response->body());\n}\n\nTEST_P(IntegrationTest, ConnectWithNoBody) {\n  config_helper_.addConfigModifier(\n      
[&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); });\n  initialize();\n\n  // Send the payload early so we can regression test that body data does not\n  // get proxied until after the response headers are sent.\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"http\"));\n  ASSERT_TRUE(tcp_client->write(\"CONNECT host.com:80 HTTP/1.1\\r\\n\\r\\npayload\", false));\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(\n      FakeRawConnection::waitForInexactMatch(\"\\r\\n\\r\\n\"), &data));\n  EXPECT_TRUE(absl::StartsWith(data, \"CONNECT host.com:80 HTTP/1.1\"));\n  // The payload should not be present as the response headers have not been sent.\n  EXPECT_FALSE(absl::StrContains(data, \"payload\")) << data;\n  // No transfer-encoding: chunked or connection: close\n  EXPECT_FALSE(absl::StrContains(data, \"hunked\")) << data;\n  EXPECT_FALSE(absl::StrContains(data, \"onnection\")) << data;\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"HTTP/1.1 200 OK\\r\\n\\r\\n\"));\n  tcp_client->waitForData(\"\\r\\n\\r\\n\", false);\n  EXPECT_TRUE(absl::StartsWith(tcp_client->data(), \"HTTP/1.1 200 OK\\r\\n\")) << tcp_client->data();\n  // Make sure the following payload is proxied without chunks or any other modifications.\n  ASSERT_TRUE(fake_upstream_connection->waitForData(\n      FakeRawConnection::waitForInexactMatch(\"\\r\\n\\r\\npayload\"), &data));\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"return-payload\"));\n  tcp_client->waitForData(\"\\r\\n\\r\\nreturn-payload\", false);\n  EXPECT_FALSE(absl::StrContains(tcp_client->data(), \"hunked\"));\n\n  tcp_client->close();\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n}\n\nTEST_P(IntegrationTest, 
ConnectWithChunkedBody) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); });\n  initialize();\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"http\"));\n  ASSERT_TRUE(tcp_client->write(\"CONNECT host.com:80 HTTP/1.1\\r\\n\\r\\npayload\", false));\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection->waitForData(\n      FakeRawConnection::waitForInexactMatch(\"\\r\\n\\r\\n\"), &data));\n  // No transfer-encoding: chunked or connection: close\n  EXPECT_FALSE(absl::StrContains(data, \"hunked\")) << data;\n  EXPECT_FALSE(absl::StrContains(data, \"onnection\")) << data;\n  ASSERT_TRUE(fake_upstream_connection->write(\n      \"HTTP/1.1 200 OK\\r\\ntransfer-encoding: chunked\\r\\n\\r\\nb\\r\\nHello World\\r\\n0\\r\\n\\r\\n\"));\n  // The response will be rejected because chunked headers are not allowed with CONNECT upgrades.\n  // Envoy will send a local reply due to the invalid upstream response.\n  tcp_client->waitForDisconnect(false);\n  EXPECT_TRUE(absl::StartsWith(tcp_client->data(), \"HTTP/1.1 503 Service Unavailable\\r\\n\"));\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n}\n\n// Verifies that a 204 response returns without a body\nTEST_P(IntegrationTest, Response204WithBody) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n  // Create a response with a body. 
This will cause an upstream messaging error but downstream\n  // should still see a response.\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"204\"}}, false);\n  upstream_request_->encodeData(512, true);\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_THAT(response->headers(), HttpStatusIs(\"204\"));\n  // The body should be removed\n  EXPECT_EQ(0, response->body().size());\n}\n\nTEST_P(IntegrationTest, QuitQuitQuit) {\n  initialize();\n  test_server_->useAdminInterfaceToQuit(true);\n}\n\n// override_stream_error_on_invalid_http_message=true and HCM\n// stream_error_on_invalid_http_message=false: test that HTTP/1.1 connection is left open on invalid\n// HTTP message (missing :host header)\nTEST_P(IntegrationTest, ConnectionIsLeftOpenIfHCMStreamErrorIsFalseAndOverrideIsTrue) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) -> void {\n        hcm.mutable_stream_error_on_invalid_http_message()->set_value(false);\n        hcm.mutable_http_protocol_options()\n            ->mutable_override_stream_error_on_invalid_http_message()\n            ->set_value(true);\n      });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"POST\"}, {\":path\", \"/test/long/url\"}, {\"content-length\", \"0\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_FALSE(codec_client_->waitForDisconnect());\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"400\", response->headers().getStatusValue());\n}\n\n// override_stream_error_on_invalid_http_message is not set and HCM\n// stream_error_on_invalid_http_message=true: test that HTTP/1.1 connection is left open on invalid\n// HTTP message (missing 
:host header)\nTEST_P(IntegrationTest, ConnectionIsLeftOpenIfHCMStreamErrorIsTrueAndOverrideNotSet) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) -> void { hcm.mutable_stream_error_on_invalid_http_message()->set_value(true); });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"POST\"}, {\":path\", \"/test/long/url\"}, {\"content-length\", \"0\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_FALSE(codec_client_->waitForDisconnect());\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"400\", response->headers().getStatusValue());\n}\n\n// override_stream_error_on_invalid_http_message is not set and HCM\n// stream_error_on_invalid_http_message=false: test that HTTP/1.1 connection is terminated on\n// invalid HTTP message (missing :host header)\nTEST_P(IntegrationTest, ConnectionIsTerminatedIfHCMStreamErrorIsFalseAndOverrideNotSet) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) -> void {\n        hcm.mutable_stream_error_on_invalid_http_message()->set_value(false);\n      });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"POST\"}, {\":path\", \"/test/long/url\"}, {\"content-length\", \"0\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"400\", response->headers().getStatusValue());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/integration_test.h",
    "content": "#pragma once\n\n#include \"test/integration/http_integration.h\"\n\n#include \"gtest/gtest.h\"\n\n// A test class for testing HTTP/1.1 upstream and downstreams\nnamespace Envoy {\nclass IntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                        public HttpIntegrationTest {\npublic:\n  IntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n};\n\nclass UpstreamEndpointIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                        public HttpIntegrationTest {\npublic:\n  UpstreamEndpointIntegrationTest()\n      : HttpIntegrationTest(\n            Http::CodecClient::Type::HTTP1,\n            [](int) {\n              return Network::Utility::parseInternetAddress(\n                  Network::Test::getLoopbackAddressString(GetParam()), 0);\n            },\n            GetParam()) {}\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/listener_filter_integration_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/extensions/access_loggers/file/v3/file.pb.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/listener/tls_inspector/tls_inspector.h\"\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"test/integration/integration.h\"\n#include \"test/integration/ssl_utility.h\"\n#include \"test/integration/utility.h\"\n#include \"test/mocks/secret/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass ListenerFilterIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                      public BaseIntegrationTest {\npublic:\n  ListenerFilterIntegrationTest()\n      : BaseIntegrationTest(GetParam(), ConfigHelper::baseConfig() + R\"EOF(\n    filter_chains:\n      filters:\n       -  name: envoy.filters.network.echo\n)EOF\") {}\n\n  ~ListenerFilterIntegrationTest() override = default;\n  std::string appendMatcher(const std::string& listener_filter, bool disabled) {\n    if (disabled) {\n      return listener_filter +\n             R\"EOF(\nfilter_disabled:\n  any_match: true\n)EOF\";\n    } else {\n      return listener_filter +\n             R\"EOF(\nfilter_disabled:\n  not_match:\n    any_match: true\n)EOF\";\n    }\n  }\n\n  void initializeWithListenerFilter(absl::optional<bool> listener_filter_disabled = absl::nullopt) {\n    config_helper_.renameListener(\"echo\");\n    std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter();\n    if (listener_filter_disabled.has_value()) {\n      tls_inspector_config = appendMatcher(tls_inspector_config, listener_filter_disabled.value());\n    }\n    config_helper_.addListenerFilter(tls_inspector_config);\n    
config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* filter_chain =\n          bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0);\n      auto* alpn = filter_chain->mutable_filter_chain_match()->add_application_protocols();\n      *alpn = \"envoyalpn\";\n    });\n    config_helper_.addSslConfig();\n    useListenerAccessLog(\"%RESPONSE_CODE_DETAILS%\");\n    BaseIntegrationTest::initialize();\n\n    context_manager_ =\n        std::make_unique<Extensions::TransportSockets::Tls::ContextManagerImpl>(timeSystem());\n  }\n\n  void setupConnections(bool listener_filter_disabled, bool expect_connection_open) {\n    initializeWithListenerFilter(listener_filter_disabled);\n\n    // Set up the SSL client.\n    Network::Address::InstanceConstSharedPtr address =\n        Ssl::getSslAddress(version_, lookupPort(\"echo\"));\n    context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_);\n    ssl_client_ = dispatcher_->createClientConnection(\n        address, Network::Address::InstanceConstSharedPtr(),\n        context_->createTransportSocket(\n            // nullptr\n            std::make_shared<Network::TransportSocketOptionsImpl>(\n                absl::string_view(\"\"), std::vector<std::string>(),\n                std::vector<std::string>{\"envoyalpn\"})),\n        nullptr);\n    ssl_client_->addConnectionCallbacks(connect_callbacks_);\n    ssl_client_->connect();\n    while (!connect_callbacks_.connected() && !connect_callbacks_.closed()) {\n      dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n    }\n\n    if (expect_connection_open) {\n      ASSERT(connect_callbacks_.connected());\n      ASSERT_FALSE(connect_callbacks_.closed());\n    } else {\n      ASSERT_FALSE(connect_callbacks_.connected());\n      ASSERT(connect_callbacks_.closed());\n    }\n  }\n  std::unique_ptr<Ssl::ContextManager> context_manager_;\n  Network::TransportSocketFactoryPtr 
context_;\n  ConnectionStatusCallbacks connect_callbacks_;\n  testing::NiceMock<Secret::MockSecretManager> secret_manager_;\n  Network::ClientConnectionPtr ssl_client_;\n};\n\n// Each listener filter is enabled by default.\nTEST_P(ListenerFilterIntegrationTest, AllListenerFiltersAreEnabledByDefault) {\n  setupConnections(/*listener_filter_disabled=*/false, /*expect_connection_open=*/true);\n  ssl_client_->close(Network::ConnectionCloseType::NoFlush);\n  EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::Eq(\"-\"));\n}\n\n// The tls_inspector is disabled. The ALPN won't be sniffed out and no filter chain is matched.\nTEST_P(ListenerFilterIntegrationTest, DisabledTlsInspectorFailsFilterChainFind) {\n  setupConnections(/*listener_filter_disabled=*/true, /*expect_connection_open=*/false);\n  EXPECT_THAT(waitForAccessLog(listener_access_log_name_),\n              testing::Eq(StreamInfo::ResponseCodeDetails::get().FilterChainNotFound));\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ListenerFilterIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/listener_lds_integration_test.cc",
    "content": "#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/scoped_route.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/config/version_converter.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/resources.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass ListenerIntegrationTest : public HttpIntegrationTest,\n                                public Grpc::GrpcClientIntegrationParamTest {\nprotected:\n  struct FakeUpstreamInfo {\n    FakeHttpConnectionPtr connection_;\n    FakeUpstream* upstream_{};\n    absl::flat_hash_map<std::string, FakeStreamPtr> stream_by_resource_name_;\n  };\n\n  ListenerIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion(), realTime()) {}\n\n  ~ListenerIntegrationTest() override { resetConnections(); }\n\n  void initialize() override {\n    // We want to use the GRPC based LDS.\n    use_lds_ = false;\n    setUpstreamCount(1);\n    defer_listener_finalization_ = true;\n\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // Add the static cluster to serve LDS.\n      auto* lds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      lds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      lds_cluster->set_name(\"lds_cluster\");\n      lds_cluster->mutable_http2_protocol_options();\n\n      // Add the static cluster to serve RDS.\n      auto* rds_cluster = 
bootstrap.mutable_static_resources()->add_clusters();\n      rds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      rds_cluster->set_name(\"rds_cluster\");\n      rds_cluster->mutable_http2_protocol_options();\n    });\n\n    config_helper_.addConfigModifier(\n        [this](\n            envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                http_connection_manager) {\n          auto* rds_config = http_connection_manager.mutable_rds();\n          rds_config->set_route_config_name(route_table_name_);\n          envoy::config::core::v3::ApiConfigSource* rds_api_config_source =\n              rds_config->mutable_config_source()->mutable_api_config_source();\n          rds_api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n          envoy::config::core::v3::GrpcService* grpc_service =\n              rds_api_config_source->add_grpc_services();\n          setGrpcService(*grpc_service, \"rds_cluster\", getRdsFakeUpstream().localAddress());\n        });\n\n    // Note this has to be the last modifier as it nuke static_resource listeners.\n    setUpGrpcLds();\n    HttpIntegrationTest::initialize();\n  }\n  void setUpGrpcLds() {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      listener_config_.Swap(bootstrap.mutable_static_resources()->mutable_listeners(0));\n      listener_config_.set_name(listener_name_);\n      ENVOY_LOG_MISC(error, \"listener config: {}\", listener_config_.DebugString());\n      bootstrap.mutable_static_resources()->mutable_listeners()->Clear();\n      auto* lds_api_config_source =\n          bootstrap.mutable_dynamic_resources()->mutable_lds_config()->mutable_api_config_source();\n      lds_api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n      envoy::config::core::v3::GrpcService* grpc_service =\n          lds_api_config_source->add_grpc_services();\n      
setGrpcService(*grpc_service, \"lds_cluster\", getLdsFakeUpstream().localAddress());\n    });\n  }\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    // Create the LDS upstream (fake_upstreams_[1]).\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n    // Create the RDS upstream (fake_upstreams_[2]).\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void resetFakeUpstreamInfo(FakeUpstreamInfo* upstream_info) {\n    ASSERT(upstream_info->upstream_ != nullptr);\n\n    AssertionResult result = upstream_info->connection_->close();\n    RELEASE_ASSERT(result, result.message());\n    result = upstream_info->connection_->waitForDisconnect();\n    RELEASE_ASSERT(result, result.message());\n    upstream_info->connection_.reset();\n  }\n\n  void resetConnections() {\n    if (rds_upstream_info_.upstream_ != nullptr) {\n      resetFakeUpstreamInfo(&rds_upstream_info_);\n    }\n    resetFakeUpstreamInfo(&lds_upstream_info_);\n  }\n\n  FakeUpstream& getLdsFakeUpstream() const { return *fake_upstreams_[1]; }\n\n  FakeUpstream& getRdsFakeUpstream() const { return *fake_upstreams_[2]; }\n\n  void createStream(FakeUpstreamInfo* upstream_info, FakeUpstream& upstream,\n                    const std::string& resource_name) {\n    if (upstream_info->upstream_ == nullptr) {\n      // bind upstream if not yet.\n      upstream_info->upstream_ = &upstream;\n      AssertionResult result =\n          upstream_info->upstream_->waitForHttpConnection(*dispatcher_, upstream_info->connection_);\n      RELEASE_ASSERT(result, result.message());\n    }\n    if (!upstream_info->stream_by_resource_name_.try_emplace(resource_name, nullptr).second) {\n      RELEASE_ASSERT(false,\n                     fmt::format(\"stream with resource name '{}' already exists!\", resource_name));\n    }\n    auto result = upstream_info->connection_->waitForNewStream(\n        *dispatcher_, upstream_info->stream_by_resource_name_[resource_name]);\n    
RELEASE_ASSERT(result, result.message());\n    upstream_info->stream_by_resource_name_[resource_name]->startGrpcStream();\n  }\n\n  void createRdsStream(const std::string& resource_name) {\n    createStream(&rds_upstream_info_, getRdsFakeUpstream(), resource_name);\n  }\n\n  void createLdsStream() {\n    createStream(&lds_upstream_info_, getLdsFakeUpstream(), listener_name_);\n  }\n\n  void sendLdsResponse(const std::vector<std::string>& listener_configs,\n                       const std::string& version) {\n    API_NO_BOOST(envoy::api::v2::DiscoveryResponse) response;\n    response.set_version_info(version);\n    response.set_type_url(Config::TypeUrl::get().Listener);\n    for (const auto& listener_blob : listener_configs) {\n      const auto listener_config =\n          TestUtility::parseYaml<envoy::config::listener::v3::Listener>(listener_blob);\n      response.add_resources()->PackFrom(API_DOWNGRADE(listener_config));\n    }\n    ASSERT(lds_upstream_info_.stream_by_resource_name_[listener_name_] != nullptr);\n    lds_upstream_info_.stream_by_resource_name_[listener_name_]->sendGrpcMessage(response);\n  }\n\n  void sendRdsResponse(const std::string& route_config, const std::string& version) {\n    API_NO_BOOST(envoy::api::v2::DiscoveryResponse) response;\n    response.set_version_info(version);\n    response.set_type_url(Config::TypeUrl::get().RouteConfiguration);\n    const auto route_configuration =\n        TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(route_config);\n    response.add_resources()->PackFrom(API_DOWNGRADE(route_configuration));\n    ASSERT(rds_upstream_info_.stream_by_resource_name_[route_configuration.name()] != nullptr);\n    rds_upstream_info_.stream_by_resource_name_[route_configuration.name()]->sendGrpcMessage(\n        response);\n  }\n  envoy::config::listener::v3::Listener listener_config_;\n  std::string listener_name_{\"testing-listener-0\"};\n  std::string route_table_name_{\"testing-route-table-0\"};\n  
FakeUpstreamInfo lds_upstream_info_;\n  FakeUpstreamInfo rds_upstream_info_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsAndGrpcTypes, ListenerIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Tests that a LDS deletion before Server initManager been initialized will not block the Server\n// from starting.\nTEST_P(ListenerIntegrationTest, RemoveLastUninitializedListener) {\n  on_server_init_function_ = [&]() {\n    createLdsStream();\n    sendLdsResponse({MessageUtil::getYamlStringFromMessage(listener_config_)}, \"1\");\n    createRdsStream(route_table_name_);\n  };\n  initialize();\n  registerTestServerPorts({listener_name_});\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 1);\n  // testing-listener-0 is not initialized as we haven't push any RDS yet.\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing);\n  // Workers not started, the LDS added listener 0 is in active_listeners_ list.\n  EXPECT_EQ(test_server_->server().listenerManager().listeners().size(), 1);\n\n  // This actually deletes the only listener.\n  sendLdsResponse({}, \"2\");\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 2);\n  EXPECT_EQ(test_server_->server().listenerManager().listeners().size(), 0);\n  // Server instance is ready now because the listener's destruction marked the listener\n  // initialized.\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized);\n}\n\n// Tests that a LDS adding listener works as expected.\nTEST_P(ListenerIntegrationTest, BasicSuccess) {\n  on_server_init_function_ = [&]() {\n    createLdsStream();\n    sendLdsResponse({MessageUtil::getYamlStringFromMessage(listener_config_)}, \"1\");\n    createRdsStream(route_table_name_);\n  };\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 1);\n  // testing-listener-0 is not initialized as we haven't pushed any RDS yet.\n 
 EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing);\n  // Workers not started, the LDS added listener 0 is in active_listeners_ list.\n  EXPECT_EQ(test_server_->server().listenerManager().listeners().size(), 1);\n  registerTestServerPorts({listener_name_});\n\n  const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/\" }}\n          route: {{ cluster: {} }}\n)EOF\";\n  sendRdsResponse(fmt::format(route_config_tmpl, route_table_name_, \"cluster_0\"), \"1\");\n  test_server_->waitForCounterGe(\n      fmt::format(\"http.config_test.rds.{}.update_success\", route_table_name_), 1);\n  // Now testing-listener-0 finishes initialization, Server initManager will be ready.\n  EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized);\n\n  test_server_->waitUntilListenersReady();\n  // NOTE: The line above doesn't tell you if listener is up and listening.\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n  // Request is sent to cluster_0.\n\n  codec_client_ = makeHttpConnection(lookupPort(listener_name_));\n  int response_size = 800;\n  int request_size = 10;\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"200\"},\n                                                   {\"server_id\", \"cluster_0, backend_0\"}};\n  auto response = sendRequestAndWaitForResponse(\n      Http::TestResponseHeaderMapImpl{\n          {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}, {\":scheme\", \"http\"}},\n      request_size, response_headers, response_size, /*cluster_0*/ 0);\n  verifyResponse(std::move(response), \"200\", response_headers, std::string(response_size, 'a'));\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(request_size, upstream_request_->bodyLength());\n}\n\n} // namespace\n} // namespace 
Envoy\n"
  },
  {
    "path": "test/integration/load_stats_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint_components.pb.h\"\n#include \"envoy/config/endpoint/v3/load_report.pb.h\"\n#include \"envoy/service/load_stats/v3/lrs.pb.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest,\n                                 public HttpIntegrationTest {\npublic:\n  LoadStatsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {\n    // We rely on some fairly specific load balancing picks in this test, so\n    // determinize the schedule.\n    setDeterministic();\n  }\n\n  void addEndpoint(envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoints,\n                   uint32_t index, uint32_t& num_endpoints) {\n    setUpstreamAddress(index + 1, *locality_lb_endpoints.add_lb_endpoints());\n    ++num_endpoints;\n  }\n\n  // Used as args to updateClusterLocalityAssignment().\n  struct LocalityAssignment {\n    LocalityAssignment() : LocalityAssignment({}, 0) {}\n    LocalityAssignment(const std::vector<uint32_t>& endpoints) : LocalityAssignment(endpoints, 0) {}\n    LocalityAssignment(const std::vector<uint32_t>& endpoints, uint32_t weight)\n        : endpoints_(endpoints), weight_(weight) {}\n\n    // service_upstream_ indices for endpoints in the cluster.\n    const std::vector<uint32_t> endpoints_;\n    // If non-zero, locality level weighting.\n    const uint32_t weight_{};\n  };\n\n  // We need to supply the endpoints via EDS to provide locality 
information for\n  // load reporting. Use a filesystem delivery to simplify test mechanics.\n  void updateClusterLoadAssignment(const LocalityAssignment& winter_upstreams,\n                                   const LocalityAssignment& dragon_upstreams,\n                                   const LocalityAssignment& p1_winter_upstreams,\n                                   const LocalityAssignment& p1_dragon_upstreams) {\n    uint32_t num_endpoints = 0;\n    envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;\n    // EDS service_name is set in cluster_0\n    cluster_load_assignment.set_cluster_name(\"service_name_0\");\n\n    auto* winter = cluster_load_assignment.add_endpoints();\n    winter->mutable_locality()->set_region(\"some_region\");\n    winter->mutable_locality()->set_zone(\"zone_name\");\n    winter->mutable_locality()->set_sub_zone(\"winter\");\n    if (winter_upstreams.weight_ > 0) {\n      winter->mutable_load_balancing_weight()->set_value(winter_upstreams.weight_);\n    }\n    for (uint32_t index : winter_upstreams.endpoints_) {\n      addEndpoint(*winter, index, num_endpoints);\n    }\n\n    auto* dragon = cluster_load_assignment.add_endpoints();\n    dragon->mutable_locality()->set_region(\"some_region\");\n    dragon->mutable_locality()->set_zone(\"zone_name\");\n    dragon->mutable_locality()->set_sub_zone(\"dragon\");\n    if (dragon_upstreams.weight_ > 0) {\n      dragon->mutable_load_balancing_weight()->set_value(dragon_upstreams.weight_);\n    }\n    for (uint32_t index : dragon_upstreams.endpoints_) {\n      addEndpoint(*dragon, index, num_endpoints);\n    }\n\n    auto* winter_p1 = cluster_load_assignment.add_endpoints();\n    winter_p1->set_priority(1);\n    winter_p1->mutable_locality()->set_region(\"some_region\");\n    winter_p1->mutable_locality()->set_zone(\"zone_name\");\n    winter_p1->mutable_locality()->set_sub_zone(\"winter\");\n    for (uint32_t index : p1_winter_upstreams.endpoints_) {\n      
addEndpoint(*winter_p1, index, num_endpoints);\n    }\n\n    auto* dragon_p1 = cluster_load_assignment.add_endpoints();\n    dragon_p1->set_priority(1);\n    dragon_p1->mutable_locality()->set_region(\"some_region\");\n    dragon_p1->mutable_locality()->set_zone(\"zone_name\");\n    dragon_p1->mutable_locality()->set_sub_zone(\"dragon\");\n    for (uint32_t index : p1_dragon_upstreams.endpoints_) {\n      addEndpoint(*dragon_p1, index, num_endpoints);\n    }\n    eds_helper_.setEdsAndWait({cluster_load_assignment}, *test_server_);\n  }\n\n  void createUpstreams() override {\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n    load_report_upstream_ = fake_upstreams_.back().get();\n    HttpIntegrationTest::createUpstreams();\n  }\n\n  void initialize() override {\n    setUpstreamCount(upstream_endpoints_);\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // Setup load reporting and corresponding gRPC cluster.\n      auto* loadstats_config = bootstrap.mutable_cluster_manager()->mutable_load_stats_config();\n      loadstats_config->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n      loadstats_config->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(\"load_report\");\n      loadstats_config->set_transport_api_version(apiVersion());\n      auto* load_report_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      load_report_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      load_report_cluster->mutable_circuit_breakers()->Clear();\n      load_report_cluster->set_name(\"load_report\");\n      load_report_cluster->mutable_http2_protocol_options();\n      // Put ourselves in a locality that will be used in\n      // updateClusterLoadAssignment()\n      auto* locality = bootstrap.mutable_node()->mutable_locality();\n      locality->set_region(\"some_region\");\n      locality->set_zone(\"zone_name\");\n      locality->set_sub_zone(sub_zone_);\n      
// Switch predefined cluster_0 to EDS filesystem sourcing.\n      auto* cluster_0 = bootstrap.mutable_static_resources()->mutable_clusters(0);\n      cluster_0->set_type(envoy::config::cluster::v3::Cluster::EDS);\n      auto* eds_cluster_config = cluster_0->mutable_eds_cluster_config();\n      eds_cluster_config->mutable_eds_config()->set_path(eds_helper_.eds_path());\n      eds_cluster_config->set_service_name(\"service_name_0\");\n      if (locality_weighted_lb_) {\n        cluster_0->mutable_common_lb_config()->mutable_locality_weighted_lb_config();\n      }\n    });\n    HttpIntegrationTest::initialize();\n    load_report_upstream_ = fake_upstreams_[0].get();\n    for (uint32_t i = 0; i < upstream_endpoints_; ++i) {\n      service_upstream_[i] = fake_upstreams_[i + 1].get();\n    }\n    updateClusterLoadAssignment({}, {}, {}, {});\n  }\n\n  void initiateClientConnection() {\n    auto conn = makeClientConnection(lookupPort(\"http\"));\n    codec_client_ = makeHttpConnection(std::move(conn));\n    Http::TestRequestHeaderMapImpl headers{\n        {\":method\", \"POST\"},    {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"},\n        {\":authority\", \"host\"}, {\"x-lyft-user-id\", \"123\"},   {\"x-forwarded-for\", \"10.0.0.1\"}};\n    response_ = codec_client_->makeRequestWithBody(headers, request_size_);\n  }\n\n  void waitForLoadStatsStream() {\n    AssertionResult result =\n        load_report_upstream_->waitForHttpConnection(*dispatcher_, fake_loadstats_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = fake_loadstats_connection_->waitForNewStream(*dispatcher_, loadstats_stream_);\n    RELEASE_ASSERT(result, result.message());\n  }\n\n  void mergeLoadStats(envoy::service::load_stats::v3::LoadStatsRequest& loadstats_request,\n                      envoy::service::load_stats::v3::LoadStatsRequest& local_loadstats_request) {\n    // Strip out \"load_report\" cluster, so that it doesn't interfere with the test.\n    for (auto it = 
local_loadstats_request.mutable_cluster_stats()->begin();\n         it != local_loadstats_request.mutable_cluster_stats()->end(); ++it) {\n      if (it->cluster_name() == \"load_report\") {\n        local_loadstats_request.mutable_cluster_stats()->erase(it);\n        break;\n      }\n    }\n\n    ASSERT_LE(loadstats_request.cluster_stats_size(), 1) << loadstats_request.DebugString();\n    ASSERT_LE(local_loadstats_request.cluster_stats_size(), 1)\n        << local_loadstats_request.DebugString();\n\n    if (local_loadstats_request.cluster_stats_size() == 0) {\n      return;\n    } else if (loadstats_request.cluster_stats_size() == 0) {\n      loadstats_request.CopyFrom(local_loadstats_request);\n      ASSERT_TRUE(loadstats_request.has_node());\n      ASSERT_FALSE(loadstats_request.node().id().empty());\n      ASSERT_FALSE(loadstats_request.node().cluster().empty());\n      return;\n    }\n\n    const auto& local_cluster_stats = local_loadstats_request.cluster_stats(0);\n    auto* cluster_stats = loadstats_request.mutable_cluster_stats(0);\n\n    cluster_stats->set_total_dropped_requests(cluster_stats->total_dropped_requests() +\n                                              local_cluster_stats.total_dropped_requests());\n\n    for (int i = 0; i < local_cluster_stats.upstream_locality_stats_size(); ++i) {\n      const auto& local_upstream_locality_stats = local_cluster_stats.upstream_locality_stats(i);\n      bool copied = false;\n      for (int j = 0; j < cluster_stats->upstream_locality_stats_size(); ++j) {\n        auto* upstream_locality_stats = cluster_stats->mutable_upstream_locality_stats(j);\n        if (TestUtility::protoEqual(upstream_locality_stats->locality(),\n                                    local_upstream_locality_stats.locality()) &&\n            upstream_locality_stats->priority() == local_upstream_locality_stats.priority()) {\n          copied = true;\n          upstream_locality_stats->set_total_successful_requests(\n              
upstream_locality_stats->total_successful_requests() +\n              local_upstream_locality_stats.total_successful_requests());\n          upstream_locality_stats->set_total_error_requests(\n              upstream_locality_stats->total_error_requests() +\n              local_upstream_locality_stats.total_error_requests());\n          upstream_locality_stats->set_total_issued_requests(\n              upstream_locality_stats->total_issued_requests() +\n              local_upstream_locality_stats.total_issued_requests());\n          // Unlike most stats, current requests in progress replaces old requests in progress.\n          break;\n        }\n      }\n      if (!copied) {\n        auto* upstream_locality_stats = cluster_stats->add_upstream_locality_stats();\n        upstream_locality_stats->CopyFrom(local_upstream_locality_stats);\n      }\n    }\n\n    // Unfortunately because we don't issue an update when total_requests_in_progress goes from\n    // non-zero to zero, we have to go through and zero it out for any locality stats we didn't see.\n    for (int i = 0; i < cluster_stats->upstream_locality_stats_size(); ++i) {\n      auto upstream_locality_stats = cluster_stats->mutable_upstream_locality_stats(i);\n      bool found = false;\n      for (int j = 0; j < local_cluster_stats.upstream_locality_stats_size(); ++j) {\n        auto& local_upstream_locality_stats = local_cluster_stats.upstream_locality_stats(j);\n        if (TestUtility::protoEqual(upstream_locality_stats->locality(),\n                                    local_upstream_locality_stats.locality()) &&\n            upstream_locality_stats->priority() == local_upstream_locality_stats.priority()) {\n          found = true;\n          break;\n        }\n      }\n      if (!found) {\n        upstream_locality_stats->set_total_requests_in_progress(0);\n      }\n    }\n  }\n\n  ABSL_MUST_USE_RESULT AssertionResult\n  waitForLoadStatsRequest(const 
std::vector<envoy::config::endpoint::v3::UpstreamLocalityStats>&\n                              expected_locality_stats,\n                          uint64_t dropped = 0) {\n    Event::TestTimeSystem::RealTimeBound bound(TestUtility::DefaultTimeout);\n    Protobuf::RepeatedPtrField<envoy::config::endpoint::v3::ClusterStats> expected_cluster_stats;\n    if (!expected_locality_stats.empty() || dropped != 0) {\n      auto* cluster_stats = expected_cluster_stats.Add();\n      cluster_stats->set_cluster_name(\"cluster_0\");\n      // Verify the eds service_name is passed back.\n      cluster_stats->set_cluster_service_name(\"service_name_0\");\n      if (dropped > 0) {\n        cluster_stats->set_total_dropped_requests(dropped);\n      }\n      std::copy(\n          expected_locality_stats.begin(), expected_locality_stats.end(),\n          Protobuf::RepeatedPtrFieldBackInserter(cluster_stats->mutable_upstream_locality_stats()));\n    }\n\n    envoy::service::load_stats::v3::LoadStatsRequest loadstats_request;\n    // Because multiple load stats may be sent while load in being sent (on slow machines), loop and\n    // merge until all the expected load has been reported.\n    do {\n      envoy::service::load_stats::v3::LoadStatsRequest local_loadstats_request;\n      AssertionResult result =\n          loadstats_stream_->waitForGrpcMessage(*dispatcher_, local_loadstats_request);\n      RELEASE_ASSERT(result, result.message());\n      // Check that \"envoy.lrs.supports_send_all_clusters\" client feature is set.\n      if (local_loadstats_request.has_node()) {\n        EXPECT_THAT(local_loadstats_request.node().client_features(),\n                    ::testing::ElementsAre(\"envoy.lrs.supports_send_all_clusters\"));\n      }\n      // Sanity check and clear the measured load report interval.\n      for (auto& cluster_stats : *local_loadstats_request.mutable_cluster_stats()) {\n        const uint32_t actual_load_report_interval_ms =\n            
Protobuf::util::TimeUtil::DurationToMilliseconds(cluster_stats.load_report_interval());\n        // Turns out libevent timers aren't that accurate; without this adjustment we see things\n        // like \"expected 500, actual 497\". Tweak as needed if races are observed.\n        EXPECT_GE(actual_load_report_interval_ms, load_report_interval_ms_ - 100);\n        // Allow for some skew in test environment.\n        EXPECT_LT(actual_load_report_interval_ms, load_report_interval_ms_ + 1000);\n        cluster_stats.mutable_load_report_interval()->Clear();\n      }\n      mergeLoadStats(loadstats_request, local_loadstats_request);\n\n      EXPECT_EQ(\"POST\", loadstats_stream_->headers().getMethodValue());\n      EXPECT_EQ(\n          TestUtility::getVersionedMethodPath(\"envoy.service.load_stats.{}.LoadReportingService\",\n                                              \"StreamLoadStats\", apiVersion()),\n          loadstats_stream_->headers().getPathValue());\n      EXPECT_EQ(\"application/grpc\", loadstats_stream_->headers().getContentTypeValue());\n      if (!bound.withinBound()) {\n        return TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats,\n                                                        loadstats_request.cluster_stats(), true);\n      }\n    } while (!TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats,\n                                                       loadstats_request.cluster_stats(), true));\n    return testing::AssertionSuccess();\n  }\n\n  void waitForUpstreamResponse(uint32_t endpoint_index, uint32_t response_code = 200) {\n    AssertionResult result = service_upstream_[endpoint_index]->waitForHttpConnection(\n        *dispatcher_, fake_upstream_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_);\n    RELEASE_ASSERT(result, result.message());\n    result = upstream_request_->waitForEndStream(*dispatcher_);\n    
RELEASE_ASSERT(result, result.message());\n\n    upstream_request_->encodeHeaders(\n        Http::TestResponseHeaderMapImpl{{\":status\", std::to_string(response_code)}}, false);\n    upstream_request_->encodeData(response_size_, true);\n    response_->waitForEndStream();\n\n    ASSERT_TRUE(upstream_request_->complete());\n    EXPECT_EQ(request_size_, upstream_request_->bodyLength());\n\n    ASSERT_TRUE(response_->complete());\n    EXPECT_EQ(std::to_string(response_code), response_->headers().getStatusValue());\n    EXPECT_EQ(response_size_, response_->body().size());\n  }\n\n  void requestLoadStatsResponse(const std::vector<std::string>& clusters,\n                                bool send_all_clusters = false) {\n    envoy::service::load_stats::v3::LoadStatsResponse loadstats_response;\n    loadstats_response.mutable_load_reporting_interval()->MergeFrom(\n        Protobuf::util::TimeUtil::MillisecondsToDuration(load_report_interval_ms_));\n    for (const auto& cluster : clusters) {\n      loadstats_response.add_clusters(cluster);\n    }\n    if (send_all_clusters) {\n      loadstats_response.set_send_all_clusters(true);\n    }\n    loadstats_stream_->sendGrpcMessage(loadstats_response);\n    // Wait until the request has been received by Envoy.\n    test_server_->waitForCounterGe(\"load_reporter.requests\", ++load_requests_);\n  }\n\n  envoy::config::endpoint::v3::UpstreamLocalityStats localityStats(const std::string& sub_zone,\n                                                                   uint64_t success, uint64_t error,\n                                                                   uint64_t active, uint64_t issued,\n                                                                   uint32_t priority = 0) {\n    envoy::config::endpoint::v3::UpstreamLocalityStats locality_stats;\n    auto* locality = locality_stats.mutable_locality();\n    locality->set_region(\"some_region\");\n    locality->set_zone(\"zone_name\");\n    
locality->set_sub_zone(sub_zone);\n    locality_stats.set_total_successful_requests(success);\n    locality_stats.set_total_error_requests(error);\n    locality_stats.set_total_requests_in_progress(active);\n    locality_stats.set_total_issued_requests(issued);\n    locality_stats.set_priority(priority);\n    return locality_stats;\n  }\n\n  void cleanupLoadStatsConnection() {\n    if (fake_loadstats_connection_ != nullptr) {\n      AssertionResult result = fake_loadstats_connection_->close();\n      RELEASE_ASSERT(result, result.message());\n      result = fake_loadstats_connection_->waitForDisconnect();\n      RELEASE_ASSERT(result, result.message());\n    }\n  }\n\n  void sendAndReceiveUpstream(uint32_t endpoint_index, uint32_t response_code = 200) {\n    initiateClientConnection();\n    waitForUpstreamResponse(endpoint_index, response_code);\n    cleanupUpstreamAndDownstream();\n  }\n\n  static constexpr uint32_t upstream_endpoints_ = 5;\n\n  IntegrationStreamDecoderPtr response_;\n  std::string sub_zone_{\"winter\"};\n  FakeHttpConnectionPtr fake_loadstats_connection_;\n  FakeStreamPtr loadstats_stream_;\n  FakeUpstream* load_report_upstream_{};\n  FakeUpstream* service_upstream_[upstream_endpoints_]{};\n  uint32_t load_requests_{};\n  EdsHelper eds_helper_;\n  bool locality_weighted_lb_{};\n\n  const uint64_t request_size_ = 1024;\n  const uint64_t response_size_ = 512;\n  const uint32_t load_report_interval_ms_ = 500;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, LoadStatsIntegrationTest,\n                         VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Validate the load reports for successful requests as cluster membership\n// changes.\nTEST_P(LoadStatsIntegrationTest, Success) {\n  initialize();\n\n  waitForLoadStatsStream();\n  ASSERT_TRUE(waitForLoadStatsRequest({}));\n  loadstats_stream_->startGrpcStream();\n\n  // Simple 50%/50% split between dragon/winter localities. 
Also include an\n  // unknown cluster to exercise the handling of this case.\n  requestLoadStatsResponse({\"cluster_0\", \"cluster_1\"});\n\n  updateClusterLoadAssignment({{0}}, {{1}}, {{3}}, {});\n\n  for (uint32_t i = 0; i < 4; ++i) {\n    sendAndReceiveUpstream(i % 2);\n  }\n\n  // Verify we do not get empty stats for non-zero priorities.\n  ASSERT_TRUE(waitForLoadStatsRequest(\n      {localityStats(\"winter\", 2, 0, 0, 2), localityStats(\"dragon\", 2, 0, 0, 2)}));\n\n  EXPECT_EQ(1, test_server_->counter(\"load_reporter.requests\")->value());\n  // On slow machines, more than one load stats response may be pushed while we are simulating load.\n  EXPECT_LE(2, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  // 33%/67% split between dragon/winter primary localities.\n  updateClusterLoadAssignment({{0}}, {{1, 2}}, {}, {{4}});\n  // Verify that send_all_clusters works.\n  requestLoadStatsResponse({}, true);\n\n  for (uint32_t i = 0; i < 6; ++i) {\n    sendAndReceiveUpstream((4 + i) % 3);\n  }\n\n  // No locality for priority=1 since there's no \"winter\" endpoints.\n  // The hosts for dragon were received because membership_total is accurate.\n  ASSERT_TRUE(waitForLoadStatsRequest(\n      {localityStats(\"winter\", 2, 0, 0, 2), localityStats(\"dragon\", 4, 0, 0, 4)}));\n\n  EXPECT_EQ(2, test_server_->counter(\"load_reporter.requests\")->value());\n  EXPECT_LE(3, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  // Change to 50/50 for the failover clusters.\n  updateClusterLoadAssignment({}, {}, {{3}}, {{4}});\n  requestLoadStatsResponse({\"cluster_0\"});\n  test_server_->waitForGaugeEq(\"cluster.cluster_0.membership_total\", 2);\n\n  for (uint32_t i = 0; i < 4; ++i) {\n    sendAndReceiveUpstream(i % 2 + 3);\n  }\n\n  ASSERT_TRUE(waitForLoadStatsRequest(\n      
{localityStats(\"winter\", 2, 0, 0, 2, 1), localityStats(\"dragon\", 2, 0, 0, 2, 1)}));\n  EXPECT_EQ(3, test_server_->counter(\"load_reporter.requests\")->value());\n  EXPECT_LE(4, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  // 100% winter locality.\n  updateClusterLoadAssignment({}, {}, {}, {});\n  updateClusterLoadAssignment({{1}}, {}, {}, {});\n  requestLoadStatsResponse({\"cluster_0\"});\n\n  for (uint32_t i = 0; i < 1; ++i) {\n    sendAndReceiveUpstream(1);\n  }\n\n  ASSERT_TRUE(waitForLoadStatsRequest({localityStats(\"winter\", 1, 0, 0, 1)}));\n  EXPECT_EQ(4, test_server_->counter(\"load_reporter.requests\")->value());\n  EXPECT_LE(5, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  // A LoadStatsResponse arrives before the expiration of the reporting\n  // interval. Since we are keep tracking cluster_0, stats rollover.\n  requestLoadStatsResponse({\"cluster_0\"});\n  sendAndReceiveUpstream(1);\n  requestLoadStatsResponse({\"cluster_0\"});\n  sendAndReceiveUpstream(1);\n  sendAndReceiveUpstream(1);\n\n  ASSERT_TRUE(waitForLoadStatsRequest({localityStats(\"winter\", 3, 0, 0, 3)}));\n\n  EXPECT_EQ(6, test_server_->counter(\"load_reporter.requests\")->value());\n  EXPECT_LE(6, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  // As above, but stop tracking cluster_0 and only get the requests since the\n  // response.\n  requestLoadStatsResponse({});\n  sendAndReceiveUpstream(1);\n  requestLoadStatsResponse({\"cluster_0\"});\n  sendAndReceiveUpstream(1);\n  sendAndReceiveUpstream(1);\n\n  ASSERT_TRUE(waitForLoadStatsRequest({localityStats(\"winter\", 2, 0, 0, 2)}));\n\n  EXPECT_EQ(8, test_server_->counter(\"load_reporter.requests\")->value());\n  EXPECT_LE(7, 
test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  cleanupLoadStatsConnection();\n}\n\n// Validate the load reports for successful requests when using locality\n// weighted LB. This serves as a de facto integration test for locality weighted\n// LB.\nTEST_P(LoadStatsIntegrationTest, LocalityWeighted) {\n  locality_weighted_lb_ = true;\n  initialize();\n\n  waitForLoadStatsStream();\n  ASSERT_TRUE(waitForLoadStatsRequest({}));\n\n  loadstats_stream_->startGrpcStream();\n  requestLoadStatsResponse({\"cluster_0\"});\n\n  // Simple 33%/67% split between dragon/winter localities.\n  // Even though there are more endpoints in the dragon locality, the winter locality gets the\n  // expected weighting in the WRR locality schedule.\n  updateClusterLoadAssignment({{0}, 2}, {{1, 2}, 1}, {}, {});\n\n  sendAndReceiveUpstream(0);\n  sendAndReceiveUpstream(1);\n  sendAndReceiveUpstream(0);\n  sendAndReceiveUpstream(0);\n  sendAndReceiveUpstream(2);\n  sendAndReceiveUpstream(0);\n\n  // Verify we get the expect request distribution.\n  ASSERT_TRUE(waitForLoadStatsRequest(\n      {localityStats(\"winter\", 4, 0, 0, 4), localityStats(\"dragon\", 2, 0, 0, 2)}));\n\n  EXPECT_EQ(1, test_server_->counter(\"load_reporter.requests\")->value());\n  // On slow machines, more than one load stats response may be pushed while we are simulating load.\n  EXPECT_LE(2, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  cleanupLoadStatsConnection();\n}\n\n// Validate the load reports for requests when all endpoints are non-local.\nTEST_P(LoadStatsIntegrationTest, NoLocalLocality) {\n  sub_zone_ = \"summer\";\n  initialize();\n\n  waitForLoadStatsStream();\n  ASSERT_TRUE(waitForLoadStatsRequest({}));\n  loadstats_stream_->startGrpcStream();\n\n  // Simple 50%/50% split between dragon/winter localities. 
Also include an\n  // unknown cluster to exercise the handling of this case.\n  requestLoadStatsResponse({\"cluster_0\", \"cluster_1\"});\n\n  updateClusterLoadAssignment({{0}}, {{1}}, {{3}}, {});\n\n  for (uint32_t i = 0; i < 4; ++i) {\n    sendAndReceiveUpstream(i % 2);\n  }\n\n  // Verify we do not get empty stats for non-zero priorities. Note that the\n  // order of locality stats is different to the Success case, where winter is\n  // the local locality (and hence first in the list as per\n  // HostsPerLocality::get()).\n  ASSERT_TRUE(waitForLoadStatsRequest(\n      {localityStats(\"dragon\", 2, 0, 0, 2), localityStats(\"winter\", 2, 0, 0, 2)}));\n\n  EXPECT_EQ(1, test_server_->counter(\"load_reporter.requests\")->value());\n  // On slow machines, more than one load stats response may be pushed while we are simulating load.\n  EXPECT_LE(2, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  cleanupLoadStatsConnection();\n}\n\n// Validate the load reports for successful/error requests make sense.\nTEST_P(LoadStatsIntegrationTest, Error) {\n  initialize();\n\n  waitForLoadStatsStream();\n  ASSERT_TRUE(waitForLoadStatsRequest({}));\n  loadstats_stream_->startGrpcStream();\n\n  requestLoadStatsResponse({\"cluster_0\"});\n  updateClusterLoadAssignment({{0}}, {}, {}, {});\n\n  // This should count as an error since 5xx.\n  sendAndReceiveUpstream(0, 503);\n\n  // This should count as \"success\" since non-5xx.\n  sendAndReceiveUpstream(0, 404);\n\n  ASSERT_TRUE(waitForLoadStatsRequest({localityStats(\"winter\", 1, 1, 0, 2)}));\n\n  EXPECT_EQ(1, test_server_->counter(\"load_reporter.requests\")->value());\n  EXPECT_LE(2, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  cleanupLoadStatsConnection();\n}\n\n// Validate the load reports for in-progress make 
sense.\nTEST_P(LoadStatsIntegrationTest, InProgress) {\n  initialize();\n\n  waitForLoadStatsStream();\n  ASSERT_TRUE(waitForLoadStatsRequest({}));\n  loadstats_stream_->startGrpcStream();\n  updateClusterLoadAssignment({{0}}, {}, {}, {});\n\n  requestLoadStatsResponse({\"cluster_0\"});\n  initiateClientConnection();\n  ASSERT_TRUE(waitForLoadStatsRequest({localityStats(\"winter\", 0, 0, 1, 1)}));\n\n  waitForUpstreamResponse(0, 503);\n  cleanupUpstreamAndDownstream();\n\n  EXPECT_EQ(1, test_server_->counter(\"load_reporter.requests\")->value());\n  EXPECT_LE(2, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  cleanupLoadStatsConnection();\n}\n\n// Validate the load reports for dropped requests make sense.\nTEST_P(LoadStatsIntegrationTest, Dropped) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* cluster_0 = bootstrap.mutable_static_resources()->mutable_clusters(0);\n    auto* thresholds = cluster_0->mutable_circuit_breakers()->add_thresholds();\n    thresholds->mutable_max_pending_requests()->set_value(0);\n  });\n  initialize();\n\n  waitForLoadStatsStream();\n  ASSERT_TRUE(waitForLoadStatsRequest({}));\n  loadstats_stream_->startGrpcStream();\n\n  updateClusterLoadAssignment({{0}}, {}, {}, {});\n  requestLoadStatsResponse({\"cluster_0\"});\n  // This should count as dropped, since we trigger circuit breaking.\n  initiateClientConnection();\n  response_->waitForEndStream();\n  ASSERT_TRUE(response_->complete());\n  EXPECT_EQ(\"503\", response_->headers().getStatusValue());\n  cleanupUpstreamAndDownstream();\n\n  ASSERT_TRUE(waitForLoadStatsRequest({}, 1));\n\n  EXPECT_EQ(1, test_server_->counter(\"load_reporter.requests\")->value());\n  EXPECT_LE(2, test_server_->counter(\"load_reporter.responses\")->value());\n  EXPECT_EQ(0, test_server_->counter(\"load_reporter.errors\")->value());\n\n  
cleanupLoadStatsConnection();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/local_reply_integration_test.cc",
    "content": "#include \"test/integration/http_protocol_integration.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\n\nclass LocalReplyIntegrationTest : public HttpProtocolIntegrationTest {\npublic:\n  void initialize() override { HttpProtocolIntegrationTest::initialize(); }\n\n  void setLocalReplyConfig(const std::string& yaml) {\n    envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig\n        local_reply_config;\n    TestUtility::loadFromYaml(yaml, local_reply_config);\n    config_helper_.setLocalReply(local_reply_config);\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(Protocols, LocalReplyIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nTEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson) {\n  const std::string yaml = R\"EOF(\nmappers:\n  - filter:\n      header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value\n    status_code: 550\n    headers_to_add:\n      - header:\n          key: foo\n          value: bar\n        append: false\nbody_format:\n  json_format:\n    level: TRACE\n    user_agent: \"%REQ(USER-AGENT)%\"\n    response_body: \"%LOCAL_REPLY_BODY%\"\n  )EOF\";\n  setLocalReplyConfig(yaml);\n  initialize();\n\n  const std::string expected_body = R\"({\n      \"level\": \"TRACE\",\n      \"user_agent\": null,\n      \"response_body\": \"upstream connect error or disconnect/reset before headers. 
reset reason: connection termination\"\n})\";\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"test-header\", \"exact-match-value\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  response->waitForEndStream();\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    codec_client_->close();\n  }\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"application/json\", response->headers().ContentType()->value().getStringView());\n  EXPECT_EQ(\"150\", response->headers().ContentLength()->value().getStringView());\n  EXPECT_EQ(\"550\", response->headers().Status()->value().getStringView());\n  EXPECT_EQ(\"bar\", response->headers().get(Http::LowerCaseString(\"foo\"))->value().getStringView());\n  // Check if returned json is same as expected\n  EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), expected_body));\n}\n\n// For grpc, the error message is in grpc-message header.\n// If it is json, the header value is in json format.\nTEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson4Grpc) {\n  const std::string yaml = 
R\"EOF(\nbody_format:\n  json_format:\n    code: \"%RESPONSE_CODE%\"\n    message: \"%LOCAL_REPLY_BODY%\"\n)EOF\";\n  setLocalReplyConfig(yaml);\n  initialize();\n\n  const std::string expected_grpc_message = R\"({\n      \"code\": 503,\n      \"message\":\"upstream connect error or disconnect/reset before headers. reset reason: connection termination\"\n})\";\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/package.service/method\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"content-type\", \"application/grpc\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  response->waitForEndStream();\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    codec_client_->close();\n  }\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"application/grpc\", response->headers().ContentType()->value().getStringView());\n  EXPECT_EQ(\"14\", response->headers().GrpcStatus()->value().getStringView());\n  // Check if grpc-message value is same as expected\n  EXPECT_TRUE(TestUtility::jsonStringEqual(\n      std::string(response->headers().GrpcMessage()->value().getStringView()),\n      
expected_grpc_message));\n}\n\n// Matched second filter has code, headers and body rewrite and its format\nTEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJsonForFirstMatchingFilter) {\n  const std::string yaml = R\"EOF(\nmappers:\n  - filter:\n      header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value-1\n    status_code: 550\n  - filter:\n      header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value\n    status_code: 551\n    headers_to_add:\n      - header:\n          key: foo\n          value: bar\n        append: false\n    body:\n      inline_string: \"customized body text\"\n    body_format_override:\n      text_format: \"%LOCAL_REPLY_BODY% %RESPONSE_CODE%\"\n  - filter:\n      header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value\n    status_code: 552\nbody_format:\n  json_format:\n    level: TRACE\n    response_flags: \"%RESPONSE_FLAGS%\"\n    response_body: \"%LOCAL_REPLY_BODY%\"\n  )EOF\";\n  setLocalReplyConfig(yaml);\n  initialize();\n\n  const std::string expected_body = \"customized body text 551\";\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"test-header\", \"exact-match-value\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  
ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  response->waitForEndStream();\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    codec_client_->close();\n  }\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"text/plain\", response->headers().ContentType()->value().getStringView());\n  EXPECT_EQ(\"24\", response->headers().ContentLength()->value().getStringView());\n  EXPECT_EQ(\"551\", response->headers().Status()->value().getStringView());\n  EXPECT_EQ(\"bar\", response->headers().get(Http::LowerCaseString(\"foo\"))->value().getStringView());\n  // Check if returned json is same as expected\n  EXPECT_EQ(response->body(), expected_body);\n}\n\n// Not matching any filters.\nTEST_P(LocalReplyIntegrationTest, ShouldNotMatchAnyFilter) {\n  const std::string yaml = R\"EOF(\nmappers:\n  - filter:\n      header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value-1\n    status_code: 550\n  - filter:\n      header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value-2\n    status_code: 551\n  - filter:\n      header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value-3\n    status_code: 552\nbody_format:\n  json_format:\n    level: TRACE\n    response_flags: \"%RESPONSE_FLAGS%\"\n    response_body: \"%LOCAL_REPLY_BODY%\"\n  )EOF\";\n  setLocalReplyConfig(yaml);\n  initialize();\n\n  const std::string expected_body = R\"({\n      \"level\": \"TRACE\",\n      \"response_flags\": \"UC\",\n      \"response_body\": \"upstream connect error or disconnect/reset before headers. 
reset reason: connection termination\"\n})\";\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"test-header\", \"exact-match-value\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  response->waitForEndStream();\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    codec_client_->close();\n  }\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"application/json\", response->headers().ContentType()->value().getStringView());\n  EXPECT_EQ(\"154\", response->headers().ContentLength()->value().getStringView());\n  EXPECT_EQ(\"503\", response->headers().Status()->value().getStringView());\n  // Check if returned json is same as expected\n  EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), expected_body));\n}\n\n// Use default formatter.\nTEST_P(LocalReplyIntegrationTest, ShouldMapResponseCodeAndMapToDefaultTextResponse) {\n  const std::string yaml = R\"EOF(\nmappers:\n  - filter:\n      header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value-1\n    status_code: 550\n  - filter:\n      
header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value-2\n    status_code: 551\n  - filter:\n      header_filter:\n        header:\n          name: test-header\n          exact_match: exact-match-value-3\n    status_code: 552\n  )EOF\";\n  setLocalReplyConfig(yaml);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"test-header\", \"exact-match-value-2\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  response->waitForEndStream();\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    codec_client_->close();\n  }\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"text/plain\", response->headers().ContentType()->value().getStringView());\n  EXPECT_EQ(\"95\", response->headers().ContentLength()->value().getStringView());\n\n  EXPECT_EQ(\"551\", response->headers().Status()->value().getStringView());\n\n  EXPECT_EQ(response->body(), \"upstream connect error or disconnect/reset before headers. 
reset \"\n                              \"reason: connection termination\");\n}\n\n// Should return formatted text/plain response.\nTEST_P(LocalReplyIntegrationTest, ShouldFormatResponseToCustomString) {\n  const std::string yaml = R\"EOF(\nmappers:\n- filter:\n    status_code_filter:\n      comparison:\n        op: EQ\n        value:\n          default_value: 503\n          runtime_key: key_b\n  status_code: 513\n  body:\n    inline_string: \"customized body text\"\nbody_format:\n  text_format: \"%RESPONSE_CODE% - %LOCAL_REPLY_BODY%\"\n)EOF\";\n  setLocalReplyConfig(yaml);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"test-header\", \"exact-match-value-2\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  response->waitForEndStream();\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    codec_client_->close();\n  }\n\n  EXPECT_FALSE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n\n  EXPECT_EQ(\"text/plain\", response->headers().ContentType()->value().getStringView());\n  EXPECT_EQ(\"26\", 
response->headers().ContentLength()->value().getStringView());\n\n  EXPECT_EQ(\"513\", response->headers().Status()->value().getStringView());\n\n  EXPECT_EQ(response->body(), \"513 - customized body text\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/overload_integration_test.cc",
    "content": "#include <unordered_map>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/overload/v3/overload.pb.h\"\n#include \"envoy/server/resource_monitor.h\"\n#include \"envoy/server/resource_monitor_config.h\"\n\n#include \"test/common/config/dummy_config.pb.h\"\n#include \"test/integration/http_protocol_integration.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"absl/strings/str_cat.h\"\n\nnamespace Envoy {\n\nclass FakeResourceMonitorFactory;\n\nclass FakeResourceMonitor : public Server::ResourceMonitor {\npublic:\n  FakeResourceMonitor(Event::Dispatcher& dispatcher, FakeResourceMonitorFactory& factory)\n      : dispatcher_(dispatcher), factory_(factory), pressure_(0.0) {}\n  ~FakeResourceMonitor() override;\n  void updateResourceUsage(Callbacks& callbacks) override;\n\n  void setResourcePressure(double pressure) {\n    dispatcher_.post([this, pressure] { pressure_ = pressure; });\n  }\n\nprivate:\n  Event::Dispatcher& dispatcher_;\n  FakeResourceMonitorFactory& factory_;\n  double pressure_;\n};\n\nclass FakeResourceMonitorFactory : public Server::Configuration::ResourceMonitorFactory {\npublic:\n  FakeResourceMonitor* monitor() const { return monitor_; }\n  Server::ResourceMonitorPtr\n  createResourceMonitor(const Protobuf::Message& config,\n                        Server::Configuration::ResourceMonitorFactoryContext& context) override;\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<test::common::config::DummyConfig>();\n  }\n\n  std::string name() const override {\n    return \"envoy.resource_monitors.testonly.fake_resource_monitor\";\n  }\n\n  void onMonitorDestroyed(FakeResourceMonitor* monitor);\n\nprivate:\n  FakeResourceMonitor* monitor_{nullptr};\n};\n\nFakeResourceMonitor::~FakeResourceMonitor() { factory_.onMonitorDestroyed(this); }\n\nvoid FakeResourceMonitor::updateResourceUsage(Callbacks& callbacks) {\n  Server::ResourceUsage usage;\n  
usage.resource_pressure_ = pressure_;\n  callbacks.onSuccess(usage);\n}\n\nvoid FakeResourceMonitorFactory::onMonitorDestroyed(FakeResourceMonitor* monitor) {\n  ASSERT(monitor_ == monitor);\n  monitor_ = nullptr;\n}\n\nServer::ResourceMonitorPtr FakeResourceMonitorFactory::createResourceMonitor(\n    const Protobuf::Message&, Server::Configuration::ResourceMonitorFactoryContext& context) {\n  auto monitor = std::make_unique<FakeResourceMonitor>(context.dispatcher(), *this);\n  monitor_ = monitor.get();\n  return monitor;\n}\n\nclass OverloadIntegrationTest : public HttpProtocolIntegrationTest {\nprotected:\n  void initialize() override {\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      const std::string overload_config = R\"EOF(\n        refresh_interval:\n          seconds: 0\n          nanos: 1000000\n        resource_monitors:\n          - name: \"envoy.resource_monitors.testonly.fake_resource_monitor\"\n            typed_config:\n              \"@type\": type.googleapis.com/google.protobuf.Empty\n        actions:\n          - name: \"envoy.overload_actions.stop_accepting_requests\"\n            triggers:\n              - name: \"envoy.resource_monitors.testonly.fake_resource_monitor\"\n                threshold:\n                  value: 0.9\n          - name: \"envoy.overload_actions.disable_http_keepalive\"\n            triggers:\n              - name: \"envoy.resource_monitors.testonly.fake_resource_monitor\"\n                threshold:\n                  value: 0.8\n          - name: \"envoy.overload_actions.stop_accepting_connections\"\n            triggers:\n              - name: \"envoy.resource_monitors.testonly.fake_resource_monitor\"\n                threshold:\n                  value: 0.95\n      )EOF\";\n      *bootstrap.mutable_overload_manager() =\n          TestUtility::parseYaml<envoy::config::overload::v3::OverloadManager>(overload_config);\n    });\n    HttpIntegrationTest::initialize();\n 
   updateResource(0);\n  }\n\n  void updateResource(double pressure) {\n    auto* monitor = fake_resource_monitor_factory_.monitor();\n    ASSERT(monitor != nullptr);\n    monitor->setResourcePressure(pressure);\n  }\n\n  FakeResourceMonitorFactory fake_resource_monitor_factory_;\n  Registry::InjectFactory<Server::Configuration::ResourceMonitorFactory> inject_factory_{\n      fake_resource_monitor_factory_};\n};\n\nINSTANTIATE_TEST_SUITE_P(Protocols, OverloadIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nTEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) {\n  initialize();\n\n  // Put envoy in overloaded state and check that it drops new requests.\n  // Test both header-only and header+body requests since the code paths are slightly different.\n  updateResource(0.9);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.stop_accepting_requests.active\", 1);\n\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  auto response = codec_client_->makeRequestWithBody(request_headers, 10);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(\"envoy overloaded\", response->body());\n  codec_client_->close();\n\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(\"envoy overloaded\", response->body());\n  codec_client_->close();\n\n  // Deactivate overload state 
and check that new requests are accepted.\n  updateResource(0.8);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.stop_accepting_requests.active\", 0);\n\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(0U, response->body().size());\n}\n\nTEST_P(OverloadIntegrationTest, DisableKeepaliveWhenOverloaded) {\n  if (downstreamProtocol() != Http::CodecClient::Type::HTTP1) {\n    return; // only relevant for downstream HTTP1.x connections\n  }\n\n  initialize();\n\n  // Put envoy in overloaded state and check that it disables keepalive\n  updateResource(0.8);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.disable_http_keepalive.active\", 1);\n\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  auto response = sendRequestAndWaitForResponse(request_headers, 1, default_response_headers_, 1);\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"close\", response->headers().getConnectionValue());\n\n  // Deactivate overload state and check that keepalive is not disabled\n  updateResource(0.7);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.disable_http_keepalive.active\", 0);\n\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  response = sendRequestAndWaitForResponse(request_headers, 1, default_response_headers_, 1);\n\n  
EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(nullptr, response->headers().Connection());\n}\n\nTEST_P(OverloadIntegrationTest, StopAcceptingConnectionsWhenOverloaded) {\n  initialize();\n\n  // Put envoy in overloaded state and check that it doesn't accept the new client connection.\n  updateResource(0.95);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.stop_accepting_connections.active\",\n                               1);\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  auto response = codec_client_->makeRequestWithBody(request_headers, 10);\n  EXPECT_FALSE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_,\n                                                         std::chrono::milliseconds(1000)));\n\n  // Reduce load a little to allow the connection to be accepted but then immediately reject the\n  // request.\n  updateResource(0.9);\n  test_server_->waitForGaugeEq(\"overload.envoy.overload_actions.stop_accepting_connections.active\",\n                               0);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(\"envoy overloaded\", response->body());\n  codec_client_->close();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/protocol_integration_test.cc",
    "content": "#include <functional>\n#include <list>\n#include <memory>\n#include <regex>\n#include <string>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/registry/registry.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/thread_annotations.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/integration/autonomous_upstream.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/http_protocol_integration.h\"\n#include \"test/integration/test_host_predicate_config.h\"\n#include \"test/integration/utility.h\"\n#include \"test/mocks/upstream/retry_priority.h\"\n#include \"test/mocks/upstream/retry_priority_factory.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"absl/time/time.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::HasSubstr;\nusing testing::Not;\n\nnamespace Envoy {\n\nvoid setDoNotValidateRouteConfig(\n    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) {\n  auto* route_config = hcm.mutable_route_config();\n  route_config->mutable_validate_clusters()->set_value(false);\n};\n\n// Tests for DownstreamProtocolIntegrationTest will be run with all protocols\n// (H1/H2 downstream) but only H1 upstreams.\n//\n// This is 
useful for things which will likely not differ based on upstream\n// behavior, for example \"how does Envoy handle duplicate content lengths from\n// downstream\"?\nclass DownstreamProtocolIntegrationTest : public HttpProtocolIntegrationTest {\nprotected:\n  template <class T> void changeHeadersForStopAllTests(T& headers, bool set_buffer_limit) {\n    headers.addCopy(\"content_size\", std::to_string(count_ * size_));\n    headers.addCopy(\"added_size\", std::to_string(added_decoded_data_size_));\n    headers.addCopy(\"is_first_trigger\", \"value\");\n    if (set_buffer_limit) {\n      headers.addCopy(\"buffer_limit\", std::to_string(buffer_limit_));\n    }\n  }\n\n  void verifyUpStreamRequestAfterStopAllFilter() {\n    if (downstreamProtocol() == Http::CodecClient::Type::HTTP2) {\n      // decode-headers-return-stop-all-filter calls addDecodedData in decodeData and\n      // decodeTrailers. 2 decoded data were added.\n      EXPECT_EQ(count_ * size_ + added_decoded_data_size_ * 2, upstream_request_->bodyLength());\n    } else {\n      EXPECT_EQ(count_ * size_ + added_decoded_data_size_ * 1, upstream_request_->bodyLength());\n    }\n    EXPECT_EQ(true, upstream_request_->complete());\n  }\n\n  const int count_ = 70;\n  const int size_ = 1000;\n  const int added_decoded_data_size_ = 1;\n  const int buffer_limit_ = 100;\n};\n\n// Tests for ProtocolIntegrationTest will be run with the full mesh of H1/H2\n// downstream and H1/H2 upstreams.\nusing ProtocolIntegrationTest = HttpProtocolIntegrationTest;\n\nTEST_P(ProtocolIntegrationTest, TrailerSupportHttp1) {\n  config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1());\n  config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1());\n\n  testTrailers(10, 20, true, true);\n}\n\nTEST_P(ProtocolIntegrationTest, ShutdownWithActiveConnPoolConnections) {\n  auto response = makeHeaderOnlyRequest(nullptr, 0);\n  // Shut down the server with active connection pool connections.\n  test_server_.reset();\n  
checkSimpleRequestSuccess(0U, 0U, response.get());\n}\n\n// Change the default route to be restrictive, and send a request to an alternate route.\nTEST_P(ProtocolIntegrationTest, RouterNotFound) { testRouterNotFound(); }\n\nTEST_P(ProtocolIntegrationTest, RouterVirtualClusters) { testRouterVirtualClusters(); }\n\n// Change the default route to be restrictive, and send a POST to an alternate route.\nTEST_P(DownstreamProtocolIntegrationTest, RouterNotFoundBodyNoBuffer) {\n  testRouterNotFoundWithBody();\n}\n\n// Add a route that uses unknown cluster (expect 404 Not Found).\nTEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound404) {\n  config_helper_.addConfigModifier(&setDoNotValidateRouteConfig);\n  auto host = config_helper_.createVirtualHost(\"foo.com\", \"/unknown\", \"unknown_cluster\");\n  host.mutable_routes(0)->mutable_route()->set_cluster_not_found_response_code(\n      envoy::config::route::v3::RouteAction::NOT_FOUND);\n  config_helper_.addVirtualHost(host);\n  initialize();\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/unknown\", \"\", downstream_protocol_, version_, \"foo.com\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"404\", response->headers().getStatusValue());\n}\n\n// Add a route that uses unknown cluster (expect 503 Service Unavailable).\nTEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound503) {\n  config_helper_.addConfigModifier(&setDoNotValidateRouteConfig);\n  auto host = config_helper_.createVirtualHost(\"foo.com\", \"/unknown\", \"unknown_cluster\");\n  host.mutable_routes(0)->mutable_route()->set_cluster_not_found_response_code(\n      envoy::config::route::v3::RouteAction::SERVICE_UNAVAILABLE);\n  config_helper_.addVirtualHost(host);\n  initialize();\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/unknown\", \"\", downstream_protocol_, version_, \"foo.com\");\n  
ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n}\n\n// Add a route which redirects HTTP to HTTPS, and verify Envoy sends a 301\nTEST_P(ProtocolIntegrationTest, RouterRedirect) {\n  auto host = config_helper_.createVirtualHost(\"www.redirect.com\", \"/\");\n  host.set_require_tls(envoy::config::route::v3::VirtualHost::ALL);\n  config_helper_.addVirtualHost(host);\n  initialize();\n\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/foo\", \"\", downstream_protocol_, version_, \"www.redirect.com\");\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"301\", response->headers().getStatusValue());\n  EXPECT_EQ(\"https://www.redirect.com/foo\",\n            response->headers().get(Http::Headers::get().Location)->value().getStringView());\n}\n\nTEST_P(ProtocolIntegrationTest, UnknownResponsecode) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"600\"}};\n  auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, response_headers, 0);\n\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"600\", response->headers().getStatusValue());\n}\n\n// Add a health check filter and verify correct computation of health based on upstream status.\nTEST_P(ProtocolIntegrationTest, ComputedHealthCheck) {\n  config_helper_.addFilter(R\"EOF(\nname: health_check\ntyped_config:\n    \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n    pass_through_mode: false\n    cluster_min_healthy_percentages:\n        example_cluster_name: { value: 75 }\n)EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"}, {\":path\", \"/healthcheck\"}, {\":scheme\", \"http\"}, {\":authority\", 
\"host\"}});\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n}\n\n// Add a health check filter and verify correct computation of health based on upstream status.\nTEST_P(ProtocolIntegrationTest, ModifyBuffer) {\n  config_helper_.addFilter(R\"EOF(\nname: health_check\ntyped_config:\n    \"@type\": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck\n    pass_through_mode: false\n    cluster_min_healthy_percentages:\n        example_cluster_name: { value: 75 }\n)EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"GET\"}, {\":path\", \"/healthcheck\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}});\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n}\n\n// Verifies behavior for https://github.com/envoyproxy/envoy/pull/11248\nTEST_P(ProtocolIntegrationTest, AddBodyToRequestAndWaitForIt) {\n  // filters are prepended, so add them in reverse order\n  config_helper_.addFilter(R\"EOF(\n  name: wait-for-whole-request-and-response-filter\n  )EOF\");\n  config_helper_.addFilter(R\"EOF(\n  name: add-body-filter\n  )EOF\");\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n  EXPECT_EQ(\"body\", upstream_request_->body().toString());\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, false);\n  // encode data, as we have a separate test for the transforming header only response.\n  upstream_request_->encodeData(128, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n  
EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(ProtocolIntegrationTest, AddBodyToResponseAndWaitForIt) {\n  // filters are prepended, so add them in reverse order\n  config_helper_.addFilter(R\"EOF(\n  name: add-body-filter\n  )EOF\");\n  config_helper_.addFilter(R\"EOF(\n  name: wait-for-whole-request-and-response-filter\n  )EOF\");\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 128);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"body\", response->body());\n}\n\nTEST_P(ProtocolIntegrationTest, ContinueHeadersOnlyInjectBodyFilter) {\n  config_helper_.addFilter(R\"EOF(\n  name: continue-headers-only-inject-body-filter\n  typed_config:\n    \"@type\": type.googleapis.com/google.protobuf.Empty\n  )EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Send a headers only request.\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n\n  // Make sure that the body was injected to the request.\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(upstream_request_->receivedData());\n  EXPECT_EQ(upstream_request_->body().toString(), \"body\");\n\n  // Send a headers only response.\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  // Make sure that the body was injected to the response.\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(response->body(), \"body\");\n}\n\nTEST_P(ProtocolIntegrationTest, AddEncodedTrailers) {\n  config_helper_.addFilter(R\"EOF(\nname: 
add-trailers-filter\ntyped_config:\n  \"@type\": type.googleapis.com/google.protobuf.Empty\n)EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 128);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n  upstream_request_->encodeData(128, true);\n  response->waitForEndStream();\n\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP2) {\n    EXPECT_EQ(\"decode\", upstream_request_->trailers()\n                            ->get(Http::LowerCaseString(\"grpc-message\"))\n                            ->value()\n                            .getStringView());\n  }\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) {\n    EXPECT_EQ(\"encode\", response->trailers()->getGrpcMessageValue());\n  }\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/9873\nTEST_P(ProtocolIntegrationTest, ResponseWithHostHeader) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"}});\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"host\", \"host\"}}, true);\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"host\",\n            response->headers().get(Http::LowerCaseString(\"host\"))->value().getStringView());\n}\n\n// Regression test for 
https://github.com/envoyproxy/envoy/issues/10270\nTEST_P(ProtocolIntegrationTest, LongHeaderValueWithSpaces) {\n  // Header with at least 20kb of spaces surrounded by non-whitespace characters to ensure that\n  // dispatching is split across 2 dispatch calls. This threshold comes from Envoy preferring 16KB\n  // reads, which the buffer rounds up to about 20KB when allocating slices in\n  // Buffer::OwnedImpl::reserve().\n  const std::string long_header_value_with_inner_lws = \"v\" + std::string(32 * 1024, ' ') + \"v\";\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"longrequestvalue\", long_header_value_with_inner_lws}});\n  waitForNextUpstreamRequest();\n  EXPECT_EQ(long_header_value_with_inner_lws, upstream_request_->headers()\n                                                  .get(Http::LowerCaseString(\"longrequestvalue\"))\n                                                  ->value()\n                                                  .getStringView());\n  upstream_request_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"},\n                                      {\"host\", \"host\"},\n                                      {\"longresponsevalue\", long_header_value_with_inner_lws}},\n      true);\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(\"host\",\n            response->headers().get(Http::LowerCaseString(\"host\"))->value().getStringView());\n  EXPECT_EQ(\n      long_header_value_with_inner_lws,\n      
response->headers().get(Http::LowerCaseString(\"longresponsevalue\"))->value().getStringView());\n}\n\nTEST_P(ProtocolIntegrationTest, Retry) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1024U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n  Stats::Store& stats = test_server_->server().stats();\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP2) {\n    Stats::CounterSharedPtr counter =\n        TestUtility::findCounter(stats, \"cluster.cluster_0.http2.tx_reset\");\n    ASSERT_NE(nullptr, counter);\n    EXPECT_EQ(1L, counter->value());\n  } else {\n    Stats::CounterSharedPtr counter =\n        TestUtility::findCounter(stats, 
\"cluster.cluster_0.http1.dropped_headers_with_underscores\");\n    EXPECT_NE(nullptr, counter);\n  }\n}\n\nTEST_P(ProtocolIntegrationTest, RetryStreaming) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"x-forwarded-for\", \"10.0.0.1\"},\n                                                                 {\"x-envoy-retry-on\", \"5xx\"}});\n  auto& encoder = encoder_decoder.first;\n  auto& response = encoder_decoder.second;\n\n  // Send some data, but not the entire body.\n  std::string data(1024, 'a');\n  Buffer::OwnedImpl send1(data);\n  encoder.encodeData(send1, false);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  // Send back an upstream failure.\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n\n  // Wait for a retry. 
Ensure all data, both before and after the retry, is received.\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  // Finish the request.\n  std::string data2(512, 'b');\n  Buffer::OwnedImpl send2(data2);\n  encoder.encodeData(send2, true);\n  std::string combined_request_data = data + data2;\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, combined_request_data));\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(combined_request_data.size(), upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n}\n\n// Regression test https://github.com/envoyproxy/envoy/issues/11131\n// Send complete response headers directing a retry and reset the stream to make\n// sure that Envoy cleans up stream state correctly when doing a retry with\n// complete response but incomplete request.\nTEST_P(ProtocolIntegrationTest, RetryStreamingReset) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"x-forwarded-for\", \"10.0.0.1\"},\n                                                                 {\"x-envoy-retry-on\", \"5xx\"}});\n  auto& encoder = encoder_decoder.first;\n  auto& response = encoder_decoder.second;\n\n  // Send some data, but not the entire 
body.\n  std::string data(1024, 'a');\n  Buffer::OwnedImpl send1(data);\n  encoder.encodeData(send1, false);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  // Send back an upstream failure and end stream. Make sure an immediate reset\n  // doesn't cause problems. Schedule via the upstream_request_ dispatcher to ensure that the stream\n  // still exists when encoding the reset stream.\n  upstream_request_->postToConnectionThread([this]() {\n    upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, true);\n    upstream_request_->encodeResetStream();\n  });\n\n  // Make sure the fake stream is reset.\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n\n  // Wait for a retry. 
Ensure all data, both before and after the retry, is received.\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  // Finish the request.\n  std::string data2(512, 'b');\n  Buffer::OwnedImpl send2(data2);\n  encoder.encodeData(send2, true);\n  std::string combined_request_data = data + data2;\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, combined_request_data));\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(combined_request_data.size(), upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n}\n\nTEST_P(ProtocolIntegrationTest, RetryStreamingCancelDueToBufferOverflow) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) {\n        auto* route = hcm.mutable_route_config()->mutable_virtual_hosts(0)->mutable_routes(0);\n\n        route->mutable_per_request_buffer_limit_bytes()->set_value(1024);\n        route->mutable_route()\n            ->mutable_retry_policy()\n            ->mutable_retry_back_off()\n            ->mutable_base_interval()\n            ->MergeFrom(\n                ProtobufUtil::TimeUtil::MillisecondsToDuration(100000000)); // Effectively infinity.\n      });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                
 {\":authority\", \"host\"},\n                                                                 {\"x-forwarded-for\", \"10.0.0.1\"},\n                                                                 {\"x-envoy-retry-on\", \"5xx\"}});\n  auto& encoder = encoder_decoder.first;\n  auto& response = encoder_decoder.second;\n\n  // Send some data, but less than the buffer limit, and not end-stream\n  std::string data(64, 'a');\n  Buffer::OwnedImpl send1(data);\n  encoder.encodeData(send1, false);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  // Send back an upstream failure.\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n\n  // Overflow the request buffer limit. Because the retry base interval is infinity, no\n  // request will be in progress. 
This will cause the request to be aborted and an error\n  // to be returned to the client.\n  std::string data2(2048, 'b');\n  Buffer::OwnedImpl send2(data2);\n  encoder.encodeData(send2, false);\n\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"507\", response->headers().getStatusValue());\n  test_server_->waitForCounterEq(\"cluster.cluster_0.retry_or_shadow_abandoned\", 1);\n}\n\n// Tests that the x-envoy-attempt-count header is properly set on the upstream request and the\n// downstream response, and updated after the request is retried.\nTEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) {\n  auto host = config_helper_.createVirtualHost(\"host\", \"/test_retry\");\n  host.set_include_request_attempt_count(true);\n  host.set_include_attempt_count_in_response(true);\n  config_helper_.addVirtualHost(host);\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test_retry\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 1);\n\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n  
waitForNextUpstreamRequest();\n  EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 2);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1024U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n  EXPECT_EQ(2, atoi(std::string(response->headers().getEnvoyAttemptCountValue()).c_str()));\n}\n\n// Verifies that a retry priority can be configured and affect the host selected during retries.\n// The retry priority will always target P1, which would otherwise never be hit due to P0 being\n// healthy.\nTEST_P(DownstreamProtocolIntegrationTest, RetryPriority) {\n  const Upstream::HealthyLoad healthy_priority_load({0u, 100u});\n  const Upstream::DegradedLoad degraded_priority_load({0u, 100u});\n  NiceMock<Upstream::MockRetryPriority> retry_priority(healthy_priority_load,\n                                                       degraded_priority_load);\n  Upstream::MockRetryPriorityFactory factory(retry_priority);\n\n  Registry::InjectFactory<Upstream::RetryPriorityFactory> inject_factory(factory);\n\n  // Add route with custom retry policy\n  auto host = config_helper_.createVirtualHost(\"host\", \"/test_retry\");\n  host.set_include_request_attempt_count(true);\n  auto retry_policy = host.mutable_routes(0)->mutable_route()->mutable_retry_policy();\n  retry_policy->mutable_retry_priority()->set_name(factory.name());\n  config_helper_.addVirtualHost(host);\n  // We want to work with a cluster with two hosts.\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n    auto* load_assignment = 
cluster->mutable_load_assignment();\n    load_assignment->clear_endpoints();\n\n    for (int i = 0; i < 2; ++i) {\n      auto locality = load_assignment->add_endpoints();\n      locality->set_priority(i);\n      locality->mutable_locality()->set_region(\"region\");\n      locality->mutable_locality()->set_zone(\"zone\");\n      locality->mutable_locality()->set_sub_zone(\"sub_zone\" + std::to_string(i));\n      locality->add_lb_endpoints()->mutable_endpoint()->MergeFrom(\n          ConfigHelper::buildEndpoint(Network::Test::getLoopbackAddressString(version_)));\n    }\n  });\n  fake_upstreams_count_ = 2;\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test_retry\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024);\n\n  // Note how we're expecting each upstream request to hit the same upstream.\n  waitForNextUpstreamRequest(0);\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n\n  waitForNextUpstreamRequest(1);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1024U, upstream_request_->bodyLength());\n\n  
EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n}\n\n//\n// Verifies that a retry host filter can be configured and affect the host selected during retries.\n// The predicate will keep track of the first host attempted, and attempt to route all requests to\n// the same host. With a total of two upstream hosts, this should result in us continuously sending\n// requests to the same host.\nTEST_P(DownstreamProtocolIntegrationTest, RetryHostPredicateFilter) {\n  TestHostPredicateFactory predicate_factory;\n  Registry::InjectFactory<Upstream::RetryHostPredicateFactory> inject_factory(predicate_factory);\n\n  // Add route with custom retry policy\n  auto host = config_helper_.createVirtualHost(\"host\", \"/test_retry\");\n  host.set_include_request_attempt_count(true);\n  auto retry_policy = host.mutable_routes(0)->mutable_route()->mutable_retry_policy();\n  retry_policy->add_retry_host_predicate()->set_name(predicate_factory.name());\n  config_helper_.addVirtualHost(host);\n\n  // We want to work with a cluster with two hosts.\n  config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    bootstrap.mutable_static_resources()\n        ->mutable_clusters(0)\n        ->mutable_load_assignment()\n        ->mutable_endpoints(0)\n        ->add_lb_endpoints()\n        ->mutable_endpoint()\n        ->MergeFrom(ConfigHelper::buildEndpoint(Network::Test::getLoopbackAddressString(version_)));\n  });\n  fake_upstreams_count_ = 2;\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test_retry\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                    
 {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024);\n\n  // Note how we're expecting each upstream request to hit the same upstream.\n  auto upstream_idx = waitForNextUpstreamRequest({0, 1});\n  ASSERT_TRUE(upstream_idx.has_value());\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  if (fake_upstreams_[*upstream_idx]->httpType() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n    ASSERT_TRUE(fake_upstreams_[*upstream_idx]->waitForHttpConnection(*dispatcher_,\n                                                                      fake_upstream_connection_));\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n  }\n\n  waitForNextUpstreamRequest(*upstream_idx);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1024U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(512U, response->body().size());\n}\n\n// Very similar set-up to testRetry but with a 16k request the request will not\n// be buffered and the 503 will be returned to the user.\nTEST_P(ProtocolIntegrationTest, RetryHittingBufferLimit) {\n  config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream.\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     
{\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024 * 65);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(66560U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n}\n\n// Very similar set-up to RetryHittingBufferLimits but using the route specific cap.\nTEST_P(ProtocolIntegrationTest, RetryHittingRouteLimits) {\n  auto host = config_helper_.createVirtualHost(\"nobody.com\", \"/\");\n  host.mutable_per_request_buffer_limit_bytes()->set_value(0);\n  config_helper_.addVirtualHost(host);\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"nobody.com\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(1U, upstream_request_->bodyLength());\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n}\n\n// Test hitting the decoder buffer filter with too many request bytes to buffer. 
Ensure the\n// connection manager sends a 413.\nTEST_P(DownstreamProtocolIntegrationTest, HittingDecoderFilterLimit) {\n  config_helper_.addFilter(\"{ name: encoder-decoder-buffer-filter, typed_config: { \\\"@type\\\": \"\n                           \"type.googleapis.com/google.protobuf.Empty } }\");\n  config_helper_.setBufferLimits(1024, 1024);\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeRequestWithBody(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                     {\":path\", \"/dynamo/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"x-forwarded-for\", \"10.0.0.1\"},\n                                     {\"x-envoy-retry-on\", \"5xx\"}},\n      1024 * 65);\n\n  response->waitForEndStream();\n  // With HTTP/1 there's a possible race where if the connection backs up early,\n  // the 413-and-connection-close may be sent while the body is still being\n  // sent, resulting in a write error and the connection being closed before the\n  // response is read.\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) {\n    ASSERT_TRUE(response->complete());\n  }\n  if (response->complete()) {\n    EXPECT_EQ(\"413\", response->headers().getStatusValue());\n  }\n}\n\n// Test hitting the encoder buffer filter with too many response bytes to buffer. 
Given the request\n// headers are sent on early, the stream/connection will be reset.\nTEST_P(ProtocolIntegrationTest, HittingEncoderFilterLimit) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* route_config = hcm.mutable_route_config();\n        auto* virtual_host = route_config->mutable_virtual_hosts(0);\n        auto* header = virtual_host->mutable_response_headers_to_add()->Add()->mutable_header();\n        header->set_key(\"foo\");\n        header->set_value(\"bar\");\n      });\n\n  useAccessLog();\n  config_helper_.addFilter(\"{ name: encoder-decoder-buffer-filter, typed_config: { \\\"@type\\\": \"\n                           \"type.googleapis.com/google.protobuf.Empty } }\");\n  config_helper_.setBufferLimits(1024, 1024);\n  initialize();\n\n  // Send the request.\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  auto downstream_request = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  Buffer::OwnedImpl data(\"HTTP body content goes here\");\n  codec_client_->sendData(*downstream_request, data, true);\n  waitForNextUpstreamRequest();\n\n  // Send the response headers.\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  // Now send an overly large response body. 
At some point, too much data will\n  // be buffered, the stream will be reset, and the connection will disconnect.\n  upstream_request_->encodeData(1024 * 65, false);\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"500\", response->headers().getStatusValue());\n  // Regression test all sendLocalReply paths add route-requested headers.\n  auto foo = Http::LowerCaseString(\"foo\");\n  ASSERT_TRUE(response->headers().get(foo) != nullptr);\n  EXPECT_EQ(\"bar\", response->headers().get(foo)->value().getStringView());\n\n  // Regression test https://github.com/envoyproxy/envoy/issues/9881 by making\n  // sure this path does standard HCM header transformations.\n  EXPECT_TRUE(response->headers().Date() != nullptr);\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"500\"));\n  test_server_->waitForCounterEq(\"http.config_test.downstream_rq_5xx\", 1);\n}\n\nTEST_P(ProtocolIntegrationTest, EnvoyHandling100Continue) { testEnvoyHandling100Continue(); }\n\nTEST_P(ProtocolIntegrationTest, EnvoyHandlingDuplicate100Continue) {\n  testEnvoyHandling100Continue(true);\n}\n\n// 100-continue before the request completes.\nTEST_P(ProtocolIntegrationTest, EnvoyProxyingEarly100Continue) { testEnvoyProxying1xx(true); }\n\n// Multiple 1xx before the request completes.\nTEST_P(ProtocolIntegrationTest, EnvoyProxyingEarlyMultiple1xx) {\n  testEnvoyProxying1xx(true, false, true);\n}\n\n// 100-continue after the request completes.\nTEST_P(ProtocolIntegrationTest, EnvoyProxyingLate100Continue) { testEnvoyProxying1xx(false); }\n\n// Multiple 1xx after the request completes.\nTEST_P(ProtocolIntegrationTest, 
EnvoyProxyingLateMultiple1xx) {\n  testEnvoyProxying1xx(false, false, true);\n}\n\nTEST_P(ProtocolIntegrationTest, TwoRequests) { testTwoRequests(); }\n\nTEST_P(ProtocolIntegrationTest, TwoRequestsWithForcedBackup) { testTwoRequests(true); }\n\nTEST_P(ProtocolIntegrationTest, BasicMaxStreamDuration) { testMaxStreamDuration(); }\n\nTEST_P(ProtocolIntegrationTest, MaxStreamDurationWithRetryPolicy) {\n  testMaxStreamDurationWithRetry(false);\n}\n\nTEST_P(ProtocolIntegrationTest, MaxStreamDurationWithRetryPolicyWhenRetryUpstreamDisconnection) {\n  testMaxStreamDurationWithRetry(true);\n}\n\n// Verify that headers with underscores in their names are dropped from client requests\n// but remain in upstream responses.\nTEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresDropped) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_common_http_protocol_options()->set_headers_with_underscores_action(\n            envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER);\n      });\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"foo_bar\", \"baz\"}});\n  waitForNextUpstreamRequest();\n\n  EXPECT_THAT(upstream_request_->headers(), Not(HeaderHasValueRef(\"foo_bar\", \"baz\")));\n  upstream_request_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"bar_baz\", \"fooz\"}}, true);\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  
EXPECT_THAT(response->headers(), HeaderHasValueRef(\"bar_baz\", \"fooz\"));\n  Stats::Store& stats = test_server_->server().stats();\n  std::string stat_name = (downstreamProtocol() == Http::CodecClient::Type::HTTP1)\n                              ? \"http1.dropped_headers_with_underscores\"\n                              : \"http2.dropped_headers_with_underscores\";\n  EXPECT_EQ(1L, TestUtility::findCounter(stats, stat_name)->value());\n}\n\n// Verify that by default headers with underscores in their names remain in both requests and\n// responses when allowed in configuration.\nTEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresRemainByDefault) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"foo_bar\", \"baz\"}});\n  waitForNextUpstreamRequest();\n\n  EXPECT_THAT(upstream_request_->headers(), HeaderHasValueRef(\"foo_bar\", \"baz\"));\n  upstream_request_->encodeHeaders(\n      Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"bar_baz\", \"fooz\"}}, true);\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_THAT(response->headers(), HeaderHasValueRef(\"bar_baz\", \"fooz\"));\n}\n\n// Verify that request with headers containing underscores is rejected when configured.\nTEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefault) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void 
{\n        hcm.mutable_common_http_protocol_options()->set_headers_with_underscores_action(\n            envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST);\n      });\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"},\n                                     {\"foo_bar\", \"baz\"}});\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"400\", response->headers().getStatusValue());\n  } else {\n    response->waitForReset();\n    codec_client_->close();\n    ASSERT_TRUE(response->reset());\n    EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason());\n  }\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"unexpected_underscore\"));\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, ValidZeroLengthContent) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"content-length\", \"0\"}};\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// Test we're following https://tools.ietf.org/html/rfc7230#section-3.3.2\n// as best we 
can.\nTEST_P(ProtocolIntegrationTest, 304WithBody) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  waitForNextUpstreamRequest();\n  Http::TestResponseHeaderMapImpl response_headers{{\":status\", \"304\"}, {\"content-length\", \"2\"}};\n  ASSERT(upstream_request_ != nullptr);\n  upstream_request_->encodeHeaders(response_headers, false);\n  response->waitForHeaders();\n  EXPECT_EQ(\"304\", response->headers().getStatusValue());\n\n  // For HTTP/1.1 http_parser is explicitly told that 304s are header-only\n  // requests.\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1 ||\n      upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(response->complete());\n  } else {\n    ASSERT_FALSE(response->complete());\n  }\n\n  upstream_request_->encodeData(2, true);\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    // Any body sent after the request is considered complete will not be handled as part of the\n    // active request, but will be flagged as a protocol error for the no-longer-associated\n    // connection.\n    // Ideally if we got the body with the headers we would instead reset the\n    // stream, but it turns out that's complicated so instead we consistently\n    // forward the headers and error out after.\n    test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_protocol_error\", 1);\n  }\n\n  // Only for HTTP/2, where streams are ended with an explicit end-stream so we\n  // can differentiate between 304-with-advertised-but-absent-body and\n  // 304-with-body, is there a protocol error on the active stream.\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP2 &&\n      upstreamProtocol() == FakeHttpConnection::Type::HTTP2) {\n    response->waitForReset();\n  }\n}\n\n// Validate that lots of tiny cookies doesn't cause a DoS (single cookie 
header).\nTEST_P(DownstreamProtocolIntegrationTest, LargeCookieParsingConcatenated) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                                 {\"content-length\", \"0\"}};\n  std::vector<std::string> cookie_pieces;\n  cookie_pieces.reserve(7000);\n  for (int i = 0; i < 7000; i++) {\n    cookie_pieces.push_back(fmt::sprintf(\"a%x=b\", i));\n  }\n  request_headers.addCopy(\"cookie\", absl::StrJoin(cookie_pieces, \"; \"));\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// Validate that lots of tiny cookies doesn't cause a DoS (many cookie headers).\nTEST_P(DownstreamProtocolIntegrationTest, LargeCookieParsingMany) {\n  // Set header count limit to 2010.\n  uint32_t max_count = 2010;\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_common_http_protocol_options()->mutable_max_headers_count()->set_value(\n            max_count);\n      });\n  max_request_headers_count_ = max_count;\n\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"host\"},\n                                 
                {\"content-length\", \"0\"}};\n  for (int i = 0; i < 2000; i++) {\n    request_headers.addCopy(\"cookie\", fmt::sprintf(\"a%x=b\", i));\n  }\n  auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"content-length\", \"-1\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"400\", response->headers().getStatusValue());\n    test_server_->waitForCounterGe(\"http.config_test.downstream_rq_4xx\", 1);\n  } else {\n    ASSERT_TRUE(response->reset());\n    EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->resetReason());\n  }\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, InvalidContentLengthAllowed) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) -> void {\n        hcm.mutable_http2_protocol_options()\n            ->mutable_override_stream_error_on_invalid_http_message()\n            ->set_value(true);\n        hcm.mutable_http_protocol_options()\n            ->mutable_override_stream_error_on_invalid_http_message()\n            ->set_value(true);\n      });\n\n  initialize();\n\n  
codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"content-length\", \"-1\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    response->waitForReset();\n    codec_client_->close();\n  }\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"400\", response->headers().getStatusValue());\n  } else {\n    ASSERT_TRUE(response->reset());\n    EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason());\n  }\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengths) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"content-length\", \"3,2\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"400\", response->headers().getStatusValue());\n  } else {\n    ASSERT_TRUE(response->reset());\n    EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, 
response->resetReason());\n  }\n}\n\n// TODO(PiotrSikora): move this HTTP/2 only variant to http2_integration_test.cc.\nTEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) {\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) -> void {\n        hcm.mutable_http2_protocol_options()\n            ->mutable_override_stream_error_on_invalid_http_message()\n            ->set_value(true);\n      });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":authority\", \"host\"},\n                                                                 {\"content-length\", \"3,2\"}});\n  auto response = std::move(encoder_decoder.second);\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    response->waitForReset();\n    codec_client_->close();\n  }\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"400\", response->headers().getStatusValue());\n  } else {\n    ASSERT_TRUE(response->reset());\n    EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason());\n  }\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterEncoding) {\n  config_helper_.addFilter(R\"EOF(\nname: encode-headers-only\n)EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response =\n      codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                        {\":path\", 
\"/test/long/url\"},\n                                                                        {\":scheme\", \"http\"},\n                                                                        {\":authority\", \"host\"}},\n                                         128);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(0, response->body().size());\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterDecoding) {\n  config_helper_.addFilter(R\"EOF(\nname: decode-headers-only\n)EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response =\n      codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                        {\":path\", \"/test/long/url\"},\n                                                                        {\":scheme\", \"http\"},\n                                                                        {\":authority\", \"host\"}},\n                                         128);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n  upstream_request_->encodeData(128, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(128, 
response->body().size());\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterEncodingIntermediateFilters) {\n  config_helper_.addFilter(R\"EOF(\nname: passthrough-filter\n)EOF\");\n  config_helper_.addFilter(R\"EOF(\nname: encode-headers-only\n)EOF\");\n  config_helper_.addFilter(R\"EOF(\nname: passthrough-filter\n)EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response =\n      codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                        {\":path\", \"/test/long/url\"},\n                                                                        {\":scheme\", \"http\"},\n                                                                        {\":authority\", \"host\"}},\n                                         128);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n  response->waitForEndStream();\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(0, response->body().size());\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterDecodingIntermediateFilters) {\n  config_helper_.addFilter(R\"EOF(\nname: passthrough-filter\n)EOF\");\n  config_helper_.addFilter(R\"EOF(\nname: decode-headers-only\n)EOF\");\n  config_helper_.addFilter(R\"EOF(\nname: passthrough-filter\n)EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response =\n      
codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                        {\":path\", \"/test/long/url\"},\n                                                                        {\":scheme\", \"http\"},\n                                                                        {\":authority\", \"host\"}},\n                                         128);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n  upstream_request_->encodeData(128, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(128, response->body().size());\n}\n\n// Verifies behavior when request data is encoded after the request has been\n// turned into a headers-only request and the response has already begun.\nTEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterInterleaved) {\n  config_helper_.addFilter(R\"EOF(\nname: decode-headers-only\n)EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // First send the request headers. 
The filter should turn this into a header-only\n  // request.\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                                                                 {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  // Wait for the upstream request and begin sending a response with end_stream = false.\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, false);\n\n  // Simulate additional data after the request has been turned into a headers only request.\n  Buffer::OwnedImpl data(std::string(128, 'a'));\n  request_encoder_->encodeData(data, false);\n\n  // End the response.\n  upstream_request_->encodeData(128, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n  EXPECT_EQ(0, upstream_request_->body().length());\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, LocalReplyDuringEncoding) {\n  config_helper_.addFilter(R\"EOF(\nname: local-reply-during-encode\n)EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/test/long/url\"},\n                                     {\":scheme\", \"http\"},\n                                     {\":authority\", \"host\"}});\n\n  // Wait for the upstream request and begin sending a response with end_stream = false.\n  waitForNextUpstreamRequest();\n  
upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"503\"}}, true);\n\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"500\", response->headers().getStatusValue());\n  EXPECT_EQ(0, upstream_request_->body().length());\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, LargeRequestUrlRejected) {\n  // Send one 95 kB URL with limit 60 kB headers.\n  testLargeRequestUrl(95, 60);\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, LargeRequestUrlAccepted) {\n  // Send one 95 kB URL with limit 96 kB headers.\n  testLargeRequestUrl(95, 96);\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, LargeRequestHeadersRejected) {\n  // Send one 95 kB header with limit 60 kB and 100 headers.\n  testLargeRequestHeaders(95, 1, 60, 100);\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, LargeRequestHeadersAccepted) {\n  // Send one 95 kB header with limit 96 kB and 100 headers.\n  testLargeRequestHeaders(95, 1, 96, 100);\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, ManyRequestHeadersRejected) {\n  // Send 101 empty headers with limit 60 kB and 100 headers.\n  testLargeRequestHeaders(0, 101, 60, 80);\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, ManyRequestHeadersAccepted) {\n  // Send 145 empty headers with limit 60 kB and 150 headers.\n  testLargeRequestHeaders(0, 140, 60, 150);\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersRejected) {\n  // Default header (and trailer) count limit is 100.\n  config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1());\n  config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1());\n  Http::TestRequestTrailerMapImpl request_trailers;\n  for (int i = 0; i < 150; i++) {\n    request_trailers.addCopy(\"trailer\", std::string(1, 'a'));\n  }\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto 
response = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, 1, false);\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"431\", response->headers().getStatusValue());\n  } else {\n    response->waitForReset();\n    codec_client_->close();\n  }\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersAccepted) {\n  // Set header (and trailer) count limit to 200.\n  uint32_t max_count = 200;\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_common_http_protocol_options()->mutable_max_headers_count()->set_value(\n            max_count);\n      });\n  config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1());\n  max_request_headers_count_ = max_count;\n  Http::TestRequestTrailerMapImpl request_trailers;\n  for (int i = 0; i < 150; i++) {\n    request_trailers.addCopy(\"trailer\", std::string(1, 'a'));\n  }\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, 1, false);\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid\n// time-consuming byte size validations that will cause this test to 
timeout.\nTEST_P(DownstreamProtocolIntegrationTest, ManyRequestHeadersTimeout) {\n  // Set timeout for 5 seconds, and ensure that a request with 10k+ headers can be sent.\n  testManyRequestHeaders(std::chrono::milliseconds(5000));\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, LargeRequestTrailersAccepted) {\n  config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1());\n  testLargeRequestTrailers(60, 96);\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, LargeRequestTrailersRejected) {\n  config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1());\n  testLargeRequestTrailers(66, 60);\n}\n\n// This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid\n// time-consuming byte size verification that will cause this test to timeout.\nTEST_P(DownstreamProtocolIntegrationTest, ManyTrailerHeaders) {\n  max_request_headers_kb_ = 96;\n  max_request_headers_count_ = 20005;\n\n  config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1());\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_max_request_headers_kb()->set_value(max_request_headers_kb_);\n        hcm.mutable_common_http_protocol_options()->mutable_max_headers_count()->set_value(\n            max_request_headers_count_);\n      });\n\n  auto request_trailers = Http::RequestTrailerMapImpl::create();\n  for (int i = 0; i < 20000; i++) {\n    request_trailers->addCopy(Http::LowerCaseString(std::to_string(i)), \"\");\n  }\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder =\n      codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{\":method\", \"POST\"},\n                                                                 {\":path\", \"/test/long/url\"},\n                                                                 {\":scheme\", \"http\"},\n                             
                                    {\":authority\", \"host\"}});\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  codec_client_->sendTrailers(*request_encoder_, *request_trailers);\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n}\n\n// Regression tests for CVE-2019-18801. We only validate the behavior of large\n// :method request headers, since the case of other large headers is\n// covered in the various testLargeRequest-based integration tests here.\n//\n// The table below describes the expected behaviors (in addition we should never\n// see an ASSERT or ASAN failure trigger).\n//\n// Downstream    Upstream   Behavior expected\n// ------------------------------------------\n// H1            H1         Envoy will reject (HTTP/1 codec behavior)\n// H1            H2         Envoy will reject (HTTP/1 codec behavior)\n// H2            H1         Envoy will forward but backend will reject (HTTP/1\n//                          codec behavior)\n// H2            H2         Success\nTEST_P(ProtocolIntegrationTest, LargeRequestMethod) {\n  const std::string long_method = std::string(48 * 1024, 'a');\n  const Http::TestRequestHeaderMapImpl request_headers{{\":method\", long_method},\n                                                       {\":path\", \"/test/long/url\"},\n                                                       {\":scheme\", \"http\"},\n                                                       {\":authority\", \"host\"}};\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    auto encoder_decoder = codec_client_->startRequest(request_headers);\n    request_encoder_ = 
&encoder_decoder.first;\n    auto response = std::move(encoder_decoder.second);\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"400\", response->headers().getStatusValue());\n  } else {\n    ASSERT(downstreamProtocol() == Http::CodecClient::Type::HTTP2);\n    if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n      auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n      ASSERT_TRUE(\n          fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n      response->waitForEndStream();\n      EXPECT_TRUE(response->complete());\n      EXPECT_EQ(\"400\", response->headers().getStatusValue());\n    } else {\n      ASSERT(upstreamProtocol() == FakeHttpConnection::Type::HTTP2);\n      auto response =\n          sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n      EXPECT_TRUE(response->complete());\n    }\n  }\n}\n\n// Tests StopAllIterationAndBuffer. 
Verifies decode-headers-return-stop-all-filter calls decodeData\n// once after iteration is resumed.\nTEST_P(DownstreamProtocolIntegrationTest, TestDecodeHeadersReturnsStopAll) {\n  config_helper_.addFilter(R\"EOF(\nname: call-decodedata-once-filter\n)EOF\");\n  config_helper_.addFilter(R\"EOF(\nname: decode-headers-return-stop-all-filter\n)EOF\");\n  config_helper_.addFilter(R\"EOF(\nname: passthrough-filter\n)EOF\");\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends a request with headers and data.\n  changeHeadersForStopAllTests(default_request_headers_, false);\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  for (int i = 0; i < count_ - 1; i++) {\n    codec_client_->sendData(*request_encoder_, size_, false);\n  }\n  // Sleeps for 1s in order to be consistent with testDecodeHeadersReturnsStopAllWatermark.\n  absl::SleepFor(absl::Seconds(1));\n  codec_client_->sendData(*request_encoder_, size_, true);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(count_ * size_ + added_decoded_data_size_, upstream_request_->bodyLength());\n  EXPECT_EQ(true, upstream_request_->complete());\n\n  // Sends a request with headers, data, and trailers.\n  auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder_2.first;\n  response = std::move(encoder_decoder_2.second);\n  for (int i = 0; i < count_; i++) {\n    codec_client_->sendData(*request_encoder_, size_, false);\n  }\n  Http::TestRequestTrailerMapImpl request_trailers{{\"trailer\", \"trailer\"}};\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n  waitForNextUpstreamRequest();\n\n  
upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  verifyUpStreamRequestAfterStopAllFilter();\n}\n\n// Tests StopAllIterationAndWatermark. decode-headers-return-stop-all-watermark-filter sets buffer\n// limit to 100. Verifies data pause when limit is reached, and resume after iteration continues.\nTEST_P(DownstreamProtocolIntegrationTest, TestDecodeHeadersReturnsStopAllWatermark) {\n  config_helper_.addFilter(R\"EOF(\nname: decode-headers-return-stop-all-filter\n)EOF\");\n  config_helper_.addFilter(R\"EOF(\nname: passthrough-filter\n)EOF\");\n\n  // Sets initial stream window to min value to make the client sensitive to a low watermark.\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_http2_protocol_options()->mutable_initial_stream_window_size()->set_value(\n            ::Envoy::Http2::Utility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE);\n      });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends a request with headers and data.\n  changeHeadersForStopAllTests(default_request_headers_, true);\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  for (int i = 0; i < count_ - 1; i++) {\n    codec_client_->sendData(*request_encoder_, size_, false);\n  }\n  // Gives buffer 1s to react to buffer limit.\n  absl::SleepFor(absl::Seconds(1));\n  codec_client_->sendData(*request_encoder_, size_, true);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(count_ * size_ + added_decoded_data_size_, upstream_request_->bodyLength());\n  EXPECT_EQ(true, 
upstream_request_->complete());\n\n  // Sends a request with headers, data, and trailers.\n  auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder_2.first;\n  response = std::move(encoder_decoder_2.second);\n  for (int i = 0; i < count_ - 1; i++) {\n    codec_client_->sendData(*request_encoder_, size_, false);\n  }\n  // Gives buffer 1s to react to buffer limit.\n  absl::SleepFor(absl::Seconds(1));\n  codec_client_->sendData(*request_encoder_, size_, false);\n  Http::TestRequestTrailerMapImpl request_trailers{{\"trailer\", \"trailer\"}};\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  verifyUpStreamRequestAfterStopAllFilter();\n}\n\n// Test two filters that return StopAllIterationAndBuffer back-to-back.\nTEST_P(DownstreamProtocolIntegrationTest, TestTwoFiltersDecodeHeadersReturnsStopAll) {\n  config_helper_.addFilter(R\"EOF(\nname: decode-headers-return-stop-all-filter\n)EOF\");\n  config_helper_.addFilter(R\"EOF(\nname: decode-headers-return-stop-all-filter\n)EOF\");\n  config_helper_.addFilter(R\"EOF(\nname: passthrough-filter\n)EOF\");\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Sends a request with headers and data.\n  changeHeadersForStopAllTests(default_request_headers_, false);\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n  for (int i = 0; i < count_ - 1; i++) {\n    codec_client_->sendData(*request_encoder_, size_, false);\n  }\n  codec_client_->sendData(*request_encoder_, size_, true);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  
ASSERT_TRUE(response->complete());\n  EXPECT_EQ(count_ * size_ + added_decoded_data_size_, upstream_request_->bodyLength());\n  EXPECT_EQ(true, upstream_request_->complete());\n\n  // Sends a request with headers, data, and trailers.\n  auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder_2.first;\n  response = std::move(encoder_decoder_2.second);\n  for (int i = 0; i < count_; i++) {\n    codec_client_->sendData(*request_encoder_, size_, false);\n  }\n  Http::TestRequestTrailerMapImpl request_trailers{{\"trailer\", \"trailer\"}};\n  codec_client_->sendTrailers(*request_encoder_, request_trailers);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n  response->waitForEndStream();\n  verifyUpStreamRequestAfterStopAllFilter();\n}\n\n// Tests encodeHeaders() returns StopAllIterationAndBuffer.\nTEST_P(DownstreamProtocolIntegrationTest, TestEncodeHeadersReturnsStopAll) {\n  config_helper_.addFilter(R\"EOF(\nname: encode-headers-return-stop-all-filter\n)EOF\");\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_metadata(true); });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Upstream responds with headers, data and trailers.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  changeHeadersForStopAllTests(default_response_headers_, false);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  for (int i = 0; i < count_ - 1; i++) {\n    upstream_request_->encodeData(size_, false);\n  }\n  // Sleeps for 1s in order to be consistent with testEncodeHeadersReturnsStopAllWatermark.\n  absl::SleepFor(absl::Seconds(1));\n  upstream_request_->encodeData(size_, false);\n 
 Http::TestResponseTrailerMapImpl response_trailers{{\"response\", \"trailer\"}};\n  upstream_request_->encodeTrailers(response_trailers);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(count_ * size_ + added_decoded_data_size_, response->body().size());\n}\n\n// Tests encodeHeaders() returns StopAllIterationAndWatermark.\nTEST_P(DownstreamProtocolIntegrationTest, TestEncodeHeadersReturnsStopAllWatermark) {\n  config_helper_.addFilter(R\"EOF(\nname: encode-headers-return-stop-all-filter\n)EOF\");\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_metadata(true); });\n\n  // Sets initial stream window to min value to make the upstream sensitive to a low watermark.\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.mutable_http2_protocol_options()->mutable_initial_stream_window_size()->set_value(\n            ::Envoy::Http2::Utility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE);\n      });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Upstream responds with headers, data and trailers.\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);\n  waitForNextUpstreamRequest();\n\n  changeHeadersForStopAllTests(default_response_headers_, true);\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  for (int i = 0; i < count_ - 1; i++) {\n    upstream_request_->encodeData(size_, false);\n  }\n  // Gives buffer 1s to react to buffer limit.\n  absl::SleepFor(absl::Seconds(1));\n  upstream_request_->encodeData(size_, false);\n  Http::TestResponseTrailerMapImpl response_trailers{{\"response\", \"trailer\"}};\n  
upstream_request_->encodeTrailers(response_trailers);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(count_ * size_ + added_decoded_data_size_, response->body().size());\n}\n\n// Per https://github.com/envoyproxy/envoy/issues/7488 make sure we don't\n// combine set-cookie headers\nTEST_P(ProtocolIntegrationTest, MultipleSetCookies) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestResponseHeaderMapImpl response_headers{\n      {\":status\", \"200\"}, {\"set-cookie\", \"foo\"}, {\"set-cookie\", \"bar\"}};\n\n  auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, response_headers, 0);\n\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n  std::vector<absl::string_view> out;\n  Http::HeaderUtility::getAllOfHeader(response->headers(), \"set-cookie\", out);\n  ASSERT_EQ(out.size(), 2);\n  ASSERT_EQ(out[0], \"foo\");\n  ASSERT_EQ(out[1], \"bar\");\n}\n\n// Resets the downstream stream immediately and verifies that we clean up everything.\nTEST_P(ProtocolIntegrationTest, TestDownstreamResetIdleTimeout) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  config_helper_.setDownstreamHttpIdleTimeout(std::chrono::milliseconds(100));\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n\n  EXPECT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_,\n                                                        TestUtility::DefaultTimeout,\n                                                        max_request_headers_kb_));\n\n  EXPECT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    codec_client_->close();\n  } else {\n    
codec_client_->sendReset(encoder_decoder.first);\n  }\n\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  } else {\n    ASSERT_TRUE(upstream_request_->waitForReset());\n    ASSERT_TRUE(fake_upstream_connection_->close());\n    ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  }\n\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  EXPECT_THAT(waitForAccessLog(access_log_name_), Not(HasSubstr(\"DPE\")));\n}\n\n// Test connection is closed after single request processed.\nTEST_P(ProtocolIntegrationTest, ConnDurationTimeoutBasic) {\n  config_helper_.setDownstreamMaxConnectionDuration(std::chrono::milliseconds(500));\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_drain_timeout()->set_seconds(1); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024);\n  waitForNextUpstreamRequest();\n\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_total\", 1);\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_200\", 1);\n\n  ASSERT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(10000)));\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_max_duration_reached\", 1);\n}\n\n// Test inflight request is processed correctly when timeout fires during request processing.\nTEST_P(ProtocolIntegrationTest, ConnDurationInflightRequest) {\n  config_helper_.setDownstreamMaxConnectionDuration(std::chrono::milliseconds(500));\n  config_helper_.addConfigModifier(\n      
[](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_drain_timeout()->set_seconds(1); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024);\n  waitForNextUpstreamRequest();\n\n  // block and wait for counter to increase\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_max_duration_reached\", 1);\n\n  // ensure request processed correctly\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->encodeData(512, true);\n  response->waitForEndStream();\n\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_total\", 1);\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_200\", 1);\n\n  ASSERT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(10000)));\n}\n\n// Test connection is closed if no http requests were processed\nTEST_P(ProtocolIntegrationTest, ConnDurationTimeoutNoHttpRequest) {\n  config_helper_.setDownstreamMaxConnectionDuration(std::chrono::milliseconds(500));\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.mutable_drain_timeout()->set_seconds(1); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  ASSERT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(10000)));\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_max_duration_reached\", 1);\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, TestPrefetch) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n    
cluster->mutable_prefetch_policy()->mutable_per_upstream_prefetch_ratio()->set_value(1.5);\n  });\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response =\n      sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 0);\n  FakeHttpConnectionPtr fake_upstream_connection_two;\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    // For HTTP/1.1 there should be a prefetched connection.\n    ASSERT_TRUE(\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_two));\n  } else {\n    // For HTTP/2, the original connection can accommodate two requests.\n    ASSERT_FALSE(fake_upstreams_[0]->waitForHttpConnection(\n        *dispatcher_, fake_upstream_connection_two, std::chrono::milliseconds(5)));\n  }\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, BasicMaxStreamTimeout) {\n  config_helper_.setDownstreamMaxStreamDuration(std::chrono::milliseconds(500));\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n\n  test_server_->waitForCounterGe(\"http.config_test.downstream_rq_max_duration_reached\", 1);\n  response->waitForReset();\n  EXPECT_TRUE(response->complete());\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, BasicMaxStreamTimeoutLegacy) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  config_helper_.addRuntimeOverride(\"envoy.reloadable_features.allow_response_for_timeout\",\n                                    \"false\");\n  
config_helper_.setDownstreamMaxStreamDuration(std::chrono::milliseconds(500));\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  auto response = std::move(encoder_decoder.second);\n\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n\n  test_server_->waitForCounterGe(\"http.config_test.downstream_rq_max_duration_reached\", 1);\n  response->waitForReset();\n  EXPECT_FALSE(response->complete());\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"max_duration_timeout\"));\n}\n\n// Make sure that invalid authority headers get blocked at or before the HCM.\nTEST_P(DownstreamProtocolIntegrationTest, InvalidAuthority) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"POST\"},\n                                                 {\":path\", \"/test/long/url\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"ho|st|\"}};\n\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    // For HTTP/1 this is handled by the HCM, which sends a full 400 response.\n    response->waitForEndStream();\n    ASSERT_TRUE(response->complete());\n    EXPECT_EQ(\"400\", response->headers().getStatusValue());\n  } else {\n    // For HTTP/2 this is handled by nghttp2 which resets the connection without\n    // sending an HTTP response.\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n    ASSERT_FALSE(response->complete());\n  
}\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, ConnectIsBlocked) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"CONNECT\"}, {\":authority\", \"host.com:80\"}});\n\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    // Because CONNECT requests for HTTP/1 do not include a path, they will fail\n    // to find a route match and return a 404.\n    response->waitForEndStream();\n    EXPECT_EQ(\"404\", response->headers().getStatusValue());\n    EXPECT_TRUE(response->complete());\n  } else {\n    response->waitForReset();\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  }\n}\n\n// Make sure that with override_stream_error_on_invalid_http_message true, CONNECT\n// results in stream teardown not connection teardown.\nTEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) {\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    return;\n  }\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) -> void {\n        hcm.mutable_http2_protocol_options()\n            ->mutable_override_stream_error_on_invalid_http_message()\n            ->set_value(true);\n      });\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{\n      {\":method\", \"CONNECT\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}});\n\n  response->waitForReset();\n  EXPECT_FALSE(codec_client_->disconnected());\n}\n\n// Regression test for https://github.com/envoyproxy/envoy/issues/12131\nTEST_P(DownstreamProtocolIntegrationTest, Test100AndDisconnect) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = 
codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n  upstream_request_->encode100ContinueHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n  ASSERT_TRUE(fake_upstream_connection_->close());\n\n  // Make sure that a disconnect results in valid 5xx response headers even when preceded by a 100.\n  response->waitForEndStream();\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n}\n\nTEST_P(DownstreamProtocolIntegrationTest, Test100AndDisconnectLegacy) {\n  config_helper_.addRuntimeOverride(\"envoy.reloadable_features.allow_500_after_100\", \"false\");\n\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n  waitForNextUpstreamRequest();\n  upstream_request_->encode100ContinueHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"100\"}});\n  ASSERT_TRUE(fake_upstream_connection_->close());\n\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n    EXPECT_FALSE(response->complete());\n  } else {\n    response->waitForReset();\n    EXPECT_FALSE(response->complete());\n  }\n}\n\n// For tests which focus on downstream-to-Envoy behavior, and don't need to be\n// run with both HTTP/1 and HTTP/2 upstreams.\nINSTANTIATE_TEST_SUITE_P(Protocols, DownstreamProtocolIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams(\n                             {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2},\n                             {FakeHttpConnection::Type::HTTP1})),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(Protocols, ProtocolIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                  
       HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/proxy_proto_integration_test.cc",
    "content": "#include \"test/integration/proxy_proto_integration_test.h\"\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"fmt/format.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtoIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ProxyProtoIntegrationTest, CaptureTlvToMetadata) {\n  useListenerAccessLog(\n      \"%DYNAMIC_METADATA(envoy.filters.listener.proxy_protocol:PP2TypeAuthority)%\");\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    Network::ClientConnectionPtr conn = makeClientConnection(lookupPort(\"http\"));\n    constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54,\n                                  0x0a, 0x21, 0x11, 0x00, 0x1a, 0x01, 0x02, 0x03, 0x04, 0x00, 0x01,\n                                  0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 0x00, 0x00, 0x01, 0xff, 0x02,\n                                  0x00, 0x07, 0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d};\n    Buffer::OwnedImpl buf(buffer, sizeof(buffer));\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  cleanupUpstreamAndDownstream();\n  const std::string log_line = waitForAccessLog(listener_access_log_name_);\n  EXPECT_EQ(log_line, \"\\\"foo.com\\\"\");\n}\n\nTEST_P(ProxyProtoIntegrationTest, V1RouterRequestAndResponseWithBodyNoBuffer) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    Network::ClientConnectionPtr conn = makeClientConnection(lookupPort(\"http\"));\n    
Buffer::OwnedImpl buf(\"PROXY TCP4 1.2.3.4 254.254.254.254 65535 1234\\r\\n\");\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\nTEST_P(ProxyProtoIntegrationTest, V2RouterRequestAndResponseWithBodyNoBuffer) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    Network::ClientConnectionPtr conn = makeClientConnection(lookupPort(\"http\"));\n    constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49,\n                                  0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04,\n                                  0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0x04, 0xd2};\n    Buffer::OwnedImpl buf(buffer, sizeof(buffer));\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\nTEST_P(ProxyProtoIntegrationTest, V1RouterRequestAndResponseWithBodyNoBufferV6) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    auto conn = makeClientConnection(lookupPort(\"http\"));\n    Buffer::OwnedImpl buf(\"PROXY TCP6 1:2:3::4 FE00:: 65535 1234\\r\\n\");\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\nTEST_P(ProxyProtoIntegrationTest, V2RouterRequestAndResponseWithBodyNoBufferV6) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54,\n                                  0x0a, 0x21, 0x22, 0x00, 0x24, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03,\n                                  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,\n                                  0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,\n                                  0x00, 0x00, 0x00, 0x00, 0x00, 
0x08, 0x00, 0x02};\n    auto conn = makeClientConnection(lookupPort(\"http\"));\n    Buffer::OwnedImpl buf(buffer, sizeof(buffer));\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\nTEST_P(ProxyProtoIntegrationTest, RouterProxyUnknownRequestAndResponseWithBodyNoBuffer) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    auto conn = makeClientConnection(lookupPort(\"http\"));\n    Buffer::OwnedImpl buf(\"PROXY UNKNOWN\\r\\n\");\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\nTEST_P(ProxyProtoIntegrationTest, RouterProxyUnknownLongRequestAndResponseWithBodyNoBuffer) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    auto conn = makeClientConnection(lookupPort(\"http\"));\n    Buffer::OwnedImpl buf(\"PROXY UNKNOWN 1:2:3::4 FE00:: 65535 1234\\r\\n\");\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\n// Test that %DOWNSTREAM_DIRECT_REMOTE_ADDRESS%/%DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT%\n// returns the direct address, and %DOWSTREAM_REMOTE_ADDRESS% returns the proxy-protocol-provided\n// address.\nTEST_P(ProxyProtoIntegrationTest, AccessLog) {\n  useAccessLog(\"%DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT% %DOWNSTREAM_REMOTE_ADDRESS%\");\n\n  // Tell HCM to ignore x-forwarded-for so that the proxy-proto address is used.\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void { hcm.mutable_use_remote_address()->set_value(true); });\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    Network::ClientConnectionPtr conn = makeClientConnection(lookupPort(\"http\"));\n    Buffer::OwnedImpl 
buf(\"PROXY TCP4 1.2.3.4 254.254.254.254 12345 1234\\r\\n\");\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n  const std::string log_line = waitForAccessLog(access_log_name_);\n  const std::vector<absl::string_view> tokens = StringUtil::splitToken(log_line, \" \", false, true);\n\n  ASSERT_EQ(2, tokens.size());\n  EXPECT_EQ(tokens[0], Network::Test::getLoopbackAddressString(GetParam()));\n  EXPECT_EQ(tokens[1], \"1.2.3.4:12345\");\n}\n\nTEST_P(ProxyProtoIntegrationTest, DEPRECATED_FEATURE_TEST(OriginalDst)) {\n  // Change the cluster to an original destination cluster. An original destination cluster\n  // ignores the configured hosts, and instead uses the restored destination address from the\n  // incoming (server) connection as the destination address for the outgoing (client) connection.\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n    cluster->clear_load_assignment();\n    cluster->set_type(envoy::config::cluster::v3::Cluster::ORIGINAL_DST);\n    cluster->set_lb_policy(\n        envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB);\n  });\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    Network::ClientConnectionPtr conn = makeClientConnection(lookupPort(\"http\"));\n    // Create proxy protocol line that has the fake upstream address as the destination address.\n    // This address will become the \"restored\" address for the server connection and will\n    // be used as the destination address by the original destination cluster.\n    std::string proxyLine = fmt::format(\n        \"PROXY {} {} 65535 {}\\r\\n\",\n        GetParam() == Network::Address::IpVersion::v4 ? 
\"TCP4 1.2.3.4\" : \"TCP6 1:2:3::4\",\n        Network::Test::getLoopbackAddressString(GetParam()),\n        fake_upstreams_[0]->localAddress()->ip()->port());\n\n    Buffer::OwnedImpl buf(proxyLine);\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\nTEST_P(ProxyProtoIntegrationTest, ClusterProvided) {\n  // Change the cluster to an original destination cluster. An original destination cluster\n  // ignores the configured hosts, and instead uses the restored destination address from the\n  // incoming (server) connection as the destination address for the outgoing (client) connection.\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n    cluster->clear_load_assignment();\n    cluster->set_type(envoy::config::cluster::v3::Cluster::ORIGINAL_DST);\n    cluster->set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED);\n  });\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    Network::ClientConnectionPtr conn = makeClientConnection(lookupPort(\"http\"));\n    // Create proxy protocol line that has the fake upstream address as the destination address.\n    // This address will become the \"restored\" address for the server connection and will\n    // be used as the destination address by the original destination cluster.\n    std::string proxyLine = fmt::format(\n        \"PROXY {} {} 65535 {}\\r\\n\",\n        GetParam() == Network::Address::IpVersion::v4 ? 
\"TCP4 1.2.3.4\" : \"TCP6 1:2:3::4\",\n        Network::Test::getLoopbackAddressString(GetParam()),\n        fake_upstreams_[0]->localAddress()->ip()->port());\n\n    Buffer::OwnedImpl buf(proxyLine);\n    conn->write(buf, false);\n    return conn;\n  };\n\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/proxy_proto_integration_test.h",
    "content": "#pragma once\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/http/codec_client.h\"\n\n#include \"extensions/filters/listener/proxy_protocol/proxy_protocol.h\"\n\n#include \"test/integration/fake_upstream.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/server.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nclass ProxyProtoIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                  public HttpIntegrationTest {\npublic:\n  ProxyProtoIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {\n    config_helper_.addConfigModifier(\n        [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          ::envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proxy_protocol;\n          auto rule = proxy_protocol.add_rules();\n          rule->set_tlv_type(0x02);\n          rule->mutable_on_tlv_present()->set_key(\"PP2TypeAuthority\");\n\n          auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n          auto* ppv_filter = listener->add_listener_filters();\n          ppv_filter->set_name(\"envoy.listener.proxy_protocol\");\n          ppv_filter->mutable_typed_config()->PackFrom(proxy_protocol);\n        });\n  }\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/redirect_integration_test.cc",
    "content": "#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h\"\n#include \"envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h\"\n#include \"envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h\"\n\n#include \"test/integration/http_protocol_integration.h\"\n\nnamespace Envoy {\n\nusing testing::HasSubstr;\n\nnamespace {\nconstexpr char HandleThreeHopLocationFormat[] =\n    \"http://handle.internal.redirect.max.three.hop/path{}\";\n}\n\nclass RedirectIntegrationTest : public HttpProtocolIntegrationTest {\npublic:\n  void initialize() override {\n    envoy::config::route::v3::RetryPolicy retry_policy;\n\n    auto pass_through = config_helper_.createVirtualHost(\"pass.through.internal.redirect\");\n    config_helper_.addVirtualHost(pass_through);\n\n    auto handle = config_helper_.createVirtualHost(\"handle.internal.redirect\");\n    handle.mutable_routes(0)->set_name(\"redirect\");\n    handle.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy();\n    config_helper_.addVirtualHost(handle);\n\n    auto handle_max_3_hop =\n        config_helper_.createVirtualHost(\"handle.internal.redirect.max.three.hop\");\n    handle_max_3_hop.mutable_routes(0)->set_name(\"max_three_hop\");\n    handle_max_3_hop.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy();\n    handle_max_3_hop.mutable_routes(0)\n        ->mutable_route()\n        ->mutable_internal_redirect_policy()\n        ->mutable_max_internal_redirects()\n        ->set_value(3);\n    config_helper_.addVirtualHost(handle_max_3_hop);\n\n    HttpProtocolIntegrationTest::initialize();\n  }\n\nprotected:\n  // Returns the next stream that the fake upstream receives.\n  FakeStreamPtr waitForNextStream() {\n    
FakeStreamPtr new_stream = nullptr;\n    auto wait_new_stream_fn = [this,\n                               &new_stream](FakeHttpConnectionPtr& connection) -> AssertionResult {\n      AssertionResult result =\n          connection->waitForNewStream(*dispatcher_, new_stream, std::chrono::milliseconds(50));\n      if (result) {\n        ASSERT(new_stream);\n      }\n      return result;\n    };\n\n    // Using a while loop to poll for new connections and new streams on all\n    // connections because connection reuse may or may not be triggered.\n    while (new_stream == nullptr) {\n      FakeHttpConnectionPtr new_connection = nullptr;\n\n      AssertionResult result = fake_upstreams_[0]->waitForHttpConnection(\n          *dispatcher_, new_connection, std::chrono::milliseconds(50), 60, 100);\n      if (result) {\n        ASSERT(new_connection);\n        upstream_connections_.push_back(std::move(new_connection));\n      }\n\n      for (auto& connection : upstream_connections_) {\n        result = wait_new_stream_fn(connection);\n        if (result) {\n          break;\n        }\n      }\n    }\n\n    AssertionResult result = new_stream->waitForEndStream(*dispatcher_);\n    ASSERT(result);\n    return new_stream;\n  }\n\n  Http::TestResponseHeaderMapImpl redirect_response_{\n      {\":status\", \"302\"}, {\"content-length\", \"0\"}, {\"location\", \"http://authority2/new/url\"}};\n\n  std::vector<FakeHttpConnectionPtr> upstream_connections_;\n};\n\n// By default if internal redirects are not configured, redirects are proxied.\nTEST_P(RedirectIntegrationTest, RedirectNotConfigured) {\n  // Use base class initialize.\n  HttpProtocolIntegrationTest::initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0);\n  EXPECT_TRUE(response->complete());\n  EXPECT_EQ(\"302\", response->headers().getStatusValue());\n}\n\n// Now test a route with redirects configured 
on in pass-through mode.\nTEST_P(RedirectIntegrationTest, InternalRedirectPassedThrough) {\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  default_request_headers_.setHost(\"pass.through.internal.redirect\");\n  auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0);\n  EXPECT_EQ(\"302\", response->headers().getStatusValue());\n  EXPECT_EQ(\n      0,\n      test_server_->counter(\"cluster.cluster_0.upstream_internal_redirect_failed_total\")->value());\n}\n\nTEST_P(RedirectIntegrationTest, BasicInternalRedirect) {\n  useAccessLog(\"%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%\");\n  // Validate that header sanitization is only called once.\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.set_via(\"via_value\"); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  default_request_headers_.setHost(\"handle.internal.redirect\");\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(redirect_response_, true);\n\n  waitForNextUpstreamRequest();\n  ASSERT(upstream_request_->headers().EnvoyOriginalUrl() != nullptr);\n  EXPECT_EQ(\"http://handle.internal.redirect/test/long/url\",\n            upstream_request_->headers().getEnvoyOriginalUrlValue());\n  EXPECT_EQ(\"/new/url\", upstream_request_->headers().getPathValue());\n  EXPECT_EQ(\"authority2\", upstream_request_->headers().getHostValue());\n  EXPECT_EQ(\"via_value\", upstream_request_->headers().getViaValue());\n\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(1, 
test_server_->counter(\"cluster.cluster_0.upstream_internal_redirect_succeeded_total\")\n                   ->value());\n  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr(\"internal_redirect\"));\n}\n\nTEST_P(RedirectIntegrationTest, InternalRedirectWithThreeHopLimit) {\n  // Validate that header sanitization is only called once.\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.set_via(\"via_value\"); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  default_request_headers_.setHost(\"handle.internal.redirect.max.three.hop\");\n  default_request_headers_.setPath(\"/path0\");\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  std::vector<FakeStreamPtr> upstream_requests;\n  // Four requests to upstream: 1 original request + 3 following redirect\n  for (int i = 0; i < 4; i++) {\n    upstream_requests.push_back(waitForNextStream());\n\n    EXPECT_EQ(fmt::format(\"/path{}\", i), upstream_requests.back()->headers().getPathValue());\n    EXPECT_EQ(\"handle.internal.redirect.max.three.hop\",\n              upstream_requests.back()->headers().getHostValue());\n    EXPECT_EQ(\"via_value\", upstream_requests.back()->headers().getViaValue());\n\n    auto next_location = fmt::format(HandleThreeHopLocationFormat, i + 1);\n    redirect_response_.setLocation(next_location);\n    upstream_requests.back()->encodeHeaders(redirect_response_, true);\n  }\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"302\", response->headers().getStatusValue());\n  EXPECT_EQ(\n      1,\n      test_server_->counter(\"cluster.cluster_0.upstream_internal_redirect_failed_total\")->value());\n  EXPECT_EQ(\n      1, test_server_->counter(\"http.config_test.passthrough_internal_redirect_too_many_redirects\")\n             
->value());\n}\n\nTEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithBody) {\n  // Validate that header sanitization is only called once.\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.set_via(\"via_value\"); });\n  config_helper_.addFilter(R\"EOF(\n  name: pause-filter\n  typed_config:\n    \"@type\": type.googleapis.com/google.protobuf.Empty\n  )EOF\");\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  default_request_headers_.setHost(\"handle.internal.redirect\");\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(redirect_response_, true);\n\n  waitForNextUpstreamRequest();\n  ASSERT(upstream_request_->headers().EnvoyOriginalUrl() != nullptr);\n  EXPECT_EQ(\"http://handle.internal.redirect/test/long/url\",\n            upstream_request_->headers().getEnvoyOriginalUrlValue());\n  EXPECT_EQ(\"/new/url\", upstream_request_->headers().getPathValue());\n  EXPECT_EQ(\"authority2\", upstream_request_->headers().getHostValue());\n  EXPECT_EQ(\"via_value\", upstream_request_->headers().getViaValue());\n\n  Http::TestResponseHeaderMapImpl response_with_big_body(\n      {{\":status\", \"200\"}, {\"content-length\", \"2000000\"}});\n  upstream_request_->encodeHeaders(response_with_big_body, false);\n  upstream_request_->encodeData(2000000, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_0.upstream_internal_redirect_succeeded_total\")\n                   ->value());\n}\n\nTEST_P(RedirectIntegrationTest, InternalRedirectPreventedByPreviousRoutesPredicate) {\n  auto handle_prevent_repeated_target =\n      
config_helper_.createVirtualHost(\"handle.internal.redirect.no.repeated.target\");\n  auto* internal_redirect_policy = handle_prevent_repeated_target.mutable_routes(0)\n                                       ->mutable_route()\n                                       ->mutable_internal_redirect_policy();\n  internal_redirect_policy->mutable_max_internal_redirects()->set_value(10);\n  envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig\n      previous_routes_config;\n  auto* predicate = internal_redirect_policy->add_predicates();\n  predicate->set_name(\"previous_routes\");\n  predicate->mutable_typed_config()->PackFrom(previous_routes_config);\n  config_helper_.addVirtualHost(handle_prevent_repeated_target);\n\n  // Validate that header sanitization is only called once.\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.set_via(\"via_value\"); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  default_request_headers_.setHost(\"handle.internal.redirect.no.repeated.target\");\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  auto first_request = waitForNextStream();\n  // Redirect to another route\n  redirect_response_.setLocation(\"http://handle.internal.redirect.max.three.hop/random/path\");\n  first_request->encodeHeaders(redirect_response_, true);\n\n  auto second_request = waitForNextStream();\n  // Redirect back to the original route.\n  redirect_response_.setLocation(\"http://handle.internal.redirect.no.repeated.target/another/path\");\n  second_request->encodeHeaders(redirect_response_, true);\n\n  auto third_request = waitForNextStream();\n  // Redirect to the same route as the first redirect. 
This should fail.\n  redirect_response_.setLocation(\"http://handle.internal.redirect.max.three.hop/yet/another/path\");\n  third_request->encodeHeaders(redirect_response_, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"302\", response->headers().getStatusValue());\n  EXPECT_EQ(\"http://handle.internal.redirect.max.three.hop/yet/another/path\",\n            response->headers().getLocationValue());\n  EXPECT_EQ(2, test_server_->counter(\"cluster.cluster_0.upstream_internal_redirect_succeeded_total\")\n                   ->value());\n  EXPECT_EQ(\n      1,\n      test_server_->counter(\"http.config_test.passthrough_internal_redirect_predicate\")->value());\n}\n\nTEST_P(RedirectIntegrationTest, InternalRedirectPreventedByAllowListedRoutesPredicate) {\n  auto handle_allow_listed_redirect_route =\n      config_helper_.createVirtualHost(\"handle.internal.redirect.only.allow.listed.target\");\n  auto* internal_redirect_policy = handle_allow_listed_redirect_route.mutable_routes(0)\n                                       ->mutable_route()\n                                       ->mutable_internal_redirect_policy();\n\n  auto* allow_listed_routes_predicate = internal_redirect_policy->add_predicates();\n  allow_listed_routes_predicate->set_name(\"allow_listed_routes\");\n  envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig\n      allow_listed_routes_config;\n  *allow_listed_routes_config.add_allowed_route_names() = \"max_three_hop\";\n  allow_listed_routes_predicate->mutable_typed_config()->PackFrom(allow_listed_routes_config);\n\n  internal_redirect_policy->mutable_max_internal_redirects()->set_value(10);\n\n  config_helper_.addVirtualHost(handle_allow_listed_redirect_route);\n\n  // Validate that header sanitization is only called once.\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { 
hcm.set_via(\"via_value\"); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  default_request_headers_.setHost(\"handle.internal.redirect.only.allow.listed.target\");\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  auto first_request = waitForNextStream();\n  // Redirect to another route\n  redirect_response_.setLocation(\"http://handle.internal.redirect.max.three.hop/random/path\");\n  first_request->encodeHeaders(redirect_response_, true);\n\n  auto second_request = waitForNextStream();\n  // Redirect back to the original route.\n  redirect_response_.setLocation(\n      \"http://handle.internal.redirect.only.allow.listed.target/another/path\");\n  second_request->encodeHeaders(redirect_response_, true);\n\n  auto third_request = waitForNextStream();\n  // Redirect to the non-allow-listed route. This should fail.\n  redirect_response_.setLocation(\"http://handle.internal.redirect/yet/another/path\");\n  third_request->encodeHeaders(redirect_response_, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"302\", response->headers().getStatusValue());\n  EXPECT_EQ(\"http://handle.internal.redirect/yet/another/path\",\n            response->headers().getLocationValue());\n  EXPECT_EQ(2, test_server_->counter(\"cluster.cluster_0.upstream_internal_redirect_succeeded_total\")\n                   ->value());\n  EXPECT_EQ(\n      1,\n      test_server_->counter(\"http.config_test.passthrough_internal_redirect_predicate\")->value());\n}\n\nTEST_P(RedirectIntegrationTest, InternalRedirectPreventedBySafeCrossSchemePredicate) {\n  auto handle_safe_cross_scheme_route = config_helper_.createVirtualHost(\n      \"handle.internal.redirect.only.allow.safe.cross.scheme.redirect\");\n  auto* internal_redirect_policy = handle_safe_cross_scheme_route.mutable_routes(0)\n                                       ->mutable_route()\n            
                           ->mutable_internal_redirect_policy();\n\n  internal_redirect_policy->set_allow_cross_scheme_redirect(true);\n\n  auto* predicate = internal_redirect_policy->add_predicates();\n  predicate->set_name(\"safe_cross_scheme_predicate\");\n  envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig\n      predicate_config;\n  predicate->mutable_typed_config()->PackFrom(predicate_config);\n\n  internal_redirect_policy->mutable_max_internal_redirects()->set_value(10);\n\n  config_helper_.addVirtualHost(handle_safe_cross_scheme_route);\n\n  // Validate that header sanitization is only called once.\n  config_helper_.addConfigModifier(\n      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n             hcm) { hcm.set_via(\"via_value\"); });\n  initialize();\n\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  default_request_headers_.setHost(\n      \"handle.internal.redirect.only.allow.safe.cross.scheme.redirect\");\n  IntegrationStreamDecoderPtr response =\n      codec_client_->makeHeaderOnlyRequest(default_request_headers_);\n\n  auto first_request = waitForNextStream();\n  // Redirect to another route\n  redirect_response_.setLocation(\"http://handle.internal.redirect.max.three.hop/random/path\");\n  first_request->encodeHeaders(redirect_response_, true);\n\n  auto second_request = waitForNextStream();\n  // Redirect back to the original route.\n  redirect_response_.setLocation(\n      \"http://handle.internal.redirect.only.allow.safe.cross.scheme.redirect/another/path\");\n  second_request->encodeHeaders(redirect_response_, true);\n\n  auto third_request = waitForNextStream();\n  // Redirect to https target. 
This should fail.\n  redirect_response_.setLocation(\"https://handle.internal.redirect/yet/another/path\");\n  third_request->encodeHeaders(redirect_response_, true);\n\n  response->waitForEndStream();\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"302\", response->headers().getStatusValue());\n  EXPECT_EQ(\"https://handle.internal.redirect/yet/another/path\",\n            response->headers().getLocationValue());\n  EXPECT_EQ(2, test_server_->counter(\"cluster.cluster_0.upstream_internal_redirect_succeeded_total\")\n                   ->value());\n  EXPECT_EQ(\n      1,\n      test_server_->counter(\"http.config_test.passthrough_internal_redirect_predicate\")->value());\n}\n\nTEST_P(RedirectIntegrationTest, InvalidRedirect) {\n  initialize();\n\n  redirect_response_.setLocation(\"invalid_url\");\n\n  // Send the same request as above, only send an invalid URL as the response.\n  // The request should not be redirected.\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  default_request_headers_.setHost(\"handle.internal.redirect\");\n  auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0);\n  EXPECT_EQ(\"302\", response->headers().getStatusValue());\n  EXPECT_EQ(\n      1,\n      test_server_->counter(\"cluster.cluster_0.upstream_internal_redirect_failed_total\")->value());\n}\n\nINSTANTIATE_TEST_SUITE_P(Protocols, RedirectIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/rtds_integration_test.cc",
    "content": "#include \"envoy/service/runtime/v3/rtds.pb.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\n// TODO(fredlas) set_node_on_first_message_only was true; the delta+SotW unification\n//               work restores it here.\nstd::string tdsBootstrapConfig(absl::string_view api_type) {\n  return fmt::format(R\"EOF(\nstatic_resources:\n  clusters:\n  - name: dummy_cluster\n    http2_protocol_options: {{}}\n    load_assignment:\n      cluster_name: dummy_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 0\n  - name: rtds_cluster\n    http2_protocol_options: {{}}\n    load_assignment:\n      cluster_name: rtds_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 0\nlayered_runtime:\n  layers:\n  - name: some_static_layer\n    static_layer:\n      foo: whatevs\n      bar: yar\n  - name: some_rtds_layer\n    rtds_layer:\n      name: some_rtds_layer\n      rtds_config:\n        api_config_source:\n          api_type: {}\n          grpc_services:\n            envoy_grpc:\n              cluster_name: rtds_cluster\n          set_node_on_first_message_only: false\n  - name: some_admin_layer\n    admin_layer: {{}}\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\n)EOF\",\n                     api_type, Platform::null_device_path);\n}\n\nclass RtdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public HttpIntegrationTest {\npublic:\n  RtdsIntegrationTest()\n      : HttpIntegrationTest(\n            Http::CodecClient::Type::HTTP2, ipVersion(),\n            
tdsBootstrapConfig(sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? \"GRPC\" : \"DELTA_GRPC\")) {\n    use_lds_ = false;\n    create_xds_upstream_ = true;\n    sotw_or_delta_ = sotwOrDelta();\n  }\n\n  void TearDown() override { cleanUpXdsConnection(); }\n\n  void initialize() override {\n    // The tests infra expects the xDS server to be the second fake upstream, so\n    // we need a dummy data plane cluster.\n    setUpstreamCount(1);\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n    HttpIntegrationTest::initialize();\n    // Register admin port.\n    registerTestServerPorts({});\n    initial_load_success_ = test_server_->counter(\"runtime.load_success\")->value();\n    initial_keys_ = test_server_->gauge(\"runtime.num_keys\")->value();\n  }\n\n  void acceptXdsConnection() {\n    // Initial RTDS connection.\n    createXdsConnection();\n    AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    xds_stream_->startGrpcStream();\n  }\n\n  std::string getRuntimeKey(const std::string& key) {\n    auto response = IntegrationUtil::makeSingleRequest(\n        lookupPort(\"admin\"), \"GET\", \"/runtime?format=json\", \"\", downstreamProtocol(), version_);\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n    Json::ObjectSharedPtr loader = TestEnvironment::jsonLoadFromString(response->body());\n    auto entries = loader->getObject(\"entries\");\n    if (entries->hasObject(key)) {\n      return entries->getObject(key)->getString(\"final_value\");\n    }\n    return \"\";\n  }\n\n  uint32_t initial_load_success_{};\n  uint32_t initial_keys_{};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, RtdsIntegrationTest,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\nTEST_P(RtdsIntegrationTest, RtdsReload) {\n  initialize();\n  acceptXdsConnection();\n\n  EXPECT_EQ(\"whatevs\", getRuntimeKey(\"foo\"));\n  
EXPECT_EQ(\"yar\", getRuntimeKey(\"bar\"));\n  EXPECT_EQ(\"\", getRuntimeKey(\"baz\"));\n\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, \"\", {\"some_rtds_layer\"},\n                                      {\"some_rtds_layer\"}, {}, true));\n  auto some_rtds_layer = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: some_rtds_layer\n    layer:\n      foo: bar\n      baz: meh\n  )EOF\");\n  sendDiscoveryResponse<envoy::service::runtime::v3::Runtime>(\n      Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, \"1\");\n  test_server_->waitForCounterGe(\"runtime.load_success\", initial_load_success_ + 1);\n\n  EXPECT_EQ(\"bar\", getRuntimeKey(\"foo\"));\n  EXPECT_EQ(\"yar\", getRuntimeKey(\"bar\"));\n  EXPECT_EQ(\"meh\", getRuntimeKey(\"baz\"));\n\n  EXPECT_EQ(0, test_server_->counter(\"runtime.load_error\")->value());\n  EXPECT_EQ(initial_load_success_ + 1, test_server_->counter(\"runtime.load_success\")->value());\n  EXPECT_EQ(initial_keys_ + 1, test_server_->gauge(\"runtime.num_keys\")->value());\n  EXPECT_EQ(3, test_server_->gauge(\"runtime.num_layers\")->value());\n\n  EXPECT_TRUE(\n      compareDiscoveryRequest(Config::TypeUrl::get().Runtime, \"1\", {\"some_rtds_layer\"}, {}, {}));\n  some_rtds_layer = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: some_rtds_layer\n    layer:\n      baz: saz\n  )EOF\");\n  sendDiscoveryResponse<envoy::service::runtime::v3::Runtime>(\n      Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, \"2\");\n  test_server_->waitForCounterGe(\"runtime.load_success\", initial_load_success_ + 2);\n\n  EXPECT_EQ(\"whatevs\", getRuntimeKey(\"foo\"));\n  EXPECT_EQ(\"yar\", getRuntimeKey(\"bar\"));\n  EXPECT_EQ(\"saz\", getRuntimeKey(\"baz\"));\n\n  EXPECT_EQ(0, test_server_->counter(\"runtime.load_error\")->value());\n  EXPECT_EQ(initial_load_success_ + 2, test_server_->counter(\"runtime.load_success\")->value());\n  
EXPECT_EQ(initial_keys_ + 1, test_server_->gauge(\"runtime.num_keys\")->value());\n  EXPECT_EQ(3, test_server_->gauge(\"runtime.num_layers\")->value());\n}\n\n// Verify that RTDS initialization starts only after initialization of all primary clusters has\n// completed. Primary cluster initialization completes asynchronously when some of the clusters use\n// DNS for endpoint discovery or when health check is configured.\n// This test uses health checking of the first cluster to make primary cluster initialization to\n// complete asynchronously.\nTEST_P(RtdsIntegrationTest, RtdsAfterAsyncPrimaryClusterInitialization) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    // Enable health checking for the first cluster.\n    auto* dummy_cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n    auto* health_check = dummy_cluster->add_health_checks();\n    health_check->mutable_timeout()->set_seconds(30);\n    health_check->mutable_interval()->CopyFrom(\n        Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n    health_check->mutable_no_traffic_interval()->CopyFrom(\n        Protobuf::util::TimeUtil::MillisecondsToDuration(100));\n    health_check->mutable_unhealthy_threshold()->set_value(1);\n    health_check->mutable_healthy_threshold()->set_value(1);\n    health_check->mutable_http_health_check()->set_path(\"/healthcheck\");\n    health_check->mutable_http_health_check()->set_codec_client_type(\n        envoy::type::v3::CodecClientType::HTTP2);\n  });\n\n  initialize();\n\n  // Make sure statically provisioned runtime values were loaded.\n  EXPECT_EQ(\"whatevs\", getRuntimeKey(\"foo\"));\n  EXPECT_EQ(\"yar\", getRuntimeKey(\"bar\"));\n  EXPECT_EQ(\"\", getRuntimeKey(\"baz\"));\n\n  // Respond to the initial health check, which should complete initialization of primary clusters.\n  waitForNextUpstreamRequest();\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", 
\"200\"}}, true);\n  test_server_->waitForGaugeEq(\"cluster.dummy_cluster.membership_healthy\", 1);\n\n  // After this xDS connection should be established. Verify that dynamic runtime values are loaded.\n  acceptXdsConnection();\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, \"\", {\"some_rtds_layer\"},\n                                      {\"some_rtds_layer\"}, {}, true));\n  auto some_rtds_layer = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R\"EOF(\n    name: some_rtds_layer\n    layer:\n      foo: bar\n      baz: meh\n  )EOF\");\n  sendDiscoveryResponse<envoy::service::runtime::v3::Runtime>(\n      Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, \"1\");\n  test_server_->waitForCounterGe(\"runtime.load_success\", initial_load_success_ + 1);\n\n  EXPECT_EQ(\"bar\", getRuntimeKey(\"foo\"));\n  EXPECT_EQ(\"yar\", getRuntimeKey(\"bar\"));\n  EXPECT_EQ(\"meh\", getRuntimeKey(\"baz\"));\n\n  EXPECT_EQ(0, test_server_->counter(\"runtime.load_error\")->value());\n  EXPECT_EQ(initial_load_success_ + 1, test_server_->counter(\"runtime.load_success\")->value());\n  EXPECT_EQ(initial_keys_ + 1, test_server_->gauge(\"runtime.num_keys\")->value());\n  EXPECT_EQ(3, test_server_->gauge(\"runtime.num_layers\")->value());\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/run_envoy_test.sh",
    "content": "#!/bin/bash\n\nexport ENVOY_BIN=\"${TEST_SRCDIR}/envoy/test/integration/hotrestart_main\"\n\n# shellcheck source=test/integration/test_utility.sh\nsource \"${TEST_SRCDIR}/envoy/test/integration/test_utility.sh\"\n\nfunction expect_fail_with_error() {\n  log=\"${TEST_TMPDIR}/envoy.log\"\n  rm -f \"$log\"\n  expected_error=\"$1\"\n  shift\n  echo \"${ENVOY_BIN} --use-dynamic-base-id $*\" \">&\" \"$log\"\n  ${ENVOY_BIN} --use-dynamic-base-id \"$@\" >& \"$log\"\n  EXIT_CODE=$?\n  cat \"$log\"\n  check [ $EXIT_CODE -eq 1 ]\n  check grep \"$expected_error\" \"$log\"\n}\n\n\nstart_test \"Launching envoy with a bogus command line flag.\"\nexpect_fail_with_error \"PARSE ERROR: Argument: --bogus-flag\" --bogus-flag\n\nstart_test \"Launching envoy without --config-path or --config-yaml fails.\"\nexpect_fail_with_error \\\n  \"At least one of --config-path or --config-yaml or Options::configProto() should be non-empty\"\n\nstart_test \"Launching envoy with unknown IP address.\"\nexpect_fail_with_error \"error: unknown IP address version\" --local-address-ip-version foo\n\nstart_test \"Launching envoy with unknown mode.\"\nexpect_fail_with_error \"error: unknown mode\" --mode foo\n\nstart_test \"Launching envoy with bogus component log level.\"\nexpect_fail_with_error \"error: component log level not correctly specified\" --component-log-level upstream:foo:bar\n\nstart_test \"Launching envoy with invalid log level.\"\nexpect_fail_with_error \"error: invalid log level specified\" --component-log-level upstream:foo\n\nstart_test \"Launching envoy with invalid component.\"\nexpect_fail_with_error \"error: invalid component specified\" --component-log-level foo:debug\n"
  },
  {
    "path": "test/integration/scoped_rds_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/scoped_route.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/config/version_converter.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/resources.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass ScopedRdsIntegrationTest : public HttpIntegrationTest,\n                                 public Grpc::DeltaSotwIntegrationParamTest {\nprotected:\n  struct FakeUpstreamInfo {\n    FakeHttpConnectionPtr connection_;\n    FakeUpstream* upstream_{};\n    absl::flat_hash_map<std::string, FakeStreamPtr> stream_by_resource_name_;\n  };\n\n  ScopedRdsIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion(), realTime()) {}\n\n  ~ScopedRdsIntegrationTest() override { resetConnections(); }\n\n  void initialize() override {\n    // Setup two upstream hosts, one for each cluster.\n    setUpstreamCount(2);\n\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // Add the static cluster to serve SRDS.\n      auto* cluster_1 = bootstrap.mutable_static_resources()->add_clusters();\n      cluster_1->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      cluster_1->set_name(\"cluster_1\");\n\n      // Add the static cluster to serve SRDS.\n      auto* scoped_rds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      
scoped_rds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      scoped_rds_cluster->set_name(\"srds_cluster\");\n      scoped_rds_cluster->mutable_http2_protocol_options();\n\n      // Add the static cluster to serve RDS.\n      auto* rds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      rds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      rds_cluster->set_name(\"rds_cluster\");\n      rds_cluster->mutable_http2_protocol_options();\n    });\n\n    config_helper_.addConfigModifier(\n        [this](\n            envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                http_connection_manager) {\n          const std::string& scope_key_builder_config_yaml = R\"EOF(\nfragments:\n  - header_value_extractor:\n      name: Addr\n      element_separator: ;\n      element:\n        key: x-foo-key\n        separator: =\n)EOF\";\n          envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::\n              ScopeKeyBuilder scope_key_builder;\n          TestUtility::loadFromYaml(scope_key_builder_config_yaml, scope_key_builder);\n          auto* scoped_routes = http_connection_manager.mutable_scoped_routes();\n          scoped_routes->set_name(srds_config_name_);\n          *scoped_routes->mutable_scope_key_builder() = scope_key_builder;\n\n          // Set resource api version for rds.\n          envoy::config::core::v3::ConfigSource* rds_config_source =\n              scoped_routes->mutable_rds_config_source();\n          rds_config_source->set_resource_api_version(envoy::config::core::v3::ApiVersion::V3);\n\n          // Set transport api version for rds.\n          envoy::config::core::v3::ApiConfigSource* rds_api_config_source =\n              rds_config_source->mutable_api_config_source();\n          rds_api_config_source->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3);\n\n          // Add grpc service for rds.\n       
   rds_api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n          envoy::config::core::v3::GrpcService* grpc_service =\n              rds_api_config_source->add_grpc_services();\n          setGrpcService(*grpc_service, \"rds_cluster\", getRdsFakeUpstream().localAddress());\n\n          // Set resource api version for scoped rds.\n          envoy::config::core::v3::ConfigSource* srds_config_source =\n              scoped_routes->mutable_scoped_rds()->mutable_scoped_rds_config_source();\n          srds_config_source->set_resource_api_version(envoy::config::core::v3::ApiVersion::V3);\n\n          // Set Transport api version for scoped_rds.\n          envoy::config::core::v3::ApiConfigSource* srds_api_config_source =\n              srds_config_source->mutable_api_config_source();\n          srds_api_config_source->set_transport_api_version(\n              envoy::config::core::v3::ApiVersion::V3);\n\n          // Add grpc service for scoped rds.\n          if (isDelta()) {\n            srds_api_config_source->set_api_type(\n                envoy::config::core::v3::ApiConfigSource::DELTA_GRPC);\n          } else {\n            srds_api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n          }\n          srds_api_config_source->set_transport_api_version(\n              envoy::config::core::v3::ApiVersion::V3);\n          grpc_service = srds_api_config_source->add_grpc_services();\n          setGrpcService(*grpc_service, \"srds_cluster\", getScopedRdsFakeUpstream().localAddress());\n        });\n    HttpIntegrationTest::initialize();\n  }\n\n  void createUpstreams() override {\n    HttpIntegrationTest::createUpstreams();\n    // Create the SRDS upstream.\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n    // Create the RDS upstream.\n    addFakeUpstream(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void resetFakeUpstreamInfo(FakeUpstreamInfo* upstream_info) {\n    ASSERT(upstream_info->upstream_ != 
nullptr);\n\n    AssertionResult result = upstream_info->connection_->close();\n    RELEASE_ASSERT(result, result.message());\n    result = upstream_info->connection_->waitForDisconnect();\n    RELEASE_ASSERT(result, result.message());\n    upstream_info->connection_.reset();\n  }\n\n  void resetConnections() {\n    if (rds_upstream_info_.upstream_ != nullptr) {\n      resetFakeUpstreamInfo(&rds_upstream_info_);\n    }\n    resetFakeUpstreamInfo(&scoped_rds_upstream_info_);\n  }\n\n  FakeUpstream& getRdsFakeUpstream() const { return *fake_upstreams_[3]; }\n\n  FakeUpstream& getScopedRdsFakeUpstream() const { return *fake_upstreams_[2]; }\n\n  void createStream(FakeUpstreamInfo* upstream_info, FakeUpstream& upstream,\n                    const std::string& resource_name) {\n    if (upstream_info->upstream_ == nullptr) {\n      // bind upstream if not yet.\n      upstream_info->upstream_ = &upstream;\n      AssertionResult result =\n          upstream_info->upstream_->waitForHttpConnection(*dispatcher_, upstream_info->connection_);\n      RELEASE_ASSERT(result, result.message());\n    }\n    if (!upstream_info->stream_by_resource_name_.try_emplace(resource_name, nullptr).second) {\n      RELEASE_ASSERT(false,\n                     fmt::format(\"stream with resource name '{}' already exists!\", resource_name));\n    }\n    auto result = upstream_info->connection_->waitForNewStream(\n        *dispatcher_, upstream_info->stream_by_resource_name_[resource_name]);\n    RELEASE_ASSERT(result, result.message());\n    upstream_info->stream_by_resource_name_[resource_name]->startGrpcStream();\n  }\n\n  void createRdsStream(const std::string& resource_name) {\n    createStream(&rds_upstream_info_, getRdsFakeUpstream(), resource_name);\n  }\n\n  void createScopedRdsStream() {\n    createStream(&scoped_rds_upstream_info_, getScopedRdsFakeUpstream(), srds_config_name_);\n  }\n\n  void sendRdsResponse(const std::string& route_config, const std::string& version) {\n    
envoy::service::discovery::v3::DiscoveryResponse response;\n    std::string route_conguration_type_url =\n        \"type.googleapis.com/envoy.config.route.v3.RouteConfiguration\";\n    response.set_version_info(version);\n    response.set_type_url(route_conguration_type_url);\n    auto route_configuration =\n        TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(route_config);\n    response.add_resources()->PackFrom(route_configuration);\n    ASSERT(rds_upstream_info_.stream_by_resource_name_[route_configuration.name()] != nullptr);\n    rds_upstream_info_.stream_by_resource_name_[route_configuration.name()]->sendGrpcMessage(\n        response);\n  }\n\n  void sendSrdsResponse(const std::vector<std::string>& sotw_list,\n                        const std::vector<std::string>& to_add_list,\n                        const std::vector<std::string>& to_delete_list,\n                        const std::string& version) {\n    if (isDelta()) {\n      sendDeltaScopedRdsResponse(to_add_list, to_delete_list, version);\n    } else {\n      sendSotwScopedRdsResponse(sotw_list, version);\n    }\n  }\n\n  void sendDeltaScopedRdsResponse(const std::vector<std::string>& to_add_list,\n                                  const std::vector<std::string>& to_delete_list,\n                                  const std::string& version) {\n    ASSERT(scoped_rds_upstream_info_.stream_by_resource_name_[srds_config_name_] != nullptr);\n    std::string scoped_route_configuration_type_url =\n        \"type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration\";\n    envoy::service::discovery::v3::DeltaDiscoveryResponse response;\n    response.set_system_version_info(version);\n    response.set_type_url(scoped_route_configuration_type_url);\n\n    for (const auto& scope_name : to_delete_list) {\n      *response.add_removed_resources() = scope_name;\n    }\n    for (const auto& resource_proto : to_add_list) {\n      envoy::config::route::v3::ScopedRouteConfiguration 
scoped_route_proto;\n      TestUtility::loadFromYaml(resource_proto, scoped_route_proto);\n      auto resource = response.add_resources();\n      resource->set_name(scoped_route_proto.name());\n      resource->set_version(version);\n      resource->mutable_resource()->PackFrom(scoped_route_proto);\n    }\n    scoped_rds_upstream_info_.stream_by_resource_name_[srds_config_name_]->sendGrpcMessage(\n        response);\n  }\n\n  void sendSotwScopedRdsResponse(const std::vector<std::string>& resource_protos,\n                                 const std::string& version) {\n    ASSERT(scoped_rds_upstream_info_.stream_by_resource_name_[srds_config_name_] != nullptr);\n\n    std::string scoped_route_configuration_type_url =\n        \"type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration\";\n    envoy::service::discovery::v3::DiscoveryResponse response;\n    response.set_version_info(version);\n    response.set_type_url(scoped_route_configuration_type_url);\n\n    for (const auto& resource_proto : resource_protos) {\n      envoy::config::route::v3::ScopedRouteConfiguration scoped_route_proto;\n      TestUtility::loadFromYaml(resource_proto, scoped_route_proto);\n      response.add_resources()->PackFrom(scoped_route_proto);\n    }\n    scoped_rds_upstream_info_.stream_by_resource_name_[srds_config_name_]->sendGrpcMessage(\n        response);\n  }\n\n  bool isDelta() { return sotwOrDelta() == Grpc::SotwOrDelta::Delta; }\n\n  const std::string srds_config_name_{\"foo-scoped-routes\"};\n  FakeUpstreamInfo scoped_rds_upstream_info_;\n  FakeUpstreamInfo rds_upstream_info_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsAndGrpcTypes, ScopedRdsIntegrationTest,\n                         DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// Test that a SRDS DiscoveryResponse is successfully processed.\n\nTEST_P(ScopedRdsIntegrationTest, BasicSuccess) {\n  const std::string scope_tmpl = R\"EOF(\nname: {}\nroute_configuration_name: {}\nkey:\n  fragments:\n    - string_key: 
{}\n)EOF\";\n  const std::string scope_route1 = fmt::format(scope_tmpl, \"foo_scope1\", \"foo_route1\", \"foo-route\");\n  const std::string scope_route2 = fmt::format(scope_tmpl, \"foo_scope2\", \"foo_route1\", \"bar-route\");\n\n  const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/\" }}\n          route: {{ cluster: {} }}\n)EOF\";\n\n  on_server_init_function_ = [&]() {\n    createScopedRdsStream();\n    sendSrdsResponse({scope_route1, scope_route2}, {scope_route1, scope_route2}, {}, \"1\");\n    createRdsStream(\"foo_route1\");\n    // CreateRdsStream waits for connection which is fired by RDS subscription.\n    sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route1\", \"cluster_0\"), \"1\");\n  };\n  initialize();\n  registerTestServerPorts({\"http\"});\n\n  // No scope key matches \"xyz-route\".\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=xyz-route\"}});\n  response->waitForEndStream();\n  verifyResponse(std::move(response), \"404\", Http::TestResponseHeaderMapImpl{}, \"\");\n  cleanupUpstreamAndDownstream();\n\n  // Test \"foo-route\" and 'bar-route' both gets routed to cluster_0.\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route1.update_success\", 1);\n  for (const std::string& scope_key : std::vector<std::string>{\"foo-route\", \"bar-route\"}) {\n    sendRequestAndVerifyResponse(\n        Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                       {\":path\", \"/meh\"},\n                   
                    {\":authority\", \"host\"},\n                                       {\":scheme\", \"http\"},\n                                       {\"Addr\", fmt::format(\"x-foo-key={}\", scope_key)}},\n        456, Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"service\", scope_key}}, 123,\n        /*cluster_0*/ 0);\n  }\n  test_server_->waitForCounterGe(\"http.config_test.scoped_rds.foo-scoped-routes.update_attempt\",\n                                 // update_attempt only increase after a response\n                                 isDelta() ? 1 : 2);\n  test_server_->waitForCounterGe(\"http.config_test.scoped_rds.foo-scoped-routes.update_success\", 1);\n  // The version gauge should be set to xxHash64(\"1\").\n  test_server_->waitForGaugeEq(\"http.config_test.scoped_rds.foo-scoped-routes.version\",\n                               13237225503670494420UL);\n\n  // Add a new scope scope_route3 with a brand new RouteConfiguration foo_route2.\n  const std::string scope_route3 = fmt::format(scope_tmpl, \"foo_scope3\", \"foo_route2\", \"baz-route\");\n\n  sendSrdsResponse({scope_route1, scope_route2, scope_route3}, /*added*/ {scope_route3}, {}, \"2\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route1.update_attempt\", 2);\n  sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route1\", \"cluster_1\"), \"3\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route1.update_success\", 2);\n  createRdsStream(\"foo_route2\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route2.update_attempt\", 1);\n  sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route2\", \"cluster_0\"), \"1\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route2.update_success\", 1);\n  test_server_->waitForCounterGe(\"http.config_test.scoped_rds.foo-scoped-routes.update_success\", 2);\n  // The version gauge should be set to xxHash64(\"2\").\n  
test_server_->waitForGaugeEq(\"http.config_test.scoped_rds.foo-scoped-routes.version\",\n                               6927017134761466251UL);\n  // After RDS update, requests within scope 'foo_scope1' or 'foo_scope2' get routed to\n  // 'cluster_1'.\n  for (const std::string& scope_key : std::vector<std::string>{\"foo-route\", \"bar-route\"}) {\n    sendRequestAndVerifyResponse(\n        Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                       {\":path\", \"/meh\"},\n                                       {\":authority\", \"host\"},\n                                       {\":scheme\", \"http\"},\n                                       {\"Addr\", fmt::format(\"x-foo-key={}\", scope_key)}},\n        456, Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"service\", scope_key}}, 123,\n        /*cluster_1*/ 1);\n  }\n  // Now requests within scope 'foo_scope3' get routed to 'cluster_0'.\n  sendRequestAndVerifyResponse(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", fmt::format(\"x-foo-key={}\", \"baz-route\")}},\n      456, Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"service\", \"bluh\"}}, 123,\n      /*cluster_0*/ 0);\n\n  // Delete foo_scope1 and requests within the scope gets 400s.\n  sendSrdsResponse({scope_route2, scope_route3}, {}, {\"foo_scope1\"}, \"3\");\n  test_server_->waitForCounterGe(\"http.config_test.scoped_rds.foo-scoped-routes.update_success\", 3);\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     {\":authority\", 
\"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=foo-route\"}});\n  response->waitForEndStream();\n  verifyResponse(std::move(response), \"404\", Http::TestResponseHeaderMapImpl{}, \"\");\n  cleanupUpstreamAndDownstream();\n  // Add a new scope foo_scope4.\n  const std::string& scope_route4 =\n      fmt::format(scope_tmpl, \"foo_scope4\", \"foo_route4\", \"xyz-route\");\n  sendSrdsResponse({scope_route3, scope_route2, scope_route4}, {scope_route4}, {}, \"4\");\n  test_server_->waitForCounterGe(\"http.config_test.scoped_rds.foo-scoped-routes.update_success\", 4);\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=xyz-route\"}});\n  response->waitForEndStream();\n  // Get 404 because RDS hasn't pushed route configuration \"foo_route4\" yet.\n  // But scope is found and the Router::NullConfigImpl is returned.\n  verifyResponse(std::move(response), \"404\", Http::TestResponseHeaderMapImpl{}, \"\");\n  cleanupUpstreamAndDownstream();\n\n  // RDS updated foo_route4, requests with scope key \"xyz-route\" now hit cluster_1.\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route4.update_attempt\", 1);\n  createRdsStream(\"foo_route4\");\n  sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route4\", \"cluster_1\"), \"3\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route4.update_success\", 1);\n  sendRequestAndVerifyResponse(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     
{\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=xyz-route\"}},\n      456, Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"service\", \"xyz-route\"}}, 123,\n      /*cluster_1 */ 1);\n}\n\n// Test that a bad config update updates the corresponding stats.\nTEST_P(ScopedRdsIntegrationTest, ConfigUpdateFailure) {\n  // 'name' will fail to validate due to empty string.\n  const std::string scope_route1 = R\"EOF(\nname:\nroute_configuration_name: foo_route1\nkey:\n  fragments:\n    - string_key: foo\n)EOF\";\n  on_server_init_function_ = [this, &scope_route1]() {\n    createScopedRdsStream();\n    sendSrdsResponse({scope_route1}, {scope_route1}, {}, \"1\");\n  };\n  initialize();\n\n  test_server_->waitForCounterGe(\"http.config_test.scoped_rds.foo-scoped-routes.update_rejected\",\n                                 1);\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=foo\"}});\n  response->waitForEndStream();\n  verifyResponse(std::move(response), \"404\", Http::TestResponseHeaderMapImpl{}, \"\");\n  cleanupUpstreamAndDownstream();\n\n  // SRDS update fixed the problem.\n  const std::string scope_route2 = R\"EOF(\nname: foo_scope1\nroute_configuration_name: foo_route1\nkey:\n  fragments:\n    - string_key: foo\n)EOF\";\n  sendSrdsResponse({scope_route2}, {scope_route2}, {}, \"1\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route1.update_attempt\", 1);\n  createRdsStream(\"foo_route1\");\n  const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      
virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/\" }}\n          route: {{ cluster: {} }}\n)EOF\";\n  sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route1\", \"cluster_0\"), \"1\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route1.update_success\", 1);\n  sendRequestAndVerifyResponse(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=foo\"}},\n      456, Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"service\", \"bluh\"}}, 123,\n      /*cluster_0*/ 0);\n  cleanupUpstreamAndDownstream();\n}\n\n// Test that a scoped route config update is performed on demand and http request will succeed.\nTEST_P(ScopedRdsIntegrationTest, OnDemandUpdateSuccess) {\n  config_helper_.addFilter(R\"EOF(\n    name: envoy.filters.http.on_demand\n    )EOF\");\n  const std::string scope_route1 = R\"EOF(\nname: foo_scope1\nroute_configuration_name: foo_route1\non_demand: true\nkey:\n  fragments:\n    - string_key: foo\n)EOF\";\n  on_server_init_function_ = [this, &scope_route1]() {\n    createScopedRdsStream();\n    sendSrdsResponse({scope_route1}, {scope_route1}, {}, \"1\");\n  };\n  initialize();\n  registerTestServerPorts({\"http\"});\n\n  const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/\" }}\n          route: {{ cluster: {} }}\n)EOF\";\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  // Request that match lazily loaded scope will trigger on demand loading.\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      
Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=foo\"}});\n  createRdsStream(\"foo_route1\");\n  sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route1\", \"cluster_0\"), \"1\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route1.update_success\", 1);\n\n  waitForNextUpstreamRequest();\n  // Send response headers, and end_stream if there is no response body.\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n\n  response->waitForHeaders();\n  EXPECT_EQ(\"200\", response->headers().Status()->value().getStringView());\n\n  cleanupUpstreamAndDownstream();\n}\n\n// With on demand update filter configured, scope not match should still return 404\nTEST_P(ScopedRdsIntegrationTest, OnDemandUpdateScopeNotMatch) {\n\n  config_helper_.addFilter(R\"EOF(\n    name: envoy.filters.http.on_demand\n    )EOF\");\n\n  const std::string scope_tmpl = R\"EOF(\nname: {}\nroute_configuration_name: {}\nkey:\n  fragments:\n    - string_key: {}\n)EOF\";\n  const std::string scope_route1 = fmt::format(scope_tmpl, \"foo_scope1\", \"foo_route1\", \"foo-route\");\n\n  const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/meh\" }}\n          route: {{ cluster: {} }}\n)EOF\";\n\n  on_server_init_function_ = [&]() {\n    createScopedRdsStream();\n    sendSrdsResponse({scope_route1}, {scope_route1}, {}, \"1\");\n    createRdsStream(\"foo_route1\");\n    // CreateRdsStream waits for connection which is fired by RDS subscription.\n    sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route1\", \"cluster_0\"), \"1\");\n  };\n  initialize();\n  
registerTestServerPorts({\"http\"});\n\n  // No scope key matches \"bar\".\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=bar\"}});\n  response->waitForEndStream();\n  verifyResponse(std::move(response), \"404\", Http::TestResponseHeaderMapImpl{}, \"\");\n  cleanupUpstreamAndDownstream();\n}\n\n// With on demand update filter configured, scope match but virtual host don't match, should still\n// return 404\nTEST_P(ScopedRdsIntegrationTest, OnDemandUpdatePrimaryVirtualHostNotMatch) {\n\n  config_helper_.addFilter(R\"EOF(\n    name: envoy.filters.http.on_demand\n    )EOF\");\n\n  const std::string scope_tmpl = R\"EOF(\nname: {}\nroute_configuration_name: {}\nkey:\n  fragments:\n    - string_key: {}\n)EOF\";\n  const std::string scope_route1 = fmt::format(scope_tmpl, \"foo_scope1\", \"foo_route1\", \"foo-route\");\n\n  const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/meh\" }}\n          route: {{ cluster: {} }}\n)EOF\";\n\n  on_server_init_function_ = [&]() {\n    createScopedRdsStream();\n    sendSrdsResponse({scope_route1}, {scope_route1}, {}, \"1\");\n    createRdsStream(\"foo_route1\");\n    // CreateRdsStream waits for connection which is fired by RDS subscription.\n    sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route1\", \"cluster_0\"), \"1\");\n  };\n  initialize();\n  registerTestServerPorts({\"http\"});\n\n  // No virtual host matches \"neh\".\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = 
codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/neh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=foo\"}});\n  response->waitForEndStream();\n  verifyResponse(std::move(response), \"404\", Http::TestResponseHeaderMapImpl{}, \"\");\n  cleanupUpstreamAndDownstream();\n}\n\n// With on demand update filter configured, scope match but virtual host don't match, should still\n// return 404\nTEST_P(ScopedRdsIntegrationTest, OnDemandUpdateVirtualHostNotMatch) {\n\n  config_helper_.addFilter(R\"EOF(\n    name: envoy.filters.http.on_demand\n    )EOF\");\n\n  const std::string scope_route1 = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_route1\nkey:\n  fragments:\n    - string_key: foo\n)EOF\";\n  const std::string scope_route2 = R\"EOF(\nname: bar_scope\nroute_configuration_name: foo_route1\non_demand: true\nkey:\n  fragments:\n    - string_key: bar\n)EOF\";\n  const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/meh\" }}\n          route: {{ cluster: {} }}\n)EOF\";\n\n  on_server_init_function_ = [&]() {\n    createScopedRdsStream();\n    sendSrdsResponse({scope_route1, scope_route2}, {scope_route1, scope_route2}, {}, \"1\");\n    createRdsStream(\"foo_route1\");\n    // CreateRdsStream waits for connection which is fired by RDS subscription.\n    sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route1\", \"cluster_0\"), \"1\");\n  };\n  initialize();\n  registerTestServerPorts({\"http\"});\n\n  // No scope key matches \"bar\".\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      
Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/neh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=bar\"}});\n  response->waitForEndStream();\n  verifyResponse(std::move(response), \"404\", Http::TestResponseHeaderMapImpl{}, \"\");\n  cleanupUpstreamAndDownstream();\n}\n\n// Eager and lazy scopes share the same route configuration\nTEST_P(ScopedRdsIntegrationTest, DifferentPriorityScopeShareRoute) {\n  config_helper_.addFilter(R\"EOF(\n    name: envoy.filters.http.on_demand\n    )EOF\");\n\n  const std::string scope_route1 = R\"EOF(\nname: foo_scope\nroute_configuration_name: foo_route1\nkey:\n  fragments:\n    - string_key: foo\n)EOF\";\n  const std::string scope_route2 = R\"EOF(\nname: bar_scope\nroute_configuration_name: foo_route1\non_demand: true\nkey:\n  fragments:\n    - string_key: bar\n)EOF\";\n\n  const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/\" }}\n          route: {{ cluster: {} }}\n)EOF\";\n\n  on_server_init_function_ = [&]() {\n    createScopedRdsStream();\n    sendSrdsResponse({scope_route1, scope_route2}, {scope_route1, scope_route2}, {}, \"1\");\n    createRdsStream(\"foo_route1\");\n    // CreateRdsStream waits for connection which is fired by RDS subscription.\n    sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route1\", \"cluster_0\"), \"1\");\n  };\n  initialize();\n  registerTestServerPorts({\"http\"});\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route1.update_success\", 1);\n  cleanupUpstreamAndDownstream();\n  // \"foo\" request should succeed because the foo scope is loaded eagerly by default.\n  // \"bar\" request 
will initialize rds provider on demand and also succeed.\n  for (const std::string& scope_key : std::vector<std::string>{\"foo\", \"bar\"}) {\n    sendRequestAndVerifyResponse(\n        Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                       {\":path\", \"/meh\"},\n                                       {\":authority\", \"host\"},\n                                       {\":scheme\", \"http\"},\n                                       {\"Addr\", fmt::format(\"x-foo-key={}\", scope_key)}},\n        456, Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}, {\"service\", scope_key}}, 123, 0);\n  }\n}\n\nTEST_P(ScopedRdsIntegrationTest, OnDemandUpdateAfterActiveStreamDestroyed) {\n  config_helper_.addFilter(R\"EOF(\n    name: envoy.filters.http.on_demand\n    )EOF\");\n  const std::string scope_route1 = R\"EOF(\nname: foo_scope1\nroute_configuration_name: foo_route1\non_demand: true\nkey:\n  fragments:\n    - string_key: foo\n)EOF\";\n  on_server_init_function_ = [this, &scope_route1]() {\n    createScopedRdsStream();\n    sendSrdsResponse({scope_route1}, {scope_route1}, {}, \"1\");\n  };\n  initialize();\n  registerTestServerPorts({\"http\"});\n\n  const std::string route_config_tmpl = R\"EOF(\n      name: {}\n      virtual_hosts:\n      - name: integration\n        domains: [\"*\"]\n        routes:\n        - match: {{ prefix: \"/\" }}\n          route: {{ cluster: {} }}\n)EOF\";\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  // A request that match lazily loaded scope will trigger on demand loading.\n  auto response = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                     {\":path\", \"/meh\"},\n                                     {\":authority\", \"host\"},\n                                     {\":scheme\", \"http\"},\n                                     {\"Addr\", \"x-foo-key=foo\"}});\n  
test_server_->waitForCounterGe(\"http.config_test.rds.foo_route1.update_attempt\", 1);\n  // Close the connection and destroy the active stream.\n  cleanupUpstreamAndDownstream();\n  // Push rds update, on demand updated callback is post to worker thread.\n  // There is no exception thrown even when active stream is dead because weak_ptr can't be locked.\n  createRdsStream(\"foo_route1\");\n  sendRdsResponse(fmt::format(route_config_tmpl, \"foo_route1\", \"cluster_0\"), \"1\");\n  test_server_->waitForCounterGe(\"http.config_test.rds.foo_route1.update_success\", 1);\n}\n\n} // namespace\n} // namespace Envoy"
  },
  {
    "path": "test/integration/sds_dynamic_integration_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/service/secret/v3/sds.pb.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/connection_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/config/integration/certs/clientcert_hash.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/server.h\"\n#include \"test/integration/ssl_utility.h\"\n#include \"test/mocks/secret/mocks.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/test_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"integration.h\"\n#include \"utility.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\n// Hack to force linking of the service: https://github.com/google/protobuf/issues/4221.\nconst envoy::service::secret::v3::SdsDummy _sds_dummy;\n\n// Sds integration base class with following support:\n// * functions to create sds upstream, and send sds response\n// * functions to create secret protobuf.\nclass SdsDynamicIntegrationBaseTest : public Grpc::GrpcClientIntegrationParamTest,\n                                      public HttpIntegrationTest {\npublic:\n  SdsDynamicIntegrationBaseTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()),\n        
server_cert_(\"server_cert\"), validation_secret_(\"validation_secret\"),\n        client_cert_(\"client_cert\") {}\n\nprotected:\n  void createSdsStream(FakeUpstream&) {\n    createXdsConnection();\n\n    AssertionResult result2 = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n    RELEASE_ASSERT(result2, result2.message());\n    xds_stream_->startGrpcStream();\n  }\n\n  void setUpSdsConfig(envoy::extensions::transport_sockets::tls::v3::SdsSecretConfig* secret_config,\n                      const std::string& secret_name) {\n    secret_config->set_name(secret_name);\n    auto* config_source = secret_config->mutable_sds_config();\n    auto* api_config_source = config_source->mutable_api_config_source();\n    api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n    auto* grpc_service = api_config_source->add_grpc_services();\n    setGrpcService(*grpc_service, \"sds_cluster\", fake_upstreams_.back()->localAddress());\n  }\n\n  envoy::extensions::transport_sockets::tls::v3::Secret getServerSecret() {\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(server_cert_);\n    auto* tls_certificate = secret.mutable_tls_certificate();\n    tls_certificate->mutable_certificate_chain()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/servercert.pem\"));\n    tls_certificate->mutable_private_key()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/serverkey.pem\"));\n    return secret;\n  }\n\n  envoy::extensions::transport_sockets::tls::v3::Secret getCvcSecret() {\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(validation_secret_);\n    auto* validation_context = secret.mutable_validation_context();\n    validation_context->mutable_trusted_ca()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/cacert.pem\"));\n    
validation_context->add_verify_certificate_hash(TEST_CLIENT_CERT_HASH);\n    return secret;\n  }\n\n  envoy::extensions::transport_sockets::tls::v3::Secret getCvcSecretWithOnlyTrustedCa() {\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(validation_secret_);\n    auto* validation_context = secret.mutable_validation_context();\n    validation_context->mutable_trusted_ca()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/cacert.pem\"));\n    return secret;\n  }\n\n  envoy::extensions::transport_sockets::tls::v3::Secret getClientSecret() {\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(client_cert_);\n    auto* tls_certificate = secret.mutable_tls_certificate();\n    tls_certificate->mutable_certificate_chain()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/clientcert.pem\"));\n    tls_certificate->mutable_private_key()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/clientkey.pem\"));\n    return secret;\n  }\n\n  envoy::extensions::transport_sockets::tls::v3::Secret\n  getWrongSecret(const std::string& secret_name) {\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(secret_name);\n    secret.mutable_tls_certificate();\n    return secret;\n  }\n\n  void sendSdsResponse(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) {\n    API_NO_BOOST(envoy::api::v2::DiscoveryResponse) discovery_response;\n    discovery_response.set_version_info(\"1\");\n    discovery_response.set_type_url(Config::TypeUrl::get().Secret);\n    discovery_response.add_resources()->PackFrom(API_DOWNGRADE(secret));\n\n    xds_stream_->sendGrpcMessage(discovery_response);\n  }\n\n  void PrintServerCounters() {\n    std::cerr << \"all counters\" << std::endl;\n    for (const auto& c : test_server_->counters()) {\n      std::cerr << \"counter: \" << 
c->name() << \", value: \" << c->value() << std::endl;\n    }\n  }\n\n  const std::string server_cert_;\n  const std::string validation_secret_;\n  const std::string client_cert_;\n};\n\n// Downstream SDS integration test: static Listener with ssl cert from SDS\nclass SdsDynamicDownstreamIntegrationTest : public SdsDynamicIntegrationBaseTest {\npublic:\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n      auto* common_tls_context = tls_context.mutable_common_tls_context();\n      auto* transport_socket = bootstrap.mutable_static_resources()\n                                   ->mutable_listeners(0)\n                                   ->mutable_filter_chains(0)\n                                   ->mutable_transport_socket();\n      common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11);\n\n      auto* validation_context = common_tls_context->mutable_validation_context();\n      validation_context->mutable_trusted_ca()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/cacert.pem\"));\n      validation_context->add_verify_certificate_hash(TEST_CLIENT_CERT_HASH);\n\n      // Modify the listener ssl cert to use SDS from sds_cluster\n      auto* secret_config = common_tls_context->add_tls_certificate_sds_secret_configs();\n      setUpSdsConfig(secret_config, \"server_cert\");\n\n      transport_socket->set_name(\"envoy.transport_sockets.tls\");\n      transport_socket->mutable_typed_config()->PackFrom(tls_context);\n\n      // Add a static sds cluster\n      auto* sds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      sds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      sds_cluster->set_name(\"sds_cluster\");\n      sds_cluster->mutable_http2_protocol_options();\n    });\n\n    
HttpIntegrationTest::initialize();\n    client_ssl_ctx_ = createClientSslTransportSocketFactory({}, context_manager_, *api_);\n  }\n\n  void createUpstreams() override {\n    create_xds_upstream_ = true;\n    HttpIntegrationTest::createUpstreams();\n  }\n\n  void TearDown() override {\n    cleanUpXdsConnection();\n    client_ssl_ctx_.reset();\n  }\n\n  Network::ClientConnectionPtr makeSslClientConnection() {\n    Network::Address::InstanceConstSharedPtr address = getSslAddress(version_, lookupPort(\"http\"));\n    return dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(),\n                                               client_ssl_ctx_->createTransportSocket(nullptr),\n                                               nullptr);\n  }\n\nprotected:\n  Network::TransportSocketFactoryPtr client_ssl_ctx_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, SdsDynamicDownstreamIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// A test that SDS server send a good server secret for a static listener.\n// The first ssl request should be OK.\nTEST_P(SdsDynamicDownstreamIntegrationTest, BasicSuccess) {\n  on_server_init_function_ = [this]() {\n    createSdsStream(*(fake_upstreams_[1]));\n    sendSdsResponse(getServerSecret());\n  };\n  initialize();\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection();\n  };\n  testRouterHeaderOnlyRequestAndResponse(&creator);\n}\n\n// A test that SDS server send a bad secret for a static listener,\n// The first ssl request should fail at connecting.\n// then SDS send a good server secret,  the second request should be OK.\nTEST_P(SdsDynamicDownstreamIntegrationTest, WrongSecretFirst) {\n  on_server_init_function_ = [this]() {\n    createSdsStream(*(fake_upstreams_[1]));\n    sendSdsResponse(getWrongSecret(server_cert_));\n  };\n  initialize();\n\n  codec_client_ = makeRawHttpConnection(makeSslClientConnection(), 
absl::nullopt);\n  // the connection state is not connected.\n  EXPECT_FALSE(codec_client_->connected());\n  codec_client_->connection()->close(Network::ConnectionCloseType::NoFlush);\n\n  sendSdsResponse(getServerSecret());\n\n  // Wait for ssl_context_updated_by_sds counter.\n  test_server_->waitForCounterGe(\n      listenerStatPrefix(\"server_ssl_socket_factory.ssl_context_update_by_sds\"), 1);\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection();\n  };\n  testRouterHeaderOnlyRequestAndResponse(&creator);\n}\n\nclass SdsDynamicDownstreamCertValidationContextTest : public SdsDynamicDownstreamIntegrationTest {\npublic:\n  SdsDynamicDownstreamCertValidationContextTest() = default;\n\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* transport_socket = bootstrap.mutable_static_resources()\n                                   ->mutable_listeners(0)\n                                   ->mutable_filter_chains(0)\n                                   ->mutable_transport_socket();\n      envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n      auto* common_tls_context = tls_context.mutable_common_tls_context();\n      common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11);\n\n      auto* tls_certificate = common_tls_context->add_tls_certificates();\n      tls_certificate->mutable_certificate_chain()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/servercert.pem\"));\n      tls_certificate->mutable_private_key()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/serverkey.pem\"));\n      setUpSdsValidationContext(common_tls_context);\n      transport_socket->set_name(\"envoy.transport_sockets.tls\");\n      transport_socket->mutable_typed_config()->PackFrom(tls_context);\n\n      // Add a 
static sds cluster\n      auto* sds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      sds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      sds_cluster->set_name(\"sds_cluster\");\n      sds_cluster->mutable_http2_protocol_options();\n\n      envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext upstream_tls_context;\n      if (share_validation_secret_) {\n        // Configure static cluster with SDS config referencing \"validation_secret\",\n        // which is going to be processed before LDS resources.\n        ASSERT(use_lds_);\n        setUpSdsValidationContext(upstream_tls_context.mutable_common_tls_context());\n      }\n      // Enable SSL/TLS with a client certificate in the first cluster.\n      auto* upstream_tls_certificate =\n          upstream_tls_context.mutable_common_tls_context()->add_tls_certificates();\n      upstream_tls_certificate->mutable_certificate_chain()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientcert.pem\"));\n      upstream_tls_certificate->mutable_private_key()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientkey.pem\"));\n      auto* upstream_transport_socket =\n          bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket();\n      upstream_transport_socket->set_name(\"envoy.transport_sockets.tls\");\n      upstream_transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);\n    });\n\n    HttpIntegrationTest::initialize();\n    registerTestServerPorts({\"http\"});\n    client_ssl_ctx_ = createClientSslTransportSocketFactory({}, context_manager_, *api_);\n  }\n\n  void setUpSdsValidationContext(\n      envoy::extensions::transport_sockets::tls::v3::CommonTlsContext* common_tls_context) {\n    if (use_combined_validation_context_) {\n      // Modify the listener context validation type to use combined certificate validation\n      // 
context.\n      auto* combined_config = common_tls_context->mutable_combined_validation_context();\n      auto* default_validation_context = combined_config->mutable_default_validation_context();\n      default_validation_context->add_verify_certificate_hash(TEST_CLIENT_CERT_HASH);\n      auto* secret_config = combined_config->mutable_validation_context_sds_secret_config();\n      setUpSdsConfig(secret_config, validation_secret_);\n    } else {\n      // Modify the listener context validation type to use dynamic certificate validation\n      // context.\n      auto* secret_config = common_tls_context->mutable_validation_context_sds_secret_config();\n      setUpSdsConfig(secret_config, validation_secret_);\n    }\n  }\n\n  void createUpstreams() override {\n    // Fake upstream with SSL/TLS for the first cluster.\n    addFakeUpstream(createUpstreamSslContext(), FakeHttpConnection::Type::HTTP1);\n    create_xds_upstream_ = true;\n  }\n\n  Network::TransportSocketFactoryPtr createUpstreamSslContext() {\n    envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n    auto* common_tls_context = tls_context.mutable_common_tls_context();\n    auto* tls_certificate = common_tls_context->add_tls_certificates();\n    tls_certificate->mutable_certificate_chain()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/clientcert.pem\"));\n    tls_certificate->mutable_private_key()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/clientkey.pem\"));\n\n    auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n        tls_context, factory_context_);\n    static Stats::Scope* upstream_stats_store = new Stats::TestIsolatedStoreImpl();\n    return std::make_unique<Extensions::TransportSockets::Tls::ServerSslSocketFactory>(\n        std::move(cfg), context_manager_, *upstream_stats_store, std::vector<std::string>{});\n  }\n\n  void TearDown() override {\n   
 cleanUpXdsConnection();\n\n    client_ssl_ctx_.reset();\n    cleanupUpstreamAndDownstream();\n    codec_client_.reset();\n  }\n\n  void enableCombinedValidationContext(bool enable) { use_combined_validation_context_ = enable; }\n  void shareValidationSecret(bool share) { share_validation_secret_ = share; }\n\nprivate:\n  bool use_combined_validation_context_{false};\n  bool share_validation_secret_{false};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, SdsDynamicDownstreamCertValidationContextTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// A test that SDS server send a good certificate validation context for a static listener.\n// The first ssl request should be OK.\nTEST_P(SdsDynamicDownstreamCertValidationContextTest, BasicSuccess) {\n  on_server_init_function_ = [this]() {\n    createSdsStream(*(fake_upstreams_[1]));\n    sendSdsResponse(getCvcSecret());\n  };\n  initialize();\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection();\n  };\n  testRouterHeaderOnlyRequestAndResponse(&creator);\n}\n\n// A test that SDS server sends a certificate validation context for a static listener.\n// Listener combines default certificate validation context and the dynamic one.\n// The first ssl request should be OK.\nTEST_P(SdsDynamicDownstreamCertValidationContextTest, CombinedCertValidationContextSuccess) {\n  enableCombinedValidationContext(true);\n  on_server_init_function_ = [this]() {\n    createSdsStream(*(fake_upstreams_[1]));\n    sendSdsResponse(getCvcSecretWithOnlyTrustedCa());\n  };\n  initialize();\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection();\n  };\n  testRouterHeaderOnlyRequestAndResponse(&creator);\n}\n\n// A test that verifies that both: static cluster and LDS listener are updated when using\n// the same verification secret (standalone validation context) from the SDS 
server.\nTEST_P(SdsDynamicDownstreamCertValidationContextTest, BasicWithSharedSecret) {\n  shareValidationSecret(true);\n  on_server_init_function_ = [this]() {\n    createSdsStream(*(fake_upstreams_[1]));\n    sendSdsResponse(getCvcSecret());\n  };\n  initialize();\n\n  // Wait for \"ssl_context_updated_by_sds\" counters to indicate that both resources\n  // depending on the verification_secret were updated.\n  test_server_->waitForCounterGe(\n      \"cluster.cluster_0.client_ssl_socket_factory.ssl_context_update_by_sds\", 1);\n  test_server_->waitForCounterGe(\n      listenerStatPrefix(\"server_ssl_socket_factory.ssl_context_update_by_sds\"), 1);\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection();\n  };\n  testRouterHeaderOnlyRequestAndResponse(&creator);\n}\n\n// A test that verifies that both: static cluster and LDS listener are updated when using\n// the same verification secret (combined validation context) from the SDS server.\nTEST_P(SdsDynamicDownstreamCertValidationContextTest, CombinedValidationContextWithSharedSecret) {\n  enableCombinedValidationContext(true);\n  shareValidationSecret(true);\n  on_server_init_function_ = [this]() {\n    createSdsStream(*(fake_upstreams_[1]));\n    sendSdsResponse(getCvcSecretWithOnlyTrustedCa());\n  };\n  initialize();\n\n  // Wait for \"ssl_context_updated_by_sds\" counters to indicate that both resources\n  // depending on the verification_secret were updated.\n  test_server_->waitForCounterGe(\n      \"cluster.cluster_0.client_ssl_socket_factory.ssl_context_update_by_sds\", 1);\n  test_server_->waitForCounterGe(\n      listenerStatPrefix(\"server_ssl_socket_factory.ssl_context_update_by_sds\"), 1);\n\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection();\n  };\n  testRouterHeaderOnlyRequestAndResponse(&creator);\n}\n\n// Upstream SDS integration test: a static cluster has ssl cert from 
SDS.\nclass SdsDynamicUpstreamIntegrationTest : public SdsDynamicIntegrationBaseTest {\npublic:\n  void initialize() override {\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      // add sds cluster first.\n      auto* sds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      sds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      sds_cluster->set_name(\"sds_cluster\");\n      sds_cluster->mutable_http2_protocol_options();\n\n      // change the first cluster with ssl and sds.\n      auto* transport_socket =\n          bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket();\n      envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n      auto* secret_config =\n          tls_context.mutable_common_tls_context()->add_tls_certificate_sds_secret_configs();\n      setUpSdsConfig(secret_config, \"client_cert\");\n\n      transport_socket->set_name(\"envoy.transport_sockets.tls\");\n      transport_socket->mutable_typed_config()->PackFrom(tls_context);\n    });\n\n    HttpIntegrationTest::initialize();\n    registerTestServerPorts({\"http\"});\n  }\n\n  void TearDown() override {\n    cleanUpXdsConnection();\n\n    cleanupUpstreamAndDownstream();\n    codec_client_.reset();\n\n    test_server_.reset();\n    fake_upstreams_.clear();\n  }\n\n  void createUpstreams() override {\n    // This is for backend with ssl\n    addFakeUpstream(createUpstreamSslContext(context_manager_, *api_),\n                    FakeHttpConnection::Type::HTTP1);\n    create_xds_upstream_ = true;\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SdsDynamicUpstreamIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// To test a static cluster with sds. 
SDS send a good client secret first.\n// The first request should work.\nTEST_P(SdsDynamicUpstreamIntegrationTest, BasicSuccess) {\n  on_server_init_function_ = [this]() {\n    createSdsStream(*(fake_upstreams_[1]));\n    sendSdsResponse(getClientSecret());\n  };\n\n  initialize();\n\n  // There is a race condition here; there are two static clusters:\n  // backend cluster_0 with sds and sds_cluster. cluster_0 is created first, its init_manager\n  // is called so it issues a sds call, but fail since sds_cluster is not added yet.\n  // so cluster_0 is initialized with an empty secret. initialize() will not wait and will return.\n  // the testing request will be called, even though in the pre_worker_function, a good sds is\n  // send, the cluster will be updated with good secret, the testing request may fail if it is\n  // before context is updated. Hence, need to wait for context_update counter.\n  test_server_->waitForCounterGe(\n      \"cluster.cluster_0.client_ssl_socket_factory.ssl_context_update_by_sds\", 1);\n\n  testRouterHeaderOnlyRequestAndResponse();\n}\n\n// To test a static cluster with sds. 
SDS send a bad client secret first.\n// The first request should fail with 503,  then SDS sends a good client secret,\n// the second request should work.\nTEST_P(SdsDynamicUpstreamIntegrationTest, WrongSecretFirst) {\n  on_server_init_function_ = [this]() {\n    createSdsStream(*(fake_upstreams_[1]));\n    sendSdsResponse(getWrongSecret(client_cert_));\n  };\n  initialize();\n\n  // Make a simple request, should get 503\n  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n      lookupPort(\"http\"), \"GET\", \"/test/long/url\", \"\", downstream_protocol_, version_);\n  ASSERT_TRUE(response->complete());\n  EXPECT_EQ(\"503\", response->headers().getStatusValue());\n\n  // To flush out the reset connection from the first request in upstream.\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n\n  sendSdsResponse(getClientSecret());\n  test_server_->waitForCounterGe(\n      \"cluster.cluster_0.client_ssl_socket_factory.ssl_context_update_by_sds\", 1);\n\n  testRouterHeaderOnlyRequestAndResponse();\n}\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/sds_generic_secret_integration_test.cc",
    "content": "#include <string>\n\n#include \"envoy/api/v2/discovery.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/secret/secret_provider.h\"\n\n#include \"common/config/datasource.h\"\n#include \"common/grpc/common.h\"\n\n#include \"test/extensions/filters/http/common/empty_http_filter_config.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\n\n// The filter fetches a generic secret from secret manager and attaches it to a header for\n// validation.\nclass SdsGenericSecretTestFilter : public Http::StreamDecoderFilter {\npublic:\n  SdsGenericSecretTestFilter(Api::Api& api,\n                             Secret::GenericSecretConfigProviderSharedPtr config_provider)\n      : api_(api), config_provider_(config_provider) {}\n\n  // Http::StreamFilterBase\n  void onDestroy() override{};\n\n  // Http::StreamDecoderFilter\n  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override {\n    headers.addCopy(Http::LowerCaseString(\"secret\"),\n                    Config::DataSource::read(config_provider_->secret()->secret(), true, api_));\n    return Http::FilterHeadersStatus::Continue;\n  }\n\n  Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override {\n    return Http::FilterDataStatus::Continue;\n  }\n\n  Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override {\n    return Http::FilterTrailersStatus::Continue;\n  }\n\n  void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {\n    decoder_callbacks_ = &callbacks;\n  }\n\nprivate:\n  Api::Api& api_;\n  Secret::GenericSecretConfigProviderSharedPtr config_provider_;\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_;\n};\n\nclass 
SdsGenericSecretTestFilterConfig\n    : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig {\npublic:\n  SdsGenericSecretTestFilterConfig()\n      : Extensions::HttpFilters::Common::EmptyHttpFilterConfig(\"sds-generic-secret-test\") {\n    auto* api_config_source = config_source_.mutable_api_config_source();\n    api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC);\n    auto* grpc_service = api_config_source->add_grpc_services();\n    grpc_service->mutable_envoy_grpc()->set_cluster_name(\"sds_cluster\");\n  }\n\n  Http::FilterFactoryCb\n  createFilter(const std::string&,\n               Server::Configuration::FactoryContext& factory_context) override {\n    auto secret_provider =\n        factory_context.clusterManager()\n            .clusterManagerFactory()\n            .secretManager()\n            .findOrCreateGenericSecretProvider(config_source_, \"encryption_key\",\n                                               factory_context.getTransportSocketFactoryContext());\n    return\n        [&factory_context, secret_provider](Http::FilterChainFactoryCallbacks& callbacks) -> void {\n          callbacks.addStreamDecoderFilter(std::make_shared<::Envoy::SdsGenericSecretTestFilter>(\n              factory_context.api(), secret_provider));\n        };\n  }\n\nprivate:\n  envoy::config::core::v3::ConfigSource config_source_;\n};\n\nclass SdsGenericSecretIntegrationTest : public Grpc::GrpcClientIntegrationParamTest,\n                                        public HttpIntegrationTest {\npublic:\n  SdsGenericSecretIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()), registration_(factory_) {}\n\n  void initialize() override {\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* sds_cluster = bootstrap.mutable_static_resources()->add_clusters();\n      sds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n      
sds_cluster->set_name(\"sds_cluster\");\n      sds_cluster->mutable_http2_protocol_options();\n    });\n\n    config_helper_.addFilter(\"{ name: sds-generic-secret-test }\");\n\n    create_xds_upstream_ = true;\n    HttpIntegrationTest::initialize();\n  }\n\n  void TearDown() override { cleanUpXdsConnection(); }\n\n  void createSdsStream() {\n    createXdsConnection();\n    AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    xds_stream_->startGrpcStream();\n  }\n\n  void sendSecret() {\n    envoy::extensions::transport_sockets::tls::v3::Secret secret;\n    secret.set_name(\"encryption_key\");\n    auto* generic_secret = secret.mutable_generic_secret();\n    generic_secret->mutable_secret()->set_inline_string(\"DUMMY_AES_128_KEY\");\n    API_NO_BOOST(envoy::api::v2::DiscoveryResponse) discovery_response;\n    discovery_response.set_version_info(\"0\");\n    discovery_response.set_type_url(Config::TypeUrl::get().Secret);\n    discovery_response.add_resources()->PackFrom(API_DOWNGRADE(secret));\n    xds_stream_->sendGrpcMessage(discovery_response);\n  }\n\n  SdsGenericSecretTestFilterConfig factory_;\n  Registry::InjectFactory<Server::Configuration::NamedHttpFilterConfigFactory> registration_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SdsGenericSecretIntegrationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// A test that an SDS generic secret can be successfully fetched by a filter.\nTEST_P(SdsGenericSecretIntegrationTest, FilterFetchSuccess) {\n  on_server_init_function_ = [this]() {\n    createSdsStream();\n    sendSecret();\n  };\n  initialize();\n\n  codec_client_ = makeHttpConnection((lookupPort(\"http\")));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"GET\"}, {\":path\", \"/\"}, {\":scheme\", \"http\"}, {\":authority\", \"host\"}};\n  sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0);\n\n  
EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_EQ(0U, upstream_request_->bodyLength());\n  EXPECT_EQ(\n      \"DUMMY_AES_128_KEY\",\n      upstream_request_->headers().get(Http::LowerCaseString(\"secret\"))->value().getStringView());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/sds_static_integration_test.cc",
    "content": "#include <memory>\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/connection_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"test/config/integration/certs/clientcert_hash.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/server.h\"\n#include \"test/integration/ssl_utility.h\"\n#include \"test/mocks/secret/mocks.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/test_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"integration.h\"\n#include \"utility.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nclass SdsStaticDownstreamIntegrationTest\n    : public testing::TestWithParam<Network::Address::IpVersion>,\n      public HttpIntegrationTest {\npublic:\n  SdsStaticDownstreamIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void initialize() override {\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* transport_socket = bootstrap.mutable_static_resources()\n                                   ->mutable_listeners(0)\n                                   ->mutable_filter_chains(0)\n                                   ->mutable_transport_socket();\n      envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n      auto* common_tls_context = tls_context.mutable_common_tls_context();\n      common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11);\n\n      
common_tls_context->mutable_validation_context_sds_secret_config()->set_name(\n          \"validation_context\");\n      common_tls_context->add_tls_certificate_sds_secret_configs()->set_name(\"server_cert\");\n      transport_socket->set_name(\"envoy.transport_sockets.tls\");\n      transport_socket->mutable_typed_config()->PackFrom(tls_context);\n\n      auto* secret = bootstrap.mutable_static_resources()->add_secrets();\n      secret->set_name(\"validation_context\");\n      auto* validation_context = secret->mutable_validation_context();\n      validation_context->mutable_trusted_ca()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/cacert.pem\"));\n      validation_context->add_verify_certificate_hash(TEST_CLIENT_CERT_HASH);\n\n      secret = bootstrap.mutable_static_resources()->add_secrets();\n      secret->set_name(\"server_cert\");\n      auto* tls_certificate = secret->mutable_tls_certificate();\n      tls_certificate->mutable_certificate_chain()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/servercert.pem\"));\n      tls_certificate->mutable_private_key()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/serverkey.pem\"));\n    });\n\n    HttpIntegrationTest::initialize();\n\n    registerTestServerPorts({\"http\"});\n\n    client_ssl_ctx_ = createClientSslTransportSocketFactory({}, context_manager_, *api_);\n  }\n\n  void TearDown() override {\n    client_ssl_ctx_.reset();\n    cleanupUpstreamAndDownstream();\n    codec_client_.reset();\n  }\n\n  Network::ClientConnectionPtr makeSslClientConnection() {\n    Network::Address::InstanceConstSharedPtr address = getSslAddress(version_, lookupPort(\"http\"));\n    return dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(),\n                                               client_ssl_ctx_->createTransportSocket(nullptr),\n                                        
       nullptr);\n  }\n\nprivate:\n  Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{timeSystem()};\n\n  Network::TransportSocketFactoryPtr client_ssl_ctx_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SdsStaticDownstreamIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(SdsStaticDownstreamIntegrationTest, RouterRequestAndResponseWithGiantBodyBuffer) {\n  ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {\n    return makeSslClientConnection();\n  };\n  testRouterRequestAndResponseWithBody(16 * 1024 * 1024, 16 * 1024 * 1024, false, false, &creator);\n}\n\nclass SdsStaticUpstreamIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                         public HttpIntegrationTest {\npublic:\n  SdsStaticUpstreamIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void initialize() override {\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n      tls_context.mutable_common_tls_context()->add_tls_certificate_sds_secret_configs()->set_name(\n          \"client_cert\");\n      auto* transport_socket =\n          bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket();\n      transport_socket->set_name(\"envoy.transport_sockets.tls\");\n      transport_socket->mutable_typed_config()->PackFrom(tls_context);\n\n      auto* secret = bootstrap.mutable_static_resources()->add_secrets();\n      secret->set_name(\"client_cert\");\n      auto* tls_certificate = secret->mutable_tls_certificate();\n      tls_certificate->mutable_certificate_chain()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientcert.pem\"));\n      
tls_certificate->mutable_private_key()->set_filename(\n          TestEnvironment::runfilesPath(\"test/config/integration/certs/clientkey.pem\"));\n    });\n\n    HttpIntegrationTest::initialize();\n\n    registerTestServerPorts({\"http\"});\n  }\n\n  void TearDown() override {\n    cleanupUpstreamAndDownstream();\n    codec_client_.reset();\n\n    test_server_.reset();\n    fake_upstreams_.clear();\n  }\n\n  void createUpstreams() override {\n    addFakeUpstream(createUpstreamSslContext(context_manager_, *api_),\n                    FakeHttpConnection::Type::HTTP1);\n  }\n\nprivate:\n  Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{timeSystem()};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SdsStaticUpstreamIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(SdsStaticUpstreamIntegrationTest, RouterRequestAndResponseWithGiantBodyBuffer) {\n  testRouterRequestAndResponseWithBody(16 * 1024 * 1024, 16 * 1024 * 1024, false, false, nullptr);\n}\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/server.cc",
    "content": "#include \"test/integration/server.h\"\n\n#include <memory>\n#include <string>\n\n#include \"envoy/http/header_map.h\"\n\n#include \"common/common/random_generator.h\"\n#include \"common/common/thread.h\"\n#include \"common/local_info/local_info_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/stats/thread_local_store.h\"\n#include \"common/thread_local/thread_local_impl.h\"\n\n#include \"server/hot_restart_nop_impl.h\"\n#include \"server/options_impl.h\"\n#include \"server/process_context_impl.h\"\n\n#include \"test/common/runtime/utility.h\"\n#include \"test/integration/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nOptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml,\n                                  Network::Address::IpVersion ip_version,\n                                  FieldValidationConfig validation_config, uint32_t concurrency,\n                                  std::chrono::seconds drain_time,\n                                  Server::DrainStrategy drain_strategy) {\n  OptionsImpl test_options(\"cluster_name\", \"node_name\", \"zone_name\", spdlog::level::info);\n\n  test_options.setConfigPath(config_path);\n  test_options.setConfigYaml(config_yaml);\n  test_options.setLocalAddressIpVersion(ip_version);\n  test_options.setFileFlushIntervalMsec(std::chrono::milliseconds(50));\n  test_options.setDrainTime(drain_time);\n  test_options.setParentShutdownTime(std::chrono::seconds(2));\n  test_options.setDrainStrategy(drain_strategy);\n  test_options.setAllowUnkownFields(validation_config.allow_unknown_static_fields);\n  test_options.setRejectUnknownFieldsDynamic(validation_config.reject_unknown_dynamic_fields);\n  
test_options.setIgnoreUnknownFieldsDynamic(validation_config.ignore_unknown_dynamic_fields);\n  test_options.setConcurrency(concurrency);\n  test_options.setHotRestartDisabled(true);\n\n  return test_options;\n}\n\n} // namespace Server\n\nIntegrationTestServerPtr IntegrationTestServer::create(\n    const std::string& config_path, const Network::Address::IpVersion version,\n    std::function<void(IntegrationTestServer&)> server_ready_function,\n    std::function<void()> on_server_init_function, bool deterministic,\n    Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization,\n    ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config,\n    uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy,\n    bool use_real_stats) {\n  IntegrationTestServerPtr server{\n      std::make_unique<IntegrationTestServerImpl>(time_system, api, config_path, use_real_stats)};\n  if (server_ready_function != nullptr) {\n    server->setOnServerReadyCb(server_ready_function);\n  }\n  server->start(version, on_server_init_function, deterministic, defer_listener_finalization,\n                process_object, validation_config, concurrency, drain_time, drain_strategy);\n  return server;\n}\n\nvoid IntegrationTestServer::waitUntilListenersReady() {\n  Thread::LockGuard guard(listeners_mutex_);\n  while (pending_listeners_ != 0) {\n    // If your test is hanging forever here, you may need to create your listener manually,\n    // after BaseIntegrationTest::initialize() is done. 
See cds_integration_test.cc for an example.\n    listeners_cv_.wait(listeners_mutex_); // Safe since CondVar::wait won't throw.\n  }\n  ENVOY_LOG(info, \"listener wait complete\");\n}\n\nvoid IntegrationTestServer::start(const Network::Address::IpVersion version,\n                                  std::function<void()> on_server_init_function, bool deterministic,\n                                  bool defer_listener_finalization,\n                                  ProcessObjectOptRef process_object,\n                                  Server::FieldValidationConfig validator_config,\n                                  uint32_t concurrency, std::chrono::seconds drain_time,\n                                  Server::DrainStrategy drain_strategy) {\n  ENVOY_LOG(info, \"starting integration test server\");\n  ASSERT(!thread_);\n  thread_ =\n      api_.threadFactory().createThread([version, deterministic, process_object, validator_config,\n                                         concurrency, drain_time, drain_strategy, this]() -> void {\n        threadRoutine(version, deterministic, process_object, validator_config, concurrency,\n                      drain_time, drain_strategy);\n      });\n\n  // If any steps need to be done prior to workers starting, do them now. E.g., xDS pre-init.\n  // Note that there is no synchronization guaranteeing this happens either\n  // before workers starting or after server start. Any needed synchronization must occur in the\n  // routines. These steps are executed at this point in the code to allow server initialization to\n  // be dependent on them (e.g. 
control plane peers).\n  if (on_server_init_function != nullptr) {\n    on_server_init_function();\n  }\n\n  // Wait for the server to be created and the number of initial listeners to wait for to be set.\n  server_set_.waitReady();\n\n  if (!defer_listener_finalization) {\n    // Now wait for the initial listeners (if any) to actually be listening on the worker.\n    // At this point the server is up and ready for testing.\n    waitUntilListenersReady();\n  }\n\n  // If we are tapping, spin up tcpdump.\n  const auto tap_path = TestEnvironment::getOptionalEnvVar(\"TAP_PATH\");\n  if (tap_path) {\n    std::vector<uint32_t> ports;\n    for (auto listener : server().listenerManager().listeners()) {\n      const auto listen_addr = listener.get().listenSocketFactory().localAddress();\n      if (listen_addr->type() == Network::Address::Type::Ip) {\n        ports.push_back(listen_addr->ip()->port());\n      }\n    }\n    // TODO(htuch): Support a different loopback interface as needed.\n    const ::testing::TestInfo* const test_info =\n        ::testing::UnitTest::GetInstance()->current_test_info();\n    const std::string test_id =\n        std::string(test_info->name()) + \"_\" + std::string(test_info->test_case_name());\n    const std::string pcap_path =\n        tap_path.value() + \"_\" + absl::StrReplaceAll(test_id, {{\"/\", \"_\"}}) + \"_server.pcap\";\n    tcp_dump_ = std::make_unique<TcpDump>(pcap_path, \"lo\", ports);\n  }\n}\n\nIntegrationTestServer::~IntegrationTestServer() {\n  // Derived class must have shutdown server.\n  thread_->join();\n}\n\nvoid IntegrationTestServer::onWorkerListenerAdded() {\n  if (on_worker_listener_added_cb_) {\n    on_worker_listener_added_cb_();\n  }\n\n  Thread::LockGuard guard(listeners_mutex_);\n  if (pending_listeners_ > 0) {\n    pending_listeners_--;\n    listeners_cv_.notifyOne();\n  }\n}\n\nvoid IntegrationTestServer::onWorkerListenerRemoved() {\n  if (on_worker_listener_removed_cb_) {\n    
on_worker_listener_removed_cb_();\n  }\n}\n\nvoid IntegrationTestServer::serverReady() {\n  pending_listeners_ = server().listenerManager().listeners().size();\n  if (on_server_ready_cb_ != nullptr) {\n    on_server_ready_cb_(*this);\n  }\n  server_set_.setReady();\n}\n\nvoid IntegrationTestServer::threadRoutine(const Network::Address::IpVersion version,\n                                          bool deterministic, ProcessObjectOptRef process_object,\n                                          Server::FieldValidationConfig validation_config,\n                                          uint32_t concurrency, std::chrono::seconds drain_time,\n                                          Server::DrainStrategy drain_strategy) {\n  OptionsImpl options(Server::createTestOptionsImpl(config_path_, \"\", version, validation_config,\n                                                    concurrency, drain_time, drain_strategy));\n  Thread::MutexBasicLockable lock;\n\n  Random::RandomGeneratorPtr random_generator;\n  if (deterministic) {\n    random_generator = std::make_unique<testing::NiceMock<Random::MockRandomGenerator>>();\n  } else {\n    random_generator = std::make_unique<Random::RandomGeneratorImpl>();\n  }\n  createAndRunEnvoyServer(options, time_system_, Network::Utility::getLocalAddress(version), *this,\n                          lock, *this, std::move(random_generator), process_object);\n}\n\nIntegrationTestServerImpl::IntegrationTestServerImpl(Event::TestTimeSystem& time_system,\n                                                     Api::Api& api, const std::string& config_path,\n                                                     bool use_real_stats)\n    : IntegrationTestServer(time_system, api, config_path) {\n  stats_allocator_ =\n      (use_real_stats ? 
std::make_unique<Stats::AllocatorImpl>(symbol_table_)\n                      : std::make_unique<Stats::NotifyingAllocatorImpl>(symbol_table_));\n}\n\nvoid IntegrationTestServerImpl::createAndRunEnvoyServer(\n    OptionsImpl& options, Event::TimeSystem& time_system,\n    Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks,\n    Thread::BasicLockable& access_log_lock, Server::ComponentFactory& component_factory,\n    Random::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) {\n  {\n    Init::ManagerImpl init_manager{\"Server\"};\n    Server::HotRestartNopImpl restarter;\n    ThreadLocal::InstanceImpl tls;\n    Stats::ThreadLocalStoreImpl stat_store(*stats_allocator_);\n    std::unique_ptr<ProcessContext> process_context;\n    if (process_object.has_value()) {\n      process_context = std::make_unique<ProcessContextImpl>(process_object->get());\n    }\n    Server::InstanceImpl server(init_manager, options, time_system, local_address, hooks, restarter,\n                                stat_store, access_log_lock, component_factory,\n                                std::move(random_generator), tls, Thread::threadFactoryForTest(),\n                                Filesystem::fileSystemForTest(), std::move(process_context));\n    // This is technically thread unsafe (assigning to a shared_ptr accessed\n    // across threads), but because we synchronize below through serverReady(), the only\n    // consumer on the main test thread in ~IntegrationTestServerImpl will not race.\n    admin_address_ = server.admin().socket().localAddress();\n    server_ = &server;\n    stat_store_ = &stat_store;\n    serverReady();\n    server.run();\n  }\n  server_gone_.Notify();\n}\n\nIntegrationTestServerImpl::~IntegrationTestServerImpl() {\n  ENVOY_LOG(info, \"stopping integration test server\");\n\n  if (useAdminInterfaceToQuit()) {\n    Network::Address::InstanceConstSharedPtr admin_address(admin_address_);\n    if (admin_address != 
nullptr) {\n      BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(\n          admin_address, \"POST\", \"/quitquitquit\", \"\", Http::CodecClient::Type::HTTP1);\n      EXPECT_TRUE(response->complete());\n      EXPECT_EQ(\"200\", response->headers().getStatusValue());\n      server_gone_.WaitForNotification();\n    }\n  } else {\n    if (server_) {\n      server_->dispatcher().post([this]() { server_->shutdown(); });\n      server_gone_.WaitForNotification();\n    }\n  }\n\n  server_ = nullptr;\n  admin_address_ = nullptr;\n  stat_store_ = nullptr;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/server.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/server/options.h\"\n#include \"envoy/server/process_context.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/stats/allocator_impl.h\"\n\n#include \"server/drain_manager_impl.h\"\n#include \"server/listener_hooks.h\"\n#include \"server/options_impl.h\"\n#include \"server/server.h\"\n\n#include \"test/integration/server_stats.h\"\n#include \"test/integration/tcp_dump.h\"\n#include \"test/test_common/test_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/notification.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nstruct FieldValidationConfig {\n  bool allow_unknown_static_fields = false;\n  bool reject_unknown_dynamic_fields = false;\n  bool ignore_unknown_dynamic_fields = false;\n};\n\n// Create OptionsImpl structures suitable for tests. 
Disables hot restart.\nOptionsImpl\ncreateTestOptionsImpl(const std::string& config_path, const std::string& config_yaml,\n                      Network::Address::IpVersion ip_version,\n                      FieldValidationConfig validation_config = FieldValidationConfig(),\n                      uint32_t concurrency = 1,\n                      std::chrono::seconds drain_time = std::chrono::seconds(1),\n                      Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual);\n\nclass TestComponentFactory : public ComponentFactory {\npublic:\n  Server::DrainManagerPtr createDrainManager(Server::Instance& server) override {\n    return Server::DrainManagerPtr{\n        new Server::DrainManagerImpl(server, envoy::config::listener::v3::Listener::MODIFY_ONLY)};\n  }\n  Runtime::LoaderPtr createRuntime(Server::Instance& server,\n                                   Server::Configuration::Initial& config) override {\n    return Server::InstanceUtil::createRuntime(server, config);\n  }\n};\n\n} // namespace Server\n\nnamespace Stats {\n\n/**\n * This is a wrapper for Scopes for the TestIsolatedStoreImpl to ensure new scopes do\n * not interact with the store without grabbing the lock from TestIsolatedStoreImpl.\n */\nclass TestScopeWrapper : public Scope {\npublic:\n  TestScopeWrapper(Thread::MutexBasicLockable& lock, ScopePtr wrapped_scope)\n      : lock_(lock), wrapped_scope_(std::move(wrapped_scope)) {}\n\n  ScopePtr createScope(const std::string& name) override {\n    Thread::LockGuard lock(lock_);\n    return ScopePtr{new TestScopeWrapper(lock_, wrapped_scope_->createScope(name))};\n  }\n\n  void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override {\n    Thread::LockGuard lock(lock_);\n    wrapped_scope_->deliverHistogramToSinks(histogram, value);\n  }\n\n  Counter& counterFromStatNameWithTags(const StatName& name,\n                                       StatNameTagVectorOptConstRef tags) override {\n    Thread::LockGuard 
lock(lock_);\n    return wrapped_scope_->counterFromStatNameWithTags(name, tags);\n  }\n\n  Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                   Gauge::ImportMode import_mode) override {\n    Thread::LockGuard lock(lock_);\n    return wrapped_scope_->gaugeFromStatNameWithTags(name, tags, import_mode);\n  }\n\n  Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                           Histogram::Unit unit) override {\n    Thread::LockGuard lock(lock_);\n    return wrapped_scope_->histogramFromStatNameWithTags(name, tags, unit);\n  }\n\n  TextReadout& textReadoutFromStatNameWithTags(const StatName& name,\n                                               StatNameTagVectorOptConstRef tags) override {\n    Thread::LockGuard lock(lock_);\n    return wrapped_scope_->textReadoutFromStatNameWithTags(name, tags);\n  }\n\n  NullGaugeImpl& nullGauge(const std::string& str) override {\n    return wrapped_scope_->nullGauge(str);\n  }\n\n  Counter& counterFromString(const std::string& name) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return counterFromStatName(storage.statName());\n  }\n  Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return gaugeFromStatName(storage.statName(), import_mode);\n  }\n  Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return histogramFromStatName(storage.statName(), unit);\n  }\n  TextReadout& textReadoutFromString(const std::string& name) override {\n    StatNameManagedStorage storage(name, symbolTable());\n    return textReadoutFromStatName(storage.statName());\n  }\n\n  CounterOptConstRef findCounter(StatName name) const override {\n    Thread::LockGuard lock(lock_);\n    
return wrapped_scope_->findCounter(name);\n  }\n  GaugeOptConstRef findGauge(StatName name) const override {\n    Thread::LockGuard lock(lock_);\n    return wrapped_scope_->findGauge(name);\n  }\n  HistogramOptConstRef findHistogram(StatName name) const override {\n    Thread::LockGuard lock(lock_);\n    return wrapped_scope_->findHistogram(name);\n  }\n  TextReadoutOptConstRef findTextReadout(StatName name) const override {\n    Thread::LockGuard lock(lock_);\n    return wrapped_scope_->findTextReadout(name);\n  }\n\n  const SymbolTable& constSymbolTable() const override {\n    return wrapped_scope_->constSymbolTable();\n  }\n  SymbolTable& symbolTable() override { return wrapped_scope_->symbolTable(); }\n\n  bool iterate(const IterateFn<Counter>& fn) const override { return wrapped_scope_->iterate(fn); }\n  bool iterate(const IterateFn<Gauge>& fn) const override { return wrapped_scope_->iterate(fn); }\n  bool iterate(const IterateFn<Histogram>& fn) const override {\n    return wrapped_scope_->iterate(fn);\n  }\n  bool iterate(const IterateFn<TextReadout>& fn) const override {\n    return wrapped_scope_->iterate(fn);\n  }\n\nprivate:\n  Thread::MutexBasicLockable& lock_;\n  ScopePtr wrapped_scope_;\n};\n\n// A counter which signals on a condition variable when it is incremented.\nclass NotifyingCounter : public Stats::Counter {\npublic:\n  NotifyingCounter(Stats::Counter* counter, absl::Mutex& mutex, absl::CondVar& condvar)\n      : counter_(counter), mutex_(mutex), condvar_(condvar) {}\n\n  std::string name() const override { return counter_->name(); }\n  StatName statName() const override { return counter_->statName(); }\n  TagVector tags() const override { return counter_->tags(); }\n  std::string tagExtractedName() const override { return counter_->tagExtractedName(); }\n  void iterateTagStatNames(const TagStatNameIterFn& fn) const override {\n    counter_->iterateTagStatNames(fn);\n  }\n  void add(uint64_t amount) override {\n    counter_->add(amount);\n    
absl::MutexLock l(&mutex_);\n    condvar_.Signal();\n  }\n  void inc() override { add(1); }\n  uint64_t latch() override { return counter_->latch(); }\n  void reset() override { return counter_->reset(); }\n  uint64_t value() const override { return counter_->value(); }\n  void incRefCount() override { counter_->incRefCount(); }\n  bool decRefCount() override { return counter_->decRefCount(); }\n  uint32_t use_count() const override { return counter_->use_count(); }\n  StatName tagExtractedStatName() const override { return counter_->tagExtractedStatName(); }\n  bool used() const override { return counter_->used(); }\n  SymbolTable& symbolTable() override { return counter_->symbolTable(); }\n  const SymbolTable& constSymbolTable() const override { return counter_->constSymbolTable(); }\n\nprivate:\n  std::unique_ptr<Stats::Counter> counter_;\n  absl::Mutex& mutex_;\n  absl::CondVar& condvar_;\n};\n\n// A stats allocator which creates NotifyingCounters rather than regular CounterImpls.\nclass NotifyingAllocatorImpl : public Stats::AllocatorImpl {\npublic:\n  using Stats::AllocatorImpl::AllocatorImpl;\n\n  void waitForCounterFromStringEq(const std::string& name, uint64_t value) {\n    absl::MutexLock l(&mutex_);\n    ENVOY_LOG_MISC(trace, \"waiting for {} to be {}\", name, value);\n    while (getCounterLockHeld(name) == nullptr || getCounterLockHeld(name)->value() != value) {\n      condvar_.Wait(&mutex_);\n    }\n    ENVOY_LOG_MISC(trace, \"done waiting for {} to be {}\", name, value);\n  }\n\n  void waitForCounterFromStringGe(const std::string& name, uint64_t value) {\n    absl::MutexLock l(&mutex_);\n    ENVOY_LOG_MISC(trace, \"waiting for {} to be {}\", name, value);\n    while (getCounterLockHeld(name) == nullptr || getCounterLockHeld(name)->value() < value) {\n      condvar_.Wait(&mutex_);\n    }\n    ENVOY_LOG_MISC(trace, \"done waiting for {} to be {}\", name, value);\n  }\n\n  void waitForCounterExists(const std::string& name) {\n    absl::MutexLock 
l(&mutex_);\n    ENVOY_LOG_MISC(trace, \"waiting for {} to exist\", name);\n    while (getCounterLockHeld(name) == nullptr) {\n      condvar_.Wait(&mutex_);\n    }\n    ENVOY_LOG_MISC(trace, \"done waiting for {} to exist\", name);\n  }\n\nprotected:\n  Stats::Counter* makeCounterInternal(StatName name, StatName tag_extracted_name,\n                                      const StatNameTagVector& stat_name_tags) override {\n    Stats::Counter* counter = new NotifyingCounter(\n        Stats::AllocatorImpl::makeCounterInternal(name, tag_extracted_name, stat_name_tags), mutex_,\n        condvar_);\n    {\n      absl::MutexLock l(&mutex_);\n      // Allow getting the counter directly from the allocator, since it's harder to\n      // signal when the counter has been added to a given stats store.\n      counters_.emplace(counter->name(), counter);\n      if (counter->name() == \"cluster_manager.cluster_removed\") {\n      }\n      condvar_.Signal();\n    }\n    return counter;\n  }\n\n  virtual Stats::Counter* getCounterLockHeld(const std::string& name)\n      ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {\n    auto it = counters_.find(name);\n    if (it != counters_.end()) {\n      return it->second;\n    }\n    return nullptr;\n  }\n\nprivate:\n  absl::flat_hash_map<std::string, Stats::Counter*> counters_;\n  absl::Mutex mutex_;\n  absl::CondVar condvar_;\n};\n\n/**\n * This is a variant of the isolated store that has locking across all operations so that it can\n * be used during the integration tests.\n */\nclass TestIsolatedStoreImpl : public StoreRoot {\npublic:\n  // Stats::Scope\n  Counter& counterFromStatNameWithTags(const StatName& name,\n                                       StatNameTagVectorOptConstRef tags) override {\n    Thread::LockGuard lock(lock_);\n    return store_.counterFromStatNameWithTags(name, tags);\n  }\n  Counter& counterFromString(const std::string& name) override {\n    Thread::LockGuard lock(lock_);\n    return store_.counterFromString(name);\n  
}\n  ScopePtr createScope(const std::string& name) override {\n    Thread::LockGuard lock(lock_);\n    return ScopePtr{new TestScopeWrapper(lock_, store_.createScope(name))};\n  }\n  void deliverHistogramToSinks(const Histogram&, uint64_t) override {}\n  Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                   Gauge::ImportMode import_mode) override {\n    Thread::LockGuard lock(lock_);\n    return store_.gaugeFromStatNameWithTags(name, tags, import_mode);\n  }\n  Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override {\n    Thread::LockGuard lock(lock_);\n    return store_.gaugeFromString(name, import_mode);\n  }\n  Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,\n                                           Histogram::Unit unit) override {\n    Thread::LockGuard lock(lock_);\n    return store_.histogramFromStatNameWithTags(name, tags, unit);\n  }\n  NullGaugeImpl& nullGauge(const std::string& name) override { return store_.nullGauge(name); }\n  Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override {\n    Thread::LockGuard lock(lock_);\n    return store_.histogramFromString(name, unit);\n  }\n  TextReadout& textReadoutFromStatNameWithTags(const StatName& name,\n                                               StatNameTagVectorOptConstRef tags) override {\n    Thread::LockGuard lock(lock_);\n    return store_.textReadoutFromStatNameWithTags(name, tags);\n  }\n  TextReadout& textReadoutFromString(const std::string& name) override {\n    Thread::LockGuard lock(lock_);\n    return store_.textReadoutFromString(name);\n  }\n  CounterOptConstRef findCounter(StatName name) const override {\n    Thread::LockGuard lock(lock_);\n    return store_.findCounter(name);\n  }\n  GaugeOptConstRef findGauge(StatName name) const override {\n    Thread::LockGuard lock(lock_);\n    return 
store_.findGauge(name);\n  }\n  HistogramOptConstRef findHistogram(StatName name) const override {\n    Thread::LockGuard lock(lock_);\n    return store_.findHistogram(name);\n  }\n  TextReadoutOptConstRef findTextReadout(StatName name) const override {\n    Thread::LockGuard lock(lock_);\n    return store_.findTextReadout(name);\n  }\n  const SymbolTable& constSymbolTable() const override { return store_.constSymbolTable(); }\n  SymbolTable& symbolTable() override { return store_.symbolTable(); }\n\n  // Stats::Store\n  std::vector<CounterSharedPtr> counters() const override {\n    Thread::LockGuard lock(lock_);\n    return store_.counters();\n  }\n  std::vector<GaugeSharedPtr> gauges() const override {\n    Thread::LockGuard lock(lock_);\n    return store_.gauges();\n  }\n  std::vector<ParentHistogramSharedPtr> histograms() const override {\n    Thread::LockGuard lock(lock_);\n    return store_.histograms();\n  }\n  std::vector<TextReadoutSharedPtr> textReadouts() const override {\n    Thread::LockGuard lock(lock_);\n    return store_.textReadouts();\n  }\n\n  bool iterate(const IterateFn<Counter>& fn) const override { return store_.iterate(fn); }\n  bool iterate(const IterateFn<Gauge>& fn) const override { return store_.iterate(fn); }\n  bool iterate(const IterateFn<Histogram>& fn) const override { return store_.iterate(fn); }\n  bool iterate(const IterateFn<TextReadout>& fn) const override { return store_.iterate(fn); }\n\n  // Stats::StoreRoot\n  void addSink(Sink&) override {}\n  void setTagProducer(TagProducerPtr&&) override {}\n  void setStatsMatcher(StatsMatcherPtr&&) override {}\n  void setHistogramSettings(HistogramSettingsConstPtr&&) override {}\n  void initializeThreading(Event::Dispatcher&, ThreadLocal::Instance&) override {}\n  void shutdownThreading() override {}\n  void mergeHistograms(PostMergeCb) override {}\n\nprivate:\n  mutable Thread::MutexBasicLockable lock_;\n  IsolatedStoreImpl store_;\n};\n\n} // namespace Stats\n\nclass 
IntegrationTestServer;\nusing IntegrationTestServerPtr = std::unique_ptr<IntegrationTestServer>;\n\n/**\n * Wrapper for running the real server for the purpose of integration tests.\n * This class is an Abstract Base Class and delegates ownership and management\n * of the actual envoy server to a derived class. See the documentation for\n * createAndRunEnvoyServer().\n */\nclass IntegrationTestServer : public Logger::Loggable<Logger::Id::testing>,\n                              public ListenerHooks,\n                              public IntegrationTestServerStats,\n                              public Server::ComponentFactory {\npublic:\n  static IntegrationTestServerPtr\n  create(const std::string& config_path, const Network::Address::IpVersion version,\n         std::function<void(IntegrationTestServer&)> on_server_ready_function,\n         std::function<void()> on_server_init_function, bool deterministic,\n         Event::TestTimeSystem& time_system, Api::Api& api,\n         bool defer_listener_finalization = false,\n         ProcessObjectOptRef process_object = absl::nullopt,\n         Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(),\n         uint32_t concurrency = 1, std::chrono::seconds drain_time = std::chrono::seconds(1),\n         Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual,\n         bool use_real_stats = false);\n  // Note that the derived class is responsible for tearing down the server in its\n  // destructor.\n  ~IntegrationTestServer() override;\n\n  void waitUntilListenersReady();\n\n  Server::DrainManagerImpl& drainManager() { return *drain_manager_; }\n  void setOnWorkerListenerAddedCb(std::function<void()> on_worker_listener_added) {\n    on_worker_listener_added_cb_ = std::move(on_worker_listener_added);\n  }\n  void setOnWorkerListenerRemovedCb(std::function<void()> on_worker_listener_removed) {\n    on_worker_listener_removed_cb_ = std::move(on_worker_listener_removed);\n  }\n  void 
setOnServerReadyCb(std::function<void(IntegrationTestServer&)> on_server_ready) {\n    on_server_ready_cb_ = std::move(on_server_ready);\n  }\n  void onRuntimeCreated() override {}\n\n  void start(const Network::Address::IpVersion version,\n             std::function<void()> on_server_init_function, bool deterministic,\n             bool defer_listener_finalization, ProcessObjectOptRef process_object,\n             Server::FieldValidationConfig validation_config, uint32_t concurrency,\n             std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy);\n\n  void waitForCounterEq(const std::string& name, uint64_t value,\n                        std::chrono::milliseconds timeout = std::chrono::milliseconds::zero(),\n                        Event::Dispatcher* dispatcher = nullptr) override {\n    ASSERT_TRUE(\n        TestUtility::waitForCounterEq(statStore(), name, value, time_system_, timeout, dispatcher));\n  }\n\n  void\n  waitForCounterGe(const std::string& name, uint64_t value,\n                   std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override {\n    ASSERT_TRUE(TestUtility::waitForCounterGe(statStore(), name, value, time_system_, timeout));\n  }\n\n  void\n  waitForGaugeEq(const std::string& name, uint64_t value,\n                 std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override {\n    ASSERT_TRUE(TestUtility::waitForGaugeEq(statStore(), name, value, time_system_, timeout));\n  }\n\n  void\n  waitForGaugeGe(const std::string& name, uint64_t value,\n                 std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override {\n    ASSERT_TRUE(TestUtility::waitForGaugeGe(statStore(), name, value, time_system_, timeout));\n  }\n\n  void waitForCounterExists(const std::string& name) override {\n    notifyingStatsAllocator().waitForCounterExists(name);\n  }\n\n  Stats::CounterSharedPtr counter(const std::string& name) override {\n    // When using the thread local 
store, only counters() is thread safe. This also allows us\n    // to test if a counter exists at all versus just defaulting to zero.\n    return TestUtility::findCounter(statStore(), name);\n  }\n\n  Stats::GaugeSharedPtr gauge(const std::string& name) override {\n    // When using the thread local store, only gauges() is thread safe. This also allows us\n    // to test if a counter exists at all versus just defaulting to zero.\n    return TestUtility::findGauge(statStore(), name);\n  }\n\n  std::vector<Stats::CounterSharedPtr> counters() override { return statStore().counters(); }\n\n  std::vector<Stats::GaugeSharedPtr> gauges() override { return statStore().gauges(); }\n\n  // ListenerHooks\n  void onWorkerListenerAdded() override;\n  void onWorkerListenerRemoved() override;\n\n  // Server::ComponentFactory\n  Server::DrainManagerPtr createDrainManager(Server::Instance& server) override {\n    drain_manager_ =\n        new Server::DrainManagerImpl(server, envoy::config::listener::v3::Listener::MODIFY_ONLY);\n    return Server::DrainManagerPtr{drain_manager_};\n  }\n  Runtime::LoaderPtr createRuntime(Server::Instance& server,\n                                   Server::Configuration::Initial& config) override {\n    return Server::InstanceUtil::createRuntime(server, config);\n  }\n\n  // Should not be called until createAndRunEnvoyServer() is called.\n  virtual Server::Instance& server() PURE;\n  virtual Stats::Store& statStore() PURE;\n  virtual Network::Address::InstanceConstSharedPtr adminAddress() PURE;\n  virtual Stats::NotifyingAllocatorImpl& notifyingStatsAllocator() PURE;\n  void useAdminInterfaceToQuit(bool use) { use_admin_interface_to_quit_ = use; }\n  bool useAdminInterfaceToQuit() { return use_admin_interface_to_quit_; }\n\nprotected:\n  IntegrationTestServer(Event::TestTimeSystem& time_system, Api::Api& api,\n                        const std::string& config_path)\n      : time_system_(time_system), api_(api), config_path_(config_path) {}\n\n  // 
Create the running envoy server. This function will call serverReady() when the virtual\n  // functions server(), statStore(), and adminAddress() may be called, but before the server\n  // has been started.\n  // The subclass is also responsible for tearing down this server in its destructor.\n  virtual void createAndRunEnvoyServer(OptionsImpl& options, Event::TimeSystem& time_system,\n                                       Network::Address::InstanceConstSharedPtr local_address,\n                                       ListenerHooks& hooks, Thread::BasicLockable& access_log_lock,\n                                       Server::ComponentFactory& component_factory,\n                                       Random::RandomGeneratorPtr&& random_generator,\n                                       ProcessObjectOptRef process_object) PURE;\n\n  // Will be called by subclass on server thread when the server is ready to be accessed. The\n  // server may not have been run yet, but all server access methods (server(), statStore(),\n  // adminAddress()) will be available.\n  void serverReady();\n\nprivate:\n  /**\n   * Runs the real server on a thread.\n   */\n  void threadRoutine(const Network::Address::IpVersion version, bool deterministic,\n                     ProcessObjectOptRef process_object,\n                     Server::FieldValidationConfig validation_config, uint32_t concurrency,\n                     std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy);\n\n  Event::TestTimeSystem& time_system_;\n  Api::Api& api_;\n  const std::string config_path_;\n  Thread::ThreadPtr thread_;\n  Thread::CondVar listeners_cv_;\n  Thread::MutexBasicLockable listeners_mutex_;\n  uint64_t pending_listeners_;\n  ConditionalInitializer server_set_;\n  Server::DrainManagerImpl* drain_manager_{};\n  std::function<void()> on_worker_listener_added_cb_;\n  std::function<void()> on_worker_listener_removed_cb_;\n  TcpDumpPtr tcp_dump_;\n  
std::function<void(IntegrationTestServer&)> on_server_ready_cb_;\n  bool use_admin_interface_to_quit_{};\n};\n\n// Default implementation of IntegrationTestServer\nclass IntegrationTestServerImpl : public IntegrationTestServer {\npublic:\n  IntegrationTestServerImpl(Event::TestTimeSystem& time_system, Api::Api& api,\n                            const std::string& config_path, bool real_stats = false);\n\n  ~IntegrationTestServerImpl() override;\n\n  Server::Instance& server() override {\n    RELEASE_ASSERT(server_ != nullptr, \"\");\n    return *server_;\n  }\n  Stats::Store& statStore() override {\n    RELEASE_ASSERT(stat_store_ != nullptr, \"\");\n    return *stat_store_;\n  }\n  Network::Address::InstanceConstSharedPtr adminAddress() override { return admin_address_; }\n\n  Stats::NotifyingAllocatorImpl& notifyingStatsAllocator() override {\n    auto* ret = dynamic_cast<Stats::NotifyingAllocatorImpl*>(stats_allocator_.get());\n    RELEASE_ASSERT(ret != nullptr,\n                   \"notifyingStatsAllocator() is not created when real_stats is true\");\n    return *ret;\n  }\n\nprivate:\n  void createAndRunEnvoyServer(OptionsImpl& options, Event::TimeSystem& time_system,\n                               Network::Address::InstanceConstSharedPtr local_address,\n                               ListenerHooks& hooks, Thread::BasicLockable& access_log_lock,\n                               Server::ComponentFactory& component_factory,\n                               Random::RandomGeneratorPtr&& random_generator,\n                               ProcessObjectOptRef process_object) override;\n\n  // Owned by this class. 
An owning pointer is not used because the actual allocation is done\n  // on a stack in a non-main thread.\n  Server::Instance* server_{};\n  Stats::Store* stat_store_{};\n  Network::Address::InstanceConstSharedPtr admin_address_;\n  absl::Notification server_gone_;\n  Stats::SymbolTableImpl symbol_table_;\n  std::unique_ptr<Stats::AllocatorImpl> stats_allocator_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/server_stats.h",
    "content": "#pragma once\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/stats/stats.h\"\n\nnamespace Envoy {\n\n// Abstract interface for IntegrationTestServer stats methods.\nclass IntegrationTestServerStats {\npublic:\n  virtual ~IntegrationTestServerStats() = default;\n\n  /**\n   * Wait for a counter to == a given value.\n   * @param name counter name.\n   * @param value target value.\n   * @param timeout amount of time to wait before asserting false, or 0 for no timeout.\n   * @param dispatcher the dispatcher to run non-blocking periodically during the wait.\n   */\n  virtual void\n  waitForCounterEq(const std::string& name, uint64_t value,\n                   std::chrono::milliseconds timeout = std::chrono::milliseconds::zero(),\n                   Event::Dispatcher* dispatcher = nullptr) PURE;\n\n  /**\n   * Wait for a counter to >= a given value.\n   * @param name counter name.\n   * @param value target value.\n   * @param timeout amount of time to wait before asserting false, or 0 for no timeout.\n   */\n  virtual void\n  waitForCounterGe(const std::string& name, uint64_t value,\n                   std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE;\n\n  /**\n   * Wait for a counter to exist.\n   * @param name counter name.\n   */\n  virtual void waitForCounterExists(const std::string& name) PURE;\n\n  /**\n   * Wait for a gauge to >= a given value.\n   * @param name gauge name.\n   * @param value target value.\n   * @param timeout amount of time to wait before asserting false, or 0 for no timeout.\n   */\n  virtual void\n  waitForGaugeGe(const std::string& name, uint64_t value,\n                 std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE;\n\n  /**\n   * Wait for a gauge to == a given value.\n   * @param name gauge name.\n   * @param value target value.\n   * @param timeout amount of time to wait before asserting false, or 0 for no timeout.\n   */\n  virtual void\n  
waitForGaugeEq(const std::string& name, uint64_t value,\n                 std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE;\n\n  /**\n   * Counter lookup. This is not thread safe, since we don't get a consistent\n   * snapshot, uses counters() instead for this behavior.\n   * @param name counter name.\n   * @return Stats::CounterSharedPtr counter if it exists, otherwise nullptr.\n   */\n  virtual Stats::CounterSharedPtr counter(const std::string& name) PURE;\n\n  /**\n   * Gauge lookup. This is not thread safe, since we don't get a consistent\n   * snapshot, uses gauges() instead for this behavior.\n   * @param name gauge name.\n   * @return Stats::GaugeSharedPtr gauge if it exists, otherwise nullptr.\n   */\n  virtual Stats::GaugeSharedPtr gauge(const std::string& name) PURE;\n\n  /**\n   * @return std::vector<Stats::CounterSharedPtr> snapshot of server counters.\n   */\n  virtual std::vector<Stats::CounterSharedPtr> counters() PURE;\n\n  /**\n   * @return std::vector<Stats::GaugeSharedPtr> snapshot of server counters.\n   */\n  virtual std::vector<Stats::GaugeSharedPtr> gauges() PURE;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/socket_interface_integration_test.cc",
    "content": "#include \"common/buffer/buffer_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/socket_interface.h\"\n\n#include \"test/integration/integration.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass SocketInterfaceIntegrationTest : public BaseIntegrationTest,\n                                       public testing::TestWithParam<Network::Address::IpVersion> {\npublic:\n  SocketInterfaceIntegrationTest() : BaseIntegrationTest(GetParam(), config()) {\n    use_lds_ = false;\n  };\n\n  static std::string config() {\n    // At least one empty filter chain needs to be specified.\n    return absl::StrCat(echoConfig(), R\"EOF(\nbootstrap_extensions:\n  - name: envoy.extensions.network.socket_interface.default_socket_interface\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.network.socket_interface.v3.DefaultSocketInterface\ndefault_socket_interface: \"envoy.extensions.network.socket_interface.default_socket_interface\"\n    )EOF\");\n  }\n  static std::string echoConfig() {\n    return absl::StrCat(ConfigHelper::baseConfig(), R\"EOF(\n    filter_chains:\n      filters:\n        name: ratelimit\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.rate_limit.v2.RateLimit\n          domain: foo\n          stats_prefix: name\n          descriptors: [{\"key\": \"foo\", \"value\": \"bar\"}]\n      filters:\n        name: envoy.filters.network.echo\n      )EOF\");\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, SocketInterfaceIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(SocketInterfaceIntegrationTest, Basic) {\n  BaseIntegrationTest::initialize();\n  const Network::SocketInterface* factory = Network::socketInterface(\n      
\"envoy.extensions.network.socket_interface.default_socket_interface\");\n  ASSERT_TRUE(Network::SocketInterfaceSingleton::getExisting() == factory);\n\n  std::string response;\n  auto connection = createConnectionDriver(\n      lookupPort(\"listener_0\"), \"hello\",\n      [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void {\n        response.append(data.toString());\n        conn.close(Network::ConnectionCloseType::FlushWrite);\n      });\n  connection->run();\n  EXPECT_EQ(\"hello\", response);\n}\n\nTEST_P(SocketInterfaceIntegrationTest, AddressWithSocketInterface) {\n  BaseIntegrationTest::initialize();\n\n  ConnectionStatusCallbacks connect_callbacks_;\n  Network::ClientConnectionPtr client_;\n  const Network::SocketInterface* sock_interface = Network::socketInterface(\n      \"envoy.extensions.network.socket_interface.default_socket_interface\");\n  Network::Address::InstanceConstSharedPtr address =\n      std::make_shared<Network::Address::Ipv4Instance>(\n          Network::Test::getLoopbackAddressUrlString(Network::Address::IpVersion::v4),\n          lookupPort(\"listener_0\"), sock_interface);\n\n  client_ = dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(),\n                                                Network::Test::createRawBufferSocket(), nullptr);\n\n  client_->addConnectionCallbacks(connect_callbacks_);\n  client_->connect();\n  while (!connect_callbacks_.connected() && !connect_callbacks_.closed()) {\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  client_->close(Network::ConnectionCloseType::FlushWrite);\n}\n\n// Test that connecting to internal address will crash.\n// TODO(lambdai): Add internal connection implementation to enable the connection creation.\nTEST_P(SocketInterfaceIntegrationTest, InternalAddressWithSocketInterface) {\n  BaseIntegrationTest::initialize();\n\n  ConnectionStatusCallbacks connect_callbacks_;\n  Network::ClientConnectionPtr 
client_;\n  const Network::SocketInterface* sock_interface = Network::socketInterface(\n      \"envoy.extensions.network.socket_interface.default_socket_interface\");\n  Network::Address::InstanceConstSharedPtr address =\n      std::make_shared<Network::Address::EnvoyInternalInstance>(\"listener_0\", sock_interface);\n\n  ASSERT_DEATH(client_ = dispatcher_->createClientConnection(\n                   address, Network::Address::InstanceConstSharedPtr(),\n                   Network::Test::createRawBufferSocket(), nullptr),\n               \"panic: not implemented\");\n}\n\n// Test that recv from internal address will crash.\n// TODO(lambdai): Add internal socket implementation to enable the io path.\nTEST_P(SocketInterfaceIntegrationTest, UdpRecvFromInternalAddressWithSocketInterface) {\n  BaseIntegrationTest::initialize();\n\n  const Network::SocketInterface* sock_interface = Network::socketInterface(\n      \"envoy.extensions.network.socket_interface.default_socket_interface\");\n  Network::Address::InstanceConstSharedPtr address =\n      std::make_shared<Network::Address::EnvoyInternalInstance>(\"listener_0\", sock_interface);\n\n  ASSERT_DEATH(std::make_unique<Network::SocketImpl>(Network::Socket::Type::Datagram, address), \"\");\n}\n\n// Test that send to internal address will return io error.\nTEST_P(SocketInterfaceIntegrationTest, UdpSendToInternalAddressWithSocketInterface) {\n  BaseIntegrationTest::initialize();\n\n  const Network::SocketInterface* sock_interface = Network::socketInterface(\n      \"envoy.extensions.network.socket_interface.default_socket_interface\");\n  Network::Address::InstanceConstSharedPtr peer_internal_address =\n      std::make_shared<Network::Address::EnvoyInternalInstance>(\"listener_0\", sock_interface);\n  Network::Address::InstanceConstSharedPtr local_valid_address =\n      Network::Test::getCanonicalLoopbackAddress(version_);\n\n  auto socket =\n      std::make_unique<Network::SocketImpl>(Network::Socket::Type::Datagram, 
local_valid_address);\n\n  Buffer::OwnedImpl buffer;\n  Buffer::RawSlice iovec;\n  buffer.reserve(100, &iovec, 1);\n\n  auto result =\n      socket->ioHandle().sendmsg(&iovec, 1, 0, local_valid_address->ip(), *peer_internal_address);\n  ASSERT_FALSE(result.ok());\n  ASSERT_EQ(result.err_->getErrorCode(), Api::IoError::IoErrorCode::NoSupport);\n}\n} // namespace\n} // namespace Envoy"
  },
  {
    "path": "test/integration/ssl_utility.cc",
    "content": "#include \"test/integration/ssl_utility.h\"\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/http/utility.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/config/utility.h\"\n#include \"test/integration/server.h\"\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Ssl {\n\nNetwork::TransportSocketFactoryPtr\ncreateClientSslTransportSocketFactory(const ClientSslTransportOptions& options,\n                                      ContextManager& context_manager, Api::Api& api) {\n  std::string yaml_plain = R\"EOF(\n  common_tls_context:\n    validation_context:\n      trusted_ca:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/cacert.pem\"\n)EOF\";\n  if (options.client_ecdsa_cert_) {\n    yaml_plain += R\"EOF(\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/client_ecdsacert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/client_ecdsakey.pem\"\n)EOF\";\n  } else {\n    yaml_plain += R\"EOF(\n    tls_certificates:\n      certificate_chain:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/clientcert.pem\"\n      private_key:\n        filename: \"{{ test_rundir }}/test/config/integration/certs/clientkey.pem\"\n)EOF\";\n  }\n\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(yaml_plain), tls_context);\n  auto* common_context = 
tls_context.mutable_common_tls_context();\n\n  if (options.alpn_) {\n    common_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http2);\n    common_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11);\n  }\n  if (options.san_) {\n    common_context->mutable_validation_context()\n        ->add_hidden_envoy_deprecated_verify_subject_alt_name(\"spiffe://lyft.com/backend-team\");\n  }\n  for (const std::string& cipher_suite : options.cipher_suites_) {\n    common_context->mutable_tls_params()->add_cipher_suites(cipher_suite);\n  }\n  if (!options.sni_.empty()) {\n    tls_context.set_sni(options.sni_);\n  }\n\n  common_context->mutable_tls_params()->set_tls_minimum_protocol_version(options.tls_version_);\n  common_context->mutable_tls_params()->set_tls_maximum_protocol_version(options.tls_version_);\n\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> mock_factory_ctx;\n  ON_CALL(mock_factory_ctx, api()).WillByDefault(ReturnRef(api));\n  auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ClientContextConfigImpl>(\n      tls_context, options.sigalgs_, mock_factory_ctx);\n  static auto* client_stats_store = new Stats::TestIsolatedStoreImpl();\n  return Network::TransportSocketFactoryPtr{\n      new Extensions::TransportSockets::Tls::ClientSslSocketFactory(std::move(cfg), context_manager,\n                                                                    *client_stats_store)};\n}\n\nNetwork::TransportSocketFactoryPtr createUpstreamSslContext(ContextManager& context_manager,\n                                                            Api::Api& api) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  ConfigHelper::initializeTls({}, *tls_context.mutable_common_tls_context());\n\n  NiceMock<Server::Configuration::MockTransportSocketFactoryContext> mock_factory_ctx;\n  ON_CALL(mock_factory_ctx, api()).WillByDefault(ReturnRef(api));\n  auto cfg = 
std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n      tls_context, mock_factory_ctx);\n\n  static Stats::Scope* upstream_stats_store = new Stats::TestIsolatedStoreImpl();\n  return std::make_unique<Extensions::TransportSockets::Tls::ServerSslSocketFactory>(\n      std::move(cfg), context_manager, *upstream_stats_store, std::vector<std::string>{});\n}\n\nNetwork::TransportSocketFactoryPtr createFakeUpstreamSslContext(\n    const std::string& upstream_cert_name, ContextManager& context_manager,\n    Server::Configuration::TransportSocketFactoryContext& factory_context) {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  auto* common_tls_context = tls_context.mutable_common_tls_context();\n  auto* tls_cert = common_tls_context->add_tls_certificates();\n  tls_cert->mutable_certificate_chain()->set_filename(TestEnvironment::runfilesPath(\n      fmt::format(\"test/config/integration/certs/{}cert.pem\", upstream_cert_name)));\n  tls_cert->mutable_private_key()->set_filename(TestEnvironment::runfilesPath(\n      fmt::format(\"test/config/integration/certs/{}key.pem\", upstream_cert_name)));\n\n  auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n      tls_context, factory_context);\n\n  static Stats::Scope* upstream_stats_store = new Stats::IsolatedStoreImpl();\n  return std::make_unique<Extensions::TransportSockets::Tls::ServerSslSocketFactory>(\n      std::move(cfg), context_manager, *upstream_stats_store, std::vector<std::string>{});\n}\nNetwork::Address::InstanceConstSharedPtr getSslAddress(const Network::Address::IpVersion& version,\n                                                       int port) {\n  std::string url =\n      \"tcp://\" + Network::Test::getLoopbackAddressUrlString(version) + \":\" + std::to_string(port);\n  return Network::Utility::resolveUrl(url);\n}\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/ssl_utility.h",
    "content": "#pragma once\n\n#include \"envoy/api/api.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/ssl/context_manager.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nstruct ClientSslTransportOptions {\n  ClientSslTransportOptions& setAlpn(bool alpn) {\n    alpn_ = alpn;\n    return *this;\n  }\n\n  ClientSslTransportOptions& setSan(bool san) {\n    san_ = san;\n    return *this;\n  }\n\n  ClientSslTransportOptions& setClientEcdsaCert(bool client_ecdsa_cert) {\n    client_ecdsa_cert_ = client_ecdsa_cert;\n    return *this;\n  }\n\n  ClientSslTransportOptions& setCipherSuites(const std::vector<std::string>& cipher_suites) {\n    cipher_suites_ = cipher_suites;\n    return *this;\n  }\n\n  ClientSslTransportOptions& setSigningAlgorithmsForTest(const std::string& sigalgs) {\n    sigalgs_ = sigalgs;\n    return *this;\n  }\n\n  ClientSslTransportOptions& setTlsVersion(\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TlsProtocol tls_version) {\n    tls_version_ = tls_version;\n    return *this;\n  }\n\n  ClientSslTransportOptions& setSni(absl::string_view sni) {\n    sni_ = std::string(sni);\n    return *this;\n  }\n\n  bool alpn_{};\n  bool san_{};\n  bool client_ecdsa_cert_{};\n  std::vector<std::string> cipher_suites_{};\n  std::string sigalgs_;\n  std::string sni_;\n  envoy::extensions::transport_sockets::tls::v3::TlsParameters::TlsProtocol tls_version_{\n      envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLS_AUTO};\n};\n\nNetwork::TransportSocketFactoryPtr\ncreateClientSslTransportSocketFactory(const ClientSslTransportOptions& options,\n                                      ContextManager& context_manager, Api::Api& api);\n\nNetwork::TransportSocketFactoryPtr createUpstreamSslContext(ContextManager& context_manager,\n                                           
                 Api::Api& api);\n\nNetwork::TransportSocketFactoryPtr\ncreateFakeUpstreamSslContext(const std::string& upstream_cert_name, ContextManager& context_manager,\n                             Server::Configuration::TransportSocketFactoryContext& factory_context);\n\nNetwork::Address::InstanceConstSharedPtr getSslAddress(const Network::Address::IpVersion& version,\n                                                       int port);\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/stats_integration_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/stats/stats.h\"\n\n#include \"common/config/well_known_names.h\"\n#include \"common/memory/stats.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/config/utility.h\"\n#include \"test/integration/integration.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass StatsIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                             public BaseIntegrationTest {\npublic:\n  StatsIntegrationTest() : BaseIntegrationTest(GetParam()) {}\n\n  void initialize() override { BaseIntegrationTest::initialize(); }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, StatsIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(StatsIntegrationTest, WithDefaultConfig) {\n  initialize();\n\n  auto live = test_server_->gauge(\"server.live\");\n  EXPECT_EQ(live->value(), 1);\n  EXPECT_EQ(live->tags().size(), 0);\n\n  auto counter = test_server_->counter(\"http.config_test.rq_total\");\n  EXPECT_EQ(counter->tags().size(), 1);\n  EXPECT_EQ(counter->tags()[0].name_, \"envoy.http_conn_manager_prefix\");\n  EXPECT_EQ(counter->tags()[0].value_, \"config_test\");\n}\n\nTEST_P(StatsIntegrationTest, WithoutDefaultTagExtractors) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    bootstrap.mutable_stats_config()->mutable_use_all_default_tags()->set_value(false);\n  });\n  initialize();\n\n  auto counter = test_server_->counter(\"http.config_test.rq_total\");\n  EXPECT_EQ(counter->tags().size(), 0);\n}\n\nTEST_P(StatsIntegrationTest, WithDefaultTagExtractors) {\n  
config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    bootstrap.mutable_stats_config()->mutable_use_all_default_tags()->set_value(true);\n  });\n  initialize();\n\n  auto counter = test_server_->counter(\"http.config_test.rq_total\");\n  EXPECT_EQ(counter->tags().size(), 1);\n  EXPECT_EQ(counter->tags()[0].name_, \"envoy.http_conn_manager_prefix\");\n  EXPECT_EQ(counter->tags()[0].value_, \"config_test\");\n}\n\n// Given: a. use_all_default_tags = false, b. a tag specifier has the same name\n// as a default tag extractor name but also has use defined regex.\n// In this case we don't use default tag extractors (since use_all_default_tags\n// is set to false explicitly) and just treat the tag specifier as a normal tag\n// specifier having use defined regex.\nTEST_P(StatsIntegrationTest, WithDefaultTagExtractorNameWithUserDefinedRegex) {\n  std::string tag_name = Config::TagNames::get().HTTP_CONN_MANAGER_PREFIX;\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    bootstrap.mutable_stats_config()->mutable_use_all_default_tags()->set_value(false);\n    auto tag_specifier = bootstrap.mutable_stats_config()->mutable_stats_tags()->Add();\n    tag_specifier->set_tag_name(tag_name);\n    tag_specifier->set_regex(\"((.*))\");\n  });\n  initialize();\n\n  auto counter = test_server_->counter(\"http.config_test.rq_total\");\n  EXPECT_EQ(counter->tags().size(), 1);\n  EXPECT_EQ(counter->tags()[0].name_, tag_name);\n  EXPECT_EQ(counter->tags()[0].value_, \"http.config_test.rq_total\");\n}\n\nTEST_P(StatsIntegrationTest, WithTagSpecifierMissingTagValue) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    bootstrap.mutable_stats_config()->mutable_use_all_default_tags()->set_value(false);\n    auto tag_specifier = bootstrap.mutable_stats_config()->mutable_stats_tags()->Add();\n    
tag_specifier->set_tag_name(\"envoy.http_conn_manager_prefix\");\n  });\n  initialize();\n\n  auto counter = test_server_->counter(\"http.config_test.rq_total\");\n  EXPECT_EQ(counter->tags().size(), 1);\n  EXPECT_EQ(counter->tags()[0].name_, \"envoy.http_conn_manager_prefix\");\n  EXPECT_EQ(counter->tags()[0].value_, \"config_test\");\n}\n\nTEST_P(StatsIntegrationTest, WithTagSpecifierWithRegex) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    bootstrap.mutable_stats_config()->mutable_use_all_default_tags()->set_value(false);\n    auto tag_specifier = bootstrap.mutable_stats_config()->mutable_stats_tags()->Add();\n    tag_specifier->set_tag_name(\"my.http_conn_manager_prefix\");\n    tag_specifier->set_regex(R\"(^(?:|listener(?=\\.).*?\\.)http\\.((.*?)\\.))\");\n  });\n  initialize();\n\n  auto counter = test_server_->counter(\"http.config_test.rq_total\");\n  EXPECT_EQ(counter->tags().size(), 1);\n  EXPECT_EQ(counter->tags()[0].name_, \"my.http_conn_manager_prefix\");\n  EXPECT_EQ(counter->tags()[0].value_, \"config_test\");\n}\n\nTEST_P(StatsIntegrationTest, WithTagSpecifierWithFixedValue) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto tag_specifier = bootstrap.mutable_stats_config()->mutable_stats_tags()->Add();\n    tag_specifier->set_tag_name(\"test.x\");\n    tag_specifier->set_fixed_value(\"xxx\");\n  });\n  initialize();\n\n  auto live = test_server_->gauge(\"server.live\");\n  EXPECT_EQ(live->value(), 1);\n  EXPECT_EQ(live->tags().size(), 1);\n  EXPECT_EQ(live->tags()[0].name_, \"test.x\");\n  EXPECT_EQ(live->tags()[0].value_, \"xxx\");\n}\n\n// TODO(cmluciano) Refactor once https://github.com/envoyproxy/envoy/issues/5624 is solved\n// TODO(cmluciano) Add options to measure multiple workers & without stats\n// This class itself does not add additional tests. 
It is a helper for use in other tests measuring\n// cluster overhead.\nclass ClusterMemoryTestHelper : public BaseIntegrationTest {\npublic:\n  ClusterMemoryTestHelper()\n      : BaseIntegrationTest(testing::TestWithParam<Network::Address::IpVersion>::GetParam()) {\n    use_real_stats_ = true;\n  }\n\n  static size_t computeMemoryDelta(int initial_num_clusters, int initial_num_hosts,\n                                   int final_num_clusters, int final_num_hosts, bool allow_stats) {\n    // Use the same number of fake upstreams for both helpers in order to exclude memory overhead\n    // added by the fake upstreams.\n    int fake_upstreams_count = 1 + final_num_clusters * final_num_hosts;\n\n    size_t initial_memory;\n    {\n      ClusterMemoryTestHelper helper;\n      helper.setUpstreamCount(fake_upstreams_count);\n      helper.skipPortUsageValidation();\n      initial_memory =\n          helper.clusterMemoryHelper(initial_num_clusters, initial_num_hosts, allow_stats);\n    }\n\n    ClusterMemoryTestHelper helper;\n    helper.setUpstreamCount(fake_upstreams_count);\n    return helper.clusterMemoryHelper(final_num_clusters, final_num_hosts, allow_stats) -\n           initial_memory;\n  }\n\nprivate:\n  /**\n   * @param num_clusters number of clusters appended to bootstrap_config\n   * @param allow_stats if false, enable set_reject_all in stats_config\n   * @return size_t the total memory allocated\n   */\n  size_t clusterMemoryHelper(int num_clusters, int num_hosts, bool allow_stats) {\n    Stats::TestUtil::MemoryTest memory_test;\n    config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      if (!allow_stats) {\n        bootstrap.mutable_stats_config()->mutable_stats_matcher()->set_reject_all(true);\n      }\n      for (int i = 1; i < num_clusters; ++i) {\n        auto* cluster = bootstrap.mutable_static_resources()->add_clusters();\n        cluster->set_name(absl::StrCat(\"cluster_\", i));\n      }\n\n      for (int i = 0; 
i < num_clusters; ++i) {\n        auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(i);\n        for (int j = 0; j < num_hosts; ++j) {\n          auto* host = cluster->mutable_load_assignment()\n                           ->mutable_endpoints(0)\n                           ->add_lb_endpoints()\n                           ->mutable_endpoint()\n                           ->mutable_address();\n          auto* socket_address = host->mutable_socket_address();\n          socket_address->set_protocol(envoy::config::core::v3::SocketAddress::TCP);\n          socket_address->set_address(\"0.0.0.0\");\n        }\n      }\n    });\n    initialize();\n\n    return memory_test.consumedBytes();\n  }\n};\n\nclass ClusterMemoryTestRunner : public testing::TestWithParam<Network::Address::IpVersion> {\nprotected:\n  ClusterMemoryTestRunner()\n      : ip_version_(testing::TestWithParam<Network::Address::IpVersion>::GetParam()) {}\n\n  Network::Address::IpVersion ip_version_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ClusterMemoryTestRunner,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSize) {\n  // A unique instance of ClusterMemoryTest allows for multiple runs of Envoy with\n  // differing configuration. This is necessary for measuring the memory consumption\n  // between the different instances within the same test.\n  const size_t m100 = ClusterMemoryTestHelper::computeMemoryDelta(1, 0, 101, 0, true);\n  const size_t m_per_cluster = (m100) / 100;\n\n  // Note: if you are increasing this golden value because you are adding a\n  // stat, please confirm that this will be generally useful to most Envoy\n  // users. 
Otherwise you are adding to the per-cluster memory overhead, which\n  // will be significant for Envoy installations that are massively\n  // multi-tenant.\n  //\n  // History of golden values:\n  //\n  // Date        PR       Bytes Per Cluster   Notes\n  //                      exact upper-bound\n  // ----------  -----    -----------------   -----\n  // 2019/08/09  7882     35489       36000   Initial version\n  // 2019/09/02  8118     34585       34500   Share symbol-tables in cluster/host stats.\n  // 2019/09/16  8100     34585       34500   Add transport socket matcher in cluster.\n  // 2019/09/25  8226     34777       35000   dns: enable dns failure refresh rate configuration\n  // 2019/09/30  8354     34969       35000   Implement transport socket match.\n  // 2019/10/17  8537     34966       35000   add new enum value HTTP3\n  // 2019/10/17  8484     34998       35000   stats: add unit support to histogram\n  // 2019/11/01  8859     35221       36000   build: switch to libc++ by default\n  // 2019/11/15  9040     35029       35500   build: update protobuf to 3.10.1\n  // 2019/11/15  9031     35061       35500   upstream: track whether cluster is local\n  // 2019/12/10  8779     35053       35000   use var-length coding for name lengths\n  // 2020/01/07  9069     35548       35700   upstream: Implement retry concurrency budgets\n  // 2020/01/07  9564     35580       36000   RefcountPtr for CentralCache.\n  // 2020/01/09  8889     35644       36000   api: add UpstreamHttpProtocolOptions message\n  // 2019/01/09  9227     35772       36500   router: per-cluster histograms w/ timeout budget\n  // 2020/01/12  9633     35932       36500   config: support recovery of original message when\n  //                                          upgrading.\n  // 2020/03/16  9964     36220       36800   http2: support custom SETTINGS parameters.\n  // 2020/03/24  10501    36300       36800   upstream: upstream_rq_retry_limit_exceeded.\n  // 2020/04/02  10624    35564       
36000   Use 100 clusters rather than 1000 to avoid timeouts\n  // 2020/04/07  10661    35557       36000   fix clang tidy on master\n  // 2020/04/23  10531    36281       36800   http: max stream duration upstream support.\n  // 2020/04/23  10661    36537       37000   per-listener connection limits\n  // 2020/05/05  10908    36345       36800   router: add InternalRedirectPolicy and predicate\n  // 2020/05/13  10531    36537       36800   Refactor resource manager\n  // 2020/05/20  11223    36603       36800   Add primary clusters tracking to cluster manager.\n  // 2020/06/10  11561    36603       36923   Make upstreams pluggable\n  // 2020/06/29  11751    36827       38000   Improve time complexity of removing callback handle.\n  // 2020/07/07  11252    37083       38000   Introduce Least Request LB active request bias config\n  // 2020/07/15  11748    37115       38000   Stream error on invalid messaging\n  // 2020/07/20  11559    36859       38000   stats: add histograms for request/response headers\n  //                                          and body sizes.\n  // 2020/07/21  12034    36923       38000   Add configurable histogram buckets.\n  // 2020/07/31  12035    37114       38000   Init manager store unready targets in hash map.\n  // 2020/08/10  12275    37061       38000   Re-organize tls histogram maps to improve continuity.\n  // 2020/08/11  12202    37061       38500   router: add new retry back-off strategy\n  // 2020/09/11  12973                38993   upstream: predictive prefetch\n  // 2020/10/02  13251                39326   switch to google tcmalloc\n\n  // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI\n  // 'release' builds, where we control the platform and tool-chain. So you\n  // will need to find the correct value only after failing CI and looking\n  // at the logs.\n  //\n  // On a local clang8/libstdc++/linux flow, the memory usage was observed in\n  // June 2019 to be 64 bytes higher than it is in CI/release. 
Your mileage may\n  // vary.\n  //\n  // If you encounter a failure here, please see\n  // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests\n  // for details on how to fix.\n  //\n  // We only run the exact test for ipv6 because ipv4 in some cases may allocate a\n  // different number of bytes. We still run the approximate test.\n  if (ip_version_ != Network::Address::IpVersion::v6) {\n    // https://github.com/envoyproxy/envoy/issues/12209\n    // EXPECT_MEMORY_EQ(m_per_cluster, 37061);\n  }\n  EXPECT_MEMORY_LE(m_per_cluster, 39350); // Round up to allow platform variations.\n}\n\nTEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) {\n  // A unique instance of ClusterMemoryTest allows for multiple runs of Envoy with\n  // differing configuration. This is necessary for measuring the memory consumption\n  // between the different instances within the same test.\n  const size_t m100 = ClusterMemoryTestHelper::computeMemoryDelta(1, 1, 1, 101, true);\n  const size_t m_per_host = (m100) / 100;\n\n  // Note: if you are increasing this golden value because you are adding a\n  // stat, please confirm that this will be generally useful to most Envoy\n  // users. 
Otherwise you are adding to the per-host memory overhead, which\n  // will be significant for Envoy installations configured to talk to large\n  // numbers of hosts.\n  //\n  // History of golden values:\n  //\n  // Date        PR       Bytes Per Host      Notes\n  //                      exact upper-bound\n  // ----------  -----    -----------------   -----\n  // 2019/09/09  8189     2739         3100   Initial per-host memory snapshot\n  // 2019/09/10  8216     1283         1315   Use primitive counters for host stats\n  // 2019/11/01  8859     1299         1315   build: switch to libc++ by default\n  // 2019/11/12  8998     1299         1350   test: adjust memory limit for macOS\n  // 2019/11/15  9040     1283         1350   build: update protobuf to 3.10.1\n  // 2020/01/13  9663     1619         1655   api: deprecate hosts in Cluster.\n  // 2020/02/13  10042    1363         1655   Metadata object are shared across different clusters\n  //                                          and hosts.\n  // 2020/04/02  10624    1380         1655   Use 100 clusters rather than 1000 to avoid timeouts\n\n  // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI\n  // 'release' builds, where we control the platform and tool-chain. So you\n  // will need to find the correct value only after failing CI and looking\n  // at the logs.\n  //\n  // If you encounter a failure here, please see\n  // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests\n  // for details on how to fix.\n  //\n  // We only run the exact test for ipv6 because ipv4 in some cases may allocate a\n  // different number of bytes. We still run the approximate test.\n  if (ip_version_ != Network::Address::IpVersion::v6) {\n    // https://github.com/envoyproxy/envoy/issues/12209\n    // EXPECT_MEMORY_EQ(m_per_host, 1380);\n  }\n  EXPECT_MEMORY_LE(m_per_host, 1800); // Round up to allow platform variations.\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/tcp_conn_pool_integration_test.cc",
    "content": "#include <list>\n\n#include \"envoy/server/filter_config.h\"\n\n#include \"test/integration/integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/server/utility.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace {\n\nstd::string tcp_conn_pool_config;\n\n// Trivial Filter that obtains connections from a TCP connection pool each time onData is called\n// and sends the data to the resulting upstream. The upstream's response is sent directly to\n// the downstream.\nclass TestFilter : public Network::ReadFilter {\npublic:\n  TestFilter(Upstream::ClusterManager& cluster_manager) : cluster_manager_(cluster_manager) {}\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override {\n    UNREFERENCED_PARAMETER(end_stream);\n\n    Tcp::ConnectionPool::Instance* pool = cluster_manager_.tcpConnPoolForCluster(\n        \"cluster_0\", Upstream::ResourcePriority::Default, nullptr);\n    ASSERT(pool != nullptr);\n\n    requests_.emplace_back(*this, data);\n    pool->newConnection(requests_.back());\n\n    ASSERT(data.length() == 0);\n    return Network::FilterStatus::StopIteration;\n  }\n  Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; }\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n  }\n\nprivate:\n  class Request : public Tcp::ConnectionPool::Callbacks,\n                  public Tcp::ConnectionPool::UpstreamCallbacks {\n  public:\n    Request(TestFilter& parent, Buffer::Instance& data) : parent_(parent) { data_.move(data); }\n\n    // Tcp::ConnectionPool::Callbacks\n    void onPoolFailure(ConnectionPool::PoolFailureReason,\n                       Upstream::HostDescriptionConstSharedPtr) override {\n      ASSERT(false);\n    }\n\n    void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn,\n            
         Upstream::HostDescriptionConstSharedPtr) override {\n      upstream_ = std::move(conn);\n\n      upstream_->addUpstreamCallbacks(*this);\n      upstream_->connection().write(data_, false);\n    }\n\n    // Tcp::ConnectionPool::UpstreamCallbacks\n    void onUpstreamData(Buffer::Instance& data, bool end_stream) override {\n      UNREFERENCED_PARAMETER(end_stream);\n\n      Network::Connection& downstream = parent_.read_callbacks_->connection();\n      downstream.write(data, false);\n\n      upstream_.reset();\n    }\n    void onEvent(Network::ConnectionEvent) override {}\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    TestFilter& parent_;\n    Buffer::OwnedImpl data_;\n    Tcp::ConnectionPool::ConnectionDataPtr upstream_;\n  };\n\n  Upstream::ClusterManager& cluster_manager_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  std::list<Request> requests_;\n};\n\nclass TestFilterConfigFactory : public Server::Configuration::NamedNetworkFilterConfigFactory {\npublic:\n  // NamedNetworkFilterConfigFactory\n  Network::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message&,\n                               Server::Configuration::FactoryContext& context) override {\n    return [&context](Network::FilterManager& filter_manager) -> void {\n      filter_manager.addReadFilter(std::make_shared<TestFilter>(context.clusterManager()));\n    };\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};\n  }\n\n  std::string name() const override { CONSTRUCT_ON_FIRST_USE(std::string, \"envoy.test.router\"); }\n  bool isTerminalFilter() override { return true; }\n};\n\n} // namespace\n\nclass TcpConnPoolIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n             
                      public BaseIntegrationTest {\npublic:\n  TcpConnPoolIntegrationTest()\n      : BaseIntegrationTest(GetParam(), tcp_conn_pool_config), filter_resolver_(config_factory_) {}\n\n  // Called once by the gtest framework before any tests are run.\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    tcp_conn_pool_config = absl::StrCat(ConfigHelper::baseConfig(), R\"EOF(\n    filter_chains:\n      - filters:\n        - name: envoy.test.router\n          config:\n      )EOF\");\n  }\n\n  // Initializer for individual tests.\n  void SetUp() override { BaseIntegrationTest::initialize(); }\n\nprivate:\n  TestFilterConfigFactory config_factory_;\n  Registry::InjectFactory<Server::Configuration::NamedNetworkFilterConfigFactory> filter_resolver_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, TcpConnPoolIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(TcpConnPoolIntegrationTest, SingleRequest) {\n  std::string request(\"request\");\n  std::string response(\"response\");\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n  ASSERT_TRUE(tcp_client->write(request));\n\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(request.size()));\n  ASSERT_TRUE(fake_upstream_connection->write(response));\n\n  tcp_client->waitForData(response);\n  tcp_client->close();\n}\n\nTEST_P(TcpConnPoolIntegrationTest, MultipleRequests) {\n  std::string request1(\"request1\");\n  std::string request2(\"request2\");\n  std::string response1(\"response1\");\n  std::string response2(\"response2\");\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"listener_0\"));\n\n  // send request 1\n  ASSERT_TRUE(tcp_client->write(request1));\n  
FakeRawConnectionPtr fake_upstream_connection1;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection1));\n  std::string data;\n  ASSERT_TRUE(fake_upstream_connection1->waitForData(request1.size(), &data));\n  EXPECT_EQ(request1, data);\n\n  // send request 2\n  ASSERT_TRUE(tcp_client->write(request2));\n  FakeRawConnectionPtr fake_upstream_connection2;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection2));\n  ASSERT_TRUE(fake_upstream_connection2->waitForData(request2.size(), &data));\n  EXPECT_EQ(request2, data);\n\n  // send response 2\n  ASSERT_TRUE(fake_upstream_connection2->write(response2));\n  tcp_client->waitForData(response2);\n\n  // send response 1\n  ASSERT_TRUE(fake_upstream_connection1->write(response1));\n  tcp_client->waitForData(response1, false);\n\n  tcp_client->close();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/tcp_dump.cc",
    "content": "#include \"test/integration/tcp_dump.h\"\n\n#include <sys/types.h>\n\n#include <csignal>\n#include <fstream>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n\nnamespace Envoy {\n\nTcpDump::TcpDump(const std::string& path, const std::string& iface,\n                 const std::vector<uint32_t>& ports) {\n#ifdef WIN32\n  ENVOY_LOG_MISC(debug, \"tcpdump not supported on windows\");\n#else\n  // Remove any extant pcap file.\n  ::unlink(path.c_str());\n  // Derive the port filter expression.\n  std::string port_expr;\n  for (uint32_t port : ports) {\n    if (!port_expr.empty()) {\n      port_expr += \" or \";\n    }\n    port_expr += \"tcp port \" + std::to_string(port);\n  }\n  ENVOY_LOG_MISC(debug, \"tcpdumping iface {} to {} with filter \\\"{}\\\"\", iface, path, port_expr);\n  // Fork a child process. We use explicit fork/wait over popen/pclose to gain\n  // the ability to send signals to the pid.\n  tcpdump_pid_ = ::fork();\n  RELEASE_ASSERT(tcpdump_pid_ >= 0, \"\");\n  // execlp in the child process.\n  if (tcpdump_pid_ == 0) {\n    const int rc = ::execlp(\"tcpdump\", \"tcpdump\", \"-i\", iface.c_str(), \"-w\", path.c_str(),\n                            \"--immediate-mode\", port_expr.c_str(), nullptr);\n    if (rc == -1) {\n      ::perror(\"tcpdump\");\n      exit(1);\n    }\n  }\n  // Wait in parent process until tcpdump is running and has created the pcap\n  // file.\n  while (true) {\n    std::ifstream test_file{path};\n    if (test_file.good()) {\n      break;\n    }\n    // If the child died unexpectedly, handle this.\n    int status;\n    int rc = ::waitpid(tcpdump_pid_, &status, WNOHANG);\n    RELEASE_ASSERT(rc != -1, \"\");\n    if (rc > 0) {\n      RELEASE_ASSERT(rc == tcpdump_pid_, \"\");\n      RELEASE_ASSERT(WIFEXITED(status), \"\");\n      RELEASE_ASSERT(WEXITSTATUS(status) == 1, \"\");\n      ENVOY_LOG_MISC(debug, \"tcpdump exited abnormally\");\n      tcpdump_pid_ = 
0;\n      break;\n    }\n    // Give 50ms sleep.\n    ::usleep(50000);\n  }\n#endif\n}\n\nTcpDump::~TcpDump() {\n#ifndef WIN32\n  if (tcpdump_pid_ > 0) {\n    RELEASE_ASSERT(::kill(tcpdump_pid_, SIGINT) == 0, \"\");\n    int status;\n    RELEASE_ASSERT(::waitpid(tcpdump_pid_, &status, 0) != -1, \"\");\n    RELEASE_ASSERT(WIFEXITED(status), \"\");\n    RELEASE_ASSERT(WEXITSTATUS(status) == 0, \"\");\n    ENVOY_LOG_MISC(debug, \"tcpdump terminated\");\n  }\n#endif\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/tcp_dump.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n#include <vector>\n\nnamespace Envoy {\n\n// RAII tcpdump wrapper for tests. It's expected that tcpdump is in the path.\nclass TcpDump {\npublic:\n  // Start tcpdump on a given interface, to a given file on a set of given\n  // ports.\n  TcpDump(const std::string& path, const std::string& iface, const std::vector<uint32_t>& ports);\n  // Stop tcpdump and wait for child process termination.\n  ~TcpDump();\n\nprivate:\n  int tcpdump_pid_;\n};\n\nusing TcpDumpPtr = std::unique_ptr<TcpDump>;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/tcp_proxy_integration_test.cc",
    "content": "#include \"test/integration/tcp_proxy_integration_test.h\"\n\n#include <memory>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.pb.h\"\n#include \"envoy/extensions/access_loggers/file/v3/file.pb.h\"\n#include \"envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h\"\n\n#include \"common/config/api_version.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/filters/network/common/factory_base.h\"\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"test/integration/ssl_utility.h\"\n#include \"test/integration/tcp_proxy_integration_test.pb.h\"\n#include \"test/integration/tcp_proxy_integration_test.pb.validate.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/registry.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::MatchesRegex;\nusing testing::NiceMock;\n\nnamespace Envoy {\n\nstd::vector<TcpProxyIntegrationTestParams> getProtocolTestParams() {\n  std::vector<TcpProxyIntegrationTestParams> ret;\n\n  for (auto ip_version : TestEnvironment::getIpVersionsForTest()) {\n    ret.push_back(TcpProxyIntegrationTestParams{ip_version, true});\n    ret.push_back(TcpProxyIntegrationTestParams{ip_version, false});\n  }\n  return ret;\n}\n\nstd::string\nprotocolTestParamsToString(const ::testing::TestParamInfo<TcpProxyIntegrationTestParams>& params) {\n  return absl::StrCat(\n      (params.param.version == Network::Address::IpVersion::v4 ? \"IPv4_\" : \"IPv6_\"),\n      (params.param.test_original_version == true ? 
\"OriginalConnPool\" : \"NewConnPool\"));\n}\n\nvoid TcpProxyIntegrationTest::initialize() {\n  if (GetParam().test_original_version) {\n    config_helper_.addRuntimeOverride(\"envoy.reloadable_features.new_tcp_connection_pool\", \"false\");\n  } else {\n    config_helper_.addRuntimeOverride(\"envoy.reloadable_features.new_tcp_connection_pool\", \"true\");\n  }\n\n  config_helper_.renameListener(\"tcp_proxy\");\n  BaseIntegrationTest::initialize();\n}\n\nINSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxyIntegrationTest,\n                         testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString);\n\n// Test upstream writing before downstream downstream does.\nTEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamWritesFirst) {\n  initialize();\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"hello\"));\n  tcp_client->waitForData(\"hello\");\n  // Make sure inexact matches work also on data already received.\n  tcp_client->waitForData(\"ello\", false);\n\n  // Make sure length based wait works for the data already received\n  ASSERT_TRUE(tcp_client->waitForData(5));\n  ASSERT_TRUE(tcp_client->waitForData(4));\n\n  // Drain part of the received message\n  tcp_client->clearData(2);\n  tcp_client->waitForData(\"llo\");\n  ASSERT_TRUE(tcp_client->waitForData(3));\n\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(5));\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"\", true));\n  tcp_client->waitForHalfClose();\n  ASSERT_TRUE(tcp_client->write(\"\", true));\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n}\n\n// Test proxying data in both directions, and that all data is flushed properly\n// 
when there is an upstream disconnect.\nTEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamDisconnect) {\n  initialize();\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(5));\n  ASSERT_TRUE(fake_upstream_connection->write(\"world\"));\n  ASSERT_TRUE(fake_upstream_connection->close());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  tcp_client->waitForHalfClose();\n  tcp_client->close();\n\n  EXPECT_EQ(\"world\", tcp_client->data());\n}\n\n// Test proxying data in both directions, and that all data is flushed properly\n// when the client disconnects.\nTEST_P(TcpProxyIntegrationTest, TcpProxyDownstreamDisconnect) {\n  initialize();\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(5));\n  ASSERT_TRUE(fake_upstream_connection->write(\"world\"));\n  tcp_client->waitForData(\"world\");\n  ASSERT_TRUE(tcp_client->write(\"hello\", true));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(10));\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->write(\"\", true));\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  tcp_client->waitForDisconnect();\n}\n\nTEST_P(TcpProxyIntegrationTest, NoUpstream) {\n  // Set the first upstream to have an invalid port, so connection will fail,\n  // but it won't fail synchronously (as it would if there were simply no\n  // upstreams)\n  fake_upstreams_count_ = 0;\n  
config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n    auto* lb_endpoint =\n        cluster->mutable_load_assignment()->mutable_endpoints(0)->mutable_lb_endpoints(0);\n    lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(1);\n  });\n  config_helper_.skipPortUsageValidation();\n  enable_half_close_ = false;\n  initialize();\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  tcp_client->waitForDisconnect();\n}\n\nTEST_P(TcpProxyIntegrationTest, TcpProxyLargeWrite) {\n  config_helper_.setBufferLimits(1024, 1024);\n  initialize();\n\n  std::string data(1024 * 16, 'a');\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(tcp_client->write(data));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(data.size()));\n  ASSERT_TRUE(fake_upstream_connection->write(data));\n  tcp_client->waitForData(data);\n  tcp_client->close();\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->close());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n\n  uint32_t upstream_pauses =\n      test_server_->counter(\"cluster.cluster_0.upstream_flow_control_paused_reading_total\")\n          ->value();\n  uint32_t upstream_resumes =\n      test_server_->counter(\"cluster.cluster_0.upstream_flow_control_resumed_reading_total\")\n          ->value();\n  EXPECT_EQ(upstream_pauses, upstream_resumes);\n\n  uint32_t downstream_pauses =\n      test_server_->counter(\"tcp.tcp_stats.downstream_flow_control_paused_reading_total\")->value();\n  uint32_t downstream_resumes =\n      
test_server_->counter(\"tcp.tcp_stats.downstream_flow_control_resumed_reading_total\")->value();\n  EXPECT_EQ(downstream_pauses, downstream_resumes);\n}\n\n// Test that a downstream flush works correctly (all data is flushed)\nTEST_P(TcpProxyIntegrationTest, TcpProxyDownstreamFlush) {\n  // Use a very large size to make sure it is larger than the kernel socket read buffer.\n  const uint32_t size = 50 * 1024 * 1024;\n  config_helper_.setBufferLimits(size / 4, size / 4);\n  initialize();\n\n  std::string data(size, 'a');\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  tcp_client->readDisable(true);\n  ASSERT_TRUE(tcp_client->write(\"\", true));\n\n  // This ensures that readDisable(true) has been run on it's thread\n  // before tcp_client starts writing.\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n\n  ASSERT_TRUE(fake_upstream_connection->write(data, true));\n\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_flow_control_paused_reading_total\", 1);\n  EXPECT_EQ(test_server_->counter(\"cluster.cluster_0.upstream_flow_control_resumed_reading_total\")\n                ->value(),\n            0);\n  tcp_client->readDisable(false);\n  tcp_client->waitForData(data);\n  tcp_client->waitForHalfClose();\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n\n  uint32_t upstream_pauses =\n      test_server_->counter(\"cluster.cluster_0.upstream_flow_control_paused_reading_total\")\n          ->value();\n  uint32_t upstream_resumes =\n      test_server_->counter(\"cluster.cluster_0.upstream_flow_control_resumed_reading_total\")\n          ->value();\n  EXPECT_GE(upstream_pauses, upstream_resumes);\n  EXPECT_GT(upstream_resumes, 0);\n}\n\n// Test that an upstream flush works correctly (all data is flushed)\nTEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamFlush) {\n  
// Use a very large size to make sure it is larger than the kernel socket read buffer.\n  const uint32_t size = 50 * 1024 * 1024;\n  config_helper_.setBufferLimits(size, size);\n  initialize();\n\n  std::string data(size, 'a');\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->readDisable(true));\n  ASSERT_TRUE(fake_upstream_connection->write(\"\", true));\n\n  // This ensures that fake_upstream_connection->readDisable has been run on it's thread\n  // before tcp_client starts writing.\n  tcp_client->waitForHalfClose();\n\n  ASSERT_TRUE(tcp_client->write(data, true, true, std::chrono::milliseconds(30000)));\n\n  test_server_->waitForGaugeEq(\"tcp.tcp_stats.upstream_flush_active\", 1);\n  ASSERT_TRUE(fake_upstream_connection->readDisable(false));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(data.size()));\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  tcp_client->waitForHalfClose();\n\n  EXPECT_EQ(test_server_->counter(\"tcp.tcp_stats.upstream_flush_total\")->value(), 1);\n  test_server_->waitForGaugeEq(\"tcp.tcp_stats.upstream_flush_active\", 0);\n}\n\n// Test that Envoy doesn't crash or assert when shutting down with an upstream flush active\nTEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamFlushEnvoyExit) {\n  // Use a very large size to make sure it is larger than the kernel socket read buffer.\n  const uint32_t size = 50 * 1024 * 1024;\n  config_helper_.setBufferLimits(size, size);\n  initialize();\n\n  std::string data(size, 'a');\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  
ASSERT_TRUE(fake_upstream_connection->readDisable(true));\n  ASSERT_TRUE(fake_upstream_connection->write(\"\", true));\n\n  // This ensures that fake_upstream_connection->readDisable has been run on it's thread\n  // before tcp_client starts writing.\n  tcp_client->waitForHalfClose();\n\n  ASSERT_TRUE(tcp_client->write(data, true));\n\n  test_server_->waitForGaugeEq(\"tcp.tcp_stats.upstream_flush_active\", 1);\n  test_server_.reset();\n  ASSERT_TRUE(fake_upstream_connection->close());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n\n  // Success criteria is that no ASSERTs fire and there are no leaks.\n}\n\nTEST_P(TcpProxyIntegrationTest, AccessLog) {\n  std::string access_log_path = TestEnvironment::temporaryPath(\n      fmt::format(\"access_log{}.txt\", version_ == Network::Address::IpVersion::v4 ? \"v4\" : \"v6\"));\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    auto* filter_chain = listener->mutable_filter_chains(0);\n    auto* config_blob = filter_chain->mutable_filters(0)->mutable_typed_config();\n\n    ASSERT_TRUE(\n        config_blob->Is<API_NO_BOOST(envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>());\n    auto tcp_proxy_config = MessageUtil::anyConvert<API_NO_BOOST(\n        envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>(*config_blob);\n\n    auto* access_log = tcp_proxy_config.add_access_log();\n    access_log->set_name(\"accesslog\");\n    envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config;\n    access_log_config.set_path(access_log_path);\n    access_log_config.mutable_log_format()->set_text_format(\n        \"upstreamlocal=%UPSTREAM_LOCAL_ADDRESS% \"\n        \"upstreamhost=%UPSTREAM_HOST% downstream=%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% \"\n        \"sent=%BYTES_SENT% received=%BYTES_RECEIVED%\\n\");\n    
access_log->mutable_typed_config()->PackFrom(access_log_config);\n    auto* runtime_filter = access_log->mutable_filter()->mutable_runtime_filter();\n    runtime_filter->set_runtime_key(\"unused-key\");\n    auto* percent_sampled = runtime_filter->mutable_percent_sampled();\n    percent_sampled->set_numerator(100);\n    percent_sampled->set_denominator(\n        envoy::type::FractionalPercent::DenominatorType::FractionalPercent_DenominatorType_HUNDRED);\n    config_blob->PackFrom(tcp_proxy_config);\n  });\n  initialize();\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"hello\"));\n  tcp_client->waitForData(\"hello\");\n\n  ASSERT_TRUE(fake_upstream_connection->write(\"\", true));\n  tcp_client->waitForHalfClose();\n  ASSERT_TRUE(tcp_client->write(\"\", true));\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n\n  std::string log_result;\n  // Access logs only get flushed to disk periodically, so poll until the log is non-empty\n  do {\n    log_result = api_->fileSystem().fileReadToEnd(access_log_path);\n  } while (log_result.empty());\n\n  // Regex matching localhost:port\n#ifndef GTEST_USES_SIMPLE_RE\n  const std::string ip_port_regex = (version_ == Network::Address::IpVersion::v4)\n                                        ? R\"EOF(127\\.0\\.0\\.1:[0-9]+)EOF\"\n                                        : R\"EOF(\\[::1\\]:[0-9]+)EOF\";\n#else\n  const std::string ip_port_regex = (version_ == Network::Address::IpVersion::v4)\n                                        ? R\"EOF(127\\.0\\.0\\.1:\\d+)EOF\"\n                                        : R\"EOF(\\[::1\\]:\\d+)EOF\";\n#endif\n\n  const std::string ip_regex =\n      (version_ == Network::Address::IpVersion::v4) ? 
R\"EOF(127\\.0\\.0\\.1)EOF\" : R\"EOF(::1)EOF\";\n\n  // Test that all three addresses were populated correctly. Only check the first line of\n  // log output for simplicity.\n  EXPECT_THAT(log_result,\n              MatchesRegex(fmt::format(\n                  \"upstreamlocal={0} upstreamhost={0} downstream={1} sent=5 received=0\\r?\\n.*\",\n                  ip_port_regex, ip_regex)));\n}\n\n// Test that the server shuts down without crashing when connections are open.\nTEST_P(TcpProxyIntegrationTest, ShutdownWithOpenConnections) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    for (int i = 0; i < static_resources->clusters_size(); ++i) {\n      auto* cluster = static_resources->mutable_clusters(i);\n      cluster->set_close_connections_on_host_health_failure(true);\n    }\n  });\n  initialize();\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(5));\n  ASSERT_TRUE(fake_upstream_connection->write(\"world\"));\n  tcp_client->waitForData(\"world\");\n  ASSERT_TRUE(tcp_client->write(\"hello\", false));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(10));\n  test_server_.reset();\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->close());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  tcp_client->waitForHalfClose();\n  tcp_client->close();\n\n  // Success criteria is that no ASSERTs fire and there are no leaks.\n}\n\nTEST_P(TcpProxyIntegrationTest, TestIdletimeoutWithNoData) {\n  autonomous_upstream_ = true;\n\n  enable_half_close_ = false;\n  
config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    auto* filter_chain = listener->mutable_filter_chains(0);\n    auto* config_blob = filter_chain->mutable_filters(0)->mutable_typed_config();\n\n    ASSERT_TRUE(\n        config_blob->Is<API_NO_BOOST(envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>());\n    auto tcp_proxy_config = MessageUtil::anyConvert<API_NO_BOOST(\n        envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>(*config_blob);\n    tcp_proxy_config.mutable_idle_timeout()->set_nanos(\n        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::milliseconds(100))\n            .count());\n    config_blob->PackFrom(tcp_proxy_config);\n  });\n\n  initialize();\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  tcp_client->waitForDisconnect();\n}\n\nTEST_P(TcpProxyIntegrationTest, TestIdletimeoutWithLargeOutstandingData) {\n  config_helper_.setBufferLimits(1024, 1024);\n  enable_half_close_ = false;\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    auto* filter_chain = listener->mutable_filter_chains(0);\n    auto* config_blob = filter_chain->mutable_filters(0)->mutable_typed_config();\n\n    ASSERT_TRUE(\n        config_blob->Is<API_NO_BOOST(envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>());\n    auto tcp_proxy_config = MessageUtil::anyConvert<API_NO_BOOST(\n        envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>(*config_blob);\n    tcp_proxy_config.mutable_idle_timeout()->set_nanos(\n        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::milliseconds(500))\n            .count());\n    config_blob->PackFrom(tcp_proxy_config);\n  });\n\n  initialize();\n  IntegrationTcpClientPtr 
tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n  std::string data(1024 * 16, 'a');\n  ASSERT_TRUE(tcp_client->write(data));\n  ASSERT_TRUE(fake_upstream_connection->write(data));\n\n  tcp_client->waitForDisconnect();\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n}\n\nTEST_P(TcpProxyIntegrationTest, TestMaxDownstreamConnectionDurationWithNoData) {\n  autonomous_upstream_ = true;\n\n  enable_half_close_ = false;\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    auto* filter_chain = listener->mutable_filter_chains(0);\n    auto* config_blob = filter_chain->mutable_filters(0)->mutable_typed_config();\n\n    ASSERT_TRUE(\n        config_blob->Is<API_NO_BOOST(envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>());\n    auto tcp_proxy_config =\n        MessageUtil::anyConvert<envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy>(\n            *config_blob);\n    tcp_proxy_config.mutable_max_downstream_connection_duration()->set_nanos(\n        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::milliseconds(100))\n            .count());\n    config_blob->PackFrom(tcp_proxy_config);\n  });\n\n  initialize();\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  tcp_client->waitForDisconnect();\n}\n\nTEST_P(TcpProxyIntegrationTest, TestMaxDownstreamConnectionDurationWithLargeOutstandingData) {\n  config_helper_.setBufferLimits(1024, 1024);\n  enable_half_close_ = false;\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    auto* filter_chain = 
listener->mutable_filter_chains(0);\n    auto* config_blob = filter_chain->mutable_filters(0)->mutable_typed_config();\n\n    ASSERT_TRUE(\n        config_blob->Is<API_NO_BOOST(envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>());\n    auto tcp_proxy_config =\n        MessageUtil::anyConvert<envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy>(\n            *config_blob);\n    tcp_proxy_config.mutable_max_downstream_connection_duration()->set_nanos(\n        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::milliseconds(500))\n            .count());\n    config_blob->PackFrom(tcp_proxy_config);\n  });\n\n  initialize();\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n\n  std::string data(1024 * 16, 'a');\n  ASSERT_TRUE(tcp_client->write(data));\n  ASSERT_TRUE(fake_upstream_connection->write(data));\n\n  tcp_client->waitForDisconnect();\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n}\n\nTEST_P(TcpProxyIntegrationTest, TestNoCloseOnHealthFailure) {\n  concurrency_ = 2;\n\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    for (int i = 0; i < static_resources->clusters_size(); ++i) {\n      auto* cluster = static_resources->mutable_clusters(i);\n      cluster->set_close_connections_on_host_health_failure(false);\n      cluster->mutable_common_lb_config()->mutable_healthy_panic_threshold()->set_value(0);\n      cluster->add_health_checks()->mutable_timeout()->set_seconds(20);\n      cluster->mutable_health_checks(0)->mutable_reuse_connection()->set_value(true);\n      cluster->mutable_health_checks(0)->mutable_interval()->set_seconds(1);\n      cluster->mutable_health_checks(0)->mutable_no_traffic_interval()->set_seconds(1);\n   
   cluster->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(1);\n      cluster->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(1);\n      cluster->mutable_health_checks(0)->mutable_tcp_health_check();\n      cluster->mutable_health_checks(0)->mutable_tcp_health_check()->mutable_send()->set_text(\n          \"50696E67\");\n      cluster->mutable_health_checks(0)->mutable_tcp_health_check()->add_receive()->set_text(\n          \"506F6E67\");\n    }\n  });\n\n  FakeRawConnectionPtr fake_upstream_health_connection;\n  on_server_init_function_ = [&](void) -> void {\n    ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection));\n    ASSERT_TRUE(fake_upstream_health_connection->waitForData(\n        FakeRawConnection::waitForInexactMatch(\"Ping\")));\n    ASSERT_TRUE(fake_upstream_health_connection->write(\"Pong\"));\n  };\n\n  initialize();\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(5));\n  ASSERT_TRUE(fake_upstream_connection->write(\"world\"));\n  tcp_client->waitForData(\"world\");\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(10));\n\n  ASSERT_TRUE(fake_upstream_health_connection->waitForData(8));\n  ASSERT_TRUE(fake_upstream_health_connection->close());\n  ASSERT_TRUE(fake_upstream_health_connection->waitForDisconnect());\n\n  // By waiting we know the previous health check attempt completed (with a failure since we closed\n  // the connection on it)\n  FakeRawConnectionPtr fake_upstream_health_connection_reconnect;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection_reconnect));\n  
ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForData(\n      FakeRawConnection::waitForInexactMatch(\"Ping\")));\n\n  ASSERT_TRUE(tcp_client->write(\"still\"));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(15));\n  ASSERT_TRUE(fake_upstream_connection->write(\"here\"));\n  tcp_client->waitForData(\"here\", false);\n\n  test_server_.reset();\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->close());\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_health_connection_reconnect->close());\n  ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForDisconnect());\n  tcp_client->waitForHalfClose();\n  tcp_client->close();\n}\n\nTEST_P(TcpProxyIntegrationTest, TestCloseOnHealthFailure) {\n  concurrency_ = 2;\n\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    for (int i = 0; i < static_resources->clusters_size(); ++i) {\n      auto* cluster = static_resources->mutable_clusters(i);\n      cluster->set_close_connections_on_host_health_failure(true);\n      cluster->mutable_common_lb_config()->mutable_healthy_panic_threshold()->set_value(0);\n      cluster->add_health_checks()->mutable_timeout()->set_seconds(20);\n      cluster->mutable_health_checks(0)->mutable_reuse_connection()->set_value(true);\n      cluster->mutable_health_checks(0)->mutable_interval()->set_seconds(1);\n      cluster->mutable_health_checks(0)->mutable_no_traffic_interval()->set_seconds(1);\n      cluster->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(1);\n      cluster->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(1);\n      cluster->mutable_health_checks(0)->mutable_tcp_health_check();\n      
cluster->mutable_health_checks(0)->mutable_tcp_health_check()->mutable_send()->set_text(\n          \"50696E67\");\n      ;\n      cluster->mutable_health_checks(0)->mutable_tcp_health_check()->add_receive()->set_text(\n          \"506F6E67\");\n    }\n  });\n\n  FakeRawConnectionPtr fake_upstream_health_connection;\n  on_server_init_function_ = [&](void) -> void {\n    ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection));\n    ASSERT_TRUE(fake_upstream_health_connection->waitForData(4));\n    ASSERT_TRUE(fake_upstream_health_connection->write(\"Pong\"));\n  };\n\n  initialize();\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(5));\n  ASSERT_TRUE(fake_upstream_connection->write(\"world\"));\n  tcp_client->waitForData(\"world\");\n  ASSERT_TRUE(tcp_client->write(\"hello\"));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(10));\n\n  ASSERT_TRUE(fake_upstream_health_connection->waitForData(8));\n  ASSERT_TRUE(fake_upstream_health_connection->close());\n  ASSERT_TRUE(fake_upstream_health_connection->waitForDisconnect());\n\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  tcp_client->waitForHalfClose();\n\n  ASSERT_TRUE(fake_upstream_connection->close());\n  tcp_client->close();\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n}\n\nclass TcpProxyMetadataMatchIntegrationTest : public TcpProxyIntegrationTest {\npublic:\n  TcpProxyMetadataMatchIntegrationTest(uint32_t tcp_proxy_filter_index = 0)\n      : tcp_proxy_filter_index_(tcp_proxy_filter_index) {}\n  void initialize() override;\n\n  void expectEndpointToMatchRoute(\n      std::function<std::string(IntegrationTcpClient&)> initial_data_cb = nullptr);\n  void 
expectEndpointNotToMatchRoute(const std::string& write_data = \"hello\");\n\n  envoy::config::core::v3::Metadata lbMetadata(std::map<std::string, std::string> values);\n\n  envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy_;\n  envoy::config::core::v3::Metadata endpoint_metadata_;\n  const uint32_t tcp_proxy_filter_index_;\n};\n\nenvoy::config::core::v3::Metadata\nTcpProxyMetadataMatchIntegrationTest::lbMetadata(std::map<std::string, std::string> values) {\n\n  ProtobufWkt::Struct map;\n  auto* mutable_fields = map.mutable_fields();\n  ProtobufWkt::Value value;\n\n  std::map<std::string, std::string>::iterator it;\n  for (it = values.begin(); it != values.end(); it++) {\n    value.set_string_value(it->second);\n    mutable_fields->insert({it->first, value});\n  }\n\n  envoy::config::core::v3::Metadata metadata;\n  (*metadata.mutable_filter_metadata())[Envoy::Config::MetadataFilters::get().ENVOY_LB] = map;\n  return metadata;\n}\n\nvoid TcpProxyMetadataMatchIntegrationTest::initialize() {\n\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n\n    ASSERT(static_resources->listeners_size() == 1);\n    static_resources->mutable_listeners(0)\n        ->mutable_filter_chains(0)\n        ->mutable_filters(tcp_proxy_filter_index_)\n        ->mutable_typed_config()\n        ->PackFrom(tcp_proxy_);\n\n    ASSERT(static_resources->clusters_size() == 1);\n    auto* cluster_0 = static_resources->mutable_clusters(0);\n    cluster_0->Clear();\n    cluster_0->set_name(\"cluster_0\");\n    cluster_0->set_type(envoy::config::cluster::v3::Cluster::STATIC);\n    cluster_0->set_lb_policy(envoy::config::cluster::v3::Cluster::ROUND_ROBIN);\n    auto* lb_subset_config = cluster_0->mutable_lb_subset_config();\n    lb_subset_config->set_fallback_policy(\n        envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK);\n    auto* subset_selector = 
lb_subset_config->add_subset_selectors();\n    subset_selector->add_keys(\"role\");\n    subset_selector->add_keys(\"version\");\n    subset_selector->add_keys(\"stage\");\n    auto* load_assignment = cluster_0->mutable_load_assignment();\n    load_assignment->set_cluster_name(\"cluster_0\");\n    auto* locality_lb_endpoints = load_assignment->add_endpoints();\n    auto* lb_endpoint = locality_lb_endpoints->add_lb_endpoints();\n    lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address(\n        Network::Test::getLoopbackAddressString(version_));\n    lb_endpoint->mutable_metadata()->MergeFrom(endpoint_metadata_);\n  });\n\n  TcpProxyIntegrationTest::initialize();\n}\n\n// Verifies successful connection.\nvoid TcpProxyMetadataMatchIntegrationTest::expectEndpointToMatchRoute(\n    std::function<std::string(IntegrationTcpClient&)> initial_data_cb) {\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  std::string expected_upstream_data;\n  if (initial_data_cb) {\n    expected_upstream_data = initial_data_cb(*tcp_client);\n  } else {\n    expected_upstream_data = \"hello\";\n    ASSERT_TRUE(tcp_client->write(expected_upstream_data));\n  }\n  FakeRawConnectionPtr fake_upstream_connection;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(expected_upstream_data.length()));\n  ASSERT_TRUE(fake_upstream_connection->write(\"world\"));\n  tcp_client->waitForData(\"world\");\n  ASSERT_TRUE(tcp_client->write(\"hello\", true));\n  ASSERT_TRUE(fake_upstream_connection->waitForData(5 + expected_upstream_data.length()));\n  ASSERT_TRUE(fake_upstream_connection->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection->write(\"\", true));\n  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());\n  tcp_client->waitForDisconnect();\n\n  test_server_->waitForCounterGe(\"cluster.cluster_0.lb_subsets_selected\", 
1);\n}\n\n// Verifies connection failure.\nvoid TcpProxyMetadataMatchIntegrationTest::expectEndpointNotToMatchRoute(\n    const std::string& write_data) {\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(tcp_client->write(write_data, false, false));\n\n  // TODO(yskopets): 'tcp_client->waitForDisconnect();' gets stuck indefinitely on Linux builds,\n  // e.g. on 'envoy-linux (bazel compile_time_options)' and 'envoy-linux (bazel release)'\n  // tcp_client->waitForDisconnect();\n\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_cx_none_healthy\", 1);\n  test_server_->waitForCounterEq(\"cluster.cluster_0.lb_subsets_selected\", 0);\n\n  tcp_client->close();\n}\n\nINSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxyMetadataMatchIntegrationTest,\n                         testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString);\n\n// Test subset load balancing for a regular cluster when endpoint selector is defined at the top\n// level.\nTEST_P(TcpProxyMetadataMatchIntegrationTest,\n       EndpointShouldMatchSingleClusterWithTopLevelMetadataMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  tcp_proxy_.set_cluster(\"cluster_0\");\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  initialize();\n\n  expectEndpointToMatchRoute();\n}\n\n// Test subset load balancing for a deprecated_v1 route when endpoint selector is defined at the top\n// level.\nTEST_P(TcpProxyMetadataMatchIntegrationTest,\n       DEPRECATED_FEATURE_TEST(EndpointShouldMatchRouteWithTopLevelMetadataMatch)) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  tcp_proxy_.set_cluster(\"fallback\");\n  tcp_proxy_.mutable_hidden_envoy_deprecated_deprecated_v1()->add_routes()->set_cluster(\n      
\"cluster_0\");\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  config_helper_.addRuntimeOverride(\"envoy.deprecated_features:envoy.extensions.filters.network.\"\n                                    \"tcp_proxy.v3.TcpProxy.hidden_envoy_deprecated_deprecated_v1\",\n                                    \"true\");\n  initialize();\n\n  expectEndpointToMatchRoute();\n}\n\n// Test subset load balancing for a weighted cluster when endpoint selector is defined on a weighted\n// cluster.\nTEST_P(TcpProxyMetadataMatchIntegrationTest, EndpointShouldMatchWeightedClusterWithMetadataMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters();\n  cluster_0->set_name(\"cluster_0\");\n  cluster_0->set_weight(1);\n  cluster_0->mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  initialize();\n\n  expectEndpointToMatchRoute();\n}\n\n// Test subset load balancing for a weighted cluster when endpoint selector is defined both on a\n// weighted cluster and at the top level.\nTEST_P(TcpProxyMetadataMatchIntegrationTest,\n       EndpointShouldMatchWeightedClusterWithMetadataMatchAndTopLevelMetadataMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(lbMetadata({{\"version\", \"v1\"}, {\"stage\", \"dev\"}}));\n  auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters();\n  cluster_0->set_name(\"cluster_0\");\n  cluster_0->set_weight(1);\n  cluster_0->mutable_metadata_match()->MergeFrom(lbMetadata(\n      {{\"role\", \"primary\"}, {\"stage\", \"prod\"}})); 
// should override `stage` value at top-level\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  initialize();\n\n  expectEndpointToMatchRoute();\n}\n\n// Test subset load balancing for a weighted cluster when endpoint selector is defined at the top\n// level only.\nTEST_P(TcpProxyMetadataMatchIntegrationTest,\n       EndpointShouldMatchWeightedClusterWithTopLevelMetadataMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n  auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters();\n  cluster_0->set_name(\"cluster_0\");\n  cluster_0->set_weight(1);\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  initialize();\n\n  expectEndpointToMatchRoute();\n}\n\n// Test subset load balancing for a regular cluster when endpoint selector is defined at the top\n// level.\nTEST_P(TcpProxyMetadataMatchIntegrationTest,\n       EndpointShouldNotMatchSingleClusterWithTopLevelMetadataMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  tcp_proxy_.set_cluster(\"cluster_0\");\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"replica\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  initialize();\n\n  expectEndpointNotToMatchRoute();\n}\n\n// Test subset load balancing for a deprecated_v1 route when endpoint selector is defined at the top\n// level.\nTEST_P(TcpProxyMetadataMatchIntegrationTest,\n       DEPRECATED_FEATURE_TEST(EndpointShouldNotMatchRouteWithTopLevelMetadataMatch)) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  tcp_proxy_.set_cluster(\"fallback\");\n  
tcp_proxy_.mutable_hidden_envoy_deprecated_deprecated_v1()->add_routes()->set_cluster(\n      \"cluster_0\");\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"replica\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  config_helper_.addRuntimeOverride(\"envoy.deprecated_features:envoy.extensions.filters.network.\"\n                                    \"tcp_proxy.v3.TcpProxy.hidden_envoy_deprecated_deprecated_v1\",\n                                    \"true\");\n  initialize();\n\n  expectEndpointNotToMatchRoute();\n}\n\n// Test subset load balancing for a weighted cluster when endpoint selector is defined on a weighted\n// cluster.\nTEST_P(TcpProxyMetadataMatchIntegrationTest,\n       EndpointShouldNotMatchWeightedClusterWithMetadataMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters();\n  cluster_0->set_name(\"cluster_0\");\n  cluster_0->set_weight(1);\n  cluster_0->mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"replica\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  initialize();\n\n  expectEndpointNotToMatchRoute();\n}\n\n// Test subset load balancing for a weighted cluster when endpoint selector is defined both on a\n// weighted cluster and at the top level.\nTEST_P(TcpProxyMetadataMatchIntegrationTest,\n       EndpointShouldNotMatchWeightedClusterWithMetadataMatchAndTopLevelMetadataMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(lbMetadata({{\"version\", \"v1\"}, {\"stage\", \"dev\"}}));\n  auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters();\n  cluster_0->set_name(\"cluster_0\");\n  cluster_0->set_weight(1);\n  
cluster_0->mutable_metadata_match()->MergeFrom(lbMetadata(\n      {{\"role\", \"primary\"}, {\"stage\", \"prod\"}})); // should override `stage` value at top-level\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"dev\"}});\n\n  initialize();\n\n  expectEndpointNotToMatchRoute();\n}\n\n// Test subset load balancing for a weighted cluster when endpoint selector is defined at the top\n// level only.\nTEST_P(TcpProxyMetadataMatchIntegrationTest,\n       EndpointShouldNotMatchWeightedClusterWithTopLevelMetadataMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n  auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters();\n  cluster_0->set_name(\"cluster_0\");\n  cluster_0->set_weight(1);\n\n  endpoint_metadata_ = lbMetadata({{\"role\", \"replica\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  initialize();\n\n  expectEndpointNotToMatchRoute();\n}\n\nclass InjectDynamicMetadata : public Network::ReadFilter {\npublic:\n  explicit InjectDynamicMetadata(const std::string& key) : key_(key) {}\n\n  Network::FilterStatus onData(Buffer::Instance& data, bool) override {\n    if (!metadata_set_) {\n      // To allow testing a write that returns `StopIteration`, only proceed\n      // when more than 1 byte is received.\n      if (data.length() < 2) {\n        ASSERT(data.length() == 1);\n\n        // Echo data back to test can verify it was received.\n        Buffer::OwnedImpl copy(data);\n        read_callbacks_->connection().write(copy, false);\n        return Network::FilterStatus::StopIteration;\n      }\n\n      ProtobufWkt::Value val;\n      val.set_string_value(data.toString());\n\n      ProtobufWkt::Struct& map =\n          (*read_callbacks_->connection()\n                .streamInfo()\n                .dynamicMetadata()\n                
.mutable_filter_metadata())[Envoy::Config::MetadataFilters::get().ENVOY_LB];\n      (*map.mutable_fields())[key_] = val;\n\n      // Put this back in the state that TcpProxy expects.\n      read_callbacks_->connection().readDisable(true);\n\n      metadata_set_ = true;\n    }\n    return Network::FilterStatus::Continue;\n  }\n\n  Network::FilterStatus onNewConnection() override {\n    // TcpProxy disables read; must re-enable so we can read headers.\n    read_callbacks_->connection().readDisable(false);\n\n    // Stop until we read the value and can set the metadata for TcpProxy.\n    return Network::FilterStatus::StopIteration;\n  }\n\n  void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {\n    read_callbacks_ = &callbacks;\n  }\n\n  const std::string key_;\n  Network::ReadFilterCallbacks* read_callbacks_{};\n  bool metadata_set_{false};\n};\n\nclass InjectDynamicMetadataFactory : public Extensions::NetworkFilters::Common::FactoryBase<\n                                         test::integration::tcp_proxy::InjectDynamicMetadata> {\npublic:\n  InjectDynamicMetadataFactory() : FactoryBase(\"test.inject_dynamic_metadata\") {}\n\nprivate:\n  Network::FilterFactoryCb\n  createFilterFactoryFromProtoTyped(const test::integration::tcp_proxy::InjectDynamicMetadata& cfg,\n                                    Server::Configuration::FactoryContext&) override {\n    std::string key = cfg.key();\n    return [key = std::move(key)](Network::FilterManager& filter_manager) -> void {\n      filter_manager.addReadFilter(std::make_shared<InjectDynamicMetadata>(key));\n    };\n  }\n};\n\nclass TcpProxyDynamicMetadataMatchIntegrationTest : public TcpProxyMetadataMatchIntegrationTest {\npublic:\n  TcpProxyDynamicMetadataMatchIntegrationTest() : TcpProxyMetadataMatchIntegrationTest(1) {\n    config_helper_.addNetworkFilter(R\"EOF(\n      name: test.inject_dynamic_metadata\n      typed_config:\n        \"@type\": 
type.googleapis.com/test.integration.tcp_proxy.InjectDynamicMetadata\n        key: role\n)EOF\");\n  }\n\n  InjectDynamicMetadataFactory factory_;\n  Registry::InjectFactory<Server::Configuration::NamedNetworkFilterConfigFactory> register_factory_{\n      factory_};\n};\n\nINSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxyDynamicMetadataMatchIntegrationTest,\n                         testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString);\n\nTEST_P(TcpProxyDynamicMetadataMatchIntegrationTest, DynamicMetadataMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n\n  // Note: role isn't set here; it will be set in the dynamic metadata.\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n  auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters();\n  cluster_0->set_name(\"cluster_0\");\n  cluster_0->set_weight(1);\n  endpoint_metadata_ = lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  initialize();\n\n  expectEndpointToMatchRoute([](IntegrationTcpClient& tcp_client) -> std::string {\n    // Break the write into two; validate that the first is received before sending the second. 
This\n    // validates that a downstream filter can use this functionality, even if it can't make a\n    // decision after the first `onData()`.\n    EXPECT_TRUE(tcp_client.write(\"p\", false));\n    tcp_client.waitForData(\"p\");\n    tcp_client.clearData();\n    EXPECT_TRUE(tcp_client.write(\"rimary\", false));\n    return \"primary\";\n  });\n}\n\nTEST_P(TcpProxyDynamicMetadataMatchIntegrationTest, DynamicMetadataNonMatch) {\n  tcp_proxy_.set_stat_prefix(\"tcp_stats\");\n\n  // Note: role isn't set here; it will be set in the dynamic metadata.\n  tcp_proxy_.mutable_metadata_match()->MergeFrom(\n      lbMetadata({{\"version\", \"v1\"}, {\"stage\", \"prod\"}}));\n  auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters();\n  cluster_0->set_name(\"cluster_0\");\n  cluster_0->set_weight(1);\n  endpoint_metadata_ = lbMetadata({{\"role\", \"primary\"}, {\"version\", \"v1\"}, {\"stage\", \"prod\"}});\n\n  initialize();\n\n  expectEndpointNotToMatchRoute(\"does_not_match_role_primary\");\n}\n\nINSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxySslIntegrationTest,\n                         testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString);\n\nvoid TcpProxySslIntegrationTest::initialize() {\n  config_helper_.addSslConfig();\n  TcpProxyIntegrationTest::initialize();\n\n  context_manager_ =\n      std::make_unique<Extensions::TransportSockets::Tls::ContextManagerImpl>(timeSystem());\n  payload_reader_ = std::make_shared<WaitForPayloadReader>(*dispatcher_);\n}\n\nvoid TcpProxySslIntegrationTest::setupConnections() {\n  initialize();\n\n  // Set up the mock buffer factory so the newly created SSL client will have a mock write\n  // buffer. 
This allows us to track the bytes actually written to the socket.\n\n  EXPECT_CALL(*mock_buffer_factory_, create_(_, _, _))\n      .Times(1)\n      .WillOnce(Invoke([&](std::function<void()> below_low, std::function<void()> above_high,\n                           std::function<void()> above_overflow) -> Buffer::Instance* {\n        client_write_buffer_ =\n            new NiceMock<MockWatermarkBuffer>(below_low, above_high, above_overflow);\n        ON_CALL(*client_write_buffer_, move(_))\n            .WillByDefault(Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove));\n        ON_CALL(*client_write_buffer_, drain(_))\n            .WillByDefault(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackDrains));\n        return client_write_buffer_;\n      }));\n  // Set up the SSL client.\n  Network::Address::InstanceConstSharedPtr address =\n      Ssl::getSslAddress(version_, lookupPort(\"tcp_proxy\"));\n  context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_);\n  ssl_client_ =\n      dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(),\n                                          context_->createTransportSocket(nullptr), nullptr);\n\n  // Perform the SSL handshake. 
Loopback is allowlisted in tcp_proxy.json for the ssl_auth\n  // filter so there will be no pause waiting on auth data.\n  ssl_client_->addConnectionCallbacks(connect_callbacks_);\n  ssl_client_->enableHalfClose(true);\n  ssl_client_->addReadFilter(payload_reader_);\n  ssl_client_->connect();\n  while (!connect_callbacks_.connected()) {\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  AssertionResult result = fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_);\n  RELEASE_ASSERT(result, result.message());\n}\n\n// Test proxying data in both directions with envoy doing TCP and TLS\n// termination.\nvoid TcpProxySslIntegrationTest::sendAndReceiveTlsData(const std::string& data_to_send_upstream,\n                                                       const std::string& data_to_send_downstream) {\n  // Ship some data upstream.\n  Buffer::OwnedImpl buffer(data_to_send_upstream);\n  ssl_client_->write(buffer, false);\n  while (client_write_buffer_->bytesDrained() != data_to_send_upstream.size()) {\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  // Make sure the data makes it upstream.\n  ASSERT_TRUE(fake_upstream_connection_->waitForData(data_to_send_upstream.size()));\n\n  // Now send data downstream and make sure it arrives.\n  ASSERT_TRUE(fake_upstream_connection_->write(data_to_send_downstream));\n  payload_reader_->set_data_to_wait_for(data_to_send_downstream);\n  ssl_client_->dispatcher().run(Event::Dispatcher::RunType::Block);\n\n  // Clean up.\n  Buffer::OwnedImpl empty_buffer;\n  ssl_client_->write(empty_buffer, true);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  ASSERT_TRUE(fake_upstream_connection_->waitForHalfClose());\n  ASSERT_TRUE(fake_upstream_connection_->write(\"\", true));\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n  ssl_client_->dispatcher().run(Event::Dispatcher::RunType::Block);\n  EXPECT_TRUE(payload_reader_->readLastByte());\n  
EXPECT_TRUE(connect_callbacks_.closed());\n}\n\nTEST_P(TcpProxySslIntegrationTest, SendTlsToTlsListener) {\n  setupConnections();\n  sendAndReceiveTlsData(\"hello\", \"world\");\n}\n\nTEST_P(TcpProxySslIntegrationTest, LargeBidirectionalTlsWrites) {\n  setupConnections();\n  std::string large_data(1024 * 8, 'a');\n  sendAndReceiveTlsData(large_data, large_data);\n}\n\n// Test that a half-close on the downstream side is proxied correctly.\nTEST_P(TcpProxySslIntegrationTest, DownstreamHalfClose) {\n  setupConnections();\n\n  Buffer::OwnedImpl empty_buffer;\n  ssl_client_->write(empty_buffer, true);\n  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  ASSERT_TRUE(fake_upstream_connection_->waitForHalfClose());\n\n  const std::string data(\"data\");\n  ASSERT_TRUE(fake_upstream_connection_->write(data, false));\n  payload_reader_->set_data_to_wait_for(data);\n  ssl_client_->dispatcher().run(Event::Dispatcher::RunType::Block);\n  EXPECT_FALSE(payload_reader_->readLastByte());\n\n  ASSERT_TRUE(fake_upstream_connection_->write(\"\", true));\n  ssl_client_->dispatcher().run(Event::Dispatcher::RunType::Block);\n  EXPECT_TRUE(payload_reader_->readLastByte());\n}\n\n// Test that a half-close on the upstream side is proxied correctly.\nTEST_P(TcpProxySslIntegrationTest, UpstreamHalfClose) {\n  setupConnections();\n\n  ASSERT_TRUE(fake_upstream_connection_->write(\"\", true));\n  ssl_client_->dispatcher().run(Event::Dispatcher::RunType::Block);\n  EXPECT_TRUE(payload_reader_->readLastByte());\n  EXPECT_FALSE(connect_callbacks_.closed());\n\n  const std::string& val(\"data\");\n  Buffer::OwnedImpl buffer(val);\n  ssl_client_->write(buffer, false);\n  while (client_write_buffer_->bytesDrained() != val.size()) {\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  ASSERT_TRUE(fake_upstream_connection_->waitForData(val.size()));\n\n  Buffer::OwnedImpl empty_buffer;\n  ssl_client_->write(empty_buffer, true);\n  while (!connect_callbacks_.closed()) {\n    
dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  ASSERT_TRUE(fake_upstream_connection_->waitForHalfClose());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/tcp_proxy_integration_test.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"test/integration/integration.h\"\n#include \"test/mocks/secret/mocks.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nstruct TcpProxyIntegrationTestParams {\n  Network::Address::IpVersion version;\n  bool test_original_version;\n};\n\nclass TcpProxyIntegrationTest : public testing::TestWithParam<TcpProxyIntegrationTestParams>,\n                                public BaseIntegrationTest {\npublic:\n  TcpProxyIntegrationTest()\n      : BaseIntegrationTest(GetParam().version, ConfigHelper::tcpProxyConfig()) {\n    enable_half_close_ = true;\n  }\n\n  void initialize() override;\n};\n\nclass TcpProxySslIntegrationTest : public TcpProxyIntegrationTest {\npublic:\n  void initialize() override;\n  void setupConnections();\n  void sendAndReceiveTlsData(const std::string& data_to_send_upstream,\n                             const std::string& data_to_send_downstream);\n\n  std::unique_ptr<Ssl::ContextManager> context_manager_;\n  Network::TransportSocketFactoryPtr context_;\n  ConnectionStatusCallbacks connect_callbacks_;\n  MockWatermarkBuffer* client_write_buffer_;\n  std::shared_ptr<WaitForPayloadReader> payload_reader_;\n  testing::NiceMock<Secret::MockSecretManager> secret_manager_;\n  Network::ClientConnectionPtr ssl_client_;\n  FakeRawConnectionPtr fake_upstream_connection_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/tcp_proxy_integration_test.proto",
    "content": "syntax = \"proto3\";\n\npackage test.integration.tcp_proxy;\n\nmessage InjectDynamicMetadata {\n  string key = 1;\n}\n"
  },
  {
    "path": "test/integration/tcp_tunneling_integration_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.pb.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/http_protocol_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\n// Terminating CONNECT and sending raw TCP upstream.\nclass ConnectTerminationIntegrationTest\n    : public testing::TestWithParam<Network::Address::IpVersion>,\n      public HttpIntegrationTest {\npublic:\n  ConnectTerminationIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {\n    enable_half_close_ = true;\n  }\n\n  void initialize() override {\n    config_helper_.addConfigModifier(\n        [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) {\n          ConfigHelper::setConnectConfig(hcm, true);\n\n          if (enable_timeout_) {\n            hcm.mutable_stream_idle_timeout()->set_seconds(0);\n            hcm.mutable_stream_idle_timeout()->set_nanos(200 * 1000 * 1000);\n          }\n        });\n    HttpIntegrationTest::initialize();\n  }\n\n  void setUpConnection() {\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n    auto encoder_decoder = codec_client_->startRequest(connect_headers_);\n    request_encoder_ = &encoder_decoder.first;\n    response_ = std::move(encoder_decoder.second);\n    ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_raw_upstream_connection_));\n    response_->waitForHeaders();\n  }\n\n  void sendBidirectionalData(const char* downstream_send_data = \"hello\",\n                             const char* upstream_received_data = \"hello\",\n                             const char* upstream_send_data = \"there!\",\n                             const char* downstream_received_data = \"there!\") {\n    // Send some data upstream.\n    codec_client_->sendData(*request_encoder_, 
downstream_send_data, false);\n    ASSERT_TRUE(fake_raw_upstream_connection_->waitForData(\n        FakeRawConnection::waitForInexactMatch(upstream_received_data)));\n\n    // Send some data downstream.\n    ASSERT_TRUE(fake_raw_upstream_connection_->write(upstream_send_data));\n    response_->waitForBodyData(strlen(downstream_received_data));\n    EXPECT_EQ(downstream_received_data, response_->body());\n  }\n\n  Http::TestRequestHeaderMapImpl connect_headers_{{\":method\", \"CONNECT\"},\n                                                  {\":path\", \"/\"},\n                                                  {\":protocol\", \"bytestream\"},\n                                                  {\":scheme\", \"https\"},\n                                                  {\":authority\", \"host:80\"}};\n  FakeRawConnectionPtr fake_raw_upstream_connection_;\n  IntegrationStreamDecoderPtr response_;\n  bool enable_timeout_{};\n};\n\nTEST_P(ConnectTerminationIntegrationTest, Basic) {\n  initialize();\n\n  setUpConnection();\n  sendBidirectionalData(\"hello\", \"hello\", \"there!\", \"there!\");\n  // Send a second set of data to make sure for example headers are only sent once.\n  sendBidirectionalData(\",bye\", \"hello,bye\", \"ack\", \"there!ack\");\n\n  // Send an end stream. This should result in half close upstream.\n  codec_client_->sendData(*request_encoder_, \"\", true);\n  ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose());\n\n  // Now send a FIN from upstream. 
This should result in clean shutdown downstream.\n  ASSERT_TRUE(fake_raw_upstream_connection_->close());\n  response_->waitForEndStream();\n  ASSERT_FALSE(response_->reset());\n}\n\nTEST_P(ConnectTerminationIntegrationTest, DownstreamClose) {\n  initialize();\n\n  setUpConnection();\n  sendBidirectionalData();\n\n  // Tear down by closing the client connection.\n  codec_client_->close();\n  ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose());\n}\n\nTEST_P(ConnectTerminationIntegrationTest, DownstreamReset) {\n  initialize();\n\n  setUpConnection();\n  sendBidirectionalData();\n\n  // Tear down by resetting the client stream.\n  codec_client_->sendReset(*request_encoder_);\n  ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose());\n}\n\nTEST_P(ConnectTerminationIntegrationTest, UpstreamClose) {\n  initialize();\n\n  setUpConnection();\n  sendBidirectionalData();\n\n  // Tear down by closing the upstream connection.\n  ASSERT_TRUE(fake_raw_upstream_connection_->close());\n  response_->waitForReset();\n}\n\nTEST_P(ConnectTerminationIntegrationTest, TestTimeout) {\n  enable_timeout_ = true;\n  initialize();\n\n  setUpConnection();\n\n  // Wait for the timeout to close the connection.\n  response_->waitForReset();\n  ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose());\n}\n\nTEST_P(ConnectTerminationIntegrationTest, BuggyHeaders) {\n  initialize();\n\n  // Sending a header-only request is probably buggy, but rather than having a\n  // special corner case it is treated as a regular half close.\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  response_ = codec_client_->makeHeaderOnlyRequest(\n      Http::TestRequestHeaderMapImpl{{\":method\", \"CONNECT\"},\n                                     {\":path\", \"/\"},\n                                     {\":protocol\", \"bytestream\"},\n                                     {\":scheme\", \"https\"},\n                                     {\":authority\", \"host:80\"}});\n  // If the 
connection is established (created, set to half close, and then the\n  // FIN arrives), make sure the FIN arrives, and send a FIN from upstream.\n  if (fake_upstreams_[0]->waitForRawConnection(fake_raw_upstream_connection_) &&\n      fake_raw_upstream_connection_->connected()) {\n    ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose());\n    ASSERT_TRUE(fake_raw_upstream_connection_->close());\n  }\n\n  // Either with early close, or half close, the FIN from upstream should result\n  // in clean stream teardown.\n  response_->waitForEndStream();\n  ASSERT_FALSE(response_->reset());\n}\n\nTEST_P(ConnectTerminationIntegrationTest, BasicMaxStreamDuration) {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* static_resources = bootstrap.mutable_static_resources();\n    auto* cluster = static_resources->mutable_clusters(0);\n    auto* http_protocol_options = cluster->mutable_common_http_protocol_options();\n    http_protocol_options->mutable_max_stream_duration()->MergeFrom(\n        ProtobufUtil::TimeUtil::MillisecondsToDuration(1000));\n  });\n\n  initialize();\n  setUpConnection();\n  sendBidirectionalData();\n\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_rq_max_duration_reached\", 1);\n\n  if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  } else {\n    response_->waitForReset();\n    codec_client_->close();\n  }\n}\n\n// For this class, forward the CONNECT request upstream\nclass ProxyingConnectIntegrationTest : public HttpProtocolIntegrationTest {\npublic:\n  void initialize() override {\n    config_helper_.addConfigModifier(\n        [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); });\n    HttpProtocolIntegrationTest::initialize();\n  }\n\n  Http::TestRequestHeaderMapImpl 
connect_headers_{{\":method\", \"CONNECT\"},\n                                                  {\":path\", \"/\"},\n                                                  {\":protocol\", \"bytestream\"},\n                                                  {\":scheme\", \"https\"},\n                                                  {\":authority\", \"host:80\"}};\n  IntegrationStreamDecoderPtr response_;\n};\n\nINSTANTIATE_TEST_SUITE_P(Protocols, ProxyingConnectIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nTEST_P(ProxyingConnectIntegrationTest, ProxyConnect) {\n  initialize();\n\n  // Send request headers.\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  auto encoder_decoder = codec_client_->startRequest(connect_headers_);\n  request_encoder_ = &encoder_decoder.first;\n  response_ = std::move(encoder_decoder.second);\n\n  // Wait for them to arrive upstream.\n  AssertionResult result =\n      fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_);\n  RELEASE_ASSERT(result, result.message());\n  result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_);\n  RELEASE_ASSERT(result, result.message());\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Method)->value(), \"CONNECT\");\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    EXPECT_TRUE(upstream_request_->headers().get(Http::Headers::get().Protocol) == nullptr);\n  } else {\n    EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Protocol)->value(),\n              \"bytestream\");\n  }\n\n  // Send response headers\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  // Wait for them to arrive downstream.\n  response_->waitForHeaders();\n  EXPECT_EQ(\"200\", 
response_->headers().getStatusValue());\n\n  // Make sure that even once the response has started, that data can continue to go upstream.\n  codec_client_->sendData(*request_encoder_, \"hello\", false);\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5));\n\n  // Also test upstream to downstream data.\n  upstream_request_->encodeData(12, false);\n  response_->waitForBodyData(12);\n\n  cleanupUpstreamAndDownstream();\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ConnectTerminationIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Tunneling downstream TCP over an upstream HTTP channel.\nclass TcpTunnelingIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                                    public HttpIntegrationTest {\npublic:\n  TcpTunnelingIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {}\n\n  void SetUp() override {\n    enable_half_close_ = true;\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP2);\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n\n    config_helper_.addConfigModifier(\n        [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          envoy::config::filter::network::tcp_proxy::v2::TcpProxy proxy_config;\n          proxy_config.set_stat_prefix(\"tcp_stats\");\n          proxy_config.set_cluster(\"cluster_0\");\n          proxy_config.mutable_tunneling_config()->set_hostname(\"host.com\");\n\n          auto* listener = bootstrap.mutable_static_resources()->add_listeners();\n          listener->set_name(\"tcp_proxy\");\n          auto* socket_address = listener->mutable_address()->mutable_socket_address();\n          socket_address->set_address(Network::Test::getLoopbackAddressString(GetParam()));\n          socket_address->set_port_value(0);\n\n          auto* filter_chain = listener->add_filter_chains();\n          auto* 
filter = filter_chain->add_filters();\n          filter->mutable_typed_config()->PackFrom(proxy_config);\n          filter->set_name(\"envoy.filters.network.tcp_proxy\");\n        });\n  }\n};\n\nTEST_P(TcpTunnelingIntegrationTest, Basic) {\n  initialize();\n\n  // Start a connection, and verify the upgrade headers are received upstream.\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n\n  // Send upgrade headers downstream, fully establishing the connection.\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  // Send some data from downstream to upstream, and make sure it goes through.\n  ASSERT_TRUE(tcp_client->write(\"hello\", false));\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5));\n\n  // Send data from upstream to downstream.\n  upstream_request_->encodeData(12, false);\n  ASSERT_TRUE(tcp_client->waitForData(12));\n\n  // Now send more data and close the TCP client. 
This should be treated as half close, so the data\n  // should go through.\n  ASSERT_TRUE(tcp_client->write(\"hello\", false));\n  tcp_client->close();\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5));\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // If the upstream now sends 'end stream' the connection is fully closed.\n  upstream_request_->encodeData(0, true);\n}\n\n// Validates that if the cluster is not configured with HTTP/2 we don't attempt\n// to tunnel the data.\nTEST_P(TcpTunnelingIntegrationTest, InvalidCluster) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    bootstrap.mutable_static_resources()\n        ->mutable_clusters()\n        ->Mutable(0)\n        ->clear_http2_protocol_options();\n  });\n  initialize();\n\n  // Start a connection and see it close immediately due to the invalid cluster.\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  tcp_client->waitForHalfClose();\n  tcp_client->close();\n}\n\nTEST_P(TcpTunnelingIntegrationTest, InvalidResponseHeaders) {\n  initialize();\n\n  // Start a connection, and verify the upgrade headers are received upstream.\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n\n  // Send invalid response_ headers, and verify that the client disconnects and\n  // upstream gets a stream reset.\n  default_response_headers_.setStatus(enumToInt(Http::Code::ServiceUnavailable));\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  ASSERT_TRUE(upstream_request_->waitForReset());\n\n  // The connection should be fully closed, but the client has no way of knowing\n  // that. 
Ensure the FIN is read and clean up state.\n  tcp_client->waitForHalfClose();\n  tcp_client->close();\n}\n\nTEST_P(TcpTunnelingIntegrationTest, CloseUpstreamFirst) {\n  initialize();\n\n  // Establish a connection.\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  // Send data in both directions.\n  ASSERT_TRUE(tcp_client->write(\"hello\", false));\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5));\n\n  // Send data from upstream to downstream with an end stream and make sure the data is received\n  // before the connection is half-closed.\n  upstream_request_->encodeData(12, true);\n  ASSERT_TRUE(tcp_client->waitForData(12));\n  tcp_client->waitForHalfClose();\n\n  // Attempt to send data upstream.\n  // should go through.\n  ASSERT_TRUE(tcp_client->write(\"hello\", false));\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5));\n\n  ASSERT_TRUE(tcp_client->write(\"hello\", true));\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5));\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n}\n\nTEST_P(TcpTunnelingIntegrationTest, ResetStreamTest) {\n  enable_half_close_ = false;\n  initialize();\n\n  // Establish a connection.\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  // Reset the stream.\n 
 upstream_request_->encodeResetStream();\n  tcp_client->waitForDisconnect();\n}\n\nTEST_P(TcpTunnelingIntegrationTest, TestIdletimeoutWithLargeOutstandingData) {\n  enable_half_close_ = false;\n  config_helper_.setBufferLimits(1024, 1024);\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(1);\n    auto* filter_chain = listener->mutable_filter_chains(0);\n    auto* config_blob = filter_chain->mutable_filters(0)->mutable_typed_config();\n\n    ASSERT_TRUE(\n        config_blob->Is<API_NO_BOOST(envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>());\n    auto tcp_proxy_config = MessageUtil::anyConvert<API_NO_BOOST(\n        envoy::config::filter::network::tcp_proxy::v2::TcpProxy)>(*config_blob);\n    tcp_proxy_config.mutable_idle_timeout()->set_nanos(\n        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::milliseconds(500))\n            .count());\n    config_blob->PackFrom(tcp_proxy_config);\n  });\n\n  initialize();\n\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  std::string data(1024 * 16, 'a');\n  ASSERT_TRUE(tcp_client->write(data));\n  upstream_request_->encodeData(data, false);\n\n  tcp_client->waitForDisconnect();\n  ASSERT_TRUE(upstream_request_->waitForReset());\n}\n\n// Test that a downstream flush works correctly (all data is flushed)\nTEST_P(TcpTunnelingIntegrationTest, TcpProxyDownstreamFlush) {\n  // Use a very large size to make sure it is larger than the kernel socket read buffer.\n  const uint32_t size = 50 * 1024 * 1024;\n  
config_helper_.setBufferLimits(size / 4, size / 4);\n  initialize();\n\n  std::string data(size, 'a');\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  tcp_client->readDisable(true);\n  ASSERT_TRUE(tcp_client->write(\"\", true));\n\n  // This ensures that readDisable(true) has been run on its thread\n  // before tcp_client starts writing.\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  upstream_request_->encodeData(data, true);\n\n  test_server_->waitForCounterGe(\"cluster.cluster_0.upstream_flow_control_paused_reading_total\", 1);\n  tcp_client->readDisable(false);\n  tcp_client->waitForData(data);\n  tcp_client->waitForHalfClose();\n}\n\n// Test that an upstream flush works correctly (all data is flushed)\nTEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) {\n  // Use a very large size to make sure it is larger than the kernel socket read buffer.\n  const uint32_t size = 50 * 1024 * 1024;\n  config_helper_.setBufferLimits(size, size);\n  initialize();\n\n  std::string data(size, 'a');\n  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n  upstream_request_->readDisable(true);\n  upstream_request_->encodeData(\"hello\", false);\n\n  // This ensures that fake_upstream_connection->readDisable has been run on its thread\n 
 // before tcp_client starts writing.\n  ASSERT_TRUE(tcp_client->waitForData(5));\n\n  ASSERT_TRUE(tcp_client->write(data, true));\n\n  // Note that upstream_flush_active will *not* be incremented for the HTTP\n  // tunneling case. The data is already written to the stream, so no drainer\n  // is necessary.\n  upstream_request_->readDisable(false);\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, size));\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n  upstream_request_->encodeData(\"world\", true);\n  tcp_client->waitForHalfClose();\n}\n\n// Test that h2 connection is reused.\nTEST_P(TcpTunnelingIntegrationTest, H2ConnectionReuse) {\n  initialize();\n\n  // Establish a connection.\n  IntegrationTcpClientPtr tcp_client1 = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  // Send data in both directions.\n  ASSERT_TRUE(tcp_client1->write(\"hello1\", false));\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, \"hello1\"));\n\n  // Send data from upstream to downstream with an end stream and make sure the data is received\n  // before the connection is half-closed.\n  upstream_request_->encodeData(\"world1\", true);\n  tcp_client1->waitForData(\"world1\");\n  tcp_client1->waitForHalfClose();\n  tcp_client1->close();\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n\n  // Establish a new connection.\n  IntegrationTcpClientPtr tcp_client2 = makeTcpConnection(lookupPort(\"tcp_proxy\"));\n\n  // The new CONNECT stream is established in the existing h2 connection.\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  
ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  upstream_request_->encodeHeaders(default_response_headers_, false);\n\n  ASSERT_TRUE(tcp_client2->write(\"hello2\", false));\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, \"hello2\"));\n\n  // Send data from upstream to downstream with an end stream and make sure the data is received\n  // before the connection is half-closed.\n  upstream_request_->encodeData(\"world2\", true);\n  tcp_client2->waitForData(\"world2\");\n  tcp_client2->waitForHalfClose();\n  tcp_client2->close();\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, TcpTunnelingIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/test_host_predicate.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n\nnamespace Envoy {\n\n/**\n * A simple host predicate that will remember the first host it sees and reject\n * all other hosts.\n */\nclass TestHostPredicate : public Upstream::RetryHostPredicate {\n  bool shouldSelectAnotherHost(const Upstream::Host& candidate_host) override {\n    return !first_host_address_ || *first_host_address_ != candidate_host.address()->asString();\n  }\n\n  void onHostAttempted(Upstream::HostDescriptionConstSharedPtr attempted_host) override {\n    if (!first_host_address_) {\n      first_host_address_ = attempted_host->address()->asString();\n    }\n  }\n\n  absl::optional<std::string> first_host_address_;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/test_host_predicate_config.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n\n#include \"test/integration/test_host_predicate.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nclass TestHostPredicateFactory : public Upstream::RetryHostPredicateFactory {\npublic:\n  std::string name() const override { return \"envoy.test_host_predicate\"; }\n\n  Upstream::RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message&,\n                                                            uint32_t) override {\n    return std::make_shared<testing::NiceMock<TestHostPredicate>>();\n  }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};\n  }\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/test_utility.sh",
    "content": "#!/bin/bash\n\n# Helper script for bash integration tests, intended to be source'd from the\n# _test.sh.\n#\n# This was borrowed from PageSpeed's system test infrastructure. Original source\n# link:\n#\n# https://github.com/apache/incubator-pagespeed-mod/blob/c7cc4f22c79ada8077be2a16afc376dc8f8bd2da/pagespeed/automatic/system_test_helpers.sh#L383\n\nCURRENT_TEST=\"NONE\"\nfunction start_test() {\n  CURRENT_TEST=\"$1\"\n  echo \"TEST: $CURRENT_TEST\"\n}\n\ncheck() {\n  echo \"     check\" \"$@\" ...\n  # see https://github.com/koalaman/shellcheck/issues/1679\n  # shellcheck disable=SC2119\n  \"$@\" || handle_failure\n}\n\nexport BACKGROUND_PID=\"?\"\nrun_in_background_saving_pid() {\n  echo \"     backgrounding:\" \"$@\" ...\n  \"$@\" &\n  export BACKGROUND_PID=\"$!\"\n}\n\n# By default, print a message like:\n#   failure at line 374\n#   FAIL\n# and then exit with return value 1.  If we expected this test to fail, log to\n# $EXPECTED_FAILURES and return without exiting.\n#\n# If the shell does not support the 'caller' builtin, skip the line number info.\n#\n# Assumes it's being called from a failure-reporting function and that the\n# actual failure the user is interested in is our caller's caller.  
If it\n# weren't for this, fail and handle_failure could be the same.\n# shellcheck disable=SC2120\nhandle_failure() {\n  if [ $# -eq 1 ]; then\n    echo FAILed Input: \"$1\"\n  fi\n\n  # From http://stackoverflow.com/questions/685435/bash-stacktrace\n  # to avoid printing 'handle_failure' we start with 1 to skip get_stack caller\n  local i\n  local stack_size=${#FUNCNAME[@]}\n  for (( i=1; i<stack_size ; i++ )); do\n    local func=\"${FUNCNAME[$i]}\"\n    [ -z \"$func\" ] && func=MAIN\n    local line_number=\"${BASH_LINENO[(( i - 1 ))]}\"\n    local src=\"${BASH_SOURCE[$i]}\"\n    [ -z \"$src\" ] && src=non_file_source\n    echo \"${src}:${line_number}: $func\"\n  done\n\n  # Note: we print line number after \"failed input\" so that it doesn't get\n  # knocked out of the terminal buffer.\n  if type caller > /dev/null 2>&1 ; then\n    # \"caller 1\" is our caller's caller.\n    echo \"     failure at line $(caller 1 | sed 's/ .*//')\" 1>&2\n  fi\n  echo \"in '$CURRENT_TEST'\"\n  echo FAIL.\n  exit 1\n}\n\n# The heapchecker outputs some data to stderr on every execution.  
This gets intermingled\n# with the output from --hot-restart-version, so disable the heap-checker for these runs.\ndisableHeapCheck () {\n  SAVED_HEAPCHECK=${HEAPCHECK}\n  unset HEAPCHECK\n}\n\nenableHeapCheck () {\n  HEAPCHECK=${SAVED_HEAPCHECK}\n}\n\n# Scrapes a stat value from an an admin port.\nscrape_stat() {\n  local ADMIN_ADDRESS=\"$1\"\n  local STAT_NAME=\"$2\"\n  curl -sg \"$ADMIN_ADDRESS\"/stats | grep \"^${STAT_NAME}: \" | cut -f2 -d\" \"\n}\n\nmilliseconds() {\n  local nanos seconds\n  nanos=$(date +%N | sed 's/^0*//')\n  seconds=$(date +%s)\n  echo $((1000*seconds + nanos/1000000))\n}\n\nwait_for_stat() {\n  local ADMIN_ADDRESS=\"$1\" \\\n\tSTAT_NAME=\"$2\" \\\n\tOP=\"$3\" \\\n\tVALUE=\"$4\" \\\n\tTIMEOUT_SEC=\"$5\" \\\n\tret=\"\" \\\n\tend_time end_time_ms start_time_ms stat\n  start_time_ms=$(milliseconds)\n  end_time=$((SECONDS + TIMEOUT_SEC))\n  while [ \"$ret\" = \"\" ]; do\n    stat=$(scrape_stat \"$ADMIN_ADDRESS\" \"$STAT_NAME\")\n    if test \"$stat\" \"$OP\" \"$VALUE\"; then\n      end_time_ms=$(milliseconds)\n      ret=\"success: $STAT_NAME reached $stat after $((end_time_ms - start_time_ms)) ms\"\n    elif [ \"$SECONDS\" -gt \"$end_time\" ]; then\n      ret=\"timeout: waiting $TIMEOUT_SEC seconds for $STAT_NAME=$stat to reach $VALUE\"\n    else\n      sleep 0.1\n    fi\n  done\n  echo \"$ret\"\n}\n\n[[ -z \"${ENVOY_BIN}\" ]] && ENVOY_BIN=\"${TEST_SRCDIR}/envoy/source/exe/envoy-static\"\n"
  },
  {
    "path": "test/integration/transport_socket_match_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"test/integration/autonomous_upstream.h\"\n#include \"test/integration/http_integration.h\"\n\n#include \"absl/strings/str_replace.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass TransportSockeMatchIntegrationTest : public testing::Test, public HttpIntegrationTest {\npublic:\n  TransportSockeMatchIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1,\n                            TestEnvironment::getIpVersionsForTest().front(),\n                            ConfigHelper::httpProxyConfig()) {\n    autonomous_upstream_ = true;\n    setUpstreamCount(num_hosts_);\n  }\n\n  void initialize() override {\n    config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* static_resources = bootstrap.mutable_static_resources();\n      auto* cluster = static_resources->mutable_clusters(0);\n      cluster->mutable_lb_subset_config()->add_subset_selectors()->add_keys(type_key_);\n      if (enable_transport_socket_match_) {\n        const std::string match_yaml = absl::StrFormat(\n            R\"EOF(\nname: \"tls_socket\"\nmatch:\n  mtlsReady: \"true\"\ntransport_socket:\n  name: tls\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\n    common_tls_context:\n      tls_certificates:\n      - certificate_chain: { filename: \"%s\" }\n        private_key: { filename: \"%s\" }\n )EOF\",\n            TestEnvironment::runfilesPath(\"test/config/integration/certs/clientcert.pem\"),\n            TestEnvironment::runfilesPath(\"test/config/integration/certs/clientkey.pem\"));\n        auto* transport_socket_match = cluster->add_transport_socket_matches();\n        TestUtility::loadFromYaml(match_yaml, *transport_socket_match);\n     
 }\n      // Setup the client Envoy TLS config.\n      cluster->clear_load_assignment();\n      auto* load_assignment = cluster->mutable_load_assignment();\n      load_assignment->set_cluster_name(cluster->name());\n      auto* endpoints = load_assignment->add_endpoints();\n      for (uint32_t i = 0; i < num_hosts_; i++) {\n        auto* lb_endpoint = endpoints->add_lb_endpoints();\n        // ConfigHelper will fill in ports later.\n        auto* endpoint = lb_endpoint->mutable_endpoint();\n        auto* addr = endpoint->mutable_address()->mutable_socket_address();\n        addr->set_address(Network::Test::getLoopbackAddressString(\n            TestEnvironment::getIpVersionsForTest().front()));\n        addr->set_port_value(0);\n        // Assign type metadata based on i.\n        auto* metadata = lb_endpoint->mutable_metadata();\n        Envoy::Config::Metadata::mutableMetadataValue(*metadata, \"envoy.lb\", type_key_)\n            .set_string_value((i % 2 == 0) ? \"a\" : \"b\");\n        if (isTLSUpstream(i)) {\n          Envoy::Config::Metadata::mutableMetadataValue(\n              *metadata, Envoy::Config::MetadataFilters::get().ENVOY_TRANSPORT_SOCKET_MATCH,\n              \"mtlsReady\")\n              .set_string_value(\"true\");\n        }\n      }\n    });\n    config_helper_.addConfigModifier(\n        [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) {\n          auto* vhost = hcm.mutable_route_config()->mutable_virtual_hosts(0);\n\n          // Report the host's type metadata and remote address on every response.\n          auto* resp_header = vhost->add_response_headers_to_add();\n          auto* header = resp_header->mutable_header();\n          header->set_key(host_type_header_);\n          header->set_value(\n              fmt::format(R\"EOF(%UPSTREAM_METADATA([\"envoy.lb\", \"{}\"])%)EOF\", type_key_));\n\n          resp_header = vhost->add_response_headers_to_add();\n          header = 
resp_header->mutable_header();\n          header->set_key(host_header_);\n          header->set_value(\"%UPSTREAM_REMOTE_ADDRESS%\");\n\n          // Create routes for x-type=a and x-type=b headers.\n          vhost->clear_routes();\n          configureRoute(vhost->add_routes(), \"a\");\n          configureRoute(vhost->add_routes(), \"b\");\n        });\n    HttpIntegrationTest::initialize();\n  }\n\n  void configureRoute(envoy::config::route::v3::Route* route, const std::string& host_type) {\n    auto* match = route->mutable_match();\n    match->set_prefix(\"/\");\n\n    // Match the x-type header against the given host_type (a/b).\n    auto* match_header = match->add_headers();\n    match_header->set_name(type_header_);\n    match_header->set_exact_match(host_type);\n\n    // Route to cluster_0, selecting metadata type=a or type=b.\n    auto* action = route->mutable_route();\n    action->set_cluster(\"cluster_0\");\n    auto* metadata_match = action->mutable_metadata_match();\n    Envoy::Config::Metadata::mutableMetadataValue(*metadata_match, \"envoy.lb\", type_key_)\n        .set_string_value(host_type);\n  };\n\n  bool isTLSUpstream(int index) { return index % 2 == 0; }\n\n  void createUpstreams() override {\n    for (uint32_t i = 0; i < fake_upstreams_count_; ++i) {\n      auto endpoint = upstream_address_fn_(i);\n      if (isTLSUpstream(i)) {\n        fake_upstreams_.emplace_back(new AutonomousUpstream(\n            HttpIntegrationTest::createUpstreamTlsContext(), endpoint->ip()->port(),\n            FakeHttpConnection::Type::HTTP1, endpoint->ip()->version(), timeSystem(), false));\n      } else {\n        fake_upstreams_.emplace_back(new AutonomousUpstream(\n            Network::Test::createRawBufferSocketFactory(), endpoint->ip()->port(),\n            FakeHttpConnection::Type::HTTP1, endpoint->ip()->version(), timeSystem(), false));\n      }\n    }\n  }\n\n  void SetUp() override {\n    setDownstreamProtocol(Http::CodecClient::Type::HTTP1);\n    
setUpstreamProtocol(FakeHttpConnection::Type::HTTP1);\n  }\n\n  const uint32_t num_hosts_{2};\n  Http::TestRequestHeaderMapImpl type_a_request_headers_{{\":method\", \"GET\"},\n                                                         {\":path\", \"/test\"},\n                                                         {\":scheme\", \"http\"},\n                                                         {\":authority\", \"host\"},\n                                                         {\"x-type\", \"a\"}};\n  Http::TestRequestHeaderMapImpl type_b_request_headers_{{\":method\", \"GET\"},\n                                                         {\":path\", \"/test\"},\n                                                         {\":scheme\", \"http\"},\n                                                         {\":authority\", \"host\"},\n                                                         {\"x-type\", \"b\"}};\n  const std::string host_type_header_{\"x-host-type\"};\n  const std::string host_header_{\"x-host\"};\n  const std::string type_header_{\"x-type\"};\n  const std::string type_key_{\"type\"};\n  bool enable_transport_socket_match_{true};\n};\n\nTEST_F(TransportSockeMatchIntegrationTest, TlsAndPlaintextSucceed) {\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  for (int i = 0; i < 3; i++) {\n    IntegrationStreamDecoderPtr response =\n        codec_client_->makeHeaderOnlyRequest(type_a_request_headers_);\n    response->waitForEndStream();\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n    response = codec_client_->makeHeaderOnlyRequest(type_b_request_headers_);\n    response->waitForEndStream();\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n}\n\nTEST_F(TransportSockeMatchIntegrationTest, TlsAndPlaintextFailsWithoutSocketMatch) {\n  enable_transport_socket_match_ = false;\n  initialize();\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n  for (int i = 0; i < 3; i++) {\n    
IntegrationStreamDecoderPtr response =\n        codec_client_->makeHeaderOnlyRequest(type_a_request_headers_);\n    response->waitForEndStream();\n    EXPECT_EQ(\"503\", response->headers().getStatusValue());\n    response = codec_client_->makeHeaderOnlyRequest(type_b_request_headers_);\n    response->waitForEndStream();\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n  }\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/uds_integration_test.cc",
    "content": "#include \"uds_integration_test.h\"\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/test_common/network_utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\n#if defined(__linux__)\nINSTANTIATE_TEST_SUITE_P(\n    TestParameters, UdsUpstreamIntegrationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                     testing::Values(false, true)));\n#else\nINSTANTIATE_TEST_SUITE_P(\n    TestParameters, UdsUpstreamIntegrationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                     testing::Values(false)));\n#endif\n\nTEST_P(UdsUpstreamIntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, false);\n}\n\nTEST_P(UdsUpstreamIntegrationTest, RouterHeaderOnlyRequestAndResponse) {\n  testRouterHeaderOnlyRequestAndResponse();\n}\n\nTEST_P(UdsUpstreamIntegrationTest, RouterUpstreamDisconnectBeforeResponseComplete) {\n  testRouterUpstreamDisconnectBeforeResponseComplete();\n}\n\nTEST_P(UdsUpstreamIntegrationTest, RouterDownstreamDisconnectBeforeRequestComplete) {\n  testRouterDownstreamDisconnectBeforeRequestComplete();\n}\n\nTEST_P(UdsUpstreamIntegrationTest, RouterDownstreamDisconnectBeforeResponseComplete) {\n  testRouterDownstreamDisconnectBeforeResponseComplete();\n}\n\n#if defined(__linux__)\nINSTANTIATE_TEST_SUITE_P(\n    TestParameters, UdsListenerIntegrationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                     testing::Values(false, true)));\n#else\nINSTANTIATE_TEST_SUITE_P(\n    TestParameters, UdsListenerIntegrationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                     testing::Values(false)));\n#endif\n\nvoid UdsListenerIntegrationTest::initialize() {\n  
config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* admin_addr = bootstrap.mutable_admin()->mutable_address();\n    admin_addr->clear_socket_address();\n    admin_addr->mutable_pipe()->set_path(getAdminSocketName());\n\n    auto* listeners = bootstrap.mutable_static_resources()->mutable_listeners();\n    RELEASE_ASSERT(!listeners->empty(), \"\");\n    auto filter_chains = listeners->Get(0).filter_chains();\n    listeners->Clear();\n    auto* listener = listeners->Add();\n    listener->set_name(\"listener_0\");\n    listener->mutable_address()->mutable_pipe()->set_path(getListenerSocketName());\n    *(listener->mutable_filter_chains()) = filter_chains;\n  });\n  HttpIntegrationTest::initialize();\n}\n\nHttpIntegrationTest::ConnectionCreationFunction UdsListenerIntegrationTest::createConnectionFn() {\n  return [&]() -> Network::ClientConnectionPtr {\n    Network::ClientConnectionPtr conn(dispatcher_->createClientConnection(\n        Network::Utility::resolveUrl(fmt::format(\"unix://{}\", getListenerSocketName())),\n        Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(),\n        nullptr));\n    conn->enableHalfClose(enable_half_close_);\n    return conn;\n  };\n}\n\nTEST_P(UdsListenerIntegrationTest, TestPeerCredentials) {\n  fake_upstreams_count_ = 1;\n  initialize();\n  auto client_connection = createConnectionFn()();\n  codec_client_ = makeHttpConnection(std::move(client_connection));\n  Http::TestRequestHeaderMapImpl request_headers{\n      {\":method\", \"POST\"},    {\":path\", \"/test/long/url\"}, {\":scheme\", \"http\"},\n      {\":authority\", \"host\"}, {\"x-lyft-user-id\", \"123\"},   {\"x-forwarded-for\", \"10.0.0.1\"}};\n  auto response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  waitForNextUpstreamRequest(0);\n\n  auto credentials = codec_client_->connection()->unixSocketPeerCredentials();\n#ifndef SO_PEERCRED\n  EXPECT_EQ(credentials, 
absl::nullopt);\n#else\n  EXPECT_EQ(credentials->pid, getpid());\n  EXPECT_EQ(credentials->uid, getuid());\n  EXPECT_EQ(credentials->gid, getgid());\n#endif\n\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n\n  response->waitForEndStream();\n}\n\nTEST_P(UdsListenerIntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) {\n  ConnectionCreationFunction creator = createConnectionFn();\n  testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator);\n}\n\nTEST_P(UdsListenerIntegrationTest, RouterHeaderOnlyRequestAndResponse) {\n  ConnectionCreationFunction creator = createConnectionFn();\n  testRouterHeaderOnlyRequestAndResponse(&creator);\n}\n\nTEST_P(UdsListenerIntegrationTest, RouterListenerDisconnectBeforeResponseComplete) {\n  ConnectionCreationFunction creator = createConnectionFn();\n  testRouterUpstreamDisconnectBeforeResponseComplete(&creator);\n}\n\nTEST_P(UdsListenerIntegrationTest, RouterDownstreamDisconnectBeforeRequestComplete) {\n  ConnectionCreationFunction creator = createConnectionFn();\n  testRouterDownstreamDisconnectBeforeRequestComplete(&creator);\n}\n\n// TODO(htuch): This is disabled due to\n// https://github.com/envoyproxy/envoy/issues/2829.\nTEST_P(UdsListenerIntegrationTest, DISABLED_RouterDownstreamDisconnectBeforeResponseComplete) {\n  ConnectionCreationFunction creator = createConnectionFn();\n  testRouterDownstreamDisconnectBeforeResponseComplete(&creator);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/uds_integration_test.h",
    "content": "#pragma once\n\n#include <tuple>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/common/fmt.h\"\n#include \"common/http/codec_client.h\"\n\n#include \"test/integration/fake_upstream.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/server.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nclass UdsUpstreamIntegrationTest\n    : public testing::TestWithParam<std::tuple<Network::Address::IpVersion, bool>>,\n      public HttpIntegrationTest {\npublic:\n  UdsUpstreamIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, std::get<0>(GetParam())),\n        abstract_namespace_(std::get<1>(GetParam())) {}\n\n  void createUpstreams() override {\n    addFakeUpstream(TestEnvironment::unixDomainSocketPath(\"udstest.1.sock\", abstract_namespace_),\n                    FakeHttpConnection::Type::HTTP1);\n\n    config_helper_.addConfigModifier(\n        [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          auto* static_resources = bootstrap.mutable_static_resources();\n          for (int i = 0; i < static_resources->clusters_size(); ++i) {\n            auto* cluster = static_resources->mutable_clusters(i);\n            for (int j = 0; j < cluster->load_assignment().endpoints_size(); ++j) {\n              auto locality_lb = cluster->mutable_load_assignment()->mutable_endpoints(j);\n              for (int k = 0; k < locality_lb->lb_endpoints_size(); ++k) {\n                auto lb_endpoint = locality_lb->mutable_lb_endpoints(k);\n                if (lb_endpoint->endpoint().address().has_socket_address()) {\n                  auto* address = lb_endpoint->mutable_endpoint()->mutable_address();\n                  address->clear_socket_address();\n                  address->mutable_pipe()->set_path(\n                      TestEnvironment::unixDomainSocketPath(\"udstest.1.sock\", abstract_namespace_));\n          
      }\n              }\n            }\n          }\n        });\n  }\n\nprotected:\n  const bool abstract_namespace_;\n};\n\nclass UdsListenerIntegrationTest\n    : public testing::TestWithParam<std::tuple<Network::Address::IpVersion, bool>>,\n      public HttpIntegrationTest {\npublic:\n  UdsListenerIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, std::get<0>(GetParam())),\n        abstract_namespace_(std::get<1>(GetParam())) {}\n\n  void initialize() override;\n\n  std::string getAdminSocketName() {\n    return TestEnvironment::unixDomainSocketPath(\"admin.sock\", abstract_namespace_);\n  }\n\n  std::string getListenerSocketName() {\n    return TestEnvironment::unixDomainSocketPath(\"listener_0.sock\", abstract_namespace_);\n  }\n\nprotected:\n  HttpIntegrationTest::ConnectionCreationFunction createConnectionFn();\n\n  const bool abstract_namespace_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/utility.cc",
    "content": "#include \"utility.h\"\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/connection.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/network/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nvoid BufferingStreamDecoder::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) {\n  ASSERT(!complete_);\n  complete_ = end_stream;\n  headers_ = std::move(headers);\n  if (complete_) {\n    onComplete();\n  }\n}\n\nvoid BufferingStreamDecoder::decodeData(Buffer::Instance& data, bool end_stream) {\n  ASSERT(!complete_);\n  complete_ = end_stream;\n  body_.append(data.toString());\n  if (complete_) {\n    onComplete();\n  }\n}\n\nvoid BufferingStreamDecoder::decodeTrailers(Http::ResponseTrailerMapPtr&&) {\n  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;\n}\n\nvoid BufferingStreamDecoder::onComplete() {\n  ASSERT(complete_);\n  on_complete_cb_();\n}\n\nvoid BufferingStreamDecoder::onResetStream(Http::StreamResetReason, absl::string_view) {\n  ADD_FAILURE();\n}\n\nBufferingStreamDecoderPtr\nIntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPtr& addr,\n                                   const std::string& method, const std::string& url,\n                                   const std::string& body, Http::CodecClient::Type type,\n                                   const 
std::string& host, const std::string& content_type) {\n\n  NiceMock<Stats::MockIsolatedStatsStore> mock_stats_store;\n  NiceMock<Random::MockRandomGenerator> random;\n  Event::GlobalTimeSystem time_system;\n  NiceMock<Random::MockRandomGenerator> random_generator;\n  Api::Impl api(Thread::threadFactoryForTest(), mock_stats_store, time_system,\n                Filesystem::fileSystemForTest(), random_generator);\n  Event::DispatcherPtr dispatcher(api.allocateDispatcher(\"test_thread\"));\n  std::shared_ptr<Upstream::MockClusterInfo> cluster{new NiceMock<Upstream::MockClusterInfo>()};\n  Upstream::HostDescriptionConstSharedPtr host_description{\n      Upstream::makeTestHostDescription(cluster, \"tcp://127.0.0.1:80\")};\n  Http::CodecClientProd client(\n      type,\n      dispatcher->createClientConnection(addr, Network::Address::InstanceConstSharedPtr(),\n                                         Network::Test::createRawBufferSocket(), nullptr),\n      host_description, *dispatcher, random);\n  BufferingStreamDecoderPtr response(new BufferingStreamDecoder([&]() -> void {\n    client.close();\n    dispatcher->exit();\n  }));\n  Http::RequestEncoder& encoder = client.newStream(*response);\n  encoder.getStream().addCallbacks(*response);\n\n  Http::TestRequestHeaderMapImpl headers;\n  headers.setMethod(method);\n  headers.setPath(url);\n  headers.setHost(host);\n  headers.setReferenceScheme(Http::Headers::get().SchemeValues.Http);\n  if (!content_type.empty()) {\n    headers.setContentType(content_type);\n  }\n  encoder.encodeHeaders(headers, body.empty());\n  if (!body.empty()) {\n    Buffer::OwnedImpl body_buffer(body);\n    encoder.encodeData(body_buffer, true);\n  }\n\n  dispatcher->run(Event::Dispatcher::RunType::Block);\n  return response;\n}\n\nBufferingStreamDecoderPtr\nIntegrationUtil::makeSingleRequest(uint32_t port, const std::string& method, const std::string& url,\n                                   const std::string& body, Http::CodecClient::Type type,\n      
                             Network::Address::IpVersion ip_version, const std::string& host,\n                                   const std::string& content_type) {\n  auto addr = Network::Utility::resolveUrl(\n      fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(ip_version), port));\n  return makeSingleRequest(addr, method, url, body, type, host, content_type);\n}\n\nRawConnectionDriver::RawConnectionDriver(uint32_t port, Buffer::Instance& initial_data,\n                                         ReadCallback data_callback,\n                                         Network::Address::IpVersion version,\n                                         Event::Dispatcher& dispatcher,\n                                         Network::TransportSocketPtr transport_socket)\n    : dispatcher_(dispatcher) {\n  api_ = Api::createApiForTest(stats_store_);\n  Event::GlobalTimeSystem time_system;\n  callbacks_ = std::make_unique<ConnectionCallbacks>();\n\n  if (transport_socket == nullptr) {\n    transport_socket = Network::Test::createRawBufferSocket();\n  }\n\n  client_ = dispatcher_.createClientConnection(\n      Network::Utility::resolveUrl(\n          fmt::format(\"tcp://{}:{}\", Network::Test::getLoopbackAddressUrlString(version), port)),\n      Network::Address::InstanceConstSharedPtr(), std::move(transport_socket), nullptr);\n  client_->addConnectionCallbacks(*callbacks_);\n  client_->addReadFilter(Network::ReadFilterSharedPtr{new ForwardingFilter(*this, data_callback)});\n  client_->write(initial_data, false);\n  client_->connect();\n}\n\nRawConnectionDriver::~RawConnectionDriver() = default;\n\nvoid RawConnectionDriver::waitForConnection() {\n  // TODO(mattklein123): Add a timeout and switch to events and waitFor().\n  while (!callbacks_->connected() && !callbacks_->closed()) {\n    Event::GlobalTimeSystem().timeSystem().realSleepDoNotUseWithoutScrutiny(\n        std::chrono::milliseconds(10));\n    
dispatcher_.run(Event::Dispatcher::RunType::NonBlock);\n  }\n}\n\nvoid RawConnectionDriver::run(Event::Dispatcher::RunType run_type) { dispatcher_.run(run_type); }\n\nvoid RawConnectionDriver::close() { client_->close(Network::ConnectionCloseType::FlushWrite); }\n\nWaitForPayloadReader::WaitForPayloadReader(Event::Dispatcher& dispatcher)\n    : dispatcher_(dispatcher) {}\n\nNetwork::FilterStatus WaitForPayloadReader::onData(Buffer::Instance& data, bool end_stream) {\n  data_.append(data.toString());\n  data.drain(data.length());\n  read_end_stream_ = end_stream;\n  if ((!data_to_wait_for_.empty() && absl::StartsWith(data_, data_to_wait_for_)) ||\n      (exact_match_ == false && data_.find(data_to_wait_for_) != std::string::npos) || end_stream) {\n    data_to_wait_for_.clear();\n    dispatcher_.exit();\n  }\n\n  if (wait_for_length_ && data_.size() >= length_to_wait_for_) {\n    wait_for_length_ = false;\n    dispatcher_.exit();\n  }\n\n  return Network::FilterStatus::StopIteration;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/utility.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/header_map.h\"\n#include \"envoy/network/filter.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/codec_client.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/test_time.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n/**\n * A buffering response decoder used for testing.\n */\nclass BufferingStreamDecoder : public Http::ResponseDecoder, public Http::StreamCallbacks {\npublic:\n  BufferingStreamDecoder(std::function<void()> on_complete_cb) : on_complete_cb_(on_complete_cb) {}\n\n  bool complete() { return complete_; }\n  const Http::ResponseHeaderMap& headers() { return *headers_; }\n  const std::string& body() { return body_; }\n\n  // Http::StreamDecoder\n  void decodeData(Buffer::Instance&, bool end_stream) override;\n  void decodeMetadata(Http::MetadataMapPtr&&) override {}\n\n  // Http::ResponseDecoder\n  void decode100ContinueHeaders(Http::ResponseHeaderMapPtr&&) override {}\n  void decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override;\n  void decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) override;\n\n  // Http::StreamCallbacks\n  void onResetStream(Http::StreamResetReason reason,\n                     absl::string_view transport_failure_reason) override;\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\nprivate:\n  void onComplete();\n\n  Http::ResponseHeaderMapPtr headers_;\n  std::string body_;\n  bool complete_{};\n  std::function<void()> on_complete_cb_;\n};\n\nusing BufferingStreamDecoderPtr = std::unique_ptr<BufferingStreamDecoder>;\n\n/**\n * Basic driver for a raw connection.\n */\nclass RawConnectionDriver {\npublic:\n  
using ReadCallback = std::function<void(Network::ClientConnection&, const Buffer::Instance&)>;\n\n  RawConnectionDriver(uint32_t port, Buffer::Instance& initial_data, ReadCallback data_callback,\n                      Network::Address::IpVersion version, Event::Dispatcher& dispatcher,\n                      Network::TransportSocketPtr transport_socket = nullptr);\n  ~RawConnectionDriver();\n  const Network::Connection& connection() { return *client_; }\n  void run(Event::Dispatcher::RunType run_type = Event::Dispatcher::RunType::Block);\n  void close();\n  Network::ConnectionEvent lastConnectionEvent() const {\n    return callbacks_->last_connection_event_;\n  }\n  // Wait until connected or closed().\n  void waitForConnection();\n\n  bool closed() { return callbacks_->closed(); }\n\nprivate:\n  struct ForwardingFilter : public Network::ReadFilterBaseImpl {\n    ForwardingFilter(RawConnectionDriver& parent, ReadCallback cb)\n        : parent_(parent), data_callback_(cb) {}\n\n    // Network::ReadFilter\n    Network::FilterStatus onData(Buffer::Instance& data, bool) override {\n      data_callback_(*parent_.client_, data);\n      data.drain(data.length());\n      return Network::FilterStatus::StopIteration;\n    }\n\n    RawConnectionDriver& parent_;\n    ReadCallback data_callback_;\n  };\n\n  struct ConnectionCallbacks : public Network::ConnectionCallbacks {\n\n    bool connected() const { return connected_; }\n    bool closed() const { return closed_; }\n\n    // Network::ConnectionCallbacks\n    void onEvent(Network::ConnectionEvent event) override {\n      last_connection_event_ = event;\n      closed_ |= (event == Network::ConnectionEvent::RemoteClose ||\n                  event == Network::ConnectionEvent::LocalClose);\n      connected_ |= (event == Network::ConnectionEvent::Connected);\n    }\n    void onAboveWriteBufferHighWatermark() override {}\n    void onBelowWriteBufferLowWatermark() override {}\n\n    Network::ConnectionEvent 
last_connection_event_;\n\n  private:\n    bool connected_{false};\n    bool closed_{false};\n  };\n\n  Stats::IsolatedStoreImpl stats_store_;\n  Api::ApiPtr api_;\n  Event::Dispatcher& dispatcher_;\n  std::unique_ptr<ConnectionCallbacks> callbacks_;\n  Network::ClientConnectionPtr client_;\n};\n\n/**\n * Utility routines for integration tests.\n */\nclass IntegrationUtil {\npublic:\n  /**\n   * Make a new connection, issues a request, and then disconnect when the request is complete.\n   * @param addr supplies the address to connect to.\n   * @param method supplies the request method.\n   * @param url supplies the request url.\n   * @param body supplies the optional request body to send.\n   * @param type supplies the codec to use for the request.\n   * @param host supplies the host header to use for the request.\n   * @param content_type supplies the content-type header to use for the request, if any.\n   * @return BufferingStreamDecoderPtr the complete request or a partial request if there was\n   *         remote early disconnection.\n   */\n  static BufferingStreamDecoderPtr\n  makeSingleRequest(const Network::Address::InstanceConstSharedPtr& addr, const std::string& method,\n                    const std::string& url, const std::string& body, Http::CodecClient::Type type,\n                    const std::string& host = \"host\", const std::string& content_type = \"\");\n\n  /**\n   * Make a new connection, issues a request, and then disconnect when the request is complete.\n   * @param port supplies the port to connect to on localhost.\n   * @param method supplies the request method.\n   * @param url supplies the request url.\n   * @param body supplies the optional request body to send.\n   * @param type supplies the codec to use for the request.\n   * @param version the IP address version of the client and server.\n   * @param host supplies the host header to use for the request.\n   * @param content_type supplies the content-type header to use for the 
request, if any.\n   * @return BufferingStreamDecoderPtr the complete request or a partial request if there was\n   *         remote early disconnection.\n   */\n  static BufferingStreamDecoderPtr\n  makeSingleRequest(uint32_t port, const std::string& method, const std::string& url,\n                    const std::string& body, Http::CodecClient::Type type,\n                    Network::Address::IpVersion ip_version, const std::string& host = \"host\",\n                    const std::string& content_type = \"\");\n};\n\n// A set of connection callbacks which tracks connection state.\nclass ConnectionStatusCallbacks : public Network::ConnectionCallbacks {\npublic:\n  bool connected() const { return connected_; }\n  bool closed() const { return closed_; }\n\n  // Network::ConnectionCallbacks\n  void onEvent(Network::ConnectionEvent event) override {\n    closed_ |= (event == Network::ConnectionEvent::RemoteClose ||\n                event == Network::ConnectionEvent::LocalClose);\n    connected_ |= (event == Network::ConnectionEvent::Connected);\n  }\n  void onAboveWriteBufferHighWatermark() override {}\n  void onBelowWriteBufferLowWatermark() override {}\n\nprivate:\n  bool connected_{false};\n  bool closed_{false};\n};\n\n// A read filter which waits for a given data then stops the dispatcher loop.\nclass WaitForPayloadReader : public Network::ReadFilterBaseImpl {\npublic:\n  WaitForPayloadReader(Event::Dispatcher& dispatcher);\n\n  // Network::ReadFilter\n  Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override;\n\n  void set_data_to_wait_for(const std::string& data, bool exact_match = true) {\n    data_to_wait_for_ = data;\n    exact_match_ = exact_match;\n  }\n\n  ABSL_MUST_USE_RESULT testing::AssertionResult waitForLength(size_t length,\n                                                              std::chrono::milliseconds timeout) {\n    ASSERT(!wait_for_length_);\n    length_to_wait_for_ = length;\n    wait_for_length_ = true;\n\n    
Event::TimerPtr timeout_timer =\n        dispatcher_.createTimer([this]() -> void { dispatcher_.exit(); });\n    timeout_timer->enableTimer(timeout);\n\n    dispatcher_.run(Event::Dispatcher::RunType::Block);\n\n    if (timeout_timer->enabled()) {\n      timeout_timer->disableTimer();\n      return testing::AssertionSuccess();\n    }\n\n    length_to_wait_for_ = 0;\n    wait_for_length_ = false;\n    return testing::AssertionFailure() << \"Timed out waiting for \" << length << \" bytes of data\\n\";\n  }\n\n  const std::string& data() { return data_; }\n  bool readLastByte() { return read_end_stream_; }\n  void clearData(size_t count = std::string::npos) { data_.erase(0, count); }\n\nprivate:\n  Event::Dispatcher& dispatcher_;\n  std::string data_to_wait_for_;\n  std::string data_;\n  bool exact_match_{true};\n  bool read_end_stream_{};\n  size_t length_to_wait_for_{0};\n  bool wait_for_length_{false};\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/version_integration_test.cc",
    "content": "#include \"test/integration/http_integration.h\"\n\nnamespace Envoy {\nnamespace {\n\n// Integration test for ingestion of configuration across API versions.\n// Currently we only have static tests, but there will also be xDS tests added\n// later.\nclass VersionIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                               public HttpIntegrationTest {\npublic:\n  VersionIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, VersionIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));\n\n// Just IP tagging for now.\nconst char ExampleIpTaggingConfig[] = R\"EOF(\n    request_type: both\n    ip_tags:\n      - ip_tag_name: external_request\n        ip_list:\n          - {address_prefix: 1.2.3.4, prefix_len: 32}\n)EOF\";\n\n// envoy.filters.http.ip_tagging from v2 Struct config.\nTEST_P(VersionIntegrationTest, DEPRECATED_FEATURE_TEST(IpTaggingV2StaticStructConfig)) {\n  config_helper_.addFilter(absl::StrCat(R\"EOF(\n  name: envoy.filters.http.ip_tagging\n  config:\n  )EOF\",\n                                        ExampleIpTaggingConfig));\n\n  config_helper_.addRuntimeOverride(\n      \"envoy.deprecated_features:envoy.extensions.filters.network.\"\n      \"http_connection_manager.v3.HttpFilter.hidden_envoy_deprecated_config\",\n      \"true\");\n  initialize();\n}\n\n// envoy.filters.http.ip_tagging from v2 TypedStruct config.\nTEST_P(VersionIntegrationTest, IpTaggingV2StaticTypedStructConfig) {\n  config_helper_.addFilter(absl::StrCat(R\"EOF(\nname: ip_tagging\ntyped_config:\n  \"@type\": type.googleapis.com/udpa.type.v1.TypedStruct\n  type_url: type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging\n  value:\n  )EOF\",\n                                        ExampleIpTaggingConfig));\n  initialize();\n}\n\n// envoy.filters.http.ip_tagging from v3 TypedStruct 
config.\nTEST_P(VersionIntegrationTest, IpTaggingV3StaticTypedStructConfig) {\n  config_helper_.addFilter(absl::StrCat(R\"EOF(\nname: ip_tagging\ntyped_config:\n  \"@type\": type.googleapis.com/udpa.type.v1.TypedStruct\n  type_url: type.googleapis.com/envoy.extensions.filters.http.ip_tagging.v3.IPTagging\n  value:\n  )EOF\",\n                                        ExampleIpTaggingConfig));\n  initialize();\n}\n\n// envoy.filters.http.ip_tagging from v2 typed Any config.\nTEST_P(VersionIntegrationTest, IpTaggingV2StaticTypedConfig) {\n  config_helper_.addFilter(absl::StrCat(R\"EOF(\n  name: ip_tagging\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging\n  )EOF\",\n                                        ExampleIpTaggingConfig));\n  initialize();\n}\n\n// envoy.filters.http.ip_tagging from v3 typed Any config.\nTEST_P(VersionIntegrationTest, IpTaggingV3StaticTypedConfig) {\n  config_helper_.addFilter(absl::StrCat(R\"EOF(\n  name: ip_tagging\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.extensions.filters.http.ip_tagging.v3.IPTagging\n  )EOF\",\n                                        ExampleIpTaggingConfig));\n  initialize();\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/vhds_integration_test.cc",
    "content": "#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/grpc/status.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/config/protobuf_link_hacks.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/notification.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::AssertionResult;\n\nnamespace Envoy {\nnamespace {\n\nconst std::string& config() {\n  CONSTRUCT_ON_FIRST_USE(std::string, fmt::format(R\"EOF(\nadmin:\n  access_log_path: {}\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 0\nstatic_resources:\n  clusters:\n  - name: xds_cluster\n    type: STATIC\n    http2_protocol_options: {{}}\n    load_assignment:\n      cluster_name: xds_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 0\n  - name: my_service\n    type: STATIC\n    http2_protocol_options: {{}}\n    load_assignment:\n      cluster_name: my_service\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: 127.0.0.1\n                port_value: 0\n  listeners:\n  - name: http\n    address:\n      socket_address:\n        address: 127.0.0.1\n        port_value: 0\n    filter_chains:\n    - filters:\n      - name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          
stat_prefix: config_test\n          http_filters:\n          - name: envoy.filters.http.on_demand\n          - name: envoy.filters.http.router\n          codec_type: HTTP2\n          rds:\n            route_config_name: my_route\n            config_source:\n              api_config_source:\n                api_type: GRPC\n                grpc_services:\n                  envoy_grpc:\n                    cluster_name: xds_cluster\n)EOF\",\n                                                  Platform::null_device_path));\n}\n\n// TODO (dmitri-d) move config yaml into ConfigHelper\nconst char RdsWithoutVhdsConfig[] = R\"EOF(\nname: my_route\nvirtual_hosts:\n- name: vhost_rds1\n  domains: [\"vhost.rds.first\"]\n  routes:\n  - match: { prefix: \"/rdsone\" }\n    route: { cluster: my_service }\n)EOF\";\n\nconst char RdsConfig[] = R\"EOF(\nname: my_route\nvhds:\n  config_source:\n    api_config_source:\n      api_type: DELTA_GRPC\n      grpc_services:\n        envoy_grpc:\n          cluster_name: xds_cluster\n)EOF\";\n\nconst char RdsConfigWithVhosts[] = R\"EOF(\nname: my_route\nvirtual_hosts:\n- name: vhost_rds1\n  domains: [\"vhost.rds.first\"]\n  routes:\n  - match: { prefix: \"/rdsone\" }\n    route: { cluster: my_service }\nvhds:\n  config_source:\n    api_config_source:\n      api_type: DELTA_GRPC\n      grpc_services:\n        envoy_grpc:\n          cluster_name: xds_cluster\n)EOF\";\n\nconst std::string RouteConfigName = \"my_route\";\n\nconst char VhostTemplate[] = R\"EOF(\nname: {}\ndomains: [{}]\nroutes:\n- match: {{ prefix: \"/\" }}\n  route: {{ cluster: \"my_service\" }}\n)EOF\";\n\nclass VhdsInitializationTest : public HttpIntegrationTest,\n                               public Grpc::GrpcClientIntegrationParamTest {\npublic:\n  VhdsInitializationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), realTime(), config()) {\n    use_lds_ = false;\n  }\n\n  void TearDown() override { cleanUpXdsConnection(); }\n\n  // Overridden to 
insert this stuff into the initialize() at the very beginning of\n  // HttpIntegrationTest::testRouterRequestAndResponseWithBody().\n  void initialize() override {\n    // Controls how many addFakeUpstream() will happen in\n    // BaseIntegrationTest::createUpstreams() (which is part of initialize()).\n    // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap\n    // config that you use!\n    setUpstreamCount(2);                                  // the CDS cluster\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // CDS uses gRPC uses HTTP2.\n\n    // BaseIntegrationTest::initialize() does many things:\n    // 1) It appends to fake_upstreams_ as many as you asked for via setUpstreamCount().\n    // 2) It updates your bootstrap config with the ports your fake upstreams are actually listening\n    //    on (since you're supposed to leave them as 0).\n    // 3) It creates and starts an IntegrationTestServer - the thing that wraps the almost-actual\n    //    Envoy used in the tests.\n    // 4) Bringing up the server usually entails waiting to ensure that any listeners specified in\n    //    the bootstrap config have come up, and registering them in a port map (see lookupPort()).\n    //    However, this test needs to defer all of that to later.\n    defer_listener_finalization_ = true;\n    HttpIntegrationTest::initialize();\n\n    // Now that the upstream has been created, process Envoy's request to discover it.\n    // (First, we have to let Envoy establish its connection to the RDS server.)\n    AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection.\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    xds_stream_->startGrpcStream();\n\n    
EXPECT_TRUE(compareSotwDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                            {\"my_route\"}, true));\n    sendSotwDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n        Config::TypeUrl::get().RouteConfiguration,\n        {TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(\n            RdsWithoutVhdsConfig)},\n        \"1\");\n\n    // Wait for our statically specified listener to become ready, and register its port in the\n    // test framework's downstream listener port map.\n    test_server_->waitUntilListenersReady();\n    registerTestServerPorts({\"http\"});\n  }\n\n  FakeStreamPtr vhds_stream_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VhdsInitializationTest,\n                         GRPC_CLIENT_INTEGRATION_PARAMS);\n\n// tests a scenario when:\n//  - RouteConfiguration without VHDS is received\n//  - RouteConfiguration update with VHDS configuration in it is received\n//  - Upstream makes a request to a VirtualHost in the VHDS update\nTEST_P(VhdsInitializationTest, InitializeVhdsAfterRdsHasBeenInitialized) {\n  // Calls our initialize(), which includes establishing a listener, route, and cluster.\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/rdsone\", \"vhost.rds.first\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Update RouteConfig, this time include VHDS config\n  sendSotwDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration,\n      {TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(RdsConfigWithVhosts)},\n      \"2\");\n\n  auto result = xds_connection_->waitForNewStream(*dispatcher_, vhds_stream_);\n  RELEASE_ASSERT(result, result.message());\n  vhds_stream_->startGrpcStream();\n\n  EXPECT_TRUE(\n      compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n  
sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n      Config::TypeUrl::get().VirtualHost,\n      {TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(\n          fmt::format(VhostTemplate, \"my_route/vhost_0\", \"vhost.first\"))},\n      {}, \"1\", vhds_stream_);\n  EXPECT_TRUE(\n      compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n\n  // Confirm vhost.first that was configured via VHDS is reachable\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/\", \"vhost.first\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n}\n\nclass VhdsIntegrationTest : public HttpIntegrationTest,\n                            public Grpc::GrpcClientIntegrationParamTest {\npublic:\n  VhdsIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), realTime(), config()) {\n    use_lds_ = false;\n  }\n\n  void TearDown() override { cleanUpXdsConnection(); }\n\n  std::string virtualHostYaml(const std::string& name, const std::string& domain) {\n    return fmt::format(VhostTemplate, name, domain);\n  }\n\n  std::string vhdsRequestResourceName(const std::string& host_header) {\n    return RouteConfigName + \"/\" + host_header;\n  }\n\n  envoy::config::route::v3::VirtualHost buildVirtualHost() {\n    return TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(\n        virtualHostYaml(\"my_route/vhost_0\", \"host\"));\n  }\n\n  std::vector<envoy::config::route::v3::VirtualHost> buildVirtualHost1() {\n    return {TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(\n                virtualHostYaml(\"my_route/vhost_1\", \"vhost.first\")),\n            TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(\n                virtualHostYaml(\"my_route/vhost_2\", \"vhost.second\"))};\n  }\n\n  envoy::config::route::v3::VirtualHost buildVirtualHost2() {\n    return 
TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(\n        virtualHostYaml(\"my_route/vhost_1\", \"vhost.first\"));\n  }\n\n  // Overridden to insert this stuff into the initialize() at the very beginning of\n  // HttpIntegrationTest::testRouterRequestAndResponseWithBody().\n  void initialize() override {\n    // Controls how many addFakeUpstream() will happen in\n    // BaseIntegrationTest::createUpstreams() (which is part of initialize()).\n    // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap\n    // config that you use!\n    setUpstreamCount(2);                                  // the CDS cluster\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // CDS uses gRPC uses HTTP2.\n\n    // BaseIntegrationTest::initialize() does many things:\n    // 1) It appends to fake_upstreams_ as many as you asked for via setUpstreamCount().\n    // 2) It updates your bootstrap config with the ports your fake upstreams are actually listening\n    //    on (since you're supposed to leave them as 0).\n    // 3) It creates and starts an IntegrationTestServer - the thing that wraps the almost-actual\n    //    Envoy used in the tests.\n    // 4) Bringing up the server usually entails waiting to ensure that any listeners specified in\n    //    the bootstrap config have come up, and registering them in a port map (see lookupPort()).\n    //    However, this test needs to defer all of that to later.\n    defer_listener_finalization_ = true;\n    HttpIntegrationTest::initialize();\n\n    // Now that the upstream has been created, process Envoy's request to discover it.\n    // (First, we have to let Envoy establish its connection to the RDS server.)\n    AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection.\n        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_);\n    RELEASE_ASSERT(result, result.message());\n    result = 
xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    xds_stream_->startGrpcStream();\n\n    EXPECT_TRUE(compareSotwDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, \"\",\n                                            {\"my_route\"}, true));\n    sendSotwDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n        Config::TypeUrl::get().RouteConfiguration, {rdsConfig()}, \"1\");\n\n    result = xds_connection_->waitForNewStream(*dispatcher_, vhds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    vhds_stream_->startGrpcStream();\n\n    EXPECT_TRUE(\n        compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n    sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n        Config::TypeUrl::get().VirtualHost, {buildVirtualHost()}, {}, \"1\", vhds_stream_);\n    EXPECT_TRUE(\n        compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n\n    // Wait for our statically specified listener to become ready, and register its port in the\n    // test framework's downstream listener port map.\n    test_server_->waitUntilListenersReady();\n    registerTestServerPorts({\"http\"});\n  }\n\n  void useRdsWithVhosts() { use_rds_with_vhosts = true; }\n  const envoy::config::route::v3::RouteConfiguration rdsConfig() const {\n    return TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(\n        use_rds_with_vhosts ? 
RdsConfigWithVhosts : RdsConfig);\n  }\n\n  void notifyAboutAliasResolutionFailure(const std::string& version, FakeStreamPtr& stream,\n                                         const std::vector<std::string>& aliases = {}) {\n    envoy::api::v2::DeltaDiscoveryResponse response;\n    response.set_system_version_info(\"system_version_info_this_is_a_test\");\n    response.set_type_url(Config::TypeUrl::get().VirtualHost);\n    auto* resource = response.add_resources();\n    resource->set_name(\"my_route/cannot-resolve-alias\");\n    resource->set_version(version);\n    for (const auto& alias : aliases) {\n      resource->add_aliases(alias);\n    }\n    response.set_nonce(\"noncense\");\n    stream->sendGrpcMessage(response);\n  }\n\n  void sendDeltaDiscoveryResponseWithUnresolvedAliases(\n      const std::vector<envoy::config::route::v3::VirtualHost>& added_or_updated,\n      const std::vector<std::string>& removed, const std::string& version, FakeStreamPtr& stream,\n      const std::vector<std::string>& aliases, const std::vector<std::string>& unresolved_aliases) {\n    auto response = createDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n        Config::TypeUrl::get().VirtualHost, added_or_updated, removed, version, aliases);\n    for (const auto& unresolved_alias : unresolved_aliases) {\n      auto* resource = response.add_resources();\n      resource->set_name(unresolved_alias);\n      resource->set_version(version);\n      resource->add_aliases(unresolved_alias);\n    }\n    stream->sendGrpcMessage(response);\n  }\n\n  // used in VhdsOnDemandUpdateWithResourceNameAsAlias test\n  // to create a DeltaDiscoveryResponse with a resource name matching the value used to create an\n  // on-demand request\n  envoy::api::v2::DeltaDiscoveryResponse createDeltaDiscoveryResponseWithResourceNameUsedAsAlias() {\n    API_NO_BOOST(envoy::api::v2::DeltaDiscoveryResponse) ret;\n    ret.set_system_version_info(\"system_version_info_this_is_a_test\");\n    
ret.set_type_url(Config::TypeUrl::get().VirtualHost);\n\n    auto* resource = ret.add_resources();\n    resource->set_name(\"my_route/vhost_1\");\n    resource->set_version(\"4\");\n    resource->mutable_resource()->PackFrom(\n        API_DOWNGRADE(TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(\n            virtualHostYaml(\"my_route/vhost_1\", \"vhost_1, vhost.first\"))));\n    resource->add_aliases(\"my_route/vhost.first\");\n    ret.set_nonce(\"test-nonce-0\");\n\n    return ret;\n  }\n\n  FakeStreamPtr vhds_stream_;\n  bool use_rds_with_vhosts{false};\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VhdsIntegrationTest, GRPC_CLIENT_INTEGRATION_PARAMS);\n\nTEST_P(VhdsIntegrationTest, RdsUpdateWithoutVHDSChangesDoesNotRestartVHDS) {\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/\", \"host\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Update RouteConfig, but don't change VHDS config\n  sendSotwDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration,\n      {TestUtility::parseYaml<envoy::config::route::v3::RouteConfiguration>(RdsConfigWithVhosts)},\n      \"2\");\n\n  // Confirm vhost_0 that was originally configured via VHDS is reachable\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/\", \"host\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n}\n\n// tests a scenario when:\n//  - a spontaneous VHDS DiscoveryResponse adds two virtual hosts\n//  - the next spontaneous VHDS DiscoveryResponse removes newly added virtual hosts\n//  - Upstream makes a request to an (now) unknown domain\n//  - A VHDS DiscoveryResponse received containing update for the domain\n//  - Upstream receives a 200 response\nTEST_P(VhdsIntegrationTest, VhdsVirtualHostAddUpdateRemove) {\n  // Calls our initialize(), which includes establishing a listener, route, and cluster.\n  
testRouterHeaderOnlyRequestAndResponse(nullptr, 1);\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // A spontaneous VHDS DiscoveryResponse adds two virtual hosts\n  sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n      Config::TypeUrl::get().VirtualHost, buildVirtualHost1(), {}, \"2\", vhds_stream_);\n  EXPECT_TRUE(\n      compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/one\", \"vhost.first\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/two\", \"vhost.second\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // A spontaneous VHDS DiscoveryResponse removes newly added virtual hosts\n  sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n      Config::TypeUrl::get().VirtualHost, {}, {\"my_route/vhost_1\", \"my_route/vhost_2\"}, \"3\",\n      vhds_stream_);\n  EXPECT_TRUE(\n      compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n\n  // an upstream request to an (now) unknown domain\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"vhost.first\"},\n                                                 {\"x-lyft-user-id\", \"123\"}};\n  IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost,\n                                           {vhdsRequestResourceName(\"vhost.first\")}, {},\n 
                                          vhds_stream_));\n  sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n      Config::TypeUrl::get().VirtualHost, {buildVirtualHost2()}, {}, \"4\", vhds_stream_,\n      {\"my_route/vhost.first\"});\n\n  waitForNextUpstreamRequest(1);\n  // Send response headers, and end_stream if there is no response body.\n  upstream_request_->encodeHeaders(default_response_headers_, true);\n\n  response->waitForHeaders();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n  cleanupUpstreamAndDownstream();\n}\n\n// tests a scenario when:\n//  - an RDS exchange contains a non-empty virtual_hosts array\n//  - a spontaneous VHDS DiscoveryResponse adds two virtual hosts\n//  - the next spontaneous VHDS DiscoveryResponse removes newly added virtual hosts\n//  - Upstream makes a request to an (now) unknown domain\n//  - A VHDS DiscoveryResponse received containing update for the domain\n//  - Upstream receives a 200 response\nTEST_P(VhdsIntegrationTest, RdsWithVirtualHostsVhdsVirtualHostAddUpdateRemove) {\n  // RDS exchange with a non-empty virtual_hosts field\n  useRdsWithVhosts();\n\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1);\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // A spontaneous VHDS DiscoveryResponse adds two virtual hosts\n  sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n      Config::TypeUrl::get().VirtualHost, buildVirtualHost1(), {}, \"2\", vhds_stream_);\n  EXPECT_TRUE(\n      compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n\n  // verify that rds-based virtual host can be resolved\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/rdsone\", \"vhost.rds.first\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/one\", \"vhost.first\");\n  cleanupUpstreamAndDownstream();\n  
ASSERT_TRUE(codec_client_->waitForDisconnect());\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/two\", \"vhost.second\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // A spontaneous VHDS DiscoveryResponse removes virtual hosts added via vhds\n  sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n      Config::TypeUrl::get().VirtualHost, {}, {\"my_route/vhost_1\", \"my_route/vhost_2\"}, \"3\",\n      vhds_stream_);\n  EXPECT_TRUE(\n      compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n\n  // verify rds-based virtual host is still present\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/rdsone\", \"vhost.rds.first\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"vhost.first\"},\n                                                 {\"x-lyft-user-id\", \"123\"}};\n  IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost,\n                                           {vhdsRequestResourceName(\"vhost.first\")}, {},\n                                           vhds_stream_));\n  sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n      Config::TypeUrl::get().VirtualHost, {buildVirtualHost2()}, {}, \"4\", vhds_stream_,\n      {\"my_route/vhost.first\"});\n\n  waitForNextUpstreamRequest(1);\n  // Send response headers, and end_stream if there is no response body.\n  
upstream_request_->encodeHeaders(default_response_headers_, true);\n\n  response->waitForHeaders();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n  cleanupUpstreamAndDownstream();\n}\n\n// tests a scenario where:\n//  a Resource received in a DeltaDiscoveryResponse has name that matches the value used in the\n//  on-demand request\nTEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateWithResourceNameAsAlias) {\n  // RDS exchange with a non-empty virtual_hosts field\n  useRdsWithVhosts();\n\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1);\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // verify that rds-based virtual host can be resolved\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/rdsone\", \"vhost.rds.first\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Attempt to make a request to an unknown host\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"vhost_1\"},\n                                                 {\"x-lyft-user-id\", \"123\"}};\n  IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost,\n                                           {vhdsRequestResourceName(\"vhost_1\")}, {}, vhds_stream_));\n\n  envoy::api::v2::DeltaDiscoveryResponse vhds_update =\n      createDeltaDiscoveryResponseWithResourceNameUsedAsAlias();\n  vhds_stream_->sendGrpcMessage(vhds_update);\n\n  waitForNextUpstreamRequest(1);\n  // Send response headers, and end_stream if there is no response body.\n  
upstream_request_->encodeHeaders(default_response_headers_, true);\n\n  response->waitForHeaders();\n  EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n  cleanupUpstreamAndDownstream();\n}\n\n// tests a scenario when:\n//  - an RDS exchange contains a non-empty virtual_hosts array\n//  - a spontaneous VHDS DiscoveryResponse adds two virtual hosts\n//  - the next spontaneous VHDS DiscoveryResponse removes newly added virtual hosts\n//  - Upstream makes a request to an (now) unknown domain\n//  - A VHDS DiscoveryResponse received but contains no update for the domain (the management server\n//  couldn't resolve it)\n//  - Upstream receives a 404 response\nTEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveTheAlias) {\n  // RDS exchange with a non-empty virtual_hosts field\n  useRdsWithVhosts();\n\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1);\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // verify that rds-based virtual host can be resolved\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/rdsone\", \"vhost.rds.first\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Attempt to make a request to an unknown host\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"vhost.third\"},\n                                                 {\"x-lyft-user-id\", \"123\"}};\n  IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost,\n                                           {vhdsRequestResourceName(\"vhost.third\")}, {},\n   
                                        vhds_stream_));\n  // Send an empty response back (the management server isn't aware of vhost.third)\n  notifyAboutAliasResolutionFailure(\"4\", vhds_stream_, {\"my_route/vhost.third\"});\n\n  response->waitForHeaders();\n  EXPECT_EQ(\"404\", response->headers().getStatusValue());\n\n  cleanupUpstreamAndDownstream();\n}\n\n// tests a scenario when:\n//  - an RDS exchange contains a non-empty virtual_hosts array\n//  - a spontaneous VHDS DiscoveryResponse adds two virtual hosts\n//  - the next spontaneous VHDS DiscoveryResponse removes newly added virtual hosts\n//  - Upstream makes a request to an (now) unknown domain\n//  - A VHDS DiscoveryResponse received that contains update for vhost.first host, but vhost.third\n//  couldn't be resolved\n//  - Upstream receives a 404 response\nTEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveOneAliasOutOfSeveral) {\n  // RDS exchange with a non-empty virtual_hosts field\n  useRdsWithVhosts();\n\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1);\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // verify that rds-based virtual host can be resolved\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/rdsone\", \"vhost.rds.first\");\n  cleanupUpstreamAndDownstream();\n  ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n  // Attempt to make a request to an unknown host\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"vhost.third\"},\n                                                 {\"x-lyft-user-id\", \"123\"}};\n  IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers);\n  
EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost,\n                                           {vhdsRequestResourceName(\"vhost.third\")}, {},\n                                           vhds_stream_));\n  // Send an empty response back (the management server isn't aware of vhost.third)\n  sendDeltaDiscoveryResponseWithUnresolvedAliases({buildVirtualHost2()}, {}, \"4\", vhds_stream_,\n                                                  {\"my_route/vhost.first\"},\n                                                  {\"my_route/vhost.third\"});\n\n  response->waitForHeaders();\n  EXPECT_EQ(\"404\", response->headers().getStatusValue());\n\n  cleanupUpstreamAndDownstream();\n}\n\n// Verify that an vhds update succeeds even when the client closes its connection\nTEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateHttpConnectionCloses) {\n  // RDS exchange with a non-empty virtual_hosts field\n  useRdsWithVhosts();\n\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1);\n  cleanupUpstreamAndDownstream();\n  EXPECT_TRUE(codec_client_->waitForDisconnect());\n\n  // Attempt to make a request to an unknown host\n  codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n  Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                 {\":path\", \"/\"},\n                                                 {\":scheme\", \"http\"},\n                                                 {\":authority\", \"vhost_1\"},\n                                                 {\"x-lyft-user-id\", \"123\"}};\n  auto encoder_decoder = codec_client_->startRequest(request_headers);\n  Http::RequestEncoder& encoder = encoder_decoder.first;\n  IntegrationStreamDecoderPtr response = std::move(encoder_decoder.second);\n  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost,\n                                           {vhdsRequestResourceName(\"vhost_1\")}, {}, vhds_stream_));\n\n 
 envoy::api::v2::DeltaDiscoveryResponse vhds_update =\n      createDeltaDiscoveryResponseWithResourceNameUsedAsAlias();\n  vhds_stream_->sendGrpcMessage(vhds_update);\n\n  codec_client_->sendReset(encoder);\n  response->waitForReset();\n  EXPECT_TRUE(codec_client_->connected());\n\n  cleanupUpstreamAndDownstream();\n}\n\nconst char VhostTemplateAfterUpdate[] = R\"EOF(\nname: {}\ndomains: [{}]\nroutes:\n- match: {{ prefix: \"/after_update\" }}\n  route: {{ cluster: \"my_service\" }}\n)EOF\";\n\n// Verifies that after multiple vhds updates, virtual hosts from earlier updates still can receive\n// updates See https://github.com/envoyproxy/envoy/issues/12158 for more details\nTEST_P(VhdsIntegrationTest, MultipleUpdates) {\n  testRouterHeaderOnlyRequestAndResponse(nullptr, 1);\n  cleanupUpstreamAndDownstream();\n  EXPECT_TRUE(codec_client_->waitForDisconnect());\n\n  {\n    // make first vhds request (for vhost.first)\n    codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n    Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                   {\":path\", \"/\"},\n                                                   {\":scheme\", \"http\"},\n                                                   {\":authority\", \"vhost.first\"},\n                                                   {\"x-lyft-user-id\", \"123\"}};\n    IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost,\n                                             {vhdsRequestResourceName(\"vhost.first\")}, {},\n                                             vhds_stream_));\n    sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n        Config::TypeUrl::get().VirtualHost, {buildVirtualHost2()}, {}, \"4\", vhds_stream_,\n        {\"my_route/vhost.first\"});\n    EXPECT_TRUE(\n        
compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n\n    waitForNextUpstreamRequest(1);\n    // Send response headers, and end_stream if there is no response body.\n    upstream_request_->encodeHeaders(default_response_headers_, true);\n\n    response->waitForHeaders();\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n    cleanupUpstreamAndDownstream();\n    EXPECT_TRUE(codec_client_->waitForDisconnect());\n  }\n  {\n    // make second vhds request (for vhost.second)\n    codec_client_ = makeHttpConnection(makeClientConnection((lookupPort(\"http\"))));\n    Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                   {\":path\", \"/\"},\n                                                   {\":scheme\", \"http\"},\n                                                   {\":authority\", \"vhost.second\"},\n                                                   {\"x-lyft-user-id\", \"123\"}};\n    IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers);\n    EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost,\n                                             {vhdsRequestResourceName(\"vhost.second\")}, {},\n                                             vhds_stream_));\n    sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n        Config::TypeUrl::get().VirtualHost,\n        {TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(\n            virtualHostYaml(\"my_route/vhost_2\", \"vhost.second\"))},\n        {}, \"4\", vhds_stream_, {\"my_route/vhost.second\"});\n    EXPECT_TRUE(\n        compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n\n    waitForNextUpstreamRequest(1);\n    // Send response headers, and end_stream if there is no response body.\n    upstream_request_->encodeHeaders(default_response_headers_, true);\n\n    
response->waitForHeaders();\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n\n    cleanupUpstreamAndDownstream();\n    EXPECT_TRUE(codec_client_->waitForDisconnect());\n  }\n  {\n    // Attempt to push updates for both vhost.first and vhost.second\n    sendDeltaDiscoveryResponse<envoy::config::route::v3::VirtualHost>(\n        Config::TypeUrl::get().VirtualHost,\n        {TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(\n             fmt::format(VhostTemplateAfterUpdate, \"my_route/vhost_1\", \"vhost.first\")),\n         TestUtility::parseYaml<envoy::config::route::v3::VirtualHost>(\n             fmt::format(VhostTemplateAfterUpdate, \"my_route/vhost_2\", \"vhost.second\"))},\n        {}, \"5\", vhds_stream_);\n    EXPECT_TRUE(\n        compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));\n\n    // verify that both virtual hosts have been updated\n    testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/after_update\", \"vhost.first\");\n    cleanupUpstreamAndDownstream();\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n\n    testRouterHeaderOnlyRequestAndResponse(nullptr, 1, \"/after_update\", \"vhost.second\");\n    cleanupUpstreamAndDownstream();\n    ASSERT_TRUE(codec_client_->waitForDisconnect());\n  }\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/websocket_integration_test.cc",
    "content": "#include \"test/integration/websocket_integration_test.h\"\n\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/http/header_map_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/integration/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/str_cat.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace {\n\nHttp::TestRequestHeaderMapImpl upgradeRequestHeaders(const char* upgrade_type = \"websocket\",\n                                                     uint32_t content_length = 0) {\n  return Http::TestRequestHeaderMapImpl{{\":authority\", \"host\"},\n                                        {\"content-length\", fmt::format(\"{}\", content_length)},\n                                        {\":path\", \"/websocket/test\"},\n                                        {\":method\", \"GET\"},\n                                        {\":scheme\", \"http\"},\n                                        {\"upgrade\", upgrade_type},\n                                        {\"connection\", \"keep-alive, upgrade\"}};\n}\n\nHttp::TestResponseHeaderMapImpl upgradeResponseHeaders(const char* upgrade_type = \"websocket\") {\n  return Http::TestResponseHeaderMapImpl{\n      {\":status\", \"101\"}, {\"connection\", \"upgrade\"}, {\"upgrade\", upgrade_type}};\n}\n\ntemplate <class ProxiedHeaders, class OriginalHeaders>\nvoid commonValidate(ProxiedHeaders& proxied_headers, const OriginalHeaders& original_headers) {\n  // If no content length is specified, the HTTP1 codec will add a chunked encoding header.\n  if (original_headers.ContentLength() == nullptr &&\n      proxied_headers.TransferEncoding() != nullptr) {\n    ASSERT_EQ(proxied_headers.getTransferEncodingValue(), 
\"chunked\");\n    proxied_headers.removeTransferEncoding();\n  }\n  if (proxied_headers.Connection() != nullptr &&\n      proxied_headers.Connection()->value() == \"upgrade\" &&\n      original_headers.Connection() != nullptr &&\n      original_headers.Connection()->value() == \"keep-alive, upgrade\") {\n    // The keep-alive is implicit for HTTP/1.1, so Envoy only sets the upgrade\n    // header when converting from HTTP/1.1 to H2\n    proxied_headers.setConnection(\"keep-alive, upgrade\");\n  }\n}\n\n} // namespace\n\nvoid WebsocketIntegrationTest::validateUpgradeRequestHeaders(\n    const Http::RequestHeaderMap& original_proxied_request_headers,\n    const Http::RequestHeaderMap& original_request_headers) {\n  Http::TestRequestHeaderMapImpl proxied_request_headers(original_proxied_request_headers);\n  if (proxied_request_headers.ForwardedProto()) {\n    ASSERT_EQ(proxied_request_headers.getForwardedProtoValue(), \"http\");\n    proxied_request_headers.removeForwardedProto();\n  }\n\n  // Check for and remove headers added by default for HTTP requests.\n  ASSERT_TRUE(proxied_request_headers.RequestId() != nullptr);\n  ASSERT_TRUE(proxied_request_headers.EnvoyExpectedRequestTimeoutMs() != nullptr);\n  proxied_request_headers.removeEnvoyExpectedRequestTimeoutMs();\n\n  if (proxied_request_headers.Scheme()) {\n    ASSERT_EQ(proxied_request_headers.getSchemeValue(), \"http\");\n  } else {\n    proxied_request_headers.setScheme(\"http\");\n  }\n\n  // 0 byte content lengths may be stripped on the H2 path - ignore that as a difference by adding\n  // it back to the proxied headers.\n  if (original_request_headers.ContentLength() &&\n      proxied_request_headers.ContentLength() == nullptr) {\n    proxied_request_headers.setContentLength(size_t(0));\n  }\n\n  commonValidate(proxied_request_headers, original_request_headers);\n  proxied_request_headers.removeRequestId();\n\n  EXPECT_THAT(&proxied_request_headers, 
HeaderMapEqualIgnoreOrder(&original_request_headers));\n}\n\nvoid WebsocketIntegrationTest::validateUpgradeResponseHeaders(\n    const Http::ResponseHeaderMap& original_proxied_response_headers,\n    const Http::ResponseHeaderMap& original_response_headers) {\n  Http::TestResponseHeaderMapImpl proxied_response_headers(original_proxied_response_headers);\n\n  // Check for and remove headers added by default for HTTP responses.\n  ASSERT_TRUE(proxied_response_headers.Date() != nullptr);\n  ASSERT_TRUE(proxied_response_headers.Server() != nullptr);\n  ASSERT_EQ(proxied_response_headers.getServerValue(), \"envoy\");\n  proxied_response_headers.removeDate();\n  proxied_response_headers.removeServer();\n\n  ASSERT_TRUE(proxied_response_headers.TransferEncoding() == nullptr);\n\n  commonValidate(proxied_response_headers, original_response_headers);\n\n  EXPECT_THAT(&proxied_response_headers, HeaderMapEqualIgnoreOrder(&original_response_headers));\n}\n\nINSTANTIATE_TEST_SUITE_P(Protocols, WebsocketIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\nConfigHelper::HttpModifierFunction setRouteUsingWebsocket() {\n  return [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) { hcm.add_upgrade_configs()->set_upgrade_type(\"websocket\"); };\n}\n\nvoid WebsocketIntegrationTest::initialize() {\n  if (upstreamProtocol() != FakeHttpConnection::Type::HTTP1) {\n    config_helper_.addConfigModifier(\n        [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n          auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);\n          cluster->mutable_http2_protocol_options()->set_allow_connect(true);\n        });\n  }\n  if (downstreamProtocol() != Http::CodecClient::Type::HTTP1) {\n    config_helper_.addConfigModifier(\n        
[&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n                hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_connect(true); });\n  }\n  HttpProtocolIntegrationTest::initialize();\n}\n\nvoid WebsocketIntegrationTest::performUpgrade(\n    const Http::TestRequestHeaderMapImpl& upgrade_request_headers,\n    const Http::TestResponseHeaderMapImpl& upgrade_response_headers) {\n  // Establish the initial connection.\n  codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  // Send websocket upgrade request\n  auto encoder_decoder = codec_client_->startRequest(upgrade_request_headers);\n  request_encoder_ = &encoder_decoder.first;\n  response_ = std::move(encoder_decoder.second);\n  test_server_->waitForCounterGe(\"http.config_test.downstream_cx_upgrades_total\", 1);\n  test_server_->waitForGaugeGe(\"http.config_test.downstream_cx_upgrades_active\", 1);\n\n  // Verify the upgrade was received upstream.\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  validateUpgradeRequestHeaders(upstream_request_->headers(), upgrade_request_headers);\n\n  // Send the upgrade response\n  upstream_request_->encodeHeaders(upgrade_response_headers, false);\n\n  // Verify the upgrade response was received downstream.\n  response_->waitForHeaders();\n  validateUpgradeResponseHeaders(response_->headers(), upgrade_response_headers);\n}\n\nvoid WebsocketIntegrationTest::sendBidirectionalData() {\n  // Verify that the client can still send data upstream, and that upstream\n  // receives it.\n  codec_client_->sendData(*request_encoder_, \"hello\", false);\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, \"hello\"));\n\n  // Verify the upstream can send data to the client and that the client\n  // receives it.\n  
upstream_request_->encodeData(\"world\", false);\n  response_->waitForBodyData(5);\n  EXPECT_EQ(\"world\", response_->body());\n}\n\nTEST_P(WebsocketIntegrationTest, WebSocketConnectionDownstreamDisconnect) {\n  config_helper_.addConfigModifier(setRouteUsingWebsocket());\n  initialize();\n\n  performUpgrade(upgradeRequestHeaders(), upgradeResponseHeaders());\n  sendBidirectionalData();\n\n  // Send some final data from the client, and disconnect.\n  codec_client_->sendData(*request_encoder_, \"bye!\", false);\n  codec_client_->close();\n\n  // Verify the final data was received and that the connection is torn down.\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, \"hellobye!\"));\n\n  ASSERT_TRUE(waitForUpstreamDisconnectOrReset());\n  test_server_->waitForGaugeEq(\"http.config_test.downstream_cx_upgrades_active\", 0);\n}\n\nTEST_P(WebsocketIntegrationTest, WebSocketConnectionUpstreamDisconnect) {\n  config_helper_.addConfigModifier(setRouteUsingWebsocket());\n  initialize();\n\n  performUpgrade(upgradeRequestHeaders(), upgradeResponseHeaders());\n\n  // Standard TCP proxy semantics post upgrade\n  codec_client_->sendData(*request_encoder_, \"hello\", false);\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, \"hello\"));\n\n  // Send data downstream and disconnect immediately.\n  upstream_request_->encodeData(\"world\", false);\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n\n  // Verify both the data and the disconnect went through.\n  response_->waitForBodyData(5);\n  EXPECT_EQ(\"world\", response_->body());\n  waitForClientDisconnectOrReset();\n}\n\nTEST_P(WebsocketIntegrationTest, EarlyData) {\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP2 ||\n      upstreamProtocol() == FakeHttpConnection::Type::HTTP2) {\n    return;\n  }\n  config_helper_.addConfigModifier(setRouteUsingWebsocket());\n  initialize();\n\n  // Establish the initial connection.\n  
codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n\n  const std::string early_data_req_str = \"hello\";\n  const std::string early_data_resp_str = \"world\";\n\n  // Send websocket upgrade request with early data.\n  auto encoder_decoder =\n      codec_client_->startRequest(upgradeRequestHeaders(\"websocket\", early_data_req_str.size()));\n  request_encoder_ = &encoder_decoder.first;\n  response_ = std::move(encoder_decoder.second);\n  codec_client_->sendData(*request_encoder_, early_data_req_str, false);\n\n  // Wait for both the upgrade, and the early data.\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, \"hello\"));\n\n  // Accept websocket upgrade request\n  upstream_request_->encodeHeaders(upgradeResponseHeaders(), false);\n  // Reply also with early data\n  upstream_request_->encodeData(early_data_resp_str, false);\n  // upstream disconnect\n  ASSERT_TRUE(fake_upstream_connection_->close());\n  ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());\n\n  response_->waitForHeaders();\n  auto upgrade_response_headers(upgradeResponseHeaders());\n  validateUpgradeResponseHeaders(response_->headers(), upgrade_response_headers);\n\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    // For H2, the disconnect may result in the terminal data not being proxied.\n    response_->waitForBodyData(5);\n  }\n  waitForClientDisconnectOrReset();\n  EXPECT_EQ(\"world\", response_->body());\n}\n\nTEST_P(WebsocketIntegrationTest, WebSocketConnectionIdleTimeout) {\n  config_helper_.addConfigModifier(setRouteUsingWebsocket());\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void 
{\n        auto* route_config = hcm.mutable_route_config();\n        auto* virtual_host = route_config->mutable_virtual_hosts(0);\n        auto* route = virtual_host->mutable_routes(0)->mutable_route();\n        route->mutable_idle_timeout()->set_seconds(0);\n        route->mutable_idle_timeout()->set_nanos(200 * 1000 * 1000);\n      });\n  initialize();\n\n  // WebSocket upgrade, send some data and disconnect downstream\n  performUpgrade(upgradeRequestHeaders(), upgradeResponseHeaders());\n  sendBidirectionalData();\n\n  test_server_->waitForCounterGe(\"http.config_test.downstream_rq_idle_timeout\", 1);\n  waitForClientDisconnectOrReset();\n  ASSERT_TRUE(waitForUpstreamDisconnectOrReset());\n}\n\n// Technically not a websocket tests, but verifies normal upgrades have parity\n// with websocket upgrades\nTEST_P(WebsocketIntegrationTest, NonWebsocketUpgrade) {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* foo_upgrade = hcm.add_upgrade_configs();\n        foo_upgrade->set_upgrade_type(\"foo\");\n      });\n\n  config_helper_.addConfigModifier(setRouteUsingWebsocket());\n  initialize();\n\n  performUpgrade(upgradeRequestHeaders(\"foo\", 0), upgradeResponseHeaders(\"foo\"));\n  sendBidirectionalData();\n  codec_client_->sendData(*request_encoder_, \"bye!\", false);\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    codec_client_->close();\n  } else {\n    codec_client_->sendReset(*request_encoder_);\n  }\n\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, \"hellobye!\"));\n  ASSERT_TRUE(waitForUpstreamDisconnectOrReset());\n\n  auto upgrade_response_headers(upgradeResponseHeaders(\"foo\"));\n  validateUpgradeResponseHeaders(response_->headers(), upgrade_response_headers);\n  codec_client_->close();\n}\n\nTEST_P(WebsocketIntegrationTest, RouteSpecificUpgrade) {\n  config_helper_.addConfigModifier(\n      
[&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* foo_upgrade = hcm.add_upgrade_configs();\n        foo_upgrade->set_upgrade_type(\"foo\");\n        foo_upgrade->mutable_enabled()->set_value(false);\n      });\n  auto host = config_helper_.createVirtualHost(\"host\", \"/websocket/test\");\n  host.mutable_routes(0)->mutable_route()->add_upgrade_configs()->set_upgrade_type(\"foo\");\n  config_helper_.addVirtualHost(host);\n  initialize();\n\n  performUpgrade(upgradeRequestHeaders(\"foo\", 0), upgradeResponseHeaders(\"foo\"));\n  sendBidirectionalData();\n  codec_client_->sendData(*request_encoder_, \"bye!\", false);\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) {\n    codec_client_->close();\n  } else {\n    codec_client_->sendReset(*request_encoder_);\n  }\n\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, \"hellobye!\"));\n  ASSERT_TRUE(waitForUpstreamDisconnectOrReset());\n\n  auto upgrade_response_headers(upgradeResponseHeaders(\"foo\"));\n  validateUpgradeResponseHeaders(response_->headers(), upgrade_response_headers);\n  codec_client_->close();\n}\n\nTEST_P(WebsocketIntegrationTest, WebsocketCustomFilterChain) {\n  config_helper_.addConfigModifier(setRouteUsingWebsocket());\n\n  // Add a small buffer filter to the standard HTTP filter chain. 
Websocket\n  // upgrades will use the HTTP filter chain so will also have small buffers.\n  config_helper_.addFilter(ConfigHelper::smallBufferFilter());\n\n  // Add a second upgrade type which goes directly to the router filter.\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        auto* foo_upgrade = hcm.add_upgrade_configs();\n        foo_upgrade->set_upgrade_type(\"foo\");\n        auto* filter_list_back = foo_upgrade->add_filters();\n        TestUtility::loadFromYaml(\"name: envoy.filters.http.router\", *filter_list_back);\n      });\n  initialize();\n\n  // Websocket upgrades are configured to disallow large payload.\n  const std::string large_req_str(2048, 'a');\n  {\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n    auto encoder_decoder = codec_client_->startRequest(upgradeRequestHeaders(\"websocket\"));\n    response_ = std::move(encoder_decoder.second);\n    codec_client_->sendData(encoder_decoder.first, large_req_str, false);\n    response_->waitForEndStream();\n    EXPECT_EQ(\"413\", response_->headers().getStatusValue());\n    waitForClientDisconnectOrReset();\n    codec_client_->close();\n  }\n\n  // HTTP requests are configured to disallow large bodies.\n  {\n    Http::TestRequestHeaderMapImpl request_headers{{\":method\", \"GET\"},\n                                                   {\":path\", \"/\"},\n                                                   {\"content-length\", \"2048\"},\n                                                   {\":authority\", \"host\"},\n                                                   {\":scheme\", \"https\"}};\n    codec_client_ = makeHttpConnection(lookupPort(\"http\"));\n    auto encoder_decoder = codec_client_->startRequest(request_headers);\n    response_ = std::move(encoder_decoder.second);\n    codec_client_->sendData(encoder_decoder.first, large_req_str, false);\n    
response_->waitForEndStream();\n    EXPECT_EQ(\"413\", response_->headers().getStatusValue());\n    waitForClientDisconnectOrReset();\n    codec_client_->close();\n  }\n\n  // Foo upgrades are configured without the buffer filter, so should explicitly\n  // allow large payload.\n  if (downstreamProtocol() != Http::CodecClient::Type::HTTP2) {\n    performUpgrade(upgradeRequestHeaders(\"foo\"), upgradeResponseHeaders(\"foo\"));\n    codec_client_->sendData(*request_encoder_, large_req_str, false);\n    ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, large_req_str));\n\n    // Tear down all the connections cleanly.\n    codec_client_->close();\n    ASSERT_TRUE(waitForUpstreamDisconnectOrReset());\n  }\n}\n\nTEST_P(WebsocketIntegrationTest, BidirectionalChunkedData) {\n  if (downstreamProtocol() == Http::CodecClient::Type::HTTP2 ||\n      upstreamProtocol() == FakeHttpConnection::Type::HTTP2) {\n    return;\n  }\n\n  config_helper_.addConfigModifier(setRouteUsingWebsocket());\n  initialize();\n\n  auto request_headers = upgradeRequestHeaders();\n  request_headers.removeContentLength();\n  auto response_headers = upgradeResponseHeaders();\n  response_headers.removeContentLength();\n  performUpgrade(request_headers, response_headers);\n\n  // With content-length not present, the HTTP codec will send the request with\n  // transfer-encoding: chunked.\n  if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {\n    ASSERT_TRUE(upstream_request_->headers().TransferEncoding() != nullptr);\n  }\n\n  // Send both a chunked request body and \"websocket\" payload.\n  std::string request_payload = \"3\\r\\n123\\r\\n0\\r\\n\\r\\nSomeWebsocketRequestPayload\";\n  codec_client_->sendData(*request_encoder_, request_payload, false);\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, request_payload));\n\n  // Send both a chunked response body and \"websocket\" payload.\n  std::string response_payload = 
\"4\\r\\nabcd\\r\\n0\\r\\n\\r\\nSomeWebsocketResponsePayload\";\n  upstream_request_->encodeData(response_payload, false);\n  response_->waitForBodyData(response_payload.size());\n  EXPECT_EQ(response_payload, response_->body());\n\n  // Verify follow-up bidirectional data still works.\n  codec_client_->sendData(*request_encoder_, \"FinalClientPayload\", false);\n  ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, request_payload + \"FinalClientPayload\"));\n  upstream_request_->encodeData(\"FinalServerPayload\", false);\n  response_->waitForBodyData(response_->body().size() + 5);\n  EXPECT_EQ(response_payload + \"FinalServerPayload\", response_->body());\n\n  // Clean up.\n  codec_client_->close();\n  ASSERT_TRUE(waitForUpstreamDisconnectOrReset());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/websocket_integration_test.h",
    "content": "#pragma once\n\n#include \"test/integration/http_protocol_integration.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nstruct WebsocketProtocolTestParams {\n  Network::Address::IpVersion version;\n  Http::CodecClient::Type downstream_protocol;\n  FakeHttpConnection::Type upstream_protocol;\n};\n\nclass WebsocketIntegrationTest : public HttpProtocolIntegrationTest {\npublic:\n  void initialize() override;\n\nprotected:\n  void performUpgrade(const Http::TestRequestHeaderMapImpl& upgrade_request_headers,\n                      const Http::TestResponseHeaderMapImpl& upgrade_response_headers);\n  void sendBidirectionalData();\n\n  void validateUpgradeRequestHeaders(const Http::RequestHeaderMap& proxied_request_headers,\n                                     const Http::RequestHeaderMap& original_request_headers);\n  void validateUpgradeResponseHeaders(const Http::ResponseHeaderMap& proxied_response_headers,\n                                      const Http::ResponseHeaderMap& original_response_headers);\n\n  ABSL_MUST_USE_RESULT\n  testing::AssertionResult waitForUpstreamDisconnectOrReset() {\n    if (upstreamProtocol() != FakeHttpConnection::Type::HTTP1) {\n      return upstream_request_->waitForReset();\n    } else {\n      return fake_upstream_connection_->waitForDisconnect();\n    }\n  }\n\n  void waitForClientDisconnectOrReset() {\n    if (downstreamProtocol() != Http::CodecClient::Type::HTTP1) {\n      response_->waitForReset();\n    } else {\n      ASSERT_TRUE(codec_client_->waitForDisconnect());\n    }\n  }\n\n  IntegrationStreamDecoderPtr response_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/xds_integration_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/http_protocol_integration.h\"\n#include \"test/integration/ssl_utility.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n#include \"utility.h\"\n\nnamespace Envoy {\nnamespace {\n\nusing testing::HasSubstr;\n\n// This is a minimal litmus test for the v2 xDS APIs.\nclass XdsIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                           public HttpIntegrationTest {\npublic:\n  XdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {\n    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  }\n\n  void createEnvoy() override {\n    createEnvoyServer({\n        \"test/config/integration/server_xds.bootstrap.yaml\",\n        \"test/config/integration/server_xds.cds.yaml\",\n        \"test/config/integration/server_xds.eds.yaml\",\n        \"test/config/integration/server_xds.lds.yaml\",\n        \"test/config/integration/server_xds.rds.yaml\",\n    });\n  }\n\n  void createEnvoyServer(const ApiFilesystemConfig& api_filesystem_config) {\n    registerPort(\"upstream_0\", fake_upstreams_.back()->localAddress()->ip()->port());\n    createApiTestServer(api_filesystem_config, {\"http\"}, {false, false, false}, false);\n    EXPECT_EQ(1, test_server_->counter(\"listener_manager.lds.update_success\")->value());\n    EXPECT_EQ(1, test_server_->counter(\"http.router.rds.route_config_0.update_success\")->value());\n    EXPECT_EQ(1, test_server_->counter(\"cluster_manager.cds.update_success\")->value());\n    EXPECT_EQ(1, test_server_->counter(\"cluster.cluster_1.update_success\")->value());\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, 
XdsIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(XdsIntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, false);\n}\n\nclass XdsIntegrationTestTypedStruct : public XdsIntegrationTest {\npublic:\n  XdsIntegrationTestTypedStruct() = default;\n\n  void createEnvoy() override {\n    createEnvoyServer({\n        \"test/config/integration/server_xds.bootstrap.yaml\",\n        \"test/config/integration/server_xds.cds.yaml\",\n        \"test/config/integration/server_xds.eds.yaml\",\n        \"test/config/integration/server_xds.lds.typed_struct.yaml\",\n        \"test/config/integration/server_xds.rds.yaml\",\n    });\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, XdsIntegrationTestTypedStruct,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(XdsIntegrationTestTypedStruct, RouterRequestAndResponseWithBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, false);\n}\n\nclass UdpaXdsIntegrationTestListCollection : public XdsIntegrationTest {\npublic:\n  UdpaXdsIntegrationTestListCollection() = default;\n\n  void createEnvoy() override {\n    // TODO(htuch): Convert CDS/EDS/RDS to UDPA list collections when support is implemented in\n    // Envoy.\n    createEnvoyServer({\n        \"test/config/integration/server_xds.bootstrap.udpa.yaml\",\n        \"test/config/integration/server_xds.cds.yaml\",\n        \"test/config/integration/server_xds.eds.yaml\",\n        \"test/config/integration/server_xds.lds.udpa.list_collection.yaml\",\n        \"test/config/integration/server_xds.rds.yaml\",\n    });\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, UdpaXdsIntegrationTestListCollection,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n    
                     TestUtility::ipTestParamsToString);\n\nTEST_P(UdpaXdsIntegrationTestListCollection, RouterRequestAndResponseWithBodyNoBuffer) {\n  testRouterRequestAndResponseWithBody(1024, 512, false);\n}\n\nclass LdsInplaceUpdateTcpProxyIntegrationTest\n    : public testing::TestWithParam<Network::Address::IpVersion>,\n      public BaseIntegrationTest {\npublic:\n  LdsInplaceUpdateTcpProxyIntegrationTest()\n      : BaseIntegrationTest(GetParam(), ConfigHelper::baseConfig() + R\"EOF(\n    filter_chains:\n    - filter_chain_match:\n        application_protocols: [\"alpn0\"]\n      filters:\n      - name: envoy.filters.network.tcp_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy\n          stat_prefix: tcp_stats\n          cluster: cluster_0\n    - filter_chain_match:\n        application_protocols: [\"alpn1\"]\n      filters:\n      - name: envoy.filters.network.tcp_proxy\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy\n          stat_prefix: tcp_stats\n          cluster: cluster_1\n)EOF\") {}\n\n  void initialize() override {\n    config_helper_.renameListener(\"tcp\");\n    std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter();\n    config_helper_.addListenerFilter(tls_inspector_config);\n\n    config_helper_.addSslConfig();\n    config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      auto* filter_chain_0 =\n          bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0);\n      auto* filter_chain_1 =\n          bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(1);\n      filter_chain_1->mutable_transport_socket()->MergeFrom(\n          *filter_chain_0->mutable_transport_socket());\n\n      bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom(\n          
*bootstrap.mutable_static_resources()->mutable_clusters(0));\n      bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name(\"cluster_1\");\n    });\n\n    BaseIntegrationTest::initialize();\n\n    context_manager_ =\n        std::make_unique<Extensions::TransportSockets::Tls::ContextManagerImpl>(timeSystem());\n    context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_);\n  }\n\n  std::unique_ptr<RawConnectionDriver> createConnectionAndWrite(const std::string& alpn,\n                                                                const std::string& request,\n                                                                std::string& response) {\n    Buffer::OwnedImpl buffer(request);\n    return std::make_unique<RawConnectionDriver>(\n        lookupPort(\"tcp\"), buffer,\n        [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void {\n          response.append(data.toString());\n        },\n        version_, *dispatcher_,\n        context_->createTransportSocket(std::make_shared<Network::TransportSocketOptionsImpl>(\n            absl::string_view(\"\"), std::vector<std::string>(), std::vector<std::string>{alpn})));\n  }\n\n  std::unique_ptr<Ssl::ContextManager> context_manager_;\n  Network::TransportSocketFactoryPtr context_;\n  testing::NiceMock<Secret::MockSecretManager> secret_manager_;\n};\n\n// Verify that tcp connection 1 is closed while client 0 survives when deleting filter chain 1.\nTEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigDeletingFilterChain) {\n  setUpstreamCount(2);\n  initialize();\n  std::string response_0;\n  auto client_conn_0 = createConnectionAndWrite(\"alpn0\", \"hello\", response_0);\n  client_conn_0->waitForConnection();\n  FakeRawConnectionPtr fake_upstream_connection_0;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_0));\n\n  std::string response_1;\n  auto client_conn_1 = createConnectionAndWrite(\"alpn1\", \"dummy\", 
response_1);\n  client_conn_1->waitForConnection();\n  FakeRawConnectionPtr fake_upstream_connection_1;\n  ASSERT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_1));\n\n  ConfigHelper new_config_helper(version_, *api_,\n                                 MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap()));\n  new_config_helper.addConfigModifier(\n      [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n        auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n        listener->mutable_filter_chains()->RemoveLast();\n      });\n  new_config_helper.setLds(\"1\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_in_place_updated\", 1);\n\n  while (!client_conn_1->closed()) {\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  ASSERT_EQ(response_1, \"\");\n\n  std::string observed_data_0;\n  ASSERT_TRUE(fake_upstream_connection_0->waitForData(5, &observed_data_0));\n  EXPECT_EQ(\"hello\", observed_data_0);\n\n  ASSERT_TRUE(fake_upstream_connection_0->write(\"world\"));\n  while (response_0.find(\"world\") == std::string::npos) {\n    client_conn_0->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  client_conn_0->close();\n  while (!client_conn_0->closed()) {\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n}\n\n// Verify that tcp connection of filter chain 0 survives if new listener config adds new filter\n// chain 2.\nTEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigAddingFilterChain) {\n  setUpstreamCount(2);\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n\n  std::string response_0;\n  auto client_conn_0 = createConnectionAndWrite(\"alpn0\", \"hello\", response_0);\n  client_conn_0->waitForConnection();\n  FakeRawConnectionPtr fake_upstream_connection_0;\n  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_0));\n\n  ConfigHelper 
new_config_helper(version_, *api_,\n                                 MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap()));\n  new_config_helper.addConfigModifier(\n      [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n        auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n        listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1));\n        *listener->mutable_filter_chains(2)\n             ->mutable_filter_chain_match()\n             ->mutable_application_protocols(0) = \"alpn2\";\n      });\n  new_config_helper.setLds(\"1\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_in_place_updated\", 1);\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 2);\n\n  std::string response_2;\n  auto client_conn_2 = createConnectionAndWrite(\"alpn2\", \"hello2\", response_2);\n  client_conn_2->waitForConnection();\n  FakeRawConnectionPtr fake_upstream_connection_2;\n  ASSERT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_2));\n  std::string observed_data_2;\n  ASSERT_TRUE(fake_upstream_connection_2->waitForData(6, &observed_data_2));\n  EXPECT_EQ(\"hello2\", observed_data_2);\n\n  ASSERT_TRUE(fake_upstream_connection_2->write(\"world2\"));\n  while (response_2.find(\"world2\") == std::string::npos) {\n    client_conn_2->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  client_conn_2->close();\n  while (!client_conn_2->closed()) {\n    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n\n  std::string observed_data_0;\n  ASSERT_TRUE(fake_upstream_connection_0->waitForData(5, &observed_data_0));\n  EXPECT_EQ(\"hello\", observed_data_0);\n\n  ASSERT_TRUE(fake_upstream_connection_0->write(\"world\"));\n  while (response_0.find(\"world\") == std::string::npos) {\n    client_conn_0->run(Event::Dispatcher::RunType::NonBlock);\n  }\n  client_conn_0->close();\n  while (!client_conn_0->closed()) {\n    
dispatcher_->run(Event::Dispatcher::RunType::NonBlock);\n  }\n}\n\nclass LdsInplaceUpdateHttpIntegrationTest\n    : public testing::TestWithParam<Network::Address::IpVersion>,\n      public HttpIntegrationTest {\npublic:\n  LdsInplaceUpdateHttpIntegrationTest()\n      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}\n\n  void initialize() override {\n    autonomous_upstream_ = true;\n    setUpstreamCount(2);\n\n    config_helper_.renameListener(\"http\");\n    std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter();\n    config_helper_.addListenerFilter(tls_inspector_config);\n    config_helper_.addSslConfig();\n    config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n      if (!use_default_balancer_) {\n        bootstrap.mutable_static_resources()\n            ->mutable_listeners(0)\n            ->mutable_connection_balance_config()\n            ->mutable_exact_balance();\n      }\n      auto* filter_chain_0 =\n          bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0);\n      *filter_chain_0->mutable_filter_chain_match()->mutable_application_protocols()->Add() =\n          \"alpn0\";\n      auto* filter_chain_1 = bootstrap.mutable_static_resources()\n                                 ->mutable_listeners(0)\n                                 ->mutable_filter_chains()\n                                 ->Add();\n      filter_chain_1->MergeFrom(*filter_chain_0);\n\n      // filter chain 1\n      // alpn1, route to cluster_1\n      *filter_chain_1->mutable_filter_chain_match()->mutable_application_protocols(0) = \"alpn1\";\n\n      auto* config_blob = filter_chain_1->mutable_filters(0)->mutable_typed_config();\n\n      ASSERT_TRUE(config_blob->Is<envoy::extensions::filters::network::http_connection_manager::v3::\n                                      HttpConnectionManager>());\n      auto hcm_config = MessageUtil::anyConvert<\n          
envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>(\n          *config_blob);\n      hcm_config.mutable_route_config()\n          ->mutable_virtual_hosts(0)\n          ->mutable_routes(0)\n          ->mutable_route()\n          ->set_cluster(\"cluster_1\");\n      config_blob->PackFrom(hcm_config);\n      bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom(\n          *bootstrap.mutable_static_resources()->mutable_clusters(0));\n      bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name(\"cluster_1\");\n    });\n\n    BaseIntegrationTest::initialize();\n\n    context_manager_ =\n        std::make_unique<Extensions::TransportSockets::Tls::ContextManagerImpl>(timeSystem());\n    context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_);\n    address_ = Ssl::getSslAddress(version_, lookupPort(\"http\"));\n  }\n\n  IntegrationCodecClientPtr createHttpCodec(const std::string& alpn) {\n    auto ssl_conn = dispatcher_->createClientConnection(\n        address_, Network::Address::InstanceConstSharedPtr(),\n        context_->createTransportSocket(std::make_shared<Network::TransportSocketOptionsImpl>(\n            absl::string_view(\"\"), std::vector<std::string>(), std::vector<std::string>{alpn})),\n        nullptr);\n    return makeHttpConnection(std::move(ssl_conn));\n  }\n\n  void expectResponseHeaderConnectionClose(IntegrationCodecClient& codec_client,\n                                           bool expect_close) {\n    IntegrationStreamDecoderPtr response =\n        codec_client.makeHeaderOnlyRequest(default_request_headers_);\n\n    response->waitForEndStream();\n    EXPECT_TRUE(response->complete());\n    EXPECT_EQ(\"200\", response->headers().getStatusValue());\n    if (expect_close) {\n      EXPECT_EQ(\"close\", response->headers().getConnectionValue());\n\n    } else {\n      EXPECT_EQ(nullptr, response->headers().Connection());\n    }\n  }\n\n  void 
expectConnenctionServed(std::string alpn = \"alpn0\") {\n    auto codec_client_after_config_update = createHttpCodec(alpn);\n    expectResponseHeaderConnectionClose(*codec_client_after_config_update, false);\n    codec_client_after_config_update->close();\n  }\n\n  std::unique_ptr<Ssl::ContextManager> context_manager_;\n  Network::TransportSocketFactoryPtr context_;\n  testing::NiceMock<Secret::MockSecretManager> secret_manager_;\n  Network::Address::InstanceConstSharedPtr address_;\n  bool use_default_balancer_{false};\n};\n\n// Verify that http response on filter chain 0 has \"Connection: close\" header when filter chain 0\n// is deleted during the listener update.\nTEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) {\n  initialize();\n\n  auto codec_client_1 = createHttpCodec(\"alpn1\");\n  auto codec_client_0 = createHttpCodec(\"alpn0\");\n  Cleanup cleanup([c1 = codec_client_1.get(), c0 = codec_client_0.get()]() {\n    c1->close();\n    c0->close();\n  });\n  ConfigHelper new_config_helper(version_, *api_,\n                                 MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap()));\n  new_config_helper.addConfigModifier(\n      [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n        auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n        listener->mutable_filter_chains()->RemoveLast();\n      });\n\n  new_config_helper.setLds(\"1\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_in_place_updated\", 1);\n  test_server_->waitForGaugeGe(\"listener_manager.total_filter_chains_draining\", 1);\n\n  expectResponseHeaderConnectionClose(*codec_client_1, true);\n  test_server_->waitForGaugeGe(\"listener_manager.total_filter_chains_draining\", 0);\n  expectResponseHeaderConnectionClose(*codec_client_0, false);\n  expectConnenctionServed();\n}\n\n// Verify that http clients of filter chain 0 survives if new listener config adds new filter\n// chain 
2.\nTEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) {\n  initialize();\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 1);\n\n  auto codec_client_0 = createHttpCodec(\"alpn0\");\n  Cleanup cleanup0([c0 = codec_client_0.get()]() { c0->close(); });\n  ConfigHelper new_config_helper(version_, *api_,\n                                 MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap()));\n  new_config_helper.addConfigModifier(\n      [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n        auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n        listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1));\n        *listener->mutable_filter_chains(2)\n             ->mutable_filter_chain_match()\n             ->mutable_application_protocols(0) = \"alpn2\";\n      });\n  new_config_helper.setLds(\"1\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_in_place_updated\", 1);\n  test_server_->waitForCounterGe(\"listener_manager.listener_create_success\", 2);\n\n  auto codec_client_2 = createHttpCodec(\"alpn2\");\n  Cleanup cleanup2([c2 = codec_client_2.get()]() { c2->close(); });\n  expectResponseHeaderConnectionClose(*codec_client_2, false);\n  expectResponseHeaderConnectionClose(*codec_client_0, false);\n  expectConnenctionServed();\n}\n\n// Verify that balancer is inherited. 
Test only default balancer because ExactConnectionBalancer\n// is verified in filter chain add and delete test case.\nTEST_P(LdsInplaceUpdateHttpIntegrationTest, OverlappingFilterChainServesNewConnection) {\n  use_default_balancer_ = true;\n  initialize();\n\n  auto codec_client_0 = createHttpCodec(\"alpn0\");\n  Cleanup cleanup([c0 = codec_client_0.get()]() { c0->close(); });\n  ConfigHelper new_config_helper(version_, *api_,\n                                 MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap()));\n  new_config_helper.addConfigModifier(\n      [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n        auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n        listener->mutable_filter_chains()->RemoveLast();\n      });\n\n  new_config_helper.setLds(\"1\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_in_place_updated\", 1);\n  expectResponseHeaderConnectionClose(*codec_client_0, false);\n  expectConnenctionServed();\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, LdsInplaceUpdateHttpIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, LdsInplaceUpdateTcpProxyIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nusing LdsIntegrationTest = HttpProtocolIntegrationTest;\n\nINSTANTIATE_TEST_SUITE_P(Protocols, LdsIntegrationTest,\n                         testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams(\n                             {Http::CodecClient::Type::HTTP1}, {FakeHttpConnection::Type::HTTP1})),\n                         HttpProtocolIntegrationTest::protocolTestParamsToString);\n\n// Sample test making sure our config framework correctly reloads listeners.\nTEST_P(LdsIntegrationTest, ReloadConfig) 
{\n  autonomous_upstream_ = true;\n  initialize();\n  // Given we're using LDS in this test, initialize() will not complete until\n  // the initial LDS file has loaded.\n  EXPECT_EQ(1, test_server_->counter(\"listener_manager.lds.update_success\")->value());\n\n  // HTTP 1.0 is disabled by default.\n  std::string response;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.0\\r\\n\\r\\n\", &response, true);\n  EXPECT_TRUE(response.find(\"HTTP/1.1 426 Upgrade Required\\r\\n\") == 0);\n\n  // Create a new config with HTTP/1.0 proxying.\n  ConfigHelper new_config_helper(version_, *api_,\n                                 MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap()));\n  new_config_helper.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) {\n        hcm.mutable_http_protocol_options()->set_accept_http_10(true);\n        hcm.mutable_http_protocol_options()->set_default_host_for_http_10(\"default.com\");\n      });\n\n  // Create an LDS response with the new config, and reload config.\n  new_config_helper.setLds(\"1\");\n  test_server_->waitForCounterGe(\"listener_manager.listener_in_place_updated\", 1);\n  test_server_->waitForCounterGe(\"listener_manager.lds.update_success\", 2);\n\n  // HTTP 1.0 should now be enabled.\n  std::string response2;\n  sendRawHttpAndWaitForResponse(lookupPort(\"http\"), \"GET / HTTP/1.0\\r\\n\\r\\n\", &response2, false);\n  EXPECT_THAT(response2, HasSubstr(\"HTTP/1.0 200 OK\\r\\n\"));\n}\n\n// Sample test making sure our config framework informs on listener failure.\nTEST_P(LdsIntegrationTest, FailConfigLoad) {\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);\n    auto* filter_chain = listener->mutable_filter_chains(0);\n    filter_chain->mutable_filters(0)->clear_typed_config();\n   
 filter_chain->mutable_filters(0)->set_name(\"grewgragra\");\n  });\n  EXPECT_DEATH(initialize(), \"Didn't find a registered implementation for name: 'grewgragra'\");\n}\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/xfcc_integration_test.cc",
    "content": "#include \"xfcc_integration_test.h\"\n\n#include <memory>\n#include <regex>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"integration.h\"\n#include \"utility.h\"\n\nnamespace Envoy {\nnamespace Xfcc {\n\nvoid XfccIntegrationTest::TearDown() {\n  test_server_.reset();\n  client_mtls_ssl_ctx_.reset();\n  client_tls_ssl_ctx_.reset();\n  fake_upstream_connection_.reset();\n  fake_upstreams_.clear();\n  HttpIntegrationTest::cleanupUpstreamAndDownstream();\n  codec_client_.reset();\n  context_manager_.reset();\n}\n\nNetwork::TransportSocketFactoryPtr XfccIntegrationTest::createClientSslContext(bool mtls) {\n  const std::string yaml_tls = R\"EOF(\ncommon_tls_context:\n  validation_context:\n    trusted_ca:\n      filename: {{ test_rundir }}/test/config/integration/certs/cacert.pem\n    match_subject_alt_names: \n      exact: \"spiffe://lyft.com/backend-team\"\n      exact: \"lyft.com\"\n      exact: \"www.lyft.com\"\n)EOF\";\n\n  const std::string yaml_mtls = R\"EOF(\ncommon_tls_context:\n  validation_context:\n    trusted_ca:\n      filename: {{ test_rundir }}/test/config/integration/certs/cacert.pem\n    match_subject_alt_names: \n      exact: 
\"spiffe://lyft.com/backend-team\"\n      exact: \"lyft.com\"\n      exact: \"www.lyft.com\"\n  tls_certificates:\n    certificate_chain:\n      filename: {{ test_rundir }}/test/config/integration/certs/clientcert.pem\n    private_key:\n      filename: {{ test_rundir }}/test/config/integration/certs/clientkey.pem\n)EOF\";\n\n  std::string target;\n  if (mtls) {\n    target = yaml_mtls;\n  } else {\n    target = yaml_tls;\n  }\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext config;\n  TestUtility::loadFromYaml(TestEnvironment::substitute(target), config);\n  auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ClientContextConfigImpl>(\n      config, factory_context_);\n  static auto* client_stats_store = new Stats::TestIsolatedStoreImpl();\n  return Network::TransportSocketFactoryPtr{\n      new Extensions::TransportSockets::Tls::ClientSslSocketFactory(\n          std::move(cfg), *context_manager_, *client_stats_store)};\n}\n\nNetwork::TransportSocketFactoryPtr XfccIntegrationTest::createUpstreamSslContext() {\n  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;\n  auto* common_tls_context = tls_context.mutable_common_tls_context();\n  auto* tls_cert = common_tls_context->add_tls_certificates();\n  tls_cert->mutable_certificate_chain()->set_filename(\n      TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcert.pem\"));\n  tls_cert->mutable_private_key()->set_filename(\n      TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamkey.pem\"));\n\n  auto cfg = std::make_unique<Extensions::TransportSockets::Tls::ServerContextConfigImpl>(\n      tls_context, factory_context_);\n  static Stats::Scope* upstream_stats_store = new Stats::TestIsolatedStoreImpl();\n  return std::make_unique<Extensions::TransportSockets::Tls::ServerSslSocketFactory>(\n      std::move(cfg), *context_manager_, *upstream_stats_store, std::vector<std::string>{});\n}\n\nNetwork::ClientConnectionPtr 
XfccIntegrationTest::makeTcpClientConnection() {\n  Network::Address::InstanceConstSharedPtr address =\n      Network::Utility::resolveUrl(\"tcp://\" + Network::Test::getLoopbackAddressUrlString(version_) +\n                                   \":\" + std::to_string(lookupPort(\"http\")));\n  return dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(),\n                                             Network::Test::createRawBufferSocket(), nullptr);\n}\n\nNetwork::ClientConnectionPtr XfccIntegrationTest::makeMtlsClientConnection() {\n  Network::Address::InstanceConstSharedPtr address =\n      Network::Utility::resolveUrl(\"tcp://\" + Network::Test::getLoopbackAddressUrlString(version_) +\n                                   \":\" + std::to_string(lookupPort(\"http\")));\n  return dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(),\n                                             client_mtls_ssl_ctx_->createTransportSocket(nullptr),\n                                             nullptr);\n}\n\nvoid XfccIntegrationTest::createUpstreams() {\n  addFakeUpstream(createUpstreamSslContext(), FakeHttpConnection::Type::HTTP1);\n}\n\nvoid XfccIntegrationTest::initialize() {\n  config_helper_.addConfigModifier(\n      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&\n              hcm) -> void {\n        hcm.set_forward_client_cert_details(fcc_);\n        hcm.mutable_set_current_client_cert_details()->CopyFrom(sccd_);\n      });\n\n  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {\n    auto transport_socket =\n        bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket();\n    envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext context;\n    auto* validation_context = context.mutable_common_tls_context()->mutable_validation_context();\n    
validation_context->mutable_trusted_ca()->set_filename(\n        TestEnvironment::runfilesPath(\"test/config/integration/certs/upstreamcacert.pem\"));\n    validation_context->add_match_subject_alt_names()->set_suffix(\"lyft.com\");\n    transport_socket->set_name(\"envoy.transport_sockets.tls\");\n    transport_socket->mutable_typed_config()->PackFrom(context);\n  });\n\n  if (tls_) {\n    config_helper_.addSslConfig();\n  }\n\n  context_manager_ =\n      std::make_unique<Extensions::TransportSockets::Tls::ContextManagerImpl>(timeSystem());\n  client_tls_ssl_ctx_ = createClientSslContext(false);\n  client_mtls_ssl_ctx_ = createClientSslContext(true);\n  HttpIntegrationTest::initialize();\n}\n\nvoid XfccIntegrationTest::testRequestAndResponseWithXfccHeader(std::string previous_xfcc,\n                                                               std::string expected_xfcc) {\n  Network::ClientConnectionPtr conn = tls_ ? makeMtlsClientConnection() : makeTcpClientConnection();\n  Http::TestRequestHeaderMapImpl header_map;\n  if (previous_xfcc.empty()) {\n    header_map = Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                {\":path\", \"/test/long/url\"},\n                                                {\":scheme\", \"http\"},\n                                                {\":authority\", \"host\"}};\n  } else {\n    header_map = Http::TestRequestHeaderMapImpl{{\":method\", \"GET\"},\n                                                {\":path\", \"/test/long/url\"},\n                                                {\":scheme\", \"http\"},\n                                                {\":authority\", \"host\"},\n                                                {\"x-forwarded-client-cert\", previous_xfcc.c_str()}};\n  }\n\n  codec_client_ = makeHttpConnection(std::move(conn));\n  auto response = codec_client_->makeHeaderOnlyRequest(header_map);\n  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, 
fake_upstream_connection_));\n  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));\n  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));\n  if (expected_xfcc.empty()) {\n    EXPECT_EQ(nullptr, upstream_request_->headers().ForwardedClientCert());\n  } else {\n    EXPECT_EQ(expected_xfcc, upstream_request_->headers().getForwardedClientCertValue());\n  }\n  upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{\":status\", \"200\"}}, true);\n  response->waitForEndStream();\n  EXPECT_TRUE(upstream_request_->complete());\n  EXPECT_TRUE(response->complete());\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, XfccIntegrationTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(XfccIntegrationTest, MtlsForwardOnly) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      FORWARD_ONLY;\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, previous_xfcc_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAlwaysForwardOnly) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      ALWAYS_FORWARD_ONLY;\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, previous_xfcc_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsSanitize) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SANITIZE;\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, \"\");\n}\n\nTEST_P(XfccIntegrationTest, MtlsSanitizeSetSubject) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SANITIZE_SET;\n  sccd_.mutable_subject()->set_value(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_,\n                                       current_xfcc_by_hash_ + \";\" + 
client_subject_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsSanitizeSetUri) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SANITIZE_SET;\n  sccd_.set_uri(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_,\n                                       current_xfcc_by_hash_ + \";\" + client_uri_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsSanitizeSetDns) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SANITIZE_SET;\n  sccd_.set_dns(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_,\n                                       current_xfcc_by_hash_ + \";\" + client_dns_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsSanitizeSetSubjectUri) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SANITIZE_SET;\n  sccd_.mutable_subject()->set_value(true);\n  sccd_.set_uri(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, current_xfcc_by_hash_ + \";\" +\n                                                           client_subject_ + \";\" + client_uri_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsSanitizeSetSubjectDns) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SANITIZE_SET;\n  sccd_.mutable_subject()->set_value(true);\n  sccd_.set_dns(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, current_xfcc_by_hash_ + \";\" +\n                                                           client_subject_ + \";\" + client_dns_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsSanitizeSetSubjectUriDns) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SANITIZE_SET;\n  sccd_.mutable_subject()->set_value(true);\n  sccd_.set_uri(true);\n  sccd_.set_dns(true);\n  initialize();\n  
testRequestAndResponseWithXfccHeader(previous_xfcc_, current_xfcc_by_hash_ + \";\" +\n                                                           client_subject_ + \";\" + client_uri_san_ +\n                                                           \";\" + client_dns_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAppendForward) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD;\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_,\n                                       previous_xfcc_ + \",\" + current_xfcc_by_hash_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAppendForwardSubject) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD;\n  sccd_.mutable_subject()->set_value(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(\n      previous_xfcc_, previous_xfcc_ + \",\" + current_xfcc_by_hash_ + \";\" + client_subject_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAppendForwardUri) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD;\n  sccd_.set_uri(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(\n      previous_xfcc_, previous_xfcc_ + \",\" + current_xfcc_by_hash_ + \";\" + client_uri_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAppendForwardDns) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD;\n  sccd_.set_dns(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(\n      previous_xfcc_, previous_xfcc_ + \",\" + current_xfcc_by_hash_ + \";\" + client_dns_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAppendForwardSubjectUri) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD;\n  sccd_.mutable_subject()->set_value(true);\n  sccd_.set_uri(true);\n  initialize();\n  
testRequestAndResponseWithXfccHeader(previous_xfcc_, previous_xfcc_ + \",\" +\n                                                           current_xfcc_by_hash_ + \";\" +\n                                                           client_subject_ + \";\" + client_uri_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAppendForwardSubjectDns) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD;\n  sccd_.mutable_subject()->set_value(true);\n  sccd_.set_dns(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, previous_xfcc_ + \",\" +\n                                                           current_xfcc_by_hash_ + \";\" +\n                                                           client_subject_ + \";\" + client_dns_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAppendForwardSubjectUriDns) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD;\n  sccd_.mutable_subject()->set_value(true);\n  sccd_.set_uri(true);\n  sccd_.set_dns(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(\n      previous_xfcc_, previous_xfcc_ + \",\" + current_xfcc_by_hash_ + \";\" + client_subject_ + \";\" +\n                          client_uri_san_ + \";\" + client_dns_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAppendForwardUriPreviousXfccHeaderEmpty) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD;\n  sccd_.set_uri(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(\"\", current_xfcc_by_hash_ + \";\" + client_uri_san_);\n}\n\nTEST_P(XfccIntegrationTest, MtlsAppendForwardDnsPreviousXfccHeaderEmpty) {\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      APPEND_FORWARD;\n  sccd_.set_dns(true);\n  initialize();\n  testRequestAndResponseWithXfccHeader(\"\", current_xfcc_by_hash_ + 
\";\" + client_dns_san_);\n}\n\nTEST_P(XfccIntegrationTest, TlsAlwaysForwardOnly) {\n  // The always_forward_only works regardless of whether the connection is TLS/mTLS.\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      ALWAYS_FORWARD_ONLY;\n  tls_ = false;\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, previous_xfcc_);\n}\n\nTEST_P(XfccIntegrationTest, TlsEnforceSanitize) {\n  // The forward_only, append_forward and sanitize_set options are not effective when the connection\n  // is not using Mtls.\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      FORWARD_ONLY;\n  tls_ = false;\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, \"\");\n}\n\nTEST_P(XfccIntegrationTest, NonTlsAlwaysForwardOnly) {\n  // The always_forward_only works regardless of whether the connection is TLS/mTLS.\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      ALWAYS_FORWARD_ONLY;\n  tls_ = false;\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, previous_xfcc_);\n}\n\nTEST_P(XfccIntegrationTest, NonTlsEnforceSanitize) {\n  // The forward_only, append_forward and sanitize_set options are not effective when the connection\n  // is not using Mtls.\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      FORWARD_ONLY;\n  tls_ = false;\n  initialize();\n  testRequestAndResponseWithXfccHeader(previous_xfcc_, \"\");\n}\n\nTEST_P(XfccIntegrationTest, TagExtractedNameGenerationTest) {\n  // Note: the test below is meant to check that default tags are being extracted correctly with\n  // real-ish input stats. If new stats are added, this test will not break because names that do\n  // not exist in the map are not checked. 
However, if stats are modified the below maps should be\n  // updated (or regenerated by printing in map literal format). See commented code below to\n  // regenerate the maps. Note: different maps are needed for ipv4 and ipv6, so when regenerating,\n  // the printout needs to be copied from each test parameterization and pasted into the respective\n  // case in the switch statement below.\n\n  fcc_ = envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      FORWARD_ONLY;\n  initialize();\n\n  // Commented sample code to regenerate the map literals used below in the test log if necessary:\n\n  // std::cout << \"tag_extracted_counter_map = {\";\n  // std::vector<Stats::CounterSharedPtr> counters = test_server_->counters();\n  // for (auto it = counters.begin(); it != counters.end(); ++it) {\n  //   if (it != counters.begin()) {\n  //     std::cout << \",\";\n  //   }\n  //   std::cout << std::endl << \"{\\\"\" << (*it)->name() << \"\\\", \\\"\" << (*it)->tagExtractedName() <<\n  //   \"\\\"}\";\n  // }\n  // std::cout << \"};\" << std::endl;\n  // std::cout << \"tag_extracted_gauge_map = {\";\n  // std::vector<Stats::GaugeSharedPtr> gauges = test_server_->gauges();\n  // for (auto it = gauges.begin(); it != gauges.end(); ++it) {\n  //   if (it != gauges.begin()) {\n  //     std::cout << \",\";\n  //   }\n  //   std::cout << std::endl << \"{\\\"\" << (*it)->name() << \"\\\", \\\"\" << (*it)->tagExtractedName() <<\n  //   \"\\\"}\";\n  // }\n  // std::cout << \"};\" << std::endl;\n\n  absl::node_hash_map<std::string, std::string> tag_extracted_counter_map;\n  absl::node_hash_map<std::string, std::string> tag_extracted_gauge_map;\n\n  tag_extracted_counter_map = {\n      {listenerStatPrefix(\"downstream_cx_total\"), \"listener.downstream_cx_total\"},\n      {listenerStatPrefix(\"http.router.downstream_rq_5xx\"), \"listener.http.downstream_rq_xx\"},\n      {listenerStatPrefix(\"http.router.downstream_rq_4xx\"), 
\"listener.http.downstream_rq_xx\"},\n      {listenerStatPrefix(\"http.router.downstream_rq_3xx\"), \"listener.http.downstream_rq_xx\"},\n      {listenerStatPrefix(\"downstream_cx_destroy\"), \"listener.downstream_cx_destroy\"},\n      {listenerStatPrefix(\"downstream_cx_proxy_proto_error\"),\n       \"listener.downstream_cx_proxy_proto_error\"},\n      {listenerStatPrefix(\"http.router.downstream_rq_2xx\"), \"listener.http.downstream_rq_xx\"},\n      {\"http.router.rq_total\", \"http.rq_total\"},\n      {\"http.router.tracing.not_traceable\", \"http.tracing.not_traceable\"},\n      {\"http.router.tracing.random_sampling\", \"http.tracing.random_sampling\"},\n      {\"http.router.rs_too_large\", \"http.rs_too_large\"},\n      {\"http.router.downstream_rq_5xx\", \"http.downstream_rq_xx\"},\n      {\"http.router.downstream_rq_4xx\", \"http.downstream_rq_xx\"},\n      {\"http.router.downstream_rq_2xx\", \"http.downstream_rq_xx\"},\n      {\"http.router.downstream_rq_ws_on_non_ws_route\", \"http.downstream_rq_ws_on_non_ws_route\"},\n      {\"http.router.downstream_rq_tx_reset\", \"http.downstream_rq_tx_reset\"},\n      {\"http.router.no_route\", \"http.no_route\"},\n      {\"http.router.tracing.health_check\", \"http.tracing.health_check\"},\n      {\"http.router.downstream_rq_too_large\", \"http.downstream_rq_too_large\"},\n      {\"http.router.downstream_rq_response_before_rq_complete\",\n       \"http.downstream_rq_response_before_rq_complete\"},\n      {\"http.router.downstream_rq_3xx\", \"http.downstream_rq_xx\"},\n      {\"http.router.downstream_cx_destroy\", \"http.downstream_cx_destroy\"},\n      {\"http.router.downstream_rq_non_relative_path\", \"http.downstream_rq_non_relative_path\"},\n      {\"http.router.downstream_cx_destroy_active_rq\", \"http.downstream_cx_destroy_active_rq\"},\n      {\"http.router.tracing.client_enabled\", \"http.tracing.client_enabled\"},\n      {\"http.router.downstream_cx_destroy_remote\", \"http.downstream_cx_destroy_remote\"},\n  
    {\"http.router.downstream_cx_http1_total\", \"http.downstream_cx_http1_total\"},\n      {\"http.router.downstream_cx_http2_total\", \"http.downstream_cx_http2_total\"},\n      {\"http.router.downstream_cx_ssl_total\", \"http.downstream_cx_ssl_total\"},\n      {\"http.router.downstream_cx_destroy_local_active_rq\",\n       \"http.downstream_cx_destroy_local_active_rq\"},\n      {\"http.router.downstream_cx_tx_bytes_total\", \"http.downstream_cx_tx_bytes_total\"},\n      {\"http.router.downstream_cx_destroy_local\", \"http.downstream_cx_destroy_local\"},\n      {\"http.router.downstream_flow_control_resumed_reading_total\",\n       \"http.downstream_flow_control_resumed_reading_total\"},\n      {\"http.router.downstream_cx_total\", \"http.downstream_cx_total\"},\n      {\"http.router.downstream_cx_websocket_total\", \"http.downstream_cx_websocket_total\"},\n      {\"http.router.downstream_cx_destroy_remote_active_rq\",\n       \"http.downstream_cx_destroy_remote_active_rq\"},\n      {\"http.router.rq_redirect\", \"http.rq_redirect\"},\n      {\"http.router.downstream_cx_protocol_error\", \"http.downstream_cx_protocol_error\"},\n      {\"http.router.downstream_cx_drain_close\", \"http.downstream_cx_drain_close\"},\n      {\"http.router.downstream_rq_http2_total\", \"http.downstream_rq_http2_total\"},\n      {\"http.router.no_cluster\", \"http.no_cluster\"},\n      {\"http.router.downstream_rq_rx_reset\", \"http.downstream_rq_rx_reset\"},\n      {\"http.router.downstream_cx_rx_bytes_total\", \"http.downstream_cx_rx_bytes_total\"},\n      {\"http.router.downstream_flow_control_paused_reading_total\",\n       \"http.downstream_flow_control_paused_reading_total\"},\n      {\"http.router.downstream_cx_idle_timeout\", \"http.downstream_cx_idle_timeout\"},\n      {\"http.router.tracing.service_forced\", \"http.tracing.service_forced\"},\n      {\"http.router.downstream_rq_http1_total\", \"http.downstream_rq_http1_total\"},\n      {\"http.router.downstream_rq_total\", 
\"http.downstream_rq_total\"},\n      {listenerStatPrefix(\"ssl.connection_error\"), \"listener.ssl.connection_error\"},\n      {listenerStatPrefix(\"ssl.handshake\"), \"listener.ssl.handshake\"},\n      {listenerStatPrefix(\"ssl.session_reused\"), \"listener.ssl.session_reused\"},\n      {listenerStatPrefix(\"ssl.fail_verify_san\"), \"listener.ssl.fail_verify_san\"},\n      {listenerStatPrefix(\"ssl.no_certificate\"), \"listener.ssl.no_certificate\"},\n      {listenerStatPrefix(\"ssl.fail_verify_no_cert\"), \"listener.ssl.fail_verify_no_cert\"},\n      {listenerStatPrefix(\"ssl.fail_verify_error\"), \"listener.ssl.fail_verify_error\"},\n      {listenerStatPrefix(\"ssl.fail_verify_cert_hash\"), \"listener.ssl.fail_verify_cert_hash\"},\n      {\"cluster.cluster_2.ssl.fail_verify_san\", \"cluster.ssl.fail_verify_san\"},\n      {\"cluster.cluster_2.ssl.fail_verify_error\", \"cluster.ssl.fail_verify_error\"},\n      {\"cluster.cluster_2.ssl.fail_verify_no_cert\", \"cluster.ssl.fail_verify_no_cert\"},\n      {\"cluster.cluster_2.update_success\", \"cluster.update_success\"},\n      {\"cluster.cluster_2.update_attempt\", \"cluster.update_attempt\"},\n      {\"cluster.cluster_2.retry_or_shadow_abandoned\", \"cluster.retry_or_shadow_abandoned\"},\n      {\"cluster.cluster_2.upstream_cx_destroy_local_with_active_rq\",\n       \"cluster.upstream_cx_destroy_local_with_active_rq\"},\n      {\"cluster.cluster_2.update_empty\", \"cluster.update_empty\"},\n      {\"cluster.cluster_2.lb_zone_no_capacity_left\", \"cluster.lb_zone_no_capacity_left\"},\n      {\"cluster.cluster_2.ssl.fail_verify_cert_hash\", \"cluster.ssl.fail_verify_cert_hash\"},\n      {\"cluster.cluster_2.upstream_cx_destroy\", \"cluster.upstream_cx_destroy\"},\n      {\"cluster.cluster_2.upstream_cx_connect_timeout\", \"cluster.upstream_cx_connect_timeout\"},\n      {\"cluster.cluster_2.update_failure\", \"cluster.update_failure\"},\n      {\"cluster.cluster_2.upstream_cx_rx_bytes_total\", 
\"cluster.upstream_cx_rx_bytes_total\"},\n      {\"cluster.cluster_2.ssl.no_certificate\", \"cluster.ssl.no_certificate\"},\n      {\"cluster.cluster_2.upstream_cx_http1_total\", \"cluster.upstream_cx_http1_total\"},\n      {\"cluster.cluster_2.upstream_cx_overflow\", \"cluster.upstream_cx_overflow\"},\n      {\"cluster.cluster_2.lb_local_cluster_not_ok\", \"cluster.lb_local_cluster_not_ok\"},\n      {\"cluster.cluster_2.ssl.connection_error\", \"cluster.ssl.connection_error\"},\n      {\"cluster.cluster_2.upstream_cx_destroy_with_active_rq\",\n       \"cluster.upstream_cx_destroy_with_active_rq\"},\n      {\"cluster.cluster_2.upstream_cx_destroy_remote_with_active_rq\",\n       \"cluster.upstream_cx_destroy_remote_with_active_rq\"},\n      {\"cluster.cluster_2.lb_recalculate_zone_structures\",\n       \"cluster.lb_recalculate_zone_structures\"},\n      {\"cluster.cluster_2.lb_zone_number_differs\", \"cluster.lb_zone_number_differs\"},\n      {\"cluster.cluster_2.upstream_cx_none_healthy\", \"cluster.upstream_cx_none_healthy\"},\n      {\"cluster.cluster_2.lb_zone_routing_all_directly\", \"cluster.lb_zone_routing_all_directly\"},\n      {\"cluster.cluster_2.upstream_cx_http2_total\", \"cluster.upstream_cx_http2_total\"},\n      {\"cluster.cluster_2.upstream_rq_maintenance_mode\", \"cluster.upstream_rq_maintenance_mode\"},\n      {\"cluster.cluster_2.upstream_rq_total\", \"cluster.upstream_rq_total\"},\n      {\"cluster.cluster_2.lb_zone_routing_cross_zone\", \"cluster.lb_zone_routing_cross_zone\"},\n      {\"cluster.cluster_2.lb_healthy_panic\", \"cluster.lb_healthy_panic\"},\n      {\"cluster.cluster_2.upstream_rq_timeout\", \"cluster.upstream_rq_timeout\"},\n      {\"cluster.cluster_2.upstream_rq_per_try_timeout\", \"cluster.upstream_rq_per_try_timeout\"},\n      {\"cluster.cluster_2.lb_zone_routing_sampled\", \"cluster.lb_zone_routing_sampled\"},\n      {\"cluster.cluster_2.upstream_cx_connect_fail\", \"cluster.upstream_cx_connect_fail\"},\n      
{\"cluster.cluster_2.upstream_cx_destroy_remote\", \"cluster.upstream_cx_destroy_remote\"},\n      {\"cluster.cluster_2.upstream_rq_retry\", \"cluster.upstream_rq_retry\"},\n      {\"cluster.cluster_2.upstream_cx_total\", \"cluster.upstream_cx_total\"},\n      {\"cluster.cluster_2.upstream_rq_retry_overflow\", \"cluster.upstream_rq_retry_overflow\"},\n      {\"cluster.cluster_2.upstream_cx_tx_bytes_total\", \"cluster.upstream_cx_tx_bytes_total\"},\n      {\"cluster.cluster_2.upstream_cx_close_notify\", \"cluster.upstream_cx_close_notify\"},\n      {\"cluster.cluster_2.upstream_cx_protocol_error\", \"cluster.upstream_cx_protocol_error\"},\n      {\"cluster.cluster_2.upstream_flow_control_drained_total\",\n       \"cluster.upstream_flow_control_drained_total\"},\n      {\"cluster.cluster_2.upstream_rq_pending_failure_eject\",\n       \"cluster.upstream_rq_pending_failure_eject\"},\n      {\"cluster.cluster_2.upstream_cx_max_requests\", \"cluster.upstream_cx_max_requests\"},\n      {\"cluster.cluster_2.upstream_rq_rx_reset\", \"cluster.upstream_rq_rx_reset\"},\n      {\"cluster.cluster_2.upstream_rq_pending_total\", \"cluster.upstream_rq_pending_total\"},\n      {\"cluster.cluster_2.upstream_rq_pending_overflow\", \"cluster.upstream_rq_pending_overflow\"},\n      {\"cluster.cluster_2.upstream_rq_cancelled\", \"cluster.upstream_rq_cancelled\"},\n      {\"cluster.cluster_2.lb_zone_cluster_too_small\", \"cluster.lb_zone_cluster_too_small\"},\n      {\"cluster.cluster_2.upstream_rq_tx_reset\", \"cluster.upstream_rq_tx_reset\"},\n      {\"cluster.cluster_2.ssl.session_reused\", \"cluster.ssl.session_reused\"},\n      {\"cluster.cluster_2.membership_change\", \"cluster.membership_change\"},\n      {\"cluster.cluster_2.upstream_rq_retry_success\", \"cluster.upstream_rq_retry_success\"},\n      {\"cluster.cluster_2.upstream_flow_control_paused_reading_total\",\n       \"cluster.upstream_flow_control_paused_reading_total\"},\n      
{\"cluster.cluster_2.upstream_flow_control_resumed_reading_total\",\n       \"cluster.upstream_flow_control_resumed_reading_total\"},\n      {\"cluster.cluster_2.upstream_flow_control_backed_up_total\",\n       \"cluster.upstream_flow_control_backed_up_total\"},\n      {\"cluster.cluster_2.ssl.handshake\", \"cluster.ssl.handshake\"},\n      {\"cluster.cluster_2.upstream_cx_destroy_local\", \"cluster.upstream_cx_destroy_local\"},\n      {\"cluster.cluster_2.bind_errors\", \"cluster.bind_errors\"},\n      {\"cluster.cluster_1.ssl.fail_verify_cert_hash\", \"cluster.ssl.fail_verify_cert_hash\"},\n      {\"cluster.cluster_1.ssl.fail_verify_san\", \"cluster.ssl.fail_verify_san\"},\n      {\"cluster.cluster_1.ssl.session_reused\", \"cluster.ssl.session_reused\"},\n      {\"cluster.cluster_1.ssl.handshake\", \"cluster.ssl.handshake\"},\n      {\"cluster.cluster_1.update_empty\", \"cluster.update_empty\"},\n      {\"cluster.cluster_1.update_failure\", \"cluster.update_failure\"},\n      {\"cluster.cluster_1.update_success\", \"cluster.update_success\"},\n      {\"cluster.cluster_1.update_attempt\", \"cluster.update_attempt\"},\n      {\"cluster.cluster_1.retry_or_shadow_abandoned\", \"cluster.retry_or_shadow_abandoned\"},\n      {\"cluster.cluster_1.upstream_cx_close_notify\", \"cluster.upstream_cx_close_notify\"},\n      {\"cluster.cluster_1.upstream_cx_destroy_local_with_active_rq\",\n       \"cluster.upstream_cx_destroy_local_with_active_rq\"},\n      {\"cluster.cluster_1.lb_zone_routing_sampled\", \"cluster.lb_zone_routing_sampled\"},\n      {\"cluster.cluster_1.upstream_cx_destroy_with_active_rq\",\n       \"cluster.upstream_cx_destroy_with_active_rq\"},\n      {\"cluster.cluster_1.upstream_cx_overflow\", \"cluster.upstream_cx_overflow\"},\n      {\"cluster.cluster_1.lb_zone_no_capacity_left\", \"cluster.lb_zone_no_capacity_left\"},\n      {\"cluster.cluster_1.upstream_cx_connect_fail\", \"cluster.upstream_cx_connect_fail\"},\n      
{\"cluster.cluster_1.upstream_cx_connect_timeout\", \"cluster.upstream_cx_connect_timeout\"},\n      {\"cluster.cluster_1.lb_zone_number_differs\", \"cluster.lb_zone_number_differs\"},\n      {\"cluster.cluster_1.upstream_rq_maintenance_mode\", \"cluster.upstream_rq_maintenance_mode\"},\n      {\"cluster.cluster_1.upstream_cx_destroy_local\", \"cluster.upstream_cx_destroy_local\"},\n      {\"cluster.cluster_1.ssl.fail_verify_error\", \"cluster.ssl.fail_verify_error\"},\n      {\"cluster.cluster_1.upstream_cx_http2_total\", \"cluster.upstream_cx_http2_total\"},\n      {\"cluster.cluster_1.lb_healthy_panic\", \"cluster.lb_healthy_panic\"},\n      {\"cluster.cluster_1.ssl.fail_verify_no_cert\", \"cluster.ssl.fail_verify_no_cert\"},\n      {\"cluster.cluster_1.ssl.no_certificate\", \"cluster.ssl.no_certificate\"},\n      {\"cluster.cluster_1.upstream_rq_retry_overflow\", \"cluster.upstream_rq_retry_overflow\"},\n      {\"cluster.cluster_1.lb_local_cluster_not_ok\", \"cluster.lb_local_cluster_not_ok\"},\n      {\"cluster.cluster_1.lb_recalculate_zone_structures\",\n       \"cluster.lb_recalculate_zone_structures\"},\n      {\"cluster.cluster_1.lb_zone_routing_all_directly\", \"cluster.lb_zone_routing_all_directly\"},\n      {\"cluster.cluster_1.upstream_cx_http1_total\", \"cluster.upstream_cx_http1_total\"},\n      {\"cluster.cluster_1.upstream_rq_pending_total\", \"cluster.upstream_rq_pending_total\"},\n      {\"cluster.cluster_1.lb_zone_routing_cross_zone\", \"cluster.lb_zone_routing_cross_zone\"},\n      {\"cluster.cluster_1.upstream_cx_total\", \"cluster.upstream_cx_total\"},\n      {\"cluster.cluster_1.bind_errors\", \"cluster.bind_errors\"},\n      {\"cluster.cluster_1.upstream_cx_destroy_remote\", \"cluster.upstream_cx_destroy_remote\"},\n      {\"cluster.cluster_1.upstream_rq_rx_reset\", \"cluster.upstream_rq_rx_reset\"},\n      {\"cluster.cluster_1.upstream_cx_tx_bytes_total\", \"cluster.upstream_cx_tx_bytes_total\"},\n      
{\"cluster.cluster_1.ssl.connection_error\", \"cluster.ssl.connection_error\"},\n      {\"cluster.cluster_1.upstream_rq_tx_reset\", \"cluster.upstream_rq_tx_reset\"},\n      {\"cluster.cluster_1.upstream_cx_destroy\", \"cluster.upstream_cx_destroy\"},\n      {\"cluster.cluster_1.upstream_cx_protocol_error\", \"cluster.upstream_cx_protocol_error\"},\n      {\"cluster.cluster_1.upstream_cx_max_requests\", \"cluster.upstream_cx_max_requests\"},\n      {\"cluster.cluster_1.upstream_cx_rx_bytes_total\", \"cluster.upstream_cx_rx_bytes_total\"},\n      {\"cluster.cluster_1.upstream_rq_cancelled\", \"cluster.upstream_rq_cancelled\"},\n      {\"cluster.cluster_1.upstream_cx_none_healthy\", \"cluster.upstream_cx_none_healthy\"},\n      {\"cluster.cluster_1.upstream_rq_timeout\", \"cluster.upstream_rq_timeout\"},\n      {\"cluster.cluster_1.upstream_rq_pending_overflow\", \"cluster.upstream_rq_pending_overflow\"},\n      {\"cluster.cluster_1.upstream_rq_per_try_timeout\", \"cluster.upstream_rq_per_try_timeout\"},\n      {\"cluster.cluster_1.upstream_rq_total\", \"cluster.upstream_rq_total\"},\n      {\"cluster.cluster_1.upstream_cx_destroy_remote_with_active_rq\",\n       \"cluster.upstream_cx_destroy_remote_with_active_rq\"},\n      {\"cluster.cluster_1.upstream_rq_pending_failure_eject\",\n       \"cluster.upstream_rq_pending_failure_eject\"},\n      {\"cluster.cluster_1.upstream_rq_retry\", \"cluster.upstream_rq_retry\"},\n      {\"cluster.cluster_1.upstream_rq_retry_success\", \"cluster.upstream_rq_retry_success\"},\n      {\"cluster.cluster_1.lb_zone_cluster_too_small\", \"cluster.lb_zone_cluster_too_small\"},\n      {\"cluster.cluster_1.upstream_flow_control_paused_reading_total\",\n       \"cluster.upstream_flow_control_paused_reading_total\"},\n      {\"cluster.cluster_1.upstream_flow_control_resumed_reading_total\",\n       \"cluster.upstream_flow_control_resumed_reading_total\"},\n      {\"cluster.cluster_1.upstream_flow_control_backed_up_total\",\n       
\"cluster.upstream_flow_control_backed_up_total\"},\n      {\"cluster.cluster_1.upstream_flow_control_drained_total\",\n       \"cluster.upstream_flow_control_drained_total\"},\n      {\"cluster.cluster_1.membership_change\", \"cluster.membership_change\"},\n      {\"listener.admin.downstream_cx_destroy\", \"listener.admin.downstream_cx_destroy\"},\n      {\"listener.admin.downstream_cx_total\", \"listener.admin.downstream_cx_total\"},\n      {\"listener.admin.downstream_cx_proxy_proto_error\",\n       \"listener.admin.downstream_cx_proxy_proto_error\"},\n      {\"server.watchdog_mega_miss\", \"server.watchdog_mega_miss\"},\n      {\"server.watchdog_miss\", \"server.watchdog_miss\"},\n      {\"http.async-client.rq_total\", \"http.rq_total\"},\n      {\"cluster_manager.cluster_added\", \"cluster_manager.cluster_added\"},\n      {\"http.admin.downstream_rq_http2_total\", \"http.downstream_rq_http2_total\"},\n      {\"cluster_manager.cluster_removed\", \"cluster_manager.cluster_removed\"},\n      {\"http.admin.downstream_cx_destroy_remote\", \"http.downstream_cx_destroy_remote\"},\n      {\"http.admin.downstream_rq_http1_total\", \"http.downstream_rq_http1_total\"},\n      {\"http.admin.tracing.tracing.client_enabled\", \"http.tracing.tracing.client_enabled\"},\n      {\"http.admin.downstream_rq_total\", \"http.downstream_rq_total\"},\n      {\"http.admin.tracing.tracing.service_forced\", \"http.tracing.tracing.service_forced\"},\n      {\"http.admin.tracing.tracing.not_traceable\", \"http.tracing.tracing.not_traceable\"},\n      {\"http.admin.downstream_cx_rx_bytes_total\", \"http.downstream_cx_rx_bytes_total\"},\n      {\"http.async-client.no_cluster\", \"http.no_cluster\"},\n      {\"http.admin.downstream_cx_destroy_remote_active_rq\",\n       \"http.downstream_cx_destroy_remote_active_rq\"},\n      {\"http.admin.downstream_cx_destroy_local_active_rq\",\n       \"http.downstream_cx_destroy_local_active_rq\"},\n      {\"filesystem.write_buffered\", 
\"filesystem.write_buffered\"},\n      {\"http.admin.downstream_cx_destroy_active_rq\", \"http.downstream_cx_destroy_active_rq\"},\n      {\"http.admin.downstream_rq_tx_reset\", \"http.downstream_rq_tx_reset\"},\n      {\"http.admin.downstream_flow_control_resumed_reading_total\",\n       \"http.downstream_flow_control_resumed_reading_total\"},\n      {\"http.admin.downstream_cx_total\", \"http.downstream_cx_total\"},\n      {\"http.admin.downstream_rq_3xx\", \"http.downstream_rq_xx\"},\n      {\"http.admin.downstream_cx_idle_timeout\", \"http.downstream_cx_idle_timeout\"},\n      {\"http.admin.downstream_rq_rx_reset\", \"http.downstream_rq_rx_reset\"},\n      {\"http.admin.downstream_cx_ssl_total\", \"http.downstream_cx_ssl_total\"},\n      {\"http.admin.downstream_cx_websocket_total\", \"http.downstream_cx_websocket_total\"},\n      {\"http.admin.downstream_rq_2xx\", \"http.downstream_rq_xx\"},\n      {\"cluster_manager.cluster_modified\", \"cluster_manager.cluster_modified\"},\n      {\"http.admin.downstream_cx_drain_close\", \"http.downstream_cx_drain_close\"},\n      {\"http.admin.downstream_cx_destroy\", \"http.downstream_cx_destroy\"},\n      {\"http.admin.downstream_cx_http1_total\", \"http.downstream_cx_http1_total\"},\n      {\"http.admin.downstream_cx_protocol_error\", \"http.downstream_cx_protocol_error\"},\n      {\"http.admin.downstream_cx_destroy_local\", \"http.downstream_cx_destroy_local\"},\n      {\"listener_manager.listener_added\", \"listener_manager.listener_added\"},\n      {\"filesystem.write_completed\", \"filesystem.write_completed\"},\n      {\"http.admin.downstream_rq_response_before_rq_complete\",\n       \"http.downstream_rq_response_before_rq_complete\"},\n      {\"http.admin.downstream_cx_tx_bytes_total\", \"http.downstream_cx_tx_bytes_total\"},\n      {\"http.admin.downstream_rq_4xx\", \"http.downstream_rq_xx\"},\n      {\"http.admin.downstream_rq_non_relative_path\", \"http.downstream_rq_non_relative_path\"},\n      
{\"http.admin.downstream_rq_ws_on_non_ws_route\", \"http.downstream_rq_ws_on_non_ws_route\"},\n      {\"http.admin.downstream_rq_too_large\", \"http.downstream_rq_too_large\"},\n      {\"http.admin.downstream_rq_5xx\", \"http.downstream_rq_xx\"},\n      {\"http.async-client.no_route\", \"http.no_route\"},\n      {\"http.admin.downstream_flow_control_paused_reading_total\",\n       \"http.downstream_flow_control_paused_reading_total\"},\n      {\"listener_manager.listener_removed\", \"listener_manager.listener_removed\"},\n      {\"listener_manager.listener_create_failure\", \"listener_manager.listener_create_failure\"},\n      {\"http.admin.tracing.tracing.random_sampling\", \"http.tracing.tracing.random_sampling\"},\n      {\"http.async-client.rq_redirect\", \"http.rq_redirect\"},\n      {\"http.admin.tracing.tracing.health_check\", \"http.tracing.tracing.health_check\"},\n      {\"filesystem.flushed_by_timer\", \"filesystem.flushed_by_timer\"},\n      {\"http.admin.downstream_cx_http2_total\", \"http.downstream_cx_http2_total\"},\n      {\"filesystem.reopen_failed\", \"filesystem.reopen_failed\"},\n      {\"listener_manager.listener_modified\", \"listener_manager.listener_modified\"},\n      {\"http.admin.rs_too_large\", \"http.rs_too_large\"},\n      {\"listener_manager.listener_create_success\", \"listener_manager.listener_create_success\"}};\n  tag_extracted_gauge_map = {\n      {listenerStatPrefix(\"downstream_cx_active\"), \"listener.downstream_cx_active\"},\n      {\"http.router.downstream_rq_active\", \"http.downstream_rq_active\"},\n      {\"http.router.downstream_cx_tx_bytes_buffered\", \"http.downstream_cx_tx_bytes_buffered\"},\n      {\"http.router.downstream_cx_http2_active\", \"http.downstream_cx_http2_active\"},\n      {\"http.router.downstream_cx_websocket_active\", \"http.downstream_cx_websocket_active\"},\n      {\"http.router.downstream_cx_rx_bytes_buffered\", \"http.downstream_cx_rx_bytes_buffered\"},\n      
{\"http.router.downstream_cx_http1_active\", \"http.downstream_cx_http1_active\"},\n      {\"http.router.downstream_cx_ssl_active\", \"http.downstream_cx_ssl_active\"},\n      {\"http.router.downstream_cx_active\", \"http.downstream_cx_active\"},\n      {\"cluster.cluster_2.membership_total\", \"cluster.membership_total\"},\n      {\"cluster.cluster_2.membership_healthy\", \"cluster.membership_healthy\"},\n      {\"cluster.cluster_2.max_host_weight\", \"cluster.max_host_weight\"},\n      {\"cluster.cluster_2.upstream_rq_pending_active\", \"cluster.upstream_rq_pending_active\"},\n      {\"cluster.cluster_2.version\", \"cluster.version\"},\n      {\"cluster.cluster_2.upstream_rq_active\", \"cluster.upstream_rq_active\"},\n      {\"cluster.cluster_2.upstream_cx_tx_bytes_buffered\", \"cluster.upstream_cx_tx_bytes_buffered\"},\n      {\"cluster.cluster_2.upstream_cx_rx_bytes_buffered\", \"cluster.upstream_cx_rx_bytes_buffered\"},\n      {\"cluster.cluster_2.upstream_cx_active\", \"cluster.upstream_cx_active\"},\n      {\"cluster.cluster_1.upstream_rq_active\", \"cluster.upstream_rq_active\"},\n      {\"cluster.cluster_1.upstream_rq_pending_active\", \"cluster.upstream_rq_pending_active\"},\n      {\"cluster.cluster_1.upstream_cx_tx_bytes_buffered\", \"cluster.upstream_cx_tx_bytes_buffered\"},\n      {\"cluster.cluster_1.max_host_weight\", \"cluster.max_host_weight\"},\n      {\"cluster.cluster_1.upstream_cx_rx_bytes_buffered\", \"cluster.upstream_cx_rx_bytes_buffered\"},\n      {\"cluster.cluster_1.version\", \"cluster.version\"},\n      {\"cluster.cluster_1.membership_total\", \"cluster.membership_total\"},\n      {\"cluster.cluster_1.membership_healthy\", \"cluster.membership_healthy\"},\n      {\"cluster.cluster_1.upstream_cx_active\", \"cluster.upstream_cx_active\"},\n      {\"listener.admin.downstream_cx_active\", \"listener.admin.downstream_cx_active\"},\n      {\"cluster_manager.total_clusters\", \"cluster_manager.total_clusters\"},\n      
{\"listener_manager.total_listeners_warming\", \"listener_manager.total_listeners_warming\"},\n      {\"listener_manager.total_listeners_active\", \"listener_manager.total_listeners_active\"},\n      {\"http.admin.downstream_rq_active\", \"http.downstream_rq_active\"},\n      {\"http.admin.downstream_cx_tx_bytes_buffered\", \"http.downstream_cx_tx_bytes_buffered\"},\n      {\"http.admin.downstream_cx_rx_bytes_buffered\", \"http.downstream_cx_rx_bytes_buffered\"},\n      {\"http.admin.downstream_cx_websocket_active\", \"http.downstream_cx_websocket_active\"},\n      {\"http.admin.downstream_cx_http1_active\", \"http.downstream_cx_http1_active\"},\n      {\"server.uptime\", \"server.uptime\"},\n      {\"server.memory_allocated\", \"server.memory_allocated\"},\n      {\"http.admin.downstream_cx_http2_active\", \"http.downstream_cx_http2_active\"},\n      {\"server.memory_heap_size\", \"server.memory_heap_size\"},\n      {\"server.memory_physical_size\", \"server.memory_physical_size\"},\n      {\"listener_manager.total_listeners_draining\", \"listener_manager.total_listeners_draining\"},\n      {\"filesystem.write_total_buffered\", \"filesystem.write_total_buffered\"},\n      {\"http.admin.downstream_cx_ssl_active\", \"http.downstream_cx_ssl_active\"},\n      {\"http.admin.downstream_cx_active\", \"http.downstream_cx_active\"},\n      {\"server.live\", \"server.live\"},\n      {\"server.parent_connections\", \"server.parent_connections\"},\n      {\"server.total_connections\", \"server.total_connections\"},\n      {\"server.days_until_first_cert_expiring\", \"server.days_until_first_cert_expiring\"},\n      {\"server.seconds_until_first_ocsp_response_expiring\",\n       \"server.seconds_until_first_ocsp_response_expiring\"},\n      {\"server.version\", \"server.version\"}};\n\n  auto test_name_against_mapping =\n      [](const absl::node_hash_map<std::string, std::string>& extracted_name_map,\n         const Stats::Metric& metric) {\n        auto it = 
extracted_name_map.find(metric.name());\n        // Ignore any metrics that are not found in the map for ease of addition\n        if (it != extracted_name_map.end()) {\n          // Check that the tag extracted name matches the \"golden\" state.\n          EXPECT_EQ(it->second, metric.tagExtractedName());\n        }\n      };\n\n  for (const Stats::CounterSharedPtr& counter : test_server_->counters()) {\n    test_name_against_mapping(tag_extracted_counter_map, *counter);\n  }\n\n  for (const Stats::GaugeSharedPtr& gauge : test_server_->gauges()) {\n    test_name_against_mapping(tag_extracted_gauge_map, *gauge);\n  }\n}\n} // namespace Xfcc\n} // namespace Envoy\n"
  },
  {
    "path": "test/integration/xfcc_integration_test.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n\n#include \"test/config/integration/certs/clientcert_hash.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/integration/server.h\"\n#include \"test/mocks/server/transport_socket_factory_context.h\"\n\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/str_replace.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Xfcc {\n\nclass XfccIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,\n                            public HttpIntegrationTest {\npublic:\n  const std::string previous_xfcc_ =\n      \"By=spiffe://lyft.com/frontend;Hash=123456;URI=spiffe://lyft.com/testclient\";\n  const std::string current_xfcc_by_hash_ =\n      \"By=spiffe://lyft.com/\"\n      \"backend-team;Hash=\" +\n      absl::AsciiStrToLower(absl::StrReplaceAll(TEST_CLIENT_CERT_HASH, {{\":\", \"\"}}));\n  const std::string client_subject_ =\n      \"Subject=\\\"\"\n      \"emailAddress=frontend-team@lyft.com,CN=Test Frontend Team,\"\n      \"OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US\\\"\";\n  const std::string client_uri_san_ = \"URI=spiffe://lyft.com/frontend-team\";\n  const std::string client_dns_san_ = \"DNS=lyft.com;DNS=www.lyft.com\";\n\n  XfccIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {\n    ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_));\n  }\n\n  void initialize() override;\n  void createUpstreams() override;\n\n  void TearDown() override;\n\n  Network::TransportSocketFactoryPtr createUpstreamSslContext();\n  Network::TransportSocketFactoryPtr createClientSslContext(bool mtls);\n  Network::ClientConnectionPtr makeTcpClientConnection();\n  Network::ClientConnectionPtr 
makeTlsClientConnection();\n  Network::ClientConnectionPtr makeMtlsClientConnection();\n  void testRequestAndResponseWithXfccHeader(std::string privous_xfcc, std::string expected_xfcc);\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      ForwardClientCertDetails fcc_;\n  envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::\n      SetCurrentClientCertDetails sccd_;\n  bool tls_ = true;\n\nprivate:\n  std::unique_ptr<Ssl::ContextManager> context_manager_;\n  Network::TransportSocketFactoryPtr client_tls_ssl_ctx_;\n  Network::TransportSocketFactoryPtr client_mtls_ssl_ctx_;\n  Network::TransportSocketFactoryPtr upstream_ssl_ctx_;\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context_;\n};\n} // namespace Xfcc\n} // namespace Envoy\n"
  },
  {
    "path": "test/main.cc",
    "content": "// NOLINT(namespace-envoy)\n#include \"envoy/thread/thread.h\"\n\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n#include \"test/test_runner.h\"\n\n#include \"tools/cpp/runfiles/runfiles.h\"\n\nusing bazel::tools::cpp::runfiles::Runfiles;\n// The main entry point (and the rest of this file) should have no logic in it,\n// this allows overriding by site specific versions of main.cc.\nint main(int argc, char** argv) {\n  Envoy::TestEnvironment::initializeTestMain(argv[0]);\n\n  // Create a Runfiles object for runfiles lookup.\n  // https://github.com/bazelbuild/bazel/blob/master/tools/cpp/runfiles/runfiles_src.h#L32\n  std::string error;\n  std::unique_ptr<Runfiles> runfiles(Runfiles::Create(argv[0], &error));\n  RELEASE_ASSERT(Envoy::TestEnvironment::getOptionalEnvVar(\"NORUNFILES\").has_value() ||\n                     runfiles != nullptr,\n                 error);\n\n  Envoy::TestEnvironment::setRunfiles(runfiles.get());\n\n  // Select whether to test only for IPv4, IPv6, or both. The default is to\n  // test for both. Options are {\"v4only\", \"v6only\", \"all\"}. Set\n  // ENVOY_IP_TEST_VERSIONS to \"v4only\" if the system currently does not support IPv6 network\n  // operations. Similarly set ENVOY_IP_TEST_VERSIONS to \"v6only\" if IPv4 has already been\n  // phased out of network operations. Set to \"all\" (or don't set) if testing both\n  // v4 and v6 addresses is desired. This feature is in progress and will be rolled out to all tests\n  // in upcoming PRs.\n  Envoy::TestEnvironment::setEnvVar(\"ENVOY_IP_TEST_VERSIONS\", \"all\", 0);\n  return Envoy::TestRunner::RunTests(argc, argv);\n}\n"
  },
  {
    "path": "test/mocks/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"common_lib\",\n    srcs = [\"common.cc\"],\n    hdrs = [\"common.h\"],\n    deps = [\n        \"//include/envoy/common:conn_pool_interface\",\n        \"//include/envoy/common:random_generator_interface\",\n        \"//include/envoy/common:time_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//test/test_common:test_time_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/access_log/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"access_log_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/access_log/mocks.cc",
    "content": "#include \"test/mocks/access_log/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace AccessLog {\n\nMockAccessLogFile::MockAccessLogFile() = default;\nMockAccessLogFile::~MockAccessLogFile() = default;\n\nMockFilter::MockFilter() = default;\nMockFilter::~MockFilter() = default;\n\nMockAccessLogManager::MockAccessLogManager() {\n  ON_CALL(*this, createAccessLog(_)).WillByDefault(Return(file_));\n}\n\nMockAccessLogManager::~MockAccessLogManager() = default;\n\nMockInstance::MockInstance() = default;\nMockInstance::~MockInstance() = default;\n\n} // namespace AccessLog\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/access_log/mocks.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace AccessLog {\n\nclass MockAccessLogFile : public AccessLogFile {\npublic:\n  MockAccessLogFile();\n  ~MockAccessLogFile() override;\n\n  // AccessLog::AccessLogFile\n  MOCK_METHOD(void, write, (absl::string_view data));\n  MOCK_METHOD(void, reopen, ());\n  MOCK_METHOD(void, flush, ());\n};\n\nclass MockFilter : public Filter {\npublic:\n  MockFilter();\n  ~MockFilter() override;\n\n  // AccessLog::Filter\n  MOCK_METHOD(bool, evaluate,\n              (const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers,\n               const Http::ResponseHeaderMap& response_headers,\n               const Http::ResponseTrailerMap& response_trailers),\n              (const));\n};\n\nclass MockAccessLogManager : public AccessLogManager {\npublic:\n  MockAccessLogManager();\n  ~MockAccessLogManager() override;\n\n  // AccessLog::AccessLogManager\n  MOCK_METHOD(void, reopen, ());\n  MOCK_METHOD(AccessLogFileSharedPtr, createAccessLog, (const std::string& file_name));\n\n  std::shared_ptr<MockAccessLogFile> file_{new testing::NiceMock<MockAccessLogFile>()};\n};\n\nclass MockInstance : public Instance {\npublic:\n  MockInstance();\n  ~MockInstance() override;\n\n  // AccessLog::Instance\n  MOCK_METHOD(void, log,\n              (const Http::RequestHeaderMap* request_headers,\n               const Http::ResponseHeaderMap* response_headers,\n               const Http::ResponseTrailerMap* response_trailers,\n               const StreamInfo::StreamInfo& stream_info));\n};\n\n} // namespace AccessLog\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/api/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n    \"envoy_select_hot_restart\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"api_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"] + envoy_select_hot_restart([\"hot_restart.h\"]),\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//include/envoy/api:os_sys_calls_interface\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//test/mocks/filesystem:filesystem_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:test_time_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/api/hot_restart.h",
    "content": "#pragma once\n\n#include \"envoy/api/os_sys_calls.h\"\n\n#include \"common/api/os_sys_calls_impl_hot_restart.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nclass MockHotRestartOsSysCalls : public HotRestartOsSysCallsImpl {\npublic:\n  // Api::HotRestartOsSysCalls\n  MOCK_METHOD(SysCallIntResult, shmOpen, (const char*, int, mode_t));\n  MOCK_METHOD(SysCallIntResult, shmUnlink, (const char*));\n};\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/api/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/lock_guard.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Api {\n\nMockApi::MockApi() {\n  ON_CALL(*this, fileSystem()).WillByDefault(ReturnRef(file_system_));\n  ON_CALL(*this, rootScope()).WillByDefault(ReturnRef(stats_store_));\n  ON_CALL(*this, randomGenerator()).WillByDefault(ReturnRef(random_));\n}\n\nMockApi::~MockApi() = default;\n\nEvent::DispatcherPtr MockApi::allocateDispatcher(const std::string& name) {\n  return Event::DispatcherPtr{allocateDispatcher_(name, time_system_)};\n}\nEvent::DispatcherPtr MockApi::allocateDispatcher(const std::string& name,\n                                                 Buffer::WatermarkFactoryPtr&& watermark_factory) {\n  return Event::DispatcherPtr{\n      allocateDispatcher_(name, std::move(watermark_factory), time_system_)};\n}\n\nMockOsSysCalls::MockOsSysCalls() {\n  ON_CALL(*this, close(_)).WillByDefault(Invoke([](os_fd_t fd) {\n#ifdef WIN32\n    int rc = ::closesocket(fd);\n    int last_error = ::GetLastError();\n    // It might be the case that the fd is not actually a socket. In that case Winsock api is\n    // failing with error `WSAENOTSOCK`. 
In that case we fall back to a regular close.\n    if (last_error == WSAENOTSOCK) {\n      rc = ::close(fd);\n      last_error = ::GetLastError();\n    }\n    return SysCallIntResult{rc, last_error};\n#else\n    const int rc = ::close(fd);\n    return SysCallIntResult{rc, errno};\n#endif\n  }));\n}\n\nMockOsSysCalls::~MockOsSysCalls() = default;\n\nSysCallIntResult MockOsSysCalls::setsockopt(os_fd_t sockfd, int level, int optname,\n                                            const void* optval, socklen_t optlen) {\n  ASSERT(optlen == sizeof(int));\n\n  // Allow mocking system call failure.\n  if (setsockopt_(sockfd, level, optname, optval, optlen) != 0) {\n    return SysCallIntResult{-1, 0};\n  }\n\n  boolsockopts_[SockOptKey(sockfd, level, optname)] = !!*reinterpret_cast<const int*>(optval);\n  return SysCallIntResult{0, 0};\n};\n\nSysCallIntResult MockOsSysCalls::getsockopt(os_fd_t sockfd, int level, int optname, void* optval,\n                                            socklen_t* optlen) {\n  int val = 0;\n  const auto& it = boolsockopts_.find(SockOptKey(sockfd, level, optname));\n  if (it != boolsockopts_.end()) {\n    val = it->second;\n  }\n  // Allow mocking system call failure.\n  if (getsockopt_(sockfd, level, optname, optval, optlen) != 0) {\n    return {-1, 0};\n  }\n  *reinterpret_cast<int*>(optval) = val;\n  return {0, 0};\n}\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/api/mocks.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/api/os_sys_calls.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n\n#if defined(__linux__)\n#include \"common/api/os_sys_calls_impl_linux.h\"\n#endif\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/filesystem/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/test_time.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Api {\n\nclass MockApi : public Api {\npublic:\n  MockApi();\n  ~MockApi() override;\n\n  // Api::Api\n  Event::DispatcherPtr allocateDispatcher(const std::string& name) override;\n  Event::DispatcherPtr allocateDispatcher(const std::string& name,\n                                          Buffer::WatermarkFactoryPtr&& watermark_factory) override;\n  TimeSource& timeSource() override { return time_system_; }\n\n  MOCK_METHOD(Event::Dispatcher*, allocateDispatcher_, (const std::string&, Event::TimeSystem&));\n  MOCK_METHOD(Event::Dispatcher*, allocateDispatcher_,\n              (const std::string&, Buffer::WatermarkFactoryPtr&& watermark_factory,\n               Event::TimeSystem&));\n  MOCK_METHOD(Filesystem::Instance&, fileSystem, ());\n  MOCK_METHOD(Thread::ThreadFactory&, threadFactory, ());\n  MOCK_METHOD(const Stats::Scope&, rootScope, ());\n  MOCK_METHOD(Random::RandomGenerator&, randomGenerator, ());\n  MOCK_METHOD(ProcessContextOptRef, processContext, ());\n\n  testing::NiceMock<Filesystem::MockInstance> file_system_;\n  Event::GlobalTimeSystem time_system_;\n  testing::NiceMock<Stats::MockIsolatedStatsStore> stats_store_;\n  testing::NiceMock<Random::MockRandomGenerator> random_;\n};\n\nclass MockOsSysCalls : public OsSysCallsImpl {\npublic:\n  MockOsSysCalls();\n  ~MockOsSysCalls() override;\n\n  // Api::OsSysCalls\n  SysCallIntResult setsockopt(os_fd_t sockfd, int level, int optname, const void* 
optval,\n                              socklen_t optlen) override;\n  SysCallIntResult getsockopt(os_fd_t sockfd, int level, int optname, void* optval,\n                              socklen_t* optlen) override;\n\n  MOCK_METHOD(SysCallSocketResult, accept, (os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen));\n  MOCK_METHOD(SysCallIntResult, bind, (os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen));\n  MOCK_METHOD(SysCallIntResult, ioctl, (os_fd_t sockfd, unsigned long int request, void* argp));\n  MOCK_METHOD(SysCallIntResult, close, (os_fd_t));\n  MOCK_METHOD(SysCallSizeResult, writev, (os_fd_t, const iovec*, int));\n  MOCK_METHOD(SysCallSizeResult, sendmsg, (os_fd_t fd, const msghdr* msg, int flags));\n  MOCK_METHOD(SysCallSizeResult, readv, (os_fd_t, const iovec*, int));\n  MOCK_METHOD(SysCallSizeResult, recv, (os_fd_t socket, void* buffer, size_t length, int flags));\n  MOCK_METHOD(SysCallSizeResult, recvmsg, (os_fd_t socket, msghdr* msg, int flags));\n  MOCK_METHOD(SysCallIntResult, recvmmsg,\n              (os_fd_t socket, struct mmsghdr* msgvec, unsigned int vlen, int flags,\n               struct timespec* timeout));\n  MOCK_METHOD(SysCallIntResult, ftruncate, (int fd, off_t length));\n  MOCK_METHOD(SysCallPtrResult, mmap,\n              (void* addr, size_t length, int prot, int flags, int fd, off_t offset));\n  MOCK_METHOD(SysCallIntResult, stat, (const char* name, struct stat* stat));\n  MOCK_METHOD(SysCallIntResult, chmod, (const std::string& name, mode_t mode));\n  MOCK_METHOD(int, setsockopt_,\n              (os_fd_t sockfd, int level, int optname, const void* optval, socklen_t optlen));\n  MOCK_METHOD(int, getsockopt_,\n              (os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen));\n  MOCK_METHOD(SysCallSocketResult, socket, (int domain, int type, int protocol));\n  MOCK_METHOD(SysCallIntResult, gethostname, (char* name, size_t length));\n  MOCK_METHOD(SysCallIntResult, getsockname, (os_fd_t sockfd, sockaddr* name, 
socklen_t* namelen));\n  MOCK_METHOD(SysCallIntResult, getpeername, (os_fd_t sockfd, sockaddr* name, socklen_t* namelen));\n  MOCK_METHOD(SysCallIntResult, setsocketblocking, (os_fd_t sockfd, bool block));\n  MOCK_METHOD(SysCallIntResult, connect, (os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen));\n  MOCK_METHOD(SysCallIntResult, shutdown, (os_fd_t sockfd, int how));\n  MOCK_METHOD(SysCallIntResult, socketpair, (int domain, int type, int protocol, os_fd_t sv[2]));\n  MOCK_METHOD(SysCallIntResult, listen, (os_fd_t sockfd, int backlog));\n  MOCK_METHOD(SysCallSizeResult, write, (os_fd_t sockfd, const void* buffer, size_t length));\n  MOCK_METHOD(bool, supportsMmsg, (), (const));\n  MOCK_METHOD(bool, supportsUdpGro, (), (const));\n  MOCK_METHOD(bool, supportsIpTransparent, (), (const));\n\n  // Map from (sockfd,level,optname) to boolean socket option.\n  using SockOptKey = std::tuple<os_fd_t, int, int>;\n  std::map<SockOptKey, bool> boolsockopts_;\n};\n\n#if defined(__linux__)\nclass MockLinuxOsSysCalls : public LinuxOsSysCallsImpl {\npublic:\n  // Api::LinuxOsSysCalls\n  MOCK_METHOD(SysCallIntResult, sched_getaffinity, (pid_t pid, size_t cpusetsize, cpu_set_t* mask));\n};\n#endif\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/buffer/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"buffer_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/buffer:watermark_buffer_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/buffer/mocks.cc",
    "content": "#include \"test/mocks/buffer/mocks.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\n\ntemplate <>\nMockBufferBase<Buffer::WatermarkBuffer>::MockBufferBase(std::function<void()> below_low,\n                                                        std::function<void()> above_high,\n                                                        std::function<void()> above_overflow)\n    : Buffer::WatermarkBuffer(below_low, above_high, above_overflow) {}\n\ntemplate <>\nMockBufferBase<Buffer::WatermarkBuffer>::MockBufferBase()\n    : Buffer::WatermarkBuffer([&]() -> void {}, [&]() -> void {}, [&]() -> void {}) {\n  ASSERT(0); // This constructor is not supported for WatermarkBuffer.\n}\ntemplate <>\nMockBufferBase<Buffer::OwnedImpl>::MockBufferBase(std::function<void()>, std::function<void()>,\n                                                  std::function<void()>)\n    : Buffer::OwnedImpl() {\n  ASSERT(0); // This constructor is not supported for OwnedImpl.\n}\n\ntemplate <> MockBufferBase<Buffer::OwnedImpl>::MockBufferBase() : Buffer::OwnedImpl() {}\n\nMockBufferFactory::MockBufferFactory() = default;\nMockBufferFactory::~MockBufferFactory() = default;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/buffer/mocks.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/buffer/watermark_buffer.h\"\n#include \"common/network/io_socket_error_impl.h\"\n\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\n\n// A template class to allow code reuse between MockBuffer and MockWatermarkBuffer\ntemplate <class BaseClass> class MockBufferBase : public BaseClass {\npublic:\n  MockBufferBase();\n  MockBufferBase(std::function<void()> below_low, std::function<void()> above_high,\n                 std::function<void()> above_overflow);\n\n  MOCK_METHOD(void, move, (Buffer::Instance & rhs));\n  MOCK_METHOD(void, move, (Buffer::Instance & rhs, uint64_t length));\n  MOCK_METHOD(void, drain, (uint64_t size));\n\n  void baseMove(Buffer::Instance& rhs) { BaseClass::move(rhs); }\n  void baseDrain(uint64_t size) { BaseClass::drain(size); }\n\n  void trackDrains(uint64_t size) {\n    bytes_drained_ += size;\n    BaseClass::drain(size);\n  }\n\n  uint64_t bytesDrained() const { return bytes_drained_; }\n\nprivate:\n  uint64_t bytes_drained_{0};\n};\n\ntemplate <>\nMockBufferBase<Buffer::WatermarkBuffer>::MockBufferBase(std::function<void()> below_low,\n                                                        std::function<void()> above_high,\n                                                        std::function<void()> above_overflow);\ntemplate <> MockBufferBase<Buffer::WatermarkBuffer>::MockBufferBase();\n\ntemplate <>\nMockBufferBase<Buffer::OwnedImpl>::MockBufferBase(std::function<void()> below_low,\n                                                  std::function<void()> above_high,\n                                                  std::function<void()> above_overflow);\ntemplate <> MockBufferBase<Buffer::OwnedImpl>::MockBufferBase();\n\nclass MockBuffer : public MockBufferBase<Buffer::OwnedImpl> {\npublic:\n  MockBuffer() {\n    ON_CALL(*this, 
move(testing::_)).WillByDefault(testing::Invoke(this, &MockBuffer::baseMove));\n  }\n};\n\nclass MockWatermarkBuffer : public MockBufferBase<Buffer::WatermarkBuffer> {\npublic:\n  using BaseClass = MockBufferBase<Buffer::WatermarkBuffer>;\n\n  MockWatermarkBuffer(std::function<void()> below_low, std::function<void()> above_high,\n                      std::function<void()> above_overflow)\n      : BaseClass(below_low, above_high, above_overflow) {\n    ON_CALL(*this, move(testing::_))\n        .WillByDefault(testing::Invoke(this, &MockWatermarkBuffer::baseMove));\n  }\n};\n\nclass MockBufferFactory : public Buffer::WatermarkFactory {\npublic:\n  MockBufferFactory();\n  ~MockBufferFactory() override;\n\n  Buffer::InstancePtr create(std::function<void()> below_low, std::function<void()> above_high,\n                             std::function<void()> above_overflow) override {\n    return Buffer::InstancePtr{create_(below_low, above_high, above_overflow)};\n  }\n\n  MOCK_METHOD(Buffer::Instance*, create_,\n              (std::function<void()> below_low, std::function<void()> above_high,\n               std::function<void()> above_overflow));\n};\n\nMATCHER_P(BufferEqual, rhs, testing::PrintToString(*rhs)) {\n  return TestUtility::buffersEqual(arg, *rhs);\n}\n\nMATCHER_P(BufferStringEqual, rhs, rhs) {\n  *result_listener << \"\\\"\" << arg.toString() << \"\\\"\";\n\n  Buffer::OwnedImpl buffer(rhs);\n  return TestUtility::buffersEqual(arg, buffer);\n}\n\nMATCHER_P(BufferStringContains, rhs,\n          std::string(negation ? 
\"doesn't contain\" : \"contains\") + \" \\\"\" + rhs + \"\\\"\") {\n  *result_listener << \"\\\"\" << arg.toString() << \"\\\"\";\n  return arg.toString().find(rhs) != std::string::npos;\n}\n\nACTION_P(AddBufferToString, target_string) {\n  auto bufferToString = [](const Buffer::OwnedImpl& buf) -> std::string { return buf.toString(); };\n  target_string->append(bufferToString(arg0));\n  arg0.drain(arg0.length());\n}\n\nACTION_P(AddBufferToStringWithoutDraining, target_string) {\n  target_string->append(arg0.toString());\n}\n\nMATCHER_P(RawSliceVectorEqual, rhs, testing::PrintToString(rhs)) {\n  return TestUtility::rawSlicesEqual(arg, rhs.data(), rhs.size());\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/common.cc",
    "content": "#include \"test/mocks/common.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace ConnectionPool {\nMockCancellable::MockCancellable() = default;\nMockCancellable::~MockCancellable() = default;\n} // namespace ConnectionPool\n\nnamespace Random {\n\nMockRandomGenerator::MockRandomGenerator() { ON_CALL(*this, uuid()).WillByDefault(Return(uuid_)); }\n\nMockRandomGenerator::~MockRandomGenerator() = default;\n\n} // namespace Random\n\nReadyWatcher::ReadyWatcher() = default;\nReadyWatcher::~ReadyWatcher() = default;\n\nMockTimeSystem::MockTimeSystem() = default;\nMockTimeSystem::~MockTimeSystem() = default;\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/common.h",
    "content": "#pragma once\n\n#include <cstdint>\n\n#include \"envoy/common/conn_pool.h\"\n#include \"envoy/common/random_generator.h\"\n#include \"envoy/common/scope_tracker.h\"\n#include \"envoy/common/time.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"test/test_common/test_time.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n/**\n * This action allows us to save a reference parameter to a pointer target.\n */\nACTION_P(SaveArgAddress, target) { *target = &arg0; }\n\n/**\n * Matcher that matches on whether the pointee of both lhs and rhs are equal.\n */\nMATCHER_P(PointeesEq, rhs, \"\") {\n  *result_listener << testing::PrintToString(*arg) + \" != \" + testing::PrintToString(*rhs);\n  return *arg == *rhs;\n}\n\n/**\n * Simple mock that just lets us make sure a method gets called or not called form a lambda.\n */\nclass ReadyWatcher {\npublic:\n  ReadyWatcher();\n  ~ReadyWatcher();\n\n  MOCK_METHOD(void, ready, ());\n};\n\n// TODO(jmarantz): get rid of this and use SimulatedTimeSystem in its place.\nclass MockTimeSystem : public Event::TestTimeSystem {\npublic:\n  MockTimeSystem();\n  ~MockTimeSystem() override;\n\n  // TODO(#4160): Eliminate all uses of MockTimeSystem, replacing with SimulatedTimeSystem,\n  // where timer callbacks are triggered by the advancement of time. 
This implementation\n  // matches recent behavior, where real-time timers were created directly in libevent\n  // by dispatcher_impl.cc.\n  Event::SchedulerPtr createScheduler(Event::Scheduler& base_scheduler,\n                                      Event::CallbackScheduler& cb_scheduler) override {\n    return real_time_.createScheduler(base_scheduler, cb_scheduler);\n  }\n  void advanceTimeWaitImpl(const Duration& duration) override {\n    real_time_.advanceTimeWaitImpl(duration);\n  }\n  void advanceTimeAsyncImpl(const Duration& duration) override {\n    real_time_.advanceTimeAsyncImpl(duration);\n  }\n  MOCK_METHOD(SystemTime, systemTime, ());\n  MOCK_METHOD(MonotonicTime, monotonicTime, ());\n\n  Event::TestRealTimeSystem real_time_; // NO_CHECK_FORMAT(real_time)\n};\n\n// Captures absl::string_view parameters into temp strings, for use\n// with gmock's SaveArg<n>. Providing an absl::string_view compiles,\n// but fails because by the time you examine the saved value, its\n// backing store will go out of scope.\nclass StringViewSaver {\npublic:\n  void operator=(absl::string_view view) { value_ = std::string(view); }\n  const std::string& value() const { return value_; }\n  operator std::string() const { return value_; }\n\nprivate:\n  std::string value_;\n};\n\ninline bool operator==(const char* str, const StringViewSaver& saver) {\n  return saver.value() == str;\n}\n\ninline bool operator==(const StringViewSaver& saver, const char* str) {\n  return saver.value() == str;\n}\n\nclass MockScopedTrackedObject : public ScopeTrackedObject {\npublic:\n  MOCK_METHOD(void, dumpState, (std::ostream&, int), (const));\n};\n\nnamespace ConnectionPool {\n\nclass MockCancellable : public Cancellable {\npublic:\n  MockCancellable();\n  ~MockCancellable() override;\n\n  // ConnectionPool::Cancellable\n  MOCK_METHOD(void, cancel, (CancelPolicy cancel_policy));\n};\n} // namespace ConnectionPool\n\nnamespace Random {\nclass MockRandomGenerator : public RandomGenerator 
{\npublic:\n  MockRandomGenerator();\n  ~MockRandomGenerator() override;\n\n  MOCK_METHOD(uint64_t, random, ());\n  MOCK_METHOD(std::string, uuid, ());\n\n  const std::string uuid_{\"a121e9e1-feae-4136-9e0e-6fac343d56c9\"};\n};\n} // namespace Random\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/compression/compressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"compressor_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/compression/compressor:compressor_config_interface\",\n        \"//include/envoy/compression/compressor:compressor_interface\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/compression/compressor/mocks.cc",
    "content": "#include \"test/mocks/compression/compressor/mocks.h\"\n\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Compressor {\n\nMockCompressor::MockCompressor() = default;\nMockCompressor::~MockCompressor() = default;\n\nMockCompressorFactory::MockCompressorFactory() {\n  ON_CALL(*this, statsPrefix()).WillByDefault(ReturnRef(stats_prefix_));\n  ON_CALL(*this, contentEncoding()).WillByDefault(ReturnRef(content_encoding_));\n}\n\nMockCompressorFactory::~MockCompressorFactory() = default;\n\n} // namespace Compressor\n} // namespace Compression\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/compression/compressor/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/compression/compressor/compressor.h\"\n#include \"envoy/compression/compressor/config.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Compressor {\n\nclass MockCompressor : public Compressor {\npublic:\n  MockCompressor();\n  ~MockCompressor() override;\n\n  // Compressor::Compressor\n  MOCK_METHOD(void, compress, (Buffer::Instance & buffer, State state));\n};\n\nclass MockCompressorFactory : public CompressorFactory {\npublic:\n  MockCompressorFactory();\n  ~MockCompressorFactory() override;\n\n  // Compressor::CompressorFactory\n  MOCK_METHOD(CompressorPtr, createCompressor, ());\n  MOCK_METHOD(const std::string&, statsPrefix, (), (const));\n  MOCK_METHOD(const std::string&, contentEncoding, (), (const));\n\n  const std::string stats_prefix_{\"mock\"};\n  const std::string content_encoding_{\"mock\"};\n};\n\n} // namespace Compressor\n} // namespace Compression\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/compression/decompressor/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"decompressor_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/compression/decompressor:decompressor_config_interface\",\n        \"//include/envoy/compression/decompressor:decompressor_interface\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/compression/decompressor/mocks.cc",
    "content": "#include \"test/mocks/compression/decompressor/mocks.h\"\n\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Decompressor {\n\nMockDecompressor::MockDecompressor() = default;\nMockDecompressor::~MockDecompressor() = default;\n\nMockDecompressorFactory::MockDecompressorFactory() {\n  ON_CALL(*this, statsPrefix()).WillByDefault(ReturnRef(stats_prefix_));\n  ON_CALL(*this, contentEncoding()).WillByDefault(ReturnRef(content_encoding_));\n}\n\nMockDecompressorFactory::~MockDecompressorFactory() = default;\n\n} // namespace Decompressor\n} // namespace Compression\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/compression/decompressor/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/compression/decompressor/config.h\"\n#include \"envoy/compression/decompressor/decompressor.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Compression {\nnamespace Decompressor {\n\nclass MockDecompressor : public Decompressor {\npublic:\n  MockDecompressor();\n  ~MockDecompressor() override;\n\n  // Decompressor::Decompressor\n  MOCK_METHOD(void, decompress,\n              (const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer));\n};\n\nclass MockDecompressorFactory : public DecompressorFactory {\npublic:\n  MockDecompressorFactory();\n  ~MockDecompressorFactory() override;\n\n  // Decompressor::DecompressorFactory\n  MOCK_METHOD(DecompressorPtr, createDecompressor, (const std::string&));\n  MOCK_METHOD(const std::string&, statsPrefix, (), (const));\n  MOCK_METHOD(const std::string&, contentEncoding, (), (const));\n\n  const std::string stats_prefix_{\"mock\"};\n  const std::string content_encoding_{\"mock\"};\n};\n\n} // namespace Decompressor\n} // namespace Compression\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/config/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"config_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/config:config_provider_manager_interface\",\n        \"//include/envoy/config:grpc_mux_interface\",\n        \"//include/envoy/config:subscription_interface\",\n        \"//source/common/config:config_provider_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/test_common:resources_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/config/mocks.cc",
    "content": "#include \"test/mocks/config/mocks.h\"\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nMockSubscriptionFactory::MockSubscriptionFactory() {\n  ON_CALL(*this, subscriptionFromConfigSource(_, _, _, _, _))\n      .WillByDefault(testing::Invoke(\n          [this](const envoy::config::core::v3::ConfigSource&, absl::string_view, Stats::Scope&,\n                 SubscriptionCallbacks& callbacks, OpaqueResourceDecoder&) -> SubscriptionPtr {\n            auto ret = std::make_unique<testing::NiceMock<MockSubscription>>();\n            subscription_ = ret.get();\n            callbacks_ = &callbacks;\n            return ret;\n          }));\n  ON_CALL(*this, messageValidationVisitor())\n      .WillByDefault(testing::ReturnRef(ProtobufMessage::getStrictValidationVisitor()));\n}\n\nMockSubscriptionFactory::~MockSubscriptionFactory() = default;\n\nMockGrpcMuxWatch::MockGrpcMuxWatch() = default;\nMockGrpcMuxWatch::~MockGrpcMuxWatch() { cancel(); }\n\nMockGrpcMux::MockGrpcMux() = default;\nMockGrpcMux::~MockGrpcMux() = default;\n\nMockGrpcStreamCallbacks::MockGrpcStreamCallbacks() = default;\nMockGrpcStreamCallbacks::~MockGrpcStreamCallbacks() = default;\n\nMockSubscriptionCallbacks::MockSubscriptionCallbacks() = default;\nMockSubscriptionCallbacks::~MockSubscriptionCallbacks() = default;\n\nMockOpaqueResourceDecoder::MockOpaqueResourceDecoder() = default;\nMockOpaqueResourceDecoder::~MockOpaqueResourceDecoder() = default;\n\nMockUntypedConfigUpdateCallbacks::MockUntypedConfigUpdateCallbacks() = default;\nMockUntypedConfigUpdateCallbacks::~MockUntypedConfigUpdateCallbacks() = default;\n\nMockTypedFactory::~MockTypedFactory() = default;\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/config/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/config/config_provider_manager.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/grpc_mux.h\"\n#include \"envoy/config/subscription.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/config/config_provider_impl.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Config {\n\nclass MockSubscriptionCallbacks : public SubscriptionCallbacks {\npublic:\n  MockSubscriptionCallbacks();\n  ~MockSubscriptionCallbacks() override;\n\n  MOCK_METHOD(void, onConfigUpdate,\n              (const std::vector<DecodedResourceRef>& resources, const std::string& version_info));\n  MOCK_METHOD(void, onConfigUpdate,\n              (const std::vector<DecodedResourceRef>& added_resources,\n               const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n               const std::string& system_version_info));\n  MOCK_METHOD(void, onConfigUpdateFailed,\n              (Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e));\n};\n\nclass MockOpaqueResourceDecoder : public OpaqueResourceDecoder {\npublic:\n  MockOpaqueResourceDecoder();\n  ~MockOpaqueResourceDecoder() override;\n\n  MOCK_METHOD(ProtobufTypes::MessagePtr, decodeResource, (const ProtobufWkt::Any& resource));\n  MOCK_METHOD(std::string, resourceName, (const Protobuf::Message& resource));\n};\n\nclass MockUntypedConfigUpdateCallbacks : public UntypedConfigUpdateCallbacks {\npublic:\n  MockUntypedConfigUpdateCallbacks();\n  ~MockUntypedConfigUpdateCallbacks() override;\n\n  MOCK_METHOD(void, onConfigUpdate,\n              (const Protobuf::RepeatedPtrField<ProtobufWkt::Any>& resources,\n               const std::string& version_info));\n  MOCK_METHOD(\n     
 void, onConfigUpdate,\n      (const Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>& added_resources,\n       const Protobuf::RepeatedPtrField<std::string>& removed_resources,\n       const std::string& system_version_info));\n  MOCK_METHOD(void, onConfigUpdateFailed,\n              (Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e));\n};\n\nclass MockSubscription : public Subscription {\npublic:\n  MOCK_METHOD(void, start,\n              (const std::set<std::string>& resources, const bool use_prefix_matching));\n  MOCK_METHOD(void, updateResourceInterest, (const std::set<std::string>& update_to_these_names));\n  MOCK_METHOD(void, requestOnDemandUpdate, (const std::set<std::string>& add_these_names));\n};\n\nclass MockSubscriptionFactory : public SubscriptionFactory {\npublic:\n  MockSubscriptionFactory();\n  ~MockSubscriptionFactory() override;\n\n  MOCK_METHOD(SubscriptionPtr, subscriptionFromConfigSource,\n              (const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url,\n               Stats::Scope& scope, SubscriptionCallbacks& callbacks,\n               OpaqueResourceDecoder& resource_decoder));\n  MOCK_METHOD(SubscriptionPtr, collectionSubscriptionFromUrl,\n              (const udpa::core::v1::ResourceLocator& collection_locator,\n               const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url,\n               Stats::Scope& scope, SubscriptionCallbacks& callbacks,\n               OpaqueResourceDecoder& resource_decoder));\n  MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ());\n\n  MockSubscription* subscription_{};\n  SubscriptionCallbacks* callbacks_{};\n};\n\nclass MockGrpcMuxWatch : public GrpcMuxWatch {\npublic:\n  MockGrpcMuxWatch();\n  ~MockGrpcMuxWatch() override;\n\n  MOCK_METHOD(void, cancel, ());\n};\n\nclass MockGrpcMux : public GrpcMux {\npublic:\n  MockGrpcMux();\n  ~MockGrpcMux() override;\n\n  MOCK_METHOD(void, 
start, (), (override));\n  MOCK_METHOD(ScopedResume, pause, (const std::string& type_url), (override));\n  MOCK_METHOD(ScopedResume, pause, (const std::vector<std::string> type_urls), (override));\n\n  MOCK_METHOD(void, addSubscription,\n              (const std::set<std::string>& resources, const std::string& type_url,\n               SubscriptionCallbacks& callbacks, SubscriptionStats& stats,\n               std::chrono::milliseconds init_fetch_timeout));\n  MOCK_METHOD(void, updateResourceInterest,\n              (const std::set<std::string>& resources, const std::string& type_url));\n\n  MOCK_METHOD(GrpcMuxWatchPtr, addWatch,\n              (const std::string& type_url, const std::set<std::string>& resources,\n               SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder,\n               const bool use_prefix_matching));\n\n  MOCK_METHOD(void, requestOnDemandUpdate,\n              (const std::string& type_url, const std::set<std::string>& add_these_names));\n};\n\nclass MockGrpcStreamCallbacks\n    : public GrpcStreamCallbacks<envoy::service::discovery::v3::DiscoveryResponse> {\npublic:\n  MockGrpcStreamCallbacks();\n  ~MockGrpcStreamCallbacks() override;\n\n  MOCK_METHOD(void, onStreamEstablished, ());\n  MOCK_METHOD(void, onEstablishmentFailure, ());\n  MOCK_METHOD(void, onDiscoveryResponse,\n              (std::unique_ptr<envoy::service::discovery::v3::DiscoveryResponse> && message,\n               ControlPlaneStats& control_plane_stats));\n  MOCK_METHOD(void, onWriteable, ());\n};\n\nclass MockConfigProviderManager : public ConfigProviderManager {\npublic:\n  MockConfigProviderManager() = default;\n  ~MockConfigProviderManager() override = default;\n\n  MOCK_METHOD(ConfigProviderPtr, createXdsConfigProvider,\n              (const Protobuf::Message& config_source_proto,\n               Server::Configuration::ServerFactoryContext& factory_context,\n               Init::Manager& init_manager, const std::string& stat_prefix,\n         
      const Envoy::Config::ConfigProviderManager::OptionalArg& optarg));\n  MOCK_METHOD(ConfigProviderPtr, createStaticConfigProvider,\n              (const Protobuf::Message& config_proto,\n               Server::Configuration::ServerFactoryContext& factory_context,\n               const Envoy::Config::ConfigProviderManager::OptionalArg& optarg));\n  MOCK_METHOD(ConfigProviderPtr, createStaticConfigProvider,\n              (std::vector<std::unique_ptr<const Protobuf::Message>> && config_protos,\n               Server::Configuration::ServerFactoryContext& factory_context,\n               const Envoy::Config::ConfigProviderManager::OptionalArg& optarg));\n};\n\nclass MockTypedFactory : public TypedFactory {\npublic:\n  ~MockTypedFactory() override;\n\n  MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, ());\n  MOCK_METHOD(std::string, configType, ());\n  MOCK_METHOD(std::string, name, (), (const));\n  MOCK_METHOD(std::string, category, (), (const));\n};\n\n} // namespace Config\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/event/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"event_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/event:deferred_deletable\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:file_event_interface\",\n        \"//include/envoy/event:signal_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//include/envoy/network:connection_handler_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:dns_interface\",\n        \"//include/envoy/network:listener_interface\",\n        \"//include/envoy/ssl:context_interface\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/test_common:test_time_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"wrapped_dispatcher\",\n    hdrs = [\"wrapped_dispatcher.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/event/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Assign;\nusing testing::DoAll;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnNew;\nusing testing::ReturnPointee;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Event {\n\nMockDispatcher::MockDispatcher() : MockDispatcher(\"test_thread\") {}\n\nMockDispatcher::MockDispatcher(const std::string& name) : name_(name) {\n  ON_CALL(*this, initializeStats(_, _)).WillByDefault(Return());\n  ON_CALL(*this, clearDeferredDeleteList()).WillByDefault(Invoke([this]() -> void {\n    to_delete_.clear();\n  }));\n  ON_CALL(*this, createTimer_(_)).WillByDefault(ReturnNew<NiceMock<Event::MockTimer>>());\n  ON_CALL(*this, post(_)).WillByDefault(Invoke([](PostCb cb) -> void { cb(); }));\n}\n\nMockDispatcher::~MockDispatcher() = default;\n\nMockTimer::MockTimer() {\n  ON_CALL(*this, enableTimer(_, _))\n      .WillByDefault(Invoke([&](const std::chrono::milliseconds&, const ScopeTrackedObject* scope) {\n        enabled_ = true;\n        scope_ = scope;\n      }));\n  ON_CALL(*this, disableTimer()).WillByDefault(Assign(&enabled_, false));\n  ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_));\n}\n\n// Ownership of each MockTimer instance is transferred to the (caller of) dispatcher's\n// createTimer_(), so to avoid destructing it twice, the MockTimer must have been dynamically\n// allocated and must not be deleted by it's creator.\nMockTimer::MockTimer(MockDispatcher* dispatcher) : MockTimer() {\n  dispatcher_ = dispatcher;\n  EXPECT_CALL(*dispatcher, createTimer_(_))\n      .WillOnce(DoAll(SaveArg<0>(&callback_), Return(this)))\n      .RetiresOnSaturation();\n}\n\nMockTimer::~MockTimer() {\n  if (timer_destroyed_) {\n    *timer_destroyed_ = true;\n  }\n}\n\nMockSchedulableCallback::~MockSchedulableCallback() = default;\n\nMockSchedulableCallback::MockSchedulableCallback(MockDispatcher* 
dispatcher)\n    : dispatcher_(dispatcher) {\n  EXPECT_CALL(*dispatcher, createSchedulableCallback_(_))\n      .WillOnce(DoAll(SaveArg<0>(&callback_), Return(this)))\n      .RetiresOnSaturation();\n  ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true));\n  ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true));\n  ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false));\n  ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_));\n}\n\nMockSignalEvent::MockSignalEvent(MockDispatcher* dispatcher) {\n  EXPECT_CALL(*dispatcher, listenForSignal_(_, _))\n      .WillOnce(DoAll(SaveArg<1>(&callback_), Return(this)))\n      .RetiresOnSaturation();\n}\n\nMockSignalEvent::~MockSignalEvent() = default;\n\nMockFileEvent::MockFileEvent() = default;\nMockFileEvent::~MockFileEvent() = default;\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/event/mocks.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/event/deferred_deletable.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/file_event.h\"\n#include \"envoy/event/signal.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/connection_handler.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/network/listener.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/ssl/context.h\"\n\n#include \"common/common/scope_tracker.h\"\n\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/test_common/test_time.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nclass MockDispatcher : public Dispatcher {\npublic:\n  MockDispatcher();\n  MockDispatcher(const std::string& name);\n  ~MockDispatcher() override;\n\n  // Dispatcher\n  const std::string& name() override { return name_; }\n  TimeSource& timeSource() override { return time_system_; }\n  Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket,\n                                                Network::TransportSocketPtr&& transport_socket,\n                                                StreamInfo::StreamInfo&) override {\n    // The caller expects both the socket and the transport socket to be moved.\n    socket.reset();\n    transport_socket.reset();\n    return Network::ConnectionPtr{createServerConnection_()};\n  }\n\n  Network::ClientConnectionPtr\n  createClientConnection(Network::Address::InstanceConstSharedPtr address,\n                         Network::Address::InstanceConstSharedPtr source_address,\n                         Network::TransportSocketPtr&& transport_socket,\n                         const Network::ConnectionSocket::OptionsSharedPtr& options) override {\n    return Network::ClientConnectionPtr{\n        createClientConnection_(address, source_address, transport_socket, 
options)};\n  }\n\n  FileEventPtr createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger,\n                               uint32_t events) override {\n    return FileEventPtr{createFileEvent_(fd, cb, trigger, events)};\n  }\n\n  Filesystem::WatcherPtr createFilesystemWatcher() override {\n    return Filesystem::WatcherPtr{createFilesystemWatcher_()};\n  }\n\n  Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket,\n                                      Network::TcpListenerCallbacks& cb, bool bind_to_port,\n                                      uint32_t backlog_size) override {\n    return Network::ListenerPtr{createListener_(std::move(socket), cb, bind_to_port, backlog_size)};\n  }\n\n  Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket,\n                                            Network::UdpListenerCallbacks& cb) override {\n    return Network::UdpListenerPtr{createUdpListener_(socket, cb)};\n  }\n\n  Event::TimerPtr createTimer(Event::TimerCb cb) override {\n    auto timer = Event::TimerPtr{createTimer_(cb)};\n    // Assert that the timer is not null to avoid confusing test failures down the line.\n    ASSERT(timer != nullptr);\n    return timer;\n  }\n\n  Event::SchedulableCallbackPtr createSchedulableCallback(std::function<void()> cb) override {\n    auto schedulable_cb = Event::SchedulableCallbackPtr{createSchedulableCallback_(cb)};\n    // Assert that schedulable_cb is not null to avoid confusing test failures down the line.\n    ASSERT(schedulable_cb != nullptr);\n    return schedulable_cb;\n  }\n\n  void deferredDelete(DeferredDeletablePtr&& to_delete) override {\n    deferredDelete_(to_delete.get());\n    if (to_delete) {\n      to_delete_.push_back(std::move(to_delete));\n    }\n  }\n\n  SignalEventPtr listenForSignal(int signal_num, SignalCb cb) override {\n    return SignalEventPtr{listenForSignal_(signal_num, cb)};\n  }\n\n  // Event::Dispatcher\n  MOCK_METHOD(void, initializeStats, (Stats::Scope&, 
const absl::optional<std::string>&));\n  MOCK_METHOD(void, clearDeferredDeleteList, ());\n  MOCK_METHOD(Network::Connection*, createServerConnection_, ());\n  MOCK_METHOD(Network::ClientConnection*, createClientConnection_,\n              (Network::Address::InstanceConstSharedPtr address,\n               Network::Address::InstanceConstSharedPtr source_address,\n               Network::TransportSocketPtr& transport_socket,\n               const Network::ConnectionSocket::OptionsSharedPtr& options));\n  MOCK_METHOD(Network::DnsResolverSharedPtr, createDnsResolver,\n              (const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,\n               const bool use_tcp_for_dns_lookups));\n  MOCK_METHOD(FileEvent*, createFileEvent_,\n              (os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events));\n  MOCK_METHOD(Filesystem::Watcher*, createFilesystemWatcher_, ());\n  MOCK_METHOD(Network::Listener*, createListener_,\n              (Network::SocketSharedPtr && socket, Network::TcpListenerCallbacks& cb,\n               bool bind_to_port, uint32_t backlog_size));\n  MOCK_METHOD(Network::UdpListener*, createUdpListener_,\n              (Network::SocketSharedPtr socket, Network::UdpListenerCallbacks& cb));\n  MOCK_METHOD(Timer*, createTimer_, (Event::TimerCb cb));\n  MOCK_METHOD(SchedulableCallback*, createSchedulableCallback_, (std::function<void()> cb));\n  MOCK_METHOD(void, deferredDelete_, (DeferredDeletable * to_delete));\n  MOCK_METHOD(void, exit, ());\n  MOCK_METHOD(SignalEvent*, listenForSignal_, (int signal_num, SignalCb cb));\n  MOCK_METHOD(void, post, (std::function<void()> callback));\n  MOCK_METHOD(void, run, (RunType type));\n  MOCK_METHOD(const ScopeTrackedObject*, setTrackedObject, (const ScopeTrackedObject* object));\n  MOCK_METHOD(bool, isThreadSafe, (), (const));\n  Buffer::WatermarkFactory& getWatermarkFactory() override { return buffer_factory_; }\n  MOCK_METHOD(Thread::ThreadId, getCurrentThreadId, ());\n  
MOCK_METHOD(MonotonicTime, approximateMonotonicTime, (), (const));\n  MOCK_METHOD(void, updateApproximateMonotonicTime, ());\n\n  GlobalTimeSystem time_system_;\n  std::list<DeferredDeletablePtr> to_delete_;\n  MockBufferFactory buffer_factory_;\n\nprivate:\n  const std::string name_;\n};\n\nclass MockTimer : public Timer {\npublic:\n  MockTimer();\n  MockTimer(MockDispatcher* dispatcher);\n  ~MockTimer() override;\n\n  void invokeCallback() {\n    EXPECT_TRUE(enabled_);\n    enabled_ = false;\n    if (scope_ == nullptr) {\n      callback_();\n      return;\n    }\n    ScopeTrackerScopeState scope(scope_, *dispatcher_);\n    scope_ = nullptr;\n    callback_();\n  }\n\n  // Timer\n  MOCK_METHOD(void, disableTimer, ());\n  MOCK_METHOD(void, enableTimer, (std::chrono::milliseconds, const ScopeTrackedObject* scope));\n  MOCK_METHOD(void, enableHRTimer, (std::chrono::microseconds, const ScopeTrackedObject* scope));\n  MOCK_METHOD(bool, enabled, ());\n\n  MockDispatcher* dispatcher_{};\n  const ScopeTrackedObject* scope_{};\n  bool enabled_{};\n\n  Event::TimerCb callback_;\n\n  // If not nullptr, will be set on dtor. 
This can help to verify that the timer was destroyed.\n  bool* timer_destroyed_{};\n};\n\nclass MockSchedulableCallback : public SchedulableCallback {\npublic:\n  MockSchedulableCallback(MockDispatcher* dispatcher);\n  ~MockSchedulableCallback() override;\n\n  void invokeCallback() {\n    EXPECT_TRUE(enabled_);\n    enabled_ = false;\n    callback_();\n  }\n\n  // SchedulableCallback\n  MOCK_METHOD(void, scheduleCallbackCurrentIteration, ());\n  MOCK_METHOD(void, scheduleCallbackNextIteration, ());\n  MOCK_METHOD(void, cancel, ());\n  MOCK_METHOD(bool, enabled, ());\n\n  MockDispatcher* dispatcher_{};\n  bool enabled_{};\n\nprivate:\n  std::function<void()> callback_;\n};\n\nclass MockSignalEvent : public SignalEvent {\npublic:\n  MockSignalEvent(MockDispatcher* dispatcher);\n  ~MockSignalEvent() override;\n\n  SignalCb callback_;\n};\n\nclass MockFileEvent : public FileEvent {\npublic:\n  MockFileEvent();\n  ~MockFileEvent() override;\n\n  MOCK_METHOD(void, activate, (uint32_t events));\n  MOCK_METHOD(void, setEnabled, (uint32_t events));\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/event/wrapped_dispatcher.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n\n#include \"envoy/event/dispatcher.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n// Dispatcher implementation that forwards all methods to another implementation\n// class. Subclassing this provides a convenient way to forward most methods and\n// override the behavior of a few.\nclass WrappedDispatcher : public Dispatcher {\npublic:\n  WrappedDispatcher(Dispatcher& impl) : impl_(impl) {}\n\n  // Event::Dispatcher\n  const std::string& name() override { return impl_.name(); }\n\n  TimeSource& timeSource() override { return impl_.timeSource(); }\n\n  void initializeStats(Stats::Scope& scope, const absl::optional<std::string>& prefix) override {\n    impl_.initializeStats(scope, prefix);\n  }\n\n  void clearDeferredDeleteList() override { impl_.clearDeferredDeleteList(); }\n\n  Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket,\n                                                Network::TransportSocketPtr&& transport_socket,\n                                                StreamInfo::StreamInfo& stream_info) override {\n    return impl_.createServerConnection(std::move(socket), std::move(transport_socket),\n                                        stream_info);\n  }\n\n  Network::ClientConnectionPtr\n  createClientConnection(Network::Address::InstanceConstSharedPtr address,\n                         Network::Address::InstanceConstSharedPtr source_address,\n                         Network::TransportSocketPtr&& transport_socket,\n                         const Network::ConnectionSocket::OptionsSharedPtr& options) override {\n    return impl_.createClientConnection(std::move(address), std::move(source_address),\n                                        std::move(transport_socket), options);\n  }\n\n  Network::DnsResolverSharedPtr\n  createDnsResolver(const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,\n      
              const bool use_tcp_for_dns_lookups) override {\n    return impl_.createDnsResolver(resolvers, use_tcp_for_dns_lookups);\n  }\n\n  FileEventPtr createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger,\n                               uint32_t events) override {\n    return impl_.createFileEvent(fd, cb, trigger, events);\n  }\n\n  Filesystem::WatcherPtr createFilesystemWatcher() override {\n    return impl_.createFilesystemWatcher();\n  }\n\n  Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket,\n                                      Network::TcpListenerCallbacks& cb, bool bind_to_port,\n                                      uint32_t backlog_size) override {\n    return impl_.createListener(std::move(socket), cb, bind_to_port, backlog_size);\n  }\n\n  Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket,\n                                            Network::UdpListenerCallbacks& cb) override {\n    return impl_.createUdpListener(std::move(socket), cb);\n  }\n\n  TimerPtr createTimer(TimerCb cb) override { return impl_.createTimer(std::move(cb)); }\n\n  Event::SchedulableCallbackPtr createSchedulableCallback(std::function<void()> cb) override {\n    return impl_.createSchedulableCallback(std::move(cb));\n  }\n\n  void deferredDelete(DeferredDeletablePtr&& to_delete) override {\n    impl_.deferredDelete(std::move(to_delete));\n  }\n\n  void exit() override { impl_.exit(); }\n\n  SignalEventPtr listenForSignal(int signal_num, SignalCb cb) override {\n    return impl_.listenForSignal(signal_num, std::move(cb));\n  }\n\n  void post(std::function<void()> callback) override { impl_.post(std::move(callback)); }\n\n  void run(RunType type) override { impl_.run(type); }\n\n  Buffer::WatermarkFactory& getWatermarkFactory() override { return impl_.getWatermarkFactory(); }\n  const ScopeTrackedObject* setTrackedObject(const ScopeTrackedObject* object) override {\n    return impl_.setTrackedObject(object);\n  }\n\n  
MonotonicTime approximateMonotonicTime() const override {\n    return impl_.approximateMonotonicTime();\n  }\n\n  void updateApproximateMonotonicTime() override { impl_.updateApproximateMonotonicTime(); }\n\n  bool isThreadSafe() const override { return impl_.isThreadSafe(); }\n\nprotected:\n  Dispatcher& impl_;\n};\n\n} // namespace Event\n} // namespace Envoy"
  },
  {
    "path": "test/mocks/filesystem/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"filesystem_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/filesystem:filesystem_interface\",\n        \"//source/common/common:thread_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/filesystem/mocks.cc",
    "content": "#include \"test/mocks/filesystem/mocks.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/lock_guard.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nMockFile::MockFile() : num_opens_(0), num_writes_(0), is_open_(false) {}\nMockFile::~MockFile() = default;\n\nApi::IoCallBoolResult MockFile::open(FlagSet flag) {\n  Thread::LockGuard lock(open_mutex_);\n\n  Api::IoCallBoolResult result = open_(flag);\n  is_open_ = result.rc_;\n  num_opens_++;\n  open_event_.notifyOne();\n\n  return result;\n}\n\nApi::IoCallSizeResult MockFile::write(absl::string_view buffer) {\n  Thread::LockGuard lock(write_mutex_);\n  if (!is_open_) {\n    return {-1, Api::IoErrorPtr(nullptr, [](Api::IoError*) { NOT_REACHED_GCOVR_EXCL_LINE; })};\n  }\n\n  Api::IoCallSizeResult result = write_(buffer);\n  num_writes_++;\n  write_event_.notifyOne();\n\n  return result;\n}\n\nApi::IoCallBoolResult MockFile::close() {\n  Api::IoCallBoolResult result = close_();\n  is_open_ = !result.rc_;\n\n  return result;\n}\n\nMockInstance::MockInstance() = default;\nMockInstance::~MockInstance() = default;\n\nMockWatcher::MockWatcher() = default;\nMockWatcher::~MockWatcher() = default;\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/filesystem/mocks.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/filesystem/filesystem.h\"\n#include \"envoy/filesystem/watcher.h\"\n\n#include \"common/common/thread.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Filesystem {\n\nclass MockFile : public File {\npublic:\n  MockFile();\n  ~MockFile() override;\n\n  // Filesystem::File\n  Api::IoCallBoolResult open(FlagSet flag) override;\n  Api::IoCallSizeResult write(absl::string_view buffer) override;\n  Api::IoCallBoolResult close() override;\n  bool isOpen() const override { return is_open_; };\n  MOCK_METHOD(std::string, path, (), (const));\n\n  // The first parameter here must be `const FlagSet&` otherwise it doesn't compile with libstdc++\n  MOCK_METHOD(Api::IoCallBoolResult, open_, (const FlagSet& flag));\n  MOCK_METHOD(Api::IoCallSizeResult, write_, (absl::string_view buffer));\n  MOCK_METHOD(Api::IoCallBoolResult, close_, ());\n\n  size_t num_opens_;\n  size_t num_writes_;\n  Thread::MutexBasicLockable open_mutex_;\n  Thread::MutexBasicLockable write_mutex_;\n  Thread::CondVar open_event_;\n  Thread::CondVar write_event_;\n\nprivate:\n  bool is_open_;\n};\n\nclass MockInstance : public Instance {\npublic:\n  MockInstance();\n  ~MockInstance() override;\n\n  // Filesystem::Instance\n  MOCK_METHOD(FilePtr, createFile, (const std::string&));\n  MOCK_METHOD(bool, fileExists, (const std::string&));\n  MOCK_METHOD(bool, directoryExists, (const std::string&));\n  MOCK_METHOD(ssize_t, fileSize, (const std::string&));\n  MOCK_METHOD(std::string, fileReadToEnd, (const std::string&));\n  MOCK_METHOD(PathSplitResult, splitPathFromFilename, (absl::string_view));\n  MOCK_METHOD(bool, illegalPath, (const std::string&));\n};\n\nclass MockWatcher : public Watcher {\npublic:\n  MockWatcher();\n  ~MockWatcher() override;\n\n  MOCK_METHOD(void, addWatch, (absl::string_view, uint32_t, OnChangedCb));\n};\n\n} // namespace Filesystem\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/grpc/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"grpc_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/grpc:async_client_interface\",\n        \"//include/envoy/grpc:async_client_manager_interface\",\n        \"//source/common/grpc:typed_async_client_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/grpc/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include \"test/mocks/http/mocks.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Grpc {\n\nMockAsyncClient::MockAsyncClient() {\n  async_request_ = std::make_unique<testing::NiceMock<Grpc::MockAsyncRequest>>();\n  ON_CALL(*this, sendRaw(_, _, _, _, _, _)).WillByDefault(Return(async_request_.get()));\n  ON_CALL(*this, dispatcher()).WillByDefault(Return(&dispatcher_));\n}\nMockAsyncClient::~MockAsyncClient() = default;\n\nMockAsyncRequest::MockAsyncRequest() = default;\nMockAsyncRequest::~MockAsyncRequest() = default;\n\nMockAsyncStream::MockAsyncStream() = default;\nMockAsyncStream::~MockAsyncStream() = default;\n\nMockAsyncClientFactory::MockAsyncClientFactory() {\n  ON_CALL(*this, create()).WillByDefault(Invoke([] {\n    return std::make_unique<testing::NiceMock<Grpc::MockAsyncClient>>();\n  }));\n}\nMockAsyncClientFactory::~MockAsyncClientFactory() = default;\n\nMockAsyncClientManager::MockAsyncClientManager() {\n  ON_CALL(*this, factoryForGrpcService(_, _, _))\n      .WillByDefault(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) {\n        return std::make_unique<testing::NiceMock<Grpc::MockAsyncClientFactory>>();\n      }));\n}\n\nMockAsyncClientManager::~MockAsyncClientManager() = default;\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/grpc/mocks.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/grpc_service.pb.h\"\n#include \"envoy/grpc/async_client.h\"\n#include \"envoy/grpc/async_client_manager.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/grpc/typed_async_client.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Grpc {\n\nclass MockAsyncRequest : public AsyncRequest {\npublic:\n  MockAsyncRequest();\n  ~MockAsyncRequest() override;\n\n  MOCK_METHOD(void, cancel, ());\n};\n\nclass MockAsyncStream : public RawAsyncStream {\npublic:\n  MockAsyncStream();\n  ~MockAsyncStream() override;\n\n  void sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) override {\n    sendMessageRaw_(request, end_stream);\n  }\n  MOCK_METHOD(void, sendMessageRaw_, (Buffer::InstancePtr & request, bool end_stream));\n  MOCK_METHOD(void, closeStream, ());\n  MOCK_METHOD(void, resetStream, ());\n  MOCK_METHOD(bool, isAboveWriteBufferHighWatermark, (), (const));\n};\n\ntemplate <class ResponseType> using ResponseTypePtr = std::unique_ptr<ResponseType>;\n\ntemplate <class ResponseType>\nclass MockAsyncRequestCallbacks : public AsyncRequestCallbacks<ResponseType> {\npublic:\n  void onSuccess(ResponseTypePtr<ResponseType>&& response, Tracing::Span& span) {\n    onSuccess_(*response, span);\n  }\n\n  MOCK_METHOD(void, onCreateInitialMetadata, (Http::RequestHeaderMap & metadata));\n  MOCK_METHOD(void, onSuccess_, (const ResponseType& response, Tracing::Span& span));\n  MOCK_METHOD(void, onFailure,\n              (Status::GrpcStatus status, const std::string& message, Tracing::Span& span));\n};\n\ntemplate <class ResponseType>\nclass MockAsyncStreamCallbacks : public AsyncStreamCallbacks<ResponseType> {\npublic:\n  void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata) {\n    onReceiveInitialMetadata_(*metadata);\n  }\n\n  void 
onReceiveMessage(ResponseTypePtr<ResponseType>&& message) { onReceiveMessage_(*message); }\n\n  void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) {\n    onReceiveTrailingMetadata_(*metadata);\n  }\n\n  MOCK_METHOD(void, onCreateInitialMetadata, (Http::RequestHeaderMap & metadata));\n  MOCK_METHOD(void, onReceiveInitialMetadata_, (const Http::ResponseHeaderMap& metadata));\n  MOCK_METHOD(void, onReceiveMessage_, (const ResponseType& message));\n  MOCK_METHOD(void, onReceiveTrailingMetadata_, (const Http::ResponseTrailerMap& metadata));\n  MOCK_METHOD(void, onRemoteClose, (Status::GrpcStatus status, const std::string& message));\n};\n\nclass MockAsyncClient : public RawAsyncClient {\npublic:\n  MockAsyncClient();\n  ~MockAsyncClient() override;\n\n  MOCK_METHOD(AsyncRequest*, sendRaw,\n              (absl::string_view service_full_name, absl::string_view method_name,\n               Buffer::InstancePtr&& request, RawAsyncRequestCallbacks& callbacks,\n               Tracing::Span& parent_span, const Http::AsyncClient::RequestOptions& options));\n  MOCK_METHOD(RawAsyncStream*, startRaw,\n              (absl::string_view service_full_name, absl::string_view method_name,\n               RawAsyncStreamCallbacks& callbacks,\n               const Http::AsyncClient::StreamOptions& options));\n  MOCK_METHOD(Event::Dispatcher*, dispatcher, ());\n\n  std::unique_ptr<testing::NiceMock<Grpc::MockAsyncRequest>> async_request_;\n  Event::MockDispatcher dispatcher_;\n};\n\nclass MockAsyncClientFactory : public AsyncClientFactory {\npublic:\n  MockAsyncClientFactory();\n  ~MockAsyncClientFactory() override;\n\n  MOCK_METHOD(RawAsyncClientPtr, create, ());\n};\n\nclass MockAsyncClientManager : public AsyncClientManager {\npublic:\n  MockAsyncClientManager();\n  ~MockAsyncClientManager() override;\n\n  MOCK_METHOD(AsyncClientFactoryPtr, factoryForGrpcService,\n              (const envoy::config::core::v3::GrpcService& grpc_service, Stats::Scope& scope,\n            
   bool skip_cluster_check));\n};\n\nMATCHER_P(ProtoBufferEq, expected, \"\") {\n  typename std::remove_const<decltype(expected)>::type proto;\n  if (!proto.ParseFromString(arg->toString())) {\n    *result_listener << \"\\nParse of buffer failed\\n\";\n    return false;\n  }\n  auto equal = ::Envoy::TestUtility::protoEqual(proto, expected);\n  if (!equal) {\n    *result_listener << \"\\n\"\n                     << \"=======================Expected proto:===========================\\n\"\n                     << expected.DebugString()\n                     << \"------------------is not equal to actual proto:------------------\\n\"\n                     << proto.DebugString()\n                     << \"=================================================================\\n\";\n  }\n  return equal;\n}\n\nMATCHER_P2(ProtoBufferEqIgnoringField, expected, ignored_field, \"\") {\n  typename std::remove_const<decltype(expected)>::type proto;\n  if (!proto.ParseFromArray(static_cast<char*>(arg->linearize(arg->length())), arg->length())) {\n    *result_listener << \"\\nParse of buffer failed\\n\";\n    return false;\n  }\n  const bool equal = ::Envoy::TestUtility::protoEqualIgnoringField(proto, expected, ignored_field);\n  if (!equal) {\n    std::string but_ignoring = absl::StrCat(\"(but ignoring \", ignored_field, \")\");\n    *result_listener << \"\\n\"\n                     << ::Envoy::TestUtility::addLeftAndRightPadding(\"Expected proto:\") << \"\\n\"\n                     << ::Envoy::TestUtility::addLeftAndRightPadding(but_ignoring) << \"\\n\"\n                     << expected.DebugString()\n                     << ::Envoy::TestUtility::addLeftAndRightPadding(\n                            \"is not equal to actual proto:\")\n                     << \"\\n\"\n                     << proto.DebugString()\n                     << ::Envoy::TestUtility::addLeftAndRightPadding(\"\") // line full of padding\n                     << \"\\n\";\n  }\n  return 
equal;\n}\n\nMATCHER_P(ProtoBufferEqIgnoreRepeatedFieldOrdering, expected, \"\") {\n  typename std::remove_const<decltype(expected)>::type proto;\n  if (!proto.ParseFromArray(static_cast<char*>(arg->linearize(arg->length())), arg->length())) {\n    *result_listener << \"\\nParse of buffer failed\\n\";\n    return false;\n  }\n  const bool equal =\n      ::Envoy::TestUtility::protoEqual(proto, expected, /*ignore_repeated_field_ordering=*/true);\n  if (!equal) {\n    *result_listener << \"\\n\"\n                     << \"=======================Expected proto:===========================\\n\"\n                     << expected.DebugString()\n                     << \"------------------is not equal to actual proto:------------------\\n\"\n                     << proto.DebugString()\n                     << \"=================================================================\\n\";\n  }\n  return equal;\n}\n\n} // namespace Grpc\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"api_listener_mocks\",\n    srcs = [\"api_listener.cc\"],\n    hdrs = [\"api_listener.h\"],\n    deps = [\n        \"//include/envoy/http:api_listener_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"conn_pool_mocks\",\n    srcs = [\"conn_pool.cc\"],\n    hdrs = [\"conn_pool.h\"],\n    deps = [\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/upstream:host_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"http_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    external_deps = [\n        \"abseil_strings\",\n    ],\n    deps = [\n        \":conn_pool_mocks\",\n        \":stream_decoder_mock\",\n        \":stream_encoder_mock\",\n        \":stream_mock\",\n        \"//include/envoy/access_log:access_log_interface\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/http:async_client_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/http:conn_pool_interface\",\n        \"//include/envoy/http:filter_interface\",\n        \"//include/envoy/ssl:connection_interface\",\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//source/common/http:filter_manager_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"stream_mock\",\n    srcs = [\"stream.cc\"],\n    hdrs = [\"stream.h\"],\n    deps = [\n        
\"//include/envoy/http:codec_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"stream_decoder_mock\",\n    srcs = [\"stream_decoder.cc\"],\n    hdrs = [\"stream_decoder.h\"],\n    deps = [\n        \"//include/envoy/http:codec_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"stream_encoder_mock\",\n    srcs = [\"stream_encoder.cc\"],\n    hdrs = [\"stream_encoder.h\"],\n    deps = [\n        \":stream_mock\",\n        \"//include/envoy/http:codec_interface\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"http_mocks_test\",\n    srcs = [\"mocks_test.cc\"],\n    deps = [\n        \":http_mocks\",\n        \"//include/envoy/http:header_map_interface\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/http/api_listener.cc",
    "content": "#include \"test/mocks/http/api_listener.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nMockApiListener::MockApiListener() = default;\nMockApiListener::~MockApiListener() = default;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/api_listener.h",
    "content": "#pragma once\n#include \"envoy/http/api_listener.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass MockApiListener : public ApiListener {\npublic:\n  MockApiListener();\n  ~MockApiListener() override;\n\n  // Http::ApiListener\n  MOCK_METHOD(RequestDecoder&, newStream,\n              (ResponseEncoder & response_encoder, bool is_internally_created));\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/conn_pool.cc",
    "content": "#include \"test/mocks/http/conn_pool.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace ConnectionPool {\n\nMockInstance::MockInstance()\n    : host_{std::make_shared<testing::NiceMock<Upstream::MockHostDescription>>()} {\n  ON_CALL(*this, host()).WillByDefault(Return(host_));\n}\nMockInstance::~MockInstance() = default;\n\n} // namespace ConnectionPool\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/conn_pool.h",
    "content": "#include <memory>\n\n#include \"envoy/http/conn_pool.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/upstream/host.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace ConnectionPool {\n\nclass MockInstance : public Instance {\npublic:\n  MockInstance();\n  ~MockInstance() override;\n\n  // Http::ConnectionPool::Instance\n  MOCK_METHOD(Http::Protocol, protocol, (), (const));\n  MOCK_METHOD(void, addDrainedCallback, (DrainedCb cb));\n  MOCK_METHOD(void, drainConnections, ());\n  MOCK_METHOD(bool, hasActiveConnections, (), (const));\n  MOCK_METHOD(Cancellable*, newStream, (ResponseDecoder & response_decoder, Callbacks& callbacks));\n  MOCK_METHOD(bool, maybePrefetch, (float));\n  MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const));\n\n  std::shared_ptr<testing::NiceMock<Upstream::MockHostDescription>> host_;\n};\n\n} // namespace ConnectionPool\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/http/header_map.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Http {\n\nMockConnectionCallbacks::MockConnectionCallbacks() = default;\nMockConnectionCallbacks::~MockConnectionCallbacks() = default;\n\nMockServerConnectionCallbacks::MockServerConnectionCallbacks() = default;\nMockServerConnectionCallbacks::~MockServerConnectionCallbacks() = default;\n\nMockFilterManagerCallbacks::MockFilterManagerCallbacks() {\n  ON_CALL(*this, responseHeaders()).WillByDefault(Invoke([this]() -> ResponseHeaderMapOptRef {\n    if (response_headers_) {\n      return absl::make_optional(std::ref(*response_headers_));\n    }\n    return absl::nullopt;\n  }));\n}\nMockFilterManagerCallbacks::~MockFilterManagerCallbacks() = default;\n\nMockStreamCallbacks::MockStreamCallbacks() = default;\nMockStreamCallbacks::~MockStreamCallbacks() = default;\n\nMockServerConnection::MockServerConnection() {\n  ON_CALL(*this, protocol()).WillByDefault(Invoke([this]() { return protocol_; }));\n}\n\nMockServerConnection::~MockServerConnection() = default;\n\nMockClientConnection::MockClientConnection() = default;\nMockClientConnection::~MockClientConnection() = default;\n\nMockFilterChainFactory::MockFilterChainFactory() = default;\nMockFilterChainFactory::~MockFilterChainFactory() = default;\n\ntemplate <class T> static void initializeMockStreamFilterCallbacks(T& callbacks) {\n  callbacks.cluster_info_.reset(new NiceMock<Upstream::MockClusterInfo>());\n  callbacks.route_.reset(new NiceMock<Router::MockRoute>());\n  ON_CALL(callbacks, dispatcher()).WillByDefault(ReturnRef(callbacks.dispatcher_));\n  ON_CALL(callbacks, streamInfo()).WillByDefault(ReturnRef(callbacks.stream_info_));\n  ON_CALL(callbacks, 
clusterInfo()).WillByDefault(Return(callbacks.cluster_info_));\n  ON_CALL(callbacks, route()).WillByDefault(Return(callbacks.route_));\n}\n\nMockStreamDecoderFilterCallbacks::MockStreamDecoderFilterCallbacks() {\n  initializeMockStreamFilterCallbacks(*this);\n  ON_CALL(*this, decodingBuffer()).WillByDefault(Invoke(&buffer_, &Buffer::InstancePtr::get));\n\n  ON_CALL(*this, addDownstreamWatermarkCallbacks(_))\n      .WillByDefault(Invoke([this](DownstreamWatermarkCallbacks& callbacks) -> void {\n        callbacks_.push_back(&callbacks);\n      }));\n\n  ON_CALL(*this, removeDownstreamWatermarkCallbacks(_))\n      .WillByDefault(Invoke([this](DownstreamWatermarkCallbacks& callbacks) -> void {\n        callbacks_.remove(&callbacks);\n      }));\n\n  ON_CALL(*this, activeSpan()).WillByDefault(ReturnRef(active_span_));\n  ON_CALL(*this, tracingConfig()).WillByDefault(ReturnRef(tracing_config_));\n  ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_));\n  ON_CALL(*this, sendLocalReply(_, _, _, _, _))\n      .WillByDefault(Invoke([this](Code code, absl::string_view body,\n                                   std::function<void(ResponseHeaderMap & headers)> modify_headers,\n                                   const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                                   absl::string_view details) {\n        sendLocalReply_(code, body, modify_headers, grpc_status, details);\n      }));\n  ON_CALL(*this, routeConfig())\n      .WillByDefault(Return(absl::optional<Router::ConfigConstSharedPtr>()));\n}\n\nMockStreamDecoderFilterCallbacks::~MockStreamDecoderFilterCallbacks() = default;\n\nvoid MockStreamDecoderFilterCallbacks::sendLocalReply_(\n    Code code, absl::string_view body,\n    std::function<void(ResponseHeaderMap& headers)> modify_headers,\n    const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) {\n  Utility::sendLocalReply(\n      stream_destroyed_,\n      Utility::EncodeFunctions{\n          
nullptr, nullptr,\n          [this, modify_headers, details](ResponseHeaderMapPtr&& headers, bool end_stream) -> void {\n            if (modify_headers != nullptr) {\n              modify_headers(*headers);\n            }\n            encodeHeaders(std::move(headers), end_stream, details);\n          },\n          [this](Buffer::Instance& data, bool end_stream) -> void {\n            encodeData(data, end_stream);\n          }},\n      Utility::LocalReplyData{is_grpc_request_, code, body, grpc_status, is_head_request_});\n}\n\nMockStreamEncoderFilterCallbacks::MockStreamEncoderFilterCallbacks() {\n  initializeMockStreamFilterCallbacks(*this);\n  ON_CALL(*this, encodingBuffer()).WillByDefault(Invoke(&buffer_, &Buffer::InstancePtr::get));\n  ON_CALL(*this, activeSpan()).WillByDefault(ReturnRef(active_span_));\n  ON_CALL(*this, tracingConfig()).WillByDefault(ReturnRef(tracing_config_));\n  ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_));\n}\n\nMockStreamEncoderFilterCallbacks::~MockStreamEncoderFilterCallbacks() = default;\n\nMockStreamDecoderFilter::MockStreamDecoderFilter() {\n  ON_CALL(*this, setDecoderFilterCallbacks(_))\n      .WillByDefault(Invoke(\n          [this](StreamDecoderFilterCallbacks& callbacks) -> void { callbacks_ = &callbacks; }));\n}\n\nMockStreamDecoderFilter::~MockStreamDecoderFilter() = default;\n\nMockStreamEncoderFilter::MockStreamEncoderFilter() {\n  ON_CALL(*this, setEncoderFilterCallbacks(_))\n      .WillByDefault(Invoke(\n          [this](StreamEncoderFilterCallbacks& callbacks) -> void { callbacks_ = &callbacks; }));\n}\n\nMockStreamEncoderFilter::~MockStreamEncoderFilter() = default;\n\nMockStreamFilter::MockStreamFilter() {\n  ON_CALL(*this, setDecoderFilterCallbacks(_))\n      .WillByDefault(Invoke([this](StreamDecoderFilterCallbacks& callbacks) -> void {\n        decoder_callbacks_ = &callbacks;\n      }));\n  ON_CALL(*this, setEncoderFilterCallbacks(_))\n      .WillByDefault(Invoke([this](StreamEncoderFilterCallbacks& 
callbacks) -> void {\n        encoder_callbacks_ = &callbacks;\n      }));\n}\n\nMockStreamFilter::~MockStreamFilter() = default;\n\nMockAsyncClient::MockAsyncClient() {\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n}\nMockAsyncClient::~MockAsyncClient() = default;\n\nMockAsyncClientCallbacks::MockAsyncClientCallbacks() = default;\nMockAsyncClientCallbacks::~MockAsyncClientCallbacks() = default;\n\nMockAsyncClientStreamCallbacks::MockAsyncClientStreamCallbacks() = default;\nMockAsyncClientStreamCallbacks::~MockAsyncClientStreamCallbacks() = default;\n\nMockAsyncClientRequest::MockAsyncClientRequest(MockAsyncClient* client) : client_(client) {}\nMockAsyncClientRequest::~MockAsyncClientRequest() { client_->onRequestDestroy(); }\n\nMockAsyncClientStream::MockAsyncClientStream() = default;\nMockAsyncClientStream::~MockAsyncClientStream() = default;\n\nMockFilterChainFactoryCallbacks::MockFilterChainFactoryCallbacks() = default;\nMockFilterChainFactoryCallbacks::~MockFilterChainFactoryCallbacks() = default;\n\n} // namespace Http\n\nnamespace Http {\n\nIsSubsetOfHeadersMatcher IsSubsetOfHeaders(const HeaderMap& expected_headers) {\n  return IsSubsetOfHeadersMatcher(expected_headers);\n}\n\nIsSupersetOfHeadersMatcher IsSupersetOfHeaders(const HeaderMap& expected_headers) {\n  return IsSupersetOfHeadersMatcher(expected_headers);\n}\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/mocks.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"envoy/access_log/access_log.h\"\n#include \"envoy/http/async_client.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/http/conn_pool.h\"\n#include \"envoy/http/filter.h\"\n#include \"envoy/ssl/connection.h\"\n\n#include \"common/http/filter_manager.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/utility.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/conn_pool.h\"\n#include \"test/mocks/http/stream.h\"\n#include \"test/mocks/http/stream_decoder.h\"\n#include \"test/mocks/http/stream_encoder.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass MockConnectionCallbacks : public virtual ConnectionCallbacks {\npublic:\n  MockConnectionCallbacks();\n  ~MockConnectionCallbacks() override;\n\n  // Http::ConnectionCallbacks\n  MOCK_METHOD(void, onGoAway, (GoAwayErrorCode error_code));\n};\n\nclass MockFilterManagerCallbacks : public FilterManagerCallbacks {\npublic:\n  MockFilterManagerCallbacks();\n  ~MockFilterManagerCallbacks() override;\n\n  MOCK_METHOD(void, encodeHeaders, (ResponseHeaderMap&, bool));\n  MOCK_METHOD(void, encode100ContinueHeaders, (ResponseHeaderMap&));\n  MOCK_METHOD(void, encodeData, (Buffer::Instance&, bool));\n  MOCK_METHOD(void, encodeTrailers, (ResponseTrailerMap&));\n  MOCK_METHOD(void, encodeMetadata, (MetadataMapVector&));\n  MOCK_METHOD(void, setRequestTrailers, (RequestTrailerMapPtr &&));\n  
MOCK_METHOD(void, setContinueHeaders, (ResponseHeaderMapPtr &&));\n  MOCK_METHOD(void, setResponseHeaders_, (ResponseHeaderMap&));\n  void setResponseHeaders(ResponseHeaderMapPtr&& response_headers) override {\n    // TODO(snowp): Repeat this pattern for all setters.\n    response_headers_ = std::move(response_headers);\n    setResponseHeaders_(*response_headers_);\n  }\n  MOCK_METHOD(void, setResponseTrailers, (ResponseTrailerMapPtr &&));\n  MOCK_METHOD(RequestHeaderMapOptRef, requestHeaders, ());\n  MOCK_METHOD(RequestTrailerMapOptRef, requestTrailers, ());\n  MOCK_METHOD(ResponseHeaderMapOptRef, continueHeaders, ());\n  MOCK_METHOD(ResponseHeaderMapOptRef, responseHeaders, ());\n  MOCK_METHOD(ResponseTrailerMapOptRef, responseTrailers, ());\n  MOCK_METHOD(void, endStream, ());\n  MOCK_METHOD(void, onDecoderFilterBelowWriteBufferLowWatermark, ());\n  MOCK_METHOD(void, onDecoderFilterAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, upgradeFilterChainCreated, ());\n  MOCK_METHOD(void, disarmRequestTimeout, ());\n  MOCK_METHOD(void, resetIdleTimer, ());\n  MOCK_METHOD(void, recreateStream, (StreamInfo::FilterStateSharedPtr filter_state));\n  MOCK_METHOD(void, resetStream, ());\n  MOCK_METHOD(const Router::RouteEntry::UpgradeMap*, upgradeMap, ());\n  MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, clusterInfo, ());\n  MOCK_METHOD(Router::RouteConstSharedPtr, route, (const Router::RouteCallback& cb));\n  MOCK_METHOD(void, clearRouteCache, ());\n  MOCK_METHOD(absl::optional<Router::ConfigConstSharedPtr>, routeConfig, ());\n  MOCK_METHOD(void, requestRouteConfigUpdate, (Http::RouteConfigUpdatedCallbackSharedPtr));\n  MOCK_METHOD(Tracing::Span&, activeSpan, ());\n  MOCK_METHOD(void, onResponseDataTooLarge, ());\n  MOCK_METHOD(void, onRequestDataTooLarge, ());\n  MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ());\n  MOCK_METHOD(void, onLocalReply, (Code code));\n  MOCK_METHOD(Tracing::Config&, tracingConfig, ());\n  MOCK_METHOD(const 
ScopeTrackedObject&, scope, ());\n\n  ResponseHeaderMapPtr response_headers_;\n};\n\nclass MockServerConnectionCallbacks : public ServerConnectionCallbacks,\n                                      public MockConnectionCallbacks {\npublic:\n  MockServerConnectionCallbacks();\n  ~MockServerConnectionCallbacks() override;\n\n  // Http::ServerConnectionCallbacks\n  MOCK_METHOD(RequestDecoder&, newStream,\n              (ResponseEncoder & response_encoder, bool is_internally_created));\n};\n\nclass MockStreamCallbacks : public StreamCallbacks {\npublic:\n  MockStreamCallbacks();\n  ~MockStreamCallbacks() override;\n\n  // Http::StreamCallbacks\n  MOCK_METHOD(void, onResetStream, (StreamResetReason reason, absl::string_view));\n  MOCK_METHOD(void, onAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, onBelowWriteBufferLowWatermark, ());\n};\n\nclass MockServerConnection : public ServerConnection {\npublic:\n  MockServerConnection();\n  ~MockServerConnection() override;\n\n  // Http::Connection\n  MOCK_METHOD(Status, dispatch, (Buffer::Instance & data));\n  MOCK_METHOD(void, goAway, ());\n  MOCK_METHOD(Protocol, protocol, ());\n  MOCK_METHOD(void, shutdownNotice, ());\n  MOCK_METHOD(bool, wantsToWrite, ());\n  MOCK_METHOD(void, onUnderlyingConnectionAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, onUnderlyingConnectionBelowWriteBufferLowWatermark, ());\n\n  Protocol protocol_{Protocol::Http11};\n};\n\nclass MockClientConnection : public ClientConnection {\npublic:\n  MockClientConnection();\n  ~MockClientConnection() override;\n\n  // Http::Connection\n  MOCK_METHOD(Status, dispatch, (Buffer::Instance & data));\n  MOCK_METHOD(void, goAway, ());\n  MOCK_METHOD(Protocol, protocol, ());\n  MOCK_METHOD(void, shutdownNotice, ());\n  MOCK_METHOD(bool, wantsToWrite, ());\n  MOCK_METHOD(void, onUnderlyingConnectionAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, onUnderlyingConnectionBelowWriteBufferLowWatermark, ());\n\n  // Http::ClientConnection\n  
MOCK_METHOD(RequestEncoder&, newStream, (ResponseDecoder & response_decoder));\n};\n\nclass MockFilterChainFactory : public FilterChainFactory {\npublic:\n  MockFilterChainFactory();\n  ~MockFilterChainFactory() override;\n\n  // Http::FilterChainFactory\n  MOCK_METHOD(void, createFilterChain, (FilterChainFactoryCallbacks & callbacks));\n  MOCK_METHOD(bool, createUpgradeFilterChain,\n              (absl::string_view upgrade_type, const FilterChainFactory::UpgradeMap* upgrade_map,\n               FilterChainFactoryCallbacks& callbacks));\n};\n\nclass MockStreamFilterCallbacksBase {\npublic:\n  Event::MockDispatcher dispatcher_;\n  testing::NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n  std::shared_ptr<Router::MockRoute> route_;\n  std::shared_ptr<Upstream::MockClusterInfo> cluster_info_;\n};\n\nclass MockStreamDecoderFilterCallbacks : public StreamDecoderFilterCallbacks,\n                                         public MockStreamFilterCallbacksBase {\npublic:\n  MockStreamDecoderFilterCallbacks();\n  ~MockStreamDecoderFilterCallbacks() override;\n\n  // Http::StreamFilterCallbacks\n  MOCK_METHOD(const Network::Connection*, connection, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(void, resetStream, ());\n  MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, clusterInfo, ());\n  MOCK_METHOD(Router::RouteConstSharedPtr, route, ());\n  MOCK_METHOD(Router::RouteConstSharedPtr, route, (const Router::RouteCallback&));\n  MOCK_METHOD(void, requestRouteConfigUpdate, (Http::RouteConfigUpdatedCallbackSharedPtr));\n  MOCK_METHOD(absl::optional<Router::ConfigConstSharedPtr>, routeConfig, ());\n  MOCK_METHOD(void, clearRouteCache, ());\n  MOCK_METHOD(uint64_t, streamId, (), (const));\n  MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ());\n  MOCK_METHOD(Tracing::Span&, activeSpan, ());\n  MOCK_METHOD(Tracing::Config&, tracingConfig, ());\n  MOCK_METHOD(const ScopeTrackedObject&, scope, ());\n  MOCK_METHOD(void, 
onDecoderFilterAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, onDecoderFilterBelowWriteBufferLowWatermark, ());\n  MOCK_METHOD(void, addDownstreamWatermarkCallbacks, (DownstreamWatermarkCallbacks&));\n  MOCK_METHOD(void, removeDownstreamWatermarkCallbacks, (DownstreamWatermarkCallbacks&));\n  MOCK_METHOD(void, setDecoderBufferLimit, (uint32_t));\n  MOCK_METHOD(uint32_t, decoderBufferLimit, ());\n  MOCK_METHOD(bool, recreateStream, ());\n  MOCK_METHOD(void, addUpstreamSocketOptions, (const Network::Socket::OptionsSharedPtr& options));\n  MOCK_METHOD(Network::Socket::OptionsSharedPtr, getUpstreamSocketOptions, (), (const));\n\n  // Http::StreamDecoderFilterCallbacks\n  void sendLocalReply_(Code code, absl::string_view body,\n                       std::function<void(ResponseHeaderMap& headers)> modify_headers,\n                       const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n                       absl::string_view details);\n\n  void encode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override {\n    encode100ContinueHeaders_(*headers);\n  }\n  void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream,\n                     absl::string_view details) override {\n    stream_info_.setResponseCodeDetails(details);\n    encodeHeaders_(*headers, end_stream);\n  }\n  void encodeTrailers(ResponseTrailerMapPtr&& trailers) override { encodeTrailers_(*trailers); }\n  void encodeMetadata(MetadataMapPtr&& metadata_map) override {\n    encodeMetadata_(std::move(metadata_map));\n  }\n  absl::string_view details() {\n    if (stream_info_.responseCodeDetails()) {\n      return stream_info_.responseCodeDetails().value();\n    }\n    return \"\";\n  }\n\n  MOCK_METHOD(void, continueDecoding, ());\n  MOCK_METHOD(void, addDecodedData, (Buffer::Instance & data, bool streaming));\n  MOCK_METHOD(void, injectDecodedDataToFilterChain, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(RequestTrailerMap&, addDecodedTrailers, ());\n  
MOCK_METHOD(MetadataMapVector&, addDecodedMetadata, ());\n  MOCK_METHOD(const Buffer::Instance*, decodingBuffer, ());\n  MOCK_METHOD(void, modifyDecodingBuffer, (std::function<void(Buffer::Instance&)>));\n  MOCK_METHOD(void, encode100ContinueHeaders_, (HeaderMap & headers));\n  MOCK_METHOD(void, encodeHeaders_, (ResponseHeaderMap & headers, bool end_stream));\n  MOCK_METHOD(void, encodeData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, encodeTrailers_, (ResponseTrailerMap & trailers));\n  MOCK_METHOD(void, encodeMetadata_, (MetadataMapPtr metadata_map));\n  MOCK_METHOD(void, sendLocalReply,\n              (Code code, absl::string_view body,\n               std::function<void(ResponseHeaderMap& headers)> modify_headers,\n               const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n               absl::string_view details));\n\n  Buffer::InstancePtr buffer_;\n  std::list<DownstreamWatermarkCallbacks*> callbacks_{};\n  testing::NiceMock<Tracing::MockSpan> active_span_;\n  testing::NiceMock<Tracing::MockConfig> tracing_config_;\n  testing::NiceMock<MockScopedTrackedObject> scope_;\n  bool is_grpc_request_{};\n  bool is_head_request_{false};\n  bool stream_destroyed_{};\n};\n\nclass MockStreamEncoderFilterCallbacks : public StreamEncoderFilterCallbacks,\n                                         public MockStreamFilterCallbacksBase {\npublic:\n  MockStreamEncoderFilterCallbacks();\n  ~MockStreamEncoderFilterCallbacks() override;\n\n  // Http::StreamFilterCallbacks\n  MOCK_METHOD(const Network::Connection*, connection, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(void, resetStream, ());\n  MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, clusterInfo, ());\n  MOCK_METHOD(void, requestRouteConfigUpdate, (std::function<void()>));\n  MOCK_METHOD(bool, canRequestRouteConfigUpdate, ());\n  MOCK_METHOD(Router::RouteConstSharedPtr, route, ());\n  MOCK_METHOD(Router::RouteConstSharedPtr, route, (const 
Router::RouteCallback&));\n  MOCK_METHOD(void, clearRouteCache, ());\n  MOCK_METHOD(uint64_t, streamId, (), (const));\n  MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ());\n  MOCK_METHOD(Tracing::Span&, activeSpan, ());\n  MOCK_METHOD(Tracing::Config&, tracingConfig, ());\n  MOCK_METHOD(const ScopeTrackedObject&, scope, ());\n  MOCK_METHOD(void, onEncoderFilterAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, onEncoderFilterBelowWriteBufferLowWatermark, ());\n  MOCK_METHOD(void, setEncoderBufferLimit, (uint32_t));\n  MOCK_METHOD(uint32_t, encoderBufferLimit, ());\n\n  // Http::StreamEncoderFilterCallbacks\n  MOCK_METHOD(void, addEncodedData, (Buffer::Instance & data, bool streaming));\n  MOCK_METHOD(void, injectEncodedDataToFilterChain, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(ResponseTrailerMap&, addEncodedTrailers, ());\n  MOCK_METHOD(void, addEncodedMetadata, (Http::MetadataMapPtr &&));\n  MOCK_METHOD(void, continueEncoding, ());\n  MOCK_METHOD(const Buffer::Instance*, encodingBuffer, ());\n  MOCK_METHOD(void, modifyEncodingBuffer, (std::function<void(Buffer::Instance&)>));\n  MOCK_METHOD(void, sendLocalReply,\n              (Code code, absl::string_view body,\n               std::function<void(ResponseHeaderMap& headers)> modify_headers,\n               const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n               absl::string_view details));\n  MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ());\n\n  Buffer::InstancePtr buffer_;\n  testing::NiceMock<Tracing::MockSpan> active_span_;\n  testing::NiceMock<Tracing::MockConfig> tracing_config_;\n  testing::NiceMock<MockScopedTrackedObject> scope_;\n};\n\nclass MockStreamDecoderFilter : public StreamDecoderFilter {\npublic:\n  MockStreamDecoderFilter();\n  ~MockStreamDecoderFilter() override;\n\n  // Http::StreamFilterBase\n  MOCK_METHOD(void, onStreamComplete, ());\n  MOCK_METHOD(void, onDestroy, ());\n\n  // Http::StreamDecoderFilter\n  
MOCK_METHOD(FilterHeadersStatus, decodeHeaders, (RequestHeaderMap & headers, bool end_stream));\n  MOCK_METHOD(FilterDataStatus, decodeData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(FilterTrailersStatus, decodeTrailers, (RequestTrailerMap & trailers));\n  MOCK_METHOD(FilterMetadataStatus, decodeMetadata, (Http::MetadataMap & metadata_map));\n  MOCK_METHOD(void, setDecoderFilterCallbacks, (StreamDecoderFilterCallbacks & callbacks));\n  MOCK_METHOD(void, decodeComplete, ());\n  MOCK_METHOD(void, sendLocalReply,\n              (bool is_grpc_request, Code code, absl::string_view body,\n               const std::function<void(ResponseHeaderMap& headers)>& modify_headers,\n               bool is_head_request, const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n               absl::string_view details));\n\n  Http::StreamDecoderFilterCallbacks* callbacks_{};\n};\n\nclass MockStreamEncoderFilter : public StreamEncoderFilter {\npublic:\n  MockStreamEncoderFilter();\n  ~MockStreamEncoderFilter() override;\n\n  // Http::StreamFilterBase\n  MOCK_METHOD(void, onStreamComplete, ());\n  MOCK_METHOD(void, onDestroy, ());\n\n  // Http::MockStreamEncoderFilter\n  MOCK_METHOD(FilterHeadersStatus, encode100ContinueHeaders, (ResponseHeaderMap & headers));\n  MOCK_METHOD(FilterHeadersStatus, encodeHeaders, (ResponseHeaderMap & headers, bool end_stream));\n  MOCK_METHOD(FilterDataStatus, encodeData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(FilterTrailersStatus, encodeTrailers, (ResponseTrailerMap & trailers));\n  MOCK_METHOD(FilterMetadataStatus, encodeMetadata, (MetadataMap & metadata_map));\n  MOCK_METHOD(void, setEncoderFilterCallbacks, (StreamEncoderFilterCallbacks & callbacks));\n  MOCK_METHOD(void, encodeComplete, ());\n\n  Http::StreamEncoderFilterCallbacks* callbacks_{};\n};\n\nclass MockStreamFilter : public StreamFilter {\npublic:\n  MockStreamFilter();\n  ~MockStreamFilter() override;\n\n  // Http::StreamFilterBase\n  
MOCK_METHOD(void, onStreamComplete, ());\n  MOCK_METHOD(void, onDestroy, ());\n\n  // Http::StreamDecoderFilter\n  MOCK_METHOD(FilterHeadersStatus, decodeHeaders, (RequestHeaderMap & headers, bool end_stream));\n  MOCK_METHOD(FilterDataStatus, decodeData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(FilterTrailersStatus, decodeTrailers, (RequestTrailerMap & trailers));\n  MOCK_METHOD(FilterMetadataStatus, decodeMetadata, (Http::MetadataMap & metadata_map));\n  MOCK_METHOD(void, setDecoderFilterCallbacks, (StreamDecoderFilterCallbacks & callbacks));\n\n  // Http::MockStreamEncoderFilter\n  MOCK_METHOD(FilterHeadersStatus, encode100ContinueHeaders, (ResponseHeaderMap & headers));\n  MOCK_METHOD(FilterHeadersStatus, encodeHeaders, (ResponseHeaderMap & headers, bool end_stream));\n  MOCK_METHOD(FilterDataStatus, encodeData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(FilterTrailersStatus, encodeTrailers, (ResponseTrailerMap & trailers));\n  MOCK_METHOD(FilterMetadataStatus, encodeMetadata, (MetadataMap & metadata_map));\n  MOCK_METHOD(void, setEncoderFilterCallbacks, (StreamEncoderFilterCallbacks & callbacks));\n\n  Http::StreamDecoderFilterCallbacks* decoder_callbacks_{};\n  Http::StreamEncoderFilterCallbacks* encoder_callbacks_{};\n};\n\nclass MockAsyncClient : public AsyncClient {\npublic:\n  MockAsyncClient();\n  ~MockAsyncClient() override;\n\n  MOCK_METHOD(void, onRequestDestroy, ());\n\n  // Http::AsyncClient\n  Request* send(RequestMessagePtr&& request, Callbacks& callbacks,\n                const RequestOptions& args) override {\n    return send_(request, callbacks, args);\n  }\n\n  MOCK_METHOD(Request*, send_,\n              (RequestMessagePtr & request, Callbacks& callbacks, const RequestOptions& args));\n\n  MOCK_METHOD(Stream*, start, (StreamCallbacks & callbacks, const StreamOptions& args));\n\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n\n  NiceMock<Event::MockDispatcher> dispatcher_;\n};\n\nclass 
MockAsyncClientCallbacks : public AsyncClient::Callbacks {\npublic:\n  MockAsyncClientCallbacks();\n  ~MockAsyncClientCallbacks() override;\n\n  void onSuccess(const Http::AsyncClient::Request& request,\n                 ResponseMessagePtr&& response) override {\n    onSuccess_(request, response.get());\n  }\n\n  // Http::AsyncClient::Callbacks\n  MOCK_METHOD(void, onSuccess_, (const Http::AsyncClient::Request&, ResponseMessage*));\n  MOCK_METHOD(void, onFailure,\n              (const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason));\n  MOCK_METHOD(void, onBeforeFinalizeUpstreamSpan,\n              (Envoy::Tracing::Span&, const Http::ResponseHeaderMap*));\n};\n\nclass MockAsyncClientStreamCallbacks : public AsyncClient::StreamCallbacks {\npublic:\n  MockAsyncClientStreamCallbacks();\n  ~MockAsyncClientStreamCallbacks() override;\n\n  void onHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override {\n    onHeaders_(*headers, end_stream);\n  }\n  void onTrailers(ResponseTrailerMapPtr&& trailers) override { onTrailers_(*trailers); }\n\n  MOCK_METHOD(void, onHeaders_, (ResponseHeaderMap & headers, bool end_stream));\n  MOCK_METHOD(void, onData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, onTrailers_, (ResponseTrailerMap & headers));\n  MOCK_METHOD(void, onComplete, ());\n  MOCK_METHOD(void, onReset, ());\n};\n\nclass MockAsyncClientRequest : public AsyncClient::Request {\npublic:\n  MockAsyncClientRequest(MockAsyncClient* client);\n  ~MockAsyncClientRequest() override;\n\n  MOCK_METHOD(void, cancel, ());\n\n  MockAsyncClient* client_;\n};\n\nclass MockAsyncClientStream : public AsyncClient::Stream {\npublic:\n  MockAsyncClientStream();\n  ~MockAsyncClientStream() override;\n\n  MOCK_METHOD(void, sendHeaders, (RequestHeaderMap & headers, bool end_stream));\n  MOCK_METHOD(void, sendData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, sendTrailers, (RequestTrailerMap & trailers));\n  MOCK_METHOD(void, reset, 
());\n  MOCK_METHOD(bool, isAboveWriteBufferHighWatermark, (), (const));\n};\n\nclass MockFilterChainFactoryCallbacks : public Http::FilterChainFactoryCallbacks {\npublic:\n  MockFilterChainFactoryCallbacks();\n  ~MockFilterChainFactoryCallbacks() override;\n\n  MOCK_METHOD(void, addStreamDecoderFilter, (Http::StreamDecoderFilterSharedPtr filter));\n  MOCK_METHOD(void, addStreamEncoderFilter, (Http::StreamEncoderFilterSharedPtr filter));\n  MOCK_METHOD(void, addStreamFilter, (Http::StreamFilterSharedPtr filter));\n  MOCK_METHOD(void, addAccessLogHandler, (AccessLog::InstanceSharedPtr handler));\n};\n\nclass MockDownstreamWatermarkCallbacks : public DownstreamWatermarkCallbacks {\npublic:\n  MOCK_METHOD(void, onAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, onBelowWriteBufferLowWatermark, ());\n};\n\n} // namespace Http\n\nnamespace Http {\n\ntemplate <typename HeaderMapT>\nclass HeaderValueOfMatcherImpl : public testing::MatcherInterface<HeaderMapT> {\npublic:\n  explicit HeaderValueOfMatcherImpl(LowerCaseString key,\n                                    testing::Matcher<absl::string_view> matcher)\n      : key_(std::move(key)), matcher_(std::move(matcher)) {}\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  bool MatchAndExplain(HeaderMapT headers, testing::MatchResultListener* listener) const override {\n    // Get all headers with matching keys.\n    std::vector<absl::string_view> values;\n    Envoy::Http::HeaderMap::ConstIterateCb get_headers_cb =\n        [key = key_.get(), &values](const Envoy::Http::HeaderEntry& header) {\n          if (header.key().getStringView() == key) {\n            values.push_back(header.value().getStringView());\n          }\n          return Envoy::Http::HeaderMap::Iterate::Continue;\n        };\n    headers.iterate(get_headers_cb);\n\n    if (values.empty()) {\n      *listener << \"which has no '\" << key_.get() << \"' header\";\n      return false;\n    } else if (values.size() > 1) {\n      *listener << \"which 
has \" << values.size() << \" '\" << key_.get()\n                << \"' headers, with values: \" << absl::StrJoin(values, \", \");\n      return false;\n    }\n    absl::string_view value = values[0];\n    *listener << \"which has a '\" << key_.get() << \"' header with value \" << value << \" \";\n    return testing::ExplainMatchResult(matcher_, value, listener);\n  }\n\n  void DescribeTo(std::ostream* os) const override {\n    *os << \"has a '\" << key_.get() << \"' header with value that \"\n        << testing::DescribeMatcher<absl::string_view>(matcher_);\n  }\n\n  void DescribeNegationTo(std::ostream* os) const override {\n    *os << \"doesn't have a '\" << key_.get() << \"' header with value that \"\n        << testing::DescribeMatcher<absl::string_view>(matcher_);\n  }\n\nprivate:\n  const LowerCaseString key_;\n  const testing::Matcher<absl::string_view> matcher_;\n};\n\nclass HeaderValueOfMatcher {\npublic:\n  explicit HeaderValueOfMatcher(LowerCaseString key, testing::Matcher<absl::string_view> matcher)\n      : key_(std::move(key)), matcher_(std::move(matcher)) {}\n\n  // Produces a testing::Matcher that is parameterized by HeaderMap& or const\n  // HeaderMap& as requested. This is required since testing::Matcher<const T&>\n  // is not implicitly convertible to testing::Matcher<T&>.\n  template <typename HeaderMapT> operator testing::Matcher<HeaderMapT>() const {\n    return testing::Matcher<HeaderMapT>(new HeaderValueOfMatcherImpl<HeaderMapT>(key_, matcher_));\n  }\n\nprivate:\n  const LowerCaseString key_;\n  const testing::Matcher<absl::string_view> matcher_;\n};\n\n// Test that a HeaderMap argument contains exactly one header with the given\n// key, whose value satisfies the given expectation. 
The expectation can be a\n// matcher, or a string that the value should equal.\ntemplate <typename T, typename K> HeaderValueOfMatcher HeaderValueOf(K key, const T& matcher) {\n  return HeaderValueOfMatcher(LowerCaseString(key),\n                              testing::SafeMatcherCast<absl::string_view>(matcher));\n}\n\n// Tests the provided Envoy HeaderMap for the provided HTTP status code.\nMATCHER_P(HttpStatusIs, expected_code, \"\") {\n  const HeaderEntry* status = arg.Status();\n  if (status == nullptr) {\n    *result_listener << \"which has no status code\";\n    return false;\n  }\n  const absl::string_view code = status->value().getStringView();\n  if (code != absl::StrCat(expected_code)) {\n    *result_listener << \"which has status code \" << code;\n    return false;\n  }\n  return true;\n}\n\ninline HeaderMap::ConstIterateCb\nsaveHeaders(std::vector<std::pair<absl::string_view, absl::string_view>>* output) {\n  return [output](const HeaderEntry& header) {\n    output->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView()));\n    return HeaderMap::Iterate::Continue;\n  };\n}\n\ntemplate <typename HeaderMapT>\nclass IsSubsetOfHeadersMatcherImpl : public testing::MatcherInterface<HeaderMapT> {\npublic:\n  explicit IsSubsetOfHeadersMatcherImpl(const HeaderMap& expected_headers)\n      : expected_headers_(expected_headers) {}\n\n  IsSubsetOfHeadersMatcherImpl(IsSubsetOfHeadersMatcherImpl&& other) noexcept\n      : expected_headers_(other.expected_headers_) {}\n\n  IsSubsetOfHeadersMatcherImpl(const IsSubsetOfHeadersMatcherImpl& other)\n      : expected_headers_(other.expected_headers_) {}\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  bool MatchAndExplain(HeaderMapT headers, testing::MatchResultListener* listener) const override {\n    // Collect header maps into vectors, to use for IsSubsetOf.\n    std::vector<std::pair<absl::string_view, absl::string_view>> arg_headers_vec;\n    
headers.iterate(saveHeaders(&arg_headers_vec));\n\n    std::vector<std::pair<absl::string_view, absl::string_view>> expected_headers_vec;\n    expected_headers_.iterate(saveHeaders(&expected_headers_vec));\n\n    return ExplainMatchResult(testing::IsSubsetOf(expected_headers_vec), arg_headers_vec, listener);\n  }\n\n  void DescribeTo(std::ostream* os) const override {\n    *os << \"is a subset of headers:\\n\" << expected_headers_;\n  }\n\n  const TestRequestHeaderMapImpl expected_headers_;\n};\n\nclass IsSubsetOfHeadersMatcher {\npublic:\n  IsSubsetOfHeadersMatcher(const HeaderMap& expected_headers)\n      : expected_headers_(expected_headers) {}\n\n  IsSubsetOfHeadersMatcher(IsSubsetOfHeadersMatcher&& other) noexcept\n      : expected_headers_(static_cast<const HeaderMap&>(other.expected_headers_)) {}\n\n  IsSubsetOfHeadersMatcher(const IsSubsetOfHeadersMatcher& other)\n      : expected_headers_(static_cast<const HeaderMap&>(other.expected_headers_)) {}\n\n  template <typename HeaderMapT> operator testing::Matcher<HeaderMapT>() const {\n    return testing::MakeMatcher(new IsSubsetOfHeadersMatcherImpl<HeaderMapT>(expected_headers_));\n  }\n\nprivate:\n  TestRequestHeaderMapImpl expected_headers_;\n};\n\nIsSubsetOfHeadersMatcher IsSubsetOfHeaders(const HeaderMap& expected_headers);\n\ntemplate <typename HeaderMapT>\nclass IsSupersetOfHeadersMatcherImpl : public testing::MatcherInterface<HeaderMapT> {\npublic:\n  explicit IsSupersetOfHeadersMatcherImpl(const HeaderMap& expected_headers)\n      : expected_headers_(expected_headers) {}\n\n  IsSupersetOfHeadersMatcherImpl(IsSupersetOfHeadersMatcherImpl&& other) noexcept\n      : expected_headers_(other.expected_headers_) {}\n\n  IsSupersetOfHeadersMatcherImpl(const IsSupersetOfHeadersMatcherImpl& other)\n      : expected_headers_(other.expected_headers_) {}\n\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  bool MatchAndExplain(HeaderMapT headers, testing::MatchResultListener* listener) const override {\n    // 
Collect header maps into vectors, to use for IsSupersetOf.\n    std::vector<std::pair<absl::string_view, absl::string_view>> arg_headers_vec;\n    headers.iterate(saveHeaders(&arg_headers_vec));\n\n    std::vector<std::pair<absl::string_view, absl::string_view>> expected_headers_vec;\n    expected_headers_.iterate(saveHeaders(&expected_headers_vec));\n\n    return ExplainMatchResult(testing::IsSupersetOf(expected_headers_vec), arg_headers_vec,\n                              listener);\n  }\n\n  void DescribeTo(std::ostream* os) const override {\n    *os << \"is a superset of headers:\\n\" << expected_headers_;\n  }\n\n  const TestRequestHeaderMapImpl expected_headers_;\n};\n\nclass IsSupersetOfHeadersMatcher {\npublic:\n  IsSupersetOfHeadersMatcher(const HeaderMap& expected_headers)\n      : expected_headers_(expected_headers) {}\n\n  IsSupersetOfHeadersMatcher(IsSupersetOfHeadersMatcher&& other) noexcept\n      : expected_headers_(static_cast<const HeaderMap&>(other.expected_headers_)) {}\n\n  IsSupersetOfHeadersMatcher(const IsSupersetOfHeadersMatcher& other)\n      : expected_headers_(static_cast<const HeaderMap&>(other.expected_headers_)) {}\n\n  template <typename HeaderMapT> operator testing::Matcher<HeaderMapT>() const {\n    return testing::MakeMatcher(new IsSupersetOfHeadersMatcherImpl<HeaderMapT>(expected_headers_));\n  }\n\nprivate:\n  TestRequestHeaderMapImpl expected_headers_;\n};\n\nIsSupersetOfHeadersMatcher IsSupersetOfHeaders(const HeaderMap& expected_headers);\n\n} // namespace Http\n\nMATCHER_P(HeaderMapEqual, rhs, \"\") {\n  const bool equal = (*arg == *rhs);\n  if (!equal) {\n    *result_listener << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"header map:\") << \"\\n\"\n                     << *rhs << TestUtility::addLeftAndRightPadding(\"is not equal to:\") << \"\\n\"\n                     << *arg << TestUtility::addLeftAndRightPadding(\"\") // line full of padding\n                     << \"\\n\";\n  }\n  return 
equal;\n}\n\nMATCHER_P(HeaderMapEqualRef, rhs, \"\") {\n  const bool equal = (arg == *rhs);\n  if (!equal) {\n    *result_listener << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"header map:\") << \"\\n\"\n                     << *rhs << TestUtility::addLeftAndRightPadding(\"is not equal to:\") << \"\\n\"\n                     << arg << TestUtility::addLeftAndRightPadding(\"\") // line full of padding\n                     << \"\\n\";\n  }\n  return equal;\n}\n\n// Test that a HeaderMapPtr argument includes a given key-value pair, e.g.,\n//  HeaderHasValue(\"Upgrade\", \"WebSocket\")\ntemplate <typename K, typename V>\ntesting::Matcher<const Http::HeaderMap*> HeaderHasValue(K key, V value) {\n  return testing::Pointee(Http::HeaderValueOf(key, value));\n}\n\n// Like HeaderHasValue, but matches against a HeaderMap& argument.\ntemplate <typename K, typename V> Http::HeaderValueOfMatcher HeaderHasValueRef(K key, V value) {\n  return Http::HeaderValueOf(key, value);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/mocks_test.cc",
    "content": "#include \"envoy/http/header_map.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nusing ::testing::_;\nusing ::testing::Not;\n\nnamespace Http {\nTEST(HeaderValueOfTest, ConstHeaderMap) {\n  const TestRequestHeaderMapImpl header_map{{\"key\", \"expected value\"}};\n\n  // Positive checks.\n  EXPECT_THAT(header_map, HeaderValueOf(\"key\", \"expected value\"));\n  EXPECT_THAT(header_map, HeaderValueOf(\"key\", _));\n\n  // Negative checks.\n  EXPECT_THAT(header_map, Not(HeaderValueOf(\"key\", \"other value\")));\n  EXPECT_THAT(header_map, Not(HeaderValueOf(\"other key\", _)));\n}\n\nTEST(HeaderValueOfTest, MutableHeaderMap) {\n  TestRequestHeaderMapImpl header_map;\n\n  // Negative checks.\n  EXPECT_THAT(header_map, Not(HeaderValueOf(\"key\", \"other value\")));\n  EXPECT_THAT(header_map, Not(HeaderValueOf(\"other key\", _)));\n\n  header_map.addCopy(\"key\", \"expected value\");\n\n  // Positive checks.\n  EXPECT_THAT(header_map, HeaderValueOf(\"key\", \"expected value\"));\n  EXPECT_THAT(header_map, HeaderValueOf(\"key\", _));\n}\n\nTEST(HeaderValueOfTest, LowerCaseString) {\n  TestRequestHeaderMapImpl header_map;\n  LowerCaseString key(\"key\");\n  LowerCaseString other_key(\"other_key\");\n\n  // Negative checks.\n  EXPECT_THAT(header_map, Not(HeaderValueOf(key, \"other value\")));\n  EXPECT_THAT(header_map, Not(HeaderValueOf(other_key, _)));\n\n  header_map.addCopy(key, \"expected value\");\n  header_map.addCopy(other_key, \"ValUe\");\n\n  // Positive checks.\n  EXPECT_THAT(header_map, HeaderValueOf(key, \"expected value\"));\n  EXPECT_THAT(header_map, HeaderValueOf(other_key, _));\n}\n\nTEST(HttpStatusIsTest, CheckStatus) {\n  TestResponseHeaderMapImpl header_map;\n  const auto status_matcher = HttpStatusIs(200);\n\n  EXPECT_THAT(header_map, Not(status_matcher));\n\n  header_map.addCopy(Headers::get().Status, \"200\");\n\n  EXPECT_THAT(header_map, 
status_matcher);\n}\n\nTEST(IsSubsetOfHeadersTest, ConstHeaderMap) {\n  const TestRequestHeaderMapImpl header_map{{\"first key\", \"1\"}};\n\n  EXPECT_THAT(header_map, IsSubsetOfHeaders(TestRequestHeaderMapImpl{{\"first key\", \"1\"}}));\n  EXPECT_THAT(header_map,\n              IsSubsetOfHeaders(TestRequestHeaderMapImpl{{\"first key\", \"1\"}, {\"second key\", \"2\"}}));\n\n  EXPECT_THAT(header_map, Not(IsSubsetOfHeaders(TestRequestHeaderMapImpl{{\"third key\", \"1\"}})));\n}\n\nTEST(IsSubsetOfHeadersTest, MutableHeaderMap) {\n  TestRequestHeaderMapImpl header_map;\n  header_map.addCopy(\"first key\", \"1\");\n\n  EXPECT_THAT(header_map, IsSubsetOfHeaders(TestRequestHeaderMapImpl{{\"first key\", \"1\"}}));\n  EXPECT_THAT(header_map,\n              IsSubsetOfHeaders(TestRequestHeaderMapImpl{{\"first key\", \"1\"}, {\"second key\", \"2\"}}));\n\n  EXPECT_THAT(header_map, Not(IsSubsetOfHeaders(TestRequestHeaderMapImpl{{\"third key\", \"1\"}})));\n}\n\nTEST(IsSupersetOfHeadersTest, ConstHeaderMap) {\n  const TestRequestHeaderMapImpl header_map{{\"first key\", \"1\"}, {\"second key\", \"2\"}};\n\n  EXPECT_THAT(header_map, IsSupersetOfHeaders(\n                              TestRequestHeaderMapImpl{{\"first key\", \"1\"}, {\"second key\", \"2\"}}));\n  EXPECT_THAT(header_map, IsSupersetOfHeaders(TestRequestHeaderMapImpl{{\"first key\", \"1\"}}));\n\n  EXPECT_THAT(header_map, Not(IsSupersetOfHeaders(TestRequestHeaderMapImpl{{\"third key\", \"1\"}})));\n}\n\nTEST(IsSupersetOfHeadersTest, MutableHeaderMap) {\n  TestRequestHeaderMapImpl header_map;\n  header_map.addCopy(\"first key\", \"1\");\n  header_map.addCopy(\"second key\", \"2\");\n\n  EXPECT_THAT(header_map, IsSupersetOfHeaders(\n                              TestRequestHeaderMapImpl{{\"first key\", \"1\"}, {\"second key\", \"2\"}}));\n  EXPECT_THAT(header_map, IsSupersetOfHeaders(TestRequestHeaderMapImpl{{\"first key\", \"1\"}}));\n\n  EXPECT_THAT(header_map, 
Not(IsSupersetOfHeaders(TestRequestHeaderMapImpl{{\"third key\", \"1\"}})));\n}\n} // namespace Http\n\nTEST(HeaderHasValueRefTest, MutableValueRef) {\n  Http::TestRequestHeaderMapImpl header_map;\n\n  EXPECT_THAT(header_map, Not(HeaderHasValueRef(\"key\", \"value\")));\n  EXPECT_THAT(header_map, Not(HeaderHasValueRef(\"other key\", \"value\")));\n\n  header_map.addCopy(\"key\", \"value\");\n\n  EXPECT_THAT(header_map, HeaderHasValueRef(\"key\", \"value\"));\n  EXPECT_THAT(header_map, Not(HeaderHasValueRef(\"key\", \"wrong value\")));\n}\n\nTEST(HeaderHasValueRefTest, ConstValueRef) {\n  const Http::TestRequestHeaderMapImpl header_map{{\"key\", \"expected value\"}};\n\n  EXPECT_THAT(header_map, Not(HeaderHasValueRef(\"key\", \"other value\")));\n  EXPECT_THAT(header_map, HeaderHasValueRef(\"key\", \"expected value\"));\n}\n\nTEST(HeaderHasValueRefTest, LowerCaseStringArguments) {\n  Http::LowerCaseString key(\"key\"), other_key(\"other key\");\n  Http::TestRequestHeaderMapImpl header_map;\n\n  EXPECT_THAT(header_map, Not(HeaderHasValueRef(key, \"value\")));\n  EXPECT_THAT(header_map, Not(HeaderHasValueRef(other_key, \"value\")));\n\n  header_map.addCopy(key, \"value\");\n\n  EXPECT_THAT(header_map, HeaderHasValueRef(key, \"value\"));\n  EXPECT_THAT(header_map, Not(HeaderHasValueRef(other_key, \"wrong value\")));\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/stream.cc",
    "content": "#include \"test/mocks/http/stream.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Http {\n\nMockStream::MockStream() {\n  ON_CALL(*this, addCallbacks(_)).WillByDefault(Invoke([this](StreamCallbacks& callbacks) -> void {\n    callbacks_.push_back(&callbacks);\n  }));\n\n  ON_CALL(*this, removeCallbacks(_))\n      .WillByDefault(\n          Invoke([this](StreamCallbacks& callbacks) -> void { callbacks_.remove(&callbacks); }));\n\n  ON_CALL(*this, resetStream(_)).WillByDefault(Invoke([this](StreamResetReason reason) -> void {\n    for (StreamCallbacks* callbacks : callbacks_) {\n      callbacks->onResetStream(reason, absl::string_view());\n    }\n  }));\n\n  ON_CALL(*this, connectionLocalAddress()).WillByDefault(ReturnRef(connection_local_address_));\n}\n\nMockStream::~MockStream() = default;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/stream.h",
    "content": "#pragma once\n\n#include \"envoy/http/codec.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass MockStream : public Stream {\npublic:\n  MockStream();\n  ~MockStream() override;\n\n  // Http::Stream\n  MOCK_METHOD(void, addCallbacks, (StreamCallbacks & callbacks));\n  MOCK_METHOD(void, removeCallbacks, (StreamCallbacks & callbacks));\n  MOCK_METHOD(void, resetStream, (StreamResetReason reason));\n  MOCK_METHOD(void, readDisable, (bool disable));\n  MOCK_METHOD(void, setWriteBufferWatermarks, (uint32_t, uint32_t));\n  MOCK_METHOD(uint32_t, bufferLimit, ());\n  MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, connectionLocalAddress, ());\n  MOCK_METHOD(void, setFlushTimeout, (std::chrono::milliseconds timeout));\n\n  std::list<StreamCallbacks*> callbacks_{};\n  Network::Address::InstanceConstSharedPtr connection_local_address_;\n\n  void runHighWatermarkCallbacks() {\n    for (auto* callback : callbacks_) {\n      callback->onAboveWriteBufferHighWatermark();\n    }\n  }\n\n  void runLowWatermarkCallbacks() {\n    for (auto* callback : callbacks_) {\n      callback->onBelowWriteBufferLowWatermark();\n    }\n  }\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/stream_decoder.cc",
    "content": "#include \"test/mocks/http/stream_decoder.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Http {\n\nMockRequestDecoder::MockRequestDecoder() {\n  ON_CALL(*this, decodeHeaders_(_, _)).WillByDefault(Invoke([](RequestHeaderMapPtr& headers, bool) {\n    // Check to see that method is not-null. Path can be null for CONNECT and authority can be null\n    // at the codec level.\n    ASSERT_NE(nullptr, headers->Method());\n  }));\n}\nMockRequestDecoder::~MockRequestDecoder() = default;\n\nMockResponseDecoder::MockResponseDecoder() {\n  ON_CALL(*this, decodeHeaders_(_, _))\n      .WillByDefault(Invoke(\n          [](ResponseHeaderMapPtr& headers, bool) { ASSERT_NE(nullptr, headers->Status()); }));\n}\nMockResponseDecoder::~MockResponseDecoder() = default;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/stream_decoder.h",
    "content": "#pragma once\n#include \"envoy/http/codec.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass MockRequestDecoder : public RequestDecoder {\npublic:\n  MockRequestDecoder();\n  ~MockRequestDecoder() override;\n\n  void decodeMetadata(MetadataMapPtr&& metadata_map) override { decodeMetadata_(metadata_map); }\n\n  // Http::StreamDecoder\n  MOCK_METHOD(void, decodeData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, decodeMetadata_, (MetadataMapPtr & metadata_map));\n  MOCK_METHOD(void, sendLocalReply,\n              (bool is_grpc_request, Code code, absl::string_view body,\n               const std::function<void(ResponseHeaderMap& headers)>& modify_headers,\n               const absl::optional<Grpc::Status::GrpcStatus> grpc_status,\n               absl::string_view details));\n\n  void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override {\n    decodeHeaders_(headers, end_stream);\n  }\n  void decodeTrailers(RequestTrailerMapPtr&& trailers) override { decodeTrailers_(trailers); }\n\n  // Http::RequestDecoder\n  MOCK_METHOD(void, decodeHeaders_, (RequestHeaderMapPtr & headers, bool end_stream));\n  MOCK_METHOD(void, decodeTrailers_, (RequestTrailerMapPtr & trailers));\n};\n\nclass MockResponseDecoder : public ResponseDecoder {\npublic:\n  MockResponseDecoder();\n  ~MockResponseDecoder() override;\n\n  void decodeMetadata(MetadataMapPtr&& metadata_map) override { decodeMetadata_(metadata_map); }\n\n  // Http::StreamDecoder\n  MOCK_METHOD(void, decodeData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, decodeMetadata_, (MetadataMapPtr & metadata_map));\n\n  void decode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override {\n    decode100ContinueHeaders_(headers);\n  }\n  void decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override {\n    decodeHeaders_(headers, end_stream);\n  }\n  void decodeTrailers(ResponseTrailerMapPtr&& trailers) override { 
decodeTrailers_(trailers); }\n\n  // Http::ResponseDecoder\n  MOCK_METHOD(void, decode100ContinueHeaders_, (ResponseHeaderMapPtr & headers));\n  MOCK_METHOD(void, decodeHeaders_, (ResponseHeaderMapPtr & headers, bool end_stream));\n  MOCK_METHOD(void, decodeTrailers_, (ResponseTrailerMapPtr & trailers));\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/stream_encoder.cc",
    "content": "#include \"test/mocks/http/stream_encoder.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Http {\n\nMockHttp1StreamEncoderOptions::MockHttp1StreamEncoderOptions() = default;\nMockHttp1StreamEncoderOptions::~MockHttp1StreamEncoderOptions() = default;\n\nMockRequestEncoder::MockRequestEncoder() {\n  ON_CALL(*this, getStream()).WillByDefault(ReturnRef(stream_));\n  ON_CALL(*this, encodeHeaders(_, _))\n      .WillByDefault(Invoke([](const RequestHeaderMap& headers, bool) {\n        // Check to see that method is not-null. Path can be null for CONNECT and authority can be\n        // null at the codec level.\n        ASSERT_NE(nullptr, headers.Method());\n      }));\n}\nMockRequestEncoder::~MockRequestEncoder() = default;\n\nMockResponseEncoder::MockResponseEncoder() {\n  ON_CALL(*this, getStream()).WillByDefault(ReturnRef(stream_));\n  ON_CALL(*this, encodeHeaders(_, _))\n      .WillByDefault(Invoke([](const ResponseHeaderMap& headers, bool) {\n        // Check for passing request headers as response headers in a test.\n        ASSERT_NE(nullptr, headers.Status());\n      }));\n}\nMockResponseEncoder::~MockResponseEncoder() = default;\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/http/stream_encoder.h",
    "content": "#pragma once\n\n#include \"envoy/http/codec.h\"\n\n#include \"test/mocks/http/stream.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Http {\n\nclass MockHttp1StreamEncoderOptions : public Http1StreamEncoderOptions {\npublic:\n  MockHttp1StreamEncoderOptions();\n  ~MockHttp1StreamEncoderOptions() override;\n\n  MOCK_METHOD(void, disableChunkEncoding, ());\n};\n\nclass MockRequestEncoder : public RequestEncoder {\npublic:\n  MockRequestEncoder();\n  ~MockRequestEncoder() override;\n\n  // Http::RequestEncoder\n  MOCK_METHOD(void, encodeHeaders, (const RequestHeaderMap& headers, bool end_stream));\n  MOCK_METHOD(void, encodeTrailers, (const RequestTrailerMap& trailers));\n\n  // Http::StreamEncoder\n  MOCK_METHOD(void, encodeData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, encodeMetadata, (const MetadataMapVector& metadata_map_vector));\n  MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ());\n  MOCK_METHOD(Stream&, getStream, (), ());\n\n  testing::NiceMock<MockStream> stream_;\n};\n\nclass MockResponseEncoder : public ResponseEncoder {\npublic:\n  MockResponseEncoder();\n  ~MockResponseEncoder() override;\n\n  // Http::ResponseEncoder\n  MOCK_METHOD(void, encode100ContinueHeaders, (const ResponseHeaderMap& headers));\n  MOCK_METHOD(void, encodeHeaders, (const ResponseHeaderMap& headers, bool end_stream));\n  MOCK_METHOD(void, encodeTrailers, (const ResponseTrailerMap& trailers));\n\n  // Http::StreamEncoder\n  MOCK_METHOD(void, encodeData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, encodeMetadata, (const MetadataMapVector& metadata_map_vector));\n  MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ());\n  MOCK_METHOD(bool, streamErrorOnInvalidHttpMessage, (), (const));\n  MOCK_METHOD(Stream&, getStream, (), ());\n\n  testing::NiceMock<MockStream> stream_;\n};\n\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/init/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"init_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/init:manager_interface\",\n        \"//source/common/init:target_lib\",\n        \"//source/common/init:watcher_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/init/mocks.cc",
    "content": "#include \"test/mocks/init/mocks.h\"\n\nnamespace Envoy {\nnamespace Init {\n\nusing ::testing::Invoke;\n\nExpectableWatcherImpl::ExpectableWatcherImpl(absl::string_view name)\n    : WatcherImpl(name, {[this]() { ready(); }}) {}\n::testing::internal::TypedExpectation<void()>& ExpectableWatcherImpl::expectReady() const {\n  return EXPECT_CALL(*this, ready());\n}\n\nExpectableTargetImpl::ExpectableTargetImpl(absl::string_view name)\n    : TargetImpl(name, {[this]() { initialize(); }}) {}\n::testing::internal::TypedExpectation<void()>& ExpectableTargetImpl::expectInitialize() {\n  return EXPECT_CALL(*this, initialize());\n}\n::testing::internal::TypedExpectation<void()>&\nExpectableTargetImpl::expectInitializeWillCallReady() {\n  return expectInitialize().WillOnce(Invoke([this]() { ready(); }));\n}\n\nExpectableSharedTargetImpl::ExpectableSharedTargetImpl(absl::string_view name)\n    : ExpectableSharedTargetImpl(name, [this]() { initialize(); }) {}\nExpectableSharedTargetImpl::ExpectableSharedTargetImpl(absl::string_view name, InitializeFn fn)\n    : SharedTargetImpl(name, fn) {}\n::testing::internal::TypedExpectation<void()>& ExpectableSharedTargetImpl::expectInitialize() {\n  return EXPECT_CALL(*this, initialize());\n}\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/init/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/init/manager.h\"\n\n#include \"common/init/target_impl.h\"\n#include \"common/init/watcher_impl.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Init {\n\n/**\n * ExpectableWatcherImpl is a real WatcherImpl, subclassed to add a mock `ready` method that you can\n * set expectations on in tests. Tests should never want a watcher with different behavior than the\n * real implementation.\n */\nclass ExpectableWatcherImpl : public WatcherImpl {\npublic:\n  ExpectableWatcherImpl(absl::string_view name = \"test\");\n  MOCK_METHOD(void, ready, (), (const));\n\n  /**\n   * Convenience method to provide a shorthand for EXPECT_CALL(watcher, ready()). Can be chained,\n   * for example: watcher.expectReady().Times(0);\n   */\n  ::testing::internal::TypedExpectation<void()>& expectReady() const;\n};\n\n/**\n * ExpectableTargetImpl is a real TargetImpl, subclassed to add a mock `initialize` method that you\n * can set expectations on in tests. Tests should never want a target with a different behavior than\n * the real implementation.\n */\nclass ExpectableTargetImpl : public TargetImpl {\npublic:\n  ExpectableTargetImpl(absl::string_view name = \"test\");\n  MOCK_METHOD(void, initialize, ());\n\n  /**\n   * Convenience method to provide a shorthand for EXPECT_CALL(target, initialize()). Can be\n   * chained, for example: target.expectInitialize().Times(0);\n   */\n  ::testing::internal::TypedExpectation<void()>& expectInitialize();\n\n  /**\n   * Convenience method to provide a shorthand for expectInitialize() with mocked behavior of\n   * calling `ready` immediately.\n   */\n  ::testing::internal::TypedExpectation<void()>& expectInitializeWillCallReady();\n};\n\n/**\n * Borrow the idea from ExpectableTargetImpl. 
ExpectableSharedTargetImpl is a real SharedTargetImpl.\n */\nclass ExpectableSharedTargetImpl : public SharedTargetImpl {\npublic:\n  ExpectableSharedTargetImpl(absl::string_view name = \"test\");\n  ExpectableSharedTargetImpl(absl::string_view name, InitializeFn fn);\n  MOCK_METHOD(void, initialize, ());\n\n  ::testing::internal::TypedExpectation<void()>& expectInitialize();\n};\n\n/**\n * MockManager is a typical mock. In many cases, it won't be necessary to mock any of its methods.\n * In cases where its `add` and `initialize` methods are actually called in a test, it's usually\n * sufficient to mock `add` by saving the target argument locally, and to mock `initialize` by\n * invoking the saved target with the watcher argument.\n */\nstruct MockManager : Manager {\n  MOCK_METHOD(Manager::State, state, (), (const));\n  MOCK_METHOD(void, add, (const Target&));\n  MOCK_METHOD(void, initialize, (const Watcher&));\n  MOCK_METHOD((const absl::flat_hash_map<std::string, uint32_t>&), unreadyTargets, (), (const));\n  MOCK_METHOD(void, dumpUnreadyTargets, (envoy::admin::v3::UnreadyTargetsDumps&));\n};\n\n} // namespace Init\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/local_info/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"local_info_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//source/common/network:address_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/local_info/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include \"common/network/address_impl.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace LocalInfo {\n\nMockLocalInfo::MockLocalInfo() : address_(new Network::Address::Ipv4Instance(\"127.0.0.1\")) {\n  node_.set_id(\"node_name\");\n  node_.set_cluster(\"cluster_name\");\n  node_.mutable_locality()->set_zone(\"zone_name\");\n  ON_CALL(*this, address()).WillByDefault(Return(address_));\n  ON_CALL(*this, zoneName()).WillByDefault(ReturnRef(node_.locality().zone()));\n  ON_CALL(*this, clusterName()).WillByDefault(ReturnRef(node_.cluster()));\n  ON_CALL(*this, nodeName()).WillByDefault(ReturnRef(node_.id()));\n  ON_CALL(*this, node()).WillByDefault(ReturnRef(node_));\n}\n\nMockLocalInfo::~MockLocalInfo() = default;\n\n} // namespace LocalInfo\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/local_info/mocks.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/local_info/local_info.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace LocalInfo {\n\nclass MockLocalInfo : public LocalInfo {\npublic:\n  MockLocalInfo();\n  ~MockLocalInfo() override;\n\n  MOCK_METHOD(Network::Address::InstanceConstSharedPtr, address, (), (const));\n  MOCK_METHOD(const std::string&, zoneName, (), (const));\n  MOCK_METHOD(const std::string&, clusterName, (), (const));\n  MOCK_METHOD(const std::string&, nodeName, (), (const));\n  MOCK_METHOD(envoy::config::core::v3::Node&, node, (), (const));\n\n  Network::Address::InstanceConstSharedPtr address_;\n  // TODO(htuch): Make this behave closer to the real implementation, with the various property\n  // methods using node_ as the source of truth.\n  envoy::config::core::v3::Node node_;\n};\n\n} // namespace LocalInfo\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/local_reply/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"local_reply_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\"//source/common/local_reply:local_reply_lib\"],\n)\n"
  },
  {
    "path": "test/mocks/local_reply/mocks.cc",
    "content": "#include \"test/mocks/local_reply/mocks.h\"\n\nnamespace Envoy {\nnamespace LocalReply {\nMockLocalReply::MockLocalReply() = default;\nMockLocalReply::~MockLocalReply() = default;\n} // namespace LocalReply\n} // namespace Envoy"
  },
  {
    "path": "test/mocks/local_reply/mocks.h",
    "content": "#include \"common/local_reply/local_reply.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace LocalReply {\nclass MockLocalReply : public LocalReply {\npublic:\n  MockLocalReply();\n  ~MockLocalReply() override;\n\n  MOCK_METHOD(void, rewrite,\n              (const Http::RequestHeaderMap* request_headers,\n               Http::ResponseHeaderMap& response_headers, StreamInfo::StreamInfoImpl& stream_info,\n               Http::Code& code, std::string& body, absl::string_view& content_type),\n              (const));\n};\n} // namespace LocalReply\n} // namespace Envoy"
  },
  {
    "path": "test/mocks/network/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"connection_mocks\",\n    srcs = [\"connection.cc\"],\n    hdrs = [\"connection.h\"],\n    deps = [\n        \"//include/envoy/network:connection_interface\",\n        \"//source/common/network:filter_manager_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"io_handle_mocks\",\n    srcs = [\"io_handle.cc\"],\n    hdrs = [\"io_handle.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/network:io_handle_interface\",\n        \"//source/common/buffer:buffer_lib\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"socket_mocks\",\n    srcs = [\"socket.cc\"],\n    hdrs = [\"socket.h\"],\n    deps = [\n        \":io_handle_mocks\",\n        \"//include/envoy/network:socket_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"network_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \":connection_mocks\",\n        \":transport_socket_mocks\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/network:connection_interface\",\n        \"//include/envoy/network:drain_decision_interface\",\n        \"//include/envoy/network:filter_interface\",\n        \"//include/envoy/network:resolver_interface\",\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/test_common:printers_lib\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"transport_socket_mocks\",\n    srcs = [\"transport_socket.cc\"],\n    hdrs = [\"transport_socket.h\"],\n    deps = [\n        \"//include/envoy/network:transport_socket_interface\",\n        \"//source/common/network:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/network/connection.cc",
    "content": "#include \"test/mocks/network/connection.h\"\n\nusing testing::Const;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Network {\n\nMockConnectionCallbacks::MockConnectionCallbacks() = default;\nMockConnectionCallbacks::~MockConnectionCallbacks() = default;\n\nuint64_t MockConnectionBase::next_id_;\n\nvoid MockConnectionBase::raiseEvent(Network::ConnectionEvent event) {\n  if (event == Network::ConnectionEvent::RemoteClose ||\n      event == Network::ConnectionEvent::LocalClose) {\n    if (state_ == Connection::State::Closed) {\n      return;\n    }\n\n    state_ = Connection::State::Closed;\n  }\n\n  for (Network::ConnectionCallbacks* callbacks : callbacks_) {\n    callbacks->onEvent(event);\n  }\n}\n\nvoid MockConnectionBase::raiseBytesSentCallbacks(uint64_t num_bytes) {\n  for (Network::Connection::BytesSentCb& cb : bytes_sent_callbacks_) {\n    cb(num_bytes);\n  }\n}\n\nvoid MockConnectionBase::runHighWatermarkCallbacks() {\n  for (auto* callback : callbacks_) {\n    callback->onAboveWriteBufferHighWatermark();\n  }\n}\n\nvoid MockConnectionBase::runLowWatermarkCallbacks() {\n  for (auto* callback : callbacks_) {\n    callback->onBelowWriteBufferLowWatermark();\n  }\n}\n\ntemplate <class T> static void initializeMockConnection(T& connection) {\n  ON_CALL(connection, dispatcher()).WillByDefault(ReturnRef(connection.dispatcher_));\n  ON_CALL(connection, readEnabled()).WillByDefault(ReturnPointee(&connection.read_enabled_));\n  ON_CALL(connection, addConnectionCallbacks(_))\n      .WillByDefault(Invoke([&connection](Network::ConnectionCallbacks& callbacks) -> void {\n        connection.callbacks_.push_back(&callbacks);\n      }));\n  ON_CALL(connection, addBytesSentCallback(_))\n      .WillByDefault(Invoke([&connection](Network::Connection::BytesSentCb cb) {\n        connection.bytes_sent_callbacks_.emplace_back(cb);\n      }));\n  ON_CALL(connection, 
close(_)).WillByDefault(Invoke([&connection](ConnectionCloseType) -> void {\n    connection.raiseEvent(Network::ConnectionEvent::LocalClose);\n  }));\n  ON_CALL(connection, remoteAddress()).WillByDefault(ReturnRef(connection.remote_address_));\n  ON_CALL(connection, directRemoteAddress()).WillByDefault(ReturnRef(connection.remote_address_));\n  ON_CALL(connection, localAddress()).WillByDefault(ReturnRef(connection.local_address_));\n  ON_CALL(connection, id()).WillByDefault(Return(connection.next_id_));\n  ON_CALL(connection, state()).WillByDefault(ReturnPointee(&connection.state_));\n\n  // The real implementation will move the buffer data into the socket.\n  ON_CALL(connection, write(_, _)).WillByDefault(Invoke([](Buffer::Instance& buffer, bool) -> void {\n    buffer.drain(buffer.length());\n  }));\n\n  ON_CALL(connection, streamInfo()).WillByDefault(ReturnRef(connection.stream_info_));\n  ON_CALL(Const(connection), streamInfo()).WillByDefault(ReturnRef(connection.stream_info_));\n}\n\nMockConnection::MockConnection() {\n  remote_address_ = Utility::resolveUrl(\"tcp://10.0.0.3:50000\");\n  initializeMockConnection(*this);\n}\nMockConnection::~MockConnection() = default;\n\nMockClientConnection::MockClientConnection() {\n  remote_address_ = Utility::resolveUrl(\"tcp://10.0.0.1:443\");\n  local_address_ = Utility::resolveUrl(\"tcp://10.0.0.2:40000\");\n  initializeMockConnection(*this);\n}\n\nMockClientConnection::~MockClientConnection() = default;\n\nMockFilterManagerConnection::MockFilterManagerConnection() {\n  remote_address_ = Utility::resolveUrl(\"tcp://10.0.0.3:50000\");\n  initializeMockConnection(*this);\n\n  // The real implementation will move the buffer data into the socket.\n  ON_CALL(*this, rawWrite(_, _)).WillByDefault(Invoke([](Buffer::Instance& buffer, bool) -> void {\n    buffer.drain(buffer.length());\n  }));\n}\nMockFilterManagerConnection::~MockFilterManagerConnection() = default;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/network/connection.h",
    "content": "#pragma once\n\n#include <list>\n\n#include \"envoy/network/connection.h\"\n\n#include \"common/network/filter_manager_impl.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass MockConnectionCallbacks : public ConnectionCallbacks {\npublic:\n  MockConnectionCallbacks();\n  ~MockConnectionCallbacks() override;\n\n  // Network::ConnectionCallbacks\n  MOCK_METHOD(void, onEvent, (Network::ConnectionEvent event));\n  MOCK_METHOD(void, onAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, onBelowWriteBufferLowWatermark, ());\n};\n\nclass MockConnectionBase {\npublic:\n  void raiseEvent(Network::ConnectionEvent event);\n  void raiseBytesSentCallbacks(uint64_t num_bytes);\n  void runHighWatermarkCallbacks();\n  void runLowWatermarkCallbacks();\n\n  static uint64_t next_id_;\n\n  testing::NiceMock<Event::MockDispatcher> dispatcher_;\n  std::list<Network::ConnectionCallbacks*> callbacks_;\n  std::list<Network::Connection::BytesSentCb> bytes_sent_callbacks_;\n  uint64_t id_{next_id_++};\n  Address::InstanceConstSharedPtr remote_address_;\n  Address::InstanceConstSharedPtr local_address_;\n  bool read_enabled_{true};\n  testing::NiceMock<StreamInfo::MockStreamInfo> stream_info_;\n  Connection::State state_{Connection::State::Open};\n};\n\nclass MockConnection : public Connection, public MockConnectionBase {\npublic:\n  MockConnection();\n  ~MockConnection() override;\n\n  // Network::Connection\n  MOCK_METHOD(void, addConnectionCallbacks, (ConnectionCallbacks & cb));\n  MOCK_METHOD(void, addBytesSentCallback, (BytesSentCb cb));\n  MOCK_METHOD(void, addWriteFilter, (WriteFilterSharedPtr filter));\n  MOCK_METHOD(void, addFilter, (FilterSharedPtr filter));\n  MOCK_METHOD(void, addReadFilter, (ReadFilterSharedPtr filter));\n  MOCK_METHOD(void, enableHalfClose, (bool enabled));\n  MOCK_METHOD(void, close, (ConnectionCloseType type));\n  
MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(uint64_t, id, (), (const));\n  MOCK_METHOD(void, hashKey, (std::vector<uint8_t>&), (const));\n  MOCK_METHOD(bool, initializeReadFilters, ());\n  MOCK_METHOD(std::string, nextProtocol, (), (const));\n  MOCK_METHOD(void, noDelay, (bool enable));\n  MOCK_METHOD(void, readDisable, (bool disable));\n  MOCK_METHOD(void, detectEarlyCloseWhenReadDisabled, (bool));\n  MOCK_METHOD(bool, readEnabled, (), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, remoteAddress, (), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, directRemoteAddress, (), (const));\n  MOCK_METHOD(absl::optional<Connection::UnixDomainSocketPeerCredentials>,\n              unixSocketPeerCredentials, (), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, localAddress, (), (const));\n  MOCK_METHOD(void, setConnectionStats, (const ConnectionStats& stats));\n  MOCK_METHOD(Ssl::ConnectionInfoConstSharedPtr, ssl, (), (const));\n  MOCK_METHOD(absl::string_view, requestedServerName, (), (const));\n  MOCK_METHOD(State, state, (), (const));\n  MOCK_METHOD(void, write, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, setBufferLimits, (uint32_t limit));\n  MOCK_METHOD(uint32_t, bufferLimit, (), (const));\n  MOCK_METHOD(bool, localAddressRestored, (), (const));\n  MOCK_METHOD(bool, aboveHighWatermark, (), (const));\n  MOCK_METHOD(const Network::ConnectionSocket::OptionsSharedPtr&, socketOptions, (), (const));\n  MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ());\n  MOCK_METHOD(const StreamInfo::StreamInfo&, streamInfo, (), (const));\n  MOCK_METHOD(void, setDelayedCloseTimeout, (std::chrono::milliseconds));\n  MOCK_METHOD(absl::string_view, transportFailureReason, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, lastRoundTripTime, (), (const));\n};\n\n/**\n * NOTE: MockClientConnection duplicated most of MockConnection due to the fact that NiceMock\n *       cannot be 
reliably used on base class methods.\n */\nclass MockClientConnection : public ClientConnection, public MockConnectionBase {\npublic:\n  MockClientConnection();\n  ~MockClientConnection() override;\n\n  // Network::Connection\n  MOCK_METHOD(void, addConnectionCallbacks, (ConnectionCallbacks & cb));\n  MOCK_METHOD(void, addBytesSentCallback, (BytesSentCb cb));\n  MOCK_METHOD(void, addWriteFilter, (WriteFilterSharedPtr filter));\n  MOCK_METHOD(void, addFilter, (FilterSharedPtr filter));\n  MOCK_METHOD(void, addReadFilter, (ReadFilterSharedPtr filter));\n  MOCK_METHOD(void, enableHalfClose, (bool enabled));\n  MOCK_METHOD(void, close, (ConnectionCloseType type));\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(uint64_t, id, (), (const));\n  MOCK_METHOD(void, hashKey, (std::vector<uint8_t>&), (const));\n  MOCK_METHOD(bool, initializeReadFilters, ());\n  MOCK_METHOD(std::string, nextProtocol, (), (const));\n  MOCK_METHOD(void, noDelay, (bool enable));\n  MOCK_METHOD(void, readDisable, (bool disable));\n  MOCK_METHOD(void, detectEarlyCloseWhenReadDisabled, (bool));\n  MOCK_METHOD(bool, readEnabled, (), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, remoteAddress, (), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, directRemoteAddress, (), (const));\n  MOCK_METHOD(absl::optional<Connection::UnixDomainSocketPeerCredentials>,\n              unixSocketPeerCredentials, (), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, localAddress, (), (const));\n  MOCK_METHOD(void, setConnectionStats, (const ConnectionStats& stats));\n  MOCK_METHOD(Ssl::ConnectionInfoConstSharedPtr, ssl, (), (const));\n  MOCK_METHOD(absl::string_view, requestedServerName, (), (const));\n  MOCK_METHOD(State, state, (), (const));\n  MOCK_METHOD(void, write, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, setBufferLimits, (uint32_t limit));\n  MOCK_METHOD(uint32_t, bufferLimit, (), (const));\n  MOCK_METHOD(bool, 
localAddressRestored, (), (const));\n  MOCK_METHOD(bool, aboveHighWatermark, (), (const));\n  MOCK_METHOD(const Network::ConnectionSocket::OptionsSharedPtr&, socketOptions, (), (const));\n  MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ());\n  MOCK_METHOD(const StreamInfo::StreamInfo&, streamInfo, (), (const));\n  MOCK_METHOD(void, setDelayedCloseTimeout, (std::chrono::milliseconds));\n  MOCK_METHOD(absl::string_view, transportFailureReason, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, lastRoundTripTime, (), (const));\n\n  // Network::ClientConnection\n  MOCK_METHOD(void, connect, ());\n};\n\n/**\n * NOTE: MockFilterManagerConnection duplicated most of MockConnection due to the fact that\n *       NiceMock cannot be reliably used on base class methods.\n */\nclass MockFilterManagerConnection : public FilterManagerConnection, public MockConnectionBase {\npublic:\n  MockFilterManagerConnection();\n  ~MockFilterManagerConnection() override;\n\n  // Network::Connection\n  MOCK_METHOD(void, addConnectionCallbacks, (ConnectionCallbacks & cb));\n  MOCK_METHOD(void, addBytesSentCallback, (BytesSentCb cb));\n  MOCK_METHOD(void, addWriteFilter, (WriteFilterSharedPtr filter));\n  MOCK_METHOD(void, addFilter, (FilterSharedPtr filter));\n  MOCK_METHOD(void, addReadFilter, (ReadFilterSharedPtr filter));\n  MOCK_METHOD(void, enableHalfClose, (bool enabled));\n  MOCK_METHOD(void, close, (ConnectionCloseType type));\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(uint64_t, id, (), (const));\n  MOCK_METHOD(void, hashKey, (std::vector<uint8_t>&), (const));\n  MOCK_METHOD(bool, initializeReadFilters, ());\n  MOCK_METHOD(std::string, nextProtocol, (), (const));\n  MOCK_METHOD(void, noDelay, (bool enable));\n  MOCK_METHOD(void, readDisable, (bool disable));\n  MOCK_METHOD(void, detectEarlyCloseWhenReadDisabled, (bool));\n  MOCK_METHOD(bool, readEnabled, (), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, remoteAddress, 
(), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, directRemoteAddress, (), (const));\n  MOCK_METHOD(absl::optional<Connection::UnixDomainSocketPeerCredentials>,\n              unixSocketPeerCredentials, (), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, localAddress, (), (const));\n  MOCK_METHOD(void, setConnectionStats, (const ConnectionStats& stats));\n  MOCK_METHOD(Ssl::ConnectionInfoConstSharedPtr, ssl, (), (const));\n  MOCK_METHOD(absl::string_view, requestedServerName, (), (const));\n  MOCK_METHOD(State, state, (), (const));\n  MOCK_METHOD(void, write, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, setBufferLimits, (uint32_t limit));\n  MOCK_METHOD(uint32_t, bufferLimit, (), (const));\n  MOCK_METHOD(bool, localAddressRestored, (), (const));\n  MOCK_METHOD(bool, aboveHighWatermark, (), (const));\n  MOCK_METHOD(const Network::ConnectionSocket::OptionsSharedPtr&, socketOptions, (), (const));\n  MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ());\n  MOCK_METHOD(const StreamInfo::StreamInfo&, streamInfo, (), (const));\n  MOCK_METHOD(void, setDelayedCloseTimeout, (std::chrono::milliseconds));\n  MOCK_METHOD(absl::string_view, transportFailureReason, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, lastRoundTripTime, (), (const));\n\n  // Network::FilterManagerConnection\n  MOCK_METHOD(StreamBuffer, getReadBuffer, ());\n  MOCK_METHOD(StreamBuffer, getWriteBuffer, ());\n  MOCK_METHOD(void, rawWrite, (Buffer::Instance & data, bool end_stream));\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/network/io_handle.cc",
    "content": "#include \"test/mocks/network/io_handle.h\"\n\n#include \"envoy/network/address.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nMockIoHandle::MockIoHandle() = default;\nMockIoHandle::~MockIoHandle() = default;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/network/io_handle.h",
    "content": "#pragma once\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/network/io_handle.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass MockIoHandle : public IoHandle {\npublic:\n  MockIoHandle();\n  ~MockIoHandle() override;\n\n  Event::FileEventPtr createFileEvent(Event::Dispatcher& dispatcher, Event::FileReadyCb cb,\n                                      Event::FileTriggerType trigger, uint32_t events) override {\n    return Event::FileEventPtr{createFileEvent_(dispatcher, cb, trigger, events)};\n  }\n\n  MOCK_METHOD(os_fd_t, fdDoNotUse, (), (const));\n  MOCK_METHOD(Api::IoCallUint64Result, close, ());\n  MOCK_METHOD(bool, isOpen, (), (const));\n  MOCK_METHOD(Api::IoCallUint64Result, readv,\n              (uint64_t max_length, Buffer::RawSlice* slices, uint64_t num_slice));\n  MOCK_METHOD(Api::IoCallUint64Result, read, (Buffer::Instance & buffer, uint64_t max_length));\n  MOCK_METHOD(Api::IoCallUint64Result, writev,\n              (const Buffer::RawSlice* slices, uint64_t num_slice));\n  MOCK_METHOD(Api::IoCallUint64Result, write, (Buffer::Instance & buffer));\n  MOCK_METHOD(Api::IoCallUint64Result, sendmsg,\n              (const Buffer::RawSlice* slices, uint64_t num_slice, int flags,\n               const Address::Ip* self_ip, const Address::Instance& peer_address));\n  MOCK_METHOD(Api::IoCallUint64Result, recvmsg,\n              (Buffer::RawSlice * slices, const uint64_t num_slice, uint32_t self_port,\n               RecvMsgOutput& output));\n  MOCK_METHOD(Api::IoCallUint64Result, recvmmsg,\n              (RawSliceArrays & slices, uint32_t self_port, RecvMsgOutput& output));\n  MOCK_METHOD(Api::IoCallUint64Result, recv, (void* buffer, size_t length, int flags));\n  MOCK_METHOD(bool, supportsMmsg, (), (const));\n  MOCK_METHOD(bool, supportsUdpGro, (), (const));\n  MOCK_METHOD(Api::SysCallIntResult, bind, (Address::InstanceConstSharedPtr address));\n  
MOCK_METHOD(Api::SysCallIntResult, listen, (int backlog));\n  MOCK_METHOD(IoHandlePtr, accept, (struct sockaddr * addr, socklen_t* addrlen));\n  MOCK_METHOD(Api::SysCallIntResult, connect, (Address::InstanceConstSharedPtr address));\n  MOCK_METHOD(Api::SysCallIntResult, setOption,\n              (int level, int optname, const void* optval, socklen_t optlen));\n  MOCK_METHOD(Api::SysCallIntResult, getOption,\n              (int level, int optname, void* optval, socklen_t* optlen));\n  MOCK_METHOD(Api::SysCallIntResult, setBlocking, (bool blocking));\n  MOCK_METHOD(absl::optional<int>, domain, ());\n  MOCK_METHOD(Address::InstanceConstSharedPtr, localAddress, ());\n  MOCK_METHOD(Address::InstanceConstSharedPtr, peerAddress, ());\n  MOCK_METHOD(Event::FileEvent*, createFileEvent_,\n              (Event::Dispatcher & dispatcher, Event::FileReadyCb cb,\n               Event::FileTriggerType trigger, uint32_t events));\n  MOCK_METHOD(Api::SysCallIntResult, shutdown, (int how));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, lastRoundTripTime, ());\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/network/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include <cstdint>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/server/listener_manager.h\"\n\n#include \"common/network/address_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/udp_listener_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Network {\n\nMockListenerConfig::MockListenerConfig()\n    : socket_(std::make_shared<testing::NiceMock<MockListenSocket>>()),\n      udp_listener_worker_router_(std::make_unique<UdpListenerWorkerRouterImpl>(1)) {\n  ON_CALL(*this, filterChainFactory()).WillByDefault(ReturnRef(filter_chain_factory_));\n  ON_CALL(*this, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_));\n  ON_CALL(socket_factory_, localAddress()).WillByDefault(ReturnRef(socket_->localAddress()));\n  ON_CALL(socket_factory_, getListenSocket()).WillByDefault(Return(socket_));\n  ON_CALL(socket_factory_, sharedSocket())\n      .WillByDefault(Return(std::reference_wrapper<Socket>(*socket_)));\n  ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(scope_));\n  ON_CALL(*this, name()).WillByDefault(ReturnRef(name_));\n  ON_CALL(*this, udpListenerWorkerRouter()).WillByDefault(Invoke([this]() {\n    return UdpListenerWorkerRouterOptRef(*udp_listener_worker_router_);\n  }));\n}\nMockListenerConfig::~MockListenerConfig() = default;\n\nMockActiveDnsQuery::MockActiveDnsQuery() = default;\nMockActiveDnsQuery::~MockActiveDnsQuery() = default;\n\nMockDnsResolver::MockDnsResolver() {\n  ON_CALL(*this, resolve(_, _, _)).WillByDefault(Return(&active_query_));\n}\n\nMockDnsResolver::~MockDnsResolver() = default;\n\nMockAddressResolver::MockAddressResolver() {\n  ON_CALL(*this, 
name()).WillByDefault(Return(\"envoy.mock.resolver\"));\n}\n\nMockAddressResolver::~MockAddressResolver() = default;\n\nMockReadFilterCallbacks::MockReadFilterCallbacks() {\n  ON_CALL(*this, connection()).WillByDefault(ReturnRef(connection_));\n  ON_CALL(*this, upstreamHost()).WillByDefault(ReturnPointee(&host_));\n  ON_CALL(*this, upstreamHost(_)).WillByDefault(SaveArg<0>(&host_));\n}\n\nMockReadFilterCallbacks::~MockReadFilterCallbacks() = default;\n\nMockReadFilter::MockReadFilter() {\n  ON_CALL(*this, onData(_, _)).WillByDefault(Return(FilterStatus::StopIteration));\n  EXPECT_CALL(*this, initializeReadFilterCallbacks(_))\n      .WillOnce(\n          Invoke([this](ReadFilterCallbacks& callbacks) -> void { callbacks_ = &callbacks; }));\n}\n\nMockReadFilter::~MockReadFilter() = default;\n\nMockWriteFilterCallbacks::MockWriteFilterCallbacks() {\n  ON_CALL(*this, connection()).WillByDefault(ReturnRef(connection_));\n}\n\nMockWriteFilterCallbacks::~MockWriteFilterCallbacks() = default;\n\nMockWriteFilter::MockWriteFilter() {\n  EXPECT_CALL(*this, initializeWriteFilterCallbacks(_))\n      .WillOnce(Invoke(\n          [this](WriteFilterCallbacks& callbacks) -> void { write_callbacks_ = &callbacks; }));\n}\nMockWriteFilter::~MockWriteFilter() = default;\n\nMockFilter::MockFilter() {\n  EXPECT_CALL(*this, initializeReadFilterCallbacks(_))\n      .WillOnce(\n          Invoke([this](ReadFilterCallbacks& callbacks) -> void { callbacks_ = &callbacks; }));\n  EXPECT_CALL(*this, initializeWriteFilterCallbacks(_))\n      .WillOnce(Invoke(\n          [this](WriteFilterCallbacks& callbacks) -> void { write_callbacks_ = &callbacks; }));\n}\n\nMockFilter::~MockFilter() = default;\n\nMockTcpListenerCallbacks::MockTcpListenerCallbacks() = default;\nMockTcpListenerCallbacks::~MockTcpListenerCallbacks() = default;\n\nMockUdpListenerCallbacks::MockUdpListenerCallbacks() = default;\nMockUdpListenerCallbacks::~MockUdpListenerCallbacks() = default;\n\nMockDrainDecision::MockDrainDecision() 
= default;\nMockDrainDecision::~MockDrainDecision() = default;\n\nMockListenerFilter::MockListenerFilter() = default;\nMockListenerFilter::~MockListenerFilter() { destroy_(); }\n\nMockListenerFilterCallbacks::MockListenerFilterCallbacks() {\n  ON_CALL(*this, socket()).WillByDefault(ReturnRef(socket_));\n}\nMockListenerFilterCallbacks::~MockListenerFilterCallbacks() = default;\n\nMockListenerFilterManager::MockListenerFilterManager() = default;\nMockListenerFilterManager::~MockListenerFilterManager() = default;\n\nMockFilterChain::MockFilterChain() = default;\nMockFilterChain::~MockFilterChain() = default;\n\nMockFilterChainManager::MockFilterChainManager() = default;\nMockFilterChainManager::~MockFilterChainManager() = default;\n\nMockFilterChainFactory::MockFilterChainFactory() {\n  ON_CALL(*this, createListenerFilterChain(_)).WillByDefault(Return(true));\n}\nMockFilterChainFactory::~MockFilterChainFactory() = default;\n\nMockListenSocket::MockListenSocket()\n    : io_handle_(std::make_unique<IoSocketHandleImpl>()),\n      local_address_(new Address::Ipv4Instance(80)) {\n  ON_CALL(*this, localAddress()).WillByDefault(ReturnRef(local_address_));\n  ON_CALL(*this, options()).WillByDefault(ReturnRef(options_));\n  ON_CALL(*this, ioHandle()).WillByDefault(ReturnRef(*io_handle_));\n  ON_CALL(testing::Const(*this), ioHandle()).WillByDefault(ReturnRef(*io_handle_));\n  ON_CALL(*this, close()).WillByDefault(Invoke([this]() { socket_is_open_ = false; }));\n  ON_CALL(testing::Const(*this), isOpen()).WillByDefault(Invoke([this]() {\n    return socket_is_open_;\n  }));\n  ON_CALL(*this, ipVersion()).WillByDefault(Return(local_address_->ip()->version()));\n}\n\nMockSocketOption::MockSocketOption() {\n  ON_CALL(*this, setOption(_, _)).WillByDefault(Return(true));\n}\n\nMockSocketOption::~MockSocketOption() = default;\n\nMockConnectionSocket::MockConnectionSocket()\n    : io_handle_(std::make_unique<IoSocketHandleImpl>()),\n      local_address_(new Address::Ipv4Instance(80)),\n  
    remote_address_(new Address::Ipv4Instance(80)) {\n  ON_CALL(*this, localAddress()).WillByDefault(ReturnRef(local_address_));\n  ON_CALL(*this, remoteAddress()).WillByDefault(ReturnRef(remote_address_));\n  ON_CALL(*this, directRemoteAddress()).WillByDefault(ReturnRef(remote_address_));\n  ON_CALL(*this, ioHandle()).WillByDefault(ReturnRef(*io_handle_));\n  ON_CALL(testing::Const(*this), ioHandle()).WillByDefault(ReturnRef(*io_handle_));\n  ON_CALL(*this, ipVersion()).WillByDefault(Return(local_address_->ip()->version()));\n}\n\nMockConnectionSocket::~MockConnectionSocket() = default;\n\nMockListener::MockListener() = default;\n\nMockListener::~MockListener() { onDestroy(); }\n\nMockConnectionHandler::MockConnectionHandler() = default;\nMockConnectionHandler::~MockConnectionHandler() = default;\n\nMockIp::MockIp() = default;\nMockIp::~MockIp() = default;\n\nMockResolvedAddress::MockResolvedAddress(const std::string& logical, const std::string& physical)\n    : logical_(logical), physical_(physical) {}\nMockResolvedAddress::~MockResolvedAddress() = default;\n\nMockTransportSocketCallbacks::MockTransportSocketCallbacks() {\n  ON_CALL(*this, connection()).WillByDefault(ReturnRef(connection_));\n}\nMockTransportSocketCallbacks::~MockTransportSocketCallbacks() = default;\n\nMockUdpPacketWriter::MockUdpPacketWriter() = default;\nMockUdpPacketWriter::~MockUdpPacketWriter() = default;\n\nMockUdpListener::MockUdpListener() {\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n}\n\nMockUdpListener::~MockUdpListener() { onDestroy(); }\n\nMockUdpReadFilterCallbacks::MockUdpReadFilterCallbacks() {\n  ON_CALL(*this, udpListener()).WillByDefault(ReturnRef(udp_listener_));\n}\n\nMockUdpReadFilterCallbacks::~MockUdpReadFilterCallbacks() = default;\n\nMockUdpListenerReadFilter::MockUdpListenerReadFilter(UdpReadFilterCallbacks& callbacks)\n    : UdpListenerReadFilter(callbacks) {}\nMockUdpListenerReadFilter::~MockUdpListenerReadFilter() = 
default;\n\nMockUdpListenerFilterManager::MockUdpListenerFilterManager() = default;\nMockUdpListenerFilterManager::~MockUdpListenerFilterManager() = default;\n\nMockConnectionBalancer::MockConnectionBalancer() = default;\nMockConnectionBalancer::~MockConnectionBalancer() = default;\n\nMockListenerFilterMatcher::MockListenerFilterMatcher() = default;\nMockListenerFilterMatcher::~MockListenerFilterMatcher() = default;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/network/mocks.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <string>\n#include <vector>\n\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/drain_decision.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/resolver.h\"\n#include \"envoy/network/transport_socket.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/network/filter_manager_impl.h\"\n#include \"common/network/socket_interface.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/network/connection.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass MockActiveDnsQuery : public ActiveDnsQuery {\npublic:\n  MockActiveDnsQuery();\n  ~MockActiveDnsQuery() override;\n\n  // Network::ActiveDnsQuery\n  MOCK_METHOD(void, cancel, ());\n};\n\nclass MockDnsResolver : public DnsResolver {\npublic:\n  MockDnsResolver();\n  ~MockDnsResolver() override;\n\n  // Network::DnsResolver\n  MOCK_METHOD(ActiveDnsQuery*, resolve,\n              (const std::string& dns_name, DnsLookupFamily dns_lookup_family, ResolveCb callback));\n\n  testing::NiceMock<MockActiveDnsQuery> active_query_;\n};\n\nclass MockAddressResolver : public Address::Resolver {\npublic:\n  MockAddressResolver();\n  ~MockAddressResolver() override;\n\n  MOCK_METHOD(Address::InstanceConstSharedPtr, resolve,\n              (const envoy::config::core::v3::SocketAddress&));\n  MOCK_METHOD(std::string, name, (), (const));\n};\n\nclass MockReadFilterCallbacks : public ReadFilterCallbacks {\npublic:\n  MockReadFilterCallbacks();\n  ~MockReadFilterCallbacks() override;\n\n  MOCK_METHOD(Connection&, connection, ());\n  MOCK_METHOD(void, continueReading, ());\n  MOCK_METHOD(void, injectReadDataToFilterChain, (Buffer::Instance & data, bool 
end_stream));\n  MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, upstreamHost, ());\n  MOCK_METHOD(void, upstreamHost, (Upstream::HostDescriptionConstSharedPtr host));\n\n  testing::NiceMock<MockConnection> connection_;\n  Upstream::HostDescriptionConstSharedPtr host_;\n};\n\nclass MockReadFilter : public ReadFilter {\npublic:\n  MockReadFilter();\n  ~MockReadFilter() override;\n\n  MOCK_METHOD(FilterStatus, onData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(FilterStatus, onNewConnection, ());\n  MOCK_METHOD(void, initializeReadFilterCallbacks, (ReadFilterCallbacks & callbacks));\n\n  ReadFilterCallbacks* callbacks_{};\n};\n\nclass MockWriteFilterCallbacks : public WriteFilterCallbacks {\npublic:\n  MockWriteFilterCallbacks();\n  ~MockWriteFilterCallbacks() override;\n\n  MOCK_METHOD(Connection&, connection, ());\n  MOCK_METHOD(void, injectWriteDataToFilterChain, (Buffer::Instance & data, bool end_stream));\n\n  testing::NiceMock<MockConnection> connection_;\n};\n\nclass MockWriteFilter : public WriteFilter {\npublic:\n  MockWriteFilter();\n  ~MockWriteFilter() override;\n\n  MOCK_METHOD(FilterStatus, onWrite, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, initializeWriteFilterCallbacks, (WriteFilterCallbacks & callbacks));\n\n  WriteFilterCallbacks* write_callbacks_{};\n};\n\nclass MockFilter : public Filter {\npublic:\n  MockFilter();\n  ~MockFilter() override;\n\n  MOCK_METHOD(FilterStatus, onData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(FilterStatus, onNewConnection, ());\n  MOCK_METHOD(FilterStatus, onWrite, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, initializeReadFilterCallbacks, (ReadFilterCallbacks & callbacks));\n  MOCK_METHOD(void, initializeWriteFilterCallbacks, (WriteFilterCallbacks & callbacks));\n\n  ReadFilterCallbacks* callbacks_{};\n  WriteFilterCallbacks* write_callbacks_{};\n};\n\nclass MockTcpListenerCallbacks : public TcpListenerCallbacks {\npublic:\n  
MockTcpListenerCallbacks();\n  ~MockTcpListenerCallbacks() override;\n\n  void onAccept(ConnectionSocketPtr&& socket) override { onAccept_(socket); }\n\n  MOCK_METHOD(void, onAccept_, (ConnectionSocketPtr & socket));\n  MOCK_METHOD(void, onReject, ());\n};\n\nclass MockUdpListenerCallbacks : public UdpListenerCallbacks {\npublic:\n  MockUdpListenerCallbacks();\n  ~MockUdpListenerCallbacks() override;\n\n  MOCK_METHOD(void, onData, (UdpRecvData && data));\n  MOCK_METHOD(void, onReadReady, ());\n  MOCK_METHOD(void, onWriteReady, (const Socket& socket));\n  MOCK_METHOD(void, onReceiveError, (Api::IoError::IoErrorCode err));\n  MOCK_METHOD(Network::UdpPacketWriter&, udpPacketWriter, ());\n  MOCK_METHOD(uint32_t, workerIndex, (), (const));\n  MOCK_METHOD(void, onDataWorker, (Network::UdpRecvData && data));\n  MOCK_METHOD(void, post, (Network::UdpRecvData && data));\n};\n\nclass MockDrainDecision : public DrainDecision {\npublic:\n  MockDrainDecision();\n  ~MockDrainDecision() override;\n\n  MOCK_METHOD(bool, drainClose, (), (const));\n};\n\nclass MockListenerFilter : public ListenerFilter {\npublic:\n  MockListenerFilter();\n  ~MockListenerFilter() override;\n\n  MOCK_METHOD(void, destroy_, ());\n  MOCK_METHOD(Network::FilterStatus, onAccept, (ListenerFilterCallbacks&));\n};\n\nclass MockListenerFilterManager : public ListenerFilterManager {\npublic:\n  MockListenerFilterManager();\n  ~MockListenerFilterManager() override;\n\n  void addAcceptFilter(const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher,\n                       ListenerFilterPtr&& filter) override {\n    addAcceptFilter_(listener_filter_matcher, filter);\n  }\n\n  MOCK_METHOD(void, addAcceptFilter_,\n              (const Network::ListenerFilterMatcherSharedPtr&, Network::ListenerFilterPtr&));\n};\n\nclass MockFilterChain : public DrainableFilterChain {\npublic:\n  MockFilterChain();\n  ~MockFilterChain() override;\n\n  // Network::DrainableFilterChain\n  MOCK_METHOD(const 
TransportSocketFactory&, transportSocketFactory, (), (const));\n  MOCK_METHOD(const std::vector<FilterFactoryCb>&, networkFilterFactories, (), (const));\n  MOCK_METHOD(void, startDraining, ());\n};\n\nclass MockFilterChainManager : public FilterChainManager {\npublic:\n  MockFilterChainManager();\n  ~MockFilterChainManager() override;\n\n  // Network::FilterChainManager\n  MOCK_METHOD(const FilterChain*, findFilterChain, (const ConnectionSocket& socket), (const));\n};\n\nclass MockFilterChainFactory : public FilterChainFactory {\npublic:\n  MockFilterChainFactory();\n  ~MockFilterChainFactory() override;\n\n  MOCK_METHOD(bool, createNetworkFilterChain,\n              (Connection & connection,\n               const std::vector<Network::FilterFactoryCb>& filter_factories));\n  MOCK_METHOD(bool, createListenerFilterChain, (ListenerFilterManager & listener));\n  MOCK_METHOD(void, createUdpListenerFilterChain,\n              (UdpListenerFilterManager & listener, UdpReadFilterCallbacks& callbacks));\n};\n\nclass MockListenSocket : public Socket {\npublic:\n  MockListenSocket();\n  ~MockListenSocket() override = default;\n\n  void addOption(const Socket::OptionConstSharedPtr& option) override { addOption_(option); }\n  void addOptions(const Socket::OptionsSharedPtr& options) override { addOptions_(options); }\n\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, localAddress, (), (const));\n  MOCK_METHOD(void, setLocalAddress, (const Address::InstanceConstSharedPtr&));\n  MOCK_METHOD(IoHandle&, ioHandle, ());\n  MOCK_METHOD(const IoHandle&, ioHandle, (), (const));\n  MOCK_METHOD(Socket::Type, socketType, (), (const));\n  MOCK_METHOD(Address::Type, addressType, (), (const));\n  MOCK_METHOD(absl::optional<Address::IpVersion>, ipVersion, (), (const));\n  MOCK_METHOD(void, close, ());\n  MOCK_METHOD(bool, isOpen, (), (const));\n  MOCK_METHOD(void, addOption_, (const Socket::OptionConstSharedPtr& option));\n  MOCK_METHOD(void, addOptions_, (const Socket::OptionsSharedPtr& 
options));\n  MOCK_METHOD(const OptionsSharedPtr&, options, (), (const));\n  MOCK_METHOD(IoHandlePtr, socket, (Socket::Type, Address::Type, Address::IpVersion), (const));\n  MOCK_METHOD(IoHandlePtr, socketForAddrPtr, (Socket::Type, const Address::InstanceConstSharedPtr),\n              (const));\n  MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr));\n  MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr));\n  MOCK_METHOD(Api::SysCallIntResult, listen, (int));\n  MOCK_METHOD(Api::SysCallIntResult, setSocketOption, (int, int, const void*, socklen_t));\n  MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*), (const));\n  MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool));\n\n  IoHandlePtr io_handle_;\n  Address::InstanceConstSharedPtr local_address_;\n  OptionsSharedPtr options_;\n  bool socket_is_open_ = true;\n};\n\nclass MockSocketOption : public Socket::Option {\npublic:\n  MockSocketOption();\n  ~MockSocketOption() override;\n\n  MOCK_METHOD(bool, setOption, (Socket&, envoy::config::core::v3::SocketOption::SocketState state),\n              (const));\n  MOCK_METHOD(void, hashKey, (std::vector<uint8_t>&), (const));\n  MOCK_METHOD(absl::optional<Socket::Option::Details>, getOptionDetails,\n              (const Socket&, envoy::config::core::v3::SocketOption::SocketState state), (const));\n};\n\nclass MockConnectionSocket : public ConnectionSocket {\npublic:\n  MockConnectionSocket();\n  ~MockConnectionSocket() override;\n\n  void addOption(const Socket::OptionConstSharedPtr& option) override { addOption_(option); }\n  void addOptions(const Socket::OptionsSharedPtr& options) override { addOptions_(options); }\n\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, localAddress, (), (const));\n  MOCK_METHOD(void, setLocalAddress, (const Address::InstanceConstSharedPtr&));\n  MOCK_METHOD(void, restoreLocalAddress, (const Address::InstanceConstSharedPtr&));\n  
MOCK_METHOD(bool, localAddressRestored, (), (const));\n  MOCK_METHOD(void, setRemoteAddress, (const Address::InstanceConstSharedPtr&));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, remoteAddress, (), (const));\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, directRemoteAddress, (), (const));\n  MOCK_METHOD(void, setDetectedTransportProtocol, (absl::string_view));\n  MOCK_METHOD(absl::string_view, detectedTransportProtocol, (), (const));\n  MOCK_METHOD(void, setRequestedApplicationProtocols, (const std::vector<absl::string_view>&));\n  MOCK_METHOD(const std::vector<std::string>&, requestedApplicationProtocols, (), (const));\n  MOCK_METHOD(void, setRequestedServerName, (absl::string_view));\n  MOCK_METHOD(absl::string_view, requestedServerName, (), (const));\n  MOCK_METHOD(void, addOption_, (const Socket::OptionConstSharedPtr&));\n  MOCK_METHOD(void, addOptions_, (const Socket::OptionsSharedPtr&));\n  MOCK_METHOD(const Network::ConnectionSocket::OptionsSharedPtr&, options, (), (const));\n  MOCK_METHOD(IoHandle&, ioHandle, ());\n  MOCK_METHOD(const IoHandle&, ioHandle, (), (const));\n  MOCK_METHOD(Socket::Type, socketType, (), (const));\n  MOCK_METHOD(Address::Type, addressType, (), (const));\n  MOCK_METHOD(absl::optional<Address::IpVersion>, ipVersion, (), (const));\n  MOCK_METHOD(void, close, ());\n  MOCK_METHOD(bool, isOpen, (), (const));\n  MOCK_METHOD(IoHandlePtr, socket, (Socket::Type, Address::Type, Address::IpVersion), (const));\n  MOCK_METHOD(IoHandlePtr, socketForAddrPtr, (Socket::Type, const Address::InstanceConstSharedPtr),\n              (const));\n  MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr));\n  MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr));\n  MOCK_METHOD(Api::SysCallIntResult, listen, (int));\n  MOCK_METHOD(Api::SysCallIntResult, setSocketOption, (int, int, const void*, socklen_t));\n  MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, 
socklen_t*), (const));\n  MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, lastRoundTripTime, ());\n\n  IoHandlePtr io_handle_;\n  Address::InstanceConstSharedPtr local_address_;\n  Address::InstanceConstSharedPtr remote_address_;\n  bool is_closed_;\n};\n\nclass MockListenerFilterCallbacks : public ListenerFilterCallbacks {\npublic:\n  MockListenerFilterCallbacks();\n  ~MockListenerFilterCallbacks() override;\n\n  MOCK_METHOD(ConnectionSocket&, socket, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(void, continueFilterChain, (bool));\n  MOCK_METHOD(void, setDynamicMetadata, (const std::string&, const ProtobufWkt::Struct&));\n  MOCK_METHOD(envoy::config::core::v3::Metadata&, dynamicMetadata, ());\n  MOCK_METHOD(const envoy::config::core::v3::Metadata&, dynamicMetadata, (), (const));\n\n  NiceMock<MockConnectionSocket> socket_;\n};\n\nclass MockListenSocketFactory : public ListenSocketFactory {\npublic:\n  MockListenSocketFactory() = default;\n\n  MOCK_METHOD(Network::Socket::Type, socketType, (), (const));\n  MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, localAddress, (), (const));\n  MOCK_METHOD(Network::SocketSharedPtr, getListenSocket, ());\n  MOCK_METHOD(SocketOptRef, sharedSocket, (), (const));\n};\n\nclass MockUdpPacketWriterFactory : public UdpPacketWriterFactory {\npublic:\n  MockUdpPacketWriterFactory() = default;\n\n  MOCK_METHOD(Network::UdpPacketWriterPtr, createUdpPacketWriter,\n              (Network::IoHandle&, Stats::Scope&), ());\n};\n\nclass MockListenerConfig : public ListenerConfig {\npublic:\n  MockListenerConfig();\n  ~MockListenerConfig() override;\n\n  MOCK_METHOD(FilterChainManager&, filterChainManager, ());\n  MOCK_METHOD(FilterChainFactory&, filterChainFactory, ());\n  MOCK_METHOD(ListenSocketFactory&, listenSocketFactory, ());\n  MOCK_METHOD(bool, bindToPort, ());\n  MOCK_METHOD(bool, handOffRestoredDestinationConnections, (), 
(const));\n  MOCK_METHOD(uint32_t, perConnectionBufferLimitBytes, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, listenerFiltersTimeout, (), (const));\n  MOCK_METHOD(bool, continueOnListenerFiltersTimeout, (), (const));\n  MOCK_METHOD(Stats::Scope&, listenerScope, ());\n  MOCK_METHOD(uint64_t, listenerTag, (), (const));\n  MOCK_METHOD(const std::string&, name, (), (const));\n  MOCK_METHOD(Network::ActiveUdpListenerFactory*, udpListenerFactory, ());\n  MOCK_METHOD(Network::UdpPacketWriterFactoryOptRef, udpPacketWriterFactory, ());\n  MOCK_METHOD(Network::UdpListenerWorkerRouterOptRef, udpListenerWorkerRouter, ());\n  MOCK_METHOD(ConnectionBalancer&, connectionBalancer, ());\n  MOCK_METHOD(ResourceLimit&, openConnections, ());\n  MOCK_METHOD(uint32_t, tcpBacklogSize, (), (const));\n  MOCK_METHOD(Init::Manager&, initManager, ());\n\n  envoy::config::core::v3::TrafficDirection direction() const override {\n    return envoy::config::core::v3::UNSPECIFIED;\n  }\n\n  const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() const override {\n    return empty_access_logs_;\n  }\n\n  testing::NiceMock<MockFilterChainFactory> filter_chain_factory_;\n  MockListenSocketFactory socket_factory_;\n  SocketSharedPtr socket_;\n  UdpListenerWorkerRouterPtr udp_listener_worker_router_;\n  Stats::IsolatedStoreImpl scope_;\n  std::string name_;\n  const std::vector<AccessLog::InstanceSharedPtr> empty_access_logs_;\n};\n\nclass MockListener : public Listener {\npublic:\n  MockListener();\n  ~MockListener() override;\n\n  MOCK_METHOD(void, onDestroy, ());\n  MOCK_METHOD(void, enable, ());\n  MOCK_METHOD(void, disable, ());\n};\n\nclass MockConnectionHandler : public ConnectionHandler {\npublic:\n  MockConnectionHandler();\n  ~MockConnectionHandler() override;\n\n  MOCK_METHOD(uint64_t, numConnections, (), (const));\n  MOCK_METHOD(void, incNumConnections, ());\n  MOCK_METHOD(void, decNumConnections, ());\n  MOCK_METHOD(void, addListener,\n              
(absl::optional<uint64_t> overridden_listener, ListenerConfig& config));\n  MOCK_METHOD(void, removeListeners, (uint64_t listener_tag));\n  MOCK_METHOD(UdpListenerCallbacksOptRef, getUdpListenerCallbacks, (uint64_t listener_tag));\n  MOCK_METHOD(void, removeFilterChains,\n              (uint64_t listener_tag, const std::list<const Network::FilterChain*>& filter_chains,\n               std::function<void()> completion));\n  MOCK_METHOD(void, stopListeners, (uint64_t listener_tag));\n  MOCK_METHOD(void, stopListeners, ());\n  MOCK_METHOD(void, disableListeners, ());\n  MOCK_METHOD(void, enableListeners, ());\n  MOCK_METHOD(const std::string&, statPrefix, (), (const));\n};\n\nclass MockIp : public Address::Ip {\npublic:\n  MockIp();\n  ~MockIp() override;\n\n  MOCK_METHOD(const std::string&, addressAsString, (), (const));\n  MOCK_METHOD(bool, isAnyAddress, (), (const));\n  MOCK_METHOD(bool, isUnicastAddress, (), (const));\n  MOCK_METHOD(const Address::Ipv4*, ipv4, (), (const));\n  MOCK_METHOD(const Address::Ipv6*, ipv6, (), (const));\n  MOCK_METHOD(uint32_t, port, (), (const));\n  MOCK_METHOD(Address::IpVersion, version, (), (const));\n  MOCK_METHOD(bool, v6only, (), (const));\n};\n\nclass MockResolvedAddress : public Address::Instance {\npublic:\n  MockResolvedAddress(const std::string& logical, const std::string& physical);\n  ~MockResolvedAddress() override;\n\n  bool operator==(const Address::Instance& other) const override {\n    return asString() == other.asString();\n  }\n\n  MOCK_METHOD(Api::SysCallIntResult, bind, (os_fd_t), (const));\n  MOCK_METHOD(Api::SysCallIntResult, connect, (os_fd_t), (const));\n  MOCK_METHOD(const Address::Ip*, ip, (), (const));\n  MOCK_METHOD(const Address::Pipe*, pipe, (), (const));\n  MOCK_METHOD(Address::EnvoyInternalAddress*, envoyInternalAddress, (), (const));\n  MOCK_METHOD(IoHandlePtr, socket, (Socket::Type), (const));\n  MOCK_METHOD(Address::Type, type, (), (const));\n  MOCK_METHOD(const sockaddr*, sockAddr, (), (const));\n  
MOCK_METHOD(socklen_t, sockAddrLen, (), (const));\n\n  const std::string& asString() const override { return physical_; }\n  absl::string_view asStringView() const override { return physical_; }\n  const std::string& logicalName() const override { return logical_; }\n  const Network::SocketInterface& socketInterface() const override {\n    return SocketInterfaceSingleton::get();\n  }\n\n  const std::string logical_;\n  const std::string physical_;\n};\n\nclass MockTransportSocketCallbacks : public TransportSocketCallbacks {\npublic:\n  MockTransportSocketCallbacks();\n  ~MockTransportSocketCallbacks() override;\n\n  MOCK_METHOD(IoHandle&, ioHandle, ());\n  MOCK_METHOD(const IoHandle&, ioHandle, (), (const));\n  MOCK_METHOD(Connection&, connection, ());\n  MOCK_METHOD(bool, shouldDrainReadBuffer, ());\n  MOCK_METHOD(void, setReadBufferReady, ());\n  MOCK_METHOD(void, raiseEvent, (ConnectionEvent));\n  MOCK_METHOD(void, flushWriteBuffer, ());\n\n  testing::NiceMock<MockConnection> connection_;\n};\n\nclass MockUdpPacketWriter : public UdpPacketWriter {\npublic:\n  MockUdpPacketWriter();\n  ~MockUdpPacketWriter() override;\n\n  MOCK_METHOD(Api::IoCallUint64Result, writePacket,\n              (const Buffer::Instance& buffer, const Address::Ip* local_ip,\n               const Address::Instance& peer_address));\n  MOCK_METHOD(bool, isWriteBlocked, (), (const));\n  MOCK_METHOD(void, setWritable, ());\n  MOCK_METHOD(uint64_t, getMaxPacketSize, (const Address::Instance& peer_address), (const));\n  MOCK_METHOD(bool, isBatchMode, (), (const));\n  MOCK_METHOD(Network::UdpPacketWriterBuffer, getNextWriteLocation,\n              (const Address::Ip* local_ip, const Address::Instance& peer_address));\n  MOCK_METHOD(Api::IoCallUint64Result, flush, ());\n};\n\nclass MockUdpListener : public UdpListener {\npublic:\n  MockUdpListener();\n  ~MockUdpListener() override;\n\n  MOCK_METHOD(void, onDestroy, ());\n  MOCK_METHOD(void, enable, ());\n  MOCK_METHOD(void, disable, ());\n  
MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(Address::InstanceConstSharedPtr&, localAddress, (), (const));\n  MOCK_METHOD(Api::IoCallUint64Result, send, (const UdpSendData&));\n  MOCK_METHOD(Api::IoCallUint64Result, flush, ());\n  MOCK_METHOD(void, activateRead, ());\n\n  Event::MockDispatcher dispatcher_;\n};\n\nclass MockUdpReadFilterCallbacks : public UdpReadFilterCallbacks {\npublic:\n  MockUdpReadFilterCallbacks();\n  ~MockUdpReadFilterCallbacks() override;\n\n  MOCK_METHOD(UdpListener&, udpListener, ());\n\n  testing::NiceMock<MockUdpListener> udp_listener_;\n};\n\nclass MockUdpListenerReadFilter : public UdpListenerReadFilter {\npublic:\n  MockUdpListenerReadFilter(UdpReadFilterCallbacks& callbacks);\n  ~MockUdpListenerReadFilter() override;\n\n  MOCK_METHOD(void, onData, (UdpRecvData&));\n};\n\nclass MockUdpListenerFilterManager : public UdpListenerFilterManager {\npublic:\n  MockUdpListenerFilterManager();\n  ~MockUdpListenerFilterManager() override;\n\n  void addReadFilter(UdpListenerReadFilterPtr&& filter) override { addReadFilter_(filter); }\n\n  MOCK_METHOD(void, addReadFilter_, (Network::UdpListenerReadFilterPtr&));\n};\n\nclass MockConnectionBalancer : public ConnectionBalancer {\npublic:\n  MockConnectionBalancer();\n  ~MockConnectionBalancer() override;\n\n  MOCK_METHOD(void, registerHandler, (BalancedConnectionHandler & handler));\n  MOCK_METHOD(void, unregisterHandler, (BalancedConnectionHandler & handler));\n  MOCK_METHOD(BalancedConnectionHandler&, pickTargetHandler,\n              (BalancedConnectionHandler & current_handler));\n};\n\nclass MockListenerFilterMatcher : public ListenerFilterMatcher {\npublic:\n  MockListenerFilterMatcher();\n  ~MockListenerFilterMatcher() override;\n  MOCK_METHOD(bool, matches, (Network::ListenerFilterCallbacks & cb), (const));\n};\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/network/socket.cc",
    "content": "#include \"socket.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nMockSocket::MockSocket() : io_handle_(std::make_unique<MockIoHandle>()) {}\n\nMockSocket::~MockSocket() = default;\n\nIoHandle& MockSocket::ioHandle() { return *io_handle_; };\n\nconst IoHandle& MockSocket::ioHandle() const { return *io_handle_; };\n\nApi::SysCallIntResult MockSocket::setSocketOption(int level, int optname, const void* optval,\n                                                  socklen_t len) {\n  return io_handle_->setOption(level, optname, optval, len);\n}\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/network/socket.h",
    "content": "#pragma once\n\n#include \"envoy/network/socket.h\"\n\n#include \"test/mocks/network/io_handle.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Network {\n\nclass MockSocket : public Socket {\npublic:\n  MockSocket();\n  ~MockSocket() override;\n\n  IoHandle& ioHandle() override;\n  const IoHandle& ioHandle() const override;\n  Api::SysCallIntResult setSocketOption(int level, int optname, const void* optval,\n                                        socklen_t len) override;\n\n  MOCK_METHOD(const Address::InstanceConstSharedPtr&, localAddress, (), (const, override));\n  MOCK_METHOD(void, setLocalAddress, (const Address::InstanceConstSharedPtr&), (override));\n  MOCK_METHOD(Socket::Type, socketType, (), (const, override));\n  MOCK_METHOD(Address::Type, addressType, (), (const, override));\n  MOCK_METHOD(absl::optional<Address::IpVersion>, ipVersion, (), (const, override));\n  MOCK_METHOD(void, close, (), (override));\n  MOCK_METHOD(bool, isOpen, (), (const, override));\n  MOCK_METHOD(const OptionsSharedPtr&, options, (), (const, override));\n  MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr), (override));\n  MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr), (override));\n  MOCK_METHOD(Api::SysCallIntResult, listen, (int), (override));\n  MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*),\n              (const, override));\n  MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool), (override));\n  MOCK_METHOD(void, addOption, (const Socket::OptionConstSharedPtr&), (override));\n  MOCK_METHOD(void, addOptions, (const Socket::OptionsSharedPtr&), (override));\n\n  const std::unique_ptr<MockIoHandle> io_handle_;\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/network/transport_socket.cc",
    "content": "#include \"transport_socket.h\"\n\n#include <cstdint>\n\n#include \"envoy/buffer/buffer.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Network {\n\nMockTransportSocket::MockTransportSocket() {\n  ON_CALL(*this, setTransportSocketCallbacks(_))\n      .WillByDefault(Invoke([&](TransportSocketCallbacks& callbacks) { callbacks_ = &callbacks; }));\n}\nMockTransportSocket::~MockTransportSocket() = default;\n\nMockTransportSocketFactory::MockTransportSocketFactory() = default;\nMockTransportSocketFactory::~MockTransportSocketFactory() = default;\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/network/transport_socket.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <list>\n#include <string>\n#include <vector>\n\n#include \"envoy/network/transport_socket.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Network {\nclass MockTransportSocket : public TransportSocket {\npublic:\n  MockTransportSocket();\n  ~MockTransportSocket() override;\n\n  MOCK_METHOD(bool, implementsSecureTransport, (), (const));\n  MOCK_METHOD(void, setTransportSocketCallbacks, (TransportSocketCallbacks & callbacks));\n  MOCK_METHOD(std::string, protocol, (), (const));\n  MOCK_METHOD(absl::string_view, failureReason, (), (const));\n  MOCK_METHOD(bool, canFlushClose, ());\n  MOCK_METHOD(void, closeSocket, (Network::ConnectionEvent event));\n  MOCK_METHOD(IoResult, doRead, (Buffer::Instance & buffer));\n  MOCK_METHOD(IoResult, doWrite, (Buffer::Instance & buffer, bool end_stream));\n  MOCK_METHOD(void, onConnected, ());\n  MOCK_METHOD(Ssl::ConnectionInfoConstSharedPtr, ssl, (), (const));\n\n  TransportSocketCallbacks* callbacks_{};\n};\n\nclass MockTransportSocketFactory : public TransportSocketFactory {\npublic:\n  MockTransportSocketFactory();\n  ~MockTransportSocketFactory() override;\n\n  MOCK_METHOD(bool, implementsSecureTransport, (), (const));\n  MOCK_METHOD(TransportSocketPtr, createTransportSocket, (TransportSocketOptionsSharedPtr),\n              (const));\n};\n\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/protobuf/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"protobuf_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/protobuf:message_validator_interface\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/protobuf/mocks.cc",
    "content": "#include \"test/mocks/protobuf/mocks.h\"\n\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace ProtobufMessage {\n\nMockValidationVisitor::MockValidationVisitor() = default;\n\nMockValidationVisitor::~MockValidationVisitor() = default;\n\nMockValidationContext::MockValidationContext() {\n  ON_CALL(*this, staticValidationVisitor()).WillByDefault(ReturnRef(static_validation_visitor_));\n  ON_CALL(*this, dynamicValidationVisitor()).WillByDefault(ReturnRef(dynamic_validation_visitor_));\n}\n\nMockValidationContext::~MockValidationContext() = default;\n\n} // namespace ProtobufMessage\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/protobuf/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/protobuf/message_validator.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace ProtobufMessage {\n\nclass MockValidationVisitor : public ValidationVisitor {\npublic:\n  MockValidationVisitor();\n  ~MockValidationVisitor() override;\n\n  MOCK_METHOD(void, onUnknownField, (absl::string_view));\n  MOCK_METHOD(void, onDeprecatedField, (absl::string_view, bool));\n\n  bool skipValidation() override { return skip_validation_; }\n\n  void setSkipValidation(bool s) { skip_validation_ = s; }\n\nprivate:\n  bool skip_validation_ = false;\n};\n\nclass MockValidationContext : public ValidationContext {\npublic:\n  MockValidationContext();\n  ~MockValidationContext() override;\n\n  MOCK_METHOD(ValidationVisitor&, staticValidationVisitor, ());\n  MOCK_METHOD(ValidationVisitor&, dynamicValidationVisitor, ());\n\n  MockValidationVisitor static_validation_visitor_;\n  MockValidationVisitor dynamic_validation_visitor_;\n};\n\n} // namespace ProtobufMessage\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/ratelimit/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"ratelimit_mocks\",\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/ratelimit:ratelimit_interface\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/ratelimit/mocks.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/ratelimit/ratelimit.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace RateLimit {\n\ninline bool operator==(const RateLimitOverride& lhs, const RateLimitOverride& rhs) {\n  return lhs.requests_per_unit_ == rhs.requests_per_unit_ && lhs.unit_ == rhs.unit_;\n}\n\ninline bool operator==(const DescriptorEntry& lhs, const DescriptorEntry& rhs) {\n  return lhs.key_ == rhs.key_ && lhs.value_ == rhs.value_;\n}\n\ninline bool operator==(const Descriptor& lhs, const Descriptor& rhs) {\n  return lhs.entries_ == rhs.entries_ && lhs.limit_ == rhs.limit_;\n}\n\n} // namespace RateLimit\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/redis/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n"
  },
  {
    "path": "test/mocks/router/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"router_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/json:json_object_interface\",\n        \"//include/envoy/local_info:local_info_interface\",\n        \"//include/envoy/router:route_config_provider_manager_interface\",\n        \"//include/envoy/router:router_interface\",\n        \"//include/envoy/router:router_ratelimit_interface\",\n        \"//include/envoy/router:scopes_interface\",\n        \"//include/envoy/router:shadow_writer_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"router_filter_interface\",\n    srcs = [\"router_filter_interface.cc\"],\n    hdrs = [\"router_filter_interface.h\"],\n    deps = [\n        \"//source/common/router:router_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/router/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include <chrono>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::DoAll;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Router {\n\nMockDirectResponseEntry::MockDirectResponseEntry() = default;\nMockDirectResponseEntry::~MockDirectResponseEntry() = default;\n\nTestRetryPolicy::TestRetryPolicy() { num_retries_ = 1; }\n\nTestRetryPolicy::~TestRetryPolicy() = default;\n\nMockInternalRedirectPolicy::MockInternalRedirectPolicy() {\n  ON_CALL(*this, enabled()).WillByDefault(Return(false));\n}\n\nMockRetryState::MockRetryState() = default;\n\nvoid MockRetryState::expectHeadersRetry() {\n  EXPECT_CALL(*this, shouldRetryHeaders(_, _))\n      .WillOnce(DoAll(SaveArg<1>(&callback_), Return(RetryStatus::Yes)));\n}\n\nvoid MockRetryState::expectHedgedPerTryTimeoutRetry() {\n  EXPECT_CALL(*this, shouldHedgeRetryPerTryTimeout(_))\n      .WillOnce(DoAll(SaveArg<0>(&callback_), Return(RetryStatus::Yes)));\n}\n\nvoid MockRetryState::expectResetRetry() {\n  EXPECT_CALL(*this, shouldRetryReset(_, _))\n      .WillOnce(DoAll(SaveArg<1>(&callback_), Return(RetryStatus::Yes)));\n}\n\nMockRetryState::~MockRetryState() = default;\n\nMockRateLimitPolicyEntry::MockRateLimitPolicyEntry() {\n  ON_CALL(*this, disableKey()).WillByDefault(ReturnRef(disable_key_));\n}\n\nMockRateLimitPolicyEntry::~MockRateLimitPolicyEntry() = default;\n\nMockRateLimitPolicy::MockRateLimitPolicy() {\n  ON_CALL(*this, getApplicableRateLimit(_)).WillByDefault(ReturnRef(rate_limit_policy_entry_));\n  ON_CALL(*this, empty()).WillByDefault(Return(true));\n}\n\nMockRateLimitPolicy::~MockRateLimitPolicy() = default;\n\nMockShadowWriter::MockShadowWriter() = default;\nMockShadowWriter::~MockShadowWriter() = default;\n\nMockVirtualHost::MockVirtualHost() {\n  ON_CALL(*this, name()).WillByDefault(ReturnRef(name_));\n  
ON_CALL(*this, rateLimitPolicy()).WillByDefault(ReturnRef(rate_limit_policy_));\n}\n\nMockVirtualHost::~MockVirtualHost() = default;\n\nMockHashPolicy::MockHashPolicy() = default;\nMockHashPolicy::~MockHashPolicy() = default;\n\nMockMetadataMatchCriteria::MockMetadataMatchCriteria() = default;\nMockMetadataMatchCriteria::~MockMetadataMatchCriteria() = default;\n\nMockTlsContextMatchCriteria::MockTlsContextMatchCriteria() = default;\nMockTlsContextMatchCriteria::~MockTlsContextMatchCriteria() = default;\n\nMockPathMatchCriterion::MockPathMatchCriterion() {\n  ON_CALL(*this, matchType()).WillByDefault(ReturnPointee(&type_));\n  ON_CALL(*this, matcher()).WillByDefault(ReturnPointee(&matcher_));\n}\n\nMockPathMatchCriterion::~MockPathMatchCriterion() = default;\n\nMockRouteEntry::MockRouteEntry() {\n  ON_CALL(*this, clusterName()).WillByDefault(ReturnRef(cluster_name_));\n  ON_CALL(*this, opaqueConfig()).WillByDefault(ReturnRef(opaque_config_));\n  ON_CALL(*this, rateLimitPolicy()).WillByDefault(ReturnRef(rate_limit_policy_));\n  ON_CALL(*this, retryPolicy()).WillByDefault(ReturnRef(retry_policy_));\n  ON_CALL(*this, internalRedirectPolicy()).WillByDefault(ReturnRef(internal_redirect_policy_));\n  ON_CALL(*this, retryShadowBufferLimit())\n      .WillByDefault(Return(std::numeric_limits<uint32_t>::max()));\n  ON_CALL(*this, shadowPolicies()).WillByDefault(ReturnRef(shadow_policies_));\n  ON_CALL(*this, timeout()).WillByDefault(Return(std::chrono::milliseconds(10)));\n  ON_CALL(*this, virtualCluster(_)).WillByDefault(Return(&virtual_cluster_));\n  ON_CALL(*this, virtualHost()).WillByDefault(ReturnRef(virtual_host_));\n  ON_CALL(*this, includeVirtualHostRateLimits()).WillByDefault(Return(true));\n  ON_CALL(*this, pathMatchCriterion()).WillByDefault(ReturnRef(path_match_criterion_));\n  ON_CALL(*this, metadata()).WillByDefault(ReturnRef(metadata_));\n  ON_CALL(*this, upgradeMap()).WillByDefault(ReturnRef(upgrade_map_));\n  ON_CALL(*this, 
hedgePolicy()).WillByDefault(ReturnRef(hedge_policy_));\n  ON_CALL(*this, routeName()).WillByDefault(ReturnRef(route_name_));\n  ON_CALL(*this, connectConfig()).WillByDefault(ReturnRef(connect_config_));\n}\n\nMockRouteEntry::~MockRouteEntry() = default;\n\nMockConfig::MockConfig() : route_(new NiceMock<MockRoute>()) {\n  ON_CALL(*this, route(_, _, _)).WillByDefault(Return(route_));\n  ON_CALL(*this, route(_, _, _, _)).WillByDefault(Return(route_));\n  ON_CALL(*this, internalOnlyHeaders()).WillByDefault(ReturnRef(internal_only_headers_));\n  ON_CALL(*this, name()).WillByDefault(ReturnRef(name_));\n  ON_CALL(*this, usesVhds()).WillByDefault(Return(false));\n}\n\nMockConfig::~MockConfig() = default;\n\nMockDecorator::MockDecorator() {\n  ON_CALL(*this, getOperation()).WillByDefault(ReturnRef(operation_));\n  ON_CALL(*this, propagate()).WillByDefault(Return(true));\n}\nMockDecorator::~MockDecorator() = default;\n\nMockRouteTracing::MockRouteTracing() = default;\nMockRouteTracing::~MockRouteTracing() = default;\n\nMockRoute::MockRoute() {\n  ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_));\n  ON_CALL(*this, decorator()).WillByDefault(Return(&decorator_));\n  ON_CALL(*this, tracingConfig()).WillByDefault(Return(nullptr));\n}\nMockRoute::~MockRoute() = default;\n\nMockRouteConfigProvider::MockRouteConfigProvider() {\n  ON_CALL(*this, config()).WillByDefault(Return(route_config_));\n}\nMockRouteConfigProvider::~MockRouteConfigProvider() = default;\n\nMockRouteConfigProviderManager::MockRouteConfigProviderManager() = default;\nMockRouteConfigProviderManager::~MockRouteConfigProviderManager() = default;\n\nMockScopedConfig::MockScopedConfig() {\n  ON_CALL(*this, getRouteConfig(_)).WillByDefault(Return(route_config_));\n}\nMockScopedConfig::~MockScopedConfig() = default;\n\nMockScopedRouteConfigProvider::MockScopedRouteConfigProvider()\n    : config_(std::make_shared<MockScopedConfig>()) {\n  ON_CALL(*this, getConfig()).WillByDefault(Return(config_));\n  
ON_CALL(*this, apiType()).WillByDefault(Return(ApiType::Delta));\n}\nMockScopedRouteConfigProvider::~MockScopedRouteConfigProvider() = default;\n\nMockGenericConnectionPoolCallbacks::MockGenericConnectionPoolCallbacks() {\n  ON_CALL(*this, upstreamToDownstream()).WillByDefault(ReturnRef(upstream_to_downstream_));\n}\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/router/mocks.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <map>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/config/config_provider.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h\"\n#include \"envoy/http/hash_policy.h\"\n#include \"envoy/local_info/local_info.h\"\n#include \"envoy/router/rds.h\"\n#include \"envoy/router/route_config_provider_manager.h\"\n#include \"envoy/router/router.h\"\n#include \"envoy/router/router_ratelimit.h\"\n#include \"envoy/router/scopes.h\"\n#include \"envoy/router/shadow_writer.h\"\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/stream_info/filter_state.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/global.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Router {\nusing ::testing::NiceMock;\n\nclass MockDirectResponseEntry : public DirectResponseEntry {\npublic:\n  MockDirectResponseEntry();\n  ~MockDirectResponseEntry() override;\n\n  // DirectResponseEntry\n  MOCK_METHOD(void, finalizeResponseHeaders,\n              (Http::ResponseHeaderMap & headers, const StreamInfo::StreamInfo& stream_info),\n              (const));\n  MOCK_METHOD(std::string, newPath, (const Http::RequestHeaderMap& headers), (const));\n  MOCK_METHOD(void, rewritePathHeader,\n              (Http::RequestHeaderMap & headers, bool insert_envoy_original_path), (const));\n  MOCK_METHOD(Http::Code, responseCode, (), (const));\n  MOCK_METHOD(const std::string&, responseBody, (), (const));\n  
MOCK_METHOD(const std::string&, routeName, (), (const));\n};\n\nclass TestCorsPolicy : public CorsPolicy {\npublic:\n  // Router::CorsPolicy\n  const std::vector<Matchers::StringMatcherPtr>& allowOrigins() const override {\n    return allow_origins_;\n  };\n  const std::string& allowMethods() const override { return allow_methods_; };\n  const std::string& allowHeaders() const override { return allow_headers_; };\n  const std::string& exposeHeaders() const override { return expose_headers_; };\n  const std::string& maxAge() const override { return max_age_; };\n  const absl::optional<bool>& allowCredentials() const override { return allow_credentials_; };\n  bool enabled() const override { return enabled_; };\n  bool shadowEnabled() const override { return shadow_enabled_; };\n\n  std::vector<Matchers::StringMatcherPtr> allow_origins_;\n  std::string allow_methods_;\n  std::string allow_headers_;\n  std::string expose_headers_;\n  std::string max_age_{};\n  absl::optional<bool> allow_credentials_;\n  bool enabled_{};\n  bool shadow_enabled_{};\n};\n\nclass TestHedgePolicy : public HedgePolicy {\npublic:\n  // Router::HedgePolicy\n  uint32_t initialRequests() const override { return initial_requests_; }\n  const envoy::type::v3::FractionalPercent& additionalRequestChance() const override {\n    return additional_request_chance_;\n  }\n  bool hedgeOnPerTryTimeout() const override { return hedge_on_per_try_timeout_; }\n\n  uint32_t initial_requests_{};\n  envoy::type::v3::FractionalPercent additional_request_chance_{};\n  bool hedge_on_per_try_timeout_{};\n};\n\nclass TestRetryPolicy : public RetryPolicy {\npublic:\n  TestRetryPolicy();\n  ~TestRetryPolicy() override;\n\n  // Router::RetryPolicy\n  std::chrono::milliseconds perTryTimeout() const override { return per_try_timeout_; }\n  uint32_t numRetries() const override { return num_retries_; }\n  uint32_t retryOn() const override { return retry_on_; }\n  
MOCK_METHOD(std::vector<Upstream::RetryHostPredicateSharedPtr>, retryHostPredicates, (), (const));\n  MOCK_METHOD(Upstream::RetryPrioritySharedPtr, retryPriority, (), (const));\n  uint32_t hostSelectionMaxAttempts() const override { return host_selection_max_attempts_; }\n  const std::vector<uint32_t>& retriableStatusCodes() const override {\n    return retriable_status_codes_;\n  }\n  const std::vector<Http::HeaderMatcherSharedPtr>& retriableHeaders() const override {\n    return retriable_headers_;\n  }\n  const std::vector<Http::HeaderMatcherSharedPtr>& retriableRequestHeaders() const override {\n    return retriable_request_headers_;\n  }\n\n  absl::optional<std::chrono::milliseconds> baseInterval() const override { return base_interval_; }\n  absl::optional<std::chrono::milliseconds> maxInterval() const override { return max_interval_; }\n  std::chrono::milliseconds resetMaxInterval() const override { return reset_max_interval_; }\n  const std::vector<ResetHeaderParserSharedPtr>& resetHeaders() const override {\n    return reset_headers_;\n  }\n\n  std::chrono::milliseconds per_try_timeout_{0};\n  uint32_t num_retries_{};\n  uint32_t retry_on_{};\n  uint32_t host_selection_max_attempts_;\n  std::vector<uint32_t> retriable_status_codes_;\n  std::vector<Http::HeaderMatcherSharedPtr> retriable_headers_;\n  std::vector<Http::HeaderMatcherSharedPtr> retriable_request_headers_;\n  absl::optional<std::chrono::milliseconds> base_interval_{};\n  absl::optional<std::chrono::milliseconds> max_interval_{};\n  std::vector<ResetHeaderParserSharedPtr> reset_headers_{};\n  std::chrono::milliseconds reset_max_interval_{300000};\n};\n\nclass MockInternalRedirectPolicy : public InternalRedirectPolicy {\npublic:\n  MockInternalRedirectPolicy();\n  MOCK_METHOD(bool, enabled, (), (const));\n  MOCK_METHOD(bool, shouldRedirectForResponseCode, (const Http::Code& response_code), (const));\n  MOCK_METHOD(std::vector<InternalRedirectPredicateSharedPtr>, predicates, (), (const));\n  
MOCK_METHOD(uint32_t, maxInternalRedirects, (), (const));\n  MOCK_METHOD(bool, isCrossSchemeRedirectAllowed, (), (const));\n};\n\nclass MockInternalRedirectPredicate : public InternalRedirectPredicate {\npublic:\n  MOCK_METHOD(bool, acceptTargetRoute, (StreamInfo::FilterState&, absl::string_view, bool, bool));\n  MOCK_METHOD(absl::string_view, name, (), (const));\n};\n\nclass MockRetryState : public RetryState {\npublic:\n  MockRetryState();\n  ~MockRetryState() override;\n\n  void expectHeadersRetry();\n  void expectHedgedPerTryTimeoutRetry();\n  void expectResetRetry();\n\n  MOCK_METHOD(bool, enabled, ());\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, parseResetInterval,\n              (const Http::ResponseHeaderMap& response_headers), (const));\n  MOCK_METHOD(RetryStatus, shouldRetryHeaders,\n              (const Http::ResponseHeaderMap& response_headers, DoRetryCallback callback));\n  MOCK_METHOD(bool, wouldRetryFromHeaders, (const Http::ResponseHeaderMap& response_headers));\n  MOCK_METHOD(RetryStatus, shouldRetryReset,\n              (const Http::StreamResetReason reset_reason, DoRetryCallback callback));\n  MOCK_METHOD(RetryStatus, shouldHedgeRetryPerTryTimeout, (DoRetryCallback callback));\n  MOCK_METHOD(void, onHostAttempted, (Upstream::HostDescriptionConstSharedPtr));\n  MOCK_METHOD(bool, shouldSelectAnotherHost, (const Upstream::Host& host));\n  MOCK_METHOD(const Upstream::HealthyAndDegradedLoad&, priorityLoadForRetry,\n              (const Upstream::PrioritySet&, const Upstream::HealthyAndDegradedLoad&,\n               const Upstream::RetryPriority::PriorityMappingFunc&));\n  MOCK_METHOD(uint32_t, hostSelectionMaxAttempts, (), (const));\n\n  DoRetryCallback callback_;\n};\n\nclass MockRateLimitPolicyEntry : public RateLimitPolicyEntry {\npublic:\n  MockRateLimitPolicyEntry();\n  ~MockRateLimitPolicyEntry() override;\n\n  // Router::RateLimitPolicyEntry\n  MOCK_METHOD(uint64_t, stage, (), (const));\n  MOCK_METHOD(const std::string&, 
disableKey, (), (const));\n  MOCK_METHOD(void, populateDescriptors,\n              (const RouteEntry& route, std::vector<Envoy::RateLimit::Descriptor>& descriptors,\n               const std::string& local_service_cluster, const Http::HeaderMap& headers,\n               const Network::Address::Instance& remote_address,\n               const envoy::config::core::v3::Metadata* dynamic_metadata),\n              (const));\n\n  uint64_t stage_{};\n  std::string disable_key_;\n};\n\nclass MockRateLimitPolicy : public RateLimitPolicy {\npublic:\n  MockRateLimitPolicy();\n  ~MockRateLimitPolicy() override;\n\n  // Router::RateLimitPolicy\n  MOCK_METHOD(std::vector<std::reference_wrapper<const RateLimitPolicyEntry>>&,\n              getApplicableRateLimit, (uint64_t stage), (const));\n  MOCK_METHOD(bool, empty, (), (const));\n\n  std::vector<std::reference_wrapper<const Router::RateLimitPolicyEntry>> rate_limit_policy_entry_;\n};\n\nclass TestShadowPolicy : public ShadowPolicy {\npublic:\n  TestShadowPolicy(absl::string_view cluster = \"\", absl::string_view runtime_key = \"\",\n                   envoy::type::v3::FractionalPercent default_value = {}, bool trace_sampled = true)\n      : cluster_(cluster), runtime_key_(runtime_key), default_value_(default_value),\n        trace_sampled_(trace_sampled) {}\n  // Router::ShadowPolicy\n  const std::string& cluster() const override { return cluster_; }\n  const std::string& runtimeKey() const override { return runtime_key_; }\n  const envoy::type::v3::FractionalPercent& defaultValue() const override { return default_value_; }\n  bool traceSampled() const override { return trace_sampled_; }\n\n  std::string cluster_;\n  std::string runtime_key_;\n  envoy::type::v3::FractionalPercent default_value_;\n  bool trace_sampled_;\n};\n\nclass MockShadowWriter : public ShadowWriter {\npublic:\n  MockShadowWriter();\n  ~MockShadowWriter() override;\n\n  // Router::ShadowWriter\n  void shadow(const std::string& cluster, 
Http::RequestMessagePtr&& request,\n              const Http::AsyncClient::RequestOptions& options) override {\n    shadow_(cluster, request, options);\n  }\n\n  MOCK_METHOD(void, shadow_,\n              (const std::string& cluster, Http::RequestMessagePtr& request,\n               const Http::AsyncClient::RequestOptions& options));\n};\n\nclass TestVirtualCluster : public VirtualCluster {\npublic:\n  // Router::VirtualCluster\n  Stats::StatName statName() const override { return stat_name_.statName(); }\n  VirtualClusterStats& stats() const override { return stats_; }\n\n  Stats::TestSymbolTable symbol_table_;\n  Stats::StatNameManagedStorage stat_name_{\"fake_virtual_cluster\", *symbol_table_};\n  Stats::IsolatedStoreImpl stats_store_;\n  mutable VirtualClusterStats stats_{generateStats(stats_store_)};\n};\n\nclass MockVirtualHost : public VirtualHost {\npublic:\n  MockVirtualHost();\n  ~MockVirtualHost() override;\n\n  // Router::VirtualHost\n  MOCK_METHOD(const std::string&, name, (), (const));\n  MOCK_METHOD(const RateLimitPolicy&, rateLimitPolicy, (), (const));\n  MOCK_METHOD(const CorsPolicy*, corsPolicy, (), (const));\n  MOCK_METHOD(const Config&, routeConfig, (), (const));\n  MOCK_METHOD(const RouteSpecificFilterConfig*, perFilterConfig, (const std::string&), (const));\n  MOCK_METHOD(bool, includeAttemptCountInRequest, (), (const));\n  MOCK_METHOD(bool, includeAttemptCountInResponse, (), (const));\n  MOCK_METHOD(Upstream::RetryPrioritySharedPtr, retryPriority, ());\n  MOCK_METHOD(Upstream::RetryHostPredicateSharedPtr, retryHostPredicate, ());\n  MOCK_METHOD(uint32_t, retryShadowBufferLimit, (), (const));\n\n  Stats::StatName statName() const override {\n    stat_name_ = std::make_unique<Stats::StatNameManagedStorage>(name(), *symbol_table_);\n    return stat_name_->statName();\n  }\n\n  mutable Stats::TestSymbolTable symbol_table_;\n  std::string name_{\"fake_vhost\"};\n  mutable std::unique_ptr<Stats::StatNameManagedStorage> stat_name_;\n  
testing::NiceMock<MockRateLimitPolicy> rate_limit_policy_;\n  TestCorsPolicy cors_policy_;\n};\n\nclass MockHashPolicy : public Http::HashPolicy {\npublic:\n  MockHashPolicy();\n  ~MockHashPolicy() override;\n\n  // Http::HashPolicy\n  MOCK_METHOD(absl::optional<uint64_t>, generateHash,\n              (const Network::Address::Instance* downstream_address,\n               const Http::RequestHeaderMap& headers, const AddCookieCallback add_cookie,\n               const StreamInfo::FilterStateSharedPtr filter_state),\n              (const));\n};\n\nclass MockMetadataMatchCriteria : public MetadataMatchCriteria {\npublic:\n  MockMetadataMatchCriteria();\n  ~MockMetadataMatchCriteria() override;\n\n  // Router::MetadataMatchCriteria\n  MOCK_METHOD(const std::vector<MetadataMatchCriterionConstSharedPtr>&, metadataMatchCriteria, (),\n              (const));\n  MOCK_METHOD(MetadataMatchCriteriaConstPtr, mergeMatchCriteria, (const ProtobufWkt::Struct&),\n              (const));\n  MOCK_METHOD(MetadataMatchCriteriaConstPtr, filterMatchCriteria, (const std::set<std::string>&),\n              (const));\n};\n\nclass MockTlsContextMatchCriteria : public TlsContextMatchCriteria {\npublic:\n  MockTlsContextMatchCriteria();\n  ~MockTlsContextMatchCriteria() override;\n\n  // Router::MockTlsContextMatchCriteria\n  MOCK_METHOD(const absl::optional<bool>&, presented, (), (const));\n  MOCK_METHOD(const absl::optional<bool>&, validated, (), (const));\n};\n\nclass MockPathMatchCriterion : public PathMatchCriterion {\npublic:\n  MockPathMatchCriterion();\n  ~MockPathMatchCriterion() override;\n\n  // Router::PathMatchCriterion\n  MOCK_METHOD(PathMatchType, matchType, (), (const));\n  MOCK_METHOD(const std::string&, matcher, (), (const));\n\n  PathMatchType type_;\n  std::string matcher_;\n};\n\nclass MockRouteEntry : public RouteEntry {\npublic:\n  MockRouteEntry();\n  ~MockRouteEntry() override;\n\n  // Router::Config\n  MOCK_METHOD(const std::string&, clusterName, (), (const));\n  
MOCK_METHOD(Http::Code, clusterNotFoundResponseCode, (), (const));\n  MOCK_METHOD(void, finalizeRequestHeaders,\n              (Http::RequestHeaderMap & headers, const StreamInfo::StreamInfo& stream_info,\n               bool insert_envoy_original_path),\n              (const));\n  MOCK_METHOD(void, finalizeResponseHeaders,\n              (Http::ResponseHeaderMap & headers, const StreamInfo::StreamInfo& stream_info),\n              (const));\n  MOCK_METHOD(const Http::HashPolicy*, hashPolicy, (), (const));\n  MOCK_METHOD(const HedgePolicy&, hedgePolicy, (), (const));\n  MOCK_METHOD(const Router::MetadataMatchCriteria*, metadataMatchCriteria, (), (const));\n  MOCK_METHOD(const Router::TlsContextMatchCriteria*, tlsContextMatchCriteria, (), (const));\n  MOCK_METHOD(Upstream::ResourcePriority, priority, (), (const));\n  MOCK_METHOD(const RateLimitPolicy&, rateLimitPolicy, (), (const));\n  MOCK_METHOD(const RetryPolicy&, retryPolicy, (), (const));\n  MOCK_METHOD(const InternalRedirectPolicy&, internalRedirectPolicy, (), (const));\n  MOCK_METHOD(uint32_t, retryShadowBufferLimit, (), (const));\n  MOCK_METHOD(const std::vector<ShadowPolicyPtr>&, shadowPolicies, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, timeout, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, idleTimeout, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, maxStreamDuration, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, grpcTimeoutHeaderMax, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, grpcTimeoutHeaderOffset, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, maxGrpcTimeout, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::milliseconds>, grpcTimeoutOffset, (), (const));\n  MOCK_METHOD(const VirtualCluster*, virtualCluster, (const Http::HeaderMap& headers), (const));\n  MOCK_METHOD(const std::string&, virtualHostName, (), (const));\n  MOCK_METHOD(const VirtualHost&, 
virtualHost, (), (const));\n  MOCK_METHOD(bool, autoHostRewrite, (), (const));\n  MOCK_METHOD((const std::multimap<std::string, std::string>&), opaqueConfig, (), (const));\n  MOCK_METHOD(bool, includeVirtualHostRateLimits, (), (const));\n  MOCK_METHOD(const CorsPolicy*, corsPolicy, (), (const));\n  MOCK_METHOD(const envoy::config::core::v3::Metadata&, metadata, (), (const));\n  MOCK_METHOD(const Envoy::Config::TypedMetadata&, typedMetadata, (), (const));\n  MOCK_METHOD(const PathMatchCriterion&, pathMatchCriterion, (), (const));\n  MOCK_METHOD(const RouteSpecificFilterConfig*, perFilterConfig, (const std::string&), (const));\n  MOCK_METHOD(bool, includeAttemptCountInRequest, (), (const));\n  MOCK_METHOD(bool, includeAttemptCountInResponse, (), (const));\n  MOCK_METHOD(const absl::optional<ConnectConfig>&, connectConfig, (), (const));\n  MOCK_METHOD(const UpgradeMap&, upgradeMap, (), (const));\n  MOCK_METHOD(const std::string&, routeName, (), (const));\n\n  std::string cluster_name_{\"fake_cluster\"};\n  std::string route_name_{\"fake_route_name\"};\n  std::multimap<std::string, std::string> opaque_config_;\n  TestVirtualCluster virtual_cluster_;\n  TestRetryPolicy retry_policy_;\n  testing::NiceMock<MockInternalRedirectPolicy> internal_redirect_policy_;\n  TestHedgePolicy hedge_policy_;\n  testing::NiceMock<MockRateLimitPolicy> rate_limit_policy_;\n  std::vector<ShadowPolicyPtr> shadow_policies_;\n  testing::NiceMock<MockVirtualHost> virtual_host_;\n  MockHashPolicy hash_policy_;\n  MockMetadataMatchCriteria metadata_matches_criteria_;\n  MockTlsContextMatchCriteria tls_context_matches_criteria_;\n  TestCorsPolicy cors_policy_;\n  testing::NiceMock<MockPathMatchCriterion> path_match_criterion_;\n  envoy::config::core::v3::Metadata metadata_;\n  UpgradeMap upgrade_map_;\n  absl::optional<ConnectConfig> connect_config_;\n};\n\nclass MockDecorator : public Decorator {\npublic:\n  MockDecorator();\n  ~MockDecorator() override;\n\n  // Router::Decorator\n  
MOCK_METHOD(const std::string&, getOperation, (), (const));\n  MOCK_METHOD(bool, propagate, (), (const));\n  MOCK_METHOD(void, apply, (Tracing::Span & span), (const));\n\n  std::string operation_{\"fake_operation\"};\n};\n\nclass MockRouteTracing : public RouteTracing {\npublic:\n  MockRouteTracing();\n  ~MockRouteTracing() override;\n\n  // Router::RouteTracing\n  MOCK_METHOD(const envoy::type::v3::FractionalPercent&, getClientSampling, (), (const));\n  MOCK_METHOD(const envoy::type::v3::FractionalPercent&, getRandomSampling, (), (const));\n  MOCK_METHOD(const envoy::type::v3::FractionalPercent&, getOverallSampling, (), (const));\n  MOCK_METHOD(const Tracing::CustomTagMap&, getCustomTags, (), (const));\n};\n\nclass MockRoute : public Route {\npublic:\n  MockRoute();\n  ~MockRoute() override;\n\n  // Router::Route\n  MOCK_METHOD(const DirectResponseEntry*, directResponseEntry, (), (const));\n  MOCK_METHOD(const RouteEntry*, routeEntry, (), (const));\n  MOCK_METHOD(const Decorator*, decorator, (), (const));\n  MOCK_METHOD(const RouteTracing*, tracingConfig, (), (const));\n  MOCK_METHOD(const RouteSpecificFilterConfig*, perFilterConfig, (const std::string&), (const));\n\n  testing::NiceMock<MockRouteEntry> route_entry_;\n  testing::NiceMock<MockDecorator> decorator_;\n  testing::NiceMock<MockRouteTracing> route_tracing_;\n};\n\nclass MockConfig : public Config {\npublic:\n  MockConfig();\n  ~MockConfig() override;\n\n  // Router::Config\n  MOCK_METHOD(RouteConstSharedPtr, route,\n              (const Http::RequestHeaderMap&, const Envoy::StreamInfo::StreamInfo&,\n               uint64_t random_value),\n              (const));\n  MOCK_METHOD(RouteConstSharedPtr, route,\n              (const RouteCallback& cb, const Http::RequestHeaderMap&,\n               const Envoy::StreamInfo::StreamInfo&, uint64_t random_value),\n              (const));\n\n  MOCK_METHOD(const std::list<Http::LowerCaseString>&, internalOnlyHeaders, (), (const));\n  MOCK_METHOD(const std::string&, 
name, (), (const));\n  MOCK_METHOD(bool, usesVhds, (), (const));\n  MOCK_METHOD(bool, mostSpecificHeaderMutationsWins, (), (const));\n\n  std::shared_ptr<MockRoute> route_;\n  std::list<Http::LowerCaseString> internal_only_headers_;\n  std::string name_{\"fake_config\"};\n};\n\nclass MockRouteConfigProvider : public RouteConfigProvider {\npublic:\n  MockRouteConfigProvider();\n  ~MockRouteConfigProvider() override;\n\n  MOCK_METHOD(ConfigConstSharedPtr, config, ());\n  MOCK_METHOD(absl::optional<ConfigInfo>, configInfo, (), (const));\n  MOCK_METHOD(SystemTime, lastUpdated, (), (const));\n  MOCK_METHOD(void, onConfigUpdate, ());\n  MOCK_METHOD(void, validateConfig, (const envoy::config::route::v3::RouteConfiguration&), (const));\n  MOCK_METHOD(void, requestVirtualHostsUpdate,\n              (const std::string&, Event::Dispatcher&,\n               std::weak_ptr<Http::RouteConfigUpdatedCallback> route_config_updated_cb));\n\n  std::shared_ptr<NiceMock<MockConfig>> route_config_{new NiceMock<MockConfig>()};\n};\n\nclass MockRouteConfigProviderManager : public RouteConfigProviderManager {\npublic:\n  MockRouteConfigProviderManager();\n  ~MockRouteConfigProviderManager() override;\n\n  MOCK_METHOD(RouteConfigProviderSharedPtr, createRdsRouteConfigProvider,\n              (const envoy::extensions::filters::network::http_connection_manager::v3::Rds& rds,\n               Server::Configuration::ServerFactoryContext& factory_context,\n               const std::string& stat_prefix, Init::Manager& init_manager));\n  MOCK_METHOD(RouteConfigProviderPtr, createStaticRouteConfigProvider,\n              (const envoy::config::route::v3::RouteConfiguration& route_config,\n               Server::Configuration::ServerFactoryContext& factory_context,\n               ProtobufMessage::ValidationVisitor& validator));\n};\n\nclass MockScopedConfig : public ScopedConfig {\npublic:\n  MockScopedConfig();\n  ~MockScopedConfig() override;\n\n  MOCK_METHOD(ConfigConstSharedPtr, getRouteConfig, 
(const Http::HeaderMap& headers), (const));\n\n  std::shared_ptr<MockConfig> route_config_{new NiceMock<MockConfig>()};\n};\n\nclass MockScopedRouteConfigProvider : public Envoy::Config::ConfigProvider {\npublic:\n  MockScopedRouteConfigProvider();\n  ~MockScopedRouteConfigProvider() override;\n\n  // Config::ConfigProvider\n  MOCK_METHOD(SystemTime, lastUpdated, (), (const));\n  MOCK_METHOD(Protobuf::Message*, getConfigProto, (), (const));\n  MOCK_METHOD(Envoy::Config::ConfigProvider::ConfigProtoVector, getConfigProtos, (), (const));\n  MOCK_METHOD(ConfigConstSharedPtr, getConfig, (), (const));\n  MOCK_METHOD(ApiType, apiType, (), (const));\n\n  std::shared_ptr<MockScopedConfig> config_;\n};\n\nclass MockGenericConnPool : public GenericConnPool {\n  MOCK_METHOD(void, newStream, (GenericConnectionPoolCallbacks * request));\n  MOCK_METHOD(bool, cancelAnyPendingStream, ());\n  MOCK_METHOD(absl::optional<Http::Protocol>, protocol, (), (const));\n  MOCK_METHOD(bool, initialize,\n              (Upstream::ClusterManager&, const RouteEntry&, Http::Protocol,\n               Upstream::LoadBalancerContext*));\n  MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const));\n};\n\nclass MockUpstreamToDownstream : public UpstreamToDownstream {\npublic:\n  MOCK_METHOD(const RouteEntry&, routeEntry, (), (const));\n  MOCK_METHOD(const Network::Connection&, connection, (), (const));\n\n  MOCK_METHOD(void, decodeData, (Buffer::Instance&, bool));\n  MOCK_METHOD(void, decodeMetadata, (Http::MetadataMapPtr &&));\n  MOCK_METHOD(void, decode100ContinueHeaders, (Http::ResponseHeaderMapPtr &&));\n  MOCK_METHOD(void, decodeHeaders, (Http::ResponseHeaderMapPtr&&, bool));\n  MOCK_METHOD(void, decodeTrailers, (Http::ResponseTrailerMapPtr &&));\n\n  MOCK_METHOD(void, onResetStream, (Http::StreamResetReason, absl::string_view));\n  MOCK_METHOD(void, onAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, onBelowWriteBufferLowWatermark, ());\n};\n\nclass 
MockGenericConnectionPoolCallbacks : public GenericConnectionPoolCallbacks {\npublic:\n  MockGenericConnectionPoolCallbacks();\n\n  MOCK_METHOD(void, onPoolFailure,\n              (Http::ConnectionPool::PoolFailureReason reason,\n               absl::string_view transport_failure_reason,\n               Upstream::HostDescriptionConstSharedPtr host));\n  MOCK_METHOD(void, onPoolReady,\n              (std::unique_ptr<GenericUpstream> && upstream,\n               Upstream::HostDescriptionConstSharedPtr host,\n               const Network::Address::InstanceConstSharedPtr& upstream_local_address,\n               const StreamInfo::StreamInfo& info));\n  MOCK_METHOD(UpstreamToDownstream&, upstreamToDownstream, ());\n\n  NiceMock<MockUpstreamToDownstream> upstream_to_downstream_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/router/router_filter_interface.cc",
    "content": "#include \"test/mocks/router/router_filter_interface.h\"\n\nusing testing::AnyNumber;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Router {\n\nMockRouterFilterInterface::MockRouterFilterInterface()\n    : config_(\"prefix.\", context_, ShadowWriterPtr(new MockShadowWriter()), router_proto) {\n  auto cluster_info = new NiceMock<Upstream::MockClusterInfo>();\n  cluster_info->timeout_budget_stats_ = nullptr;\n  ON_CALL(*cluster_info, timeoutBudgetStats()).WillByDefault(Return(absl::nullopt));\n  cluster_info_.reset(cluster_info);\n  ON_CALL(*this, callbacks()).WillByDefault(Return(&callbacks_));\n  ON_CALL(*this, config()).WillByDefault(ReturnRef(config_));\n  ON_CALL(*this, cluster()).WillByDefault(Return(cluster_info_));\n  ON_CALL(*this, upstreamRequests()).WillByDefault(ReturnRef(requests_));\n  EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(AnyNumber());\n  ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_));\n  ON_CALL(callbacks_, connection()).WillByDefault(Return(&client_connection_));\n  route_entry_.connect_config_.emplace(RouteEntry::ConnectConfig());\n}\n\nMockRouterFilterInterface::~MockRouterFilterInterface() = default;\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/router/router_filter_interface.h",
    "content": "#pragma once\n\n#include \"common/router/router.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Router {\n\nclass MockRouterFilterInterface : public RouterFilterInterface {\npublic:\n  MockRouterFilterInterface();\n  ~MockRouterFilterInterface() override;\n\n  MOCK_METHOD(void, onUpstream100ContinueHeaders,\n              (Envoy::Http::ResponseHeaderMapPtr && headers, UpstreamRequest& upstream_request));\n  MOCK_METHOD(void, onUpstreamHeaders,\n              (uint64_t response_code, Envoy::Http::ResponseHeaderMapPtr&& headers,\n               UpstreamRequest& upstream_request, bool end_stream));\n  MOCK_METHOD(void, onUpstreamData,\n              (Buffer::Instance & data, UpstreamRequest& upstream_request, bool end_stream));\n  MOCK_METHOD(void, onUpstreamTrailers,\n              (Envoy::Http::ResponseTrailerMapPtr && trailers, UpstreamRequest& upstream_request));\n  MOCK_METHOD(void, onUpstreamMetadata, (Envoy::Http::MetadataMapPtr && metadata_map));\n  MOCK_METHOD(void, onUpstreamReset,\n              (Envoy::Http::StreamResetReason reset_reason, absl::string_view transport_failure,\n               UpstreamRequest& upstream_request));\n  MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host));\n  MOCK_METHOD(void, onPerTryTimeout, (UpstreamRequest & upstream_request));\n  MOCK_METHOD(void, onStreamMaxDurationReached, (UpstreamRequest & upstream_request));\n\n  MOCK_METHOD(Envoy::Http::StreamDecoderFilterCallbacks*, callbacks, ());\n  MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, cluster, ());\n  MOCK_METHOD(FilterConfig&, config, ());\n  MOCK_METHOD(FilterUtility::TimeoutData, timeout, ());\n  MOCK_METHOD(Envoy::Http::RequestHeaderMap*, downstreamHeaders, ());\n  MOCK_METHOD(Envoy::Http::RequestTrailerMap*, downstreamTrailers, ());\n  MOCK_METHOD(bool, 
downstreamResponseStarted, (), (const));\n  MOCK_METHOD(bool, downstreamEndStream, (), (const));\n  MOCK_METHOD(uint32_t, attemptCount, (), (const));\n  MOCK_METHOD(const VirtualCluster*, requestVcluster, (), (const));\n  MOCK_METHOD(const RouteEntry*, routeEntry, (), (const));\n  MOCK_METHOD(const std::list<UpstreamRequestPtr>&, upstreamRequests, (), (const));\n  MOCK_METHOD(const UpstreamRequest*, finalUpstreamRequest, (), (const));\n  MOCK_METHOD(TimeSource&, timeSource, ());\n\n  NiceMock<Envoy::Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  NiceMock<MockRouteEntry> route_entry_;\n  NiceMock<Network::MockConnection> client_connection_;\n\n  envoy::extensions::filters::http::router::v3::Router router_proto;\n  NiceMock<Server::Configuration::MockFactoryContext> context_;\n  FilterConfig config_;\n  Upstream::ClusterInfoConstSharedPtr cluster_info_;\n  std::list<UpstreamRequestPtr> requests_;\n};\n\n} // namespace Router\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/runtime/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"runtime_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    external_deps = [\"abseil_optional\"],\n    deps = [\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/runtime/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnArg;\n\nnamespace Envoy {\nnamespace Runtime {\n\nMockSnapshot::MockSnapshot() {\n  ON_CALL(*this, getInteger(_, _)).WillByDefault(ReturnArg<1>());\n  ON_CALL(*this, getDouble(_, _)).WillByDefault(ReturnArg<1>());\n  ON_CALL(*this, getBoolean(_, _)).WillByDefault(ReturnArg<1>());\n  ON_CALL(*this, get(_)).WillByDefault(Return(absl::nullopt));\n}\n\nMockSnapshot::~MockSnapshot() = default;\n\nMockLoader::MockLoader() {\n  ON_CALL(*this, threadsafeSnapshot()).WillByDefault(testing::Invoke([]() {\n    return std::make_shared<const NiceMock<MockSnapshot>>();\n  }));\n  ON_CALL(*this, snapshot()).WillByDefault(ReturnRef(snapshot_));\n  ON_CALL(*this, getRootScope()).WillByDefault(ReturnRef(store_));\n}\n\nMockLoader::~MockLoader() = default;\n\nMockOverrideLayer::MockOverrideLayer() = default;\n\nMockOverrideLayer::~MockOverrideLayer() = default;\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/runtime/mocks.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/runtime/runtime.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"test/mocks/stats/mocks.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Runtime {\n\nclass MockSnapshot : public Snapshot {\npublic:\n  MockSnapshot();\n  ~MockSnapshot() override;\n\n  // Provide a default implementation of mocked featureEnabled/2.\n  bool featureEnabledDefault(absl::string_view, uint64_t default_value) {\n    if (default_value == 0) {\n      return false;\n    } else if (default_value == 100) {\n      return true;\n    } else {\n      throw std::invalid_argument(\"Not implemented yet. You may want to set expectation of mocked \"\n                                  \"featureEnabled() instead.\");\n    }\n  }\n\n  MOCK_METHOD(bool, deprecatedFeatureEnabled, (absl::string_view key, bool default_enabled),\n              (const));\n  MOCK_METHOD(bool, runtimeFeatureEnabled, (absl::string_view key), (const));\n  MOCK_METHOD(bool, featureEnabled, (absl::string_view key, uint64_t default_value), (const));\n  MOCK_METHOD(bool, featureEnabled,\n              (absl::string_view key, uint64_t default_value, uint64_t random_value), (const));\n  MOCK_METHOD(bool, featureEnabled,\n              (absl::string_view key, uint64_t default_value, uint64_t random_value,\n               uint64_t num_buckets),\n              (const));\n  MOCK_METHOD(bool, featureEnabled,\n              (absl::string_view key, const envoy::type::v3::FractionalPercent& default_value),\n              (const));\n  MOCK_METHOD(bool, featureEnabled,\n              (absl::string_view key, const envoy::type::v3::FractionalPercent& default_value,\n               uint64_t random_value),\n              (const));\n  MOCK_METHOD(ConstStringOptRef, get, (absl::string_view key), (const));\n  MOCK_METHOD(uint64_t, getInteger, 
(absl::string_view key, uint64_t default_value), (const));\n  MOCK_METHOD(double, getDouble, (absl::string_view key, double default_value), (const));\n  MOCK_METHOD(bool, getBoolean, (absl::string_view key, bool default_value), (const));\n  MOCK_METHOD(const std::vector<OverrideLayerConstPtr>&, getLayers, (), (const));\n};\n\nclass MockLoader : public Loader {\npublic:\n  MockLoader();\n  ~MockLoader() override;\n\n  MOCK_METHOD(void, initialize, (Upstream::ClusterManager & cm));\n  MOCK_METHOD(const Snapshot&, snapshot, ());\n  MOCK_METHOD(SnapshotConstSharedPtr, threadsafeSnapshot, ());\n  MOCK_METHOD(void, mergeValues, ((const absl::node_hash_map<std::string, std::string>&)));\n  MOCK_METHOD(void, startRtdsSubscriptions, (ReadyCallback));\n  MOCK_METHOD(Stats::Scope&, getRootScope, ());\n  MOCK_METHOD(void, countDeprecatedFeatureUse, (), (const));\n\n  testing::NiceMock<MockSnapshot> snapshot_;\n  testing::NiceMock<Stats::MockStore> store_;\n};\n\nclass MockOverrideLayer : public Snapshot::OverrideLayer {\npublic:\n  MockOverrideLayer();\n  ~MockOverrideLayer() override;\n\n  MOCK_METHOD(const std::string&, name, (), (const));\n  MOCK_METHOD(const Snapshot::EntryMap&, values, (), (const));\n};\n\n} // namespace Runtime\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/secret/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"secret_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/secret:secret_callbacks_interface\",\n        \"//include/envoy/secret:secret_manager_interface\",\n        \"//include/envoy/server:transport_socket_config_interface\",\n        \"//include/envoy/ssl:tls_certificate_config_interface\",\n        \"//source/common/secret:secret_provider_impl_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/secret/mocks.cc",
    "content": "#include \"test/mocks/secret/mocks.h\"\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n\n#include \"common/secret/secret_provider_impl.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace Secret {\n\nMockSecretManager::MockSecretManager() {\n  ON_CALL(*this, createInlineTlsCertificateProvider(_))\n      .WillByDefault(Invoke(\n          [](const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& tls_certificate) {\n            return std::make_shared<Secret::TlsCertificateConfigProviderImpl>(tls_certificate);\n          }));\n  ON_CALL(*this, createInlineCertificateValidationContextProvider(_))\n      .WillByDefault(Invoke(\n          [](const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n                 certificate_validation_context) {\n            return std::make_shared<Secret::CertificateValidationContextConfigProviderImpl>(\n                certificate_validation_context);\n          }));\n}\n\nMockSecretManager::~MockSecretManager() = default;\n\nMockSecretCallbacks::MockSecretCallbacks() = default;\n\nMockSecretCallbacks::~MockSecretCallbacks() = default;\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/secret/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/secret/secret_callbacks.h\"\n#include \"envoy/secret/secret_manager.h\"\n#include \"envoy/server/transport_socket_config.h\"\n#include \"envoy/ssl/tls_certificate_config.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Secret {\n\nclass MockSecretManager : public SecretManager {\npublic:\n  MockSecretManager();\n  ~MockSecretManager() override;\n\n  MOCK_METHOD(void, addStaticSecret,\n              (const envoy::extensions::transport_sockets::tls::v3::Secret& secret));\n  MOCK_METHOD(TlsCertificateConfigProviderSharedPtr, findStaticTlsCertificateProvider,\n              (const std::string& name), (const));\n  MOCK_METHOD(CertificateValidationContextConfigProviderSharedPtr,\n              findStaticCertificateValidationContextProvider, (const std::string& name), (const));\n  MOCK_METHOD(TlsSessionTicketKeysConfigProviderSharedPtr,\n              findStaticTlsSessionTicketKeysContextProvider, (const std::string& name), (const));\n  MOCK_METHOD(GenericSecretConfigProviderSharedPtr, findStaticGenericSecretProvider,\n              (const std::string& name), (const));\n  MOCK_METHOD(\n      TlsCertificateConfigProviderSharedPtr, createInlineTlsCertificateProvider,\n      (const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& tls_certificate));\n  MOCK_METHOD(CertificateValidationContextConfigProviderSharedPtr,\n              createInlineCertificateValidationContextProvider,\n              (const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext&\n                   certificate_validation_context));\n  MOCK_METHOD(TlsSessionTicketKeysConfigProviderSharedPtr, createInlineTlsSessionTicketKeysProvider,\n              (const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys&\n                   
tls_session_ticket_keys));\n  MOCK_METHOD(GenericSecretConfigProviderSharedPtr, createInlineGenericSecretProvider,\n              (const envoy::extensions::transport_sockets::tls::v3::GenericSecret& generic_secret));\n  MOCK_METHOD(TlsCertificateConfigProviderSharedPtr, findOrCreateTlsCertificateProvider,\n              (const envoy::config::core::v3::ConfigSource&, const std::string&,\n               Server::Configuration::TransportSocketFactoryContext&));\n  MOCK_METHOD(CertificateValidationContextConfigProviderSharedPtr,\n              findOrCreateCertificateValidationContextProvider,\n              (const envoy::config::core::v3::ConfigSource& config_source,\n               const std::string& config_name,\n               Server::Configuration::TransportSocketFactoryContext& secret_provider_context));\n  MOCK_METHOD(TlsSessionTicketKeysConfigProviderSharedPtr,\n              findOrCreateTlsSessionTicketKeysContextProvider,\n              (const envoy::config::core::v3::ConfigSource&, const std::string&,\n               Server::Configuration::TransportSocketFactoryContext&));\n  MOCK_METHOD(GenericSecretConfigProviderSharedPtr, findOrCreateGenericSecretProvider,\n              (const envoy::config::core::v3::ConfigSource&, const std::string&,\n               Server::Configuration::TransportSocketFactoryContext&));\n};\n\nclass MockSecretCallbacks : public SecretCallbacks {\npublic:\n  MockSecretCallbacks();\n  ~MockSecretCallbacks() override;\n  MOCK_METHOD(void, onAddOrUpdateSecret, ());\n};\n\n} // namespace Secret\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"config_tracker_mocks\",\n    srcs = [\"config_tracker.cc\"],\n    hdrs = [\"config_tracker.h\"],\n    deps = [\n        \"//include/envoy/server:configuration_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"admin_mocks\",\n    srcs = [\"admin.cc\"],\n    hdrs = [\"admin.h\"],\n    deps = [\n        \"//include/envoy/server:admin_interface\",\n        \"//test/mocks/server:config_tracker_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"bootstrap_extension_factory_mocks\",\n    srcs = [\"bootstrap_extension_factory.cc\"],\n    hdrs = [\"bootstrap_extension_factory.h\"],\n    deps = [\n        \"//include/envoy/server:bootstrap_extension_config_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"options_mocks\",\n    srcs = [\"options.cc\"],\n    hdrs = [\"options.h\"],\n    deps = [\n        \"//include/envoy/server:options_interface\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"admin_stream_mocks\",\n    srcs = [\"admin_stream.cc\"],\n    hdrs = [\"admin_stream.h\"],\n    deps = [\n        \"//include/envoy/server:admin_interface\",\n        \"//test/mocks/http:http_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"drain_manager_mocks\",\n    srcs = [\"drain_manager.cc\"],\n    hdrs = [\"drain_manager.h\"],\n    deps = [\n        \"//include/envoy/server:drain_manager_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"watch_dog_mocks\",\n    srcs = [\"watch_dog.cc\"],\n    hdrs = [\"watch_dog.h\"],\n    deps = [\n        \"//include/envoy/server:watchdog_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"watchdog_config_mocks\",\n    srcs = [\"watchdog_config.cc\"],\n    hdrs = [\"watchdog_config.h\"],\n    deps = [\n       
 \"//include/envoy/server:configuration_interface\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"guard_dog_mocks\",\n    srcs = [\"guard_dog.cc\"],\n    hdrs = [\"guard_dog.h\"],\n    deps = [\n        \"//include/envoy/server:guarddog_interface\",\n        \"//test/mocks/server:watch_dog_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"hot_restart_mocks\",\n    srcs = [\"hot_restart.cc\"],\n    hdrs = [\"hot_restart.h\"],\n    deps = [\n        \"//include/envoy/server:instance_interface\",\n        \"//test/mocks/stats:stats_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"listener_component_factory_mocks\",\n    srcs = [\"listener_component_factory.cc\"],\n    hdrs = [\"listener_component_factory.h\"],\n    deps = [\n        \"//include/envoy/server:drain_manager_interface\",\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//test/mocks/network:network_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"listener_manager_mocks\",\n    srcs = [\"listener_manager.cc\"],\n    hdrs = [\"listener_manager.h\"],\n    deps = [\n        \"//include/envoy/server:listener_manager_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"server_lifecycle_notifier_mocks\",\n    srcs = [\"server_lifecycle_notifier.cc\"],\n    hdrs = [\"server_lifecycle_notifier.h\"],\n    deps = [\n        \"//include/envoy/server:lifecycle_notifier_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"worker_factory_mocks\",\n    srcs = [\"worker_factory.cc\"],\n    hdrs = [\"worker_factory.h\"],\n    deps = [\n        \"//include/envoy/server:worker_interface\",\n        \"//test/mocks/server:worker_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"worker_mocks\",\n    srcs = [\"worker.cc\"],\n    hdrs = [\"worker.h\"],\n    deps = [\n        \"//include/envoy/server:worker_interface\",\n    
],\n)\n\nenvoy_cc_mock(\n    name = \"overload_manager_mocks\",\n    srcs = [\"overload_manager.cc\"],\n    hdrs = [\"overload_manager.h\"],\n    deps = [\n        \"//include/envoy/server:overload_manager_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"instance_mocks\",\n    srcs = [\"instance.cc\"],\n    hdrs = [\"instance.h\"],\n    deps = [\n        \"//include/envoy/server:instance_interface\",\n        \"//source/common/grpc:context_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/secret:secret_manager_impl_lib\",\n        \"//source/common/singleton:manager_impl_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:drain_manager_mocks\",\n        \"//test/mocks/server:hot_restart_mocks\",\n        \"//test/mocks/server:listener_manager_mocks\",\n        \"//test/mocks/server:options_mocks\",\n        \"//test/mocks/server:overload_manager_mocks\",\n        \"//test/mocks/server:server_lifecycle_notifier_mocks\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/tracing:tracing_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"main_mocks\",\n    hdrs = [\"main.h\"],\n    deps = [\n        \"//include/envoy/server:configuration_interface\",\n     
   \"//include/envoy/server:overload_manager_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"factory_context_mocks\",\n    srcs = [\"factory_context.cc\"],\n    hdrs = [\"factory_context.h\"],\n    deps = [\n        \"//test/mocks/server:drain_manager_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:overload_manager_mocks\",\n        \"//test/mocks/server:server_lifecycle_notifier_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"transport_socket_factory_context_mocks\",\n    srcs = [\"transport_socket_factory_context.cc\"],\n    hdrs = [\"transport_socket_factory_context.h\"],\n    deps = [\n        \"//include/envoy/server:tracer_config_interface\",\n        \"//source/common/secret:secret_manager_impl_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/server:config_tracker_mocks\",\n        \"//test/mocks/upstream:cluster_manager_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"listener_factory_context_mocks\",\n    srcs = [\"listener_factory_context.cc\"],\n    hdrs = [\"listener_factory_context.h\"],\n    deps = [\n        \"//include/envoy/server:listener_manager_interface\",\n        \"//test/mocks/server:factory_context_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"health_checker_factory_context_mocks\",\n    srcs = [\"health_checker_factory_context.cc\"],\n    hdrs = [\"health_checker_factory_context.h\"],\n    deps = [\n        \"//include/envoy/server:health_checker_config_interface\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/mocks/upstream:health_check_event_logger_mocks\",\n        \"//test/mocks/upstream:health_checker_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = 
\"filter_chain_factory_context_mocks\",\n    srcs = [\"filter_chain_factory_context.cc\"],\n    hdrs = [\"filter_chain_factory_context.h\"],\n    deps = [\n        \"//include/envoy/server:filter_config_interface\",\n        \"//test/mocks/server:factory_context_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"tracer_factory_mocks\",\n    srcs = [\"tracer_factory.cc\"],\n    hdrs = [\"tracer_factory.h\"],\n    deps = [\n        \"//include/envoy/protobuf:message_validator_interface\",\n        \"//include/envoy/server:tracer_config_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"tracer_factory_context_mocks\",\n    srcs = [\"tracer_factory_context.cc\"],\n    hdrs = [\"tracer_factory_context.h\"],\n    deps = [\n        \"//include/envoy/server:configuration_interface\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:tracer_factory_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"server_mocks\",\n    srcs = [],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/server:admin_stream_mocks\",\n        \"//test/mocks/server:bootstrap_extension_factory_mocks\",\n        \"//test/mocks/server:config_tracker_mocks\",\n        \"//test/mocks/server:drain_manager_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/mocks/server:filter_chain_factory_context_mocks\",\n        \"//test/mocks/server:guard_dog_mocks\",\n        \"//test/mocks/server:health_checker_factory_context_mocks\",\n        \"//test/mocks/server:hot_restart_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:listener_component_factory_mocks\",\n        \"//test/mocks/server:listener_factory_context_mocks\",\n        \"//test/mocks/server:listener_manager_mocks\",\n        \"//test/mocks/server:main_mocks\",\n        \"//test/mocks/server:options_mocks\",\n        \"//test/mocks/server:overload_manager_mocks\",\n        
\"//test/mocks/server:server_lifecycle_notifier_mocks\",\n        \"//test/mocks/server:tracer_factory_context_mocks\",\n        \"//test/mocks/server:tracer_factory_mocks\",\n        \"//test/mocks/server:transport_socket_factory_context_mocks\",\n        \"//test/mocks/server:watch_dog_mocks\",\n        \"//test/mocks/server:worker_factory_mocks\",\n        \"//test/mocks/server:worker_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/server/admin.cc",
    "content": "#include \"admin.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nMockAdmin::MockAdmin() {\n  ON_CALL(*this, getConfigTracker()).WillByDefault(testing::ReturnRef(config_tracker_));\n  ON_CALL(*this, concurrency()).WillByDefault(testing::Return(1));\n}\n\nMockAdmin::~MockAdmin() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/admin.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/server/admin.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"config_tracker.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockAdmin : public Admin {\npublic:\n  MockAdmin();\n  ~MockAdmin() override;\n\n  // Server::Admin\n  MOCK_METHOD(bool, addHandler,\n              (const std::string& prefix, const std::string& help_text, HandlerCb callback,\n               bool removable, bool mutates_server_state));\n  MOCK_METHOD(bool, removeHandler, (const std::string& prefix));\n  MOCK_METHOD(Network::Socket&, socket, ());\n  MOCK_METHOD(ConfigTracker&, getConfigTracker, ());\n  MOCK_METHOD(void, startHttpListener,\n              (const std::string& access_log_path, const std::string& address_out_path,\n               Network::Address::InstanceConstSharedPtr address,\n               const Network::Socket::OptionsSharedPtr& socket_options,\n               Stats::ScopePtr&& listener_scope));\n  MOCK_METHOD(Http::Code, request,\n              (absl::string_view path_and_query, absl::string_view method,\n               Http::ResponseHeaderMap& response_headers, std::string& body));\n  MOCK_METHOD(void, addListenerToHandler, (Network::ConnectionHandler * handler));\n  MOCK_METHOD(uint32_t, concurrency, (), (const));\n\n  ::testing::NiceMock<MockConfigTracker> config_tracker_;\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/admin_stream.cc",
    "content": "#include \"admin_stream.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nMockAdminStream::MockAdminStream() = default;\n\nMockAdminStream::~MockAdminStream() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/admin_stream.h",
    "content": "#pragma once\n\n#include \"envoy/server/admin.h\"\n\n#include \"test/mocks/http/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockAdminStream : public AdminStream {\npublic:\n  MockAdminStream();\n  ~MockAdminStream() override;\n\n  MOCK_METHOD(void, setEndStreamOnComplete, (bool));\n  MOCK_METHOD(void, addOnDestroyCallback, (std::function<void()>));\n  MOCK_METHOD(const Buffer::Instance*, getRequestBody, (), (const));\n  MOCK_METHOD(Http::RequestHeaderMap&, getRequestHeaders, (), (const));\n  MOCK_METHOD(NiceMock<Http::MockStreamDecoderFilterCallbacks>&, getDecoderFilterCallbacks, (),\n              (const));\n  MOCK_METHOD(Http::Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ());\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/bootstrap_extension_factory.cc",
    "content": "#include \"bootstrap_extension_factory.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nMockBootstrapExtensionFactory::MockBootstrapExtensionFactory() = default;\n\nMockBootstrapExtensionFactory::~MockBootstrapExtensionFactory() = default;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/bootstrap_extension_factory.h",
    "content": "#pragma once\n\n#include \"envoy/server/bootstrap_extension_config.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockBootstrapExtensionFactory : public BootstrapExtensionFactory {\npublic:\n  MockBootstrapExtensionFactory();\n  ~MockBootstrapExtensionFactory() override;\n\n  MOCK_METHOD(BootstrapExtensionPtr, createBootstrapExtension,\n              (const Protobuf::Message&, Configuration::ServerFactoryContext&), (override));\n  MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, (), (override));\n  MOCK_METHOD(std::string, name, (), (const, override));\n};\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/config_tracker.cc",
    "content": "#include \"config_tracker.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing ::testing::_;\nusing ::testing::Invoke;\n\nMockConfigTracker::MockConfigTracker() {\n  ON_CALL(*this, add_(_, _))\n      .WillByDefault(Invoke([this](const std::string& key, Cb callback) -> EntryOwner* {\n        EXPECT_TRUE(config_tracker_callbacks_.find(key) == config_tracker_callbacks_.end());\n        config_tracker_callbacks_[key] = callback;\n        return new MockEntryOwner();\n      }));\n}\n\nMockConfigTracker::~MockConfigTracker() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/config_tracker.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/server/config_tracker.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockConfigTracker : public ConfigTracker {\npublic:\n  MockConfigTracker();\n  ~MockConfigTracker() override;\n\n  struct MockEntryOwner : public EntryOwner {};\n\n  MOCK_METHOD(EntryOwner*, add_, (std::string, Cb));\n\n  // Server::ConfigTracker\n  MOCK_METHOD(const CbsMap&, getCallbacksMap, (), (const));\n  EntryOwnerPtr add(const std::string& key, Cb callback) override {\n    return EntryOwnerPtr{add_(key, std::move(callback))};\n  }\n\n  absl::node_hash_map<std::string, Cb> config_tracker_callbacks_;\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/drain_manager.cc",
    "content": "#include \"drain_manager.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing ::testing::_;\nusing ::testing::SaveArg;\n\nMockDrainManager::MockDrainManager() {\n  ON_CALL(*this, startDrainSequence(_)).WillByDefault(SaveArg<0>(&drain_sequence_completion_));\n}\n\nMockDrainManager::~MockDrainManager() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/drain_manager.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"envoy/server/drain_manager.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockDrainManager : public DrainManager {\npublic:\n  MockDrainManager();\n  ~MockDrainManager() override;\n\n  // Server::DrainManager\n  MOCK_METHOD(bool, drainClose, (), (const));\n  MOCK_METHOD(bool, draining, (), (const));\n  MOCK_METHOD(void, startDrainSequence, (std::function<void()> completion));\n  MOCK_METHOD(void, startParentShutdownSequence, ());\n\n  std::function<void()> drain_sequence_completion_;\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/factory_context.cc",
    "content": "#include \"factory_context.h\"\n\n#include <string>\n\n#include \"common/singleton/manager_impl.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nusing ::testing::ReturnRef;\n\nMockFactoryContext::MockFactoryContext()\n    : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())),\n      grpc_context_(scope_.symbolTable()), http_context_(scope_.symbolTable()) {\n  ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_));\n  ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_));\n  ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_));\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n  ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_));\n  ON_CALL(*this, getTransportSocketFactoryContext())\n      .WillByDefault(ReturnRef(transport_socket_factory_context_));\n  ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_));\n  ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_));\n  ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_));\n  ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_));\n  ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_));\n  ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_));\n  ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_));\n  ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_));\n  ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(listener_scope_));\n  ON_CALL(*this, api()).WillByDefault(ReturnRef(api_));\n  ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_));\n  ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_));\n  ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_));\n  
ON_CALL(*this, messageValidationVisitor())\n      .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor()));\n  ON_CALL(*this, api()).WillByDefault(ReturnRef(api_));\n}\n\nMockFactoryContext::~MockFactoryContext() = default;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/factory_context.h",
    "content": "#pragma once\n\n#include \"envoy/server/configuration.h\"\n\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"admin.h\"\n#include \"drain_manager.h\"\n#include \"gmock/gmock.h\"\n#include \"instance.h\"\n#include \"overload_manager.h\"\n#include \"server_lifecycle_notifier.h\"\n#include \"transport_socket_factory_context.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockFactoryContext : public virtual FactoryContext {\npublic:\n  MockFactoryContext();\n  ~MockFactoryContext() override;\n\n  MOCK_METHOD(ServerFactoryContext&, getServerFactoryContext, (), (const));\n  MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const));\n  MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ());\n  MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(const Network::DrainDecision&, drainDecision, ());\n  MOCK_METHOD(bool, healthCheckFailed, ());\n  MOCK_METHOD(Init::Manager&, initManager, ());\n  MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ());\n  MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ());\n  MOCK_METHOD(Stats::Scope&, scope, ());\n  MOCK_METHOD(Singleton::Manager&, singletonManager, ());\n  MOCK_METHOD(OverloadManager&, overloadManager, ());\n  MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ());\n  MOCK_METHOD(Server::Admin&, admin, ());\n  MOCK_METHOD(Stats::Scope&, listenerScope, ());\n  MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const));\n  MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const));\n  MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, (), (const));\n  MOCK_METHOD(TimeSource&, timeSource, ());\n  Event::TestTimeSystem& timeSystem() { return time_system_; }\n  Grpc::Context& grpcContext() override { return grpc_context_; }\n  Http::Context& httpContext() override { return http_context_; 
}\n  MOCK_METHOD(ProcessContextOptRef, processContext, ());\n  MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ());\n  MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ());\n  MOCK_METHOD(Api::Api&, api, ());\n\n  testing::NiceMock<MockServerFactoryContext> server_factory_context_;\n  testing::NiceMock<AccessLog::MockAccessLogManager> access_log_manager_;\n  testing::NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  testing::NiceMock<MockTransportSocketFactoryContext> transport_socket_factory_context_;\n  testing::NiceMock<Event::MockDispatcher> dispatcher_;\n  testing::NiceMock<MockDrainManager> drain_manager_;\n  testing::NiceMock<Init::MockManager> init_manager_;\n  testing::NiceMock<MockServerLifecycleNotifier> lifecycle_notifier_;\n  testing::NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  testing::NiceMock<Envoy::Runtime::MockLoader> runtime_loader_;\n  testing::NiceMock<Stats::MockIsolatedStatsStore> scope_;\n  testing::NiceMock<ThreadLocal::MockInstance> thread_local_;\n  Singleton::ManagerPtr singleton_manager_;\n  testing::NiceMock<MockAdmin> admin_;\n  Stats::IsolatedStoreImpl listener_scope_;\n  Event::GlobalTimeSystem time_system_;\n  testing::NiceMock<ProtobufMessage::MockValidationContext> validation_context_;\n  testing::NiceMock<MockOverloadManager> overload_manager_;\n  Grpc::ContextImpl grpc_context_;\n  Http::ContextImpl http_context_;\n  testing::NiceMock<Api::MockApi> api_;\n};\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/filter_chain_factory_context.cc",
    "content": "#include \"filter_chain_factory_context.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nMockFilterChainFactoryContext::MockFilterChainFactoryContext() = default;\n\nMockFilterChainFactoryContext::~MockFilterChainFactoryContext() = default;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/filter_chain_factory_context.h",
    "content": "#pragma once\n\n#include \"envoy/server/filter_config.h\"\n\n#include \"factory_context.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockFilterChainFactoryContext : public MockFactoryContext, public FilterChainFactoryContext {\npublic:\n  MockFilterChainFactoryContext();\n  ~MockFilterChainFactoryContext() override;\n};\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/guard_dog.cc",
    "content": "#include \"guard_dog.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing ::testing::_;\nusing ::testing::NiceMock;\nusing ::testing::Return;\n\nMockGuardDog::MockGuardDog() : watch_dog_(new NiceMock<MockWatchDog>()) {\n  ON_CALL(*this, createWatchDog(_, _)).WillByDefault(Return(watch_dog_));\n}\n\nMockGuardDog::~MockGuardDog() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/guard_dog.h",
    "content": "#pragma once\n\n#include \"envoy/server/guarddog.h\"\n\n#include \"gmock/gmock.h\"\n#include \"watch_dog.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockGuardDog : public GuardDog {\npublic:\n  MockGuardDog();\n  ~MockGuardDog() override;\n\n  // Server::GuardDog\n  MOCK_METHOD(WatchDogSharedPtr, createWatchDog,\n              (Thread::ThreadId thread_id, const std::string& thread_name));\n  MOCK_METHOD(void, stopWatching, (WatchDogSharedPtr wd));\n\n  std::shared_ptr<MockWatchDog> watch_dog_;\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/health_checker_factory_context.cc",
    "content": "#include \"health_checker_factory_context.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nusing ::testing::ReturnRef;\n\nMockHealthCheckerFactoryContext::MockHealthCheckerFactoryContext() {\n  event_logger_ = new testing::NiceMock<Upstream::MockHealthCheckEventLogger>();\n  ON_CALL(*this, cluster()).WillByDefault(ReturnRef(cluster_));\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n  ON_CALL(*this, random()).WillByDefault(ReturnRef(random_));\n  ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_));\n  ON_CALL(*this, eventLogger_()).WillByDefault(Return(event_logger_));\n  ON_CALL(*this, messageValidationVisitor())\n      .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor()));\n  ON_CALL(*this, api()).WillByDefault(ReturnRef(api_));\n}\n\nMockHealthCheckerFactoryContext::~MockHealthCheckerFactoryContext() = default;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/health_checker_factory_context.h",
    "content": "#pragma once\n\n#include \"envoy/server/health_checker_config.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/mocks/upstream/health_check_event_logger.h\"\n#include \"test/mocks/upstream/health_checker.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryContext {\npublic:\n  MockHealthCheckerFactoryContext();\n  ~MockHealthCheckerFactoryContext() override;\n\n  MOCK_METHOD(Upstream::Cluster&, cluster, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ());\n  MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ());\n  MOCK_METHOD(Upstream::HealthCheckEventLogger*, eventLogger_, ());\n  MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ());\n  MOCK_METHOD(Api::Api&, api, ());\n  Upstream::HealthCheckEventLoggerPtr eventLogger() override {\n    return Upstream::HealthCheckEventLoggerPtr(eventLogger_());\n  }\n\n  testing::NiceMock<Upstream::MockClusterMockPrioritySet> cluster_;\n  testing::NiceMock<Event::MockDispatcher> dispatcher_;\n  testing::NiceMock<Envoy::Random::MockRandomGenerator> random_;\n  testing::NiceMock<Envoy::Runtime::MockLoader> runtime_;\n  testing::NiceMock<Envoy::Upstream::MockHealthCheckEventLogger>* event_logger_{};\n  testing::NiceMock<Envoy::Api::MockApi> api_{};\n};\n} // namespace Configuration\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/hot_restart.cc",
    "content": "#include \"hot_restart.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing ::testing::ReturnRef;\n\nMockHotRestart::MockHotRestart() : stats_allocator_(*symbol_table_) {\n  ON_CALL(*this, logLock()).WillByDefault(ReturnRef(log_lock_));\n  ON_CALL(*this, accessLogLock()).WillByDefault(ReturnRef(access_log_lock_));\n  ON_CALL(*this, statsAllocator()).WillByDefault(ReturnRef(stats_allocator_));\n}\n\nMockHotRestart::~MockHotRestart() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/hot_restart.h",
    "content": "#pragma once\n\n#include \"envoy/server/instance.h\"\n\n#include \"test/mocks/stats/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockHotRestart : public HotRestart {\npublic:\n  MockHotRestart();\n  ~MockHotRestart() override;\n\n  // Server::HotRestart\n  MOCK_METHOD(void, drainParentListeners, ());\n  MOCK_METHOD(int, duplicateParentListenSocket, (const std::string& address));\n  MOCK_METHOD(std::unique_ptr<envoy::HotRestartMessage>, getParentStats, ());\n  MOCK_METHOD(void, initialize, (Event::Dispatcher & dispatcher, Server::Instance& server));\n  MOCK_METHOD(void, sendParentAdminShutdownRequest, (time_t & original_start_time));\n  MOCK_METHOD(void, sendParentTerminateRequest, ());\n  MOCK_METHOD(ServerStatsFromParent, mergeParentStatsIfAny, (Stats::StoreRoot & stats_store));\n  MOCK_METHOD(void, shutdown, ());\n  MOCK_METHOD(uint32_t, baseId, ());\n  MOCK_METHOD(std::string, version, ());\n  MOCK_METHOD(Thread::BasicLockable&, logLock, ());\n  MOCK_METHOD(Thread::BasicLockable&, accessLogLock, ());\n  MOCK_METHOD(Stats::Allocator&, statsAllocator, ());\n\nprivate:\n  Stats::TestSymbolTable symbol_table_;\n  Thread::MutexBasicLockable log_lock_;\n  Thread::MutexBasicLockable access_log_lock_;\n  Stats::AllocatorImpl stats_allocator_;\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/instance.cc",
    "content": "#include \"instance.h\"\n\n#include \"common/singleton/manager_impl.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing ::testing::Return;\nusing ::testing::ReturnRef;\n\nMockInstance::MockInstance()\n    : secret_manager_(std::make_unique<Secret::SecretManagerImpl>(admin_.getConfigTracker())),\n      cluster_manager_(timeSource()), ssl_context_manager_(timeSource()),\n      singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())),\n      grpc_context_(stats_store_.symbolTable()), http_context_(stats_store_.symbolTable()),\n      server_factory_context_(\n          std::make_shared<NiceMock<Configuration::MockServerFactoryContext>>()),\n      transport_socket_factory_context_(\n          std::make_shared<NiceMock<Configuration::MockTransportSocketFactoryContext>>()) {\n  ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_));\n  ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_store_));\n  ON_CALL(*this, grpcContext()).WillByDefault(ReturnRef(grpc_context_));\n  ON_CALL(*this, httpContext()).WillByDefault(ReturnRef(http_context_));\n  ON_CALL(*this, dnsResolver()).WillByDefault(Return(dns_resolver_));\n  ON_CALL(*this, api()).WillByDefault(ReturnRef(api_));\n  ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_));\n  ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_));\n  ON_CALL(*this, sslContextManager()).WillByDefault(ReturnRef(ssl_context_manager_));\n  ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_));\n  ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_));\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n  ON_CALL(*this, hotRestart()).WillByDefault(ReturnRef(hot_restart_));\n  ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_));\n  ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_));\n  ON_CALL(*this, 
options()).WillByDefault(ReturnRef(options_));\n  ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_));\n  ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_));\n  ON_CALL(*this, listenerManager()).WillByDefault(ReturnRef(listener_manager_));\n  ON_CALL(*this, mutexTracer()).WillByDefault(Return(nullptr));\n  ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_));\n  ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_));\n  ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_));\n  ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(*server_factory_context_));\n  ON_CALL(*this, transportSocketFactoryContext())\n      .WillByDefault(ReturnRef(*transport_socket_factory_context_));\n}\n\nMockInstance::~MockInstance() = default;\n\nnamespace Configuration {\n\nMockServerFactoryContext::MockServerFactoryContext()\n    : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())),\n      grpc_context_(scope_.symbolTable()) {\n  ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_));\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n  ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_));\n  ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_));\n  ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_));\n  ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_));\n  ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_));\n  ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_));\n  ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_));\n  ON_CALL(*this, api()).WillByDefault(ReturnRef(api_));\n  ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_));\n  ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_));\n  ON_CALL(*this, 
messageValidationVisitor())\n      .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor()));\n  ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_));\n}\nMockServerFactoryContext::~MockServerFactoryContext() = default;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/instance.h",
    "content": "#pragma once\n\n#include \"envoy/server/instance.h\"\n\n#include \"common/grpc/context_impl.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/secret/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/tracing/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n\n#include \"admin.h\"\n#include \"drain_manager.h\"\n#include \"gmock/gmock.h\"\n#include \"hot_restart.h\"\n#include \"listener_manager.h\"\n#include \"options.h\"\n#include \"overload_manager.h\"\n#include \"server_lifecycle_notifier.h\"\n#include \"transport_socket_factory_context.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockServerFactoryContext;\n} // namespace Configuration\n\nclass MockInstance : public Instance {\npublic:\n  MockInstance();\n  ~MockInstance() override;\n\n  Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); }\n\n  MOCK_METHOD(Admin&, admin, ());\n  MOCK_METHOD(Api::Api&, api, ());\n  MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ());\n  MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(Network::DnsResolverSharedPtr, dnsResolver, ());\n  MOCK_METHOD(void, drainListeners, ());\n  MOCK_METHOD(DrainManager&, drainManager, ());\n  MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ());\n  MOCK_METHOD(void, failHealthcheck, (bool fail));\n  
MOCK_METHOD(void, exportStatsToChild, (envoy::HotRestartMessage::Reply::Stats*));\n  MOCK_METHOD(bool, healthCheckFailed, ());\n  MOCK_METHOD(HotRestart&, hotRestart, ());\n  MOCK_METHOD(Init::Manager&, initManager, ());\n  MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ());\n  MOCK_METHOD(ListenerManager&, listenerManager, ());\n  MOCK_METHOD(Envoy::MutexTracer*, mutexTracer, ());\n  MOCK_METHOD(const Options&, options, ());\n  MOCK_METHOD(OverloadManager&, overloadManager, ());\n  MOCK_METHOD(Runtime::Loader&, runtime, ());\n  MOCK_METHOD(void, shutdown, ());\n  MOCK_METHOD(bool, isShutdown, ());\n  MOCK_METHOD(void, shutdownAdmin, ());\n  MOCK_METHOD(Singleton::Manager&, singletonManager, ());\n  MOCK_METHOD(time_t, startTimeCurrentEpoch, ());\n  MOCK_METHOD(time_t, startTimeFirstEpoch, ());\n  MOCK_METHOD(Stats::Store&, stats, ());\n  MOCK_METHOD(Grpc::Context&, grpcContext, ());\n  MOCK_METHOD(Http::Context&, httpContext, ());\n  MOCK_METHOD(ProcessContextOptRef, processContext, ());\n  MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ());\n  MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const));\n  MOCK_METHOD(void, flushStats, ());\n  MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ());\n  MOCK_METHOD(Configuration::ServerFactoryContext&, serverFactoryContext, ());\n  MOCK_METHOD(Configuration::TransportSocketFactoryContext&, transportSocketFactoryContext, ());\n\n  void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) override {\n    http_context_.setDefaultTracingConfig(tracing_config);\n  }\n\n  TimeSource& timeSource() override { return time_system_; }\n\n  NiceMock<Stats::MockIsolatedStatsStore> stats_store_;\n  testing::NiceMock<ThreadLocal::MockInstance> thread_local_;\n  std::shared_ptr<testing::NiceMock<Network::MockDnsResolver>> dns_resolver_{\n      new 
testing::NiceMock<Network::MockDnsResolver>()};\n  testing::NiceMock<Api::MockApi> api_;\n  testing::NiceMock<MockAdmin> admin_;\n  Event::GlobalTimeSystem time_system_;\n  std::unique_ptr<Secret::SecretManager> secret_manager_;\n  testing::NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  Thread::MutexBasicLockable access_log_lock_;\n  testing::NiceMock<Runtime::MockLoader> runtime_loader_;\n  Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_;\n  testing::NiceMock<Event::MockDispatcher> dispatcher_;\n  testing::NiceMock<MockDrainManager> drain_manager_;\n  testing::NiceMock<AccessLog::MockAccessLogManager> access_log_manager_;\n  testing::NiceMock<MockHotRestart> hot_restart_;\n  testing::NiceMock<MockOptions> options_;\n  testing::NiceMock<MockServerLifecycleNotifier> lifecycle_notifier_;\n  testing::NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  testing::NiceMock<Init::MockManager> init_manager_;\n  testing::NiceMock<MockListenerManager> listener_manager_;\n  testing::NiceMock<MockOverloadManager> overload_manager_;\n  Singleton::ManagerPtr singleton_manager_;\n  Grpc::ContextImpl grpc_context_;\n  Http::ContextImpl http_context_;\n  testing::NiceMock<ProtobufMessage::MockValidationContext> validation_context_;\n  std::shared_ptr<testing::NiceMock<Configuration::MockServerFactoryContext>>\n      server_factory_context_;\n  std::shared_ptr<testing::NiceMock<Configuration::MockTransportSocketFactoryContext>>\n      transport_socket_factory_context_;\n};\n\nnamespace Configuration {\nclass MockServerFactoryContext : public virtual ServerFactoryContext {\npublic:\n  MockServerFactoryContext();\n  ~MockServerFactoryContext() override;\n\n  MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(const Network::DrainDecision&, drainDecision, ());\n  MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const));\n  MOCK_METHOD(Envoy::Runtime::Loader&, 
runtime, ());\n  MOCK_METHOD(Stats::Scope&, scope, ());\n  MOCK_METHOD(Singleton::Manager&, singletonManager, ());\n  MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ());\n  MOCK_METHOD(Server::Admin&, admin, ());\n  MOCK_METHOD(TimeSource&, timeSource, ());\n  Event::TestTimeSystem& timeSystem() { return time_system_; }\n  MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ());\n  MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ());\n  MOCK_METHOD(Api::Api&, api, ());\n  Grpc::Context& grpcContext() override { return grpc_context_; }\n  MOCK_METHOD(Server::DrainManager&, drainManager, ());\n  MOCK_METHOD(Init::Manager&, initManager, ());\n  MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ());\n  MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const));\n\n  testing::NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  testing::NiceMock<Event::MockDispatcher> dispatcher_;\n  testing::NiceMock<MockDrainManager> drain_manager_;\n  testing::NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  testing::NiceMock<Envoy::Runtime::MockLoader> runtime_loader_;\n  testing::NiceMock<Stats::MockIsolatedStatsStore> scope_;\n  testing::NiceMock<ThreadLocal::MockInstance> thread_local_;\n  testing::NiceMock<ProtobufMessage::MockValidationContext> validation_context_;\n  Singleton::ManagerPtr singleton_manager_;\n  testing::NiceMock<MockAdmin> admin_;\n  Event::GlobalTimeSystem time_system_;\n  testing::NiceMock<Api::MockApi> api_;\n  Grpc::ContextImpl grpc_context_;\n};\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/listener_component_factory.cc",
    "content": "#include \"listener_component_factory.h\"\n\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing ::testing::_;\nusing ::testing::Invoke;\n\nMockListenerComponentFactory::MockListenerComponentFactory()\n    : socket_(std::make_shared<testing::NiceMock<Network::MockListenSocket>>()) {\n  ON_CALL(*this, createListenSocket(_, _, _, _))\n      .WillByDefault(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type,\n                                const Network::Socket::OptionsSharedPtr& options,\n                                const ListenSocketCreationParams&) -> Network::SocketSharedPtr {\n        if (!Network::Socket::applyOptions(options, *socket_,\n                                           envoy::config::core::v3::SocketOption::STATE_PREBIND)) {\n          throw EnvoyException(\"MockListenerComponentFactory: Setting socket options failed\");\n        }\n        return socket_;\n      }));\n}\n\nMockListenerComponentFactory::~MockListenerComponentFactory() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/listener_component_factory.h",
    "content": "#pragma once\n\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/server/drain_manager.h\"\n#include \"envoy/server/listener_manager.h\"\n\n#include \"test/mocks/network/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockListenerComponentFactory : public ListenerComponentFactory {\npublic:\n  MockListenerComponentFactory();\n  ~MockListenerComponentFactory() override;\n\n  DrainManagerPtr\n  createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) override {\n    return DrainManagerPtr{createDrainManager_(drain_type)};\n  }\n  LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config,\n                         const udpa::core::v1::ResourceLocator* lds_resources_locator) override {\n    return LdsApiPtr{createLdsApi_(lds_config, lds_resources_locator)};\n  }\n\n  MOCK_METHOD(LdsApi*, createLdsApi_,\n              (const envoy::config::core::v3::ConfigSource&,\n               const udpa::core::v1::ResourceLocator*));\n  MOCK_METHOD(std::vector<Network::FilterFactoryCb>, createNetworkFilterFactoryList,\n              (const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>& filters,\n               Configuration::FilterChainFactoryContext& filter_chain_factory_context));\n  MOCK_METHOD(std::vector<Network::ListenerFilterFactoryCb>, createListenerFilterFactoryList,\n              (const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>&,\n               Configuration::ListenerFactoryContext& context));\n  MOCK_METHOD(std::vector<Network::UdpListenerFilterFactoryCb>, createUdpListenerFilterFactoryList,\n              (const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>&,\n               Configuration::ListenerFactoryContext& context));\n  MOCK_METHOD(Network::SocketSharedPtr, createListenSocket,\n              (Network::Address::InstanceConstSharedPtr address, 
Network::Socket::Type socket_type,\n               const Network::Socket::OptionsSharedPtr& options,\n               const ListenSocketCreationParams& params));\n  MOCK_METHOD(DrainManager*, createDrainManager_,\n              (envoy::config::listener::v3::Listener::DrainType drain_type));\n  MOCK_METHOD(uint64_t, nextListenerTag, ());\n\n  std::shared_ptr<Network::MockListenSocket> socket_;\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/listener_factory_context.cc",
    "content": "#include \"listener_factory_context.h\"\n\n#include <string>\n\n#include \"common/singleton/manager_impl.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nusing ::testing::ReturnRef;\n\nMockListenerFactoryContext::MockListenerFactoryContext()\n    : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())),\n      grpc_context_(scope_.symbolTable()), http_context_(scope_.symbolTable()) {\n  ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_));\n  ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_));\n  ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_));\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n  ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_));\n  ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_));\n  ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_));\n  ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_));\n  ON_CALL(*this, random()).WillByDefault(ReturnRef(random_));\n  ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_));\n  ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_));\n  ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_));\n  ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_));\n  ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_));\n  ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(listener_scope_));\n  ON_CALL(*this, api()).WillByDefault(ReturnRef(api_));\n  ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_));\n  ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_));\n  ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_));\n  ON_CALL(*this, messageValidationVisitor())\n     
 .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor()));\n}\n\nMockListenerFactoryContext::~MockListenerFactoryContext() = default;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/listener_factory_context.h",
    "content": "#pragma once\n\n#include \"envoy/server/configuration.h\"\n#include \"envoy/server/listener_manager.h\"\n\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"admin.h\"\n#include \"drain_manager.h\"\n#include \"gmock/gmock.h\"\n#include \"instance.h\"\n#include \"overload_manager.h\"\n#include \"server_lifecycle_notifier.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockListenerFactoryContext : public ListenerFactoryContext {\npublic:\n  MockListenerFactoryContext();\n  ~MockListenerFactoryContext() override;\n\n  const Network::ListenerConfig& listenerConfig() const override { return listener_config_; }\n  MOCK_METHOD(const Network::ListenerConfig&, listenerConfig_, (), (const));\n  MOCK_METHOD(ServerFactoryContext&, getServerFactoryContext, (), (const));\n  MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const));\n  MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ());\n  MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(const Network::DrainDecision&, drainDecision, ());\n  MOCK_METHOD(bool, healthCheckFailed, ());\n  MOCK_METHOD(Init::Manager&, initManager, ());\n  MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ());\n  MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ());\n  MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ());\n  MOCK_METHOD(Stats::Scope&, scope, ());\n  MOCK_METHOD(Singleton::Manager&, singletonManager, ());\n  MOCK_METHOD(OverloadManager&, overloadManager, ());\n  MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ());\n  MOCK_METHOD(Server::Admin&, admin, ());\n  MOCK_METHOD(Stats::Scope&, listenerScope, ());\n  MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const));\n  MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const));\n  MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, 
(), (const));\n  MOCK_METHOD(TimeSource&, timeSource, ());\n  Event::TestTimeSystem& timeSystem() { return time_system_; }\n  Grpc::Context& grpcContext() override { return grpc_context_; }\n  Http::Context& httpContext() override { return http_context_; }\n  MOCK_METHOD(ProcessContextOptRef, processContext, ());\n  MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ());\n  MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ());\n  MOCK_METHOD(Api::Api&, api, ());\n\n  testing::NiceMock<MockServerFactoryContext> server_factory_context_;\n  testing::NiceMock<AccessLog::MockAccessLogManager> access_log_manager_;\n  testing::NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  testing::NiceMock<Event::MockDispatcher> dispatcher_;\n  testing::NiceMock<MockDrainManager> drain_manager_;\n  testing::NiceMock<Init::MockManager> init_manager_;\n  testing::NiceMock<MockServerLifecycleNotifier> lifecycle_notifier_;\n  testing::NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  testing::NiceMock<Envoy::Random::MockRandomGenerator> random_;\n  testing::NiceMock<Envoy::Runtime::MockLoader> runtime_loader_;\n  testing::NiceMock<Stats::MockIsolatedStatsStore> scope_;\n  testing::NiceMock<ThreadLocal::MockInstance> thread_local_;\n  Singleton::ManagerPtr singleton_manager_;\n  testing::NiceMock<MockAdmin> admin_;\n  Stats::IsolatedStoreImpl listener_scope_;\n  Event::GlobalTimeSystem time_system_;\n  testing::NiceMock<ProtobufMessage::MockValidationContext> validation_context_;\n  testing::NiceMock<MockOverloadManager> overload_manager_;\n  Grpc::ContextImpl grpc_context_;\n  Http::ContextImpl http_context_;\n  testing::NiceMock<Api::MockApi> api_;\n\n  Network::MockListenerConfig listener_config_;\n};\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/listener_manager.cc",
    "content": "#include \"listener_manager.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nMockListenerManager::MockListenerManager() = default;\n\nMockListenerManager::~MockListenerManager() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/listener_manager.h",
    "content": "#pragma once\n\n#include \"envoy/server/listener_manager.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockListenerManager : public ListenerManager {\npublic:\n  MockListenerManager();\n  ~MockListenerManager() override;\n\n  MOCK_METHOD(bool, addOrUpdateListener,\n              (const envoy::config::listener::v3::Listener& config, const std::string& version_info,\n               bool modifiable));\n  MOCK_METHOD(void, createLdsApi,\n              (const envoy::config::core::v3::ConfigSource& lds_config,\n               const udpa::core::v1::ResourceLocator*));\n  MOCK_METHOD(std::vector<std::reference_wrapper<Network::ListenerConfig>>, listeners,\n              (ListenerState state));\n  MOCK_METHOD(uint64_t, numConnections, (), (const));\n  MOCK_METHOD(bool, removeListener, (const std::string& listener_name));\n  MOCK_METHOD(void, startWorkers, (GuardDog & guard_dog));\n  MOCK_METHOD(void, stopListeners, (StopListenersType listeners_type));\n  MOCK_METHOD(void, stopWorkers, ());\n  MOCK_METHOD(void, beginListenerUpdate, ());\n  MOCK_METHOD(void, endListenerUpdate, (ListenerManager::FailureStates &&));\n  MOCK_METHOD(ApiListenerOptRef, apiListener, ());\n  MOCK_METHOD(bool, isWorkerStarted, ());\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/main.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"envoy/server/configuration.h\"\n#include \"envoy/server/overload_manager.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockMain : public Main {\npublic:\n  MockMain() = default;\n  ~MockMain() override = default;\n\n  MOCK_METHOD(Upstream::ClusterManager*, clusterManager, ());\n  MOCK_METHOD(std::list<Stats::SinkPtr>&, statsSinks, ());\n  MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const));\n  MOCK_METHOD(const Watchdog&, mainThreadWatchdogConfig, (), (const));\n  MOCK_METHOD(const Watchdog&, workerWatchdogConfig, (), (const));\n};\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/mocks.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n#include \"admin.h\"\n#include \"admin_stream.h\"\n#include \"bootstrap_extension_factory.h\"\n#include \"config_tracker.h\"\n#include \"drain_manager.h\"\n#include \"factory_context.h\"\n#include \"filter_chain_factory_context.h\"\n#include \"guard_dog.h\"\n#include \"health_checker_factory_context.h\"\n#include \"hot_restart.h\"\n#include \"instance.h\"\n#include \"listener_component_factory.h\"\n#include \"listener_factory_context.h\"\n#include \"listener_manager.h\"\n#include \"main.h\"\n#include \"options.h\"\n#include \"overload_manager.h\"\n#include \"server_lifecycle_notifier.h\"\n#include \"tracer_factory.h\"\n#include \"tracer_factory_context.h\"\n#include \"transport_socket_factory_context.h\"\n#include \"watch_dog.h\"\n#include \"worker.h\"\n#include \"worker_factory.h\"\n"
  },
  {
    "path": "test/mocks/server/options.cc",
    "content": "#include \"options.h\"\n\n#include \"envoy/admin/v3/server_info.pb.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing ::testing::Invoke;\nusing ::testing::Return;\nusing ::testing::ReturnPointee;\nusing ::testing::ReturnRef;\n\nMockOptions::MockOptions(const std::string& config_path) : config_path_(config_path) {\n  ON_CALL(*this, concurrency()).WillByDefault(ReturnPointee(&concurrency_));\n  ON_CALL(*this, configPath()).WillByDefault(ReturnRef(config_path_));\n  ON_CALL(*this, configProto()).WillByDefault(ReturnRef(config_proto_));\n  ON_CALL(*this, configYaml()).WillByDefault(ReturnRef(config_yaml_));\n  ON_CALL(*this, bootstrapVersion()).WillByDefault(ReturnRef(bootstrap_version_));\n  ON_CALL(*this, allowUnknownStaticFields()).WillByDefault(Invoke([this] {\n    return allow_unknown_static_fields_;\n  }));\n  ON_CALL(*this, rejectUnknownDynamicFields()).WillByDefault(Invoke([this] {\n    return reject_unknown_dynamic_fields_;\n  }));\n  ON_CALL(*this, ignoreUnknownDynamicFields()).WillByDefault(Invoke([this] {\n    return ignore_unknown_dynamic_fields_;\n  }));\n  ON_CALL(*this, adminAddressPath()).WillByDefault(ReturnRef(admin_address_path_));\n  ON_CALL(*this, serviceClusterName()).WillByDefault(ReturnRef(service_cluster_name_));\n  ON_CALL(*this, serviceNodeName()).WillByDefault(ReturnRef(service_node_name_));\n  ON_CALL(*this, serviceZone()).WillByDefault(ReturnRef(service_zone_name_));\n  ON_CALL(*this, logLevel()).WillByDefault(Return(log_level_));\n  ON_CALL(*this, logPath()).WillByDefault(ReturnRef(log_path_));\n  ON_CALL(*this, restartEpoch()).WillByDefault(ReturnPointee(&hot_restart_epoch_));\n  ON_CALL(*this, hotRestartDisabled()).WillByDefault(ReturnPointee(&hot_restart_disabled_));\n  ON_CALL(*this, signalHandlingEnabled()).WillByDefault(ReturnPointee(&signal_handling_enabled_));\n  ON_CALL(*this, 
mutexTracingEnabled()).WillByDefault(ReturnPointee(&mutex_tracing_enabled_));\n  ON_CALL(*this, cpusetThreadsEnabled()).WillByDefault(ReturnPointee(&cpuset_threads_enabled_));\n  ON_CALL(*this, disabledExtensions()).WillByDefault(ReturnRef(disabled_extensions_));\n  ON_CALL(*this, toCommandLineOptions()).WillByDefault(Invoke([] {\n    return std::make_unique<envoy::admin::v3::CommandLineOptions>();\n  }));\n  ON_CALL(*this, socketPath()).WillByDefault(ReturnRef(socket_path_));\n  ON_CALL(*this, socketMode()).WillByDefault(ReturnPointee(&socket_mode_));\n}\n\nMockOptions::~MockOptions() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/options.h",
    "content": "#pragma once\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/server/options.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"gmock/gmock.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockOptions : public Options {\npublic:\n  MockOptions() : MockOptions(std::string()) {}\n  MockOptions(const std::string& config_path);\n  ~MockOptions() override;\n\n  MOCK_METHOD(uint64_t, baseId, (), (const));\n  MOCK_METHOD(bool, useDynamicBaseId, (), (const));\n  MOCK_METHOD(const std::string&, baseIdPath, (), (const));\n  MOCK_METHOD(uint32_t, concurrency, (), (const));\n  MOCK_METHOD(const std::string&, configPath, (), (const));\n  MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, configProto, (), (const));\n  MOCK_METHOD(const std::string&, configYaml, (), (const));\n  MOCK_METHOD(const absl::optional<uint32_t>&, bootstrapVersion, (), (const));\n  MOCK_METHOD(bool, allowUnknownStaticFields, (), (const));\n  MOCK_METHOD(bool, rejectUnknownDynamicFields, (), (const));\n  MOCK_METHOD(bool, ignoreUnknownDynamicFields, (), (const));\n  MOCK_METHOD(const std::string&, adminAddressPath, (), (const));\n  MOCK_METHOD(Network::Address::IpVersion, localAddressIpVersion, (), (const));\n  MOCK_METHOD(std::chrono::seconds, drainTime, (), (const));\n  MOCK_METHOD(std::chrono::seconds, parentShutdownTime, (), (const));\n  MOCK_METHOD(Server::DrainStrategy, drainStrategy, (), (const));\n  MOCK_METHOD(spdlog::level::level_enum, logLevel, (), (const));\n  MOCK_METHOD((const std::vector<std::pair<std::string, spdlog::level::level_enum>>&),\n              componentLogLevels, (), (const));\n  MOCK_METHOD(const std::string&, logFormat, (), (const));\n  MOCK_METHOD(bool, logFormatEscaped, (), (const));\n  MOCK_METHOD(bool, enableFineGrainLogging, (), (const));\n  MOCK_METHOD(const std::string&, logPath, (), (const));\n  MOCK_METHOD(uint64_t, restartEpoch, (), (const));\n  
MOCK_METHOD(std::chrono::milliseconds, fileFlushIntervalMsec, (), (const));\n  MOCK_METHOD(Mode, mode, (), (const));\n  MOCK_METHOD(const std::string&, serviceClusterName, (), (const));\n  MOCK_METHOD(const std::string&, serviceNodeName, (), (const));\n  MOCK_METHOD(const std::string&, serviceZone, (), (const));\n  MOCK_METHOD(bool, hotRestartDisabled, (), (const));\n  MOCK_METHOD(bool, signalHandlingEnabled, (), (const));\n  MOCK_METHOD(bool, mutexTracingEnabled, (), (const));\n  MOCK_METHOD(bool, fakeSymbolTableEnabled, (), (const));\n  MOCK_METHOD(bool, cpusetThreadsEnabled, (), (const));\n  MOCK_METHOD(const std::vector<std::string>&, disabledExtensions, (), (const));\n  MOCK_METHOD(Server::CommandLineOptionsPtr, toCommandLineOptions, (), (const));\n  MOCK_METHOD(const std::string&, socketPath, (), (const));\n  MOCK_METHOD(mode_t, socketMode, (), (const));\n\n  std::string config_path_;\n  envoy::config::bootstrap::v3::Bootstrap config_proto_;\n  std::string config_yaml_;\n  absl::optional<uint32_t> bootstrap_version_;\n  bool allow_unknown_static_fields_{};\n  bool reject_unknown_dynamic_fields_{};\n  bool ignore_unknown_dynamic_fields_{};\n  std::string admin_address_path_;\n  std::string service_cluster_name_;\n  std::string service_node_name_;\n  std::string service_zone_name_;\n  spdlog::level::level_enum log_level_{spdlog::level::trace};\n  std::string log_path_;\n  uint32_t concurrency_{1};\n  uint64_t hot_restart_epoch_{};\n  bool hot_restart_disabled_{};\n  bool signal_handling_enabled_{true};\n  bool mutex_tracing_enabled_{};\n  bool cpuset_threads_enabled_{};\n  std::vector<std::string> disabled_extensions_;\n  std::string socket_path_;\n  mode_t socket_mode_{};\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/overload_manager.cc",
    "content": "#include \"overload_manager.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing ::testing::ReturnRef;\n\nMockThreadLocalOverloadState::MockThreadLocalOverloadState()\n    : disabled_state_(OverloadActionState::inactive()) {\n  ON_CALL(*this, getState).WillByDefault(ReturnRef(disabled_state_));\n}\n\nMockOverloadManager::MockOverloadManager() {\n  ON_CALL(*this, getThreadLocalOverloadState()).WillByDefault(ReturnRef(overload_state_));\n}\n\nMockOverloadManager::~MockOverloadManager() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/overload_manager.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/server/overload_manager.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass MockThreadLocalOverloadState : public ThreadLocalOverloadState {\npublic:\n  MockThreadLocalOverloadState();\n  MOCK_METHOD(const OverloadActionState&, getState, (const std::string&), (override));\n\nprivate:\n  const OverloadActionState disabled_state_;\n};\n\nclass MockOverloadManager : public OverloadManager {\npublic:\n  MockOverloadManager();\n  ~MockOverloadManager() override;\n\n  // OverloadManager\n  MOCK_METHOD(void, start, ());\n  MOCK_METHOD(bool, registerForAction,\n              (const std::string& action, Event::Dispatcher& dispatcher,\n               OverloadActionCb callback));\n  MOCK_METHOD(ThreadLocalOverloadState&, getThreadLocalOverloadState, ());\n\n  testing::NiceMock<MockThreadLocalOverloadState> overload_state_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/server_lifecycle_notifier.cc",
    "content": "#include \"server_lifecycle_notifier.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nMockServerLifecycleNotifier::MockServerLifecycleNotifier() = default;\n\nMockServerLifecycleNotifier::~MockServerLifecycleNotifier() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/server_lifecycle_notifier.h",
    "content": "#pragma once\n\n#include \"envoy/server/lifecycle_notifier.h\"\n\n#include \"gmock/gmock.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockServerLifecycleNotifier : public ServerLifecycleNotifier {\npublic:\n  MockServerLifecycleNotifier();\n  ~MockServerLifecycleNotifier() override;\n\n  MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, (Stage, StageCallback));\n  MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback,\n              (Stage, StageCallbackWithCompletion));\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/tracer_factory.cc",
    "content": "#include \"tracer_factory.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nusing ::testing::Invoke;\n\nMockTracerFactory::MockTracerFactory(const std::string& name) : name_(name) {\n  ON_CALL(*this, createEmptyConfigProto()).WillByDefault(Invoke([] {\n    return std::make_unique<ProtobufWkt::Struct>();\n  }));\n}\n\nMockTracerFactory::~MockTracerFactory() = default;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/tracer_factory.h",
    "content": "#pragma once\n\n#include \"envoy/protobuf/message_validator.h\"\n#include \"envoy/server/tracer_config.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockTracerFactory : public TracerFactory {\npublic:\n  explicit MockTracerFactory(const std::string& name);\n  ~MockTracerFactory() override;\n\n  std::string name() const override { return name_; }\n\n  MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, ());\n  MOCK_METHOD(Tracing::HttpTracerSharedPtr, createHttpTracer,\n              (const Protobuf::Message& config, TracerFactoryContext& context));\n\nprivate:\n  std::string name_;\n};\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/tracer_factory_context.cc",
    "content": "#include \"tracer_factory_context.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nusing ::testing::ReturnRef;\n\nMockTracerFactoryContext::MockTracerFactoryContext() {\n  ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(server_factory_context_));\n  ON_CALL(*this, messageValidationVisitor())\n      .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor()));\n}\n\nMockTracerFactoryContext::~MockTracerFactoryContext() = default;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/tracer_factory_context.h",
    "content": "#pragma once\n\n#include \"envoy/server/configuration.h\"\n\n#include \"gmock/gmock.h\"\n#include \"instance.h\"\n#include \"tracer_factory.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockTracerFactoryContext : public TracerFactoryContext {\npublic:\n  MockTracerFactoryContext();\n  ~MockTracerFactoryContext() override;\n\n  MOCK_METHOD(ServerFactoryContext&, serverFactoryContext, ());\n  MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ());\n\n  testing::NiceMock<Configuration::MockServerFactoryContext> server_factory_context_;\n};\n} // namespace Configuration\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/transport_socket_factory_context.cc",
    "content": "#include \"transport_socket_factory_context.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nusing ::testing::ReturnRef;\n\nMockTransportSocketFactoryContext::MockTransportSocketFactoryContext()\n    : secret_manager_(std::make_unique<Secret::SecretManagerImpl>(config_tracker_)) {\n  ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_));\n  ON_CALL(*this, api()).WillByDefault(ReturnRef(api_));\n  ON_CALL(*this, messageValidationVisitor())\n      .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor()));\n}\n\nMockTransportSocketFactoryContext::~MockTransportSocketFactoryContext() = default;\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/transport_socket_factory_context.h",
    "content": "#pragma once\n\n#include \"envoy/server/transport_socket_config.h\"\n\n#include \"common/secret/secret_manager_impl.h\"\n\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n\n#include \"config_tracker.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockTransportSocketFactoryContext : public TransportSocketFactoryContext {\npublic:\n  MockTransportSocketFactoryContext();\n  ~MockTransportSocketFactoryContext() override;\n\n  Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); }\n\n  MOCK_METHOD(Server::Admin&, admin, ());\n  MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ());\n  MOCK_METHOD(Stats::Scope&, scope, ());\n  MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ());\n  MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const));\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n  MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ());\n  MOCK_METHOD(Stats::Store&, stats, ());\n  MOCK_METHOD(Init::Manager&, initManager, ());\n  MOCK_METHOD(Singleton::Manager&, singletonManager, ());\n  MOCK_METHOD(ThreadLocal::SlotAllocator&, threadLocal, ());\n  MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ());\n  MOCK_METHOD(Api::Api&, api, ());\n\n  testing::NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  testing::NiceMock<Api::MockApi> api_;\n  testing::NiceMock<MockConfigTracker> config_tracker_;\n  std::unique_ptr<Secret::SecretManager> secret_manager_;\n};\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/watch_dog.cc",
    "content": "#include \"watch_dog.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nMockWatchDog::MockWatchDog() = default;\n\nMockWatchDog::~MockWatchDog() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/watch_dog.h",
    "content": "#pragma once\n\n#include \"envoy/server/watchdog.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockWatchDog : public WatchDog {\npublic:\n  MockWatchDog();\n  ~MockWatchDog() override;\n\n  // Server::WatchDog\n  MOCK_METHOD(void, startWatchdog, (Event::Dispatcher & dispatcher));\n  MOCK_METHOD(void, touch, ());\n  MOCK_METHOD(Thread::ThreadId, threadId, (), (const));\n  MOCK_METHOD(MonotonicTime, lastTouchTime, (), (const));\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/watchdog_config.cc",
    "content": "#include \"watchdog_config.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\n\nusing ::testing::Return;\n\nMockWatchdog::MockWatchdog(int miss, int megamiss, int kill, int multikill,\n                           double multikill_threshold, const std::vector<std::string> action_protos)\n    : miss_(miss), megamiss_(megamiss), kill_(kill), multikill_(multikill),\n      multikill_threshold_(multikill_threshold), actions_([&]() {\n        Protobuf::RepeatedPtrField<envoy::config::bootstrap::v3::Watchdog::WatchdogAction> actions;\n\n        for (const auto& action_proto_str : action_protos) {\n          envoy::config::bootstrap::v3::Watchdog::WatchdogAction action;\n          TestUtility::loadFromJson(action_proto_str, action);\n          actions.Add()->CopyFrom(action);\n        }\n\n        return actions;\n      }()) {\n  ON_CALL(*this, missTimeout()).WillByDefault(Return(miss_));\n  ON_CALL(*this, megaMissTimeout()).WillByDefault(Return(megamiss_));\n  ON_CALL(*this, killTimeout()).WillByDefault(Return(kill_));\n  ON_CALL(*this, multiKillTimeout()).WillByDefault(Return(multikill_));\n  ON_CALL(*this, multiKillThreshold()).WillByDefault(Return(multikill_threshold_));\n  ON_CALL(*this, actions).WillByDefault(Return(actions_));\n}\n\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/watchdog_config.h",
    "content": "#pragma once\n\n#include <chrono>\n\n#include \"envoy/server/configuration.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nclass MockWatchdog : public Watchdog {\npublic:\n  MockWatchdog() : MockWatchdog(0, 0, 0, 0, 0.0, {}) {}\n  MockWatchdog(int miss, int megamiss, int kill, int multikill, double multikill_threshold,\n               const std::vector<std::string> action_protos);\n  ~MockWatchdog() override = default;\n\n  MOCK_METHOD(std::chrono::milliseconds, missTimeout, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, megaMissTimeout, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, killTimeout, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, multiKillTimeout, (), (const));\n  MOCK_METHOD(double, multiKillThreshold, (), (const));\n  MOCK_METHOD(Protobuf::RepeatedPtrField<envoy::config::bootstrap::v3::Watchdog::WatchdogAction>,\n              actions, (), (const));\n\n  std::chrono::milliseconds miss_;\n  std::chrono::milliseconds megamiss_;\n  std::chrono::milliseconds kill_;\n  std::chrono::milliseconds multikill_;\n  double multikill_threshold_;\n  Protobuf::RepeatedPtrField<envoy::config::bootstrap::v3::Watchdog::WatchdogAction> actions_;\n};\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/worker.cc",
    "content": "#include \"worker.h\"\n\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nusing ::testing::_;\nusing ::testing::Invoke;\n\nMockWorker::MockWorker() {\n  ON_CALL(*this, addListener(_, _, _))\n      .WillByDefault(\n          Invoke([this](absl::optional<uint64_t> overridden_listener,\n                        Network::ListenerConfig& config, AddListenerCompletion completion) -> void {\n            UNREFERENCED_PARAMETER(overridden_listener);\n            config.listenSocketFactory().getListenSocket();\n            EXPECT_EQ(nullptr, add_listener_completion_);\n            add_listener_completion_ = completion;\n          }));\n\n  ON_CALL(*this, removeListener(_, _))\n      .WillByDefault(\n          Invoke([this](Network::ListenerConfig&, std::function<void()> completion) -> void {\n            EXPECT_EQ(nullptr, remove_listener_completion_);\n            remove_listener_completion_ = completion;\n          }));\n\n  ON_CALL(*this, stopListener(_, _))\n      .WillByDefault(Invoke([](Network::ListenerConfig&, std::function<void()> completion) -> void {\n        if (completion != nullptr) {\n          completion();\n        }\n      }));\n\n  ON_CALL(*this, removeFilterChains(_, _, _))\n      .WillByDefault(Invoke([this](uint64_t, const std::list<const Network::FilterChain*>&,\n                                   std::function<void()> completion) -> void {\n        EXPECT_EQ(nullptr, remove_filter_chains_completion_);\n        remove_filter_chains_completion_ = completion;\n      }));\n}\n\nMockWorker::~MockWorker() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/worker.h",
    "content": "#pragma once\n\n#include \"envoy/server/worker.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockWorker : public Worker {\npublic:\n  MockWorker();\n  ~MockWorker() override;\n\n  void callAddCompletion(bool success) {\n    EXPECT_NE(nullptr, add_listener_completion_);\n    add_listener_completion_(success);\n    add_listener_completion_ = nullptr;\n  }\n\n  void callRemovalCompletion() {\n    EXPECT_NE(nullptr, remove_listener_completion_);\n    remove_listener_completion_();\n    remove_listener_completion_ = nullptr;\n  }\n\n  void callDrainFilterChainsComplete() {\n    EXPECT_NE(nullptr, remove_filter_chains_completion_);\n    remove_filter_chains_completion_();\n    remove_filter_chains_completion_ = nullptr;\n  }\n\n  // Server::Worker\n  MOCK_METHOD(void, addListener,\n              (absl::optional<uint64_t> overridden_listener, Network::ListenerConfig& listener,\n               AddListenerCompletion completion));\n  MOCK_METHOD(uint64_t, numConnections, (), (const));\n  MOCK_METHOD(void, removeListener,\n              (Network::ListenerConfig & listener, std::function<void()> completion));\n  MOCK_METHOD(void, start, (GuardDog & guard_dog));\n  MOCK_METHOD(void, initializeStats, (Stats::Scope & scope));\n  MOCK_METHOD(void, stop, ());\n  MOCK_METHOD(void, stopListener,\n              (Network::ListenerConfig & listener, std::function<void()> completion));\n  MOCK_METHOD(void, removeFilterChains,\n              (uint64_t listener_tag, const std::list<const Network::FilterChain*>& filter_chains,\n               std::function<void()> completion));\n\n  AddListenerCompletion add_listener_completion_;\n  std::function<void()> remove_listener_completion_;\n  std::function<void()> remove_filter_chains_completion_;\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/worker_factory.cc",
    "content": "#include \"worker_factory.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nMockWorkerFactory::MockWorkerFactory() = default;\n\nMockWorkerFactory::~MockWorkerFactory() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/server/worker_factory.h",
    "content": "#pragma once\n\n#include \"envoy/server/worker.h\"\n\n#include \"gmock/gmock.h\"\n#include \"worker.h\"\n\nnamespace Envoy {\nnamespace Server {\nclass MockWorkerFactory : public WorkerFactory {\npublic:\n  MockWorkerFactory();\n  ~MockWorkerFactory() override;\n\n  // Server::WorkerFactory\n  WorkerPtr createWorker(uint32_t, OverloadManager&, const std::string&) override {\n    return WorkerPtr{createWorker_()};\n  }\n\n  MOCK_METHOD(Worker*, createWorker_, ());\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/ssl/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"ssl_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/ssl:certificate_validation_context_config_interface\",\n        \"//include/envoy/ssl:connection_interface\",\n        \"//include/envoy/ssl:context_config_interface\",\n        \"//include/envoy/ssl:context_interface\",\n        \"//include/envoy/ssl:context_manager_interface\",\n        \"//include/envoy/stats:stats_interface\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/ssl/mocks.cc",
    "content": "#include \"mocks.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nMockContextManager::MockContextManager() = default;\nMockContextManager::~MockContextManager() = default;\n\nMockConnectionInfo::MockConnectionInfo() = default;\nMockConnectionInfo::~MockConnectionInfo() = default;\n\nMockClientContext::MockClientContext() = default;\nMockClientContext::~MockClientContext() = default;\n\nMockClientContextConfig::MockClientContextConfig() = default;\nMockClientContextConfig::~MockClientContextConfig() = default;\n\nMockServerContextConfig::MockServerContextConfig() = default;\nMockServerContextConfig::~MockServerContextConfig() = default;\n\nMockPrivateKeyMethodManager::MockPrivateKeyMethodManager() = default;\nMockPrivateKeyMethodManager::~MockPrivateKeyMethodManager() = default;\n\nMockPrivateKeyMethodProvider::MockPrivateKeyMethodProvider() = default;\nMockPrivateKeyMethodProvider::~MockPrivateKeyMethodProvider() = default;\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/ssl/mocks.h",
    "content": "#pragma once\n\n#include <functional>\n#include <string>\n\n#include \"envoy/extensions/transport_sockets/tls/v3/cert.pb.h\"\n#include \"envoy/ssl/certificate_validation_context_config.h\"\n#include \"envoy/ssl/connection.h\"\n#include \"envoy/ssl/context.h\"\n#include \"envoy/ssl/context_config.h\"\n#include \"envoy/ssl/context_manager.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"test/mocks/secret/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Ssl {\n\nclass MockContextManager : public ContextManager {\npublic:\n  MockContextManager();\n  ~MockContextManager() override;\n\n  MOCK_METHOD(ClientContextSharedPtr, createSslClientContext,\n              (Stats::Scope & scope, const ClientContextConfig& config));\n  MOCK_METHOD(ServerContextSharedPtr, createSslServerContext,\n              (Stats::Scope & stats, const ServerContextConfig& config,\n               const std::vector<std::string>& server_names));\n  MOCK_METHOD(size_t, daysUntilFirstCertExpires, (), (const));\n  MOCK_METHOD(absl::optional<uint64_t>, secondsUntilFirstOcspResponseExpires, (), (const));\n  MOCK_METHOD(void, iterateContexts, (std::function<void(const Context&)> callback));\n  MOCK_METHOD(Ssl::PrivateKeyMethodManager&, privateKeyMethodManager, ());\n};\n\nclass MockConnectionInfo : public ConnectionInfo {\npublic:\n  MockConnectionInfo();\n  ~MockConnectionInfo() override;\n\n  MOCK_METHOD(bool, peerCertificatePresented, (), (const));\n  MOCK_METHOD(bool, peerCertificateValidated, (), (const));\n  MOCK_METHOD(absl::Span<const std::string>, uriSanLocalCertificate, (), (const));\n  MOCK_METHOD(const std::string&, sha256PeerCertificateDigest, (), (const));\n  MOCK_METHOD(const std::string&, sha1PeerCertificateDigest, (), (const));\n  MOCK_METHOD(const std::string&, serialNumberPeerCertificate, (), (const));\n  MOCK_METHOD(const std::string&, issuerPeerCertificate, (), (const));\n  MOCK_METHOD(const std::string&, subjectPeerCertificate, (), (const));\n 
 MOCK_METHOD(absl::Span<const std::string>, uriSanPeerCertificate, (), (const));\n  MOCK_METHOD(const std::string&, subjectLocalCertificate, (), (const));\n  MOCK_METHOD(const std::string&, urlEncodedPemEncodedPeerCertificate, (), (const));\n  MOCK_METHOD(const std::string&, urlEncodedPemEncodedPeerCertificateChain, (), (const));\n  MOCK_METHOD(absl::Span<const std::string>, dnsSansPeerCertificate, (), (const));\n  MOCK_METHOD(absl::Span<const std::string>, dnsSansLocalCertificate, (), (const));\n  MOCK_METHOD(absl::optional<SystemTime>, validFromPeerCertificate, (), (const));\n  MOCK_METHOD(absl::optional<SystemTime>, expirationPeerCertificate, (), (const));\n  MOCK_METHOD(const std::string&, sessionId, (), (const));\n  MOCK_METHOD(uint16_t, ciphersuiteId, (), (const));\n  MOCK_METHOD(std::string, ciphersuiteString, (), (const));\n  MOCK_METHOD(const std::string&, tlsVersion, (), (const));\n};\n\nclass MockClientContext : public ClientContext {\npublic:\n  MockClientContext();\n  ~MockClientContext() override;\n\n  MOCK_METHOD(size_t, daysUntilFirstCertExpires, (), (const));\n  MOCK_METHOD(absl::optional<uint64_t>, secondsUntilFirstOcspResponseExpires, (), (const));\n  MOCK_METHOD(CertificateDetailsPtr, getCaCertInformation, (), (const));\n  MOCK_METHOD(std::vector<CertificateDetailsPtr>, getCertChainInformation, (), (const));\n};\n\nclass MockClientContextConfig : public ClientContextConfig {\npublic:\n  MockClientContextConfig();\n  ~MockClientContextConfig() override;\n\n  MOCK_METHOD(const std::string&, alpnProtocols, (), (const));\n  MOCK_METHOD(const std::string&, cipherSuites, (), (const));\n  MOCK_METHOD(const std::string&, ecdhCurves, (), (const));\n  MOCK_METHOD(std::vector<std::reference_wrapper<const TlsCertificateConfig>>, tlsCertificates, (),\n              (const));\n  MOCK_METHOD(const CertificateValidationContextConfig*, certificateValidationContext, (), (const));\n  MOCK_METHOD(unsigned, minProtocolVersion, (), (const));\n  MOCK_METHOD(unsigned, 
maxProtocolVersion, (), (const));\n  MOCK_METHOD(bool, isReady, (), (const));\n  MOCK_METHOD(void, setSecretUpdateCallback, (std::function<void()> callback));\n\n  MOCK_METHOD(Ssl::HandshakerFactoryCb, createHandshaker, (), (const, override));\n  MOCK_METHOD(Ssl::HandshakerCapabilities, capabilities, (), (const, override));\n\n  MOCK_METHOD(const std::string&, serverNameIndication, (), (const));\n  MOCK_METHOD(bool, allowRenegotiation, (), (const));\n  MOCK_METHOD(size_t, maxSessionKeys, (), (const));\n  MOCK_METHOD(const std::string&, signingAlgorithmsForTest, (), (const));\n};\n\nclass MockServerContextConfig : public ServerContextConfig {\npublic:\n  MockServerContextConfig();\n  ~MockServerContextConfig() override;\n\n  MOCK_METHOD(const std::string&, alpnProtocols, (), (const));\n  MOCK_METHOD(const std::string&, cipherSuites, (), (const));\n  MOCK_METHOD(const std::string&, ecdhCurves, (), (const));\n  MOCK_METHOD(std::vector<std::reference_wrapper<const TlsCertificateConfig>>, tlsCertificates, (),\n              (const));\n  MOCK_METHOD(const CertificateValidationContextConfig*, certificateValidationContext, (), (const));\n  MOCK_METHOD(unsigned, minProtocolVersion, (), (const));\n  MOCK_METHOD(unsigned, maxProtocolVersion, (), (const));\n  MOCK_METHOD(bool, isReady, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::seconds>, sessionTimeout, (), (const));\n  MOCK_METHOD(void, setSecretUpdateCallback, (std::function<void()> callback));\n\n  MOCK_METHOD(Ssl::HandshakerFactoryCb, createHandshaker, (), (const, override));\n  MOCK_METHOD(Ssl::HandshakerCapabilities, capabilities, (), (const, override));\n\n  MOCK_METHOD(bool, requireClientCertificate, (), (const));\n  MOCK_METHOD(OcspStaplePolicy, ocspStaplePolicy, (), (const));\n  MOCK_METHOD(const std::vector<SessionTicketKey>&, sessionTicketKeys, (), (const));\n  MOCK_METHOD(bool, disableStatelessSessionResumption, (), (const));\n};\n\nclass MockTlsCertificateConfig : public TlsCertificateConfig 
{\npublic:\n  MockTlsCertificateConfig() = default;\n  ~MockTlsCertificateConfig() override = default;\n\n  MOCK_METHOD(const std::string&, certificateChain, (), (const));\n  MOCK_METHOD(const std::string&, certificateChainPath, (), (const));\n  MOCK_METHOD(const std::string&, privateKey, (), (const));\n  MOCK_METHOD(const std::string&, privateKeyPath, (), (const));\n  MOCK_METHOD(const std::vector<uint8_t>&, ocspStaple, (), (const));\n  MOCK_METHOD(const std::string&, ocspStaplePath, (), (const));\n  MOCK_METHOD(const std::string&, password, (), (const));\n  MOCK_METHOD(const std::string&, passwordPath, (), (const));\n  MOCK_METHOD(Envoy::Ssl::PrivateKeyMethodProviderSharedPtr, privateKeyMethod, (), (const));\n};\n\nclass MockCertificateValidationContextConfig : public CertificateValidationContextConfig {\npublic:\n  MOCK_METHOD(const std::string&, caCert, (), (const));\n  MOCK_METHOD(const std::string&, caCertPath, (), (const));\n  MOCK_METHOD(const std::string&, certificateRevocationList, (), (const));\n  MOCK_METHOD(const std::string&, certificateRevocationListPath, (), (const));\n  MOCK_METHOD(const std::vector<std::string>&, verifySubjectAltNameList, (), (const));\n  MOCK_METHOD(const std::vector<envoy::type::matcher::v3::StringMatcher>&, subjectAltNameMatchers,\n              (), (const));\n  MOCK_METHOD(const std::vector<std::string>&, verifyCertificateHashList, (), (const));\n  MOCK_METHOD(const std::vector<std::string>&, verifyCertificateSpkiList, (), (const));\n  MOCK_METHOD(bool, allowExpiredCertificate, (), (const));\n  MOCK_METHOD(envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext::\n                  TrustChainVerification,\n              trustChainVerification, (), (const));\n};\n\nclass MockPrivateKeyMethodManager : public PrivateKeyMethodManager {\npublic:\n  MockPrivateKeyMethodManager();\n  ~MockPrivateKeyMethodManager() override;\n\n  MOCK_METHOD(PrivateKeyMethodProviderSharedPtr, createPrivateKeyMethodProvider,\n       
       (const envoy::extensions::transport_sockets::tls::v3::PrivateKeyProvider& config,\n               Envoy::Server::Configuration::TransportSocketFactoryContext& factory_context));\n};\n\nclass MockPrivateKeyMethodProvider : public PrivateKeyMethodProvider {\npublic:\n  MockPrivateKeyMethodProvider();\n  ~MockPrivateKeyMethodProvider() override;\n\n  MOCK_METHOD(void, registerPrivateKeyMethod,\n              (SSL * ssl, PrivateKeyConnectionCallbacks& cb, Event::Dispatcher& dispatcher));\n  MOCK_METHOD(void, unregisterPrivateKeyMethod, (SSL * ssl));\n  MOCK_METHOD(bool, checkFips, ());\n\n#ifdef OPENSSL_IS_BORINGSSL\n  MOCK_METHOD(BoringSslPrivateKeyMethodSharedPtr, getBoringSslPrivateKeyMethod, ());\n#endif\n};\n\n} // namespace Ssl\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/stats/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"stats_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/stats:stats_interface\",\n        \"//include/envoy/stats:timespan_interface\",\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/stats:histogram_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/stats:store_impl_lib\",\n        \"//source/common/stats:timespan_lib\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/test_common:global_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/stats/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include <memory>\n\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Stats {\n\nMockCounter::MockCounter() {\n  ON_CALL(*this, used()).WillByDefault(ReturnPointee(&used_));\n  ON_CALL(*this, value()).WillByDefault(ReturnPointee(&value_));\n  ON_CALL(*this, latch()).WillByDefault(ReturnPointee(&latch_));\n}\nMockCounter::~MockCounter() = default;\n\nMockGauge::MockGauge() : used_(false), value_(0), import_mode_(ImportMode::Accumulate) {\n  ON_CALL(*this, used()).WillByDefault(ReturnPointee(&used_));\n  ON_CALL(*this, value()).WillByDefault(ReturnPointee(&value_));\n  ON_CALL(*this, importMode()).WillByDefault(ReturnPointee(&import_mode_));\n}\nMockGauge::~MockGauge() = default;\n\nMockTextReadout::MockTextReadout() {\n  ON_CALL(*this, used()).WillByDefault(ReturnPointee(&used_));\n  ON_CALL(*this, value()).WillByDefault(ReturnPointee(&value_));\n}\nMockTextReadout::~MockTextReadout() = default;\n\nMockHistogram::MockHistogram() {\n  ON_CALL(*this, unit()).WillByDefault(ReturnPointee(&unit_));\n  ON_CALL(*this, recordValue(_)).WillByDefault(Invoke([this](uint64_t value) {\n    if (store_ != nullptr) {\n      store_->deliverHistogramToSinks(*this, value);\n    }\n  }));\n}\nMockHistogram::~MockHistogram() = default;\n\nMockParentHistogram::MockParentHistogram() {\n  ON_CALL(*this, used()).WillByDefault(ReturnPointee(&used_));\n  ON_CALL(*this, unit()).WillByDefault(ReturnPointee(&unit_));\n  ON_CALL(*this, recordValue(_)).WillByDefault(Invoke([this](uint64_t value) {\n    if (store_ != nullptr) {\n      store_->deliverHistogramToSinks(*this, value);\n    }\n  }));\n  ON_CALL(*this, intervalStatistics()).WillByDefault(ReturnRef(*histogram_stats_));\n  ON_CALL(*this, 
cumulativeStatistics()).WillByDefault(ReturnRef(*histogram_stats_));\n}\nMockParentHistogram::~MockParentHistogram() = default;\n\nMockMetricSnapshot::MockMetricSnapshot() {\n  ON_CALL(*this, counters()).WillByDefault(ReturnRef(counters_));\n  ON_CALL(*this, gauges()).WillByDefault(ReturnRef(gauges_));\n  ON_CALL(*this, histograms()).WillByDefault(ReturnRef(histograms_));\n}\n\nMockMetricSnapshot::~MockMetricSnapshot() = default;\n\nMockSink::MockSink() = default;\nMockSink::~MockSink() = default;\n\nMockStore::MockStore() : TestUtil::TestStore(*global_symbol_table_) {\n  ON_CALL(*this, counter(_)).WillByDefault(ReturnRef(counter_));\n  ON_CALL(*this, histogram(_, _))\n      .WillByDefault(Invoke([this](const std::string& name, Histogram::Unit unit) -> Histogram& {\n        auto* histogram = new NiceMock<MockHistogram>(); // symbol_table_);\n        histogram->name_ = name;\n        histogram->unit_ = unit;\n        histogram->store_ = this;\n        histograms_.emplace_back(histogram);\n        return *histogram;\n      }));\n\n  ON_CALL(*this, histogramFromString(_, _))\n      .WillByDefault(Invoke([this](const std::string& name, Histogram::Unit unit) -> Histogram& {\n        return TestUtil::TestStore::histogramFromString(name, unit);\n      }));\n}\nMockStore::~MockStore() = default;\n\nMockIsolatedStatsStore::MockIsolatedStatsStore() : TestUtil::TestStore(*global_symbol_table_) {}\nMockIsolatedStatsStore::~MockIsolatedStatsStore() = default;\n\nMockStatsMatcher::MockStatsMatcher() = default;\nMockStatsMatcher::~MockStatsMatcher() = default;\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/stats/mocks.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"envoy/stats/histogram.h\"\n#include \"envoy/stats/sink.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/stats_matcher.h\"\n#include \"envoy/stats/store.h\"\n#include \"envoy/stats/timespan.h\"\n#include \"envoy/thread_local/thread_local.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/stats/histogram_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/stats/store_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stats/timespan_impl.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/test_common/global.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Stats {\n\nclass TestSymbolTableHelper {\npublic:\n  SymbolTable& symbolTable() { return symbol_table_; }\n  const SymbolTable& constSymbolTable() const { return symbol_table_; }\n\nprivate:\n  SymbolTableImpl symbol_table_;\n};\n\nclass TestSymbolTable {\npublic:\n  SymbolTable& operator*() { return global_.get().symbolTable(); }\n  const SymbolTable& operator*() const { return global_.get().constSymbolTable(); }\n  SymbolTable* operator->() { return &global_.get().symbolTable(); }\n  const SymbolTable* operator->() const { return &global_.get().constSymbolTable(); }\n  Envoy::Test::Global<TestSymbolTableHelper> global_;\n};\n\ntemplate <class BaseClass> class MockMetric : public BaseClass {\npublic:\n  MockMetric() : name_(*this), tag_pool_(*symbol_table_) {}\n  ~MockMetric() override = default;\n\n  // This bit of C++ subterfuge allows us to support the wealth of tests that\n  // do metric->name_ = \"foo\" even though names are more complex now. 
Note\n  // that the statName is only populated if there is a symbol table.\n  class MetricName {\n  public:\n    explicit MetricName(MockMetric& mock_metric) : mock_metric_(mock_metric) {}\n    ~MetricName() {\n      if (stat_name_storage_ != nullptr) {\n        stat_name_storage_->free(mock_metric_.symbolTable());\n      }\n    }\n\n    void operator=(absl::string_view str) {\n      name_ = std::string(str);\n      stat_name_storage_ = std::make_unique<StatNameStorage>(str, mock_metric_.symbolTable());\n    }\n\n    std::string name() const { return name_; }\n    StatName statName() const { return stat_name_storage_->statName(); }\n\n  private:\n    MockMetric& mock_metric_;\n    std::string name_;\n    std::unique_ptr<StatNameStorage> stat_name_storage_;\n  };\n\n  SymbolTable& symbolTable() override { return *symbol_table_; }\n  const SymbolTable& constSymbolTable() const override { return *symbol_table_; }\n\n  // Note: cannot be mocked because it is accessed as a Property in a gmock EXPECT_CALL. This\n  // creates a deadlock in gmock and is an unintended use of mock functions.\n  std::string name() const override { return name_.name(); }\n  StatName statName() const override { return name_.statName(); }\n  TagVector tags() const override { return tags_; }\n  void setTagExtractedName(absl::string_view name) {\n    tag_extracted_name_ = std::string(name);\n    tag_extracted_stat_name_ =\n        std::make_unique<StatNameManagedStorage>(tagExtractedName(), *symbol_table_);\n  }\n  std::string tagExtractedName() const override {\n    return tag_extracted_name_.empty() ? 
name() : tag_extracted_name_;\n  }\n  StatName tagExtractedStatName() const override { return tag_extracted_stat_name_->statName(); }\n  void iterateTagStatNames(const Metric::TagStatNameIterFn& fn) const override {\n    ASSERT((tag_names_and_values_.size() % 2) == 0);\n    for (size_t i = 0; i < tag_names_and_values_.size(); i += 2) {\n      if (!fn(tag_names_and_values_[i], tag_names_and_values_[i + 1])) {\n        return;\n      }\n    }\n  }\n\n  TestSymbolTable symbol_table_; // Must outlive name_.\n  MetricName name_;\n\n  void setTags(const TagVector& tags) {\n    tag_pool_.clear();\n    tag_names_and_values_.clear();\n    tags_ = tags;\n    for (const Tag& tag : tags) {\n      tag_names_and_values_.push_back(tag_pool_.add(tag.name_));\n      tag_names_and_values_.push_back(tag_pool_.add(tag.value_));\n    }\n  }\n\n  void setTags(const Stats::StatNameTagVector& tags) {\n    tag_pool_.clear();\n    tag_names_and_values_.clear();\n    tags_.clear();\n    for (const StatNameTag& tag : tags) {\n      tag_names_and_values_.push_back(tag.first);\n      tag_names_and_values_.push_back(tag.second);\n      tags_.push_back(Tag{symbol_table_->toString(tag.first), symbol_table_->toString(tag.second)});\n    }\n  }\n\n  void addTag(const Tag& tag) {\n    tags_.emplace_back(tag);\n    tag_names_and_values_.push_back(tag_pool_.add(tag.name_));\n    tag_names_and_values_.push_back(tag_pool_.add(tag.value_));\n  }\n\nprivate:\n  TagVector tags_;\n  StatNameVec tag_names_and_values_;\n  std::string tag_extracted_name_;\n  StatNamePool tag_pool_;\n  std::unique_ptr<StatNameManagedStorage> tag_extracted_stat_name_;\n};\n\ntemplate <class BaseClass> class MockStatWithRefcount : public MockMetric<BaseClass> {\npublic:\n  // RefcountInterface\n  void incRefCount() override { refcount_helper_.incRefCount(); }\n  bool decRefCount() override { return refcount_helper_.decRefCount(); }\n  uint32_t use_count() const override { return refcount_helper_.use_count(); }\n\n  RefcountHelper 
refcount_helper_;\n};\n\nclass MockCounter : public MockStatWithRefcount<Counter> {\npublic:\n  MockCounter();\n  ~MockCounter() override;\n\n  MOCK_METHOD(void, add, (uint64_t amount));\n  MOCK_METHOD(void, inc, ());\n  MOCK_METHOD(uint64_t, latch, ());\n  MOCK_METHOD(void, reset, ());\n  MOCK_METHOD(bool, used, (), (const));\n  MOCK_METHOD(uint64_t, value, (), (const));\n\n  bool used_;\n  uint64_t value_;\n  uint64_t latch_;\n\n  // RefcountInterface\n  void incRefCount() override { refcount_helper_.incRefCount(); }\n  bool decRefCount() override { return refcount_helper_.decRefCount(); }\n  uint32_t use_count() const override { return refcount_helper_.use_count(); }\n\nprivate:\n  RefcountHelper refcount_helper_;\n};\n\nclass MockGauge : public MockStatWithRefcount<Gauge> {\npublic:\n  MockGauge();\n  ~MockGauge() override;\n\n  MOCK_METHOD(void, add, (uint64_t amount));\n  MOCK_METHOD(void, dec, ());\n  MOCK_METHOD(void, inc, ());\n  MOCK_METHOD(void, set, (uint64_t value));\n  MOCK_METHOD(void, setParentValue, (uint64_t parent_value));\n  MOCK_METHOD(void, sub, (uint64_t amount));\n  MOCK_METHOD(void, mergeImportMode, (ImportMode));\n  MOCK_METHOD(bool, used, (), (const));\n  MOCK_METHOD(uint64_t, value, (), (const));\n  MOCK_METHOD(absl::optional<bool>, cachedShouldImport, (), (const));\n  MOCK_METHOD(ImportMode, importMode, (), (const));\n\n  bool used_;\n  uint64_t value_;\n  ImportMode import_mode_;\n\n  // RefcountInterface\n  void incRefCount() override { refcount_helper_.incRefCount(); }\n  bool decRefCount() override { return refcount_helper_.decRefCount(); }\n  uint32_t use_count() const override { return refcount_helper_.use_count(); }\n\nprivate:\n  RefcountHelper refcount_helper_;\n};\n\nclass MockHistogram : public MockMetric<Histogram> {\npublic:\n  MockHistogram();\n  ~MockHistogram() override;\n\n  MOCK_METHOD(bool, used, (), (const));\n  MOCK_METHOD(Histogram::Unit, unit, (), (const));\n  MOCK_METHOD(void, recordValue, (uint64_t value));\n\n  
// RefcountInterface\n  void incRefCount() override { refcount_helper_.incRefCount(); }\n  bool decRefCount() override { return refcount_helper_.decRefCount(); }\n  uint32_t use_count() const override { return refcount_helper_.use_count(); }\n\n  Unit unit_{Histogram::Unit::Unspecified};\n  Store* store_{};\n\nprivate:\n  RefcountHelper refcount_helper_;\n};\n\nclass MockParentHistogram : public MockMetric<ParentHistogram> {\npublic:\n  MockParentHistogram();\n  ~MockParentHistogram() override;\n\n  void merge() override {}\n  const std::string quantileSummary() const override { return \"\"; };\n  const std::string bucketSummary() const override { return \"\"; };\n\n  MOCK_METHOD(bool, used, (), (const));\n  MOCK_METHOD(Histogram::Unit, unit, (), (const));\n  MOCK_METHOD(void, recordValue, (uint64_t value));\n  MOCK_METHOD(const HistogramStatistics&, cumulativeStatistics, (), (const));\n  MOCK_METHOD(const HistogramStatistics&, intervalStatistics, (), (const));\n\n  // RefcountInterface\n  void incRefCount() override { refcount_helper_.incRefCount(); }\n  bool decRefCount() override { return refcount_helper_.decRefCount(); }\n  uint32_t use_count() const override { return refcount_helper_.use_count(); }\n\n  bool used_;\n  Unit unit_{Histogram::Unit::Unspecified};\n  Store* store_{};\n  std::shared_ptr<HistogramStatistics> histogram_stats_ =\n      std::make_shared<HistogramStatisticsImpl>();\n\nprivate:\n  RefcountHelper refcount_helper_;\n};\n\nclass MockTextReadout : public MockMetric<TextReadout> {\npublic:\n  MockTextReadout();\n  ~MockTextReadout() override;\n\n  MOCK_METHOD(void, set, (absl::string_view value), (override));\n  MOCK_METHOD(bool, used, (), (const, override));\n  MOCK_METHOD(std::string, value, (), (const, override));\n\n  bool used_;\n  std::string value_;\n};\n\nclass MockMetricSnapshot : public MetricSnapshot {\npublic:\n  MockMetricSnapshot();\n  ~MockMetricSnapshot() override;\n\n  MOCK_METHOD(const std::vector<CounterSnapshot>&, counters, 
());\n  MOCK_METHOD(const std::vector<std::reference_wrapper<const Gauge>>&, gauges, ());\n  MOCK_METHOD(const std::vector<std::reference_wrapper<const ParentHistogram>>&, histograms, ());\n  MOCK_METHOD(const std::vector<std::reference_wrapper<const TextReadout>>&, textReadouts, ());\n\n  std::vector<CounterSnapshot> counters_;\n  std::vector<std::reference_wrapper<const Gauge>> gauges_;\n  std::vector<std::reference_wrapper<const ParentHistogram>> histograms_;\n  std::vector<std::reference_wrapper<const TextReadout>> text_readouts_;\n};\n\nclass MockSink : public Sink {\npublic:\n  MockSink();\n  ~MockSink() override;\n\n  MOCK_METHOD(void, flush, (MetricSnapshot & snapshot));\n  MOCK_METHOD(void, onHistogramComplete, (const Histogram& histogram, uint64_t value));\n};\n\nclass SymbolTableProvider {\npublic:\n  TestSymbolTable global_symbol_table_;\n};\n\nclass MockStore : public SymbolTableProvider, public TestUtil::TestStore {\npublic:\n  MockStore();\n  ~MockStore() override;\n\n  ScopePtr createScope(const std::string& name) override { return ScopePtr{createScope_(name)}; }\n\n  MOCK_METHOD(void, deliverHistogramToSinks, (const Histogram& histogram, uint64_t value));\n  MOCK_METHOD(Counter&, counter, (const std::string&));\n  MOCK_METHOD(std::vector<CounterSharedPtr>, counters, (), (const));\n  MOCK_METHOD(Scope*, createScope_, (const std::string& name));\n  MOCK_METHOD(Gauge&, gauge, (const std::string&, Gauge::ImportMode));\n  MOCK_METHOD(NullGaugeImpl&, nullGauge, (const std::string&));\n  MOCK_METHOD(std::vector<GaugeSharedPtr>, gauges, (), (const));\n  MOCK_METHOD(Histogram&, histogram, (const std::string&, Histogram::Unit));\n  MOCK_METHOD(std::vector<ParentHistogramSharedPtr>, histograms, (), (const));\n  MOCK_METHOD(Histogram&, histogramFromString, (const std::string& name, Histogram::Unit unit));\n  MOCK_METHOD(TextReadout&, textReadout, (const std::string&));\n  MOCK_METHOD(std::vector<TextReadoutSharedPtr>, text_readouts, (), (const));\n\n  
MOCK_METHOD(CounterOptConstRef, findCounter, (StatName), (const));\n  MOCK_METHOD(GaugeOptConstRef, findGauge, (StatName), (const));\n  MOCK_METHOD(HistogramOptConstRef, findHistogram, (StatName), (const));\n  MOCK_METHOD(TextReadoutOptConstRef, findTextReadout, (StatName), (const));\n\n  Counter& counterFromStatNameWithTags(const StatName& name,\n                                       StatNameTagVectorOptConstRef) override {\n    // We always just respond with the mocked counter, so the tags don't matter.\n    return counter(symbol_table_->toString(name));\n  }\n  Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef,\n                                   Gauge::ImportMode import_mode) override {\n    // We always just respond with the mocked gauge, so the tags don't matter.\n    return gauge(symbol_table_->toString(name), import_mode);\n  }\n  Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef,\n                                           Histogram::Unit unit) override {\n    return histogram(symbol_table_->toString(name), unit);\n  }\n  TextReadout& textReadoutFromStatNameWithTags(const StatName& name,\n                                               StatNameTagVectorOptConstRef) override {\n    // We always just respond with the mocked counter, so the tags don't matter.\n    return textReadout(symbol_table_->toString(name));\n  }\n\n  TestSymbolTable symbol_table_;\n  testing::NiceMock<MockCounter> counter_;\n  std::vector<std::unique_ptr<MockHistogram>> histograms_;\n};\n\n/**\n * With IsolatedStoreImpl it's hard to test timing stats.\n * MockIsolatedStatsStore mocks only deliverHistogramToSinks for better testing.\n */\nclass MockIsolatedStatsStore : public SymbolTableProvider, public TestUtil::TestStore {\npublic:\n  MockIsolatedStatsStore();\n  ~MockIsolatedStatsStore() override;\n\n  MOCK_METHOD(void, deliverHistogramToSinks, (const Histogram& histogram, uint64_t value));\n};\n\nclass 
MockStatsMatcher : public StatsMatcher {\npublic:\n  MockStatsMatcher();\n  ~MockStatsMatcher() override;\n  MOCK_METHOD(bool, rejects, (const std::string& name), (const));\n  bool acceptsAll() const override { return accepts_all_; }\n  bool rejectsAll() const override { return rejects_all_; }\n\n  bool accepts_all_{false};\n  bool rejects_all_{false};\n};\n\n} // namespace Stats\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/stream_info/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"stream_info_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/http:request_id_extension_interface\",\n        \"//include/envoy/stream_info:stream_info_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//test/mocks/upstream:host_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/stream_info/mocks.cc",
    "content": "#include \"test/mocks/stream_info/mocks.h\"\n\n#include \"common/network/address_impl.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Const;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace StreamInfo {\n\nMockStreamInfo::MockStreamInfo()\n    : start_time_(ts_.systemTime()),\n      filter_state_(std::make_shared<FilterStateImpl>(FilterState::LifeSpan::FilterChain)),\n      downstream_local_address_(new Network::Address::Ipv4Instance(\"127.0.0.2\")),\n      downstream_direct_remote_address_(new Network::Address::Ipv4Instance(\"127.0.0.1\")),\n      downstream_remote_address_(new Network::Address::Ipv4Instance(\"127.0.0.1\")) {\n  ON_CALL(*this, setResponseFlag(_)).WillByDefault(Invoke([this](ResponseFlag response_flag) {\n    response_flags_ |= response_flag;\n  }));\n  ON_CALL(*this, setResponseCodeDetails(_)).WillByDefault(Invoke([this](absl::string_view details) {\n    response_code_details_ = std::string(details);\n  }));\n  ON_CALL(*this, setConnectionTerminationDetails(_))\n      .WillByDefault(\n          Invoke([this](absl::string_view details) { connection_termination_details_ = details; }));\n  ON_CALL(*this, startTime()).WillByDefault(ReturnPointee(&start_time_));\n  ON_CALL(*this, startTimeMonotonic()).WillByDefault(ReturnPointee(&start_time_monotonic_));\n  ON_CALL(*this, lastDownstreamRxByteReceived())\n      .WillByDefault(ReturnPointee(&last_downstream_rx_byte_received_));\n  ON_CALL(*this, firstUpstreamTxByteSent())\n      .WillByDefault(ReturnPointee(&first_upstream_tx_byte_sent_));\n  ON_CALL(*this, lastUpstreamTxByteSent())\n      .WillByDefault(ReturnPointee(&last_upstream_tx_byte_sent_));\n  ON_CALL(*this, firstUpstreamRxByteReceived())\n      .WillByDefault(ReturnPointee(&first_upstream_rx_byte_received_));\n  ON_CALL(*this, lastUpstreamRxByteReceived())\n      
.WillByDefault(ReturnPointee(&last_upstream_rx_byte_received_));\n  ON_CALL(*this, firstDownstreamTxByteSent())\n      .WillByDefault(ReturnPointee(&first_downstream_tx_byte_sent_));\n  ON_CALL(*this, lastDownstreamTxByteSent())\n      .WillByDefault(ReturnPointee(&last_downstream_tx_byte_sent_));\n  ON_CALL(*this, requestComplete()).WillByDefault(ReturnPointee(&end_time_));\n  ON_CALL(*this, onRequestComplete()).WillByDefault(Invoke([this]() {\n    end_time_ = absl::make_optional<std::chrono::nanoseconds>(\n        std::chrono::duration_cast<std::chrono::nanoseconds>(ts_.systemTime() - start_time_)\n            .count());\n  }));\n  ON_CALL(*this, setUpstreamLocalAddress(_))\n      .WillByDefault(\n          Invoke([this](const Network::Address::InstanceConstSharedPtr& upstream_local_address) {\n            upstream_local_address_ = upstream_local_address;\n          }));\n  ON_CALL(*this, upstreamLocalAddress()).WillByDefault(ReturnRef(upstream_local_address_));\n  ON_CALL(*this, setDownstreamLocalAddress(_))\n      .WillByDefault(\n          Invoke([this](const Network::Address::InstanceConstSharedPtr& downstream_local_address) {\n            downstream_local_address_ = downstream_local_address;\n          }));\n  ON_CALL(*this, downstreamLocalAddress()).WillByDefault(ReturnRef(downstream_local_address_));\n  ON_CALL(*this, setDownstreamDirectRemoteAddress(_))\n      .WillByDefault(Invoke(\n          [this](const Network::Address::InstanceConstSharedPtr& downstream_direct_remote_address) {\n            downstream_direct_remote_address_ = downstream_direct_remote_address;\n          }));\n  ON_CALL(*this, downstreamDirectRemoteAddress())\n      .WillByDefault(ReturnRef(downstream_direct_remote_address_));\n  ON_CALL(*this, setDownstreamRemoteAddress(_))\n      .WillByDefault(\n          Invoke([this](const Network::Address::InstanceConstSharedPtr& downstream_remote_address) {\n            downstream_remote_address_ = downstream_remote_address;\n          }));\n  
ON_CALL(*this, downstreamRemoteAddress()).WillByDefault(ReturnRef(downstream_remote_address_));\n  ON_CALL(*this, setDownstreamSslConnection(_))\n      .WillByDefault(Invoke(\n          [this](const auto& connection_info) { downstream_connection_info_ = connection_info; }));\n  ON_CALL(*this, setUpstreamSslConnection(_))\n      .WillByDefault(Invoke(\n          [this](const auto& connection_info) { upstream_connection_info_ = connection_info; }));\n  ON_CALL(*this, downstreamSslConnection()).WillByDefault(Invoke([this]() {\n    return downstream_connection_info_;\n  }));\n  ON_CALL(*this, upstreamSslConnection()).WillByDefault(Invoke([this]() {\n    return upstream_connection_info_;\n  }));\n  ON_CALL(*this, protocol()).WillByDefault(ReturnPointee(&protocol_));\n  ON_CALL(*this, responseCode()).WillByDefault(ReturnPointee(&response_code_));\n  ON_CALL(*this, responseCodeDetails()).WillByDefault(ReturnPointee(&response_code_details_));\n  ON_CALL(*this, connectionTerminationDetails())\n      .WillByDefault(ReturnPointee(&connection_termination_details_));\n  ON_CALL(*this, addBytesReceived(_)).WillByDefault(Invoke([this](uint64_t bytes_received) {\n    bytes_received_ += bytes_received;\n  }));\n  ON_CALL(*this, bytesReceived()).WillByDefault(ReturnPointee(&bytes_received_));\n  ON_CALL(*this, addBytesSent(_)).WillByDefault(Invoke([this](uint64_t bytes_sent) {\n    bytes_sent_ += bytes_sent;\n  }));\n  ON_CALL(*this, bytesSent()).WillByDefault(ReturnPointee(&bytes_sent_));\n  ON_CALL(*this, hasResponseFlag(_)).WillByDefault(Invoke([this](ResponseFlag flag) {\n    return response_flags_ & flag;\n  }));\n  ON_CALL(*this, intersectResponseFlags(_)).WillByDefault(Invoke([this](uint64_t response_flags) {\n    return (response_flags_ & response_flags) != 0;\n  }));\n  ON_CALL(*this, hasAnyResponseFlag()).WillByDefault(Invoke([this]() {\n    return response_flags_ != 0;\n  }));\n  ON_CALL(*this, responseFlags()).WillByDefault(Return(response_flags_));\n  ON_CALL(*this, 
upstreamHost()).WillByDefault(ReturnPointee(&host_));\n\n  ON_CALL(*this, dynamicMetadata()).WillByDefault(ReturnRef(metadata_));\n  ON_CALL(Const(*this), dynamicMetadata()).WillByDefault(ReturnRef(metadata_));\n  ON_CALL(*this, filterState()).WillByDefault(ReturnRef(filter_state_));\n  ON_CALL(Const(*this), filterState()).WillByDefault(ReturnRef(*filter_state_));\n  ON_CALL(*this, upstreamFilterState()).WillByDefault(ReturnRef(upstream_filter_state_));\n  ON_CALL(*this, setUpstreamFilterState(_))\n      .WillByDefault(Invoke([this](const FilterStateSharedPtr& filter_state) {\n        upstream_filter_state_ = filter_state;\n      }));\n  ON_CALL(*this, setRequestedServerName(_))\n      .WillByDefault(Invoke([this](const absl::string_view requested_server_name) {\n        requested_server_name_ = std::string(requested_server_name);\n      }));\n  ON_CALL(*this, requestedServerName()).WillByDefault(ReturnRef(requested_server_name_));\n  ON_CALL(*this, setRouteName(_)).WillByDefault(Invoke([this](const absl::string_view route_name) {\n    route_name_ = std::string(route_name);\n  }));\n  ON_CALL(*this, getRouteName()).WillByDefault(ReturnRef(route_name_));\n  ON_CALL(*this, upstreamTransportFailureReason())\n      .WillByDefault(ReturnRef(upstream_transport_failure_reason_));\n  ON_CALL(*this, connectionID()).WillByDefault(Return(connection_id_));\n  ON_CALL(*this, setConnectionID(_)).WillByDefault(Invoke([this](uint64_t id) {\n    connection_id_ = id;\n  }));\n}\n\nMockStreamInfo::~MockStreamInfo() = default;\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/stream_info/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/http/request_id_extension.h\"\n#include \"envoy/stream_info/stream_info.h\"\n\n#include \"common/stream_info/filter_state_impl.h\"\n\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace StreamInfo {\n\nclass MockStreamInfo : public StreamInfo {\npublic:\n  MockStreamInfo();\n  ~MockStreamInfo() override;\n\n  // StreamInfo::StreamInfo\n  MOCK_METHOD(void, setResponseFlag, (ResponseFlag response_flag));\n  MOCK_METHOD(void, setResponseCodeDetails, (absl::string_view));\n  MOCK_METHOD(void, setConnectionTerminationDetails, (absl::string_view));\n  MOCK_METHOD(bool, intersectResponseFlags, (uint64_t), (const));\n  MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host));\n  MOCK_METHOD(SystemTime, startTime, (), (const));\n  MOCK_METHOD(MonotonicTime, startTimeMonotonic, (), (const));\n  MOCK_METHOD(absl::optional<std::chrono::nanoseconds>, lastDownstreamRxByteReceived, (), (const));\n  MOCK_METHOD(void, onLastDownstreamRxByteReceived, ());\n  MOCK_METHOD(void, setUpstreamTiming, (const UpstreamTiming&));\n  MOCK_METHOD(absl::optional<std::chrono::nanoseconds>, firstUpstreamTxByteSent, (), (const));\n  MOCK_METHOD(void, onFirstUpstreamTxByteSent, ());\n  MOCK_METHOD(absl::optional<std::chrono::nanoseconds>, lastUpstreamTxByteSent, (), (const));\n  MOCK_METHOD(void, onLastUpstreamTxByteSent, ());\n  MOCK_METHOD(absl::optional<std::chrono::nanoseconds>, firstUpstreamRxByteReceived, (), (const));\n  MOCK_METHOD(void, onFirstUpstreamRxByteReceived, ());\n  MOCK_METHOD(absl::optional<std::chrono::nanoseconds>, lastUpstreamRxByteReceived, (), (const));\n  MOCK_METHOD(void, onLastUpstreamRxByteReceived, ());\n  MOCK_METHOD(absl::optional<std::chrono::nanoseconds>, firstDownstreamTxByteSent, (), (const));\n  MOCK_METHOD(void, 
onFirstDownstreamTxByteSent, ());\n  MOCK_METHOD(absl::optional<std::chrono::nanoseconds>, lastDownstreamTxByteSent, (), (const));\n  MOCK_METHOD(void, onLastDownstreamTxByteSent, ());\n  MOCK_METHOD(void, onRequestComplete, ());\n  MOCK_METHOD(absl::optional<std::chrono::nanoseconds>, requestComplete, (), (const));\n  MOCK_METHOD(void, addBytesReceived, (uint64_t));\n  MOCK_METHOD(uint64_t, bytesReceived, (), (const));\n  MOCK_METHOD(void, setRouteName, (absl::string_view route_name));\n  MOCK_METHOD(const std::string&, getRouteName, (), (const));\n  MOCK_METHOD(absl::optional<Http::Protocol>, protocol, (), (const));\n  MOCK_METHOD(void, protocol, (Http::Protocol protocol));\n  MOCK_METHOD(absl::optional<uint32_t>, responseCode, (), (const));\n  MOCK_METHOD(const absl::optional<std::string>&, responseCodeDetails, (), (const));\n  MOCK_METHOD(const absl::optional<std::string>&, connectionTerminationDetails, (), (const));\n  MOCK_METHOD(void, addBytesSent, (uint64_t));\n  MOCK_METHOD(uint64_t, bytesSent, (), (const));\n  MOCK_METHOD(bool, hasResponseFlag, (ResponseFlag), (const));\n  MOCK_METHOD(bool, hasAnyResponseFlag, (), (const));\n  MOCK_METHOD(uint64_t, responseFlags, (), (const));\n  MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, upstreamHost, (), (const));\n  MOCK_METHOD(void, setUpstreamLocalAddress, (const Network::Address::InstanceConstSharedPtr&));\n  MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, upstreamLocalAddress, (), (const));\n  MOCK_METHOD(bool, healthCheck, (), (const));\n  MOCK_METHOD(void, healthCheck, (bool is_health_check));\n  MOCK_METHOD(void, setDownstreamLocalAddress, (const Network::Address::InstanceConstSharedPtr&));\n  MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, downstreamLocalAddress, (), (const));\n  MOCK_METHOD(void, setDownstreamDirectRemoteAddress,\n              (const Network::Address::InstanceConstSharedPtr&));\n  MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, 
downstreamDirectRemoteAddress, (),\n              (const));\n  MOCK_METHOD(void, setDownstreamRemoteAddress, (const Network::Address::InstanceConstSharedPtr&));\n  MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, downstreamRemoteAddress, (),\n              (const));\n  MOCK_METHOD(void, setDownstreamSslConnection, (const Ssl::ConnectionInfoConstSharedPtr&));\n  MOCK_METHOD(Ssl::ConnectionInfoConstSharedPtr, downstreamSslConnection, (), (const));\n  MOCK_METHOD(void, setUpstreamSslConnection, (const Ssl::ConnectionInfoConstSharedPtr&));\n  MOCK_METHOD(Ssl::ConnectionInfoConstSharedPtr, upstreamSslConnection, (), (const));\n  MOCK_METHOD(const Router::RouteEntry*, routeEntry, (), (const));\n  MOCK_METHOD(envoy::config::core::v3::Metadata&, dynamicMetadata, ());\n  MOCK_METHOD(const envoy::config::core::v3::Metadata&, dynamicMetadata, (), (const));\n  MOCK_METHOD(void, setDynamicMetadata, (const std::string&, const ProtobufWkt::Struct&));\n  MOCK_METHOD(void, setDynamicMetadata,\n              (const std::string&, const std::string&, const std::string&));\n  MOCK_METHOD(const FilterStateSharedPtr&, filterState, ());\n  MOCK_METHOD(const FilterState&, filterState, (), (const));\n  MOCK_METHOD(const FilterStateSharedPtr&, upstreamFilterState, (), (const));\n  MOCK_METHOD(void, setUpstreamFilterState, (const FilterStateSharedPtr&));\n  MOCK_METHOD(void, setRequestedServerName, (const absl::string_view));\n  MOCK_METHOD(const std::string&, requestedServerName, (), (const));\n  MOCK_METHOD(void, setUpstreamTransportFailureReason, (absl::string_view));\n  MOCK_METHOD(const std::string&, upstreamTransportFailureReason, (), (const));\n  MOCK_METHOD(void, setRequestHeaders, (const Http::RequestHeaderMap&));\n  MOCK_METHOD(const Http::RequestHeaderMap*, getRequestHeaders, (), (const));\n  MOCK_METHOD(void, setUpstreamClusterInfo, (const Upstream::ClusterInfoConstSharedPtr&));\n  MOCK_METHOD(absl::optional<Upstream::ClusterInfoConstSharedPtr>, upstreamClusterInfo, 
(),\n              (const));\n  MOCK_METHOD(Http::RequestIDExtensionSharedPtr, getRequestIDExtension, (), (const));\n  MOCK_METHOD(void, setRequestIDExtension, (Http::RequestIDExtensionSharedPtr));\n  MOCK_METHOD(absl::optional<uint64_t>, connectionID, (), (const));\n  MOCK_METHOD(void, setConnectionID, (uint64_t));\n\n  std::shared_ptr<testing::NiceMock<Upstream::MockHostDescription>> host_{\n      new testing::NiceMock<Upstream::MockHostDescription>()};\n  Envoy::Event::SimulatedTimeSystem ts_;\n  SystemTime start_time_;\n  MonotonicTime start_time_monotonic_;\n  absl::optional<std::chrono::nanoseconds> last_downstream_rx_byte_received_;\n  absl::optional<std::chrono::nanoseconds> first_upstream_tx_byte_sent_;\n  absl::optional<std::chrono::nanoseconds> last_upstream_tx_byte_sent_;\n  absl::optional<std::chrono::nanoseconds> first_upstream_rx_byte_received_;\n  absl::optional<uint64_t> connection_id_;\n  absl::optional<std::chrono::nanoseconds> last_upstream_rx_byte_received_;\n  absl::optional<std::chrono::nanoseconds> first_downstream_tx_byte_sent_;\n  absl::optional<std::chrono::nanoseconds> last_downstream_tx_byte_sent_;\n  absl::optional<std::chrono::nanoseconds> end_time_;\n  absl::optional<Http::Protocol> protocol_;\n  absl::optional<uint32_t> response_code_;\n  absl::optional<std::string> response_code_details_;\n  absl::optional<std::string> connection_termination_details_;\n  uint64_t response_flags_{};\n  envoy::config::core::v3::Metadata metadata_;\n  FilterStateSharedPtr upstream_filter_state_;\n  FilterStateSharedPtr filter_state_;\n  uint64_t bytes_received_{};\n  uint64_t bytes_sent_{};\n  Network::Address::InstanceConstSharedPtr upstream_local_address_;\n  Network::Address::InstanceConstSharedPtr downstream_local_address_;\n  Network::Address::InstanceConstSharedPtr downstream_direct_remote_address_;\n  Network::Address::InstanceConstSharedPtr downstream_remote_address_;\n  Ssl::ConnectionInfoConstSharedPtr downstream_connection_info_;\n  
Ssl::ConnectionInfoConstSharedPtr upstream_connection_info_;\n  std::string requested_server_name_;\n  std::string route_name_;\n  std::string upstream_transport_failure_reason_;\n};\n\n} // namespace StreamInfo\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/tcp/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"tcp_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/tcp:conn_pool_interface\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/upstream:host_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/tcp/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::ReturnRef;\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Tcp {\nnamespace ConnectionPool {\n\nMockUpstreamCallbacks::MockUpstreamCallbacks() = default;\nMockUpstreamCallbacks::~MockUpstreamCallbacks() = default;\n\nMockConnectionData::MockConnectionData() = default;\nMockConnectionData::~MockConnectionData() {\n  if (release_callback_) {\n    release_callback_();\n  }\n}\n\nMockInstance::MockInstance() {\n  ON_CALL(*this, newConnection(_)).WillByDefault(Invoke([&](Callbacks& cb) -> Cancellable* {\n    return newConnectionImpl(cb);\n  }));\n  ON_CALL(*this, host()).WillByDefault(Return(host_));\n}\nMockInstance::~MockInstance() = default;\n\nEnvoy::ConnectionPool::MockCancellable* MockInstance::newConnectionImpl(Callbacks& cb) {\n  handles_.emplace_back();\n  callbacks_.push_back(&cb);\n  return &handles_.back();\n}\n\nvoid MockInstance::poolFailure(PoolFailureReason reason, bool host_null) {\n  Callbacks* cb = callbacks_.front();\n  callbacks_.pop_front();\n  handles_.pop_front();\n  if (host_null) {\n    cb->onPoolFailure(reason, nullptr);\n  } else {\n    cb->onPoolFailure(reason, host_);\n  }\n}\n\nvoid MockInstance::poolReady(Network::MockClientConnection& conn) {\n  Callbacks* cb = callbacks_.front();\n  callbacks_.pop_front();\n  handles_.pop_front();\n\n  ON_CALL(*connection_data_, connection()).WillByDefault(ReturnRef(conn));\n\n  connection_data_->release_callback_ = [&]() -> void { released(conn); };\n\n  cb->onPoolReady(std::move(connection_data_), host_);\n}\n\n} // namespace ConnectionPool\n} // namespace Tcp\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/tcp/mocks.h",
    "content": "#pragma once\n\n#include \"envoy/tcp/conn_pool.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/upstream/host.h\"\n#include \"test/test_common/printers.h\"\n\n#include \"gmock/gmock.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Tcp {\nnamespace ConnectionPool {\n\nclass MockUpstreamCallbacks : public UpstreamCallbacks {\npublic:\n  MockUpstreamCallbacks();\n  ~MockUpstreamCallbacks() override;\n\n  // Tcp::ConnectionPool::UpstreamCallbacks\n  MOCK_METHOD(void, onUpstreamData, (Buffer::Instance & data, bool end_stream));\n  MOCK_METHOD(void, onEvent, (Network::ConnectionEvent event));\n  MOCK_METHOD(void, onAboveWriteBufferHighWatermark, ());\n  MOCK_METHOD(void, onBelowWriteBufferLowWatermark, ());\n};\n\nclass MockConnectionData : public ConnectionData {\npublic:\n  MockConnectionData();\n  ~MockConnectionData() override;\n\n  // Tcp::ConnectionPool::ConnectionData\n  MOCK_METHOD(Network::ClientConnection&, connection, ());\n  MOCK_METHOD(void, addUpstreamCallbacks, (ConnectionPool::UpstreamCallbacks&));\n  void setConnectionState(ConnectionStatePtr&& state) override { setConnectionState_(state); }\n  MOCK_METHOD(ConnectionPool::ConnectionState*, connectionState, ());\n\n  MOCK_METHOD(void, setConnectionState_, (ConnectionPool::ConnectionStatePtr & state));\n\n  // If set, invoked in ~MockConnectionData, which indicates that the connection pool\n  // caller has released a connection.\n  std::function<void()> release_callback_;\n};\n\nclass MockInstance : public Instance {\npublic:\n  MockInstance();\n  ~MockInstance() override;\n\n  // Tcp::ConnectionPool::Instance\n  MOCK_METHOD(void, addDrainedCallback, (DrainedCb cb));\n  MOCK_METHOD(void, drainConnections, ());\n  MOCK_METHOD(void, closeConnections, ());\n  MOCK_METHOD(Cancellable*, newConnection, (Tcp::ConnectionPool::Callbacks & callbacks));\n  MOCK_METHOD(bool, maybePrefetch, (float), ());\n  
MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const));\n\n  Envoy::ConnectionPool::MockCancellable* newConnectionImpl(Callbacks& cb);\n  void poolFailure(PoolFailureReason reason, bool host_null = false);\n  void poolReady(Network::MockClientConnection& conn);\n\n  // Invoked when connection_data_, having been assigned via poolReady is released.\n  MOCK_METHOD(void, released, (Network::MockClientConnection&));\n\n  std::list<NiceMock<Envoy::ConnectionPool::MockCancellable>> handles_;\n  std::list<Callbacks*> callbacks_;\n\n  std::shared_ptr<NiceMock<Upstream::MockHostDescription>> host_{\n      new NiceMock<Upstream::MockHostDescription>()};\n  std::unique_ptr<NiceMock<MockConnectionData>> connection_data_{\n      new NiceMock<MockConnectionData>()};\n};\n\n} // namespace ConnectionPool\n} // namespace Tcp\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/thread_local/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"thread_local_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/thread_local:thread_local_interface\",\n        \"//test/mocks/event:event_mocks\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/thread_local/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\n\nnamespace Envoy {\nnamespace ThreadLocal {\n\nMockInstance::MockInstance() {\n  ON_CALL(*this, allocateSlot()).WillByDefault(Invoke(this, &MockInstance::allocateSlot_));\n  ON_CALL(*this, runOnAllThreads(_)).WillByDefault(Invoke(this, &MockInstance::runOnAllThreads1_));\n  ON_CALL(*this, runOnAllThreads(_, _))\n      .WillByDefault(Invoke(this, &MockInstance::runOnAllThreads2_));\n  ON_CALL(*this, shutdownThread()).WillByDefault(Invoke(this, &MockInstance::shutdownThread_));\n  ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n}\n\nMockInstance::~MockInstance() { shutdownThread_(); }\n\n} // namespace ThreadLocal\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/thread_local/mocks.h",
    "content": "#pragma once\n\n#include <cstdint>\n\n#include \"envoy/thread_local/thread_local.h\"\n\n#include \"test/mocks/event/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace ThreadLocal {\n\nclass MockInstance : public Instance {\npublic:\n  MockInstance();\n  ~MockInstance() override;\n\n  MOCK_METHOD(void, runOnAllThreads, (Event::PostCb cb));\n  MOCK_METHOD(void, runOnAllThreads, (Event::PostCb cb, Event::PostCb main_callback));\n\n  // Server::ThreadLocal\n  MOCK_METHOD(SlotPtr, allocateSlot, ());\n  MOCK_METHOD(void, registerThread, (Event::Dispatcher & dispatcher, bool main_thread));\n  MOCK_METHOD(void, shutdownGlobalThreading, ());\n  MOCK_METHOD(void, shutdownThread, ());\n  MOCK_METHOD(Event::Dispatcher&, dispatcher, ());\n\n  SlotPtr allocateSlot_() { return SlotPtr{new SlotImpl(*this, current_slot_++)}; }\n  void runOnAllThreads1_(Event::PostCb cb) { cb(); }\n  void runOnAllThreads2_(Event::PostCb cb, Event::PostCb main_callback) {\n    cb();\n    main_callback();\n  }\n\n  void shutdownThread_() {\n    shutdown_ = true;\n    // Reverse order which is same as the production code.\n    for (auto it = data_.rbegin(); it != data_.rend(); ++it) {\n      it->reset();\n    }\n    data_.clear();\n  }\n\n  struct SlotImpl : public Slot {\n    SlotImpl(MockInstance& parent, uint32_t index) : parent_(parent), index_(index) {\n      parent_.data_.resize(index_ + 1);\n      parent_.deferred_data_.resize(index_ + 1);\n    }\n\n    ~SlotImpl() override {\n      // Do not actually clear slot data during shutdown. 
This mimics the production code.\n      // The defer_delete mimics the recycle() code with Bookkeeper.\n      if (!parent_.shutdown_ && !parent_.defer_delete) {\n        EXPECT_LT(index_, parent_.data_.size());\n        parent_.data_[index_].reset();\n      }\n    }\n\n    // ThreadLocal::Slot\n    ThreadLocalObjectSharedPtr get() override { return parent_.data_[index_]; }\n    bool currentThreadRegistered() override { return parent_.registered_; }\n    void runOnAllThreads(const UpdateCb& cb) override {\n      parent_.runOnAllThreads([cb, this]() { parent_.data_[index_] = cb(parent_.data_[index_]); });\n    }\n    void runOnAllThreads(const UpdateCb& cb, Event::PostCb main_callback) override {\n      parent_.runOnAllThreads([cb, this]() { parent_.data_[index_] = cb(parent_.data_[index_]); },\n                              main_callback);\n    }\n\n    void set(InitializeCb cb) override {\n      if (parent_.defer_data) {\n        parent_.deferred_data_[index_] = cb;\n      } else {\n        parent_.data_[index_] = cb(parent_.dispatcher_);\n      }\n    }\n\n    MockInstance& parent_;\n    const uint32_t index_;\n  };\n\n  void call() {\n    for (unsigned i = 0; i < deferred_data_.size(); i++) {\n      data_[i] = deferred_data_[i](dispatcher_);\n    }\n    deferred_data_.clear();\n  }\n\n  uint32_t current_slot_{};\n  testing::NiceMock<Event::MockDispatcher> dispatcher_;\n  std::vector<ThreadLocalObjectSharedPtr> data_;\n  std::vector<Slot::InitializeCb> deferred_data_;\n  bool defer_data{};\n  bool shutdown_{};\n  bool registered_{true};\n  bool defer_delete{};\n};\n\n} // namespace ThreadLocal\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/tracing/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"tracing_mocks\",\n    srcs = [\"mocks.cc\"],\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \"//include/envoy/tracing:http_tracer_interface\",\n        \"//include/envoy/tracing:http_tracer_manager_interface\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/tracing/mocks.cc",
    "content": "#include \"mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Tracing {\n\nMockSpan::MockSpan() = default;\nMockSpan::~MockSpan() = default;\n\nMockConfig::MockConfig() {\n  ON_CALL(*this, operationName()).WillByDefault(Return(operation_name_));\n  ON_CALL(*this, customTags()).WillByDefault(Return(&custom_tags_));\n  ON_CALL(*this, verbose()).WillByDefault(Return(verbose_));\n  ON_CALL(*this, maxPathTagLength()).WillByDefault(Return(uint32_t(256)));\n}\nMockConfig::~MockConfig() = default;\n\nMockHttpTracer::MockHttpTracer() = default;\nMockHttpTracer::~MockHttpTracer() = default;\n\nMockDriver::MockDriver() = default;\nMockDriver::~MockDriver() = default;\n\nMockHttpTracerManager::MockHttpTracerManager() = default;\nMockHttpTracerManager::~MockHttpTracerManager() = default;\n\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/tracing/mocks.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/tracing/http_tracer.h\"\n#include \"envoy/tracing/http_tracer_manager.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Tracing {\n\nclass MockConfig : public Config {\npublic:\n  MockConfig();\n  ~MockConfig() override;\n\n  MOCK_METHOD(OperationName, operationName, (), (const));\n  MOCK_METHOD(const CustomTagMap*, customTags, (), (const));\n  MOCK_METHOD(bool, verbose, (), (const));\n  MOCK_METHOD(uint32_t, maxPathTagLength, (), (const));\n\n  OperationName operation_name_{OperationName::Ingress};\n  CustomTagMap custom_tags_;\n  bool verbose_{false};\n};\n\nclass MockSpan : public Span {\npublic:\n  MockSpan();\n  ~MockSpan() override;\n\n  MOCK_METHOD(void, setOperation, (absl::string_view operation));\n  MOCK_METHOD(void, setTag, (absl::string_view name, absl::string_view value));\n  MOCK_METHOD(void, log, (SystemTime timestamp, const std::string& event));\n  MOCK_METHOD(void, finishSpan, ());\n  MOCK_METHOD(void, injectContext, (Http::RequestHeaderMap & request_headers));\n  MOCK_METHOD(void, setSampled, (const bool sampled));\n  MOCK_METHOD(void, setBaggage, (absl::string_view key, absl::string_view value));\n  MOCK_METHOD(std::string, getBaggage, (absl::string_view key));\n\n  SpanPtr spawnChild(const Config& config, const std::string& name,\n                     SystemTime start_time) override {\n    return SpanPtr{spawnChild_(config, name, start_time)};\n  }\n\n  MOCK_METHOD(Span*, spawnChild_,\n              (const Config& config, const std::string& name, SystemTime start_time));\n};\n\nclass MockHttpTracer : public HttpTracer {\npublic:\n  MockHttpTracer();\n  ~MockHttpTracer() override;\n\n  SpanPtr startSpan(const Config& config, Http::RequestHeaderMap& request_headers,\n                    const StreamInfo::StreamInfo& stream_info,\n                    const Tracing::Decision tracing_decision) override {\n    return SpanPtr{startSpan_(config, 
request_headers, stream_info, tracing_decision)};\n  }\n\n  MOCK_METHOD(Span*, startSpan_,\n              (const Config& config, Http::HeaderMap& request_headers,\n               const StreamInfo::StreamInfo& stream_info,\n               const Tracing::Decision tracing_decision));\n};\n\nclass MockDriver : public Driver {\npublic:\n  MockDriver();\n  ~MockDriver() override;\n\n  SpanPtr startSpan(const Config& config, Http::RequestHeaderMap& request_headers,\n                    const std::string& operation_name, SystemTime start_time,\n                    const Tracing::Decision tracing_decision) override {\n    return SpanPtr{\n        startSpan_(config, request_headers, operation_name, start_time, tracing_decision)};\n  }\n\n  MOCK_METHOD(Span*, startSpan_,\n              (const Config& config, Http::HeaderMap& request_headers,\n               const std::string& operation_name, SystemTime start_time,\n               const Tracing::Decision tracing_decision));\n};\n\nclass MockHttpTracerManager : public HttpTracerManager {\npublic:\n  MockHttpTracerManager();\n  ~MockHttpTracerManager() override;\n\n  MOCK_METHOD(HttpTracerSharedPtr, getOrCreateHttpTracer,\n              (const envoy::config::trace::v3::Tracing_Http*));\n};\n\n} // namespace Tracing\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_mock\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_mock(\n    name = \"cluster_info_mocks\",\n    srcs = [\"cluster_info.cc\"],\n    hdrs = [\"cluster_info.h\"],\n    deps = [\n        \":transport_socket_match_mocks\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/common/http/http1:codec_stats_lib\",\n        \"//source/common/http/http2:codec_stats_lib\",\n        \"//source/common/network:raw_buffer_socket_lib\",\n        \"//source/common/upstream:upstream_includes\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"host_mocks\",\n    srcs = [\"host.cc\"],\n    hdrs = [\"host.h\"],\n    deps = [\n        \":cluster_info_mocks\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/mocks/network:transport_socket_mocks\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/cluster/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"transport_socket_match_mocks\",\n    srcs = [\"transport_socket_match.cc\"],\n    hdrs = [\"transport_socket_match.h\"],\n    deps = [\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/network:raw_buffer_socket_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        
\"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"load_balancer_context_mock\",\n    srcs = [\"load_balancer_context.cc\"],\n    hdrs = [\"load_balancer_context.h\"],\n    deps = [\n        \"//include/envoy/upstream:load_balancer_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"upstream_mocks\",\n    hdrs = [\"mocks.h\"],\n    deps = [\n        \":basic_resource_limit_mocks\",\n        \":cds_api_mocks\",\n        \":cluster_info_factory_mocks\",\n        \":cluster_manager_factory_mocks\",\n        \":cluster_manager_mocks\",\n        \":cluster_mocks\",\n        \":cluster_priority_set_mocks\",\n        \":cluster_real_priority_set_mocks\",\n        \":cluster_update_callbacks_handle_mocks\",\n        \":cluster_update_callbacks_mocks\",\n        \":health_check_event_logger_mocks\",\n        \":health_checker_mocks\",\n        \":host_set_mocks\",\n        \":load_balancer_context_mock\",\n        \":load_balancer_mocks\",\n        \":priority_set_mocks\",\n        \":retry_host_predicate_mocks\",\n        \":retry_priority_factory_mocks\",\n        \":retry_priority_mocks\",\n        \":test_retry_host_predicate_factory_mocks\",\n        \":thread_aware_load_balancer_mocks\",\n        \":thread_local_cluster_mocks\",\n        \":transport_socket_match_mocks\",\n        \"//include/envoy/http:async_client_interface\",\n        \"//include/envoy/upstream:cluster_factory_interface\",\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//include/envoy/upstream:health_checker_interface\",\n        \"//include/envoy/upstream:load_balancer_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/http:header_utility_lib\",\n        \"//source/common/upstream:cluster_factory_lib\",\n        \"//source/common/upstream:health_discovery_service_lib\",\n        \"//source/common/upstream:upstream_lib\",\n        \"//test/mocks/config:config_mocks\",\n        
\"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/tcp:tcp_mocks\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/data/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"host_set_mocks\",\n    srcs = [\"host_set.cc\"],\n    hdrs = [\"host_set.h\"],\n    deps = [\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/common:callback_impl_lib\",\n        \"//source/common/upstream:upstream_lib\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"priority_set_mocks\",\n    srcs = [\"priority_set.cc\"],\n    hdrs = [\"priority_set.h\"],\n    deps = [\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//test/mocks/upstream:host_set_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"retry_priority_mocks\",\n    srcs = [\"retry_priority.cc\"],\n    hdrs = [\"retry_priority.h\"],\n    deps = [\n        \"//include/envoy/upstream:retry_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"retry_priority_factory_mocks\",\n    hdrs = [\"retry_priority_factory.h\"],\n    deps = [\n        \"//include/envoy/upstream:retry_interface\",\n        \"//test/mocks/upstream:retry_priority_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"cluster_mocks\",\n    srcs = [\"cluster.cc\"],\n    hdrs = [\"cluster.h\"],\n    deps = [\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//test/mocks/upstream:cluster_info_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"cluster_real_priority_set_mocks\",\n    srcs = [\"cluster_real_priority_set.cc\"],\n    hdrs = [\"cluster_real_priority_set.h\"],\n    deps = [\n        
\"//test/mocks/upstream:cluster_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"cluster_priority_set_mocks\",\n    srcs = [\"cluster_priority_set.cc\"],\n    hdrs = [\"cluster_priority_set.h\"],\n    deps = [\n        \"//test/mocks/upstream:cluster_mocks\",\n        \"//test/mocks/upstream:priority_set_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"load_balancer_mocks\",\n    srcs = [\"load_balancer.cc\"],\n    hdrs = [\"load_balancer.h\"],\n    deps = [\n        \"//include/envoy/upstream:load_balancer_interface\",\n        \"//test/mocks/upstream:host_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"thread_aware_load_balancer_mocks\",\n    srcs = [\"thread_aware_load_balancer.cc\"],\n    hdrs = [\"thread_aware_load_balancer.h\"],\n    deps = [\n        \"//include/envoy/upstream:load_balancer_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"thread_local_cluster_mocks\",\n    srcs = [\"thread_local_cluster.cc\"],\n    hdrs = [\"thread_local_cluster.h\"],\n    deps = [\n        \"//include/envoy/upstream:thread_local_cluster_interface\",\n        \"//test/mocks/upstream:cluster_priority_set_mocks\",\n        \"//test/mocks/upstream:load_balancer_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"cluster_manager_factory_mocks\",\n    srcs = [\"cluster_manager_factory.cc\"],\n    hdrs = [\"cluster_manager_factory.h\"],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//test/mocks/secret:secret_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"cluster_update_callbacks_handle_mocks\",\n    srcs = [\"cluster_update_callbacks_handle.cc\"],\n    hdrs = [\"cluster_update_callbacks_handle.h\"],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"cluster_manager_mocks\",\n    srcs = [\"cluster_manager.cc\"],\n    hdrs = [\"cluster_manager.h\"],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        
\"//test/mocks/config:config_mocks\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/tcp:tcp_mocks\",\n        \"//test/mocks/upstream:cluster_manager_factory_mocks\",\n        \"//test/mocks/upstream:thread_local_cluster_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"health_checker_mocks\",\n    srcs = [\"health_checker.cc\"],\n    hdrs = [\"health_checker.h\"],\n    deps = [\n        \"//include/envoy/upstream:health_checker_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"health_check_event_logger_mocks\",\n    hdrs = [\"health_check_event_logger.h\"],\n    deps = [\n        \"//include/envoy/upstream:health_checker_interface\",\n        \"@envoy_api//envoy/data/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"cds_api_mocks\",\n    srcs = [\"cds_api.cc\"],\n    hdrs = [\"cds_api.h\"],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"cluster_update_callbacks_mocks\",\n    srcs = [\"cluster_update_callbacks.cc\"],\n    hdrs = [\"cluster_update_callbacks.h\"],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"cluster_info_factory_mocks\",\n    srcs = [\"cluster_info_factory.cc\"],\n    hdrs = [\"cluster_info_factory.h\"],\n    deps = [\n        \"//include/envoy/upstream:cluster_manager_interface\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"retry_host_predicate_mocks\",\n    srcs = [\"retry_host_predicate.cc\"],\n    hdrs = [\"retry_host_predicate.h\"],\n    deps = [\n        \"//include/envoy/upstream:retry_interface\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"test_retry_host_predicate_factory_mocks\",\n    hdrs = [\"test_retry_host_predicate_factory.h\"],\n    deps = [\n        \"//include/envoy/upstream:retry_interface\",\n        
\"//test/mocks/upstream:retry_host_predicate_mocks\",\n    ],\n)\n\nenvoy_cc_mock(\n    name = \"basic_resource_limit_mocks\",\n    srcs = [\"basic_resource_limit.cc\"],\n    hdrs = [\"basic_resource_limit.h\"],\n    deps = [\n        \"//include/envoy/common:resource_interface\",\n    ],\n)\n"
  },
  {
    "path": "test/mocks/upstream/basic_resource_limit.cc",
    "content": "#include \"basic_resource_limit.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nusing ::testing::Return;\nMockBasicResourceLimit::MockBasicResourceLimit() {\n  ON_CALL(*this, canCreate()).WillByDefault(Return(true));\n}\n\nMockBasicResourceLimit::~MockBasicResourceLimit() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/basic_resource_limit.h",
    "content": "#pragma once\n\n#include \"envoy/common/resource.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockBasicResourceLimit : public ResourceLimit {\npublic:\n  MockBasicResourceLimit();\n  ~MockBasicResourceLimit() override;\n\n  MOCK_METHOD(bool, canCreate, ());\n  MOCK_METHOD(void, inc, ());\n  MOCK_METHOD(void, dec, ());\n  MOCK_METHOD(void, decBy, (uint64_t));\n  MOCK_METHOD(uint64_t, max, ());\n  MOCK_METHOD(uint64_t, count, (), (const));\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cds_api.cc",
    "content": "#include \"cds_api.h\"\n\n#include <functional>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::_;\nusing ::testing::SaveArg;\nMockCdsApi::MockCdsApi() {\n  ON_CALL(*this, setInitializedCb(_)).WillByDefault(SaveArg<0>(&initialized_callback_));\n}\n\nMockCdsApi::~MockCdsApi() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cds_api.h",
    "content": "#pragma once\n\n#include <functional>\n#include <string>\n\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockCdsApi : public CdsApi {\npublic:\n  MockCdsApi();\n  ~MockCdsApi() override;\n\n  MOCK_METHOD(void, initialize, ());\n  MOCK_METHOD(void, setInitializedCb, (std::function<void()> callback));\n  MOCK_METHOD(const std::string, versionInfo, (), (const));\n\n  std::function<void()> initialized_callback_;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster.cc",
    "content": "#include \"cluster.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::_;\nusing ::testing::Invoke;\nusing ::testing::Return;\nMockCluster::MockCluster() {\n  ON_CALL(*this, info()).WillByDefault(Return(info_));\n  ON_CALL(*this, initialize(_))\n      .WillByDefault(Invoke([this](std::function<void()> callback) -> void {\n        EXPECT_EQ(nullptr, initialize_callback_);\n        initialize_callback_ = callback;\n      }));\n}\n\nMockCluster::~MockCluster() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"envoy/upstream/upstream.h\"\n\n#include \"test/mocks/upstream/cluster_info.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockCluster : public Cluster {\npublic:\n  MockCluster();\n  ~MockCluster() override;\n\n  // Upstream::Cluster\n  MOCK_METHOD(HealthChecker*, healthChecker, ());\n  MOCK_METHOD(ClusterInfoConstSharedPtr, info, (), (const));\n  MOCK_METHOD(Outlier::Detector*, outlierDetector, ());\n  MOCK_METHOD(const Outlier::Detector*, outlierDetector, (), (const));\n  MOCK_METHOD(void, initialize, (std::function<void()> callback));\n  MOCK_METHOD(InitializePhase, initializePhase, (), (const));\n  MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, sourceAddress, (), (const));\n\n  std::shared_ptr<MockClusterInfo> info_{new ::testing::NiceMock<MockClusterInfo>()};\n  std::function<void()> initialize_callback_;\n  Network::Address::InstanceConstSharedPtr source_address_;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_info.cc",
    "content": "#include \"test/mocks/upstream/cluster_info.h\"\n\n#include <limits>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/upstream/host_description.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/config/metadata.h\"\n#include \"common/http/utility.h\"\n#include \"common/network/raw_buffer_socket.h\"\n#include \"common/upstream/upstream_impl.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Upstream {\n\nMockLoadBalancerSubsetInfo::MockLoadBalancerSubsetInfo() {\n  ON_CALL(*this, isEnabled()).WillByDefault(Return(false));\n  ON_CALL(*this, fallbackPolicy())\n      .WillByDefault(Return(envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT));\n  ON_CALL(*this, defaultSubset()).WillByDefault(ReturnRef(ProtobufWkt::Struct::default_instance()));\n  ON_CALL(*this, subsetSelectors()).WillByDefault(ReturnRef(subset_selectors_));\n}\n\nMockLoadBalancerSubsetInfo::~MockLoadBalancerSubsetInfo() = default;\n\nMockIdleTimeEnabledClusterInfo::MockIdleTimeEnabledClusterInfo() {\n  ON_CALL(*this, idleTimeout()).WillByDefault(Return(std::chrono::milliseconds(1000)));\n}\n\nMockIdleTimeEnabledClusterInfo::~MockIdleTimeEnabledClusterInfo() = default;\n\nMockClusterInfo::MockClusterInfo()\n    : http2_options_(::Envoy::Http2::Utility::initializeAndValidateOptions(\n          envoy::config::core::v3::Http2ProtocolOptions())),\n      stats_(ClusterInfoImpl::generateStats(stats_store_)),\n      transport_socket_matcher_(new NiceMock<Upstream::MockTransportSocketMatcher>()),\n      load_report_stats_(ClusterInfoImpl::generateLoadReportStats(load_report_stats_store_)),\n      request_response_size_stats_(std::make_unique<ClusterRequestResponseSizeStats>(\n          ClusterInfoImpl::generateRequestResponseSizeStats(request_response_size_stats_store_))),\n      
timeout_budget_stats_(std::make_unique<ClusterTimeoutBudgetStats>(\n          ClusterInfoImpl::generateTimeoutBudgetStats(timeout_budget_stats_store_))),\n      circuit_breakers_stats_(\n          ClusterInfoImpl::generateCircuitBreakersStats(stats_store_, \"default\", true)),\n      resource_manager_(new Upstream::ResourceManagerImpl(\n          runtime_, \"fake_key\", 1, 1024, 1024, 1, std::numeric_limits<uint64_t>::max(),\n          circuit_breakers_stats_, absl::nullopt, absl::nullopt)) {\n  ON_CALL(*this, connectTimeout()).WillByDefault(Return(std::chrono::milliseconds(1)));\n  ON_CALL(*this, idleTimeout()).WillByDefault(Return(absl::optional<std::chrono::milliseconds>()));\n  ON_CALL(*this, perUpstreamPrefetchRatio()).WillByDefault(Return(1.0));\n  ON_CALL(*this, name()).WillByDefault(ReturnRef(name_));\n  ON_CALL(*this, edsServiceName()).WillByDefault(ReturnPointee(&eds_service_name_));\n  ON_CALL(*this, http1Settings()).WillByDefault(ReturnRef(http1_settings_));\n  ON_CALL(*this, http2Options()).WillByDefault(ReturnRef(http2_options_));\n  ON_CALL(*this, commonHttpProtocolOptions())\n      .WillByDefault(ReturnRef(common_http_protocol_options_));\n  ON_CALL(*this, extensionProtocolOptions(_)).WillByDefault(Return(extension_protocol_options_));\n  ON_CALL(*this, maxResponseHeadersCount())\n      .WillByDefault(ReturnPointee(&max_response_headers_count_));\n  ON_CALL(*this, maxRequestsPerConnection())\n      .WillByDefault(ReturnPointee(&max_requests_per_connection_));\n  ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_));\n  ON_CALL(*this, statsScope()).WillByDefault(ReturnRef(stats_store_));\n  // TODO(incfly): The following is a hack because it's not possible to directly embed\n  // a mock transport socket factory matcher due to circular dependencies. 
Fix this up in a follow\n  // up.\n  ON_CALL(*this, transportSocketMatcher())\n      .WillByDefault(\n          Invoke([this]() -> TransportSocketMatcher& { return *transport_socket_matcher_; }));\n  ON_CALL(*this, loadReportStats()).WillByDefault(ReturnRef(load_report_stats_));\n  ON_CALL(*this, requestResponseSizeStats())\n      .WillByDefault(Return(\n          std::reference_wrapper<ClusterRequestResponseSizeStats>(*request_response_size_stats_)));\n  ON_CALL(*this, timeoutBudgetStats())\n      .WillByDefault(\n          Return(std::reference_wrapper<ClusterTimeoutBudgetStats>(*timeout_budget_stats_)));\n  ON_CALL(*this, sourceAddress()).WillByDefault(ReturnRef(source_address_));\n  ON_CALL(*this, resourceManager(_))\n      .WillByDefault(Invoke(\n          [this](ResourcePriority) -> Upstream::ResourceManager& { return *resource_manager_; }));\n  ON_CALL(*this, lbType()).WillByDefault(ReturnPointee(&lb_type_));\n  ON_CALL(*this, sourceAddress()).WillByDefault(ReturnRef(source_address_));\n  ON_CALL(*this, lbSubsetInfo()).WillByDefault(ReturnRef(lb_subset_));\n  ON_CALL(*this, lbRingHashConfig()).WillByDefault(ReturnRef(lb_ring_hash_config_));\n  ON_CALL(*this, lbMaglevConfig()).WillByDefault(ReturnRef(lb_maglev_config_));\n  ON_CALL(*this, lbOriginalDstConfig()).WillByDefault(ReturnRef(lb_original_dst_config_));\n  ON_CALL(*this, upstreamConfig()).WillByDefault(ReturnRef(upstream_config_));\n  ON_CALL(*this, lbConfig()).WillByDefault(ReturnRef(lb_config_));\n  ON_CALL(*this, clusterSocketOptions()).WillByDefault(ReturnRef(cluster_socket_options_));\n  ON_CALL(*this, metadata()).WillByDefault(ReturnRef(metadata_));\n  ON_CALL(*this, upstreamHttpProtocolOptions())\n      .WillByDefault(ReturnRef(upstream_http_protocol_options_));\n  // Delayed construction of typed_metadata_, to allow for injection of metadata\n  ON_CALL(*this, typedMetadata())\n      .WillByDefault(Invoke([this]() -> const Envoy::Config::TypedMetadata& {\n        if (typed_metadata_ == nullptr) 
{\n          typed_metadata_ =\n              std::make_unique<Config::TypedMetadataImpl<ClusterTypedMetadataFactory>>(metadata_);\n        }\n        return *typed_metadata_;\n      }));\n  ON_CALL(*this, clusterType()).WillByDefault(ReturnRef(cluster_type_));\n  ON_CALL(*this, upstreamHttpProtocol(_)).WillByDefault(Return(Http::Protocol::Http11));\n}\n\nMockClusterInfo::~MockClusterInfo() = default;\n\nHttp::Http1::CodecStats& MockClusterInfo::http1CodecStats() const {\n  return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, statsScope());\n}\n\nHttp::Http2::CodecStats& MockClusterInfo::http2CodecStats() const {\n  return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, statsScope());\n}\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_info.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/protocol.pb.h\"\n#include \"envoy/config/typed_metadata.h\"\n#include \"envoy/stats/scope.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/http/http1/codec_stats.h\"\n#include \"common/http/http2/codec_stats.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/upstream/transport_socket_match.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass MockLoadBalancerSubsetInfo : public LoadBalancerSubsetInfo {\npublic:\n  MockLoadBalancerSubsetInfo();\n  ~MockLoadBalancerSubsetInfo() override;\n\n  // Upstream::LoadBalancerSubsetInfo\n  MOCK_METHOD(bool, isEnabled, (), (const));\n  MOCK_METHOD(envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetFallbackPolicy,\n              fallbackPolicy, (), (const));\n  MOCK_METHOD(const ProtobufWkt::Struct&, defaultSubset, (), (const));\n  MOCK_METHOD(const std::vector<SubsetSelectorPtr>&, subsetSelectors, (), (const));\n  MOCK_METHOD(bool, localityWeightAware, (), (const));\n  MOCK_METHOD(bool, scaleLocalityWeight, (), (const));\n  MOCK_METHOD(bool, panicModeAny, (), (const));\n  MOCK_METHOD(bool, listAsAny, (), (const));\n\n  std::vector<SubsetSelectorPtr> subset_selectors_;\n};\n\n// While this mock class doesn't have any direct use in public Envoy tests, it's\n// useful for constructing tests of downstream private filters that use\n// ClusterTypedMetadata.\nclass MockClusterTypedMetadata : public Config::TypedMetadataImpl<ClusterTypedMetadataFactory> {\npublic:\n  using 
Config::TypedMetadataImpl<ClusterTypedMetadataFactory>::TypedMetadataImpl;\n\n  void inject(const std::string& key, std::unique_ptr<const TypedMetadata::Object> value) {\n    data_[key] = std::move(value);\n  }\n\n  absl::node_hash_map<std::string, std::unique_ptr<const TypedMetadata::Object>>& data() {\n    return data_;\n  }\n};\n\nclass MockClusterInfo : public ClusterInfo {\npublic:\n  MockClusterInfo();\n  ~MockClusterInfo() override;\n\n  void resetResourceManager(uint64_t cx, uint64_t rq_pending, uint64_t rq, uint64_t rq_retry,\n                            uint64_t conn_pool) {\n    resource_manager_ = std::make_unique<ResourceManagerImpl>(\n        runtime_, name_, cx, rq_pending, rq, rq_retry, conn_pool, circuit_breakers_stats_,\n        absl::nullopt, absl::nullopt);\n  }\n\n  void resetResourceManagerWithRetryBudget(uint64_t cx, uint64_t rq_pending, uint64_t rq,\n                                           uint64_t rq_retry, uint64_t conn_pool,\n                                           double budget_percent, uint32_t min_retry_concurrency) {\n    resource_manager_ = std::make_unique<ResourceManagerImpl>(\n        runtime_, name_, cx, rq_pending, rq, rq_retry, conn_pool, circuit_breakers_stats_,\n        budget_percent, min_retry_concurrency);\n  }\n\n  // Upstream::ClusterInfo\n  MOCK_METHOD(bool, addedViaApi, (), (const));\n  MOCK_METHOD(std::chrono::milliseconds, connectTimeout, (), (const));\n  MOCK_METHOD(const absl::optional<std::chrono::milliseconds>, idleTimeout, (), (const));\n  MOCK_METHOD(const absl::optional<std::chrono::milliseconds>, maxStreamDuration, (), (const));\n  MOCK_METHOD(const absl::optional<std::chrono::milliseconds>, grpcTimeoutHeaderMax, (), (const));\n  MOCK_METHOD(const absl::optional<std::chrono::milliseconds>, grpcTimeoutHeaderOffset, (),\n              (const));\n  MOCK_METHOD(float, perUpstreamPrefetchRatio, (), (const));\n  MOCK_METHOD(float, peekaheadRatio, (), (const));\n  MOCK_METHOD(uint32_t, 
perConnectionBufferLimitBytes, (), (const));\n  MOCK_METHOD(uint64_t, features, (), (const));\n  MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const));\n  MOCK_METHOD(const envoy::config::core::v3::Http2ProtocolOptions&, http2Options, (), (const));\n  MOCK_METHOD(const envoy::config::core::v3::HttpProtocolOptions&, commonHttpProtocolOptions, (),\n              (const));\n  MOCK_METHOD(ProtocolOptionsConfigConstSharedPtr, extensionProtocolOptions, (const std::string&),\n              (const));\n  MOCK_METHOD(const envoy::config::cluster::v3::Cluster::CommonLbConfig&, lbConfig, (), (const));\n  MOCK_METHOD(LoadBalancerType, lbType, (), (const));\n  MOCK_METHOD(envoy::config::cluster::v3::Cluster::DiscoveryType, type, (), (const));\n  MOCK_METHOD(const absl::optional<envoy::config::cluster::v3::Cluster::CustomClusterType>&,\n              clusterType, (), (const));\n  MOCK_METHOD(const absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig>&,\n              lbRingHashConfig, (), (const));\n  MOCK_METHOD(const absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig>&,\n              lbMaglevConfig, (), (const));\n  MOCK_METHOD(const absl::optional<envoy::config::cluster::v3::Cluster::LeastRequestLbConfig>&,\n              lbLeastRequestConfig, (), (const));\n  MOCK_METHOD(const absl::optional<envoy::config::cluster::v3::Cluster::OriginalDstLbConfig>&,\n              lbOriginalDstConfig, (), (const));\n  MOCK_METHOD(const absl::optional<envoy::config::core::v3::TypedExtensionConfig>&, upstreamConfig,\n              (), (const));\n  MOCK_METHOD(bool, maintenanceMode, (), (const));\n  MOCK_METHOD(uint32_t, maxResponseHeadersCount, (), (const));\n  MOCK_METHOD(uint64_t, maxRequestsPerConnection, (), (const));\n  MOCK_METHOD(const std::string&, name, (), (const));\n  MOCK_METHOD(ResourceManager&, resourceManager, (ResourcePriority priority), (const));\n  MOCK_METHOD(TransportSocketMatcher&, transportSocketMatcher, (), (const));\n  
MOCK_METHOD(ClusterStats&, stats, (), (const));\n  MOCK_METHOD(Stats::Scope&, statsScope, (), (const));\n  MOCK_METHOD(ClusterLoadReportStats&, loadReportStats, (), (const));\n  MOCK_METHOD(ClusterRequestResponseSizeStatsOptRef, requestResponseSizeStats, (), (const));\n  MOCK_METHOD(ClusterTimeoutBudgetStatsOptRef, timeoutBudgetStats, (), (const));\n  MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, sourceAddress, (), (const));\n  MOCK_METHOD(const LoadBalancerSubsetInfo&, lbSubsetInfo, (), (const));\n  MOCK_METHOD(const envoy::config::core::v3::Metadata&, metadata, (), (const));\n  MOCK_METHOD(const Envoy::Config::TypedMetadata&, typedMetadata, (), (const));\n  MOCK_METHOD(const Network::ConnectionSocket::OptionsSharedPtr&, clusterSocketOptions, (),\n              (const));\n  MOCK_METHOD(bool, drainConnectionsOnHostRemoval, (), (const));\n  MOCK_METHOD(bool, connectionPoolPerDownstreamConnection, (), (const));\n  MOCK_METHOD(bool, warmHosts, (), (const));\n  MOCK_METHOD(const absl::optional<envoy::config::core::v3::UpstreamHttpProtocolOptions>&,\n              upstreamHttpProtocolOptions, (), (const));\n  MOCK_METHOD(absl::optional<std::string>, edsServiceName, (), (const));\n  MOCK_METHOD(void, createNetworkFilterChain, (Network::Connection&), (const));\n  MOCK_METHOD(Http::Protocol, upstreamHttpProtocol, (absl::optional<Http::Protocol>), (const));\n\n  Http::Http1::CodecStats& http1CodecStats() const override;\n  Http::Http2::CodecStats& http2CodecStats() const override;\n\n  std::string name_{\"fake_cluster\"};\n  absl::optional<std::string> eds_service_name_;\n  Http::Http1Settings http1_settings_;\n  envoy::config::core::v3::Http2ProtocolOptions http2_options_;\n  envoy::config::core::v3::HttpProtocolOptions common_http_protocol_options_;\n  ProtocolOptionsConfigConstSharedPtr extension_protocol_options_;\n  uint64_t max_requests_per_connection_{};\n  uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT};\n  
NiceMock<Stats::MockIsolatedStatsStore> stats_store_;\n  ClusterStats stats_;\n  Upstream::TransportSocketMatcherPtr transport_socket_matcher_;\n  NiceMock<Stats::MockIsolatedStatsStore> load_report_stats_store_;\n  ClusterLoadReportStats load_report_stats_;\n  NiceMock<Stats::MockIsolatedStatsStore> request_response_size_stats_store_;\n  ClusterRequestResponseSizeStatsPtr request_response_size_stats_;\n  NiceMock<Stats::MockIsolatedStatsStore> timeout_budget_stats_store_;\n  ClusterTimeoutBudgetStatsPtr timeout_budget_stats_;\n  ClusterCircuitBreakersStats circuit_breakers_stats_;\n  NiceMock<Runtime::MockLoader> runtime_;\n  std::unique_ptr<Upstream::ResourceManager> resource_manager_;\n  Network::Address::InstanceConstSharedPtr source_address_;\n  LoadBalancerType lb_type_{LoadBalancerType::RoundRobin};\n  envoy::config::cluster::v3::Cluster::DiscoveryType type_{\n      envoy::config::cluster::v3::Cluster::STRICT_DNS};\n  absl::optional<envoy::config::cluster::v3::Cluster::CustomClusterType> cluster_type_;\n  NiceMock<MockLoadBalancerSubsetInfo> lb_subset_;\n  absl::optional<envoy::config::core::v3::UpstreamHttpProtocolOptions>\n      upstream_http_protocol_options_;\n  absl::optional<envoy::config::cluster::v3::Cluster::RingHashLbConfig> lb_ring_hash_config_;\n  absl::optional<envoy::config::cluster::v3::Cluster::MaglevLbConfig> lb_maglev_config_;\n  absl::optional<envoy::config::cluster::v3::Cluster::OriginalDstLbConfig> lb_original_dst_config_;\n  absl::optional<envoy::config::core::v3::TypedExtensionConfig> upstream_config_;\n  Network::ConnectionSocket::OptionsSharedPtr cluster_socket_options_;\n  envoy::config::cluster::v3::Cluster::CommonLbConfig lb_config_;\n  envoy::config::core::v3::Metadata metadata_;\n  std::unique_ptr<Envoy::Config::TypedMetadata> typed_metadata_;\n  absl::optional<std::chrono::milliseconds> max_stream_duration_;\n  mutable Http::Http1::CodecStats::AtomicPtr http1_codec_stats_;\n  mutable Http::Http2::CodecStats::AtomicPtr 
http2_codec_stats_;\n};\n\nclass MockIdleTimeEnabledClusterInfo : public MockClusterInfo {\npublic:\n  MockIdleTimeEnabledClusterInfo();\n  ~MockIdleTimeEnabledClusterInfo() override;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_info_factory.cc",
    "content": "#include \"cluster_info_factory.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nMockClusterInfoFactory::MockClusterInfoFactory() = default;\n\nMockClusterInfoFactory::~MockClusterInfoFactory() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_info_factory.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/common/logger.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockClusterInfoFactory : public ClusterInfoFactory, Logger::Loggable<Logger::Id::upstream> {\npublic:\n  MockClusterInfoFactory();\n  ~MockClusterInfoFactory() override;\n\n  MOCK_METHOD(ClusterInfoConstSharedPtr, createClusterInfo, (const CreateClusterInfoParams&));\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_manager.cc",
    "content": "#include \"cluster_manager.h\"\n\n#include <chrono>\n#include <functional>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::_;\nusing ::testing::Eq;\nusing ::testing::Return;\nusing ::testing::ReturnRef;\nMockClusterManager::MockClusterManager(TimeSource&) : MockClusterManager() {}\n\nMockClusterManager::MockClusterManager() {\n  ON_CALL(*this, httpConnPoolForCluster(_, _, _, _)).WillByDefault(Return(&conn_pool_));\n  ON_CALL(*this, tcpConnPoolForCluster(_, _, _)).WillByDefault(Return(&tcp_conn_pool_));\n  ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault(ReturnRef(async_client_));\n  ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault((ReturnRef(async_client_)));\n  ON_CALL(*this, bindConfig()).WillByDefault(ReturnRef(bind_config_));\n  ON_CALL(*this, adsMux()).WillByDefault(Return(ads_mux_));\n  ON_CALL(*this, grpcAsyncClientManager()).WillByDefault(ReturnRef(async_client_manager_));\n  ON_CALL(*this, localClusterName()).WillByDefault((ReturnRef(local_cluster_name_)));\n\n  // Matches are LIFO so \"\" will match first.\n  ON_CALL(*this, get(_)).WillByDefault(Return(&thread_local_cluster_));\n  ON_CALL(*this, get(Eq(\"\"))).WillByDefault(Return(nullptr));\n  ON_CALL(*this, subscriptionFactory()).WillByDefault(ReturnRef(subscription_factory_));\n}\n\nMockClusterManager::~MockClusterManager() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_manager.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/tcp/mocks.h\"\n\n#include \"cluster_manager_factory.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"thread_local_cluster.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::NiceMock;\nclass MockClusterManager : public ClusterManager {\npublic:\n  explicit MockClusterManager(TimeSource& time_source);\n  MockClusterManager();\n  ~MockClusterManager() override;\n\n  ClusterUpdateCallbacksHandlePtr\n  addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& callbacks) override {\n    return ClusterUpdateCallbacksHandlePtr{addThreadLocalClusterUpdateCallbacks_(callbacks)};\n  }\n\n  Host::CreateConnectionData tcpConnForCluster(const std::string& cluster,\n                                               LoadBalancerContext* context) override {\n    MockHost::MockCreateConnectionData data = tcpConnForCluster_(cluster, context);\n    return {Network::ClientConnectionPtr{data.connection_}, data.host_description_};\n  }\n\n  ClusterManagerFactory& clusterManagerFactory() override { return cluster_manager_factory_; }\n\n  // Upstream::ClusterManager\n  MOCK_METHOD(bool, addOrUpdateCluster,\n              (const envoy::config::cluster::v3::Cluster& cluster,\n               const std::string& version_info));\n  MOCK_METHOD(void, setPrimaryClustersInitializedCb, (PrimaryClustersReadyCallback));\n  MOCK_METHOD(void, setInitializedCb, (InitializationCompleteCallback));\n  MOCK_METHOD(void, initializeSecondaryClusters,\n              (const envoy::config::bootstrap::v3::Bootstrap& bootstrap));\n  MOCK_METHOD(ClusterInfoMap, clusters, ());\n  MOCK_METHOD(const ClusterSet&, primaryClusters, ());\n  MOCK_METHOD(ThreadLocalCluster*, get, (absl::string_view cluster));\n  MOCK_METHOD(Http::ConnectionPool::Instance*, 
httpConnPoolForCluster,\n              (const std::string& cluster, ResourcePriority priority,\n               absl::optional<Http::Protocol> downstream_protocol, LoadBalancerContext* context));\n  MOCK_METHOD(Tcp::ConnectionPool::Instance*, tcpConnPoolForCluster,\n              (const std::string& cluster, ResourcePriority priority,\n               LoadBalancerContext* context));\n  MOCK_METHOD(MockHost::MockCreateConnectionData, tcpConnForCluster_,\n              (const std::string& cluster, LoadBalancerContext* context));\n  MOCK_METHOD(Http::AsyncClient&, httpAsyncClientForCluster, (const std::string& cluster));\n  MOCK_METHOD(bool, removeCluster, (const std::string& cluster));\n  MOCK_METHOD(void, shutdown, ());\n  MOCK_METHOD(const envoy::config::core::v3::BindConfig&, bindConfig, (), (const));\n  MOCK_METHOD(Config::GrpcMuxSharedPtr, adsMux, ());\n  MOCK_METHOD(Grpc::AsyncClientManager&, grpcAsyncClientManager, ());\n  MOCK_METHOD(const std::string, versionInfo, (), (const));\n  MOCK_METHOD(const absl::optional<std::string>&, localClusterName, (), (const));\n  MOCK_METHOD(ClusterUpdateCallbacksHandle*, addThreadLocalClusterUpdateCallbacks_,\n              (ClusterUpdateCallbacks & callbacks));\n  MOCK_METHOD(Config::SubscriptionFactory&, subscriptionFactory, ());\n\n  NiceMock<Http::ConnectionPool::MockInstance> conn_pool_;\n  NiceMock<Http::MockAsyncClient> async_client_;\n  NiceMock<Tcp::ConnectionPool::MockInstance> tcp_conn_pool_;\n  NiceMock<MockThreadLocalCluster> thread_local_cluster_;\n  envoy::config::core::v3::BindConfig bind_config_;\n  std::shared_ptr<NiceMock<Config::MockGrpcMux>> ads_mux_;\n  NiceMock<Grpc::MockAsyncClientManager> async_client_manager_;\n  absl::optional<std::string> local_cluster_name_;\n  NiceMock<MockClusterManagerFactory> cluster_manager_factory_;\n  NiceMock<Config::MockSubscriptionFactory> subscription_factory_;\n};\n} // namespace Upstream\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_manager_factory.cc",
    "content": "#include \"cluster_manager_factory.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nMockClusterManagerFactory::MockClusterManagerFactory() = default;\n\nMockClusterManagerFactory::~MockClusterManagerFactory() = default;\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_manager_factory.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"test/mocks/secret/mocks.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::NiceMock;\nclass MockClusterManagerFactory : public ClusterManagerFactory {\npublic:\n  MockClusterManagerFactory();\n  ~MockClusterManagerFactory() override;\n\n  Secret::MockSecretManager& secretManager() override { return secret_manager_; };\n\n  MOCK_METHOD(ClusterManagerPtr, clusterManagerFromProto,\n              (const envoy::config::bootstrap::v3::Bootstrap& bootstrap));\n\n  MOCK_METHOD(Http::ConnectionPool::InstancePtr, allocateConnPool,\n              (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority,\n               Http::Protocol protocol, const Network::ConnectionSocket::OptionsSharedPtr& options,\n               const Network::TransportSocketOptionsSharedPtr& transport_socket_options));\n\n  MOCK_METHOD(Tcp::ConnectionPool::InstancePtr, allocateTcpConnPool,\n              (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority,\n               const Network::ConnectionSocket::OptionsSharedPtr& options,\n               Network::TransportSocketOptionsSharedPtr));\n\n  MOCK_METHOD((std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>), clusterFromProto,\n              (const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm,\n               Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api));\n\n  MOCK_METHOD(CdsApiPtr, createCds,\n              (const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm));\n\nprivate:\n  NiceMock<Secret::MockSecretManager> secret_manager_;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_priority_set.cc",
    "content": "#include \"cluster_priority_set.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nMockClusterMockPrioritySet::MockClusterMockPrioritySet() = default;\n\nMockClusterMockPrioritySet::~MockClusterMockPrioritySet() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_priority_set.h",
    "content": "#pragma once\n\n#include \"cluster.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"priority_set.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockClusterMockPrioritySet : public MockCluster {\npublic:\n  MockClusterMockPrioritySet();\n  ~MockClusterMockPrioritySet() override;\n\n  // Upstream::Cluster\n  MockPrioritySet& prioritySet() override { return priority_set_; }\n  const PrioritySet& prioritySet() const override { return priority_set_; }\n\n  ::testing::NiceMock<MockPrioritySet> priority_set_;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_real_priority_set.cc",
    "content": "#include \"cluster_real_priority_set.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nMockClusterRealPrioritySet::MockClusterRealPrioritySet() = default;\n\nMockClusterRealPrioritySet::~MockClusterRealPrioritySet() = default;\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_real_priority_set.h",
    "content": "#pragma once\n\n#include \"cluster.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockClusterRealPrioritySet : public MockCluster {\npublic:\n  MockClusterRealPrioritySet();\n  ~MockClusterRealPrioritySet() override;\n\n  // Upstream::Cluster\n  PrioritySetImpl& prioritySet() override { return priority_set_; }\n  const PrioritySet& prioritySet() const override { return priority_set_; }\n\n  PrioritySetImpl priority_set_;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_update_callbacks.cc",
    "content": "#include \"cluster_update_callbacks.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nMockClusterUpdateCallbacks::MockClusterUpdateCallbacks() = default;\n\nMockClusterUpdateCallbacks::~MockClusterUpdateCallbacks() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_update_callbacks.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockClusterUpdateCallbacks : public ClusterUpdateCallbacks {\npublic:\n  MockClusterUpdateCallbacks();\n  ~MockClusterUpdateCallbacks() override;\n\n  MOCK_METHOD(void, onClusterAddOrUpdate, (ThreadLocalCluster & cluster));\n  MOCK_METHOD(void, onClusterRemoval, (const std::string& cluster_name));\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_update_callbacks_handle.cc",
    "content": "#include \"cluster_update_callbacks_handle.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nMockClusterUpdateCallbacksHandle::MockClusterUpdateCallbacksHandle() = default;\n\nMockClusterUpdateCallbacksHandle::~MockClusterUpdateCallbacksHandle() = default;\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/cluster_update_callbacks_handle.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockClusterUpdateCallbacksHandle : public ClusterUpdateCallbacksHandle {\npublic:\n  MockClusterUpdateCallbacksHandle();\n  ~MockClusterUpdateCallbacksHandle() override;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/health_check_event_logger.h",
    "content": "#pragma once\n\n#include <functional>\n#include <list>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n#include \"envoy/upstream/health_checker.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockHealthCheckEventLogger : public HealthCheckEventLogger {\npublic:\n  MOCK_METHOD(void, logEjectUnhealthy,\n              (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&,\n               envoy::data::core::v3::HealthCheckFailureType));\n  MOCK_METHOD(void, logAddHealthy,\n              (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&,\n               bool));\n  MOCK_METHOD(void, logUnhealthy,\n              (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&,\n               envoy::data::core::v3::HealthCheckFailureType, bool));\n  MOCK_METHOD(void, logDegraded,\n              (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&));\n  MOCK_METHOD(void, logNoLongerDegraded,\n              (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&));\n};\n} // namespace Upstream\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/health_checker.cc",
    "content": "#include \"health_checker.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::_;\nusing ::testing::Invoke;\nMockHealthChecker::MockHealthChecker() {\n  ON_CALL(*this, addHostCheckCompleteCb(_)).WillByDefault(Invoke([this](HostStatusCb cb) -> void {\n    callbacks_.push_back(cb);\n  }));\n}\n\nMockHealthChecker::~MockHealthChecker() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/health_checker.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/health_checker.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockHealthChecker : public HealthChecker {\npublic:\n  MockHealthChecker();\n  ~MockHealthChecker() override;\n\n  MOCK_METHOD(void, addHostCheckCompleteCb, (HostStatusCb callback));\n  MOCK_METHOD(void, start, ());\n\n  void runCallbacks(Upstream::HostSharedPtr host, HealthTransition changed_state) {\n    for (const auto& callback : callbacks_) {\n      callback(host, changed_state);\n    }\n  }\n\n  std::list<HostStatusCb> callbacks_;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/host.cc",
    "content": "#include \"test/mocks/upstream/host.h\"\n\n#include \"common/network/utility.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace Outlier {\n\nMockDetectorHostMonitor::MockDetectorHostMonitor() = default;\nMockDetectorHostMonitor::~MockDetectorHostMonitor() = default;\n\nMockEventLogger::MockEventLogger() = default;\nMockEventLogger::~MockEventLogger() = default;\n\nMockDetector::MockDetector() {\n  ON_CALL(*this, addChangedStateCb(_)).WillByDefault(Invoke([this](ChangeStateCb cb) -> void {\n    callbacks_.push_back(cb);\n  }));\n}\n\nMockDetector::~MockDetector() = default;\n\n} // namespace Outlier\n\nMockHealthCheckHostMonitor::MockHealthCheckHostMonitor() = default;\nMockHealthCheckHostMonitor::~MockHealthCheckHostMonitor() = default;\n\nMockHostDescription::MockHostDescription()\n    : address_(Network::Utility::resolveUrl(\"tcp://10.0.0.1:443\")),\n      socket_factory_(new testing::NiceMock<Network::MockTransportSocketFactory>) {\n  ON_CALL(*this, hostname()).WillByDefault(ReturnRef(hostname_));\n  ON_CALL(*this, address()).WillByDefault(Return(address_));\n  ON_CALL(*this, outlierDetector()).WillByDefault(ReturnRef(outlier_detector_));\n  ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_));\n  ON_CALL(*this, cluster()).WillByDefault(ReturnRef(cluster_));\n  ON_CALL(*this, healthChecker()).WillByDefault(ReturnRef(health_checker_));\n  ON_CALL(*this, transportSocketFactory()).WillByDefault(ReturnRef(*socket_factory_));\n}\n\nMockHostDescription::~MockHostDescription() = default;\n\nMockHost::MockHost() : socket_factory_(new testing::NiceMock<Network::MockTransportSocketFactory>) {\n  ON_CALL(*this, cluster()).WillByDefault(ReturnRef(cluster_));\n  ON_CALL(*this, outlierDetector()).WillByDefault(ReturnRef(outlier_detector_));\n  ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_));\n  ON_CALL(*this, 
warmed()).WillByDefault(Return(true));\n  ON_CALL(*this, transportSocketFactory()).WillByDefault(ReturnRef(*socket_factory_));\n}\n\nMockHost::~MockHost() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/host.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/data/cluster/v2alpha/outlier_detection_event.pb.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"test/mocks/network/transport_socket.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/test_common/global.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace Outlier {\n\nclass MockDetectorHostMonitor : public DetectorHostMonitor {\npublic:\n  MockDetectorHostMonitor();\n  ~MockDetectorHostMonitor() override;\n\n  MOCK_METHOD(uint32_t, numEjections, ());\n  MOCK_METHOD(void, putHttpResponseCode, (uint64_t code));\n  MOCK_METHOD(void, putResult, (Result result, absl::optional<uint64_t> code));\n  MOCK_METHOD(void, putResponseTime, (std::chrono::milliseconds time));\n  MOCK_METHOD(const absl::optional<MonotonicTime>&, lastEjectionTime, ());\n  MOCK_METHOD(const absl::optional<MonotonicTime>&, lastUnejectionTime, ());\n  MOCK_METHOD(double, successRate, (DetectorHostMonitor::SuccessRateMonitorType type), (const));\n  MOCK_METHOD(void, successRate,\n              (DetectorHostMonitor::SuccessRateMonitorType type, double new_success_rate));\n};\n\nclass MockEventLogger : public EventLogger {\npublic:\n  MockEventLogger();\n  ~MockEventLogger() override;\n\n  MOCK_METHOD(void, logEject,\n              (const HostDescriptionConstSharedPtr& host, Detector& detector,\n               envoy::data::cluster::v2alpha::OutlierEjectionType type, bool enforced));\n  MOCK_METHOD(void, logUneject, (const HostDescriptionConstSharedPtr& host));\n};\n\nclass MockDetector : public Detector {\npublic:\n  MockDetector();\n  ~MockDetector() override;\n\n  void runCallbacks(HostSharedPtr host) {\n    for (const ChangeStateCb& cb : callbacks_) {\n      cb(host);\n    }\n  }\n\n  MOCK_METHOD(void, addChangedStateCb, 
(ChangeStateCb cb));\n  MOCK_METHOD(double, successRateAverage, (DetectorHostMonitor::SuccessRateMonitorType), (const));\n  MOCK_METHOD(double, successRateEjectionThreshold, (DetectorHostMonitor::SuccessRateMonitorType),\n              (const));\n\n  std::list<ChangeStateCb> callbacks_;\n};\n\n} // namespace Outlier\n\nclass MockHealthCheckHostMonitor : public HealthCheckHostMonitor {\npublic:\n  MockHealthCheckHostMonitor();\n  ~MockHealthCheckHostMonitor() override;\n\n  MOCK_METHOD(void, setUnhealthy, ());\n};\n\nclass MockHostDescription : public HostDescription {\npublic:\n  MockHostDescription();\n  ~MockHostDescription() override;\n\n  MOCK_METHOD(Network::Address::InstanceConstSharedPtr, address, (), (const));\n  MOCK_METHOD(Network::Address::InstanceConstSharedPtr, healthCheckAddress, (), (const));\n  MOCK_METHOD(bool, canary, (), (const));\n  MOCK_METHOD(void, canary, (bool new_canary));\n  MOCK_METHOD(MetadataConstSharedPtr, metadata, (), (const));\n  MOCK_METHOD(void, metadata, (MetadataConstSharedPtr));\n  MOCK_METHOD(const ClusterInfo&, cluster, (), (const));\n  MOCK_METHOD(Outlier::DetectorHostMonitor&, outlierDetector, (), (const));\n  MOCK_METHOD(HealthCheckHostMonitor&, healthChecker, (), (const));\n  MOCK_METHOD(const std::string&, hostnameForHealthChecks, (), (const));\n  MOCK_METHOD(const std::string&, hostname, (), (const));\n  MOCK_METHOD(Network::TransportSocketFactory&, transportSocketFactory, (), (const));\n  MOCK_METHOD(HostStats&, stats, (), (const));\n  MOCK_METHOD(const envoy::config::core::v3::Locality&, locality, (), (const));\n  MOCK_METHOD(uint32_t, priority, (), (const));\n  MOCK_METHOD(void, priority, (uint32_t));\n  Stats::StatName localityZoneStatName() const override {\n    Stats::SymbolTable& symbol_table = *symbol_table_;\n    locality_zone_stat_name_ =\n        std::make_unique<Stats::StatNameManagedStorage>(locality().zone(), symbol_table);\n    return locality_zone_stat_name_->statName();\n  }\n\n  std::string 
hostname_;\n  Network::Address::InstanceConstSharedPtr address_;\n  testing::NiceMock<Outlier::MockDetectorHostMonitor> outlier_detector_;\n  testing::NiceMock<MockHealthCheckHostMonitor> health_checker_;\n  Network::TransportSocketFactoryPtr socket_factory_;\n  testing::NiceMock<MockClusterInfo> cluster_;\n  HostStats stats_;\n  mutable Stats::TestSymbolTable symbol_table_;\n  mutable std::unique_ptr<Stats::StatNameManagedStorage> locality_zone_stat_name_;\n};\n\nclass MockHost : public Host {\npublic:\n  struct MockCreateConnectionData {\n    Network::ClientConnection* connection_{};\n    HostDescriptionConstSharedPtr host_description_{};\n  };\n\n  MockHost();\n  ~MockHost() override;\n\n  CreateConnectionData createConnection(Event::Dispatcher& dispatcher,\n                                        const Network::ConnectionSocket::OptionsSharedPtr& options,\n                                        Network::TransportSocketOptionsSharedPtr) const override {\n    MockCreateConnectionData data = createConnection_(dispatcher, options);\n    return {Network::ClientConnectionPtr{data.connection_}, data.host_description_};\n  }\n\n  CreateConnectionData\n  createHealthCheckConnection(Event::Dispatcher& dispatcher,\n                              Network::TransportSocketOptionsSharedPtr,\n                              const envoy::config::core::v3::Metadata*) const override {\n    MockCreateConnectionData data = createConnection_(dispatcher, nullptr);\n    return {Network::ClientConnectionPtr{data.connection_}, data.host_description_};\n  }\n\n  void setHealthChecker(HealthCheckHostMonitorPtr&& health_checker) override {\n    setHealthChecker_(health_checker);\n  }\n\n  void setOutlierDetector(Outlier::DetectorHostMonitorPtr&& outlier_detector) override {\n    setOutlierDetector_(outlier_detector);\n  }\n\n  Stats::StatName localityZoneStatName() const override {\n    locality_zone_stat_name_ =\n        std::make_unique<Stats::StatNameManagedStorage>(locality().zone(), 
*symbol_table_);\n    return locality_zone_stat_name_->statName();\n  }\n\n  MOCK_METHOD(Network::Address::InstanceConstSharedPtr, address, (), (const));\n  MOCK_METHOD(Network::Address::InstanceConstSharedPtr, healthCheckAddress, (), (const));\n  MOCK_METHOD(bool, canary, (), (const));\n  MOCK_METHOD(void, canary, (bool new_canary));\n  MOCK_METHOD(MetadataConstSharedPtr, metadata, (), (const));\n  MOCK_METHOD(void, metadata, (MetadataConstSharedPtr));\n  MOCK_METHOD(const ClusterInfo&, cluster, (), (const));\n  MOCK_METHOD((std::vector<std::pair<absl::string_view, Stats::PrimitiveCounterReference>>),\n              counters, (), (const));\n  MOCK_METHOD(MockCreateConnectionData, createConnection_,\n              (Event::Dispatcher & dispatcher,\n               const Network::ConnectionSocket::OptionsSharedPtr& options),\n              (const));\n  MOCK_METHOD((std::vector<std::pair<absl::string_view, Stats::PrimitiveGaugeReference>>), gauges,\n              (), (const));\n  MOCK_METHOD(HealthCheckHostMonitor&, healthChecker, (), (const));\n  MOCK_METHOD(void, healthFlagClear, (HealthFlag flag));\n  MOCK_METHOD(bool, healthFlagGet, (HealthFlag flag), (const));\n  MOCK_METHOD(ActiveHealthFailureType, getActiveHealthFailureType, (), (const));\n  MOCK_METHOD(void, healthFlagSet, (HealthFlag flag));\n  MOCK_METHOD(void, setActiveHealthFailureType, (ActiveHealthFailureType type));\n  MOCK_METHOD(Host::Health, health, (), (const));\n  MOCK_METHOD(const std::string&, hostnameForHealthChecks, (), (const));\n  MOCK_METHOD(const std::string&, hostname, (), (const));\n  MOCK_METHOD(Network::TransportSocketFactory&, transportSocketFactory, (), (const));\n  MOCK_METHOD(Outlier::DetectorHostMonitor&, outlierDetector, (), (const));\n  MOCK_METHOD(void, setHealthChecker_, (HealthCheckHostMonitorPtr & health_checker));\n  MOCK_METHOD(void, setOutlierDetector_, (Outlier::DetectorHostMonitorPtr & outlier_detector));\n  MOCK_METHOD(HostStats&, stats, (), (const));\n  
MOCK_METHOD(uint32_t, weight, (), (const));\n  MOCK_METHOD(void, weight, (uint32_t new_weight));\n  MOCK_METHOD(bool, used, (), (const));\n  MOCK_METHOD(void, used, (bool new_used));\n  MOCK_METHOD(const envoy::config::core::v3::Locality&, locality, (), (const));\n  MOCK_METHOD(uint32_t, priority, (), (const));\n  MOCK_METHOD(void, priority, (uint32_t));\n  MOCK_METHOD(bool, warmed, (), (const));\n\n  testing::NiceMock<MockClusterInfo> cluster_;\n  Network::TransportSocketFactoryPtr socket_factory_;\n  testing::NiceMock<Outlier::MockDetectorHostMonitor> outlier_detector_;\n  HostStats stats_;\n  mutable Stats::TestSymbolTable symbol_table_;\n  mutable std::unique_ptr<Stats::StatNameManagedStorage> locality_zone_stat_name_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/host_set.cc",
    "content": "#include \"host_set.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::Invoke;\nusing ::testing::Return;\nusing ::testing::ReturnRef;\nMockHostSet::MockHostSet(uint32_t priority, uint32_t overprovisioning_factor)\n    : priority_(priority), overprovisioning_factor_(overprovisioning_factor) {\n  ON_CALL(*this, priority()).WillByDefault(Return(priority_));\n  ON_CALL(*this, hosts()).WillByDefault(ReturnRef(hosts_));\n  ON_CALL(*this, hostsPtr()).WillByDefault(Invoke([this]() {\n    return std::make_shared<HostVector>(hosts_);\n  }));\n  ON_CALL(*this, healthyHosts()).WillByDefault(ReturnRef(healthy_hosts_));\n  ON_CALL(*this, healthyHostsPtr()).WillByDefault(Invoke([this]() {\n    return std::make_shared<HealthyHostVector>(healthy_hosts_);\n  }));\n  ON_CALL(*this, degradedHosts()).WillByDefault(ReturnRef(degraded_hosts_));\n  ON_CALL(*this, degradedHostsPtr()).WillByDefault(Invoke([this]() {\n    return std::make_shared<DegradedHostVector>(degraded_hosts_);\n  }));\n  ON_CALL(*this, excludedHosts()).WillByDefault(ReturnRef(excluded_hosts_));\n  ON_CALL(*this, excludedHostsPtr()).WillByDefault(Invoke([this]() {\n    return std::make_shared<ExcludedHostVector>(excluded_hosts_);\n  }));\n  ON_CALL(*this, hostsPerLocality()).WillByDefault(Invoke([this]() -> const HostsPerLocality& {\n    return *hosts_per_locality_;\n  }));\n  ON_CALL(*this, hostsPerLocalityPtr()).WillByDefault(Return(hosts_per_locality_));\n  ON_CALL(*this, healthyHostsPerLocality())\n      .WillByDefault(\n          Invoke([this]() -> const HostsPerLocality& { return *healthy_hosts_per_locality_; }));\n  ON_CALL(*this, healthyHostsPerLocalityPtr()).WillByDefault(Return(healthy_hosts_per_locality_));\n  ON_CALL(*this, degradedHostsPerLocality())\n      .WillByDefault(\n          Invoke([this]() -> const HostsPerLocality& { return *degraded_hosts_per_locality_; }));\n  ON_CALL(*this, 
degradedHostsPerLocalityPtr()).WillByDefault(Return(degraded_hosts_per_locality_));\n  ON_CALL(*this, excludedHostsPerLocality())\n      .WillByDefault(\n          Invoke([this]() -> const HostsPerLocality& { return *excluded_hosts_per_locality_; }));\n  ON_CALL(*this, excludedHostsPerLocalityPtr()).WillByDefault(Return(excluded_hosts_per_locality_));\n  ON_CALL(*this, localityWeights()).WillByDefault(Invoke([this]() -> LocalityWeightsConstSharedPtr {\n    return locality_weights_;\n  }));\n}\n\nMockHostSet::~MockHostSet() = default;\n\n} // namespace Upstream\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/host_set.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/callback_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockHostSet : public HostSet {\npublic:\n  MockHostSet(uint32_t priority = 0,\n              uint32_t overprovisioning_factor = kDefaultOverProvisioningFactor);\n  ~MockHostSet() override;\n\n  void runCallbacks(const HostVector added, const HostVector removed) {\n    member_update_cb_helper_.runCallbacks(priority(), added, removed);\n  }\n\n  Common::CallbackHandle* addMemberUpdateCb(PrioritySet::PriorityUpdateCb callback) {\n    return member_update_cb_helper_.add(callback);\n  }\n\n  // Upstream::HostSet\n  MOCK_METHOD(const HostVector&, hosts, (), (const));\n  MOCK_METHOD(HostVectorConstSharedPtr, hostsPtr, (), (const));\n  MOCK_METHOD(const HostVector&, healthyHosts, (), (const));\n  MOCK_METHOD(HealthyHostVectorConstSharedPtr, healthyHostsPtr, (), (const));\n  MOCK_METHOD(const HostVector&, degradedHosts, (), (const));\n  MOCK_METHOD(DegradedHostVectorConstSharedPtr, degradedHostsPtr, (), (const));\n  MOCK_METHOD(const HostVector&, excludedHosts, (), (const));\n  MOCK_METHOD(ExcludedHostVectorConstSharedPtr, excludedHostsPtr, (), (const));\n  MOCK_METHOD(const HostsPerLocality&, hostsPerLocality, (), (const));\n  MOCK_METHOD(HostsPerLocalityConstSharedPtr, hostsPerLocalityPtr, (), (const));\n  MOCK_METHOD(const HostsPerLocality&, healthyHostsPerLocality, (), (const));\n  MOCK_METHOD(HostsPerLocalityConstSharedPtr, healthyHostsPerLocalityPtr, (), (const));\n  MOCK_METHOD(const HostsPerLocality&, degradedHostsPerLocality, (), (const));\n  MOCK_METHOD(HostsPerLocalityConstSharedPtr, degradedHostsPerLocalityPtr, (), (const));\n  MOCK_METHOD(const HostsPerLocality&, excludedHostsPerLocality, (), (const));\n  MOCK_METHOD(HostsPerLocalityConstSharedPtr, excludedHostsPerLocalityPtr, (), (const));\n  
MOCK_METHOD(LocalityWeightsConstSharedPtr, localityWeights, (), (const));\n  MOCK_METHOD(absl::optional<uint32_t>, chooseHealthyLocality, ());\n  MOCK_METHOD(absl::optional<uint32_t>, chooseDegradedLocality, ());\n  MOCK_METHOD(uint32_t, priority, (), (const));\n  uint32_t overprovisioningFactor() const override { return overprovisioning_factor_; }\n  void setOverprovisioningFactor(const uint32_t overprovisioning_factor) {\n    overprovisioning_factor_ = overprovisioning_factor;\n  }\n\n  HostVector hosts_;\n  HostVector healthy_hosts_;\n  HostVector degraded_hosts_;\n  HostVector excluded_hosts_;\n  HostsPerLocalitySharedPtr hosts_per_locality_{new HostsPerLocalityImpl()};\n  HostsPerLocalitySharedPtr healthy_hosts_per_locality_{new HostsPerLocalityImpl()};\n  HostsPerLocalitySharedPtr degraded_hosts_per_locality_{new HostsPerLocalityImpl()};\n  HostsPerLocalitySharedPtr excluded_hosts_per_locality_{new HostsPerLocalityImpl()};\n  LocalityWeightsConstSharedPtr locality_weights_{{}};\n  Common::CallbackManager<uint32_t, const HostVector&, const HostVector&> member_update_cb_helper_;\n  uint32_t priority_{};\n  uint32_t overprovisioning_factor_{};\n  bool run_in_panic_mode_ = false;\n};\n} // namespace Upstream\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/load_balancer.cc",
    "content": "#include \"load_balancer.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::_;\nusing ::testing::Return;\nMockLoadBalancer::MockLoadBalancer() { ON_CALL(*this, chooseHost(_)).WillByDefault(Return(host_)); }\n\nMockLoadBalancer::~MockLoadBalancer() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/load_balancer.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/load_balancer.h\"\n\n#include \"test/mocks/upstream/host.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockLoadBalancer : public LoadBalancer {\npublic:\n  MockLoadBalancer();\n  ~MockLoadBalancer() override;\n\n  // Upstream::LoadBalancer\n  MOCK_METHOD(HostConstSharedPtr, chooseHost, (LoadBalancerContext * context));\n  MOCK_METHOD(HostConstSharedPtr, peekAnotherHost, (LoadBalancerContext * context));\n\n  std::shared_ptr<MockHost> host_{new MockHost()};\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/load_balancer_context.cc",
    "content": "#include \"test/mocks/upstream/load_balancer_context.h\"\n\nusing testing::_;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Upstream {\n\nMockLoadBalancerContext::MockLoadBalancerContext() {\n  // By default, set loads which treat everything as healthy in the first priority.\n  priority_load_.healthy_priority_load_ = HealthyLoad({100});\n  priority_load_.degraded_priority_load_ = DegradedLoad({0});\n  ON_CALL(*this, determinePriorityLoad(_, _, _)).WillByDefault(ReturnRef(priority_load_));\n}\n\nMockLoadBalancerContext::~MockLoadBalancerContext() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/load_balancer_context.h",
    "content": "#pragma once\n#include \"envoy/upstream/load_balancer.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass MockLoadBalancerContext : public LoadBalancerContext {\npublic:\n  MockLoadBalancerContext();\n  ~MockLoadBalancerContext() override;\n\n  MOCK_METHOD(absl::optional<uint64_t>, computeHashKey, ());\n  MOCK_METHOD(Router::MetadataMatchCriteria*, metadataMatchCriteria, ());\n  MOCK_METHOD(const Network::Connection*, downstreamConnection, (), (const));\n  MOCK_METHOD(const Http::RequestHeaderMap*, downstreamHeaders, (), (const));\n  MOCK_METHOD(const HealthyAndDegradedLoad&, determinePriorityLoad,\n              (const PrioritySet&, const HealthyAndDegradedLoad&,\n               const Upstream::RetryPriority::PriorityMappingFunc&));\n  MOCK_METHOD(bool, shouldSelectAnotherHost, (const Host&));\n  MOCK_METHOD(uint32_t, hostSelectionRetryCount, (), (const));\n  MOCK_METHOD(Network::Socket::OptionsSharedPtr, upstreamSocketOptions, (), (const));\n  MOCK_METHOD(Network::TransportSocketOptionsSharedPtr, upstreamTransportSocketOptions, (),\n              (const));\n\nprivate:\n  HealthyAndDegradedLoad priority_load_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/mocks.h",
    "content": "#pragma once\n\n// NOLINT(namespace-envoy)\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/data/core/v3/health_check_event.pb.h\"\n#include \"envoy/http/async_client.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include \"envoy/upstream/health_checker.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/common/callback_impl.h\"\n#include \"common/upstream/health_discovery_service.h\"\n#include \"common/upstream/load_balancer_impl.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/secret/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/tcp/mocks.h\"\n#include \"test/mocks/upstream/basic_resource_limit.h\"\n#include \"test/mocks/upstream/cds_api.h\"\n#include \"test/mocks/upstream/cluster.h\"\n#include \"test/mocks/upstream/cluster_info.h\"\n#include \"test/mocks/upstream/cluster_info_factory.h\"\n#include \"test/mocks/upstream/cluster_manager.h\"\n#include \"test/mocks/upstream/cluster_manager_factory.h\"\n#include \"test/mocks/upstream/cluster_priority_set.h\"\n#include \"test/mocks/upstream/cluster_real_priority_set.h\"\n#include \"test/mocks/upstream/cluster_update_callbacks.h\"\n#include \"test/mocks/upstream/cluster_update_callbacks_handle.h\"\n#include \"test/mocks/upstream/health_check_event_logger.h\"\n#include \"test/mocks/upstream/health_checker.h\"\n#include \"test/mocks/upstream/host_set.h\"\n#include \"test/mocks/upstream/load_balancer.h\"\n#include \"test/mocks/upstream/load_balancer_context.h\"\n#include \"test/mocks/upstream/priority_set.h\"\n#include \"test/mocks/upstream/retry_host_predicate.h\"\n#include 
\"test/mocks/upstream/retry_priority.h\"\n#include \"test/mocks/upstream/retry_priority_factory.h\"\n#include \"test/mocks/upstream/test_retry_host_predicate_factory.h\"\n#include \"test/mocks/upstream/thread_aware_load_balancer.h\"\n#include \"test/mocks/upstream/thread_local_cluster.h\"\n"
  },
  {
    "path": "test/mocks/upstream/priority_set.cc",
    "content": "#include \"priority_set.h\"\n\n#include <chrono>\n#include <functional>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nusing ::testing::_;\nusing ::testing::Invoke;\nusing ::testing::ReturnRef;\n\nMockPrioritySet::MockPrioritySet() {\n  getHostSet(0);\n  ON_CALL(*this, hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_));\n  ON_CALL(testing::Const(*this), hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_));\n  ON_CALL(*this, addMemberUpdateCb(_))\n      .WillByDefault(Invoke([this](PrioritySet::MemberUpdateCb cb) -> Common::CallbackHandle* {\n        return member_update_cb_helper_.add(cb);\n      }));\n  ON_CALL(*this, addPriorityUpdateCb(_))\n      .WillByDefault(Invoke([this](PrioritySet::PriorityUpdateCb cb) -> Common::CallbackHandle* {\n        return priority_update_cb_helper_.add(cb);\n      }));\n}\n\nMockPrioritySet::~MockPrioritySet() = default;\n\nHostSet& MockPrioritySet::getHostSet(uint32_t priority) {\n  if (host_sets_.size() < priority + 1) {\n    for (size_t i = host_sets_.size(); i <= priority; ++i) {\n      auto host_set = new ::testing::NiceMock<MockHostSet>(i);\n      host_sets_.push_back(HostSetPtr{host_set});\n      host_set->addMemberUpdateCb([this](uint32_t priority, const HostVector& hosts_added,\n                                         const HostVector& hosts_removed) {\n        runUpdateCallbacks(priority, hosts_added, hosts_removed);\n      });\n    }\n  }\n  return *host_sets_[priority];\n}\n\nvoid MockPrioritySet::runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added,\n                                         const HostVector& hosts_removed) {\n  member_update_cb_helper_.runCallbacks(hosts_added, hosts_removed);\n  priority_update_cb_helper_.runCallbacks(priority, hosts_added, hosts_removed);\n}\n\n} // namespace Upstream\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/priority_set.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/upstream.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"host_set.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockPrioritySet : public PrioritySet {\npublic:\n  MockPrioritySet();\n  ~MockPrioritySet() override;\n\n  HostSet& getHostSet(uint32_t priority);\n  void runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added,\n                          const HostVector& hosts_removed);\n\n  MOCK_METHOD(Common::CallbackHandle*, addMemberUpdateCb, (MemberUpdateCb callback), (const));\n  MOCK_METHOD(Common::CallbackHandle*, addPriorityUpdateCb, (PriorityUpdateCb callback), (const));\n  MOCK_METHOD(const std::vector<HostSetPtr>&, hostSetsPerPriority, (), (const));\n  MOCK_METHOD(std::vector<HostSetPtr>&, hostSetsPerPriority, ());\n  MOCK_METHOD(void, updateHosts,\n              (uint32_t priority, UpdateHostsParams&& update_hosts_params,\n               LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added,\n               const HostVector& hosts_removed, absl::optional<uint32_t> overprovisioning_factor));\n  MOCK_METHOD(void, batchHostUpdate, (BatchUpdateCb&));\n\n  MockHostSet* getMockHostSet(uint32_t priority) {\n    getHostSet(priority); // Ensure the host set exists.\n    return reinterpret_cast<MockHostSet*>(host_sets_[priority].get());\n  }\n\n  std::vector<HostSetPtr> host_sets_;\n  Common::CallbackManager<const HostVector&, const HostVector&> member_update_cb_helper_;\n  Common::CallbackManager<uint32_t, const HostVector&, const HostVector&>\n      priority_update_cb_helper_;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/retry_host_predicate.cc",
    "content": "#include \"retry_host_predicate.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nMockRetryHostPredicate::MockRetryHostPredicate() = default;\n\nMockRetryHostPredicate::~MockRetryHostPredicate() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/retry_host_predicate.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockRetryHostPredicate : public RetryHostPredicate {\npublic:\n  MockRetryHostPredicate();\n  ~MockRetryHostPredicate() override;\n\n  MOCK_METHOD(bool, shouldSelectAnotherHost, (const Host& candidate_host));\n  MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr));\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/retry_priority.cc",
    "content": "#include \"retry_priority.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nMockRetryPriority::~MockRetryPriority() = default;\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/retry_priority.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockRetryPriority : public RetryPriority {\npublic:\n  MockRetryPriority(const HealthyLoad& healthy_priority_load,\n                    const DegradedLoad& degraded_priority_load)\n      : priority_load_({healthy_priority_load, degraded_priority_load}) {}\n  MockRetryPriority(const MockRetryPriority& other) : priority_load_(other.priority_load_) {}\n  ~MockRetryPriority() override;\n\n  const HealthyAndDegradedLoad& determinePriorityLoad(const PrioritySet&,\n                                                      const HealthyAndDegradedLoad&,\n                                                      const PriorityMappingFunc&) override {\n    return priority_load_;\n  }\n\n  MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr));\n\nprivate:\n  const HealthyAndDegradedLoad priority_load_;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/retry_priority_factory.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"retry_priority.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::NiceMock;\nclass MockRetryPriorityFactory : public RetryPriorityFactory {\npublic:\n  MockRetryPriorityFactory(const MockRetryPriority& retry_priority)\n      : retry_priority_(retry_priority) {}\n  RetryPrioritySharedPtr createRetryPriority(const Protobuf::Message&,\n                                             ProtobufMessage::ValidationVisitor&,\n                                             uint32_t) override {\n    return std::make_shared<NiceMock<MockRetryPriority>>(retry_priority_);\n  }\n\n  std::string name() const override { return \"envoy.test_retry_priority\"; }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};\n  }\n\nprivate:\n  const MockRetryPriority& retry_priority_;\n};\n} // namespace Upstream\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/test_retry_host_predicate_factory.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/retry.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"retry_host_predicate.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::NiceMock;\nclass TestRetryHostPredicateFactory : public RetryHostPredicateFactory {\npublic:\n  RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message&, uint32_t) override {\n    return std::make_shared<NiceMock<MockRetryHostPredicate>>();\n  }\n\n  std::string name() const override { return \"envoy.test_host_predicate\"; }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};\n  }\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/thread_aware_load_balancer.cc",
    "content": "#include \"thread_aware_load_balancer.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nMockThreadAwareLoadBalancer::MockThreadAwareLoadBalancer() = default;\n\nMockThreadAwareLoadBalancer::~MockThreadAwareLoadBalancer() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/thread_aware_load_balancer.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/load_balancer.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nclass MockThreadAwareLoadBalancer : public ThreadAwareLoadBalancer {\npublic:\n  MockThreadAwareLoadBalancer();\n  ~MockThreadAwareLoadBalancer() override;\n\n  // Upstream::ThreadAwareLoadBalancer\n  MOCK_METHOD(LoadBalancerFactorySharedPtr, factory, ());\n  MOCK_METHOD(void, initialize, ());\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/thread_local_cluster.cc",
    "content": "#include \"thread_local_cluster.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::Return;\nusing ::testing::ReturnRef;\nMockThreadLocalCluster::MockThreadLocalCluster() {\n  ON_CALL(*this, prioritySet()).WillByDefault(ReturnRef(cluster_.priority_set_));\n  ON_CALL(*this, info()).WillByDefault(Return(cluster_.info_));\n  ON_CALL(*this, loadBalancer()).WillByDefault(ReturnRef(lb_));\n}\n\nMockThreadLocalCluster::~MockThreadLocalCluster() = default;\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/thread_local_cluster.h",
    "content": "#pragma once\n\n#include \"envoy/upstream/thread_local_cluster.h\"\n\n#include \"cluster_priority_set.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"load_balancer.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nusing ::testing::NiceMock;\nclass MockThreadLocalCluster : public ThreadLocalCluster {\npublic:\n  MockThreadLocalCluster();\n  ~MockThreadLocalCluster() override;\n\n  // Upstream::ThreadLocalCluster\n  MOCK_METHOD(const PrioritySet&, prioritySet, ());\n  MOCK_METHOD(ClusterInfoConstSharedPtr, info, ());\n  MOCK_METHOD(LoadBalancer&, loadBalancer, ());\n\n  NiceMock<MockClusterMockPrioritySet> cluster_;\n  NiceMock<MockLoadBalancer> lb_;\n};\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/transport_socket_match.cc",
    "content": "#include \"test/mocks/upstream/transport_socket_match.h\"\n\n#include \"common/network/raw_buffer_socket.h\"\n\nusing testing::_;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Upstream {\n\nMockTransportSocketMatcher::MockTransportSocketMatcher()\n    : MockTransportSocketMatcher(std::make_unique<Network::RawBufferSocketFactory>()) {}\n\nMockTransportSocketMatcher::MockTransportSocketMatcher(Network::TransportSocketFactoryPtr factory)\n    : socket_factory_(std::move(factory)),\n      stats_({ALL_TRANSPORT_SOCKET_MATCH_STATS(POOL_COUNTER_PREFIX(stats_store_, \"test\"))}) {\n  ON_CALL(*this, resolve(_))\n      .WillByDefault(Return(TransportSocketMatcher::MatchData(*socket_factory_, stats_, \"test\")));\n}\n\nMockTransportSocketMatcher::~MockTransportSocketMatcher() = default;\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/mocks/upstream/transport_socket_match.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Upstream {\n\nclass MockTransportSocketMatcher : public TransportSocketMatcher {\npublic:\n  MockTransportSocketMatcher();\n  MockTransportSocketMatcher(Network::TransportSocketFactoryPtr default_factory);\n  ~MockTransportSocketMatcher() override;\n  MOCK_METHOD(TransportSocketMatcher::MatchData, resolve,\n              (const envoy::config::core::v3::Metadata*), (const));\n\nprivate:\n  Network::TransportSocketFactoryPtr socket_factory_;\n  Stats::IsolatedStoreImpl stats_store_;\n  TransportSocketMatchStats stats_;\n};\n\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/per_file_coverage.sh",
    "content": "#!/bin/bash\n\n# directory:coverage_percent\n# for existing directories with low coverage.\ndeclare -a KNOWN_LOW_COVERAGE=(\n\"source/common/network:94.0\"\n\"source/common/http/http3:50.0\"\n\"source/common/tracing:94.9\"\n\"source/common/protobuf:94.3\"\n\"source/common/secret:95.2\"\n\"source/common/singleton:95.1\"\n\"source/common/api:72.9\"\n\"source/common/api/posix:71.8\"\n\"source/common/init:96.2\"\n\"source/common/json:90.6\"\n\"source/common/filesystem:96.1\"\n\"source/common/filesystem/posix:93.7\"\n\"source/common/thread_local:95.7\"\n\"source/common/crypto:0.0\"\n\"source/common/common:96.1\"\n\"source/common/common/posix:94.1\"\n\"source/common/signal:85.1\"\n\"source/exe:93.7\"\n\"source/extensions:96.3\"\n\"source/extensions/common:94.4\"\n\"source/extensions/common/crypto:91.5\"\n\"source/extensions/common/tap:95.1\"\n\"source/extensions/common/wasm:85.4\"\n\"source/extensions/common/wasm/v8:85.4\"\n\"source/extensions/common/wasm/null:77.8\"\n\"source/extensions/filters/network/sni_cluster:90.3\"\n\"source/extensions/filters/network/sni_dynamic_forward_proxy:90.9\"\n\"source/extensions/filters/network/dubbo_proxy:96.1\"\n\"source/extensions/filters/network/dubbo_proxy/router:95.1\"\n\"source/extensions/filters/network/mongo_proxy:94.0\"\n\"source/extensions/filters/network/common:96.1\"\n\"source/extensions/filters/network/common/redis:96.2\"\n\"source/extensions/filters/network/http_connection_manager:95.2\"\n\"source/extensions/filters/http/cache:80.7\"\n\"source/extensions/filters/http/cache/simple_http_cache:84.5\"\n\"source/extensions/filters/http/dynamic_forward_proxy:94.9\"\n\"source/extensions/filters/http/ip_tagging:91.2\"\n\"source/extensions/filters/http/grpc_json_transcoder:93.3\"\n\"source/extensions/filters/http/oauth2:96.5\"\n\"source/extensions/filters/listener:96.0\"\n\"source/extensions/filters/listener/tls_inspector:92.4\"\n\"source/extensions/filters/listener/http_inspector:93.3\"\n\"source/extensions/filters/u
dp:91.1\"\n\"source/extensions/filters/udp/dns_filter:96.9\"\n\"source/extensions/filters/common:94.7\"\n\"source/extensions/filters/common/expr:92.2\"\n\"source/extensions/filters/common/rbac:87.1\"\n\"source/extensions/filters/common/fault:94.3\"\n\"source/extensions/grpc_credentials:92.0\"\n\"source/extensions/health_checkers:95.9\"\n\"source/extensions/health_checkers/redis:95.9\"\n\"source/extensions/quic_listeners:84.8\"\n\"source/extensions/quic_listeners/quiche:84.8\"\n\"source/extensions/stat_sinks/statsd:85.2\"\n\"source/extensions/tracers:96.0\"\n\"source/extensions/tracers/opencensus:91.2\"\n\"source/extensions/tracers/xray:94.0\"\n\"source/extensions/transport_sockets:94.9\"\n\"source/extensions/transport_sockets/tap:95.6\"\n\"source/extensions/transport_sockets/tls:94.2\"\n\"source/extensions/transport_sockets/tls/ocsp:95.3\"\n\"source/extensions/transport_sockets/tls/private_key:76.9\"\n\"source/extensions/watchdog:69.6\" # Death tests within extensions\n\"source/extensions/watchdog/profile_action:84.9\"\n\"source/extensions/watchdog/abort_action:42.9\" # Death tests don't report LCOV\n\"source/server:94.6\"\n\"source/server/config_validation:76.6\"\n\"source/server/admin:95.3\"\n)\n\n[[ -z \"${SRCDIR}\" ]] && SRCDIR=\"${PWD}\"\nCOVERAGE_DIR=\"${SRCDIR}\"/generated/coverage\nCOVERAGE_DATA=\"${COVERAGE_DIR}/coverage.dat\"\n\nFAILED=0\nDEFAULT_COVERAGE_THRESHOLD=96.6\nDIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD\n\n# Unfortunately we have a bunch of preexisting directory with low coverage.\n# Set their low bar as their current coverage level.\nget_coverage_target() {\n  DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD\n  for FILE_PERCENT in \"${KNOWN_LOW_COVERAGE[@]}\"\n  do\n    if [[ $FILE_PERCENT =~ $1: ]]; then\n      DIRECTORY_THRESHOLD=\"${FILE_PERCENT//*:/}\"\n      return\n    fi\n  done\n}\n\n# Make sure that for each directory with code, coverage doesn't dip\n# below the default coverage threshold.\nSOURCES=$(find source/* -type d)\nwhile 
read -r DIRECTORY\ndo\n  get_coverage_target \"$DIRECTORY\"\n  COVERAGE_VALUE=$(lcov -e \"$COVERAGE_DATA\"  \"${DIRECTORY}/*\" -o /dev/null | grep line |  cut -d ' ' -f 4)\n  COVERAGE_VALUE=${COVERAGE_VALUE%?}\n  # If the coverage number is 'n' (no data found) there is 0% coverage. This is\n  # probably a directory without source code, so we skip checks.\n  #\n  # We could insist that we validate that 0% coverage directories are in a\n  # documented list, but instead of adding busy-work for folks adding\n  # non-source-containing directories, we trust reviewers to notice if there's\n  # absolutely no tests for a full directory.\n  if [[ $COVERAGE_VALUE =~ \"n\" ]]; then\n    continue;\n  fi;\n  COVERAGE_FAILED=$(echo \"${COVERAGE_VALUE}<${DIRECTORY_THRESHOLD}\" | bc)\n  if [[ \"${COVERAGE_FAILED}\" -eq 1 ]]; then\n    echo \"Code coverage for ${DIRECTORY} is lower than limit of ${DIRECTORY_THRESHOLD} (${COVERAGE_VALUE})\"\n    FAILED=1\n  fi\ndone <<< \"$SOURCES\"\n\nexit $FAILED\n"
  },
  {
    "path": "test/proto/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n    \"envoy_proto_descriptor\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nexports_files([\"bookstore.proto\"])\n\nenvoy_proto_library(\n    name = \"deprecated_proto\",\n    srcs = [\":deprecated.proto\"],\n    deps = [\"@envoy_api//envoy/annotations:pkg\"],\n)\n\nenvoy_proto_library(\n    name = \"helloworld_proto\",\n    srcs = [\":helloworld.proto\"],\n)\n\nenvoy_proto_library(\n    name = \"bookstore_proto\",\n    srcs = [\":bookstore.proto\"],\n    external_deps = [\"api_httpbody_protos\"],\n)\n\nenvoy_proto_descriptor(\n    name = \"bookstore_proto_descriptor\",\n    srcs = [\n        \"bookstore.proto\",\n        # JSON transcoder doesn't link against \":helloworld_proto_cc_proto\", so we can add it to the\n        # descriptor and test that we can actually transcode types not linked into the test binary.\n        \"helloworld.proto\",\n    ],\n    out = \"bookstore.descriptor\",\n    external_deps = [\n        \"api_httpbody_protos\",\n        \"http_api_protos\",\n        \"well_known_protos\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"sensitive_proto\",\n    srcs = [\":sensitive.proto\"],\n    deps = [\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@com_github_cncf_udpa//udpa/type/v1:pkg\",\n    ],\n)\n"
  },
  {
    "path": "test/proto/bookstore.proto",
    "content": "syntax = \"proto3\";\n\npackage bookstore;\n\nimport \"google/api/annotations.proto\";\nimport \"google/api/httpbody.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/struct.proto\";\n\n// A simple Bookstore API.\n//\n// The API manages shelves and books resources. Shelves contain books.\nservice Bookstore {\n  // Returns a list of all shelves in the bookstore.\n  rpc ListShelves(google.protobuf.Empty) returns (ListShelvesResponse) {\n    option (google.api.http) = {\n      get: \"/shelves\"\n    };\n  }\n  // Creates a new shelf in the bookstore.\n  rpc CreateShelf(CreateShelfRequest) returns (Shelf) {\n    option (google.api.http) = {\n      post: \"/shelf\"\n      body: \"shelf\"\n    };\n  }\n  // Creates a new shelf in the bookstore via a mapped package.service.method as its HTTP path.\n  rpc CreateShelfWithPackageServiceAndMethod(CreateShelfRequest) returns (Shelf) {\n    option (google.api.http) = {\n      post: \"/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod\"\n      body: \"shelf\"\n    };\n  }\n  // Creates multiple shelves with one streaming call\n  rpc BulkCreateShelf(stream CreateShelfRequest) returns (stream Shelf) {\n    option (google.api.http) = {\n      post: \"/bulk/shelves\"\n      body: \"shelf\"\n    };\n  }\n  // Returns a specific bookstore shelf.\n  rpc GetShelf(GetShelfRequest) returns (Shelf) {\n    option (google.api.http) = {\n      get: \"/shelves/{shelf}\"\n    };\n  }\n  // Deletes a shelf, including all books that are stored on the shelf.\n  rpc DeleteShelf(DeleteShelfRequest) returns (google.protobuf.Empty) {\n  }\n  // Returns a list of books on a shelf.\n  rpc ListBooks(ListBooksRequest) returns (stream Book) {\n    option (google.api.http) = {\n      get: \"/shelves/{shelf}/books\"\n    };\n  }\n  // Creates a new book.\n  rpc CreateBook(CreateBookRequest) returns (Book) {\n    option (google.api.http) = {\n      put: \"/shelves/{shelf}/books\"\n      body: \"book\"\n    
};\n  }\n  // Returns a specific book.\n  rpc GetBook(GetBookRequest) returns (Book) {\n  }\n  // Deletes a book from a shelf.\n  rpc DeleteBook(DeleteBookRequest) returns (google.protobuf.Empty) {\n    option (google.api.http) = {\n      delete: \"/shelves/{shelf}/books/{book}\"\n    };\n  }\n  rpc UpdateBook(UpdateBookRequest) returns (Book) {\n    option (google.api.http) = {\n      patch: \"/shelves/{shelf}/books/{book.id}\"\n      body: \"book\"\n    };\n  }\n  rpc BookstoreOptions(GetShelfRequest) returns (google.protobuf.Empty) {\n    option (google.api.http) = {\n      custom {kind: \"OPTIONS\" path: \"/shelves/{shelf}\"}\n    };\n  }\n  // Returns a specific author.\n  rpc GetAuthor(GetAuthorRequest) returns (Author) {\n    option (google.api.http) = {\n      get: \"/authors/{author}\"\n    };\n  }\n  rpc GetIndex(google.protobuf.Empty) returns (google.api.HttpBody) {\n    option (google.api.http) = {\n      get: \"/index\"\n    };\n  }\n  rpc GetIndexStream(google.protobuf.Empty) returns (stream google.api.HttpBody) {\n    option (google.api.http) = {\n      get: \"/indexStream\"\n    };\n  }\n  rpc PostBody(EchoBodyRequest) returns (google.protobuf.Empty) {\n    option (google.api.http) = {\n      post: \"/postBody\"\n      body: \"nested.content\"\n    };\n  }\n  rpc StreamBody(stream EchoBodyRequest) returns (google.protobuf.Empty) {\n    option (google.api.http) = {\n      post: \"/streamBody\"\n      body: \"nested.content\"\n    };\n  }\n  rpc EchoBody(EchoBodyRequest) returns (google.api.HttpBody) {\n    option (google.api.http) = {\n      post: \"/echoBody\"\n      body: \"nested.content\"\n    };\n  }\n  rpc EchoResponseBodyPath(google.protobuf.Empty) returns (EchoBodyRequest) {\n    option (google.api.http) = {\n      get: \"/echoResponseBodyPath\"\n      response_body: \"nested.content\"\n    };\n  }\n  rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) {\n    option (google.api.http) = {\n      post: \"/echoStruct\"\n      body: 
\"content\"\n    };\n  }\n  // To test grpc transcoding with an unknown field.\n  // This could happen when the grpc server is using a updated proto with a new field,\n  // but Envoy transcoding config is still using the old version.\n  rpc GetBigBook(google.protobuf.Empty) returns (OldBigBook) {\n    option (google.api.http) = {\n      get: \"/bigbook\"\n    };\n  }\n}\n\nservice ServiceWithResponseBody {\n  rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) {\n    option (google.api.http) = {\n      get: \"/echoStruct\"\n      response_body: \"content\"\n    };\n  }\n}\n\nservice ServiceWithInvalidRequestBodyPath {\n  rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) {\n    option (google.api.http) = {\n      get: \"/echoStruct\"\n      body: \"unknown.field\"\n    };\n  }\n}\n\nservice ServiceWithInvalidResponseBodyPath {\n  rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) {\n    option (google.api.http) = {\n      get: \"/echoStruct\"\n      response_body: \"unknown.field\"\n    };\n  }\n}\n\n// A shelf resource.\nmessage Shelf {\n  // A unique shelf id.\n  int64 id = 1;\n  // A theme of the shelf (fiction, poetry, etc).\n  string theme = 2;\n}\n\n// A book resource.\nmessage Book {\n  // A unique book id.\n  int64 id = 1;\n  // An author of the book.\n  string author = 2;\n  // A book title.\n  string title = 3;\n  // Quotes from the book.\n  repeated string quotes = 4;\n}\n\n// An author resource.\nmessage Author {\n  // A unique author id.\n  int64 id = 1;\n  enum Gender {\n    UNKNOWN = 0;\n    MALE = 1;\n    FEMALE = 2;\n  };\n  Gender gender = 2;\n  string first_name = 3;\n  string last_name = 4 [json_name = \"lname\"];\n}\n\n// Response to ListShelves call.\nmessage ListShelvesResponse {\n  // Shelves in the bookstore.\n  repeated Shelf shelves = 1;\n}\n\n// Request message for CreateShelf method.\nmessage CreateShelfRequest {\n  // The shelf resource to create.\n  Shelf shelf = 1;\n}\n\n// Request message for GetShelf 
method.\nmessage GetShelfRequest {\n  // The ID of the shelf resource to retrieve.\n  int64 shelf = 1;\n}\n\n// Request message for DeleteShelf method.\nmessage DeleteShelfRequest {\n  // The ID of the shelf to delete.\n  int64 shelf = 1;\n}\n\n// Request message for ListBooks method.\nmessage ListBooksRequest {\n  // ID of the shelf which books to list.\n  int64 shelf = 1;\n}\n\n// Request message for CreateBook method.\nmessage CreateBookRequest {\n  // The ID of the shelf on which to create a book.\n  int64 shelf = 1;\n  // A book resource to create on the shelf.\n  Book book = 2;\n}\n\n// Request message for GetBook method.\nmessage GetBookRequest {\n  // The ID of the shelf from which to retrieve a book.\n  int64 shelf = 1;\n  // The ID of the book to retrieve.\n  int64 book = 2;\n}\n\n// Request message for UpdateBook method\nmessage UpdateBookRequest {\n  // The ID of the shelf from which to retrieve a book.\n  int64 shelf = 1;\n  // A book resource to update on the shelf.\n  Book book = 2;\n}\n\n// Request message for DeleteBook method.\nmessage DeleteBookRequest {\n  // The ID of the shelf from which to delete a book.\n  int64 shelf = 1;\n  // The ID of the book to delete.\n  int64 book = 2;\n}\n\n// Request message for GetAuthor method.\nmessage GetAuthorRequest {\n  // The ID of the author resource to retrieve.\n  int64 author = 1;\n}\n\nmessage EchoBodyRequest {\n  message Nested {\n    google.api.HttpBody content = 1;\n  }\n  string arg = 1;\n  string unused = 2;\n  Nested nested = 3;\n}\n\n// Request and Response message for EchoStructReqResp method.\nmessage EchoStructReqResp {\n  // The content of request.\n  google.protobuf.Struct content = 1;\n}\n\n// Test message for deeply-nested HttpBody field.\nmessage DeepNestedBody {\n  message Nested {\n    message Nested {\n      message Nested {\n        google.api.HttpBody body = 500000000;\n      }\n      Nested nested = 100000000;\n    }\n    message Extra {\n      int32 field = 1;\n    }\n    Nested 
nested = 1000000;\n    Extra extra = 50;\n  }\n  message Extra {\n    string field = 1;\n  }\n  Nested nested = 1;\n  Extra extra = 2;\n}\n\n// gRPC server is using BigBook, but envoy transcoder filter is using\n// OldBigBook with missing `field1`.\nmessage BigBook {\n  string field1 = 1;\n  string field2 = 2;\n  string field3 = 3;\n}\n\n// The BigBook message with missing `field1`.\nmessage OldBigBook {\n  string field2 = 2;\n  string field3 = 3;\n}\n"
  },
  {
    "path": "test/proto/deprecated.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.test.deprecation_test;\n\nimport \"envoy/annotations/deprecation.proto\";\n\nmessage Base {\n  string not_deprecated = 1;\n  string is_deprecated = 2 [deprecated = true];\n  string is_deprecated_fatal = 3\n      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n  message InnerMessage {\n    string inner_not_deprecated = 1;\n    string inner_deprecated = 2 [deprecated = true];\n    string inner_deprecated_fatal = 3\n        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];\n  }\n  InnerMessage deprecated_message = 4 [deprecated = true];\n  InnerMessage not_deprecated_message = 5;\n  repeated InnerMessage repeated_message = 6;\n  repeated InnerMessage deprecated_repeated_message = 7 [deprecated = true];\n\n  // For deprecated enum value testing, stick the enum in a container, to avoid\n  // the default instantiation of Base having a deprecated-by-default value.\n  enum DeprecationEnum {\n    DEPRECATED_DEFAULT = 0 [deprecated = true];\n    NOT_DEPRECATED = 1;\n    DEPRECATED_NOT_DEFAULT = 2 [deprecated = true];\n    DEPRECATED_FATAL = 3 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];\n  }\n  message InnerMessageWithDeprecationEnum {\n    DeprecationEnum deprecated_enum = 1;\n  }\n  InnerMessageWithDeprecationEnum enum_container = 8;\n}\n\n// Same structure as base but with protoxform migration style transforms applied\n// manually.\nmessage UpgradedBase {\n  string not_deprecated = 1;\n  string hidden_envoy_deprecated_is_deprecated = 2 [deprecated = true];\n  string hidden_envoy_deprecated_is_deprecated_fatal = 3 [deprecated = true];\n  message InnerMessage {\n    string inner_not_deprecated = 1;\n    string hidden_envoy_deprecated_inner_deprecated = 2 [deprecated = true];\n    string hidden_envoy_deprecated_inner_deprecated_fatal = 3 [deprecated = true];\n  }\n  InnerMessage hidden_envoy_deprecated_deprecated_message = 4 [deprecated = true];\n 
 InnerMessage not_deprecated_message = 5;\n  repeated InnerMessage repeated_message = 6;\n  repeated InnerMessage hidden_envoy_deprecated_deprecated_repeated_message = 7 [deprecated = true];\n\n  // For deprecated enum value testing, stick the enum in a container, to avoid\n  // the default instantiation of Base having a deprecated-by-default value.\n  enum DeprecationEnum {\n    hidden_envoy_deprecated_DEPRECATED_DEFAULT = 0 [deprecated = true];\n    NOT_DEPRECATED = 1;\n    hidden_envoy_deprecated_DEPRECATED_NOT_DEFAULT = 2 [deprecated = true];\n  }\n  message InnerMessageWithDeprecationEnum {\n    DeprecationEnum deprecated_enum = 1;\n  }\n  InnerMessageWithDeprecationEnum enum_container = 8;\n}\n"
  },
  {
    "path": "test/proto/helloworld.proto",
    "content": "// Copyright 2015, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\noption cc_generic_services = true;\noption java_package = \"io.grpc.examples.helloworld\";\noption java_outer_classname = \"HelloWorldProto\";\n\npackage helloworld;\n\n// The greeting service definition.\nservice Greeter {\n  // Sends a greeting\n  rpc SayHello(HelloRequest) returns (HelloReply) {\n  }\n}\n\n// The request message containing the user's name.\nmessage HelloRequest {\n  string name = 1;\n\n  // Use this if you are including non-UTF-8.\n  bytes name_bytes = 2;\n}\n\n// The response message containing the greetings\nmessage HelloReply {\n  string message = 1;\n}\n"
  },
  {
    "path": "test/proto/sensitive.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.test;\n\noption java_package = \"io.envoyproxy.envoy.test\";\noption java_outer_classname = \"SensitiveProto\";\noption java_multiple_files = true;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/struct.proto\";\n\nimport \"udpa/annotations/sensitive.proto\";\nimport \"udpa/type/v1/typed_struct.proto\";\n\nmessage Sensitive {\n  string sensitive_string = 1 [(udpa.annotations.sensitive) = true];\n  repeated string sensitive_repeated_string = 2 [(udpa.annotations.sensitive) = true];\n  bytes sensitive_bytes = 3 [(udpa.annotations.sensitive) = true];\n  repeated bytes sensitive_repeated_bytes = 4 [(udpa.annotations.sensitive) = true];\n  int64 sensitive_int = 5 [(udpa.annotations.sensitive) = true];\n  repeated int64 sensitive_repeated_int = 6 [(udpa.annotations.sensitive) = true];\n  Sensitive sensitive_message = 7 [(udpa.annotations.sensitive) = true];\n  repeated Sensitive sensitive_repeated_message = 8 [(udpa.annotations.sensitive) = true];\n  google.protobuf.Any sensitive_any = 9 [(udpa.annotations.sensitive) = true];\n  repeated google.protobuf.Any sensitive_repeated_any = 10 [(udpa.annotations.sensitive) = true];\n  udpa.type.v1.TypedStruct sensitive_typed_struct = 11 [(udpa.annotations.sensitive) = true];\n  repeated udpa.type.v1.TypedStruct sensitive_repeated_typed_struct = 12\n      [(udpa.annotations.sensitive) = true];\n\n  string insensitive_string = 101;\n  repeated string insensitive_repeated_string = 102;\n  bytes insensitive_bytes = 103;\n  repeated bytes insensitive_repeated_bytes = 104;\n  int64 insensitive_int = 105;\n  repeated int64 insensitive_repeated_int = 106;\n  Sensitive insensitive_message = 107;\n  repeated Sensitive insensitive_repeated_message = 108;\n  google.protobuf.Any insensitive_any = 109;\n  repeated google.protobuf.Any insensitive_repeated_any = 110;\n  udpa.type.v1.TypedStruct insensitive_typed_struct = 111;\n  repeated udpa.type.v1.TypedStruct 
insensitive_repeated_typed_struct = 112;\n}\n"
  },
  {
    "path": "test/run_envoy_bazel_coverage.sh",
    "content": "#!/bin/bash\n\nset -e\n\n[[ -z \"${SRCDIR}\" ]] && SRCDIR=\"${PWD}\"\n[[ -z \"${VALIDATE_COVERAGE}\" ]] && VALIDATE_COVERAGE=true\n[[ -z \"${FUZZ_COVERAGE}\" ]] && FUZZ_COVERAGE=false\n[[ -z \"${COVERAGE_THRESHOLD}\" ]] && COVERAGE_THRESHOLD=96.5\nCOVERAGE_TARGET=\"${COVERAGE_TARGET:-}\"\nread -ra BAZEL_BUILD_OPTIONS <<< \"${BAZEL_BUILD_OPTIONS:-}\"\n\necho \"Starting run_envoy_bazel_coverage.sh...\"\necho \"    PWD=$(pwd)\"\necho \"    SRCDIR=${SRCDIR}\"\necho \"    VALIDATE_COVERAGE=${VALIDATE_COVERAGE}\"\n\n# This is the target that will be run to generate coverage data. It can be overridden by consumer\n# projects that want to run coverage on a different/combined target.\n# Command-line arguments take precedence over ${COVERAGE_TARGET}.\nif [[ $# -gt 0 ]]; then\n  COVERAGE_TARGETS=(\"$@\")\nelif [[ -n \"${COVERAGE_TARGET}\" ]]; then\n  COVERAGE_TARGETS=(\"${COVERAGE_TARGET}\")\nelse\n  COVERAGE_TARGETS=(//test/...)\nfi\n\nif [[ \"${FUZZ_COVERAGE}\" == \"true\" ]]; then\n  # Filter targets to just fuzz tests.\n  _targets=$(bazel query \"attr('tags', 'fuzz_target', ${COVERAGE_TARGETS[*]})\")\n  COVERAGE_TARGETS=()\n  while read -r line; do COVERAGE_TARGETS+=(\"$line\"); done \\\n      <<< \"$_targets\"\n  BAZEL_BUILD_OPTIONS+=(\n      \"--config=fuzz-coverage\"\n      \"--test_tag_filters=-nocoverage\")\nelse\n  BAZEL_BUILD_OPTIONS+=(\n      \"--config=test-coverage\"\n      \"--test_tag_filters=-nocoverage,-fuzz_target\")\nfi\n\nbazel coverage \"${BAZEL_BUILD_OPTIONS[@]}\" \"${COVERAGE_TARGETS[@]}\"\n\n# Collecting profile and testlogs\n[[ -z \"${ENVOY_BUILD_PROFILE}\" ]] || cp -f \"$(bazel info output_base)/command.profile.gz\" \"${ENVOY_BUILD_PROFILE}/coverage.profile.gz\" || true\n[[ -z \"${ENVOY_BUILD_DIR}\" ]] || find bazel-testlogs/ -name test.log | tar zcf \"${ENVOY_BUILD_DIR}/testlogs.tar.gz\" -T -\n\nCOVERAGE_DIR=\"${SRCDIR}\"/generated/coverage && [[ ${FUZZ_COVERAGE} == \"true\" ]] && 
COVERAGE_DIR=\"${SRCDIR}\"/generated/fuzz_coverage\n\nrm -rf \"${COVERAGE_DIR}\"\nmkdir -p \"${COVERAGE_DIR}\"\n\nCOVERAGE_DATA=\"${COVERAGE_DIR}/coverage.dat\"\ncp bazel-out/_coverage/_coverage_report.dat \"${COVERAGE_DATA}\"\n\nCOVERAGE_VALUE=\"$(genhtml --prefix \"${PWD}\" --output \"${COVERAGE_DIR}\" \"${COVERAGE_DATA}\" | tee /dev/stderr | grep lines... | cut -d ' ' -f 4)\"\nCOVERAGE_VALUE=${COVERAGE_VALUE%?}\n\nif [ \"${FUZZ_COVERAGE}\" == \"true\" ]\nthen\n  [[ -z \"${ENVOY_FUZZ_COVERAGE_ARTIFACT}\" ]] || tar zcf \"${ENVOY_FUZZ_COVERAGE_ARTIFACT}\" -C \"${COVERAGE_DIR}\" --transform 's/^\\./fuzz_coverage/' .\nelse\n  [[ -z \"${ENVOY_COVERAGE_ARTIFACT}\" ]] || tar zcf \"${ENVOY_COVERAGE_ARTIFACT}\" -C \"${COVERAGE_DIR}\" --transform 's/^\\./coverage/' .\nfi\n\nif [[ \"$VALIDATE_COVERAGE\" == \"true\" ]]; then\n  if [[ \"${FUZZ_COVERAGE}\" == \"true\" ]]; then\n    COVERAGE_THRESHOLD=27.0\n  fi\n  COVERAGE_FAILED=$(echo \"${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}\" | bc)\n  if [[ \"${COVERAGE_FAILED}\" -eq 1 ]]; then\n      echo \"Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD}\"\n      exit 1\n  else\n      echo \"Code coverage ${COVERAGE_VALUE} is good and higher than limit of ${COVERAGE_THRESHOLD}\"\n  fi\nfi\n\n# We want to allow per_file_coverage to fail without exiting this script.\nset +e\nif [[ \"$VALIDATE_COVERAGE\" == \"true\" ]] && [[ \"${FUZZ_COVERAGE}\" == \"false\" ]]; then\n  echo \"Checking per-extension coverage\"\n  output=$(./test/per_file_coverage.sh)\n\n  if [ $? -eq 1 ]; then\n    echo Per-extension coverage failed:\n    echo \"$output\"\n    exit 1\n  fi\n  echo Per-extension coverage passed.\nfi\n\necho \"HTML coverage report is in ${COVERAGE_DIR}/index.html\"\n"
  },
  {
    "path": "test/server/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_benchmark_test\",\n    \"envoy_cc_benchmark_binary\",\n    \"envoy_cc_fuzz_test\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_select_hot_restart\",\n)\nload(\"//source/extensions:all_extensions.bzl\", \"envoy_all_extensions\")\nload(\"//bazel:repositories.bzl\", \"PPC_SKIP_TARGETS\", \"WINDOWS_SKIP_TARGETS\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"api_listener_test\",\n    srcs = [\"api_listener_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/server:listener_manager_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:listener_component_factory_mocks\",\n        \"//test/mocks/server:worker_factory_mocks\",\n        \"//test/mocks/server:worker_mocks\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"backtrace_test\",\n    srcs = [\"backtrace_test.cc\"],\n    tags = [\"backtrace\"],\n    deps = [\n        \"//source/server:backtrace_lib\",\n        \"//test/test_common:logging_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"configuration_impl_test\",\n    srcs = [\"configuration_impl_test.cc\"],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/upstream:cluster_manager_lib\",\n        \"//source/extensions/stat_sinks/statsd:config\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/server:configuration_lib\",\n        \"//test/common/upstream:utility_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/network:network_mocks\",\n    
    \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/metrics/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"connection_handler_test\",\n    srcs = [\"connection_handler_test.cc\"],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:utility_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:connection_balancer_lib\",\n        \"//source/common/network:udp_default_writer_config\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/server:active_raw_udp_listener_config\",\n        \"//source/server:connection_handler_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"drain_manager_impl_test\",\n    srcs = [\"drain_manager_impl_test.cc\"],\n    deps = [\n        \"//source/server:drain_manager_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"hot_restart_impl_test\",\n    srcs = envoy_select_hot_restart([\"hot_restart_impl_test.cc\"]),\n    deps = [\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/server:hot_restart_lib\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:logging_lib\",\n        
\"//test/test_common:threadsafe_singleton_injector_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"hot_restarting_parent_test\",\n    srcs = envoy_select_hot_restart([\"hot_restarting_parent_test.cc\"]),\n    deps = [\n        \"//source/common/stats:stats_lib\",\n        \"//source/server:hot_restart_lib\",\n        \"//source/server:hot_restarting_child\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:server_mocks\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"guarddog_impl_test\",\n    size = \"small\",\n    srcs = [\"guarddog_impl_test.cc\"],\n    # Fails intermittantly on local build\n    tags = [\"flaky_on_windows\"],\n    deps = [\n        \"//include/envoy/common:time_interface\",\n        \"//source/common/api:api_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/server:guarddog_lib\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/server:watchdog_config_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"options_impl_test\",\n    srcs = [\"options_impl_test.cc\"],\n    deps = [\n        \"//include/envoy/config:typed_config_interface\",\n        \"//include/envoy/server:filter_config_interface\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/filters/http:well_known_names\",\n        \"//source/extensions/filters/http/buffer:config\",\n        \"//source/server:options_lib\",\n        \"//test/mocks/api:api_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n    
    \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/http/buffer/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"overload_manager_impl_test\",\n    srcs = [\"overload_manager_impl_test.cc\"],\n    deps = [\n        \"//include/envoy/registry\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/extensions/resource_monitors/common:factory_base_lib\",\n        \"//source/server:overload_manager_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/overload/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"lds_api_test\",\n    srcs = [\"lds_api_test.cc\"],\n    data = [\n        \"//test/config/integration/certs\",\n    ],\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/server:lds_api_lib\",\n        \"//test/mocks/config:config_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/server:listener_manager_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"listener_manager_impl_test_lib\",\n    hdrs = [\"listener_manager_impl_test.h\"],\n    data = [\"//test/extensions/transport_sockets/tls/test_data:certs\"],\n    deps 
= [\n        \"//source/common/init:manager_lib\",\n        \"//source/server:listener_manager_lib\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:drain_manager_mocks\",\n        \"//test/mocks/server:guard_dog_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:listener_component_factory_mocks\",\n        \"//test/mocks/server:worker_factory_mocks\",\n        \"//test/mocks/server:worker_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"listener_manager_impl_test\",\n    srcs = [\"listener_manager_impl_test.cc\"],\n    deps = [\n        \":listener_manager_impl_test_lib\",\n        \":utility_lib\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/network:addr_family_aware_socket_option_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:socket_option_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/filters/listener/original_dst:config\",\n        \"//source/extensions/filters/listener/proxy_protocol:config\",\n        \"//source/extensions/filters/listener/tls_inspector:config\",\n        \"//source/extensions/filters/network/http_connection_manager:config\",\n        \"//source/extensions/filters/network/tcp_proxy:config\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        
\"//source/extensions/transport_sockets/tls:config\",\n        \"//source/extensions/transport_sockets/tls:ssl_socket_lib\",\n        \"//source/server:active_raw_udp_listener_config\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:registry_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\n# Stand-alone quic test because of FIPS.\nenvoy_cc_test(\n    name = \"listener_manager_impl_quic_only_test\",\n    srcs = [\"listener_manager_impl_quic_only_test.cc\"],\n    tags = [\n        \"nofips\",\n        # Skipping as quiche quic_gso_batch_writer.h does not exist on Windows\n        # required by quic_stream_send_buffer.cc\n        \"skip_on_windows\",\n    ],\n    deps = [\n        \":listener_manager_impl_test_lib\",\n        \":utility_lib\",\n        \"//source/extensions/quic_listeners/quiche:quic_factory_lib\",\n        \"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"filter_chain_manager_impl_test\",\n    srcs = [\"filter_chain_manager_impl_test.cc\"],\n    data = [\"//test/extensions/transport_sockets/tls/test_data:certs\"],\n    deps = [\n        \":utility_lib\",\n        \"//source/common/api:os_sys_calls_lib\",\n        \"//source/common/config:metadata_lib\",\n        \"//source/common/network:addr_family_aware_socket_option_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:socket_option_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf\",\n        \"//source/extensions/filters/network/http_connection_manager:config\",\n        
\"//source/extensions/transport_sockets/raw_buffer:config\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//source/extensions/transport_sockets/tls:ssl_socket_lib\",\n        \"//source/server:filter_chain_manager_lib\",\n        \"//source/server:listener_manager_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:drain_manager_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:threadsafe_singleton_injector_lib\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"server_fuzz_test\",\n    srcs = [\"server_fuzz_test.cc\"],\n    corpus = \"server_corpus\",\n    deps = [\n        \"//source/common/thread_local:thread_local_lib\",\n        \"//source/server:server_lib\",\n        \"//test/integration:integration_lib\",\n        \"//test/mocks/server:options_mocks\",\n        \"//test/mocks/server:hot_restart_mocks\",\n        \"//test/test_common:environment_lib\",\n    ] + select({\n        \"//bazel:windows_x86_64\": envoy_all_extensions(WINDOWS_SKIP_TARGETS),\n        \"//bazel:linux_ppc\": envoy_all_extensions(PPC_SKIP_TARGETS),\n        \"//conditions:default\": envoy_all_extensions(),\n    }),\n)\n\nfilegroup(\n    name = \"server_test_data\",\n    srcs = glob([\"test_data/server/**\"]),\n)\n\nfilegroup(\n    name = \"runtime_test_data\",\n    srcs = glob([\"test_data/runtime/**\"]),\n)\n\nfilegroup(\n    name = \"static_validation_test_data\",\n    srcs = glob([\"test_data/static_validation/**\"]),\n)\n\nenvoy_cc_test(\n    name = \"server_test\",\n    srcs = [\"server_test.cc\"],\n    data = [\n        \":runtime_test_data\",\n        \":server_test_data\",\n        
\":static_validation_test_data\",\n    ],\n    deps = [\n        \"//source/common/version:version_lib\",\n        \"//source/extensions/access_loggers/file:config\",\n        \"//source/extensions/filters/http/buffer:config\",\n        \"//source/extensions/filters/http/grpc_http1_bridge:config\",\n        \"//source/extensions/filters/http/health_check:config\",\n        \"//source/extensions/filters/http/router:config\",\n        \"//source/extensions/filters/network/http_connection_manager:config\",\n        \"//source/extensions/filters/network/redis_proxy:config\",\n        \"//source/extensions/stat_sinks/statsd:config\",\n        \"//source/extensions/tracers/zipkin:config\",\n        \"//source/server:process_context_lib\",\n        \"//source/server:server_lib\",\n        \"//test/common/config:dummy_config_proto_cc_proto\",\n        \"//test/common/stats:stat_test_utility_lib\",\n        \"//test/integration:integration_lib\",\n        \"//test/mocks/server:bootstrap_extension_factory_mocks\",\n        \"//test/mocks/server:hot_restart_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:options_mocks\",\n        \"//test/mocks/server:overload_manager_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"ssl_context_manager_test\",\n    srcs = [\"ssl_context_manager_test.cc\"],\n    deps = [\n        \"//source/server:ssl_context_manager_lib\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = 
\"utility_lib\",\n    hdrs = [\"utility.h\"],\n    deps = [\n        \"//source/common/protobuf:utility_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"worker_impl_test\",\n    srcs = [\"worker_impl_test.cc\"],\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/server:worker_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:guard_dog_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/mocks/server:overload_manager_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_benchmark_binary(\n    name = \"filter_chain_benchmark_test\",\n    srcs = [\"filter_chain_benchmark_test.cc\"],\n    external_deps = [\n        \"benchmark\",\n        \"googletest\",\n    ],\n    deps = [\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"//source/server:filter_chain_manager_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:factory_context_mocks\",\n        # tranport socket config registration\n        \"//source/extensions/transport_sockets/tls:config\",\n    ],\n)\n\nenvoy_benchmark_test(\n    name = \"filter_chain_benchmark_test_benchmark_test\",\n    timeout = \"long\",\n    benchmark_binary = \"filter_chain_benchmark_test\",\n)\n"
  },
  {
    "path": "test/server/admin/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"admin_instance_lib\",\n    srcs = [\"admin_instance.cc\"],\n    hdrs = [\"admin_instance.h\"],\n    deps = [\n        \"//source/server/admin:admin_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"admin_test\",\n    srcs = [\"admin_test.cc\"],\n    deps = [\n        \":admin_instance_lib\",\n        \"//include/envoy/json:json_object_interface\",\n        \"//include/envoy/runtime:runtime_interface\",\n        \"//source/common/http:message_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/protobuf\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//source/server/admin:admin_lib\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"admin_filter_test\",\n    srcs = [\"admin_filter_test.cc\"],\n    deps = [\n        \"//source/server/admin:admin_filter_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:environment_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"stats_handler_test\",\n    srcs = [\"stats_handler_test.cc\"],\n    deps = [\n        \":admin_instance_lib\",\n        \"//source/common/stats:thread_local_store_lib\",\n        \"//source/server/admin:stats_handler_lib\",\n        
\"//test/test_common:logging_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"runtime_handler_test\",\n    srcs = [\"runtime_handler_test.cc\"],\n    deps = [\":admin_instance_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"prometheus_stats_test\",\n    srcs = [\"prometheus_stats_test.cc\"],\n    deps = [\n        \"//source/server/admin:prometheus_stats_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"logs_handler_test\",\n    srcs = [\"logs_handler_test.cc\"],\n    deps = [\n        \":admin_instance_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"profiling_handler_test\",\n    srcs = [\"profiling_handler_test.cc\"],\n    deps = [\n        \":admin_instance_lib\",\n        \"//test/test_common:logging_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"server_info_handler_test\",\n    srcs = [\"server_info_handler_test.cc\"],\n    deps = [\n        \":admin_instance_lib\",\n        \"//source/extensions/transport_sockets/tls:context_config_lib\",\n        \"//test/test_common:logging_lib\",\n        \"//test/test_common:test_runtime_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"clusters_handler_test\",\n    srcs = [\"clusters_handler_test.cc\"],\n    deps = [\n        \":admin_instance_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"config_dump_handler_test\",\n    srcs = [\"config_dump_handler_test.cc\"],\n    deps = [\n        \":admin_instance_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"init_dump_handler_test\",\n    srcs = [\"init_dump_handler_test.cc\"],\n    deps = [\n        \":admin_instance_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"config_tracker_impl_test\",\n    srcs = [\"config_tracker_impl_test.cc\"],\n    deps = [\n        \"//source/server/admin:config_tracker_lib\",\n        
\"//test/mocks:common_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/server/admin/admin_filter_test.cc",
    "content": "#include \"server/admin/admin_filter.h\"\n\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/environment.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::InSequence;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Server {\n\nclass AdminFilterTest : public testing::TestWithParam<Network::Address::IpVersion> {\npublic:\n  AdminFilterTest() : filter_(adminServerCallback), request_headers_{{\":path\", \"/\"}} {\n    filter_.setDecoderFilterCallbacks(callbacks_);\n  }\n\n  NiceMock<MockInstance> server_;\n  Stats::IsolatedStoreImpl listener_scope_;\n  AdminFilter filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks_;\n  Http::TestRequestHeaderMapImpl request_headers_;\n\n  static Http::Code adminServerCallback(absl::string_view path_and_query,\n                                        Http::ResponseHeaderMap& response_headers,\n                                        Buffer::OwnedImpl& response, AdminFilter& filter) {\n    // silence compiler warnings for unused params\n    UNREFERENCED_PARAMETER(path_and_query);\n    UNREFERENCED_PARAMETER(response_headers);\n    UNREFERENCED_PARAMETER(filter);\n\n    response.add(\"OK\\n\");\n    return Http::Code::OK;\n  }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminFilterTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminFilterTest, HeaderOnly) {\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers_, true));\n  ASSERT_TRUE(callbacks_.stream_info_.responseCodeDetails().has_value());\n  EXPECT_EQ(callbacks_.stream_info_.responseCodeDetails().value(), \"admin_filter_response\");\n}\n\nTEST_P(AdminFilterTest, Body) {\n  InSequence s;\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            
filter_.decodeHeaders(request_headers_, false));\n  Buffer::OwnedImpl data(\"hello\");\n  Http::MetadataMap metadata_map{{\"metadata\", \"metadata\"}};\n  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map));\n  EXPECT_CALL(callbacks_, addDecodedData(_, false));\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(data, true));\n}\n\nTEST_P(AdminFilterTest, Trailers) {\n  InSequence s;\n\n  EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,\n            filter_.decodeHeaders(request_headers_, false));\n  Buffer::OwnedImpl data(\"hello\");\n  EXPECT_CALL(callbacks_, addDecodedData(_, false));\n  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(data, false));\n  EXPECT_CALL(callbacks_, decodingBuffer());\n  filter_.getRequestBody();\n  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));\n  Http::TestRequestTrailerMapImpl request_trailers;\n  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_.decodeTrailers(request_trailers));\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/admin_instance.cc",
    "content": "#include \"test/server/admin/admin_instance.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nAdminInstanceTest::AdminInstanceTest()\n    : address_out_path_(TestEnvironment::temporaryPath(\"admin.address\")),\n      cpu_profile_path_(TestEnvironment::temporaryPath(\"envoy.prof\")),\n      admin_(cpu_profile_path_, server_), request_headers_{{\":path\", \"/\"}},\n      admin_filter_(admin_.createCallbackFunction()) {\n  admin_.startHttpListener(\"/dev/null\", address_out_path_,\n                           Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr,\n                           listener_scope_.createScope(\"listener.admin.\"));\n  EXPECT_EQ(std::chrono::milliseconds(100), admin_.drainTimeout());\n  admin_.tracingStats().random_sampling_.inc();\n  EXPECT_TRUE(admin_.setCurrentClientCertDetails().empty());\n  admin_filter_.setDecoderFilterCallbacks(callbacks_);\n}\n\nHttp::Code AdminInstanceTest::runCallback(absl::string_view path_and_query,\n                                          Http::ResponseHeaderMap& response_headers,\n                                          Buffer::Instance& response, absl::string_view method,\n                                          absl::string_view body) {\n  if (!body.empty()) {\n    request_headers_.setReferenceContentType(Http::Headers::get().ContentTypeValues.FormUrlEncoded);\n    callbacks_.buffer_ = std::make_unique<Buffer::OwnedImpl>(body);\n  }\n\n  request_headers_.setMethod(method);\n  admin_filter_.decodeHeaders(request_headers_, false);\n\n  return admin_.runCallback(path_and_query, response_headers, response, admin_filter_);\n}\n\nHttp::Code AdminInstanceTest::getCallback(absl::string_view path_and_query,\n                                          Http::ResponseHeaderMap& response_headers,\n                                          Buffer::Instance& response) {\n  return runCallback(path_and_query, response_headers, response,\n                     
Http::Headers::get().MethodValues.Get);\n}\n\nHttp::Code AdminInstanceTest::postCallback(absl::string_view path_and_query,\n                                           Http::ResponseHeaderMap& response_headers,\n                                           Buffer::Instance& response) {\n  return runCallback(path_and_query, response_headers, response,\n                     Http::Headers::get().MethodValues.Post);\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/admin_instance.h",
    "content": "#pragma once\n\n#include \"server/admin/admin.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass AdminInstanceTest : public testing::TestWithParam<Network::Address::IpVersion> {\npublic:\n  AdminInstanceTest();\n\n  Http::Code runCallback(absl::string_view path_and_query,\n                         Http::ResponseHeaderMap& response_headers, Buffer::Instance& response,\n                         absl::string_view method, absl::string_view body = absl::string_view());\n\n  Http::Code getCallback(absl::string_view path_and_query,\n                         Http::ResponseHeaderMap& response_headers, Buffer::Instance& response);\n\n  Http::Code postCallback(absl::string_view path_and_query,\n                          Http::ResponseHeaderMap& response_headers, Buffer::Instance& response);\n\n  std::string address_out_path_;\n  std::string cpu_profile_path_;\n  NiceMock<MockInstance> server_;\n  Stats::IsolatedStoreImpl listener_scope_;\n  AdminImpl admin_;\n  Http::TestRequestHeaderMapImpl request_headers_;\n  Server::AdminFilter admin_filter_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks_;\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/admin_test.cc",
    "content": "#include <algorithm>\n#include <fstream>\n#include <memory>\n#include <regex>\n#include <vector>\n\n#include \"envoy/json/json_object.h\"\n#include \"envoy/upstream/outlier_detection.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/http/message_impl.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/upstream/upstream_impl.h\"\n\n#include \"test/server/admin/admin_instance.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::HasSubstr;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Server {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminInstanceTest, MutatesErrorWithGet) {\n  Buffer::OwnedImpl data;\n  Http::TestResponseHeaderMapImpl header_map;\n  const std::string path(\"/healthcheck/fail\");\n  // TODO(jmarantz): the call to getCallback should be made to fail, but as an interim we will\n  // just issue a warning, so that scripts using curl GET commands to mutate state can be fixed.\n  EXPECT_LOG_CONTAINS(\"error\",\n                      \"admin path \\\"\" + path + \"\\\" mutates state, method=GET rather than POST\",\n                      EXPECT_EQ(Http::Code::MethodNotAllowed, getCallback(path, header_map, data)));\n}\n\nTEST_P(AdminInstanceTest, WriteAddressToFile) {\n  std::ifstream address_file(address_out_path_);\n  std::string address_from_file;\n  std::getline(address_file, address_from_file);\n  EXPECT_EQ(admin_.socket().localAddress()->asString(), 
address_from_file);\n}\n\nTEST_P(AdminInstanceTest, AdminBadAddressOutPath) {\n  std::string bad_path = TestEnvironment::temporaryPath(\"some/unlikely/bad/path/admin.address\");\n  AdminImpl admin_bad_address_out_path(cpu_profile_path_, server_);\n  EXPECT_LOG_CONTAINS(\n      \"critical\", \"cannot open admin address output file \" + bad_path + \" for writing.\",\n      admin_bad_address_out_path.startHttpListener(\n          \"/dev/null\", bad_path, Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr,\n          listener_scope_.createScope(\"listener.admin.\")));\n  EXPECT_FALSE(std::ifstream(bad_path));\n}\n\nTEST_P(AdminInstanceTest, CustomHandler) {\n  auto callback = [](absl::string_view, Http::HeaderMap&, Buffer::Instance&,\n                     AdminStream&) -> Http::Code { return Http::Code::Accepted; };\n\n  // Test removable handler.\n  EXPECT_NO_LOGS(EXPECT_TRUE(admin_.addHandler(\"/foo/bar\", \"hello\", callback, true, false)));\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n  EXPECT_EQ(Http::Code::Accepted, getCallback(\"/foo/bar\", header_map, response));\n\n  // Test that removable handler gets removed.\n  EXPECT_TRUE(admin_.removeHandler(\"/foo/bar\"));\n  EXPECT_EQ(Http::Code::NotFound, getCallback(\"/foo/bar\", header_map, response));\n  EXPECT_FALSE(admin_.removeHandler(\"/foo/bar\"));\n\n  // Add non removable handler.\n  EXPECT_TRUE(admin_.addHandler(\"/foo/bar\", \"hello\", callback, false, false));\n  EXPECT_EQ(Http::Code::Accepted, getCallback(\"/foo/bar\", header_map, response));\n\n  // Add again and make sure it is not there twice.\n  EXPECT_FALSE(admin_.addHandler(\"/foo/bar\", \"hello\", callback, false, false));\n\n  // Try to remove non removable handler, and make sure it is not removed.\n  EXPECT_FALSE(admin_.removeHandler(\"/foo/bar\"));\n  EXPECT_EQ(Http::Code::Accepted, getCallback(\"/foo/bar\", header_map, response));\n}\n\nTEST_P(AdminInstanceTest, RejectHandlerWithXss) {\n  auto 
callback = [](absl::string_view, Http::HeaderMap&, Buffer::Instance&,\n                     AdminStream&) -> Http::Code { return Http::Code::Accepted; };\n  EXPECT_LOG_CONTAINS(\"error\",\n                      \"filter \\\"/foo<script>alert('hi')</script>\\\" contains invalid character '<'\",\n                      EXPECT_FALSE(admin_.addHandler(\"/foo<script>alert('hi')</script>\", \"hello\",\n                                                     callback, true, false)));\n}\n\nTEST_P(AdminInstanceTest, RejectHandlerWithEmbeddedQuery) {\n  auto callback = [](absl::string_view, Http::HeaderMap&, Buffer::Instance&,\n                     AdminStream&) -> Http::Code { return Http::Code::Accepted; };\n  EXPECT_LOG_CONTAINS(\"error\",\n                      \"filter \\\"/bar?queryShouldNotBeInPrefix\\\" contains invalid character '?'\",\n                      EXPECT_FALSE(admin_.addHandler(\"/bar?queryShouldNotBeInPrefix\", \"hello\",\n                                                     callback, true, false)));\n}\n\nTEST_P(AdminInstanceTest, EscapeHelpTextWithPunctuation) {\n  auto callback = [](absl::string_view, Http::HeaderMap&, Buffer::Instance&,\n                     AdminStream&) -> Http::Code { return Http::Code::Accepted; };\n\n  // It's OK to have help text with HTML characters in it, but when we render the home\n  // page they need to be escaped.\n  const std::string planets = \"jupiter>saturn>mars\";\n  EXPECT_TRUE(admin_.addHandler(\"/planets\", planets, callback, true, false));\n\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/\", header_map, response));\n  const Http::HeaderString& content_type = header_map.ContentType()->value();\n  EXPECT_THAT(std::string(content_type.getStringView()), testing::HasSubstr(\"text/html\"));\n  EXPECT_EQ(-1, response.search(planets.data(), planets.size(), 0, 0));\n  const std::string escaped_planets = \"jupiter&gt;saturn&gt;mars\";\n  EXPECT_NE(-1, 
response.search(escaped_planets.data(), escaped_planets.size(), 0, 0));\n}\n\nTEST_P(AdminInstanceTest, HelpUsesFormForMutations) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/\", header_map, response));\n  const std::string logging_action = \"<form action='logging' method='post'\";\n  const std::string stats_href = \"<a href='stats'\";\n  EXPECT_NE(-1, response.search(logging_action.data(), logging_action.size(), 0, 0));\n  EXPECT_NE(-1, response.search(stats_href.data(), stats_href.size(), 0, 0));\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/clusters_handler_test.cc",
    "content": "#include \"envoy/admin/v3/clusters.pb.h\"\n\n#include \"test/server/admin/admin_instance.h\"\n\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Server {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminInstanceTest, ClustersJson) {\n  Upstream::ClusterManager::ClusterInfoMap cluster_map;\n  ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map));\n\n  NiceMock<Upstream::MockClusterMockPrioritySet> cluster;\n  cluster_map.emplace(cluster.info_->name_, cluster);\n\n  NiceMock<Upstream::Outlier::MockDetector> outlier_detector;\n  ON_CALL(Const(cluster), outlierDetector()).WillByDefault(Return(&outlier_detector));\n  ON_CALL(outlier_detector,\n          successRateEjectionThreshold(\n              Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))\n      .WillByDefault(Return(6.0));\n  ON_CALL(outlier_detector,\n          successRateEjectionThreshold(\n              Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin))\n      .WillByDefault(Return(9.0));\n\n  ON_CALL(*cluster.info_, addedViaApi()).WillByDefault(Return(true));\n\n  Upstream::MockHostSet* host_set = cluster.priority_set_.getMockHostSet(0);\n  auto host = std::make_shared<NiceMock<Upstream::MockHost>>();\n\n  envoy::config::core::v3::Locality locality;\n  locality.set_region(\"test_region\");\n  locality.set_zone(\"test_zone\");\n  locality.set_sub_zone(\"test_sub_zone\");\n  ON_CALL(*host, locality()).WillByDefault(ReturnRef(locality));\n\n  host_set->hosts_.emplace_back(host);\n  Network::Address::InstanceConstSharedPtr address =\n      Network::Utility::resolveUrl(\"tcp://1.2.3.4:80\");\n  ON_CALL(*host, address()).WillByDefault(Return(address));\n  const std::string 
hostname = \"foo.com\";\n  ON_CALL(*host, hostname()).WillByDefault(ReturnRef(hostname));\n\n  // Add stats in random order and validate that they come in order.\n  Stats::PrimitiveCounter test_counter;\n  test_counter.add(10);\n  Stats::PrimitiveCounter rest_counter;\n  rest_counter.add(10);\n  Stats::PrimitiveCounter arest_counter;\n  arest_counter.add(5);\n  std::vector<std::pair<absl::string_view, Stats::PrimitiveCounterReference>> counters = {\n      {\"arest_counter\", arest_counter},\n      {\"rest_counter\", rest_counter},\n      {\"test_counter\", test_counter},\n  };\n  Stats::PrimitiveGauge test_gauge;\n  test_gauge.set(11);\n  Stats::PrimitiveGauge atest_gauge;\n  atest_gauge.set(10);\n  std::vector<std::pair<absl::string_view, Stats::PrimitiveGaugeReference>> gauges = {\n      {\"atest_gauge\", atest_gauge},\n      {\"test_gauge\", test_gauge},\n  };\n  ON_CALL(*host, counters()).WillByDefault(Invoke([&counters]() { return counters; }));\n  ON_CALL(*host, gauges()).WillByDefault(Invoke([&gauges]() { return gauges; }));\n\n  ON_CALL(*host, healthFlagGet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC))\n      .WillByDefault(Return(true));\n  ON_CALL(*host, healthFlagGet(Upstream::Host::HealthFlag::FAILED_OUTLIER_CHECK))\n      .WillByDefault(Return(true));\n  ON_CALL(*host, healthFlagGet(Upstream::Host::HealthFlag::FAILED_EDS_HEALTH))\n      .WillByDefault(Return(false));\n  ON_CALL(*host, healthFlagGet(Upstream::Host::HealthFlag::DEGRADED_ACTIVE_HC))\n      .WillByDefault(Return(true));\n  ON_CALL(*host, healthFlagGet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH))\n      .WillByDefault(Return(true));\n  ON_CALL(*host, healthFlagGet(Upstream::Host::HealthFlag::PENDING_DYNAMIC_REMOVAL))\n      .WillByDefault(Return(true));\n\n  ON_CALL(\n      host->outlier_detector_,\n      successRate(Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))\n      .WillByDefault(Return(43.2));\n  ON_CALL(*host, 
weight()).WillByDefault(Return(5));\n  ON_CALL(host->outlier_detector_,\n          successRate(Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin))\n      .WillByDefault(Return(93.2));\n  ON_CALL(*host, priority()).WillByDefault(Return(6));\n\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/clusters?format=json\", header_map, response));\n  std::string output_json = response.toString();\n  envoy::admin::v3::Clusters output_proto;\n  TestUtility::loadFromJson(output_json, output_proto);\n\n  const std::string expected_json = R\"EOF({\n \"cluster_statuses\": [\n  {\n   \"name\": \"fake_cluster\",\n   \"success_rate_ejection_threshold\": {\n    \"value\": 6\n   },\n   \"local_origin_success_rate_ejection_threshold\": {\n    \"value\": 9\n   },\n   \"added_via_api\": true,\n   \"circuit_breakers\": {\n    \"thresholds\": [\n     {\n      \"max_connections\": 1,\n      \"max_pending_requests\": 1024,\n      \"max_requests\": 1024,\n      \"max_retries\": 1\n     },\n     {\n      \"priority\": \"HIGH\",\n      \"max_connections\": 1,\n      \"max_pending_requests\": 1024,\n      \"max_requests\": 1024,\n      \"max_retries\": 1\n     }\n    ]\n   },\n   \"host_statuses\": [\n    {\n     \"address\": {\n      \"socket_address\": {\n       \"protocol\": \"TCP\",\n       \"address\": \"1.2.3.4\",\n       \"port_value\": 80\n      }\n     },\n     \"stats\": [\n       {\n       \"name\": \"arest_counter\",\n       \"value\": \"5\",\n       \"type\": \"COUNTER\"\n       },\n       {\n       \"name\": \"rest_counter\",\n       \"value\": \"10\",\n       \"type\": \"COUNTER\"\n      },\n      {\n       \"name\": \"test_counter\",\n       \"value\": \"10\",\n       \"type\": \"COUNTER\"\n      },\n      {\n       \"name\": \"atest_gauge\",\n       \"value\": \"10\",\n       \"type\": \"GAUGE\"\n      },\n      {\n       \"name\": \"test_gauge\",\n       \"value\": \"11\",\n       
\"type\": \"GAUGE\"\n      }\n     ],\n     \"health_status\": {\n      \"eds_health_status\": \"DEGRADED\",\n      \"failed_active_health_check\": true,\n      \"failed_outlier_check\": true,\n      \"failed_active_degraded_check\": true,\n      \"pending_dynamic_removal\": true\n     },\n     \"success_rate\": {\n      \"value\": 43.2\n     },\n     \"weight\": 5,\n     \"hostname\": \"foo.com\",\n     \"priority\": 6,\n     \"local_origin_success_rate\": {\n      \"value\": 93.2\n     },\n     \"locality\": {\n       \"region\": \"test_region\",\n       \"zone\": \"test_zone\",\n       \"sub_zone\": \"test_sub_zone\"\n     }\n    }\n   ]\n  }\n ]\n}\n)EOF\";\n\n  envoy::admin::v3::Clusters expected_proto;\n  TestUtility::loadFromJson(expected_json, expected_proto);\n\n  // Ensure the protos created from each JSON are equivalent.\n  EXPECT_THAT(output_proto, ProtoEq(expected_proto));\n\n  // Ensure that the normal text format is used by default.\n  Buffer::OwnedImpl response2;\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/clusters\", header_map, response2));\n  const std::string expected_text = 
R\"EOF(fake_cluster::outlier::success_rate_average::0\nfake_cluster::outlier::success_rate_ejection_threshold::6\nfake_cluster::outlier::local_origin_success_rate_average::0\nfake_cluster::outlier::local_origin_success_rate_ejection_threshold::9\nfake_cluster::default_priority::max_connections::1\nfake_cluster::default_priority::max_pending_requests::1024\nfake_cluster::default_priority::max_requests::1024\nfake_cluster::default_priority::max_retries::1\nfake_cluster::high_priority::max_connections::1\nfake_cluster::high_priority::max_pending_requests::1024\nfake_cluster::high_priority::max_requests::1024\nfake_cluster::high_priority::max_retries::1\nfake_cluster::added_via_api::true\nfake_cluster::1.2.3.4:80::arest_counter::5\nfake_cluster::1.2.3.4:80::atest_gauge::10\nfake_cluster::1.2.3.4:80::rest_counter::10\nfake_cluster::1.2.3.4:80::test_counter::10\nfake_cluster::1.2.3.4:80::test_gauge::11\nfake_cluster::1.2.3.4:80::hostname::foo.com\nfake_cluster::1.2.3.4:80::health_flags::/failed_active_hc/failed_outlier_check/degraded_active_hc/degraded_eds_health/pending_dynamic_removal\nfake_cluster::1.2.3.4:80::weight::5\nfake_cluster::1.2.3.4:80::region::test_region\nfake_cluster::1.2.3.4:80::zone::test_zone\nfake_cluster::1.2.3.4:80::sub_zone::test_sub_zone\nfake_cluster::1.2.3.4:80::canary::false\nfake_cluster::1.2.3.4:80::priority::6\nfake_cluster::1.2.3.4:80::success_rate::43.2\nfake_cluster::1.2.3.4:80::local_origin_success_rate::93.2\n)EOF\";\n  EXPECT_EQ(expected_text, response2.toString());\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/config_dump_handler_test.cc",
    "content": "#include \"test/server/admin/admin_instance.h\"\n\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Server {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// helper method for adding host's info\nvoid addHostInfo(NiceMock<Upstream::MockHost>& host, const std::string& hostname,\n                 const std::string& address_url, envoy::config::core::v3::Locality& locality,\n                 const std::string& hostname_for_healthcheck,\n                 const std::string& healthcheck_address_url, int weight, int priority) {\n  ON_CALL(host, locality()).WillByDefault(ReturnRef(locality));\n\n  Network::Address::InstanceConstSharedPtr address = Network::Utility::resolveUrl(address_url);\n  ON_CALL(host, address()).WillByDefault(Return(address));\n  ON_CALL(host, hostname()).WillByDefault(ReturnRef(hostname));\n\n  ON_CALL(host, hostnameForHealthChecks()).WillByDefault(ReturnRef(hostname_for_healthcheck));\n  Network::Address::InstanceConstSharedPtr healthcheck_address =\n      Network::Utility::resolveUrl(healthcheck_address_url);\n  ON_CALL(host, healthCheckAddress()).WillByDefault(Return(healthcheck_address));\n\n  auto metadata = std::make_shared<envoy::config::core::v3::Metadata>();\n  ON_CALL(host, metadata()).WillByDefault(Return(metadata));\n\n  ON_CALL(host, health()).WillByDefault(Return(Upstream::Host::Health::Healthy));\n\n  ON_CALL(host, weight()).WillByDefault(Return(weight));\n  ON_CALL(host, priority()).WillByDefault(Return(priority));\n}\n\nTEST_P(AdminInstanceTest, ConfigDump) {\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  auto entry = admin_.getConfigTracker().add(\"foo\", [] {\n    auto msg = std::make_unique<ProtobufWkt::StringValue>();\n    msg->set_value(\"bar\");\n    
return msg;\n  });\n  const std::string expected_json = R\"EOF({\n \"configs\": [\n  {\n   \"@type\": \"type.googleapis.com/google.protobuf.StringValue\",\n   \"value\": \"bar\"\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/config_dump\", header_map, response));\n  std::string output = response.toString();\n  EXPECT_EQ(expected_json, output);\n}\n\nTEST_P(AdminInstanceTest, ConfigDumpMaintainsOrder) {\n  // Add configs in random order and validate config_dump dumps in the order.\n  auto bootstrap_entry = admin_.getConfigTracker().add(\"bootstrap\", [] {\n    auto msg = std::make_unique<ProtobufWkt::StringValue>();\n    msg->set_value(\"bootstrap_config\");\n    return msg;\n  });\n  auto route_entry = admin_.getConfigTracker().add(\"routes\", [] {\n    auto msg = std::make_unique<ProtobufWkt::StringValue>();\n    msg->set_value(\"routes_config\");\n    return msg;\n  });\n  auto listener_entry = admin_.getConfigTracker().add(\"listeners\", [] {\n    auto msg = std::make_unique<ProtobufWkt::StringValue>();\n    msg->set_value(\"listeners_config\");\n    return msg;\n  });\n  auto cluster_entry = admin_.getConfigTracker().add(\"clusters\", [] {\n    auto msg = std::make_unique<ProtobufWkt::StringValue>();\n    msg->set_value(\"clusters_config\");\n    return msg;\n  });\n  const std::string expected_json = R\"EOF({\n \"configs\": [\n  {\n   \"@type\": \"type.googleapis.com/google.protobuf.StringValue\",\n   \"value\": \"bootstrap_config\"\n  },\n  {\n   \"@type\": \"type.googleapis.com/google.protobuf.StringValue\",\n   \"value\": \"clusters_config\"\n  },\n  {\n   \"@type\": \"type.googleapis.com/google.protobuf.StringValue\",\n   \"value\": \"listeners_config\"\n  },\n  {\n   \"@type\": \"type.googleapis.com/google.protobuf.StringValue\",\n   \"value\": \"routes_config\"\n  }\n ]\n}\n)EOF\";\n  // Run it multiple times and validate that order is preserved.\n  for (size_t i = 0; i < 5; i++) {\n    Buffer::OwnedImpl response;\n    
Http::TestResponseHeaderMapImpl header_map;\n    EXPECT_EQ(Http::Code::OK, getCallback(\"/config_dump\", header_map, response));\n    const std::string output = response.toString();\n    EXPECT_EQ(expected_json, output);\n  }\n}\n\n// Test that using ?include_eds parameter adds EDS to the config dump.\nTEST_P(AdminInstanceTest, ConfigDumpWithEndpoint) {\n  Upstream::ClusterManager::ClusterInfoMap cluster_map;\n  ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map));\n\n  NiceMock<Upstream::MockClusterMockPrioritySet> cluster;\n  cluster_map.emplace(cluster.info_->name_, cluster);\n\n  ON_CALL(*cluster.info_, addedViaApi()).WillByDefault(Return(false));\n\n  Upstream::MockHostSet* host_set = cluster.priority_set_.getMockHostSet(0);\n  auto host = std::make_shared<NiceMock<Upstream::MockHost>>();\n  host_set->hosts_.emplace_back(host);\n\n  envoy::config::core::v3::Locality locality;\n  const std::string hostname_for_healthcheck = \"test_hostname_healthcheck\";\n  const std::string hostname = \"foo.com\";\n\n  addHostInfo(*host, hostname, \"tcp://1.2.3.4:80\", locality, hostname_for_healthcheck,\n              \"tcp://1.2.3.5:90\", 5, 6);\n\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/config_dump?include_eds\", header_map, response));\n  std::string output = response.toString();\n  const std::string expected_json = R\"EOF({\n \"configs\": [\n  {\n   \"@type\": \"type.googleapis.com/envoy.admin.v3.EndpointsConfigDump\",\n   \"static_endpoint_configs\": [\n    {\n     \"endpoint_config\": {\n      \"@type\": \"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\",\n      \"cluster_name\": \"fake_cluster\",\n      \"endpoints\": [\n       {\n        \"locality\": {},\n        \"lb_endpoints\": [\n         {\n          \"endpoint\": {\n           \"address\": {\n            \"socket_address\": {\n             \"address\": \"1.2.3.4\",\n           
  \"port_value\": 80\n            }\n           },\n           \"health_check_config\": {\n            \"port_value\": 90,\n            \"hostname\": \"test_hostname_healthcheck\"\n           },\n           \"hostname\": \"foo.com\"\n          },\n          \"health_status\": \"HEALTHY\",\n          \"metadata\": {},\n          \"load_balancing_weight\": 5\n         }\n        ],\n        \"priority\": 6\n       }\n      ],\n      \"policy\": {\n       \"overprovisioning_factor\": 140\n      }\n     }\n    }\n   ]\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(expected_json, output);\n}\n\n// Test EDS config dump while multiple localities and priorities exist\nTEST_P(AdminInstanceTest, ConfigDumpWithLocalityEndpoint) {\n  Upstream::ClusterManager::ClusterInfoMap cluster_map;\n  ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map));\n\n  NiceMock<Upstream::MockClusterMockPrioritySet> cluster;\n  cluster_map.emplace(cluster.info_->name_, cluster);\n\n  ON_CALL(*cluster.info_, addedViaApi()).WillByDefault(Return(false));\n\n  Upstream::MockHostSet* host_set_1 = cluster.priority_set_.getMockHostSet(0);\n  auto host_1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  host_set_1->hosts_.emplace_back(host_1);\n\n  envoy::config::core::v3::Locality locality_1;\n  locality_1.set_region(\"oceania\");\n  locality_1.set_zone(\"hello\");\n  locality_1.set_sub_zone(\"world\");\n\n  const std::string hostname_for_healthcheck = \"test_hostname_healthcheck\";\n  const std::string hostname_1 = \"foo.com\";\n\n  addHostInfo(*host_1, hostname_1, \"tcp://1.2.3.4:80\", locality_1, hostname_for_healthcheck,\n              \"tcp://1.2.3.5:90\", 5, 6);\n\n  auto host_2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  host_set_1->hosts_.emplace_back(host_2);\n  const std::string empty_hostname_for_healthcheck = \"\";\n  const std::string hostname_2 = \"boo.com\";\n\n  addHostInfo(*host_2, hostname_2, \"tcp://1.2.3.7:8\", locality_1, 
empty_hostname_for_healthcheck,\n              \"tcp://1.2.3.7:8\", 3, 6);\n\n  envoy::config::core::v3::Locality locality_2;\n\n  auto host_3 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  host_set_1->hosts_.emplace_back(host_3);\n  const std::string hostname_3 = \"coo.com\";\n\n  addHostInfo(*host_3, hostname_3, \"tcp://1.2.3.8:8\", locality_2, empty_hostname_for_healthcheck,\n              \"tcp://1.2.3.8:8\", 3, 4);\n\n  std::vector<Upstream::HostVector> locality_hosts = {\n      {Upstream::HostSharedPtr(host_1), Upstream::HostSharedPtr(host_2)},\n      {Upstream::HostSharedPtr(host_3)}};\n  auto hosts_per_locality = new Upstream::HostsPerLocalityImpl(std::move(locality_hosts), false);\n\n  Upstream::LocalityWeightsConstSharedPtr locality_weights{new Upstream::LocalityWeights{1, 3}};\n  ON_CALL(*host_set_1, hostsPerLocality()).WillByDefault(ReturnRef(*hosts_per_locality));\n  ON_CALL(*host_set_1, localityWeights()).WillByDefault(Return(locality_weights));\n\n  Upstream::MockHostSet* host_set_2 = cluster.priority_set_.getMockHostSet(1);\n  auto host_4 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  host_set_2->hosts_.emplace_back(host_4);\n  const std::string hostname_4 = \"doo.com\";\n\n  addHostInfo(*host_4, hostname_4, \"tcp://1.2.3.9:8\", locality_1, empty_hostname_for_healthcheck,\n              \"tcp://1.2.3.9:8\", 3, 2);\n\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/config_dump?include_eds\", header_map, response));\n  std::string output = response.toString();\n  const std::string expected_json = R\"EOF({\n \"configs\": [\n  {\n   \"@type\": \"type.googleapis.com/envoy.admin.v3.EndpointsConfigDump\",\n   \"static_endpoint_configs\": [\n    {\n     \"endpoint_config\": {\n      \"@type\": \"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\",\n      \"cluster_name\": \"fake_cluster\",\n      \"endpoints\": [\n       {\n        \"locality\": {\n     
    \"region\": \"oceania\",\n         \"zone\": \"hello\",\n         \"sub_zone\": \"world\"\n        },\n        \"lb_endpoints\": [\n         {\n          \"endpoint\": {\n           \"address\": {\n            \"socket_address\": {\n             \"address\": \"1.2.3.4\",\n             \"port_value\": 80\n            }\n           },\n           \"health_check_config\": {\n            \"port_value\": 90,\n            \"hostname\": \"test_hostname_healthcheck\"\n           },\n           \"hostname\": \"foo.com\"\n          },\n          \"health_status\": \"HEALTHY\",\n          \"metadata\": {},\n          \"load_balancing_weight\": 5\n         },\n         {\n          \"endpoint\": {\n           \"address\": {\n            \"socket_address\": {\n             \"address\": \"1.2.3.7\",\n             \"port_value\": 8\n            }\n           },\n           \"health_check_config\": {},\n           \"hostname\": \"boo.com\"\n          },\n          \"health_status\": \"HEALTHY\",\n          \"metadata\": {},\n          \"load_balancing_weight\": 3\n         }\n        ],\n        \"load_balancing_weight\": 1,\n        \"priority\": 6\n       },\n       {\n        \"locality\": {},\n        \"lb_endpoints\": [\n         {\n          \"endpoint\": {\n           \"address\": {\n            \"socket_address\": {\n             \"address\": \"1.2.3.8\",\n             \"port_value\": 8\n            }\n           },\n           \"health_check_config\": {},\n           \"hostname\": \"coo.com\"\n          },\n          \"health_status\": \"HEALTHY\",\n          \"metadata\": {},\n          \"load_balancing_weight\": 3\n         }\n        ],\n        \"load_balancing_weight\": 3,\n        \"priority\": 4\n       },\n       {\n        \"locality\": {\n         \"region\": \"oceania\",\n         \"zone\": \"hello\",\n         \"sub_zone\": \"world\"\n        },\n        \"lb_endpoints\": [\n         {\n          \"endpoint\": {\n           \"address\": {\n            
\"socket_address\": {\n             \"address\": \"1.2.3.9\",\n             \"port_value\": 8\n            }\n           },\n           \"health_check_config\": {},\n           \"hostname\": \"doo.com\"\n          },\n          \"health_status\": \"HEALTHY\",\n          \"metadata\": {},\n          \"load_balancing_weight\": 3\n         }\n        ],\n        \"priority\": 2\n       }\n      ],\n      \"policy\": {\n       \"overprovisioning_factor\": 140\n      }\n     }\n    }\n   ]\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(expected_json, output);\n  delete (hosts_per_locality);\n}\n\n// Test that using the resource query parameter filters the config dump.\n// We add both static and dynamic listener config to the dump, but expect only\n// dynamic in the JSON with ?resource=dynamic_listeners.\nTEST_P(AdminInstanceTest, ConfigDumpFiltersByResource) {\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  auto listeners = admin_.getConfigTracker().add(\"listeners\", [] {\n    auto msg = std::make_unique<envoy::admin::v3::ListenersConfigDump>();\n    auto dyn_listener = msg->add_dynamic_listeners();\n    dyn_listener->set_name(\"foo\");\n    auto stat_listener = msg->add_static_listeners();\n    envoy::config::listener::v3::Listener listener;\n    listener.set_name(\"bar\");\n    stat_listener->mutable_listener()->PackFrom(listener);\n    return msg;\n  });\n  const std::string expected_json = R\"EOF({\n \"configs\": [\n  {\n   \"@type\": \"type.googleapis.com/envoy.admin.v3.ListenersConfigDump.DynamicListener\",\n   \"name\": \"foo\"\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(Http::Code::OK,\n            getCallback(\"/config_dump?resource=dynamic_listeners\", header_map, response));\n  std::string output = response.toString();\n  EXPECT_EQ(expected_json, output);\n}\n\n// Test that using the resource query parameter filters the config dump including EDS.\n// We add both static and dynamic endpoint config to the dump, but expect only\n// dynamic in the JSON 
with ?resource=dynamic_endpoint_configs.\nTEST_P(AdminInstanceTest, ConfigDumpWithEndpointFiltersByResource) {\n  Upstream::ClusterManager::ClusterInfoMap cluster_map;\n  ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map));\n\n  NiceMock<Upstream::MockClusterMockPrioritySet> cluster_1;\n  cluster_map.emplace(cluster_1.info_->name_, cluster_1);\n\n  ON_CALL(*cluster_1.info_, addedViaApi()).WillByDefault(Return(true));\n\n  Upstream::MockHostSet* host_set = cluster_1.priority_set_.getMockHostSet(0);\n  auto host_1 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  host_set->hosts_.emplace_back(host_1);\n\n  envoy::config::core::v3::Locality locality;\n  const std::string hostname_for_healthcheck = \"test_hostname_healthcheck\";\n  const std::string hostname_1 = \"foo.com\";\n\n  addHostInfo(*host_1, hostname_1, \"tcp://1.2.3.4:80\", locality, hostname_for_healthcheck,\n              \"tcp://1.2.3.5:90\", 5, 6);\n\n  NiceMock<Upstream::MockClusterMockPrioritySet> cluster_2;\n  cluster_2.info_->name_ = \"fake_cluster_2\";\n  cluster_map.emplace(cluster_2.info_->name_, cluster_2);\n\n  ON_CALL(*cluster_2.info_, addedViaApi()).WillByDefault(Return(false));\n\n  Upstream::MockHostSet* host_set_2 = cluster_2.priority_set_.getMockHostSet(0);\n  auto host_2 = std::make_shared<NiceMock<Upstream::MockHost>>();\n  host_set_2->hosts_.emplace_back(host_2);\n  const std::string hostname_2 = \"boo.com\";\n\n  addHostInfo(*host_2, hostname_2, \"tcp://1.2.3.5:8\", locality, hostname_for_healthcheck,\n              \"tcp://1.2.3.4:1\", 3, 4);\n\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  EXPECT_EQ(Http::Code::OK,\n            getCallback(\"/config_dump?include_eds&resource=dynamic_endpoint_configs\", header_map,\n                        response));\n  std::string output = response.toString();\n  const std::string expected_json = R\"EOF({\n \"configs\": [\n  {\n   \"@type\": 
\"type.googleapis.com/envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig\",\n   \"endpoint_config\": {\n    \"@type\": \"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment\",\n    \"cluster_name\": \"fake_cluster\",\n    \"endpoints\": [\n     {\n      \"locality\": {},\n      \"lb_endpoints\": [\n       {\n        \"endpoint\": {\n         \"address\": {\n          \"socket_address\": {\n           \"address\": \"1.2.3.4\",\n           \"port_value\": 80\n          }\n         },\n         \"health_check_config\": {\n          \"port_value\": 90,\n          \"hostname\": \"test_hostname_healthcheck\"\n         },\n         \"hostname\": \"foo.com\"\n        },\n        \"health_status\": \"HEALTHY\",\n        \"metadata\": {},\n        \"load_balancing_weight\": 5\n       }\n      ],\n      \"priority\": 6\n     }\n    ],\n    \"policy\": {\n     \"overprovisioning_factor\": 140\n    }\n   }\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(expected_json, output);\n}\n\n// Test that using the mask query parameter filters the config dump.\n// We add both static and dynamic listener config to the dump, but expect only\n// dynamic in the JSON with ?mask=dynamic_listeners.\nTEST_P(AdminInstanceTest, ConfigDumpFiltersByMask) {\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  auto listeners = admin_.getConfigTracker().add(\"listeners\", [] {\n    auto msg = std::make_unique<envoy::admin::v3::ListenersConfigDump>();\n    auto dyn_listener = msg->add_dynamic_listeners();\n    dyn_listener->set_name(\"foo\");\n    auto stat_listener = msg->add_static_listeners();\n    envoy::config::listener::v3::Listener listener;\n    listener.set_name(\"bar\");\n    stat_listener->mutable_listener()->PackFrom(listener);\n    return msg;\n  });\n  const std::string expected_json = R\"EOF({\n \"configs\": [\n  {\n   \"@type\": \"type.googleapis.com/envoy.admin.v3.ListenersConfigDump\",\n   \"dynamic_listeners\": [\n    {\n     \"name\": \"foo\"\n    
}\n   ]\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(Http::Code::OK,\n            getCallback(\"/config_dump?mask=dynamic_listeners\", header_map, response));\n  std::string output = response.toString();\n  EXPECT_EQ(expected_json, output);\n}\n\nProtobufTypes::MessagePtr testDumpClustersConfig() {\n  auto msg = std::make_unique<envoy::admin::v3::ClustersConfigDump>();\n  auto* static_cluster = msg->add_static_clusters();\n  envoy::config::cluster::v3::Cluster inner_cluster;\n  inner_cluster.set_name(\"foo\");\n  inner_cluster.set_ignore_health_on_host_removal(true);\n  static_cluster->mutable_cluster()->PackFrom(inner_cluster);\n\n  auto* dyn_cluster = msg->add_dynamic_active_clusters();\n  dyn_cluster->set_version_info(\"baz\");\n  dyn_cluster->mutable_last_updated()->set_seconds(5);\n  envoy::config::cluster::v3::Cluster inner_dyn_cluster;\n  inner_dyn_cluster.set_name(\"bar\");\n  inner_dyn_cluster.set_ignore_health_on_host_removal(true);\n  inner_dyn_cluster.mutable_http2_protocol_options()->set_allow_connect(true);\n  dyn_cluster->mutable_cluster()->PackFrom(inner_dyn_cluster);\n  return msg;\n}\n\n// Test that when using both resource and mask query parameters the JSON output contains\n// only the desired resource and the fields specified in the mask.\nTEST_P(AdminInstanceTest, ConfigDumpFiltersByResourceAndMask) {\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  auto clusters = admin_.getConfigTracker().add(\"clusters\", testDumpClustersConfig);\n  const std::string expected_json = R\"EOF({\n \"configs\": [\n  {\n   \"@type\": \"type.googleapis.com/envoy.admin.v3.ClustersConfigDump.DynamicCluster\",\n   \"version_info\": \"baz\",\n   \"cluster\": {\n    \"@type\": \"type.googleapis.com/envoy.config.cluster.v3.Cluster\",\n    \"name\": \"bar\",\n    \"http2_protocol_options\": {\n     \"allow_connect\": true\n    }\n   }\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(Http::Code::OK, 
getCallback(\"/config_dump?resource=dynamic_active_clusters&mask=\"\n                                        \"cluster.name,version_info,cluster.http2_protocol_options\",\n                                        header_map, response));\n  std::string output = response.toString();\n  EXPECT_EQ(expected_json, output);\n}\n\n// Test that no fields are present in the JSON output if there is no intersection between the fields\n// of the config dump and the fields present in the mask query parameter.\nTEST_P(AdminInstanceTest, ConfigDumpNonExistentMask) {\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  auto clusters = admin_.getConfigTracker().add(\"clusters\", testDumpClustersConfig);\n  const std::string expected_json = R\"EOF({\n \"configs\": [\n  {\n   \"@type\": \"type.googleapis.com/envoy.admin.v3.ClustersConfigDump.StaticCluster\"\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(Http::Code::OK,\n            getCallback(\"/config_dump?resource=static_clusters&mask=bad\", header_map, response));\n  std::string output = response.toString();\n  EXPECT_EQ(expected_json, output);\n}\n\n// Test that a 404 Not found is returned if a non-existent resource is passed in as the\n// resource query parameter.\nTEST_P(AdminInstanceTest, ConfigDumpNonExistentResource) {\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  auto listeners = admin_.getConfigTracker().add(\"listeners\", [] {\n    auto msg = std::make_unique<ProtobufWkt::StringValue>();\n    msg->set_value(\"listeners_config\");\n    return msg;\n  });\n  EXPECT_EQ(Http::Code::NotFound, getCallback(\"/config_dump?resource=foo\", header_map, response));\n}\n\n// Test that a 400 Bad Request is returned if the passed resource query parameter is not a\n// repeated field.\nTEST_P(AdminInstanceTest, ConfigDumpResourceNotRepeated) {\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n  auto clusters = admin_.getConfigTracker().add(\"clusters\", [] {\n   
 auto msg = std::make_unique<envoy::admin::v3::ClustersConfigDump>();\n    msg->set_version_info(\"foo\");\n    return msg;\n  });\n  EXPECT_EQ(Http::Code::BadRequest,\n            getCallback(\"/config_dump?resource=version_info\", header_map, response));\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/config_tracker_impl_test.cc",
    "content": "#include \"server/admin/config_tracker_impl.h\"\n\n#include \"test/mocks/common.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ConfigTrackerImplTest : public testing::Test {\npublic:\n  ConfigTrackerImplTest() : cbs_map(tracker.getCallbacksMap()) {\n    EXPECT_TRUE(cbs_map.empty());\n    test_cb = [this] {\n      called = true;\n      return test_msg();\n    };\n  }\n\n  ProtobufTypes::MessagePtr test_msg() { return std::make_unique<ProtobufWkt::Any>(); }\n\n  ~ConfigTrackerImplTest() override = default;\n\n  ConfigTrackerImpl tracker;\n  const std::map<std::string, ConfigTracker::Cb>& cbs_map;\n  ConfigTracker::Cb test_cb;\n  bool called{false};\n  std::string test_string{\"foo\"};\n};\n\nTEST_F(ConfigTrackerImplTest, Basic) {\n  EXPECT_EQ(0, cbs_map.size());\n  auto entry_owner = tracker.add(\"test_key\", test_cb);\n  EXPECT_EQ(1, cbs_map.size());\n  EXPECT_NE(nullptr, entry_owner);\n  EXPECT_NE(nullptr, cbs_map.begin()->second());\n  EXPECT_TRUE(called);\n}\n\nTEST_F(ConfigTrackerImplTest, DestroyHandleBeforeTracker) {\n  auto entry_owner = tracker.add(\"test_key\", test_cb);\n  EXPECT_EQ(1, cbs_map.size());\n  entry_owner.reset();\n  EXPECT_EQ(0, cbs_map.size());\n}\n\nTEST_F(ConfigTrackerImplTest, DestroyTrackerBeforeHandle) {\n  std::shared_ptr<ConfigTracker> tracker_ptr = std::make_shared<ConfigTrackerImpl>();\n  auto entry_owner = tracker.add(\"test_key\", test_cb);\n  tracker_ptr.reset();\n  entry_owner.reset(); // Shouldn't explode\n}\n\nTEST_F(ConfigTrackerImplTest, AddDuplicate) {\n  auto entry_owner = tracker.add(\"test_key\", test_cb);\n  EXPECT_EQ(nullptr, tracker.add(\"test_key\", test_cb));\n  entry_owner.reset();\n  // Now we can add it\n  EXPECT_NE(nullptr, tracker.add(\"test_key\", test_cb));\n}\n\nTEST_F(ConfigTrackerImplTest, OperationsWithinCallback) {\n  ConfigTracker::EntryOwnerPtr owner1, owner2;\n  owner1 = tracker.add(\"test_key\", [&] {\n    owner2 = tracker.add(\"test_key2\", [&] {\n 
     owner1.reset();\n      return test_msg();\n    });\n    return test_msg();\n  });\n  EXPECT_EQ(1, cbs_map.size());\n  EXPECT_NE(nullptr, owner1);\n  EXPECT_NE(nullptr, cbs_map.at(\"test_key\")());\n  EXPECT_EQ(2, cbs_map.size());\n  EXPECT_NE(nullptr, owner2);\n  EXPECT_NE(nullptr, cbs_map.at(\"test_key2\")());\n  EXPECT_EQ(1, cbs_map.size());\n  EXPECT_EQ(0, cbs_map.count(\"test_key\"));\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/init_dump_handler_test.cc",
    "content": "#include \"test/server/admin/admin_instance.h\"\n\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Server {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Test Using /init_dump to dump information of all unready targets.\nTEST_P(AdminInstanceTest, UnreadyTargetsDump) {\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n\n  Network::MockListenerConfig listener_1;\n  Init::ManagerImpl init_manager_1{\"test_init_manager_1\"};\n  Init::TargetImpl target_1(\"test_target_1\", nullptr);\n  init_manager_1.add(target_1);\n  EXPECT_CALL(listener_1, initManager()).WillOnce(ReturnRef(init_manager_1));\n\n  Network::MockListenerConfig listener_2;\n  Init::ManagerImpl init_manager_2{\"test_init_manager_2\"};\n  Init::TargetImpl target_2(\"test_target_2\", nullptr);\n  init_manager_2.add(target_2);\n  EXPECT_CALL(listener_2, initManager()).WillOnce(ReturnRef(init_manager_2));\n\n  MockListenerManager listener_manager;\n  EXPECT_CALL(server_, listenerManager()).WillRepeatedly(ReturnRef(listener_manager));\n\n  std::vector<std::reference_wrapper<Envoy::Network::ListenerConfig>> listeners;\n  listeners.push_back(listener_1);\n  listeners.push_back(listener_2);\n  EXPECT_CALL(listener_manager, isWorkerStarted()).WillRepeatedly(Return(true));\n  EXPECT_CALL(listener_manager, listeners(_)).WillOnce(Return(listeners));\n\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/init_dump\", header_map, response));\n  std::string output = response.toString();\n  // The expected value should be updated when ProtobufTypes::MessagePtr\n  // AdminImpl::dumpListenerUnreadyTargets function includes more dump when mask has no value.\n  const std::string expected_json = R\"EOF({\n \"unready_targets_dumps\": [\n  {\n   \"name\": \"init manager test_init_manager_1\",\n   
\"target_names\": [\n    \"target test_target_1\"\n   ]\n  },\n  {\n   \"name\": \"init manager test_init_manager_2\",\n   \"target_names\": [\n    \"target test_target_2\"\n   ]\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(output, expected_json);\n}\n\n// Test Using /init_dump?listener to dump unready targets of listeners.\nTEST_P(AdminInstanceTest, ListenerUnreadyTargetsDump) {\n  Buffer::OwnedImpl response;\n  Http::TestResponseHeaderMapImpl header_map;\n\n  Network::MockListenerConfig listener_1;\n  Init::ManagerImpl init_manager_1{\"test_init_manager_1\"};\n  Init::TargetImpl target_1(\"test_target_1\", nullptr);\n  init_manager_1.add(target_1);\n  EXPECT_CALL(listener_1, initManager()).WillOnce(ReturnRef(init_manager_1));\n\n  Network::MockListenerConfig listener_2;\n  Init::ManagerImpl init_manager_2{\"test_init_manager_2\"};\n  Init::TargetImpl target_2(\"test_target_2\", nullptr);\n  init_manager_2.add(target_2);\n  EXPECT_CALL(listener_2, initManager()).WillOnce(ReturnRef(init_manager_2));\n\n  MockListenerManager listener_manager;\n  EXPECT_CALL(server_, listenerManager()).WillRepeatedly(ReturnRef(listener_manager));\n\n  std::vector<std::reference_wrapper<Envoy::Network::ListenerConfig>> listeners;\n  listeners.push_back(listener_1);\n  listeners.push_back(listener_2);\n  EXPECT_CALL(listener_manager, isWorkerStarted()).WillRepeatedly(Return(true));\n  EXPECT_CALL(listener_manager, listeners(_)).WillOnce(Return(listeners));\n\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/init_dump?mask=listener\", header_map, response));\n  std::string output = response.toString();\n  const std::string expected_json = R\"EOF({\n \"unready_targets_dumps\": [\n  {\n   \"name\": \"init manager test_init_manager_1\",\n   \"target_names\": [\n    \"target test_target_1\"\n   ]\n  },\n  {\n   \"name\": \"init manager test_init_manager_2\",\n   \"target_names\": [\n    \"target test_target_2\"\n   ]\n  }\n ]\n}\n)EOF\";\n  EXPECT_EQ(output, expected_json);\n}\n} // namespace Server\n} // 
namespace Envoy\n"
  },
  {
    "path": "test/server/admin/logs_handler_test.cc",
    "content": "#include \"common/common/fancy_logger.h\"\n#include \"common/common/logger.h\"\n\n#include \"test/server/admin/admin_instance.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminInstanceTest, ReopenLogs) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n  testing::NiceMock<AccessLog::MockAccessLogManager> access_log_manager_;\n\n  EXPECT_CALL(server_, accessLogManager()).WillRepeatedly(ReturnRef(access_log_manager_));\n  EXPECT_CALL(access_log_manager_, reopen());\n  EXPECT_EQ(Http::Code::OK, postCallback(\"/reopen_logs\", header_map, response));\n}\n\nTEST_P(AdminInstanceTest, LogLevelSetting) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n\n  // now for Envoy, w/o setting the mode\n  FANCY_LOG(info, \"Build the logger for this file.\");\n  Logger::Context::enableFancyLogger();\n  postCallback(\"/logging\", header_map, response);\n  FANCY_LOG(error, response.toString());\n\n  postCallback(\"/logging?level=warning\", header_map, response);\n  FANCY_LOG(warn, \"After post 1: all level is warning now!\");\n  EXPECT_EQ(getFancyContext().getFancyLogEntry(__FILE__)->level(), spdlog::level::warn);\n  std::string query = fmt::format(\"/logging?{}=info\", __FILE__);\n  postCallback(query, header_map, response);\n  FANCY_LOG(info, \"After post 2: level for this file is info now!\");\n  EXPECT_EQ(getFancyContext().getFancyLogEntry(__FILE__)->level(), spdlog::level::info);\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/profiling_handler_test.cc",
    "content": "#include \"common/profiler/profiler.h\"\n\n#include \"test/server/admin/admin_instance.h\"\n#include \"test/test_common/logging.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminInstanceTest, AdminCpuProfiler) {\n  Buffer::OwnedImpl data;\n  Http::TestResponseHeaderMapImpl header_map;\n\n  // Can only get code coverage of AdminImpl::handlerCpuProfiler stopProfiler with\n  // a real profiler linked in (successful call to startProfiler).\n#ifdef PROFILER_AVAILABLE\n  EXPECT_EQ(Http::Code::OK, postCallback(\"/cpuprofiler?enable=y\", header_map, data));\n  EXPECT_TRUE(Profiler::Cpu::profilerEnabled());\n#else\n  EXPECT_EQ(Http::Code::InternalServerError,\n            postCallback(\"/cpuprofiler?enable=y\", header_map, data));\n  EXPECT_FALSE(Profiler::Cpu::profilerEnabled());\n#endif\n\n  EXPECT_EQ(Http::Code::OK, postCallback(\"/cpuprofiler?enable=n\", header_map, data));\n  EXPECT_FALSE(Profiler::Cpu::profilerEnabled());\n}\n\nTEST_P(AdminInstanceTest, AdminHeapProfilerOnRepeatedRequest) {\n  Buffer::OwnedImpl data;\n  Http::TestResponseHeaderMapImpl header_map;\n  auto repeatResultCode = Http::Code::BadRequest;\n#ifndef PROFILER_AVAILABLE\n  repeatResultCode = Http::Code::NotImplemented;\n#endif\n\n  postCallback(\"/heapprofiler?enable=y\", header_map, data);\n  EXPECT_EQ(repeatResultCode, postCallback(\"/heapprofiler?enable=y\", header_map, data));\n\n  postCallback(\"/heapprofiler?enable=n\", header_map, data);\n  EXPECT_EQ(repeatResultCode, postCallback(\"/heapprofiler?enable=n\", header_map, data));\n}\n\nTEST_P(AdminInstanceTest, AdminHeapProfiler) {\n  Buffer::OwnedImpl data;\n  Http::TestResponseHeaderMapImpl header_map;\n\n  // The below flow need to begin with the profiler not running\n  Profiler::Heap::stopProfiler();\n\n#ifdef 
PROFILER_AVAILABLE\n  EXPECT_EQ(Http::Code::OK, postCallback(\"/heapprofiler?enable=y\", header_map, data));\n  EXPECT_TRUE(Profiler::Heap::isProfilerStarted());\n  EXPECT_EQ(Http::Code::OK, postCallback(\"/heapprofiler?enable=n\", header_map, data));\n#else\n  EXPECT_EQ(Http::Code::NotImplemented, postCallback(\"/heapprofiler?enable=y\", header_map, data));\n  EXPECT_FALSE(Profiler::Heap::isProfilerStarted());\n  EXPECT_EQ(Http::Code::NotImplemented, postCallback(\"/heapprofiler?enable=n\", header_map, data));\n#endif\n\n  EXPECT_FALSE(Profiler::Heap::isProfilerStarted());\n}\n\nTEST_P(AdminInstanceTest, AdminBadProfiler) {\n  Buffer::OwnedImpl data;\n  AdminImpl admin_bad_profile_path(TestEnvironment::temporaryPath(\"some/unlikely/bad/path.prof\"),\n                                   server_);\n  Http::TestResponseHeaderMapImpl header_map;\n  const absl::string_view post = Http::Headers::get().MethodValues.Post;\n  request_headers_.setMethod(post);\n  admin_filter_.decodeHeaders(request_headers_, false);\n  EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::InternalServerError,\n                           admin_bad_profile_path.runCallback(\"/cpuprofiler?enable=y\", header_map,\n                                                              data, admin_filter_)));\n  EXPECT_FALSE(Profiler::Cpu::profilerEnabled());\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/prometheus_stats_test.cc",
    "content": "#include <regex>\n\n#include \"server/admin/prometheus_stats.h\"\n\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::NiceMock;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Server {\n\nclass HistogramWrapper {\npublic:\n  HistogramWrapper() : histogram_(hist_alloc()) {}\n\n  ~HistogramWrapper() { hist_free(histogram_); }\n\n  const histogram_t* getHistogram() { return histogram_; }\n\n  void setHistogramValues(const std::vector<uint64_t>& values) {\n    for (uint64_t value : values) {\n      hist_insert_intscale(histogram_, value, 0, 1);\n    }\n  }\n\n  void setHistogramValuesWithCounts(const std::vector<std::pair<uint64_t, uint64_t>>& values) {\n    for (std::pair<uint64_t, uint64_t> cv : values) {\n      hist_insert_intscale(histogram_, cv.first, 0, cv.second);\n    }\n  }\n\nprivate:\n  histogram_t* histogram_;\n};\n\nclass PrometheusStatsFormatterTest : public testing::Test {\nprotected:\n  PrometheusStatsFormatterTest() : alloc_(*symbol_table_), pool_(*symbol_table_) {}\n\n  ~PrometheusStatsFormatterTest() override { clearStorage(); }\n\n  void addCounter(const std::string& name, Stats::StatNameTagVector cluster_tags) {\n    Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_);\n    Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_);\n    counters_.push_back(alloc_.makeCounter(name_storage.statName(),\n                                           tag_extracted_name_storage.statName(), cluster_tags));\n  }\n\n  void addGauge(const std::string& name, Stats::StatNameTagVector cluster_tags) {\n    Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_);\n    Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_);\n    gauges_.push_back(alloc_.makeGauge(name_storage.statName(),\n                                       tag_extracted_name_storage.statName(), cluster_tags,\n  
                                     Stats::Gauge::ImportMode::Accumulate));\n  }\n\n  using MockHistogramSharedPtr = Stats::RefcountPtr<NiceMock<Stats::MockParentHistogram>>;\n  void addHistogram(MockHistogramSharedPtr histogram) { histograms_.push_back(histogram); }\n\n  MockHistogramSharedPtr makeHistogram(const std::string& name,\n                                       Stats::StatNameTagVector cluster_tags) {\n    auto histogram = MockHistogramSharedPtr(new NiceMock<Stats::MockParentHistogram>());\n    histogram->name_ = baseName(name, cluster_tags);\n    histogram->setTagExtractedName(name);\n    histogram->setTags(cluster_tags);\n    histogram->used_ = true;\n    return histogram;\n  }\n\n  Stats::StatName makeStat(absl::string_view name) { return pool_.add(name); }\n\n  // Format tags into the name to create a unique stat_name for each name:tag combination.\n  // If the same stat_name is passed to makeGauge() or makeCounter(), even with different\n  // tags, a copy of the previous metric will be returned.\n  std::string baseName(const std::string& name, Stats::StatNameTagVector cluster_tags) {\n    std::string result = name;\n    for (const auto& name_tag : cluster_tags) {\n      result.append(fmt::format(\"<{}:{}>\", symbol_table_->toString(name_tag.first),\n                                symbol_table_->toString(name_tag.second)));\n    }\n    return result;\n  }\n\n  void clearStorage() {\n    pool_.clear();\n    counters_.clear();\n    gauges_.clear();\n    histograms_.clear();\n    EXPECT_EQ(0, symbol_table_->numSymbols());\n  }\n\n  Stats::TestSymbolTable symbol_table_;\n  Stats::AllocatorImpl alloc_;\n  Stats::StatNamePool pool_;\n  std::vector<Stats::CounterSharedPtr> counters_;\n  std::vector<Stats::GaugeSharedPtr> gauges_;\n  std::vector<Stats::ParentHistogramSharedPtr> histograms_;\n};\n\nTEST_F(PrometheusStatsFormatterTest, MetricName) {\n  std::string raw = \"vulture.eats-liver\";\n  std::string expected = \"envoy_vulture_eats_liver\";\n  auto 
actual = PrometheusStatsFormatter::metricName(raw);\n  EXPECT_EQ(expected, actual);\n}\n\nTEST_F(PrometheusStatsFormatterTest, SanitizeMetricName) {\n  std::string raw = \"An.artist.plays-violin@019street\";\n  std::string expected = \"envoy_An_artist_plays_violin_019street\";\n  auto actual = PrometheusStatsFormatter::metricName(raw);\n  EXPECT_EQ(expected, actual);\n}\n\nTEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) {\n  std::string raw = \"3.artists.play-violin@019street\";\n  std::string expected = \"envoy_3_artists_play_violin_019street\";\n  auto actual = PrometheusStatsFormatter::metricName(raw);\n  EXPECT_EQ(expected, actual);\n}\n\nTEST_F(PrometheusStatsFormatterTest, NamespaceRegistry) {\n  std::string raw = \"vulture.eats-liver\";\n  std::string expected = \"vulture_eats_liver\";\n\n  EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace(\"3vulture\"));\n  EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace(\".vulture\"));\n\n  EXPECT_FALSE(PrometheusStatsFormatter::unregisterPrometheusNamespace(\"vulture\"));\n  EXPECT_TRUE(PrometheusStatsFormatter::registerPrometheusNamespace(\"vulture\"));\n  EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace(\"vulture\"));\n  EXPECT_EQ(expected, PrometheusStatsFormatter::metricName(raw));\n  EXPECT_TRUE(PrometheusStatsFormatter::unregisterPrometheusNamespace(\"vulture\"));\n\n  EXPECT_EQ(\"envoy_\" + expected, PrometheusStatsFormatter::metricName(raw));\n}\n\nTEST_F(PrometheusStatsFormatterTest, FormattedTags) {\n  std::vector<Stats::Tag> tags;\n  Stats::Tag tag1 = {\"a.tag-name\", \"a.tag-value\"};\n  Stats::Tag tag2 = {\"another_tag_name\", \"another_tag-value\"};\n  tags.push_back(tag1);\n  tags.push_back(tag2);\n  std::string expected = \"a_tag_name=\\\"a.tag-value\\\",another_tag_name=\\\"another_tag-value\\\"\";\n  auto actual = PrometheusStatsFormatter::formattedTags(tags);\n  EXPECT_EQ(expected, 
actual);\n}\n\nTEST_F(PrometheusStatsFormatterTest, MetricNameCollison) {\n\n  // Create two counters and two gauges with each pair having the same name,\n  // but having different tag names and values.\n  //`statsAsPrometheus()` should return two implying it found two unique stat names\n\n  addCounter(\"cluster.test_cluster_1.upstream_cx_total\",\n             {{makeStat(\"a.tag-name\"), makeStat(\"a.tag-value\")}});\n  addCounter(\"cluster.test_cluster_1.upstream_cx_total\",\n             {{makeStat(\"another_tag_name\"), makeStat(\"another_tag-value\")}});\n  addGauge(\"cluster.test_cluster_2.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_3\"), makeStat(\"another_tag_3-value\")}});\n  addGauge(\"cluster.test_cluster_2.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_4\"), makeStat(\"another_tag_4-value\")}});\n\n  Buffer::OwnedImpl response;\n  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,\n                                                          false, absl::nullopt);\n  EXPECT_EQ(2UL, size);\n}\n\nTEST_F(PrometheusStatsFormatterTest, UniqueMetricName) {\n\n  // Create two counters and two gauges, all with unique names.\n  // statsAsPrometheus() should return four implying it found\n  // four unique stat names.\n\n  addCounter(\"cluster.test_cluster_1.upstream_cx_total\",\n             {{makeStat(\"a.tag-name\"), makeStat(\"a.tag-value\")}});\n  addCounter(\"cluster.test_cluster_2.upstream_cx_total\",\n             {{makeStat(\"another_tag_name\"), makeStat(\"another_tag-value\")}});\n  addGauge(\"cluster.test_cluster_3.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_3\"), makeStat(\"another_tag_3-value\")}});\n  addGauge(\"cluster.test_cluster_4.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_4\"), makeStat(\"another_tag_4-value\")}});\n\n  Buffer::OwnedImpl response;\n  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, 
histograms_, response,\n                                                          false, absl::nullopt);\n  EXPECT_EQ(4UL, size);\n}\n\nTEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) {\n  HistogramWrapper h1_cumulative;\n  h1_cumulative.setHistogramValues(std::vector<uint64_t>(0));\n  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());\n\n  auto histogram = makeHistogram(\"histogram1\", {});\n  ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics));\n\n  addHistogram(histogram);\n\n  Buffer::OwnedImpl response;\n  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,\n                                                          false, absl::nullopt);\n  EXPECT_EQ(1UL, size);\n\n  const std::string expected_output = R\"EOF(# TYPE envoy_histogram1 histogram\nenvoy_histogram1_bucket{le=\"0.5\"} 0\nenvoy_histogram1_bucket{le=\"1\"} 0\nenvoy_histogram1_bucket{le=\"5\"} 0\nenvoy_histogram1_bucket{le=\"10\"} 0\nenvoy_histogram1_bucket{le=\"25\"} 0\nenvoy_histogram1_bucket{le=\"50\"} 0\nenvoy_histogram1_bucket{le=\"100\"} 0\nenvoy_histogram1_bucket{le=\"250\"} 0\nenvoy_histogram1_bucket{le=\"500\"} 0\nenvoy_histogram1_bucket{le=\"1000\"} 0\nenvoy_histogram1_bucket{le=\"2500\"} 0\nenvoy_histogram1_bucket{le=\"5000\"} 0\nenvoy_histogram1_bucket{le=\"10000\"} 0\nenvoy_histogram1_bucket{le=\"30000\"} 0\nenvoy_histogram1_bucket{le=\"60000\"} 0\nenvoy_histogram1_bucket{le=\"300000\"} 0\nenvoy_histogram1_bucket{le=\"600000\"} 0\nenvoy_histogram1_bucket{le=\"1800000\"} 0\nenvoy_histogram1_bucket{le=\"3600000\"} 0\nenvoy_histogram1_bucket{le=\"+Inf\"} 0\nenvoy_histogram1_sum{} 0\nenvoy_histogram1_count{} 0\n\n)EOF\";\n\n  EXPECT_EQ(expected_output, response.toString());\n}\n\nTEST_F(PrometheusStatsFormatterTest, HistogramWithNonDefaultBuckets) {\n  HistogramWrapper h1_cumulative;\n  h1_cumulative.setHistogramValues(std::vector<uint64_t>(0));\n  
Stats::ConstSupportedBuckets buckets{10, 20};\n  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram(), buckets);\n\n  auto histogram = makeHistogram(\"histogram1\", {});\n  ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics));\n\n  addHistogram(histogram);\n\n  Buffer::OwnedImpl response;\n  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,\n                                                          false, absl::nullopt);\n  EXPECT_EQ(1UL, size);\n\n  const std::string expected_output = R\"EOF(# TYPE envoy_histogram1 histogram\nenvoy_histogram1_bucket{le=\"10\"} 0\nenvoy_histogram1_bucket{le=\"20\"} 0\nenvoy_histogram1_bucket{le=\"+Inf\"} 0\nenvoy_histogram1_sum{} 0\nenvoy_histogram1_count{} 0\n\n)EOF\";\n\n  EXPECT_EQ(expected_output, response.toString());\n}\n\nTEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) {\n  HistogramWrapper h1_cumulative;\n\n  // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation.\n  h1_cumulative.setHistogramValuesWithCounts(std::vector<std::pair<uint64_t, uint64_t>>({\n      {1, 100000},\n      {100, 1000000},\n      {1000, 100000000},\n  }));\n\n  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());\n\n  auto histogram = makeHistogram(\"histogram1\", {});\n  ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics));\n\n  addHistogram(histogram);\n\n  Buffer::OwnedImpl response;\n  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,\n                                                          false, absl::nullopt);\n  EXPECT_EQ(1UL, size);\n\n  const std::string expected_output = R\"EOF(# TYPE envoy_histogram1 histogram\nenvoy_histogram1_bucket{le=\"0.5\"} 0\nenvoy_histogram1_bucket{le=\"1\"} 0\nenvoy_histogram1_bucket{le=\"5\"} 
100000\nenvoy_histogram1_bucket{le=\"10\"} 100000\nenvoy_histogram1_bucket{le=\"25\"} 100000\nenvoy_histogram1_bucket{le=\"50\"} 100000\nenvoy_histogram1_bucket{le=\"100\"} 100000\nenvoy_histogram1_bucket{le=\"250\"} 1100000\nenvoy_histogram1_bucket{le=\"500\"} 1100000\nenvoy_histogram1_bucket{le=\"1000\"} 1100000\nenvoy_histogram1_bucket{le=\"2500\"} 101100000\nenvoy_histogram1_bucket{le=\"5000\"} 101100000\nenvoy_histogram1_bucket{le=\"10000\"} 101100000\nenvoy_histogram1_bucket{le=\"30000\"} 101100000\nenvoy_histogram1_bucket{le=\"60000\"} 101100000\nenvoy_histogram1_bucket{le=\"300000\"} 101100000\nenvoy_histogram1_bucket{le=\"600000\"} 101100000\nenvoy_histogram1_bucket{le=\"1800000\"} 101100000\nenvoy_histogram1_bucket{le=\"3600000\"} 101100000\nenvoy_histogram1_bucket{le=\"+Inf\"} 101100000\nenvoy_histogram1_sum{} 105105105000\nenvoy_histogram1_count{} 101100000\n\n)EOF\";\n\n  EXPECT_EQ(expected_output, response.toString());\n}\n\nTEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) {\n  addCounter(\"cluster.test_1.upstream_cx_total\",\n             {{makeStat(\"a.tag-name\"), makeStat(\"a.tag-value\")}});\n  addCounter(\"cluster.test_2.upstream_cx_total\",\n             {{makeStat(\"another_tag_name\"), makeStat(\"another_tag-value\")}});\n  addGauge(\"cluster.test_3.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_3\"), makeStat(\"another_tag_3-value\")}});\n  addGauge(\"cluster.test_4.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_4\"), makeStat(\"another_tag_4-value\")}});\n\n  const std::vector<uint64_t> h1_values = {50, 20, 30, 70, 100, 5000, 200};\n  HistogramWrapper h1_cumulative;\n  h1_cumulative.setHistogramValues(h1_values);\n  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());\n\n  auto histogram1 =\n      makeHistogram(\"cluster.test_1.upstream_rq_time\", {{makeStat(\"key1\"), makeStat(\"value1\")},\n                                                        
{makeStat(\"key2\"), makeStat(\"value2\")}});\n  histogram1->unit_ = Stats::Histogram::Unit::Milliseconds;\n  addHistogram(histogram1);\n  EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics));\n\n  Buffer::OwnedImpl response;\n  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,\n                                                          false, absl::nullopt);\n  EXPECT_EQ(5UL, size);\n\n  const std::string expected_output = R\"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter\nenvoy_cluster_test_1_upstream_cx_total{a_tag_name=\"a.tag-value\"} 0\n\n# TYPE envoy_cluster_test_2_upstream_cx_total counter\nenvoy_cluster_test_2_upstream_cx_total{another_tag_name=\"another_tag-value\"} 0\n\n# TYPE envoy_cluster_test_3_upstream_cx_total gauge\nenvoy_cluster_test_3_upstream_cx_total{another_tag_name_3=\"another_tag_3-value\"} 0\n\n# TYPE envoy_cluster_test_4_upstream_cx_total gauge\nenvoy_cluster_test_4_upstream_cx_total{another_tag_name_4=\"another_tag_4-value\"} 0\n\n# TYPE envoy_cluster_test_1_upstream_rq_time histogram\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"0.5\"} 0\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"1\"} 0\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"5\"} 0\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"10\"} 0\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"25\"} 1\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"50\"} 2\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"100\"} 4\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"250\"} 6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"500\"} 
6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"1000\"} 6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"2500\"} 6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"5000\"} 6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"10000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"30000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"60000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"300000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"600000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"1800000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"3600000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"+Inf\"} 7\nenvoy_cluster_test_1_upstream_rq_time_sum{key1=\"value1\",key2=\"value2\"} 5532\nenvoy_cluster_test_1_upstream_rq_time_count{key1=\"value1\",key2=\"value2\"} 7\n\n)EOF\";\n\n  EXPECT_EQ(expected_output, response.toString());\n}\n\n// Test that output groups all metrics of the same name (with different tags) together,\n// as required by the Prometheus exposition format spec. 
Additionally, groups of metrics\n// should be sorted by their tags; the format specifies that it is preferred that metrics\n// are always grouped in the same order, and sorting is an easy way to ensure this.\nTEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) {\n  const std::vector<uint64_t> h1_values = {50, 20, 30, 70, 100, 5000, 200};\n  HistogramWrapper h1_cumulative;\n  h1_cumulative.setHistogramValues(h1_values);\n  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());\n\n  // Create the 3 clusters in non-sorted order to exercise the sorting.\n  // Create two of each metric type (counter, gauge, histogram) so that\n  // the output for each needs to be collected together.\n  for (const char* cluster : {\"ccc\", \"aaa\", \"bbb\"}) {\n    const Stats::StatNameTagVector tags{{makeStat(\"cluster\"), makeStat(cluster)}};\n    addCounter(\"cluster.upstream_cx_total\", tags);\n    addCounter(\"cluster.upstream_cx_connect_fail\", tags);\n    addGauge(\"cluster.upstream_cx_active\", tags);\n    addGauge(\"cluster.upstream_rq_active\", tags);\n\n    for (const char* hist_name : {\"cluster.upstream_rq_time\", \"cluster.upstream_response_time\"}) {\n      auto histogram1 = makeHistogram(hist_name, tags);\n      histogram1->unit_ = Stats::Histogram::Unit::Milliseconds;\n      addHistogram(histogram1);\n      EXPECT_CALL(*histogram1, cumulativeStatistics())\n          .WillOnce(ReturnRef(h1_cumulative_statistics));\n    }\n  }\n\n  Buffer::OwnedImpl response;\n  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,\n                                                          false, absl::nullopt);\n  EXPECT_EQ(6UL, size);\n\n  const std::string expected_output = R\"EOF(# TYPE envoy_cluster_upstream_cx_connect_fail counter\nenvoy_cluster_upstream_cx_connect_fail{cluster=\"aaa\"} 0\nenvoy_cluster_upstream_cx_connect_fail{cluster=\"bbb\"} 
0\nenvoy_cluster_upstream_cx_connect_fail{cluster=\"ccc\"} 0\n\n# TYPE envoy_cluster_upstream_cx_total counter\nenvoy_cluster_upstream_cx_total{cluster=\"aaa\"} 0\nenvoy_cluster_upstream_cx_total{cluster=\"bbb\"} 0\nenvoy_cluster_upstream_cx_total{cluster=\"ccc\"} 0\n\n# TYPE envoy_cluster_upstream_cx_active gauge\nenvoy_cluster_upstream_cx_active{cluster=\"aaa\"} 0\nenvoy_cluster_upstream_cx_active{cluster=\"bbb\"} 0\nenvoy_cluster_upstream_cx_active{cluster=\"ccc\"} 0\n\n# TYPE envoy_cluster_upstream_rq_active gauge\nenvoy_cluster_upstream_rq_active{cluster=\"aaa\"} 0\nenvoy_cluster_upstream_rq_active{cluster=\"bbb\"} 0\nenvoy_cluster_upstream_rq_active{cluster=\"ccc\"} 0\n\n# TYPE envoy_cluster_upstream_response_time histogram\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"0.5\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"1\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"5\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"10\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"25\"} 1\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"50\"} 2\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"100\"} 4\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"250\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"500\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"1000\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"2500\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"5000\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"10000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"30000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"60000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"300000\"} 
7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"600000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"1800000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"3600000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"aaa\",le=\"+Inf\"} 7\nenvoy_cluster_upstream_response_time_sum{cluster=\"aaa\"} 5532\nenvoy_cluster_upstream_response_time_count{cluster=\"aaa\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"0.5\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"1\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"5\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"10\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"25\"} 1\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"50\"} 2\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"100\"} 4\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"250\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"500\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"1000\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"2500\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"5000\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"10000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"30000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"60000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"300000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"600000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"1800000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"3600000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"bbb\",le=\"+Inf\"} 
7\nenvoy_cluster_upstream_response_time_sum{cluster=\"bbb\"} 5532\nenvoy_cluster_upstream_response_time_count{cluster=\"bbb\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"0.5\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"1\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"5\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"10\"} 0\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"25\"} 1\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"50\"} 2\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"100\"} 4\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"250\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"500\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"1000\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"2500\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"5000\"} 6\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"10000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"30000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"60000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"300000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"600000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"1800000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"3600000\"} 7\nenvoy_cluster_upstream_response_time_bucket{cluster=\"ccc\",le=\"+Inf\"} 7\nenvoy_cluster_upstream_response_time_sum{cluster=\"ccc\"} 5532\nenvoy_cluster_upstream_response_time_count{cluster=\"ccc\"} 7\n\n# TYPE envoy_cluster_upstream_rq_time histogram\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"0.5\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"1\"} 
0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"5\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"10\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"25\"} 1\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"50\"} 2\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"100\"} 4\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"250\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"500\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"1000\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"2500\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"5000\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"10000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"30000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"60000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"300000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"600000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"1800000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"3600000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"aaa\",le=\"+Inf\"} 7\nenvoy_cluster_upstream_rq_time_sum{cluster=\"aaa\"} 5532\nenvoy_cluster_upstream_rq_time_count{cluster=\"aaa\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"0.5\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"1\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"5\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"10\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"25\"} 1\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"50\"} 2\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"100\"} 4\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"250\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"500\"} 
6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"1000\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"2500\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"5000\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"10000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"30000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"60000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"300000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"600000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"1800000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"3600000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"bbb\",le=\"+Inf\"} 7\nenvoy_cluster_upstream_rq_time_sum{cluster=\"bbb\"} 5532\nenvoy_cluster_upstream_rq_time_count{cluster=\"bbb\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"0.5\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"1\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"5\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"10\"} 0\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"25\"} 1\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"50\"} 2\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"100\"} 4\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"250\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"500\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"1000\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"2500\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"5000\"} 6\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"10000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"30000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"60000\"} 
7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"300000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"600000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"1800000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"3600000\"} 7\nenvoy_cluster_upstream_rq_time_bucket{cluster=\"ccc\",le=\"+Inf\"} 7\nenvoy_cluster_upstream_rq_time_sum{cluster=\"ccc\"} 5532\nenvoy_cluster_upstream_rq_time_count{cluster=\"ccc\"} 7\n\n)EOF\";\n\n  EXPECT_EQ(expected_output, response.toString());\n}\n\nTEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) {\n  addCounter(\"cluster.test_1.upstream_cx_total\",\n             {{makeStat(\"a.tag-name\"), makeStat(\"a.tag-value\")}});\n  addCounter(\"cluster.test_2.upstream_cx_total\",\n             {{makeStat(\"another_tag_name\"), makeStat(\"another_tag-value\")}});\n  addGauge(\"cluster.test_3.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_3\"), makeStat(\"another_tag_3-value\")}});\n  addGauge(\"cluster.test_4.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_4\"), makeStat(\"another_tag_4-value\")}});\n\n  const std::vector<uint64_t> h1_values = {50, 20, 30, 70, 100, 5000, 200};\n  HistogramWrapper h1_cumulative;\n  h1_cumulative.setHistogramValues(h1_values);\n  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());\n\n  auto histogram1 =\n      makeHistogram(\"cluster.test_1.upstream_rq_time\", {{makeStat(\"key1\"), makeStat(\"value1\")},\n                                                        {makeStat(\"key2\"), makeStat(\"value2\")}});\n  histogram1->unit_ = Stats::Histogram::Unit::Milliseconds;\n  addHistogram(histogram1);\n  EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics));\n\n  Buffer::OwnedImpl response;\n  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,\n                                                       
   true, absl::nullopt);\n  EXPECT_EQ(1UL, size);\n\n  const std::string expected_output = R\"EOF(# TYPE envoy_cluster_test_1_upstream_rq_time histogram\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"0.5\"} 0\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"1\"} 0\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"5\"} 0\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"10\"} 0\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"25\"} 1\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"50\"} 2\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"100\"} 4\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"250\"} 6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"500\"} 6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"1000\"} 6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"2500\"} 6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"5000\"} 6\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"10000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"30000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"60000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"300000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"600000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"1800000\"} 7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"3600000\"} 
7\nenvoy_cluster_test_1_upstream_rq_time_bucket{key1=\"value1\",key2=\"value2\",le=\"+Inf\"} 7\nenvoy_cluster_test_1_upstream_rq_time_sum{key1=\"value1\",key2=\"value2\"} 5532\nenvoy_cluster_test_1_upstream_rq_time_count{key1=\"value1\",key2=\"value2\"} 7\n\n)EOF\";\n\n  EXPECT_EQ(expected_output, response.toString());\n}\n\nTEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) {\n  const std::vector<uint64_t> h1_values = {};\n  HistogramWrapper h1_cumulative;\n  h1_cumulative.setHistogramValues(h1_values);\n  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());\n\n  auto histogram1 =\n      makeHistogram(\"cluster.test_1.upstream_rq_time\", {{makeStat(\"key1\"), makeStat(\"value1\")},\n                                                        {makeStat(\"key2\"), makeStat(\"value2\")}});\n  histogram1->unit_ = Stats::Histogram::Unit::Milliseconds;\n  histogram1->used_ = false;\n  addHistogram(histogram1);\n\n  {\n    const bool used_only = true;\n    EXPECT_CALL(*histogram1, cumulativeStatistics()).Times(0);\n\n    Buffer::OwnedImpl response;\n    auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_,\n                                                            response, used_only, absl::nullopt);\n    EXPECT_EQ(0UL, size);\n  }\n\n  {\n    const bool used_only = false;\n    EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics));\n\n    Buffer::OwnedImpl response;\n    auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_,\n                                                            response, used_only, absl::nullopt);\n    EXPECT_EQ(1UL, size);\n  }\n}\n\nTEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) {\n  addCounter(\"cluster.test_1.upstream_cx_total\",\n             {{makeStat(\"a.tag-name\"), makeStat(\"a.tag-value\")}});\n  addCounter(\"cluster.test_2.upstream_cx_total\",\n             
{{makeStat(\"another_tag_name\"), makeStat(\"another_tag-value\")}});\n  addGauge(\"cluster.test_3.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_3\"), makeStat(\"another_tag_3-value\")}});\n  addGauge(\"cluster.test_4.upstream_cx_total\",\n           {{makeStat(\"another_tag_name_4\"), makeStat(\"another_tag_4-value\")}});\n\n  const std::vector<uint64_t> h1_values = {50, 20, 30, 70, 100, 5000, 200};\n  HistogramWrapper h1_cumulative;\n  h1_cumulative.setHistogramValues(h1_values);\n  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());\n\n  auto histogram1 =\n      makeHistogram(\"cluster.test_1.upstream_rq_time\", {{makeStat(\"key1\"), makeStat(\"value1\")},\n                                                        {makeStat(\"key2\"), makeStat(\"value2\")}});\n  histogram1->unit_ = Stats::Histogram::Unit::Milliseconds;\n  addHistogram(histogram1);\n\n  Buffer::OwnedImpl response;\n  auto size = PrometheusStatsFormatter::statsAsPrometheus(\n      counters_, gauges_, histograms_, response, false,\n      absl::optional<std::regex>{std::regex(\"cluster.test_1.upstream_cx_total\")});\n  EXPECT_EQ(1UL, size);\n\n  const std::string expected_output =\n      R\"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter\nenvoy_cluster_test_1_upstream_cx_total{a_tag_name=\"a.tag-value\"} 0\n\n)EOF\";\n\n  EXPECT_EQ(expected_output, response.toString());\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/runtime_handler_test.cc",
    "content": "#include \"test/server/admin/admin_instance.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminInstanceTest, Runtime) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n\n  Runtime::MockSnapshot snapshot;\n  Runtime::MockLoader loader;\n  auto layer1 = std::make_unique<NiceMock<Runtime::MockOverrideLayer>>();\n  auto layer2 = std::make_unique<NiceMock<Runtime::MockOverrideLayer>>();\n  Runtime::Snapshot::EntryMap entries2{{\"string_key\", {\"override\", {}, {}, {}, {}}},\n                                       {\"extra_key\", {\"bar\", {}, {}, {}, {}}}};\n  Runtime::Snapshot::EntryMap entries1{{\"string_key\", {\"foo\", {}, {}, {}, {}}},\n                                       {\"int_key\", {\"1\", 1, {}, {}, {}}},\n                                       {\"other_key\", {\"bar\", {}, {}, {}, {}}}};\n\n  ON_CALL(*layer1, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{\"layer1\"}));\n  ON_CALL(*layer1, values()).WillByDefault(testing::ReturnRef(entries1));\n  ON_CALL(*layer2, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{\"layer2\"}));\n  ON_CALL(*layer2, values()).WillByDefault(testing::ReturnRef(entries2));\n\n  std::vector<Runtime::Snapshot::OverrideLayerConstPtr> layers;\n  layers.push_back(std::move(layer1));\n  layers.push_back(std::move(layer2));\n  EXPECT_CALL(snapshot, getLayers()).WillRepeatedly(testing::ReturnRef(layers));\n\n  const std::string expected_json = R\"EOF({\n    \"layers\": [\n        \"layer1\",\n        \"layer2\"\n    ],\n    \"entries\": {\n        \"extra_key\": {\n            \"layer_values\": [\n                \"\",\n                \"bar\"\n            ],\n            \"final_value\": \"bar\"\n        },\n        \"int_key\": {\n            
\"layer_values\": [\n                \"1\",\n                \"\"\n            ],\n            \"final_value\": \"1\"\n        },\n        \"other_key\": {\n            \"layer_values\": [\n                \"bar\",\n                \"\"\n            ],\n            \"final_value\": \"bar\"\n        },\n        \"string_key\": {\n            \"layer_values\": [\n                \"foo\",\n                \"override\"\n            ],\n            \"final_value\": \"override\"\n        }\n    }\n})EOF\";\n\n  EXPECT_CALL(loader, snapshot()).WillRepeatedly(testing::ReturnPointee(&snapshot));\n  EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader));\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/runtime\", header_map, response));\n  EXPECT_THAT(expected_json, JsonStringEq(response.toString()));\n}\n\nTEST_P(AdminInstanceTest, RuntimeModify) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n\n  Runtime::MockLoader loader;\n  EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader));\n\n  absl::node_hash_map<std::string, std::string> overrides;\n  overrides[\"foo\"] = \"bar\";\n  overrides[\"x\"] = \"42\";\n  overrides[\"nothing\"] = \"\";\n  EXPECT_CALL(loader, mergeValues(overrides)).Times(1);\n  EXPECT_EQ(Http::Code::OK,\n            postCallback(\"/runtime_modify?foo=bar&x=42&nothing=\", header_map, response));\n  EXPECT_EQ(\"OK\\n\", response.toString());\n}\n\nTEST_P(AdminInstanceTest, RuntimeModifyParamsInBody) {\n  Runtime::MockLoader loader;\n  EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader));\n\n  const std::string key = \"routing.traffic_shift.foo\";\n  const std::string value = \"numerator: 1\\ndenominator: TEN_THOUSAND\\n\";\n  const absl::node_hash_map<std::string, std::string> overrides = {{key, value}};\n  EXPECT_CALL(loader, mergeValues(overrides)).Times(1);\n\n  const std::string body = fmt::format(\"{}={}\", key, value);\n  
Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n  EXPECT_EQ(Http::Code::OK, runCallback(\"/runtime_modify\", header_map, response, \"POST\", body));\n  EXPECT_EQ(\"OK\\n\", response.toString());\n}\n\nTEST_P(AdminInstanceTest, RuntimeModifyNoArguments) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n\n  EXPECT_EQ(Http::Code::BadRequest, postCallback(\"/runtime_modify\", header_map, response));\n  EXPECT_TRUE(absl::StartsWith(response.toString(), \"usage:\"));\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/server_info_handler_test.cc",
    "content": "#include \"envoy/admin/v3/memory.pb.h\"\n\n#include \"extensions/transport_sockets/tls/context_config_impl.h\"\n\n#include \"test/server/admin/admin_instance.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/test_runtime.h\"\n\nusing testing::Ge;\nusing testing::HasSubstr;\nusing testing::Property;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Server {\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminInstanceTest, ContextThatReturnsNullCertDetails) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n\n  // Setup a context that returns null cert details.\n  testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context;\n  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext config;\n  Extensions::TransportSockets::Tls::ClientContextConfigImpl cfg(config, factory_context);\n  Stats::IsolatedStoreImpl store;\n  Envoy::Ssl::ClientContextSharedPtr client_ctx(\n      server_.sslContextManager().createSslClientContext(store, cfg));\n\n  const std::string expected_empty_json = R\"EOF({\n \"certificates\": [\n  {\n   \"ca_cert\": [],\n   \"cert_chain\": []\n  }\n ]\n}\n)EOF\";\n\n  // Validate that cert details are null and /certs handles it correctly.\n  EXPECT_EQ(nullptr, client_ctx->getCaCertInformation());\n  EXPECT_TRUE(client_ctx->getCertChainInformation().empty());\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/certs\", header_map, response));\n  EXPECT_EQ(expected_empty_json, response.toString());\n}\n\nTEST_P(AdminInstanceTest, Memory) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl response;\n  EXPECT_EQ(Http::Code::OK, getCallback(\"/memory\", header_map, response));\n  const std::string output_json = response.toString();\n  envoy::admin::v3::Memory 
output_proto;\n  TestUtility::loadFromJson(output_json, output_proto);\n  EXPECT_THAT(output_proto, AllOf(Property(&envoy::admin::v3::Memory::allocated, Ge(0)),\n                                  Property(&envoy::admin::v3::Memory::heap_size, Ge(0)),\n                                  Property(&envoy::admin::v3::Memory::pageheap_unmapped, Ge(0)),\n                                  Property(&envoy::admin::v3::Memory::pageheap_free, Ge(0)),\n                                  Property(&envoy::admin::v3::Memory::total_thread_cache, Ge(0))));\n}\n\nTEST_P(AdminInstanceTest, GetReadyRequest) {\n  NiceMock<Init::MockManager> initManager;\n  ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager));\n\n  {\n    Http::TestResponseHeaderMapImpl response_headers;\n    std::string body;\n\n    ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized));\n    EXPECT_EQ(Http::Code::OK, admin_.request(\"/ready\", \"GET\", response_headers, body));\n    EXPECT_EQ(body, \"LIVE\\n\");\n    EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr(\"text/plain\"));\n  }\n\n  {\n    Http::TestResponseHeaderMapImpl response_headers;\n    std::string body;\n\n    ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized));\n    EXPECT_EQ(Http::Code::ServiceUnavailable,\n              admin_.request(\"/ready\", \"GET\", response_headers, body));\n    EXPECT_EQ(body, \"PRE_INITIALIZING\\n\");\n    EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr(\"text/plain\"));\n  }\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  std::string body;\n\n  ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing));\n  EXPECT_EQ(Http::Code::ServiceUnavailable,\n            admin_.request(\"/ready\", \"GET\", response_headers, body));\n  EXPECT_EQ(body, \"INITIALIZING\\n\");\n  EXPECT_THAT(std::string(response_headers.getContentTypeValue()), 
HasSubstr(\"text/plain\"));\n}\n\nTEST_P(AdminInstanceTest, GetRequest) {\n  NiceMock<LocalInfo::MockLocalInfo> local_info;\n  EXPECT_CALL(server_, localInfo()).WillRepeatedly(ReturnRef(local_info));\n  EXPECT_CALL(server_.options_, toCommandLineOptions()).WillRepeatedly(Invoke([&local_info] {\n    Server::CommandLineOptionsPtr command_line_options =\n        std::make_unique<envoy::admin::v3::CommandLineOptions>();\n    command_line_options->set_restart_epoch(2);\n    command_line_options->set_service_cluster(local_info.clusterName());\n    return command_line_options;\n  }));\n  NiceMock<Init::MockManager> initManager;\n  ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager));\n  ON_CALL(server_.hot_restart_, version()).WillByDefault(Return(\"foo_version\"));\n\n  {\n    Http::TestResponseHeaderMapImpl response_headers;\n    std::string body;\n\n    ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized));\n    EXPECT_EQ(Http::Code::OK, admin_.request(\"/server_info\", \"GET\", response_headers, body));\n    envoy::admin::v3::ServerInfo server_info_proto;\n    EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr(\"application/json\"));\n\n    // We only test that it parses as the proto and that some fields are correct, since\n    // values such as timestamps + Envoy version are tricky to test for.\n    TestUtility::loadFromJson(body, server_info_proto);\n    EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::LIVE);\n    EXPECT_EQ(server_info_proto.hot_restart_version(), \"foo_version\");\n    EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2);\n    EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), local_info.clusterName());\n    EXPECT_EQ(server_info_proto.command_line_options().service_cluster(),\n              server_info_proto.node().cluster());\n    EXPECT_EQ(server_info_proto.command_line_options().service_node(), \"\");\n    
EXPECT_EQ(server_info_proto.command_line_options().service_zone(), \"\");\n    EXPECT_EQ(server_info_proto.node().id(), local_info.nodeName());\n    EXPECT_EQ(server_info_proto.node().locality().zone(), local_info.zoneName());\n  }\n\n  {\n    Http::TestResponseHeaderMapImpl response_headers;\n    std::string body;\n\n    ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized));\n    EXPECT_EQ(Http::Code::OK, admin_.request(\"/server_info\", \"GET\", response_headers, body));\n    envoy::admin::v3::ServerInfo server_info_proto;\n    EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr(\"application/json\"));\n\n    // We only test that it parses as the proto and that some fields are correct, since\n    // values such as timestamps + Envoy version are tricky to test for.\n    TestUtility::loadFromJson(body, server_info_proto);\n    EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::PRE_INITIALIZING);\n    EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2);\n    EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), local_info.clusterName());\n    EXPECT_EQ(server_info_proto.command_line_options().service_cluster(),\n              server_info_proto.node().cluster());\n    EXPECT_EQ(server_info_proto.command_line_options().service_node(), \"\");\n    EXPECT_EQ(server_info_proto.command_line_options().service_zone(), \"\");\n    EXPECT_EQ(server_info_proto.node().id(), local_info.nodeName());\n    EXPECT_EQ(server_info_proto.node().locality().zone(), local_info.zoneName());\n  }\n\n  Http::TestResponseHeaderMapImpl response_headers;\n  std::string body;\n\n  ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing));\n  EXPECT_EQ(Http::Code::OK, admin_.request(\"/server_info\", \"GET\", response_headers, body));\n  envoy::admin::v3::ServerInfo server_info_proto;\n  EXPECT_THAT(std::string(response_headers.getContentTypeValue()), 
HasSubstr(\"application/json\"));\n\n  // We only test that it parses as the proto and that some fields are correct, since\n  // values such as timestamps + Envoy version are tricky to test for.\n  TestUtility::loadFromJson(body, server_info_proto);\n  EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::INITIALIZING);\n  EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2);\n  EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), local_info.clusterName());\n  EXPECT_EQ(server_info_proto.command_line_options().service_cluster(),\n            server_info_proto.node().cluster());\n  EXPECT_EQ(server_info_proto.command_line_options().service_node(), \"\");\n  EXPECT_EQ(server_info_proto.command_line_options().service_zone(), \"\");\n  EXPECT_EQ(server_info_proto.node().id(), local_info.nodeName());\n  EXPECT_EQ(server_info_proto.node().locality().zone(), local_info.zoneName());\n}\n\nTEST_P(AdminInstanceTest, PostRequest) {\n  // Load TestScopedRuntime to suppress warnings related to runtime features.\n  TestScopedRuntime scoped_runtime;\n  Http::TestResponseHeaderMapImpl response_headers;\n  std::string body;\n  EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::OK,\n                           admin_.request(\"/healthcheck/fail\", \"POST\", response_headers, body)));\n  EXPECT_EQ(body, \"OK\\n\");\n  EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr(\"text/plain\"));\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/admin/stats_handler_test.cc",
    "content": "#include <regex>\n\n#include \"common/stats/thread_local_store.h\"\n\n#include \"server/admin/stats_handler.h\"\n\n#include \"test/server/admin/admin_instance.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/utility.h\"\n\nusing testing::EndsWith;\nusing testing::HasSubstr;\nusing testing::InSequence;\nusing testing::Ref;\nusing testing::StartsWith;\n\nnamespace Envoy {\nnamespace Server {\n\nclass AdminStatsTest : public testing::TestWithParam<Network::Address::IpVersion> {\npublic:\n  AdminStatsTest() : alloc_(symbol_table_) {\n    store_ = std::make_unique<Stats::ThreadLocalStoreImpl>(alloc_);\n    store_->addSink(sink_);\n  }\n\n  static std::string\n  statsAsJsonHandler(std::map<std::string, uint64_t>& all_stats,\n                     std::map<std::string, std::string>& all_text_readouts,\n                     const std::vector<Stats::ParentHistogramSharedPtr>& all_histograms,\n                     const bool used_only, const absl::optional<std::regex> regex = absl::nullopt) {\n    return StatsHandler::statsAsJson(all_stats, all_text_readouts, all_histograms, used_only, regex,\n                                     true /*pretty_print*/);\n  }\n\n  Stats::SymbolTableImpl symbol_table_;\n  NiceMock<Event::MockDispatcher> main_thread_dispatcher_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Stats::AllocatorImpl alloc_;\n  Stats::MockSink sink_;\n  Stats::ThreadLocalStoreImplPtr store_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminStatsTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminStatsTest, StatsAsJson) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  Stats::Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  Stats::Histogram& h2 = store_->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n\n  
EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 200));\n  h1.recordValue(200);\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h2), 100));\n  h2.recordValue(100);\n\n  store_->mergeHistograms([]() -> void {});\n\n  // Again record a new value in h1 so that it has both interval and cumulative values.\n  // h2 should only have cumulative values.\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 100));\n  h1.recordValue(100);\n\n  store_->mergeHistograms([]() -> void {});\n\n  std::vector<Stats::ParentHistogramSharedPtr> histograms = store_->histograms();\n  std::sort(histograms.begin(), histograms.end(),\n            [](const Stats::ParentHistogramSharedPtr& a,\n               const Stats::ParentHistogramSharedPtr& b) -> bool { return a->name() < b->name(); });\n  std::map<std::string, uint64_t> all_stats;\n  std::map<std::string, std::string> all_text_readouts;\n  std::string actual_json = statsAsJsonHandler(all_stats, all_text_readouts, histograms, false);\n\n  const std::string expected_json = R\"EOF({\n    \"stats\": [\n        {\n            \"histograms\": {\n                \"supported_quantiles\": [\n                    0.0,\n                    25.0,\n                    50.0,\n                    75.0,\n                    90.0,\n                    95.0,\n                    99.0,\n                    99.5,\n                    99.9,\n                    100.0\n                ],\n                \"computed_quantiles\": [\n                    {\n                        \"name\": \"h1\",\n                        \"values\": [\n                            {\n                                \"interval\": 100.0,\n                                \"cumulative\": 100.0\n                            },\n                            {\n                                \"interval\": 102.5,\n                                \"cumulative\": 105.0\n                            },\n                            {\n                                \"interval\": 105.0,\n         
                       \"cumulative\": 110.0\n                            },\n                            {\n                                \"interval\": 107.5,\n                                \"cumulative\": 205.0\n                            },\n                            {\n                                \"interval\": 109.0,\n                                \"cumulative\": 208.0\n                            },\n                            {\n                                \"interval\": 109.5,\n                                \"cumulative\": 209.0\n                            },\n                            {\n                                \"interval\": 109.9,\n                                \"cumulative\": 209.8\n                            },\n                            {\n                                \"interval\": 109.95,\n                                \"cumulative\": 209.9\n                            },\n                            {\n                                \"interval\": 109.99,\n                                \"cumulative\": 209.98\n                            },\n                            {\n                                \"interval\": 110.0,\n                                \"cumulative\": 210.0\n                            }\n                        ]\n                    },\n                    {\n                        \"name\": \"h2\",\n                        \"values\": [\n                            {\n                                \"interval\": null,\n                                \"cumulative\": 100.0\n                            },\n                            {\n                                \"interval\": null,\n                                \"cumulative\": 102.5\n                            },\n                            {\n                                \"interval\": null,\n                                \"cumulative\": 105.0\n                            },\n                            {\n                
                \"interval\": null,\n                                \"cumulative\": 107.5\n                            },\n                            {\n                                \"interval\": null,\n                                \"cumulative\": 109.0\n                            },\n                            {\n                                \"interval\": null,\n                                \"cumulative\": 109.5\n                            },\n                            {\n                                \"interval\": null,\n                                \"cumulative\": 109.9\n                            },\n                            {\n                                \"interval\": null,\n                                \"cumulative\": 109.95\n                            },\n                            {\n                                \"interval\": null,\n                                \"cumulative\": 109.99\n                            },\n                            {\n                                \"interval\": null,\n                                \"cumulative\": 110.0\n                            }\n                        ]\n                    }\n                ]\n            }\n        }\n    ]\n})EOF\";\n\n  EXPECT_THAT(expected_json, JsonStringEq(actual_json));\n  store_->shutdownThreading();\n}\n\nTEST_P(AdminStatsTest, UsedOnlyStatsAsJson) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  Stats::Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  Stats::Histogram& h2 = store_->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n\n  EXPECT_EQ(\"h1\", h1.name());\n  EXPECT_EQ(\"h2\", h2.name());\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 200));\n  h1.recordValue(200);\n\n  store_->mergeHistograms([]() -> void {});\n\n  // Again record a new value in h1 so that it has both interval and cumulative values.\n  // h2 should only 
have cumulative values.\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 100));\n  h1.recordValue(100);\n\n  store_->mergeHistograms([]() -> void {});\n\n  std::map<std::string, uint64_t> all_stats;\n  std::map<std::string, std::string> all_text_readouts;\n  std::string actual_json =\n      statsAsJsonHandler(all_stats, all_text_readouts, store_->histograms(), true);\n\n  // Expected JSON should not have h2 values as it is not used.\n  const std::string expected_json = R\"EOF({\n    \"stats\": [\n        {\n            \"histograms\": {\n                \"supported_quantiles\": [\n                    0.0,\n                    25.0,\n                    50.0,\n                    75.0,\n                    90.0,\n                    95.0,\n                    99.0,\n                    99.5,\n                    99.9,\n                    100.0\n                ],\n                \"computed_quantiles\": [\n                    {\n                        \"name\": \"h1\",\n                        \"values\": [\n                            {\n                                \"interval\": 100.0,\n                                \"cumulative\": 100.0\n                            },\n                            {\n                                \"interval\": 102.5,\n                                \"cumulative\": 105.0\n                            },\n                            {\n                                \"interval\": 105.0,\n                                \"cumulative\": 110.0\n                            },\n                            {\n                                \"interval\": 107.5,\n                                \"cumulative\": 205.0\n                            },\n                            {\n                                \"interval\": 109.0,\n                                \"cumulative\": 208.0\n                            },\n                            {\n                                \"interval\": 109.5,\n                            
    \"cumulative\": 209.0\n                            },\n                            {\n                                \"interval\": 109.9,\n                                \"cumulative\": 209.8\n                            },\n                            {\n                                \"interval\": 109.95,\n                                \"cumulative\": 209.9\n                            },\n                            {\n                                \"interval\": 109.99,\n                                \"cumulative\": 209.98\n                            },\n                            {\n                                \"interval\": 110.0,\n                                \"cumulative\": 210.0\n                            }\n                        ]\n                    }\n                ]\n            }\n        }\n    ]\n})EOF\";\n\n  EXPECT_THAT(expected_json, JsonStringEq(actual_json));\n  store_->shutdownThreading();\n}\n\nTEST_P(AdminStatsTest, StatsAsJsonFilterString) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  Stats::Histogram& h1 = store_->histogramFromString(\"h1\", Stats::Histogram::Unit::Unspecified);\n  Stats::Histogram& h2 = store_->histogramFromString(\"h2\", Stats::Histogram::Unit::Unspecified);\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 200));\n  h1.recordValue(200);\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h2), 100));\n  h2.recordValue(100);\n\n  store_->mergeHistograms([]() -> void {});\n\n  // Again record a new value in h1 so that it has both interval and cumulative values.\n  // h2 should only have cumulative values.\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 100));\n  h1.recordValue(100);\n\n  store_->mergeHistograms([]() -> void {});\n\n  std::map<std::string, uint64_t> all_stats;\n  std::map<std::string, std::string> all_text_readouts;\n  std::string actual_json =\n      statsAsJsonHandler(all_stats, all_text_readouts, store_->histograms(), false,\n          
               absl::optional<std::regex>{std::regex(\"[a-z]1\")});\n\n  // Because this is a filter case, we don't expect to see any stats except for those containing\n  // \"h1\" in their name.\n  const std::string expected_json = R\"EOF({\n    \"stats\": [\n        {\n            \"histograms\": {\n                \"supported_quantiles\": [\n                    0.0,\n                    25.0,\n                    50.0,\n                    75.0,\n                    90.0,\n                    95.0,\n                    99.0,\n                    99.5,\n                    99.9,\n                    100.0\n                ],\n                \"computed_quantiles\": [\n                    {\n                        \"name\": \"h1\",\n                        \"values\": [\n                            {\n                                \"interval\": 100.0,\n                                \"cumulative\": 100.0\n                            },\n                            {\n                                \"interval\": 102.5,\n                                \"cumulative\": 105.0\n                            },\n                            {\n                                \"interval\": 105.0,\n                                \"cumulative\": 110.0\n                            },\n                            {\n                                \"interval\": 107.5,\n                                \"cumulative\": 205.0\n                            },\n                            {\n                                \"interval\": 109.0,\n                                \"cumulative\": 208.0\n                            },\n                            {\n                                \"interval\": 109.5,\n                                \"cumulative\": 209.0\n                            },\n                            {\n                                \"interval\": 109.9,\n                                \"cumulative\": 209.8\n                            },\n            
                {\n                                \"interval\": 109.95,\n                                \"cumulative\": 209.9\n                            },\n                            {\n                                \"interval\": 109.99,\n                                \"cumulative\": 209.98\n                            },\n                            {\n                                \"interval\": 110.0,\n                                \"cumulative\": 210.0\n                            }\n                        ]\n                    }\n                ]\n            }\n        }\n    ]\n})EOF\";\n\n  EXPECT_THAT(expected_json, JsonStringEq(actual_json));\n  store_->shutdownThreading();\n}\n\nTEST_P(AdminStatsTest, UsedOnlyStatsAsJsonFilterString) {\n  InSequence s;\n  store_->initializeThreading(main_thread_dispatcher_, tls_);\n\n  Stats::Histogram& h1 = store_->histogramFromString(\n      \"h1_matches\", Stats::Histogram::Unit::Unspecified); // Will match, be used, and print\n  Stats::Histogram& h2 = store_->histogramFromString(\n      \"h2_matches\", Stats::Histogram::Unit::Unspecified); // Will match but not be used\n  Stats::Histogram& h3 = store_->histogramFromString(\n      \"h3_not\", Stats::Histogram::Unit::Unspecified); // Will be used but not match\n\n  EXPECT_EQ(\"h1_matches\", h1.name());\n  EXPECT_EQ(\"h2_matches\", h2.name());\n  EXPECT_EQ(\"h3_not\", h3.name());\n\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 200));\n  h1.recordValue(200);\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h3), 200));\n  h3.recordValue(200);\n\n  store_->mergeHistograms([]() -> void {});\n\n  // Again record a new value in h1 and h3 so that they have both interval and cumulative values.\n  // h2 should only have cumulative values.\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 100));\n  h1.recordValue(100);\n  EXPECT_CALL(sink_, onHistogramComplete(Ref(h3), 100));\n  h3.recordValue(100);\n\n  store_->mergeHistograms([]() -> void {});\n\n  
std::map<std::string, uint64_t> all_stats;\n  std::map<std::string, std::string> all_text_readouts;\n  std::string actual_json =\n      statsAsJsonHandler(all_stats, all_text_readouts, store_->histograms(), true,\n                         absl::optional<std::regex>{std::regex(\"h[12]\")});\n\n  // Expected JSON should not have h2 values as it is not used, and should not have h3 values as\n  // they are used but do not match.\n  const std::string expected_json = R\"EOF({\n    \"stats\": [\n        {\n            \"histograms\": {\n                \"supported_quantiles\": [\n                    0.0,\n                    25.0,\n                    50.0,\n                    75.0,\n                    90.0,\n                    95.0,\n                    99.0,\n                    99.5,\n                    99.9,\n                    100.0\n                ],\n                \"computed_quantiles\": [\n                    {\n                        \"name\": \"h1_matches\",\n                        \"values\": [\n                            {\n                                \"interval\": 100.0,\n                                \"cumulative\": 100.0\n                            },\n                            {\n                                \"interval\": 102.5,\n                                \"cumulative\": 105.0\n                            },\n                            {\n                                \"interval\": 105.0,\n                                \"cumulative\": 110.0\n                            },\n                            {\n                                \"interval\": 107.5,\n                                \"cumulative\": 205.0\n                            },\n                            {\n                                \"interval\": 109.0,\n                                \"cumulative\": 208.0\n                            },\n                            {\n                                \"interval\": 109.5,\n                              
  \"cumulative\": 209.0\n                            },\n                            {\n                                \"interval\": 109.9,\n                                \"cumulative\": 209.8\n                            },\n                            {\n                                \"interval\": 109.95,\n                                \"cumulative\": 209.9\n                            },\n                            {\n                                \"interval\": 109.99,\n                                \"cumulative\": 209.98\n                            },\n                            {\n                                \"interval\": 110.0,\n                                \"cumulative\": 210.0\n                            }\n                        ]\n                    }\n                ]\n            }\n        }\n    ]\n})EOF\";\n\n  EXPECT_THAT(expected_json, JsonStringEq(actual_json));\n  store_->shutdownThreading();\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\nTEST_P(AdminInstanceTest, StatsInvalidRegex) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl data;\n  EXPECT_LOG_CONTAINS(\n      \"error\", \"Invalid regex: \",\n      EXPECT_EQ(Http::Code::BadRequest, getCallback(\"/stats?filter=*.test\", header_map, data)));\n\n  // Note: depending on the library, the detailed error message might be one of:\n  //   \"One of *?+{ was not preceded by a valid regular expression.\"\n  //   \"regex_error\"\n  // but we always precede by 'Invalid regex: \"'.\n  EXPECT_THAT(data.toString(), StartsWith(\"Invalid regex: \\\"\"));\n  EXPECT_THAT(data.toString(), EndsWith(\"\\\"\\n\"));\n}\n\nTEST_P(AdminInstanceTest, PrometheusStatsInvalidRegex) {\n  Http::TestResponseHeaderMapImpl header_map;\n  Buffer::OwnedImpl data;\n  EXPECT_LOG_CONTAINS(\n      \"error\", \": 
*.ptest\",\n      EXPECT_EQ(Http::Code::BadRequest,\n                getCallback(\"/stats?format=prometheus&filter=*.ptest\", header_map, data)));\n\n  // Note: depending on the library, the detailed error message might be one of:\n  //   \"One of *?+{ was not preceded by a valid regular expression.\"\n  //   \"regex_error\"\n  // but we always precede by 'Invalid regex: \"'.\n  EXPECT_THAT(data.toString(), StartsWith(\"Invalid regex: \\\"\"));\n  EXPECT_THAT(data.toString(), EndsWith(\"\\\"\\n\"));\n}\n\nTEST_P(AdminInstanceTest, TracingStatsDisabled) {\n  const std::string& name = admin_.tracingStats().service_forced_.name();\n  for (const Stats::CounterSharedPtr& counter : server_.stats().counters()) {\n    EXPECT_NE(counter->name(), name) << \"Unexpected tracing stat found in server stats: \" << name;\n  }\n}\n\nTEST_P(AdminInstanceTest, GetRequestJson) {\n  Http::TestResponseHeaderMapImpl response_headers;\n  std::string body;\n  EXPECT_EQ(Http::Code::OK, admin_.request(\"/stats?format=json\", \"GET\", response_headers, body));\n  EXPECT_THAT(body, HasSubstr(\"{\\\"stats\\\":[\"));\n  EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr(\"application/json\"));\n}\n\nTEST_P(AdminInstanceTest, RecentLookups) {\n  Http::TestResponseHeaderMapImpl response_headers;\n  std::string body;\n\n  // Recent lookup tracking is disabled by default.\n  EXPECT_EQ(Http::Code::OK, admin_.request(\"/stats/recentlookups\", \"GET\", response_headers, body));\n  EXPECT_THAT(body, HasSubstr(\"Lookup tracking is not enabled\"));\n  EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr(\"text/plain\"));\n\n  // We can't test RecentLookups in admin unit tests as it doesn't work with a\n  // fake symbol table. However we cover this solidly in integration tests.\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/api_listener_test.cc",
    "content": "#include <string>\n\n#include \"envoy/config/listener/v3/listener.pb.h\"\n\n#include \"server/api_listener_impl.h\"\n#include \"server/listener_manager_impl.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/listener_component_factory.h\"\n#include \"test/mocks/server/worker.h\"\n#include \"test/mocks/server/worker_factory.h\"\n#include \"test/server/utility.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nclass ApiListenerTest : public testing::Test {\nprotected:\n  ApiListenerTest()\n      : listener_manager_(std::make_unique<ListenerManagerImpl>(server_, listener_factory_,\n                                                                worker_factory_, false)) {}\n\n  NiceMock<MockInstance> server_;\n  NiceMock<MockListenerComponentFactory> listener_factory_;\n  NiceMock<MockWorkerFactory> worker_factory_;\n  std::unique_ptr<ListenerManagerImpl> listener_manager_;\n};\n\nTEST_F(ApiListenerTest, HttpApiListener) {\n  const std::string yaml = R\"EOF(\nname: test_api_listener\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\napi_listener:\n  api_listener:\n    \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n    stat_prefix: hcm\n    route_config:\n      name: api_router\n      virtual_hosts:\n        - name: api\n          domains:\n            - \"*\"\n          routes:\n            - match:\n                prefix: \"/\"\n              route:\n                cluster: dynamic_forward_proxy_cluster\n  )EOF\";\n\n  const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml);\n\n  auto http_api_listener = HttpApiListener(config, *listener_manager_, config.name());\n\n  ASSERT_EQ(\"test_api_listener\", http_api_listener.name());\n  ASSERT_EQ(ApiListener::Type::HttpApiListener, 
http_api_listener.type());\n  ASSERT_TRUE(http_api_listener.http().has_value());\n}\n\nTEST_F(ApiListenerTest, HttpApiListenerThrowsWithBadConfig) {\n  const std::string yaml = R\"EOF(\nname: test_api_listener\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\napi_listener:\n  api_listener:\n    \"@type\": type.googleapis.com/envoy.api.v2.Cluster\n    name: cluster1\n    type: EDS\n    eds_cluster_config:\n      eds_config:\n        path: eds path\n  )EOF\";\n\n  const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml);\n\n  EXPECT_THROW_WITH_MESSAGE(\n      HttpApiListener(config, *listener_manager_, config.name()), EnvoyException,\n      \"Unable to unpack as \"\n      \"envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager: \"\n      \"[type.googleapis.com/envoy.api.v2.Cluster] {\\n  name: \\\"cluster1\\\"\\n  type: EDS\\n  \"\n      \"eds_cluster_config {\\n    eds_config {\\n      path: \\\"eds path\\\"\\n    }\\n  }\\n}\\n\");\n}\n\nTEST_F(ApiListenerTest, HttpApiListenerShutdown) {\n  const std::string yaml = R\"EOF(\nname: test_api_listener\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\napi_listener:\n  api_listener:\n    \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n    stat_prefix: hcm\n    route_config:\n      name: api_router\n      virtual_hosts:\n        - name: api\n          domains:\n            - \"*\"\n          routes:\n            - match:\n                prefix: \"/\"\n              route:\n                cluster: dynamic_forward_proxy_cluster\n  )EOF\";\n\n  const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml);\n\n  auto http_api_listener = HttpApiListener(config, *listener_manager_, config.name());\n\n  ASSERT_EQ(\"test_api_listener\", http_api_listener.name());\n  ASSERT_EQ(ApiListener::Type::HttpApiListener, http_api_listener.type());\n  
ASSERT_TRUE(http_api_listener.http().has_value());\n\n  Network::MockConnectionCallbacks network_connection_callbacks;\n  // TODO(junr03): potentially figure out a way of unit testing this behavior without exposing a\n  // ForTest function.\n  http_api_listener.readCallbacksForTest().connection().addConnectionCallbacks(\n      network_connection_callbacks);\n\n  EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose));\n  // Shutting down the ApiListener should raise an event on all connection callback targets.\n  http_api_listener.shutdown();\n}\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/backtrace_test.cc",
    "content": "#include \"server/backtrace.h\"\n\n#include \"test/test_common/logging.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nTEST(Backward, Basic) {\n  // There isn't much to test here and this feature is really just useful for\n  // debugging. This test simply verifies that we do not cause a crash when\n  // logging a backtrace, and covers the added lines.\n  const bool save_log_to_stderr = BackwardsTrace::logToStderr();\n  BackwardsTrace::setLogToStderr(false);\n  BackwardsTrace tracer;\n  tracer.capture();\n  EXPECT_LOG_CONTAINS(\"critical\", \"Envoy version:\", tracer.logTrace());\n  BackwardsTrace::setLogToStderr(save_log_to_stderr);\n}\n\nTEST(Backward, InvalidUsageTest) {\n  // Ensure we do not crash if logging is attempted when there was no trace captured\n  BackwardsTrace tracer;\n  tracer.logTrace();\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/BUILD",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_cc_fuzz_test\", \"envoy_cc_test\", \"envoy_cc_test_library\", \"envoy_package\", \"envoy_proto_library\")\nload(\"//source/extensions:all_extensions.bzl\", \"envoy_all_extensions\")\nload(\"//bazel:repositories.bzl\", \"PPC_SKIP_TARGETS\", \"WINDOWS_SKIP_TARGETS\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"async_client_test\",\n    srcs = [\"async_client_test.cc\"],\n    deps = [\n        \"//include/envoy/http:message_interface\",\n        \"//source/common/http:message_lib\",\n        \"//source/server/config_validation:async_client_lib\",\n        \"//source/server/config_validation:dns_lib\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"cluster_manager_test\",\n    srcs = [\"cluster_manager_test.cc\"],\n    deps = [\n        \"//include/envoy/upstream:resource_manager_interface\",\n        \"//include/envoy/upstream:upstream_interface\",\n        \"//source/common/api:api_lib\",\n        \"//source/common/singleton:manager_impl_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/extensions/transport_sockets/tls:context_lib\",\n        \"//source/server/config_validation:cluster_manager_lib\",\n        \"//source/server/config_validation:dns_lib\",\n        \"//test/mocks/access_log:access_log_mocks\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/secret:secret_mocks\",\n        \"//test/mocks/server:admin_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/test_common:simulated_time_system_lib\",\n        
\"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nfilegroup(\n    name = \"server_test_data\",\n    srcs = glob([\"test_data/**\"]),\n)\n\nenvoy_cc_test(\n    name = \"server_test\",\n    srcs = [\"server_test.cc\"],\n    data = [\n        \":server_test_data\",\n        \"//configs:example_configs\",\n        \"//test/config_test:example_configs_test_setup.sh\",\n    ],\n    deps = [\n        \"//source/extensions/filters/http/router:config\",\n        \"//source/extensions/filters/network/http_connection_manager:config\",\n        \"//source/extensions/transport_sockets/tls:config\",\n        \"//source/server/config_validation:server_lib\",\n        \"//test/integration:integration_lib\",\n        \"//test/mocks/server:options_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:registry_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"dispatcher_test\",\n    srcs = [\"dispatcher_test.cc\"],\n    deps = [\n        \"//source/common/event:libevent_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//source/server/config_validation:api_lib\",\n        \"//source/server/config_validation:dns_lib\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:network_utility_lib\",\n        \"//test/test_common:test_time_lib\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"config_fuzz_test\",\n    srcs = [\"config_fuzz_test.cc\"],\n    corpus = \"//test/server:server_fuzz_test_corpus\",\n    deps = [\n        \"//source/common/common:thread_lib\",\n        \"//source/server/config_validation:server_lib\",\n        \"//test/integration:integration_lib\",\n        \"//test/mocks/server:options_mocks\",\n        \"//test/test_common:environment_lib\",\n    ] + select({\n        \"//bazel:windows_x86_64\": 
envoy_all_extensions(WINDOWS_SKIP_TARGETS),\n        \"//bazel:linux_ppc\": envoy_all_extensions(PPC_SKIP_TARGETS),\n        \"//conditions:default\": envoy_all_extensions(),\n    }),\n)\n\nenvoy_proto_library(\n    name = \"xds_fuzz_proto\",\n    srcs = [\"xds_fuzz.proto\"],\n)\n\nenvoy_cc_test_library(\n    name = \"xds_verifier_lib\",\n    srcs = [\"xds_verifier.cc\"],\n    hdrs = [\"xds_verifier.h\"],\n    deps = [\n        \":xds_fuzz_proto_cc_proto\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/filter/network/http_connection_manager/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"xds_verifier_test\",\n    srcs = [\"xds_verifier_test.cc\"],\n    deps = [\n        \":xds_verifier_lib\",\n        \"//test/config:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"xds_fuzz_lib\",\n    srcs = [\"xds_fuzz.cc\"],\n    hdrs = [\"xds_fuzz.h\"],\n    deps = [\n        \":xds_fuzz_proto_cc_proto\",\n        \":xds_verifier_lib\",\n        \"//test/fuzz:utility_lib\",\n        \"//test/integration:http_integration_lib\",\n        \"@envoy_api//envoy/admin/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_fuzz_test(\n    name = \"xds_fuzz_test\",\n    srcs = [\"xds_fuzz_test.cc\"],\n    corpus = 
\"xds_corpus\",\n    deps = [\n        \":xds_fuzz_lib\",\n        \"//source/common/protobuf:utility_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/server/config_validation/async_client_test.cc",
    "content": "#include \"envoy/http/message.h\"\n\n#include \"common/http/message_impl.h\"\n\n#include \"server/config_validation/async_client.h\"\n\n#include \"test/mocks/http/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n\nnamespace Envoy {\nnamespace Http {\nnamespace {\n\nTEST(ValidationAsyncClientTest, MockedMethods) {\n  RequestMessagePtr message{new RequestMessageImpl()};\n  MockAsyncClientCallbacks callbacks;\n  MockAsyncClientStreamCallbacks stream_callbacks;\n\n  Event::SimulatedTimeSystem time_system;\n  Api::ApiPtr api = Api::createApiForTest(time_system);\n  ValidationAsyncClient client(*api, time_system);\n  EXPECT_EQ(nullptr, client.send(std::move(message), callbacks, AsyncClient::RequestOptions()));\n  EXPECT_EQ(nullptr, client.start(stream_callbacks, AsyncClient::StreamOptions()));\n}\n\n} // namespace\n} // namespace Http\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/cluster_manager_test.cc",
    "content": "#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/upstream/resource_manager.h\"\n#include \"envoy/upstream/upstream.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/grpc/context_impl.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/singleton/manager_impl.h\"\n\n#include \"server/config_validation/cluster_manager.h\"\n\n#include \"extensions/transport_sockets/tls/context_manager_impl.h\"\n\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/secret/mocks.h\"\n#include \"test/mocks/server/admin.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Upstream {\nnamespace {\n\nTEST(ValidationClusterManagerTest, MockedMethods) {\n  Stats::IsolatedStoreImpl stats_store;\n  Event::SimulatedTimeSystem time_system;\n  NiceMock<ProtobufMessage::MockValidationContext> validation_context;\n  Api::ApiPtr api(Api::createApiForTest(stats_store, time_system));\n  NiceMock<Runtime::MockLoader> runtime;\n  NiceMock<ThreadLocal::MockInstance> tls;\n  NiceMock<Random::MockRandomGenerator> random;\n  testing::NiceMock<Secret::MockSecretManager> secret_manager;\n  auto dns_resolver = std::make_shared<NiceMock<Network::MockDnsResolver>>();\n  Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager{api->timeSource()};\n  NiceMock<Event::MockDispatcher> dispatcher;\n  LocalInfo::MockLocalInfo local_info;\n  NiceMock<Server::MockAdmin> admin;\n  Http::ContextImpl http_context(stats_store.symbolTable());\n  Grpc::ContextImpl grpc_context(stats_store.symbolTable());\n  AccessLog::MockAccessLogManager log_manager;\n  
Singleton::ManagerImpl singleton_manager{Thread::threadFactoryForTest()};\n\n  ValidationClusterManagerFactory factory(\n      admin, runtime, stats_store, tls, dns_resolver, ssl_context_manager, dispatcher, local_info,\n      secret_manager, validation_context, *api, http_context, grpc_context, log_manager,\n      singleton_manager, time_system);\n\n  const envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  ClusterManagerPtr cluster_manager = factory.clusterManagerFromProto(bootstrap);\n  EXPECT_EQ(nullptr, cluster_manager->httpConnPoolForCluster(\"cluster\", ResourcePriority::Default,\n                                                             Http::Protocol::Http11, nullptr));\n  Host::CreateConnectionData data = cluster_manager->tcpConnForCluster(\"cluster\", nullptr);\n  EXPECT_EQ(nullptr, data.connection_);\n  EXPECT_EQ(nullptr, data.host_description_);\n\n  Http::AsyncClient& client = cluster_manager->httpAsyncClientForCluster(\"cluster\");\n  Http::MockAsyncClientStreamCallbacks stream_callbacks;\n  EXPECT_EQ(nullptr, client.start(stream_callbacks, Http::AsyncClient::StreamOptions()));\n}\n\n} // namespace\n} // namespace Upstream\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/config_fuzz_test.cc",
    "content": "#include <fstream>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/common/thread.h\"\n#include \"common/network/address_impl.h\"\n\n#include \"server/config_validation/server.h\"\n\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/integration/server.h\"\n#include \"test/mocks/server/options.h\"\n#include \"test/test_common/environment.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\n// Derived from //test/server:server_fuzz_test.cc, but starts the server in configuration validation\n// mode (quits upon validation of the given config)\nDEFINE_PROTO_FUZZER(const envoy::config::bootstrap::v3::Bootstrap& input) {\n  envoy::config::bootstrap::v3::Bootstrap sanitizedInput(input);\n  // TODO(asraa): QUIC is not enabled in production code yet, so remove references for HTTP3.\n  // Tracked at https://github.com/envoyproxy/envoy/issues/9513.\n  for (auto& cluster : *sanitizedInput.mutable_static_resources()->mutable_clusters()) {\n    for (auto& health_check : *cluster.mutable_health_checks()) {\n      if (health_check.http_health_check().codec_client_type() ==\n          envoy::type::v3::CodecClientType::HTTP3) {\n        health_check.mutable_http_health_check()->clear_codec_client_type();\n      }\n    }\n  }\n  testing::NiceMock<MockOptions> options;\n  TestComponentFactory component_factory;\n  Fuzz::PerTestEnvironment test_env;\n\n  const std::string bootstrap_path = test_env.temporaryPath(\"bootstrap.pb_text\");\n  std::ofstream bootstrap_file(bootstrap_path);\n  bootstrap_file << sanitizedInput.DebugString();\n  options.config_path_ = bootstrap_path;\n  options.log_level_ = Fuzz::Runner::logLevel();\n\n  try {\n    validateConfig(options, Network::Address::InstanceConstSharedPtr(), component_factory,\n                   Thread::threadFactoryForTest(), Filesystem::fileSystemForTest());\n  } catch (const EnvoyException& ex) {\n    ENVOY_LOG_MISC(debug, \"Controlled EnvoyException exit: {}\", ex.what());\n  
}\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/dispatcher_test.cc",
    "content": "#include <chrono>\n\n#include \"common/common/thread.h\"\n#include \"common/event/dispatcher_impl.h\"\n#include \"common/event/libevent.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"server/config_validation/api.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\n\n// Define fixture which allocates ValidationDispatcher.\nclass ConfigValidation : public testing::TestWithParam<Network::Address::IpVersion> {\npublic:\n  ConfigValidation() {\n    validation_ = std::make_unique<Api::ValidationImpl>(\n        Thread::threadFactoryForTest(), stats_store_, test_time_.timeSystem(),\n        Filesystem::fileSystemForTest(), random_generator_);\n    dispatcher_ = validation_->allocateDispatcher(\"test_thread\");\n  }\n\n  DangerousDeprecatedTestTime test_time_;\n  Event::DispatcherPtr dispatcher_;\n  Stats::IsolatedStoreImpl stats_store_;\n  testing::NiceMock<Random::MockRandomGenerator> random_generator_;\n\nprivate:\n  // Using config validation API.\n  std::unique_ptr<Api::ValidationImpl> validation_;\n};\n\n// Simple test which creates a connection to fake upstream client. 
This is to test if\n// ValidationDispatcher can call createClientConnection without crashing.\nTEST_P(ConfigValidation, CreateConnection) {\n  Network::Address::InstanceConstSharedPtr address(\n      Network::Test::getCanonicalLoopbackAddress(GetParam()));\n  dispatcher_->createClientConnection(address, address, Network::Test::createRawBufferSocket(),\n                                      nullptr);\n  SUCCEED();\n}\n\n// Make sure that creating DnsResolver does not cause crash and each call to create\n// DNS resolver returns the same shared_ptr.\nTEST_F(ConfigValidation, SharedDnsResolver) {\n  std::vector<Network::Address::InstanceConstSharedPtr> resolvers;\n\n  Network::DnsResolverSharedPtr dns1 = dispatcher_->createDnsResolver(resolvers, false);\n  long use_count = dns1.use_count();\n  Network::DnsResolverSharedPtr dns2 = dispatcher_->createDnsResolver(resolvers, false);\n\n  EXPECT_EQ(dns1.get(), dns2.get());          // Both point to the same instance.\n  EXPECT_EQ(use_count + 1, dns2.use_count()); // Each call causes ++ in use_count.\n}\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ConfigValidation,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/server_test.cc",
    "content": "#include <vector>\n\n#include \"envoy/server/filter_config.h\"\n\n#include \"server/config_validation/server.h\"\n\n#include \"test/integration/server.h\"\n#include \"test/mocks/server/options.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/test_time.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\n// Test param is the path to the config file to validate.\nclass ValidationServerTest : public testing::TestWithParam<std::string> {\npublic:\n  static void setupTestDirectory() {\n    TestEnvironment::exec(\n        {TestEnvironment::runfilesPath(\"test/config_test/example_configs_test_setup.sh\")});\n    directory_ = TestEnvironment::temporaryDirectory() + \"/test/config_test/\";\n  }\n\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    setupTestDirectory();\n  }\n\nprotected:\n  ValidationServerTest() : options_(directory_ + GetParam()) {}\n\n  static std::string directory_;\n  testing::NiceMock<MockOptions> options_;\n  TestComponentFactory component_factory_;\n};\n\nstd::string ValidationServerTest::directory_ = \"\";\n\n// ValidationServerTest_1 is created only to run different set of parameterized\n// tests than set of tests for ValidationServerTest.\nclass ValidationServerTest_1 : public ValidationServerTest {\npublic:\n  static const std::vector<std::string> getAllConfigFiles() {\n    setupTestDirectory();\n\n    auto files = TestUtility::listFiles(ValidationServerTest::directory_, false);\n\n    // Strip directory part. 
options_ adds it for each test.\n    for (auto& file : files) {\n      file = file.substr(directory_.length() + 1);\n    }\n    return files;\n  }\n};\n\n// RuntimeFeatureValidationServerTest is used to test validation with non-default runtime\n// values.\nclass RuntimeFeatureValidationServerTest : public ValidationServerTest {\npublic:\n  static void SetUpTestSuite() { // NOLINT(readability-identifier-naming)\n    setupTestDirectory();\n  }\n\n  static void setupTestDirectory() {\n    directory_ =\n        TestEnvironment::runfilesDirectory(\"envoy/test/server/config_validation/test_data/\");\n  }\n\n  static const std::vector<std::string> getAllConfigFiles() {\n    setupTestDirectory();\n\n    auto files = TestUtility::listFiles(ValidationServerTest::directory_, false);\n\n    // Strip directory part. options_ adds it for each test.\n    for (auto& file : files) {\n      file = file.substr(directory_.length() + 1);\n    }\n    return files;\n  }\n\n  class TestConfigFactory : public Configuration::NamedNetworkFilterConfigFactory {\n  public:\n    std::string name() const override { return \"envoy.filters.network.test\"; }\n\n    Network::FilterFactoryCb createFilterFactoryFromProto(const Protobuf::Message&,\n                                                          Configuration::FactoryContext&) override {\n      // Validate that the validation server loaded the runtime data and installed the singleton.\n      auto* runtime = Runtime::LoaderSingleton::getExisting();\n      if (runtime == nullptr) {\n        throw EnvoyException(\"Runtime::LoaderSingleton == nullptr\");\n      }\n\n      if (!runtime->threadsafeSnapshot()->getBoolean(\"test.runtime.loaded\", false)) {\n        throw EnvoyException(\n            \"Found Runtime::LoaderSingleton, got wrong value for test.runtime.loaded\");\n      }\n\n      return [](Network::FilterManager&) {};\n    }\n\n    ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n      return ProtobufTypes::MessagePtr{new 
ProtobufWkt::Struct()};\n    }\n\n    bool isTerminalFilter() override { return true; }\n  };\n};\n\nTEST_P(ValidationServerTest, Validate) {\n  EXPECT_TRUE(validateConfig(options_, Network::Address::InstanceConstSharedPtr(),\n                             component_factory_, Thread::threadFactoryForTest(),\n                             Filesystem::fileSystemForTest()));\n}\n\nTEST_P(ValidationServerTest, NoopLifecycleNotifier) {\n  Thread::MutexBasicLockable access_log_lock;\n  Stats::IsolatedStoreImpl stats_store;\n  DangerousDeprecatedTestTime time_system;\n  ValidationInstance server(options_, time_system.timeSystem(),\n                            Network::Address::InstanceConstSharedPtr(), stats_store,\n                            access_log_lock, component_factory_, Thread::threadFactoryForTest(),\n                            Filesystem::fileSystemForTest());\n  server.registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit, [] { FAIL(); });\n  server.registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit,\n                          [](Event::PostCb) { FAIL(); });\n  server.shutdown();\n}\n\n// TODO(rlazarus): We'd like use this setup to replace //test/config_test (that is, run it against\n// all the example configs) but can't until light validation is implemented, mocking out access to\n// the filesystem for TLS certs, etc. In the meantime, these are the example configs that work\n// as-is. (Note, /dev/stdout as an access log file is invalid on Windows, no equivalent /dev/\n// exists.)\n\nauto testing_values = ::testing::Values(\"front-proxy_front-envoy.yaml\", \"google_com_proxy.v2.yaml\",\n#ifndef WIN32\n                                        \"grpc-bridge_server_envoy-proxy.yaml\",\n#endif\n                                        \"front-proxy_service-envoy.yaml\");\n\nINSTANTIATE_TEST_SUITE_P(ValidConfigs, ValidationServerTest, testing_values);\n\n// Just make sure that all configs can be ingested without a crash. 
Processing of config files\n// may not be successful, but there should be no crash.\nTEST_P(ValidationServerTest_1, RunWithoutCrash) {\n  auto local_address = Network::Utility::getLocalAddress(options_.localAddressIpVersion());\n  validateConfig(options_, local_address, component_factory_, Thread::threadFactoryForTest(),\n                 Filesystem::fileSystemForTest());\n  SUCCEED();\n}\n\nINSTANTIATE_TEST_SUITE_P(AllConfigs, ValidationServerTest_1,\n                         ::testing::ValuesIn(ValidationServerTest_1::getAllConfigFiles()));\n\nTEST_P(RuntimeFeatureValidationServerTest, ValidRuntimeLoaderSingleton) {\n  TestConfigFactory factory;\n  Registry::InjectFactory<Configuration::NamedNetworkFilterConfigFactory> registration(factory);\n\n  auto local_address = Network::Utility::getLocalAddress(options_.localAddressIpVersion());\n\n  // If this fails, it's likely because TestConfigFactory threw an exception related to the\n  // runtime loader.\n  ASSERT_TRUE(validateConfig(options_, local_address, component_factory_,\n                             Thread::threadFactoryForTest(), Filesystem::fileSystemForTest()));\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    AllConfigs, RuntimeFeatureValidationServerTest,\n    ::testing::ValuesIn(RuntimeFeatureValidationServerTest::getAllConfigFiles()));\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/test_data/runtime_config.yaml",
    "content": "---\nnode:\n  id: \"test\"\nlayered_runtime:\n  layers:\n    - name: static-layer\n      static_layer:\n        \"test.runtime.loaded\": true\nstatic_resources:\n  listeners:\n    - name: \"test.listener\"\n      address:\n        socket_address:\n          protocol: TCP\n          address: 0.0.0.0\n          port_value: 0\n      filter_chains:\n        - filters:\n            - name: envoy.filters.network.test\n              typed_config:\n                \"@type\": type.googleapis.com/google.protobuf.Struct\nadmin:\n  access_log_path: \"/dev/null\"\n  address:\n    socket_address:\n      address: 0.0.0.0\n      port_value: 9000\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/clusterfuzz-testcase-minimized-xds_fuzz_test-6524356210196480",
    "content": "actions {\n  add_listener {\n    listener_num: 256\n    route_num: 6356993\n  }\n}\nactions {\n  add_listener {\n    route_num: 16\n  }\n}\nactions {\n  remove_listener {\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n    route_num: 11264\n  }\n}\nactions {\n  add_listener {\n    listener_num: 2147483648\n    route_num: 2147483648\n  }\n}\nactions {\n  add_listener {\n    listener_num: 6356993\n    route_num: 11264\n  }\n}\nactions {\n  add_listener {\n    listener_num: 256\n    route_num: 65537\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n    listener_num: 2147483648\n    route_num: 2\n  }\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/clusterfuzz-testcase-xds_fuzz_test-6589246463541248",
    "content": "actions {\n  add_listener {\n    listener_num: 1024\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n  }\n}\nactions {\n  add_listener {\n    listener_num: 1851719680\n  }\n}\nactions {\n  add_listener {\n    listener_num: 4\n    route_num: 1667301376\n  }\n}\nactions {\n  add_listener {\n    listener_num: 268435456\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n    listener_num: 17\n    route_num: 20992\n  }\n}\nactions {\n  remove_listener {\n    listener_num: 16711680\n  }\n}\nactions {\n  add_listener {\n    listener_num: 1\n    route_num: 42\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n  }\n}\nactions {\n  add_listener {\n    listener_num: 8\n    route_num: 1\n  }\n}\nactions {\n  add_route {\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n    listener_num: 17\n    route_num: 20992\n  }\n}\nactions {\n  remove_listener {\n    listener_num: 16711680\n  }\n}\nactions {\n  add_listener {\n    listener_num: 822083584\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_route {\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  remove_listener {\n    listener_num: 16711680\n  }\n}\nconfig {\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example0",
    "content": "actions {\n  add_listener {\n    listener_num : 0\n    route_num : 0\n  }\n}\nactions {\n  add_route {\n    route_num : 0\n  }\n}\nactions {\n  add_listener {\n    listener_num : 1\n    route_num : 1\n  }\n}\nactions {\n  add_route {\n    route_num : 1\n  }\n}\nactions {\n  add_listener {\n    listener_num : 2\n    route_num : 2\n  }\n}\nconfig {\n  sotw_or_delta : SOTW\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example1",
    "content": "actions {\n  remove_listener {\n    listener_num: 1\n  }\n}\nconfig {\n  sotw_or_delta: SOTW\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example10",
    "content": "actions {\n  add_listener {\n    route_num: 100728832\n  }\n}\nactions {\n  add_route {\n  }\n}\nactions {\n  add_listener {\n    listener_num: 1\n    route_num: 1\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nconfig {\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example13",
    "content": "actions {\n  add_listener {\n    listener_num: 2\n    route_num: 3\n  }\n}\nactions {\n  add_route {\n    route_num: 3\n  }\n}\nactions {\n  add_route {\n    route_num: 3\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n    listener_num: 0\n    route_num: 1\n  }\n}\nactions {\n  add_route {\n    route_num: 0\n  }\n}\nactions {\n  remove_listener {\n    listener_num: 1\n  }\n}\nactions {\n  add_listener {\n    listener_num: 2\n    route_num: 3\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n    listener_num: 2\n    route_num: 3\n  }\n}\nconfig {\n  sotw_or_delta: SOTW\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example2",
    "content": "actions {\n  add_listener {\n\tlistener_num: 1\n\troute_num: 1\n  }\n}\nactions {\n  add_route {\n\troute_num: 1\n  }\n}\nactions {\n  add_route {\n\troute_num: 1\n  }\n}\nactions {\n  add_listener {\n\tlistener_num: 2\n\troute_num: 2\n  }\n}\nconfig {\n  sotw_or_delta : DELTA\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example3",
    "content": "actions {\n  add_route {\n    route_num : 0\n  }\n}\nactions {\n  add_listener {\n    listener_num : 0\n    route_num : 0\n  }\n}\nactions {\n  remove_listener {\n    listener_num : 0\n  }\n}\nconfig {\n  sotw_or_delta : SOTW\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example4",
    "content": "actions {\n  add_listener {\n    listener_num: 1\n    route_num: 2\n  }\n}\nactions {\n  add_listener {\n    listener_num: 1\n    route_num: 1\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n    listener_num: 1\n    route_num: 2\n  }\n}\nconfig {\n  sotw_or_delta: DELTA\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example5",
    "content": "actions {\n  add_route {\n    route_num: 4261412864\n  }\n}\nactions {\n  remove_listener {\n    listener_num: 7012368\n  }\n}\nactions {\n  add_route {\n    route_num: 4261412864\n  }\n}\nactions {\n  remove_listener {\n    listener_num: 7012388\n  }\n}\nactions {\n  add_route {\n    route_num: 7012388\n  }\n}\nactions {\n  remove_listener {\n    listener_num: 7012352\n  }\n}\nconfig {\n  sotw_or_delta: DELTA\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example6",
    "content": "actions {\n  add_listener {\n    listener_num: 1\n    route_num: 1\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  remove_listener {\n    listener_num: 1\n  }\n}\nconfig {\n  sotw_or_delta: DELTA\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example7",
    "content": "actions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n  }\n}\nconfig {\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example8",
    "content": "actions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n  }\n}\nactions {\n  remove_listener {\n  }\n}\nconfig {\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_corpus/example9",
    "content": "actions {\n  add_listener {\n  }\n}\nactions {\n  add_route {\n    route_num: 1\n  }\n}\nactions {\n  add_listener {\n    listener_num: 1\n    route_num: 1\n  }\n}\nactions {\n  add_route {\n  }\n}\nconfig {\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_fuzz.cc",
    "content": "#include \"test/server/config_validation/xds_fuzz.h\"\n\n#include \"envoy/api/v2/route.pb.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n\nnamespace Envoy {\n\n// Helper functions to build API responses.\nenvoy::config::cluster::v3::Cluster XdsFuzzTest::buildCluster(const std::string& name) {\n  return ConfigHelper::buildCluster(name, \"ROUND_ROBIN\", api_version_);\n};\n\nenvoy::config::endpoint::v3::ClusterLoadAssignment\nXdsFuzzTest::buildClusterLoadAssignment(const std::string& name) {\n  return ConfigHelper::buildClusterLoadAssignment(\n      name, Network::Test::getLoopbackAddressString(ip_version_),\n      fake_upstreams_[0]->localAddress()->ip()->port(), api_version_);\n}\n\nenvoy::config::listener::v3::Listener XdsFuzzTest::buildListener(const std::string& listener_name,\n                                                                 const std::string& route_name) {\n  return ConfigHelper::buildListener(listener_name, route_name,\n                                     Network::Test::getLoopbackAddressString(ip_version_),\n                                     \"ads_test\", api_version_);\n}\n\nenvoy::config::route::v3::RouteConfiguration\nXdsFuzzTest::buildRouteConfig(const std::string& route_name) {\n  return ConfigHelper::buildRouteConfig(route_name, \"cluster_0\", api_version_);\n}\n\n// Helper functions to send API responses.\nvoid XdsFuzzTest::updateListener(\n    const std::vector<envoy::config::listener::v3::Listener>& listeners,\n    const std::vector<envoy::config::listener::v3::Listener>& added_or_updated,\n    const std::vector<std::string>& removed) {\n  ENVOY_LOG_MISC(debug, \"Sending Listener DiscoveryResponse version {}\", version_);\n  
sendDiscoveryResponse<envoy::config::listener::v3::Listener>(Config::TypeUrl::get().Listener,\n                                                               listeners, added_or_updated, removed,\n                                                               std::to_string(version_));\n}\n\nvoid XdsFuzzTest::updateRoute(\n    const std::vector<envoy::config::route::v3::RouteConfiguration>& routes,\n    const std::vector<envoy::config::route::v3::RouteConfiguration>& added_or_updated,\n    const std::vector<std::string>& removed) {\n  ENVOY_LOG_MISC(debug, \"Sending Route DiscoveryResponse version {}\", version_);\n  sendDiscoveryResponse<envoy::config::route::v3::RouteConfiguration>(\n      Config::TypeUrl::get().RouteConfiguration, routes, added_or_updated, removed,\n      std::to_string(version_));\n}\n\nXdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& input,\n                         envoy::config::core::v3::ApiVersion api_version)\n    : HttpIntegrationTest(\n          Http::CodecClient::Type::HTTP2, TestEnvironment::getIpVersionsForTest()[0],\n          ConfigHelper::adsBootstrap(input.config().sotw_or_delta() ==\n                                             test::server::config_validation::Config::SOTW\n                                         ? 
\"GRPC\"\n                                         : \"DELTA_GRPC\",\n                                     api_version)),\n      verifier_(input.config().sotw_or_delta()), actions_(input.actions()), version_(1),\n      api_version_(api_version), ip_version_(TestEnvironment::getIpVersionsForTest()[0]) {\n  use_lds_ = false;\n  create_xds_upstream_ = true;\n  tls_xds_upstream_ = false;\n\n  // Avoid listeners draining during the test.\n  drain_time_ = std::chrono::seconds(60);\n\n  if (input.config().sotw_or_delta() == test::server::config_validation::Config::SOTW) {\n    sotw_or_delta_ = Grpc::SotwOrDelta::Sotw;\n  } else {\n    sotw_or_delta_ = Grpc::SotwOrDelta::Delta;\n  }\n}\n\n/**\n * Initialize an envoy configured with a fully dynamic bootstrap with ADS over gRPC.\n */\nvoid XdsFuzzTest::initialize() {\n  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {\n    auto* ads_config = bootstrap.mutable_dynamic_resources()->mutable_ads_config();\n    auto* grpc_service = ads_config->add_grpc_services();\n\n    std::string cluster_name = \"ads_cluster\";\n    grpc_service->mutable_envoy_grpc()->set_cluster_name(cluster_name);\n    auto* ads_cluster = bootstrap.mutable_static_resources()->add_clusters();\n    ads_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]);\n    ads_cluster->set_name(\"ads_cluster\");\n  });\n  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);\n  HttpIntegrationTest::initialize();\n  if (xds_stream_ == nullptr) {\n    createXdsConnection();\n    AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);\n    RELEASE_ASSERT(result, result.message());\n    xds_stream_->startGrpcStream();\n  }\n}\n\nvoid XdsFuzzTest::close() {\n  cleanUpXdsConnection();\n  test_server_.reset();\n  fake_upstreams_.clear();\n}\n\n/**\n * @return true iff listener_name is in listeners_ (and removes it from listeners_)\n */\nbool XdsFuzzTest::eraseListener(const std::string& 
listener_name) {\n  const auto orig_size = listeners_.size();\n  listeners_.erase(std::remove_if(listeners_.begin(), listeners_.end(),\n                                  [&](auto& listener) { return listener.name() == listener_name; }),\n                   listeners_.end());\n  return orig_size != listeners_.size();\n}\n\n/**\n * @return true iff route_name has already been added to routes_\n */\nbool XdsFuzzTest::hasRoute(const std::string& route_name) {\n  return std::any_of(routes_.begin(), routes_.end(),\n                     [&](auto& route) { return route.name() == route_name; });\n}\n\n/**\n * Log the current state of the verifier and the test server for debugging purposes to see\n * discrepancies.\n */\nvoid XdsFuzzTest::logState() {\n  ENVOY_LOG_MISC(debug, \"warming {} ({}), active {} ({}), draining {} ({})\", verifier_.numWarming(),\n                 test_server_->gauge(\"listener_manager.total_listeners_warming\")->value(),\n                 verifier_.numActive(),\n                 test_server_->gauge(\"listener_manager.total_listeners_active\")->value(),\n                 verifier_.numDraining(),\n                 test_server_->gauge(\"listener_manager.total_listeners_draining\")->value());\n  ENVOY_LOG_MISC(\n      debug, \"added {} ({}), modified {} ({}), removed {} ({})\", verifier_.numAdded(),\n      test_server_->counter(\"listener_manager.listener_added\")->value(), verifier_.numModified(),\n      test_server_->counter(\"listener_manager.listener_modified\")->value(), verifier_.numRemoved(),\n      test_server_->counter(\"listener_manager.listener_removed\")->value());\n}\n\n/**\n * Send an xDS response to add a listener and update state accordingly.\n */\nvoid XdsFuzzTest::addListener(const std::string& listener_name, const std::string& route_name) {\n  ENVOY_LOG_MISC(debug, \"Adding {} with reference to {}\", listener_name, route_name);\n  lds_update_success_++;\n  bool removed = eraseListener(listener_name);\n  auto listener = 
buildListener(listener_name, route_name);\n  listeners_.push_back(listener);\n\n  updateListener(listeners_, {listener}, {});\n\n  // Use waitForAck instead of compareDiscoveryRequest as the client makes additional\n  // DiscoveryRequests at launch that we might not want to respond to yet.\n  EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_)));\n  if (removed) {\n    verifier_.listenerUpdated(listener);\n  } else {\n    verifier_.listenerAdded(listener);\n  }\n}\n\n/**\n * Send an xDS response to remove a listener and update state accordingly.\n */\nvoid XdsFuzzTest::removeListener(const std::string& listener_name) {\n  ENVOY_LOG_MISC(debug, \"Removing {}\", listener_name);\n  bool removed = eraseListener(listener_name);\n\n  if (removed) {\n    lds_update_success_++;\n    updateListener(listeners_, {}, {listener_name});\n    EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_)));\n    verifier_.listenerRemoved(listener_name);\n  }\n}\n\n/**\n * Send an xDS response to add a route and update state accordingly.\n */\nvoid XdsFuzzTest::addRoute(const std::string& route_name) {\n  ENVOY_LOG_MISC(debug, \"Adding {}\", route_name);\n  auto route = buildRouteConfig(route_name);\n\n  if (!hasRoute(route_name)) {\n    routes_.push_back(route);\n  }\n\n  updateRoute(routes_, {route}, {});\n  verifier_.routeAdded(route);\n\n  EXPECT_TRUE(waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_)));\n}\n\n/**\n * Wait for a specific ACK, ignoring any other ACKs that are made in the meantime.\n * @param the expected API type url of the ack\n * @param the expected version number\n * @return AssertionSuccess() if the ack was received, else an AssertionError()\n */\nAssertionResult XdsFuzzTest::waitForAck(const std::string& expected_type_url,\n                                        const std::string& expected_version) {\n  if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) {\n    
API_NO_BOOST(envoy::api::v2::DiscoveryRequest) discovery_request;\n    do {\n      VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request));\n      ENVOY_LOG_MISC(debug, \"Received gRPC message with type {} and version {}\",\n                     discovery_request.type_url(), discovery_request.version_info());\n    } while (expected_type_url != discovery_request.type_url() ||\n             expected_version != discovery_request.version_info());\n  } else {\n    API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) delta_discovery_request;\n    do {\n      VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, delta_discovery_request));\n      ENVOY_LOG_MISC(debug, \"Received gRPC message with type {}\",\n                     delta_discovery_request.type_url());\n    } while (expected_type_url != delta_discovery_request.type_url());\n  }\n  version_++;\n  return AssertionSuccess();\n}\n\n/**\n * Run the sequence of actions defined in the fuzzed protobuf.\n */\nvoid XdsFuzzTest::replay() {\n  initialize();\n\n  // Set up cluster.\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, \"\", {}, {}, {}, true));\n  sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster,\n                                                             {buildCluster(\"cluster_0\")},\n                                                             {buildCluster(\"cluster_0\")}, {}, \"0\");\n  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, \"\",\n                                      {\"cluster_0\"}, {\"cluster_0\"}, {}));\n  sendDiscoveryResponse<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n      Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment(\"cluster_0\")},\n      {buildClusterLoadAssignment(\"cluster_0\")}, {}, \"0\");\n\n  // The client will not subscribe to the RouteConfiguration type URL until it receives a listener,\n  // and the ACKS it 
sends back seem to be an empty type URL so just don't check them until a\n  // listener is added.\n  bool sent_listener = false;\n\n  for (const auto& action : actions_) {\n    switch (action.action_selector_case()) {\n    case test::server::config_validation::Action::kAddListener: {\n      std::string listener_name =\n          absl::StrCat(\"listener_\", action.add_listener().listener_num() % ListenersMax);\n      std::string route_name =\n          absl::StrCat(\"route_config_\", action.add_listener().route_num() % RoutesMax);\n      addListener(listener_name, route_name);\n      if (!sent_listener) {\n        addRoute(route_name);\n        test_server_->waitForCounterEq(\"listener_manager.listener_create_success\", 1, timeout_);\n      }\n      sent_listener = true;\n      break;\n    }\n    case test::server::config_validation::Action::kRemoveListener: {\n      std::string listener_name =\n          absl::StrCat(\"listener_\", action.remove_listener().listener_num() % ListenersMax);\n      removeListener(listener_name);\n      break;\n    }\n    case test::server::config_validation::Action::kAddRoute: {\n      if (!sent_listener) {\n        ENVOY_LOG_MISC(debug, \"Ignoring request to add route_{}\",\n                       action.add_route().route_num() % RoutesMax);\n        break;\n      }\n      std::string route_name =\n          absl::StrCat(\"route_config_\", action.add_route().route_num() % RoutesMax);\n      addRoute(route_name);\n      break;\n    }\n    default:\n      break;\n    }\n    if (sent_listener) {\n      // Wait for all of the updates to take effect.\n      test_server_->waitForGaugeEq(\"listener_manager.total_listeners_warming\",\n                                   verifier_.numWarming(), timeout_);\n      test_server_->waitForGaugeEq(\"listener_manager.total_listeners_active\", verifier_.numActive(),\n                                   timeout_);\n      test_server_->waitForGaugeEq(\"listener_manager.total_listeners_draining\",\n         
                          verifier_.numDraining(), timeout_);\n      test_server_->waitForCounterEq(\"listener_manager.listener_modified\", verifier_.numModified(),\n                                     timeout_);\n      test_server_->waitForCounterEq(\"listener_manager.listener_added\", verifier_.numAdded(),\n                                     timeout_);\n      test_server_->waitForCounterEq(\"listener_manager.listener_removed\", verifier_.numRemoved(),\n                                     timeout_);\n      test_server_->waitForCounterEq(\"listener_manager.lds.update_success\", lds_update_success_,\n                                     timeout_);\n    }\n    logState();\n  }\n\n  verifyState();\n  close();\n}\n\n/**\n * Verify that each listener in the verifier has a matching listener in the config dump.\n */\nvoid XdsFuzzTest::verifyListeners() {\n  ENVOY_LOG_MISC(debug, \"Verifying listeners\");\n  const auto& abstract_rep = verifier_.listeners();\n  const auto dump = getListenersConfigDump().dynamic_listeners();\n\n  for (const auto& rep : abstract_rep) {\n    ENVOY_LOG_MISC(debug, \"Verifying {} with state {}\", rep.listener.name(), rep.state);\n\n    auto listener_dump = std::find_if(dump.begin(), dump.end(), [&](auto& listener) {\n      return listener.name() == rep.listener.name();\n    });\n\n    // There should be a listener of the same name in the dump.\n    if (listener_dump == dump.end()) {\n      throw EnvoyException(fmt::format(\"Expected to find {} in config dump\", rep.listener.name()));\n    }\n\n    // The state should match.\n    switch (rep.state) {\n    case XdsVerifier::DRAINING:\n      FUZZ_ASSERT(listener_dump->has_draining_state());\n      break;\n    case XdsVerifier::WARMING:\n      FUZZ_ASSERT(listener_dump->has_warming_state());\n      break;\n    case XdsVerifier::ACTIVE:\n      FUZZ_ASSERT(listener_dump->has_active_state());\n      break;\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n}\n\nvoid 
XdsFuzzTest::verifyRoutes() {\n  auto dump = getRoutesConfigDump();\n\n  // Go through routes in verifier and make sure each is in the config dump.\n  auto routes = verifier_.routes();\n  FUZZ_ASSERT(routes.size() == dump.size());\n  for (const auto& route : routes) {\n    FUZZ_ASSERT(std::any_of(dump.begin(), dump.end(), [&](const auto& dump_route) {\n      return route.first == dump_route.name();\n    }));\n  }\n}\n\nvoid XdsFuzzTest::verifyState() {\n  verifyListeners();\n  ENVOY_LOG_MISC(debug, \"Verified listeners\");\n  verifyRoutes();\n  ENVOY_LOG_MISC(debug, \"Verified routes\");\n\n  FUZZ_ASSERT(test_server_->gauge(\"listener_manager.total_listeners_draining\")->value() ==\n              verifier_.numDraining());\n  FUZZ_ASSERT(test_server_->gauge(\"listener_manager.total_listeners_warming\")->value() ==\n              verifier_.numWarming());\n  FUZZ_ASSERT(test_server_->gauge(\"listener_manager.total_listeners_active\")->value() ==\n              verifier_.numActive());\n  ENVOY_LOG_MISC(debug, \"Verified stats\");\n  ENVOY_LOG_MISC(debug, \"warming {} ({}), active {} ({}), draining {} ({})\", verifier_.numWarming(),\n                 test_server_->gauge(\"listener_manager.total_listeners_warming\")->value(),\n                 verifier_.numActive(),\n                 test_server_->gauge(\"listener_manager.total_listeners_active\")->value(),\n                 verifier_.numDraining(),\n                 test_server_->gauge(\"listener_manager.total_listeners_draining\")->value());\n}\n\nenvoy::admin::v3::ListenersConfigDump XdsFuzzTest::getListenersConfigDump() {\n  auto message_ptr =\n      test_server_->server().admin().getConfigTracker().getCallbacksMap().at(\"listeners\")();\n  return dynamic_cast<const envoy::admin::v3::ListenersConfigDump&>(*message_ptr);\n}\n\nstd::vector<envoy::api::v2::RouteConfiguration> XdsFuzzTest::getRoutesConfigDump() {\n  auto map = test_server_->server().admin().getConfigTracker().getCallbacksMap();\n\n  // There is no route 
config dump before envoy has a route.\n  if (map.find(\"routes\") == map.end()) {\n    return {};\n  }\n\n  auto message_ptr = map.at(\"routes\")();\n  auto dump = dynamic_cast<const envoy::admin::v3::RoutesConfigDump&>(*message_ptr);\n\n  // Since the route config dump gives the RouteConfigurations as an Any, go through and cast them\n  // back to RouteConfigurations.\n  std::vector<envoy::api::v2::RouteConfiguration> dump_routes;\n  for (const auto& route : dump.dynamic_route_configs()) {\n    envoy::api::v2::RouteConfiguration dyn_route;\n    route.route_config().UnpackTo(&dyn_route);\n    dump_routes.push_back(dyn_route);\n  }\n  return dump_routes;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/xds_fuzz.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n\n#include \"test/common/grpc/grpc_client_integration.h\"\n#include \"test/config/utility.h\"\n#include \"test/fuzz/utility.h\"\n#include \"test/integration/http_integration.h\"\n#include \"test/server/config_validation/xds_fuzz.pb.h\"\n#include \"test/server/config_validation/xds_verifier.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\n\nclass XdsFuzzTest : public HttpIntegrationTest {\npublic:\n  XdsFuzzTest(const test::server::config_validation::XdsTestCase& input,\n              envoy::config::core::v3::ApiVersion api_version);\n\n  envoy::config::cluster::v3::Cluster buildCluster(const std::string& name);\n\n  envoy::config::endpoint::v3::ClusterLoadAssignment\n  buildClusterLoadAssignment(const std::string& name);\n\n  envoy::config::listener::v3::Listener buildListener(const std::string& listener_name,\n                                                      const std::string& route_name);\n\n  envoy::config::route::v3::RouteConfiguration buildRouteConfig(const std::string& route_name);\n\n  void updateListener(const std::vector<envoy::config::listener::v3::Listener>& listeners,\n                      const std::vector<envoy::config::listener::v3::Listener>& added_or_updated,\n                      const std::vector<std::string>& removed);\n\n  void\n  updateRoute(const std::vector<envoy::config::route::v3::RouteConfiguration>& routes,\n              const std::vector<envoy::config::route::v3::RouteConfiguration>& added_or_updated,\n              const std::vector<std::string>& removed);\n\n  void initialize() override;\n  void replay();\n  void close();\n\n  const size_t ListenersMax = 3;\n  const size_t RoutesMax = 
5;\n\nprivate:\n  void addListener(const std::string& listener_name, const std::string& route_name);\n  void removeListener(const std::string& listener_name);\n  void addRoute(const std::string& route_name);\n\n  void logState();\n\n  void verifyState();\n  void verifyListeners();\n  void verifyRoutes();\n\n  envoy::admin::v3::ListenersConfigDump getListenersConfigDump();\n  std::vector<envoy::api::v2::RouteConfiguration> getRoutesConfigDump();\n\n  bool eraseListener(const std::string& listener_name);\n  bool hasRoute(const std::string& route_name);\n  AssertionResult waitForAck(const std::string& expected_type_url,\n                             const std::string& expected_version);\n\n  XdsVerifier verifier_;\n\n  Protobuf::RepeatedPtrField<test::server::config_validation::Action> actions_;\n  std::vector<envoy::config::route::v3::RouteConfiguration> routes_;\n  std::vector<envoy::config::listener::v3::Listener> listeners_;\n\n  uint64_t version_;\n  envoy::config::core::v3::ApiVersion api_version_;\n\n  Network::Address::IpVersion ip_version_;\n\n  std::chrono::seconds timeout_{5};\n  uint64_t lds_update_success_{0};\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/xds_fuzz.proto",
    "content": "syntax = \"proto3\";\n\npackage test.server.config_validation;\n\nimport \"validate/validate.proto\";\n\nmessage AddListener {\n  // generates a new listener listener_x with number listener_num, which can later be removed by\n  // RemoveListener\n  // if listener_x had already been added, it will update listener_x's route_config\n  uint32 listener_num = 1;\n  // listener_x references route_y, which has number route_num\n  uint32 route_num = 2;\n}\n\nmessage AddRoute {\n  // generates a new route route_y with number route_num which can later be removed by a RemoveRoute\n  uint32 route_num = 1;\n}\n\nmessage RemoveListener {\n  // removes listener_x\n  uint32 listener_num = 1;\n}\n\nmessage Action {\n  oneof action_selector {\n    option (validate.required) = true;\n\n    AddListener add_listener = 1;\n    AddRoute add_route = 2;\n    RemoveListener remove_listener = 3;\n  }\n}\n\nmessage Config {\n  enum SotwOrDelta {\n    SOTW = 0;\n    DELTA = 1;\n  }\n  SotwOrDelta sotw_or_delta = 1;\n}\n\nmessage XdsTestCase {\n  repeated Action actions = 1;\n  Config config = 2;\n}\n"
  },
  {
    "path": "test/server/config_validation/xds_fuzz_test.cc",
    "content": "#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/server/config_validation/xds_fuzz.h\"\n#include \"test/server/config_validation/xds_fuzz.pb.validate.h\"\n\nnamespace Envoy {\n\nDEFINE_PROTO_FUZZER(const test::server::config_validation::XdsTestCase& input) {\n  RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), \"\");\n  try {\n    TestUtility::validate(input);\n  } catch (const ProtoValidationException& e) {\n    ENVOY_LOG_MISC(debug, \"ProtoValidationException: {}\", e.what());\n    return;\n  }\n  XdsFuzzTest test(input, envoy::config::core::v3::ApiVersion::V2);\n  test.replay();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/xds_verifier.cc",
    "content": "#include \"test/server/config_validation/xds_verifier.h\"\n\n#include \"common/common/logger.h\"\n\nnamespace Envoy {\n\nXdsVerifier::XdsVerifier(test::server::config_validation::Config::SotwOrDelta sotw_or_delta)\n    : num_warming_(0), num_active_(0), num_draining_(0), num_added_(0), num_modified_(0),\n      num_removed_(0) {\n  if (sotw_or_delta == test::server::config_validation::Config::SOTW) {\n    sotw_or_delta_ = SOTW;\n  } else {\n    sotw_or_delta_ = DELTA;\n  }\n  ENVOY_LOG_MISC(debug, \"sotw_or_delta_ = {}\", sotw_or_delta_);\n}\n\n/**\n * Get the route referenced by a listener.\n */\nstd::string XdsVerifier::getRoute(const envoy::config::listener::v3::Listener& listener) {\n  envoy::config::listener::v3::Filter filter0 = listener.filter_chains()[0].filters()[0];\n  envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager conn_man;\n  filter0.typed_config().UnpackTo(&conn_man);\n  return conn_man.rds().route_config_name();\n}\n\n/**\n * @return true iff the route listener refers to is in all_routes_\n */\nbool XdsVerifier::hasRoute(const envoy::config::listener::v3::Listener& listener) {\n  return hasRoute(getRoute(listener));\n}\n\nbool XdsVerifier::hasRoute(const std::string& name) { return all_routes_.contains(name); }\n\nbool XdsVerifier::hasActiveRoute(const envoy::config::listener::v3::Listener& listener) {\n  return hasActiveRoute(getRoute(listener));\n}\n\nbool XdsVerifier::hasActiveRoute(const std::string& name) { return active_routes_.contains(name); }\n\nbool XdsVerifier::hasListener(const std::string& name, ListenerState state) {\n  return std::any_of(listeners_.begin(), listeners_.end(), [&](const auto& rep) {\n    return rep.listener.name() == name && state == rep.state;\n  });\n}\n\n/**\n * Pretty prints the currently stored listeners and their states.\n */\nvoid XdsVerifier::dumpState() {\n  ENVOY_LOG_MISC(debug, \"Listener Dump:\");\n  for (const auto& rep : listeners_) {\n    
ENVOY_LOG_MISC(debug, \"Name: {}, Route {}, State: {}\", rep.listener.name(),\n                   getRoute(rep.listener), rep.state);\n  }\n}\n\n/*\n * If a listener is added for the first time, it will be added as active/warming depending on if\n * Envoy knows about its route config.\n *\n * If a listener is updated (i.e. there is a already a listener by this name), there are 3 cases:\n * 1. The old listener is active and the new is warming:\n *    - Old will remain active\n *    - New will be added as warming, to replace the old when it gets its route\n * 2. The old listener is active and new is active:\n *    - Old is drained (seemingly instantaneously)\n *    - New is added as active\n * 3. The old listener is warming and new is active/warming:\n *    - Old is completely removed\n *    - New is added as warming/active as normal\n */\n\n/**\n * Update a listener when its route is changed, draining/removing the old listener and adding the\n * updated listener.\n */\nvoid XdsVerifier::listenerUpdated(const envoy::config::listener::v3::Listener& listener) {\n  ENVOY_LOG_MISC(debug, \"About to update listener {} to {}\", listener.name(), getRoute(listener));\n  dumpState();\n\n  const auto existing_warming =\n      std::find_if(listeners_.begin(), listeners_.end(), [&](const auto& rep) {\n        return rep.listener.name() == listener.name() &&\n               getRoute(rep.listener) == getRoute(listener) && rep.state == WARMING;\n      });\n  const auto existing_active =\n      std::find_if(listeners_.begin(), listeners_.end(), [&](const auto& rep) {\n        return rep.listener.name() == listener.name() &&\n               getRoute(rep.listener) == getRoute(listener) && rep.state == ACTIVE;\n      });\n\n  // Return early if the listener is a duplicate.\n  if (existing_active != listeners_.end()) {\n    const auto warming_to_remove =\n        std::find_if(listeners_.begin(), listeners_.end(), [&](const auto& rep) {\n          return rep.listener.name() == 
listener.name() && rep.state == WARMING;\n        });\n\n    // The listener should be updated back to its original state and the warming removed.\n    if (warming_to_remove != listeners_.end()) {\n      ENVOY_LOG_MISC(debug, \"Removing warming listener {} after update\", listener.name());\n      num_modified_++;\n      num_warming_--;\n      listeners_.erase(warming_to_remove);\n      return;\n    }\n    ENVOY_LOG_MISC(debug, \"Ignoring duplicate add of {}\", listener.name());\n    return;\n  }\n\n  if (existing_warming != listeners_.end()) {\n    ENVOY_LOG_MISC(debug, \"Ignoring duplicate add of {}\", listener.name());\n    return;\n  }\n\n  bool found = false;\n  for (auto it = listeners_.begin(); it != listeners_.end();) {\n    const auto& rep = *it;\n    ENVOY_LOG_MISC(debug, \"checking {} for update\", rep.listener.name());\n    if (rep.listener.name() == listener.name()) {\n      // If we're updating a warming/active listener, num_modified_ must be incremented.\n      if (rep.state != DRAINING && !found) {\n        num_modified_++;\n        found = true;\n      }\n\n      if (rep.state == ACTIVE) {\n        if (hasActiveRoute(listener)) {\n          // If the new listener is ready to take traffic, the old listener will be removed. 
It\n          // seems to be directly removed without being added to the config dump as draining.\n          ENVOY_LOG_MISC(debug, \"Removing {} after update\", listener.name());\n          num_active_--;\n          it = listeners_.erase(it);\n          continue;\n        } else {\n          // If the new listener has not gotten its route yet, the old listener will remain active\n          // until that happens.\n          ENVOY_LOG_MISC(debug, \"Keeping {} as ACTIVE\", listener.name());\n        }\n      } else if (rep.state == WARMING) {\n        // If the old listener is warming, it will be removed and replaced with the new.\n        ENVOY_LOG_MISC(debug, \"Removed warming listener {}\", listener.name());\n        num_warming_--;\n        it = listeners_.erase(it);\n        // Don't increment it.\n        continue;\n      }\n    }\n    ++it;\n  }\n  dumpState();\n  listenerAdded(listener, true);\n}\n\n/**\n * Add a new listener to listeners_ in either an active or warming state.\n * @param listener the listener to be added\n * @param from_update whether this function was called from listenerUpdated, in which case\n * num_added_ should not be incremented\n */\nvoid XdsVerifier::listenerAdded(const envoy::config::listener::v3::Listener& listener,\n                                bool from_update) {\n  if (!from_update) {\n    num_added_++;\n  }\n\n  if (hasActiveRoute(listener)) {\n    ENVOY_LOG_MISC(debug, \"Adding {} to listeners_ as ACTIVE\", listener.name());\n    listeners_.push_back({listener, ACTIVE});\n    num_active_++;\n  } else {\n    num_warming_++;\n    ENVOY_LOG_MISC(debug, \"Adding {} to listeners_ as WARMING\", listener.name());\n    listeners_.push_back({listener, WARMING});\n  }\n\n  ENVOY_LOG_MISC(debug, \"listenerAdded({})\", listener.name());\n  dumpState();\n}\n\n/**\n * Remove a listener and drain it if it was active.\n * @param name the name of the listener to be removed\n */\nvoid XdsVerifier::listenerRemoved(const std::string& name) {\n  
bool found = false;\n\n  for (auto it = listeners_.begin(); it != listeners_.end();) {\n    auto& rep = *it;\n    if (rep.listener.name() == name) {\n      if (rep.state == ACTIVE) {\n        // The listener will be drained before being removed.\n        ENVOY_LOG_MISC(debug, \"Changing {} to DRAINING\", name);\n        found = true;\n        num_active_--;\n        num_draining_++;\n        rep.state = DRAINING;\n      } else if (rep.state == WARMING) {\n        // The listener will be removed immediately.\n        ENVOY_LOG_MISC(debug, \"Removed warming listener {}\", name);\n        found = true;\n        num_warming_--;\n        it = listeners_.erase(it);\n        // Don't increment it.\n        continue;\n      }\n    }\n    ++it;\n  }\n\n  if (found) {\n    num_removed_++;\n  }\n}\n\n/**\n * After a SOTW update, see if any listeners that are currently warming can become active.\n */\nvoid XdsVerifier::updateSotwListeners() {\n  ASSERT(sotw_or_delta_ == SOTW);\n  for (auto& rep : listeners_) {\n    // Check all_routes_, not active_routes_ since this is SOTW, so any inactive routes will become\n    // active if this listener refers to them.\n    if (hasRoute(rep.listener) && rep.state == WARMING) {\n      // It should successfully warm now.\n      ENVOY_LOG_MISC(debug, \"Moving {} to ACTIVE state\", rep.listener.name());\n\n      // If the route was not originally added as active, change it now.\n      if (!hasActiveRoute(rep.listener)) {\n        std::string route_name = getRoute(rep.listener);\n        auto it = all_routes_.find(route_name);\n        // all added routes should be in all_routes_ in SOTW\n        ASSERT(it != all_routes_.end());\n        active_routes_.insert({route_name, it->second});\n      }\n\n      // If there were any active listeners that were waiting to be updated, they will now be\n      // removed and the warming listener will take their place.\n      markForRemoval(rep);\n      num_warming_--;\n      num_active_++;\n      rep.state = 
ACTIVE;\n    }\n  }\n  listeners_.erase(std::remove_if(listeners_.begin(), listeners_.end(),\n                                  [&](auto& listener) { return listener.state == REMOVED; }),\n                   listeners_.end());\n}\n\n/**\n * After a delta update, update any listeners that refer to the added route.\n */\nvoid XdsVerifier::updateDeltaListeners(const envoy::config::route::v3::RouteConfiguration& route) {\n  for (auto& rep : listeners_) {\n    if (getRoute(rep.listener) == route.name() && rep.state == WARMING) {\n      // It should successfully warm now.\n      ENVOY_LOG_MISC(debug, \"Moving {} to ACTIVE state\", rep.listener.name());\n\n      // If there were any active listeners that were waiting to be updated, they will now be\n      // removed and the warming listener will take their place.\n      markForRemoval(rep);\n      num_warming_--;\n      num_active_++;\n      rep.state = ACTIVE;\n    }\n  }\n  // erase any active listeners that were replaced\n  listeners_.erase(std::remove_if(listeners_.begin(), listeners_.end(),\n                                  [&](auto& listener) { return listener.state == REMOVED; }),\n                   listeners_.end());\n}\n\n/**\n * Mark for removal any active listener that a warming listener of the same name replaces.\n * Called after the warming listener receives its route, so it will be moved to active and the old\n * listener will be removed.\n * @param rep a warming listener that has a corresponding active listener of the same name\n */\nvoid XdsVerifier::markForRemoval(ListenerRepresentation& rep) {\n  ASSERT(rep.state == WARMING);\n  // Find the old listener and mark it for removal.\n  for (auto& old_rep : listeners_) {\n    if (old_rep.listener.name() == rep.listener.name() &&\n        getRoute(old_rep.listener) != getRoute(rep.listener) && old_rep.state == ACTIVE) {\n      // Mark it as removed to remove it after the loop so as not to invalidate the iterator in\n      // the caller function.\n      old_rep.state = REMOVED;\n      num_active_--;\n    }\n  }\n}\n\n/**\n * Add a new route and update any 
listeners that refer to this route.\n */\nvoid XdsVerifier::routeAdded(const envoy::config::route::v3::RouteConfiguration& route) {\n  // Routes that are not referenced by any resource are ignored, so this creates a distinction\n  // between SOTW and delta.\n  // If an unreferenced route is sent in delta, it is ignored forever as it will not be sent in\n  // future RDS updates, whereas in SOTW it will be present in all future RDS updates, so if a\n  // listener that refers to it is added in the meantime, it will become active.\n  if (!hasRoute(route.name())) {\n    all_routes_.insert({route.name(), route});\n  }\n\n  if (sotw_or_delta_ == DELTA && std::any_of(listeners_.begin(), listeners_.end(), [&](auto& rep) {\n        return getRoute(rep.listener) == route.name();\n      })) {\n    if (!hasActiveRoute(route.name())) {\n      active_routes_.insert({route.name(), route});\n      updateDeltaListeners(route);\n    }\n    updateDeltaListeners(route);\n  } else if (sotw_or_delta_ == SOTW) {\n    updateSotwListeners();\n  }\n}\n\n/**\n * Called after draining a listener, will remove it from listeners_.\n */\nvoid XdsVerifier::drainedListener(const std::string& name) {\n  for (auto it = listeners_.begin(); it != listeners_.end(); ++it) {\n    if (it->listener.name() == name && it->state == DRAINING) {\n      ENVOY_LOG_MISC(debug, \"Drained and removed {}\", name);\n      num_draining_--;\n      listeners_.erase(it);\n      return;\n    }\n  }\n  throw EnvoyException(fmt::format(\"Tried to drain {} which is not draining\", name));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/xds_verifier.h",
    "content": "#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"test/server/config_validation/xds_fuzz.pb.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\n\nclass XdsVerifier {\npublic:\n  XdsVerifier(test::server::config_validation::Config::SotwOrDelta sotw_or_delta);\n  void listenerAdded(const envoy::config::listener::v3::Listener& listener,\n                     bool from_update = false);\n  void listenerUpdated(const envoy::config::listener::v3::Listener& listener);\n  void listenerRemoved(const std::string& name);\n  void drainedListener(const std::string& name);\n\n  void routeAdded(const envoy::config::route::v3::RouteConfiguration& route);\n\n  enum ListenerState { WARMING, ACTIVE, DRAINING, REMOVED };\n  struct ListenerRepresentation {\n    envoy::config::listener::v3::Listener listener;\n    ListenerState state;\n  };\n\n  const std::vector<ListenerRepresentation>& listeners() const { return listeners_; }\n\n  const absl::flat_hash_map<std::string, envoy::config::route::v3::RouteConfiguration>&\n  routes() const {\n    return active_routes_;\n  };\n\n  uint32_t numWarming() const { return num_warming_; }\n  uint32_t numActive() const { return num_active_; }\n  uint32_t numDraining() const { return num_draining_; }\n\n  uint32_t numAdded() const { return num_added_; }\n  uint32_t numModified() const { return num_modified_; }\n  uint32_t numRemoved() const { return num_removed_; }\n\n  void dumpState();\n\n  bool hasListener(const std::string& name, ListenerState state);\n  bool hasRoute(const envoy::config::listener::v3::Listener& listener);\n  
bool hasRoute(const std::string& name);\n  bool hasActiveRoute(const envoy::config::listener::v3::Listener& listener);\n  bool hasActiveRoute(const std::string& name);\n\nprivate:\n  enum SotwOrDelta { SOTW, DELTA };\n\n  std::string getRoute(const envoy::config::listener::v3::Listener& listener);\n  void updateSotwListeners();\n  void updateDeltaListeners(const envoy::config::route::v3::RouteConfiguration& route);\n  void markForRemoval(ListenerRepresentation& rep);\n  std::vector<ListenerRepresentation> listeners_;\n\n  // envoy ignores routes that are not referenced by any resources\n  // all_routes_ is used for SOTW, as every previous route is sent in each request\n  // active_routes_ holds the routes that envoy knows about, i.e. the routes that are/were\n  // referenced by a listener\n  absl::flat_hash_map<std::string, envoy::config::route::v3::RouteConfiguration> all_routes_;\n  absl::flat_hash_map<std::string, envoy::config::route::v3::RouteConfiguration> active_routes_;\n\n  uint32_t num_warming_;\n  uint32_t num_active_;\n  uint32_t num_draining_;\n\n  uint32_t num_added_;\n  uint32_t num_modified_;\n  uint32_t num_removed_;\n\n  SotwOrDelta sotw_or_delta_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/config_validation/xds_verifier_test.cc",
    "content": "#include \"test/config/utility.h\"\n#include \"test/server/config_validation/xds_verifier.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nenvoy::config::listener::v3::Listener buildListener(const std::string& listener_name,\n                                                    const std::string& route_name) {\n  return ConfigHelper::buildListener(listener_name, route_name, \"\", \"ads_test\",\n                                     envoy::config::core::v3::ApiVersion::V3);\n}\n\nenvoy::config::route::v3::RouteConfiguration buildRoute(const std::string& route_name) {\n  return ConfigHelper::buildRouteConfig(route_name, \"cluster_0\",\n                                        envoy::config::core::v3::ApiVersion::V3);\n}\n\n// Add, warm, drain and remove a listener.\nTEST(XdsVerifier, Basic) {\n  XdsVerifier verifier(test::server::config_validation::Config::SOTW);\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n  EXPECT_EQ(verifier.numAdded(), 1);\n  EXPECT_EQ(verifier.numWarming(), 1);\n\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n  EXPECT_TRUE(verifier.hasRoute(\"route_config_0\") && verifier.hasActiveRoute(\"route_config_0\"));\n  EXPECT_EQ(verifier.numAdded(), 1);\n  EXPECT_EQ(verifier.numWarming(), 0);\n  EXPECT_EQ(verifier.numActive(), 1);\n\n  verifier.listenerRemoved(\"listener_0\");\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.DRAINING));\n  EXPECT_EQ(verifier.numDraining(), 1);\n  EXPECT_EQ(verifier.numRemoved(), 1);\n  EXPECT_EQ(verifier.numActive(), 0);\n\n  verifier.drainedListener(\"listener_0\");\n  EXPECT_FALSE(verifier.hasListener(\"listener_0\", verifier.DRAINING));\n  EXPECT_EQ(verifier.numRemoved(), 1);\n}\n\nTEST(XdsVerifier, RouteBeforeListenerSOTW) {\n  XdsVerifier verifier(test::server::config_validation::Config::SOTW);\n  
// Send a route first, so envoy will not accept it.\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasRoute(\"route_config_0\"));\n  EXPECT_FALSE(verifier.hasActiveRoute(\"route_config_0\"));\n\n  // Envoy still doesn't know about the route, so this will warm.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n  EXPECT_EQ(verifier.numAdded(), 1);\n  EXPECT_EQ(verifier.numWarming(), 1);\n\n  // Send a new route, which will include route_config_0 since SOTW, so route_config_0 will become\n  // active.\n  verifier.routeAdded(buildRoute(\"route_config_1\"));\n  EXPECT_TRUE(verifier.hasRoute(\"route_config_1\"));\n  EXPECT_FALSE(verifier.hasActiveRoute(\"route_config_1\"));\n  EXPECT_TRUE(verifier.hasActiveRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n  EXPECT_EQ(verifier.numActive(), 1);\n}\n\nTEST(XdsVerifier, RouteBeforeListenerDelta) {\n  XdsVerifier verifier(test::server::config_validation::Config::DELTA);\n  // Send a route first, so envoy will not accept it.\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_FALSE(verifier.hasActiveRoute(\"route_config_0\"));\n\n  // Envoy still doesn't know about the route, so this will warm.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n  EXPECT_EQ(verifier.numAdded(), 1);\n  EXPECT_EQ(verifier.numWarming(), 1);\n\n  // Send a new route, which will not include route_config_0 since delta, so route_config_0 will not\n  // become active.\n  verifier.routeAdded(buildRoute(\"route_config_1\"));\n  EXPECT_FALSE(verifier.hasActiveRoute(\"route_config_1\"));\n  EXPECT_FALSE(verifier.hasActiveRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n  EXPECT_EQ(verifier.numWarming(), 
1);\n}\n\nTEST(XdsVerifier, UpdateWarmingListener) {\n  XdsVerifier verifier(test::server::config_validation::Config::SOTW);\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  verifier.listenerUpdated(buildListener(\"listener_0\", \"route_config_1\"));\n  // The new listener should directly replace the old listener since it's warming.\n  EXPECT_EQ(verifier.numModified(), 1);\n  EXPECT_EQ(verifier.numAdded(), 1);\n\n  // Send the route for the old listener, which should have been replaced with the update.\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_FALSE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n\n  // Now the new should become active.\n  verifier.routeAdded(buildRoute(\"route_config_1\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n}\n\nTEST(XdsVerifier, UpdateActiveListener) {\n  XdsVerifier verifier(test::server::config_validation::Config::SOTW);\n\n  // Add an active listener.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n\n  // Send an update, which should keep the old listener active until the new warms.\n  verifier.listenerUpdated(buildListener(\"listener_0\", \"route_config_1\"));\n  EXPECT_EQ(verifier.numModified(), 1);\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n\n  // Warm the new listener, which should remove the old.\n  verifier.routeAdded(buildRoute(\"route_config_1\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n  EXPECT_FALSE(verifier.hasListener(\"listener_0\", verifier.DRAINING));\n  EXPECT_FALSE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n\n  EXPECT_EQ(verifier.numActive(), 1);\n}\n\nTEST(XdsVerifier, UpdateActiveToActive) {\n  XdsVerifier 
verifier(test::server::config_validation::Config::SOTW);\n\n  // Add two active listeners to different routes.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n\n  // Add an active listener.\n  verifier.listenerAdded(buildListener(\"listener_1\", \"route_config_1\"));\n  verifier.routeAdded(buildRoute(\"route_config_1\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_1\", verifier.ACTIVE));\n  EXPECT_EQ(verifier.numAdded(), 2);\n\n  // Send an update, which should make the new listener active straight away and remove the old\n  // since its route is already active.\n  verifier.listenerUpdated(buildListener(\"listener_0\", \"route_config_1\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n  EXPECT_FALSE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n  EXPECT_EQ(verifier.numActive(), 2);\n}\n\nTEST(XdsVerifier, WarmMultipleListenersSOTW) {\n  XdsVerifier verifier(test::server::config_validation::Config::SOTW);\n\n  // Add two warming listeners to the same route.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  verifier.listenerAdded(buildListener(\"listener_1\", \"route_config_0\"));\n\n  // Send the route, make sure both are active.\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n  EXPECT_TRUE(verifier.hasListener(\"listener_1\", verifier.ACTIVE));\n  EXPECT_EQ(verifier.numActive(), 2);\n}\n\nTEST(XdsVerifier, WarmMultipleListenersDelta) {\n  XdsVerifier verifier(test::server::config_validation::Config::DELTA);\n\n  // Add two warming listeners to the same route.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  verifier.listenerAdded(buildListener(\"listener_1\", \"route_config_0\"));\n\n  // Send the route, make sure both are 
active.\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n  EXPECT_TRUE(verifier.hasListener(\"listener_1\", verifier.ACTIVE));\n  EXPECT_EQ(verifier.numActive(), 2);\n}\n\nTEST(XdsVerifier, ResendRouteSOTW) {\n  XdsVerifier verifier(test::server::config_validation::Config::SOTW);\n\n  // Send a route that will be ignored.\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n\n  // Add a warming listener that refers to this route.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n\n  // Send the same route again, make sure listener becomes active.\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n}\n\nTEST(XdsVerifier, ResendRouteDelta) {\n  XdsVerifier verifier(test::server::config_validation::Config::DELTA);\n\n  // Send a route that will be ignored.\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n\n  // Add a warming listener that refers to this route.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n\n  // Send the same route again, make sure listener becomes active.\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n}\n\nTEST(XdsVerifier, RemoveThenAddListener) {\n  XdsVerifier verifier(test::server::config_validation::Config::SOTW);\n\n  // Add an active listener.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n\n  // Remove it.\n  verifier.listenerRemoved(\"listener_0\");\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.DRAINING));\n\n  // And add it 
back, it should now be draining and active.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.DRAINING));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n}\n\nTEST(XdsVerifier, UpdateBackToOriginal) {\n  XdsVerifier verifier(test::server::config_validation::Config::SOTW);\n\n  // Add an active listener.\n  verifier.listenerAdded(buildListener(\"listener_0\", \"route_config_0\"));\n  verifier.routeAdded(buildRoute(\"route_config_0\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n\n  // Update it to a new route, should warm.\n  verifier.listenerUpdated(buildListener(\"listener_0\", \"route_config_1\"));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n\n  // Update it back to original, should remove warming listener.\n  verifier.listenerUpdated(buildListener(\"listener_0\", \"route_config_0\"));\n  EXPECT_FALSE(verifier.hasListener(\"listener_0\", verifier.WARMING));\n  EXPECT_TRUE(verifier.hasListener(\"listener_0\", verifier.ACTIVE));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/configuration_impl_test.cc",
    "content": "#include <chrono>\n#include <list>\n#include <string>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/metrics/v3/stats.pb.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/config/well_known_names.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/upstream/cluster_manager_impl.h\"\n\n#include \"server/configuration_impl.h\"\n\n#include \"extensions/stat_sinks/well_known_names.h\"\n\n#include \"test/common/upstream/utility.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"fmt/printf.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"udpa/type/v1/typed_struct.pb.h\"\n\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Server {\nnamespace Configuration {\nnamespace {\n\nTEST(FilterChainUtility, buildFilterChain) {\n  Network::MockConnection connection;\n  std::vector<Network::FilterFactoryCb> factories;\n  ReadyWatcher watcher;\n  Network::FilterFactoryCb factory = [&](Network::FilterManager&) -> void { watcher.ready(); };\n  factories.push_back(factory);\n  factories.push_back(factory);\n\n  EXPECT_CALL(watcher, ready()).Times(2);\n  EXPECT_CALL(connection, initializeReadFilters()).WillOnce(Return(true));\n  EXPECT_EQ(FilterChainUtility::buildFilterChain(connection, factories), true);\n}\n\nTEST(FilterChainUtility, buildFilterChainFailWithBadFilters) {\n  Network::MockConnection connection;\n  std::vector<Network::FilterFactoryCb> factories;\n  EXPECT_CALL(connection, initializeReadFilters()).WillOnce(Return(false));\n  EXPECT_EQ(FilterChainUtility::buildFilterChain(connection, factories), false);\n}\n\nclass ConfigurationImplTest : public testing::Test {\nprotected:\n  ConfigurationImplTest()\n      : api_(Api::createApiForTest()),\n        
cluster_manager_factory_(\n            server_.admin(), server_.runtime(), server_.stats(), server_.threadLocal(),\n            server_.dnsResolver(), server_.sslContextManager(), server_.dispatcher(),\n            server_.localInfo(), server_.secretManager(), server_.messageValidationContext(), *api_,\n            server_.httpContext(), server_.grpcContext(), server_.accessLogManager(),\n            server_.singletonManager()) {}\n\n  void addStatsdFakeClusterConfig(envoy::config::metrics::v3::StatsSink& sink) {\n    envoy::config::metrics::v3::StatsdSink statsd_sink;\n    statsd_sink.set_tcp_cluster_name(\"fake_cluster\");\n    sink.mutable_typed_config()->PackFrom(statsd_sink);\n  }\n\n  Api::ApiPtr api_;\n  NiceMock<Server::MockInstance> server_;\n  Upstream::ProdClusterManagerFactory cluster_manager_factory_;\n};\n\nTEST_F(ConfigurationImplTest, DefaultStatsFlushInterval) {\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n\n  MainImpl config;\n  config.initialize(bootstrap, server_, cluster_manager_factory_);\n\n  EXPECT_EQ(std::chrono::milliseconds(5000), config.statsFlushInterval());\n}\n\nTEST_F(ConfigurationImplTest, CustomStatsFlushInterval) {\n  std::string json = R\"EOF(\n  {\n    \"stats_flush_interval\": \"0.500s\",\n\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n\n  MainImpl config;\n  config.initialize(bootstrap, server_, cluster_manager_factory_);\n\n  EXPECT_EQ(std::chrono::milliseconds(500), config.statsFlushInterval());\n}\n\nTEST_F(ConfigurationImplTest, SetUpstreamClusterPerConnectionBufferLimit) {\n  const std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\" : [],\n      \"clusters\": [\n        {\n          \"name\": \"test_cluster\",\n          \"type\": 
\"static\",\n          \"connect_timeout\": \"0.01s\",\n          \"per_connection_buffer_limit_bytes\": 8192,\n          \"lb_policy\": \"round_robin\",\n          \"load_assignment\": {\n    \"endpoints\": [\n      {\n        \"lb_endpoints\": [\n          {\n            \"endpoint\": {\n              \"address\": {\n                \"socket_address\": {\n                  \"address\": \"127.0.0.1\",\n                  \"port_value\": 9999\n                }\n              }\n            }\n          }\n        ]\n      }\n    ]\n  }\n        }\n      ]\n    },\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n\n  MainImpl config;\n  config.initialize(bootstrap, server_, cluster_manager_factory_);\n\n  ASSERT_EQ(1U, config.clusterManager()->clusters().count(\"test_cluster\"));\n  EXPECT_EQ(8192U, config.clusterManager()\n                       ->clusters()\n                       .find(\"test_cluster\")\n                       ->second.get()\n                       .info()\n                       ->perConnectionBufferLimitBytes());\n  server_.thread_local_.shutdownThread();\n}\n\nTEST_F(ConfigurationImplTest, NullTracerSetWhenTracingConfigurationAbsent) {\n  std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\" : [\n        {\n          \"address\": {\n            \"socket_address\": {\n              \"address\": \"127.0.0.1\",\n              \"port_value\": 1234\n            }\n          },\n          \"filter_chains\": []\n        }\n      ],\n      \"clusters\": []\n    },\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  
)EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n\n  server_.local_info_.node_.set_cluster(\"\");\n  MainImpl config;\n  config.initialize(bootstrap, server_, cluster_manager_factory_);\n\n  EXPECT_THAT(envoy::config::trace::v3::Tracing{},\n              ProtoEq(server_.httpContext().defaultTracingConfig()));\n}\n\nTEST_F(ConfigurationImplTest, NullTracerSetWhenHttpKeyAbsentFromTracerConfiguration) {\n  std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\" : [\n        {\n          \"address\": {\n            \"socket_address\": {\n              \"address\": \"127.0.0.1\",\n              \"port_value\": 1234\n            }\n          },\n          \"filter_chains\": []\n        }\n      ],\n      \"clusters\": []\n    },\n    \"tracing\": {},\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n\n  server_.local_info_.node_.set_cluster(\"\");\n  MainImpl config;\n  config.initialize(bootstrap, server_, cluster_manager_factory_);\n\n  EXPECT_THAT(envoy::config::trace::v3::Tracing{},\n              ProtoEq(server_.httpContext().defaultTracingConfig()));\n}\n\nTEST_F(ConfigurationImplTest, ConfigurationFailsWhenInvalidTracerSpecified) {\n  std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\" : [\n        {\n          \"address\": {\n            \"socket_address\": {\n              \"address\": \"127.0.0.1\",\n              \"port_value\": 1234\n            }\n          },\n          \"filter_chains\": []\n        }\n      ],\n      \"clusters\": []\n    },\n    \"tracing\": {\n      \"http\": {\n        \"name\": \"invalid\",\n        \"typed_config\": {\n          \"@type\": \"type.googleapis.com/udpa.type.v1.TypedStruct\",\n          \"type_url\": 
\"type.googleapis.com/envoy.config.trace.v2.BlackHoleConfig\",\n          \"value\": {\n            \"collector_cluster\": \"cluster_0\",\n            \"access_token_file\": \"/etc/envoy/envoy.cfg\"\n          }\n        }\n      }\n    },\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n  MainImpl config;\n  EXPECT_THROW_WITH_MESSAGE(config.initialize(bootstrap, server_, cluster_manager_factory_),\n                            EnvoyException,\n                            \"Didn't find a registered implementation for name: 'invalid'\");\n}\n\nTEST_F(ConfigurationImplTest, ProtoSpecifiedStatsSink) {\n  std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\": [],\n      \"clusters\": []\n    },\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n\n  auto& sink = *bootstrap.mutable_stats_sinks()->Add();\n  sink.set_name(Extensions::StatSinks::StatsSinkNames::get().Statsd);\n  addStatsdFakeClusterConfig(sink);\n\n  MainImpl config;\n  config.initialize(bootstrap, server_, cluster_manager_factory_);\n\n  EXPECT_EQ(1, config.statsSinks().size());\n}\n\nTEST_F(ConfigurationImplTest, StatsSinkWithInvalidName) {\n  std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\": [],\n      \"clusters\": []\n    },\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = 
Upstream::parseBootstrapFromV3Json(json);\n\n  envoy::config::metrics::v3::StatsSink& sink = *bootstrap.mutable_stats_sinks()->Add();\n  sink.set_name(\"envoy.invalid\");\n\n  MainImpl config;\n  EXPECT_THROW_WITH_MESSAGE(config.initialize(bootstrap, server_, cluster_manager_factory_),\n                            EnvoyException,\n                            \"Didn't find a registered implementation for name: 'envoy.invalid'\");\n}\n\nTEST_F(ConfigurationImplTest, StatsSinkWithNoName) {\n  std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\": [],\n      \"clusters\": []\n    },\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n\n  bootstrap.mutable_stats_sinks()->Add();\n\n  MainImpl config;\n  EXPECT_THROW_WITH_MESSAGE(config.initialize(bootstrap, server_, cluster_manager_factory_),\n                            EnvoyException,\n                            \"Provided name for static registration lookup was empty.\");\n}\n\nTEST_F(ConfigurationImplTest, StatsSinkWithNoType) {\n  std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\": [],\n      \"clusters\": []\n    },\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n\n  auto& sink = *bootstrap.mutable_stats_sinks()->Add();\n  udpa::type::v1::TypedStruct typed_struct;\n  auto untyped_struct = typed_struct.mutable_value();\n  (*untyped_struct->mutable_fields())[\"foo\"].set_string_value(\"bar\");\n  sink.mutable_typed_config()->PackFrom(typed_struct);\n\n  MainImpl config;\n  
EXPECT_THROW_WITH_MESSAGE(config.initialize(bootstrap, server_, cluster_manager_factory_),\n                            EnvoyException,\n                            \"Provided name for static registration lookup was empty.\");\n}\n\n// An explicit non-empty LayeredRuntime is available to the server with no\n// changes made.\nTEST(InitialImplTest, LayeredRuntime) {\n  const std::string yaml = R\"EOF(\n  layered_runtime:\n    layers:\n    - name: base\n      static_layer:\n        health_check:\n          min_interval: 5\n    - name: root\n      disk_layer: { symlink_root: /srv/runtime/current, subdirectory: envoy }\n    - name: override\n      disk_layer: { symlink_root: /srv/runtime/current, subdirectory: envoy_override, append_service_cluster: true }\n    - name: admin\n      admin_layer: {}\n  )EOF\";\n  const auto bootstrap = TestUtility::parseYaml<envoy::config::bootstrap::v3::Bootstrap>(yaml);\n  InitialImpl config(bootstrap);\n  EXPECT_THAT(config.runtime(), ProtoEq(bootstrap.layered_runtime()));\n}\n\n// An empty LayeredRuntime has an admin layer injected.\nTEST(InitialImplTest, EmptyLayeredRuntime) {\n  const std::string bootstrap_yaml = R\"EOF(\n  layered_runtime: {}\n  )EOF\";\n  const auto bootstrap =\n      TestUtility::parseYaml<envoy::config::bootstrap::v3::Bootstrap>(bootstrap_yaml);\n  InitialImpl config(bootstrap);\n\n  const std::string expected_yaml = R\"EOF(\n  layers:\n  - admin_layer: {}\n  )EOF\";\n  const auto expected_runtime =\n      TestUtility::parseYaml<envoy::config::bootstrap::v3::LayeredRuntime>(expected_yaml);\n  EXPECT_THAT(config.runtime(), ProtoEq(expected_runtime));\n}\n\n// An empty deprecated Runtime has an empty static and admin layer injected.\nTEST(InitialImplTest, EmptyDeprecatedRuntime) {\n  const auto bootstrap = TestUtility::parseYaml<envoy::config::bootstrap::v3::Bootstrap>(\"{}\");\n  InitialImpl config(bootstrap);\n\n  const std::string expected_yaml = R\"EOF(\n  layers:\n  - name: base\n    static_layer: {}\n  - 
name: admin\n    admin_layer: {}\n  )EOF\";\n  const auto expected_runtime =\n      TestUtility::parseYaml<envoy::config::bootstrap::v3::LayeredRuntime>(expected_yaml);\n  EXPECT_THAT(config.runtime(), ProtoEq(expected_runtime));\n}\n\n// A deprecated Runtime is transformed to the equivalent LayeredRuntime.\nTEST(InitialImplTest, DeprecatedRuntimeTranslation) {\n  const std::string bootstrap_yaml = R\"EOF(\n  runtime:\n    symlink_root: /srv/runtime/current\n    subdirectory: envoy\n    override_subdirectory: envoy_override\n    base:\n      health_check:\n        min_interval: 5\n  )EOF\";\n  const auto bootstrap =\n      TestUtility::parseYaml<envoy::config::bootstrap::v3::Bootstrap>(bootstrap_yaml);\n  InitialImpl config(bootstrap);\n\n  const std::string expected_yaml = R\"EOF(\n  layers:\n  - name: base\n    static_layer:\n      health_check:\n        min_interval: 5\n  - name: root\n    disk_layer: { symlink_root: /srv/runtime/current, subdirectory: envoy }\n  - name: override\n    disk_layer: { symlink_root: /srv/runtime/current, subdirectory: envoy_override, append_service_cluster: true }\n  - name: admin\n    admin_layer: {}\n  )EOF\";\n  const auto expected_runtime =\n      TestUtility::parseYaml<envoy::config::bootstrap::v3::LayeredRuntime>(expected_yaml);\n  EXPECT_THAT(config.runtime(), ProtoEq(expected_runtime));\n}\n\nTEST_F(ConfigurationImplTest, AdminSocketOptions) {\n  std::string json = R\"EOF(\n  {\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      },\n      \"socket_options\": [\n         {\n           \"level\": 1,\n           \"name\": 2,\n           \"int_value\": 3,\n           \"state\": \"STATE_PREBIND\"\n         },\n         {\n           \"level\": 4,\n           \"name\": 5,\n           \"int_value\": 6,\n           \"state\": \"STATE_BOUND\"\n         },\n      ]\n    }\n  }\n  
)EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n  InitialImpl config(bootstrap);\n  Network::MockListenSocket socket_mock;\n\n  ASSERT_EQ(config.admin().socketOptions()->size(), 2);\n  auto detail = config.admin().socketOptions()->at(0)->getOptionDetails(\n      socket_mock, envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  ASSERT_NE(detail, absl::nullopt);\n  EXPECT_EQ(detail->name_, Envoy::Network::SocketOptionName(1, 2, \"1/2\"));\n  detail = config.admin().socketOptions()->at(1)->getOptionDetails(\n      socket_mock, envoy::config::core::v3::SocketOption::STATE_BOUND);\n  ASSERT_NE(detail, absl::nullopt);\n  EXPECT_EQ(detail->name_, Envoy::Network::SocketOptionName(4, 5, \"4/5\"));\n}\n\nTEST_F(ConfigurationImplTest, ExceedLoadBalancerHostWeightsLimit) {\n  const std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\" : [],\n      \"clusters\": [\n        {\n          \"name\": \"test_cluster\",\n          \"type\": \"static\",\n          \"connect_timeout\": \"0.01s\",\n          \"per_connection_buffer_limit_bytes\": 8192,\n          \"lb_policy\": \"RING_HASH\",\n          \"load_assignment\": {\n            \"cluster_name\": \"load_test_cluster\",\n            \"endpoints\": [\n              {\n                \"priority\": 93\n              },\n              {\n                \"locality\": {\n                  \"zone\": \"zone1\"\n                },\n                \"lb_endpoints\": [\n                  {\n                    \"endpoint\": {\n                      \"address\": {\n                        \"pipe\": {\n                          \"path\": \"path/to/pipe\"\n                        }\n                      }\n                    },\n                    \"health_status\": \"TIMEOUT\",\n                    \"load_balancing_weight\": {\n                      \"value\": 4294967295\n                    }\n                  },\n                  {\n                    \"endpoint\": {\n       
               \"address\": {\n                        \"pipe\": {\n                          \"path\": \"path/to/pipe2\"\n                        }\n                      }\n                    },\n                    \"health_status\": \"TIMEOUT\",\n                    \"load_balancing_weight\": {\n                      \"value\": 1\n                    }\n                  }\n                ],\n                \"load_balancing_weight\": {\n                  \"value\": 122\n                }\n              }\n            ]\n          }\n        }\n      ]\n    },\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = Upstream::parseBootstrapFromV3Json(json);\n\n  MainImpl config;\n  EXPECT_THROW_WITH_MESSAGE(\n      config.initialize(bootstrap, server_, cluster_manager_factory_), EnvoyException,\n      \"The sum of weights of all upstream hosts in a locality exceeds 4294967295\");\n}\n\nTEST_F(ConfigurationImplTest, ExceedLoadBalancerLocalityWeightsLimit) {\n  const std::string json = R\"EOF(\n  {\n    \"static_resources\": {\n      \"listeners\" : [],\n      \"clusters\": [\n        {\n          \"name\": \"test_cluster\",\n          \"type\": \"static\",\n          \"connect_timeout\": \"0.01s\",\n          \"per_connection_buffer_limit_bytes\": 8192,\n          \"lb_policy\": \"RING_HASH\",\n          \"load_assignment\": {\n            \"cluster_name\": \"load_test_cluster\",\n            \"endpoints\": [\n              {\n                \"priority\": 93\n              },\n              {\n                \"locality\": {\n                  \"zone\": \"zone1\"\n                },\n                \"lb_endpoints\": [\n                  {\n                    \"endpoint\": {\n                      \"address\": {\n                        \"pipe\": {\n                      
    \"path\": \"path/to/pipe\"\n                        }\n                      }\n                    },\n                    \"health_status\": \"TIMEOUT\",\n                    \"load_balancing_weight\": {\n                      \"value\": 7\n                    }\n                  }\n                ],\n                \"load_balancing_weight\": {\n                  \"value\": 4294967295\n                }\n              },\n              {\n                \"locality\": {\n                  \"region\": \"domains\",\n                  \"sub_zone\": \"sub_zone1\"\n                },\n                \"lb_endpoints\": [\n                  {\n                    \"endpoint\": {\n                      \"address\": {\n                        \"pipe\": {\n                          \"path\": \"path/to/pipe\"\n                        }\n                      }\n                    },\n                    \"health_status\": \"TIMEOUT\",\n                    \"load_balancing_weight\": {\n                      \"value\": 8\n                    }\n                  }\n                ],\n                \"load_balancing_weight\": {\n                  \"value\": 2\n                }\n              }\n            ]\n          },\n          \"lb_subset_config\": {\n            \"fallback_policy\": \"ANY_ENDPOINT\",\n            \"subset_selectors\": {\n              \"keys\": [\n                \"x\"\n              ]\n            },\n            \"locality_weight_aware\": \"true\"\n          },\n          \"common_lb_config\": {\n            \"healthy_panic_threshold\": {\n              \"value\": 0.8\n            },\n            \"locality_weighted_lb_config\": {\n            }\n          }\n        }\n      ]\n    },\n    \"admin\": {\n      \"access_log_path\": \"/dev/null\",\n      \"address\": {\n        \"socket_address\": {\n          \"address\": \"1.2.3.4\",\n          \"port_value\": 5678\n        }\n      }\n    }\n  }\n  )EOF\";\n\n  auto bootstrap = 
Upstream::parseBootstrapFromV3Json(json);\n\n  MainImpl config;\n  EXPECT_THROW_WITH_MESSAGE(\n      config.initialize(bootstrap, server_, cluster_manager_factory_), EnvoyException,\n      \"The sum of weights of all localities at the same priority exceeds 4294967295\");\n}\n\nTEST_F(ConfigurationImplTest, KillTimeoutWithoutSkew) {\n  const std::string json = R\"EOF(\n  {\n    \"watchdog\": {\n      \"kill_timeout\": \"1.0s\",\n    },\n  })EOF\";\n\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  TestUtility::loadFromJson(json, bootstrap);\n\n  MainImpl config;\n  config.initialize(bootstrap, server_, cluster_manager_factory_);\n\n  EXPECT_EQ(std::chrono::milliseconds(1000), config.workerWatchdogConfig().killTimeout());\n  EXPECT_EQ(std::chrono::milliseconds(1000), config.mainThreadWatchdogConfig().killTimeout());\n}\n\nTEST_F(ConfigurationImplTest, CanSkewsKillTimeout) {\n  const std::string json = R\"EOF(\n  {\n    \"watchdog\": {\n      \"kill_timeout\": \"1.0s\",\n      \"max_kill_timeout_jitter\": \"0.5s\"\n    },\n  })EOF\";\n\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  TestUtility::loadFromJson(json, bootstrap);\n\n  MainImpl config;\n  config.initialize(bootstrap, server_, cluster_manager_factory_);\n\n  EXPECT_LT(std::chrono::milliseconds(1000), config.mainThreadWatchdogConfig().killTimeout());\n  EXPECT_LT(std::chrono::milliseconds(1000), config.workerWatchdogConfig().killTimeout());\n  EXPECT_GE(std::chrono::milliseconds(1500), config.mainThreadWatchdogConfig().killTimeout());\n  EXPECT_GE(std::chrono::milliseconds(1500), config.workerWatchdogConfig().killTimeout());\n}\n\nTEST_F(ConfigurationImplTest, DoesNotSkewIfKillTimeoutDisabled) {\n  const std::string json = R\"EOF(\n  {\n    \"watchdog\": {\n      \"max_kill_timeout_jitter\": \"0.5s\"\n    },\n  })EOF\";\n\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  TestUtility::loadFromJson(json, bootstrap);\n\n  MainImpl config;\n  config.initialize(bootstrap, server_, 
cluster_manager_factory_);\n\n  EXPECT_EQ(std::chrono::milliseconds(0), config.mainThreadWatchdogConfig().killTimeout());\n  EXPECT_EQ(std::chrono::milliseconds(0), config.workerWatchdogConfig().killTimeout());\n}\n\nTEST_F(ConfigurationImplTest, ShouldErrorIfBothWatchdogsAndWatchdogSet) {\n  const std::string json = R\"EOF( { \"watchdogs\": {}, \"watchdog\": {}})EOF\";\n\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  TestUtility::loadFromJson(json, bootstrap);\n\n  MainImpl config;\n\n  EXPECT_THROW_WITH_MESSAGE(config.initialize(bootstrap, server_, cluster_manager_factory_),\n                            EnvoyException, \"Only one of watchdog or watchdogs should be set!\");\n}\n\nTEST_F(ConfigurationImplTest, CanSetMultiWatchdogConfigs) {\n  const std::string json = R\"EOF( { \"watchdogs\": {\n    \"main_thread_watchdog\" : {\n      miss_timeout : \"2s\"\n    },\n    \"worker_watchdog\" : {\n      miss_timeout : \"0.5s\"\n    }\n  }})EOF\";\n\n  envoy::config::bootstrap::v3::Bootstrap bootstrap;\n  TestUtility::loadFromJson(json, bootstrap);\n\n  MainImpl config;\n  config.initialize(bootstrap, server_, cluster_manager_factory_);\n\n  EXPECT_EQ(config.mainThreadWatchdogConfig().missTimeout(), std::chrono::milliseconds(2000));\n  EXPECT_EQ(config.workerWatchdogConfig().missTimeout(), std::chrono::milliseconds(500));\n}\n} // namespace\n} // namespace Configuration\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/connection_handler_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/listener/v3/udp_listener_config.pb.h\"\n#include \"envoy/network/exception.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/server/active_udp_listener_config.h\"\n#include \"envoy/stats/scope.h\"\n\n#include \"common/common/utility.h\"\n#include \"common/config/utility.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/connection_balancer_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/raw_buffer_socket.h\"\n#include \"common/network/udp_default_writer_config.h\"\n#include \"common/network/udp_listener_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"server/connection_handler_impl.h\"\n\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::HasSubstr;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nclass ConnectionHandlerTest : public testing::Test, protected Logger::Loggable<Logger::Id::main> {\npublic:\n  ConnectionHandlerTest()\n      : socket_factory_(std::make_shared<Network::MockListenSocketFactory>()),\n        handler_(new ConnectionHandlerImpl(dispatcher_, 0)),\n        filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()),\n        listener_filter_matcher_(std::make_shared<NiceMock<Network::MockListenerFilterMatcher>>()),\n        access_log_(std::make_shared<AccessLog::MockInstance>()) {\n    ON_CALL(*listener_filter_matcher_, matches(_)).WillByDefault(Return(false));\n  }\n\n  class TestListener : 
public Network::ListenerConfig {\n  public:\n    TestListener(\n        ConnectionHandlerTest& parent, uint64_t tag, bool bind_to_port,\n        bool hand_off_restored_destination_connections, const std::string& name,\n        Network::Socket::Type socket_type, std::chrono::milliseconds listener_filters_timeout,\n        bool continue_on_listener_filters_timeout,\n        Network::ListenSocketFactorySharedPtr socket_factory,\n        std::shared_ptr<AccessLog::MockInstance> access_log,\n        std::shared_ptr<NiceMock<Network::MockFilterChainManager>> filter_chain_manager = nullptr,\n        uint32_t tcp_backlog_size = ENVOY_TCP_BACKLOG_SIZE,\n        Network::ConnectionBalancerSharedPtr connection_balancer = nullptr)\n        : parent_(parent), socket_(std::make_shared<NiceMock<Network::MockListenSocket>>()),\n          socket_factory_(std::move(socket_factory)), tag_(tag), bind_to_port_(bind_to_port),\n          tcp_backlog_size_(tcp_backlog_size),\n          hand_off_restored_destination_connections_(hand_off_restored_destination_connections),\n          name_(name), listener_filters_timeout_(listener_filters_timeout),\n          continue_on_listener_filters_timeout_(continue_on_listener_filters_timeout),\n          connection_balancer_(connection_balancer == nullptr\n                                   ? 
std::make_shared<Network::NopConnectionBalancerImpl>()\n                                   : connection_balancer),\n          access_logs_({access_log}), inline_filter_chain_manager_(filter_chain_manager),\n          init_manager_(nullptr) {\n      envoy::config::listener::v3::UdpListenerConfig dummy;\n      std::string listener_name(\"raw_udp_listener\");\n      dummy.set_udp_listener_name(listener_name);\n      udp_listener_factory_ =\n          Config::Utility::getAndCheckFactoryByName<ActiveUdpListenerConfigFactory>(listener_name)\n              .createActiveUdpListenerFactory(dummy, /*concurrency=*/1);\n      udp_writer_factory_ = std::make_unique<Network::UdpDefaultWriterFactory>();\n      ON_CALL(*socket_, socketType()).WillByDefault(Return(socket_type));\n    }\n\n    // Network::ListenerConfig\n    Network::FilterChainManager& filterChainManager() override {\n      return inline_filter_chain_manager_ == nullptr ? parent_.manager_\n                                                     : *inline_filter_chain_manager_;\n    }\n    Network::FilterChainFactory& filterChainFactory() override { return parent_.factory_; }\n    Network::ListenSocketFactory& listenSocketFactory() override { return *socket_factory_; }\n    bool bindToPort() override { return bind_to_port_; }\n    bool handOffRestoredDestinationConnections() const override {\n      return hand_off_restored_destination_connections_;\n    }\n    uint32_t perConnectionBufferLimitBytes() const override { return 0; }\n    std::chrono::milliseconds listenerFiltersTimeout() const override {\n      return listener_filters_timeout_;\n    }\n    bool continueOnListenerFiltersTimeout() const override {\n      return continue_on_listener_filters_timeout_;\n    }\n    Stats::Scope& listenerScope() override { return parent_.stats_store_; }\n    uint64_t listenerTag() const override { return tag_; }\n    const std::string& name() const override { return name_; }\n    Network::ActiveUdpListenerFactory* 
udpListenerFactory() override {\n      return udp_listener_factory_.get();\n    }\n    Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override {\n      return Network::UdpPacketWriterFactoryOptRef(std::ref(*udp_writer_factory_));\n    }\n    Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override {\n      return *udp_listener_worker_router_;\n    }\n    envoy::config::core::v3::TrafficDirection direction() const override {\n      return envoy::config::core::v3::UNSPECIFIED;\n    }\n    Network::ConnectionBalancer& connectionBalancer() override { return *connection_balancer_; }\n    const std::vector<AccessLog::InstanceSharedPtr>& accessLogs() const override {\n      return access_logs_;\n    }\n    ResourceLimit& openConnections() override { return open_connections_; }\n    uint32_t tcpBacklogSize() const override { return tcp_backlog_size_; }\n    Init::Manager& initManager() override { return *init_manager_; }\n\n    void setMaxConnections(const uint32_t num_connections) {\n      open_connections_.setMax(num_connections);\n    }\n    void clearMaxConnections() { open_connections_.resetMax(); }\n\n    ConnectionHandlerTest& parent_;\n    std::shared_ptr<NiceMock<Network::MockListenSocket>> socket_;\n    Network::ListenSocketFactorySharedPtr socket_factory_;\n    uint64_t tag_;\n    bool bind_to_port_;\n    const uint32_t tcp_backlog_size_;\n    const bool hand_off_restored_destination_connections_;\n    const std::string name_;\n    const std::chrono::milliseconds listener_filters_timeout_;\n    const bool continue_on_listener_filters_timeout_;\n    std::unique_ptr<Network::ActiveUdpListenerFactory> udp_listener_factory_;\n    std::unique_ptr<Network::UdpPacketWriterFactory> udp_writer_factory_;\n    Network::UdpListenerWorkerRouterPtr udp_listener_worker_router_;\n    Network::ConnectionBalancerSharedPtr connection_balancer_;\n    BasicResourceLimitImpl open_connections_;\n    const std::vector<AccessLog::InstanceSharedPtr> 
access_logs_;\n    std::shared_ptr<NiceMock<Network::MockFilterChainManager>> inline_filter_chain_manager_;\n    std::unique_ptr<Init::Manager> init_manager_;\n  };\n\n  using TestListenerPtr = std::unique_ptr<TestListener>;\n\n  class MockUpstreamUdpFilter : public Network::UdpListenerReadFilter {\n  public:\n    MockUpstreamUdpFilter(ConnectionHandlerTest& parent, Network::UdpReadFilterCallbacks& callbacks)\n        : UdpListenerReadFilter(callbacks), parent_(parent) {}\n    ~MockUpstreamUdpFilter() override {\n      parent_.deleted_before_listener_ = !parent_.udp_listener_deleted_;\n    }\n\n    MOCK_METHOD(void, onData, (Network::UdpRecvData&), (override));\n    MOCK_METHOD(void, onReceiveError, (Api::IoError::IoErrorCode), (override));\n\n  private:\n    ConnectionHandlerTest& parent_;\n  };\n\n  class MockUpstreamUdpListener : public Network::UdpListener {\n  public:\n    explicit MockUpstreamUdpListener(ConnectionHandlerTest& parent) : parent_(parent) {\n      ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_));\n    }\n    ~MockUpstreamUdpListener() override { parent_.udp_listener_deleted_ = true; }\n\n    MOCK_METHOD(void, enable, (), (override));\n    MOCK_METHOD(void, disable, (), (override));\n    MOCK_METHOD(Event::Dispatcher&, dispatcher, (), (override));\n    MOCK_METHOD(Network::Address::InstanceConstSharedPtr&, localAddress, (), (const, override));\n    MOCK_METHOD(Api::IoCallUint64Result, send, (const Network::UdpSendData&), (override));\n    MOCK_METHOD(Api::IoCallUint64Result, flush, (), (override));\n    MOCK_METHOD(void, activateRead, (), (override));\n\n  private:\n    ConnectionHandlerTest& parent_;\n    Event::MockDispatcher dispatcher_;\n  };\n\n  TestListener* addListener(\n      uint64_t tag, bool bind_to_port, bool hand_off_restored_destination_connections,\n      const std::string& name, Network::Listener* listener,\n      Network::TcpListenerCallbacks** listener_callbacks = nullptr,\n      
std::shared_ptr<Network::MockConnectionBalancer> connection_balancer = nullptr,\n      Network::BalancedConnectionHandler** balanced_connection_handler = nullptr,\n      Network::Socket::Type socket_type = Network::Socket::Type::Stream,\n      std::chrono::milliseconds listener_filters_timeout = std::chrono::milliseconds(15000),\n      bool continue_on_listener_filters_timeout = false,\n      std::shared_ptr<NiceMock<Network::MockFilterChainManager>> overridden_filter_chain_manager =\n          nullptr,\n      uint32_t tcp_backlog_size = ENVOY_TCP_BACKLOG_SIZE) {\n    listeners_.emplace_back(std::make_unique<TestListener>(\n        *this, tag, bind_to_port, hand_off_restored_destination_connections, name, socket_type,\n        listener_filters_timeout, continue_on_listener_filters_timeout, socket_factory_,\n        access_log_, overridden_filter_chain_manager, tcp_backlog_size, connection_balancer));\n    EXPECT_CALL(*socket_factory_, socketType()).WillOnce(Return(socket_type));\n\n    if (listener == nullptr) {\n      // Expecting listener config in place update.\n      // If so, dispatcher would not create new network listener.\n      return listeners_.back().get();\n    }\n    EXPECT_CALL(*socket_factory_, getListenSocket()).WillOnce(Return(listeners_.back()->socket_));\n    if (socket_type == Network::Socket::Type::Stream) {\n      EXPECT_CALL(dispatcher_, createListener_(_, _, _, _))\n          .WillOnce(Invoke([listener, listener_callbacks](Network::SocketSharedPtr&&,\n                                                          Network::TcpListenerCallbacks& cb, bool,\n                                                          uint32_t) -> Network::Listener* {\n            if (listener_callbacks != nullptr) {\n              *listener_callbacks = &cb;\n            }\n            return listener;\n          }));\n    } else {\n      EXPECT_CALL(dispatcher_, createUdpListener_(_, _))\n          .WillOnce(Invoke([listener](Network::SocketSharedPtr&&,\n               
                       Network::UdpListenerCallbacks&) -> Network::UdpListener* {\n            return dynamic_cast<Network::UdpListener*>(listener);\n          }));\n      listeners_.back()->udp_listener_worker_router_ =\n          std::make_unique<Network::UdpListenerWorkerRouterImpl>(1);\n    }\n\n    if (balanced_connection_handler != nullptr) {\n      EXPECT_CALL(*connection_balancer, registerHandler(_))\n          .WillOnce(SaveArgAddress(balanced_connection_handler));\n    }\n\n    return listeners_.back().get();\n  }\n\n  Stats::TestUtil::TestStore stats_store_;\n  std::shared_ptr<Network::MockListenSocketFactory> socket_factory_;\n  Network::Address::InstanceConstSharedPtr local_address_{\n      new Network::Address::Ipv4Instance(\"127.0.0.1\", 10001)};\n  NiceMock<Event::MockDispatcher> dispatcher_{\"test\"};\n  std::list<TestListenerPtr> listeners_;\n  Network::ConnectionHandlerPtr handler_;\n  NiceMock<Network::MockFilterChainManager> manager_;\n  NiceMock<Network::MockFilterChainFactory> factory_;\n  const Network::FilterChainSharedPtr filter_chain_;\n  NiceMock<Api::MockOsSysCalls> os_sys_calls_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls_{&os_sys_calls_};\n  std::shared_ptr<NiceMock<Network::MockListenerFilterMatcher>> listener_filter_matcher_;\n  bool udp_listener_deleted_ = false;\n  bool deleted_before_listener_ = false;\n  std::shared_ptr<AccessLog::MockInstance> access_log_;\n};\n\n// Verify that if a listener is removed while a rebalanced connection is in flight, we correctly\n// destroy the connection.\nTEST_F(ConnectionHandlerTest, RemoveListenerDuringRebalance) {\n  InSequence s;\n\n  // For reasons I did not investigate, the death test below requires this, likely due to forking.\n  // So we just leak the FDs for this test.\n  ON_CALL(os_sys_calls_, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0}));\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new 
NiceMock<Network::MockListener>();\n  auto connection_balancer = std::make_shared<Network::MockConnectionBalancer>();\n  Network::BalancedConnectionHandler* current_handler;\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks,\n                  connection_balancer, &current_handler);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  // Fake a balancer posting a connection to us.\n  Event::PostCb post_cb;\n  EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb));\n  Network::MockConnectionSocket* connection = new NiceMock<Network::MockConnectionSocket>();\n  current_handler->incNumConnections();\n#ifndef NDEBUG\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n#endif\n  current_handler->post(Network::ConnectionSocketPtr{connection});\n\n  EXPECT_CALL(*connection_balancer, unregisterHandler(_));\n\n  // This also tests the assert in ConnectionHandlerImpl::ActiveTcpListener::~ActiveTcpListener.\n  // See the long comment at the end of that function.\n#ifndef NDEBUG\n  // On debug builds this should crash.\n  EXPECT_DEATH(handler_->removeListeners(1), \".*num_listener_connections_ == 0.*\");\n  // The original test continues without the previous line being run. 
To avoid the same assert\n  // firing during teardown, run the posted callback now.\n  post_cb();\n  ASSERT(post_cb != nullptr);\n  EXPECT_CALL(*listener, onDestroy());\n#else\n  // On release builds this should be fine.\n  EXPECT_CALL(*listener, onDestroy());\n  handler_->removeListeners(1);\n  post_cb();\n#endif\n}\n\nTEST_F(ConnectionHandlerTest, ListenerConnectionLimitEnforced) {\n  Network::TcpListenerCallbacks* listener_callbacks1;\n  auto listener1 = new NiceMock<Network::MockListener>();\n  TestListener* test_listener1 =\n      addListener(1, false, false, \"test_listener1\", listener1, &listener_callbacks1);\n  Network::Address::InstanceConstSharedPtr normal_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.1\", 10001));\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address));\n  // Only allow a single connection on this listener.\n  test_listener1->setMaxConnections(1);\n  handler_->addListener(absl::nullopt, *test_listener1);\n\n  auto listener2 = new NiceMock<Network::MockListener>();\n  Network::TcpListenerCallbacks* listener_callbacks2;\n  TestListener* test_listener2 =\n      addListener(2, false, false, \"test_listener2\", listener2, &listener_callbacks2);\n  Network::Address::InstanceConstSharedPtr alt_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.2\", 20002));\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(alt_address));\n  // Do not allow any connections on this listener.\n  test_listener2->setMaxConnections(0);\n  handler_->addListener(absl::nullopt, *test_listener2);\n\n  EXPECT_CALL(manager_, findFilterChain(_)).WillRepeatedly(Return(filter_chain_.get()));\n  EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillRepeatedly(Return(true));\n  Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();\n  EXPECT_CALL(*test_filter, destroy_());\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      
.WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter});\n        return true;\n      }));\n  EXPECT_CALL(*test_filter, onAccept(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus {\n        return Network::FilterStatus::Continue;\n      }));\n\n  // For listener 2, verify its connection limit is independent of listener 1.\n\n  // We expect that listener 2 accepts the connection, so there will be a call to\n  // createServerConnection and active cx should increase, while cx overflow remains the same.\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  listener_callbacks2->onAccept(\n      Network::ConnectionSocketPtr{new NiceMock<Network::MockConnectionSocket>()});\n  EXPECT_EQ(0, handler_->numConnections());\n  EXPECT_EQ(0, TestUtility::findCounter(stats_store_, \"downstream_cx_total\")->value());\n  EXPECT_EQ(0, TestUtility::findGauge(stats_store_, \"downstream_cx_active\")->value());\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_, \"downstream_cx_overflow\")->value());\n\n  // For listener 1, verify connections are limited after one goes active.\n\n  // First connection attempt should result in an active connection being created.\n  auto conn1 = new NiceMock<Network::MockConnection>();\n  EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(conn1));\n  listener_callbacks1->onAccept(\n      Network::ConnectionSocketPtr{new NiceMock<Network::MockConnectionSocket>()});\n  EXPECT_EQ(1, handler_->numConnections());\n  // Note that these stats are not the per-worker stats, but the per-listener stats.\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_, \"downstream_cx_total\")->value());\n  EXPECT_EQ(1, TestUtility::findGauge(stats_store_, \"downstream_cx_active\")->value());\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_, \"downstream_cx_overflow\")->value());\n\n 
 // Don't expect server connection to be created, should be instantly closed and increment\n  // overflow stat.\n  listener_callbacks1->onAccept(\n      Network::ConnectionSocketPtr{new NiceMock<Network::MockConnectionSocket>()});\n  EXPECT_EQ(1, handler_->numConnections());\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_, \"downstream_cx_total\")->value());\n  EXPECT_EQ(1, TestUtility::findGauge(stats_store_, \"downstream_cx_active\")->value());\n  EXPECT_EQ(2, TestUtility::findCounter(stats_store_, \"downstream_cx_overflow\")->value());\n\n  // Check behavior again for good measure.\n  listener_callbacks1->onAccept(\n      Network::ConnectionSocketPtr{new NiceMock<Network::MockConnectionSocket>()});\n  EXPECT_EQ(1, handler_->numConnections());\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_, \"downstream_cx_total\")->value());\n  EXPECT_EQ(1, TestUtility::findGauge(stats_store_, \"downstream_cx_active\")->value());\n  EXPECT_EQ(3, TestUtility::findCounter(stats_store_, \"downstream_cx_overflow\")->value());\n\n  EXPECT_CALL(*listener1, onDestroy());\n  EXPECT_CALL(*listener2, onDestroy());\n}\n\nTEST_F(ConnectionHandlerTest, RemoveListener) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockConnectionSocket* connection = new NiceMock<Network::MockConnectionSocket>();\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{connection});\n  EXPECT_EQ(0UL, handler_->numConnections());\n\n  // Test stop/remove of not existent listener.\n  handler_->stopListeners(0);\n  handler_->removeListeners(0);\n\n  EXPECT_CALL(*listener, 
onDestroy());\n  handler_->stopListeners(1);\n\n  EXPECT_CALL(dispatcher_, clearDeferredDeleteList());\n  handler_->removeListeners(1);\n  EXPECT_EQ(0UL, handler_->numConnections());\n\n  // Test stop/remove of not existent listener.\n  handler_->stopListeners(0);\n  handler_->removeListeners(0);\n}\n\nTEST_F(ConnectionHandlerTest, DisableListener) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, false, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  EXPECT_CALL(*listener, disable());\n  EXPECT_CALL(*listener, onDestroy());\n\n  handler_->disableListeners();\n}\n\nTEST_F(ConnectionHandlerTest, AddDisabledListener) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, false, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*listener, disable());\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  EXPECT_CALL(*listener, onDestroy());\n\n  handler_->disableListeners();\n  handler_->addListener(absl::nullopt, *test_listener);\n}\n\nTEST_F(ConnectionHandlerTest, DestroyCloseConnections) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockConnectionSocket* connection = new NiceMock<Network::MockConnectionSocket>();\n  
EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{connection});\n  EXPECT_EQ(0UL, handler_->numConnections());\n\n  EXPECT_CALL(dispatcher_, clearDeferredDeleteList());\n  EXPECT_CALL(*listener, onDestroy());\n  handler_.reset();\n}\n\nTEST_F(ConnectionHandlerTest, CloseDuringFilterChainCreate) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get()));\n  Network::MockConnection* connection = new NiceMock<Network::MockConnection>();\n  EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(connection));\n  EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillOnce(Return(true));\n  EXPECT_CALL(*connection, state()).WillOnce(Return(Network::Connection::State::Closed));\n  EXPECT_CALL(*connection, addConnectionCallbacks(_)).Times(0);\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n  EXPECT_EQ(0UL, handler_->numConnections());\n\n  EXPECT_CALL(*listener, onDestroy());\n}\n\nTEST_F(ConnectionHandlerTest, CloseConnectionOnEmptyFilterChain) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  
handler_->addListener(absl::nullopt, *test_listener);\n\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get()));\n  Network::MockConnection* connection = new NiceMock<Network::MockConnection>();\n  EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(connection));\n  EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillOnce(Return(false));\n  EXPECT_CALL(*connection, close(Network::ConnectionCloseType::NoFlush));\n  EXPECT_CALL(*connection, addConnectionCallbacks(_)).Times(0);\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n  EXPECT_EQ(0UL, handler_->numConnections());\n\n  EXPECT_CALL(*listener, onDestroy());\n}\n\nTEST_F(ConnectionHandlerTest, NormalRedirect) {\n  Network::TcpListenerCallbacks* listener_callbacks1;\n  auto listener1 = new NiceMock<Network::MockListener>();\n  TestListener* test_listener1 =\n      addListener(1, true, true, \"test_listener1\", listener1, &listener_callbacks1);\n  Network::Address::InstanceConstSharedPtr normal_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.1\", 10001));\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address));\n  handler_->addListener(absl::nullopt, *test_listener1);\n\n  Network::TcpListenerCallbacks* listener_callbacks2;\n  auto listener2 = new NiceMock<Network::MockListener>();\n  TestListener* test_listener2 =\n      addListener(1, false, false, \"test_listener2\", listener2, &listener_callbacks2);\n  Network::Address::InstanceConstSharedPtr alt_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.2\", 20002));\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(alt_address));\n  handler_->addListener(absl::nullopt, *test_listener2);\n\n  auto* test_filter = new 
NiceMock<Network::MockListenerFilter>();\n  EXPECT_CALL(*test_filter, destroy_());\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  bool redirected = false;\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        // Insert the Mock filter.\n        if (!redirected) {\n          manager.addAcceptFilter(nullptr, Network::ListenerFilterPtr{test_filter});\n          redirected = true;\n        }\n        return true;\n      }));\n  EXPECT_CALL(*test_filter, onAccept(_))\n      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus {\n        cb.socket().restoreLocalAddress(alt_address);\n        return Network::FilterStatus::Continue;\n      }));\n  EXPECT_CALL(*accepted_socket, restoreLocalAddress(alt_address));\n  EXPECT_CALL(*accepted_socket, localAddressRestored()).WillOnce(Return(true));\n  EXPECT_CALL(*accepted_socket, localAddress()).WillRepeatedly(ReturnRef(alt_address));\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get()));\n  Network::MockConnection* connection = new NiceMock<Network::MockConnection>();\n  EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(connection));\n  EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillOnce(Return(true));\n  listener_callbacks1->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n\n  // Verify per-listener connection stats.\n  EXPECT_EQ(1UL, handler_->numConnections());\n  EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, \"downstream_cx_total\")->value());\n  EXPECT_EQ(1UL, TestUtility::findGauge(stats_store_, \"downstream_cx_active\")->value());\n  EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, \"test.downstream_cx_total\")->value());\n  EXPECT_EQ(1UL, TestUtility::findGauge(stats_store_, \"test.downstream_cx_active\")->value());\n\n  EXPECT_CALL(*access_log_, log(_, _, _, 
_)).Times(1);\n  connection->close(Network::ConnectionCloseType::NoFlush);\n  dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(0UL, TestUtility::findGauge(stats_store_, \"downstream_cx_active\")->value());\n  EXPECT_EQ(0UL, TestUtility::findGauge(stats_store_, \"test.downstream_cx_active\")->value());\n\n  EXPECT_CALL(*listener2, onDestroy());\n  EXPECT_CALL(*listener1, onDestroy());\n}\n\nTEST_F(ConnectionHandlerTest, FallbackToWildcardListener) {\n  Network::TcpListenerCallbacks* listener_callbacks1;\n  auto listener1 = new NiceMock<Network::MockListener>();\n  TestListener* test_listener1 =\n      addListener(1, true, true, \"test_listener1\", listener1, &listener_callbacks1);\n  Network::Address::InstanceConstSharedPtr normal_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.1\", 10001));\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address));\n  handler_->addListener(absl::nullopt, *test_listener1);\n\n  Network::TcpListenerCallbacks* listener_callbacks2;\n  auto listener2 = new NiceMock<Network::MockListener>();\n  TestListener* test_listener2 =\n      addListener(1, false, false, \"test_listener2\", listener2, &listener_callbacks2);\n  Network::Address::InstanceConstSharedPtr any_address = Network::Utility::getIpv4AnyAddress();\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address));\n  handler_->addListener(absl::nullopt, *test_listener2);\n\n  Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();\n  EXPECT_CALL(*test_filter, destroy_());\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  bool redirected = false;\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        // Insert the Mock filter.\n        if (!redirected) {\n          manager.addAcceptFilter(listener_filter_matcher_,\n                              
    Network::ListenerFilterPtr{test_filter});\n          redirected = true;\n        }\n        return true;\n      }));\n  // Zero port to match the port of AnyAddress\n  Network::Address::InstanceConstSharedPtr alt_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.2\", 0, nullptr));\n  EXPECT_CALL(*test_filter, onAccept(_))\n      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus {\n        cb.socket().restoreLocalAddress(alt_address);\n        return Network::FilterStatus::Continue;\n      }));\n  EXPECT_CALL(*accepted_socket, restoreLocalAddress(alt_address));\n  EXPECT_CALL(*accepted_socket, localAddressRestored()).WillOnce(Return(true));\n  EXPECT_CALL(*accepted_socket, localAddress()).WillRepeatedly(ReturnRef(alt_address));\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get()));\n  Network::MockConnection* connection = new NiceMock<Network::MockConnection>();\n  EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(connection));\n  EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillOnce(Return(true));\n  listener_callbacks1->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n  EXPECT_EQ(1UL, handler_->numConnections());\n\n  EXPECT_CALL(*listener2, onDestroy());\n  EXPECT_CALL(*listener1, onDestroy());\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n}\n\nTEST_F(ConnectionHandlerTest, WildcardListenerWithOriginalDst) {\n  Network::TcpListenerCallbacks* listener_callbacks1;\n  auto listener1 = new NiceMock<Network::MockListener>();\n  TestListener* test_listener1 =\n      addListener(1, true, true, \"test_listener1\", listener1, &listener_callbacks1);\n  Network::Address::InstanceConstSharedPtr normal_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.1\", 80));\n  // Original dst address nor port number match that of the listener's address.\n  Network::Address::InstanceConstSharedPtr original_dst_address(\n      new 
Network::Address::Ipv4Instance(\"127.0.0.2\", 8080));\n  Network::Address::InstanceConstSharedPtr any_address = Network::Utility::getAddressWithPort(\n      *Network::Utility::getIpv4AnyAddress(), normal_address->ip()->port());\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address));\n  handler_->addListener(absl::nullopt, *test_listener1);\n\n  Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        // Insert the Mock filter.\n        manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter});\n        return true;\n      }));\n  EXPECT_CALL(*test_filter, onAccept(_))\n      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus {\n        cb.socket().restoreLocalAddress(original_dst_address);\n        return Network::FilterStatus::Continue;\n      }));\n  EXPECT_CALL(*test_filter, destroy_());\n  EXPECT_CALL(*accepted_socket, restoreLocalAddress(original_dst_address));\n  EXPECT_CALL(*accepted_socket, localAddressRestored()).WillOnce(Return(true));\n  EXPECT_CALL(*accepted_socket, localAddress()).WillRepeatedly(ReturnRef(original_dst_address));\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get()));\n  Network::MockConnection* connection = new NiceMock<Network::MockConnection>();\n  EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(connection));\n  EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillOnce(Return(true));\n  listener_callbacks1->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n  EXPECT_EQ(1UL, handler_->numConnections());\n\n  EXPECT_CALL(*listener1, onDestroy());\n  EXPECT_CALL(*access_log_, log(_, _, _, 
_)).Times(1);\n}\n\nTEST_F(ConnectionHandlerTest, WildcardListenerWithNoOriginalDst) {\n  Network::TcpListenerCallbacks* listener_callbacks1;\n  auto listener1 = new NiceMock<Network::MockListener>();\n  TestListener* test_listener1 =\n      addListener(1, true, true, \"test_listener1\", listener1, &listener_callbacks1);\n\n  Network::Address::InstanceConstSharedPtr normal_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.1\", 80));\n  Network::Address::InstanceConstSharedPtr any_address = Network::Utility::getAddressWithPort(\n      *Network::Utility::getIpv4AnyAddress(), normal_address->ip()->port());\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address));\n  handler_->addListener(absl::nullopt, *test_listener1);\n\n  Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();\n  EXPECT_CALL(*test_filter, destroy_());\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        // Insert the Mock filter.\n        manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter});\n        return true;\n      }));\n  EXPECT_CALL(*test_filter, onAccept(_)).WillOnce(Return(Network::FilterStatus::Continue));\n  EXPECT_CALL(*accepted_socket, localAddressRestored()).WillOnce(Return(false));\n  EXPECT_CALL(*accepted_socket, localAddress()).WillRepeatedly(ReturnRef(normal_address));\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get()));\n  Network::MockConnection* connection = new NiceMock<Network::MockConnection>();\n  EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(connection));\n  EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillOnce(Return(true));\n  listener_callbacks1->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n  
EXPECT_EQ(1UL, handler_->numConnections());\n\n  EXPECT_CALL(*listener1, onDestroy());\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n}\n\nTEST_F(ConnectionHandlerTest, TransportProtocolDefault) {\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  EXPECT_CALL(*accepted_socket, detectedTransportProtocol())\n      .WillOnce(Return(absl::string_view(\"\")));\n  EXPECT_CALL(*accepted_socket, setDetectedTransportProtocol(absl::string_view(\"raw_buffer\")));\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr));\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n\n  EXPECT_CALL(*listener, onDestroy());\n}\n\nTEST_F(ConnectionHandlerTest, TransportProtocolCustom) {\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();\n  EXPECT_CALL(*test_filter, destroy_());\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter});\n        return true;\n      }));\n  
absl::string_view dummy = \"dummy\";\n  EXPECT_CALL(*test_filter, onAccept(_))\n      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus {\n        cb.socket().setDetectedTransportProtocol(dummy);\n        return Network::FilterStatus::Continue;\n      }));\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  EXPECT_CALL(*accepted_socket, setDetectedTransportProtocol(dummy));\n  EXPECT_CALL(*accepted_socket, detectedTransportProtocol()).WillOnce(Return(dummy));\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr));\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n\n  EXPECT_CALL(*listener, onDestroy());\n}\n\n// Timeout during listener filter stop iteration.\nTEST_F(ConnectionHandlerTest, ListenerFilterTimeout) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter});\n        return true;\n      }));\n  EXPECT_CALL(*test_filter, onAccept(_))\n      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus {\n        return Network::FilterStatus::StopIteration;\n      }));\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  Network::IoSocketHandleImpl 
io_handle{42};\n  EXPECT_CALL(*accepted_socket, ioHandle()).WillRepeatedly(ReturnRef(io_handle));\n  Event::MockTimer* timeout = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*timeout, enableTimer(std::chrono::milliseconds(15000), _));\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n  Stats::Gauge& downstream_pre_cx_active =\n      stats_store_.gauge(\"downstream_pre_cx_active\", Stats::Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(1UL, downstream_pre_cx_active.value());\n\n  EXPECT_CALL(*timeout, disableTimer());\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  timeout->invokeCallback();\n  EXPECT_CALL(*test_filter, destroy_());\n  dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(0UL, downstream_pre_cx_active.value());\n  EXPECT_EQ(1UL, stats_store_.counter(\"downstream_pre_cx_timeout\").value());\n\n  // Make sure we didn't continue to try create connection.\n  EXPECT_EQ(0UL, stats_store_.counter(\"no_filter_chain_match\").value());\n\n  EXPECT_CALL(*listener, onDestroy());\n}\n\n// Continue on timeout during listener filter stop iteration.\nTEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks, nullptr, nullptr,\n                  Network::Socket::Type::Stream, std::chrono::milliseconds(15000), true);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockListenerFilter* test_filter = new NiceMock<Network::MockListenerFilter>();\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        manager.addAcceptFilter(listener_filter_matcher_, 
Network::ListenerFilterPtr{test_filter});\n        return true;\n      }));\n  EXPECT_CALL(*test_filter, onAccept(_))\n      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus {\n        return Network::FilterStatus::StopIteration;\n      }));\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  Network::IoSocketHandleImpl io_handle{42};\n  EXPECT_CALL(*accepted_socket, ioHandle()).WillRepeatedly(ReturnRef(io_handle));\n  Event::MockTimer* timeout = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*timeout, enableTimer(std::chrono::milliseconds(15000), _));\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n  Stats::Gauge& downstream_pre_cx_active =\n      stats_store_.gauge(\"downstream_pre_cx_active\", Stats::Gauge::ImportMode::Accumulate);\n  EXPECT_EQ(1UL, downstream_pre_cx_active.value());\n  EXPECT_CALL(*test_filter, destroy_());\n  // Barrier: test_filter must be destructed before findFilterChain\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr));\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  EXPECT_CALL(*timeout, disableTimer());\n  timeout->invokeCallback();\n  dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(0UL, downstream_pre_cx_active.value());\n  EXPECT_EQ(1UL, stats_store_.counter(\"downstream_pre_cx_timeout\").value());\n\n  // Make sure we continued to try create connection.\n  EXPECT_EQ(1UL, stats_store_.counter(\"no_filter_chain_match\").value());\n\n  EXPECT_CALL(*listener, onDestroy());\n}\n\n// Timeout is disabled once the listener filters complete.\nTEST_F(ConnectionHandlerTest, ListenerFilterTimeoutResetOnSuccess) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, 
localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter});\n        return true;\n      }));\n  Network::ListenerFilterCallbacks* listener_filter_cb{};\n  EXPECT_CALL(*test_filter, onAccept(_))\n      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus {\n        listener_filter_cb = &cb;\n        return Network::FilterStatus::StopIteration;\n      }));\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  Network::IoSocketHandleImpl io_handle{42};\n  EXPECT_CALL(*accepted_socket, ioHandle()).WillRepeatedly(ReturnRef(io_handle));\n\n  Event::MockTimer* timeout = new Event::MockTimer(&dispatcher_);\n  EXPECT_CALL(*timeout, enableTimer(std::chrono::milliseconds(15000), _));\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n  EXPECT_CALL(*test_filter, destroy_());\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr));\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  EXPECT_CALL(*timeout, disableTimer());\n  listener_filter_cb->continueFilterChain(true);\n\n  EXPECT_CALL(*listener, onDestroy());\n}\n\n// Ensure there is no timeout when the timeout is disabled with 0s.\nTEST_F(ConnectionHandlerTest, ListenerFilterDisabledTimeout) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks, nullptr, nullptr,\n                  Network::Socket::Type::Stream, 
std::chrono::milliseconds());\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter});\n        return true;\n      }));\n  EXPECT_CALL(*test_filter, onAccept(_))\n      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus {\n        return Network::FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  EXPECT_CALL(dispatcher_, createTimer_(_)).Times(0);\n  EXPECT_CALL(*test_filter, destroy_());\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n\n  EXPECT_CALL(*listener, onDestroy());\n}\n\n// Listener Filter could close socket in the context of listener callback.\nTEST_F(ConnectionHandlerTest, ListenerFilterReportError) {\n  InSequence s;\n\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockListenerFilter* first_filter = new Network::MockListenerFilter();\n  Network::MockListenerFilter* last_filter = new Network::MockListenerFilter();\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        
manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{first_filter});\n        manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{last_filter});\n        return true;\n      }));\n  // The first filter close the socket\n  EXPECT_CALL(*first_filter, onAccept(_))\n      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus {\n        cb.socket().close();\n        return Network::FilterStatus::StopIteration;\n      }));\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  // The last filter won't be invoked\n  EXPECT_CALL(*last_filter, onAccept(_)).Times(0);\n  EXPECT_CALL(*first_filter, destroy_());\n  EXPECT_CALL(*last_filter, destroy_());\n  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});\n\n  dispatcher_.clearDeferredDeleteList();\n  // Make sure the error leads to no listener timer created.\n  EXPECT_CALL(dispatcher_, createTimer_(_)).Times(0);\n  // Make sure we never try to match the filer chain since listener filter doesn't complete.\n  EXPECT_CALL(manager_, findFilterChain(_)).Times(0);\n\n  EXPECT_CALL(*listener, onDestroy());\n}\n\n// Ensure an exception is thrown if there are no filters registered for a UDP listener\nTEST_F(ConnectionHandlerTest, UdpListenerNoFilterThrowsException) {\n  InSequence s;\n\n  auto listener = new NiceMock<Network::MockUdpListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, nullptr, nullptr, nullptr,\n                  Network::Socket::Type::Datagram, std::chrono::milliseconds());\n  EXPECT_CALL(factory_, createUdpListenerFilterChain(_, _))\n      .WillOnce(Invoke([&](Network::UdpListenerFilterManager&,\n                           Network::UdpReadFilterCallbacks&) -> bool { return true; }));\n  EXPECT_CALL(*socket_factory_, 
localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  EXPECT_CALL(*listener, onDestroy());\n\n  try {\n    handler_->addListener(absl::nullopt, *test_listener);\n    FAIL();\n  } catch (const Network::CreateListenerException& e) {\n    EXPECT_THAT(\n        e.what(),\n        HasSubstr(\"Cannot create listener as no read filter registered for the udp listener\"));\n  }\n}\n\nTEST_F(ConnectionHandlerTest, TcpListenerInplaceUpdate) {\n  InSequence s;\n  uint64_t old_listener_tag = 1;\n  uint64_t new_listener_tag = 2;\n  Network::TcpListenerCallbacks* old_listener_callbacks;\n  Network::BalancedConnectionHandler* current_handler;\n\n  auto old_listener = new NiceMock<Network::MockListener>();\n  auto mock_connection_balancer = std::make_shared<Network::MockConnectionBalancer>();\n\n  TestListener* old_test_listener =\n      addListener(old_listener_tag, true, false, \"test_listener\", old_listener,\n                  &old_listener_callbacks, mock_connection_balancer, &current_handler);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *old_test_listener);\n  ASSERT_NE(old_test_listener, nullptr);\n\n  Network::TcpListenerCallbacks* new_listener_callbacks = nullptr;\n\n  auto overridden_filter_chain_manager =\n      std::make_shared<NiceMock<Network::MockFilterChainManager>>();\n  TestListener* new_test_listener = addListener(\n      new_listener_tag, true, false, \"test_listener\", /* Network::Listener */ nullptr,\n      &new_listener_callbacks, mock_connection_balancer, nullptr, Network::Socket::Type::Stream,\n      std::chrono::milliseconds(15000), false, overridden_filter_chain_manager);\n  handler_->addListener(old_listener_tag, *new_test_listener);\n  ASSERT_EQ(new_listener_callbacks, nullptr)\n      << \"new listener should be inplace added and callback should not change\";\n\n  Network::MockConnectionSocket* connection = new NiceMock<Network::MockConnectionSocket>();\n  
current_handler->incNumConnections();\n\n  EXPECT_CALL(*mock_connection_balancer, pickTargetHandler(_))\n      .WillOnce(ReturnRef(*current_handler));\n  EXPECT_CALL(manager_, findFilterChain(_)).Times(0);\n  EXPECT_CALL(*overridden_filter_chain_manager, findFilterChain(_)).WillOnce(Return(nullptr));\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  EXPECT_CALL(*mock_connection_balancer, unregisterHandler(_));\n  old_listener_callbacks->onAccept(Network::ConnectionSocketPtr{connection});\n  EXPECT_EQ(0UL, handler_->numConnections());\n  EXPECT_CALL(*old_listener, onDestroy());\n}\n\nTEST_F(ConnectionHandlerTest, TcpListenerRemoveFilterChain) {\n  InSequence s;\n  uint64_t listener_tag = 1;\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(listener_tag, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  Network::MockConnectionSocket* connection = new NiceMock<Network::MockConnectionSocket>();\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get()));\n  Network::MockConnection* server_connection = new NiceMock<Network::MockConnection>();\n  EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(server_connection));\n  EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillOnce(Return(true));\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n\n  listener_callbacks->onAccept(Network::ConnectionSocketPtr{connection});\n\n  EXPECT_EQ(1UL, handler_->numConnections());\n  EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, \"downstream_cx_total\")->value());\n  EXPECT_EQ(1UL, TestUtility::findGauge(stats_store_, \"downstream_cx_active\")->value());\n  EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, 
\"test.downstream_cx_total\")->value());\n  EXPECT_EQ(1UL, TestUtility::findGauge(stats_store_, \"test.downstream_cx_active\")->value());\n\n  const std::list<const Network::FilterChain*> filter_chains{filter_chain_.get()};\n\n  // The completion callback is scheduled\n  handler_->removeFilterChains(listener_tag, filter_chains,\n                               []() { ENVOY_LOG(debug, \"removed filter chains\"); });\n  // Trigger the deletion if any.\n  dispatcher_.clearDeferredDeleteList();\n  EXPECT_EQ(0UL, handler_->numConnections());\n  EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, \"downstream_cx_total\")->value());\n  EXPECT_EQ(0UL, TestUtility::findGauge(stats_store_, \"downstream_cx_active\")->value());\n  EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, \"test.downstream_cx_total\")->value());\n  EXPECT_EQ(0UL, TestUtility::findGauge(stats_store_, \"test.downstream_cx_active\")->value());\n\n  EXPECT_CALL(dispatcher_, clearDeferredDeleteList());\n  EXPECT_CALL(*listener, onDestroy());\n  handler_.reset();\n}\n\n// Listener Filter matchers works.\nTEST_F(ConnectionHandlerTest, ListenerFilterWorks) {\n  Network::TcpListenerCallbacks* listener_callbacks;\n  auto listener = new NiceMock<Network::MockListener>();\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, &listener_callbacks);\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  handler_->addListener(absl::nullopt, *test_listener);\n\n  auto all_matcher = std::make_shared<Network::MockListenerFilterMatcher>();\n  auto* disabled_listener_filter = new Network::MockListenerFilter();\n  auto* enabled_filter = new Network::MockListenerFilter();\n  EXPECT_CALL(factory_, createListenerFilterChain(_))\n      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {\n        manager.addAcceptFilter(all_matcher, Network::ListenerFilterPtr{disabled_listener_filter});\n        
manager.addAcceptFilter(listener_filter_matcher_,\n                                Network::ListenerFilterPtr{enabled_filter});\n        return true;\n      }));\n\n  // The all matcher matches any incoming traffic and disables the listener filter.\n  EXPECT_CALL(*all_matcher, matches(_)).WillOnce(Return(true));\n  EXPECT_CALL(*disabled_listener_filter, onAccept(_)).Times(0);\n\n  // The non matcher acts as if always enabled.\n  EXPECT_CALL(*enabled_filter, onAccept(_)).WillOnce(Return(Network::FilterStatus::Continue));\n  EXPECT_CALL(*disabled_listener_filter, destroy_());\n  EXPECT_CALL(*enabled_filter, destroy_());\n  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr));\n  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);\n  listener_callbacks->onAccept(std::make_unique<NiceMock<Network::MockConnectionSocket>>());\n  EXPECT_CALL(*listener, onDestroy());\n}\n\n// The read_filter should be deleted before the udp_listener is deleted.\nTEST_F(ConnectionHandlerTest, ShutdownUdpListener) {\n  InSequence s;\n\n  Network::MockUdpReadFilterCallbacks dummy_callbacks;\n  auto listener = new NiceMock<MockUpstreamUdpListener>(*this);\n  TestListener* test_listener =\n      addListener(1, true, false, \"test_listener\", listener, nullptr, nullptr, nullptr,\n                  Network::Socket::Type::Datagram, std::chrono::milliseconds(), false, nullptr);\n  auto filter = std::make_unique<NiceMock<MockUpstreamUdpFilter>>(*this, dummy_callbacks);\n\n  EXPECT_CALL(factory_, createUdpListenerFilterChain(_, _))\n      .WillOnce(Invoke([&](Network::UdpListenerFilterManager& udp_listener,\n                           Network::UdpReadFilterCallbacks&) -> bool {\n        udp_listener.addReadFilter(std::move(filter));\n        return true;\n      }));\n  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_));\n  EXPECT_CALL(dummy_callbacks.udp_listener_, onDestroy());\n\n  handler_->addListener(absl::nullopt, *test_listener);\n  
handler_->stopListeners();\n\n  ASSERT_TRUE(deleted_before_listener_)\n      << \"The read_filter_ should be deleted before the udp_listener_ is deleted.\";\n}\n\nTEST_F(ConnectionHandlerTest, TcpBacklogCustom) {\n  uint32_t custom_backlog = 100;\n  TestListener* test_listener = addListener(\n      1, true, false, \"test_tcp_backlog\", nullptr, nullptr, nullptr, nullptr,\n      Network::Socket::Type::Stream, std::chrono::milliseconds(), false, nullptr, custom_backlog);\n  EXPECT_CALL(*socket_factory_, getListenSocket()).WillOnce(Return(listeners_.back()->socket_));\n  EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_));\n  EXPECT_CALL(dispatcher_, createListener_(_, _, _, _))\n      .WillOnce(Invoke([custom_backlog](Network::SocketSharedPtr&&, Network::TcpListenerCallbacks&,\n                                        bool, uint32_t backlog) -> Network::Listener* {\n        EXPECT_EQ(custom_backlog, backlog);\n        return nullptr;\n      }));\n  handler_->addListener(absl::nullopt, *test_listener);\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/drain_manager_impl_test.cc",
    "content": "#include <chrono>\n\n#include \"envoy/config/listener/v3/listener.pb.h\"\n\n#include \"server/drain_manager_impl.h\"\n\n#include \"test/mocks/server/instance.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Return;\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nconstexpr int DrainTimeSeconds(600);\n\nclass DrainManagerImplTest : public Event::TestUsingSimulatedTime,\n                             public testing::TestWithParam<bool> {\nprotected:\n  DrainManagerImplTest() {\n    ON_CALL(server_.options_, drainTime())\n        .WillByDefault(Return(std::chrono::seconds(DrainTimeSeconds)));\n    ON_CALL(server_.options_, parentShutdownTime())\n        .WillByDefault(Return(std::chrono::seconds(900)));\n  }\n\n  NiceMock<MockInstance> server_;\n};\n\nTEST_F(DrainManagerImplTest, Default) {\n  InSequence s;\n  DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT);\n\n  // Test parent shutdown.\n  Event::MockTimer* shutdown_timer = new Event::MockTimer(&server_.dispatcher_);\n  EXPECT_CALL(*shutdown_timer, enableTimer(std::chrono::milliseconds(900000), _));\n  drain_manager.startParentShutdownSequence();\n\n  EXPECT_CALL(server_.hot_restart_, sendParentTerminateRequest());\n  shutdown_timer->invokeCallback();\n\n  // Verify basic drain close.\n  EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(false));\n  EXPECT_FALSE(drain_manager.drainClose());\n  EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(true));\n  EXPECT_TRUE(drain_manager.drainClose());\n\n  // Test drain sequence.\n  Event::MockTimer* drain_timer = new Event::MockTimer(&server_.dispatcher_);\n  const auto expected_delay = std::chrono::milliseconds(DrainTimeSeconds * 1000);\n  EXPECT_CALL(*drain_timer, enableTimer(expected_delay, nullptr));\n  ReadyWatcher drain_complete;\n  drain_manager.startDrainSequence([&drain_complete]() -> void { 
drain_complete.ready(); });\n  EXPECT_CALL(drain_complete, ready());\n  drain_timer->invokeCallback();\n}\n\nTEST_F(DrainManagerImplTest, ModifyOnly) {\n  InSequence s;\n  DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::MODIFY_ONLY);\n\n  EXPECT_CALL(server_, healthCheckFailed()).Times(0); // Listener check will short-circuit\n  EXPECT_FALSE(drain_manager.drainClose());\n}\n\nTEST_P(DrainManagerImplTest, DrainDeadline) {\n  const bool drain_gradually = GetParam();\n  ON_CALL(server_.options_, drainStrategy())\n      .WillByDefault(Return(drain_gradually ? Server::DrainStrategy::Gradual\n                                            : Server::DrainStrategy::Immediate));\n  // TODO(auni53): Add integration tests for this once TestDrainManager is\n  // removed.\n  DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT);\n\n  // Ensure drainClose() behaviour is determined by the deadline.\n  drain_manager.startDrainSequence([] {});\n  EXPECT_CALL(server_, healthCheckFailed()).WillRepeatedly(Return(false));\n  ON_CALL(server_.api_.random_, random()).WillByDefault(Return(DrainTimeSeconds * 2 - 1));\n  ON_CALL(server_.options_, drainTime())\n      .WillByDefault(Return(std::chrono::seconds(DrainTimeSeconds)));\n\n  if (drain_gradually) {\n    // random() should be called when elapsed time < drain timeout\n    EXPECT_CALL(server_.api_.random_, random()).Times(2);\n    EXPECT_FALSE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1));\n    EXPECT_FALSE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(1));\n    EXPECT_TRUE(drain_manager.drainClose());\n\n    // Test that this still works if remaining time is negative\n    simTime().advanceTimeWait(std::chrono::seconds(1));\n    EXPECT_TRUE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(500));\n    EXPECT_TRUE(drain_manager.drainClose());\n  } else 
{\n    EXPECT_CALL(server_.api_.random_, random()).Times(0);\n    EXPECT_TRUE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1));\n    EXPECT_TRUE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(1));\n    EXPECT_TRUE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(1));\n    EXPECT_TRUE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(500));\n    EXPECT_TRUE(drain_manager.drainClose());\n  }\n}\n\nTEST_P(DrainManagerImplTest, DrainDeadlineProbability) {\n  const bool drain_gradually = GetParam();\n  ON_CALL(server_.options_, drainStrategy())\n      .WillByDefault(Return(drain_gradually ? Server::DrainStrategy::Gradual\n                                            : Server::DrainStrategy::Immediate));\n  ON_CALL(server_.api_.random_, random()).WillByDefault(Return(4));\n  ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(3)));\n\n  DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT);\n\n  EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(true));\n  EXPECT_TRUE(drain_manager.drainClose());\n  EXPECT_CALL(server_, healthCheckFailed()).WillRepeatedly(Return(false));\n  EXPECT_FALSE(drain_manager.drainClose());\n  EXPECT_FALSE(drain_manager.draining());\n\n  drain_manager.startDrainSequence([] {});\n  EXPECT_TRUE(drain_manager.draining());\n\n  if (drain_gradually) {\n    // random() should be called when elapsed time < drain timeout\n    EXPECT_CALL(server_.api_.random_, random()).Times(2);\n    // Current elapsed time is 0\n    // drainClose() will return true when elapsed time > (4 % 3 == 1).\n    EXPECT_FALSE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(2));\n    EXPECT_TRUE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(1));\n    EXPECT_TRUE(drain_manager.drainClose());\n  } 
else {\n    EXPECT_CALL(server_.api_.random_, random()).Times(0);\n    EXPECT_TRUE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(2));\n    EXPECT_TRUE(drain_manager.drainClose());\n    simTime().advanceTimeWait(std::chrono::seconds(1));\n    EXPECT_TRUE(drain_manager.drainClose());\n  }\n}\n\nINSTANTIATE_TEST_SUITE_P(DrainStrategies, DrainManagerImplTest, testing::Bool());\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/filter_chain_benchmark_test.cc",
    "content": "#include <iostream>\n\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/network/connection.h\"\n#include \"envoy/network/listen_socket.h\"\n#include \"envoy/protobuf/message_validator.h\"\n\n#include \"server/filter_chain_manager_impl.h\"\n\n#include \"extensions/transport_sockets/well_known_names.h\"\n\n#include \"test/benchmark/main.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"benchmark/benchmark.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\n\nnamespace {\nclass MockFilterChainFactoryBuilder : public FilterChainFactoryBuilder {\n  Network::DrainableFilterChainSharedPtr\n  buildFilterChain(const envoy::config::listener::v3::FilterChain&,\n                   FilterChainFactoryContextCreator&) const override {\n    // A place holder to be found\n    return std::make_shared<Network::MockFilterChain>();\n  }\n};\n\nclass MockConnectionSocket : public Network::ConnectionSocket {\npublic:\n  MockConnectionSocket() = default;\n  static std::unique_ptr<MockConnectionSocket>\n  createMockConnectionSocket(uint16_t destination_port, const std::string& destination_address,\n                             const std::string& server_name, const std::string& transport_protocol,\n                             const std::vector<std::string>& application_protocols,\n                             const std::string& source_address, uint16_t source_port) {\n    auto res = std::make_unique<MockConnectionSocket>();\n\n    if (absl::StartsWith(destination_address, \"/\")) {\n      res->local_address_ = std::make_shared<Network::Address::PipeInstance>(destination_address);\n    } else 
{\n      res->local_address_ =\n          Network::Utility::parseInternetAddress(destination_address, destination_port);\n    }\n    if (absl::StartsWith(source_address, \"/\")) {\n      res->remote_address_ = std::make_shared<Network::Address::PipeInstance>(source_address);\n    } else {\n      res->remote_address_ = Network::Utility::parseInternetAddress(source_address, source_port);\n    }\n    res->server_name_ = server_name;\n    res->transport_protocol_ = transport_protocol;\n    res->application_protocols_ = application_protocols;\n    return res;\n  }\n\n  const Network::Address::InstanceConstSharedPtr& remoteAddress() const override {\n    return remote_address_;\n  }\n\n  const Network::Address::InstanceConstSharedPtr& directRemoteAddress() const override {\n    return remote_address_;\n  }\n\n  const Network::Address::InstanceConstSharedPtr& localAddress() const override {\n    return local_address_;\n  }\n\n  absl::string_view detectedTransportProtocol() const override { return transport_protocol_; }\n\n  absl::string_view requestedServerName() const override { return server_name_; }\n  const std::vector<std::string>& requestedApplicationProtocols() const override {\n    return application_protocols_;\n  }\n\n  // Wont call\n  Network::IoHandle& ioHandle() override { return *io_handle_; }\n  const Network::IoHandle& ioHandle() const override { return *io_handle_; }\n\n  // Dummy method\n  void close() override {}\n  bool isOpen() const override { return false; }\n  Network::Socket::Type socketType() const override { return Network::Socket::Type::Stream; }\n  Network::Address::Type addressType() const override { return local_address_->type(); }\n  absl::optional<Network::Address::IpVersion> ipVersion() const override {\n    return Network::Address::IpVersion::v4;\n  }\n  void setLocalAddress(const Network::Address::InstanceConstSharedPtr&) override {}\n  void restoreLocalAddress(const Network::Address::InstanceConstSharedPtr&) override {}\n  void 
setRemoteAddress(const Network::Address::InstanceConstSharedPtr&) override {}\n  bool localAddressRestored() const override { return true; }\n  void setDetectedTransportProtocol(absl::string_view) override {}\n  void setRequestedApplicationProtocols(const std::vector<absl::string_view>&) override {}\n  void addOption(const OptionConstSharedPtr&) override {}\n  void addOptions(const OptionsSharedPtr&) override {}\n  const OptionsSharedPtr& options() const override { return options_; }\n  void setRequestedServerName(absl::string_view) override {}\n  Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr) override { return {0, 0}; }\n  Api::SysCallIntResult listen(int) override { return {0, 0}; }\n  Api::SysCallIntResult connect(const Network::Address::InstanceConstSharedPtr) override {\n    return {0, 0};\n  }\n  Api::SysCallIntResult setSocketOption(int, int, const void*, socklen_t) override {\n    return {0, 0};\n  }\n  Api::SysCallIntResult getSocketOption(int, int, void*, socklen_t*) const override {\n    return {0, 0};\n  }\n  Api::SysCallIntResult setBlockingForTest(bool) override { return {0, 0}; }\n  absl::optional<std::chrono::milliseconds> lastRoundTripTime() override { return {}; }\n\nprivate:\n  Network::IoHandlePtr io_handle_;\n  OptionsSharedPtr options_;\n  Network::Address::InstanceConstSharedPtr local_address_;\n  Network::Address::InstanceConstSharedPtr remote_address_;\n  std::string server_name_;\n  std::string transport_protocol_;\n  std::vector<std::string> application_protocols_;\n};\nconst char YamlHeader[] = R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      config: {}\n    filter_chains:\n    - filter_chain_match:\n        # empty\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n     
       tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\")EOF\";\nconst char YamlSingleServer[] = R\"EOF(\n    - filter_chain_match:\n        server_names: \"server1.example.com\"\n        transport_protocol: \"tls\"\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\")EOF\";\nconst char YamlSingleDstPortTop[] = R\"EOF(\n    - filter_chain_match:\n        destination_port: )EOF\";\nconst char YamlSingleDstPortBottom[] = R\"EOF(\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\")EOF\";\n} // namespace\n\nclass FilterChainBenchmarkFixture : public ::benchmark::Fixture {\npublic:\n  void initialize(::benchmark::State& state) {\n    int64_t input_size = state.range(0);\n    std::vector<std::string> port_chains;\n    port_chains.reserve(input_size);\n    for (int i = 0; i < input_size; i++) {\n      port_chains.push_back(absl::StrCat(YamlSingleDstPortTop, 10000 + i, YamlSingleDstPortBottom));\n    }\n    listener_yaml_config_ = TestEnvironment::substitute(\n        absl::StrCat(YamlHeader, YamlSingleServer, absl::StrJoin(port_chains, \"\")),\n        Network::Address::IpVersion::v4);\n    TestUtility::loadFromYaml(listener_yaml_config_, listener_config_);\n    filter_chains_ = listener_config_.filter_chains();\n  }\n\n  Envoy::Thread::MutexBasicLockable lock_;\n  Logger::Context logging_state_{spdlog::level::warn, Logger::Logger::DEFAULT_LOG_FORMAT, lock_,\n                                 false};\n  std::string listener_yaml_config_;\n  envoy::config::listener::v3::Listener listener_config_;\n  absl::Span<const envoy::config::listener::v3::FilterChain* const> filter_chains_;\n  MockFilterChainFactoryBuilder dummy_builder_;\n  Init::ManagerImpl init_manager_{\"fcm_benchmark\"};\n};\n\n// NOLINTNEXTLINE(readability-redundant-member-init)\nBENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest)\n(::benchmark::State& state) {\n  if (benchmark::skipExpensiveBenchmarks() && state.range(0) > 64) {\n    state.SkipWithError(\"Skipping expensive benchmark\");\n    return;\n  }\n\n  initialize(state);\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  for (auto _ : state) {\n    FilterChainManagerImpl filter_chain_manager{\n        std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1234), factory_context,\n        init_manager_};\n    filter_chain_manager.addFilterChain(filter_chains_, dummy_builder_, filter_chain_manager);\n  
}\n}\n\nBENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainFindTest)\n(::benchmark::State& state) {\n  if (benchmark::skipExpensiveBenchmarks() && state.range(0) > 64) {\n    state.SkipWithError(\"Skipping expensive benchmark\");\n    return;\n  }\n\n  initialize(state);\n  std::vector<MockConnectionSocket> sockets;\n  sockets.reserve(state.range(0));\n  for (int i = 0; i < state.range(0); i++) {\n    sockets.push_back(std::move(*MockConnectionSocket::createMockConnectionSocket(\n        10000 + i, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111)));\n  }\n  NiceMock<Server::Configuration::MockFactoryContext> factory_context;\n  FilterChainManagerImpl filter_chain_manager{\n      std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1234), factory_context,\n      init_manager_};\n\n  filter_chain_manager.addFilterChain(filter_chains_, dummy_builder_, filter_chain_manager);\n  for (auto _ : state) {\n    for (int i = 0; i < state.range(0); i++) {\n      filter_chain_manager.findFilterChain(sockets[i]);\n    }\n  }\n}\nBENCHMARK_REGISTER_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest)\n    ->Ranges({\n        // scale of the chains\n        {1, 4096},\n    })\n    ->Unit(::benchmark::kMillisecond);\nBENCHMARK_REGISTER_F(FilterChainBenchmarkFixture, FilterChainFindTest)\n    ->Ranges({\n        // scale of the chains\n        {1, 4096},\n    })\n    ->Unit(::benchmark::kMillisecond);\n\n/*\nclang-format off\n\nRun on (32 X 2200 MHz CPU s)\nCPU Caches:\n  L1 Data 32K (x16)\n  L1 Instruction 32K (x16)\n  L2 Unified 256K (x16)\n  L3 Unified 56320K (x1)\nLoad Average: 19.05, 9.89, 3.92\n-------------------------------------------------------------------------------------------------------\nBenchmark                                                             Time             CPU   
Iterations\n-------------------------------------------------------------------------------------------------------\nFilterChainBenchmarkFixture/FilterChainManagerBuildTest/1        136994 ns       134510 ns         5183\nFilterChainBenchmarkFixture/FilterChainManagerBuildTest/8        583649 ns       574596 ns         1207\nFilterChainBenchmarkFixture/FilterChainManagerBuildTest/64      4483799 ns      4419618 ns          157\nFilterChainBenchmarkFixture/FilterChainManagerBuildTest/512    38864048 ns     38340468 ns           19\nFilterChainBenchmarkFixture/FilterChainManagerBuildTest/4096  318686843 ns    318568578 ns            2\nFilterChainBenchmarkFixture/FilterChainFindTest/1                   201 ns          201 ns      3494470\nFilterChainBenchmarkFixture/FilterChainFindTest/8                  1592 ns         1592 ns       435045\nFilterChainBenchmarkFixture/FilterChainFindTest/64                16057 ns        16053 ns        44275\nFilterChainBenchmarkFixture/FilterChainFindTest/512              172423 ns       172269 ns         4253\nFilterChainBenchmarkFixture/FilterChainFindTest/4096            2676478 ns      2676167 ns          254\n\nclang-format on\n*/\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/filter_chain_manager_impl_test.cc",
    "content": "#include <chrono>\n#include <functional>\n#include <memory>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n#include \"envoy/registry/registry.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/config/metadata.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/socket_option_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"server/configuration_impl.h\"\n#include \"server/filter_chain_manager_impl.h\"\n#include \"server/listener_manager_impl.h\"\n\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/drain_manager.h\"\n#include \"test/mocks/server/factory_context.h\"\n#include \"test/server/utility.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/match.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Server {\n\nclass MockFilterChainFactoryBuilder : public FilterChainFactoryBuilder {\npublic:\n  MockFilterChainFactoryBuilder() {\n    ON_CALL(*this, buildFilterChain(_, _))\n        .WillByDefault(Return(std::make_shared<Network::MockFilterChain>()));\n  }\n\n  MOCK_METHOD(Network::DrainableFilterChainSharedPtr, buildFilterChain,\n              (const envoy::config::listener::v3::FilterChain&, FilterChainFactoryContextCreator&),\n              (const));\n};\n\nclass FilterChainManagerImplTest : public 
testing::Test {\npublic:\n  void SetUp() override {\n    local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1234);\n    remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1234);\n    TestUtility::loadFromYaml(\n        TestEnvironment::substitute(filter_chain_yaml, Network::Address::IpVersion::v4),\n        filter_chain_template_);\n  }\n\n  const Network::FilterChain*\n  findFilterChainHelper(uint16_t destination_port, const std::string& destination_address,\n                        const std::string& server_name, const std::string& transport_protocol,\n                        const std::vector<std::string>& application_protocols,\n                        const std::string& source_address, uint16_t source_port) {\n    auto mock_socket = std::make_shared<NiceMock<Network::MockConnectionSocket>>();\n    sockets_.push_back(mock_socket);\n\n    if (absl::StartsWith(destination_address, \"/\")) {\n      local_address_ = std::make_shared<Network::Address::PipeInstance>(destination_address);\n    } else {\n      local_address_ =\n          Network::Utility::parseInternetAddress(destination_address, destination_port);\n    }\n    ON_CALL(*mock_socket, localAddress()).WillByDefault(ReturnRef(local_address_));\n\n    ON_CALL(*mock_socket, requestedServerName())\n        .WillByDefault(Return(absl::string_view(server_name)));\n    ON_CALL(*mock_socket, detectedTransportProtocol())\n        .WillByDefault(Return(absl::string_view(transport_protocol)));\n    ON_CALL(*mock_socket, requestedApplicationProtocols())\n        .WillByDefault(ReturnRef(application_protocols));\n\n    if (absl::StartsWith(source_address, \"/\")) {\n      remote_address_ = std::make_shared<Network::Address::PipeInstance>(source_address);\n    } else {\n      remote_address_ = Network::Utility::parseInternetAddress(source_address, source_port);\n    }\n    ON_CALL(*mock_socket, remoteAddress()).WillByDefault(ReturnRef(remote_address_));\n    
return filter_chain_manager_.findFilterChain(*mock_socket);\n  }\n\n  void addSingleFilterChainHelper(const envoy::config::listener::v3::FilterChain& filter_chain) {\n    filter_chain_manager_.addFilterChain(\n        std::vector<const envoy::config::listener::v3::FilterChain*>{&filter_chain},\n        filter_chain_factory_builder_, filter_chain_manager_);\n  }\n\n  // Intermediate states.\n  Network::Address::InstanceConstSharedPtr local_address_;\n  Network::Address::InstanceConstSharedPtr remote_address_;\n  std::vector<std::shared_ptr<Network::MockConnectionSocket>> sockets_;\n\n  // Reusable template.\n  const std::string filter_chain_yaml = R\"EOF(\n      filter_chain_match:\n        destination_port: 10000\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n  )EOF\";\n  Init::ManagerImpl init_manager_{\"for_filter_chain_manager_test\"};\n  envoy::config::listener::v3::FilterChain filter_chain_template_;\n  NiceMock<MockFilterChainFactoryBuilder> filter_chain_factory_builder_;\n  NiceMock<Server::Configuration::MockFactoryContext> parent_context_;\n  // Test target.\n  FilterChainManagerImpl filter_chain_manager_{\n      std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1234), parent_context_,\n      init_manager_};\n};\n\nTEST_F(FilterChainManagerImplTest, FilterChainMatchNothing) {\n  auto filter_chain = findFilterChainHelper(10000, \"127.0.0.1\", 
\"\", \"tls\", {}, \"8.8.8.8\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n}\n\nTEST_F(FilterChainManagerImplTest, AddSingleFilterChain) {\n  addSingleFilterChainHelper(filter_chain_template_);\n  auto* filter_chain = findFilterChainHelper(10000, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  EXPECT_NE(filter_chain, nullptr);\n}\n\nTEST_F(FilterChainManagerImplTest, LookupFilterChainContextByFilterChainMessage) {\n  std::vector<envoy::config::listener::v3::FilterChain> filter_chain_messages;\n\n  for (int i = 0; i < 2; i++) {\n    envoy::config::listener::v3::FilterChain new_filter_chain = filter_chain_template_;\n    new_filter_chain.set_name(absl::StrCat(\"filter_chain_\", i));\n    // For sanity check\n    new_filter_chain.mutable_filter_chain_match()->mutable_destination_port()->set_value(10000 + i);\n    filter_chain_messages.push_back(std::move(new_filter_chain));\n  }\n  EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(2);\n  filter_chain_manager_.addFilterChain(\n      std::vector<const envoy::config::listener::v3::FilterChain*>{&filter_chain_messages[0],\n                                                                   &filter_chain_messages[1]},\n      filter_chain_factory_builder_, filter_chain_manager_);\n}\n\nTEST_F(FilterChainManagerImplTest, DuplicateContextsAreNotBuilt) {\n  std::vector<envoy::config::listener::v3::FilterChain> filter_chain_messages;\n\n  for (int i = 0; i < 3; i++) {\n    envoy::config::listener::v3::FilterChain new_filter_chain = filter_chain_template_;\n    new_filter_chain.set_name(absl::StrCat(\"filter_chain_\", i));\n    // For sanity check\n    new_filter_chain.mutable_filter_chain_match()->mutable_destination_port()->set_value(10000 + i);\n    filter_chain_messages.push_back(std::move(new_filter_chain));\n  }\n\n  EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(1);\n  filter_chain_manager_.addFilterChain(\n      std::vector<const 
envoy::config::listener::v3::FilterChain*>{&filter_chain_messages[0]},\n      filter_chain_factory_builder_, filter_chain_manager_);\n\n  FilterChainManagerImpl new_filter_chain_manager{\n      std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1234), parent_context_,\n      init_manager_, filter_chain_manager_};\n  // The new filter chain manager maintains 3 filter chains, but only 2 filter chain context is\n  // built because it reuse the filter chain context in the previous filter chain manager\n  EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(2);\n  new_filter_chain_manager.addFilterChain(\n      std::vector<const envoy::config::listener::v3::FilterChain*>{\n          &filter_chain_messages[0], &filter_chain_messages[1], &filter_chain_messages[2]},\n      filter_chain_factory_builder_, new_filter_chain_manager);\n}\n\nTEST_F(FilterChainManagerImplTest, CreatedFilterChainFactoryContextHasIndependentDrainClose) {\n  std::vector<envoy::config::listener::v3::FilterChain> filter_chain_messages;\n  for (int i = 0; i < 3; i++) {\n    envoy::config::listener::v3::FilterChain new_filter_chain = filter_chain_template_;\n    new_filter_chain.set_name(absl::StrCat(\"filter_chain_\", i));\n    // For sanity check\n    new_filter_chain.mutable_filter_chain_match()->mutable_destination_port()->set_value(10000 + i);\n    filter_chain_messages.push_back(std::move(new_filter_chain));\n  }\n  auto context0 = filter_chain_manager_.createFilterChainFactoryContext(&filter_chain_messages[0]);\n  auto context1 = filter_chain_manager_.createFilterChainFactoryContext(&filter_chain_messages[1]);\n\n  // Server as whole is not draining.\n  MockDrainManager not_a_draining_manager;\n  EXPECT_CALL(not_a_draining_manager, drainClose).WillRepeatedly(Return(false));\n  Configuration::MockServerFactoryContext mock_server_context;\n  EXPECT_CALL(mock_server_context, drainManager).WillRepeatedly(ReturnRef(not_a_draining_manager));\n  
EXPECT_CALL(parent_context_, getServerFactoryContext)\n      .WillRepeatedly(ReturnRef(mock_server_context));\n\n  EXPECT_FALSE(context0->drainDecision().drainClose());\n  EXPECT_FALSE(context1->drainDecision().drainClose());\n\n  // Drain filter chain 0\n  auto* context_impl_0 = dynamic_cast<PerFilterChainFactoryContextImpl*>(context0.get());\n  context_impl_0->startDraining();\n\n  EXPECT_TRUE(context0->drainDecision().drainClose());\n  EXPECT_FALSE(context1->drainDecision().drainClose());\n}\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/guarddog_impl_test.cc",
    "content": "#include <atomic>\n#include <chrono>\n#include <csignal>\n#include <memory>\n#include <vector>\n\n#include \"envoy/common/time.h\"\n#include \"envoy/server/configuration.h\"\n#include \"envoy/server/guarddog_config.h\"\n#include \"envoy/server/watchdog.h\"\n#include \"envoy/thread/thread.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n#include \"common/protobuf/utility.h\"\n\n#include \"server/guarddog_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/server/watchdog_config.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::ElementsAre;\nusing testing::InSequence;\nusing testing::NiceMock;\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\n// Kill has an explicit value that disables the feature.\nconst int DISABLE_KILL = 0;\nconst int DISABLE_MULTIKILL = 0;\n\n// Miss / Megamiss don't have an explicit value that disables them\n// so set a timeout larger than those used in tests for 'disable' it.\nconst int DISABLE_MISS = 1000000;\nconst int DISABLE_MEGAMISS = 1000000;\n\nclass DebugTestInterlock : public GuardDogImpl::TestInterlockHook {\npublic:\n  // GuardDogImpl::TestInterlockHook\n  void signalFromImpl(MonotonicTime time) override {\n    impl_reached_ = time;\n    impl_.notifyAll();\n  }\n\n  void waitFromTest(Thread::MutexBasicLockable& mutex, MonotonicTime time) override\n      ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) {\n    while (impl_reached_ < time) {\n      impl_.wait(mutex);\n    }\n  }\n\nprivate:\n  Thread::CondVar impl_;\n  MonotonicTime impl_reached_;\n};\n\n// We want to make sure guard-dog is tested with both simulated time and real\n// time, to ensure that it works in production, and that it works 
in the context\n// of integration tests which are much easier to control with simulated time.\nenum class TimeSystemType { Real, Simulated };\n\nclass GuardDogTestBase : public testing::TestWithParam<TimeSystemType> {\nprotected:\n  GuardDogTestBase()\n      : time_system_(makeTimeSystem()), api_(Api::createApiForTest(stats_store_, *time_system_)) {}\n\n  static std::unique_ptr<Event::TestTimeSystem> makeTimeSystem() {\n    if (GetParam() == TimeSystemType::Real) {\n      return std::make_unique<Event::GlobalTimeSystem>();\n    }\n    ASSERT(GetParam() == TimeSystemType::Simulated);\n    return std::make_unique<Event::SimulatedTimeSystem>();\n  }\n\n  void initGuardDog(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config) {\n    guard_dog_ = std::make_unique<GuardDogImpl>(stats_scope, config, *api_, \"server\",\n                                                std::make_unique<DebugTestInterlock>());\n  }\n\n  std::unique_ptr<Event::TestTimeSystem> time_system_;\n  Stats::TestUtil::TestStore stats_store_;\n  Api::ApiPtr api_;\n  std::unique_ptr<GuardDogImpl> guard_dog_;\n};\n\nINSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogTestBase,\n                         testing::ValuesIn({TimeSystemType::Real, TimeSystemType::Simulated}));\n\n/**\n * Death test caveat: Because of the way we die gcov doesn't receive coverage\n * information from the forked process that is checked for successful death.\n * This means that the lines dealing with the calls to PANIC are not seen as\n * green in the coverage report. 
However, rest assured from the results of the\n * test: these lines are in fact covered.\n */\nclass GuardDogDeathTest : public GuardDogTestBase {\nprotected:\n  GuardDogDeathTest()\n      : config_kill_(1000, 1000, 100, 1000, 0, std::vector<std::string>{}),\n        config_multikill_(1000, 1000, 1000, 500, 0, std::vector<std::string>{}),\n        config_multikill_threshold_(1000, 1000, 1000, 500, 60, std::vector<std::string>{}) {}\n\n  /**\n   * This does everything but the final forceCheckForTest() that should cause\n   * death for the single kill case.\n   */\n  void SetupForDeath() {\n    InSequence s;\n    initGuardDog(fakestats_, config_kill_);\n    unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n    dogs_.emplace_back(unpet_dog_);\n    guard_dog_->forceCheckForTest();\n    time_system_->advanceTimeWait(std::chrono::milliseconds(99)); // 1 ms shy of death.\n  }\n\n  /**\n   * This does everything but the final forceCheckForTest() that should cause\n   * death for the multiple kill case.\n   */\n  void SetupForMultiDeath() {\n    InSequence s;\n    initGuardDog(fakestats_, config_multikill_);\n    auto unpet_dog_ =\n        guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n    dogs_.emplace_back(unpet_dog_);\n    guard_dog_->forceCheckForTest();\n    auto second_dog_ =\n        guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n    dogs_.emplace_back(second_dog_);\n    guard_dog_->forceCheckForTest();\n    time_system_->advanceTimeWait(std::chrono::milliseconds(499)); // 1 ms shy of multi-death.\n  }\n\n  /**\n   * This does everything but the final forceCheckForTest() that should cause\n   * death for the multiple kill case using threshold (100% of watchdogs over the threshold).\n   */\n  void setupForMultiDeathThreshold() {\n    InSequence s;\n    initGuardDog(fakestats_, config_multikill_threshold_);\n\n    // Creates 5 watchdogs.\n   
 for (int i = 0; i < 5; ++i) {\n      auto dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n      dogs_.emplace_back(dog);\n\n      if (i == 0) {\n        unpet_dog_ = dog;\n      } else if (i == 1) {\n        second_dog_ = dog;\n      }\n\n      guard_dog_->forceCheckForTest();\n    }\n\n    time_system_->advanceTimeWait(std::chrono::milliseconds(499)); // 1 ms shy of multi-death.\n  }\n\n  NiceMock<Configuration::MockWatchdog> config_kill_;\n  NiceMock<Configuration::MockWatchdog> config_multikill_;\n  NiceMock<Configuration::MockWatchdog> config_multikill_threshold_;\n  NiceMock<Stats::MockStore> fakestats_;\n  WatchDogSharedPtr unpet_dog_;\n  WatchDogSharedPtr second_dog_;\n  std::vector<WatchDogSharedPtr> dogs_; // Tracks all watchdogs created.\n};\n\nINSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogDeathTest,\n                         testing::ValuesIn({TimeSystemType::Real, TimeSystemType::Simulated}));\n\n// These tests use threads, and need to run after the real death tests, so we need to call them\n// a different name.\nclass GuardDogAlmostDeadTest : public GuardDogDeathTest {};\n\nINSTANTIATE_TEST_SUITE_P(\n    TimeSystemType, GuardDogAlmostDeadTest,\n    testing::ValuesIn({// TODO(#6465): TimeSystemType::Real -- fails in this suite 30/1000 times.\n                       TimeSystemType::Simulated}));\n\nTEST_P(GuardDogDeathTest, KillDeathTest) {\n  // Is it German for \"The Function\"? Almost...\n  auto die_function = [&]() -> void {\n    SetupForDeath();\n    time_system_->advanceTimeWait(std::chrono::milliseconds(401)); // 400 ms past death.\n    guard_dog_->forceCheckForTest();\n  };\n\n  // Why do it this way? 
Any threads must be started inside the death test\n  // statement and this is the easiest way to accomplish that.\n  EXPECT_DEATH(die_function(), \"\");\n}\n\nTEST_P(GuardDogAlmostDeadTest, KillNoFinalCheckTest) {\n  // This does everything the death test does, except allow enough time to\n  // expire to reach the death panic. The death test does not verify that there\n  // was not a crash *before* the expected line, so this test checks that.\n  SetupForDeath();\n}\n\nTEST_P(GuardDogDeathTest, MultiKillDeathTest) {\n  auto die_function = [&]() -> void {\n    SetupForMultiDeath();\n    time_system_->advanceTimeWait(std::chrono::milliseconds(2)); // 1 ms past multi-death.\n    guard_dog_->forceCheckForTest();\n  };\n  EXPECT_DEATH(die_function(), \"\");\n}\n\nTEST_P(GuardDogAlmostDeadTest, MultiKillNoFinalCheckTest) {\n  // This does everything the death test does not except the final force check that\n  // should actually result in dying. The death test does not verify that there\n  // was not a crash *before* the expected line, so this test checks that.\n  SetupForMultiDeath();\n}\n\nTEST_P(GuardDogDeathTest, MultiKillThresholdDeathTest) {\n  auto die_function = [&]() -> void {\n    setupForMultiDeathThreshold();\n\n    // Pet the last two dogs so we're just at the threshold that causes death.\n    dogs_.at(4)->touch();\n    dogs_.at(3)->touch();\n\n    time_system_->advanceTimeWait(std::chrono::milliseconds(2)); // 1 ms past multi-death.\n    guard_dog_->forceCheckForTest();\n  };\n  EXPECT_DEATH(die_function(), \"\");\n}\n\nTEST_P(GuardDogAlmostDeadTest, MultiKillUnderThreshold) {\n  // This does everything the death test does except it pets an additional watchdog\n  // that causes us to be under the threshold (60%) of multikill death.\n  setupForMultiDeathThreshold();\n\n  // Pet the last three dogs so we're just under the threshold that causes death.\n  dogs_.at(4)->touch();\n  dogs_.at(3)->touch();\n  dogs_.at(2)->touch();\n\n  
time_system_->advanceTimeWait(std::chrono::milliseconds(2)); // 1 ms past multi-death.\n  guard_dog_->forceCheckForTest();\n}\n\nTEST_P(GuardDogAlmostDeadTest, NearDeathTest) {\n  // This ensures that if only one thread surpasses the multiple kill threshold\n  // there is no death. The positive case is covered in MultiKillDeathTest.\n  InSequence s;\n  initGuardDog(fakestats_, config_multikill_);\n  auto unpet_dog =\n      guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n  auto pet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n  // This part \"waits\" 600 milliseconds while one dog is touched every 100, and\n  // the other is not. 600ms is over the threshold of 500ms for multi-kill but\n  // only one is nonresponsive, so there should be no kill (single kill\n  // threshold of 1s is not reached).\n  for (int i = 0; i < 6; i++) {\n    time_system_->advanceTimeWait(std::chrono::milliseconds(100));\n    pet_dog->touch();\n    guard_dog_->forceCheckForTest();\n  }\n}\n\nclass GuardDogMissTest : public GuardDogTestBase {\nprotected:\n  GuardDogMissTest()\n      : config_miss_(500, 1000, 0, 0, 0, std::vector<std::string>{}),\n        config_mega_(1000, 500, 0, 0, 0, std::vector<std::string>{}) {}\n\n  void checkMiss(uint64_t count, const std::string& descriptor) {\n    EXPECT_EQ(count, TestUtility::findCounter(stats_store_, \"server.watchdog_miss\")->value())\n        << descriptor;\n    EXPECT_EQ(count,\n              TestUtility::findCounter(stats_store_, \"server.test_thread.watchdog_miss\")->value())\n        << descriptor;\n  }\n\n  void checkMegaMiss(uint64_t count, const std::string& descriptor) {\n    EXPECT_EQ(count, TestUtility::findCounter(stats_store_, \"server.watchdog_mega_miss\")->value())\n        << descriptor;\n    EXPECT_EQ(\n        count,\n        TestUtility::findCounter(stats_store_, \"server.test_thread.watchdog_mega_miss\")->value())\n        << descriptor;\n  
}\n\n  NiceMock<Configuration::MockWatchdog> config_miss_;\n  NiceMock<Configuration::MockWatchdog> config_mega_;\n};\n\nINSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogMissTest,\n                         testing::ValuesIn({TimeSystemType::Real, TimeSystemType::Simulated}));\n\nTEST_P(GuardDogMissTest, MissTest) {\n  // This test checks the actual collected statistics after doing some timer\n  // advances that should and shouldn't increment the counters.\n  initGuardDog(stats_store_, config_miss_);\n  auto unpet_dog =\n      guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n  // We'd better start at 0:\n  checkMiss(0, \"MissTest check 1\");\n  // At 300ms we shouldn't have hit the timeout yet:\n  time_system_->advanceTimeWait(std::chrono::milliseconds(300));\n  guard_dog_->forceCheckForTest();\n  checkMiss(0, \"MissTest check 2\");\n  // This should push it past the 500ms limit:\n  time_system_->advanceTimeWait(std::chrono::milliseconds(250));\n  guard_dog_->forceCheckForTest();\n  checkMiss(1, \"MissTest check 3\");\n  guard_dog_->stopWatching(unpet_dog);\n  unpet_dog = nullptr;\n}\n\nTEST_P(GuardDogMissTest, MegaMissTest) {\n  // TODO(#6465): This test fails in real-time 1/1000 times, but passes in simulated time.\n  if (GetParam() == TimeSystemType::Real) {\n    return;\n  }\n\n  // This test checks the actual collected statistics after doing some timer\n  // advances that should and shouldn't increment the counters.\n  initGuardDog(stats_store_, config_mega_);\n  auto unpet_dog =\n      guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n  // We'd better start at 0:\n  checkMegaMiss(0, \"MegaMissTest check 1\");\n  // This shouldn't be enough to increment the stat:\n  time_system_->advanceTimeWait(std::chrono::milliseconds(499));\n  guard_dog_->forceCheckForTest();\n  checkMegaMiss(0, \"MegaMissTest check 2\");\n  // Just 2ms more will make it greater than 500ms timeout:\n  
time_system_->advanceTimeWait(std::chrono::milliseconds(2));\n  guard_dog_->forceCheckForTest();\n  checkMegaMiss(1, \"MegaMissTest check 3\");\n  guard_dog_->stopWatching(unpet_dog);\n  unpet_dog = nullptr;\n}\n\nTEST_P(GuardDogMissTest, MissCountTest) {\n  // TODO(#6465): This test fails in real-time 9/1000 times, but passes in simulated time.\n  if (GetParam() == TimeSystemType::Real) {\n    return;\n  }\n\n  // This tests a flake discovered in the MissTest where real timeout or\n  // spurious condition_variable wakeup causes the counter to get incremented\n  // more than it should be.\n  initGuardDog(stats_store_, config_miss_);\n  auto sometimes_pet_dog =\n      guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n  // These steps are executed once without ever touching the watchdog.\n  // Then the last step is to touch the watchdog and repeat the steps.\n  // This verifies that the behavior is reset back to baseline after a touch.\n  for (unsigned long i = 0; i < 2; i++) {\n    EXPECT_EQ(i, stats_store_.counter(\"server.watchdog_miss\").value());\n    // This shouldn't be enough to increment the stat:\n    time_system_->advanceTimeWait(std::chrono::milliseconds(499));\n    guard_dog_->forceCheckForTest();\n    checkMiss(i, \"MissCountTest check 1\");\n    // And if we force re-execution of the loop it still shouldn't be:\n    guard_dog_->forceCheckForTest();\n    checkMiss(i, \"MissCountTest check 2\");\n    // Just 2ms more will make it greater than 500ms timeout:\n    time_system_->advanceTimeWait(std::chrono::milliseconds(2));\n    guard_dog_->forceCheckForTest();\n    checkMiss(i + 1, \"MissCountTest check 3\");\n    // Spurious wakeup, we should still only have one miss counted.\n    guard_dog_->forceCheckForTest();\n    checkMiss(i + 1, \"MissCountTest check 4\");\n    // When we finally touch the dog we should get one more increment once the\n    // timeout value expires:\n    sometimes_pet_dog->touch();\n  }\n  
time_system_->advanceTimeWait(std::chrono::milliseconds(1000));\n  sometimes_pet_dog->touch();\n  // Make sure megamiss still works:\n  checkMegaMiss(0UL, \"MissCountTest check 5\");\n  time_system_->advanceTimeWait(std::chrono::milliseconds(1500));\n  guard_dog_->forceCheckForTest();\n  checkMegaMiss(1UL, \"MissCountTest check 6\");\n\n  guard_dog_->stopWatching(sometimes_pet_dog);\n  sometimes_pet_dog = nullptr;\n}\n\nTEST_P(GuardDogTestBase, StartStopTest) {\n  NiceMock<Stats::MockStore> stats;\n  NiceMock<Configuration::MockWatchdog> config(0, 0, 0, 0, 0, std::vector<std::string>{});\n  initGuardDog(stats, config);\n}\n\nTEST_P(GuardDogTestBase, LoopIntervalNoKillTest) {\n  NiceMock<Stats::MockStore> stats;\n  NiceMock<Configuration::MockWatchdog> config(40, 50, 0, 0, 0, std::vector<std::string>{});\n  initGuardDog(stats, config);\n  EXPECT_EQ(guard_dog_->loopIntervalForTest(), std::chrono::milliseconds(40));\n}\n\nTEST_P(GuardDogTestBase, LoopIntervalTest) {\n  NiceMock<Stats::MockStore> stats;\n  NiceMock<Configuration::MockWatchdog> config(100, 90, 1000, 500, 0, std::vector<std::string>{});\n  initGuardDog(stats, config);\n  EXPECT_EQ(guard_dog_->loopIntervalForTest(), std::chrono::milliseconds(90));\n}\n\nTEST_P(GuardDogTestBase, WatchDogThreadIdTest) {\n  NiceMock<Stats::MockStore> stats;\n  NiceMock<Configuration::MockWatchdog> config(100, 90, 1000, 500, 0, std::vector<std::string>{});\n  initGuardDog(stats, config);\n  auto watched_dog =\n      guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), \"test_thread\");\n  EXPECT_EQ(watched_dog->threadId().debugString(),\n            api_->threadFactory().currentThreadId().debugString());\n  guard_dog_->stopWatching(watched_dog);\n}\n\n// If this test fails it is because the std::chrono::steady_clock::duration type has become\n// nontrivial or we are compiling under a compiler and library combo that makes\n// std::chrono::steady_clock::duration require a lock to be atomically modified.\n//\n// 
The WatchDog/GuardDog relies on this being a lock free atomic for perf reasons so some workaround\n// will be required if this test starts failing.\nTEST_P(GuardDogTestBase, AtomicIsAtomicTest) {\n  std::atomic<std::chrono::steady_clock::duration> atomic_time;\n  ASSERT_EQ(atomic_time.is_lock_free(), true);\n}\n\n// A GuardDogAction used for testing the GuardDog.\n// It's primary use is dumping string of the format EVENT_TYPE : tid1,.., tidN to\n// the events vector passed to it.\n// Instances of this class will be registered for GuardDogEvent through\n// TestGuardDogActionFactory.\nclass RecordGuardDogAction : public Configuration::GuardDogAction {\npublic:\n  RecordGuardDogAction(std::vector<std::string>& events) : events_(events) {}\n\n  void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event,\n           const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs,\n           MonotonicTime /*now*/) override {\n    std::string event_string =\n        envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent_Name(event);\n    absl::StrAppend(&event_string, \" : \");\n    std::vector<std::string> output_string_parts;\n    output_string_parts.reserve(thread_last_checkin_pairs.size());\n\n    for (const auto& thread_ltt_pair : thread_last_checkin_pairs) {\n      output_string_parts.push_back(thread_ltt_pair.first.debugString());\n    }\n\n    absl::StrAppend(&event_string, absl::StrJoin(output_string_parts, \",\"));\n    events_.push_back(event_string);\n  }\n\nprotected:\n  std::vector<std::string>& events_; // not owned\n};\n\n// A GuardDogAction that raises the specified signal.\nclass AssertGuardDogAction : public Configuration::GuardDogAction {\npublic:\n  AssertGuardDogAction() = default;\n\n  void\n  run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent /*event*/,\n      const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& /*thread_last_checkin_pairs*/,\n      
MonotonicTime /*now*/) override {\n    RELEASE_ASSERT(false, \"ASSERT_GUARDDOG_ACTION\");\n  }\n};\n\n// Test factory for consuming Watchdog configs and creating GuardDogActions.\ntemplate <class ConfigType>\nclass RecordGuardDogActionFactory : public Configuration::GuardDogActionFactory {\npublic:\n  RecordGuardDogActionFactory(const std::string& name, std::vector<std::string>& events)\n      : name_(name), events_(events) {}\n\n  Configuration::GuardDogActionPtr createGuardDogActionFromProto(\n      const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& /*config*/,\n      Configuration::GuardDogActionFactoryContext& /*context*/) override {\n    // Return different actions depending on the config.\n    return std::make_unique<RecordGuardDogAction>(events_);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return ProtobufTypes::MessagePtr{new ConfigType()};\n  }\n\n  std::string name() const override { return name_; }\n\n  const std::string name_;\n  std::vector<std::string>& events_; // not owned\n};\n\n// Test factory for consuming Watchdog configs and creating GuardDogActions.\ntemplate <class ConfigType>\nclass AssertGuardDogActionFactory : public Configuration::GuardDogActionFactory {\npublic:\n  AssertGuardDogActionFactory(const std::string& name) : name_(name) {}\n\n  Configuration::GuardDogActionPtr createGuardDogActionFromProto(\n      const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& /*config*/,\n      Configuration::GuardDogActionFactoryContext& /*context*/) override {\n    // Return different actions depending on the config.\n    return std::make_unique<AssertGuardDogAction>();\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return ProtobufTypes::MessagePtr{new ConfigType()};\n  }\n\n  std::string name() const override { return name_; }\n\n  const std::string name_;\n};\n\n/**\n * Tests that various actions registered for the guard dog get called upon.\n */\nclass 
GuardDogActionsTest : public GuardDogTestBase {\nprotected:\n  GuardDogActionsTest()\n      : log_factory_(\"LogFactory\", events_), register_log_factory_(log_factory_),\n        assert_factory_(\"AssertFactory\"), register_assert_factory_(assert_factory_) {}\n\n  std::vector<std::string> getActionsConfig() {\n    return {\n        R\"EOF(\n        {\n          \"config\": {\n            \"name\": \"AssertFactory\",\n            \"typed_config\": {\n              \"@type\": \"type.googleapis.com/google.protobuf.Empty\"\n            }\n          },\n          \"event\": \"MULTIKILL\"\n        }\n      )EOF\",\n        R\"EOF(\n        {\n          \"config\": {\n            \"name\": \"AssertFactory\",\n            \"typed_config\": {\n              \"@type\": \"type.googleapis.com/google.protobuf.Empty\"\n            }\n          },\n          \"event\": \"KILL\"\n        }\n      )EOF\",\n        R\"EOF(\n        {\n          \"config\": {\n            \"name\": \"LogFactory\",\n            \"typed_config\": {\n              \"@type\": \"type.googleapis.com/google.protobuf.Empty\"\n            }\n          },\n          \"event\": \"MEGAMISS\"\n        }\n      )EOF\",\n        R\"EOF(\n        {\n          \"config\": {\n            \"name\": \"LogFactory\",\n            \"typed_config\": {\n              \"@type\": \"type.googleapis.com/google.protobuf.Empty\"\n            }\n          },\n          \"event\": \"MISS\"\n        }\n      )EOF\"};\n  }\n\n  void setupFirstDog(const NiceMock<Configuration::MockWatchdog>& config, Thread::ThreadId tid) {\n    initGuardDog(fake_stats_, config);\n    first_dog_ = guard_dog_->createWatchDog(tid, \"test_thread\");\n    guard_dog_->forceCheckForTest();\n  }\n\n  std::vector<std::string> actions_;\n  std::vector<std::string> events_;\n  RecordGuardDogActionFactory<Envoy::ProtobufWkt::Empty> log_factory_;\n  Registry::InjectFactory<Configuration::GuardDogActionFactory> register_log_factory_;\n  
AssertGuardDogActionFactory<Envoy::ProtobufWkt::Empty> assert_factory_;\n  Registry::InjectFactory<Configuration::GuardDogActionFactory> register_assert_factory_;\n  NiceMock<Stats::MockStore> fake_stats_;\n  WatchDogSharedPtr first_dog_;\n  WatchDogSharedPtr second_dog_;\n};\n\nINSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogActionsTest,\n                         testing::ValuesIn({TimeSystemType::Real, TimeSystemType::Simulated}));\n\nTEST_P(GuardDogActionsTest, MissShouldOnlyReportRelevantThreads) {\n  const NiceMock<Configuration::MockWatchdog> config(100, DISABLE_MEGAMISS, DISABLE_KILL,\n                                                     DISABLE_MULTIKILL, 0, getActionsConfig());\n  setupFirstDog(config, Thread::ThreadId(10));\n  second_dog_ = guard_dog_->createWatchDog(Thread::ThreadId(11), \"test_thread\");\n  time_system_->advanceTimeWait(std::chrono::milliseconds(50));\n  second_dog_->touch();\n\n  // This will reset the loop interval timer, and should help us\n  // synchronize with the guard dog.\n  guard_dog_->forceCheckForTest();\n\n  time_system_->advanceTimeWait(std::chrono::milliseconds(51));\n  guard_dog_->forceCheckForTest();\n\n  EXPECT_THAT(events_, ElementsAre(\"MISS : 10\"));\n}\n\nTEST_P(GuardDogActionsTest, MissShouldBeAbleToReportMultipleThreads) {\n  const NiceMock<Configuration::MockWatchdog> config(100, DISABLE_MEGAMISS, DISABLE_KILL,\n                                                     DISABLE_MULTIKILL, 0, getActionsConfig());\n  initGuardDog(fake_stats_, config);\n  first_dog_ = guard_dog_->createWatchDog(Thread::ThreadId(10), \"test_thread\");\n  second_dog_ = guard_dog_->createWatchDog(Thread::ThreadId(11), \"test_thread\");\n\n  first_dog_->touch();\n  second_dog_->touch();\n  // This should ensure that when the next call to step() occurs, both of the\n  // dogs will be over last touch time threshold and be reported in the event.\n  // The next call to step() will either be triggered by the timer or after\n  // advanceTimeWait() 
below, but only one of them will append to events_\n  // because of saturation.\n  guard_dog_->forceCheckForTest();\n\n  time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n  guard_dog_->forceCheckForTest();\n  EXPECT_THAT(events_, ElementsAre(\"MISS : 10,11\"));\n}\n\nTEST_P(GuardDogActionsTest, MissShouldSaturateOnMissEvent) {\n  const NiceMock<Configuration::MockWatchdog> config(100, DISABLE_MISS, DISABLE_KILL,\n                                                     DISABLE_MULTIKILL, 0, getActionsConfig());\n  setupFirstDog(config, Thread::ThreadId(10));\n\n  time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n  guard_dog_->forceCheckForTest();\n  EXPECT_THAT(events_, ElementsAre(\"MISS : 10\"));\n\n  // Should saturate and not add an additional \"event_\"\n  time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n  guard_dog_->forceCheckForTest();\n  EXPECT_THAT(events_, ElementsAre(\"MISS : 10\"));\n\n  // Touch the watchdog, which should allow the event to trigger again.\n  first_dog_->touch();\n\n  time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n  guard_dog_->forceCheckForTest();\n  EXPECT_THAT(events_, ElementsAre(\"MISS : 10\", \"MISS : 10\"));\n}\n\nTEST_P(GuardDogActionsTest, MegaMissShouldOnlyReportRelevantThreads) {\n  const NiceMock<Configuration::MockWatchdog> config(DISABLE_MISS, 100, DISABLE_KILL,\n                                                     DISABLE_MULTIKILL, 0, getActionsConfig());\n  setupFirstDog(config, Thread::ThreadId(10));\n  second_dog_ = guard_dog_->createWatchDog(Thread::ThreadId(11), \"test_thread\");\n  time_system_->advanceTimeWait(std::chrono::milliseconds(50));\n  second_dog_->touch();\n\n  // This will reset the loop interval timer, and should help us\n  // synchronize with the guard dog.\n  guard_dog_->forceCheckForTest();\n\n  time_system_->advanceTimeWait(std::chrono::milliseconds(51));\n  guard_dog_->forceCheckForTest();\n\n  EXPECT_THAT(events_, ElementsAre(\"MEGAMISS : 
10\"));\n}\n\nTEST_P(GuardDogActionsTest, MegaMissShouldBeAbleToReportMultipleThreads) {\n  const NiceMock<Configuration::MockWatchdog> config(DISABLE_MISS, 100, DISABLE_KILL,\n                                                     DISABLE_MULTIKILL, 0, getActionsConfig());\n  initGuardDog(fake_stats_, config);\n  first_dog_ = guard_dog_->createWatchDog(Thread::ThreadId(10), \"test_thread\");\n  second_dog_ = guard_dog_->createWatchDog(Thread::ThreadId(11), \"test_thread\");\n\n  first_dog_->touch();\n  second_dog_->touch();\n\n  // This should ensure that when the next call to step() occurs, both of the\n  // dogs will be over last touch time threshold and be reported in the event.\n  // The next call to step() will either be triggered by the timer or after\n  // advanceTimeWait() below, but only one of them will append to events_\n  // because of saturation.\n  guard_dog_->forceCheckForTest();\n  time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n\n  guard_dog_->forceCheckForTest();\n  EXPECT_THAT(events_, ElementsAre(\"MEGAMISS : 10,11\"));\n}\n\nTEST_P(GuardDogActionsTest, MegaMissShouldSaturateOnMegaMissEvent) {\n  const NiceMock<Configuration::MockWatchdog> config(DISABLE_MISS, 100, DISABLE_KILL,\n                                                     DISABLE_MULTIKILL, 0, getActionsConfig());\n  setupFirstDog(config, Thread::ThreadId(10));\n\n  time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n  guard_dog_->forceCheckForTest();\n  EXPECT_THAT(events_, ElementsAre(\"MEGAMISS : 10\"));\n\n  // Should saturate and not add an additional \"event_\"\n  time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n  guard_dog_->forceCheckForTest();\n  EXPECT_THAT(events_, ElementsAre(\"MEGAMISS : 10\"));\n\n  // Touch the watchdog, which should allow the event to trigger again.\n  first_dog_->touch();\n\n  time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n  guard_dog_->forceCheckForTest();\n  EXPECT_THAT(events_, 
ElementsAre(\"MEGAMISS : 10\", \"MEGAMISS : 10\"));\n}\n\nTEST_P(GuardDogActionsTest, ShouldRespectEventPriority) {\n  // Priority of events are KILL, MULTIKILL, MEGAMISS and MISS\n\n  // Kill event should fire before the others\n  auto kill_function = [&]() -> void {\n    const NiceMock<Configuration::MockWatchdog> config(100, 100, 100, 100, 0, getActionsConfig());\n    initGuardDog(fake_stats_, config);\n    auto first_dog = guard_dog_->createWatchDog(Thread::ThreadId(10), \"test_thread\");\n    auto second_dog = guard_dog_->createWatchDog(Thread::ThreadId(11), \"test_thread\");\n    time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n    guard_dog_->forceCheckForTest();\n  };\n\n  // We expect only the kill action to have fired\n  EXPECT_DEATH(kill_function(), \"ASSERT_GUARDDOG_ACTION\");\n\n  // Multikill event should fire before the others\n  auto multikill_function = [&]() -> void {\n    const NiceMock<Configuration::MockWatchdog> config(100, 100, DISABLE_KILL, 100, 0,\n                                                       getActionsConfig());\n    initGuardDog(fake_stats_, config);\n    auto first_dog = guard_dog_->createWatchDog(Thread::ThreadId(10), \"test_thread\");\n    auto second_dog = guard_dog_->createWatchDog(Thread::ThreadId(11), \"test_thread\");\n    time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n    guard_dog_->forceCheckForTest();\n  };\n\n  EXPECT_DEATH(multikill_function(), \"ASSERT_GUARDDOG_ACTION\");\n\n  // We expect megamiss to fire before miss\n  const NiceMock<Configuration::MockWatchdog> config(100, 100, DISABLE_KILL, DISABLE_MULTIKILL, 0,\n                                                     getActionsConfig());\n  setupFirstDog(config, Thread::ThreadId(10));\n  time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n  guard_dog_->forceCheckForTest();\n  EXPECT_THAT(events_, ElementsAre(\"MEGAMISS : 10\", \"MISS : 10\"));\n}\n\nTEST_P(GuardDogActionsTest, KillShouldTriggerGuardDogActions) {\n  
auto die_function = [&]() -> void {\n    const NiceMock<Configuration::MockWatchdog> config(DISABLE_MISS, DISABLE_MEGAMISS, 100, 0, 0,\n                                                       getActionsConfig());\n    setupFirstDog(config, Thread::ThreadId(10));\n    time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n    guard_dog_->forceCheckForTest();\n  };\n\n  EXPECT_DEATH(die_function(), \"ASSERT_GUARDDOG_ACTION\");\n}\n\nTEST_P(GuardDogActionsTest, MultikillShouldTriggerGuardDogActions) {\n  auto die_function = [&]() -> void {\n    const NiceMock<Configuration::MockWatchdog> config(DISABLE_MISS, DISABLE_MEGAMISS, DISABLE_KILL,\n                                                       100, 0, getActionsConfig());\n    setupFirstDog(config, Thread::ThreadId(10));\n    second_dog_ = guard_dog_->createWatchDog(Thread::ThreadId(11), \"test_thread\");\n    guard_dog_->forceCheckForTest();\n    time_system_->advanceTimeWait(std::chrono::milliseconds(101));\n    guard_dog_->forceCheckForTest();\n  };\n\n  EXPECT_DEATH(die_function(), \"ASSERT_GUARDDOG_ACTION\");\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/hot_restart_impl_test.cc",
    "content": "#include <memory>\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/api/os_sys_calls_impl_hot_restart.h\"\n#include \"common/common/hex.h\"\n\n#include \"server/hot_restart_impl.h\"\n\n#include \"test/mocks/api/hot_restart.h\"\n#include \"test/mocks/api/mocks.h\"\n#include \"test/mocks/server/hot_restart.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"absl/strings/match.h\"\n#include \"absl/strings/string_view.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AnyNumber;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::Return;\nusing testing::WithArg;\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nclass HotRestartImplTest : public testing::Test {\npublic:\n  void setup() {\n    EXPECT_CALL(hot_restart_os_sys_calls_, shmUnlink(_)).Times(AnyNumber());\n    EXPECT_CALL(hot_restart_os_sys_calls_, shmOpen(_, _, _));\n    EXPECT_CALL(os_sys_calls_, ftruncate(_, _)).WillOnce(WithArg<1>(Invoke([this](off_t size) {\n      buffer_.resize(size);\n      return Api::SysCallIntResult{0, 0};\n    })));\n    EXPECT_CALL(os_sys_calls_, mmap(_, _, _, _, _, _)).WillOnce(InvokeWithoutArgs([this]() {\n      return Api::SysCallPtrResult{buffer_.data(), 0};\n    }));\n    // We bind two sockets: one to talk to parent, one to talk to our (hypothetical eventual) child\n    EXPECT_CALL(os_sys_calls_, bind(_, _, _)).Times(2);\n\n    // Test we match the correct stat with empty-slots before, after, or both.\n    hot_restart_ = std::make_unique<HotRestartImpl>(0, 0, \"@envoy_domain_socket\", 0);\n    hot_restart_->drainParentListeners();\n\n    // We close both sockets.\n    EXPECT_CALL(os_sys_calls_, close(_)).Times(2);\n  }\n\n  Api::MockOsSysCalls os_sys_calls_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls{&os_sys_calls_};\n  Api::MockHotRestartOsSysCalls hot_restart_os_sys_calls_;\n  
TestThreadsafeSingletonInjector<Api::HotRestartOsSysCallsImpl> hot_restart_os_calls{\n      &hot_restart_os_sys_calls_};\n  std::vector<uint8_t> buffer_;\n  std::unique_ptr<HotRestartImpl> hot_restart_;\n};\n\nTEST_F(HotRestartImplTest, VersionString) {\n  // Tests that the version-string will be consistent and HOT_RESTART_VERSION,\n  // between multiple instantiations.\n  std::string version;\n\n  // The mocking infrastructure requires a test setup and teardown every time we\n  // want to re-instantiate HotRestartImpl.\n  {\n    setup();\n    version = hot_restart_->version();\n    EXPECT_TRUE(absl::StartsWith(version, fmt::format(\"{}.\", HOT_RESTART_VERSION))) << version;\n    TearDown();\n  }\n\n  {\n    setup();\n    EXPECT_EQ(version, hot_restart_->version()) << \"Version string deterministic from options\";\n    TearDown();\n  }\n}\n\n// Test that HotRestartDomainSocketInUseException is thrown when the domain socket is already\n// in use,\nTEST_F(HotRestartImplTest, DomainSocketAlreadyInUse) {\n  EXPECT_CALL(os_sys_calls_, bind(_, _, _))\n      .WillOnce(Return(Api::SysCallIntResult{-1, SOCKET_ERROR_ADDR_IN_USE}));\n  EXPECT_CALL(os_sys_calls_, close(_)).Times(1);\n\n  EXPECT_THROW(std::make_unique<HotRestartImpl>(0, 0, \"@envoy_domain_socket\", 0),\n               Server::HotRestartDomainSocketInUseException);\n}\n\n// Test that EnvoyException is thrown when the domain socket bind fails for reasons other than\n// being in use.\nTEST_F(HotRestartImplTest, DomainSocketError) {\n  EXPECT_CALL(os_sys_calls_, bind(_, _, _))\n      .WillOnce(Return(Api::SysCallIntResult{-1, SOCKET_ERROR_ACCESS}));\n  EXPECT_CALL(os_sys_calls_, close(_)).Times(1);\n\n  EXPECT_THROW(std::make_unique<HotRestartImpl>(0, 0, \"@envoy_domain_socket\", 0), EnvoyException);\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/hot_restarting_parent_test.cc",
    "content": "#include <memory>\n\n#include \"server/hot_restarting_child.h\"\n#include \"server/hot_restarting_parent.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/listener_manager.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::InSequence;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nusing HotRestartMessage = envoy::HotRestartMessage;\n\nclass HotRestartingParentTest : public testing::Test {\npublic:\n  NiceMock<MockInstance> server_;\n  HotRestartingParent::Internal hot_restarting_parent_{&server_};\n};\n\nTEST_F(HotRestartingParentTest, ShutdownAdmin) {\n  EXPECT_CALL(server_, shutdownAdmin());\n  EXPECT_CALL(server_, startTimeFirstEpoch()).WillOnce(Return(12345));\n  HotRestartMessage message = hot_restarting_parent_.shutdownAdmin();\n  EXPECT_EQ(12345, message.reply().shutdown_admin().original_start_time_unix_seconds());\n}\n\nTEST_F(HotRestartingParentTest, GetListenSocketsForChildNotFound) {\n  MockListenerManager listener_manager;\n  std::vector<std::reference_wrapper<Network::ListenerConfig>> listeners;\n  EXPECT_CALL(server_, listenerManager()).WillOnce(ReturnRef(listener_manager));\n  EXPECT_CALL(listener_manager, listeners(ListenerManager::ListenerState::ACTIVE))\n      .WillOnce(Return(listeners));\n\n  HotRestartMessage::Request request;\n  request.mutable_pass_listen_socket()->set_address(\"tcp://127.0.0.1:80\");\n  HotRestartMessage message = hot_restarting_parent_.getListenSocketsForChild(request);\n  EXPECT_EQ(-1, message.reply().pass_listen_socket().fd());\n}\n\nTEST_F(HotRestartingParentTest, GetListenSocketsForChildNotBindPort) {\n  MockListenerManager listener_manager;\n  Network::MockListenerConfig listener_config;\n  std::vector<std::reference_wrapper<Network::ListenerConfig>> listeners;\n  InSequence s;\n  listeners.push_back(std::ref(*static_cast<Network::ListenerConfig*>(&listener_config)));\n  
EXPECT_CALL(server_, listenerManager()).WillOnce(ReturnRef(listener_manager));\n  EXPECT_CALL(listener_manager, listeners(ListenerManager::ListenerState::ACTIVE))\n      .WillOnce(Return(listeners));\n  EXPECT_CALL(listener_config, listenSocketFactory());\n  EXPECT_CALL(listener_config.socket_factory_, localAddress());\n  EXPECT_CALL(listener_config, bindToPort()).WillOnce(Return(false));\n\n  HotRestartMessage::Request request;\n  request.mutable_pass_listen_socket()->set_address(\"tcp://0.0.0.0:80\");\n  HotRestartMessage message = hot_restarting_parent_.getListenSocketsForChild(request);\n  EXPECT_EQ(-1, message.reply().pass_listen_socket().fd());\n}\n\nTEST_F(HotRestartingParentTest, ExportStatsToChild) {\n  Stats::TestUtil::TestStore store;\n  MockListenerManager listener_manager;\n  EXPECT_CALL(server_, listenerManager()).WillRepeatedly(ReturnRef(listener_manager));\n  EXPECT_CALL(listener_manager, numConnections()).WillRepeatedly(Return(0));\n  EXPECT_CALL(server_, stats()).WillRepeatedly(ReturnRef(store));\n\n  {\n    store.counter(\"c1\").inc();\n    store.counter(\"c2\").add(2);\n    store.gauge(\"g0\", Stats::Gauge::ImportMode::Accumulate).set(0);\n    store.gauge(\"g1\", Stats::Gauge::ImportMode::Accumulate).set(123);\n    store.gauge(\"g2\", Stats::Gauge::ImportMode::Accumulate).set(456);\n    HotRestartMessage::Reply::Stats stats;\n    hot_restarting_parent_.exportStatsToChild(&stats);\n    EXPECT_EQ(1, stats.counter_deltas().at(\"c1\"));\n    EXPECT_EQ(2, stats.counter_deltas().at(\"c2\"));\n    EXPECT_EQ(0, stats.gauges().at(\"g0\"));\n    EXPECT_EQ(123, stats.gauges().at(\"g1\"));\n    EXPECT_EQ(456, stats.gauges().at(\"g2\"));\n  }\n  // When a counter has not changed since its last export, it should not be included in the message.\n  {\n    store.counter(\"c2\").add(2);\n    store.gauge(\"g1\", Stats::Gauge::ImportMode::Accumulate).add(1);\n    store.gauge(\"g2\", Stats::Gauge::ImportMode::Accumulate).sub(1);\n    HotRestartMessage::Reply::Stats 
stats;\n    hot_restarting_parent_.exportStatsToChild(&stats);\n    EXPECT_EQ(stats.counter_deltas().end(), stats.counter_deltas().find(\"c1\"));\n    EXPECT_EQ(2, stats.counter_deltas().at(\"c2\")); // 4 is the value, but 2 is the delta\n    EXPECT_EQ(0, stats.gauges().at(\"g0\"));\n    EXPECT_EQ(124, stats.gauges().at(\"g1\"));\n    EXPECT_EQ(455, stats.gauges().at(\"g2\"));\n  }\n\n  // When a counter and gauge are not used, they should not be included in the message.\n  {\n    store.counter(\"unused_counter\");\n    store.counter(\"used_counter\").inc();\n    store.gauge(\"unused_gauge\", Stats::Gauge::ImportMode::Accumulate);\n    store.gauge(\"used_gauge\", Stats::Gauge::ImportMode::Accumulate).add(1);\n    HotRestartMessage::Reply::Stats stats;\n    hot_restarting_parent_.exportStatsToChild(&stats);\n    EXPECT_EQ(stats.counter_deltas().end(), stats.counter_deltas().find(\"unused_counter\"));\n    EXPECT_EQ(1, stats.counter_deltas().at(\"used_counter\"));\n    EXPECT_EQ(stats.gauges().end(), stats.counter_deltas().find(\"unused_gauge\"));\n    EXPECT_EQ(1, stats.gauges().at(\"used_gauge\"));\n  }\n}\n\nTEST_F(HotRestartingParentTest, RetainDynamicStats) {\n  MockListenerManager listener_manager;\n  Stats::SymbolTableImpl parent_symbol_table;\n  Stats::TestUtil::TestStore parent_store(parent_symbol_table);\n\n  EXPECT_CALL(server_, listenerManager()).WillRepeatedly(ReturnRef(listener_manager));\n  EXPECT_CALL(listener_manager, numConnections()).WillRepeatedly(Return(0));\n  EXPECT_CALL(server_, stats()).WillRepeatedly(ReturnRef(parent_store));\n\n  HotRestartMessage::Reply::Stats stats_proto;\n  {\n    Stats::StatNameDynamicPool dynamic(parent_store.symbolTable());\n    parent_store.counter(\"c1\").inc();\n    parent_store.counterFromStatName(dynamic.add(\"c2\")).inc();\n    parent_store.gauge(\"g1\", Stats::Gauge::ImportMode::Accumulate).set(123);\n    parent_store.gaugeFromStatName(dynamic.add(\"g2\"), Stats::Gauge::ImportMode::Accumulate).set(42);\n    
hot_restarting_parent_.exportStatsToChild(&stats_proto);\n  }\n\n  {\n    Stats::SymbolTableImpl child_symbol_table;\n    Stats::TestUtil::TestStore child_store(child_symbol_table);\n    Stats::StatNameDynamicPool dynamic(child_store.symbolTable());\n    Stats::Counter& c1 = child_store.counter(\"c1\");\n    Stats::Counter& c2 = child_store.counterFromStatName(dynamic.add(\"c2\"));\n    Stats::Gauge& g1 = child_store.gauge(\"g1\", Stats::Gauge::ImportMode::Accumulate);\n    Stats::Gauge& g2 =\n        child_store.gaugeFromStatName(dynamic.add(\"g2\"), Stats::Gauge::ImportMode::Accumulate);\n\n    HotRestartingChild hot_restarting_child(0, 0, \"@envoy_domain_socket\", 0);\n    hot_restarting_child.mergeParentStats(child_store, stats_proto);\n    EXPECT_EQ(1, c1.value());\n    EXPECT_EQ(1, c2.value());\n    EXPECT_EQ(123, g1.value());\n    EXPECT_EQ(42, g2.value());\n  }\n}\n\nTEST_F(HotRestartingParentTest, DrainListeners) {\n  EXPECT_CALL(server_, drainListeners());\n  hot_restarting_parent_.drainListeners();\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/lds_api_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"server/lds_api.h\"\n\n#include \"test/mocks/config/mocks.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/server/listener_manager.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n\nusing ::testing::_;\nusing ::testing::InSequence;\nusing ::testing::Invoke;\nusing ::testing::NiceMock;\nusing ::testing::Return;\nusing ::testing::Throw;\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nclass LdsApiTest : public testing::Test {\npublic:\n  LdsApiTest() {\n    ON_CALL(init_manager_, add(_)).WillByDefault(Invoke([this](const Init::Target& target) {\n      init_target_handle_ = target.createHandle(\"test\");\n    }));\n  }\n\n  void setup() {\n    envoy::config::core::v3::ConfigSource lds_config;\n    EXPECT_CALL(init_manager_, add(_));\n    lds_ = std::make_unique<LdsApiImpl>(lds_config, nullptr, cluster_manager_, init_manager_,\n                                        store_, listener_manager_, validation_visitor_);\n    EXPECT_CALL(*cluster_manager_.subscription_factory_.subscription_, start(_, _));\n    init_target_handle_->initialize(init_watcher_);\n    lds_callbacks_ = cluster_manager_.subscription_factory_.callbacks_;\n  }\n\n  void expectAdd(const std::string& listener_name, absl::optional<std::string> version,\n                 bool updated) {\n    if (!version) {\n      EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true))\n          .WillOnce(\n              Invoke([listener_name, updated](const envoy::config::listener::v3::Listener& config,\n                                              const std::string&, bool) -> bool {\n 
               EXPECT_EQ(listener_name, config.name());\n                return updated;\n              }));\n    } else {\n      EXPECT_CALL(listener_manager_, addOrUpdateListener(_, version.value(), true))\n          .WillOnce(\n              Invoke([listener_name, updated](const envoy::config::listener::v3::Listener& config,\n                                              const std::string&, bool) -> bool {\n                EXPECT_EQ(listener_name, config.name());\n                return updated;\n              }));\n    }\n  }\n\n  void makeListenersAndExpectCall(const std::vector<std::string>& listener_names) {\n    std::vector<std::reference_wrapper<Network::ListenerConfig>> refs;\n    listeners_.clear();\n    for (const auto& name : listener_names) {\n      listeners_.emplace_back();\n      listeners_.back().name_ = name;\n      refs.emplace_back(listeners_.back());\n    }\n    EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE))\n        .WillOnce(Return(refs));\n    EXPECT_CALL(listener_manager_, beginListenerUpdate());\n  }\n\n  envoy::config::listener::v3::Listener buildListener(const std::string& listener_name) {\n    envoy::config::listener::v3::Listener listener;\n    listener.set_name(listener_name);\n    auto socket_address = listener.mutable_address()->mutable_socket_address();\n    socket_address->set_address(listener_name);\n    socket_address->set_port_value(1);\n    listener.add_filter_chains();\n    return listener;\n  }\n\n  std::shared_ptr<NiceMock<Config::MockGrpcMux>> grpc_mux_;\n  NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  Init::MockManager init_manager_;\n  Init::ExpectableWatcherImpl init_watcher_;\n  Init::TargetHandlePtr init_target_handle_;\n  Stats::IsolatedStoreImpl store_;\n  MockListenerManager listener_manager_;\n  Config::SubscriptionCallbacks* lds_callbacks_{};\n  std::unique_ptr<LdsApiImpl> lds_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> 
validation_visitor_;\n\nprivate:\n  std::list<NiceMock<Network::MockListenerConfig>> listeners_;\n};\n\nTEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) {\n  InSequence s;\n\n  setup();\n\n  std::vector<std::reference_wrapper<Network::ListenerConfig>> existing_listeners;\n\n  // Construct a minimal listener that would pass proto validation.\n  envoy::config::listener::v3::Listener listener;\n  listener.set_name(\"invalid-listener\");\n  auto socket_address = listener.mutable_address()->mutable_socket_address();\n  socket_address->set_address(\"invalid-address\");\n  socket_address->set_port_value(1);\n  listener.add_filter_chains();\n\n  EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE))\n      .WillOnce(Return(existing_listeners));\n\n  EXPECT_CALL(listener_manager_, beginListenerUpdate());\n  EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true))\n      .WillOnce(Throw(EnvoyException(\"something is wrong\")));\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  EXPECT_CALL(init_watcher_, ready());\n\n  const auto decoded_resources = TestUtility::decodeResources({listener});\n  EXPECT_THROW_WITH_MESSAGE(\n      lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\"), EnvoyException,\n      \"Error adding/updating listener(s) invalid-listener: something is wrong\\n\");\n}\n\nTEST_F(LdsApiTest, EmptyListenersUpdate) {\n  InSequence s;\n\n  setup();\n\n  std::vector<std::reference_wrapper<Network::ListenerConfig>> existing_listeners;\n\n  EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE))\n      .WillOnce(Return(existing_listeners));\n  EXPECT_CALL(listener_manager_, beginListenerUpdate());\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_))\n      .WillOnce(Invoke([](ListenerManager::FailureStates&& state) { EXPECT_EQ(0, state.size()); }));\n  ;\n  EXPECT_CALL(init_watcher_, ready());\n\n  lds_callbacks_->onConfigUpdate({}, 
\"\");\n}\n\nTEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) {\n  InSequence s;\n\n  setup();\n\n  std::vector<std::reference_wrapper<Network::ListenerConfig>> existing_listeners;\n\n  // Add 4 listeners - 2 valid and 2 invalid.\n  const auto listener_0 = buildListener(\"valid-listener-1\");\n  const auto listener_1 = buildListener(\"invalid-listener-1\");\n  const auto listener_2 = buildListener(\"valid-listener-2\");\n  const auto listener_3 = buildListener(\"invalid-listener-2\");\n\n  EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE))\n      .WillOnce(Return(existing_listeners));\n\n  EXPECT_CALL(listener_manager_, beginListenerUpdate());\n  EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true))\n      .WillOnce(Return(true))\n      .WillOnce(Throw(EnvoyException(\"something is wrong\")))\n      .WillOnce(Return(true))\n      .WillOnce(Throw(EnvoyException(\"something else is wrong\")));\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n\n  EXPECT_CALL(init_watcher_, ready());\n\n  const auto decoded_resources =\n      TestUtility::decodeResources({listener_0, listener_1, listener_2, listener_3});\n  EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\"),\n                            EnvoyException,\n                            \"Error adding/updating listener(s) invalid-listener-1: something is \"\n                            \"wrong\\ninvalid-listener-2: something else is wrong\\n\");\n}\n\n// Validate onConfigUpdate throws EnvoyException with duplicate listeners.\n// The first of the duplicates will be successfully applied, with the rest adding to\n// the exception message.\nTEST_F(LdsApiTest, ValidateDuplicateListeners) {\n  InSequence s;\n\n  setup();\n\n  const auto listener = buildListener(\"duplicate_listener\");\n\n  std::vector<std::reference_wrapper<Network::ListenerConfig>> existing_listeners;\n  EXPECT_CALL(listener_manager_, 
listeners(ListenerManager::WARMING | ListenerManager::ACTIVE))\n      .WillOnce(Return(existing_listeners));\n  EXPECT_CALL(listener_manager_, beginListenerUpdate());\n  EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true)).WillOnce(Return(true));\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  EXPECT_CALL(init_watcher_, ready());\n\n  const auto decoded_resources = TestUtility::decodeResources({listener, listener});\n  EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, \"\"),\n                            EnvoyException,\n                            \"Error adding/updating listener(s) duplicate_listener: duplicate \"\n                            \"listener duplicate_listener found\\n\");\n}\n\nTEST_F(LdsApiTest, Basic) {\n  InSequence s;\n\n  setup();\n\n  const std::string response1_json = R\"EOF(\n{\n  \"version_info\": \"0\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"name\": \"listener1\",\n      \"address\": { \"socket_address\": { \"address\": \"tcp://0.0.0.1\", \"port_value\": 0 } },\n      \"filter_chains\": [ { \"filters\": null } ]\n    },\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"name\": \"listener2\",\n      \"address\": { \"socket_address\": { \"address\": \"tcp://0.0.0.2\", \"port_value\": 0 } },\n      \"filter_chains\": [ { \"filters\": null } ]\n    }\n  ]\n}\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_json);\n\n  makeListenersAndExpectCall({});\n  expectAdd(\"listener1\", \"0\", true);\n  expectAdd(\"listener2\", \"0\", true);\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  EXPECT_CALL(init_watcher_, ready());\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::listener::v3::Listener>(response1);\n  lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, 
response1.version_info());\n\n  EXPECT_EQ(\"0\", lds_->versionInfo());\n\n  const std::string response2_json = R\"EOF(\n{\n  \"version_info\": \"1\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"name\": \"listener1\",\n      \"address\": { \"socket_address\": { \"address\": \"tcp://0.0.0.1\", \"port_value\": 0 } },\n      \"filter_chains\": [ { \"filters\": null } ]\n    },\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"name\": \"listener3\",\n      \"address\": { \"socket_address\": { \"address\": \"tcp://0.0.0.3\", \"port_value\": 0 } },\n      \"filter_chains\": [ { \"filters\": null } ]\n    }\n  ]\n}\n  )EOF\";\n  auto response2 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response2_json);\n\n  makeListenersAndExpectCall({\"listener1\", \"listener2\"});\n  EXPECT_CALL(listener_manager_, removeListener(\"listener2\")).WillOnce(Return(true));\n  expectAdd(\"listener1\", \"1\", false);\n  expectAdd(\"listener3\", \"1\", true);\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  const auto decoded_resources_2 =\n      TestUtility::decodeResources<envoy::config::listener::v3::Listener>(response2);\n  lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info());\n  EXPECT_EQ(\"1\", lds_->versionInfo());\n}\n\n// Regression test against only updating versionInfo() if at least one listener\n// is added/updated even if one or more are removed.\nTEST_F(LdsApiTest, UpdateVersionOnListenerRemove) {\n  InSequence s;\n\n  setup();\n\n  const std::string response1_json = R\"EOF(\n{\n  \"version_info\": \"0\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"name\": \"listener1\",\n      \"address\": { \"socket_address\": { \"address\": \"tcp://0.0.0.1\", \"port_value\": 0 } },\n      \"filter_chains\": [ { \"filters\": null } ]\n    }\n  ]\n}\n)EOF\";\n  auto 
response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_json);\n\n  makeListenersAndExpectCall({});\n  expectAdd(\"listener1\", \"0\", true);\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  EXPECT_CALL(init_watcher_, ready());\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::listener::v3::Listener>(response1);\n  lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info());\n\n  EXPECT_EQ(\"0\", lds_->versionInfo());\n\n  const std::string response2_json = R\"EOF(\n{\n  \"version_info\": \"1\",\n  \"resources\": []\n}\n  )EOF\";\n  auto response2 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response2_json);\n\n  makeListenersAndExpectCall({\"listener1\"});\n  EXPECT_CALL(listener_manager_, removeListener(\"listener1\")).WillOnce(Return(true));\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  const auto decoded_resources_2 =\n      TestUtility::decodeResources<envoy::config::listener::v3::Listener>(response2);\n  lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info());\n  EXPECT_EQ(\"1\", lds_->versionInfo());\n}\n\n// Regression test issue #2188 where an empty ca_cert_file field was created and caused the LDS\n// update to fail validation.\nTEST_F(LdsApiTest, TlsConfigWithoutCaCert) {\n  InSequence s;\n\n  setup();\n\n  std::string response1_yaml = R\"EOF(\nversion_info: '1'\nresources:\n- \"@type\": type.googleapis.com/envoy.api.v2.Listener\n  name: listener0\n  address:\n    socket_address:\n      address: tcp://0.0.0.1\n      port_value: 61000\n  filter_chains:\n  - filters:\n  )EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_yaml);\n\n  makeListenersAndExpectCall({\"listener0\"});\n  expectAdd(\"listener0\", {}, true);\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  
EXPECT_CALL(init_watcher_, ready());\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::listener::v3::Listener>(response1);\n  lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info());\n\n  std::string response2_basic = R\"EOF(\nversion_info: '1'\nresources:\n- \"@type\": type.googleapis.com/envoy.api.v2.Listener\n  name: listener-8080\n  address:\n    socket_address:\n      address: tcp://0.0.0.0\n      port_value: 61001\n  filter_chains:\n  - transport_socket:\n      name: tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n        common_tls_context:\n          tls_certificates:\n          - certificate_chain:\n              filename: \"{}\"\n            private_key:\n              filename: \"{}\"\n    filters:\n  )EOF\";\n  std::string response2_json =\n      fmt::format(response2_basic,\n                  TestEnvironment::runfilesPath(\"test/config/integration/certs/servercert.pem\"),\n                  TestEnvironment::runfilesPath(\"test/config/integration/certs/serverkey.pem\"));\n  auto response2 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response2_json);\n\n  makeListenersAndExpectCall({\n      \"listener-8080\",\n  });\n  // Can't check version here because of bazel sandbox paths for the certs.\n  expectAdd(\"listener-8080\", {}, true);\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  const auto decoded_resources_2 =\n      TestUtility::decodeResources<envoy::config::listener::v3::Listener>(response2);\n  EXPECT_NO_THROW(\n      lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()));\n}\n\n// Validate behavior when the config fails delivery at the subscription level.\nTEST_F(LdsApiTest, FailureSubscription) {\n  InSequence s;\n\n  setup();\n\n  EXPECT_CALL(init_watcher_, ready());\n  
lds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, {});\n  EXPECT_EQ(\"\", lds_->versionInfo());\n}\n\nTEST_F(LdsApiTest, ReplacingListenerWithSameAddress) {\n  InSequence s;\n\n  setup();\n\n  const std::string response1_json = R\"EOF(\n{\n  \"version_info\": \"0\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"name\": \"listener1\",\n      \"address\": { \"socket_address\": { \"address\": \"tcp://0.0.0.1\", \"port_value\": 0 } },\n      \"filter_chains\": [ { \"filters\": null } ]\n    },\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"name\": \"listener2\",\n      \"address\": { \"socket_address\": { \"address\": \"tcp://0.0.0.2\", \"port_value\": 0 } },\n      \"filter_chains\": [ { \"filters\": null } ]\n    }\n  ]\n}\n)EOF\";\n  auto response1 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response1_json);\n\n  makeListenersAndExpectCall({});\n  expectAdd(\"listener1\", \"0\", true);\n  expectAdd(\"listener2\", \"0\", true);\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  EXPECT_CALL(init_watcher_, ready());\n  const auto decoded_resources =\n      TestUtility::decodeResources<envoy::config::listener::v3::Listener>(response1);\n  lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info());\n\n  EXPECT_EQ(\"0\", lds_->versionInfo());\n\n  const std::string response2_json = R\"EOF(\n{\n  \"version_info\": \"1\",\n  \"resources\": [\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"name\": \"listener1\",\n      \"address\": { \"socket_address\": { \"address\": \"tcp://0.0.0.1\", \"port_value\": 0 } },\n      \"filter_chains\": [ { \"filters\": null } ]\n    },\n    {\n      \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n      \"name\": \"listener3\",\n      \"address\": { \"socket_address\": { \"address\": 
\"tcp://0.0.0.2\", \"port_value\": 0 } },\n      \"filter_chains\": [ { \"filters\": null } ]\n    }\n  ]\n}\n)EOF\";\n  auto response2 =\n      TestUtility::parseYaml<envoy::service::discovery::v3::DiscoveryResponse>(response2_json);\n\n  makeListenersAndExpectCall({\"listener1\", \"listener2\"});\n  EXPECT_CALL(listener_manager_, removeListener(\"listener2\")).WillOnce(Return(true));\n  expectAdd(\"listener1\", \"1\", false);\n  expectAdd(\"listener3\", \"1\", true);\n  EXPECT_CALL(listener_manager_, endListenerUpdate(_));\n  const auto decoded_resources_2 =\n      TestUtility::decodeResources<envoy::config::listener::v3::Listener>(response2);\n  lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info());\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/listener_manager_impl_quic_only_test.cc",
    "content": "#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n\n#include \"extensions/quic_listeners/quiche/quic_transport_socket_factory.h\"\n\n#include \"test/server/listener_manager_impl_test.h\"\n#include \"test/server/utility.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nclass MockSupportsUdpGso : public Api::OsSysCallsImpl {\npublic:\n  MOCK_METHOD(bool, supportsUdpGso, (), (const));\n};\n\nclass ListenerManagerImplQuicOnlyTest : public ListenerManagerImplTest {\npublic:\n  NiceMock<MockSupportsUdpGso> udp_gso_syscall_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls{&udp_gso_syscall_};\n};\n\nTEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    protocol: UDP\n    port_value: 1234\nfilter_chains:\n- filter_chain_match:\n    transport_protocol: \"quic\"\n  filters: []\n  transport_socket:\n    name: envoy.transport_sockets.quic\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport\n      downstream_tls_context:\n        common_tls_context:\n          tls_certificates:\n          - certificate_chain:\n              filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n            private_key:\n              filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n          validation_context:\n            trusted_ca:\n              filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n            match_subject_alt_names:\n            - exact: localhost\n            - exact: 127.0.0.1\nreuse_port: true\nudp_listener_config:\n  udp_listener_name: 
\"quiche_quic_listener\"\nudp_writer_config:\n  name: \"udp_gso_batch_writer\"\n  typed_config:\n    \"@type\": type.googleapis.com/envoy.config.listener.v3.UdpGsoBatchWriterOptions\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml);\n  ON_CALL(udp_gso_syscall_, supportsUdpGso()).WillByDefault(Return(true));\n  EXPECT_CALL(server_.api_.random_, uuid());\n  expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND,\n#ifdef SO_RXQ_OVFL // SO_REUSEPORT is on as configured\n                           /* expected_num_options */\n                           Api::OsSysCallsSingleton::get().supportsUdpGro() ? 4 : 3,\n#else\n                           /* expected_num_options */\n                           Api::OsSysCallsSingleton::get().supportsUdpGro() ? 3 : 2,\n#endif\n                           /* expected_creation_params */ {true, false});\n\n  expectSetsockopt(/* expected_sockopt_level */ IPPROTO_IP,\n                   /* expected_sockopt_name */ ENVOY_IP_PKTINFO,\n                   /* expected_value */ 1,\n                   /* expected_num_calls */ 1);\n#ifdef SO_RXQ_OVFL\n  expectSetsockopt(/* expected_sockopt_level */ SOL_SOCKET,\n                   /* expected_sockopt_name */ SO_RXQ_OVFL,\n                   /* expected_value */ 1,\n                   /* expected_num_calls */ 1);\n#endif\n  expectSetsockopt(/* expected_sockopt_level */ SOL_SOCKET,\n                   /* expected_sockopt_name */ SO_REUSEPORT,\n                   /* expected_value */ 1,\n                   /* expected_num_calls */ 1);\n  if (Api::OsSysCallsSingleton::get().supportsUdpGro()) {\n    expectSetsockopt(/* expected_sockopt_level */ SOL_UDP,\n                     /* expected_sockopt_name */ UDP_GRO,\n                     /* expected_value */ 1,\n                     /* expected_num_calls */ 1);\n  }\n\n  
manager_->addOrUpdateListener(listener_proto, \"\", true);\n  EXPECT_EQ(1u, manager_->listeners().size());\n  EXPECT_FALSE(manager_->listeners()[0].get().udpListenerFactory()->isTransportConnectionless());\n  Network::SocketSharedPtr listen_socket =\n      manager_->listeners().front().get().listenSocketFactory().getListenSocket();\n\n  Network::UdpPacketWriterPtr udp_packet_writer =\n      manager_->listeners().front().get().udpPacketWriterFactory()->get().createUdpPacketWriter(\n          listen_socket->ioHandle(), manager_->listeners()[0].get().listenerScope());\n  EXPECT_TRUE(udp_packet_writer->isBatchMode());\n\n  // No filter chain found with non-matching transport protocol.\n  EXPECT_EQ(nullptr, findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111));\n\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"quic\", {}, \"8.8.8.8\", 111);\n  ASSERT_NE(nullptr, filter_chain);\n  auto& quic_socket_factory = dynamic_cast<const Quic::QuicServerTransportSocketFactory&>(\n      filter_chain->transportSocketFactory());\n  EXPECT_TRUE(quic_socket_factory.implementsSecureTransport());\n  EXPECT_TRUE(quic_socket_factory.serverContextConfig().isReady());\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/listener_manager_impl_test.cc",
    "content": "#include \"test/server/listener_manager_impl_test.h\"\n\n#include <chrono>\n#include <memory>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/core/v3/config_source.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/server/filter_config.h\"\n#include \"envoy/server/listener_manager.h\"\n\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/config/metadata.h\"\n#include \"common/init/manager_impl.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/io_socket_handle_impl.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/protobuf.h\"\n\n#include \"extensions/filters/listener/original_dst/original_dst.h\"\n#include \"extensions/transport_sockets/tls/ssl_socket.h\"\n\n#include \"test/mocks/init/mocks.h\"\n#include \"test/server/utility.h\"\n#include \"test/test_common/network_utility.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/match.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nusing testing::AtLeast;\nusing testing::InSequence;\nusing testing::Return;\nusing testing::ReturnRef;\nusing testing::Throw;\n\nclass ListenerManagerImplWithDispatcherStatsTest : public ListenerManagerImplTest {\nprotected:\n  ListenerManagerImplWithDispatcherStatsTest() { enable_dispatcher_stats_ = true; }\n};\n\nclass ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest {\npublic:\n  /**\n   * Create an IPv4 listener with a given name.\n   */\n  envoy::config::listener::v3::Listener createIPv4Listener(const std::string& name) {\n    envoy::config::listener::v3::Listener listener = parseListenerFromV3Yaml(R\"EOF(\n      address:\n        socket_address: { address: 127.0.0.1, port_value: 
1111 }\n      filter_chains:\n      - filters:\n    )EOF\");\n    listener.set_name(name);\n    return listener;\n  }\n\n  /**\n   * Used by some tests below to validate that, if a given socket option is valid on this platform\n   * and set in the Listener, it should result in a call to setsockopt() with the appropriate\n   * values.\n   */\n  void testSocketOption(const envoy::config::listener::v3::Listener& listener,\n                        const envoy::config::core::v3::SocketOption::SocketState& expected_state,\n                        const Network::SocketOptionName& expected_option, int expected_value,\n                        uint32_t expected_num_options = 1,\n                        ListenSocketCreationParams expected_creation_params = {true, true}) {\n    if (expected_option.hasValue()) {\n      expectCreateListenSocket(expected_state, expected_num_options, expected_creation_params);\n      expectSetsockopt(expected_option.level(), expected_option.option(), expected_value,\n                       expected_num_options);\n      manager_->addOrUpdateListener(listener, \"\", true);\n      EXPECT_EQ(1U, manager_->listeners().size());\n    } else {\n      EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(listener, \"\", true), EnvoyException,\n                                \"MockListenerComponentFactory: Setting socket options failed\");\n      EXPECT_EQ(0U, manager_->listeners().size());\n    }\n  }\n};\n\nclass ListenerManagerImplForInPlaceFilterChainUpdateTest : public ListenerManagerImplTest {\npublic:\n  envoy::config::listener::v3::Listener createDefaultListener() {\n    envoy::config::listener::v3::Listener listener_proto;\n    Protobuf::TextFormat::ParseFromString(R\"EOF(\n    name: \"foo\"\n    address: {\n      socket_address: {\n        address: \"127.0.0.1\"\n        port_value: 1234\n      }\n    }\n    filter_chains: {}\n  )EOF\",\n                                          &listener_proto);\n    return listener_proto;\n  }\n\n  void 
expectAddListener(const envoy::config::listener::v3::Listener& listener_proto,\n                         ListenerHandle*) {\n    EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n    EXPECT_CALL(*worker_, addListener(_, _, _));\n    manager_->addOrUpdateListener(listener_proto, \"\", true);\n    worker_->callAddCompletion(true);\n    EXPECT_EQ(1UL, manager_->listeners().size());\n    checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n  }\n\n  void expectUpdateToThenDrain(const envoy::config::listener::v3::Listener& new_listener_proto,\n                               ListenerHandle* old_listener_handle) {\n    EXPECT_CALL(*worker_, addListener(_, _, _));\n    EXPECT_CALL(*worker_, stopListener(_, _));\n    EXPECT_CALL(*old_listener_handle->drain_manager_, startDrainSequence(_));\n\n    EXPECT_TRUE(manager_->addOrUpdateListener(new_listener_proto, \"\", true));\n\n    EXPECT_CALL(*worker_, removeListener(_, _));\n    old_listener_handle->drain_manager_->drain_sequence_completion_();\n\n    EXPECT_CALL(*old_listener_handle, onDestroy());\n    worker_->callRemovalCompletion();\n  }\n\n  void expectRemove(const envoy::config::listener::v3::Listener& listener_proto,\n                    ListenerHandle* listener_handle) {\n\n    EXPECT_CALL(*worker_, stopListener(_, _));\n    EXPECT_CALL(*listener_factory_.socket_, close());\n    EXPECT_CALL(*listener_handle->drain_manager_, startDrainSequence(_));\n    EXPECT_TRUE(manager_->removeListener(listener_proto.name()));\n\n    EXPECT_CALL(*worker_, removeListener(_, _));\n    listener_handle->drain_manager_->drain_sequence_completion_();\n\n    EXPECT_CALL(*listener_handle, onDestroy());\n    worker_->callRemovalCompletion();\n  }\n};\n\nclass MockLdsApi : public LdsApi {\npublic:\n  MOCK_METHOD(std::string, versionInfo, (), (const));\n};\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, EmptyFilter) {\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 
1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n  EXPECT_EQ(std::chrono::milliseconds(15000),\n            manager_->listeners().front().get().listenerFiltersTimeout());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, DefaultListenerPerConnectionBufferLimit) {\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1024 * 1024U, manager_->listeners().back().get().perConnectionBufferLimitBytes());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, SetListenerPerConnectionBufferLimit) {\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\nper_connection_buffer_limit_bytes: 8192\n  )EOF\";\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(8192U, manager_->listeners().back().get().perConnectionBufferLimitBytes());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TlsTransportSocket) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  transport_socket:\n    name: tls\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n      common_tls_context:\n        tls_certificates:\n        - certificate_chain:\n            filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n          private_key:\n            filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n        validation_context:\n          trusted_ca:\n            filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n          match_subject_alt_names:\n            exact: localhost\n            exact: 127.0.0.1\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest,\n       DEPRECATED_FEATURE_TEST(TlsTransportSocketLegacyConfig)) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  transport_socket:\n    name: tls\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n      common_tls_context:\n        tls_certificates:\n        - certificate_chain:\n            filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n          private_key:\n            filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n        validation_context:\n          trusted_ca:\n            filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n          verify_subject_alt_name:\n            - localhost\n            - 127.0.0.1\n  )EOF\",\n                   
                                    Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, DEPRECATED_FEATURE_TEST(TlsContext)) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  transport_socket:\n     name: tls\n     typed_config:\n       \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n       common_tls_context:\n         tls_certificates:\n         - certificate_chain:\n             filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\"\n           private_key:\n             filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\"\n         validation_context:\n           trusted_ca:\n             filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"\n           match_subject_alt_names:\n             exact: localhost\n             exact: 127.0.0.1\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  
EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) {\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n  // Validate that there are no active listeners and workers are started.\n  EXPECT_EQ(0, server_.stats_store_\n                   .gauge(\"listener_manager.total_active_listeners\",\n                          Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n  EXPECT_EQ(1, server_.stats_store_\n                   .gauge(\"listener_manager.workers_started\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n\n  const std::string proto_text = R\"EOF(\n    address: {\n      socket_address: {\n        protocol: UDP\n        address: \"127.0.0.1\"\n        port_value: 1234\n      }\n    }\n    filter_chains: {}\n  )EOF\";\n  envoy::config::listener::v3::Listener listener_proto;\n  EXPECT_TRUE(Protobuf::TextFormat::ParseFromString(proto_text, &listener_proto));\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_CALL(listener_factory_,\n              createListenSocket(_, Network::Socket::Type::Datagram, _, {{true, false}}))\n      .WillOnce(Invoke([this](const Network::Address::InstanceConstSharedPtr&,\n                              Network::Socket::Type, const Network::Socket::OptionsSharedPtr&,\n                              const ListenSocketCreationParams&) -> Network::SocketSharedPtr {\n        return listener_factory_.socket_;\n      }));\n  EXPECT_CALL(*listener_factory_.socket_, setSocketOption(_, _, _, _)).Times(testing::AtLeast(1));\n  EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno}));\n  manager_->addOrUpdateListener(listener_proto, \"\", true);\n  EXPECT_EQ(1u, manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfig) {\n  const std::string yaml = 
R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\ntest: a\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                          EnvoyException, \"test: Cannot find field\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfigNoFilterChains) {\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                          EnvoyException, \"no filter chains specified\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfig2UDPListenerFilters) {\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    protocol: UDP\n    address: 127.0.0.1\n    port_value: 1234\nlistener_filters:\n- name: envoy.filters.listener.tls_inspector\n- name: envoy.filters.listener.original_dst\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                          EnvoyException, \"Only 1 UDP listener filter per listener supported\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterConfig) {\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  - foo: type\n    name: name\n    typed_config: {}\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                          EnvoyException, \"foo: Cannot find field\");\n}\nclass NonTerminalFilterFactory : public Configuration::NamedNetworkFilterConfigFactory {\npublic:\n  // Configuration::NamedNetworkFilterConfigFactory\n  Network::FilterFactoryCb\n  createFilterFactoryFromProto(const Protobuf::Message&,\n                               
Server::Configuration::FactoryContext&) override {\n    return [](Network::FilterManager&) -> void {};\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return std::make_unique<Envoy::ProtobufWkt::Struct>();\n  }\n\n  std::string name() const override { return \"non_terminal\"; }\n};\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TerminalNotLast) {\n  NonTerminalFilterFactory filter;\n  Registry::InjectFactory<Configuration::NamedNetworkFilterConfigFactory> registered(filter);\n\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  - name: non_terminal\n    typed_config: {}\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true), EnvoyException,\n      \"Error: non-terminal filter named non_terminal of type non_terminal is the last \"\n      \"filter in a network filter chain.\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, NotTerminalLast) {\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  - name: envoy.filters.network.tcp_proxy\n    typed_config: {}\n  - name: unknown_but_will_not_be_processed\n    typed_config: {}\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true), EnvoyException,\n      \"Error: terminal filter named envoy.filters.network.tcp_proxy of type \"\n      \"envoy.filters.network.tcp_proxy must be the last filter in a network filter chain.\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterName) {\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  - name: invalid\n    typed_config: {}\n  
)EOF\";\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException,\n                            \"Didn't find a registered implementation for name: 'invalid'\");\n}\n\nclass TestStatsConfigFactory : public Configuration::NamedNetworkFilterConfigFactory {\npublic:\n  // Configuration::NamedNetworkFilterConfigFactory\n  Network::FilterFactoryCb createFilterFactoryFromProto(\n      const Protobuf::Message&,\n      Configuration::FactoryContext& filter_chain_factory_context) override {\n    return commonFilterFactory(filter_chain_factory_context);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return std::make_unique<Envoy::ProtobufWkt::Struct>();\n  }\n\n  std::string name() const override { return \"stats_test\"; }\n  bool isTerminalFilter() override { return true; }\n\nprivate:\n  Network::FilterFactoryCb commonFilterFactory(Configuration::FactoryContext& context) {\n    context.scope().counterFromString(\"bar\").inc();\n    return [](Network::FilterManager&) -> void {};\n  }\n};\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, StatsScopeTest) {\n  TestStatsConfigFactory filter;\n  Registry::InjectFactory<Configuration::NamedNetworkFilterConfigFactory> registered(filter);\n\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\ndeprecated_v1:\n  bind_to_port: false\nfilter_chains:\n- filters:\n  - name: stats_test\n    typed_config: {}\n  )EOF\";\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false)));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  manager_->listeners().front().get().listenerScope().counterFromString(\"foo\").inc();\n\n  EXPECT_EQ(1UL, 
server_.stats_store_.counterFromString(\"bar\").value());\n  EXPECT_EQ(1UL, server_.stats_store_.counterFromString(\"listener.127.0.0.1_1234.foo\").value());\n}\n\nTEST_F(ListenerManagerImplTest, UnsupportedInternalListener) {\n  const std::string yaml = R\"EOF(\naddress:\n  envoy_internal_address:\n    server_listener_name: a_listener_name  \nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ASSERT_DEATH(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true), \"\");\n}\n\nTEST_F(ListenerManagerImplTest, NotDefaultListenerFiltersTimeout) {\n  const std::string yaml = R\"EOF(\n    name: \"foo\"\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 10000 }\n    filter_chains:\n    - filters:\n    listener_filters_timeout: 0s\n  )EOF\";\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true));\n  EXPECT_EQ(std::chrono::milliseconds(),\n            manager_->listeners().front().get().listenerFiltersTimeout());\n}\n\nTEST_F(ListenerManagerImplTest, ModifyOnlyDrainType) {\n  InSequence s;\n\n  // Add foo listener.\n  const std::string listener_foo_yaml = R\"EOF(\n    name: \"foo\"\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 10000 }\n    filter_chains:\n    - filters:\n    drain_type: MODIFY_ONLY\n  )EOF\";\n\n  ListenerHandle* listener_foo =\n      expectListenerCreate(false, true, envoy::config::listener::v3::Listener::MODIFY_ONLY);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n}\n\nTEST_F(ListenerManagerImplTest, AddListenerAddressNotMatching) {\n  InSequence s;\n\n  // Add foo listener.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 
127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\ndrain_type: default\n\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Update foo listener, but with a different address. Should throw.\n  const std::string listener_foo_different_address_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1235\nfilter_chains:\n- filters: []\ndrain_type: modify_only\n  )EOF\";\n\n  ListenerHandle* listener_foo_different_address =\n      expectListenerCreate(false, true, envoy::config::listener::v3::Listener::MODIFY_ONLY);\n  EXPECT_CALL(*listener_foo_different_address, onDestroy());\n  EXPECT_THROW_WITH_MESSAGE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_different_address_yaml),\n                                    \"\", true),\n      EnvoyException,\n      \"error updating listener: 'foo' has a different address \"\n      \"'127.0.0.1:1235' from existing listener address '127.0.0.1:1234'\");\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n}\n\n// Make sure that a listener creation does not fail on IPv4 only setups when FilterChainMatch is not\n// specified and we try to create default CidrRange. 
See makeCidrListEntry function for\n// more details.\nTEST_F(ListenerManagerImplTest, AddListenerOnIpv4OnlySetups) {\n  InSequence s;\n\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\ndrain_type: default\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n\n  ON_CALL(os_sys_calls_, socket(AF_INET, _, 0))\n      .WillByDefault(Return(Api::SysCallSocketResult{5, 0}));\n  ON_CALL(os_sys_calls_, socket(AF_INET6, _, 0))\n      .WillByDefault(Return(Api::SysCallSocketResult{INVALID_SOCKET, 0}));\n  ON_CALL(os_sys_calls_, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0}));\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n  EXPECT_CALL(*listener_foo, onDestroy());\n}\n\n// Make sure that a listener creation does not fail on IPv6 only setups when FilterChainMatch is not\n// specified and we try to create default CidrRange. 
See makeCidrListEntry function for\n// more details.\nTEST_F(ListenerManagerImplTest, AddListenerOnIpv6OnlySetups) {\n  InSequence s;\n\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: \"::0001\"\n    port_value: 1234\nfilter_chains:\n- filters: []\ndrain_type: default\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n\n  ON_CALL(os_sys_calls_, socket(AF_INET, _, 0))\n      .WillByDefault(Return(Api::SysCallSocketResult{INVALID_SOCKET, 0}));\n  ON_CALL(os_sys_calls_, socket(AF_INET6, _, 0))\n      .WillByDefault(Return(Api::SysCallSocketResult{5, 0}));\n  ON_CALL(os_sys_calls_, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0}));\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n  EXPECT_CALL(*listener_foo, onDestroy());\n}\n\n// Make sure that a listener that is not added_via_api cannot be updated or removed.\nTEST_F(ListenerManagerImplTest, UpdateRemoveNotModifiableListener) {\n  time_system_.setSystemTime(std::chrono::milliseconds(1001001001001));\n\n  InSequence s;\n\n  // Add foo listener.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, false);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", false));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n  checkConfigDump(R\"EOF(\nstatic_listeners:\n  listener:\n    \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n    name: \"foo\"\n    address:\n      socket_address:\n        address: \"127.0.0.1\"\n        port_value: 1234\n   
 filter_chains: {}\n  last_updated:\n    seconds: 1001001001\n    nanos: 1000000\n)EOF\");\n\n  // Update foo listener. Should be blocked.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  - name: fake\n    typed_config: {}\n  )EOF\";\n\n  EXPECT_FALSE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", false));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Remove foo listener. Should be blocked.\n  EXPECT_FALSE(manager_->removeListener(\"foo\"));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n}\n\n// Tests that when listener tears down, server's initManager is notified.\nTEST_F(ListenerManagerImplTest, ListenerTeardownNotifiesServerInitManager) {\n  time_system_.setSystemTime(std::chrono::milliseconds(1001001001001));\n\n  InSequence s;\n\n  auto* lds_api = new MockLdsApi();\n  EXPECT_CALL(listener_factory_, createLdsApi_(_, _)).WillOnce(Return(lds_api));\n  envoy::config::core::v3::ConfigSource lds_config;\n  manager_->createLdsApi(lds_config, nullptr);\n\n  EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return(\"\"));\n  checkConfigDump(R\"EOF(\nstatic_listeners:\n)EOF\");\n\n  const std::string listener_foo_yaml = R\"EOF(\nname: \"foo\"\naddress:\n  socket_address:\n    address: \"127.0.0.1\"\n    port_value: 1234\nfilter_chains: {}\n  )EOF\";\n\n  const std::string listener_foo_address_update_yaml = R\"EOF(\nname: \"foo\"\naddress:\n  socket_address:\n    address: \"127.0.0.1\"\n    port_value: 1235\nfilter_chains: {}\n  )EOF\";\n\n  Init::ManagerImpl server_init_mgr(\"server-init-manager\");\n  Init::ExpectableWatcherImpl server_init_watcher(\"server-init-watcher\");\n  { // Add and remove a listener before starting workers.\n    ListenerHandle* listener_foo = expectListenerCreate(true, true);\n    EXPECT_CALL(server_, 
initManager()).WillOnce(ReturnRef(server_init_mgr));\n    EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n    EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml),\n                                              \"version1\", true));\n    checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n    EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return(\"version1\"));\n    checkConfigDump(R\"EOF(\nversion_info: version1\nstatic_listeners:\ndynamic_listeners:\n  - name: foo\n    warming_state:\n      version_info: version1\n      listener:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: foo\n        address:\n          socket_address:\n            address: 127.0.0.1\n            port_value: 1234\n        filter_chains: {}\n      last_updated:\n        seconds: 1001001001\n        nanos: 1000000\n)EOF\");\n    EXPECT_CALL(listener_foo->target_, initialize()).Times(1);\n    server_init_mgr.initialize(server_init_watcher);\n    // Since listener_foo->target_ is not ready, the listener's listener_init_target will not be\n    // ready until the destruction happens.\n    server_init_watcher.expectReady().Times(1);\n    EXPECT_CALL(*listener_foo, onDestroy());\n    EXPECT_TRUE(manager_->removeListener(\"foo\"));\n  }\n  // Listener foo's listener_init_target_ is the only target added to server_init_mgr.\n  EXPECT_EQ(server_init_mgr.state(), Init::Manager::State::Initialized);\n\n  EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return(\"\"));\n  checkConfigDump(R\"EOF(\nstatic_listeners:\n)EOF\");\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Now add new version listener foo after workers start, note it's fine that server_init_mgr is\n  // initialized, as no target will be added to it.\n  time_system_.setSystemTime(std::chrono::milliseconds(2002002002002));\n  EXPECT_CALL(server_, initManager()).Times(0); // No target added to server init manager.\n  
server_init_watcher.expectReady().Times(0);\n  {\n    ListenerHandle* listener_foo2 = expectListenerCreate(true, true);\n    EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n    // Version 2 listener will be initialized by listener manager directly.\n    EXPECT_CALL(listener_foo2->target_, initialize()).Times(1);\n    EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml),\n                                              \"version2\", true));\n    // Version2 is in warming list as listener_foo2->target_ is not ready yet.\n    checkStats(__LINE__, /*added=*/2, 0, /*removed=*/1, /*warming=*/1, 0, 0, 0);\n    EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return(\"version2\"));\n    checkConfigDump(R\"EOF(\n  version_info: version2\n  static_listeners:\n  dynamic_listeners:\n    - name: foo\n      warming_state:\n        version_info: version2\n        listener:\n          \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n          name: foo\n          address:\n            socket_address:\n              address: 127.0.0.1\n              port_value: 1234\n          filter_chains: {}\n        last_updated:\n          seconds: 2002002002\n          nanos: 2000000\n  )EOF\");\n\n    // While it is in warming state, try updating the address. 
It should fail.\n    ListenerHandle* listener_foo3 = expectListenerCreate(true, true);\n    EXPECT_CALL(*listener_foo3, onDestroy());\n    EXPECT_THROW_WITH_MESSAGE(\n        manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_address_update_yaml),\n                                      \"version3\", true),\n        EnvoyException,\n        \"error updating listener: 'foo' has a different address \"\n        \"'127.0.0.1:1235' from existing listener address '127.0.0.1:1234'\");\n\n    // Delete foo-listener again.\n    EXPECT_CALL(*listener_foo2, onDestroy());\n    EXPECT_TRUE(manager_->removeListener(\"foo\"));\n  }\n}\n\nTEST_F(ListenerManagerImplTest, OverrideListener) {\n  InSequence s;\n\n  time_system_.setSystemTime(std::chrono::milliseconds(1001001001001));\n  auto* lds_api = new MockLdsApi();\n  EXPECT_CALL(listener_factory_, createLdsApi_(_, _)).WillOnce(Return(lds_api));\n  envoy::config::core::v3::ConfigSource lds_config;\n  manager_->createLdsApi(lds_config, nullptr);\n\n  // Add foo listener.\n  const std::string listener_foo_yaml = R\"EOF(\nname: \"foo\"\naddress:\n  socket_address:\n    address: \"127.0.0.1\"\n    port_value: 1234\nfilter_chains: {}\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"version1\", true));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Start workers and capture ListenerImpl.\n  Network::ListenerConfig* listener_config = nullptr;\n  EXPECT_CALL(*worker_, addListener(_, _, _))\n      .WillOnce(Invoke([&listener_config](auto, Network::ListenerConfig& config, auto) -> void {\n        listener_config = &config;\n      }))\n      .RetiresOnSaturation();\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  EXPECT_EQ(0, 
server_.stats_store_.counter(\"listener_manager.listener_create_success\").value());\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Update foo into warming.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  filter_chain_match:\n    destination_port: 1234\n  )EOF\";\n\n  ListenerHandle* listener_foo_update1 = expectListenerOverridden(false);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  auto* timer = new Event::MockTimer(dynamic_cast<Event::MockDispatcher*>(&server_.dispatcher()));\n  EXPECT_CALL(*timer, enableTimer(_, _));\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", true));\n  EXPECT_EQ(1UL, manager_->listeners().size());\n\n  worker_->callAddCompletion(true);\n  checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 1);\n\n  EXPECT_CALL(*worker_, removeFilterChains(_, _, _));\n  timer->invokeCallback();\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callDrainFilterChainsComplete();\n\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n  EXPECT_EQ(1, server_.stats_store_.counter(\"listener_manager.listener_create_success\").value());\n}\n\nTEST_F(ListenerManagerImplTest, AddOrUpdateListener) {\n  time_system_.setSystemTime(std::chrono::milliseconds(1001001001001));\n\n  InSequence s;\n\n  auto* lds_api = new MockLdsApi();\n  EXPECT_CALL(listener_factory_, createLdsApi_(_, _)).WillOnce(Return(lds_api));\n  envoy::config::core::v3::ConfigSource lds_config;\n  manager_->createLdsApi(lds_config, nullptr);\n\n  EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return(\"\"));\n  checkConfigDump(R\"EOF(\nstatic_listeners:\n)EOF\");\n\n  // Add foo listener.\n  const std::string listener_foo_yaml = R\"EOF(\nname: \"foo\"\naddress:\n  socket_address:\n    address: \"127.0.0.1\"\n    port_value: 1234\nfilter_chains: {}\n  )EOF\";\n\n  
ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"version1\", true));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n  EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return(\"version1\"));\n  checkConfigDump(R\"EOF(\nversion_info: version1\nstatic_listeners:\ndynamic_listeners:\n  - name: foo\n    warming_state:\n      version_info: version1\n      listener:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: foo\n        address:\n          socket_address:\n            address: 127.0.0.1\n            port_value: 1234\n        filter_chains: {}\n      last_updated:\n        seconds: 1001001001\n        nanos: 1000000\n)EOF\");\n\n  // Update duplicate should be a NOP.\n  EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Update foo listener. 
Should share socket.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: \"foo\"\naddress:\n  socket_address:\n    address: \"127.0.0.1\"\n    port_value: 1234\nfilter_chains: {}\nper_connection_buffer_limit_bytes: 10\n  )EOF\";\n\n  time_system_.setSystemTime(std::chrono::milliseconds(2002002002002));\n\n  ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true);\n  EXPECT_CALL(*listener_foo, onDestroy());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml),\n                                            \"version2\", true));\n  checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0);\n  EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return(\"version2\"));\n  checkConfigDump(R\"EOF(\nversion_info: version2\nstatic_listeners:\ndynamic_listeners:\n  - name: foo\n    warming_state:\n      version_info: version2\n      listener:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: foo\n        address:\n          socket_address:\n            address: 127.0.0.1\n            port_value: 1234\n        filter_chains: {}\n        per_connection_buffer_limit_bytes: 10\n      last_updated:\n        seconds: 2002002002\n        nanos: 2000000\n)EOF\");\n\n  // Validate that workers_started stat is zero before calling startWorkers.\n  EXPECT_EQ(0, server_.stats_store_\n                   .gauge(\"listener_manager.workers_started\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n\n  // Start workers.\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n  // Validate that workers_started stat is still zero before workers set the status via\n  // completion callback.\n  EXPECT_EQ(0, server_.stats_store_\n                   .gauge(\"listener_manager.workers_started\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n  worker_->callAddCompletion(true);\n\n  // Validate that 
workers_started stat is set to 1 after workers have responded with initialization\n  // status.\n  EXPECT_EQ(1, server_.stats_store_\n                   .gauge(\"listener_manager.workers_started\", Stats::Gauge::ImportMode::NeverImport)\n                   .value());\n\n  // Update duplicate should be a NOP.\n  EXPECT_FALSE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", true));\n  checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0);\n\n  time_system_.setSystemTime(std::chrono::milliseconds(3003003003003));\n\n  // Update foo. Should go into warming, have an immediate warming callback, and start immediate\n  // removal.\n  ListenerHandle* listener_foo_update2 = expectListenerCreate(false, true);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_foo_update1->drain_manager_, startDrainSequence(_));\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"version3\", true));\n  worker_->callAddCompletion(true);\n  checkStats(__LINE__, 1, 2, 0, 0, 1, 1, 0);\n  EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return(\"version3\"));\n  checkConfigDump(R\"EOF(\nversion_info: version3\nstatic_listeners:\ndynamic_listeners:\n  - name: foo\n    active_state:\n      version_info: version3\n      listener:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: foo\n        address:\n          socket_address:\n            address: 127.0.0.1\n            port_value: 1234\n        filter_chains: {}\n      last_updated:\n        seconds: 3003003003\n        nanos: 3000000\n    draining_state:\n      version_info: version2\n      listener:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: foo\n        address:\n          socket_address:\n            address: 127.0.0.1\n            port_value: 1234\n        filter_chains: {}\n        per_connection_buffer_limit_bytes: 
10\n      last_updated:\n        seconds: 2002002002\n        nanos: 2000000\n)EOF\");\n\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo_update1->drain_manager_->drain_sequence_completion_();\n  checkStats(__LINE__, 1, 2, 0, 0, 1, 1, 0);\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n  worker_->callRemovalCompletion();\n  checkStats(__LINE__, 1, 2, 0, 0, 1, 0, 0);\n\n  time_system_.setSystemTime(std::chrono::milliseconds(4004004004004));\n\n  // Add bar listener.\n  const std::string listener_bar_yaml = R\"EOF(\nname: \"bar\"\naddress:\n  socket_address:\n    address: \"127.0.0.1\"\n    port_value: 1235\nfilter_chains: {}\n  )EOF\";\n\n  ListenerHandle* listener_bar = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), \"version4\", true));\n  EXPECT_EQ(2UL, manager_->listeners().size());\n  worker_->callAddCompletion(true);\n  checkStats(__LINE__, 2, 2, 0, 0, 2, 0, 0);\n\n  time_system_.setSystemTime(std::chrono::milliseconds(5005005005005));\n\n  // Add baz listener, this time requiring initializing.\n  const std::string listener_baz_yaml = R\"EOF(\nname: \"baz\"\naddress:\n  socket_address:\n    address: \"127.0.0.1\"\n    port_value: 1236\nfilter_chains: {}\n  )EOF\";\n\n  ListenerHandle* listener_baz = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_baz->target_, initialize());\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_yaml), \"version5\", true));\n  EXPECT_EQ(2UL, manager_->listeners().size());\n  checkStats(__LINE__, 3, 2, 0, 1, 2, 0, 0);\n  EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return(\"version5\"));\n  checkConfigDump(R\"EOF(\nversion_info: version5\ndynamic_listeners:\n  - name: foo\n    
active_state:\n      version_info: version3\n      listener:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: foo\n        address:\n          socket_address:\n            address: 127.0.0.1\n            port_value: 1234\n        filter_chains: {}\n      last_updated:\n        seconds: 3003003003\n        nanos: 3000000\n  - name: bar\n    active_state:\n      version_info: version4\n      listener:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: bar\n        address:\n          socket_address:\n            address: 127.0.0.1\n            port_value: 1235\n        filter_chains: {}\n      last_updated:\n        seconds: 4004004004\n        nanos: 4000000\n  - name: baz\n    warming_state:\n      version_info: version5\n      listener:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: baz\n        address:\n          socket_address:\n            address: 127.0.0.1\n            port_value: 1236\n        filter_chains: {}\n      last_updated:\n        seconds: 5005005005\n        nanos: 5000000\n)EOF\");\n\n  // Update a duplicate baz that is currently warming.\n  EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_yaml), \"\", true));\n  checkStats(__LINE__, 3, 2, 0, 1, 2, 0, 0);\n\n  // Update baz while it is warming.\n  const std::string listener_baz_update1_yaml = R\"EOF(\nname: baz\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1236\nfilter_chains:\n- filters:\n  - name: fake\n    typed_config: {}\n  )EOF\";\n\n  ListenerHandle* listener_baz_update1 = expectListenerCreate(true, true);\n  EXPECT_CALL(*listener_baz, onDestroy()).WillOnce(Invoke([listener_baz]() -> void {\n    // Call the initialize callback during destruction like RDS will.\n    listener_baz->target_.ready();\n  }));\n  EXPECT_CALL(listener_baz_update1->target_, initialize());\n  EXPECT_TRUE(\n      
manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_update1_yaml), \"\", true));\n  EXPECT_EQ(2UL, manager_->listeners().size());\n  checkStats(__LINE__, 3, 3, 0, 1, 2, 0, 0);\n\n  // Finish initialization for baz which should make it active.\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_baz_update1->target_.ready();\n  EXPECT_EQ(3UL, manager_->listeners().size());\n  worker_->callAddCompletion(true);\n  checkStats(__LINE__, 3, 3, 0, 0, 3, 0, 0);\n\n  EXPECT_CALL(*listener_foo_update2, onDestroy());\n  EXPECT_CALL(*listener_bar, onDestroy());\n  EXPECT_CALL(*listener_baz_update1, onDestroy());\n}\n\nTEST_F(ListenerManagerImplTest, UpdateActiveToWarmAndBack) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add and initialize foo listener.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Update foo into warming.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nper_connection_buffer_limit_bytes: 999\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_foo_update1->target_, initialize());\n  EXPECT_TRUE(\n      
manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", true));\n\n  // Should be both active and warming now.\n  EXPECT_EQ(1UL, manager_->listeners(ListenerManager::WARMING).size());\n  EXPECT_EQ(1UL, manager_->listeners(ListenerManager::ACTIVE).size());\n  checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0);\n\n  // Update foo back to original active, should cause the warming listener to be removed.\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n\n  checkStats(__LINE__, 1, 2, 0, 0, 1, 0, 0);\n  EXPECT_EQ(0UL, manager_->listeners(ListenerManager::WARMING).size());\n  EXPECT_EQ(1UL, manager_->listeners(ListenerManager::ACTIVE).size());\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n}\n\nTEST_F(ListenerManagerImplTest, AddReusableDrainingListener) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener directly into active.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  Network::Address::InstanceConstSharedPtr local_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.1\", 1234));\n  ON_CALL(*listener_factory_.socket_, localAddress()).WillByDefault(ReturnRef(local_address));\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  worker_->callAddCompletion(true);\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Remove foo into draining.\n  std::function<void()> stop_completion;\n  EXPECT_CALL(*worker_, stopListener(_, _))\n      .WillOnce(Invoke(\n          
[&stop_completion](Network::ListenerConfig&, std::function<void()> completion) -> void {\n            ASSERT_TRUE(completion != nullptr);\n            stop_completion = std::move(completion);\n          }));\n  EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_));\n  EXPECT_TRUE(manager_->removeListener(\"foo\"));\n  checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0);\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo->drain_manager_->drain_sequence_completion_();\n  checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0);\n\n  // Add foo again. We should use the socket from draining.\n  ListenerHandle* listener_foo2 = expectListenerCreate(false, true);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  worker_->callAddCompletion(true);\n  checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0);\n\n  EXPECT_CALL(*listener_factory_.socket_, close()).Times(0);\n  stop_completion();\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callRemovalCompletion();\n  checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0);\n\n  EXPECT_CALL(*listener_foo2, onDestroy());\n}\n\nTEST_F(ListenerManagerImplTest, AddClosedDrainingListener) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener directly into active.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  Network::Address::InstanceConstSharedPtr local_address(\n      new Network::Address::Ipv4Instance(\"127.0.0.1\", 1234));\n  ON_CALL(*listener_factory_.socket_, localAddress()).WillByDefault(ReturnRef(local_address));\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  
EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  worker_->callAddCompletion(true);\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Remove foo into draining.\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_factory_.socket_, close());\n  EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_));\n  EXPECT_TRUE(manager_->removeListener(\"foo\"));\n  checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0);\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo->drain_manager_->drain_sequence_completion_();\n  checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0);\n\n  // Add foo again. We should use the socket from draining.\n  ListenerHandle* listener_foo2 = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  worker_->callAddCompletion(true);\n  checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0);\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callRemovalCompletion();\n  checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0);\n\n  EXPECT_CALL(*listener_foo2, onDestroy());\n}\n\nTEST_F(ListenerManagerImplTest, BindToPortEqualToFalse) {\n  InSequence s;\n  ProdListenerComponentFactory real_listener_factory(server_);\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\ndeprecated_v1:\n    bind_to_port: false\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  auto syscall_result = os_sys_calls_actual_.socket(AF_INET, SOCK_STREAM, 0);\n  ASSERT_TRUE(SOCKET_VALID(syscall_result.rc_));\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false)))\n      
.WillOnce(Invoke([this, &syscall_result, &real_listener_factory](\n                           const Network::Address::InstanceConstSharedPtr& address,\n                           Network::Socket::Type socket_type,\n                           const Network::Socket::OptionsSharedPtr& options,\n                           const ListenSocketCreationParams& params) -> Network::SocketSharedPtr {\n        EXPECT_CALL(server_, hotRestart).Times(0);\n        // When bind_to_port is equal to false, create socket fd directly, and do not get socket\n        // fd through hot restart.\n        ON_CALL(os_sys_calls_, socket(AF_INET, _, 0)).WillByDefault(Return(syscall_result));\n        return real_listener_factory.createListenSocket(address, socket_type, options, params);\n      }));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_CALL(*listener_foo, onDestroy());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n}\n\nTEST_F(ListenerManagerImplTest, ReusePortEqualToTrue) {\n  InSequence s;\n  ProdListenerComponentFactory real_listener_factory(server_);\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 0\nreuse_port: true\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  auto syscall_result = os_sys_calls_actual_.socket(AF_INET, SOCK_STREAM, 0);\n  ASSERT_TRUE(SOCKET_VALID(syscall_result.rc_));\n\n  // On Windows if the socket has not been bound to an address with bind\n  // the call to getsockname fails with WSAEINVAL. 
To avoid that we make sure\n  // that the bind system actually happens and it does not get mocked.\n  ON_CALL(os_sys_calls_, bind(_, _, _))\n      .WillByDefault(Invoke(\n          [&](os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) -> Api::SysCallIntResult {\n            Api::SysCallIntResult result = os_sys_calls_actual_.bind(sockfd, addr, addrlen);\n            ASSERT(result.rc_ >= 0);\n            return result;\n          }));\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {{true, false}}))\n      .WillOnce(Invoke([this, &syscall_result, &real_listener_factory](\n                           const Network::Address::InstanceConstSharedPtr& address,\n                           Network::Socket::Type socket_type,\n                           const Network::Socket::OptionsSharedPtr& options,\n                           const ListenSocketCreationParams& params) -> Network::SocketSharedPtr {\n        EXPECT_CALL(server_, hotRestart).Times(0);\n        ON_CALL(os_sys_calls_, socket(AF_INET, _, 0)).WillByDefault(Return(syscall_result));\n        return real_listener_factory.createListenSocket(address, socket_type, options, params);\n      }));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_CALL(*listener_foo, onDestroy());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n}\n\nTEST_F(ListenerManagerImplTest, NotSupportedDatagramUds) {\n  ProdListenerComponentFactory real_listener_factory(server_);\n  EXPECT_THROW_WITH_MESSAGE(real_listener_factory.createListenSocket(\n                                std::make_shared<Network::Address::PipeInstance>(\"/foo\"),\n                                Network::Socket::Type::Datagram, nullptr, {true}),\n                            EnvoyException,\n                            \"socket type SocketType::Datagram not supported for pipes\");\n}\n\nTEST_F(ListenerManagerImplTest, 
CantBindSocket) {\n  time_system_.setSystemTime(std::chrono::milliseconds(1001001001001));\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}))\n      .WillOnce(Throw(EnvoyException(\"can't bind\")));\n  EXPECT_CALL(*listener_foo, onDestroy());\n  EXPECT_THROW(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true),\n               EnvoyException);\n  checkConfigDump(R\"EOF(\ndynamic_listeners:\n  - name: foo\n    error_state:\n      failed_configuration:\n        \"@type\": type.googleapis.com/envoy.config.listener.v3.Listener\n        name: foo\n        address:\n          socket_address:\n            address: 127.0.0.1\n            port_value: 1234\n        filter_chains:\n          - {}\n      last_update_attempt:\n        seconds: 1001001001\n        nanos: 1000000\n      details: can't bind\n)EOF\");\n\n  ListenerManager::FailureStates empty_failure_state;\n  // Fake a new update, just to sanity check the clearing code.\n  manager_->beginListenerUpdate();\n  manager_->endListenerUpdate(std::move(empty_failure_state));\n\n  checkConfigDump(R\"EOF(\ndynamic_listeners:\n)EOF\");\n}\n\n// Verify that errors tracked on endListenerUpdate show up in the config dump/\nTEST_F(ListenerManagerImplTest, ConfigDumpWithExternalError) {\n  time_system_.setSystemTime(std::chrono::milliseconds(1001001001001));\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Make sure the config dump is empty by default.\n  ListenerManager::FailureStates empty_failure_state;\n  manager_->beginListenerUpdate();\n  
manager_->endListenerUpdate(std::move(empty_failure_state));\n  checkConfigDump(R\"EOF(\ndynamic_listeners:\n)EOF\");\n\n  // Now have an external update with errors and make sure it gets dumped.\n  ListenerManager::FailureStates non_empty_failure_state;\n  non_empty_failure_state.push_back(std::make_unique<envoy::admin::v3::UpdateFailureState>());\n  auto& state = non_empty_failure_state.back();\n  state->set_details(\"foo\");\n  manager_->beginListenerUpdate();\n  manager_->endListenerUpdate(std::move(non_empty_failure_state));\n  checkConfigDump(R\"EOF(\ndynamic_listeners:\n  error_state:\n    details: \"foo\"\n)EOF\");\n\n  // And clear again.\n  ListenerManager::FailureStates empty_failure_state2;\n  manager_->beginListenerUpdate();\n  manager_->endListenerUpdate(std::move(empty_failure_state2));\n  checkConfigDump(R\"EOF(\ndynamic_listeners:\n)EOF\");\n}\n\nTEST_F(ListenerManagerImplTest, ListenerDraining) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  worker_->callAddCompletion(true);\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(false));\n  EXPECT_CALL(server_.drain_manager_, drainClose()).WillOnce(Return(false));\n  EXPECT_FALSE(listener_foo->context_->drainDecision().drainClose());\n\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_));\n  EXPECT_TRUE(manager_->removeListener(\"foo\"));\n  
checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0);\n\n  // NOTE: || short circuit here prevents the server drain manager from getting called.\n  EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(true));\n  EXPECT_TRUE(listener_foo->context_->drainDecision().drainClose());\n\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo->drain_manager_->drain_sequence_completion_();\n  checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0);\n\n  EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(false));\n  EXPECT_CALL(server_.drain_manager_, drainClose()).WillOnce(Return(true));\n  EXPECT_TRUE(listener_foo->context_->drainDecision().drainClose());\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callRemovalCompletion();\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 0, 1, 0, 0, 0, 0);\n}\n\nTEST_F(ListenerManagerImplTest, RemoveListener) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Remove an unknown listener.\n  EXPECT_FALSE(manager_->removeListener(\"unknown\"));\n\n  // Add foo listener into warming.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);\n\n  // Remove foo.\n  EXPECT_CALL(*listener_foo, onDestroy());\n  EXPECT_TRUE(manager_->removeListener(\"foo\"));\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 0, 1, 0, 0, 0, 0);\n\n  // Add foo again and initialize it.\n  listener_foo = 
expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 2, 0, 1, 1, 0, 0, 0);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0);\n\n  // Update foo into warming.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nper_connection_buffer_limit_bytes: 999\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_foo_update1->target_, initialize());\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", true));\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  checkStats(__LINE__, 2, 1, 1, 1, 1, 0, 0);\n\n  // Remove foo which should remove both warming and active.\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_factory_.socket_, close());\n  EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_));\n  EXPECT_TRUE(manager_->removeListener(\"foo\"));\n  checkStats(__LINE__, 2, 1, 2, 0, 0, 1, 0);\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo->drain_manager_->drain_sequence_completion_();\n  checkStats(__LINE__, 2, 1, 2, 0, 0, 1, 0);\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callRemovalCompletion();\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  checkStats(__LINE__, 2, 1, 2, 0, 0, 0, 0);\n}\n\n// Validates that StopListener functionality works correctly when only inbound listeners are\n// stopped.\nTEST_F(ListenerManagerImplTest, 
StopListeners) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener in inbound direction.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  auto foo_inbound_proto = parseListenerFromV3Yaml(listener_foo_yaml);\n  EXPECT_TRUE(manager_->addOrUpdateListener(foo_inbound_proto, \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Add a listener in outbound direction.\n  const std::string listener_foo_outbound_yaml = R\"EOF(\nname: foo_outbound\ntraffic_direction: OUTBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1239\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo_outbound = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo_outbound->target_, initialize());\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_outbound_yaml), \"\", true));\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_foo_outbound->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(2UL, manager_->listeners().size());\n\n  // Validate that stop listener is only called once - for inbound listeners.\n  EXPECT_CALL(*worker_, stopListener(_, _)).Times(1);\n  EXPECT_CALL(*listener_factory_.socket_, close()).Times(1);\n  
manager_->stopListeners(ListenerManager::StopListenersType::InboundOnly);\n  EXPECT_EQ(1, server_.stats_store_.counterFromString(\"listener_manager.listener_stopped\").value());\n\n  // Validate that listener creation in outbound direction is allowed.\n  const std::string listener_bar_outbound_yaml = R\"EOF(\nname: bar_outbound\ntraffic_direction: OUTBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1237\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_bar_outbound = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_outbound_yaml), \"\", true));\n  EXPECT_EQ(3UL, manager_->listeners().size());\n  worker_->callAddCompletion(true);\n\n  // Validate that adding a listener in stopped listener's traffic direction is not allowed.\n  const std::string listener_bar_yaml = R\"EOF(\nname: bar\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1235\nfilter_chains:\n- filters: []\n  )EOF\";\n  EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), \"\", true));\n\n  // Explicitly validate that in place filter chain update is not allowed.\n  auto in_place_foo_inbound_proto = foo_inbound_proto;\n  in_place_foo_inbound_proto.mutable_filter_chains(0)\n      ->mutable_filter_chain_match()\n      ->mutable_destination_port()\n      ->set_value(9999);\n\n  EXPECT_FALSE(manager_->addOrUpdateListener(in_place_foo_inbound_proto, \"\", true));\n  EXPECT_CALL(*listener_foo, onDestroy());\n  EXPECT_CALL(*listener_foo_outbound, onDestroy());\n  EXPECT_CALL(*listener_bar_outbound, onDestroy());\n}\n\n// Validates that StopListener functionality works correctly when all listeners are stopped.\nTEST_F(ListenerManagerImplTest, StopAllListeners) {\n  InSequence s;\n\n  
EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener into warming.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_factory_.socket_, close());\n  EXPECT_CALL(*listener_foo, onDestroy());\n  manager_->stopListeners(ListenerManager::StopListenersType::All);\n  EXPECT_EQ(1, server_.stats_store_.counterFromString(\"listener_manager.listener_stopped\").value());\n\n  // Validate that adding a listener is not allowed after all listeners are stopped.\n  const std::string listener_bar_yaml = R\"EOF(\nname: bar\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1235\nfilter_chains:\n- filters: []\n  )EOF\";\n  EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), \"\", true));\n}\n\n// Validate that stopping a warming listener, removes directly from warming listener list.\nTEST_F(ListenerManagerImplTest, StopWarmingListener) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener into warming.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 
1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Update foo into warming.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nper_connection_buffer_limit_bytes: 999\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_foo_update1->target_, initialize());\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", true));\n  EXPECT_EQ(1UL, manager_->listeners().size());\n\n  // Stop foo which should remove warming listener.\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_factory_.socket_, close());\n  EXPECT_CALL(*listener_foo, onDestroy());\n  manager_->stopListeners(ListenerManager::StopListenersType::InboundOnly);\n  EXPECT_EQ(1, server_.stats_store_.counterFromString(\"listener_manager.listener_stopped\").value());\n}\n\nTEST_F(ListenerManagerImplTest, AddListenerFailure) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener into active.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 0.0.0.0\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n 
 ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_));\n  worker_->callAddCompletion(false);\n\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo->drain_manager_->drain_sequence_completion_();\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callRemovalCompletion();\n\n  EXPECT_EQ(\n      1UL,\n      server_.stats_store_.counterFromString(\"listener_manager.listener_create_failure\").value());\n}\n\nTEST_F(ListenerManagerImplTest, StaticListenerAddFailure) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener into active.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 0.0.0.0\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, false);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", false));\n\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_));\n  worker_->callAddCompletion(false);\n\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo->drain_manager_->drain_sequence_completion_();\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callRemovalCompletion();\n\n  EXPECT_EQ(\n      1UL,\n      server_.stats_store_.counterFromString(\"listener_manager.listener_create_failure\").value());\n  EXPECT_EQ(0, 
manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) {\n  const std::string yaml = R\"EOF(\naddress:\n  socket_address:\n    address: \"::1\"\n    port_value: 10000\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  manager_->listeners().front().get().listenerScope().counterFromString(\"foo\").inc();\n\n  EXPECT_EQ(1UL, server_.stats_store_.counterFromString(\"listener.[__1]_10000.foo\").value());\n}\n\nTEST_F(ListenerManagerImplTest, DuplicateAddressDontBind) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener into warming.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 0.0.0.0\n    port_value: 1234\ndeprecated_v1:\n  bind_to_port: false\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false)));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n\n  // Add bar with same non-binding address. Should fail.\n  const std::string listener_bar_yaml = R\"EOF(\nname: bar\naddress:\n  socket_address:\n    address: 0.0.0.0\n    port_value: 1234\ndeprecated_v1:\n  bind_to_port: false\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_bar = expectListenerCreate(true, true);\n  EXPECT_CALL(*listener_bar, onDestroy());\n  EXPECT_THROW_WITH_MESSAGE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), \"\", true),\n      EnvoyException,\n      \"error adding listener: 'bar' has duplicate address '0.0.0.0:1234' as existing listener\");\n\n  // Move foo to active and then try to add again. 
This should still fail.\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n\n  listener_bar = expectListenerCreate(true, true);\n  EXPECT_CALL(*listener_bar, onDestroy());\n  EXPECT_THROW_WITH_MESSAGE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), \"\", true),\n      EnvoyException,\n      \"error adding listener: 'bar' has duplicate address '0.0.0.0:1234' as existing listener\");\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n}\n\nTEST_F(ListenerManagerImplTest, EarlyShutdown) {\n  // If stopWorkers is called before the workers are started, it should be a no-op: they should be\n  // neither started nor stopped.\n  EXPECT_CALL(*worker_, start(_)).Times(0);\n  EXPECT_CALL(*worker_, stop()).Times(0);\n  manager_->stopWorkers();\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationPortMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        destination_port: 8080\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  
manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // IPv4 client connects to unknown port - no match.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // IPv4 client connects to valid port - using 1st filter chain.\n  filter_chain = findFilterChain(8080, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n\n  // UDS client - no match.\n  filter_chain = findFilterChain(0, \"/tmp/test.sock\", \"\", \"tls\", {}, \"/tmp/test.sock\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationIPMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        prefix_ranges: { address_prefix: 127.0.0.0, prefix_len: 8 }\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ 
test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // IPv4 client connects to unknown IP - no match.\n  auto filter_chain = findFilterChain(1234, \"1.2.3.4\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // IPv4 client connects to valid IP - using 1st filter chain.\n  filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n\n  // UDS client - no match.\n  filter_chain = findFilterChain(0, \"/tmp/test.sock\", \"\", \"tls\", {}, \"/tmp/test.sock\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithServerNamesMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        server_names: \"server1.example.com\"\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": 
type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TLS client without SNI - no match.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // TLS client without matching SNI - no match.\n  filter_chain = findFilterChain(1234, \"127.0.0.1\", \"www.example.com\", \"tls\", {}, \"8.8.8.8\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // TLS client with matching SNI - using 1st filter chain.\n  filter_chain =\n      findFilterChain(1234, \"127.0.0.1\", \"server1.example.com\", \"tls\", {}, \"8.8.8.8\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithTransportProtocolMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { 
address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        transport_protocol: \"tls\"\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TCP client - no match.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"raw_buffer\", {}, \"8.8.8.8\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // TLS client - using 1st filter chain.\n  filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationProtocolMatch) {\n  const std::string yaml = 
TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        application_protocols: \"http/1.1\"\n        source_type: ANY\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TLS client without ALPN - no match.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // TLS client with \"http/1.1\" ALPN - using 1st filter chain.\n  filter_chain = findFilterChain(\n      1234, \"127.0.0.1\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, \"8.8.8.8\",\n      111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  
EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n}\n\n// Define a source_type filter chain match and test against it.\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        source_type: SAME_IP_OR_LOOPBACK\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // EXTERNAL IPv4 client without \"http/1.1\" ALPN - no match.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"8.8.8.8\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // LOCAL IPv4 client with \"http/1.1\" ALPN - using 1st filter chain.\n  filter_chain = findFilterChain(\n      1234, \"127.0.0.1\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, \"127.0.0.1\",\n      111);\n  ASSERT_NE(filter_chain, nullptr);\n  
EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n\n  // LOCAL UDS client with \"http/1.1\" ALPN - using 1st filter chain.\n  filter_chain = findFilterChain(\n      0, \"/tmp/test.sock\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11},\n      \"/tmp/test.sock\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n}\n\n// Verify source IP matches.\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        source_prefix_ranges:\n          - address_prefix: 10.0.0.1\n            prefix_len: 24\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // IPv4 client with source 10.0.1.1. No match.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"10.0.1.1\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // IPv4 client with source 10.0.0.10, Match.\n  filter_chain = findFilterChain(\n      1234, \"127.0.0.1\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, \"10.0.0.10\",\n      111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n\n  // IPv6 client. No match.\n  filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {},\n                                 \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // UDS client. 
No match.\n  filter_chain = findFilterChain(\n      0, \"/tmp/test.sock\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11},\n      \"/tmp/test.sock\", 0);\n  ASSERT_EQ(filter_chain, nullptr);\n}\n\n// Verify source IPv6 matches.\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpv6Match) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        source_prefix_ranges:\n          - address_prefix: 2001:0db8:85a3:0000:0000:0000:0000:0000\n            prefix_len: 64\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // IPv6 client with matching subnet. Match.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {},\n                                      \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\", 111);\n  EXPECT_NE(filter_chain, nullptr);\n\n  // IPv6 client with non-matching subnet. 
No match.\n  filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {},\n                                 \"2001:0db8:85a3:0001:0000:8a2e:0370:7334\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n}\n\n// Verify source port matches.\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        source_ports:\n          - 100\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // Client with source port 100. 
Match.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 100);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n\n  // Client with source port 101. No match.\n  filter_chain = findFilterChain(\n      1234, \"8.8.8.8\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, \"4.4.4.4\",\n      101);\n  ASSERT_EQ(filter_chain, nullptr);\n}\n\n// Define multiple source_type filter chain matches and test against them.\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceTypeMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        source_type: SAME_IP_OR_LOOPBACK\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n    - filter_chain_match:\n        application_protocols: \"http/1.1\"\n        source_type: EXTERNAL\n      transport_socket:\n        
name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\" }\n    - filter_chain_match:\n        source_type: ANY\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // LOCAL TLS client with \"http/1.1\" ALPN - no match.\n  auto filter_chain = findFilterChain(\n      1234, \"127.0.0.1\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, \"127.0.0.1\",\n      111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // LOCAL TLS client without \"http/1.1\" ALPN - using 1st filter chain.\n  filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = 
filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n\n  // EXTERNAL TLS client with \"http/1.1\" ALPN - using 2nd filter chain.\n  filter_chain = findFilterChain(\n      1234, \"8.8.8.8\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, \"4.4.4.4\",\n      111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto uri = ssl_socket->ssl()->uriSanLocalCertificate();\n  EXPECT_EQ(uri[0], \"spiffe://lyft.com/test-team\");\n\n  // EXTERNAL TLS client without \"http/1.1\" ALPN - using 3rd filter chain.\n  filter_chain = findFilterChain(1234, \"8.8.8.8\", \"\", \"tls\", {}, \"4.4.4.4\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 2);\n  EXPECT_EQ(server_names.front(), \"*.example.com\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinationPortMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: 
\"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        # empty\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\" }\n    - filter_chain_match:\n        destination_port: 8080\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n    - filter_chain_match:\n        destination_port: 8081\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  
EXPECT_EQ(1U, manager_->listeners().size());\n\n  // IPv4 client connects to default port - using 1st filter chain.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto uri = ssl_socket->ssl()->uriSanLocalCertificate();\n  EXPECT_EQ(uri[0], \"spiffe://lyft.com/test-team\");\n\n  // IPv4 client connects to port 8080 - using 2nd filter chain.\n  filter_chain = findFilterChain(8080, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n\n  // IPv4 client connects to port 8081 - using 3rd filter chain.\n  filter_chain = findFilterChain(8081, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 2);\n  EXPECT_EQ(server_names.front(), \"*.example.com\");\n\n  // UDS client - using 1st filter chain.\n  
filter_chain = findFilterChain(0, \"/tmp/test.sock\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  uri = ssl_socket->ssl()->uriSanLocalCertificate();\n  EXPECT_EQ(uri[0], \"spiffe://lyft.com/test-team\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinationIPMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        # empty\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\" }\n    - filter_chain_match:\n        prefix_ranges: { address_prefix: 192.168.0.1, prefix_len: 32 }\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n    - 
filter_chain_match:\n        prefix_ranges: { address_prefix: 192.168.0.0, prefix_len: 16 }\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // IPv4 client connects to default IP - using 1st filter chain.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto uri = ssl_socket->ssl()->uriSanLocalCertificate();\n  EXPECT_EQ(uri[0], \"spiffe://lyft.com/test-team\");\n\n  // IPv4 client connects to exact IP match - using 2nd filter chain.\n  filter_chain = findFilterChain(1234, \"192.168.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = 
dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n\n  // IPv4 client connects to wildcard IP match - using 3rd filter chain.\n  filter_chain = findFilterChain(1234, \"192.168.1.1\", \"\", \"tls\", {}, \"192.168.1.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 2);\n  EXPECT_EQ(server_names.front(), \"*.example.com\");\n\n  // UDS client - using 1st filter chain.\n  filter_chain = findFilterChain(0, \"/tmp/test.sock\", \"\", \"tls\", {}, \"/tmp/test.sock\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  uri = ssl_socket->ssl()->uriSanLocalCertificate();\n  EXPECT_EQ(uri[0], \"spiffe://lyft.com/test-team\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNamesMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        # empty\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": 
type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n    - filter_chain_match:\n        server_names: \"server1.example.com\"\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n    - filter_chain_match:\n        server_names: \"*.com\"\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n  )EOF\",\n                 
                                      Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TLS client without SNI - using 1st filter chain.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto uri = ssl_socket->ssl()->uriSanLocalCertificate();\n  EXPECT_EQ(uri[0], \"spiffe://lyft.com/test-team\");\n\n  // TLS client with exact SNI match - using 2nd filter chain.\n  filter_chain =\n      findFilterChain(1234, \"127.0.0.1\", \"server1.example.com\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n\n  // TLS client with wildcard SNI match - using 3rd filter chain.\n  filter_chain =\n      findFilterChain(1234, \"127.0.0.1\", \"server2.example.com\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  
ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 2);\n  EXPECT_EQ(server_names.front(), \"*.example.com\");\n\n  // TLS client with wildcard SNI match - using 3rd filter chain.\n  filter_chain =\n      findFilterChain(1234, \"127.0.0.1\", \"www.wildcard.com\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  ssl_socket = dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 2);\n  EXPECT_EQ(server_names.front(), \"*.example.com\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithTransportProtocolMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        # empty\n    - filter_chain_match:\n        transport_protocol: \"tls\"\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  
EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TCP client - using 1st filter chain.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"raw_buffer\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_FALSE(filter_chain->transportSocketFactory().implementsSecureTransport());\n\n  // TLS client - using 2nd filter chain.\n  filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicationProtocolMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        # empty\n    - filter_chain_match:\n        application_protocols: [\"dummy\", \"h2\"]\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n       
         private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TLS client without ALPN - using 1st filter chain.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_FALSE(filter_chain->transportSocketFactory().implementsSecureTransport());\n\n  // TLS client with \"h2,http/1.1\" ALPN - using 2nd filter chain.\n  filter_chain = findFilterChain(\n      1234, \"127.0.0.1\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, \"127.0.0.1\",\n      111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleRequirementsMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        # empty\n    - filter_chain_match:\n        server_names: [\"www.example.com\", 
\"server1.example.com\"]\n        transport_protocol: \"tls\"\n        application_protocols: [\"dummy\", \"h2\"]\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TLS client without SNI and ALPN - using 1st filter chain.\n  auto filter_chain = findFilterChain(1234, \"127.0.0.1\", \"\", \"tls\", {}, \"127.0.0.1\", 111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_FALSE(filter_chain->transportSocketFactory().implementsSecureTransport());\n\n  // TLS client with exact SNI match but without ALPN - no match (SNI blackholed by configuration).\n  filter_chain =\n      findFilterChain(1234, \"127.0.0.1\", \"server1.example.com\", \"tls\", {}, \"127.0.0.1\", 111);\n  EXPECT_EQ(filter_chain, nullptr);\n\n  // TLS client with ALPN match but without SNI - using 1st filter chain.\n  filter_chain = findFilterChain(\n      1234, \"127.0.0.1\", \"\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, \"127.0.0.1\",\n      111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_FALSE(filter_chain->transportSocketFactory().implementsSecureTransport());\n\n  // TLS client with exact SNI match and ALPN match - using 2nd filter chain.\n  filter_chain = findFilterChain(\n    
  1234, \"127.0.0.1\", \"server1.example.com\", \"tls\",\n      {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, \"127.0.0.1\",\n      111);\n  ASSERT_NE(filter_chain, nullptr);\n  EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport());\n  auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr);\n  auto ssl_socket =\n      dynamic_cast<Extensions::TransportSockets::Tls::SslSocket*>(transport_socket.get());\n  auto server_names = ssl_socket->ssl()->dnsSansLocalCertificate();\n  EXPECT_EQ(server_names.size(), 1);\n  EXPECT_EQ(server_names.front(), \"server1.example.com\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDifferentSessionTicketKeys) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        server_names: \"example.com\"\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n    - filter_chain_match:\n        server_names: \"www.example.com\"\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n     
       tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_b\"\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest,\n       MultipleFilterChainsWithMixedUseOfSessionTicketKeys) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        server_names: \"example.com\"\n      transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n          session_ticket_keys:\n            keys:\n            - filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a\"\n    - filter_chain_match:\n        server_names: \"www.example.com\"\n      transport_socket:\n        name: tls\n       
 typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithInvalidDestinationIPMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        prefix_ranges: { address_prefix: a.b.c.d, prefix_len: 32 }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException, \"malformed IP address: a.b.c.d\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithInvalidServerNamesMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        server_names: \"*w.example.com\"\n  )EOF\",\n                                    
                   Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException,\n                            \"error adding listener '127.0.0.1:1234': partial wildcards are not \"\n                            \"supported in \\\"server_names\\\"\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithSameMatch) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - name : foo\n      filter_chain_match:\n        transport_protocol: \"tls\"\n    - name: bar\n      filter_chain_match:\n        transport_protocol: \"tls\"\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException,\n                            \"error adding listener '127.0.0.1:1234': filter chain 'bar' has \"\n                            \"the same matching rules defined as 'foo'\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest,\n       MultipleFilterChainsWithSameMatchPlusUnimplementedFields) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - name: foo\n      filter_chain_match:\n        transport_protocol: \"tls\"\n    - name: bar\n      filter_chain_match:\n        transport_protocol: \"tls\"\n        address_suffix: 127.0.0.0\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  
EXPECT_THROW_WITH_MESSAGE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true), EnvoyException,\n      \"error adding listener '127.0.0.1:1234': filter chain 'bar' contains unimplemented fields\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithOverlappingRules) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.filters.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        server_names: \"example.com\"\n    - filter_chain_match:\n        server_names: [\"example.com\", \"www.example.com\"]\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException,\n                            \"error adding listener '127.0.0.1:1234': multiple filter chains with \"\n                            \"overlapping matching rules are defined\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TlsFilterChainWithoutTlsInspector) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - filter_chain_match:\n        transport_protocol: \"tls\"\n    - filter_chain_match:\n        # empty\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TLS Inspector is automatically injected for filter chains with TLS requirements,\n  // so make sure there is exactly 
1 listener filter (and assume it's TLS Inspector).\n  Network::ListenerConfig& listener = manager_->listeners().back().get();\n  Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory();\n  Network::MockListenerFilterManager manager;\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&,\n                           Network::ListenerFilterPtr&) -> void {}));\n  EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager));\n}\n\n// Test the tls inspector is not injected twice when the deprecated name is used.\nTEST_F(ListenerManagerImplWithRealFiltersTest,\n       DEPRECATED_FEATURE_TEST(TlsFilterChainWithDeprecatedTlsInspectorName)) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    listener_filters:\n    - name: \"envoy.listener.tls_inspector\"\n      typed_config: {}\n    filter_chains:\n    - filter_chain_match:\n        transport_protocol: \"tls\"\n    - filter_chain_match:\n        # empty\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // Make sure there is exactly 1 listener filter (and assume it's TLS Inspector). 
2 filters\n  // would imply incorrect injection of a second filter.\n  Network::ListenerConfig& listener = manager_->listeners().back().get();\n  Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory();\n  Network::MockListenerFilterManager manager;\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&,\n                           Network::ListenerFilterPtr&) -> void {}));\n  EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager));\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, SniFilterChainWithoutTlsInspector) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - filter_chain_match:\n        server_names: \"example.com\"\n    - filter_chain_match:\n        # empty\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TLS Inspector is automatically injected for filter chains with SNI requirements,\n  // so make sure there is exactly 1 listener filter (and assume it's TLS Inspector).\n  Network::ListenerConfig& listener = manager_->listeners().back().get();\n  Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory();\n  Network::MockListenerFilterManager manager;\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&,\n                           Network::ListenerFilterPtr&) -> void {}));\n  EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager));\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, AlpnFilterChainWithoutTlsInspector) {\n  const 
std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - filter_chain_match:\n        application_protocols: [\"h2\", \"http/1.1\"]\n    - filter_chain_match:\n        # empty\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  // TLS Inspector is automatically injected for filter chains with ALPN requirements,\n  // so make sure there is exactly 1 listener filter (and assume it's TLS Inspector).\n  Network::ListenerConfig& listener = manager_->listeners().back().get();\n  Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory();\n  Network::MockListenerFilterManager manager;\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&,\n                           Network::ListenerFilterPtr&) -> void {}));\n  EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager));\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, CustomTransportProtocolWithSniWithoutTlsInspector) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - filter_chain_match:\n        server_names: \"example.com\"\n        transport_protocol: \"custom\"\n    - filter_chain_match:\n        # empty\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  
EXPECT_EQ(1U, manager_->listeners().size());\n\n  // Make sure there are no listener filters (i.e. no automatically injected TLS Inspector).\n  Network::ListenerConfig& listener = manager_->listeners().back().get();\n  Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory();\n  Network::MockListenerFilterManager manager;\n  EXPECT_CALL(manager, addAcceptFilter_(_, _)).Times(0);\n  EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager));\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInline) {\n  const std::string cert = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\"));\n  const std::string pkey = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\"));\n  const std::string ca = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\"));\n  const std::string yaml = absl::StrCat(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { inline_string: \")EOF\",\n                                        absl::CEscape(cert), R\"EOF(\" }\n                private_key: { inline_string: \")EOF\",\n                                        absl::CEscape(pkey), R\"EOF(\" }\n            validation_context:\n              trusted_ca: { inline_string: \")EOF\",\n                                        absl::CEscape(ca), R\"EOF(\" }\n  )EOF\");\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  
EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateChainInlinePrivateKeyFilename) {\n  const std::string cert = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\"));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\" }\n                certificate_chain: { inline_string: \")EOF\",\n                                                                    absl::CEscape(cert), R\"EOF(\" }\n  )EOF\"),\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateIncomplete) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - 
certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true), EnvoyException,\n      TestEnvironment::substitute(\n          \"Failed to load incomplete certificate from {{ test_rundir }}\"\n          \"/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem, \",\n          Network::Address::IpVersion::v4));\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidCertificateChain) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { inline_string: \"invalid\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException, \"Failed to load certificate chain from <inline>\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidIntermediateCA) {\n  const std::string leaf = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_cert.pem\"));\n  const std::string yaml = TestEnvironment::substitute(\n      absl::StrCat(\n          R\"EOF(\n    address:\n      socket_address: { address: 
127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { inline_string: \")EOF\",\n          absl::CEscape(leaf),\n          R\"EOF(\\n-----BEGIN CERTIFICATE-----\\nDEFINITELY_INVALID_CERTIFICATE\\n-----END CERTIFICATE-----\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\" }\n  )EOF\"),\n      Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException, \"Failed to load certificate chain from <inline>\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidPrivateKey) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\" }\n                private_key: { inline_string: \"invalid\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException, \"Failed to load private key from <inline>\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidTrustedCA) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      
socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem\" }\n            validation_context:\n              trusted_ca: { inline_string: \"invalid\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException, \"Failed to load trusted CA certificates from <inline>\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, Metadata) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    metadata: { filter_metadata: { com.bar.foo: { baz: test_value } } }\n    traffic_direction: INBOUND\n    filter_chains:\n    - filter_chain_match:\n      filters:\n      - name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          stat_prefix: metadata_test\n          route_config:\n            virtual_hosts:\n            - name: \"some_virtual_host\"\n              domains: [\"some.domain\"]\n              routes:\n              - match: { prefix: \"/\" }\n                route: { cluster: service_foo }\n    listener_filters:\n    - name: \"envoy.filters.listener.original_dst\"\n      typed_config: {}\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n  
Configuration::ListenerFactoryContext* listener_factory_context = nullptr;\n  // Extract listener_factory_context avoid accessing private member.\n  ON_CALL(listener_factory_, createListenerFilterFactoryList(_, _))\n      .WillByDefault(\n          Invoke([&listener_factory_context](\n                     const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>&\n                         filters,\n                     Configuration::ListenerFactoryContext& context)\n                     -> std::vector<Network::ListenerFilterFactoryCb> {\n            listener_factory_context = &context;\n            return ProdListenerComponentFactory::createListenerFilterFactoryList_(filters, context);\n          }));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  ASSERT_NE(nullptr, listener_factory_context);\n  EXPECT_EQ(\"test_value\", Config::Metadata::metadataValue(\n                              &listener_factory_context->listenerMetadata(), \"com.bar.foo\", \"baz\")\n                              .string_value());\n  EXPECT_EQ(envoy::config::core::v3::INBOUND, listener_factory_context->direction());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1111 }\n    filter_chains: {}\n    listener_filters:\n    - name: \"envoy.filters.listener.original_dst\"\n      typed_config: {}\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  Network::ListenerConfig& listener = manager_->listeners().back().get();\n\n  Network::FilterChainFactory& filterChainFactory = 
listener.filterChainFactory();\n  Network::MockListenerFilterManager manager;\n\n#ifdef SOL_IP\n  // Return error when trying to retrieve the original dst on the invalid handle\n  EXPECT_CALL(os_sys_calls_, getsockopt_(_, _, _, _, _)).WillOnce(Return(-1));\n#endif\n\n  NiceMock<Network::MockListenerFilterCallbacks> callbacks;\n  Network::AcceptedSocketImpl socket(std::make_unique<Network::IoSocketHandleImpl>(),\n                                     Network::Address::InstanceConstSharedPtr{\n                                         new Network::Address::Ipv4Instance(\"127.0.0.1\", 1234)},\n                                     Network::Address::InstanceConstSharedPtr{\n                                         new Network::Address::Ipv4Instance(\"127.0.0.1\", 5678)});\n\n  EXPECT_CALL(callbacks, socket()).WillOnce(Invoke([&]() -> Network::ConnectionSocket& {\n    return socket;\n  }));\n\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&,\n                           Network::ListenerFilterPtr& filter) -> void {\n        EXPECT_EQ(Network::FilterStatus::Continue, filter->onAccept(callbacks));\n      }));\n\n  EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager));\n}\n\nclass OriginalDstTestFilter : public Extensions::ListenerFilters::OriginalDst::OriginalDstFilter {\n  Network::Address::InstanceConstSharedPtr getOriginalDst(Network::Socket&) override {\n    return Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv4Instance(\"127.0.0.2\", 2345)};\n  }\n};\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilter) {\n  class OriginalDstTestConfigFactory : public Configuration::NamedListenerFilterConfigFactory {\n  public:\n    // NamedListenerFilterConfigFactory\n    Network::ListenerFilterFactoryCb\n    createListenerFilterFactoryFromProto(const Protobuf::Message&,\n                                         const 
Network::ListenerFilterMatcherSharedPtr&,\n                                         Configuration::ListenerFactoryContext&) override {\n      return [](Network::ListenerFilterManager& filter_manager) -> void {\n        filter_manager.addAcceptFilter(nullptr, std::make_unique<OriginalDstTestFilter>());\n      };\n    }\n\n    ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n      // Using Struct instead of a custom per-filter empty config proto\n      // This is only allowed in tests.\n      return std::make_unique<Envoy::ProtobufWkt::Struct>();\n    }\n\n    std::string name() const override { return \"test.listener.original_dst\"; }\n  };\n\n  OriginalDstTestConfigFactory factory;\n  Registry::InjectFactory<Configuration::NamedListenerFilterConfigFactory> registration(factory);\n\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1111 }\n    filter_chains: {}\n    listener_filters:\n    - name: \"test.listener.original_dst\"\n      typed_config: {}\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  Network::ListenerConfig& listener = manager_->listeners().back().get();\n\n  Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory();\n  Network::MockListenerFilterManager manager;\n\n  NiceMock<Network::MockListenerFilterCallbacks> callbacks;\n  Network::AcceptedSocketImpl socket(\n      std::make_unique<Network::IoSocketHandleImpl>(),\n      std::make_unique<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1234),\n      std::make_unique<Network::Address::Ipv4Instance>(\"127.0.0.1\", 5678));\n\n  EXPECT_CALL(callbacks, socket()).WillOnce(Invoke([&]() -> 
Network::ConnectionSocket& {\n    return socket;\n  }));\n\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&,\n                           Network::ListenerFilterPtr& filter) -> void {\n        EXPECT_EQ(Network::FilterStatus::Continue, filter->onAccept(callbacks));\n      }));\n\n  EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager));\n  EXPECT_TRUE(socket.localAddressRestored());\n  EXPECT_EQ(\"127.0.0.2:2345\", socket.localAddress()->asString());\n}\n\nclass OriginalDstTestFilterIPv6\n    : public Extensions::ListenerFilters::OriginalDst::OriginalDstFilter {\n  Network::Address::InstanceConstSharedPtr getOriginalDst(Network::Socket&) override {\n    return Network::Address::InstanceConstSharedPtr{\n        new Network::Address::Ipv6Instance(\"1::2\", 2345)};\n  }\n};\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) {\n  class OriginalDstTestConfigFactory : public Configuration::NamedListenerFilterConfigFactory {\n  public:\n    // NamedListenerFilterConfigFactory\n    Network::ListenerFilterFactoryCb\n    createListenerFilterFactoryFromProto(const Protobuf::Message&,\n                                         const Network::ListenerFilterMatcherSharedPtr&,\n                                         Configuration::ListenerFactoryContext&) override {\n      return [](Network::ListenerFilterManager& filter_manager) -> void {\n        filter_manager.addAcceptFilter(nullptr, std::make_unique<OriginalDstTestFilterIPv6>());\n      };\n    }\n\n    ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n      // Using Struct instead of a custom per-filter empty config proto\n      // This is only allowed in tests.\n      return std::make_unique<Envoy::ProtobufWkt::Struct>();\n    }\n\n    std::string name() const override { return \"test.listener.original_dstipv6\"; }\n  };\n\n  OriginalDstTestConfigFactory factory;\n  
Registry::InjectFactory<Configuration::NamedListenerFilterConfigFactory> registration(factory);\n\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: ::0001, port_value: 1111 }\n    filter_chains: {}\n    listener_filters:\n    - name: \"test.listener.original_dstipv6\"\n      typed_config: {}\n  )EOF\",\n                                                       Network::Address::IpVersion::v6);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n\n  Network::ListenerConfig& listener = manager_->listeners().back().get();\n\n  Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory();\n  Network::MockListenerFilterManager manager;\n\n  NiceMock<Network::MockListenerFilterCallbacks> callbacks;\n  Network::AcceptedSocketImpl socket(\n      std::make_unique<Network::IoSocketHandleImpl>(),\n      std::make_unique<Network::Address::Ipv6Instance>(\"::0001\", 1234),\n      std::make_unique<Network::Address::Ipv6Instance>(\"::0001\", 5678));\n\n  EXPECT_CALL(callbacks, socket()).WillOnce(Invoke([&]() -> Network::ConnectionSocket& {\n    return socket;\n  }));\n\n  EXPECT_CALL(manager, addAcceptFilter_(_, _))\n      .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&,\n                           Network::ListenerFilterPtr& filter) -> void {\n        EXPECT_EQ(Network::FilterStatus::Continue, filter->onAccept(callbacks));\n      }));\n\n  EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager));\n  EXPECT_TRUE(socket.localAddressRestored());\n  EXPECT_EQ(\"[1::2]:2345\", socket.localAddress()->asString());\n}\n\n// Validate that when neither transparent nor freebind is not set in the\n// Listener, we see no socket option set.\nTEST_F(ListenerManagerImplWithRealFiltersTest, 
TransparentFreebindListenerDisabled) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    name: \"TestListener\"\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1111 }\n    transparent: false\n    freebind: false\n    filter_chains:\n    - filters:\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}))\n      .WillOnce(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type,\n                           const Network::Socket::OptionsSharedPtr& options,\n                           const ListenSocketCreationParams&) -> Network::SocketSharedPtr {\n        EXPECT_EQ(options, nullptr);\n        return listener_factory_.socket_;\n      }));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n}\n\n// Validate that when transparent is set in the Listener, we see the socket option\n// propagated to setsockopt(). This is as close to an end-to-end test as we have\n// for this feature, due to the complexity of creating an integration test\n// involving the network stack. We only test the IPv4 case here, as the logic\n// around IPv4/IPv6 handling is tested generically in\n// socket_option_impl_test.cc.\nTEST_F(ListenerManagerImplWithRealFiltersTest, TransparentListenerEnabled) {\n  auto listener = createIPv4Listener(\"TransparentListener\");\n  listener.mutable_transparent()->set_value(true);\n  testSocketOption(listener, envoy::config::core::v3::SocketOption::STATE_PREBIND,\n                   ENVOY_SOCKET_IP_TRANSPARENT, /* expected_value */ 1,\n                   /* expected_num_options */ 2);\n}\n\n// Validate that when freebind is set in the Listener, we see the socket option\n// propagated to setsockopt(). 
This is as close to an end-to-end test as we have\n// for this feature, due to the complexity of creating an integration test\n// involving the network stack. We only test the IPv4 case here, as the logic\n// around IPv4/IPv6 handling is tested generically in\n// socket_option_impl_test.cc.\nTEST_F(ListenerManagerImplWithRealFiltersTest, FreebindListenerEnabled) {\n  auto listener = createIPv4Listener(\"FreebindListener\");\n  listener.mutable_freebind()->set_value(true);\n\n  testSocketOption(listener, envoy::config::core::v3::SocketOption::STATE_PREBIND,\n                   ENVOY_SOCKET_IP_FREEBIND, /* expected_value */ 1);\n}\n\n// Validate that when tcp_fast_open_queue_length is set in the Listener, we see the socket option\n// propagated to setsockopt(). This is as close to an end-to-end test as we have\n// for this feature, due to the complexity of creating an integration test\n// involving the network stack. We only test the IPv4 case here, as the logic\n// around IPv4/IPv6 handling is tested generically in\n// socket_option_impl_test.cc.\nTEST_F(ListenerManagerImplWithRealFiltersTest, FastOpenListenerEnabled) {\n  auto listener = createIPv4Listener(\"FastOpenListener\");\n  listener.mutable_tcp_fast_open_queue_length()->set_value(1);\n\n  testSocketOption(listener, envoy::config::core::v3::SocketOption::STATE_LISTENING,\n                   ENVOY_SOCKET_TCP_FASTOPEN, /* expected_value */ 1);\n}\n\n// Validate that when reuse_port is set in the Listener, we see the socket option\n// propagated to setsockopt().\nTEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerEnabledForTcp) {\n  auto listener = createIPv4Listener(\"ReusePortListener\");\n  listener.set_reuse_port(true);\n  // when reuse_port is true, port should be 0 for creating the shared socket,\n  // otherwise socket creation will be done on worker thread.\n  listener.mutable_address()->mutable_socket_address()->set_port_value(0);\n  testSocketOption(listener, 
envoy::config::core::v3::SocketOption::STATE_PREBIND,\n                   ENVOY_SOCKET_SO_REUSEPORT, /* expected_value */ 1,\n                   /* expected_num_options */ 1,\n                   /* expected_creation_params */ {true, false});\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerDisabled) {\n  auto listener = createIPv4Listener(\"UdpListener\");\n  listener.mutable_address()->mutable_socket_address()->set_protocol(\n      envoy::config::core::v3::SocketAddress::UDP);\n  // For UDP, verify that we fail if reuse port is false and concurrency is > 1.\n  listener.set_reuse_port(false);\n  server_.options_.concurrency_ = 2;\n\n  EXPECT_THROW_WITH_MESSAGE(\n      manager_->addOrUpdateListener(listener, \"\", true), EnvoyException,\n      \"Listening on UDP when concurrency is > 1 without the SO_REUSEPORT socket option results in \"\n      \"unstable packet proxying. Configure the reuse_port listener option or set concurrency = 1.\");\n  EXPECT_EQ(0, manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) {\n  const envoy::config::listener::v3::Listener listener = parseListenerFromV3Yaml(R\"EOF(\n    name: SockoptsListener\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1111 }\n    filter_chains:\n    - filters:\n    socket_options: [\n      # The socket goes through socket() and bind() but never listen(), so if we\n      # ever saw (7, 8, 9) being applied it would cause a EXPECT_CALL failure.\n      { level: 1, name: 2, int_value: 3, state: STATE_PREBIND },\n      { level: 4, name: 5, int_value: 6, state: STATE_BOUND },\n      { level: 7, name: 8, int_value: 9, state: STATE_LISTENING },\n    ]\n  )EOF\");\n\n  expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND,\n                           /* expected_num_options */ 3);\n  expectSetsockopt(\n      /* expected_sockopt_level */ 1,\n      /* expected_sockopt_name */ 2,\n      /* 
expected_value */ 3);\n  expectSetsockopt(\n      /* expected_sockopt_level */ 4,\n      /* expected_sockopt_name */ 5,\n      /* expected_value */ 6);\n  manager_->addOrUpdateListener(listener, \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n}\n\n// Set the resolver to the default IP resolver. The address resolver logic is unit tested in\n// resolver_impl_test.cc.\nTEST_F(ListenerManagerImplWithRealFiltersTest, AddressResolver) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    name: AddressResolverdListener\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1111, resolver_name: envoy.mock.resolver }\n    filter_chains:\n    - filters:\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  NiceMock<Network::MockAddressResolver> mock_resolver;\n  EXPECT_CALL(mock_resolver, resolve(_))\n      .Times(2)\n      .WillRepeatedly(Return(Network::Utility::parseInternetAddress(\"127.0.0.1\", 1111, false)));\n  Registry::InjectFactory<Network::Address::Resolver> register_resolver(mock_resolver);\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, CRLFilename) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n            validation_context:\n              trusted_ca: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\" }\n              crl: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, CRLInline) {\n  const std::string crl = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(\n      \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\"));\n  const std::string yaml = TestEnvironment::substitute(absl::StrCat(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n            validation_context:\n              trusted_ca: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\" }\n              crl: { inline_string: \")EOF\",\n                                                                    absl::CEscape(crl), R\"EOF(\" }\n  )EOF\"),\n                                                       
Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(server_.api_.random_, uuid());\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, InvalidCRLInline) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n            validation_context:\n              trusted_ca: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\" }\n              crl: { inline_string: \"-----BEGIN X509 CRL-----\\nTOTALLY_NOT_A_CRL_HERE\\n-----END X509 CRL-----\\n\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException, \"Failed to load CRL from <inline>\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, CRLWithNoCA) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n 
             - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n            validation_context:\n              crl: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.crl\" }\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                          EnvoyException, \"^Failed to load CRL from .* without trusted CA$\");\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, VerifySanWithNoCA) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n            validation_context:\n              match_subject_alt_names:\n                 exact: \"spiffe://lyft.com/testclient\"\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException,\n                            \"SAN-based verification of peer certificates without trusted CA \"\n                            \"is insecure and not allowed\");\n}\n\n// Disabling 
certificate expiration checks only makes sense with a trusted CA.\nTEST_F(ListenerManagerImplWithRealFiltersTest, VerifyIgnoreExpirationWithNoCA) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n            validation_context:\n              allow_expired_certificate: true\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true),\n                            EnvoyException,\n                            \"Certificate validity period is always ignored without trusted CA\");\n}\n\n// Verify that with a CA, expired certificates are allowed.\nTEST_F(ListenerManagerImplWithRealFiltersTest, VerifyIgnoreExpirationWithCA) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1234 }\n    filter_chains:\n    - transport_socket:\n        name: tls\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext\n          common_tls_context:\n            tls_certificates:\n              - certificate_chain: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem\" }\n                private_key: { filename: \"{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem\" }\n\n            validation_context:\n              trusted_ca: { filename: \"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem\" }\n              allow_expired_certificate: true\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_NO_THROW(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true));\n}\n\n// Validate that dispatcher stats prefix is set correctly when enabled.\nTEST_F(ListenerManagerImplWithDispatcherStatsTest, DispatherStatsWithCorrectPrefix) {\n  EXPECT_CALL(*worker_, start(_));\n  EXPECT_CALL(*worker_, initializeStats(_));\n  manager_->startWorkers(guard_dog_);\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, ApiListener) {\n  const std::string yaml = R\"EOF(\nname: test_api_listener\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\napi_listener:\n  api_listener:\n    \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n    stat_prefix: hcm\n    route_config:\n      name: api_router\n      virtual_hosts:\n        - name: api\n          domains:\n            - \"*\"\n          routes:\n            - match:\n                prefix: \"/\"\n              route:\n                cluster: dynamic_forward_proxy_cluster\n  )EOF\";\n\n  ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", false));\n  EXPECT_EQ(0U, manager_->listeners().size());\n  ASSERT_TRUE(manager_->apiListener().has_value());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, ApiListenerNotAllowedAddedViaApi) {\n  const std::string yaml = R\"EOF(\nname: test_api_listener\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\napi_listener:\n  api_listener:\n    \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n    stat_prefix: 
hcm\n    route_config:\n      name: api_router\n      virtual_hosts:\n        - name: api\n          domains:\n            - \"*\"\n          routes:\n            - match:\n                prefix: \"/\"\n              route:\n                cluster: dynamic_forward_proxy_cluster\n  )EOF\";\n\n  ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true));\n  EXPECT_EQ(0U, manager_->listeners().size());\n  ASSERT_FALSE(manager_->apiListener().has_value());\n}\n\nTEST_F(ListenerManagerImplWithRealFiltersTest, ApiListenerOnlyOneApiListener) {\n  const std::string yaml = R\"EOF(\nname: test_api_listener\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\napi_listener:\n  api_listener:\n    \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n    stat_prefix: hcm\n    route_config:\n      name: api_router\n      virtual_hosts:\n        - name: api\n          domains:\n            - \"*\"\n          routes:\n            - match:\n                prefix: \"/\"\n              route:\n                cluster: dynamic_forward_proxy_cluster\n  )EOF\";\n\n  const std::string yaml2 = R\"EOF(\nname: test_api_listener_2\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\napi_listener:\n  api_listener:\n    \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n    stat_prefix: hcm\n    route_config:\n      name: api_router\n      virtual_hosts:\n        - name: api\n          domains:\n            - \"*\"\n          routes:\n            - match:\n                prefix: \"/\"\n              route:\n                cluster: dynamic_forward_proxy_cluster\n  )EOF\";\n\n  ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", false));\n  EXPECT_EQ(0U, manager_->listeners().size());\n  ASSERT_TRUE(manager_->apiListener().has_value());\n  EXPECT_EQ(\"test_api_listener\", 
manager_->apiListener()->get().name());\n\n  // Only one ApiListener is added.\n  ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", false));\n  EXPECT_EQ(0U, manager_->listeners().size());\n  // The original ApiListener is there.\n  ASSERT_TRUE(manager_->apiListener().has_value());\n  EXPECT_EQ(\"test_api_listener\", manager_->apiListener()->get().name());\n}\n\nTEST_F(ListenerManagerImplTest, StopInplaceWarmingListener) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener into warming.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  EXPECT_EQ(0, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n\n  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Update foo into warming.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  filter_chain_match:\n    destination_port: 1234\n  )EOF\";\n\n  ListenerHandle* listener_foo_update1 = expectListenerOverridden(true);\n  EXPECT_CALL(listener_foo_update1->target_, initialize());\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", 
true));\n  EXPECT_EQ(1, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n\n  EXPECT_EQ(1UL, manager_->listeners().size());\n\n  // Stop foo which should remove warming listener.\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_factory_.socket_, close());\n  EXPECT_CALL(*listener_foo, onDestroy());\n  manager_->stopListeners(ListenerManager::StopListenersType::InboundOnly);\n  EXPECT_EQ(1, server_.stats_store_.counter(\"listener_manager.listener_stopped\").value());\n}\n\nTEST_F(ListenerManagerImplTest, RemoveInplaceUpdatingListener) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener into warming.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  EXPECT_EQ(0, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n\n  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n\n  // Update foo into warming.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  filter_chain_match:\n    destination_port: 1234\n  )EOF\";\n\n  ListenerHandle* listener_foo_update1 = expectListenerOverridden(true);\n  
EXPECT_CALL(listener_foo_update1->target_, initialize());\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", true));\n  EXPECT_EQ(1, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0);\n\n  // Remove foo which should remove both warming and active.\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_factory_.socket_, close());\n  EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_));\n  EXPECT_TRUE(manager_->removeListener(\"foo\"));\n  checkStats(__LINE__, 1, 1, 1, 0, 0, 1, 0);\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo->drain_manager_->drain_sequence_completion_();\n  checkStats(__LINE__, 1, 1, 1, 0, 0, 1, 0);\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callRemovalCompletion();\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 1, 1, 0, 0, 0, 0);\n}\n\nTEST_F(ListenerManagerImplTest, UpdateInplaceWarmingListener) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener into warming.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  EXPECT_EQ(0, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n\n  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  
listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n\n  // Update foo into warming.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  filter_chain_match:\n    destination_port: 1234\n  )EOF\";\n\n  ListenerHandle* listener_foo_update1 = expectListenerOverridden(true);\n  EXPECT_CALL(listener_foo_update1->target_, initialize());\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", true));\n  EXPECT_EQ(1, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0);\n\n  // Listener warmed up.\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_CALL(*listener_foo, onDestroy());\n  listener_foo_update1->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n}\n\nTEST_F(ListenerManagerImplTest, DrainageDuringInplaceUpdate) {\n  InSequence s;\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener into warming.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(true, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  EXPECT_CALL(listener_foo->target_, initialize());\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  
listener_foo->target_.ready();\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n\n  // Update foo into warming.\n  const std::string listener_foo_update1_yaml = R\"EOF(\nname: foo\ntraffic_direction: INBOUND\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters:\n  filter_chain_match:\n    destination_port: 1234\n  )EOF\";\n\n  ListenerHandle* listener_foo_update1 = expectListenerOverridden(true);\n  EXPECT_CALL(listener_foo_update1->target_, initialize());\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", true));\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  EXPECT_EQ(1, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n  checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0);\n\n  // The warmed up starts the drain timer.\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_CALL(server_.options_, drainTime()).WillOnce(Return(std::chrono::seconds(600)));\n  Event::MockTimer* filter_chain_drain_timer = new Event::MockTimer(&server_.dispatcher_);\n  EXPECT_CALL(*filter_chain_drain_timer, enableTimer(std::chrono::milliseconds(600000), _));\n  listener_foo_update1->target_.ready();\n  checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 1);\n\n  // Timer expires, worker close connections if any.\n  EXPECT_CALL(*worker_, removeFilterChains(_, _, _));\n  filter_chain_drain_timer->invokeCallback();\n\n  // Once worker clean up is done, it's safe for the main thread to remove the original listener.\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callDrainFilterChainsComplete();\n  checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0);\n\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n}\n\nTEST(ListenerMessageUtilTest, ListenerMessageSameAreEquivalent) {\n  envoy::config::listener::v3::Listener listener1;\n  envoy::config::listener::v3::Listener listener2;\n  
EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2));\n}\n\nTEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentNameNotEquivalent) {\n  envoy::config::listener::v3::Listener listener1;\n  listener1.set_name(\"listener1\");\n  envoy::config::listener::v3::Listener listener2;\n  listener2.set_name(\"listener2\");\n  EXPECT_FALSE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2));\n}\n\nTEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentFilterChainsAreEquivalent) {\n  envoy::config::listener::v3::Listener listener1;\n  listener1.set_name(\"common\");\n  auto add_filter_chain_1 = listener1.add_filter_chains();\n  add_filter_chain_1->set_name(\"127.0.0.1\");\n\n  envoy::config::listener::v3::Listener listener2;\n  listener2.set_name(\"common\");\n  auto add_filter_chain_2 = listener2.add_filter_chains();\n  add_filter_chain_2->set_name(\"127.0.0.2\");\n\n  EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2));\n}\n\nTEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfWorkerNotStarted) {\n  // Worker is not started yet.\n  auto listener_proto = createDefaultListener();\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n  manager_->addOrUpdateListener(listener_proto, \"\", true);\n  EXPECT_EQ(1u, manager_->listeners().size());\n\n  // Mutate the listener message as filter chain change only.\n  auto new_listener_proto = listener_proto;\n  new_listener_proto.mutable_filter_chains(0)\n      ->mutable_filter_chain_match()\n      ->mutable_destination_port()\n      ->set_value(9999);\n\n  EXPECT_CALL(*listener_foo, onDestroy());\n  ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true);\n  manager_->addOrUpdateListener(new_listener_proto, \"\", true);\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n  EXPECT_EQ(0, 
server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n}\n\nTEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAnyListenerIsNotTcp) {\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  auto listener_proto = createDefaultListener();\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n\n  expectAddListener(listener_proto, listener_foo);\n\n  auto new_listener_proto = listener_proto;\n  new_listener_proto.mutable_address()->mutable_socket_address()->set_protocol(\n      envoy::config::core::v3::SocketAddress_Protocol::SocketAddress_Protocol_UDP);\n\n  ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true);\n  expectUpdateToThenDrain(new_listener_proto, listener_foo);\n\n  expectRemove(new_listener_proto, listener_foo_update1);\n\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  EXPECT_EQ(0, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n}\n\nTEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest,\n       TraditionalUpdateIfImplicitTlsInspectorChanges) {\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  auto listener_proto = createDefaultListener();\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  expectAddListener(listener_proto, listener_foo);\n\n  ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true);\n\n  auto new_listener_proto = listener_proto;\n  *new_listener_proto.mutable_filter_chains(0)\n       ->mutable_filter_chain_match()\n       ->mutable_application_protocols()\n       ->Add() = \"alpn\";\n  expectUpdateToThenDrain(new_listener_proto, listener_foo);\n\n  expectRemove(new_listener_proto, listener_foo_update1);\n\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  EXPECT_EQ(0, 
server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n}\n\nTEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest,\n       TraditionalUpdateIfImplicitProxyProtocolChanges) {\n\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  auto listener_proto = createDefaultListener();\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  expectAddListener(listener_proto, listener_foo);\n\n  ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true);\n\n  auto new_listener_proto = listener_proto;\n  new_listener_proto.mutable_filter_chains(0)->mutable_use_proxy_proto()->set_value(true);\n\n  expectUpdateToThenDrain(new_listener_proto, listener_foo);\n  expectRemove(new_listener_proto, listener_foo_update1);\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  EXPECT_EQ(0, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n}\n\nTEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateOnZeroFilterChain) {\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  auto listener_proto = createDefaultListener();\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  expectAddListener(listener_proto, listener_foo);\n\n  auto new_listener_proto = listener_proto;\n  new_listener_proto.clear_filter_chains();\n  EXPECT_CALL(server_.validation_context_, staticValidationVisitor()).Times(0);\n  EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor());\n  EXPECT_CALL(listener_factory_, createDrainManager_(_));\n  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(new_listener_proto, \"\", true),\n                            EnvoyException,\n                            \"error adding listener '127.0.0.1:1234': no filter chains specified\");\n\n  expectRemove(listener_proto, listener_foo);\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  EXPECT_EQ(0, 
server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n}\n\nTEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest,\n       TraditionalUpdateIfListenerConfigHasUpdateOtherThanFilterChain) {\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  auto listener_proto = createDefaultListener();\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  expectAddListener(listener_proto, listener_foo);\n\n  ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true);\n\n  auto new_listener_proto = listener_proto;\n  new_listener_proto.set_traffic_direction(::envoy::config::core::v3::TrafficDirection::INBOUND);\n  expectUpdateToThenDrain(new_listener_proto, listener_foo);\n\n  expectRemove(new_listener_proto, listener_foo_update1);\n\n  EXPECT_EQ(0UL, manager_->listeners().size());\n  EXPECT_EQ(0, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n}\n\n// This test execute an in place update first, then a traditional listener update.\n// The second update is enforced by runtime.\nTEST_F(ListenerManagerImplTest, RuntimeDisabledInPlaceUpdateFallbacksToTraditionalUpdate) {\n  InSequence s;\n  EXPECT_CALL(*worker_, start(_));\n  manager_->startWorkers(guard_dog_);\n\n  // Add foo listener.\n  const std::string listener_foo_yaml = R\"EOF(\nname: foo\naddress:\n  socket_address:\n    address: 127.0.0.1\n    port_value: 1234\nfilter_chains:\n- filters: []\n  )EOF\";\n\n  ListenerHandle* listener_foo = expectListenerCreate(false, true);\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));\n\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n\n  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), \"\", true));\n  EXPECT_EQ(0, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n\n  worker_->callAddCompletion(true);\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  
checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);\n\n  // Add foo listener again. Will execute in place filter chain update path.\n  const std::string listener_foo_update1_yaml = R\"EOF(\n  name: foo\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 1234\n  filter_chains:\n  - filters: []\n    filter_chain_match:\n      destination_port: 1234\n    )EOF\";\n\n  ListenerHandle* listener_foo_update1 = expectListenerOverridden(false, listener_foo);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  auto* timer = new Event::MockTimer(dynamic_cast<Event::MockDispatcher*>(&server_.dispatcher()));\n  EXPECT_CALL(*timer, enableTimer(_, _));\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), \"\", true));\n  EXPECT_EQ(1, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n\n  EXPECT_EQ(1UL, manager_->listeners().size());\n  worker_->callAddCompletion(true);\n\n  EXPECT_CALL(*worker_, removeFilterChains(_, _, _));\n  timer->invokeCallback();\n  EXPECT_CALL(*listener_foo, onDestroy());\n  worker_->callDrainFilterChainsComplete();\n\n  // Update foo again. 
This time we disable in place filter chain update in runtime.\n  // The traditional full listener update path is used.\n  auto in_place_update_disabled_guard = disableInplaceUpdateForThisTest();\n  const std::string listener_foo_update2_yaml = R\"EOF(\n  name: foo\n  address:\n    socket_address:\n      address: 127.0.0.1\n      port_value: 1234\n  filter_chains:\n  - filters:\n    filter_chain_match:\n      destination_port: 2345\n    )EOF\";\n\n  ListenerHandle* listener_foo_update2 = expectListenerCreate(false, true);\n  EXPECT_CALL(*worker_, addListener(_, _, _));\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_foo_update1->drain_manager_, startDrainSequence(_));\n  EXPECT_TRUE(\n      manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update2_yaml), \"\", true));\n  EXPECT_EQ(1, server_.stats_store_.counter(\"listener_manager.listener_in_place_updated\").value());\n\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo_update1->drain_manager_->drain_sequence_completion_();\n\n  EXPECT_CALL(*listener_foo_update1, onDestroy());\n  worker_->callRemovalCompletion();\n\n  EXPECT_CALL(*worker_, stopListener(_, _));\n  EXPECT_CALL(*listener_factory_.socket_, close());\n  EXPECT_CALL(*listener_foo_update2->drain_manager_, startDrainSequence(_));\n  EXPECT_TRUE(manager_->removeListener(\"foo\"));\n\n  EXPECT_CALL(*worker_, removeListener(_, _));\n  listener_foo_update2->drain_manager_->drain_sequence_completion_();\n\n  EXPECT_CALL(*listener_foo_update2, onDestroy());\n  worker_->callRemovalCompletion();\n  EXPECT_EQ(0UL, manager_->listeners().size());\n}\n\n// This test verifies that on default initialization the UDP Packet Writer\n// is initialized in passthrough mode. (i.e. 
by using UdpDefaultWriter).\nTEST_F(ListenerManagerImplTest, UdpDefaultWriterConfig) {\n  const envoy::config::listener::v3::Listener listener = parseListenerFromV3Yaml(R\"EOF(\naddress:\n  socket_address:\n    address: 127.0.0.1\n    protocol: UDP\n    port_value: 1234\nfilter_chains:\n  filters: []\n    )EOF\");\n  manager_->addOrUpdateListener(listener, \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n  Network::SocketSharedPtr listen_socket =\n      manager_->listeners().front().get().listenSocketFactory().getListenSocket();\n  Network::UdpPacketWriterPtr udp_packet_writer =\n      manager_->listeners().front().get().udpPacketWriterFactory()->get().createUdpPacketWriter(\n          listen_socket->ioHandle(), manager_->listeners()[0].get().listenerScope());\n  EXPECT_FALSE(udp_packet_writer->isBatchMode());\n}\n\nTEST_F(ListenerManagerImplTest, TcpBacklogCustomConfig) {\n  const std::string yaml = TestEnvironment::substitute(R\"EOF(\n    name: TcpBacklogConfigListener\n    address:\n      socket_address: { address: 127.0.0.1, port_value: 1111 }\n    tcp_backlog_size: 100\n    filter_chains:\n    - filters:\n  )EOF\",\n                                                       Network::Address::IpVersion::v4);\n\n  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, _));\n  manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), \"\", true);\n  EXPECT_EQ(1U, manager_->listeners().size());\n  EXPECT_EQ(100U, manager_->listeners().back().get().tcpBacklogSize());\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/listener_manager_impl_test.h",
    "content": "#include <memory>\n\n#include \"envoy/admin/v3/config_dump.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/listener/v3/listener_components.pb.h\"\n\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/socket_option_impl.h\"\n\n#include \"server/configuration_impl.h\"\n#include \"server/listener_manager_impl.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/drain_manager.h\"\n#include \"test/mocks/server/guard_dog.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/listener_component_factory.h\"\n#include \"test/mocks/server/worker.h\"\n#include \"test/mocks/server/worker_factory.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::ReturnRef;\n\nnamespace Envoy {\nnamespace Server {\n\nclass ListenerHandle {\npublic:\n  ListenerHandle(bool need_local_drain_manager = true) {\n    if (need_local_drain_manager) {\n      drain_manager_ = new MockDrainManager();\n      EXPECT_CALL(*drain_manager_, startParentShutdownSequence()).Times(0);\n    }\n  }\n  ~ListenerHandle() { onDestroy(); }\n\n  MOCK_METHOD(void, onDestroy, ());\n\n  Init::ExpectableTargetImpl target_;\n  MockDrainManager* drain_manager_{};\n  Configuration::FactoryContext* context_{};\n};\n\nclass ListenerManagerImplTest : public testing::Test {\nprotected:\n  ListenerManagerImplTest() : api_(Api::createApiForTest(server_.api_.random_)) {}\n\n  void SetUp() override {\n    ON_CALL(server_, api()).WillByDefault(ReturnRef(*api_));\n    EXPECT_CALL(worker_factory_, createWorker_()).WillOnce(Return(worker_));\n    
ON_CALL(server_.validation_context_, staticValidationVisitor())\n        .WillByDefault(ReturnRef(validation_visitor));\n    ON_CALL(server_.validation_context_, dynamicValidationVisitor())\n        .WillByDefault(ReturnRef(validation_visitor));\n    manager_ = std::make_unique<ListenerManagerImpl>(server_, listener_factory_, worker_factory_,\n                                                     enable_dispatcher_stats_);\n\n    // Use real filter loading by default.\n    ON_CALL(listener_factory_, createNetworkFilterFactoryList(_, _))\n        .WillByDefault(Invoke(\n            [](const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>& filters,\n               Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context)\n                -> std::vector<Network::FilterFactoryCb> {\n              return ProdListenerComponentFactory::createNetworkFilterFactoryList_(\n                  filters, filter_chain_factory_context);\n            }));\n    ON_CALL(listener_factory_, createListenerFilterFactoryList(_, _))\n        .WillByDefault(\n            Invoke([](const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>&\n                          filters,\n                      Configuration::ListenerFactoryContext& context)\n                       -> std::vector<Network::ListenerFilterFactoryCb> {\n              return ProdListenerComponentFactory::createListenerFilterFactoryList_(filters,\n                                                                                    context);\n            }));\n    ON_CALL(listener_factory_, createUdpListenerFilterFactoryList(_, _))\n        .WillByDefault(\n            Invoke([](const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>&\n                          filters,\n                      Configuration::ListenerFactoryContext& context)\n                       -> std::vector<Network::UdpListenerFilterFactoryCb> {\n              return 
ProdListenerComponentFactory::createUdpListenerFilterFactoryList_(filters,\n                                                                                       context);\n            }));\n    ON_CALL(listener_factory_, nextListenerTag()).WillByDefault(Invoke([this]() {\n      return listener_tag_++;\n    }));\n\n    local_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1234);\n    remote_address_ = std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\", 1234);\n    EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno}));\n    EXPECT_CALL(os_sys_calls_, getsockname)\n        .WillRepeatedly(Invoke([this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) {\n          return os_sys_calls_actual_.getsockname(sockfd, addr, addrlen);\n        }));\n    socket_ = std::make_unique<NiceMock<Network::MockConnectionSocket>>();\n  }\n\n  /**\n   * This routing sets up an expectation that does various things:\n   * 1) Allows us to track listener destruction via filter factory destruction.\n   * 2) Allows us to register for init manager handling much like RDS, etc. 
would do.\n   * 3) Stores the factory context for later use.\n   * 4) Creates a mock local drain manager for the listener.\n   */\n  ListenerHandle* expectListenerCreate(bool need_init, bool added_via_api,\n                                       envoy::config::listener::v3::Listener::DrainType drain_type =\n                                           envoy::config::listener::v3::Listener::DEFAULT) {\n    if (added_via_api) {\n      EXPECT_CALL(server_.validation_context_, staticValidationVisitor()).Times(0);\n      EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor());\n    } else {\n      EXPECT_CALL(server_.validation_context_, staticValidationVisitor());\n      EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor()).Times(0);\n    }\n    auto raw_listener = new ListenerHandle();\n    EXPECT_CALL(listener_factory_, createDrainManager_(drain_type))\n        .WillOnce(Return(raw_listener->drain_manager_));\n    EXPECT_CALL(listener_factory_, createNetworkFilterFactoryList(_, _))\n        .WillOnce(Invoke(\n            [raw_listener, need_init](\n                const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>&,\n                Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context)\n                -> std::vector<Network::FilterFactoryCb> {\n              std::shared_ptr<ListenerHandle> notifier(raw_listener);\n              raw_listener->context_ = &filter_chain_factory_context;\n              if (need_init) {\n                filter_chain_factory_context.initManager().add(notifier->target_);\n              }\n              return {[notifier](Network::FilterManager&) -> void {}};\n            }));\n\n    return raw_listener;\n  }\n\n  ListenerHandle* expectListenerOverridden(bool need_init, ListenerHandle* origin = nullptr) {\n    auto raw_listener = new ListenerHandle(false);\n    // Simulate ListenerImpl: drain manager is copied from origin.\n    if (origin != nullptr) {\n      
raw_listener->drain_manager_ = origin->drain_manager_;\n    }\n    // Overridden listener is always added by api.\n    EXPECT_CALL(server_.validation_context_, staticValidationVisitor()).Times(0);\n    EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor());\n\n    EXPECT_CALL(listener_factory_, createNetworkFilterFactoryList(_, _))\n        .WillOnce(Invoke(\n            [raw_listener, need_init](\n                const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>&,\n                Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context)\n                -> std::vector<Network::FilterFactoryCb> {\n              std::shared_ptr<ListenerHandle> notifier(raw_listener);\n              raw_listener->context_ = &filter_chain_factory_context;\n              if (need_init) {\n                filter_chain_factory_context.initManager().add(notifier->target_);\n              }\n              return {[notifier](Network::FilterManager&) -> void {}};\n            }));\n\n    return raw_listener;\n  }\n\n  const Network::FilterChain*\n  findFilterChain(uint16_t destination_port, const std::string& destination_address,\n                  const std::string& server_name, const std::string& transport_protocol,\n                  const std::vector<std::string>& application_protocols,\n                  const std::string& source_address, uint16_t source_port) {\n    if (absl::StartsWith(destination_address, \"/\")) {\n      local_address_ = std::make_shared<Network::Address::PipeInstance>(destination_address);\n    } else {\n      local_address_ =\n          Network::Utility::parseInternetAddress(destination_address, destination_port);\n    }\n    ON_CALL(*socket_, localAddress()).WillByDefault(ReturnRef(local_address_));\n\n    ON_CALL(*socket_, requestedServerName()).WillByDefault(Return(absl::string_view(server_name)));\n    ON_CALL(*socket_, detectedTransportProtocol())\n        
.WillByDefault(Return(absl::string_view(transport_protocol)));\n    ON_CALL(*socket_, requestedApplicationProtocols())\n        .WillByDefault(ReturnRef(application_protocols));\n\n    if (absl::StartsWith(source_address, \"/\")) {\n      remote_address_ = std::make_shared<Network::Address::PipeInstance>(source_address);\n    } else {\n      remote_address_ = Network::Utility::parseInternetAddress(source_address, source_port);\n    }\n    ON_CALL(*socket_, remoteAddress()).WillByDefault(ReturnRef(remote_address_));\n\n    return manager_->listeners().back().get().filterChainManager().findFilterChain(*socket_);\n  }\n\n  /**\n   * Validate that createListenSocket is called once with the expected options.\n   */\n  void\n  expectCreateListenSocket(const envoy::config::core::v3::SocketOption::SocketState& expected_state,\n                           Network::Socket::Options::size_type expected_num_options,\n                           ListenSocketCreationParams expected_creation_params = {true, true}) {\n    EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, expected_creation_params))\n        .WillOnce(Invoke([this, expected_num_options, &expected_state](\n                             const Network::Address::InstanceConstSharedPtr&, Network::Socket::Type,\n                             const Network::Socket::OptionsSharedPtr& options,\n                             const ListenSocketCreationParams&) -> Network::SocketSharedPtr {\n          EXPECT_NE(options.get(), nullptr);\n          EXPECT_EQ(options->size(), expected_num_options);\n          EXPECT_TRUE(\n              Network::Socket::applyOptions(options, *listener_factory_.socket_, expected_state));\n          return listener_factory_.socket_;\n        }));\n  }\n\n  /**\n   * Validate that setSocketOption() is called the expected number of times with the expected\n   * options.\n   */\n  void expectSetsockopt(int expected_sockopt_level, int expected_sockopt_name, int expected_value,\n                       
 uint32_t expected_num_calls = 1) {\n    EXPECT_CALL(*listener_factory_.socket_,\n                setSocketOption(expected_sockopt_level, expected_sockopt_name, _, sizeof(int)))\n        .Times(expected_num_calls)\n        .WillRepeatedly(Invoke(\n            [expected_value](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult {\n              EXPECT_EQ(expected_value, *static_cast<const int*>(optval));\n              return {0, 0};\n            }));\n  }\n\n  void checkStats(int line_num, uint64_t added, uint64_t modified, uint64_t removed,\n                  uint64_t warming, uint64_t active, uint64_t draining,\n                  uint64_t draining_filter_chains) {\n    SCOPED_TRACE(line_num);\n\n    EXPECT_EQ(added, server_.stats_store_.counter(\"listener_manager.listener_added\").value());\n    EXPECT_EQ(modified, server_.stats_store_.counter(\"listener_manager.listener_modified\").value());\n    EXPECT_EQ(removed, server_.stats_store_.counter(\"listener_manager.listener_removed\").value());\n    EXPECT_EQ(warming, server_.stats_store_\n                           .gauge(\"listener_manager.total_listeners_warming\",\n                                  Stats::Gauge::ImportMode::NeverImport)\n                           .value());\n    EXPECT_EQ(active, server_.stats_store_\n                          .gauge(\"listener_manager.total_listeners_active\",\n                                 Stats::Gauge::ImportMode::NeverImport)\n                          .value());\n    EXPECT_EQ(draining, server_.stats_store_\n                            .gauge(\"listener_manager.total_listeners_draining\",\n                                   Stats::Gauge::ImportMode::NeverImport)\n                            .value());\n    EXPECT_EQ(draining_filter_chains, server_.stats_store_\n                                          .gauge(\"listener_manager.total_filter_chains_draining\",\n                                                 Stats::Gauge::ImportMode::NeverImport)\n        
                                  .value());\n  }\n\n  void checkConfigDump(const std::string& expected_dump_yaml) {\n    auto message_ptr = server_.admin_.config_tracker_.config_tracker_callbacks_[\"listeners\"]();\n    const auto& listeners_config_dump =\n        dynamic_cast<const envoy::admin::v3::ListenersConfigDump&>(*message_ptr);\n\n    envoy::admin::v3::ListenersConfigDump expected_listeners_config_dump;\n    TestUtility::loadFromYaml(expected_dump_yaml, expected_listeners_config_dump);\n    EXPECT_EQ(expected_listeners_config_dump.DebugString(), listeners_config_dump.DebugString());\n  }\n\n  ABSL_MUST_USE_RESULT\n  auto disableInplaceUpdateForThisTest() {\n    auto scoped_runtime = std::make_unique<TestScopedRuntime>();\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.listener_in_place_filterchain_update\", \"false\"}});\n    return scoped_runtime;\n  }\n\n  NiceMock<Api::MockOsSysCalls> os_sys_calls_;\n  TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls_{&os_sys_calls_};\n  Api::OsSysCallsImpl os_sys_calls_actual_;\n  NiceMock<MockInstance> server_;\n  NiceMock<MockListenerComponentFactory> listener_factory_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor;\n  MockWorker* worker_ = new MockWorker();\n  NiceMock<MockWorkerFactory> worker_factory_;\n  std::unique_ptr<ListenerManagerImpl> manager_;\n  NiceMock<MockGuardDog> guard_dog_;\n  Event::SimulatedTimeSystem time_system_;\n  Api::ApiPtr api_;\n  Network::Address::InstanceConstSharedPtr local_address_;\n  Network::Address::InstanceConstSharedPtr remote_address_;\n  std::unique_ptr<Network::MockConnectionSocket> socket_;\n  uint64_t listener_tag_{1};\n  bool enable_dispatcher_stats_{false};\n};\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/options_impl_test.cc",
    "content": "#include <algorithm>\n#include <chrono>\n#include <fstream>\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"envoy/admin/v3/server_info.pb.h\"\n#include \"envoy/common/exception.h\"\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/filter/http/buffer/v2/buffer.pb.h\"\n#include \"envoy/config/typed_config.h\"\n#include \"envoy/extensions/filters/http/buffer/v3/buffer.pb.h\"\n#include \"envoy/server/filter_config.h\"\n\n#include \"common/common/utility.h\"\n\n#include \"server/options_impl.h\"\n\n#include \"extensions/filters/http/buffer/buffer_filter.h\"\n#include \"extensions/filters/http/well_known_names.h\"\n\n#if defined(__linux__)\n#include <sched.h>\n#include \"server/options_impl_platform_linux.h\"\n#endif\n#include \"test/mocks/api/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/threadsafe_singleton_injector.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\nnamespace {\n\nclass OptionsImplTest : public testing::Test {\n\npublic:\n  // Do the ugly work of turning a std::string into a vector and create an OptionsImpl. 
Args are\n  // separated by a single space: no fancy quoting or escaping.\n  std::unique_ptr<OptionsImpl> createOptionsImpl(const std::string& args) {\n    std::vector<std::string> words = TestUtility::split(args, ' ');\n    return std::make_unique<OptionsImpl>(\n        std::move(words), [](bool) { return \"1\"; }, spdlog::level::warn);\n  }\n\n  std::unique_ptr<OptionsImpl> createOptionsImpl(std::vector<std::string> args) {\n    return std::make_unique<OptionsImpl>(\n        std::move(args), [](bool) { return \"1\"; }, spdlog::level::warn);\n  }\n};\n\nTEST_F(OptionsImplTest, HotRestartVersion) {\n  EXPECT_THROW_WITH_REGEX(createOptionsImpl(\"envoy --hot-restart-version\"), NoServingException,\n                          \"NoServingException\");\n}\n\nTEST_F(OptionsImplTest, InvalidMode) {\n  EXPECT_THROW_WITH_REGEX(createOptionsImpl(\"envoy --mode bogus\"), MalformedArgvException, \"bogus\");\n}\n\nTEST_F(OptionsImplTest, InvalidCommandLine) {\n  EXPECT_THROW_WITH_REGEX(createOptionsImpl(\"envoy --blah\"), MalformedArgvException,\n                          \"Couldn't find match for argument\");\n}\n\nTEST_F(OptionsImplTest, InvalidSocketMode) {\n  EXPECT_THROW_WITH_REGEX(\n      createOptionsImpl(\"envoy --socket-path /foo/envoy_domain_socket --socket-mode foo\"),\n      MalformedArgvException, \"error: invalid socket-mode 'foo'\");\n}\n\nTEST_F(OptionsImplTest, V1Disallowed) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\n      \"envoy --mode validate --concurrency 2 -c hello --admin-address-path path --restart-epoch 1 \"\n      \"--local-address-ip-version v6 -l info --service-cluster cluster --service-node node \"\n      \"--service-zone zone --file-flush-interval-msec 9000 --drain-time-s 60 --log-format [%v] \"\n      \"--parent-shutdown-time-s 90 --log-path /foo/bar --disable-hot-restart\");\n  EXPECT_EQ(Server::Mode::Validate, options->mode());\n}\n\nTEST_F(OptionsImplTest, All) {\n  std::unique_ptr<OptionsImpl> options = 
createOptionsImpl(\n      \"envoy --mode validate --concurrency 2 -c hello --admin-address-path path --restart-epoch 0 \"\n      \"--local-address-ip-version v6 -l info --component-log-level upstream:debug,connection:trace \"\n      \"--service-cluster cluster --service-node node --service-zone zone \"\n      \"--file-flush-interval-msec 9000 \"\n      \"--drain-time-s 60 --log-format [%v] --enable-fine-grain-logging --parent-shutdown-time-s 90 \"\n      \"--log-path \"\n      \"/foo/bar \"\n      \"--disable-hot-restart --cpuset-threads --allow-unknown-static-fields \"\n      \"--reject-unknown-dynamic-fields --use-fake-symbol-table 0 --base-id 5 \"\n      \"--use-dynamic-base-id --base-id-path /foo/baz \"\n      \"--socket-path /foo/envoy_domain_socket --socket-mode 644\");\n  EXPECT_EQ(Server::Mode::Validate, options->mode());\n  EXPECT_EQ(2U, options->concurrency());\n  EXPECT_EQ(\"hello\", options->configPath());\n  EXPECT_EQ(\"path\", options->adminAddressPath());\n  EXPECT_EQ(Network::Address::IpVersion::v6, options->localAddressIpVersion());\n  EXPECT_EQ(0U, options->restartEpoch());\n  EXPECT_EQ(spdlog::level::info, options->logLevel());\n  EXPECT_EQ(2, options->componentLogLevels().size());\n  EXPECT_EQ(\"[%v]\", options->logFormat());\n  EXPECT_EQ(\"/foo/bar\", options->logPath());\n  EXPECT_EQ(true, options->enableFineGrainLogging());\n  EXPECT_EQ(\"cluster\", options->serviceClusterName());\n  EXPECT_EQ(\"node\", options->serviceNodeName());\n  EXPECT_EQ(\"zone\", options->serviceZone());\n  EXPECT_EQ(std::chrono::milliseconds(9000), options->fileFlushIntervalMsec());\n  EXPECT_EQ(std::chrono::seconds(60), options->drainTime());\n  EXPECT_EQ(std::chrono::seconds(90), options->parentShutdownTime());\n  EXPECT_TRUE(options->hotRestartDisabled());\n  EXPECT_TRUE(options->cpusetThreadsEnabled());\n  EXPECT_TRUE(options->allowUnknownStaticFields());\n  EXPECT_TRUE(options->rejectUnknownDynamicFields());\n  EXPECT_FALSE(options->fakeSymbolTableEnabled());\n  
EXPECT_EQ(5U, options->baseId());\n  EXPECT_TRUE(options->useDynamicBaseId());\n  EXPECT_EQ(\"/foo/baz\", options->baseIdPath());\n  EXPECT_EQ(\"/foo/envoy_domain_socket\", options->socketPath());\n  EXPECT_EQ(0644, options->socketMode());\n\n  options = createOptionsImpl(\"envoy --mode init_only\");\n  EXPECT_EQ(Server::Mode::InitOnly, options->mode());\n}\n\n// TODO(#13399): remove this test once we remove the option.\nTEST_F(OptionsImplTest, FakeSymtabWarning) {\n  EXPECT_LOG_CONTAINS(\"warning\", \"Fake symbol tables have been removed\",\n                      createOptionsImpl(\"envoy --use-fake-symbol-table 1\"));\n  EXPECT_NO_LOGS(createOptionsImpl(\"envoy --use-fake-symbol-table 0\"));\n}\n\n// Either variants of allow-unknown-[static-]-fields works.\nTEST_F(OptionsImplTest, AllowUnknownFields) {\n  {\n    std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy\");\n    EXPECT_FALSE(options->allowUnknownStaticFields());\n  }\n  {\n    std::unique_ptr<OptionsImpl> options;\n    EXPECT_LOG_CONTAINS(\n        \"warning\",\n        \"--allow-unknown-fields is deprecated, use --allow-unknown-static-fields instead.\",\n        options = createOptionsImpl(\"envoy --allow-unknown-fields\"));\n    EXPECT_TRUE(options->allowUnknownStaticFields());\n  }\n  {\n    std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy --allow-unknown-static-fields\");\n    EXPECT_TRUE(options->allowUnknownStaticFields());\n  }\n}\n\nTEST_F(OptionsImplTest, SetAll) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy -c hello\");\n  bool hot_restart_disabled = options->hotRestartDisabled();\n  bool signal_handling_enabled = options->signalHandlingEnabled();\n  bool cpuset_threads_enabled = options->cpusetThreadsEnabled();\n  bool fake_symbol_table_enabled = options->fakeSymbolTableEnabled();\n\n  options->setBaseId(109876);\n  options->setConcurrency(42);\n  options->setConfigPath(\"foo\");\n  envoy::config::bootstrap::v3::Bootstrap bootstrap_foo{};\n 
 bootstrap_foo.mutable_node()->set_id(\"foo\");\n  options->setConfigProto(bootstrap_foo);\n  options->setConfigYaml(\"bogus:\");\n  options->setAdminAddressPath(\"path\");\n  options->setLocalAddressIpVersion(Network::Address::IpVersion::v6);\n  options->setDrainTime(std::chrono::seconds(42));\n  options->setDrainStrategy(Server::DrainStrategy::Immediate);\n  options->setParentShutdownTime(std::chrono::seconds(43));\n  options->setLogLevel(spdlog::level::trace);\n  options->setLogFormat(\"%L %n %v\");\n  options->setLogPath(\"/foo/bar\");\n  options->setRestartEpoch(44);\n  options->setFileFlushIntervalMsec(std::chrono::milliseconds(45));\n  options->setMode(Server::Mode::Validate);\n  options->setServiceClusterName(\"cluster_foo\");\n  options->setServiceNodeName(\"node_foo\");\n  options->setServiceZone(\"zone_foo\");\n  options->setHotRestartDisabled(!options->hotRestartDisabled());\n  options->setSignalHandling(!options->signalHandlingEnabled());\n  options->setCpusetThreads(!options->cpusetThreadsEnabled());\n  options->setAllowUnkownFields(true);\n  options->setRejectUnknownFieldsDynamic(true);\n  options->setFakeSymbolTableEnabled(!options->fakeSymbolTableEnabled());\n  options->setSocketPath(\"/foo/envoy_domain_socket\");\n  options->setSocketMode(0644);\n\n  EXPECT_EQ(109876, options->baseId());\n  EXPECT_EQ(42U, options->concurrency());\n  EXPECT_EQ(\"foo\", options->configPath());\n  envoy::config::bootstrap::v3::Bootstrap bootstrap_bar{};\n  bootstrap_bar.mutable_node()->set_id(\"foo\");\n  EXPECT_TRUE(TestUtility::protoEqual(bootstrap_bar, options->configProto()));\n  EXPECT_EQ(\"bogus:\", options->configYaml());\n  EXPECT_EQ(\"path\", options->adminAddressPath());\n  EXPECT_EQ(Network::Address::IpVersion::v6, options->localAddressIpVersion());\n  EXPECT_EQ(std::chrono::seconds(42), options->drainTime());\n  EXPECT_EQ(Server::DrainStrategy::Immediate, options->drainStrategy());\n  EXPECT_EQ(spdlog::level::trace, options->logLevel());\n  EXPECT_EQ(\"%L 
%n %v\", options->logFormat());\n  EXPECT_EQ(\"/foo/bar\", options->logPath());\n  EXPECT_EQ(std::chrono::seconds(43), options->parentShutdownTime());\n  EXPECT_EQ(44, options->restartEpoch());\n  EXPECT_EQ(std::chrono::milliseconds(45), options->fileFlushIntervalMsec());\n  EXPECT_EQ(Server::Mode::Validate, options->mode());\n  EXPECT_EQ(\"cluster_foo\", options->serviceClusterName());\n  EXPECT_EQ(\"node_foo\", options->serviceNodeName());\n  EXPECT_EQ(\"zone_foo\", options->serviceZone());\n  EXPECT_EQ(!hot_restart_disabled, options->hotRestartDisabled());\n  EXPECT_EQ(!signal_handling_enabled, options->signalHandlingEnabled());\n  EXPECT_EQ(!cpuset_threads_enabled, options->cpusetThreadsEnabled());\n  EXPECT_TRUE(options->allowUnknownStaticFields());\n  EXPECT_TRUE(options->rejectUnknownDynamicFields());\n  EXPECT_EQ(!fake_symbol_table_enabled, options->fakeSymbolTableEnabled());\n  EXPECT_EQ(\"/foo/envoy_domain_socket\", options->socketPath());\n  EXPECT_EQ(0644, options->socketMode());\n\n  // Validate that CommandLineOptions is constructed correctly.\n  Server::CommandLineOptionsPtr command_line_options = options->toCommandLineOptions();\n\n  EXPECT_EQ(options->baseId(), command_line_options->base_id());\n  EXPECT_EQ(options->concurrency(), command_line_options->concurrency());\n  EXPECT_EQ(options->configPath(), command_line_options->config_path());\n  EXPECT_EQ(options->configYaml(), command_line_options->config_yaml());\n  EXPECT_EQ(options->adminAddressPath(), command_line_options->admin_address_path());\n  EXPECT_EQ(envoy::admin::v3::CommandLineOptions::v6,\n            command_line_options->local_address_ip_version());\n  EXPECT_EQ(options->drainTime().count(), command_line_options->drain_time().seconds());\n  EXPECT_EQ(envoy::admin::v3::CommandLineOptions::Immediate,\n            command_line_options->drain_strategy());\n  EXPECT_EQ(options->parentShutdownTime().count(),\n            command_line_options->parent_shutdown_time().seconds());\n  
EXPECT_EQ(spdlog::level::to_string_view(options->logLevel()), command_line_options->log_level());\n  EXPECT_EQ(options->logFormat(), command_line_options->log_format());\n  EXPECT_EQ(options->logPath(), command_line_options->log_path());\n  EXPECT_EQ(options->restartEpoch(), command_line_options->restart_epoch());\n  EXPECT_EQ(options->fileFlushIntervalMsec().count() / 1000,\n            command_line_options->file_flush_interval().seconds());\n  EXPECT_EQ(envoy::admin::v3::CommandLineOptions::Validate, command_line_options->mode());\n  EXPECT_EQ(options->serviceClusterName(), command_line_options->service_cluster());\n  EXPECT_EQ(options->serviceNodeName(), command_line_options->service_node());\n  EXPECT_EQ(options->serviceZone(), command_line_options->service_zone());\n  EXPECT_EQ(options->hotRestartDisabled(), command_line_options->disable_hot_restart());\n  EXPECT_EQ(options->mutexTracingEnabled(), command_line_options->enable_mutex_tracing());\n  EXPECT_EQ(options->cpusetThreadsEnabled(), command_line_options->cpuset_threads());\n  EXPECT_EQ(options->socketPath(), command_line_options->socket_path());\n  EXPECT_EQ(options->socketMode(), command_line_options->socket_mode());\n}\n\nTEST_F(OptionsImplTest, DefaultParams) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy -c hello\");\n  EXPECT_EQ(std::chrono::seconds(600), options->drainTime());\n  EXPECT_EQ(Server::DrainStrategy::Gradual, options->drainStrategy());\n  EXPECT_EQ(std::chrono::seconds(900), options->parentShutdownTime());\n  EXPECT_EQ(\"\", options->adminAddressPath());\n  EXPECT_EQ(Network::Address::IpVersion::v4, options->localAddressIpVersion());\n  EXPECT_EQ(Server::Mode::Serve, options->mode());\n  EXPECT_EQ(spdlog::level::warn, options->logLevel());\n  EXPECT_EQ(\"@envoy_domain_socket\", options->socketPath());\n  EXPECT_EQ(0, options->socketMode());\n  EXPECT_FALSE(options->hotRestartDisabled());\n  EXPECT_FALSE(options->cpusetThreadsEnabled());\n\n  // Validate that 
CommandLineOptions is constructed correctly with default params.\n  Server::CommandLineOptionsPtr command_line_options = options->toCommandLineOptions();\n\n  EXPECT_EQ(600, command_line_options->drain_time().seconds());\n  EXPECT_EQ(900, command_line_options->parent_shutdown_time().seconds());\n  EXPECT_EQ(\"\", command_line_options->admin_address_path());\n  EXPECT_EQ(envoy::admin::v3::CommandLineOptions::v4,\n            command_line_options->local_address_ip_version());\n  EXPECT_EQ(envoy::admin::v3::CommandLineOptions::Serve, command_line_options->mode());\n  EXPECT_EQ(\"@envoy_domain_socket\", command_line_options->socket_path());\n  EXPECT_EQ(0, command_line_options->socket_mode());\n  EXPECT_FALSE(command_line_options->disable_hot_restart());\n  EXPECT_FALSE(command_line_options->cpuset_threads());\n  EXPECT_FALSE(command_line_options->allow_unknown_static_fields());\n  EXPECT_FALSE(command_line_options->reject_unknown_dynamic_fields());\n}\n\n// Validates that the server_info proto is in sync with the options.\nTEST_F(OptionsImplTest, OptionsAreInSyncWithProto) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy -c hello\");\n  Server::CommandLineOptionsPtr command_line_options = options->toCommandLineOptions();\n  // Failure of this condition indicates that the server_info proto is not in sync with the options.\n  // If an option is added/removed, please update server_info proto as well to keep it in sync.\n\n  // Currently the following 7 options are not defined in proto, hence the count differs by 7.\n  // 1. version        - default TCLAP argument.\n  // 2. help           - default TCLAP argument.\n  // 3. ignore_rest    - default TCLAP argument.\n  // 4. allow-unknown-fields  - deprecated alias of allow-unknown-static-fields.\n  // 5. use-fake-symbol-table - short-term override for rollout of real symbol-table implementation.\n  // 6. hot restart version - print the hot restart version and exit.\n  // 7. 
log-format-prefix-with-location - short-term override for rollout of dynamic log format.\n  const uint32_t options_not_in_proto = 7;\n\n  // There are two deprecated options: \"max_stats\" and \"max_obj_name_len\".\n  const uint32_t deprecated_options = 2;\n\n  EXPECT_EQ(options->count() - options_not_in_proto,\n            command_line_options->GetDescriptor()->field_count() - deprecated_options);\n}\n\nTEST_F(OptionsImplTest, OptionsFromArgv) {\n  const std::array<const char*, 3> args{\"envoy\", \"-c\", \"hello\"};\n  std::unique_ptr<OptionsImpl> options = std::make_unique<OptionsImpl>(\n      static_cast<int>(args.size()), args.data(), [](bool) { return \"1\"; }, spdlog::level::warn);\n  // Spot check that the arguments were parsed.\n  EXPECT_EQ(\"hello\", options->configPath());\n}\n\nTEST_F(OptionsImplTest, OptionsFromArgvPrefix) {\n  const std::array<const char*, 5> args{\"envoy\", \"-c\", \"hello\", \"--admin-address-path\", \"goodbye\"};\n  std::unique_ptr<OptionsImpl> options = std::make_unique<OptionsImpl>(\n      static_cast<int>(args.size()) - 2, // Pass in only a prefix of the args\n      args.data(), [](bool) { return \"1\"; }, spdlog::level::warn);\n  EXPECT_EQ(\"hello\", options->configPath());\n  // This should still have the default value since the extra arguments are\n  // ignored.\n  EXPECT_EQ(\"\", options->adminAddressPath());\n}\n\nTEST_F(OptionsImplTest, BadCliOption) {\n  EXPECT_THROW_WITH_REGEX(createOptionsImpl(\"envoy -c hello --local-address-ip-version foo\"),\n                          MalformedArgvException, \"error: unknown IP address version 'foo'\");\n}\n\nTEST_F(OptionsImplTest, ParseComponentLogLevels) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy --mode init_only\");\n  options->parseComponentLogLevels(\"upstream:debug,connection:trace\");\n  const std::vector<std::pair<std::string, spdlog::level::level_enum>>& component_log_levels =\n      options->componentLogLevels();\n  EXPECT_EQ(2, 
component_log_levels.size());\n  EXPECT_EQ(\"upstream\", component_log_levels[0].first);\n  EXPECT_EQ(spdlog::level::level_enum::debug, component_log_levels[0].second);\n  EXPECT_EQ(\"connection\", component_log_levels[1].first);\n  EXPECT_EQ(spdlog::level::level_enum::trace, component_log_levels[1].second);\n}\n\nTEST_F(OptionsImplTest, ParseComponentLogLevelsWithBlank) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy --mode init_only\");\n  options->parseComponentLogLevels(\"\");\n  EXPECT_EQ(0, options->componentLogLevels().size());\n}\n\nTEST_F(OptionsImplTest, InvalidComponent) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy --mode init_only\");\n  EXPECT_THROW_WITH_REGEX(options->parseComponentLogLevels(\"blah:debug\"), MalformedArgvException,\n                          \"error: invalid component specified 'blah'\");\n}\n\nTEST_F(OptionsImplTest, InvalidComponentLogLevel) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy --mode init_only\");\n  EXPECT_THROW_WITH_REGEX(options->parseComponentLogLevels(\"upstream:blah,connection:trace\"),\n                          MalformedArgvException, \"error: invalid log level specified 'blah'\");\n}\n\nTEST_F(OptionsImplTest, ComponentLogLevelContainsBlank) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy --mode init_only\");\n  EXPECT_THROW_WITH_REGEX(options->parseComponentLogLevels(\"upstream:,connection:trace\"),\n                          MalformedArgvException, \"error: invalid log level specified ''\");\n}\n\nTEST_F(OptionsImplTest, InvalidComponentLogLevelStructure) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy --mode init_only\");\n  EXPECT_THROW_WITH_REGEX(options->parseComponentLogLevels(\"upstream:foo:bar\"),\n                          MalformedArgvException,\n                          \"error: component log level not correctly specified 'upstream:foo:bar'\");\n}\n\nTEST_F(OptionsImplTest, 
IncompleteComponentLogLevel) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy --mode init_only\");\n  EXPECT_THROW_WITH_REGEX(options->parseComponentLogLevels(\"upstream\"), MalformedArgvException,\n                          \"component log level not correctly specified 'upstream'\");\n}\n\nTEST_F(OptionsImplTest, InvalidLogLevel) {\n  EXPECT_THROW_WITH_REGEX(createOptionsImpl(\"envoy -l blah\"), MalformedArgvException,\n                          \"error: invalid log level specified 'blah'\");\n}\n\nTEST_F(OptionsImplTest, ValidLogLevel) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy -l critical\");\n  EXPECT_EQ(spdlog::level::level_enum::critical, options->logLevel());\n}\n\nTEST_F(OptionsImplTest, WarnIsValidLogLevel) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy -l warn\");\n  EXPECT_EQ(spdlog::level::level_enum::warn, options->logLevel());\n}\n\nTEST_F(OptionsImplTest, AllowedLogLevels) {\n  EXPECT_EQ(\"[trace][debug][info][warning|warn][error][critical][off]\",\n            OptionsImpl::allowedLogLevels());\n}\n\n// Test that the test constructor comes up with the same default values as the main constructor.\nTEST_F(OptionsImplTest, SaneTestConstructor) {\n  std::unique_ptr<OptionsImpl> regular_options_impl(createOptionsImpl(\"envoy\"));\n  OptionsImpl test_options_impl(\"service_cluster\", \"service_node\", \"service_zone\",\n                                spdlog::level::level_enum::info);\n\n  // Specified by constructor\n  EXPECT_EQ(\"service_cluster\", test_options_impl.serviceClusterName());\n  EXPECT_EQ(\"service_node\", test_options_impl.serviceNodeName());\n  EXPECT_EQ(\"service_zone\", test_options_impl.serviceZone());\n  EXPECT_EQ(spdlog::level::level_enum::info, test_options_impl.logLevel());\n\n  // Special (simplified) for tests\n  EXPECT_EQ(1u, test_options_impl.concurrency());\n\n  EXPECT_EQ(regular_options_impl->baseId(), test_options_impl.baseId());\n  
EXPECT_EQ(regular_options_impl->configPath(), test_options_impl.configPath());\n  EXPECT_TRUE(TestUtility::protoEqual(regular_options_impl->configProto(),\n                                      test_options_impl.configProto()));\n  EXPECT_EQ(regular_options_impl->configYaml(), test_options_impl.configYaml());\n  EXPECT_EQ(regular_options_impl->adminAddressPath(), test_options_impl.adminAddressPath());\n  EXPECT_EQ(regular_options_impl->localAddressIpVersion(),\n            test_options_impl.localAddressIpVersion());\n  EXPECT_EQ(regular_options_impl->drainTime(), test_options_impl.drainTime());\n  EXPECT_EQ(spdlog::level::level_enum::info, test_options_impl.logLevel());\n  EXPECT_EQ(regular_options_impl->componentLogLevels(), test_options_impl.componentLogLevels());\n  EXPECT_EQ(regular_options_impl->logPath(), test_options_impl.logPath());\n  EXPECT_EQ(regular_options_impl->parentShutdownTime(), test_options_impl.parentShutdownTime());\n  EXPECT_EQ(regular_options_impl->restartEpoch(), test_options_impl.restartEpoch());\n  EXPECT_EQ(regular_options_impl->mode(), test_options_impl.mode());\n  EXPECT_EQ(regular_options_impl->fileFlushIntervalMsec(),\n            test_options_impl.fileFlushIntervalMsec());\n  EXPECT_EQ(regular_options_impl->hotRestartDisabled(), test_options_impl.hotRestartDisabled());\n  EXPECT_EQ(regular_options_impl->cpusetThreadsEnabled(), test_options_impl.cpusetThreadsEnabled());\n}\n\nTEST_F(OptionsImplTest, SetBothConcurrencyAndCpuset) {\n  EXPECT_LOG_CONTAINS(\n      \"warning\",\n      \"Both --concurrency and --cpuset-threads options are set; not applying --cpuset-threads.\",\n      std::unique_ptr<OptionsImpl> options =\n          createOptionsImpl(\"envoy -c hello --concurrency 42 --cpuset-threads\"));\n}\n\nTEST_F(OptionsImplTest, SetCpusetOnly) {\n  std::unique_ptr<OptionsImpl> options = createOptionsImpl(\"envoy -c hello --cpuset-threads\");\n  EXPECT_NE(options->concurrency(), 0);\n}\n\nTEST_F(OptionsImplTest, LogFormatDefault) {\n  
std::unique_ptr<OptionsImpl> options = createOptionsImpl({\"envoy\", \"-c\", \"hello\"});\n  EXPECT_EQ(options->logFormat(), \"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v\");\n}\n\nTEST_F(OptionsImplTest, LogFormatDefaultNoPrefix) {\n  std::unique_ptr<OptionsImpl> options =\n      createOptionsImpl({\"envoy\", \"-c\", \"hello\", \"--log-format-prefix-with-location\", \"0\"});\n  EXPECT_EQ(options->logFormat(), \"[%Y-%m-%d %T.%e][%t][%l][%n] %v\");\n}\n\nTEST_F(OptionsImplTest, LogFormatOverride) {\n  std::unique_ptr<OptionsImpl> options =\n      createOptionsImpl({\"envoy\", \"-c\", \"hello\", \"--log-format\", \"%%v %v %t %v\",\n                         \"--log-format-prefix-with-location 1\"});\n  EXPECT_EQ(options->logFormat(), \"%%v [%g:%#] %v %t [%g:%#] %v\");\n}\n\nTEST_F(OptionsImplTest, LogFormatOverrideNoPrefix) {\n  std::unique_ptr<OptionsImpl> options =\n      createOptionsImpl({\"envoy\", \"-c\", \"hello\", \"--log-format\", \"%%v %v %t %v\"});\n  EXPECT_EQ(options->logFormat(), \"%%v %v %t %v\");\n}\n\n// Test that --base-id and --restart-epoch with non-default values are accepted.\nTEST_F(OptionsImplTest, SetBaseIdAndRestartEpoch) {\n  std::unique_ptr<OptionsImpl> options =\n      createOptionsImpl({\"envoy\", \"-c\", \"hello\", \"--base-id\", \"99\", \"--restart-epoch\", \"999\"});\n  EXPECT_EQ(99U, options->baseId());\n  EXPECT_EQ(999U, options->restartEpoch());\n}\n\n// Test that --use-dynamic-base-id and --restart-epoch with a non-default value is not accepted.\nTEST_F(OptionsImplTest, SetUseDynamicBaseIdAndRestartEpoch) {\n  EXPECT_THROW_WITH_REGEX(\n      createOptionsImpl({\"envoy\", \"-c\", \"hello\", \"--use-dynamic-base-id\", \"--restart-epoch\", \"1\"}),\n      MalformedArgvException, \"error: cannot use --restart-epoch=1 with --use-dynamic-base-id\");\n}\n\n#if defined(__linux__)\n\nusing testing::DoAll;\nusing testing::Return;\nusing testing::SetArgPointee;\n\nclass OptionsImplPlatformLinuxTest : public testing::Test 
{\npublic:\n};\n\nTEST_F(OptionsImplPlatformLinuxTest, AffinityTest1) {\n  // Success case: cpuset size and hardware thread count are the same.\n  unsigned int fake_hw_threads = 4;\n  cpu_set_t test_set;\n  Api::MockLinuxOsSysCalls linux_os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::LinuxOsSysCallsImpl> linux_os_calls(&linux_os_sys_calls);\n\n  // Set cpuset size to be four.\n  CPU_ZERO(&test_set);\n  for (int i = 0; i < 4; i++) {\n    CPU_SET(i, &test_set);\n  }\n\n  EXPECT_CALL(linux_os_sys_calls, sched_getaffinity(_, _, _))\n      .WillOnce(DoAll(SetArgPointee<2>(test_set), Return(Api::SysCallIntResult{0, 0})));\n  EXPECT_EQ(OptionsImplPlatformLinux::getCpuAffinityCount(fake_hw_threads), 4);\n}\n\nTEST_F(OptionsImplPlatformLinuxTest, AffinityTest2) {\n  // Success case: cpuset size is half of the hardware thread count.\n  unsigned int fake_hw_threads = 16;\n  cpu_set_t test_set;\n  Api::MockLinuxOsSysCalls linux_os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::LinuxOsSysCallsImpl> linux_os_calls(&linux_os_sys_calls);\n\n  // Set cpuset size to be eight.\n  CPU_ZERO(&test_set);\n  for (int i = 0; i < 8; i++) {\n    CPU_SET(i, &test_set);\n  }\n\n  EXPECT_CALL(linux_os_sys_calls, sched_getaffinity(_, _, _))\n      .WillOnce(DoAll(SetArgPointee<2>(test_set), Return(Api::SysCallIntResult{0, 0})));\n  EXPECT_EQ(OptionsImplPlatformLinux::getCpuAffinityCount(fake_hw_threads), 8);\n}\n\nTEST_F(OptionsImplPlatformLinuxTest, AffinityTest3) {\n  // Failure case: cpuset size is bigger than the hardware thread count.\n  unsigned int fake_hw_threads = 4;\n  cpu_set_t test_set;\n  Api::MockLinuxOsSysCalls linux_os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::LinuxOsSysCallsImpl> linux_os_calls(&linux_os_sys_calls);\n\n  // Set cpuset size to be eight.\n  CPU_ZERO(&test_set);\n  for (int i = 0; i < 8; i++) {\n    CPU_SET(i, &test_set);\n  }\n\n  EXPECT_CALL(linux_os_sys_calls, sched_getaffinity(_, _, _))\n      .WillOnce(DoAll(SetArgPointee<2>(test_set), 
Return(Api::SysCallIntResult{0, 0})));\n  EXPECT_EQ(OptionsImplPlatformLinux::getCpuAffinityCount(fake_hw_threads), fake_hw_threads);\n}\n\nTEST_F(OptionsImplPlatformLinuxTest, AffinityTest4) {\n  // When sched_getaffinity() fails, expect to get the hardware thread count.\n  unsigned int fake_hw_threads = 8;\n  cpu_set_t test_set;\n  Api::MockLinuxOsSysCalls linux_os_sys_calls;\n  TestThreadsafeSingletonInjector<Api::LinuxOsSysCallsImpl> linux_os_calls(&linux_os_sys_calls);\n\n  // Set cpuset size to be four.\n  CPU_ZERO(&test_set);\n  for (int i = 0; i < 4; i++) {\n    CPU_SET(i, &test_set);\n  }\n\n  EXPECT_CALL(linux_os_sys_calls, sched_getaffinity(_, _, _))\n      .WillOnce(DoAll(SetArgPointee<2>(test_set), Return(Api::SysCallIntResult{-1, 0})));\n  EXPECT_EQ(OptionsImplPlatformLinux::getCpuAffinityCount(fake_hw_threads), fake_hw_threads);\n}\n\n#endif\n\nclass TestFactory : public Config::TypedFactory {\npublic:\n  ~TestFactory() override = default;\n  std::string category() const override { return \"test\"; }\n  std::string configType() override { return \"google.protobuf.StringValue\"; }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ProtobufWkt::StringValue>();\n  }\n};\n\nclass TestTestFactory : public TestFactory {\npublic:\n  std::string name() const override { return \"test\"; }\n};\n\nclass TestingFactory : public Config::TypedFactory {\npublic:\n  ~TestingFactory() override = default;\n  std::string category() const override { return \"testing\"; }\n  std::string configType() override { return \"google.protobuf.StringValue\"; }\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return std::make_unique<ProtobufWkt::StringValue>();\n  }\n};\n\nclass TestTestingFactory : public TestingFactory {\npublic:\n  std::string name() const override { return \"test\"; }\n};\n\nTEST(DisableExtensions, DEPRECATED_FEATURE_TEST(IsDisabled)) {\n  TestTestFactory testTestFactory;\n  
Registry::InjectFactoryCategory<TestFactory> testTestCategory(testTestFactory);\n  Registry::InjectFactory<TestFactory> testTestRegistration(testTestFactory, {\"test-1\", \"test-2\"});\n\n  TestTestingFactory testTestingFactory;\n  Registry::InjectFactoryCategory<TestingFactory> testTestingCategory(testTestingFactory);\n  Registry::InjectFactory<TestingFactory> testTestingRegistration(testTestingFactory,\n                                                                  {\"test-1\", \"test-2\"});\n\n  EXPECT_LOG_CONTAINS(\"warning\", \"failed to disable invalid extension name 'not.a.factory'\",\n                      OptionsImpl::disableExtensions({\"not.a.factory\"}));\n\n  EXPECT_LOG_CONTAINS(\"warning\", \"failed to disable unknown extension 'no/such.factory'\",\n                      OptionsImpl::disableExtensions({\"no/such.factory\"}));\n\n  EXPECT_NE(Registry::FactoryRegistry<TestFactory>::getFactory(\"test\"), nullptr);\n  EXPECT_NE(Registry::FactoryRegistry<TestFactory>::getFactory(\"test-1\"), nullptr);\n  EXPECT_NE(Registry::FactoryRegistry<TestFactory>::getFactory(\"test-2\"), nullptr);\n  EXPECT_NE(Registry::FactoryRegistry<TestFactory>::getFactoryByType(\"google.protobuf.StringValue\"),\n            nullptr);\n\n  EXPECT_NE(Registry::FactoryRegistry<TestingFactory>::getFactory(\"test\"), nullptr);\n  EXPECT_NE(Registry::FactoryRegistry<TestingFactory>::getFactory(\"test-1\"), nullptr);\n  EXPECT_NE(Registry::FactoryRegistry<TestingFactory>::getFactory(\"test-2\"), nullptr);\n\n  OptionsImpl::disableExtensions({\"test/test\", \"testing/test-2\"});\n\n  // Simulate the initial construction of the type mappings.\n  testTestRegistration.resetTypeMappings();\n  testTestingRegistration.resetTypeMappings();\n\n  // When we disable an extension, all its aliases should also be disabled.\n  EXPECT_EQ(Registry::FactoryRegistry<TestFactory>::getFactory(\"test\"), nullptr);\n  EXPECT_EQ(Registry::FactoryRegistry<TestFactory>::getFactory(\"test-1\"), nullptr);\n  
EXPECT_EQ(Registry::FactoryRegistry<TestFactory>::getFactory(\"test-2\"), nullptr);\n\n  // When we disable an extension, all its aliases should also be disabled.\n  EXPECT_EQ(Registry::FactoryRegistry<TestingFactory>::getFactory(\"test\"), nullptr);\n  EXPECT_EQ(Registry::FactoryRegistry<TestingFactory>::getFactory(\"test-1\"), nullptr);\n  EXPECT_EQ(Registry::FactoryRegistry<TestingFactory>::getFactory(\"test-2\"), nullptr);\n\n  // Typing map for TestingFactory should be constructed here after disabling\n  EXPECT_EQ(\n      Registry::FactoryRegistry<TestingFactory>::getFactoryByType(\"google.protobuf.StringValue\"),\n      nullptr);\n}\n\nTEST(FactoryByTypeTest, EarlierVersionConfigType) {\n  envoy::config::filter::http::buffer::v2::Buffer v2_config;\n  auto factory = Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::\n      getFactoryByType(v2_config.GetDescriptor()->full_name());\n  EXPECT_NE(factory, nullptr);\n  EXPECT_EQ(factory->name(), Extensions::HttpFilters::HttpFilterNames::get().Buffer);\n\n  envoy::extensions::filters::http::buffer::v3::Buffer v3_config;\n  factory = Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::\n      getFactoryByType(v3_config.GetDescriptor()->full_name());\n  EXPECT_NE(factory, nullptr);\n  EXPECT_EQ(factory->name(), Extensions::HttpFilters::HttpFilterNames::get().Buffer);\n\n  ProtobufWkt::Any non_api_type;\n  factory = Registry::FactoryRegistry<Server::Configuration::NamedHttpFilterConfigFactory>::\n      getFactoryByType(non_api_type.GetDescriptor()->full_name());\n  EXPECT_EQ(factory, nullptr);\n}\n\n} // namespace\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/overload_manager_impl_test.cc",
    "content": "#include \"envoy/config/overload/v3/overload.pb.h\"\n#include \"envoy/server/overload_manager.h\"\n#include \"envoy/server/resource_monitor.h\"\n#include \"envoy/server/resource_monitor_config.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"server/overload_manager_impl.h\"\n\n#include \"extensions/resource_monitors/common/factory_base.h\"\n\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::AllOf;\nusing testing::Invoke;\nusing testing::NiceMock;\nusing testing::Property;\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nclass FakeResourceMonitor : public ResourceMonitor {\npublic:\n  FakeResourceMonitor(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher), response_(0.0) {}\n\n  void setPressure(double pressure) { response_ = pressure; }\n\n  void setError() { response_ = EnvoyException(\"fake_error\"); }\n\n  void setUpdateAsync(bool new_update_async) {\n    callbacks_.reset();\n    update_async_ = new_update_async;\n  }\n\n  void updateResourceUsage(ResourceMonitor::Callbacks& callbacks) override {\n    if (update_async_) {\n      callbacks_.emplace(callbacks);\n    } else {\n      publishUpdate(callbacks);\n    }\n  }\n\n  void publishUpdate() {\n    if (update_async_) {\n      ASSERT(callbacks_.has_value());\n      publishUpdate(*callbacks_);\n      callbacks_.reset();\n    }\n  }\n\nprivate:\n  void publishUpdate(ResourceMonitor::Callbacks& callbacks) {\n    if (absl::holds_alternative<double>(response_)) {\n      Server::ResourceUsage usage;\n      usage.resource_pressure_ = absl::get<double>(response_);\n      dispatcher_.post([&, usage]() { callbacks.onSuccess(usage); });\n    } else {\n      EnvoyException& 
error = absl::get<EnvoyException>(response_);\n      dispatcher_.post([&, error]() { callbacks.onFailure(error); });\n    }\n  }\n\n  Event::Dispatcher& dispatcher_;\n  absl::variant<double, EnvoyException> response_;\n  bool update_async_ = false;\n  absl::optional<std::reference_wrapper<ResourceMonitor::Callbacks>> callbacks_;\n};\n\ntemplate <class ConfigType>\nclass FakeResourceMonitorFactory : public Server::Configuration::ResourceMonitorFactory {\npublic:\n  FakeResourceMonitorFactory(const std::string& name) : monitor_(nullptr), name_(name) {}\n\n  Server::ResourceMonitorPtr\n  createResourceMonitor(const Protobuf::Message&,\n                        Server::Configuration::ResourceMonitorFactoryContext& context) override {\n    auto monitor = std::make_unique<FakeResourceMonitor>(context.dispatcher());\n    monitor_ = monitor.get();\n    return monitor;\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    return ProtobufTypes::MessagePtr{new ConfigType()};\n  }\n\n  std::string name() const override { return name_; }\n\n  FakeResourceMonitor* monitor_; // not owned\n  const std::string name_;\n};\n\nclass OverloadManagerImplTest : public testing::Test {\nprotected:\n  OverloadManagerImplTest()\n      : factory1_(\"envoy.resource_monitors.fake_resource1\"),\n        factory2_(\"envoy.resource_monitors.fake_resource2\"),\n        factory3_(\"envoy.resource_monitors.fake_resource3\"),\n        factory4_(\"envoy.resource_monitors.fake_resource4\"), register_factory1_(factory1_),\n        register_factory2_(factory2_), register_factory3_(factory3_), register_factory4_(factory4_),\n        api_(Api::createApiForTest(stats_)) {}\n\n  void setDispatcherExpectation() {\n    timer_ = new NiceMock<Event::MockTimer>();\n    EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([&](Event::TimerCb cb) {\n      timer_cb_ = cb;\n      return timer_;\n    }));\n  }\n\n  envoy::config::overload::v3::OverloadManager parseConfig(const std::string& 
config) {\n    envoy::config::overload::v3::OverloadManager proto;\n    bool success = Protobuf::TextFormat::ParseFromString(config, &proto);\n    ASSERT(success);\n    return proto;\n  }\n\n  std::string getConfig() {\n    return R\"EOF(\n      refresh_interval {\n        seconds: 1\n      }\n      resource_monitors {\n        name: \"envoy.resource_monitors.fake_resource1\"\n      }\n      resource_monitors {\n        name: \"envoy.resource_monitors.fake_resource2\"\n      }\n      resource_monitors {\n        name: \"envoy.resource_monitors.fake_resource3\"\n      }\n      resource_monitors {\n        name: \"envoy.resource_monitors.fake_resource4\"\n      }\n      actions {\n        name: \"envoy.overload_actions.dummy_action\"\n        triggers {\n          name: \"envoy.resource_monitors.fake_resource1\"\n          threshold {\n            value: 0.9\n          }\n        }\n        triggers {\n          name: \"envoy.resource_monitors.fake_resource2\"\n          threshold {\n            value: 0.8\n          }\n        }\n        triggers {\n          name: \"envoy.resource_monitors.fake_resource3\"\n          scaled {\n            scaling_threshold: 0.5\n            saturation_threshold: 0.8\n          }\n        }\n        triggers {\n          name: \"envoy.resource_monitors.fake_resource4\"\n          scaled {\n            scaling_threshold: 0.5\n            saturation_threshold: 0.8\n          }\n        }\n      }\n    )EOF\";\n  }\n\n  std::unique_ptr<OverloadManagerImpl> createOverloadManager(const std::string& config) {\n    return std::make_unique<OverloadManagerImpl>(dispatcher_, stats_, thread_local_,\n                                                 parseConfig(config), validation_visitor_, *api_);\n  }\n\n  FakeResourceMonitorFactory<Envoy::ProtobufWkt::Struct> factory1_;\n  FakeResourceMonitorFactory<Envoy::ProtobufWkt::Timestamp> factory2_;\n  FakeResourceMonitorFactory<Envoy::ProtobufWkt::Timestamp> factory3_;\n  
FakeResourceMonitorFactory<Envoy::ProtobufWkt::Timestamp> factory4_;\n  Registry::InjectFactory<Configuration::ResourceMonitorFactory> register_factory1_;\n  Registry::InjectFactory<Configuration::ResourceMonitorFactory> register_factory2_;\n  Registry::InjectFactory<Configuration::ResourceMonitorFactory> register_factory3_;\n  Registry::InjectFactory<Configuration::ResourceMonitorFactory> register_factory4_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Event::MockTimer>* timer_; // not owned\n  Stats::TestUtil::TestStore stats_;\n  NiceMock<ThreadLocal::MockInstance> thread_local_;\n  Event::TimerCb timer_cb_;\n  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  Api::ApiPtr api_;\n};\n\nTEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) {\n  setDispatcherExpectation();\n\n  auto manager(createOverloadManager(getConfig()));\n  bool is_active = false;\n  int cb_count = 0;\n  manager->registerForAction(\"envoy.overload_actions.dummy_action\", dispatcher_,\n                             [&](OverloadActionState state) {\n                               is_active = state.isSaturated();\n                               cb_count++;\n                             });\n  manager->registerForAction(\"envoy.overload_actions.unknown_action\", dispatcher_,\n                             [&](OverloadActionState) { EXPECT_TRUE(false); });\n  manager->start();\n\n  Stats::Gauge& active_gauge = stats_.gauge(\"overload.envoy.overload_actions.dummy_action.active\",\n                                            Stats::Gauge::ImportMode::Accumulate);\n  Stats::Gauge& scale_percent_gauge =\n      stats_.gauge(\"overload.envoy.overload_actions.dummy_action.scale_percent\",\n                   Stats::Gauge::ImportMode::Accumulate);\n  Stats::Gauge& pressure_gauge1 =\n      stats_.gauge(\"overload.envoy.resource_monitors.fake_resource1.pressure\",\n                   Stats::Gauge::ImportMode::NeverImport);\n  Stats::Gauge& pressure_gauge2 =\n 
     stats_.gauge(\"overload.envoy.resource_monitors.fake_resource2.pressure\",\n                   Stats::Gauge::ImportMode::NeverImport);\n  const OverloadActionState& action_state =\n      manager->getThreadLocalOverloadState().getState(\"envoy.overload_actions.dummy_action\");\n\n  // Update does not exceed fake_resource1 trigger threshold, no callback expected\n  factory1_.monitor_->setPressure(0.5);\n  timer_cb_();\n  EXPECT_FALSE(is_active);\n  EXPECT_THAT(action_state, AllOf(Property(&OverloadActionState::isSaturated, false),\n                                  Property(&OverloadActionState::value, 0)));\n  EXPECT_EQ(0, cb_count);\n  EXPECT_EQ(0, active_gauge.value());\n  EXPECT_EQ(0, scale_percent_gauge.value());\n  EXPECT_EQ(50, pressure_gauge1.value());\n\n  // Update exceeds fake_resource1 trigger threshold, callback is expected\n  factory1_.monitor_->setPressure(0.95);\n  timer_cb_();\n  EXPECT_TRUE(is_active);\n  EXPECT_TRUE(action_state.isSaturated());\n  EXPECT_EQ(1, cb_count);\n  EXPECT_EQ(1, active_gauge.value());\n  EXPECT_EQ(100, scale_percent_gauge.value());\n  EXPECT_EQ(95, pressure_gauge1.value());\n\n  // Callback should not be invoked if action state does not change\n  factory1_.monitor_->setPressure(0.94);\n  timer_cb_();\n  EXPECT_TRUE(is_active);\n  EXPECT_TRUE(action_state.isSaturated());\n  EXPECT_EQ(1, cb_count);\n  EXPECT_EQ(94, pressure_gauge1.value());\n\n  // The action is already active for fake_resource1 so no callback expected\n  factory2_.monitor_->setPressure(0.9);\n  timer_cb_();\n  EXPECT_TRUE(is_active);\n  EXPECT_TRUE(action_state.isSaturated());\n  EXPECT_EQ(1, cb_count);\n  EXPECT_EQ(90, pressure_gauge2.value());\n\n  // The action remains active for fake_resource2 so no callback expected\n  factory1_.monitor_->setPressure(0.5);\n  timer_cb_();\n  EXPECT_TRUE(is_active);\n  EXPECT_TRUE(action_state.isSaturated());\n  EXPECT_EQ(1, cb_count);\n  EXPECT_EQ(50, pressure_gauge1.value());\n  EXPECT_EQ(90, 
pressure_gauge2.value());\n\n  // Both become inactive so callback is expected\n  factory2_.monitor_->setPressure(0.3);\n  timer_cb_();\n  EXPECT_FALSE(is_active);\n  EXPECT_THAT(action_state, AllOf(Property(&OverloadActionState::isSaturated, false),\n                                  Property(&OverloadActionState::value, 0)));\n  EXPECT_EQ(2, cb_count);\n  EXPECT_EQ(30, pressure_gauge2.value());\n\n  // Different triggers, both become active, only one callback expected\n  factory1_.monitor_->setPressure(0.97);\n  factory2_.monitor_->setPressure(0.96);\n  timer_cb_();\n  EXPECT_TRUE(is_active);\n  EXPECT_TRUE(action_state.isSaturated());\n  EXPECT_EQ(3, cb_count);\n  EXPECT_EQ(97, pressure_gauge1.value());\n  EXPECT_EQ(96, pressure_gauge2.value());\n\n  // Different triggers, both become inactive, only one callback expected\n  factory1_.monitor_->setPressure(0.41);\n  factory2_.monitor_->setPressure(0.42);\n  timer_cb_();\n  EXPECT_FALSE(is_active);\n  EXPECT_THAT(action_state, AllOf(Property(&OverloadActionState::isSaturated, false),\n                                  Property(&OverloadActionState::value, 0)));\n  EXPECT_EQ(4, cb_count);\n  EXPECT_EQ(41, pressure_gauge1.value());\n  EXPECT_EQ(42, pressure_gauge2.value());\n\n  manager->stop();\n}\n\nTEST_F(OverloadManagerImplTest, ScaledTrigger) {\n  setDispatcherExpectation();\n\n  auto manager(createOverloadManager(getConfig()));\n  manager->start();\n  const auto& action_state =\n      manager->getThreadLocalOverloadState().getState(\"envoy.overload_actions.dummy_action\");\n  Stats::Gauge& active_gauge = stats_.gauge(\"overload.envoy.overload_actions.dummy_action.active\",\n                                            Stats::Gauge::ImportMode::Accumulate);\n  Stats::Gauge& scale_percent_gauge =\n      stats_.gauge(\"overload.envoy.overload_actions.dummy_action.scale_percent\",\n                   Stats::Gauge::ImportMode::Accumulate);\n\n  factory3_.monitor_->setPressure(0.5);\n  timer_cb_();\n\n  
EXPECT_THAT(action_state, AllOf(Property(&OverloadActionState::isSaturated, false),\n                                  Property(&OverloadActionState::value, 0)));\n  EXPECT_EQ(0, active_gauge.value());\n  EXPECT_EQ(0, scale_percent_gauge.value());\n\n  // The trigger for fake_resource3 is a scaled trigger with a min of 0.5 and a max of 0.8. Set the\n  // current pressure value to halfway in that range.\n  factory3_.monitor_->setPressure(0.65);\n  timer_cb_();\n\n  EXPECT_EQ(action_state.value(), 0.5 /* = (0.65 - 0.5) / (0.8 - 0.5) */);\n  EXPECT_EQ(0, active_gauge.value());\n  EXPECT_EQ(50, scale_percent_gauge.value());\n\n  factory3_.monitor_->setPressure(0.8);\n  timer_cb_();\n\n  EXPECT_TRUE(action_state.isSaturated());\n  EXPECT_EQ(1, active_gauge.value());\n  EXPECT_EQ(100, scale_percent_gauge.value());\n\n  factory3_.monitor_->setPressure(0.9);\n  timer_cb_();\n\n  EXPECT_TRUE(action_state.isSaturated());\n  EXPECT_EQ(1, active_gauge.value());\n  EXPECT_EQ(100, scale_percent_gauge.value());\n}\n\nTEST_F(OverloadManagerImplTest, FailedUpdates) {\n  setDispatcherExpectation();\n  auto manager(createOverloadManager(getConfig()));\n  manager->start();\n  Stats::Counter& failed_updates =\n      stats_.counter(\"overload.envoy.resource_monitors.fake_resource1.failed_updates\");\n\n  factory1_.monitor_->setError();\n  timer_cb_();\n  EXPECT_EQ(1, failed_updates.value());\n  timer_cb_();\n  EXPECT_EQ(2, failed_updates.value());\n\n  manager->stop();\n}\n\nTEST_F(OverloadManagerImplTest, AggregatesMultipleResourceUpdates) {\n  setDispatcherExpectation();\n  auto manager(createOverloadManager(getConfig()));\n  manager->start();\n\n  const OverloadActionState& action_state =\n      manager->getThreadLocalOverloadState().getState(\"envoy.overload_actions.dummy_action\");\n\n  factory1_.monitor_->setUpdateAsync(true);\n\n  // Monitor 2 will respond immediately at the timer callback, but that won't push an update to the\n  // thread-local state because monitor 1 hasn't finished 
its update yet.\n  factory2_.monitor_->setPressure(1.0);\n  timer_cb_();\n\n  EXPECT_FALSE(action_state.isSaturated());\n\n  // Once the last monitor publishes, the change to the action takes effect.\n  factory1_.monitor_->publishUpdate();\n  EXPECT_TRUE(action_state.isSaturated());\n}\n\nTEST_F(OverloadManagerImplTest, DelayedUpdatesAreCoalesced) {\n  setDispatcherExpectation();\n  auto manager(createOverloadManager(getConfig()));\n  manager->start();\n\n  const OverloadActionState& action_state =\n      manager->getThreadLocalOverloadState().getState(\"envoy.overload_actions.dummy_action\");\n\n  factory3_.monitor_->setUpdateAsync(true);\n  factory4_.monitor_->setUpdateAsync(true);\n\n  timer_cb_();\n  // When monitor 3 publishes its update, the action won't be visible to the thread-local state\n  factory3_.monitor_->setPressure(0.6);\n  factory3_.monitor_->publishUpdate();\n  EXPECT_EQ(action_state.value(), 0.0);\n\n  // Now when monitor 4 publishes a larger value, the update from monitor 3 is skipped.\n  EXPECT_FALSE(action_state.isSaturated());\n  factory4_.monitor_->setPressure(0.65);\n  factory4_.monitor_->publishUpdate();\n  EXPECT_EQ(action_state.value(), 0.5 /* = (0.65 - 0.5) / (0.8 - 0.5) */);\n}\n\nTEST_F(OverloadManagerImplTest, FlushesUpdatesEvenWithOneUnresponsive) {\n  setDispatcherExpectation();\n  auto manager(createOverloadManager(getConfig()));\n  manager->start();\n\n  const OverloadActionState& action_state =\n      manager->getThreadLocalOverloadState().getState(\"envoy.overload_actions.dummy_action\");\n\n  // Set monitor 1 to async, but never publish updates for it.\n  factory1_.monitor_->setUpdateAsync(true);\n\n  // Monitor 2 will respond immediately at the timer callback, but that won't push an update to the\n  // thread-local state because monitor 1 hasn't finished its update yet.\n  factory2_.monitor_->setPressure(1.0);\n  timer_cb_();\n\n  EXPECT_FALSE(action_state.isSaturated());\n  // A second timer callback will flush the update 
from monitor 2, even though monitor 1 is\n  // unresponsive.\n  timer_cb_();\n  EXPECT_TRUE(action_state.isSaturated());\n}\n\nTEST_F(OverloadManagerImplTest, SkippedUpdates) {\n  setDispatcherExpectation();\n\n  auto manager(createOverloadManager(getConfig()));\n  manager->start();\n  Stats::Counter& skipped_updates =\n      stats_.counter(\"overload.envoy.resource_monitors.fake_resource1.skipped_updates\");\n  Stats::Gauge& pressure_gauge1 =\n      stats_.gauge(\"overload.envoy.resource_monitors.fake_resource1.pressure\",\n                   Stats::Gauge::ImportMode::NeverImport);\n\n  factory1_.monitor_->setUpdateAsync(true);\n  EXPECT_EQ(0, pressure_gauge1.value());\n  factory1_.monitor_->setPressure(0.3);\n\n  timer_cb_();\n  EXPECT_EQ(0, skipped_updates.value());\n  timer_cb_();\n  EXPECT_EQ(1, skipped_updates.value());\n  timer_cb_();\n  EXPECT_EQ(2, skipped_updates.value());\n\n  factory1_.monitor_->publishUpdate();\n  EXPECT_EQ(30, pressure_gauge1.value());\n\n  timer_cb_();\n  EXPECT_EQ(2, skipped_updates.value());\n\n  manager->stop();\n}\n\nTEST_F(OverloadManagerImplTest, DuplicateResourceMonitor) {\n  const std::string config = R\"EOF(\n    resource_monitors {\n      name: \"envoy.resource_monitors.fake_resource1\"\n    }\n    resource_monitors {\n      name: \"envoy.resource_monitors.fake_resource1\"\n    }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException,\n                          \"Duplicate resource monitor .*\");\n}\n\nTEST_F(OverloadManagerImplTest, DuplicateOverloadAction) {\n  const std::string config = R\"EOF(\n    actions {\n      name: \"envoy.overload_actions.dummy_action\"\n    }\n    actions {\n      name: \"envoy.overload_actions.dummy_action\"\n    }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException,\n                          \"Duplicate overload action .*\");\n}\n\n// A scaled trigger action's thresholds must conform to scaling < 
saturation.\nTEST_F(OverloadManagerImplTest, ScaledTriggerSaturationLessThanScalingThreshold) {\n  const std::string config = R\"EOF(\n    resource_monitors {\n      name: \"envoy.resource_monitors.fake_resource1\"\n    }\n    actions {\n      name: \"envoy.overload_actions.dummy_action\"\n      triggers {\n        name: \"envoy.resource_monitors.fake_resource1\"\n        scaled {\n          scaling_threshold: 0.9\n          saturation_threshold: 0.8\n        }\n      }\n    }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException,\n                          \"scaling_threshold must be less than saturation_threshold.*\");\n}\n\n// A scaled trigger action can't have threshold values that are equal.\nTEST_F(OverloadManagerImplTest, ScaledTriggerThresholdsEqual) {\n  const std::string config = R\"EOF(\n    resource_monitors {\n      name: \"envoy.resource_monitors.fake_resource1\"\n    }\n    actions {\n      name: \"envoy.overload_actions.dummy_action\"\n      triggers {\n        name: \"envoy.resource_monitors.fake_resource1\"\n        scaled {\n          scaling_threshold: 0.9\n          saturation_threshold: 0.9\n        }\n      }\n    }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException,\n                          \"scaling_threshold must be less than saturation_threshold.*\");\n}\n\nTEST_F(OverloadManagerImplTest, UnknownTrigger) {\n  const std::string config = R\"EOF(\n    actions {\n      name: \"envoy.overload_actions.dummy_action\"\n      triggers {\n        name: \"envoy.resource_monitors.fake_resource1\"\n        threshold {\n          value: 0.9\n        }\n      }\n    }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException,\n                          \"Unknown trigger resource .*\");\n}\n\nTEST_F(OverloadManagerImplTest, DuplicateTrigger) {\n  const std::string config = R\"EOF(\n    resource_monitors {\n      name: 
\"envoy.resource_monitors.fake_resource1\"\n    }\n    actions {\n      name: \"envoy.overload_actions.dummy_action\"\n      triggers {\n        name: \"envoy.resource_monitors.fake_resource1\"\n        threshold {\n          value: 0.9\n        }\n      }\n      triggers {\n        name: \"envoy.resource_monitors.fake_resource1\"\n        threshold {\n          value: 0.8\n        }\n      }\n    }\n  )EOF\";\n\n  EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException, \"Duplicate trigger .*\");\n}\n\nTEST_F(OverloadManagerImplTest, Shutdown) {\n  setDispatcherExpectation();\n\n  auto manager(createOverloadManager(getConfig()));\n  manager->start();\n\n  EXPECT_CALL(*timer_, disableTimer());\n  manager->stop();\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/server_corpus/api_boost_crash",
    "content": "node {\n  client_features: \"&\"\n}\nstatic_resources {\n  listeners {\n    name: \"          \"\n    address {\n      pipe {\n        path: \"aa\\000\"\n      }\n    }\n    transparent {\n    }\n  }\n}\nstats_sinks {\n  typed_config {\n    [type.googleapis.com/envoy.api.v2.route.Route] {\n      route {\n        retry_policy {\n          retriable_status_codes: 115\n          retriable_status_codes: 116\n          retriable_status_codes: 97\n          retriable_status_codes: 116\n          retriable_status_codes: 101\n          retriable_status_codes: 113\n          retriable_status_codes: 32\n          retriable_status_codes: 123\n          retriable_status_codes: 40\n          retriable_status_codes: 36\n          retriable_status_codes: 32\n          retriable_status_codes: 42\n          retriable_status_codes: 115\n          retriable_status_codes: 116\n          retriable_status_codes: 97\n          retriable_status_codes: 116\n          retriable_status_codes: 101\n          retriable_status_codes: 113\n          retriable_status_codes: 32\n          retriable_status_codes: 123\n          retriable_status_codes: 40\n          retriable_status_codes: 36\n          retriable_status_codes: 32\n          retriable_status_codes: 32\n          retriable_status_codes: 99\n          retriable_status_codes: 108\n          retriable_status_codes: 117\n          retriable_status_codes: 115\n          retriable_status_codes: 116\n          retriable_status_codes: 101\n          retriable_status_codes: 114\n          retriable_status_codes: 123\n          retriable_status_codes: 115\n        }\n      }\n    }\n  }\n}\nstats_sinks {\n  typed_config {\n    [type.googleapis.com/envoy.api.v2.route.Route] {\n      route {\n        retry_policy {\n          retriable_status_codes: 115\n          retriable_status_codes: 116\n          retriable_status_codes: 97\n          retriable_status_codes: 116\n          retriable_status_codes: 105\n          
retriable_status_codes: 99\n          retriable_status_codes: 95\n          retriable_status_codes: 114\n          retriable_status_codes: 101\n          retriable_status_codes: 115\n          retriable_status_codes: 111\n          retriable_status_codes: 117\n          retriable_status_codes: 114\n          retriable_status_codes: 99\n          retriable_status_codes: 65\n          retriable_status_codes: 101\n          retriable_status_codes: 32\n          retriable_status_codes: 99\n          retriable_status_codes: 115\n          retriable_status_codes: 116\n          retriable_status_codes: 97\n          retriable_status_codes: 32\n          retriable_status_codes: 32\n          retriable_status_codes: 99\n          retriable_status_codes: 115\n          retriable_status_codes: 116\n          retriable_status_codes: 97\n        }\n      }\n    }\n  }\n}\nstats_sinks {\n  typed_config {\n    [type.googleapis.com/envoy.api.v2.route.Route] {\n      route {\n        retry_policy {\n          retriable_status_codes: 115\n          retriable_status_codes: 116\n          retriable_status_codes: 97\n          retriable_status_codes: 116\n          retriable_status_codes: 101\n          retriable_status_codes: 113\n          retriable_status_codes: 32\n          retriable_status_codes: 123\n          retriable_status_codes: 40\n          retriable_status_codes: 36\n          retriable_status_codes: 32\n          retriable_status_codes: 42\n          retriable_status_codes: 115\n          retriable_status_codes: 116\n          retriable_status_codes: 97\n          retriable_status_codes: 116\n          retriable_status_codes: 101\n          retriable_status_codes: 113\n          retriable_status_codes: 32\n          retriable_status_codes: 123\n          retriable_status_codes: 40\n          retriable_status_codes: 36\n          retriable_status_codes: 32\n          retriable_status_codes: 32\n          retriable_status_codes: 99\n          retriable_status_codes: 108\n 
         retriable_status_codes: 117\n          retriable_status_codes: 115\n          retriable_status_codes: 116\n          retriable_status_codes: 101\n          retriable_status_codes: 114\n          retriable_status_codes: 123\n          retriable_status_codes: 115\n        }\n      }\n    }\n  }\n}"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-4788023076847616",
    "content": "static_resources {\n  clusters {\n    name: \";\"\n    connect_timeout {\n      seconds: 2304\n      nanos: 132\n    }\n    health_checks {\n      timeout {\n        nanos: 262144\n      }\n      interval {\n        seconds: 2559\n        nanos: 67154560\n      }\n      unhealthy_threshold {\n        value: 122\n      }\n      healthy_threshold {\n        value: 1728053248\n      }\n      alt_port {\n        value: 4\n      }\n      http_health_check {\n        path: \"0.0.0.0\"\n        receive {\n          text: \"@B\\017\\000\\000\\000\\000\\000\"\n        }\n        request_headers_to_add {\n          header {\n            key: \";\"\n            value: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n          }\n          append {\n            value: true\n          }\n        }\n        request_headers_to_remove: \";x\"\n        codec_client_type: HTTP3\n      }\n      no_traffic_interval {\n        nanos: 917760\n      }\n      unhealthy_edge_interval {\n        seconds: 2559\n        nanos: 16384\n      }\n      event_log_path: \"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\"\n      interval_jitter_percent: 524288\n      tls_options {\n        alpn_protocols: \"/\"\n      }\n    }\n    http2_protocol_options {\n    }\n    upstream_bind_config {\n      source_address {\n        address: \"0.0.0.0\"\n        port_value: 0\n      }\n      freebind {\n        value: true\n      }\n    }\n    common_http_protocol_options {\n      idle_timeout {\n        seconds: 2304\n        nanos: 132\n      }\n    }\n    load_assignment {\n      cluster_name: 
\"domains\"\n      endpoints {\n        locality {\n          zone: \"6\"\n        }\n        load_balancing_weight {\n          value: 122\n        }\n        priority: 122\n      }\n      endpoints {\n        lb_endpoints {\n          endpoint {\n            address {\n              socket_address {\n                protocol: UDP\n                address: \"0.0.0.0\"\n                port_value: 122\n              }\n            }\n          }\n          health_status: TIMEOUT\n          load_balancing_weight {\n            value: 8960\n          }\n        }\n        priority: 122\n      }\n      endpoints {\n        locality {\n          zone: \"\\n\\000\\000\\000\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              socket_address {\n                address: \"0.0.0.0\"\n                port_value: 0\n              }\n            }\n          }\n          health_status: TIMEOUT\n        }\n        load_balancing_weight {\n          value: 122\n        }\n        priority: 122\n      }\n      endpoints {\n        locality {\n          sub_zone: \"|\"\n        }\n        priority: 122\n        proximity {\n          value: 664184\n        }\n      }\n      endpoints {\n        locality {\n          zone: \"77777777\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              socket_address {\n                address: \"0.0.0.1\"\n                port_value: 0\n                ipv4_compat: true\n              }\n            }\n          }\n          health_status: TIMEOUT\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              socket_address {\n                address: \"0.0.0.0\"\n                port_value: 0\n              }\n            }\n          }\n          health_status: TIMEOUT\n        }\n        load_balancing_weight {\n          value: 1728053248\n        }\n        priority: 106\n      }\n      policy {\n        endpoint_stale_after 
{\n          nanos: 262144\n        }\n      }\n    }\n    dns_failure_refresh_rate {\n      base_interval {\n        seconds: 8\n        nanos: 812933685\n      }\n    }\n    upstream_http_protocol_options {\n    }\n  }\n}\ncluster_manager {\n  load_stats_config {\n    transport_api_version: V3\n  }\n}\nstats_sinks {\n  name: \"type.googleapis.com/envoy.api.v2.route.Route\"\n  typed_config {\n    type_url: \"IIIIIIIIIIIIIIII\"\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-5067970991095808",
    "content": "static_resources {\n  clusters {\n    name: \"6\"\n    connect_timeout {\n      seconds: 2321\n    }\n    lb_policy: MAGLEV\n    hosts {\n      pipe {\n        path: \"=\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"=\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"t\"\n      }\n    }\n    max_requests_per_connection {\n      value: 67108864\n    }\n    dns_lookup_family: V4_ONLY\n    outlier_detection {\n      success_rate_minimum_hosts {\n        value: 4096\n      }\n    }\n    common_lb_config {\n      locality_weighted_lb_config {\n      }\n    }\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J :2222222222222222222222222\\022\"\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J.:*static\\'_resourc\\022es {(\\n  cluster`s\"\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J :2222222222222222222222221\\022\"\n  }\n}\nstats_flush_interval {\n  nanos: 2883584\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-5664687524413440",
    "content": "node {\n  id: \",\"\n  cluster: \"0\"\n  locality {\n    zone: \"0\"\n    sub_zone: \"0\"\n  }\n}\nstatic_resources {\n  listeners {\n    address {\n      pipe {\n        path: \"name\"\n      }\n    }\n    filter_chains {\n      filter_chain_match {\n        source_prefix_ranges {\n          address_prefix: \"\\177\"\n        }\n        transport_protocol: \"\\177\"\n      }\n    }\n    tcp_fast_open_queue_length {\n      value: 99\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"name\"\n      }\n    }\n    filter_chains {\n    }\n    tcp_fast_open_queue_length {\n      value: 99\n    }\n  }\n  listeners {\n    name: \"listener_0\"\n    address {\n      pipe {\n        path: \"name\"\n      }\n    }\n    filter_chains {\n      tls_context {\n        common_tls_context {\n          tls_params {\n            tls_minimum_protocol_version: TLSv1_2\n            tls_maximum_protocol_version: TLSv1_2\n          }\n          tls_certificate_sds_secret_configs {\n            sds_config {\n              api_config_source {\n                api_type: REST\n                grpc_services {\n                  google_grpc {\n                    target_uri: \"name\"\n                    stat_prefix: \":\"\n                  }\n                }\n              }\n            }\n          }\n        }\n        require_client_certificate {\n        }\n        require_sni {\n        }\n      }\n    }\n    filter_chains {\n      filter_chain_match {\n        source_prefix_ranges {\n          address_prefix: \"\\177\"\n        }\n      }\n    }\n    transparent {\n      value: true\n    }\n    listener_filters_timeout {\n      nanos: 1024\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"name\"\n      }\n    }\n    filter_chains {\n      filter_chain_match {\n      }\n    }\n    filter_chains {\n      filter_chain_match {\n        source_prefix_ranges {\n          address_prefix: \"\\177\"\n        }\n      }\n      
tls_context {\n        common_tls_context {\n          tls_params {\n            tls_minimum_protocol_version: TLSv1_2\n            tls_maximum_protocol_version: TLSv1_2\n          }\n          tls_certificate_sds_secret_configs {\n            name: \"\\177\"\n            sds_config {\n              api_config_source {\n                api_type: REST\n                grpc_services {\n                  google_grpc {\n                    target_uri: \"name\"\n                    stat_prefix: \":\"\n                  }\n                }\n              }\n            }\n          }\n        }\n        require_client_certificate {\n        }\n        require_sni {\n        }\n      }\n    }\n    tcp_fast_open_queue_length {\n      value: 99\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"name\"\n      }\n    }\n    filter_chains {\n      filter_chain_match {\n      }\n    }\n    filter_chains {\n      filter_chain_match {\n        source_prefix_ranges {\n          address_prefix: \"\\177\"\n        }\n      }\n      tls_context {\n        common_tls_context {\n          tls_params {\n            tls_minimum_protocol_version: TLSv1_2\n            tls_maximum_protocol_version: TLSv1_2\n          }\n          tls_certificate_sds_secret_configs {\n            name: \"\\177\"\n            sds_config {\n              api_config_source {\n                api_type: REST\n                grpc_services {\n                  google_grpc {\n                    target_uri: \"name\"\n                    stat_prefix: \":\"\n                  }\n                }\n              }\n            }\n          }\n        }\n        require_client_certificate {\n        }\n        require_sni {\n        }\n      }\n    }\n    tcp_fast_open_queue_length {\n      value: 99\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"name\"\n      }\n    }\n    filter_chains {\n      filter_chain_match {\n        source_prefix_ranges {\n          
address_prefix: \"\\177\"\n        }\n      }\n    }\n    tcp_fast_open_queue_length {\n      value: 99\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"name\"\n      }\n    }\n    filter_chains {\n      filter_chain_match {\n        source_prefix_ranges {\n          address_prefix: \"\\177\"\n        }\n      }\n    }\n    tcp_fast_open_queue_length {\n      value: 99\n    }\n  }\n}\nflags_path: \" \"\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-5697041979146240",
    "content": "static_resources {\n  clusters {\n    name: \"ineasrh_stsB\"\n    eds_cluster_config {\n      service_name: \"\\177\"\n    }\n    connect_timeout {\n      nanos: 249999905\n    }\n    dns_refresh_rate {\n      nanos: 249999905\n    }\n    dns_lookup_family: V4_ONLY\n    load_assignment {\n      cluster_name: \"GGG\"\n      endpoints {\n        priority: 538970624\n      }\n      endpoints {\n        lb_endpoints {\n          load_balancing_weight {\n            value: 2\n          }\n        }\n        priority: 11264\n      }\n      endpoints {\n        priority: 246\n      }\n      endpoints {\n        priority: 538970624\n      }\n      endpoints {\n        priority: 2\n      }\n      endpoints {\n        priority: 538970624\n      }\n      endpoints {\n        priority: 2105354\n      }\n      endpoints {\n        lb_endpoints {\n          load_balancing_weight {\n            value: 2\n          }\n        }\n        priority: 11264\n      }\n      endpoints {\n        priority: 671091188\n      }\n      endpoints {\n        priority: 2105354\n      }\n      endpoints {\n        priority: 11264\n      }\n      endpoints {\n        lb_endpoints {\n        }\n        load_balancing_weight {\n          value: 2\n        }\n        priority: 11264\n      }\n      endpoints {\n        priority: 538970624\n      }\n      endpoints {\n        priority: 538970624\n      }\n      endpoints {\n        priority: 538970624\n      }\n      endpoints {\n        priority: 11264\n      }\n      endpoints {\n        priority: 2105354\n      }\n      endpoints {\n        priority: 671091190\n      }\n      endpoints {\n        priority: 671151625\n      }\n      endpoints {\n        priority: 671151625\n      }\n      endpoints {\n        lb_endpoints {\n          load_balancing_weight {\n            value: 2\n          }\n        }\n        priority: 11264\n      }\n      endpoints {\n        lb_endpoints {\n          load_balancing_weight {\n            value: 
2\n          }\n        }\n        priority: 11264\n      }\n      endpoints {\n        priority: 671091190\n      }\n      endpoints {\n        priority: 538976256\n      }\n      endpoints {\n        priority: 671091188\n      }\n      endpoints {\n        priority: 671091188\n      }\n      endpoints {\n        priority: 11264\n      }\n      endpoints {\n        lb_endpoints {\n          health_status: DRAINING\n          load_balancing_weight {\n            value: 2\n          }\n        }\n        priority: 11264\n      }\n      endpoints {\n        lb_endpoints {\n          load_balancing_weight {\n            value: 2\n          }\n        }\n        priority: 11264\n      }\n      endpoints {\n        priority: 11264\n      }\n      endpoints {\n        priority: 538970624\n      }\n      endpoints {\n        lb_endpoints {\n          load_balancing_weight {\n            value: 2\n          }\n        }\n        priority: 11264\n      }\n      endpoints {\n        priority: 11264\n      }\n      endpoints {\n        lb_endpoints {\n          load_balancing_weight {\n            value: 2\n          }\n        }\n        priority: 738208768\n      }\n      endpoints {\n        priority: 671151625\n      }\n      endpoints {\n        priority: 671091188\n      }\n      endpoints {\n        priority: 30\n      }\n      endpoints {\n        priority: 671151625\n      }\n      endpoints {\n        lb_endpoints {\n        }\n        load_balancing_weight {\n          value: 64\n        }\n        priority: 2\n      }\n      endpoints {\n        lb_endpoints {\n          health_status: DRAINING\n        }\n        priority: 11264\n      }\n      endpoints {\n        lb_endpoints {\n        }\n        priority: 11264\n      }\n      endpoints {\n        locality {\n          zone: \"\\177\\r\"\n        }\n        priority: 538970624\n      }\n      endpoints {\n        priority: 671091190\n      }\n      endpoints {\n        priority: 244\n      }\n      endpoints 
{\n        priority: 538970624\n      }\n      endpoints {\n        locality {\n          region: \"~\"\n        }\n        lb_endpoints {\n        }\n        priority: 11264\n      }\n      endpoints {\n        priority: 2\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"/tmp/admin_access.log\"\n  address {\n    pipe {\n      path: \"*\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-5729922022113280",
    "content": "static_resources {\n  clusters {\n    name: \"i?aress_http\"\n    type: LOGICAL_DNS\n    connect_timeout {\n      seconds: 2304\n      nanos: 707406378\n    }\n    tls_context {\n      common_tls_context {\n        alpn_protocols: \"\"\n        alpn_protocols: \"\"\n        combined_validation_context {\n          default_validation_context {\n            trusted_ca {\n              inline_bytes: \"\\302\\000\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\000\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\"\n            }\n            require_signed_certificate_timestamp {\n            }\n          }\n          validation_context_sds_secret_config {\n          }\n        }\n      }\n      sni: \"mane\"\n    }\n    alt_stat_name: \"[\"\n  }\n  clusters {\n    name: \"i?aress_http\"\n    type: LOGICAL_DNS\n    connect_timeout {\n      nanos: 538970747\n    }\n    hosts {\n      pipe {\n        path: \"\\000\\000\\000\\000`\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"2\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"`\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"2\"\n      }\n    }\n    tls_context {\n      common_tls_context {\n        combined_validation_context {\n          default_validation_context {\n            trusted_ca {\n              inline_bytes: \"\\302\\000\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\000\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\"\n            }\n            require_signed_certificate_timestamp {\n            }\n          }\n          validation_context_sds_secret_config {\n            sds_config {\n              api_config_source {\n              
  refresh_delay {\n                  seconds: 8391050737688276324\n                  nanos: 64\n                }\n              }\n            }\n          }\n        }\n      }\n    }\n    http_protocol_options {\n      allow_absolute_url {\n        value: true\n      }\n    }\n    alt_stat_name: \"[\"\n    upstream_connection_options {\n      tcp_keepalive {\n        keepalive_probes {\n          value: 589824\n        }\n      }\n    }\n  }\n  clusters {\n    name: \"p`ne\"\n    type: STRICT_DNS\n    connect_timeout {\n      nanos: 707406378\n    }\n    hosts {\n      pipe {\n        path: \"2\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"`\"\n      }\n    }\n    tls_context {\n      common_tls_context {\n        alpn_protocols: \"\"\n        alpn_protocols: \"\"\n        combined_validation_context {\n          default_validation_context {\n            trusted_ca {\n              inline_bytes: \"\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\\302\"\n            }\n            require_signed_certificate_timestamp {\n            }\n          }\n          validation_context_sds_secret_config {\n            sds_config {\n              api_config_source {\n                refresh_delay {\n                  seconds: 8391050737688276324\n                  nanos: 64\n                }\n              }\n            }\n          }\n        }\n      }\n      sni: \"mane\"\n    }\n    http_protocol_options {\n      allow_absolute_url {\n        value: true\n      }\n    }\n    alt_stat_name: \"[\"\n  }\n  secrets {\n    validation_context {\n    }\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"2\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks 
{\n  name: \"Z\"\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"type.googleapis.com/envoy.api.v2.route.Route\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\nJ1\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J.:*static\\'_resourc\\022es {(\\n  cluster`s\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"type.googleapis.com/envoy.api.v2.route.Route\"\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"type.googleapis.com/envoy.api.v2.route.Route\"\n  }\n}\nstats_sinks {\n  name: \"]\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"type.googleatis.com/google.protobuf.Value\"\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J.:*static_resourc\\001es {(\\n  cluster`s\"\n  }\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J :2222622222222222222222221\\022\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"\\013\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\nJ1\"\n  }\n}\nstats_sinks {\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-5747944989392896",
    "content": "static_resources {\n  clusters {\n    name: \"servi_e_ervile\"\n    connect_timeout {\n      seconds: 2304\n      nanos: 250000000\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \"\\000\\000\\020\\000\\000\\000\\000\\000\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"{\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"z\"\n      }\n    }\n    dns_lookup_family: V4_ONLY\n    lb_subset_config {\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors {\n      }\n      subset_selectors {\n      }\n    }\n    common_lb_config {\n      healthy_panic_threshold {\n        value: 9.88131291682493e-324\n      }\n      locality_weighted_lb_config {}\n    }\n    drain_connections_on_host_removal: true\n  }\n  clusters {\n    name: \"{\"\n    connect_timeout {\n      seconds: 2304\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \"z\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"A\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"1\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"A\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"z\"\n      }\n    }\n    dns_lookup_family: V6_ONLY\n    outlier_detection {\n      success_rate_stdev_factor {\n        value: 589951\n      }\n    }\n    lb_subset_config {\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors {\n      }\n    }\n    common_lb_config {\n      locality_weighted_lb_config {}\n    }\n    upstream_connection_options {\n      tcp_keepalive {\n        keepalive_probes {\n          value: 589951\n        }\n      }\n    }\n    drain_connections_on_host_removal: true\n  }\n  clusters {\n    name: \"|\"\n    connect_timeout {\n      seconds: 2304\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \"j\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"1\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"6\"\n      }\n    
}\n    hosts {\n      pipe {\n        path: \"A\"\n      }\n    }\n    dns_lookup_family: V4_ONLY\n    outlier_detection {\n      success_rate_stdev_factor {\n        value: 4294443006\n      }\n    }\n    lb_subset_config {\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors {\n      }\n      locality_weight_aware: true\n    }\n    common_lb_config {\n      locality_weighted_lb_config {}\n    }\n    upstream_connection_options {\n      tcp_keepalive {\n        keepalive_probes {\n          value: 589951\n        }\n      }\n    }\n    drain_connections_on_host_removal: true\n  }\n  clusters {\n    name: \"z\"\n    connect_timeout {\n      seconds: 2304\n    }\n    lb_policy: RANDOM\n    hosts {\n      pipe {\n        path: \"\\001\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"z\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"@\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"{\"\n      }\n    }\n    dns_lookup_family: V4_ONLY\n    upstream_connection_options {\n    }\n  }\n  clusters {\n    name: \"8\"\n    connect_timeout {\n      seconds: 2304\n      nanos: 250000000\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \"{\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"\\001\"\n      }\n    }\n    circuit_breakers {\n      thresholds {\n        max_requests {\n          value: 4294443006\n        }\n      }\n    }\n    dns_lookup_family: V4_ONLY\n    outlier_detection {\n      success_rate_stdev_factor {\n        value: 589951\n      }\n    }\n    lb_subset_config {\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors {\n      }\n      locality_weight_aware: true\n      scale_locality_weight: true\n    }\n    common_lb_config {\n      locality_weighted_lb_config {}\n    }\n    alt_stat_name: \"B\"\n    upstream_connection_options {\n    }\n    drain_connections_on_host_removal: true\n  }\n  clusters {\n    name: \"\\001\"\n    connect_timeout {\n      seconds: 2304\n 
     nanos: 250000111\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \"\\001\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"z\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"\\001\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"@\"\n      }\n    }\n    dns_lookup_family: V4_ONLY\n    lb_subset_config {\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors {\n      }\n      subset_selectors {\n      }\n      locality_weight_aware: true\n    }\n    common_lb_config {\n      locality_weighted_lb_config {}\n    }\n    upstream_connection_options {\n    }\n  }\n}\nadmin {\n  access_log_path: \"/tmpoadmin_access.log\"\n  address {\n    pipe {\n      path: \"name\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-6287096397430784",
    "content": "static_resources {\n  clusters {\n    name: \" \"\n    connect_timeout {\n      seconds: 2304\n    }\n    per_connection_buffer_limit_bytes {\n      value: 209\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \"z\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \" \"\n      }\n    }\n    hosts {\n      pipe {\n        path: \";\"\n      }\n    }\n    dns_lookup_family: V4_ONLY\n    outlier_detection {\n      success_rate_stdev_factor {\n        value: 268435456\n      }\n    }\n  }\n  clusters {\n    name: \"@\"\n    connect_timeout {\n      seconds: 2304\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \"@\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"X\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"@\"\n      }\n    }\n    dns_lookup_family: V4_ONLY\n    outlier_detection {\n      success_rate_stdev_factor {\n        value: 589951\n      }\n    }\n  }\n  clusters {\n    name: \"#\"\n    connect_timeout {\n      seconds: 2304\n      nanos: 235995425\n    }\n    lb_policy: MAGLEV\n    dns_lookup_family: V4_ONLY\n    cleanup_interval {\n      nanos: 235995425\n    }\n    upstream_connection_options {\n      tcp_keepalive {\n        keepalive_probes {\n          value: 589824\n        }\n      }\n    }\n  }\n  clusters {\n    name: \"X\"\n    connect_timeout {\n      seconds: 2304\n    }\n    outlier_detection {\n      success_rate_stdev_factor {\n        value: 589951\n      }\n    }\n    lb_subset_config {\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors {\n      }\n      locality_weight_aware: true\n    }\n    ring_hash_lb_config {\n    }\n  }\n  clusters {\n    name: \"0\"\n    connect_timeout {\n      seconds: 2304\n    }\n    outlier_detection {\n      success_rate_stdev_factor {\n        value: 589951\n      }\n    }\n    lb_subset_config {\n      fallback_policy: ANY_ENDPOINT\n      subset_selectors {\n      }\n      locality_weight_aware: 
true\n    }\n  }\n  clusters {\n    name: \"`\"\n    connect_timeout {\n      seconds: 2304\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \";\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \";\"\n      }\n    }\n    dns_lookup_family: V4_ONLY\n    outlier_detection {\n      success_rate_stdev_factor {\n        value: 589951\n      }\n    }\n    lb_subset_config {\n      default_subset {\n        fields {\n          key: \"\"\n          value {\n            bool_value: true\n          }\n        }\n      }\n    }\n    upstream_connection_options {\n      tcp_keepalive {\n        keepalive_probes {\n          value: 589824\n        }\n      }\n    }\n    close_connections_on_host_health_failure: true\n    drain_connections_on_host_removal: true\n  }\n  clusters {\n    name: \"z\"\n    connect_timeout {\n      seconds: 2304\n    }\n    hosts {\n      pipe {\n        path: \"*\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"5\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"z\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"@\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"z\"\n      }\n    }\n    upstream_connection_options {\n      tcp_keepalive {\n        keepalive_probes {\n          value: 589824\n        }\n      }\n    }\n    load_assignment {\n      cluster_name: \" \"\n      endpoints {\n        locality {\n          region: \" \"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              pipe {\n                path: \"\\n\\000\\000\\000\"\n              }\n            }\n            health_check_config {\n              port_value: 10878976\n            }\n          }\n          health_status: TIMEOUT\n        }\n      }\n      endpoints {\n        lb_endpoints {\n          endpoint {\n            health_check_config {\n              port_value: 41216\n            }\n          }\n          health_status: TIMEOUT\n        }\n      
  priority: 41216\n      }\n      endpoints {\n        locality {\n          region: \"\\027\"\n        }\n        lb_endpoints {\n        }\n      }\n      endpoints {\n        priority: 41216\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"/tmp/admin_access.lss\"\n  address {\n    pipe {\n      path: \"*\"\n    }\n  }\n}\n\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5118008002871296",
    "content": "static_resources {   listeners {     name: \"          \"     address {       pipe {         path: \"\\000\"       }     }     transparent {     }   } } stats_sinks {   typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J.:*stateq {($ *stateq {($  cluster{s\"\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J.:*static_resourcAe csta  csta\"\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J.:*stateq {($ *stateq {($  cluster{s\"\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5118008002871297",
    "content": "static_resources {   listeners {     name: \"          \"     address {       pipe {         path: \"aa\\000\"       }     }     transparent {     }   } } stats_sinks {   typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J.:*stateq {($ *stateq {($  cluster{s\"\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J.:*static_resourcAe csta  csta\"\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J.:*stateq {($ *stateq {($  cluster{s\"\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5186283155750912",
    "content": "static_resources {\n  clusters {\n    name: \"www.google.com\"\n    connect_timeout {\n      nanos: 61\n    }\n    http2_protocol_options {\n      initial_stream_window_size {\n        value: 917504\n      }\n      initial_connection_window_size {\n        value: 1952382976\n      }\n      allow_connect: true\n      max_outbound_control_frames {\n        value: 1952382976\n      }\n      stream_error_on_invalid_http_messaging: true\n      custom_settings_parameters {\n        identifier {\n          value: 65536\n        }\n        value {\n          value: 7536640\n        }\n      }\n      custom_settings_parameters {\n        identifier {\n          value: 65536\n        }\n        value {\n          value: 7536640\n        }\n      }\n    }\n    alt_stat_name: \";\"\n    load_assignment {\n      cluster_name: \"domains\"\n      policy {\n        hidden_envoy_deprecated_disable_overprovisioning: true\n      }\n    }\n    lrs_server {\n      path: \":\"\n    }\n  }\n}\ndynamic_resources {\n}\nstats_sinks {\n  hidden_envoy_deprecated_config {\n    fields {\n      key: \"fffffffffffffffffffffffffff\"\n      value {\n      }\n    }\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"J\\004\\022\\002\\010\\001J\\005\\n\\003\\022\\0019J\\004\\022\\002\\010\\001b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000b\\000\"\n  }\n}\nadmin {\n}\nenable_dispatcher_stats: true\nheader_prefix: \"*\"\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5666128418832384",
    "content": "node {   id: \" \"   cluster: \"              \"   build_version: \" \" } static_resources {   clusters {     name: \"              \"     type: STRICT_DNS     connect_timeout {       nanos:  82893184     }     per_connection_buffer_limit_bytes {       value: 268435456     }     lb_policy: RING_HASH     hosts {       pipe {         path: \" \"       }     }     hosts {       pipe {         path: \" \"       }     }     tls_context {       common_tls_context {       }     }   }   clusters {     name: \" \"     connect_timeout {       seconds: 2304     }     lb_policy: RING_HASH     hosts {       pipe {         path: \" \"       }     }     hosts {       pipe {         path: \"             \"       }     }     hosts {       pipe {         path: \" \"       }     }     hosts {       pipe {         path: \" \"       }     }     hosts {       pipe {         path: \" \"       }     }     hosts {       pipe {         path: \" \"       }     }     hosts {       pipe {         path: \" \"       }     }     hosts {       pipe {         path: \" \"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"2\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"s\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"4\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"5\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"1\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"0\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"]\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"8\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"{\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"*\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"4\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \" \"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"{\"\n      }\n    }\n    health_checks {\n      timeout {\n        seconds: 4294901763\n        
nanos: 25\n      }\n      interval {\n        nanos: 25\n      }\n      unhealthy_threshold {\n        value: 268435456\n      }\n      healthy_threshold {\n        value: 655360\n      }\n      http_health_check {\n        path: \":\"\n        service_name: \"0\"\n        use_http2: true\n      }\n      event_log_path: \"c\"\n    }\n    tls_context {\n      common_tls_context {\n        validation_context {\n        }\n        tls_certificate_sds_secret_configs {\n          sds_config {\n            path: \"/\"\n          }\n        }\n      }\n    }\n    alt_stat_name: \"o\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"tsy.googtati\"\n  config {\n    fields {\n      key: \"\\177\"\n      value {\n      }\n    }\n  }\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/googlalue\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n  config {\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"[\"\n}\nstats_sinks {\n  typed_config {\n    type_url: \"[\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"toogti\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"q\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"!\"\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"2\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"2\"\n}\nstats_sinks {\n}\nstats_sinks {\n  name: 
\",\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"2\"\n}\nstats_sinks {\n  name: \"6\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5674078337236992",
    "content": "static_resources {\n  clusters {\n    name: \"p\"\n    connect_timeout {\n      nanos: 786870912\n    }\n    hosts {\n      pipe {\n        path: \"`\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"`\"\n      }\n    }\n    tls_context {\n      common_tls_context {\n        tls_params {\n          ecdh_curves: \"P\"\n          ecdh_curves: \"P\"\n        }\n        tls_certificates {\n          private_key {\n            filename: \"\\001\\000\\000\\t\"\n          }\n          password {\n            inline_string: \"#\\000\\000\\000\\000\\000\\000\\000\"\n          }\n          ocsp_staple {\n            filename: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n          }\n        }\n        tls_certificates {\n          private_key {\n            filename: \"\\001\\000\\000\\t\"\n          }\n          password {\n            inline_string: \"#\\000\\000\\000\\000\\000\\000\\000\"\n          }\n          ocsp_staple {\n            filename: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n          }\n        }\n        tls_certificates {\n          private_key {\n            filename: \"\\001static_resources {\\n  clusters {\\n    name: \\\"z:\\\"\\n    type: STRICT_DNS\\n    connect_timeout {\\n      seconds: -210453399808\\n      nanos: 250002048\\n    }\\n    http2_protocol_options {\\n      allow_connect: true\\n    }\\n    dns_lookup_family: V4_ONLY\\n    common_lb_config {\\n      ignore_new_hosts_until_first_hc: true\\n    }\\n    alt_stat_name: \\\"\\\\020\\\"\\n    close_connections_on_host_health_failure: true\\n    filters {\\n      name: \\\"%\\\"\\n    }\\n  }\\n  clusters {\\n    name: \\\"service_google\\\"\\n    connect_timeout {\\n      nanos\\000: 786870912\\n    }\\n    per_connection_buffer_limit_bytes {\\n      
value: 4143972352\\n    }\\n    lb_policy: RING_HASH\\n    hosts {\\n      pipe {\\n        path: \\\"`\\\"\\n      }\\n    }\\n    hosts {\\n      pipe {\\n        path: \\\"#\\\"\\n      }\\n    }\\n    hosts {\\n      pipe {\\n        path: \\\"`\\\"\\n      }\\n    }\\n    tls_context {\\n      common_tls_conte\\000xt {\\n        tls_params {\\n          ecdh_curves: \\\"P\\\"\\n        }\\n        tls_certificates {\\n          private_key {\\n            filename: \\\"\\\\001\\\\0\\t00\\\\000\\\\t\\\"\\n          }\\n          password {\\n            inline_string: \\\"#\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\"\\n          }\\n          ocsp_staple {\\n            filename: \\\"\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\\000\\\"\\n          }\\n        }\\n        tls_certificates\"\n          }\n          ocsp_staple {\n            filename: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n          }\n        }\n        combined_validation_context {\n          default_validation_context {\n            verify_subject_alt_name: \"\\001\\000\\000\\000\\000\\000\\000\\006\"\n            require_ocsp_staple {\n              value: true\n            }\n            require_signed_certificate_timestamp {\n            }\n          }\n          validation_context_sds_secret_config {\n          }\n        }\n      }\n    }\n    close_connections_on_host_health_failure: true\n  }\n  secrets {\n    validation_context {\n      trusted_ca {\n        inline_bytes: \"\\316\"\n      }\n    }\n  }\n}\ncluster_manager {\n  local_cluster_name: \",\"\n  upstream_bind_config {\n    source_address {\n      address: \"\\032\"\n      port_value: 8192\n      ipv4_compat: true\n    }\n  }\n  load_stats_config {\n    
refresh_delay {\n      seconds: 4611686018427387903\n    }\n    set_node_on_first_message_only: true\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"\\001\\000\\000\\t\"\n    value: \"\\230*Vs{{{{{{{{{{{{{{{{{{{{{{{{{{||{|||{{{{{{{{{{{{{{{{{{{{{{{{{||||||||||||||||||||||||||st|||||||||||||||||||||stt\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\"\n  typed_config {\n    type_url: \"\\001\\000\\000\\t\"\n  }\n}\nwatchdog {\n  miss_timeout {\n    seconds: 2147483707\n  }\n  kill_timeout {\n    seconds: 2046820352\n  }\n}\nhds_config {\n}\nlayered_runtime {\n}\nheader_prefix: \":\"\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5702999713513472",
    "content": "static_resources {\n  listeners {\n    address {\n      pipe {\n        path: \"@\"\n      }\n    }\n  }\n  clusters {\n    name: \"&\"\n    connect_timeout {\n      seconds: 2304\n    }\n    lb_policy: RING_HASH\n    alt_stat_name: \"t\"\n    close_connections_on_host_health_failure: true\n    track_timeout_budgets: true\n  }\n  clusters {\n    name: \"servi|e_gogole\"\n    connect_timeout {\n      seconds: 3\n    }\n    lb_policy: RING_HASH\n    load_assignment {\n      cluster_name: \"domains\"\n      endpoints {\n        priority: 93\n      }\n      endpoints {\n        locality {\n          zone: \"t\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              pipe {\n                path: \";\"\n              }\n            }\n          }\n          health_status: TIMEOUT\n          load_balancing_weight {\n            value: 4294967295\n          }\n        }\n        load_balancing_weight {\n          value: 122\n        }\n      }\n      endpoints {\n        locality {\n          region: \"domains\"\n          sub_zone: \"|\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              pipe {\n                path: \";\"\n              }\n            }\n          }\n          health_status: TIMEOUT\n        }\n        priority: 122\n      }\n      endpoints {\n        locality {\n          zone: \"\\t\"\n        }\n        load_balancing_weight {\n          value: 122\n        }\n        priority: 122\n      }\n      endpoints {\n        locality {\n          sub_zone: \"|\"\n        }\n        priority: 122\n      }\n      endpoints {\n        locality {\n          zone: \"\\t\"\n        }\n        load_balancing_weight {\n          value: 285212672\n        }\n        priority: 122\n      }\n      endpoints {\n        locality {\n        }\n        load_balancing_weight {\n          value: 122\n        }\n        priority: 122\n      }\n      endpoints {\n        
locality {\n          zone: \"\\t\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              socket_address {\n                address: \"0.0.0.0\"\n                port_value: 47\n              }\n            }\n          }\n          health_status: TIMEOUT\n        }\n        load_balancing_weight {\n          value: 122\n        }\n      }\n      endpoints {\n        locality {\n          zone: \"\\t\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              pipe {\n                path: \"@\"\n              }\n            }\n          }\n          health_status: UNHEALTHY\n        }\n        load_balancing_weight {\n          value: 122\n        }\n        priority: 122\n      }\n      endpoints {\n        load_balancing_weight {\n          value: 122\n        }\n        priority: 122\n      }\n      policy {\n      }\n    }\n  }\n}\nflags_path: \"VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV(VVVVVVVVVVVVVVV\"\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/e\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"B\\004\\n\\000\\022\\000R\\000\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  hidden_envoy_deprecated_config {\n    fields {\n      key: \"\"\n      value {\n        null_value: NULL_VALUE\n      }\n    }\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n}\nstats_sinks {\n  typed_config {\n    type_url: \"\\000\\000\\000\\000\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  
typed_config {\n    type_url: \"type.googleapis.com/e\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"ffff]f$fffff\"\n}\nstats_sinks {\n  name: \"\\000\\035eut_c\"\n}\nstats_sinks {\n  name: \"]\"\n}\nadmin {\n  profile_path: \"\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\\'\"\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5754548048625664",
    "content": "node {   id: \"    \"   cluster: \"             \" } dynamic_resources {   ads_config {     api_type: GRPC     grpc_services {       google_grpc {         target_uri: \" \"         stat_prefix: \"                                        \"       }     }   } } admin {   access_log_path: \"                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                 004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\007\\004\\004\\004\\004\\004\\004\\004\\004        }\\n\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004      
\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004\\004}\"\n  profile_path: \"p\"\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5762646786179072",
    "content": "node {   id: \"    \"   cluster: \"             \" } static_resources {   listeners {     address {       pipe {         path: \" \"       }     }     filter_chains {       tls_context {         common_tls_context {           tls_params {             tls_minimum_protocol_version: TLSv1_2             tls_maximum_protocol_version: TLSv1_3             cipher_suites: \"    \"             ecdh_curves: \"                                                                                                              \"           }           tls_certificate_sds_secret_configs {             sds_config {               api_config_source {                 cluster_names: \"\"                 cluster_names: \"\"                 cluster_names: \"\"                 cluster_names: \"\"                 cluster_names: \"\"                 cluster_names: \"\"                 cluster_names: \"\"                 cluster_names: \"\"                 cluster_names: \"\"               }             }           }           validation_context_sds_secret_config {             sds_config {               ads {               }             }           }         }         require_client_certificate {           value: true         }       }     }     metadata {       filter_metadata {         key: \"\"         value {\n        }\n      }\n      filter_metadata {\n        key: \"\\000\\000\\000\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\000\\000\\000\\014\"\n        value {\n          fields {\n            key: \"\"\n            value {\n            }\n          }\n        }\n      }\n      filter_metadata {\n        key: \"\\000\\000\\010\\010\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\000\\000\\024\\024\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\000\\004\\004\\004\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: 
\"\\000\\004\\027\\000\\010\\010\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\000\\005\\010\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\000\\020\\001\\010\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\004\\000\\000\\004\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\024\\020n\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \" \"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\\"\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"4\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"B\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"LN~NNNN\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"M\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"M\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"NH~=LZ\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"NL/EN/X\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"NL/LNNX\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"NNMn!mN\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"NNNHH^\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"NNNHNNN\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"NNNH`NN\"\n        value {\n          fields {\n            key: \"[\"\n            value {\n            }\n          }\n        }\n      }\n      filter_metadata {\n        key: \"NNNLNNN\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"NNNN~L^\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"NNN~=LZ\"\n        value {\n   
     }\n      }\n      filter_metadata {\n        key: \"envoy.hnnection_managemananer\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"f&fff$f2fff\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"f&fffnameff\"\n      }\n      filter_metadata {\n        key: \"ffQff$fffff\"\n        value {\n          fields {\n            key: \"\\000\\000\\000\\005\"\n            value {\n            }\n          }\n        }\n      }\n      filter_metadata {\n        key: \"ffff$ffffff\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"fffff$fff$f\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"ffffffQf@ff\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"ffffffffff&\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"ffffffffnf&\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"nAm!\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"p\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"t=L^NNN\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"u\"\n        value {\n          fields {\n            key: \"\\000\\000\\000\\005\"\n            value {\n            }\n          }\n        }\n      }\n      filter_metadata {\n        key: \"}fffv$fffff\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\312\\277  \"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\314\\200`282366920938463463374607431768211455stati\\020alt\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\314\\200`282366929384634633744607431768211455stati\\000alt\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\314\\200`282366929384634669293846432337611455stati\\000alt\"\n        value {\n        }\n     
 }\n      filter_metadata {\n        key: \"\\314\\200\\340282366920938463463374607431768211455static_resources {\\n  clusters {\\n    he\\000alt\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\343\\203\\262\\\"88GIHT98\\361\\241\\254\\243\\341\\277\\236`28236692093:463463374607431768211203\\363\\213\\207\\222\\361\\221\\226\\240\\005\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"\\343\\203\\262\\\"88GIHT98\\361\\241\\254\\243\\341\\277\\236`28:366920938463463374607431712163208\\363\\213\\207\\222\\321\\221\\314\\240\\005\"\n        value {\n        }\n      }\n    }\n    tcp_fast_open_queue_length {\n    }\n  }\n}\ndynamic_resources {\n  ads_config {\n    api_type: GRPC\n    grpc_services {\n      google_grpc {\n        target_uri: \"*\"\n        stat_prefix: \"                                        \"\n      }\n    }\n  }\n}\nstats_sinks {\n  name: \"!\"\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/google.prlue\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/google.prlue\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"\\005\"\n}\nstats_sinks {\n  name: \"\\005\"\n  typed_config {\n    type_url: \"type.googleapis.com/google.protobuf.Value\"\n    value: \"2\\366\\337\\303\\377\\007!\\253\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/google.protobuf.Value\"\n    value: \"2\\366\\337\\303\\377\\007!\\253\"\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"d\"\n  }\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/google.protobuf.ListValue\"\n    value: \"2\\366\\337\\303\\377\\007!\\253\"\n  }\n}\nstats_sinks {\n  name: \"tsy.googtati\"\n  typed_config {\n    type_url: \"type.googleapiobuf.Value\"\n  }\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n  
name: \"\\005\"\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/google.protobuf.Value\"\n    value: \"2\\366\\337\\303\\377\\007!\\253\"\n  }\n}\nstats_sinks {\n  name: \",\"\n}\nstats_sinks {\n  name: \",\"\n  typed_config {\n    type_url: \"type.googleapis.com/google.protobuf.Value\"\n    value: \"2\\366\\337\\303\\377\\007!\\253\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"\\005\"\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/google.prlue\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"\\005\"\n}\nstats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/google.protobuf.ListValue\"\n  }\n}\nhds_config {\n  cluster_names: \"\"\n  cluster_names: \"\"\n  cluster_names: \"\"\n  cluster_names: \"\"\n  cluster_names: \"\"\n  cluster_names: \"\"\n  cluster_names: \"\"\n  cluster_names: \"\"\n  cluster_names: \"\"\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5083428128030720",
    "content": "static_resources {\n  clusters {\n    name: \"^\"\n    connect_timeout {\n      nanos: 6\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \"i\"\n      }\n    }\n    ring_hash_lb_config {\n      minimum_ring_size {\n        value: 4395513236313604096\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"i\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5084029869883392",
    "content": "static_resources {\n  listeners {\n    address {\n      envoy_internal_address {\n        server_listener_name: \"ipv6\"\n      }\n    }\n  }\n}"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5632902623657984",
    "content": "static_resources {   clusters {     name: \" \"     connect_timeout {       nanos: 6     }     hosts {       pipe {         path: \" \"       }     }     health_checks {       timeout {         nanos: 6       }       interval {         nanos: 6       }       unhealthy_threshold {       }       healthy_threshold {       }       tcp_health_check {       }     }   } } cluster_manager {   upstream_bind_config {     source_address {       address: \"0.0.0.0\"       port_value: 0     }     freebind {       value: true     }   } } admin {   address {     pipe {       path: \" \"     }   } } \n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5633109961998336",
    "content": "static_resources {\n  clusters {\n    name: \"B\"\n    connect_timeout {\n      nanos: 4\n    }\n    hosts {\n      pipe {\n        path: \"l\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"\\334\\254\"\n      }\n    }\n    health_checks {\n      timeout {\n        seconds: 512\n      }\n      interval {\n        nanos: 91\n      }\n      unhealthy_threshold {\n      }\n      healthy_threshold {\n      }\n      http_health_check {\n        host: \"#\"\n        path: \"=\"\n        codec_client_type: HTTP3\n      }\n      interval_jitter_percent: 1\n      always_log_health_check_failures: true\n    }\n  }\n}\ncluster_manager {\n  load_stats_config {\n    rate_limit_settings {\n      fill_rate {\n        value: 2.36453327863576e-312\n      }\n    }\n  }\n}\nflags_path: \"\\000\\000\\000\\000\\000\\010NK\"\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5665272556158976",
    "content": "cluster_manager {   load_stats_config {     grpc_services {       google_grpc {         target_uri: \" \"         call_credentials {           sts_service {           }         }       }     }   } } "
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5665941383282688",
    "content": "static_resources {   clusters {     name: \" \"     type: STRICT_DNS     connect_timeout {       nanos: 4     }     dns_refresh_rate {       nanos: 4     }   } } "
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5686444035670016",
    "content": "static_resources {   clusters {     name: \" \"     connect_timeout {       nanos: 4     }     hosts {\n      pipe {       }     }     health_checks {       timeout {         nanos: 4       }       interval {         nanos: 4       }       unhealthy_threshold {       }       healthy_threshold {       }       tcp_health_check {       }       no_traffic_interval {         seconds: 2818048       }       interval_jitter_percent: 537791091     }   } } "
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5690948441341952",
    "content": "static_resources {   clusters {     name: \" \"     type: STRICT_DNS     connect_timeout { nanos:   8 }     outlier_detection {       interval {         nanos:   800000000       }       success_rate_minimum_hosts {       }     }   } }\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5705296232579072",
    "content": "static_resources {   clusters {     name: \" \"     connect_timeout {       nanos: 6     }     load_assignment {       cluster_name: \" \"       endpoints {         priority: 4       }     }   } } "
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5714049408172032",
    "content": "cluster_manager {\n  load_stats_config {\n    api_type: GRPC\n    grpc_services {\n      google_grpc {\n        target_uri: \"18446744073709551617\"\n        stat_prefix: \"2147483649\"\n        channel_args {\n          args {\n            key: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n            value {\n            }\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5724853840117760",
    "content": "static_resources {   clusters {     name: \" \"     connect_timeout {       nanos: 9     }     hosts {       pipe {       }     }     health_checks {       timeout {         nanos: 9       }       interval {         nanos: 9       }       unhealthy_threshold {       }       healthy_threshold {       }       grpc_health_check {       }       no_traffic_interval {       }       interval_jitter_percent: 4     }     http2_protocol_options {     }   } }\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5730612661452800",
    "content": "runtime {\n  symlink_root: \"/\"\n  subdirectory: \"tmp\"\n  override_subdirectory: \"out\"\n}\nadmin {\n  access_log_path: \"/\"\n  address {\n    pipe {\n      path: \"WW\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5733243234811904",
    "content": "node {\n}\nstatic_resources {\n  clusters {\n    name: \";\"\n    connect_timeout {\n      seconds: 8\n      nanos: 132\n    }\n    per_connection_buffer_limit_bytes {\n      value: 1728053248\n    }\n    health_checks {\n      timeout {\n        seconds: 8\n        nanos: 25\n      }\n      interval {\n        seconds: 2559\n        nanos: 67154560\n      }\n      unhealthy_threshold {\n        value: 1728053248\n      }\n      healthy_threshold {\n        value: 1728053248\n      }\n      alt_port {\n        value: 4\n      }\n      http_health_check {\n        path: \":\"\n        receive {\n          text: \"@B\\017\\000\\000\\000\\000\\000\"\n        }\n        request_headers_to_add {\n          header {\n            key: \"\\361\\211\\211\\211\\t\\341\\211\\211\\tt\"\n            value: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\337\\205\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n          }\n        }\n        request_headers_to_remove: \";\"\n      }\n      unhealthy_interval {\n        seconds: 8\n        nanos: 132\n      }\n      unhealthy_edge_interval {\n        seconds: 2299\n        nanos: 16384\n      }\n      event_log_path: \"%\"\n      always_log_health_check_failures: true\n    }\n    max_requests_per_connection {\n      value: 842473504\n    }\n    http2_protocol_options {\n      max_inbound_priority_frames_per_stream {\n        value: 1701737468\n      }\n    }\n    alt_stat_name: \"=\"\n    common_http_protocol_options {\n      max_headers_count {\n        value: 842473504\n      }\n    }\n    load_assignment {\n      cluster_name: \"domains\"\n      endpoints {\n        
locality {\n          zone: \"6\"\n        }\n        load_balancing_weight {\n          value: 842473504\n        }\n        priority: 122\n      }\n      endpoints {\n        lb_endpoints {\n          endpoint {\n            address {\n              socket_address {\n                address: \"0.0.0.0\"\n                port_value: 0\n              }\n            }\n          }\n          health_status: TIMEOUT\n        }\n        priority: 122\n        proximity {\n          value: 28732523\n        }\n      }\n      endpoints {\n        locality {\n          sub_zone: \"|\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              pipe {\n                path: \"$node {\\n}\\ns\"\n              }\n            }\n          }\n          health_status: UNHEALTHY\n        }\n        load_balancing_weight {\n          value: 122\n        }\n        priority: 122\n      }\n      endpoints {\n        locality {\n          sub_zone: \"|\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              socket_address {\n                address: \"0.0.0.0\"\n                port_value: 0\n              }\n            }\n          }\n          health_status: DEGRADED\n        }\n        priority: 122\n      }\n      endpoints {\n        lb_endpoints {\n          health_status: HEALTHY\n          endpoint_name: \"\\021\\000\\000\\000\\000\\000\\000\\000\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              envoy_internal_address {\n                server_listener_name: \"\\001\\000\\000\\001\"\n              }\n            }\n          }\n          health_status: TIMEOUT\n        }\n        priority: 106\n      }\n      policy {\n        drop_overloads {\n          category: \"U\"\n        }\n      }\n    }\n    dns_failure_refresh_rate {\n      base_interval {\n        seconds: 8\n        nanos: 132\n      }\n    }\n    track_cluster_stats {\n    }\n  
}\n}\nstats_sinks {\n  name: \"type.googleapis.com/envoy.api.v2.route.Route\"\n  typed_config {\n    type_url: \"IIIIIIIIIIIIIIII\"\n  }\n}\nstats_sinks {\n  name: \"=\"\n  typed_config {\n    value: \"\\000\\037\"\n  }\n}\nstats_sinks {\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"[\"\n}\nstats_sinks {\n}\nstats_sinks {\n  name: \"z\"\n}\nstats_sinks {\n  name: \"z\"\n}\nstats_sinks {\n  name: \"z\"\n}\nstats_sinks {\n  name: \"z\"\n}\nstats_sinks {\n  name: \"z\"\n}\nstats_sinks {\n  name: \"z\"\n}"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5742573780467712",
    "content": "static_resources {   clusters {     name: \" \"     connect_timeout {       nanos: 4     }     load_assignment {       cluster_name: \" \"       endpoints {         lb_endpoints {           endpoint {             address {               pipe {                 path: \" \"               }             }             health_check_config {               port_value: 2             }           }         }       }     }   } } "
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5751467204411392",
    "content": "static_resources {   clusters {     name: \" \"     connect_timeout {       nanos: 6     }     lb_policy: RING_HASH     hosts {       pipe {       }     }     health_checks {       timeout {         nanos: 6       }       interval {         nanos: 6       }       unhealthy_threshold {       }       healthy_threshold {       }       tcp_health_check {       }     }     ring_hash_lb_config {       minimum_ring_size {         value:  8765695       }     }   } } "
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5761881319407616",
    "content": "node {   id: \" \"   cluster: \" \" } static_resources {   clusters {     name: \" \"     connect_timeout {       nanos: 4     }     hosts {       pipe {       }     }     health_checks {       timeout {         nanos: 4       }       interval {         nanos: 4       }       unhealthy_threshold {       }       healthy_threshold {       }       grpc_health_check {       }     }     tls_context {       common_tls_context {         tls_certificate_sds_secret_configs {           sds_config {             path: \"\"           }         }       }     }     http2_protocol_options {     }   } }\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-6246954531291136",
    "content": "static_resources {\n  clusters {\n    name: \"o\"\n    connect_timeout {\n      nanos: 15118976\n    }\n    metadata {\n      filter_metadata {\n        key: \"\"\n      }\n      filter_metadata {\n        key: \"\\000\\000\"\n      }\n      filter_metadata {\n        key: \"\\000\\000\\001\\002z3r90\\000\\000\"\n      }\n      filter_metadata {\n        key: \"\\000\\001\\000\"\n      }\n      filter_metadata {\n        key: \"\\000\\\\959798428\\001\\002z32902546264\\23297677535337\\000\"\n      }\n      filter_metadata {\n        key: \"\\0147,83648\\000\\001\\001\"\n      }\n      filter_metadata {\n        key: \"0\"\n      }\n      filter_metadata {\n        key: \"@\"\n      }\n      filter_metadata {\n        key: \"@@\"\n      }\n      filter_metadata {\n        key: \"UPST128\\tAM_HO\\001\\202\\247ST\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"g\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"m\"\n        value {\n        }\n      }\n      filter_metadata {\n        key: \"y\"\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-4832853025095680",
    "content": "static_resources {\n  clusters {\n    name: \"service_google\"\n    type: LOGICAL_DNS\n    connect_timeout {\n      nanos: 250000000\n    }\n    hosts {\n      socket_address {\n        address: \"google.com\"\n        port_value: 0\n      }\n    }\n    tls_context {\n      sni: \"127.0.0.1\"\n    }\n    dns_lookup_family: V4_ONLY\n  }\n  clusters {\n    name: \"service_google\"\n    type: LOGICAL_DNS\n    connect_timeout {\n      nanos: 250000000\n    }\n    hosts {\n      socket_address {\n        address: \"127.0.0.1\"\n        port_value: 4294312379\n      }\n    }\n    dns_lookup_family: V4_ONLY\n  }\n  clusters {\n    name: \"127.1\"\n    type: LOGICAL_DNS\n    tls_context {\n      sni: \"www.google.com\"\n    }\n    dns_lookup_family: V4_ONLY\n  }\n}\nadmin {\n  access_log_path: \"127.1\"\n  profile_path: \"\\'\"\n  address {\n    pipe {\n      path: \"@\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-4890981380915200",
    "content": "stats_flush_interval {\n  seconds: 13792273858822144\n}\ntracing {\n}\nadmin {\n  access_log_path: \"127.0.0.1\"\n  address {\n    pipe {\n      path: \"\\177\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5085107063881728",
    "content": "watchdog {\n  megamiss_timeout {\n    seconds: -27037478448\n  }\n}\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"!\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5366294281977856",
    "content": "static_resources {\n  clusters {\n    name: \"9\"\n    connect_timeout {\n      nanos: 1\n    }\n    hosts {\n      pipe {\n        path: \"N\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"n\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"=\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"s\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"n\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"N\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"W\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"=\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"N\"\n      }\n    }\n    hosts {\n      pipe {\n        path: \"n\"\n      }\n    }\n    health_checks {\n      timeout {\n        nanos: 1\n      }\n      interval {\n        nanos: 1\n      }\n      unhealthy_threshold {\n      }\n      healthy_threshold {\n        value: 1701650432\n      }\n      http_health_check {\n        path: \"~\"\n        request_headers_to_add {\n        }\n        request_headers_to_add {\n          header {\n            value: \"W\"\n          }\n        }\n        request_headers_to_add {\n        }\n        request_headers_to_add {\n        }\n        request_headers_to_add {\n        }\n        request_headers_to_add {\n        }\n        request_headers_to_add {\n        }\n        request_headers_to_add {\n        }\n        request_headers_to_add {\n        }\n        use_http2: true\n      }\n    }\n    health_checks {\n      timeout {\n        nanos: 1\n      }\n      interval {\n        nanos: 1\n      }\n      unhealthy_threshold {\n      }\n      healthy_threshold {\n      }\n      http_health_check {\n        path: \"E\"\n        request_headers_to_add {\n        }\n        request_headers_to_add {\n        }\n        request_headers_to_add {\n        }\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"@\\'\"\n  address {\n    socket_address {\n      address: 
\"::\"\n      port_value: 0\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5647989147697152",
    "content": "static_resources {\n  listeners {\n    name: \"$\"\n    address {\n      pipe {\n        path: \".\"\n      }\n    }\n    filter_chains {\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"=\"\n      }\n    }\n    filter_chains {\n    }\n  }\n  listeners {\n    name: \"\\000$&$`%n&#000;NaN0\"\n    address {\n      socket_address {\n        address: \"0.0.1.0\"\n        port_value: 32768\n        resolver_name: \"@q\"\n      }\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"=\"\n      }\n    }\n    filter_chains {\n      use_proxy_proto {\n        value: true\n      }\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"=\"\n      }\n    }\n    filter_chains {\n      use_proxy_proto {\n        value: true\n      }\n    }\n    listener_filters_timeout {\n    }\n  }\n  listeners {\n    name: \"@\"\n    address {\n      socket_address {\n        address: \"2147483648.0.1.0\"\n        named_port: \"L\"\n      }\n    }\n    filter_chains {\n    }\n  }\n  listeners {\n    name: \"$\"\n    address {\n      pipe {\n        path: \".\"\n      }\n    }\n    filter_chains {\n    }\n  }\n  listeners {\n    name: \"@\"\n    address {\n      socket_address {\n        address: \"0.0.1.0\"\n        port_value: 32768\n      }\n    }\n    filter_chains {\n    }\n  }\n  listeners {\n    name: \"\\000$&$`%n&#000;NaN0\"\n    address {\n      pipe {\n        path: \"1\"\n      }\n    }\n    filter_chains {\n      use_proxy_proto {\n        value: true\n      }\n    }\n    use_original_dst {\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"=\"\n      }\n    }\n    filter_chains {\n      use_proxy_proto {\n        value: true\n      }\n    }\n  }\n  listeners {\n    name: \"@\"\n    address {\n      socket_address {\n        address: \"@\"\n        port_value: 32768\n      }\n    }\n    filter_chains {\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"=\"\n      
}\n    }\n    filter_chains {\n      use_proxy_proto {\n        value: true\n      }\n    }\n    use_original_dst {\n    }\n  }\n  listeners {\n    name: \"\\000$&$`%n&#000;NaN0\"\n    address {\n      pipe {\n        path: \"@\"\n      }\n    }\n    filter_chains {\n      use_proxy_proto {\n      }\n    }\n    use_original_dst {\n    }\n  }\n  listeners {\n    name: \"@\"\n    address {\n      socket_address {\n        address: \"0.0.1.0\"\n        port_value: 32768\n      }\n    }\n    filter_chains {\n    }\n  }\n  listeners {\n    address {\n      pipe {\n        path: \"=\"\n      }\n    }\n    filter_chains {\n      use_proxy_proto {\n        value: true\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\"\n  address {\n    pipe {\n      path: \"$\"\n    }\n  }\n}\nlayered_runtime {\n  layers {\n    name: \"preserveExt\"\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5691106634760192",
    "content": ""
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5696568846450688",
    "content": "static_resources {\n  clusters {\n    name: \"/\"\n    type: STRICT_DNS\n    connect_timeout {\n      seconds: 539000848\n      nanos: 15\n    }\n    hosts {\n      pipe {\n        path: \"4\"\n      }\n    }\n    tls_context {\n      common_tls_context {\n        tls_certificate_sds_secret_configs {\n          sds_config {\n            path: \"/\"\n          }\n        }\n      }\n    }\n  }\n}\ntracing {\n}\nadmin {\n  address {\n    pipe {\n      path: \" \"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5697356077989888",
    "content": "static_resources {\n  clusters {\n    name: \"=\"\n    type: LOGICAL_DNS\n    connect_timeout {\n      seconds: 1946186496\n    }\n    hosts {\n      socket_address {\n        address: \"127.0.0.1\"\n        port_value: 0\n      }\n    }\n    health_checks {\n      timeout {\n        nanos: 95\n      }\n      interval {\n        nanos: 95\n      }\n      unhealthy_threshold {\n      }\n      healthy_threshold {\n        value: 2147483648\n      }\n      redis_health_check {\n        key: \"=\"\n      }\n      healthy_edge_interval {\n        nanos: 95\n      }\n    }\n    tls_context {\n      common_tls_context {\n        tls_params {\n        }\n      }\n    }\n    alt_stat_name: \"\\001\\000\\000\\000\\007\\\\\\316\\230\"\n  }\n}\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"%\"\n    }\n  }\n}\nstats_config {\n  use_all_default_tags {\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5704964522377216",
    "content": "static_resources {\n  clusters {\n    name: \"907501\"\n    connect_timeout {\n      nanos: 45696\n    }\n    lb_policy: RING_HASH\n    tls_context {\n      sni: \"www.google.com\"\n    }\n    dns_lookup_family: V4_ONLY\n    lb_subset_config {\n      subset_selectors {\n        keys: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"@\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5705154446753792",
    "content": "static_resources {\n  clusters {\n    name: \"service_google\"\n    type: STRICT_DNS\n    connect_timeout {\n      nanos: 1\n    }\n    lb_policy: MAGLEV\n    hosts {\n      socket_address {\n        address: \"127.0.0.1\"\n        port_value: 0\n      }\n    }\n    hosts {\n      socket_address {\n        address: \"127.0.0.1\"\n        port_value: 0\n      }\n    }\n    circuit_breakers {\n      thresholds {\n        max_retries {\n          value: 7564900\n        }\n      }\n    }\n    tls_context {\n      sni: \"www.google.com\"\n    }\n    lb_subset_config {\n      subset_selectors {\n        keys: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"`\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5734693923717120",
    "content": "static_resources {\n  clusters {\n    name: \"@\"\n    connect_timeout {\n      nanos: 250000000\n    }\n    common_lb_config {\n      zone_aware_lb_config {\n        min_cluster_size {\n          value: 38\n        }\n      }\n    }\n  }\n}\nstats_flush_interval {\n  nanos: 32256\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5754606195310592",
    "content": "static_resources {\n}\nstats_config {\n  use_all_default_tags {\n    value: true\n  }\n}\noverload_manager {\n  resource_monitors {\n    name: \"envoy.resource_monitors.fixed_heap\"\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5755877701713920",
    "content": "static_resources {\n  listeners {\n    address {\n      pipe {\n      }\n    }\n    filter_chains {\n    }\n    freebind {\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5763613693837312",
    "content": "stats_sinks {\n  typed_config {\n    type_url: \"type.googleapis.com/envoy.api.v2.route.Route\"\n    value: \"\\022*J :222222\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\t2871770\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\0378\\377\\377\\377\\377\\377\\377\\377\\377 8\\377\\377\\377\\377\\377\\377\\377\\377\\37722222222222222222220\\022\"\n  }\n}\nheader_prefix: \"type.googleapis.com/envoy.api.v2.route.Route\"\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5809171076218880",
    "content": "admin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"@\"\n    }\n  }\n}\nhds_config {\n  cluster_names: \"+\"\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-5988544525893632",
    "content": "static_resources {\n  clusters {\n    name: \"-2353373969551157135775236\"\n    connect_timeout {\n      seconds: 12884901890\n    }\n    hosts {\n      pipe {\n        path: \"@\"\n      }\n    }\n    outlier_detection {\n    }\n    common_lb_config {\n      healthy_panic_threshold {\n        value: nan\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"@r\"\n  address {\n    pipe {\n      path: \"W\"\n    }\n  }\n}\n\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-6036175623028736",
    "content": "dynamic_resources {\n  ads_config {\n    api_type: GRPC\n    grpc_services {\n      google_grpc {\n        target_uri: \"\\177\\177\"\n        stat_prefix: \"\\177\\001D\\177\"\n      }\n      timeout {\n        seconds: 2048\n      }\n      initial_metadata {\n        value: \"\\177\\177\\177\\177\"\n      }\n    }\n  }\n}\nflags_path: \"\\'\"\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"^\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-6236930453798912",
    "content": "static_resources {\n  clusters {\n    name: \"www.google.com\"\n    connect_timeout {\n      nanos: 1\n    }\n    lb_policy: RING_HASH\n    hosts {\n      pipe {\n        path: \" \"\n      }\n    }\n    tls_context {\n      sni: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n    }\n  }\n}\ncluster_manager {\n  load_stats_config {\n    api_type: REST\n    cluster_names: \"www.google.com\"\n  }\n}\nwatchdog {\n  miss_timeout {\n    nanos: 64\n  }\n}\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"[\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-6280208148594688",
    "content": "static_resources {\n  clusters {\n    name: \"service_google\"\n    type: LOGICAL_DNS\n    connect_timeout {\n      nanos: 250000000\n    }\n    hosts {\n      socket_address {\n        address: \"127.0.0.1\"\n        port_value: 0\n      }\n    }\n    tls_context {\n      sni: \"www.google.com\"\n    }\n    dns_lookup_family: V4_ONLY\n    alt_stat_name: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n  }\n}\nadmin {\n  access_log_path: \"/tmp/admin_access.log\"\n  address {\n    socket_address {\n      address: \"127.0.0.1\"\n      port_value: 0\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-6288786894880768",
    "content": "node {\n  locality {\n  }\n}\nstatic_resources {\n  clusters {\n    name: \"x\"\n    type: STRICT_DNS\n    connect_timeout {\n      nanos: 250000000\n    }\n    lb_policy: RING_HASH\n    hosts {\n      socket_address {\n        address: \"123.1.0.1\"\n        named_port: \"x\"\n      }\n    }\n    hosts {\n      socket_address {\n        address: \"127.0.0.2\"\n        named_port: \"3\"\n      }\n    }\n    common_lb_config {\n      update_merge_window {\n        seconds: 281474976710656\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"@-\"\n  address {\n    pipe {\n      path: \" \"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-6313779791921152",
    "content": "admin {\n  access_log_path: \"W\"\n  address {\n    pipe {\n  path: \"W\"\n    }\n  }\n}\nstats_config {\n  stats_tags {\ntag_name: \"W\"\n    regex: \"t\\n  \\00000\\\\x000\\00000\\000\\000a000\\000  \\000\\\\00\\\\D00\\\\n  \\000N000000\\000\\000\\000\\000L000\\000\\\\B\\\\\\n  ^^^^^^^^^^^000N000000\\000\\000\\000\\000L000\\000\\\\B\\\\\\n  ^^^^^^^^^^^000N0���00000\\000(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((<((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII����00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(
((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII����00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII����00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII����00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII����00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000
\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII����00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\00\\000\\000IIIIIIIIII00\\000\\0^^^^^^^^^^^^^^^^^^^^^^000N000000\\000((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((\\000\\000\\000L00000(((((((\\000\\000\\000L00000\\0\\000\\000\\0000\\\\cCb\\\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-6419204524736512",
    "content": "static_resources {\n  clusters {\n    name: \"`\"\n    connect_timeout {\n      nanos: 20\n    }\n    load_assignment {\n      cluster_name: \"`\"\n      endpoints {\n        priority: 1030831324\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"@@\"\n  address {\n    pipe {\n      path: \"`\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/clusterfuzz-testcase-server_fuzz_test-6610050496856064",
    "content": "static_resources {\n  listeners {\n    name: \"listener_0\"\n    address {\n      pipe {\n        path: \"\\t\"\n      }\n    }\n    filter_chains {\n      tls_context {\n        common_tls_context {\n          tls_certificate_sds_secret_configs {\n            sds_config {\n              api_config_source {\n                refresh_delay {\n                  seconds: -281474976710656\n                }\n                grpc_services {\n                  envoy_grpc {\n                    cluster_name: \"\\000\\000\\000\\000\\000\"\n                  }\n                }\n              }\n            }\n          }\n        }\n      }\n    }\n    filter_chains {\n      filter_chain_match {\n        server_names: \"6e702f1f66d415068aabbc60377ad67a326b6b2b\"\n      }\n    }\n    filter_chains {\n    }\n  }\n}\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"^\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/crash-38abba5264d01217f4f027f02dc403eae6eda8bb",
    "content": "static_resources {\n  listeners {\n    name: \"listener_0\"\n    address {\n      pipe {\n        path: \"/tmp/admin_access.log\"\n      }\n    }\n    filter_chains {\n      tls_context {\n        common_tls_context {\n          tls_certificate_sds_secret_configs {\n            sds_config {\n              ads {\n              }\n            }\n          }\n        }\n      }\n    }\n    use_original_dst {\n    }\n  }\n  listeners {\n    name: \"www.google.com\"\n    address {\n      socket_address {\n        address: \"(\"\n        named_port: \"2\"\n      }\n    }\n    filter_chains {\n    }\n  }\n}\nadmin {\n  access_log_path: \"(st\"\n  address {\n    pipe {\n      path: \"/tmp/admin_access.log\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/crash-ac725507195d840cdb90bed3079b877e6e9419e3",
    "content": "dynamic_resources {\n}\ncluster_manager {\n  local_cluster_name: \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"\n}\nhidden_envoy_deprecated_runtime {\n}\nadmin {\n}\nstats_config {\n  use_all_default_tags {\n    value: true\n  }\n}\nlayered_runtime {\n  layers {\n    disk_layer {\n      append_service_cluster: true\n    }\n  }\n}\nuse_tcp_for_dns_lookups: true\n"
  },
  {
    "path": "test/server/server_corpus/crash-cbd98584afd43791dc2143260c4438f4d2db5e87",
    "content": "static_resources {\n  listeners {\n    name: \"listener_0\"\n    address {\n      pipe {\n        path: \"@\"\n      }\n    }\n    filter_chains {\n      tls_context {\n        common_tls_context {\n          tls_certificates {\n            password {\n              filename: \"\\177\\177\\177\\177\\177\\177\\177\\177\\177\\177\"\n            }\n          }\n        }\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"@\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/crash-d60f68abcafaae8e7b135ca5144b062d969e5575",
    "content": "static_resources {\n  clusters {\n    name: \"service_google\"\n    type: LOGICAL_DNS\n    connect_timeout {\n      nanos: 250000000\n    }\n    hosts {\n      socket_address {\n        address: \"google.com\"\n        port_value: 0\n      }\n    }\n    tls_context {\n      sni: \"www.google.com\"\n    }\n    dns_lookup_family: V4_ONLY\n  }\n  clusters {\n    name: \"service_google\"\n    type: LOGICAL_DNS\n    connect_timeout {\n      nanos: 250000000\n    }\n    hosts {\n      socket_address {\n        address: \"google.com\"\n        port_value: 0\n      }\n    }\n    tls_context {\n      sni: \"www.google.com\"\n    }\n    dns_lookup_family: V4_ONLY\n  }\n}\nadmin {\n  access_log_path: \"/tmp/admin_access.log\"\n  address {\n    socket_address {\n      address: \"127.0.0.1\"\n      port_value: 0\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/crash-da39a3ee5e6b4b0d3255bfef95601890afd80709",
    "content": ""
  },
  {
    "path": "test/server/server_corpus/crash-db2ee19f50162f2079dc0c5ba24fd0e3dcb8b9bc",
    "content": "static_resources {\n  clusters {\n    name: \"@\"\n    connect_timeout {\n      nanos: 250000000\n    }\n    lb_policy: MAGLEV\n    dns_lookup_family: V4_ONLY\n    dns_resolvers {\n      pipe {\n        path: \"\\014\\000\"\n      }\n    }\n  }\n}\nadmin {\n  access_log_path: \"@\"\n  address {\n    pipe {\n      path: \"@\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/crash-e0339370f24027b5c73b5355e74c0b68c8b33314",
    "content": "static_resources {\n  listeners {\n    name: \"listener_0\"\n    address {\n      pipe {\n        path: \"\\000\\000\\000j\"\n      }\n    }\n    filter_chains {\n      tls_context {\n        session_ticket_keys_sds_secret_config {\n        }\n      }\n    }\n    tcp_fast_open_queue_length {\n      value: 1\n    }\n  }\n}\nadmin {\n  access_log_path: \"/tmp/admin_access,lOg\"\n  address {\n    pipe {\n      path: \"\\026runt\"\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/google_com_proxy.v2.pb_text",
    "content": "static_resources {\n  listeners {\n    name: \"listener_0\"\n    address {\n      socket_address {\n        address: \"0.0.0.0\"\n        port_value: 0\n      }\n    }\n    filter_chains {\n      filters {\n        name: \"envoy.filters.network.http_connection_manager\"\n        config {\n          fields {\n            key: \"http_filters\"\n            value {\n              list_value {\n                values {\n                  struct_value {\n                    fields {\n                      key: \"name\"\n                      value {\n                        string_value: \"envoy.filters.http.router\"\n                      }\n                    }\n                  }\n                }\n              }\n            }\n          }\n          fields {\n            key: \"route_config\"\n            value {\n              struct_value {\n                fields {\n                  key: \"name\"\n                  value {\n                    string_value: \"local_route\"\n                  }\n                }\n                fields {\n                  key: \"virtual_hosts\"\n                  value {\n                    list_value {\n                      values {\n                        struct_value {\n                          fields {\n                            key: \"domains\"\n                            value {\n                              list_value {\n                                values {\n                                  string_value: \"*\"\n                                }\n                              }\n                            }\n                          }\n                          fields {\n                            key: \"name\"\n                            value {\n                              string_value: \"local_service\"\n                            }\n                          }\n                          fields {\n                            key: \"routes\"\n                            value {\n 
                             list_value {\n                                values {\n                                  struct_value {\n                                    fields {\n                                      key: \"match\"\n                                      value {\n                                        struct_value {\n                                          fields {\n                                            key: \"prefix\"\n                                            value {\n                                              string_value: \"/\"\n                                            }\n                                          }\n                                        }\n                                      }\n                                    }\n                                    fields {\n                                      key: \"route\"\n                                      value {\n                                        struct_value {\n                                          fields {\n                                            key: \"cluster\"\n                                            value {\n                                              string_value: \"service_google\"\n                                            }\n                                          }\n                                          fields {\n                                            key: \"host_rewrite\"\n                                            value {\n                                              string_value: \"www.google.com\"\n                                            }\n                                          }\n                                        }\n                                      }\n                                    }\n                                  }\n                                }\n                              }\n                            }\n                          }\n                        }\n             
         }\n                    }\n                  }\n                }\n              }\n            }\n          }\n          fields {\n            key: \"stat_prefix\"\n            value {\n              string_value: \"ingress_http\"\n            }\n          }\n        }\n      }\n    }\n  }\n  clusters {\n    name: \"service_google\"\n    type: LOGICAL_DNS\n    connect_timeout {\n      nanos: 250000000\n    }\n    hosts {\n      socket_address {\n        address: \"google.com\"\n        port_value: 0\n      }\n    }\n    tls_context {\n      sni: \"www.google.com\"\n    }\n    dns_lookup_family: V4_ONLY\n  }\n}\nadmin {\n  access_log_path: \"/tmp/admin_access.log\"\n  address {\n    socket_address {\n      address: \"127.0.0.1\"\n      port_value: 0\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/server_corpus/not_implemented_envoy_internal",
    "content": "static_resources {\n  clusters {\n    name: \"ser\"\n    connect_timeout {\n      nanos: 813\n    }\n    lb_policy: RING_HASH\n    health_checks {\n      timeout {\n        seconds: 1000000\n        nanos: 262239\n      }\n      interval {\n        seconds: 10838081697\n        nanos: 95\n      }\n      unhealthy_threshold {\n      }\n      healthy_threshold {\n      }\n      http_health_check {\n        host: \"\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\001\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037f\\037\\037\\037\\037\"\n        path: \"&\"\n      }\n      healthy_edge_interval {\n        nanos: 95\n      }\n    }\n    circuit_breakers {\n    }\n    http_protocol_options {\n      allow_absolute_url {\n        value: true\n      }\n    }\n    load_assignment {\n      cluster_name: \".\"\n      endpoints {\n      }\n      endpoints {\n        lb_endpoints {\n          endpoint {\n            address {\n              pipe {\n                path: \"f\"\n              }\n            }\n          }\n          health_status: DRAINING\n        }\n      }\n      endpoints {\n        lb_endpoints {\n          endpoint {\n            address {\n              envoy_internal_address {\n                server_listener_name: \"\\000\\000\\000\\003\"\n              }\n            }\n          }\n        }\n        priority: 16\n      }\n      endpoints {\n        proximity {\n          value: 10240\n        }\n      }\n      endpoints {\n        lb_endpoints {\n          endpoint {\n            address {\n              socket_address {\n                address: \"127.0.0.1\"\n                port_value: 9901\n              }\n            }\n          }\n          health_status: HEALTHY\n        }\n        priority: 16\n      }\n    }\n    use_tcp_for_dns_lookups: true\n  }\n}\nstats_flush_interval {\n  nanos: 16777216\n}\nadmin {\n  access_log_path: \"f\"\n  address 
{\n    socket_address {\n      address: \"\\024\"\n    }\n  }\n  socket_options {\n    description: \"=\"\n    level: 4702337453602635775\n    int_value: 4702337453602635775\n  }\n}\nuse_tcp_for_dns_lookups: true"
  },
  {
    "path": "test/server/server_corpus/not_reached",
    "content": "static_resources {   clusters {     name: \" \"     connect_timeout {       nanos: 4     }     lb_policy: LOAD_BALANCING_POLICY_CONFIG   } } "
  },
  {
    "path": "test/server/server_corpus/valid",
    "content": "static_resources {\n  clusters {\n    name: \"ser\"\n    connect_timeout {\n      nanos: 813\n    }\n    per_connection_buffer_limit_bytes {\n    }\n    lb_policy: RING_HASH\n    health_checks {\n      timeout {\n        nanos: 95\n      }\n      interval {\n        seconds: 165537\n        nanos: 95\n      }\n      unhealthy_threshold {\n      }\n      healthy_threshold {\n      }\n      http_health_check {\n        host: \"a\\037\\037\\037\\037\\037\\037\\037\\037\\037\\000\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\001\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\031\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\"\n        path: \"&\"\n      }\n      healthy_edge_interval {\n        nanos: 54784\n      }\n      interval_jitter_percent: 16\n    }\n    http_protocol_options {\n      header_key_format {\n        proper_case_words {\n        }\n      }\n    }\n    load_assignment {\n      cluster_name: \".\"\n      endpoints {\n        locality {\n          sub_zone: \"\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\001\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              pipe {\n                path: \".\"\n              }\n            }\n          }\n          health_status: DRAINING\n        }\n      }\n      endpoints {\n        locality {\n          region: \".\"\n          sub_zone: 
\"\\037\\037\\037\\037\\037\\037\\037\\037\\037\\000\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\001\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\031\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\\037\"\n        }\n        lb_endpoints {\n          endpoint {\n            address {\n              pipe {\n                path: \".\"\n              }\n            }\n          }\n          health_status: DRAINING\n        }\n      }\n      endpoints {\n        priority: 16\n      }\n      endpoints {\n      }\n      endpoints {\n        lb_endpoints {\n          endpoint {\n            address {\n              pipe {\n                path: \".\"\n              }\n            }\n          }\n          health_status: HEALTHY\n        }\n      }\n    }\n  }\n}\nenable_dispatcher_stats: true"
  },
  {
    "path": "test/server/server_fuzz_test.cc",
    "content": "#include <fstream>\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n#include \"envoy/config/core/v3/address.pb.h\"\n\n#include \"common/common/random_generator.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/thread_local/thread_local_impl.h\"\n\n#include \"server/listener_hooks.h\"\n#include \"server/server.h\"\n\n#include \"test/common/runtime/utility.h\"\n#include \"test/fuzz/fuzz_runner.h\"\n#include \"test/integration/server.h\"\n#include \"test/mocks/server/hot_restart.h\"\n#include \"test/mocks/server/options.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/test_time.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nvoid makePortHermetic(Fuzz::PerTestEnvironment& test_env,\n                      envoy::config::core::v3::Address& address) {\n  if (address.has_socket_address()) {\n    address.mutable_socket_address()->set_port_value(0);\n  } else if (address.has_pipe() || address.has_envoy_internal_address()) {\n    // TODO(asraa): Remove this work-around to replace EnvoyInternalAddress when implemented and\n    // remove condition at line 74.\n    address.mutable_pipe()->set_path(\"@\" + test_env.testId() + address.pipe().path());\n  }\n}\n\nenvoy::config::bootstrap::v3::Bootstrap\nmakeHermeticPathsAndPorts(Fuzz::PerTestEnvironment& test_env,\n                          const envoy::config::bootstrap::v3::Bootstrap& input) {\n  envoy::config::bootstrap::v3::Bootstrap output(input);\n  // This is not a complete list of places where we need to zero out ports or sanitize paths, so we\n  // should adapt it as we go and encounter places that we need to stabilize server test flakes.\n  // config_validation_fuzz_test doesn't need to do this sanitization, so should pickup the coverage\n  // we lose here. 
If we don't sanitize here, we get flakes due to port bind conflicts, file\n  // conflicts, etc.\n  output.clear_admin();\n  // The header_prefix is a write-once then read-only singleton that persists across tests. We clear\n  // this field so that fuzz tests don't fail over multiple iterations.\n  output.clear_header_prefix();\n  if (output.has_hidden_envoy_deprecated_runtime()) {\n    output.mutable_hidden_envoy_deprecated_runtime()->set_symlink_root(test_env.temporaryPath(\"\"));\n  }\n  for (auto& listener : *output.mutable_static_resources()->mutable_listeners()) {\n    if (listener.has_address()) {\n      makePortHermetic(test_env, *listener.mutable_address());\n    }\n  }\n  for (auto& cluster : *output.mutable_static_resources()->mutable_clusters()) {\n    for (auto& health_check : *cluster.mutable_health_checks()) {\n      // TODO(asraa): QUIC is not enabled in production code yet, so remove references for HTTP3.\n      // Tracked at https://github.com/envoyproxy/envoy/issues/9513.\n      if (health_check.http_health_check().codec_client_type() ==\n          envoy::type::v3::CodecClientType::HTTP3) {\n        health_check.mutable_http_health_check()->clear_codec_client_type();\n      }\n    }\n    // We may have both deprecated hosts() or load_assignment().\n    for (auto& host : *cluster.mutable_hidden_envoy_deprecated_hosts()) {\n      makePortHermetic(test_env, host);\n    }\n    for (int j = 0; j < cluster.load_assignment().endpoints_size(); ++j) {\n      auto* locality_lb = cluster.mutable_load_assignment()->mutable_endpoints(j);\n      for (int k = 0; k < locality_lb->lb_endpoints_size(); ++k) {\n        auto* lb_endpoint = locality_lb->mutable_lb_endpoints(k);\n        if (lb_endpoint->endpoint().address().has_socket_address() ||\n            lb_endpoint->endpoint().address().has_envoy_internal_address()) {\n          makePortHermetic(test_env, *lb_endpoint->mutable_endpoint()->mutable_address());\n        }\n      }\n    }\n  }\n  return 
output;\n}\n\nDEFINE_PROTO_FUZZER(const envoy::config::bootstrap::v3::Bootstrap& input) {\n  testing::NiceMock<MockOptions> options;\n  DefaultListenerHooks hooks;\n  testing::NiceMock<MockHotRestart> restart;\n  Stats::TestIsolatedStoreImpl stats_store;\n  Thread::MutexBasicLockable fakelock;\n  TestComponentFactory component_factory;\n  ThreadLocal::InstanceImpl thread_local_instance;\n  DangerousDeprecatedTestTime test_time;\n  Fuzz::PerTestEnvironment test_env;\n  Init::ManagerImpl init_manager{\"Server\"};\n\n  {\n    const std::string bootstrap_path = test_env.temporaryPath(\"bootstrap.pb_text\");\n    std::ofstream bootstrap_file(bootstrap_path);\n    bootstrap_file << makeHermeticPathsAndPorts(test_env, input).DebugString();\n    options.config_path_ = bootstrap_path;\n    options.log_level_ = Fuzz::Runner::logLevel();\n  }\n\n  std::unique_ptr<InstanceImpl> server;\n  try {\n    server = std::make_unique<InstanceImpl>(\n        init_manager, options, test_time.timeSystem(),\n        std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\"), hooks, restart, stats_store,\n        fakelock, component_factory, std::make_unique<Random::RandomGeneratorImpl>(),\n        thread_local_instance, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(),\n        nullptr);\n  } catch (const EnvoyException& ex) {\n    ENVOY_LOG_MISC(debug, \"Controlled EnvoyException exit: {}\", ex.what());\n    return;\n  }\n  // If we were successful, run any pending events on the main thread's dispatcher loop. These might\n  // be, for example, pending DNS resolution callbacks. If they generate exceptions, we want to\n  // explode and fail the test, hence we do this outside of the try-catch above.\n  server->dispatcher().run(Event::Dispatcher::RunType::NonBlock);\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/server_test.cc",
    "content": "#include <memory>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/network/exception.h\"\n#include \"envoy/server/bootstrap_extension_config.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/socket_option_impl.h\"\n#include \"common/protobuf/protobuf.h\"\n#include \"common/thread_local/thread_local_impl.h\"\n#include \"common/version/version.h\"\n\n#include \"server/process_context_impl.h\"\n#include \"server/server.h\"\n\n#include \"test/common/config/dummy_config.pb.h\"\n#include \"test/common/stats/stat_test_utility.h\"\n#include \"test/integration/server.h\"\n#include \"test/mocks/server/bootstrap_extension_factory.h\"\n#include \"test/mocks/server/hot_restart.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/options.h\"\n#include \"test/mocks/server/overload_manager.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/logging.h\"\n#include \"test/test_common/registry.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/synchronization/notification.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Assign;\nusing testing::HasSubstr;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::Return;\nusing testing::SaveArg;\nusing testing::StrictMock;\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nTEST(ServerInstanceUtil, flushHelper) {\n  InSequence s;\n\n  Stats::TestUtil::TestStore store;\n  Stats::Counter& c = store.counter(\"hello\");\n  c.inc();\n  store.gauge(\"world\", Stats::Gauge::ImportMode::Accumulate).set(5);\n  store.histogram(\"histogram\", Stats::Histogram::Unit::Unspecified);\n  store.textReadout(\"text\").set(\"is important\");\n\n  
std::list<Stats::SinkPtr> sinks;\n  InstanceUtil::flushMetricsToSinks(sinks, store);\n  // Make sure that counters have been latched even if there are no sinks.\n  EXPECT_EQ(1UL, c.value());\n  EXPECT_EQ(0, c.latch());\n\n  Stats::MockSink* sink = new StrictMock<Stats::MockSink>();\n  sinks.emplace_back(sink);\n  EXPECT_CALL(*sink, flush(_)).WillOnce(Invoke([](Stats::MetricSnapshot& snapshot) {\n    ASSERT_EQ(snapshot.counters().size(), 1);\n    EXPECT_EQ(snapshot.counters()[0].counter_.get().name(), \"hello\");\n    EXPECT_EQ(snapshot.counters()[0].delta_, 1);\n\n    ASSERT_EQ(snapshot.gauges().size(), 1);\n    EXPECT_EQ(snapshot.gauges()[0].get().name(), \"world\");\n    EXPECT_EQ(snapshot.gauges()[0].get().value(), 5);\n\n    ASSERT_EQ(snapshot.textReadouts().size(), 1);\n    EXPECT_EQ(snapshot.textReadouts()[0].get().name(), \"text\");\n    EXPECT_EQ(snapshot.textReadouts()[0].get().value(), \"is important\");\n  }));\n  c.inc();\n  InstanceUtil::flushMetricsToSinks(sinks, store);\n\n  // Histograms don't currently work with the isolated store so test those with a mock store.\n  NiceMock<Stats::MockStore> mock_store;\n  Stats::ParentHistogramSharedPtr parent_histogram(new Stats::MockParentHistogram());\n  std::vector<Stats::ParentHistogramSharedPtr> parent_histograms = {parent_histogram};\n  ON_CALL(mock_store, histograms).WillByDefault(Return(parent_histograms));\n  EXPECT_CALL(*sink, flush(_)).WillOnce(Invoke([](Stats::MetricSnapshot& snapshot) {\n    EXPECT_TRUE(snapshot.counters().empty());\n    EXPECT_TRUE(snapshot.gauges().empty());\n    EXPECT_EQ(snapshot.histograms().size(), 1);\n    EXPECT_TRUE(snapshot.textReadouts().empty());\n  }));\n  InstanceUtil::flushMetricsToSinks(sinks, mock_store);\n}\n\nclass RunHelperTest : public testing::Test {\npublic:\n  RunHelperTest() {\n    InSequence s;\n\n#ifndef WIN32\n    sigterm_ = new Event::MockSignalEvent(&dispatcher_);\n    sigint_ = new Event::MockSignalEvent(&dispatcher_);\n    sigusr1_ = new 
Event::MockSignalEvent(&dispatcher_);\n    sighup_ = new Event::MockSignalEvent(&dispatcher_);\n#endif\n    EXPECT_CALL(overload_manager_, start());\n    EXPECT_CALL(cm_, setInitializedCb(_)).WillOnce(SaveArg<0>(&cm_init_callback_));\n    ON_CALL(server_, shutdown()).WillByDefault(Assign(&shutdown_, true));\n\n    helper_ = std::make_unique<RunHelper>(server_, options_, dispatcher_, cm_, access_log_manager_,\n                                          init_manager_, overload_manager_,\n                                          [this] { start_workers_.ready(); });\n  }\n\n  NiceMock<MockInstance> server_;\n  testing::NiceMock<MockOptions> options_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Upstream::MockClusterManager> cm_;\n  NiceMock<AccessLog::MockAccessLogManager> access_log_manager_;\n  NiceMock<MockOverloadManager> overload_manager_;\n  Init::ManagerImpl init_manager_{\"\"};\n  ReadyWatcher start_workers_;\n  std::unique_ptr<RunHelper> helper_;\n  std::function<void()> cm_init_callback_;\n#ifndef WIN32\n  Event::MockSignalEvent* sigterm_;\n  Event::MockSignalEvent* sigint_;\n  Event::MockSignalEvent* sigusr1_;\n  Event::MockSignalEvent* sighup_;\n#endif\n  bool shutdown_ = false;\n};\n\nTEST_F(RunHelperTest, Normal) {\n  EXPECT_CALL(start_workers_, ready());\n  cm_init_callback_();\n}\n\n// no signals on Windows\n#ifndef WIN32\nTEST_F(RunHelperTest, ShutdownBeforeCmInitialize) {\n  EXPECT_CALL(start_workers_, ready()).Times(0);\n  sigterm_->callback_();\n  EXPECT_CALL(server_, isShutdown()).WillOnce(Return(shutdown_));\n  cm_init_callback_();\n}\n#endif\n\n// no signals on Windows\n#ifndef WIN32\nTEST_F(RunHelperTest, ShutdownBeforeInitManagerInit) {\n  EXPECT_CALL(start_workers_, ready()).Times(0);\n  Init::ExpectableTargetImpl target;\n  init_manager_.add(target);\n  EXPECT_CALL(target, initialize());\n  cm_init_callback_();\n  sigterm_->callback_();\n  EXPECT_CALL(server_, isShutdown()).WillOnce(Return(shutdown_));\n  
target.ready();\n}\n#endif\n\nclass InitializingInitManager : public Init::ManagerImpl {\npublic:\n  InitializingInitManager(absl::string_view name) : Init::ManagerImpl(name) {}\n\n  State state() const override { return State::Initializing; }\n};\n\n// Class creates minimally viable server instance for testing.\nclass ServerInstanceImplTestBase {\nprotected:\n  void initialize(const std::string& bootstrap_path) { initialize(bootstrap_path, false); }\n\n  void initialize(const std::string& bootstrap_path, const bool use_intializing_instance) {\n    if (options_.config_path_.empty()) {\n      options_.config_path_ = TestEnvironment::temporaryFileSubstitute(\n          bootstrap_path, {{\"upstream_0\", 0}, {\"upstream_1\", 0}}, version_);\n    }\n    thread_local_ = std::make_unique<ThreadLocal::InstanceImpl>();\n    if (process_object_ != nullptr) {\n      process_context_ = std::make_unique<ProcessContextImpl>(*process_object_);\n    }\n    init_manager_ = use_intializing_instance ? std::make_unique<InitializingInitManager>(\"Server\")\n                                             : std::make_unique<Init::ManagerImpl>(\"Server\");\n\n    server_ = std::make_unique<InstanceImpl>(\n        *init_manager_, options_, time_system_,\n        std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\"), hooks_, restart_,\n        stats_store_, fakelock_, component_factory_,\n        std::make_unique<NiceMock<Random::MockRandomGenerator>>(), *thread_local_,\n        Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(),\n        std::move(process_context_));\n    EXPECT_TRUE(server_->api().fileSystem().fileExists(std::string(Platform::null_device_path)));\n  }\n\n  void initializeWithHealthCheckParams(const std::string& bootstrap_path, const double timeout,\n                                       const double interval) {\n    options_.config_path_ = TestEnvironment::temporaryFileSubstitute(\n        bootstrap_path,\n        {{\"health_check_timeout\", 
fmt::format(\"{}\", timeout).c_str()},\n         {\"health_check_interval\", fmt::format(\"{}\", interval).c_str()}},\n        TestEnvironment::PortMap{}, version_);\n    thread_local_ = std::make_unique<ThreadLocal::InstanceImpl>();\n    init_manager_ = std::make_unique<Init::ManagerImpl>(\"Server\");\n    server_ = std::make_unique<InstanceImpl>(\n        *init_manager_, options_, time_system_,\n        std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\"), hooks_, restart_,\n        stats_store_, fakelock_, component_factory_,\n        std::make_unique<NiceMock<Random::MockRandomGenerator>>(), *thread_local_,\n        Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr);\n\n    EXPECT_TRUE(server_->api().fileSystem().fileExists(std::string(Platform::null_device_path)));\n  }\n\n  Thread::ThreadPtr startTestServer(const std::string& bootstrap_path,\n                                    const bool use_intializing_instance) {\n    absl::Notification started;\n    absl::Notification post_init;\n\n    auto server_thread = Thread::threadFactoryForTest().createThread([&] {\n      initialize(bootstrap_path, use_intializing_instance);\n      auto startup_handle = server_->registerCallback(ServerLifecycleNotifier::Stage::Startup,\n                                                      [&] { started.Notify(); });\n      auto post_init_handle = server_->registerCallback(ServerLifecycleNotifier::Stage::PostInit,\n                                                        [&] { post_init.Notify(); });\n      auto shutdown_handle = server_->registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit,\n                                                       [&](Event::PostCb) { FAIL(); });\n      shutdown_handle = nullptr; // unregister callback\n      server_->run();\n      startup_handle = nullptr;\n      post_init_handle = nullptr;\n      server_ = nullptr;\n      thread_local_ = nullptr;\n    });\n\n    started.WaitForNotification();\n    
post_init.WaitForNotification();\n    return server_thread;\n  }\n\n  void expectCorrectBuildVersion(const envoy::config::core::v3::BuildVersion& build_version) {\n    std::string version_string =\n        absl::StrCat(build_version.version().major_number(), \".\",\n                     build_version.version().minor_number(), \".\", build_version.version().patch());\n    const auto& fields = build_version.metadata().fields();\n    if (fields.find(BuildVersionMetadataKeys::get().BuildLabel) != fields.end()) {\n      absl::StrAppend(&version_string, \"-\",\n                      fields.at(BuildVersionMetadataKeys::get().BuildLabel).string_value());\n    }\n    EXPECT_EQ(BUILD_VERSION_NUMBER, version_string);\n  }\n\n  Network::Address::IpVersion version_;\n  testing::NiceMock<MockOptions> options_;\n  DefaultListenerHooks hooks_;\n  testing::NiceMock<MockHotRestart> restart_;\n  ThreadLocal::InstanceImplPtr thread_local_;\n  Stats::TestIsolatedStoreImpl stats_store_;\n  Thread::MutexBasicLockable fakelock_;\n  TestComponentFactory component_factory_;\n  Event::GlobalTimeSystem time_system_;\n  ProcessObject* process_object_ = nullptr;\n  std::unique_ptr<ProcessContextImpl> process_context_;\n  std::unique_ptr<Init::Manager> init_manager_;\n\n  std::unique_ptr<InstanceImpl> server_;\n};\n\nclass ServerInstanceImplTest : public ServerInstanceImplTestBase,\n                               public testing::TestWithParam<Network::Address::IpVersion> {\nprotected:\n  ServerInstanceImplTest() { version_ = GetParam(); }\n};\n\n// Custom StatsSink that just increments a counter when flush is called.\nclass CustomStatsSink : public Stats::Sink {\npublic:\n  CustomStatsSink(Stats::Scope& scope) : stats_flushed_(scope.counterFromString(\"stats.flushed\")) {}\n\n  // Stats::Sink\n  void flush(Stats::MetricSnapshot&) override { stats_flushed_.inc(); }\n\n  void onHistogramComplete(const Stats::Histogram&, uint64_t) override {}\n\nprivate:\n  Stats::Counter& stats_flushed_;\n};\n\n// 
Custom StatsSinkFactory that creates CustomStatsSink.\nclass CustomStatsSinkFactory : public Server::Configuration::StatsSinkFactory {\npublic:\n  // StatsSinkFactory\n  Stats::SinkPtr createStatsSink(const Protobuf::Message&,\n                                 Server::Configuration::ServerFactoryContext& server) override {\n    return std::make_unique<CustomStatsSink>(server.scope());\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};\n  }\n\n  std::string name() const override { return \"envoy.custom_stats_sink\"; }\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, ServerInstanceImplTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),\n                         TestUtility::ipTestParamsToString);\n\n// Validates that server stats are flushed even when server is stuck with initialization.\nTEST_P(ServerInstanceImplTest, StatsFlushWhenServerIsStillInitializing) {\n  CustomStatsSinkFactory factory;\n  Registry::InjectFactory<Server::Configuration::StatsSinkFactory> registered(factory);\n\n  auto server_thread =\n      startTestServer(\"test/server/test_data/server/stats_sink_bootstrap.yaml\", true);\n\n  // Wait till stats are flushed to custom sink and validate that the actual flush happens.\n  TestUtility::waitForCounterEq(stats_store_, \"stats.flushed\", 1, time_system_);\n  EXPECT_EQ(3L, TestUtility::findGauge(stats_store_, \"server.state\")->value());\n  EXPECT_EQ(Init::Manager::State::Initializing, server_->initManager().state());\n\n  server_->dispatcher().post([&] { server_->shutdown(); });\n  server_thread->join();\n}\n\n// Validates that the \"server.version\" is updated with stats_server_version_override from bootstrap.\nTEST_P(ServerInstanceImplTest, ProxyVersionOveridesFromBootstrap) {\n  auto server_thread =\n      
startTestServer(\"test/server/test_data/server/proxy_version_bootstrap.yaml\", true);\n\n  EXPECT_EQ(100012001, TestUtility::findGauge(stats_store_, \"server.version\")->value());\n\n  server_->dispatcher().post([&] { server_->shutdown(); });\n  server_thread->join();\n}\n\nTEST_P(ServerInstanceImplTest, EmptyShutdownLifecycleNotifications) {\n  auto server_thread = startTestServer(\"test/server/test_data/server/node_bootstrap.yaml\", false);\n  server_->dispatcher().post([&] { server_->shutdown(); });\n  server_thread->join();\n  // Validate that initialization_time histogram value has been set.\n  EXPECT_TRUE(stats_store_\n                  .histogramFromString(\"server.initialization_time_ms\",\n                                       Stats::Histogram::Unit::Milliseconds)\n                  .used());\n  EXPECT_EQ(0L, TestUtility::findGauge(stats_store_, \"server.state\")->value());\n}\n\nTEST_P(ServerInstanceImplTest, LifecycleNotifications) {\n  bool startup = false, post_init = false, shutdown = false, shutdown_with_completion = false;\n  absl::Notification started, post_init_fired, shutdown_begin, completion_block, completion_done;\n\n  // Run the server in a separate thread so we can test different lifecycle stages.\n  auto server_thread = Thread::threadFactoryForTest().createThread([&] {\n    initialize(\"test/server/test_data/server/node_bootstrap.yaml\");\n    auto handle1 = server_->registerCallback(ServerLifecycleNotifier::Stage::Startup, [&] {\n      startup = true;\n      started.Notify();\n    });\n    auto handle2 = server_->registerCallback(ServerLifecycleNotifier::Stage::PostInit, [&] {\n      post_init = true;\n      post_init_fired.Notify();\n    });\n    auto handle3 = server_->registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit, [&] {\n      shutdown = true;\n      shutdown_begin.Notify();\n    });\n    auto handle4 = server_->registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit,\n                                             
[&](Event::PostCb completion_cb) {\n                                               // Block till we're told to complete\n                                               completion_block.WaitForNotification();\n                                               shutdown_with_completion = true;\n                                               server_->dispatcher().post(completion_cb);\n                                               completion_done.Notify();\n                                             });\n    auto handle5 =\n        server_->registerCallback(ServerLifecycleNotifier::Stage::Startup, [&] { FAIL(); });\n    handle5 = server_->registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit,\n                                        [&](Event::PostCb) { FAIL(); });\n    handle5 = nullptr;\n\n    server_->run();\n    handle1 = nullptr;\n    handle2 = nullptr;\n    handle3 = nullptr;\n    handle4 = nullptr;\n    server_ = nullptr;\n    thread_local_ = nullptr;\n  });\n\n  started.WaitForNotification();\n  EXPECT_TRUE(startup);\n  EXPECT_FALSE(shutdown);\n  EXPECT_TRUE(TestUtility::findGauge(stats_store_, \"server.state\")->used());\n  EXPECT_EQ(0L, TestUtility::findGauge(stats_store_, \"server.state\")->value());\n\n  post_init_fired.WaitForNotification();\n  EXPECT_TRUE(post_init);\n  EXPECT_FALSE(shutdown);\n\n  server_->dispatcher().post([&] { server_->shutdown(); });\n  shutdown_begin.WaitForNotification();\n  EXPECT_TRUE(shutdown);\n\n  // Expect the server to block waiting for the completion callback to be invoked\n  EXPECT_FALSE(completion_done.WaitForNotificationWithTimeout(absl::Seconds(1)));\n\n  completion_block.Notify();\n  completion_done.WaitForNotification();\n  EXPECT_TRUE(shutdown_with_completion);\n\n  server_thread->join();\n}\n\n// A test target which never signals that it is ready.\nclass NeverReadyTarget : public Init::TargetImpl {\npublic:\n  NeverReadyTarget(absl::Notification& initialized)\n      : Init::TargetImpl(\"test\", [this] { 
initialize(); }), initialized_(initialized) {}\n\nprivate:\n  void initialize() { initialized_.Notify(); }\n\n  absl::Notification& initialized_;\n};\n\nTEST_P(ServerInstanceImplTest, NoLifecycleNotificationOnEarlyShutdown) {\n  absl::Notification initialized;\n\n  auto server_thread = Thread::threadFactoryForTest().createThread([&] {\n    initialize(\"test/server/test_data/server/node_bootstrap.yaml\");\n\n    // This shutdown notification should never be called because we will shutdown\n    // early before the init manager finishes initializing and therefore before\n    // the server starts worker threads.\n    auto shutdown_handle = server_->registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit,\n                                                     [&](Event::PostCb) { FAIL(); });\n    NeverReadyTarget target(initialized);\n    server_->initManager().add(target);\n    server_->run();\n\n    shutdown_handle = nullptr;\n    server_ = nullptr;\n    thread_local_ = nullptr;\n  });\n\n  // Wait until the init manager starts initializing targets...\n  initialized.WaitForNotification();\n\n  // Now shutdown the main dispatcher and trigger server lifecycle notifications.\n  server_->dispatcher().post([&] { server_->shutdown(); });\n  server_thread->join();\n}\n\nTEST_P(ServerInstanceImplTest, V2ConfigOnly) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  try {\n    initialize(\"test/server/test_data/server/unparseable_bootstrap.yaml\");\n    FAIL();\n  } catch (const EnvoyException& e) {\n    EXPECT_THAT(e.what(), HasSubstr(\"Unable to parse JSON as proto\"));\n  }\n}\n\nTEST_P(ServerInstanceImplTest, Stats) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  options_.concurrency_ = 2;\n  options_.hot_restart_epoch_ = 3;\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n  EXPECT_NE(nullptr, 
TestUtility::findCounter(stats_store_, \"main_thread.watchdog_miss\"));\n  EXPECT_NE(nullptr, TestUtility::findCounter(stats_store_, \"workers.watchdog_miss\"));\n  EXPECT_EQ(2L, TestUtility::findGauge(stats_store_, \"server.concurrency\")->value());\n  EXPECT_EQ(3L, TestUtility::findGauge(stats_store_, \"server.hot_restart_epoch\")->value());\n\n// The ENVOY_BUG stat works in release mode.\n#if defined(NDEBUG)\n  // Test exponential back-off on a fixed line ENVOY_BUG.\n  for (int i = 0; i < 16; i++) {\n    ENVOY_BUG(false, \"\");\n  }\n  EXPECT_EQ(5L, TestUtility::findCounter(stats_store_, \"server.envoy_bug_failures\")->value());\n  // Another ENVOY_BUG increments the counter.\n  ENVOY_BUG(false, \"Testing envoy bug assertion failure detection in release build.\");\n  EXPECT_EQ(6L, TestUtility::findCounter(stats_store_, \"server.envoy_bug_failures\")->value());\n#else\n  // The ENVOY_BUG macro aborts in debug mode.\n  EXPECT_DEATH(ENVOY_BUG(false, \"\"), \"\");\n#endif\n\n// The ASSERT stat only works in this configuration.\n#if defined(NDEBUG) && defined(ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE)\n  ASSERT(false, \"Testing debug assertion failure detection in release build.\");\n  EXPECT_EQ(1L, TestUtility::findCounter(stats_store_, \"server.debug_assertion_failures\")->value());\n#else\n  EXPECT_EQ(0L, TestUtility::findCounter(stats_store_, \"server.debug_assertion_failures\")->value());\n#endif\n}\n\nclass ServerStatsTest\n    : public Event::TestUsingSimulatedTime,\n      public ServerInstanceImplTestBase,\n      public testing::TestWithParam<std::tuple<Network::Address::IpVersion, bool>> {\nprotected:\n  ServerStatsTest() {\n    version_ = std::get<0>(GetParam());\n    manual_flush_ = std::get<1>(GetParam());\n  }\n\n  void flushStats() {\n    if (manual_flush_) {\n      server_->flushStats();\n      server_->dispatcher().run(Event::Dispatcher::RunType::Block);\n    } else {\n      // Default flush interval is 5 seconds.\n      
simTime().advanceTimeAndRun(std::chrono::seconds(6), server_->dispatcher(),\n                                  Event::Dispatcher::RunType::Block);\n    }\n  }\n\n  bool manual_flush_;\n};\n\nstd::string ipFlushingModeTestParamsToString(\n    const ::testing::TestParamInfo<std::tuple<Network::Address::IpVersion, bool>>& params) {\n  return fmt::format(\n      \"{}_{}\",\n      TestUtility::ipTestParamsToString(\n          ::testing::TestParamInfo<Network::Address::IpVersion>(std::get<0>(params.param), 0)),\n      std::get<1>(params.param) ? \"with_manual_flush\" : \"with_time_based_flush\");\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    IpVersionsFlushingMode, ServerStatsTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool()),\n    ipFlushingModeTestParamsToString);\n\nTEST_P(ServerStatsTest, FlushStats) {\n  initialize(\"test/server/test_data/server/empty_bootstrap.yaml\");\n  Stats::Gauge& recent_lookups = stats_store_.gaugeFromString(\n      \"server.stats_recent_lookups\", Stats::Gauge::ImportMode::NeverImport);\n  EXPECT_EQ(0, recent_lookups.value());\n  flushStats();\n  uint64_t strobed_recent_lookups = recent_lookups.value();\n  EXPECT_LT(100, strobed_recent_lookups); // Recently this was 319 but exact value not important.\n  Stats::StatNameSetPtr test_set = stats_store_.symbolTable().makeSet(\"test\");\n\n  // When we remember a StatNameSet builtin, we charge only for the SymbolTable\n  // lookup, which requires a lock.\n  test_set->rememberBuiltin(\"a.b\");\n  flushStats();\n  EXPECT_EQ(1, recent_lookups.value() - strobed_recent_lookups);\n  strobed_recent_lookups = recent_lookups.value();\n\n  // When we create a dynamic stat, there are no locks taken.\n  Stats::StatNameDynamicStorage dynamic_stat(\"c.d\", stats_store_.symbolTable());\n  flushStats();\n  EXPECT_EQ(recent_lookups.value(), strobed_recent_lookups);\n}\n\n// Default validation mode\nTEST_P(ServerInstanceImplTest, ValidationDefault) {\n  
options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n  EXPECT_THAT_THROWS_MESSAGE(\n      server_->messageValidationContext().staticValidationVisitor().onUnknownField(\"foo\"),\n      EnvoyException, \"Protobuf message (foo) has unknown fields\");\n  EXPECT_EQ(0, TestUtility::findCounter(stats_store_, \"server.static_unknown_fields\")->value());\n  EXPECT_NO_THROW(\n      server_->messageValidationContext().dynamicValidationVisitor().onUnknownField(\"bar\"));\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_, \"server.dynamic_unknown_fields\")->value());\n}\n\n// Validation mode with --allow-unknown-static-fields\nTEST_P(ServerInstanceImplTest, ValidationAllowStatic) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  options_.allow_unknown_static_fields_ = true;\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n  EXPECT_NO_THROW(\n      server_->messageValidationContext().staticValidationVisitor().onUnknownField(\"foo\"));\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_, \"server.static_unknown_fields\")->value());\n  EXPECT_NO_THROW(\n      server_->messageValidationContext().dynamicValidationVisitor().onUnknownField(\"bar\"));\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_, \"server.dynamic_unknown_fields\")->value());\n}\n\n// Validation mode with --reject-unknown-dynamic-fields\nTEST_P(ServerInstanceImplTest, ValidationRejectDynamic) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  options_.reject_unknown_dynamic_fields_ = true;\n  options_.ignore_unknown_dynamic_fields_ = true; // reject takes precedence over ignore\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n  EXPECT_THAT_THROWS_MESSAGE(\n 
     server_->messageValidationContext().staticValidationVisitor().onUnknownField(\"foo\"),\n      EnvoyException, \"Protobuf message (foo) has unknown fields\");\n  EXPECT_EQ(0, TestUtility::findCounter(stats_store_, \"server.static_unknown_fields\")->value());\n  EXPECT_THAT_THROWS_MESSAGE(\n      server_->messageValidationContext().dynamicValidationVisitor().onUnknownField(\"bar\"),\n      EnvoyException, \"Protobuf message (bar) has unknown fields\");\n  EXPECT_EQ(0, TestUtility::findCounter(stats_store_, \"server.dynamic_unknown_fields\")->value());\n}\n\n// Validation mode with --allow-unknown-static-fields --reject-unknown-dynamic-fields\nTEST_P(ServerInstanceImplTest, ValidationAllowStaticRejectDynamic) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  options_.allow_unknown_static_fields_ = true;\n  options_.reject_unknown_dynamic_fields_ = true;\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n  EXPECT_NO_THROW(\n      server_->messageValidationContext().staticValidationVisitor().onUnknownField(\"foo\"));\n  EXPECT_EQ(1, TestUtility::findCounter(stats_store_, \"server.static_unknown_fields\")->value());\n  EXPECT_THAT_THROWS_MESSAGE(\n      server_->messageValidationContext().dynamicValidationVisitor().onUnknownField(\"bar\"),\n      EnvoyException, \"Protobuf message (bar) has unknown fields\");\n  EXPECT_EQ(0, TestUtility::findCounter(stats_store_, \"server.dynamic_unknown_fields\")->value());\n}\n\n// Validate server localInfo() from bootstrap Node.\n// Deprecated testing of the envoy.api.v2.core.Node.build_version field\nTEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(BootstrapNodeDeprecated)) {\n  initialize(\"test/server/test_data/server/node_bootstrap.yaml\");\n  EXPECT_EQ(\"bootstrap_zone\", server_->localInfo().zoneName());\n  EXPECT_EQ(\"bootstrap_cluster\", server_->localInfo().clusterName());\n  EXPECT_EQ(\"bootstrap_id\", 
server_->localInfo().nodeName());\n  EXPECT_EQ(\"bootstrap_sub_zone\", server_->localInfo().node().locality().sub_zone());\n  EXPECT_EQ(VersionInfo::version(),\n            server_->localInfo().node().hidden_envoy_deprecated_build_version());\n  EXPECT_EQ(\"envoy\", server_->localInfo().node().user_agent_name());\n  EXPECT_TRUE(server_->localInfo().node().has_user_agent_build_version());\n  expectCorrectBuildVersion(server_->localInfo().node().user_agent_build_version());\n}\n\n// Validate server localInfo() from bootstrap Node.\nTEST_P(ServerInstanceImplTest, BootstrapNode) {\n  initialize(\"test/server/test_data/server/node_bootstrap.yaml\");\n  EXPECT_EQ(\"bootstrap_zone\", server_->localInfo().zoneName());\n  EXPECT_EQ(\"bootstrap_cluster\", server_->localInfo().clusterName());\n  EXPECT_EQ(\"bootstrap_id\", server_->localInfo().nodeName());\n  EXPECT_EQ(\"bootstrap_sub_zone\", server_->localInfo().node().locality().sub_zone());\n  EXPECT_EQ(\"envoy\", server_->localInfo().node().user_agent_name());\n  EXPECT_TRUE(server_->localInfo().node().has_user_agent_build_version());\n  expectCorrectBuildVersion(server_->localInfo().node().user_agent_build_version());\n}\n\n// Validate that bootstrap pb_text loads.\nTEST_P(ServerInstanceImplTest, LoadsBootstrapFromPbText) {\n  EXPECT_LOG_NOT_CONTAINS(\"trace\", \"Configuration does not parse cleanly as v3\",\n                          initialize(\"test/server/test_data/server/node_bootstrap.pb_text\"));\n  EXPECT_EQ(\"bootstrap_id\", server_->localInfo().node().id());\n}\n\n// Validate that bootstrap v2 pb_text with deprecated fields loads.\nTEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2BootstrapFromPbText)) {\n  EXPECT_LOG_CONTAINS(\n      \"trace\", \"Configuration does not parse cleanly as v3\",\n      initialize(\"test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text\"));\n  EXPECT_FALSE(server_->localInfo().node().hidden_envoy_deprecated_build_version().empty());\n}\n\n// Validate 
that bootstrap v2 YAML with deprecated fields loads.\nTEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2BootstrapFromYaml)) {\n  EXPECT_LOG_CONTAINS(\n      \"trace\", \"Configuration does not parse cleanly as v3\",\n      initialize(\"test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml\"));\n  EXPECT_FALSE(server_->localInfo().node().hidden_envoy_deprecated_build_version().empty());\n}\n\n// Validate that bootstrap v3 pb_text with new fields loads fails if V2 config is specified.\nTEST_P(ServerInstanceImplTest, FailToLoadV3ConfigWhenV2SelectedFromPbText) {\n  options_.bootstrap_version_ = 2;\n\n  EXPECT_THROW_WITH_REGEX(\n      initialize(\"test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text\"),\n      EnvoyException, \"Unable to parse file\");\n}\n\n// Validate that bootstrap v3 YAML with new fields loads fails if V2 config is specified.\nTEST_P(ServerInstanceImplTest, FailToLoadV3ConfigWhenV2SelectedFromYaml) {\n  options_.bootstrap_version_ = 2;\n\n  EXPECT_THROW_WITH_REGEX(\n      initialize(\"test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml\"),\n      EnvoyException, \"has unknown fields\");\n}\n\n// Validate that we correctly parse a V2 pb_text file when configured to do so.\nTEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2ConfigWhenV2SelectedFromPbText)) {\n  options_.bootstrap_version_ = 2;\n\n  EXPECT_LOG_CONTAINS(\n      \"trace\", \"Configuration does not parse cleanly as v3\",\n      initialize(\"test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text\"));\n  EXPECT_EQ(server_->localInfo().node().id(), \"bootstrap_id\");\n}\n\n// Validate that we correctly parse a V2 YAML file when configured to do so.\nTEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2ConfigWhenV2SelectedFromYaml)) {\n  options_.bootstrap_version_ = 2;\n\n  EXPECT_LOG_CONTAINS(\n      \"trace\", \"Configuration does not parse cleanly as v3\",\n      
initialize(\"test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml\"));\n  EXPECT_EQ(server_->localInfo().node().id(), \"bootstrap_id\");\n}\n\n// Validate that we correctly parse a V3 pb_text file without explicit version configuration.\nTEST_P(ServerInstanceImplTest, LoadsV3ConfigFromPbText) {\n  EXPECT_LOG_NOT_CONTAINS(\n      \"trace\", \"Configuration does not parse cleanly as v3\",\n      initialize(\"test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text\"));\n}\n\n// Validate that we correctly parse a V3 YAML file without explicit version configuration.\nTEST_P(ServerInstanceImplTest, LoadsV3ConfigFromYaml) {\n  EXPECT_LOG_NOT_CONTAINS(\n      \"trace\", \"Configuration does not parse cleanly as v3\",\n      initialize(\"test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml\"));\n}\n\n// Validate that we correctly parse a V3 pb_text file when configured to do so.\nTEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV3SelectedFromPbText) {\n  options_.bootstrap_version_ = 3;\n\n  EXPECT_LOG_NOT_CONTAINS(\n      \"trace\", \"Configuration does not parse cleanly as v3\",\n      initialize(\"test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text\"));\n}\n\n// Validate that we correctly parse a V3 YAML file when configured to do so.\nTEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV3SelectedFromYaml) {\n  options_.bootstrap_version_ = 3;\n\n  EXPECT_LOG_NOT_CONTAINS(\n      \"trace\", \"Configuration does not parse cleanly as v3\",\n      initialize(\"test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml\"));\n}\n\n// Validate that bootstrap v2 pb_text with deprecated fields loads fails if V3 config is specified.\nTEST_P(ServerInstanceImplTest, FailToLoadV2ConfigWhenV3SelectedFromPbText) {\n  options_.bootstrap_version_ = 3;\n\n  EXPECT_THROW_WITH_REGEX(\n      initialize(\"test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text\"),\n      EnvoyException, \"Unable to parse 
file\");\n}\n\n// Validate that bootstrap v2 YAML with deprecated fields loads fails if V3 config is specified.\nTEST_P(ServerInstanceImplTest, FailToLoadV2ConfigWhenV3SelectedFromYaml) {\n  options_.bootstrap_version_ = 3;\n\n  EXPECT_THROW_WITH_REGEX(\n      initialize(\"test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml\"),\n      EnvoyException, \"has unknown fields\");\n}\n\n// Validate that we blow up on invalid version number.\nTEST_P(ServerInstanceImplTest, InvalidBootstrapVersion) {\n  options_.bootstrap_version_ = 1;\n\n  EXPECT_THROW_WITH_REGEX(\n      initialize(\"test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text\"),\n      EnvoyException, \"Unknown bootstrap version 1.\");\n}\n\nTEST_P(ServerInstanceImplTest, LoadsBootstrapFromConfigProtoOptions) {\n  options_.config_proto_.mutable_node()->set_id(\"foo\");\n  initialize(\"test/server/test_data/server/node_bootstrap.yaml\");\n  EXPECT_EQ(\"foo\", server_->localInfo().node().id());\n}\n\nTEST_P(ServerInstanceImplTest, LoadsBootstrapFromConfigYamlAfterConfigPath) {\n  options_.config_yaml_ = \"node:\\n  id: 'bar'\";\n  initialize(\"test/server/test_data/server/node_bootstrap.yaml\");\n  EXPECT_EQ(\"bar\", server_->localInfo().node().id());\n}\n\nTEST_P(ServerInstanceImplTest, LoadsBootstrapFromConfigProtoOptionsLast) {\n  options_.config_yaml_ = \"node:\\n  id: 'bar'\";\n  options_.config_proto_.mutable_node()->set_id(\"foo\");\n  initialize(\"test/server/test_data/server/node_bootstrap.yaml\");\n  EXPECT_EQ(\"foo\", server_->localInfo().node().id());\n}\n\n// Validate server localInfo() from bootstrap Node with CLI overrides.\nTEST_P(ServerInstanceImplTest, BootstrapNodeWithOptionsOverride) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  options_.service_zone_name_ = \"some_zone_name\";\n  initialize(\"test/server/test_data/server/node_bootstrap.yaml\");\n  EXPECT_EQ(\"some_zone_name\", 
server_->localInfo().zoneName());\n  EXPECT_EQ(\"some_cluster_name\", server_->localInfo().clusterName());\n  EXPECT_EQ(\"some_node_name\", server_->localInfo().nodeName());\n  EXPECT_EQ(\"bootstrap_sub_zone\", server_->localInfo().node().locality().sub_zone());\n}\n\n// Validate server runtime is parsed from bootstrap and that we can read from\n// service cluster specified disk-based overrides.\nTEST_P(ServerInstanceImplTest, BootstrapRuntime) {\n  options_.service_cluster_name_ = \"some_service\";\n  initialize(\"test/server/test_data/server/runtime_bootstrap.yaml\");\n  EXPECT_EQ(\"bar\", server_->runtime().snapshot().get(\"foo\").value().get());\n  // This should access via the override/some_service overlay.\n  EXPECT_EQ(\"fozz\", server_->runtime().snapshot().get(\"fizz\").value().get());\n}\n\n// Validate that a runtime absent an admin layer will fail mutating operations\n// but still support inspection of runtime values.\nTEST_P(ServerInstanceImplTest, RuntimeNoAdminLayer) {\n  options_.service_cluster_name_ = \"some_service\";\n  initialize(\"test/server/test_data/server/runtime_bootstrap.yaml\");\n  Http::TestResponseHeaderMapImpl response_headers;\n  std::string response_body;\n  EXPECT_EQ(Http::Code::OK,\n            server_->admin().request(\"/runtime\", \"GET\", response_headers, response_body));\n  EXPECT_THAT(response_body, HasSubstr(\"fozz\"));\n  EXPECT_EQ(\n      Http::Code::ServiceUnavailable,\n      server_->admin().request(\"/runtime_modify?foo=bar\", \"POST\", response_headers, response_body));\n  EXPECT_EQ(\"No admin layer specified\", response_body);\n}\n\n// Verify that bootstrap fails if RTDS is configured through an EDS cluster\nTEST_P(ServerInstanceImplTest, BootstrapRtdsThroughEdsFails) {\n  options_.service_cluster_name_ = \"some_service\";\n  options_.service_node_name_ = \"some_node_name\";\n  EXPECT_THROW_WITH_REGEX(initialize(\"test/server/test_data/server/runtime_bootstrap_eds.yaml\"),\n                          EnvoyException, 
\"must have a statically defined non-EDS cluster\");\n}\n\n// Verify that bootstrap fails if RTDS is configured through an ADS using EDS cluster\nTEST_P(ServerInstanceImplTest, BootstrapRtdsThroughAdsViaEdsFails) {\n  options_.service_cluster_name_ = \"some_service\";\n  options_.service_node_name_ = \"some_node_name\";\n  EXPECT_THROW_WITH_REGEX(initialize(\"test/server/test_data/server/runtime_bootstrap_ads_eds.yaml\"),\n                          EnvoyException, \"Unknown gRPC client cluster\");\n}\n\nTEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(InvalidLegacyBootstrapRuntime)) {\n  EXPECT_THROW_WITH_MESSAGE(\n      initialize(\"test/server/test_data/server/invalid_legacy_runtime_bootstrap.yaml\"),\n      EnvoyException, \"Invalid runtime entry value for foo\");\n}\n\n// Validate invalid runtime in bootstrap is rejected.\nTEST_P(ServerInstanceImplTest, InvalidBootstrapRuntime) {\n  EXPECT_THROW_WITH_MESSAGE(\n      initialize(\"test/server/test_data/server/invalid_runtime_bootstrap.yaml\"), EnvoyException,\n      \"Invalid runtime entry value for foo\");\n}\n\n// Validate invalid layered runtime missing a name is rejected.\nTEST_P(ServerInstanceImplTest, InvalidLayeredBootstrapMissingName) {\n  EXPECT_THROW_WITH_REGEX(\n      initialize(\"test/server/test_data/server/invalid_layered_runtime_missing_name.yaml\"),\n      EnvoyException, \"RuntimeLayerValidationError.Name: \\\\[\\\"value length must be at least\");\n}\n\n// Validate invalid layered runtime with duplicate names is rejected.\nTEST_P(ServerInstanceImplTest, InvalidLayeredBootstrapDuplicateName) {\n  EXPECT_THROW_WITH_REGEX(\n      initialize(\"test/server/test_data/server/invalid_layered_runtime_duplicate_name.yaml\"),\n      EnvoyException, \"Duplicate layer name: some_static_laye\");\n}\n\n// Validate invalid layered runtime with no layer specifier is rejected.\nTEST_P(ServerInstanceImplTest, InvalidLayeredBootstrapNoLayerSpecifier) {\n  EXPECT_THROW_WITH_REGEX(\n      
initialize(\"test/server/test_data/server/invalid_layered_runtime_no_layer_specifier.yaml\"),\n      EnvoyException, \"BootstrapValidationError.LayeredRuntime\");\n}\n\n// Regression test for segfault when server initialization fails prior to\n// ClusterManager initialization.\nTEST_P(ServerInstanceImplTest, BootstrapClusterManagerInitializationFail) {\n  EXPECT_THROW_WITH_MESSAGE(initialize(\"test/server/test_data/server/cluster_dupe_bootstrap.yaml\"),\n                            EnvoyException, \"cluster manager: duplicate cluster 'service_google'\");\n}\n\n// Regression tests for SdsApi throwing exceptions in initialize().\nTEST_P(ServerInstanceImplTest, BadSdsConfigSource) {\n  EXPECT_THROW_WITH_MESSAGE(\n      initialize(\"test/server/test_data/server/bad_sds_config_source.yaml\"), EnvoyException,\n      \"envoy.config.core.v3.ApiConfigSource must have a statically defined non-EDS cluster: \"\n      \"'sds-grpc' does not exist, was added via api, or is an EDS cluster\");\n}\n\n// Test for protoc-gen-validate constraint on invalid timeout entry of a health check config entry.\nTEST_P(ServerInstanceImplTest, BootstrapClusterHealthCheckInvalidTimeout) {\n  EXPECT_THROW_WITH_REGEX(\n      initializeWithHealthCheckParams(\n          \"test/server/test_data/server/cluster_health_check_bootstrap.yaml\", 0, 0.25),\n      EnvoyException,\n      \"HealthCheckValidationError.Timeout: \\\\[\\\"value must be greater than \\\" \\\"0s\\\"\\\\]\");\n}\n\n// Test for protoc-gen-validate constraint on invalid interval entry of a health check config entry.\nTEST_P(ServerInstanceImplTest, BootstrapClusterHealthCheckInvalidInterval) {\n  EXPECT_THROW_WITH_REGEX(\n      initializeWithHealthCheckParams(\n          \"test/server/test_data/server/cluster_health_check_bootstrap.yaml\", 0.5, 0),\n      EnvoyException,\n      \"HealthCheckValidationError.Interval: \\\\[\\\"value must be greater than \\\" \\\"0s\\\"\\\\]\");\n}\n\n// Test for protoc-gen-validate constraint on invalid 
timeout and interval entry of a health check\n// config entry.\nTEST_P(ServerInstanceImplTest, BootstrapClusterHealthCheckInvalidTimeoutAndInterval) {\n  EXPECT_THROW_WITH_REGEX(\n      initializeWithHealthCheckParams(\n          \"test/server/test_data/server/cluster_health_check_bootstrap.yaml\", 0, 0),\n      EnvoyException,\n      \"HealthCheckValidationError.Timeout: \\\\[\\\"value must be greater than \\\" \\\"0s\\\"\\\\]\");\n}\n\n// Test for protoc-gen-validate constraint on valid interval entry of a health check config entry.\nTEST_P(ServerInstanceImplTest, BootstrapClusterHealthCheckValidTimeoutAndInterval) {\n  EXPECT_NO_THROW(initializeWithHealthCheckParams(\n      \"test/server/test_data/server/cluster_health_check_bootstrap.yaml\", 0.25, 0.5));\n}\n\n// Test that a Bootstrap proto with no address specified in its Admin field can go through\n// initialization properly, but without starting an admin listener.\nTEST_P(ServerInstanceImplTest, BootstrapNodeNoAdmin) {\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/node_bootstrap_no_admin_port.yaml\"));\n  // Admin::addListenerToHandler() calls one of handler's methods after checking that the Admin\n  // has a listener. 
So, the fact that passing a nullptr doesn't cause a segfault establishes\n  // that there is no listener.\n  server_->admin().addListenerToHandler(/*handler=*/nullptr);\n}\n\n// Validate that an admin config with a server address but no access log path is rejected.\nTEST_P(ServerInstanceImplTest, BootstrapNodeWithoutAccessLog) {\n  EXPECT_THROW_WITH_MESSAGE(\n      initialize(\"test/server/test_data/server/node_bootstrap_without_access_log.yaml\"),\n      EnvoyException, \"An admin access log path is required for a listening server.\");\n}\n\nnamespace {\nvoid bindAndListenTcpSocket(const Network::Address::InstanceConstSharedPtr& address,\n                            const Network::Socket::OptionsSharedPtr& options) {\n  auto socket = std::make_unique<Network::TcpListenSocket>(address, options, true);\n  // Some kernels erroneously allow `bind` without SO_REUSEPORT for addresses\n  // with some other socket already listening on it, see #7636.\n  if (SOCKET_FAILURE(socket->ioHandle().listen(1).rc_)) {\n    // Mimic bind exception for the test simplicity.\n    throw Network::SocketBindException(fmt::format(\"cannot listen: {}\", errorDetails(errno)),\n                                       errno);\n  }\n}\n} // namespace\n\n// Test that `socket_options` field in an Admin proto is honored.\nTEST_P(ServerInstanceImplTest, BootstrapNodeWithSocketOptions) {\n  // Start Envoy instance with admin port with SO_REUSEPORT option.\n  ASSERT_NO_THROW(\n      initialize(\"test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml\"));\n  const auto address = server_->admin().socket().localAddress();\n\n  // First attempt to bind and listen socket should fail due to the lack of SO_REUSEPORT socket\n  // options.\n  EXPECT_THAT_THROWS_MESSAGE(bindAndListenTcpSocket(address, nullptr), EnvoyException,\n                             HasSubstr(errorDetails(SOCKET_ERROR_ADDR_IN_USE)));\n\n  // Second attempt should succeed as kernel allows multiple sockets to listen the 
same address iff\n  // both of them use SO_REUSEPORT socket option.\n  auto options = std::make_shared<Network::Socket::Options>();\n  options->emplace_back(std::make_shared<Network::SocketOptionImpl>(\n      envoy::config::core::v3::SocketOption::STATE_PREBIND,\n      ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_REUSEPORT), 1));\n  EXPECT_NO_THROW(bindAndListenTcpSocket(address, options));\n}\n\n// Empty bootstrap succeeds.\nTEST_P(ServerInstanceImplTest, EmptyBootstrap) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n}\n\n// Custom header bootstrap succeeds.\nTEST_P(ServerInstanceImplTest, CustomHeaderBootstrap) {\n  options_.config_path_ = TestEnvironment::writeStringToFileForTest(\n      \"custom.yaml\", \"header_prefix: \\\"x-envoy\\\"\\nstatic_resources:\\n\");\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  EXPECT_NO_THROW(initialize(options_.config_path_));\n}\n\n// Negative test for protoc-gen-validate constraints.\nTEST_P(ServerInstanceImplTest, ValidateFail) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  try {\n    initialize(\"test/server/test_data/server/invalid_bootstrap.yaml\");\n    FAIL();\n  } catch (const EnvoyException& e) {\n    EXPECT_THAT(e.what(), HasSubstr(\"Proto constraint validation failed\"));\n  }\n}\n\nTEST_P(ServerInstanceImplTest, LogToFile) {\n  const std::string path =\n      TestEnvironment::temporaryPath(\"ServerInstanceImplTest_LogToFile_Test.log\");\n  options_.log_path_ = path;\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n  EXPECT_TRUE(server_->api().fileSystem().fileExists(path));\n\n  
GET_MISC_LOGGER().set_level(spdlog::level::info);\n  ENVOY_LOG_MISC(warn, \"LogToFile test string\");\n  Logger::Registry::getSink()->flush();\n  std::string log = server_->api().fileSystem().fileReadToEnd(path);\n  EXPECT_GT(log.size(), 0);\n  EXPECT_TRUE(log.find(\"LogToFile test string\") != std::string::npos);\n\n  // Test that critical messages get immediately flushed\n  ENVOY_LOG_MISC(critical, \"LogToFile second test string\");\n  log = server_->api().fileSystem().fileReadToEnd(path);\n  EXPECT_TRUE(log.find(\"LogToFile second test string\") != std::string::npos);\n}\n\nTEST_P(ServerInstanceImplTest, LogToFileError) {\n  options_.log_path_ = \"/this/path/does/not/exist\";\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  try {\n    initialize(\"test/server/test_data/server/empty_bootstrap.yaml\");\n    FAIL();\n  } catch (const EnvoyException& e) {\n    EXPECT_THAT(e.what(), HasSubstr(\"Failed to open log-file\"));\n  }\n}\n\n// When there are no bootstrap CLI options, either for content or path, we can load the server with\n// an empty config.\nTEST_P(ServerInstanceImplTest, NoOptionsPassed) {\n  thread_local_ = std::make_unique<ThreadLocal::InstanceImpl>();\n  init_manager_ = std::make_unique<Init::ManagerImpl>(\"Server\");\n  EXPECT_THROW_WITH_MESSAGE(\n      server_.reset(new InstanceImpl(*init_manager_, options_, time_system_,\n                                     std::make_shared<Network::Address::Ipv4Instance>(\"127.0.0.1\"),\n                                     hooks_, restart_, stats_store_, fakelock_, component_factory_,\n                                     std::make_unique<NiceMock<Random::MockRandomGenerator>>(),\n                                     *thread_local_, Thread::threadFactoryForTest(),\n                                     Filesystem::fileSystemForTest(), nullptr)),\n      EnvoyException,\n      \"At least one of --config-path or --config-yaml or Options::configProto() 
should be \"\n      \"non-empty\");\n}\n\n// Validate that when std::exception is unexpectedly thrown, we exit safely.\n// This is a regression test for when we used to crash.\nTEST_P(ServerInstanceImplTest, StdExceptionThrowInConstructor) {\n  EXPECT_CALL(restart_, initialize(_, _)).WillOnce(InvokeWithoutArgs([] {\n    throw(std::runtime_error(\"foobar\"));\n  }));\n  EXPECT_THROW_WITH_MESSAGE(initialize(\"test/server/test_data/server/node_bootstrap.yaml\"),\n                            std::runtime_error, \"foobar\");\n}\n\n// Neither EnvoyException nor std::exception derived.\nclass FakeException {\npublic:\n  FakeException(const std::string& what) : what_(what) {}\n  const std::string& what() const { return what_; }\n\n  const std::string what_;\n};\n\n// Validate that when a totally unknown exception is unexpectedly thrown, we\n// exit safely. This is a regression test for when we used to crash.\nTEST_P(ServerInstanceImplTest, UnknownExceptionThrowInConstructor) {\n  EXPECT_CALL(restart_, initialize(_, _)).WillOnce(InvokeWithoutArgs([] {\n    throw(FakeException(\"foobar\"));\n  }));\n  EXPECT_THROW_WITH_MESSAGE(initialize(\"test/server/test_data/server/node_bootstrap.yaml\"),\n                            FakeException, \"foobar\");\n}\n\nTEST_P(ServerInstanceImplTest, MutexContentionEnabled) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  options_.mutex_tracing_enabled_ = true;\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n}\n\nTEST_P(ServerInstanceImplTest, NoHttpTracing) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n  EXPECT_THAT(envoy::config::trace::v3::Tracing{},\n              ProtoEq(server_->httpContext().defaultTracingConfig()));\n}\n\nTEST_P(ServerInstanceImplTest, 
DEPRECATED_FEATURE_TEST(ZipkinHttpTracingEnabled)) {\n  options_.service_cluster_name_ = \"some_cluster_name\";\n  options_.service_node_name_ = \"some_node_name\";\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/zipkin_tracing_deprecated_config.yaml\"));\n  EXPECT_EQ(\"zipkin\", server_->httpContext().defaultTracingConfig().http().name());\n}\n\nclass TestObject : public ProcessObject {\npublic:\n  void setFlag(bool value) { boolean_flag_ = value; }\n\n  bool boolean_flag_ = true;\n};\n\nTEST_P(ServerInstanceImplTest, WithProcessContext) {\n  TestObject object;\n  process_object_ = &object;\n\n  EXPECT_NO_THROW(initialize(\"test/server/test_data/server/empty_bootstrap.yaml\"));\n\n  auto context = server_->processContext();\n  auto& object_from_context = dynamic_cast<TestObject&>(context->get().get());\n  EXPECT_EQ(&object_from_context, &object);\n  EXPECT_TRUE(object_from_context.boolean_flag_);\n\n  object.boolean_flag_ = false;\n  EXPECT_FALSE(object_from_context.boolean_flag_);\n}\n\nclass FooBootstrapExtension : public BootstrapExtension {};\n\nTEST_P(ServerInstanceImplTest, WithBootstrapExtensions) {\n  NiceMock<Configuration::MockBootstrapExtensionFactory> mock_factory;\n  EXPECT_CALL(mock_factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() {\n    return std::make_unique<test::common::config::DummyConfig>();\n  }));\n  EXPECT_CALL(mock_factory, name()).WillRepeatedly(Return(\"envoy_test.bootstrap.foo\"));\n  EXPECT_CALL(mock_factory, createBootstrapExtension(_, _))\n      .WillOnce(Invoke([](const Protobuf::Message& config, Configuration::ServerFactoryContext&) {\n        const auto* proto = dynamic_cast<const test::common::config::DummyConfig*>(&config);\n        EXPECT_NE(nullptr, proto);\n        EXPECT_EQ(proto->a(), \"foo\");\n        return std::make_unique<FooBootstrapExtension>();\n      }));\n\n  Registry::InjectFactory<Configuration::BootstrapExtensionFactory> registered_factory(\n      mock_factory);\n\n  
EXPECT_NO_THROW(initialize(\"test/server/test_data/server/bootstrap_extensions.yaml\"));\n}\n\nTEST_P(ServerInstanceImplTest, WithBootstrapExtensionsThrowingError) {\n  NiceMock<Configuration::MockBootstrapExtensionFactory> mock_factory;\n  EXPECT_CALL(mock_factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() {\n    return std::make_unique<test::common::config::DummyConfig>();\n  }));\n  EXPECT_CALL(mock_factory, name()).WillRepeatedly(Return(\"envoy_test.bootstrap.foo\"));\n  EXPECT_CALL(mock_factory, createBootstrapExtension(_, _))\n      .WillOnce(Invoke([](const Protobuf::Message&,\n                          Configuration::ServerFactoryContext&) -> BootstrapExtensionPtr {\n        throw EnvoyException(\"Unable to initiate mock_bootstrap_extension.\");\n      }));\n\n  Registry::InjectFactory<Configuration::BootstrapExtensionFactory> registered_factory(\n      mock_factory);\n\n  EXPECT_THROW_WITH_REGEX(initialize(\"test/server/test_data/server/bootstrap_extensions.yaml\"),\n                          EnvoyException, \"Unable to initiate mock_bootstrap_extension.\");\n}\n\nTEST_P(ServerInstanceImplTest, WithUnknownBootstrapExtensions) {\n  EXPECT_THROW_WITH_REGEX(\n      initialize(\"test/server/test_data/server/bootstrap_extensions.yaml\"), EnvoyException,\n      \"Didn't find a registered implementation for name: 'envoy_test.bootstrap.foo'\");\n}\n\n// Static configuration validation. 
We test with both allow/reject settings various aspects of\n// configuration from YAML.\nclass StaticValidationTest\n    : public ServerInstanceImplTestBase,\n      public testing::TestWithParam<std::tuple<Network::Address::IpVersion, bool>> {\nprotected:\n  StaticValidationTest() {\n    version_ = std::get<0>(GetParam());\n    options_.service_cluster_name_ = \"some_cluster_name\";\n    options_.service_node_name_ = \"some_node_name\";\n    options_.allow_unknown_static_fields_ = std::get<1>(GetParam());\n    // By inverting the static validation value, we can hopefully catch places we may have confused\n    // static/dynamic validation.\n    options_.reject_unknown_dynamic_fields_ = options_.allow_unknown_static_fields_;\n  }\n\n  AssertionResult validate(absl::string_view yaml_filename) {\n    const std::string path =\n        absl::StrCat(\"test/server/test_data/static_validation/\", yaml_filename);\n    try {\n      initialize(path);\n    } catch (EnvoyException&) {\n      return options_.allow_unknown_static_fields_ ? AssertionFailure() : AssertionSuccess();\n    }\n    return options_.allow_unknown_static_fields_ ? AssertionSuccess() : AssertionFailure();\n  }\n};\n\nstd::string staticValidationTestParamsToString(\n    const ::testing::TestParamInfo<std::tuple<Network::Address::IpVersion, bool>>& params) {\n  return fmt::format(\n      \"{}_{}\",\n      TestUtility::ipTestParamsToString(\n          ::testing::TestParamInfo<Network::Address::IpVersion>(std::get<0>(params.param), 0)),\n      std::get<1>(params.param) ? 
\"with_allow_unknown_static_fields\"\n                                : \"without_allow_unknown_static_fields\");\n}\n\nINSTANTIATE_TEST_SUITE_P(\n    IpVersions, StaticValidationTest,\n    testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool()),\n    staticValidationTestParamsToString);\n\nTEST_P(StaticValidationTest, BootstrapUnknownField) {\n  EXPECT_TRUE(validate(\"bootstrap_unknown_field.yaml\"));\n}\n\nTEST_P(StaticValidationTest, ListenerUnknownField) {\n  EXPECT_TRUE(validate(\"listener_unknown_field.yaml\"));\n}\n\nTEST_P(StaticValidationTest, NetworkFilterUnknownField) {\n  EXPECT_TRUE(validate(\"network_filter_unknown_field.yaml\"));\n}\n\nTEST_P(StaticValidationTest, ClusterUnknownField) {\n  EXPECT_TRUE(validate(\"cluster_unknown_field.yaml\"));\n}\n\n// Custom StatsSink that registers both a Cluster update callback and Server lifecycle callback.\nclass CallbacksStatsSink : public Stats::Sink, public Upstream::ClusterUpdateCallbacks {\npublic:\n  CallbacksStatsSink(Server::Configuration::ServerFactoryContext& server)\n      : cluster_removal_cb_handle_(\n            server.clusterManager().addThreadLocalClusterUpdateCallbacks(*this)),\n        lifecycle_cb_handle_(server.lifecycleNotifier().registerCallback(\n            ServerLifecycleNotifier::Stage::ShutdownExit,\n            [this]() { cluster_removal_cb_handle_.reset(); })) {}\n\n  // Stats::Sink\n  void flush(Stats::MetricSnapshot&) override {}\n  void onHistogramComplete(const Stats::Histogram&, uint64_t) override {}\n\n  // Upstream::ClusterUpdateCallbacks\n  void onClusterAddOrUpdate(Upstream::ThreadLocalCluster&) override {}\n  void onClusterRemoval(const std::string&) override {}\n\nprivate:\n  Upstream::ClusterUpdateCallbacksHandlePtr cluster_removal_cb_handle_;\n  ServerLifecycleNotifier::HandlePtr lifecycle_cb_handle_;\n};\n\nclass CallbacksStatsSinkFactory : public Server::Configuration::StatsSinkFactory {\npublic:\n  // StatsSinkFactory\n  
Stats::SinkPtr createStatsSink(const Protobuf::Message&,\n                                 Server::Configuration::ServerFactoryContext& server) override {\n    return std::make_unique<CallbacksStatsSink>(server);\n  }\n\n  ProtobufTypes::MessagePtr createEmptyConfigProto() override {\n    // Using Struct instead of a custom per-filter empty config proto\n    // This is only allowed in tests.\n    return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};\n  }\n\n  std::string name() const override { return \"envoy.callbacks_stats_sink\"; }\n};\n\n// This test ensures that a stats sink can use cluster update callbacks properly. Using only a\n// cluster update callback is insufficient to protect against double-free bugs, so a server\n// lifecycle callback is also used to ensure that the cluster update callback is freed during\n// Server::Instance's destruction. See issue #9292 for more details.\nTEST_P(ServerInstanceImplTest, CallbacksStatsSinkTest) {\n  CallbacksStatsSinkFactory factory;\n  Registry::InjectFactory<Server::Configuration::StatsSinkFactory> registered(factory);\n\n  initialize(\"test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml\");\n  // Necessary to trigger server lifecycle callbacks, otherwise only terminate() is called.\n  server_->shutdown();\n}\n\n// Validate that disabled extension is reflected in the list of Node extensions.\nTEST_P(ServerInstanceImplTest, DisabledExtension) {\n  OptionsImpl::disableExtensions({\"envoy.filters.http/envoy.filters.http.buffer\"});\n  initialize(\"test/server/test_data/server/node_bootstrap.yaml\");\n  bool disabled_filter_found = false;\n  for (const auto& extension : server_->localInfo().node().extensions()) {\n    // TODO(zuercher): remove envoy.buffer when old-style name deprecation is completed.\n    if (extension.category() == \"envoy.filters.http\" &&\n        (extension.name() == \"envoy.filters.http.buffer\" || extension.name() == \"envoy.buffer\")) {\n      
ASSERT_TRUE(extension.disabled());\n      disabled_filter_found = true;\n    } else {\n      ASSERT_FALSE(extension.disabled());\n    }\n  }\n  ASSERT_TRUE(disabled_filter_found);\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/ssl_context_manager_test.cc",
    "content": "#include \"server/ssl_context_manager.h\"\n\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nTEST(SslContextManager, createStub) {\n  Event::SimulatedTimeSystem time_system;\n  Stats::MockStore scope;\n  Ssl::MockClientContextConfig client_config;\n  Ssl::MockServerContextConfig server_config;\n  std::vector<std::string> server_names;\n\n  Ssl::ContextManagerPtr manager = createContextManager(\"fake_factory_name\", time_system);\n\n  // Check we've created a stub, not real manager.\n  EXPECT_EQ(manager->daysUntilFirstCertExpires(), std::numeric_limits<int>::max());\n  EXPECT_EQ(manager->secondsUntilFirstOcspResponseExpires(), absl::nullopt);\n  EXPECT_THROW_WITH_MESSAGE(manager->createSslClientContext(scope, client_config), EnvoyException,\n                            \"SSL is not supported in this configuration\");\n  EXPECT_THROW_WITH_MESSAGE(manager->createSslServerContext(scope, server_config, server_names),\n                            EnvoyException, \"SSL is not supported in this configuration\");\n  EXPECT_NO_THROW(manager->iterateContexts([](const Envoy::Ssl::Context&) -> void {}));\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/test_data/runtime/override/some_service/fizz",
    "content": "fozz\n"
  },
  {
    "path": "test/server/test_data/runtime/primary/fizz",
    "content": "buzz\n"
  },
  {
    "path": "test/server/test_data/server/bad_sds_config_source.yaml",
    "content": "node:\n  id: bootstrap_id\n  cluster: bootstrap_cluster\nstatic_resources:\n  clusters:\n  - name: xds-grpc\n    connect_timeout: 0.25s\n    type: STRICT_DNS\n    load_assignment:\n      cluster_name: xds-grpc\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: localhost\n                port_value: 12345\n    transport_socket:\n      name: \"envoy.transport_sockets.tls\"\n      typed_config:\n        \"@type\": \"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\"\n        common_tls_context:\n          tls_certificate_sds_secret_configs:\n          - name: default\n            sds_config:\n              api_config_source:\n                api_type: GRPC\n                grpc_services:\n                  envoy_grpc:\n                    cluster_name: \"sds-grpc\"\n          validation_context: {}\n"
  },
  {
    "path": "test/server/test_data/server/bootstrap_extensions.yaml",
    "content": "bootstrap_extensions:\n  - name: envoy_test.bootstrap.foo\n    typed_config:\n      \"@type\": type.googleapis.com/test.common.config.DummyConfig\n      a: foo\n"
  },
  {
    "path": "test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml",
    "content": "node:\n  id: bootstrap_id\n  cluster: bootstrap_cluster\n  locality:\n    zone: bootstrap_zone\n    sub_zone: bootstrap_sub_zone\nadmin:\n  access_log_path: {{ null_device_path }}\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\nstats_sinks:\n- name: envoy.callbacks_stats_sink\nstats_flush_interval: 1s\n"
  },
  {
    "path": "test/server/test_data/server/cluster_dupe_bootstrap.yaml",
    "content": "admin:\n  access_log_path: {{ null_device_path }}\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\nstatic_resources:\n  clusters:\n  - name: service_google\n    connect_timeout: 0.25s\n  - name: service_google\n    connect_timeout: 0.25s\n"
  },
  {
    "path": "test/server/test_data/server/cluster_health_check_bootstrap.yaml",
    "content": "admin:\n  access_log_path: {{ null_device_path }}\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\nstatic_resources:\n  clusters:\n  - name: service_google\n    connect_timeout: 0.25s\n    health_checks:\n      - timeout: {{ health_check_timeout }}s\n        interval: {{ health_check_interval }}s\n        unhealthy_threshold: 1\n        healthy_threshold: 1\n        http_health_check:\n          path: \"/\"\n"
  },
  {
    "path": "test/server/test_data/server/empty_bootstrap.yaml",
    "content": "static_resources:\n"
  },
  {
    "path": "test/server/test_data/server/invalid_bootstrap.yaml",
    "content": "static_resources:\n  clusters:\n  - name:\n"
  },
  {
    "path": "test/server/test_data/server/invalid_layered_runtime_duplicate_name.yaml",
    "content": "layered_runtime:\n  layers:\n    - name: some_static_layer\n      static_layer:\n        foo: bar\n    - name: some_static_layer\n      static_layer:\n        foo: baz\n"
  },
  {
    "path": "test/server/test_data/server/invalid_layered_runtime_missing_name.yaml",
    "content": "layered_runtime:\n  layers:\n    - static_layer:\n        foo: bar\n"
  },
  {
    "path": "test/server/test_data/server/invalid_layered_runtime_no_layer_specifier.yaml",
    "content": "layered_runtime:\n  layers:\n    - name: \"foo\"\n"
  },
  {
    "path": "test/server/test_data/server/invalid_legacy_runtime_bootstrap.yaml",
    "content": "runtime:\n  base:\n    foo:\n    - bar: baz\n"
  },
  {
    "path": "test/server/test_data/server/invalid_runtime_bootstrap.yaml",
    "content": "layered_runtime:\n  layers:\n    - name: some_static_layer\n      static_layer:\n        foo:\n        - bar: baz\n"
  },
  {
    "path": "test/server/test_data/server/node_bootstrap.pb_text",
    "content": "node {\n  id: \"bootstrap_id\"\n  cluster: \"bootstrap_cluster\"\n  locality {\n    zone: \"bootstrap_zone\"\n    sub_zone: \"bootstrap_sub_zone\"\n  }\n}\nadmin {\n  access_log_path: \"{{ null_device_path }}\"\n  address {\n    socket_address {\n      address: \"{{ ntop_ip_loopback_address }}\"\n      port_value: 0\n    }\n  }\n}\n"
  },
  {
    "path": "test/server/test_data/server/node_bootstrap.yaml",
    "content": "node:\n  id: bootstrap_id\n  cluster: bootstrap_cluster\n  locality:\n    zone: bootstrap_zone\n    sub_zone: bootstrap_sub_zone\nadmin:\n  access_log_path: {{ null_device_path }}\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n"
  },
  {
    "path": "test/server/test_data/server/node_bootstrap_no_admin_port.yaml",
    "content": "node:\n  id: bootstrap_id\n  cluster: bootstrap_cluster\n  locality:\n    zone: bootstrap_zone\n    sub_zone: bootstrap_sub_zone\nadmin:\n  access_log_path: {{ null_device_path }}\n"
  },
  {
    "path": "test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml",
    "content": "node:\n  id: bootstrap_id\n  cluster: bootstrap_cluster\n  locality:\n    zone: bootstrap_zone\n    sub_zone: bootstrap_sub_zone\nadmin:\n  access_log_path: {{ null_device_path }}\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n  socket_options:\n  - description: SO_REUSEPORT\n    level: {{ sol_socket }}\n    name: {{ so_reuseport }}\n    int_value: 1\n    state: STATE_PREBIND\n"
  },
  {
    "path": "test/server/test_data/server/node_bootstrap_without_access_log.yaml",
    "content": "node:\n  id: bootstrap_id\n  cluster: bootstrap_cluster\n  locality:\n    zone: bootstrap_zone\n    sub_zone: bootstrap_sub_zone\nadmin:\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\n\n"
  },
  {
    "path": "test/server/test_data/server/proxy_version_bootstrap.yaml",
    "content": "node:\n  id: bootstrap_id\n  cluster: bootstrap_cluster\n  locality:\n    zone: bootstrap_zone\n    sub_zone: bootstrap_sub_zone\nadmin:\n  access_log_path: {{ null_device_path }}\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\nstats_server_version_override: 100012001\n"
  },
  {
    "path": "test/server/test_data/server/runtime_bootstrap.yaml",
    "content": "layered_runtime:\n  layers:\n    - name: some_static_layer\n      static_layer:\n        foo: bar\n    - name: base_disk_layer\n      disk_layer: { symlink_root: {{ test_rundir }}/test/server/test_data/runtime/primary }\n    - name: overlay_disk_layer\n      disk_layer: { symlink_root: {{ test_rundir }}/test/server/test_data/runtime/override, append_service_cluster: true }\n"
  },
  {
    "path": "test/server/test_data/server/runtime_bootstrap_ads_eds.yaml",
    "content": "static_resources:\n  clusters:\n  - name: dummy_cluster\n    connect_timeout: 1s\n    load_assignment:\n      cluster_name: dummy_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: {{ ntop_ip_loopback_address }}\n                port_value: 0\n  - name: ads_cluster\n    connect_timeout: 1s\n    type: EDS\n    eds_cluster_config:\n      eds_config:\n        api_config_source:\n          api_type: GRPC\n          grpc_services:\n            envoy_grpc:\n              cluster_name: \"dummy_cluster\"\ndynamic_resources:\n  ads_config:\n    api_type: GRPC\n    grpc_services:\n      envoy_grpc:\n        cluster_name: ads_cluster\n    set_node_on_first_message_only: true\nlayered_runtime:\n  layers:\n    - name: foobar\n      rtds_layer:\n        name: foobar\n        rtds_config:\n          ads: {}\n"
  },
  {
    "path": "test/server/test_data/server/runtime_bootstrap_eds.yaml",
    "content": "static_resources:\n  clusters:\n  - name: dummy_cluster\n    connect_timeout: 1s\n    load_assignment:\n      cluster_name: dummy_cluster\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: {{ ntop_ip_loopback_address }}\n                port_value: 0\n  - name: rtds_cluster\n    connect_timeout: 1s\n    type: EDS\n    eds_cluster_config:\n      eds_config:\n        api_config_source:\n          api_type: GRPC\n          grpc_services:\n            envoy_grpc:\n              cluster_name: \"dummy_cluster\"\nlayered_runtime:\n  layers:\n    - name: foobar\n      rtds_layer:\n        name: foobar\n        rtds_config:\n          api_config_source:\n            api_type: GRPC\n            grpc_services:\n              envoy_grpc:\n                cluster_name: rtds_cluster\n"
  },
  {
    "path": "test/server/test_data/server/stats_sink_bootstrap.yaml",
    "content": "node:\n  id: bootstrap_id\n  cluster: bootstrap_cluster\n  locality:\n    zone: bootstrap_zone\n    sub_zone: bootstrap_sub_zone\nadmin:\n  access_log_path: {{ null_device_path }}\n  address:\n    socket_address:\n      address: {{ ntop_ip_loopback_address }}\n      port_value: 0\nstats_sinks:\n- name: envoy.custom_stats_sink\nstats_flush_interval: 1s\n"
  },
  {
    "path": "test/server/test_data/server/unparseable_bootstrap.yaml",
    "content": "- foo: bar\n"
  },
  {
    "path": "test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text",
    "content": "node {\n  id: \"bootstrap_id\"\n  build_version: \"foo\"\n}\n"
  },
  {
    "path": "test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml",
    "content": "node:\n  id: \"bootstrap_id\"\n  build_version: \"foo\"\n"
  },
  {
    "path": "test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text",
    "content": "static_resources {\n  clusters {\n      name: \"cluster\"\n      ignore_health_on_host_removal: true\n      connect_timeout {\n        seconds: 1\n      }\n  }\n}\n"
  },
  {
    "path": "test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml",
    "content": "static_resources:\n  clusters :\n    name: \"cluster\"\n    ignore_health_on_host_removal: true\n    connect_timeout: 1s\n"
  },
  {
    "path": "test/server/test_data/server/watchdogs_bootstrap_with_deprecated_field.yaml",
    "content": "watchdogs:\n  main_thread_watchdog:\n    miss_timeout: 1s\n  worker_watchdog:\n    miss_timeout: 0.5s\nwatchdog:\n  miss_timeout: 1s\n"
  },
  {
    "path": "test/server/test_data/server/zipkin_tracing_deprecated_config.yaml",
    "content": "static_resources:\n  clusters:\n  - name: zipkin\n    connect_timeout: 1s\ntracing:\n  http:\n    name: zipkin\n    typed_config:\n      \"@type\": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig\n      collector_cluster: zipkin\n      collector_endpoint: \"/api/v1/spans\"\n      collector_endpoint_version: HTTP_JSON\n"
  },
  {
    "path": "test/server/test_data/static_validation/bootstrap_unknown_field.yaml",
    "content": "foo: bar\n"
  },
  {
    "path": "test/server/test_data/static_validation/cluster_unknown_field.yaml",
    "content": "static_resources:\n  clusters:\n    name: foo\n    connect_timeout: { seconds: 5 }\n    foo: bar\n"
  },
  {
    "path": "test/server/test_data/static_validation/listener_unknown_field.yaml",
    "content": "static_resources:\n  listeners:\n    name: foo\n    address:\n      socket_address:\n        address: {{ ntop_ip_loopback_address }}\n        port_value: 0\n    foo: bar\n    filter_chains:\n    - filters:\n"
  },
  {
    "path": "test/server/test_data/static_validation/network_filter_unknown_field.yaml",
    "content": "static_resources:\n  listeners:\n    name: foo\n    address:\n      socket_address:\n        address: {{ ntop_ip_loopback_address }}\n        port_value: 0\n    filter_chains:\n    - filters:\n      - name: http\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager\n          codec_type: HTTP2\n          stat_prefix: blah\n          route_config: {}\n          foo: bar\n"
  },
  {
    "path": "test/server/utility.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/config/listener/v3/listener.pb.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\ninline envoy::config::listener::v3::Listener parseListenerFromV3Yaml(const std::string& yaml,\n                                                                     bool avoid_boosting = true) {\n  envoy::config::listener::v3::Listener listener;\n  TestUtility::loadFromYaml(yaml, listener, true, avoid_boosting);\n  return listener;\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/server/worker_impl_test.cc",
    "content": "#include \"envoy/network/exception.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/event/dispatcher_impl.h\"\n\n#include \"server/worker_impl.h\"\n\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/guard_dog.h\"\n#include \"test/mocks/server/instance.h\"\n#include \"test/mocks/server/overload_manager.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::InSequence;\nusing testing::Invoke;\nusing testing::InvokeWithoutArgs;\nusing testing::NiceMock;\nusing testing::Return;\nusing testing::Throw;\n\nnamespace Envoy {\nnamespace Server {\nnamespace {\n\nclass WorkerImplTest : public testing::Test {\npublic:\n  WorkerImplTest()\n      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher(\"worker_test\")),\n        no_exit_timer_(dispatcher_->createTimer([]() -> void {})),\n        worker_(tls_, hooks_, std::move(dispatcher_), Network::ConnectionHandlerPtr{handler_},\n                overload_manager_, *api_) {\n    // In the real worker the watchdog has timers that prevent exit. Here we need to prevent event\n    // loop exit since we use mock timers.\n    no_exit_timer_->enableTimer(std::chrono::hours(1));\n  }\n\n  ~WorkerImplTest() override {\n    // We init no_exit_timer_ before worker_ because the dispatcher will be\n    // moved into the worker. 
However we need to destruct no_exit_timer_ before\n    // destructing the worker, otherwise the timer will outlive its dispatcher.\n    no_exit_timer_.reset();\n  }\n\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  Network::MockConnectionHandler* handler_ = new Network::MockConnectionHandler();\n  NiceMock<MockGuardDog> guard_dog_;\n  NiceMock<MockOverloadManager> overload_manager_;\n  Api::ApiPtr api_;\n  Event::DispatcherPtr dispatcher_;\n  DefaultListenerHooks hooks_;\n  Event::TimerPtr no_exit_timer_;\n  WorkerImpl worker_;\n};\n\nTEST_F(WorkerImplTest, BasicFlow) {\n  InSequence s;\n  std::thread::id current_thread_id = std::this_thread::get_id();\n  ConditionalInitializer ci;\n\n  // Before a worker is started adding a listener will be posted and will get added when the\n  // thread starts running.\n  NiceMock<Network::MockListenerConfig> listener;\n  ON_CALL(listener, listenerTag()).WillByDefault(Return(1UL));\n  EXPECT_CALL(*handler_, addListener(_, _))\n      .WillOnce(Invoke(\n          [current_thread_id](absl::optional<uint64_t>, Network::ListenerConfig& config) -> void {\n            EXPECT_EQ(config.listenerTag(), 1UL);\n            EXPECT_NE(current_thread_id, std::this_thread::get_id());\n          }));\n  worker_.addListener(absl::nullopt, listener, [&ci](bool success) -> void {\n    EXPECT_TRUE(success);\n    ci.setReady();\n  });\n\n  NiceMock<Stats::MockStore> store;\n  worker_.start(guard_dog_);\n  worker_.initializeStats(store);\n  ci.waitReady();\n\n  // After a worker is started adding/stopping/removing a listener happens on the worker thread.\n  NiceMock<Network::MockListenerConfig> listener2;\n  ON_CALL(listener2, listenerTag()).WillByDefault(Return(2UL));\n  EXPECT_CALL(*handler_, addListener(_, _))\n      .WillOnce(Invoke(\n          [current_thread_id](absl::optional<uint64_t>, Network::ListenerConfig& config) -> void {\n            EXPECT_EQ(config.listenerTag(), 2UL);\n            EXPECT_NE(current_thread_id, 
std::this_thread::get_id());\n          }));\n  worker_.addListener(absl::nullopt, listener2, [&ci](bool success) -> void {\n    EXPECT_TRUE(success);\n    ci.setReady();\n  });\n  ci.waitReady();\n\n  EXPECT_CALL(*handler_, stopListeners(2))\n      .WillOnce(InvokeWithoutArgs([current_thread_id, &ci]() -> void {\n        EXPECT_NE(current_thread_id, std::this_thread::get_id());\n        ci.setReady();\n      }));\n\n  ConditionalInitializer ci2;\n  // Verify that callback is called from the other thread.\n  worker_.stopListener(listener2, [current_thread_id, &ci2]() {\n    EXPECT_NE(current_thread_id, std::this_thread::get_id());\n    ci2.setReady();\n  });\n  ci.waitReady();\n  ci2.waitReady();\n\n  EXPECT_CALL(*handler_, removeListeners(2))\n      .WillOnce(InvokeWithoutArgs([current_thread_id]() -> void {\n        EXPECT_NE(current_thread_id, std::this_thread::get_id());\n      }));\n  worker_.removeListener(listener2, [current_thread_id, &ci]() -> void {\n    EXPECT_NE(current_thread_id, std::this_thread::get_id());\n    ci.setReady();\n  });\n  ci.waitReady();\n\n  // Now test adding and removing a listener without stopping it first.\n  NiceMock<Network::MockListenerConfig> listener3;\n  ON_CALL(listener3, listenerTag()).WillByDefault(Return(3UL));\n  EXPECT_CALL(*handler_, addListener(_, _))\n      .WillOnce(Invoke(\n          [current_thread_id](absl::optional<uint64_t>, Network::ListenerConfig& config) -> void {\n            EXPECT_EQ(config.listenerTag(), 3UL);\n            EXPECT_NE(current_thread_id, std::this_thread::get_id());\n          }));\n  worker_.addListener(absl::nullopt, listener3, [&ci](bool success) -> void {\n    EXPECT_TRUE(success);\n    ci.setReady();\n  });\n  ci.waitReady();\n\n  EXPECT_CALL(*handler_, removeListeners(3))\n      .WillOnce(InvokeWithoutArgs([current_thread_id]() -> void {\n        EXPECT_NE(current_thread_id, std::this_thread::get_id());\n      }));\n  worker_.removeListener(listener3, [current_thread_id]() -> void {\n 
   EXPECT_NE(current_thread_id, std::this_thread::get_id());\n  });\n\n  worker_.stop();\n}\n\nTEST_F(WorkerImplTest, ListenerException) {\n  InSequence s;\n\n  NiceMock<Network::MockListenerConfig> listener;\n  ON_CALL(listener, listenerTag()).WillByDefault(Return(1UL));\n  EXPECT_CALL(*handler_, addListener(_, _))\n      .WillOnce(Throw(Network::CreateListenerException(\"failed\")));\n  worker_.addListener(absl::nullopt, listener, [](bool success) -> void { EXPECT_FALSE(success); });\n\n  worker_.start(guard_dog_);\n  worker_.stop();\n}\n\n} // namespace\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_basic_cc_library\",\n    \"envoy_cc_library\",\n    \"envoy_cc_test\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_basic_cc_library(\n    name = \"printers_includes\",\n    hdrs = [\"printers.h\"],\n    deps = [\n        \"//include/envoy/network:address_interface\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"environment_lib\",\n    srcs = [\"environment.cc\"],\n    hdrs = [\"environment.h\"],\n    external_deps = [\n        \"abseil_optional\",\n        \"abseil_symbolize\",\n        \"bazel_runfiles\",\n    ],\n    deps = [\n        \":network_utility_lib\",\n        \"//include/envoy/server:options_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:compiler_requirements_lib\",\n        \"//source/common/common:macros\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/filesystem:filesystem_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/server:options_lib\",\n        \"//test/common/runtime:utility_lib\",\n    ] + select({\n        \"//bazel:disable_signal_trace\": [],\n        \"//conditions:default\": [\"//source/common/signal:sigaction_lib\"],\n    }),\n)\n\nenvoy_cc_test_library(\n    name = \"network_utility_lib\",\n    srcs = [\"network_utility.cc\"],\n    hdrs = [\"network_utility.h\"],\n    deps = [\n        \":utility_lib\",\n        \"//include/envoy/network:filter_interface\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:listen_socket_lib\",\n        \"//source/common/network:raw_buffer_socket_lib\",\n        \"//source/common/network:socket_option_factory_lib\",\n        \"//source/common/network:utility_lib\",\n        
\"//source/common/runtime:runtime_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"network_utility_test\",\n    srcs = [\"network_utility_test.cc\"],\n    deps = [\n        \":environment_lib\",\n        \":network_utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"contention_lib\",\n    srcs = [\"contention.cc\"],\n    hdrs = [\"contention.h\"],\n    deps = [\n        \"//source/common/common:mutex_tracer_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//test/test_common:test_time_lib\",\n        \"//test/test_common:utility_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"printers_lib\",\n    srcs = [\"printers.cc\"],\n    deps = [\n        \":printers_includes\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:header_map_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"registry_lib\",\n    hdrs = [\"registry.h\"],\n    deps = [\n        \"//include/envoy/registry\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"resources_lib\",\n    hdrs = [\"resources.h\"],\n    deps = [\"//source/common/singleton:const_singleton\"],\n)\n\nenvoy_cc_test_library(\n    name = \"utility_lib\",\n    srcs = [\"utility.cc\"],\n    hdrs = [\"utility.h\"],\n    external_deps = [\n        \"abseil_strings\",\n    ],\n    deps = [\n        \":file_system_for_test_lib\",\n        \":resources_lib\",\n        \":test_time_lib\",\n        \":thread_factory_for_test_lib\",\n        \"//include/envoy/buffer:buffer_interface\",\n        \"//include/envoy/http:codec_interface\",\n        \"//include/envoy/network:address_interface\",\n        \"//source/common/api:api_lib\",\n        \"//source/common/common:empty_string\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/common:utility_lib\",\n        \"//source/common/config:decoded_resource_lib\",\n        \"//source/common/config:opaque_resource_decoder_lib\",\n        \"//source/common/config:version_converter_lib\",\n        
\"//source/common/filesystem:directory_lib\",\n        \"//source/common/filesystem:filesystem_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/network:address_lib\",\n        \"//source/common/network:utility_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/listener/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/runtime/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_runtime_lib\",\n    hdrs = [\"test_runtime.h\"],\n    deps = [\n        \"//source/common/runtime:runtime_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/mocks/init:init_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/protobuf:protobuf_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"thread_factory_for_test_lib\",\n    srcs = [\"thread_factory_for_test.cc\"],\n    hdrs = [\"thread_factory_for_test.h\"],\n    deps = [\n        \"//source/common/common:thread_lib\",\n        \"//source/common/common:utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"file_system_for_test_lib\",\n    srcs = [\"file_system_for_test.cc\"],\n    hdrs = [\"file_system_for_test.h\"],\n    deps = [\n        \"//source/common/common:utility_lib\",\n        
\"//source/common/filesystem:filesystem_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"utility_test\",\n    srcs = [\"utility_test.cc\"],\n    deps = [\n        \":utility_lib\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"logging_lib\",\n    srcs = [\"logging.cc\"],\n    hdrs = [\"logging.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"threadsafe_singleton_injector_lib\",\n    hdrs = [\"threadsafe_singleton_injector.h\"],\n    deps = [\n        \"//source/common/singleton:threadsafe_singleton\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"global_lib\",\n    srcs = [\"global.cc\"],\n    hdrs = [\"global.h\"],\n    deps = [\n        \"//source/common/common:assert_lib\",\n        \"//source/common/common:thread_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"global_test\",\n    srcs = [\"global_test.cc\"],\n    deps = [\n        \":global_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"only_one_thread_lib\",\n    srcs = [\"only_one_thread.cc\"],\n    hdrs = [\"only_one_thread.h\"],\n    deps = [\":thread_factory_for_test_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"test_time_lib\",\n    srcs = [\"test_time.cc\"],\n    hdrs = [\"test_time.h\"],\n    deps = [\n        \":global_lib\",\n        \":test_time_system_interface\",\n        \"//source/common/event:real_time_system_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"test_time_system_interface\",\n    srcs = [\"test_time_system.cc\"],\n    hdrs = [\"test_time_system.h\"],\n    deps = [\n        \":global_lib\",\n        \":only_one_thread_lib\",\n        \"//include/envoy/event:dispatcher_interface\",\n        \"//include/envoy/event:timer_interface\",\n        \"//source/common/common:thread_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"simulated_time_system_lib\",\n    srcs = 
[\"simulated_time_system.cc\"],\n    hdrs = [\"simulated_time_system.h\"],\n    deps = [\n        \":test_time_system_interface\",\n        \":utility_lib\",\n        \"//source/common/event:event_impl_base_lib\",\n        \"//source/common/event:real_time_system_lib\",\n        \"//source/common/event:timer_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"status_utility_lib\",\n    hdrs = [\"status_utility.h\"],\n)\n\nenvoy_cc_test(\n    name = \"simulated_time_system_test\",\n    srcs = [\"simulated_time_system_test.cc\"],\n    deps = [\n        \":simulated_time_system_lib\",\n        \":utility_lib\",\n        \"//source/common/event:libevent_scheduler_lib\",\n        \"//test/mocks/event:event_mocks\",\n        \"//test/test_common:test_runtime_lib\",\n    ],\n)\n\nenvoy_cc_test(\n    name = \"test_time_system_test\",\n    srcs = [\"test_time_system_test.cc\"],\n    deps = [\n        \":simulated_time_system_lib\",\n        \":test_time_lib\",\n        \":utility_lib\",\n    ],\n)\n\nenvoy_cc_test_library(\n    name = \"wasm_lib\",\n    hdrs = [\"wasm_base.h\"],\n    deps = [\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/extensions/common/wasm:wasm_interoperation_lib\",\n        \"//source/extensions/common/wasm:wasm_lib\",\n        \"//test/mocks/grpc:grpc_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/network:network_mocks\",\n        \"//test/mocks/server:server_mocks\",\n        \"//test/mocks/ssl:ssl_mocks\",\n        \"//test/mocks/stream_info:stream_info_mocks\",\n        \"//test/mocks/thread_local:thread_local_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:environment_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_basic_cc_library(\n    name = \"test_version_linkstamp\",\n    srcs = [\"test_version_linkstamp.cc\"],\n    alwayslink = 1,\n)\n"
  },
  {
    "path": "test/test_common/contention.cc",
    "content": "#include \"test/test_common/contention.h\"\n\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Thread {\nnamespace TestUtil {\n\nvoid ContentionGenerator::generateContention(MutexTracerImpl& tracer) {\n  Envoy::Thread::ThreadPtr t1 = launchThread(tracer);\n  Envoy::Thread::ThreadPtr t2 = launchThread(tracer);\n  t1->join();\n  t2->join();\n}\n\nEnvoy::Thread::ThreadPtr ContentionGenerator::launchThread(MutexTracerImpl& tracer) {\n  return threadFactoryForTest().createThread(\n      [&tracer, this]() -> void { holdUntilContention(tracer); });\n}\n\nvoid ContentionGenerator::holdUntilContention(MutexTracerImpl& tracer) {\n  Event::DispatcherPtr dispatcher = api_.allocateDispatcher(\"test_thread\");\n  Event::TimerPtr timer = dispatcher->createTimer([&dispatcher]() { dispatcher->exit(); });\n  auto sleep_ms = [&timer, &dispatcher](int num_ms) {\n    timer->enableTimer(std::chrono::milliseconds(num_ms));\n    dispatcher->run(Event::Dispatcher::RunType::RunUntilExit);\n  };\n  int64_t curr_num_contentions = tracer.numContentions();\n  do {\n    sleep_ms(1);\n    {\n      LockGuard lock(mutex_);\n      // We hold the lock 90% of the time to ensure both contention and eventual acquisition, which\n      // is needed to bump numContentions().\n      sleep_ms(9);\n    }\n    if (tracer.numContentions() > curr_num_contentions) {\n      found_contention_ = true;\n    }\n  } while (!found_contention_);\n}\n\n} // namespace TestUtil\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/contention.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/mutex_tracer_impl.h\"\n#include \"common/common/thread.h\"\n\n#include \"test/test_common/test_time.h\"\n\nnamespace Envoy {\nnamespace Thread {\nnamespace TestUtil {\n\n/**\n * Generates mutex contention, as measured by the MutexTracer passed in via argument.\n * @param tracer a reference to the global MutexTracerImpl.\n */\n\nclass ContentionGenerator {\npublic:\n  ContentionGenerator(Api::Api& api) : api_(api) {}\n\n  /**\n   * Generates at least once occurrence of mutex contention, as measured by tracer.\n   */\n  void generateContention(MutexTracerImpl& tracer);\n\nprivate:\n  ThreadPtr launchThread(MutexTracerImpl& tracer);\n  void holdUntilContention(MutexTracerImpl& tracer);\n\n  MutexBasicLockable mutex_;\n  Api::Api& api_;\n  std::atomic<bool> found_contention_{false};\n};\n\n} // namespace TestUtil\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/environment.cc",
    "content": "#include \"test/test_common/environment.h\"\n\n#include <fstream>\n#include <iostream>\n#include <regex>\n#include <sstream>\n#include <string>\n#include <vector>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/compiler_requirements.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/macros.h\"\n#include \"common/common/utility.h\"\n#include \"common/filesystem/directory.h\"\n\n#include \"absl/container/node_hash_map.h\"\n\n#ifdef ENVOY_HANDLE_SIGNALS\n#include \"common/signal/signal_action.h\"\n#endif\n\n#include \"server/options_impl.h\"\n\n#include \"test/test_common/file_system_for_test.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"absl/debugging/symbolize.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_format.h\"\n#include \"gtest/gtest.h\"\n#include \"spdlog/spdlog.h\"\n\nusing bazel::tools::cpp::runfiles::Runfiles;\nusing Envoy::Filesystem::Directory;\nusing Envoy::Filesystem::DirectoryEntry;\n\nnamespace Envoy {\nnamespace {\n\nstd::string makeTempDir(std::string basename_template) {\n#ifdef WIN32\n  std::string name_template = \"c:\\\\Windows\\\\TEMP\\\\\" + basename_template;\n  char* dirname = ::_mktemp(&name_template[0]);\n  RELEASE_ASSERT(dirname != nullptr, fmt::format(\"failed to create tempdir from template: {} {}\",\n                                                 name_template, errorDetails(errno)));\n  TestEnvironment::createPath(dirname);\n#else\n  std::string name_template = \"/tmp/\" + basename_template;\n  char* dirname = ::mkdtemp(&name_template[0]);\n  RELEASE_ASSERT(dirname != nullptr, fmt::format(\"failed to create tempdir from template: {} {}\",\n                                                 name_template, errorDetails(errno)));\n#endif\n  return std::string(dirname);\n}\n\nstd::string getOrCreateUnixDomainSocketDirectory() {\n  const char* path = std::getenv(\"TEST_UDSDIR\");\n  if (path != nullptr) {\n    
return std::string(path);\n  }\n  // Generate temporary path for Unix Domain Sockets only. This is a workaround\n  // for the sun_path limit on `sockaddr_un`, since TEST_TMPDIR as generated by\n  // Bazel may be too long.\n  return makeTempDir(\"envoy_test_uds.XXXXXX\");\n}\n\nstd::string getTemporaryDirectory() {\n  std::string temp_dir;\n  if (std::getenv(\"TEST_TMPDIR\")) {\n    temp_dir = TestEnvironment::getCheckedEnvVar(\"TEST_TMPDIR\");\n  } else if (std::getenv(\"TMPDIR\")) {\n    temp_dir = TestEnvironment::getCheckedEnvVar(\"TMPDIR\");\n  } else {\n    return makeTempDir(\"envoy_test_tmp.XXXXXX\");\n  }\n  TestEnvironment::createPath(temp_dir);\n  return temp_dir;\n}\n\n// Allow initializeOptions() to remember CLI args for getOptions().\nint argc_;\nchar** argv_;\n\n} // namespace\n\nvoid TestEnvironment::createPath(const std::string& path) {\n  if (Filesystem::fileSystemForTest().directoryExists(path)) {\n    return;\n  }\n  const Filesystem::PathSplitResult parent =\n      Filesystem::fileSystemForTest().splitPathFromFilename(path);\n  if (parent.file_.length() > 0) {\n    TestEnvironment::createPath(std::string(parent.directory_));\n  }\n#ifndef WIN32\n  RELEASE_ASSERT(::mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IRWXO) == 0,\n                 absl::StrCat(\"failed to create path: \", path));\n#else\n  RELEASE_ASSERT(::CreateDirectory(path.c_str(), NULL),\n                 absl::StrCat(\"failed to create path: \", path));\n#endif\n}\n\n// On linux, attempt to unlink any file that exists at path,\n// ignoring the result code, to avoid traversing a symlink,\n// On windows, also attempt to remove the directory in case\n// it is actually a symlink/junction, ignoring the result code.\n// Proceed to iteratively recurse the directory if it still remains\nvoid TestEnvironment::removePath(const std::string& path) {\n  RELEASE_ASSERT(absl::StartsWith(path, TestEnvironment::temporaryDirectory()),\n                 \"cowardly refusing to remove test directory not 
in temp path\");\n#ifndef WIN32\n  (void)::unlink(path.c_str());\n#else\n  (void)::DeleteFile(path.c_str());\n  (void)::RemoveDirectory(path.c_str());\n#endif\n  if (!Filesystem::fileSystemForTest().directoryExists(path)) {\n    return;\n  }\n  Directory directory(path);\n  std::string entry_name;\n  entry_name.reserve(path.size() + 256);\n  entry_name.append(path);\n  entry_name.append(\"/\");\n  size_t fileidx = entry_name.size();\n  for (const DirectoryEntry& entry : directory) {\n    entry_name.resize(fileidx);\n    entry_name.append(entry.name_);\n    if (entry.type_ == Envoy::Filesystem::FileType::Regular) {\n#ifndef WIN32\n      RELEASE_ASSERT(::unlink(entry_name.c_str()) == 0,\n                     absl::StrCat(\"failed to remove file: \", entry_name));\n#else\n      RELEASE_ASSERT(::DeleteFile(entry_name.c_str()),\n                     absl::StrCat(\"failed to remove file: \", entry_name));\n#endif\n    } else if (entry.type_ == Envoy::Filesystem::FileType::Directory) {\n      if (entry.name_ != \".\" && entry.name_ != \"..\") {\n        removePath(entry_name);\n      }\n    }\n  }\n#ifndef WIN32\n  RELEASE_ASSERT(::rmdir(path.c_str()) == 0,\n                 absl::StrCat(\"failed to remove path: \", path, \" (rmdir failed)\"));\n#else\n  RELEASE_ASSERT(::RemoveDirectory(path.c_str()), absl::StrCat(\"failed to remove path: \", path));\n#endif\n}\n\nvoid TestEnvironment::renameFile(const std::string& old_name, const std::string& new_name) {\n#ifdef WIN32\n  // use MoveFileEx, since ::rename will not overwrite an existing file. 
See\n  // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/rename-wrename?view=vs-2017\n  // Note MoveFileEx cannot overwrite a directory as documented, nor a symlink, apparently.\n  const BOOL rc = ::MoveFileEx(old_name.c_str(), new_name.c_str(), MOVEFILE_REPLACE_EXISTING);\n  ASSERT_NE(0, rc);\n#else\n  const int rc = ::rename(old_name.c_str(), new_name.c_str());\n  ASSERT_EQ(0, rc);\n#endif\n};\n\nvoid TestEnvironment::createSymlink(const std::string& target, const std::string& link) {\n#ifdef WIN32\n  const DWORD attributes = ::GetFileAttributes(target.c_str());\n  ASSERT_NE(attributes, INVALID_FILE_ATTRIBUTES);\n  int flags = SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE;\n  if (attributes & FILE_ATTRIBUTE_DIRECTORY) {\n    flags |= SYMBOLIC_LINK_FLAG_DIRECTORY;\n  }\n\n  const BOOLEAN rc = ::CreateSymbolicLink(link.c_str(), target.c_str(), flags);\n  ASSERT_NE(rc, 0);\n#else\n  const int rc = ::symlink(target.c_str(), link.c_str());\n  ASSERT_EQ(rc, 0);\n#endif\n}\n\nabsl::optional<std::string> TestEnvironment::getOptionalEnvVar(const std::string& var) {\n  const char* path = std::getenv(var.c_str());\n  if (path == nullptr) {\n    return {};\n  }\n  return std::string(path);\n}\n\nstd::string TestEnvironment::getCheckedEnvVar(const std::string& var) {\n  auto optional = getOptionalEnvVar(var);\n  RELEASE_ASSERT(optional.has_value(), var);\n  return optional.value();\n}\n\nvoid TestEnvironment::initializeTestMain(char* program_name) {\n#ifdef WIN32\n  _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);\n\n  _set_invalid_parameter_handler([](const wchar_t* expression, const wchar_t* function,\n                                    const wchar_t* file, unsigned int line,\n                                    uintptr_t pReserved) {});\n\n  WSADATA wsa_data;\n  const WORD version_requested = MAKEWORD(2, 2);\n  RELEASE_ASSERT(WSAStartup(version_requested, &wsa_data) == 0, \"\");\n#endif\n\n#ifdef __APPLE__\n  
UNREFERENCED_PARAMETER(program_name);\n#else\n  absl::InitializeSymbolizer(program_name);\n#endif\n\n#ifdef ENVOY_HANDLE_SIGNALS\n  // Enabled by default. Control with \"bazel --define=signal_trace=disabled\"\n  static Envoy::SignalAction handle_sigs;\n#endif\n}\n\nvoid TestEnvironment::initializeOptions(int argc, char** argv) {\n  argc_ = argc;\n  argv_ = argv;\n}\n\nbool TestEnvironment::shouldRunTestForIpVersion(Network::Address::IpVersion type) {\n  const char* value = std::getenv(\"ENVOY_IP_TEST_VERSIONS\");\n  std::string option(value ? value : \"\");\n  if (option.empty()) {\n    return true;\n  }\n  if ((type == Network::Address::IpVersion::v4 && option == \"v6only\") ||\n      (type == Network::Address::IpVersion::v6 && option == \"v4only\")) {\n    return false;\n  }\n  return true;\n}\n\nstd::vector<Network::Address::IpVersion> TestEnvironment::getIpVersionsForTest() {\n  std::vector<Network::Address::IpVersion> parameters;\n  for (auto version : {Network::Address::IpVersion::v4, Network::Address::IpVersion::v6}) {\n    if (TestEnvironment::shouldRunTestForIpVersion(version)) {\n      parameters.push_back(version);\n      if (!Network::Test::supportsIpVersion(version)) {\n        const auto version_string = Network::Test::addressVersionAsString(version);\n        ENVOY_LOG_TO_LOGGER(\n            Logger::Registry::getLog(Logger::Id::testing), warn,\n            \"Testing with IP{} addresses may not be supported on this machine. 
If \"\n            \"testing fails, set the environment variable ENVOY_IP_TEST_VERSIONS to 'v{}only'.\",\n            version_string, version_string);\n      }\n    }\n  }\n  return parameters;\n}\n\nServer::Options& TestEnvironment::getOptions() {\n  static OptionsImpl* options = new OptionsImpl(\n      argc_, argv_, [](bool) { return \"1\"; }, spdlog::level::err);\n  return *options;\n}\n\nconst std::string& TestEnvironment::temporaryDirectory() {\n  CONSTRUCT_ON_FIRST_USE(std::string, getTemporaryDirectory());\n}\n\nstd::string TestEnvironment::runfilesDirectory(const std::string& workspace) {\n  RELEASE_ASSERT(runfiles_ != nullptr, \"\");\n  return runfiles_->Rlocation(workspace);\n}\n\nstd::string TestEnvironment::runfilesPath(const std::string& path, const std::string& workspace) {\n  RELEASE_ASSERT(runfiles_ != nullptr, \"\");\n  return runfiles_->Rlocation(absl::StrCat(workspace, \"/\", path));\n}\n\nconst std::string TestEnvironment::unixDomainSocketDirectory() {\n  CONSTRUCT_ON_FIRST_USE(std::string, getOrCreateUnixDomainSocketDirectory());\n}\n\nstd::string TestEnvironment::substitute(const std::string& str,\n                                        Network::Address::IpVersion version) {\n  const absl::node_hash_map<std::string, std::string> path_map = {\n      {\"test_tmpdir\", TestEnvironment::temporaryDirectory()},\n      {\"test_udsdir\", TestEnvironment::unixDomainSocketDirectory()},\n      {\"test_rundir\", runfiles_ != nullptr ? 
TestEnvironment::runfilesDirectory() : \"invalid\"},\n  };\n\n  std::string out_json_string = str;\n  for (const auto& it : path_map) {\n    const std::regex port_regex(\"\\\\{\\\\{ \" + it.first + \" \\\\}\\\\}\");\n    out_json_string = std::regex_replace(out_json_string, port_regex, it.second);\n  }\n\n  // Substitute platform specific null device.\n  const std::regex null_device_regex(R\"(\\{\\{ null_device_path \\}\\})\");\n  out_json_string = std::regex_replace(out_json_string, null_device_regex,\n                                       std::string(Platform::null_device_path).c_str());\n\n  // Substitute IP loopback addresses.\n  const std::regex loopback_address_regex(R\"(\\{\\{ ip_loopback_address \\}\\})\");\n  out_json_string = std::regex_replace(out_json_string, loopback_address_regex,\n                                       Network::Test::getLoopbackAddressString(version));\n  const std::regex ntop_loopback_address_regex(R\"(\\{\\{ ntop_ip_loopback_address \\}\\})\");\n  out_json_string = std::regex_replace(out_json_string, ntop_loopback_address_regex,\n                                       Network::Test::getLoopbackAddressString(version));\n\n  // Substitute IP any addresses.\n  const std::regex any_address_regex(R\"(\\{\\{ ip_any_address \\}\\})\");\n  out_json_string = std::regex_replace(out_json_string, any_address_regex,\n                                       Network::Test::getAnyAddressString(version));\n\n  // Substitute dns lookup family.\n  const std::regex lookup_family_regex(R\"(\\{\\{ dns_lookup_family \\}\\})\");\n  switch (version) {\n  case Network::Address::IpVersion::v4:\n    out_json_string = std::regex_replace(out_json_string, lookup_family_regex, \"v4_only\");\n    break;\n  case Network::Address::IpVersion::v6:\n    out_json_string = std::regex_replace(out_json_string, lookup_family_regex, \"v6_only\");\n    break;\n  }\n\n  // Substitute socket options arguments.\n  const std::regex sol_socket_regex(R\"(\\{\\{ sol_socket 
\\}\\})\");\n  out_json_string =\n      std::regex_replace(out_json_string, sol_socket_regex, std::to_string(SOL_SOCKET));\n  const std::regex so_reuseport_regex(R\"(\\{\\{ so_reuseport \\}\\})\");\n  out_json_string =\n      std::regex_replace(out_json_string, so_reuseport_regex, std::to_string(SO_REUSEPORT));\n\n  return out_json_string;\n}\n\nstd::string TestEnvironment::temporaryFileSubstitute(const std::string& path,\n                                                     const PortMap& port_map,\n                                                     Network::Address::IpVersion version) {\n  return temporaryFileSubstitute(path, ParamMap(), port_map, version);\n}\n\nstd::string TestEnvironment::readFileToStringForTest(const std::string& filename,\n                                                     bool require_existence) {\n  std::ifstream file(filename, std::ios::binary);\n  if (file.fail()) {\n    if (!require_existence) {\n      return \"\";\n    }\n    RELEASE_ASSERT(false, absl::StrCat(\"failed to open: \", filename));\n  }\n\n  std::stringstream file_string_stream;\n  file_string_stream << file.rdbuf();\n  return file_string_stream.str();\n}\n\nstd::string TestEnvironment::temporaryFileSubstitute(const std::string& path,\n                                                     const ParamMap& param_map,\n                                                     const PortMap& port_map,\n                                                     Network::Address::IpVersion version) {\n  RELEASE_ASSERT(!path.empty(), \"requested path to substitute in is empty\");\n  // Load the entire file as a string, regex replace one at a time and write it back out. 
Proper\n  // templating might be better one day, but this works for now.\n  const std::string json_path = TestEnvironment::runfilesPath(path);\n  std::string out_json_string = readFileToStringForTest(json_path);\n\n  // Substitute params.\n  for (const auto& it : param_map) {\n    const std::regex param_regex(\"\\\\{\\\\{ \" + it.first + \" \\\\}\\\\}\");\n    out_json_string = std::regex_replace(out_json_string, param_regex, it.second);\n  }\n\n  // Substitute ports.\n  for (const auto& it : port_map) {\n    const std::regex port_regex(\"\\\\{\\\\{ \" + it.first + \" \\\\}\\\\}\");\n    out_json_string = std::regex_replace(out_json_string, port_regex, std::to_string(it.second));\n  }\n\n  // Substitute paths and other common things.\n  out_json_string = substitute(out_json_string, version);\n\n  auto name = Filesystem::fileSystemForTest().splitPathFromFilename(path).file_;\n  const std::string extension = absl::EndsWith(name, \".yaml\")\n                                    ? \".yaml\"\n                                    : absl::EndsWith(name, \".pb_text\") ? 
\".pb_text\" : \".json\";\n  const std::string out_json_path =\n      TestEnvironment::temporaryPath(name) + \".with.ports\" + extension;\n  {\n    std::ofstream out_json_file(out_json_path, std::ios::binary);\n    out_json_file << out_json_string;\n  }\n  return out_json_path;\n}\n\nJson::ObjectSharedPtr TestEnvironment::jsonLoadFromString(const std::string& json,\n                                                          Network::Address::IpVersion version) {\n  return Json::Factory::loadFromString(substitute(json, version));\n}\n\nvoid TestEnvironment::exec(const std::vector<std::string>& args) {\n  std::stringstream cmd;\n  // Symlinked args[0] can confuse Python when importing module relative, so we let Python know\n  // where it can find its module relative files.\n  cmd << \"bash -c \\\"PYTHONPATH=$(dirname \" << args[0] << \") \";\n  for (auto& arg : args) {\n    cmd << arg << \" \";\n  }\n  cmd << \"\\\"\";\n  if (::system(cmd.str().c_str()) != 0) {\n    std::cerr << \"Failed \" << cmd.str() << \"\\n\";\n    RELEASE_ASSERT(false, \"\");\n  }\n}\n\nstd::string TestEnvironment::writeStringToFileForTest(const std::string& filename,\n                                                      const std::string& contents,\n                                                      bool fully_qualified_path) {\n  const std::string out_path =\n      fully_qualified_path ? 
filename : TestEnvironment::temporaryPath(filename);\n  unlink(out_path.c_str());\n  {\n    std::ofstream out_file(out_path, std::ios_base::binary);\n    RELEASE_ASSERT(!out_file.fail(), \"\");\n    out_file << contents;\n  }\n  return out_path;\n}\n\nvoid TestEnvironment::setEnvVar(const std::string& name, const std::string& value, int overwrite) {\n#ifdef WIN32\n  if (!overwrite) {\n    size_t requiredSize;\n    const int rc = ::getenv_s(&requiredSize, nullptr, 0, name.c_str());\n    ASSERT_EQ(0, rc);\n    if (requiredSize != 0) {\n      return;\n    }\n  }\n  const int rc = ::_putenv_s(name.c_str(), value.c_str());\n  ASSERT_EQ(0, rc);\n#else\n  const int rc = ::setenv(name.c_str(), value.c_str(), overwrite);\n  ASSERT_EQ(0, rc);\n#endif\n}\n\nvoid TestEnvironment::unsetEnvVar(const std::string& name) {\n#ifdef WIN32\n  const int rc = ::_putenv_s(name.c_str(), \"\");\n  ASSERT_EQ(0, rc);\n#else\n  const int rc = ::unsetenv(name.c_str());\n  ASSERT_EQ(0, rc);\n#endif\n}\n\nvoid TestEnvironment::setRunfiles(Runfiles* runfiles) { runfiles_ = runfiles; }\n\nRunfiles* TestEnvironment::runfiles_{};\n\nAtomicFileUpdater::AtomicFileUpdater(const std::string& filename)\n    : link_(filename), new_link_(absl::StrCat(filename, \".new\")),\n      target1_(absl::StrCat(filename, \".target1\")), target2_(absl::StrCat(filename, \".target2\")),\n      use_target1_(true) {\n  unlink(link_.c_str());\n  unlink(new_link_.c_str());\n  unlink(target1_.c_str());\n  unlink(target2_.c_str());\n}\n\nvoid AtomicFileUpdater::update(const std::string& contents) {\n  const std::string target = use_target1_ ? target1_ : target2_;\n  use_target1_ = !use_target1_;\n  {\n    std::ofstream file(target, std::ios_base::binary);\n    file << contents;\n  }\n  TestEnvironment::createSymlink(target, new_link_);\n  TestEnvironment::renameFile(new_link_, link_);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/environment.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"envoy/network/address.h\"\n#include \"envoy/server/options.h\"\n\n#include \"common/json/json_loader.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n#include \"tools/cpp/runfiles/runfiles.h\"\n\nnamespace Envoy {\nclass TestEnvironment {\npublic:\n  using PortMap = absl::node_hash_map<std::string, uint32_t>;\n\n  using ParamMap = absl::node_hash_map<std::string, std::string>;\n\n  /**\n   * Perform common initialization steps needed to run a test binary. This\n   * method should be called first in all test main functions.\n   * @param program_name argv[0] test program is invoked with\n   */\n  static void initializeTestMain(char* program_name);\n\n  /**\n   * Initialize command-line options for later access by tests in getOptions().\n   * @param argc number of command-line args.\n   * @param argv array of command-line args.\n   */\n  static void initializeOptions(int argc, char** argv);\n\n  /**\n   * Check whether testing with IP version type {v4 or v6} is enabled via\n   * setting the environment variable ENVOY_IP_TEST_VERSIONS.\n   * @param Network::Address::IpVersion IP address version to check.\n   * @return bool if testing only with IP type addresses only.\n   */\n  static bool shouldRunTestForIpVersion(Network::Address::IpVersion type);\n\n  /**\n   * Return a vector of IP address parameters to test. 
Tests can be run with\n   * only IPv4 addressing or only IPv6 addressing by setting the environment\n   * variable ENVOY_IP_TEST_VERSIONS to \"v4only\" or \"v6only\", respectively.\n   * The default test setting runs all tests with both IPv4 and IPv6 addresses.\n   * @return std::vector<Network::Address::IpVersion> vector of IP address\n   * types to test.\n   */\n  static std::vector<Network::Address::IpVersion> getIpVersionsForTest();\n\n  /**\n   * Obtain command-line options reference.\n   * @return Server::Options& with command-line options.\n   */\n  static Server::Options& getOptions();\n\n  /**\n   * Obtain the value of an environment variable, null if not available.\n   * @return absl::optional<std::string> with the value of the environment variable.\n   */\n  static absl::optional<std::string> getOptionalEnvVar(const std::string& var);\n\n  /**\n   * Obtain the value of an environment variable, die if not available.\n   * @return std::string with the value of the environment variable.\n   */\n  static std::string getCheckedEnvVar(const std::string& var);\n\n  /**\n   * Obtain a private writable temporary directory.\n   * @return const std::string& with the path to the temporary directory.\n   */\n  static const std::string& temporaryDirectory();\n\n  /**\n   * Prefix a given path with the private writable test temporary directory.\n   * @param path path suffix.\n   * @return std::string path qualified with temporary directory.\n   */\n  static std::string temporaryPath(absl::string_view path) {\n    return absl::StrCat(temporaryDirectory(), \"/\", path);\n  }\n\n  /**\n   * Obtain platform specific new line character(s)\n   * @return absl::string_view platform specific new line character(s)\n   */\n  static constexpr absl::string_view newLine\n#ifdef WIN32\n      {\"\\r\\n\"};\n#else\n      {\"\\n\"};\n#endif\n\n  /**\n   * Obtain read-only test input data directory.\n   * @param workspace the name of the Bazel workspace where the input data is.\n   * 
@return const std::string& with the path to the read-only test input directory.\n   */\n  static std::string runfilesDirectory(const std::string& workspace = \"envoy\");\n\n  /**\n   * Prefix a given path with the read-only test input data directory.\n   * @param path path suffix.\n   * @return std::string path qualified with read-only test input data directory.\n   */\n  static std::string runfilesPath(const std::string& path, const std::string& workspace = \"envoy\");\n\n  /**\n   * Obtain Unix Domain Socket temporary directory.\n   * @return std::string& with the path to the Unix Domain Socket temporary directory.\n   */\n  static const std::string unixDomainSocketDirectory();\n\n  /**\n   * Prefix a given path with the Unix Domain Socket temporary directory.\n   * @param path path suffix.\n   * @param abstract_namespace true if an abstract namespace should be returned.\n   * @return std::string path qualified with the Unix Domain Socket temporary directory.\n   */\n  static std::string unixDomainSocketPath(const std::string& path,\n                                          bool abstract_namespace = false) {\n    return (abstract_namespace ? 
\"@\" : \"\") + unixDomainSocketDirectory() + \"/\" + path;\n  }\n\n  /**\n   * String environment path, loopback, and DNS resolver type substitution.\n   * @param str string with template patterns including {{ test_tmpdir }}.\n   * @param version supplies the IP version to substitute for relevant templates.\n   * @return std::string with patterns replaced with environment values.\n   */\n  static std::string\n  substitute(const std::string& str,\n             Network::Address::IpVersion version = Network::Address::IpVersion::v4);\n\n  /**\n   * Substitute ports, paths, and IP loopback addresses in a JSON file in the\n   * private writable test temporary directory.\n   * @param path path prefix for the input file with port and path templates.\n   * @param port_map map from port name to port number.\n   * @param version IP address version to substitute.\n   * @return std::string path for the generated file.\n   */\n  static std::string temporaryFileSubstitute(const std::string& path, const PortMap& port_map,\n                                             Network::Address::IpVersion version);\n  /**\n   * Substitute ports, paths, and IP loopback addresses in a JSON file in the\n   * private writable test temporary directory.\n   * @param path path prefix for the input file with port and path templates.\n   * @param param_map map from parameter name to values.\n   * @param port_map map from port name to port number.\n   * @param version IP address version to substitute.\n   * @return std::string path for the generated file.\n   */\n  static std::string temporaryFileSubstitute(const std::string& path, const ParamMap& param_map,\n                                             const PortMap& port_map,\n                                             Network::Address::IpVersion version);\n\n  /**\n   * Build JSON object from a string subject to environment path, loopback, and DNS resolver type\n   * substitution.\n   * @param json JSON with template patterns including {{ 
test_certs }}.\n   * @param version supplies the IP version to substitute for relevant templates.\n   * @return Json::ObjectSharedPtr with built JSON object.\n   */\n  static Json::ObjectSharedPtr\n  jsonLoadFromString(const std::string& json,\n                     Network::Address::IpVersion version = Network::Address::IpVersion::v4);\n\n  /**\n   * Execute a program under ::system. Any failure is fatal.\n   * @param args program path and arguments.\n   */\n  static void exec(const std::vector<std::string>& args);\n\n  /**\n   * Dumps the contents of the string into a temporary file from temporaryDirectory() + filename.\n   *\n   * @param filename: the name of the file to use\n   * @param contents: the data to go in the file.\n   * @param fully_qualified_path: if true, will write to filename without prepending the tempdir.\n   * @return the fully qualified path of the output file.\n   */\n  static std::string writeStringToFileForTest(const std::string& filename,\n                                              const std::string& contents,\n                                              bool fully_qualified_path = false);\n  /**\n   * Dumps the contents of the file into the string.\n   *\n   * @param filename: the fully qualified name of the file to use\n   * @param require_existence if true, RELEASE_ASSERT if the file does not exist.\n   *   If false, an empty string will be returned if the file is not present.\n   * @return string the contents of the file.\n   */\n  static std::string readFileToStringForTest(const std::string& filename,\n                                             bool require_existence = true);\n\n  /**\n   * Create a path on the filesystem (mkdir -p ... equivalent).\n   * @param path.\n   */\n  static void createPath(const std::string& path);\n\n  /**\n   * Remove a path on the filesystem (rm -rf ... 
equivalent).\n   * @param path.\n   */\n  static void removePath(const std::string& path);\n\n  /**\n   * Rename a file\n   * @param old_name\n   * @param new_name\n   */\n  static void renameFile(const std::string& old_name, const std::string& new_name);\n\n  /**\n   * Create a symlink\n   * @param target\n   * @param link\n   */\n  static void createSymlink(const std::string& target, const std::string& link);\n\n  /**\n   * Set environment variable. Same args as setenv(2).\n   */\n  static void setEnvVar(const std::string& name, const std::string& value, int overwrite);\n\n  /**\n   * Removes environment variable. Same args as unsetenv(3).\n   */\n  static void unsetEnvVar(const std::string& name);\n\n  /**\n   * Set runfiles with current test, this has to be called before calling path related functions.\n   */\n  static void setRunfiles(bazel::tools::cpp::runfiles::Runfiles* runfiles);\n\nprivate:\n  static bazel::tools::cpp::runfiles::Runfiles* runfiles_;\n};\n\n/**\n * A utility class for atomically updating a file using symbolic link swap.\n * Note the file lifetime is limited to the instance of the AtomicFileUpdater\n * which erases any existing files upon creation, used for specific test\n * scenarios. See discussion at https://github.com/envoyproxy/envoy/pull/4298\n */\nclass AtomicFileUpdater {\npublic:\n  AtomicFileUpdater(const std::string& filename);\n\n  void update(const std::string& contents);\n\nprivate:\n  const std::string link_;\n  const std::string new_link_;\n  const std::string target1_;\n  const std::string target2_;\n  bool use_target1_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/file_system_for_test.cc",
    "content": "#include \"common/filesystem/filesystem_impl.h\"\n\nnamespace Envoy {\n\nnamespace Filesystem {\n\n// TODO(sesmith177) Tests should get the Filesystem::Instance from the same location as the main\n// code\nInstance& fileSystemForTest() {\n#ifdef WIN32\n  static InstanceImplWin32* file_system = new InstanceImplWin32();\n#else\n  static InstanceImplPosix* file_system = new InstanceImplPosix();\n#endif\n  return *file_system;\n}\n\n} // namespace Filesystem\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/file_system_for_test.h",
    "content": "#include \"envoy/filesystem/filesystem.h\"\n\nnamespace Envoy {\n\nnamespace Filesystem {\nInstance& fileSystemForTest();\n} // namespace Filesystem\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/global.cc",
    "content": "#include \"test/test_common/global.h\"\n\n#include \"common/common/assert.h\"\n\nnamespace Envoy {\nnamespace Test {\n\nGlobals& Globals::instance() {\n  static Globals* globals = new Globals;\n  return *globals;\n}\n\nstd::string Globals::describeActiveSingletonsHelper() {\n  std::string ret;\n  Thread::ReleasableLockGuard map_lock(map_mutex_);\n  for (auto& p : singleton_map_) {\n    SingletonSharedPtr singleton = p.second.lock();\n    if (singleton != nullptr) {\n      absl::StrAppend(&ret, \"Unexpected active singleton: \", p.first, \"\\n\");\n    }\n  }\n  return ret;\n}\n\n} // namespace Test\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/global.h",
    "content": "#pragma once\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Test {\n\n/**\n * Helper class for managing Global<Type>s.\n *\n * This class is instantiated as a process-scoped singleton. It manages a map\n * from type-name to weak_ptr<GlobalHelper::Singleton>. That map accumulates\n * over a process lifetime and never shrinks. However, the weak_ptr will\n * generally be cleared after each test, as all shared_ptr references held in\n * Global<Type> instances are destroyed. This way, each unit-test gets a fresh\n * start.\n */\nclass Globals {\npublic:\n  /**\n   * Walks through all global singletons and ensures that none of them are\n   * active. No singletons should be allocated at the end of unit tests, so\n   * this is called at the end of Envoy::TestRunner::RunTests().\n   *\n   * @return std::string empty string if quiescent, otherwise newline-separated\n   *    error messages.\n   */\n  static std::string describeActiveSingletons() {\n    return instance().describeActiveSingletonsHelper();\n  }\n\n  /**\n   * Manages Singleton objects that are cleaned up after all references are\n   * dropped. This class must not be templatized because as a map value where\n   * every singleton in the map represents a different type. Instead we\n   * templatize the ptr() and ref() methods.\n   */\n  struct Singleton {\n    virtual ~Singleton() = default;\n    virtual void* ptrHelper() PURE;\n    template <class Type> Type* ptr() { return static_cast<Type*>(ptrHelper()); }\n    template <class Type> Type& ref() { return *ptr<Type>(); }\n  };\n  using SingletonSharedPtr = std::shared_ptr<Singleton>;\n\n  /**\n   * @return Type a singleton instance of Type. 
T must be default-constructible.\n   */\n  template <class Type> static SingletonSharedPtr get() { return instance().getHelper<Type>(); }\n\n  ~Globals() = delete; // Globals is constructed once and never destroyed.\n\nprivate:\n  /**\n   * Templatized derived class of Singleton which holds the Type object and is\n   * responsible for deleting it using the correct destructor.\n   */\n  template <class Type> struct TypedSingleton : public Singleton {\n    ~TypedSingleton() override = default;\n    void* ptrHelper() override { return ptr_.get(); }\n\n  private:\n    std::unique_ptr<Type> ptr_{std::make_unique<Type>()};\n  };\n\n  Globals() = default; // Construct via Globals::instance().\n\n  /**\n   * @return Globals& a singleton for Globals.\n   */\n  static Globals& instance();\n\n  template <class Type> SingletonSharedPtr getHelper() {\n    Thread::LockGuard map_lock(map_mutex_);\n    std::weak_ptr<Singleton>& weak_singleton_ref = singleton_map_[typeid(Type).name()];\n    SingletonSharedPtr singleton = weak_singleton_ref.lock();\n\n    if (singleton == nullptr) {\n      singleton = std::make_shared<TypedSingleton<Type>>();\n      weak_singleton_ref = singleton;\n    }\n    return singleton;\n  }\n\n  std::string describeActiveSingletonsHelper();\n\n  Thread::MutexBasicLockable map_mutex_;\n  absl::flat_hash_map<std::string, std::weak_ptr<Singleton>>\n      singleton_map_ ABSL_GUARDED_BY(map_mutex_);\n};\n\n/**\n * Helps manage classes that need to be instantiated once per server. In\n * production they must be plumbed through call/class hierarchy, but\n * in test-code the zero-arg-constructor Mock pattern makes this impractical.\n * Instead we use self-cleaning singletons.\n *\n * Say for example you need a FooImpl plumbed through the system. In production\n * code you must propagate a FooImpl through constructors to provide access\n * where needed. 
For tests, everywhere a common FooImpl is required,\n * instantiate:\n *\n *   Global<FooImpl> foo;\n *\n * You can then access the singleton FooImpl via foo.get(). The underlying\n * FooImpl is ref-counted, and when the last TestGlobal is freed, the singleton\n * FooImpl will be destructed and the singleton pointer nulled.\n *\n * The templated type must have a zero-arg constructor. Templatizing this on an\n * int will compile, but will be hard to use as the memory will be uninitialized\n * and you will not know when instantiating it whether it needs to be\n * initialized.\n */\ntemplate <class Type> class Global {\npublic:\n  Global() : singleton_(Globals::get<Type>()) {}\n  Type& get() { return singleton_->ref<Type>(); }\n  const Type& get() const { return singleton_->ref<Type>(); }\n  Type* operator->() { return singleton_->ptr<Type>(); }\n  Type& operator*() { return singleton_->ref<Type>(); }\n\nprivate:\n  Globals::SingletonSharedPtr singleton_;\n};\n\n} // namespace Test\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/global_test.cc",
    "content": "#include <string>\n#include <vector>\n\n#include \"test/test_common/global.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Test {\n\nclass GlobalTest : public testing::Test {\nprotected:\n};\n\nTEST_F(GlobalTest, SingletonStringAndVector) {\n  {\n    Global<std::string> s1;\n    Global<std::vector<int>> v1;\n    EXPECT_EQ(\"\", *s1);\n    *s1 = \"foo\";\n    EXPECT_TRUE(v1->empty());\n    v1->push_back(42);\n\n    Global<std::string> s2;\n    Global<std::vector<int>> v2;\n    EXPECT_EQ(\"foo\", *s2);\n    ASSERT_EQ(1, v2->size());\n    EXPECT_EQ(42, (*v2)[0]);\n  }\n\n  // The system is now quiescent, having dropped all references to the globals.\n  EXPECT_EQ(\"\", Globals::describeActiveSingletons());\n\n  // After the globals went out of scope, referencing them again we start\n  // from clean objects;\n  Global<std::string> s3;\n  Global<std::vector<int>> v3;\n  EXPECT_EQ(\"\", *s3);\n  EXPECT_TRUE(v3->empty());\n\n  // With s3 and v3 on the stack, there are active singletons.\n  EXPECT_NE(\"\", Globals::describeActiveSingletons());\n}\n\n} // namespace Test\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/logging.cc",
    "content": "#include \"test/test_common/logging.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"absl/synchronization/mutex.h\"\n\nnamespace Envoy {\n\nLogLevelSetter::LogLevelSetter(spdlog::level::level_enum log_level) {\n  if (Logger::Context::useFancyLogger()) {\n    previous_fancy_levels_ = getFancyContext().getAllFancyLogLevelsForTest();\n    getFancyContext().setAllFancyLoggers(log_level);\n  } else {\n    for (Logger::Logger& logger : Logger::Registry::loggers()) {\n      previous_levels_.push_back(logger.level());\n      logger.setLevel(log_level);\n    }\n  }\n}\n\nLogLevelSetter::~LogLevelSetter() {\n  if (Logger::Context::useFancyLogger()) {\n    for (const auto& it : previous_fancy_levels_) {\n      getFancyContext().setFancyLogger(it.first, it.second);\n    }\n  } else {\n    auto prev_level = previous_levels_.begin();\n    for (Logger::Logger& logger : Logger::Registry::loggers()) {\n      ASSERT(prev_level != previous_levels_.end());\n      logger.setLevel(*prev_level);\n      ++prev_level;\n    }\n    ASSERT(prev_level == previous_levels_.end());\n  }\n}\n\nLogRecordingSink::LogRecordingSink(Logger::DelegatingLogSinkSharedPtr log_sink)\n    : Logger::SinkDelegate(log_sink) {\n  setDelegate();\n}\n\nLogRecordingSink::~LogRecordingSink() { restoreDelegate(); }\n\nvoid LogRecordingSink::log(absl::string_view msg) {\n  previousDelegate()->log(msg);\n\n  absl::MutexLock ml(&mtx_);\n  messages_.push_back(std::string(msg));\n}\n\nvoid LogRecordingSink::flush() { previousDelegate()->flush(); }\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/logging.h",
    "content": "#pragma once\n\n#include <cstdint>\n#include <string>\n#include <vector>\n\n#include \"common/common/logger.h\"\n\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"spdlog/spdlog.h\"\n\nnamespace Envoy {\n\n/**\n * Provides a mechanism to temporarily set the logging level on\n * construction, restoring its previous state on destruction.\n *\n * The log_level is the minimum log severity required to print messages.\n * Messages below this loglevel will be suppressed.\n *\n * Note that during the scope of this object, command-line overrides, e.g.,\n * --log-level trace, will not take effect.\n *\n * Also note: instantiating this setter should only occur when the system is\n * in a quiescent state, e.g. at startup or between tests.\n *\n * This is intended for use in EXPECT_LOG_CONTAINS and similar macros.\n */\nclass LogLevelSetter {\npublic:\n  explicit LogLevelSetter(spdlog::level::level_enum log_level);\n  ~LogLevelSetter();\n\nprivate:\n  std::vector<spdlog::level::level_enum> previous_levels_;\n  FancyLogLevelMap previous_fancy_levels_;\n};\n\n/**\n * Records log messages in a vector<string>, forwarding them to the previous\n * delegate. This is useful for unit-testing log messages while still being able\n * to see them on stderr.\n *\n * Also note: instantiating this sink should only occur when the system is\n * in a quiescent state, e.g. 
at startup or between tests\n *\n * This is intended for use in EXPECT_LOG_CONTAINS and similar macros.\n */\nclass LogRecordingSink : public Logger::SinkDelegate {\npublic:\n  explicit LogRecordingSink(Logger::DelegatingLogSinkSharedPtr log_sink);\n  ~LogRecordingSink() override;\n\n  // Logger::SinkDelegate\n  void log(absl::string_view msg) override;\n  void flush() override;\n\n  const std::vector<std::string>& messages() const { return messages_; }\n\nprivate:\n  absl::Mutex mtx_;\n  std::vector<std::string> messages_ ABSL_GUARDED_BY(mtx_);\n};\n\nusing StringPair = std::pair<std::string, std::string>;\n\nusing ExpectedLogMessages = std::vector<StringPair>;\n\n// Below macros specify Envoy:: before class names so that the macro can be used outside of\n// namespace Envoy.\n\n// Alias for EXPECT_LOG_CONTAINS_ALL_OF_HELPER, with escaped=true\n#define EXPECT_LOG_CONTAINS_ALL_OF_ESCAPED(expected_messages, stmt)                                \\\n  EXPECT_LOG_CONTAINS_ALL_OF_HELPER(expected_messages, stmt, true)\n\n// Alias for EXPECT_LOG_CONTAINS_ALL_OF_HELPER, with escaped=false\n#define EXPECT_LOG_CONTAINS_ALL_OF(expected_messages, stmt)                                        \\\n  EXPECT_LOG_CONTAINS_ALL_OF_HELPER(expected_messages, stmt, false)\n\n// Validates that when stmt is executed, log messages containing substr and loglevel will be\n// emitted. 
Escaped=true sets the behavior to function like the --log-format-escaped CLI flag.\n// Failure message e.g.,\n//\n// Logs:\n//  [2018-04-12 05:51:00.245][7290192][debug][upstream] grpc_mux_impl.cc:160] Received gRPC\n//  [2018-04-12 05:51:00.246][7290192][warning][upstream] grpc_mux_impl.cc:63] Called bar\n//  [2018-04-12 05:51:00.246][7290192][trace][upstream] grpc_mux_impl.cc:80] Sending foo\n//  Does NOT contain:\n//    'warning', 'Too many sendDiscoveryRequest calls for baz'\n//    'warning', 'Too many sendDiscoveryRequest calls for foo'\n#define EXPECT_LOG_CONTAINS_ALL_OF_HELPER(expected_messages, stmt, escaped)                        \\\n  do {                                                                                             \\\n    ASSERT_FALSE(expected_messages.empty()) << \"Expected messages cannot be empty.\";               \\\n    Envoy::LogLevelSetter save_levels(spdlog::level::trace);                                       \\\n    Envoy::Logger::DelegatingLogSinkSharedPtr sink_ptr = Envoy::Logger::Registry::getSink();       \\\n    sink_ptr->setShouldEscape(escaped);                                                            \\\n    Envoy::LogRecordingSink log_recorder(sink_ptr);                                                \\\n    stmt;                                                                                          \\\n    if (log_recorder.messages().empty()) {                                                         \\\n      FAIL() << \"Expected message(s), but NONE was recorded.\";                                     \\\n    }                                                                                              \\\n    Envoy::ExpectedLogMessages failed_expectations;                                                \\\n    for (const Envoy::StringPair& expected : expected_messages) {                                  \\\n      const auto log_message =                                                                     \\\n      
    std::find_if(log_recorder.messages().begin(), log_recorder.messages().end(),             \\\n                       [&expected](const std::string& message) {                                   \\\n                         return (message.find(expected.second) != std::string::npos) &&            \\\n                                (message.find(expected.first) != std::string::npos);               \\\n                       });                                                                         \\\n      if (log_message == log_recorder.messages().end()) {                                          \\\n        failed_expectations.push_back(expected);                                                   \\\n      }                                                                                            \\\n    }                                                                                              \\\n    if (!failed_expectations.empty()) {                                                            \\\n      std::string failed_message;                                                                  \\\n      absl::StrAppend(&failed_message, \"\\nLogs:\\n \", absl::StrJoin(log_recorder.messages(), \" \"),  \\\n                      \"\\n Do NOT contain:\\n\");                                                     \\\n      for (const auto& expectation : failed_expectations) {                                        \\\n        absl::StrAppend(&failed_message, \"  '\", expectation.first, \"', '\", expectation.second,     \\\n                        \"'\\n\");                                                                    \\\n      }                                                                                            \\\n      FAIL() << failed_message;                                                                    \\\n    }                                                                                              \\\n  } while (false)\n\n// 
Validates that when stmt is executed, log message containing substr and loglevel will NOT be\n// emitted. Failure message e.g.,\n//\n// Logs:\n//  [2018-04-12 05:51:00.245][7290192][warning][upstream] grpc_mux_impl.cc:160] Received gRPC\n//  [2018-04-12 05:51:00.246][7290192][trace][upstream] grpc_mux_impl.cc:63] Called bar\n//  Should NOT contain:\n//   'warning', 'Received gRPC’\n#define EXPECT_LOG_NOT_CONTAINS(loglevel, substr, stmt)                                            \\\n  do {                                                                                             \\\n    Envoy::LogLevelSetter save_levels(spdlog::level::trace);                                       \\\n    Envoy::LogRecordingSink log_recorder(Envoy::Logger::Registry::getSink());                      \\\n    stmt;                                                                                          \\\n    for (const std::string& message : log_recorder.messages()) {                                   \\\n      if ((message.find(substr) != std::string::npos) &&                                           \\\n          (message.find(loglevel) != std::string::npos)) {                                         \\\n        FAIL() << \"\\nLogs:\\n \" << absl::StrJoin(log_recorder.messages(), \" \")                      \\\n               << \"\\n Should NOT contain:\\n '\" << loglevel << \"', '\" << substr \"'\\n\";              \\\n      }                                                                                            \\\n    }                                                                                              \\\n  } while (false)\n\n// Validates that when stmt is executed, the supplied substring matches at least one log message.\n// Failure message e.g.,\n//\n// Logs:\n//  [2018-04-12 05:51:00.245][7290192][debug][upstream] grpc_mux_impl.cc:160] Received gRPC\n//  [2018-04-12 05:51:00.246][7290192][trace][upstream] grpc_mux_impl.cc:80] Sending foo\n//  Do NOT 
contain:\n//    'warning', 'Too many sendDiscoveryRequest calls for baz’\n#define EXPECT_LOG_CONTAINS(loglevel, substr, stmt)                                                \\\n  do {                                                                                             \\\n    const Envoy::ExpectedLogMessages message{{loglevel, substr}};                                  \\\n    EXPECT_LOG_CONTAINS_ALL_OF(message, stmt);                                                     \\\n  } while (false)\n\n// Validates that when stmt is executed, no logs will be emitted.\n// Expected equality of these values:\n//   0\n//   logs.size()\n//     Which is: 3\n//  Logs:\n//   [2018-04-12 05:51:00.245][7290192][debug][upstream] grpc_mux_impl.cc:160] Received gRPC\n//   [2018-04-12 05:51:00.246][7290192][trace][upstream] grpc_mux_impl.cc:80] Sending foo\n#define EXPECT_NO_LOGS(stmt)                                                                       \\\n  do {                                                                                             \\\n    Envoy::LogLevelSetter save_levels(spdlog::level::trace);                                       \\\n    Envoy::LogRecordingSink log_recorder(Envoy::Logger::Registry::getSink());                      \\\n    stmt;                                                                                          \\\n    const std::vector<std::string>& logs = log_recorder.messages();                                \\\n    ASSERT_EQ(0, logs.size()) << \" Logs:\\n   \" << absl::StrJoin(logs, \"   \");                      \\\n  } while (false)\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/network_utility.cc",
    "content": "#include \"test/test_common/network_utility.h\"\n\n#include <cstdint>\n#include <string>\n\n#include \"envoy/common/platform.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/fmt.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/listen_socket_impl.h\"\n#include \"common/network/raw_buffer_socket.h\"\n#include \"common/network/socket_interface.h\"\n#include \"common/network/socket_option_factory.h\"\n#include \"common/network/utility.h\"\n#include \"common/runtime/runtime_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Test {\n\nAddress::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstSharedPtr addr_port,\n                                                    Socket::Type type) {\n  if (addr_port == nullptr || addr_port->type() != Address::Type::Ip) {\n    ADD_FAILURE() << \"Not an internet address: \"\n                  << (addr_port == nullptr ? \"nullptr\" : addr_port->asString());\n    return nullptr;\n  }\n  SocketImpl sock(type, addr_port);\n  // Not setting REUSEADDR, therefore if the address has been recently used we won't reuse it here.\n  // However, because we're going to use the address while checking if it is available, we'll need\n  // to set REUSEADDR on listener sockets created by tests using an address validated by this means.\n  Api::SysCallIntResult result = sock.bind(addr_port);\n  const char* failing_fn = nullptr;\n  if (result.rc_ != 0) {\n    failing_fn = \"bind\";\n  } else if (type == Socket::Type::Stream) {\n    // Try listening on the port also, if the type is TCP.\n    result = sock.listen(1);\n    if (result.rc_ != 0) {\n      failing_fn = \"listen\";\n    }\n  }\n  if (failing_fn != nullptr) {\n    if (result.errno_ == SOCKET_ERROR_ADDR_IN_USE) {\n      // The port is already in use. 
Perfectly normal.\n      return nullptr;\n    } else if (result.errno_ == SOCKET_ERROR_ACCESS) {\n      // A privileged port, and we don't have privileges. Might want to log this.\n      return nullptr;\n    }\n    // Unexpected failure.\n    ADD_FAILURE() << failing_fn << \" failed for '\" << addr_port->asString()\n                  << \"' with error: \" << errorDetails(result.errno_) << \" (\" << result.errno_\n                  << \")\";\n    return nullptr;\n  }\n  return sock.localAddress();\n}\n\nAddress::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port,\n                                                    Socket::Type type) {\n  auto instance = Utility::parseInternetAddressAndPort(addr_port);\n  if (instance != nullptr) {\n    instance = findOrCheckFreePort(instance, type);\n  } else {\n    ADD_FAILURE() << \"Unable to parse as an address and port: \" << addr_port;\n  }\n  return instance;\n}\n\nstd::string getLoopbackAddressUrlString(const Address::IpVersion version) {\n  if (version == Address::IpVersion::v6) {\n    return std::string(\"[::1]\");\n  }\n  return std::string(\"127.0.0.1\");\n}\n\nstd::string getLoopbackAddressString(const Address::IpVersion version) {\n  if (version == Address::IpVersion::v6) {\n    return std::string(\"::1\");\n  }\n  return std::string(\"127.0.0.1\");\n}\n\nstd::string getAnyAddressUrlString(const Address::IpVersion version) {\n  if (version == Address::IpVersion::v6) {\n    return std::string(\"[::]\");\n  }\n  return std::string(\"0.0.0.0\");\n}\n\nstd::string getAnyAddressString(const Address::IpVersion version) {\n  if (version == Address::IpVersion::v6) {\n    return std::string(\"::\");\n  }\n  return std::string(\"0.0.0.0\");\n}\n\nstd::string addressVersionAsString(const Address::IpVersion version) {\n  if (version == Address::IpVersion::v4) {\n    return std::string(\"v4\");\n  }\n  return std::string(\"v6\");\n}\n\nAddress::InstanceConstSharedPtr 
getCanonicalLoopbackAddress(Address::IpVersion version) {\n  if (version == Address::IpVersion::v4) {\n    return Network::Utility::getCanonicalIpv4LoopbackAddress();\n  }\n  return Network::Utility::getIpv6LoopbackAddress();\n}\n\nnamespace {\n\n// There is no portable way to initialize sockaddr_in6 with a static initializer, do it with a\n// helper function instead.\nsockaddr_in6 sockaddrIn6Any() {\n  sockaddr_in6 v6any = {};\n  v6any.sin6_family = AF_INET6;\n  v6any.sin6_addr = in6addr_any;\n\n  return v6any;\n}\n\n} // namespace\n\nAddress::InstanceConstSharedPtr getAnyAddress(const Address::IpVersion version, bool v4_compat) {\n  if (version == Address::IpVersion::v4) {\n    return Network::Utility::getIpv4AnyAddress();\n  }\n  if (v4_compat) {\n    // This will return an IPv6 ANY address (\"[::]:0\") like the getIpv6AnyAddress() below, but\n    // with the internal 'v6only' member set to false. This will allow a socket created from this\n    // address to accept IPv4 connections. IPv4 connections received on IPv6 sockets will have\n    // Ipv4-mapped IPv6 addresses, which we will then internally interpret as IPv4 addresses so\n    // that, for example, access logging will show IPv4 address format for IPv4 connections even\n    // if they were received on an IPv6 socket.\n    static Address::InstanceConstSharedPtr any(new Address::Ipv6Instance(sockaddrIn6Any(), false));\n    return any;\n  }\n  return Network::Utility::getIpv6AnyAddress();\n}\n\nbool supportsIpVersion(const Address::IpVersion version) {\n  return Network::SocketInterfaceSingleton::get().ipFamilySupported(\n      version == Address::IpVersion::v4 ? 
AF_INET : AF_INET6);\n}\n\nstd::string ipVersionToDnsFamily(Network::Address::IpVersion version) {\n  switch (version) {\n  case Network::Address::IpVersion::v4:\n    return \"V4_ONLY\";\n  case Network::Address::IpVersion::v6:\n    return \"V6_ONLY\";\n  }\n\n  // This seems to be needed on the coverage build for some reason.\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nstd::pair<Address::InstanceConstSharedPtr, Network::SocketPtr>\nbindFreeLoopbackPort(Address::IpVersion version, Socket::Type type, bool reuse_port) {\n  Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version);\n  SocketPtr sock = std::make_unique<SocketImpl>(type, addr);\n  if (reuse_port) {\n    sock->addOptions(SocketOptionFactory::buildReusePortOptions());\n    Socket::applyOptions(sock->options(), *sock,\n                         envoy::config::core::v3::SocketOption::STATE_PREBIND);\n  }\n  Api::SysCallIntResult result = sock->bind(addr);\n  if (0 != result.rc_) {\n    sock->close();\n    std::string msg = fmt::format(\"bind failed for address {} with error: {} ({})\",\n                                  addr->asString(), errorDetails(result.errno_), result.errno_);\n    ADD_FAILURE() << msg;\n    throw EnvoyException(msg);\n  }\n\n  return std::make_pair(sock->localAddress(), std::move(sock));\n}\n\nTransportSocketPtr createRawBufferSocket() { return std::make_unique<RawBufferSocket>(); }\n\nTransportSocketFactoryPtr createRawBufferSocketFactory() {\n  return std::make_unique<RawBufferSocketFactory>();\n}\n\nconst Network::FilterChainSharedPtr\ncreateEmptyFilterChain(TransportSocketFactoryPtr&& transport_socket_factory) {\n  return std::make_shared<Network::Test::EmptyFilterChain>(std::move(transport_socket_factory));\n}\n\nconst Network::FilterChainSharedPtr createEmptyFilterChainWithRawBufferSockets() {\n  return createEmptyFilterChain(createRawBufferSocketFactory());\n}\n\nnamespace {\nstruct SyncPacketProcessor : public Network::UdpPacketProcessor {\n  
SyncPacketProcessor(std::list<Network::UdpRecvData>& data) : data_(data) {}\n\n  void processPacket(Network::Address::InstanceConstSharedPtr local_address,\n                     Network::Address::InstanceConstSharedPtr peer_address,\n                     Buffer::InstancePtr buffer, MonotonicTime receive_time) override {\n    Network::UdpRecvData datagram{\n        {std::move(local_address), std::move(peer_address)}, std::move(buffer), receive_time};\n    data_.push_back(std::move(datagram));\n  }\n  uint64_t maxPacketSize() const override { return Network::MAX_UDP_PACKET_SIZE; }\n\n  std::list<Network::UdpRecvData>& data_;\n};\n} // namespace\n\nApi::IoCallUint64Result readFromSocket(IoHandle& handle, const Address::Instance& local_address,\n                                       std::list<UdpRecvData>& data) {\n  SyncPacketProcessor processor(data);\n  return Network::Utility::readFromSocket(handle, local_address, processor,\n                                          MonotonicTime(std::chrono::seconds(0)), nullptr);\n}\n\nUdpSyncPeer::UdpSyncPeer(Network::Address::IpVersion version)\n    : socket_(\n          std::make_unique<UdpListenSocket>(getCanonicalLoopbackAddress(version), nullptr, true)) {\n  RELEASE_ASSERT(socket_->setBlockingForTest(true).rc_ != -1, \"\");\n}\n\nvoid UdpSyncPeer::write(const std::string& buffer, const Network::Address::Instance& peer) {\n  const auto rc = Network::Utility::writeToSocket(socket_->ioHandle(), Buffer::OwnedImpl(buffer),\n                                                  nullptr, peer);\n  ASSERT_EQ(rc.rc_, buffer.length());\n}\n\nvoid UdpSyncPeer::recv(Network::UdpRecvData& datagram) {\n  if (received_datagrams_.empty()) {\n    const auto rc = Network::Test::readFromSocket(socket_->ioHandle(), *socket_->localAddress(),\n                                                  received_datagrams_);\n    ASSERT_TRUE(rc.ok());\n  }\n  datagram = std::move(received_datagrams_.front());\n  received_datagrams_.pop_front();\n}\n\n} // 
namespace Test\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/network_utility.h",
    "content": "#pragma once\n\n#include <list>\n#include <string>\n\n#include \"envoy/network/address.h\"\n#include \"envoy/network/filter.h\"\n#include \"envoy/network/io_handle.h\"\n#include \"envoy/network/transport_socket.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Test {\n\n/**\n * Determines if the passed in address and port is available for binding. If the port is zero,\n * the OS should pick an unused port for the supplied address (e.g. for the loopback address).\n * NOTE: this is racy, as it does not provide a means to keep the port reserved for the\n * caller's use.\n * @param addr_port a valid host address (e.g. an address of one of the network interfaces\n *        of this host, or the any address or the loopback address) and port (zero to indicate\n *        that the OS should pick an unused address.\n * @param type the type of socket to be tested.\n * @returns the address and port (selected if zero was the passed in port) that can be used for\n *          listening, else nullptr if the address and port are not free.\n */\nAddress::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstSharedPtr addr_port,\n                                                    Socket::Type type);\n\n/**\n * As above, but addr_port is specified as a string. 
For example:\n *    - 127.0.0.1:32000  Check whether a specific port on the IPv4 loopback address is free.\n *    - [::1]:0          Pick a free port on the IPv6 loopback address.\n *    - 0.0.0.0:0        Pick a free port on all local addresses of all local interfaces.\n *    - [::]:45678       Check whether a specific port on all local IPv6 addresses is free.\n */\nAddress::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port,\n                                                    Socket::Type type);\n\n/**\n * Get a URL ready IP loopback address as a string.\n * @param version IP address version of loopback address.\n * @return std::string URL ready loopback address as a string.\n */\nstd::string getLoopbackAddressUrlString(const Address::IpVersion version);\n\n/**\n * Get a IP loopback address as a string. There are no square brackets around IPv6 addresses, this\n * is what inet_ntop() gives.\n * @param version IP address version of loopback address.\n * @return std::string loopback address as a string.\n */\nstd::string getLoopbackAddressString(const Address::IpVersion version);\n\n/**\n * Get a URL ready IP any address as a string.\n * @param version IP address version of any address.\n * @return std::string URL ready any address as a string.\n */\nstd::string getAnyAddressUrlString(const Address::IpVersion version);\n\n/**\n * Get an IP any address as a string.\n * @param version IP address version of any address.\n * @return std::string any address as a string.\n */\nstd::string getAnyAddressString(const Address::IpVersion version);\n\n/**\n * Return a string version of enum IpVersion version.\n * @param version IP address version.\n * @return std::string string version of IpVersion.\n */\nstd::string addressVersionAsString(const Address::IpVersion version);\n\n/**\n * Returns a loopback address for the specified IP version (127.0.0.1 for IPv4 and ::1 for IPv6).\n * @param version the IP version of the loopback address.\n * @returns a 
loopback address for the specified IP version.\n */\nAddress::InstanceConstSharedPtr getCanonicalLoopbackAddress(const Address::IpVersion version);\n\n/**\n * Returns the any address for the specified IP version.\n * @param version the IP version of the any address.\n * @param v4_compat determines whether a v4-mapped addresses bound to a socket listening on the\n *        returned ANY address are to be treated as IPv4 or IPv6 addresses. Defaults to 'false',\n *        has no effect with IPv4 ANY address.\n * @returns the any address for the specified IP version.\n */\nAddress::InstanceConstSharedPtr getAnyAddress(const Address::IpVersion version,\n                                              bool v4_compat = false);\n\n/**\n * This function tries to create a socket of type IpVersion version and bind to it. If\n * successful this function returns true. If either socket creation or socket\n * bind fail, this function returns false.\n * @param version the IP version to test.\n * @return bool whether IpVersion addresses are \"supported\".\n */\nbool supportsIpVersion(const Address::IpVersion version);\n\n/**\n * Returns the DNS family for the specified IP version.\n * @param version the IP version of the DNS lookup family.\n */\nstd::string ipVersionToDnsFamily(Network::Address::IpVersion version);\n\n/**\n * Bind a socket to a free port on a loopback address, and return the socket's fd and bound address.\n * Enables a test server to reliably \"select\" a port to listen on.\n * @param version the IP version of the loopback address.\n * @param type the type of socket to be bound.\n * @param reuse_port specifies whether the socket option SO_REUSEADDR has been set on the socket.\n * @returns the address and the fd of the socket bound to that address.\n */\nstd::pair<Address::InstanceConstSharedPtr, Network::SocketPtr>\nbindFreeLoopbackPort(Address::IpVersion version, Socket::Type type, bool reuse_port = false);\n\n/**\n * Create a transport socket for testing purposes.\n 
* @return TransportSocketPtr the transport socket factory to use with a connection.\n */\nTransportSocketPtr createRawBufferSocket();\n\n/**\n * Create a transport socket factory for testing purposes.\n * @return TransportSocketFactoryPtr the transport socket factory to use with a cluster or a\n * listener.\n */\nTransportSocketFactoryPtr createRawBufferSocketFactory();\n\n/**\n * Implementation of Network::FilterChain with empty filter chain, but pluggable transport socket\n * factory.\n */\nclass EmptyFilterChain : public FilterChain {\npublic:\n  EmptyFilterChain(TransportSocketFactoryPtr&& transport_socket_factory)\n      : transport_socket_factory_(std::move(transport_socket_factory)) {}\n\n  // Network::FilterChain\n  const TransportSocketFactory& transportSocketFactory() const override {\n    return *transport_socket_factory_;\n  }\n\n  const std::vector<FilterFactoryCb>& networkFilterFactories() const override {\n    return empty_network_filter_factory_;\n  }\n\nprivate:\n  const TransportSocketFactoryPtr transport_socket_factory_;\n  const std::vector<FilterFactoryCb> empty_network_filter_factory_{};\n};\n\n/**\n * Create an empty filter chain for testing purposes.\n * @param transport_socket_factory transport socket factory to use when creating transport sockets.\n * @return const FilterChainSharedPtr filter chain.\n */\nconst FilterChainSharedPtr\ncreateEmptyFilterChain(TransportSocketFactoryPtr&& transport_socket_factory);\n\n/**\n * Create an empty filter chain creating raw buffer sockets for testing purposes.\n * @return const FilterChainSharedPtr filter chain.\n */\nconst FilterChainSharedPtr createEmptyFilterChainWithRawBufferSockets();\n\n/**\n * Wrapper for Utility::readFromSocket() which reads a single datagram into the supplied\n * UdpRecvData without worrying about the packet processor interface. 
The function will\n * instantiate the buffer returned in data.\n */\nApi::IoCallUint64Result readFromSocket(IoHandle& handle, const Address::Instance& local_address,\n                                       UdpRecvData& data);\n\n/**\n * A synchronous UDP peer that can be used for testing.\n */\nclass UdpSyncPeer {\npublic:\n  UdpSyncPeer(Network::Address::IpVersion version);\n\n  // Writer a datagram to a remote peer.\n  void write(const std::string& buffer, const Network::Address::Instance& peer);\n\n  // Receive a datagram.\n  void recv(Network::UdpRecvData& datagram);\n\n  // Return the local peer's socket address.\n  const Network::Address::InstanceConstSharedPtr& localAddress() { return socket_->localAddress(); }\n\nprivate:\n  const Network::SocketPtr socket_;\n  std::list<Network::UdpRecvData> received_datagrams_;\n};\n\n} // namespace Test\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/network_utility_test.cc",
    "content": "#include <string>\n\n#include \"common/api/os_sys_calls_impl.h\"\n\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/network_utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Network {\nnamespace Test {\n\nclass NetworkUtilityTest : public testing::TestWithParam<Address::IpVersion> {\nprotected:\n  NetworkUtilityTest() : version_(GetParam()) {}\n  const Address::IpVersion version_;\n};\n\nINSTANTIATE_TEST_SUITE_P(IpVersions, NetworkUtilityTest,\n                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));\n\n// This validates Network::Test::bindFreeLoopbackPort behaves as desired, i.e. that we don't have\n// a significant risk of flakes due to re-use of a port over short time intervals. We can't drive\n// this too long else we'll eventually run out of ephemeral ports.\n//\n// Tested: IPv4 with --gtest_repeats=1000 and kLimit=1000 on Ubuntu with docker.\n// Result: Zero failures, presumably because of the randomization of the address.\n//\n// Tested: IPv6 --gtest_repeats=1000 and kLimit=50 on Ubuntu with docker.\n// Result: In about 5% of runs, two of the 50 allocated ports were the same, though not\n//         more than that.\n// The test is DISABLED as we don't want the occasional expected collisions to cause problems.\nTEST_P(NetworkUtilityTest, DISABLED_ValidateBindFreeLoopbackPort) {\n  std::map<std::string, size_t> seen;\n  const size_t kLimit = 50;\n  for (size_t n = 0; n < kLimit; ++n) {\n    auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Socket::Type::Stream);\n    addr_fd.second->close();\n    auto addr = addr_fd.first->asString();\n    auto search = seen.find(addr);\n    if (search != seen.end()) {\n      ADD_FAILURE() << \"Saw duplicate binds for address \" << addr << \" at steps \" << n << \" and \"\n                    << search->second;\n    }\n    seen[addr] = n;\n  }\n}\n\n} // namespace Test\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/only_one_thread.cc",
    "content": "#include \"test/test_common/only_one_thread.h\"\n\n#include \"envoy/thread/thread.h\"\n\n#include \"common/common/lock_guard.h\"\n\n#include \"test/test_common/thread_factory_for_test.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\nOnlyOneThread::OnlyOneThread() : thread_factory_(threadFactoryForTest()) {}\n\nvoid OnlyOneThread::checkOneThread() {\n  LockGuard lock(mutex_);\n  if (thread_advancing_time_.isEmpty()) {\n    thread_advancing_time_ = thread_factory_.currentThreadId();\n  } else {\n    RELEASE_ASSERT(thread_advancing_time_ == thread_factory_.currentThreadId(),\n                   \"time should only be advanced on one thread in the context of a test\");\n  }\n}\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/only_one_thread.h",
    "content": "#pragma once\n\n#include \"common/common/assert.h\"\n#include \"common/common/thread.h\"\n\nnamespace Envoy {\nnamespace Thread {\n\n// Ensures that an operation is performed on only one thread. The first caller\n// to OnlyOneThread::checkOneThread establishes the thread ID, and subsequent\n// ones will assert-fail if they do not match.\nclass OnlyOneThread {\npublic:\n  OnlyOneThread();\n\n  /**\n   * Ensures that one thread is used in a testcase to access some resource.\n   */\n  void checkOneThread();\n\nprivate:\n  ThreadFactory& thread_factory_;\n  ThreadId thread_advancing_time_ ABSL_GUARDED_BY(mutex_);\n  mutable MutexBasicLockable mutex_;\n};\n\n} // namespace Thread\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/printers.cc",
    "content": "#include \"printers.h\"\n#include \"test/test_common/printers.h\"\n\n#include <iostream>\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/header_map_impl.h\"\n\nnamespace Envoy {\nnamespace Http {\n// NOLINTNEXTLINE(readability-identifier-naming)\nvoid PrintTo(const HeaderMapImpl& headers, std::ostream* os) {\n  headers.iterate([os](const HeaderEntry& header) -> HeaderMap::Iterate {\n    *os << \"{'\" << header.key().getStringView() << \"','\" << header.value().getStringView() << \"'}\";\n    return HeaderMap::Iterate::Continue;\n  });\n}\n\nvoid PrintTo(const HeaderMapPtr& headers, std::ostream* os) {\n  PrintTo(*dynamic_cast<HeaderMapImpl*>(headers.get()), os);\n}\n\nvoid PrintTo(const HeaderMap& headers, std::ostream* os) {\n  PrintTo(*dynamic_cast<const HeaderMapImpl*>(&headers), os);\n}\n} // namespace Http\n\nnamespace Buffer {\nvoid PrintTo(const Instance& buffer, std::ostream* os) {\n  *os << \"buffer with size=\" << buffer.length();\n}\n\nvoid PrintTo(const Buffer::OwnedImpl& buffer, std::ostream* os) {\n  PrintTo(dynamic_cast<const Buffer::Instance&>(buffer), os);\n}\n} // namespace Buffer\n\nnamespace Network {\nnamespace Address {\nvoid PrintTo(const Instance& address, std::ostream* os) { *os << address.asString(); }\n} // namespace Address\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/printers.h",
    "content": "#pragma once\n\n#include <iostream>\n#include <memory>\n\n#include \"envoy/network/address.h\"\n\nnamespace Envoy {\nnamespace Http {\n/**\n * Pretty print const HeaderMapImpl&\n */\nclass HeaderMapImpl;\nvoid PrintTo(const HeaderMapImpl& headers, std::ostream* os);\n\n/**\n * Pretty print const HeaderMapPtr&\n */\nclass HeaderMap;\nusing HeaderMapPtr = std::unique_ptr<HeaderMap>;\nvoid PrintTo(const HeaderMap& headers, std::ostream* os);\nvoid PrintTo(const HeaderMapPtr& headers, std::ostream* os);\n} // namespace Http\n\nnamespace Buffer {\n/**\n * Pretty print const Instance&\n */\nclass Instance;\nvoid PrintTo(const Instance& buffer, std::ostream* os);\n\n/**\n * Pretty print const Buffer::OwnedImpl&\n */\nclass OwnedImpl;\nvoid PrintTo(const OwnedImpl& buffer, std::ostream* os);\n} // namespace Buffer\n\nnamespace Network {\nnamespace Address {\nvoid PrintTo(const Instance& address, std::ostream* os);\n}\n} // namespace Network\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/registry.h",
    "content": "#pragma once\n\n#include \"envoy/registry/registry.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Registry {\n\n/**\n * Factory registration template for tests. This can be used to inject a mock or dummy version\n * of a factory for testing purposes. It will restore the original value, if any, when it goes\n * out of scope.\n */\ntemplate <class Base> class InjectFactory {\npublic:\n  InjectFactory(Base& instance) : InjectFactory(instance, {}) {}\n\n  InjectFactory(Base& instance, std::initializer_list<absl::string_view> deprecated_names)\n      : instance_(instance) {\n    EXPECT_STRNE(instance.category().c_str(), \"\");\n\n    original_ = Registry::FactoryRegistry<Base>::getFactory(instance_.name());\n    restore_factories_ =\n        Registry::FactoryRegistry<Base>::replaceFactoryForTest(instance_, deprecated_names);\n  }\n\n  ~InjectFactory() {\n    restore_factories_();\n\n    auto* restored = Registry::FactoryRegistry<Base>::getFactory(instance_.name());\n    ASSERT(restored == original_);\n  }\n\n  // Rebuilds the registry's factory-by-type mapping from scratch. In most cases, this is handled\n  // by the replaceFactoryForTest calls in the constructor and destructor. This method is only\n  // necessary if the disabled state of the factory is modified.\n  void resetTypeMappings() { Registry::FactoryRegistry<Base>::rebuildFactoriesByTypeForTest(); }\n\nprivate:\n  Base& instance_;\n  Base* original_{};\n  std::function<void()> restore_factories_;\n};\n\n/**\n * Registers a factory category for tests. Most tests do not need this functionality. 
It's only\n * useful for testing the registration infrastructure.\n */\ntemplate <class Base> class InjectFactoryCategory {\npublic:\n  InjectFactoryCategory(Base& instance)\n      : proxy_(std::make_unique<FactoryRegistryProxyImpl<Base>>()), instance_(instance) {\n    // Register a new category.\n    FactoryCategoryRegistry::registerCategory(instance_.category(), proxy_.get());\n  }\n\n  ~InjectFactoryCategory() {\n    FactoryCategoryRegistry::deregisterCategoryForTest(instance_.category());\n  }\n\nprivate:\n  std::unique_ptr<FactoryRegistryProxyImpl<Base>> proxy_;\n  Base& instance_;\n};\n\n} // namespace Registry\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/resources.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"common/singleton/const_singleton.h\"\n\nnamespace Envoy {\nnamespace Config {\n\n/**\n * Constant Type URLs.\n */\nclass TypeUrlValues {\npublic:\n  const std::string Listener{\"type.googleapis.com/envoy.api.v2.Listener\"};\n  const std::string Cluster{\"type.googleapis.com/envoy.api.v2.Cluster\"};\n  const std::string ClusterLoadAssignment{\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\"};\n  const std::string Secret{\"type.googleapis.com/envoy.api.v2.auth.Secret\"};\n  const std::string RouteConfiguration{\"type.googleapis.com/envoy.api.v2.RouteConfiguration\"};\n  const std::string VirtualHost{\"type.googleapis.com/envoy.api.v2.route.VirtualHost\"};\n  const std::string ScopedRouteConfiguration{\n      \"type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration\"};\n  const std::string Runtime{\"type.googleapis.com/envoy.service.discovery.v2.Runtime\"};\n};\n\nusing TypeUrl = ConstSingleton<TypeUrlValues>;\n\n} // namespace Config\n} // namespace Envoy"
  },
  {
    "path": "test/test_common/simulated_time_system.cc",
    "content": "#include \"test/test_common/simulated_time_system.h\"\n\n#include <chrono>\n\n#include \"envoy/event/dispatcher.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/event/real_time_system.h\"\n#include \"common/event/timer_impl.h\"\n#include \"common/runtime/runtime_features.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nnamespace {\nclass UnlockGuard {\npublic:\n  /**\n   * Establishes a scoped mutex-lock; the mutex is unlocked upon construction.\n   * The main motivation for setting up a class to manage this, rather than\n   * simply { mutex.unlock(); operation(); mutex.lock(); } is that in method\n   * Alarm::activateLockHeld(), the mutex is owned by the time-system, which\n   * lives long enough. However the Alarm may be destructed while the lock is\n   * dropped, so there can be a tsan error when re-taking time_system_.mutex_.\n   *\n   * It's also easy to make a temp mutex reference, however this confuses\n   * clang's thread-annotation analysis, whereas this unlock-guard seems to work\n   * with thread annotation.\n   *\n   * Another reason to use this Guard class is so that the mutex is re-taken\n   * even if there is an exception thrown while the lock is dropped. That is\n   * not likely to happen at this call-site as the functions being called don't\n   * throw.\n   *\n   * @param lock the mutex.\n   */\n  explicit UnlockGuard(absl::Mutex& lock) : lock_(lock) { lock_.Unlock(); }\n\n  /**\n   * Destruction of the UnlockGuard re-locks the lock.\n   */\n  ~UnlockGuard() { lock_.Lock(); }\n\nprivate:\n  absl::Mutex& lock_;\n};\n} // namespace\n\n// Each timer is maintained and ordered by a common TimeSystem, but is\n// associated with a scheduler. 
The scheduler creates the timers with a libevent\n// context, so that the timer callbacks can be executed via Dispatcher::run() in\n// the expected thread.\nclass SimulatedTimeSystemHelper::SimulatedScheduler : public Scheduler {\npublic:\n  SimulatedScheduler(SimulatedTimeSystemHelper& time_system, CallbackScheduler& cb_scheduler)\n      : time_system_(time_system), cb_scheduler_(cb_scheduler),\n        thread_factory_(Thread::threadFactoryForTest()),\n        run_alarms_cb_(cb_scheduler.createSchedulableCallback([this] { runReadyAlarms(); })),\n        monotonic_time_(time_system_.monotonicTime()), system_time_(time_system_.systemTime()) {\n    time_system_.registerScheduler(this);\n  }\n  ~SimulatedScheduler() override { time_system_.unregisterScheduler(this); }\n\n  // From Scheduler.\n  TimerPtr createTimer(const TimerCb& cb, Dispatcher& /*dispatcher*/) override;\n\n  // Implementation of SimulatedTimeSystemHelper::Alarm methods.\n  bool isEnabled(Alarm& alarm) ABSL_LOCKS_EXCLUDED(mutex_);\n  void enableAlarm(Alarm& alarm, const std::chrono::microseconds duration)\n      ABSL_LOCKS_EXCLUDED(mutex_);\n  void disableAlarm(Alarm& alarm) ABSL_LOCKS_EXCLUDED(mutex_) {\n    absl::MutexLock lock(&mutex_);\n    disableAlarmLockHeld(alarm);\n    // Wait until alarm processing for the current thread completes when disabling from outside the\n    // event loop thread. 
This helps avoid data races when deleting Alarm objects from outside the\n    // event loop thread.\n    if (running_cbs_ && !thread_advancing_time_.isEmpty() &&\n        thread_advancing_time_ != thread_factory_.currentThreadId()) {\n      waitForNoRunningCallbacksLockHeld();\n    }\n  }\n\n  // Called by SimulatedTimeSystemHelper::setMonotonicTime to update the time associated with each\n  // of the simulated schedulers and associated alarms.\n  void updateTime(MonotonicTime monotonic_time, SystemTime system_time)\n      ABSL_LOCKS_EXCLUDED(mutex_) {\n    bool inc_pending = false;\n    {\n      absl::MutexLock lock(&mutex_);\n      // Wait until the event loop associated with this scheduler is not executing callbacks so time\n      // does not change in the middle of a callback.\n      waitForNoRunningCallbacksLockHeld();\n      monotonic_time_ = monotonic_time;\n      system_time_ = system_time;\n      if (!pending_dec_ && (!registered_alarms_.empty() || !triggered_alarms_.empty())) {\n        // Selectively increment the pending updates counter only on dispatchers that have active\n        // alarms to allow advanceTimeWait to work but avoid getting stuck if some of the event\n        // loops associated with some of the registered simulated schedulers is not currently\n        // active.\n        inc_pending = true;\n        pending_dec_ = true;\n      }\n    }\n    if (inc_pending) {\n      time_system_.incPending();\n    }\n\n    if (!run_alarms_cb_->enabled()) {\n      if (Runtime::runtimeFeatureEnabled(\n              \"envoy.reloadable_features.activate_timers_next_event_loop\")) {\n        run_alarms_cb_->scheduleCallbackNextIteration();\n      } else {\n        run_alarms_cb_->scheduleCallbackCurrentIteration();\n      }\n    }\n  }\n\nprivate:\n  void waitForNoRunningCallbacksLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {\n    mutex_.Await(absl::Condition(\n        +[](bool* running_cbs) -> bool { return !*running_cbs; }, &running_cbs_));\n  }\n\n  
void disableAlarmLockHeld(Alarm& alarm) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);\n\n  // Collect expired alarms and execute associated callbacks.\n  void runReadyAlarms() ABSL_LOCKS_EXCLUDED(mutex_);\n\n  struct AlarmRegistration {\n    AlarmRegistration(MonotonicTime time, uint64_t randomness, Alarm& alarm)\n        : time_(time), randomness_(randomness), alarm_(alarm) {}\n\n    MonotonicTime time_;\n    // Random tie-breaker for alarms scheduled for the same monotonic time used to mimic\n    // non-deterministic execution of real alarms scheduled for the same wall time.\n    uint64_t randomness_;\n    Alarm& alarm_;\n\n    friend bool operator<(const AlarmRegistration& lhs, const AlarmRegistration& rhs) {\n      if (lhs.time_ != rhs.time_) {\n        return lhs.time_ < rhs.time_;\n      }\n      if (lhs.randomness_ != rhs.randomness_) {\n        return lhs.randomness_ < rhs.randomness_;\n      }\n      // Out of paranoia, use pointer comparison on the alarms as a final tie-breaker but also\n      // ASSERT that this branch isn't hit in debug modes since in practice the randomness_\n      // associated with two registrations should never be equal.\n      ASSERT(false, \"Alarm registration randomness_ for two alarms should never be equal.\");\n      return &lhs.alarm_ < &rhs.alarm_;\n    }\n  };\n\n  class AlarmSet {\n  public:\n    bool empty() const { return sorted_alarms_.empty(); }\n\n    const AlarmRegistration& next() const {\n      ASSERT(!empty());\n      return *sorted_alarms_.begin();\n    }\n\n    void add(AlarmRegistration registration) {\n      auto insert_result = sorted_alarms_.insert(registration);\n      ASSERT(insert_result.second);\n      alarm_registrations_map_.emplace(&registration.alarm_, insert_result.first);\n\n      // Sanity check that the parallel data structures used for alarm registration have the same\n      // number of entries.\n      ASSERT(sorted_alarms_.size() == alarm_registrations_map_.size());\n    }\n\n    bool remove(Alarm& 
alarm) {\n      auto it = alarm_registrations_map_.find(&alarm);\n      if (it == alarm_registrations_map_.end()) {\n        return false;\n      }\n      sorted_alarms_.erase(it->second);\n      alarm_registrations_map_.erase(it);\n\n      // Sanity check that the parallel data structures used for alarm registration have the same\n      // number of entries.\n      ASSERT(sorted_alarms_.size() == alarm_registrations_map_.size());\n      return true;\n    }\n\n    bool contains(Alarm& alarm) const {\n      return alarm_registrations_map_.find(&alarm) != alarm_registrations_map_.end();\n    }\n\n  private:\n    std::set<AlarmRegistration> sorted_alarms_;\n    absl::flat_hash_map<Alarm*, std::set<AlarmRegistration>::const_iterator>\n        alarm_registrations_map_;\n  };\n\n  SimulatedTimeSystemHelper& time_system_;\n  CallbackScheduler& cb_scheduler_;\n  Thread::ThreadFactory& thread_factory_;\n  SchedulableCallbackPtr run_alarms_cb_;\n\n  absl::Mutex mutex_;\n  bool running_cbs_ ABSL_GUARDED_BY(mutex_) = false;\n  AlarmSet registered_alarms_ ABSL_GUARDED_BY(mutex_);\n  AlarmSet triggered_alarms_ ABSL_GUARDED_BY(mutex_);\n\n  MonotonicTime monotonic_time_ ABSL_GUARDED_BY(mutex_);\n  SystemTime system_time_ ABSL_GUARDED_BY(mutex_);\n\n  // Id of the thread where the event loop is running.\n  Thread::ThreadId thread_advancing_time_ ABSL_GUARDED_BY(mutex_);\n  // True if the SimulatedTimeSystemHelper is waiting for the scheduler to process expired alarms\n  // and call decPending after an update to monotonic time.\n  bool pending_dec_ ABSL_GUARDED_BY(mutex_) = false;\n  // Used to randomize the ordering of alarms scheduled for the same time when the runtime feature\n  // envoy.reloadable_features.activate_timers_next_event_loop is enabled. 
This mimics the trigger\n  // order of real timers scheduled for the same absolute time is non-deterministic.\n  // Each simulated scheduler has it's own TestRandomGenerator with the same seed to improve test\n  // failure reproducibility when running against a specific seed by minimizing cross scheduler\n  // interactions.\n  TestRandomGenerator random_source_ ABSL_GUARDED_BY(mutex_);\n  uint64_t legacy_next_idx_ ABSL_GUARDED_BY(mutex_) = 0;\n};\n\n// Our simulated alarm inherits from TimerImpl so that the same dispatching\n// mechanism used in RealTimeSystem timers is employed for simulated alarms.\nclass SimulatedTimeSystemHelper::Alarm : public Timer {\npublic:\n  Alarm(SimulatedScheduler& simulated_scheduler, SimulatedTimeSystemHelper& time_system,\n        CallbackScheduler& /*cb_scheduler*/, TimerCb cb)\n      : cb_(cb), simulated_scheduler_(simulated_scheduler), time_system_(time_system) {}\n\n  ~Alarm() override;\n\n  // Timer\n  void disableTimer() override;\n  void enableTimer(const std::chrono::milliseconds duration,\n                   const ScopeTrackedObject* scope) override {\n    enableHRTimer(duration, scope);\n  };\n  void enableHRTimer(const std::chrono::microseconds duration,\n                     const ScopeTrackedObject* scope) override;\n  bool enabled() override { return simulated_scheduler_.isEnabled(*this); }\n\n  SimulatedTimeSystemHelper& timeSystem() { return time_system_; }\n\n  void runAlarm() { cb_(); }\n\nprivate:\n  TimerCb cb_;\n  SimulatedScheduler& simulated_scheduler_;\n  SimulatedTimeSystemHelper& time_system_;\n};\n\nTimerPtr SimulatedTimeSystemHelper::SimulatedScheduler::createTimer(const TimerCb& cb,\n                                                                    Dispatcher& /*dispatcher*/) {\n  return std::make_unique<SimulatedTimeSystemHelper::Alarm>(*this, time_system_, cb_scheduler_, cb);\n}\n\nbool SimulatedTimeSystemHelper::SimulatedScheduler::isEnabled(Alarm& alarm) {\n  absl::MutexLock lock(&mutex_);\n  return 
registered_alarms_.contains(alarm) || triggered_alarms_.contains(alarm);\n}\n\nvoid SimulatedTimeSystemHelper::SimulatedScheduler::enableAlarm(\n    Alarm& alarm, const std::chrono::microseconds duration) {\n  {\n    absl::MutexLock lock(&mutex_);\n    if (duration.count() == 0 && triggered_alarms_.contains(alarm)) {\n      return;\n    } else if (Runtime::runtimeFeatureEnabled(\n                   \"envoy.reloadable_features.activate_timers_next_event_loop\")) {\n      disableAlarmLockHeld(alarm);\n      registered_alarms_.add({monotonic_time_ + duration, random_source_.random(), alarm});\n    } else {\n      disableAlarmLockHeld(alarm);\n      AlarmSet& alarm_set = (duration.count() != 0) ? registered_alarms_ : triggered_alarms_;\n      alarm_set.add({monotonic_time_ + duration, ++legacy_next_idx_, alarm});\n    }\n  }\n\n  if (duration.count() == 0) {\n    if (Runtime::runtimeFeatureEnabled(\n            \"envoy.reloadable_features.activate_timers_next_event_loop\")) {\n      run_alarms_cb_->scheduleCallbackNextIteration();\n    } else {\n      run_alarms_cb_->scheduleCallbackCurrentIteration();\n    }\n  }\n}\n\nvoid SimulatedTimeSystemHelper::SimulatedScheduler::disableAlarmLockHeld(Alarm& alarm) {\n  if (triggered_alarms_.contains(alarm)) {\n    ASSERT(!registered_alarms_.contains(alarm));\n    triggered_alarms_.remove(alarm);\n  } else {\n    ASSERT(!triggered_alarms_.contains(alarm));\n    registered_alarms_.remove(alarm);\n  }\n}\n\nvoid SimulatedTimeSystemHelper::SimulatedScheduler::runReadyAlarms() {\n  bool dec_pending = false;\n  {\n    absl::MutexLock lock(&mutex_);\n    if (pending_dec_) {\n      dec_pending = true;\n      pending_dec_ = false;\n    }\n    if (thread_advancing_time_.isEmpty()) {\n      thread_advancing_time_ = thread_factory_.currentThreadId();\n    } else {\n      ASSERT(thread_advancing_time_ == thread_factory_.currentThreadId());\n    }\n    auto monotonic_time = monotonic_time_;\n    while (!registered_alarms_.empty()) {\n      
const AlarmRegistration& alarm_registration = registered_alarms_.next();\n      MonotonicTime alarm_time = alarm_registration.time_;\n      if (alarm_time > monotonic_time) {\n        break;\n      }\n      triggered_alarms_.add(alarm_registration);\n      registered_alarms_.remove(alarm_registration.alarm_);\n    }\n\n    ASSERT(!running_cbs_);\n    running_cbs_ = true;\n    while (!triggered_alarms_.empty()) {\n      Alarm& alarm = triggered_alarms_.next().alarm_;\n      triggered_alarms_.remove(alarm);\n      UnlockGuard unlocker(mutex_);\n      alarm.runAlarm();\n    }\n    ASSERT(running_cbs_);\n    ASSERT(monotonic_time == monotonic_time_);\n    running_cbs_ = false;\n  }\n  if (dec_pending) {\n    time_system_.decPending();\n  }\n}\n\nSimulatedTimeSystemHelper::Alarm::Alarm::~Alarm() { simulated_scheduler_.disableAlarm(*this); }\n\nvoid SimulatedTimeSystemHelper::Alarm::Alarm::disableTimer() {\n  simulated_scheduler_.disableAlarm(*this);\n}\n\nvoid SimulatedTimeSystemHelper::Alarm::Alarm::enableHRTimer(\n    const std::chrono::microseconds duration, const ScopeTrackedObject* /*scope*/) {\n  simulated_scheduler_.enableAlarm(*this, duration);\n}\n\n// It would be very confusing if there were more than one simulated time system\n// extant at once. Technically this might be something we want, but more likely\n// it indicates some kind of plumbing error in test infrastructure. So track\n// the instance count with a simple int. In the future if there's a good reason\n// to have more than one around at a time, this variable can be deleted.\nstatic int instance_count = 0;\n\n// When we initialize our simulated time, we'll start the current time based on\n// the real current time. 
But thereafter, real-time will not be used, and time\n// will march forward only by calling advanceTimeAndRun() or advanceTimeWait().\nSimulatedTimeSystemHelper::SimulatedTimeSystemHelper()\n    : monotonic_time_(MonotonicTime(std::chrono::seconds(0))),\n      system_time_(real_time_source_.systemTime()), pending_updates_(0) {\n  ++instance_count;\n  ASSERT(instance_count <= 1);\n}\n\nSimulatedTimeSystemHelper::~SimulatedTimeSystemHelper() { --instance_count; }\n\nbool SimulatedTimeSystemHelper::hasInstance() { return instance_count > 0; }\n\nSystemTime SimulatedTimeSystemHelper::systemTime() {\n  absl::MutexLock lock(&mutex_);\n  return system_time_;\n}\n\nMonotonicTime SimulatedTimeSystemHelper::monotonicTime() {\n  absl::MutexLock lock(&mutex_);\n  return monotonic_time_;\n}\n\nvoid SimulatedTimeSystemHelper::advanceTimeAsyncImpl(const Duration& duration) {\n  only_one_thread_.checkOneThread();\n  absl::MutexLock lock(&mutex_);\n  MonotonicTime monotonic_time =\n      monotonic_time_ + std::chrono::duration_cast<MonotonicTime::duration>(duration);\n  setMonotonicTimeLockHeld(monotonic_time);\n}\n\nvoid SimulatedTimeSystemHelper::advanceTimeWaitImpl(const Duration& duration) {\n  only_one_thread_.checkOneThread();\n  absl::MutexLock lock(&mutex_);\n  MonotonicTime monotonic_time =\n      monotonic_time_ + std::chrono::duration_cast<MonotonicTime::duration>(duration);\n  setMonotonicTimeLockHeld(monotonic_time);\n  waitForNoPendingLockHeld();\n}\n\nvoid SimulatedTimeSystemHelper::waitForNoPendingLockHeld() const\n    ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {\n  mutex_.Await(absl::Condition(\n      +[](const uint32_t* pending_updates) -> bool { return *pending_updates == 0; },\n      &pending_updates_));\n}\n\nSchedulerPtr SimulatedTimeSystemHelper::createScheduler(Scheduler& /*base_scheduler*/,\n                                                        CallbackScheduler& cb_scheduler) {\n  return std::make_unique<SimulatedScheduler>(*this, cb_scheduler);\n}\n\nvoid 
SimulatedTimeSystemHelper::setMonotonicTimeLockHeld(const MonotonicTime& monotonic_time) {\n  only_one_thread_.checkOneThread();\n  // We don't have a MutexLock construct that allows temporarily\n  // dropping the lock to run a callback. The main issue here is that we must\n  // be careful not to be holding mutex_ when an exception can be thrown.\n  // That can only happen here in alarm->activate(), which is run with the mutex\n  // released.\n  if (monotonic_time >= monotonic_time_) {\n    system_time_ +=\n        std::chrono::duration_cast<SystemTime::duration>(monotonic_time - monotonic_time_);\n    monotonic_time_ = monotonic_time;\n    for (SimulatedScheduler* scheduler : schedulers_) {\n      UnlockGuard unlocker(mutex_);\n      scheduler->updateTime(monotonic_time_, system_time_);\n    }\n  }\n}\n\nvoid SimulatedTimeSystemHelper::setSystemTime(const SystemTime& system_time) {\n  absl::MutexLock lock(&mutex_);\n  if (system_time > system_time_) {\n    MonotonicTime monotonic_time =\n        monotonic_time_ +\n        std::chrono::duration_cast<MonotonicTime::duration>(system_time - system_time_);\n    setMonotonicTimeLockHeld(monotonic_time);\n  } else {\n    system_time_ = system_time;\n  }\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/simulated_time_system.h",
    "content": "#pragma once\n\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread.h\"\n#include \"common/common/utility.h\"\n\n#include \"test/test_common/test_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"absl/container/flat_hash_map.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n// Implements a simulated time system including a scheduler for timers. This is\n// designed to be used as the exclusive time-system resident in a process at\n// any particular time, and as such should not be instantiated directly by\n// tests. Instead it should be instantiated via SimulatedTimeSystem, declared\n// below.\nclass SimulatedTimeSystemHelper : public TestTimeSystem {\npublic:\n  SimulatedTimeSystemHelper();\n  ~SimulatedTimeSystemHelper() override;\n\n  // TimeSystem\n  SchedulerPtr createScheduler(Scheduler& base_scheduler, CallbackScheduler& cb_scheduler) override;\n\n  // TestTimeSystem\n  void advanceTimeWaitImpl(const Duration& duration) override;\n  void advanceTimeAsyncImpl(const Duration& duration) override;\n\n  // TimeSource\n  SystemTime systemTime() override;\n  MonotonicTime monotonicTime() override;\n\n  /**\n   * Sets the time forward monotonically. If the supplied argument moves\n   * backward in time, the call is a no-op. If the supplied argument moves\n   * forward, any applicable timers are fired, and system-time is also moved\n   * forward by the same delta.\n   *\n   * @param monotonic_time The desired new current time.\n   */\n  void setMonotonicTime(const MonotonicTime& monotonic_time) {\n    absl::MutexLock lock(&mutex_);\n    setMonotonicTimeLockHeld(monotonic_time);\n  }\n\n  /**\n   * Sets the system-time, whether forward or backward. 
If time moves forward,\n   * applicable timers are fired and monotonic time is also increased by the\n   * same delta.\n   *\n   * @param system_time The desired new system time.\n   */\n  void setSystemTime(const SystemTime& system_time);\n\n  static bool hasInstance();\n\nprivate:\n  class SimulatedScheduler;\n  class Alarm;\n\n  void registerScheduler(SimulatedScheduler* scheduler) {\n    absl::MutexLock lock(&mutex_);\n    schedulers_.insert(scheduler);\n  }\n\n  void unregisterScheduler(SimulatedScheduler* scheduler) {\n    absl::MutexLock lock(&mutex_);\n    schedulers_.erase(scheduler);\n  }\n\n  /**\n   * Sets the time forward monotonically. If the supplied argument moves\n   * backward in time, the call is a no-op. If the supplied argument moves\n   * forward, any applicable timers are fired, and system-time is also moved\n   * forward by the same delta.\n   *\n   * @param monotonic_time The desired new current time.\n   */\n  void setMonotonicTimeLockHeld(const MonotonicTime& monotonic_time)\n      ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);\n\n  // Keeps track of the number of simulated schedulers that have pending monotonic time updates.\n  // Used by advanceTimeWait() to determine when the time updates have finished propagating.\n  void incPending() {\n    absl::MutexLock lock(&mutex_);\n    ++pending_updates_;\n  }\n  void decPending() {\n    absl::MutexLock lock(&mutex_);\n    --pending_updates_;\n  }\n  void waitForNoPendingLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);\n\n  RealTimeSource real_time_source_; // Used to initialize monotonic_time_ and system_time_;\n  MonotonicTime monotonic_time_ ABSL_GUARDED_BY(mutex_);\n  SystemTime system_time_ ABSL_GUARDED_BY(mutex_);\n  TestRandomGenerator random_source_ ABSL_GUARDED_BY(mutex_);\n  std::set<SimulatedScheduler*> schedulers_ ABSL_GUARDED_BY(mutex_);\n  mutable absl::Mutex mutex_;\n  uint32_t pending_updates_ ABSL_GUARDED_BY(mutex_);\n};\n\n// Represents a simulated time system, where time is 
advanced by calling\n// sleep(), setSystemTime(), or setMonotonicTime(). systemTime() and\n// monotonicTime() are maintained in the class, and alarms are fired in response\n// to adjustments in time.\nclass SimulatedTimeSystem : public DelegatingTestTimeSystem<SimulatedTimeSystemHelper> {\npublic:\n  void setMonotonicTime(const MonotonicTime& monotonic_time) {\n    timeSystem().setMonotonicTime(monotonic_time);\n  }\n  void setSystemTime(const SystemTime& system_time) { timeSystem().setSystemTime(system_time); }\n\n  template <class Duration> void setMonotonicTime(const Duration& duration) {\n    setMonotonicTime(MonotonicTime(duration));\n  }\n  template <class Duration> void setSystemTime(const Duration& duration) {\n    setSystemTime(SystemTime(duration));\n  }\n};\n\n// Class encapsulating a SimulatedTimeSystem, intended for integration tests.\n// Inherit from this mixin in a test fixture class to use a SimulatedTimeSystem\n// during the test.\nclass TestUsingSimulatedTime {\npublic:\n  SimulatedTimeSystem& simTime() { return sim_time_; }\n\nprivate:\n  SimulatedTimeSystem sim_time_;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/simulated_time_system_test.cc",
    "content": "#include \"common/common/thread.h\"\n#include \"common/event/libevent.h\"\n#include \"common/event/libevent_scheduler.h\"\n#include \"common/event/timer_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_runtime.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"event2/event.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Event {\nnamespace Test {\nnamespace {\n\nenum class ActivateMode { DelayActivateTimers, EagerlyActivateTimers };\n\nclass SimulatedTimeSystemTest : public testing::TestWithParam<ActivateMode> {\nprotected:\n  SimulatedTimeSystemTest()\n      : scheduler_(time_system_.createScheduler(base_scheduler_, base_scheduler_)),\n        start_monotonic_time_(time_system_.monotonicTime()),\n        start_system_time_(time_system_.systemTime()) {\n    Runtime::LoaderSingleton::getExisting()->mergeValues(\n        {{\"envoy.reloadable_features.activate_timers_next_event_loop\",\n          activateMode() == ActivateMode::DelayActivateTimers ? 
\"true\" : \"false\"}});\n  }\n\n  ActivateMode activateMode() { return GetParam(); }\n\n  void trackPrepareCalls() {\n    base_scheduler_.registerOnPrepareCallback([this]() { output_.append(1, 'p'); });\n  }\n\n  void addTask(int64_t delay_ms, char marker, bool expect_monotonic = true) {\n    addCustomTask(\n        delay_ms, marker, []() {}, expect_monotonic);\n  }\n\n  void addCustomTask(int64_t delay_ms, char marker, std::function<void()> cb,\n                     bool expect_monotonic = true) {\n    std::chrono::milliseconds delay(delay_ms);\n    TimerPtr timer = scheduler_->createTimer(\n        [this, marker, delay, cb, expect_monotonic]() {\n          output_.append(1, marker);\n          if (expect_monotonic) {\n            EXPECT_GE(time_system_.monotonicTime(), start_monotonic_time_ + delay);\n          }\n          cb();\n        },\n        dispatcher_);\n    timer->enableTimer(delay);\n    timers_.push_back(std::move(timer));\n  }\n\n  void advanceMsAndLoop(int64_t delay_ms) {\n    time_system_.advanceTimeAndRun(std::chrono::milliseconds(delay_ms), base_scheduler_,\n                                   Dispatcher::RunType::NonBlock);\n  }\n\n  void advanceSystemMsAndLoop(int64_t delay_ms) {\n    time_system_.setSystemTime(time_system_.systemTime() + std::chrono::milliseconds(delay_ms));\n    base_scheduler_.run(Dispatcher::RunType::NonBlock);\n  }\n\n  TestScopedRuntime scoped_runtime_;\n  Event::MockDispatcher dispatcher_;\n  LibeventScheduler base_scheduler_;\n  SimulatedTimeSystem time_system_;\n  SchedulerPtr scheduler_;\n  std::string output_;\n  std::vector<TimerPtr> timers_;\n  MonotonicTime start_monotonic_time_;\n  SystemTime start_system_time_;\n};\n\nINSTANTIATE_TEST_SUITE_P(DelayTimerActivation, SimulatedTimeSystemTest,\n                         testing::Values(ActivateMode::DelayActivateTimers,\n                                         ActivateMode::EagerlyActivateTimers));\n\nTEST_P(SimulatedTimeSystemTest, AdvanceTimeAsync) {\n  
EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime());\n  EXPECT_EQ(start_system_time_, time_system_.systemTime());\n  advanceMsAndLoop(5);\n  EXPECT_EQ(start_monotonic_time_ + std::chrono::milliseconds(5), time_system_.monotonicTime());\n  EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime());\n}\n\nTEST_P(SimulatedTimeSystemTest, TimerTotalOrdering) {\n  trackPrepareCalls();\n\n  addTask(0, '0');\n  addTask(1, '1');\n  addTask(2, '2');\n  EXPECT_EQ(3, timers_.size());\n\n  advanceMsAndLoop(5);\n\n  // Verify order.\n  EXPECT_EQ(\"p012\", output_);\n}\n\nTEST_P(SimulatedTimeSystemTest, TimerPartialOrdering) {\n  trackPrepareCalls();\n\n  std::set<std::string> outputs;\n  for (int i = 0; i < 100; ++i) {\n    addTask(0, '0');\n    addTask(1, '1');\n    addTask(1, '2');\n    addTask(3, '3');\n    EXPECT_EQ(4, timers_.size());\n\n    advanceMsAndLoop(5);\n\n    outputs.insert(output_);\n\n    // Cleanup before the next iteration.\n    output_.clear();\n    timers_.clear();\n  }\n\n  if (activateMode() == ActivateMode::DelayActivateTimers) {\n    // Execution order of timers 1 and 2 is non-deterministic because the two timers were scheduled\n    // for the same time. 
Verify that both orderings were observed.\n    EXPECT_THAT(outputs, testing::ElementsAre(\"p0123\", \"p0213\"));\n  } else {\n    EXPECT_THAT(outputs, testing::ElementsAre(\"p0123\"));\n  }\n}\n\nTEST_P(SimulatedTimeSystemTest, TimerPartialOrdering2) {\n  trackPrepareCalls();\n\n  std::set<std::string> outputs;\n  for (int i = 0; i < 100; ++i) {\n    addTask(0, '0');\n    addTask(15, '1');\n    advanceMsAndLoop(10);\n\n    // Timer 1 has 5ms remaining, so timer 2 ends up scheduled at the same monotonic time as 1.\n    addTask(5, '2');\n    addTask(6, '3');\n    advanceMsAndLoop(10);\n\n    outputs.insert(output_);\n\n    // Cleanup before the next iteration.\n    output_.clear();\n    timers_.clear();\n  }\n\n  if (activateMode() == ActivateMode::DelayActivateTimers) {\n    // Execution order of timers 1 and 2 is non-deterministic because the two timers were scheduled\n    // for the same time. Verify that both orderings were observed.\n    EXPECT_THAT(outputs, testing::ElementsAre(\"p0p123\", \"p0p213\"));\n  } else {\n    EXPECT_THAT(outputs, testing::ElementsAre(\"p0p123\"));\n  }\n}\n\n// Timers that are scheduled to execute and but are disabled first do not trigger.\nTEST_P(SimulatedTimeSystemTest, TimerOrderAndDisableTimer) {\n  trackPrepareCalls();\n\n  // Create 3 timers. 
The first timer should disable the second, so it doesn't trigger.\n  addCustomTask(0, '0', [this]() { timers_[1]->disableTimer(); });\n  addTask(1, '1');\n  addTask(2, '2');\n  EXPECT_EQ(3, timers_.size());\n\n  // Expect timers to execute in order since the timers are scheduled at have different times and\n  // that timer 1 does not execute because it was disabled as part of 0's execution.\n  advanceMsAndLoop(5);\n  // Verify that timer 1 was skipped.\n  EXPECT_EQ(\"p02\", output_);\n}\n\n// Capture behavior of timers which are rescheduled without being disabled first.\nTEST_P(SimulatedTimeSystemTest, TimerOrderAndRescheduleTimer) {\n  trackPrepareCalls();\n\n  // Reschedule timers 1, 2 and 4 without disabling first.\n  addCustomTask(0, '0', [this]() {\n    timers_[1]->enableTimer(std::chrono::milliseconds(0));\n    timers_[2]->enableTimer(std::chrono::milliseconds(100));\n    timers_[4]->enableTimer(std::chrono::milliseconds(0));\n  });\n  addTask(1, '1');\n  addTask(2, '2');\n  addTask(3, '3');\n  addTask(10000, '4', false);\n  EXPECT_EQ(5, timers_.size());\n\n  // Rescheduling timers that are already scheduled to run in the current event loop iteration has\n  // no effect if the time delta is 0. Expect timers 0, 1 and 3 to execute in the original order.\n  // Timer 4 runs as part of the first wakeup since its new schedule time has a delta of 0. Timer 2\n  // is delayed since it is rescheduled with a non-zero delta.\n  advanceMsAndLoop(5);\n  if (activateMode() == ActivateMode::DelayActivateTimers) {\n#ifdef WIN32\n    // Force it to run again to pick up next iteration callbacks.\n    // The event loop runs for a single iteration in NonBlock mode on Windows as a hack to work\n    // around LEVEL trigger fd registrations constantly firing events and preventing the NonBlock\n    // event loop from ever reaching the no-fd event and no-expired timers termination condition. 
It\n    // is not possible to get consistent event loop behavior since the time system does not override\n    // the base scheduler's run behavior, and libevent does not provide a mode where it runs at most\n    // N iterations before breaking out of the loop for us to prefer over the single iteration mode\n    // used on Windows.\n    advanceMsAndLoop(0);\n#endif\n    EXPECT_EQ(\"p013p4\", output_);\n  } else {\n    EXPECT_EQ(\"p0134\", output_);\n  }\n\n  advanceMsAndLoop(100);\n  if (activateMode() == ActivateMode::DelayActivateTimers) {\n    EXPECT_EQ(\"p013p4p2\", output_);\n  } else {\n    EXPECT_EQ(\"p0134p2\", output_);\n  }\n}\n\n// Disable and re-enable timers that is already pending execution and verify that execution is\n// delayed.\nTEST_P(SimulatedTimeSystemTest, TimerOrderDisableAndRescheduleTimer) {\n  trackPrepareCalls();\n\n  // Disable and reschedule timers 1, 2 and 4 when timer 0 triggers.\n  addCustomTask(0, '0', [this]() {\n    timers_[1]->disableTimer();\n    timers_[1]->enableTimer(std::chrono::milliseconds(0));\n    timers_[2]->disableTimer();\n    timers_[2]->enableTimer(std::chrono::milliseconds(100));\n    timers_[4]->disableTimer();\n    timers_[4]->enableTimer(std::chrono::milliseconds(0));\n  });\n  addTask(1, '1');\n  addTask(2, '2');\n  addTask(3, '3');\n  addTask(10000, '4', false);\n  EXPECT_EQ(5, timers_.size());\n\n  // timer 0 is expected to run first and reschedule timers 1 and 2. Timer 3 should fire before\n  // timer 1 since timer 3's registration is unaffected. timer 1 runs in the same iteration\n  // because it is scheduled with zero delay. Timer 2 executes in a later iteration because it is\n  // re-enabled with a non-zero timeout.\n  advanceMsAndLoop(5);\n  if (activateMode() == ActivateMode::DelayActivateTimers) {\n#ifdef WIN32\n    // The event loop runs for a single iteration in NonBlock mode on Windows. 
Force it to run again\n    // to pick up next iteration callbacks.\n    advanceMsAndLoop(0);\n#endif\n    EXPECT_THAT(output_, testing::AnyOf(\"p03p14\", \"p03p41\"));\n  } else {\n    EXPECT_EQ(\"p0314\", output_);\n  }\n\n  advanceMsAndLoop(100);\n  if (activateMode() == ActivateMode::DelayActivateTimers) {\n    EXPECT_THAT(output_, testing::AnyOf(\"p03p14p2\", \"p03p41p2\"));\n  } else {\n    EXPECT_EQ(\"p0314p2\", output_);\n  }\n}\n\nTEST_P(SimulatedTimeSystemTest, AdvanceTimeWait) {\n  EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime());\n  EXPECT_EQ(start_system_time_, time_system_.systemTime());\n\n  addTask(4, 'Z');\n  addTask(2, 'X');\n  addTask(3, 'Y');\n  addTask(6, 'A'); // This timer will never be run, so \"A\" will not be appended.\n  std::atomic<bool> done(false);\n  auto thread = Thread::threadFactoryForTest().createThread([this, &done]() {\n    while (!done) {\n      base_scheduler_.run(Dispatcher::RunType::Block);\n    }\n  });\n  time_system_.advanceTimeWait(std::chrono::milliseconds(5));\n  EXPECT_EQ(\"XYZ\", output_);\n  done = true;\n  thread->join();\n  EXPECT_EQ(start_monotonic_time_ + std::chrono::milliseconds(5), time_system_.monotonicTime());\n  EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime());\n}\n\nTEST_P(SimulatedTimeSystemTest, WaitFor) {\n  EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime());\n  EXPECT_EQ(start_system_time_, time_system_.systemTime());\n\n  // Run an event loop in the background to activate timers.\n  absl::Mutex mutex;\n  bool done(false);\n  auto thread = Thread::threadFactoryForTest().createThread([this, &mutex, &done]() {\n    for (;;) {\n      {\n        absl::MutexLock lock(&mutex);\n        if (done) {\n          return;\n        }\n      }\n\n      base_scheduler_.run(Dispatcher::RunType::Block);\n    }\n  });\n\n  TimerPtr timer = scheduler_->createTimer(\n      [&mutex, &done]() {\n        absl::MutexLock lock(&mutex);\n        done = true;\n     
 },\n      dispatcher_);\n  timer->enableTimer(std::chrono::seconds(60));\n\n  // Wait 1ms of real time. waitFor() does not advance simulated time, so this is just going to\n  // verify that we return quickly and nothing has fired.\n  {\n    absl::MutexLock lock(&mutex);\n    EXPECT_FALSE(time_system_.waitFor(mutex, absl::Condition(&done), std::chrono::milliseconds(1)));\n  }\n  EXPECT_FALSE(done);\n  EXPECT_EQ(MonotonicTime(std::chrono::seconds(0)), time_system_.monotonicTime());\n\n  // Fire the timeout by advancing time and then verify that waitFor() returns without any timeout.\n  time_system_.advanceTimeWait(std::chrono::seconds(60));\n  {\n    absl::MutexLock lock(&mutex);\n    EXPECT_TRUE(time_system_.waitFor(mutex, absl::Condition(&done), std::chrono::seconds(0)));\n  }\n  EXPECT_TRUE(done);\n  EXPECT_EQ(MonotonicTime(std::chrono::seconds(60)), time_system_.monotonicTime());\n  thread->join();\n\n  // Waiting a third time, with no pending timeouts, will just sleep out for\n  // the max duration and return a timeout.\n  done = false;\n  {\n    absl::MutexLock lock(&mutex);\n    EXPECT_FALSE(time_system_.waitFor(mutex, absl::Condition(&done), std::chrono::seconds(0)));\n  }\n  EXPECT_FALSE(done);\n  EXPECT_EQ(MonotonicTime(std::chrono::seconds(60)), time_system_.monotonicTime());\n}\n\nTEST_P(SimulatedTimeSystemTest, Monotonic) {\n  // Setting time forward works.\n  time_system_.setMonotonicTime(start_monotonic_time_ + std::chrono::milliseconds(5));\n  EXPECT_EQ(start_monotonic_time_ + std::chrono::milliseconds(5), time_system_.monotonicTime());\n\n  // But going backward does not.\n  time_system_.setMonotonicTime(start_monotonic_time_ + std::chrono::milliseconds(3));\n  EXPECT_EQ(start_monotonic_time_ + std::chrono::milliseconds(5), time_system_.monotonicTime());\n}\n\nTEST_P(SimulatedTimeSystemTest, System) {\n  // Setting time forward works.\n  time_system_.setSystemTime(start_system_time_ + std::chrono::milliseconds(5));\n  EXPECT_EQ(start_system_time_ + 
std::chrono::milliseconds(5), time_system_.systemTime());\n\n  // And going backward works too.\n  time_system_.setSystemTime(start_system_time_ + std::chrono::milliseconds(3));\n  EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(3), time_system_.systemTime());\n}\n\nTEST_P(SimulatedTimeSystemTest, Ordering) {\n  addTask(5, '5');\n  addTask(3, '3');\n  addTask(6, '6');\n  EXPECT_EQ(\"\", output_);\n  advanceMsAndLoop(5);\n  EXPECT_EQ(\"35\", output_);\n  advanceMsAndLoop(1);\n  EXPECT_EQ(\"356\", output_);\n}\n\nTEST_P(SimulatedTimeSystemTest, SystemTimeOrdering) {\n  addTask(5, '5');\n  addTask(3, '3');\n  addTask(6, '6');\n  EXPECT_EQ(\"\", output_);\n  advanceSystemMsAndLoop(5);\n  EXPECT_EQ(\"35\", output_);\n  advanceSystemMsAndLoop(1);\n  EXPECT_EQ(\"356\", output_);\n  time_system_.setSystemTime(start_system_time_ + std::chrono::milliseconds(1));\n  time_system_.setSystemTime(start_system_time_ + std::chrono::milliseconds(100));\n  EXPECT_EQ(\"356\", output_); // callbacks don't get replayed.\n}\n\nTEST_P(SimulatedTimeSystemTest, DisableTimer) {\n  addTask(5, '5');\n  addTask(3, '3');\n  addTask(6, '6');\n  timers_[0]->disableTimer();\n  EXPECT_EQ(\"\", output_);\n  advanceMsAndLoop(5);\n  EXPECT_EQ(\"3\", output_);\n  advanceMsAndLoop(1);\n  EXPECT_EQ(\"36\", output_);\n}\n\nTEST_P(SimulatedTimeSystemTest, IgnoreRedundantDisable) {\n  addTask(5, '5');\n  timers_[0]->disableTimer();\n  timers_[0]->disableTimer();\n  advanceMsAndLoop(5);\n  EXPECT_EQ(\"\", output_);\n}\n\nTEST_P(SimulatedTimeSystemTest, OverrideEnable) {\n  addTask(5, '5');\n  timers_[0]->enableTimer(std::chrono::milliseconds(6));\n  advanceMsAndLoop(5);\n  EXPECT_EQ(\"\", output_); // Timer didn't wake up because we overrode to 6ms.\n  advanceMsAndLoop(1);\n  EXPECT_EQ(\"5\", output_);\n}\n\nTEST_P(SimulatedTimeSystemTest, DeleteTime) {\n  addTask(5, '5');\n  addTask(3, '3');\n  addTask(6, '6');\n  timers_[0].reset();\n  EXPECT_EQ(\"\", output_);\n  advanceMsAndLoop(5);\n  
EXPECT_EQ(\"3\", output_);\n  advanceMsAndLoop(1);\n  EXPECT_EQ(\"36\", output_);\n}\n\n// Regression test for issues documented in https://github.com/envoyproxy/envoy/pull/6956\nTEST_P(SimulatedTimeSystemTest, DuplicateTimer) {\n  // Set one alarm two times to test that pending does not get duplicated..\n  std::chrono::milliseconds delay(0);\n  TimerPtr zero_timer = scheduler_->createTimer([this]() { output_.append(1, '2'); }, dispatcher_);\n  zero_timer->enableTimer(delay);\n  zero_timer->enableTimer(delay);\n  advanceMsAndLoop(1);\n  EXPECT_EQ(\"2\", output_);\n}\n\n// Regression test for issues documented in https://github.com/envoyproxy/envoy/pull/6956\nTEST_P(SimulatedTimeSystemTest, DuplicateTimer2) {\n  // Now set an alarm which requires 10s of progress and make sure advanceTimeWait and waitFor\n  // works.\n  absl::Mutex mutex;\n  bool done(false);\n  auto thread = Thread::threadFactoryForTest().createThread([this, &mutex, &done]() {\n    for (;;) {\n      {\n        absl::MutexLock lock(&mutex);\n        if (done) {\n          return;\n        }\n      }\n\n      base_scheduler_.run(Dispatcher::RunType::Block);\n    }\n  });\n\n  TimerPtr timer = scheduler_->createTimer(\n      [&mutex, &done]() {\n        absl::MutexLock lock(&mutex);\n        done = true;\n      },\n      dispatcher_);\n  timer->enableTimer(std::chrono::seconds(10));\n\n  {\n    absl::MutexLock lock(&mutex);\n    EXPECT_FALSE(time_system_.waitFor(mutex, absl::Condition(&done), std::chrono::seconds(0)));\n  }\n  EXPECT_FALSE(done);\n\n  time_system_.advanceTimeWait(std::chrono::seconds(10));\n  {\n    absl::MutexLock lock(&mutex);\n    EXPECT_TRUE(time_system_.waitFor(mutex, absl::Condition(&done), std::chrono::seconds(0)));\n  }\n  EXPECT_TRUE(done);\n\n  thread->join();\n}\n\nTEST_P(SimulatedTimeSystemTest, Enabled) {\n  TimerPtr timer = scheduler_->createTimer({}, dispatcher_);\n  timer->enableTimer(std::chrono::milliseconds(0));\n  
EXPECT_TRUE(timer->enabled());\n}\n\nTEST_P(SimulatedTimeSystemTest, DeleteTimerFromThread) {\n  TimerPtr timer = scheduler_->createTimer([]() {}, dispatcher_);\n  timer->enableTimer(std::chrono::milliseconds(0));\n  auto thread = Thread::threadFactoryForTest().createThread([&timer]() { timer.reset(); });\n  advanceMsAndLoop(1);\n  thread->join();\n}\n\nTEST_P(SimulatedTimeSystemTest, DeleteTimerFromThread2) {\n  TimerPtr timer = scheduler_->createTimer([]() {}, dispatcher_);\n  timer->enableTimer(std::chrono::milliseconds(1));\n  auto thread = Thread::threadFactoryForTest().createThread([&timer]() { timer.reset(); });\n  advanceMsAndLoop(1);\n  thread->join();\n}\n\n} // namespace\n} // namespace Test\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/status_utility.h",
    "content": "#pragma once\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace StatusHelpers {\n\n// Check that a StatusOr is OK and has a value equal to its argument.\n//\n// For example:\n//\n// StatusOr<int> status(3);\n// EXPECT_THAT(status, IsOkAndHolds(3));\nMATCHER_P(IsOkAndHolds, expected, \"\") {\n  if (!arg) {\n    *result_listener << \"which has unexpected status: \" << arg.status();\n    return false;\n  }\n  if (*arg != expected) {\n    *result_listener << \"which has wrong value: \" << *arg;\n    return false;\n  }\n  return true;\n}\n\n// Check that a StatusOr has a status code equal to its argument.\n//\n// For example:\n//\n// StatusOr<int> status(absl::InvalidArgumentError(\"bad argument!\"));\n// EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument));\nMATCHER_P(StatusIs, expected_code, \"\") {\n  if (arg.status().code() != expected_code) {\n    *result_listener << \"which has unexpected status: \" << arg.status();\n    return false;\n  }\n  return true;\n}\n\n} // namespace StatusHelpers\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/test_runtime.h",
    "content": "// A simple test utility to easily allow for runtime feature overloads in unit tests.\n//\n// As long as this class is in scope one can do runtime feature overrides:\n//\n//  TestScopedRuntime scoped_runtime;\n//  Runtime::LoaderSingleton::getExisting()->mergeValues(\n//      {{\"envoy.reloadable_features.test_feature_true\", \"false\"}});\n//\n//  As long as a TestScopedRuntime exists, Runtime::LoaderSingleton::getExisting()->mergeValues()\n//  can safely be called to override runtime values.\n\n#pragma once\n\n#include \"envoy/config/bootstrap/v3/bootstrap.pb.h\"\n\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/event/mocks.h\"\n#include \"test/mocks/init/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/protobuf/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\n\nclass TestScopedRuntime {\npublic:\n  TestScopedRuntime() : api_(Api::createApiForTest()) {\n    envoy::config::bootstrap::v3::LayeredRuntime config;\n    // The existence of an admin layer is required for mergeValues() to work.\n    config.add_layers()->mutable_admin_layer();\n\n    loader_ = std::make_unique<Runtime::ScopedLoaderSingleton>(\n        std::make_unique<Runtime::LoaderImpl>(dispatcher_, tls_, config, local_info_, store_,\n                                              generator_, validation_visitor_, *api_));\n  }\n\nprivate:\n  Event::MockDispatcher dispatcher_;\n  testing::NiceMock<ThreadLocal::MockInstance> tls_;\n  Stats::IsolatedStoreImpl store_;\n  Random::MockRandomGenerator generator_;\n  Api::ApiPtr api_;\n  testing::NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  testing::NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;\n  std::unique_ptr<Runtime::ScopedLoaderSingleton> loader_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/test_time.cc",
    "content": "#include \"test/test_common/test_time.h\"\n\n#include \"common/common/utility.h\"\n\n#include \"test/test_common/global.h\"\n\nnamespace Envoy {\n\nDangerousDeprecatedTestTime::DangerousDeprecatedTestTime() = default;\n\nnamespace Event {\n\nTestTimeSystem& GlobalTimeSystem::timeSystem() {\n  // TODO(#4160): Switch default to SimulatedTimeSystem.\n  auto make_real_time_system = []() -> std::unique_ptr<TestTimeSystem> {\n    return std::make_unique<TestRealTimeSystem>();\n  };\n  return singleton_->timeSystem(make_real_time_system);\n}\n\nvoid TestRealTimeSystem::advanceTimeWaitImpl(const Duration& duration) {\n  only_one_thread_.checkOneThread();\n  std::this_thread::sleep_for(duration);\n}\n\nvoid TestRealTimeSystem::advanceTimeAsyncImpl(const Duration& duration) {\n  advanceTimeWait(duration);\n}\n\nSystemTime TestRealTimeSystem::systemTime() { return real_time_system_.systemTime(); }\n\nMonotonicTime TestRealTimeSystem::monotonicTime() { return real_time_system_.monotonicTime(); }\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/test_time.h",
    "content": "#pragma once\n\n#include \"common/event/real_time_system.h\"\n\n#include \"test/test_common/global.h\"\n#include \"test/test_common/test_time_system.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nclass TestRealTimeSystem : public TestTimeSystem {\npublic:\n  // TestTimeSystem\n  void advanceTimeAsyncImpl(const Duration& duration) override;\n  void advanceTimeWaitImpl(const Duration& duration) override;\n\n  // Event::TimeSystem\n  Event::SchedulerPtr createScheduler(Scheduler& base_scheduler,\n                                      CallbackScheduler& cb_scheduler) override {\n    return real_time_system_.createScheduler(base_scheduler, cb_scheduler);\n  }\n\n  // TimeSource\n  SystemTime systemTime() override;\n  MonotonicTime monotonicTime() override;\n\nprivate:\n  Event::RealTimeSystem real_time_system_;\n};\n\nclass GlobalTimeSystem : public DelegatingTestTimeSystemBase<TestTimeSystem> {\npublic:\n  TestTimeSystem& timeSystem() override;\n\nprivate:\n  Test::Global<SingletonTimeSystemHelper> singleton_;\n};\n\n} // namespace Event\n\n// Instantiates real-time sources for testing purposes. In general, this is a\n// bad idea, and tests should use simulated or mock time.\n//\n// TODO(#4160): change most references to this class to SimulatedTimeSystem.\nclass DangerousDeprecatedTestTime {\npublic:\n  DangerousDeprecatedTestTime();\n\n  Event::TestTimeSystem& timeSystem() { return time_system_.timeSystem(); }\n\nprivate:\n  Event::DelegatingTestTimeSystem<Event::TestRealTimeSystem> time_system_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/test_time_system.cc",
    "content": "#include \"test/test_common/test_time_system.h\"\n\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/thread.h\"\n\nnamespace Envoy {\nnamespace Event {\n\nTestTimeSystem& SingletonTimeSystemHelper::timeSystem(const MakeTimeSystemFn& make_time_system) {\n  Thread::LockGuard lock(mutex_);\n  if (time_system_ == nullptr) {\n    time_system_ = make_time_system();\n  }\n  return *time_system_;\n}\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/test_time_system.h",
    "content": "#pragma once\n\n#include \"envoy/common/time.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/timer.h\"\n\n#include \"common/common/assert.h\"\n#include \"common/common/thread.h\"\n\n#include \"test/test_common/global.h\"\n#include \"test/test_common/only_one_thread.h\"\n\nnamespace Envoy {\nnamespace Event {\n\n// Adds sleep() and waitFor() interfaces to Event::TimeSystem.\nclass TestTimeSystem : public Event::TimeSystem {\npublic:\n  ~TestTimeSystem() override = default;\n\n  /**\n   * This class will use the real monotonic time regardless of the time system in use (real\n   * or simulated). This should only be used when time is needed for real timeouts that govern\n   * networking, etc. It should never be used for time that only advances explicitly for alarms.\n   */\n  class RealTimeBound {\n  public:\n    template <class D>\n    RealTimeBound(const D& duration)\n        : end_time_(std::chrono::steady_clock::now() + duration) // NO_CHECK_FORMAT(real_time)\n    {}\n\n    std::chrono::milliseconds timeLeft() {\n      const auto current_time = std::chrono::steady_clock::now(); // NO_CHECK_FORMAT(real_time)\n      if (current_time > end_time_) {\n        return std::chrono::milliseconds(0);\n      }\n      return std::chrono::duration_cast<std::chrono::milliseconds>(end_time_ - current_time);\n    }\n\n    bool withinBound() {\n      return std::chrono::steady_clock::now() < end_time_; // NO_CHECK_FORMAT(real_time)\n    }\n\n  private:\n    const MonotonicTime end_time_;\n  };\n\n  /**\n   * Advances time forward by the specified duration, running any timers\n   * scheduled to fire, and blocking until the timer callbacks are complete.\n   * See also advanceTimeAndRun(), which provides the option to run a specific\n   * dispatcher or scheduler after advancing the time.\n   *\n   * This function should be used in multi-threaded tests, where other\n   * threads are running dispatcher loops. 
Integration tests should usually\n   * use this variant.\n   *\n   * @param duration The amount of time to advance.\n   */\n  virtual void advanceTimeWaitImpl(const Duration& duration) PURE;\n  template <class D> void advanceTimeWait(const D& duration) {\n    advanceTimeWaitImpl(std::chrono::duration_cast<Duration>(duration));\n  }\n\n  /**\n   * Advances time forward by the specified duration. Timers on event loops outside the current\n   * thread may trigger, but unlike advanceTimeWait(), this method does not block waiting for them\n   * to complete. This method also takes in a parameter the dispatcher or scheduler for the current\n   * thread, which will be run in the requested mode after advancing the time forward.\n   *\n   * This function should be used in single-threaded tests that want to advance time and then run\n   * the test thread event loop. Unit tests will often use this variant.\n   *\n   * @param duration The amount of time to advance.\n   * @param dispatcher_or_scheduler The event loop to run after advancing time forward.\n   * @param mode The mode to use when running the event loop.\n   */\n  template <class D, class DispatcherOrScheduler>\n  void advanceTimeAndRun(const D& duration, DispatcherOrScheduler& dispatcher_or_scheduler,\n                         Dispatcher::RunType mode) {\n    advanceTimeAsyncImpl(std::chrono::duration_cast<Duration>(duration));\n    dispatcher_or_scheduler.run(mode);\n  }\n\n  /**\n   * Helper function used by the implementation of advanceTimeAndRun which just advances time\n   * forward by the specified amount.\n   *\n   * @param duration The amount of time to advance.\n   */\n  virtual void advanceTimeAsyncImpl(const Duration& duration) PURE;\n\n  /**\n   * Waits for the specified duration to expire, or for the condition to be satisfied, whichever\n   * comes first.\n   *\n   * NOTE: This function takes a duration parameter which is the timeout of the wait. 
This is *real*\n   *       time in all time systems. This is to avoid test hangs and provide a useful error message.\n   *       When using simulated time this does not advance monotonic time. Thus, to simulated time\n   *       tests all network behavior will appear instantaneous. If time needs to advance to fire\n   *       alarms advanceTimeWait() or advanceTimeAsync() should be used.\n   *\n   * @param mutex A mutex which must be held before calling this function.\n   * @param condition The condition to wait on.\n   * @param duration The maximum amount of time to wait.\n   * @return Thread::CondVar::WaitStatus whether the condition timed out or not.\n   */\n  template <class D>\n  bool waitFor(absl::Mutex& mutex, const absl::Condition& condition, const D& duration) noexcept\n      ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) {\n    only_one_thread_.checkOneThread();\n    return mutex.AwaitWithTimeout(condition,\n                                  absl::FromChrono(std::chrono::duration_cast<Duration>(duration)));\n  }\n\n  /**\n   * This function will perform a real sleep in all time systems (real or simulated). This function\n   * should NOT be used without a good reason. It is either for supporting old code that needs to\n   * be converted to an event based approach, simulated time, or some other solution. Be ready\n   * to explain why you are using this in code review.\n   */\n  template <class D> void realSleepDoNotUseWithoutScrutiny(const D& duration) {\n    std::this_thread::sleep_for(duration); // NO_CHECK_FORMAT(real_time)\n  }\n\nprotected:\n  Thread::OnlyOneThread only_one_thread_;\n};\n\n// There should only be one instance of any time-system resident in a test\n// process at once. This helper class is used with Test::Global to help enforce\n// that with an ASSERT. Each time-system derivation should have a helper\n// implementation which is referenced from a delegate (see\n// DelegatingTestTimeSystemBase). 
In each delegate, a SingletonTimeSystemHelper\n// should be instantiated via Test::Global<SingletonTimeSystemHelper>. Only one\n// instance of SingletonTimeSystemHelper per process, at a time. When all\n// references to the delegates are destructed, the singleton will be destroyed\n// as well, so each test-method will get a fresh start.\nclass SingletonTimeSystemHelper {\npublic:\n  SingletonTimeSystemHelper() : time_system_(nullptr) {}\n\n  using MakeTimeSystemFn = std::function<std::unique_ptr<TestTimeSystem>()>;\n\n  /**\n   * Returns a singleton time-system, creating a default one if there's not\n   * one already. This method is thread-safe.\n   *\n   * @return the time system.\n   */\n  TestTimeSystem& timeSystem(const MakeTimeSystemFn& make_time_system);\n\nprivate:\n  std::unique_ptr<TestTimeSystem> time_system_ ABSL_GUARDED_BY(mutex_);\n  Thread::MutexBasicLockable mutex_;\n};\n\n// Implements the TestTimeSystem interface, delegating implementation of all\n// methods to a TestTimeSystem reference supplied by a timeSystem() method in a\n// subclass.\ntemplate <class TimeSystemVariant> class DelegatingTestTimeSystemBase : public TestTimeSystem {\npublic:\n  void advanceTimeAsyncImpl(const Duration& duration) override {\n    timeSystem().advanceTimeAsyncImpl(duration);\n  }\n  void advanceTimeWaitImpl(const Duration& duration) override {\n    timeSystem().advanceTimeWaitImpl(duration);\n  }\n  SchedulerPtr createScheduler(Scheduler& base_scheduler,\n                               CallbackScheduler& cb_scheduler) override {\n    return timeSystem().createScheduler(base_scheduler, cb_scheduler);\n  }\n  SystemTime systemTime() override { return timeSystem().systemTime(); }\n  MonotonicTime monotonicTime() override { return timeSystem().monotonicTime(); }\n\n  TimeSystemVariant& operator*() { return timeSystem(); }\n\n  virtual TimeSystemVariant& timeSystem() PURE;\n};\n\n// Wraps a concrete time-system in a delegate that ensures there is only one\n// time-system 
of any variant resident in a process at a time. Attempts to\n// instantiate multiple instances of the same type of time-system will simply\n// reference the same shared delegate, which will be deleted when the last one\n// goes out of scope. Attempts to instantiate different types of type-systems\n// will result in a RELEASE_ASSERT. See the testcases in\n// test_time_system_test.cc to understand the allowable sequences.\ntemplate <class TimeSystemVariant>\nclass DelegatingTestTimeSystem : public DelegatingTestTimeSystemBase<TimeSystemVariant> {\npublic:\n  DelegatingTestTimeSystem() : time_system_(initTimeSystem()) {}\n\n  TimeSystemVariant& timeSystem() override { return time_system_; }\n\nprivate:\n  TimeSystemVariant& initTimeSystem() {\n    auto make_time_system = []() -> std::unique_ptr<TestTimeSystem> {\n      return std::make_unique<TimeSystemVariant>();\n    };\n    auto time_system = dynamic_cast<TimeSystemVariant*>(&singleton_->timeSystem(make_time_system));\n    RELEASE_ASSERT(time_system,\n                   \"Two different types of time-systems allocated. If deriving from \"\n                   \"Event::TestUsingSimulatedTime make sure it is the first base class.\");\n    return *time_system;\n  }\n\n  Test::Global<SingletonTimeSystemHelper> singleton_;\n  TimeSystemVariant& time_system_;\n};\n\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/test_time_system_test.cc",
    "content": "#include \"test/test_common/simulated_time_system.h\"\n#include \"test/test_common/test_time.h\"\n#include \"test/test_common/test_time_system.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Event {\nnamespace Test {\nnamespace {\n\nclass TestTimeSystemTest : public testing::Test {\nprotected:\n};\n\nTEST_F(TestTimeSystemTest, TwoSimsSameReference) {\n  SimulatedTimeSystem t1, t2;\n  EXPECT_EQ(&t1.timeSystem(), &t2.timeSystem());\n}\n\nTEST_F(TestTimeSystemTest, TwoRealsSameReference) {\n  DangerousDeprecatedTestTime t1, t2;\n  EXPECT_EQ(&t1.timeSystem(), &t2.timeSystem());\n}\n\nTEST_F(TestTimeSystemTest, SimThenRealConflict) {\n  SimulatedTimeSystem t1;\n  EXPECT_DEATH({ DangerousDeprecatedTestTime t2; },\n               \".*Two different types of time-systems allocated.*\");\n}\n\nTEST_F(TestTimeSystemTest, SimThenRealSerial) {\n  { SimulatedTimeSystem t1; }\n  { DangerousDeprecatedTestTime t2; }\n}\n\nTEST_F(TestTimeSystemTest, RealThenSim) {\n  DangerousDeprecatedTestTime t1;\n  EXPECT_DEATH({ SimulatedTimeSystem t2; }, \".*Two different types of time-systems allocated.*\");\n}\n\nTEST_F(TestTimeSystemTest, RealThenSimSerial) {\n  { DangerousDeprecatedTestTime t2; }\n  { SimulatedTimeSystem t1; }\n}\n\n} // namespace\n} // namespace Test\n} // namespace Event\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/test_version_linkstamp.cc",
    "content": "// NOLINT(namespace-envoy)\nextern const char build_scm_revision[];\nextern const char build_scm_status[];\n\nconst char build_scm_revision[] = \"0\";\nconst char build_scm_status[] = \"test\";\n"
  },
  {
    "path": "test/test_common/thread_factory_for_test.cc",
    "content": "#include \"common/common/thread_impl.h\"\n\nnamespace Envoy {\n\nnamespace Thread {\n\n// TODO(sesmith177) Tests should get the ThreadFactory from the same location as the main code\nThreadFactory& threadFactoryForTest() {\n#ifdef WIN32\n  static auto* thread_factory = new ThreadFactoryImplWin32();\n#else\n  static auto* thread_factory = new ThreadFactoryImplPosix();\n#endif\n  return *thread_factory;\n}\n\n} // namespace Thread\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/thread_factory_for_test.h",
    "content": "#include \"envoy/thread/thread.h\"\n\nnamespace Envoy {\n\nnamespace Thread {\nThreadFactory& threadFactoryForTest();\n} // namespace Thread\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/threadsafe_singleton_injector.h",
    "content": "#pragma once\n\n#include \"common/singleton/threadsafe_singleton.h\"\n\nnamespace Envoy {\n\n// Note this class is not thread-safe, and should be called exceedingly carefully.\ntemplate <class T> class TestThreadsafeSingletonInjector {\npublic:\n  TestThreadsafeSingletonInjector(T* instance) {\n    latched_instance_ = &ThreadSafeSingleton<T>::get();\n    ThreadSafeSingleton<T>::instance_ = instance;\n  }\n  ~TestThreadsafeSingletonInjector() { ThreadSafeSingleton<T>::instance_ = latched_instance_; }\n\nprivate:\n  T* latched_instance_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/utility.cc",
    "content": "#include \"utility.h\"\n\n#include <cstdint>\n#include <fstream>\n#include <iomanip>\n#include <iostream>\n#include <list>\n#include <regex>\n#include <stdexcept>\n#include <string>\n#include <vector>\n\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/common/platform.h\"\n#include \"envoy/config/cluster/v3/cluster.pb.h\"\n#include \"envoy/config/endpoint/v3/endpoint.pb.h\"\n#include \"envoy/config/listener/v3/listener.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n#include \"envoy/http/codec.h\"\n#include \"envoy/service/runtime/v3/rtds.pb.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/common/fmt.h\"\n#include \"common/common/lock_guard.h\"\n#include \"common/common/thread_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/resource_name.h\"\n#include \"common/filesystem/directory.h\"\n#include \"common/filesystem/filesystem_impl.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/network/utility.h\"\n\n#include \"test/mocks/common.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/resources.h\"\n#include \"test/test_common/test_time.h\"\n\n#include \"absl/container/fixed_array.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::GTEST_FLAG(random_seed);\n\nnamespace Envoy {\n\n// The purpose of using the static seed here is to use --test_arg=--gtest_random_seed=[seed]\n// to specify the seed of the problem to replay.\nint32_t getSeed() {\n  static const int32_t seed = std::chrono::duration_cast<std::chrono::nanoseconds>(\n                                  std::chrono::system_clock::now().time_since_epoch())\n                                  .count();\n  return seed;\n}\n\nTestRandomGenerator::TestRandomGenerator()\n    : seed_(GTEST_FLAG(random_seed) == 
0 ? getSeed() : GTEST_FLAG(random_seed)), generator_(seed_) {\n  std::cerr << \"TestRandomGenerator running with seed \" << seed_ << \"\\n\";\n}\n\nuint64_t TestRandomGenerator::random() { return generator_(); }\n\nbool TestUtility::headerMapEqualIgnoreOrder(const Http::HeaderMap& lhs,\n                                            const Http::HeaderMap& rhs) {\n  if (lhs.size() != rhs.size()) {\n    return false;\n  }\n\n  bool equal = true;\n  rhs.iterate([&lhs, &equal](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {\n    const Http::HeaderEntry* entry =\n        lhs.get(Http::LowerCaseString(std::string(header.key().getStringView())));\n    if (entry == nullptr || (entry->value() != header.value().getStringView())) {\n      equal = false;\n      return Http::HeaderMap::Iterate::Break;\n    }\n    return Http::HeaderMap::Iterate::Continue;\n  });\n\n  return equal;\n}\n\nbool TestUtility::buffersEqual(const Buffer::Instance& lhs, const Buffer::Instance& rhs) {\n  if (lhs.length() != rhs.length()) {\n    return false;\n  }\n\n  // Check whether the two buffers contain the same content. It is valid for the content\n  // to be arranged differently in the buffers. 
For example, lhs could have one slice\n  // containing 10 bytes while rhs has ten slices containing one byte each.\n  Buffer::RawSliceVector lhs_slices = lhs.getRawSlices();\n  Buffer::RawSliceVector rhs_slices = rhs.getRawSlices();\n\n  size_t rhs_slice = 0;\n  size_t rhs_offset = 0;\n  for (auto& lhs_slice : lhs_slices) {\n    for (size_t lhs_offset = 0; lhs_offset < lhs_slice.len_; lhs_offset++) {\n      while (rhs_offset >= rhs_slices[rhs_slice].len_) {\n        rhs_slice++;\n        ASSERT(rhs_slice < rhs_slices.size());\n        rhs_offset = 0;\n      }\n      auto lhs_str = static_cast<const uint8_t*>(lhs_slice.mem_);\n      auto rhs_str = static_cast<const uint8_t*>(rhs_slices[rhs_slice].mem_);\n      if (lhs_str[lhs_offset] != rhs_str[rhs_offset]) {\n        return false;\n      }\n      rhs_offset++;\n    }\n  }\n\n  return true;\n}\n\nbool TestUtility::rawSlicesEqual(const Buffer::RawSlice* lhs, const Buffer::RawSlice* rhs,\n                                 size_t num_slices) {\n  for (size_t slice = 0; slice < num_slices; slice++) {\n    auto rhs_slice = rhs[slice];\n    auto lhs_slice = lhs[slice];\n    if (rhs_slice.len_ != lhs_slice.len_) {\n      return false;\n    }\n    auto rhs_slice_data = static_cast<const uint8_t*>(rhs_slice.mem_);\n    auto lhs_slice_data = static_cast<const uint8_t*>(lhs_slice.mem_);\n    for (size_t offset = 0; offset < rhs_slice.len_; offset++) {\n      if (rhs_slice_data[offset] != lhs_slice_data[offset]) {\n        return false;\n      }\n    }\n  }\n  return true;\n}\n\nvoid TestUtility::feedBufferWithRandomCharacters(Buffer::Instance& buffer, uint64_t n_char,\n                                                 uint64_t seed) {\n  const std::string sample = \"Neque porro quisquam est qui dolorem ipsum..\";\n  std::mt19937 generate(seed);\n  std::uniform_int_distribution<> distribute(1, sample.length() - 1);\n  std::string str{};\n  for (uint64_t n = 0; n < n_char; ++n) {\n    str += sample.at(distribute(generate));\n  }\n 
 buffer.add(str);\n}\n\nStats::CounterSharedPtr TestUtility::findCounter(Stats::Store& store, const std::string& name) {\n  return findByName(store.counters(), name);\n}\n\nStats::GaugeSharedPtr TestUtility::findGauge(Stats::Store& store, const std::string& name) {\n  return findByName(store.gauges(), name);\n}\n\nStats::TextReadoutSharedPtr TestUtility::findTextReadout(Stats::Store& store,\n                                                         const std::string& name) {\n  return findByName(store.textReadouts(), name);\n}\n\nAssertionResult TestUtility::waitForCounterEq(Stats::Store& store, const std::string& name,\n                                              uint64_t value, Event::TestTimeSystem& time_system,\n                                              std::chrono::milliseconds timeout,\n                                              Event::Dispatcher* dispatcher) {\n  Event::TestTimeSystem::RealTimeBound bound(timeout);\n  while (findCounter(store, name) == nullptr || findCounter(store, name)->value() != value) {\n    time_system.advanceTimeWait(std::chrono::milliseconds(10));\n    if (timeout != std::chrono::milliseconds::zero() && !bound.withinBound()) {\n      return AssertionFailure() << fmt::format(\"timed out waiting for {} to be {}\", name, value);\n    }\n    if (dispatcher != nullptr) {\n      dispatcher->run(Event::Dispatcher::RunType::NonBlock);\n    }\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult TestUtility::waitForCounterGe(Stats::Store& store, const std::string& name,\n                                              uint64_t value, Event::TestTimeSystem& time_system,\n                                              std::chrono::milliseconds timeout) {\n  Event::TestTimeSystem::RealTimeBound bound(timeout);\n  while (findCounter(store, name) == nullptr || findCounter(store, name)->value() < value) {\n    time_system.advanceTimeWait(std::chrono::milliseconds(10));\n    if (timeout != std::chrono::milliseconds::zero() && 
!bound.withinBound()) {\n      return AssertionFailure() << fmt::format(\"timed out waiting for {} to be {}\", name, value);\n    }\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult TestUtility::waitForGaugeGe(Stats::Store& store, const std::string& name,\n                                            uint64_t value, Event::TestTimeSystem& time_system,\n                                            std::chrono::milliseconds timeout) {\n  Event::TestTimeSystem::RealTimeBound bound(timeout);\n  while (findGauge(store, name) == nullptr || findGauge(store, name)->value() < value) {\n    time_system.advanceTimeWait(std::chrono::milliseconds(10));\n    if (timeout != std::chrono::milliseconds::zero() && !bound.withinBound()) {\n      return AssertionFailure() << fmt::format(\"timed out waiting for {} to be {}\", name, value);\n    }\n  }\n  return AssertionSuccess();\n}\n\nAssertionResult TestUtility::waitForGaugeEq(Stats::Store& store, const std::string& name,\n                                            uint64_t value, Event::TestTimeSystem& time_system,\n                                            std::chrono::milliseconds timeout) {\n  Event::TestTimeSystem::RealTimeBound bound(timeout);\n  while (findGauge(store, name) == nullptr || findGauge(store, name)->value() != value) {\n    time_system.advanceTimeWait(std::chrono::milliseconds(10));\n    if (timeout != std::chrono::milliseconds::zero() && !bound.withinBound()) {\n      return AssertionFailure() << fmt::format(\"timed out waiting for {} to be {}\", name, value);\n    }\n  }\n  return AssertionSuccess();\n}\n\nstd::list<Network::DnsResponse>\nTestUtility::makeDnsResponse(const std::list<std::string>& addresses, std::chrono::seconds ttl) {\n  std::list<Network::DnsResponse> ret;\n  for (const auto& address : addresses) {\n    ret.emplace_back(Network::DnsResponse(Network::Utility::parseInternetAddress(address), ttl));\n  }\n  return ret;\n}\n\nstd::vector<std::string> TestUtility::listFiles(const std::string& 
path, bool recursive) {\n  std::vector<std::string> file_names;\n  Filesystem::Directory directory(path);\n  for (const Filesystem::DirectoryEntry& entry : directory) {\n    std::string file_name = fmt::format(\"{}/{}\", path, entry.name_);\n    if (entry.type_ == Filesystem::FileType::Directory) {\n      if (recursive && entry.name_ != \".\" && entry.name_ != \"..\") {\n        std::vector<std::string> more_file_names = listFiles(file_name, recursive);\n        file_names.insert(file_names.end(), more_file_names.begin(), more_file_names.end());\n      }\n    } else { // regular file\n      file_names.push_back(file_name);\n    }\n  }\n  return file_names;\n}\n\nstd::string TestUtility::xdsResourceName(const ProtobufWkt::Any& resource) {\n  if (resource.type_url() == Config::TypeUrl::get().Listener) {\n    return TestUtility::anyConvert<envoy::config::listener::v3::Listener>(resource).name();\n  }\n  if (resource.type_url() == Config::TypeUrl::get().RouteConfiguration) {\n    return TestUtility::anyConvert<envoy::config::route::v3::RouteConfiguration>(resource).name();\n  }\n  if (resource.type_url() == Config::TypeUrl::get().Cluster) {\n    return TestUtility::anyConvert<envoy::config::cluster::v3::Cluster>(resource).name();\n  }\n  if (resource.type_url() == Config::TypeUrl::get().ClusterLoadAssignment) {\n    return TestUtility::anyConvert<envoy::config::endpoint::v3::ClusterLoadAssignment>(resource)\n        .cluster_name();\n  }\n  if (resource.type_url() == Config::TypeUrl::get().VirtualHost) {\n    return TestUtility::anyConvert<envoy::config::route::v3::VirtualHost>(resource).name();\n  }\n  if (resource.type_url() == Config::TypeUrl::get().Runtime) {\n    return TestUtility::anyConvert<envoy::service::runtime::v3::Runtime>(resource).name();\n  }\n  if (resource.type_url() == Config::getTypeUrl<envoy::config::listener::v3::Listener>(\n                                 envoy::config::core::v3::ApiVersion::V3)) {\n    return 
TestUtility::anyConvert<envoy::config::listener::v3::Listener>(resource).name();\n  }\n  if (resource.type_url() == Config::getTypeUrl<envoy::config::route::v3::RouteConfiguration>(\n                                 envoy::config::core::v3::ApiVersion::V3)) {\n    return TestUtility::anyConvert<envoy::config::route::v3::RouteConfiguration>(resource).name();\n  }\n  if (resource.type_url() == Config::getTypeUrl<envoy::config::cluster::v3::Cluster>(\n                                 envoy::config::core::v3::ApiVersion::V3)) {\n    return TestUtility::anyConvert<envoy::config::cluster::v3::Cluster>(resource).name();\n  }\n  if (resource.type_url() == Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>(\n                                 envoy::config::core::v3::ApiVersion::V3)) {\n    return TestUtility::anyConvert<envoy::config::endpoint::v3::ClusterLoadAssignment>(resource)\n        .cluster_name();\n  }\n  if (resource.type_url() == Config::getTypeUrl<envoy::config::route::v3::VirtualHost>(\n                                 envoy::config::core::v3::ApiVersion::V3)) {\n    return TestUtility::anyConvert<envoy::config::route::v3::VirtualHost>(resource).name();\n  }\n  if (resource.type_url() == Config::getTypeUrl<envoy::service::runtime::v3::Runtime>(\n                                 envoy::config::core::v3::ApiVersion::V3)) {\n    return TestUtility::anyConvert<envoy::service::runtime::v3::Runtime>(resource).name();\n  }\n  throw EnvoyException(\n      absl::StrCat(\"xdsResourceName does not know about type URL \", resource.type_url()));\n}\n\nstd::string TestUtility::addLeftAndRightPadding(absl::string_view to_pad, int desired_length) {\n  int line_fill_len = desired_length - to_pad.length();\n  int first_half_len = line_fill_len / 2;\n  int second_half_len = line_fill_len - first_half_len;\n  return absl::StrCat(std::string(first_half_len, '='), to_pad, std::string(second_half_len, '='));\n}\n\nstd::vector<std::string> TestUtility::split(const 
std::string& source, char split) {\n  return TestUtility::split(source, std::string{split});\n}\n\nstd::vector<std::string> TestUtility::split(const std::string& source, const std::string& split,\n                                            bool keep_empty_string) {\n  std::vector<std::string> ret;\n  const auto tokens_sv = StringUtil::splitToken(source, split, keep_empty_string);\n  std::transform(tokens_sv.begin(), tokens_sv.end(), std::back_inserter(ret),\n                 [](absl::string_view sv) { return std::string(sv); });\n  return ret;\n}\n\n// static\nabsl::Time TestUtility::parseTime(const std::string& input, const std::string& input_format) {\n  absl::Time time;\n  std::string parse_error;\n  EXPECT_TRUE(absl::ParseTime(input_format, input, &time, &parse_error))\n      << \" error \\\"\" << parse_error << \"\\\" from failing to parse timestamp \\\"\" << input\n      << \"\\\" with format string \\\"\" << input_format << \"\\\"\";\n  return time;\n}\n\n// static\nstd::string TestUtility::formatTime(const absl::Time input, const std::string& output_format) {\n  static const absl::TimeZone utc = absl::UTCTimeZone();\n  return absl::FormatTime(output_format, input, utc);\n}\n\n// static\nstd::string TestUtility::formatTime(const SystemTime input, const std::string& output_format) {\n  return TestUtility::formatTime(absl::FromChrono(input), output_format);\n}\n\n// static\nstd::string TestUtility::convertTime(const std::string& input, const std::string& input_format,\n                                     const std::string& output_format) {\n  return TestUtility::formatTime(TestUtility::parseTime(input, input_format), output_format);\n}\n\n// static\nstd::string TestUtility::nonZeroedGauges(const std::vector<Stats::GaugeSharedPtr>& gauges) {\n  // Returns all gauges that are 0 except the circuit_breaker remaining resource\n  // gauges which default to the resource max.\n  std::regex omitted(\".*circuit_breakers\\\\..*\\\\.remaining.*\");\n  std::string 
non_zero;\n  for (const Stats::GaugeSharedPtr& gauge : gauges) {\n    if (!std::regex_match(gauge->name(), omitted) && gauge->value() != 0) {\n      non_zero.append(fmt::format(\"{}: {}; \", gauge->name(), gauge->value()));\n    }\n  }\n  return non_zero;\n}\n\n// static\nbool TestUtility::gaugesZeroed(const std::vector<Stats::GaugeSharedPtr>& gauges) {\n  return nonZeroedGauges(gauges).empty();\n}\n\n// static\nbool TestUtility::gaugesZeroed(\n    const std::vector<std::pair<absl::string_view, Stats::PrimitiveGaugeReference>>& gauges) {\n  // Returns true if all gauges are 0 except the circuit_breaker remaining resource\n  // gauges which default to the resource max.\n  std::regex omitted(\".*circuit_breakers\\\\..*\\\\.remaining.*\");\n  for (const auto& gauge : gauges) {\n    if (!std::regex_match(std::string(gauge.first), omitted) && gauge.second.get().value() != 0) {\n      return false;\n    }\n  }\n  return true;\n}\n\nvoid ConditionalInitializer::setReady() {\n  absl::MutexLock lock(&mutex_);\n  EXPECT_FALSE(ready_);\n  ready_ = true;\n}\n\nvoid ConditionalInitializer::waitReady() {\n  absl::MutexLock lock(&mutex_);\n  if (ready_) {\n    ready_ = false;\n    return;\n  }\n\n  mutex_.Await(absl::Condition(&ready_));\n  EXPECT_TRUE(ready_);\n  ready_ = false;\n}\n\nvoid ConditionalInitializer::wait() {\n  absl::MutexLock lock(&mutex_);\n  mutex_.Await(absl::Condition(&ready_));\n  EXPECT_TRUE(ready_);\n}\n\nconstexpr std::chrono::milliseconds TestUtility::DefaultTimeout;\n\nnamespace Api {\n\nclass TestImplProvider {\nprotected:\n  Event::GlobalTimeSystem global_time_system_;\n  testing::NiceMock<Stats::MockIsolatedStatsStore> default_stats_store_;\n  testing::NiceMock<Random::MockRandomGenerator> mock_random_generator_;\n};\n\nclass TestImpl : public TestImplProvider, public Impl {\npublic:\n  TestImpl(Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system,\n           Stats::Store* stats_store = nullptr, Event::TimeSystem* time_system = 
nullptr,\n           Random::RandomGenerator* random = nullptr)\n      : Impl(thread_factory, stats_store ? *stats_store : default_stats_store_,\n             time_system ? *time_system : global_time_system_, file_system,\n             random ? *random : mock_random_generator_) {}\n};\n\nApiPtr createApiForTest() {\n  return std::make_unique<TestImpl>(Thread::threadFactoryForTest(),\n                                    Filesystem::fileSystemForTest());\n}\n\nApiPtr createApiForTest(Random::RandomGenerator& random) {\n  return std::make_unique<TestImpl>(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(),\n                                    nullptr, nullptr, &random);\n}\n\nApiPtr createApiForTest(Stats::Store& stat_store) {\n  return std::make_unique<TestImpl>(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(),\n                                    &stat_store);\n}\n\nApiPtr createApiForTest(Stats::Store& stat_store, Random::RandomGenerator& random) {\n  return std::make_unique<TestImpl>(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(),\n                                    &stat_store, nullptr, &random);\n}\n\nApiPtr createApiForTest(Event::TimeSystem& time_system) {\n  return std::make_unique<TestImpl>(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(),\n                                    nullptr, &time_system);\n}\n\nApiPtr createApiForTest(Stats::Store& stat_store, Event::TimeSystem& time_system) {\n  return std::make_unique<TestImpl>(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(),\n                                    &stat_store, &time_system);\n}\n\n} // namespace Api\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/utility.h",
    "content": "#pragma once\n\n#include <cstdlib>\n#include <list>\n#include <random>\n#include <string>\n#include <vector>\n\n#include \"envoy/api/api.h\"\n#include \"envoy/buffer/buffer.h\"\n#include \"envoy/network/address.h\"\n#include \"envoy/stats/stats.h\"\n#include \"envoy/stats/store.h\"\n#include \"envoy/thread/thread.h\"\n#include \"envoy/type/matcher/v3/string.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/common/c_smart_ptr.h\"\n#include \"common/common/empty_string.h\"\n#include \"common/common/thread.h\"\n#include \"common/config/decoded_resource_impl.h\"\n#include \"common/config/opaque_resource_decoder_impl.h\"\n#include \"common/config/version_converter.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/symbol_table_impl.h\"\n\n#include \"test/test_common/file_system_for_test.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/test_time_system.h\"\n#include \"test/test_common/thread_factory_for_test.h\"\n\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/time.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_; // NOLINT(misc-unused-using-decls)\nusing testing::AssertionFailure;\nusing testing::AssertionResult;\nusing testing::AssertionSuccess;\nusing testing::Invoke; //  NOLINT(misc-unused-using-decls)\n\nnamespace Envoy {\n\n/*\n  Macro to use for validating that a statement throws the specified type of exception, and that\n  the exception's what() method returns a string which is matched by the specified matcher.\n  This allows for expectations such as:\n\n  EXPECT_THAT_THROWS_MESSAGE(\n      bad_function_call(),\n      EnvoyException,\n      AllOf(StartsWith(\"expected prefix\"), HasSubstr(\"some substring\")));\n*/\n#define EXPECT_THAT_THROWS_MESSAGE(statement, expected_exception, matcher)             
            \\\n  try {                                                                                            \\\n    statement;                                                                                     \\\n    ADD_FAILURE() << \"Exception should take place. It did not.\";                                   \\\n  } catch (expected_exception & e) {                                                               \\\n    EXPECT_THAT(std::string(e.what()), matcher);                                                   \\\n  }\n\n// Expect that the statement throws the specified type of exception with exactly the specified\n// message.\n#define EXPECT_THROW_WITH_MESSAGE(statement, expected_exception, message)                          \\\n  EXPECT_THAT_THROWS_MESSAGE(statement, expected_exception, ::testing::Eq(message))\n\n// Expect that the statement throws the specified type of exception with a message containing a\n// substring matching the specified regular expression (i.e. the regex doesn't have to match\n// the entire message).\n#define EXPECT_THROW_WITH_REGEX(statement, expected_exception, regex_str)                          \\\n  EXPECT_THAT_THROWS_MESSAGE(statement, expected_exception, ::testing::ContainsRegex(regex_str))\n\n// Expect that the statement throws the specified type of exception with a message that does not\n// contain any substring matching the specified regular expression.\n#define EXPECT_THROW_WITHOUT_REGEX(statement, expected_exception, regex_str)                       \\\n  EXPECT_THAT_THROWS_MESSAGE(statement, expected_exception,                                        \\\n                             ::testing::Not(::testing::ContainsRegex(regex_str)))\n\n#define VERBOSE_EXPECT_NO_THROW(statement)                                                         \\\n  try {                                                                                            \\\n    statement;                                                                
                     \\\n  } catch (EnvoyException & e) {                                                                   \\\n    ADD_FAILURE() << \"Unexpected exception: \" << std::string(e.what());                            \\\n  }\n\n#define VERIFY_ASSERTION(statement)                                                                \\\n  do {                                                                                             \\\n    ::testing::AssertionResult status = statement;                                                 \\\n    if (!status) {                                                                                 \\\n      return status;                                                                               \\\n    }                                                                                              \\\n  } while (false)\n\n// A convenience macro for testing Envoy deprecated features. This will disable the test when\n// tests are built with --define deprecated_features=disabled to avoid the hard-failure mode for\n// deprecated features. Sample usage is:\n//\n// TEST_F(FixtureName, DEPRECATED_FEATURE_TEST(TestName)) {\n// ...\n// }\n#ifndef ENVOY_DISABLE_DEPRECATED_FEATURES\n#define DEPRECATED_FEATURE_TEST(X) X\n#else\n#define DEPRECATED_FEATURE_TEST(X) DISABLED_##X\n#endif\n\n// Random number generator which logs its seed to stderr. 
To repeat a test run with a non-zero seed\n// one can run the test with --test_arg=--gtest_random_seed=[seed]\nclass TestRandomGenerator {\npublic:\n  TestRandomGenerator();\n\n  uint64_t random();\n\nprivate:\n  const int32_t seed_;\n  std::ranlux48 generator_;\n  RealTimeSource real_time_source_;\n};\n\nclass TestUtility {\npublic:\n  /**\n   * Compare 2 HeaderMaps.\n   * @param lhs supplies HeaderMap 1.\n   * @param rhs supplies HeaderMap 2.\n   * @return TRUE if the HeaderMaps are equal, ignoring the order of the\n   * headers, false if not.\n   */\n  static bool headerMapEqualIgnoreOrder(const Http::HeaderMap& lhs, const Http::HeaderMap& rhs);\n\n  /**\n   * Compare 2 buffers.\n   * @param lhs supplies buffer 1.\n   * @param rhs supplies buffer 2.\n   * @return TRUE if the buffers contain equal content\n   *         (i.e., if lhs.toString() == rhs.toString()), false if not.\n   */\n  static bool buffersEqual(const Buffer::Instance& lhs, const Buffer::Instance& rhs);\n\n  /**\n   * Compare 2 RawSlice pointers.\n   * @param lhs supplies raw slice 1.\n   * @param rhs supplies raw slice 2.\n   * @param num_slices The number of slices to compare. It is assumed lhs and rhs have the same\n   * number.\n   * @return true if for num_slices, all lhs raw slices are equal to the corresponding rhs raw slice\n   *         in length and a byte by byte data comparison. 
false otherwise\n   */\n  static bool rawSlicesEqual(const Buffer::RawSlice* lhs, const Buffer::RawSlice* rhs,\n                             size_t num_slices);\n\n  /**\n   * Feed a buffer with random characters.\n   * @param buffer supplies the buffer to be fed.\n   * @param n_char number of characters that should be added to the supplied buffer.\n   * @param seed seeds pseudo-random number generator (default = 0).\n   */\n  static void feedBufferWithRandomCharacters(Buffer::Instance& buffer, uint64_t n_char,\n                                             uint64_t seed = 0);\n\n  /**\n   * Finds a stat in a vector with the given name.\n   * @param name the stat name to look for.\n   * @param v the vector of stats.\n   * @return the stat\n   */\n  template <typename T> static T findByName(const std::vector<T>& v, const std::string& name) {\n    auto pos = std::find_if(v.begin(), v.end(),\n                            [&name](const T& stat) -> bool { return stat->name() == name; });\n    if (pos == v.end()) {\n      return nullptr;\n    }\n    return *pos;\n  }\n\n  /**\n   * Find a counter in a stats store.\n   * @param store supplies the stats store.\n   * @param name supplies the name to search for.\n   * @return Stats::CounterSharedPtr the counter or nullptr if there is none.\n   */\n  static Stats::CounterSharedPtr findCounter(Stats::Store& store, const std::string& name);\n\n  /**\n   * Find a gauge in a stats store.\n   * @param store supplies the stats store.\n   * @param name supplies the name to search for.\n   * @return Stats::GaugeSharedPtr the gauge or nullptr if there is none.\n   */\n  static Stats::GaugeSharedPtr findGauge(Stats::Store& store, const std::string& name);\n\n  /**\n   * Wait for a counter to == a given value.\n   * @param store supplies the stats store.\n   * @param name supplies the name of the counter to wait for.\n   * @param value supplies the value of the counter.\n   * @param time_system the time system to use for waiting.\n   * 
@param timeout the maximum time to wait before timing out, or 0 for no timeout.\n   * @param dispatcher the dispatcher to run non-blocking periodically during the wait.\n   * @return AssertionSuccess() if the counter was == to the value within the timeout, else\n   * AssertionFailure().\n   */\n  static AssertionResult\n  waitForCounterEq(Stats::Store& store, const std::string& name, uint64_t value,\n                   Event::TestTimeSystem& time_system,\n                   std::chrono::milliseconds timeout = std::chrono::milliseconds::zero(),\n                   Event::Dispatcher* dispatcher = nullptr);\n\n  /**\n   * Wait for a counter to >= a given value.\n   * @param store supplies the stats store.\n   * @param name counter name.\n   * @param value target value.\n   * @param time_system the time system to use for waiting.\n   * @param timeout the maximum time to wait before timing out, or 0 for no timeout.\n   * @return AssertionSuccess() if the counter was >= to the value within the timeout, else\n   * AssertionFailure().\n   */\n  static AssertionResult\n  waitForCounterGe(Stats::Store& store, const std::string& name, uint64_t value,\n                   Event::TestTimeSystem& time_system,\n                   std::chrono::milliseconds timeout = std::chrono::milliseconds::zero());\n\n  /**\n   * Wait for a gauge to >= a given value.\n   * @param store supplies the stats store.\n   * @param name gauge name.\n   * @param value target value.\n   * @param time_system the time system to use for waiting.\n   * @param timeout the maximum time to wait before timing out, or 0 for no timeout.\n   * @return AssertionSuccess() if the counter gauge >= to the value within the timeout, else\n   * AssertionFailure().\n   */\n  static AssertionResult\n  waitForGaugeGe(Stats::Store& store, const std::string& name, uint64_t value,\n                 Event::TestTimeSystem& time_system,\n                 std::chrono::milliseconds timeout = std::chrono::milliseconds::zero());\n\n  
/**\n   * Wait for a gauge to == a given value.\n   * @param store supplies the stats store.\n   * @param name gauge name.\n   * @param value target value.\n   * @param time_system the time system to use for waiting.\n   * @param timeout the maximum time to wait before timing out, or 0 for no timeout.\n   * @return AssertionSuccess() if the gauge was == to the value within the timeout, else\n   * AssertionFailure().\n   */\n  static AssertionResult\n  waitForGaugeEq(Stats::Store& store, const std::string& name, uint64_t value,\n                 Event::TestTimeSystem& time_system,\n                 std::chrono::milliseconds timeout = std::chrono::milliseconds::zero());\n\n  /**\n   * Find a readout in a stats store.\n   * @param store supplies the stats store.\n   * @param name supplies the name to search for.\n   * @return Stats::TextReadoutSharedPtr the readout or nullptr if there is none.\n   */\n  static Stats::TextReadoutSharedPtr findTextReadout(Stats::Store& store, const std::string& name);\n\n  /**\n   * Convert a string list of IP addresses into a list of network addresses usable for DNS\n   * response testing.\n   */\n  static std::list<Network::DnsResponse>\n  makeDnsResponse(const std::list<std::string>& addresses,\n                  std::chrono::seconds = std::chrono::seconds(0));\n\n  /**\n   * List files in a given directory path\n   *\n   * @param path directory path to list\n   * @param recursive whether or not to traverse subdirectories\n   * @return std::vector<std::string> filenames\n   */\n  static std::vector<std::string> listFiles(const std::string& path, bool recursive);\n\n  /**\n   * Return a unique temporary filename for use in tests.\n   *\n   * @return a filename based on the process id and current time.\n   */\n\n  static std::string uniqueFilename() {\n    return absl::StrCat(getpid(), \"_\", std::chrono::system_clock::now().time_since_epoch().count());\n  }\n\n  /**\n   * Compare two protos of the same type for equality.\n   *\n   * 
@param lhs proto on LHS.\n   * @param rhs proto on RHS.\n   * @param ignore_repeated_field_ordering if true, repeated field ordering will be ignored.\n   * @return bool indicating whether the protos are equal.\n   */\n  static bool protoEqual(const Protobuf::Message& lhs, const Protobuf::Message& rhs,\n                         bool ignore_repeated_field_ordering = false) {\n    Protobuf::util::MessageDifferencer differencer;\n    differencer.set_message_field_comparison(Protobuf::util::MessageDifferencer::EQUIVALENT);\n    if (ignore_repeated_field_ordering) {\n      differencer.set_repeated_field_comparison(Protobuf::util::MessageDifferencer::AS_SET);\n    }\n    return differencer.Compare(lhs, rhs);\n  }\n\n  static bool protoEqualIgnoringField(const Protobuf::Message& lhs, const Protobuf::Message& rhs,\n                                      const std::string& field_to_ignore) {\n    Protobuf::util::MessageDifferencer differencer;\n    const Protobuf::FieldDescriptor* ignored_field =\n        lhs.GetDescriptor()->FindFieldByName(field_to_ignore);\n    ASSERT(ignored_field != nullptr, \"Field name to ignore not found.\");\n    differencer.IgnoreField(ignored_field);\n    return differencer.Compare(lhs, rhs);\n  }\n\n  /**\n   * Compare two decoded resources for equality.\n   *\n   * @param lhs decoded resource on LHS.\n   * @param rhs decoded resource on RHS.\n   * @return bool indicating whether the decoded resources are equal.\n   */\n  static bool decodedResourceEq(const Config::DecodedResource& lhs,\n                                const Config::DecodedResource& rhs) {\n    return lhs.name() == rhs.name() && lhs.aliases() == rhs.aliases() &&\n           lhs.version() == rhs.version() && lhs.hasResource() == rhs.hasResource() &&\n           (!lhs.hasResource() || protoEqual(lhs.resource(), rhs.resource()));\n  }\n\n  /**\n   * Compare two JSON strings serialized from ProtobufWkt::Struct for equality. 
When two identical\n   * ProtobufWkt::Struct are serialized into JSON strings, the results have the same set of\n   * properties (values), but the positions may be different.\n   *\n   * @param lhs JSON string on LHS.\n   * @param rhs JSON string on RHS.\n   * @return bool indicating whether the JSON strings are equal.\n   */\n  static bool jsonStringEqual(const std::string& lhs, const std::string& rhs) {\n    return protoEqual(jsonToStruct(lhs), jsonToStruct(rhs));\n  }\n\n  /**\n   * Symmetrically pad a string with '=' out to a desired length.\n   * @param to_pad the string being padded around.\n   * @param desired_length the length we want the padding to bring the string up to.\n   * @return the padded string.\n   */\n  static std::string addLeftAndRightPadding(absl::string_view to_pad, int desired_length = 80);\n\n  /**\n   * Split a string.\n   * @param source supplies the string to split.\n   * @param split supplies the char to split on.\n   * @return vector of strings computed after splitting `source` around all instances of `split`.\n   */\n  static std::vector<std::string> split(const std::string& source, char split);\n\n  /**\n   * Split a string.\n   * @param source supplies the string to split.\n   * @param split supplies the string to split on.\n   * @param keep_empty_string result contains empty strings if the string starts or ends with\n   * 'split', or if instances of 'split' are adjacent.\n   * @return vector of strings computed after splitting `source` around all instances of `split`.\n   */\n  static std::vector<std::string> split(const std::string& source, const std::string& split,\n                                        bool keep_empty_string = false);\n\n  /**\n   * Compare two RepeatedPtrFields of the same type for equality.\n   *\n   * @param lhs RepeatedPtrField on LHS.\n   * @param rhs RepeatedPtrField on RHS.\n   * @param ignore_ordering if ordering should be ignored. 
Note if true this turns\n   *   comparison into an N^2 operation.\n   * @return bool indicating whether the RepeatedPtrField are equal. TestUtility::protoEqual() is\n   *              used for individual element testing.\n   */\n  template <typename ProtoType>\n  static bool repeatedPtrFieldEqual(const Protobuf::RepeatedPtrField<ProtoType>& lhs,\n                                    const Protobuf::RepeatedPtrField<ProtoType>& rhs,\n                                    bool ignore_ordering = false) {\n    if (lhs.size() != rhs.size()) {\n      return false;\n    }\n\n    if (!ignore_ordering) {\n      for (int i = 0; i < lhs.size(); ++i) {\n        if (!TestUtility::protoEqual(lhs[i], rhs[i], /*ignore_ordering=*/false)) {\n          return false;\n        }\n      }\n\n      return true;\n    }\n    using ProtoList = std::list<std::unique_ptr<const Protobuf::Message>>;\n    // Iterate through using protoEqual as ignore_ordering is true, and fields\n    // in the sub-protos may also be out of order.\n    ProtoList lhs_list =\n        RepeatedPtrUtil::convertToConstMessagePtrContainer<ProtoType, ProtoList>(lhs);\n    ProtoList rhs_list =\n        RepeatedPtrUtil::convertToConstMessagePtrContainer<ProtoType, ProtoList>(rhs);\n    while (!lhs_list.empty()) {\n      bool found = false;\n      for (auto it = rhs_list.begin(); it != rhs_list.end(); ++it) {\n        if (TestUtility::protoEqual(*lhs_list.front(), **it,\n                                    /*ignore_ordering=*/true)) {\n          lhs_list.pop_front();\n          rhs_list.erase(it);\n          found = true;\n          break;\n        }\n      }\n      if (!found) {\n        return false;\n      }\n    }\n    return true;\n  }\n\n  template <class ProtoType>\n  static AssertionResult\n  assertRepeatedPtrFieldEqual(const Protobuf::RepeatedPtrField<ProtoType>& lhs,\n                              const Protobuf::RepeatedPtrField<ProtoType>& rhs,\n                              bool ignore_ordering = false) {\n    if 
(!repeatedPtrFieldEqual(lhs, rhs, ignore_ordering)) {\n      return AssertionFailure() << RepeatedPtrUtil::debugString(lhs) << \" does not match \"\n                                << RepeatedPtrUtil::debugString(rhs);\n    }\n\n    return AssertionSuccess();\n  }\n\n  /**\n   * Returns the closest thing to a sensible \"name\" field for the given xDS resource.\n   * @param resource the resource to extract the name of.\n   * @return the resource's name.\n   */\n  static std::string xdsResourceName(const ProtobufWkt::Any& resource);\n\n  /**\n   * Returns a \"novel\" IPv4 loopback address, if available.\n   * For many tests, we want a loopback address other than 127.0.0.1 where possible. For some\n   * platforms such as macOS, only 127.0.0.1 is available for IPv4 loopback.\n   *\n   * @return string 127.0.0.x , where x is \"1\" for macOS and \"9\" otherwise.\n   */\n  static std::string getIpv4Loopback() {\n#ifdef __APPLE__\n    return \"127.0.0.1\";\n#else\n    return \"127.0.0.9\";\n#endif\n  }\n\n  /**\n   * Return typed proto message object for YAML.\n   * @param yaml YAML string.\n   * @return MessageType parsed from yaml.\n   */\n  template <class MessageType> static MessageType parseYaml(const std::string& yaml) {\n    MessageType message;\n    TestUtility::loadFromYaml(yaml, message);\n    return message;\n  }\n\n  // Allows pretty printed test names for TEST_P using TestEnvironment::getIpVersionsForTest().\n  //\n  // Tests using this will be of the form IpVersions/SslSocketTest.HalfClose/IPv4\n  // instead of IpVersions/SslSocketTest.HalfClose/1\n  static std::string\n  ipTestParamsToString(const ::testing::TestParamInfo<Network::Address::IpVersion>& params) {\n    return params.param == Network::Address::IpVersion::v4 ? 
\"IPv4\" : \"IPv6\";\n  }\n\n  /**\n   * Return flip-ordered bytes.\n   * @param bytes input bytes.\n   * @return Type flip-ordered bytes.\n   */\n  template <class Type> static Type flipOrder(const Type& bytes) {\n    Type result{0};\n    Type data = bytes;\n    for (Type i = 0; i < sizeof(Type); i++) {\n      result <<= 8;\n      result |= (data & Type(0xFF));\n      data >>= 8;\n    }\n    return result;\n  }\n\n  static absl::Time parseTime(const std::string& input, const std::string& input_format);\n  static std::string formatTime(const absl::Time input, const std::string& output_format);\n  static std::string formatTime(const SystemTime input, const std::string& output_format);\n  static std::string convertTime(const std::string& input, const std::string& input_format,\n                                 const std::string& output_format);\n\n  static constexpr std::chrono::milliseconds DefaultTimeout = std::chrono::milliseconds(10000);\n\n  /**\n   * Return a prefix string matcher.\n   * @param string prefix.\n   * @return Object StringMatcher.\n   */\n  static const envoy::type::matcher::v3::StringMatcher createPrefixMatcher(std::string str) {\n    envoy::type::matcher::v3::StringMatcher matcher;\n    matcher.set_prefix(str);\n    return matcher;\n  }\n\n  /**\n   * Return an exact string matcher.\n   * @param string exact.\n   * @return Object StringMatcher.\n   */\n  static const envoy::type::matcher::v3::StringMatcher createExactMatcher(std::string str) {\n    envoy::type::matcher::v3::StringMatcher matcher;\n    matcher.set_exact(str);\n    return matcher;\n  }\n\n  /**\n   * Return a regex string matcher.\n   * @param string exact.\n   * @return Object StringMatcher.\n   */\n  static const envoy::type::matcher::v3::StringMatcher createRegexMatcher(std::string str) {\n    envoy::type::matcher::v3::StringMatcher matcher;\n    matcher.set_hidden_envoy_deprecated_regex(str);\n    return matcher;\n  }\n\n  /**\n   * Checks that passed gauges have a value of 0. 
Gauges can be omitted from\n   * this check by modifying the regex that matches gauge names in the\n   * implementation.\n   *\n   * @param vector of gauges to check.\n   * @return bool indicating that passed gauges not matching the omitted regex have a value of 0.\n   */\n  static bool gaugesZeroed(const std::vector<Stats::GaugeSharedPtr>& gauges);\n  static bool gaugesZeroed(\n      const std::vector<std::pair<absl::string_view, Stats::PrimitiveGaugeReference>>& gauges);\n\n  /**\n   * Returns the members of gauges that are not zero. Uses the same regex filter as gaugesZeroed().\n   */\n  static std::string nonZeroedGauges(const std::vector<Stats::GaugeSharedPtr>& gauges);\n\n  // Strict variants of Protobuf::MessageUtil\n  static void loadFromJson(const std::string& json, Protobuf::Message& message,\n                           bool preserve_original_type = false, bool avoid_boosting = false) {\n    MessageUtil::loadFromJson(json, message, ProtobufMessage::getStrictValidationVisitor(),\n                              !avoid_boosting);\n    if (!preserve_original_type) {\n      Config::VersionConverter::eraseOriginalTypeInformation(message);\n    }\n  }\n\n  static void loadFromJson(const std::string& json, ProtobufWkt::Struct& message) {\n    MessageUtil::loadFromJson(json, message);\n  }\n\n  static void loadFromYaml(const std::string& yaml, Protobuf::Message& message,\n                           bool preserve_original_type = false, bool avoid_boosting = false) {\n    MessageUtil::loadFromYaml(yaml, message, ProtobufMessage::getStrictValidationVisitor(),\n                              !avoid_boosting);\n    if (!preserve_original_type) {\n      Config::VersionConverter::eraseOriginalTypeInformation(message);\n    }\n  }\n\n  static void loadFromFile(const std::string& path, Protobuf::Message& message, Api::Api& api,\n                           bool preserve_original_type = false) {\n    MessageUtil::loadFromFile(path, message, 
ProtobufMessage::getStrictValidationVisitor(), api);\n    if (!preserve_original_type) {\n      Config::VersionConverter::eraseOriginalTypeInformation(message);\n    }\n  }\n\n  template <class MessageType>\n  static inline MessageType anyConvert(const ProtobufWkt::Any& message) {\n    return MessageUtil::anyConvert<MessageType>(message);\n  }\n\n  template <class MessageType>\n  static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message,\n                                      bool preserve_original_type = false,\n                                      bool avoid_boosting = false) {\n    MessageUtil::loadFromYamlAndValidate(\n        yaml, message, ProtobufMessage::getStrictValidationVisitor(), avoid_boosting);\n    if (!preserve_original_type) {\n      Config::VersionConverter::eraseOriginalTypeInformation(message);\n    }\n  }\n\n  template <class MessageType> static void validate(const MessageType& message) {\n    MessageUtil::validate(message, ProtobufMessage::getStrictValidationVisitor());\n  }\n\n  template <class MessageType>\n  static const MessageType& downcastAndValidate(const Protobuf::Message& config) {\n    return MessageUtil::downcastAndValidate<MessageType>(\n        config, ProtobufMessage::getStrictValidationVisitor());\n  }\n\n  static void jsonConvert(const Protobuf::Message& source, Protobuf::Message& dest) {\n    // Explicit round-tripping to support conversions inside tests between arbitrary messages as a\n    // convenience.\n    ProtobufWkt::Struct tmp;\n    MessageUtil::jsonConvert(source, tmp);\n    MessageUtil::jsonConvert(tmp, ProtobufMessage::getStrictValidationVisitor(), dest);\n  }\n\n  static ProtobufWkt::Struct jsonToStruct(const std::string& json) {\n    ProtobufWkt::Struct message;\n    MessageUtil::loadFromJson(json, message);\n    return message;\n  }\n\n  /**\n   * Extract the Protobuf binary format of a google.protobuf.Message as a string.\n   * @param message message of type 
type.googleapis.com/google.protobuf.Message.\n   * @return std::string of the Protobuf binary object.\n   */\n  static std::string getProtobufBinaryStringFromMessage(const Protobuf::Message& message) {\n    std::string pb_binary_str;\n    pb_binary_str.reserve(message.ByteSizeLong());\n    message.SerializeToString(&pb_binary_str);\n    return pb_binary_str;\n  }\n\n  template <class MessageType>\n  static Config::DecodedResourcesWrapper\n  decodeResources(std::initializer_list<MessageType> resources,\n                  const std::string& name_field = \"name\") {\n    Config::DecodedResourcesWrapper decoded_resources;\n    for (const auto& resource : resources) {\n      auto owned_resource = std::make_unique<MessageType>(resource);\n      decoded_resources.owned_resources_.emplace_back(new Config::DecodedResourceImpl(\n          std::move(owned_resource), MessageUtil::getStringField(resource, name_field), {}, \"\"));\n      decoded_resources.refvec_.emplace_back(*decoded_resources.owned_resources_.back());\n    }\n    return decoded_resources;\n  }\n\n  template <class MessageType>\n  static Config::DecodedResourcesWrapper decodeResources(std::vector<MessageType> resources,\n                                                         const std::string& name_field = \"name\") {\n    Config::DecodedResourcesWrapper decoded_resources;\n    for (const auto& resource : resources) {\n      auto owned_resource = std::make_unique<MessageType>(resource);\n      decoded_resources.owned_resources_.emplace_back(new Config::DecodedResourceImpl(\n          std::move(owned_resource), MessageUtil::getStringField(resource, name_field), {}, \"\"));\n      decoded_resources.refvec_.emplace_back(*decoded_resources.owned_resources_.back());\n    }\n    return decoded_resources;\n  }\n\n  template <class MessageType>\n  static Config::DecodedResourcesWrapper\n  decodeResources(const Protobuf::RepeatedPtrField<ProtobufWkt::Any>& resources,\n                  const std::string& version, 
const std::string& name_field = \"name\") {\n    TestOpaqueResourceDecoderImpl<MessageType> resource_decoder(name_field);\n    return Config::DecodedResourcesWrapper(resource_decoder, resources, version);\n  }\n\n  template <class MessageType>\n  static Config::DecodedResourcesWrapper\n  decodeResources(const envoy::service::discovery::v3::DiscoveryResponse& resources,\n                  const std::string& name_field = \"name\") {\n    return decodeResources<MessageType>(resources.resources(), resources.version_info(),\n                                        name_field);\n  }\n\n  template <class MessageType>\n  static Config::DecodedResourcesWrapper decodeResources(\n      const Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>& resources,\n      const std::string& name_field = \"name\") {\n    Config::DecodedResourcesWrapper decoded_resources;\n    TestOpaqueResourceDecoderImpl<MessageType> resource_decoder(name_field);\n    for (const auto& resource : resources) {\n      decoded_resources.owned_resources_.emplace_back(\n          new Config::DecodedResourceImpl(resource_decoder, resource));\n      decoded_resources.refvec_.emplace_back(*decoded_resources.owned_resources_.back());\n    }\n    return decoded_resources;\n  }\n\n  template <typename Current>\n  class TestOpaqueResourceDecoderImpl : public Config::OpaqueResourceDecoderImpl<Current> {\n  public:\n    TestOpaqueResourceDecoderImpl(absl::string_view name_field)\n        : Config::OpaqueResourceDecoderImpl<Current>(ProtobufMessage::getStrictValidationVisitor(),\n                                                     name_field) {}\n  };\n\n  /**\n   * Returns the string representation of a envoy::config::core::v3::ApiVersion.\n   *\n   * @param api_version to be converted.\n   * @return std::string representation of envoy::config::core::v3::ApiVersion.\n   */\n  static std::string\n  getVersionStringFromApiVersion(envoy::config::core::v3::ApiVersion api_version) {\n    switch 
(api_version) {\n    case envoy::config::core::v3::ApiVersion::AUTO:\n      return \"AUTO\";\n    case envoy::config::core::v3::ApiVersion::V2:\n      return \"V2\";\n    case envoy::config::core::v3::ApiVersion::V3:\n      return \"V3\";\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  /**\n   * Returns the fully-qualified name of a service, rendered from service_full_name_template.\n   *\n   * @param service_full_name_template the service fully-qualified name template.\n   * @param api_version version of a service.\n   * @param use_alpha if the alpha version is preferred.\n   * @param service_namespace to override the service namespace.\n   * @return std::string full path of a service method.\n   */\n  static std::string\n  getVersionedServiceFullName(const std::string& service_full_name_template,\n                              envoy::config::core::v3::ApiVersion api_version,\n                              bool use_alpha = false,\n                              const std::string& service_namespace = EMPTY_STRING) {\n    switch (api_version) {\n    case envoy::config::core::v3::ApiVersion::AUTO:\n      FALLTHRU;\n    case envoy::config::core::v3::ApiVersion::V2:\n      return fmt::format(service_full_name_template, use_alpha ? 
\"v2alpha\" : \"v2\",\n                         service_namespace);\n\n    case envoy::config::core::v3::ApiVersion::V3:\n      return fmt::format(service_full_name_template, \"v3\", service_namespace);\n    default:\n      NOT_REACHED_GCOVR_EXCL_LINE;\n    }\n  }\n\n  /**\n   * Returns the full path of a service method.\n   *\n   * @param service_full_name_template the service fully-qualified name template.\n   * @param method_name the method name.\n   * @param api_version version of a service method.\n   * @param use_alpha if the alpha version is preferred.\n   * @param service_namespace to override the service namespace.\n   * @return std::string full path of a service method.\n   */\n  static std::string getVersionedMethodPath(const std::string& service_full_name_template,\n                                            absl::string_view method_name,\n                                            envoy::config::core::v3::ApiVersion api_version,\n                                            bool use_alpha = false,\n                                            const std::string& service_namespace = EMPTY_STRING) {\n    return absl::StrCat(\"/\",\n                        getVersionedServiceFullName(service_full_name_template, api_version,\n                                                    use_alpha, service_namespace),\n                        \"/\", method_name);\n  }\n};\n\n/**\n * Wraps the common case of having a cross-thread \"one shot\" ready condition.\n *\n * It functions like absl::Notification except the usage of notifyAll() appears\n * to trigger tighter simultaneous wakeups in multiple threads, resulting in\n * more contentions, e.g. 
for BM_CreateRace in\n * ../common/stats/symbol_table_speed_test.cc.\n *\n * See\n *     https://github.com/abseil/abseil-cpp/blob/master/absl/synchronization/notification.h\n * for the absl impl, which appears to result in fewer contentions (and in\n * tests we want contentions).\n */\nclass ConditionalInitializer {\npublic:\n  /**\n   * Set the conditional to ready.\n   */\n  void setReady();\n\n  /**\n   * Block until the conditional is ready, will return immediately if it is already ready. This\n   * routine will also reset ready_ so that the initializer can be used again. setReady() should\n   * only be called once in between a call to waitReady().\n   */\n  void waitReady();\n\n  /**\n   * Waits until ready; does not reset it. This variation is immune to spurious\n   * condvar wakeups, and is also suitable for having multiple threads wait on\n   * a common condition.\n   */\n  void wait();\n\nprivate:\n  absl::Mutex mutex_;\n  bool ready_ ABSL_GUARDED_BY(mutex_){false};\n};\n\nnamespace Http {\n\n/**\n * All of the inline header functions that just pass through to the child header map.\n */\n#define DEFINE_TEST_INLINE_HEADER_FUNCS(name)                                                      \\\npublic:                                                                                            \\\n  const HeaderEntry* name() const override { return header_map_->name(); }                         \\\n  void append##name(absl::string_view data, absl::string_view delimiter) override {                \\\n    header_map_->append##name(data, delimiter);                                                    \\\n    header_map_->verifyByteSizeInternalForTest();                                                  \\\n  }                                                                                                \\\n  void setReference##name(absl::string_view value) override {                                      \\\n    header_map_->setReference##name(value);                  
                                      \\\n    header_map_->verifyByteSizeInternalForTest();                                                  \\\n  }                                                                                                \\\n  void set##name(absl::string_view value) override {                                               \\\n    header_map_->set##name(value);                                                                 \\\n    header_map_->verifyByteSizeInternalForTest();                                                  \\\n  }                                                                                                \\\n  void set##name(uint64_t value) override {                                                        \\\n    header_map_->set##name(value);                                                                 \\\n    header_map_->verifyByteSizeInternalForTest();                                                  \\\n  }                                                                                                \\\n  size_t remove##name() override {                                                                 \\\n    const size_t headers_removed = header_map_->remove##name();                                    \\\n    header_map_->verifyByteSizeInternalForTest();                                                  \\\n    return headers_removed;                                                                        \\\n  }                                                                                                \\\n  absl::string_view get##name##Value() const override { return header_map_->get##name##Value(); }\n\n/**\n * Base class for all test header map types. This class wraps an underlying real header map\n * implementation, passes through all calls, and adds some niceties for testing that we don't\n * want in the production implementation for performance reasons. 
The wrapping functionality is\n * primarily here to deal with complexities around virtual calls in some constructor paths in\n * HeaderMapImpl.\n */\ntemplate <class Interface, class Impl> class TestHeaderMapImplBase : public Interface {\npublic:\n  TestHeaderMapImplBase() = default;\n  TestHeaderMapImplBase(const std::initializer_list<std::pair<std::string, std::string>>& values) {\n    for (auto& value : values) {\n      header_map_->addCopy(LowerCaseString(value.first), value.second);\n    }\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  TestHeaderMapImplBase(const TestHeaderMapImplBase& rhs)\n      : TestHeaderMapImplBase(*rhs.header_map_) {}\n  TestHeaderMapImplBase(const HeaderMap& rhs) {\n    HeaderMapImpl::copyFrom(*header_map_, rhs);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  TestHeaderMapImplBase& operator=(const TestHeaderMapImplBase& rhs) {\n    if (this == &rhs) {\n      return *this;\n    }\n    clear();\n    HeaderMapImpl::copyFrom(*header_map_, rhs);\n    header_map_->verifyByteSizeInternalForTest();\n    return *this;\n  }\n\n  // Value added methods on top of HeaderMap.\n  void addCopy(const std::string& key, const std::string& value) {\n    addCopy(LowerCaseString(key), value);\n  }\n  std::string get_(const std::string& key) const { return get_(LowerCaseString(key)); }\n  std::string get_(const LowerCaseString& key) const {\n    const HeaderEntry* header = get(key);\n    if (!header) {\n      return EMPTY_STRING;\n    } else {\n      return std::string(header->value().getStringView());\n    }\n  }\n  bool has(const std::string& key) const { return get(LowerCaseString(key)) != nullptr; }\n  bool has(const LowerCaseString& key) const { return get(key) != nullptr; }\n  size_t remove(const std::string& key) { return remove(LowerCaseString(key)); }\n\n  // HeaderMap\n  bool operator==(const HeaderMap& rhs) const override { return header_map_->operator==(rhs); }\n  bool operator!=(const HeaderMap& rhs) const override { 
return header_map_->operator!=(rhs); }\n  void addViaMove(HeaderString&& key, HeaderString&& value) override {\n    header_map_->addViaMove(std::move(key), std::move(value));\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void addReference(const LowerCaseString& key, absl::string_view value) override {\n    header_map_->addReference(key, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void addReferenceKey(const LowerCaseString& key, uint64_t value) override {\n    header_map_->addReferenceKey(key, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void addReferenceKey(const LowerCaseString& key, absl::string_view value) override {\n    header_map_->addReferenceKey(key, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void addCopy(const LowerCaseString& key, uint64_t value) override {\n    header_map_->addCopy(key, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void addCopy(const LowerCaseString& key, absl::string_view value) override {\n    header_map_->addCopy(key, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void appendCopy(const LowerCaseString& key, absl::string_view value) override {\n    header_map_->appendCopy(key, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void setReference(const LowerCaseString& key, absl::string_view value) override {\n    header_map_->setReference(key, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void setReferenceKey(const LowerCaseString& key, absl::string_view value) override {\n    header_map_->setReferenceKey(key, value);\n  }\n  void setCopy(const LowerCaseString& key, absl::string_view value) override {\n    header_map_->setCopy(key, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  uint64_t byteSize() const override { return header_map_->byteSize(); }\n  const HeaderEntry* get(const LowerCaseString& key) const override {\n    return header_map_->get(key);\n  }\n  
HeaderMap::GetResult getAll(const LowerCaseString& key) const override {\n    return header_map_->getAll(key);\n  }\n  void iterate(HeaderMap::ConstIterateCb cb) const override { header_map_->iterate(cb); }\n  void iterateReverse(HeaderMap::ConstIterateCb cb) const override {\n    header_map_->iterateReverse(cb);\n  }\n  void clear() override {\n    header_map_->clear();\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  size_t remove(const LowerCaseString& key) override {\n    size_t headers_removed = header_map_->remove(key);\n    header_map_->verifyByteSizeInternalForTest();\n    return headers_removed;\n  }\n  size_t removeIf(const HeaderMap::HeaderMatchPredicate& predicate) override {\n    size_t headers_removed = header_map_->removeIf(predicate);\n    header_map_->verifyByteSizeInternalForTest();\n    return headers_removed;\n  }\n  size_t removePrefix(const LowerCaseString& key) override {\n    size_t headers_removed = header_map_->removePrefix(key);\n    header_map_->verifyByteSizeInternalForTest();\n    return headers_removed;\n  }\n  size_t size() const override { return header_map_->size(); }\n  bool empty() const override { return header_map_->empty(); }\n  void dumpState(std::ostream& os, int indent_level = 0) const override {\n    header_map_->dumpState(os, indent_level);\n  }\n\n  using Handle = typename CustomInlineHeaderRegistry::Handle<Interface::header_map_type>;\n  const HeaderEntry* getInline(Handle handle) const override {\n    return header_map_->getInline(handle);\n  }\n  void appendInline(Handle handle, absl::string_view data, absl::string_view delimiter) override {\n    header_map_->appendInline(handle, data, delimiter);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void setReferenceInline(Handle handle, absl::string_view value) override {\n    header_map_->setReferenceInline(handle, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void setInline(Handle handle, absl::string_view value) override {\n    
header_map_->setInline(handle, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  void setInline(Handle handle, uint64_t value) override {\n    header_map_->setInline(handle, value);\n    header_map_->verifyByteSizeInternalForTest();\n  }\n  size_t removeInline(Handle handle) override {\n    const size_t rc = header_map_->removeInline(handle);\n    header_map_->verifyByteSizeInternalForTest();\n    return rc;\n  }\n\n  std::unique_ptr<Impl> header_map_{Impl::create()};\n};\n\n/**\n * Typed test implementations for all of the concrete header types.\n */\nclass TestRequestHeaderMapImpl\n    : public TestHeaderMapImplBase<RequestHeaderMap, RequestHeaderMapImpl> {\npublic:\n  using TestHeaderMapImplBase::TestHeaderMapImplBase;\n\n  INLINE_REQ_HEADERS(DEFINE_TEST_INLINE_HEADER_FUNCS)\n  INLINE_REQ_RESP_HEADERS(DEFINE_TEST_INLINE_HEADER_FUNCS)\n};\n\nusing TestRequestTrailerMapImpl = TestHeaderMapImplBase<RequestTrailerMap, RequestTrailerMapImpl>;\n\nclass TestResponseHeaderMapImpl\n    : public TestHeaderMapImplBase<ResponseHeaderMap, ResponseHeaderMapImpl> {\npublic:\n  using TestHeaderMapImplBase::TestHeaderMapImplBase;\n\n  INLINE_RESP_HEADERS(DEFINE_TEST_INLINE_HEADER_FUNCS)\n  INLINE_REQ_RESP_HEADERS(DEFINE_TEST_INLINE_HEADER_FUNCS)\n  INLINE_RESP_HEADERS_TRAILERS(DEFINE_TEST_INLINE_HEADER_FUNCS)\n};\n\nclass TestResponseTrailerMapImpl\n    : public TestHeaderMapImplBase<ResponseTrailerMap, ResponseTrailerMapImpl> {\npublic:\n  using TestHeaderMapImplBase::TestHeaderMapImplBase;\n\n  INLINE_RESP_HEADERS_TRAILERS(DEFINE_TEST_INLINE_HEADER_FUNCS)\n};\n\n// Helper method to create a header map from an initializer list. 
Useful due to make_unique's\n// inability to infer the initializer list type.\ntemplate <class T>\ninline std::unique_ptr<T>\nmakeHeaderMap(const std::initializer_list<std::pair<std::string, std::string>>& values) {\n  return std::make_unique<T, const std::initializer_list<std::pair<std::string, std::string>>&>(\n      values);\n}\n\n} // namespace Http\n\nnamespace Api {\nApiPtr createApiForTest();\nApiPtr createApiForTest(Random::RandomGenerator& random);\nApiPtr createApiForTest(Stats::Store& stat_store);\nApiPtr createApiForTest(Stats::Store& stat_store, Random::RandomGenerator& random);\nApiPtr createApiForTest(Event::TimeSystem& time_system);\nApiPtr createApiForTest(Stats::Store& stat_store, Event::TimeSystem& time_system);\n} // namespace Api\n\nMATCHER_P(HeaderMapEqualIgnoreOrder, expected, \"\") {\n  const bool equal = TestUtility::headerMapEqualIgnoreOrder(*arg, *expected);\n  if (!equal) {\n    *result_listener << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"Expected header map:\") << \"\\n\"\n                     << *expected\n                     << TestUtility::addLeftAndRightPadding(\"is not equal to actual header map:\")\n                     << \"\\n\"\n                     << *arg << TestUtility::addLeftAndRightPadding(\"\") // line full of padding\n                     << \"\\n\";\n  }\n  return equal;\n}\n\nMATCHER_P(ProtoEq, expected, \"\") {\n  const bool equal =\n      TestUtility::protoEqual(arg, expected, /*ignore_repeated_field_ordering=*/false);\n  if (!equal) {\n    *result_listener << \"\\n\"\n                     << \"==========================Expected proto:===========================\\n\"\n                     << expected.DebugString()\n                     << \"------------------is not equal to actual proto:---------------------\\n\"\n                     << arg.DebugString()\n                     << \"====================================================================\\n\";\n  }\n  return 
equal;\n}\n\nMATCHER_P(ProtoEqIgnoreRepeatedFieldOrdering, expected, \"\") {\n  const bool equal =\n      TestUtility::protoEqual(arg, expected, /*ignore_repeated_field_ordering=*/true);\n  if (!equal) {\n    *result_listener << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"Expected proto:\") << \"\\n\"\n                     << expected.DebugString()\n                     << TestUtility::addLeftAndRightPadding(\"is not equal to actual proto:\") << \"\\n\"\n                     << arg.DebugString()\n                     << TestUtility::addLeftAndRightPadding(\"\") // line full of padding\n                     << \"\\n\";\n  }\n  return equal;\n}\n\nMATCHER_P2(ProtoEqIgnoringField, expected, ignored_field, \"\") {\n  const bool equal = TestUtility::protoEqualIgnoringField(arg, expected, ignored_field);\n  if (!equal) {\n    std::string but_ignoring = absl::StrCat(\"(but ignoring \", ignored_field, \")\");\n    *result_listener << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"Expected proto:\") << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(but_ignoring) << \"\\n\"\n                     << expected.DebugString()\n                     << TestUtility::addLeftAndRightPadding(\"is not equal to actual proto:\") << \"\\n\"\n                     << arg.DebugString()\n                     << TestUtility::addLeftAndRightPadding(\"\") // line full of padding\n                     << \"\\n\";\n  }\n  return equal;\n}\n\nMATCHER_P(RepeatedProtoEq, expected, \"\") {\n  const bool equal = TestUtility::repeatedPtrFieldEqual(arg, expected);\n  if (!equal) {\n    *result_listener << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"Expected repeated:\") << \"\\n\"\n                     << RepeatedPtrUtil::debugString(expected) << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"is not equal to actual repeated:\")\n                     << \"\\n\"\n                     
<< RepeatedPtrUtil::debugString(arg) << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"\") // line full of padding\n                     << \"\\n\";\n  }\n  return equal;\n}\n\nMATCHER_P(DecodedResourcesEq, expected, \"\") {\n  const bool equal = std::equal(arg.begin(), arg.end(), expected.begin(), expected.end(),\n                                TestUtility::decodedResourceEq);\n  if (!equal) {\n    const auto format_resources =\n        [](const std::vector<Config::DecodedResourceRef>& resources) -> std::string {\n      std::vector<std::string> resource_strs;\n      std::transform(\n          resources.begin(), resources.end(), std::back_inserter(resource_strs),\n          [](const Config::DecodedResourceRef& resource) -> std::string {\n            return fmt::format(\n                \"<name: {}, aliases: {}, version: {}, resource: {}>\", resource.get().name(),\n                absl::StrJoin(resource.get().aliases(), \",\"), resource.get().version(),\n                resource.get().hasResource() ? 
resource.get().resource().DebugString() : \"(none)\");\n          });\n      return absl::StrJoin(resource_strs, \", \");\n    };\n    *result_listener << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"Expected resources:\") << \"\\n\"\n                     << format_resources(expected) << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"are not equal to actual resources:\")\n                     << \"\\n\"\n                     << format_resources(arg) << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"\") // line full of padding\n                     << \"\\n\";\n  }\n  return equal;\n}\n\nMATCHER_P(Percent, rhs, \"\") {\n  envoy::type::v3::FractionalPercent expected;\n  expected.set_numerator(rhs);\n  expected.set_denominator(envoy::type::v3::FractionalPercent::HUNDRED);\n  return TestUtility::protoEqual(expected, arg, /*ignore_repeated_field_ordering=*/false);\n}\n\nMATCHER_P(JsonStringEq, expected, \"\") {\n  const bool equal = TestUtility::jsonStringEqual(arg, expected);\n  if (!equal) {\n    *result_listener << \"\\n\"\n                     << TestUtility::addLeftAndRightPadding(\"Expected JSON string:\") << \"\\n\"\n                     << expected\n                     << TestUtility::addLeftAndRightPadding(\"is not equal to actual JSON string:\")\n                     << \"\\n\"\n                     << arg << TestUtility::addLeftAndRightPadding(\"\") // line full of padding\n                     << \"\\n\";\n  }\n  return equal;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/utility_test.cc",
    "content": "#include \"envoy/service/discovery/v3/discovery.pb.h\"\n\n#include \"test/test_common/utility.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\nTEST(HeaderMapEqualIgnoreOrder, ActuallyEqual) {\n  Http::TestRequestHeaderMapImpl lhs{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  Http::TestRequestHeaderMapImpl rhs{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs));\n  EXPECT_EQ(lhs, rhs);\n}\n\nTEST(HeaderMapEqualIgnoreOrder, IgnoreOrder) {\n  Http::TestRequestHeaderMapImpl lhs{{\":method\", \"GET\"}, {\":authority\", \"host\"}, {\":path\", \"/\"}};\n  Http::TestRequestHeaderMapImpl rhs{{\":method\", \"GET\"}, {\":path\", \"/\"}, {\":authority\", \"host\"}};\n  EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs));\n  EXPECT_THAT(&lhs, HeaderMapEqualIgnoreOrder(&rhs));\n  EXPECT_FALSE(lhs == rhs);\n}\n\nTEST(HeaderMapEqualIgnoreOrder, NotEqual) {\n  Http::TestRequestHeaderMapImpl lhs{\n      {\":method\", \"GET\"}, {\":authority\", \"host\"}, {\":authority\", \"host\"}};\n  Http::TestRequestHeaderMapImpl rhs{{\":method\", \"GET\"}, {\":authority\", \"host\"}};\n  EXPECT_FALSE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs));\n}\n\nTEST(ProtoEqIgnoreField, ActuallyEqual) {\n  // Ignored field equal\n  {\n    envoy::service::discovery::v3::DeltaDiscoveryRequest lhs, rhs;\n    lhs.set_response_nonce(\"nonce\");\n    rhs.set_response_nonce(\"nonce\");\n    lhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    rhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    EXPECT_TRUE(TestUtility::protoEqualIgnoringField(lhs, rhs, \"response_nonce\"));\n  }\n  // Ignored field not equal\n  {\n    envoy::service::discovery::v3::DeltaDiscoveryRequest lhs, rhs;\n    lhs.set_response_nonce(\"nonce\");\n    rhs.set_response_nonce(\"noncense\");\n    
lhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    rhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    EXPECT_TRUE(TestUtility::protoEqualIgnoringField(lhs, rhs, \"response_nonce\"));\n  }\n  // Ignored field not present\n  {\n    envoy::service::discovery::v3::DeltaDiscoveryRequest lhs, rhs;\n    lhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    rhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    EXPECT_TRUE(TestUtility::protoEqualIgnoringField(lhs, rhs, \"response_nonce\"));\n  }\n  // Ignored field only present in one\n  {\n    envoy::service::discovery::v3::DeltaDiscoveryRequest lhs, rhs;\n    rhs.set_response_nonce(\"noncense\");\n    lhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    rhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    EXPECT_TRUE(TestUtility::protoEqualIgnoringField(lhs, rhs, \"response_nonce\"));\n  }\n}\n\nTEST(ProtoEqIgnoreField, NotEqual) {\n  // Ignored field equal\n  {\n    envoy::service::discovery::v3::DeltaDiscoveryRequest lhs, rhs;\n    lhs.set_response_nonce(\"nonce\");\n    rhs.set_response_nonce(\"nonce\");\n    lhs.set_type_url(\"type.googleapis.com/envoy.api.v2.Listener\");\n    rhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    EXPECT_FALSE(TestUtility::protoEqualIgnoringField(lhs, rhs, \"response_nonce\"));\n  }\n  // Ignored field not equal\n  {\n    envoy::service::discovery::v3::DeltaDiscoveryRequest lhs, rhs;\n    lhs.set_response_nonce(\"nonce\");\n    rhs.set_response_nonce(\"noncense\");\n    lhs.set_type_url(\"type.googleapis.com/envoy.api.v2.Listener\");\n    rhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    EXPECT_FALSE(TestUtility::protoEqualIgnoringField(lhs, rhs, \"response_nonce\"));\n  }\n  // Ignored field not present\n  {\n    
envoy::service::discovery::v3::DeltaDiscoveryRequest lhs, rhs;\n    lhs.set_type_url(\"type.googleapis.com/envoy.api.v2.Listener\");\n    rhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    EXPECT_FALSE(TestUtility::protoEqualIgnoringField(lhs, rhs, \"response_nonce\"));\n  }\n  // Ignored field only present in one\n  {\n    envoy::service::discovery::v3::DeltaDiscoveryRequest lhs, rhs;\n    rhs.set_response_nonce(\"noncense\");\n    lhs.set_type_url(\"type.googleapis.com/envoy.api.v2.Listener\");\n    rhs.set_type_url(\"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\");\n    EXPECT_FALSE(TestUtility::protoEqualIgnoringField(lhs, rhs, \"response_nonce\"));\n  }\n}\n\nTEST(BuffersEqual, Aligned) {\n  Buffer::OwnedImpl buffer1, buffer2;\n  EXPECT_TRUE(TestUtility::buffersEqual(buffer1, buffer2));\n\n  buffer1.appendSliceForTest(\"hello\");\n  EXPECT_FALSE(TestUtility::buffersEqual(buffer1, buffer2));\n  buffer2.appendSliceForTest(\"hello\");\n  EXPECT_TRUE(TestUtility::buffersEqual(buffer1, buffer2));\n\n  buffer1.appendSliceForTest(\", world\");\n  EXPECT_FALSE(TestUtility::buffersEqual(buffer1, buffer2));\n  buffer2.appendSliceForTest(\", world\");\n  EXPECT_TRUE(TestUtility::buffersEqual(buffer1, buffer2));\n}\n\nTEST(BuffersEqual, NonAligned) {\n  Buffer::OwnedImpl buffer1, buffer2;\n  EXPECT_TRUE(TestUtility::buffersEqual(buffer1, buffer2));\n\n  buffer1.appendSliceForTest(\"hello\");\n  EXPECT_FALSE(TestUtility::buffersEqual(buffer1, buffer2));\n  buffer2.appendSliceForTest(\"hello\");\n  EXPECT_TRUE(TestUtility::buffersEqual(buffer1, buffer2));\n\n  buffer1.appendSliceForTest(\", \");\n  buffer1.appendSliceForTest(\"world\");\n  EXPECT_FALSE(TestUtility::buffersEqual(buffer1, buffer2));\n  buffer2.appendSliceForTest(\", world\");\n  EXPECT_TRUE(TestUtility::buffersEqual(buffer1, buffer2));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_common/wasm_base.h",
    "content": "#include <cstdio>\n\n#include \"envoy/extensions/wasm/v3/wasm.pb.validate.h\"\n#include \"envoy/server/lifecycle_notifier.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/message_impl.h\"\n#include \"common/stats/isolated_store_impl.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"extensions/common/wasm/wasm.h\"\n#include \"extensions/common/wasm/wasm_state.h\"\n\n#include \"test/mocks/grpc/mocks.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/network/mocks.h\"\n#include \"test/mocks/server/mocks.h\"\n#include \"test/mocks/ssl/mocks.h\"\n#include \"test/mocks/stream_info/mocks.h\"\n#include \"test/mocks/thread_local/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\nnamespace Extensions {\nnamespace Common {\nnamespace Wasm {\n\n#define MOCK_CONTEXT_LOG_                                                                          \\\n  using Context::log;                                                                              \\\n  proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override {                 \\\n    log_(static_cast<spdlog::level::level_enum>(level), message);                                  \\\n    return proxy_wasm::WasmResult::Ok;                                                             \\\n  }                                                                                                \\\n  MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message))\n\nclass DeferredRunner {\npublic:\n  ~DeferredRunner() {\n    if (f_) {\n      f_();\n    }\n  }\n  void setFunction(std::function<void()> f) { f_ = f; }\n\nprivate:\n  std::function<void()> f_;\n};\n\ntemplate <typename Base = testing::Test> class WasmTestBase : 
public Base {\npublic:\n  // NOLINTNEXTLINE(readability-identifier-naming)\n  void SetUp() override { clearCodeCacheForTesting(); }\n\n  void setupBase(const std::string& runtime, const std::string& code, CreateContextFn create_root,\n                 std::string root_id = \"\", std::string vm_configuration = \"\",\n                 bool fail_open = false, std::string plugin_configuration = \"\") {\n    envoy::extensions::wasm::v3::VmConfig vm_config;\n    vm_config.set_vm_id(\"vm_id\");\n    vm_config.set_runtime(absl::StrCat(\"envoy.wasm.runtime.\", runtime));\n    ProtobufWkt::StringValue vm_configuration_string;\n    vm_configuration_string.set_value(vm_configuration);\n    vm_config.mutable_configuration()->PackFrom(vm_configuration_string);\n    vm_config.mutable_code()->mutable_local()->set_inline_bytes(code);\n    Api::ApiPtr api = Api::createApiForTest(stats_store_);\n    scope_ = Stats::ScopeSharedPtr(stats_store_.createScope(\"wasm.\"));\n    auto name = \"plugin_name\";\n    auto vm_id = \"\";\n    plugin_ = std::make_shared<Extensions::Common::Wasm::Plugin>(\n        name, root_id, vm_id, runtime, plugin_configuration, fail_open,\n        envoy::config::core::v3::TrafficDirection::INBOUND, local_info_, &listener_metadata_);\n    // Passes ownership of root_context_.\n    Extensions::Common::Wasm::createWasm(\n        vm_config, plugin_, scope_, cluster_manager_, init_manager_, dispatcher_, *api,\n        lifecycle_notifier_, remote_data_provider_,\n        [this](WasmHandleSharedPtr wasm) { wasm_ = wasm; }, create_root);\n    if (wasm_) {\n      wasm_ = getOrCreateThreadLocalWasm(\n          wasm_, plugin_, dispatcher_,\n          [this, create_root](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) {\n            root_context_ = static_cast<Context*>(create_root(wasm, plugin));\n            return root_context_;\n          });\n    }\n  }\n\n  WasmHandleSharedPtr& wasm() { return wasm_; }\n  Context* rootContext() { return root_context_; }\n\n  
DeferredRunner deferred_runner_;\n  Stats::IsolatedStoreImpl stats_store_;\n  Stats::ScopeSharedPtr scope_;\n  NiceMock<ThreadLocal::MockInstance> tls_;\n  NiceMock<Event::MockDispatcher> dispatcher_;\n  NiceMock<Upstream::MockClusterManager> cluster_manager_;\n  NiceMock<Init::MockManager> init_manager_;\n  WasmHandleSharedPtr wasm_;\n  PluginSharedPtr plugin_;\n  NiceMock<Envoy::Ssl::MockConnectionInfo> ssl_;\n  NiceMock<Envoy::Network::MockConnection> connection_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  NiceMock<LocalInfo::MockLocalInfo> local_info_;\n  NiceMock<Server::MockServerLifecycleNotifier> lifecycle_notifier_;\n  envoy::config::core::v3::Metadata listener_metadata_;\n  Context* root_context_ = nullptr; // Unowned.\n  Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_;\n};\n\ntemplate <typename Base = testing::Test> class WasmHttpFilterTestBase : public WasmTestBase<Base> {\npublic:\n  template <typename TestFilter> void setupFilterBase(const std::string root_id = \"\") {\n    auto wasm = WasmTestBase<Base>::wasm_ ? WasmTestBase<Base>::wasm_->wasm().get() : nullptr;\n    int root_context_id = wasm ? 
wasm->getRootContext(root_id)->id() : 0;\n    context_ = std::make_unique<TestFilter>(wasm, root_context_id, WasmTestBase<Base>::plugin_);\n    context_->setDecoderFilterCallbacks(decoder_callbacks_);\n    context_->setEncoderFilterCallbacks(encoder_callbacks_);\n  }\n\n  std::unique_ptr<Context> context_;\n  NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;\n  NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;\n  NiceMock<Envoy::StreamInfo::MockStreamInfo> request_stream_info_;\n};\n\ntemplate <typename Base = testing::Test>\nclass WasmNetworkFilterTestBase : public WasmTestBase<Base> {\npublic:\n  template <typename TestFilter> void setupFilterBase(const std::string root_id = \"\") {\n    auto wasm = WasmTestBase<Base>::wasm_ ? WasmTestBase<Base>::wasm_->wasm().get() : nullptr;\n    int root_context_id = wasm ? wasm->getRootContext(root_id)->id() : 0;\n    context_ = std::make_unique<TestFilter>(wasm, root_context_id, WasmTestBase<Base>::plugin_);\n    context_->initializeReadFilterCallbacks(read_filter_callbacks_);\n    context_->initializeWriteFilterCallbacks(write_filter_callbacks_);\n  }\n\n  std::unique_ptr<Context> context_;\n  NiceMock<Network::MockReadFilterCallbacks> read_filter_callbacks_;\n  NiceMock<Network::MockWriteFilterCallbacks> write_filter_callbacks_;\n};\n\n} // namespace Wasm\n} // namespace Common\n} // namespace Extensions\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_listener.cc",
    "content": "#include \"test/test_listener.h\"\n\n#include \"common/common/assert.h\"\n\n#include \"test/test_common/global.h\"\n\nnamespace Envoy {\n\nvoid TestListener::OnTestEnd(const ::testing::TestInfo& test_info) {\n  // Check that all singletons have been destroyed.\n  std::string active_singletons = Envoy::Test::Globals::describeActiveSingletons();\n  RELEASE_ASSERT(active_singletons.empty(),\n                 absl::StrCat(\"FAIL [\", test_info.test_suite_name(), \".\", test_info.name(),\n                              \"]: Active singletons exist. Something is leaking. Consider \"\n                              \"commenting out this assert and letting the heap checker run:\\n\",\n                              active_singletons));\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_listener.h",
    "content": "#pragma once\n\n#include \"gtest/gtest.h\"\n\nnamespace Envoy {\n\n// Provides a test listener to be called after each test method. This offers\n// a place to put hooks we'd like to run on every test. There's currently a\n// check that all test-scoped singletons have been destroyed. A test-scoped\n// singleton might remain at the end of a test if it's transitively referenced\n// by a leaked structure or a static.\n//\n// In the future, we can also add:\n//   - a test-specific ThreadFactory that enables us to verify there are no\n//     outstanding threads at the end of each thread.\n//   - a check that no more bytes of memory are allocated at the end of a test\n//     than there were at the start of it. This is likely to fail in a few\n//     places when introduced, but we could add known test overrides for this.\n//\n// Note: nothing compute-intensive should be put in this class, as it will\n// be a tax paid by every test method in the codebase.\nclass TestListener : public ::testing::EmptyTestEventListener {\n  void OnTestEnd(const ::testing::TestInfo& test_info) override;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_runner.cc",
    "content": "#include \"test/test_runner.h\"\n\n#include <regex>\n\n#include \"common/common/logger.h\"\n#include \"common/common/logger_delegates.h\"\n#include \"common/common/thread.h\"\n#include \"common/event/libevent.h\"\n#include \"common/runtime/runtime_features.h\"\n\n#include \"exe/process_wide.h\"\n\n#include \"server/backtrace.h\"\n\n#include \"test/common/runtime/utility.h\"\n#include \"test/mocks/access_log/mocks.h\"\n#include \"test/test_common/environment.h\"\n#include \"test/test_listener.h\"\n\n#include \"gmock/gmock.h\"\n\nnamespace Envoy {\n\nnamespace {\n\nstd::string findAndRemove(const std::regex& pattern, int& argc, char**& argv) {\n  std::smatch matched;\n  std::string return_value;\n  for (int i = 0; i < argc; ++i) {\n    if (return_value.empty()) {\n      std::string argument = std::string(argv[i]);\n      if (regex_search(argument, matched, pattern)) {\n        return_value = matched[1];\n        argc--;\n      }\n    }\n    if (!return_value.empty() && i < argc) {\n      argv[i] = argv[i + 1];\n    }\n  }\n  return return_value;\n}\n\n// This class is created iff a test is run with the special runtime override flag.\nclass RuntimeManagingListener : public ::testing::EmptyTestEventListener {\npublic:\n  RuntimeManagingListener(std::string& runtime_override, bool disable = false)\n      : runtime_override_(runtime_override), disable_(disable) {}\n\n  // On each test start, edit RuntimeFeaturesDefaults with our custom runtime defaults.\n  void OnTestStart(const ::testing::TestInfo&) override {\n    if (!runtime_override_.empty()) {\n      bool reset = disable_ ? 
Runtime::RuntimeFeaturesPeer::disableFeature(runtime_override_)\n                            : Runtime::RuntimeFeaturesPeer::enableFeature(runtime_override_);\n      if (!reset) {\n        // If the entry was already in the hash map, don't remove it OnTestEnd.\n        runtime_override_.clear();\n      }\n    }\n  }\n\n  // As each test ends, clean up the RuntimeFeaturesDefaults state.\n  void OnTestEnd(const ::testing::TestInfo&) override {\n    if (!runtime_override_.empty()) {\n      disable_ ? Runtime::RuntimeFeaturesPeer::enableFeature(runtime_override_)\n               : Runtime::RuntimeFeaturesPeer::disableFeature(runtime_override_);\n    }\n  }\n  std::string runtime_override_;\n  // This marks whether the runtime feature was enabled by default and needs to be overridden to\n  // false.\n  bool disable_;\n};\n\n} // namespace\n\nint TestRunner::RunTests(int argc, char** argv) {\n  ::testing::InitGoogleMock(&argc, argv);\n  // We hold on to process_wide to provide RAII cleanup of process-wide\n  // state.\n  ProcessWide process_wide;\n  // Add a test-listener so we can call a hook where we can do a quiescence\n  // check after each method. 
See\n  // https://github.com/google/googletest/blob/master/googletest/docs/advanced.md\n  // for details.\n  ::testing::TestEventListeners& listeners = ::testing::UnitTest::GetInstance()->listeners();\n  listeners.Append(new TestListener);\n\n  // Use the recommended, but not default, \"threadsafe\" style for the Death Tests.\n  // See: https://github.com/google/googletest/commit/84ec2e0365d791e4ebc7ec249f09078fb5ab6caa\n  ::testing::FLAGS_gtest_death_test_style = \"threadsafe\";\n\n  // Set gtest properties\n  // (https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#logging-additional-information),\n  // they are available in the test XML.\n  // TODO(htuch): Log these as well?\n  testing::Test::RecordProperty(\"TemporaryDirectory\", TestEnvironment::temporaryDirectory());\n\n  TestEnvironment::setEnvVar(\"TEST_UDSDIR\", TestEnvironment::unixDomainSocketDirectory(), 1);\n\n  // Before letting TestEnvironment latch argv and argc, remove any runtime override flag.\n  // This allows doing test overrides of Envoy runtime features without adding\n  // test flags to the Envoy production command line.\n  const std::regex ENABLE_PATTERN{\"--runtime-feature-override-for-tests=(.*)\",\n                                  std::regex::optimize};\n  std::string runtime_override_enable = findAndRemove(ENABLE_PATTERN, argc, argv);\n  if (!runtime_override_enable.empty()) {\n    ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::testing), info,\n                        \"Running with runtime feature override enable {}\", runtime_override_enable);\n    // Set up a listener which will create a global runtime and set the feature\n    // to true for the duration of each test instance.\n    ::testing::TestEventListeners& listeners = ::testing::UnitTest::GetInstance()->listeners();\n    listeners.Append(new RuntimeManagingListener(runtime_override_enable));\n  }\n  const std::regex DISABLE_PATTERN{\"--runtime-feature-disable-for-tests=(.*)\",\n                
                   std::regex::optimize};\n  std::string runtime_override_disable = findAndRemove(DISABLE_PATTERN, argc, argv);\n  if (!runtime_override_disable.empty()) {\n    ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::testing), info,\n                        \"Running with runtime feature override disable {}\",\n                        runtime_override_disable);\n    // Set up a listener which will create a global runtime and set the feature\n    // to false for the duration of each test instance.\n    ::testing::TestEventListeners& listeners = ::testing::UnitTest::GetInstance()->listeners();\n    listeners.Append(new RuntimeManagingListener(runtime_override_disable, true));\n  }\n\n#ifdef ENVOY_CONFIG_COVERAGE\n  // Coverage tests are run with -l trace --log-path /dev/null, in order to\n  // ensure that all of the code-paths from the maximum level of tracing are\n  // covered in tests, but we don't wind up filling up CI with useless detailed\n  // artifacts.\n  //\n  // The downside of this is that if there's a crash, the backtrace is lost, as\n  // the backtracing mechanism uses logging, so force the backtraces to stderr.\n  BackwardsTrace::setLogToStderr(true);\n#endif\n\n  TestEnvironment::initializeOptions(argc, argv);\n  Thread::MutexBasicLockable lock;\n\n  Server::Options& options = TestEnvironment::getOptions();\n  Logger::Context logging_state(options.logLevel(), options.logFormat(), lock, false,\n                                options.enableFineGrainLogging());\n\n  // Allocate fake log access manager.\n  testing::NiceMock<AccessLog::MockAccessLogManager> access_log_manager;\n  std::unique_ptr<Logger::FileSinkDelegate> file_logger;\n\n  // Redirect all logs to fake file when --log-path arg is specified in command line.\n  if (!TestEnvironment::getOptions().logPath().empty()) {\n    file_logger = std::make_unique<Logger::FileSinkDelegate>(\n        TestEnvironment::getOptions().logPath(), access_log_manager, 
Logger::Registry::getSink());\n  }\n  return RUN_ALL_TESTS();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/test_runner.h",
    "content": "#pragma once\n\nnamespace Envoy {\n\nclass TestRunner {\npublic:\n  static int RunTests(int argc, char** argv);\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/tools/config_load_check/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_binary\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_library(\n    name = \"config_load_check_lib\",\n    srcs = [\"config_load_check.cc\"],\n    deps = [\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//test/config_test:config_test_lib\",\n    ],\n)\n\nenvoy_cc_test_binary(\n    name = \"config_load_check_tool\",\n    deps = [\n        \":config_load_check_lib\",\n        \"//source/common/common:minimal_logger_lib\",\n        \"//source/common/common:thread_lib\",\n        \"//source/common/config:protobuf_link_hacks\",\n        \"//source/common/event:libevent_lib\",\n    ],\n)\n"
  },
  {
    "path": "test/tools/config_load_check/config_load_check.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <iostream>\n#include <stdexcept>\n#include <string>\n\n#include \"common/common/fmt.h\"\n#include \"common/common/logger.h\"\n#include \"common/common/thread.h\"\n#include \"common/config/protobuf_link_hacks.h\"\n#include \"common/event/libevent.h\"\n\n#include \"test/config_test/config_test.h\"\n\n#include \"gtest/gtest.h\"\n\nint main(int argc, char* argv[]) {\n  if (argc != 2) {\n    std::cerr << \"Usage: config_load_check PATH\\n\"\n                 \"\\nValidate configuration files against json schema\\n\"\n                 \"\\n\\tPATH - root of the path that holds the json files to verify.\"\n                 \" The tool recursively searches for json files to validate.\"\n              << std::endl;\n    return EXIT_FAILURE;\n  }\n  try {\n\n    Envoy::Event::Libevent::Global::initialize();\n    Envoy::Thread::MutexBasicLockable lock;\n    Envoy::Logger::Context logging_context(static_cast<spdlog::level::level_enum>(2),\n                                           Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false);\n\n    const uint32_t num_tested = Envoy::ConfigTest::run(std::string(argv[1]));\n    std::cout << fmt::format(\"Configs tested: {}. \", num_tested);\n    if (testing::Test::HasFailure()) {\n      std::cerr << \"There were failures. Please Fix your configuration files.\" << std::endl;\n      return EXIT_FAILURE;\n    } else {\n      std::cout << \"No failures.\" << std::endl;\n      return EXIT_SUCCESS;\n    }\n  } catch (const std::runtime_error& e) {\n    // catch directory not found runtime exception.\n    std::cerr << e.what() << std::endl;\n  }\n  return EXIT_FAILURE;\n}\n"
  },
  {
    "path": "test/tools/router_check/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_binary\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n    \"envoy_proto_library\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_binary(\n    name = \"router_check_tool\",\n    deps = [\":router_check_main_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"router_check_main_lib\",\n    srcs = [\n        \"coverage.cc\",\n        \"coverage.h\",\n        \"router.cc\",\n        \"router.h\",\n        \"router_check.cc\",\n    ],\n    copts = [\"-DHAVE_LONG_LONG\"],\n    external_deps = [\"tclap\"],\n    deps = [\n        \":validation_proto_cc_proto\",\n        \"//source/common/event:dispatcher_lib\",\n        \"//source/common/http:header_map_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/json:json_loader_lib\",\n        \"//source/common/router:config_lib\",\n        \"//source/common/stats:stats_lib\",\n        \"//source/common/stream_info:stream_info_lib\",\n        \"//source/exe:platform_impl_lib\",\n        \"//test/mocks/server:instance_mocks\",\n        \"//test/test_common:printers_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/v3:pkg_cc_proto\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"validation_proto\",\n    srcs = [\"validation.proto\"],\n    deps = [\n        \"@envoy_api//envoy/config/core/v3:pkg\",\n        \"@envoy_api//envoy/config/route/v3:pkg\",\n    ],\n)\n"
  },
  {
    "path": "test/tools/router_check/coverage.cc",
    "content": "#include \"test/tools/router_check/coverage.h\"\n\n#include <algorithm>\n\nnamespace Envoy {\ndouble RouteCoverage::report() {\n  uint64_t route_weight = 0;\n  for (auto covered_field : coverageFields()) {\n    if (covered_field) {\n      route_weight += 1;\n    }\n  }\n  return static_cast<double>(route_weight) / coverageFields().size();\n}\n\nstd::vector<bool> RouteCoverage::coverageFields() {\n  if (route_entry_ != nullptr) {\n    return std::vector<bool>{cluster_covered_, virtual_cluster_covered_, virtual_host_covered_,\n                             path_rewrite_covered_, host_rewrite_covered_};\n  } else if (direct_response_entry_ != nullptr) {\n    return std::vector<bool>{redirect_path_covered_};\n  } else {\n    return std::vector<bool>{};\n  }\n}\n\nvoid Coverage::markClusterCovered(const Envoy::Router::Route& route) {\n  coveredRoute(route).setClusterCovered();\n}\n\nvoid Coverage::markVirtualClusterCovered(const Envoy::Router::Route& route) {\n  coveredRoute(route).setVirtualClusterCovered();\n}\n\nvoid Coverage::markVirtualHostCovered(const Envoy::Router::Route& route) {\n  coveredRoute(route).setVirtualHostCovered();\n}\n\nvoid Coverage::markPathRewriteCovered(const Envoy::Router::Route& route) {\n  coveredRoute(route).setPathRewriteCovered();\n}\n\nvoid Coverage::markHostRewriteCovered(const Envoy::Router::Route& route) {\n  coveredRoute(route).setHostRewriteCovered();\n}\n\nvoid Coverage::markRedirectPathCovered(const Envoy::Router::Route& route) {\n  coveredRoute(route).setRedirectPathCovered();\n}\n\ndouble Coverage::report() {\n  uint64_t num_routes = 0;\n  for (const auto& host : route_config_.virtual_hosts()) {\n    for (const auto& route : host.routes()) {\n      if (route.route().has_weighted_clusters()) {\n        num_routes += route.route().weighted_clusters().clusters_size();\n      } else {\n        num_routes += 1;\n      }\n    }\n  }\n  return 100 * static_cast<double>(covered_routes_.size()) / num_routes;\n}\n\ndouble 
Coverage::detailedReport() {\n  std::set<std::string> all_route_names;\n  std::set<std::string> covered_route_names;\n  uint64_t num_routes = 0;\n  for (const auto& host : route_config_.virtual_hosts()) {\n    for (const auto& route : host.routes()) {\n      if (route.route().has_weighted_clusters()) {\n        num_routes += route.route().weighted_clusters().clusters_size();\n      } else {\n        num_routes += 1;\n      }\n      all_route_names.emplace(route.name());\n    }\n  }\n  double cumulative_coverage = 0;\n  for (auto& covered_route : covered_routes_) {\n    cumulative_coverage += covered_route->report();\n    covered_route_names.emplace(covered_route->routeName());\n  }\n  printMissingTests(all_route_names, covered_route_names);\n  return 100 * cumulative_coverage / num_routes;\n}\n\nvoid Coverage::printMissingTests(const std::set<std::string>& all_route_names,\n                                 const std::set<std::string>& covered_route_names) {\n  std::set<std::string> missing_route_names;\n  std::set_difference(all_route_names.begin(), all_route_names.end(), covered_route_names.begin(),\n                      covered_route_names.end(),\n                      std::inserter(missing_route_names, missing_route_names.end()));\n\n  for (const auto& host : route_config_.virtual_hosts()) {\n    for (const auto& route : host.routes()) {\n      if (missing_route_names.find(route.name()) != missing_route_names.end()) {\n        std::cout << \"Missing test for host: \" << host.name()\n                  << \", route: \" << route.match().DebugString() << std::endl;\n      }\n    }\n  }\n}\n\nRouteCoverage& Coverage::coveredRoute(const Envoy::Router::Route& route) {\n  std::string route_name;\n  if (route.routeEntry() != nullptr) {\n    const Envoy::Router::RouteEntry* route_entry = route.routeEntry();\n    route_name = route.routeEntry()->routeName();\n    for (auto& route_coverage : covered_routes_) {\n      if (route_coverage->covers(route_entry)) {\n        
return *route_coverage;\n      }\n    }\n    std::unique_ptr<RouteCoverage> new_coverage =\n        std::make_unique<RouteCoverage>(route_entry, route_name);\n    covered_routes_.push_back(std::move(new_coverage));\n    return coveredRoute(route);\n  } else if (route.directResponseEntry() != nullptr) {\n    const Envoy::Router::DirectResponseEntry* direct_response_entry = route.directResponseEntry();\n    route_name = route.directResponseEntry()->routeName();\n    for (auto& route_coverage : covered_routes_) {\n      if (route_coverage->covers(direct_response_entry)) {\n        return *route_coverage;\n      }\n    }\n    std::unique_ptr<RouteCoverage> new_coverage =\n        std::make_unique<RouteCoverage>(direct_response_entry, route_name);\n    covered_routes_.push_back(std::move(new_coverage));\n    return coveredRoute(route);\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/tools/router_check/coverage.h",
    "content": "#pragma once\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/router/router.h\"\n\nnamespace Envoy {\nclass RouteCoverage : Logger::Loggable<Logger::Id::testing> {\npublic:\n  RouteCoverage(const Envoy::Router::RouteEntry* route, const std::string route_name)\n      : route_entry_(route), direct_response_entry_(nullptr), route_name_(route_name){};\n  RouteCoverage(const Envoy::Router::DirectResponseEntry* route, const std::string route_name)\n      : route_entry_(nullptr), direct_response_entry_(route), route_name_(route_name){};\n\n  double report();\n  void setClusterCovered() { cluster_covered_ = true; }\n  void setVirtualClusterCovered() { virtual_cluster_covered_ = true; }\n  void setVirtualHostCovered() { virtual_host_covered_ = true; }\n  void setPathRewriteCovered() { path_rewrite_covered_ = true; }\n  void setHostRewriteCovered() { host_rewrite_covered_ = true; }\n  void setRedirectPathCovered() { redirect_path_covered_ = true; }\n  bool covers(const Envoy::Router::RouteEntry* route) { return route_entry_ == route; }\n  bool covers(const Envoy::Router::DirectResponseEntry* route) {\n    return direct_response_entry_ == route;\n  }\n  const std::string routeName() { return route_name_; };\n\nprivate:\n  const Envoy::Router::RouteEntry* route_entry_;\n  const Envoy::Router::DirectResponseEntry* direct_response_entry_;\n  const std::string route_name_;\n  bool cluster_covered_{false};\n  bool virtual_cluster_covered_{false};\n  bool virtual_host_covered_{false};\n  bool path_rewrite_covered_{false};\n  bool host_rewrite_covered_{false};\n  bool redirect_path_covered_{false};\n  std::vector<bool> coverageFields();\n};\n\nclass Coverage : Logger::Loggable<Logger::Id::testing> {\npublic:\n  Coverage(envoy::config::route::v3::RouteConfiguration config) : route_config_(config){};\n  void markClusterCovered(const Envoy::Router::Route& route);\n  void markVirtualClusterCovered(const Envoy::Router::Route& route);\n  void 
markVirtualHostCovered(const Envoy::Router::Route& route);\n  void markPathRewriteCovered(const Envoy::Router::Route& route);\n  void markHostRewriteCovered(const Envoy::Router::Route& route);\n  void markRedirectPathCovered(const Envoy::Router::Route& route);\n  double report();\n  double detailedReport();\n\nprivate:\n  RouteCoverage& coveredRoute(const Envoy::Router::Route& route);\n  void printMissingTests(const std::set<std::string>& all_route_names,\n                         const std::set<std::string>& covered_route_names);\n  std::vector<std::unique_ptr<RouteCoverage>> covered_routes_;\n  const envoy::config::route::v3::RouteConfiguration route_config_;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/tools/router_check/router.cc",
    "content": "#include \"test/tools/router_check/router.h\"\n\n#include <functional>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/common/random_generator.h\"\n#include \"common/network/utility.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"test/test_common/printers.h\"\n\nnamespace {\nconst std::string\ntoString(envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase specifier) {\n  switch (specifier) {\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kExactMatch:\n    return \"exact_match\";\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::\n      kHiddenEnvoyDeprecatedRegexMatch:\n    return \"regex_match\";\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kSafeRegexMatch:\n    return \"safe_regex_match\";\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kRangeMatch:\n    return \"range_match\";\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kPresentMatch:\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::\n      HEADER_MATCH_SPECIFIER_NOT_SET:\n    return \"present_match\";\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kPrefixMatch:\n    return \"prefix_match\";\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kSuffixMatch:\n    return \"suffix_match\";\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kContainsMatch:\n    return \"contains_match\";\n    break;\n  }\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nconst std::string toString(const Envoy::Http::HeaderEntry* entry) {\n  
return entry == nullptr ? \"NULL\" : std::string(entry->value().getStringView());\n}\n\n} // namespace\n\nnamespace Envoy {\n// static\nToolConfig ToolConfig::create(const envoy::RouterCheckToolSchema::ValidationItem& check_config) {\n  // Add header field values\n  std::unique_ptr<Http::TestRequestHeaderMapImpl> request_headers(\n      new Http::TestRequestHeaderMapImpl());\n  std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers(\n      new Http::TestResponseHeaderMapImpl());\n  request_headers->addCopy(\":authority\", check_config.input().authority());\n  request_headers->addCopy(\":path\", check_config.input().path());\n  request_headers->addCopy(\":method\", check_config.input().method());\n  request_headers->addCopy(\"x-forwarded-proto\", check_config.input().ssl() ? \"https\" : \"http\");\n\n  if (check_config.input().internal()) {\n    request_headers->addCopy(\"x-envoy-internal\", \"true\");\n  }\n\n  if (check_config.input().additional_request_headers().data()) {\n    for (const envoy::config::core::v3::HeaderValue& header_config :\n         check_config.input().additional_request_headers()) {\n      request_headers->addCopy(header_config.key(), header_config.value());\n    }\n  }\n\n  if (check_config.input().additional_response_headers().data()) {\n    for (const envoy::config::core::v3::HeaderValue& header_config :\n         check_config.input().additional_response_headers()) {\n      response_headers->addCopy(header_config.key(), header_config.value());\n    }\n  }\n\n  return ToolConfig(std::move(request_headers), std::move(response_headers),\n                    check_config.input().random_value());\n}\n\nToolConfig::ToolConfig(std::unique_ptr<Http::TestRequestHeaderMapImpl> request_headers,\n                       std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers,\n                       int random_value)\n    : request_headers_(std::move(request_headers)), response_headers_(std::move(response_headers)),\n      
random_value_(random_value) {}\n\n// static\nRouterCheckTool RouterCheckTool::create(const std::string& router_config_file,\n                                        const bool disable_deprecation_check) {\n  // TODO(hennna): Allow users to load a full config and extract the route configuration from it.\n  envoy::config::route::v3::RouteConfiguration route_config;\n  auto stats = std::make_unique<Stats::IsolatedStoreImpl>();\n  auto api = Api::createApiForTest(*stats);\n  TestUtility::loadFromFile(router_config_file, route_config, *api);\n  assignUniqueRouteNames(route_config);\n  assignRuntimeFraction(route_config);\n  auto factory_context =\n      std::make_unique<NiceMock<Server::Configuration::MockServerFactoryContext>>();\n  auto config = std::make_unique<Router::ConfigImpl>(\n      route_config, *factory_context, ProtobufMessage::getNullValidationVisitor(), false);\n  if (!disable_deprecation_check) {\n    MessageUtil::checkForUnexpectedFields(route_config,\n                                          ProtobufMessage::getStrictValidationVisitor(),\n                                          &factory_context->runtime_loader_);\n  }\n\n  return RouterCheckTool(std::move(factory_context), std::move(config), std::move(stats),\n                         std::move(api), Coverage(route_config));\n}\n\nvoid RouterCheckTool::assignUniqueRouteNames(\n    envoy::config::route::v3::RouteConfiguration& route_config) {\n  Random::RandomGeneratorImpl random;\n  for (auto& host : *route_config.mutable_virtual_hosts()) {\n    for (auto& route : *host.mutable_routes()) {\n      route.set_name(random.uuid());\n    }\n  }\n}\n\nvoid RouterCheckTool::assignRuntimeFraction(\n    envoy::config::route::v3::RouteConfiguration& route_config) {\n  for (auto& host : *route_config.mutable_virtual_hosts()) {\n    for (auto& route : *host.mutable_routes()) {\n      if (route.match().has_runtime_fraction() &&\n          route.match().runtime_fraction().default_value().numerator() == 0) {\n       
 route.mutable_match()->mutable_runtime_fraction()->mutable_default_value()->set_numerator(\n            1);\n      }\n    }\n  }\n}\n\nvoid RouterCheckTool::finalizeHeaders(ToolConfig& tool_config,\n                                      Envoy::StreamInfo::StreamInfoImpl stream_info) {\n  if (!headers_finalized_ && tool_config.route_ != nullptr) {\n    if (tool_config.route_->directResponseEntry() != nullptr) {\n      tool_config.route_->directResponseEntry()->rewritePathHeader(*tool_config.request_headers_,\n                                                                   true);\n      sendLocalReply(tool_config, *tool_config.route_->directResponseEntry());\n      tool_config.route_->directResponseEntry()->finalizeResponseHeaders(\n          *tool_config.response_headers_, stream_info);\n    } else if (tool_config.route_->routeEntry() != nullptr) {\n      tool_config.route_->routeEntry()->finalizeRequestHeaders(*tool_config.request_headers_,\n                                                               stream_info, true);\n      tool_config.route_->routeEntry()->finalizeResponseHeaders(*tool_config.response_headers_,\n                                                                stream_info);\n    }\n  }\n\n  headers_finalized_ = true;\n}\n\nvoid RouterCheckTool::sendLocalReply(ToolConfig& tool_config,\n                                     const Router::DirectResponseEntry& entry) {\n  auto encode_functions = Envoy::Http::Utility::EncodeFunctions{\n      nullptr, nullptr,\n      [&](Envoy::Http::ResponseHeaderMapPtr&& headers, bool end_stream) -> void {\n        UNREFERENCED_PARAMETER(end_stream);\n        Http::HeaderMapImpl::copyFrom(*tool_config.response_headers_->header_map_, *headers);\n      },\n      [&](Envoy::Buffer::Instance& data, bool end_stream) -> void {\n        UNREFERENCED_PARAMETER(data);\n        UNREFERENCED_PARAMETER(end_stream);\n      }};\n\n  bool is_grpc = false;\n  bool is_head_request = false;\n  
Envoy::Http::Utility::LocalReplyData local_reply_data{\n      is_grpc, entry.responseCode(), entry.responseBody(), absl::nullopt, is_head_request};\n\n  Envoy::Http::Utility::sendLocalReply(false, encode_functions, local_reply_data);\n}\n\nRouterCheckTool::RouterCheckTool(\n    std::unique_ptr<NiceMock<Server::Configuration::MockServerFactoryContext>> factory_context,\n    std::unique_ptr<Router::ConfigImpl> config, std::unique_ptr<Stats::IsolatedStoreImpl> stats,\n    Api::ApiPtr api, Coverage coverage)\n    : factory_context_(std::move(factory_context)), config_(std::move(config)),\n      stats_(std::move(stats)), api_(std::move(api)), coverage_(std::move(coverage)) {\n  ON_CALL(factory_context_->runtime_loader_.snapshot_,\n          featureEnabled(_, testing::An<const envoy::type::v3::FractionalPercent&>(),\n                         testing::An<uint64_t>()))\n      .WillByDefault(testing::Invoke(this, &RouterCheckTool::runtimeMock));\n}\n\nJson::ObjectSharedPtr loadFromFile(const std::string& file_path, Api::Api& api) {\n  std::string contents = api.fileSystem().fileReadToEnd(file_path);\n  if (absl::EndsWith(file_path, \".yaml\")) {\n    contents = MessageUtil::getJsonStringFromMessage(ValueUtil::loadFromYaml(contents));\n  }\n  return Json::Factory::loadFromString(contents);\n}\n\nbool RouterCheckTool::compareEntries(const std::string& expected_routes) {\n  envoy::RouterCheckToolSchema::Validation validation_config;\n  auto stats = std::make_unique<Stats::IsolatedStoreImpl>();\n  auto api = Api::createApiForTest(*stats);\n  const std::string contents = api->fileSystem().fileReadToEnd(expected_routes);\n  TestUtility::loadFromFile(expected_routes, validation_config, *api);\n  TestUtility::validate(validation_config);\n\n  bool no_failures = true;\n  for (const envoy::RouterCheckToolSchema::ValidationItem& check_config :\n       validation_config.tests()) {\n    active_runtime_ = check_config.input().runtime();\n    headers_finalized_ = false;\n    
Envoy::StreamInfo::StreamInfoImpl stream_info(Envoy::Http::Protocol::Http11,\n                                                  factory_context_->dispatcher().timeSource());\n    stream_info.setDownstreamRemoteAddress(Network::Utility::getCanonicalIpv4LoopbackAddress());\n    ToolConfig tool_config = ToolConfig::create(check_config);\n    tool_config.route_ =\n        config_->route(*tool_config.request_headers_, stream_info, tool_config.random_value_);\n\n    const std::string& test_name = check_config.test_name();\n    tests_.emplace_back(test_name, std::vector<std::string>{});\n    const envoy::RouterCheckToolSchema::ValidationAssert& validate = check_config.validate();\n\n    using CheckerFunc =\n        std::function<bool(ToolConfig&, const envoy::RouterCheckToolSchema::ValidationAssert&)>;\n    CheckerFunc checkers[] = {\n        [this](auto&... params) -> bool { return this->compareCluster(params...); },\n        [this](auto&... params) -> bool { return this->compareVirtualCluster(params...); },\n        [this](auto&... params) -> bool { return this->compareVirtualHost(params...); },\n        [this](auto&... params) -> bool { return this->compareRewritePath(params...); },\n        [this](auto&... params) -> bool { return this->compareRewriteHost(params...); },\n        [this](auto&... params) -> bool { return this->compareRedirectPath(params...); },\n        [this](auto&... params) -> bool { return this->compareRequestHeaderFields(params...); },\n        [this](auto&... 
params) -> bool { return this->compareResponseHeaderFields(params...); },\n    };\n    finalizeHeaders(tool_config, stream_info);\n    // Call appropriate function for each match case.\n    for (const auto& test : checkers) {\n      if (!test(tool_config, validate)) {\n        no_failures = false;\n      }\n    }\n  }\n  printResults();\n  return no_failures;\n}\n\nbool RouterCheckTool::compareCluster(ToolConfig& tool_config, const std::string& expected) {\n  std::string actual = \"\";\n\n  if (tool_config.route_->routeEntry() != nullptr) {\n    actual = tool_config.route_->routeEntry()->clusterName();\n  }\n  const bool matches = compareResults(actual, expected, \"cluster_name\");\n  if (matches && tool_config.route_->routeEntry() != nullptr) {\n    coverage_.markClusterCovered(*tool_config.route_);\n  }\n  return matches;\n}\n\nbool RouterCheckTool::compareCluster(\n    ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) {\n  if (!expected.has_cluster_name()) {\n    return true;\n  }\n  if (tool_config.route_ == nullptr) {\n    return compareResults(\"\", expected.cluster_name().value(), \"cluster_name\");\n  }\n  return compareCluster(tool_config, expected.cluster_name().value());\n}\n\nbool RouterCheckTool::compareVirtualCluster(ToolConfig& tool_config, const std::string& expected) {\n  std::string actual = \"\";\n\n  if (tool_config.route_->routeEntry() != nullptr &&\n      tool_config.route_->routeEntry()->virtualCluster(*tool_config.request_headers_) != nullptr) {\n    Stats::StatName stat_name =\n        tool_config.route_->routeEntry()->virtualCluster(*tool_config.request_headers_)->statName();\n    actual = tool_config.symbolTable().toString(stat_name);\n  }\n  const bool matches = compareResults(actual, expected, \"virtual_cluster_name\");\n  if (matches && tool_config.route_->routeEntry() != nullptr) {\n    coverage_.markVirtualClusterCovered(*tool_config.route_);\n  }\n  return matches;\n}\n\nbool 
RouterCheckTool::compareVirtualCluster(\n    ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) {\n  if (!expected.has_virtual_cluster_name()) {\n    return true;\n  }\n  if (tool_config.route_ == nullptr) {\n    return compareResults(\"\", expected.virtual_cluster_name().value(), \"virtual_cluster_name\");\n  }\n  return compareVirtualCluster(tool_config, expected.virtual_cluster_name().value());\n}\n\nbool RouterCheckTool::compareVirtualHost(ToolConfig& tool_config, const std::string& expected) {\n  std::string actual = \"\";\n  if (tool_config.route_->routeEntry() != nullptr) {\n    Stats::StatName stat_name = tool_config.route_->routeEntry()->virtualHost().statName();\n    actual = tool_config.symbolTable().toString(stat_name);\n  }\n  const bool matches = compareResults(actual, expected, \"virtual_host_name\");\n  if (matches && tool_config.route_->routeEntry() != nullptr) {\n    coverage_.markVirtualHostCovered(*tool_config.route_);\n  }\n  return matches;\n}\n\nbool RouterCheckTool::compareVirtualHost(\n    ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) {\n  if (!expected.has_virtual_host_name()) {\n    return true;\n  }\n  if (tool_config.route_ == nullptr) {\n    return compareResults(\"\", expected.virtual_host_name().value(), \"virtual_host_name\");\n  }\n  return compareVirtualHost(tool_config, expected.virtual_host_name().value());\n}\n\nbool RouterCheckTool::compareRewritePath(ToolConfig& tool_config, const std::string& expected) {\n  std::string actual = \"\";\n  if (tool_config.route_->routeEntry() != nullptr) {\n    actual = tool_config.request_headers_->get_(Http::Headers::get().Path);\n  }\n  const bool matches = compareResults(actual, expected, \"path_rewrite\");\n  if (matches && tool_config.route_->routeEntry() != nullptr) {\n    coverage_.markPathRewriteCovered(*tool_config.route_);\n  }\n  return matches;\n}\n\nbool RouterCheckTool::compareRewritePath(\n    
ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) {\n  if (!expected.has_path_rewrite()) {\n    return true;\n  }\n  if (tool_config.route_ == nullptr) {\n    return compareResults(\"\", expected.path_rewrite().value(), \"path_rewrite\");\n  }\n  return compareRewritePath(tool_config, expected.path_rewrite().value());\n}\n\nbool RouterCheckTool::compareRewriteHost(ToolConfig& tool_config, const std::string& expected) {\n  std::string actual = \"\";\n  if (tool_config.route_->routeEntry() != nullptr) {\n    actual = tool_config.request_headers_->get_(Http::Headers::get().Host);\n  }\n  const bool matches = compareResults(actual, expected, \"host_rewrite\");\n  if (matches && tool_config.route_->routeEntry() != nullptr) {\n    coverage_.markHostRewriteCovered(*tool_config.route_);\n  }\n  return matches;\n}\n\nbool RouterCheckTool::compareRewriteHost(\n    ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) {\n  if (!expected.has_host_rewrite()) {\n    return true;\n  }\n  if (tool_config.route_ == nullptr) {\n    return compareResults(\"\", expected.host_rewrite().value(), \"host_rewrite\");\n  }\n  return compareRewriteHost(tool_config, expected.host_rewrite().value());\n}\n\nbool RouterCheckTool::compareRedirectPath(ToolConfig& tool_config, const std::string& expected) {\n  std::string actual = \"\";\n  if (tool_config.route_->directResponseEntry() != nullptr) {\n    actual = tool_config.route_->directResponseEntry()->newPath(*tool_config.request_headers_);\n  }\n\n  const bool matches = compareResults(actual, expected, \"path_redirect\");\n  if (matches && tool_config.route_->directResponseEntry() != nullptr) {\n    coverage_.markRedirectPathCovered(*tool_config.route_);\n  }\n  return matches;\n}\n\nbool RouterCheckTool::compareRedirectPath(\n    ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) {\n  if (!expected.has_path_redirect()) {\n    return 
true;\n  }\n  if (tool_config.route_ == nullptr) {\n    return compareResults(\"\", expected.path_redirect().value(), \"path_redirect\");\n  }\n  return compareRedirectPath(tool_config, expected.path_redirect().value());\n}\n\nbool RouterCheckTool::compareRequestHeaderFields(\n    ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) {\n  bool no_failures = true;\n  if (expected.request_header_matches().data()) {\n    for (const envoy::config::route::v3::HeaderMatcher& header :\n         expected.request_header_matches()) {\n      if (!matchHeaderField(*tool_config.request_headers_, header, \"request_header_matches\")) {\n        no_failures = false;\n      }\n    }\n  }\n  // TODO(kb000) : Remove deprecated request_header_fields.\n  if (expected.request_header_fields().data()) {\n    for (const envoy::config::core::v3::HeaderValue& header : expected.request_header_fields()) {\n      auto actual = tool_config.request_headers_->get_(header.key());\n      auto const& expected = header.value();\n      if (!compareResults(actual, expected, \"request_header_fields\")) {\n        no_failures = false;\n      }\n    }\n  }\n  return no_failures;\n}\n\nbool RouterCheckTool::compareResponseHeaderFields(\n    ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) {\n  bool no_failures = true;\n  if (expected.response_header_matches().data()) {\n    for (const envoy::config::route::v3::HeaderMatcher& header :\n         expected.response_header_matches()) {\n      if (!matchHeaderField(*tool_config.response_headers_, header, \"response_header_matches\")) {\n        no_failures = false;\n      }\n    }\n  }\n  // TODO(kb000) : Remove deprecated response_header_fields.\n  if (expected.response_header_fields().data()) {\n    for (const envoy::config::core::v3::HeaderValue& header : expected.response_header_fields()) {\n      auto actual = tool_config.response_headers_->get_(header.key());\n      auto const& 
expected = header.value();\n      if (!compareResults(actual, expected, \"response_header_fields\")) {\n        no_failures = false;\n      }\n    }\n  }\n  return no_failures;\n}\n\ntemplate <typename HeaderMap>\nbool RouterCheckTool::matchHeaderField(const HeaderMap& header_map,\n                                       const envoy::config::route::v3::HeaderMatcher& header,\n                                       const std::string test_type) {\n  Envoy::Http::HeaderUtility::HeaderData expected_header_data{header};\n  if (Envoy::Http::HeaderUtility::matchHeaders(header_map, expected_header_data)) {\n    return true;\n  }\n\n  // Test failed. Decide on what to log.\n  std::string actual, expected;\n  std::string match_test_type{test_type + \".\" + ::toString(header.header_match_specifier_case())};\n  switch (header.header_match_specifier_case()) {\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kExactMatch:\n    actual =\n        header.name() + \": \" + ::toString(header_map.get(Http::LowerCaseString(header.name())));\n    expected = header.name() + \": \" + header.exact_match();\n    reportFailure(actual, expected, match_test_type, !header.invert_match());\n    break;\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kPresentMatch:\n  case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::\n      HEADER_MATCH_SPECIFIER_NOT_SET:\n    actual = \"has(\" + header.name() + \"):\" + (header.invert_match() ? \"true\" : \"false\");\n    expected = \"has(\" + header.name() + \"):\" + (header.invert_match() ? 
\"false\" : \"true\");\n    reportFailure(actual, expected, match_test_type);\n    break;\n  default:\n    actual =\n        header.name() + \": \" + ::toString(header_map.get(Http::LowerCaseString(header.name())));\n    tests_.back().second.emplace_back(\"actual: [\" + actual + \"], test type: \" + match_test_type);\n    break;\n  }\n\n  return false;\n}\n\nbool RouterCheckTool::compareResults(const std::string& actual, const std::string& expected,\n                                     const std::string& test_type, const bool expect_match) {\n  if ((expected == actual) != expect_match) {\n    reportFailure(actual, expected, test_type, expect_match);\n    return false;\n  }\n  return true;\n}\n\nvoid RouterCheckTool::reportFailure(const std::string& actual, const std::string& expected,\n                                    const std::string& test_type, const bool expect_match) {\n  tests_.back().second.emplace_back(\"expected: [\" + expected + \"], \" +\n                                    \"actual: \" + (expect_match ? 
\"\" : \"NOT \") + \"[\" + actual +\n                                    \"],\" + \" test type: \" + test_type);\n}\n\nvoid RouterCheckTool::printResults() {\n  // Output failure details to stdout if details_ flag is set to true\n  for (const auto& test_result : tests_) {\n    // All test names are printed if the details_ flag is true unless only_show_failures_ is\n    // also true.\n    if ((details_ && !only_show_failures_) ||\n        (only_show_failures_ && !test_result.second.empty())) {\n      if (test_result.second.empty()) {\n        std::cout << test_result.first << std::endl;\n      } else {\n        std::cerr << test_result.first << std::endl;\n        for (const auto& failure : test_result.second) {\n          std::cerr << failure << std::endl;\n        }\n      }\n    }\n  }\n}\n\n// The Mock for runtime value checks.\n// This is a simple implementation to mimic the actual runtime checks in Snapshot.featureEnabled\nbool RouterCheckTool::runtimeMock(absl::string_view key,\n                                  const envoy::type::v3::FractionalPercent& default_value,\n                                  uint64_t random_value) {\n  return !active_runtime_.empty() && key.compare(active_runtime_) == 0 &&\n         ProtobufPercentHelper::evaluateFractionalPercent(default_value, random_value);\n}\n\nOptions::Options(int argc, char** argv) {\n  TCLAP::CmdLine cmd(\"router_check_tool\", ' ', \"none\", true);\n  TCLAP::SwitchArg is_detailed(\"d\", \"details\", \"Show detailed test execution results\", cmd, false);\n  TCLAP::SwitchArg only_show_failures(\"\", \"only-show-failures\", \"Only display failing tests\", cmd,\n                                      false);\n  TCLAP::SwitchArg disable_deprecation_check(\"\", \"disable-deprecation-check\",\n                                             \"Disable deprecated fields check\", cmd, false);\n  TCLAP::ValueArg<double> fail_under(\"f\", \"fail-under\",\n                                     \"Fail if test coverage is 
under a specified amount\", false,\n                                     0.0, \"float\", cmd);\n  TCLAP::SwitchArg comprehensive_coverage(\n      \"\", \"covall\", \"Measure coverage by checking all route fields\", cmd, false);\n  TCLAP::ValueArg<std::string> config_path(\"c\", \"config-path\", \"Path to configuration file.\", false,\n                                           \"\", \"string\", cmd);\n  TCLAP::ValueArg<std::string> test_path(\"t\", \"test-path\", \"Path to test file.\", false, \"\",\n                                         \"string\", cmd);\n  TCLAP::UnlabeledMultiArg<std::string> unlabelled_configs(\n      \"unlabelled-configs\", \"unlabelled configs\", false, \"unlabelledConfigStrings\", cmd);\n  try {\n    cmd.parse(argc, argv);\n  } catch (TCLAP::ArgException& e) {\n    std::cerr << \"error: \" << e.error() << std::endl;\n    exit(EXIT_FAILURE);\n  }\n\n  is_detailed_ = is_detailed.getValue();\n  only_show_failures_ = only_show_failures.getValue();\n  fail_under_ = fail_under.getValue();\n  comprehensive_coverage_ = comprehensive_coverage.getValue();\n  disable_deprecation_check_ = disable_deprecation_check.getValue();\n\n  config_path_ = config_path.getValue();\n  test_path_ = test_path.getValue();\n  if (config_path_.empty() || test_path_.empty()) {\n    std::cerr << \"error: \"\n              << \"Both --config-path/c and --test-path/t are mandatory\" << std::endl;\n    exit(EXIT_FAILURE);\n  }\n}\n} // namespace Envoy\n"
  },
  {
    "path": "test/tools/router_check/router.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/type/v3/percent.pb.h\"\n\n#include \"common/common/logger.h\"\n#include \"common/common/utility.h\"\n#include \"common/http/header_map_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/json/json_loader.h\"\n#include \"common/router/config_impl.h\"\n#include \"common/stats/symbol_table_impl.h\"\n#include \"common/stream_info/stream_info_impl.h\"\n\n#include \"test/mocks/server/instance.h\"\n#include \"test/test_common/global.h\"\n#include \"test/test_common/printers.h\"\n#include \"test/test_common/utility.h\"\n#include \"test/tools/router_check/coverage.h\"\n#include \"test/tools/router_check/validation.pb.h\"\n#include \"test/tools/router_check/validation.pb.validate.h\"\n\n#include \"tclap/CmdLine.h\"\n\nnamespace Envoy {\n/**\n * Struct that stores the configuration parameters of the router check tool extracted from a json\n * input file.\n */\nstruct ToolConfig {\n  ToolConfig() = default;\n\n  /**\n   * @param check_config tool config proto object.\n   * @return ToolConfig a ToolConfig instance with member variables set by the tool config json\n   * file.\n   */\n  static ToolConfig create(const envoy::RouterCheckToolSchema::ValidationItem& check_config);\n\n  Stats::SymbolTable& symbolTable() { return *symbol_table_; }\n\n  std::unique_ptr<Http::TestRequestHeaderMapImpl> request_headers_;\n  std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers_;\n  Router::RouteConstSharedPtr route_;\n  int random_value_{0};\n\nprivate:\n  ToolConfig(std::unique_ptr<Http::TestRequestHeaderMapImpl> request_headers,\n             std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers, int random_value);\n  Stats::TestSymbolTable symbol_table_;\n};\n\n/**\n * A route table check tool that check whether route parameters returned by a router match\n * what is expected.\n */\nclass RouterCheckTool : 
Logger::Loggable<Logger::Id::testing> {\npublic:\n  /**\n   * @param router_config_file v2 router config file.\n   * @param disable_deprecation_check flag to disable the RouteConfig deprecated field check\n   * @return RouterCheckTool a RouterCheckTool instance with member variables set by the router\n   * config file.\n   * */\n  static RouterCheckTool create(const std::string& router_config_file,\n                                const bool disable_deprecation_check);\n\n  /**\n   * @param expected_route_json tool config json file.\n   * @return bool if all routes match what is expected.\n   */\n  bool compareEntries(const std::string& expected_routes);\n\n  /**\n   * Set whether to print out match case details.\n   */\n  void setShowDetails() { details_ = true; }\n\n  /**\n   * Set whether to only print failing match cases.\n   */\n  void setOnlyShowFailures() { only_show_failures_ = true; }\n\n  float coverage(bool detailed) {\n    return detailed ? coverage_.detailedReport() : coverage_.report();\n  }\n\nprivate:\n  RouterCheckTool(\n      std::unique_ptr<NiceMock<Server::Configuration::MockServerFactoryContext>> factory_context,\n      std::unique_ptr<Router::ConfigImpl> config, std::unique_ptr<Stats::IsolatedStoreImpl> stats,\n      Api::ApiPtr api, Coverage coverage);\n\n  /**\n   * Set UUID as the name for each route for detecting missing tests during the coverage check.\n   */\n  static void assignUniqueRouteNames(envoy::config::route::v3::RouteConfiguration& route_config);\n\n  /**\n   * For each route with runtime fraction 0%, set the numerator to a nonzero value so the\n   * route can be tested as enabled or disabled.\n   */\n  static void assignRuntimeFraction(envoy::config::route::v3::RouteConfiguration& route_config);\n\n  /**\n   * Perform header transforms for any request/response headers for the route matched.\n   * Can be called at most once for each test route.\n   */\n  void finalizeHeaders(ToolConfig& tool_config, 
Envoy::StreamInfo::StreamInfoImpl stream_info);\n\n  /*\n   * Performs direct-response reply actions for a response entry.\n   */\n  void sendLocalReply(ToolConfig& tool_config, const Router::DirectResponseEntry& entry);\n\n  bool compareCluster(ToolConfig& tool_config, const std::string& expected);\n  bool compareCluster(ToolConfig& tool_config,\n                      const envoy::RouterCheckToolSchema::ValidationAssert& expected);\n  bool compareVirtualCluster(ToolConfig& tool_config, const std::string& expected);\n  bool compareVirtualCluster(ToolConfig& tool_config,\n                             const envoy::RouterCheckToolSchema::ValidationAssert& expected);\n  bool compareVirtualHost(ToolConfig& tool_config, const std::string& expected);\n  bool compareVirtualHost(ToolConfig& tool_config,\n                          const envoy::RouterCheckToolSchema::ValidationAssert& expected);\n  bool compareRewriteHost(ToolConfig& tool_config, const std::string& expected);\n  bool compareRewriteHost(ToolConfig& tool_config,\n                          const envoy::RouterCheckToolSchema::ValidationAssert& expected);\n  bool compareRewritePath(ToolConfig& tool_config, const std::string& expected);\n  bool compareRewritePath(ToolConfig& tool_config,\n                          const envoy::RouterCheckToolSchema::ValidationAssert& expected);\n  bool compareRedirectPath(ToolConfig& tool_config, const std::string& expected);\n  bool compareRedirectPath(ToolConfig& tool_config,\n                           const envoy::RouterCheckToolSchema::ValidationAssert& expected);\n  bool compareRequestHeaderFields(ToolConfig& tool_config,\n                                  const envoy::RouterCheckToolSchema::ValidationAssert& expected);\n  bool compareResponseHeaderFields(ToolConfig& tool_config,\n                                   const envoy::RouterCheckToolSchema::ValidationAssert& expected);\n  template <typename HeaderMap>\n  bool matchHeaderField(const HeaderMap& header_map,\n           
             const envoy::config::route::v3::HeaderMatcher& header,\n                        const std::string test_type);\n\n  /**\n   * Compare the expected and actual route parameter values. Print out match details if details_\n   * flag is set.\n   * @param actual holds the actual route returned by the router.\n   * @param expected holds the expected parameter value of the route.\n   * @param expect_match negates the expectation if false.\n   * @return bool if actual and expected match.\n   */\n  bool compareResults(const std::string& actual, const std::string& expected,\n                      const std::string& test_type, const bool expect_match = true);\n\n  void reportFailure(const std::string& actual, const std::string& expected,\n                     const std::string& test_type, const bool expect_match = true);\n\n  void printResults();\n\n  bool runtimeMock(absl::string_view key, const envoy::type::v3::FractionalPercent& default_value,\n                   uint64_t random_value);\n\n  bool headers_finalized_{false};\n\n  bool details_{false};\n\n  bool only_show_failures_{false};\n\n  // The first member of each pair is the name of the test.\n  // The second member is a list of any failing results for that test as strings.\n  std::vector<std::pair<std::string, std::vector<std::string>>> tests_;\n\n  // TODO(hennna): Switch away from mocks following work done by @rlazarus in github issue #499.\n  std::unique_ptr<NiceMock<Server::Configuration::MockServerFactoryContext>> factory_context_;\n  std::unique_ptr<Router::ConfigImpl> config_;\n  std::unique_ptr<Stats::IsolatedStoreImpl> stats_;\n  Api::ApiPtr api_;\n  std::string active_runtime_;\n  Coverage coverage_;\n};\n\n/**\n * Parses command line arguments for Router Check Tool.\n */\nclass Options {\npublic:\n  Options(int argc, char** argv);\n\n  /**\n   * @return the path to configuration file.\n   */\n  const std::string& configPath() const { return config_path_; }\n\n  /**\n   * @return the path to 
test file.\n   */\n  const std::string& testPath() const { return test_path_; }\n\n  /**\n   * @return the minimum required percentage of routes coverage.\n   */\n  double failUnder() const { return fail_under_; }\n\n  /**\n   * @return true if test coverage should be comprehensive.\n   */\n  bool comprehensiveCoverage() const { return comprehensive_coverage_; }\n\n  /**\n   * @return true if detailed test execution results are displayed.\n   */\n  bool isDetailed() const { return is_detailed_; }\n\n  /**\n   * @return true if only test failures are displayed.\n   */\n  bool onlyShowFailures() const { return only_show_failures_; }\n\n  /**\n   * @return true if the deprecated field check for RouteConfiguration is disabled.\n   */\n  bool disableDeprecationCheck() const { return disable_deprecation_check_; }\n\nprivate:\n  std::string test_path_;\n  std::string config_path_;\n  float fail_under_;\n  bool comprehensive_coverage_;\n  bool is_detailed_;\n  bool only_show_failures_;\n  bool disable_deprecation_check_;\n};\n} // namespace Envoy\n"
  },
  {
    "path": "test/tools/router_check/router_check.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <iostream>\n#include <string>\n\n#include \"exe/platform_impl.h\"\n\n#include \"test/tools/router_check/router.h\"\n\nint main(int argc, char* argv[]) {\n  Envoy::Options options(argc, argv);\n\n  const bool enforce_coverage = options.failUnder() != 0.0;\n  // We need this to ensure WSAStartup is called on Windows\n  Envoy::PlatformImpl platform_impl_;\n\n  try {\n    Envoy::RouterCheckTool checktool =\n        Envoy::RouterCheckTool::create(options.configPath(), options.disableDeprecationCheck());\n\n    if (options.isDetailed()) {\n      checktool.setShowDetails();\n    }\n\n    if (options.onlyShowFailures()) {\n      checktool.setOnlyShowFailures();\n    }\n\n    bool is_equal = checktool.compareEntries(options.testPath());\n    // Test fails if routes do not match what is expected\n    if (!is_equal) {\n      return EXIT_FAILURE;\n    }\n\n    const double current_coverage = checktool.coverage(options.comprehensiveCoverage());\n    std::cout << \"Current route coverage: \" << current_coverage << \"%\" << std::endl;\n    if (enforce_coverage) {\n      if (current_coverage < options.failUnder()) {\n        std::cerr << \"Failed to meet coverage requirement: \" << options.failUnder() << \"%\"\n                  << std::endl;\n        return EXIT_FAILURE;\n      }\n    }\n  } catch (const Envoy::EnvoyException& ex) {\n    std::cerr << ex.what() << std::endl;\n    return EXIT_FAILURE;\n  }\n\n  return EXIT_SUCCESS;\n}\n"
  },
  {
    "path": "test/tools/router_check/test/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n    \"envoy_sh_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_sh_test(\n    name = \"router_tool_test\",\n    srcs = [\"route_tests.sh\"],\n    cc_binary = [\"//test/tools/router_check:router_check_tool\"],\n    data = [\n        \":configs\",\n    ],\n)\n\nfilegroup(\n    name = \"configs\",\n    srcs = glob([\n        \"config/*.yaml\",\n        \"config/*.json\",\n        \"config/*.pb_text\",\n    ]),\n)\n"
  },
  {
    "path": "test/tools/router_check/test/config/ClusterHeader.golden.proto.json",
    "content": "{\n  \"tests\": [\n    {\n      \"test_name\": \"Test1\",\n      \"input\": {\n        \"authority\": \"some_cluster\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"cluster_name\": \"some_cluster\"}\n    },\n    {\n      \"test_name\": \"Test2\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/bar\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"cluster_name\": \"\"}\n    },\n    {\n      \"test_name\": \"Test3\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/bar\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [\n          {\n            \"key\": \"some_header\",\n            \"value\": \"some_cluster\"\n          }\n        ]\n      },\n      \"validate\": {\"cluster_name\": \"some_cluster\"}\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/ClusterHeader.yaml",
    "content": "virtual_hosts:\n- name: local_service\n  domains:\n  - '*'\n  routes:\n    - match:\n        prefix: \"/foo\"\n        headers:\n          - name: \":authority\"\n      route:\n        cluster_header: \":authority\"\n    - match:\n        prefix: \"/bar\"\n      route:\n        cluster_header: \"some_header\"\n        timeout:\n          nanos: 0\n"
  },
  {
    "path": "test/tools/router_check/test/config/ComprehensiveRoutes.golden.proto.json",
    "content": "{\n  \"tests\": [\n    {\n      \"test_name\": \"Test 1\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/new_endpoint\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"cluster_name\": \"www2\",\n        \"virtual_cluster_name\": \"other\",\n        \"virtual_host_name\": \"www2_host\",\n        \"path_rewrite\": \"/api/new_endpoint\",\n        \"host_rewrite\": \"www.lyft.com\",\n        \"path_redirect\": \"\"\n      }\n    },\n    {\n      \"test_name\": \"Test 2\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"cluster_name\": \"root_www2\",\n        \"virtual_cluster_name\": \"other\",\n        \"virtual_host_name\": \"www2_host\",\n        \"path_rewrite\": \"/\",\n        \"host_rewrite\": \"www.lyft.com\",\n        \"path_redirect\": \"\"\n      }\n    },\n    {\n      \"test_name\": \"Test 3\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/foobar\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"cluster_name\": \"www2\",\n        \"virtual_cluster_name\": \"other\",\n        \"virtual_host_name\": \"www2_host\",\n        \"path_rewrite\": \"/foobar\",\n        \"host_rewrite\": \"www.lyft.com\",\n        \"path_redirect\": \"\"\n      }\n    },\n    {\n      \"test_name\": \"Test 4\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/users/123\",\n        \"method\": \"PUT\"\n      },\n      \"validate\": {\n        \"virtual_cluster_name\": \"update_user\"\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/ComprehensiveRoutes.yaml",
    "content": "virtual_hosts:\n  - name: www2_host\n    domains:\n    - www.lyft.com\n    routes:\n      - match:\n          prefix: /new_endpoint\n        route:\n          cluster: www2\n          prefix_rewrite: /api/new_endpoint\n      - match:\n          path: /\n        route:\n          cluster: root_www2\n      - match:\n          prefix: /\n        route:\n          cluster: www2\n    virtual_clusters:\n      - headers:\n        - name: :path\n          safe_regex_match:\n            google_re2: {}\n            regex: ^/users/\\d+$\n        - name: :method\n          exact_match: PUT\n        name: update_user\n"
  },
  {
    "path": "test/tools/router_check/test/config/ContentType.golden.proto.json",
    "content": "{\n  \"tests\": [\n    {\n      \"test_name\": \"Test_1\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"cluster_name\":\"local_service\"}\n    },\n    {\n      \"test_name\": \"Test_2\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [{\"key\": \"content-type\", \"value\": \"application/grpc\"}]\n      },\n      \"validate\": {\"cluster_name\": \"local_service_grpc\"}\n    },\n    {\n      \"test_name\": \"Test_3\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [{\"key\": \"content-type\", \"value\": \"foo\"}]\n      },\n      \"validate\": {\"cluster_name\": \"local_service\"}\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/ContentType.yaml",
    "content": "virtual_hosts:\n- name: local_service\n  domains:\n  - '*'\n  routes:\n  - match:\n      prefix: \"/\"\n      headers:\n        - name: \"content-type\"\n          exact_match: \"application/grpc\"\n    route:\n      cluster: local_service_grpc\n  - match:\n      prefix: \"/\"\n    route:\n      cluster: local_service\n"
  },
  {
    "path": "test/tools/router_check/test/config/DirectResponse.golden.proto.json",
    "content": "{\n  \"tests\": [\n    {\n      \"test_name\": \"Direct Response\",\n      \"input\": {\n        \"authority\": \"lyft.com\",\n        \"path\": \"/ping\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"path_redirect\": \"http://lyft.com/ping\",\n        \"cluster_name\": \"\",\n        \"host_rewrite\": \"\",\n        \"path_rewrite\": \"\",\n        \"virtual_cluster_name\": \"\",\n        \"virtual_host_name\": \"\",\n        \"response_header_matches\": [\n          {\n            \"name\": \"content-type\",\n            \"exact_match\": \"text/plain\"\n          },\n          {\n            \"name\": \"content-length\",\n            \"exact_match\": \"8\"\n          }\n        ]\n      }\n    },\n    {\n      \"test_name\": \"Direct Response custom content-type\",\n      \"input\": {\n        \"authority\": \"lyft.com\",\n        \"path\": \"/ping-json\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"path_redirect\": \"http://lyft.com/ping-json\",\n        \"cluster_name\": \"\",\n        \"host_rewrite\": \"\",\n        \"path_rewrite\": \"\",\n        \"virtual_cluster_name\": \"\",\n        \"virtual_host_name\": \"\",\n        \"response_header_matches\": [\n          {\n            \"name\": \"content-type\",\n            \"exact_match\": \"application/json\"\n          },\n          {\n            \"name\": \"content-length\",\n            \"exact_match\": \"25\"\n          }\n        ]\n      }\n    },\n    {\n      \"test_name\": \"Direct Response custom content-type appended\",\n      \"input\": {\n        \"authority\": \"lyft.com\",\n        \"path\": \"/ping-json2\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"path_redirect\": \"http://lyft.com/ping-json2\",\n        \"cluster_name\": \"\",\n        \"host_rewrite\": \"\",\n        \"path_rewrite\": \"\",\n        \"virtual_cluster_name\": \"\",\n        \"virtual_host_name\": \"\",\n        
\"response_header_matches\": [\n          {\n            \"name\": \"content-type\",\n            \"exact_match\": \"text/plain,application/json\"\n          }\n        ]\n      }\n    },\n    {\n      \"test_name\": \"Direct Response empty body\",\n      \"input\": {\n        \"authority\": \"lyft.com\",\n        \"path\": \"/ping-empty\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"path_redirect\": \"http://lyft.com/ping-empty\",\n        \"cluster_name\": \"\",\n        \"host_rewrite\": \"\",\n        \"path_rewrite\": \"\",\n        \"virtual_cluster_name\": \"\",\n        \"virtual_host_name\": \"\",\n        \"response_header_matches\": [\n          {\n            \"name\": \"content-type\",\n            \"present_match\": true,\n            \"invert_match\": true\n          },\n          {\n            \"name\": \"content-length\",\n            \"present_match\": true,\n            \"invert_match\": true\n          }\n        ]\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/DirectResponse.yaml",
    "content": "virtual_hosts:\n  - name: www2\n    domains:\n    - lyft.com\n    routes:\n      - match:\n          path: /ping\n        direct_response:\n          status: 200\n          body:\n            inline_string: \"Success!\"\n      - match:\n          path: /ping-json\n        direct_response:\n          status: 200\n          body:\n            inline_string: \"{ 'message': 'Success!' }\"\n        response_headers_to_add:\n          - header:\n              key: \"content-type\"\n              value: \"application/json\"\n            append: false\n      - match:\n          path: /ping-json2\n        direct_response:\n          status: 200\n          body:\n            inline_string: \"{ 'message': 'Success!' }\"\n        response_headers_to_add:\n        # This is bad, don't do it! Use append: false!\n          - header:\n              key: \"content-type\"\n              value: \"application/json\"\n      - match:\n          path: /ping-empty\n        direct_response:\n          status: 200\n          body:\n            inline_string: \"\"\n"
  },
  {
    "path": "test/tools/router_check/test/config/HeaderMatchedRouting.golden.proto.json",
    "content": "{\n  \"tests\": [\n    {\n      \"test_name\": \"Test_1\", \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"cluster_name\": \"local_service_without_headers\"}\n    },\n    {\n      \"test_name\": \"Test_2\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [\n          {\n            \"key\": \"test_header\",\n            \"value\": \"test\"\n          }\n        ]\n      },\n      \"validate\": {\"cluster_name\": \"local_service_with_headers\"}\n    },\n    {\n      \"test_name\": \"Test_3\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [\n          {\n            \"key\": \"test_header_multiple1\",\n            \"value\": \"test1\"\n          },\n          {\n            \"key\": \"test_header_multiple2\",\n            \"value\": \"test2\"\n          }\n        ]\n      },\n      \"validate\": {\"cluster_name\": \"local_service_with_multiple_headers\"}\n    },\n    {\n      \"test_name\": \"Test_4\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [\n          {\n            \"key\": \"non_existent_header\",\n            \"value\": \"foo\"\n          }\n        ]\n      },\n      \"validate\": {\"cluster_name\": \"local_service_without_headers\"}\n    },\n    {\n      \"test_name\": \"Test_5\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [\n          {\n            \"key\": \"test_header_presence\",\n            \"value\": \"test\"\n          }\n        ]\n      },\n      \"validate\": {\"cluster_name\": 
\"local_service_with_empty_headers\"}\n    },\n    {\n      \"test_name\": \"Test_6\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [\n          {\n            \"key\": \"test_header_pattern\",\n            \"value\": \"user=test-1223\"\n          }\n        ]\n      },\n      \"validate\": {\"cluster_name\": \"local_service_with_header_pattern_set_regex\"}\n    },\n    {\n      \"test_name\": \"Test_7\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [\n          {\n            \"key\": \"test_header_pattern\",\n            \"value\": \"customer=test-1223\"\n          }\n        ]\n      },\n      \"validate\": {\"cluster_name\": \"local_service_without_headers\"}\n    },\n    {\n      \"test_name\": \"Test_8\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [\n          {\n            \"key\": \"content-type\",\n            \"value\": \"application/grpc+proto\"\n          }\n        ]\n      },\n      \"validate\": {\"cluster_name\": \"local_service_with_grpc\"}\n    },\n    {\n      \"test_name\": \"Test_9\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\",\n        \"additional_request_headers\": [\n          {\n            \"key\": \"content-type\",\n            \"value\": \"application/grpc+proto\"\n          },\n          {\n            \"key\": \"test_header\",\n            \"value\": \"some_value\"\n          }\n        ]\n      },\n      \"validate\": {\"cluster_name\": \"local_service_with_grpc_and_other_header\"}\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/HeaderMatchedRouting.yaml",
    "content": "virtual_hosts:\n- name: local_service\n  domains:\n  - '*'\n  routes:\n  - match:\n      prefix: /\n      headers:\n      - name: test_header\n        exact_match: test\n    route:\n      cluster: local_service_with_headers\n  - match:\n      prefix: /\n      headers:\n      - name: test_header_multiple1\n        exact_match: test1\n      - name: test_header_multiple2\n        exact_match: test2\n    route:\n      cluster: local_service_with_multiple_headers\n\n  - match:\n      prefix: /\n      headers:\n      - name: test_header_presence\n    route:\n      cluster: local_service_with_empty_headers\n  - match:\n      prefix: /\n      headers:\n      - name: test_header_pattern\n        safe_regex_match:\n          google_re2: {}\n          regex: ^user=test-\\d+$\n    route:\n      cluster: local_service_with_header_pattern_set_regex\n  - match:\n      prefix: /\n      headers:\n      - name: test_header_pattern\n        exact_match: ^customer=test-\\d+$\n    route:\n      cluster: local_service_with_header_pattern_unset_regex\n  - match:\n      prefix: /\n      grpc: {}\n      headers:\n      - name: test_header\n        exact_match: some_value\n    route:\n      cluster: local_service_with_grpc_and_other_header\n  - match:\n      prefix: /\n      grpc: {}\n    route:\n      cluster: local_service_with_grpc\n  - match:\n      prefix: /\n    route:\n      cluster: local_service_without_headers\n"
  },
  {
    "path": "test/tools/router_check/test/config/Redirect.golden.proto.json",
    "content": "{\n  \"tests\": [\n    {\n      \"test_name\": \"Test_1\",\n      \"input\": {\n        \"authority\": \"www.foo.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"internal\": true,\n        \"ssl\": true\n      },\n      \"validate\": {\"path_redirect\": \"\"}\n    },\n    {\n      \"test_name\": \"Test_2\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"ssl\": true,\n        \"internal\": true\n      },\n      \"validate\": {\"path_redirect\": \"\"}\n    },\n    {\n      \"test_name\": \"Test_3\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"ssl\": false,\n        \"internal\": false\n      },\n      \"validate\": {\"path_redirect\": \"https://www.lyft.com/foo\"}\n    },\n    {\n      \"test_name\": \"Test_4\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"internal\": true\n      },\n      \"validate\": {\"path_redirect\": \"\"}\n    },\n    {\n      \"test_name\": \"Test_5\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_redirect\": \"https://api.lyft.com/foo\"}\n    },\n    {\n      \"test_name\": \"Test_6\",\n      \"input\": {\n        \"authority\": \"redirect.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"ssl\": false,\n        \"internal\": false\n      },\n      \"validate\": {\"path_redirect\": \"http://new.lyft.com/foo\"}\n    },\n    {\n      \"test_name\": \"Test_7\",\n      \"input\": {\n        \"authority\": \"redirect.lyft.com\",\n        \"path\": \"/bar\",\n        \"method\": \"GET\",\n        \"ssl\": true\n      },\n      \"validate\": {\"path_redirect\": \"https://redirect.lyft.com/new_bar\"}\n    },\n 
   {\n      \"test_name\": \"Test_8\",\n      \"input\": {\n        \"authority\": \"redirect.lyft.com\",\n        \"path\": \"/baz\",\n        \"method\": \"GET\",\n        \"ssl\": true,\n        \"internal\": false\n      },\n      \"validate\": {\"path_redirect\": \"https://new.lyft.com/new_baz\"}\n    },\n    {\n      \"test_name\": \"Test_9\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/pretest\",\n        \"method\": \"GET\",\n        \"ssl\": true\n      },\n      \"validate\": {\n        \"host_rewrite\": \"api.lyft.com\",\n        \"virtual_host_name\": \"api\",\n        \"path_rewrite\": \"/pre/test\",\n        \"cluster_name\": \"www2\"\n      }\n    },\n    {\n      \"test_name\": \"Test_10\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/secondtest\",\n        \"method\": \"GET\",\n        \"ssl\": true\n      },\n      \"validate\": {\n        \"host_rewrite\": \"api.lyft.com\",\n        \"virtual_host_name\": \"api\",\n        \"path_rewrite\": \"/second/test\",\n        \"cluster_name\": \"www2\"\n      }\n    },\n    {\n      \"test_name\": \"Test_11\",\n      \"input\": {\n        \"authority\": \"redirect.lyft.com\",\n        \"path\": \"/rewrite\",\n        \"method\": \"GET\",\n        \"ssl\": true\n      },\n      \"validate\": {\n        \"host_rewrite\": \"\",\n        \"virtual_host_name\": \"\",\n        \"path_rewrite\": \"\",\n        \"cluster_name\": \"\",\n        \"path_redirect\": \"https://redirect.lyft.com/blah\"\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/Redirect.yaml",
    "content": "virtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  require_tls: ALL\n  routes:\n    - match:\n        prefix: /\n      route:\n        cluster: www2\n- name: api\n  domains:\n  - api.lyft.com\n  require_tls: EXTERNAL_ONLY\n  routes:\n    - match:\n        prefix: /pretest\n      route:\n        prefix_rewrite: /pre/test\n        cluster: www2\n    - match:\n        prefix: /secondtest\n      route:\n        prefix_rewrite: /second/test\n        cluster: www2\n    - match:\n        prefix: /\n      route:\n        cluster: www2\n- name: redirect\n  domains:\n  - redirect.lyft.com\n  routes:\n    - match:\n        prefix: /blah\n      route:\n        cluster: www2\n    - match:\n        prefix: /foo\n      redirect:\n        host_redirect: new.lyft.com\n    - match:\n        prefix: /bar\n      redirect:\n        path_redirect: /new_bar\n    - match:\n        prefix: /baz\n      redirect:\n        host_redirect: new.lyft.com\n        path_redirect: /new_baz\n    - match:\n        prefix: /rewrite\n      redirect:\n        prefix_rewrite: /blah\n"
  },
  {
    "path": "test/tools/router_check/test/config/Redirect2.golden.proto.json",
    "content": "{\n  \"tests\": [\n    {\n      \"test_name\": \"Test_1\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"ssl\": true,\n        \"internal\": true\n      },\n      \"validate\": {\n        \"path_redirect\": \"\",\n        \"cluster_name\": \"www2\"\n      }\n    },\n    {\n      \"test_name\": \"Test_2\",\n      \"input\": {\n        \"authority\": \"redirect.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"ssl\": false,\n        \"internal\": false\n      },\n      \"validate\": {\n        \"path_redirect\": \"http://new.lyft.com/foo\",\n        \"cluster_name\": \"\"\n      }\n    },\n    {\n      \"test_name\": \"Test_3\",\n      \"input\": {\n        \"authority\": \"redirect.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"path_redirect\": \"http://new.lyft.com/foo\",\n        \"cluster_name\": \"\"\n      }\n    },\n    {\n      \"test_name\": \"Test_4\",\n      \"input\": {\n        \"authority\": \"redirect.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"ssl\": false\n      },\n      \"validate\": {\n        \"path_redirect\": \"http://new.lyft.com/foo\",\n        \"cluster_name\": \"\"\n      }\n    },\n    {\n      \"test_name\": \"Test_5\",\n      \"input\": {\n        \"authority\": \"redirect.lyft.com\",\n        \"ssl\": false,\n        \"path\": \"/foo\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"path_redirect\": \"http://new.lyft.com/foo\",\n        \"cluster_name\": \"\"\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/Redirect2.yaml",
    "content": "virtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n    - match:\n        prefix: /\n      route:\n        cluster: www2\n- name: redirect\n  domains:\n  - redirect.lyft.com\n  routes:\n    - match:\n        prefix: /foo\n      redirect:\n        host_redirect: new.lyft.com\n"
  },
  {
    "path": "test/tools/router_check/test/config/Redirect3.golden.proto.json",
    "content": "{\n  \"tests\": [\n    {\n      \"test_name\": \"Test_1\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"ssl\": true,\n        \"internal\": true\n      },\n      \"validate\": {\n        \"path_redirect\": \"\",\n        \"cluster_name\": \"www2\"\n      }\n    },\n    {\n      \"test_name\": \"Test_2\",\n      \"input\": {\n        \"authority\": \"redirect.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"cluster_name\": \"\",\n        \"path_redirect\": \"http://new.lyft.com/foo\"\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/Redirect3.yaml",
    "content": "virtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n    - match:\n        prefix: /\n      route:\n        weighted_clusters:\n          clusters:\n            - name: www2\n              weight: 100\n          total_weight: 100\n- name: redirect\n  domains:\n  - redirect.lyft.com\n  routes:\n    - match:\n        prefix: /foo\n      redirect:\n        host_redirect: new.lyft.com\n"
  },
  {
    "path": "test/tools/router_check/test/config/Runtime.golden.proto.json",
    "content": "{\n    \"tests\": [\n      {\n        \"test_name\": \"Test_1\",\n        \"input\": {\n          \"authority\": \"www.lyft.com\",\n          \"path\": \"/\",\n          \"method\": \"GET\",\n          \"ssl\": true,\n          \"internal\": true\n        },\n        \"validate\": {\n          \"cluster_name\": \"www3\",\n          \"virtual_cluster_name\": \"\",\n          \"virtual_host_name\": \"www2\",\n          \"path_rewrite\": \"/\",\n          \"host_rewrite\": \"www.lyft.com\",\n          \"path_redirect\": \"\"\n        }\n      },\n      {\n        \"test_name\": \"Test_2\",\n        \"input\": {\n          \"authority\": \"www.lyft.com\",\n          \"path\": \"/\",\n          \"method\": \"GET\",\n          \"ssl\": true,\n          \"internal\": true,\n          \"runtime\": \"runtime.key\",\n          \"random_value\": 70\n        },\n        \"validate\": {\n          \"cluster_name\": \"www3\",\n          \"virtual_cluster_name\": \"\",\n          \"virtual_host_name\": \"www2\",\n          \"path_rewrite\": \"/\",\n          \"host_rewrite\": \"www.lyft.com\",\n          \"path_redirect\": \"\"\n        }\n      },\n      {\n        \"test_name\": \"Test_3\",\n        \"input\": {\n          \"authority\": \"www.lyft.com\",\n          \"path\": \"/\",\n          \"method\": \"GET\",\n          \"ssl\": true,\n          \"internal\": true,\n          \"runtime\": \"runtime.key\",\n          \"random_value\": 20\n        },\n        \"validate\": {\n          \"cluster_name\": \"www2\",\n          \"virtual_cluster_name\": \"\",\n          \"virtual_host_name\": \"www2\",\n          \"path_rewrite\": \"/\",\n          \"host_rewrite\": \"www.lyft.com\",\n          \"path_redirect\": \"\"\n        }\n      },\n      {\n        \"test_name\": \"Test_4\",\n        \"input\": {\n          \"authority\": \"www.lyft.com\",\n          \"path\": \"/disabled\",\n          \"method\": \"GET\",\n          \"ssl\": true,\n          
\"internal\": true,\n          \"runtime\": \"runtime.key\",\n          \"random_value\": 0\n        },\n        \"validate\": {\n          \"cluster_name\": \"www4\",\n          \"virtual_cluster_name\": \"\",\n          \"virtual_host_name\": \"www2\",\n          \"path_rewrite\": \"/disabled\",\n          \"host_rewrite\": \"www.lyft.com\",\n          \"path_redirect\": \"\"\n        }\n      },\n      {\n        \"test_name\": \"Test_5\",\n        \"input\": {\n          \"authority\": \"www.lyft.com\",\n          \"path\": \"/disabled\",\n          \"method\": \"GET\",\n          \"ssl\": true,\n          \"internal\": true,\n          \"runtime\": \"runtime.key\",\n          \"random_value\": 2\n        },\n        \"validate\": {\n          \"cluster_name\": \"www2\",\n          \"virtual_cluster_name\": \"\",\n          \"virtual_host_name\": \"www2\",\n          \"path_rewrite\": \"/disabled\",\n          \"host_rewrite\": \"www.lyft.com\",\n          \"path_redirect\": \"\"\n        }\n      },\n      {\n        \"test_name\": \"Test_6\",\n        \"input\": {\n          \"authority\": \"www.lyft.com\",\n          \"path\": \"/disabled\",\n          \"method\": \"GET\",\n          \"ssl\": true,\n          \"internal\": true,\n          \"random_value\": 2\n        },\n        \"validate\": {\n          \"cluster_name\": \"www3\",\n          \"virtual_cluster_name\": \"\",\n          \"virtual_host_name\": \"www2\",\n          \"path_rewrite\": \"/disabled\",\n          \"host_rewrite\": \"www.lyft.com\",\n          \"path_redirect\": \"\"\n        }\n      },\n      {\n        \"test_name\": \"Test_7\",\n        \"input\": {\n          \"authority\": \"www.lyft.com\",\n          \"path\": \"/disabled\",\n          \"method\": \"GET\",\n          \"ssl\": true,\n          \"internal\": true\n        },\n        \"validate\": {\n          \"cluster_name\": \"www3\",\n          \"virtual_cluster_name\": \"\",\n          \"virtual_host_name\": \"www2\",\n    
      \"path_rewrite\": \"/disabled\",\n          \"host_rewrite\": \"www.lyft.com\",\n          \"path_redirect\": \"\"\n        }\n      }\n    ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/Runtime.yaml",
    "content": "virtual_hosts:\n- name: www2\n  domains:\n  - www.lyft.com\n  routes:\n    - match:\n        prefix: /disabled\n        runtime_fraction:\n          runtime_key: runtime.key\n          default_value:\n            numerator: 0\n            denominator: HUNDRED\n      route:\n        cluster: www4\n    - match:\n        prefix: /\n        runtime_fraction:\n          runtime_key: runtime.key\n          default_value:\n            numerator: 30\n            denominator: HUNDRED\n      route:\n        cluster: www2\n    - match:\n        prefix: /\n      route:\n        cluster: www3\n"
  },
  {
    "path": "test/tools/router_check/test/config/TestRoutes.golden.proto.json",
    "content": "{\n  \"tests\":  [\n    {\n      \"test_name\": \"Test1\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"cluster_name\": \"instant-server\",\n        \"virtual_cluster_name\": \"other\",\n        \"virtual_host_name\": \"default\",\n        \"path_rewrite\": \"/\",\n        \"host_rewrite\": \"api.lyft.com\",\n        \"path_redirect\": \"\"}\n    },\n    {\n      \"test_name\": \"Test2\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/api/leads/me\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"cluster_name\": \"ats\"}\n    },\n    {\n      \"test_name\": \"Test3\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/api/locations?works=true\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"cluster_name\": \"locations\"}\n    },\n    {\n      \"test_name\": \"Test4\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/api/locations\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"cluster_name\": \"locations\"}\n    },\n    {\n      \"test_name\": \"Test5\",\n      \"input\": {\n        \"authority\": \"lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"cluster_name\": \"www2\"}\n    },\n    {\n      \"test_name\": \"Test6\",\n      \"input\": {\n        \"authority\": \"wwww.lyft.com\",\n        \"path\": \"/\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"cluster_name\": \"root_www2\"}\n    },\n    {\n      \"test_name\": \"Test7\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/new_endpoint/foo\",\n        \"method\": \"GET\"\n      },\n      \"validate\":\n        {\n    \"cluster_name\": \"www2\",\n    \"virtual_host_name\": \"www2\"\n        }\n    },\n    {\n      
\"test_name\": \"Test8\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/new_endpoint/foo\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_rewrite\": \"/api/new_endpoint/foo\"}\n    },\n    {\n      \"test_name\": \"Test9\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/api/locations?works=true\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_rewrite\": \"/rewrote?works=true\"}\n    },\n    {\n      \"test_name\": \"Test10\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_rewrite\": \"/bar\"}\n    },\n    {\n      \"test_name\": \"Test11\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/host/rewrite/me\",\n        \"method\": \"GET\"\n     },\n      \"validate\": {\"host_rewrite\":\"new_host\"}\n    },\n    {\n      \"test_name\": \"Test12\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/API/locations?works=true\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_rewrite\": \"/rewrote?works=true\"}\n    },\n    {\n      \"test_name\": \"Test13\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/fooD\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_rewrite\": \"/cAndy\"}\n    },\n    {\n      \"test_name\": \"Test14\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/FOO\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_rewrite\": \"/FOO\"}\n    },\n    {\n      \"test_name\": \"Test15\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/ApPles\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_rewrite\": \"/ApPles\"}\n    },\n    {\n      \"test_name\": \"Test16\",\n      \"input\": 
{\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/oLDhost/rewrite/me\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"host_rewrite\": \"api.lyft.com\"}\n    },\n    {\n      \"test_name\": \"Test17\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/Tart\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_rewrite\": \"/Tart\"}\n    },\n    {\n      \"test_name\": \"Test18\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/newhost/rewrite/me\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"host_rewrite\": \"new_host\"}\n    },\n    {\n      \"test_name\": \"Test19\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/rides\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"other\"}\n    },\n    {\n      \"test_name\": \"Test20\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/rides/blah\",\n        \"method\": \"POST\"\n       },\n      \"validate\": {\"virtual_cluster_name\": \"other\"}\n    },\n    {\n      \"test_name\": \"Test21\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/rides\",\n        \"method\": \"POST\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"ride_request\"}\n    },\n    {\n      \"test_name\": \"Test22\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/rides/123\",\n        \"method\": \"PUT\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"update_ride\"}\n    },\n    {\n      \"test_name\": \"Test23\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/users/123/chargeaccounts\",\n        \"method\": \"POST\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"cc_add\"}\n    },\n    {\n      \"test_name\": \"Test24\",\n      \"input\": {\n        
\"authority\": \"api.lyft.com\",\n        \"path\": \"/users/123/chargeaccounts/hello123\",\n        \"method\": \"PUT\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"cc_add\"}\n    },\n    {\n      \"test_name\": \"Test25\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/users/123/chargeaccounts/validate\",\n        \"method\": \"PUT\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"other\"}\n    },\n    {\n      \"test_name\": \"Test26\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/foo/bar\",\n        \"method\": \"PUT\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"other\"}\n    },\n    {\n      \"test_name\": \"Test27\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/users\",\n        \"method\": \"POST\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"create_user_login\"}\n    },\n    {\n      \"test_name\": \"Test28\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/users/123\",\n        \"method\": \"PUT\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"update_user\"}\n    },\n    {\n      \"test_name\": \"Test29\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/users/123/location\",\n        \"method\": \"POST\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"ulu\"}\n    },\n    {\n      \"test_name\": \"Test30\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/something/else\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"virtual_cluster_name\": \"other\"}\n    },\n    {\n      \"test_name\": \"RequestHeaderMatches 1\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/host/rewrite/me\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"host_rewrite\": \"new_host\",\n        
\"request_header_matches\": [\n          {\n            \"name\": \":authority\",\n            \"exact_match\": \"new_host\"\n          },\n          {\n            \"name\": \":path\",\n            \"exact_match\": \"/host/rewrite/me\"\n          },\n          {\n            \"name\": \":method\",\n            \"exact_match\": \"GET\"\n          },\n          {\n            \"name\": \":cookie\",\n            \"invert_match\": true\n          }\n        ]\n      }\n    },\n    {\n      \"test_name\": \"RequestHeaderMatches 2\",\n      \"input\": {\n        \"authority\": \"api.lyft.com\",\n        \"path\": \"/customheaders\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"request_header_matches\": [\n          {\n            \"name\": \"X-Client-IP\",\n            \"exact_match\": \"127.0.0.1\"\n          }\n        ]\n      }\n    },\n    {\n      \"test_name\": \"ResponseHeaderMatches\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/ping\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"cluster_name\": \"\",\n        \"host_rewrite\": \"\",\n        \"path_redirect\": \"http://www.lyft.com/ping\",\n        \"path_rewrite\": \"\",\n        \"virtual_cluster_name\": \"\",\n        \"virtual_host_name\": \"\",\n        \"response_header_matches\": [\n          {\n            \"name\": \"content-type\",\n            \"exact_match\": \"text/plain\"\n          },\n          {\n            \"name\": \"content-length\",\n            \"range_match\": \n            {\n              \"start\": 0,\n              \"end\": 100\n            }\n          },\n          {\n            \"name\": \"x-ping-response\",\n            \"exact_match\": \"yes\"\n          },\n          {\n            \"name\": \"x-ping-response\",\n            \"present_match\": true\n          },\n          {\n            \"name\": \"x-pong-response\",\n            \"present_match\": true,\n            
\"invert_match\": true\n          },\n        ]\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/TestRoutes.yaml",
    "content": "virtual_hosts:\n  - name: www2\n    domains:\n    - lyft.com\n    - www.lyft.com\n    - w.lyft.com\n    - ww.lyft.com\n    - wwww.lyft.com\n    routes:\n      - match:\n          prefix: /new_endpoint\n        route:\n          cluster: www2\n          prefix_rewrite: /api/new_endpoint\n      - match:\n          prefix: /ping\n        direct_response:\n          status: 200\n          body:\n            inline_string: \"{ 'message': 'Success!' }\"\n        response_headers_to_add:\n          - header:\n              key: \"x-ping-response\"\n              value: \"yes\"\n      - match:\n          path: /\n        route:\n          cluster: root_www2\n      - match:\n          prefix: /\n        route:\n          cluster: www2\n  - name: www2_staging\n    domains:\n      - www-staging.lyft.net\n      - www-staging-orca.lyft.com\n    routes:\n      - match:\n          prefix: /\n        route:\n          cluster: www2_staging\n  - name: default\n    domains:\n    - '*'\n    routes:\n      - match:\n          prefix: /api/application_data\n        route:\n          cluster: ats\n      - match:\n          path: /api/locations\n          case_sensitive: false\n        route:\n          cluster: locations\n          prefix_rewrite: /rewrote\n      - match:\n          prefix: /api/leads/me\n        route:\n          cluster: ats\n      - match:\n          prefix: /host/rewrite/me\n        route:\n          cluster: ats\n          host_rewrite: new_host\n      - match:\n          prefix: /oldhost/rewrite/me\n        route:\n          cluster: ats\n          host_rewrite: new_oldhost\n      - match:\n          path: /foo\n          case_sensitive: true\n        route:\n          prefix_rewrite: /bar\n          cluster: instant-server\n      - match:\n          path: /tar\n          case_sensitive: false\n        route:\n          prefix_rewrite: /car\n          cluster: instant-server\n      - match:\n          prefix: /newhost/rewrite/me\n          
case_sensitive: false\n        route:\n          cluster: ats\n          host_rewrite: new_host\n      - match:\n          path: /FOOD\n          case_sensitive: false\n        route:\n          prefix_rewrite: /cAndy\n          cluster: ats\n      - match:\n          path: /ApplEs\n          case_sensitive: true\n        route:\n          prefix_rewrite: /oranGES\n          cluster: instant-server\n      - match:\n          prefix: /customheaders\n        route:\n          cluster: ats\n          host_rewrite: new_host\n        request_headers_to_add:\n          - header:\n              key: X-Client-IP\n              value: '%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%'\n      - match:\n          prefix: /\n        route:\n          cluster: instant-server\n          timeout:\n            seconds: 30\n    virtual_clusters:\n      - headers:\n          - name: :path\n            safe_regex_match:\n              google_re2: {}\n              regex: ^/rides$\n          - name: :method\n            exact_match: POST\n        name: ride_request\n      - headers:\n          - name: :path\n            safe_regex_match:\n              google_re2: {}\n              regex: ^/rides/\\d+$\n          - name: :method\n            exact_match: PUT\n        name: update_ride\n      - headers:\n          - name: :path\n            safe_regex_match:\n              google_re2: {}\n              regex: ^/users/\\d+/chargeaccounts$\n          - name: :method\n            exact_match: POST\n        name: cc_add\n      - headers:\n          - name: :path\n            safe_regex_match:\n              google_re2: {}\n              regex: ^/users/\\d+/chargeaccounts/[^validate]\\w+$\n          - name: :method\n            exact_match: PUT\n        name: cc_add\n      - headers:\n          - name: :path\n            safe_regex_match:\n              google_re2: {}\n              regex: ^/users$\n          - name: :method\n            exact_match: POST\n        name: create_user_login\n      - 
headers:\n          - name: :path\n            safe_regex_match:\n              google_re2: {}\n              regex: ^/users/\\d+$\n          - name: :method\n            exact_match: PUT\n        name: update_user\n      - headers:\n          - name: :path\n            safe_regex_match:\n              google_re2: {}\n              regex: ^/users/\\d+/location$\n          - name: :method\n            exact_match: POST\n        name: ulu\ninternal_only_headers:\n  - x-lyft-user-id\nresponse_headers_to_add:\n  - header:\n      key: x-envoy-upstream-canary\n      value: \"true\"\nresponse_headers_to_remove:\n  - x-envoy-upstream-canary\n  - x-envoy-virtual-cluster\n"
  },
  {
    "path": "test/tools/router_check/test/config/TestRoutesFailures.golden.proto.json",
    "content": "{\n  \"tests\":  [\n    {\n      \"test_name\": \"ResponseHeaderMatches Failures\",\n      \"input\": {\n        \"authority\": \"www.lyft.com\",\n        \"path\": \"/ping\",\n        \"method\": \"GET\"\n      },\n      \"validate\": {\n        \"cluster_name\": \"\",\n        \"host_rewrite\": \"\",\n        \"path_redirect\": \"http://www.lyft.com/ping\",\n        \"path_rewrite\": \"\",\n        \"virtual_cluster_name\": \"\",\n        \"virtual_host_name\": \"\",\n        \"response_header_matches\": [\n          {\n            \"name\": \"content-type\",\n            \"exact_match\": \"text/plain\",\n            \"invert_match\": true\n          },\n          {\n            \"name\": \"content-length\",\n            \"range_match\": \n            {\n              \"start\": 100,\n              \"end\": 1000\n            }\n          },\n          {\n            \"name\": \"x-ping-response\",\n            \"exact_match\": \"pong\"\n          },\n          {\n            \"name\": \"x-ping-response\",\n            \"present_match\": true,\n            \"invert_match\": true\n          },\n          {\n            \"name\": \"x-pong-response\",\n            \"present_match\": true,\n            \"invert_match\": false\n          }\n        ]\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/Weighted.golden.proto.json",
    "content": "{\n  \"tests\": [\n    {\n      \"test_name\": \"Test_1\",\n      \"input\": {\n        \"authority\": \"www1.lyft.com\",\n        \"path\": \"/foo\",\n        \"ssl\": true,\n        \"internal\": true,\n        \"method\": \"GET\"\n      },\n      \"validate\": {\"path_redirect\": \"\"}\n    },\n    {\n      \"test_name\": \"Test_2\",\n      \"input\": {\n        \"authority\": \"www1.lyft.com\",\n        \"path\": \"/test/123\",\n        \"method\": \"GET\",\n        \"random_value\": 115\n      },\n      \"validate\": {\"cluster_name\": \"cluster1\", \"virtual_cluster_name\": \"test_virtual_cluster\"}\n    },\n    {\n      \"test_name\": \"Test_3\",\n      \"input\": {\n        \"authority\": \"www1.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"random_value\": 445\n      },\n      \"validate\": {\"cluster_name\": \"cluster2\"}\n    },\n    {\n      \"test_name\": \"Test_4\",\n      \"input\": {\n        \"authority\": \"www1.lyft.com\",\n        \"path\": \"/foo\",\n        \"method\": \"GET\",\n        \"random_value\": 560\n      },\n      \"validate\": {\"cluster_name\": \"cluster3\"}\n    }\n  ]\n}\n"
  },
  {
    "path": "test/tools/router_check/test/config/Weighted.golden.proto.pb_text",
    "content": "tests {\n  test_name: \"Test_1\"\n  input: {\n    authority: \"www1.lyft.com\"\n    path: \"/foo\"\n    ssl: true\n    internal: true\n    method: \"GET\"\n  }\n  validate: {\n    path_redirect: { value: \"\" }\n  }\n}\n\ntests {\n  test_name: \"Test_2\"\n  input: {\n    authority: \"www1.lyft.com\"\n    path: \"/test/123\"\n    method: \"GET\"\n    random_value: 115\n  }\n  validate: {\n    cluster_name: { value: \"cluster1\"}\n    virtual_cluster_name: { value: \"test_virtual_cluster\" }\n  }\n}"
  },
  {
    "path": "test/tools/router_check/test/config/Weighted.golden.proto.yaml",
    "content": "tests:\n- test_name: Test_1\n  input:\n    authority: www1.lyft.com\n    path: \"/foo\"\n    ssl: true\n    method: GET\n    internal: true\n  validate:\n    path_redirect: ''\n- test_name: Test_2\n  input:\n    authority: www1.lyft.com\n    path: \"/test/123\"\n    method: GET\n    random_value: 115\n  validate:\n    cluster_name: cluster1\n    virtual_cluster_name: test_virtual_cluster\n- test_name: Test_3\n  input:\n    authority: www1.lyft.com\n    path: \"/foo\"\n    method: GET\n    random_value: 445\n  validate:\n    cluster_name: cluster2\n- test_name: Test_4\n  input:\n    authority: www1.lyft.com\n    path: \"/foo\"\n    method: GET\n    random_value: 560\n  validate:\n    cluster_name: cluster3\n"
  },
  {
    "path": "test/tools/router_check/test/config/Weighted.yaml",
    "content": "virtual_hosts:\n- name: www1\n  domains:\n  - www1.lyft.com\n  routes:\n    - match:\n        prefix: /\n      route:\n        weighted_clusters:\n          clusters:\n            - name: cluster1\n              weight: 30\n            - name: cluster2\n              weight: 30\n            - name: cluster3\n              weight: 40\n  virtual_clusters:\n    - headers:\n      - name: :path\n        safe_regex_match:\n          google_re2: {}\n          regex: ^/test/\\d+$\n      - name: :method\n        exact_match: GET\n      name: test_virtual_cluster\n- name: www2\n  domains:\n  - www2.lyft.com\n  routes:\n    - match:\n        prefix: /\n      route:\n        weighted_clusters:\n          runtime_key_prefix: www2_weights\n          clusters:\n            - name: cluster1\n              weight: 30\n            - name: cluster2\n              weight: 30\n            - name: cluster3\n              weight: 40\n"
  },
  {
    "path": "test/tools/router_check/test/route_tests.sh",
    "content": "#!/bin/bash\n\nset -e\n\n# Router_check_tool binary path\nPATH_BIN=\"${TEST_SRCDIR}/envoy\"/test/tools/router_check/router_check_tool\n\n# Config json path\nPATH_CONFIG=\"${TEST_SRCDIR}/envoy\"/test/tools/router_check/test/config\n\nTESTS=(\"ContentType\" \"ClusterHeader\" \"DirectResponse\" \"HeaderMatchedRouting\" \"Redirect\" \"Redirect2\" \"Redirect3\" \"Runtime\" \"TestRoutes\" \"Weighted\")\n\n# Testing expected matches\nfor t in \"${TESTS[@]}\"\ndo\n  \"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/${t}.yaml\" \"-t\" \"${PATH_CONFIG}/${t}.golden.proto.json\" \"--details\"\ndone\n\n# Testing coverage flag passes\nCOVERAGE_CMD=\"${PATH_BIN} -c ${PATH_CONFIG}/Redirect.yaml -t ${PATH_CONFIG}/Redirect.golden.proto.json --details -f \"\nCOVERAGE_OUTPUT=$($COVERAGE_CMD \"1.0\" 2>&1) || echo \"${COVERAGE_OUTPUT:-no-output}\"\nif [[ \"${COVERAGE_OUTPUT}\" != *\"Current route coverage: \"* ]] ; then\n  exit 1\nfi\n\nCOMP_COVERAGE_CMD=\"${PATH_BIN} -c ${PATH_CONFIG}/ComprehensiveRoutes.yaml -t ${PATH_CONFIG}/ComprehensiveRoutes.golden.proto.json --details -f \"\nCOVERAGE_OUTPUT=$($COMP_COVERAGE_CMD \"100\" \"--covall\" 2>&1) || echo \"${COVERAGE_OUTPUT:-no-output}\"\nif [[ \"${COVERAGE_OUTPUT}\" != *\"Current route coverage: 100%\"* ]] ; then\n  exit 1\nfi\n\nDIRECT_RESPONSE_COVERAGE_CMD=\"${PATH_BIN} -c ${PATH_CONFIG}/DirectResponse.yaml -t ${PATH_CONFIG}/DirectResponse.golden.proto.json --details -f \"\nCOVERAGE_OUTPUT=$($DIRECT_RESPONSE_COVERAGE_CMD \"100\" \"--covall\" 2>&1) || echo \"${DIRECT_RESPONSE_COVERAGE_CMD:-no-output}\"\nif [[ \"${COVERAGE_OUTPUT}\" != *\"Current route coverage: 100%\"* ]] ; then\n  exit 1\nfi\n\nRUNTIME_COVERAGE_OUTPUT=$(\"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/Runtime.yaml\" \"-t\" \"${PATH_CONFIG}/Runtime.golden.proto.json\" \"--details\" \"--covall\" 2>&1) ||\n  echo \"${RUNTIME_COVERAGE_OUTPUT:-no-output}\"\nif [[ \"${RUNTIME_COVERAGE_OUTPUT}\" != *\"Current route coverage: 100%\"* ]] ; then\n  exit 1\nfi\n\n# Testing coverage 
flag fails\nCOVERAGE_OUTPUT=$($COVERAGE_CMD \"100\" 2>&1) || echo \"${COVERAGE_OUTPUT:-no-output}\"\nif [[ \"${COVERAGE_OUTPUT}\" != *\"Failed to meet coverage requirement: 100%\"* ]] ; then\n  exit 1\nfi\n\n# Test the yaml test file support\n\"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/Weighted.yaml\" \"-t\" \"${PATH_CONFIG}/Weighted.golden.proto.yaml\" \"--details\"\n\n# Test the proto text test file support\n\"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/Weighted.yaml\" \"-t\" \"${PATH_CONFIG}/Weighted.golden.proto.pb_text\" \"--details\"\n\n# Bad config file\necho \"testing bad config output\"\nBAD_CONFIG_OUTPUT=$(\"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/Redirect.golden.proto.json\" \"-t\" \"${PATH_CONFIG}/TestRoutes.yaml\" 2>&1) ||\n  echo \"${BAD_CONFIG_OUTPUT:-no-output}\"\nif [[ \"${BAD_CONFIG_OUTPUT}\" != *\"Protobuf message (type envoy.config.route.v3.RouteConfiguration reason INVALID_ARGUMENT:tests: Cannot find field.) has unknown fields\"* ]]; then\n  exit 1\nfi\n\n# Failure output flag test cases\necho \"testing failure test cases\"\n# Failure test case with only details flag set\nFAILURE_OUTPUT=$(\"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/TestRoutes.yaml\" \"-t\" \"${PATH_CONFIG}/Weighted.golden.proto.json\" \"--details\" 2>&1) ||\n  echo \"${FAILURE_OUTPUT:-no-output}\"\nif [[ \"${FAILURE_OUTPUT}\" != *\"Test_1\"*\"Test_2\"*\"expected: [cluster1], actual: [instant-server], test type: cluster_name\"*\"expected: [test_virtual_cluster], actual: [other], test type: virtual_cluster_name\"*\"Test_3\"* ]]; then\n  exit 1\nfi\n\n# Failure test case with details flag set and failures flag set\nFAILURE_OUTPUT=$(\"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/TestRoutes.yaml\" \"-t\" \"${PATH_CONFIG}/Weighted.golden.proto.json\" \"--details\"  \"--only-show-failures\" 2>&1) ||\n  echo \"${FAILURE_OUTPUT:-no-output}\"\nif [[ \"${FAILURE_OUTPUT}\" != *\"Test_2\"*\"expected: [cluster1], actual: [instant-server], test type: cluster_name\"* ]] || [[ \"${FAILURE_OUTPUT}\" == *\"Test_1\"* 
]]; then\n  exit 1\nfi\n\n# Failure test case with details flag unset and failures flag set\nFAILURE_OUTPUT=$(\"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/TestRoutes.yaml\" \"-t\" \"${PATH_CONFIG}/Weighted.golden.proto.json\" \"--only-show-failures\" 2>&1) ||\n  echo \"${FAILURE_OUTPUT:-no-output}\"\nif [[ \"${FAILURE_OUTPUT}\" != *\"Test_2\"*\"expected: [cluster1], actual: [instant-server], test type: cluster_name\"* ]] || [[ \"${FAILURE_OUTPUT}\" == *\"Test_1\"* ]]; then\n  exit 1\nfi\n\n# Failure test case to examine error strings\necho \"testing error strings\"\nFAILURE_OUTPUT=$(\"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/TestRoutes.yaml\" \"-t\" \"${PATH_CONFIG}/TestRoutesFailures.golden.proto.json\" \"--only-show-failures\" 2>&1) ||\n  echo \"${FAILURE_OUTPUT:-no-output}\"\nif ! echo \"${FAILURE_OUTPUT}\" | grep -Fxq \"expected: [content-type: text/plain], actual: NOT [content-type: text/plain], test type: response_header_matches.exact_match\"; then\n  exit 1\nfi\nif ! echo \"${FAILURE_OUTPUT}\" | grep -Fxq \"actual: [content-length: 25], test type: response_header_matches.range_match\"; then\n  exit 1\nfi\nif ! echo \"${FAILURE_OUTPUT}\" | grep -Fxq \"expected: [x-ping-response: pong], actual: [x-ping-response: yes], test type: response_header_matches.exact_match\"; then\n  exit 1\nfi\nif ! echo \"${FAILURE_OUTPUT}\" | grep -Fxq \"expected: [has(x-ping-response):false], actual: [has(x-ping-response):true], test type: response_header_matches.present_match\"; then\n  exit 1\nfi\nif ! 
echo \"${FAILURE_OUTPUT}\" | grep -Fxq \"expected: [has(x-pong-response):true], actual: [has(x-pong-response):false], test type: response_header_matches.present_match\"; then\n  exit 1\nfi\n\n# Missing test results\necho \"testing missing tests output test cases\"\nMISSING_OUTPUT=$(\"${PATH_BIN}\" \"-c\" \"${PATH_CONFIG}/TestRoutes.yaml\" \"-t\" \"${PATH_CONFIG}/TestRoutes.golden.proto.json\" \"--details\" \"--covall\" 2>&1) ||\n  echo \"${MISSING_OUTPUT:-no-output}\"\nif [[ \"${MISSING_OUTPUT}\" != *\"Missing test for host: www2_staging, route: prefix: \\\"/\\\"\"*\"Missing test for host: default, route: prefix: \\\"/api/application_data\\\"\"* ]]; then\n  exit 1\nfi\n"
  },
  {
    "path": "test/tools/router_check/validation.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.RouterCheckToolSchema;\n\nimport \"envoy/config/core/v3/base.proto\";\nimport \"envoy/config/route/v3/route_components.proto\";\nimport \"google/protobuf/wrappers.proto\";\nimport \"validate/validate.proto\";\n\n// [#protodoc-title: RouterCheckTool Validation]\n\n// The Validation Schema of the envoy router check test files.\n// The accepted input formats for the test are json and yaml.\n// The tool transparently converts json/yaml into this proto schema.\nmessage Validation {\n  // A collection of test cases.\n  repeated ValidationItem tests = 1 [(validate.rules).repeated .min_items = 1];\n}\n\n// Schema for each test case.\nmessage ValidationItem {\n  // Name of the test case. There is no uniqueness constraint among the test case names.\n  // The name has to be non empty.\n  string test_name = 1 [(validate.rules).string.min_len = 1];\n\n  // The input constraints of the test case.\n  ValidationInput input = 2 [(validate.rules).message.required = true];\n\n  // The validations that need to be performed on the resultant route.\n  ValidationAssert validate = 3 [(validate.rules).message.required = true];\n}\n\n// Input values sent to the router that determine the returned route.\n// This includes the `pseudo-header <https://http2.github.io/http2-spec/#HttpRequest>`_ fields\n// defined in HTTP2.\nmessage ValidationInput {\n  reserved 8;\n  // This pseudo-header field includes the authority portion of the target URI.\n  // Clients that generate HTTP/2 requests directly SHOULD use the :authority pseudo-header field\n  // instead of the Host header field.\n  string authority = 1 [(validate.rules).string.min_len = 1];\n\n  // The :path pseudo-header field includes the path and query parts of the target URI.\n  // This pseudo-header field MUST NOT be empty for http or https URIs.\n  // http or https URIs that do not contain a path component MUST include a value of '/'\n  // The exception to this rule is an OPTIONS 
request for an http or https URI that does not include\n  // a path component.\n  string path = 2 [(validate.rules).string.min_len = 1];\n\n  // This pseudo-header field includes the HTTP method.\n  string method = 4 [(validate.rules).string.min_len = 3];\n\n  // An integer used to identify the target for weighted cluster selection.\n  // The default value of random_value is 0.\n  uint64 random_value = 5;\n\n  // A flag that determines whether to set x-forwarded-proto to https or http.\n  // By setting x-forwarded-proto to a given protocol, the tool is able to simulate the behavior of\n  // a client issuing a request via http or https. By default ssl is false which corresponds to\n  // x-forwarded-proto set to http.\n  bool ssl = 6;\n\n  // A flag that determines whether to set x-envoy-internal to “true”.\n  // If not specified, or if internal is equal to false, x-envoy-internal is not set.\n  bool internal = 7;\n\n  // Additional request or response headers to be added as input for route determination.\n  // The “:authority”, “:path”, “:method”, “x-forwarded-proto”, and “x-envoy-internal” fields are\n  // specified by the other config options and should not be set here.\n  repeated envoy.config.core.v3.HeaderValue additional_request_headers = 10;\n  repeated envoy.config.core.v3.HeaderValue additional_response_headers = 11;\n\n  // Runtime setting key to enable for the test case.\n  // If a route depends on the runtime, the route will be enabled based on the random_value defined\n  // in the test. 
Only a random_value less than the fractional percentage will enable the route.\n  string runtime = 9;\n}\n\n// The validate object specifies the returned route parameters to match.\n// At least one test parameter must be specified.\n// Use “” (empty string) to indicate that no return value is expected.\n// For example, to test that no cluster match is expected use {“cluster_name”: “”}.\nmessage ValidationAssert {\n  reserved 7, 8;\n  // Match the cluster name.\n  google.protobuf.StringValue cluster_name = 1;\n\n  // Match the virtual cluster name.\n  google.protobuf.StringValue virtual_cluster_name = 2;\n\n  // Match the virtual host name.\n  google.protobuf.StringValue virtual_host_name = 3;\n\n  // Match the host header field after rewrite.\n  google.protobuf.StringValue host_rewrite = 4;\n\n  // Match the path header field after rewrite.\n  google.protobuf.StringValue path_rewrite = 5;\n\n  // Match the returned redirect path.\n  google.protobuf.StringValue path_redirect = 6;\n\n  // Match the listed request or response header fields. These fields are deprecated, use *_header_matches instead.\n  repeated envoy.config.core.v3.HeaderValue request_header_fields = 9 [deprecated = true];\n  repeated envoy.config.core.v3.HeaderValue response_header_fields = 10 [deprecated = true];\n\n  // Match the listed request or response headers.\n  // Example header fields include the “:path”, “cookie”, and “date” fields.\n  // The header fields are checked after all other test cases.\n  // Thus, the header fields checked will be those of the redirected or rewritten routes when\n  // applicable.\n  repeated envoy.config.route.v3.HeaderMatcher request_header_matches = 11;\n  repeated envoy.config.route.v3.HeaderMatcher response_header_matches = 12;\n}\n"
  },
  {
    "path": "test/tools/schema_validator/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_test_binary\",\n    \"envoy_cc_test_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test_binary(\n    name = \"schema_validator_tool\",\n    deps = [\":schema_validator_lib\"],\n)\n\nenvoy_cc_test_library(\n    name = \"schema_validator_lib\",\n    srcs = [\n        \"schema_validator.cc\",\n        \"validator.cc\",\n        \"validator.h\",\n    ],\n    # TCLAP command line parser needs this to support int64_t/uint64_t in several build environments.\n    copts = [\"-DHAVE_LONG_LONG\"],\n    external_deps = [\"tclap\"],\n    deps = [\n        \"//include/envoy/api:api_interface\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"//test/test_common:utility_lib\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/service/discovery/v3:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "test/tools/schema_validator/schema_validator.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <iostream>\n#include <string>\n\n#include \"envoy/common/exception.h\"\n\n#include \"test/tools/schema_validator/validator.h\"\n\nint main(int argc, char** argv) {\n  Envoy::Options options(argc, argv);\n  Envoy::Validator v;\n\n  try {\n    v.validate(options.configPath(), options.schemaType());\n  } catch (const Envoy::EnvoyException& ex) {\n    std::cerr << ex.what() << std::endl;\n    return EXIT_FAILURE;\n  }\n  return EXIT_SUCCESS;\n}\n"
  },
  {
    "path": "test/tools/schema_validator/validator.cc",
    "content": "#include \"test/tools/schema_validator/validator.h\"\n\n#include \"envoy/config/route/v3/route.pb.h\"\n#include \"envoy/config/route/v3/route.pb.validate.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.h\"\n#include \"envoy/service/discovery/v3/discovery.pb.validate.h\"\n\n#include \"common/protobuf/utility.h\"\n\n#include \"tclap/CmdLine.h\"\n\nnamespace Envoy {\n\nconst std::string Schema::DISCOVERY_RESPONSE = \"discovery_response\";\nconst std::string Schema::ROUTE = \"route\";\n\nconst std::string& Schema::toString(Type type) {\n  switch (type) {\n  case Type::DiscoveryResponse:\n    return DISCOVERY_RESPONSE;\n  case Type::Route:\n    return ROUTE;\n  }\n\n  NOT_REACHED_GCOVR_EXCL_LINE;\n}\n\nOptions::Options(int argc, char** argv) {\n  TCLAP::CmdLine cmd(\"schema_validator_tool\", ' ', \"none\", false);\n  TCLAP::ValueArg<std::string> config_path(\"c\", \"config-path\", \"Path to configuration file.\", true,\n                                           \"\", \"string\", cmd);\n  TCLAP::ValueArg<std::string> schema_type(\n      \"t\", \"schema-type\",\n      \"Type of schema to validate the configuration against. 
Supported schemas are: 'route', 'discovery_response'.\", true,\n      \"\", \"string\", cmd);\n\n  try {\n    cmd.parse(argc, argv);\n  } catch (TCLAP::ArgException& e) {\n    std::cerr << \"error: \" << e.error() << std::endl;\n    exit(EXIT_FAILURE);\n  }\n\n  if (schema_type.getValue() == Schema::toString(Schema::Type::Route)) {\n    schema_type_ = Schema::Type::Route;\n  } else if (schema_type.getValue() == Schema::toString(Schema::Type::DiscoveryResponse)) {\n    schema_type_ = Schema::Type::DiscoveryResponse;\n  } else {\n    std::cerr << \"error: unknown schema type '\" << schema_type.getValue() << \"'\" << std::endl;\n    exit(EXIT_FAILURE);\n  }\n\n  config_path_ = config_path.getValue();\n}\n\nvoid Validator::validate(const std::string& config_path, Schema::Type schema_type) {\n\n  switch (schema_type) {\n  case Schema::Type::DiscoveryResponse: {\n    envoy::service::discovery::v3::DiscoveryResponse discovery_response_config;\n    TestUtility::loadFromFile(config_path, discovery_response_config, *api_);\n    TestUtility::validate(discovery_response_config);\n    break;\n  }\n  case Schema::Type::Route: {\n    envoy::config::route::v3::RouteConfiguration route_config;\n    TestUtility::loadFromFile(config_path, route_config, *api_);\n    TestUtility::validate(route_config);\n    break;\n  }\n  default:\n    NOT_REACHED_GCOVR_EXCL_LINE;\n  }\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/tools/schema_validator/validator.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"envoy/api/api.h\"\n\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"test/test_common/utility.h\"\n\nnamespace Envoy {\n\n/**\n * Class for Schemas supported by validation tool.\n */\nclass Schema {\npublic:\n  /**\n   * List of supported schemas to validate.\n   */\n  enum Type { DiscoveryResponse, Route };\n\n  /**\n   * Get a string representation of the schema type.\n   * @param type to convert.\n   * @return string representation of type.\n   */\n  static const std::string& toString(Type type);\n\nprivate:\n  static const std::string DISCOVERY_RESPONSE;\n  static const std::string ROUTE;\n};\n\n/**\n * Parses command line arguments for Schema Validator Tool.\n */\nclass Options {\npublic:\n  Options(int argc, char** argv);\n\n  /**\n   * @return the schema type.\n   */\n  Schema::Type schemaType() const { return schema_type_; }\n\n  /**\n   * @return the path to configuration file.\n   */\n  const std::string& configPath() const { return config_path_; }\n\nprivate:\n  Schema::Type schema_type_;\n  std::string config_path_;\n};\n\n/**\n * Validates the schema of a configuration.\n */\nclass Validator {\npublic:\n  Validator() : api_(Api::createApiForTest(stats_)) {}\n  /**\n   * Validates the configuration at config_path against schema_type.\n   * An EnvoyException is thrown in several cases:\n   *  - Cannot load the configuration from config_path(invalid path or malformed data).\n   *  - A schema error from validating the configuration.\n   * @param config_path specifies the path to the configuration file.\n   * @param schema_type specifies the schema to validate the configuration against.\n   */\n  void validate(const std::string& config_path, Schema::Type schema_type);\n\nprivate:\n  Stats::IsolatedStoreImpl stats_;\n  Api::ApiPtr api_;\n};\n\n} // namespace Envoy\n"
  },
  {
    "path": "test/tools/type_whisperer/BUILD",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_cc_test\", \"envoy_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_test(\n    name = \"api_type_db_test\",\n    srcs = [\"api_type_db_test.cc\"],\n    # MSVC does not allow strings over a certain length, see error C2026\n    tags = [\"skip_on_windows\"],\n    deps = [\"//tools/type_whisperer:api_type_db_lib\"],\n)\n"
  },
  {
    "path": "test/tools/type_whisperer/api_type_db_test.cc",
    "content": "#include \"gtest/gtest.h\"\n#include \"tools/type_whisperer/api_type_db.h\"\n\nnamespace Envoy {\nnamespace Tools {\nnamespace TypeWhisperer {\nnamespace {\n\n// Validate that ApiTypeDb::getLatestTypeInformation returns nullopt when no\n// type information exists.\nTEST(ApiTypeDb, GetLatestTypeInformationForTypeUnknown) {\n  const auto unknown_type_information = ApiTypeDb::getLatestTypeInformation(\"foo\");\n  EXPECT_EQ(absl::nullopt, unknown_type_information);\n}\n\n// Validate that ApiTypeDb::getLatestTypeInformation fetches the latest type\n// information when an upgrade occurs.\nTEST(ApiTypeDb, GetLatestTypeInformationForTypeKnownUpgraded) {\n  const auto known_type_information = ApiTypeDb::getLatestTypeInformation(\"envoy.type.Int64Range\");\n  EXPECT_EQ(\"envoy.type.v3.Int64Range\", known_type_information->type_name_);\n  EXPECT_EQ(\"envoy/type/v3/range.proto\", known_type_information->proto_path_);\n}\n\n// Validate that ApiTypeDb::getLatestTypeInformation is idempotent when no\n// upgrade occurs.\nTEST(ApiTypeDb, GetLatestTypeInformationForTypeKnownNoUpgrade) {\n  const auto known_type_information =\n      ApiTypeDb::getLatestTypeInformation(\"envoy.type.v3.Int64Range\");\n  EXPECT_EQ(\"envoy.type.v3.Int64Range\", known_type_information->type_name_);\n  EXPECT_EQ(\"envoy/type/v3/range.proto\", known_type_information->proto_path_);\n}\n\n} // namespace\n} // namespace TypeWhisperer\n} // namespace Tools\n} // namespace Envoy\n"
  },
  {
    "path": "test/tools/wee8_compile/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_binary\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_binary(\n    name = \"wee8_compile_tool\",\n    deps = [\":wee8_compile_lib\"],\n)\n\nenvoy_cc_library(\n    name = \"wee8_compile_lib\",\n    srcs = [\"wee8_compile.cc\"],\n    external_deps = [\"wee8\"],\n)\n"
  },
  {
    "path": "test/tools/wee8_compile/wee8_compile.cc",
    "content": "// NOLINT(namespace-envoy)\n\n#include <unistd.h>\n\n#include <fstream>\n#include <iostream>\n#include <sstream>\n#include <vector>\n\n#include \"v8-version.h\"\n#include \"wasm-api/wasm.hh\"\n\nuint32_t parseVarint(const byte_t*& pos, const byte_t* end) {\n  uint32_t n = 0;\n  uint32_t shift = 0;\n  byte_t b;\n\n  do {\n    if (pos + 1 > end) {\n      return static_cast<uint32_t>(-1);\n    }\n    b = *pos++;\n    n += (b & 0x7f) << shift;\n    shift += 7;\n  } while ((b & 0x80) != 0);\n\n  return n;\n}\n\nwasm::vec<byte_t> getVarint(uint32_t value) {\n  byte_t bytes[5];\n  int pos = 0;\n\n  while (pos < 5) {\n    if ((value & ~0x7F) == 0) {\n      bytes[pos++] = static_cast<uint8_t>(value);\n      break;\n    }\n\n    bytes[pos++] = static_cast<uint8_t>(value & 0x7F) | 0x80;\n    value >>= 7;\n  }\n\n  auto vec = wasm::vec<byte_t>::make_uninitialized(pos);\n  ::memcpy(vec.get(), bytes, pos);\n\n  return vec;\n}\n\nwasm::vec<byte_t> readWasmModule(const char* path, const std::string& name) {\n  // Open binary file.\n  auto file = std::ifstream(path, std::ios::binary);\n  file.seekg(0, std::ios_base::end);\n  const auto size = file.tellg();\n  file.seekg(0);\n  auto content = wasm::vec<byte_t>::make_uninitialized(size);\n  file.read(content.get(), size);\n  file.close();\n\n  if (file.fail()) {\n    std::cerr << \"ERROR: Failed to read the input file from: \" << path << std::endl;\n    return wasm::vec<byte_t>::invalid();\n  }\n\n  // Wasm header is 8 bytes (magic number + version).\n  const uint8_t magic_number[4] = {0x00, 0x61, 0x73, 0x6d};\n  if (size < 8 || ::memcmp(content.get(), magic_number, 4) != 0) {\n    std::cerr << \"ERROR: Failed to parse corrupted Wasm module from: \" << path << std::endl;\n    return wasm::vec<byte_t>::invalid();\n  }\n\n  // Parse custom sections to see if precompiled module already exists.\n  const byte_t* pos = content.get() + 8 /* Wasm header */;\n  const byte_t* end = content.get() + content.size();\n  while (pos 
< end) {\n    if (pos + 1 > end) {\n      std::cerr << \"ERROR: Failed to parse corrupted Wasm module from: \" << path << std::endl;\n      return wasm::vec<byte_t>::invalid();\n    }\n    const auto section_type = *pos++;\n    const auto section_len = parseVarint(pos, end);\n    if (section_len == static_cast<uint32_t>(-1) || pos + section_len > end) {\n      std::cerr << \"ERROR: Failed to parse corrupted Wasm module from: \" << path << std::endl;\n      return wasm::vec<byte_t>::invalid();\n    }\n    if (section_type == 0 /* custom section */) {\n      const auto section_data_start = pos;\n      const auto section_name_len = parseVarint(pos, end);\n      if (section_name_len == static_cast<uint32_t>(-1) || pos + section_name_len > end) {\n        std::cerr << \"ERROR: Failed to parse corrupted Wasm module from: \" << path << std::endl;\n        return wasm::vec<byte_t>::invalid();\n      }\n      if (section_name_len == name.size() && ::memcmp(pos, name.data(), section_name_len) == 0) {\n        std::cerr << \"ERROR: Wasm module: \" << path << \" already contains precompiled module.\"\n                  << std::endl;\n        return wasm::vec<byte_t>::invalid();\n      }\n      pos = section_data_start + section_len;\n    } else {\n      pos += section_len;\n    }\n  }\n\n  return content;\n}\n\nwasm::vec<byte_t> stripWasmModule(const wasm::vec<byte_t>& module) {\n  std::vector<byte_t> stripped;\n\n  const byte_t* pos = module.get();\n  const byte_t* end = module.get() + module.size();\n\n  // Copy Wasm header.\n  stripped.insert(stripped.end(), pos, pos + 8);\n  pos += 8;\n\n  while (pos < end) {\n    const auto section_start = pos;\n    if (pos + 1 > end) {\n      std::cerr << \"ERROR: Failed to parse corrupted Wasm module.\" << std::endl;\n      return wasm::vec<byte_t>::invalid();\n    }\n    const auto section_type = *pos++;\n    const auto section_len = parseVarint(pos, end);\n    if (section_len == static_cast<uint32_t>(-1) || pos + section_len > end) 
{\n      std::cerr << \"ERROR: Failed to parse corrupted Wasm module.\" << std::endl;\n      return wasm::vec<byte_t>::invalid();\n    }\n    if (section_type != 0 /* custom section */) {\n      stripped.insert(stripped.end(), section_start, pos + section_len);\n    }\n    pos += section_len;\n  }\n\n  return wasm::vec<byte_t>::make(stripped.size(), stripped.data());\n}\n\nwasm::vec<byte_t> serializeWasmModule(const char* path, const wasm::vec<byte_t>& content) {\n  const auto engine = wasm::Engine::make();\n  if (engine == nullptr) {\n    std::cerr << \"ERROR: Failed to start V8.\" << std::endl;\n    return wasm::vec<byte_t>::invalid();\n  }\n\n  const auto store = wasm::Store::make(engine.get());\n  if (store == nullptr) {\n    std::cerr << \"ERROR: Failed to create V8 isolate.\" << std::endl;\n    return wasm::vec<byte_t>::invalid();\n  }\n\n  const auto module = wasm::Module::make(store.get(), content);\n  if (module == nullptr) {\n    std::cerr << \"ERROR: Failed to instantiate WebAssembly module from: \" << path << std::endl;\n    return wasm::vec<byte_t>::invalid();\n  }\n\n  // TODO(PiotrSikora): figure out how to hook the completion callback.\n  sleep(3);\n\n  return module->serialize();\n}\n\nbool writeWasmModule(const char* path, const wasm::vec<byte_t>& module, size_t stripped_module_size,\n                     const std::string& section_name, const wasm::vec<byte_t>& serialized) {\n  auto file = std::fstream(path, std::ios::out | std::ios::binary);\n  file.write(module.get(), module.size());\n  const char section_type = '\\0'; // custom section\n  file.write(&section_type, 1);\n  const auto section_name_len = getVarint(section_name.size());\n  const auto section_size =\n      getVarint(section_name_len.size() + section_name.size() + serialized.size());\n  file.write(section_size.get(), section_size.size());\n  file.write(section_name_len.get(), section_name_len.size());\n  file.write(section_name.data(), section_name.size());\n  
file.write(serialized.get(), serialized.size());\n  file.close();\n\n  if (file.fail()) {\n    std::cerr << \"ERROR: Failed to write the output file to: \" << path << std::endl;\n    return false;\n  }\n\n  const auto total_size = module.size() + 1 + section_size.size() + section_name_len.size() +\n                          section_name.size() + serialized.size();\n  std::cout << \"Written \" << total_size << \" bytes (bytecode: \" << stripped_module_size << \" bytes,\"\n            << \" precompiled: \" << serialized.size() << \" bytes).\" << std::endl;\n  return true;\n}\n\n#if defined(__linux__) && defined(__x86_64__)\n#define WEE8_PLATFORM \"linux_x86_64\"\n#else\n#define WEE8_PLATFORM \"\"\n#endif\n\nint main(int argc, char* argv[]) {\n  if (sizeof(WEE8_PLATFORM) - 1 == 0) {\n    std::cerr << \"Unsupported platform.\" << std::endl;\n    return EXIT_FAILURE;\n  }\n\n  if (argc != 3) {\n    std::cerr << \"Usage: \" << argv[0] << \" <input> <output>\" << std::endl;\n    return EXIT_FAILURE;\n  }\n\n  const std::string section_name = \"precompiled_wee8_v\" + std::to_string(V8_MAJOR_VERSION) + \".\" +\n                                   std::to_string(V8_MINOR_VERSION) + \".\" +\n                                   std::to_string(V8_BUILD_NUMBER) + \".\" +\n                                   std::to_string(V8_PATCH_LEVEL) + \"_\" + WEE8_PLATFORM;\n\n  const auto module = readWasmModule(argv[1], section_name);\n  if (!module) {\n    return EXIT_FAILURE;\n  }\n\n  const auto stripped_module = stripWasmModule(module);\n  if (!stripped_module) {\n    return EXIT_FAILURE;\n  }\n\n  const auto serialized = serializeWasmModule(argv[1], stripped_module);\n  if (!serialized) {\n    return EXIT_FAILURE;\n  }\n\n  if (!writeWasmModule(argv[2], module, stripped_module.size(), section_name, serialized)) {\n    return EXIT_FAILURE;\n  }\n\n  return EXIT_SUCCESS;\n}\n"
  },
  {
    "path": "third_party/statusor/BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n\nload(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_cc_test\",\n    \"envoy_package\",\n)\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"statusor_lib\",\n    srcs = [\n        \"statusor.cc\",\n    ],\n    hdrs = [\n        \"statusor.h\",\n        \"statusor_internals.h\",\n    ],\n    external_deps = [\n        \"abseil_status\",\n    ],\n    deps = [\"//source/common/common:assert_lib\"],\n)\n\nenvoy_cc_test(\n    name = \"statusor_test\",\n    srcs = [\"statusor_test.cc\"],\n    deps = [\n        \":statusor_lib\",\n    ],\n)\n"
  },
  {
    "path": "third_party/statusor/statusor.cc",
    "content": "/**\n * IMPORTANT: this file is a fork of the soon to be open-source absl::StatusOr class.\n * When the absl::StatusOr lands this file will be removed.\n */\n\n/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"third_party/statusor/statusor.h\"\n\n#include <ostream>\n\n#include \"common/common/assert.h\"\n\nnamespace absl {\n\nnamespace internal_statusor {\n\nvoid Helper::HandleInvalidStatusCtorArg(absl::Status* status) {\n  const char* kMessage = \"An OK status is not a valid constructor argument to StatusOr<T>\";\n  ASSERT(false, kMessage);\n  // In optimized builds, we will fall back to ::util::error::INTERNAL.\n  *status = absl::Status(absl::StatusCode::kInternal, kMessage);\n}\n\nvoid Helper::Crash(const absl::Status&) { abort(); }\n\n} // namespace internal_statusor\n\n} // namespace absl\n"
  },
  {
    "path": "third_party/statusor/statusor.h",
    "content": "/**\n * IMPORTANT: this file is a fork of the soon to be open-source absl::StatusOr class.\n * When the absl::StatusOr lands this file will be removed.\n */\n\n/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// StatusOr<T> is the union of a Status object and a T\n// object. StatusOr models the concept of an object that is either a\n// usable value, or an error Status explaining why such a value is\n// not present. 
To this end, StatusOr<T> does not allow its Status\n// value to be OkStatus().\n//\n// The primary use-case for StatusOr<T> is as the return value of a\n// function which may fail.\n//\n// Example usage of a StatusOr<T>:\n//\n//  StatusOr<Foo> result = DoBigCalculationThatCouldFail();\n//  if (result) {\n//    result->DoSomethingCool();\n//  } else {\n//    GOOGLE_LOG(ERROR) << result.status();\n//  }\n//\n// Example that is guaranteed crash if the result holds no value:\n//\n//  StatusOr<Foo> result = DoBigCalculationThatCouldFail();\n//  const Foo& foo = result.value();\n//  foo.DoSomethingCool();\n//\n// Example usage of a StatusOr<std::unique_ptr<T>>:\n//\n//  StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);\n//  if (!result) {\n//    GOOGLE_LOG(ERROR) << result.status();\n//  } else if (*result == nullptr) {\n//    GOOGLE_LOG(ERROR) << \"Unexpected null pointer\";\n//  } else {\n//    (*result)->DoSomethingCool();\n//  }\n//\n// Example factory implementation returning StatusOr<T>:\n//\n//  StatusOr<Foo> FooFactory::MakeFoo(int arg) {\n//    if (arg <= 0) {\n//      return ::cel_base::Status(::cel_base::INVALID_ARGUMENT,\n//                                      \"Arg must be positive\");\n//    }\n//    return Foo(arg);\n//  }\n//\n\n#include <new>\n#include <string>\n#include <type_traits>\n#include <utility>\n\n#include \"third_party/statusor/statusor_internals.h\"\n\n#include \"absl/base/attributes.h\"\n#include \"absl/base/macros.h\"\n\nnamespace absl {\n\n// Returned StatusOr objects may not be ignored.\ntemplate <typename T> class ABSL_MUST_USE_RESULT StatusOr;\n\ntemplate <typename T>\nclass StatusOr : private internal_statusor::StatusOrData<T>,\n                 private internal_statusor::TraitsBase<std::is_copy_constructible<T>::value,\n                                                       std::is_move_constructible<T>::value> {\n  template <typename U> friend class StatusOr;\n\n  typedef internal_statusor::StatusOrData<T> 
Base;\n\npublic:\n  using element_type = T;\n\n  // Constructs a new StatusOr with Status::UNKNOWN status. This is marked\n  // 'explicit' to try to catch cases like 'return {};', where people think\n  // StatusOr<std::vector<int>> will be initialized with an empty vector,\n  // instead of a Status::UNKNOWN status.\n  explicit StatusOr();\n\n  // StatusOr<T> will be copy constructible/assignable if T is copy\n  // constructible.\n  StatusOr(const StatusOr&) = default;\n  StatusOr& operator=(const StatusOr&) = default;\n\n  // StatusOr<T> will be move constructible/assignable if T is move\n  // constructible.\n  StatusOr(StatusOr&&) = default;\n  StatusOr& operator=(StatusOr&&) = default;\n\n  // Conversion copy/move constructor, T must be convertible from U.\n  // These should not participate in overload resolution if U\n  // is not convertible to T.\n  template <typename U> StatusOr(const StatusOr<U>& other);\n  template <typename U> StatusOr(StatusOr<U>&& other);\n\n  // Conversion copy/move assignment operator, T must be convertible from U.\n  template <typename U> StatusOr& operator=(const StatusOr<U>& other);\n  template <typename U> StatusOr& operator=(StatusOr<U>&& other);\n\n  // Constructs a new StatusOr with the given value. After calling this\n  // constructor, this->ok() will be true and the contained value may be\n  // retrieved with value(), operator*(), or operator->().\n  //\n  // NOTE: Not explicit - we want to use StatusOr<T> as a return type\n  // so it is convenient and sensible to be able to do 'return T()'\n  // when the return type is StatusOr<T>.\n  //\n  // REQUIRES: T is copy constructible.\n  StatusOr(const T& value);\n\n  // Constructs a new StatusOr with the given non-ok status. 
After calling this\n  // constructor, this->ok() will be false and calls to value() will\n  // CHECK-fail.\n  //\n  // NOTE: Not explicit - we want to use StatusOr<T> as a return\n  // value, so it is convenient and sensible to be able to do 'return\n  // Status()' when the return type is StatusOr<T>.\n  //\n  // REQUIRES: !status.ok(). This requirement is checked by ASSERT.\n  // In optimized builds, passing OkStatus() here will have the effect\n  // of passing INTERNAL as a fallback.\n  StatusOr(const absl::Status& status);\n  StatusOr& operator=(const absl::Status& status);\n\n  // Similar to the `const T&` overload.\n  //\n  // REQUIRES: T is move constructible.\n  StatusOr(T&& value);\n\n  // RValue versions of the operations declared above.\n  StatusOr(absl::Status&& status);\n  StatusOr& operator=(absl::Status&& status);\n\n  // Returns this->ok()\n  explicit operator bool() const { return ok(); }\n\n  // Returns this->status().ok()\n  ABSL_MUST_USE_RESULT bool ok() const { return this->status_.ok(); }\n\n  // Returns a reference to our status. If this contains a T, then\n  // returns OkStatus().\n  const absl::Status& status() const&;\n  absl::Status status() &&;\n\n  // Returns a reference to our current value, or ASSERT-fails if !this->ok(). 
If\n  // you have already checked the status using this->ok() or operator bool(),\n  // then you probably want to use operator*() or operator->() to access the\n  // current value instead of value().\n  //\n  // Note: for value types that are cheap to copy, prefer simple code:\n  //\n  //   T value = status_or.value();\n  //\n  // Otherwise, if the value type is expensive to copy, but can be left\n  // in the StatusOr, simply assign to a reference:\n  //\n  //   T& value = status_or.value();  // or `const T&`\n  //\n  // Otherwise, if the value type supports an efficient move, it can be\n  // used as follows:\n  //\n  //   T value = std::move(status_or).value();\n  //\n  // The std::move on status_or instead of on the whole expression enables\n  // warnings about possible uses of the status_or object after the move.\n\n  const T& value() const&;\n  T& value() &;\n  const T&& value() const&&;\n  T&& value() &&;\n\n  // Returns a reference to the current value.\n  //\n  // REQUIRES: this->ok() == true, otherwise the behavior is undefined.\n  //\n  // Use this->ok() or `operator bool()` to verify that there is a current\n  // value. Alternatively, see value() for a similar API that guarantees\n  // ASSERT-failing if there is no current value.\n  const T& operator*() const&;\n  T& operator*() &;\n  const T&& operator*() const&&;\n  T&& operator*() &&;\n\n  // Returns a pointer to the current value.\n  //\n  // REQUIRES: this->ok() == true, otherwise the behavior is undefined.\n  //\n  // Use this->ok() or `operator bool()` to verify that there is a current\n  // value.\n  const T* operator->() const;\n  T* operator->();\n\n  // Returns a copy of the current value if this->ok() == true. Otherwise\n  // returns a default value.\n  template <typename U> T value_or(U&& default_value) const&;\n  template <typename U> T value_or(U&& default_value) &&;\n\n  // Ignores any errors. 
This method does nothing except potentially suppress\n  // complaints from any tools that are checking that errors are not dropped on\n  // the floor.\n  void IgnoreError() const;\n};\n\n////////////////////////////////////////////////////////////////////////////////\n// Implementation details for StatusOr<T>\n\ntemplate <typename T>\nStatusOr<T>::StatusOr() : Base(absl::Status(absl::StatusCode::kUnknown, \"\")) {}\n\ntemplate <typename T> StatusOr<T>::StatusOr(const T& value) : Base(value) {}\n\ntemplate <typename T> StatusOr<T>::StatusOr(const absl::Status& status) : Base(status) {}\n\ntemplate <typename T> StatusOr<T>& StatusOr<T>::operator=(const absl::Status& status) {\n  this->Assign(status);\n  return *this;\n}\n\ntemplate <typename T> StatusOr<T>::StatusOr(T&& value) : Base(std::move(value)) {}\n\ntemplate <typename T> StatusOr<T>::StatusOr(absl::Status&& status) : Base(std::move(status)) {}\n\ntemplate <typename T> StatusOr<T>& StatusOr<T>::operator=(absl::Status&& status) {\n  this->Assign(std::move(status));\n  return *this;\n}\n\ntemplate <typename T>\ntemplate <typename U>\ninline StatusOr<T>::StatusOr(const StatusOr<U>& other)\n    : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}\n\ntemplate <typename T>\ntemplate <typename U>\ninline StatusOr<T>& StatusOr<T>::operator=(const StatusOr<U>& other) {\n  if (other.ok())\n    this->Assign(other.value());\n  else\n    this->Assign(other.status());\n  return *this;\n}\n\ntemplate <typename T>\ntemplate <typename U>\ninline StatusOr<T>::StatusOr(StatusOr<U>&& other)\n    : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}\n\ntemplate <typename T>\ntemplate <typename U>\ninline StatusOr<T>& StatusOr<T>::operator=(StatusOr<U>&& other) {\n  if (other.ok()) {\n    this->Assign(std::move(other).value());\n  } else {\n    this->Assign(std::move(other).status());\n  }\n  return *this;\n}\n\ntemplate <typename T> const absl::Status& StatusOr<T>::status() const& { return this->status_; 
}\ntemplate <typename T> absl::Status StatusOr<T>::status() && {\n  return ok() ? absl::OkStatus() : std::move(this->status_);\n}\n\ntemplate <typename T> const T& StatusOr<T>::value() const& {\n  this->EnsureOk();\n  return this->data_;\n}\n\ntemplate <typename T> T& StatusOr<T>::value() & {\n  this->EnsureOk();\n  return this->data_;\n}\n\ntemplate <typename T> const T&& StatusOr<T>::value() const&& {\n  this->EnsureOk();\n  return std::move(this->data_);\n}\n\ntemplate <typename T> T&& StatusOr<T>::value() && {\n  this->EnsureOk();\n  return std::move(this->data_);\n}\n\ntemplate <typename T> const T& StatusOr<T>::operator*() const& {\n  this->EnsureOk();\n  return this->data_;\n}\n\ntemplate <typename T> T& StatusOr<T>::operator*() & {\n  this->EnsureOk();\n  return this->data_;\n}\n\ntemplate <typename T> const T&& StatusOr<T>::operator*() const&& {\n  this->EnsureOk();\n  return std::move(this->data_);\n}\n\ntemplate <typename T> T&& StatusOr<T>::operator*() && {\n  this->EnsureOk();\n  return std::move(this->data_);\n}\n\ntemplate <typename T> const T* StatusOr<T>::operator->() const {\n  this->EnsureOk();\n  return &this->data_;\n}\n\ntemplate <typename T> T* StatusOr<T>::operator->() {\n  this->EnsureOk();\n  return &this->data_;\n}\n\ntemplate <typename T> template <typename U> T StatusOr<T>::value_or(U&& default_value) const& {\n  if (ok()) {\n    return this->data_;\n  }\n  return std::forward<U>(default_value);\n}\n\ntemplate <typename T> template <typename U> T StatusOr<T>::value_or(U&& default_value) && {\n  if (ok()) {\n    return std::move(this->data_);\n  }\n  return std::forward<U>(default_value);\n}\n\ntemplate <typename T> void StatusOr<T>::IgnoreError() const {\n  // no-op\n}\n\n} // namespace absl\n"
  },
  {
    "path": "third_party/statusor/statusor_internals.h",
    "content": "/**\n * IMPORTANT: this file is a fork of the soon to be open-source absl::StatusOr class.\n * When the absl::StatusOr lands this file will be removed.\n */\n\n/*\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <new>\n#include <type_traits>\n#include <utility>\n\n#include \"absl/base/attributes.h\"\n#include \"absl/meta/type_traits.h\"\n#include \"absl/status/status.h\"\n\nnamespace absl {\n\nnamespace internal_statusor {\n\nclass Helper {\npublic:\n  // Move type-agnostic error handling to the .cc.\n  static void HandleInvalidStatusCtorArg(absl::Status*);\n  ABSL_ATTRIBUTE_NORETURN static void Crash(const absl::Status& status);\n};\n\n// Construct an instance of T in `p` through placement new, passing Args... to\n// the constructor.\n// This abstraction is here mostly for the gcc performance fix.\ntemplate <typename T, typename... Args> void PlacementNew(void* p, Args&&... 
args) {\n#if defined(__GNUC__) && !defined(__clang__)\n  // Teach gcc that 'p' cannot be null, fixing code size issues.\n  if (p == nullptr)\n    __builtin_unreachable();\n#endif\n  new (p) T(std::forward<Args>(args)...);\n}\n\n// Helper base class to hold the data and all operations.\n// We move all this to a base class to allow mixing with the appropriate\n// TraitsBase specialization.\ntemplate <typename T> class StatusOrData {\n  template <typename U> friend class StatusOrData;\n\npublic:\n  StatusOrData() = delete;\n\n  StatusOrData(const StatusOrData& other) {\n    if (other.ok()) {\n      MakeValue(other.data_);\n      MakeStatus();\n    } else {\n      MakeStatus(other.status_);\n    }\n  }\n\n  StatusOrData(StatusOrData&& other) noexcept {\n    if (other.ok()) {\n      MakeValue(std::move(other.data_));\n      MakeStatus();\n    } else {\n      MakeStatus(std::move(other.status_));\n    }\n  }\n\n  template <typename U> StatusOrData(const StatusOrData<U>& other) {\n    if (other.ok()) {\n      MakeValue(other.data_);\n      MakeStatus();\n    } else {\n      MakeStatus(other.status_);\n    }\n  }\n\n  template <typename U> StatusOrData(StatusOrData<U>&& other) {\n    if (other.ok()) {\n      MakeValue(std::move(other.data_));\n      MakeStatus();\n    } else {\n      MakeStatus(std::move(other.status_));\n    }\n  }\n\n  explicit StatusOrData(const T& value) : data_(value) { MakeStatus(); }\n  explicit StatusOrData(T&& value) : data_(std::move(value)) { MakeStatus(); }\n\n  explicit StatusOrData(const absl::Status& status) : status_(status) { EnsureNotOk(); }\n  explicit StatusOrData(absl::Status&& status) : status_(std::move(status)) { EnsureNotOk(); }\n\n  StatusOrData& operator=(const StatusOrData& other) {\n    if (this == &other)\n      return *this;\n    if (other.ok())\n      Assign(other.data_);\n    else\n      Assign(other.status_);\n    return *this;\n  }\n\n  StatusOrData& operator=(StatusOrData&& other) {\n    if (this == &other)\n      return 
*this;\n    if (other.ok())\n      Assign(std::move(other.data_));\n    else\n      Assign(std::move(other.status_));\n    return *this;\n  }\n\n  ~StatusOrData() {\n    if (ok()) {\n      status_.~Status();\n      data_.~T();\n    } else {\n      status_.~Status();\n    }\n  }\n\n  void Assign(const T& value) {\n    if (ok()) {\n      data_.~T();\n      MakeValue(value);\n    } else {\n      MakeValue(value);\n      status_ = absl::OkStatus();\n    }\n  }\n\n  void Assign(T&& value) {\n    if (ok()) {\n      data_.~T();\n      MakeValue(std::move(value));\n    } else {\n      MakeValue(std::move(value));\n      status_ = absl::OkStatus();\n    }\n  }\n\n  void Assign(const absl::Status& status) {\n    Clear();\n    status_ = status;\n    EnsureNotOk();\n  }\n\n  void Assign(absl::Status&& status) {\n    Clear();\n    status_ = std::move(status);\n    EnsureNotOk();\n  }\n\n  bool ok() const { return status_.ok(); }\n\nprotected:\n  // status_ will always be active after the constructor.\n  // We make it a union to be able to initialize exactly how we need without\n  // waste.\n  // E/g. in the copy constructor we use the default constructor of\n  // Status in the ok() path to avoid an extra Ref call.\n  union {\n    absl::Status status_;\n  };\n\n  // data_ is active iff status_.ok()==true\n  struct Dummy {};\n  union {\n    // When T is const, we need some non-const object we can cast to void* for\n    // the placement new. dummy_ is that object.\n    Dummy dummy_;\n    T data_;\n  };\n\n  void Clear() {\n    if (ok())\n      data_.~T();\n  }\n\n  void EnsureOk() const {\n    if (!ok())\n      Helper::Crash(status_);\n  }\n\n  void EnsureNotOk() {\n    if (ok())\n      Helper::HandleInvalidStatusCtorArg(&status_);\n  }\n\n  // Construct the value (i.e. 
data_) through placement new with the passed\n  // argument.\n  template <typename Arg> void MakeValue(Arg&& arg) {\n    internal_statusor::PlacementNew<T>(&dummy_, std::forward<Arg>(arg));\n  }\n\n  // Construct the status (i.e. status_) through placement new with the passed\n  // argument.\n  template <typename... Args> void MakeStatus(Args&&... args) {\n    internal_statusor::PlacementNew<absl::Status>(&status_, std::forward<Args>(args)...);\n  }\n};\n\n// Helper base class to allow implicitly deleted constructors and assignment\n// operations in StatusOr.\n// TraitsBase will explicitly delete what it can't support and StatusOr will\n// inherit that behavior implicitly.\ntemplate <bool Copy, bool Move> struct TraitsBase {\n  TraitsBase() = default;\n  TraitsBase(const TraitsBase&) = default;\n  TraitsBase(TraitsBase&&) = default;\n  TraitsBase& operator=(const TraitsBase&) = default;\n  TraitsBase& operator=(TraitsBase&&) = default;\n};\n\ntemplate <> struct TraitsBase<false, true> {\n  TraitsBase() = default;\n  TraitsBase(const TraitsBase&) = delete;\n  TraitsBase(TraitsBase&&) = default;\n  TraitsBase& operator=(const TraitsBase&) = delete;\n  TraitsBase& operator=(TraitsBase&&) = default;\n};\n\ntemplate <> struct TraitsBase<false, false> {\n  TraitsBase() = default;\n  TraitsBase(const TraitsBase&) = delete;\n  TraitsBase(TraitsBase&&) = delete;\n  TraitsBase& operator=(const TraitsBase&) = delete;\n  TraitsBase& operator=(TraitsBase&&) = delete;\n};\n\n} // namespace internal_statusor\n\n} // namespace absl\n"
  },
  {
    "path": "third_party/statusor/statusor_test.cc",
    "content": "/**\n * IMPORTANT: this file is a fork of the soon to be open-source absl::StatusOr class.\n * When the absl::StatusOr lands this file will be removed.\n */\n\n/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n==============================================================================*/\n\n// Unit tests for StatusOr\n\n#include <memory>\n#include <type_traits>\n\n#include \"third_party/statusor/statusor.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace absl {\nnamespace {\n\n\nclass Base1 {\n public:\n  virtual ~Base1() {}\n  int pad_;\n};\n\nclass Base2 {\n public:\n  virtual ~Base2() {}\n  int yetotherpad_;\n};\n\nclass Derived : public Base1, public Base2 {\n public:\n  ~Derived() override {}\n  int evenmorepad_;\n};\n\nclass CopyNoAssign {\n public:\n  explicit CopyNoAssign(int value) : foo_(value) {}\n  CopyNoAssign(const CopyNoAssign& other) : foo_(other.foo_) {}\n  int foo_;\n\n private:\n  const CopyNoAssign& operator=(const CopyNoAssign&);\n};\n\nclass NoDefaultConstructor {\n public:\n  explicit NoDefaultConstructor(int foo);\n};\n\nstatic_assert(!std::is_default_constructible<NoDefaultConstructor>(),\n              \"Should not be default-constructible.\");\n\nStatusOr<std::unique_ptr<int>> ReturnUniquePtr() {\n  // Uses implicit constructor from T&&\n  return std::unique_ptr<int>(new int(0));\n}\n\nTEST(StatusOr, ElementType) {\n  
static_assert(std::is_same<StatusOr<int>::element_type, int>(), \"\");\n  static_assert(std::is_same<StatusOr<char>::element_type, char>(), \"\");\n}\n\nTEST(StatusOr, NullPointerStatusOr) {\n  // As a very special case, null-plain-pointer StatusOr used to be an\n  // error. Test that it no longer is.\n  StatusOr<int*> null_status(nullptr);\n  EXPECT_TRUE(null_status.ok());\n  EXPECT_EQ(null_status.value(), nullptr);\n}\n\nTEST(StatusOr, TestNoDefaultConstructorInitialization) {\n  // Explicitly initialize it with an error code.\n  StatusOr<NoDefaultConstructor> statusor(absl::CancelledError(\"\"));\n  EXPECT_FALSE(statusor.ok());\n  EXPECT_EQ(statusor.status().code(), absl::StatusCode::kCancelled);\n\n  // Default construction of StatusOr initializes it with an UNKNOWN error code.\n  StatusOr<NoDefaultConstructor> statusor2;\n  EXPECT_FALSE(statusor2.ok());\n  EXPECT_EQ(statusor2.status().code(), absl::StatusCode::kUnknown);\n}\n\nTEST(StatusOr, TestMoveOnlyInitialization) {\n  StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());\n  ASSERT_TRUE(thing.ok());\n  EXPECT_EQ(0, *thing.value());\n  int* previous = thing.value().get();\n\n  thing = ReturnUniquePtr();\n  EXPECT_TRUE(thing.ok());\n  EXPECT_EQ(0, *thing.value());\n  EXPECT_NE(previous, thing.value().get());\n}\n\nTEST(StatusOr, TestMoveOnlyStatusCtr) {\n  StatusOr<std::unique_ptr<int>> thing(absl::CancelledError(\"\"));\n  ASSERT_FALSE(thing.ok());\n}\n\nTEST(StatusOr, TestMoveOnlyValueExtraction) {\n  StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());\n  ASSERT_TRUE(thing.ok());\n  std::unique_ptr<int> ptr = std::move(thing).value();\n  EXPECT_EQ(0, *ptr);\n\n  thing = std::move(ptr);\n  ptr = std::move(thing.value());\n  EXPECT_EQ(0, *ptr);\n}\n\nTEST(StatusOr, TestMoveOnlyConversion) {\n  StatusOr<std::unique_ptr<const int>> const_thing(ReturnUniquePtr());\n  EXPECT_TRUE(const_thing.ok());\n  EXPECT_EQ(0, *const_thing.value());\n\n  // Test rvalue converting assignment\n  const int* 
const_previous = const_thing.value().get();\n  const_thing = ReturnUniquePtr();\n  EXPECT_TRUE(const_thing.ok());\n  EXPECT_EQ(0, *const_thing.value());\n  EXPECT_NE(const_previous, const_thing.value().get());\n}\n\nTEST(StatusOr, TestMoveOnlyVector) {\n  // Sanity check that StatusOr<MoveOnly> works in vector.\n  std::vector<StatusOr<std::unique_ptr<int>>> vec;\n  vec.push_back(ReturnUniquePtr());\n  vec.resize(2);\n  auto another_vec = std::move(vec);\n  EXPECT_EQ(0, *another_vec[0].value());\n  EXPECT_EQ(absl::StatusCode::kUnknown, another_vec[1].status().code());\n}\n\nTEST(StatusOr, TestMoveWithValuesAndErrors) {\n  StatusOr<std::string> status_or(std::string(1000, '0'));\n  StatusOr<std::string> value1(std::string(1000, '1'));\n  StatusOr<std::string> value2(std::string(1000, '2'));\n  StatusOr<std::string> error1(Status(absl::StatusCode::kUnknown, \"error1\"));\n  StatusOr<std::string> error2(Status(absl::StatusCode::kUnknown, \"error2\"));\n\n  ASSERT_TRUE(status_or.ok());\n  EXPECT_EQ(std::string(1000, '0'), status_or.value());\n\n  // Overwrite the value in status_or with another value.\n  status_or = std::move(value1);\n  ASSERT_TRUE(status_or.ok());\n  EXPECT_EQ(std::string(1000, '1'), status_or.value());\n\n  // Overwrite the value in status_or with an error.\n  status_or = std::move(error1);\n  ASSERT_FALSE(status_or.ok());\n  EXPECT_EQ(\"error1\", status_or.status().message());\n\n  // Overwrite the error in status_or with another error.\n  status_or = std::move(error2);\n  ASSERT_FALSE(status_or.ok());\n  EXPECT_EQ(\"error2\", status_or.status().message());\n\n  // Overwrite the error with a value.\n  status_or = std::move(value2);\n  ASSERT_TRUE(status_or.ok());\n  EXPECT_EQ(std::string(1000, '2'), status_or.value());\n}\n\nTEST(StatusOr, TestCopyWithValuesAndErrors) {\n  StatusOr<std::string> status_or(std::string(1000, '0'));\n  StatusOr<std::string> value1(std::string(1000, '1'));\n  StatusOr<std::string> value2(std::string(1000, '2'));\n  
StatusOr<std::string> error1(Status(absl::StatusCode::kUnknown, \"error1\"));\n  StatusOr<std::string> error2(Status(absl::StatusCode::kUnknown, \"error2\"));\n\n  ASSERT_TRUE(status_or.ok());\n  EXPECT_EQ(std::string(1000, '0'), status_or.value());\n\n  // Overwrite the value in status_or with another value.\n  status_or = value1;\n  ASSERT_TRUE(status_or.ok());\n  EXPECT_EQ(std::string(1000, '1'), status_or.value());\n\n  // Overwrite the value in status_or with an error.\n  status_or = error1;\n  ASSERT_FALSE(status_or.ok());\n  EXPECT_EQ(\"error1\", status_or.status().message());\n\n  // Overwrite the error in status_or with another error.\n  status_or = error2;\n  ASSERT_FALSE(status_or.ok());\n  EXPECT_EQ(\"error2\", status_or.status().message());\n\n  // Overwrite the error with a value.\n  status_or = value2;\n  ASSERT_TRUE(status_or.ok());\n  EXPECT_EQ(std::string(1000, '2'), status_or.value());\n\n  // Verify original values unchanged.\n  EXPECT_EQ(std::string(1000, '1'), value1.value());\n  EXPECT_EQ(\"error1\", error1.status().message());\n  EXPECT_EQ(\"error2\", error2.status().message());\n  EXPECT_EQ(std::string(1000, '2'), value2.value());\n}\n\nTEST(StatusOr, TestDefaultCtor) {\n  StatusOr<int> thing;\n  EXPECT_FALSE(thing.ok());\n  EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);\n}\n\nTEST(StatusOrDeathTest, TestDefaultCtorValue) {\n  StatusOr<int> thing;\n  EXPECT_DEATH(thing.value(), \"\");\n\n  const StatusOr<int> thing2;\n  EXPECT_DEATH(thing.value(), \"\");\n}\n\nTEST(StatusOr, TestStatusCtor) {\n  StatusOr<int> thing(Status(absl::StatusCode::kCancelled, \"\"));\n  EXPECT_FALSE(thing.ok());\n  EXPECT_EQ(thing.status().code(), absl::StatusCode::kCancelled);\n}\n\nTEST(StatusOr, TestValueCtor) {\n  const int kI = 4;\n  const StatusOr<int> thing(kI);\n  EXPECT_TRUE(thing.ok());\n  EXPECT_EQ(kI, thing.value());\n}\n\nTEST(StatusOr, TestCopyCtorStatusOk) {\n  const int kI = 4;\n  const StatusOr<int> original(kI);\n  const 
StatusOr<int> copy(original);\n  EXPECT_EQ(copy.status(), original.status());\n  EXPECT_EQ(original.value(), copy.value());\n}\n\nTEST(StatusOr, TestCopyCtorStatusNotOk) {\n  StatusOr<int> original(Status(absl::StatusCode::kCancelled, \"\"));\n  StatusOr<int> copy(original);\n  EXPECT_EQ(copy.status(), original.status());\n}\n\nTEST(StatusOr, TestCopyCtorNonAssignable) {\n  const int kI = 4;\n  CopyNoAssign value(kI);\n  StatusOr<CopyNoAssign> original(value);\n  StatusOr<CopyNoAssign> copy(original);\n  EXPECT_EQ(copy.status(), original.status());\n  EXPECT_EQ(original.value().foo_, copy.value().foo_);\n}\n\nTEST(StatusOr, TestCopyCtorStatusOKConverting) {\n  const int kI = 4;\n  StatusOr<int> original(kI);\n  StatusOr<double> copy(original);\n  EXPECT_EQ(copy.status(), original.status());\n  EXPECT_DOUBLE_EQ(original.value(), copy.value());\n}\n\nTEST(StatusOr, TestCopyCtorStatusNotOkConverting) {\n  StatusOr<int> original(Status(absl::StatusCode::kCancelled, \"\"));\n  StatusOr<double> copy(original);\n  EXPECT_EQ(copy.status(), original.status());\n}\n\nTEST(StatusOr, TestAssignmentStatusOk) {\n  const int kI = 4;\n  StatusOr<int> source(kI);\n  StatusOr<int> target;\n  target = source;\n  EXPECT_EQ(target.status(), source.status());\n  EXPECT_EQ(source.value(), target.value());\n}\n\nTEST(StatusOr, TestAssignmentStatusNotOk) {\n  StatusOr<int> source(Status(absl::StatusCode::kCancelled, \"\"));\n  StatusOr<int> target;\n  target = source;\n  EXPECT_EQ(target.status(), source.status());\n}\n\nTEST(StatusOr, TestStatus) {\n  StatusOr<int> good(4);\n  EXPECT_TRUE(good.ok());\n  StatusOr<int> bad(Status(absl::StatusCode::kCancelled, \"\"));\n  EXPECT_FALSE(bad.ok());\n  EXPECT_EQ(bad.status(), Status(absl::StatusCode::kCancelled, \"\"));\n}\n\nTEST(StatusOr, TestValue) {\n  const int kI = 4;\n  StatusOr<int> thing(kI);\n  EXPECT_EQ(kI, thing.value());\n}\n\nTEST(StatusOr, TestValueConst) {\n  const int kI = 4;\n  const StatusOr<int> thing(kI);\n  EXPECT_EQ(kI, 
thing.value());\n}\n\nTEST(StatusOrDeathTest, TestValueNotOk) {\n  StatusOr<int> thing(Status(absl::StatusCode::kCancelled, \"cancelled\"));\n  EXPECT_DEATH(thing.value(), \"\");\n}\n\nTEST(StatusOrDeathTest, TestValueNotOkConst) {\n  const StatusOr<int> thing(Status(absl::StatusCode::kUnknown, \"\"));\n  EXPECT_DEATH(thing.value(), \"\");\n}\n\nTEST(StatusOr, TestPointerDefaultCtor) {\n  StatusOr<int*> thing;\n  EXPECT_FALSE(thing.ok());\n  EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);\n}\n\nTEST(StatusOrDeathTest, TestPointerDefaultCtorValue) {\n  StatusOr<int*> thing;\n  EXPECT_DEATH(thing.value(), \"\");\n}\n\nTEST(StatusOr, TestPointerStatusCtor) {\n  StatusOr<int*> thing(Status(absl::StatusCode::kCancelled, \"\"));\n  EXPECT_FALSE(thing.ok());\n  EXPECT_EQ(thing.status(), Status(absl::StatusCode::kCancelled, \"\"));\n}\n\nTEST(StatusOr, TestPointerValueCtor) {\n  const int kI = 4;\n  StatusOr<const int*> thing(&kI);\n  EXPECT_TRUE(thing.ok());\n  EXPECT_EQ(&kI, thing.value());\n}\n\nTEST(StatusOr, TestPointerCopyCtorStatusOk) {\n  const int kI = 0;\n  StatusOr<const int*> original(&kI);\n  StatusOr<const int*> copy(original);\n  EXPECT_EQ(copy.status(), original.status());\n  EXPECT_EQ(original.value(), copy.value());\n}\n\nTEST(StatusOr, TestPointerCopyCtorStatusNotOk) {\n  StatusOr<int*> original(Status(absl::StatusCode::kCancelled, \"\"));\n  StatusOr<int*> copy(original);\n  EXPECT_EQ(copy.status(), original.status());\n}\n\nTEST(StatusOr, TestPointerCopyCtorStatusOKConverting) {\n  Derived derived;\n  StatusOr<Derived*> original(&derived);\n  StatusOr<Base2*> copy(original);\n  EXPECT_EQ(copy.status(), original.status());\n  EXPECT_EQ(static_cast<const Base2*>(original.value()),\n            copy.value());\n}\n\nTEST(StatusOr, TestPointerCopyCtorStatusNotOkConverting) {\n  StatusOr<Derived*> original(Status(absl::StatusCode::kCancelled, \"\"));\n  StatusOr<Base2*> copy(original);\n  EXPECT_EQ(copy.status(), 
original.status());\n}\n\nTEST(StatusOr, TestPointerAssignmentStatusOk) {\n  const int kI = 0;\n  StatusOr<const int*> source(&kI);\n  StatusOr<const int*> target;\n  target = source;\n  EXPECT_EQ(target.status(), source.status());\n  EXPECT_EQ(source.value(), target.value());\n}\n\nTEST(StatusOr, TestPointerAssignmentStatusNotOk) {\n  StatusOr<int*> source(Status(absl::StatusCode::kCancelled, \"\"));\n  StatusOr<int*> target;\n  target = source;\n  EXPECT_EQ(target.status(), source.status());\n}\n\nTEST(StatusOr, TestPointerStatus) {\n  const int kI = 0;\n  StatusOr<const int*> good(&kI);\n  EXPECT_TRUE(good.ok());\n  StatusOr<const int*> bad(Status(absl::StatusCode::kCancelled, \"\"));\n  EXPECT_EQ(bad.status(), Status(absl::StatusCode::kCancelled, \"\"));\n}\n\nTEST(StatusOr, TestPointerValue) {\n  const int kI = 0;\n  StatusOr<const int*> thing(&kI);\n  EXPECT_EQ(&kI, thing.value());\n}\n\nTEST(StatusOr, TestPointerValueConst) {\n  const int kI = 0;\n  const StatusOr<const int*> thing(&kI);\n  EXPECT_EQ(&kI, thing.value());\n}\n\n// NOTE(tucker): StatusOr does not support this kind\n// of resize op.\n// TEST(StatusOr, StatusOrVectorOfUniquePointerCanResize) {\n//   using EvilType = std::vector<std::unique_ptr<int>>;\n//   static_assert(std::is_copy_constructible<EvilType>::value, \"\");\n//   std::vector<StatusOr<EvilType>> v(5);\n//   v.reserve(v.capacity() + 10);\n// }\n\nTEST(StatusOrDeathTest, TestPointerValueNotOk) {\n  StatusOr<int*> thing(Status(absl::StatusCode::kCancelled, \"cancelled\"));\n  EXPECT_DEATH(thing.value(), \"\");\n}\n\nTEST(StatusOrDeathTest, TestPointerValueNotOkConst) {\n  const StatusOr<int*> thing(Status(absl::StatusCode::kCancelled, \"cancelled\"));\n  EXPECT_DEATH(thing.value(), \"\");\n}\n\n// Benchmarks were removed as we not intend to change forked code.\n\n} // namespace\n} // namespace absl\n"
  },
  {
    "path": "tools/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_library\")\nload(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_binary\",\n    \"envoy_cc_platform_dep\",\n    \"envoy_package\",\n    \"envoy_py_test_binary\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nexports_files([\n    \"gen_git_sha.sh\",\n    \"code_format/check_format.py\",\n    \"code_format/header_order.py\",\n    \"code_format/envoy_build_fixer.py\",\n    \"check_repositories.sh\",\n])\n\nenvoy_py_test_binary(\n    name = \"socket_passing\",\n    srcs = [\n        \"socket_passing.py\",\n    ],\n)\n\npy_library(\n    name = \"run_command\",\n    srcs = [\n        \"run_command.py\",\n    ],\n    visibility = [\"//visibility:public\"],\n)\n\nenvoy_cc_binary(\n    name = \"bootstrap2pb\",\n    srcs = [\"bootstrap2pb.cc\"],\n    deps = [\n        \"//source/common/api:api_lib\",\n        \"//source/common/common:assert_lib\",\n        \"//source/common/protobuf:message_validator_lib\",\n        \"//source/common/protobuf:utility_lib\",\n        \"//source/common/stats:isolated_store_lib\",\n        \"@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto\",\n    ] + envoy_cc_platform_dep(\"//source/exe:platform_impl_lib\"),\n)\n"
  },
  {
    "path": "tools/api/generate_go_protobuf.py",
    "content": "#!/usr/bin/env python3\n\nfrom subprocess import check_output\nfrom subprocess import check_call\nimport glob\nimport os\nimport shutil\nimport sys\nimport re\n\nTARGETS = '@envoy_api//...'\nIMPORT_BASE = 'github.com/envoyproxy/go-control-plane'\nOUTPUT_BASE = 'build_go'\nREPO_BASE = 'go-control-plane'\nBRANCH = 'master'\nMIRROR_MSG = 'Mirrored from envoyproxy/envoy @ '\nUSER_NAME = 'go-control-plane(CircleCI)'\nUSER_EMAIL = 'go-control-plane@users.noreply.github.com'\n\n\ndef generateProtobufs(output):\n  bazel_bin = check_output(['bazel', 'info', 'bazel-bin']).decode().strip()\n  go_protos = check_output([\n      'bazel',\n      'query',\n      'kind(\"go_proto_library\", %s)' % TARGETS,\n  ]).split()\n\n  # Each rule has the form @envoy_api//foo/bar:baz_go_proto.\n  # First build all the rules to ensure we have the output files.\n  # We preserve source info so comments are retained on generated code.\n  check_call([\n      'bazel', 'build', '-c', 'fastbuild',\n      '--experimental_proto_descriptor_sets_include_source_info'\n  ] + go_protos)\n\n  for rule in go_protos:\n    # Example rule:\n    # @envoy_api//envoy/config/bootstrap/v2:pkg_go_proto\n    #\n    # Example generated directory:\n    # bazel-bin/external/envoy_api/envoy/config/bootstrap/v2/linux_amd64_stripped/pkg_go_proto%/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v2/\n    #\n    # Example output directory:\n    # go_out/envoy/config/bootstrap/v2\n    rule_dir, proto = rule.decode()[len('@envoy_api//'):].rsplit(':', 1)\n    input_dir = os.path.join(bazel_bin, 'external', 'envoy_api', rule_dir, proto + '_', IMPORT_BASE,\n                             rule_dir)\n    input_files = glob.glob(os.path.join(input_dir, '*.go'))\n    output_dir = os.path.join(output, rule_dir)\n\n    # Ensure the output directory exists\n    os.makedirs(output_dir, 0o755, exist_ok=True)\n    for generated_file in input_files:\n      shutil.copy(generated_file, output_dir)\n  print('Go 
artifacts placed into: ' + output)\n\n\ndef git(repo, *args):\n  cmd = ['git']\n  if repo:\n    cmd = cmd + ['-C', repo]\n  for arg in args:\n    cmd = cmd + [arg]\n  return check_output(cmd).decode()\n\n\ndef cloneGoProtobufs(repo):\n  # Create a local clone of go-control-plane\n  git(None, 'clone', 'git@github.com:envoyproxy/go-control-plane', repo)\n  git(repo, 'fetch')\n  git(repo, 'checkout', '-B', BRANCH, 'origin/master')\n\n\ndef findLastSyncSHA(repo):\n  # Determine last envoyproxy/envoy SHA in envoyproxy/go-control-plane\n  last_commit = git(repo, 'log', '--grep=' + MIRROR_MSG, '-n', '1', '--format=%B').strip()\n  # Initial SHA from which the APIs start syncing. Prior to that it was done manually.\n  if last_commit == \"\":\n    return 'e7f0b7176efdc65f96eb1697b829d1e6187f4502'\n  m = re.search(MIRROR_MSG + '(\\w+)', last_commit)\n  return m.group(1)\n\n\ndef updatedSinceSHA(repo, last_sha):\n  # Determine if there are changes to API since last SHA\n  return git(None, 'rev-list', '%s..HEAD' % last_sha).split()\n\n\ndef writeRevisionInfo(repo, sha):\n  # Put a file in the generated code root containing the latest mirrored SHA\n  dst = os.path.join(repo, 'envoy', 'COMMIT')\n  with open(dst, 'w') as fh:\n    fh.write(sha)\n\n\ndef syncGoProtobufs(output, repo):\n  # Sync generated content against repo and return true if there is a commit necessary\n  dst = os.path.join(repo, 'envoy')\n  # Remove subtree at envoy in repo\n  git(repo, 'rm', '-r', 'envoy')\n  # Copy subtree at envoy from output to repo\n  shutil.copytree(os.path.join(output, 'envoy'), dst)\n  git(repo, 'add', 'envoy')\n\n\ndef publishGoProtobufs(repo, sha):\n  # Publish generated files with the last SHA changes to API\n  git(repo, 'config', 'user.name', USER_NAME)\n  git(repo, 'config', 'user.email', USER_EMAIL)\n  git(repo, 'add', 'envoy')\n  git(repo, 'commit', '--allow-empty', '-s', '-m', MIRROR_MSG + sha)\n  git(repo, 'push', 'origin', BRANCH)\n\n\ndef updated(repo):\n  return len(\n      [f 
for f in git(repo, 'diff', 'HEAD', '--name-only').splitlines() if f != 'envoy/COMMIT']) > 0\n\n\nif __name__ == \"__main__\":\n  workspace = check_output(['bazel', 'info', 'workspace']).decode().strip()\n  output = os.path.join(workspace, OUTPUT_BASE)\n  generateProtobufs(output)\n  repo = os.path.join(workspace, REPO_BASE)\n  cloneGoProtobufs(repo)\n  syncGoProtobufs(output, repo)\n  last_sha = findLastSyncSHA(repo)\n  changes = updatedSinceSHA(repo, last_sha)\n  if updated(repo):\n    print('Changes detected: %s' % changes)\n    new_sha = changes[0]\n    writeRevisionInfo(repo, new_sha)\n    publishGoProtobufs(repo, new_sha)\n"
  },
  {
    "path": "tools/api/validate_structure.py",
    "content": "#!/usr/bin/env python3\n\n# Validate the API package structure. Usage:\n#\n# ./tools/api/validate_structure.py\n\nimport pathlib\nimport re\nimport sys\n\n# Only v2 protos are allowed in these trees.\nV2_ONLY_PATHS = [\n    'api',\n    'config/filter',\n    'config/transport_socket',\n    'config/common/dynamic_forward_proxy',\n    'config/common/tap',\n]\n\n# These are the only legacy trees that we permit not to terminate with a versioned suffix.\nVERSIONLESS_PATHS = [\n    'annotations',\n    'api/v2/ratelimit',\n    'api/v2/auth',\n    'api/v2/listener',\n    'api/v2/core',\n    'api/v2/endpoint',\n    'api/v2/route',\n    'api/v2/cluster',\n    'type',\n    'type/matcher',\n    'config/cluster/redis',\n    'config/retry/previous_priorities',\n]\n\n\nclass ValidationError(Exception):\n  pass\n\n\n# Extract major version and full API version string from a proto path.\ndef ProtoApiVersion(proto_path):\n  match = re.match('v(\\d+).*', proto_path.parent.name)\n  if match:\n    return str(proto_path.parent.name)[1:], int(match.group(1))\n  return None, 0\n\n\n# Validate a single proto path.\ndef ValidateProtoPath(proto_path):\n  version_str, major_version = ProtoApiVersion(proto_path)\n\n  # Validate version-less paths.\n  if major_version == 0:\n    if not any(str(proto_path.parent) == p for p in VERSIONLESS_PATHS):\n      raise ValidationError('Package is missing a version')\n\n  # Validate that v3+ versions are regular.\n  if major_version >= 3:\n    if not re.match('\\d+(alpha)?$', version_str):\n      raise ValidationError('Invalid v3+ version: %s' % version_str)\n\n    # Validate v2-only paths.\n    for p in V2_ONLY_PATHS:\n      if str(proto_path).startswith(p):\n        raise ValidationError('v3+ protos are not allowed in %s' % p)\n\n\n# Validate a list of proto paths.\ndef ValidateProtoPaths(proto_paths):\n  error_msgs = []\n  for proto_path in proto_paths:\n    try:\n      ValidateProtoPath(proto_path)\n    except ValidationError as e:\n     
 error_msgs.append('Invalid .proto location [%s]: %s' % (proto_path, e))\n  return error_msgs\n\n\nif __name__ == '__main__':\n  api_root = 'api/envoy'\n  api_protos = pathlib.Path(api_root).rglob('*.proto')\n  error_msgs = ValidateProtoPaths(p.relative_to(api_root) for p in api_protos)\n  if error_msgs:\n    for m in error_msgs:\n      print(m)\n    sys.exit(1)\n  sys.exit(0)\n"
  },
  {
    "path": "tools/api_boost/README.md",
    "content": "# Envoy API upgrades\n\nThis directory contains tooling to support the [Envoy API versioning\nguidelines](api/API_VERSIONING.md). Envoy internally tracks the latest API\nversion for any given package. Since each package may have a different API\nversion, and we have > 15k of API protos, we require machine assistance to\nscale the upgrade process.\n\nWe refer to the process of upgrading Envoy to the latest version of the API as\n*API boosting*. This is a manual process, where a developer wanting to bump\nmajor version at the API clock invokes:\n\n```console\n/tools/api_boost/api_boost.py --build_api_booster --generate_compilation_database\n```\n\nfollowed by `fix_format`. The full process is still WiP, but we expect that\nthere will be some manual fixup required of test cases (e.g. YAML fragments) as\nwell.\n\nYou will need to configure `LLVM_CONFIG` as per the [Clang Libtooling setup\nguide](tools/clang_tools/README.md).\n\n## Status\n\nThe API boosting tooling is still WiP. It is slated to land in the v3 release\n(EOY 2019), at which point it should be considered ready for general consumption\nby experienced developers who work on Envoy APIs.\n"
  },
  {
    "path": "tools/api_boost/api_boost.py",
    "content": "#!/usr/bin/env python3\n\n# Tool that assists in upgrading the Envoy source tree to the latest API.\n# Internally, Envoy uses the latest vN or vNalpha for a given package. Envoy\n# will perform a reflection based version upgrade on any older protos that are\n# presented to it in configuration at ingestion time.\n#\n# Usage (from a clean tree):\n#\n# api_boost.py --generate_compilation_database --build_api_booster\n\nimport argparse\nimport functools\nimport json\nimport os\nimport multiprocessing as mp\nimport pathlib\nimport re\nimport shlex\nimport subprocess as sp\n\n# Detect API #includes.\nAPI_INCLUDE_REGEX = re.compile('#include \"(envoy/.*)/[^/]+\\.pb\\.(validate\\.)?h\"')\n\n# Needed for CI to pass down bazel options.\nBAZEL_BUILD_OPTIONS = shlex.split(os.environ.get('BAZEL_BUILD_OPTIONS', ''))\n\n\n# Obtain the directory containing a path prefix, e.g. ./foo/bar.txt is ./foo,\n# ./foo/ba is ./foo, ./foo/bar/ is ./foo/bar.\ndef PrefixDirectory(path_prefix):\n  return path_prefix if os.path.isdir(path_prefix) else os.path.dirname(path_prefix)\n\n\n# Update a C++ file to the latest API.\ndef ApiBoostFile(llvm_include_path, debug_log, path):\n  print('Processing %s' % path)\n  if 'API_NO_BOOST_FILE' in pathlib.Path(path).read_text():\n    if debug_log:\n      print('Not boosting %s due to API_NO_BOOST_FILE\\n' % path)\n    return None\n  # Run the booster\n  try:\n    result = sp.run([\n        './bazel-bin/external/envoy_dev/clang_tools/api_booster/api_booster',\n        '--extra-arg-before=-xc++',\n        '--extra-arg=-isystem%s' % llvm_include_path, '--extra-arg=-Wno-undefined-internal',\n        '--extra-arg=-Wno-old-style-cast', path\n    ],\n                    capture_output=True,\n                    check=True)\n  except sp.CalledProcessError as e:\n    print('api_booster failure for %s: %s %s' % (path, e, e.stderr.decode('utf-8')))\n    raise\n  if debug_log:\n    print(result.stderr.decode('utf-8'))\n\n  # Consume stdout containing 
the list of inferred API headers.\n  return sorted(set(result.stdout.decode('utf-8').splitlines()))\n\n\n# Rewrite API includes to the inferred headers. Currently this is handled\n# outside of the clang-ast-replacements. In theory we could either integrate\n# with this or with clang-include-fixer, but it's pretty simple to handle as done\n# below, we have more control over special casing as well, so ¯\\_(ツ)_/¯.\ndef RewriteIncludes(args):\n  path, api_includes = args\n  # Files with API_NO_BOOST_FILE will have None returned by ApiBoostFile.\n  if api_includes is None:\n    return\n  # We just dump the inferred API header includes at the start of the #includes\n  # in the file and remove all the present API header includes. This does not\n  # match Envoy style; we rely on later invocations of fix_format.sh to take\n  # care of this alignment.\n  output_lines = []\n  include_lines = ['#include \"%s\"' % f for f in api_includes]\n  input_text = pathlib.Path(path).read_text()\n  for line in input_text.splitlines():\n    if include_lines and line.startswith('#include'):\n      output_lines.extend(include_lines)\n      include_lines = None\n    # Exclude API includes, except for a special case related to v2alpha\n    # ext_authz; this is needed to include the service descriptor in the build\n    # and is a hack that will go away when we remove v2.\n    if re.match(API_INCLUDE_REGEX, line) and 'envoy/service/auth/v2alpha' not in line:\n      continue\n    output_lines.append(line)\n  # Rewrite file.\n  pathlib.Path(path).write_text('\\n'.join(output_lines) + '\\n')\n\n\n# Update the Envoy source tree to the latest API.\ndef ApiBoostTree(target_paths,\n                 generate_compilation_database=False,\n                 build_api_booster=False,\n                 debug_log=False,\n                 sequential=False):\n  dep_build_targets = ['//%s/...' % PrefixDirectory(prefix) for prefix in target_paths]\n\n  # Optional setup of state. 
We need the compilation database and api_booster\n  # tool in place before we can start boosting.\n  if generate_compilation_database:\n    print('Building compilation database for %s' % dep_build_targets)\n    sp.run(['./tools/gen_compilation_database.py', '--include_headers'] + dep_build_targets,\n           check=True)\n\n  if build_api_booster:\n    # Similar to gen_compilation_database.py, we only need the cc_library for\n    # setup. The long term fix for this is in\n    # https://github.com/bazelbuild/bazel/issues/9578.\n    #\n    # Figure out some cc_libraries that cover most of our external deps. This is\n    # the same logic as in gen_compilation_database.py.\n    query = 'kind(cc_library, {})'.format(' union '.join(dep_build_targets))\n    dep_lib_build_targets = sp.check_output(['bazel', 'query', query]).decode().splitlines()\n    # We also need some misc. stuff such as test binaries for setup of benchmark\n    # dep.\n    query = 'attr(\"tags\", \"compilation_db_dep\", {})'.format(' union '.join(dep_build_targets))\n    dep_lib_build_targets.extend(sp.check_output(['bazel', 'query', query]).decode().splitlines())\n    extra_api_booster_args = []\n    if debug_log:\n      extra_api_booster_args.append('--copt=-DENABLE_DEBUG_LOG')\n\n    # Slightly easier to debug when we build api_booster on its own.\n    sp.run([\n        'bazel',\n        'build',\n        '--strip=always',\n        '@envoy_dev//clang_tools/api_booster',\n    ] + BAZEL_BUILD_OPTIONS + extra_api_booster_args,\n           check=True)\n    sp.run([\n        'bazel',\n        'build',\n        '--config=libc++',\n        '--strip=always',\n    ] + BAZEL_BUILD_OPTIONS + dep_lib_build_targets,\n           check=True)\n\n  # Figure out where the LLVM include path is. 
We need to provide this\n  # explicitly as the api_booster is built inside the Bazel cache and doesn't\n  # know about this path.\n  # TODO(htuch): this is fragile and depends on Clang version, should figure out\n  # a cleaner approach.\n  llvm_include_path = os.path.join(\n      sp.check_output([os.getenv('LLVM_CONFIG'), '--libdir']).decode().rstrip(),\n      'clang/9.0.0/include')\n\n  # Determine the files in the target dirs eligible for API boosting, based on\n  # known files in the compilation database.\n  file_paths = set([])\n  for entry in json.loads(pathlib.Path('compile_commands.json').read_text()):\n    file_path = entry['file']\n    if any(file_path.startswith(prefix) for prefix in target_paths):\n      file_paths.add(file_path)\n  # Ensure a deterministic ordering if we are going to process sequentially.\n  if sequential:\n    file_paths = sorted(file_paths)\n\n  # The API boosting is file local, so this is trivially parallelizable, use\n  # multiprocessing pool with default worker pool sized to cpu_count(), since\n  # this is CPU bound.\n  try:\n    with mp.Pool(processes=1 if sequential else None) as p:\n      # We need multiple phases, to ensure that any dependency on files being modified\n      # in one thread on consumed transitive headers on the other thread isn't an\n      # issue. 
This also ensures that we complete all analysis error free before\n      # any mutation takes place.\n      # TODO(htuch): we should move to run-clang-tidy.py once the headers fixups\n      # are Clang-based.\n      api_includes = p.map(functools.partial(ApiBoostFile, llvm_include_path, debug_log),\n                           file_paths)\n      # Apply Clang replacements before header fixups, since the replacements\n      # are all relative to the original file.\n      for prefix_dir in set(map(PrefixDirectory, target_paths)):\n        sp.run(['clang-apply-replacements', prefix_dir], check=True)\n      # Fixup headers.\n      p.map(RewriteIncludes, zip(file_paths, api_includes))\n  finally:\n    # Cleanup any stray **/*.clang-replacements.yaml.\n    for prefix in target_paths:\n      clang_replacements = pathlib.Path(\n          PrefixDirectory(prefix)).glob('**/*.clang-replacements.yaml')\n      for path in clang_replacements:\n        path.unlink()\n\n\nif __name__ == '__main__':\n  parser = argparse.ArgumentParser(description='Update Envoy tree to the latest API')\n  parser.add_argument('--generate_compilation_database', action='store_true')\n  parser.add_argument('--build_api_booster', action='store_true')\n  parser.add_argument('--debug_log', action='store_true')\n  parser.add_argument('--sequential', action='store_true')\n  parser.add_argument('paths', nargs='*', default=['source', 'test', 'include'])\n  args = parser.parse_args()\n  ApiBoostTree(args.paths,\n               generate_compilation_database=args.generate_compilation_database,\n               build_api_booster=args.build_api_booster,\n               debug_log=args.debug_log,\n               sequential=args.sequential)\n"
  },
  {
    "path": "tools/api_boost/api_boost_test.py",
    "content": "#!/usr/bin/env python3\n\n# Golden C++ source tests for API boosting. This is effectively a test for the\n# combination of api_boost.py, the Clang libtooling-based\n# tools/clang_tools/api_booster, as well as the type whisperer and API type\n# database.\n\nimport argparse\nfrom collections import namedtuple\nimport logging\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport api_boost\n\nTestCase = namedtuple('TestCase', ['name', 'description'])\n\n# List of test in the form [(file_name, explanation)]\nTESTS = list(\n    map(lambda x: TestCase(*x), [\n        ('deprecate', 'Deprecations'),\n        ('elaborated_type', 'ElaboratedTypeLoc type upgrades'),\n        ('using_decl', 'UsingDecl upgrades for named types'),\n        ('rename', 'Annotation-based renaming'),\n        ('decl_ref_expr', 'DeclRefExpr upgrades for named constants'),\n        ('no_boost_file', 'API_NO_BOOST_FILE annotations'),\n        ('validate', 'Validation proto header inference'),\n    ]))\n\nTESTDATA_PATH = 'tools/api_boost/testdata'\n\n\ndef Diff(some_path, other_path):\n  result = subprocess.run(['diff', '-u', some_path, other_path], capture_output=True)\n  if result.returncode == 0:\n    return None\n  return result.stdout.decode('utf-8') + result.stderr.decode('utf-8')\n\n\nif __name__ == '__main__':\n  parser = argparse.ArgumentParser(description='Golden C++ source tests for api_boost.py')\n  parser.add_argument('tests', nargs='*')\n  args = parser.parse_args()\n\n  # Accumulated error messages.\n  logging.basicConfig(format='%(message)s')\n  messages = []\n\n  def ShouldRunTest(test_name):\n    return len(args.tests) == 0 or test_name in args.tests\n\n  # Run API booster against test artifacts in a directory relative to workspace.\n  # We use a temporary copy as the API booster does in-place rewriting.\n  with tempfile.TemporaryDirectory(dir=pathlib.Path.cwd()) as path:\n    # Setup temporary tree.\n    
shutil.copy(os.path.join(TESTDATA_PATH, 'BUILD'), path)\n    for test in TESTS:\n      if ShouldRunTest(test.name):\n        shutil.copy(os.path.join(TESTDATA_PATH, test.name + '.cc'), path)\n      else:\n        # Place an empty file to make Bazel happy.\n        pathlib.Path(path, test.name + '.cc').write_text('')\n\n    # Run API booster.\n    relpath_to_testdata = str(pathlib.Path(path).relative_to(pathlib.Path.cwd()))\n    api_boost.ApiBoostTree([\n        os.path.join(relpath_to_testdata, test.name) for test in TESTS if ShouldRunTest(test.name)\n    ],\n                           generate_compilation_database=True,\n                           build_api_booster=True,\n                           debug_log=True,\n                           sequential=True)\n\n    # Validate output against golden files.\n    for test in TESTS:\n      if ShouldRunTest(test.name):\n        delta = Diff(os.path.join(TESTDATA_PATH, test.name + '.cc.gold'),\n                     os.path.join(path, test.name + '.cc'))\n        if delta is not None:\n          messages.append('Non-empty diff for %s (%s):\\n%s\\n' %\n                          (test.name, test.description, delta))\n\n  if len(messages) > 0:\n    logging.error('FAILED:\\n{}'.format('\\n'.join(messages)))\n    sys.exit(1)\n  logging.warning('PASS')\n"
  },
  {
    "path": "tools/api_boost/testdata/BUILD",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_library\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_library(\n    name = \"decl_ref_expr\",\n    srcs = [\"decl_ref_expr.cc\"],\n    deps = [\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2/route:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"deprecate\",\n    srcs = [\"deprecate.cc\"],\n    deps = [\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2/route:pkg_cc_proto\",\n        \"@envoy_api//envoy/type/matcher:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"elaborated_type\",\n    srcs = [\"elaborated_type.cc\"],\n    deps = [\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto\",\n    ],\n)\n\nenvoy_cc_library(\n    name = \"rename\",\n    srcs = [\"rename.cc\"],\n    deps = [\"@envoy_api//envoy/api/v2/route:pkg_cc_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"no_boost_file\",\n    srcs = [\"no_boost_file.cc\"],\n    deps = [\"@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"using_decl\",\n    srcs = [\"using_decl.cc\"],\n    deps = [\"@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto\"],\n)\n\nenvoy_cc_library(\n    name = \"validate\",\n    srcs = [\"validate.cc\"],\n    deps = [\n        \"//include/envoy/protobuf:message_validator_interface\",\n        \"//source/common/protobuf:utility_lib\",\n        \"@envoy_api//envoy/api/v2:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "tools/api_boost/testdata/decl_ref_expr.cc",
    "content": "#include \"envoy/api/v2/cds.pb.h\"\n#include \"envoy/api/v2/route/route.pb.h\"\n#include \"envoy/config/overload/v2alpha/overload.pb.h\"\n\n#define API_NO_BOOST(x) x\n#define BAR(x) x\n#define ASSERT(x) static_cast<void>(x)\n\nusing envoy::config::overload::v2alpha::Trigger;\n\nusing envoy::api::v2::Cluster;\nusing MutableStringClusterAccessor = std::string* (Cluster::*)();\n\nclass ThresholdTriggerImpl {\npublic:\n  ThresholdTriggerImpl(const envoy::config::overload::v2alpha::Trigger& config) {\n    switch (config.trigger_oneof_case()) {\n    case envoy::config::overload::v2alpha::Trigger::kThreshold:\n      break;\n    default:\n      break;\n    }\n    switch (config.trigger_oneof_case()) {\n    case Trigger::kThreshold:\n      break;\n    default:\n      break;\n    }\n    API_NO_BOOST(envoy::api::v2::route::RouteAction) route_action;\n    route_action.host_rewrite();\n    API_NO_BOOST(envoy::config::overload::v2alpha::Trigger) foo;\n    BAR(API_NO_BOOST(envoy::config::overload::v2alpha::Trigger)) bar;\n    BAR(envoy::config::overload::v2alpha::Trigger) baz;\n    envoy::config::overload::v2alpha::ThresholdTrigger::default_instance();\n    ASSERT(envoy::config::overload::v2alpha::Trigger::kThreshold == Trigger::kThreshold);\n    ASSERT(Foo::kThreshold == Trigger::kThreshold);\n    envoy::api::v2::Cluster::LbPolicy_Name(0);\n    static_cast<void>(envoy::api::v2::Cluster::MAGLEV);\n    MutableStringClusterAccessor foo2 = &envoy::api::v2::Cluster::mutable_name;\n    static_cast<void>(foo2);\n  }\n\n  using Foo = envoy::config::overload::v2alpha::Trigger;\n};\n"
  },
  {
    "path": "tools/api_boost/testdata/decl_ref_expr.cc.gold",
    "content": "#include \"envoy/api/v2/route/route_components.pb.h\"\n#include \"envoy/config/cluster/v4alpha/cluster.pb.h\"\n#include \"envoy/config/overload/v2alpha/overload.pb.h\"\n#include \"envoy/config/overload/v3/overload.pb.h\"\n\n#define API_NO_BOOST(x) x\n#define BAR(x) x\n#define ASSERT(x) static_cast<void>(x)\n\nusing envoy::config::overload::v3::Trigger;\n\nusing envoy::config::cluster::v4alpha::Cluster;\nusing MutableStringClusterAccessor = std::string* (Cluster::*)();\n\nclass ThresholdTriggerImpl {\npublic:\n  ThresholdTriggerImpl(const envoy::config::overload::v3::Trigger& config) {\n    switch (config.trigger_oneof_case()) {\n    case envoy::config::overload::v3::Trigger::TriggerOneofCase::kThreshold:\n      break;\n    default:\n      break;\n    }\n    switch (config.trigger_oneof_case()) {\n    case Trigger::kThreshold:\n      break;\n    default:\n      break;\n    }\n    API_NO_BOOST(envoy::api::v2::route::RouteAction) route_action;\n    route_action.host_rewrite();\n    API_NO_BOOST(envoy::config::overload::v2alpha::Trigger) foo;\n    BAR(API_NO_BOOST(envoy::config::overload::v2alpha::Trigger)) bar;\n    BAR(envoy::config::overload::v3::Trigger) baz;\n    envoy::config::overload::v3::ThresholdTrigger::default_instance();\n    ASSERT(envoy::config::overload::v3::Trigger::TriggerOneofCase::kThreshold == Trigger::kThreshold);\n    ASSERT(Foo::kThreshold == Trigger::kThreshold);\n    envoy::config::cluster::v4alpha::Cluster::LbPolicy_Name(0);\n    static_cast<void>(envoy::config::cluster::v4alpha::Cluster::MAGLEV);\n    MutableStringClusterAccessor foo2 = &envoy::config::cluster::v4alpha::Cluster::mutable_name;\n    static_cast<void>(foo2);\n  }\n\n  using Foo = envoy::config::overload::v3::Trigger;\n};\n"
  },
  {
    "path": "tools/api_boost/testdata/deprecate.cc",
    "content": "#include \"envoy/api/v2/cds.pb.h\"\n#include \"envoy/api/v2/route/route.pb.h\"\n#include \"envoy/type/matcher/string.pb.h\"\n\nvoid test() {\n  envoy::api::v2::route::VirtualHost vhost;\n  vhost.per_filter_config();\n  vhost.mutable_per_filter_config();\n  static_cast<void>(envoy::type::matcher::StringMatcher::kRegex);\n  static_cast<void>(envoy::api::v2::Cluster::ORIGINAL_DST_LB);\n}\n"
  },
  {
    "path": "tools/api_boost/testdata/deprecate.cc.gold",
    "content": "#include \"envoy/config/cluster/v4alpha/cluster.pb.h\"\n#include \"envoy/config/route/v4alpha/route_components.pb.h\"\n#include \"envoy/type/matcher/v4alpha/string.pb.h\"\n\nvoid test() {\n  envoy::config::route::v4alpha::VirtualHost vhost;\n  vhost.hidden_envoy_deprecated_per_filter_config();\n  vhost.mutable_hidden_envoy_deprecated_per_filter_config();\n  static_cast<void>(envoy::type::matcher::v4alpha::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex);\n  static_cast<void>(envoy::config::cluster::v4alpha::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB);\n}\n"
  },
  {
    "path": "tools/api_boost/testdata/elaborated_type.cc",
    "content": "#include \"envoy/api/v2/cds.pb.h\"\n#include \"envoy/config/overload/v2alpha/overload.pb.h\"\n\nclass ThresholdTriggerImpl {\npublic:\n  ThresholdTriggerImpl(const envoy::config::overload::v2alpha::ThresholdTrigger& /*config*/) {}\n  void someMethod(envoy::api::v2::Cluster_LbPolicy) {}\n\n  const envoy::config::overload::v2alpha::Trigger::TriggerOneofCase case_{};\n};\n"
  },
  {
    "path": "tools/api_boost/testdata/elaborated_type.cc.gold",
    "content": "#include \"envoy/config/cluster/v4alpha/cluster.pb.h\"\n#include \"envoy/config/overload/v3/overload.pb.h\"\n\nclass ThresholdTriggerImpl {\npublic:\n  ThresholdTriggerImpl(const envoy::config::overload::v3::ThresholdTrigger& /*config*/) {}\n  void someMethod(envoy::config::cluster::v4alpha::Cluster::LbPolicy) {}\n\n  const envoy::config::overload::v3::Trigger::TriggerOneofCase case_{};\n};\n"
  },
  {
    "path": "tools/api_boost/testdata/no_boost_file.cc",
    "content": "#include \"envoy/config/overload/v2alpha/overload.pb.h\"\n\n// API_NO_BOOST_FILE\n\nusing envoy::config::overload::v2alpha::ThresholdTrigger;\nusing SomePtrAlias = std::unique_ptr<envoy::config::overload::v2alpha::ThresholdTrigger>;\n\nclass ThresholdTriggerImpl {\npublic:\n  ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {}\n  ThresholdTriggerImpl(SomePtrAlias /*config*/) {}\n};\n"
  },
  {
    "path": "tools/api_boost/testdata/no_boost_file.cc.gold",
    "content": "#include \"envoy/config/overload/v2alpha/overload.pb.h\"\n\n// API_NO_BOOST_FILE\n\nusing envoy::config::overload::v2alpha::ThresholdTrigger;\nusing SomePtrAlias = std::unique_ptr<envoy::config::overload::v2alpha::ThresholdTrigger>;\n\nclass ThresholdTriggerImpl {\npublic:\n  ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {}\n  ThresholdTriggerImpl(SomePtrAlias /*config*/) {}\n};\n"
  },
  {
    "path": "tools/api_boost/testdata/rename.cc",
    "content": "#include \"envoy/api/v2/route/route.pb.h\"\n\nvoid test() {\n  envoy::api::v2::route::RouteAction route_action;\n  route_action.host_rewrite();\n  route_action.set_host_rewrite(\"blah\");\n}\n"
  },
  {
    "path": "tools/api_boost/testdata/rename.cc.gold",
    "content": "#include \"envoy/config/route/v4alpha/route_components.pb.h\"\n\nvoid test() {\n  envoy::config::route::v4alpha::RouteAction route_action;\n  route_action.host_rewrite_literal();\n  route_action.set_host_rewrite_literal(\"blah\");\n}\n"
  },
  {
    "path": "tools/api_boost/testdata/using_decl.cc",
    "content": "#include \"envoy/config/overload/v2alpha/overload.pb.h\"\n\nusing envoy::config::overload::v2alpha::ThresholdTrigger;\nusing ::envoy::config::overload::v2alpha::Trigger;\nusing SomePtrAlias = std::unique_ptr<envoy::config::overload::v2alpha::ThresholdTrigger>;\n\nclass ThresholdTriggerImpl {\npublic:\n  ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {}\n  ThresholdTriggerImpl(SomePtrAlias /*config*/) {}\n};\n"
  },
  {
    "path": "tools/api_boost/testdata/using_decl.cc.gold",
    "content": "#include \"envoy/config/overload/v3/overload.pb.h\"\n\nusing envoy::config::overload::v3::ThresholdTrigger;\nusing envoy::config::overload::v3::Trigger;\nusing SomePtrAlias = std::unique_ptr<envoy::config::overload::v3::ThresholdTrigger>;\n\nclass ThresholdTriggerImpl {\npublic:\n  ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {}\n  ThresholdTriggerImpl(SomePtrAlias /*config*/) {}\n};\n"
  },
  {
    "path": "tools/api_boost/testdata/validate.cc",
    "content": "#include \"envoy/api/v2/cds.pb.h\"\n#include \"envoy/api/v2/cluster.pb.validate.h\"\n#include \"envoy/protobuf/message_validator.h\"\n\n#include \"common/protobuf/utility.h\"\n\nvoid foo(Envoy::ProtobufMessage::ValidationVisitor& validator) {\n  envoy::api::v2::Cluster msg;\n  Envoy::MessageUtil::downcastAndValidate<const envoy::api::v2::Cluster&>(msg, validator);\n}\n"
  },
  {
    "path": "tools/api_boost/testdata/validate.cc.gold",
    "content": "#include \"envoy/config/cluster/v4alpha/cluster.pb.h\"\n#include \"envoy/config/cluster/v4alpha/cluster.pb.validate.h\"\n#include \"envoy/protobuf/message_validator.h\"\n\n#include \"common/protobuf/utility.h\"\n\nvoid foo(Envoy::ProtobufMessage::ValidationVisitor& validator) {\n  envoy::config::cluster::v4alpha::Cluster msg;\n  Envoy::MessageUtil::downcastAndValidate<const envoy::config::cluster::v4alpha::Cluster&>(msg, validator);\n}\n"
  },
  {
    "path": "tools/api_proto_plugin/BUILD",
    "content": "load(\"@bazel_skylib//rules:common_settings.bzl\", \"string_flag\")\nload(\"@rules_python//python:defs.bzl\", \"py_library\")\nload(\"//tools/type_whisperer:type_database.bzl\", \"type_database\")\n\nlicenses([\"notice\"])  # Apache 2\n\npy_library(\n    name = \"api_proto_plugin\",\n    srcs = [\n        \"annotations.py\",\n        \"plugin.py\",\n        \"traverse.py\",\n        \"type_context.py\",\n        \"visitor.py\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"@com_google_protobuf//:protobuf_python\",\n    ],\n)\n\npy_library(\n    name = \"utils\",\n    srcs = [\"utils.py\"],\n    visibility = [\"//visibility:public\"],\n)\n\nlabel_flag(\n    name = \"default_type_db_target\",\n    # While this is not completely empty but type_db_gen generates nothing on this target.\n    build_setting_default = \"@com_google_protobuf//:empty_proto\",\n    visibility = [\"//visibility:public\"],\n)\n\ntype_database(\n    name = \"default_type_db\",\n    targets = [\":default_type_db_target\"],\n    visibility = [\"//visibility:public\"],\n)\n\nstring_flag(\n    name = \"extra_args\",\n    build_setting_default = \"\",\n    visibility = [\"//visibility:public\"],\n)\n"
  },
  {
    "path": "tools/api_proto_plugin/__init__.py",
    "content": ""
  },
  {
    "path": "tools/api_proto_plugin/annotations.py",
    "content": "\"\"\"Envoy API annotations.\"\"\"\n\nfrom collections import namedtuple\n\nimport re\n\n# Key-value annotation regex.\nANNOTATION_REGEX = re.compile('\\[#([\\w-]+?):\\s*(.*?)\\](\\s?)', re.DOTALL)\n\n# Page/section titles with special prefixes in the proto comments\nDOC_TITLE_ANNOTATION = 'protodoc-title'\n\n# When documenting an extension, this should be used to specify the qualified\n# name that the extension registers as in the static registry, e.g.\n# envoy.filters.network.http_connection_manager.\nEXTENSION_ANNOTATION = 'extension'\n\n# Not implemented yet annotation on leading comments, leading to hiding of\n# field.\nNOT_IMPLEMENTED_HIDE_ANNOTATION = 'not-implemented-hide'\n\n# For large protos, place a comment at the top that specifies the next free field number.\nNEXT_FREE_FIELD_ANNOTATION = 'next-free-field'\n\n# Comment that allows for easy searching for things that need cleaning up in the next major\n# API version.\nNEXT_MAJOR_VERSION_ANNOTATION = 'next-major-version'\n\n# Comment. Just used for adding text that will not go into the docs at all.\nCOMMENT_ANNOTATION = 'comment'\n\nVALID_ANNOTATIONS = set([\n    DOC_TITLE_ANNOTATION,\n    EXTENSION_ANNOTATION,\n    NOT_IMPLEMENTED_HIDE_ANNOTATION,\n    NEXT_FREE_FIELD_ANNOTATION,\n    NEXT_MAJOR_VERSION_ANNOTATION,\n    COMMENT_ANNOTATION,\n])\n\n# These can propagate from file scope to message/enum scope (and be overridden).\nINHERITED_ANNOTATIONS = set([\n    # Nothing here right now, this used to be PROTO_STATUS_ANNOTATION. 
Retaining\n    # this capability for potential future use.\n])\n\n\nclass AnnotationError(Exception):\n  \"\"\"Base error class for the annotations module.\"\"\"\n\n\ndef ExtractAnnotations(s, inherited_annotations=None):\n  \"\"\"Extract annotations map from a given comment string.\n\n  Args:\n    s: string that may contains annotations.\n    inherited_annotations: annotation map from file-level inherited annotations\n      (or None) if this is a file-level comment.\n\n  Returns:\n    Annotation map.\n  \"\"\"\n  annotations = {\n      k: v for k, v in (inherited_annotations or {}).items() if k in INHERITED_ANNOTATIONS\n  }\n  # Extract annotations.\n  groups = re.findall(ANNOTATION_REGEX, s)\n  for group in groups:\n    annotation = group[0]\n    if annotation not in VALID_ANNOTATIONS:\n      raise AnnotationError('Unknown annotation: %s' % annotation)\n    annotations[group[0]] = group[1].lstrip()\n  return annotations\n\n\ndef XformAnnotation(s, annotation_xforms):\n  \"\"\"Return transformed string with annotation transformers.\n\n  The annotation will be replaced with the new value returned by the transformer.\n  If the transformer returns None, then the annotation will be removed.\n  If the annotation presented in transformers doesn't exist in the original string,\n  a new annotation will be appended to the end of string.\n\n  Args:\n    annotation_xforms: a dict of transformers for annotations.\n\n  Returns:\n    transformed string.\n  \"\"\"\n  present_annotations = set()\n\n  def xform(match):\n    annotation, content, trailing = match.groups()\n    present_annotations.add(annotation)\n    annotation_xform = annotation_xforms.get(annotation)\n    if annotation_xform:\n      value = annotation_xform(annotation)\n      return '[#%s: %s]%s' % (annotation, value, trailing) if value is not None else ''\n    else:\n      return match.group(0)\n\n  def append(s, annotation, content):\n    return '%s [#%s: %s]\\n' % (s, annotation, content)\n\n  xformed = 
re.sub(ANNOTATION_REGEX, xform, s)\n  for annotation, xform in sorted(annotation_xforms.items()):\n    if annotation not in present_annotations:\n      value = xform(None)\n      if value is not None:\n        xformed = append(xformed, annotation, value)\n  return xformed\n\n\ndef WithoutAnnotations(s):\n  return re.sub(ANNOTATION_REGEX, '', s)\n"
  },
  {
    "path": "tools/api_proto_plugin/plugin.bzl",
    "content": "load(\"@bazel_skylib//rules:common_settings.bzl\", \"BuildSettingInfo\")\nload(\"@rules_proto//proto:defs.bzl\", \"ProtoInfo\")\n\n# Borrowed from https://github.com/grpc/grpc-java/blob/v1.24.1/java_grpc_library.bzl#L61\ndef _path_ignoring_repository(f):\n    # Bazel creates a _virtual_imports directory in case the .proto source files\n    # need to be accessed at a path that's different from their source path:\n    # https://github.com/bazelbuild/bazel/blob/0.27.1/src/main/java/com/google/devtools/build/lib/rules/proto/ProtoCommon.java#L289\n    #\n    # In that case, the import path of the .proto file is the path relative to\n    # the virtual imports directory of the rule in question.\n    virtual_imports = \"/_virtual_imports/\"\n    if virtual_imports in f.path:\n        return f.path.split(virtual_imports)[1].split(\"/\", 1)[1]\n    elif len(f.owner.workspace_root) == 0:\n        # |f| is in the main repository\n        return f.short_path\n    else:\n        # If |f| is a generated file, it will have \"bazel-out/*/genfiles\" prefix\n        # before \"external/workspace\", so we need to add the starting index of \"external/workspace\"\n        return f.path[f.path.find(f.owner.workspace_root) + len(f.owner.workspace_root) + 1:]\n\ndef api_proto_plugin_impl(target, ctx, output_group, mnemonic, output_suffixes):\n    # Compute output files from the current proto_library node's dependencies.\n    transitive_outputs = depset(transitive = [dep.output_groups[output_group] for dep in ctx.rule.attr.deps])\n    proto_sources = target[ProtoInfo].direct_sources\n\n    # If this proto_library doesn't actually name any sources, e.g. //api:api,\n    # but just glues together other libs, we just need to follow the graph.\n    if not proto_sources:\n        return [OutputGroupInfo(**{output_group: transitive_outputs})]\n\n    # Figure out the set of import paths. 
Ideally we would use descriptor sets\n    # built by proto_library, which avoid having to do nasty path mangling, but\n    # these don't include source_code_info, which we need for comment\n    # extractions. See https://github.com/bazelbuild/bazel/issues/3971.\n    import_paths = []\n    for f in target[ProtoInfo].transitive_sources.to_list():\n        import_paths.append(\"{}={}\".format(_path_ignoring_repository(f), f.path))\n\n    # The outputs live in the ctx.label's package root. We add some additional\n    # path information to match with protoc's notion of path relative locations.\n    outputs = []\n    for output_suffix in output_suffixes:\n        outputs += [ctx.actions.declare_file(ctx.label.name + \"/\" + _path_ignoring_repository(f) +\n                                             output_suffix) for f in proto_sources]\n\n    # Create the protoc command-line args.\n    inputs = target[ProtoInfo].transitive_sources\n    ctx_path = ctx.label.package + \"/\" + ctx.label.name\n    output_path = outputs[0].root.path + \"/\" + outputs[0].owner.workspace_root + \"/\" + ctx_path\n    args = [\"-I./\" + ctx.label.workspace_root]\n    args += [\"-I\" + import_path for import_path in import_paths]\n    args += [\"--plugin=protoc-gen-api_proto_plugin=\" + ctx.executable._api_proto_plugin.path, \"--api_proto_plugin_out=\" + output_path]\n    if hasattr(ctx.attr, \"_type_db\"):\n        inputs = depset(transitive = [inputs] + [ctx.attr._type_db.files])\n        if len(ctx.attr._type_db.files.to_list()) != 1:\n            fail(\"{} must have one type database file\".format(ctx.attr._type_db))\n        args.append(\"--api_proto_plugin_opt=type_db_path=\" + ctx.attr._type_db.files.to_list()[0].path)\n    if hasattr(ctx.attr, \"_extra_args\"):\n        args.append(\"--api_proto_plugin_opt=extra_args=\" + ctx.attr._extra_args[BuildSettingInfo].value)\n    args += [src.path for src in target[ProtoInfo].direct_sources]\n    env = {}\n\n    ctx.actions.run(\n        
executable = ctx.executable._protoc,\n        arguments = args,\n        inputs = inputs,\n        tools = [ctx.executable._api_proto_plugin],\n        outputs = outputs,\n        mnemonic = mnemonic,\n        use_default_shell_env = True,\n    )\n\n    transitive_outputs = depset(outputs, transitive = [transitive_outputs])\n    return [OutputGroupInfo(**{output_group: transitive_outputs})]\n\ndef api_proto_plugin_aspect(tool_label, aspect_impl, use_type_db = False):\n    _attrs = {\n        \"_protoc\": attr.label(\n            default = Label(\"@com_google_protobuf//:protoc\"),\n            executable = True,\n            cfg = \"exec\",\n        ),\n        \"_api_proto_plugin\": attr.label(\n            default = Label(tool_label),\n            executable = True,\n            cfg = \"exec\",\n        ),\n    }\n    if use_type_db:\n        _attrs[\"_type_db\"] = attr.label(\n            default = Label(\"@envoy//tools/api_proto_plugin:default_type_db\"),\n        )\n    _attrs[\"_extra_args\"] = attr.label(\n        default = Label(\"@envoy//tools/api_proto_plugin:extra_args\"),\n    )\n    return aspect(\n        attr_aspects = [\"deps\"],\n        attrs = _attrs,\n        implementation = aspect_impl,\n    )\n"
  },
  {
    "path": "tools/api_proto_plugin/plugin.py",
    "content": "\"\"\"Python protoc plugin for Envoy APIs.\"\"\"\n\nimport cProfile\nfrom collections import namedtuple\nimport io\nimport os\nimport pstats\nimport sys\n\nfrom tools.api_proto_plugin import traverse\n\nfrom google.protobuf.compiler import plugin_pb2\n\nOutputDescriptor = namedtuple(\n    'OutputDescriptor',\n    [\n        # Output files are generated alongside their corresponding input .proto,\n        # with the output_suffix appended.\n        'output_suffix',\n        # The visitor factory is a function to create a visitor.Visitor defining\n        # the business logic of the plugin for the specific output descriptor.\n        'visitor_factory',\n        # FileDescriptorProto transformer; this is applied to the input\n        # before any output generation.\n        'xform',\n        # Supply --//tools/api_proto_plugin CLI args as a parameters dictionary\n        # to visitor_factory constructor and xform function?\n        'want_params',\n    ])\n\n\ndef DirectOutputDescriptor(output_suffix, visitor, want_params=False):\n  return OutputDescriptor(output_suffix, visitor, (lambda x, _: x) if want_params else lambda x: x,\n                          want_params)\n\n\ndef Plugin(output_descriptors):\n  \"\"\"Protoc plugin entry point.\n\n  This defines protoc plugin and manages the stdin -> stdout flow. 
An\n  api_proto_plugin is defined by the provided visitor.\n\n  See\n  http://www.expobrain.net/2015/09/13/create-a-plugin-for-google-protocol-buffer/\n  for further details on protoc plugin basics.\n\n  Args:\n    output_descriptors: a list of OutputDescriptors.\n  \"\"\"\n  request = plugin_pb2.CodeGeneratorRequest()\n  request.ParseFromString(sys.stdin.buffer.read())\n  response = plugin_pb2.CodeGeneratorResponse()\n  cprofile_enabled = os.getenv('CPROFILE_ENABLED')\n\n  # We use request.file_to_generate rather than request.file_proto here since we\n  # are invoked inside a Bazel aspect, each node in the DAG will be visited once\n  # by the aspect and we only want to generate docs for the current node.\n  for file_to_generate in request.file_to_generate:\n    # Find the FileDescriptorProto for the file we actually are generating.\n    file_proto = [pf for pf in request.proto_file if pf.name == file_to_generate][0]\n    if cprofile_enabled:\n      pr = cProfile.Profile()\n      pr.enable()\n    for od in output_descriptors:\n      f = response.file.add()\n      f.name = file_proto.name + od.output_suffix\n      # Don't run API proto plugins on things like WKT types etc.\n      if not file_proto.package.startswith('envoy.'):\n        continue\n      if request.HasField(\"parameter\") and od.want_params:\n        params = dict(param.split('=') for param in request.parameter.split(','))\n        xformed_proto = od.xform(file_proto, params)\n        visitor_factory = od.visitor_factory(params)\n      else:\n        xformed_proto = od.xform(file_proto)\n        visitor_factory = od.visitor_factory()\n      f.content = traverse.TraverseFile(xformed_proto, visitor_factory) if xformed_proto else ''\n    if cprofile_enabled:\n      pr.disable()\n      stats_stream = io.StringIO()\n      ps = pstats.Stats(pr,\n                        stream=stats_stream).sort_stats(os.getenv('CPROFILE_SORTBY', 'cumulative'))\n      stats_file = response.file.add()\n      stats_file.name = 
file_proto.name + '.profile'\n      ps.print_stats()\n      stats_file.content = stats_stream.getvalue()\n    # Also include the original FileDescriptorProto as text proto, this is\n    # useful when debugging.\n    descriptor_file = response.file.add()\n    descriptor_file.name = file_proto.name + \".descriptor.proto\"\n    descriptor_file.content = str(file_proto)\n  sys.stdout.buffer.write(response.SerializeToString())\n"
  },
  {
    "path": "tools/api_proto_plugin/traverse.py",
    "content": "\"\"\"FileDescriptorProto traversal for api_proto_plugin framework.\"\"\"\n\nfrom tools.api_proto_plugin import type_context\n\n\ndef TraverseService(type_context, service_proto, visitor):\n  \"\"\"Traverse a service definition.\n\n  Args:\n    type_context: type_context.TypeContext for service type.\n    service_proto: ServiceDescriptorProto for service.\n    visitor: visitor.Visitor defining the business logic of the plugin.\n\n  Returns:\n    Plugin specific output.\n  \"\"\"\n  return visitor.VisitService(service_proto, type_context)\n\n\ndef TraverseEnum(type_context, enum_proto, visitor):\n  \"\"\"Traverse an enum definition.\n\n  Args:\n    type_context: type_context.TypeContext for enum type.\n    enum_proto: EnumDescriptorProto for enum.\n    visitor: visitor.Visitor defining the business logic of the plugin.\n\n  Returns:\n    Plugin specific output.\n  \"\"\"\n  return visitor.VisitEnum(enum_proto, type_context)\n\n\ndef TraverseMessage(type_context, msg_proto, visitor):\n  \"\"\"Traverse a message definition.\n\n  Args:\n    type_context: type_context.TypeContext for message type.\n    msg_proto: DescriptorProto for message.\n    visitor: visitor.Visitor defining the business logic of the plugin.\n\n  Returns:\n    Plugin specific output.\n  \"\"\"\n  # We need to do some extra work to recover the map type annotation from the\n  # synthesized messages.\n  type_context.map_typenames = {\n      '%s.%s' % (type_context.name, nested_msg.name): (nested_msg.field[0], nested_msg.field[1])\n      for nested_msg in msg_proto.nested_type\n      if nested_msg.options.map_entry\n  }\n  nested_msgs = [\n      TraverseMessage(\n          type_context.ExtendNestedMessage(index, nested_msg.name, nested_msg.options.deprecated),\n          nested_msg, visitor) for index, nested_msg in enumerate(msg_proto.nested_type)\n  ]\n  nested_enums = [\n      TraverseEnum(\n          type_context.ExtendNestedEnum(index, nested_enum.name, 
nested_enum.options.deprecated),\n          nested_enum, visitor) for index, nested_enum in enumerate(msg_proto.enum_type)\n  ]\n  return visitor.VisitMessage(msg_proto, type_context, nested_msgs, nested_enums)\n\n\ndef TraverseFile(file_proto, visitor):\n  \"\"\"Traverse a proto file definition.\n\n  Args:\n    file_proto: FileDescriptorProto for file.\n    visitor: visitor.Visitor defining the business logic of the plugin.\n\n  Returns:\n    Plugin specific output.\n  \"\"\"\n  source_code_info = type_context.SourceCodeInfo(file_proto.name, file_proto.source_code_info)\n  package_type_context = type_context.TypeContext(source_code_info, file_proto.package)\n  services = [\n      TraverseService(package_type_context.ExtendService(index, service.name), service, visitor)\n      for index, service in enumerate(file_proto.service)\n  ]\n  msgs = [\n      TraverseMessage(package_type_context.ExtendMessage(index, msg.name, msg.options.deprecated),\n                      msg, visitor) for index, msg in enumerate(file_proto.message_type)\n  ]\n  enums = [\n      TraverseEnum(package_type_context.ExtendEnum(index, enum.name, enum.options.deprecated), enum,\n                   visitor) for index, enum in enumerate(file_proto.enum_type)\n  ]\n  return visitor.VisitFile(file_proto, package_type_context, services, msgs, enums)\n"
  },
  {
    "path": "tools/api_proto_plugin/type_context.py",
    "content": "\"\"\"Type context for FileDescriptorProto traversal.\"\"\"\n\nfrom collections import namedtuple\n\nfrom tools.api_proto_plugin import annotations\n\n\nclass Comment(object):\n  \"\"\"Wrapper for proto source comments.\"\"\"\n\n  def __init__(self, comment, file_level_annotations=None):\n    self.raw = comment\n    self.file_level_annotations = file_level_annotations\n    self.annotations = annotations.ExtractAnnotations(self.raw, file_level_annotations)\n\n  def getCommentWithTransforms(self, annotation_xforms):\n    \"\"\"Return transformed comment with annotation transformers.\n\n    Args:\n      annotation_xforms: a dict of transformers for annotations in leading comment.\n\n    Returns:\n      transformed Comment object.\n    \"\"\"\n    return Comment(annotations.XformAnnotation(self.raw, annotation_xforms),\n                   self.file_level_annotations)\n\n\nclass SourceCodeInfo(object):\n  \"\"\"Wrapper for SourceCodeInfo proto.\"\"\"\n\n  def __init__(self, name, source_code_info):\n    self.name = name\n    self.proto = source_code_info\n    # Map from path to SourceCodeInfo.Location\n    self._locations = {str(location.path): location for location in self.proto.location}\n    self._file_level_comments = None\n    self._file_level_annotations = None\n\n  @property\n  def file_level_comments(self):\n    \"\"\"Obtain inferred file level comment.\"\"\"\n    if self._file_level_comments:\n      return self._file_level_comments\n    comments = []\n    # We find the earliest detached comment by first finding the maximum start\n    # line for any location and then scanning for any earlier locations with\n    # detached comments.\n    earliest_detached_comment = max(location.span[0] for location in self.proto.location) + 1\n    for location in self.proto.location:\n      if location.leading_detached_comments and location.span[0] < earliest_detached_comment:\n        comments = location.leading_detached_comments\n        
earliest_detached_comment = location.span[0]\n    self._file_level_comments = comments\n    return comments\n\n  @property\n  def file_level_annotations(self):\n    \"\"\"Obtain inferred file level annotations.\"\"\"\n    if self._file_level_annotations:\n      return self._file_level_annotations\n    self._file_level_annotations = dict(\n        sum([list(annotations.ExtractAnnotations(c).items()) for c in self.file_level_comments],\n            []))\n    return self._file_level_annotations\n\n  def LocationPathLookup(self, path):\n    \"\"\"Lookup SourceCodeInfo.Location by path in SourceCodeInfo.\n\n    Args:\n      path: a list of path indexes as per\n        https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717.\n\n    Returns:\n      SourceCodeInfo.Location object if found, otherwise None.\n    \"\"\"\n    return self._locations.get(str(path), None)\n\n  # TODO(htuch): consider integrating comment lookup with overall\n  # FileDescriptorProto, perhaps via two passes.\n  def LeadingCommentPathLookup(self, path):\n    \"\"\"Lookup leading comment by path in SourceCodeInfo.\n\n    Args:\n      path: a list of path indexes as per\n        https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717.\n\n    Returns:\n      Comment object.\n    \"\"\"\n    location = self.LocationPathLookup(path)\n    if location is not None:\n      return Comment(location.leading_comments, self.file_level_annotations)\n    return Comment('')\n\n  def LeadingDetachedCommentsPathLookup(self, path):\n    \"\"\"Lookup leading detached comments by path in SourceCodeInfo.\n\n    Args:\n      path: a list of path indexes as per\n        https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717.\n\n    Returns:\n      List of detached comment strings.\n    \"\"\"\n    location = 
self.LocationPathLookup(path)\n    if location is not None and location.leading_detached_comments != self.file_level_comments:\n      return location.leading_detached_comments\n    return []\n\n  def TrailingCommentPathLookup(self, path):\n    \"\"\"Lookup trailing comment by path in SourceCodeInfo.\n\n    Args:\n      path: a list of path indexes as per\n        https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717.\n\n    Returns:\n      Raw detached comment string\n    \"\"\"\n    location = self.LocationPathLookup(path)\n    if location is not None:\n      return location.trailing_comments\n    return ''\n\n\nclass TypeContext(object):\n  \"\"\"Contextual information for a message/field.\n\n  Provides information around namespaces and enclosing types for fields and\n  nested messages/enums.\n  \"\"\"\n\n  def __init__(self, source_code_info, name):\n    # SourceCodeInfo as per\n    # https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto.\n    self.source_code_info = source_code_info\n    # path: a list of path indexes as per\n    #  https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717.\n    #  Extended as nested objects are traversed.\n    self.path = []\n    # Message/enum/field name. Extended as nested objects are traversed.\n    self.name = name\n    # Map from type name to the correct type annotation string, e.g. from\n    # \".envoy.api.v2.Foo.Bar\" to \"map<string, string>\". 
This is lost during\n    # proto synthesis and is dynamically recovered in TraverseMessage.\n    self.map_typenames = {}\n    # Map from a message's oneof index to the fields sharing a oneof.\n    self.oneof_fields = {}\n    # Map from a message's oneof index to the name of oneof.\n    self.oneof_names = {}\n    # Map from a message's oneof index to the \"required\" bool property.\n    self.oneof_required = {}\n    self.type_name = 'file'\n    self.deprecated = False\n\n  def _Extend(self, path, type_name, name, deprecated=False):\n    if not self.name:\n      extended_name = name\n    else:\n      extended_name = '%s.%s' % (self.name, name)\n    extended = TypeContext(self.source_code_info, extended_name)\n    extended.path = self.path + path\n    extended.type_name = type_name\n    extended.map_typenames = self.map_typenames.copy()\n    extended.oneof_fields = self.oneof_fields.copy()\n    extended.oneof_names = self.oneof_names.copy()\n    extended.oneof_required = self.oneof_required.copy()\n    extended.deprecated = self.deprecated or deprecated\n    return extended\n\n  def ExtendMessage(self, index, name, deprecated):\n    \"\"\"Extend type context with a message.\n\n    Args:\n      index: message index in file.\n      name: message name.\n      deprecated: is the message deprecated?\n    \"\"\"\n    return self._Extend([4, index], 'message', name, deprecated)\n\n  def ExtendNestedMessage(self, index, name, deprecated):\n    \"\"\"Extend type context with a nested message.\n\n    Args:\n      index: nested message index in message.\n      name: message name.\n      deprecated: is the message deprecated?\n    \"\"\"\n    return self._Extend([3, index], 'message', name, deprecated)\n\n  def ExtendField(self, index, name):\n    \"\"\"Extend type context with a field.\n\n    Args:\n      index: field index in message.\n      name: field name.\n    \"\"\"\n    return self._Extend([2, index], 'field', name)\n\n  def ExtendEnum(self, index, name, deprecated):\n    
\"\"\"Extend type context with an enum.\n\n    Args:\n      index: enum index in file.\n      name: enum name.\n      deprecated: is the enum deprecated?\n    \"\"\"\n    return self._Extend([5, index], 'enum', name, deprecated)\n\n  def ExtendService(self, index, name):\n    \"\"\"Extend type context with a service.\n\n    Args:\n      index: service index in file.\n      name: service name.\n    \"\"\"\n    return self._Extend([6, index], 'service', name)\n\n  def ExtendNestedEnum(self, index, name, deprecated):\n    \"\"\"Extend type context with a nested enum.\n\n    Args:\n      index: enum index in message.\n      name: enum name.\n      deprecated: is the enum deprecated?\n    \"\"\"\n    return self._Extend([4, index], 'enum', name, deprecated)\n\n  def ExtendEnumValue(self, index, name):\n    \"\"\"Extend type context with an enum value.\n\n    Args:\n      index: enum value index in enum.\n      name: value name.\n    \"\"\"\n    return self._Extend([2, index], 'enum_value', name)\n\n  def ExtendOneof(self, index, name):\n    \"\"\"Extend type context with a oneof declaration.\n\n    Args:\n      index: oneof index in oneof_decl.\n      name: oneof name.\n    \"\"\"\n    return self._Extend([8, index], 'oneof', name)\n\n  def ExtendMethod(self, index, name):\n    \"\"\"Extend type context with a service method declaration.\n\n    Args:\n      index: method index in service.\n      name: method name.\n    \"\"\"\n    return self._Extend([2, index], 'method', name)\n\n  @property\n  def location(self):\n    \"\"\"SourceCodeInfo.Location for type context.\"\"\"\n    return self.source_code_info.LocationPathLookup(self.path)\n\n  @property\n  def leading_comment(self):\n    \"\"\"Leading comment for type context.\"\"\"\n    return self.source_code_info.LeadingCommentPathLookup(self.path)\n\n  @property\n  def leading_detached_comments(self):\n    \"\"\"Leading detached comments for type context.\"\"\"\n    return 
self.source_code_info.LeadingDetachedCommentsPathLookup(self.path)\n\n  @property\n  def trailing_comment(self):\n    \"\"\"Trailing comment for type context.\"\"\"\n    return self.source_code_info.TrailingCommentPathLookup(self.path)\n"
  },
  {
    "path": "tools/api_proto_plugin/utils.py",
    "content": "import os\n\n\ndef ProtoFileCanonicalFromLabel(label):\n  \"\"\"Compute path from API root to a proto file from a Bazel proto label.\n\n  Args:\n    label: Bazel source proto label string.\n\n  Returns:\n    A string with the path, e.g. for @envoy_api_canonical//envoy/type/matcher:metadata.proto\n    this would be envoy/type/matcher/metadata.proto.\n  \"\"\"\n  assert (label.startswith('@envoy_api_canonical//'))\n  return label[len('@envoy_api_canonical//'):].replace(':', '/')\n\n\ndef BazelBinPathForOutputArtifact(label, suffix, root=''):\n  \"\"\"Find the location in bazel-bin/ for an api_proto_plugin output file.\n\n  Args:\n    label: Bazel source proto label string.\n    suffix: output suffix for the artifact from label, e.g. \".types.pb_text\".\n    root: location of bazel-bin/, if not specified, PWD.\n\n  Returns:\n    Path in bazel-bin/external/envoy_api_canonical for label output with given suffix.\n  \"\"\"\n  proto_file_path = ProtoFileCanonicalFromLabel(label)\n  return os.path.join(root, 'bazel-bin/external/envoy_api_canonical',\n                      os.path.dirname(proto_file_path), 'pkg', proto_file_path + suffix)\n"
  },
  {
    "path": "tools/api_proto_plugin/visitor.py",
    "content": "\"\"\"FileDescriptorProto visitor interface for api_proto_plugin implementations.\"\"\"\n\n\nclass Visitor(object):\n  \"\"\"Abstract visitor interface for api_proto_plugin implementation.\"\"\"\n\n  def VisitService(self, service_proto, type_context):\n    \"\"\"Visit a service definition.\n\n    Args:\n      service_proto: ServiceDescriptorProto for service.\n      type_context: type_context.TypeContext for service type.\n\n    Returns:\n      Plugin specific output.\n    \"\"\"\n    pass\n\n  def VisitEnum(self, enum_proto, type_context):\n    \"\"\"Visit an enum definition.\n\n    Args:\n      enum_proto: EnumDescriptorProto for enum.\n      type_context: type_context.TypeContext for enum type.\n\n    Returns:\n      Plugin specific output.\n    \"\"\"\n    pass\n\n  def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):\n    \"\"\"Visit a message definition.\n\n    Args:\n      msg_proto: DescriptorProto for message.\n      type_context: type_context.TypeContext for message type.\n      nested_msgs: a list of results from visiting nested messages.\n      nested_enums: a list of results from visiting nested enums.\n\n    Returns:\n      Plugin specific output.\n    \"\"\"\n    pass\n\n  def VisitFile(self, file_proto, type_context, services, msgs, enums):\n    \"\"\"Visit a proto file definition.\n\n    Args:\n      file_proto: FileDescriptorProto for file.\n      type_context: type_context.TypeContext for file.\n      services: a list of results from visiting services.\n      msgs: a list of results from visiting messages.\n      enums: a list of results from visiting enums.\n\n    Returns:\n      Plugin specific output.\n    \"\"\"\n    pass\n"
  },
  {
    "path": "tools/bazel-test-docker.sh",
    "content": "#!/bin/bash\n\n# Run a single Bazel test target under a privileged docker. Usage:\n#\n# tools/bazel-test-docker //test/foo:bar --some_other --bazel_args\n# By default, this will run in a local docker container, mounting the local shared library paths\n# into the counter. To run remotely, use RUN_REMOTE=yes. If the test was compiled with a different\n# toolchain than the envoy-build container, passing in LOCAL_MOUNT=yes will force it to copy the\n# local libraries into the container.\n\nif [[ -z \"$1\" ]]; then\n  echo \"First argument to $0 must be a [@repo]//test/foo:bar label identifying a set of test to run\"\n  echo \"\\\"$1\\\" does not match this pattern\"\n  exit 1\nfi\n\nSCRIPT_DIR=\"$(realpath \"$(dirname \"$0\")\")\"\n[[ -z \"${BAZEL}\" ]] && BAZEL=bazel\n[[ -z \"${DOCKER}\" ]] && DOCKER=docker\n\nif [[ -z \"${RUN_REMOTE}\" ]]; then\n  LOCAL_MOUNT=\"${LOCAL_MOUNT:-yes}\"\n  RUN_REMOTE=no\nelse\n  LOCAL_MOUNT=\"${LOCAL_MOUNT:-no}\"\n  RUN_REMOTE=yes\nfi\n\n# Pass through the docker environment\nDOCKER_ENV=$(mktemp -t docker_env.XXXXXX)\nfunction cleanup() {\n  rm -f \"${DOCKER_ENV}\"\n}\n\ntrap cleanup EXIT\ncat > \"${DOCKER_ENV}\" <<EOF\n  #!/bin/bash\n  export DOCKER_CERT_PATH=\"${DOCKER_CERT_PATH}\"\n  export DOCKER_HOST=\"${DOCKER_HOST}\"\n  export DOCKER_MACHINE_NAME=\"${DOCKER_MACHINE_NAME}\"\n  export DOCKER_TLS_VERIFY=\"${DOCKER_TLS_VERIFY}\"\n  export NO_PROXY=\"${NO_PROXY}\"\nEOF\n\n. ./ci/envoy_build_sha.sh\nIMAGE=envoyproxy/envoy-build:${ENVOY_BUILD_SHA}\n\n# Note docker_wrapper.sh is tightly coupled to the order of arguments here due to where the test\n# name is passed in.\n\"${BAZEL}\" test \"$@\" --strategy=TestRunner=standalone --cache_test_results=no \\\n  --test_output=summary --run_under=\"${SCRIPT_DIR}/docker_wrapper.sh ${IMAGE} ${RUN_REMOTE} \\\n   ${LOCAL_MOUNT} ${DOCKER_ENV}\"\n"
  },
  {
    "path": "tools/bootstrap2pb.cc",
    "content": "/**\n * Utility to convert bootstrap from its YAML/JSON/proto representation to text\n * proto.\n *\n * Usage:\n *\n * bootstrap2pb <input YAML/JSON/proto path> <output text proto path>\n */\n#include <cstdlib>\n#include <fstream>\n\n#include \"envoy/config/bootstrap/v2/bootstrap.pb.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/common/assert.h\"\n#include \"common/event/real_time_system.h\"\n#include \"common/protobuf/message_validator_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/stats/isolated_store_impl.h\"\n\n#include \"exe/platform_impl.h\"\n\n// NOLINT(namespace-envoy)\nint main(int argc, char** argv) {\n  if (argc != 3) {\n    std::cerr << \"Usage: \" << argv[0] << \" <input YAML/JSON/proto path> <output text text path>\"\n              << std::endl;\n    return EXIT_FAILURE;\n  }\n\n  Envoy::PlatformImpl platform_impl_;\n  Envoy::Stats::IsolatedStoreImpl stats_store;\n  Envoy::Event::RealTimeSystem time_system; // NO_CHECK_FORMAT(real_time)\n  Envoy::Api::Impl api(platform_impl_.threadFactory(), stats_store, time_system,\n                       platform_impl_.fileSystem());\n\n  envoy::config::bootstrap::v2::Bootstrap bootstrap;\n  Envoy::MessageUtil::loadFromFile(argv[1], bootstrap,\n                                   Envoy::ProtobufMessage::getStrictValidationVisitor(), api);\n  std::ofstream bootstrap_file(argv[2]);\n  bootstrap_file << bootstrap.DebugString();\n  return EXIT_SUCCESS;\n}\n"
  },
  {
    "path": "tools/build_profile.py",
    "content": "#!/usr/bin/env python\n\n# This tool take the foo.dep.log output from a build recipe run under recipe_wrapper.sh on stdin and\n# produces a profile of command execution time on the stdout.\n\nfrom __future__ import print_function\n\nimport re\nimport sys\n\n\ndef PrintProfile(f):\n  prev_cmd = None\n  prev_timestamp = None\n  for line in f:\n    sr = re.match('\\++ (\\d+\\.\\d+) (.*)', line)\n    if sr:\n      timestamp, cmd = sr.groups()\n      if prev_cmd:\n        print('%.2f %s' % (float(timestamp) - float(prev_timestamp), prev_cmd))\n      prev_timestamp, prev_cmd = timestamp, cmd\n\n\nif __name__ == '__main__':\n  PrintProfile(sys.stdin)\n"
  },
  {
    "path": "tools/check_repositories.sh",
    "content": "#!/bin/bash\n\nset -eu\n\n# Check whether any git repositories are defined.\n# Git repository definition contains `commit` and `remote` fields.\nif git grep -n \"commit =\\|remote =\" -- '*.bzl'; then\n  echo \"Using git repositories is not allowed.\"\n  echo \"To ensure that all dependencies can be stored offline in distdir, only HTTP repositories are allowed.\"\n  exit 1\nfi\n\n# Check whether number of defined `url =` or `urls =` and `sha256 =` kwargs in\n# repository definitions is equal.\nurls_count=$(git grep -E \"\\<url(s)? =\" -- '*.bzl' -- ':!bazel/crates.bzl' | wc -l)\nsha256sums_count=$(git grep -E \"\\<sha256 =\" -- '*.bzl' -- ':!bazel/crates.bzl' | wc -l)\n\nif [[ $urls_count != \"$sha256sums_count\" ]]; then\n  echo \"Found more defined repository URLs than SHA256 sums, which means that there are some repositories without sums.\"\n  echo \"Dependencies without SHA256 sums cannot be stored in distdir.\"\n  echo \"Please ensure that every repository has a SHA256 sum.\"\n  echo \"Repositories are defined in the following files:\"\n  echo \"\"\n  echo \"    bazel/repository_locations.bzl\"\n  echo \"    api/bazel/repositories.bzl\"\n  exit 1\nfi\n"
  },
  {
    "path": "tools/clang_tools/README.md",
    "content": "# Envoy Clang Libtooling developer tools\n\n## Overview\n\nA number of tools live in this directory that are intended for use by Envoy\ndevelopers (and potentially CI). These are host tools and should not be linked\ninto the Envoy target. They are based around Clang's\n[libtooling](https://clang.llvm.org/docs/LibTooling.html) libraries, a C++\nframework for writing Clang tools in the style of `clang-format` and\n`clang-check`.\n\n## Building and running\n\nTo build tools in this tree, a Clang binary install must be available. If you\nare building Envoy with `clang`, this should already be true of your system. You\ncan find prebuilt binary releases of Clang at https://releases.llvm.org. You\nwill need the Clang version used by Envoy in CI (currently clang-10.0).\n\nTo build a tool, set the following environment variable:\n\n```console\nexport LLVM_CONFIG=<path to clang installation>/bin/llvm-config\n```\n\nAssuming that `CC` and `CXX` already point at Clang, you should be able to build\nwith:\n\n```console\nbazel build @envoy_dev//clang_tools/syntax_only\n```\n\nTo run `libtooling` based tools against Envoy, you will need to first generate a\ncompilation database, which tells the tool how to take a source file and locate\nits various dependencies. The `tools/gen_compilation_database.py` script\ngenerates this and also does setup of the Bazel cache paths to allow external\ndependencies to be located:\n\n```console\ntools/gen_compilation_database.py --include_headers\n```\n\nFinally, the tool can be run against source files in the Envoy tree:\n\n```console\nbazel-bin/external/envoy_dev/clang_tools/syntax_only/syntax_only \\\n  source/common/common/logger.cc\n```\n\n## Adding a new Envoy libtooling based tool\n\nFollow the example at `tools/clang_tools/syntax_only`, based on the tutorial\nexample at https://clang.llvm.org/docs/LibTooling.html. 
Please use the\n`envoy_clang_tools_cc_binary` Bazel macro for the tool, this disables use of\nRTTI/exceptions and allows developer tools to be structurally excluded from the\nbuild as needed.\n"
  },
  {
    "path": "tools/clang_tools/api_booster/BUILD",
    "content": "load(\n    \"//clang_tools/support:clang_tools.bzl\",\n    \"clang_tools_cc_binary\",\n    \"clang_tools_cc_library\",\n    \"clang_tools_cc_test\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nclang_tools_cc_binary(\n    name = \"api_booster\",\n    srcs = [\"main.cc\"],\n    deps = [\n        \":proto_cxx_utils_lib\",\n        \"@clang_tools//:clang_astmatchers\",\n        \"@clang_tools//:clang_basic\",\n        \"@clang_tools//:clang_tooling\",\n        \"@envoy//tools/type_whisperer:api_type_db_lib\",\n    ],\n)\n\nclang_tools_cc_library(\n    name = \"proto_cxx_utils_lib\",\n    srcs = [\"proto_cxx_utils.cc\"],\n    hdrs = [\"proto_cxx_utils.h\"],\n    deps = [\n        \"@com_google_absl//absl/container:node_hash_map\",\n        \"@com_google_absl//absl/strings\",\n        \"@com_google_absl//absl/types:optional\",\n    ],\n)\n\nclang_tools_cc_test(\n    name = \"proto_cxx_utils_test\",\n    srcs = [\"proto_cxx_utils_test.cc\"],\n    deps = [\":proto_cxx_utils_lib\"],\n)\n"
  },
  {
    "path": "tools/clang_tools/api_booster/main.cc",
    "content": "// Upgrade a single Envoy C++ file to the latest API version.\n//\n// Currently this tool is a WIP and only does inference of .pb[.validate].h\n// #include locations. This already exercises some of the muscles we need, such\n// as AST matching, rudimentary type inference and API type database lookup.\n//\n// NOLINT(namespace-envoy)\n\n#include <fstream>\n#include <iostream>\n#include <regex>\n#include <set>\n\n// Declares clang::SyntaxOnlyAction.\n#include \"clang/ASTMatchers/ASTMatchers.h\"\n#include \"clang/ASTMatchers/ASTMatchFinder.h\"\n#include \"clang/Frontend/FrontendActions.h\"\n#include \"clang/Tooling/CommonOptionsParser.h\"\n#include \"clang/Tooling/Core/Replacement.h\"\n#include \"clang/Tooling/Refactoring.h\"\n#include \"clang/Tooling/ReplacementsYaml.h\"\n\n// Declares llvm::cl::extrahelp.\n#include \"llvm/Support/CommandLine.h\"\n\n#include \"proto_cxx_utils.h\"\n\n#include \"tools/type_whisperer/api_type_db.h\"\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/str_cat.h\"\n\n// Enable to see debug log messages.\n#ifdef ENABLE_DEBUG_LOG\n#define DEBUG_LOG(s)                                                                               \\\n  do {                                                                                             \\\n    std::cerr << (s) << std::endl;                                                                 \\\n  } while (0)\n#else\n#define DEBUG_LOG(s)\n#endif\n\nusing namespace Envoy::Tools::TypeWhisperer;\n\nnamespace ApiBooster {\n\nclass ApiBooster : public clang::ast_matchers::MatchFinder::MatchCallback,\n                   public clang::tooling::SourceFileCallbacks {\npublic:\n  ApiBooster(std::map<std::string, clang::tooling::Replacements>& replacements)\n      : replacements_(replacements) {}\n\n  // AST match callback dispatcher.\n  void run(const clang::ast_matchers::MatchFinder::MatchResult& match_result) override {\n    clang::SourceManager& source_manager = 
match_result.Context->getSourceManager();\n    DEBUG_LOG(\"AST match callback dispatcher\");\n    for (const auto it : match_result.Nodes.getMap()) {\n      const std::string match_text = getSourceText(it.second.getSourceRange(), source_manager);\n      const clang::SourceRange spelling_range =\n          getSpellingRange(it.second.getSourceRange(), source_manager);\n      const std::string spelling_text = getSourceText(spelling_range, source_manager);\n      DEBUG_LOG(absl::StrCat(\"  Result for \", it.first, \" [\", truncateForDebug(match_text), \"]\"));\n      if (match_text != spelling_text) {\n        DEBUG_LOG(absl::StrCat(\"    with spelling text [\", truncateForDebug(spelling_text), \"]\"));\n      }\n    }\n    if (const auto* type_loc = match_result.Nodes.getNodeAs<clang::TypeLoc>(\"type\")) {\n      onTypeLocMatch(*type_loc, source_manager);\n      return;\n    }\n    if (const auto* using_decl = match_result.Nodes.getNodeAs<clang::UsingDecl>(\"using_decl\")) {\n      onUsingDeclMatch(*using_decl, source_manager);\n      return;\n    }\n    if (const auto* decl_ref_expr =\n            match_result.Nodes.getNodeAs<clang::DeclRefExpr>(\"decl_ref_expr\")) {\n      onDeclRefExprMatch(*decl_ref_expr, *match_result.Context, source_manager);\n      return;\n    }\n    if (const auto* call_expr = match_result.Nodes.getNodeAs<clang::CallExpr>(\"call_expr\")) {\n      onCallExprMatch(*call_expr, *match_result.Context, source_manager);\n      return;\n    }\n    if (const auto* member_call_expr =\n            match_result.Nodes.getNodeAs<clang::CXXMemberCallExpr>(\"member_call_expr\")) {\n      onMemberCallExprMatch(*member_call_expr, source_manager);\n      return;\n    }\n    if (const auto* tmpl =\n            match_result.Nodes.getNodeAs<clang::ClassTemplateSpecializationDecl>(\"tmpl\")) {\n      onClassTemplateSpecializationDeclMatch(*tmpl, source_manager);\n      return;\n    }\n  }\n\n  // Visitor callback for start of a compilation unit.\n  bool 
handleBeginSource(clang::CompilerInstance& CI) override {\n    source_api_proto_paths_.clear();\n    return true;\n  }\n\n  // Visitor callback for end of a compilation unit.\n  void handleEndSource() override {\n    // Dump known API header paths to stdout for api_boost.py to rewrite with\n    // (no rewriting support in this tool yet).\n    for (const std::string& proto_path : source_api_proto_paths_) {\n      std::cout << proto_path << std::endl;\n    }\n  }\n\nprivate:\n  static bool isEnvoyNamespace(absl::string_view s) {\n    return absl::StartsWith(s, \"envoy::\") || absl::StartsWith(s, \"::envoy::\");\n  }\n\n  static std::string truncateForDebug(const std::string& text) {\n    const uint32_t MaxExpansionChars = 250;\n    return text.size() > MaxExpansionChars ? text.substr(0, MaxExpansionChars) + \"...\" : text;\n  }\n\n  // Match callback for TypeLoc. These are explicit mentions of the type in the\n  // source. If we have a match on type, we should track the corresponding .pb.h\n  // and attempt to upgrade.\n  void onTypeLocMatch(const clang::TypeLoc& type_loc, const clang::SourceManager& source_manager) {\n    absl::optional<clang::SourceRange> source_range;\n    const std::string type_name =\n        type_loc.getType().getCanonicalType().getUnqualifiedType().getAsString();\n    // Remove qualifiers, e.g. const.\n    const clang::UnqualTypeLoc unqual_type_loc = type_loc.getUnqualifiedLoc();\n    DEBUG_LOG(absl::StrCat(\"Type class \", type_loc.getType()->getTypeClassName()));\n    // Today we are only smart enough to rewrite ElaborateTypeLoc, which are\n    // full namespace prefixed types. We probably will need to support more, in\n    // particular if we want message-level type renaming. 
TODO(htuch): add more\n    // supported AST TypeLoc classes as needed.\n    if (unqual_type_loc.getTypeLocClass() == clang::TypeLoc::Elaborated &&\n        isEnvoyNamespace(getSourceText(\n            getSpellingRange(unqual_type_loc.getSourceRange(), source_manager), source_manager))) {\n      source_range = absl::make_optional<clang::SourceRange>(unqual_type_loc.getSourceRange());\n      tryBoostType(type_name, source_range, source_manager, type_loc.getType()->getTypeClassName(),\n                   false);\n    } else {\n      // If we're not going to rewrite, we still deliver SourceLocation to\n      // tryBoostType to assist with determination of API_NO_BOOST().\n      tryBoostType(type_name, unqual_type_loc.getBeginLoc(), -1, source_manager,\n                   type_loc.getType()->getTypeClassName(), false);\n    }\n  }\n\n  // Match callback for clang::UsingDecl. These are 'using' aliases for API type\n  // names.\n  void onUsingDeclMatch(const clang::UsingDecl& using_decl,\n                        const clang::SourceManager& source_manager) {\n    // Not all using declaration are types, but we try the rewrite in case there\n    // is such an API type database match.\n    const clang::SourceRange source_range = clang::SourceRange(\n        using_decl.getQualifierLoc().getBeginLoc(), using_decl.getNameInfo().getEndLoc());\n    const std::string type_name = getSourceText(source_range, source_manager);\n    tryBoostType(type_name, source_range, source_manager, \"UsingDecl\", true);\n  }\n\n  // Match callback for clang::DeclRefExpr. These occur when enums constants,\n  // e.g. 
foo::bar::kBaz, appear in the source.\n  void onDeclRefExprMatch(const clang::DeclRefExpr& decl_ref_expr, const clang::ASTContext& context,\n                          const clang::SourceManager& source_manager) {\n    // We don't need to consider non-namespace qualified DeclRefExprfor now (no\n    // renaming support yet).\n    if (!decl_ref_expr.hasQualifier()) {\n      return;\n    }\n    const std::string decl_name = decl_ref_expr.getNameInfo().getAsString();\n    // There are generated methods to stringify/parse/validate enum values,\n    // these need special treatment as they look like types with special\n    // suffices.\n    for (const std::string& enum_generated_method_suffix : {\"_Name\", \"_Parse\", \"_IsValid\"}) {\n      if (absl::EndsWith(decl_name, enum_generated_method_suffix)) {\n        // Remove trailing suffix from reference for replacement range and type\n        // name purposes.\n        const clang::SourceLocation begin_loc =\n            source_manager.getSpellingLoc(decl_ref_expr.getBeginLoc());\n        const std::string type_name_with_suffix =\n            getSourceText(decl_ref_expr.getSourceRange(), source_manager);\n        const std::string type_name = type_name_with_suffix.substr(\n            0, type_name_with_suffix.size() - enum_generated_method_suffix.size());\n        tryBoostType(type_name, begin_loc, type_name.size(), source_manager,\n                     \"DeclRefExpr suffixed \" + enum_generated_method_suffix, false);\n        return;\n      }\n    }\n    // Remove trailing : from namespace qualifier.\n    const clang::SourceRange source_range =\n        clang::SourceRange(decl_ref_expr.getQualifierLoc().getBeginLoc(),\n                           decl_ref_expr.getQualifierLoc().getEndLoc().getLocWithOffset(-1));\n    // Only try to boost type if it's explicitly an Envoy qualified type.\n    const std::string source_type_name = getSourceText(source_range, source_manager);\n    const clang::QualType ast_type =\n        
decl_ref_expr.getDecl()->getType().getCanonicalType().getUnqualifiedType();\n    const std::string ast_type_name = ast_type.getAsString();\n    if (isEnvoyNamespace(source_type_name)) {\n      // Generally we pull the type from the named entity's declaration type,\n      // since this allows us to map from things like envoy::type::HTTP2 to the\n      // underlying fully qualified envoy::type::CodecClientType::HTTP2 prior to\n      // API type database lookup. However, for the generated static methods or\n      // field accessors, we don't want to deal with lookup via the function\n      // type, so we use the source text directly.\n      const std::string type_name = ast_type.isPODType(context) ? ast_type_name : source_type_name;\n      tryBoostType(type_name, source_range, source_manager, \"DeclRefExpr\", true);\n    }\n    const auto latest_type_info = getTypeInformationFromCType(ast_type_name, true);\n    // In some cases we need to upgrade the name the DeclRefExpr points at. If\n    // this isn't a known API type, our work here is done.\n    if (!latest_type_info) {\n      return;\n    }\n    const clang::SourceRange decl_source_range = decl_ref_expr.getNameInfo().getSourceRange();\n    // Deprecated enum constants need to be upgraded.\n    if (latest_type_info->enum_type_) {\n      const auto enum_value_rename =\n          ProtoCxxUtils::renameEnumValue(decl_name, latest_type_info->renames_);\n      if (enum_value_rename) {\n        const clang::SourceRange decl_source_range = decl_ref_expr.getNameInfo().getSourceRange();\n        const clang::tooling::Replacement enum_value_replacement(\n            source_manager, source_manager.getSpellingLoc(decl_source_range.getBegin()),\n            sourceRangeLength(decl_source_range, source_manager), *enum_value_rename);\n        insertReplacement(enum_value_replacement);\n      }\n      return;\n    }\n    // We need to map from envoy::type::matcher::StringMatcher::kRegex to\n    // 
envoy::type::matcher::v3::StringMatcher::kHiddenEnvoyDeprecatedRegex.\n    const auto constant_rename =\n        ProtoCxxUtils::renameConstant(decl_name, latest_type_info->renames_);\n    if (constant_rename) {\n      const clang::tooling::Replacement constant_replacement(\n          source_manager, decl_source_range.getBegin(),\n          sourceRangeLength(decl_source_range, source_manager), *constant_rename);\n      insertReplacement(constant_replacement);\n    }\n  }\n\n  // Match callback clang::CallExpr. We don't need to rewrite, but if it's something like\n  // loadFromYamlAndValidate, we might need to look at the argument type to\n  // figure out any corresponding .pb.validate.h we require.\n  void onCallExprMatch(const clang::CallExpr& call_expr, const clang::ASTContext& context,\n                       const clang::SourceManager& source_manager) {\n    auto* direct_callee = call_expr.getDirectCallee();\n    if (direct_callee != nullptr) {\n      const absl::node_hash_map<std::string, int> ValidateNameToArg = {\n          {\"loadFromYamlAndValidate\", 1},\n          {\"loadFromFileAndValidate\", 1},\n          {\"downcastAndValidate\", -1},\n          {\"validate\", 0},\n      };\n      const std::string& callee_name = direct_callee->getNameInfo().getName().getAsString();\n      DEBUG_LOG(absl::StrCat(\"callee_name \", callee_name));\n      const auto arg = ValidateNameToArg.find(callee_name);\n      // Sometimes we hit false positives because we aren't qualifying above.\n      // TODO(htuch): fix this.\n      if (arg != ValidateNameToArg.end() &&\n          arg->second < static_cast<int>(call_expr.getNumArgs())) {\n        const std::string type_name = arg->second >= 0 ? 
call_expr.getArg(arg->second)\n                                                             ->getType()\n                                                             .getCanonicalType()\n                                                             .getUnqualifiedType()\n                                                             .getAsString()\n                                                       : call_expr.getCallReturnType(context)\n                                                             .getNonReferenceType()\n                                                             .getCanonicalType()\n                                                             .getUnqualifiedType()\n                                                             .getAsString();\n        DEBUG_LOG(absl::StrCat(\"Validation header boosting \", type_name));\n        tryBoostType(type_name, {}, source_manager, \"validation invocation\", true, true);\n      }\n    }\n  }\n\n  // Match callback for clang::CxxMemberCallExpr. We rewrite things like\n  // ->mutable_foo() to ->mutable_foo_new_name() during renames.\n  void onMemberCallExprMatch(const clang::CXXMemberCallExpr& member_call_expr,\n                             const clang::SourceManager& source_manager) {\n    const std::string type_name =\n        member_call_expr.getObjectType().getCanonicalType().getUnqualifiedType().getAsString();\n    const auto latest_type_info = getTypeInformationFromCType(type_name, true);\n    // If this isn't a known API type, our work here is done.\n    if (!latest_type_info) {\n      return;\n    }\n    // Figure out if the referenced object was declared under API_NO_BOOST. 
This\n    // only works for simple cases, best effort.\n    const auto* object_expr = member_call_expr.getImplicitObjectArgument();\n    if (object_expr != nullptr) {\n      const auto* decl = object_expr->getReferencedDeclOfCallee();\n      if (decl != nullptr &&\n          getSourceText(decl->getSourceRange(), source_manager).find(\"API_NO_BOOST\") !=\n              std::string::npos) {\n        DEBUG_LOG(\"Skipping method replacement due to API_NO_BOOST\");\n        return;\n      }\n    }\n    tryRenameMethod(*latest_type_info, member_call_expr.getExprLoc(), source_manager);\n  }\n\n  bool tryRenameMethod(const TypeInformation& type_info, clang::SourceLocation method_loc,\n                       const clang::SourceManager& source_manager) {\n    const clang::SourceRange source_range = {source_manager.getSpellingLoc(method_loc),\n                                             source_manager.getSpellingLoc(method_loc)};\n    const std::string method_name = getSourceText(source_range, source_manager);\n    DEBUG_LOG(absl::StrCat(\"Checking for rename of \", method_name));\n    const auto method_rename = ProtoCxxUtils::renameMethod(method_name, type_info.renames_);\n    if (method_rename) {\n      const clang::tooling::Replacement method_replacement(\n          source_manager, source_range.getBegin(), sourceRangeLength(source_range, source_manager),\n          *method_rename);\n      insertReplacement(method_replacement);\n      return true;\n    }\n    return false;\n  }\n\n  // Match callback for clang::ClassTemplateSpecializationDecl. 
An additional\n  // place we need to look for .pb.validate.h reference is instantiation of\n  // FactoryBase.\n  void onClassTemplateSpecializationDeclMatch(const clang::ClassTemplateSpecializationDecl& tmpl,\n                                              const clang::SourceManager& source_manager) {\n    const std::string tmpl_type_name = tmpl.getSpecializedTemplate()\n                                           ->getInjectedClassNameSpecialization()\n                                           .getCanonicalType()\n                                           .getAsString();\n    if (absl::EndsWith(tmpl_type_name, \"FactoryBase<type-parameter-0-0>\")) {\n      const std::string type_name = tmpl.getTemplateArgs()\n                                        .get(0)\n                                        .getAsType()\n                                        .getCanonicalType()\n                                        .getUnqualifiedType()\n                                        .getAsString();\n      tryBoostType(type_name, {}, source_manager, \"FactoryBase template\", true, true);\n    }\n    if (tmpl_type_name == \"FactoryBase<type-parameter-0-0, type-parameter-0-1>\") {\n      const std::string type_name_0 = tmpl.getTemplateArgs()\n                                          .get(0)\n                                          .getAsType()\n                                          .getCanonicalType()\n                                          .getUnqualifiedType()\n                                          .getAsString();\n      tryBoostType(type_name_0, {}, source_manager, \"FactoryBase template\", true, true);\n      const std::string type_name_1 = tmpl.getTemplateArgs()\n                                          .get(1)\n                                          .getAsType()\n                                          .getCanonicalType()\n                                          .getUnqualifiedType()\n                                          .getAsString();\n      
tryBoostType(type_name_1, {}, source_manager, \"FactoryBase template\", true, true);\n    }\n  }\n\n  // Attempt to boost a given type and rewrite the given source range.\n  void tryBoostType(const std::string& type_name, absl::optional<clang::SourceRange> source_range,\n                    const clang::SourceManager& source_manager, absl::string_view debug_description,\n                    bool requires_enum_truncation, bool validation_required = false) {\n    if (source_range) {\n      tryBoostType(type_name, source_range->getBegin(),\n                   sourceRangeLength(*source_range, source_manager), source_manager,\n                   debug_description, requires_enum_truncation, validation_required);\n    } else {\n      tryBoostType(type_name, {}, -1, source_manager, debug_description, requires_enum_truncation,\n                   validation_required);\n    }\n  }\n\n  bool underApiNoBoost(clang::SourceLocation loc, const clang::SourceManager& source_manager) {\n    if (loc.isMacroID()) {\n      const auto macro_name = clang::Lexer::getImmediateMacroName(loc, source_manager, lexer_lopt_);\n      if (macro_name.str() == \"API_NO_BOOST\") {\n        return true;\n      }\n    }\n    return false;\n  }\n\n  void tryBoostType(const std::string& type_name, clang::SourceLocation begin_loc, int length,\n                    const clang::SourceManager& source_manager, absl::string_view debug_description,\n                    bool requires_enum_truncation, bool validation_required = false) {\n    bool is_skip_macro = false;\n    if (underApiNoBoost(begin_loc, source_manager)) {\n      DEBUG_LOG(\"Skipping replacement due to API_NO_BOOST\");\n      is_skip_macro = true;\n    }\n    const auto type_info = getTypeInformationFromCType(type_name, !is_skip_macro);\n    // If this isn't a known API type, our work here is done.\n    if (!type_info) {\n      return;\n    }\n    DEBUG_LOG(absl::StrCat(\"Matched type '\", type_name, \"' (\", debug_description, \") length \",\n   
                        length, \" at \", begin_loc.printToString(source_manager)));\n    // Track corresponding imports.\n    source_api_proto_paths_.insert(adjustProtoSuffix(type_info->proto_path_, \".pb.h\"));\n    if (validation_required) {\n      source_api_proto_paths_.insert(adjustProtoSuffix(type_info->proto_path_, \".pb.validate.h\"));\n    }\n    // Not all AST matchers know how to do replacements (yet?).\n    if (length == -1 || is_skip_macro) {\n      return;\n    }\n    const clang::SourceLocation spelling_begin = source_manager.getSpellingLoc(begin_loc);\n    // We need to look at the text we're replacing to decide whether we should\n    // use the qualified C++'ified proto name.\n    const bool qualified =\n        getSourceText(spelling_begin, length, source_manager).find(\"::\") != std::string::npos;\n    std::string case_residual;\n    if (absl::EndsWith(type_name, \"Case\")) {\n      case_residual = type_name.substr(type_name.rfind(':') - 1);\n    }\n    // Add corresponding replacement.\n    const clang::tooling::Replacement type_replacement(\n        source_manager, source_manager.getSpellingLoc(begin_loc), length,\n        ProtoCxxUtils::protoToCxxType(type_info->type_name_, qualified,\n                                      type_info->enum_type_ && requires_enum_truncation) +\n            case_residual);\n    insertReplacement(type_replacement);\n  }\n\n  void insertReplacement(const clang::tooling::Replacement& replacement) {\n    llvm::Error error = replacements_[replacement.getFilePath()].add(replacement);\n    if (error) {\n      std::cerr << \"  Replacement insertion error: \" << llvm::toString(std::move(error))\n                << std::endl;\n    } else {\n      std::cerr << \"  Replacement added: \" << replacement.toString() << std::endl;\n    }\n  }\n\n  // Modeled after getRangeSize() in Clang's Replacements.cpp. 
Turns out it's\n  // non-trivial to get the actual length of a SourceRange, as the end location\n  // point to the start of the last token.\n  int sourceRangeLength(clang::SourceRange source_range,\n                        const clang::SourceManager& source_manager) {\n    const clang::SourceLocation spelling_begin =\n        source_manager.getSpellingLoc(source_range.getBegin());\n    const clang::SourceLocation spelling_end = source_manager.getSpellingLoc(source_range.getEnd());\n    std::pair<clang::FileID, unsigned> start = source_manager.getDecomposedLoc(spelling_begin);\n    std::pair<clang::FileID, unsigned> end = source_manager.getDecomposedLoc(spelling_end);\n    if (start.first != end.first) {\n      return -1;\n    }\n    end.second += clang::Lexer::MeasureTokenLength(spelling_end, source_manager, lexer_lopt_);\n    return end.second - start.second;\n  }\n\n  std::string getSourceText(clang::SourceLocation begin_loc, int size,\n                            const clang::SourceManager& source_manager) {\n    return clang::Lexer::getSourceText(\n        {clang::SourceRange(begin_loc, begin_loc.getLocWithOffset(size)), false}, source_manager,\n        lexer_lopt_, 0);\n  }\n\n  std::string getSourceText(clang::SourceRange source_range,\n                            const clang::SourceManager& source_manager) {\n    return clang::Lexer::getSourceText(clang::CharSourceRange::getTokenRange(source_range),\n                                       source_manager, lexer_lopt_, 0);\n  }\n\n  void addNamedspaceQualifiedTypeReplacement() {}\n\n  // Remove .proto from a path, apply specified suffix instead.\n  std::string adjustProtoSuffix(absl::string_view proto_path, absl::string_view suffix) {\n    return absl::StrCat(proto_path.substr(0, proto_path.size() - 6), suffix);\n  }\n\n  // Obtain the latest type information for a given from C++ type, e.g. 
envoy:config::v2::Cluster,\n  // from the API type database.\n  absl::optional<TypeInformation> getTypeInformationFromCType(const std::string& c_type_name,\n                                                              bool latest) {\n    // Ignore compound or non-API types.\n    // TODO(htuch): this is all super hacky and not really right, we should be\n    // removing qualifiers etc. to get to the underlying type name.\n    const std::string type_name = std::regex_replace(c_type_name, std::regex(\"^(class|enum) \"), \"\");\n    if (!isEnvoyNamespace(type_name) || absl::StrContains(type_name, \" \")) {\n      return {};\n    }\n    const std::string proto_type_name = ProtoCxxUtils::cxxToProtoType(type_name);\n\n    // Use API type database to map from proto type to path.\n    auto result = latest ? ApiTypeDb::getLatestTypeInformation(proto_type_name)\n                         : ApiTypeDb::getExistingTypeInformation(proto_type_name);\n    if (result) {\n      // Remove the .proto extension.\n      return result;\n    } else if (!absl::StartsWith(proto_type_name, \"envoy.HotRestart\") &&\n               !absl::StartsWith(proto_type_name, \"envoy.RouterCheckToolSchema\") &&\n               !absl::StartsWith(proto_type_name, \"envoy.annotations\") &&\n               !absl::StartsWith(proto_type_name, \"envoy.test\") &&\n               !absl::StartsWith(proto_type_name, \"envoy.tracers.xray.daemon\")) {\n      // Die hard if we don't have a useful proto type for something that looks\n      // like an API type(modulo a short allowlist).\n      std::cerr << \"Unknown API type: \" << proto_type_name << std::endl;\n      // TODO(htuch): maybe there is a nicer way to terminate AST traversal?\n      ::exit(1);\n    }\n\n    return {};\n  }\n\n  static clang::SourceRange getSpellingRange(clang::SourceRange source_range,\n                                             const clang::SourceManager& source_manager) {\n    return 
{source_manager.getSpellingLoc(source_range.getBegin()),\n            source_manager.getSpellingLoc(source_range.getEnd())};\n  }\n\n  // Set of inferred .pb[.validate].h, updated as the AST matcher callbacks above fire.\n  std::set<std::string> source_api_proto_paths_;\n  // Map from source file to replacements.\n  std::map<std::string, clang::tooling::Replacements>& replacements_;\n  // Language options for interacting with Lexer. Currently empty.\n  clang::LangOptions lexer_lopt_;\n}; // namespace ApiBooster\n\n} // namespace ApiBooster\n\nint main(int argc, const char** argv) {\n  // Apply a custom category to all command-line options so that they are the\n  // only ones displayed.\n  llvm::cl::OptionCategory api_booster_tool_category(\"api-booster options\");\n\n  clang::tooling::CommonOptionsParser options_parser(argc, argv, api_booster_tool_category);\n  clang::tooling::RefactoringTool tool(options_parser.getCompilations(),\n                                       options_parser.getSourcePathList());\n\n  ApiBooster::ApiBooster api_booster(tool.getReplacements());\n  clang::ast_matchers::MatchFinder finder;\n\n  // Match on all mentions of types in the AST.\n  auto type_matcher =\n      clang::ast_matchers::typeLoc(clang::ast_matchers::isExpansionInMainFile()).bind(\"type\");\n  finder.addMatcher(type_matcher, &api_booster);\n\n  // Match on all \"using\" declarations.\n  auto using_decl_matcher =\n      clang::ast_matchers::usingDecl(clang::ast_matchers::isExpansionInMainFile())\n          .bind(\"using_decl\");\n  finder.addMatcher(using_decl_matcher, &api_booster);\n\n  // Match on references to enum constants.\n  auto decl_ref_expr_matcher =\n      clang::ast_matchers::declRefExpr(clang::ast_matchers::isExpansionInMainFile())\n          .bind(\"decl_ref_expr\");\n  finder.addMatcher(decl_ref_expr_matcher, &api_booster);\n\n  // Match on all call expressions. 
We are interested in particular in calls\n  // where validation on protos is performed.\n  auto call_matcher =\n      clang::ast_matchers::callExpr(clang::ast_matchers::isExpansionInMainFile()).bind(\"call_expr\");\n  finder.addMatcher(call_matcher, &api_booster);\n\n  // Match on all .foo() or ->foo() expressions. We are interested in these for renames\n  // and deprecations.\n  auto member_call_expr =\n      clang::ast_matchers::cxxMemberCallExpr(clang::ast_matchers::isExpansionInMainFile())\n          .bind(\"member_call_expr\");\n  finder.addMatcher(member_call_expr, &api_booster);\n\n  // Match on all template instantiations. We are interested in particular in\n  // instantiations of factories where validation on protos is performed.\n  auto tmpl_matcher = clang::ast_matchers::classTemplateSpecializationDecl(\n                          clang::ast_matchers::matchesName(\".*FactoryBase.*\"))\n                          .bind(\"tmpl\");\n  finder.addMatcher(tmpl_matcher, &api_booster);\n\n  // Apply ApiBooster to AST matches. 
This will generate a set of replacements in\n  // tool.getReplacements().\n  const int run_result = tool.run(newFrontendActionFactory(&finder, &api_booster).get());\n  if (run_result != 0) {\n    std::cerr << \"Exiting with non-zero result \" << run_result << std::endl;\n    return run_result;\n  }\n\n  // Serialize replacements to <main source file path>.clang-replacements.yaml.\n  // These are suitable for consuming by clang-apply-replacements.\n  for (const auto& file_replacement : tool.getReplacements()) {\n    // Populate TranslationUnitReplacements from file replacements (this is what\n    // there exists llvm::yaml serialization support for).\n    clang::tooling::TranslationUnitReplacements tu_replacements;\n    tu_replacements.MainSourceFile = file_replacement.first;\n    for (const auto& r : file_replacement.second) {\n      tu_replacements.Replacements.push_back(r);\n      DEBUG_LOG(r.toString());\n    }\n    // Serialize TranslationUnitReplacements to YAML.\n    std::string yaml_content;\n    llvm::raw_string_ostream yaml_content_stream(yaml_content);\n    llvm::yaml::Output yaml(yaml_content_stream);\n    yaml << tu_replacements;\n    // Write to <main source file path>.clang-replacements.yaml.\n    std::ofstream serialized_replacement_file(tu_replacements.MainSourceFile +\n                                              \".clang-replacements.yaml\");\n    serialized_replacement_file << yaml_content_stream.str();\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tools/clang_tools/api_booster/proto_cxx_utils.cc",
    "content": "#include \"proto_cxx_utils.h\"\n\nnamespace ApiBooster {\n\nstd::string ProtoCxxUtils::cxxToProtoType(const std::string& cxx_type_name) {\n  // Convert from C++ to a qualified proto type. This is fairly hacky stuff,\n  // we're essentially reversing the conventions that the protobuf C++\n  // compiler is using, e.g. replacing _ and :: with . as needed, guessing\n  // that a Case suffix implies some enum switching.\n  const std::string rel_cxx_type_name =\n      absl::StartsWith(cxx_type_name, \"::\") ? cxx_type_name.substr(2) : cxx_type_name;\n  std::vector<std::string> frags = absl::StrSplit(rel_cxx_type_name, \"::\");\n  // TODO(htuch): if we add some more stricter checks on mangled name usage in\n  // check_format.py, we should be able to eliminate this.\n  for (std::string& frag : frags) {\n    if (!frag.empty() && isupper(frag[0])) {\n      frag = std::regex_replace(frag, std::regex(\"_\"), \".\");\n    }\n  }\n  if (absl::EndsWith(frags.back(), \"Case\")) {\n    frags.pop_back();\n  }\n  return absl::StrJoin(frags, \".\");\n}\n\nstd::string ProtoCxxUtils::protoToCxxType(const std::string& proto_type_name, bool qualified,\n                                          bool enum_type) {\n  std::vector<std::string> frags = absl::StrSplit(proto_type_name, '.');\n  // We drop the enum type name, it's not needed and confuses the mangling\n  // when enums are nested in messages.\n  if (enum_type) {\n    frags.pop_back();\n  }\n  if (qualified) {\n    return absl::StrJoin(frags, \"::\");\n  } else {\n    return frags.back();\n  }\n}\n\nabsl::optional<std::string>\nProtoCxxUtils::renameMethod(absl::string_view method_name,\n                            const absl::node_hash_map<std::string, std::string> renames) {\n  // Simple O(N * M) match, where M is constant (the set of prefixes/suffixes) so\n  // should be fine.\n  for (const auto& field_rename : renames) {\n    const std::vector<std::string> GeneratedMethodPrefixes = {\n        \"clear_\", \"set_\", 
\"has_\", \"mutable_\", \"set_allocated_\", \"release_\", \"add_\", \"\",\n    };\n    // Most of the generated methods are some prefix.\n    for (const std::string& prefix : GeneratedMethodPrefixes) {\n      if (method_name == prefix + field_rename.first) {\n        return prefix + field_rename.second;\n      }\n    }\n    // _size is the only suffix.\n    if (method_name == field_rename.first + \"_size\") {\n      return field_rename.second + \"_size\";\n    }\n  }\n  return {};\n}\n\nabsl::optional<std::string>\nProtoCxxUtils::renameConstant(absl::string_view constant_name,\n                              const absl::node_hash_map<std::string, std::string> renames) {\n  if (constant_name.size() < 2 || constant_name[0] != 'k' || !isupper(constant_name[1])) {\n    return {};\n  }\n  std::vector<std::string> frags;\n  for (const char c : constant_name.substr(1)) {\n    if (isupper(c)) {\n      frags.emplace_back(1, tolower(c));\n    } else {\n      frags.back().push_back(c);\n    }\n  }\n  const std::string field_name = absl::StrJoin(frags, \"_\");\n  const auto it = renames.find(field_name);\n  if (it == renames.cend()) {\n    return {};\n  }\n  std::vector<std::string> new_frags = absl::StrSplit(it->second, '_');\n  for (auto& frag_it : new_frags) {\n    if (!frag_it.empty()) {\n      frag_it[0] = toupper(frag_it[0]);\n    }\n  }\n  return \"k\" + absl::StrJoin(new_frags, \"\");\n}\n\nabsl::optional<std::string>\nProtoCxxUtils::renameEnumValue(absl::string_view enum_value_name,\n                               const absl::node_hash_map<std::string, std::string> renames) {\n  const auto it = renames.find(std::string(enum_value_name));\n  if (it == renames.cend()) {\n    return {};\n  }\n  return it->second;\n}\n\n} // namespace ApiBooster\n"
  },
  {
    "path": "tools/clang_tools/api_booster/proto_cxx_utils.h",
    "content": "#pragma once\n\n#include <regex>\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/types/optional.h\"\n\nnamespace ApiBooster {\n\n// Protobuf C++ code generation hackery. This is where the utilities that map\n// between C++ and protobuf types, enum constants and identifiers live. Most of\n// this is heuristic and needs to match whatever the protobuf compiler does.\n// TODO(htuch): investigate what can be done to make use of embedded proto\n// descriptors in generated stubs to make these utils more robust.\nclass ProtoCxxUtils {\npublic:\n  // Convert from a C++ type, e.g. foo::bar::v2, to a protobuf type, e.g.\n  // foo.bar.v2.\n  static std::string cxxToProtoType(const std::string& cxx_type_name);\n\n  // Given a method, e.g. mutable_foo, rele, and a map of renames in a give proto,\n  // determine if the method is covered by a generated C++ stub for a renamed\n  // field in proto, and if so, return the new method name.\n  static absl::optional<std::string>\n  renameMethod(absl::string_view method_name,\n               const absl::node_hash_map<std::string, std::string> renames);\n\n  // Given a constant, e.g. kFooBar, determine if it needs upgrading. We need\n  // this for synthesized oneof cases.\n  static absl::optional<std::string>\n  renameConstant(absl::string_view constant_name,\n                 const absl::node_hash_map<std::string, std::string> renames);\n\n  // Given an enum value, e.g. FOO_BAR determine if it needs upgrading.\n  static absl::optional<std::string>\n  renameEnumValue(absl::string_view enum_value_name,\n                  const absl::node_hash_map<std::string, std::string> renames);\n\n  // Convert from a protobuf type, e.g. 
foo.bar.v2, to a C++ type, e.g.\n  // foo::bar::v2.\n  static std::string protoToCxxType(const std::string& proto_type_name, bool qualified,\n                                    bool enum_type);\n};\n\n} // namespace ApiBooster\n"
  },
  {
    "path": "tools/clang_tools/api_booster/proto_cxx_utils_test.cc",
    "content": "#include \"gtest/gtest.h\"\n#include \"proto_cxx_utils.h\"\n\nnamespace ApiBooster {\nnamespace {\n\n// Validate C++ to proto type name conversion.\nTEST(ProtoCxxUtils, CxxToProtoType) {\n  EXPECT_EQ(\"\", ProtoCxxUtils::cxxToProtoType(\"\"));\n  EXPECT_EQ(\"foo\", ProtoCxxUtils::cxxToProtoType(\"foo\"));\n  EXPECT_EQ(\"foo.bar\", ProtoCxxUtils::cxxToProtoType(\"foo::bar\"));\n  EXPECT_EQ(\"foo.bar\", ProtoCxxUtils::cxxToProtoType(\"foo::bar::FooCase\"));\n  EXPECT_EQ(\"foo.bar.Baz.Blah\", ProtoCxxUtils::cxxToProtoType(\"foo::bar::Baz_Blah\"));\n}\n\n// Validate proto to C++ type name conversion.\nTEST(ProtoCxxUtils, ProtoToCxxType) {\n  EXPECT_EQ(\"\", ProtoCxxUtils::protoToCxxType(\"\", false, false));\n  EXPECT_EQ(\"\", ProtoCxxUtils::protoToCxxType(\"\", true, false));\n  EXPECT_EQ(\"foo\", ProtoCxxUtils::protoToCxxType(\"foo\", false, false));\n  EXPECT_EQ(\"foo\", ProtoCxxUtils::protoToCxxType(\"foo\", true, false));\n  EXPECT_EQ(\"bar\", ProtoCxxUtils::protoToCxxType(\"foo.bar\", false, false));\n  EXPECT_EQ(\"foo::bar\", ProtoCxxUtils::protoToCxxType(\"foo.bar\", true, false));\n  EXPECT_EQ(\"foo::Bar\", ProtoCxxUtils::protoToCxxType(\"foo.Bar\", true, false));\n  EXPECT_EQ(\"foo\", ProtoCxxUtils::protoToCxxType(\"foo.Bar\", true, true));\n  EXPECT_EQ(\"foo::Bar::Baz\", ProtoCxxUtils::protoToCxxType(\"foo.Bar.Baz\", true, false));\n  EXPECT_EQ(\"foo::Bar::Baz::Blah\", ProtoCxxUtils::protoToCxxType(\"foo.Bar.Baz.Blah\", true, false));\n  EXPECT_EQ(\"foo::Bar::Baz\", ProtoCxxUtils::protoToCxxType(\"foo.Bar.Baz.Blah\", true, true));\n}\n\n// Validate proto field accessor upgrades.\nTEST(ProtoCxxUtils, RenameMethod) {\n  const absl::node_hash_map<std::string, std::string> renames = {\n      {\"foo\", \"bar\"},\n      {\"bar\", \"baz\"},\n  };\n  EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameMethod(\"whatevs\", renames));\n  EXPECT_EQ(\"bar\", ProtoCxxUtils::renameMethod(\"foo\", renames));\n  EXPECT_EQ(\"baz\", 
ProtoCxxUtils::renameMethod(\"bar\", renames));\n\n  EXPECT_EQ(\"clear_bar\", ProtoCxxUtils::renameMethod(\"clear_foo\", renames));\n  EXPECT_EQ(\"set_bar\", ProtoCxxUtils::renameMethod(\"set_foo\", renames));\n  EXPECT_EQ(\"has_bar\", ProtoCxxUtils::renameMethod(\"has_foo\", renames));\n  EXPECT_EQ(\"mutable_bar\", ProtoCxxUtils::renameMethod(\"mutable_foo\", renames));\n  EXPECT_EQ(\"set_allocated_bar\", ProtoCxxUtils::renameMethod(\"set_allocated_foo\", renames));\n  EXPECT_EQ(\"release_bar\", ProtoCxxUtils::renameMethod(\"release_foo\", renames));\n  EXPECT_EQ(\"add_bar\", ProtoCxxUtils::renameMethod(\"add_foo\", renames));\n  EXPECT_EQ(\"bar_size\", ProtoCxxUtils::renameMethod(\"foo_size\", renames));\n}\n\n// Validate proto constant upgrades.\nTEST(ProtoCxxUtils, RenameConstant) {\n  const absl::node_hash_map<std::string, std::string> renames = {\n      {\"foo_bar\", \"bar_foo\"},\n      {\"foo_baz\", \"baz\"},\n  };\n  EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameConstant(\"whatevs\", renames));\n  EXPECT_EQ(\"kBarFoo\", ProtoCxxUtils::renameConstant(\"kFooBar\", renames));\n  EXPECT_EQ(\"kBaz\", ProtoCxxUtils::renameConstant(\"kFooBaz\", renames));\n}\n\n// Validate proto enum value upgrades.\nTEST(ProtoCxxUtils, RenameEnumValue) {\n  const absl::node_hash_map<std::string, std::string> renames = {\n      {\"FOO_BAR\", \"BAR_FOO\"},\n  };\n  EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameEnumValue(\"FOO_BAZ\", renames));\n  EXPECT_EQ(\"BAR_FOO\", ProtoCxxUtils::renameEnumValue(\"FOO_BAR\", renames));\n}\n\n} // namespace\n} // namespace ApiBooster\n"
  },
  {
    "path": "tools/clang_tools/support/BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n"
  },
  {
    "path": "tools/clang_tools/support/BUILD.prebuilt",
    "content": "# Clang 10.0 library pre-built Bazel.\n#\n# This file was mostly manually assembled (with some hacky Python scripts) from\n# clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz and corresponding\n# https://github.com/llvm/llvm-project.git source. It needs Clang 10.0 to work.\n#\n# The BUILD file has sufficient dependency relationships\n# between the prebuilt libraries in a clang-llvm distribution to support building libtooling\n# based binaries in the Envoy repository. We're chasing a moving target and as new libraries are\n# depended upon in new Clang versions, it will be necessary to augment these definitions.\n#\n# The key to understanding llvm-project layout is that there are a collection of libraries in\n# {clang,llvm}/lib. For the clang libraries, the CMakeLists.txt supplies the Clang library deps in\n# LINK_LIBS inside add_clang_library() and the llvm deps in LLVM_LINK_COMPONENTS. For the llvm\n# libraries, LLVMBuild.txt provides llvm deps (it does not reference any of the clang libs).\n#\n# It's kind of terrible that we need to do this by hand, but llvm-project is CMake canonical, and we\n# don't want to use rules_foreign_cc to build the libraries from source just to access some\n# developer libs which will exist on the filesystem of most devs who are using Clang.\n\npackage(default_visibility = [\"//visibility:public\"])\n\n# We should use cc_import below, but it doesn't like .def files in Clang. 
See\n# https://github.com/bazelbuild/bazel/issues/6767.\n#\n\ncc_library(\n    name = \"clang_analysis\",\n    srcs = [\"lib/libclangAnalysis.a\"],\n    hdrs = glob([\"clang/Analysis/**\"]),\n    deps = [\n        \":clang_ast\",\n        \":clang_astmatchers\",\n        \":clang_basic\",\n        \":clang_lex\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_basic\",\n    srcs = [\"lib/libclangBasic.a\"],\n    hdrs = glob([\n        \"clang/Basic/**\",\n        \"clang-c/**\",\n    ]),\n    deps = [\n        \":llvm_core\",\n        \":llvm_mc\",\n        \":llvm_support\",\n        \":llvm_target\",\n    ],\n)\n\ncc_library(\n    name = \"clang_ast\",\n    srcs = [\"lib/libclangAST.a\"],\n    hdrs = glob([\"clang/AST/**\"]),\n    deps = [\n        \":clang_basic\",\n        \":clang_lex\",\n        \":llvm_binaryformat\",\n        \":llvm_core\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_astmatchers\",\n    srcs = [\"lib/libclangASTMatchers.a\"],\n    hdrs = glob([\"clang/ASTMatchers/**\"]),\n    deps = [\n        \":clang_ast\",\n        \":clang_basic\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_driver\",\n    srcs = [\"lib/libclangDriver.a\"],\n    hdrs = glob([\"clang/Driver/**\"]),\n    deps = [\n        \":clang_basic\",\n        \":llvm_binaryformat\",\n        \":llvm_option\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_edit\",\n    srcs = [\"lib/libclangEdit.a\"],\n    hdrs = glob([\"clang/Edit/**\"]),\n    deps = [\n        \":clang_ast\",\n        \":clang_basic\",\n        \":clang_lex\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_format\",\n    srcs = [\"lib/libclangFormat.a\"],\n    hdrs = glob([\"clang/Format/**\"]),\n    deps = [\n        \":clang_basic\",\n        \":clang_lex\",\n        \":clang_toolingcore\",\n        \":clang_toolinginclusions\",\n        \":llvm_support\",\n    
],\n)\n\ncc_library(\n    name = \"clang_frontend\",\n    srcs = [\"lib/libclangFrontend.a\"],\n    hdrs = glob([\"clang/Frontend/**\"]),\n    deps = [\n        \":clang_ast\",\n        \":clang_basic\",\n        \":clang_driver\",\n        \":clang_edit\",\n        \":clang_lex\",\n        \":clang_parse\",\n        \":clang_sema\",\n        \":clang_serialization\",\n        \":llvm_bitreader\",\n        \":llvm_option\",\n        \":llvm_profiledata\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_lex\",\n    srcs = [\"lib/libclangLex.a\"],\n    hdrs = glob([\"clang/Lex/**\"]),\n    deps = [\n        \":clang_basic\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_parse\",\n    srcs = [\"lib/libclangParse.a\"],\n    hdrs = glob([\"clang/Parse/**\"]),\n    deps = [\n        \":clang_ast\",\n        \":clang_basic\",\n        \":clang_lex\",\n        \":clang_sema\",\n        \":llvm_frontend_omp\",\n        \":llvm_mc\",\n        \":llvm_mcparser\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_rewrite\",\n    srcs = [\"lib/libclangRewrite.a\"],\n    hdrs = glob([\"clang/Rewrite/**\"]),\n    deps = [\n        \":clang_basic\",\n        \":clang_lex\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_sema\",\n    srcs = [\"lib/libclangSema.a\"],\n    hdrs = glob([\"clang/Sema/**\"]),\n    deps = [\n        \":clang_analysis\",\n        \":clang_ast\",\n        \":clang_basic\",\n        \":clang_edit\",\n        \":clang_lex\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_serialization\",\n    srcs = [\"lib/libclangSerialization.a\"],\n    hdrs = glob([\"clang/Serialization/**\"]),\n    deps = [\n        \":clang_ast\",\n        \":clang_basic\",\n        \":clang_lex\",\n        \":clang_sema\",\n        \":llvm_bitreader\",\n        \":llvm_bitstreamreader\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = 
\"clang_tooling\",\n    srcs = [\"lib/libclangTooling.a\"],\n    hdrs = glob([\"clang/Tooling/**\"]),\n    deps = [\n        \":clang_ast\",\n        \":clang_astmatchers\",\n        \":clang_basic\",\n        \":clang_driver\",\n        \":clang_format\",\n        \":clang_frontend\",\n        \":clang_lex\",\n        \":clang_rewrite\",\n        \":clang_serialization\",\n        \":clang_toolingcore\",\n        \":llvm_option\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_toolingcore\",\n    srcs = [\"lib/libclangToolingCore.a\"],\n    hdrs = glob([\"clang/Tooling/Core/**\"]),\n    deps = [\n        \":clang_ast\",\n        \":clang_basic\",\n        \":clang_lex\",\n        \":clang_rewrite\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"clang_toolinginclusions\",\n    srcs = [\"lib/libclangToolingInclusions.a\"],\n    hdrs = glob([\"clang/Tooling/Inclusions/**\"]),\n    deps = [\n        \":clang_basic\",\n        \":clang_lex\",\n        \":clang_rewrite\",\n        \":clang_toolingcore\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_analysis\",\n    srcs = [\"lib/libLLVMAnalysis.a\"],\n    hdrs = glob([\"llvm/Analysis/**\"]),\n    deps = [\n        \":llvm_binaryformat\",\n        \":llvm_core\",\n        \":llvm_object\",\n        \":llvm_profiledata\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_binaryformat\",\n    srcs = [\"lib/libLLVMBinaryFormat.a\"],\n    hdrs = glob([\"llvm/BinaryFormat/**\"]),\n    deps = [\":llvm_support\"],\n)\n\ncc_library(\n    name = \"llvm_bitreader\",\n    srcs = [\"lib/libLLVMBitReader.a\"],\n    hdrs = glob([\"llvm/Bitcode/**\"]),\n    deps = [\n        \":llvm_bitstreamreader\",\n        \":llvm_core\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_bitstreamreader\",\n    srcs = [\"lib/libLLVMBitstreamReader.a\"],\n    hdrs = glob([\"llvm/Bitstream/**\"]),\n    deps = [\n        
\":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_core\",\n    srcs = [\"lib/libLLVMCore.a\"],\n    hdrs = glob([\n        \"llvm/ADT/**\",\n        \"llvm/IR/**\",\n        \"llvm/*\",\n        \"llvm-c/**\",\n    ]),\n    deps = [\n        \":llvm_binaryformat\",\n        \":llvm_remarks\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_demangle\",\n    srcs = [\"lib/libLLVMDemangle.a\"],\n    hdrs = glob([\"llvm/Demangle/**\"]),\n)\n\ncc_library(\n    name = \"llvm_frontend_omp\",\n    srcs = [\"lib/libLLVMFrontendOpenMP.a\"],\n    hdrs = glob([\"llvm/Frontend/OpenMP/**\"]),\n)\n\ncc_library(\n    name = \"llvm_mc\",\n    srcs = [\"lib/libLLVMMC.a\"],\n    hdrs = glob([\"llvm/MC/**\"]),\n    deps = [\n        \":llvm_binaryformat\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_mcparser\",\n    srcs = [\"lib/libLLVMMCParser.a\"],\n    hdrs = glob([\"llvm/MC/MCParser/**\"]),\n    deps = [\n        \":llvm_mc\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_object\",\n    srcs = [\"lib/libLLVMObject.a\"],\n    hdrs = glob([\"llvm/Object/**\"]),\n    deps = [\n        \":llvm_binaryformat\",\n        \":llvm_bitreader\",\n        \":llvm_core\",\n        \":llvm_mc\",\n        \":llvm_mcparser\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_option\",\n    srcs = [\"lib/libLLVMOption.a\"],\n    hdrs = glob([\"llvm/Option/**\"]),\n    deps = [\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_profiledata\",\n    srcs = [\"lib/libLLVMProfileData.a\"],\n    hdrs = glob([\"llvm/ProfileData/**\"]),\n    deps = [\n        \":llvm_core\",\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_remarks\",\n    srcs = [\"lib/libLLVMRemarks.a\"],\n    hdrs = glob([\"llvm/Remarks/**\"]),\n    deps = [\n        \":llvm_support\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_support\",\n    srcs = 
[\"lib/libLLVMSupport.a\"],\n    hdrs = glob([\n        \"llvm/Config/**\",\n        \"llvm/Support/**\",\n    ]),\n    linkopts = [\n        \"-lcurses\",\n        \"-lpthread\",\n    ],\n    deps = [\n        \":llvm_demangle\",\n    ],\n)\n\ncc_library(\n    name = \"llvm_target\",\n    srcs = [\"lib/libLLVMTarget.a\"],\n    hdrs = glob([\"llvm/Target/**\"]),\n    deps = [\n        \":llvm_analysis\",\n        \":llvm_core\",\n        \":llvm_mc\",\n        \":llvm_support\",\n    ],\n)\n"
  },
  {
    "path": "tools/clang_tools/support/clang_tools.bzl",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_binary\", \"cc_library\", \"cc_test\")\n\n_clang_tools_copts = [\n    \"-fno-exceptions\",\n    \"-fno-rtti\",\n]\n\ndef clang_tools_cc_binary(name, copts = [], tags = [], deps = [], **kwargs):\n    cc_binary(\n        name = name,\n        copts = copts + _clang_tools_copts,\n        tags = tags + [\"manual\"],\n        deps = deps + [\"@envoy//bazel/foreign_cc:zlib\"],\n        **kwargs\n    )\n\ndef clang_tools_cc_library(name, copts = [], **kwargs):\n    cc_library(\n        name = name,\n        copts = copts + _clang_tools_copts,\n        **kwargs\n    )\n\ndef clang_tools_cc_test(name, copts = [], deps = [], **kwargs):\n    cc_test(\n        name = name,\n        copts = copts + _clang_tools_copts,\n        deps = deps + [\"@com_google_googletest//:gtest_main\"],\n        **kwargs\n    )\n"
  },
  {
    "path": "tools/clang_tools/syntax_only/BUILD",
    "content": "load(\"//clang_tools/support:clang_tools.bzl\", \"clang_tools_cc_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\nclang_tools_cc_binary(\n    name = \"syntax_only\",\n    srcs = [\"main.cc\"],\n    deps = [\n        \"@clang_tools//:clang_astmatchers\",\n        \"@clang_tools//:clang_basic\",\n        \"@clang_tools//:clang_tooling\",\n    ],\n)\n"
  },
  {
    "path": "tools/clang_tools/syntax_only/main.cc",
    "content": "// This is a copy of the Hello World-style syntax check tool described in\n// https://clang.llvm.org/docs/LibTooling.html. It's purpose is to provide an\n// example of how to build and run libtooling based Envoy developer tools.\n//\n// NOLINT(namespace-envoy)\n\n// Declares clang::SyntaxOnlyAction.\n#include \"clang/Frontend/FrontendActions.h\"\n#include \"clang/Tooling/CommonOptionsParser.h\"\n#include \"clang/Tooling/Tooling.h\"\n\n// Declares llvm::cl::extrahelp.\n#include \"llvm/Support/CommandLine.h\"\n\nusing namespace clang::tooling;\nusing namespace llvm;\n\nint main(int argc, const char** argv) {\n\n  // Apply a custom category to all command-line options so that they are the\n  // only ones displayed.\n  llvm::cl::OptionCategory MyToolCategory(\"my-tool options\");\n\n  // CommonOptionsParser declares HelpMessage with a description of the common\n  // command-line options related to the compilation database and input files.\n  // It's nice to have this help message in all tools.\n  cl::extrahelp CommonHelp(CommonOptionsParser::HelpMessage);\n\n  // A help message for this specific tool can be added afterwards.\n  cl::extrahelp MoreHelp(\"\\nMore help text...\\n\");\n\n  CommonOptionsParser OptionsParser(argc, argv, MyToolCategory);\n  ClangTool Tool(OptionsParser.getCompilations(), OptionsParser.getSourcePathList());\n  return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>().get());\n}\n"
  },
  {
    "path": "tools/code_format/.style.yapf",
    "content": "# The Google Python styles can be found here: https://github.com/google/styleguide/blob/gh-pages/pyguide.md\n# TODO: Look into enforcing single vs double quote.\n[style]\nbased_on_style=Google\nindent_width=2\ncolumn_limit=100\n"
  },
  {
    "path": "tools/code_format/check_format.py",
    "content": "#!/usr/bin/env python3\n\nimport argparse\nimport common\nimport functools\nimport multiprocessing\nimport os\nimport os.path\nimport pathlib\nimport re\nimport subprocess\nimport stat\nimport sys\nimport traceback\nimport shutil\nimport paths\n\nEXCLUDED_PREFIXES = (\n    \"./generated/\",\n    \"./thirdparty/\",\n    \"./build\",\n    \"./.git/\",\n    \"./bazel-\",\n    \"./.cache\",\n    \"./source/extensions/extensions_build_config.bzl\",\n    \"./bazel/toolchains/configs/\",\n    \"./tools/testdata/check_format/\",\n    \"./tools/pyformat/\",\n    \"./third_party/\",\n    \"./test/extensions/filters/http/wasm/test_data\",\n    \"./test/extensions/filters/network/wasm/test_data\",\n    \"./test/extensions/stats_sinks/wasm/test_data\",\n    \"./test/extensions/bootstrap/wasm/test_data\",\n    \"./test/extensions/common/wasm/test_data\",\n    \"./test/extensions/access_loggers/wasm/test_data\",\n    \"./source/extensions/common/wasm/ext\",\n    \"./examples/wasm\",\n)\nSUFFIXES = (\"BUILD\", \"WORKSPACE\", \".bzl\", \".cc\", \".h\", \".java\", \".m\", \".md\", \".mm\", \".proto\",\n            \".rst\")\nDOCS_SUFFIX = (\".md\", \".rst\")\nPROTO_SUFFIX = (\".proto\")\n\n# Files in these paths can make reference to protobuf stuff directly\nGOOGLE_PROTOBUF_ALLOWLIST = (\"ci/prebuilt\", \"source/common/protobuf\", \"api/test\",\n                             \"test/extensions/bootstrap/wasm/test_data\")\nREPOSITORIES_BZL = \"bazel/repositories.bzl\"\n\n# Files matching these exact names can reference real-world time. 
These include the class\n# definitions for real-world time, the construction of them in main(), and perf annotation.\n# For now it includes the validation server but that really should be injected too.\nREAL_TIME_ALLOWLIST = (\n    \"./source/common/common/utility.h\", \"./source/extensions/common/aws/utility.cc\",\n    \"./source/common/event/real_time_system.cc\", \"./source/common/event/real_time_system.h\",\n    \"./source/exe/main_common.cc\", \"./source/exe/main_common.h\",\n    \"./source/server/config_validation/server.cc\", \"./source/common/common/perf_annotation.h\",\n    \"./test/common/common/log_macros_test.cc\", \"./test/common/protobuf/utility_test.cc\",\n    \"./test/test_common/simulated_time_system.cc\", \"./test/test_common/simulated_time_system.h\",\n    \"./test/test_common/test_time.cc\", \"./test/test_common/test_time.h\",\n    \"./test/test_common/utility.cc\", \"./test/test_common/utility.h\",\n    \"./test/integration/integration.h\")\n\n# Tests in these paths may make use of the Registry::RegisterFactory constructor or the\n# REGISTER_FACTORY macro. 
Other locations should use the InjectFactory helper class to\n# perform temporary registrations.\nREGISTER_FACTORY_TEST_ALLOWLIST = (\"./test/common/config/registry_test.cc\",\n                                   \"./test/integration/clusters/\", \"./test/integration/filters/\")\n\n# Files in these paths can use MessageLite::SerializeAsString\nSERIALIZE_AS_STRING_ALLOWLIST = (\n    \"./source/common/config/version_converter.cc\",\n    \"./source/common/protobuf/utility.cc\",\n    \"./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc\",\n    \"./test/common/protobuf/utility_test.cc\",\n    \"./test/common/config/version_converter_test.cc\",\n    \"./test/common/grpc/codec_test.cc\",\n    \"./test/common/grpc/codec_fuzz_test.cc\",\n    \"./test/extensions/filters/http/common/fuzz/uber_filter.h\",\n    \"./test/extensions/bootstrap/wasm/test_data/speed_cpp.cc\",\n)\n\n# Files in these paths can use Protobuf::util::JsonStringToMessage\nJSON_STRING_TO_MESSAGE_ALLOWLIST = (\"./source/common/protobuf/utility.cc\",\n                                    \"./test/extensions/bootstrap/wasm/test_data/speed_cpp.cc\")\n\n# Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing\n# ones were grandfathered as part of PR #8484 for backwards compatibility.\nHISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST = (\"downstream_cx_length_ms\", \"downstream_cx_length_ms\",\n                                      \"initialization_time_ms\", \"loop_duration_us\", \"poll_delay_us\",\n                                      \"request_time_ms\", \"upstream_cx_connect_ms\",\n                                      \"upstream_cx_length_ms\")\n\n# Files in these paths can use std::regex\nSTD_REGEX_ALLOWLIST = (\n    \"./source/common/common/utility.cc\", \"./source/common/common/regex.h\",\n    \"./source/common/common/regex.cc\", \"./source/common/stats/tag_extractor_impl.h\",\n    \"./source/common/stats/tag_extractor_impl.cc\",\n    
\"./source/common/formatter/substitution_formatter.cc\",\n    \"./source/extensions/filters/http/squash/squash_filter.h\",\n    \"./source/extensions/filters/http/squash/squash_filter.cc\", \"./source/server/admin/utils.h\",\n    \"./source/server/admin/utils.cc\", \"./source/server/admin/stats_handler.h\",\n    \"./source/server/admin/stats_handler.cc\", \"./source/server/admin/prometheus_stats.h\",\n    \"./source/server/admin/prometheus_stats.cc\", \"./tools/clang_tools/api_booster/main.cc\",\n    \"./tools/clang_tools/api_booster/proto_cxx_utils.cc\", \"./source/common/version/version.cc\")\n\n# Only one C++ file should instantiate grpc_init\nGRPC_INIT_ALLOWLIST = (\"./source/common/grpc/google_grpc_context.cc\")\n\n# These files should not throw exceptions. Add HTTP/1 when exceptions removed.\nEXCEPTION_DENYLIST = (\"./source/common/http/http2/codec_impl.h\",\n                      \"./source/common/http/http2/codec_impl.cc\")\n\nCLANG_FORMAT_PATH = os.getenv(\"CLANG_FORMAT\", \"clang-format-10\")\nBUILDIFIER_PATH = paths.getBuildifier()\nBUILDOZER_PATH = paths.getBuildozer()\nENVOY_BUILD_FIXER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),\n                                      \"envoy_build_fixer.py\")\nHEADER_ORDER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), \"header_order.py\")\nSUBDIR_SET = set(common.includeDirOrder())\nINCLUDE_ANGLE = \"#include <\"\nINCLUDE_ANGLE_LEN = len(INCLUDE_ANGLE)\nPROTO_PACKAGE_REGEX = re.compile(r\"^package (\\S+);\\n*\", re.MULTILINE)\nX_ENVOY_USED_DIRECTLY_REGEX = re.compile(r'.*\\\"x-envoy-.*\\\".*')\nDESIGNATED_INITIALIZER_REGEX = re.compile(r\"\\{\\s*\\.\\w+\\s*\\=\")\nMANGLED_PROTOBUF_NAME_REGEX = re.compile(r\"envoy::[a-z0-9_:]+::[A-Z][a-z]\\w*_\\w*_[A-Z]{2}\")\nHISTOGRAM_SI_SUFFIX_REGEX = re.compile(r\"(?<=HISTOGRAM\\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)\")\nTEST_NAME_STARTING_LOWER_CASE_REGEX = 
re.compile(r\"TEST(_.\\(.*,\\s|\\()[a-z].*\\)\\s\\{\")\nEXTENSIONS_CODEOWNERS_REGEX = re.compile(r'.*(extensions[^@]*\\s+)(@.*)')\nCOMMENT_REGEX = re.compile(r\"//|\\*\")\nDURATION_VALUE_REGEX = re.compile(r'\\b[Dd]uration\\(([0-9.]+)')\nPROTO_VALIDATION_STRING = re.compile(r'\\bmin_bytes\\b')\nVERSION_HISTORY_NEW_LINE_REGEX = re.compile(\"\\* ([a-z \\-_]+): ([a-z:`]+)\")\nVERSION_HISTORY_SECTION_NAME = re.compile(\"^[A-Z][A-Za-z ]*$\")\nRELOADABLE_FLAG_REGEX = re.compile(\".*(.)(envoy.reloadable_features.[^ ]*)\\s.*\")\n# Check for punctuation in a terminal ref clause, e.g.\n# :ref:`panic mode. <arch_overview_load_balancing_panic_threshold>`\nREF_WITH_PUNCTUATION_REGEX = re.compile(\".*\\. <[^<]*>`\\s*\")\nDOT_MULTI_SPACE_REGEX = re.compile(\"\\\\. +\")\n\n# yapf: disable\nPROTOBUF_TYPE_ERRORS = {\n    # Well-known types should be referenced from the ProtobufWkt namespace.\n    \"Protobuf::Any\":                    \"ProtobufWkt::Any\",\n    \"Protobuf::Empty\":                  \"ProtobufWkt::Empty\",\n    \"Protobuf::ListValue\":              \"ProtobufWkt::ListValue\",\n    \"Protobuf::NULL_VALUE\":             \"ProtobufWkt::NULL_VALUE\",\n    \"Protobuf::StringValue\":            \"ProtobufWkt::StringValue\",\n    \"Protobuf::Struct\":                 \"ProtobufWkt::Struct\",\n    \"Protobuf::Value\":                  \"ProtobufWkt::Value\",\n\n    # Other common mis-namespacing of protobuf types.\n    \"ProtobufWkt::Map\":                 \"Protobuf::Map\",\n    \"ProtobufWkt::MapPair\":             \"Protobuf::MapPair\",\n    \"ProtobufUtil::MessageDifferencer\": \"Protobuf::util::MessageDifferencer\"\n}\nLIBCXX_REPLACEMENTS = {\n    \"absl::make_unique<\": \"std::make_unique<\",\n}\n\nUNOWNED_EXTENSIONS = {\n  \"extensions/filters/http/ratelimit\",\n  \"extensions/filters/http/buffer\",\n  \"extensions/filters/http/rbac\",\n  \"extensions/filters/http/ip_tagging\",\n  \"extensions/filters/http/tap\",\n  \"extensions/filters/http/health_check\",\n  
\"extensions/filters/http/cors\",\n  \"extensions/filters/http/ext_authz\",\n  \"extensions/filters/http/dynamo\",\n  \"extensions/filters/http/lua\",\n  \"extensions/filters/http/common\",\n  \"extensions/filters/common\",\n  \"extensions/filters/common/ratelimit\",\n  \"extensions/filters/common/rbac\",\n  \"extensions/filters/common/lua\",\n  \"extensions/filters/listener/original_dst\",\n  \"extensions/filters/listener/proxy_protocol\",\n  \"extensions/stat_sinks/statsd\",\n  \"extensions/stat_sinks/common\",\n  \"extensions/stat_sinks/common/statsd\",\n  \"extensions/health_checkers/redis\",\n  \"extensions/access_loggers/grpc\",\n  \"extensions/access_loggers/file\",\n  \"extensions/common/tap\",\n  \"extensions/transport_sockets/raw_buffer\",\n  \"extensions/transport_sockets/tap\",\n  \"extensions/tracers/zipkin\",\n  \"extensions/tracers/dynamic_ot\",\n  \"extensions/tracers/opencensus\",\n  \"extensions/tracers/lightstep\",\n  \"extensions/tracers/common\",\n  \"extensions/tracers/common/ot\",\n  \"extensions/retry/host/previous_hosts\",\n  \"extensions/filters/network/ratelimit\",\n  \"extensions/filters/network/client_ssl_auth\",\n  \"extensions/filters/network/rbac\",\n  \"extensions/filters/network/tcp_proxy\",\n  \"extensions/filters/network/echo\",\n  \"extensions/filters/network/ext_authz\",\n  \"extensions/filters/network/redis_proxy\",\n  \"extensions/filters/network/kafka\",\n  \"extensions/filters/network/kafka/broker\",\n  \"extensions/filters/network/kafka/protocol\",\n  \"extensions/filters/network/kafka/serialization\",\n  \"extensions/filters/network/mongo_proxy\",\n  \"extensions/filters/network/common\",\n  \"extensions/filters/network/common/redis\",\n}\n# yapf: enable\n\n\nclass FormatChecker:\n\n  def __init__(self, args):\n    self.operation_type = args.operation_type\n    self.target_path = args.target_path\n    self.api_prefix = args.api_prefix\n    self.api_shadow_root = args.api_shadow_prefix\n    self.envoy_build_rule_check = 
not args.skip_envoy_build_rule_check\n    self.namespace_check = args.namespace_check\n    self.namespace_check_excluded_paths = args.namespace_check_excluded_paths + [\n        \"./tools/api_boost/testdata/\",\n        \"./tools/clang_tools/\",\n    ]\n    self.build_fixer_check_excluded_paths = args.build_fixer_check_excluded_paths + [\n        \"./bazel/external/\",\n        \"./bazel/toolchains/\",\n        \"./bazel/BUILD\",\n        \"./tools/clang_tools\",\n    ]\n    self.include_dir_order = args.include_dir_order\n\n  # Map a line transformation function across each line of a file,\n  # writing the result lines as requested.\n  # If there is a clang format nesting or mismatch error, return the first occurrence\n  def evaluateLines(self, path, line_xform, write=True):\n    error_message = None\n    format_flag = True\n    output_lines = []\n    for line_number, line in enumerate(self.readLines(path)):\n      if line.find(\"// clang-format off\") != -1:\n        if not format_flag and error_message is None:\n          error_message = \"%s:%d: %s\" % (path, line_number + 1, \"clang-format nested off\")\n        format_flag = False\n      if line.find(\"// clang-format on\") != -1:\n        if format_flag and error_message is None:\n          error_message = \"%s:%d: %s\" % (path, line_number + 1, \"clang-format nested on\")\n        format_flag = True\n      if format_flag:\n        output_lines.append(line_xform(line, line_number))\n      else:\n        output_lines.append(line)\n    # We used to use fileinput in the older Python 2.7 script, but this doesn't do\n    # inplace mode and UTF-8 in Python 3, so doing it the manual way.\n    if write:\n      pathlib.Path(path).write_text('\\n'.join(output_lines), encoding='utf-8')\n    if not format_flag and error_message is None:\n      error_message = \"%s:%d: %s\" % (path, line_number + 1, \"clang-format remains off\")\n    return error_message\n\n  # Obtain all the lines in a given file.\n  def readLines(self, 
path):\n    return self.readFile(path).split('\\n')\n\n  # Read a UTF-8 encoded file as a str.\n  def readFile(self, path):\n    return pathlib.Path(path).read_text(encoding='utf-8')\n\n  # lookPath searches for the given executable in all directories in PATH\n  # environment variable. If it cannot be found, empty string is returned.\n  def lookPath(self, executable):\n    return shutil.which(executable) or ''\n\n  # pathExists checks whether the given path exists. This function assumes that\n  # the path is absolute and evaluates environment variables.\n  def pathExists(self, executable):\n    return os.path.exists(os.path.expandvars(executable))\n\n  # executableByOthers checks whether the given path has execute permission for\n  # others.\n  def executableByOthers(self, executable):\n    st = os.stat(os.path.expandvars(executable))\n    return bool(st.st_mode & stat.S_IXOTH)\n\n  # Check whether all needed external tools (clang-format, buildifier, buildozer) are\n  # available.\n  def checkTools(self):\n    error_messages = []\n\n    clang_format_abs_path = self.lookPath(CLANG_FORMAT_PATH)\n    if clang_format_abs_path:\n      if not self.executableByOthers(clang_format_abs_path):\n        error_messages.append(\"command {} exists, but cannot be executed by other \"\n                              \"users\".format(CLANG_FORMAT_PATH))\n    else:\n      error_messages.append(\n          \"Command {} not found. If you have clang-format in version 10.x.x \"\n          \"installed, but the binary name is different or it's not available in \"\n          \"PATH, please use CLANG_FORMAT environment variable to specify the path. 
\"\n          \"Examples:\\n\"\n          \"    export CLANG_FORMAT=clang-format-10.0.0\\n\"\n          \"    export CLANG_FORMAT=/opt/bin/clang-format-10\\n\"\n          \"    export CLANG_FORMAT=/usr/local/opt/llvm@10/bin/clang-format\".format(\n              CLANG_FORMAT_PATH))\n\n    def checkBazelTool(name, path, var):\n      bazel_tool_abs_path = self.lookPath(path)\n      if bazel_tool_abs_path:\n        if not self.executableByOthers(bazel_tool_abs_path):\n          error_messages.append(\"command {} exists, but cannot be executed by other \"\n                                \"users\".format(path))\n      elif self.pathExists(path):\n        if not self.executableByOthers(path):\n          error_messages.append(\"command {} exists, but cannot be executed by other \"\n                                \"users\".format(path))\n      else:\n\n        error_messages.append(\"Command {} not found. If you have {} installed, but the binary \"\n                              \"name is different or it's not available in $GOPATH/bin, please use \"\n                              \"{} environment variable to specify the path. 
Example:\\n\"\n                              \"    export {}=`which {}`\\n\"\n                              \"If you don't have {} installed, you can install it by:\\n\"\n                              \"    go get -u github.com/bazelbuild/buildtools/{}\".format(\n                                  path, name, var, var, name, name, name))\n\n    checkBazelTool('buildifier', BUILDIFIER_PATH, 'BUILDIFIER_BIN')\n    checkBazelTool('buildozer', BUILDOZER_PATH, 'BUILDOZER_BIN')\n\n    return error_messages\n\n  def checkNamespace(self, file_path):\n    for excluded_path in self.namespace_check_excluded_paths:\n      if file_path.startswith(excluded_path):\n        return []\n\n    nolint = \"NOLINT(namespace-%s)\" % self.namespace_check.lower()\n    text = self.readFile(file_path)\n    if not re.search(\"^\\s*namespace\\s+%s\\s*{\" % self.namespace_check, text, re.MULTILINE) and \\\n      not nolint in text:\n      return [\n          \"Unable to find %s namespace or %s for file: %s\" %\n          (self.namespace_check, nolint, file_path)\n      ]\n    return []\n\n  def packageNameForProto(self, file_path):\n    package_name = None\n    error_message = []\n    result = PROTO_PACKAGE_REGEX.search(self.readFile(file_path))\n    if result is not None and len(result.groups()) == 1:\n      package_name = result.group(1)\n    if package_name is None:\n      error_message = [\"Unable to find package name for proto file: %s\" % file_path]\n\n    return [package_name, error_message]\n\n  # To avoid breaking the Lyft import, we just check for path inclusion here.\n  def allowlistedForProtobufDeps(self, file_path):\n    return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \\\n            any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_ALLOWLIST))\n\n  # Real-world time sources should not be instantiated in the source, except for a few\n  # specific cases. 
They should be passed down from where they are instantied to where\n  # they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager.\n  def allowlistedForRealTime(self, file_path):\n    if file_path.endswith(\".md\"):\n      return True\n    return file_path in REAL_TIME_ALLOWLIST\n\n  def allowlistedForRegisterFactory(self, file_path):\n    if not file_path.startswith(\"./test/\"):\n      return True\n\n    return any(file_path.startswith(prefix) for prefix in REGISTER_FACTORY_TEST_ALLOWLIST)\n\n  def allowlistedForSerializeAsString(self, file_path):\n    return file_path in SERIALIZE_AS_STRING_ALLOWLIST or file_path.endswith(DOCS_SUFFIX)\n\n  def allowlistedForJsonStringToMessage(self, file_path):\n    return file_path in JSON_STRING_TO_MESSAGE_ALLOWLIST\n\n  def allowlistedForHistogramSiSuffix(self, name):\n    return name in HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST\n\n  def allowlistedForStdRegex(self, file_path):\n    return file_path.startswith(\"./test\") or file_path in STD_REGEX_ALLOWLIST or file_path.endswith(\n        DOCS_SUFFIX)\n\n  def allowlistedForGrpcInit(self, file_path):\n    return file_path in GRPC_INIT_ALLOWLIST\n\n  def allowlistedForUnpackTo(self, file_path):\n    return file_path.startswith(\"./test\") or file_path in [\n        \"./source/common/protobuf/utility.cc\", \"./source/common/protobuf/utility.h\"\n    ]\n\n  def denylistedForExceptions(self, file_path):\n    # Returns true when it is a non test header file or the file_path is in DENYLIST or\n    # it is under toos/testdata subdirectory.\n    if file_path.endswith(DOCS_SUFFIX):\n      return False\n\n    return (file_path.endswith('.h') and not file_path.startswith(\"./test/\")) or file_path in EXCEPTION_DENYLIST \\\n        or self.isInSubdir(file_path, 'tools/testdata')\n\n  def isApiFile(self, file_path):\n    return file_path.startswith(self.api_prefix) or file_path.startswith(self.api_shadow_root)\n\n  def isBuildFile(self, file_path):\n    basename = 
os.path.basename(file_path)\n    if basename in {\"BUILD\", \"BUILD.bazel\"} or basename.endswith(\".BUILD\"):\n      return True\n    return False\n\n  def isExternalBuildFile(self, file_path):\n    return self.isBuildFile(file_path) and (file_path.startswith(\"./bazel/external/\") or\n                                            file_path.startswith(\"./tools/clang_tools\"))\n\n  def isStarlarkFile(self, file_path):\n    return file_path.endswith(\".bzl\")\n\n  def isWorkspaceFile(self, file_path):\n    return os.path.basename(file_path) == \"WORKSPACE\"\n\n  def isBuildFixerExcludedFile(self, file_path):\n    for excluded_path in self.build_fixer_check_excluded_paths:\n      if file_path.startswith(excluded_path):\n        return True\n    return False\n\n  def hasInvalidAngleBracketDirectory(self, line):\n    if not line.startswith(INCLUDE_ANGLE):\n      return False\n    path = line[INCLUDE_ANGLE_LEN:]\n    slash = path.find(\"/\")\n    if slash == -1:\n      return False\n    subdir = path[0:slash]\n    return subdir in SUBDIR_SET\n\n  def checkCurrentReleaseNotes(self, file_path, error_messages):\n    first_word_of_prior_line = ''\n    next_word_to_check = ''  # first word after :\n    prior_line = ''\n\n    def endsWithPeriod(prior_line):\n      if not prior_line:\n        return True  # Don't punctuation-check empty lines.\n      if prior_line.endswith('.'):\n        return True  # Actually ends with .\n      if prior_line.endswith('`') and REF_WITH_PUNCTUATION_REGEX.match(prior_line):\n        return True  # The text in the :ref ends with a .\n      return False\n\n    for line_number, line in enumerate(self.readLines(file_path)):\n\n      def reportError(message):\n        error_messages.append(\"%s:%d: %s\" % (file_path, line_number + 1, message))\n\n      if VERSION_HISTORY_SECTION_NAME.match(line):\n        if line == \"Deprecated\":\n          # The deprecations section is last, and does not have enforced formatting.\n          break\n\n        # 
Reset all parsing at the start of a section.\n        first_word_of_prior_line = ''\n        next_word_to_check = ''  # first word after :\n        prior_line = ''\n\n      # make sure flags are surrounded by ``s\n      flag_match = RELOADABLE_FLAG_REGEX.match(line)\n      if flag_match:\n        if not flag_match.groups()[0].startswith('`'):\n          reportError(\"Flag `%s` should be enclosed in back ticks\" % flag_match.groups()[1])\n\n      if line.startswith(\"* \"):\n        if not endsWithPeriod(prior_line):\n          reportError(\"The following release note does not end with a '.'\\n %s\" % prior_line)\n\n        match = VERSION_HISTORY_NEW_LINE_REGEX.match(line)\n        if not match:\n          reportError(\"Version history line malformed. \"\n                      \"Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\\n %s\" %\n                      line)\n        else:\n          first_word = match.groups()[0]\n          next_word = match.groups()[1]\n          # Do basic alphabetization checks of the first word on the line and the\n          # first word after the :\n          if first_word_of_prior_line and first_word_of_prior_line > first_word:\n            reportError(\n                \"Version history not in alphabetical order (%s vs %s): please check placement of line\\n %s. \"\n                % (first_word_of_prior_line, first_word, line))\n          if first_word_of_prior_line == first_word and next_word_to_check and next_word_to_check > next_word:\n            reportError(\n                \"Version history not in alphabetical order (%s vs %s): please check placement of line\\n %s. 
\"\n                % (next_word_to_check, next_word, line))\n          first_word_of_prior_line = first_word\n          next_word_to_check = next_word\n\n          prior_line = line\n      elif not line:\n        # If we hit the end of this release note block block, check the prior line.\n        if not endsWithPeriod(prior_line):\n          reportError(\"The following release note does not end with a '.'\\n %s\" % prior_line)\n      elif prior_line:\n        prior_line += line\n\n  def checkFileContents(self, file_path, checker):\n    error_messages = []\n\n    if file_path.endswith(\"version_history/current.rst\"):\n      # Version file checking has enough special cased logic to merit its own checks.\n      # This only validates entries for the current release as very old release\n      # notes have a different format.\n      self.checkCurrentReleaseNotes(file_path, error_messages)\n\n    def checkFormatErrors(line, line_number):\n\n      def reportError(message):\n        error_messages.append(\"%s:%d: %s\" % (file_path, line_number + 1, message))\n\n      checker(line, file_path, reportError)\n\n    evaluate_failure = self.evaluateLines(file_path, checkFormatErrors, False)\n    if evaluate_failure is not None:\n      error_messages.append(evaluate_failure)\n\n    return error_messages\n\n  def fixSourceLine(self, line, line_number):\n    # Strip double space after '.'  This may prove overenthusiastic and need to\n    # be restricted to comments and metadata files but works for now.\n    line = re.sub(DOT_MULTI_SPACE_REGEX, \". 
\", line)\n\n    if self.hasInvalidAngleBracketDirectory(line):\n      line = line.replace(\"<\", '\"').replace(\">\", '\"')\n\n    # Fix incorrect protobuf namespace references.\n    for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items():\n      line = line.replace(invalid_construct, valid_construct)\n\n    # Use recommended cpp stdlib\n    for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items():\n      line = line.replace(invalid_construct, valid_construct)\n\n    return line\n\n  # We want to look for a call to condvar.waitFor, but there's no strong pattern\n  # to the variable name of the condvar. If we just look for \".waitFor\" we'll also\n  # pick up time_system_.waitFor(...), and we don't want to return true for that\n  # pattern. But in that case there is a strong pattern of using time_system in\n  # various spellings as the variable name.\n  def hasCondVarWaitFor(self, line):\n    wait_for = line.find(\".waitFor(\")\n    if wait_for == -1:\n      return False\n    preceding = line[0:wait_for]\n    if preceding.endswith(\"time_system\") or preceding.endswith(\"timeSystem()\") or \\\n      preceding.endswith(\"time_system_\"):\n      return False\n    return True\n\n  # Determines whether the filename is either in the specified subdirectory, or\n  # at the top level. We consider files in the top level for the benefit of\n  # the check_format testcases in tools/testdata/check_format.\n  def isInSubdir(self, filename, *subdirs):\n    # Skip this check for check_format's unit-tests.\n    if filename.count(\"/\") <= 1:\n      return True\n    for subdir in subdirs:\n      if filename.startswith('./' + subdir + '/'):\n        return True\n    return False\n\n  # Determines if given token exists in line without leading or trailing token characters\n  # e.g. 
will return True for a line containing foo() but not foo_bar() or baz_foo\n  def tokenInLine(self, token, line):\n    index = 0\n    while True:\n      index = line.find(token, index)\n      # the following check has been changed from index < 1 to index < 0 because\n      # this function incorrectly returns false when the token in question is the\n      # first one in a line. The following line returns false when the token is present:\n      # (no leading whitespace) violating_symbol foo;\n      if index < 0:\n        break\n      if index == 0 or not (line[index - 1].isalnum() or line[index - 1] == '_'):\n        if index + len(token) >= len(line) or not (line[index + len(token)].isalnum() or\n                                                   line[index + len(token)] == '_'):\n          return True\n      index = index + 1\n    return False\n\n  def checkSourceLine(self, line, file_path, reportError):\n    # Check fixable errors. These may have been fixed already.\n    if line.find(\".  \") != -1:\n      reportError(\"over-enthusiastic spaces\")\n    if self.isInSubdir(file_path, 'source', 'include') and X_ENVOY_USED_DIRECTLY_REGEX.match(line):\n      reportError(\n          \"Please do not use the raw literal x-envoy in source code.  
See Envoy::Http::PrefixValue.\"\n      )\n    if self.hasInvalidAngleBracketDirectory(line):\n      reportError(\"envoy includes should not have angle brackets\")\n    for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items():\n      if invalid_construct in line:\n        reportError(\"incorrect protobuf type reference %s; \"\n                    \"should be %s\" % (invalid_construct, valid_construct))\n    for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items():\n      if invalid_construct in line:\n        reportError(\"term %s should be replaced with standard library term %s\" %\n                    (invalid_construct, valid_construct))\n    # Do not include the virtual_includes headers.\n    if re.search(\"#include.*/_virtual_includes/\", line):\n      reportError(\"Don't include the virtual includes headers.\")\n\n    # Some errors cannot be fixed automatically, and actionable, consistent,\n    # navigable messages should be emitted to make it easy to find and fix\n    # the errors by hand.\n    if not self.allowlistedForProtobufDeps(file_path):\n      if '\"google/protobuf' in line or \"google::protobuf\" in line:\n        reportError(\"unexpected direct dependency on google.protobuf, use \"\n                    \"the definitions in common/protobuf/protobuf.h instead.\")\n    if line.startswith(\"#include <mutex>\") or line.startswith(\"#include <condition_variable\"):\n      # We don't check here for std::mutex because that may legitimately show up in\n      # comments, for example this one.\n      reportError(\"Don't use <mutex> or <condition_variable*>, switch to \"\n                  \"Thread::MutexBasicLockable in source/common/common/thread.h\")\n    if line.startswith(\"#include <shared_mutex>\"):\n      # We don't check here for std::shared_timed_mutex because that may\n      # legitimately show up in comments, for example this one.\n      reportError(\"Don't use <shared_mutex>, use absl::Mutex for reader/writer locks.\")\n  
  if not self.allowlistedForRealTime(file_path) and not \"NO_CHECK_FORMAT(real_time)\" in line:\n      if \"RealTimeSource\" in line or \\\n        (\"RealTimeSystem\" in line and not \"TestRealTimeSystem\" in line) or \\\n        \"std::chrono::system_clock::now\" in line or \"std::chrono::steady_clock::now\" in line or \\\n        \"std::this_thread::sleep_for\" in line or self.hasCondVarWaitFor(line):\n        reportError(\"Don't reference real-world time sources from production code; use injection\")\n    duration_arg = DURATION_VALUE_REGEX.search(line)\n    if duration_arg and duration_arg.group(1) != \"0\" and duration_arg.group(1) != \"0.0\":\n      # Matching duration(int-const or float-const) other than zero\n      reportError(\n          \"Don't use ambiguous duration(value), use an explicit duration type, e.g. Event::TimeSystem::Milliseconds(value)\"\n      )\n    if not self.allowlistedForRegisterFactory(file_path):\n      if \"Registry::RegisterFactory<\" in line or \"REGISTER_FACTORY\" in line:\n        reportError(\"Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, \"\n                    \"use Registry::InjectFactory instead.\")\n    if not self.allowlistedForUnpackTo(file_path):\n      if \"UnpackTo\" in line:\n        reportError(\"Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead\")\n    # Check that we use the absl::Time library\n    if self.tokenInLine(\"std::get_time\", line):\n      if \"test/\" in file_path:\n        reportError(\"Don't use std::get_time; use TestUtility::parseTime in tests\")\n      else:\n        reportError(\"Don't use std::get_time; use the injectable time system\")\n    if self.tokenInLine(\"std::put_time\", line):\n      reportError(\"Don't use std::put_time; use absl::Time equivalent instead\")\n    if self.tokenInLine(\"gmtime\", line):\n      reportError(\"Don't use gmtime; use absl::Time equivalent instead\")\n    if self.tokenInLine(\"mktime\", line):\n      reportError(\"Don't use 
mktime; use absl::Time equivalent instead\")\n    if self.tokenInLine(\"localtime\", line):\n      reportError(\"Don't use localtime; use absl::Time equivalent instead\")\n    if self.tokenInLine(\"strftime\", line):\n      reportError(\"Don't use strftime; use absl::FormatTime instead\")\n    if self.tokenInLine(\"strptime\", line):\n      reportError(\"Don't use strptime; use absl::FormatTime instead\")\n    if self.tokenInLine(\"strerror\", line):\n      reportError(\"Don't use strerror; use Envoy::errorDetails instead\")\n    # Prefer using abseil hash maps/sets over std::unordered_map/set for performance optimizations and\n    # non-deterministic iteration order that exposes faulty assertions.\n    # See: https://abseil.io/docs/cpp/guides/container#hash-tables\n    if \"std::unordered_map\" in line:\n      reportError(\"Don't use std::unordered_map; use absl::flat_hash_map instead or \"\n                  \"absl::node_hash_map if pointer stability of keys/values is required\")\n    if \"std::unordered_set\" in line:\n      reportError(\"Don't use std::unordered_set; use absl::flat_hash_set instead or \"\n                  \"absl::node_hash_set if pointer stability of keys/values is required\")\n    if \"std::atomic_\" in line:\n      # The std::atomic_* free functions are functionally equivalent to calling\n      # operations on std::atomic<T> objects, so prefer to use that instead.\n      reportError(\"Don't use free std::atomic_* functions, use std::atomic<T> members instead.\")\n    # Block usage of certain std types/functions as iOS 11 and macOS 10.13\n    # do not support these at runtime.\n    # See: https://github.com/envoyproxy/envoy/issues/12341\n    if self.tokenInLine(\"std::any\", line):\n      reportError(\"Don't use std::any; use absl::any instead\")\n    if self.tokenInLine(\"std::get_if\", line):\n      reportError(\"Don't use std::get_if; use absl::get_if instead\")\n    if self.tokenInLine(\"std::holds_alternative\", line):\n      
reportError(\"Don't use std::holds_alternative; use absl::holds_alternative instead\")\n    if self.tokenInLine(\"std::make_optional\", line):\n      reportError(\"Don't use std::make_optional; use absl::make_optional instead\")\n    if self.tokenInLine(\"std::monostate\", line):\n      reportError(\"Don't use std::monostate; use absl::monostate instead\")\n    if self.tokenInLine(\"std::optional\", line):\n      reportError(\"Don't use std::optional; use absl::optional instead\")\n    if self.tokenInLine(\"std::string_view\", line):\n      reportError(\"Don't use std::string_view; use absl::string_view instead\")\n    if self.tokenInLine(\"std::variant\", line):\n      reportError(\"Don't use std::variant; use absl::variant instead\")\n    if self.tokenInLine(\"std::visit\", line):\n      reportError(\"Don't use std::visit; use absl::visit instead\")\n    if \"__attribute__((packed))\" in line and file_path != \"./include/envoy/common/platform.h\":\n      # __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that\n      # can be used instead\n      reportError(\"Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined \"\n                  \"in include/envoy/common/platform.h instead\")\n    if DESIGNATED_INITIALIZER_REGEX.search(line):\n      # Designated initializers are not part of the C++14 standard and are not supported\n      # by MSVC\n      reportError(\"Don't use designated initializers in struct initialization, \"\n                  \"they are not part of C++14\")\n    if \" ?: \" in line:\n      # The ?: operator is non-standard, it is a GCC extension\n      reportError(\"Don't use the '?:' operator, it is a non-standard GCC extension\")\n    if line.startswith(\"using testing::Test;\"):\n      reportError(\"Don't use 'using testing::Test;, elaborate the type instead\")\n    if line.startswith(\"using testing::TestWithParams;\"):\n      reportError(\"Don't use 'using testing::Test;, elaborate the type 
instead\")\n    if TEST_NAME_STARTING_LOWER_CASE_REGEX.search(line):\n      # Matches variants of TEST(), TEST_P(), TEST_F() etc. where the test name begins\n      # with a lowercase letter.\n      reportError(\"Test names should be CamelCase, starting with a capital letter\")\n    if not self.allowlistedForSerializeAsString(file_path) and \"SerializeAsString\" in line:\n      # The MessageLite::SerializeAsString doesn't generate deterministic serialization,\n      # use MessageUtil::hash instead.\n      reportError(\n          \"Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead.\"\n      )\n    if not self.allowlistedForJsonStringToMessage(file_path) and \"JsonStringToMessage\" in line:\n      # Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing\n      # behavior.\n      reportError(\"Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.\")\n\n    if self.isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \\\n      ('.counterFromString(' in line or '.gaugeFromString(' in line or \\\n        '.histogramFromString(' in line or '.textReadoutFromString(' in line or \\\n        '->counterFromString(' in line or '->gaugeFromString(' in line or \\\n        '->histogramFromString(' in line or '->textReadoutFromString(' in line):\n      reportError(\"Don't lookup stats by name at runtime; use StatName saved during construction\")\n\n    if MANGLED_PROTOBUF_NAME_REGEX.search(line):\n      reportError(\"Don't use mangled Protobuf names for enum constants\")\n\n    hist_m = HISTOGRAM_SI_SUFFIX_REGEX.search(line)\n    if hist_m and not self.allowlistedForHistogramSiSuffix(hist_m.group(0)):\n      reportError(\n          \"Don't suffix histogram names with the unit symbol, \"\n          \"it's already part of the histogram object and unit-supporting sinks can use this information natively, \"\n          \"other sinks can add the suffix 
automatically on flush should they prefer to do so.\")\n\n    if not self.allowlistedForStdRegex(file_path) and \"std::regex\" in line:\n      reportError(\"Don't use std::regex in code that handles untrusted input. Use RegexMatcher\")\n\n    if not self.allowlistedForGrpcInit(file_path):\n      grpc_init_or_shutdown = line.find(\"grpc_init()\")\n      grpc_shutdown = line.find(\"grpc_shutdown()\")\n      if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and\n                                         grpc_shutdown < grpc_init_or_shutdown):\n        grpc_init_or_shutdown = grpc_shutdown\n      if grpc_init_or_shutdown != -1:\n        comment = line.find(\"// \")\n        if comment == -1 or comment > grpc_init_or_shutdown:\n          reportError(\"Don't call grpc_init() or grpc_shutdown() directly, instantiate \" +\n                      \"Grpc::GoogleGrpcContext. See #8282\")\n\n    if self.denylistedForExceptions(file_path):\n      # Skpping cases where 'throw' is a substring of a symbol like in \"foothrowBar\".\n      if \"throw\" in line.split():\n        comment_match = COMMENT_REGEX.search(line)\n        if comment_match is None or comment_match.start(0) > line.find(\"throw\"):\n          reportError(\"Don't introduce throws into exception-free files, use error \" +\n                      \"statuses instead.\")\n\n    if \"lua_pushlightuserdata\" in line:\n      reportError(\n          \"Don't use lua_pushlightuserdata, since it can cause unprotected error in call to\" +\n          \"Lua API (bad light userdata pointer) on ARM64 architecture. 
See \" +\n          \"https://github.com/LuaJIT/LuaJIT/issues/450#issuecomment-433659873 for details.\")\n\n    if file_path.endswith(PROTO_SUFFIX):\n      exclude_path = ['v1', 'v2', 'generated_api_shadow']\n      result = PROTO_VALIDATION_STRING.search(line)\n      if result is not None:\n        if not any(x in file_path for x in exclude_path):\n          reportError(\"min_bytes is DEPRECATED, Use min_len.\")\n\n  def checkBuildLine(self, line, file_path, reportError):\n    if \"@bazel_tools\" in line and not (self.isStarlarkFile(file_path) or\n                                       file_path.startswith(\"./bazel/\") or\n                                       \"python/runfiles\" in line):\n      reportError(\"unexpected @bazel_tools reference, please indirect via a definition in //bazel\")\n    if not self.allowlistedForProtobufDeps(file_path) and '\"protobuf\"' in line:\n      reportError(\"unexpected direct external dependency on protobuf, use \"\n                  \"//source/common/protobuf instead.\")\n    if (self.envoy_build_rule_check and not self.isStarlarkFile(file_path) and\n        not self.isWorkspaceFile(file_path) and not self.isExternalBuildFile(file_path) and\n        \"@envoy//\" in line):\n      reportError(\"Superfluous '@envoy//' prefix\")\n\n  def fixBuildLine(self, file_path, line, line_number):\n    if (self.envoy_build_rule_check and not self.isStarlarkFile(file_path) and\n        not self.isWorkspaceFile(file_path) and not self.isExternalBuildFile(file_path)):\n      line = line.replace(\"@envoy//\", \"//\")\n    return line\n\n  def fixBuildPath(self, file_path):\n    self.evaluateLines(file_path, functools.partial(self.fixBuildLine, file_path))\n\n    error_messages = []\n\n    # TODO(htuch): Add API specific BUILD fixer script.\n    if not self.isBuildFixerExcludedFile(file_path) and not self.isApiFile(\n        file_path) and not self.isStarlarkFile(file_path) and not self.isWorkspaceFile(file_path):\n      if os.system(\"%s %s %s\" 
% (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0:\n        error_messages += [\"envoy_build_fixer rewrite failed for file: %s\" % file_path]\n\n    if os.system(\"%s -lint=fix -mode=fix %s\" % (BUILDIFIER_PATH, file_path)) != 0:\n      error_messages += [\"buildifier rewrite failed for file: %s\" % file_path]\n    return error_messages\n\n  def checkBuildPath(self, file_path):\n    error_messages = []\n\n    if not self.isBuildFixerExcludedFile(file_path) and not self.isApiFile(\n        file_path) and not self.isStarlarkFile(file_path) and not self.isWorkspaceFile(file_path):\n      command = \"%s %s | diff %s -\" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)\n      error_messages += self.executeCommand(command, \"envoy_build_fixer check failed\", file_path)\n\n    if self.isBuildFile(file_path) and (file_path.startswith(self.api_prefix + \"envoy\") or\n                                        file_path.startswith(self.api_shadow_root + \"envoy\")):\n      found = False\n      for line in self.readLines(file_path):\n        if \"api_proto_package(\" in line:\n          found = True\n          break\n      if not found:\n        error_messages += [\"API build file does not provide api_proto_package()\"]\n\n    command = \"%s -mode=diff %s\" % (BUILDIFIER_PATH, file_path)\n    error_messages += self.executeCommand(command, \"buildifier check failed\", file_path)\n    error_messages += self.checkFileContents(file_path, self.checkBuildLine)\n    return error_messages\n\n  def fixSourcePath(self, file_path):\n    self.evaluateLines(file_path, self.fixSourceLine)\n\n    error_messages = []\n\n    if not file_path.endswith(DOCS_SUFFIX):\n      if not file_path.endswith(PROTO_SUFFIX):\n        error_messages += self.fixHeaderOrder(file_path)\n      error_messages += self.clangFormat(file_path)\n    if file_path.endswith(PROTO_SUFFIX) and self.isApiFile(file_path):\n      package_name, error_message = self.packageNameForProto(file_path)\n      if package_name is 
None:\n        error_messages += error_message\n    return error_messages\n\n  def checkSourcePath(self, file_path):\n    error_messages = self.checkFileContents(file_path, self.checkSourceLine)\n\n    if not file_path.endswith(DOCS_SUFFIX):\n      if not file_path.endswith(PROTO_SUFFIX):\n        error_messages += self.checkNamespace(file_path)\n        command = (\"%s --include_dir_order %s --path %s | diff %s -\" %\n                   (HEADER_ORDER_PATH, self.include_dir_order, file_path, file_path))\n        error_messages += self.executeCommand(command, \"header_order.py check failed\", file_path)\n      command = (\"%s %s | diff %s -\" % (CLANG_FORMAT_PATH, file_path, file_path))\n      error_messages += self.executeCommand(command, \"clang-format check failed\", file_path)\n\n    if file_path.endswith(PROTO_SUFFIX) and self.isApiFile(file_path):\n      package_name, error_message = self.packageNameForProto(file_path)\n      if package_name is None:\n        error_messages += error_message\n    return error_messages\n\n  # Example target outputs are:\n  #   - \"26,27c26\"\n  #   - \"12,13d13\"\n  #   - \"7a8,9\"\n  def executeCommand(self,\n                     command,\n                     error_message,\n                     file_path,\n                     regex=re.compile(r\"^(\\d+)[a|c|d]?\\d*(?:,\\d+[a|c|d]?\\d*)?$\")):\n    try:\n      output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).strip()\n      if output:\n        return output.decode('utf-8').split(\"\\n\")\n      return []\n    except subprocess.CalledProcessError as e:\n      if (e.returncode != 0 and e.returncode != 1):\n        return [\"ERROR: something went wrong while executing: %s\" % e.cmd]\n      # In case we can't find any line numbers, record an error message first.\n      error_messages = [\"%s for file: %s\" % (error_message, file_path)]\n      for line in e.output.decode('utf-8').splitlines():\n        for num in regex.findall(line):\n          
error_messages.append(\"  %s:%s\" % (file_path, num))\n      return error_messages\n\n  def fixHeaderOrder(self, file_path):\n    command = \"%s --rewrite --include_dir_order %s --path %s\" % (HEADER_ORDER_PATH,\n                                                                 self.include_dir_order, file_path)\n    if os.system(command) != 0:\n      return [\"header_order.py rewrite error: %s\" % (file_path)]\n    return []\n\n  def clangFormat(self, file_path):\n    command = \"%s -i %s\" % (CLANG_FORMAT_PATH, file_path)\n    if os.system(command) != 0:\n      return [\"clang-format rewrite error: %s\" % (file_path)]\n    return []\n\n  def checkFormat(self, file_path):\n    if file_path.startswith(EXCLUDED_PREFIXES):\n      return []\n\n    if not file_path.endswith(SUFFIXES):\n      return []\n\n    error_messages = []\n    # Apply fixes first, if asked, and then run checks. If we wind up attempting to fix\n    # an issue, but there's still an error, that's a problem.\n    try_to_fix = self.operation_type == \"fix\"\n    if self.isBuildFile(file_path) or self.isStarlarkFile(file_path) or self.isWorkspaceFile(\n        file_path):\n      if try_to_fix:\n        error_messages += self.fixBuildPath(file_path)\n      error_messages += self.checkBuildPath(file_path)\n    else:\n      if try_to_fix:\n        error_messages += self.fixSourcePath(file_path)\n      error_messages += self.checkSourcePath(file_path)\n\n    if error_messages:\n      return [\"From %s\" % file_path] + error_messages\n    return error_messages\n\n  def checkFormatReturnTraceOnError(self, file_path):\n    \"\"\"Run checkFormat and return the traceback of any exception.\"\"\"\n    try:\n      return self.checkFormat(file_path)\n    except:\n      return traceback.format_exc().split(\"\\n\")\n\n  def checkOwners(self, dir_name, owned_directories, error_messages):\n    \"\"\"Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS\n    Args:\n      dir_name: the 
directory being checked.\n      owned_directories: directories currently listed in CODEOWNERS.\n      error_messages: where to put an error message for new unowned directories.\n    \"\"\"\n    found = False\n    for owned in owned_directories:\n      if owned.startswith(dir_name) or dir_name.startswith(owned):\n        found = True\n    if not found and dir_name not in UNOWNED_EXTENSIONS:\n      error_messages.append(\"New directory %s appears to not have owners in CODEOWNERS\" % dir_name)\n\n  def checkApiShadowStarlarkFiles(self, file_path, error_messages):\n    command = \"diff -u \"\n    command += file_path + \" \"\n    api_shadow_starlark_path = self.api_shadow_root + re.sub(r\"\\./api/\", '', file_path)\n    command += api_shadow_starlark_path\n\n    error_message = self.executeCommand(command, \"invalid .bzl in generated_api_shadow\", file_path)\n    if self.operation_type == \"check\":\n      error_messages += error_message\n    elif self.operation_type == \"fix\" and len(error_message) != 0:\n      shutil.copy(file_path, api_shadow_starlark_path)\n\n    return error_messages\n\n  def checkFormatVisitor(self, arg, dir_name, names):\n    \"\"\"Run checkFormat in parallel for the given files.\n    Args:\n      arg: a tuple (pool, result_list, owned_directories, error_messages)\n        pool and result_list are for starting tasks asynchronously.\n        owned_directories tracks directories listed in the CODEOWNERS file.\n        error_messages is a list of string format errors.\n      dir_name: the parent directory of the given files.\n      names: a list of file names.\n    \"\"\"\n\n    # Unpack the multiprocessing.Pool process pool and list of results. Since\n    # python lists are passed as references, this is used to collect the list of\n    # async results (futures) from running checkFormat and passing them back to\n    # the caller.\n    pool, result_list, owned_directories, error_messages = arg\n\n    # Sanity check CODEOWNERS.  
This doesn't need to be done in a multi-threaded\n    # manner as it is a small and limited list.\n    source_prefix = './source/'\n    full_prefix = './source/extensions/'\n    # Check to see if this directory is a subdir under /source/extensions\n    # Also ignore top level directories under /source/extensions since we don't\n    # need owners for source/extensions/access_loggers etc, just the subdirectories.\n    if dir_name.startswith(full_prefix) and '/' in dir_name[len(full_prefix):]:\n      self.checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages)\n\n    for file_name in names:\n      if dir_name.startswith(\"./api\") and self.isStarlarkFile(file_name):\n        result = pool.apply_async(self.checkApiShadowStarlarkFiles,\n                                  args=(dir_name + \"/\" + file_name, error_messages))\n        result_list.append(result)\n      result = pool.apply_async(self.checkFormatReturnTraceOnError,\n                                args=(dir_name + \"/\" + file_name,))\n      result_list.append(result)\n\n  # checkErrorMessages iterates over the list with error messages and prints\n  # errors and returns a bool based on whether there were any errors.\n  def checkErrorMessages(self, error_messages):\n    if error_messages:\n      for e in error_messages:\n        print(\"ERROR: %s\" % e)\n      return True\n    return False\n\n\nif __name__ == \"__main__\":\n  parser = argparse.ArgumentParser(description=\"Check or fix file format.\")\n  parser.add_argument(\"operation_type\",\n                      type=str,\n                      choices=[\"check\", \"fix\"],\n                      help=\"specify if the run should 'check' or 'fix' format.\")\n  parser.add_argument(\n      \"target_path\",\n      type=str,\n      nargs=\"?\",\n      default=\".\",\n      help=\"specify the root directory for the script to recurse over. 
Default '.'.\")\n  parser.add_argument(\"--add-excluded-prefixes\",\n                      type=str,\n                      nargs=\"+\",\n                      help=\"exclude additional prefixes.\")\n  parser.add_argument(\"-j\",\n                      \"--num-workers\",\n                      type=int,\n                      default=multiprocessing.cpu_count(),\n                      help=\"number of worker processes to use; defaults to one per core.\")\n  parser.add_argument(\"--api-prefix\", type=str, default=\"./api/\", help=\"path of the API tree.\")\n  parser.add_argument(\"--api-shadow-prefix\",\n                      type=str,\n                      default=\"./generated_api_shadow/\",\n                      help=\"path of the shadow API tree.\")\n  parser.add_argument(\"--skip_envoy_build_rule_check\",\n                      action=\"store_true\",\n                      help=\"skip checking for '@envoy//' prefix in build rules.\")\n  parser.add_argument(\"--namespace_check\",\n                      type=str,\n                      nargs=\"?\",\n                      default=\"Envoy\",\n                      help=\"specify namespace check string. 
Default 'Envoy'.\")\n  parser.add_argument(\"--namespace_check_excluded_paths\",\n                      type=str,\n                      nargs=\"+\",\n                      default=[],\n                      help=\"exclude paths from the namespace_check.\")\n  parser.add_argument(\"--build_fixer_check_excluded_paths\",\n                      type=str,\n                      nargs=\"+\",\n                      default=[],\n                      help=\"exclude paths from envoy_build_fixer check.\")\n  parser.add_argument(\"--bazel_tools_check_excluded_paths\",\n                      type=str,\n                      nargs=\"+\",\n                      default=[],\n                      help=\"exclude paths from bazel_tools check.\")\n  parser.add_argument(\"--include_dir_order\",\n                      type=str,\n                      default=\",\".join(common.includeDirOrder()),\n                      help=\"specify the header block include directory order.\")\n  args = parser.parse_args()\n  if args.add_excluded_prefixes:\n    EXCLUDED_PREFIXES += tuple(args.add_excluded_prefixes)\n  format_checker = FormatChecker(args)\n\n  # Check whether all needed external tools are available.\n  ct_error_messages = format_checker.checkTools()\n  if format_checker.checkErrorMessages(ct_error_messages):\n    sys.exit(1)\n\n  # Returns the list of directories with owners listed in CODEOWNERS. May append errors to\n  # error_messages.\n  def ownedDirectories(error_messages):\n    owned = []\n    maintainers = [\n        '@mattklein123', '@htuch', '@alyssawilk', '@zuercher', '@lizan', '@snowp', '@asraa',\n        '@yavlasov', '@junr03', '@dio', '@jmarantz', '@antoniovicente'\n    ]\n\n    try:\n      with open('./CODEOWNERS') as f:\n        for line in f:\n          # If this line is of the form \"extensions/... 
@owner1 @owner2\" capture the directory\n          # name and store it in the list of directories with documented owners.\n          m = EXTENSIONS_CODEOWNERS_REGEX.search(line)\n          if m is not None and not line.startswith('#'):\n            owned.append(m.group(1).strip())\n            owners = re.findall('@\\S+', m.group(2).strip())\n            if len(owners) < 2:\n              error_messages.append(\"Extensions require at least 2 owners in CODEOWNERS:\\n\"\n                                    \"    {}\".format(line))\n            maintainer = len(set(owners).intersection(set(maintainers))) > 0\n            if not maintainer:\n              error_messages.append(\"Extensions require at least one maintainer OWNER:\\n\"\n                                    \"    {}\".format(line))\n\n      return owned\n    except IOError:\n      return []  # for the check format tests.\n\n  # Calculate the list of owned directories once per run.\n  error_messages = []\n  owned_directories = ownedDirectories(error_messages)\n\n  if os.path.isfile(args.target_path):\n    error_messages += format_checker.checkFormat(\"./\" + args.target_path)\n  else:\n    results = []\n\n    def PooledCheckFormat(path_predicate):\n      pool = multiprocessing.Pool(processes=args.num_workers)\n      # For each file in target_path, start a new task in the pool and collect the\n      # results (results is passed by reference, and is used as an output).\n      for root, _, files in os.walk(args.target_path):\n        format_checker.checkFormatVisitor((pool, results, owned_directories, error_messages), root,\n                                          [f for f in files if path_predicate(f)])\n\n      # Close the pool to new tasks, wait for all of the running tasks to finish,\n      # then collect the error messages.\n      pool.close()\n      pool.join()\n\n    # We first run formatting on non-BUILD files, since the BUILD file format\n    # requires analysis of srcs/hdrs in the BUILD file, and we 
don't want these\n    # to be rewritten by other multiprocessing pooled processes.\n    PooledCheckFormat(lambda f: not format_checker.isBuildFile(f))\n    PooledCheckFormat(lambda f: format_checker.isBuildFile(f))\n\n    error_messages += sum((r.get() for r in results), [])\n\n  if format_checker.checkErrorMessages(error_messages):\n    print(\"ERROR: check format failed. run 'tools/code_format/check_format.py fix'\")\n    sys.exit(1)\n\n  if args.operation_type == \"check\":\n    print(\"PASS\")\n"
  },
  {
    "path": "tools/code_format/check_format_test.sh",
    "content": "#!/bin/bash\n\ntools=\"$(dirname \"$(dirname \"$(realpath \"$0\")\")\")\"\nroot=$(realpath \"$tools/..\")\nci=\"${root}/ci\"\nexport ci\ncd \"$root\" || exit 1\nexec ./ci/run_envoy_docker.sh ./tools/code_format/check_format_test_helper.sh \"$@\"\n"
  },
  {
    "path": "tools/code_format/check_format_test_helper.py",
    "content": "#!/usr/bin/env python3\n\n# Tests check_format.py. This must be run in a context where the clang\n# version and settings are compatible with the one in the Envoy\n# docker. Normally this is run via check_format_test.sh, which\n# executes it in under docker.\n\nfrom __future__ import print_function\n\nfrom run_command import runCommand\nimport argparse\nimport logging\nimport os\nimport shutil\nimport sys\nimport tempfile\n\ncurr_dir = os.path.dirname(os.path.realpath(__file__))\ntools = os.path.dirname(curr_dir)\nsrc = os.path.join(tools, 'testdata', 'check_format')\ncheck_format = sys.executable + \" \" + os.path.join(curr_dir, 'check_format.py')\nerrors = 0\n\n\n# Runs the 'check_format' operation, on the specified file, printing\n# the comamnd run and the status code as well as the stdout, and returning\n# all of that to the caller.\ndef runCheckFormat(operation, filename):\n  command = check_format + \" \" + operation + \" \" + filename\n  status, stdout, stderr = runCommand(command)\n  return (command, status, stdout + stderr)\n\n\ndef getInputFile(filename, extra_input_files=None):\n  files_to_copy = [filename]\n  if extra_input_files is not None:\n    files_to_copy.extend(extra_input_files)\n  for f in files_to_copy:\n    infile = os.path.join(src, f)\n    directory = os.path.dirname(f)\n    if not directory == '' and not os.path.isdir(directory):\n      os.makedirs(directory)\n    shutil.copyfile(infile, f)\n  return filename\n\n\n# Attempts to fix file, returning a 4-tuple: the command, input file name,\n# output filename, captured stdout as an array of lines, and the error status\n# code.\ndef fixFileHelper(filename, extra_input_files=None):\n  command, status, stdout = runCheckFormat(\n      \"fix\", getInputFile(filename, extra_input_files=extra_input_files))\n  infile = os.path.join(src, filename)\n  return command, infile, filename, status, stdout\n\n\n# Attempts to fix a file, returning the status code and the generated output.\n# If 
the fix was successful, the diff is returned as a string-array. If the file\n# was not fixable, the error-messages are returned as a string-array.\ndef fixFileExpectingSuccess(file, extra_input_files=None):\n  command, infile, outfile, status, stdout = fixFileHelper(file,\n                                                           extra_input_files=extra_input_files)\n  if status != 0:\n    print(\"FAILED: \" + infile)\n    emitStdoutAsError(stdout)\n    return 1\n  status, stdout, stderr = runCommand('diff ' + outfile + ' ' + infile + '.gold')\n  if status != 0:\n    print(\"FAILED: \" + infile)\n    emitStdoutAsError(stdout + stderr)\n    return 1\n  return 0\n\n\ndef fixFileExpectingNoChange(file):\n  command, infile, outfile, status, stdout = fixFileHelper(file)\n  if status != 0:\n    return 1\n  status, stdout, stderr = runCommand('diff ' + outfile + ' ' + infile)\n  if status != 0:\n    logging.error(file + ': expected file to remain unchanged')\n    return 1\n  return 0\n\n\ndef emitStdoutAsError(stdout):\n  logging.error(\"\\n\".join(stdout))\n\n\ndef expectError(filename, status, stdout, expected_substring):\n  if status == 0:\n    logging.error(\"%s: Expected failure `%s`, but succeeded\" % (filename, expected_substring))\n    return 1\n  for line in stdout:\n    if expected_substring in line:\n      return 0\n  logging.error(\"%s: Could not find '%s' in:\\n\" % (filename, expected_substring))\n  emitStdoutAsError(stdout)\n  return 1\n\n\ndef fixFileExpectingFailure(filename, expected_substring):\n  command, infile, outfile, status, stdout = fixFileHelper(filename)\n  return expectError(filename, status, stdout, expected_substring)\n\n\ndef checkFileExpectingError(filename, expected_substring, extra_input_files=None):\n  command, status, stdout = runCheckFormat(\n      \"check\", getInputFile(filename, extra_input_files=extra_input_files))\n  return expectError(filename, status, stdout, expected_substring)\n\n\ndef checkAndFixError(filename, 
expected_substring, extra_input_files=None):\n  errors = checkFileExpectingError(filename,\n                                   expected_substring,\n                                   extra_input_files=extra_input_files)\n  errors += fixFileExpectingSuccess(filename, extra_input_files=extra_input_files)\n  return errors\n\n\ndef checkToolNotFoundError():\n  # Temporarily change PATH to test the error about lack of external tools.\n  oldPath = os.environ[\"PATH\"]\n  os.environ[\"PATH\"] = \"/sbin:/usr/sbin\"\n  clang_format = os.getenv(\"CLANG_FORMAT\", \"clang-format-9\")\n  # If CLANG_FORMAT points directly to the binary, skip this test.\n  if os.path.isfile(clang_format) and os.access(clang_format, os.X_OK):\n    os.environ[\"PATH\"] = oldPath\n    return 0\n  errors = checkFileExpectingError(\"no_namespace_envoy.cc\", \"Command %s not found.\" % clang_format)\n  os.environ[\"PATH\"] = oldPath\n  return errors\n\n\ndef checkUnfixableError(filename, expected_substring):\n  errors = checkFileExpectingError(filename, expected_substring)\n  errors += fixFileExpectingFailure(filename, expected_substring)\n  return errors\n\n\ndef checkFileExpectingOK(filename):\n  command, status, stdout = runCheckFormat(\"check\", getInputFile(filename))\n  if status != 0:\n    logging.error(\"Expected %s to have no errors; status=%d, output:\\n\" % (filename, status))\n    emitStdoutAsError(stdout)\n  return status + fixFileExpectingNoChange(filename)\n\n\ndef runChecks():\n  errors = 0\n\n  # The following error is the error about unavailability of external tools.\n  errors += checkToolNotFoundError()\n\n  # The following errors can be detected but not fixed automatically.\n  errors += checkUnfixableError(\"no_namespace_envoy.cc\",\n                                \"Unable to find Envoy namespace or NOLINT(namespace-envoy)\")\n  errors += checkUnfixableError(\"mutex.cc\", \"Don't use <mutex> or <condition_variable*>\")\n  errors += checkUnfixableError(\"condition_variable.cc\",\n   
                             \"Don't use <mutex> or <condition_variable*>\")\n  errors += checkUnfixableError(\"condition_variable_any.cc\",\n                                \"Don't use <mutex> or <condition_variable*>\")\n  errors += checkUnfixableError(\"shared_mutex.cc\", \"shared_mutex\")\n  errors += checkUnfixableError(\"shared_mutex.cc\", \"shared_mutex\")\n  real_time_inject_error = (\n      \"Don't reference real-world time sources from production code; use injection\")\n  errors += checkUnfixableError(\"real_time_source.cc\", real_time_inject_error)\n  errors += checkUnfixableError(\"real_time_system.cc\", real_time_inject_error)\n  errors += checkUnfixableError(\n      \"duration_value.cc\",\n      \"Don't use ambiguous duration(value), use an explicit duration type, e.g. Event::TimeSystem::Milliseconds(value)\"\n  )\n  errors += checkUnfixableError(\"system_clock.cc\", real_time_inject_error)\n  errors += checkUnfixableError(\"steady_clock.cc\", real_time_inject_error)\n  errors += checkUnfixableError(\n      \"unpack_to.cc\", \"Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead\")\n  errors += checkUnfixableError(\"condvar_wait_for.cc\", real_time_inject_error)\n  errors += checkUnfixableError(\"sleep.cc\", real_time_inject_error)\n  errors += checkUnfixableError(\"std_atomic_free_functions.cc\", \"std::atomic_*\")\n  errors += checkUnfixableError(\"std_get_time.cc\", \"std::get_time\")\n  errors += checkUnfixableError(\"no_namespace_envoy.cc\",\n                                \"Unable to find Envoy namespace or NOLINT(namespace-envoy)\")\n  errors += checkUnfixableError(\"bazel_tools.BUILD\", \"unexpected @bazel_tools reference\")\n  errors += checkUnfixableError(\"proto.BUILD\", \"unexpected direct external dependency on protobuf\")\n  errors += checkUnfixableError(\"proto_deps.cc\", \"unexpected direct dependency on google.protobuf\")\n  errors += checkUnfixableError(\"attribute_packed.cc\", \"Don't use __attribute__((packed))\")\n  
errors += checkUnfixableError(\"designated_initializers.cc\", \"Don't use designated initializers\")\n  errors += checkUnfixableError(\"elvis_operator.cc\", \"Don't use the '?:' operator\")\n  errors += checkUnfixableError(\"testing_test.cc\",\n                                \"Don't use 'using testing::Test;, elaborate the type instead\")\n  errors += checkUnfixableError(\n      \"serialize_as_string.cc\",\n      \"Don't use MessageLite::SerializeAsString for generating deterministic serialization\")\n  errors += checkUnfixableError(\n      \"version_history/current.rst\",\n      \"Version history not in alphabetical order (zzzzz vs aaaaa): please check placement of line\")\n  errors += checkUnfixableError(\n      \"version_history/current.rst\",\n      \"Version history not in alphabetical order (this vs aaaa): please check placement of line\")\n  errors += checkUnfixableError(\n      \"version_history/current.rst\",\n      \"Version history line malformed. Does not match VERSION_HISTORY_NEW_LINE_REGEX in \"\n      \"check_format.py\")\n  errors += checkUnfixableError(\n      \"counter_from_string.cc\",\n      \"Don't lookup stats by name at runtime; use StatName saved during construction\")\n  errors += checkUnfixableError(\n      \"gauge_from_string.cc\",\n      \"Don't lookup stats by name at runtime; use StatName saved during construction\")\n  errors += checkUnfixableError(\n      \"histogram_from_string.cc\",\n      \"Don't lookup stats by name at runtime; use StatName saved during construction\")\n  errors += checkUnfixableError(\n      \"regex.cc\", \"Don't use std::regex in code that handles untrusted input. Use RegexMatcher\")\n  errors += checkUnfixableError(\n      \"grpc_init.cc\",\n      \"Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. 
\" +\n      \"See #8282\")\n  errors += checkUnfixableError(\n      \"grpc_shutdown.cc\",\n      \"Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. \" +\n      \"See #8282\")\n  errors += checkUnfixableError(\"clang_format_double_off.cc\", \"clang-format nested off\")\n  errors += checkUnfixableError(\"clang_format_trailing_off.cc\", \"clang-format remains off\")\n  errors += checkUnfixableError(\"clang_format_double_on.cc\", \"clang-format nested on\")\n  errors += fixFileExpectingFailure(\n      \"api/missing_package.proto\",\n      \"Unable to find package name for proto file: ./api/missing_package.proto\")\n  errors += checkUnfixableError(\"proto_enum_mangling.cc\",\n                                \"Don't use mangled Protobuf names for enum constants\")\n  errors += checkUnfixableError(\"test_naming.cc\",\n                                \"Test names should be CamelCase, starting with a capital letter\")\n  errors += checkUnfixableError(\n      \"test/register_factory.cc\",\n      \"Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, use \"\n      \"Registry::InjectFactory instead.\")\n  errors += checkUnfixableError(\"strerror.cc\",\n                                \"Don't use strerror; use Envoy::errorDetails instead\")\n  errors += checkUnfixableError(\n      \"std_unordered_map.cc\", \"Don't use std::unordered_map; use absl::flat_hash_map instead \" +\n      \"or absl::node_hash_map if pointer stability of keys/values is required\")\n  errors += checkUnfixableError(\n      \"std_unordered_set.cc\", \"Don't use std::unordered_set; use absl::flat_hash_set instead \" +\n      \"or absl::node_hash_set if pointer stability of keys/values is required\")\n  errors += checkUnfixableError(\"std_any.cc\", \"Don't use std::any; use absl::any instead\")\n  errors += checkUnfixableError(\"std_get_if.cc\", \"Don't use std::get_if; use absl::get_if instead\")\n  errors += checkUnfixableError(\n      
\"std_holds_alternative.cc\",\n      \"Don't use std::holds_alternative; use absl::holds_alternative instead\")\n  errors += checkUnfixableError(\"std_make_optional.cc\",\n                                \"Don't use std::make_optional; use absl::make_optional instead\")\n  errors += checkUnfixableError(\"std_monostate.cc\",\n                                \"Don't use std::monostate; use absl::monostate instead\")\n  errors += checkUnfixableError(\"std_optional.cc\",\n                                \"Don't use std::optional; use absl::optional instead\")\n  errors += checkUnfixableError(\"std_string_view.cc\",\n                                \"Don't use std::string_view; use absl::string_view instead\")\n  errors += checkUnfixableError(\"std_variant.cc\",\n                                \"Don't use std::variant; use absl::variant instead\")\n  errors += checkUnfixableError(\"std_visit.cc\", \"Don't use std::visit; use absl::visit instead\")\n  errors += checkUnfixableError(\n      \"throw.cc\", \"Don't introduce throws into exception-free files, use error statuses instead.\")\n  errors += checkUnfixableError(\"pgv_string.proto\", \"min_bytes is DEPRECATED, Use min_len.\")\n  errors += checkFileExpectingOK(\"commented_throw.cc\")\n\n  # The following files have errors that can be automatically fixed.\n  errors += checkAndFixError(\"over_enthusiastic_spaces.cc\",\n                             \"./over_enthusiastic_spaces.cc:3: over-enthusiastic spaces\")\n  errors += checkAndFixError(\"extra_enthusiastic_spaces.cc\",\n                             \"./extra_enthusiastic_spaces.cc:3: over-enthusiastic spaces\")\n  errors += checkAndFixError(\"angle_bracket_include.cc\",\n                             \"envoy includes should not have angle brackets\")\n  errors += checkAndFixError(\"proto_style.cc\", \"incorrect protobuf type reference\")\n  errors += checkAndFixError(\"long_line.cc\", \"clang-format check failed\")\n  errors += checkAndFixError(\"header_order.cc\", 
\"header_order.py check failed\")\n  errors += checkAndFixError(\"clang_format_on.cc\",\n                             \"./clang_format_on.cc:7: over-enthusiastic spaces\")\n  # Validate that a missing license is added.\n  errors += checkAndFixError(\"license.BUILD\", \"envoy_build_fixer check failed\")\n  # Validate that an incorrect license is replaced and reordered.\n  errors += checkAndFixError(\"update_license.BUILD\", \"envoy_build_fixer check failed\")\n  # Validate that envoy_package() is added where there is an envoy_* rule occurring.\n  errors += checkAndFixError(\"add_envoy_package.BUILD\", \"envoy_build_fixer check failed\")\n  # Validate that we don't add envoy_package() when no envoy_* rule.\n  errors += checkFileExpectingOK(\"skip_envoy_package.BUILD\")\n  # Validate that we clean up gratuitous blank lines.\n  errors += checkAndFixError(\"canonical_spacing.BUILD\", \"envoy_build_fixer check failed\")\n  # Validate that unused loads are removed.\n  errors += checkAndFixError(\"remove_unused_loads.BUILD\", \"envoy_build_fixer check failed\")\n  # Validate that API proto package deps are computed automagically.\n  errors += checkAndFixError(\"canonical_api_deps.BUILD\",\n                             \"envoy_build_fixer check failed\",\n                             extra_input_files=[\n                                 \"canonical_api_deps.cc\", \"canonical_api_deps.h\",\n                                 \"canonical_api_deps.other.cc\"\n                             ])\n  errors += checkAndFixError(\"bad_envoy_build_sys_ref.BUILD\", \"Superfluous '@envoy//' prefix\")\n  errors += checkAndFixError(\"proto_format.proto\", \"clang-format check failed\")\n  errors += checkAndFixError(\n      \"cpp_std.cc\",\n      \"term absl::make_unique< should be replaced with standard library term std::make_unique<\")\n\n  errors += checkFileExpectingOK(\"real_time_source_override.cc\")\n  errors += checkFileExpectingOK(\"duration_value_zero.cc\")\n  errors += 
checkFileExpectingOK(\"time_system_wait_for.cc\")\n  errors += checkFileExpectingOK(\"clang_format_off.cc\")\n  return errors\n\n\nif __name__ == \"__main__\":\n  parser = argparse.ArgumentParser(description='tester for check_format.py.')\n  parser.add_argument('--log', choices=['INFO', 'WARN', 'ERROR'], default='INFO')\n  args = parser.parse_args()\n  logging.basicConfig(format='%(message)s', level=args.log)\n\n  # Now create a temp directory to copy the input files, so we can fix them\n  # without actually fixing our testdata. This requires chdiring to the temp\n  # directory, so it's annoying to comingle check-tests and fix-tests.\n  with tempfile.TemporaryDirectory() as tmp:\n    os.chdir(tmp)\n    errors = runChecks()\n\n  if errors != 0:\n    logging.error(\"%d FAILURES\" % errors)\n    exit(1)\n  logging.warning(\"PASS\")\n"
  },
  {
    "path": "tools/code_format/check_format_test_helper.sh",
    "content": "#!/bin/bash\n\ntools=\"$(dirname \"$(dirname \"$(realpath \"$0\")\")\")\"\nroot=$(realpath \"$tools/..\")\n\ncd \"$root\" || exit 1\n# to satisfy dependency on run_command\nexport PYTHONPATH=\"$tools\"\n./tools/code_format/check_format_test_helper.py \"$@\"\n"
  },
  {
    "path": "tools/code_format/check_shellcheck_format.sh",
    "content": "#!/bin/bash -e\n\nEXCLUDED_SHELLFILES=${EXCLUDED_SHELLFILES:-\"^.github|.rst$|.md$\"}\n\n\nfind_shell_files () {\n    local shellfiles\n    shellfiles=()\n    shellfiles+=(\"$(git grep \"^#!/bin/bash\" | cut -d: -f1)\")\n    shellfiles+=(\"$(git grep \"^#!/bin/sh\" | cut -d: -f1)\")\n    shellfiles+=(\"$(find . -name \"*.sh\" | cut -d/ -f2-)\")\n    shellfiles=(\"$(echo \"${shellfiles[@]}\" | tr ' ' '\\n' | sort | uniq)\")\n    for file in \"${shellfiles[@]}\"; do\n\techo \"$file\"\n    done\n}\n\nrun_shellcheck_on () {\n    local file\n    file=\"$1\"\n    echo \"Shellcheck: ${file}\"\n    shellcheck -f diff -x \"$file\"\n}\n\nrun_shellchecks () {\n    local all_shellfiles=() failed=() failure \\\n\t  filtered_shellfiles=() found_shellfiles \\\n\t  line skipped_count success_count\n\n    found_shellfiles=$(find_shell_files)\n    while read -r line; do all_shellfiles+=(\"$line\"); done \\\n\t<<< \"$found_shellfiles\"\n    while read -r line; do filtered_shellfiles+=(\"$line\"); done \\\n\t<<< \"$(echo -e \"$found_shellfiles\" | grep -vE \"${EXCLUDED_SHELLFILES}\")\"\n\n    for file in \"${filtered_shellfiles[@]}\"; do\n\trun_shellcheck_on \"$file\" || {\n\t    failed+=(\"$file\")\n\t}\n    done\n    if [[ \"${#failed[@]}\" -ne 0 ]]; then\n\techo -e \"\\nShellcheck failures:\" >&2\n\tfor failure in \"${failed[@]}\"; do\n\t    echo \"$failure\" >&2\n\tdone\n    fi\n    skipped_count=$((${#all_shellfiles[@]} - ${#filtered_shellfiles[@]}))\n    success_count=$((${#filtered_shellfiles[@]} - ${#failed[@]}))\n\n    echo -e \"\\nShellcheck totals (skipped/failed/success): ${skipped_count}/${#failed[@]}/${success_count}\"\n    if [[ \"${#failed[@]}\" -ne 0 ]]; then\n\treturn 1\n    fi\n}\n\nrun_shellchecks\n"
  },
  {
    "path": "tools/code_format/common.py",
    "content": "def includeDirOrder():\n  return (\n      \"envoy\",\n      \"common\",\n      \"source\",\n      \"exe\",\n      \"server\",\n      \"extensions\",\n      \"test\",\n  )\n"
  },
  {
    "path": "tools/code_format/envoy_build_fixer.py",
    "content": "#!/usr/bin/env python3\n\n# Enforces:\n# - License headers on Envoy BUILD files\n# - envoy_package() or envoy_extension_package() top-level invocation for standard Envoy package setup.\n# - Infers API dependencies from source files.\n# - Misc. cleanups: avoids redundant blank lines, removes unused loads.\n# - Maybe more later?\n\nimport functools\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\nimport pathlib\nimport paths\n\n# Where does Buildozer live?\nBUILDOZER_PATH = paths.getBuildozer()\n\n# Where does Buildifier live?\nBUILDIFIER_PATH = paths.getBuildifier()\n\n# Canonical Envoy license.\nLICENSE_STRING = 'licenses([\"notice\"])  # Apache 2\\n\\n'\n\n# Match any existing licenses in a BUILD file.\nOLD_LICENSES_REGEX = re.compile(r'^licenses\\(.*\\n+', re.MULTILINE)\n\n# Match an Envoy rule, e.g. envoy_cc_library( in a BUILD file.\nENVOY_RULE_REGEX = re.compile(r'envoy[_\\w]+\\(')\n\n# Match a load() statement for the envoy_package macros.\nPACKAGE_LOAD_BLOCK_REGEX = re.compile('(\"envoy_package\".*?\\)\\n)', re.DOTALL)\nEXTENSION_PACKAGE_LOAD_BLOCK_REGEX = re.compile('(\"envoy_extension_package\".*?\\)\\n)', re.DOTALL)\n\n# Match Buildozer 'print' output. Example of Buildozer print output:\n# cc_library json_transcoder_filter_lib [json_transcoder_filter.cc] (missing) (missing)\nBUILDOZER_PRINT_REGEX = re.compile(\n    '\\s*([\\w_]+)\\s+([\\w_]+)\\s+[(\\[](.*?)[)\\]]\\s+[(\\[](.*?)[)\\]]\\s+[(\\[](.*?)[)\\]]')\n\n# Match API header include in Envoy source file?\nAPI_INCLUDE_REGEX = re.compile('#include \"(envoy/.*)/[^/]+\\.pb\\.(validate\\.)?h\"')\n\n\nclass EnvoyBuildFixerError(Exception):\n  pass\n\n\n# Run Buildozer commands on a string representing a BUILD file.\ndef RunBuildozer(cmds, contents):\n  with tempfile.NamedTemporaryFile(mode='w') as cmd_file:\n    # We send the BUILD contents to buildozer on stdin and receive the\n    # transformed BUILD on stdout. 
The commands are provided in a file.\n    cmd_input = '\\n'.join('%s|-:%s' % (cmd, target) for cmd, target in cmds)\n    cmd_file.write(cmd_input)\n    cmd_file.flush()\n    r = subprocess.run([BUILDOZER_PATH, '-stdout', '-f', cmd_file.name],\n                       input=contents.encode(),\n                       stdout=subprocess.PIPE,\n                       stderr=subprocess.PIPE)\n    # Buildozer uses 3 for success but no change (0 is success and changed).\n    if r.returncode != 0 and r.returncode != 3:\n      raise EnvoyBuildFixerError('buildozer execution failed: %s' % r)\n    # Sometimes buildozer feels like returning nothing when the transform is a\n    # nop.\n    if not r.stdout:\n      return contents\n    return r.stdout.decode('utf-8')\n\n\n# Add an Apache 2 license and envoy_package() import and rule as needed.\ndef FixPackageAndLicense(path, contents):\n  regex_to_use = PACKAGE_LOAD_BLOCK_REGEX\n  package_string = 'envoy_package'\n\n  if 'source/extensions' in path:\n    regex_to_use = EXTENSION_PACKAGE_LOAD_BLOCK_REGEX\n    package_string = 'envoy_extension_package'\n\n  # Ensure we have an envoy_package import load if this is a real Envoy package. 
We also allow\n  # the prefix to be overridden if envoy is included in a larger workspace.\n  if re.search(ENVOY_RULE_REGEX, contents):\n    new_load = 'new_load {}//bazel:envoy_build_system.bzl %s' % package_string\n    contents = RunBuildozer([\n        (new_load.format(os.getenv(\"ENVOY_BAZEL_PREFIX\", \"\")), '__pkg__'),\n    ], contents)\n    # Envoy package is inserted after the load block containing the\n    # envoy_package import.\n    package_and_parens = package_string + '()'\n    if package_and_parens not in contents:\n      contents = re.sub(regex_to_use, r'\\1\\n%s\\n\\n' % package_and_parens, contents)\n      if package_and_parens not in contents:\n        raise EnvoyBuildFixerError('Unable to insert %s' % package_and_parens)\n\n  # Delete old licenses.\n  if re.search(OLD_LICENSES_REGEX, contents):\n    contents = re.sub(OLD_LICENSES_REGEX, '', contents)\n  # Add canonical Apache 2 license.\n  contents = LICENSE_STRING + contents\n  return contents\n\n\n# Run Buildifier commands on a string with lint mode.\ndef BuildifierLint(contents):\n  r = subprocess.run([BUILDIFIER_PATH, '-lint=fix', '-mode=fix', '-type=build'],\n                     input=contents.encode(),\n                     stdout=subprocess.PIPE,\n                     stderr=subprocess.PIPE)\n  if r.returncode != 0:\n    raise EnvoyBuildFixerError('buildozer execution failed: %s' % r)\n  return r.stdout.decode('utf-8')\n\n\n# Find all the API headers in a C++ source file.\ndef FindApiHeaders(source_path):\n  api_hdrs = set([])\n  contents = pathlib.Path(source_path).read_text(encoding='utf8')\n  for line in contents.split('\\n'):\n    match = re.match(API_INCLUDE_REGEX, line)\n    if match:\n      api_hdrs.add(match.group(1))\n  return api_hdrs\n\n\n# Infer and adjust rule dependencies in BUILD files for @envoy_api proto\n# files. 
This is very cheap to do purely via a grep+buildozer syntax level\n# step.\n#\n# This could actually be done much more generally, for all symbols and headers\n# if we made use of Clang libtooling semantic analysis. However, this requires a\n# compilation database and full build of Envoy, envoy_build_fixer.py is run\n# under check_format, which should be fast for developers.\ndef FixApiDeps(path, contents):\n  source_dirname = os.path.dirname(path)\n  buildozer_out = RunBuildozer([\n      ('print kind name srcs hdrs deps', '*'),\n  ], contents).strip()\n  deps_mutation_cmds = []\n  for line in buildozer_out.split('\\n'):\n    match = re.match(BUILDOZER_PRINT_REGEX, line)\n    if not match:\n      # buildozer might emit complex multiline output when a 'select' or other\n      # macro is used. We're not smart enough to handle these today and they\n      # require manual fixup.\n      # TODO(htuch): investigate using --output_proto on buildozer to be able to\n      # consume something more usable in this situation.\n      continue\n    kind, name, srcs, hdrs, deps = match.groups()\n    if not name:\n      continue\n    source_paths = []\n    if srcs != 'missing':\n      source_paths.extend(\n          os.path.join(source_dirname, f)\n          for f in srcs.split()\n          if f.endswith('.cc') or f.endswith('.h'))\n    if hdrs != 'missing':\n      source_paths.extend(os.path.join(source_dirname, f) for f in hdrs.split() if f.endswith('.h'))\n    api_hdrs = set([])\n    for p in source_paths:\n      # We're not smart enough to infer on generated files.\n      if os.path.exists(p):\n        api_hdrs = api_hdrs.union(FindApiHeaders(p))\n    actual_api_deps = set(['@envoy_api//%s:pkg_cc_proto' % h for h in api_hdrs])\n    existing_api_deps = set([])\n    if deps != 'missing':\n      existing_api_deps = set([\n          d for d in deps.split() if d.startswith('@envoy_api') and d.endswith('pkg_cc_proto') and\n          d != 
'@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto'\n      ])\n    deps_to_remove = existing_api_deps.difference(actual_api_deps)\n    if deps_to_remove:\n      deps_mutation_cmds.append(('remove deps %s' % ' '.join(deps_to_remove), name))\n    deps_to_add = actual_api_deps.difference(existing_api_deps)\n    if deps_to_add:\n      deps_mutation_cmds.append(('add deps %s' % ' '.join(deps_to_add), name))\n  return RunBuildozer(deps_mutation_cmds, contents)\n\n\ndef FixBuild(path):\n  with open(path, 'r') as f:\n    contents = f.read()\n  xforms = [\n      functools.partial(FixPackageAndLicense, path),\n      functools.partial(FixApiDeps, path),\n      BuildifierLint,\n  ]\n  for xform in xforms:\n    contents = xform(contents)\n  return contents\n\n\nif __name__ == '__main__':\n  if len(sys.argv) == 2:\n    sys.stdout.write(FixBuild(sys.argv[1]))\n    sys.exit(0)\n  elif len(sys.argv) == 3:\n    reorderd_source = FixBuild(sys.argv[1])\n    with open(sys.argv[2], 'w') as f:\n      f.write(reorderd_source)\n    sys.exit(0)\n  print('Usage: %s <source file path> [<destination file path>]' % sys.argv[0])\n  sys.exit(1)\n"
  },
  {
    "path": "tools/code_format/format_python_tools.py",
    "content": "import argparse\nimport fnmatch\nimport os\nimport sys\n\nfrom yapf.yapflib.yapf_api import FormatFile\n\nEXCLUDE_LIST = ['generated', 'venv']\n\n\ndef collectFiles():\n  \"\"\"Collect all Python files in the tools directory.\n\n  Returns: A collection of python files in the tools directory excluding\n    any directories in the EXCLUDE_LIST constant.\n  \"\"\"\n  # TODO: Add ability to collect a specific file or files.\n  matches = []\n  path_parts = os.getcwd().split('/')\n  dirname = '.'\n  if path_parts[-1] == 'tools':\n    dirname = '/'.join(path_parts[:-1])\n  for root, dirnames, filenames in os.walk(dirname):\n    dirnames[:] = [d for d in dirnames if d not in EXCLUDE_LIST]\n    for filename in fnmatch.filter(filenames, '*.py'):\n      if not filename.endswith('_pb2.py') and not filename.endswith('_pb2_grpc.py'):\n        matches.append(os.path.join(root, filename))\n  return matches\n\n\ndef validateFormat(fix=False):\n  \"\"\"Check the format of python files in the tools directory.\n\n    Arguments:\n      fix: a flag to indicate if fixes should be applied.\n  \"\"\"\n  fixes_required = False\n  failed_update_files = set()\n  successful_update_files = set()\n  for python_file in collectFiles():\n    reformatted_source, encoding, changed = FormatFile(python_file,\n                                                       style_config='tools/code_format/.style.yapf',\n                                                       in_place=fix,\n                                                       print_diff=not fix)\n    if not fix:\n      fixes_required = True if changed else fixes_required\n      if reformatted_source:\n        print(reformatted_source)\n      continue\n    file_list = failed_update_files if reformatted_source else successful_update_files\n    file_list.add(python_file)\n  if fix:\n    displayFixResults(successful_update_files, failed_update_files)\n    fixes_required = len(failed_update_files) > 0\n  return not 
fixes_required\n\n\ndef displayFixResults(successful_files, failed_files):\n  if successful_files:\n    print('Successfully fixed {} files'.format(len(successful_files)))\n\n  if failed_files:\n    print('The following files failed to fix inline:')\n    for failed_file in failed_files:\n      print('  - {}'.format(failed_file))\n\n\nif __name__ == '__main__':\n  parser = argparse.ArgumentParser(description='Tool to format python files.')\n  parser.add_argument('action',\n                      choices=['check', 'fix'],\n                      default='check',\n                      help='Fix invalid syntax in files.')\n  args = parser.parse_args()\n  is_valid = validateFormat(args.action == 'fix')\n  sys.exit(0 if is_valid else 1)\n"
  },
  {
    "path": "tools/code_format/format_python_tools.sh",
    "content": "#!/bin/bash\n\n\"$(dirname \"$0\")\"/../git/modified_since_last_github_commit.sh ./ py || \\\n  [[ \"${FORCE_PYTHON_FORMAT}\" == \"yes\" ]] || \\\n  { echo \"Skipping format_python_tools.sh due to no Python changes\"; exit 0; }\n\n. tools/shell_utils.sh\n\nset -e\n\necho \"Running Python format check...\"\npython_venv format_python_tools \"$1\"\n\necho \"Running Python3 flake8 check...\"\npython3 -m flake8 --version\npython3 -m flake8 . --exclude=*/venv/* --count --select=E9,F63,F72,F82 --show-source --statistics\n"
  },
  {
    "path": "tools/code_format/header_order.py",
    "content": "#!/usr/bin/env python3\n\n# Enforce header order in a given file. This will only reorder in the first sequence of contiguous\n# #include statements, so it will not play well with #ifdef.\n#\n# This attempts to enforce the guidelines at\n# https://google.github.io/styleguide/cppguide.html#Names_and_Order_of_Includes\n# with some allowances for Envoy-specific idioms.\n#\n# There is considerable overlap with what this does and clang-format's IncludeCategories (see\n# https://clang.llvm.org/docs/ClangFormatStyleOptions.html). But, clang-format doesn't seem smart\n# enough to handle block splitting and correctly detecting the main header subject to the Envoy\n# canonical paths.\n\nimport argparse\nimport common\nimport pathlib\nimport re\nimport sys\n\n\ndef ReorderHeaders(path):\n  source = pathlib.Path(path).read_text(encoding='utf-8')\n\n  all_lines = iter(source.split('\\n'))\n  before_includes_lines = []\n  includes_lines = []\n  after_includes_lines = []\n\n  # Collect all the lines prior to the first #include in before_includes_lines.\n  try:\n    while True:\n      line = next(all_lines)\n      if line.startswith('#include'):\n        includes_lines.append(line)\n        break\n      before_includes_lines.append(line)\n  except StopIteration:\n    pass\n\n  # Collect all the #include and whitespace lines in includes_lines.\n  try:\n    while True:\n      line = next(all_lines)\n      if not line:\n        continue\n      if not line.startswith('#include'):\n        after_includes_lines.append(line)\n        break\n      includes_lines.append(line)\n  except StopIteration:\n    pass\n\n  # Collect the remaining lines in after_includes_lines.\n  after_includes_lines += list(all_lines)\n\n  # Filter for includes that finds the #include of the header file associated with the source file\n  # being processed. E.g. 
if 'path' is source/common/common/hex.cc, this filter matches\n  # \"common/common/hex.h\".\n  def file_header_filter():\n    return lambda f: f.endswith('.h\"') and path.endswith(f[1:-3] + '.cc')\n\n  def regex_filter(regex):\n    return lambda f: re.match(regex, f)\n\n  # Filters that define the #include blocks\n  block_filters = [\n      file_header_filter(),\n      regex_filter('<.*\\.h>'),\n      regex_filter('<.*>'),\n  ]\n  for subdir in include_dir_order:\n    block_filters.append(regex_filter('\"' + subdir + '/.*\"'))\n\n  blocks = []\n  already_included = set([])\n  for b in block_filters:\n    block = []\n    for line in includes_lines:\n      header = line[len('#include '):]\n      if line not in already_included and b(header):\n        block.append(line)\n        already_included.add(line)\n    if len(block) > 0:\n      blocks.append(block)\n\n  # Anything not covered by block_filters gets its own block.\n  misc_headers = list(set(includes_lines).difference(already_included))\n  if len(misc_headers) > 0:\n    blocks.append(misc_headers)\n\n  reordered_includes_lines = '\\n\\n'.join(['\\n'.join(sorted(block)) for block in blocks])\n\n  if reordered_includes_lines:\n    reordered_includes_lines += '\\n'\n\n  return '\\n'.join(\n      filter(lambda x: x, [\n          '\\n'.join(before_includes_lines),\n          reordered_includes_lines,\n          '\\n'.join(after_includes_lines),\n      ]))\n\n\nif __name__ == '__main__':\n  parser = argparse.ArgumentParser(description='Header reordering.')\n  parser.add_argument('--path', type=str, help='specify the path to the header file')\n  parser.add_argument('--rewrite', action='store_true', help='rewrite header file in-place')\n  parser.add_argument('--include_dir_order',\n                      type=str,\n                      default=','.join(common.includeDirOrder()),\n                      help='specify the header block include directory order')\n  args = parser.parse_args()\n  target_path = args.path\n  
include_dir_order = args.include_dir_order.split(',')\n  reorderd_source = ReorderHeaders(target_path)\n  if args.rewrite:\n    pathlib.Path(target_path).write_text(reorderd_source, encoding='utf-8')\n  else:\n    sys.stdout.buffer.write(reorderd_source.encode('utf-8'))\n"
  },
  {
    "path": "tools/code_format/paths.py",
    "content": "import os\nimport os.path\nimport shutil\n\n\ndef getBuildifier():\n  return os.getenv(\"BUILDIFIER_BIN\") or (os.path.expandvars(\"$GOPATH/bin/buildifier\")\n                                         if os.getenv(\"GOPATH\") else shutil.which(\"buildifier\"))\n\n\ndef getBuildozer():\n  return os.getenv(\"BUILDOZER_BIN\") or (os.path.expandvars(\"$GOPATH/bin/buildozer\")\n                                        if os.getenv(\"GOPATH\") else shutil.which(\"buildozer\"))\n"
  },
  {
    "path": "tools/code_format/requirements.txt",
    "content": "flake8==3.8.3 \\\n    --hash=sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c \\\n    --hash=sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208\nimportlib-metadata==2.0.0 \\\n    --hash=sha256:77a540690e24b0305878c37ffd421785a6f7e53c8b5720d211b211de8d0e95da \\\n    --hash=sha256:cefa1a2f919b866c5beb7c9f7b0ebb4061f30a8a9bf16d609b000e2dfaceb9c3\nmccabe==0.6.1 \\\n    --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \\\n    --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f\npycodestyle==2.6.0 \\\n    --hash=sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367 \\\n    --hash=sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e\npyflakes==2.2.0 \\\n    --hash=sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92 \\\n    --hash=sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8\nyapf==0.30.0 \\\n    --hash=sha256:3000abee4c28daebad55da6c85f3cd07b8062ce48e2e9943c8da1b9667d48427 \\\n    --hash=sha256:3abf61ba67cf603069710d30acbc88cfe565d907e16ad81429ae90ce9651e0c9\nzipp==3.2.0 \\\n    --hash=sha256:43f4fa8d8bb313e65d8323a3952ef8756bf40f9a5c3ea7334be23ee4ec8278b6 \\\n    --hash=sha256:b52f22895f4cfce194bc8172f3819ee8de7540aa6d873535a8668b730b8b411f\n"
  },
  {
    "path": "tools/config_validation/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_binary\")\nload(\"@config_validation_pip3//:requirements.bzl\", \"requirement\")\n\nlicenses([\"notice\"])  # Apache 2\n\npy_binary(\n    name = \"validate_fragment\",\n    srcs = [\"validate_fragment.py\"],\n    data = [\"//tools/type_whisperer:all_protos_with_ext_pb_text.pb_text\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        requirement(\"PyYAML\"),\n        \"@bazel_tools//tools/python/runfiles\",\n        \"@com_google_protobuf//:protobuf_python\",\n    ],\n)\n"
  },
  {
    "path": "tools/config_validation/requirements.txt",
    "content": "PyYAML==5.3.1 \\\n    --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \\\n    --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \\\n    --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \\\n    --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \\\n    --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \\\n    --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \\\n    --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \\\n    --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \\\n    --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \\\n    --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \\\n    --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a\n"
  },
  {
    "path": "tools/config_validation/validate_fragment.py",
    "content": "# Validate a YAML fragment against an Envoy API proto3 type.\n#\n# Example usage:\n#\n# bazel run //tools/config_validation:validate_fragment -- \\\n#   envoy.config.bootstrap.v3.Bootstrap $PWD/configs/google_com_proxy.v2.yaml\n\nimport json\nimport pathlib\nimport sys\n\nimport yaml\n\nfrom google.protobuf import descriptor_pb2\nfrom google.protobuf import descriptor_pool\nfrom google.protobuf import json_format\nfrom google.protobuf import message_factory\nfrom google.protobuf import text_format\n\nfrom bazel_tools.tools.python.runfiles import runfiles\n\nimport argparse\n\n\ndef ValidateFragment(type_name, fragment):\n  \"\"\"Validate a dictionary representing a JSON/YAML fragment against an Envoy API proto3 type.\n\n  Throws Protobuf errors on parsing exceptions, successful validations produce\n  no result.\n\n  Args:\n    type_name: a string providing the type name, e.g.\n      envoy.config.bootstrap.v3.Bootstrap.\n    fragment: a dictionary representing the parsed JSON/YAML configuration\n      fragment.\n  \"\"\"\n  json_fragment = json.dumps(fragment)\n\n  r = runfiles.Create()\n  all_protos_pb_text_path = r.Rlocation(\n      'envoy/tools/type_whisperer/all_protos_with_ext_pb_text.pb_text')\n  file_desc_set = descriptor_pb2.FileDescriptorSet()\n  text_format.Parse(pathlib.Path(all_protos_pb_text_path).read_text(),\n                    file_desc_set,\n                    allow_unknown_extension=True)\n\n  pool = descriptor_pool.DescriptorPool()\n  for f in file_desc_set.file:\n    pool.Add(f)\n  desc = pool.FindMessageTypeByName(type_name)\n  msg = message_factory.MessageFactory(pool=pool).GetPrototype(desc)()\n  json_format.Parse(json_fragment, msg, descriptor_pool=pool)\n\n\ndef ParseArgs():\n  parser = argparse.ArgumentParser(\n      description='Validate a YAML fragment against an Envoy API proto3 type.')\n  parser.add_argument(\n      'message_type',\n      help='a string providing the type name, e.g. 
envoy.config.bootstrap.v3.Bootstrap.')\n  parser.add_argument('fragment_path', nargs='?', help='Path to a YAML configuration fragment.')\n  parser.add_argument('-s', required=False, help='YAML configuration fragment.')\n\n  return parser.parse_args()\n\n\nif __name__ == '__main__':\n  parsed_args = ParseArgs()\n  message_type = parsed_args.message_type\n  content = parsed_args.s if (parsed_args.fragment_path is None) else pathlib.Path(\n      parsed_args.fragment_path).read_text()\n  ValidateFragment(message_type, yaml.safe_load(content))\n"
  },
  {
    "path": "tools/debugging/run-valgrind.sh",
    "content": "#!/bin/sh\n#\n# Helper script to run tests under valgrind.  Usage:\n#    bazel test --run_under=`pwd`/tools/debugging/run-valgrind.sh ...\n#\n\ndir=$(dirname \"$0\")\n\n# In order to add suppressions, it's helpful to run the tool in a mode\n# where it uses the suppressions file we have so far, but also\n# generates possible new suppressions in the test output, so you can\n# paste them into the suppressions file.\n#yes | exec valgrind --gen-suppressions=yes --suppressions=\"$dir/valgrind-suppressions.txt\" \"$@\"\n\n# Ordinarily, we run with cleaner output.\nexec valgrind --gen-suppressions=yes --suppressions=\"$dir/valgrind-suppressions.txt\" \"$@\"\n"
  },
  {
    "path": "tools/debugging/valgrind-suppressions.txt",
    "content": "{\n  eliminate new/free mismatch warnings altogether, as something in the current toolchain generates this scenario.\n  Memcheck:Free\n  fun:free\n  ...\n}\n{\n   re2 cond-jump failure\n   Memcheck:Cond\n   fun:_ZNK3re210SparseSetTIvE8containsEi\n   ...\n}\n{\n   re2 uninit-value\n   Memcheck:Value8\n   fun:_ZNK3re210SparseSetTIvE8containsEi\n   ...\n}\n{\n   re2 cond-jump failure\n   Memcheck:Cond\n   fun:_ZNK3re211SparseArrayIiE9has_indexEi\n   ...\n}\n{\n   re2 uninit-value\n   Memcheck:Value8\n   fun:_ZNK3re211SparseArrayIiE9has_indexEi\n   ...\n}\n"
  },
  {
    "path": "tools/dependency/validate.py",
    "content": "#!/usr/bin/env python3\n\"\"\"Validate the relationship between Envoy dependencies and core/extensions.\n\nThis script verifies that bazel query of the build graph is consistent with\nthe use_category metadata in bazel/repository_locations.bzl.\n\"\"\"\n\nimport re\nimport subprocess\nimport sys\n\nfrom importlib.machinery import SourceFileLoader\nfrom importlib.util import spec_from_loader, module_from_spec\n\n# bazel/repository_locations.bzl must have a .bzl suffix for Starlark import, so\n# we are forced to do this workaround.\n_repository_locations_spec = spec_from_loader(\n    'repository_locations',\n    SourceFileLoader('repository_locations', 'bazel/repository_locations.bzl'))\nrepository_locations = module_from_spec(_repository_locations_spec)\n_repository_locations_spec.loader.exec_module(repository_locations)\n\n# source/extensions/extensions_build_config.bzl must have a .bzl suffix for Starlark\n# import, so we are forced to do this workaround.\n_extensions_build_config_spec = spec_from_loader(\n    'extensions_build_config',\n    SourceFileLoader('extensions_build_config', 'source/extensions/extensions_build_config.bzl'))\nextensions_build_config = module_from_spec(_extensions_build_config_spec)\n_extensions_build_config_spec.loader.exec_module(extensions_build_config)\n\nBAZEL_QUERY_EXTERNAL_DEP_RE = re.compile('@(\\w+)//')\nEXTENSION_LABEL_RE = re.compile('(//source/extensions/.*):')\n\n# TODO(htuch): Add API dependencies to metadata, shrink this set.\nUNKNOWN_DEPS = [\n    'org_golang_x_tools', 'com_github_cncf_udpa', 'org_golang_google_protobuf',\n    'io_bazel_rules_nogo', 'com_envoyproxy_protoc_gen_validate', 'opencensus_proto',\n    'io_bazel_rules_go', 'foreign_cc_platform_utils', 'com_github_golang_protobuf',\n    'com_google_googleapis'\n]\nIGNORE_DEPS = set(['envoy', 'envoy_api', 'platforms', 'bazel_tools', 'local_config_cc'] +\n                  UNKNOWN_DEPS)\n\n\nclass DependencyError(Exception):\n  \"\"\"Error in 
dependency relationships.\"\"\"\n  pass\n\n\nclass DependencyInfo(object):\n  \"\"\"Models dependency info in bazel/repositories.bzl.\"\"\"\n\n  def DepsByUseCategory(self, use_category):\n    \"\"\"Find the set of external dependencies in a given use_category.\n\n    Args:\n      use_category: string providing use_category.\n\n    Returns:\n      Set of dependency identifiers that match use_category.\n    \"\"\"\n    return set(name for name, metadata in repository_locations.DEPENDENCY_REPOSITORIES.items()\n               if use_category in metadata['use_category'])\n\n  def GetMetadata(self, dependency):\n    \"\"\"Obtain repository metadata for a dependency.\n\n    Args:\n      dependency: string providing dependency identifier.\n\n    Returns:\n      A dictionary with the repository metadata as defined in\n      bazel/repository_locations.bzl.\n    \"\"\"\n    return repository_locations.DEPENDENCY_REPOSITORIES.get(dependency)\n\n\nclass BuildGraph(object):\n  \"\"\"Models the Bazel build graph.\"\"\"\n\n  def QueryExternalDeps(self, *targets):\n    \"\"\"Query the build graph for transitive external dependencies.\n\n    Args:\n      targets: Bazel targets.\n\n    Returns:\n      A set of dependency identifiers that are reachable from targets.\n    \"\"\"\n    deps_query = ' union '.join(f'deps({l})' for l in targets)\n    deps = subprocess.check_output(['bazel', 'query', deps_query],\n                                   stderr=subprocess.PIPE).decode().splitlines()\n    ext_deps = set()\n    for d in deps:\n      match = BAZEL_QUERY_EXTERNAL_DEP_RE.match(d)\n      if match:\n        ext_dep = match.group(1)\n        if ext_dep not in IGNORE_DEPS:\n          ext_deps.add(ext_dep)\n    return set(ext_deps)\n\n  def ListExtensions(self):\n    \"\"\"List all extensions.\n\n    Returns:\n      Dictionary items from source/extensions/extensions_build_config.bzl.\n    \"\"\"\n    return extensions_build_config.EXTENSIONS.items()\n\n\nclass Validator(object):\n  
\"\"\"Collection of validation methods.\"\"\"\n\n  def __init__(self, dep_info, build_graph):\n    self._dep_info = dep_info\n    self._build_graph = build_graph\n    self._queried_core_deps = build_graph.QueryExternalDeps(\n        '//source/exe:envoy_main_common_with_core_extensions_lib')\n\n  def ValidateBuildGraphStructure(self):\n    \"\"\"Validate basic assumptions about dependency relationship in the build graph.\n\n    Raises:\n      DependencyError: on a dependency validation error.\n    \"\"\"\n    print('Validating build dependency structure...')\n    queried_core_ext_deps = self._build_graph.QueryExternalDeps(\n        '//source/exe:envoy_main_common_with_core_extensions_lib', '//source/extensions/...')\n    queried_all_deps = self._build_graph.QueryExternalDeps('//source/...')\n    if queried_all_deps != queried_core_ext_deps:\n      raise DependencyError('Invalid build graph structure. deps(//source/...) != '\n                            'deps(//source/exe:envoy_main_common_with_core_extensions_lib) '\n                            'union deps(//source/extensions/...)')\n\n  def ValidateTestOnlyDeps(self):\n    \"\"\"Validate that test-only dependencies aren't included in //source/...\n\n    Raises:\n      DependencyError: on a dependency validation error.\n    \"\"\"\n    print('Validating test-only dependencies...')\n    queried_source_deps = self._build_graph.QueryExternalDeps('//source/...')\n    expected_test_only_deps = self._dep_info.DepsByUseCategory('test_only')\n    bad_test_only_deps = expected_test_only_deps.intersection(queried_source_deps)\n    if len(bad_test_only_deps) > 0:\n      raise DependencyError(f'//source depends on test-only dependencies: {bad_test_only_deps}')\n\n  def ValidateDataPlaneCoreDeps(self):\n    \"\"\"Validate dataplane_core dependencies.\n\n    Check that we at least tag as dataplane_core dependencies that match some\n    well-known targets for the data-plane.\n\n    Raises:\n      DependencyError: on a dependency 
validation error.\n    \"\"\"\n    print('Validating data-plane dependencies...')\n    # Necessary but not sufficient for dataplane. With some refactoring we could\n    # probably have more precise tagging of dataplane/controlplane/other deps in\n    # these paths.\n    queried_dataplane_core_min_deps = self._build_graph.QueryExternalDeps(\n        '//source/common/api/...', '//source/common/buffer/...', '//source/common/chromium_url/...',\n        '//source/common/crypto/...', '//source/common/conn_pool/...',\n        '//source/common/formatter/...', '//source/common/http/...', '//source/common/ssl/...',\n        '//source/common/tcp/...', '//source/common/tcp_proxy/...', '//source/common/network/...')\n    expected_dataplane_core_deps = self._dep_info.DepsByUseCategory('dataplane_core')\n    bad_dataplane_core_deps = queried_dataplane_core_min_deps.difference(\n        expected_dataplane_core_deps)\n    if len(bad_dataplane_core_deps) > 0:\n      raise DependencyError(\n          f'Observed dataplane core deps {queried_dataplane_core_min_deps} is not covered by '\n          '\"use_category\" implied core deps {expected_dataplane_core_deps}: {bad_dataplane_core_deps} '\n          'are missing')\n\n  def ValidateControlPlaneDeps(self):\n    \"\"\"Validate controlplane dependencies.\n\n    Check that we at least tag as controlplane dependencies that match some\n    well-known targets for\n    the control-plane.\n\n    Raises:\n      DependencyError: on a dependency validation error.\n    \"\"\"\n    print('Validating control-plane dependencies...')\n    # Necessary but not sufficient for controlplane. 
With some refactoring we could\n    # probably have more precise tagging of dataplane/controlplane/other deps in\n    # these paths.\n    queried_controlplane_core_min_deps = self._build_graph.QueryExternalDeps(\n        '//source/common/config/...')\n    expected_controlplane_core_deps = self._dep_info.DepsByUseCategory('controlplane')\n    bad_controlplane_core_deps = queried_controlplane_core_min_deps.difference(\n        expected_controlplane_core_deps)\n    if len(bad_controlplane_core_deps) > 0:\n      raise DependencyError(\n          f'Observed controlplane core deps {queried_controlplane_core_min_deps} is not covered '\n          'by \"use_category\" implied core deps {expected_controlplane_core_deps}: '\n          '{bad_controlplane_core_deps} are missing')\n\n  def ValidateExtensionDeps(self, name, target):\n    \"\"\"Validate that extensions are correctly declared for dataplane_ext and observability_ext.\n\n    Args:\n      name: extension name.\n      target: extension Bazel target.\n\n    Raises:\n      DependencyError: on a dependency validation error.\n    \"\"\"\n    print(f'Validating extension {name} dependencies...')\n    queried_deps = self._build_graph.QueryExternalDeps(target)\n    marginal_deps = queried_deps.difference(self._queried_core_deps)\n    expected_deps = []\n    for d in marginal_deps:\n      # TODO(htuch): Ensure that queried deps are fully contained in\n      # repository_locations, i.e. that we're tracking with metadata all actual\n      # dependencies. 
Today, we are missing API and pip3 deps based on manual\n      # inspection.\n      metadata = self._dep_info.GetMetadata(d)\n      if metadata:\n        use_category = metadata['use_category']\n        valid_use_category = any(\n            c in use_category for c in ['dataplane_ext', 'observability_ext', 'other'])\n        if not valid_use_category:\n          raise DependencyError(\n              f'Extensions {name} depends on {d} with \"use_category\" not including '\n              '[\"dataplane_ext\", \"observability_ext\", \"other\"]')\n        if 'extensions' in metadata:\n          allowed_extensions = metadata['extensions']\n          if name not in allowed_extensions:\n            raise DependencyError(\n                f'Extension {name} depends on {d} but {d} does not list {name} in its allowlist')\n\n  def ValidateAll(self):\n    \"\"\"Collection of all validations.\n\n    Raises:\n      DependencyError: on a dependency validation error.\n    \"\"\"\n    self.ValidateBuildGraphStructure()\n    self.ValidateTestOnlyDeps()\n    self.ValidateDataPlaneCoreDeps()\n    self.ValidateControlPlaneDeps()\n    # Validate the marginal dependencies introduced for each extension.\n    for name, target in sorted(build_graph.ListExtensions()):\n      target_all = EXTENSION_LABEL_RE.match(target).group(1) + '/...'\n      self.ValidateExtensionDeps(name, target_all)\n\n\nif __name__ == '__main__':\n  dep_info = DependencyInfo()\n  build_graph = BuildGraph()\n  validator = Validator(dep_info, build_graph)\n  try:\n    validator.ValidateAll()\n  except DependencyError as e:\n    print('Dependency validation failed, please check metadata in bazel/repository_locations.bzl')\n    print(e)\n    sys.exit(1)\n"
  },
  {
    "path": "tools/dependency/validate_test.py",
    "content": "#!/usr/bin/env python3\n\"\"\"Tests for validate.py\"\"\"\n\nimport unittest\n\nimport validate\n\n\nclass FakeDependencyInfo(object):\n  \"\"\"validate.DependencyInfo fake.\"\"\"\n\n  def __init__(self, deps):\n    self._deps = deps\n\n  def DepsByUseCategory(self, use_category):\n    return set(n for n, m in self._deps.items() if use_category in m['use_category'])\n\n  def GetMetadata(self, dependency):\n    return self._deps.get(dependency)\n\n\nclass FakeBuildGraph(object):\n  \"\"\"validate.BuildGraph fake.\"\"\"\n\n  def __init__(self, reachable_deps, extensions):\n    self._reachable_deps = reachable_deps\n    self._extensions = extensions\n\n  def QueryExternalDeps(self, *targets):\n    return set(sum((self._reachable_deps.get(t, []) for t in targets), []))\n\n  def ListExtensions(self):\n    return self._extensions\n\n\ndef FakeDep(use_category, extensions=[]):\n  return {'use_category': use_category, 'extensions': extensions}\n\n\nclass ValidateTest(unittest.TestCase):\n\n  def BuildValidator(self, deps, reachable_deps, extensions=[]):\n    return validate.Validator(FakeDependencyInfo(deps), FakeBuildGraph(reachable_deps, extensions))\n\n  def test_valid_build_graph_structure(self):\n    validator = self.BuildValidator({}, {\n        '//source/exe:envoy_main_common_with_core_extensions_lib': ['a'],\n        '//source/extensions/...': ['b'],\n        '//source/...': ['a', 'b']\n    })\n    validator.ValidateBuildGraphStructure()\n\n  def test_invalid_build_graph_structure(self):\n    validator = self.BuildValidator({}, {\n        '//source/exe:envoy_main_common_with_core_extensions_lib': ['a'],\n        '//source/extensions/...': ['b'],\n        '//source/...': ['a', 'b', 'c']\n    })\n    self.assertRaises(validate.DependencyError, lambda: validator.ValidateBuildGraphStructure())\n\n  def test_valid_test_only_deps(self):\n    validator = self.BuildValidator({'a': FakeDep('dataplane_core')}, {'//source/...': ['a']})\n    
validator.ValidateTestOnlyDeps()\n\n  def test_invalid_test_only_deps(self):\n    validator = self.BuildValidator({'a': FakeDep('test_only')}, {'//source/...': ['a']})\n    self.assertRaises(validate.DependencyError, lambda: validator.ValidateTestOnlyDeps())\n\n  def test_valid_dataplane_core_deps(self):\n    validator = self.BuildValidator({'a': FakeDep('dataplane_core')},\n                                    {'//source/common/http/...': ['a']})\n    validator.ValidateDataPlaneCoreDeps()\n\n  def test_invalid_dataplane_core_deps(self):\n    validator = self.BuildValidator({'a': FakeDep('controlplane')},\n                                    {'//source/common/http/...': ['a']})\n    self.assertRaises(validate.DependencyError, lambda: validator.ValidateDataPlaneCoreDeps())\n\n  def test_valid_controlplane_deps(self):\n    validator = self.BuildValidator({'a': FakeDep('controlplane')},\n                                    {'//source/common/config/...': ['a']})\n    validator.ValidateControlPlaneDeps()\n\n  def test_invalid_controlplane_deps(self):\n    validator = self.BuildValidator({'a': FakeDep('other')}, {'//source/common/config/...': ['a']})\n    self.assertRaises(validate.DependencyError, lambda: validator.ValidateControlPlaneDeps())\n\n  def test_valid_extension_deps(self):\n    validator = self.BuildValidator(\n        {\n            'a': FakeDep('controlplane'),\n            'b': FakeDep('dataplane_ext', ['foo'])\n        }, {\n            '//source/extensions/foo/...': ['a', 'b'],\n            '//source/exe:envoy_main_common_with_core_extensions_lib': ['a']\n        })\n    validator.ValidateExtensionDeps('foo', '//source/extensions/foo/...')\n\n  def test_invalid_extension_deps_wrong_category(self):\n    validator = self.BuildValidator(\n        {\n            'a': FakeDep('controlplane'),\n            'b': FakeDep('controlplane', ['foo'])\n        }, {\n            '//source/extensions/foo/...': ['a', 'b'],\n            
'//source/exe:envoy_main_common_with_core_extensions_lib': ['a']\n        })\n    self.assertRaises(validate.DependencyError,\n                      lambda: validator.ValidateExtensionDeps('foo', '//source/extensions/foo/...'))\n\n  def test_invalid_extension_deps_allowlist(self):\n    validator = self.BuildValidator(\n        {\n            'a': FakeDep('controlplane'),\n            'b': FakeDep('dataplane_ext', ['bar'])\n        }, {\n            '//source/extensions/foo/...': ['a', 'b'],\n            '//source/exe:envoy_main_common_with_core_extensions_lib': ['a']\n        })\n    self.assertRaises(validate.DependencyError,\n                      lambda: validator.ValidateExtensionDeps('foo', '//source/extensions/foo/...'))\n\n\nif __name__ == '__main__':\n  unittest.main()\n"
  },
  {
    "path": "tools/deprecate_features/deprecate_features.py",
    "content": "# A simple script to snag deprecated proto fields and add them to runtime_features.cc\n\nfrom __future__ import print_function\nimport re\nimport subprocess\nimport fileinput\nfrom six.moves import input\n\n\n# Sorts out the list of deprecated proto fields which should be disallowed and returns a tuple of\n# email and code changes.\ndef deprecate_proto():\n  grep_output = subprocess.check_output('grep -r \"deprecated = true\" api/*', shell=True)\n\n  filenames_and_fields = set()\n\n  # Compile the set of deprecated fields and the files they're in, deduping via set.\n  deprecated_regex = re.compile(r'.*\\/([^\\/]*.proto):[^=]* ([^= ]+) =.*')\n  for byte_line in grep_output.splitlines():\n    line = str(byte_line)\n    match = deprecated_regex.match(line)\n    if match:\n      filenames_and_fields.add(tuple([match.group(1), match.group(2)]))\n    else:\n      print('no match in ' + line + ' please address manually!')\n\n  # Now discard any deprecated features already listed in runtime_features\n  exiting_deprecated_regex = re.compile(r'.*\"envoy.deprecated_features.(.*):(.*)\",.*')\n  with open('source/common/runtime/runtime_features.cc', 'r') as features:\n    for line in features.readlines():\n      match = exiting_deprecated_regex.match(line)\n      if match:\n        filenames_and_fields.discard(tuple([match.group(1), match.group(2)]))\n\n  # Finally sort out the code to add to runtime_features.cc and a canned email for envoy-announce.\n  code_snippets = []\n  email_snippets = []\n  for (filename, field) in filenames_and_fields:\n    code_snippets.append('    \"envoy.deprecated_features.' 
+ filename + ':' + field + '\",\\n')\n    email_snippets.append(field + ' from ' + filename + '\\n')\n  code = ''.join(code_snippets)\n  email = ''\n  if email_snippets:\n    email = ('\\nThe following deprecated configuration fields will be disallowed by default:\\n' +\n             ''.join(email_snippets))\n\n  return email, code\n\n\n# Gather code and suggested email changes.\ndeprecate_email, deprecate_code = deprecate_proto()\n\nemail = ('The Envoy maintainer team is cutting the next Envoy release.  In the new release ' +\n         deprecate_email)\n\nprint('\\n\\nSuggested envoy-announce email: \\n')\nprint(email)\n\nif not input('Apply relevant runtime changes? [yN] ').strip().lower() in ('y', 'yes'):\n  exit(1)\n\nfor line in fileinput.FileInput('source/common/runtime/runtime_features.cc', inplace=1):\n  if 'envoy.deprecated_features.deprecated.proto:is_deprecated_fatal' in line:\n    line = line.replace(line, line + deprecate_code)\n  print(line, end='')\n\nprint('\\nChanges applied.  Please send the email above to envoy-announce.\\n')\n"
  },
  {
    "path": "tools/deprecate_features/deprecate_features.sh",
    "content": "#!/bin/bash\n\n. tools/shell_utils.sh\n\nset -e\n\npython_venv deprecate_features\n"
  },
  {
    "path": "tools/deprecate_features/requirements.txt",
    "content": "six==1.15.0 \\\n    --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \\\n    --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced\n"
  },
  {
    "path": "tools/deprecate_version/deprecate_version.py",
    "content": "# Script for automating cleanup PR creation for deprecated runtime features\n#\n# sh tools/deprecate_version/deprecate_version.sh\n#\n# Direct usage (not recommended):\n#\n# python tools/deprecate_version/deprecate_version.py\n#\n# e.g\n#\n#  python tools/deprecate_version/deprecate_version.py\n#\n# A GitHub access token must be set in GH_ACCESS_TOKEN. To create one, go to\n# Settings -> Developer settings -> Personal access tokens in GitHub and create\n# a token with public_repo scope. Keep this safe, it's broader than it needs to\n# be thanks to GH permission model\n# (https://github.com/dear-github/dear-github/issues/113).\n#\n# Known issues:\n# - Minor fixup PRs (e.g. fixing a typo) will result in the creation of spurious\n#   issues.\n\nfrom __future__ import print_function\n\nimport datetime\nfrom datetime import date\nfrom collections import defaultdict\nimport os\nimport re\nimport subprocess\nimport sys\n\nimport github\nfrom git import Repo\n\ntry:\n  input = raw_input  # Python 2\nexcept NameError:\n  pass  # Python 3\n\n# Tag issues created with these labels.\nLABELS = ['deprecation', 'tech debt', 'no stalebot']\n\n\n# Errors that happen during issue creation.\nclass DeprecateVersionError(Exception):\n  pass\n\n\ndef GetConfirmation():\n  \"\"\"Obtain stdin confirmation to create issues in GH.\"\"\"\n  return input('Creates issues? 
[yN] ').strip().lower() in ('y', 'yes')\n\n\ndef CreateIssues(access_token, runtime_and_pr):\n  \"\"\"Create issues in GitHub for code to clean up old runtime guarded features.\n\n  Args:\n    access_token: GitHub access token (see comment at top of file).\n    runtime_and_pr: a list of runtime guards and the PRs and commits they were added.\n  \"\"\"\n  git = github.Github(access_token)\n  repo = git.get_repo('envoyproxy/envoy')\n\n  # Find GitHub label objects for LABELS.\n  labels = []\n  for label in repo.get_labels():\n    if label.name in LABELS:\n      labels.append(label)\n  if len(labels) != len(LABELS):\n    raise DeprecateVersionError('Unknown labels (expected %s, got %s)' % (LABELS, labels))\n\n  issues = []\n  for runtime_guard, pr, commit in runtime_and_pr:\n    # Who is the author?\n    if pr:\n      # Extract PR title, number, and author.\n      pr_info = repo.get_pull(pr)\n      change_title = pr_info.title\n      number = ('#%d') % pr\n      login = pr_info.user.login\n    else:\n      # Extract commit message, sha, and author.\n      # Only keep commit message title (remove description), and truncate to 50 characters.\n      change_title = commit.message.split('\\n')[0][:50]\n      number = ('commit %s') % commit.hexsha\n      email = commit.author.email\n      # Use the commit author's email to search through users for their login.\n      search_user = git.search_users(email.split('@')[0] + \" in:email\")\n      login = search_user[0].login if search_user else None\n\n    title = '%s deprecation' % (runtime_guard)\n    body = ('Your change %s (%s) introduced a runtime guarded feature. It has been 6 months since '\n            'the new code has been exercised by default, so it\\'s time to remove the old code '\n            'path. 
This issue tracks source code cleanup so we don\\'t forget.') % (number,\n                                                                                   change_title)\n\n    print(title)\n    print(body)\n    print('  >> Assigning to %s' % (login or email))\n    search_title = '%s in:title' % title\n\n    # TODO(htuch): Figure out how to do this without legacy and faster.\n    exists = repo.legacy_search_issues('open', search_title) or repo.legacy_search_issues(\n        'closed', search_title)\n    if exists:\n      print(\"Issue with %s already exists\" % search_title)\n      print(exists)\n      print('  >> Issue already exists, not posting!')\n    else:\n      issues.append((title, body, login))\n\n  if not issues:\n    print('No features to deprecate in this release')\n    return\n\n  if GetConfirmation():\n    print('Creating issues...')\n    for title, body, login in issues:\n      try:\n        repo.create_issue(title, body=body, assignees=[login], labels=labels)\n      except github.GithubException as e:\n        try:\n          if login:\n            body += '\\ncc @' + login\n          repo.create_issue(title, body=body, labels=labels)\n          print(('unable to assign issue %s to %s. 
Add them to the Envoy proxy org'\n                 'and assign it their way.') % (title, login))\n        except github.GithubException as e:\n          print('GithubException while creating issue.')\n          raise\n\n\ndef GetRuntimeAndPr():\n  \"\"\"Returns a list of tuples of [runtime features to deprecate, PR, commit the feature was added]\n  \"\"\"\n  repo = Repo(os.getcwd())\n\n  # grep source code looking for reloadable features which are true to find the\n  # PR they were added.\n  features_to_flip = []\n\n  runtime_features = re.compile(r'.*\"(envoy.reloadable_features..*)\",.*')\n\n  removal_date = date.today() - datetime.timedelta(days=183)\n  found_test_feature_true = False\n\n  # Walk the blame of runtime_features and look for true runtime features older than 6 months.\n  for commit, lines in repo.blame('HEAD', 'source/common/runtime/runtime_features.cc'):\n    for line in lines:\n      match = runtime_features.match(line)\n      if match:\n        runtime_guard = match.group(1)\n        if runtime_guard == 'envoy.reloadable_features.test_feature_false':\n          print(\"Found end sentinel\\n\")\n          if not found_test_feature_true:\n            # The script depends on the cc file having the true runtime block\n            # before the false runtime block.  Fail if one isn't found.\n            print('Failed to find test_feature_true.  
Script needs fixing')\n            sys.exit(1)\n          return features_to_flip\n        if runtime_guard == 'envoy.reloadable_features.test_feature_true':\n          found_test_feature_true = True\n          continue\n        pr_num = re.search('\\(#(\\d+)\\)', commit.message)\n        # Some commits may not come from a PR (if they are part of a security point release).\n        pr = (int(pr_num.group(1))) if pr_num else None\n        pr_date = date.fromtimestamp(commit.committed_date)\n        removable = (pr_date < removal_date)\n        # Add the runtime guard and PR to the list to file issues about.\n        print('Flag ' + runtime_guard + ' added at ' + str(pr_date) + ' ' +\n              (removable and 'and is safe to remove' or 'is not ready to remove'))\n        if removable:\n          features_to_flip.append((runtime_guard, pr, commit))\n  print('Failed to find test_feature_false.  Script needs fixing')\n  sys.exit(1)\n\n\nif __name__ == '__main__':\n  runtime_and_pr = GetRuntimeAndPr()\n\n  if not runtime_and_pr:\n    print('No code is deprecated.')\n    sys.exit(0)\n\n  access_token = os.getenv('GH_ACCESS_TOKEN')\n  if not access_token:\n    print(\n        'Missing GH_ACCESS_TOKEN: see instructions in tools/deprecate_version/deprecate_version.py')\n    sys.exit(1)\n\n  CreateIssues(access_token, runtime_and_pr)\n"
  },
  {
    "path": "tools/deprecate_version/deprecate_version.sh",
    "content": "#!/bin/bash\n\n. tools/shell_utils.sh\n\nset -e\n\npython_venv deprecate_version\n"
  },
  {
    "path": "tools/deprecate_version/requirements.txt",
    "content": "certifi==2020.6.20 \\\n    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \\\n    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41\nchardet==3.0.4 \\\n    --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \\\n    --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691\nDeprecated==1.2.10 \\\n    --hash=sha256:525ba66fb5f90b07169fdd48b6373c18f1ee12728ca277ca44567a367d9d7f74 \\\n    --hash=sha256:a766c1dccb30c5f6eb2b203f87edd1d8588847709c78589e1521d769addc8218\ngitdb==4.0.5 \\\n    --hash=sha256:91f36bfb1ab7949b3b40e23736db18231bf7593edada2ba5c3a174a7b23657ac \\\n    --hash=sha256:c9e1f2d0db7ddb9a704c2a0217be31214e91a4fe1dea1efad19ae42ba0c285c9\nGitPython==3.1.8 \\\n    --hash=sha256:080bf8e2cf1a2b907634761c2eaefbe83b69930c94c66ad11b65a8252959f912 \\\n    --hash=sha256:1858f4fd089abe92ae465f01d5aaaf55e937eca565fb2c1fce35a51b5f85c910\nidna==2.10 \\\n    --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \\\n    --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0\nPyGithub==1.53 \\\n    --hash=sha256:776befaddab9d8fddd525d52a6ca1ac228cf62b5b1e271836d766f4925e1452e \\\n    --hash=sha256:8ad656bf79958e775ec59f7f5a3dbcbadac12147ae3dc42708b951064096af15\nPyJWT==1.7.1 \\\n    --hash=sha256:5c6eca3c2940464d106b99ba83b00c6add741c9becaec087fb7ccdefea71350e \\\n    --hash=sha256:8d59a976fb773f3e6a39c85636357c4f0e242707394cadadd9814f5cbaa20e96\nrequests==2.24.0 \\\n    --hash=sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b \\\n    --hash=sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898\nsmmap==3.0.4 \\\n    --hash=sha256:54c44c197c819d5ef1991799a7e30b662d1e520f2ac75c9efbeb54a742214cf4 \\\n    --hash=sha256:9c98bbd1f9786d22f14b3d4126894d56befb835ec90cef151af566c7e19b5d24\nurllib3==1.25.10 \\\n    
--hash=sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a \\\n    --hash=sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461\nwrapt==1.12.1 \\\n    --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7\n"
  },
  {
    "path": "tools/docker_wrapper.sh",
    "content": "#!/bin/bash\n#\n# Wraps a test invocation in docker.\n\nset -e\nIMAGE=$1\nRUN_REMOTE=$2\nLOCAL_MOUNT=$3\nDOCKER_ENV=$4\nTEST_PATH=$(realpath \"$5\")\nshift 5\n\nif [ \"${RUN_REMOTE}\" == \"yes\" ]; then\n  echo \"Using docker environment from ${DOCKER_ENV}:\"\n  cat \"${DOCKER_ENV}\"\nfi\n# shellcheck disable=SC1090\n. \"${DOCKER_ENV}\"\n\nCONTAINER_NAME=\"envoy-test-runner\"\nENVFILE=$(mktemp -t \"bazel-test-env.XXXXXX\")\nfunction cleanup() {\n  rm -f \"${ENVFILE}\"\n  if [ \"${RUN_REMOTE}\" == \"yes\" ]; then\n    docker rm -f \"${CONTAINER_NAME}\"  || true # We don't really care if it fails.\n  fi\n}\n\ntrap cleanup EXIT\n\ncat > \"${ENVFILE}\" <<EOF\nTEST_WORKSPACE=/tmp/workspace\nTEST_SRCDIR=/tmp/src\nENVOY_IP_TEST_VERSIONS=v4only\nEOF\n\nCMDLINE=\"set -a && . /env && env && /test $*\"\nLIB_PATHS=\"/lib/x86_64-linux-gnu/ /usr/lib/x86_64-linux-gnu/ /lib64/\"\n\n\nif [ \"${RUN_REMOTE}\" != \"yes\" ]; then\n  # We're running locally. If told to, mount the library directories locally.\n  LIB_MOUNTS=()\n  if [ \"${LOCAL_MOUNT}\" == \"yes\" ]\n  then\n    for path in $LIB_PATHS; do\n      LIB_MOUNTS+=(-v \"${path}:${path}:ro\")\n      done\n  fi\n\n  docker run --rm --privileged  -v \"${TEST_PATH}:/test\" \"${LIB_MOUNTS[@]}\" -i -v \"${ENVFILE}:/env\" \\\n    \"${IMAGE}\" bash -c \"${CMDLINE}\"\nelse\n  # In this case, we need to create the container, then make new layers on top of it, since we\n  # can't mount everything into it.\n  docker create -t --privileged  --name \"${CONTAINER_NAME}\" \"${IMAGE}\" \\\n    bash -c \"${CMDLINE}\"\n  docker cp \"$TEST_PATH\" \"${CONTAINER_NAME}:/test\"\n  docker cp \"$ENVFILE\" \"${CONTAINER_NAME}:/env\"\n\n  # If some local libraries are necessary, copy them over.\n  if [ \"${LOCAL_MOUNT}\" == \"yes\" ]; then\n    for path in ${LIB_PATHS}; do\n      # $path. gives us a path ending it /. 
This means that we will copy the contents into the\n      # destination directory, not overwrite the entire directory.\n      docker cp -L \"${path}.\" \"${CONTAINER_NAME}:${path}\"\n    done\n  fi\n\n  docker start -a \"${CONTAINER_NAME}\"\nfi\n"
  },
  {
    "path": "tools/envoy-rotate-files.el",
    "content": ";;; Defines a function to rotate between related Envoy source\n;;; files. This requires that ./find_related_envoy_files.py be placed\n;;; on $PATH and a key-binding in .emacs to elisp command\n;;; envoy-rotate-files. E.g.\n;;;\n;;; (load-file \"GIT_CLIENT/envoy/tools/envoy-rotate-files.el\")\n;;; (defun envoy-setup-hooks ()\n;;;   \"Sets up Emacs customizations for editing in the Envoy codebase\"\n;;;   (local-set-key (quote [C-tab]) 'envoy-rotate-files))\n;;;\n;;; (add-hook 'c-mode-common-hook 'envoy-setup-hooks t)\n\n(defun envoy-rotate-files ()\n  \"Rotate the current buffer based on envoy cc file patterns\"\n  (interactive)\n  (let ((related-files\n         (split-string\n          (shell-command-to-string\n           (concat \"find_related_envoy_files.py \" (buffer-file-name))))))\n    (if (null related-files)\n        (error (concat \"File \" (buffer-file-name)\n                       \" does not appear to be an envoy C++ source file\"))\n      (find-file (car related-files)))))\n"
  },
  {
    "path": "tools/envoy_collect/README.md",
    "content": "# Collect Envoy stats/log/profile.\n\nThe `envoy_collect.py` wrapper script supports gathering up a bundle of useful artifacts from Envoy's\nexecution for the purpose of performance profiling or debugging.\n\n## Debugging\n\nTo collect verbose logs, stats and other accessible data from the admin endpoint in a tarball,\ninvoke `/path/to/envoy-static` as follows under the `envoy_collect.py` wrapper:\n\n```\nenvoy_collect.py --envoy-binary /path/to/envoy-static --output-path /path/to/debug.tar -c \\\n  /path/to/envoy-config.json <other Envoy args...>\n```\n\nEnvoy will run as normal, but when interrupted by SIGINT (e.g. via `kill -s INT` or ctrl-c on\nstdin), it will write to `/path/to/debug.tar` a dump of various logs and the admin endpoint\nhandlers. The wrapper configures Envoy for maximum logging verbosity.\n\nThis tarball may be useful to attach to issues when reporting. However, a high degree of caution is\nrecommended here, as the logs are verbose and will reveal low level traffic details. It is **NOT**\nrecommended to attach this to a GitHub issue if there are any privacy concerns whatsoever, otherwise\nthe data should be manually sanitized prior to posting in public view.\n\n## Performance\n\nTo collect a performance profile, as well as the non-performance impacting logs and stats,\ninvoke `/path/to/envoy-static` as follows under the `envoy_collect.py` wrapper:\n\n```\nenvoy_collect.py --performance --envoy-binary /path/to/envoy-static --output-path /path/to/debug.tar -c \\\n  /path/to/envoy-config.json <other Envoy args...>\n```\n\nThis will run Envoy under `perf record` and include a `perf.data` file in the tarball, suitable\nfor later analysis with `perf report` or flamegraph generation.\n"
  },
  {
    "path": "tools/envoy_collect/envoy_collect.py",
    "content": "#!/usr/bin/env python\n\"\"\"Wrapper for Envoy command-line that collects stats/log/profile.\n\nExample use:\n\n  ./tools/envoy_collect.py --output-path=./envoy.tar -c\n  ./configs/google_com_proxy.v2.yaml --service-node foo\n  <Ctrl-C>\n  tar -tvf ./envoy.tar\n  -rw------- htuch/eng         0 2017-08-13 21:13 access_0.log\n  -rw------- htuch/eng       876 2017-08-13 21:13 clusters.txt\n  -rw------- htuch/eng        19 2017-08-13 21:13 listeners.txt\n  -rw------- htuch/eng        70 2017-08-13 21:13 server_info.txt\n  -rw------- htuch/eng      8443 2017-08-13 21:13 stats.txt\n  -rw------- htuch/eng      1551 2017-08-13 21:13 config.json\n  -rw------- htuch/eng     32681 2017-08-13 21:13 envoy.log\n\nThe Envoy process will execute as normal and will terminate when interrupted\nwith SIGINT (ctrl-c on stdin), collecting the various stats/log/profile in the\n--output-path tarball.\n\nTODO(htuch):\n  - Generate the full perf trace as well, since we may have a different version\n    of perf local vs. remote.\n  - Add a Bazel run wrapper.\n  - Support v2 proto config in ModifyEnvoyConfig().\n  - Flamegraph generation in post-processing.\n  - Support other modes of data collection (e.g. 
snapshotting on SIGUSR,\n    periodic).\n  - Validate in performance mode that we're using an opt binary.\n  - Consider handling other signals.\n  - Optional real time logging while Envoy process is running.\n  - bz2 compress tarball.\n  - Use freeze or something similar to build a static binary with embedded\n    Python, ending need to have Python on remote host (and care about version).\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport ctypes\nimport ctypes.util\nimport datetime\nimport json\nimport os\nimport pipes\nimport shutil\nimport signal\nimport subprocess as sp\nimport sys\nimport tarfile\nimport tempfile\nfrom six.moves import urllib\n\nDEFAULT_ENVOY_PATH = os.getenv('ENVOY_PATH', 'bazel-bin/source/exe/envoy-static')\nPERF_PATH = os.getenv('PERF_PATH', 'perf')\n\nPR_SET_PDEATHSIG = 1  # See prtcl(2).\n\nDUMP_HANDLERS = ['clusters', 'listeners', 'server_info', 'stats']\n\n\ndef fetch_url(url):\n  return urllib.request.urlopen(url).read().decode('utf-8')\n\n\ndef modify_envoy_config(config_path, perf, output_directory):\n  \"\"\"Modify Envoy config to support gathering logs, etc.\n\n  Args:\n    config_path: the command-line specified Envoy config path.\n    perf: boolean indicating whether in performance mode.\n    output_directory: directory path for additional generated files.\n  Returns:\n    (modified Envoy config path, list of additional files to collect)\n  \"\"\"\n  # No modifications yet when in performance profiling mode.\n  if perf:\n    return config_path, []\n\n  # Load original Envoy config.\n  with open(config_path, 'r') as f:\n    envoy_config = json.loads(f.read())\n\n  # Add unconditional access logs for all listeners.\n  access_log_paths = []\n  for n, listener in enumerate(envoy_config['listeners']):\n    for network_filter in listener['filters']:\n      if network_filter['name'] == 'http_connection_manager':\n        config = network_filter['config']\n        access_log_path = os.path.join(output_directory, 
'access_%d.log' % n)\n        access_log_config = {'path': access_log_path}\n        if 'access_log' in config:\n          config['access_log'].append(access_log_config)\n        else:\n          config['access_log'] = [access_log_config]\n        access_log_paths.append(access_log_path)\n\n  # Write out modified Envoy config.\n  modified_envoy_config_path = os.path.join(output_directory, 'config.json')\n  with open(modified_envoy_config_path, 'w') as f:\n    f.write(json.dumps(envoy_config, indent=2))\n\n  return modified_envoy_config_path, access_log_paths\n\n\ndef run_envoy(envoy_shcmd_args, envoy_log_path, admin_address_path, dump_handlers_paths):\n  \"\"\"Run Envoy subprocess and trigger admin endpoint gathering on SIGINT.\n\n  Args:\n    envoy_shcmd_args: list of Envoy subprocess args.\n    envoy_log_path: path to write Envoy stderr log to.\n    admin_address_path: path to where admin address is written by Envoy.\n    dump_handlers_paths: map from admin endpoint handler to path to where the respective contents\n      are to be written.\n  Returns:\n    The Envoy subprocess exit code.\n  \"\"\"\n  envoy_shcmd = ' '.join(map(pipes.quote, envoy_shcmd_args))\n  print(envoy_shcmd)\n\n  # Some process setup stuff to ensure the child process gets cleaned up properly if the\n  # collector dies and doesn't get its signals implicitly.\n  def envoy_preexec_fn():\n    os.setpgrp()\n    libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)\n    libc.prctl(PR_SET_PDEATHSIG, signal.SIGTERM)\n\n  # Launch Envoy, register for SIGINT, and wait for the child process to exit.\n  with open(envoy_log_path, 'w') as envoy_log:\n    envoy_proc = sp.Popen(envoy_shcmd,\n                          stdin=sp.PIPE,\n                          stderr=envoy_log,\n                          preexec_fn=envoy_preexec_fn,\n                          shell=True)\n\n    def signal_handler(signum, frame):\n      # The read is deferred until the signal so that the Envoy process gets a\n      
# chance to write the file out.\n      with open(admin_address_path, 'r') as f:\n        admin_address = 'http://%s' % f.read()\n      # Fetch from the admin endpoint.\n      for handler, path in dump_handlers_paths.items():\n        handler_url = '%s/%s' % (admin_address, handler)\n        print('Fetching %s' % handler_url)\n        with open(path, 'w') as f:\n          f.write(fetch_url(handler_url))\n      # Send SIGINT to Envoy process, it should exit and execution will\n      # continue from the envoy_proc.wait() below.\n      print('Sending Envoy process (PID=%d) SIGINT...' % envoy_proc.pid)\n      envoy_proc.send_signal(signal.SIGINT)\n\n    signal.signal(signal.SIGINT, signal_handler)\n    return envoy_proc.wait()\n\n\ndef envoy_collect(parse_result, unknown_args):\n  \"\"\"Run Envoy and collect its artifacts.\n\n  Args:\n    parse_result: Namespace object with envoy_collect.py's args.\n    unknown_args: list of remaining args to pass to Envoy binary.\n  \"\"\"\n  # Are we in performance mode? 
Otherwise, debug.\n  perf = parse_result.performance\n  return_code = 1  # Non-zero default return.\n  envoy_tmpdir = tempfile.mkdtemp(prefix='envoy-collect-tmp-')\n  # Try and do stuff with envoy_tmpdir, rm -rf regardless of success/failure.\n  try:\n    # Setup Envoy config and determine the paths of the files we're going to\n    # generate.\n    modified_envoy_config_path, access_log_paths = modify_envoy_config(\n        parse_result.config_path, perf, envoy_tmpdir)\n    dump_handlers_paths = {h: os.path.join(envoy_tmpdir, '%s.txt' % h) for h in DUMP_HANDLERS}\n    envoy_log_path = os.path.join(envoy_tmpdir, 'envoy.log')\n    # The manifest of files that will be placed in the output .tar.\n    manifest = access_log_paths + list(\n        dump_handlers_paths.values()) + [modified_envoy_config_path, envoy_log_path]\n    # This is where we will find out where the admin endpoint is listening.\n    admin_address_path = os.path.join(envoy_tmpdir, 'admin_address.txt')\n\n    # Only run under 'perf record' in performance mode.\n    if perf:\n      perf_data_path = os.path.join(envoy_tmpdir, 'perf.data')\n      manifest.append(perf_data_path)\n      perf_record_args = [\n          PERF_PATH,\n          'record',\n          '-o',\n          perf_data_path,\n          '-g',\n          '--',\n      ]\n    else:\n      perf_record_args = []\n\n    # This is how we will invoke the wrapped envoy.\n    envoy_shcmd_args = perf_record_args + [\n        parse_result.envoy_binary,\n        '-c',\n        modified_envoy_config_path,\n        '-l',\n        'error' if perf else 'trace',\n        '--admin-address-path',\n        admin_address_path,\n    ] + unknown_args[1:]\n\n    # Run the Envoy process (under 'perf record' if needed).\n    return_code = run_envoy(envoy_shcmd_args, envoy_log_path, admin_address_path,\n                            dump_handlers_paths)\n\n    # Collect manifest files and tar them.\n    with tarfile.TarFile(parse_result.output_path, 'w') as output_tar:\n 
     for path in manifest:\n        if os.path.exists(path):\n          print('Adding %s to archive' % path)\n          output_tar.add(path, arcname=os.path.basename(path))\n        else:\n          print('%s not found' % path)\n\n    print('Wrote Envoy artifacts to %s' % parse_result.output_path)\n  finally:\n    shutil.rmtree(envoy_tmpdir)\n  return return_code\n\n\nif __name__ == '__main__':\n  parser = argparse.ArgumentParser(description='Envoy wrapper to collect stats/log/profile.')\n  default_output_path = 'envoy-%s.tar' % datetime.datetime.now().isoformat('-')\n  parser.add_argument('--output-path', default=default_output_path, help='path to output .tar.')\n  # We either need to interpret or override these, so we declare them in\n  # envoy_collect.py and always parse and present them again when invoking\n  # Envoy.\n  parser.add_argument('--config-path',\n                      '-c',\n                      required=True,\n                      help='Path to Envoy configuration file.')\n  parser.add_argument('--log-level',\n                      '-l',\n                      help='Envoy log level. This will be overridden when invoking Envoy.')\n  # envoy_collect specific args.\n  parser.add_argument('--performance',\n                      action='store_true',\n                      help='Performance mode (collect perf trace, minimize log verbosity).')\n  parser.add_argument('--envoy-binary',\n                      default=DEFAULT_ENVOY_PATH,\n                      help='Path to Envoy binary (%s by default).' % DEFAULT_ENVOY_PATH)\n  sys.exit(envoy_collect(*parser.parse_known_args(sys.argv)))\n"
  },
  {
    "path": "tools/envoy_headersplit/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_binary\", \"py_test\")\nload(\"@headersplit_pip3//:requirements.bzl\", \"requirement\")\nload(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\npy_binary(\n    name = \"headersplit\",\n    srcs = [\n        \"headersplit.py\",\n    ],\n    python_version = \"PY3\",\n    srcs_version = \"PY3\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        requirement(\"clang\"),\n    ],\n)\n\npy_binary(\n    name = \"replace_includes\",\n    srcs = [\n        \"replace_includes.py\",\n    ],\n    python_version = \"PY3\",\n    srcs_version = \"PY3\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":headersplit\",\n    ],\n)\n\npy_test(\n    name = \"headersplit_test\",\n    srcs = [\n        \"headersplit_test.py\",\n    ],\n    data = glob([\"code_corpus/**\"]),\n    python_version = \"PY3\",\n    srcs_version = \"PY3\",\n    tags = [\"no-sandbox\"],  # TODO (foreseeable): make this test run under sandbox\n    visibility = [\"//visibility:public\"],\n    deps = [\n        requirement(\"clang\"),\n        \":headersplit\",\n    ],\n)\n\npy_test(\n    name = \"replace_includes_test\",\n    srcs = [\n        \"replace_includes_test.py\",\n    ],\n    data = glob([\"code_corpus/**\"]),\n    python_version = \"PY3\",\n    srcs_version = \"PY3\",\n    tags = [\"no-sandbox\"],  # TODO (foreseeable): make this test run under sandbox\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":replace_includes\",\n    ],\n)\n"
  },
  {
    "path": "tools/envoy_headersplit/README.md",
    "content": "# Envoy Header Split\nTool for spliting monolithic header files in Envoy to speed up compilation\n\nSteps to divide Envoy mock headers:\n\n1. Run `headersplit.py` to divide the monolithic mock header into different classes\n\nExample (to split monolithic mock header test/mocks/network/mocks.h):\n\n```\ncd ${ENVOY_SRCDIR}/test/mocks/network/\npython3 ${ENVOY_SRCDIR}/tools/envoy_headersplit/headersplit.py -i mocks.cc -d mocks.h\n```\n\n2. Remove unused `#includes` from the new mock headers, and write Bazel dependencies for the newly divided mock classes. (this step needs to be done manually)\n\n3. Run `replace_includes.py` to replace superfluous `#includes` in Envoy directory after dividing. It will also modify the corresponding Bazel `BUILD` file.\n\nExample (to replace `#includes` after dividing mock header test/mocks/network/mocks.h):\n\n```\ncd ${ENVOY_SRCDIR}\npython3 ${ENVOY_SRCDIR}/tools/envoy_headersplit/replace_includes.py -m network\n```\n"
  },
  {
    "path": "tools/envoy_headersplit/code_corpus/class_defn.h",
    "content": "#include \"envoy/split\"\n\n// NOLINT(namespace-envoy)\n\nnamespace {\n\nclass Foo {};\n\nclass Bar {\n  Foo getFoo();\n};\n\nclass FooBar : Foo, Bar {};\n\nclass DeadBeaf {\npublic:\n  int val();\n  FooBar foobar;\n};\n} // namespace"
  },
  {
    "path": "tools/envoy_headersplit/code_corpus/class_defn_without_namespace.h",
    "content": "#include \"envoy/split\"\n\n// NOLINT(namespace-envoy)\n\nclass Foo {};\n\nclass Bar {\n  Foo getFoo();\n};\n\nclass FooBar : Foo, Bar {};\n\nclass DeadBeaf {\npublic:\n  int val();\n  FooBar foobar;\n};"
  },
  {
    "path": "tools/envoy_headersplit/code_corpus/class_impl.cc",
    "content": "#include \"class_defn.h\"\n\n// NOLINT(namespace-envoy)\n\nnamespace {\nFoo Bar::getFoo() {\n  Foo foo;\n  return foo;\n}\n\nint DeadBeaf::val() { return 42; }\n\nDeadBeaf::DeadBeaf() = default;\n} // namespace"
  },
  {
    "path": "tools/envoy_headersplit/code_corpus/fail_mocks.cc",
    "content": "#include \"fail_mocks.h\"\n\n// NOLINT(namespace-envoy)\n\n#include <string>\n\n#include \"envoy/admin/v3/server_info.pb.h\"\n#include \"envoy/config/core/v3/base.pb.h\"\n\n#include \"common/singleton/manager_impl.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing testing::_;\nusing testing::Invoke;\nusing testing::Return;\nusing testing::ReturnPointee;\nusing testing::ReturnRef;\nusing testing::SaveArg;\n\nnamespace Envoy {\nnamespace Server {\n\nMockConfigTracker::MockConfigTracker() {\n  ON_CALL(*this, add_(_, _))\n      .WillByDefault(Invoke([this](const std::string& key, Cb callback) -> EntryOwner* {\n        EXPECT_TRUE(config_tracker_callbacks_.find(key) == config_tracker_callbacks_.end());\n        config_tracker_callbacks_[key] = callback;\n        return new MockEntryOwner();\n      }));\n}\nMockConfigTracker::~MockConfigTracker() = default;\n\nMockListenerComponentFactory::MockListenerComponentFactory()\n    : socket_(std::make_shared<NiceMock<Network::MockListenSocket>>()) {\n  ON_CALL(*this, createListenSocket(_, _, _, _))\n      .WillByDefault(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type,\n                                const Network::Socket::OptionsSharedPtr& options,\n                                const ListenSocketCreationParams&) -> Network::SocketSharedPtr {\n        if (!Network::Socket::applyOptions(options, *socket_,\n                                           envoy::config::core::v3::SocketOption::STATE_PREBIND)) {\n          throw EnvoyException(\"MockListenerComponentFactory: Setting socket options failed\");\n        }\n        return socket_;\n      }));\n}\nMockListenerComponentFactory::~MockListenerComponentFactory() = default;\n\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "tools/envoy_headersplit/code_corpus/fail_mocks.h",
    "content": "#pragma once\n// NOLINT(namespace-envoy)\n\n#include <chrono>\n#include <cstdint>\n#include <list>\n#include <string>\n\nnamespace Envoy {\nnamespace Server {\n\nclass MockConfigTracker : public ConfigTracker {\npublic:\n  MockConfigTracker();\n  ~MockConfigTracker() override;\n\n  struct MockEntryOwner : public EntryOwner {};\n\n  MOCK_METHOD(EntryOwner*, add_, (std::string, Cb));\n\n  // Server::ConfigTracker\n  MOCK_METHOD(const CbsMap&, getCallbacksMap, (), (const));\n  EntryOwnerPtr add(const std::string& key, Cb callback) override {\n    return EntryOwnerPtr{add_(key, std::move(callback))};\n  }\n\n  absl::node_hash_map<std::string, Cb> config_tracker_callbacks_;\n};\n\nclass MockListenerComponentFactory : public ListenerComponentFactory {\npublic:\n  MockListenerComponentFactory();\n  ~MockListenerComponentFactory() override;\n\n  DrainManagerPtr\n  createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) override {\n    return DrainManagerPtr{createDrainManager_(drain_type)};\n  }\n  LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) override {\n    return LdsApiPtr{createLdsApi_(lds_config)};\n  }\n\n  MOCK_METHOD(LdsApi*, createLdsApi_, (const envoy::config::core::v3::ConfigSource& lds_config));\n  MOCK_METHOD(std::vector<Network::FilterFactoryCb>, createNetworkFilterFactoryList,\n              (const Protobuf::RepeatedPtrField<envoy::config::listener::v3::Filter>& filters,\n               Configuration::FilterChainFactoryContext& filter_chain_factory_context));\n  MOCK_METHOD(std::vector<Network::ListenerFilterFactoryCb>, createListenerFilterFactoryList,\n              (const Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>&,\n               Configuration::ListenerFactoryContext& context));\n  MOCK_METHOD(std::vector<Network::UdpListenerFilterFactoryCb>, createUdpListenerFilterFactoryList,\n              (const 
Protobuf::RepeatedPtrField<envoy::config::listener::v3::ListenerFilter>&,\n               Configuration::ListenerFactoryContext& context));\n  MOCK_METHOD(Network::SocketSharedPtr, createListenSocket,\n              (Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type,\n               const Network::Socket::OptionsSharedPtr& options,\n               const ListenSocketCreationParams& params));\n  MOCK_METHOD(DrainManager*, createDrainManager_,\n              (envoy::config::listener::v3::Listener::DrainType drain_type));\n  MOCK_METHOD(uint64_t, nextListenerTag, ());\n\n  std::shared_ptr<Network::MockListenSocket> socket_;\n};\n} // namespace Server\n} // namespace Envoy\n"
  },
  {
    "path": "tools/envoy_headersplit/code_corpus/fake_build",
    "content": "envoy_cc_test(\n    name = \"async_client_impl_test\",\n    srcs = [\"async_client_impl_test.cc\"],\n    deps = [\n        \":common_lib\",\n        \"//source/common/buffer:buffer_lib\",\n        \"//source/common/http:async_client_lib\",\n        \"//source/common/http:context_lib\",\n        \"//source/common/http:headers_lib\",\n        \"//source/common/http:utility_lib\",\n        \"//source/extensions/upstreams/http/generic:config\",\n        \"//test/mocks:common_lib\",\n        \"//test/mocks/buffer:buffer_mocks\",\n        \"//test/mocks/http:http_mocks\",\n        \"//test/mocks/local_info:local_info_mocks\",\n        \"//test/mocks/router:router_mocks\",\n        \"//test/mocks/runtime:runtime_mocks\",\n        \"//test/mocks/stats:stats_mocks\",\n        \"//test/mocks/upstream:upstream_mocks\",\n        \"//test/test_common:test_time_lib\",\n        \"@envoy_api//envoy/config/core/v3:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/route/v3:pkg_cc_proto\",\n    ],\n)"
  },
  {
    "path": "tools/envoy_headersplit/code_corpus/fake_source_code.cc",
    "content": "// NOLINT(namespace-envoy)\n#include <chrono>\n#include <cstdint>\n#include <memory>\n#include <string>\n\n#include \"envoy/config/core/v3/base.pb.h\"\n#include \"envoy/config/route/v3/route_components.pb.h\"\n\n#include \"common/buffer/buffer_impl.h\"\n#include \"common/http/async_client_impl.h\"\n#include \"common/http/context_impl.h\"\n#include \"common/http/headers.h\"\n#include \"common/http/utility.h\"\n\n#include \"test/common/http/common.h\"\n#include \"test/mocks/buffer/mocks.h\"\n#include \"test/mocks/common.h\"\n#include \"test/mocks/http/mocks.h\"\n#include \"test/mocks/local_info/mocks.h\"\n#include \"test/mocks/router/mocks.h\"\n#include \"test/mocks/runtime/mocks.h\"\n#include \"test/mocks/stats/mocks.h\"\n#include \"test/mocks/upstream/mocks.h\"\n#include \"test/test_common/printers.h\"\n\n....useless stuff...\n\n    NiceMock<Upstream::MockClusterManager>\n        cm_;\n\n... uninteresting stuff.."
  },
  {
    "path": "tools/envoy_headersplit/code_corpus/hello.h",
    "content": "// your first c++ program\n// NOLINT(namespace-envoy)\n#include <iostream>\n\n// random strings\n\n#include \"foo/bar\"\n\nclass test {\n  test() { std::cout << \"Hello World\" << std::endl; }\n};"
  },
  {
    "path": "tools/envoy_headersplit/headersplit.py",
    "content": "# !/usr/bin/env python3\n# Lint as: python3\n\"\"\"\nThis python script can dividing monolithic mock headers into different mock classes. We need to\nremove the over-included header files in generated class codes and resolve dependencies in the\ncorresponding Bazel files manually.\n\"\"\"\nimport argparse\nimport os\nimport subprocess\nimport sys\nfrom typing import Type, List, Tuple, Dict\n\n# libclang imports\nimport clang.cindex\nfrom clang.cindex import TranslationUnit, Index, CursorKind, Cursor\n\n\ndef to_filename(classname: str) -> str:\n  \"\"\"\n  maps mock class name (in C++ codes) to filenames under the Envoy naming convention.\n  e.g. map \"MockAdminStream\" to \"admin_stream\"\n\n  Args:\n      classname: mock class name from source\n\n  Returns:\n      corresponding file name\n  \"\"\"\n  filename = classname.replace(\"Mock\", \"\", 1)  # Remove only first \"Mock\"\n  ret = \"\"\n  for index, val in enumerate(filename):\n    if val.isupper() and index > 0:\n      ret += \"_\"\n    ret += val\n  return ret.lower()\n\n\ndef get_directives(translation_unit: Type[TranslationUnit]) -> str:\n  \"\"\"\n  \"extracts\" all header includes statements and other directives from the target source code file\n\n  for instance:\n      foo.h:\n      #pragma once\n      #include \"a.h\"\n      #include \"b.h\"\n\n      int foo(){\n      }\n  this function should return\n  '#pragma once\\n#include \"a.h\"\\n#include \"b.h\"'\n\n  Args:\n      translation_unit: parsing result of target source code by libclang\n\n  Returns:\n      A string, contains all includes statements and other preprocessor directives before the\n      first non-directive statement.\n\n  Notes:\n      clang lib provides API like tranlation_unit.get_includes() to get include directives.\n      But we can't use it as it requires presence of the included files to return the full list.\n      We choose to return the string instead of list of includes since we will simply copy-paste\n      
the include statements into generated headers. Return string seems more convenient\n  \"\"\"\n  cursor = translation_unit.cursor\n  for descendant in cursor.walk_preorder():\n    if descendant.location.file is not None and descendant.location.file.name == cursor.displayname:\n      filename = descendant.location.file.name\n      contents = read_file_contents(filename)\n      return contents[:descendant.extent.start.offset]\n  return \"\"\n\n\ndef cursors_in_same_file(cursor: Cursor) -> List[Cursor]:\n  \"\"\"\n  get all child cursors which are pointing to the same file as the input cursor\n\n  Args:\n    cursor: cursor of parsing result of target source code by libclang\n\n  Returns:\n    a list of cursor\n  \"\"\"\n  cursors = []\n  for descendant in cursor.walk_preorder():\n    # We don't want Cursors from files other than the input file,\n    # otherwise we get definitions for every file included\n    # when clang parsed the input file (i.e. if we don't limit descendant location,\n    # it will check definitions from included headers and get class definitions like std::string)\n    if descendant.location.file is None:\n      continue\n    if descendant.location.file.name != cursor.displayname:\n      continue\n    cursors.append(descendant)\n  return cursors\n\n\ndef class_definitions(cursor: Cursor) -> List[Cursor]:\n  \"\"\"\n  extracts all class definitions in the file pointed by cursor. 
(typical mocks.h)\n\n  Args:\n      cursor: cursor of parsing result of target source code by libclang\n\n  Returns:\n      a list of cursor, each pointing to a class definition.\n  \"\"\"\n  cursors = cursors_in_same_file(cursor)\n  class_cursors = []\n  for descendant in cursors:\n    # check if descendant is pointing to a class declaration block.\n    if descendant.kind != CursorKind.CLASS_DECL:\n      continue\n    if not descendant.is_definition():\n      continue\n    # check if this class is directly enclosed by a namespace.\n    if descendant.semantic_parent.kind != CursorKind.NAMESPACE:\n      continue\n    class_cursors.append(descendant)\n  return class_cursors\n\n\ndef class_implementations(cursor: Cursor) -> List[Cursor]:\n  \"\"\"\n  extracts all class implementation in the file pointed by cursor. (typical mocks.cc)\n\n  Args:\n      cursor: cursor of parsing result of target source code by libclang\n\n  Returns:\n      a list of cursor, each pointing to a class implementation.\n  \"\"\"\n  cursors = cursors_in_same_file(cursor)\n  impl_cursors = []\n  for descendant in cursors:\n    if descendant.kind == CursorKind.NAMESPACE:\n      continue\n    # check if descendant is pointing to a class method\n    if descendant.semantic_parent is None:\n      continue\n    if descendant.semantic_parent.kind == CursorKind.CLASS_DECL:\n      impl_cursors.append(descendant)\n  return impl_cursors\n\n\ndef extract_definition(cursor: Cursor, classnames: List[str]) -> Tuple[str, str, List[str]]:\n  \"\"\"\n  extracts class definition source code pointed by the cursor parameter.\n  and find dependent mock classes by naming look up.\n\n  Args:\n      cursor: libclang cursor pointing to the target mock class definition.\n      classnames: all mock class names defined in the definition header that needs to be\n          divided, used to parse class dependencies.\n  Returns:\n      class_name: a string representing the mock class name.\n      class_defn: a string contains 
the whole class definition body.\n      deps: a set of string contains all dependent classes for the return class.\n\n  Note:\n      It can not detect and resolve forward declaration and cyclic dependency. Need to address\n      manually.\n  \"\"\"\n  filename = cursor.location.file.name\n  contents = read_file_contents(filename)\n  class_name = cursor.spelling\n  class_defn = contents[cursor.extent.start.offset:cursor.extent.end.offset] + \";\"\n  # need to know enclosing semantic parents (namespaces)\n  # to generate corresponding definitions\n  parent_cursor = cursor.semantic_parent\n  while parent_cursor.kind == CursorKind.NAMESPACE:\n    if parent_cursor.spelling == \"\":\n      break\n    class_defn = \"namespace {} {{\\n\".format(parent_cursor.spelling) + class_defn + \"\\n}\\n\"\n    parent_cursor = parent_cursor.semantic_parent\n  # resolve dependency\n  # by simple naming look up\n  deps = set()\n  for classname in classnames:\n    if classname in class_defn and classname != class_name:\n      deps.add(classname)\n  return class_name, class_defn, deps\n\n\ndef get_implline(cursor: Cursor) -> int:\n  \"\"\"\n  finds the first line of implementation source code for class method pointed by the cursor\n  parameter. \n\n  Args:\n      cursor: libclang cursor pointing to the target mock class definition.\n\n  Returns:\n      an integer, the line number of the first line of the corresponding method implementation\n      code (zero indexed)\n\n  Note:\n      This function return line number only. Because in certain case libclang will fail in parsing\n      the method body and stops parsing early (see headersplit_test.test_class_implementations_error\n      for details). To address this issue when parsing implementation code, we passed the flag that\n      ask clang to ignore function bodies.\n      We can not get the function body directly with the same way we used in extract_definition() \n      since clang didn't parse function this time. 
Though we can't get the correct method extent\n      offset from Cursor, we can still get the start line of the corresponding method instead.\n      (We can't get the correct line number for the last line due to skipping function bodies)\n  \"\"\"\n  return cursor.extent.start.line - 1\n\n\ndef extract_implementations(impl_cursors: List[Cursor], source_code: str) -> Dict[str, str]:\n  \"\"\"\n  extracts method function body for each cursor in list impl_cursors from source code\n  groups those function bodies with class name to help generating the divided {classname}.cc\n  returns a dict maps class name to the concatenation of all its member methods implementations.\n\n  Args:\n      impl_cursors: list of cursors, each pointing to a mock class member function implementation.\n      source_code: string, the source code for implementations (e.g. mocks.cc)\n\n  Returns:\n      classname_to_impl: a dict maps class name to its member methods implementations\n  \"\"\"\n  classname_to_impl = dict()\n  for i, cursor in enumerate(impl_cursors):\n    classname = cursor.semantic_parent.spelling\n    # get first line of function body\n    implline = get_implline(cursor)\n    # get last line of function body\n    if i + 1 < len(impl_cursors):\n      # i is not the last method, get the start line for the next method\n      # as the last line of i\n      impl_end = get_implline(impl_cursors[i + 1])\n      impl = \"\".join(source_code[implline:impl_end])\n    else:\n      # i is the last method, after removing the lines containing close brackets\n      # for namespaces, the rest should be the function body\n      offset = 0\n      while implline + offset < len(source_code):\n        if \"// namespace\" in source_code[implline + offset]:\n          break\n        offset += 1\n      impl = \"\".join(source_code[implline:implline + offset])\n    if classname in classname_to_impl:\n      classname_to_impl[classname] += impl + \"\\n\"\n    else:\n      classname_to_impl[classname] = 
impl + \"\\n\"\n  return classname_to_impl\n\n\ndef get_enclosing_namespace(defn: Cursor) -> Tuple[str, str]:\n  \"\"\"\n  retrieves all enclosing namespaces for the class pointed by defn.\n  this is necessary to construct the mock class header\n  e.g.:\n  defn is pointing MockClass in the follow source code:\n\n  namespace Envoy {\n  namespace Server {\n  class MockClass2 {...}\n  namespace Configuration {\n  class MockClass {...}\n        ^ \n        defn\n  }\n  }\n  }\n\n  this function will return:\n  \"namespace Envoy {\\nnamespace Server {\\nnamespace Configuration{\\n\" and \"\\n}\\n}\\n}\\n\" \n\n  Args:\n      defn: libclang Cursor pointing to a mock class\n\n  Returns:\n      namespace_prefix, namespace_suffix: a pair of string, representing the enclosing namespaces\n  \"\"\"\n  namespace_prefix = \"\"\n  namespace_suffix = \"\"\n  parent_cursor = defn.semantic_parent\n  while parent_cursor.kind == CursorKind.NAMESPACE:\n    if parent_cursor.spelling == \"\":\n      break\n    namespace_prefix = \"namespace {} {{\\n\".format(parent_cursor.spelling) + namespace_prefix\n    namespace_suffix += \"\\n}\"\n    parent_cursor = parent_cursor.semantic_parent\n  namespace_suffix += \"\\n\"\n  return namespace_prefix, namespace_suffix\n\n\ndef read_file_contents(path):\n  with open(path, \"r\") as input_file:\n    return input_file.read()\n\n\ndef write_file_contents(class_name, class_defn, class_impl):\n  with open(\"{}.h\".format(to_filename(class_name)), \"w\") as decl_file:\n    decl_file.write(class_defn)\n  with open(\"{}.cc\".format(to_filename(class_name)), \"w\") as impl_file:\n    impl_file.write(class_impl)\n  # generating bazel build file, need to fill dependency manually\n  bazel_text = \"\"\"\nenvoy_cc_mock(\n  name = \"{}_mocks\",\n  srcs = [\"{}.cc\"],\n  hdrs = [\"{}.h\"],\n  deps = [\n\n  ]\n)\n\"\"\".format(to_filename(class_name), to_filename(class_name), to_filename(class_name))\n  with open(\"BUILD\", \"r+\") as bazel_file:\n    contents = 
bazel_file.read()\n    if 'name = \"{}_mocks\"'.format(to_filename(class_name)) not in contents:\n      bazel_file.write(bazel_text)\n\n\ndef main(args):\n  \"\"\"\n  divides the monolithic mock file into different mock class files.\n  \"\"\"\n  decl_filename = args[\"decl\"]\n  impl_filename = args[\"impl\"]\n  idx = Index.create()\n  impl_translation_unit = TranslationUnit.from_source(\n      impl_filename, options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)\n  impl_includes = get_directives(impl_translation_unit)\n  decl_translation_unit = idx.parse(decl_filename, [\"-x\", \"c++\"])\n  defns = class_definitions(decl_translation_unit.cursor)\n  decl_includes = get_directives(decl_translation_unit)\n  impl_cursors = class_implementations(impl_translation_unit.cursor)\n  contents = read_file_contents(impl_filename)\n  classname_to_impl = extract_implementations(impl_cursors, contents)\n  classnames = [cursor.spelling for cursor in defns]\n  for defn in defns:\n    # writing {class}.h and {classname}.cc\n    class_name, class_defn, deps = extract_definition(defn, classnames)\n    includes = \"\"\n    for name in deps:\n      includes += '#include \"{}.h\"\\n'.format(to_filename(name))\n    class_defn = decl_includes + includes + class_defn\n    class_impl = \"\"\n    if class_name not in classname_to_impl:\n      print(\"Warning: empty class {}\".format(class_name))\n    else:\n      impl_include = impl_includes.replace(decl_filename, \"{}.h\".format(to_filename(class_name)))\n      # we need to enclose methods with namespaces\n      namespace_prefix, namespace_suffix = get_enclosing_namespace(defn)\n      class_impl = impl_include + namespace_prefix + \\\n          classname_to_impl[class_name] + namespace_suffix\n    write_file_contents(class_name, class_defn, class_impl)\n\n\nif __name__ == \"__main__\":\n  PARSER = argparse.ArgumentParser()\n  PARSER.add_argument(\n      \"-d\",\n      \"--decl\",\n      default=\"mocks.h\",\n      help=\"Path to the monolithic 
header .h file that needs to be splitted\",\n  )\n  PARSER.add_argument(\n      \"-i\",\n      \"--impl\",\n      default=\"mocks.cc\",\n      help=\"Path to the implementation code .cc file that needs to be splitted\",\n  )\n  main(vars(PARSER.parse_args()))\n"
  },
  {
    "path": "tools/envoy_headersplit/headersplit_test.py",
    "content": "# Lint as: python3\n\"\"\"Tests for headersplit.\"\"\"\n\nimport headersplit\nimport io\nimport os\nimport subprocess\nimport sys\nimport unittest\n\n# libclang imports\nimport clang.cindex\nfrom clang.cindex import TranslationUnit, Index, CursorKind\n\n\nclass HeadersplitTest(unittest.TestCase):\n  # A header contains a simple class print hello world\n  source_code_hello_world = open(\"tools/envoy_headersplit/code_corpus/hello.h\", \"r\").read()\n  # A C++ source code contains definition for several classes\n  source_class_defn = open(\"tools/envoy_headersplit/code_corpus/class_defn.h\", \"r\").read()\n  # almost the same as above, but classes are not enclosed by namespace\n  source_class_defn_without_namespace = open(\n      \"tools/envoy_headersplit/code_corpus/class_defn_without_namespace.h\", \"r\").read()\n  # A C++ source code contains method implementations for class_defn.h\n  source_class_impl = open(\"tools/envoy_headersplit/code_corpus/class_impl.cc\", \"r\").read()\n\n  def test_to_filename(self):\n    # Test class name with one \"mock\"\n    self.assertEqual(headersplit.to_filename(\"MockAdminStream\"), \"admin_stream\")\n\n    # Test class name with two \"Mock\"\n    self.assertEqual(headersplit.to_filename(\"MockClusterMockPrioritySet\"),\n                     \"cluster_mock_priority_set\")\n\n    # Test class name with no \"Mock\"\n    self.assertEqual(headersplit.to_filename(\"TestRetryHostPredicateFactory\"),\n                     \"test_retry_host_predicate_factory\")\n\n  def test_get_directives(self):\n    includes = \"\"\"// your first c++ program\n// NOLINT(namespace-envoy)\n#include <iostream>\n\n// random strings\n\n#include \"foo/bar\"\n\n\"\"\"\n    translation_unit_hello_world = TranslationUnit.from_source(\n        \"tools/envoy_headersplit/code_corpus/hello.h\",\n        options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)\n    self.assertEqual(headersplit.get_directives(translation_unit_hello_world), includes)\n\n  def 
test_class_definitions(self):\n    idx = Index.create()\n    translation_unit_class_defn = idx.parse(\"tools/envoy_headersplit/code_corpus/class_defn.h\",\n                                            [\"-x\", \"c++\"])\n    defns_cursors = headersplit.class_definitions(translation_unit_class_defn.cursor)\n    defns_names = [cursor.spelling for cursor in defns_cursors]\n    self.assertEqual(defns_names, [\"Foo\", \"Bar\", \"FooBar\", \"DeadBeaf\"])\n    idx = Index.create()\n    translation_unit_class_defn = idx.parse(\n        \"tools/envoy_headersplit/code_corpus/class_defn_without_namespace.h\", [\"-x\", \"c++\"])\n    defns_cursors = headersplit.class_definitions(translation_unit_class_defn.cursor)\n    defns_names = [cursor.spelling for cursor in defns_cursors]\n    self.assertEqual(defns_names, [])\n\n  def test_class_implementations(self):\n    translation_unit_class_impl = TranslationUnit.from_source(\n        \"tools/envoy_headersplit/code_corpus/class_impl.cc\",\n        options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)\n    impls_cursors = headersplit.class_implementations(translation_unit_class_impl.cursor)\n    impls_names = [cursor.spelling for cursor in impls_cursors]\n    self.assertEqual(impls_names, [\"getFoo\", \"val\", \"DeadBeaf\"])\n\n  def test_class_implementations_error(self):\n    # LibClang will fail in parse this source file (it's modified from the original\n    # test/server/mocks.cc from Envoy repository) if we don't add flag PARSE_SKIP_FUNCTION_BODIES\n    # to ignore function bodies.\n    impl_translation_unit = TranslationUnit.from_source(\n        \"tools/envoy_headersplit/code_corpus/fail_mocks.cc\")\n    impls_cursors = headersplit.class_implementations(impl_translation_unit.cursor)\n    # impls_name is not complete in this case\n    impls_names = [cursor.spelling for cursor in impls_cursors]\n    # LibClang will stop parsing at\n    # MockListenerComponentFactory::MockListenerComponentFactory()\n    #     : 
socket_(std::make_shared<NiceMock<Network::MockListenSocket>>()) {\n    #       ^\n    # Since parsing stops early, we will have incomplete method list.\n    # The reason is not clear, however, this issue can be addressed by adding parsing flag to\n    # ignore function body\n\n    # get correct list of member methods\n    impl_translation_unit_correct = TranslationUnit.from_source(\n        \"tools/envoy_headersplit/code_corpus/fail_mocks.cc\",\n        options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)\n    impls_cursors_correct = headersplit.class_implementations(impl_translation_unit_correct.cursor)\n    impls_names_correct = [cursor.spelling for cursor in impls_cursors_correct]\n    self.assertNotEqual(impls_names, impls_names_correct)\n\n\nif __name__ == \"__main__\":\n  unittest.main()\n"
  },
  {
    "path": "tools/envoy_headersplit/replace_includes.py",
    "content": "# !/usr/bin/env python3\n# Lint as: python3\n\"\"\"\nThis python script can be used to refactor Envoy source code #include after dividing the monolithic\nmock headers into different mock classes. This will reduce the building time for specific tests\nsignificantly.\n\ne.g.\n\n#include \"test/mocks/server.h\" -> #include \"test/mocks/admin.h\" if the source code only used mock\nclass Server::MockAdmin.\n\nthis script needs to be executed in the Envoy directory\n\"\"\"\nfrom pathlib import Path\nfrom headersplit import to_filename\nfrom typing import List\nimport argparse\n\n\ndef to_classname(filename: str) -> str:\n  \"\"\"\n  maps divided mock class file name to class names\n  inverse function of headersplit.to_filename\n  e.g. map \"test/mocks/server/admin_stream.h\" to \"MockAdminStream\"\n\n  Args:\n      filename: string, mock class header file name (might be the whole path instead of the base name)\n\n  Returns:\n      corresponding class name\n  \"\"\"\n  classname_tokens = filename.split('/')[-1].replace('.h', '').split('_')\n  classname = \"Mock\" + ''.join(map(lambda x: x[:1].upper() + x[1:], classname_tokens))\n  return classname\n\n\ndef to_bazelname(filename: str, mockname: str) -> str:\n  \"\"\"\n  maps divided mock class file name to bazel target name\n  e.g. 
map \"test/mocks/server/admin_stream.h\" to \"//test/mocks/server:admin_stream_mocks\"\n\n  Args:\n      filename: string, mock class header file name (might be the whole path instead of the base name)\n      mockname: string, mock directory name\n\n  Returns:\n      corresponding bazel target name\n  \"\"\"\n  bazelname = \"//test/mocks/{}:\".format(mockname)\n  bazelname += filename.split('/')[-1].replace('.h', '') + '_mocks'.format(mockname)\n  return bazelname\n\n\ndef get_filenames(mockname: str) -> List[str]:\n  \"\"\"\n  scans all headers in test/mocks/{mockname}, return corresponding file names\n\n  Args:\n    mockname: string, mock directory name\n\n  Returns:\n    List of file name for the headers in test/mock/{mocksname}\n  \"\"\"\n  dir = Path(\"test/mocks/{}/\".format(mockname))\n  filenames = list(map(str, dir.glob('*.h')))\n  return filenames\n\n\ndef replace_includes(mockname):\n  filenames = get_filenames(mockname)\n  classnames = [to_classname(filename) for filename in filenames]\n  p = Path('./test')\n  changed_list = []  # list of test code that been refactored\n  # walk through all files and check files that contains \"{mockname}/mocks.h\"\n  # don't forget change dependency on bazel\n  for test_file in p.glob('**/*.cc'):\n    replace_includes = \"\"\n    used_mock_header = False\n    bazel_targets = \"\"\n    with test_file.open() as f:\n      content = f.read()\n      if '#include \"test/mocks/{}/mocks.h\"'.format(mockname) in content:\n        used_mock_header = True\n        replace_includes = \"\"\n        for classname in classnames:\n          if classname in content:\n            # replace mocks.h with mock class header used by this test library\n            # limitation: if some class names in classnames are substrings of others, this part\n            # will bring over-inclusion e.g. 
if we have MockCluster and MockClusterFactory, and\n            # the source code only used MockClusterFactory, then the result code will also include\n            # MockCluster since it also shows in the file.\n            # TODO: use clang to analysis class usage instead by simple find and replace\n            replace_includes += '#include \"test/mocks/{}/{}.h\"\\n'.format(\n                mockname, to_filename(classname))\n            bazel_targets += '\"{}\",'.format(to_bazelname(to_filename(classname), mockname))\n    if used_mock_header:\n      changed_list.append(str(test_file.relative_to(Path('.'))) + '\\n')\n      with test_file.open(mode='w') as f:\n        f.write(\n            content.replace('#include \"test/mocks/{}/mocks.h\"\\n'.format(mockname),\n                            replace_includes))\n      with (test_file.parent / 'BUILD').open() as f:\n        # write building files\n        content = f.read()\n        split_content = content.split(test_file.name)\n        split_content[1] = split_content[1].replace(\n            '\"//test/mocks/{}:{}_mocks\",'.format(mockname, mockname), bazel_targets, 1)\n        content = split_content[0] + test_file.name + split_content[1]\n      with (test_file.parent / 'BUILD').open('w') as f:\n        f.write(content)\n  with open(\"changed.txt\", \"w\") as f:\n    f.writelines(changed_list)\n\n\nif __name__ == '__main__':\n  PARSER = argparse.ArgumentParser()\n  PARSER.add_argument('-m', '--mockname', default=\"server\", help=\"mock folder that been divided\")\n  mockname = vars(PARSER.parse_args())['mockname']\n  replace_includes(mockname)\n"
  },
  {
    "path": "tools/envoy_headersplit/replace_includes_test.py",
    "content": "# Lint as: python3\n\"\"\"Tests for replace_includes.\"\"\"\n\nimport unittest\nfrom unittest import mock\nimport os\nfrom pathlib import Path\nimport replace_includes\n\n\nclass ReplaceIncludesTest(unittest.TestCase):\n\n  def test_to_classname(self):\n    # Test file name with whole path\n    self.assertEqual(replace_includes.to_classname(\"test/mocks/server/admin_stream.h\"),\n                     \"MockAdminStream\")\n    # Test file name without .h extension\n    self.assertEqual(replace_includes.to_classname(\"cluster_mock_priority_set\"),\n                     \"MockClusterMockPrioritySet\")\n\n  def test_to_bazelname(self):\n    # Test file name with whole path\n    self.assertEqual(replace_includes.to_bazelname(\"test/mocks/server/admin_stream.h\", \"server\"),\n                     \"//test/mocks/server:admin_stream_mocks\")\n    # Test file name without .h extension\n    self.assertEqual(replace_includes.to_bazelname(\"cluster_mock_priority_set\", \"upstream\"),\n                     \"//test/mocks/upstream:cluster_mock_priority_set_mocks\")\n\n  class FakeDir():\n    # fake directory to test get_filenames\n    def glob(self, _):\n      return [\n          Path(\"test/mocks/server/admin_stream.h\"),\n          Path(\"test/mocks/server/admin.h\"),\n          Path(\"test/mocks/upstream/cluster_manager.h\")\n      ]\n\n  @mock.patch(\"replace_includes.Path\", return_value=FakeDir())\n  def test_get_filenames(self, mock_Path):\n    self.assertEqual(replace_includes.get_filenames(\"sever\"), [\n        \"test/mocks/server/admin_stream.h\", \"test/mocks/server/admin.h\",\n        \"test/mocks/upstream/cluster_manager.h\"\n    ])\n\n  def test_replace_includes(self):\n    fake_source_code = open(\"tools/envoy_headersplit/code_corpus/fake_source_code.cc\", \"r\").read()\n    fake_build_file = open(\"tools/envoy_headersplit/code_corpus/fake_build\", \"r\").read()\n    os.mkdir(\"test\")\n    os.mkdir(\"test/mocks\")\n    
os.mkdir(\"test/mocks/upstream\")\n    open(\"test/mocks/upstream/cluster_manager.h\", \"a\").close()\n    with open(\"test/async_client_impl_test.cc\", \"w\") as f:\n      f.write(fake_source_code)\n    with open(\"test/BUILD\", \"w\") as f:\n      f.write(fake_build_file)\n    replace_includes.replace_includes(\"upstream\")\n    source_code = \"\"\n    build_file = \"\"\n    with open(\"test/async_client_impl_test.cc\", \"r\") as f:\n      source_code = f.read()\n    with open(\"test/BUILD\", \"r\") as f:\n      build_file = f.read()\n    self.assertEqual(source_code,\n                     fake_source_code.replace(\"upstream/mocks\", \"upstream/cluster_manager\"))\n    self.assertEqual(\n        build_file,\n        fake_build_file.replace(\"upstream:upstream_mocks\", \"upstream:cluster_manager_mocks\"))\n\n\nif __name__ == \"__main__\":\n  unittest.main()\n"
  },
  {
    "path": "tools/envoy_headersplit/requirements.txt",
    "content": "clang==10.0.1 \\\n    --hash=sha256:c90eca387fede58e2398c4e211e2b38a310f5caa9adb367a8f84aa1ba2fe98b5 \\\n    --hash=sha256:f8d8e02ebaed0e9b8d5e6173c3c38b68e5f381ba34841aeb8145087d16750d89\n"
  },
  {
    "path": "tools/find_related_envoy_files.py",
    "content": "#!/usr/bin/env python\n#\n# Emits related filenames given an envoy source/include/test filename.\n# This can assist a text-editor with a hot-key to rotate between\n# files. E.g. for Emacs this is enabled by loading\n# envoy-emacs-hooks.el.\n#\n# Takes a filename as its only arg, and emits a list of files that are\n# related to it, in a deterministic order so that by visiting the\n# first file in the list, you cycle through impl, header, test, and\n# interface -- whichever of those happen to exist. One file is emitted\n# per line.\n\nimport os\nimport os.path\nimport sys\n\n# Name of top level git directory.\nENVOY_ROOT = \"/envoy/\"\n\nSOURCE_ROOT = \"source\"\nTEST_ROOT = \"test\"\nINTERFACE_REAL_ROOT = \"include/envoy\"\n\n# Synthetic name for /include/envoy/, which helps us disambiguate\n# the 'envoy' underneath include from the top level of the git repo.\nINTERFACE_SYNTHETIC_ROOT = \"include-envoy\"\n\n# We want to search the file from the leaf up for 'envoy', which is\n# the name of the top level directory in the git repo. However, it's\n# also the name of a subdirectory of 'include' -- the only\n# subdirectory of 'include' currently, so it's easier just to treat\n# that as a single element.\nfname = sys.argv[1].replace(\"/\" + INTERFACE_REAL_ROOT + \"/\", \"/\" + INTERFACE_SYNTHETIC_ROOT + \"/\")\n\n# Parse the absolute location of this repo, its relative path, and\n# file extension, exiting with no output along the way any time there\n# is trouble.\nenvoy_index = fname.rfind(ENVOY_ROOT)\nif envoy_index == -1:\n  sys.exit(0)\nenvoy_index += len(ENVOY_ROOT)\nabsolute_location = fname[0:envoy_index]  # \"/path/to/gitroot/envoy/\"\npath = fname[envoy_index:]\npath_elements = path.split(\"/\")\nif len(path_elements) < 3:\n  sys.exit(0)\nleaf = path_elements[len(path_elements) - 1]\ndot = leaf.rfind(\".\")\nif dot == -1 or dot == len(leaf) - 1:\n  sys.exit(0)\next = leaf[dot:]\n\n\n# Transforms the input filename based on some transformation rules. 
Nothing\n# is emitted if the input path or extension does not match the expected pattern,\n# or if the file doesn't exist.\ndef emit(source_path, dest_path, source_ending, dest_ending):\n  if fname.endswith(source_ending) and path.startswith(source_path + \"/\"):\n    path_len = len(path) - len(source_path) - len(source_ending)\n    new_path = (absolute_location + dest_path + path[len(source_path):-len(source_ending)] +\n                dest_ending)\n    if os.path.isfile(new_path):\n      print(new_path)\n\n\n# Depending on which type of file is passed into the script: test, cc,\n# h, or interface, emit any related ones in cyclic order.\nroot = path_elements[0]\nif root == TEST_ROOT:\n  emit(\"test/common\", INTERFACE_REAL_ROOT, \"_impl_test.cc\", \".h\")\n  emit(TEST_ROOT, SOURCE_ROOT, \"_test.cc\", \".cc\")\n  emit(TEST_ROOT, SOURCE_ROOT, \"_test.cc\", \".h\")\n  emit(TEST_ROOT, TEST_ROOT, \".cc\", \".h\")\n  emit(TEST_ROOT, TEST_ROOT, \".cc\", \"_test.cc\")\n  emit(TEST_ROOT, TEST_ROOT, \".h\", \"_test.cc\")\n  emit(TEST_ROOT, TEST_ROOT, \".h\", \".cc\")\n  emit(TEST_ROOT, TEST_ROOT, \"_test.cc\", \".cc\")\n  emit(TEST_ROOT, TEST_ROOT, \"_test.cc\", \".h\")\nelif root == SOURCE_ROOT and ext == \".cc\":\n  emit(SOURCE_ROOT, SOURCE_ROOT, \".cc\", \".h\")\n  emit(SOURCE_ROOT, TEST_ROOT, \".cc\", \"_test.cc\")\n  emit(\"source/common\", INTERFACE_REAL_ROOT, \"_impl.cc\", \".h\")\nelif root == SOURCE_ROOT and ext == \".h\":\n  emit(SOURCE_ROOT, TEST_ROOT, \".h\", \"_test.cc\")\n  emit(\"source/common\", INTERFACE_REAL_ROOT, \"_impl.h\", \".h\")\n  emit(SOURCE_ROOT, SOURCE_ROOT, \".h\", \".cc\")\nelif root == INTERFACE_SYNTHETIC_ROOT:\n  emit(INTERFACE_SYNTHETIC_ROOT, \"source/common\", \".h\", \"_impl.cc\")\n  emit(INTERFACE_SYNTHETIC_ROOT, \"source/common\", \".h\", \"_impl.h\")\n  emit(INTERFACE_SYNTHETIC_ROOT, \"test/common\", \".h\", \"_impl_test.cc\")\n"
  },
  {
    "path": "tools/gen_compilation_database.py",
    "content": "#!/usr/bin/env python3\n\nimport argparse\nimport glob\nimport json\nimport logging\nimport os\nimport shlex\nimport subprocess\nfrom pathlib import Path\n\n\n# This method is equivalent to https://github.com/grailbio/bazel-compilation-database/blob/master/generate.sh\ndef generateCompilationDatabase(args):\n  # We need to download all remote outputs for generated source code. This option lives here to override those\n  # specified in bazelrc.\n  bazel_options = shlex.split(os.environ.get(\"BAZEL_BUILD_OPTIONS\", \"\")) + [\n      \"--config=compdb\",\n      \"--remote_download_outputs=all\",\n  ]\n\n  subprocess.check_call([\"bazel\", \"build\"] + bazel_options + [\n      \"--aspects=@bazel_compdb//:aspects.bzl%compilation_database_aspect\",\n      \"--output_groups=compdb_files,header_files\"\n  ] + args.bazel_targets)\n\n  execroot = subprocess.check_output([\"bazel\", \"info\", \"execution_root\"] +\n                                     bazel_options).decode().strip()\n\n  compdb = []\n  for compdb_file in Path(execroot).glob(\"**/*.compile_commands.json\"):\n    compdb.extend(json.loads(\"[\" + compdb_file.read_text().replace(\"__EXEC_ROOT__\", execroot) +\n                             \"]\"))\n  return compdb\n\n\ndef isHeader(filename):\n  for ext in (\".h\", \".hh\", \".hpp\", \".hxx\"):\n    if filename.endswith(ext):\n      return True\n  return False\n\n\ndef isCompileTarget(target, args):\n  filename = target[\"file\"]\n  if not args.include_headers and isHeader(filename):\n    return False\n\n  if not args.include_genfiles:\n    if filename.startswith(\"bazel-out/\"):\n      return False\n\n  if not args.include_external:\n    if filename.startswith(\"external/\"):\n      return False\n\n  return True\n\n\ndef modifyCompileCommand(target, args):\n  cc, options = target[\"command\"].split(\" \", 1)\n\n  # Workaround for bazel added C++11 options, those doesn't affect build itself but\n  # clang-tidy will misinterpret them.\n  options = 
options.replace(\"-std=c++0x \", \"\")\n  options = options.replace(\"-std=c++11 \", \"\")\n\n  if args.vscode:\n    # Visual Studio Code doesn't seem to like \"-iquote\". Replace it with\n    # old-style \"-I\".\n    options = options.replace(\"-iquote \", \"-I \")\n\n  if isHeader(target[\"file\"]):\n    options += \" -Wno-pragma-once-outside-header -Wno-unused-const-variable\"\n    options += \" -Wno-unused-function\"\n    if not target[\"file\"].startswith(\"external/\"):\n      # *.h file is treated as C header by default while our headers files are all C++17.\n      options = \"-x c++ -std=c++17 -fexceptions \" + options\n\n  target[\"command\"] = \" \".join([cc, options])\n  return target\n\n\ndef fixCompilationDatabase(args, db):\n  db = [modifyCompileCommand(target, args) for target in db if isCompileTarget(target, args)]\n\n  with open(\"compile_commands.json\", \"w\") as db_file:\n    json.dump(db, db_file, indent=2)\n\n\nif __name__ == \"__main__\":\n  parser = argparse.ArgumentParser(description='Generate JSON compilation database')\n  parser.add_argument('--include_external', action='store_true')\n  parser.add_argument('--include_genfiles', action='store_true')\n  parser.add_argument('--include_headers', action='store_true')\n  parser.add_argument('--vscode', action='store_true')\n  parser.add_argument('bazel_targets',\n                      nargs='*',\n                      default=[\"//source/...\", \"//test/...\", \"//tools/...\"])\n  args = parser.parse_args()\n  fixCompilationDatabase(args, generateCompilationDatabase(args))\n"
  },
  {
    "path": "tools/git/last_github_commit.sh",
    "content": "#!/bin/bash\n\n# Looking back from HEAD, find the first commit that was merged onto master by GitHub. This is\n# likely the last non-local change on a given branch. There may be some exceptions for this\n# heuristic, e.g. when patches are manually merged for security fixes on master, but this is very\n# rare.\n\ngit rev-list --no-merges --committer=\"GitHub <noreply@github.com>\" --max-count=1 HEAD\n"
  },
  {
    "path": "tools/git/modified_since_last_github_commit.sh",
    "content": "#!/bin/bash\n\nBASE=\"$(dirname \"$0\")\"\ndeclare -r BASE\ndeclare -r TARGET_PATH=$1\ndeclare -r EXTENSION=$2\nexport TARGET_PATH\n\n\ngit diff --name-only \"$(\"${BASE}\"/last_github_commit.sh)\" | grep \"\\.${EXTENSION}$\"\n"
  },
  {
    "path": "tools/github/requirements.txt",
    "content": "PyGithub==1.53 \\\n    --hash=sha256:776befaddab9d8fddd525d52a6ca1ac228cf62b5b1e271836d766f4925e1452e \\\n    --hash=sha256:8ad656bf79958e775ec59f7f5a3dbcbadac12147ae3dc42708b951064096af15\n"
  },
  {
    "path": "tools/github/sync_assignable.py",
    "content": "# Sync envoyproxy organization users to envoyproxy/assignable team.\n#\n# This can be used for bulk cleanups if envoyproxy/assignable is not consistent\n# with organization membership. In general, prefer to add new members by editing\n# the envoyproxy/assignable in the GitHub UI, which will also cause an\n# organization invite to be sent; this reduces the need to manually manage\n# access tokens.\n#\n# Note: the access token supplied must have admin:org (write:org, read:org)\n# permissions (and ideally be scoped no more widely than this). See Settings ->\n# Developer settings -> Personal access tokens for access token generation.\n# Ideally, these should be cleaned up after use.\n\nimport os\nimport sys\n\nimport github\n\n\ndef GetConfirmation():\n  \"\"\"Obtain stdin confirmation to add users in GH.\"\"\"\n  return input('Add users to envoyproxy/assignable ? [yN] ').strip().lower() in ('y', 'yes')\n\n\ndef SyncAssignable(access_token):\n  organization = github.Github(access_token).get_organization('envoyproxy')\n  team = organization.get_team_by_slug('assignable')\n  organization_members = set(organization.get_members())\n  assignable_members = set(team.get_members())\n  missing = organization_members.difference(assignable_members)\n\n  if not missing:\n    print('envoyproxy/assignable is consistent with organization membership.')\n    return 0\n\n  print('The following organization members are missing from envoyproxy/assignable:')\n  for m in missing:\n    print(m.login)\n\n  if not GetConfirmation():\n    return 1\n\n  for m in missing:\n    team.add_membership(m, 'member')\n\n\nif __name__ == '__main__':\n  access_token = os.getenv('GH_ACCESS_TOKEN')\n  if not access_token:\n    print('Missing GH_ACCESS_TOKEN')\n    sys.exit(1)\n\n  sys.exit(SyncAssignable(access_token))\n"
  },
  {
    "path": "tools/github/sync_assignable.sh",
    "content": "#!/bin/bash\n\n. tools/shell_utils.sh\n\nset -e\n\npython_venv sync_assignable\n"
  },
  {
    "path": "tools/path_fix.sh",
    "content": "#!/bin/bash\n# This script can be used to run bazel commands. It will attempt to translate paths in compiler\n# error messages to real system paths (vs. bazel symbolic links) which may be necessary for some\n# IDEs to properly associate error messages to files.\n# To invoke, do something like:\n# tools/path_fix.sh bazel build //test/...\n#\n# NOTE: This implementation is far from perfect and will need to be refined to cover all cases.\n\n\"$@\" 2>&1 |\n  while IFS= read -r LINE\n  do\n    if [[ \"${LINE}\" =~ [[:space:]]*([^:[:space:]]+):[[:digit:]]+:[[:digit:]]+: ]]; then\n      # Bazel now appears to sometimes spit out paths that don't actually exist on disk at all. I\n      # have no idea why this is happening (sigh). This check makes it so that if readlink fails we\n      # don't attempt to fix the path and just print out what we got.\n      if REAL_PATH=$(readlink -f \"${BASH_REMATCH[1]}\"); then\n          LINE=${LINE//${BASH_REMATCH[1]}/${REAL_PATH}}\n      fi\n    fi\n    echo \"${LINE}\"\n  done\n"
  },
  {
    "path": "tools/print_dependencies.py",
    "content": "#!/usr/bin/env python\n\n# Quick-and-dirty python to fetch dependency information\n\nimport imp\nimport json\nimport os.path\nimport re\nimport subprocess\nimport sys\n\nAPI_DEPS = imp.load_source('api', 'api/bazel/repository_locations.bzl')\nDEPS = imp.load_source('deps', 'bazel/repository_locations.bzl')\n\n\ndef print_deps(deps):\n  print(json.dumps(deps, sort_keys=True, indent=2))\n\n\nif __name__ == '__main__':\n  deps = []\n\n  DEPS.REPOSITORY_LOCATIONS.update(API_DEPS.REPOSITORY_LOCATIONS)\n\n  for key, loc in DEPS.REPOSITORY_LOCATIONS.items():\n    deps.append({\n        'identifier': key,\n        'file-sha256': loc.get('sha256'),\n        'file-url': loc.get('urls')[0],\n        'file-prefix': loc.get('strip_prefix', ''),\n    })\n\n  deps = sorted(deps, key=lambda k: k['identifier'])\n\n  # Print all dependencies if a target is unspecified\n  if len(sys.argv) == 1:\n    print_deps(deps)\n    exit(0)\n\n  # Bazel target to print\n  target = sys.argv[1]\n  output = subprocess.check_output(['bazel', 'query', 'deps(%s)' % target])\n\n  repos = set()\n\n  # Gather the explicit list of repositories\n  repo_regex = re.compile('^@(.*)\\/\\/')\n  for line in output.split('\\n'):\n    match = repo_regex.match(line)\n    if match:\n      repos.add(match.group(1))\n\n  deps = filter(lambda dep: dep['identifier'] in repos, deps)\n  print_deps(deps)\n"
  },
  {
    "path": "tools/proto_format/active_protos_gen.py",
    "content": "#!/usr/bin/env python3\n\n# Generate ./api/versioning/BUILD based on packages with files containing\n# \"package_version_status = ACTIVE.\"\n\nimport os\nimport string\nimport subprocess\nimport sys\n\nBUILD_FILE_TEMPLATE = string.Template(\n    \"\"\"# DO NOT EDIT. This file is generated by tools/proto_format/active_protos_gen.py.\n\nload(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\n# This tracks active development versions of protos.\nproto_library(\n    name = \"active_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n$active_pkgs    ],\n)\n\n# This tracks frozen versions of protos.\nproto_library(\n    name = \"frozen_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n$frozen_pkgs    ],\n)\n\"\"\")\n\n\n# Key sort function to achieve consistent results with buildifier.\ndef BuildOrderKey(key):\n  return key.replace(':', '!')\n\n\ndef DepsFormat(pkgs):\n  if not pkgs:\n    return ''\n  return '\\n'.join(\n      '        \"//%s:pkg\",' % p.replace('.', '/') for p in sorted(pkgs, key=BuildOrderKey)) + '\\n'\n\n\n# Find packages with a given package version status in a given API tree root.\ndef FindPkgs(package_version_status, api_root):\n  try:\n    active_files = subprocess.check_output(\n        ['grep', '-l', '-r',\n         'package_version_status = %s;' % package_version_status,\n         api_root]).decode().strip().split('\\n')\n    api_protos = [f for f in active_files if f.endswith('.proto')]\n  except subprocess.CalledProcessError:\n    api_protos = []\n  return set([os.path.dirname(p)[len(api_root) + 1:] for p in api_protos])\n\n\nif __name__ == '__main__':\n  api_root = sys.argv[1]\n  active_pkgs = FindPkgs('ACTIVE', api_root)\n  frozen_pkgs = FindPkgs('FROZEN', api_root)\n  sys.stdout.write(\n      BUILD_FILE_TEMPLATE.substitute(active_pkgs=DepsFormat(active_pkgs),\n                                     frozen_pkgs=DepsFormat(frozen_pkgs)))\n"
  },
  {
    "path": "tools/proto_format/proto_format.sh",
    "content": "#!/bin/bash\n\n# Reformat API protos to canonical proto style using protoxform.\n\nset -e\nset -x\n\nread -ra BAZEL_BUILD_OPTIONS <<< \"${BAZEL_BUILD_OPTIONS:-}\"\n\n\n[[ \"$1\" == \"check\" || \"$1\" == \"fix\" || \"$1\" == \"freeze\" ]] || \\\n  (echo \"Usage: $0 <check|fix|freeze>\"; exit 1)\n\n# Developers working on protoxform and other proto format tooling changes will need to override the\n# following check by setting FORCE_PROTO_FORMAT=yes in the environment.\n./tools/git/modified_since_last_github_commit.sh ./api/envoy proto || \\\n  [[ \"${FORCE_PROTO_FORMAT}\" == \"yes\" ]] || \\\n  { echo \"Skipping proto_format.sh due to no API change\"; exit 0; }\n\nif [[ \"$2\" == \"--test\" ]]\nthen\n  echo \"protoxform_test...\"\n  ./tools/protoxform/protoxform_test.sh\n  bazel test \"${BAZEL_BUILD_OPTIONS[@]}\" //tools/protoxform:merge_active_shadow_test\nfi\n\n# Generate //versioning:active_protos.\n./tools/proto_format/active_protos_gen.py ./api > ./api/versioning/BUILD\n\n# This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files.\nBAZEL_BUILD_OPTIONS+=(\"--remote_download_outputs=all\")\n\n# If the specified command is 'freeze', we tell protoxform to adjust package version status to\n# reflect a major version freeze and then do a regular 'fix'.\nPROTO_SYNC_CMD=\"$1\"\nif [[ \"$1\" == \"freeze\" ]]\nthen\n  declare -r FREEZE_ARG=\"--//tools/api_proto_plugin:extra_args=freeze\"\n  PROTO_SYNC_CMD=\"fix\"\nfi\n\n# Invoke protoxform aspect.\nbazel build \"${BAZEL_BUILD_OPTIONS[@]}\" --//tools/api_proto_plugin:default_type_db_target=@envoy_api_canonical//versioning:active_protos ${FREEZE_ARG} \\\n  @envoy_api_canonical//versioning:active_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto\n\n# Find all source protos.\nPROTO_TARGETS=()\nfor proto_type in active frozen; do\n    protos=$(bazel query \"labels(srcs, labels(deps, 
@envoy_api_canonical//versioning:${proto_type}_protos))\")\n    while read -r line; do PROTO_TARGETS+=(\"$line\"); done \\\n\t<<< \"$protos\"\ndone\n\n# Setup for proto_sync.py.\nTOOLS=\"$(dirname \"$(dirname \"$(realpath \"$0\")\")\")\"\n# To satisfy dependency on api_proto_plugin.\nexport PYTHONPATH=\"$TOOLS\"\n# Build protoprint and merge_active_shadow_tools for use in proto_sync.py.\nbazel build \"${BAZEL_BUILD_OPTIONS[@]}\" //tools/protoxform:protoprint //tools/protoxform:merge_active_shadow\n\n# Copy back the FileDescriptorProtos that protoxform emitted to the source tree. This involves\n# pretty-printing to format with protoprint and potentially merging active/shadow versions of protos\n# with merge_active_shadow.\n./tools/proto_format/proto_sync.py \"--mode=${PROTO_SYNC_CMD}\" \"${PROTO_TARGETS[@]}\"\n\n# Need to regenerate //versioning:active_protos before building type DB below if freezing.\nif [[ \"$1\" == \"freeze\" ]]\nthen\n ./tools/proto_format/active_protos_gen.py ./api > ./api/versioning/BUILD\nfi\n\n# Generate api/BUILD file based on updated type database.\nbazel build \"${BAZEL_BUILD_OPTIONS[@]}\" //tools/type_whisperer:api_build_file\ncp -f bazel-bin/tools/type_whisperer/BUILD.api_build_file api/BUILD\n\n# Misc. manual copies to keep generated_api_shadow/ in sync with api/.\ncp -f ./api/bazel/*.bzl ./api/bazel/BUILD ./generated_api_shadow/bazel\n"
  },
  {
    "path": "tools/proto_format/proto_sync.py",
    "content": "#!/usr/bin/env python3\n\n# 1. Take protoxform artifacts from Bazel cache and pretty-print with protoprint.py.\n# 2. In the case where we are generating an Envoy internal shadow, it may be\n#    necessary to combine the current active proto, subject to hand editing, with\n#    shadow artifacts from the previous version; this is done via\n#    merge_active_shadow.py.\n# 3. Diff or copy resulting artifacts to the source tree.\n\nimport argparse\nfrom collections import defaultdict\nimport functools\nimport multiprocessing as mp\nimport os\nimport pathlib\nimport re\nimport shutil\nimport string\nimport subprocess\nimport sys\nimport tempfile\n\nfrom api_proto_plugin import utils\n\nfrom importlib.util import spec_from_loader, module_from_spec\nfrom importlib.machinery import SourceFileLoader\n\n# api/bazel/external_protos_deps.bzl must have a .bzl suffix for Starlark\n# import, so we are forced to this workaround.\n_external_proto_deps_spec = spec_from_loader(\n    'external_proto_deps',\n    SourceFileLoader('external_proto_deps', 'api/bazel/external_proto_deps.bzl'))\nexternal_proto_deps = module_from_spec(_external_proto_deps_spec)\n_external_proto_deps_spec.loader.exec_module(external_proto_deps)\n\n# These .proto import direct path prefixes are already handled by\n# api_proto_package() as implicit dependencies.\nAPI_BUILD_SYSTEM_IMPORT_PREFIXES = [\n    'google/api/annotations.proto',\n    'google/protobuf/',\n    'google/rpc/status.proto',\n    'validate/validate.proto',\n]\n\nBUILD_FILE_TEMPLATE = string.Template(\n    \"\"\"# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@envoy_api//bazel:api_build_system.bzl\", \"api_proto_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\napi_proto_package($fields)\n\"\"\")\n\nIMPORT_REGEX = re.compile('import \"(.*)\";')\nSERVICE_REGEX = re.compile('service \\w+ {')\nPACKAGE_REGEX = re.compile('\\npackage: \"([^\"]*)\"')\nPREVIOUS_MESSAGE_TYPE_REGEX = re.compile(r'previous_message_type\\s+=\\s+\"([^\"]*)\";')\n\n\nclass ProtoSyncError(Exception):\n  pass\n\n\nclass RequiresReformatError(ProtoSyncError):\n\n  def __init__(self, message):\n    super(RequiresReformatError, self).__init__(\n        '%s; either run ./ci/do_ci.sh fix_format or ./tools/proto_format/proto_format.sh fix to reformat.\\n'\n        % message)\n\n\ndef GetDirectoryFromPackage(package):\n  \"\"\"Get directory path from package name or full qualified message name\n\n  Args:\n    package: the full qualified name of package or message.\n  \"\"\"\n  return '/'.join(s for s in package.split('.') if s and s[0].islower())\n\n\ndef GetDestinationPath(src):\n  \"\"\"Obtain destination path from a proto file path by reading its package statement.\n\n  Args:\n    src: source path\n  \"\"\"\n  src_path = pathlib.Path(src)\n  contents = src_path.read_text(encoding='utf8')\n  matches = re.findall(PACKAGE_REGEX, contents)\n  if len(matches) != 1:\n    raise RequiresReformatError(\"Expect {} has only one package declaration but has {}\".format(\n        src, len(matches)))\n  return pathlib.Path(GetDirectoryFromPackage(\n      matches[0])).joinpath(src_path.name.split('.')[0] + \".proto\")\n\n\ndef GetAbsRelDestinationPath(dst_root, src):\n  \"\"\"Obtain absolute path from a proto file path combined with destination root.\n\n  Creates the parent directory if necessary.\n\n  Args:\n    dst_root: destination root path.\n    src: source path.\n  \"\"\"\n  rel_dst_path = GetDestinationPath(src)\n  dst = dst_root.joinpath(rel_dst_path)\n  dst.parent.mkdir(0o755, parents=True, 
exist_ok=True)\n  return dst, rel_dst_path\n\n\ndef ProtoPrint(src, dst):\n  \"\"\"Pretty-print FileDescriptorProto to a destination file.\n\n  Args:\n    src: source path for FileDescriptorProto.\n    dst: destination path for formatted proto.\n  \"\"\"\n  print('ProtoPrint %s' % dst)\n  subprocess.check_output([\n      'bazel-bin/tools/protoxform/protoprint', src,\n      str(dst),\n      './bazel-bin/tools/protoxform/protoprint.runfiles/envoy/tools/type_whisperer/api_type_db.pb_text'\n  ])\n\n\ndef MergeActiveShadow(active_src, shadow_src, dst):\n  \"\"\"Merge active/shadow FileDescriptorProto to a destination file.\n\n  Args:\n    active_src: source path for active FileDescriptorProto.\n    shadow_src: source path for shadow FileDescriptorProto.\n    dst: destination path for FileDescriptorProto.\n  \"\"\"\n  print('MergeActiveShadow %s' % dst)\n  subprocess.check_output([\n      'bazel-bin/tools/protoxform/merge_active_shadow',\n      active_src,\n      shadow_src,\n      dst,\n  ])\n\n\ndef SyncProtoFile(dst_srcs):\n  \"\"\"Pretty-print a proto descriptor from protoxform.py Bazel cache artifacts.\n\n  In the case where we are generating an Envoy internal shadow, it may be\n  necessary to combine the current active proto, subject to hand editing, with\n  shadow artifacts from the previous version; this is done via\n  MergeActiveShadow().\n\n  Args:\n    dst_srcs: destination/sources path tuple.\n  \"\"\"\n  dst, srcs = dst_srcs\n  assert (len(srcs) > 0)\n  # If we only have one candidate source for a destination, just pretty-print.\n  if len(srcs) == 1:\n    src = srcs[0]\n    ProtoPrint(src, dst)\n  else:\n    # We should only see an active and next major version candidate from\n    # previous version today.\n    assert (len(srcs) == 2)\n    shadow_srcs = [\n        s for s in srcs if s.endswith('.next_major_version_candidate.envoy_internal.proto')\n    ]\n    active_src = [s for s in srcs if s.endswith('active_or_frozen.proto')][0]\n    # If we're building 
the shadow, we need to combine the next major version\n    # candidate shadow with the potentially hand edited active version.\n    if len(shadow_srcs) > 0:\n      assert (len(shadow_srcs) == 1)\n      with tempfile.NamedTemporaryFile() as f:\n        MergeActiveShadow(active_src, shadow_srcs[0], f.name)\n        ProtoPrint(f.name, dst)\n    else:\n      ProtoPrint(active_src, dst)\n    src = active_src\n  rel_dst_path = GetDestinationPath(src)\n  return ['//%s:pkg' % str(rel_dst_path.parent)]\n\n\ndef GetImportDeps(proto_path):\n  \"\"\"Obtain the Bazel dependencies for the import paths from a .proto file.\n\n  Args:\n    proto_path: path to .proto.\n\n  Returns:\n    A list of Bazel targets reflecting the imports in the .proto at proto_path.\n  \"\"\"\n  imports = []\n  with open(proto_path, 'r', encoding='utf8') as f:\n    for line in f:\n      match = re.match(IMPORT_REGEX, line)\n      if match:\n        import_path = match.group(1)\n        # We can ignore imports provided implicitly by api_proto_package().\n        if any(import_path.startswith(p) for p in API_BUILD_SYSTEM_IMPORT_PREFIXES):\n          continue\n        # Special case handling for UDPA annotations.\n        if import_path.startswith('udpa/annotations/'):\n          imports.append('@com_github_cncf_udpa//udpa/annotations:pkg')\n          continue\n        # Special case handling for UDPA core.\n        if import_path.startswith('udpa/core/v1/'):\n          imports.append('@com_github_cncf_udpa//udpa/core/v1:pkg')\n          continue\n        # Explicit remapping for external deps, compute paths for envoy/*.\n        if import_path in external_proto_deps.EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP:\n          imports.append(external_proto_deps.EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP[import_path])\n          continue\n        if import_path.startswith('envoy/'):\n          # Ignore package internal imports.\n          if os.path.dirname(proto_path).endswith(os.path.dirname(import_path)):\n            
continue\n          imports.append('//%s:pkg' % os.path.dirname(import_path))\n          continue\n        raise ProtoSyncError(\n            'Unknown import path mapping for %s, please update the mappings in tools/proto_format/proto_sync.py.\\n'\n            % import_path)\n  return imports\n\n\ndef GetPreviousMessageTypeDeps(proto_path):\n  \"\"\"Obtain the Bazel dependencies for the previous version of messages in a .proto file.\n\n  We need to link in earlier proto descriptors to support Envoy reflection upgrades.\n\n  Args:\n    proto_path: path to .proto.\n\n  Returns:\n    A list of Bazel targets reflecting the previous message types in the .proto at proto_path.\n  \"\"\"\n  contents = pathlib.Path(proto_path).read_text(encoding='utf8')\n  matches = re.findall(PREVIOUS_MESSAGE_TYPE_REGEX, contents)\n  deps = []\n  for m in matches:\n    target = '//%s:pkg' % GetDirectoryFromPackage(m)\n    deps.append(target)\n  return deps\n\n\ndef HasServices(proto_path):\n  \"\"\"Does a .proto file have any service definitions?\n\n  Args:\n    proto_path: path to .proto.\n\n  Returns:\n    True iff there are service definitions in the .proto at proto_path.\n  \"\"\"\n  with open(proto_path, 'r', encoding='utf8') as f:\n    for line in f:\n      if re.match(SERVICE_REGEX, line):\n        return True\n  return False\n\n\n# Key sort function to achieve consistent results with buildifier.\ndef BuildOrderKey(key):\n  return key.replace(':', '!')\n\n\ndef BuildFileContents(root, files):\n  \"\"\"Compute the canonical BUILD contents for an api/ proto directory.\n\n  Args:\n    root: base path to directory.\n    files: a list of files in the directory.\n\n  Returns:\n    A string containing the canonical BUILD file content for root.\n  \"\"\"\n  import_deps = set(sum([GetImportDeps(os.path.join(root, f)) for f in files], []))\n  history_deps = set(sum([GetPreviousMessageTypeDeps(os.path.join(root, f)) for f in files], []))\n  deps = import_deps.union(history_deps)\n  has_services 
= any(HasServices(os.path.join(root, f)) for f in files)\n  fields = []\n  if has_services:\n    fields.append('    has_services = True,')\n  if deps:\n    if len(deps) == 1:\n      formatted_deps = '\"%s\"' % list(deps)[0]\n    else:\n      formatted_deps = '\\n' + '\\n'.join(\n          '        \"%s\",' % dep for dep in sorted(deps, key=BuildOrderKey)) + '\\n    '\n    fields.append('    deps = [%s],' % formatted_deps)\n  formatted_fields = '\\n' + '\\n'.join(fields) + '\\n' if fields else ''\n  return BUILD_FILE_TEMPLATE.substitute(fields=formatted_fields)\n\n\ndef SyncBuildFiles(cmd, dst_root):\n  \"\"\"Diff or in-place update api/ BUILD files.\n\n  Args:\n    cmd: 'check' or 'fix'.\n  \"\"\"\n  for root, dirs, files in os.walk(str(dst_root)):\n    is_proto_dir = any(f.endswith('.proto') for f in files)\n    if not is_proto_dir:\n      continue\n    build_contents = BuildFileContents(root, files)\n    build_path = os.path.join(root, 'BUILD')\n    with open(build_path, 'w') as f:\n      f.write(build_contents)\n\n\ndef GenerateCurrentApiDir(api_dir, dst_dir):\n  \"\"\"Helper function to generate original API repository to be compared with diff.\n  This copies the original API repository and deletes file we don't want to compare.\n\n  Args:\n    api_dir: the original api directory\n    dst_dir: the api directory to be compared in temporary directory\n  \"\"\"\n  dst = dst_dir.joinpath(\"envoy\")\n  shutil.copytree(str(api_dir.joinpath(\"envoy\")), str(dst))\n\n  for p in dst.glob('**/*.md'):\n    p.unlink()\n  # envoy.service.auth.v2alpha exist for compatibility while we don't run in protoxform\n  # so we ignore it here.\n  shutil.rmtree(str(dst.joinpath(\"service\", \"auth\", \"v2alpha\")))\n\n\ndef GitStatus(path):\n  return subprocess.check_output(['git', 'status', '--porcelain', str(path)]).decode()\n\n\ndef GitModifiedFiles(path, suffix):\n  \"\"\"Obtain a list of modified files since the last commit merged by GitHub.\n\n  Args:\n    path: path to 
examine.\n    suffix: path suffix to filter with.\n  Return:\n    A list of strings providing the paths of modified files in the repo.\n  \"\"\"\n  try:\n    modified_files = subprocess.check_output(\n        ['tools/git/modified_since_last_github_commit.sh', 'api', 'proto']).decode().split()\n    return modified_files\n  except subprocess.CalledProcessError as e:\n    if e.returncode == 1:\n      return []\n    raise\n\n\n# If we're not forcing format, i.e. FORCE_PROTO_FORMAT=yes, in the environment,\n# then try and see if we can skip reformatting based on some simple path\n# heuristics. This saves a ton of time, since proto format and sync is not\n# running under Bazel and can't do change detection.\ndef ShouldSync(path, api_proto_modified_files, py_tools_modified_files):\n  if os.getenv('FORCE_PROTO_FORMAT') == 'yes':\n    return True\n  # If tools change, safest thing to do is rebuild everything.\n  if len(py_tools_modified_files) > 0:\n    return True\n  # Check to see if the basename of the file has been modified since the last\n  # GitHub commit. If so, rebuild. 
This is safe and conservative across package\n  # migrations in v3 and v4alpha; we could achieve a lower rate of false\n  # positives if we examined package migration annotations, at the expense of\n  # complexity.\n  for p in api_proto_modified_files:\n    if os.path.basename(p) in path:\n      return True\n  # Otherwise we can safely skip syncing.\n  return False\n\n\ndef Sync(api_root, mode, labels, shadow):\n  api_proto_modified_files = GitModifiedFiles('api', 'proto')\n  py_tools_modified_files = GitModifiedFiles('tools', 'py')\n  with tempfile.TemporaryDirectory() as tmp:\n    dst_dir = pathlib.Path(tmp).joinpath(\"b\")\n    paths = []\n    for label in labels:\n      paths.append(utils.BazelBinPathForOutputArtifact(label, '.active_or_frozen.proto'))\n      paths.append(\n          utils.BazelBinPathForOutputArtifact(\n              label, '.next_major_version_candidate.envoy_internal.proto'\n              if shadow else '.next_major_version_candidate.proto'))\n    dst_src_paths = defaultdict(list)\n    for path in paths:\n      if os.stat(path).st_size > 0:\n        abs_dst_path, rel_dst_path = GetAbsRelDestinationPath(dst_dir, path)\n        if ShouldSync(path, api_proto_modified_files, py_tools_modified_files):\n          dst_src_paths[abs_dst_path].append(path)\n        else:\n          print('Skipping sync of %s' % path)\n          src_path = str(pathlib.Path(api_root, rel_dst_path))\n          shutil.copy(src_path, abs_dst_path)\n    with mp.Pool() as p:\n      pkg_deps = p.map(SyncProtoFile, dst_src_paths.items())\n    SyncBuildFiles(mode, dst_dir)\n\n    current_api_dir = pathlib.Path(tmp).joinpath(\"a\")\n    current_api_dir.mkdir(0o755, True, True)\n    api_root_path = pathlib.Path(api_root)\n    GenerateCurrentApiDir(api_root_path, current_api_dir)\n\n    # These support files are handled manually.\n    for f in [\n        'envoy/annotations/resource.proto', 'envoy/annotations/deprecation.proto',\n        'envoy/annotations/BUILD'\n    ]:\n      
copy_dst_dir = pathlib.Path(dst_dir, os.path.dirname(f))\n      copy_dst_dir.mkdir(exist_ok=True)\n      shutil.copy(str(pathlib.Path(api_root, f)), str(copy_dst_dir))\n\n    diff = subprocess.run(['diff', '-Npur', \"a\", \"b\"], cwd=tmp, stdout=subprocess.PIPE).stdout\n\n    if diff.strip():\n      if mode == \"check\":\n        print(\"Please apply following patch to directory '{}'\".format(api_root), file=sys.stderr)\n        print(diff.decode(), file=sys.stderr)\n        sys.exit(1)\n      if mode == \"fix\":\n        git_status = GitStatus(api_root)\n        if git_status:\n          print('git status indicates a dirty API tree:\\n%s' % git_status)\n          print(\n              'Proto formatting may overwrite or delete files in the above list with no git backup.'\n          )\n          if input('Continue? [yN] ').strip().lower() != 'y':\n            sys.exit(1)\n        src_files = set(str(p.relative_to(current_api_dir)) for p in current_api_dir.rglob('*'))\n        dst_files = set(str(p.relative_to(dst_dir)) for p in dst_dir.rglob('*'))\n        deleted_files = src_files.difference(dst_files)\n        if deleted_files:\n          print('The following files will be deleted: %s' % sorted(deleted_files))\n          print(\n              'If this is not intended, please see https://github.com/envoyproxy/envoy/blob/master/api/STYLE.md#adding-an-extension-configuration-to-the-api.'\n          )\n          if input('Delete files? 
[yN] ').strip().lower() == 'y':\n            subprocess.run(['patch', '-p1'], input=diff, cwd=str(api_root_path.resolve()))\n          else:\n            sys.exit(1)\n        else:\n          subprocess.run(['patch', '-p1'], input=diff, cwd=str(api_root_path.resolve()))\n\n\nif __name__ == '__main__':\n  parser = argparse.ArgumentParser()\n  parser.add_argument('--mode', choices=['check', 'fix'])\n  parser.add_argument('--api_root', default='./api')\n  parser.add_argument('--api_shadow_root', default='./generated_api_shadow')\n  parser.add_argument('labels', nargs='*')\n  args = parser.parse_args()\n\n  Sync(args.api_root, args.mode, args.labels, False)\n  Sync(args.api_shadow_root, args.mode, args.labels, True)\n"
  },
  {
    "path": "tools/protodoc/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_binary\")\nload(\"@protodoc_pip3//:requirements.bzl\", \"requirement\")\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_package\", \"envoy_proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\npy_binary(\n    name = \"generate_empty\",\n    srcs = [\"generate_empty.py\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\":protodoc\"],\n)\n\nenvoy_proto_library(\n    name = \"manifest_proto\",\n    srcs = [\"manifest.proto\"],\n)\n\npy_binary(\n    name = \"protodoc\",\n    srcs = [\"protodoc.py\"],\n    data = [\"//docs:protodoc_manifest.yaml\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":manifest_proto_py_proto\",\n        \"//tools/api_proto_plugin\",\n        \"//tools/config_validation:validate_fragment\",\n        \"@com_envoyproxy_protoc_gen_validate//validate:validate_py\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg_py_proto\",\n        \"@com_google_protobuf//:protobuf_python\",\n        requirement(\"PyYAML\"),\n    ],\n)\n"
  },
  {
    "path": "tools/protodoc/generate_empty.py",
    "content": "# Generate pseudo API docs for extensions that have google.protobuf.Empty\n# config.\n\nimport json\nimport pathlib\nimport string\nimport sys\n\nimport protodoc\n\nEMPTY_EXTENSION_DOCS_TEMPLATE = string.Template(\"\"\"$header\n\n$description\n\n$reflink\n\nThis extension does not have a structured configuration, `google.protobuf.Empty\n<https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#empty>`_ should be used\ninstead.\n\n$extension\n\"\"\")\n\n\ndef GenerateEmptyExtensionsDocs(extension, details, api_extensions_root):\n  extension_root = pathlib.Path(details['path'])\n  path = pathlib.Path(api_extensions_root, extension_root, 'empty', extension_root.name + '.rst')\n  path.parent.mkdir(parents=True, exist_ok=True)\n  description = details.get('description', '')\n  reflink = ''\n  if 'ref' in details:\n    reflink = '%s %s.' % (details['title'],\n                          protodoc.FormatInternalLink('configuration overview', details['ref']))\n  content = EMPTY_EXTENSION_DOCS_TEMPLATE.substitute(header=protodoc.FormatHeader(\n      '=', details['title']),\n                                                     description=description,\n                                                     reflink=reflink,\n                                                     extension=protodoc.FormatExtension(extension))\n  path.write_text(content)\n\n\nif __name__ == '__main__':\n  empty_extensions_path = sys.argv[1]\n  api_extensions_root = sys.argv[2]\n\n  empty_extensions = json.loads(pathlib.Path(empty_extensions_path).read_text())\n  for extension, details in empty_extensions.items():\n    GenerateEmptyExtensionsDocs(extension, details, api_extensions_root)\n"
  },
  {
    "path": "tools/protodoc/manifest.proto",
    "content": "syntax = \"proto3\";\n\npackage tools.protodoc;\n\nimport \"google/protobuf/struct.proto\";\n\n// Additional structure information consumed by protodoc when generating\n// documentation for a field.\nmessage Description {\n  message EdgeConfiguration {\n    // Example secure edge default for the field.\n    google.protobuf.Value example = 1;\n\n    // Additional note to include in the configuration warning.\n    string note = 2;\n  }\n\n  // Additional information for when this field is used in edge deployments.\n  EdgeConfiguration edge_config = 1;\n\n  // TODO: add additional information here to reflect things like Envoy\n  // implementation status.\n}\n\nmessage Manifest {\n  // Map from fully qualified field name to additional information to be used in\n  // protodoc generation.\n  map<string, Description> fields = 1;\n}\n"
  },
  {
    "path": "tools/protodoc/protodoc.bzl",
    "content": "load(\"//tools/api_proto_plugin:plugin.bzl\", \"api_proto_plugin_aspect\", \"api_proto_plugin_impl\")\n\ndef _protodoc_impl(target, ctx):\n    return api_proto_plugin_impl(target, ctx, \"rst\", \"protodoc\", [\".rst\"])\n\n# Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html)\n# that can be invoked from the CLI to produce docs via //tools/protodoc for\n# proto_library targets. Example use:\n#\n#   bazel build //api --aspects tools/protodoc/protodoc.bzl%protodoc_aspect \\\n#       --output_groups=rst\n#\n# The aspect builds the transitive docs, so any .proto in the dependency graph\n# get docs created.\nprotodoc_aspect = api_proto_plugin_aspect(\"//tools/protodoc\", _protodoc_impl)\n"
  },
  {
    "path": "tools/protodoc/protodoc.py",
    "content": "# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST.\n# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto\n# for the underlying protos mentioned in this file. See\n# https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax.\n\nfrom collections import defaultdict\nimport json\nimport functools\nimport os\nimport pathlib\nimport re\nimport string\nimport sys\n\nfrom google.protobuf import json_format\nfrom bazel_tools.tools.python.runfiles import runfiles\nimport yaml\n\n# We have to do some evil things to sys.path due to the way that Python module\n# resolution works; we have both tools/ trees in bazel_tools and envoy. By\n# default, Bazel leaves us with a sys.path in which the @bazel_tools repository\n# takes precedence. Now that we're done with importing runfiles above, we can\n# just remove it from the sys.path.\nsys.path = [p for p in sys.path if not p.endswith('bazel_tools')]\n\nfrom tools.api_proto_plugin import annotations\nfrom tools.api_proto_plugin import plugin\nfrom tools.api_proto_plugin import visitor\nfrom tools.config_validation import validate_fragment\n\nfrom tools.protodoc import manifest_pb2\nfrom udpa.annotations import security_pb2\nfrom udpa.annotations import status_pb2\nfrom validate import validate_pb2\n\n# Namespace prefix for Envoy core APIs.\nENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.'\n\n# Namespace prefix for Envoy top-level APIs.\nENVOY_PREFIX = '.envoy.'\n\n# Namespace prefix for WKTs.\nWKT_NAMESPACE_PREFIX = '.google.protobuf.'\n\n# Namespace prefix for RPCs.\nRPC_NAMESPACE_PREFIX = '.google.rpc.'\n\n# http://www.fileformat.info/info/unicode/char/2063/index.htm\nUNICODE_INVISIBLE_SEPARATOR = u'\\u2063'\n\n# Template for data plane API URLs.\nDATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format(\n    os.environ['ENVOY_BLOB_SHA'])\n\n# Template for formating extension 
descriptions.\nEXTENSION_TEMPLATE = string.Template(\"\"\"$anchor\nThis extension may be referenced by the qualified name *$extension*\n\n.. note::\n  $status\n\n  $security_posture\n\n\"\"\")\n\n# A map from the extension security postures (as defined in the\n# envoy_cc_extension build macro) to human readable text for extension docs.\nEXTENSION_SECURITY_POSTURES = {\n    'robust_to_untrusted_downstream':\n        'This extension is intended to be robust against untrusted downstream traffic. It '\n        'assumes that the upstream is trusted.',\n    'robust_to_untrusted_downstream_and_upstream':\n        'This extension is intended to be robust against both untrusted downstream and '\n        'upstream traffic.',\n    'requires_trusted_downstream_and_upstream':\n        'This extension is not hardened and should only be used in deployments'\n        ' where both the downstream and upstream are trusted.',\n    'unknown':\n        'This extension has an unknown security posture and should only be '\n        'used in deployments where both the downstream and upstream are '\n        'trusted.',\n    'data_plane_agnostic':\n        'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.',\n}\n\n# A map from the extension status value to a human readable text for extension\n# docs.\nEXTENSION_STATUS_VALUES = {\n    'alpha':\n        'This extension is functional but has not had substantial production burn time, use only with this caveat.',\n    'wip':\n        'This extension is work-in-progress. 
Functionality is incomplete and it is not intended for production use.',\n}\n\n\nclass ProtodocError(Exception):\n  \"\"\"Base error class for the protodoc module.\"\"\"\n\n\ndef HideNotImplemented(comment):\n  \"\"\"Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?\"\"\"\n  return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations\n\n\ndef GithubUrl(type_context):\n  \"\"\"Obtain data plane API Github URL by path from a TypeContext.\n\n  Args:\n    type_context: type_context.TypeContext for node.\n\n  Returns:\n    A string with a corresponding data plane API GitHub Url.\n  \"\"\"\n  if type_context.location is not None:\n    return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name,\n                                     type_context.location.span[0])\n  return ''\n\n\ndef FormatCommentWithAnnotations(comment, type_name=''):\n  \"\"\"Format a comment string with additional RST for annotations.\n\n  Args:\n    comment: comment string.\n    type_name: optional, 'message' or 'enum' may be specified for additional\n      message/enum specific annotations.\n\n  Returns:\n    A string with additional RST from annotations.\n  \"\"\"\n  formatted_extension = ''\n  if annotations.EXTENSION_ANNOTATION in comment.annotations:\n    extension = comment.annotations[annotations.EXTENSION_ANNOTATION]\n    formatted_extension = FormatExtension(extension)\n  return annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\\n') + formatted_extension\n\n\ndef MapLines(f, s):\n  \"\"\"Apply a function across each line in a flat string.\n\n  Args:\n    f: A string transform function for a line.\n    s: A string consisting of potentially multiple lines.\n\n  Returns:\n    A flat string with f applied to each line.\n  \"\"\"\n  return '\\n'.join(f(line) for line in s.split('\\n'))\n\n\ndef Indent(spaces, line):\n  \"\"\"Indent a string.\"\"\"\n  return ' ' * spaces + line\n\n\ndef IndentLines(spaces, 
lines):\n  \"\"\"Indent a list of strings.\"\"\"\n  return map(functools.partial(Indent, spaces), lines)\n\n\ndef FormatInternalLink(text, ref):\n  return ':ref:`%s <%s>`' % (text, ref)\n\n\ndef FormatExternalLink(text, ref):\n  return '`%s <%s>`_' % (text, ref)\n\n\ndef FormatHeader(style, text):\n  \"\"\"Format RST header.\n\n  Args:\n    style: underline style, e.g. '=', '-'.\n    text: header text\n\n  Returns:\n    RST formatted header.\n  \"\"\"\n  return '%s\\n%s\\n\\n' % (text, style * len(text))\n\n\ndef FormatExtension(extension):\n  \"\"\"Format extension metadata as RST.\n\n  Args:\n    extension: the name of the extension, e.g. com.acme.foo.\n\n  Returns:\n    RST formatted extension description.\n  \"\"\"\n  try:\n    extension_metadata = json.loads(pathlib.Path(\n        os.getenv('EXTENSION_DB_PATH')).read_text())[extension]\n    anchor = FormatAnchor('extension_' + extension)\n    status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '')\n    security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']]\n    return EXTENSION_TEMPLATE.substitute(anchor=anchor,\n                                         extension=extension,\n                                         status=status,\n                                         security_posture=security_posture)\n  except KeyError as e:\n    sys.stderr.write(\n        '\\n\\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\\n\\n')\n    exit(1)  # Raising the error buries the above message in tracebacks.\n\n\ndef FormatHeaderFromFile(style, source_code_info, proto_name):\n  \"\"\"Format RST header based on special file level title\n\n  Args:\n    style: underline style, e.g. 
'=', '-'.\n    source_code_info: SourceCodeInfo object.\n    proto_name: If the file_level_comment does not contain a user specified\n      title, use this as page title.\n\n  Returns:\n    RST formatted header, and file level comment without page title strings.\n  \"\"\"\n  anchor = FormatAnchor(FileCrossRefLabel(proto_name))\n  stripped_comment = annotations.WithoutAnnotations(\n      StripLeadingSpace('\\n'.join(c + '\\n' for c in source_code_info.file_level_comments)))\n  formatted_extension = ''\n  if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations:\n    extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION]\n    formatted_extension = FormatExtension(extension)\n  if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations:\n    return anchor + FormatHeader(\n        style, source_code_info.file_level_annotations[\n            annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment\n  return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment\n\n\ndef FormatFieldTypeAsJson(type_context, field):\n  \"\"\"Format FieldDescriptorProto.Type as a pseudo-JSON string.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    field: FieldDescriptor proto.\n  Return: RST formatted pseudo-JSON string representation of field type.\n  \"\"\"\n  if TypeNameFromFQN(field.type_name) in type_context.map_typenames:\n    return '\"{...}\"'\n  if field.label == field.LABEL_REPEATED:\n    return '[]'\n  if field.type == field.TYPE_MESSAGE:\n    return '\"{...}\"'\n  return '\"...\"'\n\n\ndef FormatMessageAsJson(type_context, msg):\n  \"\"\"Format a message definition DescriptorProto as a pseudo-JSON block.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    msg: message definition DescriptorProto.\n  Return: RST formatted pseudo-JSON string representation of message definition.\n  \"\"\"\n  lines = []\n  
for index, field in enumerate(msg.field):\n    field_type_context = type_context.ExtendField(index, field.name)\n    leading_comment = field_type_context.leading_comment\n    if HideNotImplemented(leading_comment):\n      continue\n    lines.append('\"%s\": %s' % (field.name, FormatFieldTypeAsJson(type_context, field)))\n\n  if lines:\n    return '.. code-block:: json\\n\\n  {\\n' + ',\\n'.join(IndentLines(4, lines)) + '\\n  }\\n\\n'\n  else:\n    return '.. code-block:: json\\n\\n  {}\\n\\n'\n\n\ndef NormalizeFieldTypeName(field_fqn):\n  \"\"\"Normalize a fully qualified field type name, e.g.\n\n  .envoy.foo.bar.\n\n  Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n  Args:\n    field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.\n  Return: Normalized type name.\n  \"\"\"\n  if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX):\n    return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):]\n  if field_fqn.startswith(ENVOY_PREFIX):\n    return field_fqn[len(ENVOY_PREFIX):]\n  return field_fqn\n\n\ndef NormalizeTypeContextName(type_name):\n  \"\"\"Normalize a type name, e.g.\n\n  envoy.foo.bar.\n\n  Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n  Args:\n    type_name: a name from a TypeContext.\n  Return: Normalized type name.\n  \"\"\"\n  return NormalizeFieldTypeName(QualifyTypeName(type_name))\n\n\ndef QualifyTypeName(type_name):\n  return '.' 
+ type_name\n\n\ndef TypeNameFromFQN(fqn):\n  return fqn[1:]\n\n\ndef FormatEmph(s):\n  \"\"\"RST format a string for emphasis.\"\"\"\n  return '*%s*' % s\n\n\ndef FormatFieldType(type_context, field):\n  \"\"\"Format a FieldDescriptorProto type description.\n\n  Adds cross-refs for message types.\n  TODO(htuch): Add cross-refs for enums as well.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    field: FieldDescriptor proto.\n  Return: RST formatted field type.\n  \"\"\"\n  if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(\n      ENVOY_PREFIX):\n    type_name = NormalizeFieldTypeName(field.type_name)\n    if field.type == field.TYPE_MESSAGE:\n      if type_context.map_typenames and TypeNameFromFQN(\n          field.type_name) in type_context.map_typenames:\n        return 'map<%s, %s>' % tuple(\n            map(functools.partial(FormatFieldType, type_context),\n                type_context.map_typenames[TypeNameFromFQN(field.type_name)]))\n      return FormatInternalLink(type_name, MessageCrossRefLabel(type_name))\n    if field.type == field.TYPE_ENUM:\n      return FormatInternalLink(type_name, EnumCrossRefLabel(type_name))\n  elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):\n    wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]\n    return FormatExternalLink(\n        wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' %\n        wkt.lower())\n  elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):\n    rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]\n    return FormatExternalLink(\n        rpc,\n        'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower())\n  elif field.type_name:\n    return field.type_name\n\n  pretty_type_names = {\n      field.TYPE_DOUBLE: 'double',\n      field.TYPE_FLOAT: 'float',\n      field.TYPE_INT32: 'int32',\n      field.TYPE_SFIXED32: 'int32',\n      field.TYPE_SINT32: 'int32',\n   
   field.TYPE_FIXED32: 'uint32',\n      field.TYPE_UINT32: 'uint32',\n      field.TYPE_INT64: 'int64',\n      field.TYPE_SFIXED64: 'int64',\n      field.TYPE_SINT64: 'int64',\n      field.TYPE_FIXED64: 'uint64',\n      field.TYPE_UINT64: 'uint64',\n      field.TYPE_BOOL: 'bool',\n      field.TYPE_STRING: 'string',\n      field.TYPE_BYTES: 'bytes',\n  }\n  if field.type in pretty_type_names:\n    return FormatExternalLink(pretty_type_names[field.type],\n                              'https://developers.google.com/protocol-buffers/docs/proto#scalar')\n  raise ProtodocError('Unknown field type ' + str(field.type))\n\n\ndef StripLeadingSpace(s):\n  \"\"\"Remove leading space in flat comment strings.\"\"\"\n  return MapLines(lambda s: s[1:], s)\n\n\ndef FileCrossRefLabel(msg_name):\n  \"\"\"File cross reference label.\"\"\"\n  return 'envoy_api_file_%s' % msg_name\n\n\ndef MessageCrossRefLabel(msg_name):\n  \"\"\"Message cross reference label.\"\"\"\n  return 'envoy_api_msg_%s' % msg_name\n\n\ndef EnumCrossRefLabel(enum_name):\n  \"\"\"Enum cross reference label.\"\"\"\n  return 'envoy_api_enum_%s' % enum_name\n\n\ndef FieldCrossRefLabel(field_name):\n  \"\"\"Field cross reference label.\"\"\"\n  return 'envoy_api_field_%s' % field_name\n\n\ndef EnumValueCrossRefLabel(enum_value_name):\n  \"\"\"Enum value cross reference label.\"\"\"\n  return 'envoy_api_enum_value_%s' % enum_value_name\n\n\ndef FormatAnchor(label):\n  \"\"\"Format a label as an Envoy API RST anchor.\"\"\"\n  return '.. 
_%s:\\n\\n' % label\n\n\ndef FormatSecurityOptions(security_option, field, type_context, edge_config):\n  sections = []\n\n  if security_option.configure_for_untrusted_downstream:\n    sections.append(\n        Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.'))\n  if security_option.configure_for_untrusted_upstream:\n    sections.append(\n        Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.'))\n  if edge_config.note:\n    sections.append(Indent(4, edge_config.note))\n\n  example_dict = json_format.MessageToDict(edge_config.example)\n  validate_fragment.ValidateFragment(field.type_name[1:], example_dict)\n  field_name = type_context.name.split('.')[-1]\n  example = {field_name: example_dict}\n  sections.append(\n      Indent(4, 'Example configuration for untrusted environments:\\n\\n') +\n      Indent(4, '.. code-block:: yaml\\n\\n') +\n      '\\n'.join(IndentLines(6,\n                            yaml.dump(example).split('\\n'))))\n\n  return '.. 
attention::\\n' + '\\n\\n'.join(sections)\n\n\ndef FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest):\n  \"\"\"Format a FieldDescriptorProto as RST definition list item.\n\n  Args:\n    outer_type_context: contextual information for enclosing message.\n    type_context: contextual information for message/enum/field.\n    field: FieldDescriptorProto.\n    protodoc_manifest: tools.protodoc.Manifest for proto.\n\n  Returns:\n    RST formatted definition list item.\n  \"\"\"\n  field_annotations = []\n\n  anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name)))\n  if field.options.HasExtension(validate_pb2.rules):\n    rule = field.options.Extensions[validate_pb2.rules]\n    if ((rule.HasField('message') and rule.message.required) or\n        (rule.HasField('duration') and rule.duration.required) or\n        (rule.HasField('string') and rule.string.min_len > 0) or\n        (rule.HasField('string') and rule.string.min_bytes > 0) or\n        (rule.HasField('repeated') and rule.repeated.min_items > 0)):\n      field_annotations = ['*REQUIRED*']\n  leading_comment = type_context.leading_comment\n  formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)\n  if HideNotImplemented(leading_comment):\n    return ''\n\n  if field.HasField('oneof_index'):\n    oneof_context = outer_type_context.ExtendOneof(field.oneof_index,\n                                                   type_context.oneof_names[field.oneof_index])\n    oneof_comment = oneof_context.leading_comment\n    formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment)\n    if HideNotImplemented(oneof_comment):\n      return ''\n\n    # If the oneof only has one field and marked required, mark the field as required.\n    if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[\n        field.oneof_index]:\n      field_annotations = ['*REQUIRED*']\n\n    if 
len(type_context.oneof_fields[field.oneof_index]) > 1:\n      # Fields in oneof shouldn't be marked as required when we have oneof comment below it.\n      field_annotations = []\n      oneof_template = '\\nPrecisely one of %s must be set.\\n' if type_context.oneof_required[\n          field.oneof_index] else '\\nOnly one of %s may be set.\\n'\n      formatted_oneof_comment += oneof_template % ', '.join(\n          FormatInternalLink(\n              f,\n              FieldCrossRefLabel(NormalizeTypeContextName(\n                  outer_type_context.ExtendField(i, f).name)))\n          for i, f in type_context.oneof_fields[field.oneof_index])\n  else:\n    formatted_oneof_comment = ''\n\n  # If there is a udpa.annotations.security option, include it after the comment.\n  if field.options.HasExtension(security_pb2.security):\n    manifest_description = protodoc_manifest.fields.get(type_context.name)\n    if not manifest_description:\n      raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name)\n    formatted_security_options = FormatSecurityOptions(\n        field.options.Extensions[security_pb2.security], field, type_context,\n        manifest_description.edge_config)\n  else:\n    formatted_security_options = ''\n\n  comment = '(%s) ' % ', '.join([FormatFieldType(type_context, field)] +\n                                field_annotations) + formatted_leading_comment\n  return anchor + field.name + '\\n' + MapLines(functools.partial(\n      Indent, 2), comment + formatted_oneof_comment) + formatted_security_options\n\n\ndef FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest):\n  \"\"\"Format a DescriptorProto as RST definition list.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    msg: DescriptorProto.\n    protodoc_manifest: tools.protodoc.Manifest for proto.\n\n  Returns:\n    RST formatted definition list item.\n  \"\"\"\n  type_context.oneof_fields = defaultdict(list)\n  
type_context.oneof_required = defaultdict(bool)\n  type_context.oneof_names = defaultdict(list)\n  for index, field in enumerate(msg.field):\n    if field.HasField('oneof_index'):\n      leading_comment = type_context.ExtendField(index, field.name).leading_comment\n      if HideNotImplemented(leading_comment):\n        continue\n      type_context.oneof_fields[field.oneof_index].append((index, field.name))\n  for index, oneof_decl in enumerate(msg.oneof_decl):\n    if oneof_decl.options.HasExtension(validate_pb2.required):\n      type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required]\n    type_context.oneof_names[index] = oneof_decl.name\n  return '\\n'.join(\n      FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name),\n                                      field, protodoc_manifest)\n      for index, field in enumerate(msg.field)) + '\\n'\n\n\ndef FormatEnumValueAsDefinitionListItem(type_context, enum_value):\n  \"\"\"Format a EnumValueDescriptorProto as RST definition list item.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    enum_value: EnumValueDescriptorProto.\n\n  Returns:\n    RST formatted definition list item.\n  \"\"\"\n  anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name)))\n  default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else ''\n  leading_comment = type_context.leading_comment\n  formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)\n  if HideNotImplemented(leading_comment):\n    return ''\n  comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment\n  return anchor + enum_value.name + '\\n' + MapLines(functools.partial(Indent, 2), comment)\n\n\ndef FormatEnumAsDefinitionList(type_context, enum):\n  \"\"\"Format a EnumDescriptorProto as RST definition list.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    enum: 
DescriptorProto.\n\n  Returns:\n    RST formatted definition list item.\n  \"\"\"\n  return '\\n'.join(\n      FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name),\n                                          enum_value)\n      for index, enum_value in enumerate(enum.value)) + '\\n'\n\n\ndef FormatProtoAsBlockComment(proto):\n  \"\"\"Format a proto as a RST block comment.\n\n  Useful in debugging, not usually referenced.\n  \"\"\"\n  return '\\n\\nproto::\\n\\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\\n'\n\n\nclass RstFormatVisitor(visitor.Visitor):\n  \"\"\"Visitor to generate a RST representation from a FileDescriptor proto.\n\n  See visitor.Visitor for visitor method docs comments.\n  \"\"\"\n\n  def __init__(self):\n    r = runfiles.Create()\n    with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f:\n      # Load as YAML, emit as JSON and then parse as proto to provide type\n      # checking.\n      protodoc_manifest_untyped = yaml.safe_load(f.read())\n      self.protodoc_manifest = manifest_pb2.Manifest()\n      json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest)\n\n  def VisitEnum(self, enum_proto, type_context):\n    normal_enum_type = NormalizeTypeContextName(type_context.name)\n    anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type))\n    header = FormatHeader('-', 'Enum %s' % normal_enum_type)\n    github_url = GithubUrl(type_context)\n    proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\\n\\n'\n    leading_comment = type_context.leading_comment\n    formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum')\n    if HideNotImplemented(leading_comment):\n      return ''\n    return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList(\n        type_context, enum_proto)\n\n  def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):\n    # Skip 
messages synthesized to represent map types.\n    if msg_proto.options.map_entry:\n      return ''\n    normal_msg_type = NormalizeTypeContextName(type_context.name)\n    anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type))\n    header = FormatHeader('-', normal_msg_type)\n    github_url = GithubUrl(type_context)\n    proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\\n\\n'\n    leading_comment = type_context.leading_comment\n    formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message')\n    if HideNotImplemented(leading_comment):\n      return ''\n    return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson(\n        type_context, msg_proto) + FormatMessageAsDefinitionList(\n            type_context, msg_proto,\n            self.protodoc_manifest) + '\\n'.join(nested_msgs) + '\\n' + '\\n'.join(nested_enums)\n\n  def VisitFile(self, file_proto, type_context, services, msgs, enums):\n    has_messages = True\n    if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums):\n      has_messages = False\n\n    # TODO(mattklein123): The logic in both the doc and transform tool around files without messages\n    # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs\n    # in the common case.\n    if (has_messages and\n        not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations\n        and file_proto.name.startswith('envoy')):\n      raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format(\n          file_proto.name))\n\n    # Find the earliest detached comment, attribute it to file level.\n    # Also extract file level titles if any.\n    header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name)\n    # If there are no messages, we don't include in the doc tree (no support for\n    # service rendering yet). 
We allow these files to be missing from the\n    # toctrees.\n    if not has_messages:\n      header = ':orphan:\\n\\n' + header\n    warnings = ''\n    if file_proto.options.HasExtension(status_pb2.file_status):\n      if file_proto.options.Extensions[status_pb2.file_status].work_in_progress:\n        warnings += ('.. warning::\\n   This API is work-in-progress and is '\n                     'subject to breaking changes.\\n\\n')\n    debug_proto = FormatProtoAsBlockComment(file_proto)\n    return header + warnings + comment + '\\n'.join(msgs) + '\\n'.join(enums)  # + debug_proto\n\n\ndef Main():\n  plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)])\n\n\nif __name__ == '__main__':\n  Main()\n"
  },
  {
    "path": "tools/protodoc/requirements.txt",
    "content": "PyYAML==5.3.1 \\\n    --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \\\n    --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \\\n    --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \\\n    --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \\\n    --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \\\n    --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \\\n    --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \\\n    --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \\\n    --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \\\n    --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \\\n    --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a\n"
  },
  {
    "path": "tools/protoxform/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_binary\", \"py_test\")\n\nlicenses([\"notice\"])  # Apache 2\n\npy_binary(\n    name = \"merge_active_shadow\",\n    srcs = [\"merge_active_shadow.py\"],\n    deps = [\n        \"//tools/api_proto_plugin\",\n        \"@com_envoyproxy_protoc_gen_validate//validate:validate_py\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg_py_proto\",\n        \"@com_google_googleapis//google/api:annotations_py_proto\",\n        \"@com_google_protobuf//:protobuf_python\",\n        \"@envoy_api_canonical//envoy/annotations:pkg_py_proto\",\n    ],\n)\n\npy_test(\n    name = \"merge_active_shadow_test\",\n    srcs = [\"merge_active_shadow_test.py\"],\n    deps = [\n        \":merge_active_shadow\",\n        \"//tools/api_proto_plugin\",\n        \"@com_google_protobuf//:protobuf_python\",\n    ],\n)\n\npy_binary(\n    name = \"protoxform\",\n    srcs = [\n        \"migrate.py\",\n        \"options.py\",\n        \"protoxform.py\",\n        \"utils.py\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//tools/api_proto_plugin\",\n        \"//tools/type_whisperer:api_type_db_proto_py_proto\",\n        \"@com_envoyproxy_protoc_gen_validate//validate:validate_py\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg_py_proto\",\n        \"@com_google_googleapis//google/api:annotations_py_proto\",\n        \"@envoy_api_canonical//envoy/annotations:pkg_py_proto\",\n    ],\n)\n\npy_binary(\n    name = \"protoprint\",\n    srcs = [\n        \"options.py\",\n        \"protoprint.py\",\n        \"utils.py\",\n    ],\n    data = [\n        \"//:.clang-format\",\n        \"//tools/type_whisperer:api_type_db.pb_text\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//tools/type_whisperer\",\n        \"//tools/type_whisperer:api_type_db_proto_py_proto\",\n        \"@com_envoyproxy_protoc_gen_validate//validate:validate_py\",\n        
\"@com_github_cncf_udpa//udpa/annotations:pkg_py_proto\",\n        \"@com_google_googleapis//google/api:annotations_py_proto\",\n        \"@com_google_protobuf//:protobuf_python\",\n        \"@envoy_api_canonical//envoy/annotations:pkg_py_proto\",\n    ],\n)\n"
  },
  {
    "path": "tools/protoxform/merge_active_shadow.py",
    "content": "# Merge active and previous version's generated next major version candidate\n# shadow. This involves simultaneously traversing both FileDescriptorProtos and:\n# 1. Recovering hidden_envoy_deprecated_* fields and enum values in active proto.\n# 2. Recovering deprecated (sub)message types.\n# 3. Misc. fixups for oneof metadata and reserved ranges/names.\n\nfrom collections import defaultdict\nimport copy\nimport pathlib\nimport sys\n\nfrom tools.api_proto_plugin import type_context as api_type_context\n\nfrom google.protobuf import descriptor_pb2\nfrom google.protobuf import text_format\n\n# Note: we have to include those proto definitions for text_format sanity.\nfrom google.api import annotations_pb2 as _\nfrom validate import validate_pb2 as _\nfrom envoy.annotations import deprecation_pb2 as _\nfrom envoy.annotations import resource_pb2 as _\nfrom udpa.annotations import migrate_pb2 as _\nfrom udpa.annotations import security_pb2 as _\nfrom udpa.annotations import sensitive_pb2 as _\nfrom udpa.annotations import status_pb2 as _\nfrom udpa.annotations import versioning_pb2 as _\n\n\n# Set reserved_range in target_proto to reflect previous_reserved_range skipping\n# skip_reserved_numbers.\ndef AdjustReservedRange(target_proto, previous_reserved_range, skip_reserved_numbers):\n  del target_proto.reserved_range[:]\n  for rr in previous_reserved_range:\n    # We can only handle singleton ranges today.\n    assert ((rr.start == rr.end) or (rr.end == rr.start + 1))\n    if rr.start not in skip_reserved_numbers:\n      target_proto.reserved_range.add().MergeFrom(rr)\n\n\n# Merge active/shadow EnumDescriptorProtos to a fresh target EnumDescriptorProto.\ndef MergeActiveShadowEnum(active_proto, shadow_proto, target_proto):\n  target_proto.MergeFrom(active_proto)\n  if not shadow_proto:\n    return\n  shadow_values = {v.name: v for v in shadow_proto.value}\n  skip_reserved_numbers = []\n  # For every reserved name, check to see if it's in the shadow, and if 
so,\n  # reintroduce in target_proto.\n  del target_proto.reserved_name[:]\n  for n in active_proto.reserved_name:\n    hidden_n = 'hidden_envoy_deprecated_' + n\n    if hidden_n in shadow_values:\n      v = shadow_values[hidden_n]\n      skip_reserved_numbers.append(v.number)\n      target_proto.value.add().MergeFrom(v)\n    else:\n      target_proto.reserved_name.append(n)\n  AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers)\n  # Special fixup for deprecation of default enum values.\n  for tv in target_proto.value:\n    if tv.name == 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE':\n      for sv in shadow_proto.value:\n        if sv.number == tv.number:\n          assert (sv.number == 0)\n          tv.CopyFrom(sv)\n\n\n# Adjust source code info comments path to reflect insertions of oneof fields\n# inside the middle of an existing collection of fields.\ndef AdjustSourceCodeInfo(type_context, field_index, field_adjustment):\n\n  def HasPathPrefix(s, t):\n    return len(s) <= len(t) and all(p[0] == p[1] for p in zip(s, t))\n\n  for loc in type_context.source_code_info.proto.location:\n    if HasPathPrefix(type_context.path + [2], loc.path):\n      path_field_index = len(type_context.path) + 1\n      if path_field_index < len(loc.path) and loc.path[path_field_index] >= field_index:\n        loc.path[path_field_index] += field_adjustment\n\n\n# Merge active/shadow DescriptorProtos to a fresh target DescriptorProto.\ndef MergeActiveShadowMessage(type_context, active_proto, shadow_proto, target_proto):\n  target_proto.MergeFrom(active_proto)\n  if not shadow_proto:\n    return\n  shadow_fields = {f.name: f for f in shadow_proto.field}\n  skip_reserved_numbers = []\n  # For every reserved name, check to see if it's in the shadow, and if so,\n  # reintroduce in target_proto. We track both the normal fields we need to add\n  # back in (extra_simple_fields) and those that belong to oneofs\n  # (extra_oneof_fields). 
The latter require special treatment, as we can't just\n  # append them to the end of the message, they need to be reordered.\n  extra_simple_fields = []\n  extra_oneof_fields = defaultdict(list)  # oneof index -> list of fields\n  del target_proto.reserved_name[:]\n  for n in active_proto.reserved_name:\n    hidden_n = 'hidden_envoy_deprecated_' + n\n    if hidden_n in shadow_fields:\n      f = shadow_fields[hidden_n]\n      skip_reserved_numbers.append(f.number)\n      missing_field = copy.deepcopy(f)\n      # oneof fields from the shadow need to have their index set to the\n      # corresponding index in active/target_proto.\n      if missing_field.HasField('oneof_index'):\n        oneof_name = shadow_proto.oneof_decl[missing_field.oneof_index].name\n        missing_oneof_index = None\n        for oneof_index, oneof_decl in enumerate(target_proto.oneof_decl):\n          if oneof_decl.name == oneof_name:\n            missing_oneof_index = oneof_index\n        if missing_oneof_index is None:\n          missing_oneof_index = len(target_proto.oneof_decl)\n          target_proto.oneof_decl.add().MergeFrom(\n              shadow_proto.oneof_decl[missing_field.oneof_index])\n        missing_field.oneof_index = missing_oneof_index\n        extra_oneof_fields[missing_oneof_index].append(missing_field)\n      else:\n        extra_simple_fields.append(missing_field)\n    else:\n      target_proto.reserved_name.append(n)\n  # Copy existing fields, as we need to nuke them.\n  existing_fields = copy.deepcopy(target_proto.field)\n  del target_proto.field[:]\n  # Rebuild fields, taking into account extra_oneof_fields. 
protoprint.py\n  # expects that oneof fields are consecutive, so need to sort for this.\n  current_oneof_index = None\n\n  def AppendExtraOneofFields(current_oneof_index, last_oneof_field_index):\n    # Add fields from extra_oneof_fields for current_oneof_index.\n    for oneof_f in extra_oneof_fields[current_oneof_index]:\n      target_proto.field.add().MergeFrom(oneof_f)\n    field_adjustment = len(extra_oneof_fields[current_oneof_index])\n    # Fixup the comments in source code info. Note that this is really\n    # inefficient, O(N^2) in the worst case, but since we have relatively few\n    # deprecated fields, is the easiest to implement method.\n    if last_oneof_field_index is not None:\n      AdjustSourceCodeInfo(type_context, last_oneof_field_index, field_adjustment)\n    del extra_oneof_fields[current_oneof_index]\n    return field_adjustment\n\n  field_index = 0\n  for f in existing_fields:\n    if current_oneof_index is not None:\n      field_oneof_index = f.oneof_index if f.HasField('oneof_index') else None\n      # Are we exiting the oneof? If so, add the respective extra_oneof_fields.\n      if field_oneof_index != current_oneof_index:\n        field_index += AppendExtraOneofFields(current_oneof_index, field_index)\n        current_oneof_index = field_oneof_index\n    elif f.HasField('oneof_index'):\n      current_oneof_index = f.oneof_index\n    target_proto.field.add().MergeFrom(f)\n    field_index += 1\n  if current_oneof_index is not None:\n    # No need to adjust source code info here, since there are no comments for\n    # trailing deprecated fields, so just set field index to None.\n    AppendExtraOneofFields(current_oneof_index, None)\n  # Non-oneof fields are easy to treat, we just append them to the existing\n  # fields. 
They don't get any comments, but that's fine in the generated\n  # shadows.\n  for f in extra_simple_fields:\n    target_proto.field.add().MergeFrom(f)\n  for oneof_index in sorted(extra_oneof_fields.keys()):\n    for f in extra_oneof_fields[oneof_index]:\n      target_proto.field.add().MergeFrom(f)\n  # Same is true for oneofs that are exclusively from the shadow.\n  AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers)\n  # Visit nested message types\n  del target_proto.nested_type[:]\n  shadow_msgs = {msg.name: msg for msg in shadow_proto.nested_type}\n  for index, msg in enumerate(active_proto.nested_type):\n    MergeActiveShadowMessage(\n        type_context.ExtendNestedMessage(index, msg.name, msg.options.deprecated), msg,\n        shadow_msgs.get(msg.name), target_proto.nested_type.add())\n  # Visit nested enum types\n  del target_proto.enum_type[:]\n  shadow_enums = {msg.name: msg for msg in shadow_proto.enum_type}\n  for enum in active_proto.enum_type:\n    MergeActiveShadowEnum(enum, shadow_enums.get(enum.name), target_proto.enum_type.add())\n  # Ensure target has any deprecated sub-message types in case they are needed.\n  active_msg_names = set([msg.name for msg in active_proto.nested_type])\n  for msg in shadow_proto.nested_type:\n    if msg.name not in active_msg_names:\n      target_proto.nested_type.add().MergeFrom(msg)\n\n\n# Merge active/shadow FileDescriptorProtos, returning the resulting FileDescriptorProto.\ndef MergeActiveShadowFile(active_file_proto, shadow_file_proto):\n  target_file_proto = copy.deepcopy(active_file_proto)\n  source_code_info = api_type_context.SourceCodeInfo(target_file_proto.name,\n                                                     target_file_proto.source_code_info)\n  package_type_context = api_type_context.TypeContext(source_code_info, target_file_proto.package)\n  # Visit message types\n  del target_file_proto.message_type[:]\n  shadow_msgs = {msg.name: msg for msg in 
shadow_file_proto.message_type}\n  for index, msg in enumerate(active_file_proto.message_type):\n    MergeActiveShadowMessage(\n        package_type_context.ExtendMessage(index, msg.name, msg.options.deprecated), msg,\n        shadow_msgs.get(msg.name), target_file_proto.message_type.add())\n  # Visit enum types\n  del target_file_proto.enum_type[:]\n  shadow_enums = {msg.name: msg for msg in shadow_file_proto.enum_type}\n  for enum in active_file_proto.enum_type:\n    MergeActiveShadowEnum(enum, shadow_enums.get(enum.name), target_file_proto.enum_type.add())\n  # Ensure target has any deprecated message types in case they are needed.\n  active_msg_names = set([msg.name for msg in active_file_proto.message_type])\n  for msg in shadow_file_proto.message_type:\n    if msg.name not in active_msg_names:\n      target_file_proto.message_type.add().MergeFrom(msg)\n  return target_file_proto\n\n\nif __name__ == '__main__':\n  active_src, shadow_src, dst = sys.argv[1:]\n  active_proto = descriptor_pb2.FileDescriptorProto()\n  text_format.Merge(pathlib.Path(active_src).read_text(), active_proto)\n  shadow_proto = descriptor_pb2.FileDescriptorProto()\n  text_format.Merge(pathlib.Path(shadow_src).read_text(), shadow_proto)\n  pathlib.Path(dst).write_text(str(MergeActiveShadowFile(active_proto, shadow_proto)))\n"
  },
  {
    "path": "tools/protoxform/merge_active_shadow_test.py",
    "content": "import unittest\n\nimport merge_active_shadow\n\nfrom tools.api_proto_plugin import type_context as api_type_context\n\nfrom google.protobuf import descriptor_pb2\nfrom google.protobuf import text_format\n\n\nclass MergeActiveShadowTest(unittest.TestCase):\n  # Dummy type context for tests that don't care about this.\n  def fakeTypeContext(self):\n    fake_source_code_info = descriptor_pb2.SourceCodeInfo()\n    source_code_info = api_type_context.SourceCodeInfo('fake', fake_source_code_info)\n    return api_type_context.TypeContext(source_code_info, 'fake_package')\n\n  # Poor man's text proto equivalence. Tensorflow has better tools for this,\n  # i.e. assertProto2Equal.\n  def assertTextProtoEq(self, lhs, rhs):\n    self.assertMultiLineEqual(lhs.strip(), rhs.strip())\n\n  def testAdjustReservedRange(self):\n    \"\"\"AdjustReservedRange removes specified skip_reserved_numbers.\"\"\"\n    desc_pb_text = \"\"\"\nreserved_range {\n  start: 41\n  end: 41\n}\nreserved_range {\n  start: 42\n  end: 42\n}\nreserved_range {\n  start: 43\n  end: 44\n}\nreserved_range {\n  start: 50\n  end: 51\n}\n    \"\"\"\n    desc = descriptor_pb2.DescriptorProto()\n    text_format.Merge(desc_pb_text, desc)\n    target = descriptor_pb2.DescriptorProto()\n    merge_active_shadow.AdjustReservedRange(target, desc.reserved_range, [42, 43])\n    target_pb_text = \"\"\"\nreserved_range {\n  start: 41\n  end: 41\n}\nreserved_range {\n  start: 50\n  end: 51\n}\n    \"\"\"\n    self.assertTextProtoEq(target_pb_text, str(target))\n\n  def testMergeActiveShadowEnum(self):\n    \"\"\"MergeActiveShadowEnum recovers shadow values.\"\"\"\n    active_pb_text = \"\"\"\nvalue {\n  number: 1\n  name: \"foo\"\n}\nvalue {\n  number: 0\n  name: \"DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE\"\n}\nvalue {\n  number: 3\n  name: \"bar\"\n}\nreserved_name: \"baz\"\nreserved_range {\n  start: 2\n  end: 3\n}\n    \"\"\"\n    active_proto = descriptor_pb2.EnumDescriptorProto()\n    
text_format.Merge(active_pb_text, active_proto)\n    shadow_pb_text = \"\"\"\nvalue {\n  number: 1\n  name: \"foo\"\n}\nvalue {\n  number: 0\n  name: \"wow\"\n}\nvalue {\n  number: 3\n  name: \"bar\"\n}\nvalue {\n  number: 2\n  name: \"hidden_envoy_deprecated_baz\"\n}\nvalue {\n  number: 4\n  name: \"hidden_envoy_deprecated_huh\"\n}\n    \"\"\"\n    shadow_proto = descriptor_pb2.EnumDescriptorProto()\n    text_format.Merge(shadow_pb_text, shadow_proto)\n    target_proto = descriptor_pb2.EnumDescriptorProto()\n    merge_active_shadow.MergeActiveShadowEnum(active_proto, shadow_proto, target_proto)\n    target_pb_text = \"\"\"\nvalue {\n  name: \"foo\"\n  number: 1\n}\nvalue {\n  name: \"wow\"\n  number: 0\n}\nvalue {\n  name: \"bar\"\n  number: 3\n}\nvalue {\n  name: \"hidden_envoy_deprecated_baz\"\n  number: 2\n}\n    \"\"\"\n    self.assertTextProtoEq(target_pb_text, str(target_proto))\n\n  def testMergeActiveShadowMessageComments(self):\n    \"\"\"MergeActiveShadowMessage preserves comment field correspondence.\"\"\"\n    active_pb_text = \"\"\"\nfield {\n  number: 9\n  name: \"oneof_1_0\"\n  oneof_index: 0\n}\nfield {\n  number: 1\n  name: \"simple_field_0\"\n}\nfield {\n  number: 0\n  name: \"oneof_2_0\"\n  oneof_index: 2\n}\nfield {\n  number: 8\n  name: \"oneof_2_1\"\n  oneof_index: 2\n}\nfield {\n  number: 3\n  name: \"oneof_0_0\"\n  oneof_index: 1\n}\nfield {\n  number: 4\n  name: \"newbie\"\n}\nfield {\n  number: 7\n  name: \"oneof_3_0\"\n  oneof_index: 3\n}\nreserved_name: \"missing_oneof_field_0\"\nreserved_name: \"missing_oneof_field_1\"\nreserved_name: \"missing_oneof_field_2\"\noneof_decl {\n  name: \"oneof_0\"\n}\noneof_decl {\n  name: \"oneof_1\"\n}\noneof_decl {\n  name: \"oneof_2\"\n}\noneof_decl {\n  name: \"oneof_3\"\n}\n    \"\"\"\n    active_proto = descriptor_pb2.DescriptorProto()\n    text_format.Merge(active_pb_text, active_proto)\n    active_source_code_info_text = \"\"\"\nlocation {\n  path: [4, 1, 2, 4]\n  leading_comments: 
\"field_4\"\n}\nlocation {\n  path: [4, 1, 2, 5]\n  leading_comments: \"field_5\"\n}\nlocation {\n  path: [4, 1, 2, 3]\n  leading_comments: \"field_3\"\n}\nlocation {\n  path: [4, 1, 2, 0]\n  leading_comments: \"field_0\"\n}\nlocation {\n  path: [4, 1, 2, 1]\n  leading_comments: \"field_1\"\n}\nlocation {\n  path: [4, 0, 2, 2]\n  leading_comments: \"ignore_0\"\n}\nlocation {\n  path: [4, 1, 2, 6]\n  leading_comments: \"field_6\"\n}\nlocation {\n  path: [4, 1, 2, 2]\n  leading_comments: \"field_2\"\n}\nlocation {\n  path: [3]\n  leading_comments: \"ignore_1\"\n}\n\"\"\"\n    active_source_code_info = descriptor_pb2.SourceCodeInfo()\n    text_format.Merge(active_source_code_info_text, active_source_code_info)\n    shadow_pb_text = \"\"\"\nfield {\n  number: 10\n  name: \"hidden_envoy_deprecated_missing_oneof_field_0\"\n  oneof_index: 0\n}\nfield {\n  number: 11\n  name: \"hidden_envoy_deprecated_missing_oneof_field_1\"\n  oneof_index: 3\n}\nfield {\n  number: 11\n  name: \"hidden_envoy_deprecated_missing_oneof_field_2\"\n  oneof_index: 2\n}\noneof_decl {\n  name: \"oneof_0\"\n}\noneof_decl {\n  name: \"oneof_1\"\n}\noneof_decl {\n  name: \"oneof_2\"\n}\noneof_decl {\n  name: \"some_removed_oneof\"\n}\noneof_decl {\n  name: \"oneof_3\"\n}\n\"\"\"\n    shadow_proto = descriptor_pb2.DescriptorProto()\n    text_format.Merge(shadow_pb_text, shadow_proto)\n    target_proto = descriptor_pb2.DescriptorProto()\n    source_code_info = api_type_context.SourceCodeInfo('fake', active_source_code_info)\n    fake_type_context = api_type_context.TypeContext(source_code_info, 'fake_package')\n    merge_active_shadow.MergeActiveShadowMessage(fake_type_context.ExtendMessage(1, \"foo\", False),\n                                                 active_proto, shadow_proto, target_proto)\n    target_pb_text = \"\"\"\nfield {\n  name: \"oneof_1_0\"\n  number: 9\n  oneof_index: 0\n}\nfield {\n  name: \"hidden_envoy_deprecated_missing_oneof_field_0\"\n  number: 10\n  oneof_index: 0\n}\nfield 
{\n  name: \"simple_field_0\"\n  number: 1\n}\nfield {\n  name: \"oneof_2_0\"\n  number: 0\n  oneof_index: 2\n}\nfield {\n  name: \"oneof_2_1\"\n  number: 8\n  oneof_index: 2\n}\nfield {\n  name: \"hidden_envoy_deprecated_missing_oneof_field_2\"\n  number: 11\n  oneof_index: 2\n}\nfield {\n  name: \"oneof_0_0\"\n  number: 3\n  oneof_index: 1\n}\nfield {\n  name: \"newbie\"\n  number: 4\n}\nfield {\n  name: \"oneof_3_0\"\n  number: 7\n  oneof_index: 3\n}\nfield {\n  name: \"hidden_envoy_deprecated_missing_oneof_field_1\"\n  number: 11\n  oneof_index: 4\n}\noneof_decl {\n  name: \"oneof_0\"\n}\noneof_decl {\n  name: \"oneof_1\"\n}\noneof_decl {\n  name: \"oneof_2\"\n}\noneof_decl {\n  name: \"oneof_3\"\n}\noneof_decl {\n  name: \"some_removed_oneof\"\n}\n    \"\"\"\n    target_source_code_info_text = \"\"\"\nlocation {\n  path: 4\n  path: 1\n  path: 2\n  path: 6\n  leading_comments: \"field_4\"\n}\nlocation {\n  path: 4\n  path: 1\n  path: 2\n  path: 7\n  leading_comments: \"field_5\"\n}\nlocation {\n  path: 4\n  path: 1\n  path: 2\n  path: 4\n  leading_comments: \"field_3\"\n}\nlocation {\n  path: 4\n  path: 1\n  path: 2\n  path: 0\n  leading_comments: \"field_0\"\n}\nlocation {\n  path: 4\n  path: 1\n  path: 2\n  path: 2\n  leading_comments: \"field_1\"\n}\nlocation {\n  path: 4\n  path: 0\n  path: 2\n  path: 2\n  leading_comments: \"ignore_0\"\n}\nlocation {\n  path: 4\n  path: 1\n  path: 2\n  path: 8\n  leading_comments: \"field_6\"\n}\nlocation {\n  path: 4\n  path: 1\n  path: 2\n  path: 3\n  leading_comments: \"field_2\"\n}\nlocation {\n  path: 3\n  leading_comments: \"ignore_1\"\n}\n\"\"\"\n    self.maxDiff = None\n    self.assertTextProtoEq(target_pb_text, str(target_proto))\n    self.assertTextProtoEq(target_source_code_info_text,\n                           str(fake_type_context.source_code_info.proto))\n\n  def testMergeActiveShadowMessage(self):\n    \"\"\"MergeActiveShadowMessage recovers shadow fields with oneofs.\"\"\"\n    active_pb_text = 
\"\"\"\nfield {\n  number: 1\n  name: \"foo\"\n}\nfield {\n  number: 0\n  name: \"bar\"\n  oneof_index: 2\n}\nfield {\n  number: 3\n  name: \"baz\"\n}\nfield {\n  number: 4\n  name: \"newbie\"\n}\nreserved_name: \"wow\"\nreserved_range {\n  start: 2\n  end: 3\n}\noneof_decl {\n  name: \"ign\"\n}\noneof_decl {\n  name: \"ign2\"\n}\noneof_decl {\n  name: \"some_oneof\"\n}\n    \"\"\"\n    active_proto = descriptor_pb2.DescriptorProto()\n    text_format.Merge(active_pb_text, active_proto)\n    shadow_pb_text = \"\"\"\nfield {\n  number: 1\n  name: \"foo\"\n}\nfield {\n  number: 0\n  name: \"bar\"\n}\nfield {\n  number: 3\n  name: \"baz\"\n}\nfield {\n  number: 2\n  name: \"hidden_envoy_deprecated_wow\"\n  oneof_index: 0\n}\noneof_decl {\n  name: \"some_oneof\"\n}\n    \"\"\"\n    shadow_proto = descriptor_pb2.DescriptorProto()\n    text_format.Merge(shadow_pb_text, shadow_proto)\n    target_proto = descriptor_pb2.DescriptorProto()\n    merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto,\n                                                 target_proto)\n    target_pb_text = \"\"\"\nfield {\n  name: \"foo\"\n  number: 1\n}\nfield {\n  name: \"bar\"\n  number: 0\n  oneof_index: 2\n}\nfield {\n  name: \"hidden_envoy_deprecated_wow\"\n  number: 2\n  oneof_index: 2\n}\nfield {\n  name: \"baz\"\n  number: 3\n}\nfield {\n  name: \"newbie\"\n  number: 4\n}\noneof_decl {\n  name: \"ign\"\n}\noneof_decl {\n  name: \"ign2\"\n}\noneof_decl {\n  name: \"some_oneof\"\n}\n    \"\"\"\n    self.assertTextProtoEq(target_pb_text, str(target_proto))\n\n  def testMergeActiveShadowMessageNoShadowMessage(self):\n    \"\"\"MergeActiveShadowMessage doesn't require a shadow message for new nested active messages.\"\"\"\n    active_proto = descriptor_pb2.DescriptorProto()\n    shadow_proto = descriptor_pb2.DescriptorProto()\n    active_proto.nested_type.add().name = 'foo'\n    target_proto = descriptor_pb2.DescriptorProto()\n    
merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto,\n                                                 target_proto)\n    self.assertEqual(target_proto.nested_type[0].name, 'foo')\n\n  def testMergeActiveShadowMessageNoShadowEnum(self):\n    \"\"\"MergeActiveShadowMessage doesn't require a shadow enum for new nested active enums.\"\"\"\n    active_proto = descriptor_pb2.DescriptorProto()\n    shadow_proto = descriptor_pb2.DescriptorProto()\n    active_proto.enum_type.add().name = 'foo'\n    target_proto = descriptor_pb2.DescriptorProto()\n    merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto,\n                                                 target_proto)\n    self.assertEqual(target_proto.enum_type[0].name, 'foo')\n\n  def testMergeActiveShadowMessageMissing(self):\n    \"\"\"MergeActiveShadowMessage recovers missing messages from shadow.\"\"\"\n    active_proto = descriptor_pb2.DescriptorProto()\n    shadow_proto = descriptor_pb2.DescriptorProto()\n    shadow_proto.nested_type.add().name = 'foo'\n    target_proto = descriptor_pb2.DescriptorProto()\n    merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto,\n                                                 target_proto)\n    self.assertEqual(target_proto.nested_type[0].name, 'foo')\n\n  def testMergeActiveShadowFileMissing(self):\n    \"\"\"MergeActiveShadowFile recovers missing messages from shadow.\"\"\"\n    active_proto = descriptor_pb2.FileDescriptorProto()\n    shadow_proto = descriptor_pb2.FileDescriptorProto()\n    shadow_proto.message_type.add().name = 'foo'\n    target_proto = descriptor_pb2.DescriptorProto()\n    target_proto = merge_active_shadow.MergeActiveShadowFile(active_proto, shadow_proto)\n    self.assertEqual(target_proto.message_type[0].name, 'foo')\n\n  def testMergeActiveShadowFileNoShadowMessage(self):\n    \"\"\"MergeActiveShadowFile doesn't require a shadow 
message for new active messages.\"\"\"\n    active_proto = descriptor_pb2.FileDescriptorProto()\n    shadow_proto = descriptor_pb2.FileDescriptorProto()\n    active_proto.message_type.add().name = 'foo'\n    target_proto = descriptor_pb2.DescriptorProto()\n    target_proto = merge_active_shadow.MergeActiveShadowFile(active_proto, shadow_proto)\n    self.assertEqual(target_proto.message_type[0].name, 'foo')\n\n  def testMergeActiveShadowFileNoShadowEnum(self):\n    \"\"\"MergeActiveShadowFile doesn't require a shadow enum for new active enums.\"\"\"\n    active_proto = descriptor_pb2.FileDescriptorProto()\n    shadow_proto = descriptor_pb2.FileDescriptorProto()\n    active_proto.enum_type.add().name = 'foo'\n    target_proto = descriptor_pb2.DescriptorProto()\n    target_proto = merge_active_shadow.MergeActiveShadowFile(active_proto, shadow_proto)\n    self.assertEqual(target_proto.enum_type[0].name, 'foo')\n\n\n# TODO(htuch): add some test for recursion.\n\nif __name__ == '__main__':\n  unittest.main()\n"
  },
  {
    "path": "tools/protoxform/migrate.py",
    "content": "# API upgrade business logic.\n\nimport copy\nimport re\n\nfrom tools.api_proto_plugin import traverse\nfrom tools.api_proto_plugin import visitor\nfrom tools.protoxform import options\nfrom tools.protoxform import utils\n\nfrom envoy_api_canonical.envoy.annotations import resource_pb2\nfrom udpa.annotations import migrate_pb2\nfrom udpa.annotations import status_pb2\nfrom google.api import annotations_pb2\n\nENVOY_API_TYPE_REGEX_STR = 'envoy_api_(msg|enum_value|field|enum)_([\\w\\.]+)'\nENVOY_COMMENT_WITH_TYPE_REGEX = re.compile('<%s>|:ref:`%s`' %\n                                           (ENVOY_API_TYPE_REGEX_STR, ENVOY_API_TYPE_REGEX_STR))\n\n\nclass UpgradeVisitor(visitor.Visitor):\n  \"\"\"Visitor to generate an upgraded proto from a FileDescriptor proto.\n\n  See visitor.Visitor for visitor method docs comments.\n  \"\"\"\n\n  def __init__(self, n, typedb, envoy_internal_shadow, package_version_status):\n    self._base_version = n\n    self._typedb = typedb\n    self._envoy_internal_shadow = envoy_internal_shadow\n    self._package_version_status = package_version_status\n\n  def _UpgradedComment(self, c):\n\n    def UpgradeType(match):\n      # We're upgrading a type within a RST anchor reference here. These are\n      # stylized and match the output format of tools/protodoc. 
We need to do\n      # some special handling of field/enum values, and also the normalization\n      # that was performed in v2 for envoy.api.v2 types.\n      label_ref_type, label_normalized_type_name, section_ref_type, section_normalized_type_name = match.groups(\n      )\n      if label_ref_type is not None:\n        ref_type = label_ref_type\n        normalized_type_name = label_normalized_type_name\n      else:\n        ref_type = section_ref_type\n        normalized_type_name = section_normalized_type_name\n      if ref_type == 'field' or ref_type == 'enum_value':\n        normalized_type_name, residual = normalized_type_name.rsplit('.', 1)\n      else:\n        residual = ''\n      type_name = 'envoy.' + normalized_type_name\n      api_v2_type_name = 'envoy.api.v2.' + normalized_type_name\n      if type_name in self._typedb.types:\n        type_desc = self._typedb.types[type_name]\n      else:\n        # We need to deal with envoy.api.* normalization in the v2 API. We won't\n        # need this in v3+, so rather than churn docs, we just have this workaround.\n        type_desc = self._typedb.types[api_v2_type_name]\n      repl_type = type_desc.next_version_type_name[\n          len('envoy.'):] if type_desc.next_version_type_name else normalized_type_name\n      # TODO(htuch): this should really either go through the type database or\n      # via the descriptor pool and annotations, but there are only two of these\n      # we need for the initial v2 -> v3 docs cut, so hard coding for now.\n      # Tracked at https://github.com/envoyproxy/envoy/issues/9734.\n      if repl_type == 'config.route.v3.RouteAction':\n        if residual == 'host_rewrite':\n          residual = 'host_rewrite_literal'\n        elif residual == 'auto_host_rewrite_header':\n          residual = 'auto_host_rewrite'\n      new_ref = 'envoy_api_%s_%s%s' % (ref_type, repl_type, '.' 
+ residual if residual else '')\n      if label_ref_type is not None:\n        return '<%s>' % new_ref\n      else:\n        return ':ref:`%s`' % new_ref\n\n    return re.sub(ENVOY_COMMENT_WITH_TYPE_REGEX, UpgradeType, c)\n\n  def _UpgradedPostMethod(self, m):\n    return re.sub(r'^/v%d/' % self._base_version, '/v%d/' % (self._base_version + 1), m)\n\n  # Upgraded type using canonical type naming, e.g. foo.bar.\n  def _UpgradedTypeCanonical(self, t):\n    if not t.startswith('envoy'):\n      return t\n    type_desc = self._typedb.types[t]\n    if type_desc.next_version_type_name:\n      return type_desc.next_version_type_name\n    return t\n\n  # Upgraded type using internal type naming, e.g. .foo.bar.\n  def _UpgradedType(self, t):\n    if not t.startswith('.envoy'):\n      return t\n    return '.' + self._UpgradedTypeCanonical(t[1:])\n\n  def _Deprecate(self, proto, field_or_value):\n    \"\"\"Deprecate a field or value in a message/enum proto.\n\n    Args:\n      proto: DescriptorProto or EnumDescriptorProto message.\n      field_or_value: field or value inside proto.\n    \"\"\"\n    if self._envoy_internal_shadow:\n      field_or_value.name = 'hidden_envoy_deprecated_' + field_or_value.name\n    else:\n      reserved = proto.reserved_range.add()\n      reserved.start = field_or_value.number\n      reserved.end = field_or_value.number + 1\n      proto.reserved_name.append(field_or_value.name)\n      options.AddHideOption(field_or_value.options)\n\n  def _Rename(self, proto, migrate_annotation):\n    \"\"\"Rename a field/enum/service/message\n\n    Args:\n      proto: DescriptorProto or corresponding proto message\n      migrate_annotation: udpa.annotations.MigrateAnnotation message\n    \"\"\"\n    if migrate_annotation.rename:\n      proto.name = migrate_annotation.rename\n      migrate_annotation.rename = \"\"\n\n  def _OneofPromotion(self, msg_proto, field_proto, migrate_annotation):\n    \"\"\"Promote a field to a oneof.\n\n    Args:\n      msg_proto: 
DescriptorProto for message containing field.\n      field_proto: FieldDescriptorProto for field.\n      migrate_annotation: udpa.annotations.FieldMigrateAnnotation message\n    \"\"\"\n    if migrate_annotation.oneof_promotion:\n      oneof_index = -1\n      for n, oneof_decl in enumerate(msg_proto.oneof_decl):\n        if oneof_decl.name == migrate_annotation.oneof_promotion:\n          oneof_index = n\n      if oneof_index == -1:\n        oneof_index = len(msg_proto.oneof_decl)\n        oneof_decl = msg_proto.oneof_decl.add()\n        oneof_decl.name = migrate_annotation.oneof_promotion\n      field_proto.oneof_index = oneof_index\n      migrate_annotation.oneof_promotion = \"\"\n\n  def VisitService(self, service_proto, type_context):\n    upgraded_proto = copy.deepcopy(service_proto)\n    for m in upgraded_proto.method:\n      if m.options.HasExtension(annotations_pb2.http):\n        http_options = m.options.Extensions[annotations_pb2.http]\n        # TODO(htuch): figure out a more systematic approach using the type DB\n        # to service upgrade.\n        http_options.post = self._UpgradedPostMethod(http_options.post)\n      m.input_type = self._UpgradedType(m.input_type)\n      m.output_type = self._UpgradedType(m.output_type)\n    if service_proto.options.HasExtension(resource_pb2.resource):\n      upgraded_proto.options.Extensions[resource_pb2.resource].type = self._UpgradedTypeCanonical(\n          service_proto.options.Extensions[resource_pb2.resource].type)\n    return upgraded_proto\n\n  def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):\n    upgraded_proto = copy.deepcopy(msg_proto)\n    if upgraded_proto.options.deprecated and not self._envoy_internal_shadow:\n      options.AddHideOption(upgraded_proto.options)\n    options.SetVersioningAnnotation(upgraded_proto.options, type_context.name)\n    # Mark deprecated fields as ready for deletion by protoxform.\n    for f in upgraded_proto.field:\n      if f.options.deprecated:\n 
       self._Deprecate(upgraded_proto, f)\n        if self._envoy_internal_shadow:\n          # When shadowing, we use the upgraded version of types (which should\n          # themselves also be shadowed), to allow us to avoid unnecessary\n          # references to the previous version (and complexities around\n          # upgrading during API boosting).\n          f.type_name = self._UpgradedType(f.type_name)\n        else:\n          # Make sure the type name is erased so it isn't picked up by protoxform\n          # when computing deps.\n          f.type_name = \"\"\n      else:\n        f.type_name = self._UpgradedType(f.type_name)\n      if f.options.HasExtension(migrate_pb2.field_migrate):\n        field_migrate = f.options.Extensions[migrate_pb2.field_migrate]\n        self._Rename(f, field_migrate)\n        self._OneofPromotion(upgraded_proto, f, field_migrate)\n    # Upgrade nested messages.\n    del upgraded_proto.nested_type[:]\n    upgraded_proto.nested_type.extend(nested_msgs)\n    # Upgrade enums.\n    del upgraded_proto.enum_type[:]\n    upgraded_proto.enum_type.extend(nested_enums)\n    return upgraded_proto\n\n  def VisitEnum(self, enum_proto, type_context):\n    upgraded_proto = copy.deepcopy(enum_proto)\n    if upgraded_proto.options.deprecated and not self._envoy_internal_shadow:\n      options.AddHideOption(upgraded_proto.options)\n    for v in upgraded_proto.value:\n      if v.options.deprecated:\n        # We need special handling for the zero field, as proto3 needs some value\n        # here.\n        if v.number == 0 and not self._envoy_internal_shadow:\n          v.name = 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE'\n        else:\n          # Mark deprecated enum values as ready for deletion by protoxform.\n          self._Deprecate(upgraded_proto, v)\n      elif v.options.HasExtension(migrate_pb2.enum_value_migrate):\n        self._Rename(v, v.options.Extensions[migrate_pb2.enum_value_migrate])\n    return upgraded_proto\n\n  def 
VisitFile(self, file_proto, type_context, services, msgs, enums):\n    upgraded_proto = copy.deepcopy(file_proto)\n    # Upgrade imports.\n    upgraded_proto.dependency[:] = [\n        dependency for dependency in upgraded_proto.dependency\n        if dependency not in (\"udpa/annotations/migrate.proto\",)\n    ]\n    # Upgrade package.\n    upgraded_proto.package = self._typedb.next_version_protos[upgraded_proto.name].qualified_package\n    upgraded_proto.name = self._typedb.next_version_protos[upgraded_proto.name].proto_path\n    upgraded_proto.options.ClearExtension(migrate_pb2.file_migrate)\n    upgraded_proto.options.Extensions[\n        status_pb2.file_status].package_version_status = self._package_version_status\n    # Upgrade comments.\n    for location in upgraded_proto.source_code_info.location:\n      location.leading_comments = self._UpgradedComment(location.leading_comments)\n      location.trailing_comments = self._UpgradedComment(location.trailing_comments)\n      for n, c in enumerate(location.leading_detached_comments):\n        location.leading_detached_comments[n] = self._UpgradedComment(c)\n    # Upgrade services.\n    del upgraded_proto.service[:]\n    upgraded_proto.service.extend(services)\n    # Upgrade messages.\n    del upgraded_proto.message_type[:]\n    upgraded_proto.message_type.extend(msgs)\n    # Upgrade enums.\n    del upgraded_proto.enum_type[:]\n    upgraded_proto.enum_type.extend(enums)\n\n    return upgraded_proto\n\n\ndef VersionUpgradeXform(n, envoy_internal_shadow, file_proto, params):\n  \"\"\"Transform a FileDescriptorProto from vN[alpha\\d] to v(N+1).\n\n  Args:\n    n: version N to upgrade from.\n    envoy_internal_shadow: generate a shadow for Envoy internal use containing deprecated fields.\n    file_proto: vN[alpha\\d] FileDescriptorProto message.\n    params: plugin parameters.\n\n  Returns:\n    v(N+1) FileDescriptorProto message.\n  \"\"\"\n  # Load type database.\n  if params['type_db_path']:\n    
utils.LoadTypeDb(params['type_db_path'])\n  typedb = utils.GetTypeDb()\n  # If this isn't a proto in an upgraded package, return None.\n  if file_proto.name not in typedb.next_version_protos or not typedb.next_version_protos[\n      file_proto.name]:\n    return None\n  # Otherwise, this .proto needs upgrading, do it.\n  freeze = 'extra_args' in params and params['extra_args'] == 'freeze'\n  existing_pkg_version_status = file_proto.options.Extensions[\n      status_pb2.file_status].package_version_status\n  # Normally, we are generating the NEXT_MAJOR_VERSION_CANDIDATE. However, if\n  # freezing and previously this was the active major version, the migrated\n  # version is now the ACTIVE version.\n  if freeze and existing_pkg_version_status == status_pb2.ACTIVE:\n    package_version_status = status_pb2.ACTIVE\n  else:\n    package_version_status = status_pb2.NEXT_MAJOR_VERSION_CANDIDATE\n  return traverse.TraverseFile(\n      file_proto, UpgradeVisitor(n, typedb, envoy_internal_shadow, package_version_status))\n"
  },
  {
    "path": "tools/protoxform/options.py",
    "content": "# Manage internal options on messages/enums/fields/enum values.\n\nfrom udpa.annotations import versioning_pb2\n\n\ndef AddHideOption(options):\n  \"\"\"Mark message/enum/field/enum value as hidden.\n\n  Hidden messages are ignored when generating output.\n\n  Args:\n    options: MessageOptions/EnumOptions/FieldOptions/EnumValueOptions message.\n  \"\"\"\n  hide_option = options.uninterpreted_option.add()\n  hide_option.name.add().name_part = 'protoxform_hide'\n\n\ndef HasHideOption(options):\n  \"\"\"Is message/enum/field/enum value hidden?\n\n  Hidden messages are ignored when generating output.\n\n  Args:\n    options: MessageOptions/EnumOptions/FieldOptions/EnumValueOptions message.\n  Returns:\n    Hidden status.\n  \"\"\"\n  return any(\n      option.name[0].name_part == 'protoxform_hide' for option in options.uninterpreted_option)\n\n\ndef SetVersioningAnnotation(options, previous_message_type):\n  \"\"\"Set the udpa.annotations.versioning option.\n\n  Used by Envoy to chain back through the message type history.\n\n  Args:\n    options: MessageOptions message.\n    previous_message_type: string with earlier API type name for the message.\n  \"\"\"\n  options.Extensions[versioning_pb2.versioning].previous_message_type = previous_message_type\n\n\ndef GetVersioningAnnotation(options):\n  \"\"\"Get the udpa.annotations.versioning option.\n\n  Used by Envoy to chain back through the message type history.\n\n  Args:\n    options: MessageOptions message.\n  Returns:\n    versioning.Annotation if set otherwise None.\n  \"\"\"\n  if not options.HasExtension(versioning_pb2.versioning):\n    return None\n  return options.Extensions[versioning_pb2.versioning]\n"
  },
  {
    "path": "tools/protoxform/protoprint.py",
    "content": "# FileDescriptorProtos pretty-printer tool.\n#\n# protoprint.py provides the canonical .proto formatting for the Envoy APIs.\n#\n# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto\n# for the underlying protos mentioned in this file.\n#\n# Usage: protoprint.py <source file path> <type database path>\n\nfrom collections import deque\nimport copy\nimport functools\nimport io\nimport os\nimport pathlib\nimport re\nimport subprocess\nimport sys\n\nfrom tools.api_proto_plugin import annotations\nfrom tools.api_proto_plugin import plugin\nfrom tools.api_proto_plugin import traverse\nfrom tools.api_proto_plugin import visitor\nfrom tools.protoxform import options as protoxform_options\nfrom tools.protoxform import utils\nfrom tools.type_whisperer import type_whisperer\nfrom tools.type_whisperer.types_pb2 import Types\n\nfrom google.protobuf import descriptor_pb2\nfrom google.protobuf import text_format\n\n# Note: we have to include those proto definitions to make FormatOptions work,\n# this also serves as allowlist of extended options.\nfrom google.api import annotations_pb2 as _\nfrom validate import validate_pb2 as _\nfrom envoy.annotations import deprecation_pb2 as _\nfrom envoy.annotations import resource_pb2\nfrom udpa.annotations import migrate_pb2\nfrom udpa.annotations import security_pb2 as _\nfrom udpa.annotations import sensitive_pb2 as _\nfrom udpa.annotations import status_pb2\n\nNEXT_FREE_FIELD_MIN = 5\n\n\nclass ProtoPrintError(Exception):\n  \"\"\"Base error class for the protoprint module.\"\"\"\n\n\ndef ExtractClangProtoStyle(clang_format_text):\n  \"\"\"Extract a key:value dictionary for proto formatting.\n\n  Args:\n    clang_format_text: text from a .clang-format file.\n\n  Returns:\n    key:value dictionary suitable for passing to clang-format --style.\n  \"\"\"\n  lang = None\n  format_dict = {}\n  for line in clang_format_text.split('\\n'):\n    if lang is None or lang != 'Proto':\n      match = 
re.match('Language:\\s+(\\w+)', line)\n      if match:\n        lang = match.group(1)\n      continue\n    match = re.match('(\\w+):\\s+(\\w+)', line)\n    if match:\n      key, value = match.groups()\n      format_dict[key] = value\n    else:\n      break\n  return str(format_dict)\n\n\n# Ensure we are using the canonical clang-format proto style.\nCLANG_FORMAT_STYLE = ExtractClangProtoStyle(pathlib.Path('.clang-format').read_text())\n\n\ndef ClangFormat(contents):\n  \"\"\"Run proto-style oriented clang-format over given string.\n\n  Args:\n    contents: a string with proto contents.\n\n  Returns:\n    clang-formatted string\n  \"\"\"\n  return subprocess.run(\n      ['clang-format',\n       '--style=%s' % CLANG_FORMAT_STYLE, '--assume-filename=.proto'],\n      input=contents.encode('utf-8'),\n      stdout=subprocess.PIPE).stdout\n\n\ndef FormatBlock(block):\n  \"\"\"Append \\n to a .proto section (e.g.\n\n  comment, message definition, etc.) if non-empty.\n\n  Args:\n    block: a string representing the section.\n\n  Returns:\n    A string with appropriate whitespace.\n  \"\"\"\n  if block.strip():\n    return block + '\\n'\n  return ''\n\n\ndef FormatComments(comments):\n  \"\"\"Format a list of comment blocks from SourceCodeInfo.\n\n  Prefixes // to each line, separates blocks by spaces.\n\n  Args:\n    comments: a list of blocks, each block is a list of strings representing\n      lines in each block.\n\n  Returns:\n    A string reprenting the formatted comment blocks.\n  \"\"\"\n\n  # TODO(htuch): not sure why this is needed, but clang-format does some weird\n  # stuff with // comment indents when we have these trailing \\\n  def FixupTrailingBackslash(s):\n    return s[:-1].rstrip() if s.endswith('\\\\') else s\n\n  comments = '\\n\\n'.join(\n      '\\n'.join(['//%s' % FixupTrailingBackslash(line)\n                 for line in comment.split('\\n')[:-1]])\n      for comment in comments)\n  return FormatBlock(comments)\n\n\ndef 
CreateNextFreeFieldXform(msg_proto):\n  \"\"\"Return the next free field number annotation transformer of a message.\n\n  Args:\n    msg_proto: DescriptorProto for message.\n\n  Returns:\n    the next free field number annotation transformer.\n  \"\"\"\n  next_free = max(\n      sum([\n          [f.number + 1 for f in msg_proto.field],\n          [rr.end for rr in msg_proto.reserved_range],\n          [ex.end for ex in msg_proto.extension_range],\n      ], [1]))\n  return lambda _: next_free if next_free > NEXT_FREE_FIELD_MIN else None\n\n\ndef FormatTypeContextComments(type_context, annotation_xforms=None):\n  \"\"\"Format the leading/trailing comments in a given TypeContext.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    annotation_xforms: a dict of transformers for annotations in leading\n      comment.\n\n  Returns:\n    Tuple of formatted leading and trailing comment blocks.\n  \"\"\"\n  leading_comment = type_context.leading_comment\n  if annotation_xforms:\n    leading_comment = leading_comment.getCommentWithTransforms(annotation_xforms)\n  leading = FormatComments(list(type_context.leading_detached_comments) + [leading_comment.raw])\n  trailing = FormatBlock(FormatComments([type_context.trailing_comment]))\n  return leading, trailing\n\n\ndef FormatHeaderFromFile(source_code_info, file_proto, empty_file):\n  \"\"\"Format proto header.\n\n  Args:\n    source_code_info: SourceCodeInfo object.\n    file_proto: FileDescriptorProto for file.\n    empty_file: are there no message/enum/service defs in file?\n\n  Returns:\n    Formatted proto header as a string.\n  \"\"\"\n  # Load the type database.\n  typedb = utils.GetTypeDb()\n  # Figure out type dependencies in this .proto.\n  types = Types()\n  text_format.Merge(traverse.TraverseFile(file_proto, type_whisperer.TypeWhispererVisitor()), types)\n  type_dependencies = sum([list(t.type_dependencies) for t in types.types.values()], [])\n  for service in file_proto.service:\n    
for m in service.method:\n      type_dependencies.extend([m.input_type[1:], m.output_type[1:]])\n  # Determine the envoy/ import paths from type deps.\n  envoy_proto_paths = set(\n      typedb.types[t].proto_path\n      for t in type_dependencies\n      if t.startswith('envoy.') and typedb.types[t].proto_path != file_proto.name)\n\n  def CamelCase(s):\n    return ''.join(t.capitalize() for t in re.split('[\\._]', s))\n\n  package_line = 'package %s;\\n' % file_proto.package\n  file_block = '\\n'.join(['syntax = \"proto3\";\\n', package_line])\n\n  options = descriptor_pb2.FileOptions()\n\n  options.java_outer_classname = CamelCase(os.path.basename(file_proto.name))\n  for msg in file_proto.message_type:\n    if msg.name == options.java_outer_classname:\n      # This is a workaround for Java outer class names that would otherwise\n      # conflict with types defined within the same proto file, see\n      # https://github.com/envoyproxy/envoy/pull/13378.\n      # TODO: in next major version, make this consistent.\n      options.java_outer_classname += \"OuterClass\"\n\n  options.java_multiple_files = True\n  options.java_package = 'io.envoyproxy.' 
+ file_proto.package\n\n  # This is a workaround for C#/Ruby namespace conflicts between packages and\n  # objects, see https://github.com/envoyproxy/envoy/pull/3854.\n  # TODO(htuch): remove once v3 fixes this naming issue in\n  # https://github.com/envoyproxy/envoy/issues/8120.\n  if file_proto.package in ['envoy.api.v2.listener', 'envoy.api.v2.cluster']:\n    qualified_package = '.'.join(s.capitalize() for s in file_proto.package.split('.')) + 'NS'\n    options.csharp_namespace = qualified_package\n    options.ruby_package = qualified_package\n\n  if file_proto.service:\n    options.java_generic_services = True\n\n  if file_proto.options.HasExtension(migrate_pb2.file_migrate):\n    options.Extensions[migrate_pb2.file_migrate].CopyFrom(\n        file_proto.options.Extensions[migrate_pb2.file_migrate])\n\n  if file_proto.options.HasExtension(\n      status_pb2.file_status) and file_proto.package.endswith('alpha'):\n    options.Extensions[status_pb2.file_status].CopyFrom(\n        file_proto.options.Extensions[status_pb2.file_status])\n\n  if not empty_file:\n    options.Extensions[\n        status_pb2.file_status].package_version_status = file_proto.options.Extensions[\n            status_pb2.file_status].package_version_status\n\n  options_block = FormatOptions(options)\n\n  requires_versioning_import = any(\n      protoxform_options.GetVersioningAnnotation(m.options) for m in file_proto.message_type)\n\n  envoy_imports = list(envoy_proto_paths)\n  google_imports = []\n  infra_imports = []\n  misc_imports = []\n  public_imports = []\n\n  for idx, d in enumerate(file_proto.dependency):\n    if idx in file_proto.public_dependency:\n      public_imports.append(d)\n      continue\n    elif d.startswith('envoy/annotations') or d.startswith('udpa/annotations'):\n      infra_imports.append(d)\n    elif d.startswith('envoy/'):\n      # We ignore existing envoy/ imports, since these are computed explicitly\n      # from type_dependencies.\n      pass\n    elif 
d.startswith('google/'):\n      google_imports.append(d)\n    elif d.startswith('validate/'):\n      infra_imports.append(d)\n    elif d in ['udpa/annotations/versioning.proto', 'udpa/annotations/status.proto']:\n      # Skip, we decide to add this based on requires_versioning_import and options.\n      pass\n    else:\n      misc_imports.append(d)\n\n  if options.HasExtension(status_pb2.file_status):\n    infra_imports.append('udpa/annotations/status.proto')\n\n  if requires_versioning_import:\n    infra_imports.append('udpa/annotations/versioning.proto')\n\n  def FormatImportBlock(xs):\n    if not xs:\n      return ''\n    return FormatBlock('\\n'.join(sorted('import \"%s\";' % x for x in set(xs) if x)))\n\n  def FormatPublicImportBlock(xs):\n    if not xs:\n      return ''\n    return FormatBlock('\\n'.join(sorted('import public \"%s\";' % x for x in xs)))\n\n  import_block = '\\n'.join(\n      map(FormatImportBlock, [envoy_imports, google_imports, misc_imports, infra_imports]))\n  import_block += '\\n' + FormatPublicImportBlock(public_imports)\n  comment_block = FormatComments(source_code_info.file_level_comments)\n\n  return ''.join(map(FormatBlock, [file_block, import_block, options_block, comment_block]))\n\n\ndef NormalizeFieldTypeName(type_context, field_fqn):\n  \"\"\"Normalize a fully qualified field type name, e.g.\n\n  .envoy.foo.bar is normalized to foo.bar.\n\n  Considers type context to minimize type prefix.\n\n  Args:\n    field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.\n    type_context: contextual information for message/enum/field.\n\n  Returns:\n    Normalized type name as a string.\n  \"\"\"\n  if field_fqn.startswith('.'):\n    # Let's say we have type context namespace a.b.c.d.e and the type we're\n    # trying to normalize is a.b.d.e. We take (from the end) on package fragment\n    # at a time, and apply the inner-most evaluation that protoc performs to see\n    # if we evaluate to the fully qualified type. 
If so, we're done. It's not\n    # sufficient to compute common prefix and drop that, since in the above\n    # example the normalized type name would be d.e, which proto resolves inner\n    # most as a.b.c.d.e (bad) instead of the intended a.b.d.e.\n    field_fqn_splits = field_fqn[1:].split('.')\n    type_context_splits = type_context.name.split('.')[:-1]\n    remaining_field_fqn_splits = deque(field_fqn_splits[:-1])\n    normalized_splits = deque([field_fqn_splits[-1]])\n\n    if list(remaining_field_fqn_splits)[:1] != type_context_splits[:1] and (\n        len(remaining_field_fqn_splits) == 0 or\n        remaining_field_fqn_splits[0] in type_context_splits[1:]):\n      # Notice that in some cases it is error-prone to normalize a type name.\n      # E.g., it would be an error to replace \".external.Type\" with \"external.Type\"\n      # in the context of \"envoy.extensions.type.external.vX.Config\".\n      # In such a context protoc resolves \"external.Type\" into\n      # \"envoy.extensions.type.external.Type\", which is exactly what the use of a\n      # fully-qualified name \".external.Type\" was meant to prevent.\n      #\n      # A type SHOULD remain fully-qualified under the following conditions:\n      # 1. its root package is different from the root package of the context type\n      # 2. 
EITHER the type doesn't belong to any package at all\n      #    OR     its root package has a name that collides with one of the packages\n      #           of the context type\n      #\n      # E.g.,\n      # a) although \".some.Type\" has a different root package than the context type\n      #    \"TopLevelType\", it is still safe to normalize it into \"some.Type\"\n      # b) although \".google.protobuf.Any\" has a different root package than the context type\n      #    \"envoy.api.v2.Cluster\", it still safe to normalize it into \"google.protobuf.Any\"\n      # c) it is error-prone to normalize \".TopLevelType\" in the context of \"some.Type\"\n      #    into \"TopLevelType\"\n      # d) it is error-prone to normalize \".external.Type\" in the context of\n      #    \"envoy.extensions.type.external.vX.Config\" into \"external.Type\"\n      return field_fqn\n\n    def EquivalentInTypeContext(splits):\n      type_context_splits_tmp = deque(type_context_splits)\n      while type_context_splits_tmp:\n        # If we're in a.b.c and the FQN is a.d.Foo, we want to return true once\n        # we have type_context_splits_tmp as [a] and splits as [d, Foo].\n        if list(type_context_splits_tmp) + list(splits) == field_fqn_splits:\n          return True\n        # If we're in a.b.c.d.e.f and the FQN is a.b.d.e.Foo, we want to return True\n        # once we have type_context_splits_tmp as [a] and splits as [b, d, e, Foo], but\n        # not when type_context_splits_tmp is [a, b, c] and FQN is [d, e, Foo].\n        if len(splits) > 1 and '.'.join(type_context_splits_tmp).endswith('.'.join(\n            list(splits)[:-1])):\n          return False\n        type_context_splits_tmp.pop()\n      return False\n\n    while remaining_field_fqn_splits and not EquivalentInTypeContext(normalized_splits):\n      normalized_splits.appendleft(remaining_field_fqn_splits.pop())\n\n    # `extensions` is a keyword in proto2, and protoc will throw error if a type name\n    # starts 
with `extensions.`.\n    if normalized_splits[0] == 'extensions':\n      normalized_splits.appendleft(remaining_field_fqn_splits.pop())\n\n    return '.'.join(normalized_splits)\n  return field_fqn\n\n\ndef TypeNameFromFQN(fqn):\n  return fqn[1:]\n\n\ndef FormatFieldType(type_context, field):\n  \"\"\"Format a FieldDescriptorProto type description.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    field: FieldDescriptor proto.\n\n  Returns:\n    Formatted proto field type as string.\n  \"\"\"\n  label = 'repeated ' if field.label == field.LABEL_REPEATED else ''\n  type_name = label + NormalizeFieldTypeName(type_context, field.type_name)\n\n  if field.type == field.TYPE_MESSAGE:\n    if type_context.map_typenames and TypeNameFromFQN(\n        field.type_name) in type_context.map_typenames:\n      return 'map<%s, %s>' % tuple(\n          map(functools.partial(FormatFieldType, type_context),\n              type_context.map_typenames[TypeNameFromFQN(field.type_name)]))\n    return type_name\n  elif field.type_name:\n    return type_name\n\n  pretty_type_names = {\n      field.TYPE_DOUBLE: 'double',\n      field.TYPE_FLOAT: 'float',\n      field.TYPE_INT32: 'int32',\n      field.TYPE_SFIXED32: 'int32',\n      field.TYPE_SINT32: 'int32',\n      field.TYPE_FIXED32: 'uint32',\n      field.TYPE_UINT32: 'uint32',\n      field.TYPE_INT64: 'int64',\n      field.TYPE_SFIXED64: 'int64',\n      field.TYPE_SINT64: 'int64',\n      field.TYPE_FIXED64: 'uint64',\n      field.TYPE_UINT64: 'uint64',\n      field.TYPE_BOOL: 'bool',\n      field.TYPE_STRING: 'string',\n      field.TYPE_BYTES: 'bytes',\n  }\n  if field.type in pretty_type_names:\n    return label + pretty_type_names[field.type]\n  raise ProtoPrintError('Unknown field type ' + str(field.type))\n\n\ndef FormatServiceMethod(type_context, method):\n  \"\"\"Format a service MethodDescriptorProto.\n\n  Args:\n    type_context: contextual information for method.\n    method: MethodDescriptorProto 
proto.\n\n  Returns:\n    Formatted service method as string.\n  \"\"\"\n\n  def FormatStreaming(s):\n    return 'stream ' if s else ''\n\n  leading_comment, trailing_comment = FormatTypeContextComments(type_context)\n  return '%srpc %s(%s%s%s) returns (%s%s) {%s}\\n' % (\n      leading_comment, method.name, trailing_comment, FormatStreaming(\n          method.client_streaming), NormalizeFieldTypeName(\n              type_context, method.input_type), FormatStreaming(method.server_streaming),\n      NormalizeFieldTypeName(type_context, method.output_type), FormatOptions(method.options))\n\n\ndef FormatField(type_context, field):\n  \"\"\"Format FieldDescriptorProto as a proto field.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    field: FieldDescriptor proto.\n\n  Returns:\n    Formatted proto field as a string.\n  \"\"\"\n  if protoxform_options.HasHideOption(field.options):\n    return ''\n  leading_comment, trailing_comment = FormatTypeContextComments(type_context)\n\n  return '%s%s %s = %d%s;\\n%s' % (leading_comment, FormatFieldType(type_context, field), field.name,\n                                  field.number, FormatOptions(field.options), trailing_comment)\n\n\ndef FormatEnumValue(type_context, value):\n  \"\"\"Format a EnumValueDescriptorProto as a proto enum value.\n\n  Args:\n    type_context: contextual information for message/enum/field.\n    value: EnumValueDescriptorProto.\n\n  Returns:\n    Formatted proto enum value as a string.\n  \"\"\"\n  if protoxform_options.HasHideOption(value.options):\n    return ''\n  leading_comment, trailing_comment = FormatTypeContextComments(type_context)\n  formatted_annotations = FormatOptions(value.options)\n  return '%s%s = %d%s;\\n%s' % (leading_comment, value.name, value.number, formatted_annotations,\n                               trailing_comment)\n\n\ndef TextFormatValue(field, value):\n  \"\"\"Format the value as protobuf text format\n\n  Args:\n    field: a FieldDescriptor 
that describes the field\n    value: the value stored in the field\n\n  Returns:\n    value in protobuf text format\n  \"\"\"\n  out = io.StringIO()\n  text_format.PrintFieldValue(field, value, out)\n  return out.getvalue()\n\n\ndef FormatOptions(options):\n  \"\"\"Format *Options (e.g.\n\n  MessageOptions, FieldOptions) message.\n\n  Args:\n    options: A *Options (e.g. MessageOptions, FieldOptions) message.\n\n  Returns:\n    Formatted options as a string.\n  \"\"\"\n\n  formatted_options = []\n  for option_descriptor, option_value in sorted(options.ListFields(), key=lambda x: x[0].number):\n    option_name = '({})'.format(\n        option_descriptor.full_name) if option_descriptor.is_extension else option_descriptor.name\n    if option_descriptor.message_type and option_descriptor.label != option_descriptor.LABEL_REPEATED:\n      formatted_options.extend([\n          '{}.{} = {}'.format(option_name, subfield.name, TextFormatValue(subfield, value))\n          for subfield, value in option_value.ListFields()\n      ])\n    else:\n      formatted_options.append('{} = {}'.format(option_name,\n                                                TextFormatValue(option_descriptor, option_value)))\n\n  if formatted_options:\n    if options.DESCRIPTOR.name in ('EnumValueOptions', 'FieldOptions'):\n      return '[{}]'.format(','.join(formatted_options))\n    else:\n      return FormatBlock(''.join(\n          'option {};\\n'.format(formatted_option) for formatted_option in formatted_options))\n  return ''\n\n\ndef FormatReserved(enum_or_msg_proto):\n  \"\"\"Format reserved values/names in a [Enum]DescriptorProto.\n\n  Args:\n    enum_or_msg_proto: [Enum]DescriptorProto message.\n\n  Returns:\n    Formatted enum_or_msg_proto as a string.\n  \"\"\"\n  rrs = copy.deepcopy(enum_or_msg_proto.reserved_range)\n  # Fixups for singletons that don't seem to always have [inclusive, exclusive)\n  # format when parsed by protoc.\n  for rr in rrs:\n    if rr.start == rr.end:\n      rr.end 
+= 1\n  reserved_fields = FormatBlock(\n      'reserved %s;\\n' %\n      ','.join(map(str, sum([list(range(rr.start, rr.end)) for rr in rrs], [])))) if rrs else ''\n  if enum_or_msg_proto.reserved_name:\n    reserved_fields += FormatBlock('reserved %s;\\n' %\n                                   ', '.join('\"%s\"' % n for n in enum_or_msg_proto.reserved_name))\n  return reserved_fields\n\n\nclass ProtoFormatVisitor(visitor.Visitor):\n  \"\"\"Visitor to generate a proto representation from a FileDescriptor proto.\n\n  See visitor.Visitor for visitor method docs comments.\n  \"\"\"\n\n  def VisitService(self, service_proto, type_context):\n    leading_comment, trailing_comment = FormatTypeContextComments(type_context)\n    methods = '\\n'.join(\n        FormatServiceMethod(type_context.ExtendMethod(index, m.name), m)\n        for index, m in enumerate(service_proto.method))\n    options = FormatBlock(FormatOptions(service_proto.options))\n    return '%sservice %s {\\n%s%s%s\\n}\\n' % (leading_comment, service_proto.name, options,\n                                            trailing_comment, methods)\n\n  def VisitEnum(self, enum_proto, type_context):\n    if protoxform_options.HasHideOption(enum_proto.options):\n      return ''\n    leading_comment, trailing_comment = FormatTypeContextComments(type_context)\n    formatted_options = FormatOptions(enum_proto.options)\n    reserved_fields = FormatReserved(enum_proto)\n    values = [\n        FormatEnumValue(type_context.ExtendField(index, value.name), value)\n        for index, value in enumerate(enum_proto.value)\n    ]\n    joined_values = ('\\n' if any('//' in v for v in values) else '').join(values)\n    return '%senum %s {\\n%s%s%s%s\\n}\\n' % (leading_comment, enum_proto.name, trailing_comment,\n                                           formatted_options, reserved_fields, joined_values)\n\n  def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):\n    # Skip messages synthesized to represent 
map types.\n    if msg_proto.options.map_entry:\n      return ''\n    if protoxform_options.HasHideOption(msg_proto.options):\n      return ''\n    annotation_xforms = {\n        annotations.NEXT_FREE_FIELD_ANNOTATION: CreateNextFreeFieldXform(msg_proto)\n    }\n    leading_comment, trailing_comment = FormatTypeContextComments(type_context, annotation_xforms)\n    formatted_options = FormatOptions(msg_proto.options)\n    formatted_enums = FormatBlock('\\n'.join(nested_enums))\n    formatted_msgs = FormatBlock('\\n'.join(nested_msgs))\n    reserved_fields = FormatReserved(msg_proto)\n    # Recover the oneof structure. This needs some extra work, since\n    # DescriptorProto just gives use fields and a oneof_index that can allow\n    # recovery of the original oneof placement.\n    fields = ''\n    oneof_index = None\n    for index, field in enumerate(msg_proto.field):\n      if oneof_index is not None:\n        if not field.HasField('oneof_index') or field.oneof_index != oneof_index:\n          fields += '}\\n\\n'\n          oneof_index = None\n      if oneof_index is None and field.HasField('oneof_index'):\n        oneof_index = field.oneof_index\n        assert (oneof_index < len(msg_proto.oneof_decl))\n        oneof_proto = msg_proto.oneof_decl[oneof_index]\n        oneof_leading_comment, oneof_trailing_comment = FormatTypeContextComments(\n            type_context.ExtendOneof(oneof_index, field.name))\n        fields += '%soneof %s {\\n%s%s' % (oneof_leading_comment, oneof_proto.name,\n                                          oneof_trailing_comment, FormatOptions(\n                                              oneof_proto.options))\n      fields += FormatBlock(FormatField(type_context.ExtendField(index, field.name), field))\n    if oneof_index is not None:\n      fields += '}\\n\\n'\n    return '%smessage %s {\\n%s%s%s%s%s%s\\n}\\n' % (leading_comment, msg_proto.name, trailing_comment,\n                                                  formatted_options, 
formatted_enums,\n                                                  formatted_msgs, reserved_fields, fields)\n\n  def VisitFile(self, file_proto, type_context, services, msgs, enums):\n    empty_file = len(services) == 0 and len(enums) == 0 and len(msgs) == 0\n    header = FormatHeaderFromFile(type_context.source_code_info, file_proto, empty_file)\n    formatted_services = FormatBlock('\\n'.join(services))\n    formatted_enums = FormatBlock('\\n'.join(enums))\n    formatted_msgs = FormatBlock('\\n'.join(msgs))\n    return ClangFormat(header + formatted_services + formatted_enums + formatted_msgs)\n\n\nif __name__ == '__main__':\n  proto_desc_path = sys.argv[1]\n  file_proto = descriptor_pb2.FileDescriptorProto()\n  input_text = pathlib.Path(proto_desc_path).read_text()\n  if not input_text:\n    sys.exit(0)\n  text_format.Merge(input_text, file_proto)\n  dst_path = pathlib.Path(sys.argv[2])\n  utils.LoadTypeDb(sys.argv[3])\n  dst_path.write_bytes(traverse.TraverseFile(file_proto, ProtoFormatVisitor()))\n"
  },
  {
    "path": "tools/protoxform/protoxform.bzl",
    "content": "load(\"//tools/api_proto_plugin:plugin.bzl\", \"api_proto_plugin_aspect\", \"api_proto_plugin_impl\")\n\ndef _protoxform_impl(target, ctx):\n    return api_proto_plugin_impl(\n        target,\n        ctx,\n        \"proto\",\n        \"protoxform\",\n        [\n            \".active_or_frozen.proto\",\n            \".next_major_version_candidate.proto\",\n            \".next_major_version_candidate.envoy_internal.proto\",\n        ],\n    )\n\n# Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html)\n# that can be invoked from the CLI to perform API transforms via //tools/protoxform for\n# proto_library targets. Example use:\n#\n#   bazel build //api --aspects tools/protoxform/protoxform.bzl%protoxform_aspect \\\n#       --output_groups=proto\nprotoxform_aspect = api_proto_plugin_aspect(\"//tools/protoxform\", _protoxform_impl, use_type_db = True)\n"
  },
  {
    "path": "tools/protoxform/protoxform.py",
    "content": "# protoc plugin to map from FileDescriptorProtos to intermediate form\n#\n# protoxform takes a source FileDescriptorProto and generates active/next major\n# version candidate FileDescriptorProtos. The resulting FileDescriptorProtos are\n# then later processed by proto_sync.py, which invokes protoprint.py to format.\n\nimport copy\nimport functools\n\nfrom tools.api_proto_plugin import plugin\nfrom tools.api_proto_plugin import visitor\nfrom tools.protoxform import migrate\nfrom tools.protoxform import utils\n\n# Note: we have to include those proto definitions to ensure we don't lose these\n# during FileDescriptorProto printing.\nfrom google.api import annotations_pb2 as _\nfrom validate import validate_pb2 as _\nfrom envoy_api_canonical.envoy.annotations import deprecation_pb2 as _\nfrom envoy_api_canonical.envoy.annotations import resource_pb2\nfrom udpa.annotations import migrate_pb2\nfrom udpa.annotations import security_pb2 as _\nfrom udpa.annotations import sensitive_pb2 as _\nfrom udpa.annotations import status_pb2\n\n\nclass ProtoXformError(Exception):\n  \"\"\"Base error class for the protoxform module.\"\"\"\n\n\nclass ProtoFormatVisitor(visitor.Visitor):\n  \"\"\"Visitor to generate a proto representation from a FileDescriptor proto.\n\n  See visitor.Visitor for visitor method docs comments.\n  \"\"\"\n\n  def __init__(self, active_or_frozen, params):\n    if params['type_db_path']:\n      utils.LoadTypeDb(params['type_db_path'])\n    self._freeze = 'extra_args' in params and params['extra_args'] == 'freeze'\n    self._active_or_frozen = active_or_frozen\n\n  def VisitService(self, service_proto, type_context):\n    return None\n\n  def VisitEnum(self, enum_proto, type_context):\n    return None\n\n  def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):\n    return None\n\n  def VisitFile(self, file_proto, type_context, services, msgs, enums):\n    # Freeze protos that have next major version candidates.\n    typedb 
= utils.GetTypeDb()\n    output_proto = copy.deepcopy(file_proto)\n    existing_pkg_version_status = output_proto.options.Extensions[\n        status_pb2.file_status].package_version_status\n    empty_file = len(services) == 0 and len(enums) == 0 and len(msgs) == 0\n    pkg_version_status_exempt = file_proto.name.startswith('envoy/annotations') or empty_file\n    # It's a format error not to set package_version_status.\n    if existing_pkg_version_status == status_pb2.UNKNOWN and not pkg_version_status_exempt:\n      raise ProtoXformError('package_version_status must be set in %s' % file_proto.name)\n    # Only update package_version_status for .active_or_frozen.proto,\n    # migrate.VersionUpgradeXform has taken care of next major version\n    # candidates.\n    if self._active_or_frozen and not pkg_version_status_exempt:\n      # Freeze if this is an active package with a next major version. Preserve\n      # frozen status otherwise.\n      if self._freeze and typedb.next_version_protos.get(output_proto.name, None):\n        target_pkg_version_status = status_pb2.FROZEN\n      elif existing_pkg_version_status == status_pb2.FROZEN:\n        target_pkg_version_status = status_pb2.FROZEN\n      else:\n        assert (existing_pkg_version_status == status_pb2.ACTIVE)\n        target_pkg_version_status = status_pb2.ACTIVE\n      output_proto.options.Extensions[\n          status_pb2.file_status].package_version_status = target_pkg_version_status\n    return str(output_proto)\n\n\ndef Main():\n  plugin.Plugin([\n      plugin.DirectOutputDescriptor('.active_or_frozen.proto',\n                                    functools.partial(ProtoFormatVisitor, True),\n                                    want_params=True),\n      plugin.OutputDescriptor('.next_major_version_candidate.proto',\n                              functools.partial(ProtoFormatVisitor, False),\n                              functools.partial(migrate.VersionUpgradeXform, 2, False),\n                            
  want_params=True),\n      plugin.OutputDescriptor('.next_major_version_candidate.envoy_internal.proto',\n                              functools.partial(ProtoFormatVisitor, False),\n                              functools.partial(migrate.VersionUpgradeXform, 2, True),\n                              want_params=True)\n  ])\n\n\nif __name__ == '__main__':\n  Main()\n"
  },
  {
    "path": "tools/protoxform/protoxform_test.sh",
    "content": "#!/bin/bash\n\nset -e\n\nrm -rf bazel-bin/tools\n\nread -ra BAZEL_BUILD_OPTIONS <<< \"${BAZEL_BUILD_OPTIONS:-}\"\nBAZEL_BUILD_OPTIONS+=(\"--remote_download_outputs=all\")\nTOOLS=\"$(dirname \"$(dirname \"$(realpath \"$0\")\")\")\"\n# to satisfy dependency on run_command\nexport PYTHONPATH=\"$TOOLS\"\n\n\n# protoxform fix test cases\nPROTO_TARGETS=()\nprotos=$(bazel query \"labels(srcs, labels(deps, //tools/testdata/protoxform:fix_protos))\")\nwhile read -r line; do PROTO_TARGETS+=(\"$line\"); done \\\n    <<< \"$protos\"\nbazel build \"${BAZEL_BUILD_OPTIONS[@]}\" --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:fix_protos \\\n  //tools/testdata/protoxform:fix_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto\nbazel build \"${BAZEL_BUILD_OPTIONS[@]}\" //tools/protoxform:protoprint\n./tools/protoxform/protoxform_test_helper.py fix \"${PROTO_TARGETS[@]}\"\n\n# protoxform freeze test cases\nPROTO_TARGETS=()\nprotos=$(bazel query \"labels(srcs, labels(deps, //tools/testdata/protoxform:freeze_protos))\")\nwhile read -r line; do PROTO_TARGETS+=(\"$line\"); done \\\n    <<< \"$protos\"\nbazel build \"${BAZEL_BUILD_OPTIONS[@]}\" --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:freeze_protos \\\n  --//tools/api_proto_plugin:extra_args=freeze \\\n  //tools/testdata/protoxform:freeze_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto\nbazel build \"${BAZEL_BUILD_OPTIONS[@]}\" //tools/protoxform:protoprint\n./tools/protoxform/protoxform_test_helper.py freeze \"${PROTO_TARGETS[@]}\"\n"
  },
  {
    "path": "tools/protoxform/protoxform_test_helper.py",
    "content": "#!/usr/bin/env python3\n\nfrom run_command import runCommand\n\nimport logging\nimport os\nimport pathlib\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\n\ndef PathAndFilename(label):\n  \"\"\"Retrieve actual path and filename from bazel label\n\n  Args:\n    label: bazel label to specify target proto.\n\n  Returns:\n    actual path and filename\n  \"\"\"\n  if label.startswith('/'):\n    label = label.replace('//', '/', 1)\n  elif label.startswith('@'):\n    label = re.sub(r'@.*/', '/', label)\n  else:\n    return label\n  label = label.replace(\":\", \"/\")\n  splitted_label = label.split('/')\n  return ['/'.join(splitted_label[:len(splitted_label) - 1]), splitted_label[-1]]\n\n\ndef GoldenProtoFile(path, filename, version):\n  \"\"\"Retrieve golden proto file path. In general, those are placed in tools/testdata/protoxform.\n\n  Args:\n    path: target proto path\n    filename: target proto filename\n    version: api version to specify target golden proto filename\n\n  Returns:\n    actual golden proto absolute path\n  \"\"\"\n  base = \"./\"\n  base += path + \"/\" + filename + \".\" + version + \".gold\"\n  return os.path.abspath(base)\n\n\ndef ProtoPrint(src, dst):\n  \"\"\"Pretty-print FileDescriptorProto to a destination file.\n\n  Args:\n    src: source path for FileDescriptorProto.\n    dst: destination path for formatted proto.\n  \"\"\"\n  print('ProtoPrint %s -> %s' % (src, dst))\n  subprocess.check_call([\n      'bazel-bin/tools/protoxform/protoprint', src, dst,\n      './bazel-bin/tools/protoxform/protoprint.runfiles/envoy/tools/type_whisperer/api_type_db.pb_text'\n  ])\n\n\ndef ResultProtoFile(cmd, path, tmp, filename, version):\n  \"\"\"Retrieve result proto file path. 
In general, those are placed in bazel artifacts.\n\n  Args:\n    cmd: fix or freeze?\n    path: target proto path\n    tmp: temporary directory.\n    filename: target proto filename\n    version: api version to specify target result proto filename\n\n  Returns:\n    actual result proto absolute path\n  \"\"\"\n  base = \"./bazel-bin\"\n  base += os.path.join(path, \"%s_protos\" % cmd)\n  base += os.path.join(base, path)\n  base += \"/{0}.{1}.proto\".format(filename, version)\n  dst = os.path.join(tmp, filename)\n  ProtoPrint(os.path.abspath(base), dst)\n  return dst\n\n\ndef Diff(result_file, golden_file):\n  \"\"\"Execute diff command with unified form\n\n  Args:\n    result_file: result proto file\n    golden_file: golden proto file\n\n  Returns:\n    output and status code\n  \"\"\"\n  command = 'diff -u '\n  command += result_file + ' '\n  command += golden_file\n  status, stdout, stderr = runCommand(command)\n  return [status, stdout, stderr]\n\n\ndef Run(cmd, path, filename, version):\n  \"\"\"Run main execution for protoxform test\n\n  Args:\n    cmd: fix or freeze?\n    path: target proto path\n    filename: target proto filename\n    version: api version to specify target result proto filename\n\n  Returns:\n    result message extracted from diff command\n  \"\"\"\n  message = \"\"\n  with tempfile.TemporaryDirectory() as tmp:\n    golden_path = GoldenProtoFile(path, filename, version)\n    test_path = ResultProtoFile(cmd, path, tmp, filename, version)\n    if os.stat(golden_path).st_size == 0 and not os.path.exists(test_path):\n      return message\n\n    status, stdout, stderr = Diff(golden_path, test_path)\n\n    if status != 0:\n      message = '\\n'.join([str(line) for line in stdout + stderr])\n\n    return message\n\n\nif __name__ == \"__main__\":\n  messages = \"\"\n  logging.basicConfig(format='%(message)s')\n  cmd = sys.argv[1]\n  for target in sys.argv[2:]:\n    path, filename = PathAndFilename(target)\n    messages += Run(cmd, path, filename, 
'active_or_frozen')\n    messages += Run(cmd, path, filename, 'next_major_version_candidate')\n    messages += Run(cmd, path, filename, 'next_major_version_candidate.envoy_internal')\n\n  if len(messages) == 0:\n    logging.warning(\"PASS\")\n    sys.exit(0)\n  else:\n    logging.error(\"FAILED:\\n{}\".format(messages))\n    sys.exit(1)\n"
  },
  {
    "path": "tools/protoxform/utils.py",
    "content": "import os\n\nfrom tools.type_whisperer.api_type_db_pb2 import TypeDb\n\nfrom google.protobuf import text_format\n\n_typedb = None\n\n\ndef GetTypeDb():\n  assert _typedb != None\n  return _typedb\n\n\ndef LoadTypeDb(type_db_path):\n  global _typedb\n  _typedb = TypeDb()\n  with open(type_db_path, 'r') as f:\n    text_format.Merge(f.read(), _typedb)\n"
  },
  {
    "path": "tools/run_command.py",
    "content": "import subprocess\n\n\n# Echoes and runs an OS command, returning exit status and the captured\n# stdout and stderr as a string array.\ndef runCommand(command):\n  proc = subprocess.run([command], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n  return proc.returncode, proc.stdout.decode('utf-8').split('\\n'), proc.stderr.decode(\n      'utf-8').split('\\n')\n"
  },
  {
    "path": "tools/shell_utils.sh",
    "content": "#!/bin/bash\n\n\nsource_venv() {\n  VENV_DIR=$1\n  if [[ \"${VIRTUAL_ENV}\" == \"\" ]]; then\n    if [[ ! -d \"${VENV_DIR}\"/venv ]]; then\n      virtualenv \"${VENV_DIR}\"/venv --python=python3\n    fi\n    # shellcheck disable=SC1090\n    source \"${VENV_DIR}/venv/bin/activate\"\n  else\n    echo \"Found existing virtualenv\"\n  fi\n}\n\npython_venv() {\n  SCRIPT_DIR=$(realpath \"$(dirname \"$0\")\")\n\n  BUILD_DIR=build_tools\n  PY_NAME=\"$1\"\n  VENV_DIR=\"${BUILD_DIR}/${PY_NAME}\"\n\n  source_venv \"${VENV_DIR}\"\n  pip install -r \"${SCRIPT_DIR}\"/requirements.txt\n\n  shift\n  python3 \"${SCRIPT_DIR}/${PY_NAME}.py\" \"$*\"\n}\n"
  },
  {
    "path": "tools/socket_passing.py",
    "content": "#!/usr/bin/env python3\n\n# This tool is a helper script that queries the admin address for all listener\n# addresses after envoy startup. (The admin address is written out to a file by\n# setting the -a flag in the envoy binary.) The script then outputs a new json\n# config file with updated listener addresses. This script is currently called\n# in the hot restart integration test to update listener addresses bound to\n# port 0 in the initial json config file.\n\nfrom collections import OrderedDict\n\nimport argparse\nimport http.client\nimport json\nimport os.path\nimport re\nimport sys\nimport time\n\n# Seconds to wait for the admin address output file to appear. The script exits\n# with failure if the file is not found.\nADMIN_FILE_TIMEOUT_SECS = 20\n\n\n# Because the hot restart files are yaml but yaml support is not included in\n# python by default, we parse this fairly manually.\ndef GenerateNewConfig(original_yaml, admin_address, updated_json):\n  # Get original listener addresses\n  with open(original_yaml, 'r') as original_file:\n    sys.stdout.write('Admin address is ' + admin_address + '\\n')\n    try:\n      admin_conn = http.client.HTTPConnection(admin_address)\n      admin_conn.request('GET', '/listeners?format=json')\n      admin_response = admin_conn.getresponse()\n      if not admin_response.status == 200:\n        return False\n      discovered_listeners = json.loads(admin_response.read().decode('utf-8'))\n    except Exception as e:\n      sys.stderr.write('Cannot connect to admin: %s\\n' % e)\n      return False\n    else:\n      raw_yaml = original_file.readlines()\n      index = 0\n      for discovered in discovered_listeners['listener_statuses']:\n        replaced = False\n        if 'pipe' in discovered['local_address']:\n          path = discovered['local_address']['pipe']['path']\n          for index in range(index + 1, len(raw_yaml) - 1):\n            if 'pipe:' in raw_yaml[index] and 'path:' in raw_yaml[index + 1]:\n      
        raw_yaml[index + 1] = re.sub('path:.*', 'path: \"' + path + '\"', raw_yaml[index + 1])\n              replaced = True\n              break\n        else:\n          addr = discovered['local_address']['socket_address']['address']\n          port = str(discovered['local_address']['socket_address']['port_value'])\n          if addr[0] == '[':\n            addr = addr[1:-1]  # strip [] from ipv6 address.\n          for index in range(index + 1, len(raw_yaml) - 2):\n            if ('socket_address:' in raw_yaml[index] and 'address:' in raw_yaml[index + 1] and\n                'port_value:' in raw_yaml[index + 2]):\n              raw_yaml[index + 1] = re.sub('address:.*', 'address: \"' + addr + '\"',\n                                           raw_yaml[index + 1])\n              raw_yaml[index + 2] = re.sub('port_value:.*', 'port_value: ' + port,\n                                           raw_yaml[index + 2])\n              replaced = True\n              break\n        if replaced:\n          sys.stderr.write('replaced listener at line ' + str(index) + ' with ' + str(discovered) +\n                           '\\n')\n        else:\n          sys.stderr.write('Failed to replace a discovered listener ' + str(discovered) + '\\n')\n          return False\n      with open(updated_json, 'w') as outfile:\n        outfile.writelines(raw_yaml)\n    finally:\n      admin_conn.close()\n\n  return True\n\n\nif __name__ == '__main__':\n  parser = argparse.ArgumentParser(description='Replace listener addressses in json file.')\n  parser.add_argument('-o',\n                      '--original_json',\n                      type=str,\n                      required=True,\n                      help='Path of the original config json file')\n  parser.add_argument('-a',\n                      '--admin_address_path',\n                      type=str,\n                      required=True,\n                      help='Path of the admin address file')\n  parser.add_argument('-u',\n         
             '--updated_json',\n                      type=str,\n                      required=True,\n                      help='Path to output updated json config file')\n  args = parser.parse_args()\n  admin_address_path = args.admin_address_path\n\n  # Read admin address from file\n  counter = 0\n  while not os.path.exists(admin_address_path):\n    time.sleep(1)\n    counter += 1\n    if counter > ADMIN_FILE_TIMEOUT_SECS:\n      break\n\n  if not os.path.exists(admin_address_path):\n    sys.exit(1)\n\n  with open(admin_address_path, 'r') as admin_address_file:\n    admin_address = admin_address_file.read()\n\n  success = GenerateNewConfig(args.original_json, admin_address, args.updated_json)\n\n  if not success:\n    sys.exit(1)\n"
  },
  {
    "path": "tools/spelling/check_spelling.sh",
    "content": "#!/bin/bash\n\n# Applies requisite code formatters to the source tree\n# check_spelling.sh\n\n# Why choose misspell?\n# https://github.com/client9/misspell#what-are-other-misspelling-correctors-and-whats-wrong-with-them\n\nset -u\nset -e\n\nVERSION=\"0.3.4\"\nLINUX_MISSPELL_SHA=\"34d489dbc5ddb4dfd6d3cfac9fde8660e6c37e6c\"\nMAC_MISSPELL_SHA=\"f2607e2297b9e8af562e384c38045033375c7433\"\nTMP_DIR=\"/tmp\"\nOS=\"\"\n\nMISSPELL_ARGS=\"-error -o stderr\"\n\nif [[ \"$#\" -lt 1 ]]; then\n  echo \"Usage: $0 check|fix\"\n  exit 1\nfi\n\nif [[ \"$1\" == \"fix\" ]]; then\n  MISSPELL_ARGS=\"-w\"\nfi\n\nif [[ \"$(uname)\" == \"Darwin\" ]]; then\n  OS=\"mac\"\nelif [[ \"$(uname)\" == \"Linux\" ]]; then\n  OS=\"linux\"\nelse\n  echo \"Current only support mac/Linux\"\n  exit 1\nfi\n\nSCRIPTPATH=$( cd \"$(dirname \"$0\")\" ; pwd -P )\nROOTDIR=\"${SCRIPTPATH}/../..\"\ncd \"$ROOTDIR\"\n\nBIN_FILENAME=\"misspell_${VERSION}_${OS}_64bit.tar.gz\"\n# Install tools we need\nif [[ ! -e \"${TMP_DIR}/misspell\" ]]; then\n  if ! wget https://github.com/client9/misspell/releases/download/v\"${VERSION}\"/\"${BIN_FILENAME}\" \\\n  -O \"${TMP_DIR}/${BIN_FILENAME}\" --no-verbose --tries=3 -o \"${TMP_DIR}/wget.log\"; then\n    cat \"${TMP_DIR}/wget.log\"\n    exit 1\n  fi\n  tar -xvf \"${TMP_DIR}/${BIN_FILENAME}\" -C \"${TMP_DIR}\" &> /dev/null\nfi\n\nACTUAL_SHA=\"\"\nEXPECT_SHA=\"\"\n\nif [[ \"${OS}\" == \"linux\" ]]; then\n  ACTUAL_SHA=$(sha1sum \"${TMP_DIR}\"/misspell|cut -d' ' -f1)\n  EXPECT_SHA=\"${LINUX_MISSPELL_SHA}\"\nelse\n  ACTUAL_SHA=$(shasum -a 1 \"${TMP_DIR}\"/misspell|cut -d' ' -f1)\n  EXPECT_SHA=\"${MAC_MISSPELL_SHA}\"\nfi\n\nif [[ ! 
${ACTUAL_SHA} == \"${EXPECT_SHA}\" ]]; then\n   echo \"Expect shasum is ${EXPECT_SHA}, but actual shasum is ${ACTUAL_SHA}\"\n   exit 1\nfi\n\nchmod +x \"${TMP_DIR}/misspell\"\n\n# Spell checking\n# All the skipping files are defined in tools/spelling/spelling_skip_files.txt\nread -ra SKIP_FILES < \"${ROOTDIR}/tools/spelling/spelling_skip_files.txt\"\nread -ra SKIP_FILES <<< \"${SKIP_FILES[@]/#/-e }\"\n\n# All the ignore words are defined in tools/spelling/spelling_allowlist_words.txt\nSPELLING_ALLOWLIST_WORDS_FILE=\"${ROOTDIR}/tools/spelling/spelling_allowlist_words.txt\"\nALLOWLIST_WORDS=$(grep -vE '^#|^$' \"${SPELLING_ALLOWLIST_WORDS_FILE}\" | xargs | tr ' ' ',')\n\ngit ls-files | grep -v \"${SKIP_FILES[@]}\" | xargs \"${TMP_DIR}/misspell\" -i \\\n  \"${ALLOWLIST_WORDS}\" ${MISSPELL_ARGS}\n"
  },
  {
    "path": "tools/spelling/check_spelling_pedantic.py",
    "content": "#! /usr/bin/env python3\n\nfrom __future__ import print_function\n\nimport argparse\nimport locale\nimport math\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom functools import partial\nfrom itertools import chain\n\n# Handle function rename between python 2/3.\ntry:\n  input = raw_input\nexcept NameError:\n  pass\n\ntry:\n  cmp\nexcept NameError:\n\n  def cmp(x, y):\n    return (x > y) - (x < y)\n\n\nCURR_DIR = os.path.dirname(os.path.realpath(__file__))\n\n# Special comment commands control behavior. These may appear anywhere\n# within a comment, but only one per line. The command applies to the\n# entire line on which it appears. The \"off\" command disables spell\n# checking until the next \"on\" command, or end-of-file. The\n# \"skip-file\" command disables spell checking in the entire file (even\n# previous comments). In a multi-line (/* */) comment, \"skip-block\"\n# disables spell checking for the remainder of the comment. For\n# sequences of full-line comments (only white space before a //\n# comment), \"skip-block\" disables spell checking the sequence of\n# comments is interrupted by a blank line or a line with code.\nSPELLCHECK_OFF = \"SPELLCHECKER(off)\"  # disable SPELLCHECK_ON (or EOF)\nSPELLCHECK_ON = \"SPELLCHECKER(on)\"  # (re-)enable\nSPELLCHECK_SKIP_FILE = \"SPELLCHECKER(skip-file)\"  # disable checking this entire file\nSPELLCHECK_SKIP_BLOCK = \"SPELLCHECKER(skip-block)\"  # disable to end of comment\n\n# Single line comments: // comment OR /* comment */\n# Limit the characters that may precede // to help filter out some code\n# mistakenly processed as a comment.\nINLINE_COMMENT = re.compile(r'(?:^|[^:\"])//( .*?$|$)|/\\*+(.*?)\\*+/')\n\n# Multi-line comments: /* comment */ (multiple lines)\nMULTI_COMMENT_START = re.compile(r'/\\*(.*?)$')\nMULTI_COMMENT_END = re.compile(r'^(.*?)\\*/')\n\n# Envoy TODO comment style.\nTODO = re.compile(r'(TODO|NOTE)\\s*\\(@?[A-Za-z0-9-]+\\):?')\n\n# Ignore parameter names in doxygen 
comments.\nMETHOD_DOC = re.compile('@(param\\s+\\w+|return(\\s+const)?\\s+\\w+)')\n\n# Camel Case splitter\nCAMEL_CASE = re.compile(r'[A-Z]?[a-z]+|[A-Z]+(?=[A-Z]|$)')\n\n# Base64: we assume base64 encoded data in tests is never mixed with\n# other comments on a single line.\nBASE64 = re.compile(r'^[\\s*]+([A-Za-z0-9/+=]{16,})\\s*$')\nNUMBER = re.compile(r'\\d')\n\n# Hex: match 1) longish strings of hex digits (to avoid matching \"add\" and\n# other simple words that happen to look like hex), 2) 2 or more two digit\n# hex numbers separated by colons, 3) \"0x\" prefixed hex numbers of any length,\n# or 4) UUIDs.\nHEX = re.compile(r'(?:^|\\s|[(])([A-Fa-f0-9]{8,})(?:$|\\s|[.,)])')\nHEX_SIG = re.compile(r'(?:\\W|^)([A-Fa-f0-9]{2}(:[A-Fa-f0-9]{2})+)(?:\\W|$)')\nPREFIXED_HEX = re.compile(r'0x[A-Fa-f0-9]+')\nUUID = re.compile(r'[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}')\nBIT_FIELDS = re.compile(r'[01]+[XxYy]+')\nAB_FIELDS = re.compile(r'\\W([AB]+)\\W')\n\n# Matches e.g. FC00::/8 or 2001::abcd/64. Does not match ::1/128, but\n# aspell ignores that anyway.\nIPV6_ADDR = re.compile(r'(?:\\W|^)([A-Fa-f0-9]+:[A-Fa-f0-9:]+/[0-9]{1,3})(?:\\W|$)')\n\n# Quoted words: \"word\", 'word', or *word*.\nQUOTED_WORD = re.compile(r'(([\"\\'])[A-Za-z0-9.:-]+(\\2))|(\\*[A-Za-z0-9.:-]+\\*)')\n\n# Backtick-quoted words that look like code. Note the overlap with RST_LINK.\nQUOTED_EXPR = re.compile(r'`[A-Za-z0-9:()<>_.,/{}\\[\\]&*-]+`')\n\n# Tuple expressions like (abc, def).\nTUPLE_EXPR = re.compile(r'\\([A-Za-z0-9]+(?:, *[A-Za-z0-9]+){1,}\\)')\n\n# Command flags (e.g. \"-rf\") and percent specifiers.\nFLAG = re.compile(r'\\W([-%][A-Za-z]+)')\n\n# Bare github users (e.g. @user).\nUSER = re.compile(r'\\W(@[A-Za-z0-9-]+)')\n\n# RST Links (e.g. `text <https://example.com>`_, :ref:`text <internal_ref>`)\nRST_LINK = re.compile(r'`([^`<])+<([^ ]+)>`')\n\n# RST inline literals.\nRST_LITERAL = re.compile(r'``.*``')\n\n# RST code block marker.\nRST_CODE_BLOCK = '.. 
code-block::'\n\n# Path names.\nABSPATH = re.compile(r'(?:\\s|^)((/[A-Za-z0-9_.*-]+)+)(?:\\s|$)')\nFILEREF = re.compile(r'(?:\\s|^)([A-Za-z0-9_./-]+\\.(cc|h|py|sh))(?:\\s|$)')\n\n# Ordinals (1st, 2nd, 3rd, 4th, ...)\nORDINALS = re.compile(r'([0-9]*1st|[0-9]*2nd|[0-9]*3rd|[0-9]+th)')\n\n# Start of string indent.\nINDENT = re.compile(r'^( *)')\n\nSMART_QUOTES = {\n    \"\\u2018\": \"'\",\n    \"\\u2019\": \"'\",\n    \"\\u201c\": '\"',\n    \"\\u201d\": '\"',\n}\n\n# Valid dictionary words. Anything else crashes aspell.\nDICTIONARY_WORD = re.compile(r\"^[A-Za-z']+$\")\n\nDEBUG = 0\nCOLOR = True\nMARK = False\n\n\ndef red(s):\n  if COLOR:\n    return \"\\33[1;31m\" + s + \"\\033[0m\"\n  return s\n\n\ndef debug(s):\n  if DEBUG > 0:\n    print(s)\n\n\ndef debug1(s):\n  if DEBUG > 1:\n    print(s)\n\n\nclass SpellChecker:\n  \"\"\"Aspell-based spell checker.\"\"\"\n\n  def __init__(self, dictionary_file):\n    self.dictionary_file = dictionary_file\n    self.aspell = None\n    self.prefixes = []\n    self.suffixes = []\n    self.prefix_re = None\n    self.suffix_re = None\n\n  def start(self):\n    words, prefixes, suffixes = self.load_dictionary()\n\n    self.prefixes = prefixes\n    self.suffixes = suffixes\n\n    self.prefix_re = re.compile(\"(?:\\s|^)((%s)-)\" % (\"|\".join(prefixes)), re.IGNORECASE)\n    self.suffix_re = re.compile(\"(-(%s))(?:\\s|$)\" % (\"|\".join(suffixes)), re.IGNORECASE)\n\n    # Generate aspell personal dictionary.\n    pws = os.path.join(CURR_DIR, '.aspell.en.pws')\n    with open(pws, 'w') as f:\n      f.write(\"personal_ws-1.1 en %d\\n\" % (len(words)))\n      f.writelines(words)\n\n    # Start an aspell process.\n    aspell_args = [\"aspell\", \"pipe\", \"--lang=en_US\", \"--encoding=utf-8\", \"--personal=\" + pws]\n    self.aspell = subprocess.Popen(aspell_args,\n                                   bufsize=4096,\n                                   stdin=subprocess.PIPE,\n                                   stdout=subprocess.PIPE,\n           
                        stderr=subprocess.STDOUT,\n                                   universal_newlines=True)\n\n    # Read the version line that aspell emits on startup.\n    self.aspell.stdout.readline()\n\n  def stop(self):\n    if not self.aspell:\n      return\n\n    self.aspell.stdin.close()\n    self.aspell.wait()\n    self.aspell = None\n\n  def check(self, line):\n    if line.strip() == '':\n      return []\n\n    self.aspell.poll()\n    if self.aspell.returncode is not None:\n      print(\"aspell quit unexpectedly: return code %d\" % (self.aspell.returncode))\n      sys.exit(2)\n\n    debug1(\"ASPELL< %s\" % (line))\n\n    self.aspell.stdin.write(line + os.linesep)\n    self.aspell.stdin.flush()\n\n    errors = []\n    while True:\n      result = self.aspell.stdout.readline().strip()\n      debug1(\"ASPELL> %s\" % (result))\n\n      # Check for end of results.\n      if result == \"\":\n        break\n\n      t = result[0]\n      if t == \"*\" or t == \"-\" or t == \"+\":\n        # *: found in dictionary.\n        # -: found run-together words in dictionary.\n        # +: found root word in dictionary.\n        continue\n\n      # & <original> <N> <offset>: m1, m2, ... mN, g1, g2, ...\n      # ? 
<original> 0 <offset>: g1, g2, ....\n      # # <original> <offset>\n      original, rem = result[2:].split(\" \", 1)\n\n      if t == \"#\":\n        # Not in dictionary, but no suggestions.\n        errors.append((original, int(rem), []))\n      elif t == '&' or t == '?':\n        # Near misses and/or guesses.\n        _, rem = rem.split(\" \", 1)  # Drop N (may be 0).\n        o, rem = rem.split(\": \", 1)  # o is offset from start of line.\n        suggestions = rem.split(\", \")\n\n        errors.append((original, int(o), suggestions))\n      else:\n        print(\"aspell produced unexpected output: %s\" % (result))\n        sys.exit(2)\n\n    return errors\n\n  def load_dictionary(self):\n    # Read the custom dictionary.\n    all_words = []\n    with open(self.dictionary_file, 'r') as f:\n      all_words = f.readlines()\n\n    # Strip comments, invalid words, and blank lines.\n    words = [w for w in all_words if len(w.strip()) > 0 and re.match(DICTIONARY_WORD, w)]\n\n    suffixes = [w.strip()[1:] for w in all_words if w.startswith('-')]\n    prefixes = [w.strip()[:-1] for w in all_words if w.strip().endswith('-')]\n\n    # Allow acronyms and abbreviations to be spelled in lowercase.\n    # (e.g. 
Convert \"HTTP\" into \"HTTP\" and \"http\" which also matches\n    # \"Http\").\n    for word in words:\n      if word.isupper():\n        words += word.lower()\n\n    return (words, prefixes, suffixes)\n\n  def add_words(self, additions):\n    lines = []\n    with open(self.dictionary_file, 'r') as f:\n      lines = f.readlines()\n\n    additions = [w + os.linesep for w in additions]\n    additions.sort()\n\n    # Insert additions into the lines ignoring comments, suffixes, and blank lines.\n    idx = 0\n    add_idx = 0\n    while idx < len(lines) and add_idx < len(additions):\n      line = lines[idx]\n      if len(line.strip()) != 0 and line[0] != \"#\" and line[0] != '-':\n        c = cmp(additions[add_idx], line)\n        if c < 0:\n          lines.insert(idx, additions[add_idx])\n          add_idx += 1\n        elif c == 0:\n          add_idx += 1\n      idx += 1\n\n    # Append any remaining additions.\n    lines += additions[add_idx:]\n\n    with open(self.dictionary_file, 'w') as f:\n      f.writelines(lines)\n\n    self.stop()\n    self.start()\n\n\n# Split camel case words and run them through the dictionary. Returns\n# a replacement list of errors. 
The replacement list may contain just\n# the original error (if the word is not camel case), may be empty if\n# the split words are all spelled correctly, or may be a new set of\n# errors referencing the misspelled sub-words.\ndef check_camel_case(checker, err):\n  (word, word_offset, _) = err\n\n  debug(\"check camel case %s\" % (word))\n  parts = re.findall(CAMEL_CASE, word)\n\n  # Word is not camel case: the previous result stands.\n  if len(parts) <= 1:\n    debug(\"  -> not camel case\")\n    return [err]\n\n  split_errs = []\n  part_offset = 0\n  for part in parts:\n    debug(\"  -> part: %s\" % (part))\n    split_err = checker.check(part)\n    if split_err:\n      debug(\"    -> not found in dictionary\")\n      split_errs += [(part, word_offset + part_offset, split_err[0][2])]\n    part_offset += len(part)\n\n  return split_errs\n\n\n# Check for affixes and run them through the dictionary again. Returns\n# a replacement list of errors which may just be the original errors\n# or empty if an affix was successfully handled.\ndef check_affix(checker, err):\n  (word, word_offset, _) = err\n\n  debug(\"check affix %s\" % (word))\n\n  for prefix in checker.prefixes:\n    debug(\"  -> try %s\" % (prefix))\n    if word.lower().startswith(prefix.lower()):\n      root = word[len(prefix):]\n      if root != '':\n        debug(\"  -> check %s\" % (root))\n        root_err = checker.check(root)\n        if not root_err:\n          debug(\"  -> ok\")\n          return []\n\n  for suffix in checker.suffixes:\n    if word.lower().endswith(suffix.lower()):\n      root = word[:-len(suffix)]\n      if root != '':\n        debug(\"  -> try %s\" % (root))\n        root_err = checker.check(root)\n        if not root_err:\n          debug(\"  -> ok\")\n          return []\n\n  return [err]\n\n\n# Find occurrences of the regex within comment and replace the numbered\n# matching group with spaces. 
If secondary is defined, the matching\n# group must also match secondary to be masked.\ndef mask_with_regex(comment, regex, group, secondary=None):\n  found = False\n  for m in regex.finditer(comment):\n    if secondary and secondary.search(m.group(group)) is None:\n      continue\n\n    start = m.start(group)\n    end = m.end(group)\n\n    comment = comment[:start] + (' ' * (end - start)) + comment[end:]\n    found = True\n\n  return (comment, found)\n\n\n# Checks the comment at offset against the spell checker. Result is an array\n# of tuples where each tuple is the misspelled word, it's offset from the\n# start of the line, and an array of possible replacements.\ndef check_comment(checker, offset, comment):\n  # Strip smart quotes which cause problems sometimes.\n  for sq, q in SMART_QUOTES.items():\n    comment = comment.replace(sq, q)\n\n  # Replace TODO comments with spaces to preserve string offsets.\n  comment, _ = mask_with_regex(comment, TODO, 0)\n\n  # Ignore @param varname\n  comment, _ = mask_with_regex(comment, METHOD_DOC, 0)\n\n  # Similarly, look for base64 sequences, but they must have at least one\n  # digit.\n  comment, _ = mask_with_regex(comment, BASE64, 1, NUMBER)\n\n  # Various hex constants:\n  comment, _ = mask_with_regex(comment, HEX, 1)\n  comment, _ = mask_with_regex(comment, HEX_SIG, 1)\n  comment, _ = mask_with_regex(comment, PREFIXED_HEX, 0)\n  comment, _ = mask_with_regex(comment, BIT_FIELDS, 0)\n  comment, _ = mask_with_regex(comment, AB_FIELDS, 1)\n  comment, _ = mask_with_regex(comment, UUID, 0)\n  comment, _ = mask_with_regex(comment, IPV6_ADDR, 1)\n\n  # Single words in quotes:\n  comment, _ = mask_with_regex(comment, QUOTED_WORD, 0)\n\n  # RST inline literals:\n  comment, _ = mask_with_regex(comment, RST_LITERAL, 0)\n\n  # Mask the reference part of an RST link (but not the link text). 
Otherwise, check for a quoted\n  # code-like expression (which would mask the link text if not guarded).\n  comment, found = mask_with_regex(comment, RST_LINK, 0)\n  if not found:\n    comment, _ = mask_with_regex(comment, QUOTED_EXPR, 0)\n\n  comment, _ = mask_with_regex(comment, TUPLE_EXPR, 0)\n\n  # Command flags:\n  comment, _ = mask_with_regex(comment, FLAG, 1)\n\n  # Github user refs:\n  comment, _ = mask_with_regex(comment, USER, 1)\n\n  # Absolutew paths and references to source files.\n  comment, _ = mask_with_regex(comment, ABSPATH, 1)\n  comment, _ = mask_with_regex(comment, FILEREF, 1)\n\n  # Ordinals (1st, 2nd...)\n  comment, _ = mask_with_regex(comment, ORDINALS, 0)\n\n  if checker.prefix_re is not None:\n    comment, _ = mask_with_regex(comment, checker.prefix_re, 1)\n\n  if checker.suffix_re is not None:\n    comment, _ = mask_with_regex(comment, checker.suffix_re, 1)\n\n  # Everything got masked, return early.\n  if comment == \"\" or comment.strip() == \"\":\n    return []\n\n  # Mask leading punctuation.\n  if not comment[0].isalnum():\n    comment = ' ' + comment[1:]\n\n  errors = checker.check(comment)\n\n  # Fix up offsets relative to the start of the line vs start of the comment.\n  errors = [(w, o + offset, s) for (w, o, s) in errors]\n\n  # CamelCase words get split and re-checked\n  errors = [*chain.from_iterable(map(lambda err: check_camel_case(checker, err), errors))]\n\n  errors = [*chain.from_iterable(map(lambda err: check_affix(checker, err), errors))]\n\n  return errors\n\n\ndef print_error(file, line_offset, lines, errors):\n  # Highlight misspelled words.\n  line = lines[line_offset]\n  prefix = \"%s:%d:\" % (file, line_offset + 1)\n  for (word, offset, suggestions) in reversed(errors):\n    line = line[:offset] + red(word) + line[offset + len(word):]\n\n  print(\"%s%s\" % (prefix, line.rstrip()))\n\n  if MARK:\n    # Print a caret at the start of each misspelled word.\n    marks = ' ' * len(prefix)\n    last = 0\n    for (word, 
offset, suggestions) in errors:\n      marks += (' ' * (offset - last)) + '^'\n      last = offset + 1\n    print(marks)\n\n\ndef print_fix_options(word, suggestions):\n  print(\"%s:\" % (word))\n  print(\"  a: accept and add to dictionary\")\n  print(\"  A: accept and add to dictionary as ALLCAPS (for acronyms)\")\n  print(\"  f <word>: replace with the given word without modifying dictionary\")\n  print(\"  i: ignore\")\n  print(\"  r <word>: replace with given word and add to dictionary\")\n  print(\"  R <word>: replace with given word and add to dictionary as ALLCAPS (for acronyms)\")\n  print(\"  x: abort\")\n\n  if not suggestions:\n    return\n\n  col_width = max(len(word) for word in suggestions)\n  opt_width = int(math.log(len(suggestions), 10)) + 1\n  padding = 2  # Two spaces of padding.\n  delim = 2  # Colon and space after number.\n  num_cols = int(78 / (col_width + padding + opt_width + delim))\n  num_rows = int(len(suggestions) / num_cols + 1)\n  rows = [\"\"] * num_rows\n\n  indent = \" \" * padding\n  for idx, sugg in enumerate(suggestions):\n    row = idx % len(rows)\n    row_data = \"%d: %s\" % (idx, sugg)\n\n    rows[row] += indent + row_data.ljust(col_width + opt_width + delim)\n\n  for row in rows:\n    print(row)\n\n\ndef fix_error(checker, file, line_offset, lines, errors):\n  print_error(file, line_offset, lines, errors)\n\n  fixed = {}\n  replacements = []\n  additions = []\n  for (word, offset, suggestions) in errors:\n    if word in fixed:\n      # Same typo was repeated in a line, so just reuse the previous choice.\n      replacements += [fixed[word]]\n      continue\n\n    print_fix_options(word, suggestions)\n\n    replacement = \"\"\n    while replacement == \"\":\n      try:\n        choice = input(\"> \")\n      except EOFError:\n        choice = \"x\"\n\n      add = None\n      if choice == \"x\":\n        print(\"Spell checking aborted.\")\n        sys.exit(2)\n      elif choice == \"a\":\n        replacement = word\n        add 
= word\n      elif choice == \"A\":\n        replacement = word\n        add = word.upper()\n      elif choice[:1] == \"f\":\n        replacement = choice[1:].strip()\n        if replacement == \"\":\n          print(\"Invalid choice: '%s'. Must specify a replacement (e.g. 'f corrected').\" % (choice))\n          continue\n      elif choice == \"i\":\n        replacement = word\n      elif choice[:1] == \"r\" or choice[:1] == \"R\":\n        replacement = choice[1:].strip()\n        if replacement == \"\":\n          print(\"Invalid choice: '%s'. Must specify a replacement (e.g. 'r corrected').\" % (choice))\n          continue\n\n        if choice[:1] == \"R\":\n          if replacement.upper() not in suggestions:\n            add = replacement.upper()\n        elif replacement not in suggestions:\n          add = replacement\n      else:\n        try:\n          idx = int(choice)\n        except ValueError:\n          idx = -1\n        if idx >= 0 and idx < len(suggestions):\n          replacement = suggestions[idx]\n        else:\n          print(\"Invalid choice: '%s'\" % (choice))\n\n    fixed[word] = replacement\n    replacements += [replacement]\n    if add:\n      if re.match(DICTIONARY_WORD, add):\n        additions += [add]\n      else:\n        print(\"Cannot add %s to the dictionary: it may only contain letter and apostrophes\" % add)\n\n  if len(errors) != len(replacements):\n    print(\"Internal error %d errors with %d replacements\" % (len(errors), len(replacements)))\n    sys.exit(2)\n\n  # Perform replacements on the line.\n  line = lines[line_offset]\n  for idx in range(len(replacements) - 1, -1, -1):\n    word, offset, _ = errors[idx]\n    replacement = replacements[idx]\n    if word == replacement:\n      continue\n\n    line = line[:offset] + replacement + line[offset + len(word):]\n  lines[line_offset] = line\n\n  # Update the dictionary.\n  checker.add_words(additions)\n\n\nclass Comment:\n  \"\"\"Comment represents a comment at a location 
within a file.\"\"\"\n\n  def __init__(self, line, col, text, last_on_line):\n    self.line = line\n    self.col = col\n    self.text = text\n    self.last_on_line = last_on_line\n\n\n# Extract comments from lines. Returns an array of Comment.\ndef extract_comments(lines):\n  in_comment = False\n  comments = []\n  for line_idx, line in enumerate(lines):\n    line_comments = []\n    last = 0\n    if in_comment:\n      mc_end = MULTI_COMMENT_END.search(line)\n      if mc_end is None:\n        # Full line is within a multi-line comment.\n        line_comments.append((0, line))\n      else:\n        # Start of line is the end of a multi-line comment.\n        line_comments.append((0, mc_end.group(1)))\n        last = mc_end.end()\n        in_comment = False\n\n    if not in_comment:\n      for inline in INLINE_COMMENT.finditer(line, last):\n        # Single-line comment.\n        m = inline.lastindex  # 1 is //, 2 is /* ... */\n        line_comments.append((inline.start(m), inline.group(m)))\n        last = inline.end(m)\n\n      if last < len(line):\n        mc_start = MULTI_COMMENT_START.search(line, last)\n        if mc_start is not None:\n          # New multi-lie comment starts at end of line.\n          line_comments.append((mc_start.start(1), mc_start.group(1)))\n          in_comment = True\n\n    for idx, line_comment in enumerate(line_comments):\n      col, text = line_comment\n      last_on_line = idx + 1 >= len(line_comments)\n      comments.append(Comment(line=line_idx, col=col, text=text, last_on_line=last_on_line))\n\n  # Handle control statements and filter out comments that are part of\n  # RST code block directives.\n  result = []\n  n = 0\n  nc = len(comments)\n\n  while n < nc:\n    text = comments[n].text\n\n    if SPELLCHECK_SKIP_FILE in text:\n      # Skip the file: just don't return any comments.\n      return []\n\n    pos = text.find(SPELLCHECK_ON)\n    if pos != -1:\n      # Ignored because spellchecking isn't disabled. 
Just mask out the command.\n      comments[n].text = text[:pos] + ' ' * len(SPELLCHECK_ON) + text[pos + len(SPELLCHECK_ON):]\n      result.append(comments[n])\n      n += 1\n    elif SPELLCHECK_OFF in text or SPELLCHECK_SKIP_BLOCK in text:\n      skip_block = SPELLCHECK_SKIP_BLOCK in text\n      last_line = n\n      n += 1\n      while n < nc:\n        if skip_block:\n          if comments[n].line - last_line > 1:\n            # Gap in comments. We've skipped the block.\n            break\n          line = lines[comments[n].line]\n          if line[:comments[n].col].strip() != \"\":\n            # Some code here. We've skipped the block.\n            break\n        elif SPELLCHECK_ON in comments[n].text:\n          # Turn checking back on.\n          n += 1\n          break\n\n        n += 1\n    elif text.strip().startswith(RST_CODE_BLOCK):\n      # Start of a code block.\n      indent = len(INDENT.search(text).group(1))\n      last_line = comments[n].line\n      n += 1\n\n      while n < nc:\n        if comments[n].line - last_line > 1:\n          # Gap in comments. Code block is finished.\n          break\n        last_line = comments[n].line\n\n        if comments[n].text.strip() != \"\":\n          # Blank lines are ignored in code blocks.\n          if len(INDENT.search(comments[n].text).group(1)) <= indent:\n            # Back to original indent, or less. 
The code block is done.\n            break\n        n += 1\n    else:\n      result.append(comments[n])\n      n += 1\n\n  return result\n\n\ndef check_file(checker, file, lines, error_handler):\n  in_code_block = 0\n  code_block_indent = 0\n  num_errors = 0\n\n  comments = extract_comments(lines)\n  errors = []\n  for comment in comments:\n    errors += check_comment(checker, comment.col, comment.text)\n    if comment.last_on_line and len(errors) > 0:\n      # Handle all the errors in a line.\n      num_errors += len(errors)\n      error_handler(file, comment.line, lines, errors)\n      errors = []\n\n  return (len(comments), num_errors)\n\n\ndef execute(files, dictionary_file, fix):\n  checker = SpellChecker(dictionary_file)\n  checker.start()\n\n  handler = print_error\n  if fix:\n    handler = partial(fix_error, checker)\n\n  total_files = 0\n  total_comments = 0\n  total_errors = 0\n  for path in files:\n    with open(path, 'r') as f:\n      lines = f.readlines()\n      total_files += 1\n      (num_comments, num_errors) = check_file(checker, path, lines, handler)\n      total_comments += num_comments\n      total_errors += num_errors\n\n    if fix and num_errors > 0:\n      with open(path, 'w') as f:\n        f.writelines(lines)\n\n  checker.stop()\n\n  print(\"Checked %d file(s) and %d comment(s), found %d error(s).\" %\n        (total_files, total_comments, total_errors))\n\n  return total_errors == 0\n\n\nif __name__ == \"__main__\":\n  # Force UTF-8 across all open and popen calls. Fallback to 'C' as the\n  # language to handle hosts where en_US is not recognized (e.g. 
CI).\n  try:\n    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n  except:\n    locale.setlocale(locale.LC_ALL, 'C.UTF-8')\n\n  default_dictionary = os.path.join(CURR_DIR, 'spelling_dictionary.txt')\n\n  parser = argparse.ArgumentParser(description=\"Check comment spelling.\")\n  parser.add_argument('operation_type',\n                      type=str,\n                      choices=['check', 'fix'],\n                      help=\"specify if the run should 'check' or 'fix' spelling.\")\n  parser.add_argument('target_paths',\n                      type=str,\n                      nargs=\"*\",\n                      help=\"specify the files for the script to process.\")\n  parser.add_argument('-d',\n                      '--debug',\n                      action='count',\n                      default=0,\n                      help=\"Debug spell checker subprocess.\")\n  parser.add_argument('--mark',\n                      action='store_true',\n                      help=\"Emits extra output to mark misspelled words.\")\n  parser.add_argument('--dictionary',\n                      type=str,\n                      default=default_dictionary,\n                      help=\"specify a location for Envoy-specific dictionary words\")\n  parser.add_argument('--color',\n                      type=str,\n                      choices=['on', 'off', 'auto'],\n                      default=\"auto\",\n                      help=\"Controls colorized output. 
Auto limits color to TTY devices.\")\n  parser.add_argument('--test-ignore-exts',\n                      dest='test_ignore_exts',\n                      action='store_true',\n                      help=\"For testing, ignore file extensions.\")\n  args = parser.parse_args()\n\n  COLOR = args.color == \"on\" or (args.color == \"auto\" and sys.stdout.isatty())\n  DEBUG = args.debug\n  MARK = args.mark\n\n  paths = args.target_paths\n  if not paths:\n    paths = ['./api', './include', './source', './test', './tools']\n\n  # Exclude ./third_party/ directory from spell checking, even when requested through arguments.\n  # Otherwise git pre-push hook checks it for merged commits.\n  paths = [\n      path for path in paths\n      if not path.startswith('./third_party/') and not path.startswith('./third_party/')\n  ]\n\n  exts = ['.cc', '.h', '.proto']\n  if args.test_ignore_exts:\n    exts = None\n  target_paths = []\n  for p in paths:\n    if os.path.isdir(p):\n      for root, _, files in os.walk(p):\n        target_paths += [\n            os.path.join(root, f) for f in files if (exts is None or os.path.splitext(f)[1] in exts)\n        ]\n    if os.path.isfile(p) and (exts is None or os.path.splitext(p)[1] in exts):\n      target_paths += [p]\n\n  rv = execute(target_paths, args.dictionary, args.operation_type == 'fix')\n\n  if args.operation_type == 'check':\n    if not rv:\n      print(\n          \"ERROR: spell check failed. Run 'tools/spelling/check_spelling_pedantic.py fix and/or add new \"\n          \"words to tools/spelling/spelling_dictionary.txt'\")\n      sys.exit(1)\n\n    print(\"PASS\")\n"
  },
  {
    "path": "tools/spelling/check_spelling_pedantic_test.py",
    "content": "#! /usr/bin/env python3\n\n# Tests check_spelling_pedantic.py. Normally run from check_spelling_pedantic.sh.\n\nfrom __future__ import print_function\n\nfrom run_command import runCommand\nimport argparse\nimport logging\nimport os\nimport sys\n\ncurr_dir = os.path.dirname(os.path.realpath(__file__))\ntools = os.path.dirname(curr_dir)\nsrc = os.path.join(tools, 'testdata', 'spelling')\ncheck_spelling = sys.executable + \" \" + os.path.join(curr_dir, 'check_spelling_pedantic.py')\n\n\n# Runs the 'check_spelling_pedanic' operation, on the specified file,\n# printing the comamnd run and the status code as well as the stdout,\n# and returning all of that to the caller.\ndef runCheckFormat(operation, filename):\n  command = check_spelling + \" --test-ignore-exts \" + operation + \" \" + filename\n  status, stdout, stderr = runCommand(command)\n  return (command, status, stdout + stderr)\n\n\ndef getInputFile(filename):\n  return os.path.join(src, filename)\n\n\ndef emitStdoutAsError(stdout):\n  logging.error(\"\\n\".join(stdout))\n\n\ndef expectError(filename, status, stdout, expected_substrings):\n  if status == 0:\n    logging.error(\"%s: Expected %d errors, but succeeded\" % (filename, len(expected_substrings)))\n    return 1\n  errors = 0\n  for expected_substring in expected_substrings:\n    found = False\n    for line in stdout:\n      if expected_substring in line:\n        found = True\n        break\n    if not found:\n      logging.error(\"%s: Could not find '%s' in:\\n\" % (filename, expected_substring))\n      emitStdoutAsError(stdout)\n      errors += 1\n\n  return errors\n\n\ndef checkFileExpectingErrors(filename, expected_substrings):\n  command, status, stdout = runCheckFormat(\"check\", getInputFile(filename))\n  return expectError(filename, status, stdout, expected_substrings)\n\n\ndef checkFilePathExpectingOK(filename):\n  command, status, stdout = runCheckFormat(\"check\", filename)\n  if status != 0:\n    logging.error(\"Expected %s 
to have no errors; status=%d, output:\\n\" % (filename, status))\n    emitStdoutAsError(stdout)\n  return status\n\n\ndef checkFileExpectingOK(filename):\n  return checkFilePathExpectingOK(getInputFile(filename))\n\n\ndef runChecks():\n  errors = 0\n\n  errors += checkFileExpectingOK(\"valid\")\n  errors += checkFileExpectingOK(\"skip_file\")\n  errors += checkFileExpectingOK(\"exclusions\")\n\n  errors += checkFileExpectingOK(\"third_party/something/file.cc\")\n  errors += checkFileExpectingOK(\"./third_party/something/file.cc\")\n\n  errors += checkFileExpectingErrors(\"typos\",\n                                     [\"spacific\", \"reelistic\", \"Awwful\", \"combeenations\", \"woork\"])\n  errors += checkFileExpectingErrors(\n      \"skip_blocks\", [\"speelinga\", \"speelingb\", \"speelingc\", \"speelingd\", \"speelinge\"])\n  errors += checkFileExpectingErrors(\"on_off\", [\"speelinga\", \"speelingb\"])\n  errors += checkFileExpectingErrors(\"rst_code_block\", [\"speelinga\", \"speelingb\"])\n  errors += checkFileExpectingErrors(\"word_splitting\", [\"Speeled\", \"Korrectly\"])\n\n  return errors\n\n\nif __name__ == \"__main__\":\n  parser = argparse.ArgumentParser(description='tester for check_format.py.')\n  parser.add_argument('--log', choices=['INFO', 'WARN', 'ERROR'], default='INFO')\n  args = parser.parse_args()\n  logging.basicConfig(format='%(message)s', level=args.log)\n\n  errors = runChecks()\n\n  if errors != 0:\n    logging.error(\"%d FAILURES\" % errors)\n    exit(1)\n  logging.warning(\"PASS\")\n"
  },
  {
    "path": "tools/spelling/check_spelling_pedantic_test.sh",
    "content": "#!/bin/bash\n\ntools=$(dirname \"$(dirname \"$(realpath \"$0\")\")\")\nroot=$(realpath \"$tools/..\")\n\ncd \"$root\" || exit 1\n# to satisfy dependency on run_command\nexport PYTHONPATH=\"$tools\"\n./tools/spelling/check_spelling_pedantic_test.py \"$@\"\n"
  },
  {
    "path": "tools/spelling/spelling_allowlist_words.txt",
    "content": "# One word per line, these words are not spell checked.\n# you can add a comment to each word to explain why you don't need to do a spell check.\n"
  },
  {
    "path": "tools/spelling/spelling_dictionary.txt",
    "content": "# Word list for check_spelling_pedantic.py -- any entry in ALLCAPS will also be accepted in\n# lower case and title case (e.g. HTTP will accept http and Http). Entries in all lower case\n# will accept title case (e.g. lyft matches Lyft). Prefixes (e.g., un-) or suffixes (e.g. -ing)\n# are allowed for any otherwise correctly spelled word.\nABI\nACK\nACL\nAES\nAFAICT\nALPN\nALS\nAMZ\nAPC\nAPI\nARN\nASAN\nASCII\nASM\nASSERTs\nAST\nAWS\nAllowlisted\nBACKTRACE\nBSON\nBPF\nCAS\nCB\nCDN\nCDS\nCEL\nDSR\nHEXDIG\nHEXDIGIT\nLTT\nOWS\nTIDs\nceil\nCHACHA\nCHLO\nCHMOD\nCHLOS\nCHLOs\nCIDR\nCLA\nCLI\nCMSG\nCN\nCNAME\nCOMMANDREPLY\nCP\nCPP\nCPU\nCQ\nCRC\nCRL\nCRLFs\nCRT\nCSDS\nCSRF\nCSS\nCSV\nCTX\nCTXs\nCVC\nCVE\nCX\nCxx\nCYGWIN\nDER\nDESC\nDFATAL\nDGRAM\nDLOG\nDNS\nDNSSEC\nDQUOTE\nDRYs\nDS\nDST\nDW\nDWORD\nEADDRINUSE\nEADDRNOTAVAIL\nEAGAIN\nECDH\nECDHE\nECDS\nECDSA\nECDSAs\nECMP\nECONNREFUSED\nEDESTRUCTION\nEDF\nEINPROGRESS\nEINVAL\nELB\nEMSGSIZE\nENOTFOUND\nENOTSUP\nENV\nEOF\nEOS\nEOY\nEPOLLOUT\nEPOLLRDHUP\nEQ\nERANGE\nEV\nEVAL\nEVLOOP\nEVP\nEWOULDBLOCK\nEXPECTs\nEXPR\nFAQ\nFASTOPEN\nFB\nFCDS\nFCM\nFFFF\nFIN\nFIPS\nFIRSTHDR\nFQDN\nFREEBIND\nFUZZER\nFUZZERS\nGC\nGCC\nGCE\nGCM\nGCOVR\nGCP\nGETting\nGLB\nGOAWAY\nGRPC\nGRO\nGSO\nGSS\nGTEST\nGURL\nGrabbit\nHashable\nHC\nHCM\nHDS\nHMAC\nHPACK\nHTAB\nHTML\nHTTP\nHTTPS\nHV\nIAM\nIANA\nIDL\nIETF\nINADDR\nINET\nINVAL\nIO\nIOS\nIP\nIPPROTO\nIPV\nIPs\nIPv\nITOA\nInjectable\nIsode\nIters\nJSON\nJSONs\nJWKS\nJWKs\nJWS\nJWT\nJWTs\nKB\nKDS\nKarlsson\nKiB\nKille\nLBs\nLC\nLDS\nLEV\nLF\nLHS\nLLVM\nLPT\nLRS\nLoggable\nMB\nMD\nMERCHANTABILITY\nMGET\nMQ\nMSET\nMSVC\nMTLS\nMTU\nMULTIFRAME\nNACK\nNACKed\nNACKs\nNBF\nNBSP\nNDEBUG\nNEXTHDR\nNGHTTP\nNOAUTH\nNOCHECKRESP\nNODELAY\nNOLINT\nNOLINTNEXTLINE\nNONBLOCK\nNONCES\nNOSORT\nNS\nNUL\nNilsson\nNonhashable\nOauth\nOCSP\nOID\nOK\nOOM\nOOMs\nOS\nOSI\nOSS\nOSX\nOT\nOU\nOVFL\nPAYLOADLEN\nPB\nPCC\nPEERCRED\nPEM\nPERF\nPGV\nPID\nPKTINFO\nPNG\nPostCBs\nPREBIND\nPRNG\nPROT\nPostgre\nPostgres\nPrer
eq\nQDCOUNT\nQUIC\nQoS\nRAII\nRANLUX\nRBAC\nRDN\nRDS\nREADME\nRECVDSTADDR\nRECVPKTINFO\nREFNIL\nREQ\nREUSEADDR\nREUSEPORT\nRFC\nRHS\nRLE\nRLS\nRNG\nRPC\nRSA\nRST\nRTDS\nRTTI\nRUNDIR\nRW\nRX\nRXQ\nRebalance\nRunn\nSA\nSAN\nSCT\nSDK\nSDS\nSENDSRCADDR\nSHA\nSHM\nSIGABRT\nSIGBUS\nSIGFPE\nSIGILL\nSIGINT\nSIGPIPE\nSIGSEGV\nSIGTERM\nSMTP\nSNI\nSOTW\nSPD\nSPDY\nSPIFFE\nSPKI\nSQL\nSR\nSRCDIR\nSRDS\nSRV\nSS\nSSL\nSTDSTRING\nSTL\nSTRLEN\nSTS\nSVG\nSymbolizer\nTBD\nTCLAP\nTCP\nTE\nTFO\nTID\nTLS\nTLSv\nTLV\nTMPDIR\nTODO\nTPM\nTSAN\nTSI\nTTL\nTTLs\nTX\nTXT\nUA\nUBSAN\nUDP\nUDS\nUNC\nURI\nURL\nUSEVC\nUTC\nUTF\nUUID\nUUIDs\nVC\nVCHAR\nVH\nVHDS\nVLOG\nVM\nWAITFORONE\nWASM\nWAVM\nWIP\nWKT\nWRONGPASS\nWRR\nWS\nWSA\nWSABUF\nWSAEINVAL\nWSS\nWelford's\nWi\nXDS\nXFCC\nXFF\nXML\nXN\nXNOR\nXSS\nYAML\nZXID\nabsl\naccesslog\naccessor\naccessors\nacks\nacls\naddr\nagg\nalice\nalignas\nalignof\nalloc\nalloca\nallocator\nallowlist\nallowlisted\nalls\nalphanumerics\namongst\nanno\nanys\nappmesh\narg\nargc\nargs\nargv\nartisanal\nary\nasctime\nasm\nasync\natoi\natomicity\natomics\natoull\nauth\nauthenticator\nauthenticators\nauthlen\nauthn\nauths\nauthz\nautoscale\nbackend\nbackends\nbackgrounded\nbackoff\nbackpressure\nbackticks\nbacktraces\nbacktracing\nbalancer\nbalancers\nbarbaz\nbasename\nbaz\nbazel\nbehaviour\nbenchmarked\nbidi\nbignum\nbitfield\nbitset\nbitwise\nblackhole\nblackholed\nbookkeep\nbool\nboolean\nbooleans\nbools\nboringssl\nborks\nbroadcasted\nbuf\nbuflen\nbugprone\nbuiltin\nbuiltins\nbulkstrings\nbursty\nbytecode\nbytestream\nbytestring\ncacheable\ncacheability\ncallee\ncallsite\ncallsites\ncallstack\ncancellable\ncancelled\ncancelling\ncanonicalization\ncanonicalize\ncanonicalized\ncanonicalizer\ncanonicalizing\ncardinality\ncasted\ncharset\ncheckin\nchecksum\nchrono\nchroot\nchunked\nci\nciphersuite\nciphersuites\ncircllhist\nCITT\ncloneable\ncloneability\ncmd\ncmsghdr\ncodebase\ncodec\ncodecs\ncodepath\ncodings\ncombinatorial\ncomparator\ncompat\ncompletable\ncond\ncondvar\
nconf\nconfig\nconfigs\nconn\nconns\nconst\nconstexpr\nconstructible\ncopyable\ncoroutine\ncoroutines\ncors\ncout\ncoverity\ncplusplus\ncpuset\ncreds\ncrypto\ncryptographic\ncryptographically\ncstate\ncstring\nctor\nctrl\ncustomizations\ndarwin\ndatadog\ndatagram\ndatagrams\nde\ndeallocate\ndeallocated\ndeallocating\ndeallocation\ndec\ndechunk\ndechunked\ndecl\ndecls\ndecompressor\ndecompressors\ndecrement\ndecrypt\ndedup\ndedupe\ndeduplicate\ndeduplicates\ndeflater\ndeletable\ndeleter\ndelim\ndeque\ndeprecations\ndereference\ndereferences\nderegistered\ndeserialization\ndeserialize\ndeserialized\ndeserializer\ndeserializers\ndeserializing\ndest\ndestructor\ndestructors\ndesynchronize\ndeterministically\ndeterminize\ndev\ndgst\ndir\ndirname\ndjb\ndowncalls\ndowncasted\ndowncased\ndownstreams\ndrainable\ndtor\ndubbo\ndup\ndurations\ndynamodb\nemplace\nemplaced\nemscripten\nemsdk\nenablement\nencodings\nendian\nendianness\nendl\nenqueue\nenqueued\nenqueues\nenum\nenums\nepoll\nerrno\netag\netags\nevaluator\nevbuffer\nevbuffers\nevconnlistener\nevented\nevwatch\nexe\nexeclp\nexprfor\nexpectable\nextrahelp\nfaceplant\nfacto\nfailover\nfallbacks\nfastbuild\nfavicon\nfbs\nfcntl\nfd\nfds\nfdstat\nfilename\nfilenames\nfileno\nfilesystem\nfirefox\nfixdate\nfixup\nflatbuffer\nflatc\nfmt\nfmtlib\nfn\nformatter\nformatters\nformedness\nfrontend\nftruncate\nfunc\nfunctor\nfunctors\ngRPC\ngateway\ngcov\ngenrule\ngetaddrinfo\ngetaffinity\ngethostname\ngetifaddrs\ngetpeername\ngetsockname\ngetsockopt\ngetter\ngetters\ngithub\nglobals\ngmock\ngoog\ngoogle\ngoto\ngso\ngzip\nhackery\nhacky\nhandshaker\nhardcoded\nhardcoding\nhasher\nhashtagging\nhd\nhdr\nhealthcheck\nhealthchecker\nhealthcheckers\nhealthchecks\nhealths\nhealthz\nhermeticity\nhighp\nhoc\nhostname\nhostnames\nhostset\nhotrestart\nhrefs\nhuffman\nhystrix\nidempotency\nidx\nifdef\niff\nified\nimpl\nimplementors\nimpls\nindices\ninflater\ninflight\n-ing\ninit\ninitializer\ninitializers\ninlined\ninlining\ninobservability\n
inotify\ninstantiation\ninstantiations\ninterpretable\nintra\nints\ninvariance\niovec\niovecs\nips\niptables\nish\nistio\nistream\nistringstream\niteratively\njavascript\njitter\njittered\nkafka\nkeepalive\nkeepalives\nketama\nkeyder\nkqueue\nkubernetes\nkv\nkvs\nlala\nlatencies\nld\nldd\nlen\nlenenc\nlexically\nlibc\nlibevent\nlibprotobuf\nlibtool\nlibstdc\nlifecycle\nlightstep\nlinearization\nlinearize\nlinearized\nlinux\nlivelock\nllvm\nloc\nlocalhost\nlockless\nlogin\nloglevel\nlogstream\nlookup\nlookups\nloopback\nlossy\nlowp\nlstat\nltrim\nlua\nlyft\nmaglev\nmalloc\nmarshaller\nmatchable\nmatcher\nmatchers\nmaxage\nmaxbuffer\nmegamiss\nmem\nmemcmp\nmemcpy\nmergeable\nmessagename\nmetadata\nmetamethod\nmetaprogramming\nmetatable\nmicrobenchmarks\nmidp\nmilli\nmisconfiguration\nmisconfigured\nmixin\nmkdir\nmmap\nmmsg\nmmsghdr\nmongo\nmoveable\nmsec\nmsg\nmsghdr\nmulti\nmulticast\nmultikill\nmultimap\nmultivalue\nmutator\nmutex\nmutexes\nmux\nmuxed\nmysql\nnamelen\nnameserver\nnamespace\nnamespaced\nnamespaces\nnamespacing\nnan\nnatively\nndk\nnetblock\nnetblocks\nnetfilter\nnonblocking\nnoncopyable\nnonresponsive\nnoop\nnop\nnthreads\nntohl\nntop\nnullable\nnulled\nnullopt\nnullptr\nnum\nnumkeys\noauth\nobservability\nocagent\noffsetof\noneof\noneway\nopcode\nopencensus\nopenssl\nopentracing\noptimizations\noptname\noptval\nostream\noutlier\noutliers\noverprovisioned\noverprovisioning\noverridable\noversized\npackagename\npageheap\nparam\nparameterization\nparameterize\nparameterized\nparameterizing\nparams\nparen\nparens\nparentid\nparentspanid\nparseable\nparsers\npassphrase\npassthrough\npathname\npausable\npcall\npcap\npclose\nperformant\npfctl\npipelined\npipelining\npkey\nplaintext\npluggable\npointee\npopen\npos\nposix\npostfix\npostfixes\npostgres\npostgresql\npragma\npre\npreallocate\npreallocating\npreallocation\nprecalculated\nprecompile\nprecompiled\nprecompute\nprecomputed\npredeclared\nprefetch\nprefetched\nprefetches\npreflight\npreorder\nprepend\
nprepended\nprepends\nprev\nprobabilistically\nproc\nprofiler\nprogrammatically\nprometheus\nproto\nprotobuf\nprotobufs\nprotoc\nprotodoc\nprotos\nprotoxform\nproxied\npseudocode\npthread\npton\nptr\nptrs\npubkey\npwd\npy\nqdtext\nqps\nquantile\nquantiles\nquiesce\nquitquitquit\nqvalue\nrapidjson\nratelimit\nratelimited\nratelimiter\nrawseti\nrc\nreadded\nreadonly\nreadv\nrealloc\nrebalanced\nrebalancing\nrebuffer\nrebuilder\nreconnection\nrecurse\nrecv\nrecvfrom\nrecvmmsg\nrecvmsg\nredis\nredispatch\nredistributions\nreentrant\nrefactor\nrefactored\nrefcount\nreferencee\nreferer\nrefetch\nregex\nregexes\nreified\nreify\nreimplements\nrele\nreleasor\nreloadable\nremoting\nrenderers\nreparse\nrepeatability\nreperform\nrepicked\nrepo\nreproducibility\nrequirepass\nreselecting\nreserialize\nreservable\nresize\nresized\nresizes\nresizing\nresolv\nresolvers\nresponder\nrestarter\nresync\nretransmitting\nretriable\nretriggers\nrevalidated\nrevalidation\nrmdir\nrocketmq\nrewriter\nrollout\nroundtrip\nrpcs\nrq\nrtrim\nrtt\nruleset\nrunfiles\nruntime\nruntimes\nrver\nrxhash\nsandboxed\nsanitization\nsanitizer\nsatisfiable\nscalability\nsched\nschedulable\nschemas\nscopekey\nsd\nsecp\nsendmsg\nsendmmsg\nsendto\nserializable\nserializer\nserv\nsetenv\nsetsockopt\nsig\nsigaction\nsigactions\nsiginfo\nsignalstack\nsiloed\nsim\nsizeof\nsmatch\nsnapshotted\nsockaddr\nsocketpair\nsockfd\nsocklen\nsockopt\nsockopts\nsomestring\nspanid\nspdlog\nsplitter\nspoofable\nsrc\nssize\nstackdriver\nstacktrace\nstartup\nstateful\nstatsd\nstderr\nstdev\nstdin\nstdout\nstmt\nstr\nstreambuf\nstrerr\nstrerror\nstringbuf\nstringified\nstringify\nstringstream\nstrtoull\nstruct\nstructs\nsubclassed\nsubclasses\nsubdirectories\nsubdirectory\nsubdirs\nsubexpr\nsubexpressions\nsubitems\nsubmatch\nsubmessages\nsubnet\nsubnets\nsuboptimal\nsubsecond\nsubseconds\nsubsequence\nsubsetting\nsubstr\nsubstring\nsubstrings\nsubtrees\nsubtype\nsubtypes\nsubzone\nsuperclass\nsuperset\nsymlink\nsymlinked\nsymlinks\
nsynchronizer\nsyncookie\nsys\nsyscall\nsyscalls\nsysctl\nsz\ntchar\ntchars\ntcmalloc\ntcpdump\nteardown\ntempdir\ntemplated\ntemplating\ntemplatize\ntemplatized\ntemplatizing\ntestability\ntestcase\ntestcases\ntestdata\ntestee\nthreadsafe\nthru\ntimespan\ntimestamp\ntimestamps\ntimeval\ntmp\ntmpfile\ntokenize\ntokenizes\ntokenizing\ntoolchain\ntraceid\ntraceparent\ntranscode\ntranscoded\ntranscoder\ntranscoding\ntransferral\ntriaged\ntrie\ntuple\ntuples\ntypedef\ntypeid\ntypesafe\nuber\nucontext\nudpa\nuint\nun-\nunacked\nunary\nunconfigurable\nundef\nunderflowing\nunfreed\nunicast\nunicode\nunindexed\nuninstantiated\nuniq\nunittest\nunix\nunref\nunreferenced\nunzigzag\nupcasts\nupstreams\nuptime\nupvalue\nurls\nuserdata\nuserinfo\nusername\nusr\nutil\nutils\nvalgrind\nvalidator\nvalidators\nvanishingly\nvarchar\nvariadic\nvarint\nvec\nvectorize\nverifier\nverifiers\nversa\nversioned\nvhost\nviewable\nvip\nvirtualhost\nvirtualize\nvptr\nwakeup\nwakeups\nwebsocket\nwepoll\nwhitespace\nwhitespaces\nwildcard\nwildcards\nwinsock\nworkspace\nwritev\nxDS\nxDSes\nxeon\nxform\nxhtml\nxid\nxmodem\nxxhash\nxxs\nzag\nzig\nzipkin\nzlib\nOBQ\nSemVer\nSCM\n"
  },
  {
    "path": "tools/spelling/spelling_skip_files.txt",
    "content": "OWNERS.md corpus\n"
  },
  {
    "path": "tools/stack_decode.py",
    "content": "#!/usr/bin/env python3\n\n# Call addr2line as needed to resolve addresses in a stack trace. The addresses\n# will be replaced if they can be resolved into file and line numbers. The\n# executable must include debugging information to get file and line numbers.\n#\n# Two ways to call:\n#   1) Execute binary as a subprocess: stack_decode.py executable_file [args]\n#   2) Read log data from stdin: stack_decode.py -s executable_file\n#\n# In each case this script will add file and line information to any backtrace log\n# lines found and echo back all non-Backtrace lines untouched.\n\nimport collections\nimport re\nimport subprocess\nimport sys\n\n\n# Process the log output looking for stacktrace snippets, for each line found to\n# contain backtrace output extract the address and call add2line to get the file\n# and line information. Output appended to end of original backtrace line. Output\n# any nonmatching lines unmodified. End when EOF received.\ndef decode_stacktrace_log(object_file, input_source, address_offset=0):\n  traces = {}\n  # Match something like:\n  #     [backtrace] [bazel-out/local-dbg/bin/source/server/_virtual_includes/backtrace_lib/server/backtrace.h:84]\n  backtrace_marker = \"\\[backtrace\\] [^\\s]+\"\n  # Match something like:\n  #     ${backtrace_marker} #10: SYMBOL [0xADDR]\n  # or:\n  #     ${backtrace_marker} #10: [0xADDR]\n  stackaddr_re = re.compile(\"%s #\\d+:(?: .*)? 
\\[(0x[0-9a-fA-F]+)\\]$\" % backtrace_marker)\n  # Match something like:\n  #     #10 0xLOCATION (BINARY+0xADDR)\n  asan_re = re.compile(\" *#\\d+ *0x[0-9a-fA-F]+ *\\([^+]*\\+(0x[0-9a-fA-F]+)\\)\")\n\n  try:\n    while True:\n      line = input_source.readline()\n      if line == \"\":\n        return  # EOF\n      stackaddr_match = stackaddr_re.search(line)\n      if not stackaddr_match:\n        stackaddr_match = asan_re.search(line)\n      if stackaddr_match:\n        address = stackaddr_match.groups()[0]\n        if address_offset != 0:\n          address = hex(int(address, 16) - address_offset)\n        file_and_line_number = run_addr2line(object_file, address)\n        file_and_line_number = trim_proc_cwd(file_and_line_number)\n        if address_offset != 0:\n          sys.stdout.write(\"%s->[%s] %s\" % (line.strip(), address, file_and_line_number))\n        else:\n          sys.stdout.write(\"%s %s\" % (line.strip(), file_and_line_number))\n        continue\n      else:\n        # Pass through print all other log lines:\n        sys.stdout.write(line)\n  except KeyboardInterrupt:\n    return\n\n\n# Execute addr2line with a particular object file and input string of addresses\n# to resolve, one per line.\n#\n# Returns list of result lines\ndef run_addr2line(obj_file, addr_to_resolve):\n  return subprocess.check_output([\"addr2line\", \"-Cpie\", obj_file, addr_to_resolve]).decode('utf-8')\n\n\n# Because of how bazel compiles, addr2line reports file names that begin with\n# \"/proc/self/cwd/\" and sometimes even \"/proc/self/cwd/./\". 
This isn't particularly\n# useful information, so trim it out and make a perfectly useful relative path.\ndef trim_proc_cwd(file_and_line_number):\n  trim_regex = r'/proc/self/cwd/(\\./)?'\n  return re.sub(trim_regex, '', file_and_line_number)\n\n\n# Execute pmap with a pid to calculate the addr offset\n#\n# Returns list of extended process memory information.\ndef run_pmap(pid):\n  return subprocess.check_output(['pmap', '-qX', str(pid)]).decode('utf-8')[1:]\n\n\n# Find the virtual address offset of the process. This may be needed due ASLR.\n#\n# Returns the virtual address offset as an integer, or 0 if unable to determine.\ndef find_address_offset(pid):\n  try:\n    proc_memory = run_pmap(pid)\n    match = re.search(r'([a-f0-9]+)\\s+r-xp', proc_memory)\n    if match is None:\n      return 0\n    return int(match.group(1), 16)\n  except (subprocess.CalledProcessError, PermissionError):\n    return 0\n\n\nif __name__ == \"__main__\":\n  if len(sys.argv) > 2 and sys.argv[1] == '-s':\n    decode_stacktrace_log(sys.argv[2], sys.stdin)\n    sys.exit(0)\n  elif len(sys.argv) > 1:\n    rununder = subprocess.Popen(sys.argv[1:],\n                                stdout=subprocess.PIPE,\n                                stderr=subprocess.STDOUT,\n                                universal_newlines=True)\n    offset = find_address_offset(rununder.pid)\n    decode_stacktrace_log(sys.argv[1], rununder.stdout, offset)\n    rununder.wait()\n    sys.exit(rununder.returncode)  # Pass back test pass/fail result\n  else:\n    print(\"Usage (execute subprocess): stack_decode.py executable_file [additional args]\")\n    print(\"Usage (read from stdin): stack_decode.py -s executable_file\")\n    sys.exit(1)\n"
  },
  {
    "path": "tools/testdata/check_format/add_envoy_package.BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n\nenvoy_cc_binary(\n    name = \"foo\",\n)\n"
  },
  {
    "path": "tools/testdata/check_format/add_envoy_package.BUILD.gold",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_binary(\n    name = \"foo\",\n)\n"
  },
  {
    "path": "tools/testdata/check_format/angle_bracket_include.cc",
    "content": "namespace Envoy {\n\n#include <common/common/utility.h>\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/angle_bracket_include.cc.gold",
    "content": "namespace Envoy {\n\n#include \"common/common/utility.h\"\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/api/missing_package.proto",
    "content": ""
  },
  {
    "path": "tools/testdata/check_format/attribute_packed.cc",
    "content": "namespace Envoy {\n\ntypedef struct {\n  int a;\n  int b;\n}  __attribute__((packed)) s;\n} // namespace Envoy"
  },
  {
    "path": "tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD",
    "content": "load(\n    \"@envoy//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_binary\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_binary(\n    name = \"envoy-static\",\n    stamped = True,\n)\n"
  },
  {
    "path": "tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD.gold",
    "content": "load(\n    \"//bazel:envoy_build_system.bzl\",\n    \"envoy_cc_binary\",\n    \"envoy_package\",\n)\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_binary(\n    name = \"envoy-static\",\n    stamped = True,\n)\n"
  },
  {
    "path": "tools/testdata/check_format/bazel_tools.BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n\nenvoy_cc_binary(\n    name = \"envoy-static\",\n    stamped = True,\n    deps = [\"@bazel_tools//some:thing\"],\n)\n"
  },
  {
    "path": "tools/testdata/check_format/canonical_api_deps.BUILD",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_cc_library\", \"envoy_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\n# Deps can be inferred, irrelevant deps are removed.\nenvoy_cc_library(\n    name = \"foo\",\n    srcs = [\"canonical_api_deps.cc\", \"canonical_api_deps.other.cc\"],\n    hdrs = [\"canonical_api_deps.h\"],\n    deps = [\"@envoy_api//envoy/types:pkg_cc_proto\"],\n)\n"
  },
  {
    "path": "tools/testdata/check_format/canonical_api_deps.BUILD.gold",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_cc_library\", \"envoy_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\n# Deps can be inferred, irrelevant deps are removed.\nenvoy_cc_library(\n    name = \"foo\",\n    srcs = [\n        \"canonical_api_deps.cc\",\n        \"canonical_api_deps.other.cc\",\n    ],\n    hdrs = [\"canonical_api_deps.h\"],\n    deps = [\n        \"@envoy_api//envoy/api/v2/core:pkg_cc_proto\",\n        \"@envoy_api//envoy/api/v2/route:pkg_cc_proto\",\n        \"@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto\",\n    ],\n)\n"
  },
  {
    "path": "tools/testdata/check_format/canonical_api_deps.cc",
    "content": "#include \"envoy/config/bootstrap/v2/bootstrap.pb.h\"\n#include \"envoy/api/v2/core/base.pb.h\"\n"
  },
  {
    "path": "tools/testdata/check_format/canonical_api_deps.h",
    "content": "#include \"envoy/api/v2/route/route.pb.h\"\n"
  },
  {
    "path": "tools/testdata/check_format/canonical_api_deps.other.cc",
    "content": "include \"envoy/api/v2/listener/listener.validate.pb.h\"\n"
  },
  {
    "path": "tools/testdata/check_format/canonical_spacing.BUILD",
    "content": "\nlicenses([\"notice\"])  # Apache 2\n\n\n\n\n\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\n\nenvoy_package()\n\n\nenvoy_cc_binary(\n    name = \"foo\",\n)\n\n"
  },
  {
    "path": "tools/testdata/check_format/canonical_spacing.BUILD.gold",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_binary(\n    name = \"foo\",\n)\n"
  },
  {
    "path": "tools/testdata/check_format/clang_format_double_off.cc",
    "content": "namespace Envoy {\n\n// clang-format off\n// Turning clang format off should not be nested\n// clang-format off\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/clang_format_double_on.cc",
    "content": "namespace Envoy {\n\n// Turning clang format on when it already is enabled is not allowed\n// clang-format on\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/clang_format_off.cc",
    "content": "namespace Envoy {\n\n// clang-format off\n// Deliberate trailing spaces after periods in formatted content/comments\n// need to be retained, follow clang-format handling for Envoy-specific\n// rule sets.\n//  ~  ~\n//   .  .\n//     )\n//   ----\n// clang-format on\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/clang_format_on.cc",
    "content": "namespace Envoy {\n\n// clang-format off\n// Some ignored content.\n// clang-format on\n// Over enthusiastic spaces should be fixed after clang-format is turned on\n// Too many spaces.  Here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/clang_format_on.cc.gold",
    "content": "namespace Envoy {\n\n// clang-format off\n// Some ignored content.\n// clang-format on\n// Over enthusiastic spaces should be fixed after clang-format is turned on\n// Too many spaces. Here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/clang_format_trailing_off.cc",
    "content": "namespace Envoy {\n\n// clang format should be turned back on before the end of the file\n// clang-format off\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/commented_throw.cc",
    "content": "namespace Envoy {\n\nvoid foo() {\n  // throw std::runtime_error(\"error\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/condition_variable.cc",
    "content": "#include <condition_variable>\n\nnamespace Envoy {\n\n// Awesome stuff goes here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/condition_variable_any.cc",
    "content": "#include <condition_variable_any>\n\nnamespace Envoy {\n\n// Awesome stuff goes here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/condvar_wait_for.cc",
    "content": "namespace Envoy {\n\n// Directly calling waitFor on a condvar no good; need to inject TimeSystem.\nint waiting() {\n  return condvar.waitFor(mutex, duration);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/counter_from_string.cc",
    "content": "namespace Envoy {\n\nvoid init(Stats::Scope& scope) {\n  scope.counterFromString(\"hello\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/cpp_std.cc",
    "content": "#include <memory>\n\n#include \"absl/memory/memory.h\"\n\nnamespace Envoy {\n\nstd::unique_ptr<int> to_be_fix = absl::make_unique<int>(0);\n\n// Awesome stuff goes here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/cpp_std.cc.gold",
    "content": "#include <memory>\n\n#include \"absl/memory/memory.h\"\n\nnamespace Envoy {\n\nstd::unique_ptr<int> to_be_fix = std::make_unique<int>(0);\n\n// Awesome stuff goes here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/designated_initializers.cc",
    "content": "namespace Envoy {\n\ntypedef struct {\n  int a;\n  int b;\n} s;\n\ns my_struct = {.a = 1, .b = 2};\n\n} // namespace Envoy"
  },
  {
    "path": "tools/testdata/check_format/duration_value.cc",
    "content": "#include <chrono>\n\nnamespace Envoy {\n\nstd::chrono::duration<long int, std::nano> foo() {\n  return std::chrono::steady_clock::duration(12345);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/duration_value_zero.cc",
    "content": "#include <chrono>\n\nnamespace Envoy {\n\nstd::chrono::duration<long int, std::nano> foo_int() {\n  return std::chrono::steady_clock::duration(0);\n}\n\nstd::chrono::duration<long int, std::nano> foo_decimal() {\n  return std::chrono::steady_clock::duration(0.0);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/elvis_operator.cc",
    "content": "namespace Envoy {\n\nint val = 0 ?: 1;\n\n} // namespace Envoy"
  },
  {
    "path": "tools/testdata/check_format/extra_enthusiastic_spaces.cc",
    "content": "namespace Envoy {\n\n// Three spaces.   Need to fix them all in one shot if we want \"fix\" to result\n// in a file that \"check\"s.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/extra_enthusiastic_spaces.cc.gold",
    "content": "namespace Envoy {\n\n// Three spaces. Need to fix them all in one shot if we want \"fix\" to result\n// in a file that \"check\"s.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/gauge_from_string.cc",
    "content": "namespace Envoy {\n\nvoid init(Stats::Scope& scope) {\n  scope.gaugeFromString(\"hello\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/grpc_init.cc",
    "content": "namespace Envoy {\n\nvoid foo() {\n  grpc_init();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/grpc_shutdown.cc",
    "content": "namespace Envoy {\n\nvoid foo() {\n  grpc_shutdown();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/header_order.cc",
    "content": "#include \"absl/types/optional.h\"\n#include \"common/api/api_impl.h\"\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/version/version.h\"\n#include \"common/config/resources.h\"\n#include \"common/config/utility.h\"\n#include \"common/local_info/local_info_impl.h\"\n#include \"common/memory/stats.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/rds_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/stats/thread_local_store.h\"\n#include \"common/upstream/cluster_manager_impl.h\"\n#include <cstdint>\n#include \"envoy/admin/v2alpha/config_dump.pb.h\"\n#include \"envoy/config/bootstrap/v2/bootstrap.pb.h\"\n#include \"envoy/config/bootstrap/v2//bootstrap.pb.validate.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/signal.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/server/options.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n#include <functional>\n#include \"server/configuration_impl.h\"\n#include \"server/connection_handler_impl.h\"\n#include \"server/guarddog_impl.h\"\n#include \"server/listener_hooks.h\"\n#include <signal.h>\n#include <string>\n#include <unordered_set>\n\nnamespace Envoy {\n\n// Something awesome goes here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/header_order.cc.gold",
    "content": "#include <signal.h>\n\n#include <cstdint>\n#include <functional>\n#include <string>\n#include <unordered_set>\n\n#include \"envoy/admin/v2alpha/config_dump.pb.h\"\n#include \"envoy/config/bootstrap/v2//bootstrap.pb.validate.h\"\n#include \"envoy/config/bootstrap/v2/bootstrap.pb.h\"\n#include \"envoy/event/dispatcher.h\"\n#include \"envoy/event/signal.h\"\n#include \"envoy/event/timer.h\"\n#include \"envoy/network/dns.h\"\n#include \"envoy/server/options.h\"\n#include \"envoy/upstream/cluster_manager.h\"\n\n#include \"common/api/api_impl.h\"\n#include \"common/api/os_sys_calls_impl.h\"\n#include \"common/common/utility.h\"\n#include \"common/config/resources.h\"\n#include \"common/config/utility.h\"\n#include \"common/local_info/local_info_impl.h\"\n#include \"common/memory/stats.h\"\n#include \"common/network/address_impl.h\"\n#include \"common/protobuf/utility.h\"\n#include \"common/router/rds_impl.h\"\n#include \"common/runtime/runtime_impl.h\"\n#include \"common/singleton/manager_impl.h\"\n#include \"common/stats/thread_local_store.h\"\n#include \"common/upstream/cluster_manager_impl.h\"\n#include \"common/version/version.h\"\n\n#include \"server/configuration_impl.h\"\n#include \"server/connection_handler_impl.h\"\n#include \"server/guarddog_impl.h\"\n#include \"server/listener_hooks.h\"\n\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\n\n// Something awesome goes here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/histogram_from_string.cc",
    "content": "namespace Envoy {\n\nvoid init(Stats::Scope& scope) {\n  scope.histogramFromString(\"hello\", Stats::Histogram::Unit::Unspecified);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/license.BUILD",
    "content": ""
  },
  {
    "path": "tools/testdata/check_format/license.BUILD.gold",
    "content": "licenses([\"notice\"])  # Apache 2\n"
  },
  {
    "path": "tools/testdata/check_format/long_line.cc",
    "content": "namespace Envoy {\n\nHereIsAVeryLongTypeItsReallyPrettyAbsurdButSomeTimesWeNeedToBeCreative& andIfWeMakeTypeLongWeShouldMakeTheFunctionLongTooDontYouThink(int a, int b);\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/long_line.cc.gold",
    "content": "namespace Envoy {\n\nHereIsAVeryLongTypeItsReallyPrettyAbsurdButSomeTimesWeNeedToBeCreative &\nandIfWeMakeTypeLongWeShouldMakeTheFunctionLongTooDontYouThink(int a, int b);\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/mutex.cc",
    "content": "#include <mutex>\n\nnamespace Envoy {\n\n// Awesome stuff goes here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/no_namespace_envoy.cc",
    "content": "// Lacks the proper Envoy namespace.\n"
  },
  {
    "path": "tools/testdata/check_format/over_enthusiastic_spaces.cc",
    "content": "namespace Envoy {\n\n// Two spaces.  No, one space is the style in Envoy.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/over_enthusiastic_spaces.cc.gold",
    "content": "namespace Envoy {\n\n// Two spaces. No, one space is the style in Envoy.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/pgv_string.proto",
    "content": "// this proto file is used to check proto validation ERROR min_bytes\n"
  },
  {
    "path": "tools/testdata/check_format/proto.BUILD",
    "content": "licenses([\"notice\"])  # Apache 2\n\nenvoy_cc_binary(\n    name = \"envoy-static\",\n    stamped = True,\n    deps = [\"protobuf\"],\n)\n"
  },
  {
    "path": "tools/testdata/check_format/proto.BUILD.gold",
    "content": "licenses([\"notice\"])  # Apache 2\n\nenvoy_cc_binary(\n    name = \"envoy-static\",\n    stamped = True,\n    deps = [\"protobuf\"],\n)\n"
  },
  {
    "path": "tools/testdata/check_format/proto_deps.cc",
    "content": "#include \"google/protobuf.h\"\n\nnamespace Envoy {\n\n// Something awesome goes here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/proto_deps.cc.gold",
    "content": "#include \"google/protobuf.h\"\n\nnamespace Envoy {\n\n// Something awesome goes here.\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/proto_enum_mangling.cc",
    "content": "#include \"foo.h\"\n\nnamespace Envoy {\n\nvoid foo() { auto bar = ::envoy::foo::bar::v2::RedisProxy_ConnPoolSettings_ReadPolicy_ANY; }\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/proto_format.proto",
    "content": "// This comment is too long for the line-limit built into our clang configuration, so it will need to be wrapped.\n"
  },
  {
    "path": "tools/testdata/check_format/proto_format.proto.gold",
    "content": "// This comment is too long for the line-limit built into our clang\n// configuration, so it will need to be wrapped.\n"
  },
  {
    "path": "tools/testdata/check_format/proto_style.cc",
    "content": "namespace Envoy {\n\nProtobuf::Any any;\nProtobuf::Empty empty;\nProtobuf::ListValue list_value;\nProtobuf::Value value = Protobuf::NULL_VALUE;\nProtobuf::StringValue stringvalue;\nProtobuf::Struct struct;\nProtobuf::Value value;\nProtobuf::MapPair mappair;\nProtobufWkt::Map map;\nProtobufWkt::MapPair mappair;\nProtobufUtil::MessageDifferencer messagedifferencer;\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/proto_style.cc.gold",
    "content": "namespace Envoy {\n\nProtobufWkt::Any any;\nProtobufWkt::Empty empty;\nProtobufWkt::ListValue list_value;\nProtobufWkt::Value value = ProtobufWkt::NULL_VALUE;\nProtobufWkt::StringValue stringvalue;\nProtobufWkt::Struct struct;\nProtobufWkt::Value value;\nProtobuf::MapPair mappair;\nProtobuf::Map map;\nProtobuf::MapPair mappair;\nProtobuf::util::MessageDifferencer messagedifferencer;\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/real_time_source.cc",
    "content": "namespace Envoy {\n\nint foo() {\n  RealTimeSource real_time_source;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/real_time_source_override.cc",
    "content": "namespace Envoy {\n\nint foo() {\n  RealTimeSource real_time_source; // NO_CHECK_FORMAT(real_time)\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/real_time_system.cc",
    "content": "namespace Envoy {\n\nint foo() {\n  RealTimeSystem real_time_system;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/regex.cc",
    "content": "#include <regex>\n\nnamespace Envoy {\n\nstruct BadRegex {\n  std::regex bad_;\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/remove_unused_loads.BUILD",
    "content": "load(\"//foo.bzl\", \"bar\")\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_package\", \"envoy_cc_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_binary(\n    name = \"foo\",\n)\n"
  },
  {
    "path": "tools/testdata/check_format/remove_unused_loads.BUILD.gold",
    "content": "load(\"//bazel:envoy_build_system.bzl\", \"envoy_package\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_cc_binary(\n    name = \"foo\",\n)\n"
  },
  {
    "path": "tools/testdata/check_format/serialize_as_string.cc",
    "content": "namespace Envoy {\n\nvoid use_serialize_as_string() {\n  google::protobuf::FieldMask mask;\n  const std::string key = mask.SerializeAsString();\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/shared_mutex.cc",
    "content": "#include <shared_mutex>\n\nnamespace Envoy {\n\nvoid make_a_mutex() {\n  std::shared_timed_mutex mutex;\n}\n\n} // namespace Envoy\n\n"
  },
  {
    "path": "tools/testdata/check_format/skip_envoy_package.BUILD",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_binary(\n    name = \"foo\",\n)\n"
  },
  {
    "path": "tools/testdata/check_format/skip_envoy_package.BUILD.gold",
    "content": "load(\"@rules_cc//cc:defs.bzl\", \"cc_binary\")\n\nlicenses([\"notice\"])  # Apache 2\n\ncc_binary(\n    name = \"foo\",\n)\n"
  },
  {
    "path": "tools/testdata/check_format/sleep.cc",
    "content": "namespace Envoy {\n\n// Directly calling sleep_for is no good; must inject time system.\nint waiting() {\n  return std::this_thread::sleep_for(mutex, duration);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_any.cc",
    "content": "#include <any>\n\nnamespace Envoy {\n    void bar() {\n        std::any foo;\n    }\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_atomic_free_functions.cc",
    "content": "#include <atomic>\n\nnamespace Envoy {\n\nvoid do_atomic_stuff() {\n  std::atomic<bool> atomic_flag(false);\n  std::atomic_store(&atomic_flag, true);\n}\n\n} // namespace Envoy\n\n"
  },
  {
    "path": "tools/testdata/check_format/std_get_if.cc",
    "content": "#include <variant>\n\nnamespace Envoy {\n  void foo() {\n    absl::variant<int, float> x{12};\n    auto y = std::get_if<int>(&x);\n  }\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_get_time.cc",
    "content": "#include <stdio>\n#include <iomanip>\n\nnamespace Envoy {\n\nvoid parse_time() {\n  std::tm t = {};\n  std::istringstream ss(\"2018-December-17 14:38:00\");\n  ss >> std::get_time(&t, \"%Y-%b-%d %H:%M:%S\");\n  if (ss.fail()) {\n    std::cout << \"Parse failed\\n\";\n  } else {\n    std::cout << std::put_time(&t, \"%c\") << '\\n';\n  }\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_holds_alternative.cc",
    "content": "#include <variant>\n\nnamespace Envoy {\n  void foo() {\n    absl::variant<int, double> x{12};\n    auto y = std::holds_alternative<double>(x);\n  }\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_make_optional.cc",
    "content": "#include <optional>\n\nnamespace Envoy {\n    void foo() {\n      uint64_t value = 1;\n      uint64_t optional_value = std::make_optional<uint64_t>(value);\n    }\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_monostate.cc",
    "content": "#include <variant>\n\nnamespace Envoy {\n  struct S {\n    S(int i) : i(i) {}\n    int i;\n  };\n\n  void foo() {\n    absl::variant<std::monostate, S> x;\n  }\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_optional.cc",
    "content": "#include <optional>\n\nnamespace Envoy {\n    void bar() {\n        std::optional<int> foo;\n    }\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_string_view.cc",
    "content": "#include <string>\n\nnamespace Envoy {\n  void foo() {\n    std::string_view x(\"a string literal\");\n  }\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_unordered_map.cc",
    "content": "#include <unordered_map>\n\nnamespace Envoy {\n\nstd::unordered_map<int, int> foo;\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_unordered_set.cc",
    "content": "#include <unordered_set>\n\nnamespace Envoy {\n\nstd::unordered_set<int, int> foo;\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_variant.cc",
    "content": "#include <variant>\n\nnamespace Envoy {\n    void bar() {\n        std::variant<int, float> foo;\n    }\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/std_visit.cc",
    "content": "#include <variant>\n\nnamespace Envoy {\n  struct SomeVisitorFunctor {\n    template<typename T>\n    void operator()(const T& i) const {}\n  };\n\n  void foo() {\n    absl::variant<int, double> x{12};\n    SomeVisitorFunctor visitor;\n    std::visit(visitor, x);\n  }\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/steady_clock.cc",
    "content": "namespace Envoy {\n\nint foo() { return std::chrono::steady_clock::now(); }\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/strerror.cc",
    "content": "#include <string.h>\n\nnamespace Envoy {\n\nchar* get_error_illegal(int err) { return strerror(err); }\nchar* get_error_legal1(int err) { return some_other_strerror(err); }\nchar* get_error_legal2(int err) { return strerror2(err); }\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/system_clock.cc",
    "content": "namespace Envoy {\n\nint foo() { return std::chrono::system_clock::now(); }\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/test/register_factory.cc",
    "content": "namespace Envoy {\n\nstatic Registry::RegisterFactory<Factory, FactoryType> registration;\n\n} // namespace\n"
  },
  {
    "path": "tools/testdata/check_format/test_naming.cc",
    "content": "namespace Envoy {\n\nTEST_F(FooBar, testSomething) {\n  \n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/testing_test.cc",
    "content": "namespace Envoy {\n\nusing testing::Test;\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/throw.cc",
    "content": "namespace Envoy {\n\nvoid foo() {\n  throw std::runtime_error(\"error\");\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/time_system_wait_for.cc",
    "content": "namespace Envoy {\n\nint waiting() { return timeSystem().waitFor(mutex, condvar, duration); }\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/unpack_to.cc",
    "content": "namespace Envoy {\n\nint foo() {\n  ProtobufWky::Any bar;\n  Protobuf::Message baz;\n  bar.UnpackTo(baz);\n}\n\n} // namespace Envoy\n"
  },
  {
    "path": "tools/testdata/check_format/update_license.BUILD",
    "content": "load(\"//some:thing.bzl\", \"foo\")\n\nlicenses([\"whatevs\"])\n\nfoo()\n"
  },
  {
    "path": "tools/testdata/check_format/update_license.BUILD.gold",
    "content": "load(\"//some:thing.bzl\", \"foo\")\n\nlicenses([\"notice\"])  # Apache 2\n\nfoo()\n"
  },
  {
    "path": "tools/testdata/check_format/version_history/current.rst",
    "content": "1.10.0 (pending)\n================\n\nSection One\n-----------------------------\n*Some doc text*\n\n* zzzzz: this should be alphabatized after a.\n* aaaaa: this should be alphabatized before z.\n* aaaaa: aaaa is before 'this'.\n* access log: Added should be added not Added.\n\nAnother Section\n---------------\n*Doc string here*\n\n* server: changed server code.\n* upstream: made a change.\n\nDeprecated\n----------\n\n* no\n* enforcement\n* here\n"
  },
  {
    "path": "tools/testdata/protoxform/BUILD",
    "content": "load(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"fix_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//tools/testdata/protoxform/envoy/v2:fix_protos\",\n    ],\n)\n\nproto_library(\n    name = \"freeze_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//tools/testdata/protoxform/envoy/active_non_terminal/v2:freeze_protos\",\n        \"//tools/testdata/protoxform/envoy/active_terminal/v2:freeze_protos\",\n        \"//tools/testdata/protoxform/envoy/frozen/v2:freeze_protos\",\n        \"//tools/testdata/protoxform/envoy/frozen/v3:freeze_protos\",\n    ],\n)\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD",
    "content": "load(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"freeze_protos\",\n    srcs = [\"active_non_terminal.proto\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.active_non_terminal.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage ActiveNonTerminal {\n  int32 foo = 1 [deprecated = true];\n  int32 bar = 2;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.active_non_terminal.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.active_non_terminal.v2\";\noption java_outer_classname = \"ActiveNonTerminalProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\nmessage ActiveNonTerminal {\n  int32 foo = 1 [deprecated = true];\n\n  int32 bar = 2;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.active_non_terminal.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.active_non_terminal.v3\";\noption java_outer_classname = \"ActiveNonTerminalProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage ActiveNonTerminal {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.active_non_terminal.v2.ActiveNonTerminal\";\n\n  int32 hidden_envoy_deprecated_foo = 1 [deprecated = true];\n\n  int32 bar = 2;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.active_non_terminal.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.active_non_terminal.v3\";\noption java_outer_classname = \"ActiveNonTerminalProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage ActiveNonTerminal {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.active_non_terminal.v2.ActiveNonTerminal\";\n\n  reserved 1;\n\n  reserved \"foo\";\n\n  int32 bar = 2;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_terminal/v2/BUILD",
    "content": "load(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"freeze_protos\",\n    srcs = [\"active_terminal.proto\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.active_terminal.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage ActiveTerminal {\n  int32 foo = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.active_terminal.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.active_terminal.v2\";\noption java_outer_classname = \"ActiveTerminalProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage ActiveTerminal {\n  int32 foo = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.envoy_internal.gold",
    "content": ""
  },
  {
    "path": "tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.gold",
    "content": ""
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v2/BUILD",
    "content": "load(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"freeze_protos\",\n    srcs = [\"frozen.proto\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v2/frozen.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.frozen.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\nmessage Frozen {\n  int32 foo = 1;\n  int32 bar = 2 [deprecated = true];\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.frozen.v2;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.frozen.v2\";\noption java_outer_classname = \"FrozenProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = FROZEN;\n\nmessage Frozen {\n  int32 foo = 1;\n\n  int32 bar = 2 [deprecated = true];\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.frozen.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.frozen.v3\";\noption java_outer_classname = \"FrozenProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nmessage Frozen {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.frozen.v2.Frozen\";\n\n  int32 foo = 1;\n\n  int32 hidden_envoy_deprecated_bar = 2 [deprecated = true];\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.frozen.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.frozen.v3\";\noption java_outer_classname = \"FrozenProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nmessage Frozen {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.frozen.v2.Frozen\";\n\n  reserved 2;\n\n  reserved \"bar\";\n\n  int32 foo = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v3/BUILD",
    "content": "load(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"freeze_protos\",\n    srcs = [\"frozen.proto\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\"@com_github_cncf_udpa//udpa/annotations:pkg\"],\n)\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v3/frozen.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.frozen.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage Frozen {\n  int32 foo = 1;\n  reserved 2;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.frozen.v3;\n\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.frozen.v3\";\noption java_outer_classname = \"FrozenProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage Frozen {\n  reserved 2;\n\n  int32 foo = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.envoy_internal.gold",
    "content": ""
  },
  {
    "path": "tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.gold",
    "content": ""
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/BUILD",
    "content": "load(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"fix_protos\",\n    srcs = [\n        \"discovery_service.proto\",\n        \"fully_qualified_names.proto\",\n        \"oneof.proto\",\n        \"package_move.proto\",\n        \"sample.proto\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"//tools/testdata/protoxform/external:external_protos\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@envoy_api//envoy/annotations:pkg\",\n        \"@envoy_api//envoy/api/v2:pkg\",\n    ],\n)\n\nproto_library(\n    name = \"freeze_protos\",\n    srcs = [\n        \"active_non_terminal.proto\",\n        \"active_terminal.proto\",\n        \"frozen.proto\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"@com_github_cncf_udpa//udpa/annotations:pkg\",\n        \"@envoy_api//envoy/annotations:pkg\",\n        \"@envoy_api//envoy/api/v2:pkg\",\n    ],\n)\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/discovery_service.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nservice SomeDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.v2.SomeResource\";\n\n  rpc StreamSomething(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc DeltaSomething(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchSomething(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:some\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\nmessage SomeResource {\n  string foo = 1 [(udpa.annotations.field_migrate).rename = \"bar\"];\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/discovery_service.proto.active_or_frozen.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v2\";\noption java_outer_classname = \"DiscoveryServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nservice SomeDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.v2.SomeResource\";\n\n  rpc StreamSomething(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc DeltaSomething(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchSomething(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {\n    option (google.api.http).post = \"/v2/discovery:some\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\nmessage SomeResource {\n  string foo = 1 [(udpa.annotations.field_migrate).rename = \"bar\"];\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v3;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v3\";\noption java_outer_classname = \"DiscoveryServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nservice SomeDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.v3.SomeResource\";\n\n  rpc StreamSomething(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc DeltaSomething(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchSomething(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:some\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\nmessage SomeResource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.SomeResource\";\n\n  string bar = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v3;\n\nimport \"envoy/api/v2/discovery.proto\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"envoy/annotations/resource.proto\";\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v3\";\noption java_outer_classname = \"DiscoveryServiceProto\";\noption java_multiple_files = true;\noption java_generic_services = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nservice SomeDiscoveryService {\n  option (envoy.annotations.resource).type = \"envoy.v3.SomeResource\";\n\n  rpc StreamSomething(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) {\n  }\n\n  rpc DeltaSomething(stream api.v2.DeltaDiscoveryRequest)\n      returns (stream api.v2.DeltaDiscoveryResponse) {\n  }\n\n  rpc FetchSomething(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {\n    option (google.api.http).post = \"/v3/discovery:some\";\n    option (google.api.http).body = \"*\";\n  }\n}\n\nmessage SomeResource {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.SomeResource\";\n\n  string bar = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"tools/testdata/protoxform/external/root_type.proto\";\nimport \"tools/testdata/protoxform/external/package_type.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.external.v3\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// Verifies normalization of fully-qualified type names.\nmessage UsesFullyQualifiedTypeNames {\n\n  envoy.api.v2.core.Locality another_envoy_type = 1;\n  .envoy.api.v2.core.Locality another_envoy_type_fqn = 2;\n\n  google.protobuf.Any google_protobuf_any = 3;\n  .google.protobuf.Any google_protobuf_any_fqn = 4;\n\n  external.PackageLevelType external_package_level_type = 5;\n  .external.PackageLevelType external_package_level_type_fqn = 6;\n\n  .RootLevelType external_root_level_type_fqn = 7;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.active_or_frozen.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"tools/testdata/protoxform/external/package_type.proto\";\nimport \"tools/testdata/protoxform/external/root_type.proto\";\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v2\";\noption java_outer_classname = \"FullyQualifiedNamesProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.external.v3\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\n// Verifies normalization of fully-qualified type names.\n// [#next-free-field: 8]\nmessage UsesFullyQualifiedTypeNames {\n  api.v2.core.Locality another_envoy_type = 1;\n\n  api.v2.core.Locality another_envoy_type_fqn = 2;\n\n  google.protobuf.Any google_protobuf_any = 3;\n\n  google.protobuf.Any google_protobuf_any_fqn = 4;\n\n  external.PackageLevelType external_package_level_type = 5;\n\n  external.PackageLevelType external_package_level_type_fqn = 6;\n\n  .RootLevelType external_root_level_type_fqn = 7;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.envoy_internal.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.external.v3;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"tools/testdata/protoxform/external/package_type.proto\";\nimport \"tools/testdata/protoxform/external/root_type.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.external.v3\";\noption java_outer_classname = \"FullyQualifiedNamesProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// Verifies normalization of fully-qualified type names.\n// [#next-free-field: 8]\nmessage UsesFullyQualifiedTypeNames {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.v2.UsesFullyQualifiedTypeNames\";\n\n  api.v2.core.Locality another_envoy_type = 1;\n\n  api.v2.core.Locality another_envoy_type_fqn = 2;\n\n  google.protobuf.Any google_protobuf_any = 3;\n\n  google.protobuf.Any google_protobuf_any_fqn = 4;\n\n  .external.PackageLevelType external_package_level_type = 5;\n\n  .external.PackageLevelType external_package_level_type_fqn = 6;\n\n  .RootLevelType external_root_level_type_fqn = 7;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.external.v3;\n\nimport \"envoy/api/v2/core/base.proto\";\n\nimport \"google/protobuf/any.proto\";\n\nimport \"tools/testdata/protoxform/external/package_type.proto\";\nimport \"tools/testdata/protoxform/external/root_type.proto\";\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.external.v3\";\noption java_outer_classname = \"FullyQualifiedNamesProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\n// Verifies normalization of fully-qualified type names.\n// [#next-free-field: 8]\nmessage UsesFullyQualifiedTypeNames {\n  option (udpa.annotations.versioning).previous_message_type =\n      \"envoy.v2.UsesFullyQualifiedTypeNames\";\n\n  api.v2.core.Locality another_envoy_type = 1;\n\n  api.v2.core.Locality another_envoy_type_fqn = 2;\n\n  google.protobuf.Any google_protobuf_any = 3;\n\n  google.protobuf.Any google_protobuf_any_fqn = 4;\n\n  .external.PackageLevelType external_package_level_type = 5;\n\n  .external.PackageLevelType external_package_level_type_fqn = 6;\n\n  .RootLevelType external_root_level_type_fqn = 7;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/oneof.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage OneofExample {\n  string foo = 1 [(udpa.annotations.field_migrate).oneof_promotion = \"baz_specifier\"];\n\n  oneof bar_specifier {\n    string bar = 2;\n  }\n\n  string blah = 3 [(udpa.annotations.field_migrate).oneof_promotion = \"bar_specifier\"];\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/oneof.proto.active_or_frozen.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v2\";\noption java_outer_classname = \"OneofProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage OneofExample {\n  string foo = 1 [(udpa.annotations.field_migrate).oneof_promotion = \"baz_specifier\"];\n\n  oneof bar_specifier {\n    string bar = 2;\n  }\n\n  string blah = 3 [(udpa.annotations.field_migrate).oneof_promotion = \"bar_specifier\"];\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v3\";\noption java_outer_classname = \"OneofProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nmessage OneofExample {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.OneofExample\";\n\n  oneof baz_specifier {\n    string foo = 1;\n  }\n\n  oneof bar_specifier {\n    string bar = 2;\n\n    string blah = 3;\n  }\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v3\";\noption java_outer_classname = \"OneofProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nmessage OneofExample {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.OneofExample\";\n\n  oneof baz_specifier {\n    string foo = 1;\n  }\n\n  oneof bar_specifier {\n    string bar = 2;\n\n    string blah = 3;\n  }\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/package_move.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.foo.v3\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage Package {\n  message Entry {\n    string key = 1;\n    string value = 2;\n  }\n  repeated Entry entries = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/package_move.proto.active_or_frozen.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v2\";\noption java_outer_classname = \"PackageMoveProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_migrate).move_to_package = \"envoy.foo.v3\";\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nmessage Package {\n  message Entry {\n    string key = 1;\n\n    string value = 2;\n  }\n\n  repeated Entry entries = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.foo.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.foo.v3\";\noption java_outer_classname = \"PackageMoveProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nmessage Package {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.Package\";\n\n  message Entry {\n    option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.Package.Entry\";\n\n    string key = 1;\n\n    string value = 2;\n  }\n\n  repeated Entry entries = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.foo.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.foo.v3\";\noption java_outer_classname = \"PackageMoveProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nmessage Package {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.Package\";\n\n  message Entry {\n    option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.Package.Entry\";\n\n    string key = 1;\n\n    string value = 2;\n  }\n\n  repeated Entry entries = 1;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/sample.proto",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nenum SomeEnum {\n  DEFAULT = 0 [deprecated = true];\n  FOO = 1;\n  BAR = 2 [deprecated = true];\n  BAZ = 3 [(udpa.annotations.enum_value_migrate).rename = \"WOW\"];\n}\n\nmessage Sample {\n  message Entry {\n    string key = 1;\n    string value = 2;\n  }\n  enum DeprecateEnum {\n    option deprecated = true;\n    FIRST = 0;\n    SECOND = 1;\n  }\n  repeated Entry entries = 1;\n  string will_deprecated = 2 [deprecated = true];\n  string will_rename_compoent = 3 [(udpa.annotations.field_migrate).rename = \"renamed_component\"];\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v2;\n\nimport \"udpa/annotations/migrate.proto\";\nimport \"udpa/annotations/status.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v2\";\noption java_outer_classname = \"SampleProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = ACTIVE;\n\nenum SomeEnum {\n  DEFAULT = 0 [deprecated = true];\n  FOO = 1;\n  BAR = 2 [deprecated = true];\n  BAZ = 3 [(udpa.annotations.enum_value_migrate).rename = \"WOW\"];\n}\n\nmessage Sample {\n  enum DeprecateEnum {\n    option deprecated = true;\n\n    FIRST = 0;\n    SECOND = 1;\n  }\n\n  message Entry {\n    string key = 1;\n\n    string value = 2;\n  }\n\n  repeated Entry entries = 1;\n\n  string will_deprecated = 2 [deprecated = true];\n\n  string will_rename_compoent = 3 [(udpa.annotations.field_migrate).rename = \"renamed_component\"];\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v3\";\noption java_outer_classname = \"SampleProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nenum SomeEnum {\n  hidden_envoy_deprecated_DEFAULT = 0 [deprecated = true];\n  FOO = 1;\n  hidden_envoy_deprecated_BAR = 2 [deprecated = true];\n  WOW = 3;\n}\n\nmessage Sample {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.Sample\";\n\n  enum DeprecateEnum {\n    option deprecated = true;\n\n    FIRST = 0;\n    SECOND = 1;\n  }\n\n  message Entry {\n    option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.Sample.Entry\";\n\n    string key = 1;\n\n    string value = 2;\n  }\n\n  repeated Entry entries = 1;\n\n  string hidden_envoy_deprecated_will_deprecated = 2 [deprecated = true];\n\n  string renamed_component = 3;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold",
    "content": "syntax = \"proto3\";\n\npackage envoy.v3;\n\nimport \"udpa/annotations/status.proto\";\nimport \"udpa/annotations/versioning.proto\";\n\noption java_package = \"io.envoyproxy.envoy.v3\";\noption java_outer_classname = \"SampleProto\";\noption java_multiple_files = true;\noption (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;\n\nenum SomeEnum {\n  reserved 2;\n\n  reserved \"BAR\";\n\n  DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 [deprecated = true];\n  FOO = 1;\n  WOW = 3;\n}\n\nmessage Sample {\n  option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.Sample\";\n\n  message Entry {\n    option (udpa.annotations.versioning).previous_message_type = \"envoy.v2.Sample.Entry\";\n\n    string key = 1;\n\n    string value = 2;\n  }\n\n  reserved 2;\n\n  reserved \"will_deprecated\";\n\n  repeated Entry entries = 1;\n\n  string renamed_component = 3;\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/external/BUILD",
    "content": "load(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"external_protos\",\n    srcs = [\n        \"package_type.proto\",\n        \"root_type.proto\",\n    ],\n    visibility = [\"//visibility:public\"],\n    deps = [],\n)\n"
  },
  {
    "path": "tools/testdata/protoxform/external/package_type.proto",
    "content": "syntax = \"proto3\";\n\npackage external;\n\n// Type that belongs to a non-envoy package.\n//\n// Part of a test suite that verifies normalization of\n// fully-qualified type names.\nmessage PackageLevelType {\n}\n"
  },
  {
    "path": "tools/testdata/protoxform/external/root_type.proto",
    "content": "syntax = \"proto3\";\n\n// Type that doesn't belong to any package.\n//\n// Part of a test suite that verifies normalization of\n// fully-qualified type names.\nmessage RootLevelType {\n}\n"
  },
  {
    "path": "tools/testdata/spelling/exclusions",
    "content": "// Lots of things are excluded:\n\n// Github user names:\n// TODO(speeling): is ok\n// TODO(@speeling): is ok\n// TODO(speeling) is ok\n// TODO(@speeling) is ok\n// NOTE(speeling): is ok\n// NOTE(@speeling): is ok\n// NOTE(speeling) is ok\n// NOTE(@speeling) is ok\n// Even just bare @speeling is ok\n\n// Documentation comments:\n// @param speeling is ignored here\n// @return speeling is ignored here\n\n// Base64:\n// ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789/+=\n// ABCDEFGHIJKLMN12\n\n// Hex:\n// ACEFBCDA   acefbcda\n// (ACEFBCDA) (acefbcda)\n// AB:CD:EF   ab:cd:ef\n// 0xABCD     0xabcd\n// 0xD        0xd\n// ABCDEFAB-ABCD-ABCD-ABCD-ABCDEFABCDEF\n// abcdefab-abcd-abcd-abcd-abcdefabcdef\n\n// Bit fields:\n// 00xxxxxx 00XXXXX\n// 11yyyyyy 11YYYYY\n// AABBAABB\n\n// IPv6:\n// FC00::/8\n// 2001::abcd/64\n\n// Quoted words:\n// \"speeling\"          'speeling'          *speeling*\n// \"speeling-is-fun-1\" 'speeling-is-fun-1' *speeling-is-fun-1*\n// \"speeling:is:fun:1\" 'speeling:is:fun:1' *speeling:is:fun:1*\n// \"speeling.is.fun.1\" 'speeling.is.fun.1' *speeling.is.fun.1*\n\n// Quoted expressions:\n// `std::speeling`\n// `namespace::speeling<T>`\n\n// Tuple expressions\n// (speeling, phun)\n// (speeling,phun)\n\n// Flags:\n// %speeling\n// -speeling\n\n// RST links:\n// `speeling <https://speeling.com>`_\n// :ref:`speeling <speeling>`\n\n// RST literals:\n// ``speeling``\n\n// Paths:\n// /usr/local/speeling\n// /dev/speeling/*\n// envoy/speeling.h\n// envoy/speeling.cc\n// envoy/speeling.py\n// envoy/speeling.sh\n\n// Ordinals:\n// 1st, 2nd, 3rd, 4th, 10th, 11th, 1525th.\n\n// Affixes:\n// un-comment\n// uncomment\n// verb-ing\n// verbing\n\n"
  },
  {
    "path": "tools/testdata/spelling/on_off",
    "content": "// Bad speelinga.\n\n// SPELLCHECKER(off)\n// Ignoring bad speeling.\n\n// SPELLCHECKER(on)\n// Bad speelingb.\n\n// SPELLCHECKER(off)\n// Ignoring bad speeling to EOF.\n"
  },
  {
    "path": "tools/testdata/spelling/rst_code_block",
    "content": "// Code blocks are ignored.\n\n// .. code-block:: yaml\n//\n//   - speeling: ignored\n//     foo: bar\n//\n//   - speeling: still ignored\n//\n// Code block ends. No bad speelinga allowed here.\n//\n//   .. code-block:: cpp\n//\n//     std::string speeling = \"ignored\";\n//\n// Code block ends. No bad speelingb allowed here.\n"
  },
  {
    "path": "tools/testdata/spelling/skip_blocks",
    "content": "// Bad speelinga.\n//\n// SPELLCHECKER(skip-block)\n// Ignored bad speeling.\n\n// Bad speelingb.\n//\n// SPELLCHECKER(skip-block)\n// Ignored bad speeling.\n//\n// Also ignored bad speeling.\nCode with bad speeling\n// Bad speelingc.\n\n/*\n * Bad speelingd.\n *\n * SPELLCHECKER(skip-block)\n *\n * Ignored bad speeling.\n */\n\n/* Bad speelinge. */\n"
  },
  {
    "path": "tools/testdata/spelling/skip_file",
    "content": "// Bad speeling is ignored everywhere in this file.\n\n// SPELLCHECKER(skip-file)\n\n// Still ignorig bad speeling.\n"
  },
  {
    "path": "tools/testdata/spelling/typos",
    "content": "// Don't be too spacific.\n\n/**\n * Let's be reelistic.\n */\n\n/* Awwful */ /* combeenations */ // still woork.\n"
  },
  {
    "path": "tools/testdata/spelling/valid",
    "content": "// This is totally fine.\n\nIgnore stufff that isn't a komment.\n\n/* So is this. */\n\nHeer too.\n\n/**\n * As well as this.\n */\n"
  },
  {
    "path": "tools/testdata/spelling/word_splitting",
    "content": "// CamelCaseWordsMustBeSpeeledCorrectly.\n\n// snakeCaseWordsMustBeSpelledKorrectlyToo.\n"
  },
  {
    "path": "tools/type_whisperer/BUILD",
    "content": "load(\"@rules_python//python:defs.bzl\", \"py_binary\")\nload(\"//bazel:envoy_build_system.bzl\", \"envoy_cc_library\", \"envoy_package\", \"envoy_proto_library\")\nload(\"//tools/type_whisperer:api_build_file.bzl\", \"api_build_file\")\nload(\"//tools/type_whisperer:file_descriptor_set_text.bzl\", \"file_descriptor_set_text\")\nload(\"//tools/type_whisperer:type_database.bzl\", \"type_database\")\nload(\"//tools/type_whisperer:proto_cc_source.bzl\", \"proto_cc_source\")\n\nlicenses([\"notice\"])  # Apache 2\n\nenvoy_package()\n\nenvoy_proto_library(\n    name = \"types\",\n    srcs = [\"types.proto\"],\n)\n\npy_binary(\n    name = \"type_whisperer\",\n    srcs = [\"type_whisperer.py\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":types_py_proto\",\n        \"//tools/api_proto_plugin\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg_py_proto\",\n        \"@com_google_protobuf//:protobuf_python\",\n    ],\n)\n\npy_binary(\n    name = \"typedb_gen\",\n    srcs = [\"typedb_gen.py\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":api_type_db_proto_py_proto\",\n        \":types_py_proto\",\n        \"//tools/api_proto_plugin:utils\",\n        \"@com_google_protobuf//:protobuf_python\",\n    ],\n)\n\npy_binary(\n    name = \"file_descriptor_set_text_gen\",\n    srcs = [\"file_descriptor_set_text_gen.py\"],\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \"@com_github_cncf_udpa//udpa/annotations:pkg_py_proto\",\n        \"@com_google_protobuf//:protobuf_python\",\n    ],\n)\n\npy_binary(\n    name = \"proto_cc_source_gen\",\n    srcs = [\"proto_cc_source_gen.py\"],\n    visibility = [\"//visibility:public\"],\n)\n\nlabel_flag(\n    name = \"api_type_db_target\",\n    build_setting_default = \"@envoy_api_canonical//versioning:active_protos\",\n    visibility = [\"//visibility:public\"],\n)\n\ntype_database(\n    name = \"api_type_db\",\n    targets = [\":api_type_db_target\"],\n 
   visibility = [\"//visibility:public\"],\n)\n\nfile_descriptor_set_text(\n    name = \"all_protos_pb_text\",\n    deps = [\"@envoy_api_canonical//:all_protos\"],\n)\n\nfile_descriptor_set_text(\n    name = \"all_protos_with_ext_pb_text\",\n    with_external_deps = True,\n    deps = [\"@envoy_api_canonical//:all_protos\"],\n)\n\nproto_cc_source(\n    name = \"embedded_all_protos\",\n    constant = \"AllProtosPbText\",\n    deps = [\":all_protos_pb_text\"],\n)\n\nproto_cc_source(\n    name = \"embedded_api_type_db\",\n    constant = \"ApiTypeDbPbText\",\n    deps = [\":api_type_db\"],\n)\n\nenvoy_cc_library(\n    name = \"api_type_db_lib\",\n    srcs = [\n        \"api_type_db.cc\",\n        \":embedded_all_protos\",\n        \":embedded_api_type_db\",\n    ],\n    hdrs = [\"api_type_db.h\"],\n    deps = [\n        \"//source/common/protobuf\",\n        \"//tools/type_whisperer:api_type_db_proto_cc_proto\",\n        \"@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto\",\n        \"@com_google_absl//absl/container:node_hash_map\",\n    ],\n)\n\nenvoy_proto_library(\n    name = \"api_type_db_proto\",\n    srcs = [\"api_type_db.proto\"],\n    deps = [\"//tools/type_whisperer:types\"],\n)\n\npy_binary(\n    name = \"proto_build_targets_gen\",\n    srcs = [\"proto_build_targets_gen.py\"],\n    deps = [\n        \":api_type_db_proto_py_proto\",\n    ],\n)\n\napi_build_file(\n    name = \"api_build_file\",\n    typedb = \"//tools/type_whisperer:api_type_db\",\n)\n"
  },
  {
    "path": "tools/type_whisperer/api_build_file.bzl",
    "content": "def _api_build_file(ctx):\n    pb_text_set = depset()\n    type_db_file = ctx.attr.typedb.files.to_list()[0]\n    args = [type_db_file.path, ctx.outputs.build.path]\n    ctx.actions.run(\n        executable = ctx.executable._proto_build_targets_gen,\n        arguments = args,\n        inputs = [type_db_file],\n        outputs = [ctx.outputs.build],\n        mnemonic = \"ApiBuildFile\",\n        use_default_shell_env = True,\n    )\n\napi_build_file = rule(\n    attrs = {\n        \"typedb\": attr.label(\n            doc = \"Type database label.\",\n            mandatory = True,\n        ),\n        \"_proto_build_targets_gen\": attr.label(\n            default = Label(\"//tools/type_whisperer:proto_build_targets_gen\"),\n            executable = True,\n            cfg = \"exec\",\n        ),\n    },\n    outputs = {\n        \"build\": \"BUILD.%{name}\",\n    },\n    implementation = _api_build_file,\n)\n"
  },
  {
    "path": "tools/type_whisperer/api_type_db.cc",
    "content": "#include \"tools/type_whisperer/api_type_db.h\"\n\n#include \"common/protobuf/protobuf.h\"\n\n#include \"tools/type_whisperer/api_type_db.pb.h\"\n#include \"udpa/annotations/migrate.pb.h\"\n\nnamespace Envoy {\nnamespace Tools {\nnamespace TypeWhisperer {\n\nextern const char* AllProtosPbText;\nextern const char* ApiTypeDbPbText;\n\nnamespace {\n\nProtobuf::DescriptorPool* loadDescriptorPool() {\n  Protobuf::FileDescriptorSet file_descriptor_set;\n  if (Protobuf::TextFormat::ParseFromString(AllProtosPbText, &file_descriptor_set)) {\n    auto* descriptor_pool = new Protobuf::DescriptorPool;\n    // The file descriptor pb_text isn't topologically sorted, so need to relax\n    // dependency checking here.\n    descriptor_pool->AllowUnknownDependencies();\n    for (const auto& file : file_descriptor_set.file()) {\n      descriptor_pool->BuildFile(file);\n    }\n    return descriptor_pool;\n  }\n  return nullptr;\n}\n\nconst Protobuf::DescriptorPool& getDescriptorPool() {\n  static auto* descriptor_pool = loadDescriptorPool();\n  return *descriptor_pool;\n}\n\ntools::type_whisperer::TypeDb* loadApiTypeDb() {\n  auto* api_type_db = new tools::type_whisperer::TypeDb;\n  if (Protobuf::TextFormat::ParseFromString(ApiTypeDbPbText, api_type_db)) {\n    return api_type_db;\n  }\n  return nullptr;\n}\n\nconst tools::type_whisperer::TypeDb& getApiTypeDb() {\n  static auto* api_type_db = loadApiTypeDb();\n  return *api_type_db;\n}\n\n} // namespace\n\nabsl::optional<TypeInformation>\nApiTypeDb::getExistingTypeInformation(const std::string& type_name) {\n  auto it = getApiTypeDb().types().find(type_name);\n  if (it == getApiTypeDb().types().end()) {\n    return {};\n  }\n  return absl::make_optional<TypeInformation>(type_name, it->second.proto_path(), false);\n}\n\nabsl::optional<TypeInformation> ApiTypeDb::getLatestTypeInformation(const std::string& type_name) {\n  std::string latest_type_name;\n  const tools::type_whisperer::TypeDbDescription* 
latest_type_desc{};\n  std::string current_type_name = type_name;\n  while (true) {\n    auto it = getApiTypeDb().types().find(current_type_name);\n    if (it == getApiTypeDb().types().end()) {\n      break;\n    }\n    latest_type_name = it->first;\n    latest_type_desc = &it->second;\n    current_type_name = it->second.next_version_type_name();\n  }\n  if (latest_type_desc == nullptr) {\n    return {};\n  }\n  const auto* enum_desc = getDescriptorPool().FindEnumTypeByName(type_name);\n  if (enum_desc != nullptr) {\n    auto result = absl::make_optional<TypeInformation>(latest_type_name,\n                                                       latest_type_desc->proto_path(), true);\n    for (int index = 0; index < enum_desc->value_count(); ++index) {\n      const auto* value = enum_desc->value(index);\n      if (value->options().HasExtension(udpa::annotations::enum_value_migrate)) {\n        result->renames_[value->name()] =\n            value->options().GetExtension(udpa::annotations::enum_value_migrate).rename();\n      } else if (value->options().deprecated()) {\n        result->renames_[value->name()] = \"hidden_envoy_deprecated_\" + value->name();\n      }\n    }\n    return result;\n  }\n  const auto* message_desc = getDescriptorPool().FindMessageTypeByName(type_name);\n  if (message_desc != nullptr) {\n    auto result = absl::make_optional<TypeInformation>(latest_type_name,\n                                                       latest_type_desc->proto_path(), false);\n    for (int index = 0; index < message_desc->field_count(); ++index) {\n      const auto* field = message_desc->field(index);\n      if (field->options().HasExtension(udpa::annotations::field_migrate)) {\n        result->renames_[field->name()] =\n            field->options().GetExtension(udpa::annotations::field_migrate).rename();\n      } else if (field->options().deprecated()) {\n        result->renames_[field->name()] = \"hidden_envoy_deprecated_\" + field->name();\n      }\n    }\n    
return result;\n  }\n  return {};\n}\n\n} // namespace TypeWhisperer\n} // namespace Tools\n} // namespace Envoy\n"
  },
  {
    "path": "tools/type_whisperer/api_type_db.h",
    "content": "#pragma once\n\n#include <string>\n\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/optional.h\"\n\nnamespace Envoy {\nnamespace Tools {\nnamespace TypeWhisperer {\n\n// C++ representation of TypeDbDescription.\nstruct TypeInformation {\n  TypeInformation(absl::string_view type_name, absl::string_view proto_path, bool enum_type)\n      : type_name_(type_name), proto_path_(proto_path), enum_type_(enum_type) {}\n\n  // Type's name in the next major version of the API.\n  const std::string type_name_;\n\n  // Path to .proto from API root.\n  const std::string proto_path_;\n\n  // Is this an enum type?\n  const bool enum_type_;\n\n  // Field or enum value renames.\n  absl::node_hash_map<std::string, std::string> renames_;\n};\n\n// We don't expose the raw API type database to consumers, as this requires RTTI\n// and this may be linked in environments where this is not available (e.g.\n// libtool binaries).\nclass ApiTypeDb {\npublic:\n  static absl::optional<TypeInformation> getExistingTypeInformation(const std::string& type_name);\n  static absl::optional<TypeInformation> getLatestTypeInformation(const std::string& type_name);\n};\n\n} // namespace TypeWhisperer\n} // namespace Tools\n} // namespace Envoy\n"
  },
  {
    "path": "tools/type_whisperer/api_type_db.proto",
    "content": "syntax = \"proto3\";\n\npackage tools.type_whisperer;\n\nmessage TypeDbDescription {\n  // Fully qualified package name.\n  string qualified_package = 2;\n\n  // Path to .proto from API root.\n  string proto_path = 3;\n\n  // Type's name in the next major version of the API.\n  string next_version_type_name = 4;\n}\n\nmessage NextVersionFileDescription {\n  // Next version qualified package name.\n  string qualified_package = 1;\n\n  // Next version proto file path.\n  string proto_path = 2;\n}\n\n// API type database. This describes the types in the API and the upgrade path\n// of types, packages and proto paths across major versions of the API.\nmessage TypeDb {\n  // Map from fully qualified type name to description.\n  map<string, TypeDbDescription> types = 1;\n\n  // Map from .proto path to the proto's path in the next major version of the\n  // API.\n  map<string, NextVersionFileDescription> next_version_protos = 2;\n}\n"
  },
  {
    "path": "tools/type_whisperer/file_descriptor_set_text.bzl",
    "content": "load(\"@rules_proto//proto:defs.bzl\", \"ProtoInfo\")\n\ndef _file_descriptor_set_text(ctx):\n    file_descriptor_sets = depset()\n    for dep in ctx.attr.deps:\n        file_descriptor_sets = depset(transitive = [\n            file_descriptor_sets,\n            dep[ProtoInfo].transitive_descriptor_sets,\n        ])\n\n    args = [ctx.outputs.pb_text.path]\n    for dep in file_descriptor_sets.to_list():\n        ws_name = dep.owner.workspace_name\n        if (not ws_name) or ws_name in ctx.attr.proto_repositories or ctx.attr.with_external_deps:\n            args.append(dep.path)\n\n    ctx.actions.run(\n        executable = ctx.executable._file_descriptor_set_text_gen,\n        arguments = args,\n        inputs = file_descriptor_sets,\n        outputs = [ctx.outputs.pb_text],\n        mnemonic = \"FileDescriptorSetTextGen\",\n        use_default_shell_env = True,\n    )\n\nfile_descriptor_set_text = rule(\n    attrs = {\n        \"deps\": attr.label_list(\n            doc = \"List of all proto_library deps to be included.\",\n        ),\n        \"proto_repositories\": attr.string_list(\n            default = [\"envoy_api_canonical\"],\n            allow_empty = False,\n        ),\n        \"with_external_deps\": attr.bool(\n            doc = \"Include file descriptors for external dependencies.\",\n            default = False,\n        ),\n        \"_file_descriptor_set_text_gen\": attr.label(\n            default = Label(\"//tools/type_whisperer:file_descriptor_set_text_gen\"),\n            executable = True,\n            cfg = \"exec\",\n        ),\n    },\n    outputs = {\n        \"pb_text\": \"%{name}.pb_text\",\n    },\n    implementation = _file_descriptor_set_text,\n)\n"
  },
  {
    "path": "tools/type_whisperer/file_descriptor_set_text_gen.py",
    "content": "# Generate a text proto from a given list of FileDescriptorSets.\n# TODO(htuch): switch to base64 encoded binary output in the future,\n# this will avoid needing to deal with option preserving imports below.\n\nimport sys\n\nfrom google.protobuf import descriptor_pb2\n\n# Needed to avoid annotation option stripping during pb_text generation.\nfrom udpa.annotations import migrate_pb2\n\n\ndef Decode(path):\n  with open(path, 'rb') as f:\n    file_set = descriptor_pb2.FileDescriptorSet()\n    file_set.ParseFromString(f.read())\n    return str(file_set)\n\n\nif __name__ == '__main__':\n  output_path = sys.argv[1]\n  input_paths = sys.argv[2:]\n  pb_text = '\\n'.join(Decode(path) for path in input_paths)\n  with open(output_path, 'w') as f:\n    f.write(pb_text)\n"
  },
  {
    "path": "tools/type_whisperer/proto_build_targets_gen.py",
    "content": "# Generate api/BUILD based on API type database. This contains target for v2, v3\n# and all API protos. This is not the ideal way to be generating docs, see\n# https://github.com/envoyproxy/envoy/issues/10311#issuecomment-603518498.\n\nimport os\nimport re\nimport string\nimport sys\n\nfrom tools.type_whisperer.api_type_db_pb2 import TypeDb\n\nfrom google.protobuf import text_format\n\nV2_REGEXES = list(\n    map(re.compile, [\n        r'envoy[\\w\\.]*\\.(v1alpha\\d?|v1)',\n        r'envoy[\\w\\.]*\\.(v2alpha\\d?|v2)',\n        r'envoy\\.type\\.matcher$',\n        r'envoy\\.type$',\n        r'envoy\\.config\\.cluster\\.redis',\n        r'envoy\\.config\\.retry\\.previous_priorities',\n    ]))\n\nV3_REGEX = re.compile(r'envoy[\\w\\.]*\\.(v3alpha|v3)')\n\nAPI_BUILD_FILE_TEMPLATE = string.Template(\n    \"\"\"# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.\n\nload(\"@rules_proto//proto:defs.bzl\", \"proto_library\")\n\nlicenses([\"notice\"])  # Apache 2\n\nproto_library(\n    name = \"v2_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n$v2_deps\n    ],\n)\n\nproto_library(\n    name = \"v3_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n$v3_deps\n    ],\n)\n\nproto_library(\n    name = \"all_protos\",\n    visibility = [\"//visibility:public\"],\n    deps = [\n        \":v2_protos\",\n        \":v3_protos\",\n    ],\n)\n\"\"\")\n\n\ndef LoadTypeDb(type_db_path):\n  type_db = TypeDb()\n  with open(type_db_path, 'r') as f:\n    text_format.Merge(f.read(), type_db)\n  return type_db\n\n\n# Key sort function to achieve consistent results with buildifier.\ndef BuildOrderKey(key):\n  return key.replace(':', '!')\n\n\n# Remove any packages that are definitely non-root, e.g. 
annotations.\ndef FilterPkgs(pkgs):\n\n  def AllowedPkg(pkg):\n    return not pkg.startswith('envoy.annotations')\n\n  return filter(AllowedPkg, pkgs)\n\n\ndef DepsFormat(pkgs):\n  return '\\n'.join('        \"//%s:pkg\",' % p.replace('.', '/')\n                   for p in sorted(FilterPkgs(pkgs), key=BuildOrderKey))\n\n\ndef IsV2Package(pkg):\n  for regex in V2_REGEXES:\n    if regex.match(pkg):\n      return True\n  return False\n\n\ndef IsV3Package(pkg):\n  return V3_REGEX.match(pkg) is not None\n\n\nif __name__ == '__main__':\n  type_db_path, output_path = sys.argv[1:]\n  type_db = LoadTypeDb(type_db_path)\n  # TODO(htuch): generalize to > 2 versions\n  v2_packages = set([])\n  v3_packages = set([])\n  for desc in type_db.types.values():\n    pkg = desc.qualified_package\n    if IsV3Package(pkg):\n      v3_packages.add(pkg)\n      continue\n    if IsV2Package(pkg):\n      v2_packages.add(pkg)\n      # Special case for v2 packages that are part of v3 (still active)\n      if not desc.next_version_type_name:\n        v3_packages.add(pkg)\n  # Generate BUILD file.\n  build_file_contents = API_BUILD_FILE_TEMPLATE.substitute(v2_deps=DepsFormat(v2_packages),\n                                                           v3_deps=DepsFormat(v3_packages))\n  with open(output_path, 'w') as f:\n    f.write(build_file_contents)\n"
  },
  {
    "path": "tools/type_whisperer/proto_cc_source.bzl",
    "content": "def _proto_cc_source(ctx):\n    pb_text_set = depset()\n    for src in ctx.attr.deps:\n        pb_text_set = depset(transitive = [pb_text_set, src.files])\n    args = [ctx.attr.constant, ctx.outputs.cc.path]\n    for pb_text in pb_text_set.to_list():\n        args.append(pb_text.path)\n    ctx.actions.run(\n        executable = ctx.executable._proto_cc_source_gen,\n        arguments = args,\n        inputs = pb_text_set,\n        outputs = [ctx.outputs.cc],\n        mnemonic = \"ProtoCcSourceGen\",\n        use_default_shell_env = True,\n    )\n\nproto_cc_source = rule(\n    attrs = {\n        \"constant\": attr.string(\n            doc = \"Name of C++ constant definition.\",\n            mandatory = True,\n        ),\n        \"deps\": attr.label_list(\n            doc = \"List of all text protos to be included.\",\n        ),\n        \"proto_repositories\": attr.string_list(\n            default = [\"envoy_api_canonical\"],\n            allow_empty = False,\n        ),\n        \"_proto_cc_source_gen\": attr.label(\n            default = Label(\"//tools/type_whisperer:proto_cc_source_gen\"),\n            executable = True,\n            cfg = \"exec\",\n        ),\n    },\n    outputs = {\n        \"cc\": \"%{name}.cc\",\n    },\n    implementation = _proto_cc_source,\n)\n"
  },
  {
    "path": "tools/type_whisperer/proto_cc_source_gen.py",
    "content": "# Generate a C++ source file with given text protos embedded.\n\nimport pathlib\nimport string\nimport sys\n\nCC_SOURCE_TEMPLATE = string.Template(\"\"\"namespace Envoy {\nnamespace Tools {\nnamespace TypeWhisperer {\n\nconst char* $constant = R\"EOF($pb_text)EOF\";\n\n}\n}\n}\n\"\"\")\n\nif __name__ == '__main__':\n  constant_name = sys.argv[1]\n  output_path = sys.argv[2]\n  input_paths = sys.argv[3:]\n  pb_text = '\\n'.join(pathlib.Path(path).read_text() for path in input_paths)\n  with open(output_path, 'w') as f:\n    f.write(CC_SOURCE_TEMPLATE.substitute(constant=constant_name, pb_text=pb_text))\n"
  },
  {
    "path": "tools/type_whisperer/type_database.bzl",
    "content": "load(\":type_whisperer.bzl\", \"type_whisperer_aspect\")\n\ndef _type_database_impl(ctx):\n    type_db_deps = []\n    for target in ctx.attr.targets:\n        type_db_deps.append(target[OutputGroupInfo].types_pb_text)\n    type_db_deps = depset(transitive = type_db_deps)\n\n    args = [ctx.outputs.pb_text.path]\n    for dep in type_db_deps.to_list():\n        ws_name = dep.owner.workspace_name\n        if (not ws_name) or ws_name in ctx.attr.proto_repositories:\n            args.append(dep.path)\n\n    ctx.actions.run(\n        executable = ctx.executable._type_db_gen,\n        arguments = args,\n        inputs = type_db_deps,\n        outputs = [ctx.outputs.pb_text],\n        mnemonic = \"TypeDbGen\",\n        use_default_shell_env = True,\n    )\n\ntype_database = rule(\n    attrs = {\n        \"targets\": attr.label_list(\n            aspects = [type_whisperer_aspect],\n            doc = \"List of all proto_library target to be included.\",\n        ),\n        \"proto_repositories\": attr.string_list(\n            default = [\"envoy_api_canonical\"],\n            allow_empty = False,\n        ),\n        \"_type_db_gen\": attr.label(\n            default = Label(\"//tools/type_whisperer:typedb_gen\"),\n            executable = True,\n            cfg = \"exec\",\n        ),\n    },\n    outputs = {\n        \"pb_text\": \"%{name}.pb_text\",\n    },\n    implementation = _type_database_impl,\n)\n"
  },
  {
    "path": "tools/type_whisperer/type_whisperer.bzl",
    "content": "load(\"//tools/api_proto_plugin:plugin.bzl\", \"api_proto_plugin_aspect\", \"api_proto_plugin_impl\")\n\ndef _type_whisperer_impl(target, ctx):\n    return api_proto_plugin_impl(target, ctx, \"types_pb_text\", \"TypeWhisperer\", [\".types.pb_text\"])\n\n# Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html)\n# that can be invoked from the CLI to perform API type analysis via //tools/type_whisperer for\n# proto_library targets. Example use:\n#\n#   bazel build //api --aspects tools/type_whisperer/type_whisperer.bzl%type_whisperer_aspect \\\\\n#       --output_groups=types_pb_text\ntype_whisperer_aspect = api_proto_plugin_aspect(\"//tools/type_whisperer\", _type_whisperer_impl)\n"
  },
  {
    "path": "tools/type_whisperer/type_whisperer.py",
    "content": "# protoc plugin to map from FileDescriptorProtos to a tools.type_whisperer.Types\n# proto. This is the type information for a single .proto, consumed by\n# typedb_gen.py.\n\nfrom tools.api_proto_plugin import plugin\nfrom tools.api_proto_plugin import visitor\n\nfrom tools.type_whisperer.types_pb2 import Types\nfrom udpa.annotations import migrate_pb2\nfrom udpa.annotations import status_pb2\n\n\nclass TypeWhispererVisitor(visitor.Visitor):\n  \"\"\"Visitor to compute type information from a FileDescriptor proto.\n\n  See visitor.Visitor for visitor method docs comments.\n  \"\"\"\n\n  def __init__(self):\n    super(TypeWhispererVisitor, self).__init__()\n    self._types = Types()\n\n  def VisitService(self, service_proto, type_context):\n    pass\n\n  def VisitEnum(self, enum_proto, type_context):\n    type_desc = self._types.types[type_context.name]\n    type_desc.next_version_upgrade = any(v.options.deprecated for v in enum_proto.value)\n    type_desc.deprecated_type = type_context.deprecated\n\n  def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):\n    type_desc = self._types.types[type_context.name]\n    type_desc.map_entry = msg_proto.options.map_entry\n    type_desc.deprecated_type = type_context.deprecated\n    type_deps = set([])\n    for f in msg_proto.field:\n      if f.type_name.startswith('.'):\n        type_deps.add(f.type_name[1:])\n      if f.options.deprecated:\n        type_desc.next_version_upgrade = True\n    type_desc.type_dependencies.extend(type_deps)\n\n  def VisitFile(self, file_proto, type_context, services, msgs, enums):\n    next_version_package = ''\n    if file_proto.options.HasExtension(migrate_pb2.file_migrate):\n      next_version_package = file_proto.options.Extensions[migrate_pb2.file_migrate].move_to_package\n    for t in self._types.types.values():\n      t.qualified_package = file_proto.package\n      t.proto_path = file_proto.name\n      t.active = file_proto.options.Extensions[\n        
  status_pb2.file_status].package_version_status == status_pb2.ACTIVE\n      if next_version_package:\n        t.next_version_package = next_version_package\n        t.next_version_upgrade = True\n    # Return in text proto format. This makes things easier to debug, these\n    # don't need to be compact as they are only interim build artifacts.\n    return str(self._types)\n\n\ndef Main():\n  plugin.Plugin([\n      plugin.DirectOutputDescriptor('.types.pb_text', TypeWhispererVisitor),\n  ])\n\n\nif __name__ == '__main__':\n  Main()\n"
  },
  {
    "path": "tools/type_whisperer/typedb_gen.py",
    "content": "# Python binary that stitches together the various tools.type_whisperer.Types\n# protos generated by type_whisperer.py for the entire API. The result is a type\n# database in tools.type_whisperer.TypeDb format.\n\nimport re\nimport sys\n\nfrom google.protobuf import text_format\n\nfrom tools.type_whisperer.api_type_db_pb2 import TypeDb\nfrom tools.type_whisperer.types_pb2 import Types, TypeDescription\n\n# Regexes governing v3upgrades. TODO(htuch): The regex approach will have\n# to be rethought as we go beyond v3, this is WiP.\nTYPE_UPGRADE_REGEXES = [\n    (r'(envoy[\\w\\.]*\\.)(v3alpha|v3)', r'\\1v4alpha'),\n    (r'(envoy[\\w\\.]*\\.)(v1alpha\\d?|v1)', r'\\1v3'),\n    (r'(envoy[\\w\\.]*\\.)(v2alpha\\d?|v2)', r'\\1v3'),\n    # These are special cases, e.g. upgrading versionless packages.\n    ('envoy\\.type\\.matcher', 'envoy.type.matcher.v3'),\n    ('envoy\\.type', 'envoy.type.v3'),\n    ('envoy\\.config\\.cluster\\.redis', 'envoy.extensions.clusters.redis.v3'),\n    ('envoy\\.config\\.retry\\.previous_priorities',\n     'envoy.extensions.retry.priority.previous_priorities.v3'),\n]\n\n# These packages must be upgraded to v3, even if there are no protos\n# modified. 
This is largely for situations where we know we want to be doing\n# structural change to have the APIs follow the x/y//vN/z.proto structure of\n# organization.\nPKG_FORCE_UPGRADE = [\n    'envoy.v2',\n    'envoy.api.v2',\n    'envoy.api.v2.auth',\n    'envoy.api.v2.cluster',\n    'envoy.api.v2.core',\n    'envoy.api.v2.endpoint',\n    'envoy.api.v2.listener',\n    'envoy.api.v2.ratelimit',\n    'envoy.api.v2.route',\n    'envoy.type',\n    'envoy.type.matcher',\n]\n\n\ndef UpgradedPackage(type_desc):\n  \"\"\"Determine upgrade package for a type.\"\"\"\n  if type_desc.next_version_package:\n    return type_desc.next_version_package\n\n  for pattern, repl in TYPE_UPGRADE_REGEXES:\n    s = re.sub(pattern, repl, type_desc.qualified_package)\n    if s != type_desc.qualified_package:\n      return s\n  raise ValueError('{} is not upgradable'.format(type_desc.qualified_package))\n\n\ndef UpgradedType(type_name, type_desc):\n  \"\"\"Determine upgraded type name.\"\"\"\n  upgraded_package = UpgradedPackage(type_desc)\n  return type_name.replace(type_desc.qualified_package, upgraded_package)\n\n\ndef UpgradedPath(proto_path, upgraded_package):\n  \"\"\"Determine upgraded API .proto path.\"\"\"\n  return '/'.join([upgraded_package.replace('.', '/'), proto_path.split('/')[-1]])\n\n\ndef UpgradedTypeWithDescription(type_name, type_desc):\n  upgrade_type_desc = TypeDescription()\n  upgrade_type_desc.qualified_package = UpgradedPackage(type_desc)\n  upgrade_type_desc.proto_path = UpgradedPath(type_desc.proto_path,\n                                              upgrade_type_desc.qualified_package)\n  upgrade_type_desc.deprecated_type = type_desc.deprecated_type\n  upgrade_type_desc.map_entry = type_desc.map_entry\n  return (UpgradedType(type_name, type_desc), upgrade_type_desc)\n\n\ndef LoadTypes(path):\n  \"\"\"Load a tools.type_whisperer.Types proto from the filesystem.\n\n  Args:\n    path: filesystem path for a file in text proto format.\n\n  Returns:\n    
tools.type_whisperer.Types proto loaded from path.\n  \"\"\"\n  types = Types()\n  with open(path, 'r') as f:\n    text_format.Merge(f.read(), types)\n  return types\n\n\ndef NextVersionUpgrade(type_name, type_map, next_version_upgrade_memo, visited=None):\n  \"\"\"Does a given type require upgrade between major version?\n\n  Performs depth-first search through type dependency graph for any upgraded\n  types that will force type_name to be upgraded.\n\n  Args:\n    type_name: fully qualified type name.\n    type_map: map from type name to tools.type_whisperer.TypeDescription.\n    next_version_upgrade_memo: a memo dictionary to avoid revisiting nodes\n      across invocations.\n    visited: a set of visited nodes in the current search, used to detect loops.\n\n  Returns:\n    A boolean indicating whether the type requires upgrade.\n  \"\"\"\n  if not visited:\n    visited = set([])\n  # Ignore non-API types.\n  if not type_name.startswith('envoy'):\n    return False\n  # If we have a loop, we can't learn anything new by circling around again.\n  if type_name in visited:\n    return False\n  visited = visited.union(set([type_name]))\n  # If we have seen this type in a previous NextVersionUpgrade(), use that\n  # result.\n  if type_name in next_version_upgrade_memo:\n    return next_version_upgrade_memo[type_name]\n  type_desc = type_map[type_name]\n  # Force upgrade packages that we enumerate.\n  if type_desc.qualified_package in PKG_FORCE_UPGRADE:\n    return True\n  # Recurse and memoize.\n  should_upgrade = type_desc.next_version_upgrade or any(\n      NextVersionUpgrade(d, type_map, next_version_upgrade_memo, visited)\n      for d in type_desc.type_dependencies)\n  next_version_upgrade_memo[type_name] = should_upgrade\n  return should_upgrade\n\n\nif __name__ == '__main__':\n  # Output path for type database.\n  out_path = sys.argv[1]\n\n  # Load type descriptors for each type whisper\n  type_desc_paths = sys.argv[2:]\n  type_whispers = map(LoadTypes, 
type_desc_paths)\n\n  # Aggregate type descriptors to a single type map.\n  type_map = dict(sum([list(t.types.items()) for t in type_whispers], []))\n  all_pkgs = set([type_desc.qualified_package for type_desc in type_map.values()])\n\n  # Determine via DFS on each type descriptor and its deps which packages require upgrade.\n  next_version_upgrade_memo = {}\n  next_versions_pkgs = set([\n      type_desc.qualified_package\n      for type_name, type_desc in type_map.items()\n      if NextVersionUpgrade(type_name, type_map, next_version_upgrade_memo)\n  ]).union(set(['envoy.config.retry.previous_priorities', 'envoy.config.cluster.redis']))\n\n  # Generate type map entries for upgraded types. We run this twice to allow\n  # things like a v2 deprecated map field's synthesized map entry to forward\n  # propagate to v4alpha (for shadowing purposes).\n  for _ in range(2):\n    type_map.update([\n        UpgradedTypeWithDescription(type_name, type_desc)\n        for type_name, type_desc in type_map.items()\n        if type_desc.qualified_package in next_versions_pkgs and\n        (type_desc.active or type_desc.deprecated_type or type_desc.map_entry)\n    ])\n\n  # Generate the type database proto. To provide some stability across runs, in\n  # terms of the emitted proto binary blob that we track in git, we sort before\n  # loading the map entries in the proto. 
This seems to work in practice, but\n  # has no guarantees.\n  type_db = TypeDb()\n  next_proto_info = {}\n  for t in sorted(type_map):\n    type_desc = type_db.types[t]\n    type_desc.qualified_package = type_map[t].qualified_package\n    type_desc.proto_path = type_map[t].proto_path\n    if type_desc.qualified_package in next_versions_pkgs:\n      type_desc.next_version_type_name = UpgradedType(t, type_map[t])\n      assert (type_desc.next_version_type_name != t)\n      next_proto_info[type_map[t].proto_path] = (\n          type_map[type_desc.next_version_type_name].proto_path,\n          type_map[type_desc.next_version_type_name].qualified_package)\n  for proto_path, (next_proto_path, next_package) in sorted(next_proto_info.items()):\n    type_db.next_version_protos[proto_path].proto_path = next_proto_path\n    type_db.next_version_protos[proto_path].qualified_package = next_package\n\n  # Write out proto text.\n  with open(out_path, 'w') as f:\n    f.write(str(type_db))\n"
  },
  {
    "path": "tools/type_whisperer/types.proto",
    "content": "syntax = \"proto3\";\n\npackage tools.type_whisperer;\n\n// Description for a single type.\nmessage TypeDescription {\n  // Fully qualified package name.\n  string qualified_package = 2;\n\n  // Path to .proto from API root.\n  string proto_path = 3;\n\n  // Fully qualified type names for type dependencies.\n  repeated string type_dependencies = 4;\n\n  // Does this type have a breaking change? If so, it needs to be upgraded at\n  // the next major version.\n  bool next_version_upgrade = 5;\n\n  // The package of the type in next version\n  string next_version_package = 6;\n\n  // Is this a type in an active package?\n  bool active = 7;\n\n  // Is this type a synthesized map entry?\n  bool map_entry = 8;\n\n  // Is this type deprecated?\n  bool deprecated_type = 9;\n}\n\nmessage Types {\n  // Map from fully qualified type name to description.\n  map<string, TypeDescription> types = 1;\n}\n"
  },
  {
    "path": "tools/vscode/README.md",
    "content": "# Tools for VSCode\n\nThis directory contains tools which are useful for developers using VSCode.\n\n## Recommended VSCode setup\n\nIt is recommended to use [devcontainer](../.devcontainer/README.md), or setting up an equivalent\nenvironment. Recommended extensions and settings are listed in\n[devcontainer.json](../.devcontainer/devcontainer.json).\n\n## Refresh compilation database\n\n`tools/vscode/refresh_compdb.sh` is a script to refresh the compilation database. It may take a while\nto generate all dependencies for code completion, such as protobuf generated code and external dependencies.\nIf you change proto definitions, or change any bazel structure, rerun this to get code completion\nworking correctly.\n\nNote that it is recommended to disable the VSCode Microsoft C/C++ extension and use `vscode-clangd` instead for\nC/C++ code completion.\n\n## Generate debug config\n\n`tools/vscode/generate_debug_config.py` is a script to generate a VSCode debug config in `.vscode/launch.json`.\nThe generated config will be named `<debugger type> <bazel target>`.\n\nFor example:\n```\ntools/vscode/generate_debug_config.py //source/exe:envoy-static --args \"-c envoy.yaml\"\n```\n\ngenerates an entry named `gdb //source/exe:envoy-static` for GDB in `launch.json`. It can also be\nused to generate configs for tests.\n\nThe generated `gdb` config is compatible with the [Native Debug](https://marketplace.visualstudio.com/items?itemName=webfreak.debug) extension,\nand the `lldb` config is compatible with the [VSCode LLDB](https://marketplace.visualstudio.com/items?itemName=vadimcn.vscode-lldb) extension.\n"
  },
  {
    "path": "tools/vscode/generate_debug_config.py",
    "content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport pathlib\nimport shlex\nimport shutil\nimport subprocess\n\nBAZEL_OPTIONS = shlex.split(os.environ.get(\"BAZEL_BUILD_OPTIONS\", \"\"))\n\n\ndef bazelInfo(name, bazel_extra_options=[]):\n  return subprocess.check_output([\"bazel\", \"info\", name] + BAZEL_OPTIONS +\n                                 bazel_extra_options).decode().strip()\n\n\ndef getWorkspace():\n  return bazelInfo(\"workspace\")\n\n\ndef getExecutionRoot(workspace):\n  # If compilation database exists, use its execution root, this allows setting\n  # breakpoints with clangd navigation easier.\n  try:\n    compdb = pathlib.Path(workspace, \"compile_commands.json\").read_text()\n    return json.loads(compdb)[0]['directory']\n  except:\n    return bazelInfo(\"execution_root\")\n\n\ndef binaryPath(bazel_bin, target):\n  return pathlib.Path(\n      bazel_bin,\n      *[s for s in target.replace('@', 'external/').replace(':', '/').split('/') if s != ''])\n\n\ndef buildBinaryWithDebugInfo(target):\n  targets = [target, target + \".dwp\"]\n  subprocess.check_call([\"bazel\", \"build\", \"-c\", \"dbg\"] + BAZEL_OPTIONS + targets)\n\n  bazel_bin = bazelInfo(\"bazel-bin\", [\"-c\", \"dbg\"])\n  return binaryPath(bazel_bin, target)\n\n\ndef getLaunchJson(workspace):\n  try:\n    return json.loads(pathlib.Path(workspace, \".vscode\", \"launch.json\").read_text())\n  except:\n    return {\"version\": \"0.2.0\"}\n\n\ndef writeLaunchJson(workspace, launch):\n  launch_json = pathlib.Path(workspace, \".vscode\", \"launch.json\")\n  backup_launch_json = pathlib.Path(workspace, \".vscode\", \"launch.json.bak\")\n  if launch_json.exists():\n    shutil.move(str(launch_json), str(backup_launch_json))\n\n  launch_json.write_text(json.dumps(launch, indent=4))\n\n\ndef gdbConfig(target, binary, workspace, execroot, arguments):\n  return {\n      \"name\": \"gdb \" + target,\n      \"request\": \"launch\",\n      \"arguments\": arguments,\n 
     \"type\": \"gdb\",\n      \"target\": str(binary),\n      \"debugger_args\": [\"--directory=\" + execroot],\n      \"cwd\": \"${workspaceFolder}\",\n      \"valuesFormatting\": \"disabled\"\n  }\n\n\ndef lldbConfig(target, binary, workspace, execroot, arguments):\n  return {\n      \"name\": \"lldb \" + target,\n      \"program\": str(binary),\n      \"sourceMap\": {\n          \"/proc/self/cwd\": workspace,\n          \"/proc/self/cwd/external\": execroot + \"/external\",\n          \"/proc/self/cwd/bazel-out\": execroot + \"/bazel-out\"\n      },\n      \"cwd\": \"${workspaceFolder}\",\n      \"args\": shlex.split(arguments),\n      \"type\": \"lldb\",\n      \"request\": \"launch\"\n  }\n\n\ndef addToLaunchJson(target, binary, workspace, execroot, arguments, debugger_type):\n  launch = getLaunchJson(workspace)\n  new_config = {}\n  if debugger_type == \"lldb\":\n    new_config = lldbConfig(target, binary, workspace, execroot, arguments)\n  else:\n    new_config = gdbConfig(target, binary, workspace, execroot, arguments)\n\n  configurations = launch.get(\"configurations\", [])\n  for config in configurations:\n    if config.get(\"name\", None) == new_config[\"name\"]:\n      config.clear()\n      config.update(new_config)\n      break\n  else:\n    configurations.append(new_config)\n\n  launch[\"configurations\"] = configurations\n  writeLaunchJson(workspace, launch)\n\n\nif __name__ == \"__main__\":\n  parser = argparse.ArgumentParser(description='Build and generate launch config for VSCode')\n  parser.add_argument('--debugger', default=\"gdb\")\n  parser.add_argument('--args', default='')\n  parser.add_argument('target')\n  args = parser.parse_args()\n\n  workspace = getWorkspace()\n  execution_root = getExecutionRoot(workspace)\n  debug_binary = buildBinaryWithDebugInfo(args.target)\n  addToLaunchJson(args.target, debug_binary, workspace, execution_root, args.args, args.debugger)\n"
  },
  {
    "path": "tools/vscode/refresh_compdb.sh",
    "content": "#!/usr/bin/env bash\n\n[[ -z \"${SKIP_PROTO_FORMAT}\" ]] && tools/proto_format/proto_format.sh fix\n\n# Setting TEST_TMPDIR here so the compdb headers won't be overwritten by another bazel run\nTEST_TMPDIR=${BUILD_DIR:-/tmp}/envoy-compdb tools/gen_compilation_database.py\n\n# Kill clangd to reload the compilation database\nkillall -v /opt/llvm/bin/clangd\n"
  }
]